diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 5b55a1918aaa..afcd9905c434 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -1,6 +1,6 @@ { "ImportPath": "k8s.io/minikube", - "GoVersion": "go1.8", + "GoVersion": "go1.9", "GodepVersion": "v79", "Packages": [ "./..." @@ -23,33 +23,33 @@ }, { "ImportPath": "github.com/Azure/azure-sdk-for-go/arm/compute", - "Comment": "v10.0.4-beta-1-g786cc841", - "Rev": "786cc84138518bf7fd6d60e92fad1ac9d1a117ad" + "Comment": "v11.1.1-beta", + "Rev": "509eea43b93cec2f3f17acbe2578ef58703923f8" }, { "ImportPath": "github.com/Azure/azure-sdk-for-go/arm/containerregistry", - "Comment": "v10.0.4-beta-1-g786cc841", - "Rev": "786cc84138518bf7fd6d60e92fad1ac9d1a117ad" + "Comment": "v11.1.1-beta", + "Rev": "509eea43b93cec2f3f17acbe2578ef58703923f8" }, { "ImportPath": "github.com/Azure/azure-sdk-for-go/arm/disk", - "Comment": "v10.0.4-beta-1-g786cc841", - "Rev": "786cc84138518bf7fd6d60e92fad1ac9d1a117ad" + "Comment": "v11.1.1-beta", + "Rev": "509eea43b93cec2f3f17acbe2578ef58703923f8" }, { "ImportPath": "github.com/Azure/azure-sdk-for-go/arm/network", - "Comment": "v10.0.4-beta-1-g786cc841", - "Rev": "786cc84138518bf7fd6d60e92fad1ac9d1a117ad" + "Comment": "v11.1.1-beta", + "Rev": "509eea43b93cec2f3f17acbe2578ef58703923f8" }, { "ImportPath": "github.com/Azure/azure-sdk-for-go/arm/storage", - "Comment": "v10.0.4-beta-1-g786cc841", - "Rev": "786cc84138518bf7fd6d60e92fad1ac9d1a117ad" + "Comment": "v11.1.1-beta", + "Rev": "509eea43b93cec2f3f17acbe2578ef58703923f8" }, { "ImportPath": "github.com/Azure/azure-sdk-for-go/storage", - "Comment": "v10.0.4-beta-1-g786cc841", - "Rev": "786cc84138518bf7fd6d60e92fad1ac9d1a117ad" + "Comment": "v11.1.1-beta", + "Rev": "509eea43b93cec2f3f17acbe2578ef58703923f8" }, { "ImportPath": "github.com/Azure/go-ansiterm", @@ -61,33 +61,41 @@ }, { "ImportPath": "github.com/Azure/go-autorest/autorest", - "Comment": "v8.0.0", - "Rev": "58f6f26e200fa5dfb40c9cd1c83f3e2c860d779d" + "Comment": "v9.1.0", + "Rev": "e14a70c556c8e0db173358d1a903dca345a8e75e" }, { "ImportPath": "github.com/Azure/go-autorest/autorest/adal", - "Comment": "v8.0.0", - "Rev": "58f6f26e200fa5dfb40c9cd1c83f3e2c860d779d" + "Comment": "v9.1.0", + "Rev": "e14a70c556c8e0db173358d1a903dca345a8e75e" }, { "ImportPath": "github.com/Azure/go-autorest/autorest/azure", - "Comment": "v8.0.0", - "Rev": "58f6f26e200fa5dfb40c9cd1c83f3e2c860d779d" + "Comment": "v9.1.0", + "Rev": "e14a70c556c8e0db173358d1a903dca345a8e75e" }, { "ImportPath": "github.com/Azure/go-autorest/autorest/date", - "Comment": "v8.0.0", - "Rev": "58f6f26e200fa5dfb40c9cd1c83f3e2c860d779d" + "Comment": "v9.1.0", + "Rev": "e14a70c556c8e0db173358d1a903dca345a8e75e" }, { "ImportPath": "github.com/Azure/go-autorest/autorest/to", - "Comment": "v8.0.0", - "Rev": "58f6f26e200fa5dfb40c9cd1c83f3e2c860d779d" + "Comment": "v9.1.0", + "Rev": "e14a70c556c8e0db173358d1a903dca345a8e75e" }, { "ImportPath": "github.com/Azure/go-autorest/autorest/validation", - "Comment": "v8.0.0", - "Rev": "58f6f26e200fa5dfb40c9cd1c83f3e2c860d779d" + "Comment": "v9.1.0", + "Rev": "e14a70c556c8e0db173358d1a903dca345a8e75e" + }, + { + "ImportPath": "github.com/JeffAshton/win_pdh", + "Rev": "76bb4ee9f0ab50f77826f2a2ee7fb9d3880d6ec2" + }, + { + "ImportPath": "github.com/MakeNowJust/heredoc", + "Rev": "bb23615498cded5e105af4ce27de75b089cbe851" }, { "ImportPath": "github.com/Microsoft/go-winio", @@ -151,158 +159,163 @@ }, { "ImportPath": "github.com/aws/aws-sdk-go/aws", - "Comment": "v1.6.10", - "Rev": "63ce630574a5ec05ecd8e8de5cea16332a5a684d" + 
"Comment": "v1.12.7", + "Rev": "760741802ad40f49ae9fc4a69ef6706d2527d62e" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/awserr", - "Comment": "v1.6.10", - "Rev": "63ce630574a5ec05ecd8e8de5cea16332a5a684d" + "Comment": "v1.12.7", + "Rev": "760741802ad40f49ae9fc4a69ef6706d2527d62e" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/awsutil", - "Comment": "v1.6.10", - "Rev": "63ce630574a5ec05ecd8e8de5cea16332a5a684d" + "Comment": "v1.12.7", + "Rev": "760741802ad40f49ae9fc4a69ef6706d2527d62e" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/client", - "Comment": "v1.6.10", - "Rev": "63ce630574a5ec05ecd8e8de5cea16332a5a684d" + "Comment": "v1.12.7", + "Rev": "760741802ad40f49ae9fc4a69ef6706d2527d62e" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/client/metadata", - "Comment": "v1.6.10", - "Rev": "63ce630574a5ec05ecd8e8de5cea16332a5a684d" + "Comment": "v1.12.7", + "Rev": "760741802ad40f49ae9fc4a69ef6706d2527d62e" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/corehandlers", - "Comment": "v1.6.10", - "Rev": "63ce630574a5ec05ecd8e8de5cea16332a5a684d" + "Comment": "v1.12.7", + "Rev": "760741802ad40f49ae9fc4a69ef6706d2527d62e" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/credentials", - "Comment": "v1.6.10", - "Rev": "63ce630574a5ec05ecd8e8de5cea16332a5a684d" + "Comment": "v1.12.7", + "Rev": "760741802ad40f49ae9fc4a69ef6706d2527d62e" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds", - "Comment": "v1.6.10", - "Rev": "63ce630574a5ec05ecd8e8de5cea16332a5a684d" + "Comment": "v1.12.7", + "Rev": "760741802ad40f49ae9fc4a69ef6706d2527d62e" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds", - "Comment": "v1.6.10", - "Rev": "63ce630574a5ec05ecd8e8de5cea16332a5a684d" + "Comment": "v1.12.7", + "Rev": "760741802ad40f49ae9fc4a69ef6706d2527d62e" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/stscreds", - "Comment": "v1.6.10", - "Rev": "63ce630574a5ec05ecd8e8de5cea16332a5a684d" + "Comment": "v1.12.7", + "Rev": "760741802ad40f49ae9fc4a69ef6706d2527d62e" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/defaults", - "Comment": "v1.6.10", - "Rev": "63ce630574a5ec05ecd8e8de5cea16332a5a684d" + "Comment": "v1.12.7", + "Rev": "760741802ad40f49ae9fc4a69ef6706d2527d62e" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/ec2metadata", - "Comment": "v1.6.10", - "Rev": "63ce630574a5ec05ecd8e8de5cea16332a5a684d" + "Comment": "v1.12.7", + "Rev": "760741802ad40f49ae9fc4a69ef6706d2527d62e" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/endpoints", - "Comment": "v1.6.10", - "Rev": "63ce630574a5ec05ecd8e8de5cea16332a5a684d" + "Comment": "v1.12.7", + "Rev": "760741802ad40f49ae9fc4a69ef6706d2527d62e" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/request", - "Comment": "v1.6.10", - "Rev": "63ce630574a5ec05ecd8e8de5cea16332a5a684d" + "Comment": "v1.12.7", + "Rev": "760741802ad40f49ae9fc4a69ef6706d2527d62e" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/session", - "Comment": "v1.6.10", - "Rev": "63ce630574a5ec05ecd8e8de5cea16332a5a684d" + "Comment": "v1.12.7", + "Rev": "760741802ad40f49ae9fc4a69ef6706d2527d62e" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/signer/v4", - "Comment": "v1.6.10", - "Rev": "63ce630574a5ec05ecd8e8de5cea16332a5a684d" + "Comment": "v1.12.7", + "Rev": "760741802ad40f49ae9fc4a69ef6706d2527d62e" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/internal/shareddefaults", + "Comment": "v1.12.7", + "Rev": "760741802ad40f49ae9fc4a69ef6706d2527d62e" }, { "ImportPath": "github.com/aws/aws-sdk-go/private/protocol", - 
"Comment": "v1.6.10", - "Rev": "63ce630574a5ec05ecd8e8de5cea16332a5a684d" + "Comment": "v1.12.7", + "Rev": "760741802ad40f49ae9fc4a69ef6706d2527d62e" }, { "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/ec2query", - "Comment": "v1.6.10", - "Rev": "63ce630574a5ec05ecd8e8de5cea16332a5a684d" + "Comment": "v1.12.7", + "Rev": "760741802ad40f49ae9fc4a69ef6706d2527d62e" }, { "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil", - "Comment": "v1.6.10", - "Rev": "63ce630574a5ec05ecd8e8de5cea16332a5a684d" + "Comment": "v1.12.7", + "Rev": "760741802ad40f49ae9fc4a69ef6706d2527d62e" }, { "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/jsonrpc", - "Comment": "v1.6.10", - "Rev": "63ce630574a5ec05ecd8e8de5cea16332a5a684d" + "Comment": "v1.12.7", + "Rev": "760741802ad40f49ae9fc4a69ef6706d2527d62e" }, { "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query", - "Comment": "v1.6.10", - "Rev": "63ce630574a5ec05ecd8e8de5cea16332a5a684d" + "Comment": "v1.12.7", + "Rev": "760741802ad40f49ae9fc4a69ef6706d2527d62e" }, { "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil", - "Comment": "v1.6.10", - "Rev": "63ce630574a5ec05ecd8e8de5cea16332a5a684d" + "Comment": "v1.12.7", + "Rev": "760741802ad40f49ae9fc4a69ef6706d2527d62e" }, { "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/rest", - "Comment": "v1.6.10", - "Rev": "63ce630574a5ec05ecd8e8de5cea16332a5a684d" + "Comment": "v1.12.7", + "Rev": "760741802ad40f49ae9fc4a69ef6706d2527d62e" }, { "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil", - "Comment": "v1.6.10", - "Rev": "63ce630574a5ec05ecd8e8de5cea16332a5a684d" - }, - { - "ImportPath": "github.com/aws/aws-sdk-go/private/waiter", - "Comment": "v1.6.10", - "Rev": "63ce630574a5ec05ecd8e8de5cea16332a5a684d" + "Comment": "v1.12.7", + "Rev": "760741802ad40f49ae9fc4a69ef6706d2527d62e" }, { "ImportPath": "github.com/aws/aws-sdk-go/service/autoscaling", - "Comment": "v1.6.10", - "Rev": "63ce630574a5ec05ecd8e8de5cea16332a5a684d" + "Comment": "v1.12.7", + "Rev": "760741802ad40f49ae9fc4a69ef6706d2527d62e" }, { "ImportPath": "github.com/aws/aws-sdk-go/service/ec2", - "Comment": "v1.6.10", - "Rev": "63ce630574a5ec05ecd8e8de5cea16332a5a684d" + "Comment": "v1.12.7", + "Rev": "760741802ad40f49ae9fc4a69ef6706d2527d62e" }, { "ImportPath": "github.com/aws/aws-sdk-go/service/ecr", - "Comment": "v1.6.10", - "Rev": "63ce630574a5ec05ecd8e8de5cea16332a5a684d" + "Comment": "v1.12.7", + "Rev": "760741802ad40f49ae9fc4a69ef6706d2527d62e" }, { "ImportPath": "github.com/aws/aws-sdk-go/service/elb", - "Comment": "v1.6.10", - "Rev": "63ce630574a5ec05ecd8e8de5cea16332a5a684d" + "Comment": "v1.12.7", + "Rev": "760741802ad40f49ae9fc4a69ef6706d2527d62e" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/elbv2", + "Comment": "v1.12.7", + "Rev": "760741802ad40f49ae9fc4a69ef6706d2527d62e" }, { "ImportPath": "github.com/aws/aws-sdk-go/service/kms", - "Comment": "v1.6.10", - "Rev": "63ce630574a5ec05ecd8e8de5cea16332a5a684d" + "Comment": "v1.12.7", + "Rev": "760741802ad40f49ae9fc4a69ef6706d2527d62e" }, { "ImportPath": "github.com/aws/aws-sdk-go/service/sts", - "Comment": "v1.6.10", - "Rev": "63ce630574a5ec05ecd8e8de5cea16332a5a684d" + "Comment": "v1.12.7", + "Rev": "760741802ad40f49ae9fc4a69ef6706d2527d62e" }, { "ImportPath": "github.com/beorn7/perks/quantile", @@ -403,35 +416,84 @@ "ImportPath": "github.com/codedellemc/goscaleio/types/v1", "Rev": "20e2ce2cf8852dc78bd42b76698dcd8dcd77b7b1" }, + { + "ImportPath": 
"github.com/container-storage-interface/spec/lib/go/csi", + "Rev": "ec298903f94e1d6d954de121b28044a2e1fdbf48" + }, + { + "ImportPath": "github.com/containerd/containerd/api/services/containers/v1", + "Comment": "v1.0.0-beta.2-159-g27d450a0", + "Rev": "27d450a01bb533d7ebc5701eb52792565396b084" + }, + { + "ImportPath": "github.com/containerd/containerd/api/services/tasks/v1", + "Comment": "v1.0.0-beta.2-159-g27d450a0", + "Rev": "27d450a01bb533d7ebc5701eb52792565396b084" + }, + { + "ImportPath": "github.com/containerd/containerd/api/services/version/v1", + "Comment": "v1.0.0-beta.2-159-g27d450a0", + "Rev": "27d450a01bb533d7ebc5701eb52792565396b084" + }, + { + "ImportPath": "github.com/containerd/containerd/api/types", + "Comment": "v1.0.0-beta.2-159-g27d450a0", + "Rev": "27d450a01bb533d7ebc5701eb52792565396b084" + }, + { + "ImportPath": "github.com/containerd/containerd/api/types/task", + "Comment": "v1.0.0-beta.2-159-g27d450a0", + "Rev": "27d450a01bb533d7ebc5701eb52792565396b084" + }, + { + "ImportPath": "github.com/containerd/containerd/containers", + "Comment": "v1.0.0-beta.2-159-g27d450a0", + "Rev": "27d450a01bb533d7ebc5701eb52792565396b084" + }, + { + "ImportPath": "github.com/containerd/containerd/dialer", + "Comment": "v1.0.0-beta.2-159-g27d450a0", + "Rev": "27d450a01bb533d7ebc5701eb52792565396b084" + }, + { + "ImportPath": "github.com/containerd/containerd/errdefs", + "Comment": "v1.0.0-beta.2-159-g27d450a0", + "Rev": "27d450a01bb533d7ebc5701eb52792565396b084" + }, + { + "ImportPath": "github.com/containerd/containerd/namespaces", + "Comment": "v1.0.0-beta.2-159-g27d450a0", + "Rev": "27d450a01bb533d7ebc5701eb52792565396b084" + }, { "ImportPath": "github.com/containernetworking/cni/libcni", - "Comment": "v0.5.2", - "Rev": "137b4975ecab6e1f0c24c1e3c228a50a3cfba75e" + "Comment": "v0.6.0", + "Rev": "a7885cb6f8ab03fba07852ded351e4f5e7a112bf" }, { "ImportPath": "github.com/containernetworking/cni/pkg/invoke", - "Comment": "v0.5.2", - "Rev": "137b4975ecab6e1f0c24c1e3c228a50a3cfba75e" + "Comment": "v0.6.0", + "Rev": "a7885cb6f8ab03fba07852ded351e4f5e7a112bf" }, { "ImportPath": "github.com/containernetworking/cni/pkg/types", - "Comment": "v0.5.2", - "Rev": "137b4975ecab6e1f0c24c1e3c228a50a3cfba75e" + "Comment": "v0.6.0", + "Rev": "a7885cb6f8ab03fba07852ded351e4f5e7a112bf" }, { "ImportPath": "github.com/containernetworking/cni/pkg/types/020", - "Comment": "v0.5.2", - "Rev": "137b4975ecab6e1f0c24c1e3c228a50a3cfba75e" + "Comment": "v0.6.0", + "Rev": "a7885cb6f8ab03fba07852ded351e4f5e7a112bf" }, { "ImportPath": "github.com/containernetworking/cni/pkg/types/current", - "Comment": "v0.5.2", - "Rev": "137b4975ecab6e1f0c24c1e3c228a50a3cfba75e" + "Comment": "v0.6.0", + "Rev": "a7885cb6f8ab03fba07852ded351e4f5e7a112bf" }, { "ImportPath": "github.com/containernetworking/cni/pkg/version", - "Comment": "v0.5.2", - "Rev": "137b4975ecab6e1f0c24c1e3c228a50a3cfba75e" + "Comment": "v0.6.0", + "Rev": "a7885cb6f8ab03fba07852ded351e4f5e7a112bf" }, { "ImportPath": "github.com/containers/image/copy", @@ -868,6 +930,10 @@ "Comment": "v1.1.0-1-g782f496", "Rev": "782f4967f2dc4564575ca782fe2d04090b5faca8" }, + { + "ImportPath": "github.com/dchest/safefile", + "Rev": "855e8d98f1852d48dde521e0522408d1fe7e836a" + }, { "ImportPath": "github.com/dgrijalva/jwt-go", "Comment": "v3.0.0-4-g01aeca5", @@ -1093,71 +1159,6 @@ "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, - { - "ImportPath": "github.com/docker/engine-api/client", - "Comment": "v0.3.1-78-gdea108d", - 
"Rev": "dea108d3aa0c67d7162a3fd8aa65f38a430019fd" - }, - { - "ImportPath": "github.com/docker/engine-api/client/transport", - "Comment": "v0.3.1-78-gdea108d", - "Rev": "dea108d3aa0c67d7162a3fd8aa65f38a430019fd" - }, - { - "ImportPath": "github.com/docker/engine-api/client/transport/cancellable", - "Comment": "v0.3.1-78-gdea108d", - "Rev": "dea108d3aa0c67d7162a3fd8aa65f38a430019fd" - }, - { - "ImportPath": "github.com/docker/engine-api/types", - "Comment": "v0.3.1-78-gdea108d", - "Rev": "dea108d3aa0c67d7162a3fd8aa65f38a430019fd" - }, - { - "ImportPath": "github.com/docker/engine-api/types/blkiodev", - "Comment": "v0.3.1-78-gdea108d", - "Rev": "dea108d3aa0c67d7162a3fd8aa65f38a430019fd" - }, - { - "ImportPath": "github.com/docker/engine-api/types/container", - "Comment": "v0.3.1-78-gdea108d", - "Rev": "dea108d3aa0c67d7162a3fd8aa65f38a430019fd" - }, - { - "ImportPath": "github.com/docker/engine-api/types/filters", - "Comment": "v0.3.1-78-gdea108d", - "Rev": "dea108d3aa0c67d7162a3fd8aa65f38a430019fd" - }, - { - "ImportPath": "github.com/docker/engine-api/types/network", - "Comment": "v0.3.1-78-gdea108d", - "Rev": "dea108d3aa0c67d7162a3fd8aa65f38a430019fd" - }, - { - "ImportPath": "github.com/docker/engine-api/types/reference", - "Comment": "v0.3.1-78-gdea108d", - "Rev": "dea108d3aa0c67d7162a3fd8aa65f38a430019fd" - }, - { - "ImportPath": "github.com/docker/engine-api/types/registry", - "Comment": "v0.3.1-78-gdea108d", - "Rev": "dea108d3aa0c67d7162a3fd8aa65f38a430019fd" - }, - { - "ImportPath": "github.com/docker/engine-api/types/strslice", - "Comment": "v0.3.1-78-gdea108d", - "Rev": "dea108d3aa0c67d7162a3fd8aa65f38a430019fd" - }, - { - "ImportPath": "github.com/docker/engine-api/types/time", - "Comment": "v0.3.1-78-gdea108d", - "Rev": "dea108d3aa0c67d7162a3fd8aa65f38a430019fd" - }, - { - "ImportPath": "github.com/docker/engine-api/types/versions", - "Comment": "v0.3.1-78-gdea108d", - "Rev": "dea108d3aa0c67d7162a3fd8aa65f38a430019fd" - }, { "ImportPath": "github.com/docker/go-connections/nat", "Comment": "v0.3.0", @@ -1180,8 +1181,8 @@ }, { "ImportPath": "github.com/docker/libnetwork/ipvs", - "Comment": "v0.8.0-dev.2-908-gd5c82231", - "Rev": "d5c822319097cc01cc9bd5ffedd74c7ce7c894f2" + "Comment": "v0.8.0-dev.2-910-gba46b928", + "Rev": "ba46b928444931e6865d8618dc03622cac79aa6f" }, { "ImportPath": "github.com/docker/libtrust", @@ -1402,8 +1403,8 @@ }, { "ImportPath": "github.com/go-ini/ini", - "Comment": "v0-54-g2e44421", - "Rev": "2e44421e256d82ebbf3d4d4fcabe8930b905eff3" + "Comment": "v1.25.4", + "Rev": "300e940a926eb277d3901b20bdfcc54928ad3642" }, { "ImportPath": "github.com/go-openapi/analysis", @@ -1423,7 +1424,7 @@ }, { "ImportPath": "github.com/go-openapi/loads", - "Rev": "18441dfa706d924a39a030ee2c3b1d8d81917b38" + "Rev": "a80dea3052f00e5f032e860dd7355cd0cc67e24d" }, { "ImportPath": "github.com/go-openapi/runtime", @@ -1431,7 +1432,7 @@ }, { "ImportPath": "github.com/go-openapi/spec", - "Rev": "6aced65f8501fe1217321abf0749d354824ba2ff" + "Rev": "7abd5745472fff5eb3685386d5fb8bf38683154d" }, { "ImportPath": "github.com/go-openapi/strfmt", @@ -1439,7 +1440,7 @@ }, { "ImportPath": "github.com/go-openapi/swag", - "Rev": "1d0bd113de87027671077d3c71eb3ac5d7dbba72" + "Rev": "f3f9494671f93fcff853e3c6e9e948b3eb71e590" }, { "ImportPath": "github.com/go-openapi/validate", @@ -1470,6 +1471,11 @@ "Comment": "v0.4-3-gc0656edd", "Rev": "c0656edd0d9eab7c66d1eb0c568f9039345796f7" }, + { + "ImportPath": "github.com/gogo/protobuf/types", + "Comment": "v0.4-3-gc0656edd", + "Rev": 
"c0656edd0d9eab7c66d1eb0c568f9039345796f7" + }, { "ImportPath": "github.com/golang/glog", "Rev": "44145f04b68cf362d9c4df2182967c2275eaefed" @@ -1480,231 +1486,249 @@ }, { "ImportPath": "github.com/golang/protobuf/jsonpb", - "Rev": "4bd1920723d7b7c925de087aa32e2187708897f7" + "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, { "ImportPath": "github.com/golang/protobuf/proto", - "Rev": "4bd1920723d7b7c925de087aa32e2187708897f7" + "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, { "ImportPath": "github.com/golang/protobuf/ptypes", - "Rev": "4bd1920723d7b7c925de087aa32e2187708897f7" + "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, { "ImportPath": "github.com/golang/protobuf/ptypes/any", - "Rev": "4bd1920723d7b7c925de087aa32e2187708897f7" + "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, { "ImportPath": "github.com/golang/protobuf/ptypes/duration", - "Rev": "4bd1920723d7b7c925de087aa32e2187708897f7" + "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" + }, + { + "ImportPath": "github.com/golang/protobuf/ptypes/empty", + "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" + }, + { + "ImportPath": "github.com/golang/protobuf/ptypes/struct", + "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, { "ImportPath": "github.com/golang/protobuf/ptypes/timestamp", - "Rev": "4bd1920723d7b7c925de087aa32e2187708897f7" + "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, { "ImportPath": "github.com/google/btree", "Rev": "7d79101e329e5a3adf994758c578dab82b90c017" }, + { + "ImportPath": "github.com/google/cadvisor/accelerators", + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + }, { "ImportPath": "github.com/google/cadvisor/api", - "Comment": "v0.27.1", - "Rev": "cda62a43857256fbc95dd31e7c810888f00f8ec7" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/cache/memory", - "Comment": "v0.27.1", - "Rev": "cda62a43857256fbc95dd31e7c810888f00f8ec7" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/collector", - "Comment": "v0.27.1", - "Rev": "cda62a43857256fbc95dd31e7c810888f00f8ec7" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/container", - "Comment": "v0.27.1", - "Rev": "cda62a43857256fbc95dd31e7c810888f00f8ec7" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/container/common", - "Comment": "v0.27.1", - "Rev": "cda62a43857256fbc95dd31e7c810888f00f8ec7" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + }, + { + "ImportPath": "github.com/google/cadvisor/container/containerd", + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/container/crio", - "Comment": "v0.27.1", - "Rev": "cda62a43857256fbc95dd31e7c810888f00f8ec7" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/container/docker", - "Comment": "v0.27.1", - "Rev": "cda62a43857256fbc95dd31e7c810888f00f8ec7" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/container/libcontainer", - "Comment": "v0.27.1", - "Rev": "cda62a43857256fbc95dd31e7c810888f00f8ec7" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": 
"github.com/google/cadvisor/container/raw", - "Comment": "v0.27.1", - "Rev": "cda62a43857256fbc95dd31e7c810888f00f8ec7" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/container/rkt", - "Comment": "v0.27.1", - "Rev": "cda62a43857256fbc95dd31e7c810888f00f8ec7" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/container/systemd", - "Comment": "v0.27.1", - "Rev": "cda62a43857256fbc95dd31e7c810888f00f8ec7" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/devicemapper", - "Comment": "v0.27.1", - "Rev": "cda62a43857256fbc95dd31e7c810888f00f8ec7" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/events", - "Comment": "v0.27.1", - "Rev": "cda62a43857256fbc95dd31e7c810888f00f8ec7" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/fs", - "Comment": "v0.27.1", - "Rev": "cda62a43857256fbc95dd31e7c810888f00f8ec7" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/healthz", - "Comment": "v0.27.1", - "Rev": "cda62a43857256fbc95dd31e7c810888f00f8ec7" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/http", - "Comment": "v0.27.1", - "Rev": "cda62a43857256fbc95dd31e7c810888f00f8ec7" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/http/mux", - "Comment": "v0.27.1", - "Rev": "cda62a43857256fbc95dd31e7c810888f00f8ec7" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/info/v1", - "Comment": "v0.27.1", - "Rev": "cda62a43857256fbc95dd31e7c810888f00f8ec7" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/info/v2", - "Comment": "v0.27.1", - "Rev": "cda62a43857256fbc95dd31e7c810888f00f8ec7" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/machine", - "Comment": "v0.27.1", - "Rev": "cda62a43857256fbc95dd31e7c810888f00f8ec7" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/manager", - "Comment": "v0.27.1", - "Rev": "cda62a43857256fbc95dd31e7c810888f00f8ec7" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/manager/watcher", - "Comment": "v0.27.1", - "Rev": "cda62a43857256fbc95dd31e7c810888f00f8ec7" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/manager/watcher/raw", - "Comment": "v0.27.1", - "Rev": "cda62a43857256fbc95dd31e7c810888f00f8ec7" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/manager/watcher/rkt", - "Comment": "v0.27.1", - "Rev": "cda62a43857256fbc95dd31e7c810888f00f8ec7" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/metrics", - "Comment": "v0.27.1", - "Rev": "cda62a43857256fbc95dd31e7c810888f00f8ec7" + "Comment": "v0.28.3", + "Rev": 
"1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/pages", - "Comment": "v0.27.1", - "Rev": "cda62a43857256fbc95dd31e7c810888f00f8ec7" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/pages/static", - "Comment": "v0.27.1", - "Rev": "cda62a43857256fbc95dd31e7c810888f00f8ec7" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/storage", - "Comment": "v0.27.1", - "Rev": "cda62a43857256fbc95dd31e7c810888f00f8ec7" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/summary", - "Comment": "v0.27.1", - "Rev": "cda62a43857256fbc95dd31e7c810888f00f8ec7" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/utils", - "Comment": "v0.27.1", - "Rev": "cda62a43857256fbc95dd31e7c810888f00f8ec7" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/utils/cloudinfo", - "Comment": "v0.27.1", - "Rev": "cda62a43857256fbc95dd31e7c810888f00f8ec7" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/utils/cpuload", - "Comment": "v0.27.1", - "Rev": "cda62a43857256fbc95dd31e7c810888f00f8ec7" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/utils/cpuload/netlink", - "Comment": "v0.27.1", - "Rev": "cda62a43857256fbc95dd31e7c810888f00f8ec7" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/utils/docker", - "Comment": "v0.27.1", - "Rev": "cda62a43857256fbc95dd31e7c810888f00f8ec7" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/utils/oomparser", - "Comment": "v0.27.1", - "Rev": "cda62a43857256fbc95dd31e7c810888f00f8ec7" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/utils/sysfs", - "Comment": "v0.27.1", - "Rev": "cda62a43857256fbc95dd31e7c810888f00f8ec7" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/utils/sysinfo", - "Comment": "v0.27.1", - "Rev": "cda62a43857256fbc95dd31e7c810888f00f8ec7" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/validate", - "Comment": "v0.27.1", - "Rev": "cda62a43857256fbc95dd31e7c810888f00f8ec7" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/version", - "Comment": "v0.27.1", - "Rev": "cda62a43857256fbc95dd31e7c810888f00f8ec7" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/cadvisor/zfs", - "Comment": "v0.27.1", - "Rev": "cda62a43857256fbc95dd31e7c810888f00f8ec7" + "Comment": "v0.28.3", + "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" }, { "ImportPath": "github.com/google/certificate-transparency/go", @@ -1744,127 +1768,123 @@ }, { "ImportPath": "github.com/gophercloud/gophercloud", - "Rev": "2bf16b94fdd9b01557c4d076e567fe5cbbe5a961" + "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack", - 
"Rev": "2bf16b94fdd9b01557c4d076e567fe5cbbe5a961" + "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" }, { - "ImportPath": "github.com/gophercloud/gophercloud/openstack/blockstorage/v1/apiversions", - "Rev": "2bf16b94fdd9b01557c4d076e567fe5cbbe5a961" + "ImportPath": "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions", + "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes", - "Rev": "2bf16b94fdd9b01557c4d076e567fe5cbbe5a961" + "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes", - "Rev": "2bf16b94fdd9b01557c4d076e567fe5cbbe5a961" + "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + }, + { + "ImportPath": "github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes", + "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/common/extensions", - "Rev": "2bf16b94fdd9b01557c4d076e567fe5cbbe5a961" + "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces", - "Rev": "2bf16b94fdd9b01557c4d076e567fe5cbbe5a961" + "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach", - "Rev": "2bf16b94fdd9b01557c4d076e567fe5cbbe5a961" + "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/compute/v2/flavors", - "Rev": "2bf16b94fdd9b01557c4d076e567fe5cbbe5a961" + "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/compute/v2/images", - "Rev": "2bf16b94fdd9b01557c4d076e567fe5cbbe5a961" + "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/compute/v2/servers", - "Rev": "2bf16b94fdd9b01557c4d076e567fe5cbbe5a961" + "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/identity/v2/tenants", - "Rev": "2bf16b94fdd9b01557c4d076e567fe5cbbe5a961" + "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens", - "Rev": "2bf16b94fdd9b01557c4d076e567fe5cbbe5a961" + "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/trusts", - "Rev": "2bf16b94fdd9b01557c4d076e567fe5cbbe5a961" + "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens", - "Rev": "2bf16b94fdd9b01557c4d076e567fe5cbbe5a961" + "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions", - "Rev": "2bf16b94fdd9b01557c4d076e567fe5cbbe5a961" - }, - { - "ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips", - "Rev": "2bf16b94fdd9b01557c4d076e567fe5cbbe5a961" - }, - { - "ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers", - "Rev": "2bf16b94fdd9b01557c4d076e567fe5cbbe5a961" - }, - { - "ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members", - "Rev": "2bf16b94fdd9b01557c4d076e567fe5cbbe5a961" + "Rev": 
"8183543f90d1aef267a5ecc209f2e0715b355acb" }, { - "ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors", - "Rev": "2bf16b94fdd9b01557c4d076e567fe5cbbe5a961" + "ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external", + "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" }, { - "ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools", - "Rev": "2bf16b94fdd9b01557c4d076e567fe5cbbe5a961" + "ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips", + "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" }, { - "ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips", - "Rev": "2bf16b94fdd9b01557c4d076e567fe5cbbe5a961" + "ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers", + "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners", - "Rev": "2bf16b94fdd9b01557c4d076e567fe5cbbe5a961" + "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers", - "Rev": "2bf16b94fdd9b01557c4d076e567fe5cbbe5a961" + "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors", - "Rev": "2bf16b94fdd9b01557c4d076e567fe5cbbe5a961" + "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools", - "Rev": "2bf16b94fdd9b01557c4d076e567fe5cbbe5a961" + "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups", - "Rev": "2bf16b94fdd9b01557c4d076e567fe5cbbe5a961" + "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules", - "Rev": "2bf16b94fdd9b01557c4d076e567fe5cbbe5a961" + "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + }, + { + "ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/networks", + "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/ports", - "Rev": "2bf16b94fdd9b01557c4d076e567fe5cbbe5a961" + "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/utils", - "Rev": "2bf16b94fdd9b01557c4d076e567fe5cbbe5a961" + "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" }, { "ImportPath": "github.com/gophercloud/gophercloud/pagination", - "Rev": "2bf16b94fdd9b01557c4d076e567fe5cbbe5a961" + "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" }, { "ImportPath": "github.com/gorilla/mux", @@ -2014,8 +2034,8 @@ }, { "ImportPath": "github.com/jmespath/go-jmespath", - "Comment": "0.2.2", - "Rev": "3433f3ea46d9f8019119e7dd41274e112a2359a9" + "Comment": "0.2.2-12-g0b12d6b", + "Rev": "0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74" }, { "ImportPath": "github.com/jonboulle/clockwork", @@ -2088,15 +2108,15 @@ }, { "ImportPath": "github.com/mailru/easyjson/buffer", - "Rev": "d5b7844b561a7bc640052f1b935f7b800330d7e0" + "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" }, { "ImportPath": "github.com/mailru/easyjson/jlexer", - "Rev": 
"d5b7844b561a7bc640052f1b935f7b800330d7e0" + "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" }, { "ImportPath": "github.com/mailru/easyjson/jwriter", - "Rev": "d5b7844b561a7bc640052f1b935f7b800330d7e0" + "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" }, { "ImportPath": "github.com/mattn/go-runewidth", @@ -2111,6 +2131,10 @@ "ImportPath": "github.com/miekg/dns", "Rev": "5d001d020961ae1c184f9f8152fdc73810481677" }, + { + "ImportPath": "github.com/mindprince/gonvml", + "Rev": "fee913ce8fb235edf54739d259ca0ecc226c7b8a" + }, { "ImportPath": "github.com/mistifyio/go-zfs", "Comment": "v2.1.1-5-g1b4ae6f", @@ -2120,6 +2144,10 @@ "ImportPath": "github.com/mitchellh/go-ps", "Rev": "4fdf99ab29366514c69ccccddab5dc58b8d84062" }, + { + "ImportPath": "github.com/mitchellh/go-wordwrap", + "Rev": "ad45545899c7b13c020ea92b2072220eefad42b8" + }, { "ImportPath": "github.com/mitchellh/mapstructure", "Rev": "53818660ed4955e899c0bcafa97299a388bd7c8e" @@ -2282,8 +2310,8 @@ }, { "ImportPath": "github.com/pkg/errors", - "Comment": "v0.7.0-13-ga221380", - "Rev": "a22138067af1c4942683050411a841ade67fe1eb" + "Comment": "v0.8.0", + "Rev": "645ef00459ed84a119197bfb8d8205042c6df63d" }, { "ImportPath": "github.com/pkg/profile", @@ -2335,123 +2363,18 @@ }, { "ImportPath": "github.com/r2d4/external-storage/lib/controller", - "Comment": "v1.0.0-22-g01d5830", - "Rev": "01d5830a6980d4c7572139cc40817132854bf394" + "Comment": "v1.0.0-23-g4ebf52e", + "Rev": "4ebf52ebe22099f73d229bf1ddd855d9113981f6" }, { "ImportPath": "github.com/r2d4/external-storage/lib/leaderelection", - "Comment": "v1.0.0-22-g01d5830", - "Rev": "01d5830a6980d4c7572139cc40817132854bf394" + "Comment": "v1.0.0-23-g4ebf52e", + "Rev": "4ebf52ebe22099f73d229bf1ddd855d9113981f6" }, { "ImportPath": "github.com/r2d4/external-storage/lib/leaderelection/resourcelock", - "Comment": "v1.0.0-22-g01d5830", - "Rev": "01d5830a6980d4c7572139cc40817132854bf394" - }, - { - "ImportPath": "github.com/rackspace/gophercloud", - "Comment": "v1.0.0-1012-ge00690e8", - "Rev": "e00690e87603abe613e9f02c816c7c4bef82e063" - }, - { - "ImportPath": "github.com/rackspace/gophercloud/openstack", - "Comment": "v1.0.0-1012-ge00690e8", - "Rev": "e00690e87603abe613e9f02c816c7c4bef82e063" - }, - { - "ImportPath": "github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes", - "Comment": "v1.0.0-1012-ge00690e8", - "Rev": "e00690e87603abe613e9f02c816c7c4bef82e063" - }, - { - "ImportPath": "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume", - "Comment": "v1.0.0-1012-ge00690e8", - "Rev": "e00690e87603abe613e9f02c816c7c4bef82e063" - }, - { - "ImportPath": "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig", - "Comment": "v1.0.0-1012-ge00690e8", - "Rev": "e00690e87603abe613e9f02c816c7c4bef82e063" - }, - { - "ImportPath": "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach", - "Comment": "v1.0.0-1012-ge00690e8", - "Rev": "e00690e87603abe613e9f02c816c7c4bef82e063" - }, - { - "ImportPath": "github.com/rackspace/gophercloud/openstack/compute/v2/flavors", - "Comment": "v1.0.0-1012-ge00690e8", - "Rev": "e00690e87603abe613e9f02c816c7c4bef82e063" - }, - { - "ImportPath": "github.com/rackspace/gophercloud/openstack/compute/v2/images", - "Comment": "v1.0.0-1012-ge00690e8", - "Rev": "e00690e87603abe613e9f02c816c7c4bef82e063" - }, - { - "ImportPath": "github.com/rackspace/gophercloud/openstack/compute/v2/servers", - "Comment": "v1.0.0-1012-ge00690e8", - "Rev": "e00690e87603abe613e9f02c816c7c4bef82e063" - }, - { - 
"ImportPath": "github.com/rackspace/gophercloud/openstack/identity/v2/tenants", - "Comment": "v1.0.0-1012-ge00690e8", - "Rev": "e00690e87603abe613e9f02c816c7c4bef82e063" - }, - { - "ImportPath": "github.com/rackspace/gophercloud/openstack/identity/v2/tokens", - "Comment": "v1.0.0-1012-ge00690e8", - "Rev": "e00690e87603abe613e9f02c816c7c4bef82e063" - }, - { - "ImportPath": "github.com/rackspace/gophercloud/openstack/identity/v3/tokens", - "Comment": "v1.0.0-1012-ge00690e8", - "Rev": "e00690e87603abe613e9f02c816c7c4bef82e063" - }, - { - "ImportPath": "github.com/rackspace/gophercloud/openstack/utils", - "Comment": "v1.0.0-1012-ge00690e8", - "Rev": "e00690e87603abe613e9f02c816c7c4bef82e063" - }, - { - "ImportPath": "github.com/rackspace/gophercloud/pagination", - "Comment": "v1.0.0-1012-ge00690e8", - "Rev": "e00690e87603abe613e9f02c816c7c4bef82e063" - }, - { - "ImportPath": "github.com/rackspace/gophercloud/rackspace", - "Comment": "v1.0.0-1012-ge00690e8", - "Rev": "e00690e87603abe613e9f02c816c7c4bef82e063" - }, - { - "ImportPath": "github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes", - "Comment": "v1.0.0-1012-ge00690e8", - "Rev": "e00690e87603abe613e9f02c816c7c4bef82e063" - }, - { - "ImportPath": "github.com/rackspace/gophercloud/rackspace/compute/v2/servers", - "Comment": "v1.0.0-1012-ge00690e8", - "Rev": "e00690e87603abe613e9f02c816c7c4bef82e063" - }, - { - "ImportPath": "github.com/rackspace/gophercloud/rackspace/compute/v2/volumeattach", - "Comment": "v1.0.0-1012-ge00690e8", - "Rev": "e00690e87603abe613e9f02c816c7c4bef82e063" - }, - { - "ImportPath": "github.com/rackspace/gophercloud/rackspace/identity/v2/tokens", - "Comment": "v1.0.0-1012-ge00690e8", - "Rev": "e00690e87603abe613e9f02c816c7c4bef82e063" - }, - { - "ImportPath": "github.com/rackspace/gophercloud/testhelper", - "Comment": "v1.0.0-1012-ge00690e8", - "Rev": "e00690e87603abe613e9f02c816c7c4bef82e063" - }, - { - "ImportPath": "github.com/rackspace/gophercloud/testhelper/client", - "Comment": "v1.0.0-1012-ge00690e8", - "Rev": "e00690e87603abe613e9f02c816c7c4bef82e063" + "Comment": "v1.0.0-23-g4ebf52e", + "Rev": "4ebf52ebe22099f73d229bf1ddd855d9113981f6" }, { "ImportPath": "github.com/rancher/go-rancher/client", @@ -2868,15 +2791,15 @@ }, { "ImportPath": "golang.org/x/sys/unix", - "Rev": "7ddbeae9ae08c6a06a59597f0c9edbc5ff2444ce" + "Rev": "95c6576299259db960f6c5b9b69ea52422860fce" }, { "ImportPath": "golang.org/x/sys/windows", - "Rev": "7ddbeae9ae08c6a06a59597f0c9edbc5ff2444ce" + "Rev": "95c6576299259db960f6c5b9b69ea52422860fce" }, { "ImportPath": "golang.org/x/sys/windows/registry", - "Rev": "7ddbeae9ae08c6a06a59597f0c9edbc5ff2444ce" + "Rev": "95c6576299259db960f6c5b9b69ea52422860fce" }, { "ImportPath": "golang.org/x/text/cases", @@ -2946,41 +2869,37 @@ "ImportPath": "golang.org/x/tools/container/intsets", "Rev": "2382e3994d48b1d22acc2c86bcad0a2aff028e32" }, - { - "ImportPath": "google.golang.org/api/cloudkms/v1", - "Rev": "98825bb0065da4054e5da6db34f5fc598e50bc24" - }, { "ImportPath": "google.golang.org/api/cloudmonitoring/v2beta2", - "Rev": "98825bb0065da4054e5da6db34f5fc598e50bc24" + "Rev": "654f863362977d69086620b5f72f13e911da2410" }, { "ImportPath": "google.golang.org/api/compute/v0.alpha", - "Rev": "98825bb0065da4054e5da6db34f5fc598e50bc24" + "Rev": "654f863362977d69086620b5f72f13e911da2410" }, { "ImportPath": "google.golang.org/api/compute/v0.beta", - "Rev": "98825bb0065da4054e5da6db34f5fc598e50bc24" + "Rev": "654f863362977d69086620b5f72f13e911da2410" }, { "ImportPath": "google.golang.org/api/compute/v1", 
- "Rev": "98825bb0065da4054e5da6db34f5fc598e50bc24" + "Rev": "654f863362977d69086620b5f72f13e911da2410" }, { "ImportPath": "google.golang.org/api/container/v1", - "Rev": "98825bb0065da4054e5da6db34f5fc598e50bc24" + "Rev": "654f863362977d69086620b5f72f13e911da2410" }, { "ImportPath": "google.golang.org/api/gensupport", - "Rev": "98825bb0065da4054e5da6db34f5fc598e50bc24" + "Rev": "654f863362977d69086620b5f72f13e911da2410" }, { "ImportPath": "google.golang.org/api/googleapi", - "Rev": "98825bb0065da4054e5da6db34f5fc598e50bc24" + "Rev": "654f863362977d69086620b5f72f13e911da2410" }, { "ImportPath": "google.golang.org/api/googleapi/internal/uritemplates", - "Rev": "98825bb0065da4054e5da6db34f5fc598e50bc24" + "Rev": "654f863362977d69086620b5f72f13e911da2410" }, { "ImportPath": "google.golang.org/genproto/googleapis/rpc/status", @@ -3107,3297 +3026,3434 @@ }, { "ImportPath": "k8s.io/kube-openapi/pkg/aggregator", - "Rev": "868f2f29720b192240e18284659231b440f9cda5" + "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" }, { "ImportPath": "k8s.io/kube-openapi/pkg/builder", - "Rev": "868f2f29720b192240e18284659231b440f9cda5" + "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" }, { "ImportPath": "k8s.io/kube-openapi/pkg/common", - "Rev": "868f2f29720b192240e18284659231b440f9cda5" + "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" }, { "ImportPath": "k8s.io/kube-openapi/pkg/handler", - "Rev": "868f2f29720b192240e18284659231b440f9cda5" + "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" }, { "ImportPath": "k8s.io/kube-openapi/pkg/util", - "Rev": "868f2f29720b192240e18284659231b440f9cda5" + "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + }, + { + "ImportPath": "k8s.io/kube-openapi/pkg/util/proto", + "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + }, + { + "ImportPath": "k8s.io/kube-openapi/pkg/util/proto/validation", + "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" }, { "ImportPath": "k8s.io/kubernetes/cmd/kube-apiserver/app", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/cmd/kube-apiserver/app/options", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/cmd/kube-controller-manager/app", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/cmd/kube-controller-manager/app/options", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/cmd/kube-proxy/app", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/cmd/kubeadm/app/constants", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/cmd/kubelet/app", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/cmd/kubelet/app/options", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" - }, - 
{ - "ImportPath": "k8s.io/kubernetes/federation/apis/federation", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" - }, - { - "ImportPath": "k8s.io/kubernetes/federation/apis/federation/install", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" - }, - { - "ImportPath": "k8s.io/kubernetes/federation/apis/federation/v1beta1", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" - }, - { - "ImportPath": "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" - }, - { - "ImportPath": "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/scheme", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" - }, - { - "ImportPath": "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/autoscaling/v1", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" - }, - { - "ImportPath": "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/batch/v1", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" - }, - { - "ImportPath": "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/core/v1", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" - }, - { - "ImportPath": "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/extensions/v1beta1", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" - }, - { - "ImportPath": "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/federation/v1beta1", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/api", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/endpoints", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/events", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/api/helper", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { - "ImportPath": "k8s.io/kubernetes/pkg/api/helper/qos", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "ImportPath": "k8s.io/kubernetes/pkg/api/legacyscheme", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { - "ImportPath": "k8s.io/kubernetes/pkg/api/install", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "ImportPath": "k8s.io/kubernetes/pkg/api/persistentvolume", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { - "ImportPath": "k8s.io/kubernetes/pkg/api/persistentvolume", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "ImportPath": "k8s.io/kubernetes/pkg/api/persistentvolumeclaim", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/pod", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": 
"925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/ref", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/resource", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/service", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/api/util", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/api/v1", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/v1/endpoints", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/api/v1/helper", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/api/v1/helper/qos", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/v1/node", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/v1/pod", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/v1/resource", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/v1/service", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/api/v1/validation", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/api/validation", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/abac", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/abac/latest", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/abac/v0", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/abac/v1beta1", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/admission", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": 
"925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/admission/install", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/admission/v1alpha1", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "ImportPath": "k8s.io/kubernetes/pkg/apis/admission/v1beta1", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/admissionregistration", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/admissionregistration/install", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/admissionregistration/validation", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/apps", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/apps/install", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/apps/v1", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/apps/v1beta1", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/apps/v1beta2", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/apps/validation", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authentication", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authentication/install", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authentication/v1", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authentication/v1beta1", - "Comment": 
"v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authorization", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authorization/install", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authorization/v1", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authorization/v1beta1", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authorization/validation", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling/install", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling/v1", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling/validation", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/batch", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/batch/install", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/batch/v1", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/batch/v1beta1", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/batch/v2alpha1", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/batch/validation", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": 
"925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/certificates", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/certificates/install", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/certificates/v1beta1", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/certificates/validation", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/componentconfig", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/componentconfig/install", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/core", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/core/helper", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/core/helper/qos", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/core/install", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/core/pods", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/core/v1", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/core/v1/helper", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/core/v1/validation", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/core/validation", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/events", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/events/install", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/events/v1beta1", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/extensions", - "Comment": "v1.8.0", - "Rev": 
"0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/extensions/install", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/extensions/validation", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/imagepolicy", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/imagepolicy/install", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/networking", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/networking/install", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/networking/v1", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/networking/validation", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/policy", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/policy/install", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/policy/v1beta1", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/policy/validation", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac/install", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" 
}, { "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac/v1", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac/v1beta1", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac/validation", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/scheduling", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/scheduling/install", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/scheduling/v1alpha1", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/scheduling/validation", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/settings", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/settings/install", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/settings/v1alpha1", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/settings/validation", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/storage", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/storage/install", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/storage/util", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/storage/v1", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/apis/storage/v1alpha1", + "Comment": "v1.9.0", + "Rev": 
"925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/storage/v1beta1", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/storage/validation", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/auth/authorizer/abac", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/auth/nodeidentifier", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/bootstrap/api", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/capabilities", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/chaosclient", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion", - "Comment": "v1.8.0", - "Rev": 
"0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/events/internalversion", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/admissionregistration", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { 
"ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/apps", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/apps/internalversion", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/autoscaling", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/autoscaling/internalversion", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/batch", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/batch/internalversion", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/certificates", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/certificates/internalversion", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions/internalversion", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/networking", - "Comment": 
"v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/networking/internalversion", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/policy", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/policy/internalversion", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/rbac", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/rbac/internalversion", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/scheduling", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/scheduling/internalversion", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/settings", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/settings/internalversion", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/storage", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/storage/internalversion", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/leaderelectionconfig", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/admissionregistration/internalversion", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/client/listers/apps/internalversion", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/autoscaling/internalversion", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/batch/internalversion", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/certificates/internalversion", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/core/internalversion", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/extensions/internalversion", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/networking/internalversion", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/policy/internalversion", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/rbac/internalversion", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/scheduling/internalversion", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/settings/internalversion", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/storage/internalversion", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/unversioned", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/aws", - "Comment": "v1.8.0", - "Rev": 
"0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/azure", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/gce", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/openstack", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/ovirt", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/photon", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/rackspace", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/bootstrap", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/certificates", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/certificates/approver", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/controller/certificates/cleaner", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/certificates/signer", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": 
"925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/controller/clusterroleaggregation", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/cronjob", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/daemon", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/daemon/util", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/deployment", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/deployment/util", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/disruption", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/endpoint", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/garbagecollector", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/history", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/job", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/namespace", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/namespace/deletion", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/node", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/node/ipam", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/node/ipam/cidrset", - "Comment": "v1.8.0", - "Rev": 
"0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/node/ipam/sync", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/node/scheduler", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/node/util", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/podautoscaler", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/podgc", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/replicaset", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/replication", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/resourcequota", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/route", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/service", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/serviceaccount", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/statefulset", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/ttl", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/attachdetach", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": 
"v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/attachdetach/populator", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/attachdetach/util", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/events", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/expand", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/expand/cache", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/expand/util", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/persistentvolume", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/pvcprotection", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/credentialprovider", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/credentialprovider/aws", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/credentialprovider/azure", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/credentialprovider/gcp", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/credentialprovider/rancher", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/features", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": 
"925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/fieldpath", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/generated/openapi", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubeapiserver", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubeapiserver/admission", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/kubeapiserver/admission/configuration", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubeapiserver/admission/util", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubeapiserver/authenticator", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubeapiserver/authorizer", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubeapiserver/options", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubeapiserver/server", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/kubectl/apps", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/kubectl/categories", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/kubectl/cmd/templates", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/cmd/util", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/validation", - "Comment": "v1.8.0", - "Rev": 
"0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/plugins", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/resource", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/kubectl/scheme", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/util", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/util/hash", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/util/slice", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/kubectl/util/term", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/validation", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis/cri", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { - "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha1", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/scheme", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/validation", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/cadvisor", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/certificate", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/kubelet/checkpoint", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/client", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/cm", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/cm/cpuset", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/kubelet/cm/deviceplugin", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/cm/util", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/config", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/configmap", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/container", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/kubelet/deviceplugin", - "Comment": "v1.8.0", - "Rev": 
"0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/dockershim", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/dockershim/cm", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { - "ImportPath": "k8s.io/kubernetes/pkg/kubelet/dockershim/errors", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "ImportPath": "k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { - "ImportPath": "k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "ImportPath": "k8s.io/kubernetes/pkg/kubelet/dockershim/metrics", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/dockershim/remote", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/envvars", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/events", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/eviction", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/eviction/api", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/gpu", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/gpu/nvidia", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/images", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/kubeletconfig", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/store", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configfiles", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/equal", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/files", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/panic", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/kuberuntime", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/kubelet/kuberuntime/logs", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/leaky", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/lifecycle", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/metrics", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/kubelet/mountpod", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/network", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/network/cni", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/kubelet/network/dns", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/network/hairpin", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/kubelet/network/hostport", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/network/kubenet", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/kubelet/network/metrics", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/pleg", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/pod", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/preemption", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/prober", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/prober/results", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/qos", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/remote", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/rkt", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/secret", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/server", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/server/portforward", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/server/remotecommand", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/server/stats", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/server/streaming", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/kubelet/stats", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/status", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/sysctl", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/types", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/util", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/util/cache", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/kubelet/util/csr", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/util/format", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/util/ioutils", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/util/queue", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/util/sliceutils", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/kubelet/util/store", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/volumemanager", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/volumemanager/populator", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/kubelet/winstats", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/master", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": 
"v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/master/controller/crdregistration", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/master/ports", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/master/reconcilers", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/master/tunneler", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/printers", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/printers/internalversion", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/printers/storage", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/probe", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/probe/exec", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/probe/http", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/probe/tcp", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/scheme", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/validation", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/config", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/healthcheck", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, 
{ "ImportPath": "k8s.io/kubernetes/pkg/proxy/iptables", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/ipvs", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/proxy/metrics", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/userspace", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/util", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/winkernel", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/winuserspace", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/quota", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/quota/evaluator/core", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/quota/generic", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/quota/install", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { - "ImportPath": "k8s.io/kubernetes/pkg/registry/admissionregistration/externaladmissionhookconfiguration", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "ImportPath": "k8s.io/kubernetes/pkg/registry/admissionregistration/initializerconfiguration", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { - "ImportPath": "k8s.io/kubernetes/pkg/registry/admissionregistration/externaladmissionhookconfiguration/storage", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "ImportPath": "k8s.io/kubernetes/pkg/registry/admissionregistration/initializerconfiguration/storage", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { - "ImportPath": "k8s.io/kubernetes/pkg/registry/admissionregistration/initializerconfiguration", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "ImportPath": "k8s.io/kubernetes/pkg/registry/admissionregistration/mutatingwebhookconfiguration", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { - "ImportPath": "k8s.io/kubernetes/pkg/registry/admissionregistration/initializerconfiguration/storage", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "ImportPath": 
"k8s.io/kubernetes/pkg/registry/admissionregistration/mutatingwebhookconfiguration/storage", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/admissionregistration/rest", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/registry/admissionregistration/validatingwebhookconfiguration", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/registry/admissionregistration/validatingwebhookconfiguration/storage", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/apps/controllerrevision", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/apps/controllerrevision/storage", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/apps/rest", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/apps/statefulset", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/apps/statefulset/storage", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/authentication/rest", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/authentication/tokenreview", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/authorization/localsubjectaccessreview", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/authorization/rest", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/authorization/selfsubjectaccessreview", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/authorization/selfsubjectrulesreview", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/authorization/subjectaccessreview", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { 
"ImportPath": "k8s.io/kubernetes/pkg/registry/authorization/util", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/autoscaling/horizontalpodautoscaler", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/autoscaling/horizontalpodautoscaler/storage", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/autoscaling/rest", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/batch/cronjob", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/batch/cronjob/storage", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/batch/job", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/batch/job/storage", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/batch/rest", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/cachesize", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/certificates/certificates", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/certificates/certificates/storage", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/certificates/rest", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/componentstatus", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/configmap", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/configmap/storage", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": 
"925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/endpoint", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/endpoint/storage", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/event", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/event/storage", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/limitrange", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/limitrange/storage", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/namespace", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/namespace/storage", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/node", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/node/rest", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/node/storage", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/persistentvolume", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/persistentvolume/storage", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim/storage", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/pod", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + 
"Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/pod/rest", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/pod/storage", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/podtemplate", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/podtemplate/storage", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/rangeallocation", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/replicationcontroller", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/replicationcontroller/storage", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/resourcequota", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/resourcequota/storage", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/rest", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/secret", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/secret/storage", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/service", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/service/allocator", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/service/allocator/storage", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/service/ipallocator", - "Comment": "v1.8.0", - "Rev": 
"0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/service/ipallocator/controller", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/service/portallocator", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/service/portallocator/controller", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/service/storage", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/serviceaccount", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/serviceaccount/storage", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/registry/events/rest", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/extensions/controller/storage", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/extensions/daemonset", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/extensions/daemonset/storage", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/extensions/deployment", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/extensions/deployment/storage", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/extensions/ingress", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/extensions/ingress/storage", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/extensions/podsecuritypolicy", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/registry/extensions/podsecuritypolicy/storage", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/extensions/replicaset", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/extensions/replicaset/storage", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/extensions/rest", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/networking/networkpolicy", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/networking/networkpolicy/storage", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/networking/rest", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget/storage", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/policy/rest", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac/clusterrole", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac/clusterrole/policybased", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac/clusterrole/storage", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/policybased", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + 
"Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/storage", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac/reconciliation", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac/rest", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac/role", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac/role/policybased", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac/role/storage", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac/rolebinding", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac/rolebinding/policybased", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac/rolebinding/storage", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac/validation", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/scheduling/priorityclass", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/scheduling/priorityclass/storage", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/scheduling/rest", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/settings/podpreset", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/settings/podpreset/storage", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/settings/rest", - "Comment": "v1.8.0", - "Rev": 
"0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/storage/rest", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/storage/storageclass", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/storage/storageclass/storage", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/registry/storage/volumeattachment", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/registry/storage/volumeattachment/storage", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/routes", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/security/apparmor", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/security/podsecuritypolicy", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/security/podsecuritypolicy/apparmor", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/security/podsecuritypolicy/capabilities", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/security/podsecuritypolicy/group", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/security/podsecuritypolicy/user", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": 
"925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/securitycontext", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/serviceaccount", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/ssh", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/async", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/bandwidth", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/config", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/configz", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/dbus", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/ebtables", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/env", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/file", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/filesystem", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/flock", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/goroutinemap", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/hash", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/interrupt", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": 
"925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/io", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/ipconfig", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/util/ipset", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/iptables", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/ipvs", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/keymutex", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/labels", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/limitwriter", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/maps", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/metrics", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/mount", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/net/sets", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/netsh", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/node", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/util/nsenter", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/oom", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/parsers", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/pointer", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": 
"v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/procfs", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/reflector/prometheus", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/removeall", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/util/resizefs", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/resourcecontainer", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/rlimit", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/selinux", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/slice", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/strings", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/sysctl", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/system", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/tail", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/taints", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/term", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/tolerations", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/version", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/workqueue/prometheus", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { 
"ImportPath": "k8s.io/kubernetes/pkg/version", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/version/verflag", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/aws_ebs", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/azure_dd", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/azure_file", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/cephfs", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/cinder", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/configmap", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/volume/csi", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/downwardapi", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/empty_dir", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/fc", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/flexvolume", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/flocker", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/gce_pd", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/git_repo", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/glusterfs", - "Comment": "v1.8.0", - 
"Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/host_path", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/iscsi", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/local", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/nfs", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/photon_pd", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/portworx", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/projected", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/quobyte", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/rbd", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/scaleio", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/secret", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/storageos", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/util", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/util/operationexecutor", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/util/types", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/volume/util/volumehelper", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/validation", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/vsphere_volume", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/cmd/kube-scheduler/app", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" - }, - { - "ImportPath": "k8s.io/kubernetes/plugin/cmd/kube-scheduler/app/options", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/admit", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/alwayspullimages", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/antiaffinity", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/defaulttolerationseconds", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/deny", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/eventratelimit", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/install", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/v1alpha1", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/validation", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/exec", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": 
"925c127ec6b946659ad0fd596fa959be43f0cc05" }, { - "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/gc", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/extendedresourcetoleration", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { - "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/imagepolicy", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/gc", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { - "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/initialization", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/imagepolicy", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/initialresources", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/limitranger", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/namespace/autoprovision", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/namespace/exists", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/noderestriction", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/label", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/resize", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/persistentvolumeclaim/pvcprotection", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/podnodeselector", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/podpreset", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction", - "Comment": "v1.8.0", - "Rev": 
"0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/install", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/validation", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/priority", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/resourcequota", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/install", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/validation", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/security/podsecuritypolicy", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/securitycontext/scdeny", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/serviceaccount", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/storageclass/setdefault", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" - }, - { - "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/webhook", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": 
"k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/bootstrap", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/auth/authorizer/node", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/api", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/api/latest", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/api/v1", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/api/validation", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/core", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": 
"925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/factory", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/metrics", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/util", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { - "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/rand", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/volumebinder", + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/third_party/forked/golang/expansion", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/third_party/forked/gonum/graph", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/third_party/forked/gonum/graph/simple", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" }, { "ImportPath": "k8s.io/kubernetes/third_party/forked/gonum/graph/traverse", - "Comment": "v1.8.0", - "Rev": "0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4" + "Comment": "v1.9.0", + "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + }, + { + "ImportPath": "k8s.io/utils/clock", + "Rev": "aedf551cdb8b0119df3a19c65fde413a13b34997" }, { "ImportPath": "k8s.io/utils/exec", - "Rev": "9fdc871a36f37980dd85f96d576b20d564cc0784" + "Rev": "aedf551cdb8b0119df3a19c65fde413a13b34997" }, { "ImportPath": "vbom.ml/util/sortorder", diff --git a/hack/kube-proxy-patch.diff b/hack/kube-proxy-patch.diff index b58441a0e3d8..b5dcaba101cb 100644 --- a/hack/kube-proxy-patch.diff +++ b/hack/kube-proxy-patch.diff @@ -1,8 +1,23 @@ -diff --git a/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_others.go b/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_others.go -index b0f0eace5..02b631fb6 100644 ---- a/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_others.go -+++ b/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_others.go -@@ -60,7 +60,7 @@ func NewProxyServer(config *componentconfig.KubeProxyConfiguration, cleanupAndEx +diff --git a/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server.go b/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server.go +index 2f9ea2eac..35a7c14e5 100644 +--- a/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server.go ++++ b/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server.go +@@ -318,6 
+318,10 @@ func (o *Options) ApplyDefaults(in *kubeproxyconfig.KubeProxyConfiguration) (*ku + return out, nil + } + ++func (o *Options) SetConfig(in *kubeproxyconfig.KubeProxyConfiguration) { ++ o.config = in ++} ++ + // NewProxyCommand creates a *cobra.Command object with default parameters + func NewProxyCommand() *cobra.Command { + opts := NewOptions() +diff --git a/vendor/k8s.io/kubernetes/plugin/cmd/kube-scheduler/app/server.go b/vendor/k8s.io/kubernetes/plugin/cmd/kube-scheduler/app/server.go +index 93982f898..a116da41f 100644 +--- a/vendor/k8s.io/kubernetes/plugin/cmd/kube-scheduler/app/server.go ++++ b/vendor/k8s.io/kubernetes/plugin/cmd/kube-scheduler/app/server.go +@@ -380,7 +380,7 @@ func NewSchedulerServer(config *componentconfig.KubeSchedulerConfiguration, mast if c, err := configz.New("componentconfig"); err == nil { c.Set(config) } else { @@ -10,4 +25,4 @@ index b0f0eace5..02b631fb6 100644 + glog.Warningf("unable to register configz: %s", err) } - protocol := utiliptables.ProtocolIpv4 + // Prepare some Kube clients. diff --git a/pkg/localkube/proxy.go b/pkg/localkube/proxy.go index 1f203517d575..e617ad79a11b 100644 --- a/pkg/localkube/proxy.go +++ b/pkg/localkube/proxy.go @@ -23,9 +23,8 @@ import ( "time" "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/kubernetes/pkg/apis/componentconfig" "k8s.io/kubernetes/pkg/kubelet/qos" + "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig" ) var ( @@ -42,34 +41,36 @@ func StartProxyServer(lk LocalkubeServer) func() error { if lk.APIServerInsecurePort != 0 { bindaddress = lk.APIServerInsecureAddress.String() } - config := &componentconfig.KubeProxyConfiguration{ + + opts := kubeproxy.NewOptions() + config := &kubeproxyconfig.KubeProxyConfiguration{ OOMScoreAdj: &OOMScoreAdj, - ClientConnection: componentconfig.ClientConnectionConfiguration{ + ClientConnection: kubeproxyconfig.ClientConnectionConfiguration{ Burst: 10, QPS: 5, KubeConfigFile: util.DefaultKubeConfigPath, }, ConfigSyncPeriod: v1.Duration{Duration: 15 * time.Minute}, - IPTables: componentconfig.KubeProxyIPTablesConfiguration{ + IPTables: kubeproxyconfig.KubeProxyIPTablesConfiguration{ MasqueradeBit: &MasqueradeBit, SyncPeriod: v1.Duration{Duration: 30 * time.Second}, MinSyncPeriod: v1.Duration{Duration: 5 * time.Second}, }, BindAddress: bindaddress, - Mode: componentconfig.ProxyModeIPTables, + Mode: kubeproxyconfig.ProxyModeIPTables, FeatureGates: lk.FeatureGates, // Disable the healthz check HealthzBindAddress: "0", } + _, err := opts.ApplyDefaults(config) + if err != nil { + panic(err) + } + opts.SetConfig(config) lk.SetExtraConfigForComponent("proxy", &config) return func() error { - // Creating this config requires the API Server to be up, so do it in the start function itself. - server, err := kubeproxy.NewProxyServer(config, false, runtime.NewScheme(), lk.GetAPIServerInsecureURL()) - if err != nil { - panic(err) - } - return server.Run() + return opts.Run() } } diff --git a/pkg/localkube/scheduler.go b/pkg/localkube/scheduler.go index 54b46a568257..6fda922593d6 100644 --- a/pkg/localkube/scheduler.go +++ b/pkg/localkube/scheduler.go @@ -17,8 +17,8 @@ limitations under the License. 
package localkube import ( + "k8s.io/kubernetes/pkg/apis/componentconfig" scheduler "k8s.io/kubernetes/plugin/cmd/kube-scheduler/app" - "k8s.io/kubernetes/plugin/cmd/kube-scheduler/app/options" "k8s.io/minikube/pkg/util" ) @@ -27,10 +27,18 @@ func (lk LocalkubeServer) NewSchedulerServer() Server { } func StartSchedulerServer(lk LocalkubeServer) func() error { - config := options.NewSchedulerServer() + config := &componentconfig.KubeSchedulerConfiguration{} + opts, err := scheduler.NewOptions() + if err != nil { + panic(err) + } + config, err = opts.ApplyDefaults(config) + if err != nil { + panic(err) + } // master details - config.Kubeconfig = util.DefaultKubeConfigPath + config.ClientConnection.KubeConfigFile = util.DefaultKubeConfigPath // defaults from command config.EnableProfiling = true @@ -38,6 +46,10 @@ func StartSchedulerServer(lk LocalkubeServer) func() error { lk.SetExtraConfigForComponent("scheduler", &config) return func() error { - return scheduler.Run(config) + s, err := scheduler.NewSchedulerServer(config, "") + if err != nil { + return err + } + return s.Run(nil) } } diff --git a/test/integration/cluster_status_test.go b/test/integration/cluster_status_test.go index e1e6a88a897d..2714961505df 100644 --- a/test/integration/cluster_status_test.go +++ b/test/integration/cluster_status_test.go @@ -23,7 +23,7 @@ import ( "testing" "time" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/minikube/test/integration/util" ) diff --git a/test/integration/pv_test.go b/test/integration/pv_test.go index ec09c46a91cf..a4bb2de00c7a 100644 --- a/test/integration/pv_test.go +++ b/test/integration/pv_test.go @@ -20,13 +20,14 @@ package integration import ( "fmt" - "github.com/pkg/errors" "path/filepath" "testing" "time" + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/labels" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/storage" commonutil "k8s.io/minikube/pkg/util" "k8s.io/minikube/test/integration/util" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/NOTICE b/vendor/github.com/Azure/azure-sdk-for-go/NOTICE new file mode 100644 index 000000000000..2d1d72608c28 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/NOTICE @@ -0,0 +1,5 @@ +Microsoft Azure-SDK-for-Go +Copyright 2014-2017 Microsoft + +This product includes software developed at +the Microsoft Corporation (https://www.microsoft.com). diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/availabilitysets.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/availabilitysets.go index 738c2c61ea0f..bf1a8fc34e38 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/availabilitysets.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/availabilitysets.go @@ -14,9 +14,8 @@ package compute // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/Azure/go-autorest/autorest" @@ -24,30 +23,27 @@ import ( "net/http" ) -// AvailabilitySetsClient is the the Compute Management Client. 
+// AvailabilitySetsClient is the compute Client type AvailabilitySetsClient struct { ManagementClient } -// NewAvailabilitySetsClient creates an instance of the AvailabilitySetsClient -// client. +// NewAvailabilitySetsClient creates an instance of the AvailabilitySetsClient client. func NewAvailabilitySetsClient(subscriptionID string) AvailabilitySetsClient { return NewAvailabilitySetsClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewAvailabilitySetsClientWithBaseURI creates an instance of the -// AvailabilitySetsClient client. +// NewAvailabilitySetsClientWithBaseURI creates an instance of the AvailabilitySetsClient client. func NewAvailabilitySetsClientWithBaseURI(baseURI string, subscriptionID string) AvailabilitySetsClient { return AvailabilitySetsClient{NewWithBaseURI(baseURI, subscriptionID)} } // CreateOrUpdate create or update an availability set. // -// resourceGroupName is the name of the resource group. name is the name of the -// availability set. parameters is parameters supplied to the Create -// Availability Set operation. -func (client AvailabilitySetsClient) CreateOrUpdate(resourceGroupName string, name string, parameters AvailabilitySet) (result AvailabilitySet, err error) { - req, err := client.CreateOrUpdatePreparer(resourceGroupName, name, parameters) +// resourceGroupName is the name of the resource group. availabilitySetName is the name of the availability set. +// parameters is parameters supplied to the Create Availability Set operation. +func (client AvailabilitySetsClient) CreateOrUpdate(resourceGroupName string, availabilitySetName string, parameters AvailabilitySet) (result AvailabilitySet, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, availabilitySetName, parameters) if err != nil { err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "CreateOrUpdate", nil, "Failure preparing request") return @@ -69,14 +65,14 @@ func (client AvailabilitySetsClient) CreateOrUpdate(resourceGroupName string, na } // CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
-func (client AvailabilitySetsClient) CreateOrUpdatePreparer(resourceGroupName string, name string, parameters AvailabilitySet) (*http.Request, error) { +func (client AvailabilitySetsClient) CreateOrUpdatePreparer(resourceGroupName string, availabilitySetName string, parameters AvailabilitySet) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": autorest.Encode("path", name), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "availabilitySetName": autorest.Encode("path", availabilitySetName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -85,7 +81,7 @@ func (client AvailabilitySetsClient) CreateOrUpdatePreparer(resourceGroupName st autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{name}", pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}", pathParameters), autorest.WithJSON(parameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare(&http.Request{}) @@ -112,8 +108,7 @@ func (client AvailabilitySetsClient) CreateOrUpdateResponder(resp *http.Response // Delete delete an availability set. // -// resourceGroupName is the name of the resource group. availabilitySetName is -// the name of the availability set. +// resourceGroupName is the name of the resource group. availabilitySetName is the name of the availability set. func (client AvailabilitySetsClient) Delete(resourceGroupName string, availabilitySetName string) (result OperationStatusResponse, err error) { req, err := client.DeletePreparer(resourceGroupName, availabilitySetName) if err != nil { @@ -144,7 +139,7 @@ func (client AvailabilitySetsClient) DeletePreparer(resourceGroupName string, av "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -178,8 +173,7 @@ func (client AvailabilitySetsClient) DeleteResponder(resp *http.Response) (resul // Get retrieves information about an availability set. // -// resourceGroupName is the name of the resource group. availabilitySetName is -// the name of the availability set. +// resourceGroupName is the name of the resource group. availabilitySetName is the name of the availability set. 
func (client AvailabilitySetsClient) Get(resourceGroupName string, availabilitySetName string) (result AvailabilitySet, err error) { req, err := client.GetPreparer(resourceGroupName, availabilitySetName) if err != nil { @@ -210,7 +204,7 @@ func (client AvailabilitySetsClient) GetPreparer(resourceGroupName string, avail "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -274,7 +268,7 @@ func (client AvailabilitySetsClient) ListPreparer(resourceGroupName string) (*ht "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -306,11 +300,10 @@ func (client AvailabilitySetsClient) ListResponder(resp *http.Response) (result return } -// ListAvailableSizes lists all available virtual machine sizes that can be -// used to create a new virtual machine in an existing availability set. +// ListAvailableSizes lists all available virtual machine sizes that can be used to create a new virtual machine in an +// existing availability set. // -// resourceGroupName is the name of the resource group. availabilitySetName is -// the name of the availability set. +// resourceGroupName is the name of the resource group. availabilitySetName is the name of the availability set. func (client AvailabilitySetsClient) ListAvailableSizes(resourceGroupName string, availabilitySetName string) (result VirtualMachineSizeListResult, err error) { req, err := client.ListAvailableSizesPreparer(resourceGroupName, availabilitySetName) if err != nil { @@ -341,7 +334,7 @@ func (client AvailabilitySetsClient) ListAvailableSizesPreparer(resourceGroupNam "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/client.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/client.go index c60452b9d794..98b1c0dad648 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/client.go @@ -1,7 +1,6 @@ -// Package compute implements the Azure ARM Compute service API version -// 2016-04-30-preview. +// Package compute implements the Azure ARM Compute service API version . // -// The Compute Management Client. +// Compute Client package compute // Copyright (c) Microsoft and contributors. All rights reserved. @@ -18,9 +17,8 @@ package compute // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
import ( "github.com/Azure/go-autorest/autorest" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/containerservices.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/containerservices.go new file mode 100644 index 000000000000..98c1706eef7b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/containerservices.go @@ -0,0 +1,579 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "net/http" +) + +// ContainerServicesClient is the compute Client +type ContainerServicesClient struct { + ManagementClient +} + +// NewContainerServicesClient creates an instance of the ContainerServicesClient client. +func NewContainerServicesClient(subscriptionID string) ContainerServicesClient { + return NewContainerServicesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewContainerServicesClientWithBaseURI creates an instance of the ContainerServicesClient client. +func NewContainerServicesClientWithBaseURI(baseURI string, subscriptionID string) ContainerServicesClient { + return ContainerServicesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a container service with the specified configuration of orchestrator, masters, and +// agents. This method may poll for completion. Polling can be canceled by passing the cancel channel argument. The +// channel will be used to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. containerServiceName is the name of the container service in +// the specified subscription and resource group. parameters is parameters supplied to the Create or Update a Container +// Service operation. 
+func (client ContainerServicesClient) CreateOrUpdate(resourceGroupName string, containerServiceName string, parameters ContainerService, cancel <-chan struct{}) (<-chan ContainerService, <-chan error) { + resultChan := make(chan ContainerService, 1) + errChan := make(chan error, 1) + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.ContainerServiceProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.CustomProfile", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.CustomProfile.Orchestrator", Name: validation.Null, Rule: true, Chain: nil}}}, + {Target: "parameters.ContainerServiceProperties.ServicePrincipalProfile", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.ServicePrincipalProfile.ClientID", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.ContainerServiceProperties.ServicePrincipalProfile.Secret", Name: validation.Null, Rule: true, Chain: nil}, + }}, + {Target: "parameters.ContainerServiceProperties.MasterProfile", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.MasterProfile.DNSPrefix", Name: validation.Null, Rule: true, Chain: nil}}}, + {Target: "parameters.ContainerServiceProperties.AgentPoolProfiles", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.ContainerServiceProperties.WindowsProfile", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.WindowsProfile.AdminUsername", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.WindowsProfile.AdminUsername", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+([._]?[a-zA-Z0-9]+)*$`, Chain: nil}}}, + {Target: "parameters.ContainerServiceProperties.WindowsProfile.AdminPassword", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.WindowsProfile.AdminPassword", Name: validation.Pattern, Rule: `^(?=.*[a-z])(?=.*[A-Z])(?=.*[!@#$%\^&\*\(\)])[a-zA-Z\d!@#$%\^&\*\(\)]{12,123}$`, Chain: nil}}}, + }}, + {Target: "parameters.ContainerServiceProperties.LinuxProfile", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.LinuxProfile.AdminUsername", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.LinuxProfile.AdminUsername", Name: validation.Pattern, Rule: `^[a-z][a-z0-9_-]*$`, Chain: nil}}}, + {Target: "parameters.ContainerServiceProperties.LinuxProfile.SSH", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.LinuxProfile.SSH.PublicKeys", Name: validation.Null, Rule: true, Chain: nil}}}, + }}, + {Target: "parameters.ContainerServiceProperties.DiagnosticsProfile", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.DiagnosticsProfile.VMDiagnostics", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.DiagnosticsProfile.VMDiagnostics.Enabled", Name: validation.Null, Rule: true, Chain: nil}}}, + }}, + }}}}}); err != nil { + errChan <- 
validation.NewErrorWithValidationError(err, "compute.ContainerServicesClient", "CreateOrUpdate") + close(errChan) + close(resultChan) + return resultChan, errChan + } + + go func() { + var err error + var result ContainerService + defer func() { + if err != nil { + errChan <- err + } + resultChan <- result + close(resultChan) + close(errChan) + }() + req, err := client.CreateOrUpdatePreparer(resourceGroupName, containerServiceName, parameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "CreateOrUpdate", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client ContainerServicesClient) CreateOrUpdatePreparer(resourceGroupName string, containerServiceName string, parameters ContainerService, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "containerServiceName": autorest.Encode("path", containerServiceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-01-31" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/containerServices/{containerServiceName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client ContainerServicesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client ContainerServicesClient) CreateOrUpdateResponder(resp *http.Response) (result ContainerService, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes the specified container service in the specified subscription and resource group. The operation does +// not delete other resources created as part of creating a container service, including storage accounts, VMs, and +// availability sets. All the other resources created with the container service are part of the same resource group +// and can be deleted individually. This method may poll for completion. 
Polling can be canceled by passing the cancel +// channel argument. The channel will be used to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. containerServiceName is the name of the container service in +// the specified subscription and resource group. +func (client ContainerServicesClient) Delete(resourceGroupName string, containerServiceName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { + resultChan := make(chan autorest.Response, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result autorest.Response + defer func() { + if err != nil { + errChan <- err + } + resultChan <- result + close(resultChan) + close(errChan) + }() + req, err := client.DeletePreparer(resourceGroupName, containerServiceName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "Delete", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// DeletePreparer prepares the Delete request. +func (client ContainerServicesClient) DeletePreparer(resourceGroupName string, containerServiceName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "containerServiceName": autorest.Encode("path", containerServiceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-01-31" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/containerServices/{containerServiceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client ContainerServicesClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client ContainerServicesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the properties of the specified container service in the specified subscription and resource group. The +// operation returns the properties including state, orchestrator, number of masters and agents, and FQDNs of masters +// and agents. +// +// resourceGroupName is the name of the resource group. 
containerServiceName is the name of the container service in +// the specified subscription and resource group. +func (client ContainerServicesClient) Get(resourceGroupName string, containerServiceName string) (result ContainerService, err error) { + req, err := client.GetPreparer(resourceGroupName, containerServiceName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client ContainerServicesClient) GetPreparer(resourceGroupName string, containerServiceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "containerServiceName": autorest.Encode("path", containerServiceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-01-31" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/containerServices/{containerServiceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client ContainerServicesClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client ContainerServicesClient) GetResponder(resp *http.Response) (result ContainerService, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a list of container services in the specified subscription. The operation returns properties of each +// container service including state, orchestrator, number of masters and agents, and FQDNs of masters and agents. 
+func (client ContainerServicesClient) List() (result ContainerServiceListResult, err error) { + req, err := client.ListPreparer() + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client ContainerServicesClient) ListPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-01-31" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/containerServices", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ContainerServicesClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client ContainerServicesClient) ListResponder(resp *http.Response) (result ContainerServiceListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client ContainerServicesClient) ListNextResults(lastResults ContainerServiceListResult) (result ContainerServiceListResult, err error) { + req, err := lastResults.ContainerServiceListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "List", resp, "Failure responding to next results request") + } + + return +} + +// ListComplete gets all elements from the list without paging. 
+func (client ContainerServicesClient) ListComplete(cancel <-chan struct{}) (<-chan ContainerService, <-chan error) { + resultChan := make(chan ContainerService) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List() + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} + +// ListByResourceGroup gets a list of container services in the specified subscription and resource group. The +// operation returns properties of each container service including state, orchestrator, number of masters and agents, +// and FQDNs of masters and agents. +// +// resourceGroupName is the name of the resource group. +func (client ContainerServicesClient) ListByResourceGroup(resourceGroupName string) (result ContainerServiceListResult, err error) { + req, err := client.ListByResourceGroupPreparer(resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "ListByResourceGroup", nil, "Failure preparing request") + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "ListByResourceGroup", resp, "Failure sending request") + return + } + + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. +func (client ContainerServicesClient) ListByResourceGroupPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-01-31" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/containerServices", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. +func (client ContainerServicesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. 
+func (client ContainerServicesClient) ListByResourceGroupResponder(resp *http.Response) (result ContainerServiceListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByResourceGroupNextResults retrieves the next set of results, if any. +func (client ContainerServicesClient) ListByResourceGroupNextResults(lastResults ContainerServiceListResult) (result ContainerServiceListResult, err error) { + req, err := lastResults.ContainerServiceListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "ListByResourceGroup", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "ListByResourceGroup", resp, "Failure sending next results request") + } + + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "ListByResourceGroup", resp, "Failure responding to next results request") + } + + return +} + +// ListByResourceGroupComplete gets all elements from the list without paging. +func (client ContainerServicesClient) ListByResourceGroupComplete(resourceGroupName string, cancel <-chan struct{}) (<-chan ContainerService, <-chan error) { + resultChan := make(chan ContainerService) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.ListByResourceGroup(resourceGroupName) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListByResourceGroupNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/disks.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/disks.go new file mode 100644 index 000000000000..47e9e4029ce7 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/disks.go @@ -0,0 +1,818 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "net/http" +) + +// DisksClient is the compute Client +type DisksClient struct { + ManagementClient +} + +// NewDisksClient creates an instance of the DisksClient client. +func NewDisksClient(subscriptionID string) DisksClient { + return NewDisksClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewDisksClientWithBaseURI creates an instance of the DisksClient client. +func NewDisksClientWithBaseURI(baseURI string, subscriptionID string) DisksClient { + return DisksClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a disk. This method may poll for completion. Polling can be canceled by passing +// the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. diskName is the name of the disk within the given subscription +// and resource group. disk is disk object supplied in the body of the Put disk operation. +func (client DisksClient) CreateOrUpdate(resourceGroupName string, diskName string, disk Disk, cancel <-chan struct{}) (<-chan Disk, <-chan error) { + resultChan := make(chan Disk, 1) + errChan := make(chan error, 1) + if err := validation.Validate([]validation.Validation{ + {TargetValue: disk, + Constraints: []validation.Constraint{{Target: "disk.DiskProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "disk.DiskProperties.CreationData", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "disk.DiskProperties.CreationData.ImageReference", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "disk.DiskProperties.CreationData.ImageReference.ID", Name: validation.Null, Rule: true, Chain: nil}}}, + }}, + {Target: "disk.DiskProperties.EncryptionSettings", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "disk.DiskProperties.EncryptionSettings.DiskEncryptionKey", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "disk.DiskProperties.EncryptionSettings.DiskEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "disk.DiskProperties.EncryptionSettings.DiskEncryptionKey.SecretURL", Name: validation.Null, Rule: true, Chain: nil}, + }}, + {Target: "disk.DiskProperties.EncryptionSettings.KeyEncryptionKey", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "disk.DiskProperties.EncryptionSettings.KeyEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "disk.DiskProperties.EncryptionSettings.KeyEncryptionKey.KeyURL", Name: validation.Null, Rule: true, Chain: nil}, + }}, + }}, + }}}}}); err != nil { + errChan <- validation.NewErrorWithValidationError(err, "compute.DisksClient", "CreateOrUpdate") + close(errChan) + close(resultChan) + return resultChan, errChan + } + + go func() { + var err error + var result Disk + defer func() { + if err != nil { + errChan <- err + } + resultChan <- result + close(resultChan) + close(errChan) + }() + req, err := client.CreateOrUpdatePreparer(resourceGroupName, diskName, disk, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = 
autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.DisksClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "CreateOrUpdate", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client DisksClient) CreateOrUpdatePreparer(resourceGroupName string, diskName string, disk Disk, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskName": autorest.Encode("path", diskName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}", pathParameters), + autorest.WithJSON(disk), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client DisksClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client DisksClient) CreateOrUpdateResponder(resp *http.Response) (result Disk, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a disk. This method may poll for completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. diskName is the name of the disk within the given subscription +// and resource group. 
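// A minimal sketch of driving the long-running CreateOrUpdate above from
// caller code, assuming a configured DisksClient and a compute.Disk value
// prepared elsewhere (import assumed:
// "github.com/Azure/azure-sdk-for-go/arm/compute").
func createDisk(client compute.DisksClient, group, name string, disk compute.Disk) (compute.Disk, error) {
	cancel := make(chan struct{}) // closing cancel would abort polling and outstanding requests
	diskChan, errChan := client.CreateOrUpdate(group, name, disk, cancel)

	// The generated goroutine always sends one result, sends on errChan only
	// when err != nil, and then closes both buffered channels, so receiving
	// in this order cannot block and the error receive yields nil on success.
	created := <-diskChan
	if err := <-errChan; err != nil {
		return compute.Disk{}, err
	}
	return created, nil
}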
+func (client DisksClient) Delete(resourceGroupName string, diskName string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { + resultChan := make(chan OperationStatusResponse, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result OperationStatusResponse + defer func() { + if err != nil { + errChan <- err + } + resultChan <- result + close(resultChan) + close(errChan) + }() + req, err := client.DeletePreparer(resourceGroupName, diskName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.DisksClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "Delete", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// DeletePreparer prepares the Delete request. +func (client DisksClient) DeletePreparer(resourceGroupName string, diskName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskName": autorest.Encode("path", diskName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client DisksClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client DisksClient) DeleteResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get gets information about a disk. +// +// resourceGroupName is the name of the resource group. diskName is the name of the disk within the given subscription +// and resource group. 
+func (client DisksClient) Get(resourceGroupName string, diskName string) (result Disk, err error) { + req, err := client.GetPreparer(resourceGroupName, diskName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.DisksClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client DisksClient) GetPreparer(resourceGroupName string, diskName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskName": autorest.Encode("path", diskName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client DisksClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client DisksClient) GetResponder(resp *http.Response) (result Disk, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GrantAccess grants access to a disk. This method may poll for completion. Polling can be canceled by passing the +// cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. diskName is the name of the disk within the given subscription +// and resource group. grantAccessData is access data object supplied in the body of the get disk access operation. 
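// A sketch contrasting the two calling conventions shown above, under the
// same assumptions as the earlier sketches (configured DisksClient,
// placeholder names): Get is synchronous, Delete is channel-based.
func deleteExistingDisk(client compute.DisksClient, group, name string) error {
	// Get returns (Disk, error) directly.
	if _, err := client.Get(group, name); err != nil {
		return err
	}
	// Delete returns result and error channels and polls until the operation
	// completes or cancel is closed.
	cancel := make(chan struct{})
	statusChan, errChan := client.Delete(group, name, cancel)
	<-statusChan // one OperationStatusResponse is always sent before the channels close
	if err := <-errChan; err != nil {
		return err
	}
	return nil
}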
+func (client DisksClient) GrantAccess(resourceGroupName string, diskName string, grantAccessData GrantAccessData, cancel <-chan struct{}) (<-chan AccessURI, <-chan error) { + resultChan := make(chan AccessURI, 1) + errChan := make(chan error, 1) + if err := validation.Validate([]validation.Validation{ + {TargetValue: grantAccessData, + Constraints: []validation.Constraint{{Target: "grantAccessData.DurationInSeconds", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + errChan <- validation.NewErrorWithValidationError(err, "compute.DisksClient", "GrantAccess") + close(errChan) + close(resultChan) + return resultChan, errChan + } + + go func() { + var err error + var result AccessURI + defer func() { + if err != nil { + errChan <- err + } + resultChan <- result + close(resultChan) + close(errChan) + }() + req, err := client.GrantAccessPreparer(resourceGroupName, diskName, grantAccessData, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "GrantAccess", nil, "Failure preparing request") + return + } + + resp, err := client.GrantAccessSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.DisksClient", "GrantAccess", resp, "Failure sending request") + return + } + + result, err = client.GrantAccessResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "GrantAccess", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// GrantAccessPreparer prepares the GrantAccess request. +func (client DisksClient) GrantAccessPreparer(resourceGroupName string, diskName string, grantAccessData GrantAccessData, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskName": autorest.Encode("path", diskName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/beginGetAccess", pathParameters), + autorest.WithJSON(grantAccessData), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// GrantAccessSender sends the GrantAccess request. The method will close the +// http.Response Body if it receives an error. +func (client DisksClient) GrantAccessSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// GrantAccessResponder handles the response to the GrantAccess request. The method always +// closes the http.Response Body. +func (client DisksClient) GrantAccessResponder(resp *http.Response) (result AccessURI, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists all the disks under a subscription. 
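// A sketch of requesting temporary export access via GrantAccess above. The
// generated validation marks DurationInSeconds as required; its pointer type
// and the go-autorest "to" helper are assumptions of this sketch (imports
// assumed: the compute package and "github.com/Azure/go-autorest/autorest/to").
func grantDiskAccess(client compute.DisksClient, group, name string) (compute.AccessURI, error) {
	data := compute.GrantAccessData{
		DurationInSeconds: to.Int32Ptr(3600), // one hour of access; assumed to be *int32
	}
	cancel := make(chan struct{})
	uriChan, errChan := client.GrantAccess(group, name, data, cancel)
	uri := <-uriChan
	if err := <-errChan; err != nil {
		return compute.AccessURI{}, err
	}
	return uri, nil
}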
+func (client DisksClient) List() (result DiskList, err error) { + req, err := client.ListPreparer() + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.DisksClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client DisksClient) ListPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/disks", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client DisksClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client DisksClient) ListResponder(resp *http.Response) (result DiskList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client DisksClient) ListNextResults(lastResults DiskList) (result DiskList, err error) { + req, err := lastResults.DiskListPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.DisksClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.DisksClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "List", resp, "Failure responding to next results request") + } + + return +} + +// ListComplete gets all elements from the list without paging. 
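// For callers that prefer explicit paging over the channel helper that
// follows, this sketch walks DiskList pages by hand with List and
// ListNextResults above, relying only on the Value/NextLink handling visible
// in the generated code (imports assumed as in the earlier sketches).
func allDisks(client compute.DisksClient) ([]compute.Disk, error) {
	var disks []compute.Disk
	page, err := client.List()
	if err != nil {
		return nil, err
	}
	for {
		if page.Value != nil {
			disks = append(disks, *page.Value...)
		}
		if page.NextLink == nil {
			break
		}
		page, err = client.ListNextResults(page)
		if err != nil {
			return nil, err
		}
	}
	return disks, nil
}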
+func (client DisksClient) ListComplete(cancel <-chan struct{}) (<-chan Disk, <-chan error) { + resultChan := make(chan Disk) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List() + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} + +// ListByResourceGroup lists all the disks under a resource group. +// +// resourceGroupName is the name of the resource group. +func (client DisksClient) ListByResourceGroup(resourceGroupName string) (result DiskList, err error) { + req, err := client.ListByResourceGroupPreparer(resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "ListByResourceGroup", nil, "Failure preparing request") + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.DisksClient", "ListByResourceGroup", resp, "Failure sending request") + return + } + + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. +func (client DisksClient) ListByResourceGroupPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. +func (client DisksClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. +func (client DisksClient) ListByResourceGroupResponder(resp *http.Response) (result DiskList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByResourceGroupNextResults retrieves the next set of results, if any. 
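// A sketch of the cancellation contract of the ListComplete helper above:
// closing the cancel channel makes the producer goroutine return, its defer
// closes the result channel, and the range below ends. Names are
// placeholders; imports assumed as in the earlier sketches.
func firstNDisks(client compute.DisksClient, n int) ([]compute.Disk, error) {
	cancel := make(chan struct{})
	diskChan, errChan := client.ListComplete(cancel)

	var disks []compute.Disk
	for disk := range diskChan {
		disks = append(disks, disk)
		if len(disks) == n {
			close(cancel) // stop paging early; the producer exits at its next select
			break
		}
	}
	// errChan is closed by the producer, so this receive is nil unless a
	// paging request actually failed.
	if err := <-errChan; err != nil {
		return disks, err
	}
	return disks, nil
}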
+func (client DisksClient) ListByResourceGroupNextResults(lastResults DiskList) (result DiskList, err error) { + req, err := lastResults.DiskListPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.DisksClient", "ListByResourceGroup", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.DisksClient", "ListByResourceGroup", resp, "Failure sending next results request") + } + + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "ListByResourceGroup", resp, "Failure responding to next results request") + } + + return +} + +// ListByResourceGroupComplete gets all elements from the list without paging. +func (client DisksClient) ListByResourceGroupComplete(resourceGroupName string, cancel <-chan struct{}) (<-chan Disk, <-chan error) { + resultChan := make(chan Disk) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.ListByResourceGroup(resourceGroupName) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListByResourceGroupNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} + +// RevokeAccess revokes access to a disk. This method may poll for completion. Polling can be canceled by passing the +// cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. diskName is the name of the disk within the given subscription +// and resource group. +func (client DisksClient) RevokeAccess(resourceGroupName string, diskName string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { + resultChan := make(chan OperationStatusResponse, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result OperationStatusResponse + defer func() { + if err != nil { + errChan <- err + } + resultChan <- result + close(resultChan) + close(errChan) + }() + req, err := client.RevokeAccessPreparer(resourceGroupName, diskName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "RevokeAccess", nil, "Failure preparing request") + return + } + + resp, err := client.RevokeAccessSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.DisksClient", "RevokeAccess", resp, "Failure sending request") + return + } + + result, err = client.RevokeAccessResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "RevokeAccess", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// RevokeAccessPreparer prepares the RevokeAccess request. 
+func (client DisksClient) RevokeAccessPreparer(resourceGroupName string, diskName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskName": autorest.Encode("path", diskName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/endGetAccess", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// RevokeAccessSender sends the RevokeAccess request. The method will close the +// http.Response Body if it receives an error. +func (client DisksClient) RevokeAccessSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// RevokeAccessResponder handles the response to the RevokeAccess request. The method always +// closes the http.Response Body. +func (client DisksClient) RevokeAccessResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Update updates (patches) a disk. This method may poll for completion. Polling can be canceled by passing the cancel +// channel argument. The channel will be used to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. diskName is the name of the disk within the given subscription +// and resource group. disk is disk object supplied in the body of the Patch disk operation. +func (client DisksClient) Update(resourceGroupName string, diskName string, disk DiskUpdate, cancel <-chan struct{}) (<-chan Disk, <-chan error) { + resultChan := make(chan Disk, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result Disk + defer func() { + if err != nil { + errChan <- err + } + resultChan <- result + close(resultChan) + close(errChan) + }() + req, err := client.UpdatePreparer(resourceGroupName, diskName, disk, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "Update", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.DisksClient", "Update", resp, "Failure sending request") + return + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "Update", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// UpdatePreparer prepares the Update request. 
+func (client DisksClient) UpdatePreparer(resourceGroupName string, diskName string, disk DiskUpdate, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskName": autorest.Encode("path", diskName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}", pathParameters), + autorest.WithJSON(disk), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client DisksClient) UpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client DisksClient) UpdateResponder(resp *http.Response) (result Disk, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/images.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/images.go index 64f14dd082ff..bbde2bc7f95f 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/images.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/images.go @@ -14,9 +14,8 @@ package compute // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/Azure/go-autorest/autorest" @@ -25,7 +24,7 @@ import ( "net/http" ) -// ImagesClient is the the Compute Management Client. +// ImagesClient is the compute Client type ImagesClient struct { ManagementClient } @@ -40,14 +39,11 @@ func NewImagesClientWithBaseURI(baseURI string, subscriptionID string) ImagesCli return ImagesClient{NewWithBaseURI(baseURI, subscriptionID)} } -// CreateOrUpdate create or update an image. This method may poll for -// completion. Polling can be canceled by passing the cancel channel argument. -// The channel will be used to cancel polling and any outstanding HTTP -// requests. +// CreateOrUpdate create or update an image. This method may poll for completion. Polling can be canceled by passing +// the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. imageName is the name -// of the image. parameters is parameters supplied to the Create Image -// operation. +// resourceGroupName is the name of the resource group. 
imageName is the name of the image. parameters is parameters +// supplied to the Create Image operation. func (client ImagesClient) CreateOrUpdate(resourceGroupName string, imageName string, parameters Image, cancel <-chan struct{}) (<-chan Image, <-chan error) { resultChan := make(chan Image, 1) errChan := make(chan error, 1) @@ -67,8 +63,10 @@ func (client ImagesClient) CreateOrUpdate(resourceGroupName string, imageName st var err error var result Image defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -101,7 +99,7 @@ func (client ImagesClient) CreateOrUpdatePreparer(resourceGroupName string, imag "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -137,12 +135,10 @@ func (client ImagesClient) CreateOrUpdateResponder(resp *http.Response) (result return } -// Delete deletes an Image. This method may poll for completion. Polling can be -// canceled by passing the cancel channel argument. The channel will be used to -// cancel polling and any outstanding HTTP requests. +// Delete deletes an Image. This method may poll for completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. imageName is the name -// of the image. +// resourceGroupName is the name of the resource group. imageName is the name of the image. func (client ImagesClient) Delete(resourceGroupName string, imageName string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { resultChan := make(chan OperationStatusResponse, 1) errChan := make(chan error, 1) @@ -150,8 +146,10 @@ func (client ImagesClient) Delete(resourceGroupName string, imageName string, ca var err error var result OperationStatusResponse defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -184,7 +182,7 @@ func (client ImagesClient) DeletePreparer(resourceGroupName string, imageName st "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -220,8 +218,8 @@ func (client ImagesClient) DeleteResponder(resp *http.Response) (result Operatio // Get gets an image. // -// resourceGroupName is the name of the resource group. imageName is the name -// of the image. expand is the expand expression to apply on the operation. +// resourceGroupName is the name of the resource group. imageName is the name of the image. expand is the expand +// expression to apply on the operation. 
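// The defer change above means errChan now receives a value only when
// err != nil, while both channels are still closed afterwards. A hedged
// consumption sketch of that contract for ImagesClient.CreateOrUpdate, with
// placeholder names and an Image value assumed to be built elsewhere
// (imports assumed as in the earlier sketches):
func createImage(client compute.ImagesClient, group, name string, image compute.Image) (compute.Image, error) {
	cancel := make(chan struct{})
	imageChan, errChan := client.CreateOrUpdate(group, name, image, cancel)
	created := <-imageChan            // exactly one value is sent (zero Image on failure)
	if err := <-errChan; err != nil { // nil when errChan was closed without a send
		return compute.Image{}, err
	}
	return created, nil
}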
func (client ImagesClient) Get(resourceGroupName string, imageName string, expand string) (result Image, err error) { req, err := client.GetPreparer(resourceGroupName, imageName, expand) if err != nil { @@ -252,7 +250,7 @@ func (client ImagesClient) GetPreparer(resourceGroupName string, imageName strin "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -287,9 +285,8 @@ func (client ImagesClient) GetResponder(resp *http.Response) (result Image, err return } -// List gets the list of Images in the subscription. Use nextLink property in -// the response to get the next page of Images. Do this till nextLink is not -// null to fetch all the Images. +// List gets the list of Images in the subscription. Use nextLink property in the response to get the next page of +// Images. Do this till nextLink is not null to fetch all the Images. func (client ImagesClient) List() (result ImageListResult, err error) { req, err := client.ListPreparer() if err != nil { @@ -318,7 +315,7 @@ func (client ImagesClient) ListPreparer() (*http.Request, error) { "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -374,6 +371,51 @@ func (client ImagesClient) ListNextResults(lastResults ImageListResult) (result return } +// ListComplete gets all elements from the list without paging. +func (client ImagesClient) ListComplete(cancel <-chan struct{}) (<-chan Image, <-chan error) { + resultChan := make(chan Image) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List() + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} + // ListByResourceGroup gets the list of images under a resource group. // // resourceGroupName is the name of the resource group. @@ -406,7 +448,7 @@ func (client ImagesClient) ListByResourceGroupPreparer(resourceGroupName string) "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -461,3 +503,48 @@ func (client ImagesClient) ListByResourceGroupNextResults(lastResults ImageListR return } + +// ListByResourceGroupComplete gets all elements from the list without paging. 
+func (client ImagesClient) ListByResourceGroupComplete(resourceGroupName string, cancel <-chan struct{}) (<-chan Image, <-chan error) { + resultChan := make(chan Image) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.ListByResourceGroup(resourceGroupName) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListByResourceGroupNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/models.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/models.go index a9524daeffac..bae95ca2542e 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/models.go @@ -14,9 +14,8 @@ package compute // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/Azure/go-autorest/autorest" @@ -25,37 +24,176 @@ import ( "net/http" ) +// AccessLevel enumerates the values for access level. +type AccessLevel string + +const ( + // None specifies the none state for access level. + None AccessLevel = "None" + // Read specifies the read state for access level. + Read AccessLevel = "Read" +) + // CachingTypes enumerates the values for caching types. type CachingTypes string const ( - // None specifies the none state for caching types. - None CachingTypes = "None" - // ReadOnly specifies the read only state for caching types. - ReadOnly CachingTypes = "ReadOnly" - // ReadWrite specifies the read write state for caching types. - ReadWrite CachingTypes = "ReadWrite" + // CachingTypesNone specifies the caching types none state for caching types. + CachingTypesNone CachingTypes = "None" + // CachingTypesReadOnly specifies the caching types read only state for caching types. + CachingTypesReadOnly CachingTypes = "ReadOnly" + // CachingTypesReadWrite specifies the caching types read write state for caching types. + CachingTypesReadWrite CachingTypes = "ReadWrite" ) // ComponentNames enumerates the values for component names. type ComponentNames string const ( - // MicrosoftWindowsShellSetup specifies the microsoft windows shell setup - // state for component names. + // MicrosoftWindowsShellSetup specifies the microsoft windows shell setup state for component names. MicrosoftWindowsShellSetup ComponentNames = "Microsoft-Windows-Shell-Setup" ) +// ContainerServiceOrchestratorTypes enumerates the values for container service orchestrator types. +type ContainerServiceOrchestratorTypes string + +const ( + // Custom specifies the custom state for container service orchestrator types. + Custom ContainerServiceOrchestratorTypes = "Custom" + // DCOS specifies the dcos state for container service orchestrator types. 
+ DCOS ContainerServiceOrchestratorTypes = "DCOS" + // Kubernetes specifies the kubernetes state for container service orchestrator types. + Kubernetes ContainerServiceOrchestratorTypes = "Kubernetes" + // Swarm specifies the swarm state for container service orchestrator types. + Swarm ContainerServiceOrchestratorTypes = "Swarm" +) + +// ContainerServiceVMSizeTypes enumerates the values for container service vm size types. +type ContainerServiceVMSizeTypes string + +const ( + // StandardA0 specifies the standard a0 state for container service vm size types. + StandardA0 ContainerServiceVMSizeTypes = "Standard_A0" + // StandardA1 specifies the standard a1 state for container service vm size types. + StandardA1 ContainerServiceVMSizeTypes = "Standard_A1" + // StandardA10 specifies the standard a10 state for container service vm size types. + StandardA10 ContainerServiceVMSizeTypes = "Standard_A10" + // StandardA11 specifies the standard a11 state for container service vm size types. + StandardA11 ContainerServiceVMSizeTypes = "Standard_A11" + // StandardA2 specifies the standard a2 state for container service vm size types. + StandardA2 ContainerServiceVMSizeTypes = "Standard_A2" + // StandardA3 specifies the standard a3 state for container service vm size types. + StandardA3 ContainerServiceVMSizeTypes = "Standard_A3" + // StandardA4 specifies the standard a4 state for container service vm size types. + StandardA4 ContainerServiceVMSizeTypes = "Standard_A4" + // StandardA5 specifies the standard a5 state for container service vm size types. + StandardA5 ContainerServiceVMSizeTypes = "Standard_A5" + // StandardA6 specifies the standard a6 state for container service vm size types. + StandardA6 ContainerServiceVMSizeTypes = "Standard_A6" + // StandardA7 specifies the standard a7 state for container service vm size types. + StandardA7 ContainerServiceVMSizeTypes = "Standard_A7" + // StandardA8 specifies the standard a8 state for container service vm size types. + StandardA8 ContainerServiceVMSizeTypes = "Standard_A8" + // StandardA9 specifies the standard a9 state for container service vm size types. + StandardA9 ContainerServiceVMSizeTypes = "Standard_A9" + // StandardD1 specifies the standard d1 state for container service vm size types. + StandardD1 ContainerServiceVMSizeTypes = "Standard_D1" + // StandardD11 specifies the standard d11 state for container service vm size types. + StandardD11 ContainerServiceVMSizeTypes = "Standard_D11" + // StandardD11V2 specifies the standard d11v2 state for container service vm size types. + StandardD11V2 ContainerServiceVMSizeTypes = "Standard_D11_v2" + // StandardD12 specifies the standard d12 state for container service vm size types. + StandardD12 ContainerServiceVMSizeTypes = "Standard_D12" + // StandardD12V2 specifies the standard d12v2 state for container service vm size types. + StandardD12V2 ContainerServiceVMSizeTypes = "Standard_D12_v2" + // StandardD13 specifies the standard d13 state for container service vm size types. + StandardD13 ContainerServiceVMSizeTypes = "Standard_D13" + // StandardD13V2 specifies the standard d13v2 state for container service vm size types. + StandardD13V2 ContainerServiceVMSizeTypes = "Standard_D13_v2" + // StandardD14 specifies the standard d14 state for container service vm size types. + StandardD14 ContainerServiceVMSizeTypes = "Standard_D14" + // StandardD14V2 specifies the standard d14v2 state for container service vm size types. 
+ StandardD14V2 ContainerServiceVMSizeTypes = "Standard_D14_v2" + // StandardD1V2 specifies the standard d1v2 state for container service vm size types. + StandardD1V2 ContainerServiceVMSizeTypes = "Standard_D1_v2" + // StandardD2 specifies the standard d2 state for container service vm size types. + StandardD2 ContainerServiceVMSizeTypes = "Standard_D2" + // StandardD2V2 specifies the standard d2v2 state for container service vm size types. + StandardD2V2 ContainerServiceVMSizeTypes = "Standard_D2_v2" + // StandardD3 specifies the standard d3 state for container service vm size types. + StandardD3 ContainerServiceVMSizeTypes = "Standard_D3" + // StandardD3V2 specifies the standard d3v2 state for container service vm size types. + StandardD3V2 ContainerServiceVMSizeTypes = "Standard_D3_v2" + // StandardD4 specifies the standard d4 state for container service vm size types. + StandardD4 ContainerServiceVMSizeTypes = "Standard_D4" + // StandardD4V2 specifies the standard d4v2 state for container service vm size types. + StandardD4V2 ContainerServiceVMSizeTypes = "Standard_D4_v2" + // StandardD5V2 specifies the standard d5v2 state for container service vm size types. + StandardD5V2 ContainerServiceVMSizeTypes = "Standard_D5_v2" + // StandardDS1 specifies the standard ds1 state for container service vm size types. + StandardDS1 ContainerServiceVMSizeTypes = "Standard_DS1" + // StandardDS11 specifies the standard ds11 state for container service vm size types. + StandardDS11 ContainerServiceVMSizeTypes = "Standard_DS11" + // StandardDS12 specifies the standard ds12 state for container service vm size types. + StandardDS12 ContainerServiceVMSizeTypes = "Standard_DS12" + // StandardDS13 specifies the standard ds13 state for container service vm size types. + StandardDS13 ContainerServiceVMSizeTypes = "Standard_DS13" + // StandardDS14 specifies the standard ds14 state for container service vm size types. + StandardDS14 ContainerServiceVMSizeTypes = "Standard_DS14" + // StandardDS2 specifies the standard ds2 state for container service vm size types. + StandardDS2 ContainerServiceVMSizeTypes = "Standard_DS2" + // StandardDS3 specifies the standard ds3 state for container service vm size types. + StandardDS3 ContainerServiceVMSizeTypes = "Standard_DS3" + // StandardDS4 specifies the standard ds4 state for container service vm size types. + StandardDS4 ContainerServiceVMSizeTypes = "Standard_DS4" + // StandardG1 specifies the standard g1 state for container service vm size types. + StandardG1 ContainerServiceVMSizeTypes = "Standard_G1" + // StandardG2 specifies the standard g2 state for container service vm size types. + StandardG2 ContainerServiceVMSizeTypes = "Standard_G2" + // StandardG3 specifies the standard g3 state for container service vm size types. + StandardG3 ContainerServiceVMSizeTypes = "Standard_G3" + // StandardG4 specifies the standard g4 state for container service vm size types. + StandardG4 ContainerServiceVMSizeTypes = "Standard_G4" + // StandardG5 specifies the standard g5 state for container service vm size types. + StandardG5 ContainerServiceVMSizeTypes = "Standard_G5" + // StandardGS1 specifies the standard gs1 state for container service vm size types. + StandardGS1 ContainerServiceVMSizeTypes = "Standard_GS1" + // StandardGS2 specifies the standard gs2 state for container service vm size types. + StandardGS2 ContainerServiceVMSizeTypes = "Standard_GS2" + // StandardGS3 specifies the standard gs3 state for container service vm size types. 
+ StandardGS3 ContainerServiceVMSizeTypes = "Standard_GS3" + // StandardGS4 specifies the standard gs4 state for container service vm size types. + StandardGS4 ContainerServiceVMSizeTypes = "Standard_GS4" + // StandardGS5 specifies the standard gs5 state for container service vm size types. + StandardGS5 ContainerServiceVMSizeTypes = "Standard_GS5" +) + +// DiskCreateOption enumerates the values for disk create option. +type DiskCreateOption string + +const ( + // Attach specifies the attach state for disk create option. + Attach DiskCreateOption = "Attach" + // Copy specifies the copy state for disk create option. + Copy DiskCreateOption = "Copy" + // Empty specifies the empty state for disk create option. + Empty DiskCreateOption = "Empty" + // FromImage specifies the from image state for disk create option. + FromImage DiskCreateOption = "FromImage" + // Import specifies the import state for disk create option. + Import DiskCreateOption = "Import" +) + // DiskCreateOptionTypes enumerates the values for disk create option types. type DiskCreateOptionTypes string const ( - // Attach specifies the attach state for disk create option types. - Attach DiskCreateOptionTypes = "attach" - // Empty specifies the empty state for disk create option types. - Empty DiskCreateOptionTypes = "empty" - // FromImage specifies the from image state for disk create option types. - FromImage DiskCreateOptionTypes = "fromImage" + // DiskCreateOptionTypesAttach specifies the disk create option types attach state for disk create option types. + DiskCreateOptionTypesAttach DiskCreateOptionTypes = "Attach" + // DiskCreateOptionTypesEmpty specifies the disk create option types empty state for disk create option types. + DiskCreateOptionTypesEmpty DiskCreateOptionTypes = "Empty" + // DiskCreateOptionTypesFromImage specifies the disk create option types from image state for disk create option types. + DiskCreateOptionTypesFromImage DiskCreateOptionTypes = "FromImage" ) // InstanceViewTypes enumerates the values for instance view types. @@ -66,16 +204,41 @@ const ( InstanceView InstanceViewTypes = "instanceView" ) -// OperatingSystemStateTypes enumerates the values for operating system state -// types. +// IPVersion enumerates the values for ip version. +type IPVersion string + +const ( + // IPv4 specifies the i pv 4 state for ip version. + IPv4 IPVersion = "IPv4" + // IPv6 specifies the i pv 6 state for ip version. + IPv6 IPVersion = "IPv6" +) + +// MaintenanceOperationResultCodeTypes enumerates the values for maintenance operation result code types. +type MaintenanceOperationResultCodeTypes string + +const ( + // MaintenanceOperationResultCodeTypesMaintenanceAborted specifies the maintenance operation result code types + // maintenance aborted state for maintenance operation result code types. + MaintenanceOperationResultCodeTypesMaintenanceAborted MaintenanceOperationResultCodeTypes = "MaintenanceAborted" + // MaintenanceOperationResultCodeTypesMaintenanceCompleted specifies the maintenance operation result code types + // maintenance completed state for maintenance operation result code types. + MaintenanceOperationResultCodeTypesMaintenanceCompleted MaintenanceOperationResultCodeTypes = "MaintenanceCompleted" + // MaintenanceOperationResultCodeTypesNone specifies the maintenance operation result code types none state for + // maintenance operation result code types. 
+ MaintenanceOperationResultCodeTypesNone MaintenanceOperationResultCodeTypes = "None" + // MaintenanceOperationResultCodeTypesRetryLater specifies the maintenance operation result code types retry later + // state for maintenance operation result code types. + MaintenanceOperationResultCodeTypesRetryLater MaintenanceOperationResultCodeTypes = "RetryLater" +) + +// OperatingSystemStateTypes enumerates the values for operating system state types. type OperatingSystemStateTypes string const ( - // Generalized specifies the generalized state for operating system state - // types. + // Generalized specifies the generalized state for operating system state types. Generalized OperatingSystemStateTypes = "Generalized" - // Specialized specifies the specialized state for operating system state - // types. + // Specialized specifies the specialized state for operating system state types. Specialized OperatingSystemStateTypes = "Specialized" ) @@ -94,7 +257,7 @@ type PassNames string const ( // OobeSystem specifies the oobe system state for pass names. - OobeSystem PassNames = "oobeSystem" + OobeSystem PassNames = "OobeSystem" ) // ProtocolTypes enumerates the values for protocol types. @@ -111,19 +274,75 @@ const ( type ResourceIdentityType string const ( - // SystemAssigned specifies the system assigned state for resource identity - // type. + // SystemAssigned specifies the system assigned state for resource identity type. SystemAssigned ResourceIdentityType = "SystemAssigned" ) +// ResourceSkuCapacityScaleType enumerates the values for resource sku capacity scale type. +type ResourceSkuCapacityScaleType string + +const ( + // ResourceSkuCapacityScaleTypeAutomatic specifies the resource sku capacity scale type automatic state for resource + // sku capacity scale type. + ResourceSkuCapacityScaleTypeAutomatic ResourceSkuCapacityScaleType = "Automatic" + // ResourceSkuCapacityScaleTypeManual specifies the resource sku capacity scale type manual state for resource sku + // capacity scale type. + ResourceSkuCapacityScaleTypeManual ResourceSkuCapacityScaleType = "Manual" + // ResourceSkuCapacityScaleTypeNone specifies the resource sku capacity scale type none state for resource sku capacity + // scale type. + ResourceSkuCapacityScaleTypeNone ResourceSkuCapacityScaleType = "None" +) + +// ResourceSkuRestrictionsReasonCode enumerates the values for resource sku restrictions reason code. +type ResourceSkuRestrictionsReasonCode string + +const ( + // NotAvailableForSubscription specifies the not available for subscription state for resource sku restrictions reason + // code. + NotAvailableForSubscription ResourceSkuRestrictionsReasonCode = "NotAvailableForSubscription" + // QuotaID specifies the quota id state for resource sku restrictions reason code. + QuotaID ResourceSkuRestrictionsReasonCode = "QuotaId" +) + +// ResourceSkuRestrictionsType enumerates the values for resource sku restrictions type. +type ResourceSkuRestrictionsType string + +const ( + // Location specifies the location state for resource sku restrictions type. + Location ResourceSkuRestrictionsType = "Location" +) + +// RollingUpgradeActionType enumerates the values for rolling upgrade action type. +type RollingUpgradeActionType string + +const ( + // Cancel specifies the cancel state for rolling upgrade action type. + Cancel RollingUpgradeActionType = "Cancel" + // Start specifies the start state for rolling upgrade action type. 
+ Start RollingUpgradeActionType = "Start" +) + +// RollingUpgradeStatusCode enumerates the values for rolling upgrade status code. +type RollingUpgradeStatusCode string + +const ( + // Cancelled specifies the cancelled state for rolling upgrade status code. + Cancelled RollingUpgradeStatusCode = "Cancelled" + // Completed specifies the completed state for rolling upgrade status code. + Completed RollingUpgradeStatusCode = "Completed" + // Faulted specifies the faulted state for rolling upgrade status code. + Faulted RollingUpgradeStatusCode = "Faulted" + // RollingForward specifies the rolling forward state for rolling upgrade status code. + RollingForward RollingUpgradeStatusCode = "RollingForward" +) + // SettingNames enumerates the values for setting names. type SettingNames string const ( // AutoLogon specifies the auto logon state for setting names. AutoLogon SettingNames = "AutoLogon" - // FirstLogonCommands specifies the first logon commands state for setting - // names. + // FirstLogonCommands specifies the first logon commands state for setting names. FirstLogonCommands SettingNames = "FirstLogonCommands" ) @@ -157,218 +376,338 @@ const ( Automatic UpgradeMode = "Automatic" // Manual specifies the manual state for upgrade mode. Manual UpgradeMode = "Manual" + // Rolling specifies the rolling state for upgrade mode. + Rolling UpgradeMode = "Rolling" ) -// VirtualMachineScaleSetSkuScaleType enumerates the values for virtual machine -// scale set sku scale type. +// VirtualMachineScaleSetSkuScaleType enumerates the values for virtual machine scale set sku scale type. type VirtualMachineScaleSetSkuScaleType string const ( - // VirtualMachineScaleSetSkuScaleTypeAutomatic specifies the virtual - // machine scale set sku scale type automatic state for virtual machine - // scale set sku scale type. + // VirtualMachineScaleSetSkuScaleTypeAutomatic specifies the virtual machine scale set sku scale type automatic state + // for virtual machine scale set sku scale type. VirtualMachineScaleSetSkuScaleTypeAutomatic VirtualMachineScaleSetSkuScaleType = "Automatic" - // VirtualMachineScaleSetSkuScaleTypeNone specifies the virtual machine - // scale set sku scale type none state for virtual machine scale set sku - // scale type. + // VirtualMachineScaleSetSkuScaleTypeNone specifies the virtual machine scale set sku scale type none state for virtual + // machine scale set sku scale type. VirtualMachineScaleSetSkuScaleTypeNone VirtualMachineScaleSetSkuScaleType = "None" ) -// VirtualMachineSizeTypes enumerates the values for virtual machine size -// types. +// VirtualMachineSizeTypes enumerates the values for virtual machine size types. type VirtualMachineSizeTypes string const ( - // BasicA0 specifies the basic a0 state for virtual machine size types. - BasicA0 VirtualMachineSizeTypes = "Basic_A0" - // BasicA1 specifies the basic a1 state for virtual machine size types. - BasicA1 VirtualMachineSizeTypes = "Basic_A1" - // BasicA2 specifies the basic a2 state for virtual machine size types. - BasicA2 VirtualMachineSizeTypes = "Basic_A2" - // BasicA3 specifies the basic a3 state for virtual machine size types. - BasicA3 VirtualMachineSizeTypes = "Basic_A3" - // BasicA4 specifies the basic a4 state for virtual machine size types. - BasicA4 VirtualMachineSizeTypes = "Basic_A4" - // StandardA0 specifies the standard a0 state for virtual machine size - // types. - StandardA0 VirtualMachineSizeTypes = "Standard_A0" - // StandardA1 specifies the standard a1 state for virtual machine size - // types. 
- StandardA1 VirtualMachineSizeTypes = "Standard_A1" - // StandardA10 specifies the standard a10 state for virtual machine size + // VirtualMachineSizeTypesBasicA0 specifies the virtual machine size types basic a0 state for virtual machine size // types. - StandardA10 VirtualMachineSizeTypes = "Standard_A10" - // StandardA11 specifies the standard a11 state for virtual machine size + VirtualMachineSizeTypesBasicA0 VirtualMachineSizeTypes = "Basic_A0" + // VirtualMachineSizeTypesBasicA1 specifies the virtual machine size types basic a1 state for virtual machine size // types. - StandardA11 VirtualMachineSizeTypes = "Standard_A11" - // StandardA2 specifies the standard a2 state for virtual machine size + VirtualMachineSizeTypesBasicA1 VirtualMachineSizeTypes = "Basic_A1" + // VirtualMachineSizeTypesBasicA2 specifies the virtual machine size types basic a2 state for virtual machine size // types. - StandardA2 VirtualMachineSizeTypes = "Standard_A2" - // StandardA3 specifies the standard a3 state for virtual machine size + VirtualMachineSizeTypesBasicA2 VirtualMachineSizeTypes = "Basic_A2" + // VirtualMachineSizeTypesBasicA3 specifies the virtual machine size types basic a3 state for virtual machine size // types. - StandardA3 VirtualMachineSizeTypes = "Standard_A3" - // StandardA4 specifies the standard a4 state for virtual machine size + VirtualMachineSizeTypesBasicA3 VirtualMachineSizeTypes = "Basic_A3" + // VirtualMachineSizeTypesBasicA4 specifies the virtual machine size types basic a4 state for virtual machine size // types. - StandardA4 VirtualMachineSizeTypes = "Standard_A4" - // StandardA5 specifies the standard a5 state for virtual machine size - // types. - StandardA5 VirtualMachineSizeTypes = "Standard_A5" - // StandardA6 specifies the standard a6 state for virtual machine size - // types. - StandardA6 VirtualMachineSizeTypes = "Standard_A6" - // StandardA7 specifies the standard a7 state for virtual machine size - // types. - StandardA7 VirtualMachineSizeTypes = "Standard_A7" - // StandardA8 specifies the standard a8 state for virtual machine size - // types. - StandardA8 VirtualMachineSizeTypes = "Standard_A8" - // StandardA9 specifies the standard a9 state for virtual machine size - // types. - StandardA9 VirtualMachineSizeTypes = "Standard_A9" - // StandardD1 specifies the standard d1 state for virtual machine size - // types. - StandardD1 VirtualMachineSizeTypes = "Standard_D1" - // StandardD11 specifies the standard d11 state for virtual machine size - // types. - StandardD11 VirtualMachineSizeTypes = "Standard_D11" - // StandardD11V2 specifies the standard d11v2 state for virtual machine + VirtualMachineSizeTypesBasicA4 VirtualMachineSizeTypes = "Basic_A4" + // VirtualMachineSizeTypesStandardA0 specifies the virtual machine size types standard a0 state for virtual machine // size types. - StandardD11V2 VirtualMachineSizeTypes = "Standard_D11_v2" - // StandardD12 specifies the standard d12 state for virtual machine size - // types. - StandardD12 VirtualMachineSizeTypes = "Standard_D12" - // StandardD12V2 specifies the standard d12v2 state for virtual machine + VirtualMachineSizeTypesStandardA0 VirtualMachineSizeTypes = "Standard_A0" + // VirtualMachineSizeTypesStandardA1 specifies the virtual machine size types standard a1 state for virtual machine // size types. - StandardD12V2 VirtualMachineSizeTypes = "Standard_D12_v2" - // StandardD13 specifies the standard d13 state for virtual machine size - // types. 
- StandardD13 VirtualMachineSizeTypes = "Standard_D13" - // StandardD13V2 specifies the standard d13v2 state for virtual machine + VirtualMachineSizeTypesStandardA1 VirtualMachineSizeTypes = "Standard_A1" + // VirtualMachineSizeTypesStandardA10 specifies the virtual machine size types standard a10 state for virtual machine // size types. - StandardD13V2 VirtualMachineSizeTypes = "Standard_D13_v2" - // StandardD14 specifies the standard d14 state for virtual machine size - // types. - StandardD14 VirtualMachineSizeTypes = "Standard_D14" - // StandardD14V2 specifies the standard d14v2 state for virtual machine + VirtualMachineSizeTypesStandardA10 VirtualMachineSizeTypes = "Standard_A10" + // VirtualMachineSizeTypesStandardA11 specifies the virtual machine size types standard a11 state for virtual machine // size types. - StandardD14V2 VirtualMachineSizeTypes = "Standard_D14_v2" - // StandardD15V2 specifies the standard d15v2 state for virtual machine + VirtualMachineSizeTypesStandardA11 VirtualMachineSizeTypes = "Standard_A11" + // VirtualMachineSizeTypesStandardA1V2 specifies the virtual machine size types standard a1v2 state for virtual machine // size types. - StandardD15V2 VirtualMachineSizeTypes = "Standard_D15_v2" - // StandardD1V2 specifies the standard d1v2 state for virtual machine size - // types. - StandardD1V2 VirtualMachineSizeTypes = "Standard_D1_v2" - // StandardD2 specifies the standard d2 state for virtual machine size - // types. - StandardD2 VirtualMachineSizeTypes = "Standard_D2" - // StandardD2V2 specifies the standard d2v2 state for virtual machine size - // types. - StandardD2V2 VirtualMachineSizeTypes = "Standard_D2_v2" - // StandardD3 specifies the standard d3 state for virtual machine size - // types. - StandardD3 VirtualMachineSizeTypes = "Standard_D3" - // StandardD3V2 specifies the standard d3v2 state for virtual machine size - // types. - StandardD3V2 VirtualMachineSizeTypes = "Standard_D3_v2" - // StandardD4 specifies the standard d4 state for virtual machine size - // types. - StandardD4 VirtualMachineSizeTypes = "Standard_D4" - // StandardD4V2 specifies the standard d4v2 state for virtual machine size - // types. - StandardD4V2 VirtualMachineSizeTypes = "Standard_D4_v2" - // StandardD5V2 specifies the standard d5v2 state for virtual machine size - // types. - StandardD5V2 VirtualMachineSizeTypes = "Standard_D5_v2" - // StandardDS1 specifies the standard ds1 state for virtual machine size - // types. - StandardDS1 VirtualMachineSizeTypes = "Standard_DS1" - // StandardDS11 specifies the standard ds11 state for virtual machine size - // types. - StandardDS11 VirtualMachineSizeTypes = "Standard_DS11" - // StandardDS11V2 specifies the standard ds11v2 state for virtual machine + VirtualMachineSizeTypesStandardA1V2 VirtualMachineSizeTypes = "Standard_A1_v2" + // VirtualMachineSizeTypesStandardA2 specifies the virtual machine size types standard a2 state for virtual machine // size types. - StandardDS11V2 VirtualMachineSizeTypes = "Standard_DS11_v2" - // StandardDS12 specifies the standard ds12 state for virtual machine size - // types. - StandardDS12 VirtualMachineSizeTypes = "Standard_DS12" - // StandardDS12V2 specifies the standard ds12v2 state for virtual machine + VirtualMachineSizeTypesStandardA2 VirtualMachineSizeTypes = "Standard_A2" + // VirtualMachineSizeTypesStandardA2mV2 specifies the virtual machine size types standard a2mv2 state for virtual + // machine size types. 
+ VirtualMachineSizeTypesStandardA2mV2 VirtualMachineSizeTypes = "Standard_A2m_v2" + // VirtualMachineSizeTypesStandardA2V2 specifies the virtual machine size types standard a2v2 state for virtual machine // size types. - StandardDS12V2 VirtualMachineSizeTypes = "Standard_DS12_v2" - // StandardDS13 specifies the standard ds13 state for virtual machine size - // types. - StandardDS13 VirtualMachineSizeTypes = "Standard_DS13" - // StandardDS13V2 specifies the standard ds13v2 state for virtual machine + VirtualMachineSizeTypesStandardA2V2 VirtualMachineSizeTypes = "Standard_A2_v2" + // VirtualMachineSizeTypesStandardA3 specifies the virtual machine size types standard a3 state for virtual machine // size types. - StandardDS13V2 VirtualMachineSizeTypes = "Standard_DS13_v2" - // StandardDS14 specifies the standard ds14 state for virtual machine size - // types. - StandardDS14 VirtualMachineSizeTypes = "Standard_DS14" - // StandardDS14V2 specifies the standard ds14v2 state for virtual machine + VirtualMachineSizeTypesStandardA3 VirtualMachineSizeTypes = "Standard_A3" + // VirtualMachineSizeTypesStandardA4 specifies the virtual machine size types standard a4 state for virtual machine // size types. - StandardDS14V2 VirtualMachineSizeTypes = "Standard_DS14_v2" - // StandardDS15V2 specifies the standard ds15v2 state for virtual machine + VirtualMachineSizeTypesStandardA4 VirtualMachineSizeTypes = "Standard_A4" + // VirtualMachineSizeTypesStandardA4mV2 specifies the virtual machine size types standard a4mv2 state for virtual + // machine size types. + VirtualMachineSizeTypesStandardA4mV2 VirtualMachineSizeTypes = "Standard_A4m_v2" + // VirtualMachineSizeTypesStandardA4V2 specifies the virtual machine size types standard a4v2 state for virtual machine // size types. - StandardDS15V2 VirtualMachineSizeTypes = "Standard_DS15_v2" - // StandardDS1V2 specifies the standard ds1v2 state for virtual machine + VirtualMachineSizeTypesStandardA4V2 VirtualMachineSizeTypes = "Standard_A4_v2" + // VirtualMachineSizeTypesStandardA5 specifies the virtual machine size types standard a5 state for virtual machine // size types. - StandardDS1V2 VirtualMachineSizeTypes = "Standard_DS1_v2" - // StandardDS2 specifies the standard ds2 state for virtual machine size - // types. - StandardDS2 VirtualMachineSizeTypes = "Standard_DS2" - // StandardDS2V2 specifies the standard ds2v2 state for virtual machine + VirtualMachineSizeTypesStandardA5 VirtualMachineSizeTypes = "Standard_A5" + // VirtualMachineSizeTypesStandardA6 specifies the virtual machine size types standard a6 state for virtual machine // size types. - StandardDS2V2 VirtualMachineSizeTypes = "Standard_DS2_v2" - // StandardDS3 specifies the standard ds3 state for virtual machine size - // types. - StandardDS3 VirtualMachineSizeTypes = "Standard_DS3" - // StandardDS3V2 specifies the standard ds3v2 state for virtual machine + VirtualMachineSizeTypesStandardA6 VirtualMachineSizeTypes = "Standard_A6" + // VirtualMachineSizeTypesStandardA7 specifies the virtual machine size types standard a7 state for virtual machine // size types. - StandardDS3V2 VirtualMachineSizeTypes = "Standard_DS3_v2" - // StandardDS4 specifies the standard ds4 state for virtual machine size - // types. 
- StandardDS4 VirtualMachineSizeTypes = "Standard_DS4" - // StandardDS4V2 specifies the standard ds4v2 state for virtual machine + VirtualMachineSizeTypesStandardA7 VirtualMachineSizeTypes = "Standard_A7" + // VirtualMachineSizeTypesStandardA8 specifies the virtual machine size types standard a8 state for virtual machine // size types. - StandardDS4V2 VirtualMachineSizeTypes = "Standard_DS4_v2" - // StandardDS5V2 specifies the standard ds5v2 state for virtual machine + VirtualMachineSizeTypesStandardA8 VirtualMachineSizeTypes = "Standard_A8" + // VirtualMachineSizeTypesStandardA8mV2 specifies the virtual machine size types standard a8mv2 state for virtual + // machine size types. + VirtualMachineSizeTypesStandardA8mV2 VirtualMachineSizeTypes = "Standard_A8m_v2" + // VirtualMachineSizeTypesStandardA8V2 specifies the virtual machine size types standard a8v2 state for virtual machine // size types. - StandardDS5V2 VirtualMachineSizeTypes = "Standard_DS5_v2" - // StandardG1 specifies the standard g1 state for virtual machine size - // types. - StandardG1 VirtualMachineSizeTypes = "Standard_G1" - // StandardG2 specifies the standard g2 state for virtual machine size - // types. - StandardG2 VirtualMachineSizeTypes = "Standard_G2" - // StandardG3 specifies the standard g3 state for virtual machine size - // types. - StandardG3 VirtualMachineSizeTypes = "Standard_G3" - // StandardG4 specifies the standard g4 state for virtual machine size - // types. - StandardG4 VirtualMachineSizeTypes = "Standard_G4" - // StandardG5 specifies the standard g5 state for virtual machine size - // types. - StandardG5 VirtualMachineSizeTypes = "Standard_G5" - // StandardGS1 specifies the standard gs1 state for virtual machine size - // types. - StandardGS1 VirtualMachineSizeTypes = "Standard_GS1" - // StandardGS2 specifies the standard gs2 state for virtual machine size - // types. - StandardGS2 VirtualMachineSizeTypes = "Standard_GS2" - // StandardGS3 specifies the standard gs3 state for virtual machine size - // types. - StandardGS3 VirtualMachineSizeTypes = "Standard_GS3" - // StandardGS4 specifies the standard gs4 state for virtual machine size - // types. - StandardGS4 VirtualMachineSizeTypes = "Standard_GS4" - // StandardGS5 specifies the standard gs5 state for virtual machine size - // types. - StandardGS5 VirtualMachineSizeTypes = "Standard_GS5" + VirtualMachineSizeTypesStandardA8V2 VirtualMachineSizeTypes = "Standard_A8_v2" + // VirtualMachineSizeTypesStandardA9 specifies the virtual machine size types standard a9 state for virtual machine + // size types. + VirtualMachineSizeTypesStandardA9 VirtualMachineSizeTypes = "Standard_A9" + // VirtualMachineSizeTypesStandardD1 specifies the virtual machine size types standard d1 state for virtual machine + // size types. + VirtualMachineSizeTypesStandardD1 VirtualMachineSizeTypes = "Standard_D1" + // VirtualMachineSizeTypesStandardD11 specifies the virtual machine size types standard d11 state for virtual machine + // size types. + VirtualMachineSizeTypesStandardD11 VirtualMachineSizeTypes = "Standard_D11" + // VirtualMachineSizeTypesStandardD11V2 specifies the virtual machine size types standard d11v2 state for virtual + // machine size types. + VirtualMachineSizeTypesStandardD11V2 VirtualMachineSizeTypes = "Standard_D11_v2" + // VirtualMachineSizeTypesStandardD12 specifies the virtual machine size types standard d12 state for virtual machine + // size types. 
+ VirtualMachineSizeTypesStandardD12 VirtualMachineSizeTypes = "Standard_D12" + // VirtualMachineSizeTypesStandardD12V2 specifies the virtual machine size types standard d12v2 state for virtual + // machine size types. + VirtualMachineSizeTypesStandardD12V2 VirtualMachineSizeTypes = "Standard_D12_v2" + // VirtualMachineSizeTypesStandardD13 specifies the virtual machine size types standard d13 state for virtual machine + // size types. + VirtualMachineSizeTypesStandardD13 VirtualMachineSizeTypes = "Standard_D13" + // VirtualMachineSizeTypesStandardD13V2 specifies the virtual machine size types standard d13v2 state for virtual + // machine size types. + VirtualMachineSizeTypesStandardD13V2 VirtualMachineSizeTypes = "Standard_D13_v2" + // VirtualMachineSizeTypesStandardD14 specifies the virtual machine size types standard d14 state for virtual machine + // size types. + VirtualMachineSizeTypesStandardD14 VirtualMachineSizeTypes = "Standard_D14" + // VirtualMachineSizeTypesStandardD14V2 specifies the virtual machine size types standard d14v2 state for virtual + // machine size types. + VirtualMachineSizeTypesStandardD14V2 VirtualMachineSizeTypes = "Standard_D14_v2" + // VirtualMachineSizeTypesStandardD15V2 specifies the virtual machine size types standard d15v2 state for virtual + // machine size types. + VirtualMachineSizeTypesStandardD15V2 VirtualMachineSizeTypes = "Standard_D15_v2" + // VirtualMachineSizeTypesStandardD1V2 specifies the virtual machine size types standard d1v2 state for virtual machine + // size types. + VirtualMachineSizeTypesStandardD1V2 VirtualMachineSizeTypes = "Standard_D1_v2" + // VirtualMachineSizeTypesStandardD2 specifies the virtual machine size types standard d2 state for virtual machine + // size types. + VirtualMachineSizeTypesStandardD2 VirtualMachineSizeTypes = "Standard_D2" + // VirtualMachineSizeTypesStandardD2V2 specifies the virtual machine size types standard d2v2 state for virtual machine + // size types. + VirtualMachineSizeTypesStandardD2V2 VirtualMachineSizeTypes = "Standard_D2_v2" + // VirtualMachineSizeTypesStandardD3 specifies the virtual machine size types standard d3 state for virtual machine + // size types. + VirtualMachineSizeTypesStandardD3 VirtualMachineSizeTypes = "Standard_D3" + // VirtualMachineSizeTypesStandardD3V2 specifies the virtual machine size types standard d3v2 state for virtual machine + // size types. + VirtualMachineSizeTypesStandardD3V2 VirtualMachineSizeTypes = "Standard_D3_v2" + // VirtualMachineSizeTypesStandardD4 specifies the virtual machine size types standard d4 state for virtual machine + // size types. + VirtualMachineSizeTypesStandardD4 VirtualMachineSizeTypes = "Standard_D4" + // VirtualMachineSizeTypesStandardD4V2 specifies the virtual machine size types standard d4v2 state for virtual machine + // size types. + VirtualMachineSizeTypesStandardD4V2 VirtualMachineSizeTypes = "Standard_D4_v2" + // VirtualMachineSizeTypesStandardD5V2 specifies the virtual machine size types standard d5v2 state for virtual machine + // size types. + VirtualMachineSizeTypesStandardD5V2 VirtualMachineSizeTypes = "Standard_D5_v2" + // VirtualMachineSizeTypesStandardDS1 specifies the virtual machine size types standard ds1 state for virtual machine + // size types. + VirtualMachineSizeTypesStandardDS1 VirtualMachineSizeTypes = "Standard_DS1" + // VirtualMachineSizeTypesStandardDS11 specifies the virtual machine size types standard ds11 state for virtual machine + // size types. 
+ VirtualMachineSizeTypesStandardDS11 VirtualMachineSizeTypes = "Standard_DS11" + // VirtualMachineSizeTypesStandardDS11V2 specifies the virtual machine size types standard ds11v2 state for virtual + // machine size types. + VirtualMachineSizeTypesStandardDS11V2 VirtualMachineSizeTypes = "Standard_DS11_v2" + // VirtualMachineSizeTypesStandardDS12 specifies the virtual machine size types standard ds12 state for virtual machine + // size types. + VirtualMachineSizeTypesStandardDS12 VirtualMachineSizeTypes = "Standard_DS12" + // VirtualMachineSizeTypesStandardDS12V2 specifies the virtual machine size types standard ds12v2 state for virtual + // machine size types. + VirtualMachineSizeTypesStandardDS12V2 VirtualMachineSizeTypes = "Standard_DS12_v2" + // VirtualMachineSizeTypesStandardDS13 specifies the virtual machine size types standard ds13 state for virtual machine + // size types. + VirtualMachineSizeTypesStandardDS13 VirtualMachineSizeTypes = "Standard_DS13" + // VirtualMachineSizeTypesStandardDS13V2 specifies the virtual machine size types standard ds13v2 state for virtual + // machine size types. + VirtualMachineSizeTypesStandardDS13V2 VirtualMachineSizeTypes = "Standard_DS13_v2" + // VirtualMachineSizeTypesStandardDS14 specifies the virtual machine size types standard ds14 state for virtual machine + // size types. + VirtualMachineSizeTypesStandardDS14 VirtualMachineSizeTypes = "Standard_DS14" + // VirtualMachineSizeTypesStandardDS14V2 specifies the virtual machine size types standard ds14v2 state for virtual + // machine size types. + VirtualMachineSizeTypesStandardDS14V2 VirtualMachineSizeTypes = "Standard_DS14_v2" + // VirtualMachineSizeTypesStandardDS15V2 specifies the virtual machine size types standard ds15v2 state for virtual + // machine size types. + VirtualMachineSizeTypesStandardDS15V2 VirtualMachineSizeTypes = "Standard_DS15_v2" + // VirtualMachineSizeTypesStandardDS1V2 specifies the virtual machine size types standard ds1v2 state for virtual + // machine size types. + VirtualMachineSizeTypesStandardDS1V2 VirtualMachineSizeTypes = "Standard_DS1_v2" + // VirtualMachineSizeTypesStandardDS2 specifies the virtual machine size types standard ds2 state for virtual machine + // size types. + VirtualMachineSizeTypesStandardDS2 VirtualMachineSizeTypes = "Standard_DS2" + // VirtualMachineSizeTypesStandardDS2V2 specifies the virtual machine size types standard ds2v2 state for virtual + // machine size types. + VirtualMachineSizeTypesStandardDS2V2 VirtualMachineSizeTypes = "Standard_DS2_v2" + // VirtualMachineSizeTypesStandardDS3 specifies the virtual machine size types standard ds3 state for virtual machine + // size types. + VirtualMachineSizeTypesStandardDS3 VirtualMachineSizeTypes = "Standard_DS3" + // VirtualMachineSizeTypesStandardDS3V2 specifies the virtual machine size types standard ds3v2 state for virtual + // machine size types. + VirtualMachineSizeTypesStandardDS3V2 VirtualMachineSizeTypes = "Standard_DS3_v2" + // VirtualMachineSizeTypesStandardDS4 specifies the virtual machine size types standard ds4 state for virtual machine + // size types. + VirtualMachineSizeTypesStandardDS4 VirtualMachineSizeTypes = "Standard_DS4" + // VirtualMachineSizeTypesStandardDS4V2 specifies the virtual machine size types standard ds4v2 state for virtual + // machine size types. 
+ VirtualMachineSizeTypesStandardDS4V2 VirtualMachineSizeTypes = "Standard_DS4_v2" + // VirtualMachineSizeTypesStandardDS5V2 specifies the virtual machine size types standard ds5v2 state for virtual + // machine size types. + VirtualMachineSizeTypesStandardDS5V2 VirtualMachineSizeTypes = "Standard_DS5_v2" + // VirtualMachineSizeTypesStandardF1 specifies the virtual machine size types standard f1 state for virtual machine + // size types. + VirtualMachineSizeTypesStandardF1 VirtualMachineSizeTypes = "Standard_F1" + // VirtualMachineSizeTypesStandardF16 specifies the virtual machine size types standard f16 state for virtual machine + // size types. + VirtualMachineSizeTypesStandardF16 VirtualMachineSizeTypes = "Standard_F16" + // VirtualMachineSizeTypesStandardF16s specifies the virtual machine size types standard f16s state for virtual machine + // size types. + VirtualMachineSizeTypesStandardF16s VirtualMachineSizeTypes = "Standard_F16s" + // VirtualMachineSizeTypesStandardF1s specifies the virtual machine size types standard f1s state for virtual machine + // size types. + VirtualMachineSizeTypesStandardF1s VirtualMachineSizeTypes = "Standard_F1s" + // VirtualMachineSizeTypesStandardF2 specifies the virtual machine size types standard f2 state for virtual machine + // size types. + VirtualMachineSizeTypesStandardF2 VirtualMachineSizeTypes = "Standard_F2" + // VirtualMachineSizeTypesStandardF2s specifies the virtual machine size types standard f2s state for virtual machine + // size types. + VirtualMachineSizeTypesStandardF2s VirtualMachineSizeTypes = "Standard_F2s" + // VirtualMachineSizeTypesStandardF4 specifies the virtual machine size types standard f4 state for virtual machine + // size types. + VirtualMachineSizeTypesStandardF4 VirtualMachineSizeTypes = "Standard_F4" + // VirtualMachineSizeTypesStandardF4s specifies the virtual machine size types standard f4s state for virtual machine + // size types. + VirtualMachineSizeTypesStandardF4s VirtualMachineSizeTypes = "Standard_F4s" + // VirtualMachineSizeTypesStandardF8 specifies the virtual machine size types standard f8 state for virtual machine + // size types. + VirtualMachineSizeTypesStandardF8 VirtualMachineSizeTypes = "Standard_F8" + // VirtualMachineSizeTypesStandardF8s specifies the virtual machine size types standard f8s state for virtual machine + // size types. + VirtualMachineSizeTypesStandardF8s VirtualMachineSizeTypes = "Standard_F8s" + // VirtualMachineSizeTypesStandardG1 specifies the virtual machine size types standard g1 state for virtual machine + // size types. + VirtualMachineSizeTypesStandardG1 VirtualMachineSizeTypes = "Standard_G1" + // VirtualMachineSizeTypesStandardG2 specifies the virtual machine size types standard g2 state for virtual machine + // size types. + VirtualMachineSizeTypesStandardG2 VirtualMachineSizeTypes = "Standard_G2" + // VirtualMachineSizeTypesStandardG3 specifies the virtual machine size types standard g3 state for virtual machine + // size types. + VirtualMachineSizeTypesStandardG3 VirtualMachineSizeTypes = "Standard_G3" + // VirtualMachineSizeTypesStandardG4 specifies the virtual machine size types standard g4 state for virtual machine + // size types. + VirtualMachineSizeTypesStandardG4 VirtualMachineSizeTypes = "Standard_G4" + // VirtualMachineSizeTypesStandardG5 specifies the virtual machine size types standard g5 state for virtual machine + // size types. 
+ VirtualMachineSizeTypesStandardG5 VirtualMachineSizeTypes = "Standard_G5" + // VirtualMachineSizeTypesStandardGS1 specifies the virtual machine size types standard gs1 state for virtual machine + // size types. + VirtualMachineSizeTypesStandardGS1 VirtualMachineSizeTypes = "Standard_GS1" + // VirtualMachineSizeTypesStandardGS2 specifies the virtual machine size types standard gs2 state for virtual machine + // size types. + VirtualMachineSizeTypesStandardGS2 VirtualMachineSizeTypes = "Standard_GS2" + // VirtualMachineSizeTypesStandardGS3 specifies the virtual machine size types standard gs3 state for virtual machine + // size types. + VirtualMachineSizeTypesStandardGS3 VirtualMachineSizeTypes = "Standard_GS3" + // VirtualMachineSizeTypesStandardGS4 specifies the virtual machine size types standard gs4 state for virtual machine + // size types. + VirtualMachineSizeTypesStandardGS4 VirtualMachineSizeTypes = "Standard_GS4" + // VirtualMachineSizeTypesStandardGS5 specifies the virtual machine size types standard gs5 state for virtual machine + // size types. + VirtualMachineSizeTypesStandardGS5 VirtualMachineSizeTypes = "Standard_GS5" + // VirtualMachineSizeTypesStandardH16 specifies the virtual machine size types standard h16 state for virtual machine + // size types. + VirtualMachineSizeTypesStandardH16 VirtualMachineSizeTypes = "Standard_H16" + // VirtualMachineSizeTypesStandardH16m specifies the virtual machine size types standard h16m state for virtual machine + // size types. + VirtualMachineSizeTypesStandardH16m VirtualMachineSizeTypes = "Standard_H16m" + // VirtualMachineSizeTypesStandardH16mr specifies the virtual machine size types standard h16mr state for virtual + // machine size types. + VirtualMachineSizeTypesStandardH16mr VirtualMachineSizeTypes = "Standard_H16mr" + // VirtualMachineSizeTypesStandardH16r specifies the virtual machine size types standard h16r state for virtual machine + // size types. + VirtualMachineSizeTypesStandardH16r VirtualMachineSizeTypes = "Standard_H16r" + // VirtualMachineSizeTypesStandardH8 specifies the virtual machine size types standard h8 state for virtual machine + // size types. + VirtualMachineSizeTypesStandardH8 VirtualMachineSizeTypes = "Standard_H8" + // VirtualMachineSizeTypesStandardH8m specifies the virtual machine size types standard h8m state for virtual machine + // size types. + VirtualMachineSizeTypesStandardH8m VirtualMachineSizeTypes = "Standard_H8m" + // VirtualMachineSizeTypesStandardL16s specifies the virtual machine size types standard l16s state for virtual machine + // size types. + VirtualMachineSizeTypesStandardL16s VirtualMachineSizeTypes = "Standard_L16s" + // VirtualMachineSizeTypesStandardL32s specifies the virtual machine size types standard l32s state for virtual machine + // size types. + VirtualMachineSizeTypesStandardL32s VirtualMachineSizeTypes = "Standard_L32s" + // VirtualMachineSizeTypesStandardL4s specifies the virtual machine size types standard l4s state for virtual machine + // size types. + VirtualMachineSizeTypesStandardL4s VirtualMachineSizeTypes = "Standard_L4s" + // VirtualMachineSizeTypesStandardL8s specifies the virtual machine size types standard l8s state for virtual machine + // size types. + VirtualMachineSizeTypesStandardL8s VirtualMachineSizeTypes = "Standard_L8s" + // VirtualMachineSizeTypesStandardNC12 specifies the virtual machine size types standard nc12 state for virtual machine + // size types. 
+ VirtualMachineSizeTypesStandardNC12 VirtualMachineSizeTypes = "Standard_NC12" + // VirtualMachineSizeTypesStandardNC24 specifies the virtual machine size types standard nc24 state for virtual machine + // size types. + VirtualMachineSizeTypesStandardNC24 VirtualMachineSizeTypes = "Standard_NC24" + // VirtualMachineSizeTypesStandardNC24r specifies the virtual machine size types standard nc24r state for virtual + // machine size types. + VirtualMachineSizeTypesStandardNC24r VirtualMachineSizeTypes = "Standard_NC24r" + // VirtualMachineSizeTypesStandardNC6 specifies the virtual machine size types standard nc6 state for virtual machine + // size types. + VirtualMachineSizeTypesStandardNC6 VirtualMachineSizeTypes = "Standard_NC6" + // VirtualMachineSizeTypesStandardNV12 specifies the virtual machine size types standard nv12 state for virtual machine + // size types. + VirtualMachineSizeTypesStandardNV12 VirtualMachineSizeTypes = "Standard_NV12" + // VirtualMachineSizeTypesStandardNV24 specifies the virtual machine size types standard nv24 state for virtual machine + // size types. + VirtualMachineSizeTypesStandardNV24 VirtualMachineSizeTypes = "Standard_NV24" + // VirtualMachineSizeTypesStandardNV6 specifies the virtual machine size types standard nv6 state for virtual machine + // size types. + VirtualMachineSizeTypesStandardNV6 VirtualMachineSizeTypes = "Standard_NV6" ) -// AdditionalUnattendContent is additional XML formatted information that can -// be included in the Unattend.xml file, which is used by Windows Setup. -// Contents are defined by setting name, component name, and the pass in which -// the content is a applied. +// AccessURI is a disk access SAS uri. +type AccessURI struct { + autorest.Response `json:"-"` + *AccessURIOutput `json:"properties,omitempty"` +} + +// AccessURIOutput is azure properties, including output. +type AccessURIOutput struct { + *AccessURIRaw `json:"output,omitempty"` +} + +// AccessURIRaw is this object gets 'bubbled up' through flattening. +type AccessURIRaw struct { + AccessSAS *string `json:"accessSAS,omitempty"` +} + +// AdditionalUnattendContent is specifies additional XML formatted information that can be included in the Unattend.xml +// file, which is used by Windows Setup. Contents are defined by setting name, component name, and the pass in which +// the content is applied. type AdditionalUnattendContent struct { PassName PassNames `json:"passName,omitempty"` ComponentName ComponentNames `json:"componentName,omitempty"` @@ -397,7 +736,14 @@ type APIErrorBase struct { Message *string `json:"message,omitempty"` } -// AvailabilitySet is create or update availability set parameters. +// AvailabilitySet is specifies information about the availability set that the virtual machine should be assigned to. +// Virtual machines specified in the same availability set are allocated to different nodes to maximize availability. +// For more information about availability sets, see [Manage the availability of virtual +// machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). +//
For more information on Azure planned maintainance, see [Planned maintenance for virtual machines in +// Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json) +//
Currently, a VM can only be added to availability set at creation time. An existing VM cannot be added to +// an availability set. type AvailabilitySet struct { autorest.Response `json:"-"` ID *string `json:"id,omitempty"` @@ -421,22 +767,141 @@ type AvailabilitySetProperties struct { PlatformFaultDomainCount *int32 `json:"platformFaultDomainCount,omitempty"` VirtualMachines *[]SubResource `json:"virtualMachines,omitempty"` Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` - Managed *bool `json:"managed,omitempty"` } -// BootDiagnostics is describes Boot Diagnostics. +// BootDiagnostics is boot Diagnostics is a debugging feature which allows you to view Console Output and Screenshot to +// diagnose VM status.
For Linux Virtual Machines, you can easily view the output of your console log. +//
For both Windows and Linux virtual machines, Azure also enables you to see a screenshot of the VM from the +// hypervisor. type BootDiagnostics struct { Enabled *bool `json:"enabled,omitempty"` StorageURI *string `json:"storageUri,omitempty"` } -// BootDiagnosticsInstanceView is the instance view of a virtual machine boot -// diagnostics. +// BootDiagnosticsInstanceView is the instance view of a virtual machine boot diagnostics. type BootDiagnosticsInstanceView struct { ConsoleScreenshotBlobURI *string `json:"consoleScreenshotBlobUri,omitempty"` SerialConsoleLogBlobURI *string `json:"serialConsoleLogBlobUri,omitempty"` } +// ContainerService is container service. +type ContainerService struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + *ContainerServiceProperties `json:"properties,omitempty"` +} + +// ContainerServiceAgentPoolProfile is profile for the container service agent pool. +type ContainerServiceAgentPoolProfile struct { + Name *string `json:"name,omitempty"` + Count *int32 `json:"count,omitempty"` + VMSize ContainerServiceVMSizeTypes `json:"vmSize,omitempty"` + DNSPrefix *string `json:"dnsPrefix,omitempty"` + Fqdn *string `json:"fqdn,omitempty"` +} + +// ContainerServiceCustomProfile is properties to configure a custom container service cluster. +type ContainerServiceCustomProfile struct { + Orchestrator *string `json:"orchestrator,omitempty"` +} + +// ContainerServiceDiagnosticsProfile is +type ContainerServiceDiagnosticsProfile struct { + VMDiagnostics *ContainerServiceVMDiagnostics `json:"vmDiagnostics,omitempty"` +} + +// ContainerServiceLinuxProfile is profile for Linux VMs in the container service cluster. +type ContainerServiceLinuxProfile struct { + AdminUsername *string `json:"adminUsername,omitempty"` + SSH *ContainerServiceSSHConfiguration `json:"ssh,omitempty"` +} + +// ContainerServiceListResult is the response from the List Container Services operation. +type ContainerServiceListResult struct { + autorest.Response `json:"-"` + Value *[]ContainerService `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// ContainerServiceListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client ContainerServiceListResult) ContainerServiceListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// ContainerServiceMasterProfile is profile for the container service master. +type ContainerServiceMasterProfile struct { + Count *int32 `json:"count,omitempty"` + DNSPrefix *string `json:"dnsPrefix,omitempty"` + Fqdn *string `json:"fqdn,omitempty"` +} + +// ContainerServiceOrchestratorProfile is profile for the container service orchestrator. +type ContainerServiceOrchestratorProfile struct { + OrchestratorType ContainerServiceOrchestratorTypes `json:"orchestratorType,omitempty"` +} + +// ContainerServiceProperties is properties of the container service. 
+type ContainerServiceProperties struct { + ProvisioningState *string `json:"provisioningState,omitempty"` + OrchestratorProfile *ContainerServiceOrchestratorProfile `json:"orchestratorProfile,omitempty"` + CustomProfile *ContainerServiceCustomProfile `json:"customProfile,omitempty"` + ServicePrincipalProfile *ContainerServiceServicePrincipalProfile `json:"servicePrincipalProfile,omitempty"` + MasterProfile *ContainerServiceMasterProfile `json:"masterProfile,omitempty"` + AgentPoolProfiles *[]ContainerServiceAgentPoolProfile `json:"agentPoolProfiles,omitempty"` + WindowsProfile *ContainerServiceWindowsProfile `json:"windowsProfile,omitempty"` + LinuxProfile *ContainerServiceLinuxProfile `json:"linuxProfile,omitempty"` + DiagnosticsProfile *ContainerServiceDiagnosticsProfile `json:"diagnosticsProfile,omitempty"` +} + +// ContainerServiceServicePrincipalProfile is information about a service principal identity for the cluster to use for +// manipulating Azure APIs. +type ContainerServiceServicePrincipalProfile struct { + ClientID *string `json:"clientId,omitempty"` + Secret *string `json:"secret,omitempty"` +} + +// ContainerServiceSSHConfiguration is SSH configuration for Linux-based VMs running on Azure. +type ContainerServiceSSHConfiguration struct { + PublicKeys *[]ContainerServiceSSHPublicKey `json:"publicKeys,omitempty"` +} + +// ContainerServiceSSHPublicKey is contains information about SSH certificate public key data. +type ContainerServiceSSHPublicKey struct { + KeyData *string `json:"keyData,omitempty"` +} + +// ContainerServiceVMDiagnostics is profile for diagnostics on the container service VMs. +type ContainerServiceVMDiagnostics struct { + Enabled *bool `json:"enabled,omitempty"` + StorageURI *string `json:"storageUri,omitempty"` +} + +// ContainerServiceWindowsProfile is profile for Windows VMs in the container service cluster. +type ContainerServiceWindowsProfile struct { + AdminUsername *string `json:"adminUsername,omitempty"` + AdminPassword *string `json:"adminPassword,omitempty"` +} + +// CreationData is data used when creating a disk. +type CreationData struct { + CreateOption DiskCreateOption `json:"createOption,omitempty"` + StorageAccountID *string `json:"storageAccountId,omitempty"` + ImageReference *ImageDiskReference `json:"imageReference,omitempty"` + SourceURI *string `json:"sourceUri,omitempty"` + SourceResourceID *string `json:"sourceResourceId,omitempty"` +} + // DataDisk is describes a data disk. type DataDisk struct { Lun *int32 `json:"lun,omitempty"` @@ -454,11 +919,25 @@ type DataDiskImage struct { Lun *int32 `json:"lun,omitempty"` } -// DiagnosticsProfile is describes a diagnostics profile. +// DiagnosticsProfile is specifies the boot diagnostic settings state.
Minimum api-version: 2015-06-15. type DiagnosticsProfile struct { BootDiagnostics *BootDiagnostics `json:"bootDiagnostics,omitempty"` } +// Disk is disk resource. +type Disk struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + ManagedBy *string `json:"managedBy,omitempty"` + Sku *DiskSku `json:"sku,omitempty"` + Zones *[]string `json:"zones,omitempty"` + *DiskProperties `json:"properties,omitempty"` +} + // DiskEncryptionSettings is describes a Encryption Settings for a Disk type DiskEncryptionSettings struct { DiskEncryptionKey *KeyVaultSecretReference `json:"diskEncryptionKey,omitempty"` @@ -468,16 +947,80 @@ type DiskEncryptionSettings struct { // DiskInstanceView is the instance view of the disk. type DiskInstanceView struct { - Name *string `json:"name,omitempty"` - Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` + Name *string `json:"name,omitempty"` + EncryptionSettings *[]DiskEncryptionSettings `json:"encryptionSettings,omitempty"` + Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` +} + +// DiskList is the List Disks operation response. +type DiskList struct { + autorest.Response `json:"-"` + Value *[]Disk `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// DiskListPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client DiskList) DiskListPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// DiskProperties is disk resource properties. +type DiskProperties struct { + TimeCreated *date.Time `json:"timeCreated,omitempty"` + OsType OperatingSystemTypes `json:"osType,omitempty"` + CreationData *CreationData `json:"creationData,omitempty"` + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + EncryptionSettings *EncryptionSettings `json:"encryptionSettings,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// DiskSku is the disks and snapshots sku name. Can be Standard_LRS or Premium_LRS. +type DiskSku struct { + Name StorageAccountTypes `json:"name,omitempty"` + Tier *string `json:"tier,omitempty"` +} + +// DiskUpdate is disk update resource. +type DiskUpdate struct { + Tags *map[string]*string `json:"tags,omitempty"` + Sku *DiskSku `json:"sku,omitempty"` + *DiskUpdateProperties `json:"properties,omitempty"` +} + +// DiskUpdateProperties is disk resource update properties. +type DiskUpdateProperties struct { + OsType OperatingSystemTypes `json:"osType,omitempty"` + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + EncryptionSettings *EncryptionSettings `json:"encryptionSettings,omitempty"` +} + +// EncryptionSettings is encryption settings for disk or snapshot +type EncryptionSettings struct { + Enabled *bool `json:"enabled,omitempty"` + DiskEncryptionKey *KeyVaultAndSecretReference `json:"diskEncryptionKey,omitempty"` + KeyEncryptionKey *KeyVaultAndKeyReference `json:"keyEncryptionKey,omitempty"` } -// HardwareProfile is describes a hardware profile. +// GrantAccessData is data used for requesting a SAS. 
+type GrantAccessData struct { + Access AccessLevel `json:"access,omitempty"` + DurationInSeconds *int32 `json:"durationInSeconds,omitempty"` +} + +// HardwareProfile is specifies the hardware settings for the virtual machine. type HardwareProfile struct { VMSize VirtualMachineSizeTypes `json:"vmSize,omitempty"` } -// Image is describes an Image. +// Image is the source user image virtual hard disk. The virtual hard disk will be copied before being attached to the +// virtual machine. If SourceImage is provided, the destination virtual hard drive must not exist. type Image struct { autorest.Response `json:"-"` ID *string `json:"id,omitempty"` @@ -490,12 +1033,19 @@ type Image struct { // ImageDataDisk is describes a data disk. type ImageDataDisk struct { - Lun *int32 `json:"lun,omitempty"` - Snapshot *SubResource `json:"snapshot,omitempty"` - ManagedDisk *SubResource `json:"managedDisk,omitempty"` - BlobURI *string `json:"blobUri,omitempty"` - Caching CachingTypes `json:"caching,omitempty"` - DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + Lun *int32 `json:"lun,omitempty"` + Snapshot *SubResource `json:"snapshot,omitempty"` + ManagedDisk *SubResource `json:"managedDisk,omitempty"` + BlobURI *string `json:"blobUri,omitempty"` + Caching CachingTypes `json:"caching,omitempty"` + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + StorageAccountType StorageAccountTypes `json:"storageAccountType,omitempty"` +} + +// ImageDiskReference is the source image used for creating the disk. +type ImageDiskReference struct { + ID *string `json:"id,omitempty"` + Lun *int32 `json:"lun,omitempty"` } // ImageListResult is the List Image operation response. @@ -519,13 +1069,14 @@ func (client ImageListResult) ImageListResultPreparer() (*http.Request, error) { // ImageOSDisk is describes an Operating System disk. type ImageOSDisk struct { - OsType OperatingSystemTypes `json:"osType,omitempty"` - OsState OperatingSystemStateTypes `json:"osState,omitempty"` - Snapshot *SubResource `json:"snapshot,omitempty"` - ManagedDisk *SubResource `json:"managedDisk,omitempty"` - BlobURI *string `json:"blobUri,omitempty"` - Caching CachingTypes `json:"caching,omitempty"` - DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + OsType OperatingSystemTypes `json:"osType,omitempty"` + OsState OperatingSystemStateTypes `json:"osState,omitempty"` + Snapshot *SubResource `json:"snapshot,omitempty"` + ManagedDisk *SubResource `json:"managedDisk,omitempty"` + BlobURI *string `json:"blobUri,omitempty"` + Caching CachingTypes `json:"caching,omitempty"` + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + StorageAccountType StorageAccountTypes `json:"storageAccountType,omitempty"` } // ImageProperties is describes the properties of an Image. @@ -535,7 +1086,9 @@ type ImageProperties struct { ProvisioningState *string `json:"provisioningState,omitempty"` } -// ImageReference is the image reference. +// ImageReference is specifies information about the image to use. You can specify information about platform images, +// marketplace images, or virtual machine images. This element is required when you want to use a platform image, +// marketplace image, or virtual machine image, but is not used in other creation operations. 
type ImageReference struct { ID *string `json:"id,omitempty"` Publisher *string `json:"publisher,omitempty"` @@ -565,6 +1118,19 @@ type InstanceViewStatus struct { Time *date.Time `json:"time,omitempty"` } +// KeyVaultAndKeyReference is key Vault Key Url and vault id of KeK, KeK is optional and when provided is used to +// unwrap the encryptionKey +type KeyVaultAndKeyReference struct { + SourceVault *SourceVault `json:"sourceVault,omitempty"` + KeyURL *string `json:"keyUrl,omitempty"` +} + +// KeyVaultAndSecretReference is key Vault Secret Url and vault id of the encryption key +type KeyVaultAndSecretReference struct { + SourceVault *SourceVault `json:"sourceVault,omitempty"` + SecretURL *string `json:"secretUrl,omitempty"` +} + // KeyVaultKeyReference is describes a reference to Key Vault Key type KeyVaultKeyReference struct { KeyURL *string `json:"keyUrl,omitempty"` @@ -577,7 +1143,11 @@ type KeyVaultSecretReference struct { SourceVault *SubResource `json:"sourceVault,omitempty"` } -// LinuxConfiguration is describes Windows configuration of the OS Profile. +// LinuxConfiguration is specifies the Linux operating system settings on the virtual machine.
For a list of +// supported Linux distributions, see [Linux on Azure-Endorsed +// Distributions](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-endorsed-distros?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json) +//
For running non-endorsed distributions, see [Information for Non-Endorsed +// Distributions](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-create-upload-generic?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json). type LinuxConfiguration struct { DisablePasswordAuthentication *bool `json:"disablePasswordAuthentication,omitempty"` SSH *SSHConfiguration `json:"ssh,omitempty"` @@ -614,12 +1184,22 @@ type ListVirtualMachineImageResource struct { Value *[]VirtualMachineImageResource `json:"value,omitempty"` } -// LongRunningOperationProperties is compute-specific operation properties, -// including output +// LongRunningOperationProperties is compute-specific operation properties, including output type LongRunningOperationProperties struct { Output *map[string]interface{} `json:"output,omitempty"` } +// MaintenanceRedeployStatus is maintenance Operation Status. +type MaintenanceRedeployStatus struct { + IsCustomerInitiatedMaintenanceAllowed *bool `json:"isCustomerInitiatedMaintenanceAllowed,omitempty"` + PreMaintenanceWindowStartTime *date.Time `json:"preMaintenanceWindowStartTime,omitempty"` + PreMaintenanceWindowEndTime *date.Time `json:"preMaintenanceWindowEndTime,omitempty"` + MaintenanceWindowStartTime *date.Time `json:"maintenanceWindowStartTime,omitempty"` + MaintenanceWindowEndTime *date.Time `json:"maintenanceWindowEndTime,omitempty"` + LastOperationResultCode MaintenanceOperationResultCodeTypes `json:"lastOperationResultCode,omitempty"` + LastOperationMessage *string `json:"lastOperationMessage,omitempty"` +} + // ManagedDiskParameters is the parameters of a managed disk. type ManagedDiskParameters struct { ID *string `json:"id,omitempty"` @@ -632,13 +1212,12 @@ type NetworkInterfaceReference struct { *NetworkInterfaceReferenceProperties `json:"properties,omitempty"` } -// NetworkInterfaceReferenceProperties is describes a network interface -// reference properties. +// NetworkInterfaceReferenceProperties is describes a network interface reference properties. type NetworkInterfaceReferenceProperties struct { Primary *bool `json:"primary,omitempty"` } -// NetworkProfile is describes a network profile. +// NetworkProfile is specifies the network interfaces of the virtual machine. type NetworkProfile struct { NetworkInterfaces *[]NetworkInterfaceReference `json:"networkInterfaces,omitempty"` } @@ -653,7 +1232,9 @@ type OperationStatusResponse struct { Error *APIError `json:"error,omitempty"` } -// OSDisk is describes an Operating System disk. +// OSDisk is specifies information about the operating system disk used by the virtual machine.
For more +// information about disks, see [About disks and VHDs for Azure virtual +// machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). type OSDisk struct { OsType OperatingSystemTypes `json:"osType,omitempty"` EncryptionSettings *DiskEncryptionSettings `json:"encryptionSettings,omitempty"` @@ -671,7 +1252,7 @@ type OSDiskImage struct { OperatingSystem OperatingSystemTypes `json:"operatingSystem,omitempty"` } -// OSProfile is describes an OS profile. +// OSProfile is specifies the operating system settings for the virtual machine. type OSProfile struct { ComputerName *string `json:"computerName,omitempty"` AdminUsername *string `json:"adminUsername,omitempty"` @@ -682,7 +1263,10 @@ type OSProfile struct { Secrets *[]VaultSecretGroup `json:"secrets,omitempty"` } -// Plan is plan for the resource. +// Plan is specifies information about the marketplace image used to create the virtual machine. This element is only +// used for marketplace images. Before you can use a marketplace image from an API, you must enable the image for +// programmatic use. In the Azure portal, find the marketplace image that you want to use and then click **Want to +// deploy programmatically, Get Started ->**. Enter any required information and then click **Save**. type Plan struct { Name *string `json:"name,omitempty"` Publisher *string `json:"publisher,omitempty"` @@ -690,8 +1274,7 @@ type Plan struct { PromotionCode *string `json:"promotionCode,omitempty"` } -// PurchasePlan is used for establishing the purchase context of any 3rd Party -// artifact through MarketPlace. +// PurchasePlan is used for establishing the purchase context of any 3rd Party artifact through MarketPlace. type PurchasePlan struct { Publisher *string `json:"publisher,omitempty"` Name *string `json:"name,omitempty"` @@ -707,6 +1290,195 @@ type Resource struct { Tags *map[string]*string `json:"tags,omitempty"` } +// ResourceSku is describes an available Compute SKU. +type ResourceSku struct { + ResourceType *string `json:"resourceType,omitempty"` + Name *string `json:"name,omitempty"` + Tier *string `json:"tier,omitempty"` + Size *string `json:"size,omitempty"` + Family *string `json:"family,omitempty"` + Kind *string `json:"kind,omitempty"` + Capacity *ResourceSkuCapacity `json:"capacity,omitempty"` + Locations *[]string `json:"locations,omitempty"` + APIVersions *[]string `json:"apiVersions,omitempty"` + Costs *[]ResourceSkuCosts `json:"costs,omitempty"` + Capabilities *[]ResourceSkuCapabilities `json:"capabilities,omitempty"` + Restrictions *[]ResourceSkuRestrictions `json:"restrictions,omitempty"` +} + +// ResourceSkuCapabilities is describes The SKU capabilites object. +type ResourceSkuCapabilities struct { + Name *string `json:"name,omitempty"` + Value *string `json:"value,omitempty"` +} + +// ResourceSkuCapacity is describes scaling information of a SKU. +type ResourceSkuCapacity struct { + Minimum *int64 `json:"minimum,omitempty"` + Maximum *int64 `json:"maximum,omitempty"` + Default *int64 `json:"default,omitempty"` + ScaleType ResourceSkuCapacityScaleType `json:"scaleType,omitempty"` +} + +// ResourceSkuCosts is describes metadata for retrieving price info. +type ResourceSkuCosts struct { + MeterID *string `json:"meterID,omitempty"` + Quantity *int64 `json:"quantity,omitempty"` + ExtendedUnit *string `json:"extendedUnit,omitempty"` +} + +// ResourceSkuRestrictions is describes scaling information of a SKU. 
+type ResourceSkuRestrictions struct { + Type ResourceSkuRestrictionsType `json:"type,omitempty"` + Values *[]string `json:"values,omitempty"` + ReasonCode ResourceSkuRestrictionsReasonCode `json:"reasonCode,omitempty"` +} + +// ResourceSkusResult is the Compute List Skus operation response. +type ResourceSkusResult struct { + autorest.Response `json:"-"` + Value *[]ResourceSku `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// ResourceSkusResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client ResourceSkusResult) ResourceSkusResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// ResourceUpdate is the Resource model definition. +type ResourceUpdate struct { + Tags *map[string]*string `json:"tags,omitempty"` + Sku *DiskSku `json:"sku,omitempty"` +} + +// RollingUpgradePolicy is the configuration parameters used while performing a rolling upgrade. +type RollingUpgradePolicy struct { + MaxBatchInstancePercent *int32 `json:"maxBatchInstancePercent,omitempty"` + MaxUnhealthyInstancePercent *int32 `json:"maxUnhealthyInstancePercent,omitempty"` + MaxUnhealthyUpgradedInstancePercent *int32 `json:"maxUnhealthyUpgradedInstancePercent,omitempty"` + PauseTimeBetweenBatches *string `json:"pauseTimeBetweenBatches,omitempty"` +} + +// RollingUpgradeProgressInfo is information about the number of virtual machine instances in each upgrade state. +type RollingUpgradeProgressInfo struct { + SuccessfulInstanceCount *int32 `json:"successfulInstanceCount,omitempty"` + FailedInstanceCount *int32 `json:"failedInstanceCount,omitempty"` + InProgressInstanceCount *int32 `json:"inProgressInstanceCount,omitempty"` + PendingInstanceCount *int32 `json:"pendingInstanceCount,omitempty"` +} + +// RollingUpgradeRunningStatus is information about the current running state of the overall upgrade. +type RollingUpgradeRunningStatus struct { + Code RollingUpgradeStatusCode `json:"code,omitempty"` + StartTime *date.Time `json:"startTime,omitempty"` + LastAction RollingUpgradeActionType `json:"lastAction,omitempty"` + LastActionTime *date.Time `json:"lastActionTime,omitempty"` +} + +// RollingUpgradeStatusInfo is the status of the latest virtual machine scale set rolling upgrade. +type RollingUpgradeStatusInfo struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + *RollingUpgradeStatusInfoProperties `json:"properties,omitempty"` +} + +// RollingUpgradeStatusInfoProperties is the status of the latest virtual machine scale set rolling upgrade. +type RollingUpgradeStatusInfoProperties struct { + Policy *RollingUpgradePolicy `json:"policy,omitempty"` + RunningStatus *RollingUpgradeRunningStatus `json:"runningStatus,omitempty"` + Progress *RollingUpgradeProgressInfo `json:"progress,omitempty"` + Error *APIError `json:"error,omitempty"` +} + +// RunCommandDocument is describes the properties of a Run Command. 
+type RunCommandDocument struct { + autorest.Response `json:"-"` + Schema *string `json:"$schema,omitempty"` + ID *string `json:"id,omitempty"` + OsType OperatingSystemTypes `json:"osType,omitempty"` + Label *string `json:"label,omitempty"` + Description *string `json:"description,omitempty"` + Script *[]string `json:"script,omitempty"` + Parameters *[]RunCommandParameterDefinition `json:"parameters,omitempty"` +} + +// RunCommandDocumentBase is describes the properties of a Run Command metadata. +type RunCommandDocumentBase struct { + Schema *string `json:"$schema,omitempty"` + ID *string `json:"id,omitempty"` + OsType OperatingSystemTypes `json:"osType,omitempty"` + Label *string `json:"label,omitempty"` + Description *string `json:"description,omitempty"` +} + +// RunCommandInput is capture Virtual Machine parameters. +type RunCommandInput struct { + CommandID *string `json:"commandId,omitempty"` + Script *[]string `json:"script,omitempty"` + Parameters *[]RunCommandInputParameter `json:"parameters,omitempty"` +} + +// RunCommandInputParameter is describes the properties of a run command parameter. +type RunCommandInputParameter struct { + Name *string `json:"name,omitempty"` + Value *string `json:"value,omitempty"` +} + +// RunCommandListResult is the List Virtual Machine operation response. +type RunCommandListResult struct { + autorest.Response `json:"-"` + Value *[]RunCommandDocumentBase `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// RunCommandListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client RunCommandListResult) RunCommandListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// RunCommandParameterDefinition is describes the properties of a run command parameter. +type RunCommandParameterDefinition struct { + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + DefaultValue *string `json:"defaultValue,omitempty"` + Required *bool `json:"required,omitempty"` +} + +// RunCommandResult is run command operation response. +type RunCommandResult struct { + autorest.Response `json:"-"` + Name *string `json:"name,omitempty"` + Status *string `json:"status,omitempty"` + StartTime *date.Time `json:"startTime,omitempty"` + EndTime *date.Time `json:"endTime,omitempty"` + Error *APIError `json:"error,omitempty"` + *RunCommandResultProperties `json:"properties,omitempty"` +} + +// RunCommandResultProperties is compute-specific operation properties, including output +type RunCommandResultProperties struct { + Output *map[string]interface{} `json:"output,omitempty"` +} + // Sku is describes a virtual machine scale set sku. type Sku struct { Name *string `json:"name,omitempty"` @@ -714,19 +1486,64 @@ type Sku struct { Capacity *int64 `json:"capacity,omitempty"` } -// SSHConfiguration is sSH configuration for Linux based VMs running on Azure +// Snapshot is snapshot resource. 
+type Snapshot struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + ManagedBy *string `json:"managedBy,omitempty"` + Sku *DiskSku `json:"sku,omitempty"` + *DiskProperties `json:"properties,omitempty"` +} + +// SnapshotList is the List Snapshots operation response. +type SnapshotList struct { + autorest.Response `json:"-"` + Value *[]Snapshot `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// SnapshotListPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client SnapshotList) SnapshotListPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// SnapshotUpdate is snapshot update resource. +type SnapshotUpdate struct { + Tags *map[string]*string `json:"tags,omitempty"` + Sku *DiskSku `json:"sku,omitempty"` + *DiskUpdateProperties `json:"properties,omitempty"` +} + +// SourceVault is the vault id is an Azure Resource Manager Resoure id in the form +// /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName} +type SourceVault struct { + ID *string `json:"id,omitempty"` +} + +// SSHConfiguration is SSH configuration for Linux based VMs running on Azure type SSHConfiguration struct { PublicKeys *[]SSHPublicKey `json:"publicKeys,omitempty"` } -// SSHPublicKey is contains information about SSH certificate public key and -// the path on the Linux VM where the public key is placed. +// SSHPublicKey is contains information about SSH certificate public key and the path on the Linux VM where the public +// key is placed. type SSHPublicKey struct { Path *string `json:"path,omitempty"` KeyData *string `json:"keyData,omitempty"` } -// StorageProfile is describes a storage profile. +// StorageProfile is specifies the storage settings for the virtual machine disks. type StorageProfile struct { ImageReference *ImageReference `json:"imageReference,omitempty"` OsDisk *OSDisk `json:"osDisk,omitempty"` @@ -743,9 +1560,16 @@ type SubResourceReadOnly struct { ID *string `json:"id,omitempty"` } -// UpgradePolicy is describes an upgrade policy - automatic or manual. +// UpdateResource is the Update Resource model definition. +type UpdateResource struct { + Tags *map[string]*string `json:"tags,omitempty"` +} + +// UpgradePolicy is describes an upgrade policy - automatic, manual, or rolling. type UpgradePolicy struct { - Mode UpgradeMode `json:"mode,omitempty"` + Mode UpgradeMode `json:"mode,omitempty"` + RollingUpgradePolicy *RollingUpgradePolicy `json:"rollingUpgradePolicy,omitempty"` + AutomaticOSUpgrade *bool `json:"automaticOSUpgrade,omitempty"` } // Usage is describes Compute Resource Usage. @@ -762,15 +1586,14 @@ type UsageName struct { LocalizedValue *string `json:"localizedValue,omitempty"` } -// VaultCertificate is describes a single certificate reference in a Key Vault, -// and where the certificate should reside on the VM. +// VaultCertificate is describes a single certificate reference in a Key Vault, and where the certificate should reside +// on the VM. 
type VaultCertificate struct { CertificateURL *string `json:"certificateUrl,omitempty"` CertificateStore *string `json:"certificateStore,omitempty"` } -// VaultSecretGroup is describes a set of certificates which are all in the -// same Key Vault. +// VaultSecretGroup is describes a set of certificates which are all in the same Key Vault. type VaultSecretGroup struct { SourceVault *SubResource `json:"sourceVault,omitempty"` VaultCertificates *[]VaultCertificate `json:"vaultCertificates,omitempty"` @@ -793,10 +1616,10 @@ type VirtualMachine struct { *VirtualMachineProperties `json:"properties,omitempty"` Resources *[]VirtualMachineExtension `json:"resources,omitempty"` Identity *VirtualMachineIdentity `json:"identity,omitempty"` + Zones *[]string `json:"zones,omitempty"` } -// VirtualMachineAgentInstanceView is the instance view of the VM Agent running -// on the virtual machine. +// VirtualMachineAgentInstanceView is the instance view of the VM Agent running on the virtual machine. type VirtualMachineAgentInstanceView struct { VMAgentVersion *string `json:"vmAgentVersion,omitempty"` ExtensionHandlers *[]VirtualMachineExtensionHandlerInstanceView `json:"extensionHandlers,omitempty"` @@ -817,8 +1640,7 @@ type VirtualMachineCaptureResult struct { *VirtualMachineCaptureResultProperties `json:"properties,omitempty"` } -// VirtualMachineCaptureResultProperties is compute-specific operation -// properties, including output +// VirtualMachineCaptureResultProperties is compute-specific operation properties, including output type VirtualMachineCaptureResultProperties struct { Output *map[string]interface{} `json:"output,omitempty"` } @@ -834,8 +1656,7 @@ type VirtualMachineExtension struct { *VirtualMachineExtensionProperties `json:"properties,omitempty"` } -// VirtualMachineExtensionHandlerInstanceView is the instance view of a virtual -// machine extension handler. +// VirtualMachineExtensionHandlerInstanceView is the instance view of a virtual machine extension handler. type VirtualMachineExtensionHandlerInstanceView struct { Type *string `json:"type,omitempty"` TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty"` @@ -853,8 +1674,7 @@ type VirtualMachineExtensionImage struct { *VirtualMachineExtensionImageProperties `json:"properties,omitempty"` } -// VirtualMachineExtensionImageProperties is describes the properties of a -// Virtual Machine Extension Image. +// VirtualMachineExtensionImageProperties is describes the properties of a Virtual Machine Extension Image. type VirtualMachineExtensionImageProperties struct { OperatingSystem *string `json:"operatingSystem,omitempty"` ComputeRole *string `json:"computeRole,omitempty"` @@ -863,8 +1683,7 @@ type VirtualMachineExtensionImageProperties struct { SupportsMultipleExtensions *bool `json:"supportsMultipleExtensions,omitempty"` } -// VirtualMachineExtensionInstanceView is the instance view of a virtual -// machine extension. +// VirtualMachineExtensionInstanceView is the instance view of a virtual machine extension. type VirtualMachineExtensionInstanceView struct { Name *string `json:"name,omitempty"` Type *string `json:"type,omitempty"` @@ -873,8 +1692,7 @@ type VirtualMachineExtensionInstanceView struct { Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` } -// VirtualMachineExtensionProperties is describes the properties of a Virtual -// Machine Extension. +// VirtualMachineExtensionProperties is describes the properties of a Virtual Machine Extension. 
type VirtualMachineExtensionProperties struct { ForceUpdateTag *string `json:"forceUpdateTag,omitempty"` Publisher *string `json:"publisher,omitempty"` @@ -887,6 +1705,11 @@ type VirtualMachineExtensionProperties struct { InstanceView *VirtualMachineExtensionInstanceView `json:"instanceView,omitempty"` } +// VirtualMachineHealthStatus is the health status of the VM. +type VirtualMachineHealthStatus struct { + Status *InstanceViewStatus `json:"status,omitempty"` +} + // VirtualMachineIdentity is identity for the virtual machine. type VirtualMachineIdentity struct { PrincipalID *string `json:"principalId,omitempty"` @@ -904,8 +1727,7 @@ type VirtualMachineImage struct { *VirtualMachineImageProperties `json:"properties,omitempty"` } -// VirtualMachineImageProperties is describes the properties of a Virtual -// Machine Image. +// VirtualMachineImageProperties is describes the properties of a Virtual Machine Image. type VirtualMachineImageProperties struct { Plan *PurchasePlan `json:"plan,omitempty"` OsDiskImage *OSDiskImage `json:"osDiskImage,omitempty"` @@ -922,14 +1744,16 @@ type VirtualMachineImageResource struct { // VirtualMachineInstanceView is the instance view of a virtual machine. type VirtualMachineInstanceView struct { - PlatformUpdateDomain *int32 `json:"platformUpdateDomain,omitempty"` - PlatformFaultDomain *int32 `json:"platformFaultDomain,omitempty"` - RdpThumbPrint *string `json:"rdpThumbPrint,omitempty"` - VMAgent *VirtualMachineAgentInstanceView `json:"vmAgent,omitempty"` - Disks *[]DiskInstanceView `json:"disks,omitempty"` - Extensions *[]VirtualMachineExtensionInstanceView `json:"extensions,omitempty"` - BootDiagnostics *BootDiagnosticsInstanceView `json:"bootDiagnostics,omitempty"` - Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` + autorest.Response `json:"-"` + PlatformUpdateDomain *int32 `json:"platformUpdateDomain,omitempty"` + PlatformFaultDomain *int32 `json:"platformFaultDomain,omitempty"` + RdpThumbPrint *string `json:"rdpThumbPrint,omitempty"` + VMAgent *VirtualMachineAgentInstanceView `json:"vmAgent,omitempty"` + MaintenanceRedeployStatus *MaintenanceRedeployStatus `json:"maintenanceRedeployStatus,omitempty"` + Disks *[]DiskInstanceView `json:"disks,omitempty"` + Extensions *[]VirtualMachineExtensionInstanceView `json:"extensions,omitempty"` + BootDiagnostics *BootDiagnosticsInstanceView `json:"bootDiagnostics,omitempty"` + Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` } // VirtualMachineListResult is the List Virtual Machine operation response. @@ -977,10 +1801,10 @@ type VirtualMachineScaleSet struct { Plan *Plan `json:"plan,omitempty"` *VirtualMachineScaleSetProperties `json:"properties,omitempty"` Identity *VirtualMachineScaleSetIdentity `json:"identity,omitempty"` + Zones *[]string `json:"zones,omitempty"` } -// VirtualMachineScaleSetDataDisk is describes a virtual machine scale set data -// disk. +// VirtualMachineScaleSetDataDisk is describes a virtual machine scale set data disk. type VirtualMachineScaleSetDataDisk struct { Name *string `json:"name,omitempty"` Lun *int32 `json:"lun,omitempty"` @@ -990,23 +1814,41 @@ type VirtualMachineScaleSetDataDisk struct { ManagedDisk *VirtualMachineScaleSetManagedDiskParameters `json:"managedDisk,omitempty"` } -// VirtualMachineScaleSetExtension is describes a Virtual Machine Scale Set -// Extension. +// VirtualMachineScaleSetExtension is describes a Virtual Machine Scale Set Extension. 
type VirtualMachineScaleSetExtension struct { + autorest.Response `json:"-"` ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` *VirtualMachineScaleSetExtensionProperties `json:"properties,omitempty"` } -// VirtualMachineScaleSetExtensionProfile is describes a virtual machine scale -// set extension profile. +// VirtualMachineScaleSetExtensionListResult is the List VM scale set extension operation response. +type VirtualMachineScaleSetExtensionListResult struct { + autorest.Response `json:"-"` + Value *[]VirtualMachineScaleSetExtension `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// VirtualMachineScaleSetExtensionListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client VirtualMachineScaleSetExtensionListResult) VirtualMachineScaleSetExtensionListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// VirtualMachineScaleSetExtensionProfile is describes a virtual machine scale set extension profile. type VirtualMachineScaleSetExtensionProfile struct { Extensions *[]VirtualMachineScaleSetExtension `json:"extensions,omitempty"` } -// VirtualMachineScaleSetExtensionProperties is describes the properties of a -// Virtual Machine Scale Set Extension. +// VirtualMachineScaleSetExtensionProperties is describes the properties of a Virtual Machine Scale Set Extension. type VirtualMachineScaleSetExtensionProperties struct { + ForceUpdateTag *string `json:"forceUpdateTag,omitempty"` Publisher *string `json:"publisher,omitempty"` Type *string `json:"type,omitempty"` TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty"` @@ -1016,16 +1858,14 @@ type VirtualMachineScaleSetExtensionProperties struct { ProvisioningState *string `json:"provisioningState,omitempty"` } -// VirtualMachineScaleSetIdentity is identity for the virtual machine scale -// set. +// VirtualMachineScaleSetIdentity is identity for the virtual machine scale set. type VirtualMachineScaleSetIdentity struct { PrincipalID *string `json:"principalId,omitempty"` TenantID *string `json:"tenantId,omitempty"` Type ResourceIdentityType `json:"type,omitempty"` } -// VirtualMachineScaleSetInstanceView is the instance view of a virtual machine -// scale set. +// VirtualMachineScaleSetInstanceView is the instance view of a virtual machine scale set. type VirtualMachineScaleSetInstanceView struct { autorest.Response `json:"-"` VirtualMachine *VirtualMachineScaleSetInstanceViewStatusesSummary `json:"virtualMachine,omitempty"` @@ -1033,31 +1873,32 @@ type VirtualMachineScaleSetInstanceView struct { Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` } -// VirtualMachineScaleSetInstanceViewStatusesSummary is instance view statuses -// summary for virtual machines of a virtual machine scale set. +// VirtualMachineScaleSetInstanceViewStatusesSummary is instance view statuses summary for virtual machines of a +// virtual machine scale set. type VirtualMachineScaleSetInstanceViewStatusesSummary struct { StatusesSummary *[]VirtualMachineStatusCodeCount `json:"statusesSummary,omitempty"` } -// VirtualMachineScaleSetIPConfiguration is describes a virtual machine scale -// set network profile's IP configuration. 
+// VirtualMachineScaleSetIPConfiguration is describes a virtual machine scale set network profile's IP configuration. type VirtualMachineScaleSetIPConfiguration struct { ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` *VirtualMachineScaleSetIPConfigurationProperties `json:"properties,omitempty"` } -// VirtualMachineScaleSetIPConfigurationProperties is describes a virtual -// machine scale set network profile's IP configuration properties. +// VirtualMachineScaleSetIPConfigurationProperties is describes a virtual machine scale set network profile's IP +// configuration properties. type VirtualMachineScaleSetIPConfigurationProperties struct { - Subnet *APIEntityReference `json:"subnet,omitempty"` - ApplicationGatewayBackendAddressPools *[]SubResource `json:"applicationGatewayBackendAddressPools,omitempty"` - LoadBalancerBackendAddressPools *[]SubResource `json:"loadBalancerBackendAddressPools,omitempty"` - LoadBalancerInboundNatPools *[]SubResource `json:"loadBalancerInboundNatPools,omitempty"` + Subnet *APIEntityReference `json:"subnet,omitempty"` + Primary *bool `json:"primary,omitempty"` + PublicIPAddressConfiguration *VirtualMachineScaleSetPublicIPAddressConfiguration `json:"publicIPAddressConfiguration,omitempty"` + PrivateIPAddressVersion IPVersion `json:"privateIPAddressVersion,omitempty"` + ApplicationGatewayBackendAddressPools *[]SubResource `json:"applicationGatewayBackendAddressPools,omitempty"` + LoadBalancerBackendAddressPools *[]SubResource `json:"loadBalancerBackendAddressPools,omitempty"` + LoadBalancerInboundNatPools *[]SubResource `json:"loadBalancerInboundNatPools,omitempty"` } -// VirtualMachineScaleSetListResult is the List Virtual Machine operation -// response. +// VirtualMachineScaleSetListResult is the List Virtual Machine operation response. type VirtualMachineScaleSetListResult struct { autorest.Response `json:"-"` Value *[]VirtualMachineScaleSet `json:"value,omitempty"` @@ -1076,8 +1917,7 @@ func (client VirtualMachineScaleSetListResult) VirtualMachineScaleSetListResultP autorest.WithBaseURL(to.String(client.NextLink))) } -// VirtualMachineScaleSetListSkusResult is the Virtual Machine Scale Set List -// Skus operation response. +// VirtualMachineScaleSetListSkusResult is the Virtual Machine Scale Set List Skus operation response. type VirtualMachineScaleSetListSkusResult struct { autorest.Response `json:"-"` Value *[]VirtualMachineScaleSetSku `json:"value,omitempty"` @@ -1096,8 +1936,7 @@ func (client VirtualMachineScaleSetListSkusResult) VirtualMachineScaleSetListSku autorest.WithBaseURL(to.String(client.NextLink))) } -// VirtualMachineScaleSetListWithLinkResult is the List Virtual Machine -// operation response. +// VirtualMachineScaleSetListWithLinkResult is the List Virtual Machine operation response. type VirtualMachineScaleSetListWithLinkResult struct { autorest.Response `json:"-"` Value *[]VirtualMachineScaleSet `json:"value,omitempty"` @@ -1116,35 +1955,42 @@ func (client VirtualMachineScaleSetListWithLinkResult) VirtualMachineScaleSetLis autorest.WithBaseURL(to.String(client.NextLink))) } -// VirtualMachineScaleSetManagedDiskParameters is describes the parameters of a -// ScaleSet managed disk. +// VirtualMachineScaleSetManagedDiskParameters is describes the parameters of a ScaleSet managed disk. 
type VirtualMachineScaleSetManagedDiskParameters struct { StorageAccountType StorageAccountTypes `json:"storageAccountType,omitempty"` } -// VirtualMachineScaleSetNetworkConfiguration is describes a virtual machine -// scale set network profile's network configurations. +// VirtualMachineScaleSetNetworkConfiguration is describes a virtual machine scale set network profile's network +// configurations. type VirtualMachineScaleSetNetworkConfiguration struct { ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` *VirtualMachineScaleSetNetworkConfigurationProperties `json:"properties,omitempty"` } -// VirtualMachineScaleSetNetworkConfigurationProperties is describes a virtual -// machine scale set network profile's IP configuration. +// VirtualMachineScaleSetNetworkConfigurationDNSSettings is describes a virtual machines scale sets network +// configuration's DNS settings. +type VirtualMachineScaleSetNetworkConfigurationDNSSettings struct { + DNSServers *[]string `json:"dnsServers,omitempty"` +} + +// VirtualMachineScaleSetNetworkConfigurationProperties is describes a virtual machine scale set network profile's IP +// configuration. type VirtualMachineScaleSetNetworkConfigurationProperties struct { - Primary *bool `json:"primary,omitempty"` - IPConfigurations *[]VirtualMachineScaleSetIPConfiguration `json:"ipConfigurations,omitempty"` + Primary *bool `json:"primary,omitempty"` + EnableAcceleratedNetworking *bool `json:"enableAcceleratedNetworking,omitempty"` + NetworkSecurityGroup *SubResource `json:"networkSecurityGroup,omitempty"` + DNSSettings *VirtualMachineScaleSetNetworkConfigurationDNSSettings `json:"dnsSettings,omitempty"` + IPConfigurations *[]VirtualMachineScaleSetIPConfiguration `json:"ipConfigurations,omitempty"` } -// VirtualMachineScaleSetNetworkProfile is describes a virtual machine scale -// set network profile. +// VirtualMachineScaleSetNetworkProfile is describes a virtual machine scale set network profile. type VirtualMachineScaleSetNetworkProfile struct { + HealthProbe *APIEntityReference `json:"healthProbe,omitempty"` NetworkInterfaceConfigurations *[]VirtualMachineScaleSetNetworkConfiguration `json:"networkInterfaceConfigurations,omitempty"` } -// VirtualMachineScaleSetOSDisk is describes a virtual machine scale set -// operating system disk. +// VirtualMachineScaleSetOSDisk is describes a virtual machine scale set operating system disk. type VirtualMachineScaleSetOSDisk struct { Name *string `json:"name,omitempty"` Caching CachingTypes `json:"caching,omitempty"` @@ -1155,8 +2001,7 @@ type VirtualMachineScaleSetOSDisk struct { ManagedDisk *VirtualMachineScaleSetManagedDiskParameters `json:"managedDisk,omitempty"` } -// VirtualMachineScaleSetOSProfile is describes a virtual machine scale set OS -// profile. +// VirtualMachineScaleSetOSProfile is describes a virtual machine scale set OS profile. type VirtualMachineScaleSetOSProfile struct { ComputerNamePrefix *string `json:"computerNamePrefix,omitempty"` AdminUsername *string `json:"adminUsername,omitempty"` @@ -1167,18 +2012,37 @@ type VirtualMachineScaleSetOSProfile struct { Secrets *[]VaultSecretGroup `json:"secrets,omitempty"` } -// VirtualMachineScaleSetProperties is describes the properties of a Virtual -// Machine Scale Set. +// VirtualMachineScaleSetProperties is describes the properties of a Virtual Machine Scale Set. 
type VirtualMachineScaleSetProperties struct { UpgradePolicy *UpgradePolicy `json:"upgradePolicy,omitempty"` VirtualMachineProfile *VirtualMachineScaleSetVMProfile `json:"virtualMachineProfile,omitempty"` ProvisioningState *string `json:"provisioningState,omitempty"` Overprovision *bool `json:"overprovision,omitempty"` + UniqueID *string `json:"uniqueId,omitempty"` SinglePlacementGroup *bool `json:"singlePlacementGroup,omitempty"` } -// VirtualMachineScaleSetSku is describes an available virtual machine scale -// set sku. +// VirtualMachineScaleSetPublicIPAddressConfiguration is describes a virtual machines scale set IP Configuration's +// PublicIPAddress configuration +type VirtualMachineScaleSetPublicIPAddressConfiguration struct { + Name *string `json:"name,omitempty"` + *VirtualMachineScaleSetPublicIPAddressConfigurationProperties `json:"properties,omitempty"` +} + +// VirtualMachineScaleSetPublicIPAddressConfigurationDNSSettings is describes a virtual machines scale sets network +// configuration's DNS settings. +type VirtualMachineScaleSetPublicIPAddressConfigurationDNSSettings struct { + DomainNameLabel *string `json:"domainNameLabel,omitempty"` +} + +// VirtualMachineScaleSetPublicIPAddressConfigurationProperties is describes a virtual machines scale set IP +// Configuration's PublicIPAddress configuration +type VirtualMachineScaleSetPublicIPAddressConfigurationProperties struct { + IdleTimeoutInMinutes *int32 `json:"idleTimeoutInMinutes,omitempty"` + DNSSettings *VirtualMachineScaleSetPublicIPAddressConfigurationDNSSettings `json:"dnsSettings,omitempty"` +} + +// VirtualMachineScaleSetSku is describes an available virtual machine scale set sku. type VirtualMachineScaleSetSku struct { ResourceType *string `json:"resourceType,omitempty"` Sku *Sku `json:"sku,omitempty"` @@ -1193,16 +2057,122 @@ type VirtualMachineScaleSetSkuCapacity struct { ScaleType VirtualMachineScaleSetSkuScaleType `json:"scaleType,omitempty"` } -// VirtualMachineScaleSetStorageProfile is describes a virtual machine scale -// set storage profile. +// VirtualMachineScaleSetStorageProfile is describes a virtual machine scale set storage profile. type VirtualMachineScaleSetStorageProfile struct { ImageReference *ImageReference `json:"imageReference,omitempty"` OsDisk *VirtualMachineScaleSetOSDisk `json:"osDisk,omitempty"` DataDisks *[]VirtualMachineScaleSetDataDisk `json:"dataDisks,omitempty"` } -// VirtualMachineScaleSetVM is describes a virtual machine scale set virtual -// machine. +// VirtualMachineScaleSetUpdate is describes a Virtual Machine Scale Set. +type VirtualMachineScaleSetUpdate struct { + Tags *map[string]*string `json:"tags,omitempty"` + Sku *Sku `json:"sku,omitempty"` + Plan *Plan `json:"plan,omitempty"` + *VirtualMachineScaleSetUpdateProperties `json:"properties,omitempty"` + Identity *VirtualMachineScaleSetIdentity `json:"identity,omitempty"` +} + +// VirtualMachineScaleSetUpdateIPConfiguration is describes a virtual machine scale set network profile's IP +// configuration. +type VirtualMachineScaleSetUpdateIPConfiguration struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + *VirtualMachineScaleSetUpdateIPConfigurationProperties `json:"properties,omitempty"` +} + +// VirtualMachineScaleSetUpdateIPConfigurationProperties is describes a virtual machine scale set network profile's IP +// configuration properties. 
+type VirtualMachineScaleSetUpdateIPConfigurationProperties struct { + Subnet *APIEntityReference `json:"subnet,omitempty"` + Primary *bool `json:"primary,omitempty"` + PublicIPAddressConfiguration *VirtualMachineScaleSetUpdatePublicIPAddressConfiguration `json:"publicIPAddressConfiguration,omitempty"` + PrivateIPAddressVersion IPVersion `json:"privateIPAddressVersion,omitempty"` + ApplicationGatewayBackendAddressPools *[]SubResource `json:"applicationGatewayBackendAddressPools,omitempty"` + LoadBalancerBackendAddressPools *[]SubResource `json:"loadBalancerBackendAddressPools,omitempty"` + LoadBalancerInboundNatPools *[]SubResource `json:"loadBalancerInboundNatPools,omitempty"` +} + +// VirtualMachineScaleSetUpdateNetworkConfiguration is describes a virtual machine scale set network profile's network +// configurations. +type VirtualMachineScaleSetUpdateNetworkConfiguration struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + *VirtualMachineScaleSetUpdateNetworkConfigurationProperties `json:"properties,omitempty"` +} + +// VirtualMachineScaleSetUpdateNetworkConfigurationProperties is describes a virtual machine scale set updatable +// network profile's IP configuration.Use this object for updating network profile's IP Configuration. +type VirtualMachineScaleSetUpdateNetworkConfigurationProperties struct { + Primary *bool `json:"primary,omitempty"` + EnableAcceleratedNetworking *bool `json:"enableAcceleratedNetworking,omitempty"` + NetworkSecurityGroup *SubResource `json:"networkSecurityGroup,omitempty"` + DNSSettings *VirtualMachineScaleSetNetworkConfigurationDNSSettings `json:"dnsSettings,omitempty"` + IPConfigurations *[]VirtualMachineScaleSetUpdateIPConfiguration `json:"ipConfigurations,omitempty"` +} + +// VirtualMachineScaleSetUpdateNetworkProfile is describes a virtual machine scale set network profile. +type VirtualMachineScaleSetUpdateNetworkProfile struct { + NetworkInterfaceConfigurations *[]VirtualMachineScaleSetUpdateNetworkConfiguration `json:"networkInterfaceConfigurations,omitempty"` +} + +// VirtualMachineScaleSetUpdateOSDisk is describes virtual machine scale set operating system disk Update Object. This +// should be used for Updating VMSS OS Disk. +type VirtualMachineScaleSetUpdateOSDisk struct { + Caching CachingTypes `json:"caching,omitempty"` + Image *VirtualHardDisk `json:"image,omitempty"` + VhdContainers *[]string `json:"vhdContainers,omitempty"` + ManagedDisk *VirtualMachineScaleSetManagedDiskParameters `json:"managedDisk,omitempty"` +} + +// VirtualMachineScaleSetUpdateOSProfile is describes a virtual machine scale set OS profile. +type VirtualMachineScaleSetUpdateOSProfile struct { + CustomData *string `json:"customData,omitempty"` + WindowsConfiguration *WindowsConfiguration `json:"windowsConfiguration,omitempty"` + LinuxConfiguration *LinuxConfiguration `json:"linuxConfiguration,omitempty"` + Secrets *[]VaultSecretGroup `json:"secrets,omitempty"` +} + +// VirtualMachineScaleSetUpdateProperties is describes the properties of a Virtual Machine Scale Set. 
+type VirtualMachineScaleSetUpdateProperties struct { + UpgradePolicy *UpgradePolicy `json:"upgradePolicy,omitempty"` + VirtualMachineProfile *VirtualMachineScaleSetUpdateVMProfile `json:"virtualMachineProfile,omitempty"` + Overprovision *bool `json:"overprovision,omitempty"` + SinglePlacementGroup *bool `json:"singlePlacementGroup,omitempty"` +} + +// VirtualMachineScaleSetUpdatePublicIPAddressConfiguration is describes a virtual machines scale set IP +// Configuration's PublicIPAddress configuration +type VirtualMachineScaleSetUpdatePublicIPAddressConfiguration struct { + Name *string `json:"name,omitempty"` + *VirtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties `json:"properties,omitempty"` +} + +// VirtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties is describes a virtual machines scale set IP +// Configuration's PublicIPAddress configuration +type VirtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties struct { + IdleTimeoutInMinutes *int32 `json:"idleTimeoutInMinutes,omitempty"` + DNSSettings *VirtualMachineScaleSetPublicIPAddressConfigurationDNSSettings `json:"dnsSettings,omitempty"` +} + +// VirtualMachineScaleSetUpdateStorageProfile is describes a virtual machine scale set storage profile. +type VirtualMachineScaleSetUpdateStorageProfile struct { + ImageReference *ImageReference `json:"imageReference,omitempty"` + OsDisk *VirtualMachineScaleSetUpdateOSDisk `json:"osDisk,omitempty"` + DataDisks *[]VirtualMachineScaleSetDataDisk `json:"dataDisks,omitempty"` +} + +// VirtualMachineScaleSetUpdateVMProfile is describes a virtual machine scale set virtual machine profile. +type VirtualMachineScaleSetUpdateVMProfile struct { + OsProfile *VirtualMachineScaleSetUpdateOSProfile `json:"osProfile,omitempty"` + StorageProfile *VirtualMachineScaleSetUpdateStorageProfile `json:"storageProfile,omitempty"` + NetworkProfile *VirtualMachineScaleSetUpdateNetworkProfile `json:"networkProfile,omitempty"` + DiagnosticsProfile *DiagnosticsProfile `json:"diagnosticsProfile,omitempty"` + ExtensionProfile *VirtualMachineScaleSetExtensionProfile `json:"extensionProfile,omitempty"` + LicenseType *string `json:"licenseType,omitempty"` +} + +// VirtualMachineScaleSetVM is describes a virtual machine scale set virtual machine. type VirtualMachineScaleSetVM struct { autorest.Response `json:"-"` ID *string `json:"id,omitempty"` @@ -1217,27 +2187,24 @@ type VirtualMachineScaleSetVM struct { Resources *[]VirtualMachineExtension `json:"resources,omitempty"` } -// VirtualMachineScaleSetVMExtensionsSummary is extensions summary for virtual -// machines of a virtual machine scale set. +// VirtualMachineScaleSetVMExtensionsSummary is extensions summary for virtual machines of a virtual machine scale set. type VirtualMachineScaleSetVMExtensionsSummary struct { Name *string `json:"name,omitempty"` StatusesSummary *[]VirtualMachineStatusCodeCount `json:"statusesSummary,omitempty"` } -// VirtualMachineScaleSetVMInstanceIDs is specifies a list of virtual machine -// instance IDs from the VM scale set. +// VirtualMachineScaleSetVMInstanceIDs is specifies a list of virtual machine instance IDs from the VM scale set. type VirtualMachineScaleSetVMInstanceIDs struct { InstanceIds *[]string `json:"instanceIds,omitempty"` } -// VirtualMachineScaleSetVMInstanceRequiredIDs is specifies a list of virtual -// machine instance IDs from the VM scale set. +// VirtualMachineScaleSetVMInstanceRequiredIDs is specifies a list of virtual machine instance IDs from the VM scale +// set. 
type VirtualMachineScaleSetVMInstanceRequiredIDs struct { InstanceIds *[]string `json:"instanceIds,omitempty"` } -// VirtualMachineScaleSetVMInstanceView is the instance view of a virtual -// machine scale set VM. +// VirtualMachineScaleSetVMInstanceView is the instance view of a virtual machine scale set VM. type VirtualMachineScaleSetVMInstanceView struct { autorest.Response `json:"-"` PlatformUpdateDomain *int32 `json:"platformUpdateDomain,omitempty"` @@ -1246,13 +2213,13 @@ type VirtualMachineScaleSetVMInstanceView struct { VMAgent *VirtualMachineAgentInstanceView `json:"vmAgent,omitempty"` Disks *[]DiskInstanceView `json:"disks,omitempty"` Extensions *[]VirtualMachineExtensionInstanceView `json:"extensions,omitempty"` + VMHealth *VirtualMachineHealthStatus `json:"vmHealth,omitempty"` BootDiagnostics *BootDiagnosticsInstanceView `json:"bootDiagnostics,omitempty"` Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` PlacementGroupID *string `json:"placementGroupId,omitempty"` } -// VirtualMachineScaleSetVMListResult is the List Virtual Machine Scale Set VMs -// operation response. +// VirtualMachineScaleSetVMListResult is the List Virtual Machine Scale Set VMs operation response. type VirtualMachineScaleSetVMListResult struct { autorest.Response `json:"-"` Value *[]VirtualMachineScaleSetVM `json:"value,omitempty"` @@ -1271,17 +2238,17 @@ func (client VirtualMachineScaleSetVMListResult) VirtualMachineScaleSetVMListRes autorest.WithBaseURL(to.String(client.NextLink))) } -// VirtualMachineScaleSetVMProfile is describes a virtual machine scale set -// virtual machine profile. +// VirtualMachineScaleSetVMProfile is describes a virtual machine scale set virtual machine profile. type VirtualMachineScaleSetVMProfile struct { - OsProfile *VirtualMachineScaleSetOSProfile `json:"osProfile,omitempty"` - StorageProfile *VirtualMachineScaleSetStorageProfile `json:"storageProfile,omitempty"` - NetworkProfile *VirtualMachineScaleSetNetworkProfile `json:"networkProfile,omitempty"` - ExtensionProfile *VirtualMachineScaleSetExtensionProfile `json:"extensionProfile,omitempty"` + OsProfile *VirtualMachineScaleSetOSProfile `json:"osProfile,omitempty"` + StorageProfile *VirtualMachineScaleSetStorageProfile `json:"storageProfile,omitempty"` + NetworkProfile *VirtualMachineScaleSetNetworkProfile `json:"networkProfile,omitempty"` + DiagnosticsProfile *DiagnosticsProfile `json:"diagnosticsProfile,omitempty"` + ExtensionProfile *VirtualMachineScaleSetExtensionProfile `json:"extensionProfile,omitempty"` + LicenseType *string `json:"licenseType,omitempty"` } -// VirtualMachineScaleSetVMProperties is describes the properties of a virtual -// machine scale set virtual machine. +// VirtualMachineScaleSetVMProperties is describes the properties of a virtual machine scale set virtual machine. type VirtualMachineScaleSetVMProperties struct { LatestModelApplied *bool `json:"latestModelApplied,omitempty"` VMID *string `json:"vmId,omitempty"` @@ -1312,14 +2279,14 @@ type VirtualMachineSizeListResult struct { Value *[]VirtualMachineSize `json:"value,omitempty"` } -// VirtualMachineStatusCodeCount is the status code and count of the virtual -// machine scale set instance view status summary. +// VirtualMachineStatusCodeCount is the status code and count of the virtual machine scale set instance view status +// summary. type VirtualMachineStatusCodeCount struct { Code *string `json:"code,omitempty"` Count *int32 `json:"count,omitempty"` } -// WindowsConfiguration is describes Windows Configuration of the OS Profile. 
+// WindowsConfiguration is specifies Windows operating system settings on the virtual machine. type WindowsConfiguration struct { ProvisionVMAgent *bool `json:"provisionVMAgent,omitempty"` EnableAutomaticUpdates *bool `json:"enableAutomaticUpdates,omitempty"` @@ -1328,14 +2295,12 @@ type WindowsConfiguration struct { WinRM *WinRMConfiguration `json:"winRM,omitempty"` } -// WinRMConfiguration is describes Windows Remote Management configuration of -// the VM +// WinRMConfiguration is describes Windows Remote Management configuration of the VM type WinRMConfiguration struct { Listeners *[]WinRMListener `json:"listeners,omitempty"` } -// WinRMListener is describes Protocol and thumbprint of Windows Remote -// Management listener +// WinRMListener is describes Protocol and thumbprint of Windows Remote Management listener type WinRMListener struct { Protocol ProtocolTypes `json:"protocol,omitempty"` CertificateURL *string `json:"certificateUrl,omitempty"` diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/resourceskus.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/resourceskus.go new file mode 100644 index 000000000000..9dd7fffa8564 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/resourceskus.go @@ -0,0 +1,169 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// ResourceSkusClient is the compute Client +type ResourceSkusClient struct { + ManagementClient +} + +// NewResourceSkusClient creates an instance of the ResourceSkusClient client. +func NewResourceSkusClient(subscriptionID string) ResourceSkusClient { + return NewResourceSkusClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewResourceSkusClientWithBaseURI creates an instance of the ResourceSkusClient client. +func NewResourceSkusClientWithBaseURI(baseURI string, subscriptionID string) ResourceSkusClient { + return ResourceSkusClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List gets the list of Microsoft.Compute SKUs available for your Subscription. 
+func (client ResourceSkusClient) List() (result ResourceSkusResult, err error) { + req, err := client.ListPreparer() + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ResourceSkusClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.ResourceSkusClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ResourceSkusClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client ResourceSkusClient) ListPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/skus", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ResourceSkusClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client ResourceSkusClient) ListResponder(resp *http.Response) (result ResourceSkusResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client ResourceSkusClient) ListNextResults(lastResults ResourceSkusResult) (result ResourceSkusResult, err error) { + req, err := lastResults.ResourceSkusResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.ResourceSkusClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.ResourceSkusClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ResourceSkusClient", "List", resp, "Failure responding to next results request") + } + + return +} + +// ListComplete gets all elements from the list without paging. 
+func (client ResourceSkusClient) ListComplete(cancel <-chan struct{}) (<-chan ResourceSku, <-chan error) { + resultChan := make(chan ResourceSku) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List() + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/snapshots.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/snapshots.go new file mode 100644 index 000000000000..2c7135292156 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/snapshots.go @@ -0,0 +1,819 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "net/http" +) + +// SnapshotsClient is the compute Client +type SnapshotsClient struct { + ManagementClient +} + +// NewSnapshotsClient creates an instance of the SnapshotsClient client. +func NewSnapshotsClient(subscriptionID string) SnapshotsClient { + return NewSnapshotsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewSnapshotsClientWithBaseURI creates an instance of the SnapshotsClient client. +func NewSnapshotsClientWithBaseURI(baseURI string, subscriptionID string) SnapshotsClient { + return SnapshotsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a snapshot. This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. snapshotName is the name of the snapshot within the given +// subscription and resource group. snapshot is snapshot object supplied in the body of the Put disk operation. 
+func (client SnapshotsClient) CreateOrUpdate(resourceGroupName string, snapshotName string, snapshot Snapshot, cancel <-chan struct{}) (<-chan Snapshot, <-chan error) { + resultChan := make(chan Snapshot, 1) + errChan := make(chan error, 1) + if err := validation.Validate([]validation.Validation{ + {TargetValue: snapshot, + Constraints: []validation.Constraint{{Target: "snapshot.DiskProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "snapshot.DiskProperties.CreationData", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "snapshot.DiskProperties.CreationData.ImageReference", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "snapshot.DiskProperties.CreationData.ImageReference.ID", Name: validation.Null, Rule: true, Chain: nil}}}, + }}, + {Target: "snapshot.DiskProperties.EncryptionSettings", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "snapshot.DiskProperties.EncryptionSettings.DiskEncryptionKey", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "snapshot.DiskProperties.EncryptionSettings.DiskEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "snapshot.DiskProperties.EncryptionSettings.DiskEncryptionKey.SecretURL", Name: validation.Null, Rule: true, Chain: nil}, + }}, + {Target: "snapshot.DiskProperties.EncryptionSettings.KeyEncryptionKey", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "snapshot.DiskProperties.EncryptionSettings.KeyEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "snapshot.DiskProperties.EncryptionSettings.KeyEncryptionKey.KeyURL", Name: validation.Null, Rule: true, Chain: nil}, + }}, + }}, + }}}}}); err != nil { + errChan <- validation.NewErrorWithValidationError(err, "compute.SnapshotsClient", "CreateOrUpdate") + close(errChan) + close(resultChan) + return resultChan, errChan + } + + go func() { + var err error + var result Snapshot + defer func() { + if err != nil { + errChan <- err + } + resultChan <- result + close(resultChan) + close(errChan) + }() + req, err := client.CreateOrUpdatePreparer(resourceGroupName, snapshotName, snapshot, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client SnapshotsClient) CreateOrUpdatePreparer(resourceGroupName string, snapshotName string, snapshot Snapshot, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "snapshotName": autorest.Encode("path", snapshotName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}", pathParameters), + autorest.WithJSON(snapshot), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client SnapshotsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client SnapshotsClient) CreateOrUpdateResponder(resp *http.Response) (result Snapshot, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a snapshot. This method may poll for completion. Polling can be canceled by passing the cancel +// channel argument. The channel will be used to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. snapshotName is the name of the snapshot within the given +// subscription and resource group. +func (client SnapshotsClient) Delete(resourceGroupName string, snapshotName string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { + resultChan := make(chan OperationStatusResponse, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result OperationStatusResponse + defer func() { + if err != nil { + errChan <- err + } + resultChan <- result + close(resultChan) + close(errChan) + }() + req, err := client.DeletePreparer(resourceGroupName, snapshotName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "Delete", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// DeletePreparer prepares the Delete request. 
+func (client SnapshotsClient) DeletePreparer(resourceGroupName string, snapshotName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "snapshotName": autorest.Encode("path", snapshotName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client SnapshotsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client SnapshotsClient) DeleteResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get gets information about a snapshot. +// +// resourceGroupName is the name of the resource group. snapshotName is the name of the snapshot within the given +// subscription and resource group. +func (client SnapshotsClient) Get(resourceGroupName string, snapshotName string) (result Snapshot, err error) { + req, err := client.GetPreparer(resourceGroupName, snapshotName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client SnapshotsClient) GetPreparer(resourceGroupName string, snapshotName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "snapshotName": autorest.Encode("path", snapshotName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client SnapshotsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client SnapshotsClient) GetResponder(resp *http.Response) (result Snapshot, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GrantAccess grants access to a snapshot. This method may poll for completion. Polling can be canceled by passing the +// cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. snapshotName is the name of the snapshot within the given +// subscription and resource group. grantAccessData is access data object supplied in the body of the get snapshot +// access operation. 
+func (client SnapshotsClient) GrantAccess(resourceGroupName string, snapshotName string, grantAccessData GrantAccessData, cancel <-chan struct{}) (<-chan AccessURI, <-chan error) { + resultChan := make(chan AccessURI, 1) + errChan := make(chan error, 1) + if err := validation.Validate([]validation.Validation{ + {TargetValue: grantAccessData, + Constraints: []validation.Constraint{{Target: "grantAccessData.DurationInSeconds", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + errChan <- validation.NewErrorWithValidationError(err, "compute.SnapshotsClient", "GrantAccess") + close(errChan) + close(resultChan) + return resultChan, errChan + } + + go func() { + var err error + var result AccessURI + defer func() { + if err != nil { + errChan <- err + } + resultChan <- result + close(resultChan) + close(errChan) + }() + req, err := client.GrantAccessPreparer(resourceGroupName, snapshotName, grantAccessData, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "GrantAccess", nil, "Failure preparing request") + return + } + + resp, err := client.GrantAccessSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "GrantAccess", resp, "Failure sending request") + return + } + + result, err = client.GrantAccessResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "GrantAccess", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// GrantAccessPreparer prepares the GrantAccess request. +func (client SnapshotsClient) GrantAccessPreparer(resourceGroupName string, snapshotName string, grantAccessData GrantAccessData, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "snapshotName": autorest.Encode("path", snapshotName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/beginGetAccess", pathParameters), + autorest.WithJSON(grantAccessData), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// GrantAccessSender sends the GrantAccess request. The method will close the +// http.Response Body if it receives an error. +func (client SnapshotsClient) GrantAccessSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// GrantAccessResponder handles the response to the GrantAccess request. The method always +// closes the http.Response Body. +func (client SnapshotsClient) GrantAccessResponder(resp *http.Response) (result AccessURI, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists snapshots under a subscription. 
+func (client SnapshotsClient) List() (result SnapshotList, err error) { + req, err := client.ListPreparer() + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client SnapshotsClient) ListPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/snapshots", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client SnapshotsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client SnapshotsClient) ListResponder(resp *http.Response) (result SnapshotList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client SnapshotsClient) ListNextResults(lastResults SnapshotList) (result SnapshotList, err error) { + req, err := lastResults.SnapshotListPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.SnapshotsClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.SnapshotsClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "List", resp, "Failure responding to next results request") + } + + return +} + +// ListComplete gets all elements from the list without paging. 
+func (client SnapshotsClient) ListComplete(cancel <-chan struct{}) (<-chan Snapshot, <-chan error) { + resultChan := make(chan Snapshot) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List() + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} + +// ListByResourceGroup lists snapshots under a resource group. +// +// resourceGroupName is the name of the resource group. +func (client SnapshotsClient) ListByResourceGroup(resourceGroupName string) (result SnapshotList, err error) { + req, err := client.ListByResourceGroupPreparer(resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "ListByResourceGroup", nil, "Failure preparing request") + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "ListByResourceGroup", resp, "Failure sending request") + return + } + + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. +func (client SnapshotsClient) ListByResourceGroupPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. +func (client SnapshotsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. +func (client SnapshotsClient) ListByResourceGroupResponder(resp *http.Response) (result SnapshotList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByResourceGroupNextResults retrieves the next set of results, if any. 
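The `ListComplete` method defined above hides the `NextLink` paging behind a per-item channel. A short draining sketch, assuming an already authorized `compute.SnapshotsClient` (constructor and credentials omitted):

```go
package example

import (
	"fmt"
	"log"

	"github.com/Azure/go-autorest/autorest/to" // only to show the package family; unused helpers removed below
	"github.com/Azure/azure-sdk-for-go/arm/compute"
)

var _ = to.StringPtr // keep the illustrative import referenced

// countSnapshots drains the channels returned by ListComplete.
func countSnapshots(client compute.SnapshotsClient) {
	// cancel lets a caller abandon the listing early; closing it after a full
	// drain is harmless because the producer goroutine has already exited.
	cancel := make(chan struct{})
	defer close(cancel)

	snapshots, errs := client.ListComplete(cancel)

	count := 0
	for range snapshots { // the channel is closed once the last page has been sent
		count++
	}
	// errs is buffered (capacity 1) and closed together with the result channel,
	// so a single read after draining is sufficient.
	if err := <-errs; err != nil {
		log.Fatalf("listing snapshots failed: %v", err)
	}
	fmt.Printf("subscription has %d snapshots\n", count)
}
```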
+func (client SnapshotsClient) ListByResourceGroupNextResults(lastResults SnapshotList) (result SnapshotList, err error) { + req, err := lastResults.SnapshotListPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.SnapshotsClient", "ListByResourceGroup", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.SnapshotsClient", "ListByResourceGroup", resp, "Failure sending next results request") + } + + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "ListByResourceGroup", resp, "Failure responding to next results request") + } + + return +} + +// ListByResourceGroupComplete gets all elements from the list without paging. +func (client SnapshotsClient) ListByResourceGroupComplete(resourceGroupName string, cancel <-chan struct{}) (<-chan Snapshot, <-chan error) { + resultChan := make(chan Snapshot) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.ListByResourceGroup(resourceGroupName) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListByResourceGroupNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} + +// RevokeAccess revokes access to a snapshot. This method may poll for completion. Polling can be canceled by passing +// the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. snapshotName is the name of the snapshot within the given +// subscription and resource group. +func (client SnapshotsClient) RevokeAccess(resourceGroupName string, snapshotName string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { + resultChan := make(chan OperationStatusResponse, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result OperationStatusResponse + defer func() { + if err != nil { + errChan <- err + } + resultChan <- result + close(resultChan) + close(errChan) + }() + req, err := client.RevokeAccessPreparer(resourceGroupName, snapshotName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "RevokeAccess", nil, "Failure preparing request") + return + } + + resp, err := client.RevokeAccessSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "RevokeAccess", resp, "Failure sending request") + return + } + + result, err = client.RevokeAccessResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "RevokeAccess", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// RevokeAccessPreparer prepares the RevokeAccess request. 
+func (client SnapshotsClient) RevokeAccessPreparer(resourceGroupName string, snapshotName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "snapshotName": autorest.Encode("path", snapshotName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/endGetAccess", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// RevokeAccessSender sends the RevokeAccess request. The method will close the +// http.Response Body if it receives an error. +func (client SnapshotsClient) RevokeAccessSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// RevokeAccessResponder handles the response to the RevokeAccess request. The method always +// closes the http.Response Body. +func (client SnapshotsClient) RevokeAccessResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Update updates (patches) a snapshot. This method may poll for completion. Polling can be canceled by passing the +// cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. snapshotName is the name of the snapshot within the given +// subscription and resource group. snapshot is snapshot object supplied in the body of the Patch snapshot operation. +func (client SnapshotsClient) Update(resourceGroupName string, snapshotName string, snapshot SnapshotUpdate, cancel <-chan struct{}) (<-chan Snapshot, <-chan error) { + resultChan := make(chan Snapshot, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result Snapshot + defer func() { + if err != nil { + errChan <- err + } + resultChan <- result + close(resultChan) + close(errChan) + }() + req, err := client.UpdatePreparer(resourceGroupName, snapshotName, snapshot, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "Update", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "Update", resp, "Failure sending request") + return + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "Update", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// UpdatePreparer prepares the Update request. 
+func (client SnapshotsClient) UpdatePreparer(resourceGroupName string, snapshotName string, snapshot SnapshotUpdate, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "snapshotName": autorest.Encode("path", snapshotName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}", pathParameters), + autorest.WithJSON(snapshot), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client SnapshotsClient) UpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client SnapshotsClient) UpdateResponder(resp *http.Response) (result Snapshot, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/usage.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/usage.go index 97c53e0efd7f..86e9ebabf3b7 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/usage.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/usage.go @@ -14,9 +14,8 @@ package compute // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/Azure/go-autorest/autorest" @@ -25,7 +24,7 @@ import ( "net/http" ) -// UsageClient is the the Compute Management Client. +// UsageClient is the compute Client type UsageClient struct { ManagementClient } @@ -40,9 +39,8 @@ func NewUsageClientWithBaseURI(baseURI string, subscriptionID string) UsageClien return UsageClient{NewWithBaseURI(baseURI, subscriptionID)} } -// List gets, for the specified location, the current compute resource usage -// information as well as the limits for compute resources under the -// subscription. +// List gets, for the specified location, the current compute resource usage information as well as the limits for +// compute resources under the subscription. // // location is the location for which resource usage is queried. 
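For callers that prefer explicit paging over the channel-based `ListComplete` added further down in this file, the `List` / `ListNextResults` pair can be driven manually via the `NextLink` field. A sketch, assuming an already authorized `compute.UsageClient`:

```go
package example

import (
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/arm/compute"
)

// countUsageEntries walks every page of usage results for a location by hand,
// mirroring the NextLink loop that ListComplete performs internally.
func countUsageEntries(client compute.UsageClient, location string) {
	page, err := client.List(location)
	if err != nil {
		log.Fatalf("List failed: %v", err)
	}

	total := 0
	for {
		if page.Value != nil {
			total += len(*page.Value)
		}
		if page.NextLink == nil {
			break // no further pages
		}
		if page, err = client.ListNextResults(page); err != nil {
			log.Fatalf("fetching next page failed: %v", err)
		}
	}
	fmt.Printf("%d usage entries reported for %s\n", total, location)
}
```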
func (client UsageClient) List(location string) (result ListUsagesResult, err error) { @@ -80,7 +78,7 @@ func (client UsageClient) ListPreparer(location string) (*http.Request, error) { "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -135,3 +133,48 @@ func (client UsageClient) ListNextResults(lastResults ListUsagesResult) (result return } + +// ListComplete gets all elements from the list without paging. +func (client UsageClient) ListComplete(location string, cancel <-chan struct{}) (<-chan Usage, <-chan error) { + resultChan := make(chan Usage) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List(location) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/version.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/version.go index a5318ebf7983..2f9cac6f3bc7 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/version.go @@ -14,16 +14,15 @@ package compute // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. // UserAgent returns the UserAgent string to use when sending http.Requests. func UserAgent() string { - return "Azure-SDK-For-Go/v10.0.2-beta arm-compute/2016-04-30-preview" + return "Azure-SDK-For-Go/v11.0.0-beta arm-compute/" } // Version returns the semantic version (see http://semver.org) of the client. func Version() string { - return "v10.0.2-beta" + return "v11.0.0-beta" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensionimages.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensionimages.go index fcd122704284..c486f63ced50 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensionimages.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensionimages.go @@ -14,9 +14,8 @@ package compute // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
import ( "github.com/Azure/go-autorest/autorest" @@ -24,25 +23,25 @@ import ( "net/http" ) -// VirtualMachineExtensionImagesClient is the the Compute Management Client. +// VirtualMachineExtensionImagesClient is the compute Client type VirtualMachineExtensionImagesClient struct { ManagementClient } -// NewVirtualMachineExtensionImagesClient creates an instance of the -// VirtualMachineExtensionImagesClient client. +// NewVirtualMachineExtensionImagesClient creates an instance of the VirtualMachineExtensionImagesClient client. func NewVirtualMachineExtensionImagesClient(subscriptionID string) VirtualMachineExtensionImagesClient { return NewVirtualMachineExtensionImagesClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewVirtualMachineExtensionImagesClientWithBaseURI creates an instance of the -// VirtualMachineExtensionImagesClient client. +// NewVirtualMachineExtensionImagesClientWithBaseURI creates an instance of the VirtualMachineExtensionImagesClient +// client. func NewVirtualMachineExtensionImagesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineExtensionImagesClient { return VirtualMachineExtensionImagesClient{NewWithBaseURI(baseURI, subscriptionID)} } // Get gets a virtual machine extension image. // +// location is the name of a supported Azure region. func (client VirtualMachineExtensionImagesClient) Get(location string, publisherName string, typeParameter string, version string) (result VirtualMachineExtensionImage, err error) { req, err := client.GetPreparer(location, publisherName, typeParameter, version) if err != nil { @@ -75,7 +74,7 @@ func (client VirtualMachineExtensionImagesClient) GetPreparer(location string, p "version": autorest.Encode("path", version), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -109,6 +108,7 @@ func (client VirtualMachineExtensionImagesClient) GetResponder(resp *http.Respon // ListTypes gets a list of virtual machine extension image types. // +// location is the name of a supported Azure region. func (client VirtualMachineExtensionImagesClient) ListTypes(location string, publisherName string) (result ListVirtualMachineExtensionImage, err error) { req, err := client.ListTypesPreparer(location, publisherName) if err != nil { @@ -139,7 +139,7 @@ func (client VirtualMachineExtensionImagesClient) ListTypesPreparer(location str "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -173,7 +173,7 @@ func (client VirtualMachineExtensionImagesClient) ListTypesResponder(resp *http. // ListVersions gets a list of virtual machine extension image versions. // -// filter is the filter to apply on the operation. +// location is the name of a supported Azure region. filter is the filter to apply on the operation. 
func (client VirtualMachineExtensionImagesClient) ListVersions(location string, publisherName string, typeParameter string, filter string, top *int32, orderby string) (result ListVirtualMachineExtensionImage, err error) { req, err := client.ListVersionsPreparer(location, publisherName, typeParameter, filter, top, orderby) if err != nil { @@ -205,7 +205,7 @@ func (client VirtualMachineExtensionImagesClient) ListVersionsPreparer(location "type": autorest.Encode("path", typeParameter), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensions.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensions.go index 7a876cfef1a2..e8f0b27f1b00 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensions.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensions.go @@ -14,9 +14,8 @@ package compute // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/Azure/go-autorest/autorest" @@ -24,33 +23,28 @@ import ( "net/http" ) -// VirtualMachineExtensionsClient is the the Compute Management Client. +// VirtualMachineExtensionsClient is the compute Client type VirtualMachineExtensionsClient struct { ManagementClient } -// NewVirtualMachineExtensionsClient creates an instance of the -// VirtualMachineExtensionsClient client. +// NewVirtualMachineExtensionsClient creates an instance of the VirtualMachineExtensionsClient client. func NewVirtualMachineExtensionsClient(subscriptionID string) VirtualMachineExtensionsClient { return NewVirtualMachineExtensionsClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewVirtualMachineExtensionsClientWithBaseURI creates an instance of the -// VirtualMachineExtensionsClient client. +// NewVirtualMachineExtensionsClientWithBaseURI creates an instance of the VirtualMachineExtensionsClient client. func NewVirtualMachineExtensionsClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineExtensionsClient { return VirtualMachineExtensionsClient{NewWithBaseURI(baseURI, subscriptionID)} } -// CreateOrUpdate the operation to create or update the extension. This method -// may poll for completion. Polling can be canceled by passing the cancel -// channel argument. The channel will be used to cancel polling and any -// outstanding HTTP requests. +// CreateOrUpdate the operation to create or update the extension. This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP +// requests. // -// resourceGroupName is the name of the resource group. VMName is the name of -// the virtual machine where the extension should be create or updated. -// VMExtensionName is the name of the virtual machine extension. -// extensionParameters is parameters supplied to the Create Virtual Machine -// Extension operation. +// resourceGroupName is the name of the resource group. 
VMName is the name of the virtual machine where the extension +// should be create or updated. VMExtensionName is the name of the virtual machine extension. extensionParameters is +// parameters supplied to the Create Virtual Machine Extension operation. func (client VirtualMachineExtensionsClient) CreateOrUpdate(resourceGroupName string, VMName string, VMExtensionName string, extensionParameters VirtualMachineExtension, cancel <-chan struct{}) (<-chan VirtualMachineExtension, <-chan error) { resultChan := make(chan VirtualMachineExtension, 1) errChan := make(chan error, 1) @@ -58,8 +52,10 @@ func (client VirtualMachineExtensionsClient) CreateOrUpdate(resourceGroupName st var err error var result VirtualMachineExtension defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -93,7 +89,7 @@ func (client VirtualMachineExtensionsClient) CreateOrUpdatePreparer(resourceGrou "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -129,14 +125,11 @@ func (client VirtualMachineExtensionsClient) CreateOrUpdateResponder(resp *http. return } -// Delete the operation to delete the extension. This method may poll for -// completion. Polling can be canceled by passing the cancel channel argument. -// The channel will be used to cancel polling and any outstanding HTTP -// requests. +// Delete the operation to delete the extension. This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. VMName is the name of -// the virtual machine where the extension should be deleted. VMExtensionName -// is the name of the virtual machine extension. +// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine where the extension +// should be deleted. VMExtensionName is the name of the virtual machine extension. func (client VirtualMachineExtensionsClient) Delete(resourceGroupName string, VMName string, VMExtensionName string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { resultChan := make(chan OperationStatusResponse, 1) errChan := make(chan error, 1) @@ -144,8 +137,10 @@ func (client VirtualMachineExtensionsClient) Delete(resourceGroupName string, VM var err error var result OperationStatusResponse defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -179,7 +174,7 @@ func (client VirtualMachineExtensionsClient) DeletePreparer(resourceGroupName st "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -215,9 +210,8 @@ func (client VirtualMachineExtensionsClient) DeleteResponder(resp *http.Response // Get the operation to get the extension. // -// resourceGroupName is the name of the resource group. VMName is the name of -// the virtual machine containing the extension. VMExtensionName is the name of -// the virtual machine extension. expand is the expand expression to apply on +// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine containing the +// extension. 
VMExtensionName is the name of the virtual machine extension. expand is the expand expression to apply on // the operation. func (client VirtualMachineExtensionsClient) Get(resourceGroupName string, VMName string, VMExtensionName string, expand string) (result VirtualMachineExtension, err error) { req, err := client.GetPreparer(resourceGroupName, VMName, VMExtensionName, expand) @@ -250,7 +244,7 @@ func (client VirtualMachineExtensionsClient) GetPreparer(resourceGroupName strin "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineimages.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineimages.go index 6c090568f520..9eda416e9584 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineimages.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineimages.go @@ -14,9 +14,8 @@ package compute // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/Azure/go-autorest/autorest" @@ -24,28 +23,25 @@ import ( "net/http" ) -// VirtualMachineImagesClient is the the Compute Management Client. +// VirtualMachineImagesClient is the compute Client type VirtualMachineImagesClient struct { ManagementClient } -// NewVirtualMachineImagesClient creates an instance of the -// VirtualMachineImagesClient client. +// NewVirtualMachineImagesClient creates an instance of the VirtualMachineImagesClient client. func NewVirtualMachineImagesClient(subscriptionID string) VirtualMachineImagesClient { return NewVirtualMachineImagesClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewVirtualMachineImagesClientWithBaseURI creates an instance of the -// VirtualMachineImagesClient client. +// NewVirtualMachineImagesClientWithBaseURI creates an instance of the VirtualMachineImagesClient client. func NewVirtualMachineImagesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineImagesClient { return VirtualMachineImagesClient{NewWithBaseURI(baseURI, subscriptionID)} } // Get gets a virtual machine image. // -// location is the name of a supported Azure region. publisherName is a valid -// image publisher. offer is a valid image publisher offer. skus is a valid -// image SKU. version is a valid image SKU version. +// location is the name of a supported Azure region. publisherName is a valid image publisher. offer is a valid image +// publisher offer. skus is a valid image SKU. version is a valid image SKU version. 
func (client VirtualMachineImagesClient) Get(location string, publisherName string, offer string, skus string, version string) (result VirtualMachineImage, err error) { req, err := client.GetPreparer(location, publisherName, offer, skus, version) if err != nil { @@ -79,7 +75,7 @@ func (client VirtualMachineImagesClient) GetPreparer(location string, publisherN "version": autorest.Encode("path", version), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -111,12 +107,10 @@ func (client VirtualMachineImagesClient) GetResponder(resp *http.Response) (resu return } -// List gets a list of all virtual machine image versions for the specified -// location, publisher, offer, and SKU. +// List gets a list of all virtual machine image versions for the specified location, publisher, offer, and SKU. // -// location is the name of a supported Azure region. publisherName is a valid -// image publisher. offer is a valid image publisher offer. skus is a valid -// image SKU. filter is the filter to apply on the operation. +// location is the name of a supported Azure region. publisherName is a valid image publisher. offer is a valid image +// publisher offer. skus is a valid image SKU. filter is the filter to apply on the operation. func (client VirtualMachineImagesClient) List(location string, publisherName string, offer string, skus string, filter string, top *int32, orderby string) (result ListVirtualMachineImageResource, err error) { req, err := client.ListPreparer(location, publisherName, offer, skus, filter, top, orderby) if err != nil { @@ -149,7 +143,7 @@ func (client VirtualMachineImagesClient) ListPreparer(location string, publisher "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -190,11 +184,9 @@ func (client VirtualMachineImagesClient) ListResponder(resp *http.Response) (res return } -// ListOffers gets a list of virtual machine image offers for the specified -// location and publisher. +// ListOffers gets a list of virtual machine image offers for the specified location and publisher. // -// location is the name of a supported Azure region. publisherName is a valid -// image publisher. +// location is the name of a supported Azure region. publisherName is a valid image publisher. func (client VirtualMachineImagesClient) ListOffers(location string, publisherName string) (result ListVirtualMachineImageResource, err error) { req, err := client.ListOffersPreparer(location, publisherName) if err != nil { @@ -225,7 +217,7 @@ func (client VirtualMachineImagesClient) ListOffersPreparer(location string, pub "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -257,8 +249,7 @@ func (client VirtualMachineImagesClient) ListOffersResponder(resp *http.Response return } -// ListPublishers gets a list of virtual machine image publishers for the -// specified Azure location. +// ListPublishers gets a list of virtual machine image publishers for the specified Azure location. // // location is the name of a supported Azure region. 
func (client VirtualMachineImagesClient) ListPublishers(location string) (result ListVirtualMachineImageResource, err error) { @@ -290,7 +281,7 @@ func (client VirtualMachineImagesClient) ListPublishersPreparer(location string) "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -322,11 +313,10 @@ func (client VirtualMachineImagesClient) ListPublishersResponder(resp *http.Resp return } -// ListSkus gets a list of virtual machine image SKUs for the specified -// location, publisher, and offer. +// ListSkus gets a list of virtual machine image SKUs for the specified location, publisher, and offer. // -// location is the name of a supported Azure region. publisherName is a valid -// image publisher. offer is a valid image publisher offer. +// location is the name of a supported Azure region. publisherName is a valid image publisher. offer is a valid image +// publisher offer. func (client VirtualMachineImagesClient) ListSkus(location string, publisherName string, offer string) (result ListVirtualMachineImageResource, err error) { req, err := client.ListSkusPreparer(location, publisherName, offer) if err != nil { @@ -358,7 +348,7 @@ func (client VirtualMachineImagesClient) ListSkusPreparer(location string, publi "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineruncommands.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineruncommands.go new file mode 100644 index 000000000000..f422c92f58c0 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineruncommands.go @@ -0,0 +1,250 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "net/http" +) + +// VirtualMachineRunCommandsClient is the compute Client +type VirtualMachineRunCommandsClient struct { + ManagementClient +} + +// NewVirtualMachineRunCommandsClient creates an instance of the VirtualMachineRunCommandsClient client. +func NewVirtualMachineRunCommandsClient(subscriptionID string) VirtualMachineRunCommandsClient { + return NewVirtualMachineRunCommandsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualMachineRunCommandsClientWithBaseURI creates an instance of the VirtualMachineRunCommandsClient client. 
+func NewVirtualMachineRunCommandsClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineRunCommandsClient { + return VirtualMachineRunCommandsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get gets specific run command for a subscription in a location. +// +// location is the location upon which run commands is queried. commandID is the command id. +func (client VirtualMachineRunCommandsClient) Get(location string, commandID string) (result RunCommandDocument, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: location, + Constraints: []validation.Constraint{{Target: "location", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "compute.VirtualMachineRunCommandsClient", "Get") + } + + req, err := client.GetPreparer(location, commandID) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client VirtualMachineRunCommandsClient) GetPreparer(location string, commandID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "commandId": autorest.Encode("path", commandID), + "location": autorest.Encode("path", location), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/runCommands/{commandId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineRunCommandsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client VirtualMachineRunCommandsClient) GetResponder(resp *http.Response) (result RunCommandDocument, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists all available run commands for a subscription in a location. +// +// location is the location upon which run commands is queried. 
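A usage sketch for the List operation documented above. Note that `location` is validated client-side against `^[-\w\._]+$`, so it must be the short region name (for example "westus2" rather than "West US 2"). The client is assumed to already be constructed and authorized:

```go
package example

import (
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/arm/compute"
)

// listRunCommands prints how many run commands are available in a location,
// looking only at the first page of results.
func listRunCommands(client compute.VirtualMachineRunCommandsClient, location string) {
	result, err := client.List(location)
	if err != nil {
		log.Fatalf("listing run commands failed: %v", err)
	}
	if result.Value != nil {
		fmt.Printf("%d run commands available in %s (first page)\n", len(*result.Value), location)
	}
}
```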
+func (client VirtualMachineRunCommandsClient) List(location string) (result RunCommandListResult, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: location, + Constraints: []validation.Constraint{{Target: "location", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "compute.VirtualMachineRunCommandsClient", "List") + } + + req, err := client.ListPreparer(location) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client VirtualMachineRunCommandsClient) ListPreparer(location string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/runCommands", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineRunCommandsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client VirtualMachineRunCommandsClient) ListResponder(resp *http.Response) (result RunCommandListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client VirtualMachineRunCommandsClient) ListNextResults(lastResults RunCommandListResult) (result RunCommandListResult, err error) { + req, err := lastResults.RunCommandListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsClient", "List", resp, "Failure responding to next results request") + } + + return +} + +// ListComplete gets all elements from the list without paging. +func (client VirtualMachineRunCommandsClient) ListComplete(location string, cancel <-chan struct{}) (<-chan RunCommandDocumentBase, <-chan error) { + resultChan := make(chan RunCommandDocumentBase) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List(location) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachines.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachines.go index 686b7ace2327..ad1d829d14dc 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachines.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachines.go @@ -14,9 +14,8 @@ package compute // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/Azure/go-autorest/autorest" @@ -25,32 +24,27 @@ import ( "net/http" ) -// VirtualMachinesClient is the the Compute Management Client. +// VirtualMachinesClient is the compute Client type VirtualMachinesClient struct { ManagementClient } -// NewVirtualMachinesClient creates an instance of the VirtualMachinesClient -// client. +// NewVirtualMachinesClient creates an instance of the VirtualMachinesClient client. func NewVirtualMachinesClient(subscriptionID string) VirtualMachinesClient { return NewVirtualMachinesClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewVirtualMachinesClientWithBaseURI creates an instance of the -// VirtualMachinesClient client. +// NewVirtualMachinesClientWithBaseURI creates an instance of the VirtualMachinesClient client. 
func NewVirtualMachinesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachinesClient { return VirtualMachinesClient{NewWithBaseURI(baseURI, subscriptionID)} } -// Capture captures the VM by copying virtual hard disks of the VM and outputs -// a template that can be used to create similar VMs. This method may poll for -// completion. Polling can be canceled by passing the cancel channel argument. -// The channel will be used to cancel polling and any outstanding HTTP -// requests. +// Capture captures the VM by copying virtual hard disks of the VM and outputs a template that can be used to create +// similar VMs. This method may poll for completion. Polling can be canceled by passing the cancel channel argument. +// The channel will be used to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. VMName is the name of -// the virtual machine. parameters is parameters supplied to the Capture -// Virtual Machine operation. +// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine. parameters is +// parameters supplied to the Capture Virtual Machine operation. func (client VirtualMachinesClient) Capture(resourceGroupName string, VMName string, parameters VirtualMachineCaptureParameters, cancel <-chan struct{}) (<-chan VirtualMachineCaptureResult, <-chan error) { resultChan := make(chan VirtualMachineCaptureResult, 1) errChan := make(chan error, 1) @@ -69,8 +63,10 @@ func (client VirtualMachinesClient) Capture(resourceGroupName string, VMName str var err error var result VirtualMachineCaptureResult defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -103,7 +99,7 @@ func (client VirtualMachinesClient) CapturePreparer(resourceGroupName string, VM "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -139,14 +135,11 @@ func (client VirtualMachinesClient) CaptureResponder(resp *http.Response) (resul return } -// ConvertToManagedDisks converts virtual machine disks from blob-based to -// managed disks. Virtual machine must be stop-deallocated before invoking this -// operation. This method may poll for completion. Polling can be canceled by -// passing the cancel channel argument. The channel will be used to cancel -// polling and any outstanding HTTP requests. +// ConvertToManagedDisks converts virtual machine disks from blob-based to managed disks. Virtual machine must be +// stop-deallocated before invoking this operation. This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. VMName is the name of -// the virtual machine. +// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine. 
func (client VirtualMachinesClient) ConvertToManagedDisks(resourceGroupName string, VMName string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { resultChan := make(chan OperationStatusResponse, 1) errChan := make(chan error, 1) @@ -154,8 +147,10 @@ func (client VirtualMachinesClient) ConvertToManagedDisks(resourceGroupName stri var err error var result OperationStatusResponse defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -188,7 +183,7 @@ func (client VirtualMachinesClient) ConvertToManagedDisksPreparer(resourceGroupN "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -222,14 +217,12 @@ func (client VirtualMachinesClient) ConvertToManagedDisksResponder(resp *http.Re return } -// CreateOrUpdate the operation to create or update a virtual machine. This -// method may poll for completion. Polling can be canceled by passing the -// cancel channel argument. The channel will be used to cancel polling and any -// outstanding HTTP requests. +// CreateOrUpdate the operation to create or update a virtual machine. This method may poll for completion. Polling can +// be canceled by passing the cancel channel argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. // -// resourceGroupName is the name of the resource group. VMName is the name of -// the virtual machine. parameters is parameters supplied to the Create Virtual -// Machine operation. +// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine. parameters is +// parameters supplied to the Create Virtual Machine operation. func (client VirtualMachinesClient) CreateOrUpdate(resourceGroupName string, VMName string, parameters VirtualMachine, cancel <-chan struct{}) (<-chan VirtualMachine, <-chan error) { resultChan := make(chan VirtualMachine, 1) errChan := make(chan error, 1) @@ -261,8 +254,10 @@ func (client VirtualMachinesClient) CreateOrUpdate(resourceGroupName string, VMN var err error var result VirtualMachine defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -295,7 +290,7 @@ func (client VirtualMachinesClient) CreateOrUpdatePreparer(resourceGroupName str "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -331,14 +326,11 @@ func (client VirtualMachinesClient) CreateOrUpdateResponder(resp *http.Response) return } -// Deallocate shuts down the virtual machine and releases the compute -// resources. You are not billed for the compute resources that this virtual -// machine uses. This method may poll for completion. Polling can be canceled -// by passing the cancel channel argument. The channel will be used to cancel -// polling and any outstanding HTTP requests. +// Deallocate shuts down the virtual machine and releases the compute resources. You are not billed for the compute +// resources that this virtual machine uses. This method may poll for completion. Polling can be canceled by passing +// the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. 
VMName is the name of -// the virtual machine. +// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine. func (client VirtualMachinesClient) Deallocate(resourceGroupName string, VMName string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { resultChan := make(chan OperationStatusResponse, 1) errChan := make(chan error, 1) @@ -346,8 +338,10 @@ func (client VirtualMachinesClient) Deallocate(resourceGroupName string, VMName var err error var result OperationStatusResponse defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -380,7 +374,7 @@ func (client VirtualMachinesClient) DeallocatePreparer(resourceGroupName string, "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -414,13 +408,10 @@ func (client VirtualMachinesClient) DeallocateResponder(resp *http.Response) (re return } -// Delete the operation to delete a virtual machine. This method may poll for -// completion. Polling can be canceled by passing the cancel channel argument. -// The channel will be used to cancel polling and any outstanding HTTP -// requests. +// Delete the operation to delete a virtual machine. This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. VMName is the name of -// the virtual machine. +// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine. func (client VirtualMachinesClient) Delete(resourceGroupName string, VMName string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { resultChan := make(chan OperationStatusResponse, 1) errChan := make(chan error, 1) @@ -428,8 +419,10 @@ func (client VirtualMachinesClient) Delete(resourceGroupName string, VMName stri var err error var result OperationStatusResponse defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -462,7 +455,7 @@ func (client VirtualMachinesClient) DeletePreparer(resourceGroupName string, VMN "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -498,8 +491,7 @@ func (client VirtualMachinesClient) DeleteResponder(resp *http.Response) (result // Generalize sets the state of the virtual machine to generalized. // -// resourceGroupName is the name of the resource group. VMName is the name of -// the virtual machine. +// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine. 
func (client VirtualMachinesClient) Generalize(resourceGroupName string, VMName string) (result OperationStatusResponse, err error) { req, err := client.GeneralizePreparer(resourceGroupName, VMName) if err != nil { @@ -530,7 +522,7 @@ func (client VirtualMachinesClient) GeneralizePreparer(resourceGroupName string, "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -562,12 +554,10 @@ func (client VirtualMachinesClient) GeneralizeResponder(resp *http.Response) (re return } -// Get retrieves information about the model view or the instance view of a -// virtual machine. +// Get retrieves information about the model view or the instance view of a virtual machine. // -// resourceGroupName is the name of the resource group. VMName is the name of -// the virtual machine. expand is the expand expression to apply on the -// operation. +// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine. expand is the expand +// expression to apply on the operation. func (client VirtualMachinesClient) Get(resourceGroupName string, VMName string, expand InstanceViewTypes) (result VirtualMachine, err error) { req, err := client.GetPreparer(resourceGroupName, VMName, expand) if err != nil { @@ -598,7 +588,7 @@ func (client VirtualMachinesClient) GetPreparer(resourceGroupName string, VMName "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -633,9 +623,73 @@ func (client VirtualMachinesClient) GetResponder(resp *http.Response) (result Vi return } -// List lists all of the virtual machines in the specified resource group. Use -// the nextLink property in the response to get the next page of virtual -// machines. +// InstanceView retrieves information about the run-time state of a virtual machine. +// +// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine. +func (client VirtualMachinesClient) InstanceView(resourceGroupName string, VMName string) (result VirtualMachineInstanceView, err error) { + req, err := client.InstanceViewPreparer(resourceGroupName, VMName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "InstanceView", nil, "Failure preparing request") + return + } + + resp, err := client.InstanceViewSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "InstanceView", resp, "Failure sending request") + return + } + + result, err = client.InstanceViewResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "InstanceView", resp, "Failure responding to request") + } + + return +} + +// InstanceViewPreparer prepares the InstanceView request. 
+func (client VirtualMachinesClient) InstanceViewPreparer(resourceGroupName string, VMName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmName": autorest.Encode("path", VMName), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/instanceView", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// InstanceViewSender sends the InstanceView request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) InstanceViewSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// InstanceViewResponder handles the response to the InstanceView request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) InstanceViewResponder(resp *http.Response) (result VirtualMachineInstanceView, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists all of the virtual machines in the specified resource group. Use the nextLink property in the response to +// get the next page of virtual machines. // // resourceGroupName is the name of the resource group. func (client VirtualMachinesClient) List(resourceGroupName string) (result VirtualMachineListResult, err error) { @@ -667,7 +721,7 @@ func (client VirtualMachinesClient) ListPreparer(resourceGroupName string) (*htt "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -723,9 +777,53 @@ func (client VirtualMachinesClient) ListNextResults(lastResults VirtualMachineLi return } -// ListAll lists all of the virtual machines in the specified subscription. Use -// the nextLink property in the response to get the next page of virtual -// machines. +// ListComplete gets all elements from the list without paging. 
+func (client VirtualMachinesClient) ListComplete(resourceGroupName string, cancel <-chan struct{}) (<-chan VirtualMachine, <-chan error) { + resultChan := make(chan VirtualMachine) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List(resourceGroupName) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} + +// ListAll lists all of the virtual machines in the specified subscription. Use the nextLink property in the response +// to get the next page of virtual machines. func (client VirtualMachinesClient) ListAll() (result VirtualMachineListResult, err error) { req, err := client.ListAllPreparer() if err != nil { @@ -754,7 +852,7 @@ func (client VirtualMachinesClient) ListAllPreparer() (*http.Request, error) { "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -810,11 +908,54 @@ func (client VirtualMachinesClient) ListAllNextResults(lastResults VirtualMachin return } -// ListAvailableSizes lists all available virtual machine sizes to which the -// specified virtual machine can be resized. +// ListAllComplete gets all elements from the list without paging. +func (client VirtualMachinesClient) ListAllComplete(cancel <-chan struct{}) (<-chan VirtualMachine, <-chan error) { + resultChan := make(chan VirtualMachine) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.ListAll() + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListAllNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} + +// ListAvailableSizes lists all available virtual machine sizes to which the specified virtual machine can be resized. // -// resourceGroupName is the name of the resource group. VMName is the name of -// the virtual machine. +// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine. 
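// Illustrative sketch of the new streaming pager and InstanceView call added above
// (assumed imports: "fmt", "github.com/Azure/azure-sdk-for-go/arm/compute"; the client and
// resource group are placeholders, and the Statuses/DisplayStatus field names are assumed
// from the instance-view payload shape). ListComplete follows NextLink internally and sends
// each VirtualMachine on an unbuffered channel until the listing is exhausted or the cancel
// channel is closed.
func printVMStatuses(client compute.VirtualMachinesClient, resourceGroup string) error {
	cancel := make(chan struct{})
	defer close(cancel) // stops the paging goroutine if we return early
	vmChan, errChan := client.ListComplete(resourceGroup, cancel)
	for vm := range vmChan {
		if vm.Name == nil {
			continue
		}
		// InstanceView returns the run-time state of a single machine.
		iv, err := client.InstanceView(resourceGroup, *vm.Name)
		if err != nil {
			return err
		}
		if iv.Statuses != nil {
			for _, s := range *iv.Statuses {
				if s.DisplayStatus != nil {
					fmt.Printf("%s: %s\n", *vm.Name, *s.DisplayStatus)
				}
			}
		}
	}
	// After vmChan is closed, errChan holds at most one buffered error.
	return <-errChan
}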
func (client VirtualMachinesClient) ListAvailableSizes(resourceGroupName string, VMName string) (result VirtualMachineSizeListResult, err error) { req, err := client.ListAvailableSizesPreparer(resourceGroupName, VMName) if err != nil { @@ -845,7 +986,7 @@ func (client VirtualMachinesClient) ListAvailableSizesPreparer(resourceGroupName "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -877,14 +1018,94 @@ func (client VirtualMachinesClient) ListAvailableSizesResponder(resp *http.Respo return } -// PowerOff the operation to power off (stop) a virtual machine. The virtual -// machine can be restarted with the same provisioned resources. You are still -// charged for this virtual machine. This method may poll for completion. -// Polling can be canceled by passing the cancel channel argument. The channel -// will be used to cancel polling and any outstanding HTTP requests. +// PerformMaintenance the operation to perform maintenance on a virtual machine. This method may poll for completion. +// Polling can be canceled by passing the cancel channel argument. The channel will be used to cancel polling and any +// outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine. +func (client VirtualMachinesClient) PerformMaintenance(resourceGroupName string, VMName string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { + resultChan := make(chan OperationStatusResponse, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result OperationStatusResponse + defer func() { + if err != nil { + errChan <- err + } + resultChan <- result + close(resultChan) + close(errChan) + }() + req, err := client.PerformMaintenancePreparer(resourceGroupName, VMName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "PerformMaintenance", nil, "Failure preparing request") + return + } + + resp, err := client.PerformMaintenanceSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "PerformMaintenance", resp, "Failure sending request") + return + } + + result, err = client.PerformMaintenanceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "PerformMaintenance", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// PerformMaintenancePreparer prepares the PerformMaintenance request. 
+func (client VirtualMachinesClient) PerformMaintenancePreparer(resourceGroupName string, VMName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmName": autorest.Encode("path", VMName), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/performMaintenance", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// PerformMaintenanceSender sends the PerformMaintenance request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) PerformMaintenanceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// PerformMaintenanceResponder handles the response to the PerformMaintenance request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) PerformMaintenanceResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// PowerOff the operation to power off (stop) a virtual machine. The virtual machine can be restarted with the same +// provisioned resources. You are still charged for this virtual machine. This method may poll for completion. Polling +// can be canceled by passing the cancel channel argument. The channel will be used to cancel polling and any +// outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. VMName is the name of -// the virtual machine. +// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine. func (client VirtualMachinesClient) PowerOff(resourceGroupName string, VMName string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { resultChan := make(chan OperationStatusResponse, 1) errChan := make(chan error, 1) @@ -892,8 +1113,10 @@ func (client VirtualMachinesClient) PowerOff(resourceGroupName string, VMName st var err error var result OperationStatusResponse defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -926,7 +1149,7 @@ func (client VirtualMachinesClient) PowerOffPreparer(resourceGroupName string, V "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -960,13 +1183,11 @@ func (client VirtualMachinesClient) PowerOffResponder(resp *http.Response) (resu return } -// Redeploy the operation to redeploy a virtual machine. This method may poll -// for completion. Polling can be canceled by passing the cancel channel -// argument. The channel will be used to cancel polling and any outstanding -// HTTP requests. 
+// Redeploy the operation to redeploy a virtual machine. This method may poll for completion. Polling can be canceled +// by passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP +// requests. // -// resourceGroupName is the name of the resource group. VMName is the name of -// the virtual machine. +// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine. func (client VirtualMachinesClient) Redeploy(resourceGroupName string, VMName string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { resultChan := make(chan OperationStatusResponse, 1) errChan := make(chan error, 1) @@ -974,8 +1195,10 @@ func (client VirtualMachinesClient) Redeploy(resourceGroupName string, VMName st var err error var result OperationStatusResponse defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -1008,7 +1231,7 @@ func (client VirtualMachinesClient) RedeployPreparer(resourceGroupName string, V "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1042,13 +1265,10 @@ func (client VirtualMachinesClient) RedeployResponder(resp *http.Response) (resu return } -// Restart the operation to restart a virtual machine. This method may poll for -// completion. Polling can be canceled by passing the cancel channel argument. -// The channel will be used to cancel polling and any outstanding HTTP -// requests. +// Restart the operation to restart a virtual machine. This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. VMName is the name of -// the virtual machine. +// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine. func (client VirtualMachinesClient) Restart(resourceGroupName string, VMName string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { resultChan := make(chan OperationStatusResponse, 1) errChan := make(chan error, 1) @@ -1056,8 +1276,10 @@ func (client VirtualMachinesClient) Restart(resourceGroupName string, VMName str var err error var result OperationStatusResponse defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -1090,7 +1312,7 @@ func (client VirtualMachinesClient) RestartPreparer(resourceGroupName string, VM "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1124,13 +1346,103 @@ func (client VirtualMachinesClient) RestartResponder(resp *http.Response) (resul return } -// Start the operation to start a virtual machine. This method may poll for -// completion. Polling can be canceled by passing the cancel channel argument. -// The channel will be used to cancel polling and any outstanding HTTP -// requests. +// RunCommand run command on the VM. This method may poll for completion. Polling can be canceled by passing the cancel +// channel argument. The channel will be used to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. 
VMName is the name of -// the virtual machine. +// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine. parameters is +// parameters supplied to the Run command operation. +func (client VirtualMachinesClient) RunCommand(resourceGroupName string, VMName string, parameters RunCommandInput, cancel <-chan struct{}) (<-chan RunCommandResult, <-chan error) { + resultChan := make(chan RunCommandResult, 1) + errChan := make(chan error, 1) + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.CommandID", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + errChan <- validation.NewErrorWithValidationError(err, "compute.VirtualMachinesClient", "RunCommand") + close(errChan) + close(resultChan) + return resultChan, errChan + } + + go func() { + var err error + var result RunCommandResult + defer func() { + if err != nil { + errChan <- err + } + resultChan <- result + close(resultChan) + close(errChan) + }() + req, err := client.RunCommandPreparer(resourceGroupName, VMName, parameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "RunCommand", nil, "Failure preparing request") + return + } + + resp, err := client.RunCommandSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "RunCommand", resp, "Failure sending request") + return + } + + result, err = client.RunCommandResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "RunCommand", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// RunCommandPreparer prepares the RunCommand request. +func (client VirtualMachinesClient) RunCommandPreparer(resourceGroupName string, VMName string, parameters RunCommandInput, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmName": autorest.Encode("path", VMName), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/runCommand", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// RunCommandSender sends the RunCommand request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) RunCommandSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// RunCommandResponder handles the response to the RunCommand request. The method always +// closes the http.Response Body. 
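// Illustrative sketch of calling the RunCommand API added above (its Responder follows below).
// Assumed: an authenticated compute.VirtualMachinesClient, the "RunShellScript" command ID,
// and "github.com/Azure/go-autorest/autorest/to" for pointer literals; only CommandID is
// required by the validation shown above, and everything else here is a placeholder.
func invokeRunCommand(client compute.VirtualMachinesClient, resourceGroup, vmName string) (compute.RunCommandResult, error) {
	input := compute.RunCommandInput{
		CommandID: to.StringPtr("RunShellScript"), // validated as non-nil before the request is sent
	}
	// nil cancel channel: let the long-running operation poll to completion.
	resultChan, errChan := client.RunCommand(resourceGroup, vmName, input, nil)
	// Validation failures and request errors both arrive on the buffered error channel.
	if err := <-errChan; err != nil {
		return compute.RunCommandResult{}, err
	}
	return <-resultChan, nil
}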
+func (client VirtualMachinesClient) RunCommandResponder(resp *http.Response) (result RunCommandResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Start the operation to start a virtual machine. This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine. func (client VirtualMachinesClient) Start(resourceGroupName string, VMName string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { resultChan := make(chan OperationStatusResponse, 1) errChan := make(chan error, 1) @@ -1138,8 +1450,10 @@ func (client VirtualMachinesClient) Start(resourceGroupName string, VMName strin var err error var result OperationStatusResponse defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -1172,7 +1486,7 @@ func (client VirtualMachinesClient) StartPreparer(resourceGroupName string, VMNa "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetextensions.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetextensions.go new file mode 100644 index 000000000000..9a198f148d20 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetextensions.go @@ -0,0 +1,416 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// VirtualMachineScaleSetExtensionsClient is the compute Client +type VirtualMachineScaleSetExtensionsClient struct { + ManagementClient +} + +// NewVirtualMachineScaleSetExtensionsClient creates an instance of the VirtualMachineScaleSetExtensionsClient client. +func NewVirtualMachineScaleSetExtensionsClient(subscriptionID string) VirtualMachineScaleSetExtensionsClient { + return NewVirtualMachineScaleSetExtensionsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualMachineScaleSetExtensionsClientWithBaseURI creates an instance of the +// VirtualMachineScaleSetExtensionsClient client. 
+func NewVirtualMachineScaleSetExtensionsClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineScaleSetExtensionsClient { + return VirtualMachineScaleSetExtensionsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate the operation to create or update an extension. This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP +// requests. +// +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set where the +// extension should be create or updated. vmssExtensionName is the name of the VM scale set extension. +// extensionParameters is parameters supplied to the Create VM scale set Extension operation. +func (client VirtualMachineScaleSetExtensionsClient) CreateOrUpdate(resourceGroupName string, VMScaleSetName string, vmssExtensionName string, extensionParameters VirtualMachineScaleSetExtension, cancel <-chan struct{}) (<-chan VirtualMachineScaleSetExtension, <-chan error) { + resultChan := make(chan VirtualMachineScaleSetExtension, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result VirtualMachineScaleSetExtension + defer func() { + if err != nil { + errChan <- err + } + resultChan <- result + close(resultChan) + close(errChan) + }() + req, err := client.CreateOrUpdatePreparer(resourceGroupName, VMScaleSetName, vmssExtensionName, extensionParameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client VirtualMachineScaleSetExtensionsClient) CreateOrUpdatePreparer(resourceGroupName string, VMScaleSetName string, vmssExtensionName string, extensionParameters VirtualMachineScaleSetExtension, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), + "vmssExtensionName": autorest.Encode("path", vmssExtensionName), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions/{vmssExtensionName}", pathParameters), + autorest.WithJSON(extensionParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. 
The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetExtensionsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetExtensionsClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualMachineScaleSetExtension, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete the operation to delete the extension. This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set where the +// extension should be deleted. vmssExtensionName is the name of the VM scale set extension. +func (client VirtualMachineScaleSetExtensionsClient) Delete(resourceGroupName string, VMScaleSetName string, vmssExtensionName string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { + resultChan := make(chan OperationStatusResponse, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result OperationStatusResponse + defer func() { + if err != nil { + errChan <- err + } + resultChan <- result + close(resultChan) + close(errChan) + }() + req, err := client.DeletePreparer(resourceGroupName, VMScaleSetName, vmssExtensionName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "Delete", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// DeletePreparer prepares the Delete request. 
+func (client VirtualMachineScaleSetExtensionsClient) DeletePreparer(resourceGroupName string, VMScaleSetName string, vmssExtensionName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), + "vmssExtensionName": autorest.Encode("path", vmssExtensionName), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions/{vmssExtensionName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetExtensionsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetExtensionsClient) DeleteResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get the operation to get the extension. +// +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set containing the +// extension. vmssExtensionName is the name of the VM scale set extension. expand is the expand expression to apply on +// the operation. +func (client VirtualMachineScaleSetExtensionsClient) Get(resourceGroupName string, VMScaleSetName string, vmssExtensionName string, expand string) (result VirtualMachineScaleSetExtension, err error) { + req, err := client.GetPreparer(resourceGroupName, VMScaleSetName, vmssExtensionName, expand) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client VirtualMachineScaleSetExtensionsClient) GetPreparer(resourceGroupName string, VMScaleSetName string, vmssExtensionName string, expand string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), + "vmssExtensionName": autorest.Encode("path", vmssExtensionName), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions/{vmssExtensionName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetExtensionsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetExtensionsClient) GetResponder(resp *http.Response) (result VirtualMachineScaleSetExtension, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a list of all extensions in a VM scale set. +// +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set containing the +// extension. +func (client VirtualMachineScaleSetExtensionsClient) List(resourceGroupName string, VMScaleSetName string) (result VirtualMachineScaleSetExtensionListResult, err error) { + req, err := client.ListPreparer(resourceGroupName, VMScaleSetName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client VirtualMachineScaleSetExtensionsClient) ListPreparer(resourceGroupName string, VMScaleSetName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetExtensionsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetExtensionsClient) ListResponder(resp *http.Response) (result VirtualMachineScaleSetExtensionListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client VirtualMachineScaleSetExtensionsClient) ListNextResults(lastResults VirtualMachineScaleSetExtensionListResult) (result VirtualMachineScaleSetExtensionListResult, err error) { + req, err := lastResults.VirtualMachineScaleSetExtensionListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "List", resp, "Failure responding to next results request") + } + + return +} + +// ListComplete gets all elements from the list without paging. 
+func (client VirtualMachineScaleSetExtensionsClient) ListComplete(resourceGroupName string, VMScaleSetName string, cancel <-chan struct{}) (<-chan VirtualMachineScaleSetExtension, <-chan error) { + resultChan := make(chan VirtualMachineScaleSetExtension) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List(resourceGroupName, VMScaleSetName) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetrollingupgrades.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetrollingupgrades.go new file mode 100644 index 000000000000..45cc6e5c51cb --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetrollingupgrades.go @@ -0,0 +1,271 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// VirtualMachineScaleSetRollingUpgradesClient is the compute Client +type VirtualMachineScaleSetRollingUpgradesClient struct { + ManagementClient +} + +// NewVirtualMachineScaleSetRollingUpgradesClient creates an instance of the +// VirtualMachineScaleSetRollingUpgradesClient client. +func NewVirtualMachineScaleSetRollingUpgradesClient(subscriptionID string) VirtualMachineScaleSetRollingUpgradesClient { + return NewVirtualMachineScaleSetRollingUpgradesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualMachineScaleSetRollingUpgradesClientWithBaseURI creates an instance of the +// VirtualMachineScaleSetRollingUpgradesClient client. +func NewVirtualMachineScaleSetRollingUpgradesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineScaleSetRollingUpgradesClient { + return VirtualMachineScaleSetRollingUpgradesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Cancel cancels the current virtual machine scale set rolling upgrade. This method may poll for completion. Polling +// can be canceled by passing the cancel channel argument. The channel will be used to cancel polling and any +// outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. 
VMScaleSetName is the name of the VM scale set. +func (client VirtualMachineScaleSetRollingUpgradesClient) Cancel(resourceGroupName string, VMScaleSetName string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { + resultChan := make(chan OperationStatusResponse, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result OperationStatusResponse + defer func() { + if err != nil { + errChan <- err + } + resultChan <- result + close(resultChan) + close(errChan) + }() + req, err := client.CancelPreparer(resourceGroupName, VMScaleSetName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesClient", "Cancel", nil, "Failure preparing request") + return + } + + resp, err := client.CancelSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesClient", "Cancel", resp, "Failure sending request") + return + } + + result, err = client.CancelResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesClient", "Cancel", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// CancelPreparer prepares the Cancel request. +func (client VirtualMachineScaleSetRollingUpgradesClient) CancelPreparer(resourceGroupName string, VMScaleSetName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/cancel", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CancelSender sends the Cancel request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetRollingUpgradesClient) CancelSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CancelResponder handles the response to the Cancel request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetRollingUpgradesClient) CancelResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetLatest gets the status of the latest virtual machine scale set rolling upgrade. +// +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. 
+func (client VirtualMachineScaleSetRollingUpgradesClient) GetLatest(resourceGroupName string, VMScaleSetName string) (result RollingUpgradeStatusInfo, err error) { + req, err := client.GetLatestPreparer(resourceGroupName, VMScaleSetName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesClient", "GetLatest", nil, "Failure preparing request") + return + } + + resp, err := client.GetLatestSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesClient", "GetLatest", resp, "Failure sending request") + return + } + + result, err = client.GetLatestResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesClient", "GetLatest", resp, "Failure responding to request") + } + + return +} + +// GetLatestPreparer prepares the GetLatest request. +func (client VirtualMachineScaleSetRollingUpgradesClient) GetLatestPreparer(resourceGroupName string, VMScaleSetName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/latest", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetLatestSender sends the GetLatest request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetRollingUpgradesClient) GetLatestSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetLatestResponder handles the response to the GetLatest request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetRollingUpgradesClient) GetLatestResponder(resp *http.Response) (result RollingUpgradeStatusInfo, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// StartOSUpgrade starts a rolling upgrade to move all virtual machine scale set instances to the latest available +// Platform Image OS version. Instances which are already running the latest available OS version are not affected. +// This method may poll for completion. Polling can be canceled by passing the cancel channel argument. The channel +// will be used to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. 
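// Illustrative sketch combining the rolling-upgrade operations in this file
// (StartOSUpgrade itself follows below; assumed import:
// "github.com/Azure/azure-sdk-for-go/arm/compute"; client and names are placeholders).
func upgradeAndReportStatus(client compute.VirtualMachineScaleSetRollingUpgradesClient, resourceGroup, scaleSet string) (compute.RollingUpgradeStatusInfo, error) {
	// Kick off the OS upgrade; a nil cancel channel means no cancellation.
	resultChan, errChan := client.StartOSUpgrade(resourceGroup, scaleSet, nil)
	if err := <-errChan; err != nil {
		return compute.RollingUpgradeStatusInfo{}, err
	}
	<-resultChan // drain the buffered OperationStatusResponse
	// After the call returns, GetLatest reports the state of the most recent
	// rolling upgrade for the scale set.
	return client.GetLatest(resourceGroup, scaleSet)
}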
+func (client VirtualMachineScaleSetRollingUpgradesClient) StartOSUpgrade(resourceGroupName string, VMScaleSetName string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { + resultChan := make(chan OperationStatusResponse, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result OperationStatusResponse + defer func() { + if err != nil { + errChan <- err + } + resultChan <- result + close(resultChan) + close(errChan) + }() + req, err := client.StartOSUpgradePreparer(resourceGroupName, VMScaleSetName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesClient", "StartOSUpgrade", nil, "Failure preparing request") + return + } + + resp, err := client.StartOSUpgradeSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesClient", "StartOSUpgrade", resp, "Failure sending request") + return + } + + result, err = client.StartOSUpgradeResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesClient", "StartOSUpgrade", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// StartOSUpgradePreparer prepares the StartOSUpgrade request. +func (client VirtualMachineScaleSetRollingUpgradesClient) StartOSUpgradePreparer(resourceGroupName string, VMScaleSetName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/osRollingUpgrade", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// StartOSUpgradeSender sends the StartOSUpgrade request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetRollingUpgradesClient) StartOSUpgradeSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// StartOSUpgradeResponder handles the response to the StartOSUpgrade request. The method always +// closes the http.Response Body. 
+func (client VirtualMachineScaleSetRollingUpgradesClient) StartOSUpgradeResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesets.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesets.go index 53700c8dd9e7..72a868579acb 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesets.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesets.go @@ -14,9 +14,8 @@ package compute // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/Azure/go-autorest/autorest" @@ -25,43 +24,67 @@ import ( "net/http" ) -// VirtualMachineScaleSetsClient is the the Compute Management Client. +// VirtualMachineScaleSetsClient is the compute Client type VirtualMachineScaleSetsClient struct { ManagementClient } -// NewVirtualMachineScaleSetsClient creates an instance of the -// VirtualMachineScaleSetsClient client. +// NewVirtualMachineScaleSetsClient creates an instance of the VirtualMachineScaleSetsClient client. func NewVirtualMachineScaleSetsClient(subscriptionID string) VirtualMachineScaleSetsClient { return NewVirtualMachineScaleSetsClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewVirtualMachineScaleSetsClientWithBaseURI creates an instance of the -// VirtualMachineScaleSetsClient client. +// NewVirtualMachineScaleSetsClientWithBaseURI creates an instance of the VirtualMachineScaleSetsClient client. func NewVirtualMachineScaleSetsClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineScaleSetsClient { return VirtualMachineScaleSetsClient{NewWithBaseURI(baseURI, subscriptionID)} } -// CreateOrUpdate create or update a VM scale set. This method may poll for -// completion. Polling can be canceled by passing the cancel channel argument. -// The channel will be used to cancel polling and any outstanding HTTP -// requests. +// CreateOrUpdate create or update a VM scale set. This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. name is the name of the -// VM scale set to create or update. parameters is the scale set object. -func (client VirtualMachineScaleSetsClient) CreateOrUpdate(resourceGroupName string, name string, parameters VirtualMachineScaleSet, cancel <-chan struct{}) (<-chan VirtualMachineScaleSet, <-chan error) { +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set to create or +// update. parameters is the scale set object. 
+func (client VirtualMachineScaleSetsClient) CreateOrUpdate(resourceGroupName string, VMScaleSetName string, parameters VirtualMachineScaleSet, cancel <-chan struct{}) (<-chan VirtualMachineScaleSet, <-chan error) { resultChan := make(chan VirtualMachineScaleSet, 1) errChan := make(chan error, 1) + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxBatchInstancePercent", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxBatchInstancePercent", Name: validation.InclusiveMaximum, Rule: 100, Chain: nil}, + {Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxBatchInstancePercent", Name: validation.InclusiveMinimum, Rule: 5, Chain: nil}, + }}, + {Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxUnhealthyInstancePercent", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxUnhealthyInstancePercent", Name: validation.InclusiveMaximum, Rule: 100, Chain: nil}, + {Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxUnhealthyInstancePercent", Name: validation.InclusiveMinimum, Rule: 5, Chain: nil}, + }}, + {Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxUnhealthyUpgradedInstancePercent", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxUnhealthyUpgradedInstancePercent", Name: validation.InclusiveMaximum, Rule: 100, Chain: nil}, + {Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxUnhealthyUpgradedInstancePercent", Name: validation.InclusiveMinimum, Rule: 0, Chain: nil}, + }}, + }}, + }}, + }}}}}); err != nil { + errChan <- validation.NewErrorWithValidationError(err, "compute.VirtualMachineScaleSetsClient", "CreateOrUpdate") + close(errChan) + close(resultChan) + return resultChan, errChan + } + go func() { var err error var result VirtualMachineScaleSet defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() - req, err := client.CreateOrUpdatePreparer(resourceGroupName, name, parameters, cancel) + req, err := client.CreateOrUpdatePreparer(resourceGroupName, VMScaleSetName, parameters, cancel) if err != nil { err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "CreateOrUpdate", nil, "Failure preparing request") return @@ -83,14 +106,14 @@ func (client VirtualMachineScaleSetsClient) CreateOrUpdate(resourceGroupName str } // CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
-func (client VirtualMachineScaleSetsClient) CreateOrUpdatePreparer(resourceGroupName string, name string, parameters VirtualMachineScaleSet, cancel <-chan struct{}) (*http.Request, error) { +func (client VirtualMachineScaleSetsClient) CreateOrUpdatePreparer(resourceGroupName string, VMScaleSetName string, parameters VirtualMachineScaleSet, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ - "name": autorest.Encode("path", name), "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -99,7 +122,7 @@ func (client VirtualMachineScaleSetsClient) CreateOrUpdatePreparer(resourceGroup autorest.AsJSON(), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{name}", pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}", pathParameters), autorest.WithJSON(parameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare(&http.Request{Cancel: cancel}) @@ -126,16 +149,13 @@ func (client VirtualMachineScaleSetsClient) CreateOrUpdateResponder(resp *http.R return } -// Deallocate deallocates specific virtual machines in a VM scale set. Shuts -// down the virtual machines and releases the compute resources. You are not -// billed for the compute resources that this virtual machine scale set -// deallocates. This method may poll for completion. Polling can be canceled by -// passing the cancel channel argument. The channel will be used to cancel -// polling and any outstanding HTTP requests. +// Deallocate deallocates specific virtual machines in a VM scale set. Shuts down the virtual machines and releases the +// compute resources. You are not billed for the compute resources that this virtual machine scale set deallocates. +// This method may poll for completion. Polling can be canceled by passing the cancel channel argument. The channel +// will be used to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. VMScaleSetName is the -// name of the VM scale set. VMInstanceIDs is a list of virtual machine -// instance IDs from the VM scale set. +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. VMInstanceIDs +// is a list of virtual machine instance IDs from the VM scale set. 
func (client VirtualMachineScaleSetsClient) Deallocate(resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { resultChan := make(chan OperationStatusResponse, 1) errChan := make(chan error, 1) @@ -143,8 +163,10 @@ func (client VirtualMachineScaleSetsClient) Deallocate(resourceGroupName string, var err error var result OperationStatusResponse defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -177,7 +199,7 @@ func (client VirtualMachineScaleSetsClient) DeallocatePreparer(resourceGroupName "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -216,12 +238,10 @@ func (client VirtualMachineScaleSetsClient) DeallocateResponder(resp *http.Respo return } -// Delete deletes a VM scale set. This method may poll for completion. Polling -// can be canceled by passing the cancel channel argument. The channel will be -// used to cancel polling and any outstanding HTTP requests. +// Delete deletes a VM scale set. This method may poll for completion. Polling can be canceled by passing the cancel +// channel argument. The channel will be used to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. VMScaleSetName is the -// name of the VM scale set. +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. func (client VirtualMachineScaleSetsClient) Delete(resourceGroupName string, VMScaleSetName string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { resultChan := make(chan OperationStatusResponse, 1) errChan := make(chan error, 1) @@ -229,8 +249,10 @@ func (client VirtualMachineScaleSetsClient) Delete(resourceGroupName string, VMS var err error var result OperationStatusResponse defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -263,7 +285,7 @@ func (client VirtualMachineScaleSetsClient) DeletePreparer(resourceGroupName str "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -297,14 +319,12 @@ func (client VirtualMachineScaleSetsClient) DeleteResponder(resp *http.Response) return } -// DeleteInstances deletes virtual machines in a VM scale set. This method may -// poll for completion. Polling can be canceled by passing the cancel channel -// argument. The channel will be used to cancel polling and any outstanding -// HTTP requests. +// DeleteInstances deletes virtual machines in a VM scale set. This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP +// requests. // -// resourceGroupName is the name of the resource group. VMScaleSetName is the -// name of the VM scale set. VMInstanceIDs is a list of virtual machine -// instance IDs from the VM scale set. +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. VMInstanceIDs +// is a list of virtual machine instance IDs from the VM scale set. 
func (client VirtualMachineScaleSetsClient) DeleteInstances(resourceGroupName string, VMScaleSetName string, VMInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { resultChan := make(chan OperationStatusResponse, 1) errChan := make(chan error, 1) @@ -321,8 +341,10 @@ func (client VirtualMachineScaleSetsClient) DeleteInstances(resourceGroupName st var err error var result OperationStatusResponse defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -355,7 +377,7 @@ func (client VirtualMachineScaleSetsClient) DeleteInstancesPreparer(resourceGrou "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -393,8 +415,7 @@ func (client VirtualMachineScaleSetsClient) DeleteInstancesResponder(resp *http. // Get display information about a virtual machine scale set. // -// resourceGroupName is the name of the resource group. VMScaleSetName is the -// name of the VM scale set. +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. func (client VirtualMachineScaleSetsClient) Get(resourceGroupName string, VMScaleSetName string) (result VirtualMachineScaleSet, err error) { req, err := client.GetPreparer(resourceGroupName, VMScaleSetName) if err != nil { @@ -425,7 +446,7 @@ func (client VirtualMachineScaleSetsClient) GetPreparer(resourceGroupName string "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -459,8 +480,7 @@ func (client VirtualMachineScaleSetsClient) GetResponder(resp *http.Response) (r // GetInstanceView gets the status of a VM scale set instance. // -// resourceGroupName is the name of the resource group. VMScaleSetName is the -// name of the VM scale set. +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. func (client VirtualMachineScaleSetsClient) GetInstanceView(resourceGroupName string, VMScaleSetName string) (result VirtualMachineScaleSetInstanceView, err error) { req, err := client.GetInstanceViewPreparer(resourceGroupName, VMScaleSetName) if err != nil { @@ -491,7 +511,7 @@ func (client VirtualMachineScaleSetsClient) GetInstanceViewPreparer(resourceGrou "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -555,7 +575,7 @@ func (client VirtualMachineScaleSetsClient) ListPreparer(resourceGroupName strin "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -611,9 +631,53 @@ func (client VirtualMachineScaleSetsClient) ListNextResults(lastResults VirtualM return } -// ListAll gets a list of all VM Scale Sets in the subscription, regardless of -// the associated resource group. Use nextLink property in the response to get -// the next page of VM Scale Sets. Do this till nextLink is not null to fetch +// ListComplete gets all elements from the list without paging. 
+func (client VirtualMachineScaleSetsClient) ListComplete(resourceGroupName string, cancel <-chan struct{}) (<-chan VirtualMachineScaleSet, <-chan error) { + resultChan := make(chan VirtualMachineScaleSet) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List(resourceGroupName) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} + +// ListAll gets a list of all VM Scale Sets in the subscription, regardless of the associated resource group. Use +// nextLink property in the response to get the next page of VM Scale Sets. Do this till nextLink is not null to fetch // all the VM Scale Sets. func (client VirtualMachineScaleSetsClient) ListAll() (result VirtualMachineScaleSetListWithLinkResult, err error) { req, err := client.ListAllPreparer() @@ -643,7 +707,7 @@ func (client VirtualMachineScaleSetsClient) ListAllPreparer() (*http.Request, er "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -699,11 +763,55 @@ func (client VirtualMachineScaleSetsClient) ListAllNextResults(lastResults Virtu return } -// ListSkus gets a list of SKUs available for your VM scale set, including the -// minimum and maximum VM instances allowed for each SKU. +// ListAllComplete gets all elements from the list without paging. +func (client VirtualMachineScaleSetsClient) ListAllComplete(cancel <-chan struct{}) (<-chan VirtualMachineScaleSet, <-chan error) { + resultChan := make(chan VirtualMachineScaleSet) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.ListAll() + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListAllNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} + +// ListSkus gets a list of SKUs available for your VM scale set, including the minimum and maximum VM instances allowed +// for each SKU. // -// resourceGroupName is the name of the resource group. VMScaleSetName is the -// name of the VM scale set. +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. 
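As an aside on the ListComplete/ListAllComplete helpers added above: they wrap the paged List calls in a producer goroutine that streams items over an unbuffered channel and reports a single error, if any, on a buffered one. A hedged sketch of ranging over ListAllComplete (the same pattern applies to ListComplete and ListSkusComplete); client setup is assumed:

package vmssdemo

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/arm/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

// printAllScaleSets drains the streaming variant of ListAll. Closing cancel would
// stop the producer early; here we simply read until the result channel closes.
func printAllScaleSets(client compute.VirtualMachineScaleSetsClient) error {
	cancel := make(chan struct{})
	defer close(cancel)

	vmssChan, errChan := client.ListAllComplete(cancel)
	for vmss := range vmssChan {
		fmt.Println(to.String(vmss.Name))
	}
	// Both channels are closed when the producer exits; a buffered error (from the
	// initial call or a later NextResults page) is still readable after draining.
	return <-errChan
}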
func (client VirtualMachineScaleSetsClient) ListSkus(resourceGroupName string, VMScaleSetName string) (result VirtualMachineScaleSetListSkusResult, err error) { req, err := client.ListSkusPreparer(resourceGroupName, VMScaleSetName) if err != nil { @@ -734,7 +842,7 @@ func (client VirtualMachineScaleSetsClient) ListSkusPreparer(resourceGroupName s "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -790,16 +898,58 @@ func (client VirtualMachineScaleSetsClient) ListSkusNextResults(lastResults Virt return } -// PowerOff power off (stop) one or more virtual machines in a VM scale set. -// Note that resources are still attached and you are getting charged for the -// resources. Instead, use deallocate to release resources and avoid charges. -// This method may poll for completion. Polling can be canceled by passing the -// cancel channel argument. The channel will be used to cancel polling and any -// outstanding HTTP requests. +// ListSkusComplete gets all elements from the list without paging. +func (client VirtualMachineScaleSetsClient) ListSkusComplete(resourceGroupName string, VMScaleSetName string, cancel <-chan struct{}) (<-chan VirtualMachineScaleSetSku, <-chan error) { + resultChan := make(chan VirtualMachineScaleSetSku) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.ListSkus(resourceGroupName, VMScaleSetName) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListSkusNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} + +// PowerOff power off (stop) one or more virtual machines in a VM scale set. Note that resources are still attached and +// you are getting charged for the resources. Instead, use deallocate to release resources and avoid charges. This +// method may poll for completion. Polling can be canceled by passing the cancel channel argument. The channel will be +// used to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. VMScaleSetName is the -// name of the VM scale set. VMInstanceIDs is a list of virtual machine -// instance IDs from the VM scale set. +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. VMInstanceIDs +// is a list of virtual machine instance IDs from the VM scale set. 
func (client VirtualMachineScaleSetsClient) PowerOff(resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { resultChan := make(chan OperationStatusResponse, 1) errChan := make(chan error, 1) @@ -807,8 +957,10 @@ func (client VirtualMachineScaleSetsClient) PowerOff(resourceGroupName string, V var err error var result OperationStatusResponse defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -841,7 +993,7 @@ func (client VirtualMachineScaleSetsClient) PowerOffPreparer(resourceGroupName s "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -880,26 +1032,27 @@ func (client VirtualMachineScaleSetsClient) PowerOffResponder(resp *http.Respons return } -// Reimage reimages (upgrade the operating system) one or more virtual machines -// in a VM scale set. This method may poll for completion. Polling can be -// canceled by passing the cancel channel argument. The channel will be used to -// cancel polling and any outstanding HTTP requests. +// Reimage reimages (upgrade the operating system) one or more virtual machines in a VM scale set. This method may poll +// for completion. Polling can be canceled by passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. VMScaleSetName is the -// name of the VM scale set. -func (client VirtualMachineScaleSetsClient) Reimage(resourceGroupName string, VMScaleSetName string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. VMInstanceIDs +// is a list of virtual machine instance IDs from the VM scale set. +func (client VirtualMachineScaleSetsClient) Reimage(resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { resultChan := make(chan OperationStatusResponse, 1) errChan := make(chan error, 1) go func() { var err error var result OperationStatusResponse defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() - req, err := client.ReimagePreparer(resourceGroupName, VMScaleSetName, cancel) + req, err := client.ReimagePreparer(resourceGroupName, VMScaleSetName, VMInstanceIDs, cancel) if err != nil { err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Reimage", nil, "Failure preparing request") return @@ -921,23 +1074,28 @@ func (client VirtualMachineScaleSetsClient) Reimage(resourceGroupName string, VM } // ReimagePreparer prepares the Reimage request. 
-func (client VirtualMachineScaleSetsClient) ReimagePreparer(resourceGroupName string, VMScaleSetName string, cancel <-chan struct{}) (*http.Request, error) { +func (client VirtualMachineScaleSetsClient) ReimagePreparer(resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } preparer := autorest.CreatePreparer( + autorest.AsJSON(), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/reimage", pathParameters), autorest.WithQueryParameters(queryParameters)) + if VMInstanceIDs != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithJSON(VMInstanceIDs)) + } return preparer.Prepare(&http.Request{Cancel: cancel}) } @@ -962,27 +1120,27 @@ func (client VirtualMachineScaleSetsClient) ReimageResponder(resp *http.Response return } -// ReimageAll reimages all the disks ( including data disks ) in the virtual -// machines in a virtual machine scale set. This operation is only supported -// for managed disks. This method may poll for completion. Polling can be -// canceled by passing the cancel channel argument. The channel will be used to -// cancel polling and any outstanding HTTP requests. +// ReimageAll reimages all the disks ( including data disks ) in the virtual machines in a VM scale set. This operation +// is only supported for managed disks. This method may poll for completion. Polling can be canceled by passing the +// cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. VMScaleSetName is the -// name of the VM scale set. -func (client VirtualMachineScaleSetsClient) ReimageAll(resourceGroupName string, VMScaleSetName string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. VMInstanceIDs +// is a list of virtual machine instance IDs from the VM scale set. 
+func (client VirtualMachineScaleSetsClient) ReimageAll(resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { resultChan := make(chan OperationStatusResponse, 1) errChan := make(chan error, 1) go func() { var err error var result OperationStatusResponse defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() - req, err := client.ReimageAllPreparer(resourceGroupName, VMScaleSetName, cancel) + req, err := client.ReimageAllPreparer(resourceGroupName, VMScaleSetName, VMInstanceIDs, cancel) if err != nil { err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ReimageAll", nil, "Failure preparing request") return @@ -1004,23 +1162,28 @@ func (client VirtualMachineScaleSetsClient) ReimageAll(resourceGroupName string, } // ReimageAllPreparer prepares the ReimageAll request. -func (client VirtualMachineScaleSetsClient) ReimageAllPreparer(resourceGroupName string, VMScaleSetName string, cancel <-chan struct{}) (*http.Request, error) { +func (client VirtualMachineScaleSetsClient) ReimageAllPreparer(resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } preparer := autorest.CreatePreparer( + autorest.AsJSON(), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/reimageall", pathParameters), autorest.WithQueryParameters(queryParameters)) + if VMInstanceIDs != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithJSON(VMInstanceIDs)) + } return preparer.Prepare(&http.Request{Cancel: cancel}) } @@ -1045,14 +1208,12 @@ func (client VirtualMachineScaleSetsClient) ReimageAllResponder(resp *http.Respo return } -// Restart restarts one or more virtual machines in a VM scale set. This method -// may poll for completion. Polling can be canceled by passing the cancel -// channel argument. The channel will be used to cancel polling and any -// outstanding HTTP requests. +// Restart restarts one or more virtual machines in a VM scale set. This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP +// requests. // -// resourceGroupName is the name of the resource group. VMScaleSetName is the -// name of the VM scale set. VMInstanceIDs is a list of virtual machine -// instance IDs from the VM scale set. +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. VMInstanceIDs +// is a list of virtual machine instance IDs from the VM scale set. 
func (client VirtualMachineScaleSetsClient) Restart(resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { resultChan := make(chan OperationStatusResponse, 1) errChan := make(chan error, 1) @@ -1060,8 +1221,10 @@ func (client VirtualMachineScaleSetsClient) Restart(resourceGroupName string, VM var err error var result OperationStatusResponse defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -1094,7 +1257,7 @@ func (client VirtualMachineScaleSetsClient) RestartPreparer(resourceGroupName st "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1133,14 +1296,12 @@ func (client VirtualMachineScaleSetsClient) RestartResponder(resp *http.Response return } -// Start starts one or more virtual machines in a VM scale set. This method may -// poll for completion. Polling can be canceled by passing the cancel channel -// argument. The channel will be used to cancel polling and any outstanding -// HTTP requests. +// Start starts one or more virtual machines in a VM scale set. This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP +// requests. // -// resourceGroupName is the name of the resource group. VMScaleSetName is the -// name of the VM scale set. VMInstanceIDs is a list of virtual machine -// instance IDs from the VM scale set. +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. VMInstanceIDs +// is a list of virtual machine instance IDs from the VM scale set. func (client VirtualMachineScaleSetsClient) Start(resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { resultChan := make(chan OperationStatusResponse, 1) errChan := make(chan error, 1) @@ -1148,8 +1309,10 @@ func (client VirtualMachineScaleSetsClient) Start(resourceGroupName string, VMSc var err error var result OperationStatusResponse defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -1182,7 +1345,7 @@ func (client VirtualMachineScaleSetsClient) StartPreparer(resourceGroupName stri "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1221,14 +1384,96 @@ func (client VirtualMachineScaleSetsClient) StartResponder(resp *http.Response) return } -// UpdateInstances upgrades one or more virtual machines to the latest SKU set -// in the VM scale set model. This method may poll for completion. Polling can -// be canceled by passing the cancel channel argument. The channel will be used -// to cancel polling and any outstanding HTTP requests. +// Update update a VM scale set. This method may poll for completion. Polling can be canceled by passing the cancel +// channel argument. The channel will be used to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. 
VMScaleSetName is the name of the VM scale set to create or +// update. parameters is the scale set object. +func (client VirtualMachineScaleSetsClient) Update(resourceGroupName string, VMScaleSetName string, parameters VirtualMachineScaleSetUpdate, cancel <-chan struct{}) (<-chan VirtualMachineScaleSet, <-chan error) { + resultChan := make(chan VirtualMachineScaleSet, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result VirtualMachineScaleSet + defer func() { + if err != nil { + errChan <- err + } + resultChan <- result + close(resultChan) + close(errChan) + }() + req, err := client.UpdatePreparer(resourceGroupName, VMScaleSetName, parameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Update", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Update", resp, "Failure sending request") + return + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Update", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// UpdatePreparer prepares the Update request. +func (client VirtualMachineScaleSetsClient) UpdatePreparer(resourceGroupName string, VMScaleSetName string, parameters VirtualMachineScaleSetUpdate, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetsClient) UpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetsClient) UpdateResponder(resp *http.Response) (result VirtualMachineScaleSet, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// UpdateInstances upgrades one or more virtual machines to the latest SKU set in the VM scale set model. This method +// may poll for completion. Polling can be canceled by passing the cancel channel argument. The channel will be used to +// cancel polling and any outstanding HTTP requests. 
// -// resourceGroupName is the name of the resource group. VMScaleSetName is the -// name of the VM scale set. VMInstanceIDs is a list of virtual machine -// instance IDs from the VM scale set. +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. VMInstanceIDs +// is a list of virtual machine instance IDs from the VM scale set. func (client VirtualMachineScaleSetsClient) UpdateInstances(resourceGroupName string, VMScaleSetName string, VMInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { resultChan := make(chan OperationStatusResponse, 1) errChan := make(chan error, 1) @@ -1245,8 +1490,10 @@ func (client VirtualMachineScaleSetsClient) UpdateInstances(resourceGroupName st var err error var result OperationStatusResponse defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -1279,7 +1526,7 @@ func (client VirtualMachineScaleSetsClient) UpdateInstancesPreparer(resourceGrou "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetvms.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetvms.go index 34e0934dcb68..5a15edae9131 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetvms.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetvms.go @@ -14,9 +14,8 @@ package compute // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/Azure/go-autorest/autorest" @@ -24,33 +23,28 @@ import ( "net/http" ) -// VirtualMachineScaleSetVMsClient is the the Compute Management Client. +// VirtualMachineScaleSetVMsClient is the compute Client type VirtualMachineScaleSetVMsClient struct { ManagementClient } -// NewVirtualMachineScaleSetVMsClient creates an instance of the -// VirtualMachineScaleSetVMsClient client. +// NewVirtualMachineScaleSetVMsClient creates an instance of the VirtualMachineScaleSetVMsClient client. func NewVirtualMachineScaleSetVMsClient(subscriptionID string) VirtualMachineScaleSetVMsClient { return NewVirtualMachineScaleSetVMsClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewVirtualMachineScaleSetVMsClientWithBaseURI creates an instance of the -// VirtualMachineScaleSetVMsClient client. +// NewVirtualMachineScaleSetVMsClientWithBaseURI creates an instance of the VirtualMachineScaleSetVMsClient client. func NewVirtualMachineScaleSetVMsClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineScaleSetVMsClient { return VirtualMachineScaleSetVMsClient{NewWithBaseURI(baseURI, subscriptionID)} } -// Deallocate deallocates a specific virtual machine in a VM scale set. Shuts -// down the virtual machine and releases the compute resources it uses. You are -// not billed for the compute resources of this virtual machine once it is -// deallocated. 
This method may poll for completion. Polling can be canceled by -// passing the cancel channel argument. The channel will be used to cancel -// polling and any outstanding HTTP requests. +// Deallocate deallocates a specific virtual machine in a VM scale set. Shuts down the virtual machine and releases the +// compute resources it uses. You are not billed for the compute resources of this virtual machine once it is +// deallocated. This method may poll for completion. Polling can be canceled by passing the cancel channel argument. +// The channel will be used to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. VMScaleSetName is the -// name of the VM scale set. instanceID is the instance ID of the virtual -// machine. +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. instanceID is +// the instance ID of the virtual machine. func (client VirtualMachineScaleSetVMsClient) Deallocate(resourceGroupName string, VMScaleSetName string, instanceID string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { resultChan := make(chan OperationStatusResponse, 1) errChan := make(chan error, 1) @@ -58,8 +52,10 @@ func (client VirtualMachineScaleSetVMsClient) Deallocate(resourceGroupName strin var err error var result OperationStatusResponse defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -93,7 +89,7 @@ func (client VirtualMachineScaleSetVMsClient) DeallocatePreparer(resourceGroupNa "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -127,14 +123,12 @@ func (client VirtualMachineScaleSetVMsClient) DeallocateResponder(resp *http.Res return } -// Delete deletes a virtual machine from a VM scale set. This method may poll -// for completion. Polling can be canceled by passing the cancel channel -// argument. The channel will be used to cancel polling and any outstanding -// HTTP requests. +// Delete deletes a virtual machine from a VM scale set. This method may poll for completion. Polling can be canceled +// by passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP +// requests. // -// resourceGroupName is the name of the resource group. VMScaleSetName is the -// name of the VM scale set. instanceID is the instance ID of the virtual -// machine. +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. instanceID is +// the instance ID of the virtual machine. 
func (client VirtualMachineScaleSetVMsClient) Delete(resourceGroupName string, VMScaleSetName string, instanceID string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { resultChan := make(chan OperationStatusResponse, 1) errChan := make(chan error, 1) @@ -142,8 +136,10 @@ func (client VirtualMachineScaleSetVMsClient) Delete(resourceGroupName string, V var err error var result OperationStatusResponse defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -177,7 +173,7 @@ func (client VirtualMachineScaleSetVMsClient) DeletePreparer(resourceGroupName s "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -213,9 +209,8 @@ func (client VirtualMachineScaleSetVMsClient) DeleteResponder(resp *http.Respons // Get gets a virtual machine from a VM scale set. // -// resourceGroupName is the name of the resource group. VMScaleSetName is the -// name of the VM scale set. instanceID is the instance ID of the virtual -// machine. +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. instanceID is +// the instance ID of the virtual machine. func (client VirtualMachineScaleSetVMsClient) Get(resourceGroupName string, VMScaleSetName string, instanceID string) (result VirtualMachineScaleSetVM, err error) { req, err := client.GetPreparer(resourceGroupName, VMScaleSetName, instanceID) if err != nil { @@ -247,7 +242,7 @@ func (client VirtualMachineScaleSetVMsClient) GetPreparer(resourceGroupName stri "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -281,9 +276,8 @@ func (client VirtualMachineScaleSetVMsClient) GetResponder(resp *http.Response) // GetInstanceView gets the status of a virtual machine from a VM scale set. // -// resourceGroupName is the name of the resource group. VMScaleSetName is the -// name of the VM scale set. instanceID is the instance ID of the virtual -// machine. +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. instanceID is +// the instance ID of the virtual machine. func (client VirtualMachineScaleSetVMsClient) GetInstanceView(resourceGroupName string, VMScaleSetName string, instanceID string) (result VirtualMachineScaleSetVMInstanceView, err error) { req, err := client.GetInstanceViewPreparer(resourceGroupName, VMScaleSetName, instanceID) if err != nil { @@ -315,7 +309,7 @@ func (client VirtualMachineScaleSetVMsClient) GetInstanceViewPreparer(resourceGr "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -349,10 +343,9 @@ func (client VirtualMachineScaleSetVMsClient) GetInstanceViewResponder(resp *htt // List gets a list of all virtual machines in a VM scale sets. // -// resourceGroupName is the name of the resource group. -// virtualMachineScaleSetName is the name of the VM scale set. filter is the -// filter to apply to the operation. selectParameter is the list parameters. -// expand is the expand expression to apply to the operation. +// resourceGroupName is the name of the resource group. 
virtualMachineScaleSetName is the name of the VM scale set. +// filter is the filter to apply to the operation. selectParameter is the list parameters. expand is the expand +// expression to apply to the operation. func (client VirtualMachineScaleSetVMsClient) List(resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (result VirtualMachineScaleSetVMListResult, err error) { req, err := client.ListPreparer(resourceGroupName, virtualMachineScaleSetName, filter, selectParameter, expand) if err != nil { @@ -383,7 +376,7 @@ func (client VirtualMachineScaleSetVMsClient) ListPreparer(resourceGroupName str "virtualMachineScaleSetName": autorest.Encode("path", virtualMachineScaleSetName), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -448,16 +441,58 @@ func (client VirtualMachineScaleSetVMsClient) ListNextResults(lastResults Virtua return } -// PowerOff power off (stop) a virtual machine in a VM scale set. Note that -// resources are still attached and you are getting charged for the resources. -// Instead, use deallocate to release resources and avoid charges. This method -// may poll for completion. Polling can be canceled by passing the cancel -// channel argument. The channel will be used to cancel polling and any -// outstanding HTTP requests. +// ListComplete gets all elements from the list without paging. +func (client VirtualMachineScaleSetVMsClient) ListComplete(resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string, cancel <-chan struct{}) (<-chan VirtualMachineScaleSetVM, <-chan error) { + resultChan := make(chan VirtualMachineScaleSetVM) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List(resourceGroupName, virtualMachineScaleSetName, filter, selectParameter, expand) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} + +// PowerOff power off (stop) a virtual machine in a VM scale set. Note that resources are still attached and you are +// getting charged for the resources. Instead, use deallocate to release resources and avoid charges. This method may +// poll for completion. Polling can be canceled by passing the cancel channel argument. The channel will be used to +// cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. VMScaleSetName is the -// name of the VM scale set. instanceID is the instance ID of the virtual -// machine. +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. instanceID is +// the instance ID of the virtual machine. 
func (client VirtualMachineScaleSetVMsClient) PowerOff(resourceGroupName string, VMScaleSetName string, instanceID string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { resultChan := make(chan OperationStatusResponse, 1) errChan := make(chan error, 1) @@ -465,8 +500,10 @@ func (client VirtualMachineScaleSetVMsClient) PowerOff(resourceGroupName string, var err error var result OperationStatusResponse defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -500,7 +537,7 @@ func (client VirtualMachineScaleSetVMsClient) PowerOffPreparer(resourceGroupName "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -534,14 +571,12 @@ func (client VirtualMachineScaleSetVMsClient) PowerOffResponder(resp *http.Respo return } -// Reimage reimages (upgrade the operating system) a specific virtual machine -// in a VM scale set. This method may poll for completion. Polling can be -// canceled by passing the cancel channel argument. The channel will be used to -// cancel polling and any outstanding HTTP requests. +// Reimage reimages (upgrade the operating system) a specific virtual machine in a VM scale set. This method may poll +// for completion. Polling can be canceled by passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. VMScaleSetName is the -// name of the VM scale set. instanceID is the instance ID of the virtual -// machine. +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. instanceID is +// the instance ID of the virtual machine. func (client VirtualMachineScaleSetVMsClient) Reimage(resourceGroupName string, VMScaleSetName string, instanceID string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { resultChan := make(chan OperationStatusResponse, 1) errChan := make(chan error, 1) @@ -549,8 +584,10 @@ func (client VirtualMachineScaleSetVMsClient) Reimage(resourceGroupName string, var err error var result OperationStatusResponse defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -584,7 +621,7 @@ func (client VirtualMachineScaleSetVMsClient) ReimagePreparer(resourceGroupName "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -618,15 +655,12 @@ func (client VirtualMachineScaleSetVMsClient) ReimageResponder(resp *http.Respon return } -// ReimageAll allows you to re-image all the disks ( including data disks ) in -// the a virtual machine scale set instance. This operation is only supported -// for managed disks. This method may poll for completion. Polling can be -// canceled by passing the cancel channel argument. The channel will be used to -// cancel polling and any outstanding HTTP requests. +// ReimageAll allows you to re-image all the disks ( including data disks ) in the a VM scale set instance. This +// operation is only supported for managed disks. This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. 
The channel will be used to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. VMScaleSetName is the -// name of the VM scale set. instanceID is the instance ID of the virtual -// machine. +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. instanceID is +// the instance ID of the virtual machine. func (client VirtualMachineScaleSetVMsClient) ReimageAll(resourceGroupName string, VMScaleSetName string, instanceID string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { resultChan := make(chan OperationStatusResponse, 1) errChan := make(chan error, 1) @@ -634,8 +668,10 @@ func (client VirtualMachineScaleSetVMsClient) ReimageAll(resourceGroupName strin var err error var result OperationStatusResponse defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -669,7 +705,7 @@ func (client VirtualMachineScaleSetVMsClient) ReimageAllPreparer(resourceGroupNa "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -703,14 +739,12 @@ func (client VirtualMachineScaleSetVMsClient) ReimageAllResponder(resp *http.Res return } -// Restart restarts a virtual machine in a VM scale set. This method may poll -// for completion. Polling can be canceled by passing the cancel channel -// argument. The channel will be used to cancel polling and any outstanding -// HTTP requests. +// Restart restarts a virtual machine in a VM scale set. This method may poll for completion. Polling can be canceled +// by passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP +// requests. // -// resourceGroupName is the name of the resource group. VMScaleSetName is the -// name of the VM scale set. instanceID is the instance ID of the virtual -// machine. +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. instanceID is +// the instance ID of the virtual machine. func (client VirtualMachineScaleSetVMsClient) Restart(resourceGroupName string, VMScaleSetName string, instanceID string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { resultChan := make(chan OperationStatusResponse, 1) errChan := make(chan error, 1) @@ -718,8 +752,10 @@ func (client VirtualMachineScaleSetVMsClient) Restart(resourceGroupName string, var err error var result OperationStatusResponse defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -753,7 +789,7 @@ func (client VirtualMachineScaleSetVMsClient) RestartPreparer(resourceGroupName "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -787,14 +823,11 @@ func (client VirtualMachineScaleSetVMsClient) RestartResponder(resp *http.Respon return } -// Start starts a virtual machine in a VM scale set. This method may poll for -// completion. Polling can be canceled by passing the cancel channel argument. -// The channel will be used to cancel polling and any outstanding HTTP -// requests. +// Start starts a virtual machine in a VM scale set. This method may poll for completion. 
Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. VMScaleSetName is the -// name of the VM scale set. instanceID is the instance ID of the virtual -// machine. +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. instanceID is +// the instance ID of the virtual machine. func (client VirtualMachineScaleSetVMsClient) Start(resourceGroupName string, VMScaleSetName string, instanceID string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { resultChan := make(chan OperationStatusResponse, 1) errChan := make(chan error, 1) @@ -802,8 +835,10 @@ func (client VirtualMachineScaleSetVMsClient) Start(resourceGroupName string, VM var err error var result OperationStatusResponse defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -837,7 +872,7 @@ func (client VirtualMachineScaleSetVMsClient) StartPreparer(resourceGroupName st "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinesizes.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinesizes.go index c76b203e7e9d..dc2f2778bea2 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinesizes.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinesizes.go @@ -14,9 +14,8 @@ package compute // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/Azure/go-autorest/autorest" @@ -25,25 +24,22 @@ import ( "net/http" ) -// VirtualMachineSizesClient is the the Compute Management Client. +// VirtualMachineSizesClient is the compute Client type VirtualMachineSizesClient struct { ManagementClient } -// NewVirtualMachineSizesClient creates an instance of the -// VirtualMachineSizesClient client. +// NewVirtualMachineSizesClient creates an instance of the VirtualMachineSizesClient client. func NewVirtualMachineSizesClient(subscriptionID string) VirtualMachineSizesClient { return NewVirtualMachineSizesClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewVirtualMachineSizesClientWithBaseURI creates an instance of the -// VirtualMachineSizesClient client. +// NewVirtualMachineSizesClientWithBaseURI creates an instance of the VirtualMachineSizesClient client. func NewVirtualMachineSizesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineSizesClient { return VirtualMachineSizesClient{NewWithBaseURI(baseURI, subscriptionID)} } -// List lists all available virtual machine sizes for a subscription in a -// location. +// List lists all available virtual machine sizes for a subscription in a location. // // location is the location upon which virtual-machine-sizes is queried. 
func (client VirtualMachineSizesClient) List(location string) (result VirtualMachineSizeListResult, err error) { @@ -81,7 +77,7 @@ func (client VirtualMachineSizesClient) ListPreparer(location string) (*http.Req "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2016-04-30-preview" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/client.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/client.go index e5e99db676c3..b3eea6cb2f49 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/client.go @@ -1,5 +1,4 @@ -// Package containerregistry implements the Azure ARM Containerregistry service -// API version 2017-03-01. +// Package containerregistry implements the Azure ARM Containerregistry service API version 2017-10-01. // // package containerregistry @@ -18,9 +17,8 @@ package containerregistry // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/Azure/go-autorest/autorest" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/models.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/models.go index 66edd68c5694..fefd82b8d402 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/models.go @@ -14,9 +14,8 @@ package containerregistry // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/Azure/go-autorest/autorest" @@ -39,28 +38,159 @@ const ( type ProvisioningState string const ( + // Canceled specifies the canceled state for provisioning state. + Canceled ProvisioningState = "Canceled" // Creating specifies the creating state for provisioning state. Creating ProvisioningState = "Creating" + // Deleting specifies the deleting state for provisioning state. + Deleting ProvisioningState = "Deleting" + // Failed specifies the failed state for provisioning state. + Failed ProvisioningState = "Failed" // Succeeded specifies the succeeded state for provisioning state. Succeeded ProvisioningState = "Succeeded" + // Updating specifies the updating state for provisioning state. + Updating ProvisioningState = "Updating" +) + +// RegistryUsageUnit enumerates the values for registry usage unit. +type RegistryUsageUnit string + +const ( + // Bytes specifies the bytes state for registry usage unit. + Bytes RegistryUsageUnit = "Bytes" + // Count specifies the count state for registry usage unit. + Count RegistryUsageUnit = "Count" +) + +// SkuName enumerates the values for sku name. 
+type SkuName string + +const ( + // Basic specifies the basic state for sku name. + Basic SkuName = "Basic" + // Classic specifies the classic state for sku name. + Classic SkuName = "Classic" + // Premium specifies the premium state for sku name. + Premium SkuName = "Premium" + // Standard specifies the standard state for sku name. + Standard SkuName = "Standard" ) // SkuTier enumerates the values for sku tier. type SkuTier string const ( - // Basic specifies the basic state for sku tier. - Basic SkuTier = "Basic" + // SkuTierBasic specifies the sku tier basic state for sku tier. + SkuTierBasic SkuTier = "Basic" + // SkuTierClassic specifies the sku tier classic state for sku tier. + SkuTierClassic SkuTier = "Classic" + // SkuTierPremium specifies the sku tier premium state for sku tier. + SkuTierPremium SkuTier = "Premium" + // SkuTierStandard specifies the sku tier standard state for sku tier. + SkuTierStandard SkuTier = "Standard" +) + +// WebhookAction enumerates the values for webhook action. +type WebhookAction string + +const ( + // Delete specifies the delete state for webhook action. + Delete WebhookAction = "delete" + // Push specifies the push state for webhook action. + Push WebhookAction = "push" +) + +// WebhookStatus enumerates the values for webhook status. +type WebhookStatus string + +const ( + // Disabled specifies the disabled state for webhook status. + Disabled WebhookStatus = "disabled" + // Enabled specifies the enabled state for webhook status. + Enabled WebhookStatus = "enabled" ) +// Actor is the agent that initiated the event. For most situations, this could be from the authorization context of +// the request. +type Actor struct { + Name *string `json:"name,omitempty"` +} + +// CallbackConfig is the configuration of service URI and custom headers for the webhook. +type CallbackConfig struct { + autorest.Response `json:"-"` + ServiceURI *string `json:"serviceUri,omitempty"` + CustomHeaders *map[string]*string `json:"customHeaders,omitempty"` +} + +// Event is the event for a webhook. +type Event struct { + ID *string `json:"id,omitempty"` + EventRequestMessage *EventRequestMessage `json:"eventRequestMessage,omitempty"` + EventResponseMessage *EventResponseMessage `json:"eventResponseMessage,omitempty"` +} + +// EventContent is the content of the event request message. +type EventContent struct { + ID *string `json:"id,omitempty"` + Timestamp *date.Time `json:"timestamp,omitempty"` + Action *string `json:"action,omitempty"` + Target *Target `json:"target,omitempty"` + Request *Request `json:"request,omitempty"` + Actor *Actor `json:"actor,omitempty"` + Source *Source `json:"source,omitempty"` +} + +// EventInfo is the basic information of an event. +type EventInfo struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` +} + +// EventListResult is the result of a request to list events for a webhook. +type EventListResult struct { + autorest.Response `json:"-"` + Value *[]Event `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// EventListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client EventListResult) EventListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// EventRequestMessage is the event request message sent to the service URI. +type EventRequestMessage struct { + Content *EventContent `json:"content,omitempty"` + Headers *map[string]*string `json:"headers,omitempty"` + Method *string `json:"method,omitempty"` + RequestURI *string `json:"requestUri,omitempty"` + Version *string `json:"version,omitempty"` +} + +// EventResponseMessage is the event response message received from the service URI. +type EventResponseMessage struct { + Content *string `json:"content,omitempty"` + Headers *map[string]*string `json:"headers,omitempty"` + ReasonPhrase *string `json:"reasonPhrase,omitempty"` + StatusCode *string `json:"statusCode,omitempty"` + Version *string `json:"version,omitempty"` +} + // OperationDefinition is the definition of a container registry operation. type OperationDefinition struct { Name *string `json:"name,omitempty"` Display *OperationDisplayDefinition `json:"display,omitempty"` } -// OperationDisplayDefinition is the display information for a container -// registry operation. +// OperationDisplayDefinition is the display information for a container registry operation. type OperationDisplayDefinition struct { Provider *string `json:"provider,omitempty"` Resource *string `json:"resource,omitempty"` @@ -68,8 +198,7 @@ type OperationDisplayDefinition struct { Description *string `json:"description,omitempty"` } -// OperationListResult is the result of a request to list container registry -// operations. +// OperationListResult is the result of a request to list container registry operations. type OperationListResult struct { autorest.Response `json:"-"` Value *[]OperationDefinition `json:"value,omitempty"` @@ -88,8 +217,7 @@ func (client OperationListResult) OperationListResultPreparer() (*http.Request, autorest.WithBaseURL(to.String(client.NextLink))) } -// RegenerateCredentialParameters is the parameters used to regenerate the -// login credential. +// RegenerateCredentialParameters is the parameters used to regenerate the login credential. type RegenerateCredentialParameters struct { Name PasswordName `json:"name,omitempty"` } @@ -106,17 +234,7 @@ type Registry struct { *RegistryProperties `json:"properties,omitempty"` } -// RegistryCreateParameters is the parameters for creating a container -// registry. -type RegistryCreateParameters struct { - Tags *map[string]*string `json:"tags,omitempty"` - Location *string `json:"location,omitempty"` - Sku *Sku `json:"sku,omitempty"` - *RegistryPropertiesCreateParameters `json:"properties,omitempty"` -} - -// RegistryListCredentialsResult is the response from the ListCredentials -// operation. +// RegistryListCredentialsResult is the response from the ListCredentials operation. type RegistryListCredentialsResult struct { autorest.Response `json:"-"` Username *string `json:"username,omitempty"` @@ -142,15 +260,13 @@ func (client RegistryListResult) RegistryListResultPreparer() (*http.Request, er autorest.WithBaseURL(to.String(client.NextLink))) } -// RegistryNameCheckRequest is a request to check whether a container registry -// name is available. +// RegistryNameCheckRequest is a request to check whether a container registry name is available. 
type RegistryNameCheckRequest struct { Name *string `json:"name,omitempty"` Type *string `json:"type,omitempty"` } -// RegistryNameStatus is the result of a request to check the availability of a -// container registry name. +// RegistryNameStatus is the result of a request to check the availability of a container registry name. type RegistryNameStatus struct { autorest.Response `json:"-"` NameAvailable *bool `json:"nameAvailable,omitempty"` @@ -169,31 +285,88 @@ type RegistryProperties struct { LoginServer *string `json:"loginServer,omitempty"` CreationDate *date.Time `json:"creationDate,omitempty"` ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` + Status *Status `json:"status,omitempty"` AdminUserEnabled *bool `json:"adminUserEnabled,omitempty"` StorageAccount *StorageAccountProperties `json:"storageAccount,omitempty"` } -// RegistryPropertiesCreateParameters is the parameters for creating the -// properties of a container registry. -type RegistryPropertiesCreateParameters struct { - AdminUserEnabled *bool `json:"adminUserEnabled,omitempty"` - StorageAccount *StorageAccountParameters `json:"storageAccount,omitempty"` -} - -// RegistryPropertiesUpdateParameters is the parameters for updating the -// properties of a container registry. +// RegistryPropertiesUpdateParameters is the parameters for updating the properties of a container registry. type RegistryPropertiesUpdateParameters struct { AdminUserEnabled *bool `json:"adminUserEnabled,omitempty"` - StorageAccount *StorageAccountParameters `json:"storageAccount,omitempty"` + StorageAccount *StorageAccountProperties `json:"storageAccount,omitempty"` } -// RegistryUpdateParameters is the parameters for updating a container -// registry. +// RegistryUpdateParameters is the parameters for updating a container registry. type RegistryUpdateParameters struct { Tags *map[string]*string `json:"tags,omitempty"` + Sku *Sku `json:"sku,omitempty"` *RegistryPropertiesUpdateParameters `json:"properties,omitempty"` } +// RegistryUsage is the quota usage for a container registry. +type RegistryUsage struct { + Name *string `json:"name,omitempty"` + Limit *int64 `json:"limit,omitempty"` + CurrentValue *int64 `json:"currentValue,omitempty"` + Unit RegistryUsageUnit `json:"unit,omitempty"` +} + +// RegistryUsageListResult is the result of a request to get container registry quota usages. +type RegistryUsageListResult struct { + autorest.Response `json:"-"` + Value *[]RegistryUsage `json:"value,omitempty"` +} + +// Replication is an object that represents a replication for a container registry. +type Replication struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + *ReplicationProperties `json:"properties,omitempty"` +} + +// ReplicationListResult is the result of a request to list replications for a container registry. +type ReplicationListResult struct { + autorest.Response `json:"-"` + Value *[]Replication `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// ReplicationListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client ReplicationListResult) ReplicationListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// ReplicationProperties is the properties of a replication. +type ReplicationProperties struct { + ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` + Status *Status `json:"status,omitempty"` +} + +// ReplicationUpdateParameters is the parameters for updating a replication. +type ReplicationUpdateParameters struct { + Tags *map[string]*string `json:"tags,omitempty"` +} + +// Request is the request that generated the event. +type Request struct { + ID *string `json:"id,omitempty"` + Addr *string `json:"addr,omitempty"` + Host *string `json:"host,omitempty"` + Method *string `json:"method,omitempty"` + Useragent *string `json:"useragent,omitempty"` +} + // Resource is an Azure resource. type Resource struct { ID *string `json:"id,omitempty"` @@ -205,19 +378,106 @@ type Resource struct { // Sku is the SKU of a container registry. type Sku struct { - Name *string `json:"name,omitempty"` + Name SkuName `json:"name,omitempty"` Tier SkuTier `json:"tier,omitempty"` } -// StorageAccountParameters is the parameters of a storage account for a -// container registry. -type StorageAccountParameters struct { - Name *string `json:"name,omitempty"` - AccessKey *string `json:"accessKey,omitempty"` +// Source is the registry node that generated the event. Put differently, while the actor initiates the event, the +// source generates it. +type Source struct { + Addr *string `json:"addr,omitempty"` + InstanceID *string `json:"instanceID,omitempty"` } -// StorageAccountProperties is the properties of a storage account for a -// container registry. +// Status is the status of an Azure resource at the time the operation was called. +type Status struct { + DisplayStatus *string `json:"displayStatus,omitempty"` + Message *string `json:"message,omitempty"` + Timestamp *date.Time `json:"timestamp,omitempty"` +} + +// StorageAccountProperties is the properties of a storage account for a container registry. Only applicable to Classic +// SKU. type StorageAccountProperties struct { - Name *string `json:"name,omitempty"` + ID *string `json:"id,omitempty"` +} + +// Target is the target of the event. +type Target struct { + MediaType *string `json:"mediaType,omitempty"` + Size *int64 `json:"size,omitempty"` + Digest *string `json:"digest,omitempty"` + Length *int64 `json:"length,omitempty"` + Repository *string `json:"repository,omitempty"` + URL *string `json:"url,omitempty"` + Tag *string `json:"tag,omitempty"` +} + +// Webhook is an object that represents a webhook for a container registry. +type Webhook struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + *WebhookProperties `json:"properties,omitempty"` +} + +// WebhookCreateParameters is the parameters for creating a webhook. +type WebhookCreateParameters struct { + Tags *map[string]*string `json:"tags,omitempty"` + Location *string `json:"location,omitempty"` + *WebhookPropertiesCreateParameters `json:"properties,omitempty"` +} + +// WebhookListResult is the result of a request to list webhooks for a container registry. 
+type WebhookListResult struct { + autorest.Response `json:"-"` + Value *[]Webhook `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// WebhookListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client WebhookListResult) WebhookListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// WebhookProperties is the properties of a webhook. +type WebhookProperties struct { + Status WebhookStatus `json:"status,omitempty"` + Scope *string `json:"scope,omitempty"` + Actions *[]WebhookAction `json:"actions,omitempty"` + ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` +} + +// WebhookPropertiesCreateParameters is the parameters for creating the properties of a webhook. +type WebhookPropertiesCreateParameters struct { + ServiceURI *string `json:"serviceUri,omitempty"` + CustomHeaders *map[string]*string `json:"customHeaders,omitempty"` + Status WebhookStatus `json:"status,omitempty"` + Scope *string `json:"scope,omitempty"` + Actions *[]WebhookAction `json:"actions,omitempty"` +} + +// WebhookPropertiesUpdateParameters is the parameters for updating the properties of a webhook. +type WebhookPropertiesUpdateParameters struct { + ServiceURI *string `json:"serviceUri,omitempty"` + CustomHeaders *map[string]*string `json:"customHeaders,omitempty"` + Status WebhookStatus `json:"status,omitempty"` + Scope *string `json:"scope,omitempty"` + Actions *[]WebhookAction `json:"actions,omitempty"` +} + +// WebhookUpdateParameters is the parameters for updating a webhook. +type WebhookUpdateParameters struct { + Tags *map[string]*string `json:"tags,omitempty"` + *WebhookPropertiesUpdateParameters `json:"properties,omitempty"` } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/operations.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/operations.go index a1694180c5fb..a9143e739664 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/operations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/operations.go @@ -14,9 +14,8 @@ package containerregistry // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/Azure/go-autorest/autorest" @@ -24,8 +23,7 @@ import ( "net/http" ) -// OperationsClient is the client for the Operations methods of the -// Containerregistry service. +// OperationsClient is the client for the Operations methods of the Containerregistry service. type OperationsClient struct { ManagementClient } @@ -35,14 +33,12 @@ func NewOperationsClient(subscriptionID string) OperationsClient { return NewOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewOperationsClientWithBaseURI creates an instance of the OperationsClient -// client. +// NewOperationsClientWithBaseURI creates an instance of the OperationsClient client. 
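A minimal sketch of how the new webhook model types above might be populated by a caller. This is illustration only, not part of the vendored change; the WebhooksClient that would accept these parameters is not shown in this hunk, and the package name, region, and service URI are placeholders.

package example

import (
	"github.com/Azure/azure-sdk-for-go/arm/containerregistry"
)

// newPushWebhookParams builds WebhookCreateParameters for a webhook that fires
// on image pushes, using the WebhookAction and WebhookStatus constants added
// in models.go. Location and serviceURI are placeholder values.
func newPushWebhookParams(serviceURI string) containerregistry.WebhookCreateParameters {
	location := "eastus" // placeholder region
	actions := []containerregistry.WebhookAction{containerregistry.Push}
	return containerregistry.WebhookCreateParameters{
		Location: &location,
		WebhookPropertiesCreateParameters: &containerregistry.WebhookPropertiesCreateParameters{
			ServiceURI: &serviceURI,
			Status:     containerregistry.Enabled,
			Actions:    &actions,
		},
	}
}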
func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient { return OperationsClient{NewWithBaseURI(baseURI, subscriptionID)} } -// List lists all of the available Azure Container Registry REST API -// operations. +// List lists all of the available Azure Container Registry REST API operations. func (client OperationsClient) List() (result OperationListResult, err error) { req, err := client.ListPreparer() if err != nil { @@ -67,7 +63,7 @@ func (client OperationsClient) List() (result OperationListResult, err error) { // ListPreparer prepares the List request. func (client OperationsClient) ListPreparer() (*http.Request, error) { - const APIVersion = "2017-03-01" + const APIVersion = "2017-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -122,3 +118,48 @@ func (client OperationsClient) ListNextResults(lastResults OperationListResult) return } + +// ListComplete gets all elements from the list without paging. +func (client OperationsClient) ListComplete(cancel <-chan struct{}) (<-chan OperationDefinition, <-chan error) { + resultChan := make(chan OperationDefinition) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List() + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/registries.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/registries.go index fe03ba33818e..c1c9af59e838 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/registries.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/registries.go @@ -14,9 +14,8 @@ package containerregistry // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/Azure/go-autorest/autorest" @@ -25,8 +24,7 @@ import ( "net/http" ) -// RegistriesClient is the client for the Registries methods of the -// Containerregistry service. +// RegistriesClient is the client for the Registries methods of the Containerregistry service. type RegistriesClient struct { ManagementClient } @@ -36,18 +34,15 @@ func NewRegistriesClient(subscriptionID string) RegistriesClient { return NewRegistriesClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewRegistriesClientWithBaseURI creates an instance of the RegistriesClient -// client. +// NewRegistriesClientWithBaseURI creates an instance of the RegistriesClient client. 
func NewRegistriesClientWithBaseURI(baseURI string, subscriptionID string) RegistriesClient { return RegistriesClient{NewWithBaseURI(baseURI, subscriptionID)} } -// CheckNameAvailability checks whether the container registry name is -// available for use. The name must contain only alphanumeric characters, be -// globally unique, and between 5 and 60 characters in length. +// CheckNameAvailability checks whether the container registry name is available for use. The name must contain only +// alphanumeric characters, be globally unique, and between 5 and 50 characters in length. // -// registryNameCheckRequest is the object containing information for the -// availability request. +// registryNameCheckRequest is the object containing information for the availability request. func (client RegistriesClient) CheckNameAvailability(registryNameCheckRequest RegistryNameCheckRequest) (result RegistryNameStatus, err error) { if err := validation.Validate([]validation.Validation{ {TargetValue: registryNameCheckRequest, @@ -87,7 +82,7 @@ func (client RegistriesClient) CheckNameAvailabilityPreparer(registryNameCheckRe "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -121,16 +116,13 @@ func (client RegistriesClient) CheckNameAvailabilityResponder(resp *http.Respons return } -// Create creates a container registry with the specified parameters. This -// method may poll for completion. Polling can be canceled by passing the -// cancel channel argument. The channel will be used to cancel polling and any -// outstanding HTTP requests. +// Create creates a container registry with the specified parameters. This method may poll for completion. Polling can +// be canceled by passing the cancel channel argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. // -// resourceGroupName is the name of the resource group to which the container -// registry belongs. registryName is the name of the container registry. -// registryCreateParameters is the parameters for creating a container -// registry. -func (client RegistriesClient) Create(resourceGroupName string, registryName string, registryCreateParameters RegistryCreateParameters, cancel <-chan struct{}) (<-chan Registry, <-chan error) { +// resourceGroupName is the name of the resource group to which the container registry belongs. registryName is the +// name of the container registry. registry is the parameters for creating a container registry. 
+func (client RegistriesClient) Create(resourceGroupName string, registryName string, registry Registry, cancel <-chan struct{}) (<-chan Registry, <-chan error) { resultChan := make(chan Registry, 1) errChan := make(chan error, 1) if err := validation.Validate([]validation.Validation{ @@ -138,15 +130,11 @@ func (client RegistriesClient) Create(resourceGroupName string, registryName str Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}, - {TargetValue: registryCreateParameters, - Constraints: []validation.Constraint{{Target: "registryCreateParameters.Location", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "registryCreateParameters.Sku", Name: validation.Null, Rule: true, - Chain: []validation.Constraint{{Target: "registryCreateParameters.Sku.Name", Name: validation.Null, Rule: true, Chain: nil}}}, - {Target: "registryCreateParameters.RegistryPropertiesCreateParameters", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "registryCreateParameters.RegistryPropertiesCreateParameters.StorageAccount", Name: validation.Null, Rule: true, - Chain: []validation.Constraint{{Target: "registryCreateParameters.RegistryPropertiesCreateParameters.StorageAccount.Name", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "registryCreateParameters.RegistryPropertiesCreateParameters.StorageAccount.AccessKey", Name: validation.Null, Rule: true, Chain: nil}, - }}, + {TargetValue: registry, + Constraints: []validation.Constraint{{Target: "registry.Sku", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "registry.RegistryProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "registry.RegistryProperties.StorageAccount", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "registry.RegistryProperties.StorageAccount.ID", Name: validation.Null, Rule: true, Chain: nil}}}, }}}}}); err != nil { errChan <- validation.NewErrorWithValidationError(err, "containerregistry.RegistriesClient", "Create") close(errChan) @@ -158,12 +146,14 @@ func (client RegistriesClient) Create(resourceGroupName string, registryName str var err error var result Registry defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() - req, err := client.CreatePreparer(resourceGroupName, registryName, registryCreateParameters, cancel) + req, err := client.CreatePreparer(resourceGroupName, registryName, registry, cancel) if err != nil { err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "Create", nil, "Failure preparing request") return @@ -185,14 +175,14 @@ func (client RegistriesClient) Create(resourceGroupName string, registryName str } // CreatePreparer prepares the Create request. 
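A minimal caller-side sketch of the reworked Create call, which now takes a Registry value and returns result and error channels. It assumes Registry exposes the Sku field referenced by the validation constraints above; the subscription ID and resource names are placeholders, and authorizer setup is omitted. This is illustration only, not part of the vendored change.

package example

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/arm/containerregistry"
)

// createRegistry shows how the channel-based Create might be consumed after the
// error-handling change: the error channel only receives when an error
// occurred, while the result channel always delivers exactly one value.
func createRegistry(subscriptionID string) error {
	client := containerregistry.NewRegistriesClient(subscriptionID) // authorizer configuration omitted

	registry := containerregistry.Registry{
		// Sku is required by the validation rules shown in Create above.
		Sku: &containerregistry.Sku{Name: containerregistry.Basic},
	}

	cancel := make(chan struct{})
	resultChan, errChan := client.Create("my-resource-group", "myregistry", registry, cancel)

	result := <-resultChan
	if err := <-errChan; err != nil {
		return err
	}
	if result.RegistryProperties != nil {
		fmt.Println("provisioning state:", result.RegistryProperties.ProvisioningState)
	}
	return nil
}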
-func (client RegistriesClient) CreatePreparer(resourceGroupName string, registryName string, registryCreateParameters RegistryCreateParameters, cancel <-chan struct{}) (*http.Request, error) { +func (client RegistriesClient) CreatePreparer(resourceGroupName string, registryName string, registry Registry, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ "registryName": autorest.Encode("path", registryName), "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -202,7 +192,7 @@ func (client RegistriesClient) CreatePreparer(resourceGroupName string, registry autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}", pathParameters), - autorest.WithJSON(registryCreateParameters), + autorest.WithJSON(registry), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare(&http.Request{Cancel: cancel}) } @@ -221,56 +211,73 @@ func (client RegistriesClient) CreateResponder(resp *http.Response) (result Regi err = autorest.Respond( resp, client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } -// Delete deletes a container registry. +// Delete deletes a container registry. This method may poll for completion. Polling can be canceled by passing the +// cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group to which the container -// registry belongs. registryName is the name of the container registry. -func (client RegistriesClient) Delete(resourceGroupName string, registryName string) (result autorest.Response, err error) { +// resourceGroupName is the name of the resource group to which the container registry belongs. registryName is the +// name of the container registry. 
+func (client RegistriesClient) Delete(resourceGroupName string, registryName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { + resultChan := make(chan autorest.Response, 1) + errChan := make(chan error, 1) if err := validation.Validate([]validation.Validation{ {TargetValue: registryName, Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}}); err != nil { - return result, validation.NewErrorWithValidationError(err, "containerregistry.RegistriesClient", "Delete") - } - - req, err := client.DeletePreparer(resourceGroupName, registryName) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "Delete", nil, "Failure preparing request") - return + errChan <- validation.NewErrorWithValidationError(err, "containerregistry.RegistriesClient", "Delete") + close(errChan) + close(resultChan) + return resultChan, errChan } - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "Delete", resp, "Failure sending request") - return - } + go func() { + var err error + var result autorest.Response + defer func() { + if err != nil { + errChan <- err + } + resultChan <- result + close(resultChan) + close(errChan) + }() + req, err := client.DeletePreparer(resourceGroupName, registryName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "Delete", nil, "Failure preparing request") + return + } - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "Delete", resp, "Failure responding to request") - } + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "Delete", resp, "Failure sending request") + return + } - return + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "Delete", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // DeletePreparer prepares the Delete request. 
-func (client RegistriesClient) DeletePreparer(resourceGroupName string, registryName string) (*http.Request, error) { +func (client RegistriesClient) DeletePreparer(resourceGroupName string, registryName string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ "registryName": autorest.Encode("path", registryName), "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -280,13 +287,15 @@ func (client RegistriesClient) DeletePreparer(resourceGroupName string, registry autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}", pathParameters), autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare(&http.Request{}) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client RegistriesClient) DeleteSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // DeleteResponder handles the response to the Delete request. The method always @@ -295,7 +304,7 @@ func (client RegistriesClient) DeleteResponder(resp *http.Response) (result auto err = autorest.Respond( resp, client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), autorest.ByClosing()) result.Response = resp return @@ -303,8 +312,8 @@ func (client RegistriesClient) DeleteResponder(resp *http.Response) (result auto // Get gets the properties of the specified container registry. // -// resourceGroupName is the name of the resource group to which the container -// registry belongs. registryName is the name of the container registry. +// resourceGroupName is the name of the resource group to which the container registry belongs. registryName is the +// name of the container registry. func (client RegistriesClient) Get(resourceGroupName string, registryName string) (result Registry, err error) { if err := validation.Validate([]validation.Validation{ {TargetValue: registryName, @@ -343,7 +352,7 @@ func (client RegistriesClient) GetPreparer(resourceGroupName string, registryNam "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -404,7 +413,7 @@ func (client RegistriesClient) ListPreparer() (*http.Request, error) { "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -460,11 +469,54 @@ func (client RegistriesClient) ListNextResults(lastResults RegistryListResult) ( return } -// ListByResourceGroup lists all the container registries under the specified -// resource group. +// ListComplete gets all elements from the list without paging. 
+func (client RegistriesClient) ListComplete(cancel <-chan struct{}) (<-chan Registry, <-chan error) { + resultChan := make(chan Registry) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List() + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} + +// ListByResourceGroup lists all the container registries under the specified resource group. // -// resourceGroupName is the name of the resource group to which the container -// registry belongs. +// resourceGroupName is the name of the resource group to which the container registry belongs. func (client RegistriesClient) ListByResourceGroup(resourceGroupName string) (result RegistryListResult, err error) { req, err := client.ListByResourceGroupPreparer(resourceGroupName) if err != nil { @@ -494,7 +546,7 @@ func (client RegistriesClient) ListByResourceGroupPreparer(resourceGroupName str "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -550,11 +602,55 @@ func (client RegistriesClient) ListByResourceGroupNextResults(lastResults Regist return } -// ListCredentials lists the login credentials for the specified container -// registry. +// ListByResourceGroupComplete gets all elements from the list without paging. +func (client RegistriesClient) ListByResourceGroupComplete(resourceGroupName string, cancel <-chan struct{}) (<-chan Registry, <-chan error) { + resultChan := make(chan Registry) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.ListByResourceGroup(resourceGroupName) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListByResourceGroupNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} + +// ListCredentials lists the login credentials for the specified container registry. // -// resourceGroupName is the name of the resource group to which the container -// registry belongs. registryName is the name of the container registry. +// resourceGroupName is the name of the resource group to which the container registry belongs. registryName is the +// name of the container registry. 
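A minimal sketch of draining the new ListComplete helper, which streams every Registry across all result pages over a channel. The Name field is assumed to follow the resource pattern used by Replication and Webhook in models.go; the subscription ID is a placeholder and authorizer setup is omitted. This is illustration only, not part of the vendored change.

package example

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/arm/containerregistry"
)

// printAllRegistries consumes the paging-free ListComplete enumeration.
func printAllRegistries(subscriptionID string) error {
	client := containerregistry.NewRegistriesClient(subscriptionID) // authorizer configuration omitted

	cancel := make(chan struct{})
	defer close(cancel) // would stop the producer goroutine on an early return

	registries, errs := client.ListComplete(cancel)
	for registry := range registries {
		if registry.Name != nil {
			fmt.Println(*registry.Name)
		}
	}
	// The result channel closes when enumeration ends; at most one error is
	// then available on the error channel.
	return <-errs
}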
func (client RegistriesClient) ListCredentials(resourceGroupName string, registryName string) (result RegistryListCredentialsResult, err error) { if err := validation.Validate([]validation.Validation{ {TargetValue: registryName, @@ -593,7 +689,7 @@ func (client RegistriesClient) ListCredentialsPreparer(resourceGroupName string, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -625,13 +721,85 @@ func (client RegistriesClient) ListCredentialsResponder(resp *http.Response) (re return } -// RegenerateCredential regenerates one of the login credentials for the -// specified container registry. +// ListUsages gets the quota usages for the specified container registry. // -// resourceGroupName is the name of the resource group to which the container -// registry belongs. registryName is the name of the container registry. -// regenerateCredentialParameters is specifies name of the password which -// should be regenerated -- password or password2. +// resourceGroupName is the name of the resource group to which the container registry belongs. registryName is the +// name of the container registry. +func (client RegistriesClient) ListUsages(resourceGroupName string, registryName string) (result RegistryUsageListResult, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "containerregistry.RegistriesClient", "ListUsages") + } + + req, err := client.ListUsagesPreparer(resourceGroupName, registryName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "ListUsages", nil, "Failure preparing request") + return + } + + resp, err := client.ListUsagesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "ListUsages", resp, "Failure sending request") + return + } + + result, err = client.ListUsagesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "ListUsages", resp, "Failure responding to request") + } + + return +} + +// ListUsagesPreparer prepares the ListUsages request. 
+func (client RegistriesClient) ListUsagesPreparer(resourceGroupName string, registryName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-10-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/listUsages", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListUsagesSender sends the ListUsages request. The method will close the +// http.Response Body if it receives an error. +func (client RegistriesClient) ListUsagesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListUsagesResponder handles the response to the ListUsages request. The method always +// closes the http.Response Body. +func (client RegistriesClient) ListUsagesResponder(resp *http.Response) (result RegistryUsageListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// RegenerateCredential regenerates one of the login credentials for the specified container registry. +// +// resourceGroupName is the name of the resource group to which the container registry belongs. registryName is the +// name of the container registry. regenerateCredentialParameters is specifies name of the password which should be +// regenerated -- password or password2. func (client RegistriesClient) RegenerateCredential(resourceGroupName string, registryName string, regenerateCredentialParameters RegenerateCredentialParameters) (result RegistryListCredentialsResult, err error) { if err := validation.Validate([]validation.Validation{ {TargetValue: registryName, @@ -670,7 +838,7 @@ func (client RegistriesClient) RegenerateCredentialPreparer(resourceGroupName st "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -704,51 +872,67 @@ func (client RegistriesClient) RegenerateCredentialResponder(resp *http.Response return } -// Update updates a container registry with the specified parameters. +// Update updates a container registry with the specified parameters. This method may poll for completion. Polling can +// be canceled by passing the cancel channel argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. // -// resourceGroupName is the name of the resource group to which the container -// registry belongs. registryName is the name of the container registry. -// registryUpdateParameters is the parameters for updating a container -// registry. 
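A minimal sketch of the new synchronous ListUsages operation, using the RegistryUsage fields defined in models.go. The subscription ID and resource names are placeholders, and authorizer setup is omitted. This is illustration only, not part of the vendored change.

package example

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/arm/containerregistry"
)

// printUsages reports the quota usages of a container registry.
func printUsages(subscriptionID string) error {
	client := containerregistry.NewRegistriesClient(subscriptionID) // authorizer configuration omitted

	usages, err := client.ListUsages("my-resource-group", "myregistry")
	if err != nil {
		return err
	}
	if usages.Value == nil {
		return nil
	}
	for _, u := range *usages.Value {
		if u.Name != nil && u.CurrentValue != nil && u.Limit != nil {
			// Unit is a RegistryUsageUnit, either Bytes or Count.
			fmt.Printf("%s: %d of %d (%s)\n", *u.Name, *u.CurrentValue, *u.Limit, u.Unit)
		}
	}
	return nil
}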
-func (client RegistriesClient) Update(resourceGroupName string, registryName string, registryUpdateParameters RegistryUpdateParameters) (result Registry, err error) { +// resourceGroupName is the name of the resource group to which the container registry belongs. registryName is the +// name of the container registry. registryUpdateParameters is the parameters for updating a container registry. +func (client RegistriesClient) Update(resourceGroupName string, registryName string, registryUpdateParameters RegistryUpdateParameters, cancel <-chan struct{}) (<-chan Registry, <-chan error) { + resultChan := make(chan Registry, 1) + errChan := make(chan error, 1) if err := validation.Validate([]validation.Validation{ {TargetValue: registryName, Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}}); err != nil { - return result, validation.NewErrorWithValidationError(err, "containerregistry.RegistriesClient", "Update") - } - - req, err := client.UpdatePreparer(resourceGroupName, registryName, registryUpdateParameters) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "Update", nil, "Failure preparing request") - return + errChan <- validation.NewErrorWithValidationError(err, "containerregistry.RegistriesClient", "Update") + close(errChan) + close(resultChan) + return resultChan, errChan } - resp, err := client.UpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "Update", resp, "Failure sending request") - return - } + go func() { + var err error + var result Registry + defer func() { + if err != nil { + errChan <- err + } + resultChan <- result + close(resultChan) + close(errChan) + }() + req, err := client.UpdatePreparer(resourceGroupName, registryName, registryUpdateParameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "Update", nil, "Failure preparing request") + return + } - result, err = client.UpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "Update", resp, "Failure responding to request") - } + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "Update", resp, "Failure sending request") + return + } - return + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "Update", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // UpdatePreparer prepares the Update request. 
-func (client RegistriesClient) UpdatePreparer(resourceGroupName string, registryName string, registryUpdateParameters RegistryUpdateParameters) (*http.Request, error) { +func (client RegistriesClient) UpdatePreparer(resourceGroupName string, registryName string, registryUpdateParameters RegistryUpdateParameters, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ "registryName": autorest.Encode("path", registryName), "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-10-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -760,13 +944,15 @@ func (client RegistriesClient) UpdatePreparer(resourceGroupName string, registry autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}", pathParameters), autorest.WithJSON(registryUpdateParameters), autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare(&http.Request{}) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // UpdateSender sends the Update request. The method will close the // http.Response Body if it receives an error. func (client RegistriesClient) UpdateSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // UpdateResponder handles the response to the Update request. The method always @@ -775,7 +961,7 @@ func (client RegistriesClient) UpdateResponder(resp *http.Response) (result Regi err = autorest.Respond( resp, client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/replications.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/replications.go new file mode 100644 index 000000000000..79fce2d21ee8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/replications.go @@ -0,0 +1,564 @@ +package containerregistry + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "net/http" +) + +// ReplicationsClient is the client for the Replications methods of the Containerregistry service. 
+type ReplicationsClient struct { + ManagementClient +} + +// NewReplicationsClient creates an instance of the ReplicationsClient client. +func NewReplicationsClient(subscriptionID string) ReplicationsClient { + return NewReplicationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewReplicationsClientWithBaseURI creates an instance of the ReplicationsClient client. +func NewReplicationsClientWithBaseURI(baseURI string, subscriptionID string) ReplicationsClient { + return ReplicationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Create creates a replication for a container registry with the specified parameters. This method may poll for +// completion. Polling can be canceled by passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group to which the container registry belongs. registryName is the +// name of the container registry. replicationName is the name of the replication. replication is the parameters for +// creating a replication. +func (client ReplicationsClient) Create(resourceGroupName string, registryName string, replicationName string, replication Replication, cancel <-chan struct{}) (<-chan Replication, <-chan error) { + resultChan := make(chan Replication, 1) + errChan := make(chan error, 1) + if err := validation.Validate([]validation.Validation{ + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}, + {TargetValue: replicationName, + Constraints: []validation.Constraint{{Target: "replicationName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "replicationName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "replicationName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}}); err != nil { + errChan <- validation.NewErrorWithValidationError(err, "containerregistry.ReplicationsClient", "Create") + close(errChan) + close(resultChan) + return resultChan, errChan + } + + go func() { + var err error + var result Replication + defer func() { + if err != nil { + errChan <- err + } + resultChan <- result + close(resultChan) + close(errChan) + }() + req, err := client.CreatePreparer(resourceGroupName, registryName, replicationName, replication, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsClient", "Create", nil, "Failure preparing request") + return + } + + resp, err := client.CreateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsClient", "Create", resp, "Failure sending request") + return + } + + result, err = client.CreateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsClient", "Create", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// CreatePreparer prepares the Create request. 
+func (client ReplicationsClient) CreatePreparer(resourceGroupName string, registryName string, replicationName string, replication Replication, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "replicationName": autorest.Encode("path", replicationName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-10-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}", pathParameters), + autorest.WithJSON(replication), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateSender sends the Create request. The method will close the +// http.Response Body if it receives an error. +func (client ReplicationsClient) CreateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateResponder handles the response to the Create request. The method always +// closes the http.Response Body. +func (client ReplicationsClient) CreateResponder(resp *http.Response) (result Replication, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a replication from a container registry. This method may poll for completion. Polling can be canceled +// by passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP +// requests. +// +// resourceGroupName is the name of the resource group to which the container registry belongs. registryName is the +// name of the container registry. replicationName is the name of the replication. 
+func (client ReplicationsClient) Delete(resourceGroupName string, registryName string, replicationName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { + resultChan := make(chan autorest.Response, 1) + errChan := make(chan error, 1) + if err := validation.Validate([]validation.Validation{ + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}, + {TargetValue: replicationName, + Constraints: []validation.Constraint{{Target: "replicationName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "replicationName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "replicationName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}}); err != nil { + errChan <- validation.NewErrorWithValidationError(err, "containerregistry.ReplicationsClient", "Delete") + close(errChan) + close(resultChan) + return resultChan, errChan + } + + go func() { + var err error + var result autorest.Response + defer func() { + if err != nil { + errChan <- err + } + resultChan <- result + close(resultChan) + close(errChan) + }() + req, err := client.DeletePreparer(resourceGroupName, registryName, replicationName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsClient", "Delete", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// DeletePreparer prepares the Delete request. +func (client ReplicationsClient) DeletePreparer(resourceGroupName string, registryName string, replicationName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "replicationName": autorest.Encode("path", replicationName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-10-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client ReplicationsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. 
+func (client ReplicationsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the properties of the specified replication. +// +// resourceGroupName is the name of the resource group to which the container registry belongs. registryName is the +// name of the container registry. replicationName is the name of the replication. +func (client ReplicationsClient) Get(resourceGroupName string, registryName string, replicationName string) (result Replication, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}, + {TargetValue: replicationName, + Constraints: []validation.Constraint{{Target: "replicationName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "replicationName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "replicationName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "containerregistry.ReplicationsClient", "Get") + } + + req, err := client.GetPreparer(resourceGroupName, registryName, replicationName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client ReplicationsClient) GetPreparer(resourceGroupName string, registryName string, replicationName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "replicationName": autorest.Encode("path", replicationName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-10-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. 
+func (client ReplicationsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client ReplicationsClient) GetResponder(resp *http.Response) (result Replication, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists all the replications for the specified container registry. +// +// resourceGroupName is the name of the resource group to which the container registry belongs. registryName is the +// name of the container registry. +func (client ReplicationsClient) List(resourceGroupName string, registryName string) (result ReplicationListResult, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "containerregistry.ReplicationsClient", "List") + } + + req, err := client.ListPreparer(resourceGroupName, registryName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client ReplicationsClient) ListPreparer(resourceGroupName string, registryName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-10-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ReplicationsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client ReplicationsClient) ListResponder(resp *http.Response) (result ReplicationListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client ReplicationsClient) ListNextResults(lastResults ReplicationListResult) (result ReplicationListResult, err error) { + req, err := lastResults.ReplicationListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "containerregistry.ReplicationsClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "containerregistry.ReplicationsClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsClient", "List", resp, "Failure responding to next results request") + } + + return +} + +// ListComplete gets all elements from the list without paging. +func (client ReplicationsClient) ListComplete(resourceGroupName string, registryName string, cancel <-chan struct{}) (<-chan Replication, <-chan error) { + resultChan := make(chan Replication) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List(resourceGroupName, registryName) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} + +// Update updates a replication for a container registry with the specified parameters. This method may poll for +// completion. Polling can be canceled by passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group to which the container registry belongs. registryName is the +// name of the container registry. replicationName is the name of the replication. replicationUpdateParameters is the +// parameters for updating a replication. 
+func (client ReplicationsClient) Update(resourceGroupName string, registryName string, replicationName string, replicationUpdateParameters ReplicationUpdateParameters, cancel <-chan struct{}) (<-chan Replication, <-chan error) { + resultChan := make(chan Replication, 1) + errChan := make(chan error, 1) + if err := validation.Validate([]validation.Validation{ + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}, + {TargetValue: replicationName, + Constraints: []validation.Constraint{{Target: "replicationName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "replicationName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "replicationName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}}); err != nil { + errChan <- validation.NewErrorWithValidationError(err, "containerregistry.ReplicationsClient", "Update") + close(errChan) + close(resultChan) + return resultChan, errChan + } + + go func() { + var err error + var result Replication + defer func() { + if err != nil { + errChan <- err + } + resultChan <- result + close(resultChan) + close(errChan) + }() + req, err := client.UpdatePreparer(resourceGroupName, registryName, replicationName, replicationUpdateParameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsClient", "Update", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsClient", "Update", resp, "Failure sending request") + return + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.ReplicationsClient", "Update", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// UpdatePreparer prepares the Update request. +func (client ReplicationsClient) UpdatePreparer(resourceGroupName string, registryName string, replicationName string, replicationUpdateParameters ReplicationUpdateParameters, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "replicationName": autorest.Encode("path", replicationName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-10-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}", pathParameters), + autorest.WithJSON(replicationUpdateParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. 
+func (client ReplicationsClient) UpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client ReplicationsClient) UpdateResponder(resp *http.Response) (result Replication, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/version.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/version.go index e586a01ce796..6e3ee863831d 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/version.go @@ -14,16 +14,15 @@ package containerregistry // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. // UserAgent returns the UserAgent string to use when sending http.Requests. func UserAgent() string { - return "Azure-SDK-For-Go/v10.0.2-beta arm-containerregistry/2017-03-01" + return "Azure-SDK-For-Go/v11.1.0-beta arm-containerregistry/2017-10-01" } // Version returns the semantic version (see http://semver.org) of the client. func Version() string { - return "v10.0.2-beta" + return "v11.1.0-beta" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/webhooks.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/webhooks.go new file mode 100644 index 000000000000..ff5f9ce7844d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/webhooks.go @@ -0,0 +1,875 @@ +package containerregistry + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "net/http" +) + +// WebhooksClient is the client for the Webhooks methods of the Containerregistry service. +type WebhooksClient struct { + ManagementClient +} + +// NewWebhooksClient creates an instance of the WebhooksClient client. 
+func NewWebhooksClient(subscriptionID string) WebhooksClient { + return NewWebhooksClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWebhooksClientWithBaseURI creates an instance of the WebhooksClient client. +func NewWebhooksClientWithBaseURI(baseURI string, subscriptionID string) WebhooksClient { + return WebhooksClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Create creates a webhook for a container registry with the specified parameters. This method may poll for +// completion. Polling can be canceled by passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group to which the container registry belongs. registryName is the +// name of the container registry. webhookName is the name of the webhook. webhookCreateParameters is the parameters +// for creating a webhook. +func (client WebhooksClient) Create(resourceGroupName string, registryName string, webhookName string, webhookCreateParameters WebhookCreateParameters, cancel <-chan struct{}) (<-chan Webhook, <-chan error) { + resultChan := make(chan Webhook, 1) + errChan := make(chan error, 1) + if err := validation.Validate([]validation.Validation{ + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}, + {TargetValue: webhookName, + Constraints: []validation.Constraint{{Target: "webhookName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "webhookName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "webhookName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}, + {TargetValue: webhookCreateParameters, + Constraints: []validation.Constraint{{Target: "webhookCreateParameters.Location", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "webhookCreateParameters.WebhookPropertiesCreateParameters", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "webhookCreateParameters.WebhookPropertiesCreateParameters.ServiceURI", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "webhookCreateParameters.WebhookPropertiesCreateParameters.Actions", Name: validation.Null, Rule: true, Chain: nil}, + }}}}}); err != nil { + errChan <- validation.NewErrorWithValidationError(err, "containerregistry.WebhooksClient", "Create") + close(errChan) + close(resultChan) + return resultChan, errChan + } + + go func() { + var err error + var result Webhook + defer func() { + if err != nil { + errChan <- err + } + resultChan <- result + close(resultChan) + close(errChan) + }() + req, err := client.CreatePreparer(resourceGroupName, registryName, webhookName, webhookCreateParameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "Create", nil, "Failure preparing request") + return + } + + resp, err := client.CreateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "Create", resp, "Failure sending request") + return + } + + result, err = client.CreateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "Create", resp, "Failure responding to request") + } + }() + return 
resultChan, errChan +} + +// CreatePreparer prepares the Create request. +func (client WebhooksClient) CreatePreparer(resourceGroupName string, registryName string, webhookName string, webhookCreateParameters WebhookCreateParameters, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "webhookName": autorest.Encode("path", webhookName), + } + + const APIVersion = "2017-10-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/webhooks/{webhookName}", pathParameters), + autorest.WithJSON(webhookCreateParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateSender sends the Create request. The method will close the +// http.Response Body if it receives an error. +func (client WebhooksClient) CreateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateResponder handles the response to the Create request. The method always +// closes the http.Response Body. +func (client WebhooksClient) CreateResponder(resp *http.Response) (result Webhook, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a webhook from a container registry. This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group to which the container registry belongs. registryName is the +// name of the container registry. webhookName is the name of the webhook. 
+func (client WebhooksClient) Delete(resourceGroupName string, registryName string, webhookName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { + resultChan := make(chan autorest.Response, 1) + errChan := make(chan error, 1) + if err := validation.Validate([]validation.Validation{ + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}, + {TargetValue: webhookName, + Constraints: []validation.Constraint{{Target: "webhookName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "webhookName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "webhookName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}}); err != nil { + errChan <- validation.NewErrorWithValidationError(err, "containerregistry.WebhooksClient", "Delete") + close(errChan) + close(resultChan) + return resultChan, errChan + } + + go func() { + var err error + var result autorest.Response + defer func() { + if err != nil { + errChan <- err + } + resultChan <- result + close(resultChan) + close(errChan) + }() + req, err := client.DeletePreparer(resourceGroupName, registryName, webhookName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "Delete", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// DeletePreparer prepares the Delete request. +func (client WebhooksClient) DeletePreparer(resourceGroupName string, registryName string, webhookName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "webhookName": autorest.Encode("path", webhookName), + } + + const APIVersion = "2017-10-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/webhooks/{webhookName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client WebhooksClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. 
+func (client WebhooksClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the properties of the specified webhook. +// +// resourceGroupName is the name of the resource group to which the container registry belongs. registryName is the +// name of the container registry. webhookName is the name of the webhook. +func (client WebhooksClient) Get(resourceGroupName string, registryName string, webhookName string) (result Webhook, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}, + {TargetValue: webhookName, + Constraints: []validation.Constraint{{Target: "webhookName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "webhookName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "webhookName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "containerregistry.WebhooksClient", "Get") + } + + req, err := client.GetPreparer(resourceGroupName, registryName, webhookName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client WebhooksClient) GetPreparer(resourceGroupName string, registryName string, webhookName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "webhookName": autorest.Encode("path", webhookName), + } + + const APIVersion = "2017-10-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/webhooks/{webhookName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client WebhooksClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. 
+func (client WebhooksClient) GetResponder(resp *http.Response) (result Webhook, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetCallbackConfig gets the configuration of service URI and custom headers for the webhook. +// +// resourceGroupName is the name of the resource group to which the container registry belongs. registryName is the +// name of the container registry. webhookName is the name of the webhook. +func (client WebhooksClient) GetCallbackConfig(resourceGroupName string, registryName string, webhookName string) (result CallbackConfig, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}, + {TargetValue: webhookName, + Constraints: []validation.Constraint{{Target: "webhookName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "webhookName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "webhookName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "containerregistry.WebhooksClient", "GetCallbackConfig") + } + + req, err := client.GetCallbackConfigPreparer(resourceGroupName, registryName, webhookName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "GetCallbackConfig", nil, "Failure preparing request") + return + } + + resp, err := client.GetCallbackConfigSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "GetCallbackConfig", resp, "Failure sending request") + return + } + + result, err = client.GetCallbackConfigResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "GetCallbackConfig", resp, "Failure responding to request") + } + + return +} + +// GetCallbackConfigPreparer prepares the GetCallbackConfig request. +func (client WebhooksClient) GetCallbackConfigPreparer(resourceGroupName string, registryName string, webhookName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "webhookName": autorest.Encode("path", webhookName), + } + + const APIVersion = "2017-10-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/webhooks/{webhookName}/getCallbackConfig", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetCallbackConfigSender sends the GetCallbackConfig request. The method will close the +// http.Response Body if it receives an error. 
+func (client WebhooksClient) GetCallbackConfigSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetCallbackConfigResponder handles the response to the GetCallbackConfig request. The method always +// closes the http.Response Body. +func (client WebhooksClient) GetCallbackConfigResponder(resp *http.Response) (result CallbackConfig, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists all the webhooks for the specified container registry. +// +// resourceGroupName is the name of the resource group to which the container registry belongs. registryName is the +// name of the container registry. +func (client WebhooksClient) List(resourceGroupName string, registryName string) (result WebhookListResult, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "containerregistry.WebhooksClient", "List") + } + + req, err := client.ListPreparer(resourceGroupName, registryName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client WebhooksClient) ListPreparer(resourceGroupName string, registryName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-10-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/webhooks", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client WebhooksClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client WebhooksClient) ListResponder(resp *http.Response) (result WebhookListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client WebhooksClient) ListNextResults(lastResults WebhookListResult) (result WebhookListResult, err error) { + req, err := lastResults.WebhookListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "List", resp, "Failure responding to next results request") + } + + return +} + +// ListComplete gets all elements from the list without paging. +func (client WebhooksClient) ListComplete(resourceGroupName string, registryName string, cancel <-chan struct{}) (<-chan Webhook, <-chan error) { + resultChan := make(chan Webhook) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List(resourceGroupName, registryName) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} + +// ListEvents lists recent events for the specified webhook. +// +// resourceGroupName is the name of the resource group to which the container registry belongs. registryName is the +// name of the container registry. webhookName is the name of the webhook. 
+func (client WebhooksClient) ListEvents(resourceGroupName string, registryName string, webhookName string) (result EventListResult, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}, + {TargetValue: webhookName, + Constraints: []validation.Constraint{{Target: "webhookName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "webhookName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "webhookName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "containerregistry.WebhooksClient", "ListEvents") + } + + req, err := client.ListEventsPreparer(resourceGroupName, registryName, webhookName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "ListEvents", nil, "Failure preparing request") + return + } + + resp, err := client.ListEventsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "ListEvents", resp, "Failure sending request") + return + } + + result, err = client.ListEventsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "ListEvents", resp, "Failure responding to request") + } + + return +} + +// ListEventsPreparer prepares the ListEvents request. +func (client WebhooksClient) ListEventsPreparer(resourceGroupName string, registryName string, webhookName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "webhookName": autorest.Encode("path", webhookName), + } + + const APIVersion = "2017-10-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/webhooks/{webhookName}/listEvents", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListEventsSender sends the ListEvents request. The method will close the +// http.Response Body if it receives an error. +func (client WebhooksClient) ListEventsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListEventsResponder handles the response to the ListEvents request. The method always +// closes the http.Response Body. +func (client WebhooksClient) ListEventsResponder(resp *http.Response) (result EventListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListEventsNextResults retrieves the next set of results, if any. 
+func (client WebhooksClient) ListEventsNextResults(lastResults EventListResult) (result EventListResult, err error) { + req, err := lastResults.EventListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "ListEvents", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListEventsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "ListEvents", resp, "Failure sending next results request") + } + + result, err = client.ListEventsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "ListEvents", resp, "Failure responding to next results request") + } + + return +} + +// ListEventsComplete gets all elements from the list without paging. +func (client WebhooksClient) ListEventsComplete(resourceGroupName string, registryName string, webhookName string, cancel <-chan struct{}) (<-chan Event, <-chan error) { + resultChan := make(chan Event) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.ListEvents(resourceGroupName, registryName, webhookName) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListEventsNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} + +// Ping triggers a ping event to be sent to the webhook. +// +// resourceGroupName is the name of the resource group to which the container registry belongs. registryName is the +// name of the container registry. webhookName is the name of the webhook. 
+func (client WebhooksClient) Ping(resourceGroupName string, registryName string, webhookName string) (result EventInfo, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}, + {TargetValue: webhookName, + Constraints: []validation.Constraint{{Target: "webhookName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "webhookName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "webhookName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "containerregistry.WebhooksClient", "Ping") + } + + req, err := client.PingPreparer(resourceGroupName, registryName, webhookName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "Ping", nil, "Failure preparing request") + return + } + + resp, err := client.PingSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "Ping", resp, "Failure sending request") + return + } + + result, err = client.PingResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "Ping", resp, "Failure responding to request") + } + + return +} + +// PingPreparer prepares the Ping request. +func (client WebhooksClient) PingPreparer(resourceGroupName string, registryName string, webhookName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "webhookName": autorest.Encode("path", webhookName), + } + + const APIVersion = "2017-10-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/webhooks/{webhookName}/ping", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// PingSender sends the Ping request. The method will close the +// http.Response Body if it receives an error. +func (client WebhooksClient) PingSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// PingResponder handles the response to the Ping request. The method always +// closes the http.Response Body. +func (client WebhooksClient) PingResponder(resp *http.Response) (result EventInfo, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Update updates a webhook with the specified parameters. This method may poll for completion. Polling can be canceled +// by passing the cancel channel argument. 
The channel will be used to cancel polling and any outstanding HTTP +// requests. +// +// resourceGroupName is the name of the resource group to which the container registry belongs. registryName is the +// name of the container registry. webhookName is the name of the webhook. webhookUpdateParameters is the parameters +// for updating a webhook. +func (client WebhooksClient) Update(resourceGroupName string, registryName string, webhookName string, webhookUpdateParameters WebhookUpdateParameters, cancel <-chan struct{}) (<-chan Webhook, <-chan error) { + resultChan := make(chan Webhook, 1) + errChan := make(chan error, 1) + if err := validation.Validate([]validation.Validation{ + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}, + {TargetValue: webhookName, + Constraints: []validation.Constraint{{Target: "webhookName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "webhookName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "webhookName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}}); err != nil { + errChan <- validation.NewErrorWithValidationError(err, "containerregistry.WebhooksClient", "Update") + close(errChan) + close(resultChan) + return resultChan, errChan + } + + go func() { + var err error + var result Webhook + defer func() { + if err != nil { + errChan <- err + } + resultChan <- result + close(resultChan) + close(errChan) + }() + req, err := client.UpdatePreparer(resourceGroupName, registryName, webhookName, webhookUpdateParameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "Update", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "Update", resp, "Failure sending request") + return + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.WebhooksClient", "Update", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// UpdatePreparer prepares the Update request. 
+func (client WebhooksClient) UpdatePreparer(resourceGroupName string, registryName string, webhookName string, webhookUpdateParameters WebhookUpdateParameters, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "webhookName": autorest.Encode("path", webhookName), + } + + const APIVersion = "2017-10-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/webhooks/{webhookName}", pathParameters), + autorest.WithJSON(webhookUpdateParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client WebhooksClient) UpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client WebhooksClient) UpdateResponder(resp *http.Response) (result Webhook, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/version.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/version.go index a78b351a0e36..11c4a35ee9c3 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/version.go @@ -14,16 +14,15 @@ package disk // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator 1.1.0.0 +// Changes may cause incorrect behavior and will be lost if the code is regenerated. // UserAgent returns the UserAgent string to use when sending http.Requests. func UserAgent() string { - return "Azure-SDK-For-Go/v10.0.2-beta arm-disk/2016-04-30-preview" + return "Azure-SDK-For-Go/v10.2.0-beta arm-disk/2016-04-30-preview" } // Version returns the semantic version (see http://semver.org) of the client. 
func Version() string { - return "v10.0.2-beta" + return "v10.2.0-beta" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/applicationgateways.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/applicationgateways.go index 4ab4e0734905..c000be117bcd 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/applicationgateways.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/applicationgateways.go @@ -14,9 +14,8 @@ package network // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/Azure/go-autorest/autorest" @@ -25,31 +24,27 @@ import ( "net/http" ) -// ApplicationGatewaysClient is the composite Swagger for Network Client +// ApplicationGatewaysClient is the network Client type ApplicationGatewaysClient struct { ManagementClient } -// NewApplicationGatewaysClient creates an instance of the -// ApplicationGatewaysClient client. +// NewApplicationGatewaysClient creates an instance of the ApplicationGatewaysClient client. func NewApplicationGatewaysClient(subscriptionID string) ApplicationGatewaysClient { return NewApplicationGatewaysClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewApplicationGatewaysClientWithBaseURI creates an instance of the -// ApplicationGatewaysClient client. +// NewApplicationGatewaysClientWithBaseURI creates an instance of the ApplicationGatewaysClient client. func NewApplicationGatewaysClientWithBaseURI(baseURI string, subscriptionID string) ApplicationGatewaysClient { return ApplicationGatewaysClient{NewWithBaseURI(baseURI, subscriptionID)} } -// BackendHealth gets the backend health of the specified application gateway -// in a resource group. This method may poll for completion. Polling can be -// canceled by passing the cancel channel argument. The channel will be used to -// cancel polling and any outstanding HTTP requests. +// BackendHealth gets the backend health of the specified application gateway in a resource group. This method may poll +// for completion. Polling can be canceled by passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. applicationGatewayName -// is the name of the application gateway. expand is expands BackendAddressPool -// and BackendHttpSettings referenced in backend health. +// resourceGroupName is the name of the resource group. applicationGatewayName is the name of the application gateway. +// expand is expands BackendAddressPool and BackendHttpSettings referenced in backend health. 
func (client ApplicationGatewaysClient) BackendHealth(resourceGroupName string, applicationGatewayName string, expand string, cancel <-chan struct{}) (<-chan ApplicationGatewayBackendHealth, <-chan error) { resultChan := make(chan ApplicationGatewayBackendHealth, 1) errChan := make(chan error, 1) @@ -57,8 +52,10 @@ func (client ApplicationGatewaysClient) BackendHealth(resourceGroupName string, var err error var result ApplicationGatewayBackendHealth defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -91,7 +88,7 @@ func (client ApplicationGatewaysClient) BackendHealthPreparer(resourceGroupName "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -128,14 +125,12 @@ func (client ApplicationGatewaysClient) BackendHealthResponder(resp *http.Respon return } -// CreateOrUpdate creates or updates the specified application gateway. This -// method may poll for completion. Polling can be canceled by passing the -// cancel channel argument. The channel will be used to cancel polling and any +// CreateOrUpdate creates or updates the specified application gateway. This method may poll for completion. Polling +// can be canceled by passing the cancel channel argument. The channel will be used to cancel polling and any // outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. applicationGatewayName -// is the name of the application gateway. parameters is parameters supplied to -// the create or update application gateway operation. +// resourceGroupName is the name of the resource group. applicationGatewayName is the name of the application gateway. +// parameters is parameters supplied to the create or update application gateway operation. func (client ApplicationGatewaysClient) CreateOrUpdate(resourceGroupName string, applicationGatewayName string, parameters ApplicationGateway, cancel <-chan struct{}) (<-chan ApplicationGateway, <-chan error) { resultChan := make(chan ApplicationGateway, 1) errChan := make(chan error, 1) @@ -158,8 +153,10 @@ func (client ApplicationGatewaysClient) CreateOrUpdate(resourceGroupName string, var err error var result ApplicationGateway defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -192,7 +189,7 @@ func (client ApplicationGatewaysClient) CreateOrUpdatePreparer(resourceGroupName "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -228,13 +225,10 @@ func (client ApplicationGatewaysClient) CreateOrUpdateResponder(resp *http.Respo return } -// Delete deletes the specified application gateway. This method may poll for -// completion. Polling can be canceled by passing the cancel channel argument. -// The channel will be used to cancel polling and any outstanding HTTP -// requests. +// Delete deletes the specified application gateway. This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. applicationGatewayName -// is the name of the application gateway. 
+// resourceGroupName is the name of the resource group. applicationGatewayName is the name of the application gateway. func (client ApplicationGatewaysClient) Delete(resourceGroupName string, applicationGatewayName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { resultChan := make(chan autorest.Response, 1) errChan := make(chan error, 1) @@ -242,8 +236,10 @@ func (client ApplicationGatewaysClient) Delete(resourceGroupName string, applica var err error var result autorest.Response defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -276,7 +272,7 @@ func (client ApplicationGatewaysClient) DeletePreparer(resourceGroupName string, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -311,8 +307,7 @@ func (client ApplicationGatewaysClient) DeleteResponder(resp *http.Response) (re // Get gets the specified application gateway. // -// resourceGroupName is the name of the resource group. applicationGatewayName -// is the name of the application gateway. +// resourceGroupName is the name of the resource group. applicationGatewayName is the name of the application gateway. func (client ApplicationGatewaysClient) Get(resourceGroupName string, applicationGatewayName string) (result ApplicationGateway, err error) { req, err := client.GetPreparer(resourceGroupName, applicationGatewayName) if err != nil { @@ -343,7 +338,7 @@ func (client ApplicationGatewaysClient) GetPreparer(resourceGroupName string, ap "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -375,6 +370,70 @@ func (client ApplicationGatewaysClient) GetResponder(resp *http.Response) (resul return } +// GetSslPredefinedPolicy gets Ssl predefined policy with the specified policy name. +// +// predefinedPolicyName is name of Ssl predefined policy. +func (client ApplicationGatewaysClient) GetSslPredefinedPolicy(predefinedPolicyName string) (result ApplicationGatewaySslPredefinedPolicy, err error) { + req, err := client.GetSslPredefinedPolicyPreparer(predefinedPolicyName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "GetSslPredefinedPolicy", nil, "Failure preparing request") + return + } + + resp, err := client.GetSslPredefinedPolicySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "GetSslPredefinedPolicy", resp, "Failure sending request") + return + } + + result, err = client.GetSslPredefinedPolicyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "GetSslPredefinedPolicy", resp, "Failure responding to request") + } + + return +} + +// GetSslPredefinedPolicyPreparer prepares the GetSslPredefinedPolicy request. 
+func (client ApplicationGatewaysClient) GetSslPredefinedPolicyPreparer(predefinedPolicyName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "predefinedPolicyName": autorest.Encode("path", predefinedPolicyName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-09-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationGatewayAvailableSslOptions/default/predefinedPolicies/{predefinedPolicyName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSslPredefinedPolicySender sends the GetSslPredefinedPolicy request. The method will close the +// http.Response Body if it receives an error. +func (client ApplicationGatewaysClient) GetSslPredefinedPolicySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetSslPredefinedPolicyResponder handles the response to the GetSslPredefinedPolicy request. The method always +// closes the http.Response Body. +func (client ApplicationGatewaysClient) GetSslPredefinedPolicyResponder(resp *http.Response) (result ApplicationGatewaySslPredefinedPolicy, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + // List lists all application gateways in a resource group. // // resourceGroupName is the name of the resource group. @@ -407,7 +466,7 @@ func (client ApplicationGatewaysClient) ListPreparer(resourceGroupName string) ( "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -463,6 +522,51 @@ func (client ApplicationGatewaysClient) ListNextResults(lastResults ApplicationG return } +// ListComplete gets all elements from the list without paging. +func (client ApplicationGatewaysClient) ListComplete(resourceGroupName string, cancel <-chan struct{}) (<-chan ApplicationGateway, <-chan error) { + resultChan := make(chan ApplicationGateway) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List(resourceGroupName) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} + // ListAll gets all the application gateways in a subscription. 
func (client ApplicationGatewaysClient) ListAll() (result ApplicationGatewayListResult, err error) { req, err := client.ListAllPreparer() @@ -492,7 +596,7 @@ func (client ApplicationGatewaysClient) ListAllPreparer() (*http.Request, error) "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -548,8 +652,243 @@ func (client ApplicationGatewaysClient) ListAllNextResults(lastResults Applicati return } -// ListAvailableWafRuleSets lists all available web application firewall rule -// sets. +// ListAllComplete gets all elements from the list without paging. +func (client ApplicationGatewaysClient) ListAllComplete(cancel <-chan struct{}) (<-chan ApplicationGateway, <-chan error) { + resultChan := make(chan ApplicationGateway) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.ListAll() + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListAllNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} + +// ListAvailableSslOptions lists available Ssl options for configuring Ssl policy. +func (client ApplicationGatewaysClient) ListAvailableSslOptions() (result ApplicationGatewayAvailableSslOptions, err error) { + req, err := client.ListAvailableSslOptionsPreparer() + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "ListAvailableSslOptions", nil, "Failure preparing request") + return + } + + resp, err := client.ListAvailableSslOptionsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "ListAvailableSslOptions", resp, "Failure sending request") + return + } + + result, err = client.ListAvailableSslOptionsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "ListAvailableSslOptions", resp, "Failure responding to request") + } + + return +} + +// ListAvailableSslOptionsPreparer prepares the ListAvailableSslOptions request. +func (client ApplicationGatewaysClient) ListAvailableSslOptionsPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-09-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationGatewayAvailableSslOptions/default", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListAvailableSslOptionsSender sends the ListAvailableSslOptions request. The method will close the +// http.Response Body if it receives an error. 
+func (client ApplicationGatewaysClient) ListAvailableSslOptionsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListAvailableSslOptionsResponder handles the response to the ListAvailableSslOptions request. The method always +// closes the http.Response Body. +func (client ApplicationGatewaysClient) ListAvailableSslOptionsResponder(resp *http.Response) (result ApplicationGatewayAvailableSslOptions, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAvailableSslPredefinedPolicies lists all SSL predefined policies for configuring Ssl policy. +func (client ApplicationGatewaysClient) ListAvailableSslPredefinedPolicies() (result ApplicationGatewayAvailableSslPredefinedPolicies, err error) { + req, err := client.ListAvailableSslPredefinedPoliciesPreparer() + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "ListAvailableSslPredefinedPolicies", nil, "Failure preparing request") + return + } + + resp, err := client.ListAvailableSslPredefinedPoliciesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "ListAvailableSslPredefinedPolicies", resp, "Failure sending request") + return + } + + result, err = client.ListAvailableSslPredefinedPoliciesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "ListAvailableSslPredefinedPolicies", resp, "Failure responding to request") + } + + return +} + +// ListAvailableSslPredefinedPoliciesPreparer prepares the ListAvailableSslPredefinedPolicies request. +func (client ApplicationGatewaysClient) ListAvailableSslPredefinedPoliciesPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-09-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationGatewayAvailableSslOptions/default/predefinedPolicies", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListAvailableSslPredefinedPoliciesSender sends the ListAvailableSslPredefinedPolicies request. The method will close the +// http.Response Body if it receives an error. +func (client ApplicationGatewaysClient) ListAvailableSslPredefinedPoliciesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListAvailableSslPredefinedPoliciesResponder handles the response to the ListAvailableSslPredefinedPolicies request. The method always +// closes the http.Response Body. 
+func (client ApplicationGatewaysClient) ListAvailableSslPredefinedPoliciesResponder(resp *http.Response) (result ApplicationGatewayAvailableSslPredefinedPolicies, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAvailableSslPredefinedPoliciesNextResults retrieves the next set of results, if any. +func (client ApplicationGatewaysClient) ListAvailableSslPredefinedPoliciesNextResults(lastResults ApplicationGatewayAvailableSslPredefinedPolicies) (result ApplicationGatewayAvailableSslPredefinedPolicies, err error) { + req, err := lastResults.ApplicationGatewayAvailableSslPredefinedPoliciesPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "ListAvailableSslPredefinedPolicies", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListAvailableSslPredefinedPoliciesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "ListAvailableSslPredefinedPolicies", resp, "Failure sending next results request") + } + + result, err = client.ListAvailableSslPredefinedPoliciesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "ListAvailableSslPredefinedPolicies", resp, "Failure responding to next results request") + } + + return +} + +// ListAvailableSslPredefinedPoliciesComplete gets all elements from the list without paging. +func (client ApplicationGatewaysClient) ListAvailableSslPredefinedPoliciesComplete(cancel <-chan struct{}) (<-chan ApplicationGatewaySslPredefinedPolicy, <-chan error) { + resultChan := make(chan ApplicationGatewaySslPredefinedPolicy) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.ListAvailableSslPredefinedPolicies() + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListAvailableSslPredefinedPoliciesNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} + +// ListAvailableWafRuleSets lists all available web application firewall rule sets. func (client ApplicationGatewaysClient) ListAvailableWafRuleSets() (result ApplicationGatewayAvailableWafRuleSetsResult, err error) { req, err := client.ListAvailableWafRuleSetsPreparer() if err != nil { @@ -578,7 +917,7 @@ func (client ApplicationGatewaysClient) ListAvailableWafRuleSetsPreparer() (*htt "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -610,13 +949,10 @@ func (client ApplicationGatewaysClient) ListAvailableWafRuleSetsResponder(resp * return } -// Start starts the specified application gateway. This method may poll for -// completion. 
Polling can be canceled by passing the cancel channel argument. -// The channel will be used to cancel polling and any outstanding HTTP -// requests. +// Start starts the specified application gateway. This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. applicationGatewayName -// is the name of the application gateway. +// resourceGroupName is the name of the resource group. applicationGatewayName is the name of the application gateway. func (client ApplicationGatewaysClient) Start(resourceGroupName string, applicationGatewayName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { resultChan := make(chan autorest.Response, 1) errChan := make(chan error, 1) @@ -624,8 +960,10 @@ func (client ApplicationGatewaysClient) Start(resourceGroupName string, applicat var err error var result autorest.Response defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -658,7 +996,7 @@ func (client ApplicationGatewaysClient) StartPreparer(resourceGroupName string, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -691,13 +1029,11 @@ func (client ApplicationGatewaysClient) StartResponder(resp *http.Response) (res return } -// Stop stops the specified application gateway in a resource group. This -// method may poll for completion. Polling can be canceled by passing the -// cancel channel argument. The channel will be used to cancel polling and any -// outstanding HTTP requests. +// Stop stops the specified application gateway in a resource group. This method may poll for completion. Polling can +// be canceled by passing the cancel channel argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. // -// resourceGroupName is the name of the resource group. applicationGatewayName -// is the name of the application gateway. +// resourceGroupName is the name of the resource group. applicationGatewayName is the name of the application gateway. 
func (client ApplicationGatewaysClient) Stop(resourceGroupName string, applicationGatewayName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { resultChan := make(chan autorest.Response, 1) errChan := make(chan error, 1) @@ -705,8 +1041,10 @@ func (client ApplicationGatewaysClient) Stop(resourceGroupName string, applicati var err error var result autorest.Response defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -739,7 +1077,7 @@ func (client ApplicationGatewaysClient) StopPreparer(resourceGroupName string, a "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/applicationsecuritygroups.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/applicationsecuritygroups.go new file mode 100644 index 000000000000..1e84e706466f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/applicationsecuritygroups.go @@ -0,0 +1,535 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// ApplicationSecurityGroupsClient is the network Client +type ApplicationSecurityGroupsClient struct { + ManagementClient +} + +// NewApplicationSecurityGroupsClient creates an instance of the ApplicationSecurityGroupsClient client. +func NewApplicationSecurityGroupsClient(subscriptionID string) ApplicationSecurityGroupsClient { + return NewApplicationSecurityGroupsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewApplicationSecurityGroupsClientWithBaseURI creates an instance of the ApplicationSecurityGroupsClient client. +func NewApplicationSecurityGroupsClientWithBaseURI(baseURI string, subscriptionID string) ApplicationSecurityGroupsClient { + return ApplicationSecurityGroupsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates an application security group. This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP +// requests. +// +// resourceGroupName is the name of the resource group. applicationSecurityGroupName is the name of the application +// security group. parameters is parameters supplied to the create or update ApplicationSecurityGroup operation. 
+func (client ApplicationSecurityGroupsClient) CreateOrUpdate(resourceGroupName string, applicationSecurityGroupName string, parameters ApplicationSecurityGroup, cancel <-chan struct{}) (<-chan ApplicationSecurityGroup, <-chan error) { + resultChan := make(chan ApplicationSecurityGroup, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result ApplicationSecurityGroup + defer func() { + if err != nil { + errChan <- err + } + resultChan <- result + close(resultChan) + close(errChan) + }() + req, err := client.CreateOrUpdatePreparer(resourceGroupName, applicationSecurityGroupName, parameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client ApplicationSecurityGroupsClient) CreateOrUpdatePreparer(resourceGroupName string, applicationSecurityGroupName string, parameters ApplicationSecurityGroup, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "applicationSecurityGroupName": autorest.Encode("path", applicationSecurityGroupName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-09-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups/{applicationSecurityGroupName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client ApplicationSecurityGroupsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client ApplicationSecurityGroupsClient) CreateOrUpdateResponder(resp *http.Response) (result ApplicationSecurityGroup, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes the specified application security group. This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. 
The channel will be used to cancel polling and any outstanding HTTP +// requests. +// +// resourceGroupName is the name of the resource group. applicationSecurityGroupName is the name of the application +// security group. +func (client ApplicationSecurityGroupsClient) Delete(resourceGroupName string, applicationSecurityGroupName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { + resultChan := make(chan autorest.Response, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result autorest.Response + defer func() { + if err != nil { + errChan <- err + } + resultChan <- result + close(resultChan) + close(errChan) + }() + req, err := client.DeletePreparer(resourceGroupName, applicationSecurityGroupName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsClient", "Delete", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// DeletePreparer prepares the Delete request. +func (client ApplicationSecurityGroupsClient) DeletePreparer(resourceGroupName string, applicationSecurityGroupName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "applicationSecurityGroupName": autorest.Encode("path", applicationSecurityGroupName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-09-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups/{applicationSecurityGroupName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client ApplicationSecurityGroupsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client ApplicationSecurityGroupsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets information about the specified application security group. +// +// resourceGroupName is the name of the resource group. applicationSecurityGroupName is the name of the application +// security group. 
+func (client ApplicationSecurityGroupsClient) Get(resourceGroupName string, applicationSecurityGroupName string) (result ApplicationSecurityGroup, err error) { + req, err := client.GetPreparer(resourceGroupName, applicationSecurityGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client ApplicationSecurityGroupsClient) GetPreparer(resourceGroupName string, applicationSecurityGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "applicationSecurityGroupName": autorest.Encode("path", applicationSecurityGroupName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-09-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups/{applicationSecurityGroupName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client ApplicationSecurityGroupsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client ApplicationSecurityGroupsClient) GetResponder(resp *http.Response) (result ApplicationSecurityGroup, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets all the application security groups in a resource group. +// +// resourceGroupName is the name of the resource group. 
+func (client ApplicationSecurityGroupsClient) List(resourceGroupName string) (result ApplicationSecurityGroupListResult, err error) { + req, err := client.ListPreparer(resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client ApplicationSecurityGroupsClient) ListPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-09-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ApplicationSecurityGroupsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client ApplicationSecurityGroupsClient) ListResponder(resp *http.Response) (result ApplicationSecurityGroupListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client ApplicationSecurityGroupsClient) ListNextResults(lastResults ApplicationSecurityGroupListResult) (result ApplicationSecurityGroupListResult, err error) { + req, err := lastResults.ApplicationSecurityGroupListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsClient", "List", resp, "Failure responding to next results request") + } + + return +} + +// ListComplete gets all elements from the list without paging. 
+func (client ApplicationSecurityGroupsClient) ListComplete(resourceGroupName string, cancel <-chan struct{}) (<-chan ApplicationSecurityGroup, <-chan error) { + resultChan := make(chan ApplicationSecurityGroup) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List(resourceGroupName) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} + +// ListAll gets all application security groups in a subscription. +func (client ApplicationSecurityGroupsClient) ListAll() (result ApplicationSecurityGroupListResult, err error) { + req, err := client.ListAllPreparer() + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsClient", "ListAll", nil, "Failure preparing request") + return + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsClient", "ListAll", resp, "Failure sending request") + return + } + + result, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsClient", "ListAll", resp, "Failure responding to request") + } + + return +} + +// ListAllPreparer prepares the ListAll request. +func (client ApplicationSecurityGroupsClient) ListAllPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-09-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationSecurityGroups", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListAllSender sends the ListAll request. The method will close the +// http.Response Body if it receives an error. +func (client ApplicationSecurityGroupsClient) ListAllSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListAllResponder handles the response to the ListAll request. The method always +// closes the http.Response Body. +func (client ApplicationSecurityGroupsClient) ListAllResponder(resp *http.Response) (result ApplicationSecurityGroupListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAllNextResults retrieves the next set of results, if any. 
+func (client ApplicationSecurityGroupsClient) ListAllNextResults(lastResults ApplicationSecurityGroupListResult) (result ApplicationSecurityGroupListResult, err error) { + req, err := lastResults.ApplicationSecurityGroupListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsClient", "ListAll", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsClient", "ListAll", resp, "Failure sending next results request") + } + + result, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationSecurityGroupsClient", "ListAll", resp, "Failure responding to next results request") + } + + return +} + +// ListAllComplete gets all elements from the list without paging. +func (client ApplicationSecurityGroupsClient) ListAllComplete(cancel <-chan struct{}) (<-chan ApplicationSecurityGroup, <-chan error) { + resultChan := make(chan ApplicationSecurityGroup) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.ListAll() + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListAllNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/availableendpointservices.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/availableendpointservices.go new file mode 100644 index 000000000000..26bcaf8b647c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/availableendpointservices.go @@ -0,0 +1,172 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// AvailableEndpointServicesClient is the network Client +type AvailableEndpointServicesClient struct { + ManagementClient +} + +// NewAvailableEndpointServicesClient creates an instance of the AvailableEndpointServicesClient client. 
+func NewAvailableEndpointServicesClient(subscriptionID string) AvailableEndpointServicesClient { + return NewAvailableEndpointServicesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewAvailableEndpointServicesClientWithBaseURI creates an instance of the AvailableEndpointServicesClient client. +func NewAvailableEndpointServicesClientWithBaseURI(baseURI string, subscriptionID string) AvailableEndpointServicesClient { + return AvailableEndpointServicesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List list what values of endpoint services are available for use. +// +// location is the location to check available endpoint services. +func (client AvailableEndpointServicesClient) List(location string) (result EndpointServicesListResult, err error) { + req, err := client.ListPreparer(location) + if err != nil { + err = autorest.NewErrorWithError(err, "network.AvailableEndpointServicesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.AvailableEndpointServicesClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.AvailableEndpointServicesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client AvailableEndpointServicesClient) ListPreparer(location string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-09-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/virtualNetworkAvailableEndpointServices", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client AvailableEndpointServicesClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client AvailableEndpointServicesClient) ListResponder(resp *http.Response) (result EndpointServicesListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client AvailableEndpointServicesClient) ListNextResults(lastResults EndpointServicesListResult) (result EndpointServicesListResult, err error) { + req, err := lastResults.EndpointServicesListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.AvailableEndpointServicesClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.AvailableEndpointServicesClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.AvailableEndpointServicesClient", "List", resp, "Failure responding to next results request") + } + + return +} + +// ListComplete gets all elements from the list without paging. +func (client AvailableEndpointServicesClient) ListComplete(location string, cancel <-chan struct{}) (<-chan EndpointServiceResult, <-chan error) { + resultChan := make(chan EndpointServiceResult) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List(location) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/bgpservicecommunities.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/bgpservicecommunities.go index 5024f5164965..10cfa2cdd558 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/bgpservicecommunities.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/bgpservicecommunities.go @@ -14,9 +14,8 @@ package network // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/Azure/go-autorest/autorest" @@ -24,19 +23,17 @@ import ( "net/http" ) -// BgpServiceCommunitiesClient is the composite Swagger for Network Client +// BgpServiceCommunitiesClient is the network Client type BgpServiceCommunitiesClient struct { ManagementClient } -// NewBgpServiceCommunitiesClient creates an instance of the -// BgpServiceCommunitiesClient client. +// NewBgpServiceCommunitiesClient creates an instance of the BgpServiceCommunitiesClient client. func NewBgpServiceCommunitiesClient(subscriptionID string) BgpServiceCommunitiesClient { return NewBgpServiceCommunitiesClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewBgpServiceCommunitiesClientWithBaseURI creates an instance of the -// BgpServiceCommunitiesClient client. 
+// NewBgpServiceCommunitiesClientWithBaseURI creates an instance of the BgpServiceCommunitiesClient client. func NewBgpServiceCommunitiesClientWithBaseURI(baseURI string, subscriptionID string) BgpServiceCommunitiesClient { return BgpServiceCommunitiesClient{NewWithBaseURI(baseURI, subscriptionID)} } @@ -70,7 +67,7 @@ func (client BgpServiceCommunitiesClient) ListPreparer() (*http.Request, error) "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -125,3 +122,48 @@ func (client BgpServiceCommunitiesClient) ListNextResults(lastResults BgpService return } + +// ListComplete gets all elements from the list without paging. +func (client BgpServiceCommunitiesClient) ListComplete(cancel <-chan struct{}) (<-chan BgpServiceCommunity, <-chan error) { + resultChan := make(chan BgpServiceCommunity) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List() + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/client.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/client.go index ec5bf7ced542..a3576c393aa9 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/client.go @@ -1,6 +1,6 @@ // Package network implements the Azure ARM Network service API version . // -// Composite Swagger for Network Client +// Network Client package network // Copyright (c) Microsoft and contributors. All rights reserved. @@ -17,9 +17,8 @@ package network // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/Azure/go-autorest/autorest" @@ -53,12 +52,10 @@ func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient { } } -// CheckDNSNameAvailability checks whether a domain name in the cloudapp.net -// zone is available for use. +// CheckDNSNameAvailability checks whether a domain name in the cloudapp.azure.com zone is available for use. // -// location is the location of the domain name. domainNameLabel is the domain -// name to be verified. It must conform to the following regular expression: -// ^[a-z][a-z0-9-]{1,61}[a-z0-9]$. +// location is the location of the domain name. domainNameLabel is the domain name to be verified. It must conform to +// the following regular expression: ^[a-z][a-z0-9-]{1,61}[a-z0-9]$. 
func (client ManagementClient) CheckDNSNameAvailability(location string, domainNameLabel string) (result DNSNameAvailabilityResult, err error) { req, err := client.CheckDNSNameAvailabilityPreparer(location, domainNameLabel) if err != nil { @@ -88,12 +85,10 @@ func (client ManagementClient) CheckDNSNameAvailabilityPreparer(location string, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if len(domainNameLabel) > 0 { - queryParameters["domainNameLabel"] = autorest.Encode("query", domainNameLabel) + "api-version": APIVersion, + "domainNameLabel": autorest.Encode("query", domainNameLabel), } preparer := autorest.CreatePreparer( diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/defaultsecurityrules.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/defaultsecurityrules.go new file mode 100644 index 000000000000..9a3ded19d5c2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/defaultsecurityrules.go @@ -0,0 +1,241 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// DefaultSecurityRulesClient is the network Client +type DefaultSecurityRulesClient struct { + ManagementClient +} + +// NewDefaultSecurityRulesClient creates an instance of the DefaultSecurityRulesClient client. +func NewDefaultSecurityRulesClient(subscriptionID string) DefaultSecurityRulesClient { + return NewDefaultSecurityRulesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewDefaultSecurityRulesClientWithBaseURI creates an instance of the DefaultSecurityRulesClient client. +func NewDefaultSecurityRulesClientWithBaseURI(baseURI string, subscriptionID string) DefaultSecurityRulesClient { + return DefaultSecurityRulesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get get the specified default network security rule. +// +// resourceGroupName is the name of the resource group. networkSecurityGroupName is the name of the network security +// group. defaultSecurityRuleName is the name of the default security rule. 
+func (client DefaultSecurityRulesClient) Get(resourceGroupName string, networkSecurityGroupName string, defaultSecurityRuleName string) (result SecurityRule, err error) { + req, err := client.GetPreparer(resourceGroupName, networkSecurityGroupName, defaultSecurityRuleName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.DefaultSecurityRulesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.DefaultSecurityRulesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.DefaultSecurityRulesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client DefaultSecurityRulesClient) GetPreparer(resourceGroupName string, networkSecurityGroupName string, defaultSecurityRuleName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "defaultSecurityRuleName": autorest.Encode("path", defaultSecurityRuleName), + "networkSecurityGroupName": autorest.Encode("path", networkSecurityGroupName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-09-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/defaultSecurityRules/{defaultSecurityRuleName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client DefaultSecurityRulesClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client DefaultSecurityRulesClient) GetResponder(resp *http.Response) (result SecurityRule, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets all default security rules in a network security group. +// +// resourceGroupName is the name of the resource group. networkSecurityGroupName is the name of the network security +// group. 
+func (client DefaultSecurityRulesClient) List(resourceGroupName string, networkSecurityGroupName string) (result SecurityRuleListResult, err error) { + req, err := client.ListPreparer(resourceGroupName, networkSecurityGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.DefaultSecurityRulesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.DefaultSecurityRulesClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.DefaultSecurityRulesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client DefaultSecurityRulesClient) ListPreparer(resourceGroupName string, networkSecurityGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkSecurityGroupName": autorest.Encode("path", networkSecurityGroupName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-09-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/defaultSecurityRules", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client DefaultSecurityRulesClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client DefaultSecurityRulesClient) ListResponder(resp *http.Response) (result SecurityRuleListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client DefaultSecurityRulesClient) ListNextResults(lastResults SecurityRuleListResult) (result SecurityRuleListResult, err error) { + req, err := lastResults.SecurityRuleListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.DefaultSecurityRulesClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.DefaultSecurityRulesClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.DefaultSecurityRulesClient", "List", resp, "Failure responding to next results request") + } + + return +} + +// ListComplete gets all elements from the list without paging. 
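// Illustrative consumption sketch for the channel-based ListComplete helper
// (resource names are placeholders; the client is assumed to be authorized):
//
//	cancel := make(chan struct{})
//	defer close(cancel)
//	rules, errs := client.ListComplete("example-rg", "example-nsg", cancel)
//	for rule := range rules {
//		if rule.Name != nil {
//			fmt.Println(*rule.Name)
//		}
//	}
//	// errs yields nil once the result channel is drained without error
//	if err := <-errs; err != nil {
//		log.Fatal(err)
//	}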
+func (client DefaultSecurityRulesClient) ListComplete(resourceGroupName string, networkSecurityGroupName string, cancel <-chan struct{}) (<-chan SecurityRule, <-chan error) { + resultChan := make(chan SecurityRule) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List(resourceGroupName, networkSecurityGroupName) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitauthorizations.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitauthorizations.go index feb974fb97ca..9b7fd524e73c 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitauthorizations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitauthorizations.go @@ -14,9 +14,8 @@ package network // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/Azure/go-autorest/autorest" @@ -24,33 +23,30 @@ import ( "net/http" ) -// ExpressRouteCircuitAuthorizationsClient is the composite Swagger for Network -// Client +// ExpressRouteCircuitAuthorizationsClient is the network Client type ExpressRouteCircuitAuthorizationsClient struct { ManagementClient } -// NewExpressRouteCircuitAuthorizationsClient creates an instance of the -// ExpressRouteCircuitAuthorizationsClient client. +// NewExpressRouteCircuitAuthorizationsClient creates an instance of the ExpressRouteCircuitAuthorizationsClient +// client. func NewExpressRouteCircuitAuthorizationsClient(subscriptionID string) ExpressRouteCircuitAuthorizationsClient { return NewExpressRouteCircuitAuthorizationsClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewExpressRouteCircuitAuthorizationsClientWithBaseURI creates an instance of -// the ExpressRouteCircuitAuthorizationsClient client. +// NewExpressRouteCircuitAuthorizationsClientWithBaseURI creates an instance of the +// ExpressRouteCircuitAuthorizationsClient client. func NewExpressRouteCircuitAuthorizationsClientWithBaseURI(baseURI string, subscriptionID string) ExpressRouteCircuitAuthorizationsClient { return ExpressRouteCircuitAuthorizationsClient{NewWithBaseURI(baseURI, subscriptionID)} } -// CreateOrUpdate creates or updates an authorization in the specified express -// route circuit. This method may poll for completion. Polling can be canceled -// by passing the cancel channel argument. The channel will be used to cancel +// CreateOrUpdate creates or updates an authorization in the specified express route circuit. This method may poll for +// completion. Polling can be canceled by passing the cancel channel argument. 
The channel will be used to cancel // polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. circuitName is the name -// of the express route circuit. authorizationName is the name of the -// authorization. authorizationParameters is parameters supplied to the create -// or update express route circuit authorization operation. +// resourceGroupName is the name of the resource group. circuitName is the name of the express route circuit. +// authorizationName is the name of the authorization. authorizationParameters is parameters supplied to the create or +// update express route circuit authorization operation. func (client ExpressRouteCircuitAuthorizationsClient) CreateOrUpdate(resourceGroupName string, circuitName string, authorizationName string, authorizationParameters ExpressRouteCircuitAuthorization, cancel <-chan struct{}) (<-chan ExpressRouteCircuitAuthorization, <-chan error) { resultChan := make(chan ExpressRouteCircuitAuthorization, 1) errChan := make(chan error, 1) @@ -58,8 +54,10 @@ func (client ExpressRouteCircuitAuthorizationsClient) CreateOrUpdate(resourceGro var err error var result ExpressRouteCircuitAuthorization defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -93,7 +91,7 @@ func (client ExpressRouteCircuitAuthorizationsClient) CreateOrUpdatePreparer(res "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -129,14 +127,12 @@ func (client ExpressRouteCircuitAuthorizationsClient) CreateOrUpdateResponder(re return } -// Delete deletes the specified authorization from the specified express route -// circuit. This method may poll for completion. Polling can be canceled by -// passing the cancel channel argument. The channel will be used to cancel +// Delete deletes the specified authorization from the specified express route circuit. This method may poll for +// completion. Polling can be canceled by passing the cancel channel argument. The channel will be used to cancel // polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. circuitName is the name -// of the express route circuit. authorizationName is the name of the -// authorization. +// resourceGroupName is the name of the resource group. circuitName is the name of the express route circuit. +// authorizationName is the name of the authorization. 
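// Illustrative sketch of consuming the channel-based long-running Delete
// (names are placeholders; a nil cancel channel disables cancellation). Note
// that with this version the error channel only receives a value on failure
// and is simply closed on success, so the receive below yields nil:
//
//	respChan, errChan := client.Delete("example-rg", "example-circuit", "example-auth", nil)
//	resp := <-respChan
//	if err := <-errChan; err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println(resp.StatusCode)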
func (client ExpressRouteCircuitAuthorizationsClient) Delete(resourceGroupName string, circuitName string, authorizationName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { resultChan := make(chan autorest.Response, 1) errChan := make(chan error, 1) @@ -144,8 +140,10 @@ func (client ExpressRouteCircuitAuthorizationsClient) Delete(resourceGroupName s var err error var result autorest.Response defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -179,7 +177,7 @@ func (client ExpressRouteCircuitAuthorizationsClient) DeletePreparer(resourceGro "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -212,12 +210,10 @@ func (client ExpressRouteCircuitAuthorizationsClient) DeleteResponder(resp *http return } -// Get gets the specified authorization from the specified express route -// circuit. +// Get gets the specified authorization from the specified express route circuit. // -// resourceGroupName is the name of the resource group. circuitName is the name -// of the express route circuit. authorizationName is the name of the -// authorization. +// resourceGroupName is the name of the resource group. circuitName is the name of the express route circuit. +// authorizationName is the name of the authorization. func (client ExpressRouteCircuitAuthorizationsClient) Get(resourceGroupName string, circuitName string, authorizationName string) (result ExpressRouteCircuitAuthorization, err error) { req, err := client.GetPreparer(resourceGroupName, circuitName, authorizationName) if err != nil { @@ -249,7 +245,7 @@ func (client ExpressRouteCircuitAuthorizationsClient) GetPreparer(resourceGroupN "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -283,8 +279,7 @@ func (client ExpressRouteCircuitAuthorizationsClient) GetResponder(resp *http.Re // List gets all authorizations in an express route circuit. // -// resourceGroupName is the name of the resource group. circuitName is the name -// of the circuit. +// resourceGroupName is the name of the resource group. circuitName is the name of the circuit. func (client ExpressRouteCircuitAuthorizationsClient) List(resourceGroupName string, circuitName string) (result AuthorizationListResult, err error) { req, err := client.ListPreparer(resourceGroupName, circuitName) if err != nil { @@ -315,7 +310,7 @@ func (client ExpressRouteCircuitAuthorizationsClient) ListPreparer(resourceGroup "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -370,3 +365,48 @@ func (client ExpressRouteCircuitAuthorizationsClient) ListNextResults(lastResult return } + +// ListComplete gets all elements from the list without paging. 
+func (client ExpressRouteCircuitAuthorizationsClient) ListComplete(resourceGroupName string, circuitName string, cancel <-chan struct{}) (<-chan ExpressRouteCircuitAuthorization, <-chan error) { + resultChan := make(chan ExpressRouteCircuitAuthorization) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List(resourceGroupName, circuitName) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitpeerings.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitpeerings.go index 28b926f24438..af9fc67e0394 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitpeerings.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitpeerings.go @@ -14,9 +14,8 @@ package network // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/Azure/go-autorest/autorest" @@ -24,32 +23,27 @@ import ( "net/http" ) -// ExpressRouteCircuitPeeringsClient is the composite Swagger for Network -// Client +// ExpressRouteCircuitPeeringsClient is the network Client type ExpressRouteCircuitPeeringsClient struct { ManagementClient } -// NewExpressRouteCircuitPeeringsClient creates an instance of the -// ExpressRouteCircuitPeeringsClient client. +// NewExpressRouteCircuitPeeringsClient creates an instance of the ExpressRouteCircuitPeeringsClient client. func NewExpressRouteCircuitPeeringsClient(subscriptionID string) ExpressRouteCircuitPeeringsClient { return NewExpressRouteCircuitPeeringsClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewExpressRouteCircuitPeeringsClientWithBaseURI creates an instance of the -// ExpressRouteCircuitPeeringsClient client. +// NewExpressRouteCircuitPeeringsClientWithBaseURI creates an instance of the ExpressRouteCircuitPeeringsClient client. func NewExpressRouteCircuitPeeringsClientWithBaseURI(baseURI string, subscriptionID string) ExpressRouteCircuitPeeringsClient { return ExpressRouteCircuitPeeringsClient{NewWithBaseURI(baseURI, subscriptionID)} } -// CreateOrUpdate creates or updates a peering in the specified express route -// circuits. This method may poll for completion. Polling can be canceled by -// passing the cancel channel argument. The channel will be used to cancel +// CreateOrUpdate creates or updates a peering in the specified express route circuits. This method may poll for +// completion. Polling can be canceled by passing the cancel channel argument. The channel will be used to cancel // polling and any outstanding HTTP requests. 
// -// resourceGroupName is the name of the resource group. circuitName is the name -// of the express route circuit. peeringName is the name of the peering. -// peeringParameters is parameters supplied to the create or update express +// resourceGroupName is the name of the resource group. circuitName is the name of the express route circuit. +// peeringName is the name of the peering. peeringParameters is parameters supplied to the create or update express // route circuit peering operation. func (client ExpressRouteCircuitPeeringsClient) CreateOrUpdate(resourceGroupName string, circuitName string, peeringName string, peeringParameters ExpressRouteCircuitPeering, cancel <-chan struct{}) (<-chan ExpressRouteCircuitPeering, <-chan error) { resultChan := make(chan ExpressRouteCircuitPeering, 1) @@ -58,8 +52,10 @@ func (client ExpressRouteCircuitPeeringsClient) CreateOrUpdate(resourceGroupName var err error var result ExpressRouteCircuitPeering defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -93,7 +89,7 @@ func (client ExpressRouteCircuitPeeringsClient) CreateOrUpdatePreparer(resourceG "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -129,13 +125,12 @@ func (client ExpressRouteCircuitPeeringsClient) CreateOrUpdateResponder(resp *ht return } -// Delete deletes the specified peering from the specified express route -// circuit. This method may poll for completion. Polling can be canceled by -// passing the cancel channel argument. The channel will be used to cancel -// polling and any outstanding HTTP requests. +// Delete deletes the specified peering from the specified express route circuit. This method may poll for completion. +// Polling can be canceled by passing the cancel channel argument. The channel will be used to cancel polling and any +// outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. circuitName is the name -// of the express route circuit. peeringName is the name of the peering. +// resourceGroupName is the name of the resource group. circuitName is the name of the express route circuit. +// peeringName is the name of the peering. func (client ExpressRouteCircuitPeeringsClient) Delete(resourceGroupName string, circuitName string, peeringName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { resultChan := make(chan autorest.Response, 1) errChan := make(chan error, 1) @@ -143,8 +138,10 @@ func (client ExpressRouteCircuitPeeringsClient) Delete(resourceGroupName string, var err error var result autorest.Response defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -178,7 +175,7 @@ func (client ExpressRouteCircuitPeeringsClient) DeletePreparer(resourceGroupName "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -211,11 +208,10 @@ func (client ExpressRouteCircuitPeeringsClient) DeleteResponder(resp *http.Respo return } -// Get gets the specified authorization from the specified express route -// circuit. +// Get gets the specified authorization from the specified express route circuit. // -// resourceGroupName is the name of the resource group. 
circuitName is the name -// of the express route circuit. peeringName is the name of the peering. +// resourceGroupName is the name of the resource group. circuitName is the name of the express route circuit. +// peeringName is the name of the peering. func (client ExpressRouteCircuitPeeringsClient) Get(resourceGroupName string, circuitName string, peeringName string) (result ExpressRouteCircuitPeering, err error) { req, err := client.GetPreparer(resourceGroupName, circuitName, peeringName) if err != nil { @@ -247,7 +243,7 @@ func (client ExpressRouteCircuitPeeringsClient) GetPreparer(resourceGroupName st "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -281,8 +277,7 @@ func (client ExpressRouteCircuitPeeringsClient) GetResponder(resp *http.Response // List gets all peerings in a specified express route circuit. // -// resourceGroupName is the name of the resource group. circuitName is the name -// of the express route circuit. +// resourceGroupName is the name of the resource group. circuitName is the name of the express route circuit. func (client ExpressRouteCircuitPeeringsClient) List(resourceGroupName string, circuitName string) (result ExpressRouteCircuitPeeringListResult, err error) { req, err := client.ListPreparer(resourceGroupName, circuitName) if err != nil { @@ -313,7 +308,7 @@ func (client ExpressRouteCircuitPeeringsClient) ListPreparer(resourceGroupName s "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -368,3 +363,48 @@ func (client ExpressRouteCircuitPeeringsClient) ListNextResults(lastResults Expr return } + +// ListComplete gets all elements from the list without paging. +func (client ExpressRouteCircuitPeeringsClient) ListComplete(resourceGroupName string, circuitName string, cancel <-chan struct{}) (<-chan ExpressRouteCircuitPeering, <-chan error) { + resultChan := make(chan ExpressRouteCircuitPeering) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List(resourceGroupName, circuitName) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuits.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuits.go index b60aa10a4e94..63cfeff9e0a1 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuits.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuits.go @@ -14,9 +14,8 @@ package network // See the License for the specific language governing permissions and // limitations under the License. 
// -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/Azure/go-autorest/autorest" @@ -24,31 +23,27 @@ import ( "net/http" ) -// ExpressRouteCircuitsClient is the composite Swagger for Network Client +// ExpressRouteCircuitsClient is the network Client type ExpressRouteCircuitsClient struct { ManagementClient } -// NewExpressRouteCircuitsClient creates an instance of the -// ExpressRouteCircuitsClient client. +// NewExpressRouteCircuitsClient creates an instance of the ExpressRouteCircuitsClient client. func NewExpressRouteCircuitsClient(subscriptionID string) ExpressRouteCircuitsClient { return NewExpressRouteCircuitsClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewExpressRouteCircuitsClientWithBaseURI creates an instance of the -// ExpressRouteCircuitsClient client. +// NewExpressRouteCircuitsClientWithBaseURI creates an instance of the ExpressRouteCircuitsClient client. func NewExpressRouteCircuitsClientWithBaseURI(baseURI string, subscriptionID string) ExpressRouteCircuitsClient { return ExpressRouteCircuitsClient{NewWithBaseURI(baseURI, subscriptionID)} } -// CreateOrUpdate creates or updates an express route circuit. This method may -// poll for completion. Polling can be canceled by passing the cancel channel -// argument. The channel will be used to cancel polling and any outstanding -// HTTP requests. +// CreateOrUpdate creates or updates an express route circuit. This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP +// requests. // -// resourceGroupName is the name of the resource group. circuitName is the name -// of the circuit. parameters is parameters supplied to the create or update -// express route circuit operation. +// resourceGroupName is the name of the resource group. circuitName is the name of the circuit. parameters is +// parameters supplied to the create or update express route circuit operation. func (client ExpressRouteCircuitsClient) CreateOrUpdate(resourceGroupName string, circuitName string, parameters ExpressRouteCircuit, cancel <-chan struct{}) (<-chan ExpressRouteCircuit, <-chan error) { resultChan := make(chan ExpressRouteCircuit, 1) errChan := make(chan error, 1) @@ -56,8 +51,10 @@ func (client ExpressRouteCircuitsClient) CreateOrUpdate(resourceGroupName string var err error var result ExpressRouteCircuit defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -90,7 +87,7 @@ func (client ExpressRouteCircuitsClient) CreateOrUpdatePreparer(resourceGroupNam "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -126,13 +123,10 @@ func (client ExpressRouteCircuitsClient) CreateOrUpdateResponder(resp *http.Resp return } -// Delete deletes the specified express route circuit. This method may poll for -// completion. Polling can be canceled by passing the cancel channel argument. -// The channel will be used to cancel polling and any outstanding HTTP -// requests. +// Delete deletes the specified express route circuit. 
This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. circuitName is the name -// of the express route circuit. +// resourceGroupName is the name of the resource group. circuitName is the name of the express route circuit. func (client ExpressRouteCircuitsClient) Delete(resourceGroupName string, circuitName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { resultChan := make(chan autorest.Response, 1) errChan := make(chan error, 1) @@ -140,8 +134,10 @@ func (client ExpressRouteCircuitsClient) Delete(resourceGroupName string, circui var err error var result autorest.Response defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -174,7 +170,7 @@ func (client ExpressRouteCircuitsClient) DeletePreparer(resourceGroupName string "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -209,8 +205,7 @@ func (client ExpressRouteCircuitsClient) DeleteResponder(resp *http.Response) (r // Get gets information about the specified express route circuit. // -// resourceGroupName is the name of the resource group. circuitName is the name -// of express route circuit. +// resourceGroupName is the name of the resource group. circuitName is the name of express route circuit. func (client ExpressRouteCircuitsClient) Get(resourceGroupName string, circuitName string) (result ExpressRouteCircuit, err error) { req, err := client.GetPreparer(resourceGroupName, circuitName) if err != nil { @@ -241,7 +236,7 @@ func (client ExpressRouteCircuitsClient) GetPreparer(resourceGroupName string, c "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -273,11 +268,10 @@ func (client ExpressRouteCircuitsClient) GetResponder(resp *http.Response) (resu return } -// GetPeeringStats gets all stats from an express route circuit in a resource -// group. +// GetPeeringStats gets all stats from an express route circuit in a resource group. // -// resourceGroupName is the name of the resource group. circuitName is the name -// of the express route circuit. peeringName is the name of the peering. +// resourceGroupName is the name of the resource group. circuitName is the name of the express route circuit. +// peeringName is the name of the peering. func (client ExpressRouteCircuitsClient) GetPeeringStats(resourceGroupName string, circuitName string, peeringName string) (result ExpressRouteCircuitStats, err error) { req, err := client.GetPeeringStatsPreparer(resourceGroupName, circuitName, peeringName) if err != nil { @@ -309,7 +303,7 @@ func (client ExpressRouteCircuitsClient) GetPeeringStatsPreparer(resourceGroupNa "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -341,11 +335,9 @@ func (client ExpressRouteCircuitsClient) GetPeeringStatsResponder(resp *http.Res return } -// GetStats gets all the stats from an express route circuit in a resource -// group. 
+// GetStats gets all the stats from an express route circuit in a resource group. // -// resourceGroupName is the name of the resource group. circuitName is the name -// of the express route circuit. +// resourceGroupName is the name of the resource group. circuitName is the name of the express route circuit. func (client ExpressRouteCircuitsClient) GetStats(resourceGroupName string, circuitName string) (result ExpressRouteCircuitStats, err error) { req, err := client.GetStatsPreparer(resourceGroupName, circuitName) if err != nil { @@ -376,7 +368,7 @@ func (client ExpressRouteCircuitsClient) GetStatsPreparer(resourceGroupName stri "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -440,7 +432,7 @@ func (client ExpressRouteCircuitsClient) ListPreparer(resourceGroupName string) "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -496,6 +488,51 @@ func (client ExpressRouteCircuitsClient) ListNextResults(lastResults ExpressRout return } +// ListComplete gets all elements from the list without paging. +func (client ExpressRouteCircuitsClient) ListComplete(resourceGroupName string, cancel <-chan struct{}) (<-chan ExpressRouteCircuit, <-chan error) { + resultChan := make(chan ExpressRouteCircuit) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List(resourceGroupName) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} + // ListAll gets all the express route circuits in a subscription. func (client ExpressRouteCircuitsClient) ListAll() (result ExpressRouteCircuitListResult, err error) { req, err := client.ListAllPreparer() @@ -525,7 +562,7 @@ func (client ExpressRouteCircuitsClient) ListAllPreparer() (*http.Request, error "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -581,15 +618,57 @@ func (client ExpressRouteCircuitsClient) ListAllNextResults(lastResults ExpressR return } -// ListArpTable gets the currently advertised ARP table associated with the -// express route circuit in a resource group. This method may poll for -// completion. Polling can be canceled by passing the cancel channel argument. -// The channel will be used to cancel polling and any outstanding HTTP -// requests. +// ListAllComplete gets all elements from the list without paging. 
+func (client ExpressRouteCircuitsClient) ListAllComplete(cancel <-chan struct{}) (<-chan ExpressRouteCircuit, <-chan error) { + resultChan := make(chan ExpressRouteCircuit) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.ListAll() + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListAllNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} + +// ListArpTable gets the currently advertised ARP table associated with the express route circuit in a resource group. +// This method may poll for completion. Polling can be canceled by passing the cancel channel argument. The channel +// will be used to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. circuitName is the name -// of the express route circuit. peeringName is the name of the peering. -// devicePath is the path of the device. +// resourceGroupName is the name of the resource group. circuitName is the name of the express route circuit. +// peeringName is the name of the peering. devicePath is the path of the device. func (client ExpressRouteCircuitsClient) ListArpTable(resourceGroupName string, circuitName string, peeringName string, devicePath string, cancel <-chan struct{}) (<-chan ExpressRouteCircuitsArpTableListResult, <-chan error) { resultChan := make(chan ExpressRouteCircuitsArpTableListResult, 1) errChan := make(chan error, 1) @@ -597,8 +676,10 @@ func (client ExpressRouteCircuitsClient) ListArpTable(resourceGroupName string, var err error var result ExpressRouteCircuitsArpTableListResult defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -633,7 +714,7 @@ func (client ExpressRouteCircuitsClient) ListArpTablePreparer(resourceGroupName "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -667,15 +748,12 @@ func (client ExpressRouteCircuitsClient) ListArpTableResponder(resp *http.Respon return } -// ListRoutesTable gets the currently advertised routes table associated with -// the express route circuit in a resource group. This method may poll for -// completion. Polling can be canceled by passing the cancel channel argument. -// The channel will be used to cancel polling and any outstanding HTTP -// requests. +// ListRoutesTable gets the currently advertised routes table associated with the express route circuit in a resource +// group. This method may poll for completion. Polling can be canceled by passing the cancel channel argument. The +// channel will be used to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. circuitName is the name -// of the express route circuit. peeringName is the name of the peering. -// devicePath is the path of the device. +// resourceGroupName is the name of the resource group. circuitName is the name of the express route circuit. 
+// peeringName is the name of the peering. devicePath is the path of the device. func (client ExpressRouteCircuitsClient) ListRoutesTable(resourceGroupName string, circuitName string, peeringName string, devicePath string, cancel <-chan struct{}) (<-chan ExpressRouteCircuitsRoutesTableListResult, <-chan error) { resultChan := make(chan ExpressRouteCircuitsRoutesTableListResult, 1) errChan := make(chan error, 1) @@ -683,8 +761,10 @@ func (client ExpressRouteCircuitsClient) ListRoutesTable(resourceGroupName strin var err error var result ExpressRouteCircuitsRoutesTableListResult defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -719,7 +799,7 @@ func (client ExpressRouteCircuitsClient) ListRoutesTablePreparer(resourceGroupNa "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -753,15 +833,12 @@ func (client ExpressRouteCircuitsClient) ListRoutesTableResponder(resp *http.Res return } -// ListRoutesTableSummary gets the currently advertised routes table summary -// associated with the express route circuit in a resource group. This method -// may poll for completion. Polling can be canceled by passing the cancel -// channel argument. The channel will be used to cancel polling and any -// outstanding HTTP requests. +// ListRoutesTableSummary gets the currently advertised routes table summary associated with the express route circuit +// in a resource group. This method may poll for completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. circuitName is the name -// of the express route circuit. peeringName is the name of the peering. -// devicePath is the path of the device. +// resourceGroupName is the name of the resource group. circuitName is the name of the express route circuit. +// peeringName is the name of the peering. devicePath is the path of the device. 
func (client ExpressRouteCircuitsClient) ListRoutesTableSummary(resourceGroupName string, circuitName string, peeringName string, devicePath string, cancel <-chan struct{}) (<-chan ExpressRouteCircuitsRoutesTableSummaryListResult, <-chan error) { resultChan := make(chan ExpressRouteCircuitsRoutesTableSummaryListResult, 1) errChan := make(chan error, 1) @@ -769,8 +846,10 @@ func (client ExpressRouteCircuitsClient) ListRoutesTableSummary(resourceGroupNam var err error var result ExpressRouteCircuitsRoutesTableSummaryListResult defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -805,7 +884,7 @@ func (client ExpressRouteCircuitsClient) ListRoutesTableSummaryPreparer(resource "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressrouteserviceproviders.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressrouteserviceproviders.go index 94db9a4f6147..5e39087b2a5c 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressrouteserviceproviders.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressrouteserviceproviders.go @@ -14,9 +14,8 @@ package network // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/Azure/go-autorest/autorest" @@ -24,20 +23,18 @@ import ( "net/http" ) -// ExpressRouteServiceProvidersClient is the composite Swagger for Network -// Client +// ExpressRouteServiceProvidersClient is the network Client type ExpressRouteServiceProvidersClient struct { ManagementClient } -// NewExpressRouteServiceProvidersClient creates an instance of the -// ExpressRouteServiceProvidersClient client. +// NewExpressRouteServiceProvidersClient creates an instance of the ExpressRouteServiceProvidersClient client. func NewExpressRouteServiceProvidersClient(subscriptionID string) ExpressRouteServiceProvidersClient { return NewExpressRouteServiceProvidersClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewExpressRouteServiceProvidersClientWithBaseURI creates an instance of the -// ExpressRouteServiceProvidersClient client. +// NewExpressRouteServiceProvidersClientWithBaseURI creates an instance of the ExpressRouteServiceProvidersClient +// client. func NewExpressRouteServiceProvidersClientWithBaseURI(baseURI string, subscriptionID string) ExpressRouteServiceProvidersClient { return ExpressRouteServiceProvidersClient{NewWithBaseURI(baseURI, subscriptionID)} } @@ -71,7 +68,7 @@ func (client ExpressRouteServiceProvidersClient) ListPreparer() (*http.Request, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -126,3 +123,48 @@ func (client ExpressRouteServiceProvidersClient) ListNextResults(lastResults Exp return } + +// ListComplete gets all elements from the list without paging. 
+func (client ExpressRouteServiceProvidersClient) ListComplete(cancel <-chan struct{}) (<-chan ExpressRouteServiceProvider, <-chan error) { + resultChan := make(chan ExpressRouteServiceProvider) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List() + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/inboundnatrules.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/inboundnatrules.go new file mode 100644 index 000000000000..eab80ac57669 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/inboundnatrules.go @@ -0,0 +1,436 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "net/http" +) + +// InboundNatRulesClient is the network Client +type InboundNatRulesClient struct { + ManagementClient +} + +// NewInboundNatRulesClient creates an instance of the InboundNatRulesClient client. +func NewInboundNatRulesClient(subscriptionID string) InboundNatRulesClient { + return NewInboundNatRulesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewInboundNatRulesClientWithBaseURI creates an instance of the InboundNatRulesClient client. +func NewInboundNatRulesClientWithBaseURI(baseURI string, subscriptionID string) InboundNatRulesClient { + return InboundNatRulesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a load balancer inbound nat rule. This method may poll for completion. Polling can +// be canceled by passing the cancel channel argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is the name of the resource group. loadBalancerName is the name of the load balancer. +// inboundNatRuleName is the name of the inbound nat rule. inboundNatRuleParameters is parameters supplied to the +// create or update inbound nat rule operation. 
+func (client InboundNatRulesClient) CreateOrUpdate(resourceGroupName string, loadBalancerName string, inboundNatRuleName string, inboundNatRuleParameters InboundNatRule, cancel <-chan struct{}) (<-chan InboundNatRule, <-chan error) { + resultChan := make(chan InboundNatRule, 1) + errChan := make(chan error, 1) + if err := validation.Validate([]validation.Validation{ + {TargetValue: inboundNatRuleParameters, + Constraints: []validation.Constraint{{Target: "inboundNatRuleParameters.InboundNatRulePropertiesFormat", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "inboundNatRuleParameters.InboundNatRulePropertiesFormat.BackendIPConfiguration", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "inboundNatRuleParameters.InboundNatRulePropertiesFormat.BackendIPConfiguration.InterfaceIPConfigurationPropertiesFormat", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "inboundNatRuleParameters.InboundNatRulePropertiesFormat.BackendIPConfiguration.InterfaceIPConfigurationPropertiesFormat.PublicIPAddress", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "inboundNatRuleParameters.InboundNatRulePropertiesFormat.BackendIPConfiguration.InterfaceIPConfigurationPropertiesFormat.PublicIPAddress.PublicIPAddressPropertiesFormat", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "inboundNatRuleParameters.InboundNatRulePropertiesFormat.BackendIPConfiguration.InterfaceIPConfigurationPropertiesFormat.PublicIPAddress.PublicIPAddressPropertiesFormat.IPConfiguration", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "inboundNatRuleParameters.InboundNatRulePropertiesFormat.BackendIPConfiguration.InterfaceIPConfigurationPropertiesFormat.PublicIPAddress.PublicIPAddressPropertiesFormat.IPConfiguration.IPConfigurationPropertiesFormat", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "inboundNatRuleParameters.InboundNatRulePropertiesFormat.BackendIPConfiguration.InterfaceIPConfigurationPropertiesFormat.PublicIPAddress.PublicIPAddressPropertiesFormat.IPConfiguration.IPConfigurationPropertiesFormat.PublicIPAddress", Name: validation.Null, Rule: false, Chain: nil}}}, + }}, + }}, + }}, + }}, + }}, + }}}}}); err != nil { + errChan <- validation.NewErrorWithValidationError(err, "network.InboundNatRulesClient", "CreateOrUpdate") + close(errChan) + close(resultChan) + return resultChan, errChan + } + + go func() { + var err error + var result InboundNatRule + defer func() { + if err != nil { + errChan <- err + } + resultChan <- result + close(resultChan) + close(errChan) + }() + req, err := client.CreateOrUpdatePreparer(resourceGroupName, loadBalancerName, inboundNatRuleName, inboundNatRuleParameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InboundNatRulesClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.InboundNatRulesClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InboundNatRulesClient", "CreateOrUpdate", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client InboundNatRulesClient) CreateOrUpdatePreparer(resourceGroupName string, loadBalancerName string, inboundNatRuleName string, inboundNatRuleParameters InboundNatRule, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "inboundNatRuleName": autorest.Encode("path", inboundNatRuleName), + "loadBalancerName": autorest.Encode("path", loadBalancerName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-09-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}", pathParameters), + autorest.WithJSON(inboundNatRuleParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client InboundNatRulesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client InboundNatRulesClient) CreateOrUpdateResponder(resp *http.Response) (result InboundNatRule, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes the specified load balancer inbound nat rule. This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP +// requests. +// +// resourceGroupName is the name of the resource group. loadBalancerName is the name of the load balancer. +// inboundNatRuleName is the name of the inbound nat rule. 
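// Illustrative cancellation sketch (placeholder names; per the method comment,
// closing the cancel channel stops polling and any outstanding HTTP request):
//
//	cancel := make(chan struct{})
//	go func() {
//		time.Sleep(30 * time.Second) // caller-defined deadline, for example
//		close(cancel)
//	}()
//	respChan, errChan := client.Delete("example-rg", "example-lb", "example-nat-rule", cancel)
//	<-respChan
//	if err := <-errChan; err != nil {
//		log.Println(err)
//	}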
+func (client InboundNatRulesClient) Delete(resourceGroupName string, loadBalancerName string, inboundNatRuleName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { + resultChan := make(chan autorest.Response, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result autorest.Response + defer func() { + if err != nil { + errChan <- err + } + resultChan <- result + close(resultChan) + close(errChan) + }() + req, err := client.DeletePreparer(resourceGroupName, loadBalancerName, inboundNatRuleName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InboundNatRulesClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "network.InboundNatRulesClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InboundNatRulesClient", "Delete", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// DeletePreparer prepares the Delete request. +func (client InboundNatRulesClient) DeletePreparer(resourceGroupName string, loadBalancerName string, inboundNatRuleName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "inboundNatRuleName": autorest.Encode("path", inboundNatRuleName), + "loadBalancerName": autorest.Encode("path", loadBalancerName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-09-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client InboundNatRulesClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client InboundNatRulesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the specified load balancer inbound nat rule. +// +// resourceGroupName is the name of the resource group. loadBalancerName is the name of the load balancer. +// inboundNatRuleName is the name of the inbound nat rule. expand is expands referenced resources. 
+func (client InboundNatRulesClient) Get(resourceGroupName string, loadBalancerName string, inboundNatRuleName string, expand string) (result InboundNatRule, err error) { + req, err := client.GetPreparer(resourceGroupName, loadBalancerName, inboundNatRuleName, expand) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InboundNatRulesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.InboundNatRulesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InboundNatRulesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client InboundNatRulesClient) GetPreparer(resourceGroupName string, loadBalancerName string, inboundNatRuleName string, expand string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "inboundNatRuleName": autorest.Encode("path", inboundNatRuleName), + "loadBalancerName": autorest.Encode("path", loadBalancerName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-09-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client InboundNatRulesClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client InboundNatRulesClient) GetResponder(resp *http.Response) (result InboundNatRule, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets all the inbound nat rules in a load balancer. +// +// resourceGroupName is the name of the resource group. loadBalancerName is the name of the load balancer. 
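// Illustrative sketch of manual paging with List/ListNextResults (placeholder
// names; ListComplete below wraps the same loop behind channels):
//
//	page, err := client.List("example-rg", "example-lb")
//	for err == nil {
//		if page.Value != nil {
//			for _, rule := range *page.Value {
//				if rule.Name != nil {
//					fmt.Println(*rule.Name)
//				}
//			}
//		}
//		if page.NextLink == nil {
//			break
//		}
//		page, err = client.ListNextResults(page)
//	}
//	if err != nil {
//		log.Fatal(err)
//	}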
+func (client InboundNatRulesClient) List(resourceGroupName string, loadBalancerName string) (result InboundNatRuleListResult, err error) { + req, err := client.ListPreparer(resourceGroupName, loadBalancerName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InboundNatRulesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.InboundNatRulesClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InboundNatRulesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client InboundNatRulesClient) ListPreparer(resourceGroupName string, loadBalancerName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "loadBalancerName": autorest.Encode("path", loadBalancerName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-09-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client InboundNatRulesClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client InboundNatRulesClient) ListResponder(resp *http.Response) (result InboundNatRuleListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client InboundNatRulesClient) ListNextResults(lastResults InboundNatRuleListResult) (result InboundNatRuleListResult, err error) { + req, err := lastResults.InboundNatRuleListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.InboundNatRulesClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.InboundNatRulesClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InboundNatRulesClient", "List", resp, "Failure responding to next results request") + } + + return +} + +// ListComplete gets all elements from the list without paging. 
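// Usage sketch (illustrative; not part of the vendored file): paging through
// inbound NAT rules by hand with List and ListNextResults; NextLink signals
// whether another page exists. Names are placeholders and an authorizer is
// assumed to be configured by the caller.
package example

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/arm/network"
	"github.com/Azure/go-autorest/autorest"
)

func printAllInboundNATRules(subscriptionID, resourceGroup, lbName string, auth autorest.Authorizer) error {
	client := network.NewInboundNatRulesClient(subscriptionID)
	client.Authorizer = auth

	page, err := client.List(resourceGroup, lbName)
	if err != nil {
		return err
	}
	for {
		if page.Value != nil {
			for _, rule := range *page.Value {
				if rule.Name != nil {
					fmt.Println("inbound NAT rule:", *rule.Name)
				}
			}
		}
		if page.NextLink == nil {
			break
		}
		if page, err = client.ListNextResults(page); err != nil {
			return err
		}
	}
	return nil
}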
+func (client InboundNatRulesClient) ListComplete(resourceGroupName string, loadBalancerName string, cancel <-chan struct{}) (<-chan InboundNatRule, <-chan error) { + resultChan := make(chan InboundNatRule) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List(resourceGroupName, loadBalancerName) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaceipconfigurations.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaceipconfigurations.go new file mode 100644 index 000000000000..ca0359f09006 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaceipconfigurations.go @@ -0,0 +1,240 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// InterfaceIPConfigurationsClient is the network Client +type InterfaceIPConfigurationsClient struct { + ManagementClient +} + +// NewInterfaceIPConfigurationsClient creates an instance of the InterfaceIPConfigurationsClient client. +func NewInterfaceIPConfigurationsClient(subscriptionID string) InterfaceIPConfigurationsClient { + return NewInterfaceIPConfigurationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewInterfaceIPConfigurationsClientWithBaseURI creates an instance of the InterfaceIPConfigurationsClient client. +func NewInterfaceIPConfigurationsClientWithBaseURI(baseURI string, subscriptionID string) InterfaceIPConfigurationsClient { + return InterfaceIPConfigurationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get gets the specified network interface ip configuration. +// +// resourceGroupName is the name of the resource group. networkInterfaceName is the name of the network interface. +// IPConfigurationName is the name of the ip configuration name. 
+func (client InterfaceIPConfigurationsClient) Get(resourceGroupName string, networkInterfaceName string, IPConfigurationName string) (result InterfaceIPConfiguration, err error) { + req, err := client.GetPreparer(resourceGroupName, networkInterfaceName, IPConfigurationName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfaceIPConfigurationsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.InterfaceIPConfigurationsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfaceIPConfigurationsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client InterfaceIPConfigurationsClient) GetPreparer(resourceGroupName string, networkInterfaceName string, IPConfigurationName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "ipConfigurationName": autorest.Encode("path", IPConfigurationName), + "networkInterfaceName": autorest.Encode("path", networkInterfaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-09-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/ipConfigurations/{ipConfigurationName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client InterfaceIPConfigurationsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client InterfaceIPConfigurationsClient) GetResponder(resp *http.Response) (result InterfaceIPConfiguration, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List get all ip configurations in a network interface +// +// resourceGroupName is the name of the resource group. networkInterfaceName is the name of the network interface. 
+func (client InterfaceIPConfigurationsClient) List(resourceGroupName string, networkInterfaceName string) (result InterfaceIPConfigurationListResult, err error) { + req, err := client.ListPreparer(resourceGroupName, networkInterfaceName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfaceIPConfigurationsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.InterfaceIPConfigurationsClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfaceIPConfigurationsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client InterfaceIPConfigurationsClient) ListPreparer(resourceGroupName string, networkInterfaceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkInterfaceName": autorest.Encode("path", networkInterfaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-09-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/ipConfigurations", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client InterfaceIPConfigurationsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client InterfaceIPConfigurationsClient) ListResponder(resp *http.Response) (result InterfaceIPConfigurationListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client InterfaceIPConfigurationsClient) ListNextResults(lastResults InterfaceIPConfigurationListResult) (result InterfaceIPConfigurationListResult, err error) { + req, err := lastResults.InterfaceIPConfigurationListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.InterfaceIPConfigurationsClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.InterfaceIPConfigurationsClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfaceIPConfigurationsClient", "List", resp, "Failure responding to next results request") + } + + return +} + +// ListComplete gets all elements from the list without paging. +func (client InterfaceIPConfigurationsClient) ListComplete(resourceGroupName string, networkInterfaceName string, cancel <-chan struct{}) (<-chan InterfaceIPConfiguration, <-chan error) { + resultChan := make(chan InterfaceIPConfiguration) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List(resourceGroupName, networkInterfaceName) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaceloadbalancers.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaceloadbalancers.go new file mode 100644 index 000000000000..c7b7d272f729 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaceloadbalancers.go @@ -0,0 +1,173 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// InterfaceLoadBalancersClient is the network Client +type InterfaceLoadBalancersClient struct { + ManagementClient +} + +// NewInterfaceLoadBalancersClient creates an instance of the InterfaceLoadBalancersClient client. 
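// Usage sketch (illustrative; not part of the vendored file): consuming the
// channel pair returned by the ListComplete method above. The result channel
// is ranged over until the producer closes it; the buffered error channel
// holds at most one error and is checked afterwards. Names are placeholders;
// an authorizer is assumed.
package example

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/arm/network"
	"github.com/Azure/go-autorest/autorest"
)

func printNICIPConfigurations(subscriptionID, resourceGroup, nicName string, auth autorest.Authorizer) error {
	client := network.NewInterfaceIPConfigurationsClient(subscriptionID)
	client.Authorizer = auth

	cancel := make(chan struct{}) // close(cancel) would stop iteration early
	items, errs := client.ListComplete(resourceGroup, nicName, cancel)

	for cfg := range items {
		if cfg.Name != nil {
			fmt.Println("ip configuration:", *cfg.Name)
		}
	}
	if err := <-errs; err != nil {
		return err
	}
	return nil
}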
+func NewInterfaceLoadBalancersClient(subscriptionID string) InterfaceLoadBalancersClient { + return NewInterfaceLoadBalancersClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewInterfaceLoadBalancersClientWithBaseURI creates an instance of the InterfaceLoadBalancersClient client. +func NewInterfaceLoadBalancersClientWithBaseURI(baseURI string, subscriptionID string) InterfaceLoadBalancersClient { + return InterfaceLoadBalancersClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List list all load balancers in a network interface. +// +// resourceGroupName is the name of the resource group. networkInterfaceName is the name of the network interface. +func (client InterfaceLoadBalancersClient) List(resourceGroupName string, networkInterfaceName string) (result InterfaceLoadBalancerListResult, err error) { + req, err := client.ListPreparer(resourceGroupName, networkInterfaceName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfaceLoadBalancersClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.InterfaceLoadBalancersClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfaceLoadBalancersClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client InterfaceLoadBalancersClient) ListPreparer(resourceGroupName string, networkInterfaceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkInterfaceName": autorest.Encode("path", networkInterfaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-09-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/loadBalancers", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client InterfaceLoadBalancersClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client InterfaceLoadBalancersClient) ListResponder(resp *http.Response) (result InterfaceLoadBalancerListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
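// Usage sketch (illustrative; not part of the vendored file): collecting the
// names of the load balancers that reference a network interface from the
// first page returned by List above. Further pages, if any, could be followed
// via NextLink or ListComplete. Names are placeholders; an authorizer is
// assumed.
package example

import (
	"github.com/Azure/azure-sdk-for-go/arm/network"
	"github.com/Azure/go-autorest/autorest"
)

func loadBalancersForNIC(subscriptionID, resourceGroup, nicName string, auth autorest.Authorizer) ([]string, error) {
	client := network.NewInterfaceLoadBalancersClient(subscriptionID)
	client.Authorizer = auth

	page, err := client.List(resourceGroup, nicName)
	if err != nil {
		return nil, err
	}
	var names []string
	if page.Value != nil {
		for _, lb := range *page.Value {
			if lb.Name != nil {
				names = append(names, *lb.Name)
			}
		}
	}
	return names, nil
}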
+func (client InterfaceLoadBalancersClient) ListNextResults(lastResults InterfaceLoadBalancerListResult) (result InterfaceLoadBalancerListResult, err error) { + req, err := lastResults.InterfaceLoadBalancerListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.InterfaceLoadBalancersClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.InterfaceLoadBalancersClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfaceLoadBalancersClient", "List", resp, "Failure responding to next results request") + } + + return +} + +// ListComplete gets all elements from the list without paging. +func (client InterfaceLoadBalancersClient) ListComplete(resourceGroupName string, networkInterfaceName string, cancel <-chan struct{}) (<-chan LoadBalancer, <-chan error) { + resultChan := make(chan LoadBalancer) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List(resourceGroupName, networkInterfaceName) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaces.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaces.go index 8918c08b4b70..c2fdb3e901b7 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaces.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaces.go @@ -14,9 +14,8 @@ package network // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/Azure/go-autorest/autorest" @@ -24,7 +23,7 @@ import ( "net/http" ) -// InterfacesClient is the composite Swagger for Network Client +// InterfacesClient is the network Client type InterfacesClient struct { ManagementClient } @@ -34,20 +33,17 @@ func NewInterfacesClient(subscriptionID string) InterfacesClient { return NewInterfacesClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewInterfacesClientWithBaseURI creates an instance of the InterfacesClient -// client. +// NewInterfacesClientWithBaseURI creates an instance of the InterfacesClient client. func NewInterfacesClientWithBaseURI(baseURI string, subscriptionID string) InterfacesClient { return InterfacesClient{NewWithBaseURI(baseURI, subscriptionID)} } -// CreateOrUpdate creates or updates a network interface. This method may poll -// for completion. 
Polling can be canceled by passing the cancel channel -// argument. The channel will be used to cancel polling and any outstanding -// HTTP requests. +// CreateOrUpdate creates or updates a network interface. This method may poll for completion. Polling can be canceled +// by passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP +// requests. // -// resourceGroupName is the name of the resource group. networkInterfaceName is -// the name of the network interface. parameters is parameters supplied to the -// create or update network interface operation. +// resourceGroupName is the name of the resource group. networkInterfaceName is the name of the network interface. +// parameters is parameters supplied to the create or update network interface operation. func (client InterfacesClient) CreateOrUpdate(resourceGroupName string, networkInterfaceName string, parameters Interface, cancel <-chan struct{}) (<-chan Interface, <-chan error) { resultChan := make(chan Interface, 1) errChan := make(chan error, 1) @@ -55,8 +51,10 @@ func (client InterfacesClient) CreateOrUpdate(resourceGroupName string, networkI var err error var result Interface defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -89,7 +87,7 @@ func (client InterfacesClient) CreateOrUpdatePreparer(resourceGroupName string, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -125,13 +123,10 @@ func (client InterfacesClient) CreateOrUpdateResponder(resp *http.Response) (res return } -// Delete deletes the specified network interface. This method may poll for -// completion. Polling can be canceled by passing the cancel channel argument. -// The channel will be used to cancel polling and any outstanding HTTP -// requests. +// Delete deletes the specified network interface. This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. networkInterfaceName is -// the name of the network interface. +// resourceGroupName is the name of the resource group. networkInterfaceName is the name of the network interface. func (client InterfacesClient) Delete(resourceGroupName string, networkInterfaceName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { resultChan := make(chan autorest.Response, 1) errChan := make(chan error, 1) @@ -139,8 +134,10 @@ func (client InterfacesClient) Delete(resourceGroupName string, networkInterface var err error var result autorest.Response defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -173,7 +170,7 @@ func (client InterfacesClient) DeletePreparer(resourceGroupName string, networkI "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -208,8 +205,8 @@ func (client InterfacesClient) DeleteResponder(resp *http.Response) (result auto // Get gets information about the specified network interface. // -// resourceGroupName is the name of the resource group. 
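// Usage sketch (illustrative; not part of the vendored file): consuming
// CreateOrUpdate with the channel behaviour introduced above: the error
// channel receives a value only on failure, while the result channel always
// receives exactly one value. The network.Interface parameters and the
// authorizer are assumed to be built by the caller; names are placeholders.
package example

import (
	"github.com/Azure/azure-sdk-for-go/arm/network"
	"github.com/Azure/go-autorest/autorest"
)

func createOrUpdateNIC(subscriptionID, resourceGroup, nicName string, params network.Interface, auth autorest.Authorizer) (network.Interface, error) {
	client := network.NewInterfacesClient(subscriptionID)
	client.Authorizer = auth

	cancel := make(chan struct{}) // close(cancel) aborts polling and requests
	nicChan, errChan := client.CreateOrUpdate(resourceGroup, nicName, params, cancel)

	// Both channels are buffered with capacity one, so reading the error
	// channel first cannot deadlock the producer goroutine.
	if err := <-errChan; err != nil {
		return network.Interface{}, err
	}
	return <-nicChan, nil
}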
networkInterfaceName is -// the name of the network interface. expand is expands referenced resources. +// resourceGroupName is the name of the resource group. networkInterfaceName is the name of the network interface. +// expand is expands referenced resources. func (client InterfacesClient) Get(resourceGroupName string, networkInterfaceName string, expand string) (result Interface, err error) { req, err := client.GetPreparer(resourceGroupName, networkInterfaceName, expand) if err != nil { @@ -240,7 +237,7 @@ func (client InterfacesClient) GetPreparer(resourceGroupName string, networkInte "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -275,13 +272,11 @@ func (client InterfacesClient) GetResponder(resp *http.Response) (result Interfa return } -// GetEffectiveRouteTable gets all route tables applied to a network interface. -// This method may poll for completion. Polling can be canceled by passing the -// cancel channel argument. The channel will be used to cancel polling and any +// GetEffectiveRouteTable gets all route tables applied to a network interface. This method may poll for completion. +// Polling can be canceled by passing the cancel channel argument. The channel will be used to cancel polling and any // outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. networkInterfaceName is -// the name of the network interface. +// resourceGroupName is the name of the resource group. networkInterfaceName is the name of the network interface. func (client InterfacesClient) GetEffectiveRouteTable(resourceGroupName string, networkInterfaceName string, cancel <-chan struct{}) (<-chan EffectiveRouteListResult, <-chan error) { resultChan := make(chan EffectiveRouteListResult, 1) errChan := make(chan error, 1) @@ -289,8 +284,10 @@ func (client InterfacesClient) GetEffectiveRouteTable(resourceGroupName string, var err error var result EffectiveRouteListResult defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -323,7 +320,7 @@ func (client InterfacesClient) GetEffectiveRouteTablePreparer(resourceGroupName "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -357,13 +354,11 @@ func (client InterfacesClient) GetEffectiveRouteTableResponder(resp *http.Respon return } -// GetVirtualMachineScaleSetNetworkInterface get the specified network -// interface in a virtual machine scale set. +// GetVirtualMachineScaleSetNetworkInterface get the specified network interface in a virtual machine scale set. // -// resourceGroupName is the name of the resource group. -// virtualMachineScaleSetName is the name of the virtual machine scale set. -// virtualmachineIndex is the virtual machine index. networkInterfaceName is -// the name of the network interface. expand is expands referenced resources. +// resourceGroupName is the name of the resource group. virtualMachineScaleSetName is the name of the virtual machine +// scale set. virtualmachineIndex is the virtual machine index. networkInterfaceName is the name of the network +// interface. expand is expands referenced resources. 
func (client InterfacesClient) GetVirtualMachineScaleSetNetworkInterface(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (result Interface, err error) { req, err := client.GetVirtualMachineScaleSetNetworkInterfacePreparer(resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName, expand) if err != nil { @@ -396,7 +391,7 @@ func (client InterfacesClient) GetVirtualMachineScaleSetNetworkInterfacePreparer "virtualMachineScaleSetName": autorest.Encode("path", virtualMachineScaleSetName), } - const APIVersion = "2016-09-01" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -463,7 +458,7 @@ func (client InterfacesClient) ListPreparer(resourceGroupName string) (*http.Req "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -519,6 +514,51 @@ func (client InterfacesClient) ListNextResults(lastResults InterfaceListResult) return } +// ListComplete gets all elements from the list without paging. +func (client InterfacesClient) ListComplete(resourceGroupName string, cancel <-chan struct{}) (<-chan Interface, <-chan error) { + resultChan := make(chan Interface) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List(resourceGroupName) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} + // ListAll gets all network interfaces in a subscription. func (client InterfacesClient) ListAll() (result InterfaceListResult, err error) { req, err := client.ListAllPreparer() @@ -548,7 +588,7 @@ func (client InterfacesClient) ListAllPreparer() (*http.Request, error) { "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -604,13 +644,56 @@ func (client InterfacesClient) ListAllNextResults(lastResults InterfaceListResul return } -// ListEffectiveNetworkSecurityGroups gets all network security groups applied -// to a network interface. This method may poll for completion. Polling can be -// canceled by passing the cancel channel argument. The channel will be used to +// ListAllComplete gets all elements from the list without paging. 
+func (client InterfacesClient) ListAllComplete(cancel <-chan struct{}) (<-chan Interface, <-chan error) { + resultChan := make(chan Interface) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.ListAll() + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListAllNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} + +// ListEffectiveNetworkSecurityGroups gets all network security groups applied to a network interface. This method may +// poll for completion. Polling can be canceled by passing the cancel channel argument. The channel will be used to // cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. networkInterfaceName is -// the name of the network interface. +// resourceGroupName is the name of the resource group. networkInterfaceName is the name of the network interface. func (client InterfacesClient) ListEffectiveNetworkSecurityGroups(resourceGroupName string, networkInterfaceName string, cancel <-chan struct{}) (<-chan EffectiveNetworkSecurityGroupListResult, <-chan error) { resultChan := make(chan EffectiveNetworkSecurityGroupListResult, 1) errChan := make(chan error, 1) @@ -618,8 +701,10 @@ func (client InterfacesClient) ListEffectiveNetworkSecurityGroups(resourceGroupN var err error var result EffectiveNetworkSecurityGroupListResult defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -652,7 +737,7 @@ func (client InterfacesClient) ListEffectiveNetworkSecurityGroupsPreparer(resour "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -686,11 +771,10 @@ func (client InterfacesClient) ListEffectiveNetworkSecurityGroupsResponder(resp return } -// ListVirtualMachineScaleSetNetworkInterfaces gets all network interfaces in a -// virtual machine scale set. +// ListVirtualMachineScaleSetNetworkInterfaces gets all network interfaces in a virtual machine scale set. // -// resourceGroupName is the name of the resource group. -// virtualMachineScaleSetName is the name of the virtual machine scale set. +// resourceGroupName is the name of the resource group. virtualMachineScaleSetName is the name of the virtual machine +// scale set. 
func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfaces(resourceGroupName string, virtualMachineScaleSetName string) (result InterfaceListResult, err error) { req, err := client.ListVirtualMachineScaleSetNetworkInterfacesPreparer(resourceGroupName, virtualMachineScaleSetName) if err != nil { @@ -721,7 +805,7 @@ func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfacesPrepar "virtualMachineScaleSetName": autorest.Encode("path", virtualMachineScaleSetName), } - const APIVersion = "2016-09-01" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -777,12 +861,56 @@ func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfacesNextRe return } -// ListVirtualMachineScaleSetVMNetworkInterfaces gets information about all -// network interfaces in a virtual machine in a virtual machine scale set. +// ListVirtualMachineScaleSetNetworkInterfacesComplete gets all elements from the list without paging. +func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfacesComplete(resourceGroupName string, virtualMachineScaleSetName string, cancel <-chan struct{}) (<-chan Interface, <-chan error) { + resultChan := make(chan Interface) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.ListVirtualMachineScaleSetNetworkInterfaces(resourceGroupName, virtualMachineScaleSetName) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListVirtualMachineScaleSetNetworkInterfacesNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} + +// ListVirtualMachineScaleSetVMNetworkInterfaces gets information about all network interfaces in a virtual machine in +// a virtual machine scale set. // -// resourceGroupName is the name of the resource group. -// virtualMachineScaleSetName is the name of the virtual machine scale set. -// virtualmachineIndex is the virtual machine index. +// resourceGroupName is the name of the resource group. virtualMachineScaleSetName is the name of the virtual machine +// scale set. virtualmachineIndex is the virtual machine index. func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfaces(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string) (result InterfaceListResult, err error) { req, err := client.ListVirtualMachineScaleSetVMNetworkInterfacesPreparer(resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex) if err != nil { @@ -814,7 +942,7 @@ func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfacesPrep "virtualMachineScaleSetName": autorest.Encode("path", virtualMachineScaleSetName), } - const APIVersion = "2016-09-01" + const APIVersion = "2017-03-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -869,3 +997,48 @@ func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfacesNext return } + +// ListVirtualMachineScaleSetVMNetworkInterfacesComplete gets all elements from the list without paging. 
+func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfacesComplete(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, cancel <-chan struct{}) (<-chan Interface, <-chan error) { + resultChan := make(chan Interface) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.ListVirtualMachineScaleSetVMNetworkInterfaces(resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListVirtualMachineScaleSetVMNetworkInterfacesNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancerbackendaddresspools.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancerbackendaddresspools.go new file mode 100644 index 000000000000..de14c3556084 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancerbackendaddresspools.go @@ -0,0 +1,241 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// LoadBalancerBackendAddressPoolsClient is the network Client +type LoadBalancerBackendAddressPoolsClient struct { + ManagementClient +} + +// NewLoadBalancerBackendAddressPoolsClient creates an instance of the LoadBalancerBackendAddressPoolsClient client. +func NewLoadBalancerBackendAddressPoolsClient(subscriptionID string) LoadBalancerBackendAddressPoolsClient { + return NewLoadBalancerBackendAddressPoolsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewLoadBalancerBackendAddressPoolsClientWithBaseURI creates an instance of the LoadBalancerBackendAddressPoolsClient +// client. +func NewLoadBalancerBackendAddressPoolsClientWithBaseURI(baseURI string, subscriptionID string) LoadBalancerBackendAddressPoolsClient { + return LoadBalancerBackendAddressPoolsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get gets load balancer backend address pool. +// +// resourceGroupName is the name of the resource group. loadBalancerName is the name of the load balancer. +// backendAddressPoolName is the name of the backend address pool. 
+func (client LoadBalancerBackendAddressPoolsClient) Get(resourceGroupName string, loadBalancerName string, backendAddressPoolName string) (result BackendAddressPool, err error) { + req, err := client.GetPreparer(resourceGroupName, loadBalancerName, backendAddressPoolName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancerBackendAddressPoolsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.LoadBalancerBackendAddressPoolsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancerBackendAddressPoolsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client LoadBalancerBackendAddressPoolsClient) GetPreparer(resourceGroupName string, loadBalancerName string, backendAddressPoolName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "backendAddressPoolName": autorest.Encode("path", backendAddressPoolName), + "loadBalancerName": autorest.Encode("path", loadBalancerName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-09-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools/{backendAddressPoolName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client LoadBalancerBackendAddressPoolsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client LoadBalancerBackendAddressPoolsClient) GetResponder(resp *http.Response) (result BackendAddressPool, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets all the load balancer backed address pools. +// +// resourceGroupName is the name of the resource group. loadBalancerName is the name of the load balancer. 
+func (client LoadBalancerBackendAddressPoolsClient) List(resourceGroupName string, loadBalancerName string) (result LoadBalancerBackendAddressPoolListResult, err error) { + req, err := client.ListPreparer(resourceGroupName, loadBalancerName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancerBackendAddressPoolsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.LoadBalancerBackendAddressPoolsClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancerBackendAddressPoolsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client LoadBalancerBackendAddressPoolsClient) ListPreparer(resourceGroupName string, loadBalancerName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "loadBalancerName": autorest.Encode("path", loadBalancerName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-09-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client LoadBalancerBackendAddressPoolsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client LoadBalancerBackendAddressPoolsClient) ListResponder(resp *http.Response) (result LoadBalancerBackendAddressPoolListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client LoadBalancerBackendAddressPoolsClient) ListNextResults(lastResults LoadBalancerBackendAddressPoolListResult) (result LoadBalancerBackendAddressPoolListResult, err error) { + req, err := lastResults.LoadBalancerBackendAddressPoolListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.LoadBalancerBackendAddressPoolsClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.LoadBalancerBackendAddressPoolsClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancerBackendAddressPoolsClient", "List", resp, "Failure responding to next results request") + } + + return +} + +// ListComplete gets all elements from the list without paging. +func (client LoadBalancerBackendAddressPoolsClient) ListComplete(resourceGroupName string, loadBalancerName string, cancel <-chan struct{}) (<-chan BackendAddressPool, <-chan error) { + resultChan := make(chan BackendAddressPool) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List(resourceGroupName, loadBalancerName) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancerfrontendipconfigurations.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancerfrontendipconfigurations.go new file mode 100644 index 000000000000..515b875a1831 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancerfrontendipconfigurations.go @@ -0,0 +1,242 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
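// Usage sketch (illustrative; not part of the vendored file): scanning backend
// address pools with the ListComplete method above and closing the cancel
// channel once a match is found, which lets the producer goroutine stop paging
// and close its channels. Names are placeholders; an authorizer is assumed.
package example

import (
	"github.com/Azure/azure-sdk-for-go/arm/network"
	"github.com/Azure/go-autorest/autorest"
)

func findBackendPool(subscriptionID, resourceGroup, lbName, poolName string, auth autorest.Authorizer) (*network.BackendAddressPool, error) {
	client := network.NewLoadBalancerBackendAddressPoolsClient(subscriptionID)
	client.Authorizer = auth

	cancel := make(chan struct{})
	items, errs := client.ListComplete(resourceGroup, lbName, cancel)

	var found *network.BackendAddressPool
	for pool := range items {
		if pool.Name != nil && *pool.Name == poolName {
			p := pool
			found = &p
			close(cancel) // unblocks the producer so it can stop and close the channels
			break
		}
	}
	if err := <-errs; err != nil && found == nil {
		return nil, err
	}
	return found, nil
}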
+ +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// LoadBalancerFrontendIPConfigurationsClient is the network Client +type LoadBalancerFrontendIPConfigurationsClient struct { + ManagementClient +} + +// NewLoadBalancerFrontendIPConfigurationsClient creates an instance of the LoadBalancerFrontendIPConfigurationsClient +// client. +func NewLoadBalancerFrontendIPConfigurationsClient(subscriptionID string) LoadBalancerFrontendIPConfigurationsClient { + return NewLoadBalancerFrontendIPConfigurationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewLoadBalancerFrontendIPConfigurationsClientWithBaseURI creates an instance of the +// LoadBalancerFrontendIPConfigurationsClient client. +func NewLoadBalancerFrontendIPConfigurationsClientWithBaseURI(baseURI string, subscriptionID string) LoadBalancerFrontendIPConfigurationsClient { + return LoadBalancerFrontendIPConfigurationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get gets load balancer frontend IP configuration. +// +// resourceGroupName is the name of the resource group. loadBalancerName is the name of the load balancer. +// frontendIPConfigurationName is the name of the frontend IP configuration. +func (client LoadBalancerFrontendIPConfigurationsClient) Get(resourceGroupName string, loadBalancerName string, frontendIPConfigurationName string) (result FrontendIPConfiguration, err error) { + req, err := client.GetPreparer(resourceGroupName, loadBalancerName, frontendIPConfigurationName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancerFrontendIPConfigurationsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.LoadBalancerFrontendIPConfigurationsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancerFrontendIPConfigurationsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client LoadBalancerFrontendIPConfigurationsClient) GetPreparer(resourceGroupName string, loadBalancerName string, frontendIPConfigurationName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "frontendIPConfigurationName": autorest.Encode("path", frontendIPConfigurationName), + "loadBalancerName": autorest.Encode("path", loadBalancerName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-09-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/frontendIPConfigurations/{frontendIPConfigurationName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. 
+func (client LoadBalancerFrontendIPConfigurationsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client LoadBalancerFrontendIPConfigurationsClient) GetResponder(resp *http.Response) (result FrontendIPConfiguration, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets all the load balancer frontend IP configurations. +// +// resourceGroupName is the name of the resource group. loadBalancerName is the name of the load balancer. +func (client LoadBalancerFrontendIPConfigurationsClient) List(resourceGroupName string, loadBalancerName string) (result LoadBalancerFrontendIPConfigurationListResult, err error) { + req, err := client.ListPreparer(resourceGroupName, loadBalancerName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancerFrontendIPConfigurationsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.LoadBalancerFrontendIPConfigurationsClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancerFrontendIPConfigurationsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client LoadBalancerFrontendIPConfigurationsClient) ListPreparer(resourceGroupName string, loadBalancerName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "loadBalancerName": autorest.Encode("path", loadBalancerName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-09-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/frontendIPConfigurations", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client LoadBalancerFrontendIPConfigurationsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client LoadBalancerFrontendIPConfigurationsClient) ListResponder(resp *http.Response) (result LoadBalancerFrontendIPConfigurationListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client LoadBalancerFrontendIPConfigurationsClient) ListNextResults(lastResults LoadBalancerFrontendIPConfigurationListResult) (result LoadBalancerFrontendIPConfigurationListResult, err error) { + req, err := lastResults.LoadBalancerFrontendIPConfigurationListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.LoadBalancerFrontendIPConfigurationsClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.LoadBalancerFrontendIPConfigurationsClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancerFrontendIPConfigurationsClient", "List", resp, "Failure responding to next results request") + } + + return +} + +// ListComplete gets all elements from the list without paging. +func (client LoadBalancerFrontendIPConfigurationsClient) ListComplete(resourceGroupName string, loadBalancerName string, cancel <-chan struct{}) (<-chan FrontendIPConfiguration, <-chan error) { + resultChan := make(chan FrontendIPConfiguration) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List(resourceGroupName, loadBalancerName) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancerloadbalancingrules.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancerloadbalancingrules.go new file mode 100644 index 000000000000..2ecd6be0d4f0 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancerloadbalancingrules.go @@ -0,0 +1,241 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// LoadBalancerLoadBalancingRulesClient is the network Client +type LoadBalancerLoadBalancingRulesClient struct { + ManagementClient +} + +// NewLoadBalancerLoadBalancingRulesClient creates an instance of the LoadBalancerLoadBalancingRulesClient client. +func NewLoadBalancerLoadBalancingRulesClient(subscriptionID string) LoadBalancerLoadBalancingRulesClient { + return NewLoadBalancerLoadBalancingRulesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewLoadBalancerLoadBalancingRulesClientWithBaseURI creates an instance of the LoadBalancerLoadBalancingRulesClient +// client. +func NewLoadBalancerLoadBalancingRulesClientWithBaseURI(baseURI string, subscriptionID string) LoadBalancerLoadBalancingRulesClient { + return LoadBalancerLoadBalancingRulesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get gets the specified load balancer load balancing rule. +// +// resourceGroupName is the name of the resource group. loadBalancerName is the name of the load balancer. +// loadBalancingRuleName is the name of the load balancing rule. +func (client LoadBalancerLoadBalancingRulesClient) Get(resourceGroupName string, loadBalancerName string, loadBalancingRuleName string) (result LoadBalancingRule, err error) { + req, err := client.GetPreparer(resourceGroupName, loadBalancerName, loadBalancingRuleName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancerLoadBalancingRulesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.LoadBalancerLoadBalancingRulesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancerLoadBalancingRulesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client LoadBalancerLoadBalancingRulesClient) GetPreparer(resourceGroupName string, loadBalancerName string, loadBalancingRuleName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "loadBalancerName": autorest.Encode("path", loadBalancerName), + "loadBalancingRuleName": autorest.Encode("path", loadBalancingRuleName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-09-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/loadBalancingRules/{loadBalancingRuleName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client LoadBalancerLoadBalancingRulesClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. 
+func (client LoadBalancerLoadBalancingRulesClient) GetResponder(resp *http.Response) (result LoadBalancingRule, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets all the load balancing rules in a load balancer. +// +// resourceGroupName is the name of the resource group. loadBalancerName is the name of the load balancer. +func (client LoadBalancerLoadBalancingRulesClient) List(resourceGroupName string, loadBalancerName string) (result LoadBalancerLoadBalancingRuleListResult, err error) { + req, err := client.ListPreparer(resourceGroupName, loadBalancerName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancerLoadBalancingRulesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.LoadBalancerLoadBalancingRulesClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancerLoadBalancingRulesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client LoadBalancerLoadBalancingRulesClient) ListPreparer(resourceGroupName string, loadBalancerName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "loadBalancerName": autorest.Encode("path", loadBalancerName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-09-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/loadBalancingRules", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client LoadBalancerLoadBalancingRulesClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client LoadBalancerLoadBalancingRulesClient) ListResponder(resp *http.Response) (result LoadBalancerLoadBalancingRuleListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
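The generated Get method is simply the Preparer, Sender, and Responder stages chained together; a minimal sketch of invoking them separately (handy when a request needs inspection between stages), with client configuration and resource names assumed.

package lbexample

import (
	"github.com/Azure/azure-sdk-for-go/arm/network"
)

// getRuleByStages performs the same work as client.Get, but runs the
// Preparer, Sender, and Responder stages explicitly.
func getRuleByStages(c network.LoadBalancerLoadBalancingRulesClient, resourceGroup, lbName, ruleName string) (network.LoadBalancingRule, error) {
	req, err := c.GetPreparer(resourceGroup, lbName, ruleName)
	if err != nil {
		return network.LoadBalancingRule{}, err
	}

	resp, err := c.GetSender(req)
	if err != nil {
		return network.LoadBalancingRule{}, err
	}

	// GetResponder closes the response body and unmarshals the rule.
	return c.GetResponder(resp)
}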
+func (client LoadBalancerLoadBalancingRulesClient) ListNextResults(lastResults LoadBalancerLoadBalancingRuleListResult) (result LoadBalancerLoadBalancingRuleListResult, err error) { + req, err := lastResults.LoadBalancerLoadBalancingRuleListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.LoadBalancerLoadBalancingRulesClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.LoadBalancerLoadBalancingRulesClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancerLoadBalancingRulesClient", "List", resp, "Failure responding to next results request") + } + + return +} + +// ListComplete gets all elements from the list without paging. +func (client LoadBalancerLoadBalancingRulesClient) ListComplete(resourceGroupName string, loadBalancerName string, cancel <-chan struct{}) (<-chan LoadBalancingRule, <-chan error) { + resultChan := make(chan LoadBalancingRule) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List(resourceGroupName, loadBalancerName) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancernetworkinterfaces.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancernetworkinterfaces.go new file mode 100644 index 000000000000..35650354255b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancernetworkinterfaces.go @@ -0,0 +1,174 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// LoadBalancerNetworkInterfacesClient is the network Client +type LoadBalancerNetworkInterfacesClient struct { + ManagementClient +} + +// NewLoadBalancerNetworkInterfacesClient creates an instance of the LoadBalancerNetworkInterfacesClient client. 
+func NewLoadBalancerNetworkInterfacesClient(subscriptionID string) LoadBalancerNetworkInterfacesClient { + return NewLoadBalancerNetworkInterfacesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewLoadBalancerNetworkInterfacesClientWithBaseURI creates an instance of the LoadBalancerNetworkInterfacesClient +// client. +func NewLoadBalancerNetworkInterfacesClientWithBaseURI(baseURI string, subscriptionID string) LoadBalancerNetworkInterfacesClient { + return LoadBalancerNetworkInterfacesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List gets associated load balancer network interfaces. +// +// resourceGroupName is the name of the resource group. loadBalancerName is the name of the load balancer. +func (client LoadBalancerNetworkInterfacesClient) List(resourceGroupName string, loadBalancerName string) (result InterfaceListResult, err error) { + req, err := client.ListPreparer(resourceGroupName, loadBalancerName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancerNetworkInterfacesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.LoadBalancerNetworkInterfacesClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancerNetworkInterfacesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client LoadBalancerNetworkInterfacesClient) ListPreparer(resourceGroupName string, loadBalancerName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "loadBalancerName": autorest.Encode("path", loadBalancerName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-09-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/networkInterfaces", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client LoadBalancerNetworkInterfacesClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client LoadBalancerNetworkInterfacesClient) ListResponder(resp *http.Response) (result InterfaceListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client LoadBalancerNetworkInterfacesClient) ListNextResults(lastResults InterfaceListResult) (result InterfaceListResult, err error) { + req, err := lastResults.InterfaceListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.LoadBalancerNetworkInterfacesClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.LoadBalancerNetworkInterfacesClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancerNetworkInterfacesClient", "List", resp, "Failure responding to next results request") + } + + return +} + +// ListComplete gets all elements from the list without paging. +func (client LoadBalancerNetworkInterfacesClient) ListComplete(resourceGroupName string, loadBalancerName string, cancel <-chan struct{}) (<-chan Interface, <-chan error) { + resultChan := make(chan Interface) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List(resourceGroupName, loadBalancerName) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancerprobes.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancerprobes.go new file mode 100644 index 000000000000..daee51e0f355 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancerprobes.go @@ -0,0 +1,240 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// LoadBalancerProbesClient is the network Client +type LoadBalancerProbesClient struct { + ManagementClient +} + +// NewLoadBalancerProbesClient creates an instance of the LoadBalancerProbesClient client. 
+func NewLoadBalancerProbesClient(subscriptionID string) LoadBalancerProbesClient { + return NewLoadBalancerProbesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewLoadBalancerProbesClientWithBaseURI creates an instance of the LoadBalancerProbesClient client. +func NewLoadBalancerProbesClientWithBaseURI(baseURI string, subscriptionID string) LoadBalancerProbesClient { + return LoadBalancerProbesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get gets load balancer probe. +// +// resourceGroupName is the name of the resource group. loadBalancerName is the name of the load balancer. probeName is +// the name of the probe. +func (client LoadBalancerProbesClient) Get(resourceGroupName string, loadBalancerName string, probeName string) (result Probe, err error) { + req, err := client.GetPreparer(resourceGroupName, loadBalancerName, probeName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancerProbesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.LoadBalancerProbesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancerProbesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client LoadBalancerProbesClient) GetPreparer(resourceGroupName string, loadBalancerName string, probeName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "loadBalancerName": autorest.Encode("path", loadBalancerName), + "probeName": autorest.Encode("path", probeName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-09-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/probes/{probeName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client LoadBalancerProbesClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client LoadBalancerProbesClient) GetResponder(resp *http.Response) (result Probe, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets all the load balancer probes. +// +// resourceGroupName is the name of the resource group. loadBalancerName is the name of the load balancer. 
+func (client LoadBalancerProbesClient) List(resourceGroupName string, loadBalancerName string) (result LoadBalancerProbeListResult, err error) { + req, err := client.ListPreparer(resourceGroupName, loadBalancerName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancerProbesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.LoadBalancerProbesClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancerProbesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client LoadBalancerProbesClient) ListPreparer(resourceGroupName string, loadBalancerName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "loadBalancerName": autorest.Encode("path", loadBalancerName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-09-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/probes", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client LoadBalancerProbesClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client LoadBalancerProbesClient) ListResponder(resp *http.Response) (result LoadBalancerProbeListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client LoadBalancerProbesClient) ListNextResults(lastResults LoadBalancerProbeListResult) (result LoadBalancerProbeListResult, err error) { + req, err := lastResults.LoadBalancerProbeListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.LoadBalancerProbesClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.LoadBalancerProbesClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancerProbesClient", "List", resp, "Failure responding to next results request") + } + + return +} + +// ListComplete gets all elements from the list without paging. 
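A minimal sketch of how a caller might drain the channels returned by the new ListComplete helper, assuming a configured client and placeholder resource names; closing the cancel channel lets the background paging goroutine exit early.

package lbexample

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/arm/network"
)

// countProbes consumes every probe emitted by ListComplete. The error channel
// is buffered and closed by the helper, so it yields at most one error once
// the result channel has been drained.
func countProbes(c network.LoadBalancerProbesClient, resourceGroup, lbName string) error {
	cancel := make(chan struct{})
	defer close(cancel) // signals the paging goroutine to stop if we quit receiving early

	probes, errs := c.ListComplete(resourceGroup, lbName, cancel)

	n := 0
	for range probes {
		n++
	}
	if err := <-errs; err != nil {
		return err
	}
	fmt.Printf("found %d probes\n", n)
	return nil
}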
+func (client LoadBalancerProbesClient) ListComplete(resourceGroupName string, loadBalancerName string, cancel <-chan struct{}) (<-chan Probe, <-chan error) { + resultChan := make(chan Probe) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List(resourceGroupName, loadBalancerName) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancers.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancers.go index 11d649b270a0..3a2528d85f4b 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancers.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancers.go @@ -14,9 +14,8 @@ package network // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/Azure/go-autorest/autorest" @@ -24,31 +23,26 @@ import ( "net/http" ) -// LoadBalancersClient is the composite Swagger for Network Client +// LoadBalancersClient is the network Client type LoadBalancersClient struct { ManagementClient } -// NewLoadBalancersClient creates an instance of the LoadBalancersClient -// client. +// NewLoadBalancersClient creates an instance of the LoadBalancersClient client. func NewLoadBalancersClient(subscriptionID string) LoadBalancersClient { return NewLoadBalancersClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewLoadBalancersClientWithBaseURI creates an instance of the -// LoadBalancersClient client. +// NewLoadBalancersClientWithBaseURI creates an instance of the LoadBalancersClient client. func NewLoadBalancersClientWithBaseURI(baseURI string, subscriptionID string) LoadBalancersClient { return LoadBalancersClient{NewWithBaseURI(baseURI, subscriptionID)} } -// CreateOrUpdate creates or updates a load balancer. This method may poll for -// completion. Polling can be canceled by passing the cancel channel argument. -// The channel will be used to cancel polling and any outstanding HTTP -// requests. +// CreateOrUpdate creates or updates a load balancer. This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. loadBalancerName is the -// name of the load balancer. parameters is parameters supplied to the create -// or update load balancer operation. +// resourceGroupName is the name of the resource group. loadBalancerName is the name of the load balancer. parameters +// is parameters supplied to the create or update load balancer operation. 
func (client LoadBalancersClient) CreateOrUpdate(resourceGroupName string, loadBalancerName string, parameters LoadBalancer, cancel <-chan struct{}) (<-chan LoadBalancer, <-chan error) { resultChan := make(chan LoadBalancer, 1) errChan := make(chan error, 1) @@ -56,8 +50,10 @@ func (client LoadBalancersClient) CreateOrUpdate(resourceGroupName string, loadB var err error var result LoadBalancer defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -90,7 +86,7 @@ func (client LoadBalancersClient) CreateOrUpdatePreparer(resourceGroupName strin "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -126,13 +122,10 @@ func (client LoadBalancersClient) CreateOrUpdateResponder(resp *http.Response) ( return } -// Delete deletes the specified load balancer. This method may poll for -// completion. Polling can be canceled by passing the cancel channel argument. -// The channel will be used to cancel polling and any outstanding HTTP -// requests. +// Delete deletes the specified load balancer. This method may poll for completion. Polling can be canceled by passing +// the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. loadBalancerName is the -// name of the load balancer. +// resourceGroupName is the name of the resource group. loadBalancerName is the name of the load balancer. func (client LoadBalancersClient) Delete(resourceGroupName string, loadBalancerName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { resultChan := make(chan autorest.Response, 1) errChan := make(chan error, 1) @@ -140,8 +133,10 @@ func (client LoadBalancersClient) Delete(resourceGroupName string, loadBalancerN var err error var result autorest.Response defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -174,7 +169,7 @@ func (client LoadBalancersClient) DeletePreparer(resourceGroupName string, loadB "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -209,8 +204,8 @@ func (client LoadBalancersClient) DeleteResponder(resp *http.Response) (result a // Get gets the specified load balancer. // -// resourceGroupName is the name of the resource group. loadBalancerName is the -// name of the load balancer. expand is expands referenced resources. +// resourceGroupName is the name of the resource group. loadBalancerName is the name of the load balancer. expand is +// expands referenced resources. func (client LoadBalancersClient) Get(resourceGroupName string, loadBalancerName string, expand string) (result LoadBalancer, err error) { req, err := client.GetPreparer(resourceGroupName, loadBalancerName, expand) if err != nil { @@ -241,7 +236,7 @@ func (client LoadBalancersClient) GetPreparer(resourceGroupName string, loadBala "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -308,7 +303,7 @@ func (client LoadBalancersClient) ListPreparer(resourceGroupName string) (*http. 
"subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -364,6 +359,51 @@ func (client LoadBalancersClient) ListNextResults(lastResults LoadBalancerListRe return } +// ListComplete gets all elements from the list without paging. +func (client LoadBalancersClient) ListComplete(resourceGroupName string, cancel <-chan struct{}) (<-chan LoadBalancer, <-chan error) { + resultChan := make(chan LoadBalancer) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List(resourceGroupName) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} + // ListAll gets all the load balancers in a subscription. func (client LoadBalancersClient) ListAll() (result LoadBalancerListResult, err error) { req, err := client.ListAllPreparer() @@ -393,7 +433,7 @@ func (client LoadBalancersClient) ListAllPreparer() (*http.Request, error) { "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -448,3 +488,48 @@ func (client LoadBalancersClient) ListAllNextResults(lastResults LoadBalancerLis return } + +// ListAllComplete gets all elements from the list without paging. +func (client LoadBalancersClient) ListAllComplete(cancel <-chan struct{}) (<-chan LoadBalancer, <-chan error) { + resultChan := make(chan LoadBalancer) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.ListAll() + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListAllNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/localnetworkgateways.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/localnetworkgateways.go index c1e66076e198..64cdea20a72b 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/localnetworkgateways.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/localnetworkgateways.go @@ -14,9 +14,8 @@ package network // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. 
+// Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/Azure/go-autorest/autorest" @@ -25,31 +24,27 @@ import ( "net/http" ) -// LocalNetworkGatewaysClient is the composite Swagger for Network Client +// LocalNetworkGatewaysClient is the network Client type LocalNetworkGatewaysClient struct { ManagementClient } -// NewLocalNetworkGatewaysClient creates an instance of the -// LocalNetworkGatewaysClient client. +// NewLocalNetworkGatewaysClient creates an instance of the LocalNetworkGatewaysClient client. func NewLocalNetworkGatewaysClient(subscriptionID string) LocalNetworkGatewaysClient { return NewLocalNetworkGatewaysClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewLocalNetworkGatewaysClientWithBaseURI creates an instance of the -// LocalNetworkGatewaysClient client. +// NewLocalNetworkGatewaysClientWithBaseURI creates an instance of the LocalNetworkGatewaysClient client. func NewLocalNetworkGatewaysClientWithBaseURI(baseURI string, subscriptionID string) LocalNetworkGatewaysClient { return LocalNetworkGatewaysClient{NewWithBaseURI(baseURI, subscriptionID)} } -// CreateOrUpdate creates or updates a local network gateway in the specified -// resource group. This method may poll for completion. Polling can be canceled -// by passing the cancel channel argument. The channel will be used to cancel +// CreateOrUpdate creates or updates a local network gateway in the specified resource group. This method may poll for +// completion. Polling can be canceled by passing the cancel channel argument. The channel will be used to cancel // polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. localNetworkGatewayName -// is the name of the local network gateway. parameters is parameters supplied -// to the create or update local network gateway operation. +// resourceGroupName is the name of the resource group. localNetworkGatewayName is the name of the local network +// gateway. parameters is parameters supplied to the create or update local network gateway operation. func (client LocalNetworkGatewaysClient) CreateOrUpdate(resourceGroupName string, localNetworkGatewayName string, parameters LocalNetworkGateway, cancel <-chan struct{}) (<-chan LocalNetworkGateway, <-chan error) { resultChan := make(chan LocalNetworkGateway, 1) errChan := make(chan error, 1) @@ -68,8 +63,10 @@ func (client LocalNetworkGatewaysClient) CreateOrUpdate(resourceGroupName string var err error var result LocalNetworkGateway defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -102,7 +99,7 @@ func (client LocalNetworkGatewaysClient) CreateOrUpdatePreparer(resourceGroupNam "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -138,13 +135,11 @@ func (client LocalNetworkGatewaysClient) CreateOrUpdateResponder(resp *http.Resp return } -// Delete deletes the specified local network gateway. This method may poll for -// completion. Polling can be canceled by passing the cancel channel argument. -// The channel will be used to cancel polling and any outstanding HTTP -// requests. +// Delete deletes the specified local network gateway. This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. 
The channel will be used to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. localNetworkGatewayName -// is the name of the local network gateway. +// resourceGroupName is the name of the resource group. localNetworkGatewayName is the name of the local network +// gateway. func (client LocalNetworkGatewaysClient) Delete(resourceGroupName string, localNetworkGatewayName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { resultChan := make(chan autorest.Response, 1) errChan := make(chan error, 1) @@ -161,8 +156,10 @@ func (client LocalNetworkGatewaysClient) Delete(resourceGroupName string, localN var err error var result autorest.Response defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -195,7 +192,7 @@ func (client LocalNetworkGatewaysClient) DeletePreparer(resourceGroupName string "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -230,8 +227,8 @@ func (client LocalNetworkGatewaysClient) DeleteResponder(resp *http.Response) (r // Get gets the specified local network gateway in a resource group. // -// resourceGroupName is the name of the resource group. localNetworkGatewayName -// is the name of the local network gateway. +// resourceGroupName is the name of the resource group. localNetworkGatewayName is the name of the local network +// gateway. func (client LocalNetworkGatewaysClient) Get(resourceGroupName string, localNetworkGatewayName string) (result LocalNetworkGateway, err error) { if err := validation.Validate([]validation.Validation{ {TargetValue: localNetworkGatewayName, @@ -268,7 +265,7 @@ func (client LocalNetworkGatewaysClient) GetPreparer(resourceGroupName string, l "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -332,7 +329,7 @@ func (client LocalNetworkGatewaysClient) ListPreparer(resourceGroupName string) "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -387,3 +384,48 @@ func (client LocalNetworkGatewaysClient) ListNextResults(lastResults LocalNetwor return } + +// ListComplete gets all elements from the list without paging. 
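The CreateOrUpdate and Delete hunks above change how the returned channels are populated: the error channel now receives a value only on failure, while the result channel still receives one before both are closed. A minimal sketch of consuming them, with client configuration and parameters assumed.

package lbexample

import (
	"github.com/Azure/azure-sdk-for-go/arm/network"
)

// createLocalGateway blocks until the long-running CreateOrUpdate operation
// completes. Reading both channels is safe: the SDK closes both when the call
// finishes, and a closed error channel yields nil when no error was sent.
func createLocalGateway(c network.LocalNetworkGatewaysClient, resourceGroup, gatewayName string, gw network.LocalNetworkGateway) (network.LocalNetworkGateway, error) {
	cancel := make(chan struct{}) // left open: we wait for the operation to finish
	resultChan, errChan := c.CreateOrUpdate(resourceGroup, gatewayName, gw, cancel)

	result := <-resultChan
	err := <-errChan
	return result, err
}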
+func (client LocalNetworkGatewaysClient) ListComplete(resourceGroupName string, cancel <-chan struct{}) (<-chan LocalNetworkGateway, <-chan error) { + resultChan := make(chan LocalNetworkGateway) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List(resourceGroupName) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/models.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/models.go index 505691db01b3..8e083f3669c6 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/models.go @@ -14,9 +14,8 @@ package network // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/Azure/go-autorest/autorest" @@ -35,75 +34,58 @@ const ( Deny Access = "Deny" ) -// ApplicationGatewayBackendHealthServerHealth enumerates the values for -// application gateway backend health server health. +// ApplicationGatewayBackendHealthServerHealth enumerates the values for application gateway backend health server +// health. type ApplicationGatewayBackendHealthServerHealth string const ( - // Down specifies the down state for application gateway backend health - // server health. + // Down specifies the down state for application gateway backend health server health. Down ApplicationGatewayBackendHealthServerHealth = "Down" - // Draining specifies the draining state for application gateway backend - // health server health. + // Draining specifies the draining state for application gateway backend health server health. Draining ApplicationGatewayBackendHealthServerHealth = "Draining" - // Partial specifies the partial state for application gateway backend - // health server health. + // Partial specifies the partial state for application gateway backend health server health. Partial ApplicationGatewayBackendHealthServerHealth = "Partial" - // Unknown specifies the unknown state for application gateway backend - // health server health. + // Unknown specifies the unknown state for application gateway backend health server health. Unknown ApplicationGatewayBackendHealthServerHealth = "Unknown" - // Up specifies the up state for application gateway backend health server - // health. + // Up specifies the up state for application gateway backend health server health. Up ApplicationGatewayBackendHealthServerHealth = "Up" ) -// ApplicationGatewayCookieBasedAffinity enumerates the values for application -// gateway cookie based affinity. 
+// ApplicationGatewayCookieBasedAffinity enumerates the values for application gateway cookie based affinity. type ApplicationGatewayCookieBasedAffinity string const ( - // Disabled specifies the disabled state for application gateway cookie - // based affinity. + // Disabled specifies the disabled state for application gateway cookie based affinity. Disabled ApplicationGatewayCookieBasedAffinity = "Disabled" - // Enabled specifies the enabled state for application gateway cookie based - // affinity. + // Enabled specifies the enabled state for application gateway cookie based affinity. Enabled ApplicationGatewayCookieBasedAffinity = "Enabled" ) -// ApplicationGatewayFirewallMode enumerates the values for application gateway -// firewall mode. +// ApplicationGatewayFirewallMode enumerates the values for application gateway firewall mode. type ApplicationGatewayFirewallMode string const ( - // Detection specifies the detection state for application gateway firewall - // mode. + // Detection specifies the detection state for application gateway firewall mode. Detection ApplicationGatewayFirewallMode = "Detection" - // Prevention specifies the prevention state for application gateway - // firewall mode. + // Prevention specifies the prevention state for application gateway firewall mode. Prevention ApplicationGatewayFirewallMode = "Prevention" ) -// ApplicationGatewayOperationalState enumerates the values for application -// gateway operational state. +// ApplicationGatewayOperationalState enumerates the values for application gateway operational state. type ApplicationGatewayOperationalState string const ( - // Running specifies the running state for application gateway operational - // state. + // Running specifies the running state for application gateway operational state. Running ApplicationGatewayOperationalState = "Running" - // Starting specifies the starting state for application gateway - // operational state. + // Starting specifies the starting state for application gateway operational state. Starting ApplicationGatewayOperationalState = "Starting" - // Stopped specifies the stopped state for application gateway operational - // state. + // Stopped specifies the stopped state for application gateway operational state. Stopped ApplicationGatewayOperationalState = "Stopped" - // Stopping specifies the stopping state for application gateway - // operational state. + // Stopping specifies the stopping state for application gateway operational state. Stopping ApplicationGatewayOperationalState = "Stopping" ) -// ApplicationGatewayProtocol enumerates the values for application gateway -// protocol. +// ApplicationGatewayProtocol enumerates the values for application gateway protocol. type ApplicationGatewayProtocol string const ( @@ -113,53 +95,147 @@ const ( HTTPS ApplicationGatewayProtocol = "Https" ) -// ApplicationGatewayRequestRoutingRuleType enumerates the values for -// application gateway request routing rule type. +// ApplicationGatewayRedirectType enumerates the values for application gateway redirect type. +type ApplicationGatewayRedirectType string + +const ( + // Found specifies the found state for application gateway redirect type. + Found ApplicationGatewayRedirectType = "Found" + // Permanent specifies the permanent state for application gateway redirect type. + Permanent ApplicationGatewayRedirectType = "Permanent" + // SeeOther specifies the see other state for application gateway redirect type. 
+ SeeOther ApplicationGatewayRedirectType = "SeeOther" + // Temporary specifies the temporary state for application gateway redirect type. + Temporary ApplicationGatewayRedirectType = "Temporary" +) + +// ApplicationGatewayRequestRoutingRuleType enumerates the values for application gateway request routing rule type. type ApplicationGatewayRequestRoutingRuleType string const ( - // Basic specifies the basic state for application gateway request routing - // rule type. + // Basic specifies the basic state for application gateway request routing rule type. Basic ApplicationGatewayRequestRoutingRuleType = "Basic" - // PathBasedRouting specifies the path based routing state for application - // gateway request routing rule type. + // PathBasedRouting specifies the path based routing state for application gateway request routing rule type. PathBasedRouting ApplicationGatewayRequestRoutingRuleType = "PathBasedRouting" ) -// ApplicationGatewaySkuName enumerates the values for application gateway sku -// name. +// ApplicationGatewaySkuName enumerates the values for application gateway sku name. type ApplicationGatewaySkuName string const ( - // StandardLarge specifies the standard large state for application gateway - // sku name. + // StandardLarge specifies the standard large state for application gateway sku name. StandardLarge ApplicationGatewaySkuName = "Standard_Large" - // StandardMedium specifies the standard medium state for application - // gateway sku name. + // StandardMedium specifies the standard medium state for application gateway sku name. StandardMedium ApplicationGatewaySkuName = "Standard_Medium" - // StandardSmall specifies the standard small state for application gateway - // sku name. + // StandardSmall specifies the standard small state for application gateway sku name. StandardSmall ApplicationGatewaySkuName = "Standard_Small" // WAFLarge specifies the waf large state for application gateway sku name. WAFLarge ApplicationGatewaySkuName = "WAF_Large" - // WAFMedium specifies the waf medium state for application gateway sku - // name. + // WAFMedium specifies the waf medium state for application gateway sku name. WAFMedium ApplicationGatewaySkuName = "WAF_Medium" ) -// ApplicationGatewaySslProtocol enumerates the values for application gateway -// ssl protocol. +// ApplicationGatewaySslCipherSuite enumerates the values for application gateway ssl cipher suite. +type ApplicationGatewaySslCipherSuite string + +const ( + // TLSDHEDSSWITHAES128CBCSHA specifies the tlsdhedsswithaes128cbcsha state for application gateway ssl cipher suite. + TLSDHEDSSWITHAES128CBCSHA ApplicationGatewaySslCipherSuite = "TLS_DHE_DSS_WITH_AES_128_CBC_SHA" + // TLSDHEDSSWITHAES128CBCSHA256 specifies the tlsdhedsswithaes128cbcsha256 state for application gateway ssl cipher + // suite. + TLSDHEDSSWITHAES128CBCSHA256 ApplicationGatewaySslCipherSuite = "TLS_DHE_DSS_WITH_AES_128_CBC_SHA256" + // TLSDHEDSSWITHAES256CBCSHA specifies the tlsdhedsswithaes256cbcsha state for application gateway ssl cipher suite. + TLSDHEDSSWITHAES256CBCSHA ApplicationGatewaySslCipherSuite = "TLS_DHE_DSS_WITH_AES_256_CBC_SHA" + // TLSDHEDSSWITHAES256CBCSHA256 specifies the tlsdhedsswithaes256cbcsha256 state for application gateway ssl cipher + // suite. + TLSDHEDSSWITHAES256CBCSHA256 ApplicationGatewaySslCipherSuite = "TLS_DHE_DSS_WITH_AES_256_CBC_SHA256" + // TLSDHERSAWITHAES128CBCSHA specifies the tlsdhersawithaes128cbcsha state for application gateway ssl cipher suite. 
+ TLSDHERSAWITHAES128CBCSHA ApplicationGatewaySslCipherSuite = "TLS_DHE_RSA_WITH_AES_128_CBC_SHA" + // TLSDHERSAWITHAES128GCMSHA256 specifies the tlsdhersawithaes128gcmsha256 state for application gateway ssl cipher + // suite. + TLSDHERSAWITHAES128GCMSHA256 ApplicationGatewaySslCipherSuite = "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256" + // TLSDHERSAWITHAES256CBCSHA specifies the tlsdhersawithaes256cbcsha state for application gateway ssl cipher suite. + TLSDHERSAWITHAES256CBCSHA ApplicationGatewaySslCipherSuite = "TLS_DHE_RSA_WITH_AES_256_CBC_SHA" + // TLSDHERSAWITHAES256GCMSHA384 specifies the tlsdhersawithaes256gcmsha384 state for application gateway ssl cipher + // suite. + TLSDHERSAWITHAES256GCMSHA384 ApplicationGatewaySslCipherSuite = "TLS_DHE_RSA_WITH_AES_256_GCM_SHA384" + // TLSECDHEECDSAWITHAES128CBCSHA specifies the tlsecdheecdsawithaes128cbcsha state for application gateway ssl cipher + // suite. + TLSECDHEECDSAWITHAES128CBCSHA ApplicationGatewaySslCipherSuite = "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA" + // TLSECDHEECDSAWITHAES128CBCSHA256 specifies the tlsecdheecdsawithaes128cbcsha256 state for application gateway ssl + // cipher suite. + TLSECDHEECDSAWITHAES128CBCSHA256 ApplicationGatewaySslCipherSuite = "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256" + // TLSECDHEECDSAWITHAES128GCMSHA256 specifies the tlsecdheecdsawithaes128gcmsha256 state for application gateway ssl + // cipher suite. + TLSECDHEECDSAWITHAES128GCMSHA256 ApplicationGatewaySslCipherSuite = "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256" + // TLSECDHEECDSAWITHAES256CBCSHA specifies the tlsecdheecdsawithaes256cbcsha state for application gateway ssl cipher + // suite. + TLSECDHEECDSAWITHAES256CBCSHA ApplicationGatewaySslCipherSuite = "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA" + // TLSECDHEECDSAWITHAES256CBCSHA384 specifies the tlsecdheecdsawithaes256cbcsha384 state for application gateway ssl + // cipher suite. + TLSECDHEECDSAWITHAES256CBCSHA384 ApplicationGatewaySslCipherSuite = "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384" + // TLSECDHEECDSAWITHAES256GCMSHA384 specifies the tlsecdheecdsawithaes256gcmsha384 state for application gateway ssl + // cipher suite. + TLSECDHEECDSAWITHAES256GCMSHA384 ApplicationGatewaySslCipherSuite = "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384" + // TLSECDHERSAWITHAES128CBCSHA specifies the tlsecdhersawithaes128cbcsha state for application gateway ssl cipher + // suite. + TLSECDHERSAWITHAES128CBCSHA ApplicationGatewaySslCipherSuite = "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA" + // TLSECDHERSAWITHAES128CBCSHA256 specifies the tlsecdhersawithaes128cbcsha256 state for application gateway ssl cipher + // suite. + TLSECDHERSAWITHAES128CBCSHA256 ApplicationGatewaySslCipherSuite = "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256" + // TLSECDHERSAWITHAES256CBCSHA specifies the tlsecdhersawithaes256cbcsha state for application gateway ssl cipher + // suite. + TLSECDHERSAWITHAES256CBCSHA ApplicationGatewaySslCipherSuite = "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA" + // TLSECDHERSAWITHAES256CBCSHA384 specifies the tlsecdhersawithaes256cbcsha384 state for application gateway ssl cipher + // suite. + TLSECDHERSAWITHAES256CBCSHA384 ApplicationGatewaySslCipherSuite = "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384" + // TLSRSAWITH3DESEDECBCSHA specifies the tlsrsawith3desedecbcsha state for application gateway ssl cipher suite. + TLSRSAWITH3DESEDECBCSHA ApplicationGatewaySslCipherSuite = "TLS_RSA_WITH_3DES_EDE_CBC_SHA" + // TLSRSAWITHAES128CBCSHA specifies the tlsrsawithaes128cbcsha state for application gateway ssl cipher suite. 
+ TLSRSAWITHAES128CBCSHA ApplicationGatewaySslCipherSuite = "TLS_RSA_WITH_AES_128_CBC_SHA" + // TLSRSAWITHAES128CBCSHA256 specifies the tlsrsawithaes128cbcsha256 state for application gateway ssl cipher suite. + TLSRSAWITHAES128CBCSHA256 ApplicationGatewaySslCipherSuite = "TLS_RSA_WITH_AES_128_CBC_SHA256" + // TLSRSAWITHAES128GCMSHA256 specifies the tlsrsawithaes128gcmsha256 state for application gateway ssl cipher suite. + TLSRSAWITHAES128GCMSHA256 ApplicationGatewaySslCipherSuite = "TLS_RSA_WITH_AES_128_GCM_SHA256" + // TLSRSAWITHAES256CBCSHA specifies the tlsrsawithaes256cbcsha state for application gateway ssl cipher suite. + TLSRSAWITHAES256CBCSHA ApplicationGatewaySslCipherSuite = "TLS_RSA_WITH_AES_256_CBC_SHA" + // TLSRSAWITHAES256CBCSHA256 specifies the tlsrsawithaes256cbcsha256 state for application gateway ssl cipher suite. + TLSRSAWITHAES256CBCSHA256 ApplicationGatewaySslCipherSuite = "TLS_RSA_WITH_AES_256_CBC_SHA256" + // TLSRSAWITHAES256GCMSHA384 specifies the tlsrsawithaes256gcmsha384 state for application gateway ssl cipher suite. + TLSRSAWITHAES256GCMSHA384 ApplicationGatewaySslCipherSuite = "TLS_RSA_WITH_AES_256_GCM_SHA384" +) + +// ApplicationGatewaySslPolicyName enumerates the values for application gateway ssl policy name. +type ApplicationGatewaySslPolicyName string + +const ( + // AppGwSslPolicy20150501 specifies the app gw ssl policy 20150501 state for application gateway ssl policy name. + AppGwSslPolicy20150501 ApplicationGatewaySslPolicyName = "AppGwSslPolicy20150501" + // AppGwSslPolicy20170401 specifies the app gw ssl policy 20170401 state for application gateway ssl policy name. + AppGwSslPolicy20170401 ApplicationGatewaySslPolicyName = "AppGwSslPolicy20170401" + // AppGwSslPolicy20170401S specifies the app gw ssl policy 20170401s state for application gateway ssl policy name. + AppGwSslPolicy20170401S ApplicationGatewaySslPolicyName = "AppGwSslPolicy20170401S" +) + +// ApplicationGatewaySslPolicyType enumerates the values for application gateway ssl policy type. +type ApplicationGatewaySslPolicyType string + +const ( + // Custom specifies the custom state for application gateway ssl policy type. + Custom ApplicationGatewaySslPolicyType = "Custom" + // Predefined specifies the predefined state for application gateway ssl policy type. + Predefined ApplicationGatewaySslPolicyType = "Predefined" +) + +// ApplicationGatewaySslProtocol enumerates the values for application gateway ssl protocol. type ApplicationGatewaySslProtocol string const ( - // TLSv10 specifies the tl sv 10 state for application gateway ssl - // protocol. + // TLSv10 specifies the tl sv 10 state for application gateway ssl protocol. TLSv10 ApplicationGatewaySslProtocol = "TLSv1_0" - // TLSv11 specifies the tl sv 11 state for application gateway ssl - // protocol. + // TLSv11 specifies the tl sv 11 state for application gateway ssl protocol. TLSv11 ApplicationGatewaySslProtocol = "TLSv1_1" - // TLSv12 specifies the tl sv 12 state for application gateway ssl - // protocol. + // TLSv12 specifies the tl sv 12 state for application gateway ssl protocol. TLSv12 ApplicationGatewaySslProtocol = "TLSv1_2" ) @@ -183,6 +259,16 @@ const ( Contains AssociationType = "Contains" ) +// AuthenticationMethod enumerates the values for authentication method. +type AuthenticationMethod string + +const ( + // EAPMSCHAPv2 specifies the eapmscha pv 2 state for authentication method. + EAPMSCHAPv2 AuthenticationMethod = "EAPMSCHAPv2" + // EAPTLS specifies the eaptls state for authentication method. 
+ EAPTLS AuthenticationMethod = "EAPTLS" +) + // AuthorizationUseStatus enumerates the values for authorization use status. type AuthorizationUseStatus string @@ -197,23 +283,32 @@ const ( type BgpPeerState string const ( - // BgpPeerStateConnected specifies the bgp peer state connected state for - // bgp peer state. + // BgpPeerStateConnected specifies the bgp peer state connected state for bgp peer state. BgpPeerStateConnected BgpPeerState = "Connected" - // BgpPeerStateConnecting specifies the bgp peer state connecting state for - // bgp peer state. + // BgpPeerStateConnecting specifies the bgp peer state connecting state for bgp peer state. BgpPeerStateConnecting BgpPeerState = "Connecting" - // BgpPeerStateIdle specifies the bgp peer state idle state for bgp peer - // state. + // BgpPeerStateIdle specifies the bgp peer state idle state for bgp peer state. BgpPeerStateIdle BgpPeerState = "Idle" - // BgpPeerStateStopped specifies the bgp peer state stopped state for bgp - // peer state. + // BgpPeerStateStopped specifies the bgp peer state stopped state for bgp peer state. BgpPeerStateStopped BgpPeerState = "Stopped" - // BgpPeerStateUnknown specifies the bgp peer state unknown state for bgp - // peer state. + // BgpPeerStateUnknown specifies the bgp peer state unknown state for bgp peer state. BgpPeerStateUnknown BgpPeerState = "Unknown" ) +// ConnectionStatus enumerates the values for connection status. +type ConnectionStatus string + +const ( + // ConnectionStatusConnected specifies the connection status connected state for connection status. + ConnectionStatusConnected ConnectionStatus = "Connected" + // ConnectionStatusDegraded specifies the connection status degraded state for connection status. + ConnectionStatusDegraded ConnectionStatus = "Degraded" + // ConnectionStatusDisconnected specifies the connection status disconnected state for connection status. + ConnectionStatusDisconnected ConnectionStatus = "Disconnected" + // ConnectionStatusUnknown specifies the connection status unknown state for connection status. + ConnectionStatusUnknown ConnectionStatus = "Unknown" +) + // DhGroup enumerates the values for dh group. type DhGroup string @@ -250,17 +345,14 @@ const ( type EffectiveRouteSource string const ( - // EffectiveRouteSourceDefault specifies the effective route source default - // state for effective route source. + // EffectiveRouteSourceDefault specifies the effective route source default state for effective route source. EffectiveRouteSourceDefault EffectiveRouteSource = "Default" - // EffectiveRouteSourceUnknown specifies the effective route source unknown - // state for effective route source. + // EffectiveRouteSourceUnknown specifies the effective route source unknown state for effective route source. EffectiveRouteSourceUnknown EffectiveRouteSource = "Unknown" - // EffectiveRouteSourceUser specifies the effective route source user state - // for effective route source. + // EffectiveRouteSourceUser specifies the effective route source user state for effective route source. EffectiveRouteSourceUser EffectiveRouteSource = "User" - // EffectiveRouteSourceVirtualNetworkGateway specifies the effective route - // source virtual network gateway state for effective route source. + // EffectiveRouteSourceVirtualNetworkGateway specifies the effective route source virtual network gateway state for + // effective route source. 
EffectiveRouteSourceVirtualNetworkGateway EffectiveRouteSource = "VirtualNetworkGateway" ) @@ -274,79 +366,77 @@ const ( Invalid EffectiveRouteState = "Invalid" ) -// ExpressRouteCircuitPeeringAdvertisedPublicPrefixState enumerates the values -// for express route circuit peering advertised public prefix state. +// EffectiveSecurityRuleProtocol enumerates the values for effective security rule protocol. +type EffectiveSecurityRuleProtocol string + +const ( + // All specifies the all state for effective security rule protocol. + All EffectiveSecurityRuleProtocol = "All" + // TCP specifies the tcp state for effective security rule protocol. + TCP EffectiveSecurityRuleProtocol = "Tcp" + // UDP specifies the udp state for effective security rule protocol. + UDP EffectiveSecurityRuleProtocol = "Udp" +) + +// ExpressRouteCircuitPeeringAdvertisedPublicPrefixState enumerates the values for express route circuit peering +// advertised public prefix state. type ExpressRouteCircuitPeeringAdvertisedPublicPrefixState string const ( - // Configured specifies the configured state for express route circuit - // peering advertised public prefix state. + // Configured specifies the configured state for express route circuit peering advertised public prefix state. Configured ExpressRouteCircuitPeeringAdvertisedPublicPrefixState = "Configured" - // Configuring specifies the configuring state for express route circuit - // peering advertised public prefix state. + // Configuring specifies the configuring state for express route circuit peering advertised public prefix state. Configuring ExpressRouteCircuitPeeringAdvertisedPublicPrefixState = "Configuring" - // NotConfigured specifies the not configured state for express route - // circuit peering advertised public prefix state. + // NotConfigured specifies the not configured state for express route circuit peering advertised public prefix state. NotConfigured ExpressRouteCircuitPeeringAdvertisedPublicPrefixState = "NotConfigured" - // ValidationNeeded specifies the validation needed state for express route - // circuit peering advertised public prefix state. + // ValidationNeeded specifies the validation needed state for express route circuit peering advertised public prefix + // state. ValidationNeeded ExpressRouteCircuitPeeringAdvertisedPublicPrefixState = "ValidationNeeded" ) -// ExpressRouteCircuitPeeringState enumerates the values for express route -// circuit peering state. +// ExpressRouteCircuitPeeringState enumerates the values for express route circuit peering state. type ExpressRouteCircuitPeeringState string const ( - // ExpressRouteCircuitPeeringStateDisabled specifies the express route - // circuit peering state disabled state for express route circuit peering - // state. + // ExpressRouteCircuitPeeringStateDisabled specifies the express route circuit peering state disabled state for express + // route circuit peering state. ExpressRouteCircuitPeeringStateDisabled ExpressRouteCircuitPeeringState = "Disabled" - // ExpressRouteCircuitPeeringStateEnabled specifies the express route - // circuit peering state enabled state for express route circuit peering - // state. + // ExpressRouteCircuitPeeringStateEnabled specifies the express route circuit peering state enabled state for express + // route circuit peering state. ExpressRouteCircuitPeeringStateEnabled ExpressRouteCircuitPeeringState = "Enabled" ) -// ExpressRouteCircuitPeeringType enumerates the values for express route -// circuit peering type. 
+// ExpressRouteCircuitPeeringType enumerates the values for express route circuit peering type. type ExpressRouteCircuitPeeringType string const ( - // AzurePrivatePeering specifies the azure private peering state for - // express route circuit peering type. + // AzurePrivatePeering specifies the azure private peering state for express route circuit peering type. AzurePrivatePeering ExpressRouteCircuitPeeringType = "AzurePrivatePeering" - // AzurePublicPeering specifies the azure public peering state for express - // route circuit peering type. + // AzurePublicPeering specifies the azure public peering state for express route circuit peering type. AzurePublicPeering ExpressRouteCircuitPeeringType = "AzurePublicPeering" - // MicrosoftPeering specifies the microsoft peering state for express route - // circuit peering type. + // MicrosoftPeering specifies the microsoft peering state for express route circuit peering type. MicrosoftPeering ExpressRouteCircuitPeeringType = "MicrosoftPeering" ) -// ExpressRouteCircuitSkuFamily enumerates the values for express route circuit -// sku family. +// ExpressRouteCircuitSkuFamily enumerates the values for express route circuit sku family. type ExpressRouteCircuitSkuFamily string const ( - // MeteredData specifies the metered data state for express route circuit - // sku family. + // MeteredData specifies the metered data state for express route circuit sku family. MeteredData ExpressRouteCircuitSkuFamily = "MeteredData" - // UnlimitedData specifies the unlimited data state for express route - // circuit sku family. + // UnlimitedData specifies the unlimited data state for express route circuit sku family. UnlimitedData ExpressRouteCircuitSkuFamily = "UnlimitedData" ) -// ExpressRouteCircuitSkuTier enumerates the values for express route circuit -// sku tier. +// ExpressRouteCircuitSkuTier enumerates the values for express route circuit sku tier. type ExpressRouteCircuitSkuTier string const ( - // ExpressRouteCircuitSkuTierPremium specifies the express route circuit - // sku tier premium state for express route circuit sku tier. + // ExpressRouteCircuitSkuTierPremium specifies the express route circuit sku tier premium state for express route + // circuit sku tier. ExpressRouteCircuitSkuTierPremium ExpressRouteCircuitSkuTier = "Premium" - // ExpressRouteCircuitSkuTierStandard specifies the express route circuit - // sku tier standard state for express route circuit sku tier. + // ExpressRouteCircuitSkuTierStandard specifies the express route circuit sku tier standard state for express route + // circuit sku tier. ExpressRouteCircuitSkuTierStandard ExpressRouteCircuitSkuTier = "Standard" ) @@ -394,32 +484,23 @@ const ( type IpsecEncryption string const ( - // IpsecEncryptionAES128 specifies the ipsec encryption aes128 state for - // ipsec encryption. + // IpsecEncryptionAES128 specifies the ipsec encryption aes128 state for ipsec encryption. IpsecEncryptionAES128 IpsecEncryption = "AES128" - // IpsecEncryptionAES192 specifies the ipsec encryption aes192 state for - // ipsec encryption. + // IpsecEncryptionAES192 specifies the ipsec encryption aes192 state for ipsec encryption. IpsecEncryptionAES192 IpsecEncryption = "AES192" - // IpsecEncryptionAES256 specifies the ipsec encryption aes256 state for - // ipsec encryption. + // IpsecEncryptionAES256 specifies the ipsec encryption aes256 state for ipsec encryption. IpsecEncryptionAES256 IpsecEncryption = "AES256" - // IpsecEncryptionDES specifies the ipsec encryption des state for ipsec - // encryption. 
+ // IpsecEncryptionDES specifies the ipsec encryption des state for ipsec encryption. IpsecEncryptionDES IpsecEncryption = "DES" - // IpsecEncryptionDES3 specifies the ipsec encryption des3 state for ipsec - // encryption. + // IpsecEncryptionDES3 specifies the ipsec encryption des3 state for ipsec encryption. IpsecEncryptionDES3 IpsecEncryption = "DES3" - // IpsecEncryptionGCMAES128 specifies the ipsec encryption gcmaes128 state - // for ipsec encryption. + // IpsecEncryptionGCMAES128 specifies the ipsec encryption gcmaes128 state for ipsec encryption. IpsecEncryptionGCMAES128 IpsecEncryption = "GCMAES128" - // IpsecEncryptionGCMAES192 specifies the ipsec encryption gcmaes192 state - // for ipsec encryption. + // IpsecEncryptionGCMAES192 specifies the ipsec encryption gcmaes192 state for ipsec encryption. IpsecEncryptionGCMAES192 IpsecEncryption = "GCMAES192" - // IpsecEncryptionGCMAES256 specifies the ipsec encryption gcmaes256 state - // for ipsec encryption. + // IpsecEncryptionGCMAES256 specifies the ipsec encryption gcmaes256 state for ipsec encryption. IpsecEncryptionGCMAES256 IpsecEncryption = "GCMAES256" - // IpsecEncryptionNone specifies the ipsec encryption none state for ipsec - // encryption. + // IpsecEncryptionNone specifies the ipsec encryption none state for ipsec encryption. IpsecEncryptionNone IpsecEncryption = "None" ) @@ -427,23 +508,17 @@ const ( type IpsecIntegrity string const ( - // IpsecIntegrityGCMAES128 specifies the ipsec integrity gcmaes128 state - // for ipsec integrity. + // IpsecIntegrityGCMAES128 specifies the ipsec integrity gcmaes128 state for ipsec integrity. IpsecIntegrityGCMAES128 IpsecIntegrity = "GCMAES128" - // IpsecIntegrityGCMAES192 specifies the ipsec integrity gcmaes192 state - // for ipsec integrity. + // IpsecIntegrityGCMAES192 specifies the ipsec integrity gcmaes192 state for ipsec integrity. IpsecIntegrityGCMAES192 IpsecIntegrity = "GCMAES192" - // IpsecIntegrityGCMAES256 specifies the ipsec integrity gcmaes256 state - // for ipsec integrity. + // IpsecIntegrityGCMAES256 specifies the ipsec integrity gcmaes256 state for ipsec integrity. IpsecIntegrityGCMAES256 IpsecIntegrity = "GCMAES256" - // IpsecIntegrityMD5 specifies the ipsec integrity md5 state for ipsec - // integrity. + // IpsecIntegrityMD5 specifies the ipsec integrity md5 state for ipsec integrity. IpsecIntegrityMD5 IpsecIntegrity = "MD5" - // IpsecIntegritySHA1 specifies the ipsec integrity sha1 state for ipsec - // integrity. + // IpsecIntegritySHA1 specifies the ipsec integrity sha1 state for ipsec integrity. IpsecIntegritySHA1 IpsecIntegrity = "SHA1" - // IpsecIntegritySHA256 specifies the ipsec integrity sha256 state for - // ipsec integrity. + // IpsecIntegritySHA256 specifies the ipsec integrity sha256 state for ipsec integrity. IpsecIntegritySHA256 IpsecIntegrity = "SHA256" ) @@ -457,6 +532,40 @@ const ( IPv6 IPVersion = "IPv6" ) +// IssueType enumerates the values for issue type. +type IssueType string + +const ( + // IssueTypeAgentStopped specifies the issue type agent stopped state for issue type. + IssueTypeAgentStopped IssueType = "AgentStopped" + // IssueTypeDNSResolution specifies the issue type dns resolution state for issue type. + IssueTypeDNSResolution IssueType = "DnsResolution" + // IssueTypeGuestFirewall specifies the issue type guest firewall state for issue type. + IssueTypeGuestFirewall IssueType = "GuestFirewall" + // IssueTypeNetworkSecurityRule specifies the issue type network security rule state for issue type. 
+ IssueTypeNetworkSecurityRule IssueType = "NetworkSecurityRule" + // IssueTypePlatform specifies the issue type platform state for issue type. + IssueTypePlatform IssueType = "Platform" + // IssueTypePortThrottled specifies the issue type port throttled state for issue type. + IssueTypePortThrottled IssueType = "PortThrottled" + // IssueTypeSocketBind specifies the issue type socket bind state for issue type. + IssueTypeSocketBind IssueType = "SocketBind" + // IssueTypeUnknown specifies the issue type unknown state for issue type. + IssueTypeUnknown IssueType = "Unknown" + // IssueTypeUserDefinedRoute specifies the issue type user defined route state for issue type. + IssueTypeUserDefinedRoute IssueType = "UserDefinedRoute" +) + +// LoadBalancerSkuName enumerates the values for load balancer sku name. +type LoadBalancerSkuName string + +const ( + // LoadBalancerSkuNameBasic specifies the load balancer sku name basic state for load balancer sku name. + LoadBalancerSkuNameBasic LoadBalancerSkuName = "Basic" + // LoadBalancerSkuNameStandard specifies the load balancer sku name standard state for load balancer sku name. + LoadBalancerSkuNameStandard LoadBalancerSkuName = "Standard" +) + // LoadDistribution enumerates the values for load distribution. type LoadDistribution string @@ -465,8 +574,7 @@ const ( Default LoadDistribution = "Default" // SourceIP specifies the source ip state for load distribution. SourceIP LoadDistribution = "SourceIP" - // SourceIPProtocol specifies the source ip protocol state for load - // distribution. + // SourceIPProtocol specifies the source ip protocol state for load distribution. SourceIPProtocol LoadDistribution = "SourceIPProtocol" ) @@ -474,23 +582,17 @@ const ( type NextHopType string const ( - // NextHopTypeHyperNetGateway specifies the next hop type hyper net gateway - // state for next hop type. + // NextHopTypeHyperNetGateway specifies the next hop type hyper net gateway state for next hop type. NextHopTypeHyperNetGateway NextHopType = "HyperNetGateway" - // NextHopTypeInternet specifies the next hop type internet state for next - // hop type. + // NextHopTypeInternet specifies the next hop type internet state for next hop type. NextHopTypeInternet NextHopType = "Internet" - // NextHopTypeNone specifies the next hop type none state for next hop - // type. + // NextHopTypeNone specifies the next hop type none state for next hop type. NextHopTypeNone NextHopType = "None" - // NextHopTypeVirtualAppliance specifies the next hop type virtual - // appliance state for next hop type. + // NextHopTypeVirtualAppliance specifies the next hop type virtual appliance state for next hop type. NextHopTypeVirtualAppliance NextHopType = "VirtualAppliance" - // NextHopTypeVirtualNetworkGateway specifies the next hop type virtual - // network gateway state for next hop type. + // NextHopTypeVirtualNetworkGateway specifies the next hop type virtual network gateway state for next hop type. NextHopTypeVirtualNetworkGateway NextHopType = "VirtualNetworkGateway" - // NextHopTypeVnetLocal specifies the next hop type vnet local state for - // next hop type. + // NextHopTypeVnetLocal specifies the next hop type vnet local state for next hop type. NextHopTypeVnetLocal NextHopType = "VnetLocal" ) @@ -506,6 +608,18 @@ const ( Succeeded OperationStatus = "Succeeded" ) +// Origin enumerates the values for origin. +type Origin string + +const ( + // OriginInbound specifies the origin inbound state for origin. 
+ OriginInbound Origin = "Inbound" + // OriginLocal specifies the origin local state for origin. + OriginLocal Origin = "Local" + // OriginOutbound specifies the origin outbound state for origin. + OriginOutbound Origin = "Outbound" +) + // PcError enumerates the values for pc error. type PcError string @@ -526,12 +640,12 @@ const ( type PcProtocol string const ( - // Any specifies the any state for pc protocol. - Any PcProtocol = "Any" - // TCP specifies the tcp state for pc protocol. - TCP PcProtocol = "TCP" - // UDP specifies the udp state for pc protocol. - UDP PcProtocol = "UDP" + // PcProtocolAny specifies the pc protocol any state for pc protocol. + PcProtocolAny PcProtocol = "Any" + // PcProtocolTCP specifies the pc protocol tcp state for pc protocol. + PcProtocolTCP PcProtocol = "TCP" + // PcProtocolUDP specifies the pc protocol udp state for pc protocol. + PcProtocolUDP PcProtocol = "UDP" ) // PcStatus enumerates the values for pc status. @@ -540,8 +654,7 @@ type PcStatus string const ( // PcStatusError specifies the pc status error state for pc status. PcStatusError PcStatus = "Error" - // PcStatusNotStarted specifies the pc status not started state for pc - // status. + // PcStatusNotStarted specifies the pc status not started state for pc status. PcStatusNotStarted PcStatus = "NotStarted" // PcStatusRunning specifies the pc status running state for pc status. PcStatusRunning PcStatus = "Running" @@ -575,11 +688,9 @@ const ( type ProbeProtocol string const ( - // ProbeProtocolHTTP specifies the probe protocol http state for probe - // protocol. + // ProbeProtocolHTTP specifies the probe protocol http state for probe protocol. ProbeProtocolHTTP ProbeProtocol = "Http" - // ProbeProtocolTCP specifies the probe protocol tcp state for probe - // protocol. + // ProbeProtocolTCP specifies the probe protocol tcp state for probe protocol. ProbeProtocolTCP ProbeProtocol = "Tcp" ) @@ -607,38 +718,41 @@ const ( type ProvisioningState string const ( - // ProvisioningStateDeleting specifies the provisioning state deleting - // state for provisioning state. + // ProvisioningStateDeleting specifies the provisioning state deleting state for provisioning state. ProvisioningStateDeleting ProvisioningState = "Deleting" - // ProvisioningStateFailed specifies the provisioning state failed state - // for provisioning state. + // ProvisioningStateFailed specifies the provisioning state failed state for provisioning state. ProvisioningStateFailed ProvisioningState = "Failed" - // ProvisioningStateSucceeded specifies the provisioning state succeeded - // state for provisioning state. + // ProvisioningStateSucceeded specifies the provisioning state succeeded state for provisioning state. ProvisioningStateSucceeded ProvisioningState = "Succeeded" - // ProvisioningStateUpdating specifies the provisioning state updating - // state for provisioning state. + // ProvisioningStateUpdating specifies the provisioning state updating state for provisioning state. ProvisioningStateUpdating ProvisioningState = "Updating" ) +// PublicIPAddressSkuName enumerates the values for public ip address sku name. +type PublicIPAddressSkuName string + +const ( + // PublicIPAddressSkuNameBasic specifies the public ip address sku name basic state for public ip address sku name. + PublicIPAddressSkuNameBasic PublicIPAddressSkuName = "Basic" + // PublicIPAddressSkuNameStandard specifies the public ip address sku name standard state for public ip address sku + // name. 
+ PublicIPAddressSkuNameStandard PublicIPAddressSkuName = "Standard" +) + // RouteNextHopType enumerates the values for route next hop type. type RouteNextHopType string const ( - // RouteNextHopTypeInternet specifies the route next hop type internet - // state for route next hop type. + // RouteNextHopTypeInternet specifies the route next hop type internet state for route next hop type. RouteNextHopTypeInternet RouteNextHopType = "Internet" - // RouteNextHopTypeNone specifies the route next hop type none state for - // route next hop type. + // RouteNextHopTypeNone specifies the route next hop type none state for route next hop type. RouteNextHopTypeNone RouteNextHopType = "None" - // RouteNextHopTypeVirtualAppliance specifies the route next hop type - // virtual appliance state for route next hop type. + // RouteNextHopTypeVirtualAppliance specifies the route next hop type virtual appliance state for route next hop type. RouteNextHopTypeVirtualAppliance RouteNextHopType = "VirtualAppliance" - // RouteNextHopTypeVirtualNetworkGateway specifies the route next hop type - // virtual network gateway state for route next hop type. + // RouteNextHopTypeVirtualNetworkGateway specifies the route next hop type virtual network gateway state for route next + // hop type. RouteNextHopTypeVirtualNetworkGateway RouteNextHopType = "VirtualNetworkGateway" - // RouteNextHopTypeVnetLocal specifies the route next hop type vnet local - // state for route next hop type. + // RouteNextHopTypeVnetLocal specifies the route next hop type vnet local state for route next hop type. RouteNextHopTypeVnetLocal RouteNextHopType = "VnetLocal" ) @@ -646,11 +760,9 @@ const ( type SecurityRuleAccess string const ( - // SecurityRuleAccessAllow specifies the security rule access allow state - // for security rule access. + // SecurityRuleAccessAllow specifies the security rule access allow state for security rule access. SecurityRuleAccessAllow SecurityRuleAccess = "Allow" - // SecurityRuleAccessDeny specifies the security rule access deny state for - // security rule access. + // SecurityRuleAccessDeny specifies the security rule access deny state for security rule access. SecurityRuleAccessDeny SecurityRuleAccess = "Deny" ) @@ -658,11 +770,9 @@ const ( type SecurityRuleDirection string const ( - // SecurityRuleDirectionInbound specifies the security rule direction - // inbound state for security rule direction. + // SecurityRuleDirectionInbound specifies the security rule direction inbound state for security rule direction. SecurityRuleDirectionInbound SecurityRuleDirection = "Inbound" - // SecurityRuleDirectionOutbound specifies the security rule direction - // outbound state for security rule direction. + // SecurityRuleDirectionOutbound specifies the security rule direction outbound state for security rule direction. SecurityRuleDirectionOutbound SecurityRuleDirection = "Outbound" ) @@ -670,179 +780,169 @@ const ( type SecurityRuleProtocol string const ( - // SecurityRuleProtocolAsterisk specifies the security rule protocol - // asterisk state for security rule protocol. + // SecurityRuleProtocolAsterisk specifies the security rule protocol asterisk state for security rule protocol. SecurityRuleProtocolAsterisk SecurityRuleProtocol = "*" - // SecurityRuleProtocolTCP specifies the security rule protocol tcp state - // for security rule protocol. + // SecurityRuleProtocolTCP specifies the security rule protocol tcp state for security rule protocol. 
SecurityRuleProtocolTCP SecurityRuleProtocol = "Tcp" - // SecurityRuleProtocolUDP specifies the security rule protocol udp state - // for security rule protocol. + // SecurityRuleProtocolUDP specifies the security rule protocol udp state for security rule protocol. SecurityRuleProtocolUDP SecurityRuleProtocol = "Udp" ) -// ServiceProviderProvisioningState enumerates the values for service provider -// provisioning state. +// ServiceProviderProvisioningState enumerates the values for service provider provisioning state. type ServiceProviderProvisioningState string const ( - // Deprovisioning specifies the deprovisioning state for service provider - // provisioning state. + // Deprovisioning specifies the deprovisioning state for service provider provisioning state. Deprovisioning ServiceProviderProvisioningState = "Deprovisioning" - // NotProvisioned specifies the not provisioned state for service provider - // provisioning state. + // NotProvisioned specifies the not provisioned state for service provider provisioning state. NotProvisioned ServiceProviderProvisioningState = "NotProvisioned" - // Provisioned specifies the provisioned state for service provider - // provisioning state. + // Provisioned specifies the provisioned state for service provider provisioning state. Provisioned ServiceProviderProvisioningState = "Provisioned" - // Provisioning specifies the provisioning state for service provider - // provisioning state. + // Provisioning specifies the provisioning state for service provider provisioning state. Provisioning ServiceProviderProvisioningState = "Provisioning" ) +// Severity enumerates the values for severity. +type Severity string + +const ( + // SeverityError specifies the severity error state for severity. + SeverityError Severity = "Error" + // SeverityWarning specifies the severity warning state for severity. + SeverityWarning Severity = "Warning" +) + // TransportProtocol enumerates the values for transport protocol. type TransportProtocol string const ( - // TransportProtocolTCP specifies the transport protocol tcp state for - // transport protocol. + // TransportProtocolAll specifies the transport protocol all state for transport protocol. + TransportProtocolAll TransportProtocol = "All" + // TransportProtocolTCP specifies the transport protocol tcp state for transport protocol. TransportProtocolTCP TransportProtocol = "Tcp" - // TransportProtocolUDP specifies the transport protocol udp state for - // transport protocol. + // TransportProtocolUDP specifies the transport protocol udp state for transport protocol. TransportProtocolUDP TransportProtocol = "Udp" ) -// VirtualNetworkGatewayConnectionStatus enumerates the values for virtual -// network gateway connection status. +// VirtualNetworkGatewayConnectionStatus enumerates the values for virtual network gateway connection status. type VirtualNetworkGatewayConnectionStatus string const ( - // VirtualNetworkGatewayConnectionStatusConnected specifies the virtual - // network gateway connection status connected state for virtual network - // gateway connection status. + // VirtualNetworkGatewayConnectionStatusConnected specifies the virtual network gateway connection status connected + // state for virtual network gateway connection status. 
VirtualNetworkGatewayConnectionStatusConnected VirtualNetworkGatewayConnectionStatus = "Connected" - // VirtualNetworkGatewayConnectionStatusConnecting specifies the virtual - // network gateway connection status connecting state for virtual network - // gateway connection status. + // VirtualNetworkGatewayConnectionStatusConnecting specifies the virtual network gateway connection status connecting + // state for virtual network gateway connection status. VirtualNetworkGatewayConnectionStatusConnecting VirtualNetworkGatewayConnectionStatus = "Connecting" - // VirtualNetworkGatewayConnectionStatusNotConnected specifies the virtual - // network gateway connection status not connected state for virtual - // network gateway connection status. + // VirtualNetworkGatewayConnectionStatusNotConnected specifies the virtual network gateway connection status not + // connected state for virtual network gateway connection status. VirtualNetworkGatewayConnectionStatusNotConnected VirtualNetworkGatewayConnectionStatus = "NotConnected" - // VirtualNetworkGatewayConnectionStatusUnknown specifies the virtual - // network gateway connection status unknown state for virtual network - // gateway connection status. + // VirtualNetworkGatewayConnectionStatusUnknown specifies the virtual network gateway connection status unknown state + // for virtual network gateway connection status. VirtualNetworkGatewayConnectionStatusUnknown VirtualNetworkGatewayConnectionStatus = "Unknown" ) -// VirtualNetworkGatewayConnectionType enumerates the values for virtual -// network gateway connection type. +// VirtualNetworkGatewayConnectionType enumerates the values for virtual network gateway connection type. type VirtualNetworkGatewayConnectionType string const ( - // ExpressRoute specifies the express route state for virtual network - // gateway connection type. + // ExpressRoute specifies the express route state for virtual network gateway connection type. ExpressRoute VirtualNetworkGatewayConnectionType = "ExpressRoute" - // IPsec specifies the i psec state for virtual network gateway connection - // type. + // IPsec specifies the i psec state for virtual network gateway connection type. IPsec VirtualNetworkGatewayConnectionType = "IPsec" - // Vnet2Vnet specifies the vnet 2 vnet state for virtual network gateway - // connection type. + // Vnet2Vnet specifies the vnet 2 vnet state for virtual network gateway connection type. Vnet2Vnet VirtualNetworkGatewayConnectionType = "Vnet2Vnet" - // VPNClient specifies the vpn client state for virtual network gateway - // connection type. + // VPNClient specifies the vpn client state for virtual network gateway connection type. VPNClient VirtualNetworkGatewayConnectionType = "VPNClient" ) -// VirtualNetworkGatewaySkuName enumerates the values for virtual network -// gateway sku name. +// VirtualNetworkGatewaySkuName enumerates the values for virtual network gateway sku name. type VirtualNetworkGatewaySkuName string const ( - // VirtualNetworkGatewaySkuNameBasic specifies the virtual network gateway - // sku name basic state for virtual network gateway sku name. - VirtualNetworkGatewaySkuNameBasic VirtualNetworkGatewaySkuName = "Basic" - // VirtualNetworkGatewaySkuNameHighPerformance specifies the virtual - // network gateway sku name high performance state for virtual network + // VirtualNetworkGatewaySkuNameBasic specifies the virtual network gateway sku name basic state for virtual network // gateway sku name. 
+ VirtualNetworkGatewaySkuNameBasic VirtualNetworkGatewaySkuName = "Basic" + // VirtualNetworkGatewaySkuNameHighPerformance specifies the virtual network gateway sku name high performance state + // for virtual network gateway sku name. VirtualNetworkGatewaySkuNameHighPerformance VirtualNetworkGatewaySkuName = "HighPerformance" - // VirtualNetworkGatewaySkuNameStandard specifies the virtual network - // gateway sku name standard state for virtual network gateway sku name. + // VirtualNetworkGatewaySkuNameStandard specifies the virtual network gateway sku name standard state for virtual + // network gateway sku name. VirtualNetworkGatewaySkuNameStandard VirtualNetworkGatewaySkuName = "Standard" - // VirtualNetworkGatewaySkuNameUltraPerformance specifies the virtual - // network gateway sku name ultra performance state for virtual network - // gateway sku name. + // VirtualNetworkGatewaySkuNameUltraPerformance specifies the virtual network gateway sku name ultra performance state + // for virtual network gateway sku name. VirtualNetworkGatewaySkuNameUltraPerformance VirtualNetworkGatewaySkuName = "UltraPerformance" - // VirtualNetworkGatewaySkuNameVpnGw1 specifies the virtual network gateway - // sku name vpn gw 1 state for virtual network gateway sku name. + // VirtualNetworkGatewaySkuNameVpnGw1 specifies the virtual network gateway sku name vpn gw 1 state for virtual network + // gateway sku name. VirtualNetworkGatewaySkuNameVpnGw1 VirtualNetworkGatewaySkuName = "VpnGw1" - // VirtualNetworkGatewaySkuNameVpnGw2 specifies the virtual network gateway - // sku name vpn gw 2 state for virtual network gateway sku name. + // VirtualNetworkGatewaySkuNameVpnGw2 specifies the virtual network gateway sku name vpn gw 2 state for virtual network + // gateway sku name. VirtualNetworkGatewaySkuNameVpnGw2 VirtualNetworkGatewaySkuName = "VpnGw2" - // VirtualNetworkGatewaySkuNameVpnGw3 specifies the virtual network gateway - // sku name vpn gw 3 state for virtual network gateway sku name. + // VirtualNetworkGatewaySkuNameVpnGw3 specifies the virtual network gateway sku name vpn gw 3 state for virtual network + // gateway sku name. VirtualNetworkGatewaySkuNameVpnGw3 VirtualNetworkGatewaySkuName = "VpnGw3" ) -// VirtualNetworkGatewaySkuTier enumerates the values for virtual network -// gateway sku tier. +// VirtualNetworkGatewaySkuTier enumerates the values for virtual network gateway sku tier. type VirtualNetworkGatewaySkuTier string const ( - // VirtualNetworkGatewaySkuTierBasic specifies the virtual network gateway - // sku tier basic state for virtual network gateway sku tier. - VirtualNetworkGatewaySkuTierBasic VirtualNetworkGatewaySkuTier = "Basic" - // VirtualNetworkGatewaySkuTierHighPerformance specifies the virtual - // network gateway sku tier high performance state for virtual network + // VirtualNetworkGatewaySkuTierBasic specifies the virtual network gateway sku tier basic state for virtual network // gateway sku tier. + VirtualNetworkGatewaySkuTierBasic VirtualNetworkGatewaySkuTier = "Basic" + // VirtualNetworkGatewaySkuTierHighPerformance specifies the virtual network gateway sku tier high performance state + // for virtual network gateway sku tier. VirtualNetworkGatewaySkuTierHighPerformance VirtualNetworkGatewaySkuTier = "HighPerformance" - // VirtualNetworkGatewaySkuTierStandard specifies the virtual network - // gateway sku tier standard state for virtual network gateway sku tier. 
+ // VirtualNetworkGatewaySkuTierStandard specifies the virtual network gateway sku tier standard state for virtual + // network gateway sku tier. VirtualNetworkGatewaySkuTierStandard VirtualNetworkGatewaySkuTier = "Standard" - // VirtualNetworkGatewaySkuTierUltraPerformance specifies the virtual - // network gateway sku tier ultra performance state for virtual network - // gateway sku tier. + // VirtualNetworkGatewaySkuTierUltraPerformance specifies the virtual network gateway sku tier ultra performance state + // for virtual network gateway sku tier. VirtualNetworkGatewaySkuTierUltraPerformance VirtualNetworkGatewaySkuTier = "UltraPerformance" - // VirtualNetworkGatewaySkuTierVpnGw1 specifies the virtual network gateway - // sku tier vpn gw 1 state for virtual network gateway sku tier. + // VirtualNetworkGatewaySkuTierVpnGw1 specifies the virtual network gateway sku tier vpn gw 1 state for virtual network + // gateway sku tier. VirtualNetworkGatewaySkuTierVpnGw1 VirtualNetworkGatewaySkuTier = "VpnGw1" - // VirtualNetworkGatewaySkuTierVpnGw2 specifies the virtual network gateway - // sku tier vpn gw 2 state for virtual network gateway sku tier. + // VirtualNetworkGatewaySkuTierVpnGw2 specifies the virtual network gateway sku tier vpn gw 2 state for virtual network + // gateway sku tier. VirtualNetworkGatewaySkuTierVpnGw2 VirtualNetworkGatewaySkuTier = "VpnGw2" - // VirtualNetworkGatewaySkuTierVpnGw3 specifies the virtual network gateway - // sku tier vpn gw 3 state for virtual network gateway sku tier. + // VirtualNetworkGatewaySkuTierVpnGw3 specifies the virtual network gateway sku tier vpn gw 3 state for virtual network + // gateway sku tier. VirtualNetworkGatewaySkuTierVpnGw3 VirtualNetworkGatewaySkuTier = "VpnGw3" ) -// VirtualNetworkGatewayType enumerates the values for virtual network gateway -// type. +// VirtualNetworkGatewayType enumerates the values for virtual network gateway type. type VirtualNetworkGatewayType string const ( - // VirtualNetworkGatewayTypeExpressRoute specifies the virtual network - // gateway type express route state for virtual network gateway type. + // VirtualNetworkGatewayTypeExpressRoute specifies the virtual network gateway type express route state for virtual + // network gateway type. VirtualNetworkGatewayTypeExpressRoute VirtualNetworkGatewayType = "ExpressRoute" - // VirtualNetworkGatewayTypeVpn specifies the virtual network gateway type - // vpn state for virtual network gateway type. + // VirtualNetworkGatewayTypeVpn specifies the virtual network gateway type vpn state for virtual network gateway type. VirtualNetworkGatewayTypeVpn VirtualNetworkGatewayType = "Vpn" ) -// VirtualNetworkPeeringState enumerates the values for virtual network peering -// state. +// VirtualNetworkPeeringState enumerates the values for virtual network peering state. type VirtualNetworkPeeringState string const ( - // Connected specifies the connected state for virtual network peering - // state. + // Connected specifies the connected state for virtual network peering state. Connected VirtualNetworkPeeringState = "Connected" - // Disconnected specifies the disconnected state for virtual network - // peering state. + // Disconnected specifies the disconnected state for virtual network peering state. Disconnected VirtualNetworkPeeringState = "Disconnected" - // Initiated specifies the initiated state for virtual network peering - // state. + // Initiated specifies the initiated state for virtual network peering state. 
Initiated VirtualNetworkPeeringState = "Initiated" ) +// VpnClientProtocol enumerates the values for vpn client protocol. +type VpnClientProtocol string + +const ( + // IkeV2 specifies the ike v2 state for vpn client protocol. + IkeV2 VpnClientProtocol = "IkeV2" + // SSTP specifies the sstp state for vpn client protocol. + SSTP VpnClientProtocol = "SSTP" +) + // VpnType enumerates the values for vpn type. type VpnType string @@ -853,8 +953,8 @@ const ( RouteBased VpnType = "RouteBased" ) -// AddressSpace is addressSpace contains an array of IP address ranges that can -// be used by subnets of the virtual network. +// AddressSpace is addressSpace contains an array of IP address ranges that can be used by subnets of the virtual +// network. type AddressSpace struct { AddressPrefixes *[]string `json:"addressPrefixes,omitempty"` } @@ -871,114 +971,151 @@ type ApplicationGateway struct { Etag *string `json:"etag,omitempty"` } -// ApplicationGatewayAuthenticationCertificate is authentication certificates -// of an application gateway. +// ApplicationGatewayAuthenticationCertificate is authentication certificates of an application gateway. type ApplicationGatewayAuthenticationCertificate struct { ID *string `json:"id,omitempty"` *ApplicationGatewayAuthenticationCertificatePropertiesFormat `json:"properties,omitempty"` Name *string `json:"name,omitempty"` Etag *string `json:"etag,omitempty"` + Type *string `json:"type,omitempty"` } -// ApplicationGatewayAuthenticationCertificatePropertiesFormat is -// authentication certificates properties of an application gateway. +// ApplicationGatewayAuthenticationCertificatePropertiesFormat is authentication certificates properties of an +// application gateway. type ApplicationGatewayAuthenticationCertificatePropertiesFormat struct { Data *string `json:"data,omitempty"` ProvisioningState *string `json:"provisioningState,omitempty"` } -// ApplicationGatewayAvailableWafRuleSetsResult is response for -// ApplicationGatewayAvailableWafRuleSets API service call. +// ApplicationGatewayAvailableSslOptions is response for ApplicationGatewayAvailableSslOptions API service call. +type ApplicationGatewayAvailableSslOptions struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + *ApplicationGatewayAvailableSslOptionsPropertiesFormat `json:"properties,omitempty"` +} + +// ApplicationGatewayAvailableSslOptionsPropertiesFormat is properties of ApplicationGatewayAvailableSslOptions +type ApplicationGatewayAvailableSslOptionsPropertiesFormat struct { + PredefinedPolicies *[]SubResource `json:"predefinedPolicies,omitempty"` + DefaultPolicy ApplicationGatewaySslPolicyName `json:"defaultPolicy,omitempty"` + AvailableCipherSuites *[]ApplicationGatewaySslCipherSuite `json:"availableCipherSuites,omitempty"` + AvailableProtocols *[]ApplicationGatewaySslProtocol `json:"availableProtocols,omitempty"` +} + +// ApplicationGatewayAvailableSslPredefinedPolicies is response for ApplicationGatewayAvailableSslOptions API service +// call. +type ApplicationGatewayAvailableSslPredefinedPolicies struct { + autorest.Response `json:"-"` + Value *[]ApplicationGatewaySslPredefinedPolicy `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// ApplicationGatewayAvailableSslPredefinedPoliciesPreparer prepares a request to retrieve the next set of results. 
It returns +// nil if no more results exist. +func (client ApplicationGatewayAvailableSslPredefinedPolicies) ApplicationGatewayAvailableSslPredefinedPoliciesPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// ApplicationGatewayAvailableWafRuleSetsResult is response for ApplicationGatewayAvailableWafRuleSets API service +// call. type ApplicationGatewayAvailableWafRuleSetsResult struct { autorest.Response `json:"-"` Value *[]ApplicationGatewayFirewallRuleSet `json:"value,omitempty"` } -// ApplicationGatewayBackendAddress is backend address of an application -// gateway. +// ApplicationGatewayBackendAddress is backend address of an application gateway. type ApplicationGatewayBackendAddress struct { Fqdn *string `json:"fqdn,omitempty"` IPAddress *string `json:"ipAddress,omitempty"` } -// ApplicationGatewayBackendAddressPool is backend Address Pool of an -// application gateway. +// ApplicationGatewayBackendAddressPool is backend Address Pool of an application gateway. type ApplicationGatewayBackendAddressPool struct { ID *string `json:"id,omitempty"` *ApplicationGatewayBackendAddressPoolPropertiesFormat `json:"properties,omitempty"` Name *string `json:"name,omitempty"` Etag *string `json:"etag,omitempty"` + Type *string `json:"type,omitempty"` } -// ApplicationGatewayBackendAddressPoolPropertiesFormat is properties of -// Backend Address Pool of an application gateway. +// ApplicationGatewayBackendAddressPoolPropertiesFormat is properties of Backend Address Pool of an application +// gateway. type ApplicationGatewayBackendAddressPoolPropertiesFormat struct { BackendIPConfigurations *[]InterfaceIPConfiguration `json:"backendIPConfigurations,omitempty"` BackendAddresses *[]ApplicationGatewayBackendAddress `json:"backendAddresses,omitempty"` ProvisioningState *string `json:"provisioningState,omitempty"` } -// ApplicationGatewayBackendHealth is list of -// ApplicationGatewayBackendHealthPool resources. +// ApplicationGatewayBackendHealth is list of ApplicationGatewayBackendHealthPool resources. type ApplicationGatewayBackendHealth struct { autorest.Response `json:"-"` BackendAddressPools *[]ApplicationGatewayBackendHealthPool `json:"backendAddressPools,omitempty"` } -// ApplicationGatewayBackendHealthHTTPSettings is application gateway -// BackendHealthHttp settings. +// ApplicationGatewayBackendHealthHTTPSettings is application gateway BackendHealthHttp settings. type ApplicationGatewayBackendHealthHTTPSettings struct { BackendHTTPSettings *ApplicationGatewayBackendHTTPSettings `json:"backendHttpSettings,omitempty"` Servers *[]ApplicationGatewayBackendHealthServer `json:"servers,omitempty"` } -// ApplicationGatewayBackendHealthPool is application gateway BackendHealth -// pool. +// ApplicationGatewayBackendHealthPool is application gateway BackendHealth pool. type ApplicationGatewayBackendHealthPool struct { BackendAddressPool *ApplicationGatewayBackendAddressPool `json:"backendAddressPool,omitempty"` BackendHTTPSettingsCollection *[]ApplicationGatewayBackendHealthHTTPSettings `json:"backendHttpSettingsCollection,omitempty"` } -// ApplicationGatewayBackendHealthServer is application gateway backendhealth -// http settings. +// ApplicationGatewayBackendHealthServer is application gateway backendhealth http settings. 
type ApplicationGatewayBackendHealthServer struct { Address *string `json:"address,omitempty"` - IPConfiguration *SubResource `json:"ipConfiguration,omitempty"` + IPConfiguration *InterfaceIPConfiguration `json:"ipConfiguration,omitempty"` Health ApplicationGatewayBackendHealthServerHealth `json:"health,omitempty"` } -// ApplicationGatewayBackendHTTPSettings is backend address pool settings of an -// application gateway. +// ApplicationGatewayBackendHTTPSettings is backend address pool settings of an application gateway. type ApplicationGatewayBackendHTTPSettings struct { ID *string `json:"id,omitempty"` *ApplicationGatewayBackendHTTPSettingsPropertiesFormat `json:"properties,omitempty"` Name *string `json:"name,omitempty"` Etag *string `json:"etag,omitempty"` + Type *string `json:"type,omitempty"` } -// ApplicationGatewayBackendHTTPSettingsPropertiesFormat is properties of -// Backend address pool settings of an application gateway. +// ApplicationGatewayBackendHTTPSettingsPropertiesFormat is properties of Backend address pool settings of an +// application gateway. type ApplicationGatewayBackendHTTPSettingsPropertiesFormat struct { - Port *int32 `json:"port,omitempty"` - Protocol ApplicationGatewayProtocol `json:"protocol,omitempty"` - CookieBasedAffinity ApplicationGatewayCookieBasedAffinity `json:"cookieBasedAffinity,omitempty"` - RequestTimeout *int32 `json:"requestTimeout,omitempty"` - Probe *SubResource `json:"probe,omitempty"` - AuthenticationCertificates *[]SubResource `json:"authenticationCertificates,omitempty"` - ProvisioningState *string `json:"provisioningState,omitempty"` - ConnectionDraining *ApplicationGatewayConnectionDraining `json:"connectionDraining,omitempty"` + Port *int32 `json:"port,omitempty"` + Protocol ApplicationGatewayProtocol `json:"protocol,omitempty"` + CookieBasedAffinity ApplicationGatewayCookieBasedAffinity `json:"cookieBasedAffinity,omitempty"` + RequestTimeout *int32 `json:"requestTimeout,omitempty"` + Probe *SubResource `json:"probe,omitempty"` + AuthenticationCertificates *[]SubResource `json:"authenticationCertificates,omitempty"` + ConnectionDraining *ApplicationGatewayConnectionDraining `json:"connectionDraining,omitempty"` + HostName *string `json:"hostName,omitempty"` + PickHostNameFromBackendAddress *bool `json:"pickHostNameFromBackendAddress,omitempty"` + AffinityCookieName *string `json:"affinityCookieName,omitempty"` + ProbeEnabled *bool `json:"probeEnabled,omitempty"` + Path *string `json:"path,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` } -// ApplicationGatewayConnectionDraining is connection draining allows open -// connections to a backend server to be active for a specified time after the -// backend server got removed from the configuration. +// ApplicationGatewayConnectionDraining is connection draining allows open connections to a backend server to be active +// for a specified time after the backend server got removed from the configuration. type ApplicationGatewayConnectionDraining struct { Enabled *bool `json:"enabled,omitempty"` DrainTimeoutInSec *int32 `json:"drainTimeoutInSec,omitempty"` } -// ApplicationGatewayFirewallDisabledRuleGroup is allows to disable rules -// within a rule group or an entire rule group. +// ApplicationGatewayFirewallDisabledRuleGroup is allows to disable rules within a rule group or an entire rule group. 
type ApplicationGatewayFirewallDisabledRuleGroup struct { RuleGroupName *string `json:"ruleGroupName,omitempty"` Rules *[]int32 `json:"rules,omitempty"` @@ -990,8 +1127,7 @@ type ApplicationGatewayFirewallRule struct { Description *string `json:"description,omitempty"` } -// ApplicationGatewayFirewallRuleGroup is a web application firewall rule -// group. +// ApplicationGatewayFirewallRuleGroup is a web application firewall rule group. type ApplicationGatewayFirewallRuleGroup struct { RuleGroupName *string `json:"ruleGroupName,omitempty"` Description *string `json:"description,omitempty"` @@ -1008,8 +1144,7 @@ type ApplicationGatewayFirewallRuleSet struct { *ApplicationGatewayFirewallRuleSetPropertiesFormat `json:"properties,omitempty"` } -// ApplicationGatewayFirewallRuleSetPropertiesFormat is properties of the web -// application firewall rule set. +// ApplicationGatewayFirewallRuleSetPropertiesFormat is properties of the web application firewall rule set. type ApplicationGatewayFirewallRuleSetPropertiesFormat struct { ProvisioningState *string `json:"provisioningState,omitempty"` RuleSetType *string `json:"ruleSetType,omitempty"` @@ -1017,17 +1152,17 @@ type ApplicationGatewayFirewallRuleSetPropertiesFormat struct { RuleGroups *[]ApplicationGatewayFirewallRuleGroup `json:"ruleGroups,omitempty"` } -// ApplicationGatewayFrontendIPConfiguration is frontend IP configuration of an -// application gateway. +// ApplicationGatewayFrontendIPConfiguration is frontend IP configuration of an application gateway. type ApplicationGatewayFrontendIPConfiguration struct { ID *string `json:"id,omitempty"` *ApplicationGatewayFrontendIPConfigurationPropertiesFormat `json:"properties,omitempty"` Name *string `json:"name,omitempty"` Etag *string `json:"etag,omitempty"` + Type *string `json:"type,omitempty"` } -// ApplicationGatewayFrontendIPConfigurationPropertiesFormat is properties of -// Frontend IP configuration of an application gateway. +// ApplicationGatewayFrontendIPConfigurationPropertiesFormat is properties of Frontend IP configuration of an +// application gateway. type ApplicationGatewayFrontendIPConfigurationPropertiesFormat struct { PrivateIPAddress *string `json:"privateIPAddress,omitempty"` PrivateIPAllocationMethod IPAllocationMethod `json:"privateIPAllocationMethod,omitempty"` @@ -1042,10 +1177,10 @@ type ApplicationGatewayFrontendPort struct { *ApplicationGatewayFrontendPortPropertiesFormat `json:"properties,omitempty"` Name *string `json:"name,omitempty"` Etag *string `json:"etag,omitempty"` + Type *string `json:"type,omitempty"` } -// ApplicationGatewayFrontendPortPropertiesFormat is properties of Frontend -// port of an application gateway. +// ApplicationGatewayFrontendPortPropertiesFormat is properties of Frontend port of an application gateway. type ApplicationGatewayFrontendPortPropertiesFormat struct { Port *int32 `json:"port,omitempty"` ProvisioningState *string `json:"provisioningState,omitempty"` @@ -1057,10 +1192,10 @@ type ApplicationGatewayHTTPListener struct { *ApplicationGatewayHTTPListenerPropertiesFormat `json:"properties,omitempty"` Name *string `json:"name,omitempty"` Etag *string `json:"etag,omitempty"` + Type *string `json:"type,omitempty"` } -// ApplicationGatewayHTTPListenerPropertiesFormat is properties of HTTP -// listener of an application gateway. +// ApplicationGatewayHTTPListenerPropertiesFormat is properties of HTTP listener of an application gateway. 
type ApplicationGatewayHTTPListenerPropertiesFormat struct { FrontendIPConfiguration *SubResource `json:"frontendIPConfiguration,omitempty"` FrontendPort *SubResource `json:"frontendPort,omitempty"` @@ -1071,24 +1206,23 @@ type ApplicationGatewayHTTPListenerPropertiesFormat struct { ProvisioningState *string `json:"provisioningState,omitempty"` } -// ApplicationGatewayIPConfiguration is iP configuration of an application -// gateway. Currently 1 public and 1 private IP configuration is allowed. +// ApplicationGatewayIPConfiguration is IP configuration of an application gateway. Currently 1 public and 1 private IP +// configuration is allowed. type ApplicationGatewayIPConfiguration struct { ID *string `json:"id,omitempty"` *ApplicationGatewayIPConfigurationPropertiesFormat `json:"properties,omitempty"` Name *string `json:"name,omitempty"` Etag *string `json:"etag,omitempty"` + Type *string `json:"type,omitempty"` } -// ApplicationGatewayIPConfigurationPropertiesFormat is properties of IP -// configuration of an application gateway. +// ApplicationGatewayIPConfigurationPropertiesFormat is properties of IP configuration of an application gateway. type ApplicationGatewayIPConfigurationPropertiesFormat struct { Subnet *SubResource `json:"subnet,omitempty"` ProvisioningState *string `json:"provisioningState,omitempty"` } -// ApplicationGatewayListResult is response for ListApplicationGateways API -// service call. +// ApplicationGatewayListResult is response for ListApplicationGateways API service call. type ApplicationGatewayListResult struct { autorest.Response `json:"-"` Value *[]ApplicationGateway `json:"value,omitempty"` @@ -1107,22 +1241,22 @@ func (client ApplicationGatewayListResult) ApplicationGatewayListResultPreparer( autorest.WithBaseURL(to.String(client.NextLink))) } -// ApplicationGatewayPathRule is path rule of URL path map of an application -// gateway. +// ApplicationGatewayPathRule is path rule of URL path map of an application gateway. type ApplicationGatewayPathRule struct { ID *string `json:"id,omitempty"` *ApplicationGatewayPathRulePropertiesFormat `json:"properties,omitempty"` Name *string `json:"name,omitempty"` Etag *string `json:"etag,omitempty"` + Type *string `json:"type,omitempty"` } -// ApplicationGatewayPathRulePropertiesFormat is properties of probe of an -// application gateway. +// ApplicationGatewayPathRulePropertiesFormat is properties of path rule of an application gateway. type ApplicationGatewayPathRulePropertiesFormat struct { - Paths *[]string `json:"paths,omitempty"` - BackendAddressPool *SubResource `json:"backendAddressPool,omitempty"` - BackendHTTPSettings *SubResource `json:"backendHttpSettings,omitempty"` - ProvisioningState *string `json:"provisioningState,omitempty"` + Paths *[]string `json:"paths,omitempty"` + BackendAddressPool *SubResource `json:"backendAddressPool,omitempty"` + BackendHTTPSettings *SubResource `json:"backendHttpSettings,omitempty"` + RedirectConfiguration *SubResource `json:"redirectConfiguration,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` } // ApplicationGatewayProbe is probe of the application gateway. @@ -1131,18 +1265,27 @@ type ApplicationGatewayProbe struct { *ApplicationGatewayProbePropertiesFormat `json:"properties,omitempty"` Name *string `json:"name,omitempty"` Etag *string `json:"etag,omitempty"` + Type *string `json:"type,omitempty"` } -// ApplicationGatewayProbePropertiesFormat is properties of probe of an -// application gateway. 
+// ApplicationGatewayProbeHealthResponseMatch is application gateway probe health response match +type ApplicationGatewayProbeHealthResponseMatch struct { + Body *string `json:"body,omitempty"` + StatusCodes *[]string `json:"statusCodes,omitempty"` +} + +// ApplicationGatewayProbePropertiesFormat is properties of probe of an application gateway. type ApplicationGatewayProbePropertiesFormat struct { - Protocol ApplicationGatewayProtocol `json:"protocol,omitempty"` - Host *string `json:"host,omitempty"` - Path *string `json:"path,omitempty"` - Interval *int32 `json:"interval,omitempty"` - Timeout *int32 `json:"timeout,omitempty"` - UnhealthyThreshold *int32 `json:"unhealthyThreshold,omitempty"` - ProvisioningState *string `json:"provisioningState,omitempty"` + Protocol ApplicationGatewayProtocol `json:"protocol,omitempty"` + Host *string `json:"host,omitempty"` + Path *string `json:"path,omitempty"` + Interval *int32 `json:"interval,omitempty"` + Timeout *int32 `json:"timeout,omitempty"` + UnhealthyThreshold *int32 `json:"unhealthyThreshold,omitempty"` + PickHostNameFromBackendHTTPSettings *bool `json:"pickHostNameFromBackendHttpSettings,omitempty"` + MinServers *int32 `json:"minServers,omitempty"` + Match *ApplicationGatewayProbeHealthResponseMatch `json:"match,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` } // ApplicationGatewayPropertiesFormat is properties of the application gateway. @@ -1161,49 +1304,72 @@ type ApplicationGatewayPropertiesFormat struct { HTTPListeners *[]ApplicationGatewayHTTPListener `json:"httpListeners,omitempty"` URLPathMaps *[]ApplicationGatewayURLPathMap `json:"urlPathMaps,omitempty"` RequestRoutingRules *[]ApplicationGatewayRequestRoutingRule `json:"requestRoutingRules,omitempty"` + RedirectConfigurations *[]ApplicationGatewayRedirectConfiguration `json:"redirectConfigurations,omitempty"` WebApplicationFirewallConfiguration *ApplicationGatewayWebApplicationFirewallConfiguration `json:"webApplicationFirewallConfiguration,omitempty"` ResourceGUID *string `json:"resourceGuid,omitempty"` ProvisioningState *string `json:"provisioningState,omitempty"` } -// ApplicationGatewayRequestRoutingRule is request routing rule of an -// application gateway. +// ApplicationGatewayRedirectConfiguration is redirect configuration of an application gateway. +type ApplicationGatewayRedirectConfiguration struct { + ID *string `json:"id,omitempty"` + *ApplicationGatewayRedirectConfigurationPropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` + Type *string `json:"type,omitempty"` +} + +// ApplicationGatewayRedirectConfigurationPropertiesFormat is properties of redirect configuration of the application +// gateway. +type ApplicationGatewayRedirectConfigurationPropertiesFormat struct { + RedirectType ApplicationGatewayRedirectType `json:"redirectType,omitempty"` + TargetListener *SubResource `json:"targetListener,omitempty"` + TargetURL *string `json:"targetUrl,omitempty"` + IncludePath *bool `json:"includePath,omitempty"` + IncludeQueryString *bool `json:"includeQueryString,omitempty"` + RequestRoutingRules *[]SubResource `json:"requestRoutingRules,omitempty"` + URLPathMaps *[]SubResource `json:"urlPathMaps,omitempty"` + PathRules *[]SubResource `json:"pathRules,omitempty"` +} + +// ApplicationGatewayRequestRoutingRule is request routing rule of an application gateway. 
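Note for reviewers: the probe hunk above adds ApplicationGatewayProbeHealthResponseMatch plus the PickHostNameFromBackendHttpSettings, MinServers, and Match fields. A minimal sketch of populating them, assuming the vendored arm/network import path; string literals are cast to the SDK's string-typed enums to avoid guessing constant names, and all values are illustrative:

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/arm/network"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	// Treat any 2xx/3xx status code as healthy (illustrative range).
	statusCodes := []string{"200-399"}

	probe := network.ApplicationGatewayProbe{
		Name: to.StringPtr("https-probe"),
		ApplicationGatewayProbePropertiesFormat: &network.ApplicationGatewayProbePropertiesFormat{
			Protocol:           network.ApplicationGatewayProtocol("Https"),
			Path:               to.StringPtr("/healthz"),
			Interval:           to.Int32Ptr(30),
			Timeout:            to.Int32Ptr(30),
			UnhealthyThreshold: to.Int32Ptr(3),
			// New in this SDK revision: derive the Host header from the backend
			// HTTP settings and match on the probe response.
			PickHostNameFromBackendHTTPSettings: to.BoolPtr(true),
			MinServers:                          to.Int32Ptr(1),
			Match: &network.ApplicationGatewayProbeHealthResponseMatch{
				StatusCodes: &statusCodes,
			},
		},
	}
	fmt.Println(*probe.Name)
}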
type ApplicationGatewayRequestRoutingRule struct { ID *string `json:"id,omitempty"` *ApplicationGatewayRequestRoutingRulePropertiesFormat `json:"properties,omitempty"` Name *string `json:"name,omitempty"` Etag *string `json:"etag,omitempty"` + Type *string `json:"type,omitempty"` } -// ApplicationGatewayRequestRoutingRulePropertiesFormat is properties of -// request routing rule of the application gateway. +// ApplicationGatewayRequestRoutingRulePropertiesFormat is properties of request routing rule of the application +// gateway. type ApplicationGatewayRequestRoutingRulePropertiesFormat struct { - RuleType ApplicationGatewayRequestRoutingRuleType `json:"ruleType,omitempty"` - BackendAddressPool *SubResource `json:"backendAddressPool,omitempty"` - BackendHTTPSettings *SubResource `json:"backendHttpSettings,omitempty"` - HTTPListener *SubResource `json:"httpListener,omitempty"` - URLPathMap *SubResource `json:"urlPathMap,omitempty"` - ProvisioningState *string `json:"provisioningState,omitempty"` + RuleType ApplicationGatewayRequestRoutingRuleType `json:"ruleType,omitempty"` + BackendAddressPool *SubResource `json:"backendAddressPool,omitempty"` + BackendHTTPSettings *SubResource `json:"backendHttpSettings,omitempty"` + HTTPListener *SubResource `json:"httpListener,omitempty"` + URLPathMap *SubResource `json:"urlPathMap,omitempty"` + RedirectConfiguration *SubResource `json:"redirectConfiguration,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` } -// ApplicationGatewaySku is sKU of an application gateway +// ApplicationGatewaySku is SKU of an application gateway type ApplicationGatewaySku struct { Name ApplicationGatewaySkuName `json:"name,omitempty"` Tier ApplicationGatewayTier `json:"tier,omitempty"` Capacity *int32 `json:"capacity,omitempty"` } -// ApplicationGatewaySslCertificate is sSL certificates of an application -// gateway. +// ApplicationGatewaySslCertificate is SSL certificates of an application gateway. type ApplicationGatewaySslCertificate struct { ID *string `json:"id,omitempty"` *ApplicationGatewaySslCertificatePropertiesFormat `json:"properties,omitempty"` Name *string `json:"name,omitempty"` Etag *string `json:"etag,omitempty"` + Type *string `json:"type,omitempty"` } -// ApplicationGatewaySslCertificatePropertiesFormat is properties of SSL -// certificates of an application gateway. +// ApplicationGatewaySslCertificatePropertiesFormat is properties of SSL certificates of an application gateway. type ApplicationGatewaySslCertificatePropertiesFormat struct { Data *string `json:"data,omitempty"` Password *string `json:"password,omitempty"` @@ -1211,31 +1377,48 @@ type ApplicationGatewaySslCertificatePropertiesFormat struct { ProvisioningState *string `json:"provisioningState,omitempty"` } -// ApplicationGatewaySslPolicy is application gateway SSL policy. +// ApplicationGatewaySslPolicy is application Gateway Ssl policy. 
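Note for reviewers: the hunks above introduce ApplicationGatewayRedirectConfiguration and add a RedirectConfiguration sub-resource to request routing rules. A hedged sketch wiring an HTTP listener to a permanent HTTPS redirect; the resource IDs are placeholders and the enum values are cast from string literals:

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/arm/network"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	// Placeholder parent-resource ID; real sub-resource IDs come from the gateway itself.
	const gwID = "/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Network/applicationGateways/agw"

	redirect := network.ApplicationGatewayRedirectConfiguration{
		Name: to.StringPtr("http-to-https"),
		ApplicationGatewayRedirectConfigurationPropertiesFormat: &network.ApplicationGatewayRedirectConfigurationPropertiesFormat{
			RedirectType:       network.ApplicationGatewayRedirectType("Permanent"),
			TargetListener:     &network.SubResource{ID: to.StringPtr(gwID + "/httpListeners/https-listener")},
			IncludePath:        to.BoolPtr(true),
			IncludeQueryString: to.BoolPtr(true),
		},
	}

	rule := network.ApplicationGatewayRequestRoutingRule{
		Name: to.StringPtr("redirect-rule"),
		ApplicationGatewayRequestRoutingRulePropertiesFormat: &network.ApplicationGatewayRequestRoutingRulePropertiesFormat{
			RuleType:              network.ApplicationGatewayRequestRoutingRuleType("Basic"),
			HTTPListener:          &network.SubResource{ID: to.StringPtr(gwID + "/httpListeners/http-listener")},
			RedirectConfiguration: &network.SubResource{ID: to.StringPtr(gwID + "/redirectConfigurations/" + *redirect.Name)},
		},
	}
	fmt.Println(*rule.Name)
}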
type ApplicationGatewaySslPolicy struct { - DisabledSslProtocols *[]ApplicationGatewaySslProtocol `json:"disabledSslProtocols,omitempty"` + DisabledSslProtocols *[]ApplicationGatewaySslProtocol `json:"disabledSslProtocols,omitempty"` + PolicyType ApplicationGatewaySslPolicyType `json:"policyType,omitempty"` + PolicyName ApplicationGatewaySslPolicyName `json:"policyName,omitempty"` + CipherSuites *[]ApplicationGatewaySslCipherSuite `json:"cipherSuites,omitempty"` + MinProtocolVersion ApplicationGatewaySslProtocol `json:"minProtocolVersion,omitempty"` } -// ApplicationGatewayURLPathMap is urlPathMaps give a url path to the backend -// mapping information for PathBasedRouting. +// ApplicationGatewaySslPredefinedPolicy is an Ssl predefined policy +type ApplicationGatewaySslPredefinedPolicy struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + *ApplicationGatewaySslPredefinedPolicyPropertiesFormat `json:"properties,omitempty"` +} + +// ApplicationGatewaySslPredefinedPolicyPropertiesFormat is properties of ApplicationGatewaySslPredefinedPolicy +type ApplicationGatewaySslPredefinedPolicyPropertiesFormat struct { + CipherSuites *[]ApplicationGatewaySslCipherSuite `json:"cipherSuites,omitempty"` + MinProtocolVersion ApplicationGatewaySslProtocol `json:"minProtocolVersion,omitempty"` +} + +// ApplicationGatewayURLPathMap is urlPathMaps give a url path to the backend mapping information for PathBasedRouting. type ApplicationGatewayURLPathMap struct { ID *string `json:"id,omitempty"` *ApplicationGatewayURLPathMapPropertiesFormat `json:"properties,omitempty"` Name *string `json:"name,omitempty"` Etag *string `json:"etag,omitempty"` + Type *string `json:"type,omitempty"` } -// ApplicationGatewayURLPathMapPropertiesFormat is properties of UrlPathMap of -// the application gateway. +// ApplicationGatewayURLPathMapPropertiesFormat is properties of UrlPathMap of the application gateway. type ApplicationGatewayURLPathMapPropertiesFormat struct { - DefaultBackendAddressPool *SubResource `json:"defaultBackendAddressPool,omitempty"` - DefaultBackendHTTPSettings *SubResource `json:"defaultBackendHttpSettings,omitempty"` - PathRules *[]ApplicationGatewayPathRule `json:"pathRules,omitempty"` - ProvisioningState *string `json:"provisioningState,omitempty"` + DefaultBackendAddressPool *SubResource `json:"defaultBackendAddressPool,omitempty"` + DefaultBackendHTTPSettings *SubResource `json:"defaultBackendHttpSettings,omitempty"` + DefaultRedirectConfiguration *SubResource `json:"defaultRedirectConfiguration,omitempty"` + PathRules *[]ApplicationGatewayPathRule `json:"pathRules,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` } -// ApplicationGatewayWebApplicationFirewallConfiguration is application gateway -// web application firewall configuration. +// ApplicationGatewayWebApplicationFirewallConfiguration is application gateway web application firewall configuration. type ApplicationGatewayWebApplicationFirewallConfiguration struct { Enabled *bool `json:"enabled,omitempty"` FirewallMode ApplicationGatewayFirewallMode `json:"firewallMode,omitempty"` @@ -1244,8 +1427,45 @@ type ApplicationGatewayWebApplicationFirewallConfiguration struct { DisabledRuleGroups *[]ApplicationGatewayFirewallDisabledRuleGroup `json:"disabledRuleGroups,omitempty"` } -// AuthorizationListResult is response for ListAuthorizations API service call -// retrieves all authorizations that belongs to an ExpressRouteCircuit. 
+// ApplicationSecurityGroup is an application security group in a resource group. +type ApplicationSecurityGroup struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + *ApplicationSecurityGroupPropertiesFormat `json:"properties,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// ApplicationSecurityGroupListResult is a list of application security groups. +type ApplicationSecurityGroupListResult struct { + autorest.Response `json:"-"` + Value *[]ApplicationSecurityGroup `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// ApplicationSecurityGroupListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client ApplicationSecurityGroupListResult) ApplicationSecurityGroupListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// ApplicationSecurityGroupPropertiesFormat is application security group properties. +type ApplicationSecurityGroupPropertiesFormat struct { + ResourceGUID *string `json:"resourceGuid,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// AuthorizationListResult is response for ListAuthorizations API service call retrieves all authorizations that +// belongs to an ExpressRouteCircuit. type AuthorizationListResult struct { autorest.Response `json:"-"` Value *[]ExpressRouteCircuitAuthorization `json:"value,omitempty"` @@ -1271,29 +1491,97 @@ type AuthorizationPropertiesFormat struct { ProvisioningState *string `json:"provisioningState,omitempty"` } -// AzureAsyncOperationResult is the response body contains the status of the -// specified asynchronous operation, indicating whether it has succeeded, is in -// progress, or has failed. Note that this status is distinct from the HTTP -// status code returned for the Get Operation Status operation itself. If the -// asynchronous operation succeeded, the response body includes the HTTP status -// code for the successful request. If the asynchronous operation failed, the -// response body includes the HTTP status code for the failed request and error -// information regarding the failure. +// AvailableProvidersList is list of available countries with details. +type AvailableProvidersList struct { + autorest.Response `json:"-"` + Countries *[]AvailableProvidersListCountry `json:"countries,omitempty"` +} + +// AvailableProvidersListCity is city or town details. +type AvailableProvidersListCity struct { + CityName *string `json:"cityName,omitempty"` + Providers *[]string `json:"providers,omitempty"` +} + +// AvailableProvidersListCountry is country details. +type AvailableProvidersListCountry struct { + CountryName *string `json:"countryName,omitempty"` + Providers *[]string `json:"providers,omitempty"` + States *[]AvailableProvidersListState `json:"states,omitempty"` +} + +// AvailableProvidersListParameters is constraints that determine the list of available Internet service providers. 
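Note for reviewers: the new ApplicationSecurityGroupListResult follows the same paging idiom as the other list results in this file, where the *Preparer method builds the GET request for the next page when NextLink is set. A minimal sketch of that idiom with a locally constructed page (a real page would come from the corresponding client):

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/arm/network"
	"github.com/Azure/go-autorest/autorest/to"
)

// nextPageRequest prints the URL of the next page, or notes that listing is done.
func nextPageRequest(list network.ApplicationSecurityGroupListResult) {
	req, err := list.ApplicationSecurityGroupListResultPreparer()
	if err != nil {
		fmt.Println("prepare failed:", err)
		return
	}
	if req == nil {
		// NextLink was empty, so there are no further pages.
		fmt.Println("no more pages")
		return
	}
	fmt.Println("next page:", req.URL.String())
}

func main() {
	page := network.ApplicationSecurityGroupListResult{
		Value: &[]network.ApplicationSecurityGroup{
			{Name: to.StringPtr("web-asg"), Location: to.StringPtr("westus")},
		},
		NextLink: to.StringPtr("https://management.azure.com/next-page-token"), // illustrative continuation link
	}
	nextPageRequest(page)
}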
+type AvailableProvidersListParameters struct { + AzureLocations *[]string `json:"azureLocations,omitempty"` + Country *string `json:"country,omitempty"` + State *string `json:"state,omitempty"` + City *string `json:"city,omitempty"` +} + +// AvailableProvidersListState is state details. +type AvailableProvidersListState struct { + StateName *string `json:"stateName,omitempty"` + Providers *[]string `json:"providers,omitempty"` + Cities *[]AvailableProvidersListCity `json:"cities,omitempty"` +} + +// AzureAsyncOperationResult is the response body contains the status of the specified asynchronous operation, +// indicating whether it has succeeded, is in progress, or has failed. Note that this status is distinct from the HTTP +// status code returned for the Get Operation Status operation itself. If the asynchronous operation succeeded, the +// response body includes the HTTP status code for the successful request. If the asynchronous operation failed, the +// response body includes the HTTP status code for the failed request and error information regarding the failure. type AzureAsyncOperationResult struct { Status OperationStatus `json:"status,omitempty"` Error *Error `json:"error,omitempty"` } +// AzureReachabilityReport is azure reachability report details. +type AzureReachabilityReport struct { + autorest.Response `json:"-"` + AggregationLevel *string `json:"aggregationLevel,omitempty"` + ProviderLocation *AzureReachabilityReportLocation `json:"providerLocation,omitempty"` + ReachabilityReport *[]AzureReachabilityReportItem `json:"reachabilityReport,omitempty"` +} + +// AzureReachabilityReportItem is azure reachability report details for a given provider location. +type AzureReachabilityReportItem struct { + Provider *string `json:"provider,omitempty"` + AzureLocation *string `json:"azureLocation,omitempty"` + Latencies *[]AzureReachabilityReportLatencyInfo `json:"latencies,omitempty"` +} + +// AzureReachabilityReportLatencyInfo is details on latency for a time series. +type AzureReachabilityReportLatencyInfo struct { + TimeStamp *date.Time `json:"timeStamp,omitempty"` + Score *int32 `json:"score,omitempty"` +} + +// AzureReachabilityReportLocation is parameters that define a geographic location. +type AzureReachabilityReportLocation struct { + Country *string `json:"country,omitempty"` + State *string `json:"state,omitempty"` + City *string `json:"city,omitempty"` +} + +// AzureReachabilityReportParameters is geographic and time constraints for Azure reachability report. +type AzureReachabilityReportParameters struct { + ProviderLocation *AzureReachabilityReportLocation `json:"providerLocation,omitempty"` + Providers *[]string `json:"providers,omitempty"` + AzureLocations *[]string `json:"azureLocations,omitempty"` + StartTime *date.Time `json:"startTime,omitempty"` + EndTime *date.Time `json:"endTime,omitempty"` +} + // BackendAddressPool is pool of backend IP addresses. type BackendAddressPool struct { + autorest.Response `json:"-"` ID *string `json:"id,omitempty"` *BackendAddressPoolPropertiesFormat `json:"properties,omitempty"` Name *string `json:"name,omitempty"` Etag *string `json:"etag,omitempty"` } -// BackendAddressPoolPropertiesFormat is properties of the backend address -// pool. +// BackendAddressPoolPropertiesFormat is properties of the backend address pool. 
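Note for reviewers: the AzureReachabilityReport* types above are request/response shapes for Network Watcher reachability reports. A sketch of building the request parameters, assuming the vendored arm/network and autorest/date packages; the provider and location values are illustrative:

package main

import (
	"fmt"
	"time"

	"github.com/Azure/azure-sdk-for-go/arm/network"
	"github.com/Azure/go-autorest/autorest/date"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	providers := []string{"Contoso ISP"} // hypothetical provider name
	locations := []string{"West US"}

	params := network.AzureReachabilityReportParameters{
		ProviderLocation: &network.AzureReachabilityReportLocation{
			Country: to.StringPtr("United States"),
			State:   to.StringPtr("washington"),
		},
		Providers:      &providers,
		AzureLocations: &locations,
		// Report over the last seven days.
		StartTime: &date.Time{Time: time.Now().AddDate(0, 0, -7)},
		EndTime:   &date.Time{Time: time.Now()},
	}
	fmt.Println(*params.ProviderLocation.Country)
}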
type BackendAddressPoolPropertiesFormat struct { BackendIPConfigurations *[]InterfaceIPConfiguration `json:"backendIPConfigurations,omitempty"` LoadBalancingRules *[]SubResource `json:"loadBalancingRules,omitempty"` @@ -1301,16 +1589,17 @@ type BackendAddressPoolPropertiesFormat struct { ProvisioningState *string `json:"provisioningState,omitempty"` } -// BGPCommunity is contains bgp community information offered in Service -// Community resources. +// BGPCommunity is contains bgp community information offered in Service Community resources. type BGPCommunity struct { ServiceSupportedRegion *string `json:"serviceSupportedRegion,omitempty"` CommunityName *string `json:"communityName,omitempty"` CommunityValue *string `json:"communityValue,omitempty"` CommunityPrefixes *[]string `json:"communityPrefixes,omitempty"` + IsAuthorizedToUse *bool `json:"isAuthorizedToUse,omitempty"` + ServiceGroup *string `json:"serviceGroup,omitempty"` } -// BgpPeerStatus is bGP peer status details +// BgpPeerStatus is BGP peer status details type BgpPeerStatus struct { LocalAddress *string `json:"localAddress,omitempty"` Neighbor *string `json:"neighbor,omitempty"` @@ -1322,8 +1611,7 @@ type BgpPeerStatus struct { MessagesReceived *int64 `json:"messagesReceived,omitempty"` } -// BgpPeerStatusListResult is response for list BGP peer status API service -// call +// BgpPeerStatusListResult is response for list BGP peer status API service call type BgpPeerStatusListResult struct { autorest.Response `json:"-"` Value *[]BgpPeerStatus `json:"value,omitempty"` @@ -1339,8 +1627,7 @@ type BgpServiceCommunity struct { *BgpServiceCommunityPropertiesFormat `json:"properties,omitempty"` } -// BgpServiceCommunityListResult is response for the ListServiceCommunity API -// service call. +// BgpServiceCommunityListResult is response for the ListServiceCommunity API service call. type BgpServiceCommunityListResult struct { autorest.Response `json:"-"` Value *[]BgpServiceCommunity `json:"value,omitempty"` @@ -1365,7 +1652,7 @@ type BgpServiceCommunityPropertiesFormat struct { BgpCommunities *[]BGPCommunity `json:"bgpCommunities,omitempty"` } -// BgpSettings is bGP settings details +// BgpSettings is BGP settings details type BgpSettings struct { Asn *int64 `json:"asn,omitempty"` BgpPeeringAddress *string `json:"bgpPeeringAddress,omitempty"` @@ -1384,15 +1671,62 @@ type ConnectionSharedKey struct { Value *string `json:"value,omitempty"` } -// DhcpOptions is dhcpOptions contains an array of DNS servers available to VMs -// deployed in the virtual network. Standard DHCP option for a subnet overrides -// VNET DHCP options. +// ConnectivityDestination is parameters that define destination of connection. +type ConnectivityDestination struct { + ResourceID *string `json:"resourceId,omitempty"` + Address *string `json:"address,omitempty"` + Port *int32 `json:"port,omitempty"` +} + +// ConnectivityHop is information about a hop between the source and the destination. +type ConnectivityHop struct { + Type *string `json:"type,omitempty"` + ID *string `json:"id,omitempty"` + Address *string `json:"address,omitempty"` + ResourceID *string `json:"resourceId,omitempty"` + NextHopIds *[]string `json:"nextHopIds,omitempty"` + Issues *[]ConnectivityIssue `json:"issues,omitempty"` +} + +// ConnectivityInformation is information on the connectivity status. 
+type ConnectivityInformation struct { + autorest.Response `json:"-"` + Hops *[]ConnectivityHop `json:"hops,omitempty"` + ConnectionStatus ConnectionStatus `json:"connectionStatus,omitempty"` + AvgLatencyInMs *int32 `json:"avgLatencyInMs,omitempty"` + MinLatencyInMs *int32 `json:"minLatencyInMs,omitempty"` + MaxLatencyInMs *int32 `json:"maxLatencyInMs,omitempty"` + ProbesSent *int32 `json:"probesSent,omitempty"` + ProbesFailed *int32 `json:"probesFailed,omitempty"` +} + +// ConnectivityIssue is information about an issue encountered in the process of checking for connectivity. +type ConnectivityIssue struct { + Origin Origin `json:"origin,omitempty"` + Severity Severity `json:"severity,omitempty"` + Type IssueType `json:"type,omitempty"` + Context *[]map[string]*string `json:"context,omitempty"` +} + +// ConnectivityParameters is parameters that determine how the connectivity check will be performed. +type ConnectivityParameters struct { + Source *ConnectivitySource `json:"source,omitempty"` + Destination *ConnectivityDestination `json:"destination,omitempty"` +} + +// ConnectivitySource is parameters that define the source of the connection. +type ConnectivitySource struct { + ResourceID *string `json:"resourceId,omitempty"` + Port *int32 `json:"port,omitempty"` +} + +// DhcpOptions is dhcpOptions contains an array of DNS servers available to VMs deployed in the virtual network. +// Standard DHCP option for a subnet overrides VNET DHCP options. type DhcpOptions struct { DNSServers *[]string `json:"dnsServers,omitempty"` } -// DNSNameAvailabilityResult is response for the CheckDnsNameAvailability API -// service call. +// DNSNameAvailabilityResult is response for the CheckDnsNameAvailability API service call. type DNSNameAvailabilityResult struct { autorest.Response `json:"-"` Available *bool `json:"available,omitempty"` @@ -1403,17 +1737,16 @@ type EffectiveNetworkSecurityGroup struct { NetworkSecurityGroup *SubResource `json:"networkSecurityGroup,omitempty"` Association *EffectiveNetworkSecurityGroupAssociation `json:"association,omitempty"` EffectiveSecurityRules *[]EffectiveNetworkSecurityRule `json:"effectiveSecurityRules,omitempty"` + TagMap *map[string][]string `json:"tagMap,omitempty"` } -// EffectiveNetworkSecurityGroupAssociation is the effective network security -// group association. +// EffectiveNetworkSecurityGroupAssociation is the effective network security group association. type EffectiveNetworkSecurityGroupAssociation struct { Subnet *SubResource `json:"subnet,omitempty"` NetworkInterface *SubResource `json:"networkInterface,omitempty"` } -// EffectiveNetworkSecurityGroupListResult is response for list effective -// network security groups API service call. +// EffectiveNetworkSecurityGroupListResult is response for list effective network security groups API service call. type EffectiveNetworkSecurityGroupListResult struct { autorest.Response `json:"-"` Value *[]EffectiveNetworkSecurityGroup `json:"value,omitempty"` @@ -1422,17 +1755,21 @@ type EffectiveNetworkSecurityGroupListResult struct { // EffectiveNetworkSecurityRule is effective network security rules. 
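Note for reviewers: ConnectivityParameters/ConnectivityInformation above are the request and response shapes for a Network Watcher connectivity check. A minimal sketch of the request side; the source VM resource ID and destination are placeholders:

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/arm/network"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	params := network.ConnectivityParameters{
		Source: &network.ConnectivitySource{
			// Placeholder resource ID of the VM the check originates from.
			ResourceID: to.StringPtr("/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Compute/virtualMachines/source-vm"),
		},
		Destination: &network.ConnectivityDestination{
			Address: to.StringPtr("example.com"),
			Port:    to.Int32Ptr(443),
		},
	}
	fmt.Println(*params.Destination.Address, *params.Destination.Port)
}

The resulting ConnectivityInformation would then report per-hop details plus aggregate latency and probe counts via the new Hops, AvgLatencyInMs, ProbesSent, and ProbesFailed fields.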
type EffectiveNetworkSecurityRule struct { - Name *string `json:"name,omitempty"` - Protocol SecurityRuleProtocol `json:"protocol,omitempty"` - SourcePortRange *string `json:"sourcePortRange,omitempty"` - DestinationPortRange *string `json:"destinationPortRange,omitempty"` - SourceAddressPrefix *string `json:"sourceAddressPrefix,omitempty"` - DestinationAddressPrefix *string `json:"destinationAddressPrefix,omitempty"` - ExpandedSourceAddressPrefix *[]string `json:"expandedSourceAddressPrefix,omitempty"` - ExpandedDestinationAddressPrefix *[]string `json:"expandedDestinationAddressPrefix,omitempty"` - Access SecurityRuleAccess `json:"access,omitempty"` - Priority *int32 `json:"priority,omitempty"` - Direction SecurityRuleDirection `json:"direction,omitempty"` + Name *string `json:"name,omitempty"` + Protocol EffectiveSecurityRuleProtocol `json:"protocol,omitempty"` + SourcePortRange *string `json:"sourcePortRange,omitempty"` + DestinationPortRange *string `json:"destinationPortRange,omitempty"` + SourcePortRanges *[]string `json:"sourcePortRanges,omitempty"` + DestinationPortRanges *[]string `json:"destinationPortRanges,omitempty"` + SourceAddressPrefix *string `json:"sourceAddressPrefix,omitempty"` + DestinationAddressPrefix *string `json:"destinationAddressPrefix,omitempty"` + SourceAddressPrefixes *[]string `json:"sourceAddressPrefixes,omitempty"` + DestinationAddressPrefixes *[]string `json:"destinationAddressPrefixes,omitempty"` + ExpandedSourceAddressPrefix *[]string `json:"expandedSourceAddressPrefix,omitempty"` + ExpandedDestinationAddressPrefix *[]string `json:"expandedDestinationAddressPrefix,omitempty"` + Access SecurityRuleAccess `json:"access,omitempty"` + Priority *int32 `json:"priority,omitempty"` + Direction SecurityRuleDirection `json:"direction,omitempty"` } // EffectiveRoute is effective Route @@ -1445,14 +1782,39 @@ type EffectiveRoute struct { NextHopType RouteNextHopType `json:"nextHopType,omitempty"` } -// EffectiveRouteListResult is response for list effective route API service -// call. +// EffectiveRouteListResult is response for list effective route API service call. type EffectiveRouteListResult struct { autorest.Response `json:"-"` Value *[]EffectiveRoute `json:"value,omitempty"` NextLink *string `json:"nextLink,omitempty"` } +// EndpointServiceResult is endpoint service. +type EndpointServiceResult struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` +} + +// EndpointServicesListResult is response for the ListAvailableEndpointServices API service call. +type EndpointServicesListResult struct { + autorest.Response `json:"-"` + Value *[]EndpointServiceResult `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// EndpointServicesListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client EndpointServicesListResult) EndpointServicesListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + // Error is type Error struct { Code *string `json:"code,omitempty"` @@ -1482,8 +1844,7 @@ type ExpressRouteCircuit struct { Etag *string `json:"etag,omitempty"` } -// ExpressRouteCircuitArpTable is the ARP table associated with the -// ExpressRouteCircuit. 
+// ExpressRouteCircuitArpTable is the ARP table associated with the ExpressRouteCircuit. type ExpressRouteCircuitArpTable struct { Age *int32 `json:"age,omitempty"` Interface *string `json:"interface,omitempty"` @@ -1491,8 +1852,7 @@ type ExpressRouteCircuitArpTable struct { MacAddress *string `json:"macAddress,omitempty"` } -// ExpressRouteCircuitAuthorization is authorization in an ExpressRouteCircuit -// resource. +// ExpressRouteCircuitAuthorization is authorization in an ExpressRouteCircuit resource. type ExpressRouteCircuitAuthorization struct { autorest.Response `json:"-"` ID *string `json:"id,omitempty"` @@ -1501,8 +1861,7 @@ type ExpressRouteCircuitAuthorization struct { Etag *string `json:"etag,omitempty"` } -// ExpressRouteCircuitListResult is response for ListExpressRouteCircuit API -// service call. +// ExpressRouteCircuitListResult is response for ListExpressRouteCircuit API service call. type ExpressRouteCircuitListResult struct { autorest.Response `json:"-"` Value *[]ExpressRouteCircuit `json:"value,omitempty"` @@ -1533,13 +1892,15 @@ type ExpressRouteCircuitPeering struct { // ExpressRouteCircuitPeeringConfig is specifies the peering configuration. type ExpressRouteCircuitPeeringConfig struct { AdvertisedPublicPrefixes *[]string `json:"advertisedPublicPrefixes,omitempty"` + AdvertisedCommunities *[]string `json:"advertisedCommunities,omitempty"` AdvertisedPublicPrefixesState ExpressRouteCircuitPeeringAdvertisedPublicPrefixState `json:"advertisedPublicPrefixesState,omitempty"` + LegacyMode *int32 `json:"legacyMode,omitempty"` CustomerASN *int32 `json:"customerASN,omitempty"` RoutingRegistryName *string `json:"routingRegistryName,omitempty"` } -// ExpressRouteCircuitPeeringListResult is response for ListPeering API service -// call retrieves all peerings that belong to an ExpressRouteCircuit. +// ExpressRouteCircuitPeeringListResult is response for ListPeering API service call retrieves all peerings that belong +// to an ExpressRouteCircuit. 
type ExpressRouteCircuitPeeringListResult struct { autorest.Response `json:"-"` Value *[]ExpressRouteCircuitPeering `json:"value,omitempty"` @@ -1560,22 +1921,23 @@ func (client ExpressRouteCircuitPeeringListResult) ExpressRouteCircuitPeeringLis // ExpressRouteCircuitPeeringPropertiesFormat is type ExpressRouteCircuitPeeringPropertiesFormat struct { - PeeringType ExpressRouteCircuitPeeringType `json:"peeringType,omitempty"` - State ExpressRouteCircuitPeeringState `json:"state,omitempty"` - AzureASN *int32 `json:"azureASN,omitempty"` - PeerASN *int32 `json:"peerASN,omitempty"` - PrimaryPeerAddressPrefix *string `json:"primaryPeerAddressPrefix,omitempty"` - SecondaryPeerAddressPrefix *string `json:"secondaryPeerAddressPrefix,omitempty"` - PrimaryAzurePort *string `json:"primaryAzurePort,omitempty"` - SecondaryAzurePort *string `json:"secondaryAzurePort,omitempty"` - SharedKey *string `json:"sharedKey,omitempty"` - VlanID *int32 `json:"vlanId,omitempty"` - MicrosoftPeeringConfig *ExpressRouteCircuitPeeringConfig `json:"microsoftPeeringConfig,omitempty"` - Stats *ExpressRouteCircuitStats `json:"stats,omitempty"` - ProvisioningState *string `json:"provisioningState,omitempty"` - GatewayManagerEtag *string `json:"gatewayManagerEtag,omitempty"` - LastModifiedBy *string `json:"lastModifiedBy,omitempty"` - RouteFilter *RouteFilter `json:"routeFilter,omitempty"` + PeeringType ExpressRouteCircuitPeeringType `json:"peeringType,omitempty"` + State ExpressRouteCircuitPeeringState `json:"state,omitempty"` + AzureASN *int32 `json:"azureASN,omitempty"` + PeerASN *int32 `json:"peerASN,omitempty"` + PrimaryPeerAddressPrefix *string `json:"primaryPeerAddressPrefix,omitempty"` + SecondaryPeerAddressPrefix *string `json:"secondaryPeerAddressPrefix,omitempty"` + PrimaryAzurePort *string `json:"primaryAzurePort,omitempty"` + SecondaryAzurePort *string `json:"secondaryAzurePort,omitempty"` + SharedKey *string `json:"sharedKey,omitempty"` + VlanID *int32 `json:"vlanId,omitempty"` + MicrosoftPeeringConfig *ExpressRouteCircuitPeeringConfig `json:"microsoftPeeringConfig,omitempty"` + Stats *ExpressRouteCircuitStats `json:"stats,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` + GatewayManagerEtag *string `json:"gatewayManagerEtag,omitempty"` + LastModifiedBy *string `json:"lastModifiedBy,omitempty"` + RouteFilter *RouteFilter `json:"routeFilter,omitempty"` + Ipv6PeeringConfig *Ipv6ExpressRouteCircuitPeeringConfig `json:"ipv6PeeringConfig,omitempty"` } // ExpressRouteCircuitPropertiesFormat is properties of ExpressRouteCircuit. @@ -1592,8 +1954,7 @@ type ExpressRouteCircuitPropertiesFormat struct { GatewayManagerEtag *string `json:"gatewayManagerEtag,omitempty"` } -// ExpressRouteCircuitRoutesTable is the routes table associated with the -// ExpressRouteCircuit +// ExpressRouteCircuitRoutesTable is the routes table associated with the ExpressRouteCircuit type ExpressRouteCircuitRoutesTable struct { NetworkProperty *string `json:"network,omitempty"` NextHop *string `json:"nextHop,omitempty"` @@ -1602,8 +1963,7 @@ type ExpressRouteCircuitRoutesTable struct { Path *string `json:"path,omitempty"` } -// ExpressRouteCircuitRoutesTableSummary is the routes table associated with -// the ExpressRouteCircuit. +// ExpressRouteCircuitRoutesTableSummary is the routes table associated with the ExpressRouteCircuit. 
type ExpressRouteCircuitRoutesTableSummary struct { Neighbor *string `json:"neighbor,omitempty"` V *int32 `json:"v,omitempty"` @@ -1612,16 +1972,14 @@ type ExpressRouteCircuitRoutesTableSummary struct { StatePfxRcd *string `json:"statePfxRcd,omitempty"` } -// ExpressRouteCircuitsArpTableListResult is response for ListArpTable -// associated with the Express Route Circuits API. +// ExpressRouteCircuitsArpTableListResult is response for ListArpTable associated with the Express Route Circuits API. type ExpressRouteCircuitsArpTableListResult struct { autorest.Response `json:"-"` Value *[]ExpressRouteCircuitArpTable `json:"value,omitempty"` NextLink *string `json:"nextLink,omitempty"` } -// ExpressRouteCircuitServiceProviderProperties is contains -// ServiceProviderProperties in an ExpressRouteCircuit. +// ExpressRouteCircuitServiceProviderProperties is contains ServiceProviderProperties in an ExpressRouteCircuit. type ExpressRouteCircuitServiceProviderProperties struct { ServiceProviderName *string `json:"serviceProviderName,omitempty"` PeeringLocation *string `json:"peeringLocation,omitempty"` @@ -1635,16 +1993,16 @@ type ExpressRouteCircuitSku struct { Family ExpressRouteCircuitSkuFamily `json:"family,omitempty"` } -// ExpressRouteCircuitsRoutesTableListResult is response for ListRoutesTable -// associated with the Express Route Circuits API. +// ExpressRouteCircuitsRoutesTableListResult is response for ListRoutesTable associated with the Express Route Circuits +// API. type ExpressRouteCircuitsRoutesTableListResult struct { autorest.Response `json:"-"` Value *[]ExpressRouteCircuitRoutesTable `json:"value,omitempty"` NextLink *string `json:"nextLink,omitempty"` } -// ExpressRouteCircuitsRoutesTableSummaryListResult is response for -// ListRoutesTable associated with the Express Route Circuits API. +// ExpressRouteCircuitsRoutesTableSummaryListResult is response for ListRoutesTable associated with the Express Route +// Circuits API. type ExpressRouteCircuitsRoutesTableSummaryListResult struct { autorest.Response `json:"-"` Value *[]ExpressRouteCircuitRoutesTableSummary `json:"value,omitempty"` @@ -1670,15 +2028,14 @@ type ExpressRouteServiceProvider struct { *ExpressRouteServiceProviderPropertiesFormat `json:"properties,omitempty"` } -// ExpressRouteServiceProviderBandwidthsOffered is contains bandwidths offered -// in ExpressRouteServiceProvider resources. +// ExpressRouteServiceProviderBandwidthsOffered is contains bandwidths offered in ExpressRouteServiceProvider +// resources. type ExpressRouteServiceProviderBandwidthsOffered struct { OfferName *string `json:"offerName,omitempty"` ValueInMbps *int32 `json:"valueInMbps,omitempty"` } -// ExpressRouteServiceProviderListResult is response for the -// ListExpressRouteServiceProvider API service call. +// ExpressRouteServiceProviderListResult is response for the ListExpressRouteServiceProvider API service call. type ExpressRouteServiceProviderListResult struct { autorest.Response `json:"-"` Value *[]ExpressRouteServiceProvider `json:"value,omitempty"` @@ -1697,8 +2054,7 @@ func (client ExpressRouteServiceProviderListResult) ExpressRouteServiceProviderL autorest.WithBaseURL(to.String(client.NextLink))) } -// ExpressRouteServiceProviderPropertiesFormat is properties of -// ExpressRouteServiceProvider. +// ExpressRouteServiceProviderPropertiesFormat is properties of ExpressRouteServiceProvider. 
type ExpressRouteServiceProviderPropertiesFormat struct { PeeringLocations *[]string `json:"peeringLocations,omitempty"` BandwidthsOffered *[]ExpressRouteServiceProviderBandwidthsOffered `json:"bandwidthsOffered,omitempty"` @@ -1719,22 +2075,22 @@ type FlowLogProperties struct { RetentionPolicy *RetentionPolicyParameters `json:"retentionPolicy,omitempty"` } -// FlowLogStatusParameters is parameters that define a resource to query flow -// log status. +// FlowLogStatusParameters is parameters that define a resource to query flow log status. type FlowLogStatusParameters struct { TargetResourceID *string `json:"targetResourceId,omitempty"` } // FrontendIPConfiguration is frontend IP address of the load balancer. type FrontendIPConfiguration struct { + autorest.Response `json:"-"` ID *string `json:"id,omitempty"` *FrontendIPConfigurationPropertiesFormat `json:"properties,omitempty"` - Name *string `json:"name,omitempty"` - Etag *string `json:"etag,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` + Zones *[]string `json:"zones,omitempty"` } -// FrontendIPConfigurationPropertiesFormat is properties of Frontend IP -// Configuration of the load balancer. +// FrontendIPConfigurationPropertiesFormat is properties of Frontend IP Configuration of the load balancer. type FrontendIPConfigurationPropertiesFormat struct { InboundNatRules *[]SubResource `json:"inboundNatRules,omitempty"` InboundNatPools *[]SubResource `json:"inboundNatPools,omitempty"` @@ -1784,12 +2140,32 @@ type InboundNatPoolPropertiesFormat struct { // InboundNatRule is inbound NAT rule of the load balancer. type InboundNatRule struct { + autorest.Response `json:"-"` ID *string `json:"id,omitempty"` *InboundNatRulePropertiesFormat `json:"properties,omitempty"` Name *string `json:"name,omitempty"` Etag *string `json:"etag,omitempty"` } +// InboundNatRuleListResult is response for ListInboundNatRule API service call. +type InboundNatRuleListResult struct { + autorest.Response `json:"-"` + Value *[]InboundNatRule `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// InboundNatRuleListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client InboundNatRuleListResult) InboundNatRuleListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + // InboundNatRulePropertiesFormat is properties of the inbound NAT rule. type InboundNatRulePropertiesFormat struct { FrontendIPConfiguration *SubResource `json:"frontendIPConfiguration,omitempty"` @@ -1820,7 +2196,7 @@ type InterfaceAssociation struct { SecurityRules *[]SecurityRule `json:"securityRules,omitempty"` } -// InterfaceDNSSettings is dNS settings of a network interface. +// InterfaceDNSSettings is DNS settings of a network interface. type InterfaceDNSSettings struct { DNSServers *[]string `json:"dnsServers,omitempty"` AppliedDNSServers *[]string `json:"appliedDnsServers,omitempty"` @@ -1831,12 +2207,32 @@ type InterfaceDNSSettings struct { // InterfaceIPConfiguration is iPConfiguration in a network interface. 
type InterfaceIPConfiguration struct { + autorest.Response `json:"-"` ID *string `json:"id,omitempty"` *InterfaceIPConfigurationPropertiesFormat `json:"properties,omitempty"` Name *string `json:"name,omitempty"` Etag *string `json:"etag,omitempty"` } +// InterfaceIPConfigurationListResult is response for list ip configurations API service call. +type InterfaceIPConfigurationListResult struct { + autorest.Response `json:"-"` + Value *[]InterfaceIPConfiguration `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// InterfaceIPConfigurationListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client InterfaceIPConfigurationListResult) InterfaceIPConfigurationListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + // InterfaceIPConfigurationPropertiesFormat is properties of IP configuration. type InterfaceIPConfigurationPropertiesFormat struct { ApplicationGatewayBackendAddressPools *[]ApplicationGatewayBackendAddressPool `json:"applicationGatewayBackendAddressPools,omitempty"` @@ -1848,11 +2244,11 @@ type InterfaceIPConfigurationPropertiesFormat struct { Subnet *Subnet `json:"subnet,omitempty"` Primary *bool `json:"primary,omitempty"` PublicIPAddress *PublicIPAddress `json:"publicIPAddress,omitempty"` + ApplicationSecurityGroups *[]ApplicationSecurityGroup `json:"applicationSecurityGroups,omitempty"` ProvisioningState *string `json:"provisioningState,omitempty"` } -// InterfaceListResult is response for the ListNetworkInterface API service -// call. +// InterfaceListResult is response for the ListNetworkInterface API service call. type InterfaceListResult struct { autorest.Response `json:"-"` Value *[]Interface `json:"value,omitempty"` @@ -1871,6 +2267,25 @@ func (client InterfaceListResult) InterfaceListResultPreparer() (*http.Request, autorest.WithBaseURL(to.String(client.NextLink))) } +// InterfaceLoadBalancerListResult is response for list ip configurations API service call. +type InterfaceLoadBalancerListResult struct { + autorest.Response `json:"-"` + Value *[]LoadBalancer `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// InterfaceLoadBalancerListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client InterfaceLoadBalancerListResult) InterfaceLoadBalancerListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + // InterfacePropertiesFormat is networkInterface properties. 
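Note for reviewers: InterfaceIPConfigurationPropertiesFormat above gains an ApplicationSecurityGroups field, letting a NIC IP configuration declare ASG membership that security rules can later reference. A hedged sketch with placeholder resource IDs:

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/arm/network"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	asgs := []network.ApplicationSecurityGroup{
		{ID: to.StringPtr("/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Network/applicationSecurityGroups/web-asg")},
	}

	ipCfg := network.InterfaceIPConfiguration{
		Name: to.StringPtr("ipconfig1"),
		InterfaceIPConfigurationPropertiesFormat: &network.InterfaceIPConfigurationPropertiesFormat{
			Subnet:  &network.Subnet{ID: to.StringPtr("/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Network/virtualNetworks/vnet/subnets/web")},
			Primary: to.BoolPtr(true),
			// New in this revision: NIC-level membership in application security groups.
			ApplicationSecurityGroups: &asgs,
		},
	}
	fmt.Println(*ipCfg.Name)
}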
type InterfacePropertiesFormat struct { VirtualMachine *SubResource `json:"virtualMachine,omitempty"` @@ -1885,15 +2300,14 @@ type InterfacePropertiesFormat struct { ProvisioningState *string `json:"provisioningState,omitempty"` } -// IPAddressAvailabilityResult is response for CheckIPAddressAvailability API -// service call +// IPAddressAvailabilityResult is response for CheckIPAddressAvailability API service call type IPAddressAvailabilityResult struct { autorest.Response `json:"-"` Available *bool `json:"available,omitempty"` AvailableIPAddresses *[]string `json:"availableIPAddresses,omitempty"` } -// IPConfiguration is iPConfiguration +// IPConfiguration is IP configuration type IPConfiguration struct { ID *string `json:"id,omitempty"` *IPConfigurationPropertiesFormat `json:"properties,omitempty"` @@ -1910,8 +2324,7 @@ type IPConfigurationPropertiesFormat struct { ProvisioningState *string `json:"provisioningState,omitempty"` } -// IpsecPolicy is an IPSec Policy configuration for a virtual network gateway -// connection +// IpsecPolicy is an IPSec Policy configuration for a virtual network gateway connection type IpsecPolicy struct { SaLifeTimeSeconds *int32 `json:"saLifeTimeSeconds,omitempty"` SaDataSizeKilobytes *int32 `json:"saDataSizeKilobytes,omitempty"` @@ -1923,6 +2336,15 @@ type IpsecPolicy struct { PfsGroup PfsGroup `json:"pfsGroup,omitempty"` } +// Ipv6ExpressRouteCircuitPeeringConfig is contains IPv6 peering config. +type Ipv6ExpressRouteCircuitPeeringConfig struct { + PrimaryPeerAddressPrefix *string `json:"primaryPeerAddressPrefix,omitempty"` + SecondaryPeerAddressPrefix *string `json:"secondaryPeerAddressPrefix,omitempty"` + MicrosoftPeeringConfig *ExpressRouteCircuitPeeringConfig `json:"microsoftPeeringConfig,omitempty"` + RouteFilter *RouteFilter `json:"routeFilter,omitempty"` + State ExpressRouteCircuitPeeringState `json:"state,omitempty"` +} + // LoadBalancer is loadBalancer resource type LoadBalancer struct { autorest.Response `json:"-"` @@ -1931,10 +2353,49 @@ type LoadBalancer struct { Type *string `json:"type,omitempty"` Location *string `json:"location,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` + Sku *LoadBalancerSku `json:"sku,omitempty"` *LoadBalancerPropertiesFormat `json:"properties,omitempty"` Etag *string `json:"etag,omitempty"` } +// LoadBalancerBackendAddressPoolListResult is response for ListBackendAddressPool API service call. +type LoadBalancerBackendAddressPoolListResult struct { + autorest.Response `json:"-"` + Value *[]BackendAddressPool `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// LoadBalancerBackendAddressPoolListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client LoadBalancerBackendAddressPoolListResult) LoadBalancerBackendAddressPoolListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// LoadBalancerFrontendIPConfigurationListResult is response for ListFrontendIPConfiguration API service call. 
+type LoadBalancerFrontendIPConfigurationListResult struct { + autorest.Response `json:"-"` + Value *[]FrontendIPConfiguration `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// LoadBalancerFrontendIPConfigurationListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client LoadBalancerFrontendIPConfigurationListResult) LoadBalancerFrontendIPConfigurationListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + // LoadBalancerListResult is response for ListLoadBalancers API service call. type LoadBalancerListResult struct { autorest.Response `json:"-"` @@ -1954,6 +2415,44 @@ func (client LoadBalancerListResult) LoadBalancerListResultPreparer() (*http.Req autorest.WithBaseURL(to.String(client.NextLink))) } +// LoadBalancerLoadBalancingRuleListResult is response for ListLoadBalancingRule API service call. +type LoadBalancerLoadBalancingRuleListResult struct { + autorest.Response `json:"-"` + Value *[]LoadBalancingRule `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// LoadBalancerLoadBalancingRuleListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client LoadBalancerLoadBalancingRuleListResult) LoadBalancerLoadBalancingRuleListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// LoadBalancerProbeListResult is response for ListProbe API service call. +type LoadBalancerProbeListResult struct { + autorest.Response `json:"-"` + Value *[]Probe `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// LoadBalancerProbeListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client LoadBalancerProbeListResult) LoadBalancerProbeListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + // LoadBalancerPropertiesFormat is properties of the load balancer. type LoadBalancerPropertiesFormat struct { FrontendIPConfigurations *[]FrontendIPConfiguration `json:"frontendIPConfigurations,omitempty"` @@ -1967,8 +2466,14 @@ type LoadBalancerPropertiesFormat struct { ProvisioningState *string `json:"provisioningState,omitempty"` } -// LoadBalancingRule is a loag balancing rule for a load balancer. +// LoadBalancerSku is SKU of a load balancer +type LoadBalancerSku struct { + Name LoadBalancerSkuName `json:"name,omitempty"` +} + +// LoadBalancingRule is a load balancing rule for a load balancer. 
type LoadBalancingRule struct { + autorest.Response `json:"-"` ID *string `json:"id,omitempty"` *LoadBalancingRulePropertiesFormat `json:"properties,omitempty"` Name *string `json:"name,omitempty"` @@ -1986,6 +2491,7 @@ type LoadBalancingRulePropertiesFormat struct { BackendPort *int32 `json:"backendPort,omitempty"` IdleTimeoutInMinutes *int32 `json:"idleTimeoutInMinutes,omitempty"` EnableFloatingIP *bool `json:"enableFloatingIP,omitempty"` + DisableOutboundSnat *bool `json:"disableOutboundSnat,omitempty"` ProvisioningState *string `json:"provisioningState,omitempty"` } @@ -2001,8 +2507,7 @@ type LocalNetworkGateway struct { Etag *string `json:"etag,omitempty"` } -// LocalNetworkGatewayListResult is response for ListLocalNetworkGateways API -// service call. +// LocalNetworkGatewayListResult is response for ListLocalNetworkGateways API service call. type LocalNetworkGatewayListResult struct { autorest.Response `json:"-"` Value *[]LocalNetworkGateway `json:"value,omitempty"` @@ -2030,8 +2535,7 @@ type LocalNetworkGatewayPropertiesFormat struct { ProvisioningState *string `json:"provisioningState,omitempty"` } -// NextHopParameters is parameters that define the source and destination -// endpoint. +// NextHopParameters is parameters that define the source and destination endpoint. type NextHopParameters struct { TargetResourceID *string `json:"targetResourceId,omitempty"` SourceIPAddress *string `json:"sourceIPAddress,omitempty"` @@ -2068,8 +2572,7 @@ type PacketCapture struct { *PacketCaptureParameters `json:"properties,omitempty"` } -// PacketCaptureFilter is filter that is applied to packet capture request. -// Multiple filters can be applied. +// PacketCaptureFilter is filter that is applied to packet capture request. Multiple filters can be applied. type PacketCaptureFilter struct { Protocol PcProtocol `json:"protocol,omitempty"` LocalIPAddress *string `json:"localIPAddress,omitempty"` @@ -2084,8 +2587,7 @@ type PacketCaptureListResult struct { Value *[]PacketCaptureResult `json:"value,omitempty"` } -// PacketCaptureParameters is parameters that define the create packet capture -// operation. +// PacketCaptureParameters is parameters that define the create packet capture operation. type PacketCaptureParameters struct { Target *string `json:"target,omitempty"` BytesToCapturePerPacket *int32 `json:"bytesToCapturePerPacket,omitempty"` @@ -2115,8 +2617,7 @@ type PacketCaptureResult struct { *PacketCaptureResultProperties `json:"properties,omitempty"` } -// PacketCaptureResultProperties is describes the properties of a packet -// capture session. +// PacketCaptureResultProperties is describes the properties of a packet capture session. type PacketCaptureResultProperties struct { Target *string `json:"target,omitempty"` BytesToCapturePerPacket *int32 `json:"bytesToCapturePerPacket,omitempty"` @@ -2127,8 +2628,7 @@ type PacketCaptureResultProperties struct { ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` } -// PacketCaptureStorageLocation is describes the storage location for a packet -// capture session. +// PacketCaptureStorageLocation is describes the storage location for a packet capture session. type PacketCaptureStorageLocation struct { StorageID *string `json:"storageId,omitempty"` StoragePath *string `json:"storagePath,omitempty"` @@ -2156,13 +2656,14 @@ type PatchRouteFilterRule struct { // Probe is a load balancer probe. 
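Note for reviewers: load balancers now carry a SKU (hunk further above) and load balancing rules gain DisableOutboundSnat. A rough sketch combining the two, assuming the vendored arm/network package; only a subset of rule fields is set and the SKU name literal is cast to the SDK's string-typed enum:

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/arm/network"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	rule := network.LoadBalancingRule{
		Name: to.StringPtr("web-rule"),
		LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
			BackendPort:          to.Int32Ptr(80),
			IdleTimeoutInMinutes: to.Int32Ptr(4),
			EnableFloatingIP:     to.BoolPtr(false),
			// New in this revision: opt this rule out of outbound SNAT.
			DisableOutboundSnat: to.BoolPtr(true),
		},
	}

	lb := network.LoadBalancer{
		Name:     to.StringPtr("standard-lb"),
		Location: to.StringPtr("westus"),
		// New in this revision: load balancers carry a SKU (Basic or Standard).
		Sku: &network.LoadBalancerSku{Name: network.LoadBalancerSkuName("Standard")},
		LoadBalancerPropertiesFormat: &network.LoadBalancerPropertiesFormat{
			LoadBalancingRules: &[]network.LoadBalancingRule{rule},
		},
	}
	fmt.Println(*lb.Name)
}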
type Probe struct { + autorest.Response `json:"-"` ID *string `json:"id,omitempty"` *ProbePropertiesFormat `json:"properties,omitempty"` Name *string `json:"name,omitempty"` Etag *string `json:"etag,omitempty"` } -// ProbePropertiesFormat is +// ProbePropertiesFormat is load balancer probe resource. type ProbePropertiesFormat struct { LoadBalancingRules *[]SubResource `json:"loadBalancingRules,omitempty"` Protocol ProbeProtocol `json:"protocol,omitempty"` @@ -2181,20 +2682,20 @@ type PublicIPAddress struct { Type *string `json:"type,omitempty"` Location *string `json:"location,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` + Sku *PublicIPAddressSku `json:"sku,omitempty"` *PublicIPAddressPropertiesFormat `json:"properties,omitempty"` - Etag *string `json:"etag,omitempty"` + Etag *string `json:"etag,omitempty"` + Zones *[]string `json:"zones,omitempty"` } -// PublicIPAddressDNSSettings is contains FQDN of the DNS record associated -// with the public IP address +// PublicIPAddressDNSSettings is contains FQDN of the DNS record associated with the public IP address type PublicIPAddressDNSSettings struct { DomainNameLabel *string `json:"domainNameLabel,omitempty"` Fqdn *string `json:"fqdn,omitempty"` ReverseFqdn *string `json:"reverseFqdn,omitempty"` } -// PublicIPAddressListResult is response for ListPublicIpAddresses API service -// call. +// PublicIPAddressListResult is response for ListPublicIpAddresses API service call. type PublicIPAddressListResult struct { autorest.Response `json:"-"` Value *[]PublicIPAddress `json:"value,omitempty"` @@ -2225,13 +2726,17 @@ type PublicIPAddressPropertiesFormat struct { ProvisioningState *string `json:"provisioningState,omitempty"` } -// QueryTroubleshootingParameters is parameters that define the resource to -// query the troubleshooting result. +// PublicIPAddressSku is SKU of a public IP address +type PublicIPAddressSku struct { + Name PublicIPAddressSkuName `json:"name,omitempty"` +} + +// QueryTroubleshootingParameters is parameters that define the resource to query the troubleshooting result. type QueryTroubleshootingParameters struct { TargetResourceID *string `json:"targetResourceId,omitempty"` } -// Resource is +// Resource is common resource representation. type Resource struct { ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` @@ -2255,8 +2760,7 @@ type ResourceNavigationLinkFormat struct { ProvisioningState *string `json:"provisioningState,omitempty"` } -// RetentionPolicyParameters is parameters that define the retention policy for -// flow log. +// RetentionPolicyParameters is parameters that define the retention policy for flow log. type RetentionPolicyParameters struct { Days *int32 `json:"days,omitempty"` Enabled *bool `json:"enabled,omitempty"` @@ -2320,8 +2824,7 @@ type RouteFilterRule struct { Tags *map[string]*string `json:"tags,omitempty"` } -// RouteFilterRuleListResult is response for the ListRouteFilterRules API -// service call +// RouteFilterRuleListResult is response for the ListRouteFilterRules API service call type RouteFilterRuleListResult struct { autorest.Response `json:"-"` Value *[]RouteFilterRule `json:"value,omitempty"` @@ -2425,8 +2928,7 @@ type SecurityGroup struct { Etag *string `json:"etag,omitempty"` } -// SecurityGroupListResult is response for ListNetworkSecurityGroups API -// service call. +// SecurityGroupListResult is response for ListNetworkSecurityGroups API service call. 
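Note for reviewers: PublicIPAddress above gains a Sku and a Zones field. A minimal sketch of a zonal Standard public IP; zone availability varies by region and the values here are illustrative:

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/arm/network"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	// Pin the address to availability zone 1 (illustrative).
	zones := []string{"1"}

	pip := network.PublicIPAddress{
		Name:     to.StringPtr("web-pip"),
		Location: to.StringPtr("westus2"),
		// New in this revision: public IPs carry a SKU and optional zones.
		Sku:   &network.PublicIPAddressSku{Name: network.PublicIPAddressSkuName("Standard")},
		Zones: &zones,
	}
	fmt.Println(*pip.Name, (*pip.Zones)[0])
}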
type SecurityGroupListResult struct { autorest.Response `json:"-"` Value *[]SecurityGroup `json:"value,omitempty"` @@ -2445,8 +2947,7 @@ func (client SecurityGroupListResult) SecurityGroupListResultPreparer() (*http.R autorest.WithBaseURL(to.String(client.NextLink))) } -// SecurityGroupNetworkInterface is network interface and all its associated -// security rules. +// SecurityGroupNetworkInterface is network interface and all its associated security rules. type SecurityGroupNetworkInterface struct { ID *string `json:"id,omitempty"` SecurityRuleAssociations *SecurityRuleAssociations `json:"securityRuleAssociations,omitempty"` @@ -2462,14 +2963,12 @@ type SecurityGroupPropertiesFormat struct { ProvisioningState *string `json:"provisioningState,omitempty"` } -// SecurityGroupViewParameters is parameters that define the VM to check -// security groups for. +// SecurityGroupViewParameters is parameters that define the VM to check security groups for. type SecurityGroupViewParameters struct { TargetResourceID *string `json:"targetResourceId,omitempty"` } -// SecurityGroupViewResult is the information about security rules applied to -// the specified VM. +// SecurityGroupViewResult is the information about security rules applied to the specified VM. type SecurityGroupViewResult struct { autorest.Response `json:"-"` NetworkInterfaces *[]SecurityGroupNetworkInterface `json:"networkInterfaces,omitempty"` @@ -2484,8 +2983,7 @@ type SecurityRule struct { Etag *string `json:"etag,omitempty"` } -// SecurityRuleAssociations is all security rules associated with the network -// interface. +// SecurityRuleAssociations is all security rules associated with the network interface. type SecurityRuleAssociations struct { NetworkInterfaceAssociation *InterfaceAssociation `json:"networkInterfaceAssociation,omitempty"` SubnetAssociation *SubnetAssociation `json:"subnetAssociation,omitempty"` @@ -2493,8 +2991,8 @@ type SecurityRuleAssociations struct { EffectiveSecurityRules *[]EffectiveNetworkSecurityRule `json:"effectiveSecurityRules,omitempty"` } -// SecurityRuleListResult is response for ListSecurityRule API service call. -// Retrieves all security rules that belongs to a network security group. +// SecurityRuleListResult is response for ListSecurityRule API service call. Retrieves all security rules that belongs +// to a network security group. type SecurityRuleListResult struct { autorest.Response `json:"-"` Value *[]SecurityRule `json:"value,omitempty"` @@ -2513,18 +3011,31 @@ func (client SecurityRuleListResult) SecurityRuleListResultPreparer() (*http.Req autorest.WithBaseURL(to.String(client.NextLink))) } -// SecurityRulePropertiesFormat is +// SecurityRulePropertiesFormat is security rule resource. 
type SecurityRulePropertiesFormat struct { - Description *string `json:"description,omitempty"` - Protocol SecurityRuleProtocol `json:"protocol,omitempty"` - SourcePortRange *string `json:"sourcePortRange,omitempty"` - DestinationPortRange *string `json:"destinationPortRange,omitempty"` - SourceAddressPrefix *string `json:"sourceAddressPrefix,omitempty"` - DestinationAddressPrefix *string `json:"destinationAddressPrefix,omitempty"` - Access SecurityRuleAccess `json:"access,omitempty"` - Priority *int32 `json:"priority,omitempty"` - Direction SecurityRuleDirection `json:"direction,omitempty"` - ProvisioningState *string `json:"provisioningState,omitempty"` + Description *string `json:"description,omitempty"` + Protocol SecurityRuleProtocol `json:"protocol,omitempty"` + SourcePortRange *string `json:"sourcePortRange,omitempty"` + DestinationPortRange *string `json:"destinationPortRange,omitempty"` + SourceAddressPrefix *string `json:"sourceAddressPrefix,omitempty"` + SourceAddressPrefixes *[]string `json:"sourceAddressPrefixes,omitempty"` + SourceApplicationSecurityGroups *[]ApplicationSecurityGroup `json:"sourceApplicationSecurityGroups,omitempty"` + DestinationAddressPrefix *string `json:"destinationAddressPrefix,omitempty"` + DestinationAddressPrefixes *[]string `json:"destinationAddressPrefixes,omitempty"` + DestinationApplicationSecurityGroups *[]ApplicationSecurityGroup `json:"destinationApplicationSecurityGroups,omitempty"` + SourcePortRanges *[]string `json:"sourcePortRanges,omitempty"` + DestinationPortRanges *[]string `json:"destinationPortRanges,omitempty"` + Access SecurityRuleAccess `json:"access,omitempty"` + Priority *int32 `json:"priority,omitempty"` + Direction SecurityRuleDirection `json:"direction,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// ServiceEndpointPropertiesFormat is the service endpoint properties. +type ServiceEndpointPropertiesFormat struct { + Service *string `json:"service,omitempty"` + Locations *[]string `json:"locations,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` } // String is @@ -2548,8 +3059,7 @@ type SubnetAssociation struct { SecurityRules *[]SecurityRule `json:"securityRules,omitempty"` } -// SubnetListResult is response for ListSubnets API service callRetrieves all -// subnet that belongs to a virtual network +// SubnetListResult is response for ListSubnets API service callRetrieves all subnet that belongs to a virtual network type SubnetListResult struct { autorest.Response `json:"-"` Value *[]Subnet `json:"value,omitempty"` @@ -2568,17 +3078,18 @@ func (client SubnetListResult) SubnetListResultPreparer() (*http.Request, error) autorest.WithBaseURL(to.String(client.NextLink))) } -// SubnetPropertiesFormat is +// SubnetPropertiesFormat is properties of the subnet. 
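A minimal sketch of how a caller might populate the plural address and port fields that this revision adds to SecurityRulePropertiesFormat; it assumes only the autorest/to pointer helpers already vendored here, and every concrete value and name below is an illustrative placeholder rather than code from this repository.

package sketch

import (
	"github.com/Azure/azure-sdk-for-go/arm/network"
	"github.com/Azure/go-autorest/autorest/to"
)

// exampleRule builds a rule using the plural prefix/port fields added in this
// revision (SourceAddressPrefixes, DestinationPortRanges, ...). The singular
// fields remain present, so existing callers that only set
// SourceAddressPrefix or DestinationPortRange keep compiling unchanged.
// All values are placeholders.
func exampleRule() network.SecurityRulePropertiesFormat {
	return network.SecurityRulePropertiesFormat{
		Priority:                 to.Int32Ptr(100),
		SourcePortRange:          to.StringPtr("*"),
		SourceAddressPrefixes:    to.StringSlicePtr([]string{"10.240.0.0/16", "10.241.0.0/16"}),
		DestinationAddressPrefix: to.StringPtr("*"),
		DestinationPortRanges:    to.StringSlicePtr([]string{"443", "8443"}),
	}
}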
type SubnetPropertiesFormat struct { - AddressPrefix *string `json:"addressPrefix,omitempty"` - NetworkSecurityGroup *SecurityGroup `json:"networkSecurityGroup,omitempty"` - RouteTable *RouteTable `json:"routeTable,omitempty"` - IPConfigurations *[]IPConfiguration `json:"ipConfigurations,omitempty"` - ResourceNavigationLinks *[]ResourceNavigationLink `json:"resourceNavigationLinks,omitempty"` - ProvisioningState *string `json:"provisioningState,omitempty"` + AddressPrefix *string `json:"addressPrefix,omitempty"` + NetworkSecurityGroup *SecurityGroup `json:"networkSecurityGroup,omitempty"` + RouteTable *RouteTable `json:"routeTable,omitempty"` + ServiceEndpoints *[]ServiceEndpointPropertiesFormat `json:"serviceEndpoints,omitempty"` + IPConfigurations *[]IPConfiguration `json:"ipConfigurations,omitempty"` + ResourceNavigationLinks *[]ResourceNavigationLink `json:"resourceNavigationLinks,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` } -// SubResource is +// SubResource is reference to another subresource. type SubResource struct { ID *string `json:"id,omitempty"` } @@ -2592,8 +3103,7 @@ type Topology struct { Resources *[]TopologyResource `json:"resources,omitempty"` } -// TopologyAssociation is resources that have an association with the parent -// resource. +// TopologyAssociation is resources that have an association with the parent resource. type TopologyAssociation struct { Name *string `json:"name,omitempty"` ResourceID *string `json:"resourceId,omitempty"` @@ -2605,8 +3115,7 @@ type TopologyParameters struct { TargetResourceGroupName *string `json:"targetResourceGroupName,omitempty"` } -// TopologyResource is the network resource topology information for the given -// resource group. +// TopologyResource is the network resource topology information for the given resource group. type TopologyResource struct { Name *string `json:"name,omitempty"` ID *string `json:"id,omitempty"` @@ -2614,8 +3123,7 @@ type TopologyResource struct { Associations *[]TopologyAssociation `json:"associations,omitempty"` } -// TroubleshootingDetails is information gained from troubleshooting of -// specified resource. +// TroubleshootingDetails is information gained from troubleshooting of specified resource. type TroubleshootingDetails struct { ID *string `json:"id,omitempty"` ReasonType *string `json:"reasonType,omitempty"` @@ -2624,8 +3132,7 @@ type TroubleshootingDetails struct { RecommendedActions *[]TroubleshootingRecommendedActions `json:"recommendedActions,omitempty"` } -// TroubleshootingParameters is parameters that define the resource to -// troubleshoot. +// TroubleshootingParameters is parameters that define the resource to troubleshoot. type TroubleshootingParameters struct { TargetResourceID *string `json:"targetResourceId,omitempty"` *TroubleshootingProperties `json:"properties,omitempty"` @@ -2637,8 +3144,7 @@ type TroubleshootingProperties struct { StoragePath *string `json:"storagePath,omitempty"` } -// TroubleshootingRecommendedActions is recommended actions based on discovered -// issues. +// TroubleshootingRecommendedActions is recommended actions based on discovered issues. type TroubleshootingRecommendedActions struct { ActionID *string `json:"actionId,omitempty"` ActionText *string `json:"actionText,omitempty"` @@ -2646,8 +3152,7 @@ type TroubleshootingRecommendedActions struct { ActionURIText *string `json:"actionUriText,omitempty"` } -// TroubleshootingResult is troubleshooting information gained from specified -// resource. 
+// TroubleshootingResult is troubleshooting information gained from specified resource. type TroubleshootingResult struct { autorest.Response `json:"-"` StartTime *date.Time `json:"startTime,omitempty"` @@ -2667,6 +3172,7 @@ type TunnelConnectionHealth struct { // Usage is describes network resource usage. type Usage struct { + ID *string `json:"id,omitempty"` Unit *string `json:"unit,omitempty"` CurrentValue *int64 `json:"currentValue,omitempty"` Limit *int64 `json:"limit,omitempty"` @@ -2698,8 +3204,7 @@ func (client UsagesListResult) UsagesListResultPreparer() (*http.Request, error) autorest.WithBaseURL(to.String(client.NextLink))) } -// VerificationIPFlowParameters is parameters that define the IP flow to be -// verified. +// VerificationIPFlowParameters is parameters that define the IP flow to be verified. type VerificationIPFlowParameters struct { TargetResourceID *string `json:"targetResourceId,omitempty"` Direction Direction `json:"direction,omitempty"` @@ -2711,8 +3216,7 @@ type VerificationIPFlowParameters struct { TargetNicResourceID *string `json:"targetNicResourceId,omitempty"` } -// VerificationIPFlowResult is results of IP flow verification on the target -// resource. +// VerificationIPFlowResult is results of IP flow verification on the target resource. type VerificationIPFlowResult struct { autorest.Response `json:"-"` Access Access `json:"access,omitempty"` @@ -2731,6 +3235,11 @@ type VirtualNetwork struct { Etag *string `json:"etag,omitempty"` } +// VirtualNetworkConnectionGatewayReference is a reference to VirtualNetworkGateway or LocalNetworkGateway resource. +type VirtualNetworkConnectionGatewayReference struct { + ID *string `json:"id,omitempty"` +} + // VirtualNetworkGateway is a common class for general resource information type VirtualNetworkGateway struct { autorest.Response `json:"-"` @@ -2743,8 +3252,7 @@ type VirtualNetworkGateway struct { Etag *string `json:"etag,omitempty"` } -// VirtualNetworkGatewayConnection is a common class for general resource -// information +// VirtualNetworkGatewayConnection is a common class for general resource information type VirtualNetworkGatewayConnection struct { autorest.Response `json:"-"` ID *string `json:"id,omitempty"` @@ -2756,8 +3264,39 @@ type VirtualNetworkGatewayConnection struct { Etag *string `json:"etag,omitempty"` } -// VirtualNetworkGatewayConnectionListResult is response for the -// ListVirtualNetworkGatewayConnections API service call +// VirtualNetworkGatewayConnectionListEntity is a common class for general resource information +type VirtualNetworkGatewayConnectionListEntity struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + *VirtualNetworkGatewayConnectionListEntityPropertiesFormat `json:"properties,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// VirtualNetworkGatewayConnectionListEntityPropertiesFormat is virtualNetworkGatewayConnection properties +type VirtualNetworkGatewayConnectionListEntityPropertiesFormat struct { + AuthorizationKey *string `json:"authorizationKey,omitempty"` + VirtualNetworkGateway1 *VirtualNetworkConnectionGatewayReference `json:"virtualNetworkGateway1,omitempty"` + VirtualNetworkGateway2 *VirtualNetworkConnectionGatewayReference `json:"virtualNetworkGateway2,omitempty"` + LocalNetworkGateway2 *VirtualNetworkConnectionGatewayReference `json:"localNetworkGateway2,omitempty"` + ConnectionType 
VirtualNetworkGatewayConnectionType `json:"connectionType,omitempty"` + RoutingWeight *int32 `json:"routingWeight,omitempty"` + SharedKey *string `json:"sharedKey,omitempty"` + ConnectionStatus VirtualNetworkGatewayConnectionStatus `json:"connectionStatus,omitempty"` + TunnelConnectionStatus *[]TunnelConnectionHealth `json:"tunnelConnectionStatus,omitempty"` + EgressBytesTransferred *int64 `json:"egressBytesTransferred,omitempty"` + IngressBytesTransferred *int64 `json:"ingressBytesTransferred,omitempty"` + Peer *SubResource `json:"peer,omitempty"` + EnableBgp *bool `json:"enableBgp,omitempty"` + UsePolicyBasedTrafficSelectors *bool `json:"usePolicyBasedTrafficSelectors,omitempty"` + IpsecPolicies *[]IpsecPolicy `json:"ipsecPolicies,omitempty"` + ResourceGUID *string `json:"resourceGuid,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// VirtualNetworkGatewayConnectionListResult is response for the ListVirtualNetworkGatewayConnections API service call type VirtualNetworkGatewayConnectionListResult struct { autorest.Response `json:"-"` Value *[]VirtualNetworkGatewayConnection `json:"value,omitempty"` @@ -2776,8 +3315,7 @@ func (client VirtualNetworkGatewayConnectionListResult) VirtualNetworkGatewayCon autorest.WithBaseURL(to.String(client.NextLink))) } -// VirtualNetworkGatewayConnectionPropertiesFormat is -// virtualNetworkGatewayConnection properties +// VirtualNetworkGatewayConnectionPropertiesFormat is virtualNetworkGatewayConnection properties type VirtualNetworkGatewayConnectionPropertiesFormat struct { AuthorizationKey *string `json:"authorizationKey,omitempty"` VirtualNetworkGateway1 *VirtualNetworkGateway `json:"virtualNetworkGateway1,omitempty"` @@ -2798,8 +3336,7 @@ type VirtualNetworkGatewayConnectionPropertiesFormat struct { ProvisioningState *string `json:"provisioningState,omitempty"` } -// VirtualNetworkGatewayIPConfiguration is iP configuration for virtual network -// gateway +// VirtualNetworkGatewayIPConfiguration is IP configuration for virtual network gateway type VirtualNetworkGatewayIPConfiguration struct { ID *string `json:"id,omitempty"` *VirtualNetworkGatewayIPConfigurationPropertiesFormat `json:"properties,omitempty"` @@ -2807,8 +3344,7 @@ type VirtualNetworkGatewayIPConfiguration struct { Etag *string `json:"etag,omitempty"` } -// VirtualNetworkGatewayIPConfigurationPropertiesFormat is properties of -// VirtualNetworkGatewayIPConfiguration +// VirtualNetworkGatewayIPConfigurationPropertiesFormat is properties of VirtualNetworkGatewayIPConfiguration type VirtualNetworkGatewayIPConfigurationPropertiesFormat struct { PrivateIPAllocationMethod IPAllocationMethod `json:"privateIPAllocationMethod,omitempty"` Subnet *SubResource `json:"subnet,omitempty"` @@ -2816,8 +3352,26 @@ type VirtualNetworkGatewayIPConfigurationPropertiesFormat struct { ProvisioningState *string `json:"provisioningState,omitempty"` } -// VirtualNetworkGatewayListResult is response for the -// ListVirtualNetworkGateways API service call. +// VirtualNetworkGatewayListConnectionsResult is response for the VirtualNetworkGatewayListConnections API service call +type VirtualNetworkGatewayListConnectionsResult struct { + autorest.Response `json:"-"` + Value *[]VirtualNetworkGatewayConnectionListEntity `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// VirtualNetworkGatewayListConnectionsResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client VirtualNetworkGatewayListConnectionsResult) VirtualNetworkGatewayListConnectionsResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// VirtualNetworkGatewayListResult is response for the ListVirtualNetworkGateways API service call. type VirtualNetworkGatewayListResult struct { autorest.Response `json:"-"` Value *[]VirtualNetworkGateway `json:"value,omitempty"` @@ -2858,8 +3412,7 @@ type VirtualNetworkGatewaySku struct { Capacity *int32 `json:"capacity,omitempty"` } -// VirtualNetworkListResult is response for the ListVirtualNetworks API service -// call. +// VirtualNetworkListResult is response for the ListVirtualNetworks API service call. type VirtualNetworkListResult struct { autorest.Response `json:"-"` Value *[]VirtualNetwork `json:"value,omitempty"` @@ -2878,6 +3431,25 @@ func (client VirtualNetworkListResult) VirtualNetworkListResultPreparer() (*http autorest.WithBaseURL(to.String(client.NextLink))) } +// VirtualNetworkListUsageResult is response for the virtual networks GetUsage API service call. +type VirtualNetworkListUsageResult struct { + autorest.Response `json:"-"` + Value *[]VirtualNetworkUsage `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// VirtualNetworkListUsageResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client VirtualNetworkListUsageResult) VirtualNetworkListUsageResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + // VirtualNetworkPeering is peerings in a virtual network resource. type VirtualNetworkPeering struct { autorest.Response `json:"-"` @@ -2887,8 +3459,8 @@ type VirtualNetworkPeering struct { Etag *string `json:"etag,omitempty"` } -// VirtualNetworkPeeringListResult is response for ListSubnets API service -// call. Retrieves all subnets that belong to a virtual network. +// VirtualNetworkPeeringListResult is response for ListSubnets API service call. Retrieves all subnets that belong to a +// virtual network. type VirtualNetworkPeeringListResult struct { autorest.Response `json:"-"` Value *[]VirtualNetworkPeering `json:"value,omitempty"` @@ -2907,7 +3479,7 @@ func (client VirtualNetworkPeeringListResult) VirtualNetworkPeeringListResultPre autorest.WithBaseURL(to.String(client.NextLink))) } -// VirtualNetworkPeeringPropertiesFormat is +// VirtualNetworkPeeringPropertiesFormat is properties of the virtual network peering. type VirtualNetworkPeeringPropertiesFormat struct { AllowVirtualNetworkAccess *bool `json:"allowVirtualNetworkAccess,omitempty"` AllowForwardedTraffic *bool `json:"allowForwardedTraffic,omitempty"` @@ -2918,7 +3490,7 @@ type VirtualNetworkPeeringPropertiesFormat struct { ProvisioningState *string `json:"provisioningState,omitempty"` } -// VirtualNetworkPropertiesFormat is +// VirtualNetworkPropertiesFormat is properties of the virtual network. 
type VirtualNetworkPropertiesFormat struct { AddressSpace *AddressSpace `json:"addressSpace,omitempty"` DhcpOptions *DhcpOptions `json:"dhcpOptions,omitempty"` @@ -2926,6 +3498,23 @@ type VirtualNetworkPropertiesFormat struct { VirtualNetworkPeerings *[]VirtualNetworkPeering `json:"virtualNetworkPeerings,omitempty"` ResourceGUID *string `json:"resourceGuid,omitempty"` ProvisioningState *string `json:"provisioningState,omitempty"` + EnableDdosProtection *bool `json:"enableDdosProtection,omitempty"` + EnableVMProtection *bool `json:"enableVmProtection,omitempty"` +} + +// VirtualNetworkUsage is usage details for subnet. +type VirtualNetworkUsage struct { + CurrentValue *float64 `json:"currentValue,omitempty"` + ID *string `json:"id,omitempty"` + Limit *float64 `json:"limit,omitempty"` + Name *VirtualNetworkUsageName `json:"name,omitempty"` + Unit *string `json:"unit,omitempty"` +} + +// VirtualNetworkUsageName is usage strings container. +type VirtualNetworkUsageName struct { + LocalizedValue *string `json:"localizedValue,omitempty"` + Value *string `json:"value,omitempty"` } // VpnClientConfiguration is vpnClientConfiguration for P2S client. @@ -2933,15 +3522,20 @@ type VpnClientConfiguration struct { VpnClientAddressPool *AddressSpace `json:"vpnClientAddressPool,omitempty"` VpnClientRootCertificates *[]VpnClientRootCertificate `json:"vpnClientRootCertificates,omitempty"` VpnClientRevokedCertificates *[]VpnClientRevokedCertificate `json:"vpnClientRevokedCertificates,omitempty"` + VpnClientProtocols *[]VpnClientProtocol `json:"vpnClientProtocols,omitempty"` + RadiusServerAddress *string `json:"radiusServerAddress,omitempty"` + RadiusServerSecret *string `json:"radiusServerSecret,omitempty"` } // VpnClientParameters is vpn Client Parameters for package generation type VpnClientParameters struct { - ProcessorArchitecture ProcessorArchitecture `json:"processorArchitecture,omitempty"` + ProcessorArchitecture ProcessorArchitecture `json:"processorArchitecture,omitempty"` + AuthenticationMethod AuthenticationMethod `json:"authenticationMethod,omitempty"` + RadiusServerAuthCertificate *string `json:"radiusServerAuthCertificate,omitempty"` + ClientRootCertificates *[]string `json:"clientRootCertificates,omitempty"` } -// VpnClientRevokedCertificate is vPN client revoked certificate of virtual -// network gateway. +// VpnClientRevokedCertificate is VPN client revoked certificate of virtual network gateway. type VpnClientRevokedCertificate struct { ID *string `json:"id,omitempty"` *VpnClientRevokedCertificatePropertiesFormat `json:"properties,omitempty"` @@ -2949,15 +3543,14 @@ type VpnClientRevokedCertificate struct { Etag *string `json:"etag,omitempty"` } -// VpnClientRevokedCertificatePropertiesFormat is properties of the revoked VPN -// client certificate of virtual network gateway. +// VpnClientRevokedCertificatePropertiesFormat is properties of the revoked VPN client certificate of virtual network +// gateway. 
type VpnClientRevokedCertificatePropertiesFormat struct { Thumbprint *string `json:"thumbprint,omitempty"` ProvisioningState *string `json:"provisioningState,omitempty"` } -// VpnClientRootCertificate is vPN client root certificate of virtual network -// gateway +// VpnClientRootCertificate is VPN client root certificate of virtual network gateway type VpnClientRootCertificate struct { ID *string `json:"id,omitempty"` *VpnClientRootCertificatePropertiesFormat `json:"properties,omitempty"` @@ -2965,13 +3558,19 @@ type VpnClientRootCertificate struct { Etag *string `json:"etag,omitempty"` } -// VpnClientRootCertificatePropertiesFormat is properties of SSL certificates -// of application gateway +// VpnClientRootCertificatePropertiesFormat is properties of SSL certificates of application gateway type VpnClientRootCertificatePropertiesFormat struct { PublicCertData *string `json:"publicCertData,omitempty"` ProvisioningState *string `json:"provisioningState,omitempty"` } +// VpnDeviceScriptParameters is vpn device configuration script generation parameters +type VpnDeviceScriptParameters struct { + Vendor *string `json:"vendor,omitempty"` + DeviceFamily *string `json:"deviceFamily,omitempty"` + FirmwareVersion *string `json:"firmwareVersion,omitempty"` +} + // Watcher is network watcher in a resource group. type Watcher struct { autorest.Response `json:"-"` diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/packetcaptures.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/packetcaptures.go index fbeb0d9ef4b1..1ad97a5e2aa1 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/packetcaptures.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/packetcaptures.go @@ -14,9 +14,8 @@ package network // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/Azure/go-autorest/autorest" @@ -25,31 +24,27 @@ import ( "net/http" ) -// PacketCapturesClient is the composite Swagger for Network Client +// PacketCapturesClient is the network Client type PacketCapturesClient struct { ManagementClient } -// NewPacketCapturesClient creates an instance of the PacketCapturesClient -// client. +// NewPacketCapturesClient creates an instance of the PacketCapturesClient client. func NewPacketCapturesClient(subscriptionID string) PacketCapturesClient { return NewPacketCapturesClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewPacketCapturesClientWithBaseURI creates an instance of the -// PacketCapturesClient client. +// NewPacketCapturesClientWithBaseURI creates an instance of the PacketCapturesClient client. func NewPacketCapturesClientWithBaseURI(baseURI string, subscriptionID string) PacketCapturesClient { return PacketCapturesClient{NewWithBaseURI(baseURI, subscriptionID)} } -// Create create and start a packet capture on the specified VM. This method -// may poll for completion. Polling can be canceled by passing the cancel -// channel argument. The channel will be used to cancel polling and any -// outstanding HTTP requests. +// Create create and start a packet capture on the specified VM. This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. 
The channel will be used to cancel polling and any outstanding HTTP +// requests. // -// resourceGroupName is the name of the resource group. networkWatcherName is -// the name of the network watcher. packetCaptureName is the name of the packet -// capture session. parameters is parameters that define the create packet +// resourceGroupName is the name of the resource group. networkWatcherName is the name of the network watcher. +// packetCaptureName is the name of the packet capture session. parameters is parameters that define the create packet // capture operation. func (client PacketCapturesClient) Create(resourceGroupName string, networkWatcherName string, packetCaptureName string, parameters PacketCapture, cancel <-chan struct{}) (<-chan PacketCaptureResult, <-chan error) { resultChan := make(chan PacketCaptureResult, 1) @@ -70,8 +65,10 @@ func (client PacketCapturesClient) Create(resourceGroupName string, networkWatch var err error var result PacketCaptureResult defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -105,7 +102,7 @@ func (client PacketCapturesClient) CreatePreparer(resourceGroupName string, netw "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -141,14 +138,11 @@ func (client PacketCapturesClient) CreateResponder(resp *http.Response) (result return } -// Delete deletes the specified packet capture session. This method may poll -// for completion. Polling can be canceled by passing the cancel channel -// argument. The channel will be used to cancel polling and any outstanding -// HTTP requests. +// Delete deletes the specified packet capture session. This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. networkWatcherName is -// the name of the network watcher. packetCaptureName is the name of the packet -// capture session. +// resourceGroupName is the name of the resource group. networkWatcherName is the name of the network watcher. +// packetCaptureName is the name of the packet capture session. func (client PacketCapturesClient) Delete(resourceGroupName string, networkWatcherName string, packetCaptureName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { resultChan := make(chan autorest.Response, 1) errChan := make(chan error, 1) @@ -156,8 +150,10 @@ func (client PacketCapturesClient) Delete(resourceGroupName string, networkWatch var err error var result autorest.Response defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -191,7 +187,7 @@ func (client PacketCapturesClient) DeletePreparer(resourceGroupName string, netw "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -226,9 +222,8 @@ func (client PacketCapturesClient) DeleteResponder(resp *http.Response) (result // Get gets a packet capture session by name. // -// resourceGroupName is the name of the resource group. networkWatcherName is -// the name of the network watcher. 
packetCaptureName is the name of the packet -// capture session. +// resourceGroupName is the name of the resource group. networkWatcherName is the name of the network watcher. +// packetCaptureName is the name of the packet capture session. func (client PacketCapturesClient) Get(resourceGroupName string, networkWatcherName string, packetCaptureName string) (result PacketCaptureResult, err error) { req, err := client.GetPreparer(resourceGroupName, networkWatcherName, packetCaptureName) if err != nil { @@ -260,7 +255,7 @@ func (client PacketCapturesClient) GetPreparer(resourceGroupName string, network "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -292,14 +287,12 @@ func (client PacketCapturesClient) GetResponder(resp *http.Response) (result Pac return } -// GetStatus query the status of a running packet capture session. This method -// may poll for completion. Polling can be canceled by passing the cancel -// channel argument. The channel will be used to cancel polling and any -// outstanding HTTP requests. +// GetStatus query the status of a running packet capture session. This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP +// requests. // -// resourceGroupName is the name of the resource group. networkWatcherName is -// the name of the Network Watcher resource. packetCaptureName is the name -// given to the packet capture session. +// resourceGroupName is the name of the resource group. networkWatcherName is the name of the Network Watcher resource. +// packetCaptureName is the name given to the packet capture session. func (client PacketCapturesClient) GetStatus(resourceGroupName string, networkWatcherName string, packetCaptureName string, cancel <-chan struct{}) (<-chan PacketCaptureQueryStatusResult, <-chan error) { resultChan := make(chan PacketCaptureQueryStatusResult, 1) errChan := make(chan error, 1) @@ -307,8 +300,10 @@ func (client PacketCapturesClient) GetStatus(resourceGroupName string, networkWa var err error var result PacketCaptureQueryStatusResult defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -342,7 +337,7 @@ func (client PacketCapturesClient) GetStatusPreparer(resourceGroupName string, n "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -378,8 +373,7 @@ func (client PacketCapturesClient) GetStatusResponder(resp *http.Response) (resu // List lists all packet capture sessions within the specified resource group. // -// resourceGroupName is the name of the resource group. networkWatcherName is -// the name of the Network Watcher resource. +// resourceGroupName is the name of the resource group. networkWatcherName is the name of the Network Watcher resource. 
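For orientation, a sketch of how a caller consumes the channel pair these long-running operations return after this change: the error channel now receives a value only on failure, and both channels are closed when the operation finishes, so draining the result first and the error second is safe in either case. The wrapper and its argument names are placeholders, not code from this repository.

package sketch

import "github.com/Azure/azure-sdk-for-go/arm/network"

// createCapture wraps the polling-style Create call. On failure the error
// channel carries the error and the result channel carries a zero value; on
// success the error channel is simply closed, so the receive below yields nil.
func createCapture(client network.PacketCapturesClient, resourceGroup, watcher, capture string, params network.PacketCapture) (network.PacketCaptureResult, error) {
	cancel := make(chan struct{}) // close this to abort polling and any outstanding requests
	resultChan, errChan := client.Create(resourceGroup, watcher, capture, params, cancel)
	result := <-resultChan
	return result, <-errChan
}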
func (client PacketCapturesClient) List(resourceGroupName string, networkWatcherName string) (result PacketCaptureListResult, err error) { req, err := client.ListPreparer(resourceGroupName, networkWatcherName) if err != nil { @@ -410,7 +404,7 @@ func (client PacketCapturesClient) ListPreparer(resourceGroupName string, networ "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -442,14 +436,11 @@ func (client PacketCapturesClient) ListResponder(resp *http.Response) (result Pa return } -// Stop stops a specified packet capture session. This method may poll for -// completion. Polling can be canceled by passing the cancel channel argument. -// The channel will be used to cancel polling and any outstanding HTTP -// requests. +// Stop stops a specified packet capture session. This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. networkWatcherName is -// the name of the network watcher. packetCaptureName is the name of the packet -// capture session. +// resourceGroupName is the name of the resource group. networkWatcherName is the name of the network watcher. +// packetCaptureName is the name of the packet capture session. func (client PacketCapturesClient) Stop(resourceGroupName string, networkWatcherName string, packetCaptureName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { resultChan := make(chan autorest.Response, 1) errChan := make(chan error, 1) @@ -457,8 +448,10 @@ func (client PacketCapturesClient) Stop(resourceGroupName string, networkWatcher var err error var result autorest.Response defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -492,7 +485,7 @@ func (client PacketCapturesClient) StopPreparer(resourceGroupName string, networ "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/publicipaddresses.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/publicipaddresses.go index 896bc817d91e..af7e7ec85b66 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/publicipaddresses.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/publicipaddresses.go @@ -14,9 +14,8 @@ package network // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/Azure/go-autorest/autorest" @@ -25,31 +24,27 @@ import ( "net/http" ) -// PublicIPAddressesClient is the composite Swagger for Network Client +// PublicIPAddressesClient is the network Client type PublicIPAddressesClient struct { ManagementClient } -// NewPublicIPAddressesClient creates an instance of the -// PublicIPAddressesClient client. 
+// NewPublicIPAddressesClient creates an instance of the PublicIPAddressesClient client. func NewPublicIPAddressesClient(subscriptionID string) PublicIPAddressesClient { return NewPublicIPAddressesClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewPublicIPAddressesClientWithBaseURI creates an instance of the -// PublicIPAddressesClient client. +// NewPublicIPAddressesClientWithBaseURI creates an instance of the PublicIPAddressesClient client. func NewPublicIPAddressesClientWithBaseURI(baseURI string, subscriptionID string) PublicIPAddressesClient { return PublicIPAddressesClient{NewWithBaseURI(baseURI, subscriptionID)} } -// CreateOrUpdate creates or updates a static or dynamic public IP address. -// This method may poll for completion. Polling can be canceled by passing the -// cancel channel argument. The channel will be used to cancel polling and any +// CreateOrUpdate creates or updates a static or dynamic public IP address. This method may poll for completion. +// Polling can be canceled by passing the cancel channel argument. The channel will be used to cancel polling and any // outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. publicIPAddressName is -// the name of the public IP address. parameters is parameters supplied to the -// create or update public IP address operation. +// resourceGroupName is the name of the resource group. publicIPAddressName is the name of the public IP address. +// parameters is parameters supplied to the create or update public IP address operation. func (client PublicIPAddressesClient) CreateOrUpdate(resourceGroupName string, publicIPAddressName string, parameters PublicIPAddress, cancel <-chan struct{}) (<-chan PublicIPAddress, <-chan error) { resultChan := make(chan PublicIPAddress, 1) errChan := make(chan error, 1) @@ -71,8 +66,10 @@ func (client PublicIPAddressesClient) CreateOrUpdate(resourceGroupName string, p var err error var result PublicIPAddress defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -105,7 +102,7 @@ func (client PublicIPAddressesClient) CreateOrUpdatePreparer(resourceGroupName s "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -141,13 +138,10 @@ func (client PublicIPAddressesClient) CreateOrUpdateResponder(resp *http.Respons return } -// Delete deletes the specified public IP address. This method may poll for -// completion. Polling can be canceled by passing the cancel channel argument. -// The channel will be used to cancel polling and any outstanding HTTP -// requests. +// Delete deletes the specified public IP address. This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. publicIPAddressName is -// the name of the subnet. +// resourceGroupName is the name of the resource group. publicIPAddressName is the name of the subnet. 
func (client PublicIPAddressesClient) Delete(resourceGroupName string, publicIPAddressName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { resultChan := make(chan autorest.Response, 1) errChan := make(chan error, 1) @@ -155,8 +149,10 @@ func (client PublicIPAddressesClient) Delete(resourceGroupName string, publicIPA var err error var result autorest.Response defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -189,7 +185,7 @@ func (client PublicIPAddressesClient) DeletePreparer(resourceGroupName string, p "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -224,8 +220,8 @@ func (client PublicIPAddressesClient) DeleteResponder(resp *http.Response) (resu // Get gets the specified public IP address in a specified resource group. // -// resourceGroupName is the name of the resource group. publicIPAddressName is -// the name of the subnet. expand is expands referenced resources. +// resourceGroupName is the name of the resource group. publicIPAddressName is the name of the subnet. expand is +// expands referenced resources. func (client PublicIPAddressesClient) Get(resourceGroupName string, publicIPAddressName string, expand string) (result PublicIPAddress, err error) { req, err := client.GetPreparer(resourceGroupName, publicIPAddressName, expand) if err != nil { @@ -256,7 +252,7 @@ func (client PublicIPAddressesClient) GetPreparer(resourceGroupName string, publ "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -291,6 +287,81 @@ func (client PublicIPAddressesClient) GetResponder(resp *http.Response) (result return } +// GetVirtualMachineScaleSetPublicIPAddress get the specified public IP address in a virtual machine scale set. +// +// resourceGroupName is the name of the resource group. virtualMachineScaleSetName is the name of the virtual machine +// scale set. virtualmachineIndex is the virtual machine index. networkInterfaceName is the name of the network +// interface. IPConfigurationName is the name of the IP configuration. publicIPAddressName is the name of the public IP +// Address. expand is expands referenced resources. 
+func (client PublicIPAddressesClient) GetVirtualMachineScaleSetPublicIPAddress(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, IPConfigurationName string, publicIPAddressName string, expand string) (result PublicIPAddress, err error) { + req, err := client.GetVirtualMachineScaleSetPublicIPAddressPreparer(resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName, IPConfigurationName, publicIPAddressName, expand) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "GetVirtualMachineScaleSetPublicIPAddress", nil, "Failure preparing request") + return + } + + resp, err := client.GetVirtualMachineScaleSetPublicIPAddressSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "GetVirtualMachineScaleSetPublicIPAddress", resp, "Failure sending request") + return + } + + result, err = client.GetVirtualMachineScaleSetPublicIPAddressResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "GetVirtualMachineScaleSetPublicIPAddress", resp, "Failure responding to request") + } + + return +} + +// GetVirtualMachineScaleSetPublicIPAddressPreparer prepares the GetVirtualMachineScaleSetPublicIPAddress request. +func (client PublicIPAddressesClient) GetVirtualMachineScaleSetPublicIPAddressPreparer(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, IPConfigurationName string, publicIPAddressName string, expand string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "ipConfigurationName": autorest.Encode("path", IPConfigurationName), + "networkInterfaceName": autorest.Encode("path", networkInterfaceName), + "publicIpAddressName": autorest.Encode("path", publicIPAddressName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualmachineIndex": autorest.Encode("path", virtualmachineIndex), + "virtualMachineScaleSetName": autorest.Encode("path", virtualMachineScaleSetName), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}/ipconfigurations/{ipConfigurationName}/publicipaddresses/{publicIpAddressName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetVirtualMachineScaleSetPublicIPAddressSender sends the GetVirtualMachineScaleSetPublicIPAddress request. The method will close the +// http.Response Body if it receives an error. +func (client PublicIPAddressesClient) GetVirtualMachineScaleSetPublicIPAddressSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetVirtualMachineScaleSetPublicIPAddressResponder handles the response to the GetVirtualMachineScaleSetPublicIPAddress request. 
The method always +// closes the http.Response Body. +func (client PublicIPAddressesClient) GetVirtualMachineScaleSetPublicIPAddressResponder(resp *http.Response) (result PublicIPAddress, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + // List gets all public IP addresses in a resource group. // // resourceGroupName is the name of the resource group. @@ -323,7 +394,7 @@ func (client PublicIPAddressesClient) ListPreparer(resourceGroupName string) (*h "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -379,6 +450,51 @@ func (client PublicIPAddressesClient) ListNextResults(lastResults PublicIPAddres return } +// ListComplete gets all elements from the list without paging. +func (client PublicIPAddressesClient) ListComplete(resourceGroupName string, cancel <-chan struct{}) (<-chan PublicIPAddress, <-chan error) { + resultChan := make(chan PublicIPAddress) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List(resourceGroupName) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} + // ListAll gets all the public IP addresses in a subscription. func (client PublicIPAddressesClient) ListAll() (result PublicIPAddressListResult, err error) { req, err := client.ListAllPreparer() @@ -408,7 +524,7 @@ func (client PublicIPAddressesClient) ListAllPreparer() (*http.Request, error) { "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -463,3 +579,324 @@ func (client PublicIPAddressesClient) ListAllNextResults(lastResults PublicIPAdd return } + +// ListAllComplete gets all elements from the list without paging. 
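The new *Complete variants introduced in this revision (ListComplete, ListAllComplete, and the scale-set listings that follow) page through results internally and stream each item over a channel until the cancel channel is closed or the listing is exhausted. One possible way to drain such a method, using only the signature shown in this diff and placeholder names:

package sketch

import "github.com/Azure/azure-sdk-for-go/arm/network"

// countPublicIPs drains ListAllComplete. Closing done tells the producer
// goroutine to stop early; the buffered error channel holds at most one error
// (from the initial call or a NextLink fetch) and reads as nil otherwise.
func countPublicIPs(client network.PublicIPAddressesClient) (int, error) {
	done := make(chan struct{})
	defer close(done)
	ipChan, errChan := client.ListAllComplete(done)
	n := 0
	for range ipChan { // items arrive page by page
		n++
	}
	return n, <-errChan
}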
+func (client PublicIPAddressesClient) ListAllComplete(cancel <-chan struct{}) (<-chan PublicIPAddress, <-chan error) { + resultChan := make(chan PublicIPAddress) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.ListAll() + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListAllNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} + +// ListVirtualMachineScaleSetPublicIPAddresses gets information about all public IP addresses on a virtual machine +// scale set level. +// +// resourceGroupName is the name of the resource group. virtualMachineScaleSetName is the name of the virtual machine +// scale set. +func (client PublicIPAddressesClient) ListVirtualMachineScaleSetPublicIPAddresses(resourceGroupName string, virtualMachineScaleSetName string) (result PublicIPAddressListResult, err error) { + req, err := client.ListVirtualMachineScaleSetPublicIPAddressesPreparer(resourceGroupName, virtualMachineScaleSetName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "ListVirtualMachineScaleSetPublicIPAddresses", nil, "Failure preparing request") + return + } + + resp, err := client.ListVirtualMachineScaleSetPublicIPAddressesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "ListVirtualMachineScaleSetPublicIPAddresses", resp, "Failure sending request") + return + } + + result, err = client.ListVirtualMachineScaleSetPublicIPAddressesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "ListVirtualMachineScaleSetPublicIPAddresses", resp, "Failure responding to request") + } + + return +} + +// ListVirtualMachineScaleSetPublicIPAddressesPreparer prepares the ListVirtualMachineScaleSetPublicIPAddresses request. +func (client PublicIPAddressesClient) ListVirtualMachineScaleSetPublicIPAddressesPreparer(resourceGroupName string, virtualMachineScaleSetName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualMachineScaleSetName": autorest.Encode("path", virtualMachineScaleSetName), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/publicipaddresses", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListVirtualMachineScaleSetPublicIPAddressesSender sends the ListVirtualMachineScaleSetPublicIPAddresses request. The method will close the +// http.Response Body if it receives an error. 
+func (client PublicIPAddressesClient) ListVirtualMachineScaleSetPublicIPAddressesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListVirtualMachineScaleSetPublicIPAddressesResponder handles the response to the ListVirtualMachineScaleSetPublicIPAddresses request. The method always +// closes the http.Response Body. +func (client PublicIPAddressesClient) ListVirtualMachineScaleSetPublicIPAddressesResponder(resp *http.Response) (result PublicIPAddressListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListVirtualMachineScaleSetPublicIPAddressesNextResults retrieves the next set of results, if any. +func (client PublicIPAddressesClient) ListVirtualMachineScaleSetPublicIPAddressesNextResults(lastResults PublicIPAddressListResult) (result PublicIPAddressListResult, err error) { + req, err := lastResults.PublicIPAddressListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "ListVirtualMachineScaleSetPublicIPAddresses", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListVirtualMachineScaleSetPublicIPAddressesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "ListVirtualMachineScaleSetPublicIPAddresses", resp, "Failure sending next results request") + } + + result, err = client.ListVirtualMachineScaleSetPublicIPAddressesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "ListVirtualMachineScaleSetPublicIPAddresses", resp, "Failure responding to next results request") + } + + return +} + +// ListVirtualMachineScaleSetPublicIPAddressesComplete gets all elements from the list without paging. +func (client PublicIPAddressesClient) ListVirtualMachineScaleSetPublicIPAddressesComplete(resourceGroupName string, virtualMachineScaleSetName string, cancel <-chan struct{}) (<-chan PublicIPAddress, <-chan error) { + resultChan := make(chan PublicIPAddress) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.ListVirtualMachineScaleSetPublicIPAddresses(resourceGroupName, virtualMachineScaleSetName) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListVirtualMachineScaleSetPublicIPAddressesNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} + +// ListVirtualMachineScaleSetVMPublicIPAddresses gets information about all public IP addresses in a virtual machine IP +// configuration in a virtual machine scale set. +// +// resourceGroupName is the name of the resource group. virtualMachineScaleSetName is the name of the virtual machine +// scale set. virtualmachineIndex is the virtual machine index. 
networkInterfaceName is the network interface name. +// IPConfigurationName is the IP configuration name. +func (client PublicIPAddressesClient) ListVirtualMachineScaleSetVMPublicIPAddresses(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, IPConfigurationName string) (result PublicIPAddressListResult, err error) { + req, err := client.ListVirtualMachineScaleSetVMPublicIPAddressesPreparer(resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName, IPConfigurationName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "ListVirtualMachineScaleSetVMPublicIPAddresses", nil, "Failure preparing request") + return + } + + resp, err := client.ListVirtualMachineScaleSetVMPublicIPAddressesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "ListVirtualMachineScaleSetVMPublicIPAddresses", resp, "Failure sending request") + return + } + + result, err = client.ListVirtualMachineScaleSetVMPublicIPAddressesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "ListVirtualMachineScaleSetVMPublicIPAddresses", resp, "Failure responding to request") + } + + return +} + +// ListVirtualMachineScaleSetVMPublicIPAddressesPreparer prepares the ListVirtualMachineScaleSetVMPublicIPAddresses request. +func (client PublicIPAddressesClient) ListVirtualMachineScaleSetVMPublicIPAddressesPreparer(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, IPConfigurationName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "ipConfigurationName": autorest.Encode("path", IPConfigurationName), + "networkInterfaceName": autorest.Encode("path", networkInterfaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualmachineIndex": autorest.Encode("path", virtualmachineIndex), + "virtualMachineScaleSetName": autorest.Encode("path", virtualMachineScaleSetName), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}/ipconfigurations/{ipConfigurationName}/publicipaddresses", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListVirtualMachineScaleSetVMPublicIPAddressesSender sends the ListVirtualMachineScaleSetVMPublicIPAddresses request. The method will close the +// http.Response Body if it receives an error. +func (client PublicIPAddressesClient) ListVirtualMachineScaleSetVMPublicIPAddressesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListVirtualMachineScaleSetVMPublicIPAddressesResponder handles the response to the ListVirtualMachineScaleSetVMPublicIPAddresses request. The method always +// closes the http.Response Body. 
+func (client PublicIPAddressesClient) ListVirtualMachineScaleSetVMPublicIPAddressesResponder(resp *http.Response) (result PublicIPAddressListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListVirtualMachineScaleSetVMPublicIPAddressesNextResults retrieves the next set of results, if any. +func (client PublicIPAddressesClient) ListVirtualMachineScaleSetVMPublicIPAddressesNextResults(lastResults PublicIPAddressListResult) (result PublicIPAddressListResult, err error) { + req, err := lastResults.PublicIPAddressListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "ListVirtualMachineScaleSetVMPublicIPAddresses", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListVirtualMachineScaleSetVMPublicIPAddressesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "ListVirtualMachineScaleSetVMPublicIPAddresses", resp, "Failure sending next results request") + } + + result, err = client.ListVirtualMachineScaleSetVMPublicIPAddressesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "ListVirtualMachineScaleSetVMPublicIPAddresses", resp, "Failure responding to next results request") + } + + return +} + +// ListVirtualMachineScaleSetVMPublicIPAddressesComplete gets all elements from the list without paging. +func (client PublicIPAddressesClient) ListVirtualMachineScaleSetVMPublicIPAddressesComplete(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, IPConfigurationName string, cancel <-chan struct{}) (<-chan PublicIPAddress, <-chan error) { + resultChan := make(chan PublicIPAddress) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.ListVirtualMachineScaleSetVMPublicIPAddresses(resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName, IPConfigurationName) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListVirtualMachineScaleSetVMPublicIPAddressesNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routefilterrules.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routefilterrules.go index 378a75bb2f12..7c0ab17904c8 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routefilterrules.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routefilterrules.go @@ -14,9 +14,8 @@ package network // See the License for the specific language governing permissions and // limitations under the License. 
// -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/Azure/go-autorest/autorest" @@ -25,32 +24,28 @@ import ( "net/http" ) -// RouteFilterRulesClient is the composite Swagger for Network Client +// RouteFilterRulesClient is the network Client type RouteFilterRulesClient struct { ManagementClient } -// NewRouteFilterRulesClient creates an instance of the RouteFilterRulesClient -// client. +// NewRouteFilterRulesClient creates an instance of the RouteFilterRulesClient client. func NewRouteFilterRulesClient(subscriptionID string) RouteFilterRulesClient { return NewRouteFilterRulesClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewRouteFilterRulesClientWithBaseURI creates an instance of the -// RouteFilterRulesClient client. +// NewRouteFilterRulesClientWithBaseURI creates an instance of the RouteFilterRulesClient client. func NewRouteFilterRulesClientWithBaseURI(baseURI string, subscriptionID string) RouteFilterRulesClient { return RouteFilterRulesClient{NewWithBaseURI(baseURI, subscriptionID)} } -// CreateOrUpdate creates or updates a route in the specified route filter. -// This method may poll for completion. Polling can be canceled by passing the -// cancel channel argument. The channel will be used to cancel polling and any +// CreateOrUpdate creates or updates a route in the specified route filter. This method may poll for completion. +// Polling can be canceled by passing the cancel channel argument. The channel will be used to cancel polling and any // outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. routeFilterName is the -// name of the route filter. ruleName is the name of the route filter rule. -// routeFilterRuleParameters is parameters supplied to the create or update -// route filter rule operation. +// resourceGroupName is the name of the resource group. routeFilterName is the name of the route filter. ruleName is +// the name of the route filter rule. routeFilterRuleParameters is parameters supplied to the create or update route +// filter rule operation. func (client RouteFilterRulesClient) CreateOrUpdate(resourceGroupName string, routeFilterName string, ruleName string, routeFilterRuleParameters RouteFilterRule, cancel <-chan struct{}) (<-chan RouteFilterRule, <-chan error) { resultChan := make(chan RouteFilterRule, 1) errChan := make(chan error, 1) @@ -70,8 +65,10 @@ func (client RouteFilterRulesClient) CreateOrUpdate(resourceGroupName string, ro var err error var result RouteFilterRule defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -105,7 +102,7 @@ func (client RouteFilterRulesClient) CreateOrUpdatePreparer(resourceGroupName st "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -141,13 +138,12 @@ func (client RouteFilterRulesClient) CreateOrUpdateResponder(resp *http.Response return } -// Delete deletes the specified rule from a route filter. This method may poll -// for completion. Polling can be canceled by passing the cancel channel -// argument. 
The channel will be used to cancel polling and any outstanding -// HTTP requests. +// Delete deletes the specified rule from a route filter. This method may poll for completion. Polling can be canceled +// by passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP +// requests. // -// resourceGroupName is the name of the resource group. routeFilterName is the -// name of the route filter. ruleName is the name of the rule. +// resourceGroupName is the name of the resource group. routeFilterName is the name of the route filter. ruleName is +// the name of the rule. func (client RouteFilterRulesClient) Delete(resourceGroupName string, routeFilterName string, ruleName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { resultChan := make(chan autorest.Response, 1) errChan := make(chan error, 1) @@ -155,8 +151,10 @@ func (client RouteFilterRulesClient) Delete(resourceGroupName string, routeFilte var err error var result autorest.Response defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -190,7 +188,7 @@ func (client RouteFilterRulesClient) DeletePreparer(resourceGroupName string, ro "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -225,8 +223,8 @@ func (client RouteFilterRulesClient) DeleteResponder(resp *http.Response) (resul // Get gets the specified rule from a route filter. // -// resourceGroupName is the name of the resource group. routeFilterName is the -// name of the route filter. ruleName is the name of the rule. +// resourceGroupName is the name of the resource group. routeFilterName is the name of the route filter. ruleName is +// the name of the rule. func (client RouteFilterRulesClient) Get(resourceGroupName string, routeFilterName string, ruleName string) (result RouteFilterRule, err error) { req, err := client.GetPreparer(resourceGroupName, routeFilterName, ruleName) if err != nil { @@ -258,7 +256,7 @@ func (client RouteFilterRulesClient) GetPreparer(resourceGroupName string, route "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -292,8 +290,7 @@ func (client RouteFilterRulesClient) GetResponder(resp *http.Response) (result R // ListByRouteFilter gets all RouteFilterRules in a route filter. // -// resourceGroupName is the name of the resource group. routeFilterName is the -// name of the route filter. +// resourceGroupName is the name of the resource group. routeFilterName is the name of the route filter. 
func (client RouteFilterRulesClient) ListByRouteFilter(resourceGroupName string, routeFilterName string) (result RouteFilterRuleListResult, err error) { req, err := client.ListByRouteFilterPreparer(resourceGroupName, routeFilterName) if err != nil { @@ -324,7 +321,7 @@ func (client RouteFilterRulesClient) ListByRouteFilterPreparer(resourceGroupName "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -380,15 +377,58 @@ func (client RouteFilterRulesClient) ListByRouteFilterNextResults(lastResults Ro return } -// Update updates a route in the specified route filter. This method may poll -// for completion. Polling can be canceled by passing the cancel channel -// argument. The channel will be used to cancel polling and any outstanding -// HTTP requests. +// ListByRouteFilterComplete gets all elements from the list without paging. +func (client RouteFilterRulesClient) ListByRouteFilterComplete(resourceGroupName string, routeFilterName string, cancel <-chan struct{}) (<-chan RouteFilterRule, <-chan error) { + resultChan := make(chan RouteFilterRule) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.ListByRouteFilter(resourceGroupName, routeFilterName) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListByRouteFilterNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} + +// Update updates a route in the specified route filter. This method may poll for completion. Polling can be canceled +// by passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP +// requests. // -// resourceGroupName is the name of the resource group. routeFilterName is the -// name of the route filter. ruleName is the name of the route filter rule. -// routeFilterRuleParameters is parameters supplied to the update route filter -// rule operation. +// resourceGroupName is the name of the resource group. routeFilterName is the name of the route filter. ruleName is +// the name of the route filter rule. routeFilterRuleParameters is parameters supplied to the update route filter rule +// operation. 
func (client RouteFilterRulesClient) Update(resourceGroupName string, routeFilterName string, ruleName string, routeFilterRuleParameters PatchRouteFilterRule, cancel <-chan struct{}) (<-chan RouteFilterRule, <-chan error) { resultChan := make(chan RouteFilterRule, 1) errChan := make(chan error, 1) @@ -396,8 +436,10 @@ func (client RouteFilterRulesClient) Update(resourceGroupName string, routeFilte var err error var result RouteFilterRule defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -431,7 +473,7 @@ func (client RouteFilterRulesClient) UpdatePreparer(resourceGroupName string, ro "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routefilters.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routefilters.go index 860ccae5c394..711e58fc9c98 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routefilters.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routefilters.go @@ -14,9 +14,8 @@ package network // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/Azure/go-autorest/autorest" @@ -24,7 +23,7 @@ import ( "net/http" ) -// RouteFiltersClient is the composite Swagger for Network Client +// RouteFiltersClient is the network Client type RouteFiltersClient struct { ManagementClient } @@ -34,20 +33,17 @@ func NewRouteFiltersClient(subscriptionID string) RouteFiltersClient { return NewRouteFiltersClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewRouteFiltersClientWithBaseURI creates an instance of the -// RouteFiltersClient client. +// NewRouteFiltersClientWithBaseURI creates an instance of the RouteFiltersClient client. func NewRouteFiltersClientWithBaseURI(baseURI string, subscriptionID string) RouteFiltersClient { return RouteFiltersClient{NewWithBaseURI(baseURI, subscriptionID)} } -// CreateOrUpdate creates or updates a route filter in a specified resource -// group. This method may poll for completion. Polling can be canceled by -// passing the cancel channel argument. The channel will be used to cancel -// polling and any outstanding HTTP requests. +// CreateOrUpdate creates or updates a route filter in a specified resource group. This method may poll for completion. +// Polling can be canceled by passing the cancel channel argument. The channel will be used to cancel polling and any +// outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. routeFilterName is the -// name of the route filter. routeFilterParameters is parameters supplied to -// the create or update route filter operation. +// resourceGroupName is the name of the resource group. routeFilterName is the name of the route filter. +// routeFilterParameters is parameters supplied to the create or update route filter operation. 
func (client RouteFiltersClient) CreateOrUpdate(resourceGroupName string, routeFilterName string, routeFilterParameters RouteFilter, cancel <-chan struct{}) (<-chan RouteFilter, <-chan error) { resultChan := make(chan RouteFilter, 1) errChan := make(chan error, 1) @@ -55,8 +51,10 @@ func (client RouteFiltersClient) CreateOrUpdate(resourceGroupName string, routeF var err error var result RouteFilter defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -89,7 +87,7 @@ func (client RouteFiltersClient) CreateOrUpdatePreparer(resourceGroupName string "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -125,13 +123,10 @@ func (client RouteFiltersClient) CreateOrUpdateResponder(resp *http.Response) (r return } -// Delete deletes the specified route filter. This method may poll for -// completion. Polling can be canceled by passing the cancel channel argument. -// The channel will be used to cancel polling and any outstanding HTTP -// requests. +// Delete deletes the specified route filter. This method may poll for completion. Polling can be canceled by passing +// the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. routeFilterName is the -// name of the route filter. +// resourceGroupName is the name of the resource group. routeFilterName is the name of the route filter. func (client RouteFiltersClient) Delete(resourceGroupName string, routeFilterName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { resultChan := make(chan autorest.Response, 1) errChan := make(chan error, 1) @@ -139,8 +134,10 @@ func (client RouteFiltersClient) Delete(resourceGroupName string, routeFilterNam var err error var result autorest.Response defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -173,7 +170,7 @@ func (client RouteFiltersClient) DeletePreparer(resourceGroupName string, routeF "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -208,9 +205,8 @@ func (client RouteFiltersClient) DeleteResponder(resp *http.Response) (result au // Get gets the specified route filter. // -// resourceGroupName is the name of the resource group. routeFilterName is the -// name of the route filter. expand is expands referenced express route bgp -// peering resources. +// resourceGroupName is the name of the resource group. routeFilterName is the name of the route filter. expand is +// expands referenced express route bgp peering resources. 
func (client RouteFiltersClient) Get(resourceGroupName string, routeFilterName string, expand string) (result RouteFilter, err error) { req, err := client.GetPreparer(resourceGroupName, routeFilterName, expand) if err != nil { @@ -241,7 +237,7 @@ func (client RouteFiltersClient) GetPreparer(resourceGroupName string, routeFilt "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -305,7 +301,7 @@ func (client RouteFiltersClient) ListPreparer() (*http.Request, error) { "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -361,6 +357,51 @@ func (client RouteFiltersClient) ListNextResults(lastResults RouteFilterListResu return } +// ListComplete gets all elements from the list without paging. +func (client RouteFiltersClient) ListComplete(cancel <-chan struct{}) (<-chan RouteFilter, <-chan error) { + resultChan := make(chan RouteFilter) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List() + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} + // ListByResourceGroup gets all route filters in a resource group. // // resourceGroupName is the name of the resource group. @@ -393,7 +434,7 @@ func (client RouteFiltersClient) ListByResourceGroupPreparer(resourceGroupName s "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -449,14 +490,57 @@ func (client RouteFiltersClient) ListByResourceGroupNextResults(lastResults Rout return } -// Update updates a route filter in a specified resource group. This method may -// poll for completion. Polling can be canceled by passing the cancel channel -// argument. The channel will be used to cancel polling and any outstanding -// HTTP requests. +// ListByResourceGroupComplete gets all elements from the list without paging. 
+func (client RouteFiltersClient) ListByResourceGroupComplete(resourceGroupName string, cancel <-chan struct{}) (<-chan RouteFilter, <-chan error) { + resultChan := make(chan RouteFilter) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.ListByResourceGroup(resourceGroupName) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListByResourceGroupNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} + +// Update updates a route filter in a specified resource group. This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP +// requests. // -// resourceGroupName is the name of the resource group. routeFilterName is the -// name of the route filter. routeFilterParameters is parameters supplied to -// the update route filter operation. +// resourceGroupName is the name of the resource group. routeFilterName is the name of the route filter. +// routeFilterParameters is parameters supplied to the update route filter operation. func (client RouteFiltersClient) Update(resourceGroupName string, routeFilterName string, routeFilterParameters PatchRouteFilter, cancel <-chan struct{}) (<-chan RouteFilter, <-chan error) { resultChan := make(chan RouteFilter, 1) errChan := make(chan error, 1) @@ -464,8 +548,10 @@ func (client RouteFiltersClient) Update(resourceGroupName string, routeFilterNam var err error var result RouteFilter defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -498,7 +584,7 @@ func (client RouteFiltersClient) UpdatePreparer(resourceGroupName string, routeF "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routes.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routes.go index 1366d3c14472..0dcde6fa01d0 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routes.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routes.go @@ -14,9 +14,8 @@ package network // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
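A second pattern repeated across the long-running CreateOrUpdate, Update, and Delete methods in this diff is the revised deferred send: the error channel now receives a value only when err != nil, while the buffered result channel still receives exactly one value and both channels are then closed. The sketch below shows the caller-side consequence using RouteFiltersClient.CreateOrUpdate from the hunks above; the createRouteFilter wrapper, the example location, and the use of the autorest `to` helpers are illustrative assumptions, not part of this diff.

package example

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/arm/network"
	"github.com/Azure/go-autorest/autorest/to"
)

// createRouteFilter is a hypothetical caller showing the revised channel
// contract: resultChan always yields one value, errChan yields a value only
// on failure, and both channels are closed afterwards.
func createRouteFilter(c network.RouteFiltersClient, resourceGroup, name string) error {
	cancel := make(chan struct{}) // closing this would abort polling and outstanding requests

	filter := network.RouteFilter{
		Location: to.StringPtr("westus"), // placeholder location; Location field assumed from the package's models
	}

	resultChan, errChan := c.CreateOrUpdate(resourceGroup, name, filter, cancel)

	result := <-resultChan // zero value when the operation failed
	if err := <-errChan; err != nil { // nil on success: the deferred send happens only when err != nil
		return err
	}
	fmt.Printf("route filter %s provisioned\n", to.String(result.Name))
	return nil
}

Because the error channel is closed after the deferred function runs, receiving from it returns nil on success, so existing callers that read the result first and then the error keep working under the new send-only-on-error behavior.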
import ( "github.com/Azure/go-autorest/autorest" @@ -24,7 +23,7 @@ import ( "net/http" ) -// RoutesClient is the composite Swagger for Network Client +// RoutesClient is the network Client type RoutesClient struct { ManagementClient } @@ -39,14 +38,12 @@ func NewRoutesClientWithBaseURI(baseURI string, subscriptionID string) RoutesCli return RoutesClient{NewWithBaseURI(baseURI, subscriptionID)} } -// CreateOrUpdate creates or updates a route in the specified route table. This -// method may poll for completion. Polling can be canceled by passing the -// cancel channel argument. The channel will be used to cancel polling and any +// CreateOrUpdate creates or updates a route in the specified route table. This method may poll for completion. Polling +// can be canceled by passing the cancel channel argument. The channel will be used to cancel polling and any // outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. routeTableName is the -// name of the route table. routeName is the name of the route. routeParameters -// is parameters supplied to the create or update route operation. +// resourceGroupName is the name of the resource group. routeTableName is the name of the route table. routeName is the +// name of the route. routeParameters is parameters supplied to the create or update route operation. func (client RoutesClient) CreateOrUpdate(resourceGroupName string, routeTableName string, routeName string, routeParameters Route, cancel <-chan struct{}) (<-chan Route, <-chan error) { resultChan := make(chan Route, 1) errChan := make(chan error, 1) @@ -54,8 +51,10 @@ func (client RoutesClient) CreateOrUpdate(resourceGroupName string, routeTableNa var err error var result Route defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -89,7 +88,7 @@ func (client RoutesClient) CreateOrUpdatePreparer(resourceGroupName string, rout "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -125,13 +124,12 @@ func (client RoutesClient) CreateOrUpdateResponder(resp *http.Response) (result return } -// Delete deletes the specified route from a route table. This method may poll -// for completion. Polling can be canceled by passing the cancel channel -// argument. The channel will be used to cancel polling and any outstanding -// HTTP requests. +// Delete deletes the specified route from a route table. This method may poll for completion. Polling can be canceled +// by passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP +// requests. // -// resourceGroupName is the name of the resource group. routeTableName is the -// name of the route table. routeName is the name of the route. +// resourceGroupName is the name of the resource group. routeTableName is the name of the route table. routeName is the +// name of the route. 
func (client RoutesClient) Delete(resourceGroupName string, routeTableName string, routeName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { resultChan := make(chan autorest.Response, 1) errChan := make(chan error, 1) @@ -139,8 +137,10 @@ func (client RoutesClient) Delete(resourceGroupName string, routeTableName strin var err error var result autorest.Response defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -174,7 +174,7 @@ func (client RoutesClient) DeletePreparer(resourceGroupName string, routeTableNa "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -209,8 +209,8 @@ func (client RoutesClient) DeleteResponder(resp *http.Response) (result autorest // Get gets the specified route from a route table. // -// resourceGroupName is the name of the resource group. routeTableName is the -// name of the route table. routeName is the name of the route. +// resourceGroupName is the name of the resource group. routeTableName is the name of the route table. routeName is the +// name of the route. func (client RoutesClient) Get(resourceGroupName string, routeTableName string, routeName string) (result Route, err error) { req, err := client.GetPreparer(resourceGroupName, routeTableName, routeName) if err != nil { @@ -242,7 +242,7 @@ func (client RoutesClient) GetPreparer(resourceGroupName string, routeTableName "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -276,8 +276,7 @@ func (client RoutesClient) GetResponder(resp *http.Response) (result Route, err // List gets all routes in a route table. // -// resourceGroupName is the name of the resource group. routeTableName is the -// name of the route table. +// resourceGroupName is the name of the resource group. routeTableName is the name of the route table. func (client RoutesClient) List(resourceGroupName string, routeTableName string) (result RouteListResult, err error) { req, err := client.ListPreparer(resourceGroupName, routeTableName) if err != nil { @@ -308,7 +307,7 @@ func (client RoutesClient) ListPreparer(resourceGroupName string, routeTableName "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -363,3 +362,48 @@ func (client RoutesClient) ListNextResults(lastResults RouteListResult) (result return } + +// ListComplete gets all elements from the list without paging. 
+func (client RoutesClient) ListComplete(resourceGroupName string, routeTableName string, cancel <-chan struct{}) (<-chan Route, <-chan error) { + resultChan := make(chan Route) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List(resourceGroupName, routeTableName) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routetables.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routetables.go index 80339f207869..e9b557bc0b37 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routetables.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routetables.go @@ -14,9 +14,8 @@ package network // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/Azure/go-autorest/autorest" @@ -24,7 +23,7 @@ import ( "net/http" ) -// RouteTablesClient is the composite Swagger for Network Client +// RouteTablesClient is the network Client type RouteTablesClient struct { ManagementClient } @@ -34,20 +33,17 @@ func NewRouteTablesClient(subscriptionID string) RouteTablesClient { return NewRouteTablesClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewRouteTablesClientWithBaseURI creates an instance of the RouteTablesClient -// client. +// NewRouteTablesClientWithBaseURI creates an instance of the RouteTablesClient client. func NewRouteTablesClientWithBaseURI(baseURI string, subscriptionID string) RouteTablesClient { return RouteTablesClient{NewWithBaseURI(baseURI, subscriptionID)} } -// CreateOrUpdate create or updates a route table in a specified resource -// group. This method may poll for completion. Polling can be canceled by -// passing the cancel channel argument. The channel will be used to cancel -// polling and any outstanding HTTP requests. +// CreateOrUpdate create or updates a route table in a specified resource group. This method may poll for completion. +// Polling can be canceled by passing the cancel channel argument. The channel will be used to cancel polling and any +// outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. routeTableName is the -// name of the route table. parameters is parameters supplied to the create or -// update route table operation. +// resourceGroupName is the name of the resource group. routeTableName is the name of the route table. parameters is +// parameters supplied to the create or update route table operation. 
func (client RouteTablesClient) CreateOrUpdate(resourceGroupName string, routeTableName string, parameters RouteTable, cancel <-chan struct{}) (<-chan RouteTable, <-chan error) { resultChan := make(chan RouteTable, 1) errChan := make(chan error, 1) @@ -55,8 +51,10 @@ func (client RouteTablesClient) CreateOrUpdate(resourceGroupName string, routeTa var err error var result RouteTable defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -89,7 +87,7 @@ func (client RouteTablesClient) CreateOrUpdatePreparer(resourceGroupName string, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -125,13 +123,10 @@ func (client RouteTablesClient) CreateOrUpdateResponder(resp *http.Response) (re return } -// Delete deletes the specified route table. This method may poll for -// completion. Polling can be canceled by passing the cancel channel argument. -// The channel will be used to cancel polling and any outstanding HTTP -// requests. +// Delete deletes the specified route table. This method may poll for completion. Polling can be canceled by passing +// the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. routeTableName is the -// name of the route table. +// resourceGroupName is the name of the resource group. routeTableName is the name of the route table. func (client RouteTablesClient) Delete(resourceGroupName string, routeTableName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { resultChan := make(chan autorest.Response, 1) errChan := make(chan error, 1) @@ -139,8 +134,10 @@ func (client RouteTablesClient) Delete(resourceGroupName string, routeTableName var err error var result autorest.Response defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -173,7 +170,7 @@ func (client RouteTablesClient) DeletePreparer(resourceGroupName string, routeTa "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -208,8 +205,8 @@ func (client RouteTablesClient) DeleteResponder(resp *http.Response) (result aut // Get gets the specified route table. // -// resourceGroupName is the name of the resource group. routeTableName is the -// name of the route table. expand is expands referenced resources. +// resourceGroupName is the name of the resource group. routeTableName is the name of the route table. expand is +// expands referenced resources. 
func (client RouteTablesClient) Get(resourceGroupName string, routeTableName string, expand string) (result RouteTable, err error) { req, err := client.GetPreparer(resourceGroupName, routeTableName, expand) if err != nil { @@ -240,7 +237,7 @@ func (client RouteTablesClient) GetPreparer(resourceGroupName string, routeTable "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -307,7 +304,7 @@ func (client RouteTablesClient) ListPreparer(resourceGroupName string) (*http.Re "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -363,6 +360,51 @@ func (client RouteTablesClient) ListNextResults(lastResults RouteTableListResult return } +// ListComplete gets all elements from the list without paging. +func (client RouteTablesClient) ListComplete(resourceGroupName string, cancel <-chan struct{}) (<-chan RouteTable, <-chan error) { + resultChan := make(chan RouteTable) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List(resourceGroupName) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} + // ListAll gets all route tables in a subscription. func (client RouteTablesClient) ListAll() (result RouteTableListResult, err error) { req, err := client.ListAllPreparer() @@ -392,7 +434,7 @@ func (client RouteTablesClient) ListAllPreparer() (*http.Request, error) { "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -447,3 +489,48 @@ func (client RouteTablesClient) ListAllNextResults(lastResults RouteTableListRes return } + +// ListAllComplete gets all elements from the list without paging. 
+func (client RouteTablesClient) ListAllComplete(cancel <-chan struct{}) (<-chan RouteTable, <-chan error) { + resultChan := make(chan RouteTable) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.ListAll() + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListAllNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securitygroups.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securitygroups.go index 3faaf007fffe..b1a5ae4a6e29 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securitygroups.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securitygroups.go @@ -14,9 +14,8 @@ package network // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/Azure/go-autorest/autorest" @@ -24,32 +23,27 @@ import ( "net/http" ) -// SecurityGroupsClient is the composite Swagger for Network Client +// SecurityGroupsClient is the network Client type SecurityGroupsClient struct { ManagementClient } -// NewSecurityGroupsClient creates an instance of the SecurityGroupsClient -// client. +// NewSecurityGroupsClient creates an instance of the SecurityGroupsClient client. func NewSecurityGroupsClient(subscriptionID string) SecurityGroupsClient { return NewSecurityGroupsClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewSecurityGroupsClientWithBaseURI creates an instance of the -// SecurityGroupsClient client. +// NewSecurityGroupsClientWithBaseURI creates an instance of the SecurityGroupsClient client. func NewSecurityGroupsClientWithBaseURI(baseURI string, subscriptionID string) SecurityGroupsClient { return SecurityGroupsClient{NewWithBaseURI(baseURI, subscriptionID)} } -// CreateOrUpdate creates or updates a network security group in the specified -// resource group. This method may poll for completion. Polling can be canceled -// by passing the cancel channel argument. The channel will be used to cancel +// CreateOrUpdate creates or updates a network security group in the specified resource group. This method may poll for +// completion. Polling can be canceled by passing the cancel channel argument. The channel will be used to cancel // polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. -// networkSecurityGroupName is the name of the network security group. -// parameters is parameters supplied to the create or update network security -// group operation. +// resourceGroupName is the name of the resource group. networkSecurityGroupName is the name of the network security +// group. 
parameters is parameters supplied to the create or update network security group operation. func (client SecurityGroupsClient) CreateOrUpdate(resourceGroupName string, networkSecurityGroupName string, parameters SecurityGroup, cancel <-chan struct{}) (<-chan SecurityGroup, <-chan error) { resultChan := make(chan SecurityGroup, 1) errChan := make(chan error, 1) @@ -57,8 +51,10 @@ func (client SecurityGroupsClient) CreateOrUpdate(resourceGroupName string, netw var err error var result SecurityGroup defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -91,7 +87,7 @@ func (client SecurityGroupsClient) CreateOrUpdatePreparer(resourceGroupName stri "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -127,13 +123,11 @@ func (client SecurityGroupsClient) CreateOrUpdateResponder(resp *http.Response) return } -// Delete deletes the specified network security group. This method may poll -// for completion. Polling can be canceled by passing the cancel channel -// argument. The channel will be used to cancel polling and any outstanding -// HTTP requests. +// Delete deletes the specified network security group. This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. -// networkSecurityGroupName is the name of the network security group. +// resourceGroupName is the name of the resource group. networkSecurityGroupName is the name of the network security +// group. func (client SecurityGroupsClient) Delete(resourceGroupName string, networkSecurityGroupName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { resultChan := make(chan autorest.Response, 1) errChan := make(chan error, 1) @@ -141,8 +135,10 @@ func (client SecurityGroupsClient) Delete(resourceGroupName string, networkSecur var err error var result autorest.Response defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -175,7 +171,7 @@ func (client SecurityGroupsClient) DeletePreparer(resourceGroupName string, netw "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -210,9 +206,8 @@ func (client SecurityGroupsClient) DeleteResponder(resp *http.Response) (result // Get gets the specified network security group. // -// resourceGroupName is the name of the resource group. -// networkSecurityGroupName is the name of the network security group. expand -// is expands referenced resources. +// resourceGroupName is the name of the resource group. networkSecurityGroupName is the name of the network security +// group. expand is expands referenced resources. 
func (client SecurityGroupsClient) Get(resourceGroupName string, networkSecurityGroupName string, expand string) (result SecurityGroup, err error) { req, err := client.GetPreparer(resourceGroupName, networkSecurityGroupName, expand) if err != nil { @@ -243,7 +238,7 @@ func (client SecurityGroupsClient) GetPreparer(resourceGroupName string, network "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -310,7 +305,7 @@ func (client SecurityGroupsClient) ListPreparer(resourceGroupName string) (*http "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -366,6 +361,51 @@ func (client SecurityGroupsClient) ListNextResults(lastResults SecurityGroupList return } +// ListComplete gets all elements from the list without paging. +func (client SecurityGroupsClient) ListComplete(resourceGroupName string, cancel <-chan struct{}) (<-chan SecurityGroup, <-chan error) { + resultChan := make(chan SecurityGroup) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List(resourceGroupName) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} + // ListAll gets all network security groups in a subscription. func (client SecurityGroupsClient) ListAll() (result SecurityGroupListResult, err error) { req, err := client.ListAllPreparer() @@ -395,7 +435,7 @@ func (client SecurityGroupsClient) ListAllPreparer() (*http.Request, error) { "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -450,3 +490,48 @@ func (client SecurityGroupsClient) ListAllNextResults(lastResults SecurityGroupL return } + +// ListAllComplete gets all elements from the list without paging. 
+func (client SecurityGroupsClient) ListAllComplete(cancel <-chan struct{}) (<-chan SecurityGroup, <-chan error) { + resultChan := make(chan SecurityGroup) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.ListAll() + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListAllNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securityrules.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securityrules.go index 2e2738605f83..9c76ef54e8b9 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securityrules.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securityrules.go @@ -14,9 +14,8 @@ package network // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/Azure/go-autorest/autorest" @@ -25,32 +24,28 @@ import ( "net/http" ) -// SecurityRulesClient is the composite Swagger for Network Client +// SecurityRulesClient is the network Client type SecurityRulesClient struct { ManagementClient } -// NewSecurityRulesClient creates an instance of the SecurityRulesClient -// client. +// NewSecurityRulesClient creates an instance of the SecurityRulesClient client. func NewSecurityRulesClient(subscriptionID string) SecurityRulesClient { return NewSecurityRulesClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewSecurityRulesClientWithBaseURI creates an instance of the -// SecurityRulesClient client. +// NewSecurityRulesClientWithBaseURI creates an instance of the SecurityRulesClient client. func NewSecurityRulesClientWithBaseURI(baseURI string, subscriptionID string) SecurityRulesClient { return SecurityRulesClient{NewWithBaseURI(baseURI, subscriptionID)} } -// CreateOrUpdate creates or updates a security rule in the specified network -// security group. This method may poll for completion. Polling can be canceled -// by passing the cancel channel argument. The channel will be used to cancel +// CreateOrUpdate creates or updates a security rule in the specified network security group. This method may poll for +// completion. Polling can be canceled by passing the cancel channel argument. The channel will be used to cancel // polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. -// networkSecurityGroupName is the name of the network security group. -// securityRuleName is the name of the security rule. securityRuleParameters is -// parameters supplied to the create or update network security rule operation. +// resourceGroupName is the name of the resource group. networkSecurityGroupName is the name of the network security +// group. 
securityRuleName is the name of the security rule. securityRuleParameters is parameters supplied to the +// create or update network security rule operation. func (client SecurityRulesClient) CreateOrUpdate(resourceGroupName string, networkSecurityGroupName string, securityRuleName string, securityRuleParameters SecurityRule, cancel <-chan struct{}) (<-chan SecurityRule, <-chan error) { resultChan := make(chan SecurityRule, 1) errChan := make(chan error, 1) @@ -70,8 +65,10 @@ func (client SecurityRulesClient) CreateOrUpdate(resourceGroupName string, netwo var err error var result SecurityRule defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -105,7 +102,7 @@ func (client SecurityRulesClient) CreateOrUpdatePreparer(resourceGroupName strin "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -141,14 +138,11 @@ func (client SecurityRulesClient) CreateOrUpdateResponder(resp *http.Response) ( return } -// Delete deletes the specified network security rule. This method may poll for -// completion. Polling can be canceled by passing the cancel channel argument. -// The channel will be used to cancel polling and any outstanding HTTP -// requests. +// Delete deletes the specified network security rule. This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. -// networkSecurityGroupName is the name of the network security group. -// securityRuleName is the name of the security rule. +// resourceGroupName is the name of the resource group. networkSecurityGroupName is the name of the network security +// group. securityRuleName is the name of the security rule. func (client SecurityRulesClient) Delete(resourceGroupName string, networkSecurityGroupName string, securityRuleName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { resultChan := make(chan autorest.Response, 1) errChan := make(chan error, 1) @@ -156,8 +150,10 @@ func (client SecurityRulesClient) Delete(resourceGroupName string, networkSecuri var err error var result autorest.Response defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -191,7 +187,7 @@ func (client SecurityRulesClient) DeletePreparer(resourceGroupName string, netwo "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -226,9 +222,8 @@ func (client SecurityRulesClient) DeleteResponder(resp *http.Response) (result a // Get get the specified network security rule. // -// resourceGroupName is the name of the resource group. -// networkSecurityGroupName is the name of the network security group. -// securityRuleName is the name of the security rule. +// resourceGroupName is the name of the resource group. networkSecurityGroupName is the name of the network security +// group. securityRuleName is the name of the security rule. 
func (client SecurityRulesClient) Get(resourceGroupName string, networkSecurityGroupName string, securityRuleName string) (result SecurityRule, err error) { req, err := client.GetPreparer(resourceGroupName, networkSecurityGroupName, securityRuleName) if err != nil { @@ -260,7 +255,7 @@ func (client SecurityRulesClient) GetPreparer(resourceGroupName string, networkS "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -294,8 +289,8 @@ func (client SecurityRulesClient) GetResponder(resp *http.Response) (result Secu // List gets all security rules in a network security group. // -// resourceGroupName is the name of the resource group. -// networkSecurityGroupName is the name of the network security group. +// resourceGroupName is the name of the resource group. networkSecurityGroupName is the name of the network security +// group. func (client SecurityRulesClient) List(resourceGroupName string, networkSecurityGroupName string) (result SecurityRuleListResult, err error) { req, err := client.ListPreparer(resourceGroupName, networkSecurityGroupName) if err != nil { @@ -326,7 +321,7 @@ func (client SecurityRulesClient) ListPreparer(resourceGroupName string, network "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -381,3 +376,48 @@ func (client SecurityRulesClient) ListNextResults(lastResults SecurityRuleListRe return } + +// ListComplete gets all elements from the list without paging. +func (client SecurityRulesClient) ListComplete(resourceGroupName string, networkSecurityGroupName string, cancel <-chan struct{}) (<-chan SecurityRule, <-chan error) { + resultChan := make(chan SecurityRule) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List(resourceGroupName, networkSecurityGroupName) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/subnets.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/subnets.go index 7d6ff882ae72..62a1789e591d 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/subnets.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/subnets.go @@ -14,9 +14,8 @@ package network // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
import ( "github.com/Azure/go-autorest/autorest" @@ -24,7 +23,7 @@ import ( "net/http" ) -// SubnetsClient is the composite Swagger for Network Client +// SubnetsClient is the network Client type SubnetsClient struct { ManagementClient } @@ -39,14 +38,12 @@ func NewSubnetsClientWithBaseURI(baseURI string, subscriptionID string) SubnetsC return SubnetsClient{NewWithBaseURI(baseURI, subscriptionID)} } -// CreateOrUpdate creates or updates a subnet in the specified virtual network. -// This method may poll for completion. Polling can be canceled by passing the -// cancel channel argument. The channel will be used to cancel polling and any +// CreateOrUpdate creates or updates a subnet in the specified virtual network. This method may poll for completion. +// Polling can be canceled by passing the cancel channel argument. The channel will be used to cancel polling and any // outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. virtualNetworkName is -// the name of the virtual network. subnetName is the name of the subnet. -// subnetParameters is parameters supplied to the create or update subnet +// resourceGroupName is the name of the resource group. virtualNetworkName is the name of the virtual network. +// subnetName is the name of the subnet. subnetParameters is parameters supplied to the create or update subnet // operation. func (client SubnetsClient) CreateOrUpdate(resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters Subnet, cancel <-chan struct{}) (<-chan Subnet, <-chan error) { resultChan := make(chan Subnet, 1) @@ -55,8 +52,10 @@ func (client SubnetsClient) CreateOrUpdate(resourceGroupName string, virtualNetw var err error var result Subnet defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -90,7 +89,7 @@ func (client SubnetsClient) CreateOrUpdatePreparer(resourceGroupName string, vir "virtualNetworkName": autorest.Encode("path", virtualNetworkName), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -126,12 +125,11 @@ func (client SubnetsClient) CreateOrUpdateResponder(resp *http.Response) (result return } -// Delete deletes the specified subnet. This method may poll for completion. -// Polling can be canceled by passing the cancel channel argument. The channel -// will be used to cancel polling and any outstanding HTTP requests. +// Delete deletes the specified subnet. This method may poll for completion. Polling can be canceled by passing the +// cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. virtualNetworkName is -// the name of the virtual network. subnetName is the name of the subnet. +// resourceGroupName is the name of the resource group. virtualNetworkName is the name of the virtual network. +// subnetName is the name of the subnet. 
func (client SubnetsClient) Delete(resourceGroupName string, virtualNetworkName string, subnetName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { resultChan := make(chan autorest.Response, 1) errChan := make(chan error, 1) @@ -139,8 +137,10 @@ func (client SubnetsClient) Delete(resourceGroupName string, virtualNetworkName var err error var result autorest.Response defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -174,7 +174,7 @@ func (client SubnetsClient) DeletePreparer(resourceGroupName string, virtualNetw "virtualNetworkName": autorest.Encode("path", virtualNetworkName), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -209,9 +209,8 @@ func (client SubnetsClient) DeleteResponder(resp *http.Response) (result autores // Get gets the specified subnet by virtual network and resource group. // -// resourceGroupName is the name of the resource group. virtualNetworkName is -// the name of the virtual network. subnetName is the name of the subnet. -// expand is expands referenced resources. +// resourceGroupName is the name of the resource group. virtualNetworkName is the name of the virtual network. +// subnetName is the name of the subnet. expand is expands referenced resources. func (client SubnetsClient) Get(resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (result Subnet, err error) { req, err := client.GetPreparer(resourceGroupName, virtualNetworkName, subnetName, expand) if err != nil { @@ -243,7 +242,7 @@ func (client SubnetsClient) GetPreparer(resourceGroupName string, virtualNetwork "virtualNetworkName": autorest.Encode("path", virtualNetworkName), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -280,8 +279,7 @@ func (client SubnetsClient) GetResponder(resp *http.Response) (result Subnet, er // List gets all subnets in a virtual network. // -// resourceGroupName is the name of the resource group. virtualNetworkName is -// the name of the virtual network. +// resourceGroupName is the name of the resource group. virtualNetworkName is the name of the virtual network. func (client SubnetsClient) List(resourceGroupName string, virtualNetworkName string) (result SubnetListResult, err error) { req, err := client.ListPreparer(resourceGroupName, virtualNetworkName) if err != nil { @@ -312,7 +310,7 @@ func (client SubnetsClient) ListPreparer(resourceGroupName string, virtualNetwor "virtualNetworkName": autorest.Encode("path", virtualNetworkName), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -367,3 +365,48 @@ func (client SubnetsClient) ListNextResults(lastResults SubnetListResult) (resul return } + +// ListComplete gets all elements from the list without paging. 
+func (client SubnetsClient) ListComplete(resourceGroupName string, virtualNetworkName string, cancel <-chan struct{}) (<-chan Subnet, <-chan error) { + resultChan := make(chan Subnet) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List(resourceGroupName, virtualNetworkName) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/usages.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/usages.go index 34fa0df4b428..85c220065404 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/usages.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/usages.go @@ -14,9 +14,8 @@ package network // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/Azure/go-autorest/autorest" @@ -25,7 +24,7 @@ import ( "net/http" ) -// UsagesClient is the composite Swagger for Network Client +// UsagesClient is the network Client type UsagesClient struct { ManagementClient } @@ -40,7 +39,7 @@ func NewUsagesClientWithBaseURI(baseURI string, subscriptionID string) UsagesCli return UsagesClient{NewWithBaseURI(baseURI, subscriptionID)} } -// List lists compute usages for a subscription. +// List list network usages for a subscription. // // location is the location where resource usage is queried. func (client UsagesClient) List(location string) (result UsagesListResult, err error) { @@ -78,7 +77,7 @@ func (client UsagesClient) ListPreparer(location string) (*http.Request, error) "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -133,3 +132,48 @@ func (client UsagesClient) ListNextResults(lastResults UsagesListResult) (result return } + +// ListComplete gets all elements from the list without paging. 
+func (client UsagesClient) ListComplete(location string, cancel <-chan struct{}) (<-chan Usage, <-chan error) { + resultChan := make(chan Usage) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List(location) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/version.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/version.go index 50b836179f85..7f510b4e4c6d 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/version.go @@ -14,16 +14,15 @@ package network // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. // UserAgent returns the UserAgent string to use when sending http.Requests. func UserAgent() string { - return "Azure-SDK-For-Go/v10.0.2-beta arm-network/" + return "Azure-SDK-For-Go/v11.0.0-beta arm-network/" } // Version returns the semantic version (see http://semver.org) of the client. func Version() string { - return "v10.0.2-beta" + return "v11.0.0-beta" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgatewayconnections.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgatewayconnections.go index 13e1392d2853..7db4d5de6c38 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgatewayconnections.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgatewayconnections.go @@ -14,9 +14,8 @@ package network // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/Azure/go-autorest/autorest" @@ -25,33 +24,29 @@ import ( "net/http" ) -// VirtualNetworkGatewayConnectionsClient is the composite Swagger for Network -// Client +// VirtualNetworkGatewayConnectionsClient is the network Client type VirtualNetworkGatewayConnectionsClient struct { ManagementClient } -// NewVirtualNetworkGatewayConnectionsClient creates an instance of the -// VirtualNetworkGatewayConnectionsClient client. +// NewVirtualNetworkGatewayConnectionsClient creates an instance of the VirtualNetworkGatewayConnectionsClient client. 
func NewVirtualNetworkGatewayConnectionsClient(subscriptionID string) VirtualNetworkGatewayConnectionsClient { return NewVirtualNetworkGatewayConnectionsClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewVirtualNetworkGatewayConnectionsClientWithBaseURI creates an instance of -// the VirtualNetworkGatewayConnectionsClient client. +// NewVirtualNetworkGatewayConnectionsClientWithBaseURI creates an instance of the +// VirtualNetworkGatewayConnectionsClient client. func NewVirtualNetworkGatewayConnectionsClientWithBaseURI(baseURI string, subscriptionID string) VirtualNetworkGatewayConnectionsClient { return VirtualNetworkGatewayConnectionsClient{NewWithBaseURI(baseURI, subscriptionID)} } -// CreateOrUpdate creates or updates a virtual network gateway connection in -// the specified resource group. This method may poll for completion. Polling -// can be canceled by passing the cancel channel argument. The channel will be -// used to cancel polling and any outstanding HTTP requests. +// CreateOrUpdate creates or updates a virtual network gateway connection in the specified resource group. This method +// may poll for completion. Polling can be canceled by passing the cancel channel argument. The channel will be used to +// cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. -// virtualNetworkGatewayConnectionName is the name of the virtual network -// gateway connection. parameters is parameters supplied to the create or -// update virtual network gateway connection operation. +// resourceGroupName is the name of the resource group. virtualNetworkGatewayConnectionName is the name of the virtual +// network gateway connection. parameters is parameters supplied to the create or update virtual network gateway +// connection operation. func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdate(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters VirtualNetworkGatewayConnection, cancel <-chan struct{}) (<-chan VirtualNetworkGatewayConnection, <-chan error) { resultChan := make(chan VirtualNetworkGatewayConnection, 1) errChan := make(chan error, 1) @@ -75,8 +70,10 @@ func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdate(resourceGrou var err error var result VirtualNetworkGatewayConnection defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -109,7 +106,7 @@ func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdatePreparer(reso "virtualNetworkGatewayConnectionName": autorest.Encode("path", virtualNetworkGatewayConnectionName), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -145,14 +142,12 @@ func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdateResponder(res return } -// Delete deletes the specified virtual network Gateway connection. This method -// may poll for completion. Polling can be canceled by passing the cancel -// channel argument. The channel will be used to cancel polling and any -// outstanding HTTP requests. +// Delete deletes the specified virtual network Gateway connection. This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP +// requests. // -// resourceGroupName is the name of the resource group. 
-// virtualNetworkGatewayConnectionName is the name of the virtual network -// gateway connection. +// resourceGroupName is the name of the resource group. virtualNetworkGatewayConnectionName is the name of the virtual +// network gateway connection. func (client VirtualNetworkGatewayConnectionsClient) Delete(resourceGroupName string, virtualNetworkGatewayConnectionName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { resultChan := make(chan autorest.Response, 1) errChan := make(chan error, 1) @@ -160,8 +155,10 @@ func (client VirtualNetworkGatewayConnectionsClient) Delete(resourceGroupName st var err error var result autorest.Response defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -194,7 +191,7 @@ func (client VirtualNetworkGatewayConnectionsClient) DeletePreparer(resourceGrou "virtualNetworkGatewayConnectionName": autorest.Encode("path", virtualNetworkGatewayConnectionName), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -229,9 +226,8 @@ func (client VirtualNetworkGatewayConnectionsClient) DeleteResponder(resp *http. // Get gets the specified virtual network gateway connection by resource group. // -// resourceGroupName is the name of the resource group. -// virtualNetworkGatewayConnectionName is the name of the virtual network -// gateway connection. +// resourceGroupName is the name of the resource group. virtualNetworkGatewayConnectionName is the name of the virtual +// network gateway connection. func (client VirtualNetworkGatewayConnectionsClient) Get(resourceGroupName string, virtualNetworkGatewayConnectionName string) (result VirtualNetworkGatewayConnection, err error) { req, err := client.GetPreparer(resourceGroupName, virtualNetworkGatewayConnectionName) if err != nil { @@ -262,7 +258,7 @@ func (client VirtualNetworkGatewayConnectionsClient) GetPreparer(resourceGroupNa "virtualNetworkGatewayConnectionName": autorest.Encode("path", virtualNetworkGatewayConnectionName), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -294,13 +290,11 @@ func (client VirtualNetworkGatewayConnectionsClient) GetResponder(resp *http.Res return } -// GetSharedKey the Get VirtualNetworkGatewayConnectionSharedKey operation -// retrieves information about the specified virtual network gateway connection -// shared key through Network resource provider. +// GetSharedKey the Get VirtualNetworkGatewayConnectionSharedKey operation retrieves information about the specified +// virtual network gateway connection shared key through Network resource provider. // -// resourceGroupName is the name of the resource group. -// virtualNetworkGatewayConnectionName is the virtual network gateway -// connection shared key name. +// resourceGroupName is the name of the resource group. virtualNetworkGatewayConnectionName is the virtual network +// gateway connection shared key name. 
func (client VirtualNetworkGatewayConnectionsClient) GetSharedKey(resourceGroupName string, virtualNetworkGatewayConnectionName string) (result ConnectionSharedKey, err error) { req, err := client.GetSharedKeyPreparer(resourceGroupName, virtualNetworkGatewayConnectionName) if err != nil { @@ -331,7 +325,7 @@ func (client VirtualNetworkGatewayConnectionsClient) GetSharedKeyPreparer(resour "virtualNetworkGatewayConnectionName": autorest.Encode("path", virtualNetworkGatewayConnectionName), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -363,8 +357,8 @@ func (client VirtualNetworkGatewayConnectionsClient) GetSharedKeyResponder(resp return } -// List the List VirtualNetworkGatewayConnections operation retrieves all the -// virtual network gateways connections created. +// List the List VirtualNetworkGatewayConnections operation retrieves all the virtual network gateways connections +// created. // // resourceGroupName is the name of the resource group. func (client VirtualNetworkGatewayConnectionsClient) List(resourceGroupName string) (result VirtualNetworkGatewayConnectionListResult, err error) { @@ -396,7 +390,7 @@ func (client VirtualNetworkGatewayConnectionsClient) ListPreparer(resourceGroupN "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -452,18 +446,59 @@ func (client VirtualNetworkGatewayConnectionsClient) ListNextResults(lastResults return } -// ResetSharedKey the VirtualNetworkGatewayConnectionResetSharedKey operation -// resets the virtual network gateway connection shared key for passed virtual -// network gateway connection in the specified resource group through Network -// resource provider. This method may poll for completion. Polling can be -// canceled by passing the cancel channel argument. The channel will be used to -// cancel polling and any outstanding HTTP requests. +// ListComplete gets all elements from the list without paging. +func (client VirtualNetworkGatewayConnectionsClient) ListComplete(resourceGroupName string, cancel <-chan struct{}) (<-chan VirtualNetworkGatewayConnection, <-chan error) { + resultChan := make(chan VirtualNetworkGatewayConnection) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List(resourceGroupName) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} + +// ResetSharedKey the VirtualNetworkGatewayConnectionResetSharedKey operation resets the virtual network gateway +// connection shared key for passed virtual network gateway connection in the specified resource group through Network +// resource provider. This method may poll for completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding HTTP requests. 
// -// resourceGroupName is the name of the resource group. -// virtualNetworkGatewayConnectionName is the virtual network gateway -// connection reset shared key Name. parameters is parameters supplied to the -// begin reset virtual network gateway connection shared key operation through -// network resource provider. +// resourceGroupName is the name of the resource group. virtualNetworkGatewayConnectionName is the virtual network +// gateway connection reset shared key Name. parameters is parameters supplied to the begin reset virtual network +// gateway connection shared key operation through network resource provider. func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKey(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters ConnectionResetSharedKey, cancel <-chan struct{}) (<-chan ConnectionResetSharedKey, <-chan error) { resultChan := make(chan ConnectionResetSharedKey, 1) errChan := make(chan error, 1) @@ -483,8 +518,10 @@ func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKey(resourceGrou var err error var result ConnectionResetSharedKey defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -517,7 +554,7 @@ func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKeyPreparer(reso "virtualNetworkGatewayConnectionName": autorest.Encode("path", virtualNetworkGatewayConnectionName), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -553,18 +590,14 @@ func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKeyResponder(res return } -// SetSharedKey the Put VirtualNetworkGatewayConnectionSharedKey operation sets -// the virtual network gateway connection shared key for passed virtual network -// gateway connection in the specified resource group through Network resource -// provider. This method may poll for completion. Polling can be canceled by -// passing the cancel channel argument. The channel will be used to cancel -// polling and any outstanding HTTP requests. +// SetSharedKey the Put VirtualNetworkGatewayConnectionSharedKey operation sets the virtual network gateway connection +// shared key for passed virtual network gateway connection in the specified resource group through Network resource +// provider. This method may poll for completion. Polling can be canceled by passing the cancel channel argument. The +// channel will be used to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. -// virtualNetworkGatewayConnectionName is the virtual network gateway -// connection name. parameters is parameters supplied to the Begin Set Virtual -// Network Gateway connection Shared key operation throughNetwork resource -// provider. +// resourceGroupName is the name of the resource group. virtualNetworkGatewayConnectionName is the virtual network +// gateway connection name. parameters is parameters supplied to the Begin Set Virtual Network Gateway connection +// Shared key operation throughNetwork resource provider. 
func (client VirtualNetworkGatewayConnectionsClient) SetSharedKey(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters ConnectionSharedKey, cancel <-chan struct{}) (<-chan ConnectionSharedKey, <-chan error) { resultChan := make(chan ConnectionSharedKey, 1) errChan := make(chan error, 1) @@ -581,8 +614,10 @@ func (client VirtualNetworkGatewayConnectionsClient) SetSharedKey(resourceGroupN var err error var result ConnectionSharedKey defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -615,7 +650,7 @@ func (client VirtualNetworkGatewayConnectionsClient) SetSharedKeyPreparer(resour "virtualNetworkGatewayConnectionName": autorest.Encode("path", virtualNetworkGatewayConnectionName), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgateways.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgateways.go index 720978e83ff8..5f56a204badf 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgateways.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgateways.go @@ -14,9 +14,8 @@ package network // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/Azure/go-autorest/autorest" @@ -25,32 +24,27 @@ import ( "net/http" ) -// VirtualNetworkGatewaysClient is the composite Swagger for Network Client +// VirtualNetworkGatewaysClient is the network Client type VirtualNetworkGatewaysClient struct { ManagementClient } -// NewVirtualNetworkGatewaysClient creates an instance of the -// VirtualNetworkGatewaysClient client. +// NewVirtualNetworkGatewaysClient creates an instance of the VirtualNetworkGatewaysClient client. func NewVirtualNetworkGatewaysClient(subscriptionID string) VirtualNetworkGatewaysClient { return NewVirtualNetworkGatewaysClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewVirtualNetworkGatewaysClientWithBaseURI creates an instance of the -// VirtualNetworkGatewaysClient client. +// NewVirtualNetworkGatewaysClientWithBaseURI creates an instance of the VirtualNetworkGatewaysClient client. func NewVirtualNetworkGatewaysClientWithBaseURI(baseURI string, subscriptionID string) VirtualNetworkGatewaysClient { return VirtualNetworkGatewaysClient{NewWithBaseURI(baseURI, subscriptionID)} } -// CreateOrUpdate creates or updates a virtual network gateway in the specified -// resource group. This method may poll for completion. Polling can be canceled -// by passing the cancel channel argument. The channel will be used to cancel +// CreateOrUpdate creates or updates a virtual network gateway in the specified resource group. This method may poll +// for completion. Polling can be canceled by passing the cancel channel argument. The channel will be used to cancel // polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. -// virtualNetworkGatewayName is the name of the virtual network gateway. 
-// parameters is parameters supplied to create or update virtual network -// gateway operation. +// resourceGroupName is the name of the resource group. virtualNetworkGatewayName is the name of the virtual network +// gateway. parameters is parameters supplied to create or update virtual network gateway operation. func (client VirtualNetworkGatewaysClient) CreateOrUpdate(resourceGroupName string, virtualNetworkGatewayName string, parameters VirtualNetworkGateway, cancel <-chan struct{}) (<-chan VirtualNetworkGateway, <-chan error) { resultChan := make(chan VirtualNetworkGateway, 1) errChan := make(chan error, 1) @@ -67,8 +61,10 @@ func (client VirtualNetworkGatewaysClient) CreateOrUpdate(resourceGroupName stri var err error var result VirtualNetworkGateway defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -101,7 +97,7 @@ func (client VirtualNetworkGatewaysClient) CreateOrUpdatePreparer(resourceGroupN "virtualNetworkGatewayName": autorest.Encode("path", virtualNetworkGatewayName), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -137,13 +133,12 @@ func (client VirtualNetworkGatewaysClient) CreateOrUpdateResponder(resp *http.Re return } -// Delete deletes the specified virtual network gateway. This method may poll -// for completion. Polling can be canceled by passing the cancel channel -// argument. The channel will be used to cancel polling and any outstanding -// HTTP requests. +// Delete deletes the specified virtual network gateway. This method may poll for completion. Polling can be canceled +// by passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP +// requests. // -// resourceGroupName is the name of the resource group. -// virtualNetworkGatewayName is the name of the virtual network gateway. +// resourceGroupName is the name of the resource group. virtualNetworkGatewayName is the name of the virtual network +// gateway. func (client VirtualNetworkGatewaysClient) Delete(resourceGroupName string, virtualNetworkGatewayName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { resultChan := make(chan autorest.Response, 1) errChan := make(chan error, 1) @@ -151,8 +146,10 @@ func (client VirtualNetworkGatewaysClient) Delete(resourceGroupName string, virt var err error var result autorest.Response defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -185,7 +182,7 @@ func (client VirtualNetworkGatewaysClient) DeletePreparer(resourceGroupName stri "virtualNetworkGatewayName": autorest.Encode("path", virtualNetworkGatewayName), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -218,44 +215,56 @@ func (client VirtualNetworkGatewaysClient) DeleteResponder(resp *http.Response) return } -// Generatevpnclientpackage generates VPN client package for P2S client of the -// virtual network gateway in the specified resource group. +// Generatevpnclientpackage generates VPN client package for P2S client of the virtual network gateway in the specified +// resource group. This method may poll for completion. Polling can be canceled by passing the cancel channel argument. +// The channel will be used to cancel polling and any outstanding HTTP requests. 
// -// resourceGroupName is the name of the resource group. -// virtualNetworkGatewayName is the name of the virtual network gateway. -// parameters is parameters supplied to the generate virtual network gateway -// VPN client package operation. -func (client VirtualNetworkGatewaysClient) Generatevpnclientpackage(resourceGroupName string, virtualNetworkGatewayName string, parameters VpnClientParameters) (result String, err error) { - req, err := client.GeneratevpnclientpackagePreparer(resourceGroupName, virtualNetworkGatewayName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Generatevpnclientpackage", nil, "Failure preparing request") - return - } - - resp, err := client.GeneratevpnclientpackageSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Generatevpnclientpackage", resp, "Failure sending request") - return - } +// resourceGroupName is the name of the resource group. virtualNetworkGatewayName is the name of the virtual network +// gateway. parameters is parameters supplied to the generate virtual network gateway VPN client package operation. +func (client VirtualNetworkGatewaysClient) Generatevpnclientpackage(resourceGroupName string, virtualNetworkGatewayName string, parameters VpnClientParameters, cancel <-chan struct{}) (<-chan String, <-chan error) { + resultChan := make(chan String, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result String + defer func() { + if err != nil { + errChan <- err + } + resultChan <- result + close(resultChan) + close(errChan) + }() + req, err := client.GeneratevpnclientpackagePreparer(resourceGroupName, virtualNetworkGatewayName, parameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Generatevpnclientpackage", nil, "Failure preparing request") + return + } - result, err = client.GeneratevpnclientpackageResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Generatevpnclientpackage", resp, "Failure responding to request") - } + resp, err := client.GeneratevpnclientpackageSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Generatevpnclientpackage", resp, "Failure sending request") + return + } - return + result, err = client.GeneratevpnclientpackageResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Generatevpnclientpackage", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // GeneratevpnclientpackagePreparer prepares the Generatevpnclientpackage request. 
-func (client VirtualNetworkGatewaysClient) GeneratevpnclientpackagePreparer(resourceGroupName string, virtualNetworkGatewayName string, parameters VpnClientParameters) (*http.Request, error) { +func (client VirtualNetworkGatewaysClient) GeneratevpnclientpackagePreparer(resourceGroupName string, virtualNetworkGatewayName string, parameters VpnClientParameters, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), "virtualNetworkGatewayName": autorest.Encode("path", virtualNetworkGatewayName), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -267,18 +276,106 @@ func (client VirtualNetworkGatewaysClient) GeneratevpnclientpackagePreparer(reso autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/generatevpnclientpackage", pathParameters), autorest.WithJSON(parameters), autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare(&http.Request{}) + return preparer.Prepare(&http.Request{Cancel: cancel}) } // GeneratevpnclientpackageSender sends the Generatevpnclientpackage request. The method will close the // http.Response Body if it receives an error. func (client VirtualNetworkGatewaysClient) GeneratevpnclientpackageSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } // GeneratevpnclientpackageResponder handles the response to the Generatevpnclientpackage request. The method always // closes the http.Response Body. func (client VirtualNetworkGatewaysClient) GeneratevpnclientpackageResponder(resp *http.Response) (result String, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GenerateVpnProfile generates VPN profile for P2S client of the virtual network gateway in the specified resource +// group. Used for IKEV2 and radius based authentication. This method may poll for completion. Polling can be canceled +// by passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP +// requests. +// +// resourceGroupName is the name of the resource group. virtualNetworkGatewayName is the name of the virtual network +// gateway. parameters is parameters supplied to the generate virtual network gateway VPN client package operation. 
+func (client VirtualNetworkGatewaysClient) GenerateVpnProfile(resourceGroupName string, virtualNetworkGatewayName string, parameters VpnClientParameters, cancel <-chan struct{}) (<-chan String, <-chan error) { + resultChan := make(chan String, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result String + defer func() { + if err != nil { + errChan <- err + } + resultChan <- result + close(resultChan) + close(errChan) + }() + req, err := client.GenerateVpnProfilePreparer(resourceGroupName, virtualNetworkGatewayName, parameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "GenerateVpnProfile", nil, "Failure preparing request") + return + } + + resp, err := client.GenerateVpnProfileSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "GenerateVpnProfile", resp, "Failure sending request") + return + } + + result, err = client.GenerateVpnProfileResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "GenerateVpnProfile", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// GenerateVpnProfilePreparer prepares the GenerateVpnProfile request. +func (client VirtualNetworkGatewaysClient) GenerateVpnProfilePreparer(resourceGroupName string, virtualNetworkGatewayName string, parameters VpnClientParameters, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkGatewayName": autorest.Encode("path", virtualNetworkGatewayName), + } + + const APIVersion = "2017-09-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/generatevpnprofile", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// GenerateVpnProfileSender sends the GenerateVpnProfile request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewaysClient) GenerateVpnProfileSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// GenerateVpnProfileResponder handles the response to the GenerateVpnProfile request. The method always +// closes the http.Response Body. +func (client VirtualNetworkGatewaysClient) GenerateVpnProfileResponder(resp *http.Response) (result String, err error) { err = autorest.Respond( resp, client.ByInspecting(), @@ -291,8 +388,8 @@ func (client VirtualNetworkGatewaysClient) GeneratevpnclientpackageResponder(res // Get gets the specified virtual network gateway by resource group. // -// resourceGroupName is the name of the resource group. -// virtualNetworkGatewayName is the name of the virtual network gateway. +// resourceGroupName is the name of the resource group. virtualNetworkGatewayName is the name of the virtual network +// gateway. 
func (client VirtualNetworkGatewaysClient) Get(resourceGroupName string, virtualNetworkGatewayName string) (result VirtualNetworkGateway, err error) { req, err := client.GetPreparer(resourceGroupName, virtualNetworkGatewayName) if err != nil { @@ -323,7 +420,7 @@ func (client VirtualNetworkGatewaysClient) GetPreparer(resourceGroupName string, "virtualNetworkGatewayName": autorest.Encode("path", virtualNetworkGatewayName), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -355,15 +452,12 @@ func (client VirtualNetworkGatewaysClient) GetResponder(resp *http.Response) (re return } -// GetAdvertisedRoutes this operation retrieves a list of routes the virtual -// network gateway is advertising to the specified peer. This method may poll -// for completion. Polling can be canceled by passing the cancel channel -// argument. The channel will be used to cancel polling and any outstanding -// HTTP requests. +// GetAdvertisedRoutes this operation retrieves a list of routes the virtual network gateway is advertising to the +// specified peer. This method may poll for completion. Polling can be canceled by passing the cancel channel argument. +// The channel will be used to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. -// virtualNetworkGatewayName is the name of the virtual network gateway. peer -// is the IP address of the peer +// resourceGroupName is the name of the resource group. virtualNetworkGatewayName is the name of the virtual network +// gateway. peer is the IP address of the peer func (client VirtualNetworkGatewaysClient) GetAdvertisedRoutes(resourceGroupName string, virtualNetworkGatewayName string, peer string, cancel <-chan struct{}) (<-chan GatewayRouteListResult, <-chan error) { resultChan := make(chan GatewayRouteListResult, 1) errChan := make(chan error, 1) @@ -371,8 +465,10 @@ func (client VirtualNetworkGatewaysClient) GetAdvertisedRoutes(resourceGroupName var err error var result GatewayRouteListResult defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -405,7 +501,7 @@ func (client VirtualNetworkGatewaysClient) GetAdvertisedRoutesPreparer(resourceG "virtualNetworkGatewayName": autorest.Encode("path", virtualNetworkGatewayName), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, "peer": autorest.Encode("query", peer), @@ -440,14 +536,12 @@ func (client VirtualNetworkGatewaysClient) GetAdvertisedRoutesResponder(resp *ht return } -// GetBgpPeerStatus the GetBgpPeerStatus operation retrieves the status of all -// BGP peers. This method may poll for completion. Polling can be canceled by -// passing the cancel channel argument. The channel will be used to cancel +// GetBgpPeerStatus the GetBgpPeerStatus operation retrieves the status of all BGP peers. This method may poll for +// completion. Polling can be canceled by passing the cancel channel argument. The channel will be used to cancel // polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. -// virtualNetworkGatewayName is the name of the virtual network gateway. peer -// is the IP address of the peer to retrieve the status of. +// resourceGroupName is the name of the resource group. virtualNetworkGatewayName is the name of the virtual network +// gateway. 
peer is the IP address of the peer to retrieve the status of. func (client VirtualNetworkGatewaysClient) GetBgpPeerStatus(resourceGroupName string, virtualNetworkGatewayName string, peer string, cancel <-chan struct{}) (<-chan BgpPeerStatusListResult, <-chan error) { resultChan := make(chan BgpPeerStatusListResult, 1) errChan := make(chan error, 1) @@ -455,8 +549,10 @@ func (client VirtualNetworkGatewaysClient) GetBgpPeerStatus(resourceGroupName st var err error var result BgpPeerStatusListResult defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -489,7 +585,7 @@ func (client VirtualNetworkGatewaysClient) GetBgpPeerStatusPreparer(resourceGrou "virtualNetworkGatewayName": autorest.Encode("path", virtualNetworkGatewayName), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -526,14 +622,12 @@ func (client VirtualNetworkGatewaysClient) GetBgpPeerStatusResponder(resp *http. return } -// GetLearnedRoutes this operation retrieves a list of routes the virtual -// network gateway has learned, including routes learned from BGP peers. This -// method may poll for completion. Polling can be canceled by passing the -// cancel channel argument. The channel will be used to cancel polling and any -// outstanding HTTP requests. +// GetLearnedRoutes this operation retrieves a list of routes the virtual network gateway has learned, including routes +// learned from BGP peers. This method may poll for completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. -// virtualNetworkGatewayName is the name of the virtual network gateway. +// resourceGroupName is the name of the resource group. virtualNetworkGatewayName is the name of the virtual network +// gateway. func (client VirtualNetworkGatewaysClient) GetLearnedRoutes(resourceGroupName string, virtualNetworkGatewayName string, cancel <-chan struct{}) (<-chan GatewayRouteListResult, <-chan error) { resultChan := make(chan GatewayRouteListResult, 1) errChan := make(chan error, 1) @@ -541,8 +635,10 @@ func (client VirtualNetworkGatewaysClient) GetLearnedRoutes(resourceGroupName st var err error var result GatewayRouteListResult defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -575,7 +671,7 @@ func (client VirtualNetworkGatewaysClient) GetLearnedRoutesPreparer(resourceGrou "virtualNetworkGatewayName": autorest.Encode("path", virtualNetworkGatewayName), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -609,6 +705,90 @@ func (client VirtualNetworkGatewaysClient) GetLearnedRoutesResponder(resp *http. return } +// GetVpnProfilePackageURL gets pre-generated VPN profile for P2S client of the virtual network gateway in the +// specified resource group. The profile needs to be generated first using generateVpnProfile. This method may poll for +// completion. Polling can be canceled by passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. virtualNetworkGatewayName is the name of the virtual network +// gateway. 
+func (client VirtualNetworkGatewaysClient) GetVpnProfilePackageURL(resourceGroupName string, virtualNetworkGatewayName string, cancel <-chan struct{}) (<-chan String, <-chan error) { + resultChan := make(chan String, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result String + defer func() { + if err != nil { + errChan <- err + } + resultChan <- result + close(resultChan) + close(errChan) + }() + req, err := client.GetVpnProfilePackageURLPreparer(resourceGroupName, virtualNetworkGatewayName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "GetVpnProfilePackageURL", nil, "Failure preparing request") + return + } + + resp, err := client.GetVpnProfilePackageURLSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "GetVpnProfilePackageURL", resp, "Failure sending request") + return + } + + result, err = client.GetVpnProfilePackageURLResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "GetVpnProfilePackageURL", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// GetVpnProfilePackageURLPreparer prepares the GetVpnProfilePackageURL request. +func (client VirtualNetworkGatewaysClient) GetVpnProfilePackageURLPreparer(resourceGroupName string, virtualNetworkGatewayName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkGatewayName": autorest.Encode("path", virtualNetworkGatewayName), + } + + const APIVersion = "2017-09-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getvpnprofilepackageurl", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// GetVpnProfilePackageURLSender sends the GetVpnProfilePackageURL request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewaysClient) GetVpnProfilePackageURLSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// GetVpnProfilePackageURLResponder handles the response to the GetVpnProfilePackageURL request. The method always +// closes the http.Response Body. +func (client VirtualNetworkGatewaysClient) GetVpnProfilePackageURLResponder(resp *http.Response) (result String, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + // List gets all virtual network gateways by resource group. // // resourceGroupName is the name of the resource group. 
@@ -641,7 +821,7 @@ func (client VirtualNetworkGatewaysClient) ListPreparer(resourceGroupName string "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -697,15 +877,193 @@ func (client VirtualNetworkGatewaysClient) ListNextResults(lastResults VirtualNe return } -// Reset resets the primary of the virtual network gateway in the specified -// resource group. This method may poll for completion. Polling can be canceled -// by passing the cancel channel argument. The channel will be used to cancel +// ListComplete gets all elements from the list without paging. +func (client VirtualNetworkGatewaysClient) ListComplete(resourceGroupName string, cancel <-chan struct{}) (<-chan VirtualNetworkGateway, <-chan error) { + resultChan := make(chan VirtualNetworkGateway) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List(resourceGroupName) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} + +// ListConnections gets all the connections in a virtual network gateway. +// +// resourceGroupName is the name of the resource group. virtualNetworkGatewayName is the name of the virtual network +// gateway. +func (client VirtualNetworkGatewaysClient) ListConnections(resourceGroupName string, virtualNetworkGatewayName string) (result VirtualNetworkGatewayListConnectionsResult, err error) { + req, err := client.ListConnectionsPreparer(resourceGroupName, virtualNetworkGatewayName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "ListConnections", nil, "Failure preparing request") + return + } + + resp, err := client.ListConnectionsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "ListConnections", resp, "Failure sending request") + return + } + + result, err = client.ListConnectionsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "ListConnections", resp, "Failure responding to request") + } + + return +} + +// ListConnectionsPreparer prepares the ListConnections request. 
+func (client VirtualNetworkGatewaysClient) ListConnectionsPreparer(resourceGroupName string, virtualNetworkGatewayName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkGatewayName": autorest.Encode("path", virtualNetworkGatewayName), + } + + const APIVersion = "2017-09-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/connections", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListConnectionsSender sends the ListConnections request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewaysClient) ListConnectionsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListConnectionsResponder handles the response to the ListConnections request. The method always +// closes the http.Response Body. +func (client VirtualNetworkGatewaysClient) ListConnectionsResponder(resp *http.Response) (result VirtualNetworkGatewayListConnectionsResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListConnectionsNextResults retrieves the next set of results, if any. +func (client VirtualNetworkGatewaysClient) ListConnectionsNextResults(lastResults VirtualNetworkGatewayListConnectionsResult) (result VirtualNetworkGatewayListConnectionsResult, err error) { + req, err := lastResults.VirtualNetworkGatewayListConnectionsResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "ListConnections", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListConnectionsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "ListConnections", resp, "Failure sending next results request") + } + + result, err = client.ListConnectionsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "ListConnections", resp, "Failure responding to next results request") + } + + return +} + +// ListConnectionsComplete gets all elements from the list without paging. 
+func (client VirtualNetworkGatewaysClient) ListConnectionsComplete(resourceGroupName string, virtualNetworkGatewayName string, cancel <-chan struct{}) (<-chan VirtualNetworkGatewayConnectionListEntity, <-chan error) { + resultChan := make(chan VirtualNetworkGatewayConnectionListEntity) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.ListConnections(resourceGroupName, virtualNetworkGatewayName) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListConnectionsNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} + +// Reset resets the primary of the virtual network gateway in the specified resource group. This method may poll for +// completion. Polling can be canceled by passing the cancel channel argument. The channel will be used to cancel // polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. -// virtualNetworkGatewayName is the name of the virtual network gateway. -// gatewayVip is virtual network gateway vip address supplied to the begin -// reset of the active-active feature enabled gateway. +// resourceGroupName is the name of the resource group. virtualNetworkGatewayName is the name of the virtual network +// gateway. gatewayVip is virtual network gateway vip address supplied to the begin reset of the active-active feature +// enabled gateway. func (client VirtualNetworkGatewaysClient) Reset(resourceGroupName string, virtualNetworkGatewayName string, gatewayVip string, cancel <-chan struct{}) (<-chan VirtualNetworkGateway, <-chan error) { resultChan := make(chan VirtualNetworkGateway, 1) errChan := make(chan error, 1) @@ -713,8 +1071,10 @@ func (client VirtualNetworkGatewaysClient) Reset(resourceGroupName string, virtu var err error var result VirtualNetworkGateway defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -747,7 +1107,7 @@ func (client VirtualNetworkGatewaysClient) ResetPreparer(resourceGroupName strin "virtualNetworkGatewayName": autorest.Encode("path", virtualNetworkGatewayName), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -783,3 +1143,138 @@ func (client VirtualNetworkGatewaysClient) ResetResponder(resp *http.Response) ( result.Response = autorest.Response{Response: resp} return } + +// SupportedVpnDevices gets a xml format representation for supported vpn devices. +// +// resourceGroupName is the name of the resource group. virtualNetworkGatewayName is the name of the virtual network +// gateway. 
+func (client VirtualNetworkGatewaysClient) SupportedVpnDevices(resourceGroupName string, virtualNetworkGatewayName string) (result String, err error) { + req, err := client.SupportedVpnDevicesPreparer(resourceGroupName, virtualNetworkGatewayName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "SupportedVpnDevices", nil, "Failure preparing request") + return + } + + resp, err := client.SupportedVpnDevicesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "SupportedVpnDevices", resp, "Failure sending request") + return + } + + result, err = client.SupportedVpnDevicesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "SupportedVpnDevices", resp, "Failure responding to request") + } + + return +} + +// SupportedVpnDevicesPreparer prepares the SupportedVpnDevices request. +func (client VirtualNetworkGatewaysClient) SupportedVpnDevicesPreparer(resourceGroupName string, virtualNetworkGatewayName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkGatewayName": autorest.Encode("path", virtualNetworkGatewayName), + } + + const APIVersion = "2017-09-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/supportedvpndevices", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// SupportedVpnDevicesSender sends the SupportedVpnDevices request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewaysClient) SupportedVpnDevicesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// SupportedVpnDevicesResponder handles the response to the SupportedVpnDevices request. The method always +// closes the http.Response Body. +func (client VirtualNetworkGatewaysClient) SupportedVpnDevicesResponder(resp *http.Response) (result String, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// VpnDeviceConfigurationScript gets a xml format representation for vpn device configuration script. +// +// resourceGroupName is the name of the resource group. virtualNetworkGatewayConnectionName is the name of the virtual +// network gateway connection for which the configuration script is generated. parameters is parameters supplied to the +// generate vpn device script operation. 
+func (client VirtualNetworkGatewaysClient) VpnDeviceConfigurationScript(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters VpnDeviceScriptParameters) (result String, err error) { + req, err := client.VpnDeviceConfigurationScriptPreparer(resourceGroupName, virtualNetworkGatewayConnectionName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "VpnDeviceConfigurationScript", nil, "Failure preparing request") + return + } + + resp, err := client.VpnDeviceConfigurationScriptSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "VpnDeviceConfigurationScript", resp, "Failure sending request") + return + } + + result, err = client.VpnDeviceConfigurationScriptResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "VpnDeviceConfigurationScript", resp, "Failure responding to request") + } + + return +} + +// VpnDeviceConfigurationScriptPreparer prepares the VpnDeviceConfigurationScript request. +func (client VirtualNetworkGatewaysClient) VpnDeviceConfigurationScriptPreparer(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters VpnDeviceScriptParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkGatewayConnectionName": autorest.Encode("path", virtualNetworkGatewayConnectionName), + } + + const APIVersion = "2017-09-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/vpndeviceconfigurationscript", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// VpnDeviceConfigurationScriptSender sends the VpnDeviceConfigurationScript request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewaysClient) VpnDeviceConfigurationScriptSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// VpnDeviceConfigurationScriptResponder handles the response to the VpnDeviceConfigurationScript request. The method always +// closes the http.Response Body. 
+func (client VirtualNetworkGatewaysClient) VpnDeviceConfigurationScriptResponder(resp *http.Response) (result String, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkpeerings.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkpeerings.go index 27fafdfd0a46..0a945c1bb8d7 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkpeerings.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkpeerings.go @@ -14,9 +14,8 @@ package network // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/Azure/go-autorest/autorest" @@ -24,31 +23,27 @@ import ( "net/http" ) -// VirtualNetworkPeeringsClient is the composite Swagger for Network Client +// VirtualNetworkPeeringsClient is the network Client type VirtualNetworkPeeringsClient struct { ManagementClient } -// NewVirtualNetworkPeeringsClient creates an instance of the -// VirtualNetworkPeeringsClient client. +// NewVirtualNetworkPeeringsClient creates an instance of the VirtualNetworkPeeringsClient client. func NewVirtualNetworkPeeringsClient(subscriptionID string) VirtualNetworkPeeringsClient { return NewVirtualNetworkPeeringsClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewVirtualNetworkPeeringsClientWithBaseURI creates an instance of the -// VirtualNetworkPeeringsClient client. +// NewVirtualNetworkPeeringsClientWithBaseURI creates an instance of the VirtualNetworkPeeringsClient client. func NewVirtualNetworkPeeringsClientWithBaseURI(baseURI string, subscriptionID string) VirtualNetworkPeeringsClient { return VirtualNetworkPeeringsClient{NewWithBaseURI(baseURI, subscriptionID)} } -// CreateOrUpdate creates or updates a peering in the specified virtual -// network. This method may poll for completion. Polling can be canceled by -// passing the cancel channel argument. The channel will be used to cancel -// polling and any outstanding HTTP requests. +// CreateOrUpdate creates or updates a peering in the specified virtual network. This method may poll for completion. +// Polling can be canceled by passing the cancel channel argument. The channel will be used to cancel polling and any +// outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. virtualNetworkName is -// the name of the virtual network. virtualNetworkPeeringName is the name of -// the peering. virtualNetworkPeeringParameters is parameters supplied to the +// resourceGroupName is the name of the resource group. virtualNetworkName is the name of the virtual network. +// virtualNetworkPeeringName is the name of the peering. virtualNetworkPeeringParameters is parameters supplied to the // create or update virtual network peering operation. 
func (client VirtualNetworkPeeringsClient) CreateOrUpdate(resourceGroupName string, virtualNetworkName string, virtualNetworkPeeringName string, virtualNetworkPeeringParameters VirtualNetworkPeering, cancel <-chan struct{}) (<-chan VirtualNetworkPeering, <-chan error) { resultChan := make(chan VirtualNetworkPeering, 1) @@ -57,8 +52,10 @@ func (client VirtualNetworkPeeringsClient) CreateOrUpdate(resourceGroupName stri var err error var result VirtualNetworkPeering defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -92,7 +89,7 @@ func (client VirtualNetworkPeeringsClient) CreateOrUpdatePreparer(resourceGroupN "virtualNetworkPeeringName": autorest.Encode("path", virtualNetworkPeeringName), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -128,14 +125,12 @@ func (client VirtualNetworkPeeringsClient) CreateOrUpdateResponder(resp *http.Re return } -// Delete deletes the specified virtual network peering. This method may poll -// for completion. Polling can be canceled by passing the cancel channel -// argument. The channel will be used to cancel polling and any outstanding -// HTTP requests. +// Delete deletes the specified virtual network peering. This method may poll for completion. Polling can be canceled +// by passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP +// requests. // -// resourceGroupName is the name of the resource group. virtualNetworkName is -// the name of the virtual network. virtualNetworkPeeringName is the name of -// the virtual network peering. +// resourceGroupName is the name of the resource group. virtualNetworkName is the name of the virtual network. +// virtualNetworkPeeringName is the name of the virtual network peering. func (client VirtualNetworkPeeringsClient) Delete(resourceGroupName string, virtualNetworkName string, virtualNetworkPeeringName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { resultChan := make(chan autorest.Response, 1) errChan := make(chan error, 1) @@ -143,8 +138,10 @@ func (client VirtualNetworkPeeringsClient) Delete(resourceGroupName string, virt var err error var result autorest.Response defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -178,7 +175,7 @@ func (client VirtualNetworkPeeringsClient) DeletePreparer(resourceGroupName stri "virtualNetworkPeeringName": autorest.Encode("path", virtualNetworkPeeringName), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -213,9 +210,8 @@ func (client VirtualNetworkPeeringsClient) DeleteResponder(resp *http.Response) // Get gets the specified virtual network peering. // -// resourceGroupName is the name of the resource group. virtualNetworkName is -// the name of the virtual network. virtualNetworkPeeringName is the name of -// the virtual network peering. +// resourceGroupName is the name of the resource group. virtualNetworkName is the name of the virtual network. +// virtualNetworkPeeringName is the name of the virtual network peering. 
func (client VirtualNetworkPeeringsClient) Get(resourceGroupName string, virtualNetworkName string, virtualNetworkPeeringName string) (result VirtualNetworkPeering, err error) { req, err := client.GetPreparer(resourceGroupName, virtualNetworkName, virtualNetworkPeeringName) if err != nil { @@ -247,7 +243,7 @@ func (client VirtualNetworkPeeringsClient) GetPreparer(resourceGroupName string, "virtualNetworkPeeringName": autorest.Encode("path", virtualNetworkPeeringName), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -281,8 +277,7 @@ func (client VirtualNetworkPeeringsClient) GetResponder(resp *http.Response) (re // List gets all virtual network peerings in a virtual network. // -// resourceGroupName is the name of the resource group. virtualNetworkName is -// the name of the virtual network. +// resourceGroupName is the name of the resource group. virtualNetworkName is the name of the virtual network. func (client VirtualNetworkPeeringsClient) List(resourceGroupName string, virtualNetworkName string) (result VirtualNetworkPeeringListResult, err error) { req, err := client.ListPreparer(resourceGroupName, virtualNetworkName) if err != nil { @@ -313,7 +308,7 @@ func (client VirtualNetworkPeeringsClient) ListPreparer(resourceGroupName string "virtualNetworkName": autorest.Encode("path", virtualNetworkName), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -368,3 +363,48 @@ func (client VirtualNetworkPeeringsClient) ListNextResults(lastResults VirtualNe return } + +// ListComplete gets all elements from the list without paging. +func (client VirtualNetworkPeeringsClient) ListComplete(resourceGroupName string, virtualNetworkName string, cancel <-chan struct{}) (<-chan VirtualNetworkPeering, <-chan error) { + resultChan := make(chan VirtualNetworkPeering) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List(resourceGroupName, virtualNetworkName) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworks.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworks.go index eab048c7252e..58ac8ab11229 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworks.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworks.go @@ -14,9 +14,8 @@ package network // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
import ( "github.com/Azure/go-autorest/autorest" @@ -24,29 +23,25 @@ import ( "net/http" ) -// VirtualNetworksClient is the composite Swagger for Network Client +// VirtualNetworksClient is the network Client type VirtualNetworksClient struct { ManagementClient } -// NewVirtualNetworksClient creates an instance of the VirtualNetworksClient -// client. +// NewVirtualNetworksClient creates an instance of the VirtualNetworksClient client. func NewVirtualNetworksClient(subscriptionID string) VirtualNetworksClient { return NewVirtualNetworksClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewVirtualNetworksClientWithBaseURI creates an instance of the -// VirtualNetworksClient client. +// NewVirtualNetworksClientWithBaseURI creates an instance of the VirtualNetworksClient client. func NewVirtualNetworksClientWithBaseURI(baseURI string, subscriptionID string) VirtualNetworksClient { return VirtualNetworksClient{NewWithBaseURI(baseURI, subscriptionID)} } -// CheckIPAddressAvailability checks whether a private IP address is available -// for use. +// CheckIPAddressAvailability checks whether a private IP address is available for use. // -// resourceGroupName is the name of the resource group. virtualNetworkName is -// the name of the virtual network. IPAddress is the private IP address to be -// verified. +// resourceGroupName is the name of the resource group. virtualNetworkName is the name of the virtual network. +// IPAddress is the private IP address to be verified. func (client VirtualNetworksClient) CheckIPAddressAvailability(resourceGroupName string, virtualNetworkName string, IPAddress string) (result IPAddressAvailabilityResult, err error) { req, err := client.CheckIPAddressAvailabilityPreparer(resourceGroupName, virtualNetworkName, IPAddress) if err != nil { @@ -77,7 +72,7 @@ func (client VirtualNetworksClient) CheckIPAddressAvailabilityPreparer(resourceG "virtualNetworkName": autorest.Encode("path", virtualNetworkName), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -112,14 +107,12 @@ func (client VirtualNetworksClient) CheckIPAddressAvailabilityResponder(resp *ht return } -// CreateOrUpdate creates or updates a virtual network in the specified -// resource group. This method may poll for completion. Polling can be canceled -// by passing the cancel channel argument. The channel will be used to cancel +// CreateOrUpdate creates or updates a virtual network in the specified resource group. This method may poll for +// completion. Polling can be canceled by passing the cancel channel argument. The channel will be used to cancel // polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. virtualNetworkName is -// the name of the virtual network. parameters is parameters supplied to the -// create or update virtual network operation +// resourceGroupName is the name of the resource group. virtualNetworkName is the name of the virtual network. 
+// parameters is parameters supplied to the create or update virtual network operation func (client VirtualNetworksClient) CreateOrUpdate(resourceGroupName string, virtualNetworkName string, parameters VirtualNetwork, cancel <-chan struct{}) (<-chan VirtualNetwork, <-chan error) { resultChan := make(chan VirtualNetwork, 1) errChan := make(chan error, 1) @@ -127,8 +120,10 @@ func (client VirtualNetworksClient) CreateOrUpdate(resourceGroupName string, vir var err error var result VirtualNetwork defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -161,7 +156,7 @@ func (client VirtualNetworksClient) CreateOrUpdatePreparer(resourceGroupName str "virtualNetworkName": autorest.Encode("path", virtualNetworkName), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -197,13 +192,10 @@ func (client VirtualNetworksClient) CreateOrUpdateResponder(resp *http.Response) return } -// Delete deletes the specified virtual network. This method may poll for -// completion. Polling can be canceled by passing the cancel channel argument. -// The channel will be used to cancel polling and any outstanding HTTP -// requests. +// Delete deletes the specified virtual network. This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. virtualNetworkName is -// the name of the virtual network. +// resourceGroupName is the name of the resource group. virtualNetworkName is the name of the virtual network. func (client VirtualNetworksClient) Delete(resourceGroupName string, virtualNetworkName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { resultChan := make(chan autorest.Response, 1) errChan := make(chan error, 1) @@ -211,8 +203,10 @@ func (client VirtualNetworksClient) Delete(resourceGroupName string, virtualNetw var err error var result autorest.Response defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -245,7 +239,7 @@ func (client VirtualNetworksClient) DeletePreparer(resourceGroupName string, vir "virtualNetworkName": autorest.Encode("path", virtualNetworkName), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -280,8 +274,8 @@ func (client VirtualNetworksClient) DeleteResponder(resp *http.Response) (result // Get gets the specified virtual network by resource group. // -// resourceGroupName is the name of the resource group. virtualNetworkName is -// the name of the virtual network. expand is expands referenced resources. +// resourceGroupName is the name of the resource group. virtualNetworkName is the name of the virtual network. expand +// is expands referenced resources. 
func (client VirtualNetworksClient) Get(resourceGroupName string, virtualNetworkName string, expand string) (result VirtualNetwork, err error) { req, err := client.GetPreparer(resourceGroupName, virtualNetworkName, expand) if err != nil { @@ -312,7 +306,7 @@ func (client VirtualNetworksClient) GetPreparer(resourceGroupName string, virtua "virtualNetworkName": autorest.Encode("path", virtualNetworkName), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -379,7 +373,7 @@ func (client VirtualNetworksClient) ListPreparer(resourceGroupName string) (*htt "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -435,6 +429,51 @@ func (client VirtualNetworksClient) ListNextResults(lastResults VirtualNetworkLi return } +// ListComplete gets all elements from the list without paging. +func (client VirtualNetworksClient) ListComplete(resourceGroupName string, cancel <-chan struct{}) (<-chan VirtualNetwork, <-chan error) { + resultChan := make(chan VirtualNetwork) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.List(resourceGroupName) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} + // ListAll gets all virtual networks in a subscription. func (client VirtualNetworksClient) ListAll() (result VirtualNetworkListResult, err error) { req, err := client.ListAllPreparer() @@ -464,7 +503,7 @@ func (client VirtualNetworksClient) ListAllPreparer() (*http.Request, error) { "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -519,3 +558,182 @@ func (client VirtualNetworksClient) ListAllNextResults(lastResults VirtualNetwor return } + +// ListAllComplete gets all elements from the list without paging. +func (client VirtualNetworksClient) ListAllComplete(cancel <-chan struct{}) (<-chan VirtualNetwork, <-chan error) { + resultChan := make(chan VirtualNetwork) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.ListAll() + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListAllNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} + +// ListUsage lists usage stats. +// +// resourceGroupName is the name of the resource group. 
virtualNetworkName is the name of the virtual network. +func (client VirtualNetworksClient) ListUsage(resourceGroupName string, virtualNetworkName string) (result VirtualNetworkListUsageResult, err error) { + req, err := client.ListUsagePreparer(resourceGroupName, virtualNetworkName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "ListUsage", nil, "Failure preparing request") + return + } + + resp, err := client.ListUsageSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "ListUsage", resp, "Failure sending request") + return + } + + result, err = client.ListUsageResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "ListUsage", resp, "Failure responding to request") + } + + return +} + +// ListUsagePreparer prepares the ListUsage request. +func (client VirtualNetworksClient) ListUsagePreparer(resourceGroupName string, virtualNetworkName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkName": autorest.Encode("path", virtualNetworkName), + } + + const APIVersion = "2017-09-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/usages", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListUsageSender sends the ListUsage request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworksClient) ListUsageSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListUsageResponder handles the response to the ListUsage request. The method always +// closes the http.Response Body. +func (client VirtualNetworksClient) ListUsageResponder(resp *http.Response) (result VirtualNetworkListUsageResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListUsageNextResults retrieves the next set of results, if any. 
+func (client VirtualNetworksClient) ListUsageNextResults(lastResults VirtualNetworkListUsageResult) (result VirtualNetworkListUsageResult, err error) { + req, err := lastResults.VirtualNetworkListUsageResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "ListUsage", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListUsageSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "ListUsage", resp, "Failure sending next results request") + } + + result, err = client.ListUsageResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "ListUsage", resp, "Failure responding to next results request") + } + + return +} + +// ListUsageComplete gets all elements from the list without paging. +func (client VirtualNetworksClient) ListUsageComplete(resourceGroupName string, virtualNetworkName string, cancel <-chan struct{}) (<-chan VirtualNetworkUsage, <-chan error) { + resultChan := make(chan VirtualNetworkUsage) + errChan := make(chan error, 1) + go func() { + defer func() { + close(resultChan) + close(errChan) + }() + list, err := client.ListUsage(resourceGroupName, virtualNetworkName) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + for list.NextLink != nil { + list, err = client.ListUsageNextResults(list) + if err != nil { + errChan <- err + return + } + if list.Value != nil { + for _, item := range *list.Value { + select { + case <-cancel: + return + case resultChan <- item: + // Intentionally left blank + } + } + } + } + }() + return resultChan, errChan +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/watchers.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/watchers.go index 28febf847484..798d8c1bb221 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/watchers.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/watchers.go @@ -14,9 +14,8 @@ package network // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/Azure/go-autorest/autorest" @@ -25,7 +24,7 @@ import ( "net/http" ) -// WatchersClient is the composite Swagger for Network Client +// WatchersClient is the network Client type WatchersClient struct { ManagementClient } @@ -35,18 +34,112 @@ func NewWatchersClient(subscriptionID string) WatchersClient { return NewWatchersClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewWatchersClientWithBaseURI creates an instance of the WatchersClient -// client. +// NewWatchersClientWithBaseURI creates an instance of the WatchersClient client. func NewWatchersClientWithBaseURI(baseURI string, subscriptionID string) WatchersClient { return WatchersClient{NewWithBaseURI(baseURI, subscriptionID)} } -// CreateOrUpdate creates or updates a network watcher in the specified -// resource group. 
+// CheckConnectivity verifies the possibility of establishing a direct TCP connection from a virtual machine to a given +// endpoint including another VM or an arbitrary remote server. This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP +// requests. +// +// resourceGroupName is the name of the network watcher resource group. networkWatcherName is the name of the network +// watcher resource. parameters is parameters that determine how the connectivity check will be performed. +func (client WatchersClient) CheckConnectivity(resourceGroupName string, networkWatcherName string, parameters ConnectivityParameters, cancel <-chan struct{}) (<-chan ConnectivityInformation, <-chan error) { + resultChan := make(chan ConnectivityInformation, 1) + errChan := make(chan error, 1) + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.Source", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.Source.ResourceID", Name: validation.Null, Rule: true, Chain: nil}}}, + {Target: "parameters.Destination", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + errChan <- validation.NewErrorWithValidationError(err, "network.WatchersClient", "CheckConnectivity") + close(errChan) + close(resultChan) + return resultChan, errChan + } + + go func() { + var err error + var result ConnectivityInformation + defer func() { + if err != nil { + errChan <- err + } + resultChan <- result + close(resultChan) + close(errChan) + }() + req, err := client.CheckConnectivityPreparer(resourceGroupName, networkWatcherName, parameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersClient", "CheckConnectivity", nil, "Failure preparing request") + return + } + + resp, err := client.CheckConnectivitySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.WatchersClient", "CheckConnectivity", resp, "Failure sending request") + return + } + + result, err = client.CheckConnectivityResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersClient", "CheckConnectivity", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// CheckConnectivityPreparer prepares the CheckConnectivity request. 
+func (client WatchersClient) CheckConnectivityPreparer(resourceGroupName string, networkWatcherName string, parameters ConnectivityParameters, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkWatcherName": autorest.Encode("path", networkWatcherName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-09-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectivityCheck", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CheckConnectivitySender sends the CheckConnectivity request. The method will close the +// http.Response Body if it receives an error. +func (client WatchersClient) CheckConnectivitySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CheckConnectivityResponder handles the response to the CheckConnectivity request. The method always +// closes the http.Response Body. +func (client WatchersClient) CheckConnectivityResponder(resp *http.Response) (result ConnectivityInformation, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// CreateOrUpdate creates or updates a network watcher in the specified resource group. // -// resourceGroupName is the name of the resource group. networkWatcherName is -// the name of the network watcher. parameters is parameters that define the -// network watcher resource. +// resourceGroupName is the name of the resource group. networkWatcherName is the name of the network watcher. +// parameters is parameters that define the network watcher resource. func (client WatchersClient) CreateOrUpdate(resourceGroupName string, networkWatcherName string, parameters Watcher) (result Watcher, err error) { req, err := client.CreateOrUpdatePreparer(resourceGroupName, networkWatcherName, parameters) if err != nil { @@ -77,7 +170,7 @@ func (client WatchersClient) CreateOrUpdatePreparer(resourceGroupName string, ne "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -111,13 +204,11 @@ func (client WatchersClient) CreateOrUpdateResponder(resp *http.Response) (resul return } -// Delete deletes the specified network watcher resource. This method may poll -// for completion. Polling can be canceled by passing the cancel channel -// argument. The channel will be used to cancel polling and any outstanding -// HTTP requests. +// Delete deletes the specified network watcher resource. This method may poll for completion. Polling can be canceled +// by passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP +// requests. 
// -// resourceGroupName is the name of the resource group. networkWatcherName is -// the name of the network watcher. +// resourceGroupName is the name of the resource group. networkWatcherName is the name of the network watcher. func (client WatchersClient) Delete(resourceGroupName string, networkWatcherName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { resultChan := make(chan autorest.Response, 1) errChan := make(chan error, 1) @@ -125,8 +216,10 @@ func (client WatchersClient) Delete(resourceGroupName string, networkWatcherName var err error var result autorest.Response defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -159,7 +252,7 @@ func (client WatchersClient) DeletePreparer(resourceGroupName string, networkWat "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -194,8 +287,7 @@ func (client WatchersClient) DeleteResponder(resp *http.Response) (result autore // Get gets the specified network watcher by resource group. // -// resourceGroupName is the name of the resource group. networkWatcherName is -// the name of the network watcher. +// resourceGroupName is the name of the resource group. networkWatcherName is the name of the network watcher. func (client WatchersClient) Get(resourceGroupName string, networkWatcherName string) (result Watcher, err error) { req, err := client.GetPreparer(resourceGroupName, networkWatcherName) if err != nil { @@ -226,7 +318,7 @@ func (client WatchersClient) GetPreparer(resourceGroupName string, networkWatche "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -258,14 +350,109 @@ func (client WatchersClient) GetResponder(resp *http.Response) (result Watcher, return } -// GetFlowLogStatus queries status of flow log on a specified resource. This -// method may poll for completion. Polling can be canceled by passing the -// cancel channel argument. The channel will be used to cancel polling and any +// GetAzureReachabilityReport gets the relative latency score for internet service providers from a specified location +// to Azure regions. This method may poll for completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the network watcher resource group. networkWatcherName is the name of the network +// watcher resource. parameters is parameters that determine Azure reachability report configuration. 
+func (client WatchersClient) GetAzureReachabilityReport(resourceGroupName string, networkWatcherName string, parameters AzureReachabilityReportParameters, cancel <-chan struct{}) (<-chan AzureReachabilityReport, <-chan error) { + resultChan := make(chan AzureReachabilityReport, 1) + errChan := make(chan error, 1) + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.ProviderLocation", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.ProviderLocation.Country", Name: validation.Null, Rule: true, Chain: nil}}}, + {Target: "parameters.StartTime", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.EndTime", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + errChan <- validation.NewErrorWithValidationError(err, "network.WatchersClient", "GetAzureReachabilityReport") + close(errChan) + close(resultChan) + return resultChan, errChan + } + + go func() { + var err error + var result AzureReachabilityReport + defer func() { + if err != nil { + errChan <- err + } + resultChan <- result + close(resultChan) + close(errChan) + }() + req, err := client.GetAzureReachabilityReportPreparer(resourceGroupName, networkWatcherName, parameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersClient", "GetAzureReachabilityReport", nil, "Failure preparing request") + return + } + + resp, err := client.GetAzureReachabilityReportSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.WatchersClient", "GetAzureReachabilityReport", resp, "Failure sending request") + return + } + + result, err = client.GetAzureReachabilityReportResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersClient", "GetAzureReachabilityReport", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// GetAzureReachabilityReportPreparer prepares the GetAzureReachabilityReport request. +func (client WatchersClient) GetAzureReachabilityReportPreparer(resourceGroupName string, networkWatcherName string, parameters AzureReachabilityReportParameters, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkWatcherName": autorest.Encode("path", networkWatcherName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-09-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/azureReachabilityReport", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// GetAzureReachabilityReportSender sends the GetAzureReachabilityReport request. The method will close the +// http.Response Body if it receives an error. 
+func (client WatchersClient) GetAzureReachabilityReportSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// GetAzureReachabilityReportResponder handles the response to the GetAzureReachabilityReport request. The method always +// closes the http.Response Body. +func (client WatchersClient) GetAzureReachabilityReportResponder(resp *http.Response) (result AzureReachabilityReport, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetFlowLogStatus queries status of flow log on a specified resource. This method may poll for completion. Polling +// can be canceled by passing the cancel channel argument. The channel will be used to cancel polling and any // outstanding HTTP requests. // -// resourceGroupName is the name of the network watcher resource group. -// networkWatcherName is the name of the network watcher resource. parameters -// is parameters that define a resource to query flow log status. +// resourceGroupName is the name of the network watcher resource group. networkWatcherName is the name of the network +// watcher resource. parameters is parameters that define a resource to query flow log status. func (client WatchersClient) GetFlowLogStatus(resourceGroupName string, networkWatcherName string, parameters FlowLogStatusParameters, cancel <-chan struct{}) (<-chan FlowLogInformation, <-chan error) { resultChan := make(chan FlowLogInformation, 1) errChan := make(chan error, 1) @@ -282,8 +469,10 @@ func (client WatchersClient) GetFlowLogStatus(resourceGroupName string, networkW var err error var result FlowLogInformation defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -316,7 +505,7 @@ func (client WatchersClient) GetFlowLogStatusPreparer(resourceGroupName string, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -352,14 +541,11 @@ func (client WatchersClient) GetFlowLogStatusResponder(resp *http.Response) (res return } -// GetNextHop gets the next hop from the specified VM. This method may poll for -// completion. Polling can be canceled by passing the cancel channel argument. -// The channel will be used to cancel polling and any outstanding HTTP -// requests. +// GetNextHop gets the next hop from the specified VM. This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. networkWatcherName is -// the name of the network watcher. parameters is parameters that define the -// source and destination endpoint. +// resourceGroupName is the name of the resource group. networkWatcherName is the name of the network watcher. +// parameters is parameters that define the source and destination endpoint. 
func (client WatchersClient) GetNextHop(resourceGroupName string, networkWatcherName string, parameters NextHopParameters, cancel <-chan struct{}) (<-chan NextHopResult, <-chan error) { resultChan := make(chan NextHopResult, 1) errChan := make(chan error, 1) @@ -378,8 +564,10 @@ func (client WatchersClient) GetNextHop(resourceGroupName string, networkWatcher var err error var result NextHopResult defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -412,7 +600,7 @@ func (client WatchersClient) GetNextHopPreparer(resourceGroupName string, networ "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -450,9 +638,8 @@ func (client WatchersClient) GetNextHopResponder(resp *http.Response) (result Ne // GetTopology gets the current network topology by resource group. // -// resourceGroupName is the name of the resource group. networkWatcherName is -// the name of the network watcher. parameters is parameters that define the -// representation of topology. +// resourceGroupName is the name of the resource group. networkWatcherName is the name of the network watcher. +// parameters is parameters that define the representation of topology. func (client WatchersClient) GetTopology(resourceGroupName string, networkWatcherName string, parameters TopologyParameters) (result Topology, err error) { if err := validation.Validate([]validation.Validation{ {TargetValue: parameters, @@ -489,7 +676,7 @@ func (client WatchersClient) GetTopologyPreparer(resourceGroupName string, netwo "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -523,14 +710,12 @@ func (client WatchersClient) GetTopologyResponder(resp *http.Response) (result T return } -// GetTroubleshooting initiate troubleshooting on a specified resource This -// method may poll for completion. Polling can be canceled by passing the -// cancel channel argument. The channel will be used to cancel polling and any -// outstanding HTTP requests. +// GetTroubleshooting initiate troubleshooting on a specified resource This method may poll for completion. Polling can +// be canceled by passing the cancel channel argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. // -// resourceGroupName is the name of the resource group. networkWatcherName is -// the name of the network watcher resource. parameters is parameters that -// define the resource to troubleshoot. +// resourceGroupName is the name of the resource group. networkWatcherName is the name of the network watcher resource. +// parameters is parameters that define the resource to troubleshoot. 
func (client WatchersClient) GetTroubleshooting(resourceGroupName string, networkWatcherName string, parameters TroubleshootingParameters, cancel <-chan struct{}) (<-chan TroubleshootingResult, <-chan error) { resultChan := make(chan TroubleshootingResult, 1) errChan := make(chan error, 1) @@ -551,8 +736,10 @@ func (client WatchersClient) GetTroubleshooting(resourceGroupName string, networ var err error var result TroubleshootingResult defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -585,7 +772,7 @@ func (client WatchersClient) GetTroubleshootingPreparer(resourceGroupName string "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -621,14 +808,12 @@ func (client WatchersClient) GetTroubleshootingResponder(resp *http.Response) (r return } -// GetTroubleshootingResult get the last completed troubleshooting result on a -// specified resource This method may poll for completion. Polling can be -// canceled by passing the cancel channel argument. The channel will be used to -// cancel polling and any outstanding HTTP requests. +// GetTroubleshootingResult get the last completed troubleshooting result on a specified resource This method may poll +// for completion. Polling can be canceled by passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. networkWatcherName is -// the name of the network watcher resource. parameters is parameters that -// define the resource to query the troubleshooting result. +// resourceGroupName is the name of the resource group. networkWatcherName is the name of the network watcher resource. +// parameters is parameters that define the resource to query the troubleshooting result. func (client WatchersClient) GetTroubleshootingResult(resourceGroupName string, networkWatcherName string, parameters QueryTroubleshootingParameters, cancel <-chan struct{}) (<-chan TroubleshootingResult, <-chan error) { resultChan := make(chan TroubleshootingResult, 1) errChan := make(chan error, 1) @@ -645,8 +830,10 @@ func (client WatchersClient) GetTroubleshootingResult(resourceGroupName string, var err error var result TroubleshootingResult defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -679,7 +866,7 @@ func (client WatchersClient) GetTroubleshootingResultPreparer(resourceGroupName "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -715,14 +902,12 @@ func (client WatchersClient) GetTroubleshootingResultResponder(resp *http.Respon return } -// GetVMSecurityRules gets the configured and effective security group rules on -// the specified VM. This method may poll for completion. Polling can be -// canceled by passing the cancel channel argument. The channel will be used to -// cancel polling and any outstanding HTTP requests. +// GetVMSecurityRules gets the configured and effective security group rules on the specified VM. This method may poll +// for completion. Polling can be canceled by passing the cancel channel argument. 
The channel will be used to cancel +// polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. networkWatcherName is -// the name of the network watcher. parameters is parameters that define the VM -// to check security groups for. +// resourceGroupName is the name of the resource group. networkWatcherName is the name of the network watcher. +// parameters is parameters that define the VM to check security groups for. func (client WatchersClient) GetVMSecurityRules(resourceGroupName string, networkWatcherName string, parameters SecurityGroupViewParameters, cancel <-chan struct{}) (<-chan SecurityGroupViewResult, <-chan error) { resultChan := make(chan SecurityGroupViewResult, 1) errChan := make(chan error, 1) @@ -739,8 +924,10 @@ func (client WatchersClient) GetVMSecurityRules(resourceGroupName string, networ var err error var result SecurityGroupViewResult defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -773,7 +960,7 @@ func (client WatchersClient) GetVMSecurityRulesPreparer(resourceGroupName string "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -841,7 +1028,7 @@ func (client WatchersClient) ListPreparer(resourceGroupName string) (*http.Reque "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -902,7 +1089,7 @@ func (client WatchersClient) ListAllPreparer() (*http.Request, error) { "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -934,14 +1121,97 @@ func (client WatchersClient) ListAllResponder(resp *http.Response) (result Watch return } -// SetFlowLogConfiguration configures flow log on a specified resource. This -// method may poll for completion. Polling can be canceled by passing the -// cancel channel argument. The channel will be used to cancel polling and any +// ListAvailableProviders lists all available internet service providers for a specified Azure region. This method may +// poll for completion. Polling can be canceled by passing the cancel channel argument. The channel will be used to +// cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the network watcher resource group. networkWatcherName is the name of the network +// watcher resource. parameters is parameters that scope the list of available providers. 
+func (client WatchersClient) ListAvailableProviders(resourceGroupName string, networkWatcherName string, parameters AvailableProvidersListParameters, cancel <-chan struct{}) (<-chan AvailableProvidersList, <-chan error) { + resultChan := make(chan AvailableProvidersList, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result AvailableProvidersList + defer func() { + if err != nil { + errChan <- err + } + resultChan <- result + close(resultChan) + close(errChan) + }() + req, err := client.ListAvailableProvidersPreparer(resourceGroupName, networkWatcherName, parameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersClient", "ListAvailableProviders", nil, "Failure preparing request") + return + } + + resp, err := client.ListAvailableProvidersSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.WatchersClient", "ListAvailableProviders", resp, "Failure sending request") + return + } + + result, err = client.ListAvailableProvidersResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersClient", "ListAvailableProviders", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// ListAvailableProvidersPreparer prepares the ListAvailableProviders request. +func (client WatchersClient) ListAvailableProvidersPreparer(resourceGroupName string, networkWatcherName string, parameters AvailableProvidersListParameters, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkWatcherName": autorest.Encode("path", networkWatcherName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-09-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/availableProvidersList", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// ListAvailableProvidersSender sends the ListAvailableProviders request. The method will close the +// http.Response Body if it receives an error. +func (client WatchersClient) ListAvailableProvidersSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// ListAvailableProvidersResponder handles the response to the ListAvailableProviders request. The method always +// closes the http.Response Body. +func (client WatchersClient) ListAvailableProvidersResponder(resp *http.Response) (result AvailableProvidersList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// SetFlowLogConfiguration configures flow log on a specified resource. This method may poll for completion. Polling +// can be canceled by passing the cancel channel argument. 
The channel will be used to cancel polling and any // outstanding HTTP requests. // -// resourceGroupName is the name of the network watcher resource group. -// networkWatcherName is the name of the network watcher resource. parameters -// is parameters that define the configuration of flow log. +// resourceGroupName is the name of the network watcher resource group. networkWatcherName is the name of the network +// watcher resource. parameters is parameters that define the configuration of flow log. func (client WatchersClient) SetFlowLogConfiguration(resourceGroupName string, networkWatcherName string, parameters FlowLogInformation, cancel <-chan struct{}) (<-chan FlowLogInformation, <-chan error) { resultChan := make(chan FlowLogInformation, 1) errChan := make(chan error, 1) @@ -962,8 +1232,10 @@ func (client WatchersClient) SetFlowLogConfiguration(resourceGroupName string, n var err error var result FlowLogInformation defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -996,7 +1268,7 @@ func (client WatchersClient) SetFlowLogConfigurationPreparer(resourceGroupName s "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1032,14 +1304,12 @@ func (client WatchersClient) SetFlowLogConfigurationResponder(resp *http.Respons return } -// VerifyIPFlow verify IP flow from the specified VM to a location given the -// currently configured NSG rules. This method may poll for completion. Polling -// can be canceled by passing the cancel channel argument. The channel will be +// VerifyIPFlow verify IP flow from the specified VM to a location given the currently configured NSG rules. This +// method may poll for completion. Polling can be canceled by passing the cancel channel argument. The channel will be // used to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. networkWatcherName is -// the name of the network watcher. parameters is parameters that define the IP -// flow to be verified. +// resourceGroupName is the name of the resource group. networkWatcherName is the name of the network watcher. +// parameters is parameters that define the IP flow to be verified. 
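As a usage sketch of the channel convention described above: the caller gets a result channel and an error channel, and with this change the error channel only carries a value when the call actually fails. The resource names below are placeholders, credential setup is omitted, and the NewWatchersClient constructor is assumed from the SDK's usual NewXxxClient pattern rather than taken from this hunk.

``` go
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/arm/network"
)

func main() {
	client := network.NewWatchersClient("<subscription-id>") // authorizer/credential setup omitted
	cancel := make(chan struct{})                            // close(cancel) aborts polling and in-flight requests

	resultChan, errChan := client.VerifyIPFlow(
		"example-rg", "example-watcher", network.VerificationIPFlowParameters{}, cancel)

	result := <-resultChan            // zero value when the call fails
	if err := <-errChan; err != nil { // nil once errChan is closed without an error being sent
		log.Fatal(err)
	}
	log.Printf("ip flow verification result: %+v", result)
}
```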
func (client WatchersClient) VerifyIPFlow(resourceGroupName string, networkWatcherName string, parameters VerificationIPFlowParameters, cancel <-chan struct{}) (<-chan VerificationIPFlowResult, <-chan error) { resultChan := make(chan VerificationIPFlowResult, 1) errChan := make(chan error, 1) @@ -1060,8 +1330,10 @@ func (client WatchersClient) VerifyIPFlow(resourceGroupName string, networkWatch var err error var result VerificationIPFlowResult defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -1094,7 +1366,7 @@ func (client WatchersClient) VerifyIPFlowPreparer(resourceGroupName string, netw "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2017-03-01" + const APIVersion = "2017-09-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/accounts.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/accounts.go index f0606ac133f3..0870a03ded6e 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/accounts.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/accounts.go @@ -14,9 +14,8 @@ package storage // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/Azure/go-autorest/autorest" @@ -35,18 +34,15 @@ func NewAccountsClient(subscriptionID string) AccountsClient { return NewAccountsClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewAccountsClientWithBaseURI creates an instance of the AccountsClient -// client. +// NewAccountsClientWithBaseURI creates an instance of the AccountsClient client. func NewAccountsClientWithBaseURI(baseURI string, subscriptionID string) AccountsClient { return AccountsClient{NewWithBaseURI(baseURI, subscriptionID)} } -// CheckNameAvailability checks that the storage account name is valid and is -// not already in use. +// CheckNameAvailability checks that the storage account name is valid and is not already in use. // -// accountName is the name of the storage account within the specified resource -// group. Storage account names must be between 3 and 24 characters in length -// and use numbers and lower-case letters only. +// accountName is the name of the storage account within the specified resource group. Storage account names must be +// between 3 and 24 characters in length and use numbers and lower-case letters only. func (client AccountsClient) CheckNameAvailability(accountName AccountCheckNameAvailabilityParameters) (result CheckNameAvailabilityResult, err error) { if err := validation.Validate([]validation.Validation{ {TargetValue: accountName, @@ -82,7 +78,7 @@ func (client AccountsClient) CheckNameAvailabilityPreparer(accountName AccountCh "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2016-12-01" + const APIVersion = "2017-06-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -116,21 +112,16 @@ func (client AccountsClient) CheckNameAvailabilityResponder(resp *http.Response) return } -// Create asynchronously creates a new storage account with the specified -// parameters. 
If an account is already created and a subsequent create request -// is issued with different properties, the account properties will be updated. -// If an account is already created and a subsequent create or update request -// is issued with the exact same set of properties, the request will succeed. -// This method may poll for completion. Polling can be canceled by passing the -// cancel channel argument. The channel will be used to cancel polling and any -// outstanding HTTP requests. +// Create asynchronously creates a new storage account with the specified parameters. If an account is already created +// and a subsequent create request is issued with different properties, the account properties will be updated. If an +// account is already created and a subsequent create or update request is issued with the exact same set of +// properties, the request will succeed. This method may poll for completion. Polling can be canceled by passing the +// cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group within the user's -// subscription. The name is case insensitive. accountName is the name of the -// storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case -// letters only. parameters is the parameters to provide for the created -// account. +// resourceGroupName is the name of the resource group within the user's subscription. The name is case insensitive. +// accountName is the name of the storage account within the specified resource group. Storage account names must be +// between 3 and 24 characters in length and use numbers and lower-case letters only. parameters is the parameters to +// provide for the created account. 
func (client AccountsClient) Create(resourceGroupName string, accountName string, parameters AccountCreateParameters, cancel <-chan struct{}) (<-chan Account, <-chan error) { resultChan := make(chan Account, 1) errChan := make(chan error, 1) @@ -145,11 +136,11 @@ func (client AccountsClient) Create(resourceGroupName string, accountName string {TargetValue: parameters, Constraints: []validation.Constraint{{Target: "parameters.Sku", Name: validation.Null, Rule: true, Chain: nil}, {Target: "parameters.Location", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.Identity", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.Identity.Type", Name: validation.Null, Rule: true, Chain: nil}}}, {Target: "parameters.AccountPropertiesCreateParameters", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.CustomDomain", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.CustomDomain.Name", Name: validation.Null, Rule: true, Chain: nil}}}, - {Target: "parameters.AccountPropertiesCreateParameters.Encryption", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.Encryption.KeySource", Name: validation.Null, Rule: true, Chain: nil}}}, }}}}}); err != nil { errChan <- validation.NewErrorWithValidationError(err, "storage.AccountsClient", "Create") close(errChan) @@ -161,8 +152,10 @@ func (client AccountsClient) Create(resourceGroupName string, accountName string var err error var result Account defer func() { + if err != nil { + errChan <- err + } resultChan <- result - errChan <- err close(resultChan) close(errChan) }() @@ -195,7 +188,7 @@ func (client AccountsClient) CreatePreparer(resourceGroupName string, accountNam "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2016-12-01" + const APIVersion = "2017-06-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -233,11 +226,9 @@ func (client AccountsClient) CreateResponder(resp *http.Response) (result Accoun // Delete deletes a storage account in Microsoft Azure. // -// resourceGroupName is the name of the resource group within the user's -// subscription. The name is case insensitive. accountName is the name of the -// storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case -// letters only. +// resourceGroupName is the name of the resource group within the user's subscription. The name is case insensitive. +// accountName is the name of the storage account within the specified resource group. Storage account names must be +// between 3 and 24 characters in length and use numbers and lower-case letters only. 
func (client AccountsClient) Delete(resourceGroupName string, accountName string) (result autorest.Response, err error) { if err := validation.Validate([]validation.Validation{ {TargetValue: resourceGroupName, @@ -279,7 +270,7 @@ func (client AccountsClient) DeletePreparer(resourceGroupName string, accountNam "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2016-12-01" + const APIVersion = "2017-06-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -310,15 +301,12 @@ func (client AccountsClient) DeleteResponder(resp *http.Response) (result autore return } -// GetProperties returns the properties for the specified storage account -// including but not limited to name, SKU name, location, and account status. -// The ListKeys operation should be used to retrieve storage keys. +// GetProperties returns the properties for the specified storage account including but not limited to name, SKU name, +// location, and account status. The ListKeys operation should be used to retrieve storage keys. // -// resourceGroupName is the name of the resource group within the user's -// subscription. The name is case insensitive. accountName is the name of the -// storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case -// letters only. +// resourceGroupName is the name of the resource group within the user's subscription. The name is case insensitive. +// accountName is the name of the storage account within the specified resource group. Storage account names must be +// between 3 and 24 characters in length and use numbers and lower-case letters only. func (client AccountsClient) GetProperties(resourceGroupName string, accountName string) (result Account, err error) { if err := validation.Validate([]validation.Validation{ {TargetValue: resourceGroupName, @@ -360,7 +348,7 @@ func (client AccountsClient) GetPropertiesPreparer(resourceGroupName string, acc "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2016-12-01" + const APIVersion = "2017-06-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -392,8 +380,8 @@ func (client AccountsClient) GetPropertiesResponder(resp *http.Response) (result return } -// List lists all the storage accounts available under the subscription. Note -// that storage keys are not returned; use the ListKeys operation for this. +// List lists all the storage accounts available under the subscription. Note that storage keys are not returned; use +// the ListKeys operation for this. func (client AccountsClient) List() (result AccountListResult, err error) { req, err := client.ListPreparer() if err != nil { @@ -422,7 +410,7 @@ func (client AccountsClient) ListPreparer() (*http.Request, error) { "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2016-12-01" + const APIVersion = "2017-06-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -456,12 +444,10 @@ func (client AccountsClient) ListResponder(resp *http.Response) (result AccountL // ListAccountSAS list SAS credentials of a storage account. // -// resourceGroupName is the name of the resource group within the user's -// subscription. The name is case insensitive. accountName is the name of the -// storage account within the specified resource group. 
Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case -// letters only. parameters is the parameters to provide to list SAS -// credentials for the storage account. +// resourceGroupName is the name of the resource group within the user's subscription. The name is case insensitive. +// accountName is the name of the storage account within the specified resource group. Storage account names must be +// between 3 and 24 characters in length and use numbers and lower-case letters only. parameters is the parameters to +// provide to list SAS credentials for the storage account. func (client AccountsClient) ListAccountSAS(resourceGroupName string, accountName string, parameters AccountSasParameters) (result ListAccountSasResponse, err error) { if err := validation.Validate([]validation.Validation{ {TargetValue: resourceGroupName, @@ -505,7 +491,7 @@ func (client AccountsClient) ListAccountSASPreparer(resourceGroupName string, ac "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2016-12-01" + const APIVersion = "2017-06-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -539,12 +525,10 @@ func (client AccountsClient) ListAccountSASResponder(resp *http.Response) (resul return } -// ListByResourceGroup lists all the storage accounts available under the given -// resource group. Note that storage keys are not returned; use the ListKeys -// operation for this. +// ListByResourceGroup lists all the storage accounts available under the given resource group. Note that storage keys +// are not returned; use the ListKeys operation for this. // -// resourceGroupName is the name of the resource group within the user's -// subscription. The name is case insensitive. +// resourceGroupName is the name of the resource group within the user's subscription. The name is case insensitive. func (client AccountsClient) ListByResourceGroup(resourceGroupName string) (result AccountListResult, err error) { if err := validation.Validate([]validation.Validation{ {TargetValue: resourceGroupName, @@ -582,7 +566,7 @@ func (client AccountsClient) ListByResourceGroupPreparer(resourceGroupName strin "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2016-12-01" + const APIVersion = "2017-06-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -616,11 +600,9 @@ func (client AccountsClient) ListByResourceGroupResponder(resp *http.Response) ( // ListKeys lists the access keys for the specified storage account. // -// resourceGroupName is the name of the resource group within the user's -// subscription. The name is case insensitive. accountName is the name of the -// storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case -// letters only. +// resourceGroupName is the name of the resource group within the user's subscription. The name is case insensitive. +// accountName is the name of the storage account within the specified resource group. Storage account names must be +// between 3 and 24 characters in length and use numbers and lower-case letters only. 
func (client AccountsClient) ListKeys(resourceGroupName string, accountName string) (result AccountListKeysResult, err error) { if err := validation.Validate([]validation.Validation{ {TargetValue: resourceGroupName, @@ -662,7 +644,7 @@ func (client AccountsClient) ListKeysPreparer(resourceGroupName string, accountN "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2016-12-01" + const APIVersion = "2017-06-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -696,12 +678,10 @@ func (client AccountsClient) ListKeysResponder(resp *http.Response) (result Acco // ListServiceSAS list service SAS credentials of a specific resource. // -// resourceGroupName is the name of the resource group within the user's -// subscription. The name is case insensitive. accountName is the name of the -// storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case -// letters only. parameters is the parameters to provide to list service SAS -// credentials. +// resourceGroupName is the name of the resource group within the user's subscription. The name is case insensitive. +// accountName is the name of the storage account within the specified resource group. Storage account names must be +// between 3 and 24 characters in length and use numbers and lower-case letters only. parameters is the parameters to +// provide to list service SAS credentials. func (client AccountsClient) ListServiceSAS(resourceGroupName string, accountName string, parameters ServiceSasParameters) (result ListServiceSasResponse, err error) { if err := validation.Validate([]validation.Validation{ {TargetValue: resourceGroupName, @@ -747,7 +727,7 @@ func (client AccountsClient) ListServiceSASPreparer(resourceGroupName string, ac "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2016-12-01" + const APIVersion = "2017-06-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -781,15 +761,12 @@ func (client AccountsClient) ListServiceSASResponder(resp *http.Response) (resul return } -// RegenerateKey regenerates one of the access keys for the specified storage -// account. +// RegenerateKey regenerates one of the access keys for the specified storage account. // -// resourceGroupName is the name of the resource group within the user's -// subscription. The name is case insensitive. accountName is the name of the -// storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case -// letters only. regenerateKey is specifies name of the key which should be -// regenerated -- key1 or key2. +// resourceGroupName is the name of the resource group within the user's subscription. The name is case insensitive. +// accountName is the name of the storage account within the specified resource group. Storage account names must be +// between 3 and 24 characters in length and use numbers and lower-case letters only. regenerateKey is specifies name +// of the key which should be regenerated -- key1 or key2. 
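A brief sketch of the call this comment describes, regenerating the first key. The resource names are placeholders, credential setup is omitted, and to.StringPtr is the go-autorest "to" helper, not something introduced by this diff.

``` go
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/arm/storage"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	client := storage.NewAccountsClient("<subscription-id>") // authorizer/credential setup omitted
	_, err := client.RegenerateKey("example-rg", "examplestorageacct",
		storage.AccountRegenerateKeyParameters{KeyName: to.StringPtr("key1")})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("key1 regenerated")
}
```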
func (client AccountsClient) RegenerateKey(resourceGroupName string, accountName string, regenerateKey AccountRegenerateKeyParameters) (result AccountListKeysResult, err error) { if err := validation.Validate([]validation.Validation{ {TargetValue: resourceGroupName, @@ -833,7 +810,7 @@ func (client AccountsClient) RegenerateKeyPreparer(resourceGroupName string, acc "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2016-12-01" + const APIVersion = "2017-06-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -867,23 +844,17 @@ func (client AccountsClient) RegenerateKeyResponder(resp *http.Response) (result return } -// Update the update operation can be used to update the SKU, encryption, -// access tier, or tags for a storage account. It can also be used to map the -// account to a custom domain. Only one custom domain is supported per storage -// account; the replacement/change of custom domain is not supported. In order -// to replace an old custom domain, the old value must be cleared/unregistered -// before a new value can be set. The update of multiple properties is -// supported. This call does not change the storage keys for the account. If -// you want to change the storage account keys, use the regenerate keys -// operation. The location and name of the storage account cannot be changed -// after creation. +// Update the update operation can be used to update the SKU, encryption, access tier, or tags for a storage account. +// It can also be used to map the account to a custom domain. Only one custom domain is supported per storage account; +// the replacement/change of custom domain is not supported. In order to replace an old custom domain, the old value +// must be cleared/unregistered before a new value can be set. The update of multiple properties is supported. This +// call does not change the storage keys for the account. If you want to change the storage account keys, use the +// regenerate keys operation. The location and name of the storage account cannot be changed after creation. // -// resourceGroupName is the name of the resource group within the user's -// subscription. The name is case insensitive. accountName is the name of the -// storage account within the specified resource group. Storage account names -// must be between 3 and 24 characters in length and use numbers and lower-case -// letters only. parameters is the parameters to provide for the updated -// account. +// resourceGroupName is the name of the resource group within the user's subscription. The name is case insensitive. +// accountName is the name of the storage account within the specified resource group. Storage account names must be +// between 3 and 24 characters in length and use numbers and lower-case letters only. parameters is the parameters to +// provide for the updated account. 
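Following the Update description above, a minimal sketch of one such update, turning on HTTPS-only traffic through the embedded AccountPropertiesUpdateParameters; names are placeholders and credential setup is omitted.

``` go
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/arm/storage"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	client := storage.NewAccountsClient("<subscription-id>") // authorizer/credential setup omitted
	params := storage.AccountUpdateParameters{
		AccountPropertiesUpdateParameters: &storage.AccountPropertiesUpdateParameters{
			EnableHTTPSTrafficOnly: to.BoolPtr(true),
		},
	}
	if _, err := client.Update("example-rg", "examplestorageacct", params); err != nil {
		log.Fatal(err)
	}
}
```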
func (client AccountsClient) Update(resourceGroupName string, accountName string, parameters AccountUpdateParameters) (result Account, err error) { if err := validation.Validate([]validation.Validation{ {TargetValue: resourceGroupName, @@ -925,7 +896,7 @@ func (client AccountsClient) UpdatePreparer(resourceGroupName string, accountNam "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2016-12-01" + const APIVersion = "2017-06-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/client.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/client.go index a537bdd25ec0..133386ddbeb6 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/client.go @@ -1,5 +1,4 @@ -// Package storage implements the Azure ARM Storage service API version -// 2016-12-01. +// Package storage implements the Azure ARM Storage service API version 2017-06-01. // // The Azure Storage Management API. package storage @@ -18,9 +17,8 @@ package storage // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/Azure/go-autorest/autorest" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/models.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/models.go index 2e203018428e..37c1679ee845 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/models.go @@ -14,9 +14,8 @@ package storage // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/Azure/go-autorest/autorest" @@ -43,6 +42,38 @@ const ( Unavailable AccountStatus = "unavailable" ) +// Action enumerates the values for action. +type Action string + +const ( + // Allow specifies the allow state for action. + Allow Action = "Allow" +) + +// Bypass enumerates the values for bypass. +type Bypass string + +const ( + // AzureServices specifies the azure services state for bypass. + AzureServices Bypass = "AzureServices" + // Logging specifies the logging state for bypass. + Logging Bypass = "Logging" + // Metrics specifies the metrics state for bypass. + Metrics Bypass = "Metrics" + // None specifies the none state for bypass. + None Bypass = "None" +) + +// DefaultAction enumerates the values for default action. +type DefaultAction string + +const ( + // DefaultActionAllow specifies the default action allow state for default action. + DefaultActionAllow DefaultAction = "Allow" + // DefaultActionDeny specifies the default action deny state for default action. + DefaultActionDeny DefaultAction = "Deny" +) + // HTTPProtocol enumerates the values for http protocol. 
type HTTPProtocol string @@ -63,6 +94,16 @@ const ( Read KeyPermission = "Read" ) +// KeySource enumerates the values for key source. +type KeySource string + +const ( + // MicrosoftKeyvault specifies the microsoft keyvault state for key source. + MicrosoftKeyvault KeySource = "Microsoft.Keyvault" + // MicrosoftStorage specifies the microsoft storage state for key source. + MicrosoftStorage KeySource = "Microsoft.Storage" +) + // Kind enumerates the values for kind. type Kind string @@ -95,28 +136,6 @@ const ( W Permissions = "w" ) -// Permissions1 enumerates the values for permissions 1. -type Permissions1 string - -const ( - // Permissions1A specifies the permissions 1a state for permissions 1. - Permissions1A Permissions1 = "a" - // Permissions1C specifies the permissions 1c state for permissions 1. - Permissions1C Permissions1 = "c" - // Permissions1D specifies the permissions 1d state for permissions 1. - Permissions1D Permissions1 = "d" - // Permissions1L specifies the permissions 1l state for permissions 1. - Permissions1L Permissions1 = "l" - // Permissions1P specifies the permissions 1p state for permissions 1. - Permissions1P Permissions1 = "p" - // Permissions1R specifies the permissions 1r state for permissions 1. - Permissions1R Permissions1 = "r" - // Permissions1U specifies the permissions 1u state for permissions 1. - Permissions1U Permissions1 = "u" - // Permissions1W specifies the permissions 1w state for permissions 1. - Permissions1W Permissions1 = "w" -) - // ProvisioningState enumerates the values for provisioning state. type ProvisioningState string @@ -139,30 +158,14 @@ const ( AlreadyExists Reason = "AlreadyExists" ) -// ResourceEnum enumerates the values for resource enum. -type ResourceEnum string +// ReasonCode enumerates the values for reason code. +type ReasonCode string const ( - // ResourceEnumB specifies the resource enum b state for resource enum. - ResourceEnumB ResourceEnum = "b" - // ResourceEnumC specifies the resource enum c state for resource enum. - ResourceEnumC ResourceEnum = "c" - // ResourceEnumF specifies the resource enum f state for resource enum. - ResourceEnumF ResourceEnum = "f" - // ResourceEnumS specifies the resource enum s state for resource enum. - ResourceEnumS ResourceEnum = "s" -) - -// ResourceTypes enumerates the values for resource types. -type ResourceTypes string - -const ( - // ResourceTypesC specifies the resource types c state for resource types. - ResourceTypesC ResourceTypes = "c" - // ResourceTypesO specifies the resource types o state for resource types. - ResourceTypesO ResourceTypes = "o" - // ResourceTypesS specifies the resource types s state for resource types. - ResourceTypesS ResourceTypes = "s" + // NotAvailableForSubscription specifies the not available for subscription state for reason code. + NotAvailableForSubscription ReasonCode = "NotAvailableForSubscription" + // QuotaID specifies the quota id state for reason code. + QuotaID ReasonCode = "QuotaId" ) // Services enumerates the values for services. @@ -179,6 +182,32 @@ const ( T Services = "t" ) +// SignedResource enumerates the values for signed resource. +type SignedResource string + +const ( + // SignedResourceB specifies the signed resource b state for signed resource. + SignedResourceB SignedResource = "b" + // SignedResourceC specifies the signed resource c state for signed resource. + SignedResourceC SignedResource = "c" + // SignedResourceF specifies the signed resource f state for signed resource. 
+ SignedResourceF SignedResource = "f" + // SignedResourceS specifies the signed resource s state for signed resource. + SignedResourceS SignedResource = "s" +) + +// SignedResourceTypes enumerates the values for signed resource types. +type SignedResourceTypes string + +const ( + // SignedResourceTypesC specifies the signed resource types c state for signed resource types. + SignedResourceTypesC SignedResourceTypes = "c" + // SignedResourceTypesO specifies the signed resource types o state for signed resource types. + SignedResourceTypesO SignedResourceTypes = "o" + // SignedResourceTypesS specifies the signed resource types s state for signed resource types. + SignedResourceTypesS SignedResourceTypes = "s" +) + // SkuName enumerates the values for sku name. type SkuName string @@ -205,6 +234,22 @@ const ( Standard SkuTier = "Standard" ) +// State enumerates the values for state. +type State string + +const ( + // StateDeprovisioning specifies the state deprovisioning state for state. + StateDeprovisioning State = "deprovisioning" + // StateFailed specifies the state failed state for state. + StateFailed State = "failed" + // StateNetworkSourceDeleted specifies the state network source deleted state for state. + StateNetworkSourceDeleted State = "networkSourceDeleted" + // StateProvisioning specifies the state provisioning state for state. + StateProvisioning State = "provisioning" + // StateSucceeded specifies the state succeeded state for state. + StateSucceeded State = "succeeded" +) + // UsageUnit enumerates the values for usage unit. type UsageUnit string @@ -233,23 +278,23 @@ type Account struct { Tags *map[string]*string `json:"tags,omitempty"` Sku *Sku `json:"sku,omitempty"` Kind Kind `json:"kind,omitempty"` + Identity *Identity `json:"identity,omitempty"` *AccountProperties `json:"properties,omitempty"` } -// AccountCheckNameAvailabilityParameters is the parameters used to check the -// availabity of the storage account name. +// AccountCheckNameAvailabilityParameters is the parameters used to check the availabity of the storage account name. type AccountCheckNameAvailabilityParameters struct { Name *string `json:"name,omitempty"` Type *string `json:"type,omitempty"` } -// AccountCreateParameters is the parameters used when creating a storage -// account. +// AccountCreateParameters is the parameters used when creating a storage account. type AccountCreateParameters struct { Sku *Sku `json:"sku,omitempty"` Kind Kind `json:"kind,omitempty"` Location *string `json:"location,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` + Identity *Identity `json:"identity,omitempty"` *AccountPropertiesCreateParameters `json:"properties,omitempty"` } @@ -287,50 +332,49 @@ type AccountProperties struct { Encryption *Encryption `json:"encryption,omitempty"` AccessTier AccessTier `json:"accessTier,omitempty"` EnableHTTPSTrafficOnly *bool `json:"supportsHttpsTrafficOnly,omitempty"` + NetworkRuleSet *NetworkRuleSet `json:"networkAcls,omitempty"` } -// AccountPropertiesCreateParameters is the parameters used to create the -// storage account. +// AccountPropertiesCreateParameters is the parameters used to create the storage account. 
type AccountPropertiesCreateParameters struct { - CustomDomain *CustomDomain `json:"customDomain,omitempty"` - Encryption *Encryption `json:"encryption,omitempty"` - AccessTier AccessTier `json:"accessTier,omitempty"` - EnableHTTPSTrafficOnly *bool `json:"supportsHttpsTrafficOnly,omitempty"` + CustomDomain *CustomDomain `json:"customDomain,omitempty"` + Encryption *Encryption `json:"encryption,omitempty"` + NetworkRuleSet *NetworkRuleSet `json:"networkAcls,omitempty"` + AccessTier AccessTier `json:"accessTier,omitempty"` + EnableHTTPSTrafficOnly *bool `json:"supportsHttpsTrafficOnly,omitempty"` } -// AccountPropertiesUpdateParameters is the parameters used when updating a -// storage account. +// AccountPropertiesUpdateParameters is the parameters used when updating a storage account. type AccountPropertiesUpdateParameters struct { - CustomDomain *CustomDomain `json:"customDomain,omitempty"` - Encryption *Encryption `json:"encryption,omitempty"` - AccessTier AccessTier `json:"accessTier,omitempty"` - EnableHTTPSTrafficOnly *bool `json:"supportsHttpsTrafficOnly,omitempty"` + CustomDomain *CustomDomain `json:"customDomain,omitempty"` + Encryption *Encryption `json:"encryption,omitempty"` + AccessTier AccessTier `json:"accessTier,omitempty"` + EnableHTTPSTrafficOnly *bool `json:"supportsHttpsTrafficOnly,omitempty"` + NetworkRuleSet *NetworkRuleSet `json:"networkAcls,omitempty"` } -// AccountRegenerateKeyParameters is the parameters used to regenerate the -// storage account key. +// AccountRegenerateKeyParameters is the parameters used to regenerate the storage account key. type AccountRegenerateKeyParameters struct { KeyName *string `json:"keyName,omitempty"` } -// AccountSasParameters is the parameters to list SAS credentials of a storage -// account. +// AccountSasParameters is the parameters to list SAS credentials of a storage account. type AccountSasParameters struct { - Services Services `json:"signedServices,omitempty"` - ResourceTypes ResourceTypes `json:"signedResourceTypes,omitempty"` - Permissions Permissions `json:"signedPermission,omitempty"` - IPAddressOrRange *string `json:"signedIp,omitempty"` - Protocols HTTPProtocol `json:"signedProtocol,omitempty"` - SharedAccessStartTime *date.Time `json:"signedStart,omitempty"` - SharedAccessExpiryTime *date.Time `json:"signedExpiry,omitempty"` - KeyToSign *string `json:"keyToSign,omitempty"` -} - -// AccountUpdateParameters is the parameters that can be provided when updating -// the storage account properties. + Services Services `json:"signedServices,omitempty"` + ResourceTypes SignedResourceTypes `json:"signedResourceTypes,omitempty"` + Permissions Permissions `json:"signedPermission,omitempty"` + IPAddressOrRange *string `json:"signedIp,omitempty"` + Protocols HTTPProtocol `json:"signedProtocol,omitempty"` + SharedAccessStartTime *date.Time `json:"signedStart,omitempty"` + SharedAccessExpiryTime *date.Time `json:"signedExpiry,omitempty"` + KeyToSign *string `json:"keyToSign,omitempty"` +} + +// AccountUpdateParameters is the parameters that can be provided when updating the storage account properties. type AccountUpdateParameters struct { Sku *Sku `json:"sku,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` + Identity *Identity `json:"identity,omitempty"` *AccountPropertiesUpdateParameters `json:"properties,omitempty"` } @@ -342,21 +386,26 @@ type CheckNameAvailabilityResult struct { Message *string `json:"message,omitempty"` } -// CustomDomain is the custom domain assigned to this storage account. 
This can -// be set via Update. +// CustomDomain is the custom domain assigned to this storage account. This can be set via Update. type CustomDomain struct { Name *string `json:"name,omitempty"` UseSubDomain *bool `json:"useSubDomain,omitempty"` } +// Dimension is dimension of blobs, possiblly be blob type or access tier. +type Dimension struct { + Name *string `json:"name,omitempty"` + DisplayName *string `json:"displayName,omitempty"` +} + // Encryption is the encryption settings on the storage account. type Encryption struct { - Services *EncryptionServices `json:"services,omitempty"` - KeySource *string `json:"keySource,omitempty"` + Services *EncryptionServices `json:"services,omitempty"` + KeySource KeySource `json:"keySource,omitempty"` + KeyVaultProperties *KeyVaultProperties `json:"keyvaultproperties,omitempty"` } -// EncryptionService is a service that allows server-side encryption to be -// used. +// EncryptionService is a service that allows server-side encryption to be used. type EncryptionService struct { Enabled *bool `json:"enabled,omitempty"` LastEnabledTime *date.Time `json:"lastEnabledTime,omitempty"` @@ -370,8 +419,7 @@ type EncryptionServices struct { Queue *EncryptionService `json:"queue,omitempty"` } -// Endpoints is the URIs that are used to perform a retrieval of a public blob, -// queue, or table object. +// Endpoints is the URIs that are used to perform a retrieval of a public blob, queue, or table object. type Endpoints struct { Blob *string `json:"blob,omitempty"` Queue *string `json:"queue,omitempty"` @@ -379,19 +427,86 @@ type Endpoints struct { File *string `json:"file,omitempty"` } +// Identity is identity for the resource. +type Identity struct { + PrincipalID *string `json:"principalId,omitempty"` + TenantID *string `json:"tenantId,omitempty"` + Type *string `json:"type,omitempty"` +} + +// IPRule is IP rule with specific IP or IP range in CIDR format. +type IPRule struct { + IPAddressOrRange *string `json:"value,omitempty"` + Action Action `json:"action,omitempty"` +} + +// KeyVaultProperties is properties of key vault. +type KeyVaultProperties struct { + KeyName *string `json:"keyname,omitempty"` + KeyVersion *string `json:"keyversion,omitempty"` + KeyVaultURI *string `json:"keyvaulturi,omitempty"` +} + // ListAccountSasResponse is the List SAS credentials operation response. type ListAccountSasResponse struct { autorest.Response `json:"-"` AccountSasToken *string `json:"accountSasToken,omitempty"` } -// ListServiceSasResponse is the List service SAS credentials operation -// response. +// ListServiceSasResponse is the List service SAS credentials operation response. type ListServiceSasResponse struct { autorest.Response `json:"-"` ServiceSasToken *string `json:"serviceSasToken,omitempty"` } +// MetricSpecification is metric specification of operation. 
+type MetricSpecification struct { + Name *string `json:"name,omitempty"` + DisplayName *string `json:"displayName,omitempty"` + DisplayDescription *string `json:"displayDescription,omitempty"` + Unit *string `json:"unit,omitempty"` + Dimensions *[]Dimension `json:"dimensions,omitempty"` + AggregationType *string `json:"aggregationType,omitempty"` + FillGapWithZero *bool `json:"fillGapWithZero,omitempty"` + Category *string `json:"category,omitempty"` + ResourceIDDimensionNameOverride *string `json:"resourceIdDimensionNameOverride,omitempty"` +} + +// NetworkRuleSet is network rule set +type NetworkRuleSet struct { + Bypass Bypass `json:"bypass,omitempty"` + VirtualNetworkRules *[]VirtualNetworkRule `json:"virtualNetworkRules,omitempty"` + IPRules *[]IPRule `json:"ipRules,omitempty"` + DefaultAction DefaultAction `json:"defaultAction,omitempty"` +} + +// Operation is storage REST API operation definition. +type Operation struct { + Name *string `json:"name,omitempty"` + Display *OperationDisplay `json:"display,omitempty"` + Origin *string `json:"origin,omitempty"` + *OperationProperties `json:"properties,omitempty"` +} + +// OperationDisplay is display metadata associated with the operation. +type OperationDisplay struct { + Provider *string `json:"provider,omitempty"` + Resource *string `json:"resource,omitempty"` + Operation *string `json:"operation,omitempty"` +} + +// OperationListResult is result of the request to list Storage operations. It contains a list of operations and a URL +// link to get the next set of results. +type OperationListResult struct { + autorest.Response `json:"-"` + Value *[]Operation `json:"value,omitempty"` +} + +// OperationProperties is properties of operation, include metric specifications. +type OperationProperties struct { + ServiceSpecification *ServiceSpecification `json:"serviceSpecification,omitempty"` +} + // Resource is describes a storage resource. type Resource struct { ID *string `json:"id,omitempty"` @@ -401,33 +516,62 @@ type Resource struct { Tags *map[string]*string `json:"tags,omitempty"` } -// ServiceSasParameters is the parameters to list service SAS credentials of a -// speicific resource. +// Restriction is the restriction because of which SKU cannot be used. +type Restriction struct { + Type *string `json:"type,omitempty"` + Values *[]string `json:"values,omitempty"` + ReasonCode ReasonCode `json:"reasonCode,omitempty"` +} + +// ServiceSasParameters is the parameters to list service SAS credentials of a speicific resource. 
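The new NetworkRuleSet (serialized as networkAcls), Identity and typed Encryption.KeySource models above can all be supplied at account creation time. Below is a rough sketch under stated assumptions: the Standard_LRS SKU constant, the Storage kind constant and the "SystemAssigned" identity type come from the wider SDK and ARM API rather than from this diff, names are placeholders, and credential setup is omitted.

``` go
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/arm/storage"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	client := storage.NewAccountsClient("<subscription-id>") // authorizer/credential setup omitted

	params := storage.AccountCreateParameters{
		Sku:      &storage.Sku{Name: storage.StandardLRS}, // SKU constant assumed from the full package
		Kind:     storage.Storage,                         // kind constant assumed from the full package
		Location: to.StringPtr("westus"),
		// Identity.Type is required whenever Identity is set (see the Create validation earlier);
		// "SystemAssigned" is assumed to be the managed-identity type value.
		Identity: &storage.Identity{Type: to.StringPtr("SystemAssigned")},
		AccountPropertiesCreateParameters: &storage.AccountPropertiesCreateParameters{
			EnableHTTPSTrafficOnly: to.BoolPtr(true),
			Encryption: &storage.Encryption{
				KeySource: storage.MicrosoftStorage, // typed KeySource introduced in this version
			},
			NetworkRuleSet: &storage.NetworkRuleSet{
				Bypass:        storage.AzureServices,
				DefaultAction: storage.DefaultActionDeny,
				IPRules: &[]storage.IPRule{
					{IPAddressOrRange: to.StringPtr("203.0.113.0/24"), Action: storage.Allow},
				},
			},
		},
	}

	accountChan, errChan := client.Create("example-rg", "examplestorageacct", params, nil)
	account := <-accountChan
	if err := <-errChan; err != nil {
		log.Fatal(err)
	}
	log.Printf("created account: %+v", account)
}
```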
type ServiceSasParameters struct { - CanonicalizedResource *string `json:"canonicalizedResource,omitempty"` - Resource Resource `json:"signedResource,omitempty"` - Permissions Permissions `json:"signedPermission,omitempty"` - IPAddressOrRange *string `json:"signedIp,omitempty"` - Protocols HTTPProtocol `json:"signedProtocol,omitempty"` - SharedAccessStartTime *date.Time `json:"signedStart,omitempty"` - SharedAccessExpiryTime *date.Time `json:"signedExpiry,omitempty"` - Identifier *string `json:"signedIdentifier,omitempty"` - PartitionKeyStart *string `json:"startPk,omitempty"` - PartitionKeyEnd *string `json:"endPk,omitempty"` - RowKeyStart *string `json:"startRk,omitempty"` - RowKeyEnd *string `json:"endRk,omitempty"` - KeyToSign *string `json:"keyToSign,omitempty"` - CacheControl *string `json:"rscc,omitempty"` - ContentDisposition *string `json:"rscd,omitempty"` - ContentEncoding *string `json:"rsce,omitempty"` - ContentLanguage *string `json:"rscl,omitempty"` - ContentType *string `json:"rsct,omitempty"` + CanonicalizedResource *string `json:"canonicalizedResource,omitempty"` + Resource SignedResource `json:"signedResource,omitempty"` + Permissions Permissions `json:"signedPermission,omitempty"` + IPAddressOrRange *string `json:"signedIp,omitempty"` + Protocols HTTPProtocol `json:"signedProtocol,omitempty"` + SharedAccessStartTime *date.Time `json:"signedStart,omitempty"` + SharedAccessExpiryTime *date.Time `json:"signedExpiry,omitempty"` + Identifier *string `json:"signedIdentifier,omitempty"` + PartitionKeyStart *string `json:"startPk,omitempty"` + PartitionKeyEnd *string `json:"endPk,omitempty"` + RowKeyStart *string `json:"startRk,omitempty"` + RowKeyEnd *string `json:"endRk,omitempty"` + KeyToSign *string `json:"keyToSign,omitempty"` + CacheControl *string `json:"rscc,omitempty"` + ContentDisposition *string `json:"rscd,omitempty"` + ContentEncoding *string `json:"rsce,omitempty"` + ContentLanguage *string `json:"rscl,omitempty"` + ContentType *string `json:"rsct,omitempty"` +} + +// ServiceSpecification is one property of operation, include metric specifications. +type ServiceSpecification struct { + MetricSpecifications *[]MetricSpecification `json:"metricSpecifications,omitempty"` } // Sku is the SKU of the storage account. type Sku struct { - Name SkuName `json:"name,omitempty"` - Tier SkuTier `json:"tier,omitempty"` + Name SkuName `json:"name,omitempty"` + Tier SkuTier `json:"tier,omitempty"` + ResourceType *string `json:"resourceType,omitempty"` + Kind Kind `json:"kind,omitempty"` + Locations *[]string `json:"locations,omitempty"` + Capabilities *[]SKUCapability `json:"capabilities,omitempty"` + Restrictions *[]Restriction `json:"restrictions,omitempty"` +} + +// SKUCapability is the capability information in the specified sku, including file encryption, network acls, change +// notification, etc. +type SKUCapability struct { + Name *string `json:"name,omitempty"` + Value *string `json:"value,omitempty"` +} + +// SkuListResult is the response from the List Storage SKUs operation. +type SkuListResult struct { + autorest.Response `json:"-"` + Value *[]Sku `json:"value,omitempty"` } // Usage is describes Storage Resource Usage. @@ -444,9 +588,15 @@ type UsageListResult struct { Value *[]Usage `json:"value,omitempty"` } -// UsageName is the usage names that can be used; currently limited to -// StorageAccount. +// UsageName is the usage names that can be used; currently limited to StorageAccount. 
type UsageName struct { Value *string `json:"value,omitempty"` LocalizedValue *string `json:"localizedValue,omitempty"` } + +// VirtualNetworkRule is virtual Network rule. +type VirtualNetworkRule struct { + VirtualNetworkResourceID *string `json:"id,omitempty"` + Action Action `json:"action,omitempty"` + State State `json:"state,omitempty"` +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/operations.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/operations.go new file mode 100644 index 000000000000..cc46c6997920 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/operations.go @@ -0,0 +1,96 @@ +package storage + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// OperationsClient is the the Azure Storage Management API. +type OperationsClient struct { + ManagementClient +} + +// NewOperationsClient creates an instance of the OperationsClient client. +func NewOperationsClient(subscriptionID string) OperationsClient { + return NewOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewOperationsClientWithBaseURI creates an instance of the OperationsClient client. +func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient { + return OperationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List lists all of the available Storage Rest API operations. +func (client OperationsClient) List() (result OperationListResult, err error) { + req, err := client.ListPreparer() + if err != nil { + err = autorest.NewErrorWithError(err, "storage.OperationsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.OperationsClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.OperationsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client OperationsClient) ListPreparer() (*http.Request, error) { + const APIVersion = "2017-06-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/providers/Microsoft.Storage/operations"), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. 
+func (client OperationsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client OperationsClient) ListResponder(resp *http.Response) (result OperationListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/skus.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/skus.go new file mode 100644 index 000000000000..94d4d6f83ec5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/skus.go @@ -0,0 +1,100 @@ +package storage + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// SkusClient is the the Azure Storage Management API. +type SkusClient struct { + ManagementClient +} + +// NewSkusClient creates an instance of the SkusClient client. +func NewSkusClient(subscriptionID string) SkusClient { + return NewSkusClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewSkusClientWithBaseURI creates an instance of the SkusClient client. +func NewSkusClientWithBaseURI(baseURI string, subscriptionID string) SkusClient { + return SkusClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List lists the available SKUs supported by Microsoft.Storage for given subscription. +func (client SkusClient) List() (result SkuListResult, err error) { + req, err := client.ListPreparer() + if err != nil { + err = autorest.NewErrorWithError(err, "storage.SkusClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.SkusClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.SkusClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
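For completeness, a short sketch of the new Skus client in use; the subscription ID is a placeholder and credential setup is omitted.

``` go
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/arm/storage"
)

func main() {
	skusClient := storage.NewSkusClient("<subscription-id>") // authorizer/credential setup omitted
	result, err := skusClient.List()
	if err != nil {
		log.Fatal(err)
	}
	if result.Value != nil {
		for _, sku := range *result.Value {
			log.Printf("sku %s, tier %s", sku.Name, sku.Tier)
		}
	}
}
```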
+func (client SkusClient) ListPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-06-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/skus", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client SkusClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client SkusClient) ListResponder(resp *http.Response) (result SkuListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/usage.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/usage.go index b12a6d315fda..682e5c16c362 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/usage.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/usage.go @@ -14,9 +14,8 @@ package storage // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/Azure/go-autorest/autorest" @@ -39,8 +38,7 @@ func NewUsageClientWithBaseURI(baseURI string, subscriptionID string) UsageClien return UsageClient{NewWithBaseURI(baseURI, subscriptionID)} } -// List gets the current usage count and the limit for the resources under the -// subscription. +// List gets the current usage count and the limit for the resources under the subscription. func (client UsageClient) List() (result UsageListResult, err error) { req, err := client.ListPreparer() if err != nil { @@ -69,7 +67,7 @@ func (client UsageClient) ListPreparer() (*http.Request, error) { "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2016-12-01" + const APIVersion = "2017-06-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/version.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/version.go index ac97c159fa6d..467102d59734 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/version.go @@ -14,16 +14,15 @@ package storage // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 -// Changes may cause incorrect behavior and will be lost if the code is -// regenerated. 
+// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. // UserAgent returns the UserAgent string to use when sending http.Requests. func UserAgent() string { - return "Azure-SDK-For-Go/v10.0.2-beta arm-storage/2016-12-01" + return "Azure-SDK-For-Go/v11.0.0-beta arm-storage/2017-06-01" } // Version returns the semantic version (see http://semver.org) of the client. func Version() string { - return "v10.0.2-beta" + return "v11.0.0-beta" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md b/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md new file mode 100644 index 000000000000..6dc348e02af2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md @@ -0,0 +1,73 @@ +# Azure Storage SDK for Go + +The `github.com/Azure/azure-sdk-for-go/storage` package is used to perform REST operations against the [Azure Storage Service](https://docs.microsoft.com/en-us/azure/storage/). To manage your storage accounts (Azure Resource Manager / ARM), use the [github.com/Azure/azure-sdk-for-go/arm/storage](https://github.com/Azure/azure-sdk-for-go/tree/master/arm/storage) package. For your classic storage accounts (Azure Service Management / ASM), use [github.com/Azure/azure-sdk-for-go/management/storageservice](https://github.com/Azure/azure-sdk-for-go/tree/master/management/storageservice) package. + +This package includes support for [Azure Storage Emulator](https://azure.microsoft.com/documentation/articles/storage-use-emulator/). + +# Getting Started + + 1. Go get the SDK `go get -u github.com/Azure/azure-sdk-for=go/storage` + 1. If you don't already have one, [create a Storage Account](https://docs.microsoft.com/en-us/azure/storage/storage-create-storage-account). + - Take note of your Azure Storage Account Name and Azure Storage Account Key. They'll both be necessary for using this library. + - This option is production ready, but can also be used for development. + 1. (Optional, Windows only) Download and start the [Azure Storage Emulator](https://azure.microsoft.com/documentation/articles/storage-use-emulator/). + 1. Checkout our existing [samples](https://github.com/Azure-Samples?q=Storage&language=go). + +# Contributing + +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. + +When contributing, please conform to the following practices: + - Run [gofmt](https://golang.org/cmd/gofmt/) to use standard go formatting. + - Run [golint](https://github.com/golang/lint) to conform to standard naming conventions. + - Run [go vet](https://golang.org/cmd/vet/) to catch common Go mistakes. + - Use [GoASTScanner/gas](https://github.com/GoASTScanner/gas) to ensure there are no common security violations in your contribution. + - Run [go test](https://golang.org/cmd/go/#hdr-Test_packages) to catch possible bugs in the code: `go test ./storage/...`. + - This project uses HTTP recordings for testing. + - The recorder should be attached to the client before calling the functions to test and later stopped. + - If you updated an existing test, its recording might need to be updated. Run `go test ./storage/... -ow -check.f TestName` to rerecord the test. 
+ - Important note: all HTTP requests in the recording must be unique: different bodies, headers (`User-Agent`, `Authorization` and `Date` or `x-ms-date` headers are ignored), URLs and methods. As opposed to the example above, the following test is not suitable for recording: + +``` go +func (s *StorageQueueSuite) TestQueueExists(c *chk.C) { +cli := getQueueClient(c) +rec := cli.client.appendRecorder(c) +defer rec.Stop() + +queue := cli.GetQueueReference(queueName(c)) +ok, err := queue.Exists() +c.Assert(err, chk.IsNil) +c.Assert(ok, chk.Equals, false) + +c.Assert(queue.Create(nil), chk.IsNil) +defer queue.Delete(nil) + +ok, err = queue.Exists() // This is the very same request as the one 5 lines above +// The test replayer gets confused and the test fails in the last line +c.Assert(err, chk.IsNil) +c.Assert(ok, chk.Equals, true) +} +``` + + - On the other side, this test does not repeat requests: the URLs are different. + +``` go +func (s *StorageQueueSuite) TestQueueExists(c *chk.C) { +cli := getQueueClient(c) +rec := cli.client.appendRecorder(c) +defer rec.Stop() + +queue1 := cli.GetQueueReference(queueName(c, "nonexistent")) +ok, err := queue1.Exists() +c.Assert(err, chk.IsNil) +c.Assert(ok, chk.Equals, false) + +queue2 := cli.GetQueueReference(queueName(c, "exisiting")) +c.Assert(queue2.Create(nil), chk.IsNil) +defer queue2.Delete(nil) + +ok, err = queue2.Exists() +c.Assert(err, chk.IsNil) +c.Assert(ok, chk.Equals, true) +} +``` \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go index 3292cb556951..8b5b96d48848 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go @@ -1,7 +1,23 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "bytes" + "crypto/md5" + "encoding/base64" "fmt" "net/http" "net/url" @@ -11,6 +27,8 @@ import ( // PutAppendBlob initializes an empty append blob with specified name. An // append blob must be created using this method before appending blocks. // +// See CreateBlockBlobFromReader for more info on creating blobs. +// // See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob func (b *Blob) PutAppendBlob(options *PutBlobOptions) error { params := url.Values{} @@ -29,8 +47,7 @@ func (b *Blob) PutAppendBlob(options *PutBlobOptions) error { if err != nil { return err } - readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) + return b.respondCreation(resp, BlobTypeAppend) } // AppendBlockOptions includes the options for an append block operation @@ -44,6 +61,7 @@ type AppendBlockOptions struct { IfMatch string `header:"If-Match"` IfNoneMatch string `header:"If-None-Match"` RequestID string `header:"x-ms-client-request-id"` + ContentMD5 bool } // AppendBlock appends a block to an append blob. 
@@ -58,6 +76,10 @@ func (b *Blob) AppendBlock(chunk []byte, options *AppendBlockOptions) error { if options != nil { params = addTimeout(params, options.Timeout) headers = mergeHeaders(headers, headersFromStruct(*options)) + if options.ContentMD5 { + md5sum := md5.Sum(chunk) + headers[headerContentMD5] = base64.StdEncoding.EncodeToString(md5sum[:]) + } } uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) @@ -65,6 +87,5 @@ func (b *Blob) AppendBlock(chunk []byte, options *AppendBlockOptions) error { if err != nil { return err } - readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) + return b.respondCreation(resp, BlobTypeAppend) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go index 608bf3133866..76794c305182 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go @@ -1,6 +1,20 @@ // Package storage provides clients for Microsoft Azure Storage Services. package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "bytes" "fmt" @@ -41,16 +55,18 @@ const ( ) func (c *Client) addAuthorizationHeader(verb, url string, headers map[string]string, auth authentication) (map[string]string, error) { - authHeader, err := c.getSharedKey(verb, url, headers, auth) - if err != nil { - return nil, err + if !c.sasClient { + authHeader, err := c.getSharedKey(verb, url, headers, auth) + if err != nil { + return nil, err + } + headers[headerAuthorization] = authHeader } - headers[headerAuthorization] = authHeader return headers, nil } func (c *Client) getSharedKey(verb, url string, headers map[string]string, auth authentication) (string, error) { - canRes, err := c.buildCanonicalizedResource(url, auth) + canRes, err := c.buildCanonicalizedResource(url, auth, false) if err != nil { return "", err } @@ -62,15 +78,18 @@ func (c *Client) getSharedKey(verb, url string, headers map[string]string, auth return c.createAuthorizationHeader(canString, auth), nil } -func (c *Client) buildCanonicalizedResource(uri string, auth authentication) (string, error) { +func (c *Client) buildCanonicalizedResource(uri string, auth authentication, sas bool) (string, error) { errMsg := "buildCanonicalizedResource error: %s" u, err := url.Parse(uri) if err != nil { return "", fmt.Errorf(errMsg, err.Error()) } - cr := bytes.NewBufferString("/") - cr.WriteString(c.getCanonicalizedAccountName()) + cr := bytes.NewBufferString("") + if c.accountName != StorageEmulatorAccountName || !sas { + cr.WriteString("/") + cr.WriteString(c.getCanonicalizedAccountName()) + } if len(u.Path) > 0 { // Any portion of the CanonicalizedResource string that is derived from diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go index dd9eb386cb5a..a9d3cfccb685 
100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "encoding/xml" "errors" @@ -90,7 +104,7 @@ type BlobProperties struct { CacheControl string `xml:"Cache-Control" header:"x-ms-blob-cache-control"` ContentLanguage string `xml:"Cache-Language" header:"x-ms-blob-content-language"` ContentDisposition string `xml:"Content-Disposition" header:"x-ms-blob-content-disposition"` - BlobType BlobType `xml:"x-ms-blob-blob-type"` + BlobType BlobType `xml:"BlobType"` SequenceNumber int64 `xml:"x-ms-blob-sequence-number"` CopyID string `xml:"CopyId"` CopyStatus string `xml:"CopyStatus"` @@ -135,8 +149,7 @@ func (b *Blob) Exists() (bool, error) { } // GetURL gets the canonical URL to the blob with the specified name in the -// specified container. If name is not specified, the canonical URL for the entire -// container is obtained. +// specified container. // This method does not create a publicly accessible URL if the blob or container // is private and this method does not check if the blob exists. func (b *Blob) GetURL() string { @@ -174,11 +187,17 @@ type BlobRange struct { } func (br BlobRange) String() string { + if br.End == 0 { + return fmt.Sprintf("bytes=%d-", br.Start) + } return fmt.Sprintf("bytes=%d-%d", br.Start, br.End) } // Get returns a stream to read the blob. Caller must call both Read and Close() // to correctly close the underlying connection. +// +// See the GetRange method for use with a Range header. 
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Blob func (b *Blob) Get(options *GetBlobOptions) (io.ReadCloser, error) { rangeOptions := GetBlobRangeOptions{ @@ -192,7 +211,7 @@ func (b *Blob) Get(options *GetBlobOptions) (io.ReadCloser, error) { if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { return nil, err } - if err := b.writePropoerties(resp.headers); err != nil { + if err := b.writeProperties(resp.headers, true); err != nil { return resp.body, err } return resp.body, nil @@ -212,7 +231,9 @@ func (b *Blob) GetRange(options *GetBlobRangeOptions) (io.ReadCloser, error) { if err := checkRespCode(resp.statusCode, []int{http.StatusPartialContent}); err != nil { return nil, err } - if err := b.writePropoerties(resp.headers); err != nil { + // Content-Length header should not be updated, as the service returns the range length + // (which is not always the full blob length) + if err := b.writeProperties(resp.headers, false); err != nil { return resp.body, err } return resp.body, nil @@ -225,7 +246,9 @@ func (b *Blob) getRange(options *GetBlobRangeOptions) (*storageResponse, error) { if options != nil { if options.Range != nil { headers["Range"] = options.Range.String() - headers["x-ms-range-get-content-md5"] = fmt.Sprintf("%v", options.GetRangeContentMD5) + if options.GetRangeContentMD5 { + headers["x-ms-range-get-content-md5"] = "true" + } } if options.GetBlobOptions != nil { headers = mergeHeaders(headers, headersFromStruct(*options.GetBlobOptions)) @@ -322,18 +345,20 @@ func (b *Blob) GetProperties(options *GetBlobPropertiesOptions) error { if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { return err } - return b.writePropoerties(resp.headers) + return b.writeProperties(resp.headers, true) } -func (b *Blob) writePropoerties(h http.Header) error { +func (b *Blob) writeProperties(h http.Header, includeContentLen bool) error { var err error - var contentLength int64 - contentLengthStr := h.Get("Content-Length") - if contentLengthStr != "" { - contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64) - if err != nil { - return err + contentLength := b.Properties.ContentLength + if includeContentLen { + contentLengthStr := h.Get("Content-Length") + if contentLengthStr != "" { + contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64) + if err != nil { + return err + } } } @@ -425,8 +450,8 @@ func (b *Blob) SetProperties(options *SetBlobPropertiesOptions) error { uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) if b.Properties.BlobType == BlobTypePage { - headers = addToHeaders(headers, "x-ms-blob-content-length", fmt.Sprintf("byte %v", b.Properties.ContentLength)) - if options != nil || options.SequenceNumberAction != nil { + headers = addToHeaders(headers, "x-ms-blob-content-length", fmt.Sprintf("%v", b.Properties.ContentLength)) + if options != nil && options.SequenceNumberAction != nil { headers = addToHeaders(headers, "x-ms-sequence-number-action", string(*options.SequenceNumberAction)) if *options.SequenceNumberAction != SequenceNumberActionIncrement { headers = addToHeaders(headers, "x-ms-blob-sequence-number", fmt.Sprintf("%v", b.Properties.SequenceNumber)) @@ -615,3 +640,13 @@ func pathForResource(container, name string) string { } return fmt.Sprintf("/%s", container) } + +func (b *Blob) respondCreation(resp *storageResponse, bt BlobType) error { + readAndCloseBody(resp.body) + err := checkRespCode(resp.statusCode, []int{http.StatusCreated}) + if err !=
nil { + return err + } + b.Properties.BlobType = bt + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go index 43173d3a417a..e11af77441e2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "errors" "fmt" @@ -8,68 +22,122 @@ import ( "time" ) -// GetSASURIWithSignedIPAndProtocol creates an URL to the specified blob which contains the Shared -// Access Signature with specified permissions and expiration time. Also includes signedIPRange and allowed protocols. -// If old API version is used but no signedIP is passed (ie empty string) then this should still work. -// We only populate the signedIP when it non-empty. +// OverrideHeaders defines overridable response headers in +// a request using a SAS URI. +// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas +type OverrideHeaders struct { + CacheControl string + ContentDisposition string + ContentEncoding string + ContentLanguage string + ContentType string +} + +// BlobSASOptions are options to construct a blob SAS +// URI. +// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas +type BlobSASOptions struct { + BlobServiceSASPermissions + OverrideHeaders + SASOptions +} + +// BlobServiceSASPermissions includes the available permissions for +// blob service SAS URI. +type BlobServiceSASPermissions struct { + Read bool + Add bool + Create bool + Write bool + Delete bool +} + +func (p BlobServiceSASPermissions) buildString() string { + permissions := "" + if p.Read { + permissions += "r" + } + if p.Add { + permissions += "a" + } + if p.Create { + permissions += "c" + } + if p.Write { + permissions += "w" + } + if p.Delete { + permissions += "d" + } + return permissions +} + +// GetSASURI creates an URL to the blob which contains the Shared +// Access Signature with the specified options. // -// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx -func (b *Blob) GetSASURIWithSignedIPAndProtocol(expiry time.Time, permissions string, signedIPRange string, HTTPSOnly bool) (string, error) { - var ( - signedPermissions = permissions - blobURL = b.GetURL() - ) - canonicalizedResource, err := b.Container.bsc.client.buildCanonicalizedResource(blobURL, b.Container.bsc.auth) +// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas +func (b *Blob) GetSASURI(options BlobSASOptions) (string, error) { + uri := b.GetURL() + signedResource := "b" + canonicalizedResource, err := b.Container.bsc.client.buildCanonicalizedResource(uri, b.Container.bsc.auth, true) if err != nil { return "", err } - // "The canonicalizedresouce portion of the string is a canonical path to the signed resource.
- // It must include the service name (blob, table, queue or file) for version 2015-02-21 or - // later, the storage account name, and the resource name, and must be URL-decoded. - // -- https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx + permissions := options.BlobServiceSASPermissions.buildString() + return b.Container.bsc.client.blobAndFileSASURI(options.SASOptions, uri, permissions, canonicalizedResource, signedResource, options.OverrideHeaders) +} + +func (c *Client) blobAndFileSASURI(options SASOptions, uri, permissions, canonicalizedResource, signedResource string, headers OverrideHeaders) (string, error) { + start := "" + if options.Start != (time.Time{}) { + start = options.Start.UTC().Format(time.RFC3339) + } + + expiry := options.Expiry.UTC().Format(time.RFC3339) // We need to replace + with %2b first to avoid being treated as a space (which is correct for query strings, but not the path component). canonicalizedResource = strings.Replace(canonicalizedResource, "+", "%2b", -1) - canonicalizedResource, err = url.QueryUnescape(canonicalizedResource) + canonicalizedResource, err := url.QueryUnescape(canonicalizedResource) if err != nil { return "", err } - signedExpiry := expiry.UTC().Format(time.RFC3339) - - //If blob name is missing, resource is a container - signedResource := "c" - if len(b.Name) > 0 { - signedResource = "b" - } - - protocols := "https,http" - if HTTPSOnly { + protocols := "" + if options.UseHTTPS { protocols = "https" } - stringToSign, err := blobSASStringToSign(b.Container.bsc.client.apiVersion, canonicalizedResource, signedExpiry, signedPermissions, signedIPRange, protocols) + stringToSign, err := blobSASStringToSign(permissions, start, expiry, canonicalizedResource, options.Identifier, options.IP, protocols, c.apiVersion, headers) if err != nil { return "", err } - sig := b.Container.bsc.client.computeHmac256(stringToSign) + sig := c.computeHmac256(stringToSign) sasParams := url.Values{ - "sv": {b.Container.bsc.client.apiVersion}, - "se": {signedExpiry}, + "sv": {c.apiVersion}, + "se": {expiry}, "sr": {signedResource}, - "sp": {signedPermissions}, + "sp": {permissions}, "sig": {sig}, } - if b.Container.bsc.client.apiVersion >= "2015-04-05" { - sasParams.Add("spr", protocols) - if signedIPRange != "" { - sasParams.Add("sip", signedIPRange) + if c.apiVersion >= "2015-04-05" { + if protocols != "" { + sasParams.Add("spr", protocols) + } + if options.IP != "" { + sasParams.Add("sip", options.IP) } } - sasURL, err := url.Parse(blobURL) + // Add override response headers + addQueryParameter(sasParams, "rscc", headers.CacheControl) + addQueryParameter(sasParams, "rscd", headers.ContentDisposition) + addQueryParameter(sasParams, "rsce", headers.ContentEncoding) + addQueryParameter(sasParams, "rscl", headers.ContentLanguage) + addQueryParameter(sasParams, "rsct", headers.ContentType) + + sasURL, err := url.Parse(uri) if err != nil { return "", err } @@ -77,16 +145,12 @@ func (b *Blob) GetSASURI(expiry time.Time, permissions st return sasURL.String(), nil } -// GetSASURI creates an URL to the specified blob which contains the Shared -// Access Signature with specified permissions and expiration time.
-// -// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx -func (b *Blob) GetSASURI(expiry time.Time, permissions string) (string, error) { - return b.GetSASURIWithSignedIPAndProtocol(expiry, permissions, "", false) -} - -func blobSASStringToSign(signedVersion, canonicalizedResource, signedExpiry, signedPermissions string, signedIP string, protocols string) (string, error) { - var signedStart, signedIdentifier, rscc, rscd, rsce, rscl, rsct string +func blobSASStringToSign(signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion string, headers OverrideHeaders) (string, error) { + rscc := headers.CacheControl + rscd := headers.ContentDisposition + rsce := headers.ContentEncoding + rscl := headers.ContentLanguage + rsct := headers.ContentType if signedVersion >= "2015-02-21" { canonicalizedResource = "/blob" + canonicalizedResource diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go index 450b20f96724..8fe21b0cfd98 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go @@ -1,9 +1,25 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( + "fmt" "net/http" "net/url" "strconv" + "strings" ) // BlobStorageClient contains operations for Microsoft Azure Blob Storage @@ -45,6 +61,21 @@ func (b *BlobStorageClient) GetContainerReference(name string) *Container { } } +// GetContainerReferenceFromSASURI returns a Container object for the specified +// container SASURI +func GetContainerReferenceFromSASURI(sasuri url.URL) (*Container, error) { + path := strings.Split(sasuri.Path, "/") + if len(path) <= 1 { + return nil, fmt.Errorf("could not find a container in URI: %s", sasuri.String()) + } + cli := newSASClient().GetBlobService() + return &Container{ + bsc: &cli, + Name: path[1], + sasuri: sasuri, + }, nil +} + // ListContainers returns the list of containers in a storage account along with // pagination token and other response details. // diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go index 6a180d48d7cb..e0176d664add 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "bytes" "encoding/xml" @@ -68,6 +82,8 @@ type BlockResponse struct { // CreateBlockBlob initializes an empty block blob with no blocks. // +// See CreateBlockBlobFromReader for more info on creating blobs. +// // See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob func (b *Blob) CreateBlockBlob(options *PutBlobOptions) error { return b.CreateBlockBlobFromReader(nil, options) @@ -77,10 +93,17 @@ func (b *Blob) CreateBlockBlob(options *PutBlobOptions) error { // reader. Size must be the number of bytes read from reader. To // create an empty blob, use size==0 and reader==nil. // +// Any headers set in blob.Properties or metadata in blob.Metadata +// will be set on the blob. +// // The API rejects requests with size > 256 MiB (but this limit is not // checked by the SDK). To write a larger blob, use CreateBlockBlob, // PutBlock, and PutBlockList. // +// To create a blob from scratch, call container.GetBlobReference() to +// get an empty blob, fill in blob.Properties and blob.Metadata as +// appropriate then call this method. +// // See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob func (b *Blob) CreateBlockBlobFromReader(blob io.Reader, options *PutBlobOptions) error { params := url.Values{} @@ -91,12 +114,21 @@ func (b *Blob) CreateBlockBlobFromReader(blob io.Reader, options *PutBlobOptions var n int64 var err error if blob != nil { - buf := &bytes.Buffer{} - n, err = io.Copy(buf, blob) - if err != nil { - return err + type lener interface { + Len() int } - blob = buf + // TODO(rjeczalik): handle io.ReadSeeker, in case blob is *os.File etc. + if l, ok := blob.(lener); ok { + n = int64(l.Len()) + } else { + var buf bytes.Buffer + n, err = io.Copy(&buf, blob) + if err != nil { + return err + } + blob = &buf + } + headers["Content-Length"] = strconv.FormatInt(n, 10) } b.Properties.ContentLength = n @@ -114,8 +146,7 @@ func (b *Blob) CreateBlockBlobFromReader(blob io.Reader, options *PutBlobOptions if err != nil { return err } - readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) + return b.respondCreation(resp, BlobTypeBlock) } // PutBlockOptions includes the options for a put block operation @@ -163,8 +194,7 @@ func (b *Blob) PutBlockWithLength(blockID string, size uint64, blob io.Reader, o if err != nil { return err } - readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) + return b.respondCreation(resp, BlobTypeBlock) } // PutBlockListOptions includes the options for a put block list operation diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go index 8671e52eb71f..8f6cd95da718 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go @@ -1,6 +1,20 @@ // Package storage provides clients for Microsoft Azure Storage Services. 
package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "bufio" "bytes" @@ -33,7 +47,9 @@ const ( // basic client is created. DefaultAPIVersion = "2016-05-31" - defaultUseHTTPS = true + defaultUseHTTPS = true + defaultRetryAttempts = 5 + defaultRetryDuration = time.Second * 5 // StorageEmulatorAccountName is the fixed storage account used by Azure Storage Emulator StorageEmulatorAccountName = "devstoreaccount1" @@ -56,7 +72,14 @@ const ( ) var ( - validStorageAccount = regexp.MustCompile("^[0-9a-z]{3,24}$") + validStorageAccount = regexp.MustCompile("^[0-9a-z]{3,24}$") + defaultValidStatusCodes = []int{ + http.StatusRequestTimeout, // 408 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + } ) // Sender sends a request @@ -75,19 +98,13 @@ type DefaultSender struct { // Send is the default retry strategy in the client func (ds *DefaultSender) Send(c *Client, req *http.Request) (resp *http.Response, err error) { - b := []byte{} - if req.Body != nil { - b, err = ioutil.ReadAll(req.Body) + rr := autorest.NewRetriableRequest(req) + for attempts := 0; attempts < ds.RetryAttempts; attempts++ { + err = rr.Prepare() if err != nil { return resp, err } - } - - for attempts := 0; attempts < ds.RetryAttempts; attempts++ { - if len(b) > 0 { - req.Body = ioutil.NopCloser(bytes.NewBuffer(b)) - } - resp, err = c.HTTPClient.Do(req) + resp, err = c.HTTPClient.Do(rr.Request()) if err != nil || !autorest.ResponseHasStatusCode(resp, ds.ValidStatusCodes...) { return resp, err } @@ -118,6 +135,8 @@ type Client struct { baseURL string apiVersion string userAgent string + sasClient bool + accountSASToken url.Values } type storageResponse struct { @@ -212,13 +231,13 @@ func NewEmulatorClient() (Client, error) { // NewClient constructs a Client. This should be used if the caller wants // to specify whether to use HTTPS, a specific REST API version or a custom // storage endpoint than Azure Public Cloud. 
-func NewClient(accountName, accountKey, blobServiceBaseURL, apiVersion string, useHTTPS bool) (Client, error) { +func NewClient(accountName, accountKey, serviceBaseURL, apiVersion string, useHTTPS bool) (Client, error) { var c Client if !IsValidStorageAccount(accountName) { return c, fmt.Errorf("azure: account name is not valid: it must be between 3 and 24 characters, and only may contain numbers and lowercase letters: %v", accountName) } else if accountKey == "" { return c, fmt.Errorf("azure: account key required") - } else if blobServiceBaseURL == "" { + } else if serviceBaseURL == "" { return c, fmt.Errorf("azure: base storage service url required") } @@ -232,19 +251,14 @@ func NewClient(accountName, accountKey, blobServiceBaseURL, apiVersion string, u accountName: accountName, accountKey: key, useHTTPS: useHTTPS, - baseURL: blobServiceBaseURL, + baseURL: serviceBaseURL, apiVersion: apiVersion, + sasClient: false, UseSharedKeyLite: false, Sender: &DefaultSender{ - RetryAttempts: 5, - ValidStatusCodes: []int{ - http.StatusRequestTimeout, // 408 - http.StatusInternalServerError, // 500 - http.StatusBadGateway, // 502 - http.StatusServiceUnavailable, // 503 - http.StatusGatewayTimeout, // 504 - }, - RetryDuration: time.Second * 5, + RetryAttempts: defaultRetryAttempts, + ValidStatusCodes: defaultValidStatusCodes, + RetryDuration: defaultRetryDuration, }, } c.userAgent = c.getDefaultUserAgent() @@ -257,6 +271,43 @@ func IsValidStorageAccount(account string) bool { return validStorageAccount.MatchString(account) } +// NewAccountSASClient constructs a client that uses accountSAS authorization +// for its operations. +func NewAccountSASClient(account string, token url.Values, env azure.Environment) Client { + c := newSASClient() + c.accountSASToken = token + c.accountName = account + c.baseURL = env.StorageEndpointSuffix + + // Get API version and protocol from token + c.apiVersion = token.Get("sv") + c.useHTTPS = token.Get("spr") == "https" + return c +} + +func newSASClient() Client { + c := Client{ + HTTPClient: http.DefaultClient, + apiVersion: DefaultAPIVersion, + sasClient: true, + Sender: &DefaultSender{ + RetryAttempts: defaultRetryAttempts, + ValidStatusCodes: defaultValidStatusCodes, + RetryDuration: defaultRetryDuration, + }, + } + c.userAgent = c.getDefaultUserAgent() + return c +} + +func (c Client) isServiceSASClient() bool { + return c.sasClient && c.accountSASToken == nil +} + +func (c Client) isAccountSASClient() bool { + return c.sasClient && c.accountSASToken != nil +} + func (c Client) getDefaultUserAgent() string { return fmt.Sprintf("Go/%s (%s-%s) azure-storage-go/%s api-version/%s", runtime.Version(), @@ -329,6 +380,164 @@ func (c Client) getEndpoint(service, path string, params url.Values) string { return u.String() } +// AccountSASTokenOptions includes options for constructing +// an account SAS token. +// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas +type AccountSASTokenOptions struct { + APIVersion string + Services Services + ResourceTypes ResourceTypes + Permissions Permissions + Start time.Time + Expiry time.Time + IP string + UseHTTPS bool +} + +// Services specify services accessible with an account SAS. +type Services struct { + Blob bool + Queue bool + Table bool + File bool +} + +// ResourceTypes specify the resources accessible with an +// account SAS. +type ResourceTypes struct { + Service bool + Container bool + Object bool +} + +// Permissions specifies permissions for an accountSAS.
+type Permissions struct { + Read bool + Write bool + Delete bool + List bool + Add bool + Create bool + Update bool + Process bool +} + +// GetAccountSASToken creates an account SAS token +// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas +func (c Client) GetAccountSASToken(options AccountSASTokenOptions) (url.Values, error) { + if options.APIVersion == "" { + options.APIVersion = c.apiVersion + } + + if options.APIVersion < "2015-04-05" { + return url.Values{}, fmt.Errorf("account SAS does not support API versions prior to 2015-04-05. API version : %s", options.APIVersion) + } + + // build services string + services := "" + if options.Services.Blob { + services += "b" + } + if options.Services.Queue { + services += "q" + } + if options.Services.Table { + services += "t" + } + if options.Services.File { + services += "f" + } + + // build resources string + resources := "" + if options.ResourceTypes.Service { + resources += "s" + } + if options.ResourceTypes.Container { + resources += "c" + } + if options.ResourceTypes.Object { + resources += "o" + } + + // build permissions string + permissions := "" + if options.Permissions.Read { + permissions += "r" + } + if options.Permissions.Write { + permissions += "w" + } + if options.Permissions.Delete { + permissions += "d" + } + if options.Permissions.List { + permissions += "l" + } + if options.Permissions.Add { + permissions += "a" + } + if options.Permissions.Create { + permissions += "c" + } + if options.Permissions.Update { + permissions += "u" + } + if options.Permissions.Process { + permissions += "p" + } + + // build start time, if exists + start := "" + if options.Start != (time.Time{}) { + start = options.Start.Format(time.RFC3339) + // For some reason I don't understand, it fails when the rest of the string is included + start = start[:10] + } + + // build expiry time + expiry := options.Expiry.Format(time.RFC3339) + // For some reason I don't understand, it fails when the rest of the string is included + expiry = expiry[:10] + + protocol := "https,http" + if options.UseHTTPS { + protocol = "https" + } + + stringToSign := strings.Join([]string{ + c.accountName, + permissions, + services, + resources, + start, + expiry, + options.IP, + protocol, + options.APIVersion, + "", + }, "\n") + signature := c.computeHmac256(stringToSign) + + sasParams := url.Values{ + "sv": {options.APIVersion}, + "ss": {services}, + "srt": {resources}, + "sp": {permissions}, + "se": {expiry}, + "spr": {protocol}, + "sig": {signature}, + } + if start != "" { + sasParams.Add("st", start) + } + if options.IP != "" { + sasParams.Add("sip", options.IP) + } + + return sasParams, nil +} + // GetBlobService returns a BlobStorageClient which can operate on the blob // service of the storage account. func (c Client) GetBlobService() BlobStorageClient { @@ -404,8 +613,17 @@ func (c Client) exec(verb, url string, headers map[string]string, body io.Reader return nil, errors.New("azure/storage: error creating request: " + err.Error()) } + // if a body was provided ensure that the content length was set. + // http.NewRequest() will automatically do this for a handful of types + // and for those that it doesn't we will handle here. 
+ if body != nil && req.ContentLength < 1 { + if lr, ok := body.(*io.LimitedReader); ok { + setContentLengthFromLimitedReader(req, lr) + } + } + for k, v := range headers { - req.Header.Add(k, v) + req.Header[k] = append(req.Header[k], v) // Must bypass case munging present in `Add` by using map functions directly. See https://github.com/Azure/azure-sdk-for-go/issues/645 } resp, err := c.Sender.Send(&c, req) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/commonsasuri.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/commonsasuri.go new file mode 100644 index 000000000000..e898e9bfaf94 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/commonsasuri.go @@ -0,0 +1,38 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "net/url" + "time" +) + +// SASOptions includes options used by SAS URIs for different +// services and resources. +type SASOptions struct { + APIVersion string + Start time.Time + Expiry time.Time + IP string + UseHTTPS bool + Identifier string +} + +func addQueryParameter(query url.Values, key, value string) url.Values { + if value != "" { + query.Add(key, value) + } + return query +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go index c2c9c055b562..8963c7a89b37 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "encoding/xml" "errors" @@ -18,12 +32,66 @@ type Container struct { Name string `xml:"Name"` Properties ContainerProperties `xml:"Properties"` Metadata map[string]string + sasuri url.URL +} + +// Client returns the HTTP client used by the Container reference. +func (c *Container) Client() *Client { + return &c.bsc.client } func (c *Container) buildPath() string { return fmt.Sprintf("/%s", c.Name) } +// GetURL gets the canonical URL to the container. +// This method does not create a publicly accessible URL if the container +// is private and this method does not check if the blob exists. 
+func (c *Container) GetURL() string { + container := c.Name + if container == "" { + container = "$root" + } + return c.bsc.client.getEndpoint(blobServiceName, pathForResource(container, ""), nil) +} + +// ContainerSASOptions are options to construct a container SAS +// URI. +// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas +type ContainerSASOptions struct { + ContainerSASPermissions + OverrideHeaders + SASOptions +} + +// ContainerSASPermissions includes the available permissions for +// a container SAS URI. +type ContainerSASPermissions struct { + BlobServiceSASPermissions + List bool +} + +// GetSASURI creates an URL to the container which contains the Shared +// Access Signature with the specified options. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas +func (c *Container) GetSASURI(options ContainerSASOptions) (string, error) { + uri := c.GetURL() + signedResource := "c" + canonicalizedResource, err := c.bsc.client.buildCanonicalizedResource(uri, c.bsc.auth, true) + if err != nil { + return "", err + } + + // build permissions string + permissions := options.BlobServiceSASPermissions.buildString() + if options.List { + permissions += "l" + } + + return c.bsc.client.blobAndFileSASURI(options.SASOptions, uri, permissions, canonicalizedResource, signedResource, options.OverrideHeaders) +} + // ContainerProperties contains various properties of a container returned from // various endpoints like ListContainers. type ContainerProperties struct { @@ -224,7 +292,20 @@ func (c *Container) create(options *CreateContainerOptions) (*storageResponse, e // Exists returns true if a container with given name exists // on the storage account, otherwise returns false. 
func (c *Container) Exists() (bool, error) { - uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), url.Values{"restype": {"container"}}) + q := url.Values{"restype": {"container"}} + var uri string + if c.bsc.client.isServiceSASClient() { + q = mergeParams(q, c.sasuri.Query()) + newURI := c.sasuri + newURI.RawQuery = q.Encode() + uri = newURI.String() + + } else { + if c.bsc.client.isAccountSASClient() { + q = mergeParams(q, c.bsc.client.accountSASToken) + } + uri = c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), q) + } headers := c.bsc.client.getStandardHeaders() resp, err := c.bsc.client.exec(http.MethodHead, uri, headers, nil, c.bsc.auth) @@ -399,9 +480,20 @@ func (c *Container) delete(options *DeleteContainerOptions) (*storageResponse, e func (c *Container) ListBlobs(params ListBlobsParameters) (BlobListResponse, error) { q := mergeParams(params.getParameters(), url.Values{ "restype": {"container"}, - "comp": {"list"}}, - ) - uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), q) + "comp": {"list"}, + }) + var uri string + if c.bsc.client.isServiceSASClient() { + q = mergeParams(q, c.sasuri.Query()) + newURI := c.sasuri + newURI.RawQuery = q.Encode() + uri = newURI.String() + } else { + if c.bsc.client.isAccountSASClient() { + q = mergeParams(q, c.bsc.client.accountSASToken) + } + uri = c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), q) + } headers := c.bsc.client.getStandardHeaders() headers = addToHeaders(headers, "x-ms-client-request-id", params.RequestID) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go index 377a3c622e9a..a4cc2527b67f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "errors" "fmt" @@ -50,7 +64,7 @@ type IncrementalCopyOptionsConditions struct { // Copy starts a blob copy operation and waits for the operation to // complete. sourceBlob parameter must be a canonical URL to the blob (can be -// obtained using GetBlobURL method.) There is no SLA on blob copy and therefore +// obtained using the GetURL method.) There is no SLA on blob copy and therefore // this helper method works faster on smaller files. // // See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Copy-Blob @@ -65,7 +79,7 @@ func (b *Blob) Copy(sourceBlob string, options *CopyOptions) error { // StartCopy starts a blob copy operation. // sourceBlob parameter must be a canonical URL to the blob (can be -// obtained using GetBlobURL method.) +// obtained using the GetURL method.) 
// // See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Copy-Blob func (b *Blob) StartCopy(sourceBlob string, options *CopyOptions) (string, error) { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go index 29610329ec31..189e03802444 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go @@ -1,9 +1,24 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "encoding/xml" "net/http" "net/url" + "sync" ) // Directory represents a directory on a share. @@ -169,6 +184,7 @@ func (d *Directory) GetFileReference(name string) *File { Name: name, parent: d, share: d.share, + mutex: &sync.Mutex{}, } } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go index 13e94750731f..9668ea669494 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "bytes" "encoding/json" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go index 238ac6d6d95a..5fb516c55fdb 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ import ( "errors" "fmt" @@ -8,6 +22,7 @@ import ( "net/http" "net/url" "strconv" + "sync" ) const fourMB = uint64(4194304) @@ -22,6 +37,7 @@ type File struct { Properties FileProperties `xml:"Properties"` share *Share FileCopyProperties FileCopyState + mutex *sync.Mutex } // FileProperties contains various properties of a file. @@ -148,7 +164,9 @@ func (f *File) CopyFile(sourceURL string, options *FileRequestOptions) error { return err } - f.updateEtagLastModifiedAndCopyHeaders(headers) + f.updateEtagAndLastModified(headers) + f.FileCopyProperties.ID = headers.Get("X-Ms-Copy-Id") + f.FileCopyProperties.Status = headers.Get("X-Ms-Copy-Status") return nil } @@ -399,14 +417,6 @@ func (f *File) updateEtagAndLastModified(headers http.Header) { f.Properties.LastModified = headers.Get("Last-Modified") } -// updates Etag, last modified date and x-ms-copy-id -func (f *File) updateEtagLastModifiedAndCopyHeaders(headers http.Header) { - f.Properties.Etag = headers.Get("Etag") - f.Properties.LastModified = headers.Get("Last-Modified") - f.FileCopyProperties.ID = headers.Get("X-Ms-Copy-Id") - f.FileCopyProperties.Status = headers.Get("X-Ms-Copy-Status") -} - // updates file properties from the specified HTTP header func (f *File) updateProperties(header http.Header) { size, err := strconv.ParseUint(header.Get("Content-Length"), 10, 64) @@ -430,7 +440,7 @@ func (f *File) URL() string { return f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), nil) } -// WriteRangeOptions includes opptions for a write file range operation +// WriteRangeOptions includes options for a write file range operation type WriteRangeOptions struct { Timeout uint ContentMD5 string @@ -456,7 +466,11 @@ func (f *File) WriteRange(bytes io.Reader, fileRange FileRange, options *WriteRa if err != nil { return err } - + // it's perfectly legal for multiple go routines to call WriteRange + // on the same *File (e.g. concurrently writing non-overlapping ranges) + // so we must take the file mutex before updating our properties. + f.mutex.Lock() f.updateEtagAndLastModified(headers) + f.mutex.Unlock() return nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go index 81217bdfa80b..295e3d3e25c3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ import ( "encoding/xml" "fmt" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go index 415b740183b3..3d9d52d8e352 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "errors" "net/http" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/message.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/message.go index 3ededcd421aa..7d9038a5f71f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/message.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/message.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "encoding/xml" "fmt" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go index 41d832e2be11..800adf129dcc 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // MetadataLevel determines if operations should return a paylod, // and it level of detail. 
type MetadataLevel string diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go index bc5b398d3ff8..c59fd4b50b9d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "encoding/xml" "errors" @@ -160,6 +174,8 @@ func (b *Blob) GetPageRanges(options *GetPageRangesOptions) (GetPageRangesRespon // size in bytes (size must be aligned to a 512-byte boundary). A page blob must // be created using this method before writing pages. // +// See CreateBlockBlobFromReader for more info on creating blobs. +// // See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob func (b *Blob) PutPageBlob(options *PutBlobOptions) error { if b.Properties.ContentLength%512 != 0 { @@ -184,6 +200,5 @@ func (b *Blob) PutPageBlob(options *PutBlobOptions) error { if err != nil { return err } - readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) + return b.respondCreation(resp, BlobTypePage) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go index c2c7f742c452..499592ebd125 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "encoding/xml" "errors" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/queuesasuri.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/queuesasuri.go new file mode 100644 index 000000000000..28d9ab937e24 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/queuesasuri.go @@ -0,0 +1,146 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "errors" + "fmt" + "net/url" + "strings" + "time" +) + +// QueueSASOptions are options to construct a blob SAS +// URI. +// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas +type QueueSASOptions struct { + QueueSASPermissions + SASOptions +} + +// QueueSASPermissions includes the available permissions for +// a queue SAS URI. +type QueueSASPermissions struct { + Read bool + Add bool + Update bool + Process bool +} + +func (q QueueSASPermissions) buildString() string { + permissions := "" + + if q.Read { + permissions += "r" + } + if q.Add { + permissions += "a" + } + if q.Update { + permissions += "u" + } + if q.Process { + permissions += "p" + } + return permissions +} + +// GetSASURI creates an URL to the specified queue which contains the Shared +// Access Signature with specified permissions and expiration time. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas +func (q *Queue) GetSASURI(options QueueSASOptions) (string, error) { + canonicalizedResource, err := q.qsc.client.buildCanonicalizedResource(q.buildPath(), q.qsc.auth, true) + if err != nil { + return "", err + } + + // "The canonicalizedresouce portion of the string is a canonical path to the signed resource. + // It must include the service name (blob, table, queue or file) for version 2015-02-21 or + // later, the storage account name, and the resource name, and must be URL-decoded. + // -- https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx + // We need to replace + with %2b first to avoid being treated as a space (which is correct for query strings, but not the path component). 
+ canonicalizedResource = strings.Replace(canonicalizedResource, "+", "%2b", -1) + canonicalizedResource, err = url.QueryUnescape(canonicalizedResource) + if err != nil { + return "", err + } + + signedStart := "" + if options.Start != (time.Time{}) { + signedStart = options.Start.UTC().Format(time.RFC3339) + } + signedExpiry := options.Expiry.UTC().Format(time.RFC3339) + + protocols := "https,http" + if options.UseHTTPS { + protocols = "https" + } + + permissions := options.QueueSASPermissions.buildString() + stringToSign, err := queueSASStringToSign(q.qsc.client.apiVersion, canonicalizedResource, signedStart, signedExpiry, options.IP, permissions, protocols, options.Identifier) + if err != nil { + return "", err + } + + sig := q.qsc.client.computeHmac256(stringToSign) + sasParams := url.Values{ + "sv": {q.qsc.client.apiVersion}, + "se": {signedExpiry}, + "sp": {permissions}, + "sig": {sig}, + } + + if q.qsc.client.apiVersion >= "2015-04-05" { + sasParams.Add("spr", protocols) + addQueryParameter(sasParams, "sip", options.IP) + } + + uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), nil) + sasURL, err := url.Parse(uri) + if err != nil { + return "", err + } + sasURL.RawQuery = sasParams.Encode() + return sasURL.String(), nil +} + +func queueSASStringToSign(signedVersion, canonicalizedResource, signedStart, signedExpiry, signedIP, signedPermissions, protocols, signedIdentifier string) (string, error) { + + if signedVersion >= "2015-02-21" { + canonicalizedResource = "/queue" + canonicalizedResource + } + + // https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx#Anchor_12 + if signedVersion >= "2015-04-05" { + return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", + signedPermissions, + signedStart, + signedExpiry, + canonicalizedResource, + signedIdentifier, + signedIP, + protocols, + signedVersion), nil + + } + + // reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx + if signedVersion >= "2013-08-15" { + return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion), nil + } + + return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15") +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go index 19b44941c8cc..29febe146f65 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // QueueServiceClient contains operations for Microsoft Azure Queue Storage // Service. 
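As a usage sketch for the new queue SAS support above (editorial, not part of the vendored sources): GetSASURI takes a QueueSASOptions whose embedded QueueSASPermissions select the signed permissions and whose embedded SASOptions carry Expiry, UseHTTPS, IP and Identifier. The NewBasicClient / GetQueueService / GetQueueReference entry points are the package's usual helpers and are assumed here rather than shown in this hunk.

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	// Placeholder credentials; the key only needs to be valid base64 for the
	// client constructor, and GetSASURI signs locally without a network call.
	client, err := storage.NewBasicClient("myaccount", "ZHVtbXlrZXk=")
	if err != nil {
		log.Fatal(err)
	}
	queue := client.GetQueueService().GetQueueReference("tasks")

	opts := storage.QueueSASOptions{}
	opts.Read = true    // "r" in the signed permissions
	opts.Process = true // "p" in the signed permissions
	opts.Expiry = time.Now().Add(time.Hour).UTC()
	opts.UseHTTPS = true // restricts the signed protocol (spr) to https

	sasURI, err := queue.GetSASURI(opts)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(sasURI)
}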
type QueueServiceClient struct { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/share.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/share.go index e6a868081a01..a14d9d324474 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/share.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/share.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "fmt" "net/http" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go index bee1c31ad61f..056ab398a812 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "strings" "time" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go index 88700fbc93e6..c102619c9873 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "net/http" "net/url" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go index 4eae3af9df76..6c01d32ee13e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "bytes" "encoding/json" @@ -174,11 +188,7 @@ func (t *Table) Delete(timeout uint, options *TableOptions) error { } defer readAndCloseBody(resp.body) - if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { - return err - - } - return nil + return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) } // QueryOptions includes options for a query entities operation. @@ -261,10 +271,7 @@ func (t *Table) SetPermissions(tap []TableAccessPolicy, timeout uint, options *T } defer readAndCloseBody(resp.body) - if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { - return err - } - return nil + return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) } func generateTableACLPayload(policies []TableAccessPolicy) (io.Reader, int, error) { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go index 7a0f0915c627..3f882417c65a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "bytes" "encoding/json" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go index 895dcfded891..456bee7733af 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ import ( "encoding/json" "fmt" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go index 8a902be2f05b..7734b8f886f7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "bytes" "crypto/hmac" @@ -18,7 +32,29 @@ import ( ) var ( - fixedTime = time.Date(2050, time.December, 20, 21, 55, 0, 0, time.FixedZone("GMT", -6)) + fixedTime = time.Date(2050, time.December, 20, 21, 55, 0, 0, time.FixedZone("GMT", -6)) + accountSASOptions = AccountSASTokenOptions{ + Services: Services{ + Blob: true, + }, + ResourceTypes: ResourceTypes{ + Service: true, + Container: true, + Object: true, + }, + Permissions: Permissions{ + Read: true, + Write: true, + Delete: true, + List: true, + Add: true, + Create: true, + Update: true, + Process: true, + }, + Expiry: fixedTime, + UseHTTPS: true, + } ) func (c Client) computeHmac256(message string) string { @@ -136,7 +172,7 @@ func addTimeout(params url.Values, timeout uint) url.Values { func addSnapshot(params url.Values, snapshot *time.Time) url.Values { if snapshot != nil { - params.Add("snapshot", timeRfc1123Formatted(*snapshot)) + params.Add("snapshot", snapshot.Format("2006-01-02T15:04:05.0000000Z")) } return params } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.7.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.7.go new file mode 100644 index 000000000000..67ff6ca03fe3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.7.go @@ -0,0 +1,26 @@ +// +build !go1.8 + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "io" + "net/http" +) + +func setContentLengthFromLimitedReader(req *http.Request, lr *io.LimitedReader) { + req.ContentLength = lr.N +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.8.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.8.go new file mode 100644 index 000000000000..eada102c0cff --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.8.go @@ -0,0 +1,32 @@ +// +build go1.8 + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "io" + "io/ioutil" + "net/http" +) + +func setContentLengthFromLimitedReader(req *http.Request, lr *io.LimitedReader) { + req.ContentLength = lr.N + snapshot := *lr + req.GetBody = func() (io.ReadCloser, error) { + r := snapshot + return ioutil.NopCloser(&r), nil + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/version.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/version.go index a23fff1e2e15..1cd3e03d12a8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/version.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + var ( sdkVersion = "10.0.2" ) diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go index 12375e0e4bb8..49e9214d598a 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go @@ -1,5 +1,19 @@ package adal +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "fmt" "net/url" diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go index 6c511f8c8779..b38f4c245897 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go @@ -1,5 +1,19 @@ package adal +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + /* This file is largely based on rjw57/oauth2device's code, with the follow differences: * scope -> resource, and only allow a single one diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/msi.go b/vendor/github.com/Azure/go-autorest/autorest/adal/msi.go new file mode 100644 index 000000000000..5e02d52ac27c --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/msi.go @@ -0,0 +1,20 @@ +// +build !windows + +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// msiPath is the path to the MSI Extension settings file (to discover the endpoint) +var msiPath = "/var/lib/waagent/ManagedIdentity-Settings" diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/msi_windows.go b/vendor/github.com/Azure/go-autorest/autorest/adal/msi_windows.go new file mode 100644 index 000000000000..261b568829c8 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/msi_windows.go @@ -0,0 +1,25 @@ +// +build windows + +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "os" + "strings" +) + +// msiPath is the path to the MSI Extension settings file (to discover the endpoint) +var msiPath = strings.Join([]string{os.Getenv("SystemDrive"), "WindowsAzure/Config/ManagedIdentity-Settings"}, "/") diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go index 73711c6674ea..9e15f2751f27 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go @@ -1,5 +1,19 @@ package adal +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
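The util_1.7.go/util_1.8.go pair added earlier in this diff splits setContentLengthFromLimitedReader by build tag so that, on Go 1.8+, the request also gets a GetBody hook and its payload can be recreated for retries or redirects. A minimal, self-contained illustration of the same idea using only the standard library (editorial sketch, not part of the vendored code):

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"strings"
)

// Mirrors the Go 1.8+ variant: record the length and keep a copy of the
// LimitedReader's state so GetBody can hand out a fresh reader over the
// same source.
func setContentLengthFromLimitedReader(req *http.Request, lr *io.LimitedReader) {
	req.ContentLength = lr.N
	snapshot := *lr
	req.GetBody = func() (io.ReadCloser, error) {
		r := snapshot
		return ioutil.NopCloser(&r), nil
	}
}

func main() {
	payload := strings.NewReader("hello world")
	lr := &io.LimitedReader{R: payload, N: 5} // send only the first 5 bytes

	// http.NewRequest does not recognise *io.LimitedReader, so it leaves
	// ContentLength at 0 and GetBody unset; the helper fills both in.
	req, _ := http.NewRequest(http.MethodPut, "https://example.invalid/blob", lr)
	setContentLengthFromLimitedReader(req, lr)

	body, _ := req.GetBody()
	b, _ := ioutil.ReadAll(body)
	fmt.Printf("length=%d replayable=%q\n", req.ContentLength, b)
}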
+ import ( "encoding/json" "fmt" diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go index 7928c971abbe..0e5ad14d3962 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go @@ -1,5 +1,19 @@ package adal +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "net/http" ) diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go index 559fc6653583..67dd97a18c18 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go @@ -1,5 +1,19 @@ package adal +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "crypto/rand" "crypto/rsa" @@ -15,12 +29,12 @@ import ( "strings" "time" + "github.com/Azure/go-autorest/autorest/date" "github.com/dgrijalva/jwt-go" ) const ( defaultRefresh = 5 * time.Minute - tokenBaseDate = "1970-01-01T00:00:00Z" // OAuthGrantTypeDeviceCode is the "grant_type" identifier used in device flow OAuthGrantTypeDeviceCode = "device_code" @@ -31,16 +45,10 @@ const ( // OAuthGrantTypeRefreshToken is the "grant_type" identifier used in refresh token flows OAuthGrantTypeRefreshToken = "refresh_token" - // managedIdentitySettingsPath is the path to the MSI Extension settings file (to discover the endpoint) - managedIdentitySettingsPath = "/var/lib/waagent/ManagedIdentity-Settings" + // metadataHeader is the header required by MSI extension + metadataHeader = "Metadata" ) -var expirationBase time.Time - -func init() { - expirationBase, _ = time.Parse(time.RFC3339, tokenBaseDate) -} - // OAuthTokenProvider is an interface which should be implemented by an access token retriever type OAuthTokenProvider interface { OAuthToken() string @@ -76,7 +84,10 @@ func (t Token) Expires() time.Time { if err != nil { s = -3600 } - return expirationBase.Add(time.Duration(s) * time.Second).UTC() + + expiration := date.NewUnixTimeFromSeconds(float64(s)) + + return time.Time(expiration).UTC() } // IsExpired returns true if the Token is expired, false otherwise. @@ -135,9 +146,7 @@ type ServicePrincipalMSISecret struct { } // SetAuthenticationValues is a method of the interface ServicePrincipalSecret. 
-// MSI extension requires the authority field to be set to the real tenant authority endpoint func (msiSecret *ServicePrincipalMSISecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - v.Set("authority", spt.oauthConfig.AuthorityEndpoint.String()) return nil } @@ -261,41 +270,43 @@ func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID s ) } -// NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension. -func NewServicePrincipalTokenFromMSI(oauthConfig OAuthConfig, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - return newServicePrincipalTokenFromMSI(oauthConfig, resource, managedIdentitySettingsPath, callbacks...) +// GetMSIVMEndpoint gets the MSI endpoint on Virtual Machines. +func GetMSIVMEndpoint() (string, error) { + return getMSIVMEndpoint(msiPath) } -func newServicePrincipalTokenFromMSI(oauthConfig OAuthConfig, resource, settingsPath string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { +func getMSIVMEndpoint(path string) (string, error) { // Read MSI settings - bytes, err := ioutil.ReadFile(settingsPath) + bytes, err := ioutil.ReadFile(path) if err != nil { - return nil, err + return "", err } msiSettings := struct { URL string `json:"url"` }{} err = json.Unmarshal(bytes, &msiSettings) if err != nil { - return nil, err + return "", err } + return msiSettings.URL, nil +} + +// NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension. +func NewServicePrincipalTokenFromMSI(msiEndpoint, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { // We set the oauth config token endpoint to be MSI's endpoint - // We leave the authority as-is so MSI can POST it with the token request - msiEndpointURL, err := url.Parse(msiSettings.URL) + msiEndpointURL, err := url.Parse(msiEndpoint) if err != nil { return nil, err } - msiTokenEndpointURL, err := msiEndpointURL.Parse("/oauth2/token") + oauthConfig, err := NewOAuthConfig(msiEndpointURL.String(), "") if err != nil { return nil, err } - oauthConfig.TokenEndpoint = *msiTokenEndpointURL - spt := &ServicePrincipalToken{ - oauthConfig: oauthConfig, + oauthConfig: *oauthConfig, secret: &ServicePrincipalMSISecret{}, resource: resource, autoRefresh: true, @@ -364,16 +375,24 @@ func (spt *ServicePrincipalToken) refreshInternal(resource string) error { req.ContentLength = int64(len(s)) req.Header.Set(contentType, mimeTypeFormPost) + if _, ok := spt.secret.(*ServicePrincipalMSISecret); ok { + req.Header.Set(metadataHeader, "true") + } resp, err := spt.sender.Do(req) if err != nil { return fmt.Errorf("adal: Failed to execute the refresh request. Error = '%v'", err) } + defer resp.Body.Close() + rb, err := ioutil.ReadAll(resp.Body) + if resp.StatusCode != http.StatusOK { - return fmt.Errorf("adal: Refresh request failed. Status Code = '%d'", resp.StatusCode) + if err != nil { + return fmt.Errorf("adal: Refresh request failed. Status Code = '%d'. Failed reading response body", resp.StatusCode) + } + return fmt.Errorf("adal: Refresh request failed. Status Code = '%d'. Response body: %s", resp.StatusCode, string(rb)) } - rb, err := ioutil.ReadAll(resp.Body) if err != nil { return fmt.Errorf("adal: Failed to read a new service principal token during refresh. 
Error = '%v'", err) } diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization.go b/vendor/github.com/Azure/go-autorest/autorest/authorization.go index 7f4e3d84540e..71e3ced2d6a6 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/authorization.go +++ b/vendor/github.com/Azure/go-autorest/autorest/authorization.go @@ -1,12 +1,34 @@ package autorest +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "fmt" "net/http" + "net/url" + "strings" "github.com/Azure/go-autorest/autorest/adal" ) +const ( + bearerChallengeHeader = "Www-Authenticate" + bearer = "Bearer" + tenantID = "tenantID" +) + // Authorizer is the interface that provides a PrepareDecorator used to supply request // authorization. Most often, the Authorizer decorator runs last so it has access to the full // state of the formed HTTP request. @@ -55,3 +77,105 @@ func (ba *BearerAuthorizer) WithAuthorization() PrepareDecorator { }) } } + +// BearerAuthorizerCallbackFunc is the authentication callback signature. +type BearerAuthorizerCallbackFunc func(tenantID, resource string) (*BearerAuthorizer, error) + +// BearerAuthorizerCallback implements bearer authorization via a callback. +type BearerAuthorizerCallback struct { + sender Sender + callback BearerAuthorizerCallbackFunc +} + +// NewBearerAuthorizerCallback creates a bearer authorization callback. The callback +// is invoked when the HTTP request is submitted. +func NewBearerAuthorizerCallback(sender Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback { + if sender == nil { + sender = &http.Client{} + } + return &BearerAuthorizerCallback{sender: sender, callback: callback} +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose value +// is "Bearer " followed by the token. The BearerAuthorizer is obtained via a user-supplied callback. +// +// By default, the token will be automatically refreshed through the Refresher interface. +func (bacb *BearerAuthorizerCallback) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + // make a copy of the request and remove the body as it's not + // required and avoids us having to create a copy of it. 
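Stepping back to the adal changes above: MSI setup is now split into GetMSIVMEndpoint, which reads the extension settings file to discover the endpoint, and NewServicePrincipalTokenFromMSI, which takes that endpoint plus the target resource. A hedged usage sketch (editorial; NewBearerAuthorizer and Refresh are the library's usual companions and are assumed here, not shown in these hunks):

package main

import (
	"fmt"
	"log"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	// Reads /var/lib/waagent/ManagedIdentity-Settings (or the Windows
	// equivalent) to discover the MSI endpoint.
	msiEndpoint, err := adal.GetMSIVMEndpoint()
	if err != nil {
		log.Fatalf("reading MSI settings: %v", err)
	}

	spt, err := adal.NewServicePrincipalTokenFromMSI(msiEndpoint, "https://management.azure.com/")
	if err != nil {
		log.Fatal(err)
	}

	// The refresh request now carries the "Metadata: true" header that the
	// MSI extension requires.
	if err := spt.Refresh(); err != nil {
		log.Fatal(err)
	}
	fmt.Println("token acquired:", spt.OAuthToken() != "")

	// Typically the token is then wrapped in a bearer authorizer and attached to a client.
	_ = autorest.NewBearerAuthorizer(spt)
}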
+ rCopy := *r + removeRequestBody(&rCopy) + + resp, err := bacb.sender.Do(&rCopy) + if err == nil && resp.StatusCode == 401 { + defer resp.Body.Close() + if hasBearerChallenge(resp) { + bc, err := newBearerChallenge(resp) + if err != nil { + return r, err + } + if bacb.callback != nil { + ba, err := bacb.callback(bc.values[tenantID], bc.values["resource"]) + if err != nil { + return r, err + } + return ba.WithAuthorization()(p).Prepare(r) + } + } + } + return r, err + }) + } +} + +// returns true if the HTTP response contains a bearer challenge +func hasBearerChallenge(resp *http.Response) bool { + authHeader := resp.Header.Get(bearerChallengeHeader) + if len(authHeader) == 0 || strings.Index(authHeader, bearer) < 0 { + return false + } + return true +} + +type bearerChallenge struct { + values map[string]string +} + +func newBearerChallenge(resp *http.Response) (bc bearerChallenge, err error) { + challenge := strings.TrimSpace(resp.Header.Get(bearerChallengeHeader)) + trimmedChallenge := challenge[len(bearer)+1:] + + // challenge is a set of key=value pairs that are comma delimited + pairs := strings.Split(trimmedChallenge, ",") + if len(pairs) < 1 { + err = fmt.Errorf("challenge '%s' contains no pairs", challenge) + return bc, err + } + + bc.values = make(map[string]string) + for i := range pairs { + trimmedPair := strings.TrimSpace(pairs[i]) + pair := strings.Split(trimmedPair, "=") + if len(pair) == 2 { + // remove the enclosing quotes + key := strings.Trim(pair[0], "\"") + value := strings.Trim(pair[1], "\"") + + switch key { + case "authorization", "authorization_uri": + // strip the tenant ID from the authorization URL + asURL, err := url.Parse(value) + if err != nil { + return bc, err + } + bc.values[tenantID] = asURL.Path[1:] + default: + bc.values[key] = value + } + } + } + + return bc, err +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/autorest.go b/vendor/github.com/Azure/go-autorest/autorest/autorest.go index 51f1c4bbcac2..37b907c77f51 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/autorest.go +++ b/vendor/github.com/Azure/go-autorest/autorest/autorest.go @@ -57,6 +57,20 @@ generated clients, see the Client described below. */ package autorest +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "net/http" "time" diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go index 332a8909d1a9..ffbc8da28e52 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go @@ -1,5 +1,19 @@ package azure +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "bytes" "fmt" diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go index 3f4d13421aac..fa18356476b9 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go @@ -5,6 +5,20 @@ See the included examples for more detail. */ package azure +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "encoding/json" "fmt" @@ -165,7 +179,13 @@ func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator { if decodeErr != nil { return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), decodeErr) } else if e.ServiceError == nil { - e.ServiceError = &ServiceError{Code: "Unknown", Message: "Unknown service error"} + // Check if error is unwrapped ServiceError + if err := json.Unmarshal(b.Bytes(), &e.ServiceError); err != nil || e.ServiceError.Message == "" { + e.ServiceError = &ServiceError{ + Code: "Unknown", + Message: "Unknown service error", + } + } } e.RequestID = ExtractRequestID(resp) diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go index 1cf55651f2a4..30c4351a576a 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go @@ -1,5 +1,19 @@ package azure +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
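The BearerAuthorizerCallback added to authorization.go above probes the request once and, on a 401 carrying a bearer challenge, hands the parsed tenant ID and resource to a user callback that must return a BearerAuthorizer. A sketch of wiring it into a client (editorial; adal.NewOAuthConfig, adal.NewServicePrincipalToken, NewClientWithUserAgent and azure.PublicCloud come from the wider library and are assumed, not shown in these hunks):

package main

import (
	"net/http"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/adal"
	"github.com/Azure/go-autorest/autorest/azure"
)

func newCallbackClient(clientID, clientSecret string) autorest.Client {
	client := autorest.NewClientWithUserAgent("example-agent")
	client.Authorizer = autorest.NewBearerAuthorizerCallback(&http.Client{},
		func(tenantID, resource string) (*autorest.BearerAuthorizer, error) {
			// tenantID and resource were extracted from the Www-Authenticate
			// challenge of the 401 response.
			cfg, err := adal.NewOAuthConfig(azure.PublicCloud.ActiveDirectoryEndpoint, tenantID)
			if err != nil {
				return nil, err
			}
			spt, err := adal.NewServicePrincipalToken(*cfg, clientID, clientSecret, resource)
			if err != nil {
				return nil, err
			}
			return autorest.NewBearerAuthorizer(spt), nil
		})
	return client
}

func main() {
	_ = newCallbackClient("app-id", "app-secret")
}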
+ import ( "fmt" "strings" diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go new file mode 100644 index 000000000000..40d5f5ba002f --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go @@ -0,0 +1,188 @@ +package azure + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "strings" + "time" + + "github.com/Azure/go-autorest/autorest" +) + +// DoRetryWithRegistration tries to register the resource provider in case it is unregistered. +// It also handles request retries +func DoRetryWithRegistration(client autorest.Client) autorest.SendDecorator { + return func(s autorest.Sender) autorest.Sender { + return autorest.SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + rr := autorest.NewRetriableRequest(r) + for currentAttempt := 0; currentAttempt < client.RetryAttempts; currentAttempt++ { + err = rr.Prepare() + if err != nil { + return resp, err + } + + resp, err = autorest.SendWithSender(s, rr.Request(), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), + ) + if err != nil { + return resp, err + } + + if resp.StatusCode != http.StatusConflict { + return resp, err + } + var re RequestError + err = autorest.Respond( + resp, + autorest.ByUnmarshallingJSON(&re), + ) + if err != nil { + return resp, err + } + + if re.ServiceError != nil && re.ServiceError.Code == "MissingSubscriptionRegistration" { + err = register(client, r, re) + if err != nil { + return resp, fmt.Errorf("failed auto registering Resource Provider: %s", err) + } + } + } + return resp, errors.New("failed request and resource provider registration") + }) + } +} + +func getProvider(re RequestError) (string, error) { + if re.ServiceError != nil { + if re.ServiceError.Details != nil && len(*re.ServiceError.Details) > 0 { + detail := (*re.ServiceError.Details)[0].(map[string]interface{}) + return detail["target"].(string), nil + } + } + return "", errors.New("provider was not found in the response") +} + +func register(client autorest.Client, originalReq *http.Request, re RequestError) error { + subID := getSubscription(originalReq.URL.Path) + if subID == "" { + return errors.New("missing parameter subscriptionID to register resource provider") + } + providerName, err := getProvider(re) + if err != nil { + return fmt.Errorf("missing parameter provider to register resource provider: %s", err) + } + newURL := url.URL{ + Scheme: originalReq.URL.Scheme, + Host: originalReq.URL.Host, + } + + // taken from the resources SDK + // with almost identical code, this sections are easier to mantain + // It is also not a good idea to import the SDK here + // https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L252 + pathParameters := map[string]interface{}{ + "resourceProviderNamespace": autorest.Encode("path", providerName), + "subscriptionId": autorest.Encode("path", subID), + } + + const APIVersion = "2016-09-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(newURL.String()), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register", pathParameters), + autorest.WithQueryParameters(queryParameters), + ) + + req, err := preparer.Prepare(&http.Request{}) + if err != nil { + return err + } + req.Cancel = originalReq.Cancel + + 
resp, err := autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), + ) + if err != nil { + return err + } + + type Provider struct { + RegistrationState *string `json:"registrationState,omitempty"` + } + var provider Provider + + err = autorest.Respond( + resp, + WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&provider), + autorest.ByClosing(), + ) + if err != nil { + return err + } + + // poll for registered provisioning state + now := time.Now() + for err == nil && time.Since(now) < client.PollingDuration { + // taken from the resources SDK + // https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L45 + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(newURL.String()), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}", pathParameters), + autorest.WithQueryParameters(queryParameters), + ) + req, err = preparer.Prepare(&http.Request{}) + if err != nil { + return err + } + req.Cancel = originalReq.Cancel + + resp, err := autorest.SendWithSender(client.Sender, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), + ) + if err != nil { + return err + } + + err = autorest.Respond( + resp, + WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&provider), + autorest.ByClosing(), + ) + if err != nil { + return err + } + + if provider.RegistrationState != nil && + *provider.RegistrationState == "Registered" { + break + } + + delayed := autorest.DelayWithRetryAfter(resp, originalReq.Cancel) + if !delayed { + autorest.DelayForBackoff(client.PollingDelay, 0, originalReq.Cancel) + } + } + if !(time.Since(now) < client.PollingDuration) { + return errors.New("polling for resource provider registration has exceeded the polling duration") + } + return err +} + +func getSubscription(path string) string { + parts := strings.Split(path, "/") + for i, v := range parts { + if v == "subscriptions" && (i+1) < len(parts) { + return parts[i+1] + } + } + return "" +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/client.go b/vendor/github.com/Azure/go-autorest/autorest/client.go index b5f94b5c3c75..c857e761168b 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/client.go +++ b/vendor/github.com/Azure/go-autorest/autorest/client.go @@ -1,5 +1,19 @@ package autorest +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
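The new azure/rp.go above adds DoRetryWithRegistration, a SendDecorator that reacts to a 409 MissingSubscriptionRegistration error by registering the missing resource provider for the subscription, polling until it is Registered, and retrying the original request. A usage sketch (editorial; the subscription ID and URL are placeholders, and NewClientWithUserAgent is the package's usual constructor, assumed here):

package main

import (
	"log"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
)

func main() {
	client := autorest.NewClientWithUserAgent("example-agent")
	// client.Authorizer would normally be set to a bearer authorizer here.

	req, err := http.NewRequest(http.MethodGet,
		"https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups?api-version=2016-09-01", nil)
	if err != nil {
		log.Fatal(err)
	}

	// On a 409 MissingSubscriptionRegistration response the decorator registers
	// the provider named in the error details and then retries the request.
	resp, err := autorest.SendWithSender(client, req,
		azure.DoRetryWithRegistration(client))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println(resp.Status)
}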
+ import ( "bytes" "fmt" @@ -33,8 +47,10 @@ var ( Version(), ) - statusCodesForRetry = []int{ + // StatusCodesForRetry are a defined group of status code for which the client will retry + StatusCodesForRetry = []int{ http.StatusRequestTimeout, // 408 + http.StatusTooManyRequests, // 429 http.StatusInternalServerError, // 500 http.StatusBadGateway, // 502 http.StatusServiceUnavailable, // 503 @@ -186,8 +202,7 @@ func (c Client) Do(r *http.Request) (*http.Response, error) { if err != nil { return nil, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed") } - resp, err := SendWithSender(c.sender(), r, - DoRetryForStatusCodes(c.RetryAttempts, c.RetryDuration, statusCodesForRetry...)) + resp, err := SendWithSender(c.sender(), r) Respond(resp, c.ByInspecting()) return resp, err diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/date.go b/vendor/github.com/Azure/go-autorest/autorest/date/date.go index 80ca60e9b08a..c4571065685a 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/date/date.go +++ b/vendor/github.com/Azure/go-autorest/autorest/date/date.go @@ -5,6 +5,20 @@ time.Time types. And both convert to time.Time through a ToTime method. */ package date +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "fmt" "time" diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/time.go b/vendor/github.com/Azure/go-autorest/autorest/date/time.go index c1af6296348d..b453fad0491e 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/date/time.go +++ b/vendor/github.com/Azure/go-autorest/autorest/date/time.go @@ -1,5 +1,19 @@ package date +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "regexp" "time" diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go index 11995fb9f2c5..48fb39ba9b9a 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go +++ b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go @@ -1,5 +1,19 @@ package date +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "errors" "time" diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go index e085c77eea50..7073959b2a9c 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go +++ b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go @@ -1,5 +1,19 @@ package date +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "bytes" "encoding/binary" diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/utility.go b/vendor/github.com/Azure/go-autorest/autorest/date/utility.go index 207b1a240a3a..12addf0ebb4b 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/date/utility.go +++ b/vendor/github.com/Azure/go-autorest/autorest/date/utility.go @@ -1,5 +1,19 @@ package date +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "strings" "time" diff --git a/vendor/github.com/Azure/go-autorest/autorest/error.go b/vendor/github.com/Azure/go-autorest/autorest/error.go index 4bcb8f27b210..f724f33327ed 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/error.go +++ b/vendor/github.com/Azure/go-autorest/autorest/error.go @@ -1,5 +1,19 @@ package autorest +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ import ( "fmt" "net/http" @@ -31,6 +45,9 @@ type DetailedError struct { // Service Error is the response body of failed API in bytes ServiceError []byte + + // Response is the response object that was returned during failure if applicable. + Response *http.Response } // NewError creates a new Error conforming object from the passed packageType, method, and @@ -67,6 +84,7 @@ func NewErrorWithError(original error, packageType string, method string, resp * Method: method, StatusCode: statusCode, Message: fmt.Sprintf(message, args...), + Response: resp, } } diff --git a/vendor/github.com/Azure/go-autorest/autorest/preparer.go b/vendor/github.com/Azure/go-autorest/autorest/preparer.go index afd114821bc7..2290c4010032 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/preparer.go +++ b/vendor/github.com/Azure/go-autorest/autorest/preparer.go @@ -1,5 +1,19 @@ package autorest +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "bytes" "encoding/json" diff --git a/vendor/github.com/Azure/go-autorest/autorest/responder.go b/vendor/github.com/Azure/go-autorest/autorest/responder.go index 87f71e5854b5..a908a0adb708 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/responder.go +++ b/vendor/github.com/Azure/go-autorest/autorest/responder.go @@ -1,5 +1,19 @@ package autorest +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "bytes" "encoding/json" diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go new file mode 100644 index 000000000000..fa11dbed79b1 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go @@ -0,0 +1,52 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
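The error.go change above attaches the failing *http.Response to DetailedError, so callers can inspect the status line or headers after a failure without threading the response around separately. A small sketch of consuming it (editorial; the header name is illustrative):

package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func describe(err error) string {
	if detailed, ok := err.(autorest.DetailedError); ok && detailed.Response != nil {
		return fmt.Sprintf("HTTP %d (request id %q): %v",
			detailed.Response.StatusCode,
			detailed.Response.Header.Get("x-ms-request-id"),
			err)
	}
	return err.Error()
}

func main() {
	resp := &http.Response{StatusCode: http.StatusServiceUnavailable, Header: http.Header{}}
	err := autorest.NewErrorWithError(fmt.Errorf("backend unavailable"),
		"example", "Get", resp, "request failed")
	fmt.Println(describe(err))
}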
+ +import ( + "bytes" + "io" + "io/ioutil" + "net/http" +) + +// NewRetriableRequest returns a wrapper around an HTTP request that support retry logic. +func NewRetriableRequest(req *http.Request) *RetriableRequest { + return &RetriableRequest{req: req} +} + +// Request returns the wrapped HTTP request. +func (rr *RetriableRequest) Request() *http.Request { + return rr.req +} + +func (rr *RetriableRequest) prepareFromByteReader() (err error) { + // fall back to making a copy (only do this once) + b := []byte{} + if rr.req.ContentLength > 0 { + b = make([]byte, rr.req.ContentLength) + _, err = io.ReadFull(rr.req.Body, b) + if err != nil { + return err + } + } else { + b, err = ioutil.ReadAll(rr.req.Body) + if err != nil { + return err + } + } + rr.br = bytes.NewReader(b) + rr.req.Body = ioutil.NopCloser(rr.br) + return err +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go new file mode 100644 index 000000000000..7143cc61b58b --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go @@ -0,0 +1,54 @@ +// +build !go1.8 + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package autorest + +import ( + "bytes" + "io/ioutil" + "net/http" +) + +// RetriableRequest provides facilities for retrying an HTTP request. +type RetriableRequest struct { + req *http.Request + br *bytes.Reader +} + +// Prepare signals that the request is about to be sent. +func (rr *RetriableRequest) Prepare() (err error) { + // preserve the request body; this is to support retry logic as + // the underlying transport will always close the reqeust body + if rr.req.Body != nil { + if rr.br != nil { + _, err = rr.br.Seek(0, 0 /*io.SeekStart*/) + rr.req.Body = ioutil.NopCloser(rr.br) + } + if err != nil { + return err + } + if rr.br == nil { + // fall back to making a copy (only do this once) + err = rr.prepareFromByteReader() + } + } + return err +} + +func removeRequestBody(req *http.Request) { + req.Body = nil + req.ContentLength = 0 +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go new file mode 100644 index 000000000000..ae15c6bf9629 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go @@ -0,0 +1,66 @@ +// +build go1.8 + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package autorest + +import ( + "bytes" + "io" + "io/ioutil" + "net/http" +) + +// RetriableRequest provides facilities for retrying an HTTP request. +type RetriableRequest struct { + req *http.Request + rc io.ReadCloser + br *bytes.Reader +} + +// Prepare signals that the request is about to be sent. +func (rr *RetriableRequest) Prepare() (err error) { + // preserve the request body; this is to support retry logic as + // the underlying transport will always close the reqeust body + if rr.req.Body != nil { + if rr.rc != nil { + rr.req.Body = rr.rc + } else if rr.br != nil { + _, err = rr.br.Seek(0, io.SeekStart) + rr.req.Body = ioutil.NopCloser(rr.br) + } + if err != nil { + return err + } + if rr.req.GetBody != nil { + // this will allow us to preserve the body without having to + // make a copy. note we need to do this on each iteration + rr.rc, err = rr.req.GetBody() + if err != nil { + return err + } + } else if rr.br == nil { + // fall back to making a copy (only do this once) + err = rr.prepareFromByteReader() + } + } + return err +} + +func removeRequestBody(req *http.Request) { + req.Body = nil + req.GetBody = nil + req.ContentLength = 0 +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/sender.go b/vendor/github.com/Azure/go-autorest/autorest/sender.go index 9c0697815bba..7264c32f27db 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/sender.go +++ b/vendor/github.com/Azure/go-autorest/autorest/sender.go @@ -1,12 +1,25 @@ package autorest +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( - "bytes" "fmt" - "io/ioutil" "log" "math" "net/http" + "strconv" "time" ) @@ -175,8 +188,13 @@ func DoPollForStatusCodes(duration time.Duration, delay time.Duration, codes ... 
func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator { return func(s Sender) Sender { return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + rr := NewRetriableRequest(r) for attempt := 0; attempt < attempts; attempt++ { - resp, err = s.Do(r) + err = rr.Prepare() + if err != nil { + return resp, err + } + resp, err = s.Do(rr.Request()) if err == nil { return resp, err } @@ -194,29 +212,43 @@ func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator { func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator { return func(s Sender) Sender { return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - b := []byte{} - if r.Body != nil { - b, err = ioutil.ReadAll(r.Body) - if err != nil { - return resp, err - } - } - + rr := NewRetriableRequest(r) // Increment to add the first call (attempts denotes number of retries) attempts++ for attempt := 0; attempt < attempts; attempt++ { - r.Body = ioutil.NopCloser(bytes.NewBuffer(b)) - resp, err = s.Do(r) + err = rr.Prepare() + if err != nil { + return resp, err + } + resp, err = s.Do(rr.Request()) if err != nil || !ResponseHasStatusCode(resp, codes...) { return resp, err } - DelayForBackoff(backoff, attempt, r.Cancel) + delayed := DelayWithRetryAfter(resp, r.Cancel) + if !delayed { + DelayForBackoff(backoff, attempt, r.Cancel) + } } return resp, err }) } } +// DelayWithRetryAfter invokes time.After for the duration specified in the "Retry-After" header in +// responses with status code 429 +func DelayWithRetryAfter(resp *http.Response, cancel <-chan struct{}) bool { + retryAfter, _ := strconv.Atoi(resp.Header.Get("Retry-After")) + if resp.StatusCode == http.StatusTooManyRequests && retryAfter > 0 { + select { + case <-time.After(time.Duration(retryAfter) * time.Second): + return true + case <-cancel: + return false + } + } + return false +} + // DoRetryForDuration returns a SendDecorator that retries the request until the total time is equal // to or greater than the specified duration, exponentially backing off between requests using the // supplied backoff time.Duration (which may be zero). Retrying may be canceled by closing the @@ -224,9 +256,14 @@ func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) Se func DoRetryForDuration(d time.Duration, backoff time.Duration) SendDecorator { return func(s Sender) Sender { return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + rr := NewRetriableRequest(r) end := time.Now().Add(d) for attempt := 0; time.Now().Before(end); attempt++ { - resp, err = s.Do(r) + err = rr.Prepare() + if err != nil { + return resp, err + } + resp, err = s.Do(rr.Request()) if err == nil { return resp, err } diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/convert.go b/vendor/github.com/Azure/go-autorest/autorest/to/convert.go index 7b180b866b90..fdda2ce1aa8e 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/to/convert.go +++ b/vendor/github.com/Azure/go-autorest/autorest/to/convert.go @@ -3,6 +3,20 @@ Package to provides helpers to ease working with pointer values of marshalled st */ package to +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
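Editor's note on the retry changes above: each retry decorator now routes every attempt through the new RetriableRequest so the request body can be replayed, and DoRetryForStatusCodes additionally honors a Retry-After header on HTTP 429 responses via DelayWithRetryAfter. A rough sketch of how these decorators are typically attached to a sender is below; the endpoint, payload, and status-code choices are illustrative only and not taken from this diff.

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
	"time"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	// Hypothetical wiring: wrap a plain *http.Client with the vendored retry
	// decorator. Each attempt calls RetriableRequest.Prepare, so the POST body
	// below can be re-sent, and a 429 carrying a Retry-After header is delayed
	// by DelayWithRetryAfter instead of the exponential backoff.
	sender := autorest.DecorateSender(
		&http.Client{},
		autorest.DoRetryForStatusCodes(3, 2*time.Second,
			http.StatusTooManyRequests, http.StatusServiceUnavailable),
	)

	req, err := http.NewRequest(http.MethodPost, "https://example.invalid/items",
		strings.NewReader(`{"name":"demo"}`))
	if err != nil {
		fmt.Println("building request:", err)
		return
	}

	resp, err := sender.Do(req)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```

Because http.NewRequest populates GetBody for a *strings.Reader on Go 1.8+, the go1.8 build of RetriableRequest can rewind the body without copying; on older Go it falls back to the byte-reader copy in prepareFromByteReader.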
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // String returns a string value for the passed string pointer. It returns the empty string if the // pointer is nil. func String(s *string) string { diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility.go b/vendor/github.com/Azure/go-autorest/autorest/utility.go index 78067148b28d..dfdc6efdff06 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/utility.go +++ b/vendor/github.com/Azure/go-autorest/autorest/utility.go @@ -1,5 +1,19 @@ package autorest +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "bytes" "encoding/json" diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go b/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go index d7b0eadc5542..3fe62c93056d 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go +++ b/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go @@ -3,6 +3,20 @@ Package validation provides methods for validating parameter value using reflect */ package validation +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ import ( "fmt" "reflect" @@ -91,15 +105,12 @@ func validateStruct(x reflect.Value, v Constraint, name ...string) error { return createError(x, v, fmt.Sprintf("field %q doesn't exist", v.Target)) } - if err := Validate([]Validation{ + return Validate([]Validation{ { TargetValue: getInterfaceValue(f), Constraints: []Constraint{v}, }, - }); err != nil { - return err - } - return nil + }) } func validatePtr(x reflect.Value, v Constraint) error { @@ -205,14 +216,14 @@ func validateString(x reflect.Value, v Constraint) error { return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule)) } if len(s) > v.Rule.(int) { - return createError(x, v, fmt.Sprintf("value length must be less than %v", v.Rule)) + return createError(x, v, fmt.Sprintf("value length must be less than or equal to %v", v.Rule)) } case MinLength: if _, ok := v.Rule.(int); !ok { return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule)) } if len(s) < v.Rule.(int) { - return createError(x, v, fmt.Sprintf("value length must be greater than %v", v.Rule)) + return createError(x, v, fmt.Sprintf("value length must be greater than or equal to %v", v.Rule)) } case ReadOnly: if len(s) > 0 { @@ -273,6 +284,17 @@ func validateArrayMap(x reflect.Value, v Constraint) error { if x.Len() != 0 { return createError(x, v, "readonly parameter; must send as nil or empty in request") } + case Pattern: + reg, err := regexp.Compile(v.Rule.(string)) + if err != nil { + return createError(x, v, err.Error()) + } + keys := x.MapKeys() + for _, k := range keys { + if !reg.MatchString(k.String()) { + return createError(k, v, fmt.Sprintf("map key doesn't match pattern %v", v.Rule)) + } + } default: return createError(x, v, fmt.Sprintf("constraint %v is not applicable to array, slice and map type", v.Name)) } diff --git a/vendor/github.com/Azure/go-autorest/autorest/version.go b/vendor/github.com/Azure/go-autorest/autorest/version.go index a222e8efaaf9..f588807dbb9c 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/version.go +++ b/vendor/github.com/Azure/go-autorest/autorest/version.go @@ -1,5 +1,19 @@ package autorest +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "bytes" "fmt" diff --git a/vendor/github.com/JeffAshton/win_pdh/AUTHORS b/vendor/github.com/JeffAshton/win_pdh/AUTHORS new file mode 100644 index 000000000000..7129f3d730e3 --- /dev/null +++ b/vendor/github.com/JeffAshton/win_pdh/AUTHORS @@ -0,0 +1,14 @@ +# This is the official list of 'win_pdh' authors for copyright purposes. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. 
+ +# Contributors +# ============ + +Alexander Neumann +Joseph Watson +Kevin Pors diff --git a/vendor/github.com/JeffAshton/win_pdh/LICENSE b/vendor/github.com/JeffAshton/win_pdh/LICENSE new file mode 100644 index 000000000000..5bf54be110d9 --- /dev/null +++ b/vendor/github.com/JeffAshton/win_pdh/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2010 The win_pdh Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. +3. The names of the authors may not be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/JeffAshton/win_pdh/README.mdown b/vendor/github.com/JeffAshton/win_pdh/README.mdown new file mode 100644 index 000000000000..268cd9f845fb --- /dev/null +++ b/vendor/github.com/JeffAshton/win_pdh/README.mdown @@ -0,0 +1,15 @@ +About win_pdh +============= + +win_pdh is a Windows Performance Data Helper wrapper package for Go. + +Originally part of [walk](https://github.com/lxn/walk) and [win](https://github.com/lxn/win), it is now a separate +project. + +Setup +===== + +Make sure you have a working Go installation. +See [Getting Started](http://golang.org/doc/install.html) + +Now run `go get github.com/JeffAshton/win_pdh` diff --git a/vendor/github.com/JeffAshton/win_pdh/pdh.go b/vendor/github.com/JeffAshton/win_pdh/pdh.go new file mode 100644 index 000000000000..56199001af82 --- /dev/null +++ b/vendor/github.com/JeffAshton/win_pdh/pdh.go @@ -0,0 +1,453 @@ +// Copyright 2013 The win_pdh Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package win_pdh + +import ( + "syscall" + "unsafe" +) + +// Error codes +const ( + ERROR_SUCCESS = 0 + ERROR_INVALID_FUNCTION = 1 +) + +type ( + HANDLE uintptr +) + +// PDH error codes, which can be returned by all Pdh* functions. Taken from mingw-w64 pdhmsg.h +const ( + PDH_CSTATUS_VALID_DATA = 0x00000000 // The returned data is valid. + PDH_CSTATUS_NEW_DATA = 0x00000001 // The return data value is valid and different from the last sample. + PDH_CSTATUS_NO_MACHINE = 0x800007D0 // Unable to connect to the specified computer, or the computer is offline. 
+ PDH_CSTATUS_NO_INSTANCE = 0x800007D1 + PDH_MORE_DATA = 0x800007D2 // The PdhGetFormattedCounterArray* function can return this if there's 'more data to be displayed'. + PDH_CSTATUS_ITEM_NOT_VALIDATED = 0x800007D3 + PDH_RETRY = 0x800007D4 + PDH_NO_DATA = 0x800007D5 // The query does not currently contain any counters (for example, limited access) + PDH_CALC_NEGATIVE_DENOMINATOR = 0x800007D6 + PDH_CALC_NEGATIVE_TIMEBASE = 0x800007D7 + PDH_CALC_NEGATIVE_VALUE = 0x800007D8 + PDH_DIALOG_CANCELLED = 0x800007D9 + PDH_END_OF_LOG_FILE = 0x800007DA + PDH_ASYNC_QUERY_TIMEOUT = 0x800007DB + PDH_CANNOT_SET_DEFAULT_REALTIME_DATASOURCE = 0x800007DC + PDH_CSTATUS_NO_OBJECT = 0xC0000BB8 + PDH_CSTATUS_NO_COUNTER = 0xC0000BB9 // The specified counter could not be found. + PDH_CSTATUS_INVALID_DATA = 0xC0000BBA // The counter was successfully found, but the data returned is not valid. + PDH_MEMORY_ALLOCATION_FAILURE = 0xC0000BBB + PDH_INVALID_HANDLE = 0xC0000BBC + PDH_INVALID_ARGUMENT = 0xC0000BBD // Required argument is missing or incorrect. + PDH_FUNCTION_NOT_FOUND = 0xC0000BBE + PDH_CSTATUS_NO_COUNTERNAME = 0xC0000BBF + PDH_CSTATUS_BAD_COUNTERNAME = 0xC0000BC0 // Unable to parse the counter path. Check the format and syntax of the specified path. + PDH_INVALID_BUFFER = 0xC0000BC1 + PDH_INSUFFICIENT_BUFFER = 0xC0000BC2 + PDH_CANNOT_CONNECT_MACHINE = 0xC0000BC3 + PDH_INVALID_PATH = 0xC0000BC4 + PDH_INVALID_INSTANCE = 0xC0000BC5 + PDH_INVALID_DATA = 0xC0000BC6 // specified counter does not contain valid data or a successful status code. + PDH_NO_DIALOG_DATA = 0xC0000BC7 + PDH_CANNOT_READ_NAME_STRINGS = 0xC0000BC8 + PDH_LOG_FILE_CREATE_ERROR = 0xC0000BC9 + PDH_LOG_FILE_OPEN_ERROR = 0xC0000BCA + PDH_LOG_TYPE_NOT_FOUND = 0xC0000BCB + PDH_NO_MORE_DATA = 0xC0000BCC + PDH_ENTRY_NOT_IN_LOG_FILE = 0xC0000BCD + PDH_DATA_SOURCE_IS_LOG_FILE = 0xC0000BCE + PDH_DATA_SOURCE_IS_REAL_TIME = 0xC0000BCF + PDH_UNABLE_READ_LOG_HEADER = 0xC0000BD0 + PDH_FILE_NOT_FOUND = 0xC0000BD1 + PDH_FILE_ALREADY_EXISTS = 0xC0000BD2 + PDH_NOT_IMPLEMENTED = 0xC0000BD3 + PDH_STRING_NOT_FOUND = 0xC0000BD4 + PDH_UNABLE_MAP_NAME_FILES = 0x80000BD5 + PDH_UNKNOWN_LOG_FORMAT = 0xC0000BD6 + PDH_UNKNOWN_LOGSVC_COMMAND = 0xC0000BD7 + PDH_LOGSVC_QUERY_NOT_FOUND = 0xC0000BD8 + PDH_LOGSVC_NOT_OPENED = 0xC0000BD9 + PDH_WBEM_ERROR = 0xC0000BDA + PDH_ACCESS_DENIED = 0xC0000BDB + PDH_LOG_FILE_TOO_SMALL = 0xC0000BDC + PDH_INVALID_DATASOURCE = 0xC0000BDD + PDH_INVALID_SQLDB = 0xC0000BDE + PDH_NO_COUNTERS = 0xC0000BDF + PDH_SQL_ALLOC_FAILED = 0xC0000BE0 + PDH_SQL_ALLOCCON_FAILED = 0xC0000BE1 + PDH_SQL_EXEC_DIRECT_FAILED = 0xC0000BE2 + PDH_SQL_FETCH_FAILED = 0xC0000BE3 + PDH_SQL_ROWCOUNT_FAILED = 0xC0000BE4 + PDH_SQL_MORE_RESULTS_FAILED = 0xC0000BE5 + PDH_SQL_CONNECT_FAILED = 0xC0000BE6 + PDH_SQL_BIND_FAILED = 0xC0000BE7 + PDH_CANNOT_CONNECT_WMI_SERVER = 0xC0000BE8 + PDH_PLA_COLLECTION_ALREADY_RUNNING = 0xC0000BE9 + PDH_PLA_ERROR_SCHEDULE_OVERLAP = 0xC0000BEA + PDH_PLA_COLLECTION_NOT_FOUND = 0xC0000BEB + PDH_PLA_ERROR_SCHEDULE_ELAPSED = 0xC0000BEC + PDH_PLA_ERROR_NOSTART = 0xC0000BED + PDH_PLA_ERROR_ALREADY_EXISTS = 0xC0000BEE + PDH_PLA_ERROR_TYPE_MISMATCH = 0xC0000BEF + PDH_PLA_ERROR_FILEPATH = 0xC0000BF0 + PDH_PLA_SERVICE_ERROR = 0xC0000BF1 + PDH_PLA_VALIDATION_ERROR = 0xC0000BF2 + PDH_PLA_VALIDATION_WARNING = 0x80000BF3 + PDH_PLA_ERROR_NAME_TOO_LONG = 0xC0000BF4 + PDH_INVALID_SQL_LOG_FORMAT = 0xC0000BF5 + PDH_COUNTER_ALREADY_IN_QUERY = 0xC0000BF6 + PDH_BINARY_LOG_CORRUPT = 0xC0000BF7 + PDH_LOG_SAMPLE_TOO_SMALL = 0xC0000BF8 + PDH_OS_LATER_VERSION = 0xC0000BF9 + 
PDH_OS_EARLIER_VERSION = 0xC0000BFA + PDH_INCORRECT_APPEND_TIME = 0xC0000BFB + PDH_UNMATCHED_APPEND_COUNTER = 0xC0000BFC + PDH_SQL_ALTER_DETAIL_FAILED = 0xC0000BFD + PDH_QUERY_PERF_DATA_TIMEOUT = 0xC0000BFE +) + +// Formatting options for GetFormattedCounterValue(). +const ( + PDH_FMT_RAW = 0x00000010 + PDH_FMT_ANSI = 0x00000020 + PDH_FMT_UNICODE = 0x00000040 + PDH_FMT_LONG = 0x00000100 // Return data as a long int. + PDH_FMT_DOUBLE = 0x00000200 // Return data as a double precision floating point real. + PDH_FMT_LARGE = 0x00000400 // Return data as a 64 bit integer. + PDH_FMT_NOSCALE = 0x00001000 // can be OR-ed: Do not apply the counter's default scaling factor. + PDH_FMT_1000 = 0x00002000 // can be OR-ed: multiply the actual value by 1,000. + PDH_FMT_NODATA = 0x00004000 // can be OR-ed: unknown what this is for, MSDN says nothing. + PDH_FMT_NOCAP100 = 0x00008000 // can be OR-ed: do not cap values > 100. + PERF_DETAIL_COSTLY = 0x00010000 + PERF_DETAIL_STANDARD = 0x0000FFFF +) + +type ( + PDH_HQUERY HANDLE // query handle + PDH_HCOUNTER HANDLE // counter handle +) + +// Union specialization for double values +type PDH_FMT_COUNTERVALUE_DOUBLE struct { + CStatus uint32 + DoubleValue float64 +} + +// Union specialization for 64 bit integer values +type PDH_FMT_COUNTERVALUE_LARGE struct { + CStatus uint32 + LargeValue int64 +} + +// Union specialization for long values +type PDH_FMT_COUNTERVALUE_LONG struct { + CStatus uint32 + LongValue int32 + padding [4]byte +} + +// Union specialization for double values, used by PdhGetFormattedCounterArrayDouble() +type PDH_FMT_COUNTERVALUE_ITEM_DOUBLE struct { + SzName *uint16 // pointer to a string + FmtValue PDH_FMT_COUNTERVALUE_DOUBLE +} + +// Union specialization for 'large' values, used by PdhGetFormattedCounterArrayLarge() +type PDH_FMT_COUNTERVALUE_ITEM_LARGE struct { + SzName *uint16 // pointer to a string + FmtValue PDH_FMT_COUNTERVALUE_LARGE +} + +// Union specialization for long values, used by PdhGetFormattedCounterArrayLong() +type PDH_FMT_COUNTERVALUE_ITEM_LONG struct { + SzName *uint16 // pointer to a string + FmtValue PDH_FMT_COUNTERVALUE_LONG +} + +var ( + // Library + libpdhDll *syscall.DLL + + // Functions + pdh_AddCounterW *syscall.Proc + pdh_AddEnglishCounterW *syscall.Proc + pdh_CloseQuery *syscall.Proc + pdh_CollectQueryData *syscall.Proc + pdh_GetFormattedCounterValue *syscall.Proc + pdh_GetFormattedCounterArrayW *syscall.Proc + pdh_OpenQuery *syscall.Proc + pdh_ValidatePathW *syscall.Proc +) + +func init() { + // Library + libpdhDll = syscall.MustLoadDLL("pdh.dll") + + // Functions + pdh_AddCounterW = libpdhDll.MustFindProc("PdhAddCounterW") + pdh_AddEnglishCounterW, _ = libpdhDll.FindProc("PdhAddEnglishCounterW") // XXX: only supported on versions > Vista. + pdh_CloseQuery = libpdhDll.MustFindProc("PdhCloseQuery") + pdh_CollectQueryData = libpdhDll.MustFindProc("PdhCollectQueryData") + pdh_GetFormattedCounterValue = libpdhDll.MustFindProc("PdhGetFormattedCounterValue") + pdh_GetFormattedCounterArrayW = libpdhDll.MustFindProc("PdhGetFormattedCounterArrayW") + pdh_OpenQuery = libpdhDll.MustFindProc("PdhOpenQuery") + pdh_ValidatePathW = libpdhDll.MustFindProc("PdhValidatePathW") +} + +// Adds the specified counter to the query. This is the internationalized version. Preferably, use the +// function PdhAddEnglishCounter instead. hQuery is the query handle, which has been fetched by PdhOpenQuery. +// szFullCounterPath is a full, internationalized counter path (this will differ per Windows language version). 
+// dwUserData is a 'user-defined value', which becomes part of the counter information. To retrieve this value +// later, call PdhGetCounterInfo() and access dwQueryUserData of the PDH_COUNTER_INFO structure. +// +// Examples of szFullCounterPath (in an English version of Windows): +// +// \\Processor(_Total)\\% Idle Time +// \\Processor(_Total)\\% Processor Time +// \\LogicalDisk(C:)\% Free Space +// +// To view all (internationalized...) counters on a system, there are three non-programmatic ways: perfmon utility, +// the typeperf command, and the the registry editor. perfmon.exe is perhaps the easiest way, because it's basically a +// full implemention of the pdh.dll API, except with a GUI and all that. The registry setting also provides an +// interface to the available counters, and can be found at the following key: +// +// HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Perflib\CurrentLanguage +// +// This registry key contains several values as follows: +// +// 1 +// 1847 +// 2 +// System +// 4 +// Memory +// 6 +// % Processor Time +// ... many, many more +// +// Somehow, these numeric values can be used as szFullCounterPath too: +// +// \2\6 will correspond to \\System\% Processor Time +// +// The typeperf command may also be pretty easy. To find all performance counters, simply execute: +// +// typeperf -qx +func PdhAddCounter(hQuery PDH_HQUERY, szFullCounterPath string, dwUserData uintptr, phCounter *PDH_HCOUNTER) uint32 { + ptxt, _ := syscall.UTF16PtrFromString(szFullCounterPath) + ret, _, _ := pdh_AddCounterW.Call( + uintptr(hQuery), + uintptr(unsafe.Pointer(ptxt)), + dwUserData, + uintptr(unsafe.Pointer(phCounter))) + + return uint32(ret) +} + +// Adds the specified language-neutral counter to the query. See the PdhAddCounter function. This function only exists on +// Windows versions higher than Vista. +func PdhAddEnglishCounter(hQuery PDH_HQUERY, szFullCounterPath string, dwUserData uintptr, phCounter *PDH_HCOUNTER) uint32 { + if pdh_AddEnglishCounterW == nil { + return ERROR_INVALID_FUNCTION + } + + ptxt, _ := syscall.UTF16PtrFromString(szFullCounterPath) + ret, _, _ := pdh_AddEnglishCounterW.Call( + uintptr(hQuery), + uintptr(unsafe.Pointer(ptxt)), + dwUserData, + uintptr(unsafe.Pointer(phCounter))) + + return uint32(ret) +} + +// Closes all counters contained in the specified query, closes all handles related to the query, +// and frees all memory associated with the query. +func PdhCloseQuery(hQuery PDH_HQUERY) uint32 { + ret, _, _ := pdh_CloseQuery.Call(uintptr(hQuery)) + + return uint32(ret) +} + +// Collects the current raw data value for all counters in the specified query and updates the status +// code of each counter. With some counters, this function needs to be repeatedly called before the value +// of the counter can be extracted with PdhGetFormattedCounterValue(). 
For example, the following code +// requires at least two calls: +// +// var handle win.PDH_HQUERY +// var counterHandle win.PDH_HCOUNTER +// ret := win.PdhOpenQuery(0, 0, &handle) +// ret = win.PdhAddEnglishCounter(handle, "\\Processor(_Total)\\% Idle Time", 0, &counterHandle) +// var derp win.PDH_FMT_COUNTERVALUE_DOUBLE +// +// ret = win.PdhCollectQueryData(handle) +// fmt.Printf("Collect return code is %x\n", ret) // return code will be PDH_CSTATUS_INVALID_DATA +// ret = win.PdhGetFormattedCounterValueDouble(counterHandle, 0, &derp) +// +// ret = win.PdhCollectQueryData(handle) +// fmt.Printf("Collect return code is %x\n", ret) // return code will be ERROR_SUCCESS +// ret = win.PdhGetFormattedCounterValueDouble(counterHandle, 0, &derp) +// +// The PdhCollectQueryData will return an error in the first call because it needs two values for +// displaying the correct data for the processor idle time. The second call will have a 0 return code. +func PdhCollectQueryData(hQuery PDH_HQUERY) uint32 { + ret, _, _ := pdh_CollectQueryData.Call(uintptr(hQuery)) + + return uint32(ret) +} + +// Formats the given hCounter using a 'double'. The result is set into the specialized union struct pValue. +// This function does not directly translate to a Windows counterpart due to union specialization tricks. +func PdhGetFormattedCounterValueDouble(hCounter PDH_HCOUNTER, lpdwType *uint32, pValue *PDH_FMT_COUNTERVALUE_DOUBLE) uint32 { + ret, _, _ := pdh_GetFormattedCounterValue.Call( + uintptr(hCounter), + uintptr(PDH_FMT_DOUBLE), + uintptr(unsafe.Pointer(lpdwType)), + uintptr(unsafe.Pointer(pValue))) + + return uint32(ret) +} + +// Formats the given hCounter using a large int (int64). The result is set into the specialized union struct pValue. +// This function does not directly translate to a Windows counterpart due to union specialization tricks. +func PdhGetFormattedCounterValueLarge(hCounter PDH_HCOUNTER, lpdwType *uint32, pValue *PDH_FMT_COUNTERVALUE_LARGE) uint32 { + ret, _, _ := pdh_GetFormattedCounterValue.Call( + uintptr(hCounter), + uintptr(PDH_FMT_LARGE), + uintptr(unsafe.Pointer(lpdwType)), + uintptr(unsafe.Pointer(pValue))) + + return uint32(ret) +} + +// Formats the given hCounter using a 'long'. The result is set into the specialized union struct pValue. +// This function does not directly translate to a Windows counterpart due to union specialization tricks. +// +// BUG(krpors): Testing this function on multiple systems yielded inconsistent results. For instance, +// the pValue.LongValue kept the value '192' on test system A, but on B this was '0', while the padding +// bytes of the struct got the correct value. Until someone can figure out this behaviour, prefer to use +// the Double or Large counterparts instead. These functions provide actually the same data, except in +// a different, working format. +func PdhGetFormattedCounterValueLong(hCounter PDH_HCOUNTER, lpdwType *uint32, pValue *PDH_FMT_COUNTERVALUE_LONG) uint32 { + ret, _, _ := pdh_GetFormattedCounterValue.Call( + uintptr(hCounter), + uintptr(PDH_FMT_LONG), + uintptr(unsafe.Pointer(lpdwType)), + uintptr(unsafe.Pointer(pValue))) + + return uint32(ret) +} + +// Returns an array of formatted counter values. Use this function when you want to format the counter values of a +// counter that contains a wildcard character for the instance name. The itemBuffer must a slice of type PDH_FMT_COUNTERVALUE_ITEM_DOUBLE. 
+// An example of how this function can be used: +// +// okPath := "\\Process(*)\\% Processor Time" // notice the wildcard * character +// +// // ommitted all necessary stuff ... +// +// var bufSize uint32 +// var bufCount uint32 +// var size uint32 = uint32(unsafe.Sizeof(win.PDH_FMT_COUNTERVALUE_ITEM_DOUBLE{})) +// var emptyBuf [1]win.PDH_FMT_COUNTERVALUE_ITEM_DOUBLE // need at least 1 addressable null ptr. +// +// for { +// // collect +// ret := win.PdhCollectQueryData(queryHandle) +// if ret == win.ERROR_SUCCESS { +// ret = win.PdhGetFormattedCounterArrayDouble(counterHandle, &bufSize, &bufCount, &emptyBuf[0]) // uses null ptr here according to MSDN. +// if ret == win.PDH_MORE_DATA { +// filledBuf := make([]win.PDH_FMT_COUNTERVALUE_ITEM_DOUBLE, bufCount*size) +// ret = win.PdhGetFormattedCounterArrayDouble(counterHandle, &bufSize, &bufCount, &filledBuf[0]) +// for i := 0; i < int(bufCount); i++ { +// c := filledBuf[i] +// var s string = win.UTF16PtrToString(c.SzName) +// fmt.Printf("Index %d -> %s, value %v\n", i, s, c.FmtValue.DoubleValue) +// } +// +// filledBuf = nil +// // Need to at least set bufSize to zero, because if not, the function will not +// // return PDH_MORE_DATA and will not set the bufSize. +// bufCount = 0 +// bufSize = 0 +// } +// +// time.Sleep(2000 * time.Millisecond) +// } +// } +func PdhGetFormattedCounterArrayDouble(hCounter PDH_HCOUNTER, lpdwBufferSize *uint32, lpdwBufferCount *uint32, itemBuffer *PDH_FMT_COUNTERVALUE_ITEM_DOUBLE) uint32 { + ret, _, _ := pdh_GetFormattedCounterArrayW.Call( + uintptr(hCounter), + uintptr(PDH_FMT_DOUBLE), + uintptr(unsafe.Pointer(lpdwBufferSize)), + uintptr(unsafe.Pointer(lpdwBufferCount)), + uintptr(unsafe.Pointer(itemBuffer))) + + return uint32(ret) +} + +// Returns an array of formatted counter values. Use this function when you want to format the counter values of a +// counter that contains a wildcard character for the instance name. The itemBuffer must a slice of type PDH_FMT_COUNTERVALUE_ITEM_LARGE. +// For an example usage, see PdhGetFormattedCounterArrayDouble. +func PdhGetFormattedCounterArrayLarge(hCounter PDH_HCOUNTER, lpdwBufferSize *uint32, lpdwBufferCount *uint32, itemBuffer *PDH_FMT_COUNTERVALUE_ITEM_LARGE) uint32 { + ret, _, _ := pdh_GetFormattedCounterArrayW.Call( + uintptr(hCounter), + uintptr(PDH_FMT_LARGE), + uintptr(unsafe.Pointer(lpdwBufferSize)), + uintptr(unsafe.Pointer(lpdwBufferCount)), + uintptr(unsafe.Pointer(itemBuffer))) + + return uint32(ret) +} + +// Returns an array of formatted counter values. Use this function when you want to format the counter values of a +// counter that contains a wildcard character for the instance name. The itemBuffer must a slice of type PDH_FMT_COUNTERVALUE_ITEM_LONG. +// For an example usage, see PdhGetFormattedCounterArrayDouble. +// +// BUG(krpors): See description of PdhGetFormattedCounterValueLong(). +func PdhGetFormattedCounterArrayLong(hCounter PDH_HCOUNTER, lpdwBufferSize *uint32, lpdwBufferCount *uint32, itemBuffer *PDH_FMT_COUNTERVALUE_ITEM_LONG) uint32 { + ret, _, _ := pdh_GetFormattedCounterArrayW.Call( + uintptr(hCounter), + uintptr(PDH_FMT_LONG), + uintptr(unsafe.Pointer(lpdwBufferSize)), + uintptr(unsafe.Pointer(lpdwBufferCount)), + uintptr(unsafe.Pointer(itemBuffer))) + + return uint32(ret) +} + +// Creates a new query that is used to manage the collection of performance data. +// szDataSource is a null terminated string that specifies the name of the log file from which to +// retrieve the performance data. 
If 0, performance data is collected from a real-time data source. +// dwUserData is a user-defined value to associate with this query. To retrieve the user data later, +// call PdhGetCounterInfo and access dwQueryUserData of the PDH_COUNTER_INFO structure. phQuery is +// the handle to the query, and must be used in subsequent calls. This function returns a PDH_ +// constant error code, or ERROR_SUCCESS if the call succeeded. +func PdhOpenQuery(szDataSource uintptr, dwUserData uintptr, phQuery *PDH_HQUERY) uint32 { + ret, _, _ := pdh_OpenQuery.Call( + szDataSource, + dwUserData, + uintptr(unsafe.Pointer(phQuery))) + + return uint32(ret) +} + +// Validates a path. Will return ERROR_SUCCESS when ok, or PDH_CSTATUS_BAD_COUNTERNAME when the path is +// erroneous. +func PdhValidatePath(path string) uint32 { + ptxt, _ := syscall.UTF16PtrFromString(path) + ret, _, _ := pdh_ValidatePathW.Call(uintptr(unsafe.Pointer(ptxt))) + + return uint32(ret) +} + +func UTF16PtrToString(s *uint16) string { + if s == nil { + return "" + } + return syscall.UTF16ToString((*[1 << 29]uint16)(unsafe.Pointer(s))[0:]) +} diff --git a/vendor/github.com/MakeNowJust/heredoc/LICENSE b/vendor/github.com/MakeNowJust/heredoc/LICENSE new file mode 100644 index 000000000000..8a58c222086a --- /dev/null +++ b/vendor/github.com/MakeNowJust/heredoc/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014-2017 TSUYUSATO Kitsune + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/MakeNowJust/heredoc/README.md b/vendor/github.com/MakeNowJust/heredoc/README.md new file mode 100644 index 000000000000..a3a65faba1a2 --- /dev/null +++ b/vendor/github.com/MakeNowJust/heredoc/README.md @@ -0,0 +1,53 @@ +# heredoc [![CircleCI](https://circleci.com/gh/MakeNowJust/heredoc.svg?style=svg)](https://circleci.com/gh/MakeNowJust/heredoc) [![Go Walker](http://gowalker.org/api/v1/badge)](https://gowalker.org/github.com/MakeNowJust/heredoc) + +## About + +Package heredoc provides the here-document with keeping indent. + +## Install + +```console +$ go get github.com/MakeNowJust/heredoc +``` + +## Import + +```go +// usual +import "github.com/MakeNowJust/heredoc" +// shortcuts +import . "github.com/MakeNowJust/heredoc/dot" +``` + +## Example + +```go +package main + +import ( + "fmt" + . "github.com/MakeNowJust/heredoc/dot" +) + +func main() { + fmt.Println(D(` + Lorem ipsum dolor sit amet, consectetur adipisicing elit, + sed do eiusmod tempor incididunt ut labore et dolore magna + aliqua. Ut enim ad minim veniam, ... 
+ `)) + // Output: + // Lorem ipsum dolor sit amet, consectetur adipisicing elit, + // sed do eiusmod tempor incididunt ut labore et dolore magna + // aliqua. Ut enim ad minim veniam, ... + // +} +``` + +## API Document + + - [Go Walker - github.com/MakeNowJust/heredoc](https://gowalker.org/github.com/MakeNowJust/heredoc) + - [Go Walker - github.com/MakeNowJust/heredoc/dot](https://gowalker.org/github.com/MakeNowJust/heredoc/dot) + +## License + +This software is released under the MIT License, see LICENSE. diff --git a/vendor/github.com/MakeNowJust/heredoc/heredoc.go b/vendor/github.com/MakeNowJust/heredoc/heredoc.go new file mode 100644 index 000000000000..fea12e622f12 --- /dev/null +++ b/vendor/github.com/MakeNowJust/heredoc/heredoc.go @@ -0,0 +1,98 @@ +// Copyright (c) 2014-2017 TSUYUSATO Kitsune +// This software is released under the MIT License. +// http://opensource.org/licenses/mit-license.php + +// Package heredoc provides creation of here-documents from raw strings. +// +// Golang supports raw-string syntax. +// doc := ` +// Foo +// Bar +// ` +// But raw-string cannot recognize indentation. Thus such content is an indented string, equivalent to +// "\n\tFoo\n\tBar\n" +// I dont't want this! +// +// However this problem is solved by package heredoc. +// doc := heredoc.Doc(` +// Foo +// Bar +// `) +// Is equivalent to +// "Foo\nBar\n" +package heredoc + +import ( + "fmt" + "strings" + "unicode" +) + +const maxInt = int(^uint(0) >> 1) + +// Doc returns un-indented string as here-document. +func Doc(raw string) string { + skipFirstLine := false + if raw[0] == '\n' { + raw = raw[1:] + } else { + skipFirstLine = true + } + + lines := strings.Split(raw, "\n") + + minIndentSize := getMinIndent(lines, skipFirstLine) + lines = removeIndentation(lines, minIndentSize, skipFirstLine) + + return strings.Join(lines, "\n") +} + +// getMinIndent calculates the minimum indentation in lines, excluding empty lines. +func getMinIndent(lines []string, skipFirstLine bool) int { + minIndentSize := maxInt + + for i, line := range lines { + if i == 0 && skipFirstLine { + continue + } + + indentSize := 0 + for _, r := range []rune(line) { + if unicode.IsSpace(r) { + indentSize += 1 + } else { + break + } + } + + if len(line) == indentSize { + if i == len(lines)-1 && indentSize < minIndentSize { + lines[i] = "" + } + } else if indentSize < minIndentSize { + minIndentSize = indentSize + } + } + return minIndentSize +} + +// removeIndentation removes n characters from the front of each line in lines. +// Skips first line if skipFirstLine is true, skips empty lines. +func removeIndentation(lines []string, n int, skipFirstLine bool) []string { + for i, line := range lines { + if i == 0 && skipFirstLine { + continue + } + + if len(lines[i]) >= n { + lines[i] = line[n:] + } + } + return lines +} + +// Docf returns unindented and formatted string as here-document. +// Formatting is done as for fmt.Printf(). +func Docf(raw string, args ...interface{}) string { + return fmt.Sprintf(Doc(raw), args...) 
+} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go index aeeada0d694b..788fe6e279b2 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go @@ -2,7 +2,6 @@ package client import ( "fmt" - "net/http/httputil" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/client/metadata" @@ -24,6 +23,13 @@ type ConfigProvider interface { ClientConfig(serviceName string, cfgs ...*aws.Config) Config } +// ConfigNoResolveEndpointProvider same as ConfigProvider except it will not +// resolve the endpoint automatically. The service client's endpoint must be +// provided via the aws.Config.Endpoint field. +type ConfigNoResolveEndpointProvider interface { + ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) Config +} + // A Client implements the base client request and response handling // used by all service clients. type Client struct { @@ -39,7 +45,7 @@ func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, op svc := &Client{ Config: cfg, ClientInfo: info, - Handlers: handlers, + Handlers: handlers.Copy(), } switch retryer, ok := cfg.Retryer.(request.Retryer); { @@ -79,61 +85,6 @@ func (c *Client) AddDebugHandlers() { return } - c.Handlers.Send.PushFront(logRequest) - c.Handlers.Send.PushBack(logResponse) -} - -const logReqMsg = `DEBUG: Request %s/%s Details: ----[ REQUEST POST-SIGN ]----------------------------- -%s ------------------------------------------------------` - -const logReqErrMsg = `DEBUG ERROR: Request %s/%s: ----[ REQUEST DUMP ERROR ]----------------------------- -%s ------------------------------------------------------` - -func logRequest(r *request.Request) { - logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) - dumpedBody, err := httputil.DumpRequestOut(r.HTTPRequest, logBody) - if err != nil { - r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err)) - return - } - - if logBody { - // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's - // Body as a NoOpCloser and will not be reset after read by the HTTP - // client reader. 
- r.ResetBody() - } - - r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody))) -} - -const logRespMsg = `DEBUG: Response %s/%s Details: ----[ RESPONSE ]-------------------------------------- -%s ------------------------------------------------------` - -const logRespErrMsg = `DEBUG ERROR: Response %s/%s: ----[ RESPONSE DUMP ERROR ]----------------------------- -%s ------------------------------------------------------` - -func logResponse(r *request.Request) { - var msg = "no response data" - if r.HTTPResponse != nil { - logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) - dumpedBody, err := httputil.DumpResponse(r.HTTPResponse, logBody) - if err != nil { - r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err)) - return - } - - msg = string(dumpedBody) - } else if r.Error != nil { - msg = r.Error.Error() - } - r.Config.Logger.Log(fmt.Sprintf(logRespMsg, r.ClientInfo.ServiceName, r.Operation.Name, msg)) + c.Handlers.Send.PushFrontNamed(request.NamedHandler{Name: "awssdk.client.LogRequest", Fn: logRequest}) + c.Handlers.Send.PushBackNamed(request.NamedHandler{Name: "awssdk.client.LogResponse", Fn: logResponse}) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go index 43a3676b7978..e25a460fbaa4 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go @@ -15,11 +15,11 @@ import ( // the MaxRetries method: // // type retryer struct { -// service.DefaultRetryer +// client.DefaultRetryer // } // // // This implementation always has 100 max retries -// func (d retryer) MaxRetries() uint { return 100 } +// func (d retryer) MaxRetries() int { return 100 } type DefaultRetryer struct { NumMaxRetries int } @@ -54,6 +54,12 @@ func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration { // ShouldRetry returns true if the request should be retried. func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable != nil { + return *r.Retryable + } + if r.HTTPResponse.StatusCode >= 500 { return true } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go new file mode 100644 index 000000000000..1f39c91f2e99 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go @@ -0,0 +1,108 @@ +package client + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http/httputil" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +const logReqMsg = `DEBUG: Request %s/%s Details: +---[ REQUEST POST-SIGN ]----------------------------- +%s +-----------------------------------------------------` + +const logReqErrMsg = `DEBUG ERROR: Request %s/%s: +---[ REQUEST DUMP ERROR ]----------------------------- +%s +------------------------------------------------------` + +type logWriter struct { + // Logger is what we will use to log the payload of a response. + Logger aws.Logger + // buf stores the contents of what has been read + buf *bytes.Buffer +} + +func (logger *logWriter) Write(b []byte) (int, error) { + return logger.buf.Write(b) +} + +type teeReaderCloser struct { + // io.Reader will be a tee reader that is used during logging. 
+ // This structure will read from a body and write the contents to a logger. + io.Reader + // Source is used just to close when we are done reading. + Source io.ReadCloser +} + +func (reader *teeReaderCloser) Close() error { + return reader.Source.Close() +} + +func logRequest(r *request.Request) { + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + dumpedBody, err := httputil.DumpRequestOut(r.HTTPRequest, logBody) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + if logBody { + // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's + // Body as a NoOpCloser and will not be reset after read by the HTTP + // client reader. + r.ResetBody() + } + + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody))) +} + +const logRespMsg = `DEBUG: Response %s/%s Details: +---[ RESPONSE ]-------------------------------------- +%s +-----------------------------------------------------` + +const logRespErrMsg = `DEBUG ERROR: Response %s/%s: +---[ RESPONSE DUMP ERROR ]----------------------------- +%s +-----------------------------------------------------` + +func logResponse(r *request.Request) { + lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)} + r.HTTPResponse.Body = &teeReaderCloser{ + Reader: io.TeeReader(r.HTTPResponse.Body, lw), + Source: r.HTTPResponse.Body, + } + + handlerFn := func(req *request.Request) { + body, err := httputil.DumpResponse(req.HTTPResponse, false) + if err != nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, req.ClientInfo.ServiceName, req.Operation.Name, err)) + return + } + + b, err := ioutil.ReadAll(lw.buf) + if err != nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, req.ClientInfo.ServiceName, req.Operation.Name, err)) + return + } + lw.Logger.Log(fmt.Sprintf(logRespMsg, req.ClientInfo.ServiceName, req.Operation.Name, string(body))) + if req.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) { + lw.Logger.Log(string(b)) + } + } + + const handlerName = "awsdk.client.LogResponse.ResponseBody" + + r.Handlers.Unmarshal.SetBackNamed(request.NamedHandler{ + Name: handlerName, Fn: handlerFn, + }) + r.Handlers.UnmarshalError.SetBackNamed(request.NamedHandler{ + Name: handlerName, Fn: handlerFn, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go index d58b81280ac6..ae3a286960d7 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -22,9 +22,9 @@ type RequestRetryer interface{} // // // Create Session with MaxRetry configuration to be shared by multiple // // service clients. -// sess, err := session.NewSession(&aws.Config{ +// sess := session.Must(session.NewSession(&aws.Config{ // MaxRetries: aws.Int(3), -// }) +// })) // // // Create S3 service client with a specific Region. // svc := s3.New(sess, &aws.Config{ @@ -53,6 +53,13 @@ type Config struct { // to use based on region. EndpointResolver endpoints.Resolver + // EnforceShouldRetryCheck is used in the AfterRetryHandler to always call + // ShouldRetry regardless of whether or not if request.Retryable is set. + // This will utilize ShouldRetry method of custom retryers. If EnforceShouldRetryCheck + // is not set, then ShouldRetry will only be called if request.Retryable is nil. + // Proper handling of the request.Retryable field is important when setting this field. 
+ EnforceShouldRetryCheck *bool + // The region to send requests to. This parameter is required and must // be configured globally or on a per-client basis unless otherwise // noted. A full list of regions is found in the "Regions and Endpoints" @@ -88,7 +95,7 @@ type Config struct { // recoverable failures. // // When nil or the value does not implement the request.Retryer interface, - // the request.DefaultRetryer will be used. + // the client.DefaultRetryer will be used. // // When both Retryer and MaxRetries are non-nil, the former is used and // the latter ignored. @@ -154,7 +161,8 @@ type Config struct { // the EC2Metadata overriding the timeout for default credentials chain. // // Example: - // sess, err := session.NewSession(aws.NewConfig().WithEC2MetadataDiableTimeoutOverride(true)) + // sess := session.Must(session.NewSession(aws.NewConfig() + // .WithEC2MetadataDiableTimeoutOverride(true))) // // svc := s3.New(sess) // @@ -174,7 +182,7 @@ type Config struct { // // Only supported with. // - // sess, err := session.NewSession() + // sess := session.Must(session.NewSession()) // // svc := s3.New(sess, &aws.Config{ // UseDualStack: aws.Bool(true), @@ -186,13 +194,19 @@ type Config struct { // request delays. This value should only be used for testing. To adjust // the delay of a request see the aws/client.DefaultRetryer and // aws/request.Retryer. + // + // SleepDelay will prevent any Context from being used for canceling retry + // delay of an API operation. It is recommended to not use SleepDelay at all + // and specify a Retryer instead. SleepDelay func(time.Duration) // DisableRestProtocolURICleaning will not clean the URL path when making rest protocol requests. // Will default to false. This would only be used for empty directory names in s3 requests. // // Example: - // sess, err := session.NewSession(&aws.Config{DisableRestProtocolURICleaning: aws.Bool(true)) + // sess := session.Must(session.NewSession(&aws.Config{ + // DisableRestProtocolURICleaning: aws.Bool(true), + // })) // // svc := s3.New(sess) // out, err := svc.GetObject(&s3.GetObjectInput { @@ -207,9 +221,9 @@ type Config struct { // // // Create Session with MaxRetry configuration to be shared by multiple // // service clients. -// sess, err := session.NewSession(aws.NewConfig(). +// sess := session.Must(session.NewSession(aws.NewConfig(). // WithMaxRetries(3), -// ) +// )) // // // Create S3 service client with a specific Region. // svc := s3.New(sess, aws.NewConfig(). @@ -436,6 +450,10 @@ func mergeInConfig(dst *Config, other *Config) { if other.DisableRestProtocolURICleaning != nil { dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning } + + if other.EnforceShouldRetryCheck != nil { + dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck + } } // Copy will return a shallow copy of the Config object. If any additional diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context.go b/vendor/github.com/aws/aws-sdk-go/aws/context.go new file mode 100644 index 000000000000..79f426853b5d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context.go @@ -0,0 +1,71 @@ +package aws + +import ( + "time" +) + +// Context is an copy of the Go v1.7 stdlib's context.Context interface. +// It is represented as a SDK interface to enable you to use the "WithContext" +// API methods with Go v1.6 and a Context type such as golang.org/x/net/context. +// +// See https://golang.org/pkg/context on how to use contexts. 
+type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. + // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. Successive calls to Value with + // the same key returns the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. + Value(key interface{}) interface{} +} + +// BackgroundContext returns a context that will never be canceled, has no +// values, and no deadline. This context is used by the SDK to provide +// backwards compatibility with non-context API operations and functionality. +// +// Go 1.6 and before: +// This context function is equivalent to context.Background in the Go stdlib. +// +// Go 1.7 and later: +// The context returned will be the value returned by context.Background() +// +// See https://golang.org/pkg/context for more information on Contexts. +func BackgroundContext() Context { + return backgroundCtx +} + +// SleepWithContext will wait for the timer duration to expire, or the context +// is canceled. Which ever happens first. If the context is canceled the Context's +// error will be returned. +// +// Expects Context to always return a non-nil error if the Done channel is closed. +func SleepWithContext(ctx Context, dur time.Duration) error { + t := time.NewTimer(dur) + defer t.Stop() + + select { + case <-t.C: + break + case <-ctx.Done(): + return ctx.Err() + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go new file mode 100644 index 000000000000..8fdda5303380 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go @@ -0,0 +1,41 @@ +// +build !go1.7 + +package aws + +import "time" + +// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to +// provide a 1.6 and 1.5 safe version of context that is compatible with Go +// 1.7's Context. +// +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. 
+type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case backgroundCtx: + return "aws.BackgroundContext" + } + return "unknown empty Context" +} + +var ( + backgroundCtx = new(emptyCtx) +) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go new file mode 100644 index 000000000000..064f75c925cc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go @@ -0,0 +1,9 @@ +// +build go1.7 + +package aws + +import "context" + +var ( + backgroundCtx = context.Background() +) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go index 3b73a7da7f9d..ff5d58e06831 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go @@ -311,6 +311,24 @@ func TimeValue(v *time.Time) time.Time { return time.Time{} } +// SecondsTimeValue converts an int64 pointer to a time.Time value +// representing seconds since Epoch or time.Time{} if the pointer is nil. +func SecondsTimeValue(v *int64) time.Time { + if v != nil { + return time.Unix((*v / 1000), 0) + } + return time.Time{} +} + +// MillisecondsTimeValue converts an int64 pointer to a time.Time value +// representing milliseconds sinch Epoch or time.Time{} if the pointer is nil. +func MillisecondsTimeValue(v *int64) time.Time { + if v != nil { + return time.Unix(0, (*v * 1000000)) + } + return time.Time{} +} + // TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC". // The result is undefined if the Unix time cannot be represented by an int64. // Which includes calling TimeUnixMilli on a zero Time is undefined. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go index 8a7bafc78ca9..495e3ef62ca9 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go @@ -27,7 +27,7 @@ type lener interface { // or will use the HTTPRequest.Header's "Content-Length" if defined. If unable // to determine request body length and no "Content-Length" was specified it will panic. // -// The Content-Length will only be aded to the request if the length of the body +// The Content-Length will only be added to the request if the length of the body // is greater than 0. If the body is empty or the current `Content-Length` // header is <= 0, the header will also be stripped. var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) { @@ -71,8 +71,8 @@ var reStatusCode = regexp.MustCompile(`^(\d{3})`) // ValidateReqSigHandler is a request handler to ensure that the request's // signature doesn't expire before it is sent. This can happen when a request -// is built and signed signficantly before it is sent. Or significant delays -// occur whne retrying requests that would cause the signature to expire. +// is built and signed significantly before it is sent. Or significant delays +// occur when retrying requests that would cause the signature to expire. 
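Editor's note on the context additions above: aws.Context, BackgroundContext, and SleepWithContext form a shim over the Go 1.7 context package so the SDK's "WithContext" plumbing builds on Go 1.6 as well, and the retry handler later in this diff uses SleepWithContext for cancellable backoff when no Config.SleepDelay is set. A small sketch of the shim on its own; the sleep duration is arbitrary.

```go
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// BackgroundContext is the SDK's stand-in for context.Background(), and
	// SleepWithContext waits for the duration unless the context is canceled
	// first, in which case it returns the context's error.
	ctx := aws.BackgroundContext()

	start := time.Now()
	if err := aws.SleepWithContext(ctx, 200*time.Millisecond); err != nil {
		fmt.Println("sleep canceled:", err)
		return
	}
	fmt.Println("slept for", time.Since(start))
}
```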
var ValidateReqSigHandler = request.NamedHandler{ Name: "core.ValidateReqSigHandler", Fn: func(r *request.Request) { @@ -98,44 +98,95 @@ var ValidateReqSigHandler = request.NamedHandler{ } // SendHandler is a request handler to send service request using HTTP client. -var SendHandler = request.NamedHandler{Name: "core.SendHandler", Fn: func(r *request.Request) { - var err error - r.HTTPResponse, err = r.Config.HTTPClient.Do(r.HTTPRequest) - if err != nil { - // Prevent leaking if an HTTPResponse was returned. Clean up - // the body. - if r.HTTPResponse != nil { - r.HTTPResponse.Body.Close() +var SendHandler = request.NamedHandler{ + Name: "core.SendHandler", + Fn: func(r *request.Request) { + sender := sendFollowRedirects + if r.DisableFollowRedirects { + sender = sendWithoutFollowRedirects } - // Capture the case where url.Error is returned for error processing - // response. e.g. 301 without location header comes back as string - // error and r.HTTPResponse is nil. Other url redirect errors will - // comeback in a similar method. - if e, ok := err.(*url.Error); ok && e.Err != nil { - if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil { - code, _ := strconv.ParseInt(s[1], 10, 64) - r.HTTPResponse = &http.Response{ - StatusCode: int(code), - Status: http.StatusText(int(code)), - Body: ioutil.NopCloser(bytes.NewReader([]byte{})), - } - return - } + + if request.NoBody == r.HTTPRequest.Body { + // Strip off the request body if the NoBody reader was used as a + // place holder for a request body. This prevents the SDK from + // making requests with a request body when it would be invalid + // to do so. + // + // Use a shallow copy of the http.Request to ensure the race condition + // of transport on Body will not trigger + reqOrig, reqCopy := r.HTTPRequest, *r.HTTPRequest + reqCopy.Body = nil + r.HTTPRequest = &reqCopy + defer func() { + r.HTTPRequest = reqOrig + }() } - if r.HTTPResponse == nil { - // Add a dummy request response object to ensure the HTTPResponse - // value is consistent. + + var err error + r.HTTPResponse, err = sender(r) + if err != nil { + handleSendError(r, err) + } + }, +} + +func sendFollowRedirects(r *request.Request) (*http.Response, error) { + return r.Config.HTTPClient.Do(r.HTTPRequest) +} + +func sendWithoutFollowRedirects(r *request.Request) (*http.Response, error) { + transport := r.Config.HTTPClient.Transport + if transport == nil { + transport = http.DefaultTransport + } + + return transport.RoundTrip(r.HTTPRequest) +} + +func handleSendError(r *request.Request, err error) { + // Prevent leaking if an HTTPResponse was returned. Clean up + // the body. + if r.HTTPResponse != nil { + r.HTTPResponse.Body.Close() + } + // Capture the case where url.Error is returned for error processing + // response. e.g. 301 without location header comes back as string + // error and r.HTTPResponse is nil. Other URL redirect errors will + // comeback in a similar method. + if e, ok := err.(*url.Error); ok && e.Err != nil { + if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil { + code, _ := strconv.ParseInt(s[1], 10, 64) r.HTTPResponse = &http.Response{ - StatusCode: int(0), - Status: http.StatusText(int(0)), + StatusCode: int(code), + Status: http.StatusText(int(code)), Body: ioutil.NopCloser(bytes.NewReader([]byte{})), } + return } - // Catch all other request errors. 
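// A sketch (plain net/http, nothing SDK-specific assumed) of the shallow-copy
// pattern the rewritten SendHandler above uses to drop a placeholder request
// body without racing on the original *http.Request.
//
//    func sendWithoutBody(req *http.Request, rt http.RoundTripper) (*http.Response, error) {
//        reqCopy := *req     // shallow copy; URL, headers, etc. are shared
//        reqCopy.Body = nil  // strip the placeholder body on the copy only
//        return rt.RoundTrip(&reqCopy)
//    }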
- r.Error = awserr.New("RequestError", "send request failed", err) - r.Retryable = aws.Bool(true) // network errors are retryable } -}} + if r.HTTPResponse == nil { + // Add a dummy request response object to ensure the HTTPResponse + // value is consistent. + r.HTTPResponse = &http.Response{ + StatusCode: int(0), + Status: http.StatusText(int(0)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + } + // Catch all other request errors. + r.Error = awserr.New("RequestError", "send request failed", err) + r.Retryable = aws.Bool(true) // network errors are retryable + + // Override the error with a context canceled error, if that was canceled. + ctx := r.Context() + select { + case <-ctx.Done(): + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", ctx.Err()) + r.Retryable = aws.Bool(false) + default: + } +} // ValidateResponseHandler is a request handler to validate service response. var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) { @@ -150,13 +201,22 @@ var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseH var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) { // If one of the other handlers already set the retry state // we don't want to override it based on the service's state - if r.Retryable == nil { + if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) { r.Retryable = aws.Bool(r.ShouldRetry(r)) } if r.WillRetry() { r.RetryDelay = r.RetryRules(r) - r.Config.SleepDelay(r.RetryDelay) + + if sleepFn := r.Config.SleepDelay; sleepFn != nil { + // Support SleepDelay for backwards compatibility and testing + sleepFn(r.RetryDelay) + } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil { + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", err) + r.Retryable = aws.Bool(false) + return + } // when the expired token exception occurs the credentials // need to be expired locally so that the next request to diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go index 6efc77bf0932..f298d6596268 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go @@ -13,7 +13,7 @@ var ( // // @readonly ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders", - `no valid providers in chain. Deprecated. + `no valid providers in chain. Deprecated. 
For verbose messaging see aws.Config.CredentialsChainVerboseErrors`, nil) ) @@ -39,16 +39,18 @@ var ( // does not return any credentials ChainProvider will return the error // ErrNoValidProvidersFoundInChain // -// creds := NewChainCredentials( -// []Provider{ -// &EnvProvider{}, -// &EC2RoleProvider{ +// creds := credentials.NewChainCredentials( +// []credentials.Provider{ +// &credentials.EnvProvider{}, +// &ec2rolecreds.EC2RoleProvider{ // Client: ec2metadata.New(sess), // }, // }) // // // Usage of ChainCredentials with aws.Config -// svc := ec2.New(&aws.Config{Credentials: creds}) +// svc := ec2.New(session.Must(session.NewSession(&aws.Config{ +// Credentials: creds, +// }))) // type ChainProvider struct { Providers []Provider diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go index 7b8ebf5f9d87..42416fc2f0fc 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go @@ -14,7 +14,7 @@ // // Example of using the environment variable credentials. // -// creds := NewEnvCredentials() +// creds := credentials.NewEnvCredentials() // // // Retrieve the credentials value // credValue, err := creds.Get() @@ -26,7 +26,7 @@ // This may be helpful to proactively expire credentials and refresh them sooner // than they would naturally expire on their own. // -// creds := NewCredentials(&EC2RoleProvider{}) +// creds := credentials.NewCredentials(&ec2rolecreds.EC2RoleProvider{}) // creds.Expire() // credsValue, err := creds.Get() // // New credentials will be retrieved instead of from cache. @@ -43,7 +43,7 @@ // func (m *MyProvider) Retrieve() (Value, error) {...} // func (m *MyProvider) IsExpired() bool {...} // -// creds := NewCredentials(&MyProvider{}) +// creds := credentials.NewCredentials(&MyProvider{}) // credValue, err := creds.Get() // package credentials @@ -60,7 +60,9 @@ import ( // when making service API calls. For example, when accessing public // s3 buckets. // -// svc := s3.New(&aws.Config{Credentials: AnonymousCredentials}) +// svc := s3.New(session.Must(session.NewSession(&aws.Config{ +// Credentials: credentials.AnonymousCredentials, +// }))) // // Access public S3 buckets. // // @readonly @@ -88,7 +90,7 @@ type Value struct { // The Provider should not need to implement its own mutexes, because // that will be managed by Credentials. type Provider interface { - // Refresh returns nil if it successfully retrieved the value. + // Retrieve returns nil if it successfully retrieved the value. // Error is returned if the value were not obtainable, or empty. Retrieve() (Value, error) @@ -97,6 +99,27 @@ type Provider interface { IsExpired() bool } +// An ErrorProvider is a stub credentials provider that always returns an error +// this is used by the SDK when construction a known provider is not possible +// due to an error. +type ErrorProvider struct { + // The error to be returned from Retrieve + Err error + + // The provider name to set on the Retrieved returned Value + ProviderName string +} + +// Retrieve will always return the error that the ErrorProvider was created with. +func (p ErrorProvider) Retrieve() (Value, error) { + return Value{ProviderName: p.ProviderName}, p.Err +} + +// IsExpired will always return not expired. +func (p ErrorProvider) IsExpired() bool { + return false +} + // A Expiry provides shared expiration logic to be used by credentials // providers to implement expiry functionality. 
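// A small sketch (not from the upstream SDK source) of how the new
// ErrorProvider above behaves; the error code used is only an example.
//
//    p := credentials.ErrorProvider{
//        Err:          awserr.New("CredentialsEndpointError", "invalid host", nil),
//        ProviderName: "ErrorProvider",
//    }
//    creds := credentials.NewCredentials(p)
//
//    // Retrieve always fails with the wrapped error, and IsExpired always
//    // reports false, so every Get call surfaces the construction error.
//    _, err := creds.Get()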
// diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go index 96655bc46ae3..c14231a16f2b 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go @@ -29,6 +29,7 @@ var ( // Environment variables used: // // * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY +// // * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY type EnvProvider struct { retrieved bool diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go index 7fb7cbf0db00..51e21e0f38f6 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go @@ -3,11 +3,11 @@ package credentials import ( "fmt" "os" - "path/filepath" "github.com/go-ini/ini" "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/internal/shareddefaults" ) // SharedCredsProviderName provides a name of SharedCreds provider @@ -15,8 +15,6 @@ const SharedCredsProviderName = "SharedCredentialsProvider" var ( // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found. - // - // @readonly ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil) ) @@ -117,22 +115,23 @@ func loadProfile(filename, profile string) (Value, error) { // // Will return an error if the user's home directory path cannot be found. func (p *SharedCredentialsProvider) filename() (string, error) { - if p.Filename == "" { - if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); p.Filename != "" { - return p.Filename, nil - } - - homeDir := os.Getenv("HOME") // *nix - if homeDir == "" { // Windows - homeDir = os.Getenv("USERPROFILE") - } - if homeDir == "" { - return "", ErrSharedCredentialsHomeNotFound - } - - p.Filename = filepath.Join(homeDir, ".aws", "credentials") + if len(p.Filename) != 0 { + return p.Filename, nil + } + + if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(p.Filename) != 0 { + return p.Filename, nil } + if home := shareddefaults.UserHomeDir(); len(home) == 0 { + // Backwards compatibility of home directly not found error being returned. + // This error is too verbose, failure when opening the file would of been + // a better error to return. + return "", ErrSharedCredentialsHomeNotFound + } + + p.Filename = shareddefaults.SharedCredentialsFilename() + return p.Filename, nil } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go index 30c847ae221e..4108e433e64a 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go @@ -1,7 +1,81 @@ -// Package stscreds are credential Providers to retrieve STS AWS credentials. -// -// STS provides multiple ways to retrieve credentials which can be used when making -// future AWS service API operation calls. +/* +Package stscreds are credential Providers to retrieve STS AWS credentials. + +STS provides multiple ways to retrieve credentials which can be used when making +future AWS service API operation calls. 
+ +The SDK will ensure that per instance of credentials.Credentials all requests +to refresh the credentials will be synchronized. But, the SDK is unable to +ensure synchronous usage of the AssumeRoleProvider if the value is shared +between multiple Credentials, Sessions or service clients. + +Assume Role + +To assume an IAM role using STS with the SDK you can create a new Credentials +with the SDKs's stscreds package. + + // Initial credentials loaded from SDK's default credential chain. Such as + // the environment, shared credentials (~/.aws/credentials), or EC2 Instance + // Role. These credentials will be used to to make the STS Assume Role API. + sess := session.Must(session.NewSession()) + + // Create the credentials from AssumeRoleProvider to assume the role + // referenced by the "myRoleARN" ARN. + creds := stscreds.NewCredentials(sess, "myRoleArn") + + // Create service client value configured for credentials + // from assumed role. + svc := s3.New(sess, &aws.Config{Credentials: creds}) + +Assume Role with static MFA Token + +To assume an IAM role with a MFA token you can either specify a MFA token code +directly or provide a function to prompt the user each time the credentials +need to refresh the role's credentials. Specifying the TokenCode should be used +for short lived operations that will not need to be refreshed, and when you do +not want to have direct control over the user provides their MFA token. + +With TokenCode the AssumeRoleProvider will be not be able to refresh the role's +credentials. + + // Create the credentials from AssumeRoleProvider to assume the role + // referenced by the "myRoleARN" ARN using the MFA token code provided. + creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) { + p.SerialNumber = aws.String("myTokenSerialNumber") + p.TokenCode = aws.String("00000000") + }) + + // Create service client value configured for credentials + // from assumed role. + svc := s3.New(sess, &aws.Config{Credentials: creds}) + +Assume Role with MFA Token Provider + +To assume an IAM role with MFA for longer running tasks where the credentials +may need to be refreshed setting the TokenProvider field of AssumeRoleProvider +will allow the credential provider to prompt for new MFA token code when the +role's credentials need to be refreshed. + +The StdinTokenProvider function is available to prompt on stdin to retrieve +the MFA token code from the user. You can also implement custom prompts by +satisfing the TokenProvider function signature. + +Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will +have undesirable results as the StdinTokenProvider will not be synchronized. A +single Credentials with an AssumeRoleProvider can be shared safely. + + // Create the credentials from AssumeRoleProvider to assume the role + // referenced by the "myRoleARN" ARN. Prompting for MFA token from stdin. + creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) { + p.SerialNumber = aws.String("myTokenSerialNumber") + p.TokenProvider = stscreds.StdinTokenProvider + }) + + // Create service client value configured for credentials + // from assumed role. 
+ svc := s3.New(sess, &aws.Config{Credentials: creds}) + +*/ package stscreds import ( @@ -9,11 +83,31 @@ import ( "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/service/sts" ) +// StdinTokenProvider will prompt on stdout and read from stdin for a string value. +// An error is returned if reading from stdin fails. +// +// Use this function go read MFA tokens from stdin. The function makes no attempt +// to make atomic prompts from stdin across multiple gorouties. +// +// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will +// have undesirable results as the StdinTokenProvider will not be synchronized. A +// single Credentials with an AssumeRoleProvider can be shared safely +// +// Will wait forever until something is provided on the stdin. +func StdinTokenProvider() (string, error) { + var v string + fmt.Printf("Assume Role MFA token code: ") + _, err := fmt.Scanln(&v) + + return v, err +} + // ProviderName provides a name of AssumeRole provider const ProviderName = "AssumeRoleProvider" @@ -27,8 +121,15 @@ type AssumeRoler interface { var DefaultDuration = time.Duration(15) * time.Minute // AssumeRoleProvider retrieves temporary credentials from the STS service, and -// keeps track of their expiration time. This provider must be used explicitly, -// as it is not included in the credentials chain. +// keeps track of their expiration time. +// +// This credential provider will be used by the SDKs default credential change +// when shared configuration is enabled, and the shared config or shared credentials +// file configure assume role. See Session docs for how to do this. +// +// AssumeRoleProvider does not provide any synchronization and it is not safe +// to share this value across multiple Credentials, Sessions, or service clients +// without also sharing the same Credentials instance. type AssumeRoleProvider struct { credentials.Expiry @@ -65,8 +166,23 @@ type AssumeRoleProvider struct { // assumed requires MFA (that is, if the policy includes a condition that tests // for MFA). If the role being assumed requires MFA and if the TokenCode value // is missing or expired, the AssumeRole call returns an "access denied" error. + // + // If SerialNumber is set and neither TokenCode nor TokenProvider are also + // set an error will be returned. TokenCode *string + // Async method of providing MFA token code for assuming an IAM role with MFA. + // The value returned by the function will be used as the TokenCode in the Retrieve + // call. See StdinTokenProvider for a provider that prompts and reads from stdin. + // + // This token provider will be called when ever the assumed role's + // credentials need to be refreshed when SerialNumber is also set and + // TokenCode is not set. + // + // If both TokenCode and TokenProvider is set, TokenProvider will be used and + // TokenCode is ignored. + TokenProvider func() (string, error) + // ExpiryWindow will allow the credentials to trigger refreshing prior to // the credentials actually expiring. This is beneficial so race conditions // with expiring credentials do not cause request to fail unexpectedly @@ -85,6 +201,10 @@ type AssumeRoleProvider struct { // // Takes a Config provider to create the STS client. The ConfigProvider is // satisfied by the session.Session type. +// +// It is safe to share the returned Credentials with multiple Sessions and +// service clients. 
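// A sketch (not from the upstream SDK source) of supplying a custom
// TokenProvider instead of stscreds.StdinTokenProvider; the environment
// variable name is purely hypothetical.
//
//    creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
//        p.SerialNumber = aws.String("myTokenSerialNumber")
//        // Any func() (string, error) satisfies TokenProvider.
//        p.TokenProvider = func() (string, error) {
//            return os.Getenv("MY_MFA_TOKEN_CODE"), nil
//        }
//    })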
All access to the credentials and refreshing them +// will be synchronized. func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { p := &AssumeRoleProvider{ Client: sts.New(c), @@ -103,7 +223,11 @@ func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*As // AssumeRoleProvider. The credentials will expire every 15 minutes and the // role will be named after a nanosecond timestamp of this operation. // -// Takes an AssumeRoler which can be satisfiede by the STS client. +// Takes an AssumeRoler which can be satisfied by the STS client. +// +// It is safe to share the returned Credentials with multiple Sessions and +// service clients. All access to the credentials and refreshing them +// will be synchronized. func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { p := &AssumeRoleProvider{ Client: svc, @@ -139,12 +263,25 @@ func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) { if p.Policy != nil { input.Policy = p.Policy } - if p.SerialNumber != nil && p.TokenCode != nil { - input.SerialNumber = p.SerialNumber - input.TokenCode = p.TokenCode + if p.SerialNumber != nil { + if p.TokenCode != nil { + input.SerialNumber = p.SerialNumber + input.TokenCode = p.TokenCode + } else if p.TokenProvider != nil { + input.SerialNumber = p.SerialNumber + code, err := p.TokenProvider() + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + input.TokenCode = aws.String(code) + } else { + return credentials.Value{ProviderName: ProviderName}, + awserr.New("AssumeRoleTokenNotAvailable", + "assume role with MFA enabled, but neither TokenCode nor TokenProvider are set", nil) + } } - roleOutput, err := p.Client.AssumeRole(input) + roleOutput, err := p.Client.AssumeRole(input) if err != nil { return credentials.Value{ProviderName: ProviderName}, err } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go index 0ef55040a8f8..07afe3b8e6d3 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go @@ -10,10 +10,12 @@ package defaults import ( "fmt" "net/http" + "net/url" "os" "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/corehandlers" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" @@ -56,7 +58,6 @@ func Config() *aws.Config { WithMaxRetries(aws.UseServiceDefaultRetries). WithLogger(aws.NewDefaultLogger()). WithLogLevel(aws.LogOff). - WithSleepDelay(time.Sleep). WithEndpointResolver(endpoints.DefaultResolver()) } @@ -97,23 +98,51 @@ func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credenti }) } -// RemoteCredProvider returns a credenitials provider for the default remote +const ( + httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI" + ecsCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" +) + +// RemoteCredProvider returns a credentials provider for the default remote // endpoints such as EC2 or ECS Roles. 
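// A sketch (not from the upstream SDK source) of the environment variables
// RemoteCredProvider consults below; the URL and the use of
// defaults.Config()/defaults.Handlers() are assumptions for illustration.
//
//    // Full-URI endpoints must resolve to localhost or 127.0.0.1; any other
//    // host produces an ErrorProvider wrapping "CredentialsEndpointError".
//    os.Setenv("AWS_CONTAINER_CREDENTIALS_FULL_URI", "http://127.0.0.1:8080/creds")
//
//    provider := defaults.RemoteCredProvider(*defaults.Config(), defaults.Handlers())
//    creds := credentials.NewCredentials(provider)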
func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { - ecsCredURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI") + if u := os.Getenv(httpProviderEnvVar); len(u) > 0 { + return localHTTPCredProvider(cfg, handlers, u) + } - if len(ecsCredURI) > 0 { - return ecsCredProvider(cfg, handlers, ecsCredURI) + if uri := os.Getenv(ecsCredsProviderEnvVar); len(uri) > 0 { + u := fmt.Sprintf("http://169.254.170.2%s", uri) + return httpCredProvider(cfg, handlers, u) } return ec2RoleProvider(cfg, handlers) } -func ecsCredProvider(cfg aws.Config, handlers request.Handlers, uri string) credentials.Provider { - const host = `169.254.170.2` +func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { + var errMsg string + + parsed, err := url.Parse(u) + if err != nil { + errMsg = fmt.Sprintf("invalid URL, %v", err) + } else if host := aws.URLHostname(parsed); !(host == "localhost" || host == "127.0.0.1") { + errMsg = fmt.Sprintf("invalid host address, %q, only localhost and 127.0.0.1 are valid.", host) + } + + if len(errMsg) > 0 { + if cfg.Logger != nil { + cfg.Logger.Log("Ignoring, HTTP credential provider", errMsg, err) + } + return credentials.ErrorProvider{ + Err: awserr.New("CredentialsEndpointError", errMsg, err), + ProviderName: endpointcreds.ProviderName, + } + } + + return httpCredProvider(cfg, handlers, u) +} - return endpointcreds.NewProviderClient(cfg, handlers, - fmt.Sprintf("http://%s%s", host, uri), +func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { + return endpointcreds.NewProviderClient(cfg, handlers, u, func(p *endpointcreds.Provider) { p.ExpiryWindow = 5 * time.Minute }, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go new file mode 100644 index 000000000000..ca0ee1dcc78e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go @@ -0,0 +1,27 @@ +package defaults + +import ( + "github.com/aws/aws-sdk-go/internal/shareddefaults" +) + +// SharedCredentialsFilename returns the SDK's default file path +// for the shared credentials file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/credentials +// - Windows: %USERPROFILE%\.aws\credentials +func SharedCredentialsFilename() string { + return shareddefaults.SharedCredentialsFilename() +} + +// SharedConfigFilename returns the SDK's default file path for +// the shared config file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/config +// - Windows: %USERPROFILE%\.aws\config +func SharedConfigFilename() string { + return shareddefaults.SharedConfigFilename() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/doc.go new file mode 100644 index 000000000000..4fcb6161848e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/doc.go @@ -0,0 +1,56 @@ +// Package aws provides the core SDK's utilities and shared types. Use this package's +// utilities to simplify setting and reading API operations parameters. +// +// Value and Pointer Conversion Utilities +// +// This package includes a helper conversion utility for each scalar type the SDK's +// API use. These utilities make getting a pointer of the scalar, and dereferencing +// a pointer easier. +// +// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value. 
+// The Pointer to value will safely dereference the pointer and return its value. +// If the pointer was nil, the scalar's zero value will be returned. +// +// The value to pointer functions will be named after the scalar type. So get a +// *string from a string value use the "String" function. This makes it easy to +// to get pointer of a literal string value, because getting the address of a +// literal requires assigning the value to a variable first. +// +// var strPtr *string +// +// // Without the SDK's conversion functions +// str := "my string" +// strPtr = &str +// +// // With the SDK's conversion functions +// strPtr = aws.String("my string") +// +// // Convert *string to string value +// str = aws.StringValue(strPtr) +// +// In addition to scalars the aws package also includes conversion utilities for +// map and slice for commonly types used in API parameters. The map and slice +// conversion functions use similar naming pattern as the scalar conversion +// functions. +// +// var strPtrs []*string +// var strs []string = []string{"Go", "Gophers", "Go"} +// +// // Convert []string to []*string +// strPtrs = aws.StringSlice(strs) +// +// // Convert []*string to []string +// strs = aws.StringValueSlice(strPtrs) +// +// SDK Default HTTP Client +// +// The SDK will use the http.DefaultClient if a HTTP client is not provided to +// the SDK's Session, or service client constructor. This means that if the +// http.DefaultClient is modified by other components of your application the +// modifications will be picked up by the SDK as well. +// +// In some cases this might be intended, but it is a better practice to create +// a custom HTTP Client to share explicitly through your application. You can +// configure the SDK to use the custom HTTP Client by setting the HTTPClient +// value of the SDK's Config type when creating a Session or service client. +package aws diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 945f408bb9cd..f468de00d9d6 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -1,4 +1,4 @@ -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. +// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. package endpoints @@ -46,23 +46,28 @@ const ( AcmServiceID = "acm" // Acm. ApigatewayServiceID = "apigateway" // Apigateway. ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling. - AppstreamServiceID = "appstream" // Appstream. Appstream2ServiceID = "appstream2" // Appstream2. + AthenaServiceID = "athena" // Athena. AutoscalingServiceID = "autoscaling" // Autoscaling. + BatchServiceID = "batch" // Batch. BudgetsServiceID = "budgets" // Budgets. + ClouddirectoryServiceID = "clouddirectory" // Clouddirectory. CloudformationServiceID = "cloudformation" // Cloudformation. CloudfrontServiceID = "cloudfront" // Cloudfront. CloudhsmServiceID = "cloudhsm" // Cloudhsm. + Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2. CloudsearchServiceID = "cloudsearch" // Cloudsearch. CloudtrailServiceID = "cloudtrail" // Cloudtrail. CodebuildServiceID = "codebuild" // Codebuild. CodecommitServiceID = "codecommit" // Codecommit. CodedeployServiceID = "codedeploy" // Codedeploy. CodepipelineServiceID = "codepipeline" // Codepipeline. + CodestarServiceID = "codestar" // Codestar. CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity. 
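// A sketch (not from the upstream SDK source) of the custom HTTP client
// guidance in the aws package doc added earlier in this diff; the timeout
// value is an arbitrary assumption.
//
//    httpClient := &http.Client{Timeout: 30 * time.Second}
//
//    sess := session.Must(session.NewSession(&aws.Config{
//        HTTPClient: httpClient,
//    }))
//
//    // Clients built from sess share httpClient rather than http.DefaultClient.
//    svc := s3.New(sess)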
CognitoIdpServiceID = "cognito-idp" // CognitoIdp. CognitoSyncServiceID = "cognito-sync" // CognitoSync. ConfigServiceID = "config" // Config. + CurServiceID = "cur" // Cur. DatapipelineServiceID = "datapipeline" // Datapipeline. DevicefarmServiceID = "devicefarm" // Devicefarm. DirectconnectServiceID = "directconnect" // Directconnect. @@ -81,11 +86,14 @@ const ( ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce. ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder. EmailServiceID = "email" // Email. + EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace. EsServiceID = "es" // Es. EventsServiceID = "events" // Events. FirehoseServiceID = "firehose" // Firehose. GameliftServiceID = "gamelift" // Gamelift. GlacierServiceID = "glacier" // Glacier. + GlueServiceID = "glue" // Glue. + GreengrassServiceID = "greengrass" // Greengrass. HealthServiceID = "health" // Health. IamServiceID = "iam" // Iam. ImportexportServiceID = "importexport" // Importexport. @@ -100,10 +108,14 @@ const ( MachinelearningServiceID = "machinelearning" // Machinelearning. MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics. MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace. + MghServiceID = "mgh" // Mgh. MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics. + ModelsLexServiceID = "models.lex" // ModelsLex. MonitoringServiceID = "monitoring" // Monitoring. + MturkRequesterServiceID = "mturk-requester" // MturkRequester. OpsworksServiceID = "opsworks" // Opsworks. OpsworksCmServiceID = "opsworks-cm" // OpsworksCm. + OrganizationsServiceID = "organizations" // Organizations. PinpointServiceID = "pinpoint" // Pinpoint. PollyServiceID = "polly" // Polly. RdsServiceID = "rds" // Rds. @@ -111,6 +123,7 @@ const ( RekognitionServiceID = "rekognition" // Rekognition. Route53ServiceID = "route53" // Route53. Route53domainsServiceID = "route53domains" // Route53domains. + RuntimeLexServiceID = "runtime.lex" // RuntimeLex. S3ServiceID = "s3" // S3. SdbServiceID = "sdb" // Sdb. ServicecatalogServiceID = "servicecatalog" // Servicecatalog. @@ -126,8 +139,10 @@ const ( StsServiceID = "sts" // Sts. SupportServiceID = "support" // Support. SwfServiceID = "swf" // Swf. + TaggingServiceID = "tagging" // Tagging. WafServiceID = "waf" // Waf. WafRegionalServiceID = "waf-regional" // WafRegional. + WorkdocsServiceID = "workdocs" // Workdocs. WorkspacesServiceID = "workspaces" // Workspaces. XrayServiceID = "xray" // Xray. ) @@ -135,17 +150,20 @@ const ( // DefaultResolver returns an Endpoint resolver that will be able // to resolve endpoints for: AWS Standard, AWS China, and AWS GovCloud (US). // -// Casting the return value of this func to a EnumPartitions will -// allow you to get a list of the partitions in the order the endpoints -// will be resolved in. +// Use DefaultPartitions() to get the list of the default partitions. +func DefaultResolver() Resolver { + return defaultPartitions +} + +// DefaultPartitions returns a list of the partitions the SDK is bundled +// with. The available partitions are: AWS Standard, AWS China, and AWS GovCloud (US). // -// resolver := endpoints.DefaultResolver() -// partitions := resolver.(endpoints.EnumPartitions).Partitions() +// partitions := endpoints.DefaultPartitions // for _, p := range partitions { // // ... 
inspect partitions // } -func DefaultResolver() Resolver { - return defaultPartitions +func DefaultPartitions() []Partition { + return defaultPartitions.Partitions() } var defaultPartitions = partitions{ @@ -243,10 +261,14 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -278,13 +300,6 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "appstream": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "us-east-1": endpoint{}, - }, - }, "appstream2": service{ Defaults: endpoint{ Protocols: []string{"https"}, @@ -299,6 +314,17 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "athena": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "autoscaling": service{ Defaults: endpoint{ Protocols: []string{"http", "https"}, @@ -320,6 +346,20 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "batch": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "budgets": service{ PartitionEndpoint: "aws-global", IsRegionalized: boxedFalse, @@ -333,6 +373,18 @@ var awsPartition = partition{ }, }, }, + "clouddirectory": service{ + + Endpoints: endpoints{ + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "cloudformation": service{ Endpoints: endpoints{ @@ -381,6 +433,15 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "cloudhsmv2": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "cloudsearch": service{ Endpoints: endpoints{ @@ -418,18 +479,36 @@ var awsPartition = partition{ "codebuild": service{ Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "codecommit": service{ Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "codedeploy": service{ @@ -455,13 
+534,32 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codestar": service{ + + Endpoints: endpoints{ + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -470,9 +568,12 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-2": endpoint{}, @@ -483,9 +584,12 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-2": endpoint{}, @@ -496,9 +600,12 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-2": endpoint{}, @@ -523,6 +630,12 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "cur": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, "datapipeline": service{ Endpoints: endpoints{ @@ -587,11 +700,16 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -728,15 +846,17 @@ var awsPartition = partition{ "elasticfilesystem": service{ Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "elasticloadbalancing": service{ Defaults: endpoint{ - Protocols: []string{"http", "https"}, + Protocols: []string{"https"}, }, Endpoints: endpoints{ "ap-northeast-1": endpoint{}, @@ -802,6 +922,16 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "entitlement.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, "es": service{ Endpoints: endpoints{ @@ -810,8 +940,10 @@ var awsPartition = partition{ "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + 
"ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -827,8 +959,10 @@ var awsPartition = partition{ "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -839,9 +973,12 @@ var awsPartition = partition{ "firehose": service{ Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "ap-northeast-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "gamelift": service{ @@ -851,10 +988,15 @@ var awsPartition = partition{ "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -866,6 +1008,7 @@ var awsPartition = partition{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, @@ -877,6 +1020,27 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "glue": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "health": service{ Endpoints: endpoints{ @@ -920,6 +1084,7 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -936,6 +1101,7 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-2": endpoint{}, @@ -992,10 +1158,14 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -1005,7 +1175,16 @@ var awsPartition = partition{ "lightsail": service{ Endpoints: endpoints{ - "us-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "logs": service{ @@ -1041,24 +1220,50 @@ var awsPartition = partition{ }, }, "metering.marketplace": service{ - + Defaults: endpoint{ + CredentialScope: credentialScope{ + 
Service: "aws-marketplace", + }, + }, Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, + "mgh": service{ + + Endpoints: endpoints{ + "us-west-2": endpoint{}, + }, + }, "mobileanalytics": service{ Endpoints: endpoints{ "us-east-1": endpoint{}, }, }, + "models.lex": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, "monitoring": service{ Defaults: endpoint{ Protocols: []string{"http", "https"}, @@ -1080,6 +1285,16 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "mturk-requester": service{ + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "sandbox": endpoint{ + Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com", + }, + "us-east-1": endpoint{}, + }, + }, "opsworks": service{ Endpoints: endpoints{ @@ -1106,6 +1321,19 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "organizations": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "organizations.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, "pinpoint": service{ Defaults: endpoint{ CredentialScope: credentialScope{ @@ -1192,6 +1420,16 @@ var awsPartition = partition{ "us-east-1": endpoint{}, }, }, + "runtime.lex": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, "s3": service{ PartitionEndpoint: "us-east-1", IsRegionalized: boxedTrue, @@ -1272,6 +1510,7 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, @@ -1296,20 +1535,27 @@ var awsPartition = partition{ "sms": service{ Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "snowball": service{ Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -1366,10 +1612,13 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1381,7 +1630,10 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, 
"us-east-2": endpoint{}, "us-west-2": endpoint{}, @@ -1392,6 +1644,7 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, @@ -1407,7 +1660,7 @@ var awsPartition = partition{ }, "streams.dynamodb": service{ Defaults: endpoint{ - Protocols: []string{"http", "http", "https", "https"}, + Protocols: []string{"http", "https"}, CredentialScope: credentialScope{ Service: "dynamodb", }, @@ -1462,9 +1715,33 @@ var awsPartition = partition{ "eu-west-2": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "sts-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "sts-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "sts-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "sts-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, "support": service{ @@ -1492,6 +1769,25 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "tagging": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "waf": service{ PartitionEndpoint: "aws-global", IsRegionalized: boxedFalse, @@ -1511,6 +1807,18 @@ var awsPartition = partition{ "ap-northeast-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "workdocs": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -1522,6 +1830,7 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-west-2": endpoint{}, }, @@ -1534,8 +1843,10 @@ var awsPartition = partition{ "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1572,6 +1883,18 @@ var awscnPartition = partition{ }, }, Services: services{ + "application-autoscaling": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "application-autoscaling", + }, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, "autoscaling": service{ Defaults: endpoint{ Protocols: []string{"http", "https"}, @@ -1592,6 +1915,12 @@ var awscnPartition 
= partition{ "cn-north-1": endpoint{}, }, }, + "codedeploy": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, "config": service{ Endpoints: endpoints{ @@ -1631,6 +1960,18 @@ var awscnPartition = partition{ }, }, }, + "ecr": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, "elasticache": service{ Endpoints: endpoints{ @@ -1645,7 +1986,7 @@ var awscnPartition = partition{ }, "elasticloadbalancing": service{ Defaults: endpoint{ - Protocols: []string{"http", "https"}, + Protocols: []string{"https"}, }, Endpoints: endpoints{ "cn-north-1": endpoint{}, @@ -1686,6 +2027,16 @@ var awscnPartition = partition{ }, }, }, + "iot": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, "kinesis": service{ Endpoints: endpoints{ @@ -1727,6 +2078,12 @@ var awscnPartition = partition{ "cn-north-1": endpoint{}, }, }, + "snowball": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, "sns": service{ Defaults: endpoint{ Protocols: []string{"http", "https"}, @@ -1744,6 +2101,12 @@ var awscnPartition = partition{ "cn-north-1": endpoint{}, }, }, + "ssm": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, "storagegateway": service{ Endpoints: endpoints{ @@ -1752,7 +2115,7 @@ var awscnPartition = partition{ }, "streams.dynamodb": service{ Defaults: endpoint{ - Protocols: []string{"http", "http", "https", "https"}, + Protocols: []string{"http", "https"}, CredentialScope: credentialScope{ Service: "dynamodb", }, @@ -1769,6 +2132,12 @@ var awscnPartition = partition{ }, "swf": service{ + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "tagging": service{ + Endpoints: endpoints{ "cn-north-1": endpoint{}, }, @@ -1802,6 +2171,18 @@ var awsusgovPartition = partition{ }, }, Services: services{ + "acm": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "autoscaling": service{ Endpoints: endpoints{ @@ -1828,6 +2209,12 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "codedeploy": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "config": service{ Endpoints: endpoints{ @@ -1885,6 +2272,12 @@ var awsusgovPartition = partition{ }, }, }, + "events": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "glacier": service{ Endpoints: endpoints{ @@ -1906,12 +2299,24 @@ var awsusgovPartition = partition{ }, }, }, + "kinesis": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "kms": service{ Endpoints: endpoints{ "us-gov-west-1": endpoint{}, }, }, + "lambda": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "logs": service{ Endpoints: endpoints{ @@ -1936,6 +2341,12 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "rekognition": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "s3": service{ Defaults: endpoint{ SignatureVersions: []string{"s3", "s3v4"}, @@ -1953,6 +2364,12 @@ var awsusgovPartition = partition{ }, }, }, + "sms": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "snowball": service{ Endpoints: endpoints{ @@ -1976,6 +2393,12 @@ var awsusgovPartition = partition{ }, }, }, + "ssm": service{ + + Endpoints: 
endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "streams.dynamodb": service{ Defaults: endpoint{ CredentialScope: credentialScope{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go index a0e9bc454715..84316b92c053 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go @@ -21,12 +21,12 @@ // partitions := resolver.(endpoints.EnumPartitions).Partitions() // // for _, p := range partitions { -// fmt.Println("Regions for", p.Name) +// fmt.Println("Regions for", p.ID()) // for id, _ := range p.Regions() { // fmt.Println("*", id) // } // -// fmt.Println("Services for", p.Name) +// fmt.Println("Services for", p.ID()) // for id, _ := range p.Services() { // fmt.Println("*", id) // } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go index 3adec1315d6d..9c3eedb48d5d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go @@ -27,6 +27,25 @@ type Options struct { // error will be returned. This option will prevent returning endpoints // that look valid, but may not resolve to any real endpoint. StrictMatching bool + + // Enables resolving a service endpoint based on the region provided if the + // service does not exist. The service endpoint ID will be used as the service + // domain name prefix. By default the endpoint resolver requires the service + // to be known when resolving endpoints. + // + // If resolving an endpoint on the partition list the provided region will + // be used to determine which partition's domain name pattern to the service + // endpoint ID with. If both the service and region are unkonwn and resolving + // the endpoint on partition list an UnknownEndpointError error will be returned. + // + // If resolving and endpoint on a partition specific resolver that partition's + // domain name pattern will be used with the service endpoint ID. If both + // region and service do not exist when resolving an endpoint on a specific + // partition the partition's domain pattern will be used to combine the + // endpoint and region together. + // + // This option is ignored if StrictMatching is enabled. + ResolveUnknownService bool } // Set combines all of the option functions together. @@ -54,6 +73,12 @@ func StrictMatchingOption(o *Options) { o.StrictMatching = true } +// ResolveUnknownServiceOption sets the ResolveUnknownService option. Can be used +// as a functional option when resolving endpoints. +func ResolveUnknownServiceOption(o *Options) { + o.ResolveUnknownService = true +} + // A Resolver provides the interface for functionality to resolve endpoints. // The build in Partition and DefaultResolver return value satisfy this interface. type Resolver interface { @@ -99,6 +124,49 @@ type EnumPartitions interface { Partitions() []Partition } +// RegionsForService returns a map of regions for the partition and service. +// If either the partition or service does not exist false will be returned +// as the second parameter. +// +// This example shows how to get the regions for DynamoDB in the AWS partition. +// rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID) +// +// This is equivalent to using the partition directly. 
+// rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions() +func RegionsForService(ps []Partition, partitionID, serviceID string) (map[string]Region, bool) { + for _, p := range ps { + if p.ID() != partitionID { + continue + } + if _, ok := p.p.Services[serviceID]; !ok { + break + } + + s := Service{ + id: serviceID, + p: p.p, + } + return s.Regions(), true + } + + return map[string]Region{}, false +} + +// PartitionForRegion returns the first partition which includes the region +// passed in. This includes both known regions and regions which match +// a pattern supported by the partition which may include regions that are +// not explicitly known by the partition. Use the Regions method of the +// returned Partition if explicit support is needed. +func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) { + for _, p := range ps { + if _, ok := p.p.Regions[regionID]; ok || p.p.RegionRegex.MatchString(regionID) { + return p, true + } + } + + return Partition{}, false +} + // A Partition provides the ability to enumerate the partition's regions // and services. type Partition struct { @@ -107,33 +175,36 @@ type Partition struct { } // ID returns the identifier of the partition. -func (p *Partition) ID() string { return p.id } +func (p Partition) ID() string { return p.id } // EndpointFor attempts to resolve the endpoint based on service and region. // See Options for information on configuring how the endpoint is resolved. // // If the service cannot be found in the metadata the UnknownServiceError // error will be returned. This validation will occur regardless if -// StrictMatching is enabled. +// StrictMatching is enabled. To enable resolving unknown services set the +// "ResolveUnknownService" option to true. When StrictMatching is disabled +// this option allows the partition resolver to resolve a endpoint based on +// the service endpoint ID provided. // // When resolving endpoints you can choose to enable StrictMatching. This will // require the provided service and region to be known by the partition. // If the endpoint cannot be strictly resolved an error will be returned. This // mode is useful to ensure the endpoint resolved is valid. Without -// StrictMatching enabled the enpoint returned my look valid but may not work. +// StrictMatching enabled the endpoint returned my look valid but may not work. // StrictMatching requires the SDK to be updated if you want to take advantage -// of new regions and services expantions. +// of new regions and services expansions. // // Errors that can be returned. // * UnknownServiceError // * UnknownEndpointError -func (p *Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { +func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { return p.p.EndpointFor(service, region, opts...) } // Regions returns a map of Regions indexed by their ID. This is useful for // enumerating over the regions in a partition. -func (p *Partition) Regions() map[string]Region { +func (p Partition) Regions() map[string]Region { rs := map[string]Region{} for id := range p.p.Regions { rs[id] = Region{ @@ -147,7 +218,7 @@ func (p *Partition) Regions() map[string]Region { // Services returns a map of Service indexed by their ID. This is useful for // enumerating over the services in a partition. 
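For illustration, a minimal sketch of how RegionsForService, PartitionForRegion, and the ResolveUnknownService option fit together. The AwsPartitionID and DynamodbServiceID constants are the ones referenced in the doc comment above; "myservice" and "us-west-2" are placeholder inputs.

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/endpoints"
    )

    func main() {
        ps := endpoints.DefaultPartitions()

        // List the regions DynamoDB is known to be available in for the AWS partition.
        if rs, ok := endpoints.RegionsForService(ps, endpoints.AwsPartitionID, endpoints.DynamodbServiceID); ok {
            for id := range rs {
                fmt.Println("dynamodb region:", id)
            }
        }

        // Find the partition that owns a region, then resolve an endpoint for a
        // service the SDK does not know about by falling back to the service
        // endpoint ID ("myservice" is a placeholder).
        if p, ok := endpoints.PartitionForRegion(ps, "us-west-2"); ok {
            e, err := p.EndpointFor("myservice", "us-west-2",
                endpoints.ResolveUnknownServiceOption)
            if err == nil {
                fmt.Println("resolved URL:", e.URL)
            }
        }
    }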
-func (p *Partition) Services() map[string]Service { +func (p Partition) Services() map[string]Service { ss := map[string]Service{} for id := range p.p.Services { ss[id] = Service{ @@ -167,16 +238,16 @@ type Region struct { } // ID returns the region's identifier. -func (r *Region) ID() string { return r.id } +func (r Region) ID() string { return r.id } // ResolveEndpoint resolves an endpoint from the context of the region given // a service. See Partition.EndpointFor for usage and errors that can be returned. -func (r *Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) { +func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) { return r.p.EndpointFor(service, r.id, opts...) } // Services returns a list of all services that are known to be in this region. -func (r *Region) Services() map[string]Service { +func (r Region) Services() map[string]Service { ss := map[string]Service{} for id, s := range r.p.Services { if _, ok := s.Endpoints[r.id]; ok { @@ -198,17 +269,38 @@ type Service struct { } // ID returns the identifier for the service. -func (s *Service) ID() string { return s.id } +func (s Service) ID() string { return s.id } // ResolveEndpoint resolves an endpoint from the context of a service given // a region. See Partition.EndpointFor for usage and errors that can be returned. -func (s *Service) ResolveEndpoint(region string, opts ...func(*Options)) (ResolvedEndpoint, error) { +func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (ResolvedEndpoint, error) { return s.p.EndpointFor(s.id, region, opts...) } +// Regions returns a map of Regions that the service is present in. +// +// A region is the AWS region the service exists in. Whereas a Endpoint is +// an URL that can be resolved to a instance of a service. +func (s Service) Regions() map[string]Region { + rs := map[string]Region{} + for id := range s.p.Services[s.id].Endpoints { + if _, ok := s.p.Regions[id]; ok { + rs[id] = Region{ + id: id, + p: s.p, + } + } + } + + return rs +} + // Endpoints returns a map of Endpoints indexed by their ID for all known // endpoints for a service. -func (s *Service) Endpoints() map[string]Endpoint { +// +// A region is the AWS region the service exists in. Whereas a Endpoint is +// an URL that can be resolved to a instance of a service. +func (s Service) Endpoints() map[string]Endpoint { es := map[string]Endpoint{} for id := range s.p.Services[s.id].Endpoints { es[id] = Endpoint{ @@ -231,15 +323,15 @@ type Endpoint struct { } // ID returns the identifier for an endpoint. -func (e *Endpoint) ID() string { return e.id } +func (e Endpoint) ID() string { return e.id } // ServiceID returns the identifier the endpoint belongs to. -func (e *Endpoint) ServiceID() string { return e.serviceID } +func (e Endpoint) ServiceID() string { return e.serviceID } // ResolveEndpoint resolves an endpoint from the context of a service and // region the endpoint represents. See Partition.EndpointFor for usage and // errors that can be returned. -func (e *Endpoint) ResolveEndpoint(opts ...func(*Options)) (ResolvedEndpoint, error) { +func (e Endpoint) ResolveEndpoint(opts ...func(*Options)) (ResolvedEndpoint, error) { return e.p.EndpointFor(e.serviceID, e.id, opts...) } @@ -272,28 +364,6 @@ type EndpointNotFoundError struct { Region string } -//// NewEndpointNotFoundError builds and returns NewEndpointNotFoundError. 
-//func NewEndpointNotFoundError(p, s, r string) EndpointNotFoundError { -// return EndpointNotFoundError{ -// awsError: awserr.New("EndpointNotFoundError", "unable to find endpoint", nil), -// Partition: p, -// Service: s, -// Region: r, -// } -//} -// -//// Error returns string representation of the error. -//func (e EndpointNotFoundError) Error() string { -// extra := fmt.Sprintf("partition: %q, service: %q, region: %q", -// e.Partition, e.Service, e.Region) -// return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr()) -//} -// -//// String returns the string representation of the error. -//func (e EndpointNotFoundError) String() string { -// return e.Error() -//} - // A UnknownServiceError is returned when the service does not resolve to an // endpoint. Includes a list of all known services for the partition. Returned // when a partition does not support the service. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go index 6522ce9b359d..13d968a249e3 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go @@ -79,7 +79,9 @@ func (p partition) EndpointFor(service, region string, opts ...func(*Options)) ( opt.Set(opts...) s, hasService := p.Services[service] - if !hasService { + if !(hasService || opt.ResolveUnknownService) { + // Only return error if the resolver will not fallback to creating + // endpoint based on service endpoint ID passed in. return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services)) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go index 1e7369dbfd1c..05e92df22af2 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go @@ -158,7 +158,7 @@ var funcMap = template.FuncMap{ const v3Tmpl = ` {{ define "defaults" -}} -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. +// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. package endpoints @@ -209,17 +209,20 @@ import ( // DefaultResolver returns an Endpoint resolver that will be able // to resolve endpoints for: {{ ListPartitionNames . }}. // - // Casting the return value of this func to a EnumPartitions will - // allow you to get a list of the partitions in the order the endpoints - // will be resolved in. + // Use DefaultPartitions() to get the list of the default partitions. + func DefaultResolver() Resolver { + return defaultPartitions + } + + // DefaultPartitions returns a list of the partitions the SDK is bundled + // with. The available partitions are: {{ ListPartitionNames . }}. // - // resolver := endpoints.DefaultResolver() - // partitions := resolver.(endpoints.EnumPartitions).Partitions() + // partitions := endpoints.DefaultPartitions // for _, p := range partitions { // // ... 
inspect partitions // } - func DefaultResolver() Resolver { - return defaultPartitions + func DefaultPartitions() []Partition { + return defaultPartitions.Partitions() } var defaultPartitions = partitions{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go new file mode 100644 index 000000000000..91a6f277a7eb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go @@ -0,0 +1,12 @@ +package aws + +// JSONValue is a representation of a grab bag type that will be marshaled +// into a json string. This type can be used just like any other map. +// +// Example: +// +// values := aws.JSONValue{ +// "Foo": "Bar", +// } +// values["Baz"] = "Qux" +type JSONValue map[string]interface{} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go index db87188e20c7..3babb5abdb69 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/logger.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/logger.go @@ -26,14 +26,14 @@ func (l *LogLevelType) Value() LogLevelType { // Matches returns true if the v LogLevel is enabled by this LogLevel. Should be // used with logging sub levels. Is safe to use on nil value LogLevelTypes. If -// LogLevel is nill, will default to LogOff comparison. +// LogLevel is nil, will default to LogOff comparison. func (l *LogLevelType) Matches(v LogLevelType) bool { c := l.Value() return c&v == v } // AtLeast returns true if this LogLevel is at least high enough to satisfies v. -// Is safe to use on nil value LogLevelTypes. If LogLevel is nill, will default +// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default // to LogOff comparison. func (l *LogLevelType) AtLeast(v LogLevelType) bool { c := l.Value() diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go new file mode 100644 index 000000000000..271da432ce17 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go @@ -0,0 +1,19 @@ +// +build !appengine,!plan9 + +package request + +import ( + "net" + "os" + "syscall" +) + +func isErrConnectionReset(err error) bool { + if opErr, ok := err.(*net.OpError); ok { + if sysErr, ok := opErr.Err.(*os.SyscallError); ok { + return sysErr.Err == syscall.ECONNRESET + } + } + + return false +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go new file mode 100644 index 000000000000..daf9eca43734 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go @@ -0,0 +1,11 @@ +// +build appengine plan9 + +package request + +import ( + "strings" +) + +func isErrConnectionReset(err error) bool { + return strings.Contains(err.Error(), "connection reset") +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go index 5279c19c09dd..802ac88ad5cd 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go @@ -18,6 +18,7 @@ type Handlers struct { UnmarshalError HandlerList Retry HandlerList AfterRetry HandlerList + Complete HandlerList } // Copy returns of this handler's lists. 
@@ -33,6 +34,7 @@ func (h *Handlers) Copy() Handlers { UnmarshalMeta: h.UnmarshalMeta.copy(), Retry: h.Retry.copy(), AfterRetry: h.AfterRetry.copy(), + Complete: h.Complete.copy(), } } @@ -48,6 +50,7 @@ func (h *Handlers) Clear() { h.ValidateResponse.Clear() h.Retry.Clear() h.AfterRetry.Clear() + h.Complete.Clear() } // A HandlerListRunItem represents an entry in the HandlerList which @@ -85,13 +88,17 @@ func (l *HandlerList) copy() HandlerList { n := HandlerList{ AfterEachFn: l.AfterEachFn, } - n.list = append([]NamedHandler{}, l.list...) + if len(l.list) == 0 { + return n + } + + n.list = append(make([]NamedHandler, 0, len(l.list)), l.list...) return n } // Clear clears the handler list. func (l *HandlerList) Clear() { - l.list = []NamedHandler{} + l.list = l.list[0:0] } // Len returns the number of handlers in the list. @@ -101,33 +108,85 @@ func (l *HandlerList) Len() int { // PushBack pushes handler f to the back of the handler list. func (l *HandlerList) PushBack(f func(*Request)) { - l.list = append(l.list, NamedHandler{"__anonymous", f}) -} - -// PushFront pushes handler f to the front of the handler list. -func (l *HandlerList) PushFront(f func(*Request)) { - l.list = append([]NamedHandler{{"__anonymous", f}}, l.list...) + l.PushBackNamed(NamedHandler{"__anonymous", f}) } // PushBackNamed pushes named handler f to the back of the handler list. func (l *HandlerList) PushBackNamed(n NamedHandler) { + if cap(l.list) == 0 { + l.list = make([]NamedHandler, 0, 5) + } l.list = append(l.list, n) } +// PushFront pushes handler f to the front of the handler list. +func (l *HandlerList) PushFront(f func(*Request)) { + l.PushFrontNamed(NamedHandler{"__anonymous", f}) +} + // PushFrontNamed pushes named handler f to the front of the handler list. func (l *HandlerList) PushFrontNamed(n NamedHandler) { - l.list = append([]NamedHandler{n}, l.list...) + if cap(l.list) == len(l.list) { + // Allocating new list required + l.list = append([]NamedHandler{n}, l.list...) + } else { + // Enough room to prepend into list. + l.list = append(l.list, NamedHandler{}) + copy(l.list[1:], l.list) + l.list[0] = n + } } // Remove removes a NamedHandler n func (l *HandlerList) Remove(n NamedHandler) { - newlist := []NamedHandler{} - for _, m := range l.list { - if m.Name != n.Name { - newlist = append(newlist, m) + l.RemoveByName(n.Name) +} + +// RemoveByName removes a NamedHandler by name. +func (l *HandlerList) RemoveByName(name string) { + for i := 0; i < len(l.list); i++ { + m := l.list[i] + if m.Name == name { + // Shift array preventing creating new arrays + copy(l.list[i:], l.list[i+1:]) + l.list[len(l.list)-1] = NamedHandler{} + l.list = l.list[:len(l.list)-1] + + // decrement list so next check to length is correct + i-- } } - l.list = newlist +} + +// SwapNamed will swap out any existing handlers with the same name as the +// passed in NamedHandler returning true if handlers were swapped. False is +// returned otherwise. +func (l *HandlerList) SwapNamed(n NamedHandler) (swapped bool) { + for i := 0; i < len(l.list); i++ { + if l.list[i].Name == n.Name { + l.list[i].Fn = n.Fn + swapped = true + } + } + + return swapped +} + +// SetBackNamed will replace the named handler if it exists in the handler list. +// If the handler does not exist the handler will be added to the end of the list. +func (l *HandlerList) SetBackNamed(n NamedHandler) { + if !l.SwapNamed(n) { + l.PushBackNamed(n) + } +} + +// SetFrontNamed will replace the named handler if it exists in the handler list. 
+// If the handler does not exist the handler will be added to the beginning of +// the list. +func (l *HandlerList) SetFrontNamed(n NamedHandler) { + if !l.SwapNamed(n) { + l.PushFrontNamed(n) + } } // Run executes all handlers in the list with a given request object. @@ -163,6 +222,16 @@ func HandlerListStopOnError(item HandlerListRunItem) bool { return item.Request.Error == nil } +// WithAppendUserAgent will add a string to the user agent prefixed with a +// single white space. +func WithAppendUserAgent(s string) Option { + return func(r *Request) { + r.Handlers.Build.PushBack(func(r2 *Request) { + AddToUserAgent(r, s) + }) + } +} + // MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request // header. If the extra parameters are provided they will be added as metadata to the // name/version pair resulting in the following format. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go index 77312bb66558..911c058eef9c 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go @@ -16,6 +16,24 @@ import ( "github.com/aws/aws-sdk-go/aws/client/metadata" ) +const ( + // ErrCodeSerialization is the serialization error code that is received + // during protocol unmarshaling. + ErrCodeSerialization = "SerializationError" + + // ErrCodeRead is an error that is returned during HTTP reads. + ErrCodeRead = "ReadError" + + // ErrCodeResponseTimeout is the connection timeout error that is received + // during body reads. + ErrCodeResponseTimeout = "ResponseTimeout" + + // CanceledErrorCode is the error code that will be returned by an + // API request that was canceled. Requests given a aws.Context may + // return this error when canceled. + CanceledErrorCode = "RequestCanceled" +) + // A Request is the service request to be made. type Request struct { Config aws.Config @@ -23,30 +41,33 @@ type Request struct { Handlers Handlers Retryer - Time time.Time - ExpireTime time.Duration - Operation *Operation - HTTPRequest *http.Request - HTTPResponse *http.Response - Body io.ReadSeeker - BodyStart int64 // offset from beginning of Body that the request body starts - Params interface{} - Error error - Data interface{} - RequestID string - RetryCount int - Retryable *bool - RetryDelay time.Duration - NotHoist bool - SignedHeaderVals http.Header - LastSignedAt time.Time + Time time.Time + ExpireTime time.Duration + Operation *Operation + HTTPRequest *http.Request + HTTPResponse *http.Response + Body io.ReadSeeker + BodyStart int64 // offset from beginning of Body that the request body starts + Params interface{} + Error error + Data interface{} + RequestID string + RetryCount int + Retryable *bool + RetryDelay time.Duration + NotHoist bool + SignedHeaderVals http.Header + LastSignedAt time.Time + DisableFollowRedirects bool + + context aws.Context built bool - // Need to persist an intermideant body betweend the input Body and HTTP + // Need to persist an intermediate body between the input Body and HTTP // request body because the HTTP Client's transport can maintain a reference // to the HTTP request's body after the client has returned. This value is - // safe to use concurrently and rewraps the input Body for each HTTP request. + // safe to use concurrently and wrap the input Body for each HTTP request. 
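As a rough sketch of the new named-handler operations on a bare HandlerList (the handler name and its function body are placeholders): SetBackNamed swaps in place on repeat registration instead of appending a duplicate, and RemoveByName drops every handler registered under that name.

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/request"
    )

    func main() {
        var l request.HandlerList

        logger := request.NamedHandler{Name: "example.Logger", Fn: func(r *request.Request) {
            fmt.Println("operation:", r.Operation.Name)
        }}

        // First call appends; the second swaps the existing handler in place.
        l.SetBackNamed(logger)
        l.SetBackNamed(logger)
        fmt.Println(l.Len()) // 1

        // RemoveByName drops every handler with that name.
        l.RemoveByName("example.Logger")
        fmt.Println(l.Len()) // 0
    }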
safeBody *offsetReader } @@ -60,14 +81,6 @@ type Operation struct { BeforePresignFn func(r *Request) error } -// Paginator keeps track of pagination configuration for an API operation. -type Paginator struct { - InputTokens []string - OutputTokens []string - LimitToken string - TruncationToken string -} - // New returns a new Request pointer for the service API // operation and parameters. // @@ -111,6 +124,94 @@ func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, return r } +// A Option is a functional option that can augment or modify a request when +// using a WithContext API operation method. +type Option func(*Request) + +// WithGetResponseHeader builds a request Option which will retrieve a single +// header value from the HTTP Response. If there are multiple values for the +// header key use WithGetResponseHeaders instead to access the http.Header +// map directly. The passed in val pointer must be non-nil. +// +// This Option can be used multiple times with a single API operation. +// +// var id2, versionID string +// svc.PutObjectWithContext(ctx, params, +// request.WithGetResponseHeader("x-amz-id-2", &id2), +// request.WithGetResponseHeader("x-amz-version-id", &versionID), +// ) +func WithGetResponseHeader(key string, val *string) Option { + return func(r *Request) { + r.Handlers.Complete.PushBack(func(req *Request) { + *val = req.HTTPResponse.Header.Get(key) + }) + } +} + +// WithGetResponseHeaders builds a request Option which will retrieve the +// headers from the HTTP response and assign them to the passed in headers +// variable. The passed in headers pointer must be non-nil. +// +// var headers http.Header +// svc.PutObjectWithContext(ctx, params, request.WithGetResponseHeaders(&headers)) +func WithGetResponseHeaders(headers *http.Header) Option { + return func(r *Request) { + r.Handlers.Complete.PushBack(func(req *Request) { + *headers = req.HTTPResponse.Header + }) + } +} + +// WithLogLevel is a request option that will set the request to use a specific +// log level when the request is made. +// +// svc.PutObjectWithContext(ctx, params, request.WithLogLevel(aws.LogDebugWithHTTPBody) +func WithLogLevel(l aws.LogLevelType) Option { + return func(r *Request) { + r.Config.LogLevel = aws.LogLevel(l) + } +} + +// ApplyOptions will apply each option to the request calling them in the order +// the were provided. +func (r *Request) ApplyOptions(opts ...Option) { + for _, opt := range opts { + opt(r) + } +} + +// Context will always returns a non-nil context. If Request does not have a +// context aws.BackgroundContext will be returned. +func (r *Request) Context() aws.Context { + if r.context != nil { + return r.context + } + return aws.BackgroundContext() +} + +// SetContext adds a Context to the current request that can be used to cancel +// a in-flight request. The Context value must not be nil, or this method will +// panic. +// +// Unlike http.Request.WithContext, SetContext does not return a copy of the +// Request. It is not safe to use use a single Request value for multiple +// requests. A new Request should be created for each API operation request. +// +// Go 1.6 and below: +// The http.Request's Cancel field will be set to the Done() value of +// the context. This will overwrite the Cancel field's value. +// +// Go 1.7 and above: +// The http.Request.WithContext will be used to set the context on the underlying +// http.Request. This will create a shallow copy of the http.Request. 
The SDK +// may create sub contexts in the future for nested requests such as retries. +func (r *Request) SetContext(ctx aws.Context) { + if ctx == nil { + panic("context cannot be nil") + } + setRequestContext(r, ctx) +} + // WillRetry returns if the request's can be retried. func (r *Request) WillRetry() bool { return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries() @@ -168,11 +269,17 @@ func (r *Request) Presign(expireTime time.Duration) (string, error) { return r.HTTPRequest.URL.String(), nil } -// PresignRequest behaves just like presign, but hoists all headers and signs them. -// Also returns the signed hash back to the user +// PresignRequest behaves just like presign, with the addition of returning a +// set of headers that were signed. +// +// Returns the URL string for the API operation with signature in the query string, +// and the HTTP headers that were included in the signature. These headers must +// be included in any HTTP request made with the presigned URL. +// +// To prevent hoisting any headers to the query string set NotHoist to true on +// this Request value prior to calling PresignRequest. func (r *Request) PresignRequest(expireTime time.Duration) (string, http.Header, error) { r.ExpireTime = expireTime - r.NotHoist = true r.Sign() if r.Error != nil { return "", nil, r.Error @@ -237,10 +344,7 @@ func (r *Request) Sign() error { return r.Error } -// ResetBody rewinds the request body backto its starting position, and -// set's the HTTP Request body reference. When the body is read prior -// to being sent in the HTTP request it will need to be rewound. -func (r *Request) ResetBody() { +func (r *Request) getNextRequestBody() (io.ReadCloser, error) { if r.safeBody != nil { r.safeBody.Close() } @@ -262,14 +366,14 @@ func (r *Request) ResetBody() { // Related golang/go#18257 l, err := computeBodyLength(r.Body) if err != nil { - r.Error = awserr.New("SerializationError", "failed to compute request body size", err) - return + return nil, awserr.New(ErrCodeSerialization, "failed to compute request body size", err) } + var body io.ReadCloser if l == 0 { - r.HTTPRequest.Body = noBodyReader + body = NoBody } else if l > 0 { - r.HTTPRequest.Body = r.safeBody + body = r.safeBody } else { // Hack to prevent sending bodies for methods where the body // should be ignored by the server. Sending bodies on these @@ -281,11 +385,13 @@ func (r *Request) ResetBody() { // a io.Reader that was not also an io.Seeker. switch r.Operation.HTTPMethod { case "GET", "HEAD", "DELETE": - r.HTTPRequest.Body = noBodyReader + body = NoBody default: - r.HTTPRequest.Body = r.safeBody + body = r.safeBody } } + + return body, nil } // Attempts to compute the length of the body of the reader using the @@ -344,6 +450,12 @@ func (r *Request) GetBody() io.ReadSeeker { // // Send will not close the request.Request's body. func (r *Request) Send() error { + defer func() { + // Regardless of success or failure of the request trigger the Complete + // request handlers. 
+ r.Handlers.Complete.Run(r) + }() + for { if aws.BoolValue(r.Retryable) { if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { @@ -381,7 +493,7 @@ func (r *Request) Send() error { r.Handlers.Retry.Run(r) r.Handlers.AfterRetry.Run(r) if r.Error != nil { - debugLogReqError(r, "Send Request", false, r.Error) + debugLogReqError(r, "Send Request", false, err) return r.Error } debugLogReqError(r, "Send Request", true, err) @@ -390,12 +502,13 @@ func (r *Request) Send() error { r.Handlers.UnmarshalMeta.Run(r) r.Handlers.ValidateResponse.Run(r) if r.Error != nil { - err := r.Error r.Handlers.UnmarshalError.Run(r) + err := r.Error + r.Handlers.Retry.Run(r) r.Handlers.AfterRetry.Run(r) if r.Error != nil { - debugLogReqError(r, "Validate Response", false, r.Error) + debugLogReqError(r, "Validate Response", false, err) return r.Error } debugLogReqError(r, "Validate Response", true, err) @@ -408,7 +521,7 @@ func (r *Request) Send() error { r.Handlers.Retry.Run(r) r.Handlers.AfterRetry.Run(r) if r.Error != nil { - debugLogReqError(r, "Unmarshal Response", false, r.Error) + debugLogReqError(r, "Unmarshal Response", false, err) return r.Error } debugLogReqError(r, "Unmarshal Response", true, err) @@ -446,6 +559,9 @@ func shouldRetryCancel(r *Request) bool { timeoutErr := false errStr := r.Error.Error() if ok { + if awsErr.Code() == CanceledErrorCode { + return false + } err := awsErr.OrigErr() netErr, netOK := err.(net.Error) timeoutErr = netOK && netErr.Temporary() diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go index 1323af9007b3..869b97a1a0fa 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go @@ -16,6 +16,24 @@ func (noBody) Read([]byte) (int, error) { return 0, io.EOF } func (noBody) Close() error { return nil } func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil } -// Is an empty reader that will trigger the Go HTTP client to not include +// NoBody is an empty reader that will trigger the Go HTTP client to not include // and body in the HTTP request. -var noBodyReader = noBody{} +var NoBody = noBody{} + +// ResetBody rewinds the request body back to its starting position, and +// set's the HTTP Request body reference. When the body is read prior +// to being sent in the HTTP request it will need to be rewound. +// +// ResetBody will automatically be called by the SDK's build handler, but if +// the request is being used directly ResetBody must be called before the request +// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically +// call ResetBody. +func (r *Request) ResetBody() { + body, err := r.getNextRequestBody() + if err != nil { + r.Error = err + return + } + + r.HTTPRequest.Body = body +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go index 8b963f4de279..c32fc69bc56f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go @@ -2,8 +2,32 @@ package request -import "net/http" +import ( + "net/http" +) -// Is a http.NoBody reader instructing Go HTTP client to not include +// NoBody is a http.NoBody reader instructing Go HTTP client to not include // and body in the HTTP request. 
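The response-header request options above depend on this Complete handler list, which Send now runs whether the request succeeds or fails. A hedged sketch combining them with a per-request context, assuming an S3 client and placeholder bucket/key names as in the PutObjectWithContext doc example:

    package main

    import (
        "context"
        "fmt"
        "strings"
        "time"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/request"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        sess := session.Must(session.NewSession())
        svc := s3.New(sess)

        ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
        defer cancel()

        var reqID2, versionID string
        _, err := svc.PutObjectWithContext(ctx, &s3.PutObjectInput{
            Bucket: aws.String("mybucket"), // placeholder
            Key:    aws.String("mykey"),    // placeholder
            Body:   strings.NewReader("hello"),
        },
            request.WithGetResponseHeader("x-amz-id-2", &reqID2),
            request.WithGetResponseHeader("x-amz-version-id", &versionID),
        )
        fmt.Println(err, reqID2, versionID)
    }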
-var noBodyReader = http.NoBody +var NoBody = http.NoBody + +// ResetBody rewinds the request body back to its starting position, and +// set's the HTTP Request body reference. When the body is read prior +// to being sent in the HTTP request it will need to be rewound. +// +// ResetBody will automatically be called by the SDK's build handler, but if +// the request is being used directly ResetBody must be called before the request +// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically +// call ResetBody. +// +// Will also set the Go 1.8's http.Request.GetBody member to allow retrying +// PUT/POST redirects. +func (r *Request) ResetBody() { + body, err := r.getNextRequestBody() + if err != nil { + r.Error = err + return + } + + r.HTTPRequest.Body = body + r.HTTPRequest.GetBody = r.getNextRequestBody +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go new file mode 100644 index 000000000000..a7365cd1e46e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go @@ -0,0 +1,14 @@ +// +build go1.7 + +package request + +import "github.com/aws/aws-sdk-go/aws" + +// setContext updates the Request to use the passed in context for cancellation. +// Context will also be used for request retry delay. +// +// Creates shallow copy of the http.Request with the WithContext method. +func setRequestContext(r *Request, ctx aws.Context) { + r.context = ctx + r.HTTPRequest = r.HTTPRequest.WithContext(ctx) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go new file mode 100644 index 000000000000..307fa0705be6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go @@ -0,0 +1,14 @@ +// +build !go1.7 + +package request + +import "github.com/aws/aws-sdk-go/aws" + +// setContext updates the Request to use the passed in context for cancellation. +// Context will also be used for request retry delay. +// +// Creates shallow copy of the http.Request with the WithContext method. +func setRequestContext(r *Request, ctx aws.Context) { + r.context = ctx + r.HTTPRequest.Cancel = ctx.Done() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go index 2939ec473f2b..59de6736b611 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go @@ -2,29 +2,125 @@ package request import ( "reflect" + "sync/atomic" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awsutil" ) -//type Paginater interface { -// HasNextPage() bool -// NextPage() *Request -// EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error -//} +// A Pagination provides paginating of SDK API operations which are paginatable. +// Generally you should not use this type directly, but use the "Pages" API +// operations method to automatically perform pagination for you. Such as, +// "S3.ListObjectsPages", and "S3.ListObjectsPagesWithContext" methods. +// +// Pagination differs from a Paginator type in that pagination is the type that +// does the pagination between API operations, and Paginator defines the +// configuration that will be used per page request. 
+// +// cont := true +// for p.Next() && cont { +// data := p.Page().(*s3.ListObjectsOutput) +// // process the page's data +// } +// return p.Err() +// +// See service client API operation Pages methods for examples how the SDK will +// use the Pagination type. +type Pagination struct { + // Function to return a Request value for each pagination request. + // Any configuration or handlers that need to be applied to the request + // prior to getting the next page should be done here before the request + // returned. + // + // NewRequest should always be built from the same API operations. It is + // undefined if different API operations are returned on subsequent calls. + NewRequest func() (*Request, error) -// HasNextPage returns true if this request has more pages of data available. -func (r *Request) HasNextPage() bool { - return len(r.nextPageTokens()) > 0 + started bool + nextTokens []interface{} + + err error + curPage interface{} } -// nextPageTokens returns the tokens to use when asking for the next page of -// data. +// HasNextPage will return true if Pagination is able to determine that the API +// operation has additional pages. False will be returned if there are no more +// pages remaining. +// +// Will always return true if Next has not been called yet. +func (p *Pagination) HasNextPage() bool { + return !(p.started && len(p.nextTokens) == 0) +} + +// Err returns the error Pagination encountered when retrieving the next page. +func (p *Pagination) Err() error { + return p.err +} + +// Page returns the current page. Page should only be called after a successful +// call to Next. It is undefined what Page will return if Page is called after +// Next returns false. +func (p *Pagination) Page() interface{} { + return p.curPage +} + +// Next will attempt to retrieve the next page for the API operation. When a page +// is retrieved true will be returned. If the page cannot be retrieved, or there +// are no more pages false will be returned. +// +// Use the Page method to retrieve the current page data. The data will need +// to be cast to the API operation's output type. +// +// Use the Err method to determine if an error occurred if Page returns false. +func (p *Pagination) Next() bool { + if !p.HasNextPage() { + return false + } + + req, err := p.NewRequest() + if err != nil { + p.err = err + return false + } + + if p.started { + for i, intok := range req.Operation.InputTokens { + awsutil.SetValueAtPath(req.Params, intok, p.nextTokens[i]) + } + } + p.started = true + + err = req.Send() + if err != nil { + p.err = err + return false + } + + p.nextTokens = req.nextPageTokens() + p.curPage = req.Data + + return true +} + +// A Paginator is the configuration data that defines how an API operation +// should be paginated. This type is used by the API service models to define +// the generated pagination config for service APIs. +// +// The Pagination type is what provides iterating between pages of an API. It +// is only used to store the token metadata the SDK should use for performing +// pagination. +type Paginator struct { + InputTokens []string + OutputTokens []string + LimitToken string + TruncationToken string +} + +// nextPageTokens returns the tokens to use when asking for the next page of data. 
func (r *Request) nextPageTokens() []interface{} { if r.Operation.Paginator == nil { return nil } - if r.Operation.TruncationToken != "" { tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken) if len(tr) == 0 { @@ -61,9 +157,40 @@ func (r *Request) nextPageTokens() []interface{} { return tokens } +// Ensure a deprecated item is only logged once instead of each time its used. +func logDeprecatedf(logger aws.Logger, flag *int32, msg string) { + if logger == nil { + return + } + if atomic.CompareAndSwapInt32(flag, 0, 1) { + logger.Log(msg) + } +} + +var ( + logDeprecatedHasNextPage int32 + logDeprecatedNextPage int32 + logDeprecatedEachPage int32 +) + +// HasNextPage returns true if this request has more pages of data available. +// +// Deprecated Use Pagination type for configurable pagination of API operations +func (r *Request) HasNextPage() bool { + logDeprecatedf(r.Config.Logger, &logDeprecatedHasNextPage, + "Request.HasNextPage deprecated. Use Pagination type for configurable pagination of API operations") + + return len(r.nextPageTokens()) > 0 +} + // NextPage returns a new Request that can be executed to return the next // page of result data. Call .Send() on this request to execute it. +// +// Deprecated Use Pagination type for configurable pagination of API operations func (r *Request) NextPage() *Request { + logDeprecatedf(r.Config.Logger, &logDeprecatedNextPage, + "Request.NextPage deprecated. Use Pagination type for configurable pagination of API operations") + tokens := r.nextPageTokens() if len(tokens) == 0 { return nil @@ -90,7 +217,12 @@ func (r *Request) NextPage() *Request { // as the structure "T". The lastPage value represents whether the page is // the last page of data or not. The return value of this function should // return true to keep iterating or false to stop. +// +// Deprecated Use Pagination type for configurable pagination of API operations func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error { + logDeprecatedf(r.Config.Logger, &logDeprecatedEachPage, + "Request.EachPage deprecated. Use Pagination type for configurable pagination of API operations") + for page := r; page != nil; page = page.NextPage() { if err := page.Send(); err != nil { return err diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go index ebd60ccc4fb8..f35fef213ed9 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -8,7 +8,7 @@ import ( ) // Retryer is an interface to control retry logic for a given service. -// The default implementation used by most services is the service.DefaultRetryer +// The default implementation used by most services is the client.DefaultRetryer // structure, which contains basic retry logic using exponential backoff. type Retryer interface { RetryRules(*Request) time.Duration @@ -26,8 +26,10 @@ func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config { // retryableCodes is a collection of service response codes which are retry-able // without any further action. 
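A hedged sketch of wiring Pagination's NewRequest callback to an operation's request constructor; the S3 ListObjects names mirror the Pagination doc comment and are assumptions about the service package, not part of this file.

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/request"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        sess := session.Must(session.NewSession())
        svc := s3.New(sess)

        p := request.Pagination{
            // NewRequest must build a fresh Request for the same operation on
            // every call; Pagination injects the next-page tokens before Send.
            NewRequest: func() (*request.Request, error) {
                req, _ := svc.ListObjectsRequest(&s3.ListObjectsInput{
                    Bucket: aws.String("mybucket"), // placeholder
                })
                return req, nil
            },
        }

        for p.Next() {
            page := p.Page().(*s3.ListObjectsOutput)
            fmt.Println("objects in page:", len(page.Contents))
        }
        if err := p.Err(); err != nil {
            fmt.Println("pagination failed:", err)
        }
    }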
var retryableCodes = map[string]struct{}{ - "RequestError": {}, - "RequestTimeout": {}, + "RequestError": {}, + "RequestTimeout": {}, + ErrCodeResponseTimeout: {}, + "RequestTimeoutException": {}, // Glacier's flavor of RequestTimeout } var throttleCodes = map[string]struct{}{ @@ -36,7 +38,6 @@ var throttleCodes = map[string]struct{}{ "ThrottlingException": {}, "RequestLimitExceeded": {}, "RequestThrottled": {}, - "LimitExceededException": {}, // Deleting 10+ DynamoDb tables at once "TooManyRequestsException": {}, // Lambda functions "PriorRequestNotComplete": {}, // Route53 } @@ -68,35 +69,93 @@ func isCodeExpiredCreds(code string) bool { return ok } +var validParentCodes = map[string]struct{}{ + ErrCodeSerialization: {}, + ErrCodeRead: {}, +} + +type temporaryError interface { + Temporary() bool +} + +func isNestedErrorRetryable(parentErr awserr.Error) bool { + if parentErr == nil { + return false + } + + if _, ok := validParentCodes[parentErr.Code()]; !ok { + return false + } + + err := parentErr.OrigErr() + if err == nil { + return false + } + + if aerr, ok := err.(awserr.Error); ok { + return isCodeRetryable(aerr.Code()) + } + + if t, ok := err.(temporaryError); ok { + return t.Temporary() + } + + return isErrConnectionReset(err) +} + // IsErrorRetryable returns whether the error is retryable, based on its Code. -// Returns false if the request has no Error set. -func (r *Request) IsErrorRetryable() bool { - if r.Error != nil { - if err, ok := r.Error.(awserr.Error); ok { - return isCodeRetryable(err.Code()) +// Returns false if error is nil. +func IsErrorRetryable(err error) bool { + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + return isCodeRetryable(aerr.Code()) || isNestedErrorRetryable(aerr) } } return false } // IsErrorThrottle returns whether the error is to be throttled based on its code. -// Returns false if the request has no Error set -func (r *Request) IsErrorThrottle() bool { - if r.Error != nil { - if err, ok := r.Error.(awserr.Error); ok { - return isCodeThrottle(err.Code()) +// Returns false if error is nil. +func IsErrorThrottle(err error) bool { + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + return isCodeThrottle(aerr.Code()) } } return false } -// IsErrorExpired returns whether the error code is a credential expiry error. -// Returns false if the request has no Error set. -func (r *Request) IsErrorExpired() bool { - if r.Error != nil { - if err, ok := r.Error.(awserr.Error); ok { - return isCodeExpiredCreds(err.Code()) +// IsErrorExpiredCreds returns whether the error code is a credential expiry error. +// Returns false if error is nil. +func IsErrorExpiredCreds(err error) bool { + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + return isCodeExpiredCreds(aerr.Code()) } } return false } + +// IsErrorRetryable returns whether the error is retryable, based on its Code. +// Returns false if the request has no Error set. +// +// Alias for the utility function IsErrorRetryable +func (r *Request) IsErrorRetryable() bool { + return IsErrorRetryable(r.Error) +} + +// IsErrorThrottle returns whether the error is to be throttled based on its code. +// Returns false if the request has no Error set +// +// Alias for the utility function IsErrorThrottle +func (r *Request) IsErrorThrottle() bool { + return IsErrorThrottle(r.Error) +} + +// IsErrorExpired returns whether the error code is a credential expiry error. +// Returns false if the request has no Error set. 
+// +// Alias for the utility function IsErrorExpiredCreds +func (r *Request) IsErrorExpired() bool { + return IsErrorExpiredCreds(r.Error) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go new file mode 100644 index 000000000000..09a44eb987ac --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go @@ -0,0 +1,94 @@ +package request + +import ( + "io" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +var timeoutErr = awserr.New( + ErrCodeResponseTimeout, + "read on body has reached the timeout limit", + nil, +) + +type readResult struct { + n int + err error +} + +// timeoutReadCloser will handle body reads that take too long. +// We will return a ErrReadTimeout error if a timeout occurs. +type timeoutReadCloser struct { + reader io.ReadCloser + duration time.Duration +} + +// Read will spin off a goroutine to call the reader's Read method. We will +// select on the timer's channel or the read's channel. Whoever completes first +// will be returned. +func (r *timeoutReadCloser) Read(b []byte) (int, error) { + timer := time.NewTimer(r.duration) + c := make(chan readResult, 1) + + go func() { + n, err := r.reader.Read(b) + timer.Stop() + c <- readResult{n: n, err: err} + }() + + select { + case data := <-c: + return data.n, data.err + case <-timer.C: + return 0, timeoutErr + } +} + +func (r *timeoutReadCloser) Close() error { + return r.reader.Close() +} + +const ( + // HandlerResponseTimeout is what we use to signify the name of the + // response timeout handler. + HandlerResponseTimeout = "ResponseTimeoutHandler" +) + +// adaptToResponseTimeoutError is a handler that will replace any top level error +// to a ErrCodeResponseTimeout, if its child is that. +func adaptToResponseTimeoutError(req *Request) { + if err, ok := req.Error.(awserr.Error); ok { + aerr, ok := err.OrigErr().(awserr.Error) + if ok && aerr.Code() == ErrCodeResponseTimeout { + req.Error = aerr + } + } +} + +// WithResponseReadTimeout is a request option that will wrap the body in a timeout read closer. +// This will allow for per read timeouts. If a timeout occurred, we will return the +// ErrCodeResponseTimeout. +// +// svc.PutObjectWithContext(ctx, params, request.WithTimeoutReadCloser(30 * time.Second) +func WithResponseReadTimeout(duration time.Duration) Option { + return func(r *Request) { + + var timeoutHandler = NamedHandler{ + HandlerResponseTimeout, + func(req *Request) { + req.HTTPResponse.Body = &timeoutReadCloser{ + reader: req.HTTPResponse.Body, + duration: duration, + } + }} + + // remove the handler so we are not stomping over any new durations. 
+ r.Handlers.Send.RemoveByName(HandlerResponseTimeout) + r.Handlers.Send.PushBackNamed(timeoutHandler) + + r.Handlers.Unmarshal.PushBack(adaptToResponseTimeoutError) + r.Handlers.UnmarshalError.PushBack(adaptToResponseTimeoutError) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go index 2520286b75dc..401246228219 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go @@ -220,7 +220,7 @@ type ErrParamMinLen struct { func NewErrParamMinLen(field string, min int) *ErrParamMinLen { return &ErrParamMinLen{ errInvalidParam: errInvalidParam{ - code: ParamMinValueErrCode, + code: ParamMinLenErrCode, field: field, msg: fmt.Sprintf("minimum field size of %v", min), }, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go new file mode 100644 index 000000000000..4601f883cc51 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go @@ -0,0 +1,295 @@ +package request + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" +) + +// WaiterResourceNotReadyErrorCode is the error code returned by a waiter when +// the waiter's max attempts have been exhausted. +const WaiterResourceNotReadyErrorCode = "ResourceNotReady" + +// A WaiterOption is a function that will update the Waiter value's fields to +// configure the waiter. +type WaiterOption func(*Waiter) + +// WithWaiterMaxAttempts returns the maximum number of times the waiter should +// attempt to check the resource for the target state. +func WithWaiterMaxAttempts(max int) WaiterOption { + return func(w *Waiter) { + w.MaxAttempts = max + } +} + +// WaiterDelay will return a delay the waiter should pause between attempts to +// check the resource state. The passed in attempt is the number of times the +// Waiter has checked the resource state. +// +// Attempt is the number of attempts the Waiter has made checking the resource +// state. +type WaiterDelay func(attempt int) time.Duration + +// ConstantWaiterDelay returns a WaiterDelay that will always return a constant +// delay the waiter should use between attempts. It ignores the number of +// attempts made. +func ConstantWaiterDelay(delay time.Duration) WaiterDelay { + return func(attempt int) time.Duration { + return delay + } +} + +// WithWaiterDelay will set the Waiter to use the WaiterDelay passed in. +func WithWaiterDelay(delayer WaiterDelay) WaiterOption { + return func(w *Waiter) { + w.Delay = delayer + } +} + +// WithWaiterLogger returns a waiter option to set the logger a waiter +// should use to log warnings and errors to. +func WithWaiterLogger(logger aws.Logger) WaiterOption { + return func(w *Waiter) { + w.Logger = logger + } +} + +// WithWaiterRequestOptions returns a waiter option setting the request +// options for each request the waiter makes. Appends to waiter's request +// options already set. +func WithWaiterRequestOptions(opts ...Option) WaiterOption { + return func(w *Waiter) { + w.RequestOptions = append(w.RequestOptions, opts...) + } +} + +// A Waiter provides the functionality to perform a blocking call which will +// wait for a resource state to be satisfied by a service. +// +// This type should not be used directly. The API operations provided in the +// service packages prefixed with "WaitUntil" should be used instead. 
+type Waiter struct { + Name string + Acceptors []WaiterAcceptor + Logger aws.Logger + + MaxAttempts int + Delay WaiterDelay + + RequestOptions []Option + NewRequest func([]Option) (*Request, error) + SleepWithContext func(aws.Context, time.Duration) error +} + +// ApplyOptions updates the waiter with the list of waiter options provided. +func (w *Waiter) ApplyOptions(opts ...WaiterOption) { + for _, fn := range opts { + fn(w) + } +} + +// WaiterState are states the waiter uses based on WaiterAcceptor definitions +// to identify if the resource state the waiter is waiting on has occurred. +type WaiterState int + +// String returns the string representation of the waiter state. +func (s WaiterState) String() string { + switch s { + case SuccessWaiterState: + return "success" + case FailureWaiterState: + return "failure" + case RetryWaiterState: + return "retry" + default: + return "unknown waiter state" + } +} + +// States the waiter acceptors will use to identify target resource states. +const ( + SuccessWaiterState WaiterState = iota // waiter successful + FailureWaiterState // waiter failed + RetryWaiterState // waiter needs to be retried +) + +// WaiterMatchMode is the mode that the waiter will use to match the WaiterAcceptor +// definition's Expected attribute. +type WaiterMatchMode int + +// Modes the waiter will use when inspecting API response to identify target +// resource states. +const ( + PathAllWaiterMatch WaiterMatchMode = iota // match on all paths + PathWaiterMatch // match on specific path + PathAnyWaiterMatch // match on any path + PathListWaiterMatch // match on list of paths + StatusWaiterMatch // match on status code + ErrorWaiterMatch // match on error +) + +// String returns the string representation of the waiter match mode. +func (m WaiterMatchMode) String() string { + switch m { + case PathAllWaiterMatch: + return "pathAll" + case PathWaiterMatch: + return "path" + case PathAnyWaiterMatch: + return "pathAny" + case PathListWaiterMatch: + return "pathList" + case StatusWaiterMatch: + return "status" + case ErrorWaiterMatch: + return "error" + default: + return "unknown waiter match mode" + } +} + +// WaitWithContext will make requests for the API operation using NewRequest to +// build API requests. The request's response will be compared against the +// Waiter's Acceptors to determine the successful state of the resource the +// waiter is inspecting. +// +// The passed in context must not be nil. If it is nil a panic will occur. The +// Context will be used to cancel the waiter's pending requests and retry delays. +// Use aws.BackgroundContext if no context is available. +// +// The waiter will continue until the target state defined by the Acceptors, +// or the max attempts expires. +// +// Will return the WaiterResourceNotReadyErrorCode error code if the waiter's +// retryer ShouldRetry returns false. This normally will happen when the max +// wait attempts expires. 
+func (w Waiter) WaitWithContext(ctx aws.Context) error { + + for attempt := 1; ; attempt++ { + req, err := w.NewRequest(w.RequestOptions) + if err != nil { + waiterLogf(w.Logger, "unable to create request %v", err) + return err + } + req.Handlers.Build.PushBack(MakeAddToUserAgentFreeFormHandler("Waiter")) + err = req.Send() + + // See if any of the acceptors match the request's response, or error + for _, a := range w.Acceptors { + if matched, matchErr := a.match(w.Name, w.Logger, req, err); matched { + return matchErr + } + } + + // The Waiter should only check the resource state MaxAttempts times + // This is here instead of in the for loop above to prevent delaying + // unnecessary when the waiter will not retry. + if attempt == w.MaxAttempts { + break + } + + // Delay to wait before inspecting the resource again + delay := w.Delay(attempt) + if sleepFn := req.Config.SleepDelay; sleepFn != nil { + // Support SleepDelay for backwards compatibility and testing + sleepFn(delay) + } else { + sleepCtxFn := w.SleepWithContext + if sleepCtxFn == nil { + sleepCtxFn = aws.SleepWithContext + } + + if err := sleepCtxFn(ctx, delay); err != nil { + return awserr.New(CanceledErrorCode, "waiter context canceled", err) + } + } + } + + return awserr.New(WaiterResourceNotReadyErrorCode, "exceeded wait attempts", nil) +} + +// A WaiterAcceptor provides the information needed to wait for an API operation +// to complete. +type WaiterAcceptor struct { + State WaiterState + Matcher WaiterMatchMode + Argument string + Expected interface{} +} + +// match returns if the acceptor found a match with the passed in request +// or error. True is returned if the acceptor made a match, error is returned +// if there was an error attempting to perform the match. +func (a *WaiterAcceptor) match(name string, l aws.Logger, req *Request, err error) (bool, error) { + result := false + var vals []interface{} + + switch a.Matcher { + case PathAllWaiterMatch, PathWaiterMatch: + // Require all matches to be equal for result to match + vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + if len(vals) == 0 { + break + } + result = true + for _, val := range vals { + if !awsutil.DeepEqual(val, a.Expected) { + result = false + break + } + } + case PathAnyWaiterMatch: + // Only a single match needs to equal for the result to match + vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + for _, val := range vals { + if awsutil.DeepEqual(val, a.Expected) { + result = true + break + } + } + case PathListWaiterMatch: + // ignored matcher + case StatusWaiterMatch: + s := a.Expected.(int) + result = s == req.HTTPResponse.StatusCode + case ErrorWaiterMatch: + if aerr, ok := err.(awserr.Error); ok { + result = aerr.Code() == a.Expected.(string) + } + default: + waiterLogf(l, "WARNING: Waiter %s encountered unexpected matcher: %s", + name, a.Matcher) + } + + if !result { + // If there was no matching result found there is nothing more to do + // for this response, retry the request. 
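The generated WaitUntil* methods are built on this Waiter type. As a rough sketch of that shape (the S3 HeadBucket request, the acceptor values, and the bucket name are illustrative assumptions, not taken from this file):

    package main

    import (
        "fmt"
        "time"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/request"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        sess := session.Must(session.NewSession())
        svc := s3.New(sess)

        w := request.Waiter{
            Name:        "WaitUntilBucketExists",
            MaxAttempts: 20,
            Delay:       request.ConstantWaiterDelay(5 * time.Second),
            Acceptors: []request.WaiterAcceptor{
                // Succeed on a 200 response, retry while the bucket is not found.
                {State: request.SuccessWaiterState, Matcher: request.StatusWaiterMatch, Expected: 200},
                {State: request.RetryWaiterState, Matcher: request.ErrorWaiterMatch, Expected: "NotFound"},
            },
            NewRequest: func(opts []request.Option) (*request.Request, error) {
                req, _ := svc.HeadBucketRequest(&s3.HeadBucketInput{
                    Bucket: aws.String("mybucket"), // placeholder
                })
                req.ApplyOptions(opts...)
                return req, nil
            },
        }

        err := w.WaitWithContext(aws.BackgroundContext())
        fmt.Println("wait result:", err)
    }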
+ return false, nil + } + + switch a.State { + case SuccessWaiterState: + // waiter completed + return true, nil + case FailureWaiterState: + // Waiter failure state triggered + return true, awserr.New(WaiterResourceNotReadyErrorCode, + "failed waiting for successful resource state", err) + case RetryWaiterState: + // clear the error and retry the operation + return false, nil + default: + waiterLogf(l, "WARNING: Waiter %s encountered unexpected state: %s", + name, a.State) + return false, nil + } +} + +func waiterLogf(logger aws.Logger, msg string, args ...interface{}) { + if logger != nil { + logger.Log(fmt.Sprintf(msg, args...)) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go index d3dc8404ed21..ea7b886f81f5 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go @@ -23,7 +23,7 @@ additional config if the AWS_SDK_LOAD_CONFIG environment variable is set. Alternatively you can explicitly create a Session with shared config enabled. To do this you can use NewSessionWithOptions to configure how the Session will be created. Using the NewSessionWithOptions with SharedConfigState set to -SharedConfigEnabled will create the session as if the AWS_SDK_LOAD_CONFIG +SharedConfigEnable will create the session as if the AWS_SDK_LOAD_CONFIG environment variable was set. Creating Sessions @@ -45,16 +45,16 @@ region, and profile loaded from the environment and shared config automatically. Requires the AWS_PROFILE to be set, or "default" is used. // Create Session - sess, err := session.NewSession() + sess := session.Must(session.NewSession()) // Create a Session with a custom region - sess, err := session.NewSession(&aws.Config{Region: aws.String("us-east-1")}) + sess := session.Must(session.NewSession(&aws.Config{ + Region: aws.String("us-east-1"), + })) // Create a S3 client instance from a session - sess, err := session.NewSession() - if err != nil { - // Handle Session creation error - } + sess := session.Must(session.NewSession()) + svc := s3.New(sess) Create Session With Option Overrides @@ -67,23 +67,25 @@ Use NewSessionWithOptions when you want to provide the config profile, or override the shared config state (AWS_SDK_LOAD_CONFIG). // Equivalent to session.NewSession() - sess, err := session.NewSessionWithOptions(session.Options{}) + sess := session.Must(session.NewSessionWithOptions(session.Options{ + // Options + })) // Specify profile to load for the session's config - sess, err := session.NewSessionWithOptions(session.Options{ + sess := session.Must(session.NewSessionWithOptions(session.Options{ Profile: "profile_name", - }) + })) // Specify profile for config and region for requests - sess, err := session.NewSessionWithOptions(session.Options{ + sess := session.Must(session.NewSessionWithOptions(session.Options{ Config: aws.Config{Region: aws.String("us-east-1")}, Profile: "profile_name", - }) + })) // Force enable Shared Config support - sess, err := session.NewSessionWithOptions(session.Options{ - SharedConfigState: SharedConfigEnable, - }) + sess := session.Must(session.NewSessionWithOptions(session.Options{ + SharedConfigState: session.SharedConfigEnable, + })) Adding Handlers @@ -93,7 +95,8 @@ handler logs every request and its payload made by a service client: // Create a session, and add additional handlers for all service // clients created with the Session to inherit. Adds logging handler. 
- sess, err := session.NewSession() + sess := session.Must(session.NewSession()) + sess.Handlers.Send.PushFront(func(r *request.Request) { // Log every request made and its payload logger.Println("Request: %s/%s, Payload: %s", @@ -121,9 +124,8 @@ file (~/.aws/config) and shared credentials file (~/.aws/credentials). Both files have the same format. If both config files are present the configuration from both files will be -read. The Session will be created from configuration values from the shared -credentials file (~/.aws/credentials) over those in the shared credentials -file (~/.aws/config). +read. The Session will be created from configuration values from the shared +credentials file (~/.aws/credentials) over those in the shared config file (~/.aws/config). Credentials are the values the SDK should use for authenticating requests with AWS Services. They arfrom a configuration file will need to include both @@ -138,15 +140,14 @@ the other two fields are also provided. Assume Role values allow you to configure the SDK to assume an IAM role using a set of credentials provided in a config file via the source_profile field. -Both "role_arn" and "source_profile" are required. The SDK does not support -assuming a role with MFA token Via the Session's constructor. You can use the -stscreds.AssumeRoleProvider credentials provider to specify custom -configuration and support for MFA. +Both "role_arn" and "source_profile" are required. The SDK supports assuming +a role with MFA token if the session option AssumeRoleTokenProvider +is set. role_arn = arn:aws:iam:::role/ source_profile = profile_with_creds external_id = 1234 - mfa_serial = not supported! + mfa_serial = role_session_name = session_name Region is the region the SDK should use for looking up AWS service endpoints @@ -154,6 +155,37 @@ and signing requests. region = us-east-1 +Assume Role with MFA token + +To create a session with support for assuming an IAM role with MFA set the +session option AssumeRoleTokenProvider to a function that will prompt for the +MFA token code when the SDK assumes the role and refreshes the role's credentials. +This allows you to configure the SDK via the shared config to assumea role +with MFA tokens. + +In order for the SDK to assume a role with MFA the SharedConfigState +session option must be set to SharedConfigEnable, or AWS_SDK_LOAD_CONFIG +environment variable set. + +The shared configuration instructs the SDK to assume an IAM role with MFA +when the mfa_serial configuration field is set in the shared config +(~/.aws/config) or shared credentials (~/.aws/credentials) file. + +If mfa_serial is set in the configuration, the SDK will assume the role, and +the AssumeRoleTokenProvider session option is not set an an error will +be returned when creating the session. + + sess := session.Must(session.NewSessionWithOptions(session.Options{ + AssumeRoleTokenProvider: stscreds.StdinTokenProvider, + })) + + // Create service client value configured for credentials + // from assumed role. + svc := s3.New(sess) + +To setup assume role outside of a session see the stscrds.AssumeRoleProvider +documentation. + Environment Variables When a Session is created several environment variables can be set to adjust @@ -218,6 +250,24 @@ $HOME/.aws/config on Linux/Unix based systems, and AWS_CONFIG_FILE=$HOME/my_shared_config +Path to a custom Credentials Authority (CA) bundle PEM file that the SDK +will use instead of the default system's root CA bundle. 
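A minimal sketch of the assume-role-with-MFA flow described above, assuming shared config is enabled and the selected profile sets role_arn, source_profile, and mfa_serial; the autoscaling client is used only as an example of a service client.

    package main

    import (
        "github.com/aws/aws-sdk-go/aws/credentials/stscreds"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/autoscaling"
    )

    func main() {
        // SharedConfigEnable makes the SDK read ~/.aws/config, and the token
        // provider is called whenever the assumed role's credentials need a
        // fresh MFA code.
        sess := session.Must(session.NewSessionWithOptions(session.Options{
            SharedConfigState:       session.SharedConfigEnable,
            AssumeRoleTokenProvider: stscreds.StdinTokenProvider,
        }))

        svc := autoscaling.New(sess)
        _ = svc
    }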
Use this only +if you want to replace the CA bundle the SDK uses for TLS requests. + + AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle + +Enabling this option will attempt to merge the Transport into the SDK's HTTP +client. If the client's Transport is not a http.Transport an error will be +returned. If the Transport's TLS config is set this option will cause the SDK +to overwrite the Transport's TLS config's RootCAs value. If the CA bundle file +contains multiple certificates all of them will be loaded. + +The Session option CustomCABundle is also available when creating sessions +to also enable this feature. CustomCABundle session option field has priority +over the AWS_CA_BUNDLE environment variable, and will be used if both are set. +Setting a custom HTTPClient in the aws.Config options will override this setting. +To use this option and custom HTTP client, the HTTP client needs to be provided +when creating the session. Not the service client. */ package session diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go index d2f0c844811a..f1adcf481982 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go @@ -2,12 +2,14 @@ package session import ( "os" - "path/filepath" "strconv" "github.com/aws/aws-sdk-go/aws/credentials" ) +// EnvProviderName provides a name of the provider when config is loaded from environment. +const EnvProviderName = "EnvConfigCredentials" + // envConfig is a collection of environment values the SDK will read // setup config from. All environment values are optional. But some values // such as credentials require multiple values to be complete or the values @@ -75,6 +77,24 @@ type envConfig struct { // // AWS_CONFIG_FILE=$HOME/my_shared_config SharedConfigFile string + + // Sets the path to a custom Credentials Authroity (CA) Bundle PEM file + // that the SDK will use instead of the system's root CA bundle. + // Only use this if you want to configure the SDK to use a custom set + // of CAs. + // + // Enabling this option will attempt to merge the Transport + // into the SDK's HTTP client. If the client's Transport is + // not a http.Transport an error will be returned. If the + // Transport's TLS config is set this option will cause the + // SDK to overwrite the Transport's TLS config's RootCAs value. + // + // Setting a custom HTTPClient in the aws.Config options will override this setting. + // To use this option and custom HTTP client, the HTTP client needs to be provided + // when creating the session. Not the service client. + // + // AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle + CustomCABundle string } var ( @@ -98,6 +118,12 @@ var ( "AWS_PROFILE", "AWS_DEFAULT_PROFILE", // Only read if AWS_SDK_LOAD_CONFIG is also set } + sharedCredsFileEnvKey = []string{ + "AWS_SHARED_CREDENTIALS_FILE", + } + sharedConfigFileEnvKey = []string{ + "AWS_CONFIG_FILE", + } ) // loadEnvConfig retrieves the SDK's environment configuration. 
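A hedged sketch of the CustomCABundle session option mentioned above; the PEM path is hypothetical, and the session option takes priority over AWS_CA_BUNDLE when both are set.

    package main

    import (
        "os"

        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        f, err := os.Open("/etc/pki/my-ca-bundle.pem") // hypothetical bundle path
        if err != nil {
            panic(err)
        }
        defer f.Close()

        // The reader's certificates replace the RootCAs of the session's
        // HTTP transport.
        sess := session.Must(session.NewSessionWithOptions(session.Options{
            CustomCABundle: f,
        }))
        _ = sess
    }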
@@ -134,7 +160,7 @@ func envConfigLoad(enableSharedConfig bool) envConfig { if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 { cfg.Creds = credentials.Value{} } else { - cfg.Creds.ProviderName = "EnvConfigCredentials" + cfg.Creds.ProviderName = EnvProviderName } regionKeys := regionEnvKeys @@ -147,8 +173,10 @@ func envConfigLoad(enableSharedConfig bool) envConfig { setFromEnvVal(&cfg.Region, regionKeys) setFromEnvVal(&cfg.Profile, profileKeys) - cfg.SharedCredentialsFile = sharedCredentialsFilename() - cfg.SharedConfigFile = sharedConfigFilename() + setFromEnvVal(&cfg.SharedCredentialsFile, sharedCredsFileEnvKey) + setFromEnvVal(&cfg.SharedConfigFile, sharedConfigFileEnvKey) + + cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE") return cfg } @@ -161,28 +189,3 @@ func setFromEnvVal(dst *string, keys []string) { } } } - -func sharedCredentialsFilename() string { - if name := os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(name) > 0 { - return name - } - - return filepath.Join(userHomeDir(), ".aws", "credentials") -} - -func sharedConfigFilename() string { - if name := os.Getenv("AWS_CONFIG_FILE"); len(name) > 0 { - return name - } - - return filepath.Join(userHomeDir(), ".aws", "config") -} - -func userHomeDir() string { - homeDir := os.Getenv("HOME") // *nix - if len(homeDir) == 0 { // windows - homeDir = os.Getenv("USERPROFILE") - } - - return homeDir -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go index c9427e913376..9f75d5ac5889 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -1,7 +1,13 @@ package session import ( + "crypto/tls" + "crypto/x509" "fmt" + "io" + "io/ioutil" + "net/http" + "os" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" @@ -40,7 +46,7 @@ type Session struct { // // If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value // the shared config file (~/.aws/config) will also be loaded, in addition to -// the shared credentials file (~/.aws/config). Values set in both the +// the shared credentials file (~/.aws/credentials). Values set in both the // shared config, and shared credentials will be taken from the shared // credentials file. // @@ -52,7 +58,7 @@ func New(cfgs ...*aws.Config) *Session { envCfg := loadEnvConfig() if envCfg.EnableSharedConfig { - s, err := newSession(envCfg, cfgs...) + s, err := newSession(Options{}, envCfg, cfgs...) if err != nil { // Old session.New expected all errors to be discovered when // a request is made, and would report the errors then. This @@ -73,7 +79,7 @@ func New(cfgs ...*aws.Config) *Session { return s } - return oldNewSession(cfgs...) + return deprecatedNewSession(cfgs...) } // NewSession returns a new Session created from SDK defaults, config files, @@ -83,7 +89,7 @@ func New(cfgs ...*aws.Config) *Session { // // If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value // the shared config file (~/.aws/config) will also be loaded in addition to -// the shared credentials file (~/.aws/config). Values set in both the +// the shared credentials file (~/.aws/credentials). Values set in both the // shared config, and shared credentials will be taken from the shared // credentials file. Enabling the Shared Config will also allow the Session // to be built with retrieving credentials with AssumeRole set in the config. 
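A small sketch, under the assumption that credentials came from the environment, of using the exported EnvProviderName constant added above to tell environment-sourced credentials apart from other providers.

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        sess := session.Must(session.NewSession())

        creds, err := sess.Config.Credentials.Get()
        if err == nil && creds.ProviderName == session.EnvProviderName {
            fmt.Println("credentials loaded from AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY")
        }
    }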
@@ -92,9 +98,10 @@ func New(cfgs ...*aws.Config) *Session { // control through code how the Session will be created. Such as specifying the // config profile, and controlling if shared config is enabled or not. func NewSession(cfgs ...*aws.Config) (*Session, error) { - envCfg := loadEnvConfig() + opts := Options{} + opts.Config.MergeIn(cfgs...) - return newSession(envCfg, cfgs...) + return NewSessionWithOptions(opts) } // SharedConfigState provides the ability to optionally override the state @@ -147,6 +154,45 @@ type Options struct { // will allow you to override the AWS_SDK_LOAD_CONFIG environment variable // and enable or disable the shared config functionality. SharedConfigState SharedConfigState + + // Ordered list of files the session will load configuration from. + // It will override environment variable AWS_SHARED_CREDENTIALS_FILE, AWS_CONFIG_FILE. + SharedConfigFiles []string + + // When the SDK's shared config is configured to assume a role with MFA + // this option is required in order to provide the mechanism that will + // retrieve the MFA token. There is no default value for this field. If + // it is not set an error will be returned when creating the session. + // + // This token provider will be called when ever the assumed role's + // credentials need to be refreshed. Within the context of service clients + // all sharing the same session the SDK will ensure calls to the token + // provider are atomic. When sharing a token provider across multiple + // sessions additional synchronization logic is needed to ensure the + // token providers do not introduce race conditions. It is recommend to + // share the session where possible. + // + // stscreds.StdinTokenProvider is a basic implementation that will prompt + // from stdin for the MFA token code. + // + // This field is only used if the shared configuration is enabled, and + // the config enables assume role wit MFA via the mfa_serial field. + AssumeRoleTokenProvider func() (string, error) + + // Reader for a custom Credentials Authority (CA) bundle in PEM format that + // the SDK will use instead of the default system's root CA bundle. Use this + // only if you want to replace the CA bundle the SDK uses for TLS requests. + // + // Enabling this option will attempt to merge the Transport into the SDK's HTTP + // client. If the client's Transport is not a http.Transport an error will be + // returned. If the Transport's TLS config is set this option will cause the SDK + // to overwrite the Transport's TLS config's RootCAs value. If the CA + // bundle reader contains multiple certificates all of them will be loaded. + // + // The Session option CustomCABundle is also available when creating sessions + // to also enable this feature. CustomCABundle session option field has priority + // over the AWS_CA_BUNDLE environment variable, and will be used if both are set. + CustomCABundle io.Reader } // NewSessionWithOptions returns a new Session created from SDK defaults, config files, @@ -155,29 +201,29 @@ type Options struct { // // If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value // the shared config file (~/.aws/config) will also be loaded in addition to -// the shared credentials file (~/.aws/config). Values set in both the +// the shared credentials file (~/.aws/credentials). Values set in both the // shared config, and shared credentials will be taken from the shared // credentials file. 
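A hedged sketch of the new SharedConfigFiles option; the file paths are hypothetical, later files override earlier ones, and setting the option bypasses AWS_SHARED_CREDENTIALS_FILE and AWS_CONFIG_FILE.

    package main

    import "github.com/aws/aws-sdk-go/aws/session"

    func main() {
        sess := session.Must(session.NewSessionWithOptions(session.Options{
            SharedConfigState: session.SharedConfigEnable,
            SharedConfigFiles: []string{
                "/etc/myapp/aws_config",      // hypothetical config file
                "/etc/myapp/aws_credentials", // hypothetical credentials file; wins on conflicts
            },
        }))
        _ = sess
    }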
Enabling the Shared Config will also allow the Session // to be built with retrieving credentials with AssumeRole set in the config. // // // Equivalent to session.New -// sess, err := session.NewSessionWithOptions(session.Options{}) +// sess := session.Must(session.NewSessionWithOptions(session.Options{})) // // // Specify profile to load for the session's config -// sess, err := session.NewSessionWithOptions(session.Options{ +// sess := session.Must(session.NewSessionWithOptions(session.Options{ // Profile: "profile_name", -// }) +// })) // // // Specify profile for config and region for requests -// sess, err := session.NewSessionWithOptions(session.Options{ +// sess := session.Must(session.NewSessionWithOptions(session.Options{ // Config: aws.Config{Region: aws.String("us-east-1")}, // Profile: "profile_name", -// }) +// })) // // // Force enable Shared Config support -// sess, err := session.NewSessionWithOptions(session.Options{ -// SharedConfigState: SharedConfigEnable, -// }) +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// SharedConfigState: session.SharedConfigEnable, +// })) func NewSessionWithOptions(opts Options) (*Session, error) { var envCfg envConfig if opts.SharedConfigState == SharedConfigEnable { @@ -197,7 +243,25 @@ func NewSessionWithOptions(opts Options) (*Session, error) { envCfg.EnableSharedConfig = true } - return newSession(envCfg, &opts.Config) + if len(envCfg.SharedCredentialsFile) == 0 { + envCfg.SharedCredentialsFile = defaults.SharedCredentialsFilename() + } + if len(envCfg.SharedConfigFile) == 0 { + envCfg.SharedConfigFile = defaults.SharedConfigFilename() + } + + // Only use AWS_CA_BUNDLE if session option is not provided. + if len(envCfg.CustomCABundle) != 0 && opts.CustomCABundle == nil { + f, err := os.Open(envCfg.CustomCABundle) + if err != nil { + return nil, awserr.New("LoadCustomCABundleError", + "failed to open custom CA bundle PEM file", err) + } + defer f.Close() + opts.CustomCABundle = f + } + + return newSession(opts, envCfg, &opts.Config) } // Must is a helper function to ensure the Session is valid and there was no @@ -215,7 +279,7 @@ func Must(sess *Session, err error) *Session { return sess } -func oldNewSession(cfgs ...*aws.Config) *Session { +func deprecatedNewSession(cfgs ...*aws.Config) *Session { cfg := defaults.Config() handlers := defaults.Handlers() @@ -242,7 +306,7 @@ func oldNewSession(cfgs ...*aws.Config) *Session { return s } -func newSession(envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { +func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { cfg := defaults.Config() handlers := defaults.Handlers() @@ -251,13 +315,18 @@ func newSession(envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { userCfg := &aws.Config{} userCfg.MergeIn(cfgs...) - // Order config files will be loaded in with later files overwriting + // Ordered config files will be loaded in with later files overwriting // previous config file values. - cfgFiles := []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile} - if !envCfg.EnableSharedConfig { - // The shared config file (~/.aws/config) is only loaded if instructed - // to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG). 
- cfgFiles = cfgFiles[1:] + var cfgFiles []string + if opts.SharedConfigFiles != nil { + cfgFiles = opts.SharedConfigFiles + } else { + cfgFiles = []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile} + if !envCfg.EnableSharedConfig { + // The shared config file (~/.aws/config) is only loaded if instructed + // to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG). + cfgFiles = cfgFiles[1:] + } } // Load additional config from file(s) @@ -266,7 +335,9 @@ func newSession(envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { return nil, err } - mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers) + if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil { + return nil, err + } s := &Session{ Config: cfg, @@ -275,10 +346,62 @@ func newSession(envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { initHandlers(s) + // Setup HTTP client with custom cert bundle if enabled + if opts.CustomCABundle != nil { + if err := loadCustomCABundle(s, opts.CustomCABundle); err != nil { + return nil, err + } + } + return s, nil } -func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig, handlers request.Handlers) { +func loadCustomCABundle(s *Session, bundle io.Reader) error { + var t *http.Transport + switch v := s.Config.HTTPClient.Transport.(type) { + case *http.Transport: + t = v + default: + if s.Config.HTTPClient.Transport != nil { + return awserr.New("LoadCustomCABundleError", + "unable to load custom CA bundle, HTTPClient's transport unsupported type", nil) + } + } + if t == nil { + t = &http.Transport{} + } + + p, err := loadCertPool(bundle) + if err != nil { + return err + } + if t.TLSClientConfig == nil { + t.TLSClientConfig = &tls.Config{} + } + t.TLSClientConfig.RootCAs = p + + s.Config.HTTPClient.Transport = t + + return nil +} + +func loadCertPool(r io.Reader) (*x509.CertPool, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, awserr.New("LoadCustomCABundleError", + "failed to read custom CA bundle PEM file", err) + } + + p := x509.NewCertPool() + if !p.AppendCertsFromPEM(b) { + return nil, awserr.New("LoadCustomCABundleError", + "failed to load custom CA bundle PEM file", err) + } + + return p, nil +} + +func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig, handlers request.Handlers, sessOpts Options) error { // Merge in user provided configuration cfg.MergeIn(userCfg) @@ -302,6 +425,11 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg share cfgCp.Credentials = credentials.NewStaticCredentialsFromCreds( sharedCfg.AssumeRoleSource.Creds, ) + if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil { + // AssumeRole Token provider is required if doing Assume Role + // with MFA. 
+ return AssumeRoleTokenProviderNotSetError{} + } cfg.Credentials = stscreds.NewCredentials( &Session{ Config: &cfgCp, @@ -311,11 +439,16 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg share func(opt *stscreds.AssumeRoleProvider) { opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName + // Assume role with external ID if len(sharedCfg.AssumeRole.ExternalID) > 0 { opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID) } - // MFA not supported + // Assume role with MFA + if len(sharedCfg.AssumeRole.MFASerial) > 0 { + opt.SerialNumber = aws.String(sharedCfg.AssumeRole.MFASerial) + opt.TokenProvider = sessOpts.AssumeRoleTokenProvider + } }, ) } else if len(sharedCfg.Creds.AccessKeyID) > 0 { @@ -336,6 +469,33 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg share }) } } + + return nil +} + +// AssumeRoleTokenProviderNotSetError is an error returned when creating a session when the +// MFAToken option is not set when shared config is configured load assume a +// role with an MFA token. +type AssumeRoleTokenProviderNotSetError struct{} + +// Code is the short id of the error. +func (e AssumeRoleTokenProviderNotSetError) Code() string { + return "AssumeRoleTokenProviderNotSetError" +} + +// Message is the description of the error +func (e AssumeRoleTokenProviderNotSetError) Message() string { + return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.") +} + +// OrigErr is the underlying error that caused the failure. +func (e AssumeRoleTokenProviderNotSetError) OrigErr() error { + return nil +} + +// Error satisfies the error interface. +func (e AssumeRoleTokenProviderNotSetError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", nil) } type credProviderError struct { @@ -404,6 +564,10 @@ func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) ( func(opt *endpoints.Options) { opt.DisableSSL = aws.BoolValue(s.Config.DisableSSL) opt.UseDualStack = aws.BoolValue(s.Config.UseDualStack) + + // Support the condition where the service is modeled but its + // endpoint metadata is not available. + opt.ResolveUnknownService = true }, ) } @@ -416,3 +580,27 @@ func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) ( SigningName: resolved.SigningName, }, err } + +// ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception +// that the EndpointResolver will not be used to resolve the endpoint. The only +// endpoint set must come from the aws.Config.Endpoint field. +func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Config { + s = s.Copy(cfgs...) 
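A minimal sketch of detecting the AssumeRoleTokenProviderNotSetError defined above, assuming the active shared-config profile sets mfa_serial but no token provider was supplied.

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        sess, err := session.NewSessionWithOptions(session.Options{
            SharedConfigState: session.SharedConfigEnable,
            // AssumeRoleTokenProvider deliberately left unset.
        })
        if err != nil {
            if _, ok := err.(session.AssumeRoleTokenProviderNotSetError); ok {
                fmt.Println("profile requires MFA; set AssumeRoleTokenProvider (e.g. stscreds.StdinTokenProvider)")
            }
            return
        }
        _ = sess
    }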
+ + var resolved endpoints.ResolvedEndpoint + + region := aws.StringValue(s.Config.Region) + + if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 { + resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL)) + resolved.SigningRegion = region + } + + return client.Config{ + Config: s.Config, + Handlers: s.Handlers, + Endpoint: resolved.URL, + SigningRegion: resolved.SigningRegion, + SigningName: resolved.SigningName, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go index b58076f5e321..09c8e5bc7abe 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -113,7 +113,7 @@ func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) { f, err := ini.Load(b) if err != nil { - return nil, SharedConfigLoadError{Filename: filename} + return nil, SharedConfigLoadError{Filename: filename, Err: err} } files = append(files, sharedConfigFile{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go new file mode 100644 index 000000000000..6aa2ed241bb1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go @@ -0,0 +1,7 @@ +package v4 + +// WithUnsignedPayload will enable and set the UnsignedPayload field to +// true of the signer. +func WithUnsignedPayload(v4 *Signer) { + v4.UnsignedPayload = true +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go index 98bfe742b3b2..15da57249a1c 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -45,7 +45,7 @@ // If signing a request intended for HTTP2 server, and you're using Go 1.6.2 // through 1.7.4 you should use the URL.RawPath as the pre-escaped form of the // request URL. https://github.com/golang/go/issues/16847 points to a bug in -// Go pre 1.8 that failes to make HTTP2 requests using absolute URL in the HTTP +// Go pre 1.8 that fails to make HTTP2 requests using absolute URL in the HTTP // message. URL.Opaque generally will force Go to make requests with absolute URL. // URL.RawPath does not do this, but RawPath must be a valid escaping of Path // or url.EscapedPath will ignore the RawPath escaping. @@ -55,7 +55,6 @@ package v4 import ( - "bytes" "crypto/hmac" "crypto/sha256" "encoding/hex" @@ -194,6 +193,10 @@ type Signer struct { // This value should only be used for testing. If it is nil the default // time.Now will be used. currentTimeFn func() time.Time + + // UnsignedPayload will prevent signing of the payload. This will only + // work for services that have support for this. + UnsignedPayload bool } // NewSigner returns a Signer pointer configured with the credentials and optional @@ -227,6 +230,7 @@ type signingCtx struct { isPresign bool formattedTime string formattedShortTime string + unsignedPayload bool bodyDigest string signedHeaders string @@ -317,6 +321,7 @@ func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, regi ServiceName: service, Region: region, DisableURIPathEscaping: v4.DisableURIPathEscaping, + unsignedPayload: v4.UnsignedPayload, } for key := range ctx.Query { @@ -396,7 +401,7 @@ var SignRequestHandler = request.NamedHandler{ } // SignSDKRequest signs an AWS request with the V4 signature. 
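A hedged sketch of the ClientConfigNoResolveEndpoint helper added above, assuming a hypothetical fixed endpoint; only the aws.Config.Endpoint value is used and the SDK's endpoint resolver is skipped.

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        sess := session.Must(session.NewSession(&aws.Config{
            Region:   aws.String("us-east-1"),
            Endpoint: aws.String("https://autoscaling.example.internal"), // hypothetical endpoint
        }))

        cfg := sess.ClientConfigNoResolveEndpoint()
        fmt.Println(cfg.Endpoint, cfg.SigningRegion)
    }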
This -// request handler is bested used only with the SDK's built in service client's +// request handler should only be used with the SDK's built in service client's // API operation requests. // // This function should not be used on its on its own, but in conjunction with @@ -409,7 +414,18 @@ var SignRequestHandler = request.NamedHandler{ func SignSDKRequest(req *request.Request) { signSDKRequestWithCurrTime(req, time.Now) } -func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time) { + +// BuildNamedHandler will build a generic handler for signing. +func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler { + return request.NamedHandler{ + Name: name, + Fn: func(req *request.Request) { + signSDKRequestWithCurrTime(req, time.Now, opts...) + }, + } +} + +func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) { // If the request does not need to be signed ignore the signing of the // request if the AnonymousCredentials object is used. if req.Config.Credentials == credentials.AnonymousCredentials { @@ -441,6 +457,10 @@ func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time v4.DisableRequestBodyOverwrite = true }) + for _, opt := range opts { + opt(v4) + } + signingTime := req.Time if !req.LastSignedAt.IsZero() { signingTime = req.LastSignedAt @@ -482,6 +502,8 @@ func (ctx *signingCtx) build(disableHeaderHoisting bool) { ctx.buildTime() // no depends ctx.buildCredentialString() // no depends + ctx.buildBodyDigest() + unsignedHeaders := ctx.Request.Header if ctx.isPresign { if !disableHeaderHoisting { @@ -493,7 +515,6 @@ func (ctx *signingCtx) build(disableHeaderHoisting bool) { } } - ctx.buildBodyDigest() ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders) ctx.buildCanonicalString() // depends on canon headers / signed headers ctx.buildStringToSign() // depends on canon string @@ -583,14 +604,18 @@ func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) { headerValues := make([]string, len(headers)) for i, k := range headers { if k == "host" { - headerValues[i] = "host:" + ctx.Request.URL.Host + if ctx.Request.Host != "" { + headerValues[i] = "host:" + ctx.Request.Host + } else { + headerValues[i] = "host:" + ctx.Request.URL.Host + } } else { headerValues[i] = k + ":" + strings.Join(ctx.SignedHeaderVals[k], ",") } } - - ctx.canonicalHeaders = strings.Join(stripExcessSpaces(headerValues), "\n") + stripExcessSpaces(headerValues) + ctx.canonicalHeaders = strings.Join(headerValues, "\n") } func (ctx *signingCtx) buildCanonicalString() { @@ -634,14 +659,14 @@ func (ctx *signingCtx) buildSignature() { func (ctx *signingCtx) buildBodyDigest() { hash := ctx.Request.Header.Get("X-Amz-Content-Sha256") if hash == "" { - if ctx.isPresign && ctx.ServiceName == "s3" { + if ctx.unsignedPayload || (ctx.isPresign && ctx.ServiceName == "s3") { hash = "UNSIGNED-PAYLOAD" } else if ctx.Body == nil { hash = emptyStringSHA256 } else { hash = hex.EncodeToString(makeSha256Reader(ctx.Body)) } - if ctx.ServiceName == "s3" || ctx.ServiceName == "glacier" { + if ctx.unsignedPayload || ctx.ServiceName == "s3" || ctx.ServiceName == "glacier" { ctx.Request.Header.Set("X-Amz-Content-Sha256", hash) } } @@ -692,49 +717,46 @@ func makeSha256Reader(reader io.ReadSeeker) []byte { return hash.Sum(nil) } -const doubleSpaces = " " - -var doubleSpaceBytes = []byte(doubleSpaces) +const doubleSpace = " " -func stripExcessSpaces(headerVals []string) []string { - vals := make([]string, 
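A rough sketch of the new UnsignedPayload support with a standalone signer, assuming a hypothetical S3 object URL; WithUnsignedPayload makes the signer send UNSIGNED-PAYLOAD instead of a body hash.

    package main

    import (
        "net/http"
        "strings"
        "time"

        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/aws/signer/v4"
    )

    func main() {
        sess := session.Must(session.NewSession())

        // Build a signer that skips hashing the request payload.
        signer := v4.NewSigner(sess.Config.Credentials, v4.WithUnsignedPayload)

        body := strings.NewReader("hello")
        req, _ := http.NewRequest("PUT", "https://examplebucket.s3.amazonaws.com/key", body) // hypothetical URL

        if _, err := signer.Sign(req, body, "s3", "us-east-1", time.Now()); err != nil {
            panic(err)
        }
    }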
len(headerVals)) - for i, str := range headerVals { - // Trim leading and trailing spaces - trimmed := strings.TrimSpace(str) +// stripExcessSpaces will rewrite the passed in slice's string values to not +// contain muliple side-by-side spaces. +func stripExcessSpaces(vals []string) { + var j, k, l, m, spaces int + for i, str := range vals { + // Trim trailing spaces + for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- { + } - idx := strings.Index(trimmed, doubleSpaces) - var buf []byte - for idx > -1 { - // Multiple adjacent spaces found - if buf == nil { - // first time create the buffer - buf = []byte(trimmed) - } + // Trim leading spaces + for k = 0; k < j && str[k] == ' '; k++ { + } + str = str[k : j+1] - stripToIdx := -1 - for j := idx + 1; j < len(buf); j++ { - if buf[j] != ' ' { - buf = append(buf[:idx+1], buf[j:]...) - stripToIdx = j - break - } - } + // Strip multiple spaces. + j = strings.Index(str, doubleSpace) + if j < 0 { + vals[i] = str + continue + } - if stripToIdx >= 0 { - idx = bytes.Index(buf[stripToIdx:], doubleSpaceBytes) - if idx >= 0 { - idx += stripToIdx + buf := []byte(str) + for k, m, l = j, j, len(buf); k < l; k++ { + if buf[k] == ' ' { + if spaces == 0 { + // First space. + buf[m] = buf[k] + m++ } + spaces++ } else { - idx = -1 + // End of multiple spaces. + spaces = 0 + buf[m] = buf[k] + m++ } } - if buf != nil { - vals[i] = string(buf) - } else { - vals[i] = trimmed - } + vals[i] = string(buf[:m]) } - return vals } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go index 9ca685e90409..0e2d864e10a8 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/types.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/types.go @@ -114,5 +114,5 @@ func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) { func (b *WriteAtBuffer) Bytes() []byte { b.m.Lock() defer b.m.Unlock() - return b.buf[:len(b.buf):len(b.buf)] + return b.buf } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url.go b/vendor/github.com/aws/aws-sdk-go/aws/url.go new file mode 100644 index 000000000000..6192b2455b63 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/url.go @@ -0,0 +1,12 @@ +// +build go1.8 + +package aws + +import "net/url" + +// URLHostname will extract the Hostname without port from the URL value. +// +// Wrapper of net/url#URL.Hostname for backwards Go version compatibility. +func URLHostname(url *url.URL) string { + return url.Hostname() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go new file mode 100644 index 000000000000..0210d2720e77 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go @@ -0,0 +1,29 @@ +// +build !go1.8 + +package aws + +import ( + "net/url" + "strings" +) + +// URLHostname will extract the Hostname without port from the URL value. +// +// Copy of Go 1.8's net/url#URL.Hostname functionality. +func URLHostname(url *url.URL) string { + return stripPort(url.Host) + +} + +// stripPort is copy of Go 1.8 url#URL.Hostname functionality. 
+// https://golang.org/src/net/url/url.go +func stripPort(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return hostport + } + if i := strings.IndexByte(hostport, ']'); i != -1 { + return strings.TrimPrefix(hostport[:i], "[") + } + return hostport[:colon] +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index efd2812930a5..6c100863e126 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.6.10" +const SDKVersion = "1.12.7" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go new file mode 100644 index 000000000000..ebcbc2b40a3f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go @@ -0,0 +1,40 @@ +package shareddefaults + +import ( + "os" + "path/filepath" + "runtime" +) + +// SharedCredentialsFilename returns the SDK's default file path +// for the shared credentials file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/credentials +// - Windows: %USERPROFILE%\.aws\credentials +func SharedCredentialsFilename() string { + return filepath.Join(UserHomeDir(), ".aws", "credentials") +} + +// SharedConfigFilename returns the SDK's default file path for +// the shared config file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/config +// - Windows: %USERPROFILE%\.aws\config +func SharedConfigFilename() string { + return filepath.Join(UserHomeDir(), ".aws", "config") +} + +// UserHomeDir returns the home directory for the user the process is +// running under. +func UserHomeDir() string { + if runtime.GOOS == "windows" { // Windows + return os.Getenv("USERPROFILE") + } + + // *nix + return os.Getenv("HOME") +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go index e166a857d8fb..6efe43d5f394 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go @@ -4,7 +4,9 @@ package jsonutil import ( "bytes" "encoding/base64" + "encoding/json" "fmt" + "math" "reflect" "sort" "strconv" @@ -25,6 +27,7 @@ func BuildJSON(v interface{}) ([]byte, error) { } func buildAny(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + origVal := value value = reflect.Indirect(value) if !value.IsValid() { return nil @@ -61,7 +64,7 @@ func buildAny(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) err case "map": return buildMap(value, buf, tag) default: - return buildScalar(value, buf, tag) + return buildScalar(origVal, buf, tag) } } @@ -87,6 +90,10 @@ func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) first := true for i := 0; i < t.NumField(); i++ { member := value.Field(i) + + // This allocates the most memory. + // Additionally, we cannot skip nil fields due to + // idempotency auto filling. 
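A tiny sketch of the aws.URLHostname helper added above, which behaves the same on Go releases before and after 1.8; the URL is hypothetical.

    package main

    import (
        "fmt"
        "net/url"

        "github.com/aws/aws-sdk-go/aws"
    )

    func main() {
        u, err := url.Parse("https://autoscaling.us-west-2.amazonaws.com:443/doc") // hypothetical URL
        if err != nil {
            panic(err)
        }

        // Prints the host without the port: autoscaling.us-west-2.amazonaws.com
        fmt.Println(aws.URLHostname(u))
    }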
field := t.Field(i) if field.PkgPath != "" { @@ -182,21 +189,32 @@ func buildMap(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) err return nil } -func buildScalar(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { - switch value.Kind() { +func buildScalar(v reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + // prevents allocation on the heap. + scratch := [64]byte{} + switch value := reflect.Indirect(v); value.Kind() { case reflect.String: writeString(value.String(), buf) case reflect.Bool: - buf.WriteString(strconv.FormatBool(value.Bool())) + if value.Bool() { + buf.WriteString("true") + } else { + buf.WriteString("false") + } case reflect.Int64: - buf.WriteString(strconv.FormatInt(value.Int(), 10)) + buf.Write(strconv.AppendInt(scratch[:0], value.Int(), 10)) case reflect.Float64: - buf.WriteString(strconv.FormatFloat(value.Float(), 'f', -1, 64)) + f := value.Float() + if math.IsInf(f, 0) || math.IsNaN(f) { + return &json.UnsupportedValueError{Value: v, Str: strconv.FormatFloat(f, 'f', -1, 64)} + } + buf.Write(strconv.AppendFloat(scratch[:0], f, 'f', -1, 64)) default: switch value.Type() { case timeType: - converted := value.Interface().(time.Time) - buf.WriteString(strconv.FormatInt(converted.UTC().Unix(), 10)) + converted := v.Interface().(*time.Time) + + buf.Write(strconv.AppendInt(scratch[:0], converted.UTC().Unix(), 10)) case byteSliceType: if !value.IsNil() { converted := value.Interface().([]byte) @@ -222,27 +240,31 @@ func buildScalar(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) return nil } +var hex = "0123456789abcdef" + func writeString(s string, buf *bytes.Buffer) { buf.WriteByte('"') - for _, r := range s { - if r == '"' { + for i := 0; i < len(s); i++ { + if s[i] == '"' { buf.WriteString(`\"`) - } else if r == '\\' { + } else if s[i] == '\\' { buf.WriteString(`\\`) - } else if r == '\b' { + } else if s[i] == '\b' { buf.WriteString(`\b`) - } else if r == '\f' { + } else if s[i] == '\f' { buf.WriteString(`\f`) - } else if r == '\r' { + } else if s[i] == '\r' { buf.WriteString(`\r`) - } else if r == '\t' { + } else if s[i] == '\t' { buf.WriteString(`\t`) - } else if r == '\n' { + } else if s[i] == '\n' { buf.WriteString(`\n`) - } else if r < 32 { - fmt.Fprintf(buf, "\\u%0.4x", r) + } else if s[i] < 32 { + buf.WriteString("\\u00") + buf.WriteByte(hex[s[i]>>4]) + buf.WriteByte(hex[s[i]&0xF]) } else { - buf.WriteRune(r) + buf.WriteByte(s[i]) } } buf.WriteByte('"') diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go index f434ab7cb936..5ce9cba32915 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go @@ -80,7 +80,6 @@ func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix stri continue } - if protocol.CanSetIdempotencyToken(value.Field(i), field) { token := protocol.GetIdempotencyToken() elemValue = reflect.ValueOf(token) @@ -122,9 +121,17 @@ func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string return nil } + if _, ok := value.Interface().([]byte); ok { + return q.parseScalar(v, value, prefix, tag) + } + // check for unflattened list member if !q.isEC2 && tag.Get("flattened") == "" { - prefix += ".member" + if listName := tag.Get("locationNameList"); listName == "" { + prefix += ".member" + } else { + prefix += "." 
+ listName + } } for i := 0; i < value.Len(); i++ { diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go index 20a41d462c23..71618356493d 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go @@ -4,6 +4,7 @@ package rest import ( "bytes" "encoding/base64" + "encoding/json" "fmt" "io" "net/http" @@ -82,8 +83,12 @@ func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bo if name == "" { name = field.Name } - if m.Kind() == reflect.Ptr { + if kind := m.Kind(); kind == reflect.Ptr { m = m.Elem() + } else if kind == reflect.Interface { + if !m.Elem().IsValid() { + continue + } } if !m.IsValid() { continue @@ -95,16 +100,16 @@ func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bo var err error switch field.Tag.Get("location") { case "headers": // header maps - err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag.Get("locationName")) + err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag) case "header": - err = buildHeader(&r.HTTPRequest.Header, m, name) + err = buildHeader(&r.HTTPRequest.Header, m, name, field.Tag) case "uri": - err = buildURI(r.HTTPRequest.URL, m, name) + err = buildURI(r.HTTPRequest.URL, m, name, field.Tag) case "querystring": - err = buildQueryString(query, m, name) + err = buildQueryString(query, m, name, field.Tag) default: if buildGETQuery { - err = buildQueryString(query, m, name) + err = buildQueryString(query, m, name, field.Tag) } } r.Error = err @@ -145,8 +150,8 @@ func buildBody(r *request.Request, v reflect.Value) { } } -func buildHeader(header *http.Header, v reflect.Value, name string) error { - str, err := convertType(v) +func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect.StructTag) error { + str, err := convertType(v, tag) if err == errValueNotSet { return nil } else if err != nil { @@ -158,9 +163,10 @@ func buildHeader(header *http.Header, v reflect.Value, name string) error { return nil } -func buildHeaderMap(header *http.Header, v reflect.Value, prefix string) error { +func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag) error { + prefix := tag.Get("locationName") for _, key := range v.MapKeys() { - str, err := convertType(v.MapIndex(key)) + str, err := convertType(v.MapIndex(key), tag) if err == errValueNotSet { continue } else if err != nil { @@ -173,8 +179,8 @@ func buildHeaderMap(header *http.Header, v reflect.Value, prefix string) error { return nil } -func buildURI(u *url.URL, v reflect.Value, name string) error { - value, err := convertType(v) +func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) error { + value, err := convertType(v, tag) if err == errValueNotSet { return nil } else if err != nil { @@ -190,7 +196,7 @@ func buildURI(u *url.URL, v reflect.Value, name string) error { return nil } -func buildQueryString(query url.Values, v reflect.Value, name string) error { +func buildQueryString(query url.Values, v reflect.Value, name string, tag reflect.StructTag) error { switch value := v.Interface().(type) { case []*string: for _, item := range value { @@ -207,7 +213,7 @@ func buildQueryString(query url.Values, v reflect.Value, name string) error { } } default: - str, err := convertType(v) + str, err := convertType(v, tag) if err == errValueNotSet { return nil } else if err != nil { @@ -246,7 +252,7 @@ func EscapePath(path string, 
encodeSep bool) string { return buf.String() } -func convertType(v reflect.Value) (string, error) { +func convertType(v reflect.Value, tag reflect.StructTag) (string, error) { v = reflect.Indirect(v) if !v.IsValid() { return "", errValueNotSet @@ -266,6 +272,16 @@ func convertType(v reflect.Value) (string, error) { str = strconv.FormatFloat(value, 'f', -1, 64) case time.Time: str = value.UTC().Format(RFC822) + case aws.JSONValue: + b, err := json.Marshal(value) + if err != nil { + return "", err + } + if tag.Get("location") == "header" { + str = base64.StdEncoding.EncodeToString(b) + } else { + str = string(b) + } default: err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) return "", err diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go index 9c00921c09b1..7a779ee22602 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -3,6 +3,7 @@ package rest import ( "bytes" "encoding/base64" + "encoding/json" "fmt" "io" "io/ioutil" @@ -12,6 +13,7 @@ import ( "strings" "time" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" ) @@ -111,7 +113,7 @@ func unmarshalLocationElements(r *request.Request, v reflect.Value) { case "statusCode": unmarshalStatusCode(m, r.HTTPResponse.StatusCode) case "header": - err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name)) + err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name), field.Tag) if err != nil { r.Error = awserr.New("SerializationError", "failed to decode REST response", err) break @@ -158,8 +160,13 @@ func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) err return nil } -func unmarshalHeader(v reflect.Value, header string) error { - if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { +func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) error { + isJSONValue := tag.Get("type") == "jsonvalue" + if isJSONValue { + if len(header) == 0 { + return nil + } + } else if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { return nil } @@ -196,6 +203,22 @@ func unmarshalHeader(v reflect.Value, header string) error { return err } v.Set(reflect.ValueOf(&t)) + case aws.JSONValue: + b := []byte(header) + var err error + if tag.Get("location") == "header" { + b, err = base64.StdEncoding.DecodeString(header) + if err != nil { + return err + } + } + + m := aws.JSONValue{} + err = json.Unmarshal(b, &m) + if err != nil { + return err + } + v.Set(reflect.ValueOf(m)) default: err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) return err diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go index c74c191967a2..7091b456d180 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go @@ -131,7 +131,6 @@ func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag refl continue } - mTag := field.Tag if mTag.Get("location") != "" { // skip non-body members continue diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go index 
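An illustrative, hedged sketch of how the REST protocol code above serializes an aws.JSONValue bound to a header: JSON-marshal the map, then base64-encode it (values bound to the query string or URI stay as plain JSON).

    package main

    import (
        "encoding/base64"
        "encoding/json"
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
    )

    func main() {
        v := aws.JSONValue{"foo": "bar"}

        b, err := json.Marshal(v)
        if err != nil {
            panic(err)
        }

        // Header-bound JSONValues are base64 encoded, mirroring convertType above.
        fmt.Println(base64.StdEncoding.EncodeToString(b))
    }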
64b6ddd3e18d..87584628a2b2 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go @@ -15,7 +15,10 @@ import ( // needs to match the shape of the XML expected to be decoded. // If the shape doesn't match unmarshaling will fail. func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { - n, _ := XMLToStruct(d, nil) + n, err := XMLToStruct(d, nil) + if err != nil { + return err + } if n.Children != nil { for _, root := range n.Children { for _, c := range root { @@ -23,7 +26,7 @@ func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { c = wrappedChild[0] // pull out wrapped element } - err := parse(reflect.ValueOf(v), c, "") + err = parse(reflect.ValueOf(v), c, "") if err != nil { if err == io.EOF { return nil diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go index 3112512a2103..3e970b629dae 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go @@ -40,11 +40,16 @@ func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) { out := &XMLNode{} for { tok, err := d.Token() - if tok == nil || err == io.EOF { - break - } if err != nil { - return out, err + if err == io.EOF { + break + } else { + return out, err + } + } + + if tok == nil { + break } switch typed := tok.(type) { diff --git a/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go b/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go deleted file mode 100644 index b51e9449c105..000000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go +++ /dev/null @@ -1,134 +0,0 @@ -package waiter - -import ( - "fmt" - "reflect" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/request" -) - -// A Config provides a collection of configuration values to setup a generated -// waiter code with. -type Config struct { - Name string - Delay int - MaxAttempts int - Operation string - Acceptors []WaitAcceptor -} - -// A WaitAcceptor provides the information needed to wait for an API operation -// to complete. -type WaitAcceptor struct { - Expected interface{} - Matcher string - State string - Argument string -} - -// A Waiter provides waiting for an operation to complete. -type Waiter struct { - Config - Client interface{} - Input interface{} -} - -// Wait waits for an operation to complete, expire max attempts, or fail. Error -// is returned if the operation fails. 
-func (w *Waiter) Wait() error { - client := reflect.ValueOf(w.Client) - in := reflect.ValueOf(w.Input) - method := client.MethodByName(w.Config.Operation + "Request") - - for i := 0; i < w.MaxAttempts; i++ { - res := method.Call([]reflect.Value{in}) - req := res[0].Interface().(*request.Request) - req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Waiter")) - - err := req.Send() - for _, a := range w.Acceptors { - result := false - var vals []interface{} - switch a.Matcher { - case "pathAll", "path": - // Require all matches to be equal for result to match - vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) - if len(vals) == 0 { - break - } - result = true - for _, val := range vals { - if !awsutil.DeepEqual(val, a.Expected) { - result = false - break - } - } - case "pathAny": - // Only a single match needs to equal for the result to match - vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) - for _, val := range vals { - if awsutil.DeepEqual(val, a.Expected) { - result = true - break - } - } - case "status": - s := a.Expected.(int) - result = s == req.HTTPResponse.StatusCode - case "error": - if aerr, ok := err.(awserr.Error); ok { - result = aerr.Code() == a.Expected.(string) - } - case "pathList": - // ignored matcher - default: - logf(client, "WARNING: Waiter for %s encountered unexpected matcher: %s", - w.Config.Operation, a.Matcher) - } - - if !result { - // If there was no matching result found there is nothing more to do - // for this response, retry the request. - continue - } - - switch a.State { - case "success": - // waiter completed - return nil - case "failure": - // Waiter failure state triggered - return awserr.New("ResourceNotReady", - fmt.Sprintf("failed waiting for successful resource state"), err) - case "retry": - // clear the error and retry the operation - err = nil - default: - logf(client, "WARNING: Waiter for %s encountered unexpected state: %s", - w.Config.Operation, a.State) - } - } - if err != nil { - return err - } - - time.Sleep(time.Second * time.Duration(w.Delay)) - } - - return awserr.New("ResourceNotReady", - fmt.Sprintf("exceeded %d wait attempts", w.MaxAttempts), nil) -} - -func logf(client reflect.Value, msg string, args ...interface{}) { - cfgVal := client.FieldByName("Config") - if !cfgVal.IsValid() { - return - } - if cfg, ok := cfgVal.Interface().(*aws.Config); ok && cfg.Logger != nil { - cfg.Logger.Log(fmt.Sprintf(msg, args...)) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go index cca681bfbe55..761d8ecee8ac 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go @@ -1,12 +1,12 @@ -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. -// Package autoscaling provides a client for Auto Scaling. package autoscaling import ( "fmt" "time" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awsutil" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/private/protocol" @@ -17,19 +17,18 @@ const opAttachInstances = "AttachInstances" // AttachInstancesRequest generates a "aws/request.Request" representing the // client's request for the AttachInstances operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. 
+// value will be populated with the request's response once the request complets +// successfuly. // -// See AttachInstances for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the AttachInstances method directly -// instead. +// See AttachInstances for more information on using the AttachInstances +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the AttachInstancesRequest method. // req, resp := client.AttachInstancesRequest(params) @@ -67,7 +66,7 @@ func (c *AutoScaling) AttachInstancesRequest(input *AttachInstancesInput) (req * // being attached plus the desired capacity of the group exceeds the maximum // size of the group, the operation fails. // -// If there is a Classic load balancer attached to your Auto Scaling group, +// If there is a Classic Load Balancer attached to your Auto Scaling group, // the instances are also registered with the load balancer. If there are target // groups attached to your Auto Scaling group, the instances are also registered // with the target groups. @@ -84,34 +83,48 @@ func (c *AutoScaling) AttachInstancesRequest(input *AttachInstancesInput) (req * // API operation AttachInstances for usage and error information. // // Returned Error Codes: -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). // // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/AttachInstances func (c *AutoScaling) AttachInstances(input *AttachInstancesInput) (*AttachInstancesOutput, error) { req, out := c.AttachInstancesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// AttachInstancesWithContext is the same as AttachInstances with the addition of +// the ability to pass a context and additional request options. +// +// See AttachInstances for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) AttachInstancesWithContext(ctx aws.Context, input *AttachInstancesInput, opts ...request.Option) (*AttachInstancesOutput, error) { + req, out := c.AttachInstancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opAttachLoadBalancerTargetGroups = "AttachLoadBalancerTargetGroups" // AttachLoadBalancerTargetGroupsRequest generates a "aws/request.Request" representing the // client's request for the AttachLoadBalancerTargetGroups operation. 
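A minimal sketch of the new *WithContext calls documented above, assuming Go 1.7+ (where context.Context satisfies aws.Context) and hypothetical Auto Scaling group and instance identifiers.

    package main

    import (
        "context"
        "fmt"
        "time"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/awserr"
        "github.com/aws/aws-sdk-go/aws/request"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/autoscaling"
    )

    func main() {
        sess := session.Must(session.NewSession())
        svc := autoscaling.New(sess)

        ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
        defer cancel()

        _, err := svc.AttachInstancesWithContext(ctx, &autoscaling.AttachInstancesInput{
            AutoScalingGroupName: aws.String("my-asg"),                         // hypothetical group
            InstanceIds:          []*string{aws.String("i-0123456789abcdef0")}, // hypothetical instance
        })
        if aerr, ok := err.(awserr.Error); ok && aerr.Code() == request.CanceledErrorCode {
            fmt.Println("request canceled or timed out:", aerr)
        }
    }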
The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See AttachLoadBalancerTargetGroups for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the AttachLoadBalancerTargetGroups method directly -// instead. +// See AttachLoadBalancerTargetGroups for more information on using the AttachLoadBalancerTargetGroups +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the AttachLoadBalancerTargetGroupsRequest method. // req, resp := client.AttachLoadBalancerTargetGroupsRequest(params) @@ -157,34 +170,48 @@ func (c *AutoScaling) AttachLoadBalancerTargetGroupsRequest(input *AttachLoadBal // API operation AttachLoadBalancerTargetGroups for usage and error information. // // Returned Error Codes: -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). // // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/AttachLoadBalancerTargetGroups func (c *AutoScaling) AttachLoadBalancerTargetGroups(input *AttachLoadBalancerTargetGroupsInput) (*AttachLoadBalancerTargetGroupsOutput, error) { req, out := c.AttachLoadBalancerTargetGroupsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// AttachLoadBalancerTargetGroupsWithContext is the same as AttachLoadBalancerTargetGroups with the addition of +// the ability to pass a context and additional request options. +// +// See AttachLoadBalancerTargetGroups for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) AttachLoadBalancerTargetGroupsWithContext(ctx aws.Context, input *AttachLoadBalancerTargetGroupsInput, opts ...request.Option) (*AttachLoadBalancerTargetGroupsOutput, error) { + req, out := c.AttachLoadBalancerTargetGroupsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opAttachLoadBalancers = "AttachLoadBalancers" // AttachLoadBalancersRequest generates a "aws/request.Request" representing the // client's request for the AttachLoadBalancers operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See AttachLoadBalancers for usage and error information. 
+// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the AttachLoadBalancers method directly -// instead. +// See AttachLoadBalancers for more information on using the AttachLoadBalancers +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the AttachLoadBalancersRequest method. // req, resp := client.AttachLoadBalancersRequest(params) @@ -213,10 +240,10 @@ func (c *AutoScaling) AttachLoadBalancersRequest(input *AttachLoadBalancersInput // AttachLoadBalancers API operation for Auto Scaling. // -// Attaches one or more Classic load balancers to the specified Auto Scaling +// Attaches one or more Classic Load Balancers to the specified Auto Scaling // group. // -// To attach an Application load balancer instead, see AttachLoadBalancerTargetGroups. +// To attach an Application Load Balancer instead, see AttachLoadBalancerTargetGroups. // // To describe the load balancers for an Auto Scaling group, use DescribeLoadBalancers. // To detach the load balancer from the Auto Scaling group, use DetachLoadBalancers. @@ -233,34 +260,48 @@ func (c *AutoScaling) AttachLoadBalancersRequest(input *AttachLoadBalancersInput // API operation AttachLoadBalancers for usage and error information. // // Returned Error Codes: -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). // // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/AttachLoadBalancers func (c *AutoScaling) AttachLoadBalancers(input *AttachLoadBalancersInput) (*AttachLoadBalancersOutput, error) { req, out := c.AttachLoadBalancersRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// AttachLoadBalancersWithContext is the same as AttachLoadBalancers with the addition of +// the ability to pass a context and additional request options. +// +// See AttachLoadBalancers for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) AttachLoadBalancersWithContext(ctx aws.Context, input *AttachLoadBalancersInput, opts ...request.Option) (*AttachLoadBalancersOutput, error) { + req, out := c.AttachLoadBalancersRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCompleteLifecycleAction = "CompleteLifecycleAction" // CompleteLifecycleActionRequest generates a "aws/request.Request" representing the // client's request for the CompleteLifecycleAction operation. 
The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See CompleteLifecycleAction for usage and error information. +// See CompleteLifecycleAction for more information on using the CompleteLifecycleAction +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CompleteLifecycleAction method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CompleteLifecycleActionRequest method. // req, resp := client.CompleteLifecycleActionRequest(params) @@ -321,34 +362,48 @@ func (c *AutoScaling) CompleteLifecycleActionRequest(input *CompleteLifecycleAct // API operation CompleteLifecycleAction for usage and error information. // // Returned Error Codes: -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). // // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/CompleteLifecycleAction func (c *AutoScaling) CompleteLifecycleAction(input *CompleteLifecycleActionInput) (*CompleteLifecycleActionOutput, error) { req, out := c.CompleteLifecycleActionRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CompleteLifecycleActionWithContext is the same as CompleteLifecycleAction with the addition of +// the ability to pass a context and additional request options. +// +// See CompleteLifecycleAction for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) CompleteLifecycleActionWithContext(ctx aws.Context, input *CompleteLifecycleActionInput, opts ...request.Option) (*CompleteLifecycleActionOutput, error) { + req, out := c.CompleteLifecycleActionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCreateAutoScalingGroup = "CreateAutoScalingGroup" // CreateAutoScalingGroupRequest generates a "aws/request.Request" representing the // client's request for the CreateAutoScalingGroup operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See CreateAutoScalingGroup for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateAutoScalingGroup method directly -// instead. +// See CreateAutoScalingGroup for more information on using the CreateAutoScalingGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateAutoScalingGroupRequest method. // req, resp := client.CreateAutoScalingGroupRequest(params) @@ -396,43 +451,57 @@ func (c *AutoScaling) CreateAutoScalingGroupRequest(input *CreateAutoScalingGrou // API operation CreateAutoScalingGroup for usage and error information. // // Returned Error Codes: -// * AlreadyExists +// * ErrCodeAlreadyExistsFault "AlreadyExists" // You already have an Auto Scaling group or launch configuration with this // name. // -// * LimitExceeded +// * ErrCodeLimitExceededFault "LimitExceeded" // You have already reached a limit for your Auto Scaling resources (for example, // groups, launch configurations, or lifecycle hooks). For more information, // see DescribeAccountLimits. // -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). // // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/CreateAutoScalingGroup func (c *AutoScaling) CreateAutoScalingGroup(input *CreateAutoScalingGroupInput) (*CreateAutoScalingGroupOutput, error) { req, out := c.CreateAutoScalingGroupRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CreateAutoScalingGroupWithContext is the same as CreateAutoScalingGroup with the addition of +// the ability to pass a context and additional request options. +// +// See CreateAutoScalingGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) CreateAutoScalingGroupWithContext(ctx aws.Context, input *CreateAutoScalingGroupInput, opts ...request.Option) (*CreateAutoScalingGroupOutput, error) { + req, out := c.CreateAutoScalingGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCreateLaunchConfiguration = "CreateLaunchConfiguration" // CreateLaunchConfigurationRequest generates a "aws/request.Request" representing the // client's request for the CreateLaunchConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. // -// See CreateLaunchConfiguration for usage and error information. +// See CreateLaunchConfiguration for more information on using the CreateLaunchConfiguration +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateLaunchConfiguration method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateLaunchConfigurationRequest method. // req, resp := client.CreateLaunchConfigurationRequest(params) @@ -480,43 +549,57 @@ func (c *AutoScaling) CreateLaunchConfigurationRequest(input *CreateLaunchConfig // API operation CreateLaunchConfiguration for usage and error information. // // Returned Error Codes: -// * AlreadyExists +// * ErrCodeAlreadyExistsFault "AlreadyExists" // You already have an Auto Scaling group or launch configuration with this // name. // -// * LimitExceeded +// * ErrCodeLimitExceededFault "LimitExceeded" // You have already reached a limit for your Auto Scaling resources (for example, // groups, launch configurations, or lifecycle hooks). For more information, // see DescribeAccountLimits. // -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). // // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/CreateLaunchConfiguration func (c *AutoScaling) CreateLaunchConfiguration(input *CreateLaunchConfigurationInput) (*CreateLaunchConfigurationOutput, error) { req, out := c.CreateLaunchConfigurationRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CreateLaunchConfigurationWithContext is the same as CreateLaunchConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See CreateLaunchConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) CreateLaunchConfigurationWithContext(ctx aws.Context, input *CreateLaunchConfigurationInput, opts ...request.Option) (*CreateLaunchConfigurationOutput, error) { + req, out := c.CreateLaunchConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCreateOrUpdateTags = "CreateOrUpdateTags" // CreateOrUpdateTagsRequest generates a "aws/request.Request" representing the // client's request for the CreateOrUpdateTags operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. 
// -// See CreateOrUpdateTags for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateOrUpdateTags method directly -// instead. +// See CreateOrUpdateTags for more information on using the CreateOrUpdateTags +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateOrUpdateTagsRequest method. // req, resp := client.CreateOrUpdateTagsRequest(params) @@ -563,43 +646,60 @@ func (c *AutoScaling) CreateOrUpdateTagsRequest(input *CreateOrUpdateTagsInput) // API operation CreateOrUpdateTags for usage and error information. // // Returned Error Codes: -// * LimitExceeded +// * ErrCodeLimitExceededFault "LimitExceeded" // You have already reached a limit for your Auto Scaling resources (for example, // groups, launch configurations, or lifecycle hooks). For more information, // see DescribeAccountLimits. // -// * AlreadyExists +// * ErrCodeAlreadyExistsFault "AlreadyExists" // You already have an Auto Scaling group or launch configuration with this // name. // -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). // +// * ErrCodeResourceInUseFault "ResourceInUse" +// The operation can't be performed because the resource is in use. +// // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/CreateOrUpdateTags func (c *AutoScaling) CreateOrUpdateTags(input *CreateOrUpdateTagsInput) (*CreateOrUpdateTagsOutput, error) { req, out := c.CreateOrUpdateTagsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CreateOrUpdateTagsWithContext is the same as CreateOrUpdateTags with the addition of +// the ability to pass a context and additional request options. +// +// See CreateOrUpdateTags for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) CreateOrUpdateTagsWithContext(ctx aws.Context, input *CreateOrUpdateTagsInput, opts ...request.Option) (*CreateOrUpdateTagsOutput, error) { + req, out := c.CreateOrUpdateTagsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteAutoScalingGroup = "DeleteAutoScalingGroup" // DeleteAutoScalingGroupRequest generates a "aws/request.Request" representing the // client's request for the DeleteAutoScalingGroup operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. 
+// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DeleteAutoScalingGroup for usage and error information. +// See DeleteAutoScalingGroup for more information on using the DeleteAutoScalingGroup +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteAutoScalingGroup method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteAutoScalingGroupRequest method. // req, resp := client.DeleteAutoScalingGroupRequest(params) @@ -654,41 +754,55 @@ func (c *AutoScaling) DeleteAutoScalingGroupRequest(input *DeleteAutoScalingGrou // API operation DeleteAutoScalingGroup for usage and error information. // // Returned Error Codes: -// * ScalingActivityInProgress +// * ErrCodeScalingActivityInProgressFault "ScalingActivityInProgress" // The operation can't be performed because there are scaling activities in // progress. // -// * ResourceInUse +// * ErrCodeResourceInUseFault "ResourceInUse" // The operation can't be performed because the resource is in use. // -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). // // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DeleteAutoScalingGroup func (c *AutoScaling) DeleteAutoScalingGroup(input *DeleteAutoScalingGroupInput) (*DeleteAutoScalingGroupOutput, error) { req, out := c.DeleteAutoScalingGroupRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteAutoScalingGroupWithContext is the same as DeleteAutoScalingGroup with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteAutoScalingGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) DeleteAutoScalingGroupWithContext(ctx aws.Context, input *DeleteAutoScalingGroupInput, opts ...request.Option) (*DeleteAutoScalingGroupOutput, error) { + req, out := c.DeleteAutoScalingGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteLaunchConfiguration = "DeleteLaunchConfiguration" // DeleteLaunchConfigurationRequest generates a "aws/request.Request" representing the // client's request for the DeleteLaunchConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. 
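// A hedged sketch of matching the generated error-code constants referenced
// above against a returned error, reusing the illustrative "svc" client from
// the earlier sketch; the group name is a placeholder.
//
//    import "github.com/aws/aws-sdk-go/aws/awserr"
//
//    _, err := svc.DeleteAutoScalingGroup(&autoscaling.DeleteAutoScalingGroupInput{
//        AutoScalingGroupName: aws.String("my-asg"),
//    })
//    if aerr, ok := err.(awserr.Error); ok {
//        switch aerr.Code() {
//        case autoscaling.ErrCodeScalingActivityInProgressFault:
//            // scaling activities are still running; retry later
//        case autoscaling.ErrCodeResourceInUseFault:
//            // the group still has dependent resources
//        case autoscaling.ErrCodeResourceContentionFault:
//            // another update to this resource is pending
//        }
//    }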
+// value will be populated with the request's response once the request complets +// successfuly. // -// See DeleteLaunchConfiguration for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteLaunchConfiguration method directly -// instead. +// See DeleteLaunchConfiguration for more information on using the DeleteLaunchConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteLaunchConfigurationRequest method. // req, resp := client.DeleteLaunchConfigurationRequest(params) @@ -733,37 +847,51 @@ func (c *AutoScaling) DeleteLaunchConfigurationRequest(input *DeleteLaunchConfig // API operation DeleteLaunchConfiguration for usage and error information. // // Returned Error Codes: -// * ResourceInUse +// * ErrCodeResourceInUseFault "ResourceInUse" // The operation can't be performed because the resource is in use. // -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). // // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DeleteLaunchConfiguration func (c *AutoScaling) DeleteLaunchConfiguration(input *DeleteLaunchConfigurationInput) (*DeleteLaunchConfigurationOutput, error) { req, out := c.DeleteLaunchConfigurationRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteLaunchConfigurationWithContext is the same as DeleteLaunchConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteLaunchConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) DeleteLaunchConfigurationWithContext(ctx aws.Context, input *DeleteLaunchConfigurationInput, opts ...request.Option) (*DeleteLaunchConfigurationOutput, error) { + req, out := c.DeleteLaunchConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteLifecycleHook = "DeleteLifecycleHook" // DeleteLifecycleHookRequest generates a "aws/request.Request" representing the // client's request for the DeleteLifecycleHook operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. // -// See DeleteLifecycleHook for usage and error information. +// See DeleteLifecycleHook for more information on using the DeleteLifecycleHook +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteLifecycleHook method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteLifecycleHookRequest method. // req, resp := client.DeleteLifecycleHookRequest(params) @@ -805,34 +933,48 @@ func (c *AutoScaling) DeleteLifecycleHookRequest(input *DeleteLifecycleHookInput // API operation DeleteLifecycleHook for usage and error information. // // Returned Error Codes: -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). // // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DeleteLifecycleHook func (c *AutoScaling) DeleteLifecycleHook(input *DeleteLifecycleHookInput) (*DeleteLifecycleHookOutput, error) { req, out := c.DeleteLifecycleHookRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteLifecycleHookWithContext is the same as DeleteLifecycleHook with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteLifecycleHook for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) DeleteLifecycleHookWithContext(ctx aws.Context, input *DeleteLifecycleHookInput, opts ...request.Option) (*DeleteLifecycleHookOutput, error) { + req, out := c.DeleteLifecycleHookRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteNotificationConfiguration = "DeleteNotificationConfiguration" // DeleteNotificationConfigurationRequest generates a "aws/request.Request" representing the // client's request for the DeleteNotificationConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DeleteNotificationConfiguration for usage and error information. +// See DeleteNotificationConfiguration for more information on using the DeleteNotificationConfiguration +// API call, and error handling. 
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteNotificationConfiguration method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteNotificationConfigurationRequest method. // req, resp := client.DeleteNotificationConfigurationRequest(params) @@ -873,34 +1015,48 @@ func (c *AutoScaling) DeleteNotificationConfigurationRequest(input *DeleteNotifi // API operation DeleteNotificationConfiguration for usage and error information. // // Returned Error Codes: -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). // // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DeleteNotificationConfiguration func (c *AutoScaling) DeleteNotificationConfiguration(input *DeleteNotificationConfigurationInput) (*DeleteNotificationConfigurationOutput, error) { req, out := c.DeleteNotificationConfigurationRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteNotificationConfigurationWithContext is the same as DeleteNotificationConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteNotificationConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) DeleteNotificationConfigurationWithContext(ctx aws.Context, input *DeleteNotificationConfigurationInput, opts ...request.Option) (*DeleteNotificationConfigurationOutput, error) { + req, out := c.DeleteNotificationConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeletePolicy = "DeletePolicy" // DeletePolicyRequest generates a "aws/request.Request" representing the // client's request for the DeletePolicy operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DeletePolicy for usage and error information. +// See DeletePolicy for more information on using the DeletePolicy +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. 
If -// you just want the service response, call the DeletePolicy method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeletePolicyRequest method. // req, resp := client.DeletePolicyRequest(params) @@ -944,34 +1100,48 @@ func (c *AutoScaling) DeletePolicyRequest(input *DeletePolicyInput) (req *reques // API operation DeletePolicy for usage and error information. // // Returned Error Codes: -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). // // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DeletePolicy func (c *AutoScaling) DeletePolicy(input *DeletePolicyInput) (*DeletePolicyOutput, error) { req, out := c.DeletePolicyRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeletePolicyWithContext is the same as DeletePolicy with the addition of +// the ability to pass a context and additional request options. +// +// See DeletePolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) DeletePolicyWithContext(ctx aws.Context, input *DeletePolicyInput, opts ...request.Option) (*DeletePolicyOutput, error) { + req, out := c.DeletePolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteScheduledAction = "DeleteScheduledAction" // DeleteScheduledActionRequest generates a "aws/request.Request" representing the // client's request for the DeleteScheduledAction operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DeleteScheduledAction for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteScheduledAction method directly -// instead. +// See DeleteScheduledAction for more information on using the DeleteScheduledAction +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteScheduledActionRequest method. 
// req, resp := client.DeleteScheduledActionRequest(params) @@ -1012,34 +1182,48 @@ func (c *AutoScaling) DeleteScheduledActionRequest(input *DeleteScheduledActionI // API operation DeleteScheduledAction for usage and error information. // // Returned Error Codes: -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). // // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DeleteScheduledAction func (c *AutoScaling) DeleteScheduledAction(input *DeleteScheduledActionInput) (*DeleteScheduledActionOutput, error) { req, out := c.DeleteScheduledActionRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteScheduledActionWithContext is the same as DeleteScheduledAction with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteScheduledAction for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) DeleteScheduledActionWithContext(ctx aws.Context, input *DeleteScheduledActionInput, opts ...request.Option) (*DeleteScheduledActionOutput, error) { + req, out := c.DeleteScheduledActionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteTags = "DeleteTags" // DeleteTagsRequest generates a "aws/request.Request" representing the // client's request for the DeleteTags operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DeleteTags for usage and error information. +// See DeleteTags for more information on using the DeleteTags +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteTags method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteTagsRequest method. // req, resp := client.DeleteTagsRequest(params) @@ -1080,34 +1264,51 @@ func (c *AutoScaling) DeleteTagsRequest(input *DeleteTagsInput) (req *request.Re // API operation DeleteTags for usage and error information. // // Returned Error Codes: -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). 
// +// * ErrCodeResourceInUseFault "ResourceInUse" +// The operation can't be performed because the resource is in use. +// // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DeleteTags func (c *AutoScaling) DeleteTags(input *DeleteTagsInput) (*DeleteTagsOutput, error) { req, out := c.DeleteTagsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteTagsWithContext is the same as DeleteTags with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteTags for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) DeleteTagsWithContext(ctx aws.Context, input *DeleteTagsInput, opts ...request.Option) (*DeleteTagsOutput, error) { + req, out := c.DeleteTagsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeAccountLimits = "DescribeAccountLimits" // DescribeAccountLimitsRequest generates a "aws/request.Request" representing the // client's request for the DescribeAccountLimits operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeAccountLimits for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeAccountLimits method directly -// instead. +// See DescribeAccountLimits for more information on using the DescribeAccountLimits +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeAccountLimitsRequest method. // req, resp := client.DescribeAccountLimitsRequest(params) @@ -1150,34 +1351,48 @@ func (c *AutoScaling) DescribeAccountLimitsRequest(input *DescribeAccountLimitsI // API operation DescribeAccountLimits for usage and error information. // // Returned Error Codes: -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). 
// // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribeAccountLimits func (c *AutoScaling) DescribeAccountLimits(input *DescribeAccountLimitsInput) (*DescribeAccountLimitsOutput, error) { req, out := c.DescribeAccountLimitsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeAccountLimitsWithContext is the same as DescribeAccountLimits with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeAccountLimits for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) DescribeAccountLimitsWithContext(ctx aws.Context, input *DescribeAccountLimitsInput, opts ...request.Option) (*DescribeAccountLimitsOutput, error) { + req, out := c.DescribeAccountLimitsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeAdjustmentTypes = "DescribeAdjustmentTypes" // DescribeAdjustmentTypesRequest generates a "aws/request.Request" representing the // client's request for the DescribeAdjustmentTypes operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DescribeAdjustmentTypes for usage and error information. +// See DescribeAdjustmentTypes for more information on using the DescribeAdjustmentTypes +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeAdjustmentTypes method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeAdjustmentTypesRequest method. // req, resp := client.DescribeAdjustmentTypesRequest(params) @@ -1216,34 +1431,48 @@ func (c *AutoScaling) DescribeAdjustmentTypesRequest(input *DescribeAdjustmentTy // API operation DescribeAdjustmentTypes for usage and error information. // // Returned Error Codes: -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). 
// // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribeAdjustmentTypes func (c *AutoScaling) DescribeAdjustmentTypes(input *DescribeAdjustmentTypesInput) (*DescribeAdjustmentTypesOutput, error) { req, out := c.DescribeAdjustmentTypesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeAdjustmentTypesWithContext is the same as DescribeAdjustmentTypes with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeAdjustmentTypes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) DescribeAdjustmentTypesWithContext(ctx aws.Context, input *DescribeAdjustmentTypesInput, opts ...request.Option) (*DescribeAdjustmentTypesOutput, error) { + req, out := c.DescribeAdjustmentTypesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeAutoScalingGroups = "DescribeAutoScalingGroups" // DescribeAutoScalingGroupsRequest generates a "aws/request.Request" representing the // client's request for the DescribeAutoScalingGroups operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeAutoScalingGroups for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeAutoScalingGroups method directly -// instead. +// See DescribeAutoScalingGroups for more information on using the DescribeAutoScalingGroups +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeAutoScalingGroupsRequest method. // req, resp := client.DescribeAutoScalingGroupsRequest(params) @@ -1288,18 +1517,33 @@ func (c *AutoScaling) DescribeAutoScalingGroupsRequest(input *DescribeAutoScalin // API operation DescribeAutoScalingGroups for usage and error information. // // Returned Error Codes: -// * InvalidNextToken +// * ErrCodeInvalidNextToken "InvalidNextToken" // The NextToken value is not valid. // -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). 
// // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribeAutoScalingGroups func (c *AutoScaling) DescribeAutoScalingGroups(input *DescribeAutoScalingGroupsInput) (*DescribeAutoScalingGroupsOutput, error) { req, out := c.DescribeAutoScalingGroupsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeAutoScalingGroupsWithContext is the same as DescribeAutoScalingGroups with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeAutoScalingGroups for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) DescribeAutoScalingGroupsWithContext(ctx aws.Context, input *DescribeAutoScalingGroupsInput, opts ...request.Option) (*DescribeAutoScalingGroupsOutput, error) { + req, out := c.DescribeAutoScalingGroupsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } // DescribeAutoScalingGroupsPages iterates over the pages of a DescribeAutoScalingGroups operation, @@ -1319,31 +1563,55 @@ func (c *AutoScaling) DescribeAutoScalingGroups(input *DescribeAutoScalingGroups // return pageNum <= 3 // }) // -func (c *AutoScaling) DescribeAutoScalingGroupsPages(input *DescribeAutoScalingGroupsInput, fn func(p *DescribeAutoScalingGroupsOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.DescribeAutoScalingGroupsRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*DescribeAutoScalingGroupsOutput), lastPage) - }) +func (c *AutoScaling) DescribeAutoScalingGroupsPages(input *DescribeAutoScalingGroupsInput, fn func(*DescribeAutoScalingGroupsOutput, bool) bool) error { + return c.DescribeAutoScalingGroupsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeAutoScalingGroupsPagesWithContext same as DescribeAutoScalingGroupsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) DescribeAutoScalingGroupsPagesWithContext(ctx aws.Context, input *DescribeAutoScalingGroupsInput, fn func(*DescribeAutoScalingGroupsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeAutoScalingGroupsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeAutoScalingGroupsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*DescribeAutoScalingGroupsOutput), !p.HasNextPage()) + } + return p.Err() } const opDescribeAutoScalingInstances = "DescribeAutoScalingInstances" // DescribeAutoScalingInstancesRequest generates a "aws/request.Request" representing the // client's request for the DescribeAutoScalingInstances operation. 
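// A hedged sketch of the reworked pagination helpers shown above, reusing the
// illustrative "svc" client and "ctx" from the earlier sketches; the printed
// field is only an example.
//
//    import "fmt"
//
//    // One callback per page; returning false stops iteration early.
//    err := svc.DescribeAutoScalingGroupsPages(&autoscaling.DescribeAutoScalingGroupsInput{},
//        func(page *autoscaling.DescribeAutoScalingGroupsOutput, lastPage bool) bool {
//            for _, g := range page.AutoScalingGroups {
//                fmt.Println(aws.StringValue(g.AutoScalingGroupName))
//            }
//            return !lastPage
//        })
//
//    // DescribeAutoScalingGroupsPagesWithContext takes the same callback plus
//    // a leading aws.Context (satisfied by a standard context.Context).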
The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeAutoScalingInstances for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeAutoScalingInstances method directly -// instead. +// See DescribeAutoScalingInstances for more information on using the DescribeAutoScalingInstances +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeAutoScalingInstancesRequest method. // req, resp := client.DescribeAutoScalingInstancesRequest(params) @@ -1388,18 +1656,33 @@ func (c *AutoScaling) DescribeAutoScalingInstancesRequest(input *DescribeAutoSca // API operation DescribeAutoScalingInstances for usage and error information. // // Returned Error Codes: -// * InvalidNextToken +// * ErrCodeInvalidNextToken "InvalidNextToken" // The NextToken value is not valid. // -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). // // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribeAutoScalingInstances func (c *AutoScaling) DescribeAutoScalingInstances(input *DescribeAutoScalingInstancesInput) (*DescribeAutoScalingInstancesOutput, error) { req, out := c.DescribeAutoScalingInstancesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeAutoScalingInstancesWithContext is the same as DescribeAutoScalingInstances with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeAutoScalingInstances for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) DescribeAutoScalingInstancesWithContext(ctx aws.Context, input *DescribeAutoScalingInstancesInput, opts ...request.Option) (*DescribeAutoScalingInstancesOutput, error) { + req, out := c.DescribeAutoScalingInstancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() } // DescribeAutoScalingInstancesPages iterates over the pages of a DescribeAutoScalingInstances operation, @@ -1419,31 +1702,55 @@ func (c *AutoScaling) DescribeAutoScalingInstances(input *DescribeAutoScalingIns // return pageNum <= 3 // }) // -func (c *AutoScaling) DescribeAutoScalingInstancesPages(input *DescribeAutoScalingInstancesInput, fn func(p *DescribeAutoScalingInstancesOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.DescribeAutoScalingInstancesRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*DescribeAutoScalingInstancesOutput), lastPage) - }) +func (c *AutoScaling) DescribeAutoScalingInstancesPages(input *DescribeAutoScalingInstancesInput, fn func(*DescribeAutoScalingInstancesOutput, bool) bool) error { + return c.DescribeAutoScalingInstancesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeAutoScalingInstancesPagesWithContext same as DescribeAutoScalingInstancesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) DescribeAutoScalingInstancesPagesWithContext(ctx aws.Context, input *DescribeAutoScalingInstancesInput, fn func(*DescribeAutoScalingInstancesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeAutoScalingInstancesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeAutoScalingInstancesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*DescribeAutoScalingInstancesOutput), !p.HasNextPage()) + } + return p.Err() } const opDescribeAutoScalingNotificationTypes = "DescribeAutoScalingNotificationTypes" // DescribeAutoScalingNotificationTypesRequest generates a "aws/request.Request" representing the // client's request for the DescribeAutoScalingNotificationTypes operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DescribeAutoScalingNotificationTypes for usage and error information. +// See DescribeAutoScalingNotificationTypes for more information on using the DescribeAutoScalingNotificationTypes +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeAutoScalingNotificationTypes method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeAutoScalingNotificationTypesRequest method. // req, resp := client.DescribeAutoScalingNotificationTypesRequest(params) @@ -1482,34 +1789,48 @@ func (c *AutoScaling) DescribeAutoScalingNotificationTypesRequest(input *Describ // API operation DescribeAutoScalingNotificationTypes for usage and error information. // // Returned Error Codes: -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). // // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribeAutoScalingNotificationTypes func (c *AutoScaling) DescribeAutoScalingNotificationTypes(input *DescribeAutoScalingNotificationTypesInput) (*DescribeAutoScalingNotificationTypesOutput, error) { req, out := c.DescribeAutoScalingNotificationTypesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeAutoScalingNotificationTypesWithContext is the same as DescribeAutoScalingNotificationTypes with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeAutoScalingNotificationTypes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) DescribeAutoScalingNotificationTypesWithContext(ctx aws.Context, input *DescribeAutoScalingNotificationTypesInput, opts ...request.Option) (*DescribeAutoScalingNotificationTypesOutput, error) { + req, out := c.DescribeAutoScalingNotificationTypesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeLaunchConfigurations = "DescribeLaunchConfigurations" // DescribeLaunchConfigurationsRequest generates a "aws/request.Request" representing the // client's request for the DescribeLaunchConfigurations operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DescribeLaunchConfigurations for usage and error information. +// See DescribeLaunchConfigurations for more information on using the DescribeLaunchConfigurations +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeLaunchConfigurations method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. 
// // // Example sending a request using the DescribeLaunchConfigurationsRequest method. // req, resp := client.DescribeLaunchConfigurationsRequest(params) @@ -1554,18 +1875,33 @@ func (c *AutoScaling) DescribeLaunchConfigurationsRequest(input *DescribeLaunchC // API operation DescribeLaunchConfigurations for usage and error information. // // Returned Error Codes: -// * InvalidNextToken +// * ErrCodeInvalidNextToken "InvalidNextToken" // The NextToken value is not valid. // -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). // // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribeLaunchConfigurations func (c *AutoScaling) DescribeLaunchConfigurations(input *DescribeLaunchConfigurationsInput) (*DescribeLaunchConfigurationsOutput, error) { req, out := c.DescribeLaunchConfigurationsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeLaunchConfigurationsWithContext is the same as DescribeLaunchConfigurations with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeLaunchConfigurations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) DescribeLaunchConfigurationsWithContext(ctx aws.Context, input *DescribeLaunchConfigurationsInput, opts ...request.Option) (*DescribeLaunchConfigurationsOutput, error) { + req, out := c.DescribeLaunchConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } // DescribeLaunchConfigurationsPages iterates over the pages of a DescribeLaunchConfigurations operation, @@ -1585,31 +1921,55 @@ func (c *AutoScaling) DescribeLaunchConfigurations(input *DescribeLaunchConfigur // return pageNum <= 3 // }) // -func (c *AutoScaling) DescribeLaunchConfigurationsPages(input *DescribeLaunchConfigurationsInput, fn func(p *DescribeLaunchConfigurationsOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.DescribeLaunchConfigurationsRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*DescribeLaunchConfigurationsOutput), lastPage) - }) +func (c *AutoScaling) DescribeLaunchConfigurationsPages(input *DescribeLaunchConfigurationsInput, fn func(*DescribeLaunchConfigurationsOutput, bool) bool) error { + return c.DescribeLaunchConfigurationsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeLaunchConfigurationsPagesWithContext same as DescribeLaunchConfigurationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
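As a rough, self-contained sketch of the pagination helpers documented above (the client setup and the MaxRecords value are illustrative assumptions, not part of this change):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func main() {
	// Assumes credentials and region come from the environment / shared config.
	svc := autoscaling.New(session.Must(session.NewSession()))

	// Collect every launch configuration name, one page at a time.
	var names []string
	err := svc.DescribeLaunchConfigurationsPages(&autoscaling.DescribeLaunchConfigurationsInput{
		MaxRecords: aws.Int64(100),
	}, func(page *autoscaling.DescribeLaunchConfigurationsOutput, lastPage bool) bool {
		for _, lc := range page.LaunchConfigurations {
			names = append(names, aws.StringValue(lc.LaunchConfigurationName))
		}
		return true // continue until the last page
	})
	if err != nil {
		fmt.Println("DescribeLaunchConfigurations failed:", err)
		return
	}
	fmt.Println(names)
}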
+func (c *AutoScaling) DescribeLaunchConfigurationsPagesWithContext(ctx aws.Context, input *DescribeLaunchConfigurationsInput, fn func(*DescribeLaunchConfigurationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeLaunchConfigurationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeLaunchConfigurationsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*DescribeLaunchConfigurationsOutput), !p.HasNextPage()) + } + return p.Err() } const opDescribeLifecycleHookTypes = "DescribeLifecycleHookTypes" // DescribeLifecycleHookTypesRequest generates a "aws/request.Request" representing the // client's request for the DescribeLifecycleHookTypes operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DescribeLifecycleHookTypes for usage and error information. +// See DescribeLifecycleHookTypes for more information on using the DescribeLifecycleHookTypes +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeLifecycleHookTypes method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeLifecycleHookTypesRequest method. // req, resp := client.DescribeLifecycleHookTypesRequest(params) @@ -1648,34 +2008,48 @@ func (c *AutoScaling) DescribeLifecycleHookTypesRequest(input *DescribeLifecycle // API operation DescribeLifecycleHookTypes for usage and error information. // // Returned Error Codes: -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). // // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribeLifecycleHookTypes func (c *AutoScaling) DescribeLifecycleHookTypes(input *DescribeLifecycleHookTypesInput) (*DescribeLifecycleHookTypesOutput, error) { req, out := c.DescribeLifecycleHookTypesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeLifecycleHookTypesWithContext is the same as DescribeLifecycleHookTypes with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeLifecycleHookTypes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) DescribeLifecycleHookTypesWithContext(ctx aws.Context, input *DescribeLifecycleHookTypesInput, opts ...request.Option) (*DescribeLifecycleHookTypesOutput, error) { + req, out := c.DescribeLifecycleHookTypesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeLifecycleHooks = "DescribeLifecycleHooks" // DescribeLifecycleHooksRequest generates a "aws/request.Request" representing the // client's request for the DescribeLifecycleHooks operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeLifecycleHooks for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeLifecycleHooks method directly -// instead. +// See DescribeLifecycleHooks for more information on using the DescribeLifecycleHooks +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeLifecycleHooksRequest method. // req, resp := client.DescribeLifecycleHooksRequest(params) @@ -1714,34 +2088,48 @@ func (c *AutoScaling) DescribeLifecycleHooksRequest(input *DescribeLifecycleHook // API operation DescribeLifecycleHooks for usage and error information. // // Returned Error Codes: -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). // // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribeLifecycleHooks func (c *AutoScaling) DescribeLifecycleHooks(input *DescribeLifecycleHooksInput) (*DescribeLifecycleHooksOutput, error) { req, out := c.DescribeLifecycleHooksRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeLifecycleHooksWithContext is the same as DescribeLifecycleHooks with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeLifecycleHooks for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) DescribeLifecycleHooksWithContext(ctx aws.Context, input *DescribeLifecycleHooksInput, opts ...request.Option) (*DescribeLifecycleHooksOutput, error) { + req, out := c.DescribeLifecycleHooksRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() } const opDescribeLoadBalancerTargetGroups = "DescribeLoadBalancerTargetGroups" // DescribeLoadBalancerTargetGroupsRequest generates a "aws/request.Request" representing the // client's request for the DescribeLoadBalancerTargetGroups operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DescribeLoadBalancerTargetGroups for usage and error information. +// See DescribeLoadBalancerTargetGroups for more information on using the DescribeLoadBalancerTargetGroups +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeLoadBalancerTargetGroups method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeLoadBalancerTargetGroupsRequest method. // req, resp := client.DescribeLoadBalancerTargetGroupsRequest(params) @@ -1780,34 +2168,48 @@ func (c *AutoScaling) DescribeLoadBalancerTargetGroupsRequest(input *DescribeLoa // API operation DescribeLoadBalancerTargetGroups for usage and error information. // // Returned Error Codes: -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). // // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribeLoadBalancerTargetGroups func (c *AutoScaling) DescribeLoadBalancerTargetGroups(input *DescribeLoadBalancerTargetGroupsInput) (*DescribeLoadBalancerTargetGroupsOutput, error) { req, out := c.DescribeLoadBalancerTargetGroupsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeLoadBalancerTargetGroupsWithContext is the same as DescribeLoadBalancerTargetGroups with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeLoadBalancerTargetGroups for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) DescribeLoadBalancerTargetGroupsWithContext(ctx aws.Context, input *DescribeLoadBalancerTargetGroupsInput, opts ...request.Option) (*DescribeLoadBalancerTargetGroupsOutput, error) { + req, out := c.DescribeLoadBalancerTargetGroupsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() } const opDescribeLoadBalancers = "DescribeLoadBalancers" // DescribeLoadBalancersRequest generates a "aws/request.Request" representing the // client's request for the DescribeLoadBalancers operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeLoadBalancers for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeLoadBalancers method directly -// instead. +// See DescribeLoadBalancers for more information on using the DescribeLoadBalancers +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeLoadBalancersRequest method. // req, resp := client.DescribeLoadBalancersRequest(params) @@ -1838,8 +2240,8 @@ func (c *AutoScaling) DescribeLoadBalancersRequest(input *DescribeLoadBalancersI // // Describes the load balancers for the specified Auto Scaling group. // -// Note that this operation describes only Classic load balancers. If you have -// Application load balancers, use DescribeLoadBalancerTargetGroups instead. +// Note that this operation describes only Classic Load Balancers. If you have +// Application Load Balancers, use DescribeLoadBalancerTargetGroups instead. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1849,34 +2251,48 @@ func (c *AutoScaling) DescribeLoadBalancersRequest(input *DescribeLoadBalancersI // API operation DescribeLoadBalancers for usage and error information. // // Returned Error Codes: -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). // // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribeLoadBalancers func (c *AutoScaling) DescribeLoadBalancers(input *DescribeLoadBalancersInput) (*DescribeLoadBalancersOutput, error) { req, out := c.DescribeLoadBalancersRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeLoadBalancersWithContext is the same as DescribeLoadBalancers with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeLoadBalancers for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
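A minimal sketch of the context-aware variant described above, assuming svc is an already configured *autoscaling.AutoScaling client; the timeout value is arbitrary:

package asgexamples

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

// describeLoadBalancers lists the Classic Load Balancers attached to a group,
// cancelling the call if it runs longer than the timeout.
func describeLoadBalancers(svc *autoscaling.AutoScaling, group string, timeout time.Duration) (*autoscaling.DescribeLoadBalancersOutput, error) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	return svc.DescribeLoadBalancersWithContext(ctx, &autoscaling.DescribeLoadBalancersInput{
		AutoScalingGroupName: aws.String(group),
	})
}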
+func (c *AutoScaling) DescribeLoadBalancersWithContext(ctx aws.Context, input *DescribeLoadBalancersInput, opts ...request.Option) (*DescribeLoadBalancersOutput, error) { + req, out := c.DescribeLoadBalancersRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeMetricCollectionTypes = "DescribeMetricCollectionTypes" // DescribeMetricCollectionTypesRequest generates a "aws/request.Request" representing the // client's request for the DescribeMetricCollectionTypes operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DescribeMetricCollectionTypes for usage and error information. +// See DescribeMetricCollectionTypes for more information on using the DescribeMetricCollectionTypes +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeMetricCollectionTypes method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeMetricCollectionTypesRequest method. // req, resp := client.DescribeMetricCollectionTypesRequest(params) @@ -1918,34 +2334,48 @@ func (c *AutoScaling) DescribeMetricCollectionTypesRequest(input *DescribeMetric // API operation DescribeMetricCollectionTypes for usage and error information. // // Returned Error Codes: -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). // // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribeMetricCollectionTypes func (c *AutoScaling) DescribeMetricCollectionTypes(input *DescribeMetricCollectionTypesInput) (*DescribeMetricCollectionTypesOutput, error) { req, out := c.DescribeMetricCollectionTypesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeMetricCollectionTypesWithContext is the same as DescribeMetricCollectionTypes with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeMetricCollectionTypes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *AutoScaling) DescribeMetricCollectionTypesWithContext(ctx aws.Context, input *DescribeMetricCollectionTypesInput, opts ...request.Option) (*DescribeMetricCollectionTypesOutput, error) { + req, out := c.DescribeMetricCollectionTypesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeNotificationConfigurations = "DescribeNotificationConfigurations" // DescribeNotificationConfigurationsRequest generates a "aws/request.Request" representing the // client's request for the DescribeNotificationConfigurations operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DescribeNotificationConfigurations for usage and error information. +// See DescribeNotificationConfigurations for more information on using the DescribeNotificationConfigurations +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeNotificationConfigurations method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeNotificationConfigurationsRequest method. // req, resp := client.DescribeNotificationConfigurationsRequest(params) @@ -1991,18 +2421,33 @@ func (c *AutoScaling) DescribeNotificationConfigurationsRequest(input *DescribeN // API operation DescribeNotificationConfigurations for usage and error information. // // Returned Error Codes: -// * InvalidNextToken +// * ErrCodeInvalidNextToken "InvalidNextToken" // The NextToken value is not valid. // -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). // // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribeNotificationConfigurations func (c *AutoScaling) DescribeNotificationConfigurations(input *DescribeNotificationConfigurationsInput) (*DescribeNotificationConfigurationsOutput, error) { req, out := c.DescribeNotificationConfigurationsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeNotificationConfigurationsWithContext is the same as DescribeNotificationConfigurations with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeNotificationConfigurations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *AutoScaling) DescribeNotificationConfigurationsWithContext(ctx aws.Context, input *DescribeNotificationConfigurationsInput, opts ...request.Option) (*DescribeNotificationConfigurationsOutput, error) { + req, out := c.DescribeNotificationConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } // DescribeNotificationConfigurationsPages iterates over the pages of a DescribeNotificationConfigurations operation, @@ -2022,31 +2467,55 @@ func (c *AutoScaling) DescribeNotificationConfigurations(input *DescribeNotifica // return pageNum <= 3 // }) // -func (c *AutoScaling) DescribeNotificationConfigurationsPages(input *DescribeNotificationConfigurationsInput, fn func(p *DescribeNotificationConfigurationsOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.DescribeNotificationConfigurationsRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*DescribeNotificationConfigurationsOutput), lastPage) - }) +func (c *AutoScaling) DescribeNotificationConfigurationsPages(input *DescribeNotificationConfigurationsInput, fn func(*DescribeNotificationConfigurationsOutput, bool) bool) error { + return c.DescribeNotificationConfigurationsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeNotificationConfigurationsPagesWithContext same as DescribeNotificationConfigurationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) DescribeNotificationConfigurationsPagesWithContext(ctx aws.Context, input *DescribeNotificationConfigurationsInput, fn func(*DescribeNotificationConfigurationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeNotificationConfigurationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeNotificationConfigurationsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*DescribeNotificationConfigurationsOutput), !p.HasNextPage()) + } + return p.Err() } const opDescribePolicies = "DescribePolicies" // DescribePoliciesRequest generates a "aws/request.Request" representing the // client's request for the DescribePolicies operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DescribePolicies for usage and error information. +// See DescribePolicies for more information on using the DescribePolicies +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. 
If -// you just want the service response, call the DescribePolicies method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribePoliciesRequest method. // req, resp := client.DescribePoliciesRequest(params) @@ -2091,18 +2560,33 @@ func (c *AutoScaling) DescribePoliciesRequest(input *DescribePoliciesInput) (req // API operation DescribePolicies for usage and error information. // // Returned Error Codes: -// * InvalidNextToken +// * ErrCodeInvalidNextToken "InvalidNextToken" // The NextToken value is not valid. // -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). // // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribePolicies func (c *AutoScaling) DescribePolicies(input *DescribePoliciesInput) (*DescribePoliciesOutput, error) { req, out := c.DescribePoliciesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribePoliciesWithContext is the same as DescribePolicies with the addition of +// the ability to pass a context and additional request options. +// +// See DescribePolicies for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) DescribePoliciesWithContext(ctx aws.Context, input *DescribePoliciesInput, opts ...request.Option) (*DescribePoliciesOutput, error) { + req, out := c.DescribePoliciesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } // DescribePoliciesPages iterates over the pages of a DescribePolicies operation, @@ -2122,31 +2606,55 @@ func (c *AutoScaling) DescribePolicies(input *DescribePoliciesInput) (*DescribeP // return pageNum <= 3 // }) // -func (c *AutoScaling) DescribePoliciesPages(input *DescribePoliciesInput, fn func(p *DescribePoliciesOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.DescribePoliciesRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*DescribePoliciesOutput), lastPage) - }) +func (c *AutoScaling) DescribePoliciesPages(input *DescribePoliciesInput, fn func(*DescribePoliciesOutput, bool) bool) error { + return c.DescribePoliciesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribePoliciesPagesWithContext same as DescribePoliciesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
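A sketch of the *PagesWithContext form documented above; svc is an assumed configured client, and the early return shows how the callback can stop pagination before the last page:

package asgexamples

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

// firstNPolicyNames walks DescribePolicies pages under a deadline and
// returns at most n scaling policy names.
func firstNPolicyNames(svc *autoscaling.AutoScaling, group string, n int) ([]string, error) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	var names []string
	err := svc.DescribePoliciesPagesWithContext(ctx, &autoscaling.DescribePoliciesInput{
		AutoScalingGroupName: aws.String(group),
	}, func(page *autoscaling.DescribePoliciesOutput, lastPage bool) bool {
		for _, p := range page.ScalingPolicies {
			names = append(names, aws.StringValue(p.PolicyName))
			if len(names) >= n {
				return false // stop paginating early
			}
		}
		return true
	})
	return names, err
}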
+func (c *AutoScaling) DescribePoliciesPagesWithContext(ctx aws.Context, input *DescribePoliciesInput, fn func(*DescribePoliciesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribePoliciesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribePoliciesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*DescribePoliciesOutput), !p.HasNextPage()) + } + return p.Err() } const opDescribeScalingActivities = "DescribeScalingActivities" // DescribeScalingActivitiesRequest generates a "aws/request.Request" representing the // client's request for the DescribeScalingActivities operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DescribeScalingActivities for usage and error information. +// See DescribeScalingActivities for more information on using the DescribeScalingActivities +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeScalingActivities method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeScalingActivitiesRequest method. // req, resp := client.DescribeScalingActivitiesRequest(params) @@ -2191,18 +2699,33 @@ func (c *AutoScaling) DescribeScalingActivitiesRequest(input *DescribeScalingAct // API operation DescribeScalingActivities for usage and error information. // // Returned Error Codes: -// * InvalidNextToken +// * ErrCodeInvalidNextToken "InvalidNextToken" // The NextToken value is not valid. // -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). // // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribeScalingActivities func (c *AutoScaling) DescribeScalingActivities(input *DescribeScalingActivitiesInput) (*DescribeScalingActivitiesOutput, error) { req, out := c.DescribeScalingActivitiesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeScalingActivitiesWithContext is the same as DescribeScalingActivities with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeScalingActivities for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) DescribeScalingActivitiesWithContext(ctx aws.Context, input *DescribeScalingActivitiesInput, opts ...request.Option) (*DescribeScalingActivitiesOutput, error) { + req, out := c.DescribeScalingActivitiesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } // DescribeScalingActivitiesPages iterates over the pages of a DescribeScalingActivities operation, @@ -2222,31 +2745,55 @@ func (c *AutoScaling) DescribeScalingActivities(input *DescribeScalingActivities // return pageNum <= 3 // }) // -func (c *AutoScaling) DescribeScalingActivitiesPages(input *DescribeScalingActivitiesInput, fn func(p *DescribeScalingActivitiesOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.DescribeScalingActivitiesRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*DescribeScalingActivitiesOutput), lastPage) - }) +func (c *AutoScaling) DescribeScalingActivitiesPages(input *DescribeScalingActivitiesInput, fn func(*DescribeScalingActivitiesOutput, bool) bool) error { + return c.DescribeScalingActivitiesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeScalingActivitiesPagesWithContext same as DescribeScalingActivitiesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) DescribeScalingActivitiesPagesWithContext(ctx aws.Context, input *DescribeScalingActivitiesInput, fn func(*DescribeScalingActivitiesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeScalingActivitiesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeScalingActivitiesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*DescribeScalingActivitiesOutput), !p.HasNextPage()) + } + return p.Err() } const opDescribeScalingProcessTypes = "DescribeScalingProcessTypes" // DescribeScalingProcessTypesRequest generates a "aws/request.Request" representing the // client's request for the DescribeScalingProcessTypes operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DescribeScalingProcessTypes for usage and error information. +// See DescribeScalingProcessTypes for more information on using the DescribeScalingProcessTypes +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. 
If -// you just want the service response, call the DescribeScalingProcessTypes method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeScalingProcessTypesRequest method. // req, resp := client.DescribeScalingProcessTypesRequest(params) @@ -2285,34 +2832,48 @@ func (c *AutoScaling) DescribeScalingProcessTypesRequest(input *DescribeScalingP // API operation DescribeScalingProcessTypes for usage and error information. // // Returned Error Codes: -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). // // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribeScalingProcessTypes func (c *AutoScaling) DescribeScalingProcessTypes(input *DescribeScalingProcessTypesInput) (*DescribeScalingProcessTypesOutput, error) { req, out := c.DescribeScalingProcessTypesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeScalingProcessTypesWithContext is the same as DescribeScalingProcessTypes with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeScalingProcessTypes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) DescribeScalingProcessTypesWithContext(ctx aws.Context, input *DescribeScalingProcessTypesInput, opts ...request.Option) (*DescribeScalingProcessTypesOutput, error) { + req, out := c.DescribeScalingProcessTypesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeScheduledActions = "DescribeScheduledActions" // DescribeScheduledActionsRequest generates a "aws/request.Request" representing the // client's request for the DescribeScheduledActions operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DescribeScheduledActions for usage and error information. +// See DescribeScheduledActions for more information on using the DescribeScheduledActions +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeScheduledActions method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeScheduledActionsRequest method. // req, resp := client.DescribeScheduledActionsRequest(params) @@ -2358,18 +2919,33 @@ func (c *AutoScaling) DescribeScheduledActionsRequest(input *DescribeScheduledAc // API operation DescribeScheduledActions for usage and error information. // // Returned Error Codes: -// * InvalidNextToken +// * ErrCodeInvalidNextToken "InvalidNextToken" // The NextToken value is not valid. // -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). // // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribeScheduledActions func (c *AutoScaling) DescribeScheduledActions(input *DescribeScheduledActionsInput) (*DescribeScheduledActionsOutput, error) { req, out := c.DescribeScheduledActionsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeScheduledActionsWithContext is the same as DescribeScheduledActions with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeScheduledActions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) DescribeScheduledActionsWithContext(ctx aws.Context, input *DescribeScheduledActionsInput, opts ...request.Option) (*DescribeScheduledActionsOutput, error) { + req, out := c.DescribeScheduledActionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } // DescribeScheduledActionsPages iterates over the pages of a DescribeScheduledActions operation, @@ -2389,31 +2965,55 @@ func (c *AutoScaling) DescribeScheduledActions(input *DescribeScheduledActionsIn // return pageNum <= 3 // }) // -func (c *AutoScaling) DescribeScheduledActionsPages(input *DescribeScheduledActionsInput, fn func(p *DescribeScheduledActionsOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.DescribeScheduledActionsRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*DescribeScheduledActionsOutput), lastPage) - }) +func (c *AutoScaling) DescribeScheduledActionsPages(input *DescribeScheduledActionsInput, fn func(*DescribeScheduledActionsOutput, bool) bool) error { + return c.DescribeScheduledActionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeScheduledActionsPagesWithContext same as DescribeScheduledActionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
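Combining the pagination and error-code changes above, a hedged sketch that walks scheduled actions under a context and branches on the generated ErrCode constants (svc is an assumed, already configured client):

package asgexamples

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

// listScheduledActions prints every scheduled action name for a group and
// shows how the ErrCode constants pair with awserr.Error.
func listScheduledActions(svc *autoscaling.AutoScaling, group string) error {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	err := svc.DescribeScheduledActionsPagesWithContext(ctx, &autoscaling.DescribeScheduledActionsInput{
		AutoScalingGroupName: aws.String(group),
	}, func(page *autoscaling.DescribeScheduledActionsOutput, lastPage bool) bool {
		for _, a := range page.ScheduledUpdateGroupActions {
			fmt.Println(aws.StringValue(a.ScheduledActionName))
		}
		return true
	})

	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case autoscaling.ErrCodeInvalidNextToken:
			fmt.Println("pagination token expired:", aerr.Message())
		case autoscaling.ErrCodeResourceContentionFault:
			fmt.Println("pending change on the group, retry later:", aerr.Message())
		}
	}
	return err
}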
+func (c *AutoScaling) DescribeScheduledActionsPagesWithContext(ctx aws.Context, input *DescribeScheduledActionsInput, fn func(*DescribeScheduledActionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeScheduledActionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeScheduledActionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*DescribeScheduledActionsOutput), !p.HasNextPage()) + } + return p.Err() } const opDescribeTags = "DescribeTags" // DescribeTagsRequest generates a "aws/request.Request" representing the // client's request for the DescribeTags operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeTags for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeTags method directly -// instead. +// See DescribeTags for more information on using the DescribeTags +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeTagsRequest method. // req, resp := client.DescribeTagsRequest(params) @@ -2467,18 +3067,33 @@ func (c *AutoScaling) DescribeTagsRequest(input *DescribeTagsInput) (req *reques // API operation DescribeTags for usage and error information. // // Returned Error Codes: -// * InvalidNextToken +// * ErrCodeInvalidNextToken "InvalidNextToken" // The NextToken value is not valid. // -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). // // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribeTags func (c *AutoScaling) DescribeTags(input *DescribeTagsInput) (*DescribeTagsOutput, error) { req, out := c.DescribeTagsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeTagsWithContext is the same as DescribeTags with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeTags for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
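Per-call request.Option values are one way to inject the "custom logic or configuration" mentioned above without touching the client; a hedged sketch that adds an illustrative header to a DescribeTags call (the header name and filter value are placeholders):

package asgexamples

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

// describeGroupTags filters tags by Auto Scaling group name and applies a
// per-request option that adds a tracing header before the call is sent.
func describeGroupTags(svc *autoscaling.AutoScaling, group string) (*autoscaling.DescribeTagsOutput, error) {
	withTraceHeader := func(r *request.Request) {
		r.HTTPRequest.Header.Set("X-Example-Trace", "describe-tags") // illustrative header
	}

	return svc.DescribeTagsWithContext(aws.BackgroundContext(), &autoscaling.DescribeTagsInput{
		Filters: []*autoscaling.Filter{{
			Name:   aws.String("auto-scaling-group"),
			Values: []*string{aws.String(group)},
		}},
	}, withTraceHeader)
}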
+func (c *AutoScaling) DescribeTagsWithContext(ctx aws.Context, input *DescribeTagsInput, opts ...request.Option) (*DescribeTagsOutput, error) { + req, out := c.DescribeTagsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } // DescribeTagsPages iterates over the pages of a DescribeTags operation, @@ -2498,31 +3113,55 @@ func (c *AutoScaling) DescribeTags(input *DescribeTagsInput) (*DescribeTagsOutpu // return pageNum <= 3 // }) // -func (c *AutoScaling) DescribeTagsPages(input *DescribeTagsInput, fn func(p *DescribeTagsOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.DescribeTagsRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*DescribeTagsOutput), lastPage) - }) +func (c *AutoScaling) DescribeTagsPages(input *DescribeTagsInput, fn func(*DescribeTagsOutput, bool) bool) error { + return c.DescribeTagsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeTagsPagesWithContext same as DescribeTagsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) DescribeTagsPagesWithContext(ctx aws.Context, input *DescribeTagsInput, fn func(*DescribeTagsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeTagsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeTagsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*DescribeTagsOutput), !p.HasNextPage()) + } + return p.Err() } const opDescribeTerminationPolicyTypes = "DescribeTerminationPolicyTypes" // DescribeTerminationPolicyTypesRequest generates a "aws/request.Request" representing the // client's request for the DescribeTerminationPolicyTypes operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeTerminationPolicyTypes for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeTerminationPolicyTypes method directly -// instead. +// See DescribeTerminationPolicyTypes for more information on using the DescribeTerminationPolicyTypes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. 
// // // Example sending a request using the DescribeTerminationPolicyTypesRequest method. // req, resp := client.DescribeTerminationPolicyTypesRequest(params) @@ -2561,34 +3200,48 @@ func (c *AutoScaling) DescribeTerminationPolicyTypesRequest(input *DescribeTermi // API operation DescribeTerminationPolicyTypes for usage and error information. // // Returned Error Codes: -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). // // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribeTerminationPolicyTypes func (c *AutoScaling) DescribeTerminationPolicyTypes(input *DescribeTerminationPolicyTypesInput) (*DescribeTerminationPolicyTypesOutput, error) { req, out := c.DescribeTerminationPolicyTypesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeTerminationPolicyTypesWithContext is the same as DescribeTerminationPolicyTypes with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeTerminationPolicyTypes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) DescribeTerminationPolicyTypesWithContext(ctx aws.Context, input *DescribeTerminationPolicyTypesInput, opts ...request.Option) (*DescribeTerminationPolicyTypesOutput, error) { + req, out := c.DescribeTerminationPolicyTypesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDetachInstances = "DetachInstances" // DetachInstancesRequest generates a "aws/request.Request" representing the // client's request for the DetachInstances operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DetachInstances for usage and error information. +// See DetachInstances for more information on using the DetachInstances +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DetachInstances method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DetachInstancesRequest method. // req, resp := client.DetachInstancesRequest(params) @@ -2619,13 +3272,13 @@ func (c *AutoScaling) DetachInstancesRequest(input *DetachInstancesInput) (req * // // Removes one or more instances from the specified Auto Scaling group. 
// -// After the instances are detached, you can manage them independently from -// the rest of the Auto Scaling group. +// After the instances are detached, you can manage them independent of the +// Auto Scaling group. // // If you do not specify the option to decrement the desired capacity, Auto // Scaling launches instances to replace the ones that are detached. // -// If there is a Classic load balancer attached to the Auto Scaling group, the +// If there is a Classic Load Balancer attached to the Auto Scaling group, the // instances are deregistered from the load balancer. If there are target groups // attached to the Auto Scaling group, the instances are deregistered from the // target groups. @@ -2642,34 +3295,48 @@ func (c *AutoScaling) DetachInstancesRequest(input *DetachInstancesInput) (req * // API operation DetachInstances for usage and error information. // // Returned Error Codes: -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). // // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DetachInstances func (c *AutoScaling) DetachInstances(input *DetachInstancesInput) (*DetachInstancesOutput, error) { req, out := c.DetachInstancesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DetachInstancesWithContext is the same as DetachInstances with the addition of +// the ability to pass a context and additional request options. +// +// See DetachInstances for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) DetachInstancesWithContext(ctx aws.Context, input *DetachInstancesInput, opts ...request.Option) (*DetachInstancesOutput, error) { + req, out := c.DetachInstancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDetachLoadBalancerTargetGroups = "DetachLoadBalancerTargetGroups" // DetachLoadBalancerTargetGroupsRequest generates a "aws/request.Request" representing the // client's request for the DetachLoadBalancerTargetGroups operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DetachLoadBalancerTargetGroups for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DetachLoadBalancerTargetGroups method directly -// instead. +// See DetachLoadBalancerTargetGroups for more information on using the DetachLoadBalancerTargetGroups +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DetachLoadBalancerTargetGroupsRequest method. // req, resp := client.DetachLoadBalancerTargetGroupsRequest(params) @@ -2708,34 +3375,48 @@ func (c *AutoScaling) DetachLoadBalancerTargetGroupsRequest(input *DetachLoadBal // API operation DetachLoadBalancerTargetGroups for usage and error information. // // Returned Error Codes: -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). // // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DetachLoadBalancerTargetGroups func (c *AutoScaling) DetachLoadBalancerTargetGroups(input *DetachLoadBalancerTargetGroupsInput) (*DetachLoadBalancerTargetGroupsOutput, error) { req, out := c.DetachLoadBalancerTargetGroupsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DetachLoadBalancerTargetGroupsWithContext is the same as DetachLoadBalancerTargetGroups with the addition of +// the ability to pass a context and additional request options. +// +// See DetachLoadBalancerTargetGroups for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) DetachLoadBalancerTargetGroupsWithContext(ctx aws.Context, input *DetachLoadBalancerTargetGroupsInput, opts ...request.Option) (*DetachLoadBalancerTargetGroupsOutput, error) { + req, out := c.DetachLoadBalancerTargetGroupsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDetachLoadBalancers = "DetachLoadBalancers" // DetachLoadBalancersRequest generates a "aws/request.Request" representing the // client's request for the DetachLoadBalancers operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DetachLoadBalancers for usage and error information. +// See DetachLoadBalancers for more information on using the DetachLoadBalancers +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DetachLoadBalancers method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DetachLoadBalancersRequest method. 
// req, resp := client.DetachLoadBalancersRequest(params) @@ -2764,11 +3445,11 @@ func (c *AutoScaling) DetachLoadBalancersRequest(input *DetachLoadBalancersInput // DetachLoadBalancers API operation for Auto Scaling. // -// Detaches one or more Classic load balancers from the specified Auto Scaling +// Detaches one or more Classic Load Balancers from the specified Auto Scaling // group. // -// Note that this operation detaches only Classic load balancers. If you have -// Application load balancers, use DetachLoadBalancerTargetGroups instead. +// Note that this operation detaches only Classic Load Balancers. If you have +// Application Load Balancers, use DetachLoadBalancerTargetGroups instead. // // When you detach a load balancer, it enters the Removing state while deregistering // the instances in the group. When all instances are deregistered, then you @@ -2783,34 +3464,48 @@ func (c *AutoScaling) DetachLoadBalancersRequest(input *DetachLoadBalancersInput // API operation DetachLoadBalancers for usage and error information. // // Returned Error Codes: -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). // // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DetachLoadBalancers func (c *AutoScaling) DetachLoadBalancers(input *DetachLoadBalancersInput) (*DetachLoadBalancersOutput, error) { req, out := c.DetachLoadBalancersRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DetachLoadBalancersWithContext is the same as DetachLoadBalancers with the addition of +// the ability to pass a context and additional request options. +// +// See DetachLoadBalancers for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) DetachLoadBalancersWithContext(ctx aws.Context, input *DetachLoadBalancersInput, opts ...request.Option) (*DetachLoadBalancersOutput, error) { + req, out := c.DetachLoadBalancersRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDisableMetricsCollection = "DisableMetricsCollection" // DisableMetricsCollectionRequest generates a "aws/request.Request" representing the // client's request for the DisableMetricsCollection operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DisableMetricsCollection for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DisableMetricsCollection method directly -// instead. 
+// See DisableMetricsCollection for more information on using the DisableMetricsCollection +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DisableMetricsCollectionRequest method. // req, resp := client.DisableMetricsCollectionRequest(params) @@ -2851,34 +3546,48 @@ func (c *AutoScaling) DisableMetricsCollectionRequest(input *DisableMetricsColle // API operation DisableMetricsCollection for usage and error information. // // Returned Error Codes: -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). // // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DisableMetricsCollection func (c *AutoScaling) DisableMetricsCollection(input *DisableMetricsCollectionInput) (*DisableMetricsCollectionOutput, error) { req, out := c.DisableMetricsCollectionRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DisableMetricsCollectionWithContext is the same as DisableMetricsCollection with the addition of +// the ability to pass a context and additional request options. +// +// See DisableMetricsCollection for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) DisableMetricsCollectionWithContext(ctx aws.Context, input *DisableMetricsCollectionInput, opts ...request.Option) (*DisableMetricsCollectionOutput, error) { + req, out := c.DisableMetricsCollectionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opEnableMetricsCollection = "EnableMetricsCollection" // EnableMetricsCollectionRequest generates a "aws/request.Request" representing the // client's request for the EnableMetricsCollection operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See EnableMetricsCollection for usage and error information. +// See EnableMetricsCollection for more information on using the EnableMetricsCollection +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the EnableMetricsCollection method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
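
The rewritten doc comments emphasize the two-step Request/Send pattern for injecting custom behavior before the call is made. A hedged sketch of attaching an illustrative header through the returned request, again assuming the svc client from the sketches above; the group name, granularity, and header are placeholders.

req, out := svc.EnableMetricsCollectionRequest(&autoscaling.EnableMetricsCollectionInput{
    AutoScalingGroupName: aws.String("my-asg"),  // hypothetical group name
    Granularity:          aws.String("1Minute"), // granularity documented for this API
})
req.HTTPRequest.Header.Set("X-Example-Trace-Id", "abc123") // illustrative custom header
if err := req.Send(); err != nil {
    log.Fatal(err)
}
_ = out // out is only valid once Send has returned without error
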
// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the EnableMetricsCollectionRequest method. // req, resp := client.EnableMetricsCollectionRequest(params) @@ -2921,34 +3630,48 @@ func (c *AutoScaling) EnableMetricsCollectionRequest(input *EnableMetricsCollect // API operation EnableMetricsCollection for usage and error information. // // Returned Error Codes: -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). // // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/EnableMetricsCollection func (c *AutoScaling) EnableMetricsCollection(input *EnableMetricsCollectionInput) (*EnableMetricsCollectionOutput, error) { req, out := c.EnableMetricsCollectionRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// EnableMetricsCollectionWithContext is the same as EnableMetricsCollection with the addition of +// the ability to pass a context and additional request options. +// +// See EnableMetricsCollection for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) EnableMetricsCollectionWithContext(ctx aws.Context, input *EnableMetricsCollectionInput, opts ...request.Option) (*EnableMetricsCollectionOutput, error) { + req, out := c.EnableMetricsCollectionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opEnterStandby = "EnterStandby" // EnterStandbyRequest generates a "aws/request.Request" representing the // client's request for the EnterStandby operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See EnterStandby for usage and error information. +// See EnterStandby for more information on using the EnterStandby +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the EnterStandby method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the EnterStandbyRequest method. // req, resp := client.EnterStandbyRequest(params) @@ -2977,9 +3700,10 @@ func (c *AutoScaling) EnterStandbyRequest(input *EnterStandbyInput) (req *reques // EnterStandby API operation for Auto Scaling. // -// Moves the specified instances into Standby mode. 
+// Moves the specified instances into the standby state. // -// For more information, see Auto Scaling Lifecycle (http://docs.aws.amazon.com/autoscaling/latest/userguide/AutoScalingGroupLifecycle.html) +// For more information, see Temporarily Removing Instances from Your Auto Scaling +// Group (http://docs.aws.amazon.com/autoscaling/latest/userguide/as-enter-exit-standby.html) // in the Auto Scaling User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2990,34 +3714,48 @@ func (c *AutoScaling) EnterStandbyRequest(input *EnterStandbyInput) (req *reques // API operation EnterStandby for usage and error information. // // Returned Error Codes: -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). // // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/EnterStandby func (c *AutoScaling) EnterStandby(input *EnterStandbyInput) (*EnterStandbyOutput, error) { req, out := c.EnterStandbyRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// EnterStandbyWithContext is the same as EnterStandby with the addition of +// the ability to pass a context and additional request options. +// +// See EnterStandby for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) EnterStandbyWithContext(ctx aws.Context, input *EnterStandbyInput, opts ...request.Option) (*EnterStandbyOutput, error) { + req, out := c.EnterStandbyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opExecutePolicy = "ExecutePolicy" // ExecutePolicyRequest generates a "aws/request.Request" representing the // client's request for the ExecutePolicy operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See ExecutePolicy for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ExecutePolicy method directly -// instead. +// See ExecutePolicy for more information on using the ExecutePolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ExecutePolicyRequest method. 
// req, resp := client.ExecutePolicyRequest(params) @@ -3058,38 +3796,52 @@ func (c *AutoScaling) ExecutePolicyRequest(input *ExecutePolicyInput) (req *requ // API operation ExecutePolicy for usage and error information. // // Returned Error Codes: -// * ScalingActivityInProgress +// * ErrCodeScalingActivityInProgressFault "ScalingActivityInProgress" // The operation can't be performed because there are scaling activities in // progress. // -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). // // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/ExecutePolicy func (c *AutoScaling) ExecutePolicy(input *ExecutePolicyInput) (*ExecutePolicyOutput, error) { req, out := c.ExecutePolicyRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ExecutePolicyWithContext is the same as ExecutePolicy with the addition of +// the ability to pass a context and additional request options. +// +// See ExecutePolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) ExecutePolicyWithContext(ctx aws.Context, input *ExecutePolicyInput, opts ...request.Option) (*ExecutePolicyOutput, error) { + req, out := c.ExecutePolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opExitStandby = "ExitStandby" // ExitStandbyRequest generates a "aws/request.Request" representing the // client's request for the ExitStandby operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See ExitStandby for usage and error information. +// See ExitStandby for more information on using the ExitStandby +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ExitStandby method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ExitStandbyRequest method. // req, resp := client.ExitStandbyRequest(params) @@ -3118,9 +3870,10 @@ func (c *AutoScaling) ExitStandbyRequest(input *ExitStandbyInput) (req *request. // ExitStandby API operation for Auto Scaling. // -// Moves the specified instances out of Standby mode. +// Moves the specified instances out of the standby state. 
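
EnterStandby and ExitStandby now describe the standby state and point to the enter/exit guide. A sketch of that round trip using the new context-aware variants, with hypothetical group and instance IDs and the svc/ctx values from the earlier sketches.

enterOut, err := svc.EnterStandbyWithContext(ctx, &autoscaling.EnterStandbyInput{
    AutoScalingGroupName:           aws.String("my-asg"),
    InstanceIds:                    aws.StringSlice([]string{"i-0123456789abcdef0"}),
    ShouldDecrementDesiredCapacity: aws.Bool(true), // avoid launching a replacement while in standby
})
if err != nil {
    log.Fatal(err)
}
for _, activity := range enterOut.Activities {
    fmt.Println(aws.StringValue(activity.Description), aws.StringValue(activity.StatusCode))
}

// ...perform maintenance, then move the instance back into service.
if _, err := svc.ExitStandbyWithContext(ctx, &autoscaling.ExitStandbyInput{
    AutoScalingGroupName: aws.String("my-asg"),
    InstanceIds:          aws.StringSlice([]string{"i-0123456789abcdef0"}),
}); err != nil {
    log.Fatal(err)
}
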
// -// For more information, see Auto Scaling Lifecycle (http://docs.aws.amazon.com/autoscaling/latest/userguide/AutoScalingGroupLifecycle.html) +// For more information, see Temporarily Removing Instances from Your Auto Scaling +// Group (http://docs.aws.amazon.com/autoscaling/latest/userguide/as-enter-exit-standby.html) // in the Auto Scaling User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -3131,34 +3884,48 @@ func (c *AutoScaling) ExitStandbyRequest(input *ExitStandbyInput) (req *request. // API operation ExitStandby for usage and error information. // // Returned Error Codes: -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). // // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/ExitStandby func (c *AutoScaling) ExitStandby(input *ExitStandbyInput) (*ExitStandbyOutput, error) { req, out := c.ExitStandbyRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ExitStandbyWithContext is the same as ExitStandby with the addition of +// the ability to pass a context and additional request options. +// +// See ExitStandby for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) ExitStandbyWithContext(ctx aws.Context, input *ExitStandbyInput, opts ...request.Option) (*ExitStandbyOutput, error) { + req, out := c.ExitStandbyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opPutLifecycleHook = "PutLifecycleHook" // PutLifecycleHookRequest generates a "aws/request.Request" representing the // client's request for the PutLifecycleHook operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See PutLifecycleHook for usage and error information. +// See PutLifecycleHook for more information on using the PutLifecycleHook +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutLifecycleHook method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutLifecycleHookRequest method. // req, resp := client.PutLifecycleHookRequest(params) @@ -3227,39 +3994,53 @@ func (c *AutoScaling) PutLifecycleHookRequest(input *PutLifecycleHookInput) (req // API operation PutLifecycleHook for usage and error information. 
// // Returned Error Codes: -// * LimitExceeded +// * ErrCodeLimitExceededFault "LimitExceeded" // You have already reached a limit for your Auto Scaling resources (for example, // groups, launch configurations, or lifecycle hooks). For more information, // see DescribeAccountLimits. // -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). // // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/PutLifecycleHook func (c *AutoScaling) PutLifecycleHook(input *PutLifecycleHookInput) (*PutLifecycleHookOutput, error) { req, out := c.PutLifecycleHookRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// PutLifecycleHookWithContext is the same as PutLifecycleHook with the addition of +// the ability to pass a context and additional request options. +// +// See PutLifecycleHook for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) PutLifecycleHookWithContext(ctx aws.Context, input *PutLifecycleHookInput, opts ...request.Option) (*PutLifecycleHookOutput, error) { + req, out := c.PutLifecycleHookRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opPutNotificationConfiguration = "PutNotificationConfiguration" // PutNotificationConfigurationRequest generates a "aws/request.Request" representing the // client's request for the PutNotificationConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See PutNotificationConfiguration for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutNotificationConfiguration method directly -// instead. +// See PutNotificationConfiguration for more information on using the PutNotificationConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutNotificationConfigurationRequest method. // req, resp := client.PutNotificationConfigurationRequest(params) @@ -3308,39 +4089,53 @@ func (c *AutoScaling) PutNotificationConfigurationRequest(input *PutNotification // API operation PutNotificationConfiguration for usage and error information. 
// // Returned Error Codes: -// * LimitExceeded +// * ErrCodeLimitExceededFault "LimitExceeded" // You have already reached a limit for your Auto Scaling resources (for example, // groups, launch configurations, or lifecycle hooks). For more information, // see DescribeAccountLimits. // -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). // // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/PutNotificationConfiguration func (c *AutoScaling) PutNotificationConfiguration(input *PutNotificationConfigurationInput) (*PutNotificationConfigurationOutput, error) { req, out := c.PutNotificationConfigurationRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// PutNotificationConfigurationWithContext is the same as PutNotificationConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutNotificationConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) PutNotificationConfigurationWithContext(ctx aws.Context, input *PutNotificationConfigurationInput, opts ...request.Option) (*PutNotificationConfigurationOutput, error) { + req, out := c.PutNotificationConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opPutScalingPolicy = "PutScalingPolicy" // PutScalingPolicyRequest generates a "aws/request.Request" representing the // client's request for the PutScalingPolicy operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See PutScalingPolicy for usage and error information. +// See PutScalingPolicy for more information on using the PutScalingPolicy +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutScalingPolicy method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutScalingPolicyRequest method. // req, resp := client.PutScalingPolicyRequest(params) @@ -3387,39 +4182,53 @@ func (c *AutoScaling) PutScalingPolicyRequest(input *PutScalingPolicyInput) (req // API operation PutScalingPolicy for usage and error information. 
// // Returned Error Codes: -// * LimitExceeded +// * ErrCodeLimitExceededFault "LimitExceeded" // You have already reached a limit for your Auto Scaling resources (for example, // groups, launch configurations, or lifecycle hooks). For more information, // see DescribeAccountLimits. // -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). // // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/PutScalingPolicy func (c *AutoScaling) PutScalingPolicy(input *PutScalingPolicyInput) (*PutScalingPolicyOutput, error) { req, out := c.PutScalingPolicyRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// PutScalingPolicyWithContext is the same as PutScalingPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See PutScalingPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) PutScalingPolicyWithContext(ctx aws.Context, input *PutScalingPolicyInput, opts ...request.Option) (*PutScalingPolicyOutput, error) { + req, out := c.PutScalingPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opPutScheduledUpdateGroupAction = "PutScheduledUpdateGroupAction" // PutScheduledUpdateGroupActionRequest generates a "aws/request.Request" representing the // client's request for the PutScheduledUpdateGroupAction operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See PutScheduledUpdateGroupAction for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutScheduledUpdateGroupAction method directly -// instead. +// See PutScheduledUpdateGroupAction for more information on using the PutScheduledUpdateGroupAction +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutScheduledUpdateGroupActionRequest method. // req, resp := client.PutScheduledUpdateGroupActionRequest(params) @@ -3465,43 +4274,57 @@ func (c *AutoScaling) PutScheduledUpdateGroupActionRequest(input *PutScheduledUp // API operation PutScheduledUpdateGroupAction for usage and error information. 
// // Returned Error Codes: -// * AlreadyExists +// * ErrCodeAlreadyExistsFault "AlreadyExists" // You already have an Auto Scaling group or launch configuration with this // name. // -// * LimitExceeded +// * ErrCodeLimitExceededFault "LimitExceeded" // You have already reached a limit for your Auto Scaling resources (for example, // groups, launch configurations, or lifecycle hooks). For more information, // see DescribeAccountLimits. // -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). // // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/PutScheduledUpdateGroupAction func (c *AutoScaling) PutScheduledUpdateGroupAction(input *PutScheduledUpdateGroupActionInput) (*PutScheduledUpdateGroupActionOutput, error) { req, out := c.PutScheduledUpdateGroupActionRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// PutScheduledUpdateGroupActionWithContext is the same as PutScheduledUpdateGroupAction with the addition of +// the ability to pass a context and additional request options. +// +// See PutScheduledUpdateGroupAction for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) PutScheduledUpdateGroupActionWithContext(ctx aws.Context, input *PutScheduledUpdateGroupActionInput, opts ...request.Option) (*PutScheduledUpdateGroupActionOutput, error) { + req, out := c.PutScheduledUpdateGroupActionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opRecordLifecycleActionHeartbeat = "RecordLifecycleActionHeartbeat" // RecordLifecycleActionHeartbeatRequest generates a "aws/request.Request" representing the // client's request for the RecordLifecycleActionHeartbeat operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See RecordLifecycleActionHeartbeat for usage and error information. +// See RecordLifecycleActionHeartbeat for more information on using the RecordLifecycleActionHeartbeat +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the RecordLifecycleActionHeartbeat method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the RecordLifecycleActionHeartbeatRequest method. 
// req, resp := client.RecordLifecycleActionHeartbeatRequest(params) @@ -3563,34 +4386,48 @@ func (c *AutoScaling) RecordLifecycleActionHeartbeatRequest(input *RecordLifecyc // API operation RecordLifecycleActionHeartbeat for usage and error information. // // Returned Error Codes: -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). // // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/RecordLifecycleActionHeartbeat func (c *AutoScaling) RecordLifecycleActionHeartbeat(input *RecordLifecycleActionHeartbeatInput) (*RecordLifecycleActionHeartbeatOutput, error) { req, out := c.RecordLifecycleActionHeartbeatRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// RecordLifecycleActionHeartbeatWithContext is the same as RecordLifecycleActionHeartbeat with the addition of +// the ability to pass a context and additional request options. +// +// See RecordLifecycleActionHeartbeat for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) RecordLifecycleActionHeartbeatWithContext(ctx aws.Context, input *RecordLifecycleActionHeartbeatInput, opts ...request.Option) (*RecordLifecycleActionHeartbeatOutput, error) { + req, out := c.RecordLifecycleActionHeartbeatRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opResumeProcesses = "ResumeProcesses" // ResumeProcessesRequest generates a "aws/request.Request" representing the // client's request for the ResumeProcesses operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See ResumeProcesses for usage and error information. +// See ResumeProcesses for more information on using the ResumeProcesses +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ResumeProcesses method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ResumeProcessesRequest method. // req, resp := client.ResumeProcessesRequest(params) @@ -3636,37 +4473,51 @@ func (c *AutoScaling) ResumeProcessesRequest(input *ScalingProcessQuery) (req *r // API operation ResumeProcesses for usage and error information. 
// // Returned Error Codes: -// * ResourceInUse +// * ErrCodeResourceInUseFault "ResourceInUse" // The operation can't be performed because the resource is in use. // -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). // // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/ResumeProcesses func (c *AutoScaling) ResumeProcesses(input *ScalingProcessQuery) (*ResumeProcessesOutput, error) { req, out := c.ResumeProcessesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ResumeProcessesWithContext is the same as ResumeProcesses with the addition of +// the ability to pass a context and additional request options. +// +// See ResumeProcesses for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) ResumeProcessesWithContext(ctx aws.Context, input *ScalingProcessQuery, opts ...request.Option) (*ResumeProcessesOutput, error) { + req, out := c.ResumeProcessesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opSetDesiredCapacity = "SetDesiredCapacity" // SetDesiredCapacityRequest generates a "aws/request.Request" representing the // client's request for the SetDesiredCapacity operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See SetDesiredCapacity for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the SetDesiredCapacity method directly -// instead. +// See SetDesiredCapacity for more information on using the SetDesiredCapacity +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the SetDesiredCapacityRequest method. // req, resp := client.SetDesiredCapacityRequest(params) @@ -3710,38 +4561,52 @@ func (c *AutoScaling) SetDesiredCapacityRequest(input *SetDesiredCapacityInput) // API operation SetDesiredCapacity for usage and error information. // // Returned Error Codes: -// * ScalingActivityInProgress +// * ErrCodeScalingActivityInProgressFault "ScalingActivityInProgress" // The operation can't be performed because there are scaling activities in // progress. 
// -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). // // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/SetDesiredCapacity func (c *AutoScaling) SetDesiredCapacity(input *SetDesiredCapacityInput) (*SetDesiredCapacityOutput, error) { req, out := c.SetDesiredCapacityRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// SetDesiredCapacityWithContext is the same as SetDesiredCapacity with the addition of +// the ability to pass a context and additional request options. +// +// See SetDesiredCapacity for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) SetDesiredCapacityWithContext(ctx aws.Context, input *SetDesiredCapacityInput, opts ...request.Option) (*SetDesiredCapacityOutput, error) { + req, out := c.SetDesiredCapacityRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opSetInstanceHealth = "SetInstanceHealth" // SetInstanceHealthRequest generates a "aws/request.Request" representing the // client's request for the SetInstanceHealth operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See SetInstanceHealth for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the SetInstanceHealth method directly -// instead. +// See SetInstanceHealth for more information on using the SetInstanceHealth +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the SetInstanceHealthRequest method. // req, resp := client.SetInstanceHealthRequest(params) @@ -3785,34 +4650,48 @@ func (c *AutoScaling) SetInstanceHealthRequest(input *SetInstanceHealthInput) (r // API operation SetInstanceHealth for usage and error information. // // Returned Error Codes: -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). 
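
The error-code bullets now reference the generated ErrCode* constants instead of bare strings, which makes comparisons in calling code typo-proof. A sketch of matching them with an awserr.Error type assertion (this additionally assumes the github.com/aws/aws-sdk-go/aws/awserr import); the group name and capacity are placeholders.

_, err := svc.SetDesiredCapacityWithContext(ctx, &autoscaling.SetDesiredCapacityInput{
    AutoScalingGroupName: aws.String("my-asg"),
    DesiredCapacity:      aws.Int64(3),
    HonorCooldown:        aws.Bool(true),
})
if aerr, ok := err.(awserr.Error); ok {
    switch aerr.Code() {
    case autoscaling.ErrCodeScalingActivityInProgressFault:
        log.Println("scaling activity in progress; retry later:", aerr.Message())
    case autoscaling.ErrCodeResourceContentionFault:
        log.Println("pending update on the group; retry later:", aerr.Message())
    default:
        log.Fatal(aerr)
    }
} else if err != nil {
    log.Fatal(err)
}
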
// // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/SetInstanceHealth func (c *AutoScaling) SetInstanceHealth(input *SetInstanceHealthInput) (*SetInstanceHealthOutput, error) { req, out := c.SetInstanceHealthRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// SetInstanceHealthWithContext is the same as SetInstanceHealth with the addition of +// the ability to pass a context and additional request options. +// +// See SetInstanceHealth for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) SetInstanceHealthWithContext(ctx aws.Context, input *SetInstanceHealthInput, opts ...request.Option) (*SetInstanceHealthOutput, error) { + req, out := c.SetInstanceHealthRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opSetInstanceProtection = "SetInstanceProtection" // SetInstanceProtectionRequest generates a "aws/request.Request" representing the // client's request for the SetInstanceProtection operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See SetInstanceProtection for usage and error information. +// See SetInstanceProtection for more information on using the SetInstanceProtection +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the SetInstanceProtection method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the SetInstanceProtectionRequest method. // req, resp := client.SetInstanceProtectionRequest(params) @@ -3854,39 +4733,53 @@ func (c *AutoScaling) SetInstanceProtectionRequest(input *SetInstanceProtectionI // API operation SetInstanceProtection for usage and error information. // // Returned Error Codes: -// * LimitExceeded +// * ErrCodeLimitExceededFault "LimitExceeded" // You have already reached a limit for your Auto Scaling resources (for example, // groups, launch configurations, or lifecycle hooks). For more information, // see DescribeAccountLimits. // -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). 
// // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/SetInstanceProtection func (c *AutoScaling) SetInstanceProtection(input *SetInstanceProtectionInput) (*SetInstanceProtectionOutput, error) { req, out := c.SetInstanceProtectionRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// SetInstanceProtectionWithContext is the same as SetInstanceProtection with the addition of +// the ability to pass a context and additional request options. +// +// See SetInstanceProtection for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) SetInstanceProtectionWithContext(ctx aws.Context, input *SetInstanceProtectionInput, opts ...request.Option) (*SetInstanceProtectionOutput, error) { + req, out := c.SetInstanceProtectionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opSuspendProcesses = "SuspendProcesses" // SuspendProcessesRequest generates a "aws/request.Request" representing the // client's request for the SuspendProcesses operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See SuspendProcesses for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the SuspendProcesses method directly -// instead. +// See SuspendProcesses for more information on using the SuspendProcesses +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the SuspendProcessesRequest method. // req, resp := client.SuspendProcessesRequest(params) @@ -3937,37 +4830,51 @@ func (c *AutoScaling) SuspendProcessesRequest(input *ScalingProcessQuery) (req * // API operation SuspendProcesses for usage and error information. // // Returned Error Codes: -// * ResourceInUse +// * ErrCodeResourceInUseFault "ResourceInUse" // The operation can't be performed because the resource is in use. // -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). 
// // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/SuspendProcesses func (c *AutoScaling) SuspendProcesses(input *ScalingProcessQuery) (*SuspendProcessesOutput, error) { req, out := c.SuspendProcessesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// SuspendProcessesWithContext is the same as SuspendProcesses with the addition of +// the ability to pass a context and additional request options. +// +// See SuspendProcesses for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) SuspendProcessesWithContext(ctx aws.Context, input *ScalingProcessQuery, opts ...request.Option) (*SuspendProcessesOutput, error) { + req, out := c.SuspendProcessesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opTerminateInstanceInAutoScalingGroup = "TerminateInstanceInAutoScalingGroup" // TerminateInstanceInAutoScalingGroupRequest generates a "aws/request.Request" representing the // client's request for the TerminateInstanceInAutoScalingGroup operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See TerminateInstanceInAutoScalingGroup for usage and error information. +// See TerminateInstanceInAutoScalingGroup for more information on using the TerminateInstanceInAutoScalingGroup +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the TerminateInstanceInAutoScalingGroup method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the TerminateInstanceInAutoScalingGroupRequest method. // req, resp := client.TerminateInstanceInAutoScalingGroupRequest(params) @@ -4010,38 +4917,52 @@ func (c *AutoScaling) TerminateInstanceInAutoScalingGroupRequest(input *Terminat // API operation TerminateInstanceInAutoScalingGroup for usage and error information. // // Returned Error Codes: -// * ScalingActivityInProgress +// * ErrCodeScalingActivityInProgressFault "ScalingActivityInProgress" // The operation can't be performed because there are scaling activities in // progress. // -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). 
// // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/TerminateInstanceInAutoScalingGroup func (c *AutoScaling) TerminateInstanceInAutoScalingGroup(input *TerminateInstanceInAutoScalingGroupInput) (*TerminateInstanceInAutoScalingGroupOutput, error) { req, out := c.TerminateInstanceInAutoScalingGroupRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// TerminateInstanceInAutoScalingGroupWithContext is the same as TerminateInstanceInAutoScalingGroup with the addition of +// the ability to pass a context and additional request options. +// +// See TerminateInstanceInAutoScalingGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) TerminateInstanceInAutoScalingGroupWithContext(ctx aws.Context, input *TerminateInstanceInAutoScalingGroupInput, opts ...request.Option) (*TerminateInstanceInAutoScalingGroupOutput, error) { + req, out := c.TerminateInstanceInAutoScalingGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opUpdateAutoScalingGroup = "UpdateAutoScalingGroup" // UpdateAutoScalingGroupRequest generates a "aws/request.Request" representing the // client's request for the UpdateAutoScalingGroup operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See UpdateAutoScalingGroup for usage and error information. +// See UpdateAutoScalingGroup for more information on using the UpdateAutoScalingGroup +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the UpdateAutoScalingGroup method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the UpdateAutoScalingGroupRequest method. // req, resp := client.UpdateAutoScalingGroupRequest(params) @@ -4074,15 +4995,14 @@ func (c *AutoScaling) UpdateAutoScalingGroupRequest(input *UpdateAutoScalingGrou // // Updates the configuration for the specified Auto Scaling group. // +// The new settings take effect on any scaling activities after this call returns. +// Scaling activities that are currently in progress aren't affected. +// // To update an Auto Scaling group with a launch configuration with InstanceMonitoring -// set to False, you must first disable the collection of group metrics. Otherwise, +// set to false, you must first disable the collection of group metrics. Otherwise, // you will get an error. 
If you have previously enabled the collection of group // metrics, you can disable it using DisableMetricsCollection. // -// The new settings are registered upon the completion of this call. Any launch -// configuration settings take effect on any triggers after this call returns. -// Scaling activities that are currently in progress aren't affected. -// // Note the following: // // * If you specify a new value for MinSize without specifying a value for @@ -4105,19 +5025,34 @@ func (c *AutoScaling) UpdateAutoScalingGroupRequest(input *UpdateAutoScalingGrou // API operation UpdateAutoScalingGroup for usage and error information. // // Returned Error Codes: -// * ScalingActivityInProgress +// * ErrCodeScalingActivityInProgressFault "ScalingActivityInProgress" // The operation can't be performed because there are scaling activities in // progress. // -// * ResourceContention +// * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Auto Scaling resource (for example, // a group, instance, or load balancer). // // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/UpdateAutoScalingGroup func (c *AutoScaling) UpdateAutoScalingGroup(input *UpdateAutoScalingGroupInput) (*UpdateAutoScalingGroupOutput, error) { req, out := c.UpdateAutoScalingGroupRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// UpdateAutoScalingGroupWithContext is the same as UpdateAutoScalingGroup with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateAutoScalingGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) UpdateAutoScalingGroupWithContext(ctx aws.Context, input *UpdateAutoScalingGroupInput, opts ...request.Option) (*UpdateAutoScalingGroupOutput, error) { + req, out := c.UpdateAutoScalingGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } // Describes scaling activity, which is a long-running process that represents @@ -4301,7 +5236,6 @@ func (s *Alarm) SetAlarmName(v string) *Alarm { return s } -// Contains the parameters for AttachInstances. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/AttachInstancesQuery type AttachInstancesInput struct { _ struct{} `type:"structure"` @@ -4368,7 +5302,6 @@ func (s AttachInstancesOutput) GoString() string { return s.String() } -// Contains the parameters for AttachLoadBalancerTargetGroups. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/AttachLoadBalancerTargetGroupsType type AttachLoadBalancerTargetGroupsInput struct { _ struct{} `type:"structure"` @@ -4440,7 +5373,6 @@ func (s AttachLoadBalancerTargetGroupsOutput) GoString() string { return s.String() } -// Contains the parameters for AttachLoadBalancers. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/AttachLoadBalancersType type AttachLoadBalancersInput struct { _ struct{} `type:"structure"` @@ -4497,7 +5429,6 @@ func (s *AttachLoadBalancersInput) SetLoadBalancerNames(v []*string) *AttachLoad return s } -// Contains the output of AttachLoadBalancers. 
// Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/AttachLoadBalancersResultType type AttachLoadBalancersOutput struct { _ struct{} `type:"structure"` @@ -4595,7 +5526,6 @@ func (s *BlockDeviceMapping) SetVirtualName(v string) *BlockDeviceMapping { return s } -// Contains the parameters for CompleteLifecycleAction. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/CompleteLifecycleActionType type CompleteLifecycleActionInput struct { _ struct{} `type:"structure"` @@ -4696,7 +5626,6 @@ func (s *CompleteLifecycleActionInput) SetLifecycleHookName(v string) *CompleteL return s } -// Contains the output of CompleteLifecycleAction. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/CompleteLifecycleActionAnswer type CompleteLifecycleActionOutput struct { _ struct{} `type:"structure"` @@ -4712,7 +5641,6 @@ func (s CompleteLifecycleActionOutput) GoString() string { return s.String() } -// Contains the parameters for CreateAutoScalingGroup. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/CreateAutoScalingGroupType type CreateAutoScalingGroupInput struct { _ struct{} `type:"structure"` @@ -4736,7 +5664,8 @@ type CreateAutoScalingGroupInput struct { // The number of EC2 instances that should be running in the group. This number // must be greater than or equal to the minimum size of the group and less than - // or equal to the maximum size of the group. + // or equal to the maximum size of the group. If you do not specify a desired + // capacity, the default is the minimum size of the group. DesiredCapacity *int64 `type:"integer"` // The amount of time, in seconds, that Auto Scaling waits before checking the @@ -4774,7 +5703,10 @@ type CreateAutoScalingGroupInput struct { // instead of a launch configuration. LaunchConfigurationName *string `min:"1" type:"string"` - // One or more Classic load balancers. To specify an Application load balancer, + // One or more lifecycle hooks. + LifecycleHookSpecificationList []*LifecycleHookSpecification `type:"list"` + + // One or more Classic Load Balancers. To specify an Application Load Balancer, // use TargetGroupARNs instead. // // For more information, see Using a Load Balancer With an Auto Scaling Group @@ -4872,6 +5804,16 @@ func (s *CreateAutoScalingGroupInput) Validate() error { if s.VPCZoneIdentifier != nil && len(*s.VPCZoneIdentifier) < 1 { invalidParams.Add(request.NewErrParamMinLen("VPCZoneIdentifier", 1)) } + if s.LifecycleHookSpecificationList != nil { + for i, v := range s.LifecycleHookSpecificationList { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LifecycleHookSpecificationList", i), err.(request.ErrInvalidParams)) + } + } + } if s.Tags != nil { for i, v := range s.Tags { if v == nil { @@ -4937,6 +5879,12 @@ func (s *CreateAutoScalingGroupInput) SetLaunchConfigurationName(v string) *Crea return s } +// SetLifecycleHookSpecificationList sets the LifecycleHookSpecificationList field's value. +func (s *CreateAutoScalingGroupInput) SetLifecycleHookSpecificationList(v []*LifecycleHookSpecification) *CreateAutoScalingGroupInput { + s.LifecycleHookSpecificationList = v + return s +} + // SetLoadBalancerNames sets the LoadBalancerNames field's value. 
func (s *CreateAutoScalingGroupInput) SetLoadBalancerNames(v []*string) *CreateAutoScalingGroupInput { s.LoadBalancerNames = v @@ -5006,7 +5954,6 @@ func (s CreateAutoScalingGroupOutput) GoString() string { return s.String() } -// Contains the parameters for CreateLaunchConfiguration. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/CreateLaunchConfigurationType type CreateLaunchConfigurationInput struct { _ struct{} `type:"structure"` @@ -5063,14 +6010,18 @@ type CreateLaunchConfigurationInput struct { IamInstanceProfile *string `min:"1" type:"string"` // The ID of the Amazon Machine Image (AMI) to use to launch your EC2 instances. + // + // If you do not specify InstanceId, you must specify ImageId. + // // For more information, see Finding an AMI (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/finding-an-ami.html) // in the Amazon Elastic Compute Cloud User Guide. ImageId *string `min:"1" type:"string"` - // The ID of the instance to use to create the launch configuration. + // The ID of the instance to use to create the launch configuration. The new + // launch configuration derives attributes from the instance, with the exception + // of the block device mapping. // - // The new launch configuration derives attributes from the instance, with the - // exception of the block device mapping. + // If you do not specify InstanceId, you must specify both ImageId and InstanceType. // // To create a launch configuration with a block device mapping or override // any other instance attributes, specify them as part of the same request. @@ -5081,11 +6032,15 @@ type CreateLaunchConfigurationInput struct { InstanceId *string `min:"1" type:"string"` // Enables detailed monitoring (true) or basic monitoring (false) for the Auto - // Scaling instances. + // Scaling instances. The default is true. InstanceMonitoring *InstanceMonitoring `type:"structure"` - // The instance type of the EC2 instance. For information about available instance - // types, see Available Instance Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#AvailableInstanceTypes) + // The instance type of the EC2 instance. + // + // If you do not specify InstanceId, you must specify InstanceType. + // + // For information about available instance types, see Available Instance Types + // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#AvailableInstanceTypes) // in the Amazon Elastic Compute Cloud User Guide. InstanceType *string `min:"1" type:"string"` @@ -5336,7 +6291,6 @@ func (s CreateLaunchConfigurationOutput) GoString() string { return s.String() } -// Contains the parameters for CreateOrUpdateTags. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/CreateOrUpdateTagsType type CreateOrUpdateTagsInput struct { _ struct{} `type:"structure"` @@ -5401,7 +6355,102 @@ func (s CreateOrUpdateTagsOutput) GoString() string { return s.String() } -// Contains the parameters for DeleteAutoScalingGroup. +// Configures a customized metric for a target tracking policy. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/CustomizedMetricSpecification +type CustomizedMetricSpecification struct { + _ struct{} `type:"structure"` + + // The dimensions of the metric. + Dimensions []*MetricDimension `type:"list"` + + // The name of the metric. + // + // MetricName is a required field + MetricName *string `type:"string" required:"true"` + + // The namespace of the metric. 
+ // + // Namespace is a required field + Namespace *string `type:"string" required:"true"` + + // The statistic of the metric. + // + // Statistic is a required field + Statistic *string `type:"string" required:"true" enum:"MetricStatistic"` + + // The unit of the metric. + Unit *string `type:"string"` +} + +// String returns the string representation +func (s CustomizedMetricSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CustomizedMetricSpecification) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CustomizedMetricSpecification) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CustomizedMetricSpecification"} + if s.MetricName == nil { + invalidParams.Add(request.NewErrParamRequired("MetricName")) + } + if s.Namespace == nil { + invalidParams.Add(request.NewErrParamRequired("Namespace")) + } + if s.Statistic == nil { + invalidParams.Add(request.NewErrParamRequired("Statistic")) + } + if s.Dimensions != nil { + for i, v := range s.Dimensions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Dimensions", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDimensions sets the Dimensions field's value. +func (s *CustomizedMetricSpecification) SetDimensions(v []*MetricDimension) *CustomizedMetricSpecification { + s.Dimensions = v + return s +} + +// SetMetricName sets the MetricName field's value. +func (s *CustomizedMetricSpecification) SetMetricName(v string) *CustomizedMetricSpecification { + s.MetricName = &v + return s +} + +// SetNamespace sets the Namespace field's value. +func (s *CustomizedMetricSpecification) SetNamespace(v string) *CustomizedMetricSpecification { + s.Namespace = &v + return s +} + +// SetStatistic sets the Statistic field's value. +func (s *CustomizedMetricSpecification) SetStatistic(v string) *CustomizedMetricSpecification { + s.Statistic = &v + return s +} + +// SetUnit sets the Unit field's value. +func (s *CustomizedMetricSpecification) SetUnit(v string) *CustomizedMetricSpecification { + s.Unit = &v + return s +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DeleteAutoScalingGroupType type DeleteAutoScalingGroupInput struct { _ struct{} `type:"structure"` @@ -5470,7 +6519,6 @@ func (s DeleteAutoScalingGroupOutput) GoString() string { return s.String() } -// Contains the parameters for DeleteLaunchConfiguration. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/LaunchConfigurationNameType type DeleteLaunchConfigurationInput struct { _ struct{} `type:"structure"` @@ -5528,7 +6576,6 @@ func (s DeleteLaunchConfigurationOutput) GoString() string { return s.String() } -// Contains the parameters for DeleteLifecycleHook. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DeleteLifecycleHookType type DeleteLifecycleHookInput struct { _ struct{} `type:"structure"` @@ -5588,7 +6635,6 @@ func (s *DeleteLifecycleHookInput) SetLifecycleHookName(v string) *DeleteLifecyc return s } -// Contains the output of DeleteLifecycleHook. 
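// Illustrative sketch (not part of the vendored diff): building the new
// CustomizedMetricSpecification defined above, with MetricDimension values,
// for use in a target tracking policy. Namespace, metric, and dimension
// names are placeholder assumptions.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func main() {
	spec := (&autoscaling.CustomizedMetricSpecification{}).
		SetNamespace("MyApp").
		SetMetricName("QueueDepthPerInstance").
		SetStatistic("Average"). // one of the MetricStatistic enum values
		SetUnit("Count").
		SetDimensions([]*autoscaling.MetricDimension{
			{Name: aws.String("QueueName"), Value: aws.String("orders")},
		})

	// Validate reports missing required fields (MetricName, Namespace, Statistic).
	if err := spec.Validate(); err != nil {
		fmt.Println("invalid specification:", err)
	}
}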
// Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DeleteLifecycleHookAnswer type DeleteLifecycleHookOutput struct { _ struct{} `type:"structure"` @@ -5604,7 +6650,6 @@ func (s DeleteLifecycleHookOutput) GoString() string { return s.String() } -// Contains the parameters for DeleteNotificationConfiguration. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DeleteNotificationConfigurationType type DeleteNotificationConfigurationInput struct { _ struct{} `type:"structure"` @@ -5680,7 +6725,6 @@ func (s DeleteNotificationConfigurationOutput) GoString() string { return s.String() } -// Contains the parameters for DeletePolicy. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DeletePolicyType type DeletePolicyInput struct { _ struct{} `type:"structure"` @@ -5750,7 +6794,6 @@ func (s DeletePolicyOutput) GoString() string { return s.String() } -// Contains the parameters for DeleteScheduledAction. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DeleteScheduledActionType type DeleteScheduledActionInput struct { _ struct{} `type:"structure"` @@ -5825,7 +6868,6 @@ func (s DeleteScheduledActionOutput) GoString() string { return s.String() } -// Contains the parameters for DeleteTags. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DeleteTagsType type DeleteTagsInput struct { _ struct{} `type:"structure"` @@ -5905,7 +6947,6 @@ func (s DescribeAccountLimitsInput) GoString() string { return s.String() } -// Contains the parameters for DescribeAccountLimits. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribeAccountLimitsAnswer type DescribeAccountLimitsOutput struct { _ struct{} `type:"structure"` @@ -5974,7 +7015,6 @@ func (s DescribeAdjustmentTypesInput) GoString() string { return s.String() } -// Contains the parameters for DescribeAdjustmentTypes. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribeAdjustmentTypesAnswer type DescribeAdjustmentTypesOutput struct { _ struct{} `type:"structure"` @@ -5999,7 +7039,6 @@ func (s *DescribeAdjustmentTypesOutput) SetAdjustmentTypes(v []*AdjustmentType) return s } -// Contains the parameters for DescribeAutoScalingGroups. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/GroupNamesType type DescribeAutoScalingGroupsInput struct { _ struct{} `type:"structure"` @@ -6008,7 +7047,8 @@ type DescribeAutoScalingGroupsInput struct { // described. AutoScalingGroupNames []*string `type:"list"` - // The maximum number of items to return with this call. + // The maximum number of items to return with this call. The default value is + // 50 and the maximum value is 100. MaxRecords *int64 `type:"integer"` // The token for the next set of items to return. (You received this token from @@ -6044,7 +7084,6 @@ func (s *DescribeAutoScalingGroupsInput) SetNextToken(v string) *DescribeAutoSca return s } -// Contains the output for DescribeAutoScalingGroups. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/GroupsType type DescribeAutoScalingGroupsOutput struct { _ struct{} `type:"structure"` @@ -6081,7 +7120,6 @@ func (s *DescribeAutoScalingGroupsOutput) SetNextToken(v string) *DescribeAutoSc return s } -// Contains the parameters for DescribeAutoScalingInstances. 
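// Illustrative sketch (not part of the vendored diff): paging through
// DescribeAutoScalingGroups with the MaxRecords/NextToken fields documented
// above. Client setup is a placeholder.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func main() {
	svc := autoscaling.New(session.Must(session.NewSession()))

	input := &autoscaling.DescribeAutoScalingGroupsInput{
		MaxRecords: aws.Int64(100), // documented maximum per call
	}
	for {
		page, err := svc.DescribeAutoScalingGroups(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, g := range page.AutoScalingGroups {
			fmt.Println(aws.StringValue(g.AutoScalingGroupName))
		}
		if page.NextToken == nil {
			break
		}
		input.NextToken = page.NextToken
	}
}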
// Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribeAutoScalingInstancesType type DescribeAutoScalingInstancesInput struct { _ struct{} `type:"structure"` @@ -6091,7 +7129,8 @@ type DescribeAutoScalingInstancesInput struct { // not exist, it is ignored with no error. InstanceIds []*string `type:"list"` - // The maximum number of items to return with this call. + // The maximum number of items to return with this call. The default value is + // 50 and the maximum value is 100. MaxRecords *int64 `type:"integer"` // The token for the next set of items to return. (You received this token from @@ -6127,7 +7166,6 @@ func (s *DescribeAutoScalingInstancesInput) SetNextToken(v string) *DescribeAuto return s } -// Contains the output of DescribeAutoScalingInstances. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/InstancesType type DescribeAutoScalingInstancesOutput struct { _ struct{} `type:"structure"` @@ -6177,7 +7215,6 @@ func (s DescribeAutoScalingNotificationTypesInput) GoString() string { return s.String() } -// Contains the output of DescribeAutoScalingNotificationTypes. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribeAutoScalingNotificationTypesAnswer type DescribeAutoScalingNotificationTypesOutput struct { _ struct{} `type:"structure"` @@ -6202,7 +7239,6 @@ func (s *DescribeAutoScalingNotificationTypesOutput) SetAutoScalingNotificationT return s } -// Contains the parameters for DescribeLaunchConfigurations. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/LaunchConfigurationNamesType type DescribeLaunchConfigurationsInput struct { _ struct{} `type:"structure"` @@ -6211,7 +7247,8 @@ type DescribeLaunchConfigurationsInput struct { // are described. LaunchConfigurationNames []*string `type:"list"` - // The maximum number of items to return with this call. The default is 100. + // The maximum number of items to return with this call. The default value is + // 50 and the maximum value is 100. MaxRecords *int64 `type:"integer"` // The token for the next set of items to return. (You received this token from @@ -6247,7 +7284,6 @@ func (s *DescribeLaunchConfigurationsInput) SetNextToken(v string) *DescribeLaun return s } -// Contains the output of DescribeLaunchConfigurations. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/LaunchConfigurationsType type DescribeLaunchConfigurationsOutput struct { _ struct{} `type:"structure"` @@ -6299,7 +7335,6 @@ func (s DescribeLifecycleHookTypesInput) GoString() string { return s.String() } -// Contains the output of DescribeLifecycleHookTypes. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribeLifecycleHookTypesAnswer type DescribeLifecycleHookTypesOutput struct { _ struct{} `type:"structure"` @@ -6324,7 +7359,6 @@ func (s *DescribeLifecycleHookTypesOutput) SetLifecycleHookTypes(v []*string) *D return s } -// Contains the parameters for DescribeLifecycleHooks. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribeLifecycleHooksType type DescribeLifecycleHooksInput struct { _ struct{} `type:"structure"` @@ -6377,7 +7411,6 @@ func (s *DescribeLifecycleHooksInput) SetLifecycleHookNames(v []*string) *Descri return s } -// Contains the output of DescribeLifecycleHooks. 
// Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribeLifecycleHooksAnswer type DescribeLifecycleHooksOutput struct { _ struct{} `type:"structure"` @@ -6402,7 +7435,6 @@ func (s *DescribeLifecycleHooksOutput) SetLifecycleHooks(v []*LifecycleHook) *De return s } -// Contains the parameters for DescribeLoadBalancerTargetGroups. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribeLoadBalancerTargetGroupsRequest type DescribeLoadBalancerTargetGroupsInput struct { _ struct{} `type:"structure"` @@ -6412,7 +7444,8 @@ type DescribeLoadBalancerTargetGroupsInput struct { // AutoScalingGroupName is a required field AutoScalingGroupName *string `min:"1" type:"string" required:"true"` - // The maximum number of items to return with this call. + // The maximum number of items to return with this call. The default value is + // 50 and the maximum value is 100. MaxRecords *int64 `type:"integer"` // The token for the next set of items to return. (You received this token from @@ -6464,7 +7497,6 @@ func (s *DescribeLoadBalancerTargetGroupsInput) SetNextToken(v string) *Describe return s } -// Contains the output of DescribeLoadBalancerTargetGroups. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribeLoadBalancerTargetGroupsResponse type DescribeLoadBalancerTargetGroupsOutput struct { _ struct{} `type:"structure"` @@ -6499,7 +7531,6 @@ func (s *DescribeLoadBalancerTargetGroupsOutput) SetNextToken(v string) *Describ return s } -// Contains the parameters for DescribeLoadBalancers. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribeLoadBalancersRequest type DescribeLoadBalancersInput struct { _ struct{} `type:"structure"` @@ -6509,7 +7540,8 @@ type DescribeLoadBalancersInput struct { // AutoScalingGroupName is a required field AutoScalingGroupName *string `min:"1" type:"string" required:"true"` - // The maximum number of items to return with this call. + // The maximum number of items to return with this call. The default value is + // 50 and the maximum value is 100. MaxRecords *int64 `type:"integer"` // The token for the next set of items to return. (You received this token from @@ -6561,7 +7593,6 @@ func (s *DescribeLoadBalancersInput) SetNextToken(v string) *DescribeLoadBalance return s } -// Contains the output of DescribeLoadBalancers. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribeLoadBalancersResponse type DescribeLoadBalancersOutput struct { _ struct{} `type:"structure"` @@ -6611,7 +7642,6 @@ func (s DescribeMetricCollectionTypesInput) GoString() string { return s.String() } -// Contains the output of DescribeMetricsCollectionTypes. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribeMetricCollectionTypesAnswer type DescribeMetricCollectionTypesOutput struct { _ struct{} `type:"structure"` @@ -6645,7 +7675,6 @@ func (s *DescribeMetricCollectionTypesOutput) SetMetrics(v []*MetricCollectionTy return s } -// Contains the parameters for DescribeNotificationConfigurations. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribeNotificationConfigurationsType type DescribeNotificationConfigurationsInput struct { _ struct{} `type:"structure"` @@ -6653,7 +7682,8 @@ type DescribeNotificationConfigurationsInput struct { // The name of the group. AutoScalingGroupNames []*string `type:"list"` - // The maximum number of items to return with this call. 
+ // The maximum number of items to return with this call. The default value is + // 50 and the maximum value is 100. MaxRecords *int64 `type:"integer"` // The token for the next set of items to return. (You received this token from @@ -6689,7 +7719,6 @@ func (s *DescribeNotificationConfigurationsInput) SetNextToken(v string) *Descri return s } -// Contains the output from DescribeNotificationConfigurations. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribeNotificationConfigurationsAnswer type DescribeNotificationConfigurationsOutput struct { _ struct{} `type:"structure"` @@ -6726,7 +7755,6 @@ func (s *DescribeNotificationConfigurationsOutput) SetNotificationConfigurations return s } -// Contains the parameters for DescribePolicies. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribePoliciesType type DescribePoliciesInput struct { _ struct{} `type:"structure"` @@ -6734,7 +7762,8 @@ type DescribePoliciesInput struct { // The name of the group. AutoScalingGroupName *string `min:"1" type:"string"` - // The maximum number of items to be returned with each call. + // The maximum number of items to be returned with each call. The default value + // is 50 and the maximum value is 100. MaxRecords *int64 `type:"integer"` // The token for the next set of items to return. (You received this token from @@ -6804,7 +7833,6 @@ func (s *DescribePoliciesInput) SetPolicyTypes(v []*string) *DescribePoliciesInp return s } -// Contains the output of DescribePolicies. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/PoliciesType type DescribePoliciesOutput struct { _ struct{} `type:"structure"` @@ -6839,7 +7867,6 @@ func (s *DescribePoliciesOutput) SetScalingPolicies(v []*ScalingPolicy) *Describ return s } -// Contains the parameters for DescribeScalingActivities. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribeScalingActivitiesType type DescribeScalingActivitiesInput struct { _ struct{} `type:"structure"` @@ -6854,7 +7881,8 @@ type DescribeScalingActivitiesInput struct { // The name of the group. AutoScalingGroupName *string `min:"1" type:"string"` - // The maximum number of items to return with this call. + // The maximum number of items to return with this call. The default value is + // 100. MaxRecords *int64 `type:"integer"` // The token for the next set of items to return. (You received this token from @@ -6909,7 +7937,6 @@ func (s *DescribeScalingActivitiesInput) SetNextToken(v string) *DescribeScaling return s } -// Contains the output of DescribeScalingActivities. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/ActivitiesType type DescribeScalingActivitiesOutput struct { _ struct{} `type:"structure"` @@ -6962,7 +7989,6 @@ func (s DescribeScalingProcessTypesInput) GoString() string { return s.String() } -// Contains the output of DescribeScalingProcessTypes. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/ProcessesType type DescribeScalingProcessTypesOutput struct { _ struct{} `type:"structure"` @@ -6987,7 +8013,6 @@ func (s *DescribeScalingProcessTypesOutput) SetProcesses(v []*ProcessType) *Desc return s } -// Contains the parameters for DescribeScheduledActions. 
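// Illustrative sketch (not part of the vendored diff): listing only the new
// TargetTrackingScaling policies for one group via the PolicyTypes filter on
// DescribePolicies. The group name is a placeholder.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func main() {
	svc := autoscaling.New(session.Must(session.NewSession()))

	out, err := svc.DescribePolicies(&autoscaling.DescribePoliciesInput{
		AutoScalingGroupName: aws.String("my-asg"),
		PolicyTypes:          []*string{aws.String("TargetTrackingScaling")},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range out.ScalingPolicies {
		fmt.Println(aws.StringValue(p.PolicyName), aws.StringValue(p.PolicyType))
	}
}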
// Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribeScheduledActionsType type DescribeScheduledActionsInput struct { _ struct{} `type:"structure"` @@ -6999,7 +8024,8 @@ type DescribeScheduledActionsInput struct { // provided, this parameter is ignored. EndTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` - // The maximum number of items to return with this call. + // The maximum number of items to return with this call. The default value is + // 50 and the maximum value is 100. MaxRecords *int64 `type:"integer"` // The token for the next set of items to return. (You received this token from @@ -7079,7 +8105,6 @@ func (s *DescribeScheduledActionsInput) SetStartTime(v time.Time) *DescribeSched return s } -// Contains the output of DescribeScheduledActions. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/ScheduledActionsType type DescribeScheduledActionsOutput struct { _ struct{} `type:"structure"` @@ -7114,7 +8139,6 @@ func (s *DescribeScheduledActionsOutput) SetScheduledUpdateGroupActions(v []*Sch return s } -// Contains the parameters for DescribeTags. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribeTagsType type DescribeTagsInput struct { _ struct{} `type:"structure"` @@ -7122,7 +8146,8 @@ type DescribeTagsInput struct { // A filter used to scope the tags to return. Filters []*Filter `type:"list"` - // The maximum number of items to return with this call. + // The maximum number of items to return with this call. The default value is + // 50 and the maximum value is 100. MaxRecords *int64 `type:"integer"` // The token for the next set of items to return. (You received this token from @@ -7158,7 +8183,6 @@ func (s *DescribeTagsInput) SetNextToken(v string) *DescribeTagsInput { return s } -// Contains the output of DescribeTags. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/TagsType type DescribeTagsOutput struct { _ struct{} `type:"structure"` @@ -7208,7 +8232,6 @@ func (s DescribeTerminationPolicyTypesInput) GoString() string { return s.String() } -// Contains the output of DescribeTerminationPolicyTypes. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribeTerminationPolicyTypesAnswer type DescribeTerminationPolicyTypesOutput struct { _ struct{} `type:"structure"` @@ -7234,7 +8257,6 @@ func (s *DescribeTerminationPolicyTypesOutput) SetTerminationPolicyTypes(v []*st return s } -// Contains the parameters for DetachInstances. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DetachInstancesQuery type DetachInstancesInput struct { _ struct{} `type:"structure"` @@ -7301,7 +8323,6 @@ func (s *DetachInstancesInput) SetShouldDecrementDesiredCapacity(v bool) *Detach return s } -// Contains the output of DetachInstances. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DetachInstancesAnswer type DetachInstancesOutput struct { _ struct{} `type:"structure"` @@ -7397,7 +8418,6 @@ func (s DetachLoadBalancerTargetGroupsOutput) GoString() string { return s.String() } -// Contains the parameters for DetachLoadBalancers. 
// Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DetachLoadBalancersType type DetachLoadBalancersInput struct { _ struct{} `type:"structure"` @@ -7454,7 +8474,6 @@ func (s *DetachLoadBalancersInput) SetLoadBalancerNames(v []*string) *DetachLoad return s } -// Contains the output for DetachLoadBalancers. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DetachLoadBalancersResultType type DetachLoadBalancersOutput struct { _ struct{} `type:"structure"` @@ -7470,7 +8489,6 @@ func (s DetachLoadBalancersOutput) GoString() string { return s.String() } -// Contains the parameters for DisableMetricsCollection. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DisableMetricsCollectionQuery type DisableMetricsCollectionInput struct { _ struct{} `type:"structure"` @@ -7667,7 +8685,6 @@ func (s *Ebs) SetVolumeType(v string) *Ebs { return s } -// Contains the parameters for EnableMetricsCollection. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/EnableMetricsCollectionQuery type EnableMetricsCollectionInput struct { _ struct{} `type:"structure"` @@ -7819,7 +8836,6 @@ func (s *EnabledMetric) SetMetric(v string) *EnabledMetric { return s } -// Contains the parameters for EnteStandby. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/EnterStandbyQuery type EnterStandbyInput struct { _ struct{} `type:"structure"` @@ -7889,7 +8905,6 @@ func (s *EnterStandbyInput) SetShouldDecrementDesiredCapacity(v bool) *EnterStan return s } -// Contains the output of EnterStandby. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/EnterStandbyAnswer type EnterStandbyOutput struct { _ struct{} `type:"structure"` @@ -7914,7 +8929,6 @@ func (s *EnterStandbyOutput) SetActivities(v []*Activity) *EnterStandbyOutput { return s } -// Contains the parameters for ExecutePolicy. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/ExecutePolicyType type ExecutePolicyInput struct { _ struct{} `type:"structure"` @@ -8031,7 +9045,6 @@ func (s ExecutePolicyOutput) GoString() string { return s.String() } -// Contains the parameters for ExitStandby. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/ExitStandbyQuery type ExitStandbyInput struct { _ struct{} `type:"structure"` @@ -8083,7 +9096,6 @@ func (s *ExitStandbyInput) SetInstanceIds(v []*string) *ExitStandbyInput { return s } -// Contains the parameters for ExitStandby. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/ExitStandbyAnswer type ExitStandbyOutput struct { _ struct{} `type:"structure"` @@ -8494,7 +9506,8 @@ type InstanceDetails struct { // InstanceId is a required field InstanceId *string `min:"1" type:"string" required:"true"` - // The launch configuration associated with the instance. + // The launch configuration used to launch the instance. This value is not available + // if you attached the instance to the Auto Scaling group. // // LaunchConfigurationName is a required field LaunchConfigurationName *string `min:"1" type:"string" required:"true"` @@ -8565,12 +9578,12 @@ func (s *InstanceDetails) SetProtectedFromScaleIn(v bool) *InstanceDetails { return s } -// Describes whether instance monitoring is enabled. +// Describes whether detailed monitoring is enabled for the Auto Scaling instances. 
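// Illustrative sketch (not part of the vendored diff): disabling and
// re-enabling group metrics, which the UpdateAutoScalingGroup documentation
// above requires before switching to a launch configuration that has
// InstanceMonitoring set to false. The group name is a placeholder.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func main() {
	svc := autoscaling.New(session.Must(session.NewSession()))

	// Stop collecting all group-level metrics first.
	if _, err := svc.DisableMetricsCollection(&autoscaling.DisableMetricsCollectionInput{
		AutoScalingGroupName: aws.String("my-asg"),
	}); err != nil {
		log.Fatal(err)
	}

	// Re-enable later if needed; Granularity is required ("1Minute").
	if _, err := svc.EnableMetricsCollection(&autoscaling.EnableMetricsCollectionInput{
		AutoScalingGroupName: aws.String("my-asg"),
		Granularity:          aws.String("1Minute"),
	}); err != nil {
		log.Fatal(err)
	}
}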
// Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/InstanceMonitoring type InstanceMonitoring struct { _ struct{} `type:"structure"` - // If True, instance monitoring is enabled. + // If true, detailed monitoring is enabled. Otherwise, basic monitoring is enabled. Enabled *bool `type:"boolean"` } @@ -8796,14 +9809,9 @@ func (s *LaunchConfiguration) SetUserData(v string) *LaunchConfiguration { } // Describes a lifecycle hook, which tells Auto Scaling that you want to perform -// an action when an instance launches or terminates. When you have a lifecycle -// hook in place, the Auto Scaling group will either: -// -// * Pause the instance after it launches, but before it is put into service -// -// * Pause the instance as it terminates, but before it is fully terminated +// an action whenever it launches instances or whenever it terminates instances. // -// For more information, see Auto Scaling Lifecycle (http://docs.aws.amazon.com/autoscaling/latest/userguide/AutoScalingGroupLifecycle.html) +// For more information, see Auto Scaling Lifecycle Hooks (http://docs.aws.amazon.com/autoscaling/latest/userguide/lifecycle-hooks.html) // in the Auto Scaling User Guide. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/LifecycleHook type LifecycleHook struct { @@ -8823,9 +9831,8 @@ type LifecycleHook struct { GlobalTimeout *int64 `type:"integer"` // The maximum time, in seconds, that can elapse before the lifecycle hook times - // out. The default is 3600 seconds (1 hour). When the lifecycle hook times - // out, Auto Scaling performs the default action. You can prevent the lifecycle - // hook from timing out by calling RecordLifecycleActionHeartbeat. + // out. If the lifecycle hook times out, Auto Scaling performs the default action. + // You can prevent the lifecycle hook from timing out by calling RecordLifecycleActionHeartbeat. HeartbeatTimeout *int64 `type:"integer"` // The name of the lifecycle hook. @@ -8839,24 +9846,9 @@ type LifecycleHook struct { // a message to the notification target. NotificationMetadata *string `min:"1" type:"string"` - // The ARN of the notification target that Auto Scaling uses to notify you when - // an instance is in the transition state for the lifecycle hook. This ARN target - // can be either an SQS queue or an SNS topic. The notification message sent - // to the target includes the following: - // - // * Lifecycle action token - // - // * User account ID - // - // * Name of the Auto Scaling group - // - // * Lifecycle hook name - // - // * EC2 instance ID - // - // * Lifecycle transition - // - // * Notification metadata + // The ARN of the target that Auto Scaling sends notifications to when an instance + // is in the transition state for the lifecycle hook. The notification target + // can be either an SQS queue or an SNS topic. NotificationTargetARN *string `min:"1" type:"string"` // The ARN of the IAM role that allows the Auto Scaling group to publish to @@ -8928,7 +9920,123 @@ func (s *LifecycleHook) SetRoleARN(v string) *LifecycleHook { return s } -// Describes the state of a Classic load balancer. +// Describes a lifecycle hook, which tells Auto Scaling that you want to perform +// an action whenever it launches instances or whenever it terminates instances. +// +// For more information, see Auto Scaling Lifecycle Hooks (http://docs.aws.amazon.com/autoscaling/latest/userguide/lifecycle-hooks.html) +// in the Auto Scaling User Guide. 
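// Illustrative sketch (not part of the vendored diff): attaching a lifecycle
// hook at group creation time through the new LifecycleHookSpecificationList
// field on CreateAutoScalingGroupInput. Group, launch configuration, subnet,
// and hook names are placeholder assumptions.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func main() {
	svc := autoscaling.New(session.Must(session.NewSession()))

	_, err := svc.CreateAutoScalingGroup(&autoscaling.CreateAutoScalingGroupInput{
		AutoScalingGroupName:    aws.String("my-asg"),
		LaunchConfigurationName: aws.String("my-launch-config"),
		MinSize:                 aws.Int64(1),
		MaxSize:                 aws.Int64(3),
		VPCZoneIdentifier:       aws.String("subnet-0123abcd"),
		LifecycleHookSpecificationList: []*autoscaling.LifecycleHookSpecification{
			{
				LifecycleHookName:   aws.String("wait-for-bootstrap"),
				LifecycleTransition: aws.String("autoscaling:EC2_INSTANCE_LAUNCHING"),
				HeartbeatTimeout:    aws.Int64(300),
				DefaultResult:       aws.String("CONTINUE"),
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}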
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/LifecycleHookSpecification +type LifecycleHookSpecification struct { + _ struct{} `type:"structure"` + + // Defines the action the Auto Scaling group should take when the lifecycle + // hook timeout elapses or if an unexpected failure occurs. The valid values + // are CONTINUE and ABANDON. The default value is CONTINUE. + DefaultResult *string `type:"string"` + + // The maximum time, in seconds, that can elapse before the lifecycle hook times + // out. If the lifecycle hook times out, Auto Scaling performs the default action. + // You can prevent the lifecycle hook from timing out by calling RecordLifecycleActionHeartbeat. + HeartbeatTimeout *int64 `type:"integer"` + + // The name of the lifecycle hook. + // + // LifecycleHookName is a required field + LifecycleHookName *string `min:"1" type:"string" required:"true"` + + // The state of the EC2 instance to which you want to attach the lifecycle hook. + // For a list of lifecycle hook types, see DescribeLifecycleHookTypes. + LifecycleTransition *string `type:"string"` + + // Additional information that you want to include any time Auto Scaling sends + // a message to the notification target. + NotificationMetadata *string `min:"1" type:"string"` + + // The ARN of the target that Auto Scaling sends notifications to when an instance + // is in the transition state for the lifecycle hook. The notification target + // can be either an SQS queue or an SNS topic. + NotificationTargetARN *string `type:"string"` + + // The ARN of the IAM role that allows the Auto Scaling group to publish to + // the specified notification target. + RoleARN *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s LifecycleHookSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleHookSpecification) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LifecycleHookSpecification) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleHookSpecification"} + if s.LifecycleHookName == nil { + invalidParams.Add(request.NewErrParamRequired("LifecycleHookName")) + } + if s.LifecycleHookName != nil && len(*s.LifecycleHookName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LifecycleHookName", 1)) + } + if s.NotificationMetadata != nil && len(*s.NotificationMetadata) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NotificationMetadata", 1)) + } + if s.RoleARN != nil && len(*s.RoleARN) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleARN", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDefaultResult sets the DefaultResult field's value. +func (s *LifecycleHookSpecification) SetDefaultResult(v string) *LifecycleHookSpecification { + s.DefaultResult = &v + return s +} + +// SetHeartbeatTimeout sets the HeartbeatTimeout field's value. +func (s *LifecycleHookSpecification) SetHeartbeatTimeout(v int64) *LifecycleHookSpecification { + s.HeartbeatTimeout = &v + return s +} + +// SetLifecycleHookName sets the LifecycleHookName field's value. +func (s *LifecycleHookSpecification) SetLifecycleHookName(v string) *LifecycleHookSpecification { + s.LifecycleHookName = &v + return s +} + +// SetLifecycleTransition sets the LifecycleTransition field's value. 
+func (s *LifecycleHookSpecification) SetLifecycleTransition(v string) *LifecycleHookSpecification { + s.LifecycleTransition = &v + return s +} + +// SetNotificationMetadata sets the NotificationMetadata field's value. +func (s *LifecycleHookSpecification) SetNotificationMetadata(v string) *LifecycleHookSpecification { + s.NotificationMetadata = &v + return s +} + +// SetNotificationTargetARN sets the NotificationTargetARN field's value. +func (s *LifecycleHookSpecification) SetNotificationTargetARN(v string) *LifecycleHookSpecification { + s.NotificationTargetARN = &v + return s +} + +// SetRoleARN sets the RoleARN field's value. +func (s *LifecycleHookSpecification) SetRoleARN(v string) *LifecycleHookSpecification { + s.RoleARN = &v + return s +} + +// Describes the state of a Classic Load Balancer. // // If you specify a load balancer when creating the Auto Scaling group, the // state of the load balancer is InService. @@ -9084,6 +10192,60 @@ func (s *MetricCollectionType) SetMetric(v string) *MetricCollectionType { return s } +// Describes the dimension of a metric. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/MetricDimension +type MetricDimension struct { + _ struct{} `type:"structure"` + + // The name of the dimension. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // The value of the dimension. + // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s MetricDimension) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricDimension) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MetricDimension) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricDimension"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *MetricDimension) SetName(v string) *MetricDimension { + s.Name = &v + return s +} + +// SetValue sets the Value field's value. +func (s *MetricDimension) SetValue(v string) *MetricDimension { + s.Value = &v + return s +} + // Describes a granularity of a metric. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/MetricGranularityType type MetricGranularityType struct { @@ -9163,6 +10325,81 @@ func (s *NotificationConfiguration) SetTopicARN(v string) *NotificationConfigura return s } +// Configures a predefined metric for a target tracking policy. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/PredefinedMetricSpecification +type PredefinedMetricSpecification struct { + _ struct{} `type:"structure"` + + // The metric type. + // + // PredefinedMetricType is a required field + PredefinedMetricType *string `type:"string" required:"true" enum:"MetricType"` + + // Identifies the resource associated with the metric type. 
The following predefined + // metrics are available: + // + // * ASGAverageCPUUtilization - average CPU utilization of the Auto Scaling + // group + // + // * ASGAverageNetworkIn - average number of bytes received on all network + // interfaces by the Auto Scaling group + // + // * ASGAverageNetworkOut - average number of bytes sent out on all network + // interfaces by the Auto Scaling group + // + // * ALBRequestCountPerTarget - number of requests completed per target in + // an Application Load Balancer target group + // + // For predefined metric types ASGAverageCPUUtilization, ASGAverageNetworkIn + // and ASGAverageNetworkOut, the parameter must not be specified as the resource + // associated with the metric type is the Auto Scaling group. For predefined + // metric type ALBRequestCountPerTarget, the parameter must be specified in + // the format: app/load-balancer-name/load-balancer-id/targetgroup/target-group-name/target-group-id, + // where app/load-balancer-name/load-balancer-id is the final portion of the + // load balancer ARN, and targetgroup/target-group-name/target-group-id is the + // final portion of the target group ARN. The target group must be attached + // to the Auto Scaling group. + ResourceLabel *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PredefinedMetricSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PredefinedMetricSpecification) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PredefinedMetricSpecification) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PredefinedMetricSpecification"} + if s.PredefinedMetricType == nil { + invalidParams.Add(request.NewErrParamRequired("PredefinedMetricType")) + } + if s.ResourceLabel != nil && len(*s.ResourceLabel) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceLabel", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPredefinedMetricType sets the PredefinedMetricType field's value. +func (s *PredefinedMetricSpecification) SetPredefinedMetricType(v string) *PredefinedMetricSpecification { + s.PredefinedMetricType = &v + return s +} + +// SetResourceLabel sets the ResourceLabel field's value. +func (s *PredefinedMetricSpecification) SetResourceLabel(v string) *PredefinedMetricSpecification { + s.ResourceLabel = &v + return s +} + // Describes a process type. // // For more information, see Auto Scaling Processes (http://docs.aws.amazon.com/autoscaling/latest/userguide/as-suspend-resume-processes.html#process-types) @@ -9209,7 +10446,6 @@ func (s *ProcessType) SetProcessName(v string) *ProcessType { return s } -// Contains the parameters for PutLifecycleHook. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/PutLifecycleHookType type PutLifecycleHookInput struct { _ struct{} `type:"structure"` @@ -9225,10 +10461,12 @@ type PutLifecycleHookInput struct { // be either CONTINUE or ABANDON. The default value is ABANDON. DefaultResult *string `type:"string"` - // The amount of time, in seconds, that can elapse before the lifecycle hook - // times out. When the lifecycle hook times out, Auto Scaling performs the default - // action. You can prevent the lifecycle hook from timing out by calling RecordLifecycleActionHeartbeat. - // The default is 3600 seconds (1 hour). 
+ // The maximum time, in seconds, that can elapse before the lifecycle hook times + // out. The range is from 30 to 7200 seconds. The default is 3600 seconds (1 + // hour). + // + // If the lifecycle hook times out, Auto Scaling performs the default action. + // You can prevent the lifecycle hook from timing out by calling RecordLifecycleActionHeartbeat. HeartbeatTimeout *int64 `type:"integer"` // The name of the lifecycle hook. @@ -9355,7 +10593,6 @@ func (s *PutLifecycleHookInput) SetRoleARN(v string) *PutLifecycleHookInput { return s } -// Contains the output of PutLifecycleHook. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/PutLifecycleHookAnswer type PutLifecycleHookOutput struct { _ struct{} `type:"structure"` @@ -9371,7 +10608,6 @@ func (s PutLifecycleHookOutput) GoString() string { return s.String() } -// Contains the parameters for PutNotificationConfiguration. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/PutNotificationConfigurationType type PutNotificationConfigurationInput struct { _ struct{} `type:"structure"` @@ -9462,19 +10698,18 @@ func (s PutNotificationConfigurationOutput) GoString() string { return s.String() } -// Contains the parameters for PutScalingPolicy. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/PutScalingPolicyType type PutScalingPolicyInput struct { _ struct{} `type:"structure"` - // The adjustment type. Valid values are ChangeInCapacity, ExactCapacity, and - // PercentChangeInCapacity. + // The adjustment type. The valid values are ChangeInCapacity, ExactCapacity, + // and PercentChangeInCapacity. + // + // This parameter is supported if the policy type is SimpleScaling or StepScaling. // // For more information, see Dynamic Scaling (http://docs.aws.amazon.com/autoscaling/latest/userguide/as-scale-based-on-demand.html) // in the Auto Scaling User Guide. - // - // AdjustmentType is a required field - AdjustmentType *string `min:"1" type:"string" required:"true"` + AdjustmentType *string `min:"1" type:"string"` // The name or ARN of the group. // @@ -9485,7 +10720,7 @@ type PutScalingPolicyInput struct { // the next scaling activity can start. If this parameter is not specified, // the default cooldown period for the group applies. // - // This parameter is not supported unless the policy type is SimpleScaling. + // This parameter is supported if the policy type is SimpleScaling. // // For more information, see Auto Scaling Cooldowns (http://docs.aws.amazon.com/autoscaling/latest/userguide/Cooldown.html) // in the Auto Scaling User Guide. @@ -9495,20 +10730,22 @@ type PutScalingPolicyInput struct { // to the CloudWatch metrics. The default is to use the value specified for // the default cooldown period for the group. // - // This parameter is not supported if the policy type is SimpleScaling. + // This parameter is supported if the policy type is StepScaling or TargetTrackingScaling. EstimatedInstanceWarmup *int64 `type:"integer"` - // The aggregation type for the CloudWatch metrics. Valid values are Minimum, + // The aggregation type for the CloudWatch metrics. The valid values are Minimum, // Maximum, and Average. If the aggregation type is null, the value is treated // as Average. // - // This parameter is not supported if the policy type is SimpleScaling. + // This parameter is supported if the policy type is StepScaling. MetricAggregationType *string `min:"1" type:"string"` // The minimum number of instances to scale. 
If the value of AdjustmentType // is PercentChangeInCapacity, the scaling policy changes the DesiredCapacity // of the Auto Scaling group by at least this many instances. Otherwise, the // error is ValidationError. + // + // This parameter is supported if the policy type is SimpleScaling or StepScaling. MinAdjustmentMagnitude *int64 `type:"integer"` // Available for backward compatibility. Use MinAdjustmentMagnitude instead. @@ -9519,8 +10756,8 @@ type PutScalingPolicyInput struct { // PolicyName is a required field PolicyName *string `min:"1" type:"string" required:"true"` - // The policy type. Valid values are SimpleScaling and StepScaling. If the policy - // type is null, the value is treated as SimpleScaling. + // The policy type. The valid values are SimpleScaling, StepScaling, and TargetTrackingScaling. + // If the policy type is null, the value is treated as SimpleScaling. PolicyType *string `min:"1" type:"string"` // The amount by which to scale, based on the specified adjustment type. A positive @@ -9537,6 +10774,12 @@ type PutScalingPolicyInput struct { // This parameter is required if the policy type is StepScaling and not supported // otherwise. StepAdjustments []*StepAdjustment `type:"list"` + + // A target tracking policy. + // + // This parameter is required if the policy type is TargetTrackingScaling and + // not supported otherwise. + TargetTrackingConfiguration *TargetTrackingConfiguration `type:"structure"` } // String returns the string representation @@ -9552,9 +10795,6 @@ func (s PutScalingPolicyInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *PutScalingPolicyInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "PutScalingPolicyInput"} - if s.AdjustmentType == nil { - invalidParams.Add(request.NewErrParamRequired("AdjustmentType")) - } if s.AdjustmentType != nil && len(*s.AdjustmentType) < 1 { invalidParams.Add(request.NewErrParamMinLen("AdjustmentType", 1)) } @@ -9586,6 +10826,11 @@ func (s *PutScalingPolicyInput) Validate() error { } } } + if s.TargetTrackingConfiguration != nil { + if err := s.TargetTrackingConfiguration.Validate(); err != nil { + invalidParams.AddNested("TargetTrackingConfiguration", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -9659,11 +10904,20 @@ func (s *PutScalingPolicyInput) SetStepAdjustments(v []*StepAdjustment) *PutScal return s } +// SetTargetTrackingConfiguration sets the TargetTrackingConfiguration field's value. +func (s *PutScalingPolicyInput) SetTargetTrackingConfiguration(v *TargetTrackingConfiguration) *PutScalingPolicyInput { + s.TargetTrackingConfiguration = v + return s +} + // Contains the output of PutScalingPolicy. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/PolicyARNType type PutScalingPolicyOutput struct { _ struct{} `type:"structure"` + // The CloudWatch alarms created for the target tracking policy. + Alarms []*Alarm `type:"list"` + // The Amazon Resource Name (ARN) of the policy. PolicyARN *string `min:"1" type:"string"` } @@ -9678,13 +10932,18 @@ func (s PutScalingPolicyOutput) GoString() string { return s.String() } +// SetAlarms sets the Alarms field's value. +func (s *PutScalingPolicyOutput) SetAlarms(v []*Alarm) *PutScalingPolicyOutput { + s.Alarms = v + return s +} + // SetPolicyARN sets the PolicyARN field's value. 
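// A minimal sketch of how the TargetTrackingConfiguration field added to
// PutScalingPolicyInput above might be used. It assumes an existing
// *AutoScaling client (svc), an Auto Scaling group named "my-asg", and the
// "github.com/aws/aws-sdk-go/aws" helpers; the client and group name are
// illustrative, not part of this change.
//
//    out, err := svc.PutScalingPolicy(&autoscaling.PutScalingPolicyInput{
//        AutoScalingGroupName: aws.String("my-asg"),
//        PolicyName:           aws.String("cpu-target-50"),
//        PolicyType:           aws.String("TargetTrackingScaling"),
//        TargetTrackingConfiguration: &autoscaling.TargetTrackingConfiguration{
//            PredefinedMetricSpecification: &autoscaling.PredefinedMetricSpecification{
//                PredefinedMetricType: aws.String(autoscaling.MetricTypeAsgaverageCpuutilization),
//            },
//            TargetValue: aws.Float64(50.0),
//        },
//    })
//    if err == nil {
//        // out.Alarms holds the CloudWatch alarms created for the policy.
//        fmt.Println(aws.StringValue(out.PolicyARN), len(out.Alarms))
//    }
//
// Note that AdjustmentType is no longer required, which matches the relaxed
// Validate above: a target tracking policy needs only AutoScalingGroupName,
// PolicyName, and TargetTrackingConfiguration.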
func (s *PutScalingPolicyOutput) SetPolicyARN(v string) *PutScalingPolicyOutput { s.PolicyARN = &v return s } -// Contains the parameters for PutScheduledUpdateGroupAction. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/PutScheduledUpdateGroupActionType type PutScheduledUpdateGroupActionInput struct { _ struct{} `type:"structure"` @@ -9834,7 +11093,6 @@ func (s PutScheduledUpdateGroupActionOutput) GoString() string { return s.String() } -// Contains the parameters for RecordLifecycleActionHeartbeat. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/RecordLifecycleActionHeartbeatType type RecordLifecycleActionHeartbeatInput struct { _ struct{} `type:"structure"` @@ -9920,7 +11178,6 @@ func (s *RecordLifecycleActionHeartbeatInput) SetLifecycleHookName(v string) *Re return s } -// Contains the output of RecordLifecycleActionHeartBeat. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/RecordLifecycleActionHeartbeatAnswer type RecordLifecycleActionHeartbeatOutput struct { _ struct{} `type:"structure"` @@ -9967,7 +11224,7 @@ type ScalingPolicy struct { AutoScalingGroupName *string `min:"1" type:"string"` // The amount of time, in seconds, after a scaling activity completes before - // any further trigger-related scaling activities can start. + // any further dynamic scaling activities can start. Cooldown *int64 `type:"integer"` // The estimated time, in seconds, until a newly launched instance can contribute @@ -10004,6 +11261,9 @@ type ScalingPolicy struct { // A set of adjustments that enable you to scale based on the size of the alarm // breach. StepAdjustments []*StepAdjustment `type:"list"` + + // A target tracking policy. + TargetTrackingConfiguration *TargetTrackingConfiguration `type:"structure"` } // String returns the string representation @@ -10094,7 +11354,12 @@ func (s *ScalingPolicy) SetStepAdjustments(v []*StepAdjustment) *ScalingPolicy { return s } -// Contains the parameters for SuspendProcesses and ResumeProcesses. +// SetTargetTrackingConfiguration sets the TargetTrackingConfiguration field's value. +func (s *ScalingPolicy) SetTargetTrackingConfiguration(v *TargetTrackingConfiguration) *ScalingPolicy { + s.TargetTrackingConfiguration = v + return s +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/ScalingProcessQuery type ScalingProcessQuery struct { _ struct{} `type:"structure"` @@ -10274,7 +11539,6 @@ func (s *ScheduledUpdateGroupAction) SetTime(v time.Time) *ScheduledUpdateGroupA return s } -// Contains the parameters for SetDesiredCapacity. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/SetDesiredCapacityType type SetDesiredCapacityInput struct { _ struct{} `type:"structure"` @@ -10358,7 +11622,6 @@ func (s SetDesiredCapacityOutput) GoString() string { return s.String() } -// Contains the parameters for SetInstanceHealth. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/SetInstanceHealthQuery type SetInstanceHealthInput struct { _ struct{} `type:"structure"` @@ -10450,7 +11713,6 @@ func (s SetInstanceHealthOutput) GoString() string { return s.String() } -// Contains the parameters for SetInstanceProtection. 
// Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/SetInstanceProtectionQuery type SetInstanceProtectionInput struct { _ struct{} `type:"structure"` @@ -10522,7 +11784,6 @@ func (s *SetInstanceProtectionInput) SetProtectedFromScaleIn(v bool) *SetInstanc return s } -// Contains the output of SetInstanceProtection. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/SetInstanceProtectionAnswer type SetInstanceProtectionOutput struct { _ struct{} `type:"structure"` @@ -10828,7 +12089,88 @@ func (s *TagDescription) SetValue(v string) *TagDescription { return s } -// Contains the parameters for TerminateInstanceInAutoScalingGroup. +// Represents a target tracking policy configuration. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/TargetTrackingConfiguration +type TargetTrackingConfiguration struct { + _ struct{} `type:"structure"` + + // A customized metric. + CustomizedMetricSpecification *CustomizedMetricSpecification `type:"structure"` + + // Indicates whether scale in by the target tracking policy is disabled. If + // the value is true, scale in is disabled and the target tracking policy won't + // remove instances from the Auto Scaling group. Otherwise, scale in is enabled + // and the target tracking policy can remove instances from the Auto Scaling + // group. The default value is false. + DisableScaleIn *bool `type:"boolean"` + + // A predefined metric. You can specify either a predefined metric or a customized + // metric. + PredefinedMetricSpecification *PredefinedMetricSpecification `type:"structure"` + + // The target value for the metric. + // + // TargetValue is a required field + TargetValue *float64 `type:"double" required:"true"` +} + +// String returns the string representation +func (s TargetTrackingConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TargetTrackingConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TargetTrackingConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TargetTrackingConfiguration"} + if s.TargetValue == nil { + invalidParams.Add(request.NewErrParamRequired("TargetValue")) + } + if s.CustomizedMetricSpecification != nil { + if err := s.CustomizedMetricSpecification.Validate(); err != nil { + invalidParams.AddNested("CustomizedMetricSpecification", err.(request.ErrInvalidParams)) + } + } + if s.PredefinedMetricSpecification != nil { + if err := s.PredefinedMetricSpecification.Validate(); err != nil { + invalidParams.AddNested("PredefinedMetricSpecification", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCustomizedMetricSpecification sets the CustomizedMetricSpecification field's value. +func (s *TargetTrackingConfiguration) SetCustomizedMetricSpecification(v *CustomizedMetricSpecification) *TargetTrackingConfiguration { + s.CustomizedMetricSpecification = v + return s +} + +// SetDisableScaleIn sets the DisableScaleIn field's value. +func (s *TargetTrackingConfiguration) SetDisableScaleIn(v bool) *TargetTrackingConfiguration { + s.DisableScaleIn = &v + return s +} + +// SetPredefinedMetricSpecification sets the PredefinedMetricSpecification field's value. 
+func (s *TargetTrackingConfiguration) SetPredefinedMetricSpecification(v *PredefinedMetricSpecification) *TargetTrackingConfiguration { + s.PredefinedMetricSpecification = v + return s +} + +// SetTargetValue sets the TargetValue field's value. +func (s *TargetTrackingConfiguration) SetTargetValue(v float64) *TargetTrackingConfiguration { + s.TargetValue = &v + return s +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/TerminateInstanceInAutoScalingGroupType type TerminateInstanceInAutoScalingGroupInput struct { _ struct{} `type:"structure"` @@ -10886,7 +12228,6 @@ func (s *TerminateInstanceInAutoScalingGroupInput) SetShouldDecrementDesiredCapa return s } -// Contains the output of TerminateInstancesInAutoScalingGroup. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/ActivityType type TerminateInstanceInAutoScalingGroupOutput struct { _ struct{} `type:"structure"` @@ -10911,7 +12252,6 @@ func (s *TerminateInstanceInAutoScalingGroupOutput) SetActivity(v *Activity) *Te return s } -// Contains the parameters for UpdateAutoScalingGroup. // Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/UpdateAutoScalingGroupType type UpdateAutoScalingGroupInput struct { _ struct{} `type:"structure"` @@ -11160,6 +12500,37 @@ const ( LifecycleStateStandby = "Standby" ) +const ( + // MetricStatisticAverage is a MetricStatistic enum value + MetricStatisticAverage = "Average" + + // MetricStatisticMinimum is a MetricStatistic enum value + MetricStatisticMinimum = "Minimum" + + // MetricStatisticMaximum is a MetricStatistic enum value + MetricStatisticMaximum = "Maximum" + + // MetricStatisticSampleCount is a MetricStatistic enum value + MetricStatisticSampleCount = "SampleCount" + + // MetricStatisticSum is a MetricStatistic enum value + MetricStatisticSum = "Sum" +) + +const ( + // MetricTypeAsgaverageCpuutilization is a MetricType enum value + MetricTypeAsgaverageCpuutilization = "ASGAverageCPUUtilization" + + // MetricTypeAsgaverageNetworkIn is a MetricType enum value + MetricTypeAsgaverageNetworkIn = "ASGAverageNetworkIn" + + // MetricTypeAsgaverageNetworkOut is a MetricType enum value + MetricTypeAsgaverageNetworkOut = "ASGAverageNetworkOut" + + // MetricTypeAlbrequestCountPerTarget is a MetricType enum value + MetricTypeAlbrequestCountPerTarget = "ALBRequestCountPerTarget" +) + const ( // ScalingActivityStatusCodePendingSpotBidPlacement is a ScalingActivityStatusCode enum value ScalingActivityStatusCodePendingSpotBidPlacement = "PendingSpotBidPlacement" diff --git a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/doc.go b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/doc.go new file mode 100644 index 000000000000..e1b144e95277 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/doc.go @@ -0,0 +1,30 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package autoscaling provides the client and types for making API +// requests to Auto Scaling. +// +// Auto Scaling is designed to automatically launch or terminate EC2 instances +// based on user-defined policies, schedules, and health checks. Use this service +// in conjunction with the Amazon CloudWatch and Elastic Load Balancing services. +// +// See https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01 for more information on this service. +// +// See autoscaling package documentation for more information. 
+// https://docs.aws.amazon.com/sdk-for-go/api/service/autoscaling/ +// +// Using the Client +// +// To Auto Scaling with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the Auto Scaling client AutoScaling for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/autoscaling/#New +package autoscaling diff --git a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/errors.go b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/errors.go new file mode 100644 index 000000000000..68ab1e3c12dd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/errors.go @@ -0,0 +1,47 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package autoscaling + +const ( + + // ErrCodeAlreadyExistsFault for service response error code + // "AlreadyExists". + // + // You already have an Auto Scaling group or launch configuration with this + // name. + ErrCodeAlreadyExistsFault = "AlreadyExists" + + // ErrCodeInvalidNextToken for service response error code + // "InvalidNextToken". + // + // The NextToken value is not valid. + ErrCodeInvalidNextToken = "InvalidNextToken" + + // ErrCodeLimitExceededFault for service response error code + // "LimitExceeded". + // + // You have already reached a limit for your Auto Scaling resources (for example, + // groups, launch configurations, or lifecycle hooks). For more information, + // see DescribeAccountLimits. + ErrCodeLimitExceededFault = "LimitExceeded" + + // ErrCodeResourceContentionFault for service response error code + // "ResourceContention". + // + // You already have a pending update to an Auto Scaling resource (for example, + // a group, instance, or load balancer). + ErrCodeResourceContentionFault = "ResourceContention" + + // ErrCodeResourceInUseFault for service response error code + // "ResourceInUse". + // + // The operation can't be performed because the resource is in use. + ErrCodeResourceInUseFault = "ResourceInUse" + + // ErrCodeScalingActivityInProgressFault for service response error code + // "ScalingActivityInProgress". + // + // The operation can't be performed because there are scaling activities in + // progress. + ErrCodeScalingActivityInProgressFault = "ScalingActivityInProgress" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/service.go b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/service.go index 98e1bb4adb41..5e63d1c031f8 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/service.go @@ -1,4 +1,4 @@ -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. package autoscaling @@ -11,12 +11,12 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/query" ) -// Auto Scaling is designed to automatically launch or terminate EC2 instances -// based on user-defined policies, schedules, and health checks. Use this service -// in conjunction with the Amazon CloudWatch and Elastic Load Balancing services. 
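// A minimal sketch of creating the client described in the new package docs
// above, assuming the standard "github.com/aws/aws-sdk-go/aws/session" package
// (the variable names are illustrative, not part of this change):
//
//    sess := session.Must(session.NewSession())
//    svc := autoscaling.New(sess)
//
// The resulting client is safe for concurrent use.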
-// The service client's operations are safe to be used concurrently. -// It is not safe to mutate any of the client's properties though. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01 +// AutoScaling provides the API operation methods for making requests to +// Auto Scaling. See this package's package overview docs +// for details on the service. +// +// AutoScaling methods are safe to use concurrently. It is not safe to +// modify mutate any of the struct's properties though. type AutoScaling struct { *client.Client } diff --git a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/waiters.go index 15a9fd86efbe..c2e7e2ed8d28 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/waiters.go +++ b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/waiters.go @@ -1,106 +1,163 @@ -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. package autoscaling import ( - "github.com/aws/aws-sdk-go/private/waiter" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" ) // WaitUntilGroupExists uses the Auto Scaling API operation // DescribeAutoScalingGroups to wait for a condition to be met before returning. -// If the condition is not meet within the max attempt window an error will +// If the condition is not met within the max attempt window, an error will // be returned. func (c *AutoScaling) WaitUntilGroupExists(input *DescribeAutoScalingGroupsInput) error { - waiterCfg := waiter.Config{ - Operation: "DescribeAutoScalingGroups", - Delay: 5, + return c.WaitUntilGroupExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilGroupExistsWithContext is an extended version of WaitUntilGroupExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) WaitUntilGroupExistsWithContext(ctx aws.Context, input *DescribeAutoScalingGroupsInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilGroupExists", MaxAttempts: 10, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "path", - Argument: "length(AutoScalingGroups) > `0`", + State: request.SuccessWaiterState, + Matcher: request.PathWaiterMatch, Argument: "length(AutoScalingGroups) > `0`", Expected: true, }, { - State: "retry", - Matcher: "path", - Argument: "length(AutoScalingGroups) > `0`", + State: request.RetryWaiterState, + Matcher: request.PathWaiterMatch, Argument: "length(AutoScalingGroups) > `0`", Expected: false, }, }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeAutoScalingGroupsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeAutoScalingGroupsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } + w.ApplyOptions(opts...) 
- w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } // WaitUntilGroupInService uses the Auto Scaling API operation // DescribeAutoScalingGroups to wait for a condition to be met before returning. -// If the condition is not meet within the max attempt window an error will +// If the condition is not met within the max attempt window, an error will // be returned. func (c *AutoScaling) WaitUntilGroupInService(input *DescribeAutoScalingGroupsInput) error { - waiterCfg := waiter.Config{ - Operation: "DescribeAutoScalingGroups", - Delay: 15, + return c.WaitUntilGroupInServiceWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilGroupInServiceWithContext is an extended version of WaitUntilGroupInService. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) WaitUntilGroupInServiceWithContext(ctx aws.Context, input *DescribeAutoScalingGroupsInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilGroupInService", MaxAttempts: 40, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "path", - Argument: "contains(AutoScalingGroups[].[length(Instances[?LifecycleState=='InService']) >= MinSize][], `false`)", + State: request.SuccessWaiterState, + Matcher: request.PathWaiterMatch, Argument: "contains(AutoScalingGroups[].[length(Instances[?LifecycleState=='InService']) >= MinSize][], `false`)", Expected: false, }, { - State: "retry", - Matcher: "path", - Argument: "contains(AutoScalingGroups[].[length(Instances[?LifecycleState=='InService']) >= MinSize][], `false`)", + State: request.RetryWaiterState, + Matcher: request.PathWaiterMatch, Argument: "contains(AutoScalingGroups[].[length(Instances[?LifecycleState=='InService']) >= MinSize][], `false`)", Expected: true, }, }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeAutoScalingGroupsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeAutoScalingGroupsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } + w.ApplyOptions(opts...) - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } // WaitUntilGroupNotExists uses the Auto Scaling API operation // DescribeAutoScalingGroups to wait for a condition to be met before returning. -// If the condition is not meet within the max attempt window an error will +// If the condition is not met within the max attempt window, an error will // be returned. func (c *AutoScaling) WaitUntilGroupNotExists(input *DescribeAutoScalingGroupsInput) error { - waiterCfg := waiter.Config{ - Operation: "DescribeAutoScalingGroups", - Delay: 15, + return c.WaitUntilGroupNotExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilGroupNotExistsWithContext is an extended version of WaitUntilGroupNotExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) WaitUntilGroupNotExistsWithContext(ctx aws.Context, input *DescribeAutoScalingGroupsInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilGroupNotExists", MaxAttempts: 40, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "path", - Argument: "length(AutoScalingGroups) > `0`", + State: request.SuccessWaiterState, + Matcher: request.PathWaiterMatch, Argument: "length(AutoScalingGroups) > `0`", Expected: false, }, { - State: "retry", - Matcher: "path", - Argument: "length(AutoScalingGroups) > `0`", + State: request.RetryWaiterState, + Matcher: request.PathWaiterMatch, Argument: "length(AutoScalingGroups) > `0`", Expected: true, }, }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeAutoScalingGroupsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeAutoScalingGroupsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } + w.ApplyOptions(opts...) - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go index e006e7e7b7eb..9d4570d6d8ad 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go @@ -1,12 +1,12 @@ -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. -// Package ec2 provides a client for Amazon Elastic Compute Cloud. package ec2 import ( "fmt" "time" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awsutil" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/private/protocol" @@ -17,19 +17,18 @@ const opAcceptReservedInstancesExchangeQuote = "AcceptReservedInstancesExchangeQ // AcceptReservedInstancesExchangeQuoteRequest generates a "aws/request.Request" representing the // client's request for the AcceptReservedInstancesExchangeQuote operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See AcceptReservedInstancesExchangeQuote for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the AcceptReservedInstancesExchangeQuote method directly -// instead. +// See AcceptReservedInstancesExchangeQuote for more information on using the AcceptReservedInstancesExchangeQuote +// API call, and error handling. 
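// A minimal sketch of the context-aware waiters above, assuming the client
// (svc) from the earlier sketch, a group named "my-asg", and the standard
// context and time packages (all illustrative, not part of this change):
//
//    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
//    defer cancel()
//    err := svc.WaitUntilGroupInServiceWithContext(ctx, &autoscaling.DescribeAutoScalingGroupsInput{
//        AutoScalingGroupNames: []*string{aws.String("my-asg")},
//    })
//
// Per the acceptors above, the waiter polls DescribeAutoScalingGroups every
// 15 seconds for up to 40 attempts, returning early with an error if ctx is
// cancelled or times out.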
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the AcceptReservedInstancesExchangeQuoteRequest method. // req, resp := client.AcceptReservedInstancesExchangeQuoteRequest(params) @@ -70,27 +69,41 @@ func (c *EC2) AcceptReservedInstancesExchangeQuoteRequest(input *AcceptReservedI // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AcceptReservedInstancesExchangeQuote func (c *EC2) AcceptReservedInstancesExchangeQuote(input *AcceptReservedInstancesExchangeQuoteInput) (*AcceptReservedInstancesExchangeQuoteOutput, error) { req, out := c.AcceptReservedInstancesExchangeQuoteRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// AcceptReservedInstancesExchangeQuoteWithContext is the same as AcceptReservedInstancesExchangeQuote with the addition of +// the ability to pass a context and additional request options. +// +// See AcceptReservedInstancesExchangeQuote for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AcceptReservedInstancesExchangeQuoteWithContext(ctx aws.Context, input *AcceptReservedInstancesExchangeQuoteInput, opts ...request.Option) (*AcceptReservedInstancesExchangeQuoteOutput, error) { + req, out := c.AcceptReservedInstancesExchangeQuoteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opAcceptVpcPeeringConnection = "AcceptVpcPeeringConnection" // AcceptVpcPeeringConnectionRequest generates a "aws/request.Request" representing the // client's request for the AcceptVpcPeeringConnection operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See AcceptVpcPeeringConnection for usage and error information. +// See AcceptVpcPeeringConnection for more information on using the AcceptVpcPeeringConnection +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the AcceptVpcPeeringConnection method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the AcceptVpcPeeringConnectionRequest method. 
// req, resp := client.AcceptVpcPeeringConnectionRequest(params) @@ -121,8 +134,8 @@ func (c *EC2) AcceptVpcPeeringConnectionRequest(input *AcceptVpcPeeringConnectio // // Accept a VPC peering connection request. To accept a request, the VPC peering // connection must be in the pending-acceptance state, and you must be the owner -// of the peer VPC. Use the DescribeVpcPeeringConnections request to view your -// outstanding VPC peering connection requests. +// of the peer VPC. Use DescribeVpcPeeringConnections to view your outstanding +// VPC peering connection requests. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -133,27 +146,41 @@ func (c *EC2) AcceptVpcPeeringConnectionRequest(input *AcceptVpcPeeringConnectio // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AcceptVpcPeeringConnection func (c *EC2) AcceptVpcPeeringConnection(input *AcceptVpcPeeringConnectionInput) (*AcceptVpcPeeringConnectionOutput, error) { req, out := c.AcceptVpcPeeringConnectionRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// AcceptVpcPeeringConnectionWithContext is the same as AcceptVpcPeeringConnection with the addition of +// the ability to pass a context and additional request options. +// +// See AcceptVpcPeeringConnection for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AcceptVpcPeeringConnectionWithContext(ctx aws.Context, input *AcceptVpcPeeringConnectionInput, opts ...request.Option) (*AcceptVpcPeeringConnectionOutput, error) { + req, out := c.AcceptVpcPeeringConnectionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opAllocateAddress = "AllocateAddress" // AllocateAddressRequest generates a "aws/request.Request" representing the // client's request for the AllocateAddress operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See AllocateAddress for usage and error information. +// See AllocateAddress for more information on using the AllocateAddress +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the AllocateAddress method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the AllocateAddressRequest method. 
// req, resp := client.AllocateAddressRequest(params) @@ -182,10 +209,18 @@ func (c *EC2) AllocateAddressRequest(input *AllocateAddressInput) (req *request. // AllocateAddress API operation for Amazon Elastic Compute Cloud. // -// Acquires an Elastic IP address. +// Allocates an Elastic IP address. // // An Elastic IP address is for use either in the EC2-Classic platform or in -// a VPC. For more information, see Elastic IP Addresses (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) +// a VPC. By default, you can allocate 5 Elastic IP addresses for EC2-Classic +// per region and 5 Elastic IP addresses for EC2-VPC per region. +// +// If you release an Elastic IP address for use in a VPC, you might be able +// to recover it. To recover an Elastic IP address that you released, specify +// it in the Address parameter. Note that you cannot recover an Elastic IP address +// that you released after it is allocated to another AWS account. +// +// For more information, see Elastic IP Addresses (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -197,27 +232,41 @@ func (c *EC2) AllocateAddressRequest(input *AllocateAddressInput) (req *request. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AllocateAddress func (c *EC2) AllocateAddress(input *AllocateAddressInput) (*AllocateAddressOutput, error) { req, out := c.AllocateAddressRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// AllocateAddressWithContext is the same as AllocateAddress with the addition of +// the ability to pass a context and additional request options. +// +// See AllocateAddress for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AllocateAddressWithContext(ctx aws.Context, input *AllocateAddressInput, opts ...request.Option) (*AllocateAddressOutput, error) { + req, out := c.AllocateAddressRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opAllocateHosts = "AllocateHosts" // AllocateHostsRequest generates a "aws/request.Request" representing the // client's request for the AllocateHosts operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See AllocateHosts for usage and error information. +// See AllocateHosts for more information on using the AllocateHosts +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the AllocateHosts method directly -// instead. 
+// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the AllocateHostsRequest method. // req, resp := client.AllocateHostsRequest(params) @@ -259,27 +308,41 @@ func (c *EC2) AllocateHostsRequest(input *AllocateHostsInput) (req *request.Requ // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AllocateHosts func (c *EC2) AllocateHosts(input *AllocateHostsInput) (*AllocateHostsOutput, error) { req, out := c.AllocateHostsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// AllocateHostsWithContext is the same as AllocateHosts with the addition of +// the ability to pass a context and additional request options. +// +// See AllocateHosts for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AllocateHostsWithContext(ctx aws.Context, input *AllocateHostsInput, opts ...request.Option) (*AllocateHostsOutput, error) { + req, out := c.AllocateHostsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opAssignIpv6Addresses = "AssignIpv6Addresses" // AssignIpv6AddressesRequest generates a "aws/request.Request" representing the // client's request for the AssignIpv6Addresses operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See AssignIpv6Addresses for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the AssignIpv6Addresses method directly -// instead. +// See AssignIpv6Addresses for more information on using the AssignIpv6Addresses +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the AssignIpv6AddressesRequest method. 
// req, resp := client.AssignIpv6AddressesRequest(params) @@ -326,27 +389,41 @@ func (c *EC2) AssignIpv6AddressesRequest(input *AssignIpv6AddressesInput) (req * // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssignIpv6Addresses func (c *EC2) AssignIpv6Addresses(input *AssignIpv6AddressesInput) (*AssignIpv6AddressesOutput, error) { req, out := c.AssignIpv6AddressesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// AssignIpv6AddressesWithContext is the same as AssignIpv6Addresses with the addition of +// the ability to pass a context and additional request options. +// +// See AssignIpv6Addresses for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AssignIpv6AddressesWithContext(ctx aws.Context, input *AssignIpv6AddressesInput, opts ...request.Option) (*AssignIpv6AddressesOutput, error) { + req, out := c.AssignIpv6AddressesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opAssignPrivateIpAddresses = "AssignPrivateIpAddresses" // AssignPrivateIpAddressesRequest generates a "aws/request.Request" representing the // client's request for the AssignPrivateIpAddresses operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See AssignPrivateIpAddresses for usage and error information. +// See AssignPrivateIpAddresses for more information on using the AssignPrivateIpAddresses +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the AssignPrivateIpAddresses method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the AssignPrivateIpAddressesRequest method. // req, resp := client.AssignPrivateIpAddressesRequest(params) @@ -398,27 +475,41 @@ func (c *EC2) AssignPrivateIpAddressesRequest(input *AssignPrivateIpAddressesInp // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssignPrivateIpAddresses func (c *EC2) AssignPrivateIpAddresses(input *AssignPrivateIpAddressesInput) (*AssignPrivateIpAddressesOutput, error) { req, out := c.AssignPrivateIpAddressesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// AssignPrivateIpAddressesWithContext is the same as AssignPrivateIpAddresses with the addition of +// the ability to pass a context and additional request options. +// +// See AssignPrivateIpAddresses for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AssignPrivateIpAddressesWithContext(ctx aws.Context, input *AssignPrivateIpAddressesInput, opts ...request.Option) (*AssignPrivateIpAddressesOutput, error) { + req, out := c.AssignPrivateIpAddressesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opAssociateAddress = "AssociateAddress" // AssociateAddressRequest generates a "aws/request.Request" representing the // client's request for the AssociateAddress operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See AssociateAddress for usage and error information. +// See AssociateAddress for more information on using the AssociateAddress +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the AssociateAddress method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the AssociateAddressRequest method. // req, resp := client.AssociateAddressRequest(params) @@ -455,12 +546,17 @@ func (c *EC2) AssociateAddressRequest(input *AssociateAddressInput) (req *reques // // [EC2-Classic, VPC in an EC2-VPC-only account] If the Elastic IP address is // already associated with a different instance, it is disassociated from that -// instance and associated with the specified instance. +// instance and associated with the specified instance. If you associate an +// Elastic IP address with an instance that has an existing Elastic IP address, +// the existing address is disassociated from the instance, but remains allocated +// to your account. // // [VPC in an EC2-Classic account] If you don't specify a private IP address, // the Elastic IP address is associated with the primary IP address. If the // Elastic IP address is already associated with a different instance or a network -// interface, you get an error unless you allow reassociation. +// interface, you get an error unless you allow reassociation. You cannot associate +// an Elastic IP address with an instance or network interface that has an existing +// Elastic IP address. // // This is an idempotent operation. 
If you perform the operation more than once, // Amazon EC2 doesn't return an error, and you may be charged for each time @@ -476,27 +572,41 @@ func (c *EC2) AssociateAddressRequest(input *AssociateAddressInput) (req *reques // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateAddress func (c *EC2) AssociateAddress(input *AssociateAddressInput) (*AssociateAddressOutput, error) { req, out := c.AssociateAddressRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// AssociateAddressWithContext is the same as AssociateAddress with the addition of +// the ability to pass a context and additional request options. +// +// See AssociateAddress for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AssociateAddressWithContext(ctx aws.Context, input *AssociateAddressInput, opts ...request.Option) (*AssociateAddressOutput, error) { + req, out := c.AssociateAddressRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opAssociateDhcpOptions = "AssociateDhcpOptions" // AssociateDhcpOptionsRequest generates a "aws/request.Request" representing the // client's request for the AssociateDhcpOptions operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See AssociateDhcpOptions for usage and error information. +// See AssociateDhcpOptions for more information on using the AssociateDhcpOptions +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the AssociateDhcpOptions method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the AssociateDhcpOptionsRequest method. // req, resp := client.AssociateDhcpOptionsRequest(params) @@ -549,27 +659,116 @@ func (c *EC2) AssociateDhcpOptionsRequest(input *AssociateDhcpOptionsInput) (req // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateDhcpOptions func (c *EC2) AssociateDhcpOptions(input *AssociateDhcpOptionsInput) (*AssociateDhcpOptionsOutput, error) { req, out := c.AssociateDhcpOptionsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// AssociateDhcpOptionsWithContext is the same as AssociateDhcpOptions with the addition of +// the ability to pass a context and additional request options. +// +// See AssociateDhcpOptions for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AssociateDhcpOptionsWithContext(ctx aws.Context, input *AssociateDhcpOptionsInput, opts ...request.Option) (*AssociateDhcpOptionsOutput, error) { + req, out := c.AssociateDhcpOptionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAssociateIamInstanceProfile = "AssociateIamInstanceProfile" + +// AssociateIamInstanceProfileRequest generates a "aws/request.Request" representing the +// client's request for the AssociateIamInstanceProfile operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssociateIamInstanceProfile for more information on using the AssociateIamInstanceProfile +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssociateIamInstanceProfileRequest method. +// req, resp := client.AssociateIamInstanceProfileRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateIamInstanceProfile +func (c *EC2) AssociateIamInstanceProfileRequest(input *AssociateIamInstanceProfileInput) (req *request.Request, output *AssociateIamInstanceProfileOutput) { + op := &request.Operation{ + Name: opAssociateIamInstanceProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssociateIamInstanceProfileInput{} + } + + output = &AssociateIamInstanceProfileOutput{} + req = c.newRequest(op, input, output) + return +} + +// AssociateIamInstanceProfile API operation for Amazon Elastic Compute Cloud. +// +// Associates an IAM instance profile with a running or stopped instance. You +// cannot associate more than one IAM instance profile with an instance. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation AssociateIamInstanceProfile for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateIamInstanceProfile +func (c *EC2) AssociateIamInstanceProfile(input *AssociateIamInstanceProfileInput) (*AssociateIamInstanceProfileOutput, error) { + req, out := c.AssociateIamInstanceProfileRequest(input) + return out, req.Send() +} + +// AssociateIamInstanceProfileWithContext is the same as AssociateIamInstanceProfile with the addition of +// the ability to pass a context and additional request options. +// +// See AssociateIamInstanceProfile for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AssociateIamInstanceProfileWithContext(ctx aws.Context, input *AssociateIamInstanceProfileInput, opts ...request.Option) (*AssociateIamInstanceProfileOutput, error) { + req, out := c.AssociateIamInstanceProfileRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opAssociateRouteTable = "AssociateRouteTable" // AssociateRouteTableRequest generates a "aws/request.Request" representing the // client's request for the AssociateRouteTable operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See AssociateRouteTable for usage and error information. +// See AssociateRouteTable for more information on using the AssociateRouteTable +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the AssociateRouteTable method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the AssociateRouteTableRequest method. // req, resp := client.AssociateRouteTableRequest(params) @@ -616,27 +815,41 @@ func (c *EC2) AssociateRouteTableRequest(input *AssociateRouteTableInput) (req * // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateRouteTable func (c *EC2) AssociateRouteTable(input *AssociateRouteTableInput) (*AssociateRouteTableOutput, error) { req, out := c.AssociateRouteTableRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// AssociateRouteTableWithContext is the same as AssociateRouteTable with the addition of +// the ability to pass a context and additional request options. +// +// See AssociateRouteTable for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AssociateRouteTableWithContext(ctx aws.Context, input *AssociateRouteTableInput, opts ...request.Option) (*AssociateRouteTableOutput, error) { + req, out := c.AssociateRouteTableRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opAssociateSubnetCidrBlock = "AssociateSubnetCidrBlock" // AssociateSubnetCidrBlockRequest generates a "aws/request.Request" representing the // client's request for the AssociateSubnetCidrBlock operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. 
+// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See AssociateSubnetCidrBlock for usage and error information. +// See AssociateSubnetCidrBlock for more information on using the AssociateSubnetCidrBlock +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the AssociateSubnetCidrBlock method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the AssociateSubnetCidrBlockRequest method. // req, resp := client.AssociateSubnetCidrBlockRequest(params) @@ -678,27 +891,41 @@ func (c *EC2) AssociateSubnetCidrBlockRequest(input *AssociateSubnetCidrBlockInp // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateSubnetCidrBlock func (c *EC2) AssociateSubnetCidrBlock(input *AssociateSubnetCidrBlockInput) (*AssociateSubnetCidrBlockOutput, error) { req, out := c.AssociateSubnetCidrBlockRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// AssociateSubnetCidrBlockWithContext is the same as AssociateSubnetCidrBlock with the addition of +// the ability to pass a context and additional request options. +// +// See AssociateSubnetCidrBlock for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AssociateSubnetCidrBlockWithContext(ctx aws.Context, input *AssociateSubnetCidrBlockInput, opts ...request.Option) (*AssociateSubnetCidrBlockOutput, error) { + req, out := c.AssociateSubnetCidrBlockRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opAssociateVpcCidrBlock = "AssociateVpcCidrBlock" // AssociateVpcCidrBlockRequest generates a "aws/request.Request" representing the // client's request for the AssociateVpcCidrBlock operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See AssociateVpcCidrBlock for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the AssociateVpcCidrBlock method directly -// instead. 
+// See AssociateVpcCidrBlock for more information on using the AssociateVpcCidrBlock +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the AssociateVpcCidrBlockRequest method. // req, resp := client.AssociateVpcCidrBlockRequest(params) @@ -727,8 +954,13 @@ func (c *EC2) AssociateVpcCidrBlockRequest(input *AssociateVpcCidrBlockInput) (r // AssociateVpcCidrBlock API operation for Amazon Elastic Compute Cloud. // -// Associates a CIDR block with your VPC. You can only associate a single Amazon-provided -// IPv6 CIDR block with your VPC. The IPv6 CIDR block size is fixed at /56. +// Associates a CIDR block with your VPC. You can associate a secondary IPv4 +// CIDR block, or you can associate an Amazon-provided IPv6 CIDR block. The +// IPv6 CIDR block size is fixed at /56. +// +// For more information about associating CIDR blocks with your VPC and applicable +// restrictions, see VPC and Subnet Sizing (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Subnets.html#VPC_Sizing) +// in the Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -739,27 +971,41 @@ func (c *EC2) AssociateVpcCidrBlockRequest(input *AssociateVpcCidrBlockInput) (r // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateVpcCidrBlock func (c *EC2) AssociateVpcCidrBlock(input *AssociateVpcCidrBlockInput) (*AssociateVpcCidrBlockOutput, error) { req, out := c.AssociateVpcCidrBlockRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// AssociateVpcCidrBlockWithContext is the same as AssociateVpcCidrBlock with the addition of +// the ability to pass a context and additional request options. +// +// See AssociateVpcCidrBlock for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AssociateVpcCidrBlockWithContext(ctx aws.Context, input *AssociateVpcCidrBlockInput, opts ...request.Option) (*AssociateVpcCidrBlockOutput, error) { + req, out := c.AssociateVpcCidrBlockRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opAttachClassicLinkVpc = "AttachClassicLinkVpc" // AttachClassicLinkVpcRequest generates a "aws/request.Request" representing the // client's request for the AttachClassicLinkVpc operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See AttachClassicLinkVpc for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the AttachClassicLinkVpc method directly -// instead. +// See AttachClassicLinkVpc for more information on using the AttachClassicLinkVpc +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the AttachClassicLinkVpcRequest method. // req, resp := client.AttachClassicLinkVpcRequest(params) @@ -810,27 +1056,41 @@ func (c *EC2) AttachClassicLinkVpcRequest(input *AttachClassicLinkVpcInput) (req // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AttachClassicLinkVpc func (c *EC2) AttachClassicLinkVpc(input *AttachClassicLinkVpcInput) (*AttachClassicLinkVpcOutput, error) { req, out := c.AttachClassicLinkVpcRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// AttachClassicLinkVpcWithContext is the same as AttachClassicLinkVpc with the addition of +// the ability to pass a context and additional request options. +// +// See AttachClassicLinkVpc for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AttachClassicLinkVpcWithContext(ctx aws.Context, input *AttachClassicLinkVpcInput, opts ...request.Option) (*AttachClassicLinkVpcOutput, error) { + req, out := c.AttachClassicLinkVpcRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opAttachInternetGateway = "AttachInternetGateway" // AttachInternetGatewayRequest generates a "aws/request.Request" representing the // client's request for the AttachInternetGateway operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See AttachInternetGateway for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the AttachInternetGateway method directly -// instead. +// See AttachInternetGateway for more information on using the AttachInternetGateway +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. 
// // // Example sending a request using the AttachInternetGatewayRequest method. // req, resp := client.AttachInternetGatewayRequest(params) @@ -874,27 +1134,41 @@ func (c *EC2) AttachInternetGatewayRequest(input *AttachInternetGatewayInput) (r // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AttachInternetGateway func (c *EC2) AttachInternetGateway(input *AttachInternetGatewayInput) (*AttachInternetGatewayOutput, error) { req, out := c.AttachInternetGatewayRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// AttachInternetGatewayWithContext is the same as AttachInternetGateway with the addition of +// the ability to pass a context and additional request options. +// +// See AttachInternetGateway for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AttachInternetGatewayWithContext(ctx aws.Context, input *AttachInternetGatewayInput, opts ...request.Option) (*AttachInternetGatewayOutput, error) { + req, out := c.AttachInternetGatewayRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opAttachNetworkInterface = "AttachNetworkInterface" // AttachNetworkInterfaceRequest generates a "aws/request.Request" representing the // client's request for the AttachNetworkInterface operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See AttachNetworkInterface for usage and error information. +// See AttachNetworkInterface for more information on using the AttachNetworkInterface +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the AttachNetworkInterface method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the AttachNetworkInterfaceRequest method. // req, resp := client.AttachNetworkInterfaceRequest(params) @@ -934,27 +1208,41 @@ func (c *EC2) AttachNetworkInterfaceRequest(input *AttachNetworkInterfaceInput) // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AttachNetworkInterface func (c *EC2) AttachNetworkInterface(input *AttachNetworkInterfaceInput) (*AttachNetworkInterfaceOutput, error) { req, out := c.AttachNetworkInterfaceRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// AttachNetworkInterfaceWithContext is the same as AttachNetworkInterface with the addition of +// the ability to pass a context and additional request options. 
+// +// See AttachNetworkInterface for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AttachNetworkInterfaceWithContext(ctx aws.Context, input *AttachNetworkInterfaceInput, opts ...request.Option) (*AttachNetworkInterfaceOutput, error) { + req, out := c.AttachNetworkInterfaceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opAttachVolume = "AttachVolume" // AttachVolumeRequest generates a "aws/request.Request" representing the // client's request for the AttachVolume operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See AttachVolume for usage and error information. +// See AttachVolume for more information on using the AttachVolume +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the AttachVolume method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the AttachVolumeRequest method. // req, resp := client.AttachVolumeRequest(params) @@ -1023,27 +1311,41 @@ func (c *EC2) AttachVolumeRequest(input *AttachVolumeInput) (req *request.Reques // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AttachVolume func (c *EC2) AttachVolume(input *AttachVolumeInput) (*VolumeAttachment, error) { req, out := c.AttachVolumeRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// AttachVolumeWithContext is the same as AttachVolume with the addition of +// the ability to pass a context and additional request options. +// +// See AttachVolume for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AttachVolumeWithContext(ctx aws.Context, input *AttachVolumeInput, opts ...request.Option) (*VolumeAttachment, error) { + req, out := c.AttachVolumeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opAttachVpnGateway = "AttachVpnGateway" // AttachVpnGatewayRequest generates a "aws/request.Request" representing the // client's request for the AttachVpnGateway operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. 
+// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See AttachVpnGateway for usage and error information. +// See AttachVpnGateway for more information on using the AttachVpnGateway +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the AttachVpnGateway method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the AttachVpnGatewayRequest method. // req, resp := client.AttachVpnGatewayRequest(params) @@ -1072,8 +1374,11 @@ func (c *EC2) AttachVpnGatewayRequest(input *AttachVpnGatewayInput) (req *reques // AttachVpnGateway API operation for Amazon Elastic Compute Cloud. // -// Attaches a virtual private gateway to a VPC. For more information, see Adding -// a Hardware Virtual Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) +// Attaches a virtual private gateway to a VPC. You can attach one virtual private +// gateway to one VPC at a time. +// +// For more information, see Adding a Hardware Virtual Private Gateway to Your +// VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) // in the Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1085,27 +1390,41 @@ func (c *EC2) AttachVpnGatewayRequest(input *AttachVpnGatewayInput) (req *reques // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AttachVpnGateway func (c *EC2) AttachVpnGateway(input *AttachVpnGatewayInput) (*AttachVpnGatewayOutput, error) { req, out := c.AttachVpnGatewayRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// AttachVpnGatewayWithContext is the same as AttachVpnGateway with the addition of +// the ability to pass a context and additional request options. +// +// See AttachVpnGateway for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AttachVpnGatewayWithContext(ctx aws.Context, input *AttachVpnGatewayInput, opts ...request.Option) (*AttachVpnGatewayOutput, error) { + req, out := c.AttachVpnGatewayRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opAuthorizeSecurityGroupEgress = "AuthorizeSecurityGroupEgress" // AuthorizeSecurityGroupEgressRequest generates a "aws/request.Request" representing the // client's request for the AuthorizeSecurityGroupEgress operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. 
+// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See AuthorizeSecurityGroupEgress for usage and error information. +// See AuthorizeSecurityGroupEgress for more information on using the AuthorizeSecurityGroupEgress +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the AuthorizeSecurityGroupEgress method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the AuthorizeSecurityGroupEgressRequest method. // req, resp := client.AuthorizeSecurityGroupEgressRequest(params) @@ -1149,7 +1468,8 @@ func (c *EC2) AuthorizeSecurityGroupEgressRequest(input *AuthorizeSecurityGroupE // range or a source group. For the TCP and UDP protocols, you must also specify // the destination port or port range. For the ICMP protocol, you must also // specify the ICMP type and code. You can use -1 for the type or code to mean -// all types or all codes. +// all types or all codes. You can optionally specify a description for the +// rule. // // Rule changes are propagated to affected instances as quickly as possible. // However, a small delay might occur. @@ -1163,27 +1483,41 @@ func (c *EC2) AuthorizeSecurityGroupEgressRequest(input *AuthorizeSecurityGroupE // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AuthorizeSecurityGroupEgress func (c *EC2) AuthorizeSecurityGroupEgress(input *AuthorizeSecurityGroupEgressInput) (*AuthorizeSecurityGroupEgressOutput, error) { req, out := c.AuthorizeSecurityGroupEgressRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// AuthorizeSecurityGroupEgressWithContext is the same as AuthorizeSecurityGroupEgress with the addition of +// the ability to pass a context and additional request options. +// +// See AuthorizeSecurityGroupEgress for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AuthorizeSecurityGroupEgressWithContext(ctx aws.Context, input *AuthorizeSecurityGroupEgressInput, opts ...request.Option) (*AuthorizeSecurityGroupEgressOutput, error) { + req, out := c.AuthorizeSecurityGroupEgressRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opAuthorizeSecurityGroupIngress = "AuthorizeSecurityGroupIngress" // AuthorizeSecurityGroupIngressRequest generates a "aws/request.Request" representing the // client's request for the AuthorizeSecurityGroupIngress operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. 
+// value will be populated with the request's response once the request complets +// successfuly. // -// See AuthorizeSecurityGroupIngress for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the AuthorizeSecurityGroupIngress method directly -// instead. +// See AuthorizeSecurityGroupIngress for more information on using the AuthorizeSecurityGroupIngress +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the AuthorizeSecurityGroupIngressRequest method. // req, resp := client.AuthorizeSecurityGroupIngressRequest(params) @@ -1232,6 +1566,8 @@ func (c *EC2) AuthorizeSecurityGroupIngressRequest(input *AuthorizeSecurityGroup // peer VPC in a VPC peering connection. For more information about VPC security // group limits, see Amazon VPC Limits (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Appendix_Limits.html). // +// You can optionally specify a description for the security group rule. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1241,27 +1577,41 @@ func (c *EC2) AuthorizeSecurityGroupIngressRequest(input *AuthorizeSecurityGroup // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AuthorizeSecurityGroupIngress func (c *EC2) AuthorizeSecurityGroupIngress(input *AuthorizeSecurityGroupIngressInput) (*AuthorizeSecurityGroupIngressOutput, error) { req, out := c.AuthorizeSecurityGroupIngressRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// AuthorizeSecurityGroupIngressWithContext is the same as AuthorizeSecurityGroupIngress with the addition of +// the ability to pass a context and additional request options. +// +// See AuthorizeSecurityGroupIngress for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) AuthorizeSecurityGroupIngressWithContext(ctx aws.Context, input *AuthorizeSecurityGroupIngressInput, opts ...request.Option) (*AuthorizeSecurityGroupIngressOutput, error) { + req, out := c.AuthorizeSecurityGroupIngressRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opBundleInstance = "BundleInstance" // BundleInstanceRequest generates a "aws/request.Request" representing the // client's request for the BundleInstance operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. 
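The awserr.Error handling that these doc comments repeatedly describe looks roughly like this in caller code (a sketch; the security-group ID, port, and CIDR below are placeholders):

// Sketch only: demonstrates the runtime type assertion on awserr.Error that
// the generated documentation recommends for inspecting service errors.
package ec2usage

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// authorizeSSH opens TCP/22 on a security group and inspects any service error.
func authorizeSSH(svc *ec2.EC2) error {
	_, err := svc.AuthorizeSecurityGroupIngress(&ec2.AuthorizeSecurityGroupIngressInput{
		GroupId:    aws.String("sg-0123456789abcdef0"), // placeholder ID
		IpProtocol: aws.String("tcp"),
		FromPort:   aws.Int64(22),
		ToPort:     aws.Int64(22),
		CidrIp:     aws.String("203.0.113.0/24"), // documentation range
	})
	if aerr, ok := err.(awserr.Error); ok {
		// Code() is the stable error identifier (for example "InvalidGroup.NotFound");
		// Message() carries the service's human-readable description.
		fmt.Printf("EC2 error %s: %s\n", aerr.Code(), aerr.Message())
	}
	return err
}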
+// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See BundleInstance for usage and error information. +// See BundleInstance for more information on using the BundleInstance +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the BundleInstance method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the BundleInstanceRequest method. // req, resp := client.BundleInstanceRequest(params) @@ -1309,27 +1659,41 @@ func (c *EC2) BundleInstanceRequest(input *BundleInstanceInput) (req *request.Re // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/BundleInstance func (c *EC2) BundleInstance(input *BundleInstanceInput) (*BundleInstanceOutput, error) { req, out := c.BundleInstanceRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// BundleInstanceWithContext is the same as BundleInstance with the addition of +// the ability to pass a context and additional request options. +// +// See BundleInstance for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) BundleInstanceWithContext(ctx aws.Context, input *BundleInstanceInput, opts ...request.Option) (*BundleInstanceOutput, error) { + req, out := c.BundleInstanceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCancelBundleTask = "CancelBundleTask" // CancelBundleTaskRequest generates a "aws/request.Request" representing the // client's request for the CancelBundleTask operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See CancelBundleTask for usage and error information. +// See CancelBundleTask for more information on using the CancelBundleTask +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CancelBundleTask method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CancelBundleTaskRequest method. // req, resp := client.CancelBundleTaskRequest(params) @@ -1369,27 +1733,41 @@ func (c *EC2) CancelBundleTaskRequest(input *CancelBundleTaskInput) (req *reques // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CancelBundleTask func (c *EC2) CancelBundleTask(input *CancelBundleTaskInput) (*CancelBundleTaskOutput, error) { req, out := c.CancelBundleTaskRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CancelBundleTaskWithContext is the same as CancelBundleTask with the addition of +// the ability to pass a context and additional request options. +// +// See CancelBundleTask for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CancelBundleTaskWithContext(ctx aws.Context, input *CancelBundleTaskInput, opts ...request.Option) (*CancelBundleTaskOutput, error) { + req, out := c.CancelBundleTaskRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCancelConversionTask = "CancelConversionTask" // CancelConversionTaskRequest generates a "aws/request.Request" representing the // client's request for the CancelConversionTask operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See CancelConversionTask for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CancelConversionTask method directly -// instead. +// See CancelConversionTask for more information on using the CancelConversionTask +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CancelConversionTaskRequest method. 
// req, resp := client.CancelConversionTaskRequest(params) @@ -1438,27 +1816,41 @@ func (c *EC2) CancelConversionTaskRequest(input *CancelConversionTaskInput) (req // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CancelConversionTask func (c *EC2) CancelConversionTask(input *CancelConversionTaskInput) (*CancelConversionTaskOutput, error) { req, out := c.CancelConversionTaskRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CancelConversionTaskWithContext is the same as CancelConversionTask with the addition of +// the ability to pass a context and additional request options. +// +// See CancelConversionTask for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CancelConversionTaskWithContext(ctx aws.Context, input *CancelConversionTaskInput, opts ...request.Option) (*CancelConversionTaskOutput, error) { + req, out := c.CancelConversionTaskRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCancelExportTask = "CancelExportTask" // CancelExportTaskRequest generates a "aws/request.Request" representing the // client's request for the CancelExportTask operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See CancelExportTask for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CancelExportTask method directly -// instead. +// See CancelExportTask for more information on using the CancelExportTask +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CancelExportTaskRequest method. // req, resp := client.CancelExportTaskRequest(params) @@ -1503,27 +1895,41 @@ func (c *EC2) CancelExportTaskRequest(input *CancelExportTaskInput) (req *reques // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CancelExportTask func (c *EC2) CancelExportTask(input *CancelExportTaskInput) (*CancelExportTaskOutput, error) { req, out := c.CancelExportTaskRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CancelExportTaskWithContext is the same as CancelExportTask with the addition of +// the ability to pass a context and additional request options. +// +// See CancelExportTask for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CancelExportTaskWithContext(ctx aws.Context, input *CancelExportTaskInput, opts ...request.Option) (*CancelExportTaskOutput, error) { + req, out := c.CancelExportTaskRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCancelImportTask = "CancelImportTask" // CancelImportTaskRequest generates a "aws/request.Request" representing the // client's request for the CancelImportTask operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See CancelImportTask for usage and error information. +// See CancelImportTask for more information on using the CancelImportTask +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CancelImportTask method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CancelImportTaskRequest method. // req, resp := client.CancelImportTaskRequest(params) @@ -1563,27 +1969,41 @@ func (c *EC2) CancelImportTaskRequest(input *CancelImportTaskInput) (req *reques // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CancelImportTask func (c *EC2) CancelImportTask(input *CancelImportTaskInput) (*CancelImportTaskOutput, error) { req, out := c.CancelImportTaskRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CancelImportTaskWithContext is the same as CancelImportTask with the addition of +// the ability to pass a context and additional request options. +// +// See CancelImportTask for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CancelImportTaskWithContext(ctx aws.Context, input *CancelImportTaskInput, opts ...request.Option) (*CancelImportTaskOutput, error) { + req, out := c.CancelImportTaskRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCancelReservedInstancesListing = "CancelReservedInstancesListing" // CancelReservedInstancesListingRequest generates a "aws/request.Request" representing the // client's request for the CancelReservedInstancesListing operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See CancelReservedInstancesListing for usage and error information. +// See CancelReservedInstancesListing for more information on using the CancelReservedInstancesListing +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CancelReservedInstancesListing method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CancelReservedInstancesListingRequest method. // req, resp := client.CancelReservedInstancesListingRequest(params) @@ -1627,27 +2047,41 @@ func (c *EC2) CancelReservedInstancesListingRequest(input *CancelReservedInstanc // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CancelReservedInstancesListing func (c *EC2) CancelReservedInstancesListing(input *CancelReservedInstancesListingInput) (*CancelReservedInstancesListingOutput, error) { req, out := c.CancelReservedInstancesListingRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CancelReservedInstancesListingWithContext is the same as CancelReservedInstancesListing with the addition of +// the ability to pass a context and additional request options. +// +// See CancelReservedInstancesListing for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CancelReservedInstancesListingWithContext(ctx aws.Context, input *CancelReservedInstancesListingInput, opts ...request.Option) (*CancelReservedInstancesListingOutput, error) { + req, out := c.CancelReservedInstancesListingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCancelSpotFleetRequests = "CancelSpotFleetRequests" // CancelSpotFleetRequestsRequest generates a "aws/request.Request" representing the // client's request for the CancelSpotFleetRequests operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See CancelSpotFleetRequests for usage and error information. +// See CancelSpotFleetRequests for more information on using the CancelSpotFleetRequests +// API call, and error handling. 
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CancelSpotFleetRequests method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CancelSpotFleetRequestsRequest method. // req, resp := client.CancelSpotFleetRequestsRequest(params) @@ -1694,27 +2128,41 @@ func (c *EC2) CancelSpotFleetRequestsRequest(input *CancelSpotFleetRequestsInput // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CancelSpotFleetRequests func (c *EC2) CancelSpotFleetRequests(input *CancelSpotFleetRequestsInput) (*CancelSpotFleetRequestsOutput, error) { req, out := c.CancelSpotFleetRequestsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CancelSpotFleetRequestsWithContext is the same as CancelSpotFleetRequests with the addition of +// the ability to pass a context and additional request options. +// +// See CancelSpotFleetRequests for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CancelSpotFleetRequestsWithContext(ctx aws.Context, input *CancelSpotFleetRequestsInput, opts ...request.Option) (*CancelSpotFleetRequestsOutput, error) { + req, out := c.CancelSpotFleetRequestsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCancelSpotInstanceRequests = "CancelSpotInstanceRequests" // CancelSpotInstanceRequestsRequest generates a "aws/request.Request" representing the // client's request for the CancelSpotInstanceRequests operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See CancelSpotInstanceRequests for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CancelSpotInstanceRequests method directly -// instead. +// See CancelSpotInstanceRequests for more information on using the CancelSpotInstanceRequests +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. 
// // // Example sending a request using the CancelSpotInstanceRequestsRequest method. // req, resp := client.CancelSpotInstanceRequestsRequest(params) @@ -1762,27 +2210,41 @@ func (c *EC2) CancelSpotInstanceRequestsRequest(input *CancelSpotInstanceRequest // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CancelSpotInstanceRequests func (c *EC2) CancelSpotInstanceRequests(input *CancelSpotInstanceRequestsInput) (*CancelSpotInstanceRequestsOutput, error) { req, out := c.CancelSpotInstanceRequestsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CancelSpotInstanceRequestsWithContext is the same as CancelSpotInstanceRequests with the addition of +// the ability to pass a context and additional request options. +// +// See CancelSpotInstanceRequests for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CancelSpotInstanceRequestsWithContext(ctx aws.Context, input *CancelSpotInstanceRequestsInput, opts ...request.Option) (*CancelSpotInstanceRequestsOutput, error) { + req, out := c.CancelSpotInstanceRequestsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opConfirmProductInstance = "ConfirmProductInstance" // ConfirmProductInstanceRequest generates a "aws/request.Request" representing the // client's request for the ConfirmProductInstance operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See ConfirmProductInstance for usage and error information. +// See ConfirmProductInstance for more information on using the ConfirmProductInstance +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ConfirmProductInstance method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ConfirmProductInstanceRequest method. // req, resp := client.ConfirmProductInstanceRequest(params) @@ -1813,8 +2275,7 @@ func (c *EC2) ConfirmProductInstanceRequest(input *ConfirmProductInstanceInput) // // Determines whether a product code is associated with an instance. This action // can only be used by the owner of the product code. It is useful when a product -// code owner needs to verify whether another user's instance is eligible for -// support. +// code owner must verify whether another user's instance is eligible for support. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1825,27 +2286,115 @@ func (c *EC2) ConfirmProductInstanceRequest(input *ConfirmProductInstanceInput) // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ConfirmProductInstance func (c *EC2) ConfirmProductInstance(input *ConfirmProductInstanceInput) (*ConfirmProductInstanceOutput, error) { req, out := c.ConfirmProductInstanceRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ConfirmProductInstanceWithContext is the same as ConfirmProductInstance with the addition of +// the ability to pass a context and additional request options. +// +// See ConfirmProductInstance for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ConfirmProductInstanceWithContext(ctx aws.Context, input *ConfirmProductInstanceInput, opts ...request.Option) (*ConfirmProductInstanceOutput, error) { + req, out := c.ConfirmProductInstanceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCopyFpgaImage = "CopyFpgaImage" + +// CopyFpgaImageRequest generates a "aws/request.Request" representing the +// client's request for the CopyFpgaImage operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CopyFpgaImage for more information on using the CopyFpgaImage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CopyFpgaImageRequest method. +// req, resp := client.CopyFpgaImageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CopyFpgaImage +func (c *EC2) CopyFpgaImageRequest(input *CopyFpgaImageInput) (req *request.Request, output *CopyFpgaImageOutput) { + op := &request.Operation{ + Name: opCopyFpgaImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CopyFpgaImageInput{} + } + + output = &CopyFpgaImageOutput{} + req = c.newRequest(op, input, output) + return +} + +// CopyFpgaImage API operation for Amazon Elastic Compute Cloud. +// +// Copies the specified Amazon FPGA Image (AFI) to the current region. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CopyFpgaImage for usage and error information. 
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CopyFpgaImage +func (c *EC2) CopyFpgaImage(input *CopyFpgaImageInput) (*CopyFpgaImageOutput, error) { + req, out := c.CopyFpgaImageRequest(input) + return out, req.Send() +} + +// CopyFpgaImageWithContext is the same as CopyFpgaImage with the addition of +// the ability to pass a context and additional request options. +// +// See CopyFpgaImage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CopyFpgaImageWithContext(ctx aws.Context, input *CopyFpgaImageInput, opts ...request.Option) (*CopyFpgaImageOutput, error) { + req, out := c.CopyFpgaImageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCopyImage = "CopyImage" // CopyImageRequest generates a "aws/request.Request" representing the // client's request for the CopyImage operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See CopyImage for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CopyImage method directly -// instead. +// See CopyImage for more information on using the CopyImage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CopyImageRequest method. // req, resp := client.CopyImageRequest(params) @@ -1878,7 +2427,8 @@ func (c *EC2) CopyImageRequest(input *CopyImageInput) (req *request.Request, out // region. You specify the destination region by using its endpoint when making // the request. // -// For more information, see Copying AMIs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/CopyingAMIs.html) +// For more information about the prerequisites and limits when copying an AMI, +// see Copying an AMI (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/CopyingAMIs.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. 
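The two-step Request/Send form described in the comments above can be used to adjust a request before it is sent. A rough sketch, with a placeholder AMI name, source image ID, and header value:

// Sketch only: builds the request, customizes it (here, an extra HTTP header),
// then calls Send; the output value is only populated after Send returns nil.
package ec2usage

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func copyImageWithHeader(svc *ec2.EC2) error {
	req, out := svc.CopyImageRequest(&ec2.CopyImageInput{
		Name:          aws.String("my-ami-copy"),           // placeholder name
		SourceImageId: aws.String("ami-0123456789abcdef0"), // placeholder ID
		SourceRegion:  aws.String("us-west-2"),
	})

	// Inject custom per-request behavior before sending, as the generated
	// Request-method documentation suggests.
	req.HTTPRequest.Header.Set("X-Trace-Id", "example")

	if err := req.Send(); err != nil {
		return err
	}
	fmt.Println("new image:", aws.StringValue(out.ImageId))
	return nil
}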
Use runtime type assertions @@ -1890,27 +2440,41 @@ func (c *EC2) CopyImageRequest(input *CopyImageInput) (req *request.Request, out // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CopyImage func (c *EC2) CopyImage(input *CopyImageInput) (*CopyImageOutput, error) { req, out := c.CopyImageRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CopyImageWithContext is the same as CopyImage with the addition of +// the ability to pass a context and additional request options. +// +// See CopyImage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CopyImageWithContext(ctx aws.Context, input *CopyImageInput, opts ...request.Option) (*CopyImageOutput, error) { + req, out := c.CopyImageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCopySnapshot = "CopySnapshot" // CopySnapshotRequest generates a "aws/request.Request" representing the // client's request for the CopySnapshot operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See CopySnapshot for usage and error information. +// See CopySnapshot for more information on using the CopySnapshot +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CopySnapshot method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CopySnapshotRequest method. // req, resp := client.CopySnapshotRequest(params) @@ -1969,27 +2533,41 @@ func (c *EC2) CopySnapshotRequest(input *CopySnapshotInput) (req *request.Reques // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CopySnapshot func (c *EC2) CopySnapshot(input *CopySnapshotInput) (*CopySnapshotOutput, error) { req, out := c.CopySnapshotRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CopySnapshotWithContext is the same as CopySnapshot with the addition of +// the ability to pass a context and additional request options. +// +// See CopySnapshot for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) CopySnapshotWithContext(ctx aws.Context, input *CopySnapshotInput, opts ...request.Option) (*CopySnapshotOutput, error) { + req, out := c.CopySnapshotRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCreateCustomerGateway = "CreateCustomerGateway" // CreateCustomerGatewayRequest generates a "aws/request.Request" representing the // client's request for the CreateCustomerGateway operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See CreateCustomerGateway for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateCustomerGateway method directly -// instead. +// See CreateCustomerGateway for more information on using the CreateCustomerGateway +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateCustomerGatewayRequest method. // req, resp := client.CreateCustomerGatewayRequest(params) @@ -2053,27 +2631,127 @@ func (c *EC2) CreateCustomerGatewayRequest(input *CreateCustomerGatewayInput) (r // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateCustomerGateway func (c *EC2) CreateCustomerGateway(input *CreateCustomerGatewayInput) (*CreateCustomerGatewayOutput, error) { req, out := c.CreateCustomerGatewayRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CreateCustomerGatewayWithContext is the same as CreateCustomerGateway with the addition of +// the ability to pass a context and additional request options. +// +// See CreateCustomerGateway for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateCustomerGatewayWithContext(ctx aws.Context, input *CreateCustomerGatewayInput, opts ...request.Option) (*CreateCustomerGatewayOutput, error) { + req, out := c.CreateCustomerGatewayRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateDefaultVpc = "CreateDefaultVpc" + +// CreateDefaultVpcRequest generates a "aws/request.Request" representing the +// client's request for the CreateDefaultVpc operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See CreateDefaultVpc for more information on using the CreateDefaultVpc +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateDefaultVpcRequest method. +// req, resp := client.CreateDefaultVpcRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateDefaultVpc +func (c *EC2) CreateDefaultVpcRequest(input *CreateDefaultVpcInput) (req *request.Request, output *CreateDefaultVpcOutput) { + op := &request.Operation{ + Name: opCreateDefaultVpc, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDefaultVpcInput{} + } + + output = &CreateDefaultVpcOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateDefaultVpc API operation for Amazon Elastic Compute Cloud. +// +// Creates a default VPC with a size /16 IPv4 CIDR block and a default subnet +// in each Availability Zone. For more information about the components of a +// default VPC, see Default VPC and Default Subnets (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/default-vpc.html) +// in the Amazon Virtual Private Cloud User Guide. You cannot specify the components +// of the default VPC yourself. +// +// You can create a default VPC if you deleted your previous default VPC. You +// cannot have more than one default VPC per region. +// +// If your account supports EC2-Classic, you cannot use this action to create +// a default VPC in a region that supports EC2-Classic. If you want a default +// VPC in a region that supports EC2-Classic, see "I really want a default VPC +// for my existing EC2 account. Is that possible?" in the Default VPCs FAQ (http://aws.amazon.com/vpc/faqs/#Default_VPCs). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateDefaultVpc for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateDefaultVpc +func (c *EC2) CreateDefaultVpc(input *CreateDefaultVpcInput) (*CreateDefaultVpcOutput, error) { + req, out := c.CreateDefaultVpcRequest(input) + return out, req.Send() +} + +// CreateDefaultVpcWithContext is the same as CreateDefaultVpc with the addition of +// the ability to pass a context and additional request options. +// +// See CreateDefaultVpc for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateDefaultVpcWithContext(ctx aws.Context, input *CreateDefaultVpcInput, opts ...request.Option) (*CreateDefaultVpcOutput, error) { + req, out := c.CreateDefaultVpcRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCreateDhcpOptions = "CreateDhcpOptions" // CreateDhcpOptionsRequest generates a "aws/request.Request" representing the // client's request for the CreateDhcpOptions operation. 
The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See CreateDhcpOptions for usage and error information. +// See CreateDhcpOptions for more information on using the CreateDhcpOptions +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateDhcpOptions method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateDhcpOptionsRequest method. // req, resp := client.CreateDhcpOptionsRequest(params) @@ -2115,16 +2793,16 @@ func (c *EC2) CreateDhcpOptionsRequest(input *CreateDhcpOptionsInput) (req *requ // to receive a custom DNS hostname as specified in domain-name, you must // set domain-name-servers to a custom DNS server. // -// * domain-name - If you're using AmazonProvidedDNS in "us-east-1", specify -// "ec2.internal". If you're using AmazonProvidedDNS in another region, specify -// "region.compute.internal" (for example, "ap-northeast-1.compute.internal"). -// Otherwise, specify a domain name (for example, "MyCompany.com"). This -// value is used to complete unqualified DNS hostnames. Important: Some Linux -// operating systems accept multiple domain names separated by spaces. However, -// Windows and other Linux operating systems treat the value as a single -// domain, which results in unexpected behavior. If your DHCP options set -// is associated with a VPC that has instances with multiple operating systems, -// specify only one domain name. +// * domain-name - If you're using AmazonProvidedDNS in us-east-1, specify +// ec2.internal. If you're using AmazonProvidedDNS in another region, specify +// region.compute.internal (for example, ap-northeast-1.compute.internal). +// Otherwise, specify a domain name (for example, MyCompany.com). This value +// is used to complete unqualified DNS hostnames. Important: Some Linux operating +// systems accept multiple domain names separated by spaces. However, Windows +// and other Linux operating systems treat the value as a single domain, +// which results in unexpected behavior. If your DHCP options set is associated +// with a VPC that has instances with multiple operating systems, specify +// only one domain name. // // * ntp-servers - The IP addresses of up to four Network Time Protocol (NTP) // servers. 
@@ -2152,27 +2830,41 @@ func (c *EC2) CreateDhcpOptionsRequest(input *CreateDhcpOptionsInput) (req *requ // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateDhcpOptions func (c *EC2) CreateDhcpOptions(input *CreateDhcpOptionsInput) (*CreateDhcpOptionsOutput, error) { req, out := c.CreateDhcpOptionsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CreateDhcpOptionsWithContext is the same as CreateDhcpOptions with the addition of +// the ability to pass a context and additional request options. +// +// See CreateDhcpOptions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateDhcpOptionsWithContext(ctx aws.Context, input *CreateDhcpOptionsInput, opts ...request.Option) (*CreateDhcpOptionsOutput, error) { + req, out := c.CreateDhcpOptionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCreateEgressOnlyInternetGateway = "CreateEgressOnlyInternetGateway" // CreateEgressOnlyInternetGatewayRequest generates a "aws/request.Request" representing the // client's request for the CreateEgressOnlyInternetGateway operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See CreateEgressOnlyInternetGateway for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateEgressOnlyInternetGateway method directly -// instead. +// See CreateEgressOnlyInternetGateway for more information on using the CreateEgressOnlyInternetGateway +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateEgressOnlyInternetGatewayRequest method. // req, resp := client.CreateEgressOnlyInternetGatewayRequest(params) @@ -2215,27 +2907,41 @@ func (c *EC2) CreateEgressOnlyInternetGatewayRequest(input *CreateEgressOnlyInte // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateEgressOnlyInternetGateway func (c *EC2) CreateEgressOnlyInternetGateway(input *CreateEgressOnlyInternetGatewayInput) (*CreateEgressOnlyInternetGatewayOutput, error) { req, out := c.CreateEgressOnlyInternetGatewayRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CreateEgressOnlyInternetGatewayWithContext is the same as CreateEgressOnlyInternetGateway with the addition of +// the ability to pass a context and additional request options. 
+// +// See CreateEgressOnlyInternetGateway for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateEgressOnlyInternetGatewayWithContext(ctx aws.Context, input *CreateEgressOnlyInternetGatewayInput, opts ...request.Option) (*CreateEgressOnlyInternetGatewayOutput, error) { + req, out := c.CreateEgressOnlyInternetGatewayRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCreateFlowLogs = "CreateFlowLogs" // CreateFlowLogsRequest generates a "aws/request.Request" representing the // client's request for the CreateFlowLogs operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See CreateFlowLogs for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateFlowLogs method directly -// instead. +// See CreateFlowLogs for more information on using the CreateFlowLogs +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateFlowLogsRequest method. // req, resp := client.CreateFlowLogsRequest(params) @@ -2284,27 +2990,122 @@ func (c *EC2) CreateFlowLogsRequest(input *CreateFlowLogsInput) (req *request.Re // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateFlowLogs func (c *EC2) CreateFlowLogs(input *CreateFlowLogsInput) (*CreateFlowLogsOutput, error) { req, out := c.CreateFlowLogsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CreateFlowLogsWithContext is the same as CreateFlowLogs with the addition of +// the ability to pass a context and additional request options. +// +// See CreateFlowLogs for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateFlowLogsWithContext(ctx aws.Context, input *CreateFlowLogsInput, opts ...request.Option) (*CreateFlowLogsOutput, error) { + req, out := c.CreateFlowLogsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateFpgaImage = "CreateFpgaImage" + +// CreateFpgaImageRequest generates a "aws/request.Request" representing the +// client's request for the CreateFpgaImage operation. 
The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateFpgaImage for more information on using the CreateFpgaImage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateFpgaImageRequest method. +// req, resp := client.CreateFpgaImageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateFpgaImage +func (c *EC2) CreateFpgaImageRequest(input *CreateFpgaImageInput) (req *request.Request, output *CreateFpgaImageOutput) { + op := &request.Operation{ + Name: opCreateFpgaImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateFpgaImageInput{} + } + + output = &CreateFpgaImageOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateFpgaImage API operation for Amazon Elastic Compute Cloud. +// +// Creates an Amazon FPGA Image (AFI) from the specified design checkpoint (DCP). +// +// The create operation is asynchronous. To verify that the AFI is ready for +// use, check the output logs. +// +// An AFI contains the FPGA bitstream that is ready to download to an FPGA. +// You can securely deploy an AFI on one or more FPGA-accelerated instances. +// For more information, see the AWS FPGA Hardware Development Kit (https://github.com/aws/aws-fpga/). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateFpgaImage for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateFpgaImage +func (c *EC2) CreateFpgaImage(input *CreateFpgaImageInput) (*CreateFpgaImageOutput, error) { + req, out := c.CreateFpgaImageRequest(input) + return out, req.Send() +} + +// CreateFpgaImageWithContext is the same as CreateFpgaImage with the addition of +// the ability to pass a context and additional request options. +// +// See CreateFpgaImage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateFpgaImageWithContext(ctx aws.Context, input *CreateFpgaImageInput, opts ...request.Option) (*CreateFpgaImageOutput, error) { + req, out := c.CreateFpgaImageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCreateImage = "CreateImage" // CreateImageRequest generates a "aws/request.Request" representing the // client's request for the CreateImage operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See CreateImage for usage and error information. +// See CreateImage for more information on using the CreateImage +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateImage method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateImageRequest method. // req, resp := client.CreateImageRequest(params) @@ -2353,27 +3154,41 @@ func (c *EC2) CreateImageRequest(input *CreateImageInput) (req *request.Request, // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateImage func (c *EC2) CreateImage(input *CreateImageInput) (*CreateImageOutput, error) { req, out := c.CreateImageRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CreateImageWithContext is the same as CreateImage with the addition of +// the ability to pass a context and additional request options. +// +// See CreateImage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateImageWithContext(ctx aws.Context, input *CreateImageInput, opts ...request.Option) (*CreateImageOutput, error) { + req, out := c.CreateImageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCreateInstanceExportTask = "CreateInstanceExportTask" // CreateInstanceExportTaskRequest generates a "aws/request.Request" representing the // client's request for the CreateInstanceExportTask operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See CreateInstanceExportTask for usage and error information. +// See CreateInstanceExportTask for more information on using the CreateInstanceExportTask +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateInstanceExportTask method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateInstanceExportTaskRequest method. // req, resp := client.CreateInstanceExportTaskRequest(params) @@ -2418,27 +3233,41 @@ func (c *EC2) CreateInstanceExportTaskRequest(input *CreateInstanceExportTaskInp // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateInstanceExportTask func (c *EC2) CreateInstanceExportTask(input *CreateInstanceExportTaskInput) (*CreateInstanceExportTaskOutput, error) { req, out := c.CreateInstanceExportTaskRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CreateInstanceExportTaskWithContext is the same as CreateInstanceExportTask with the addition of +// the ability to pass a context and additional request options. +// +// See CreateInstanceExportTask for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateInstanceExportTaskWithContext(ctx aws.Context, input *CreateInstanceExportTaskInput, opts ...request.Option) (*CreateInstanceExportTaskOutput, error) { + req, out := c.CreateInstanceExportTaskRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCreateInternetGateway = "CreateInternetGateway" // CreateInternetGatewayRequest generates a "aws/request.Request" representing the // client's request for the CreateInternetGateway operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See CreateInternetGateway for usage and error information. +// See CreateInternetGateway for more information on using the CreateInternetGateway +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateInternetGateway method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateInternetGatewayRequest method. 
// req, resp := client.CreateInternetGatewayRequest(params) @@ -2482,27 +3311,41 @@ func (c *EC2) CreateInternetGatewayRequest(input *CreateInternetGatewayInput) (r // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateInternetGateway func (c *EC2) CreateInternetGateway(input *CreateInternetGatewayInput) (*CreateInternetGatewayOutput, error) { req, out := c.CreateInternetGatewayRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CreateInternetGatewayWithContext is the same as CreateInternetGateway with the addition of +// the ability to pass a context and additional request options. +// +// See CreateInternetGateway for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateInternetGatewayWithContext(ctx aws.Context, input *CreateInternetGatewayInput, opts ...request.Option) (*CreateInternetGatewayOutput, error) { + req, out := c.CreateInternetGatewayRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCreateKeyPair = "CreateKeyPair" // CreateKeyPairRequest generates a "aws/request.Request" representing the // client's request for the CreateKeyPair operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See CreateKeyPair for usage and error information. +// See CreateKeyPair for more information on using the CreateKeyPair +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateKeyPair method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateKeyPairRequest method. // req, resp := client.CreateKeyPairRequest(params) @@ -2553,27 +3396,41 @@ func (c *EC2) CreateKeyPairRequest(input *CreateKeyPairInput) (req *request.Requ // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateKeyPair func (c *EC2) CreateKeyPair(input *CreateKeyPairInput) (*CreateKeyPairOutput, error) { req, out := c.CreateKeyPairRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CreateKeyPairWithContext is the same as CreateKeyPair with the addition of +// the ability to pass a context and additional request options. +// +// See CreateKeyPair for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateKeyPairWithContext(ctx aws.Context, input *CreateKeyPairInput, opts ...request.Option) (*CreateKeyPairOutput, error) { + req, out := c.CreateKeyPairRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCreateNatGateway = "CreateNatGateway" // CreateNatGatewayRequest generates a "aws/request.Request" representing the // client's request for the CreateNatGateway operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See CreateNatGateway for usage and error information. +// See CreateNatGateway for more information on using the CreateNatGateway +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateNatGateway method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateNatGatewayRequest method. // req, resp := client.CreateNatGatewayRequest(params) @@ -2618,27 +3475,41 @@ func (c *EC2) CreateNatGatewayRequest(input *CreateNatGatewayInput) (req *reques // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateNatGateway func (c *EC2) CreateNatGateway(input *CreateNatGatewayInput) (*CreateNatGatewayOutput, error) { req, out := c.CreateNatGatewayRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CreateNatGatewayWithContext is the same as CreateNatGateway with the addition of +// the ability to pass a context and additional request options. +// +// See CreateNatGateway for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateNatGatewayWithContext(ctx aws.Context, input *CreateNatGatewayInput, opts ...request.Option) (*CreateNatGatewayOutput, error) { + req, out := c.CreateNatGatewayRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCreateNetworkAcl = "CreateNetworkAcl" // CreateNetworkAclRequest generates a "aws/request.Request" representing the // client's request for the CreateNetworkAcl operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See CreateNetworkAcl for usage and error information. +// See CreateNetworkAcl for more information on using the CreateNetworkAcl +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateNetworkAcl method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateNetworkAclRequest method. // req, resp := client.CreateNetworkAclRequest(params) @@ -2682,27 +3553,41 @@ func (c *EC2) CreateNetworkAclRequest(input *CreateNetworkAclInput) (req *reques // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateNetworkAcl func (c *EC2) CreateNetworkAcl(input *CreateNetworkAclInput) (*CreateNetworkAclOutput, error) { req, out := c.CreateNetworkAclRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CreateNetworkAclWithContext is the same as CreateNetworkAcl with the addition of +// the ability to pass a context and additional request options. +// +// See CreateNetworkAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateNetworkAclWithContext(ctx aws.Context, input *CreateNetworkAclInput, opts ...request.Option) (*CreateNetworkAclOutput, error) { + req, out := c.CreateNetworkAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCreateNetworkAclEntry = "CreateNetworkAclEntry" // CreateNetworkAclEntryRequest generates a "aws/request.Request" representing the // client's request for the CreateNetworkAclEntry operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See CreateNetworkAclEntry for usage and error information. +// See CreateNetworkAclEntry for more information on using the CreateNetworkAclEntry +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateNetworkAclEntry method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateNetworkAclEntryRequest method. // req, resp := client.CreateNetworkAclEntryRequest(params) @@ -2760,27 +3645,41 @@ func (c *EC2) CreateNetworkAclEntryRequest(input *CreateNetworkAclEntryInput) (r // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateNetworkAclEntry func (c *EC2) CreateNetworkAclEntry(input *CreateNetworkAclEntryInput) (*CreateNetworkAclEntryOutput, error) { req, out := c.CreateNetworkAclEntryRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CreateNetworkAclEntryWithContext is the same as CreateNetworkAclEntry with the addition of +// the ability to pass a context and additional request options. +// +// See CreateNetworkAclEntry for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateNetworkAclEntryWithContext(ctx aws.Context, input *CreateNetworkAclEntryInput, opts ...request.Option) (*CreateNetworkAclEntryOutput, error) { + req, out := c.CreateNetworkAclEntryRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCreateNetworkInterface = "CreateNetworkInterface" // CreateNetworkInterfaceRequest generates a "aws/request.Request" representing the // client's request for the CreateNetworkInterface operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See CreateNetworkInterface for usage and error information. +// See CreateNetworkInterface for more information on using the CreateNetworkInterface +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateNetworkInterface method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateNetworkInterfaceRequest method. 
// req, resp := client.CreateNetworkInterfaceRequest(params) @@ -2824,27 +3723,119 @@ func (c *EC2) CreateNetworkInterfaceRequest(input *CreateNetworkInterfaceInput) // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateNetworkInterface func (c *EC2) CreateNetworkInterface(input *CreateNetworkInterfaceInput) (*CreateNetworkInterfaceOutput, error) { req, out := c.CreateNetworkInterfaceRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CreateNetworkInterfaceWithContext is the same as CreateNetworkInterface with the addition of +// the ability to pass a context and additional request options. +// +// See CreateNetworkInterface for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateNetworkInterfaceWithContext(ctx aws.Context, input *CreateNetworkInterfaceInput, opts ...request.Option) (*CreateNetworkInterfaceOutput, error) { + req, out := c.CreateNetworkInterfaceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateNetworkInterfacePermission = "CreateNetworkInterfacePermission" + +// CreateNetworkInterfacePermissionRequest generates a "aws/request.Request" representing the +// client's request for the CreateNetworkInterfacePermission operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateNetworkInterfacePermission for more information on using the CreateNetworkInterfacePermission +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateNetworkInterfacePermissionRequest method. +// req, resp := client.CreateNetworkInterfacePermissionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateNetworkInterfacePermission +func (c *EC2) CreateNetworkInterfacePermissionRequest(input *CreateNetworkInterfacePermissionInput) (req *request.Request, output *CreateNetworkInterfacePermissionOutput) { + op := &request.Operation{ + Name: opCreateNetworkInterfacePermission, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateNetworkInterfacePermissionInput{} + } + + output = &CreateNetworkInterfacePermissionOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateNetworkInterfacePermission API operation for Amazon Elastic Compute Cloud. +// +// Grants an AWS authorized partner account permission to attach the specified +// network interface to an instance in their account. +// +// You can grant permission to a single AWS account only, and only one account +// at a time. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateNetworkInterfacePermission for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateNetworkInterfacePermission +func (c *EC2) CreateNetworkInterfacePermission(input *CreateNetworkInterfacePermissionInput) (*CreateNetworkInterfacePermissionOutput, error) { + req, out := c.CreateNetworkInterfacePermissionRequest(input) + return out, req.Send() +} + +// CreateNetworkInterfacePermissionWithContext is the same as CreateNetworkInterfacePermission with the addition of +// the ability to pass a context and additional request options. +// +// See CreateNetworkInterfacePermission for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateNetworkInterfacePermissionWithContext(ctx aws.Context, input *CreateNetworkInterfacePermissionInput, opts ...request.Option) (*CreateNetworkInterfacePermissionOutput, error) { + req, out := c.CreateNetworkInterfacePermissionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCreatePlacementGroup = "CreatePlacementGroup" // CreatePlacementGroupRequest generates a "aws/request.Request" representing the // client's request for the CreatePlacementGroup operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See CreatePlacementGroup for usage and error information. +// See CreatePlacementGroup for more information on using the CreatePlacementGroup +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreatePlacementGroup method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreatePlacementGroupRequest method. // req, resp := client.CreatePlacementGroupRequest(params) @@ -2875,8 +3866,8 @@ func (c *EC2) CreatePlacementGroupRequest(input *CreatePlacementGroupInput) (req // CreatePlacementGroup API operation for Amazon Elastic Compute Cloud. // -// Creates a placement group that you launch cluster instances into. You must -// give the group a name that's unique within the scope of your account. +// Creates a placement group that you launch cluster instances into. Give the +// group a name that's unique within the scope of your account. 
// // For more information about placement groups and cluster instances, see Cluster // Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using_cluster_computing.html) @@ -2891,27 +3882,41 @@ func (c *EC2) CreatePlacementGroupRequest(input *CreatePlacementGroupInput) (req // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreatePlacementGroup func (c *EC2) CreatePlacementGroup(input *CreatePlacementGroupInput) (*CreatePlacementGroupOutput, error) { req, out := c.CreatePlacementGroupRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CreatePlacementGroupWithContext is the same as CreatePlacementGroup with the addition of +// the ability to pass a context and additional request options. +// +// See CreatePlacementGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreatePlacementGroupWithContext(ctx aws.Context, input *CreatePlacementGroupInput, opts ...request.Option) (*CreatePlacementGroupOutput, error) { + req, out := c.CreatePlacementGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCreateReservedInstancesListing = "CreateReservedInstancesListing" // CreateReservedInstancesListingRequest generates a "aws/request.Request" representing the // client's request for the CreateReservedInstancesListing operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See CreateReservedInstancesListing for usage and error information. +// See CreateReservedInstancesListing for more information on using the CreateReservedInstancesListing +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateReservedInstancesListing method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateReservedInstancesListingRequest method. // req, resp := client.CreateReservedInstancesListingRequest(params) @@ -2945,6 +3950,10 @@ func (c *EC2) CreateReservedInstancesListingRequest(input *CreateReservedInstanc // listing at a time. To get a list of your Standard Reserved Instances, you // can use the DescribeReservedInstances operation. // +// Only Standard Reserved Instances with a capacity reservation can be sold +// in the Reserved Instance Marketplace. Convertible Reserved Instances and +// Standard Reserved Instances with a regional benefit cannot be sold. 
+// // The Reserved Instance Marketplace matches sellers who want to resell Standard // Reserved Instance capacity that they no longer need with buyers who want // to purchase additional capacity. Reserved Instances bought and sold through @@ -2970,27 +3979,41 @@ func (c *EC2) CreateReservedInstancesListingRequest(input *CreateReservedInstanc // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateReservedInstancesListing func (c *EC2) CreateReservedInstancesListing(input *CreateReservedInstancesListingInput) (*CreateReservedInstancesListingOutput, error) { req, out := c.CreateReservedInstancesListingRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CreateReservedInstancesListingWithContext is the same as CreateReservedInstancesListing with the addition of +// the ability to pass a context and additional request options. +// +// See CreateReservedInstancesListing for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateReservedInstancesListingWithContext(ctx aws.Context, input *CreateReservedInstancesListingInput, opts ...request.Option) (*CreateReservedInstancesListingOutput, error) { + req, out := c.CreateReservedInstancesListingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCreateRoute = "CreateRoute" // CreateRouteRequest generates a "aws/request.Request" representing the // client's request for the CreateRoute operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See CreateRoute for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateRoute method directly -// instead. +// See CreateRoute for more information on using the CreateRoute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateRouteRequest method. // req, resp := client.CreateRouteRequest(params) @@ -3049,27 +4072,41 @@ func (c *EC2) CreateRouteRequest(input *CreateRouteInput) (req *request.Request, // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateRoute func (c *EC2) CreateRoute(input *CreateRouteInput) (*CreateRouteOutput, error) { req, out := c.CreateRouteRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CreateRouteWithContext is the same as CreateRoute with the addition of +// the ability to pass a context and additional request options. 
+// +// See CreateRoute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateRouteWithContext(ctx aws.Context, input *CreateRouteInput, opts ...request.Option) (*CreateRouteOutput, error) { + req, out := c.CreateRouteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCreateRouteTable = "CreateRouteTable" // CreateRouteTableRequest generates a "aws/request.Request" representing the // client's request for the CreateRouteTable operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See CreateRouteTable for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateRouteTable method directly -// instead. +// See CreateRouteTable for more information on using the CreateRouteTable +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateRouteTableRequest method. // req, resp := client.CreateRouteTableRequest(params) @@ -3113,27 +4150,41 @@ func (c *EC2) CreateRouteTableRequest(input *CreateRouteTableInput) (req *reques // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateRouteTable func (c *EC2) CreateRouteTable(input *CreateRouteTableInput) (*CreateRouteTableOutput, error) { req, out := c.CreateRouteTableRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CreateRouteTableWithContext is the same as CreateRouteTable with the addition of +// the ability to pass a context and additional request options. +// +// See CreateRouteTable for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateRouteTableWithContext(ctx aws.Context, input *CreateRouteTableInput, opts ...request.Option) (*CreateRouteTableOutput, error) { + req, out := c.CreateRouteTableRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCreateSecurityGroup = "CreateSecurityGroup" // CreateSecurityGroupRequest generates a "aws/request.Request" representing the // client's request for the CreateSecurityGroup operation. 
The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See CreateSecurityGroup for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateSecurityGroup method directly -// instead. +// See CreateSecurityGroup for more information on using the CreateSecurityGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateSecurityGroupRequest method. // req, resp := client.CreateSecurityGroupRequest(params) @@ -3199,27 +4250,41 @@ func (c *EC2) CreateSecurityGroupRequest(input *CreateSecurityGroupInput) (req * // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateSecurityGroup func (c *EC2) CreateSecurityGroup(input *CreateSecurityGroupInput) (*CreateSecurityGroupOutput, error) { req, out := c.CreateSecurityGroupRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CreateSecurityGroupWithContext is the same as CreateSecurityGroup with the addition of +// the ability to pass a context and additional request options. +// +// See CreateSecurityGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateSecurityGroupWithContext(ctx aws.Context, input *CreateSecurityGroupInput, opts ...request.Option) (*CreateSecurityGroupOutput, error) { + req, out := c.CreateSecurityGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCreateSnapshot = "CreateSnapshot" // CreateSnapshotRequest generates a "aws/request.Request" representing the // client's request for the CreateSnapshot operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See CreateSnapshot for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateSnapshot method directly -// instead. 
+// See CreateSnapshot for more information on using the CreateSnapshot +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateSnapshotRequest method. // req, resp := client.CreateSnapshotRequest(params) @@ -3286,27 +4351,41 @@ func (c *EC2) CreateSnapshotRequest(input *CreateSnapshotInput) (req *request.Re // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateSnapshot func (c *EC2) CreateSnapshot(input *CreateSnapshotInput) (*Snapshot, error) { req, out := c.CreateSnapshotRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CreateSnapshotWithContext is the same as CreateSnapshot with the addition of +// the ability to pass a context and additional request options. +// +// See CreateSnapshot for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateSnapshotWithContext(ctx aws.Context, input *CreateSnapshotInput, opts ...request.Option) (*Snapshot, error) { + req, out := c.CreateSnapshotRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCreateSpotDatafeedSubscription = "CreateSpotDatafeedSubscription" // CreateSpotDatafeedSubscriptionRequest generates a "aws/request.Request" representing the // client's request for the CreateSpotDatafeedSubscription operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See CreateSpotDatafeedSubscription for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateSpotDatafeedSubscription method directly -// instead. +// See CreateSpotDatafeedSubscription for more information on using the CreateSpotDatafeedSubscription +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateSpotDatafeedSubscriptionRequest method. 
// req, resp := client.CreateSpotDatafeedSubscriptionRequest(params) @@ -3349,27 +4428,41 @@ func (c *EC2) CreateSpotDatafeedSubscriptionRequest(input *CreateSpotDatafeedSub // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateSpotDatafeedSubscription func (c *EC2) CreateSpotDatafeedSubscription(input *CreateSpotDatafeedSubscriptionInput) (*CreateSpotDatafeedSubscriptionOutput, error) { req, out := c.CreateSpotDatafeedSubscriptionRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CreateSpotDatafeedSubscriptionWithContext is the same as CreateSpotDatafeedSubscription with the addition of +// the ability to pass a context and additional request options. +// +// See CreateSpotDatafeedSubscription for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateSpotDatafeedSubscriptionWithContext(ctx aws.Context, input *CreateSpotDatafeedSubscriptionInput, opts ...request.Option) (*CreateSpotDatafeedSubscriptionOutput, error) { + req, out := c.CreateSpotDatafeedSubscriptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCreateSubnet = "CreateSubnet" // CreateSubnetRequest generates a "aws/request.Request" representing the // client's request for the CreateSubnet operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See CreateSubnet for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateSubnet method directly -// instead. +// See CreateSubnet for more information on using the CreateSubnet +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateSubnetRequest method. // req, resp := client.CreateSubnetRequest(params) @@ -3400,19 +4493,18 @@ func (c *EC2) CreateSubnetRequest(input *CreateSubnetInput) (req *request.Reques // // Creates a subnet in an existing VPC. // -// When you create each subnet, you provide the VPC ID and the CIDR block you -// want for the subnet. After you create a subnet, you can't change its CIDR -// block. The subnet's IPv4 CIDR block can be the same as the VPC's IPv4 CIDR -// block (assuming you want only a single subnet in the VPC), or a subset of -// the VPC's IPv4 CIDR block. If you create more than one subnet in a VPC, the -// subnets' CIDR blocks must not overlap. 
The smallest IPv4 subnet (and VPC) -// you can create uses a /28 netmask (16 IPv4 addresses), and the largest uses -// a /16 netmask (65,536 IPv4 addresses). +// When you create each subnet, you provide the VPC ID and the IPv4 CIDR block +// you want for the subnet. After you create a subnet, you can't change its +// CIDR block. The size of the subnet's IPv4 CIDR block can be the same as a +// VPC's IPv4 CIDR block, or a subset of a VPC's IPv4 CIDR block. If you create +// more than one subnet in a VPC, the subnets' CIDR blocks must not overlap. +// The smallest IPv4 subnet (and VPC) you can create uses a /28 netmask (16 +// IPv4 addresses), and the largest uses a /16 netmask (65,536 IPv4 addresses). // // If you've associated an IPv6 CIDR block with your VPC, you can create a subnet // with an IPv6 CIDR block that uses a /64 prefix length. // -// AWS reserves both the first four and the last IP address in each subnet's +// AWS reserves both the first four and the last IPv4 address in each subnet's // CIDR block. They're not available for use. // // If you add more than one subnet to a VPC, they're set up in a star topology @@ -3436,27 +4528,41 @@ func (c *EC2) CreateSubnetRequest(input *CreateSubnetInput) (req *request.Reques // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateSubnet func (c *EC2) CreateSubnet(input *CreateSubnetInput) (*CreateSubnetOutput, error) { req, out := c.CreateSubnetRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CreateSubnetWithContext is the same as CreateSubnet with the addition of +// the ability to pass a context and additional request options. +// +// See CreateSubnet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateSubnetWithContext(ctx aws.Context, input *CreateSubnetInput, opts ...request.Option) (*CreateSubnetOutput, error) { + req, out := c.CreateSubnetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCreateTags = "CreateTags" // CreateTagsRequest generates a "aws/request.Request" representing the // client's request for the CreateTags operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See CreateTags for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateTags method directly -// instead. +// See CreateTags for more information on using the CreateTags +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
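// Illustrative sketch of the CreateSubnet CIDR guidance earlier in this hunk:
// for a VPC using 10.0.0.0/16, a /24 subnet is a valid subset (between the /28
// and /16 limits), and any additional subnet must not overlap it. The VPC ID
// and zone are hypothetical; assumes the client setup from the earlier sketch.
func createExampleSubnet(ctx context.Context, svc *ec2.EC2) error {
    _, err := svc.CreateSubnetWithContext(ctx, &ec2.CreateSubnetInput{
        VpcId:            aws.String("vpc-0123456789abcdef0"),
        CidrBlock:        aws.String("10.0.0.0/24"), // subset of the VPC's 10.0.0.0/16
        AvailabilityZone: aws.String("us-west-2a"),
    })
    return err
}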
// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateTagsRequest method. // req, resp := client.CreateTagsRequest(params) @@ -3506,27 +4612,41 @@ func (c *EC2) CreateTagsRequest(input *CreateTagsInput) (req *request.Request, o // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateTags func (c *EC2) CreateTags(input *CreateTagsInput) (*CreateTagsOutput, error) { req, out := c.CreateTagsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CreateTagsWithContext is the same as CreateTags with the addition of +// the ability to pass a context and additional request options. +// +// See CreateTags for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateTagsWithContext(ctx aws.Context, input *CreateTagsInput, opts ...request.Option) (*CreateTagsOutput, error) { + req, out := c.CreateTagsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCreateVolume = "CreateVolume" // CreateVolumeRequest generates a "aws/request.Request" representing the // client's request for the CreateVolume operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See CreateVolume for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateVolume method directly -// instead. +// See CreateVolume for more information on using the CreateVolume +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateVolumeRequest method. // req, resp := client.CreateVolumeRequest(params) @@ -3569,7 +4689,10 @@ func (c *EC2) CreateVolumeRequest(input *CreateVolumeInput) (req *request.Reques // encrypted. For more information, see Amazon EBS Encryption (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) // in the Amazon Elastic Compute Cloud User Guide. // -// For more information, see Creating or Restoring an Amazon EBS Volume (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-creating-volume.html) +// You can tag your volumes during creation. For more information, see Tagging +// Your Amazon EC2 Resources (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html). 
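// Illustrative sketch of tagging a volume at creation time, as the CreateVolume
// comment above describes ("You can tag your volumes during creation"); the
// size, type, zone and tag values are assumptions for the example.
func createTaggedVolume(ctx context.Context, svc *ec2.EC2) (*ec2.Volume, error) {
    return svc.CreateVolumeWithContext(ctx, &ec2.CreateVolumeInput{
        AvailabilityZone: aws.String("us-west-2a"),
        Size:             aws.Int64(20), // GiB
        VolumeType:       aws.String("gp2"),
        TagSpecifications: []*ec2.TagSpecification{{
            ResourceType: aws.String("volume"),
            Tags: []*ec2.Tag{{
                Key:   aws.String("Name"),
                Value: aws.String("example-volume"), // hypothetical tag
            }},
        }},
    })
}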
+// +// For more information, see Creating an Amazon EBS Volume (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-creating-volume.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -3581,27 +4704,41 @@ func (c *EC2) CreateVolumeRequest(input *CreateVolumeInput) (req *request.Reques // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVolume func (c *EC2) CreateVolume(input *CreateVolumeInput) (*Volume, error) { req, out := c.CreateVolumeRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CreateVolumeWithContext is the same as CreateVolume with the addition of +// the ability to pass a context and additional request options. +// +// See CreateVolume for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateVolumeWithContext(ctx aws.Context, input *CreateVolumeInput, opts ...request.Option) (*Volume, error) { + req, out := c.CreateVolumeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCreateVpc = "CreateVpc" // CreateVpcRequest generates a "aws/request.Request" representing the // client's request for the CreateVpc operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See CreateVpc for usage and error information. +// See CreateVpc for more information on using the CreateVpc +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateVpc method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateVpcRequest method. // req, resp := client.CreateVpcRequest(params) @@ -3647,8 +4784,8 @@ func (c *EC2) CreateVpcRequest(input *CreateVpcInput) (req *request.Request, out // // You can specify the instance tenancy value for the VPC when you create it. // You can't change this value for the VPC after you create it. For more information, -// see Dedicated Instances (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/dedicated-instance.html.html) -// in the Amazon Virtual Private Cloud User Guide. +// see Dedicated Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/dedicated-instance.html) +// in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3659,27 +4796,41 @@ func (c *EC2) CreateVpcRequest(input *CreateVpcInput) (req *request.Request, out // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVpc func (c *EC2) CreateVpc(input *CreateVpcInput) (*CreateVpcOutput, error) { req, out := c.CreateVpcRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CreateVpcWithContext is the same as CreateVpc with the addition of +// the ability to pass a context and additional request options. +// +// See CreateVpc for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateVpcWithContext(ctx aws.Context, input *CreateVpcInput, opts ...request.Option) (*CreateVpcOutput, error) { + req, out := c.CreateVpcRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCreateVpcEndpoint = "CreateVpcEndpoint" // CreateVpcEndpointRequest generates a "aws/request.Request" representing the // client's request for the CreateVpcEndpoint operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See CreateVpcEndpoint for usage and error information. +// See CreateVpcEndpoint for more information on using the CreateVpcEndpoint +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateVpcEndpoint method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateVpcEndpointRequest method. // req, resp := client.CreateVpcEndpointRequest(params) @@ -3714,7 +4865,7 @@ func (c *EC2) CreateVpcEndpointRequest(input *CreateVpcEndpointInput) (req *requ // that will control access to the service from your VPC. You can also specify // the VPC route tables that use the endpoint. // -// Currently, only endpoints to Amazon S3 are supported. +// Use DescribeVpcEndpointServices to get a list of supported AWS services. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3725,27 +4876,41 @@ func (c *EC2) CreateVpcEndpointRequest(input *CreateVpcEndpointInput) (req *requ // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVpcEndpoint func (c *EC2) CreateVpcEndpoint(input *CreateVpcEndpointInput) (*CreateVpcEndpointOutput, error) { req, out := c.CreateVpcEndpointRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CreateVpcEndpointWithContext is the same as CreateVpcEndpoint with the addition of +// the ability to pass a context and additional request options. +// +// See CreateVpcEndpoint for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateVpcEndpointWithContext(ctx aws.Context, input *CreateVpcEndpointInput, opts ...request.Option) (*CreateVpcEndpointOutput, error) { + req, out := c.CreateVpcEndpointRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCreateVpcPeeringConnection = "CreateVpcPeeringConnection" // CreateVpcPeeringConnectionRequest generates a "aws/request.Request" representing the // client's request for the CreateVpcPeeringConnection operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See CreateVpcPeeringConnection for usage and error information. +// See CreateVpcPeeringConnection for more information on using the CreateVpcPeeringConnection +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateVpcPeeringConnection method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateVpcPeeringConnectionRequest method. // req, resp := client.CreateVpcPeeringConnectionRequest(params) @@ -3783,8 +4948,8 @@ func (c *EC2) CreateVpcPeeringConnectionRequest(input *CreateVpcPeeringConnectio // peering connection. The VPC peering connection request expires after 7 days, // after which it cannot be accepted or rejected. // -// A CreateVpcPeeringConnection request between VPCs with overlapping CIDR blocks -// results in the VPC peering connection having a status of failed. +// If you try to create a VPC peering connection between VPCs that have overlapping +// CIDR blocks, the VPC peering connection status goes to failed. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3795,27 +4960,41 @@ func (c *EC2) CreateVpcPeeringConnectionRequest(input *CreateVpcPeeringConnectio // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVpcPeeringConnection func (c *EC2) CreateVpcPeeringConnection(input *CreateVpcPeeringConnectionInput) (*CreateVpcPeeringConnectionOutput, error) { req, out := c.CreateVpcPeeringConnectionRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CreateVpcPeeringConnectionWithContext is the same as CreateVpcPeeringConnection with the addition of +// the ability to pass a context and additional request options. +// +// See CreateVpcPeeringConnection for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateVpcPeeringConnectionWithContext(ctx aws.Context, input *CreateVpcPeeringConnectionInput, opts ...request.Option) (*CreateVpcPeeringConnectionOutput, error) { + req, out := c.CreateVpcPeeringConnectionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCreateVpnConnection = "CreateVpnConnection" // CreateVpnConnectionRequest generates a "aws/request.Request" representing the // client's request for the CreateVpnConnection operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See CreateVpnConnection for usage and error information. +// See CreateVpnConnection for more information on using the CreateVpnConnection +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateVpnConnection method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateVpnConnectionRequest method. // req, resp := client.CreateVpnConnectionRequest(params) @@ -3861,8 +5040,7 @@ func (c *EC2) CreateVpnConnectionRequest(input *CreateVpnConnectionInput) (req * // This is an idempotent operation. If you perform the operation more than once, // Amazon EC2 doesn't return an error. // -// For more information about VPN connections, see Adding a Hardware Virtual -// Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) +// For more information, see AWS Managed VPN Connections (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) // in the Amazon Virtual Private Cloud User Guide. 
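// Illustrative sketch of the CreateVpcEndpoint note a little earlier in this
// hunk: DescribeVpcEndpointServices lists the services an endpoint can target,
// and one of the returned names is then passed to CreateVpcEndpoint. The VPC
// and route table IDs are hypothetical; assumes the client setup from the
// earlier sketch.
func createFirstAvailableEndpoint(ctx context.Context, svc *ec2.EC2) error {
    services, err := svc.DescribeVpcEndpointServicesWithContext(ctx, &ec2.DescribeVpcEndpointServicesInput{})
    if err != nil || len(services.ServiceNames) == 0 {
        return err
    }
    _, err = svc.CreateVpcEndpointWithContext(ctx, &ec2.CreateVpcEndpointInput{
        VpcId:         aws.String("vpc-0123456789abcdef0"),
        ServiceName:   services.ServiceNames[0],
        RouteTableIds: []*string{aws.String("rtb-0123456789abcdef0")},
    })
    return err
}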
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -3874,27 +5052,41 @@ func (c *EC2) CreateVpnConnectionRequest(input *CreateVpnConnectionInput) (req * // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVpnConnection func (c *EC2) CreateVpnConnection(input *CreateVpnConnectionInput) (*CreateVpnConnectionOutput, error) { req, out := c.CreateVpnConnectionRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CreateVpnConnectionWithContext is the same as CreateVpnConnection with the addition of +// the ability to pass a context and additional request options. +// +// See CreateVpnConnection for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateVpnConnectionWithContext(ctx aws.Context, input *CreateVpnConnectionInput, opts ...request.Option) (*CreateVpnConnectionOutput, error) { + req, out := c.CreateVpnConnectionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCreateVpnConnectionRoute = "CreateVpnConnectionRoute" // CreateVpnConnectionRouteRequest generates a "aws/request.Request" representing the // client's request for the CreateVpnConnectionRoute operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See CreateVpnConnectionRoute for usage and error information. +// See CreateVpnConnectionRoute for more information on using the CreateVpnConnectionRoute +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateVpnConnectionRoute method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateVpnConnectionRouteRequest method. // req, resp := client.CreateVpnConnectionRouteRequest(params) @@ -3943,27 +5135,41 @@ func (c *EC2) CreateVpnConnectionRouteRequest(input *CreateVpnConnectionRouteInp // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVpnConnectionRoute func (c *EC2) CreateVpnConnectionRoute(input *CreateVpnConnectionRouteInput) (*CreateVpnConnectionRouteOutput, error) { req, out := c.CreateVpnConnectionRouteRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CreateVpnConnectionRouteWithContext is the same as CreateVpnConnectionRoute with the addition of +// the ability to pass a context and additional request options. 
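// Illustrative sketch of the variadic request options mentioned above for
// CreateVpnConnectionRouteWithContext: built-in helpers such as
// request.WithLogLevel can be passed per call (assuming that helper is present
// in this SDK revision). The IDs are hypothetical; assumes the earlier client
// setup plus the aws/request import.
func createVpnRouteWithDebugLogging(ctx context.Context, svc *ec2.EC2) error {
    _, err := svc.CreateVpnConnectionRouteWithContext(ctx, &ec2.CreateVpnConnectionRouteInput{
        VpnConnectionId:      aws.String("vpn-0123456789abcdef0"),
        DestinationCidrBlock: aws.String("192.168.100.0/24"),
    }, request.WithLogLevel(aws.LogDebugWithHTTPBody))
    return err
}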
+// +// See CreateVpnConnectionRoute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateVpnConnectionRouteWithContext(ctx aws.Context, input *CreateVpnConnectionRouteInput, opts ...request.Option) (*CreateVpnConnectionRouteOutput, error) { + req, out := c.CreateVpnConnectionRouteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCreateVpnGateway = "CreateVpnGateway" // CreateVpnGatewayRequest generates a "aws/request.Request" representing the // client's request for the CreateVpnGateway operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See CreateVpnGateway for usage and error information. +// See CreateVpnGateway for more information on using the CreateVpnGateway +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateVpnGateway method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateVpnGatewayRequest method. // req, resp := client.CreateVpnGatewayRequest(params) @@ -4009,27 +5215,41 @@ func (c *EC2) CreateVpnGatewayRequest(input *CreateVpnGatewayInput) (req *reques // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateVpnGateway func (c *EC2) CreateVpnGateway(input *CreateVpnGatewayInput) (*CreateVpnGatewayOutput, error) { req, out := c.CreateVpnGatewayRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CreateVpnGatewayWithContext is the same as CreateVpnGateway with the addition of +// the ability to pass a context and additional request options. +// +// See CreateVpnGateway for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateVpnGatewayWithContext(ctx aws.Context, input *CreateVpnGatewayInput, opts ...request.Option) (*CreateVpnGatewayOutput, error) { + req, out := c.CreateVpnGatewayRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteCustomerGateway = "DeleteCustomerGateway" // DeleteCustomerGatewayRequest generates a "aws/request.Request" representing the // client's request for the DeleteCustomerGateway operation. 
The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DeleteCustomerGateway for usage and error information. +// See DeleteCustomerGateway for more information on using the DeleteCustomerGateway +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteCustomerGateway method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteCustomerGatewayRequest method. // req, resp := client.DeleteCustomerGatewayRequest(params) @@ -4072,27 +5292,41 @@ func (c *EC2) DeleteCustomerGatewayRequest(input *DeleteCustomerGatewayInput) (r // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteCustomerGateway func (c *EC2) DeleteCustomerGateway(input *DeleteCustomerGatewayInput) (*DeleteCustomerGatewayOutput, error) { req, out := c.DeleteCustomerGatewayRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteCustomerGatewayWithContext is the same as DeleteCustomerGateway with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteCustomerGateway for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteCustomerGatewayWithContext(ctx aws.Context, input *DeleteCustomerGatewayInput, opts ...request.Option) (*DeleteCustomerGatewayOutput, error) { + req, out := c.DeleteCustomerGatewayRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteDhcpOptions = "DeleteDhcpOptions" // DeleteDhcpOptionsRequest generates a "aws/request.Request" representing the // client's request for the DeleteDhcpOptions operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DeleteDhcpOptions for usage and error information. +// See DeleteDhcpOptions for more information on using the DeleteDhcpOptions +// API call, and error handling. 
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteDhcpOptions method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteDhcpOptionsRequest method. // req, resp := client.DeleteDhcpOptionsRequest(params) @@ -4137,27 +5371,41 @@ func (c *EC2) DeleteDhcpOptionsRequest(input *DeleteDhcpOptionsInput) (req *requ // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteDhcpOptions func (c *EC2) DeleteDhcpOptions(input *DeleteDhcpOptionsInput) (*DeleteDhcpOptionsOutput, error) { req, out := c.DeleteDhcpOptionsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteDhcpOptionsWithContext is the same as DeleteDhcpOptions with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteDhcpOptions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteDhcpOptionsWithContext(ctx aws.Context, input *DeleteDhcpOptionsInput, opts ...request.Option) (*DeleteDhcpOptionsOutput, error) { + req, out := c.DeleteDhcpOptionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteEgressOnlyInternetGateway = "DeleteEgressOnlyInternetGateway" // DeleteEgressOnlyInternetGatewayRequest generates a "aws/request.Request" representing the // client's request for the DeleteEgressOnlyInternetGateway operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DeleteEgressOnlyInternetGateway for usage and error information. +// See DeleteEgressOnlyInternetGateway for more information on using the DeleteEgressOnlyInternetGateway +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteEgressOnlyInternetGateway method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteEgressOnlyInternetGatewayRequest method. 
// req, resp := client.DeleteEgressOnlyInternetGatewayRequest(params) @@ -4197,27 +5445,41 @@ func (c *EC2) DeleteEgressOnlyInternetGatewayRequest(input *DeleteEgressOnlyInte // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteEgressOnlyInternetGateway func (c *EC2) DeleteEgressOnlyInternetGateway(input *DeleteEgressOnlyInternetGatewayInput) (*DeleteEgressOnlyInternetGatewayOutput, error) { req, out := c.DeleteEgressOnlyInternetGatewayRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteEgressOnlyInternetGatewayWithContext is the same as DeleteEgressOnlyInternetGateway with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteEgressOnlyInternetGateway for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteEgressOnlyInternetGatewayWithContext(ctx aws.Context, input *DeleteEgressOnlyInternetGatewayInput, opts ...request.Option) (*DeleteEgressOnlyInternetGatewayOutput, error) { + req, out := c.DeleteEgressOnlyInternetGatewayRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteFlowLogs = "DeleteFlowLogs" // DeleteFlowLogsRequest generates a "aws/request.Request" representing the // client's request for the DeleteFlowLogs operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DeleteFlowLogs for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteFlowLogs method directly -// instead. +// See DeleteFlowLogs for more information on using the DeleteFlowLogs +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteFlowLogsRequest method. // req, resp := client.DeleteFlowLogsRequest(params) @@ -4257,27 +5519,115 @@ func (c *EC2) DeleteFlowLogsRequest(input *DeleteFlowLogsInput) (req *request.Re // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteFlowLogs func (c *EC2) DeleteFlowLogs(input *DeleteFlowLogsInput) (*DeleteFlowLogsOutput, error) { req, out := c.DeleteFlowLogsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteFlowLogsWithContext is the same as DeleteFlowLogs with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteFlowLogs for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteFlowLogsWithContext(ctx aws.Context, input *DeleteFlowLogsInput, opts ...request.Option) (*DeleteFlowLogsOutput, error) { + req, out := c.DeleteFlowLogsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteFpgaImage = "DeleteFpgaImage" + +// DeleteFpgaImageRequest generates a "aws/request.Request" representing the +// client's request for the DeleteFpgaImage operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteFpgaImage for more information on using the DeleteFpgaImage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteFpgaImageRequest method. +// req, resp := client.DeleteFpgaImageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteFpgaImage +func (c *EC2) DeleteFpgaImageRequest(input *DeleteFpgaImageInput) (req *request.Request, output *DeleteFpgaImageOutput) { + op := &request.Operation{ + Name: opDeleteFpgaImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteFpgaImageInput{} + } + + output = &DeleteFpgaImageOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteFpgaImage API operation for Amazon Elastic Compute Cloud. +// +// Deletes the specified Amazon FPGA Image (AFI). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteFpgaImage for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteFpgaImage +func (c *EC2) DeleteFpgaImage(input *DeleteFpgaImageInput) (*DeleteFpgaImageOutput, error) { + req, out := c.DeleteFpgaImageRequest(input) + return out, req.Send() +} + +// DeleteFpgaImageWithContext is the same as DeleteFpgaImage with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteFpgaImage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteFpgaImageWithContext(ctx aws.Context, input *DeleteFpgaImageInput, opts ...request.Option) (*DeleteFpgaImageOutput, error) { + req, out := c.DeleteFpgaImageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() } const opDeleteInternetGateway = "DeleteInternetGateway" // DeleteInternetGatewayRequest generates a "aws/request.Request" representing the // client's request for the DeleteInternetGateway operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DeleteInternetGateway for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteInternetGateway method directly -// instead. +// See DeleteInternetGateway for more information on using the DeleteInternetGateway +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteInternetGatewayRequest method. // req, resp := client.DeleteInternetGatewayRequest(params) @@ -4320,27 +5670,41 @@ func (c *EC2) DeleteInternetGatewayRequest(input *DeleteInternetGatewayInput) (r // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteInternetGateway func (c *EC2) DeleteInternetGateway(input *DeleteInternetGatewayInput) (*DeleteInternetGatewayOutput, error) { req, out := c.DeleteInternetGatewayRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteInternetGatewayWithContext is the same as DeleteInternetGateway with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteInternetGateway for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteInternetGatewayWithContext(ctx aws.Context, input *DeleteInternetGatewayInput, opts ...request.Option) (*DeleteInternetGatewayOutput, error) { + req, out := c.DeleteInternetGatewayRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteKeyPair = "DeleteKeyPair" // DeleteKeyPairRequest generates a "aws/request.Request" representing the // client's request for the DeleteKeyPair operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DeleteKeyPair for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
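// Illustrative sketch of the error-handling guidance repeated above, applied to
// the newly added DeleteFpgaImage operation: service and SDK failures come back
// as awserr.Error, and a type assertion exposes the Code and Message. The AFI
// ID is hypothetical; assumes the earlier client setup plus
// "github.com/aws/aws-sdk-go/aws/awserr".
func deleteFpgaImage(ctx context.Context, svc *ec2.EC2, afiID string) {
    _, err := svc.DeleteFpgaImageWithContext(ctx, &ec2.DeleteFpgaImageInput{
        FpgaImageId: aws.String(afiID),
    })
    if err != nil {
        if aerr, ok := err.(awserr.Error); ok {
            fmt.Println(aerr.Code(), aerr.Message())
            return
        }
        fmt.Println(err)
    }
}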
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteKeyPair method directly -// instead. +// See DeleteKeyPair for more information on using the DeleteKeyPair +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteKeyPairRequest method. // req, resp := client.DeleteKeyPairRequest(params) @@ -4382,27 +5746,41 @@ func (c *EC2) DeleteKeyPairRequest(input *DeleteKeyPairInput) (req *request.Requ // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteKeyPair func (c *EC2) DeleteKeyPair(input *DeleteKeyPairInput) (*DeleteKeyPairOutput, error) { req, out := c.DeleteKeyPairRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteKeyPairWithContext is the same as DeleteKeyPair with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteKeyPair for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteKeyPairWithContext(ctx aws.Context, input *DeleteKeyPairInput, opts ...request.Option) (*DeleteKeyPairOutput, error) { + req, out := c.DeleteKeyPairRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteNatGateway = "DeleteNatGateway" // DeleteNatGatewayRequest generates a "aws/request.Request" representing the // client's request for the DeleteNatGateway operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DeleteNatGateway for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteNatGateway method directly -// instead. +// See DeleteNatGateway for more information on using the DeleteNatGateway +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteNatGatewayRequest method. 
// req, resp := client.DeleteNatGatewayRequest(params) @@ -4444,27 +5822,41 @@ func (c *EC2) DeleteNatGatewayRequest(input *DeleteNatGatewayInput) (req *reques // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteNatGateway func (c *EC2) DeleteNatGateway(input *DeleteNatGatewayInput) (*DeleteNatGatewayOutput, error) { req, out := c.DeleteNatGatewayRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteNatGatewayWithContext is the same as DeleteNatGateway with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteNatGateway for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteNatGatewayWithContext(ctx aws.Context, input *DeleteNatGatewayInput, opts ...request.Option) (*DeleteNatGatewayOutput, error) { + req, out := c.DeleteNatGatewayRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteNetworkAcl = "DeleteNetworkAcl" // DeleteNetworkAclRequest generates a "aws/request.Request" representing the // client's request for the DeleteNetworkAcl operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DeleteNetworkAcl for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteNetworkAcl method directly -// instead. +// See DeleteNetworkAcl for more information on using the DeleteNetworkAcl +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteNetworkAclRequest method. // req, resp := client.DeleteNetworkAclRequest(params) @@ -4507,27 +5899,41 @@ func (c *EC2) DeleteNetworkAclRequest(input *DeleteNetworkAclInput) (req *reques // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteNetworkAcl func (c *EC2) DeleteNetworkAcl(input *DeleteNetworkAclInput) (*DeleteNetworkAclOutput, error) { req, out := c.DeleteNetworkAclRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteNetworkAclWithContext is the same as DeleteNetworkAcl with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteNetworkAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteNetworkAclWithContext(ctx aws.Context, input *DeleteNetworkAclInput, opts ...request.Option) (*DeleteNetworkAclOutput, error) { + req, out := c.DeleteNetworkAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteNetworkAclEntry = "DeleteNetworkAclEntry" // DeleteNetworkAclEntryRequest generates a "aws/request.Request" representing the // client's request for the DeleteNetworkAclEntry operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DeleteNetworkAclEntry for usage and error information. +// See DeleteNetworkAclEntry for more information on using the DeleteNetworkAclEntry +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteNetworkAclEntry method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteNetworkAclEntryRequest method. // req, resp := client.DeleteNetworkAclEntryRequest(params) @@ -4570,27 +5976,41 @@ func (c *EC2) DeleteNetworkAclEntryRequest(input *DeleteNetworkAclEntryInput) (r // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteNetworkAclEntry func (c *EC2) DeleteNetworkAclEntry(input *DeleteNetworkAclEntryInput) (*DeleteNetworkAclEntryOutput, error) { req, out := c.DeleteNetworkAclEntryRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteNetworkAclEntryWithContext is the same as DeleteNetworkAclEntry with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteNetworkAclEntry for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteNetworkAclEntryWithContext(ctx aws.Context, input *DeleteNetworkAclEntryInput, opts ...request.Option) (*DeleteNetworkAclEntryOutput, error) { + req, out := c.DeleteNetworkAclEntryRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteNetworkInterface = "DeleteNetworkInterface" // DeleteNetworkInterfaceRequest generates a "aws/request.Request" representing the // client's request for the DeleteNetworkInterface operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. 
+// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DeleteNetworkInterface for usage and error information. +// See DeleteNetworkInterface for more information on using the DeleteNetworkInterface +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteNetworkInterface method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteNetworkInterfaceRequest method. // req, resp := client.DeleteNetworkInterfaceRequest(params) @@ -4633,27 +6053,118 @@ func (c *EC2) DeleteNetworkInterfaceRequest(input *DeleteNetworkInterfaceInput) // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteNetworkInterface func (c *EC2) DeleteNetworkInterface(input *DeleteNetworkInterfaceInput) (*DeleteNetworkInterfaceOutput, error) { req, out := c.DeleteNetworkInterfaceRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteNetworkInterfaceWithContext is the same as DeleteNetworkInterface with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteNetworkInterface for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteNetworkInterfaceWithContext(ctx aws.Context, input *DeleteNetworkInterfaceInput, opts ...request.Option) (*DeleteNetworkInterfaceOutput, error) { + req, out := c.DeleteNetworkInterfaceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteNetworkInterfacePermission = "DeleteNetworkInterfacePermission" + +// DeleteNetworkInterfacePermissionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteNetworkInterfacePermission operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteNetworkInterfacePermission for more information on using the DeleteNetworkInterfacePermission +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteNetworkInterfacePermissionRequest method. 
+// req, resp := client.DeleteNetworkInterfacePermissionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteNetworkInterfacePermission +func (c *EC2) DeleteNetworkInterfacePermissionRequest(input *DeleteNetworkInterfacePermissionInput) (req *request.Request, output *DeleteNetworkInterfacePermissionOutput) { + op := &request.Operation{ + Name: opDeleteNetworkInterfacePermission, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteNetworkInterfacePermissionInput{} + } + + output = &DeleteNetworkInterfacePermissionOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteNetworkInterfacePermission API operation for Amazon Elastic Compute Cloud. +// +// Deletes a permission for a network interface. By default, you cannot delete +// the permission if the account for which you're removing the permission has +// attached the network interface to an instance. However, you can force delete +// the permission, regardless of any attachment. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteNetworkInterfacePermission for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteNetworkInterfacePermission +func (c *EC2) DeleteNetworkInterfacePermission(input *DeleteNetworkInterfacePermissionInput) (*DeleteNetworkInterfacePermissionOutput, error) { + req, out := c.DeleteNetworkInterfacePermissionRequest(input) + return out, req.Send() +} + +// DeleteNetworkInterfacePermissionWithContext is the same as DeleteNetworkInterfacePermission with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteNetworkInterfacePermission for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteNetworkInterfacePermissionWithContext(ctx aws.Context, input *DeleteNetworkInterfacePermissionInput, opts ...request.Option) (*DeleteNetworkInterfacePermissionOutput, error) { + req, out := c.DeleteNetworkInterfacePermissionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeletePlacementGroup = "DeletePlacementGroup" // DeletePlacementGroupRequest generates a "aws/request.Request" representing the // client's request for the DeletePlacementGroup operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DeletePlacementGroup for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeletePlacementGroup method directly -// instead. +// See DeletePlacementGroup for more information on using the DeletePlacementGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeletePlacementGroupRequest method. // req, resp := client.DeletePlacementGroupRequest(params) @@ -4698,27 +6209,41 @@ func (c *EC2) DeletePlacementGroupRequest(input *DeletePlacementGroupInput) (req // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeletePlacementGroup func (c *EC2) DeletePlacementGroup(input *DeletePlacementGroupInput) (*DeletePlacementGroupOutput, error) { req, out := c.DeletePlacementGroupRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeletePlacementGroupWithContext is the same as DeletePlacementGroup with the addition of +// the ability to pass a context and additional request options. +// +// See DeletePlacementGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeletePlacementGroupWithContext(ctx aws.Context, input *DeletePlacementGroupInput, opts ...request.Option) (*DeletePlacementGroupOutput, error) { + req, out := c.DeletePlacementGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteRoute = "DeleteRoute" // DeleteRouteRequest generates a "aws/request.Request" representing the // client's request for the DeleteRoute operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DeleteRoute for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteRoute method directly -// instead. +// See DeleteRoute for more information on using the DeleteRoute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteRouteRequest method. 
// req, resp := client.DeleteRouteRequest(params) @@ -4760,27 +6285,41 @@ func (c *EC2) DeleteRouteRequest(input *DeleteRouteInput) (req *request.Request, // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteRoute func (c *EC2) DeleteRoute(input *DeleteRouteInput) (*DeleteRouteOutput, error) { req, out := c.DeleteRouteRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteRouteWithContext is the same as DeleteRoute with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteRoute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteRouteWithContext(ctx aws.Context, input *DeleteRouteInput, opts ...request.Option) (*DeleteRouteOutput, error) { + req, out := c.DeleteRouteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteRouteTable = "DeleteRouteTable" // DeleteRouteTableRequest generates a "aws/request.Request" representing the // client's request for the DeleteRouteTable operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DeleteRouteTable for usage and error information. +// See DeleteRouteTable for more information on using the DeleteRouteTable +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteRouteTable method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteRouteTableRequest method. // req, resp := client.DeleteRouteTableRequest(params) @@ -4824,27 +6363,41 @@ func (c *EC2) DeleteRouteTableRequest(input *DeleteRouteTableInput) (req *reques // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteRouteTable func (c *EC2) DeleteRouteTable(input *DeleteRouteTableInput) (*DeleteRouteTableOutput, error) { req, out := c.DeleteRouteTableRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteRouteTableWithContext is the same as DeleteRouteTable with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteRouteTable for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteRouteTableWithContext(ctx aws.Context, input *DeleteRouteTableInput, opts ...request.Option) (*DeleteRouteTableOutput, error) { + req, out := c.DeleteRouteTableRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteSecurityGroup = "DeleteSecurityGroup" // DeleteSecurityGroupRequest generates a "aws/request.Request" representing the // client's request for the DeleteSecurityGroup operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DeleteSecurityGroup for usage and error information. +// See DeleteSecurityGroup for more information on using the DeleteSecurityGroup +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteSecurityGroup method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteSecurityGroupRequest method. // req, resp := client.DeleteSecurityGroupRequest(params) @@ -4890,27 +6443,41 @@ func (c *EC2) DeleteSecurityGroupRequest(input *DeleteSecurityGroupInput) (req * // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteSecurityGroup func (c *EC2) DeleteSecurityGroup(input *DeleteSecurityGroupInput) (*DeleteSecurityGroupOutput, error) { req, out := c.DeleteSecurityGroupRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteSecurityGroupWithContext is the same as DeleteSecurityGroup with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteSecurityGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteSecurityGroupWithContext(ctx aws.Context, input *DeleteSecurityGroupInput, opts ...request.Option) (*DeleteSecurityGroupOutput, error) { + req, out := c.DeleteSecurityGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteSnapshot = "DeleteSnapshot" // DeleteSnapshotRequest generates a "aws/request.Request" representing the // client's request for the DeleteSnapshot operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DeleteSnapshot for usage and error information. +// See DeleteSnapshot for more information on using the DeleteSnapshot +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteSnapshot method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteSnapshotRequest method. // req, resp := client.DeleteSnapshotRequest(params) @@ -4966,27 +6533,41 @@ func (c *EC2) DeleteSnapshotRequest(input *DeleteSnapshotInput) (req *request.Re // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteSnapshot func (c *EC2) DeleteSnapshot(input *DeleteSnapshotInput) (*DeleteSnapshotOutput, error) { req, out := c.DeleteSnapshotRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteSnapshotWithContext is the same as DeleteSnapshot with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteSnapshot for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteSnapshotWithContext(ctx aws.Context, input *DeleteSnapshotInput, opts ...request.Option) (*DeleteSnapshotOutput, error) { + req, out := c.DeleteSnapshotRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteSpotDatafeedSubscription = "DeleteSpotDatafeedSubscription" // DeleteSpotDatafeedSubscriptionRequest generates a "aws/request.Request" representing the // client's request for the DeleteSpotDatafeedSubscription operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DeleteSpotDatafeedSubscription for usage and error information. +// See DeleteSpotDatafeedSubscription for more information on using the DeleteSpotDatafeedSubscription +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteSpotDatafeedSubscription method directly -// instead. 
+// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteSpotDatafeedSubscriptionRequest method. // req, resp := client.DeleteSpotDatafeedSubscriptionRequest(params) @@ -5028,27 +6609,41 @@ func (c *EC2) DeleteSpotDatafeedSubscriptionRequest(input *DeleteSpotDatafeedSub // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteSpotDatafeedSubscription func (c *EC2) DeleteSpotDatafeedSubscription(input *DeleteSpotDatafeedSubscriptionInput) (*DeleteSpotDatafeedSubscriptionOutput, error) { req, out := c.DeleteSpotDatafeedSubscriptionRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteSpotDatafeedSubscriptionWithContext is the same as DeleteSpotDatafeedSubscription with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteSpotDatafeedSubscription for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteSpotDatafeedSubscriptionWithContext(ctx aws.Context, input *DeleteSpotDatafeedSubscriptionInput, opts ...request.Option) (*DeleteSpotDatafeedSubscriptionOutput, error) { + req, out := c.DeleteSpotDatafeedSubscriptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteSubnet = "DeleteSubnet" // DeleteSubnetRequest generates a "aws/request.Request" representing the // client's request for the DeleteSubnet operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DeleteSubnet for usage and error information. +// See DeleteSubnet for more information on using the DeleteSubnet +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteSubnet method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteSubnetRequest method. 
// req, resp := client.DeleteSubnetRequest(params) @@ -5091,27 +6686,41 @@ func (c *EC2) DeleteSubnetRequest(input *DeleteSubnetInput) (req *request.Reques // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteSubnet func (c *EC2) DeleteSubnet(input *DeleteSubnetInput) (*DeleteSubnetOutput, error) { req, out := c.DeleteSubnetRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteSubnetWithContext is the same as DeleteSubnet with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteSubnet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteSubnetWithContext(ctx aws.Context, input *DeleteSubnetInput, opts ...request.Option) (*DeleteSubnetOutput, error) { + req, out := c.DeleteSubnetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteTags = "DeleteTags" // DeleteTagsRequest generates a "aws/request.Request" representing the // client's request for the DeleteTags operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DeleteTags for usage and error information. +// See DeleteTags for more information on using the DeleteTags +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteTags method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteTagsRequest method. // req, resp := client.DeleteTagsRequest(params) @@ -5142,10 +6751,10 @@ func (c *EC2) DeleteTagsRequest(input *DeleteTagsInput) (req *request.Request, o // DeleteTags API operation for Amazon Elastic Compute Cloud. // -// Deletes the specified set of tags from the specified set of resources. This -// call is designed to follow a DescribeTags request. +// Deletes the specified set of tags from the specified set of resources. // -// For more information about tags, see Tagging Your Resources (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) +// To list the current tags, use DescribeTags. For more information about tags, +// see Tagging Your Resources (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions @@ -5157,27 +6766,41 @@ func (c *EC2) DeleteTagsRequest(input *DeleteTagsInput) (req *request.Request, o // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteTags func (c *EC2) DeleteTags(input *DeleteTagsInput) (*DeleteTagsOutput, error) { req, out := c.DeleteTagsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteTagsWithContext is the same as DeleteTags with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteTags for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteTagsWithContext(ctx aws.Context, input *DeleteTagsInput, opts ...request.Option) (*DeleteTagsOutput, error) { + req, out := c.DeleteTagsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteVolume = "DeleteVolume" // DeleteVolumeRequest generates a "aws/request.Request" representing the // client's request for the DeleteVolume operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DeleteVolume for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteVolume method directly -// instead. +// See DeleteVolume for more information on using the DeleteVolume +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteVolumeRequest method. // req, resp := client.DeleteVolumeRequest(params) @@ -5225,27 +6848,41 @@ func (c *EC2) DeleteVolumeRequest(input *DeleteVolumeInput) (req *request.Reques // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVolume func (c *EC2) DeleteVolume(input *DeleteVolumeInput) (*DeleteVolumeOutput, error) { req, out := c.DeleteVolumeRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteVolumeWithContext is the same as DeleteVolume with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteVolume for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
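As a minimal usage sketch of the WithContext variants introduced throughout this file (here DeleteVolumeWithContext, whose documentation appears just above), the following self-contained Go program bounds the call with a deadline; the session setup, the 30-second timeout, and the volume ID are illustrative placeholders.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// Client built from the environment's default credentials and region.
	svc := ec2.New(session.Must(session.NewSession()))

	// The context must be non-nil; cancelling it (or hitting the deadline)
	// cancels the in-flight request.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// "vol-0abc123def456789a" is a placeholder volume ID.
	out, err := svc.DeleteVolumeWithContext(ctx, &ec2.DeleteVolumeInput{
		VolumeId: aws.String("vol-0abc123def456789a"),
	})
	if err != nil {
		fmt.Println("DeleteVolume failed:", err)
		return
	}
	fmt.Println(out)
}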
+func (c *EC2) DeleteVolumeWithContext(ctx aws.Context, input *DeleteVolumeInput, opts ...request.Option) (*DeleteVolumeOutput, error) { + req, out := c.DeleteVolumeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteVpc = "DeleteVpc" // DeleteVpcRequest generates a "aws/request.Request" representing the // client's request for the DeleteVpc operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DeleteVpc for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteVpc method directly -// instead. +// See DeleteVpc for more information on using the DeleteVpc +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteVpcRequest method. // req, resp := client.DeleteVpcRequest(params) @@ -5291,27 +6928,41 @@ func (c *EC2) DeleteVpcRequest(input *DeleteVpcInput) (req *request.Request, out // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVpc func (c *EC2) DeleteVpc(input *DeleteVpcInput) (*DeleteVpcOutput, error) { req, out := c.DeleteVpcRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteVpcWithContext is the same as DeleteVpc with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteVpc for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteVpcWithContext(ctx aws.Context, input *DeleteVpcInput, opts ...request.Option) (*DeleteVpcOutput, error) { + req, out := c.DeleteVpcRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteVpcEndpoints = "DeleteVpcEndpoints" // DeleteVpcEndpointsRequest generates a "aws/request.Request" representing the // client's request for the DeleteVpcEndpoints operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DeleteVpcEndpoints for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteVpcEndpoints method directly -// instead. +// See DeleteVpcEndpoints for more information on using the DeleteVpcEndpoints +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteVpcEndpointsRequest method. // req, resp := client.DeleteVpcEndpointsRequest(params) @@ -5352,27 +7003,41 @@ func (c *EC2) DeleteVpcEndpointsRequest(input *DeleteVpcEndpointsInput) (req *re // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVpcEndpoints func (c *EC2) DeleteVpcEndpoints(input *DeleteVpcEndpointsInput) (*DeleteVpcEndpointsOutput, error) { req, out := c.DeleteVpcEndpointsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteVpcEndpointsWithContext is the same as DeleteVpcEndpoints with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteVpcEndpoints for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteVpcEndpointsWithContext(ctx aws.Context, input *DeleteVpcEndpointsInput, opts ...request.Option) (*DeleteVpcEndpointsOutput, error) { + req, out := c.DeleteVpcEndpointsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteVpcPeeringConnection = "DeleteVpcPeeringConnection" // DeleteVpcPeeringConnectionRequest generates a "aws/request.Request" representing the // client's request for the DeleteVpcPeeringConnection operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DeleteVpcPeeringConnection for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteVpcPeeringConnection method directly -// instead. +// See DeleteVpcPeeringConnection for more information on using the DeleteVpcPeeringConnection +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. 
// // // Example sending a request using the DeleteVpcPeeringConnectionRequest method. // req, resp := client.DeleteVpcPeeringConnectionRequest(params) @@ -5415,27 +7080,41 @@ func (c *EC2) DeleteVpcPeeringConnectionRequest(input *DeleteVpcPeeringConnectio // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVpcPeeringConnection func (c *EC2) DeleteVpcPeeringConnection(input *DeleteVpcPeeringConnectionInput) (*DeleteVpcPeeringConnectionOutput, error) { req, out := c.DeleteVpcPeeringConnectionRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteVpcPeeringConnectionWithContext is the same as DeleteVpcPeeringConnection with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteVpcPeeringConnection for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteVpcPeeringConnectionWithContext(ctx aws.Context, input *DeleteVpcPeeringConnectionInput, opts ...request.Option) (*DeleteVpcPeeringConnectionOutput, error) { + req, out := c.DeleteVpcPeeringConnectionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteVpnConnection = "DeleteVpnConnection" // DeleteVpnConnectionRequest generates a "aws/request.Request" representing the // client's request for the DeleteVpnConnection operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DeleteVpnConnection for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteVpnConnection method directly -// instead. +// See DeleteVpnConnection for more information on using the DeleteVpnConnection +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteVpnConnectionRequest method. // req, resp := client.DeleteVpnConnectionRequest(params) @@ -5486,27 +7165,41 @@ func (c *EC2) DeleteVpnConnectionRequest(input *DeleteVpnConnectionInput) (req * // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVpnConnection func (c *EC2) DeleteVpnConnection(input *DeleteVpnConnectionInput) (*DeleteVpnConnectionOutput, error) { req, out := c.DeleteVpnConnectionRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteVpnConnectionWithContext is the same as DeleteVpnConnection with the addition of +// the ability to pass a context and additional request options. 
+// +// See DeleteVpnConnection for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteVpnConnectionWithContext(ctx aws.Context, input *DeleteVpnConnectionInput, opts ...request.Option) (*DeleteVpnConnectionOutput, error) { + req, out := c.DeleteVpnConnectionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteVpnConnectionRoute = "DeleteVpnConnectionRoute" // DeleteVpnConnectionRouteRequest generates a "aws/request.Request" representing the // client's request for the DeleteVpnConnectionRoute operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DeleteVpnConnectionRoute for usage and error information. +// See DeleteVpnConnectionRoute for more information on using the DeleteVpnConnectionRoute +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteVpnConnectionRoute method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteVpnConnectionRouteRequest method. // req, resp := client.DeleteVpnConnectionRouteRequest(params) @@ -5551,27 +7244,41 @@ func (c *EC2) DeleteVpnConnectionRouteRequest(input *DeleteVpnConnectionRouteInp // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVpnConnectionRoute func (c *EC2) DeleteVpnConnectionRoute(input *DeleteVpnConnectionRouteInput) (*DeleteVpnConnectionRouteOutput, error) { req, out := c.DeleteVpnConnectionRouteRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteVpnConnectionRouteWithContext is the same as DeleteVpnConnectionRoute with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteVpnConnectionRoute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteVpnConnectionRouteWithContext(ctx aws.Context, input *DeleteVpnConnectionRouteInput, opts ...request.Option) (*DeleteVpnConnectionRouteOutput, error) { + req, out := c.DeleteVpnConnectionRouteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() } const opDeleteVpnGateway = "DeleteVpnGateway" // DeleteVpnGatewayRequest generates a "aws/request.Request" representing the // client's request for the DeleteVpnGateway operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DeleteVpnGateway for usage and error information. +// See DeleteVpnGateway for more information on using the DeleteVpnGateway +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteVpnGateway method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteVpnGatewayRequest method. // req, resp := client.DeleteVpnGatewayRequest(params) @@ -5617,27 +7324,41 @@ func (c *EC2) DeleteVpnGatewayRequest(input *DeleteVpnGatewayInput) (req *reques // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteVpnGateway func (c *EC2) DeleteVpnGateway(input *DeleteVpnGatewayInput) (*DeleteVpnGatewayOutput, error) { req, out := c.DeleteVpnGatewayRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteVpnGatewayWithContext is the same as DeleteVpnGateway with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteVpnGateway for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteVpnGatewayWithContext(ctx aws.Context, input *DeleteVpnGatewayInput, opts ...request.Option) (*DeleteVpnGatewayOutput, error) { + req, out := c.DeleteVpnGatewayRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeregisterImage = "DeregisterImage" // DeregisterImageRequest generates a "aws/request.Request" representing the // client's request for the DeregisterImage operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DeregisterImage for usage and error information. +// See DeregisterImage for more information on using the DeregisterImage +// API call, and error handling. 
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeregisterImage method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeregisterImageRequest method. // req, resp := client.DeregisterImageRequest(params) @@ -5669,9 +7390,14 @@ func (c *EC2) DeregisterImageRequest(input *DeregisterImageInput) (req *request. // DeregisterImage API operation for Amazon Elastic Compute Cloud. // // Deregisters the specified AMI. After you deregister an AMI, it can't be used -// to launch new instances. +// to launch new instances; however, it doesn't affect any instances that you've +// already launched from the AMI. You'll continue to incur usage costs for those +// instances until you terminate them. // -// This command does not delete the AMI. +// When you deregister an Amazon EBS-backed AMI, it doesn't affect the snapshot +// that was created for the root volume of the instance during the AMI creation +// process. When you deregister an instance store-backed AMI, it doesn't affect +// the files that you uploaded to Amazon S3 when you created the AMI. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5682,27 +7408,41 @@ func (c *EC2) DeregisterImageRequest(input *DeregisterImageInput) (req *request. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeregisterImage func (c *EC2) DeregisterImage(input *DeregisterImageInput) (*DeregisterImageOutput, error) { req, out := c.DeregisterImageRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeregisterImageWithContext is the same as DeregisterImage with the addition of +// the ability to pass a context and additional request options. +// +// See DeregisterImage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeregisterImageWithContext(ctx aws.Context, input *DeregisterImageInput, opts ...request.Option) (*DeregisterImageOutput, error) { + req, out := c.DeregisterImageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeAccountAttributes = "DescribeAccountAttributes" // DescribeAccountAttributesRequest generates a "aws/request.Request" representing the // client's request for the DescribeAccountAttributes operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
// -// See DescribeAccountAttributes for usage and error information. +// See DescribeAccountAttributes for more information on using the DescribeAccountAttributes +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeAccountAttributes method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeAccountAttributesRequest method. // req, resp := client.DescribeAccountAttributesRequest(params) @@ -5760,27 +7500,41 @@ func (c *EC2) DescribeAccountAttributesRequest(input *DescribeAccountAttributesI // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeAccountAttributes func (c *EC2) DescribeAccountAttributes(input *DescribeAccountAttributesInput) (*DescribeAccountAttributesOutput, error) { req, out := c.DescribeAccountAttributesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeAccountAttributesWithContext is the same as DescribeAccountAttributes with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeAccountAttributes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeAccountAttributesWithContext(ctx aws.Context, input *DescribeAccountAttributesInput, opts ...request.Option) (*DescribeAccountAttributesOutput, error) { + req, out := c.DescribeAccountAttributesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeAddresses = "DescribeAddresses" // DescribeAddressesRequest generates a "aws/request.Request" representing the // client's request for the DescribeAddresses operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DescribeAddresses for usage and error information. +// See DescribeAddresses for more information on using the DescribeAddresses +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeAddresses method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeAddressesRequest method. // req, resp := client.DescribeAddressesRequest(params) @@ -5824,27 +7578,41 @@ func (c *EC2) DescribeAddressesRequest(input *DescribeAddressesInput) (req *requ // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeAddresses func (c *EC2) DescribeAddresses(input *DescribeAddressesInput) (*DescribeAddressesOutput, error) { req, out := c.DescribeAddressesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeAddressesWithContext is the same as DescribeAddresses with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeAddresses for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeAddressesWithContext(ctx aws.Context, input *DescribeAddressesInput, opts ...request.Option) (*DescribeAddressesOutput, error) { + req, out := c.DescribeAddressesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeAvailabilityZones = "DescribeAvailabilityZones" // DescribeAvailabilityZonesRequest generates a "aws/request.Request" representing the // client's request for the DescribeAvailabilityZones operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DescribeAvailabilityZones for usage and error information. +// See DescribeAvailabilityZones for more information on using the DescribeAvailabilityZones +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeAvailabilityZones method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeAvailabilityZonesRequest method. 
// req, resp := client.DescribeAvailabilityZonesRequest(params) @@ -5890,27 +7658,41 @@ func (c *EC2) DescribeAvailabilityZonesRequest(input *DescribeAvailabilityZonesI // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeAvailabilityZones func (c *EC2) DescribeAvailabilityZones(input *DescribeAvailabilityZonesInput) (*DescribeAvailabilityZonesOutput, error) { req, out := c.DescribeAvailabilityZonesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeAvailabilityZonesWithContext is the same as DescribeAvailabilityZones with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeAvailabilityZones for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeAvailabilityZonesWithContext(ctx aws.Context, input *DescribeAvailabilityZonesInput, opts ...request.Option) (*DescribeAvailabilityZonesOutput, error) { + req, out := c.DescribeAvailabilityZonesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeBundleTasks = "DescribeBundleTasks" // DescribeBundleTasksRequest generates a "aws/request.Request" representing the // client's request for the DescribeBundleTasks operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DescribeBundleTasks for usage and error information. +// See DescribeBundleTasks for more information on using the DescribeBundleTasks +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeBundleTasks method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeBundleTasksRequest method. // req, resp := client.DescribeBundleTasksRequest(params) @@ -5955,27 +7737,41 @@ func (c *EC2) DescribeBundleTasksRequest(input *DescribeBundleTasksInput) (req * // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeBundleTasks func (c *EC2) DescribeBundleTasks(input *DescribeBundleTasksInput) (*DescribeBundleTasksOutput, error) { req, out := c.DescribeBundleTasksRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeBundleTasksWithContext is the same as DescribeBundleTasks with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeBundleTasks for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeBundleTasksWithContext(ctx aws.Context, input *DescribeBundleTasksInput, opts ...request.Option) (*DescribeBundleTasksOutput, error) { + req, out := c.DescribeBundleTasksRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeClassicLinkInstances = "DescribeClassicLinkInstances" // DescribeClassicLinkInstancesRequest generates a "aws/request.Request" representing the // client's request for the DescribeClassicLinkInstances operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DescribeClassicLinkInstances for usage and error information. +// See DescribeClassicLinkInstances for more information on using the DescribeClassicLinkInstances +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeClassicLinkInstances method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeClassicLinkInstancesRequest method. // req, resp := client.DescribeClassicLinkInstancesRequest(params) @@ -6018,27 +7814,41 @@ func (c *EC2) DescribeClassicLinkInstancesRequest(input *DescribeClassicLinkInst // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeClassicLinkInstances func (c *EC2) DescribeClassicLinkInstances(input *DescribeClassicLinkInstancesInput) (*DescribeClassicLinkInstancesOutput, error) { req, out := c.DescribeClassicLinkInstancesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeClassicLinkInstancesWithContext is the same as DescribeClassicLinkInstances with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeClassicLinkInstances for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeClassicLinkInstancesWithContext(ctx aws.Context, input *DescribeClassicLinkInstancesInput, opts ...request.Option) (*DescribeClassicLinkInstancesOutput, error) { + req, out := c.DescribeClassicLinkInstancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() } const opDescribeConversionTasks = "DescribeConversionTasks" // DescribeConversionTasksRequest generates a "aws/request.Request" representing the // client's request for the DescribeConversionTasks operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DescribeConversionTasks for usage and error information. +// See DescribeConversionTasks for more information on using the DescribeConversionTasks +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeConversionTasks method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeConversionTasksRequest method. // req, resp := client.DescribeConversionTasksRequest(params) @@ -6082,27 +7892,41 @@ func (c *EC2) DescribeConversionTasksRequest(input *DescribeConversionTasksInput // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeConversionTasks func (c *EC2) DescribeConversionTasks(input *DescribeConversionTasksInput) (*DescribeConversionTasksOutput, error) { req, out := c.DescribeConversionTasksRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeConversionTasksWithContext is the same as DescribeConversionTasks with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeConversionTasks for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeConversionTasksWithContext(ctx aws.Context, input *DescribeConversionTasksInput, opts ...request.Option) (*DescribeConversionTasksOutput, error) { + req, out := c.DescribeConversionTasksRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeCustomerGateways = "DescribeCustomerGateways" // DescribeCustomerGatewaysRequest generates a "aws/request.Request" representing the // client's request for the DescribeCustomerGateways operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DescribeCustomerGateways for usage and error information. 
+// See DescribeCustomerGateways for more information on using the DescribeCustomerGateways +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeCustomerGateways method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeCustomerGatewaysRequest method. // req, resp := client.DescribeCustomerGatewaysRequest(params) @@ -6146,27 +7970,41 @@ func (c *EC2) DescribeCustomerGatewaysRequest(input *DescribeCustomerGatewaysInp // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeCustomerGateways func (c *EC2) DescribeCustomerGateways(input *DescribeCustomerGatewaysInput) (*DescribeCustomerGatewaysOutput, error) { req, out := c.DescribeCustomerGatewaysRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeCustomerGatewaysWithContext is the same as DescribeCustomerGateways with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeCustomerGateways for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeCustomerGatewaysWithContext(ctx aws.Context, input *DescribeCustomerGatewaysInput, opts ...request.Option) (*DescribeCustomerGatewaysOutput, error) { + req, out := c.DescribeCustomerGatewaysRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeDhcpOptions = "DescribeDhcpOptions" // DescribeDhcpOptionsRequest generates a "aws/request.Request" representing the // client's request for the DescribeDhcpOptions operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DescribeDhcpOptions for usage and error information. +// See DescribeDhcpOptions for more information on using the DescribeDhcpOptions +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeDhcpOptions method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
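The "inject custom logic or configuration ... custom headers, or retry logic" wording in these comments refers to working with the *request.Request returned by the XxxRequest methods instead of the one-shot call. A hedged sketch of that pattern for DescribeDhcpOptions; the client setup and header name are assumptions for illustration only.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	// Build the request without sending it.
	req, out := svc.DescribeDhcpOptionsRequest(&ec2.DescribeDhcpOptionsInput{})

	// Inject custom logic into the request lifecycle: add a header
	// (name is illustrative) as part of the Build phase, before signing.
	req.Handlers.Build.PushBack(func(r *request.Request) {
		r.HTTPRequest.Header.Set("X-Example-Trace-Id", "demo")
	})

	// "out" is only valid once Send returns without error.
	if err := req.Send(); err != nil {
		fmt.Println("DescribeDhcpOptions failed:", err)
		return
	}
	fmt.Println(out)
}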
// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeDhcpOptionsRequest method. // req, resp := client.DescribeDhcpOptionsRequest(params) @@ -6209,27 +8047,41 @@ func (c *EC2) DescribeDhcpOptionsRequest(input *DescribeDhcpOptionsInput) (req * // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeDhcpOptions func (c *EC2) DescribeDhcpOptions(input *DescribeDhcpOptionsInput) (*DescribeDhcpOptionsOutput, error) { req, out := c.DescribeDhcpOptionsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeDhcpOptionsWithContext is the same as DescribeDhcpOptions with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeDhcpOptions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeDhcpOptionsWithContext(ctx aws.Context, input *DescribeDhcpOptionsInput, opts ...request.Option) (*DescribeDhcpOptionsOutput, error) { + req, out := c.DescribeDhcpOptionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeEgressOnlyInternetGateways = "DescribeEgressOnlyInternetGateways" // DescribeEgressOnlyInternetGatewaysRequest generates a "aws/request.Request" representing the // client's request for the DescribeEgressOnlyInternetGateways operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeEgressOnlyInternetGateways for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeEgressOnlyInternetGateways method directly -// instead. +// See DescribeEgressOnlyInternetGateways for more information on using the DescribeEgressOnlyInternetGateways +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeEgressOnlyInternetGatewaysRequest method. 
// req, resp := client.DescribeEgressOnlyInternetGatewaysRequest(params) @@ -6269,27 +8121,116 @@ func (c *EC2) DescribeEgressOnlyInternetGatewaysRequest(input *DescribeEgressOnl // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeEgressOnlyInternetGateways func (c *EC2) DescribeEgressOnlyInternetGateways(input *DescribeEgressOnlyInternetGatewaysInput) (*DescribeEgressOnlyInternetGatewaysOutput, error) { req, out := c.DescribeEgressOnlyInternetGatewaysRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeEgressOnlyInternetGatewaysWithContext is the same as DescribeEgressOnlyInternetGateways with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeEgressOnlyInternetGateways for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeEgressOnlyInternetGatewaysWithContext(ctx aws.Context, input *DescribeEgressOnlyInternetGatewaysInput, opts ...request.Option) (*DescribeEgressOnlyInternetGatewaysOutput, error) { + req, out := c.DescribeEgressOnlyInternetGatewaysRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeElasticGpus = "DescribeElasticGpus" + +// DescribeElasticGpusRequest generates a "aws/request.Request" representing the +// client's request for the DescribeElasticGpus operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeElasticGpus for more information on using the DescribeElasticGpus +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeElasticGpusRequest method. +// req, resp := client.DescribeElasticGpusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeElasticGpus +func (c *EC2) DescribeElasticGpusRequest(input *DescribeElasticGpusInput) (req *request.Request, output *DescribeElasticGpusOutput) { + op := &request.Operation{ + Name: opDescribeElasticGpus, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeElasticGpusInput{} + } + + output = &DescribeElasticGpusOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeElasticGpus API operation for Amazon Elastic Compute Cloud. +// +// Describes the Elastic GPUs associated with your instances. For more information +// about Elastic GPUs, see Amazon EC2 Elastic GPUs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-gpus.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
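As the preceding paragraph notes, service and SDK failures come back as awserr.Error values. A small sketch of the runtime type assertion it suggests, using the newly added DescribeElasticGpus call; the client setup is assumed, as in the other sketches.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	out, err := svc.DescribeElasticGpus(&ec2.DescribeElasticGpusInput{})
	if err != nil {
		// Assert to awserr.Error to read the structured code and message.
		if aerr, ok := err.(awserr.Error); ok {
			fmt.Println("code:", aerr.Code(), "message:", aerr.Message())
		} else {
			fmt.Println(err)
		}
		return
	}
	fmt.Println(out)
}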
+// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeElasticGpus for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeElasticGpus +func (c *EC2) DescribeElasticGpus(input *DescribeElasticGpusInput) (*DescribeElasticGpusOutput, error) { + req, out := c.DescribeElasticGpusRequest(input) + return out, req.Send() +} + +// DescribeElasticGpusWithContext is the same as DescribeElasticGpus with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeElasticGpus for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeElasticGpusWithContext(ctx aws.Context, input *DescribeElasticGpusInput, opts ...request.Option) (*DescribeElasticGpusOutput, error) { + req, out := c.DescribeElasticGpusRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeExportTasks = "DescribeExportTasks" // DescribeExportTasksRequest generates a "aws/request.Request" representing the // client's request for the DescribeExportTasks operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeExportTasks for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeExportTasks method directly -// instead. +// See DescribeExportTasks for more information on using the DescribeExportTasks +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeExportTasksRequest method. // req, resp := client.DescribeExportTasksRequest(params) @@ -6329,27 +8270,41 @@ func (c *EC2) DescribeExportTasksRequest(input *DescribeExportTasksInput) (req * // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeExportTasks func (c *EC2) DescribeExportTasks(input *DescribeExportTasksInput) (*DescribeExportTasksOutput, error) { req, out := c.DescribeExportTasksRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeExportTasksWithContext is the same as DescribeExportTasks with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeExportTasks for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeExportTasksWithContext(ctx aws.Context, input *DescribeExportTasksInput, opts ...request.Option) (*DescribeExportTasksOutput, error) { + req, out := c.DescribeExportTasksRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeFlowLogs = "DescribeFlowLogs" // DescribeFlowLogsRequest generates a "aws/request.Request" representing the // client's request for the DescribeFlowLogs operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeFlowLogs for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeFlowLogs method directly -// instead. +// See DescribeFlowLogs for more information on using the DescribeFlowLogs +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeFlowLogsRequest method. // req, resp := client.DescribeFlowLogsRequest(params) @@ -6391,27 +8346,191 @@ func (c *EC2) DescribeFlowLogsRequest(input *DescribeFlowLogsInput) (req *reques // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFlowLogs func (c *EC2) DescribeFlowLogs(input *DescribeFlowLogsInput) (*DescribeFlowLogsOutput, error) { req, out := c.DescribeFlowLogsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeFlowLogsWithContext is the same as DescribeFlowLogs with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeFlowLogs for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeFlowLogsWithContext(ctx aws.Context, input *DescribeFlowLogsInput, opts ...request.Option) (*DescribeFlowLogsOutput, error) { + req, out := c.DescribeFlowLogsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeFpgaImageAttribute = "DescribeFpgaImageAttribute" + +// DescribeFpgaImageAttributeRequest generates a "aws/request.Request" representing the +// client's request for the DescribeFpgaImageAttribute operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See DescribeFpgaImageAttribute for more information on using the DescribeFpgaImageAttribute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeFpgaImageAttributeRequest method. +// req, resp := client.DescribeFpgaImageAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFpgaImageAttribute +func (c *EC2) DescribeFpgaImageAttributeRequest(input *DescribeFpgaImageAttributeInput) (req *request.Request, output *DescribeFpgaImageAttributeOutput) { + op := &request.Operation{ + Name: opDescribeFpgaImageAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeFpgaImageAttributeInput{} + } + + output = &DescribeFpgaImageAttributeOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeFpgaImageAttribute API operation for Amazon Elastic Compute Cloud. +// +// Describes the specified attribute of the specified Amazon FPGA Image (AFI). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeFpgaImageAttribute for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFpgaImageAttribute +func (c *EC2) DescribeFpgaImageAttribute(input *DescribeFpgaImageAttributeInput) (*DescribeFpgaImageAttributeOutput, error) { + req, out := c.DescribeFpgaImageAttributeRequest(input) + return out, req.Send() +} + +// DescribeFpgaImageAttributeWithContext is the same as DescribeFpgaImageAttribute with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeFpgaImageAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeFpgaImageAttributeWithContext(ctx aws.Context, input *DescribeFpgaImageAttributeInput, opts ...request.Option) (*DescribeFpgaImageAttributeOutput, error) { + req, out := c.DescribeFpgaImageAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeFpgaImages = "DescribeFpgaImages" + +// DescribeFpgaImagesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeFpgaImages operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeFpgaImages for more information on using the DescribeFpgaImages +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeFpgaImagesRequest method. +// req, resp := client.DescribeFpgaImagesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFpgaImages +func (c *EC2) DescribeFpgaImagesRequest(input *DescribeFpgaImagesInput) (req *request.Request, output *DescribeFpgaImagesOutput) { + op := &request.Operation{ + Name: opDescribeFpgaImages, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeFpgaImagesInput{} + } + + output = &DescribeFpgaImagesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeFpgaImages API operation for Amazon Elastic Compute Cloud. +// +// Describes one or more available Amazon FPGA Images (AFIs). These include +// public AFIs, private AFIs that you own, and AFIs owned by other AWS accounts +// for which you have load permissions. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeFpgaImages for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFpgaImages +func (c *EC2) DescribeFpgaImages(input *DescribeFpgaImagesInput) (*DescribeFpgaImagesOutput, error) { + req, out := c.DescribeFpgaImagesRequest(input) + return out, req.Send() +} + +// DescribeFpgaImagesWithContext is the same as DescribeFpgaImages with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeFpgaImages for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeFpgaImagesWithContext(ctx aws.Context, input *DescribeFpgaImagesInput, opts ...request.Option) (*DescribeFpgaImagesOutput, error) { + req, out := c.DescribeFpgaImagesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeHostReservationOfferings = "DescribeHostReservationOfferings" // DescribeHostReservationOfferingsRequest generates a "aws/request.Request" representing the // client's request for the DescribeHostReservationOfferings operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeHostReservationOfferings for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. 
If -// you just want the service response, call the DescribeHostReservationOfferings method directly -// instead. +// See DescribeHostReservationOfferings for more information on using the DescribeHostReservationOfferings +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeHostReservationOfferingsRequest method. // req, resp := client.DescribeHostReservationOfferingsRequest(params) @@ -6459,27 +8578,41 @@ func (c *EC2) DescribeHostReservationOfferingsRequest(input *DescribeHostReserva // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeHostReservationOfferings func (c *EC2) DescribeHostReservationOfferings(input *DescribeHostReservationOfferingsInput) (*DescribeHostReservationOfferingsOutput, error) { req, out := c.DescribeHostReservationOfferingsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeHostReservationOfferingsWithContext is the same as DescribeHostReservationOfferings with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeHostReservationOfferings for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeHostReservationOfferingsWithContext(ctx aws.Context, input *DescribeHostReservationOfferingsInput, opts ...request.Option) (*DescribeHostReservationOfferingsOutput, error) { + req, out := c.DescribeHostReservationOfferingsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeHostReservations = "DescribeHostReservations" // DescribeHostReservationsRequest generates a "aws/request.Request" representing the // client's request for the DescribeHostReservations operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeHostReservations for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeHostReservations method directly -// instead. +// See DescribeHostReservations for more information on using the DescribeHostReservations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. 
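The "// Example sending a request ..." shorthand that each operation's comment carries corresponds, spelled out for DescribeHostReservations, to roughly the following compilable form; the client construction is an assumption added for completeness.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	client := ec2.New(session.Must(session.NewSession()))

	// Build the request; resp is not populated until Send succeeds.
	req, resp := client.DescribeHostReservationsRequest(&ec2.DescribeHostReservationsInput{})

	err := req.Send()
	if err == nil { // resp is now filled
		fmt.Println(resp)
	} else {
		fmt.Println(err)
	}
}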
// // // Example sending a request using the DescribeHostReservationsRequest method. // req, resp := client.DescribeHostReservationsRequest(params) @@ -6520,27 +8653,41 @@ func (c *EC2) DescribeHostReservationsRequest(input *DescribeHostReservationsInp // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeHostReservations func (c *EC2) DescribeHostReservations(input *DescribeHostReservationsInput) (*DescribeHostReservationsOutput, error) { req, out := c.DescribeHostReservationsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeHostReservationsWithContext is the same as DescribeHostReservations with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeHostReservations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeHostReservationsWithContext(ctx aws.Context, input *DescribeHostReservationsInput, opts ...request.Option) (*DescribeHostReservationsOutput, error) { + req, out := c.DescribeHostReservationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeHosts = "DescribeHosts" // DescribeHostsRequest generates a "aws/request.Request" representing the // client's request for the DescribeHosts operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeHosts for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeHosts method directly -// instead. +// See DescribeHosts for more information on using the DescribeHosts +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeHostsRequest method. // req, resp := client.DescribeHostsRequest(params) @@ -6584,27 +8731,115 @@ func (c *EC2) DescribeHostsRequest(input *DescribeHostsInput) (req *request.Requ // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeHosts func (c *EC2) DescribeHosts(input *DescribeHostsInput) (*DescribeHostsOutput, error) { req, out := c.DescribeHostsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeHostsWithContext is the same as DescribeHosts with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeHosts for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeHostsWithContext(ctx aws.Context, input *DescribeHostsInput, opts ...request.Option) (*DescribeHostsOutput, error) { + req, out := c.DescribeHostsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeIamInstanceProfileAssociations = "DescribeIamInstanceProfileAssociations" + +// DescribeIamInstanceProfileAssociationsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeIamInstanceProfileAssociations operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeIamInstanceProfileAssociations for more information on using the DescribeIamInstanceProfileAssociations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeIamInstanceProfileAssociationsRequest method. +// req, resp := client.DescribeIamInstanceProfileAssociationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeIamInstanceProfileAssociations +func (c *EC2) DescribeIamInstanceProfileAssociationsRequest(input *DescribeIamInstanceProfileAssociationsInput) (req *request.Request, output *DescribeIamInstanceProfileAssociationsOutput) { + op := &request.Operation{ + Name: opDescribeIamInstanceProfileAssociations, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeIamInstanceProfileAssociationsInput{} + } + + output = &DescribeIamInstanceProfileAssociationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeIamInstanceProfileAssociations API operation for Amazon Elastic Compute Cloud. +// +// Describes your IAM instance profile associations. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeIamInstanceProfileAssociations for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeIamInstanceProfileAssociations +func (c *EC2) DescribeIamInstanceProfileAssociations(input *DescribeIamInstanceProfileAssociationsInput) (*DescribeIamInstanceProfileAssociationsOutput, error) { + req, out := c.DescribeIamInstanceProfileAssociationsRequest(input) + return out, req.Send() +} + +// DescribeIamInstanceProfileAssociationsWithContext is the same as DescribeIamInstanceProfileAssociations with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeIamInstanceProfileAssociations for details on how to use this API operation. 
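Every WithContext method above panics on a nil context. When there is no deadline or cancellation to attach, the SDK's aws.BackgroundContext() helper satisfies that requirement; a sketch using DescribeIamInstanceProfileAssociationsWithContext, with the client setup assumed as before.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	// A non-nil context is required; aws.BackgroundContext() is the
	// SDK-provided equivalent of context.Background().
	out, err := svc.DescribeIamInstanceProfileAssociationsWithContext(
		aws.BackgroundContext(),
		&ec2.DescribeIamInstanceProfileAssociationsInput{},
	)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(out)
}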
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeIamInstanceProfileAssociationsWithContext(ctx aws.Context, input *DescribeIamInstanceProfileAssociationsInput, opts ...request.Option) (*DescribeIamInstanceProfileAssociationsOutput, error) { + req, out := c.DescribeIamInstanceProfileAssociationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeIdFormat = "DescribeIdFormat" // DescribeIdFormatRequest generates a "aws/request.Request" representing the // client's request for the DescribeIdFormat operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DescribeIdFormat for usage and error information. +// See DescribeIdFormat for more information on using the DescribeIdFormat +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeIdFormat method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeIdFormatRequest method. // req, resp := client.DescribeIdFormatRequest(params) @@ -6657,27 +8892,41 @@ func (c *EC2) DescribeIdFormatRequest(input *DescribeIdFormatInput) (req *reques // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeIdFormat func (c *EC2) DescribeIdFormat(input *DescribeIdFormatInput) (*DescribeIdFormatOutput, error) { req, out := c.DescribeIdFormatRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeIdFormatWithContext is the same as DescribeIdFormat with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeIdFormat for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeIdFormatWithContext(ctx aws.Context, input *DescribeIdFormatInput, opts ...request.Option) (*DescribeIdFormatOutput, error) { + req, out := c.DescribeIdFormatRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeIdentityIdFormat = "DescribeIdentityIdFormat" // DescribeIdentityIdFormatRequest generates a "aws/request.Request" representing the // client's request for the DescribeIdentityIdFormat operation. 
The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DescribeIdentityIdFormat for usage and error information. +// See DescribeIdentityIdFormat for more information on using the DescribeIdentityIdFormat +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeIdentityIdFormat method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeIdentityIdFormatRequest method. // req, resp := client.DescribeIdentityIdFormatRequest(params) @@ -6728,27 +8977,41 @@ func (c *EC2) DescribeIdentityIdFormatRequest(input *DescribeIdentityIdFormatInp // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeIdentityIdFormat func (c *EC2) DescribeIdentityIdFormat(input *DescribeIdentityIdFormatInput) (*DescribeIdentityIdFormatOutput, error) { req, out := c.DescribeIdentityIdFormatRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeIdentityIdFormatWithContext is the same as DescribeIdentityIdFormat with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeIdentityIdFormat for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeIdentityIdFormatWithContext(ctx aws.Context, input *DescribeIdentityIdFormatInput, opts ...request.Option) (*DescribeIdentityIdFormatOutput, error) { + req, out := c.DescribeIdentityIdFormatRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeImageAttribute = "DescribeImageAttribute" // DescribeImageAttributeRequest generates a "aws/request.Request" representing the // client's request for the DescribeImageAttribute operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeImageAttribute for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. 
If -// you just want the service response, call the DescribeImageAttribute method directly -// instead. +// See DescribeImageAttribute for more information on using the DescribeImageAttribute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeImageAttributeRequest method. // req, resp := client.DescribeImageAttributeRequest(params) @@ -6789,27 +9052,41 @@ func (c *EC2) DescribeImageAttributeRequest(input *DescribeImageAttributeInput) // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeImageAttribute func (c *EC2) DescribeImageAttribute(input *DescribeImageAttributeInput) (*DescribeImageAttributeOutput, error) { req, out := c.DescribeImageAttributeRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeImageAttributeWithContext is the same as DescribeImageAttribute with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeImageAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeImageAttributeWithContext(ctx aws.Context, input *DescribeImageAttributeInput, opts ...request.Option) (*DescribeImageAttributeOutput, error) { + req, out := c.DescribeImageAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeImages = "DescribeImages" // DescribeImagesRequest generates a "aws/request.Request" representing the // client's request for the DescribeImages operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeImages for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeImages method directly -// instead. +// See DescribeImages for more information on using the DescribeImages +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeImagesRequest method. 
// req, resp := client.DescribeImagesRequest(params) @@ -6855,27 +9132,41 @@ func (c *EC2) DescribeImagesRequest(input *DescribeImagesInput) (req *request.Re // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeImages func (c *EC2) DescribeImages(input *DescribeImagesInput) (*DescribeImagesOutput, error) { req, out := c.DescribeImagesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeImagesWithContext is the same as DescribeImages with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeImages for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeImagesWithContext(ctx aws.Context, input *DescribeImagesInput, opts ...request.Option) (*DescribeImagesOutput, error) { + req, out := c.DescribeImagesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeImportImageTasks = "DescribeImportImageTasks" // DescribeImportImageTasksRequest generates a "aws/request.Request" representing the // client's request for the DescribeImportImageTasks operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeImportImageTasks for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeImportImageTasks method directly -// instead. +// See DescribeImportImageTasks for more information on using the DescribeImportImageTasks +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeImportImageTasksRequest method. // req, resp := client.DescribeImportImageTasksRequest(params) @@ -6916,27 +9207,41 @@ func (c *EC2) DescribeImportImageTasksRequest(input *DescribeImportImageTasksInp // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeImportImageTasks func (c *EC2) DescribeImportImageTasks(input *DescribeImportImageTasksInput) (*DescribeImportImageTasksOutput, error) { req, out := c.DescribeImportImageTasksRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeImportImageTasksWithContext is the same as DescribeImportImageTasks with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeImportImageTasks for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeImportImageTasksWithContext(ctx aws.Context, input *DescribeImportImageTasksInput, opts ...request.Option) (*DescribeImportImageTasksOutput, error) { + req, out := c.DescribeImportImageTasksRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeImportSnapshotTasks = "DescribeImportSnapshotTasks" // DescribeImportSnapshotTasksRequest generates a "aws/request.Request" representing the // client's request for the DescribeImportSnapshotTasks operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeImportSnapshotTasks for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeImportSnapshotTasks method directly -// instead. +// See DescribeImportSnapshotTasks for more information on using the DescribeImportSnapshotTasks +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeImportSnapshotTasksRequest method. // req, resp := client.DescribeImportSnapshotTasksRequest(params) @@ -6976,27 +9281,41 @@ func (c *EC2) DescribeImportSnapshotTasksRequest(input *DescribeImportSnapshotTa // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeImportSnapshotTasks func (c *EC2) DescribeImportSnapshotTasks(input *DescribeImportSnapshotTasksInput) (*DescribeImportSnapshotTasksOutput, error) { req, out := c.DescribeImportSnapshotTasksRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeImportSnapshotTasksWithContext is the same as DescribeImportSnapshotTasks with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeImportSnapshotTasks for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeImportSnapshotTasksWithContext(ctx aws.Context, input *DescribeImportSnapshotTasksInput, opts ...request.Option) (*DescribeImportSnapshotTasksOutput, error) { + req, out := c.DescribeImportSnapshotTasksRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() } const opDescribeInstanceAttribute = "DescribeInstanceAttribute" // DescribeInstanceAttributeRequest generates a "aws/request.Request" representing the // client's request for the DescribeInstanceAttribute operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeInstanceAttribute for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeInstanceAttribute method directly -// instead. +// See DescribeInstanceAttribute for more information on using the DescribeInstanceAttribute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeInstanceAttributeRequest method. // req, resp := client.DescribeInstanceAttributeRequest(params) @@ -7040,27 +9359,41 @@ func (c *EC2) DescribeInstanceAttributeRequest(input *DescribeInstanceAttributeI // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstanceAttribute func (c *EC2) DescribeInstanceAttribute(input *DescribeInstanceAttributeInput) (*DescribeInstanceAttributeOutput, error) { req, out := c.DescribeInstanceAttributeRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeInstanceAttributeWithContext is the same as DescribeInstanceAttribute with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeInstanceAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeInstanceAttributeWithContext(ctx aws.Context, input *DescribeInstanceAttributeInput, opts ...request.Option) (*DescribeInstanceAttributeOutput, error) { + req, out := c.DescribeInstanceAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeInstanceStatus = "DescribeInstanceStatus" // DescribeInstanceStatusRequest generates a "aws/request.Request" representing the // client's request for the DescribeInstanceStatus operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeInstanceStatus for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeInstanceStatus method directly -// instead. +// See DescribeInstanceStatus for more information on using the DescribeInstanceStatus +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeInstanceStatusRequest method. // req, resp := client.DescribeInstanceStatusRequest(params) @@ -7096,7 +9429,8 @@ func (c *EC2) DescribeInstanceStatusRequest(input *DescribeInstanceStatusInput) // DescribeInstanceStatus API operation for Amazon Elastic Compute Cloud. // // Describes the status of one or more instances. By default, only running instances -// are described, unless specified otherwise. +// are described, unless you specifically indicate to return the status of all +// instances. // // Instance status includes the following components: // @@ -7126,8 +9460,23 @@ func (c *EC2) DescribeInstanceStatusRequest(input *DescribeInstanceStatusInput) // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstanceStatus func (c *EC2) DescribeInstanceStatus(input *DescribeInstanceStatusInput) (*DescribeInstanceStatusOutput, error) { req, out := c.DescribeInstanceStatusRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeInstanceStatusWithContext is the same as DescribeInstanceStatus with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeInstanceStatus for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeInstanceStatusWithContext(ctx aws.Context, input *DescribeInstanceStatusInput, opts ...request.Option) (*DescribeInstanceStatusOutput, error) { + req, out := c.DescribeInstanceStatusRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() } // DescribeInstanceStatusPages iterates over the pages of a DescribeInstanceStatus operation, @@ -7147,31 +9496,55 @@ func (c *EC2) DescribeInstanceStatus(input *DescribeInstanceStatusInput) (*Descr // return pageNum <= 3 // }) // -func (c *EC2) DescribeInstanceStatusPages(input *DescribeInstanceStatusInput, fn func(p *DescribeInstanceStatusOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.DescribeInstanceStatusRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*DescribeInstanceStatusOutput), lastPage) - }) +func (c *EC2) DescribeInstanceStatusPages(input *DescribeInstanceStatusInput, fn func(*DescribeInstanceStatusOutput, bool) bool) error { + return c.DescribeInstanceStatusPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeInstanceStatusPagesWithContext same as DescribeInstanceStatusPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeInstanceStatusPagesWithContext(ctx aws.Context, input *DescribeInstanceStatusInput, fn func(*DescribeInstanceStatusOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeInstanceStatusInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInstanceStatusRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*DescribeInstanceStatusOutput), !p.HasNextPage()) + } + return p.Err() } const opDescribeInstances = "DescribeInstances" // DescribeInstancesRequest generates a "aws/request.Request" representing the // client's request for the DescribeInstances operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DescribeInstances for usage and error information. +// See DescribeInstances for more information on using the DescribeInstances +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeInstances method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeInstancesRequest method. 
// req, resp := client.DescribeInstancesRequest(params) @@ -7232,8 +9605,23 @@ func (c *EC2) DescribeInstancesRequest(input *DescribeInstancesInput) (req *requ // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstances func (c *EC2) DescribeInstances(input *DescribeInstancesInput) (*DescribeInstancesOutput, error) { req, out := c.DescribeInstancesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeInstancesWithContext is the same as DescribeInstances with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeInstances for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeInstancesWithContext(ctx aws.Context, input *DescribeInstancesInput, opts ...request.Option) (*DescribeInstancesOutput, error) { + req, out := c.DescribeInstancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } // DescribeInstancesPages iterates over the pages of a DescribeInstances operation, @@ -7253,31 +9641,55 @@ func (c *EC2) DescribeInstances(input *DescribeInstancesInput) (*DescribeInstanc // return pageNum <= 3 // }) // -func (c *EC2) DescribeInstancesPages(input *DescribeInstancesInput, fn func(p *DescribeInstancesOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.DescribeInstancesRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*DescribeInstancesOutput), lastPage) - }) +func (c *EC2) DescribeInstancesPages(input *DescribeInstancesInput, fn func(*DescribeInstancesOutput, bool) bool) error { + return c.DescribeInstancesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeInstancesPagesWithContext same as DescribeInstancesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeInstancesPagesWithContext(ctx aws.Context, input *DescribeInstancesInput, fn func(*DescribeInstancesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeInstancesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInstancesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*DescribeInstancesOutput), !p.HasNextPage()) + } + return p.Err() } const opDescribeInternetGateways = "DescribeInternetGateways" // DescribeInternetGatewaysRequest generates a "aws/request.Request" representing the // client's request for the DescribeInternetGateways operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. 
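// Illustrative usage sketch (not generated code): iterating paginated results
// with the DescribeInstancesPages helper defined above. Returning true from the
// callback continues to the next page; return false to stop early. Assumes a
// client as in the earlier sketches.
//
//    err := client.DescribeInstancesPages(&ec2.DescribeInstancesInput{},
//        func(page *ec2.DescribeInstancesOutput, lastPage bool) bool {
//            for _, r := range page.Reservations {
//                for _, inst := range r.Instances {
//                    fmt.Println(aws.StringValue(inst.InstanceId))
//                }
//            }
//            return true
//        })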
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DescribeInternetGateways for usage and error information. +// See DescribeInternetGateways for more information on using the DescribeInternetGateways +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeInternetGateways method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeInternetGatewaysRequest method. // req, resp := client.DescribeInternetGatewaysRequest(params) @@ -7317,27 +9729,41 @@ func (c *EC2) DescribeInternetGatewaysRequest(input *DescribeInternetGatewaysInp // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInternetGateways func (c *EC2) DescribeInternetGateways(input *DescribeInternetGatewaysInput) (*DescribeInternetGatewaysOutput, error) { req, out := c.DescribeInternetGatewaysRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeInternetGatewaysWithContext is the same as DescribeInternetGateways with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeInternetGateways for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeInternetGatewaysWithContext(ctx aws.Context, input *DescribeInternetGatewaysInput, opts ...request.Option) (*DescribeInternetGatewaysOutput, error) { + req, out := c.DescribeInternetGatewaysRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeKeyPairs = "DescribeKeyPairs" // DescribeKeyPairsRequest generates a "aws/request.Request" representing the // client's request for the DescribeKeyPairs operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeKeyPairs for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeKeyPairs method directly -// instead. +// See DescribeKeyPairs for more information on using the DescribeKeyPairs +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeKeyPairsRequest method. // req, resp := client.DescribeKeyPairsRequest(params) @@ -7380,27 +9806,41 @@ func (c *EC2) DescribeKeyPairsRequest(input *DescribeKeyPairsInput) (req *reques // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeKeyPairs func (c *EC2) DescribeKeyPairs(input *DescribeKeyPairsInput) (*DescribeKeyPairsOutput, error) { req, out := c.DescribeKeyPairsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeKeyPairsWithContext is the same as DescribeKeyPairs with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeKeyPairs for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeKeyPairsWithContext(ctx aws.Context, input *DescribeKeyPairsInput, opts ...request.Option) (*DescribeKeyPairsOutput, error) { + req, out := c.DescribeKeyPairsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeMovingAddresses = "DescribeMovingAddresses" // DescribeMovingAddressesRequest generates a "aws/request.Request" representing the // client's request for the DescribeMovingAddresses operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeMovingAddresses for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeMovingAddresses method directly -// instead. +// See DescribeMovingAddresses for more information on using the DescribeMovingAddresses +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeMovingAddressesRequest method. 
// req, resp := client.DescribeMovingAddressesRequest(params) @@ -7442,27 +9882,41 @@ func (c *EC2) DescribeMovingAddressesRequest(input *DescribeMovingAddressesInput // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeMovingAddresses func (c *EC2) DescribeMovingAddresses(input *DescribeMovingAddressesInput) (*DescribeMovingAddressesOutput, error) { req, out := c.DescribeMovingAddressesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeMovingAddressesWithContext is the same as DescribeMovingAddresses with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeMovingAddresses for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeMovingAddressesWithContext(ctx aws.Context, input *DescribeMovingAddressesInput, opts ...request.Option) (*DescribeMovingAddressesOutput, error) { + req, out := c.DescribeMovingAddressesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeNatGateways = "DescribeNatGateways" // DescribeNatGatewaysRequest generates a "aws/request.Request" representing the // client's request for the DescribeNatGateways operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeNatGateways for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeNatGateways method directly -// instead. +// See DescribeNatGateways for more information on using the DescribeNatGateways +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeNatGatewaysRequest method. 
// req, resp := client.DescribeNatGatewaysRequest(params) @@ -7478,6 +9932,12 @@ func (c *EC2) DescribeNatGatewaysRequest(input *DescribeNatGatewaysInput) (req * Name: opDescribeNatGateways, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -7502,27 +9962,91 @@ func (c *EC2) DescribeNatGatewaysRequest(input *DescribeNatGatewaysInput) (req * // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeNatGateways func (c *EC2) DescribeNatGateways(input *DescribeNatGatewaysInput) (*DescribeNatGatewaysOutput, error) { req, out := c.DescribeNatGatewaysRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeNatGatewaysWithContext is the same as DescribeNatGateways with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeNatGateways for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeNatGatewaysWithContext(ctx aws.Context, input *DescribeNatGatewaysInput, opts ...request.Option) (*DescribeNatGatewaysOutput, error) { + req, out := c.DescribeNatGatewaysRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeNatGatewaysPages iterates over the pages of a DescribeNatGateways operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeNatGateways method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeNatGateways operation. +// pageNum := 0 +// err := client.DescribeNatGatewaysPages(params, +// func(page *DescribeNatGatewaysOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeNatGatewaysPages(input *DescribeNatGatewaysInput, fn func(*DescribeNatGatewaysOutput, bool) bool) error { + return c.DescribeNatGatewaysPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeNatGatewaysPagesWithContext same as DescribeNatGatewaysPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeNatGatewaysPagesWithContext(ctx aws.Context, input *DescribeNatGatewaysInput, fn func(*DescribeNatGatewaysOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeNatGatewaysInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeNatGatewaysRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*DescribeNatGatewaysOutput), !p.HasNextPage()) + } + return p.Err() } const opDescribeNetworkAcls = "DescribeNetworkAcls" // DescribeNetworkAclsRequest generates a "aws/request.Request" representing the // client's request for the DescribeNetworkAcls operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeNetworkAcls for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeNetworkAcls method directly -// instead. +// See DescribeNetworkAcls for more information on using the DescribeNetworkAcls +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeNetworkAclsRequest method. // req, resp := client.DescribeNetworkAclsRequest(params) @@ -7565,27 +10089,41 @@ func (c *EC2) DescribeNetworkAclsRequest(input *DescribeNetworkAclsInput) (req * // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeNetworkAcls func (c *EC2) DescribeNetworkAcls(input *DescribeNetworkAclsInput) (*DescribeNetworkAclsOutput, error) { req, out := c.DescribeNetworkAclsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeNetworkAclsWithContext is the same as DescribeNetworkAcls with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeNetworkAcls for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeNetworkAclsWithContext(ctx aws.Context, input *DescribeNetworkAclsInput, opts ...request.Option) (*DescribeNetworkAclsOutput, error) { + req, out := c.DescribeNetworkAclsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeNetworkInterfaceAttribute = "DescribeNetworkInterfaceAttribute" // DescribeNetworkInterfaceAttributeRequest generates a "aws/request.Request" representing the // client's request for the DescribeNetworkInterfaceAttribute operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeNetworkInterfaceAttribute for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. 
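// Illustrative usage sketch (not generated code): the WithContext pagination
// form added above for DescribeNatGateways, stopping after three pages and
// honouring cancellation of ctx. Assumes a client and ctx as in the earlier
// sketches.
//
//    pageNum := 0
//    err := client.DescribeNatGatewaysPagesWithContext(ctx, &ec2.DescribeNatGatewaysInput{},
//        func(page *ec2.DescribeNatGatewaysOutput, lastPage bool) bool {
//            pageNum++
//            fmt.Println(len(page.NatGateways), "NAT gateways on page", pageNum)
//            return pageNum < 3
//        })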
+// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeNetworkInterfaceAttribute method directly -// instead. +// See DescribeNetworkInterfaceAttribute for more information on using the DescribeNetworkInterfaceAttribute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeNetworkInterfaceAttributeRequest method. // req, resp := client.DescribeNetworkInterfaceAttributeRequest(params) @@ -7626,27 +10164,115 @@ func (c *EC2) DescribeNetworkInterfaceAttributeRequest(input *DescribeNetworkInt // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeNetworkInterfaceAttribute func (c *EC2) DescribeNetworkInterfaceAttribute(input *DescribeNetworkInterfaceAttributeInput) (*DescribeNetworkInterfaceAttributeOutput, error) { req, out := c.DescribeNetworkInterfaceAttributeRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeNetworkInterfaceAttributeWithContext is the same as DescribeNetworkInterfaceAttribute with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeNetworkInterfaceAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeNetworkInterfaceAttributeWithContext(ctx aws.Context, input *DescribeNetworkInterfaceAttributeInput, opts ...request.Option) (*DescribeNetworkInterfaceAttributeOutput, error) { + req, out := c.DescribeNetworkInterfaceAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeNetworkInterfacePermissions = "DescribeNetworkInterfacePermissions" + +// DescribeNetworkInterfacePermissionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeNetworkInterfacePermissions operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeNetworkInterfacePermissions for more information on using the DescribeNetworkInterfacePermissions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeNetworkInterfacePermissionsRequest method. 
+// req, resp := client.DescribeNetworkInterfacePermissionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeNetworkInterfacePermissions +func (c *EC2) DescribeNetworkInterfacePermissionsRequest(input *DescribeNetworkInterfacePermissionsInput) (req *request.Request, output *DescribeNetworkInterfacePermissionsOutput) { + op := &request.Operation{ + Name: opDescribeNetworkInterfacePermissions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeNetworkInterfacePermissionsInput{} + } + + output = &DescribeNetworkInterfacePermissionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeNetworkInterfacePermissions API operation for Amazon Elastic Compute Cloud. +// +// Describes the permissions for your network interfaces. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeNetworkInterfacePermissions for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeNetworkInterfacePermissions +func (c *EC2) DescribeNetworkInterfacePermissions(input *DescribeNetworkInterfacePermissionsInput) (*DescribeNetworkInterfacePermissionsOutput, error) { + req, out := c.DescribeNetworkInterfacePermissionsRequest(input) + return out, req.Send() +} + +// DescribeNetworkInterfacePermissionsWithContext is the same as DescribeNetworkInterfacePermissions with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeNetworkInterfacePermissions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeNetworkInterfacePermissionsWithContext(ctx aws.Context, input *DescribeNetworkInterfacePermissionsInput, opts ...request.Option) (*DescribeNetworkInterfacePermissionsOutput, error) { + req, out := c.DescribeNetworkInterfacePermissionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeNetworkInterfaces = "DescribeNetworkInterfaces" // DescribeNetworkInterfacesRequest generates a "aws/request.Request" representing the // client's request for the DescribeNetworkInterfaces operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DescribeNetworkInterfaces for usage and error information. +// See DescribeNetworkInterfaces for more information on using the DescribeNetworkInterfaces +// API call, and error handling. 
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeNetworkInterfaces method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeNetworkInterfacesRequest method. // req, resp := client.DescribeNetworkInterfacesRequest(params) @@ -7686,27 +10312,41 @@ func (c *EC2) DescribeNetworkInterfacesRequest(input *DescribeNetworkInterfacesI // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeNetworkInterfaces func (c *EC2) DescribeNetworkInterfaces(input *DescribeNetworkInterfacesInput) (*DescribeNetworkInterfacesOutput, error) { req, out := c.DescribeNetworkInterfacesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeNetworkInterfacesWithContext is the same as DescribeNetworkInterfaces with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeNetworkInterfaces for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeNetworkInterfacesWithContext(ctx aws.Context, input *DescribeNetworkInterfacesInput, opts ...request.Option) (*DescribeNetworkInterfacesOutput, error) { + req, out := c.DescribeNetworkInterfacesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribePlacementGroups = "DescribePlacementGroups" // DescribePlacementGroupsRequest generates a "aws/request.Request" representing the // client's request for the DescribePlacementGroups operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribePlacementGroups for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribePlacementGroups method directly -// instead. +// See DescribePlacementGroups for more information on using the DescribePlacementGroups +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. 
// // // Example sending a request using the DescribePlacementGroupsRequest method. // req, resp := client.DescribePlacementGroupsRequest(params) @@ -7748,27 +10388,41 @@ func (c *EC2) DescribePlacementGroupsRequest(input *DescribePlacementGroupsInput // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribePlacementGroups func (c *EC2) DescribePlacementGroups(input *DescribePlacementGroupsInput) (*DescribePlacementGroupsOutput, error) { req, out := c.DescribePlacementGroupsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribePlacementGroupsWithContext is the same as DescribePlacementGroups with the addition of +// the ability to pass a context and additional request options. +// +// See DescribePlacementGroups for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribePlacementGroupsWithContext(ctx aws.Context, input *DescribePlacementGroupsInput, opts ...request.Option) (*DescribePlacementGroupsOutput, error) { + req, out := c.DescribePlacementGroupsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribePrefixLists = "DescribePrefixLists" // DescribePrefixListsRequest generates a "aws/request.Request" representing the // client's request for the DescribePrefixLists operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribePrefixLists for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribePrefixLists method directly -// instead. +// See DescribePrefixLists for more information on using the DescribePrefixLists +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribePrefixListsRequest method. // req, resp := client.DescribePrefixListsRequest(params) @@ -7812,27 +10466,41 @@ func (c *EC2) DescribePrefixListsRequest(input *DescribePrefixListsInput) (req * // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribePrefixLists func (c *EC2) DescribePrefixLists(input *DescribePrefixListsInput) (*DescribePrefixListsOutput, error) { req, out := c.DescribePrefixListsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribePrefixListsWithContext is the same as DescribePrefixLists with the addition of +// the ability to pass a context and additional request options. 
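// Illustrative usage sketch (not generated code): passing additional request
// options to a WithContext call, as mentioned above. request.Option is just
// func(*request.Request), so an inline option works; the retry-logging handler
// is a hypothetical example. Assumes a client and ctx as in the earlier sketches.
//
//    logRetries := func(r *request.Request) {
//        r.Handlers.Retry.PushBack(func(r *request.Request) {
//            fmt.Println("retrying", r.Operation.Name)
//        })
//    }
//    out, err := client.DescribePrefixListsWithContext(ctx,
//        &ec2.DescribePrefixListsInput{}, logRetries)
//    if err == nil {
//        fmt.Println(out)
//    }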
+// +// See DescribePrefixLists for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribePrefixListsWithContext(ctx aws.Context, input *DescribePrefixListsInput, opts ...request.Option) (*DescribePrefixListsOutput, error) { + req, out := c.DescribePrefixListsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeRegions = "DescribeRegions" // DescribeRegionsRequest generates a "aws/request.Request" representing the // client's request for the DescribeRegions operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeRegions for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeRegions method directly -// instead. +// See DescribeRegions for more information on using the DescribeRegions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeRegionsRequest method. // req, resp := client.DescribeRegionsRequest(params) @@ -7875,27 +10543,41 @@ func (c *EC2) DescribeRegionsRequest(input *DescribeRegionsInput) (req *request. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeRegions func (c *EC2) DescribeRegions(input *DescribeRegionsInput) (*DescribeRegionsOutput, error) { req, out := c.DescribeRegionsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeRegionsWithContext is the same as DescribeRegions with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeRegions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeRegionsWithContext(ctx aws.Context, input *DescribeRegionsInput, opts ...request.Option) (*DescribeRegionsOutput, error) { + req, out := c.DescribeRegionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeReservedInstances = "DescribeReservedInstances" // DescribeReservedInstancesRequest generates a "aws/request.Request" representing the // client's request for the DescribeReservedInstances operation. 
The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeReservedInstances for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeReservedInstances method directly -// instead. +// See DescribeReservedInstances for more information on using the DescribeReservedInstances +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeReservedInstancesRequest method. // req, resp := client.DescribeReservedInstancesRequest(params) @@ -7938,27 +10620,41 @@ func (c *EC2) DescribeReservedInstancesRequest(input *DescribeReservedInstancesI // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeReservedInstances func (c *EC2) DescribeReservedInstances(input *DescribeReservedInstancesInput) (*DescribeReservedInstancesOutput, error) { req, out := c.DescribeReservedInstancesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeReservedInstancesWithContext is the same as DescribeReservedInstances with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeReservedInstances for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeReservedInstancesWithContext(ctx aws.Context, input *DescribeReservedInstancesInput, opts ...request.Option) (*DescribeReservedInstancesOutput, error) { + req, out := c.DescribeReservedInstancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeReservedInstancesListings = "DescribeReservedInstancesListings" // DescribeReservedInstancesListingsRequest generates a "aws/request.Request" representing the // client's request for the DescribeReservedInstancesListings operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeReservedInstancesListings for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeReservedInstancesListings method directly -// instead. +// See DescribeReservedInstancesListings for more information on using the DescribeReservedInstancesListings +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeReservedInstancesListingsRequest method. // req, resp := client.DescribeReservedInstancesListingsRequest(params) @@ -8019,27 +10715,41 @@ func (c *EC2) DescribeReservedInstancesListingsRequest(input *DescribeReservedIn // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeReservedInstancesListings func (c *EC2) DescribeReservedInstancesListings(input *DescribeReservedInstancesListingsInput) (*DescribeReservedInstancesListingsOutput, error) { req, out := c.DescribeReservedInstancesListingsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeReservedInstancesListingsWithContext is the same as DescribeReservedInstancesListings with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeReservedInstancesListings for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeReservedInstancesListingsWithContext(ctx aws.Context, input *DescribeReservedInstancesListingsInput, opts ...request.Option) (*DescribeReservedInstancesListingsOutput, error) { + req, out := c.DescribeReservedInstancesListingsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeReservedInstancesModifications = "DescribeReservedInstancesModifications" // DescribeReservedInstancesModificationsRequest generates a "aws/request.Request" representing the // client's request for the DescribeReservedInstancesModifications operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DescribeReservedInstancesModifications for usage and error information. +// See DescribeReservedInstancesModifications for more information on using the DescribeReservedInstancesModifications +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. 
If -// you just want the service response, call the DescribeReservedInstancesModifications method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeReservedInstancesModificationsRequest method. // req, resp := client.DescribeReservedInstancesModificationsRequest(params) @@ -8091,8 +10801,23 @@ func (c *EC2) DescribeReservedInstancesModificationsRequest(input *DescribeReser // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeReservedInstancesModifications func (c *EC2) DescribeReservedInstancesModifications(input *DescribeReservedInstancesModificationsInput) (*DescribeReservedInstancesModificationsOutput, error) { req, out := c.DescribeReservedInstancesModificationsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeReservedInstancesModificationsWithContext is the same as DescribeReservedInstancesModifications with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeReservedInstancesModifications for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeReservedInstancesModificationsWithContext(ctx aws.Context, input *DescribeReservedInstancesModificationsInput, opts ...request.Option) (*DescribeReservedInstancesModificationsOutput, error) { + req, out := c.DescribeReservedInstancesModificationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } // DescribeReservedInstancesModificationsPages iterates over the pages of a DescribeReservedInstancesModifications operation, @@ -8112,31 +10837,55 @@ func (c *EC2) DescribeReservedInstancesModifications(input *DescribeReservedInst // return pageNum <= 3 // }) // -func (c *EC2) DescribeReservedInstancesModificationsPages(input *DescribeReservedInstancesModificationsInput, fn func(p *DescribeReservedInstancesModificationsOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.DescribeReservedInstancesModificationsRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*DescribeReservedInstancesModificationsOutput), lastPage) - }) +func (c *EC2) DescribeReservedInstancesModificationsPages(input *DescribeReservedInstancesModificationsInput, fn func(*DescribeReservedInstancesModificationsOutput, bool) bool) error { + return c.DescribeReservedInstancesModificationsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeReservedInstancesModificationsPagesWithContext same as DescribeReservedInstancesModificationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) DescribeReservedInstancesModificationsPagesWithContext(ctx aws.Context, input *DescribeReservedInstancesModificationsInput, fn func(*DescribeReservedInstancesModificationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeReservedInstancesModificationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeReservedInstancesModificationsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*DescribeReservedInstancesModificationsOutput), !p.HasNextPage()) + } + return p.Err() } const opDescribeReservedInstancesOfferings = "DescribeReservedInstancesOfferings" // DescribeReservedInstancesOfferingsRequest generates a "aws/request.Request" representing the // client's request for the DescribeReservedInstancesOfferings operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DescribeReservedInstancesOfferings for usage and error information. +// See DescribeReservedInstancesOfferings for more information on using the DescribeReservedInstancesOfferings +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeReservedInstancesOfferings method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeReservedInstancesOfferingsRequest method. // req, resp := client.DescribeReservedInstancesOfferingsRequest(params) @@ -8193,8 +10942,23 @@ func (c *EC2) DescribeReservedInstancesOfferingsRequest(input *DescribeReservedI // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeReservedInstancesOfferings func (c *EC2) DescribeReservedInstancesOfferings(input *DescribeReservedInstancesOfferingsInput) (*DescribeReservedInstancesOfferingsOutput, error) { req, out := c.DescribeReservedInstancesOfferingsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeReservedInstancesOfferingsWithContext is the same as DescribeReservedInstancesOfferings with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeReservedInstancesOfferings for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) DescribeReservedInstancesOfferingsWithContext(ctx aws.Context, input *DescribeReservedInstancesOfferingsInput, opts ...request.Option) (*DescribeReservedInstancesOfferingsOutput, error) { + req, out := c.DescribeReservedInstancesOfferingsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } // DescribeReservedInstancesOfferingsPages iterates over the pages of a DescribeReservedInstancesOfferings operation, @@ -8214,31 +10978,55 @@ func (c *EC2) DescribeReservedInstancesOfferings(input *DescribeReservedInstance // return pageNum <= 3 // }) // -func (c *EC2) DescribeReservedInstancesOfferingsPages(input *DescribeReservedInstancesOfferingsInput, fn func(p *DescribeReservedInstancesOfferingsOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.DescribeReservedInstancesOfferingsRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*DescribeReservedInstancesOfferingsOutput), lastPage) - }) +func (c *EC2) DescribeReservedInstancesOfferingsPages(input *DescribeReservedInstancesOfferingsInput, fn func(*DescribeReservedInstancesOfferingsOutput, bool) bool) error { + return c.DescribeReservedInstancesOfferingsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeReservedInstancesOfferingsPagesWithContext same as DescribeReservedInstancesOfferingsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeReservedInstancesOfferingsPagesWithContext(ctx aws.Context, input *DescribeReservedInstancesOfferingsInput, fn func(*DescribeReservedInstancesOfferingsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeReservedInstancesOfferingsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeReservedInstancesOfferingsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*DescribeReservedInstancesOfferingsOutput), !p.HasNextPage()) + } + return p.Err() } const opDescribeRouteTables = "DescribeRouteTables" // DescribeRouteTablesRequest generates a "aws/request.Request" representing the // client's request for the DescribeRouteTables operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeRouteTables for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeRouteTables method directly -// instead. 
+// See DescribeRouteTables for more information on using the DescribeRouteTables +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeRouteTablesRequest method. // req, resp := client.DescribeRouteTablesRequest(params) @@ -8286,27 +11074,41 @@ func (c *EC2) DescribeRouteTablesRequest(input *DescribeRouteTablesInput) (req * // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeRouteTables func (c *EC2) DescribeRouteTables(input *DescribeRouteTablesInput) (*DescribeRouteTablesOutput, error) { req, out := c.DescribeRouteTablesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeRouteTablesWithContext is the same as DescribeRouteTables with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeRouteTables for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeRouteTablesWithContext(ctx aws.Context, input *DescribeRouteTablesInput, opts ...request.Option) (*DescribeRouteTablesOutput, error) { + req, out := c.DescribeRouteTablesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeScheduledInstanceAvailability = "DescribeScheduledInstanceAvailability" // DescribeScheduledInstanceAvailabilityRequest generates a "aws/request.Request" representing the // client's request for the DescribeScheduledInstanceAvailability operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DescribeScheduledInstanceAvailability for usage and error information. +// See DescribeScheduledInstanceAvailability for more information on using the DescribeScheduledInstanceAvailability +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeScheduledInstanceAvailability method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeScheduledInstanceAvailabilityRequest method. 
// req, resp := client.DescribeScheduledInstanceAvailabilityRequest(params) @@ -8354,27 +11156,41 @@ func (c *EC2) DescribeScheduledInstanceAvailabilityRequest(input *DescribeSchedu // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeScheduledInstanceAvailability func (c *EC2) DescribeScheduledInstanceAvailability(input *DescribeScheduledInstanceAvailabilityInput) (*DescribeScheduledInstanceAvailabilityOutput, error) { req, out := c.DescribeScheduledInstanceAvailabilityRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeScheduledInstanceAvailabilityWithContext is the same as DescribeScheduledInstanceAvailability with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeScheduledInstanceAvailability for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeScheduledInstanceAvailabilityWithContext(ctx aws.Context, input *DescribeScheduledInstanceAvailabilityInput, opts ...request.Option) (*DescribeScheduledInstanceAvailabilityOutput, error) { + req, out := c.DescribeScheduledInstanceAvailabilityRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeScheduledInstances = "DescribeScheduledInstances" // DescribeScheduledInstancesRequest generates a "aws/request.Request" representing the // client's request for the DescribeScheduledInstances operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DescribeScheduledInstances for usage and error information. +// See DescribeScheduledInstances for more information on using the DescribeScheduledInstances +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeScheduledInstances method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeScheduledInstancesRequest method. 
// req, resp := client.DescribeScheduledInstancesRequest(params) @@ -8414,27 +11230,41 @@ func (c *EC2) DescribeScheduledInstancesRequest(input *DescribeScheduledInstance // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeScheduledInstances func (c *EC2) DescribeScheduledInstances(input *DescribeScheduledInstancesInput) (*DescribeScheduledInstancesOutput, error) { req, out := c.DescribeScheduledInstancesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeScheduledInstancesWithContext is the same as DescribeScheduledInstances with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeScheduledInstances for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeScheduledInstancesWithContext(ctx aws.Context, input *DescribeScheduledInstancesInput, opts ...request.Option) (*DescribeScheduledInstancesOutput, error) { + req, out := c.DescribeScheduledInstancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeSecurityGroupReferences = "DescribeSecurityGroupReferences" // DescribeSecurityGroupReferencesRequest generates a "aws/request.Request" representing the // client's request for the DescribeSecurityGroupReferences operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DescribeSecurityGroupReferences for usage and error information. +// See DescribeSecurityGroupReferences for more information on using the DescribeSecurityGroupReferences +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeSecurityGroupReferences method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeSecurityGroupReferencesRequest method. 
// req, resp := client.DescribeSecurityGroupReferencesRequest(params) @@ -8475,27 +11305,41 @@ func (c *EC2) DescribeSecurityGroupReferencesRequest(input *DescribeSecurityGrou // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSecurityGroupReferences func (c *EC2) DescribeSecurityGroupReferences(input *DescribeSecurityGroupReferencesInput) (*DescribeSecurityGroupReferencesOutput, error) { req, out := c.DescribeSecurityGroupReferencesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeSecurityGroupReferencesWithContext is the same as DescribeSecurityGroupReferences with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeSecurityGroupReferences for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeSecurityGroupReferencesWithContext(ctx aws.Context, input *DescribeSecurityGroupReferencesInput, opts ...request.Option) (*DescribeSecurityGroupReferencesOutput, error) { + req, out := c.DescribeSecurityGroupReferencesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeSecurityGroups = "DescribeSecurityGroups" // DescribeSecurityGroupsRequest generates a "aws/request.Request" representing the // client's request for the DescribeSecurityGroups operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DescribeSecurityGroups for usage and error information. +// See DescribeSecurityGroups for more information on using the DescribeSecurityGroups +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeSecurityGroups method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeSecurityGroupsRequest method. 
// req, resp := client.DescribeSecurityGroupsRequest(params) @@ -8542,27 +11386,41 @@ func (c *EC2) DescribeSecurityGroupsRequest(input *DescribeSecurityGroupsInput) // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSecurityGroups func (c *EC2) DescribeSecurityGroups(input *DescribeSecurityGroupsInput) (*DescribeSecurityGroupsOutput, error) { req, out := c.DescribeSecurityGroupsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeSecurityGroupsWithContext is the same as DescribeSecurityGroups with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeSecurityGroups for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeSecurityGroupsWithContext(ctx aws.Context, input *DescribeSecurityGroupsInput, opts ...request.Option) (*DescribeSecurityGroupsOutput, error) { + req, out := c.DescribeSecurityGroupsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeSnapshotAttribute = "DescribeSnapshotAttribute" // DescribeSnapshotAttributeRequest generates a "aws/request.Request" representing the // client's request for the DescribeSnapshotAttribute operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DescribeSnapshotAttribute for usage and error information. +// See DescribeSnapshotAttribute for more information on using the DescribeSnapshotAttribute +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeSnapshotAttribute method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeSnapshotAttributeRequest method. // req, resp := client.DescribeSnapshotAttributeRequest(params) @@ -8606,27 +11464,41 @@ func (c *EC2) DescribeSnapshotAttributeRequest(input *DescribeSnapshotAttributeI // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSnapshotAttribute func (c *EC2) DescribeSnapshotAttribute(input *DescribeSnapshotAttributeInput) (*DescribeSnapshotAttributeOutput, error) { req, out := c.DescribeSnapshotAttributeRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeSnapshotAttributeWithContext is the same as DescribeSnapshotAttribute with the addition of +// the ability to pass a context and additional request options. 
+// +// See DescribeSnapshotAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeSnapshotAttributeWithContext(ctx aws.Context, input *DescribeSnapshotAttributeInput, opts ...request.Option) (*DescribeSnapshotAttributeOutput, error) { + req, out := c.DescribeSnapshotAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeSnapshots = "DescribeSnapshots" // DescribeSnapshotsRequest generates a "aws/request.Request" representing the // client's request for the DescribeSnapshots operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DescribeSnapshots for usage and error information. +// See DescribeSnapshots for more information on using the DescribeSnapshots +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeSnapshots method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeSnapshotsRequest method. // req, resp := client.DescribeSnapshotsRequest(params) @@ -8717,8 +11589,23 @@ func (c *EC2) DescribeSnapshotsRequest(input *DescribeSnapshotsInput) (req *requ // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSnapshots func (c *EC2) DescribeSnapshots(input *DescribeSnapshotsInput) (*DescribeSnapshotsOutput, error) { req, out := c.DescribeSnapshotsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeSnapshotsWithContext is the same as DescribeSnapshots with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeSnapshots for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeSnapshotsWithContext(ctx aws.Context, input *DescribeSnapshotsInput, opts ...request.Option) (*DescribeSnapshotsOutput, error) { + req, out := c.DescribeSnapshotsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() } // DescribeSnapshotsPages iterates over the pages of a DescribeSnapshots operation, @@ -8738,31 +11625,55 @@ func (c *EC2) DescribeSnapshots(input *DescribeSnapshotsInput) (*DescribeSnapsho // return pageNum <= 3 // }) // -func (c *EC2) DescribeSnapshotsPages(input *DescribeSnapshotsInput, fn func(p *DescribeSnapshotsOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.DescribeSnapshotsRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*DescribeSnapshotsOutput), lastPage) - }) +func (c *EC2) DescribeSnapshotsPages(input *DescribeSnapshotsInput, fn func(*DescribeSnapshotsOutput, bool) bool) error { + return c.DescribeSnapshotsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeSnapshotsPagesWithContext same as DescribeSnapshotsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeSnapshotsPagesWithContext(ctx aws.Context, input *DescribeSnapshotsInput, fn func(*DescribeSnapshotsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeSnapshotsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeSnapshotsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*DescribeSnapshotsOutput), !p.HasNextPage()) + } + return p.Err() } const opDescribeSpotDatafeedSubscription = "DescribeSpotDatafeedSubscription" // DescribeSpotDatafeedSubscriptionRequest generates a "aws/request.Request" representing the // client's request for the DescribeSpotDatafeedSubscription operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeSpotDatafeedSubscription for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeSpotDatafeedSubscription method directly -// instead. +// See DescribeSpotDatafeedSubscription for more information on using the DescribeSpotDatafeedSubscription +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeSpotDatafeedSubscriptionRequest method. 
// req, resp := client.DescribeSpotDatafeedSubscriptionRequest(params) @@ -8804,27 +11715,41 @@ func (c *EC2) DescribeSpotDatafeedSubscriptionRequest(input *DescribeSpotDatafee // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSpotDatafeedSubscription func (c *EC2) DescribeSpotDatafeedSubscription(input *DescribeSpotDatafeedSubscriptionInput) (*DescribeSpotDatafeedSubscriptionOutput, error) { req, out := c.DescribeSpotDatafeedSubscriptionRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeSpotDatafeedSubscriptionWithContext is the same as DescribeSpotDatafeedSubscription with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeSpotDatafeedSubscription for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeSpotDatafeedSubscriptionWithContext(ctx aws.Context, input *DescribeSpotDatafeedSubscriptionInput, opts ...request.Option) (*DescribeSpotDatafeedSubscriptionOutput, error) { + req, out := c.DescribeSpotDatafeedSubscriptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeSpotFleetInstances = "DescribeSpotFleetInstances" // DescribeSpotFleetInstancesRequest generates a "aws/request.Request" representing the // client's request for the DescribeSpotFleetInstances operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeSpotFleetInstances for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeSpotFleetInstances method directly -// instead. +// See DescribeSpotFleetInstances for more information on using the DescribeSpotFleetInstances +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeSpotFleetInstancesRequest method. 
// req, resp := client.DescribeSpotFleetInstancesRequest(params) @@ -8864,27 +11789,41 @@ func (c *EC2) DescribeSpotFleetInstancesRequest(input *DescribeSpotFleetInstance // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSpotFleetInstances func (c *EC2) DescribeSpotFleetInstances(input *DescribeSpotFleetInstancesInput) (*DescribeSpotFleetInstancesOutput, error) { req, out := c.DescribeSpotFleetInstancesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeSpotFleetInstancesWithContext is the same as DescribeSpotFleetInstances with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeSpotFleetInstances for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeSpotFleetInstancesWithContext(ctx aws.Context, input *DescribeSpotFleetInstancesInput, opts ...request.Option) (*DescribeSpotFleetInstancesOutput, error) { + req, out := c.DescribeSpotFleetInstancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeSpotFleetRequestHistory = "DescribeSpotFleetRequestHistory" // DescribeSpotFleetRequestHistoryRequest generates a "aws/request.Request" representing the // client's request for the DescribeSpotFleetRequestHistory operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeSpotFleetRequestHistory for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeSpotFleetRequestHistory method directly -// instead. +// See DescribeSpotFleetRequestHistory for more information on using the DescribeSpotFleetRequestHistory +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeSpotFleetRequestHistoryRequest method. 
// req, resp := client.DescribeSpotFleetRequestHistoryRequest(params) @@ -8929,27 +11868,41 @@ func (c *EC2) DescribeSpotFleetRequestHistoryRequest(input *DescribeSpotFleetReq // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSpotFleetRequestHistory func (c *EC2) DescribeSpotFleetRequestHistory(input *DescribeSpotFleetRequestHistoryInput) (*DescribeSpotFleetRequestHistoryOutput, error) { req, out := c.DescribeSpotFleetRequestHistoryRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeSpotFleetRequestHistoryWithContext is the same as DescribeSpotFleetRequestHistory with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeSpotFleetRequestHistory for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeSpotFleetRequestHistoryWithContext(ctx aws.Context, input *DescribeSpotFleetRequestHistoryInput, opts ...request.Option) (*DescribeSpotFleetRequestHistoryOutput, error) { + req, out := c.DescribeSpotFleetRequestHistoryRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeSpotFleetRequests = "DescribeSpotFleetRequests" // DescribeSpotFleetRequestsRequest generates a "aws/request.Request" representing the // client's request for the DescribeSpotFleetRequests operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeSpotFleetRequests for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeSpotFleetRequests method directly -// instead. +// See DescribeSpotFleetRequests for more information on using the DescribeSpotFleetRequests +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeSpotFleetRequestsRequest method. 
// req, resp := client.DescribeSpotFleetRequestsRequest(params) @@ -8998,8 +11951,23 @@ func (c *EC2) DescribeSpotFleetRequestsRequest(input *DescribeSpotFleetRequestsI // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSpotFleetRequests func (c *EC2) DescribeSpotFleetRequests(input *DescribeSpotFleetRequestsInput) (*DescribeSpotFleetRequestsOutput, error) { req, out := c.DescribeSpotFleetRequestsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeSpotFleetRequestsWithContext is the same as DescribeSpotFleetRequests with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeSpotFleetRequests for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeSpotFleetRequestsWithContext(ctx aws.Context, input *DescribeSpotFleetRequestsInput, opts ...request.Option) (*DescribeSpotFleetRequestsOutput, error) { + req, out := c.DescribeSpotFleetRequestsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } // DescribeSpotFleetRequestsPages iterates over the pages of a DescribeSpotFleetRequests operation, @@ -9019,31 +11987,55 @@ func (c *EC2) DescribeSpotFleetRequests(input *DescribeSpotFleetRequestsInput) ( // return pageNum <= 3 // }) // -func (c *EC2) DescribeSpotFleetRequestsPages(input *DescribeSpotFleetRequestsInput, fn func(p *DescribeSpotFleetRequestsOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.DescribeSpotFleetRequestsRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*DescribeSpotFleetRequestsOutput), lastPage) - }) +func (c *EC2) DescribeSpotFleetRequestsPages(input *DescribeSpotFleetRequestsInput, fn func(*DescribeSpotFleetRequestsOutput, bool) bool) error { + return c.DescribeSpotFleetRequestsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeSpotFleetRequestsPagesWithContext same as DescribeSpotFleetRequestsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeSpotFleetRequestsPagesWithContext(ctx aws.Context, input *DescribeSpotFleetRequestsInput, fn func(*DescribeSpotFleetRequestsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeSpotFleetRequestsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeSpotFleetRequestsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*DescribeSpotFleetRequestsOutput), !p.HasNextPage()) + } + return p.Err() } const opDescribeSpotInstanceRequests = "DescribeSpotInstanceRequests" // DescribeSpotInstanceRequestsRequest generates a "aws/request.Request" representing the // client's request for the DescribeSpotInstanceRequests operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DescribeSpotInstanceRequests for usage and error information. +// See DescribeSpotInstanceRequests for more information on using the DescribeSpotInstanceRequests +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeSpotInstanceRequests method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeSpotInstanceRequestsRequest method. // req, resp := client.DescribeSpotInstanceRequestsRequest(params) @@ -9097,27 +12089,41 @@ func (c *EC2) DescribeSpotInstanceRequestsRequest(input *DescribeSpotInstanceReq // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSpotInstanceRequests func (c *EC2) DescribeSpotInstanceRequests(input *DescribeSpotInstanceRequestsInput) (*DescribeSpotInstanceRequestsOutput, error) { req, out := c.DescribeSpotInstanceRequestsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeSpotInstanceRequestsWithContext is the same as DescribeSpotInstanceRequests with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeSpotInstanceRequests for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeSpotInstanceRequestsWithContext(ctx aws.Context, input *DescribeSpotInstanceRequestsInput, opts ...request.Option) (*DescribeSpotInstanceRequestsOutput, error) { + req, out := c.DescribeSpotInstanceRequestsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeSpotPriceHistory = "DescribeSpotPriceHistory" // DescribeSpotPriceHistoryRequest generates a "aws/request.Request" representing the // client's request for the DescribeSpotPriceHistory operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DescribeSpotPriceHistory for usage and error information. +// See DescribeSpotPriceHistory for more information on using the DescribeSpotPriceHistory +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeSpotPriceHistory method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeSpotPriceHistoryRequest method. // req, resp := client.DescribeSpotPriceHistoryRequest(params) @@ -9170,8 +12176,23 @@ func (c *EC2) DescribeSpotPriceHistoryRequest(input *DescribeSpotPriceHistoryInp // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSpotPriceHistory func (c *EC2) DescribeSpotPriceHistory(input *DescribeSpotPriceHistoryInput) (*DescribeSpotPriceHistoryOutput, error) { req, out := c.DescribeSpotPriceHistoryRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeSpotPriceHistoryWithContext is the same as DescribeSpotPriceHistory with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeSpotPriceHistory for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeSpotPriceHistoryWithContext(ctx aws.Context, input *DescribeSpotPriceHistoryInput, opts ...request.Option) (*DescribeSpotPriceHistoryOutput, error) { + req, out := c.DescribeSpotPriceHistoryRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } // DescribeSpotPriceHistoryPages iterates over the pages of a DescribeSpotPriceHistory operation, @@ -9191,31 +12212,55 @@ func (c *EC2) DescribeSpotPriceHistory(input *DescribeSpotPriceHistoryInput) (*D // return pageNum <= 3 // }) // -func (c *EC2) DescribeSpotPriceHistoryPages(input *DescribeSpotPriceHistoryInput, fn func(p *DescribeSpotPriceHistoryOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.DescribeSpotPriceHistoryRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*DescribeSpotPriceHistoryOutput), lastPage) - }) +func (c *EC2) DescribeSpotPriceHistoryPages(input *DescribeSpotPriceHistoryInput, fn func(*DescribeSpotPriceHistoryOutput, bool) bool) error { + return c.DescribeSpotPriceHistoryPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeSpotPriceHistoryPagesWithContext same as DescribeSpotPriceHistoryPages except +// it takes a Context and allows setting request options on the pages. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeSpotPriceHistoryPagesWithContext(ctx aws.Context, input *DescribeSpotPriceHistoryInput, fn func(*DescribeSpotPriceHistoryOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeSpotPriceHistoryInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeSpotPriceHistoryRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*DescribeSpotPriceHistoryOutput), !p.HasNextPage()) + } + return p.Err() } const opDescribeStaleSecurityGroups = "DescribeStaleSecurityGroups" // DescribeStaleSecurityGroupsRequest generates a "aws/request.Request" representing the // client's request for the DescribeStaleSecurityGroups operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DescribeStaleSecurityGroups for usage and error information. +// See DescribeStaleSecurityGroups for more information on using the DescribeStaleSecurityGroups +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeStaleSecurityGroups method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeStaleSecurityGroupsRequest method. // req, resp := client.DescribeStaleSecurityGroupsRequest(params) @@ -9258,27 +12303,41 @@ func (c *EC2) DescribeStaleSecurityGroupsRequest(input *DescribeStaleSecurityGro // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeStaleSecurityGroups func (c *EC2) DescribeStaleSecurityGroups(input *DescribeStaleSecurityGroupsInput) (*DescribeStaleSecurityGroupsOutput, error) { req, out := c.DescribeStaleSecurityGroupsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeStaleSecurityGroupsWithContext is the same as DescribeStaleSecurityGroups with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeStaleSecurityGroups for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) DescribeStaleSecurityGroupsWithContext(ctx aws.Context, input *DescribeStaleSecurityGroupsInput, opts ...request.Option) (*DescribeStaleSecurityGroupsOutput, error) { + req, out := c.DescribeStaleSecurityGroupsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeSubnets = "DescribeSubnets" // DescribeSubnetsRequest generates a "aws/request.Request" representing the // client's request for the DescribeSubnets operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeSubnets for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeSubnets method directly -// instead. +// See DescribeSubnets for more information on using the DescribeSubnets +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeSubnetsRequest method. // req, resp := client.DescribeSubnetsRequest(params) @@ -9321,27 +12380,41 @@ func (c *EC2) DescribeSubnetsRequest(input *DescribeSubnetsInput) (req *request. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSubnets func (c *EC2) DescribeSubnets(input *DescribeSubnetsInput) (*DescribeSubnetsOutput, error) { req, out := c.DescribeSubnetsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeSubnetsWithContext is the same as DescribeSubnets with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeSubnets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeSubnetsWithContext(ctx aws.Context, input *DescribeSubnetsInput, opts ...request.Option) (*DescribeSubnetsOutput, error) { + req, out := c.DescribeSubnetsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeTags = "DescribeTags" // DescribeTagsRequest generates a "aws/request.Request" representing the // client's request for the DescribeTags operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeTags for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeTags method directly -// instead. +// See DescribeTags for more information on using the DescribeTags +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeTagsRequest method. // req, resp := client.DescribeTagsRequest(params) @@ -9390,8 +12463,23 @@ func (c *EC2) DescribeTagsRequest(input *DescribeTagsInput) (req *request.Reques // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeTags func (c *EC2) DescribeTags(input *DescribeTagsInput) (*DescribeTagsOutput, error) { req, out := c.DescribeTagsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeTagsWithContext is the same as DescribeTags with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeTags for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeTagsWithContext(ctx aws.Context, input *DescribeTagsInput, opts ...request.Option) (*DescribeTagsOutput, error) { + req, out := c.DescribeTagsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } // DescribeTagsPages iterates over the pages of a DescribeTags operation, @@ -9411,31 +12499,55 @@ func (c *EC2) DescribeTags(input *DescribeTagsInput) (*DescribeTagsOutput, error // return pageNum <= 3 // }) // -func (c *EC2) DescribeTagsPages(input *DescribeTagsInput, fn func(p *DescribeTagsOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.DescribeTagsRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*DescribeTagsOutput), lastPage) - }) +func (c *EC2) DescribeTagsPages(input *DescribeTagsInput, fn func(*DescribeTagsOutput, bool) bool) error { + return c.DescribeTagsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeTagsPagesWithContext same as DescribeTagsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) DescribeTagsPagesWithContext(ctx aws.Context, input *DescribeTagsInput, fn func(*DescribeTagsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeTagsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeTagsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*DescribeTagsOutput), !p.HasNextPage()) + } + return p.Err() } const opDescribeVolumeAttribute = "DescribeVolumeAttribute" // DescribeVolumeAttributeRequest generates a "aws/request.Request" representing the // client's request for the DescribeVolumeAttribute operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeVolumeAttribute for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeVolumeAttribute method directly -// instead. +// See DescribeVolumeAttribute for more information on using the DescribeVolumeAttribute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeVolumeAttributeRequest method. // req, resp := client.DescribeVolumeAttributeRequest(params) @@ -9479,27 +12591,41 @@ func (c *EC2) DescribeVolumeAttributeRequest(input *DescribeVolumeAttributeInput // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVolumeAttribute func (c *EC2) DescribeVolumeAttribute(input *DescribeVolumeAttributeInput) (*DescribeVolumeAttributeOutput, error) { req, out := c.DescribeVolumeAttributeRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeVolumeAttributeWithContext is the same as DescribeVolumeAttribute with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeVolumeAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeVolumeAttributeWithContext(ctx aws.Context, input *DescribeVolumeAttributeInput, opts ...request.Option) (*DescribeVolumeAttributeOutput, error) { + req, out := c.DescribeVolumeAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() } const opDescribeVolumeStatus = "DescribeVolumeStatus" // DescribeVolumeStatusRequest generates a "aws/request.Request" representing the // client's request for the DescribeVolumeStatus operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeVolumeStatus for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeVolumeStatus method directly -// instead. +// See DescribeVolumeStatus for more information on using the DescribeVolumeStatus +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeVolumeStatusRequest method. // req, resp := client.DescribeVolumeStatusRequest(params) @@ -9579,8 +12705,23 @@ func (c *EC2) DescribeVolumeStatusRequest(input *DescribeVolumeStatusInput) (req // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVolumeStatus func (c *EC2) DescribeVolumeStatus(input *DescribeVolumeStatusInput) (*DescribeVolumeStatusOutput, error) { req, out := c.DescribeVolumeStatusRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeVolumeStatusWithContext is the same as DescribeVolumeStatus with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeVolumeStatus for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeVolumeStatusWithContext(ctx aws.Context, input *DescribeVolumeStatusInput, opts ...request.Option) (*DescribeVolumeStatusOutput, error) { + req, out := c.DescribeVolumeStatusRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() } // DescribeVolumeStatusPages iterates over the pages of a DescribeVolumeStatus operation, @@ -9600,31 +12741,55 @@ func (c *EC2) DescribeVolumeStatus(input *DescribeVolumeStatusInput) (*DescribeV // return pageNum <= 3 // }) // -func (c *EC2) DescribeVolumeStatusPages(input *DescribeVolumeStatusInput, fn func(p *DescribeVolumeStatusOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.DescribeVolumeStatusRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*DescribeVolumeStatusOutput), lastPage) - }) +func (c *EC2) DescribeVolumeStatusPages(input *DescribeVolumeStatusInput, fn func(*DescribeVolumeStatusOutput, bool) bool) error { + return c.DescribeVolumeStatusPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeVolumeStatusPagesWithContext same as DescribeVolumeStatusPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeVolumeStatusPagesWithContext(ctx aws.Context, input *DescribeVolumeStatusInput, fn func(*DescribeVolumeStatusOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeVolumeStatusInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeVolumeStatusRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*DescribeVolumeStatusOutput), !p.HasNextPage()) + } + return p.Err() } const opDescribeVolumes = "DescribeVolumes" // DescribeVolumesRequest generates a "aws/request.Request" representing the // client's request for the DescribeVolumes operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeVolumes for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeVolumes method directly -// instead. +// See DescribeVolumes for more information on using the DescribeVolumes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeVolumesRequest method. // req, resp := client.DescribeVolumesRequest(params) @@ -9680,8 +12845,23 @@ func (c *EC2) DescribeVolumesRequest(input *DescribeVolumesInput) (req *request. 
// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVolumes func (c *EC2) DescribeVolumes(input *DescribeVolumesInput) (*DescribeVolumesOutput, error) { req, out := c.DescribeVolumesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeVolumesWithContext is the same as DescribeVolumes with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeVolumes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeVolumesWithContext(ctx aws.Context, input *DescribeVolumesInput, opts ...request.Option) (*DescribeVolumesOutput, error) { + req, out := c.DescribeVolumesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } // DescribeVolumesPages iterates over the pages of a DescribeVolumes operation, @@ -9701,31 +12881,141 @@ func (c *EC2) DescribeVolumes(input *DescribeVolumesInput) (*DescribeVolumesOutp // return pageNum <= 3 // }) // -func (c *EC2) DescribeVolumesPages(input *DescribeVolumesInput, fn func(p *DescribeVolumesOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.DescribeVolumesRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*DescribeVolumesOutput), lastPage) - }) +func (c *EC2) DescribeVolumesPages(input *DescribeVolumesInput, fn func(*DescribeVolumesOutput, bool) bool) error { + return c.DescribeVolumesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeVolumesPagesWithContext same as DescribeVolumesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeVolumesPagesWithContext(ctx aws.Context, input *DescribeVolumesInput, fn func(*DescribeVolumesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeVolumesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeVolumesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*DescribeVolumesOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opDescribeVolumesModifications = "DescribeVolumesModifications" + +// DescribeVolumesModificationsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVolumesModifications operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeVolumesModifications for more information on using the DescribeVolumesModifications +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeVolumesModificationsRequest method. +// req, resp := client.DescribeVolumesModificationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVolumesModifications +func (c *EC2) DescribeVolumesModificationsRequest(input *DescribeVolumesModificationsInput) (req *request.Request, output *DescribeVolumesModificationsOutput) { + op := &request.Operation{ + Name: opDescribeVolumesModifications, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVolumesModificationsInput{} + } + + output = &DescribeVolumesModificationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeVolumesModifications API operation for Amazon Elastic Compute Cloud. +// +// Reports the current modification status of EBS volumes. +// +// Current-generation EBS volumes support modification of attributes including +// type, size, and (for io1 volumes) IOPS provisioning while either attached +// to or detached from an instance. Following an action from the API or the +// console to modify a volume, the status of the modification may be modifying, +// optimizing, completed, or failed. If a volume has never been modified, then +// certain elements of the returned VolumeModification objects are null. +// +// You can also use CloudWatch Events to check the status of a modification +// to an EBS volume. For information about CloudWatch Events, see the Amazon +// CloudWatch Events User Guide (http://docs.aws.amazon.com/AmazonCloudWatch/latest/events/). +// For more information, see Monitoring Volume Modifications" (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-expand-volume.html#monitoring_mods). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeVolumesModifications for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVolumesModifications +func (c *EC2) DescribeVolumesModifications(input *DescribeVolumesModificationsInput) (*DescribeVolumesModificationsOutput, error) { + req, out := c.DescribeVolumesModificationsRequest(input) + return out, req.Send() +} + +// DescribeVolumesModificationsWithContext is the same as DescribeVolumesModifications with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeVolumesModifications for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeVolumesModificationsWithContext(ctx aws.Context, input *DescribeVolumesModificationsInput, opts ...request.Option) (*DescribeVolumesModificationsOutput, error) { + req, out := c.DescribeVolumesModificationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() } const opDescribeVpcAttribute = "DescribeVpcAttribute" // DescribeVpcAttributeRequest generates a "aws/request.Request" representing the // client's request for the DescribeVpcAttribute operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeVpcAttribute for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeVpcAttribute method directly -// instead. +// See DescribeVpcAttribute for more information on using the DescribeVpcAttribute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeVpcAttributeRequest method. // req, resp := client.DescribeVpcAttributeRequest(params) @@ -9766,27 +13056,41 @@ func (c *EC2) DescribeVpcAttributeRequest(input *DescribeVpcAttributeInput) (req // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcAttribute func (c *EC2) DescribeVpcAttribute(input *DescribeVpcAttributeInput) (*DescribeVpcAttributeOutput, error) { req, out := c.DescribeVpcAttributeRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeVpcAttributeWithContext is the same as DescribeVpcAttribute with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeVpcAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeVpcAttributeWithContext(ctx aws.Context, input *DescribeVpcAttributeInput, opts ...request.Option) (*DescribeVpcAttributeOutput, error) { + req, out := c.DescribeVpcAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeVpcClassicLink = "DescribeVpcClassicLink" // DescribeVpcClassicLinkRequest generates a "aws/request.Request" representing the // client's request for the DescribeVpcClassicLink operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeVpcClassicLink for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeVpcClassicLink method directly -// instead. +// See DescribeVpcClassicLink for more information on using the DescribeVpcClassicLink +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeVpcClassicLinkRequest method. // req, resp := client.DescribeVpcClassicLinkRequest(params) @@ -9826,27 +13130,41 @@ func (c *EC2) DescribeVpcClassicLinkRequest(input *DescribeVpcClassicLinkInput) // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcClassicLink func (c *EC2) DescribeVpcClassicLink(input *DescribeVpcClassicLinkInput) (*DescribeVpcClassicLinkOutput, error) { req, out := c.DescribeVpcClassicLinkRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeVpcClassicLinkWithContext is the same as DescribeVpcClassicLink with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeVpcClassicLink for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeVpcClassicLinkWithContext(ctx aws.Context, input *DescribeVpcClassicLinkInput, opts ...request.Option) (*DescribeVpcClassicLinkOutput, error) { + req, out := c.DescribeVpcClassicLinkRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeVpcClassicLinkDnsSupport = "DescribeVpcClassicLinkDnsSupport" // DescribeVpcClassicLinkDnsSupportRequest generates a "aws/request.Request" representing the // client's request for the DescribeVpcClassicLinkDnsSupport operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeVpcClassicLinkDnsSupport for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeVpcClassicLinkDnsSupport method directly -// instead. +// See DescribeVpcClassicLinkDnsSupport for more information on using the DescribeVpcClassicLinkDnsSupport +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeVpcClassicLinkDnsSupportRequest method. // req, resp := client.DescribeVpcClassicLinkDnsSupportRequest(params) @@ -9879,8 +13197,8 @@ func (c *EC2) DescribeVpcClassicLinkDnsSupportRequest(input *DescribeVpcClassicL // the DNS hostname of a linked EC2-Classic instance resolves to its private // IP address when addressed from an instance in the VPC to which it's linked. // Similarly, the DNS hostname of an instance in a VPC resolves to its private -// IP address when addressed from a linked EC2-Classic instance. For more information -// about ClassicLink, see ClassicLink (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html) +// IP address when addressed from a linked EC2-Classic instance. For more information, +// see ClassicLink (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -9892,27 +13210,41 @@ func (c *EC2) DescribeVpcClassicLinkDnsSupportRequest(input *DescribeVpcClassicL // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcClassicLinkDnsSupport func (c *EC2) DescribeVpcClassicLinkDnsSupport(input *DescribeVpcClassicLinkDnsSupportInput) (*DescribeVpcClassicLinkDnsSupportOutput, error) { req, out := c.DescribeVpcClassicLinkDnsSupportRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeVpcClassicLinkDnsSupportWithContext is the same as DescribeVpcClassicLinkDnsSupport with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeVpcClassicLinkDnsSupport for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeVpcClassicLinkDnsSupportWithContext(ctx aws.Context, input *DescribeVpcClassicLinkDnsSupportInput, opts ...request.Option) (*DescribeVpcClassicLinkDnsSupportOutput, error) { + req, out := c.DescribeVpcClassicLinkDnsSupportRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeVpcEndpointServices = "DescribeVpcEndpointServices" // DescribeVpcEndpointServicesRequest generates a "aws/request.Request" representing the // client's request for the DescribeVpcEndpointServices operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeVpcEndpointServices for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeVpcEndpointServices method directly -// instead. 
+// See DescribeVpcEndpointServices for more information on using the DescribeVpcEndpointServices +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeVpcEndpointServicesRequest method. // req, resp := client.DescribeVpcEndpointServicesRequest(params) @@ -9953,27 +13285,41 @@ func (c *EC2) DescribeVpcEndpointServicesRequest(input *DescribeVpcEndpointServi // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcEndpointServices func (c *EC2) DescribeVpcEndpointServices(input *DescribeVpcEndpointServicesInput) (*DescribeVpcEndpointServicesOutput, error) { req, out := c.DescribeVpcEndpointServicesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeVpcEndpointServicesWithContext is the same as DescribeVpcEndpointServices with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeVpcEndpointServices for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeVpcEndpointServicesWithContext(ctx aws.Context, input *DescribeVpcEndpointServicesInput, opts ...request.Option) (*DescribeVpcEndpointServicesOutput, error) { + req, out := c.DescribeVpcEndpointServicesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeVpcEndpoints = "DescribeVpcEndpoints" // DescribeVpcEndpointsRequest generates a "aws/request.Request" representing the // client's request for the DescribeVpcEndpoints operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeVpcEndpoints for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeVpcEndpoints method directly -// instead. +// See DescribeVpcEndpoints for more information on using the DescribeVpcEndpoints +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeVpcEndpointsRequest method. 
// req, resp := client.DescribeVpcEndpointsRequest(params) @@ -10013,27 +13359,41 @@ func (c *EC2) DescribeVpcEndpointsRequest(input *DescribeVpcEndpointsInput) (req // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcEndpoints func (c *EC2) DescribeVpcEndpoints(input *DescribeVpcEndpointsInput) (*DescribeVpcEndpointsOutput, error) { req, out := c.DescribeVpcEndpointsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeVpcEndpointsWithContext is the same as DescribeVpcEndpoints with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeVpcEndpoints for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeVpcEndpointsWithContext(ctx aws.Context, input *DescribeVpcEndpointsInput, opts ...request.Option) (*DescribeVpcEndpointsOutput, error) { + req, out := c.DescribeVpcEndpointsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeVpcPeeringConnections = "DescribeVpcPeeringConnections" // DescribeVpcPeeringConnectionsRequest generates a "aws/request.Request" representing the // client's request for the DescribeVpcPeeringConnections operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeVpcPeeringConnections for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeVpcPeeringConnections method directly -// instead. +// See DescribeVpcPeeringConnections for more information on using the DescribeVpcPeeringConnections +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeVpcPeeringConnectionsRequest method. 
// req, resp := client.DescribeVpcPeeringConnectionsRequest(params) @@ -10073,27 +13433,41 @@ func (c *EC2) DescribeVpcPeeringConnectionsRequest(input *DescribeVpcPeeringConn // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcPeeringConnections func (c *EC2) DescribeVpcPeeringConnections(input *DescribeVpcPeeringConnectionsInput) (*DescribeVpcPeeringConnectionsOutput, error) { req, out := c.DescribeVpcPeeringConnectionsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeVpcPeeringConnectionsWithContext is the same as DescribeVpcPeeringConnections with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeVpcPeeringConnections for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeVpcPeeringConnectionsWithContext(ctx aws.Context, input *DescribeVpcPeeringConnectionsInput, opts ...request.Option) (*DescribeVpcPeeringConnectionsOutput, error) { + req, out := c.DescribeVpcPeeringConnectionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeVpcs = "DescribeVpcs" // DescribeVpcsRequest generates a "aws/request.Request" representing the // client's request for the DescribeVpcs operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DescribeVpcs for usage and error information. +// See DescribeVpcs for more information on using the DescribeVpcs +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeVpcs method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeVpcsRequest method. // req, resp := client.DescribeVpcsRequest(params) @@ -10133,27 +13507,41 @@ func (c *EC2) DescribeVpcsRequest(input *DescribeVpcsInput) (req *request.Reques // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpcs func (c *EC2) DescribeVpcs(input *DescribeVpcsInput) (*DescribeVpcsOutput, error) { req, out := c.DescribeVpcsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeVpcsWithContext is the same as DescribeVpcs with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeVpcs for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeVpcsWithContext(ctx aws.Context, input *DescribeVpcsInput, opts ...request.Option) (*DescribeVpcsOutput, error) { + req, out := c.DescribeVpcsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeVpnConnections = "DescribeVpnConnections" // DescribeVpnConnectionsRequest generates a "aws/request.Request" representing the // client's request for the DescribeVpnConnections operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DescribeVpnConnections for usage and error information. +// See DescribeVpnConnections for more information on using the DescribeVpnConnections +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeVpnConnections method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeVpnConnectionsRequest method. // req, resp := client.DescribeVpnConnectionsRequest(params) @@ -10197,27 +13585,41 @@ func (c *EC2) DescribeVpnConnectionsRequest(input *DescribeVpnConnectionsInput) // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpnConnections func (c *EC2) DescribeVpnConnections(input *DescribeVpnConnectionsInput) (*DescribeVpnConnectionsOutput, error) { req, out := c.DescribeVpnConnectionsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeVpnConnectionsWithContext is the same as DescribeVpnConnections with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeVpnConnections for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeVpnConnectionsWithContext(ctx aws.Context, input *DescribeVpnConnectionsInput, opts ...request.Option) (*DescribeVpnConnectionsOutput, error) { + req, out := c.DescribeVpnConnectionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeVpnGateways = "DescribeVpnGateways" // DescribeVpnGatewaysRequest generates a "aws/request.Request" representing the // client's request for the DescribeVpnGateways operation. 
The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DescribeVpnGateways for usage and error information. +// See DescribeVpnGateways for more information on using the DescribeVpnGateways +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeVpnGateways method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeVpnGatewaysRequest method. // req, resp := client.DescribeVpnGatewaysRequest(params) @@ -10261,27 +13663,41 @@ func (c *EC2) DescribeVpnGatewaysRequest(input *DescribeVpnGatewaysInput) (req * // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVpnGateways func (c *EC2) DescribeVpnGateways(input *DescribeVpnGatewaysInput) (*DescribeVpnGatewaysOutput, error) { req, out := c.DescribeVpnGatewaysRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeVpnGatewaysWithContext is the same as DescribeVpnGateways with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeVpnGateways for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeVpnGatewaysWithContext(ctx aws.Context, input *DescribeVpnGatewaysInput, opts ...request.Option) (*DescribeVpnGatewaysOutput, error) { + req, out := c.DescribeVpnGatewaysRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDetachClassicLinkVpc = "DetachClassicLinkVpc" // DetachClassicLinkVpcRequest generates a "aws/request.Request" representing the // client's request for the DetachClassicLinkVpc operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DetachClassicLinkVpc for usage and error information. +// See DetachClassicLinkVpc for more information on using the DetachClassicLinkVpc +// API call, and error handling. 
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DetachClassicLinkVpc method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DetachClassicLinkVpcRequest method. // req, resp := client.DetachClassicLinkVpcRequest(params) @@ -10323,27 +13739,41 @@ func (c *EC2) DetachClassicLinkVpcRequest(input *DetachClassicLinkVpcInput) (req // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DetachClassicLinkVpc func (c *EC2) DetachClassicLinkVpc(input *DetachClassicLinkVpcInput) (*DetachClassicLinkVpcOutput, error) { req, out := c.DetachClassicLinkVpcRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DetachClassicLinkVpcWithContext is the same as DetachClassicLinkVpc with the addition of +// the ability to pass a context and additional request options. +// +// See DetachClassicLinkVpc for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DetachClassicLinkVpcWithContext(ctx aws.Context, input *DetachClassicLinkVpcInput, opts ...request.Option) (*DetachClassicLinkVpcOutput, error) { + req, out := c.DetachClassicLinkVpcRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDetachInternetGateway = "DetachInternetGateway" // DetachInternetGatewayRequest generates a "aws/request.Request" representing the // client's request for the DetachInternetGateway operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DetachInternetGateway for usage and error information. +// See DetachInternetGateway for more information on using the DetachInternetGateway +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DetachInternetGateway method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DetachInternetGatewayRequest method. 
// req, resp := client.DetachInternetGatewayRequest(params) @@ -10376,7 +13806,7 @@ func (c *EC2) DetachInternetGatewayRequest(input *DetachInternetGatewayInput) (r // // Detaches an Internet gateway from a VPC, disabling connectivity between the // Internet and the VPC. The VPC must not contain any running instances with -// Elastic IP addresses. +// Elastic IP addresses or public IPv4 addresses. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -10387,27 +13817,41 @@ func (c *EC2) DetachInternetGatewayRequest(input *DetachInternetGatewayInput) (r // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DetachInternetGateway func (c *EC2) DetachInternetGateway(input *DetachInternetGatewayInput) (*DetachInternetGatewayOutput, error) { req, out := c.DetachInternetGatewayRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DetachInternetGatewayWithContext is the same as DetachInternetGateway with the addition of +// the ability to pass a context and additional request options. +// +// See DetachInternetGateway for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DetachInternetGatewayWithContext(ctx aws.Context, input *DetachInternetGatewayInput, opts ...request.Option) (*DetachInternetGatewayOutput, error) { + req, out := c.DetachInternetGatewayRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDetachNetworkInterface = "DetachNetworkInterface" // DetachNetworkInterfaceRequest generates a "aws/request.Request" representing the // client's request for the DetachNetworkInterface operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DetachNetworkInterface for usage and error information. +// See DetachNetworkInterface for more information on using the DetachNetworkInterface +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DetachNetworkInterface method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DetachNetworkInterfaceRequest method. 
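As the updated DetachInternetGateway comment notes, the VPC must contain no running instances with Elastic IP addresses or public IPv4 addresses before the gateway can be detached. A minimal sketch of the call itself; the gateway and VPC IDs are placeholders:

    package example

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ec2"
    )

    // detachInternetGateway detaches an Internet gateway from a VPC. Both
    // IDs are required; the values shown here are placeholders.
    func detachInternetGateway(svc *ec2.EC2) error {
        _, err := svc.DetachInternetGateway(&ec2.DetachInternetGatewayInput{
            InternetGatewayId: aws.String("igw-0example"),
            VpcId:             aws.String("vpc-0example"),
        })
        return err
    }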
// req, resp := client.DetachNetworkInterfaceRequest(params) @@ -10449,27 +13893,41 @@ func (c *EC2) DetachNetworkInterfaceRequest(input *DetachNetworkInterfaceInput) // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DetachNetworkInterface func (c *EC2) DetachNetworkInterface(input *DetachNetworkInterfaceInput) (*DetachNetworkInterfaceOutput, error) { req, out := c.DetachNetworkInterfaceRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DetachNetworkInterfaceWithContext is the same as DetachNetworkInterface with the addition of +// the ability to pass a context and additional request options. +// +// See DetachNetworkInterface for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DetachNetworkInterfaceWithContext(ctx aws.Context, input *DetachNetworkInterfaceInput, opts ...request.Option) (*DetachNetworkInterfaceOutput, error) { + req, out := c.DetachNetworkInterfaceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDetachVolume = "DetachVolume" // DetachVolumeRequest generates a "aws/request.Request" representing the // client's request for the DetachVolume operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DetachVolume for usage and error information. +// See DetachVolume for more information on using the DetachVolume +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DetachVolume method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DetachVolumeRequest method. // req, resp := client.DetachVolumeRequest(params) @@ -10522,27 +13980,41 @@ func (c *EC2) DetachVolumeRequest(input *DetachVolumeInput) (req *request.Reques // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DetachVolume func (c *EC2) DetachVolume(input *DetachVolumeInput) (*VolumeAttachment, error) { req, out := c.DetachVolumeRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DetachVolumeWithContext is the same as DetachVolume with the addition of +// the ability to pass a context and additional request options. +// +// See DetachVolume for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DetachVolumeWithContext(ctx aws.Context, input *DetachVolumeInput, opts ...request.Option) (*VolumeAttachment, error) { + req, out := c.DetachVolumeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDetachVpnGateway = "DetachVpnGateway" // DetachVpnGatewayRequest generates a "aws/request.Request" representing the // client's request for the DetachVpnGateway operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DetachVpnGateway for usage and error information. +// See DetachVpnGateway for more information on using the DetachVpnGateway +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DetachVpnGateway method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DetachVpnGatewayRequest method. // req, resp := client.DetachVpnGatewayRequest(params) @@ -10591,27 +14063,41 @@ func (c *EC2) DetachVpnGatewayRequest(input *DetachVpnGatewayInput) (req *reques // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DetachVpnGateway func (c *EC2) DetachVpnGateway(input *DetachVpnGatewayInput) (*DetachVpnGatewayOutput, error) { req, out := c.DetachVpnGatewayRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DetachVpnGatewayWithContext is the same as DetachVpnGateway with the addition of +// the ability to pass a context and additional request options. +// +// See DetachVpnGateway for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DetachVpnGatewayWithContext(ctx aws.Context, input *DetachVpnGatewayInput, opts ...request.Option) (*DetachVpnGatewayOutput, error) { + req, out := c.DetachVpnGatewayRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDisableVgwRoutePropagation = "DisableVgwRoutePropagation" // DisableVgwRoutePropagationRequest generates a "aws/request.Request" representing the // client's request for the DisableVgwRoutePropagation operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DisableVgwRoutePropagation for usage and error information. +// See DisableVgwRoutePropagation for more information on using the DisableVgwRoutePropagation +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DisableVgwRoutePropagation method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DisableVgwRoutePropagationRequest method. // req, resp := client.DisableVgwRoutePropagationRequest(params) @@ -10654,27 +14140,41 @@ func (c *EC2) DisableVgwRoutePropagationRequest(input *DisableVgwRoutePropagatio // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisableVgwRoutePropagation func (c *EC2) DisableVgwRoutePropagation(input *DisableVgwRoutePropagationInput) (*DisableVgwRoutePropagationOutput, error) { req, out := c.DisableVgwRoutePropagationRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DisableVgwRoutePropagationWithContext is the same as DisableVgwRoutePropagation with the addition of +// the ability to pass a context and additional request options. +// +// See DisableVgwRoutePropagation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DisableVgwRoutePropagationWithContext(ctx aws.Context, input *DisableVgwRoutePropagationInput, opts ...request.Option) (*DisableVgwRoutePropagationOutput, error) { + req, out := c.DisableVgwRoutePropagationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDisableVpcClassicLink = "DisableVpcClassicLink" // DisableVpcClassicLinkRequest generates a "aws/request.Request" representing the // client's request for the DisableVpcClassicLink operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DisableVpcClassicLink for usage and error information. +// See DisableVpcClassicLink for more information on using the DisableVpcClassicLink +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. 
If -// you just want the service response, call the DisableVpcClassicLink method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DisableVpcClassicLinkRequest method. // req, resp := client.DisableVpcClassicLinkRequest(params) @@ -10715,27 +14215,41 @@ func (c *EC2) DisableVpcClassicLinkRequest(input *DisableVpcClassicLinkInput) (r // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisableVpcClassicLink func (c *EC2) DisableVpcClassicLink(input *DisableVpcClassicLinkInput) (*DisableVpcClassicLinkOutput, error) { req, out := c.DisableVpcClassicLinkRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DisableVpcClassicLinkWithContext is the same as DisableVpcClassicLink with the addition of +// the ability to pass a context and additional request options. +// +// See DisableVpcClassicLink for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DisableVpcClassicLinkWithContext(ctx aws.Context, input *DisableVpcClassicLinkInput, opts ...request.Option) (*DisableVpcClassicLinkOutput, error) { + req, out := c.DisableVpcClassicLinkRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDisableVpcClassicLinkDnsSupport = "DisableVpcClassicLinkDnsSupport" // DisableVpcClassicLinkDnsSupportRequest generates a "aws/request.Request" representing the // client's request for the DisableVpcClassicLinkDnsSupport operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DisableVpcClassicLinkDnsSupport for usage and error information. +// See DisableVpcClassicLinkDnsSupport for more information on using the DisableVpcClassicLinkDnsSupport +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DisableVpcClassicLinkDnsSupport method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DisableVpcClassicLinkDnsSupportRequest method. 
// req, resp := client.DisableVpcClassicLinkDnsSupportRequest(params) @@ -10779,27 +14293,41 @@ func (c *EC2) DisableVpcClassicLinkDnsSupportRequest(input *DisableVpcClassicLin // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisableVpcClassicLinkDnsSupport func (c *EC2) DisableVpcClassicLinkDnsSupport(input *DisableVpcClassicLinkDnsSupportInput) (*DisableVpcClassicLinkDnsSupportOutput, error) { req, out := c.DisableVpcClassicLinkDnsSupportRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DisableVpcClassicLinkDnsSupportWithContext is the same as DisableVpcClassicLinkDnsSupport with the addition of +// the ability to pass a context and additional request options. +// +// See DisableVpcClassicLinkDnsSupport for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DisableVpcClassicLinkDnsSupportWithContext(ctx aws.Context, input *DisableVpcClassicLinkDnsSupportInput, opts ...request.Option) (*DisableVpcClassicLinkDnsSupportOutput, error) { + req, out := c.DisableVpcClassicLinkDnsSupportRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDisassociateAddress = "DisassociateAddress" // DisassociateAddressRequest generates a "aws/request.Request" representing the // client's request for the DisassociateAddress operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DisassociateAddress for usage and error information. +// See DisassociateAddress for more information on using the DisassociateAddress +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DisassociateAddress method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DisassociateAddressRequest method. // req, resp := client.DisassociateAddressRequest(params) @@ -10849,27 +14377,117 @@ func (c *EC2) DisassociateAddressRequest(input *DisassociateAddressInput) (req * // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateAddress func (c *EC2) DisassociateAddress(input *DisassociateAddressInput) (*DisassociateAddressOutput, error) { req, out := c.DisassociateAddressRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DisassociateAddressWithContext is the same as DisassociateAddress with the addition of +// the ability to pass a context and additional request options. 
+// +// See DisassociateAddress for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DisassociateAddressWithContext(ctx aws.Context, input *DisassociateAddressInput, opts ...request.Option) (*DisassociateAddressOutput, error) { + req, out := c.DisassociateAddressRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDisassociateIamInstanceProfile = "DisassociateIamInstanceProfile" + +// DisassociateIamInstanceProfileRequest generates a "aws/request.Request" representing the +// client's request for the DisassociateIamInstanceProfile operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DisassociateIamInstanceProfile for more information on using the DisassociateIamInstanceProfile +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DisassociateIamInstanceProfileRequest method. +// req, resp := client.DisassociateIamInstanceProfileRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateIamInstanceProfile +func (c *EC2) DisassociateIamInstanceProfileRequest(input *DisassociateIamInstanceProfileInput) (req *request.Request, output *DisassociateIamInstanceProfileOutput) { + op := &request.Operation{ + Name: opDisassociateIamInstanceProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisassociateIamInstanceProfileInput{} + } + + output = &DisassociateIamInstanceProfileOutput{} + req = c.newRequest(op, input, output) + return +} + +// DisassociateIamInstanceProfile API operation for Amazon Elastic Compute Cloud. +// +// Disassociates an IAM instance profile from a running or stopped instance. +// +// Use DescribeIamInstanceProfileAssociations to get the association ID. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DisassociateIamInstanceProfile for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateIamInstanceProfile +func (c *EC2) DisassociateIamInstanceProfile(input *DisassociateIamInstanceProfileInput) (*DisassociateIamInstanceProfileOutput, error) { + req, out := c.DisassociateIamInstanceProfileRequest(input) + return out, req.Send() +} + +// DisassociateIamInstanceProfileWithContext is the same as DisassociateIamInstanceProfile with the addition of +// the ability to pass a context and additional request options. +// +// See DisassociateIamInstanceProfile for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DisassociateIamInstanceProfileWithContext(ctx aws.Context, input *DisassociateIamInstanceProfileInput, opts ...request.Option) (*DisassociateIamInstanceProfileOutput, error) { + req, out := c.DisassociateIamInstanceProfileRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDisassociateRouteTable = "DisassociateRouteTable" // DisassociateRouteTableRequest generates a "aws/request.Request" representing the // client's request for the DisassociateRouteTable operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DisassociateRouteTable for usage and error information. +// See DisassociateRouteTable for more information on using the DisassociateRouteTable +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DisassociateRouteTable method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DisassociateRouteTableRequest method. // req, resp := client.DisassociateRouteTableRequest(params) @@ -10916,27 +14534,41 @@ func (c *EC2) DisassociateRouteTableRequest(input *DisassociateRouteTableInput) // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateRouteTable func (c *EC2) DisassociateRouteTable(input *DisassociateRouteTableInput) (*DisassociateRouteTableOutput, error) { req, out := c.DisassociateRouteTableRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DisassociateRouteTableWithContext is the same as DisassociateRouteTable with the addition of +// the ability to pass a context and additional request options. +// +// See DisassociateRouteTable for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DisassociateRouteTableWithContext(ctx aws.Context, input *DisassociateRouteTableInput, opts ...request.Option) (*DisassociateRouteTableOutput, error) { + req, out := c.DisassociateRouteTableRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
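The new DisassociateIamInstanceProfile operation takes an association ID rather than an instance ID, and the comment above points at DescribeIamInstanceProfileAssociations to look it up. A sketch of that two-call flow, assuming the instance ID is known and that the filter name and field names below match this vendored revision:

    package example

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ec2"
    )

    // disassociateInstanceProfile looks up the IAM instance profile
    // association for an instance and then removes it.
    func disassociateInstanceProfile(svc *ec2.EC2, instanceID string) error {
        desc, err := svc.DescribeIamInstanceProfileAssociations(&ec2.DescribeIamInstanceProfileAssociationsInput{
            Filters: []*ec2.Filter{{
                Name:   aws.String("instance-id"),
                Values: []*string{aws.String(instanceID)},
            }},
        })
        if err != nil {
            return err
        }
        if len(desc.IamInstanceProfileAssociations) == 0 {
            return fmt.Errorf("no IAM instance profile association for %s", instanceID)
        }

        _, err = svc.DisassociateIamInstanceProfile(&ec2.DisassociateIamInstanceProfileInput{
            AssociationId: desc.IamInstanceProfileAssociations[0].AssociationId,
        })
        return err
    }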
+ return out, req.Send() } const opDisassociateSubnetCidrBlock = "DisassociateSubnetCidrBlock" // DisassociateSubnetCidrBlockRequest generates a "aws/request.Request" representing the // client's request for the DisassociateSubnetCidrBlock operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DisassociateSubnetCidrBlock for usage and error information. +// See DisassociateSubnetCidrBlock for more information on using the DisassociateSubnetCidrBlock +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DisassociateSubnetCidrBlock method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DisassociateSubnetCidrBlockRequest method. // req, resp := client.DisassociateSubnetCidrBlockRequest(params) @@ -10978,27 +14610,41 @@ func (c *EC2) DisassociateSubnetCidrBlockRequest(input *DisassociateSubnetCidrBl // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateSubnetCidrBlock func (c *EC2) DisassociateSubnetCidrBlock(input *DisassociateSubnetCidrBlockInput) (*DisassociateSubnetCidrBlockOutput, error) { req, out := c.DisassociateSubnetCidrBlockRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DisassociateSubnetCidrBlockWithContext is the same as DisassociateSubnetCidrBlock with the addition of +// the ability to pass a context and additional request options. +// +// See DisassociateSubnetCidrBlock for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DisassociateSubnetCidrBlockWithContext(ctx aws.Context, input *DisassociateSubnetCidrBlockInput, opts ...request.Option) (*DisassociateSubnetCidrBlockOutput, error) { + req, out := c.DisassociateSubnetCidrBlockRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDisassociateVpcCidrBlock = "DisassociateVpcCidrBlock" // DisassociateVpcCidrBlockRequest generates a "aws/request.Request" representing the // client's request for the DisassociateVpcCidrBlock operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
// -// See DisassociateVpcCidrBlock for usage and error information. +// See DisassociateVpcCidrBlock for more information on using the DisassociateVpcCidrBlock +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DisassociateVpcCidrBlock method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DisassociateVpcCidrBlockRequest method. // req, resp := client.DisassociateVpcCidrBlockRequest(params) @@ -11027,9 +14673,13 @@ func (c *EC2) DisassociateVpcCidrBlockRequest(input *DisassociateVpcCidrBlockInp // DisassociateVpcCidrBlock API operation for Amazon Elastic Compute Cloud. // -// Disassociates a CIDR block from a VPC. Currently, you can disassociate an -// IPv6 CIDR block only. You must detach or delete all gateways and resources -// that are associated with the CIDR block before you can disassociate it. +// Disassociates a CIDR block from a VPC. To disassociate the CIDR block, you +// must specify its association ID. You can get the association ID by using +// DescribeVpcs. You must detach or delete all gateways and resources that are +// associated with the CIDR block before you can disassociate it. +// +// You cannot disassociate the CIDR block with which you originally created +// the VPC (the primary CIDR block). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -11040,27 +14690,41 @@ func (c *EC2) DisassociateVpcCidrBlockRequest(input *DisassociateVpcCidrBlockInp // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateVpcCidrBlock func (c *EC2) DisassociateVpcCidrBlock(input *DisassociateVpcCidrBlockInput) (*DisassociateVpcCidrBlockOutput, error) { req, out := c.DisassociateVpcCidrBlockRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DisassociateVpcCidrBlockWithContext is the same as DisassociateVpcCidrBlock with the addition of +// the ability to pass a context and additional request options. +// +// See DisassociateVpcCidrBlock for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DisassociateVpcCidrBlockWithContext(ctx aws.Context, input *DisassociateVpcCidrBlockInput, opts ...request.Option) (*DisassociateVpcCidrBlockOutput, error) { + req, out := c.DisassociateVpcCidrBlockRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opEnableVgwRoutePropagation = "EnableVgwRoutePropagation" // EnableVgwRoutePropagationRequest generates a "aws/request.Request" representing the // client's request for the EnableVgwRoutePropagation operation. 
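The updated DisassociateVpcCidrBlock comment states that the association ID comes from DescribeVpcs and that the primary CIDR block cannot be disassociated. A sketch of finding an IPv6 CIDR association on a VPC and removing it; the Ipv6CidrBlockAssociationSet field name is assumed to match this vendored revision, and the VPC ID is supplied by the caller:

    package example

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ec2"
    )

    // disassociateIpv6Cidr removes the first IPv6 CIDR block association
    // found on the given VPC. The primary IPv4 CIDR block cannot be removed.
    func disassociateIpv6Cidr(svc *ec2.EC2, vpcID string) error {
        out, err := svc.DescribeVpcs(&ec2.DescribeVpcsInput{
            VpcIds: []*string{aws.String(vpcID)},
        })
        if err != nil {
            return err
        }
        if len(out.Vpcs) == 0 || len(out.Vpcs[0].Ipv6CidrBlockAssociationSet) == 0 {
            return fmt.Errorf("no IPv6 CIDR block association on %s", vpcID)
        }

        _, err = svc.DisassociateVpcCidrBlock(&ec2.DisassociateVpcCidrBlockInput{
            AssociationId: out.Vpcs[0].Ipv6CidrBlockAssociationSet[0].AssociationId,
        })
        return err
    }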
The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See EnableVgwRoutePropagation for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the EnableVgwRoutePropagation method directly -// instead. +// See EnableVgwRoutePropagation for more information on using the EnableVgwRoutePropagation +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the EnableVgwRoutePropagationRequest method. // req, resp := client.EnableVgwRoutePropagationRequest(params) @@ -11103,27 +14767,41 @@ func (c *EC2) EnableVgwRoutePropagationRequest(input *EnableVgwRoutePropagationI // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableVgwRoutePropagation func (c *EC2) EnableVgwRoutePropagation(input *EnableVgwRoutePropagationInput) (*EnableVgwRoutePropagationOutput, error) { req, out := c.EnableVgwRoutePropagationRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// EnableVgwRoutePropagationWithContext is the same as EnableVgwRoutePropagation with the addition of +// the ability to pass a context and additional request options. +// +// See EnableVgwRoutePropagation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) EnableVgwRoutePropagationWithContext(ctx aws.Context, input *EnableVgwRoutePropagationInput, opts ...request.Option) (*EnableVgwRoutePropagationOutput, error) { + req, out := c.EnableVgwRoutePropagationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opEnableVolumeIO = "EnableVolumeIO" // EnableVolumeIORequest generates a "aws/request.Request" representing the // client's request for the EnableVolumeIO operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See EnableVolumeIO for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. 
If -// you just want the service response, call the EnableVolumeIO method directly -// instead. +// See EnableVolumeIO for more information on using the EnableVolumeIO +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the EnableVolumeIORequest method. // req, resp := client.EnableVolumeIORequest(params) @@ -11166,27 +14844,41 @@ func (c *EC2) EnableVolumeIORequest(input *EnableVolumeIOInput) (req *request.Re // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableVolumeIO func (c *EC2) EnableVolumeIO(input *EnableVolumeIOInput) (*EnableVolumeIOOutput, error) { req, out := c.EnableVolumeIORequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// EnableVolumeIOWithContext is the same as EnableVolumeIO with the addition of +// the ability to pass a context and additional request options. +// +// See EnableVolumeIO for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) EnableVolumeIOWithContext(ctx aws.Context, input *EnableVolumeIOInput, opts ...request.Option) (*EnableVolumeIOOutput, error) { + req, out := c.EnableVolumeIORequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opEnableVpcClassicLink = "EnableVpcClassicLink" // EnableVpcClassicLinkRequest generates a "aws/request.Request" representing the // client's request for the EnableVpcClassicLink operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See EnableVpcClassicLink for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the EnableVpcClassicLink method directly -// instead. +// See EnableVpcClassicLink for more information on using the EnableVpcClassicLink +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the EnableVpcClassicLinkRequest method. 
// req, resp := client.EnableVpcClassicLinkRequest(params) @@ -11232,27 +14924,41 @@ func (c *EC2) EnableVpcClassicLinkRequest(input *EnableVpcClassicLinkInput) (req // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableVpcClassicLink func (c *EC2) EnableVpcClassicLink(input *EnableVpcClassicLinkInput) (*EnableVpcClassicLinkOutput, error) { req, out := c.EnableVpcClassicLinkRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// EnableVpcClassicLinkWithContext is the same as EnableVpcClassicLink with the addition of +// the ability to pass a context and additional request options. +// +// See EnableVpcClassicLink for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) EnableVpcClassicLinkWithContext(ctx aws.Context, input *EnableVpcClassicLinkInput, opts ...request.Option) (*EnableVpcClassicLinkOutput, error) { + req, out := c.EnableVpcClassicLinkRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opEnableVpcClassicLinkDnsSupport = "EnableVpcClassicLinkDnsSupport" // EnableVpcClassicLinkDnsSupportRequest generates a "aws/request.Request" representing the // client's request for the EnableVpcClassicLinkDnsSupport operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See EnableVpcClassicLinkDnsSupport for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the EnableVpcClassicLinkDnsSupport method directly -// instead. +// See EnableVpcClassicLinkDnsSupport for more information on using the EnableVpcClassicLinkDnsSupport +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the EnableVpcClassicLinkDnsSupportRequest method. 
// req, resp := client.EnableVpcClassicLinkDnsSupportRequest(params) @@ -11298,27 +15004,41 @@ func (c *EC2) EnableVpcClassicLinkDnsSupportRequest(input *EnableVpcClassicLinkD // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableVpcClassicLinkDnsSupport func (c *EC2) EnableVpcClassicLinkDnsSupport(input *EnableVpcClassicLinkDnsSupportInput) (*EnableVpcClassicLinkDnsSupportOutput, error) { req, out := c.EnableVpcClassicLinkDnsSupportRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// EnableVpcClassicLinkDnsSupportWithContext is the same as EnableVpcClassicLinkDnsSupport with the addition of +// the ability to pass a context and additional request options. +// +// See EnableVpcClassicLinkDnsSupport for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) EnableVpcClassicLinkDnsSupportWithContext(ctx aws.Context, input *EnableVpcClassicLinkDnsSupportInput, opts ...request.Option) (*EnableVpcClassicLinkDnsSupportOutput, error) { + req, out := c.EnableVpcClassicLinkDnsSupportRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetConsoleOutput = "GetConsoleOutput" // GetConsoleOutputRequest generates a "aws/request.Request" representing the // client's request for the GetConsoleOutput operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See GetConsoleOutput for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetConsoleOutput method directly -// instead. +// See GetConsoleOutput for more information on using the GetConsoleOutput +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetConsoleOutputRequest method. // req, resp := client.GetConsoleOutputRequest(params) @@ -11355,7 +15075,7 @@ func (c *EC2) GetConsoleOutputRequest(input *GetConsoleOutputInput) (req *reques // the Amazon EC2 API and command line interface. // // Instance console output is buffered and posted shortly after instance boot, -// reboot, and termination. Amazon EC2 preserves the most recent 64 KB output +// reboot, and termination. Amazon EC2 preserves the most recent 64 KB output, // which is available for at least one hour after the most recent post. 
// // For Linux instances, the instance console output displays the exact console @@ -11375,27 +15095,41 @@ func (c *EC2) GetConsoleOutputRequest(input *GetConsoleOutputInput) (req *reques // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetConsoleOutput func (c *EC2) GetConsoleOutput(input *GetConsoleOutputInput) (*GetConsoleOutputOutput, error) { req, out := c.GetConsoleOutputRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetConsoleOutputWithContext is the same as GetConsoleOutput with the addition of +// the ability to pass a context and additional request options. +// +// See GetConsoleOutput for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetConsoleOutputWithContext(ctx aws.Context, input *GetConsoleOutputInput, opts ...request.Option) (*GetConsoleOutputOutput, error) { + req, out := c.GetConsoleOutputRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetConsoleScreenshot = "GetConsoleScreenshot" // GetConsoleScreenshotRequest generates a "aws/request.Request" representing the // client's request for the GetConsoleScreenshot operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See GetConsoleScreenshot for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetConsoleScreenshot method directly -// instead. +// See GetConsoleScreenshot for more information on using the GetConsoleScreenshot +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetConsoleScreenshotRequest method. // req, resp := client.GetConsoleScreenshotRequest(params) @@ -11437,27 +15171,41 @@ func (c *EC2) GetConsoleScreenshotRequest(input *GetConsoleScreenshotInput) (req // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetConsoleScreenshot func (c *EC2) GetConsoleScreenshot(input *GetConsoleScreenshotInput) (*GetConsoleScreenshotOutput, error) { req, out := c.GetConsoleScreenshotRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetConsoleScreenshotWithContext is the same as GetConsoleScreenshot with the addition of +// the ability to pass a context and additional request options. +// +// See GetConsoleScreenshot for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
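GetConsoleOutput returns at most the most recent 64 KB of console output, and the Output field is base64-encoded. A short sketch of fetching and decoding it, assuming a valid instance ID:

    package example

    import (
        "encoding/base64"
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ec2"
    )

    // printConsoleOutput fetches and decodes an instance's console output.
    func printConsoleOutput(svc *ec2.EC2, instanceID string) error {
        out, err := svc.GetConsoleOutput(&ec2.GetConsoleOutputInput{
            InstanceId: aws.String(instanceID),
        })
        if err != nil {
            return err
        }
        decoded, err := base64.StdEncoding.DecodeString(aws.StringValue(out.Output))
        if err != nil {
            return err
        }
        fmt.Printf("console output (%d bytes):\n%s\n", len(decoded), decoded)
        return nil
    }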
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetConsoleScreenshotWithContext(ctx aws.Context, input *GetConsoleScreenshotInput, opts ...request.Option) (*GetConsoleScreenshotOutput, error) { + req, out := c.GetConsoleScreenshotRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetHostReservationPurchasePreview = "GetHostReservationPurchasePreview" // GetHostReservationPurchasePreviewRequest generates a "aws/request.Request" representing the // client's request for the GetHostReservationPurchasePreview operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See GetHostReservationPurchasePreview for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetHostReservationPurchasePreview method directly -// instead. +// See GetHostReservationPurchasePreview for more information on using the GetHostReservationPurchasePreview +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetHostReservationPurchasePreviewRequest method. // req, resp := client.GetHostReservationPurchasePreviewRequest(params) @@ -11502,27 +15250,41 @@ func (c *EC2) GetHostReservationPurchasePreviewRequest(input *GetHostReservation // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetHostReservationPurchasePreview func (c *EC2) GetHostReservationPurchasePreview(input *GetHostReservationPurchasePreviewInput) (*GetHostReservationPurchasePreviewOutput, error) { req, out := c.GetHostReservationPurchasePreviewRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetHostReservationPurchasePreviewWithContext is the same as GetHostReservationPurchasePreview with the addition of +// the ability to pass a context and additional request options. +// +// See GetHostReservationPurchasePreview for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetHostReservationPurchasePreviewWithContext(ctx aws.Context, input *GetHostReservationPurchasePreviewInput, opts ...request.Option) (*GetHostReservationPurchasePreviewOutput, error) { + req, out := c.GetHostReservationPurchasePreviewRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() } const opGetPasswordData = "GetPasswordData" // GetPasswordDataRequest generates a "aws/request.Request" representing the // client's request for the GetPasswordData operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See GetPasswordData for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetPasswordData method directly -// instead. +// See GetPasswordData for more information on using the GetPasswordData +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetPasswordDataRequest method. // req, resp := client.GetPasswordDataRequest(params) @@ -11551,20 +15313,24 @@ func (c *EC2) GetPasswordDataRequest(input *GetPasswordDataInput) (req *request. // GetPasswordData API operation for Amazon Elastic Compute Cloud. // -// Retrieves the encrypted administrator password for an instance running Windows. +// Retrieves the encrypted administrator password for a running Windows instance. +// +// The Windows password is generated at boot by the EC2Config service or EC2Launch +// scripts (Windows Server 2016 and later). This usually only happens the first +// time an instance is launched. For more information, see EC2Config (http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/UsingConfig_WinAMI.html) +// and EC2Launch (http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ec2launch.html) +// in the Amazon Elastic Compute Cloud User Guide. // -// The Windows password is generated at boot if the EC2Config service plugin, -// Ec2SetPassword, is enabled. This usually only happens the first time an AMI -// is launched, and then Ec2SetPassword is automatically disabled. The password -// is not generated for rebundled AMIs unless Ec2SetPassword is enabled before -// bundling. +// For the EC2Config service, the password is not generated for rebundled AMIs +// unless Ec2SetPassword is enabled before bundling. // // The password is encrypted using the key pair that you specified when you // launched the instance. You must provide the corresponding key pair file. // -// Password generation and encryption takes a few moments. We recommend that -// you wait up to 15 minutes after launching an instance before trying to retrieve -// the generated password. +// When you launch an instance, password generation and encryption may take +// a few minutes. If you try to retrieve the password before it's available, +// the output returns an empty string. We recommend that you wait up to 15 minutes +// after launching an instance before trying to retrieve the generated password. // // Returns awserr.Error for service API and SDK errors. 
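To ground the GetPasswordData notes above, here is a minimal, hypothetical sketch (region, instance ID, and the retry message are placeholders, not part of this change) of fetching the Windows password and handling the empty-string case while the password is still being generated:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// Assumed setup: credentials and region come from the usual SDK sources.
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
	svc := ec2.New(sess)

	out, err := svc.GetPasswordData(&ec2.GetPasswordDataInput{
		InstanceId: aws.String("i-0123456789abcdef0"), // placeholder instance ID
	})
	if err != nil {
		log.Fatal(err)
	}
	// As the comment above notes, the password may not be available yet;
	// in that case the output carries an empty string rather than an error.
	if aws.StringValue(out.PasswordData) == "" {
		fmt.Println("password not ready yet; retry after a few minutes")
		return
	}
	// The value is base64-encoded and encrypted with the instance's key pair.
	fmt.Println(aws.StringValue(out.PasswordData))
}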
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -11575,27 +15341,41 @@ func (c *EC2) GetPasswordDataRequest(input *GetPasswordDataInput) (req *request. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetPasswordData func (c *EC2) GetPasswordData(input *GetPasswordDataInput) (*GetPasswordDataOutput, error) { req, out := c.GetPasswordDataRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetPasswordDataWithContext is the same as GetPasswordData with the addition of +// the ability to pass a context and additional request options. +// +// See GetPasswordData for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetPasswordDataWithContext(ctx aws.Context, input *GetPasswordDataInput, opts ...request.Option) (*GetPasswordDataOutput, error) { + req, out := c.GetPasswordDataRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetReservedInstancesExchangeQuote = "GetReservedInstancesExchangeQuote" // GetReservedInstancesExchangeQuoteRequest generates a "aws/request.Request" representing the // client's request for the GetReservedInstancesExchangeQuote operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See GetReservedInstancesExchangeQuote for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetReservedInstancesExchangeQuote method directly -// instead. +// See GetReservedInstancesExchangeQuote for more information on using the GetReservedInstancesExchangeQuote +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetReservedInstancesExchangeQuoteRequest method. 
// req, resp := client.GetReservedInstancesExchangeQuoteRequest(params) @@ -11637,27 +15417,41 @@ func (c *EC2) GetReservedInstancesExchangeQuoteRequest(input *GetReservedInstanc // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetReservedInstancesExchangeQuote func (c *EC2) GetReservedInstancesExchangeQuote(input *GetReservedInstancesExchangeQuoteInput) (*GetReservedInstancesExchangeQuoteOutput, error) { req, out := c.GetReservedInstancesExchangeQuoteRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetReservedInstancesExchangeQuoteWithContext is the same as GetReservedInstancesExchangeQuote with the addition of +// the ability to pass a context and additional request options. +// +// See GetReservedInstancesExchangeQuote for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetReservedInstancesExchangeQuoteWithContext(ctx aws.Context, input *GetReservedInstancesExchangeQuoteInput, opts ...request.Option) (*GetReservedInstancesExchangeQuoteOutput, error) { + req, out := c.GetReservedInstancesExchangeQuoteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opImportImage = "ImportImage" // ImportImageRequest generates a "aws/request.Request" representing the // client's request for the ImportImage operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See ImportImage for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ImportImage method directly -// instead. +// See ImportImage for more information on using the ImportImage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ImportImageRequest method. // req, resp := client.ImportImageRequest(params) @@ -11700,27 +15494,41 @@ func (c *EC2) ImportImageRequest(input *ImportImageInput) (req *request.Request, // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ImportImage func (c *EC2) ImportImage(input *ImportImageInput) (*ImportImageOutput, error) { req, out := c.ImportImageRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ImportImageWithContext is the same as ImportImage with the addition of +// the ability to pass a context and additional request options. +// +// See ImportImage for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ImportImageWithContext(ctx aws.Context, input *ImportImageInput, opts ...request.Option) (*ImportImageOutput, error) { + req, out := c.ImportImageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opImportInstance = "ImportInstance" // ImportInstanceRequest generates a "aws/request.Request" representing the // client's request for the ImportInstance operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See ImportInstance for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ImportInstance method directly -// instead. +// See ImportInstance for more information on using the ImportInstance +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ImportInstanceRequest method. // req, resp := client.ImportInstanceRequest(params) @@ -11766,27 +15574,41 @@ func (c *EC2) ImportInstanceRequest(input *ImportInstanceInput) (req *request.Re // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ImportInstance func (c *EC2) ImportInstance(input *ImportInstanceInput) (*ImportInstanceOutput, error) { req, out := c.ImportInstanceRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ImportInstanceWithContext is the same as ImportInstance with the addition of +// the ability to pass a context and additional request options. +// +// See ImportInstance for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ImportInstanceWithContext(ctx aws.Context, input *ImportInstanceInput, opts ...request.Option) (*ImportInstanceOutput, error) { + req, out := c.ImportInstanceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opImportKeyPair = "ImportKeyPair" // ImportKeyPairRequest generates a "aws/request.Request" representing the // client's request for the ImportKeyPair operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. 
// -// See ImportKeyPair for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ImportKeyPair method directly -// instead. +// See ImportKeyPair for more information on using the ImportKeyPair +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ImportKeyPairRequest method. // req, resp := client.ImportKeyPairRequest(params) @@ -11833,27 +15655,41 @@ func (c *EC2) ImportKeyPairRequest(input *ImportKeyPairInput) (req *request.Requ // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ImportKeyPair func (c *EC2) ImportKeyPair(input *ImportKeyPairInput) (*ImportKeyPairOutput, error) { req, out := c.ImportKeyPairRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ImportKeyPairWithContext is the same as ImportKeyPair with the addition of +// the ability to pass a context and additional request options. +// +// See ImportKeyPair for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ImportKeyPairWithContext(ctx aws.Context, input *ImportKeyPairInput, opts ...request.Option) (*ImportKeyPairOutput, error) { + req, out := c.ImportKeyPairRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opImportSnapshot = "ImportSnapshot" // ImportSnapshotRequest generates a "aws/request.Request" representing the // client's request for the ImportSnapshot operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See ImportSnapshot for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ImportSnapshot method directly -// instead. +// See ImportSnapshot for more information on using the ImportSnapshot +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
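As a hedged illustration of the request-lifecycle customization described just above (the header name and description value are made up for the example), one can build the request object first, adjust it, and only then call Send:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	// Build the request without sending it, so it can be customized first.
	req, out := svc.ImportSnapshotRequest(&ec2.ImportSnapshotInput{
		Description: aws.String("example import"), // placeholder description
	})

	// Example customization: inject a custom header before the call is made.
	req.HTTPRequest.Header.Set("X-Example-Trace-Id", "demo-123") // hypothetical header

	// "out" is only valid once Send returns without error.
	if err := req.Send(); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out)
}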
// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ImportSnapshotRequest method. // req, resp := client.ImportSnapshotRequest(params) @@ -11893,27 +15729,41 @@ func (c *EC2) ImportSnapshotRequest(input *ImportSnapshotInput) (req *request.Re // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ImportSnapshot func (c *EC2) ImportSnapshot(input *ImportSnapshotInput) (*ImportSnapshotOutput, error) { req, out := c.ImportSnapshotRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ImportSnapshotWithContext is the same as ImportSnapshot with the addition of +// the ability to pass a context and additional request options. +// +// See ImportSnapshot for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ImportSnapshotWithContext(ctx aws.Context, input *ImportSnapshotInput, opts ...request.Option) (*ImportSnapshotOutput, error) { + req, out := c.ImportSnapshotRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opImportVolume = "ImportVolume" // ImportVolumeRequest generates a "aws/request.Request" representing the // client's request for the ImportVolume operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See ImportVolume for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ImportVolume method directly -// instead. +// See ImportVolume for more information on using the ImportVolume +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ImportVolumeRequest method. // req, resp := client.ImportVolumeRequest(params) @@ -11957,27 +15807,115 @@ func (c *EC2) ImportVolumeRequest(input *ImportVolumeInput) (req *request.Reques // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ImportVolume func (c *EC2) ImportVolume(input *ImportVolumeInput) (*ImportVolumeOutput, error) { req, out := c.ImportVolumeRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ImportVolumeWithContext is the same as ImportVolume with the addition of +// the ability to pass a context and additional request options. +// +// See ImportVolume for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ImportVolumeWithContext(ctx aws.Context, input *ImportVolumeInput, opts ...request.Option) (*ImportVolumeOutput, error) { + req, out := c.ImportVolumeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyFpgaImageAttribute = "ModifyFpgaImageAttribute" + +// ModifyFpgaImageAttributeRequest generates a "aws/request.Request" representing the +// client's request for the ModifyFpgaImageAttribute operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyFpgaImageAttribute for more information on using the ModifyFpgaImageAttribute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyFpgaImageAttributeRequest method. +// req, resp := client.ModifyFpgaImageAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyFpgaImageAttribute +func (c *EC2) ModifyFpgaImageAttributeRequest(input *ModifyFpgaImageAttributeInput) (req *request.Request, output *ModifyFpgaImageAttributeOutput) { + op := &request.Operation{ + Name: opModifyFpgaImageAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyFpgaImageAttributeInput{} + } + + output = &ModifyFpgaImageAttributeOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyFpgaImageAttribute API operation for Amazon Elastic Compute Cloud. +// +// Modifies the specified attribute of the specified Amazon FPGA Image (AFI). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyFpgaImageAttribute for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyFpgaImageAttribute +func (c *EC2) ModifyFpgaImageAttribute(input *ModifyFpgaImageAttributeInput) (*ModifyFpgaImageAttributeOutput, error) { + req, out := c.ModifyFpgaImageAttributeRequest(input) + return out, req.Send() +} + +// ModifyFpgaImageAttributeWithContext is the same as ModifyFpgaImageAttribute with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyFpgaImageAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) ModifyFpgaImageAttributeWithContext(ctx aws.Context, input *ModifyFpgaImageAttributeInput, opts ...request.Option) (*ModifyFpgaImageAttributeOutput, error) { + req, out := c.ModifyFpgaImageAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opModifyHosts = "ModifyHosts" // ModifyHostsRequest generates a "aws/request.Request" representing the // client's request for the ModifyHosts operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See ModifyHosts for usage and error information. +// See ModifyHosts for more information on using the ModifyHosts +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ModifyHosts method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ModifyHostsRequest method. // req, resp := client.ModifyHostsRequest(params) @@ -12023,27 +15961,41 @@ func (c *EC2) ModifyHostsRequest(input *ModifyHostsInput) (req *request.Request, // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyHosts func (c *EC2) ModifyHosts(input *ModifyHostsInput) (*ModifyHostsOutput, error) { req, out := c.ModifyHostsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ModifyHostsWithContext is the same as ModifyHosts with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyHosts for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyHostsWithContext(ctx aws.Context, input *ModifyHostsInput, opts ...request.Option) (*ModifyHostsOutput, error) { + req, out := c.ModifyHostsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opModifyIdFormat = "ModifyIdFormat" // ModifyIdFormatRequest generates a "aws/request.Request" representing the // client's request for the ModifyIdFormat operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See ModifyIdFormat for usage and error information. 
+// See ModifyIdFormat for more information on using the ModifyIdFormat +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ModifyIdFormat method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ModifyIdFormatRequest method. // req, resp := client.ModifyIdFormatRequest(params) @@ -12099,27 +16051,41 @@ func (c *EC2) ModifyIdFormatRequest(input *ModifyIdFormatInput) (req *request.Re // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyIdFormat func (c *EC2) ModifyIdFormat(input *ModifyIdFormatInput) (*ModifyIdFormatOutput, error) { req, out := c.ModifyIdFormatRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ModifyIdFormatWithContext is the same as ModifyIdFormat with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyIdFormat for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyIdFormatWithContext(ctx aws.Context, input *ModifyIdFormatInput, opts ...request.Option) (*ModifyIdFormatOutput, error) { + req, out := c.ModifyIdFormatRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opModifyIdentityIdFormat = "ModifyIdentityIdFormat" // ModifyIdentityIdFormatRequest generates a "aws/request.Request" representing the // client's request for the ModifyIdentityIdFormat operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See ModifyIdentityIdFormat for usage and error information. +// See ModifyIdentityIdFormat for more information on using the ModifyIdentityIdFormat +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ModifyIdentityIdFormat method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ModifyIdentityIdFormatRequest method. 
// req, resp := client.ModifyIdentityIdFormatRequest(params) @@ -12175,27 +16141,41 @@ func (c *EC2) ModifyIdentityIdFormatRequest(input *ModifyIdentityIdFormatInput) // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyIdentityIdFormat func (c *EC2) ModifyIdentityIdFormat(input *ModifyIdentityIdFormatInput) (*ModifyIdentityIdFormatOutput, error) { req, out := c.ModifyIdentityIdFormatRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ModifyIdentityIdFormatWithContext is the same as ModifyIdentityIdFormat with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyIdentityIdFormat for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyIdentityIdFormatWithContext(ctx aws.Context, input *ModifyIdentityIdFormatInput, opts ...request.Option) (*ModifyIdentityIdFormatOutput, error) { + req, out := c.ModifyIdentityIdFormatRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opModifyImageAttribute = "ModifyImageAttribute" // ModifyImageAttributeRequest generates a "aws/request.Request" representing the // client's request for the ModifyImageAttribute operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See ModifyImageAttribute for usage and error information. +// See ModifyImageAttribute for more information on using the ModifyImageAttribute +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ModifyImageAttribute method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ModifyImageAttributeRequest method. // req, resp := client.ModifyImageAttributeRequest(params) @@ -12246,27 +16226,41 @@ func (c *EC2) ModifyImageAttributeRequest(input *ModifyImageAttributeInput) (req // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyImageAttribute func (c *EC2) ModifyImageAttribute(input *ModifyImageAttributeInput) (*ModifyImageAttributeOutput, error) { req, out := c.ModifyImageAttributeRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ModifyImageAttributeWithContext is the same as ModifyImageAttribute with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyImageAttribute for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyImageAttributeWithContext(ctx aws.Context, input *ModifyImageAttributeInput, opts ...request.Option) (*ModifyImageAttributeOutput, error) { + req, out := c.ModifyImageAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opModifyInstanceAttribute = "ModifyInstanceAttribute" // ModifyInstanceAttributeRequest generates a "aws/request.Request" representing the // client's request for the ModifyInstanceAttribute operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See ModifyInstanceAttribute for usage and error information. +// See ModifyInstanceAttribute for more information on using the ModifyInstanceAttribute +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ModifyInstanceAttribute method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ModifyInstanceAttributeRequest method. // req, resp := client.ModifyInstanceAttributeRequest(params) @@ -12313,27 +16307,41 @@ func (c *EC2) ModifyInstanceAttributeRequest(input *ModifyInstanceAttributeInput // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyInstanceAttribute func (c *EC2) ModifyInstanceAttribute(input *ModifyInstanceAttributeInput) (*ModifyInstanceAttributeOutput, error) { req, out := c.ModifyInstanceAttributeRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ModifyInstanceAttributeWithContext is the same as ModifyInstanceAttribute with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyInstanceAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyInstanceAttributeWithContext(ctx aws.Context, input *ModifyInstanceAttributeInput, opts ...request.Option) (*ModifyInstanceAttributeOutput, error) { + req, out := c.ModifyInstanceAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() } const opModifyInstancePlacement = "ModifyInstancePlacement" // ModifyInstancePlacementRequest generates a "aws/request.Request" representing the // client's request for the ModifyInstancePlacement operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See ModifyInstancePlacement for usage and error information. +// See ModifyInstancePlacement for more information on using the ModifyInstancePlacement +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ModifyInstancePlacement method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ModifyInstancePlacementRequest method. // req, resp := client.ModifyInstancePlacementRequest(params) @@ -12391,27 +16399,41 @@ func (c *EC2) ModifyInstancePlacementRequest(input *ModifyInstancePlacementInput // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyInstancePlacement func (c *EC2) ModifyInstancePlacement(input *ModifyInstancePlacementInput) (*ModifyInstancePlacementOutput, error) { req, out := c.ModifyInstancePlacementRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ModifyInstancePlacementWithContext is the same as ModifyInstancePlacement with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyInstancePlacement for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyInstancePlacementWithContext(ctx aws.Context, input *ModifyInstancePlacementInput, opts ...request.Option) (*ModifyInstancePlacementOutput, error) { + req, out := c.ModifyInstancePlacementRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opModifyNetworkInterfaceAttribute = "ModifyNetworkInterfaceAttribute" // ModifyNetworkInterfaceAttributeRequest generates a "aws/request.Request" representing the // client's request for the ModifyNetworkInterfaceAttribute operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
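Since several of the new *WithContext methods land in this hunk, a small hypothetical sketch of the cancellation contract they all share (the timeout, instance ID, and attribute choice are illustrative only):

package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	// Any non-nil context.Context satisfies aws.Context; passing a nil context
	// panics, as the comments above state.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	_, err := svc.ModifyInstanceAttributeWithContext(ctx, &ec2.ModifyInstanceAttributeInput{
		InstanceId:            aws.String("i-0123456789abcdef0"), // placeholder
		DisableApiTermination: &ec2.AttributeBooleanValue{Value: aws.Bool(true)},
	})
	if err != nil {
		// A context deadline or cancellation surfaces here as a request error.
		log.Fatal(err)
	}
}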
// -// See ModifyNetworkInterfaceAttribute for usage and error information. +// See ModifyNetworkInterfaceAttribute for more information on using the ModifyNetworkInterfaceAttribute +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ModifyNetworkInterfaceAttribute method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ModifyNetworkInterfaceAttributeRequest method. // req, resp := client.ModifyNetworkInterfaceAttributeRequest(params) @@ -12454,27 +16476,41 @@ func (c *EC2) ModifyNetworkInterfaceAttributeRequest(input *ModifyNetworkInterfa // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyNetworkInterfaceAttribute func (c *EC2) ModifyNetworkInterfaceAttribute(input *ModifyNetworkInterfaceAttributeInput) (*ModifyNetworkInterfaceAttributeOutput, error) { req, out := c.ModifyNetworkInterfaceAttributeRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ModifyNetworkInterfaceAttributeWithContext is the same as ModifyNetworkInterfaceAttribute with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyNetworkInterfaceAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyNetworkInterfaceAttributeWithContext(ctx aws.Context, input *ModifyNetworkInterfaceAttributeInput, opts ...request.Option) (*ModifyNetworkInterfaceAttributeOutput, error) { + req, out := c.ModifyNetworkInterfaceAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opModifyReservedInstances = "ModifyReservedInstances" // ModifyReservedInstancesRequest generates a "aws/request.Request" representing the // client's request for the ModifyReservedInstances operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See ModifyReservedInstances for usage and error information. +// See ModifyReservedInstances for more information on using the ModifyReservedInstances +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ModifyReservedInstances method directly -// instead. 
+// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ModifyReservedInstancesRequest method. // req, resp := client.ModifyReservedInstancesRequest(params) @@ -12520,27 +16556,41 @@ func (c *EC2) ModifyReservedInstancesRequest(input *ModifyReservedInstancesInput // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyReservedInstances func (c *EC2) ModifyReservedInstances(input *ModifyReservedInstancesInput) (*ModifyReservedInstancesOutput, error) { req, out := c.ModifyReservedInstancesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ModifyReservedInstancesWithContext is the same as ModifyReservedInstances with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyReservedInstances for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyReservedInstancesWithContext(ctx aws.Context, input *ModifyReservedInstancesInput, opts ...request.Option) (*ModifyReservedInstancesOutput, error) { + req, out := c.ModifyReservedInstancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opModifySnapshotAttribute = "ModifySnapshotAttribute" // ModifySnapshotAttributeRequest generates a "aws/request.Request" representing the // client's request for the ModifySnapshotAttribute operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See ModifySnapshotAttribute for usage and error information. +// See ModifySnapshotAttribute for more information on using the ModifySnapshotAttribute +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ModifySnapshotAttribute method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ModifySnapshotAttributeRequest method. 
// req, resp := client.ModifySnapshotAttributeRequest(params) @@ -12594,27 +16644,41 @@ func (c *EC2) ModifySnapshotAttributeRequest(input *ModifySnapshotAttributeInput // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifySnapshotAttribute func (c *EC2) ModifySnapshotAttribute(input *ModifySnapshotAttributeInput) (*ModifySnapshotAttributeOutput, error) { req, out := c.ModifySnapshotAttributeRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ModifySnapshotAttributeWithContext is the same as ModifySnapshotAttribute with the addition of +// the ability to pass a context and additional request options. +// +// See ModifySnapshotAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifySnapshotAttributeWithContext(ctx aws.Context, input *ModifySnapshotAttributeInput, opts ...request.Option) (*ModifySnapshotAttributeOutput, error) { + req, out := c.ModifySnapshotAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opModifySpotFleetRequest = "ModifySpotFleetRequest" // ModifySpotFleetRequestRequest generates a "aws/request.Request" representing the // client's request for the ModifySpotFleetRequest operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See ModifySpotFleetRequest for usage and error information. +// See ModifySpotFleetRequest for more information on using the ModifySpotFleetRequest +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ModifySpotFleetRequest method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ModifySpotFleetRequestRequest method. // req, resp := client.ModifySpotFleetRequestRequest(params) @@ -12673,27 +16737,41 @@ func (c *EC2) ModifySpotFleetRequestRequest(input *ModifySpotFleetRequestInput) // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifySpotFleetRequest func (c *EC2) ModifySpotFleetRequest(input *ModifySpotFleetRequestInput) (*ModifySpotFleetRequestOutput, error) { req, out := c.ModifySpotFleetRequestRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ModifySpotFleetRequestWithContext is the same as ModifySpotFleetRequest with the addition of +// the ability to pass a context and additional request options. 
+// +// See ModifySpotFleetRequest for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifySpotFleetRequestWithContext(ctx aws.Context, input *ModifySpotFleetRequestInput, opts ...request.Option) (*ModifySpotFleetRequestOutput, error) { + req, out := c.ModifySpotFleetRequestRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opModifySubnetAttribute = "ModifySubnetAttribute" // ModifySubnetAttributeRequest generates a "aws/request.Request" representing the // client's request for the ModifySubnetAttribute operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See ModifySubnetAttribute for usage and error information. +// See ModifySubnetAttribute for more information on using the ModifySubnetAttribute +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ModifySubnetAttribute method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ModifySubnetAttributeRequest method. // req, resp := client.ModifySubnetAttributeRequest(params) @@ -12735,27 +16813,147 @@ func (c *EC2) ModifySubnetAttributeRequest(input *ModifySubnetAttributeInput) (r // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifySubnetAttribute func (c *EC2) ModifySubnetAttribute(input *ModifySubnetAttributeInput) (*ModifySubnetAttributeOutput, error) { req, out := c.ModifySubnetAttributeRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ModifySubnetAttributeWithContext is the same as ModifySubnetAttribute with the addition of +// the ability to pass a context and additional request options. +// +// See ModifySubnetAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifySubnetAttributeWithContext(ctx aws.Context, input *ModifySubnetAttributeInput, opts ...request.Option) (*ModifySubnetAttributeOutput, error) { + req, out := c.ModifySubnetAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opModifyVolume = "ModifyVolume" + +// ModifyVolumeRequest generates a "aws/request.Request" representing the +// client's request for the ModifyVolume operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyVolume for more information on using the ModifyVolume +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyVolumeRequest method. +// req, resp := client.ModifyVolumeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVolume +func (c *EC2) ModifyVolumeRequest(input *ModifyVolumeInput) (req *request.Request, output *ModifyVolumeOutput) { + op := &request.Operation{ + Name: opModifyVolume, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyVolumeInput{} + } + + output = &ModifyVolumeOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyVolume API operation for Amazon Elastic Compute Cloud. +// +// You can modify several parameters of an existing EBS volume, including volume +// size, volume type, and IOPS capacity. If your EBS volume is attached to a +// current-generation EC2 instance type, you may be able to apply these changes +// without stopping the instance or detaching the volume from it. For more information +// about modifying an EBS volume running Linux, see Modifying the Size, IOPS, +// or Type of an EBS Volume on Linux (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-expand-volume.html). +// For more information about modifying an EBS volume running Windows, see Modifying +// the Size, IOPS, or Type of an EBS Volume on Windows (http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ebs-expand-volume.html). +// +// When you complete a resize operation on your volume, you need to extend the +// volume's file-system size to take advantage of the new storage capacity. +// For information about extending a Linux file system, see Extending a Linux +// File System (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-expand-volume.html#recognize-expanded-volume-linux). +// For information about extending a Windows file system, see Extending a Windows +// File System (http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ebs-expand-volume.html#recognize-expanded-volume-windows). +// +// You can use CloudWatch Events to check the status of a modification to an +// EBS volume. For information about CloudWatch Events, see the Amazon CloudWatch +// Events User Guide (http://docs.aws.amazon.com/AmazonCloudWatch/latest/events/). +// You can also track the status of a modification using the DescribeVolumesModifications +// API. For information about tracking status changes using either method, see +// Monitoring Volume Modifications (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-expand-volume.html#monitoring_mods). +// +// With previous-generation instance types, resizing an EBS volume may require +// detaching and reattaching the volume or stopping and restarting the instance. 
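A hypothetical sketch of the resize-and-monitor flow the ModifyVolume comment describes (volume ID and target size are placeholders); the follow-up DescribeVolumesModifications call is the in-SDK alternative to CloudWatch Events mentioned above:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))
	volumeID := aws.String("vol-0123456789abcdef0") // placeholder volume ID

	// Request a larger size; volume type and IOPS can be changed the same way.
	modOut, err := svc.ModifyVolume(&ec2.ModifyVolumeInput{
		VolumeId: volumeID,
		Size:     aws.Int64(200), // target size in GiB (illustrative)
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("modification started:", modOut.VolumeModification)

	// Track progress with DescribeVolumesModifications instead of CloudWatch Events.
	descOut, err := svc.DescribeVolumesModifications(&ec2.DescribeVolumesModificationsInput{
		VolumeIds: []*string{volumeID},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range descOut.VolumesModifications {
		fmt.Println(aws.StringValue(m.VolumeId), aws.StringValue(m.ModificationState))
	}
	// Remember to extend the file system afterwards, as noted above.
}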
+// For more information about modifying an EBS volume running Linux, see Modifying +// the Size, IOPS, or Type of an EBS Volume on Linux (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-expand-volume.html). +// For more information about modifying an EBS volume running Windows, see Modifying +// the Size, IOPS, or Type of an EBS Volume on Windows (http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ebs-expand-volume.html). +// +// If you reach the maximum volume modification rate per volume limit, you will +// need to wait at least six hours before applying further modifications to +// the affected EBS volume. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyVolume for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVolume +func (c *EC2) ModifyVolume(input *ModifyVolumeInput) (*ModifyVolumeOutput, error) { + req, out := c.ModifyVolumeRequest(input) + return out, req.Send() +} + +// ModifyVolumeWithContext is the same as ModifyVolume with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyVolume for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyVolumeWithContext(ctx aws.Context, input *ModifyVolumeInput, opts ...request.Option) (*ModifyVolumeOutput, error) { + req, out := c.ModifyVolumeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opModifyVolumeAttribute = "ModifyVolumeAttribute" // ModifyVolumeAttributeRequest generates a "aws/request.Request" representing the // client's request for the ModifyVolumeAttribute operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See ModifyVolumeAttribute for usage and error information. +// See ModifyVolumeAttribute for more information on using the ModifyVolumeAttribute +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ModifyVolumeAttribute method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ModifyVolumeAttributeRequest method. 
// req, resp := client.ModifyVolumeAttributeRequest(params) @@ -12806,27 +17004,41 @@ func (c *EC2) ModifyVolumeAttributeRequest(input *ModifyVolumeAttributeInput) (r // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVolumeAttribute func (c *EC2) ModifyVolumeAttribute(input *ModifyVolumeAttributeInput) (*ModifyVolumeAttributeOutput, error) { req, out := c.ModifyVolumeAttributeRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ModifyVolumeAttributeWithContext is the same as ModifyVolumeAttribute with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyVolumeAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyVolumeAttributeWithContext(ctx aws.Context, input *ModifyVolumeAttributeInput, opts ...request.Option) (*ModifyVolumeAttributeOutput, error) { + req, out := c.ModifyVolumeAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opModifyVpcAttribute = "ModifyVpcAttribute" // ModifyVpcAttributeRequest generates a "aws/request.Request" representing the // client's request for the ModifyVpcAttribute operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See ModifyVpcAttribute for usage and error information. +// See ModifyVpcAttribute for more information on using the ModifyVpcAttribute +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ModifyVpcAttribute method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ModifyVpcAttributeRequest method. // req, resp := client.ModifyVpcAttributeRequest(params) @@ -12868,27 +17080,41 @@ func (c *EC2) ModifyVpcAttributeRequest(input *ModifyVpcAttributeInput) (req *re // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpcAttribute func (c *EC2) ModifyVpcAttribute(input *ModifyVpcAttributeInput) (*ModifyVpcAttributeOutput, error) { req, out := c.ModifyVpcAttributeRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ModifyVpcAttributeWithContext is the same as ModifyVpcAttribute with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyVpcAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyVpcAttributeWithContext(ctx aws.Context, input *ModifyVpcAttributeInput, opts ...request.Option) (*ModifyVpcAttributeOutput, error) { + req, out := c.ModifyVpcAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opModifyVpcEndpoint = "ModifyVpcEndpoint" // ModifyVpcEndpointRequest generates a "aws/request.Request" representing the // client's request for the ModifyVpcEndpoint operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See ModifyVpcEndpoint for usage and error information. +// See ModifyVpcEndpoint for more information on using the ModifyVpcEndpoint +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ModifyVpcEndpoint method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ModifyVpcEndpointRequest method. // req, resp := client.ModifyVpcEndpointRequest(params) @@ -12930,27 +17156,41 @@ func (c *EC2) ModifyVpcEndpointRequest(input *ModifyVpcEndpointInput) (req *requ // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpcEndpoint func (c *EC2) ModifyVpcEndpoint(input *ModifyVpcEndpointInput) (*ModifyVpcEndpointOutput, error) { req, out := c.ModifyVpcEndpointRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ModifyVpcEndpointWithContext is the same as ModifyVpcEndpoint with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyVpcEndpoint for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyVpcEndpointWithContext(ctx aws.Context, input *ModifyVpcEndpointInput, opts ...request.Option) (*ModifyVpcEndpointOutput, error) { + req, out := c.ModifyVpcEndpointRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opModifyVpcPeeringConnectionOptions = "ModifyVpcPeeringConnectionOptions" // ModifyVpcPeeringConnectionOptionsRequest generates a "aws/request.Request" representing the // client's request for the ModifyVpcPeeringConnectionOptions operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. 
+// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See ModifyVpcPeeringConnectionOptions for usage and error information. +// See ModifyVpcPeeringConnectionOptions for more information on using the ModifyVpcPeeringConnectionOptions +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ModifyVpcPeeringConnectionOptions method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ModifyVpcPeeringConnectionOptionsRequest method. // req, resp := client.ModifyVpcPeeringConnectionOptionsRequest(params) @@ -13009,27 +17249,41 @@ func (c *EC2) ModifyVpcPeeringConnectionOptionsRequest(input *ModifyVpcPeeringCo // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpcPeeringConnectionOptions func (c *EC2) ModifyVpcPeeringConnectionOptions(input *ModifyVpcPeeringConnectionOptionsInput) (*ModifyVpcPeeringConnectionOptionsOutput, error) { req, out := c.ModifyVpcPeeringConnectionOptionsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ModifyVpcPeeringConnectionOptionsWithContext is the same as ModifyVpcPeeringConnectionOptions with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyVpcPeeringConnectionOptions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyVpcPeeringConnectionOptionsWithContext(ctx aws.Context, input *ModifyVpcPeeringConnectionOptionsInput, opts ...request.Option) (*ModifyVpcPeeringConnectionOptionsOutput, error) { + req, out := c.ModifyVpcPeeringConnectionOptionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opMonitorInstances = "MonitorInstances" // MonitorInstancesRequest generates a "aws/request.Request" representing the // client's request for the MonitorInstances operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See MonitorInstances for usage and error information. +// See MonitorInstances for more information on using the MonitorInstances +// API call, and error handling. 
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the MonitorInstances method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the MonitorInstancesRequest method. // req, resp := client.MonitorInstancesRequest(params) @@ -13074,27 +17328,41 @@ func (c *EC2) MonitorInstancesRequest(input *MonitorInstancesInput) (req *reques // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/MonitorInstances func (c *EC2) MonitorInstances(input *MonitorInstancesInput) (*MonitorInstancesOutput, error) { req, out := c.MonitorInstancesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// MonitorInstancesWithContext is the same as MonitorInstances with the addition of +// the ability to pass a context and additional request options. +// +// See MonitorInstances for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) MonitorInstancesWithContext(ctx aws.Context, input *MonitorInstancesInput, opts ...request.Option) (*MonitorInstancesOutput, error) { + req, out := c.MonitorInstancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opMoveAddressToVpc = "MoveAddressToVpc" // MoveAddressToVpcRequest generates a "aws/request.Request" representing the // client's request for the MoveAddressToVpc operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See MoveAddressToVpc for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the MoveAddressToVpc method directly -// instead. +// See MoveAddressToVpc for more information on using the MoveAddressToVpc +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the MoveAddressToVpcRequest method. 
// req, resp := client.MoveAddressToVpcRequest(params) @@ -13140,27 +17408,41 @@ func (c *EC2) MoveAddressToVpcRequest(input *MoveAddressToVpcInput) (req *reques // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/MoveAddressToVpc func (c *EC2) MoveAddressToVpc(input *MoveAddressToVpcInput) (*MoveAddressToVpcOutput, error) { req, out := c.MoveAddressToVpcRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// MoveAddressToVpcWithContext is the same as MoveAddressToVpc with the addition of +// the ability to pass a context and additional request options. +// +// See MoveAddressToVpc for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) MoveAddressToVpcWithContext(ctx aws.Context, input *MoveAddressToVpcInput, opts ...request.Option) (*MoveAddressToVpcOutput, error) { + req, out := c.MoveAddressToVpcRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opPurchaseHostReservation = "PurchaseHostReservation" // PurchaseHostReservationRequest generates a "aws/request.Request" representing the // client's request for the PurchaseHostReservation operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See PurchaseHostReservation for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PurchaseHostReservation method directly -// instead. +// See PurchaseHostReservation for more information on using the PurchaseHostReservation +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PurchaseHostReservationRequest method. // req, resp := client.PurchaseHostReservationRequest(params) @@ -13203,27 +17485,41 @@ func (c *EC2) PurchaseHostReservationRequest(input *PurchaseHostReservationInput // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/PurchaseHostReservation func (c *EC2) PurchaseHostReservation(input *PurchaseHostReservationInput) (*PurchaseHostReservationOutput, error) { req, out := c.PurchaseHostReservationRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// PurchaseHostReservationWithContext is the same as PurchaseHostReservation with the addition of +// the ability to pass a context and additional request options. +// +// See PurchaseHostReservation for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) PurchaseHostReservationWithContext(ctx aws.Context, input *PurchaseHostReservationInput, opts ...request.Option) (*PurchaseHostReservationOutput, error) { + req, out := c.PurchaseHostReservationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opPurchaseReservedInstancesOffering = "PurchaseReservedInstancesOffering" // PurchaseReservedInstancesOfferingRequest generates a "aws/request.Request" representing the // client's request for the PurchaseReservedInstancesOffering operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See PurchaseReservedInstancesOffering for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PurchaseReservedInstancesOffering method directly -// instead. +// See PurchaseReservedInstancesOffering for more information on using the PurchaseReservedInstancesOffering +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PurchaseReservedInstancesOfferingRequest method. // req, resp := client.PurchaseReservedInstancesOfferingRequest(params) @@ -13272,27 +17568,41 @@ func (c *EC2) PurchaseReservedInstancesOfferingRequest(input *PurchaseReservedIn // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/PurchaseReservedInstancesOffering func (c *EC2) PurchaseReservedInstancesOffering(input *PurchaseReservedInstancesOfferingInput) (*PurchaseReservedInstancesOfferingOutput, error) { req, out := c.PurchaseReservedInstancesOfferingRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// PurchaseReservedInstancesOfferingWithContext is the same as PurchaseReservedInstancesOffering with the addition of +// the ability to pass a context and additional request options. +// +// See PurchaseReservedInstancesOffering for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) PurchaseReservedInstancesOfferingWithContext(ctx aws.Context, input *PurchaseReservedInstancesOfferingInput, opts ...request.Option) (*PurchaseReservedInstancesOfferingOutput, error) { + req, out := c.PurchaseReservedInstancesOfferingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opPurchaseScheduledInstances = "PurchaseScheduledInstances" // PurchaseScheduledInstancesRequest generates a "aws/request.Request" representing the // client's request for the PurchaseScheduledInstances operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See PurchaseScheduledInstances for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PurchaseScheduledInstances method directly -// instead. +// See PurchaseScheduledInstances for more information on using the PurchaseScheduledInstances +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PurchaseScheduledInstancesRequest method. // req, resp := client.PurchaseScheduledInstancesRequest(params) @@ -13341,27 +17651,41 @@ func (c *EC2) PurchaseScheduledInstancesRequest(input *PurchaseScheduledInstance // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/PurchaseScheduledInstances func (c *EC2) PurchaseScheduledInstances(input *PurchaseScheduledInstancesInput) (*PurchaseScheduledInstancesOutput, error) { req, out := c.PurchaseScheduledInstancesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// PurchaseScheduledInstancesWithContext is the same as PurchaseScheduledInstances with the addition of +// the ability to pass a context and additional request options. +// +// See PurchaseScheduledInstances for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) PurchaseScheduledInstancesWithContext(ctx aws.Context, input *PurchaseScheduledInstancesInput, opts ...request.Option) (*PurchaseScheduledInstancesOutput, error) { + req, out := c.PurchaseScheduledInstancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opRebootInstances = "RebootInstances" // RebootInstancesRequest generates a "aws/request.Request" representing the // client's request for the RebootInstances operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. 
+// value will be populated with the request's response once the request complets +// successfuly. // -// See RebootInstances for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the RebootInstances method directly -// instead. +// See RebootInstances for more information on using the RebootInstances +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the RebootInstancesRequest method. // req, resp := client.RebootInstancesRequest(params) @@ -13413,27 +17737,41 @@ func (c *EC2) RebootInstancesRequest(input *RebootInstancesInput) (req *request. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RebootInstances func (c *EC2) RebootInstances(input *RebootInstancesInput) (*RebootInstancesOutput, error) { req, out := c.RebootInstancesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// RebootInstancesWithContext is the same as RebootInstances with the addition of +// the ability to pass a context and additional request options. +// +// See RebootInstances for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) RebootInstancesWithContext(ctx aws.Context, input *RebootInstancesInput, opts ...request.Option) (*RebootInstancesOutput, error) { + req, out := c.RebootInstancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opRegisterImage = "RegisterImage" // RegisterImageRequest generates a "aws/request.Request" representing the // client's request for the RegisterImage operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See RegisterImage for usage and error information. +// See RegisterImage for more information on using the RegisterImage +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the RegisterImage method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the RegisterImageRequest method. // req, resp := client.RegisterImageRequest(params) @@ -13471,31 +17809,27 @@ func (c *EC2) RegisterImageRequest(input *RegisterImageInput) (req *request.Requ // in a single request, so you don't have to register the AMI yourself. // // You can also use RegisterImage to create an Amazon EBS-backed Linux AMI from -// a snapshot of a root device volume. For more information, see Launching an -// Instance from a Snapshot (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_LaunchingInstanceFromSnapshot.html) +// a snapshot of a root device volume. You specify the snapshot using the block +// device mapping. For more information, see Launching a Linux Instance from +// a Backup (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-launch-snapshot.html) // in the Amazon Elastic Compute Cloud User Guide. // +// You can't register an image where a secondary (non-root) snapshot has AWS +// Marketplace product codes. +// // Some Linux distributions, such as Red Hat Enterprise Linux (RHEL) and SUSE -// Linux Enterprise Server (SLES), use the EC2 billingProduct code associated -// with an AMI to verify subscription status for package updates. Creating an -// AMI from an EBS snapshot does not maintain this billing code, and subsequent +// Linux Enterprise Server (SLES), use the EC2 billing product code associated +// with an AMI to verify the subscription status for package updates. Creating +// an AMI from an EBS snapshot does not maintain this billing code, and subsequent // instances launched from such an AMI will not be able to connect to package -// update infrastructure. -// -// Similarly, although you can create a Windows AMI from a snapshot, you can't -// successfully launch an instance from the AMI. -// -// To create Windows AMIs or to create AMIs for Linux operating systems that -// must retain AMI billing codes to work properly, see CreateImage. +// update infrastructure. To create an AMI that must retain billing codes, see +// CreateImage. // // If needed, you can deregister an AMI at any time. Any modifications you make // to an AMI backed by an instance store volume invalidates its registration. // If you make changes to an image, deregister the previous image and register // the new image. // -// You can't register an image where a secondary (non-root) snapshot has AWS -// Marketplace product codes. -// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -13505,27 +17839,41 @@ func (c *EC2) RegisterImageRequest(input *RegisterImageInput) (req *request.Requ // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RegisterImage func (c *EC2) RegisterImage(input *RegisterImageInput) (*RegisterImageOutput, error) { req, out := c.RegisterImageRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// RegisterImageWithContext is the same as RegisterImage with the addition of +// the ability to pass a context and additional request options. +// +// See RegisterImage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) RegisterImageWithContext(ctx aws.Context, input *RegisterImageInput, opts ...request.Option) (*RegisterImageOutput, error) { + req, out := c.RegisterImageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opRejectVpcPeeringConnection = "RejectVpcPeeringConnection" // RejectVpcPeeringConnectionRequest generates a "aws/request.Request" representing the // client's request for the RejectVpcPeeringConnection operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See RejectVpcPeeringConnection for usage and error information. +// See RejectVpcPeeringConnection for more information on using the RejectVpcPeeringConnection +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the RejectVpcPeeringConnection method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the RejectVpcPeeringConnectionRequest method. // req, resp := client.RejectVpcPeeringConnectionRequest(params) @@ -13569,27 +17917,41 @@ func (c *EC2) RejectVpcPeeringConnectionRequest(input *RejectVpcPeeringConnectio // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RejectVpcPeeringConnection func (c *EC2) RejectVpcPeeringConnection(input *RejectVpcPeeringConnectionInput) (*RejectVpcPeeringConnectionOutput, error) { req, out := c.RejectVpcPeeringConnectionRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// RejectVpcPeeringConnectionWithContext is the same as RejectVpcPeeringConnection with the addition of +// the ability to pass a context and additional request options. +// +// See RejectVpcPeeringConnection for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) RejectVpcPeeringConnectionWithContext(ctx aws.Context, input *RejectVpcPeeringConnectionInput, opts ...request.Option) (*RejectVpcPeeringConnectionOutput, error) { + req, out := c.RejectVpcPeeringConnectionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opReleaseAddress = "ReleaseAddress" // ReleaseAddressRequest generates a "aws/request.Request" representing the // client's request for the ReleaseAddress operation. 
The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See ReleaseAddress for usage and error information. +// See ReleaseAddress for more information on using the ReleaseAddress +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ReleaseAddress method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ReleaseAddressRequest method. // req, resp := client.ReleaseAddressRequest(params) @@ -13622,19 +17984,22 @@ func (c *EC2) ReleaseAddressRequest(input *ReleaseAddressInput) (req *request.Re // // Releases the specified Elastic IP address. // -// After releasing an Elastic IP address, it is released to the IP address pool -// and might be unavailable to you. Be sure to update your DNS records and any -// servers or devices that communicate with the address. If you attempt to release -// an Elastic IP address that you already released, you'll get an AuthFailure -// error if the address is already allocated to another AWS account. -// // [EC2-Classic, default VPC] Releasing an Elastic IP address automatically // disassociates it from any instance that it's associated with. To disassociate // an Elastic IP address without releasing it, use DisassociateAddress. // // [Nondefault VPC] You must use DisassociateAddress to disassociate the Elastic -// IP address before you try to release it. Otherwise, Amazon EC2 returns an -// error (InvalidIPAddress.InUse). +// IP address before you can release it. Otherwise, Amazon EC2 returns an error +// (InvalidIPAddress.InUse). +// +// After releasing an Elastic IP address, it is released to the IP address pool. +// Be sure to update your DNS records and any servers or devices that communicate +// with the address. If you attempt to release an Elastic IP address that you +// already released, you'll get an AuthFailure error if the address is already +// allocated to another AWS account. +// +// [EC2-VPC] After you release an Elastic IP address for use in a VPC, you might +// be able to recover it. For more information, see AllocateAddress. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -13645,27 +18010,41 @@ func (c *EC2) ReleaseAddressRequest(input *ReleaseAddressInput) (req *request.Re // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReleaseAddress func (c *EC2) ReleaseAddress(input *ReleaseAddressInput) (*ReleaseAddressOutput, error) { req, out := c.ReleaseAddressRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ReleaseAddressWithContext is the same as ReleaseAddress with the addition of +// the ability to pass a context and additional request options. +// +// See ReleaseAddress for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ReleaseAddressWithContext(ctx aws.Context, input *ReleaseAddressInput, opts ...request.Option) (*ReleaseAddressOutput, error) { + req, out := c.ReleaseAddressRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opReleaseHosts = "ReleaseHosts" // ReleaseHostsRequest generates a "aws/request.Request" representing the // client's request for the ReleaseHosts operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See ReleaseHosts for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ReleaseHosts method directly -// instead. +// See ReleaseHosts for more information on using the ReleaseHosts +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ReleaseHostsRequest method. // req, resp := client.ReleaseHostsRequest(params) @@ -13716,27 +18095,120 @@ func (c *EC2) ReleaseHostsRequest(input *ReleaseHostsInput) (req *request.Reques // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReleaseHosts func (c *EC2) ReleaseHosts(input *ReleaseHostsInput) (*ReleaseHostsOutput, error) { req, out := c.ReleaseHostsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ReleaseHostsWithContext is the same as ReleaseHosts with the addition of +// the ability to pass a context and additional request options. +// +// See ReleaseHosts for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ReleaseHostsWithContext(ctx aws.Context, input *ReleaseHostsInput, opts ...request.Option) (*ReleaseHostsOutput, error) { + req, out := c.ReleaseHostsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opReplaceIamInstanceProfileAssociation = "ReplaceIamInstanceProfileAssociation" + +// ReplaceIamInstanceProfileAssociationRequest generates a "aws/request.Request" representing the +// client's request for the ReplaceIamInstanceProfileAssociation operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ReplaceIamInstanceProfileAssociation for more information on using the ReplaceIamInstanceProfileAssociation +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ReplaceIamInstanceProfileAssociationRequest method. +// req, resp := client.ReplaceIamInstanceProfileAssociationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReplaceIamInstanceProfileAssociation +func (c *EC2) ReplaceIamInstanceProfileAssociationRequest(input *ReplaceIamInstanceProfileAssociationInput) (req *request.Request, output *ReplaceIamInstanceProfileAssociationOutput) { + op := &request.Operation{ + Name: opReplaceIamInstanceProfileAssociation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ReplaceIamInstanceProfileAssociationInput{} + } + + output = &ReplaceIamInstanceProfileAssociationOutput{} + req = c.newRequest(op, input, output) + return +} + +// ReplaceIamInstanceProfileAssociation API operation for Amazon Elastic Compute Cloud. +// +// Replaces an IAM instance profile for the specified running instance. You +// can use this action to change the IAM instance profile that's associated +// with an instance without having to disassociate the existing IAM instance +// profile first. +// +// Use DescribeIamInstanceProfileAssociations to get the association ID. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ReplaceIamInstanceProfileAssociation for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReplaceIamInstanceProfileAssociation +func (c *EC2) ReplaceIamInstanceProfileAssociation(input *ReplaceIamInstanceProfileAssociationInput) (*ReplaceIamInstanceProfileAssociationOutput, error) { + req, out := c.ReplaceIamInstanceProfileAssociationRequest(input) + return out, req.Send() +} + +// ReplaceIamInstanceProfileAssociationWithContext is the same as ReplaceIamInstanceProfileAssociation with the addition of +// the ability to pass a context and additional request options. +// +// See ReplaceIamInstanceProfileAssociation for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ReplaceIamInstanceProfileAssociationWithContext(ctx aws.Context, input *ReplaceIamInstanceProfileAssociationInput, opts ...request.Option) (*ReplaceIamInstanceProfileAssociationOutput, error) { + req, out := c.ReplaceIamInstanceProfileAssociationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opReplaceNetworkAclAssociation = "ReplaceNetworkAclAssociation" // ReplaceNetworkAclAssociationRequest generates a "aws/request.Request" representing the // client's request for the ReplaceNetworkAclAssociation operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See ReplaceNetworkAclAssociation for usage and error information. +// See ReplaceNetworkAclAssociation for more information on using the ReplaceNetworkAclAssociation +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ReplaceNetworkAclAssociation method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ReplaceNetworkAclAssociationRequest method. // req, resp := client.ReplaceNetworkAclAssociationRequest(params) @@ -13779,27 +18251,41 @@ func (c *EC2) ReplaceNetworkAclAssociationRequest(input *ReplaceNetworkAclAssoci // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReplaceNetworkAclAssociation func (c *EC2) ReplaceNetworkAclAssociation(input *ReplaceNetworkAclAssociationInput) (*ReplaceNetworkAclAssociationOutput, error) { req, out := c.ReplaceNetworkAclAssociationRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ReplaceNetworkAclAssociationWithContext is the same as ReplaceNetworkAclAssociation with the addition of +// the ability to pass a context and additional request options. +// +// See ReplaceNetworkAclAssociation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ReplaceNetworkAclAssociationWithContext(ctx aws.Context, input *ReplaceNetworkAclAssociationInput, opts ...request.Option) (*ReplaceNetworkAclAssociationOutput, error) { + req, out := c.ReplaceNetworkAclAssociationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() } const opReplaceNetworkAclEntry = "ReplaceNetworkAclEntry" // ReplaceNetworkAclEntryRequest generates a "aws/request.Request" representing the // client's request for the ReplaceNetworkAclEntry operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See ReplaceNetworkAclEntry for usage and error information. +// See ReplaceNetworkAclEntry for more information on using the ReplaceNetworkAclEntry +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ReplaceNetworkAclEntry method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ReplaceNetworkAclEntryRequest method. // req, resp := client.ReplaceNetworkAclEntryRequest(params) @@ -13843,27 +18329,41 @@ func (c *EC2) ReplaceNetworkAclEntryRequest(input *ReplaceNetworkAclEntryInput) // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReplaceNetworkAclEntry func (c *EC2) ReplaceNetworkAclEntry(input *ReplaceNetworkAclEntryInput) (*ReplaceNetworkAclEntryOutput, error) { req, out := c.ReplaceNetworkAclEntryRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ReplaceNetworkAclEntryWithContext is the same as ReplaceNetworkAclEntry with the addition of +// the ability to pass a context and additional request options. +// +// See ReplaceNetworkAclEntry for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ReplaceNetworkAclEntryWithContext(ctx aws.Context, input *ReplaceNetworkAclEntryInput, opts ...request.Option) (*ReplaceNetworkAclEntryOutput, error) { + req, out := c.ReplaceNetworkAclEntryRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opReplaceRoute = "ReplaceRoute" // ReplaceRouteRequest generates a "aws/request.Request" representing the // client's request for the ReplaceRoute operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See ReplaceRoute for usage and error information. +// See ReplaceRoute for more information on using the ReplaceRoute +// API call, and error handling. 
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ReplaceRoute method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ReplaceRouteRequest method. // req, resp := client.ReplaceRouteRequest(params) @@ -13911,27 +18411,41 @@ func (c *EC2) ReplaceRouteRequest(input *ReplaceRouteInput) (req *request.Reques // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReplaceRoute func (c *EC2) ReplaceRoute(input *ReplaceRouteInput) (*ReplaceRouteOutput, error) { req, out := c.ReplaceRouteRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ReplaceRouteWithContext is the same as ReplaceRoute with the addition of +// the ability to pass a context and additional request options. +// +// See ReplaceRoute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ReplaceRouteWithContext(ctx aws.Context, input *ReplaceRouteInput, opts ...request.Option) (*ReplaceRouteOutput, error) { + req, out := c.ReplaceRouteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opReplaceRouteTableAssociation = "ReplaceRouteTableAssociation" // ReplaceRouteTableAssociationRequest generates a "aws/request.Request" representing the // client's request for the ReplaceRouteTableAssociation operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See ReplaceRouteTableAssociation for usage and error information. +// See ReplaceRouteTableAssociation for more information on using the ReplaceRouteTableAssociation +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ReplaceRouteTableAssociation method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ReplaceRouteTableAssociationRequest method. 
// req, resp := client.ReplaceRouteTableAssociationRequest(params) @@ -13979,27 +18493,41 @@ func (c *EC2) ReplaceRouteTableAssociationRequest(input *ReplaceRouteTableAssoci // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReplaceRouteTableAssociation func (c *EC2) ReplaceRouteTableAssociation(input *ReplaceRouteTableAssociationInput) (*ReplaceRouteTableAssociationOutput, error) { req, out := c.ReplaceRouteTableAssociationRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ReplaceRouteTableAssociationWithContext is the same as ReplaceRouteTableAssociation with the addition of +// the ability to pass a context and additional request options. +// +// See ReplaceRouteTableAssociation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ReplaceRouteTableAssociationWithContext(ctx aws.Context, input *ReplaceRouteTableAssociationInput, opts ...request.Option) (*ReplaceRouteTableAssociationOutput, error) { + req, out := c.ReplaceRouteTableAssociationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opReportInstanceStatus = "ReportInstanceStatus" // ReportInstanceStatusRequest generates a "aws/request.Request" representing the // client's request for the ReportInstanceStatus operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See ReportInstanceStatus for usage and error information. +// See ReportInstanceStatus for more information on using the ReportInstanceStatus +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ReportInstanceStatus method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ReportInstanceStatusRequest method. // req, resp := client.ReportInstanceStatusRequest(params) @@ -14047,27 +18575,41 @@ func (c *EC2) ReportInstanceStatusRequest(input *ReportInstanceStatusInput) (req // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReportInstanceStatus func (c *EC2) ReportInstanceStatus(input *ReportInstanceStatusInput) (*ReportInstanceStatusOutput, error) { req, out := c.ReportInstanceStatusRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ReportInstanceStatusWithContext is the same as ReportInstanceStatus with the addition of +// the ability to pass a context and additional request options. 
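
A minimal sketch of the new *WithContext variants, assuming that a standard context.Context satisfies aws.Context and using placeholder association and route table IDs; the call is cancelled if it does not finish within the timeout.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	// Cancel the API call if it has not completed within 30 seconds.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	out, err := svc.ReplaceRouteTableAssociationWithContext(ctx, &ec2.ReplaceRouteTableAssociationInput{
		AssociationId: aws.String("rtbassoc-0123456789abcdef0"), // placeholder
		RouteTableId:  aws.String("rtb-0123456789abcdef0"),      // placeholder
	})
	if err != nil {
		fmt.Println("ReplaceRouteTableAssociation failed or timed out:", err)
		return
	}
	fmt.Println(aws.StringValue(out.NewAssociationId))
}
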
+// +// See ReportInstanceStatus for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ReportInstanceStatusWithContext(ctx aws.Context, input *ReportInstanceStatusInput, opts ...request.Option) (*ReportInstanceStatusOutput, error) { + req, out := c.ReportInstanceStatusRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opRequestSpotFleet = "RequestSpotFleet" // RequestSpotFleetRequest generates a "aws/request.Request" representing the // client's request for the RequestSpotFleet operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See RequestSpotFleet for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the RequestSpotFleet method directly -// instead. +// See RequestSpotFleet for more information on using the RequestSpotFleet +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the RequestSpotFleetRequest method. // req, resp := client.RequestSpotFleetRequest(params) @@ -14123,27 +18665,41 @@ func (c *EC2) RequestSpotFleetRequest(input *RequestSpotFleetInput) (req *reques // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RequestSpotFleet func (c *EC2) RequestSpotFleet(input *RequestSpotFleetInput) (*RequestSpotFleetOutput, error) { req, out := c.RequestSpotFleetRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// RequestSpotFleetWithContext is the same as RequestSpotFleet with the addition of +// the ability to pass a context and additional request options. +// +// See RequestSpotFleet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) RequestSpotFleetWithContext(ctx aws.Context, input *RequestSpotFleetInput, opts ...request.Option) (*RequestSpotFleetOutput, error) { + req, out := c.RequestSpotFleetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opRequestSpotInstances = "RequestSpotInstances" // RequestSpotInstancesRequest generates a "aws/request.Request" representing the // client's request for the RequestSpotInstances operation. 
The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See RequestSpotInstances for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the RequestSpotInstances method directly -// instead. +// See RequestSpotInstances for more information on using the RequestSpotInstances +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the RequestSpotInstancesRequest method. // req, resp := client.RequestSpotInstancesRequest(params) @@ -14188,27 +18744,116 @@ func (c *EC2) RequestSpotInstancesRequest(input *RequestSpotInstancesInput) (req // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RequestSpotInstances func (c *EC2) RequestSpotInstances(input *RequestSpotInstancesInput) (*RequestSpotInstancesOutput, error) { req, out := c.RequestSpotInstancesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// RequestSpotInstancesWithContext is the same as RequestSpotInstances with the addition of +// the ability to pass a context and additional request options. +// +// See RequestSpotInstances for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) RequestSpotInstancesWithContext(ctx aws.Context, input *RequestSpotInstancesInput, opts ...request.Option) (*RequestSpotInstancesOutput, error) { + req, out := c.RequestSpotInstancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opResetFpgaImageAttribute = "ResetFpgaImageAttribute" + +// ResetFpgaImageAttributeRequest generates a "aws/request.Request" representing the +// client's request for the ResetFpgaImageAttribute operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ResetFpgaImageAttribute for more information on using the ResetFpgaImageAttribute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ResetFpgaImageAttributeRequest method. 
+// req, resp := client.ResetFpgaImageAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResetFpgaImageAttribute +func (c *EC2) ResetFpgaImageAttributeRequest(input *ResetFpgaImageAttributeInput) (req *request.Request, output *ResetFpgaImageAttributeOutput) { + op := &request.Operation{ + Name: opResetFpgaImageAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResetFpgaImageAttributeInput{} + } + + output = &ResetFpgaImageAttributeOutput{} + req = c.newRequest(op, input, output) + return +} + +// ResetFpgaImageAttribute API operation for Amazon Elastic Compute Cloud. +// +// Resets the specified attribute of the specified Amazon FPGA Image (AFI) to +// its default value. You can only reset the load permission attribute. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ResetFpgaImageAttribute for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResetFpgaImageAttribute +func (c *EC2) ResetFpgaImageAttribute(input *ResetFpgaImageAttributeInput) (*ResetFpgaImageAttributeOutput, error) { + req, out := c.ResetFpgaImageAttributeRequest(input) + return out, req.Send() +} + +// ResetFpgaImageAttributeWithContext is the same as ResetFpgaImageAttribute with the addition of +// the ability to pass a context and additional request options. +// +// See ResetFpgaImageAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ResetFpgaImageAttributeWithContext(ctx aws.Context, input *ResetFpgaImageAttributeInput, opts ...request.Option) (*ResetFpgaImageAttributeOutput, error) { + req, out := c.ResetFpgaImageAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opResetImageAttribute = "ResetImageAttribute" // ResetImageAttributeRequest generates a "aws/request.Request" representing the // client's request for the ResetImageAttribute operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See ResetImageAttribute for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ResetImageAttribute method directly -// instead. +// See ResetImageAttribute for more information on using the ResetImageAttribute +// API call, and error handling. 
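
A hedged sketch of the new ResetFpgaImageAttribute operation; the FpgaImageId and Attribute field names and the "loadPermission" value are assumptions based on the operation's documentation above, not on struct definitions shown in this hunk.

package ec2examples

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// resetAFILoadPermission resets the AFI's load permission attribute back to
// its default value. The image ID is a placeholder.
func resetAFILoadPermission(svc *ec2.EC2) error {
	_, err := svc.ResetFpgaImageAttribute(&ec2.ResetFpgaImageAttributeInput{
		FpgaImageId: aws.String("afi-0123456789abcdef0"),
		Attribute:   aws.String("loadPermission"), // assumed attribute name
	})
	return err
}
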
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ResetImageAttributeRequest method. // req, resp := client.ResetImageAttributeRequest(params) @@ -14252,27 +18897,41 @@ func (c *EC2) ResetImageAttributeRequest(input *ResetImageAttributeInput) (req * // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResetImageAttribute func (c *EC2) ResetImageAttribute(input *ResetImageAttributeInput) (*ResetImageAttributeOutput, error) { req, out := c.ResetImageAttributeRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ResetImageAttributeWithContext is the same as ResetImageAttribute with the addition of +// the ability to pass a context and additional request options. +// +// See ResetImageAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ResetImageAttributeWithContext(ctx aws.Context, input *ResetImageAttributeInput, opts ...request.Option) (*ResetImageAttributeOutput, error) { + req, out := c.ResetImageAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opResetInstanceAttribute = "ResetInstanceAttribute" // ResetInstanceAttributeRequest generates a "aws/request.Request" representing the // client's request for the ResetInstanceAttribute operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See ResetInstanceAttribute for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ResetInstanceAttribute method directly -// instead. +// See ResetInstanceAttribute for more information on using the ResetInstanceAttribute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ResetInstanceAttributeRequest method. 
// req, resp := client.ResetInstanceAttributeRequest(params) @@ -14322,27 +18981,41 @@ func (c *EC2) ResetInstanceAttributeRequest(input *ResetInstanceAttributeInput) // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResetInstanceAttribute func (c *EC2) ResetInstanceAttribute(input *ResetInstanceAttributeInput) (*ResetInstanceAttributeOutput, error) { req, out := c.ResetInstanceAttributeRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ResetInstanceAttributeWithContext is the same as ResetInstanceAttribute with the addition of +// the ability to pass a context and additional request options. +// +// See ResetInstanceAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ResetInstanceAttributeWithContext(ctx aws.Context, input *ResetInstanceAttributeInput, opts ...request.Option) (*ResetInstanceAttributeOutput, error) { + req, out := c.ResetInstanceAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opResetNetworkInterfaceAttribute = "ResetNetworkInterfaceAttribute" // ResetNetworkInterfaceAttributeRequest generates a "aws/request.Request" representing the // client's request for the ResetNetworkInterfaceAttribute operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See ResetNetworkInterfaceAttribute for usage and error information. +// See ResetNetworkInterfaceAttribute for more information on using the ResetNetworkInterfaceAttribute +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ResetNetworkInterfaceAttribute method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ResetNetworkInterfaceAttributeRequest method. 
// req, resp := client.ResetNetworkInterfaceAttributeRequest(params) @@ -14385,27 +19058,41 @@ func (c *EC2) ResetNetworkInterfaceAttributeRequest(input *ResetNetworkInterface // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResetNetworkInterfaceAttribute func (c *EC2) ResetNetworkInterfaceAttribute(input *ResetNetworkInterfaceAttributeInput) (*ResetNetworkInterfaceAttributeOutput, error) { req, out := c.ResetNetworkInterfaceAttributeRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ResetNetworkInterfaceAttributeWithContext is the same as ResetNetworkInterfaceAttribute with the addition of +// the ability to pass a context and additional request options. +// +// See ResetNetworkInterfaceAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ResetNetworkInterfaceAttributeWithContext(ctx aws.Context, input *ResetNetworkInterfaceAttributeInput, opts ...request.Option) (*ResetNetworkInterfaceAttributeOutput, error) { + req, out := c.ResetNetworkInterfaceAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opResetSnapshotAttribute = "ResetSnapshotAttribute" // ResetSnapshotAttributeRequest generates a "aws/request.Request" representing the // client's request for the ResetSnapshotAttribute operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See ResetSnapshotAttribute for usage and error information. +// See ResetSnapshotAttribute for more information on using the ResetSnapshotAttribute +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ResetSnapshotAttribute method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ResetSnapshotAttributeRequest method. 
// req, resp := client.ResetSnapshotAttributeRequest(params) @@ -14451,27 +19138,41 @@ func (c *EC2) ResetSnapshotAttributeRequest(input *ResetSnapshotAttributeInput) // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResetSnapshotAttribute func (c *EC2) ResetSnapshotAttribute(input *ResetSnapshotAttributeInput) (*ResetSnapshotAttributeOutput, error) { req, out := c.ResetSnapshotAttributeRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ResetSnapshotAttributeWithContext is the same as ResetSnapshotAttribute with the addition of +// the ability to pass a context and additional request options. +// +// See ResetSnapshotAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ResetSnapshotAttributeWithContext(ctx aws.Context, input *ResetSnapshotAttributeInput, opts ...request.Option) (*ResetSnapshotAttributeOutput, error) { + req, out := c.ResetSnapshotAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opRestoreAddressToClassic = "RestoreAddressToClassic" // RestoreAddressToClassicRequest generates a "aws/request.Request" representing the // client's request for the RestoreAddressToClassic operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See RestoreAddressToClassic for usage and error information. +// See RestoreAddressToClassic for more information on using the RestoreAddressToClassic +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the RestoreAddressToClassic method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the RestoreAddressToClassicRequest method. // req, resp := client.RestoreAddressToClassicRequest(params) @@ -14514,27 +19215,41 @@ func (c *EC2) RestoreAddressToClassicRequest(input *RestoreAddressToClassicInput // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RestoreAddressToClassic func (c *EC2) RestoreAddressToClassic(input *RestoreAddressToClassicInput) (*RestoreAddressToClassicOutput, error) { req, out := c.RestoreAddressToClassicRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// RestoreAddressToClassicWithContext is the same as RestoreAddressToClassic with the addition of +// the ability to pass a context and additional request options. 
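
A similar hedged sketch for ResetSnapshotAttribute, assuming a SnapshotId/Attribute input shape and the "createVolumePermission" attribute value; the snapshot ID is a placeholder.

package ec2examples

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// resetSnapshotPermissions resets the snapshot's create-volume permissions
// to their default value.
func resetSnapshotPermissions(svc *ec2.EC2) error {
	_, err := svc.ResetSnapshotAttribute(&ec2.ResetSnapshotAttributeInput{
		SnapshotId: aws.String("snap-0123456789abcdef0"), // placeholder
		Attribute:  aws.String("createVolumePermission"),
	})
	return err
}
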
+// +// See RestoreAddressToClassic for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) RestoreAddressToClassicWithContext(ctx aws.Context, input *RestoreAddressToClassicInput, opts ...request.Option) (*RestoreAddressToClassicOutput, error) { + req, out := c.RestoreAddressToClassicRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opRevokeSecurityGroupEgress = "RevokeSecurityGroupEgress" // RevokeSecurityGroupEgressRequest generates a "aws/request.Request" representing the // client's request for the RevokeSecurityGroupEgress operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See RevokeSecurityGroupEgress for usage and error information. +// See RevokeSecurityGroupEgress for more information on using the RevokeSecurityGroupEgress +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the RevokeSecurityGroupEgress method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the RevokeSecurityGroupEgressRequest method. // req, resp := client.RevokeSecurityGroupEgressRequest(params) @@ -14567,13 +19282,14 @@ func (c *EC2) RevokeSecurityGroupEgressRequest(input *RevokeSecurityGroupEgressI // // [EC2-VPC only] Removes one or more egress rules from a security group for // EC2-VPC. This action doesn't apply to security groups for use in EC2-Classic. -// The values that you specify in the revoke request (for example, ports) must -// match the existing rule's values for the rule to be revoked. +// To remove a rule, the values that you specify (for example, ports) must match +// the existing rule's values exactly. // // Each rule consists of the protocol and the IPv4 or IPv6 CIDR range or source // security group. For the TCP and UDP protocols, you must also specify the // destination port or range of ports. For the ICMP protocol, you must also -// specify the ICMP type and code. +// specify the ICMP type and code. If the security group rule has a description, +// you do not have to specify the description to revoke the rule. // // Rule changes are propagated to instances within the security group as quickly // as possible. However, a small delay might occur. 
@@ -14587,27 +19303,41 @@ func (c *EC2) RevokeSecurityGroupEgressRequest(input *RevokeSecurityGroupEgressI // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RevokeSecurityGroupEgress func (c *EC2) RevokeSecurityGroupEgress(input *RevokeSecurityGroupEgressInput) (*RevokeSecurityGroupEgressOutput, error) { req, out := c.RevokeSecurityGroupEgressRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// RevokeSecurityGroupEgressWithContext is the same as RevokeSecurityGroupEgress with the addition of +// the ability to pass a context and additional request options. +// +// See RevokeSecurityGroupEgress for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) RevokeSecurityGroupEgressWithContext(ctx aws.Context, input *RevokeSecurityGroupEgressInput, opts ...request.Option) (*RevokeSecurityGroupEgressOutput, error) { + req, out := c.RevokeSecurityGroupEgressRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opRevokeSecurityGroupIngress = "RevokeSecurityGroupIngress" // RevokeSecurityGroupIngressRequest generates a "aws/request.Request" representing the // client's request for the RevokeSecurityGroupIngress operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See RevokeSecurityGroupIngress for usage and error information. +// See RevokeSecurityGroupIngress for more information on using the RevokeSecurityGroupIngress +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the RevokeSecurityGroupIngress method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the RevokeSecurityGroupIngressRequest method. // req, resp := client.RevokeSecurityGroupIngressRequest(params) @@ -14638,14 +19368,19 @@ func (c *EC2) RevokeSecurityGroupIngressRequest(input *RevokeSecurityGroupIngres // RevokeSecurityGroupIngress API operation for Amazon Elastic Compute Cloud. // -// Removes one or more ingress rules from a security group. The values that -// you specify in the revoke request (for example, ports) must match the existing -// rule's values for the rule to be removed. +// Removes one or more ingress rules from a security group. To remove a rule, +// the values that you specify (for example, ports) must match the existing +// rule's values exactly. 
+// +// [EC2-Classic security groups only] If the values you specify do not match +// the existing rule's values, no error is returned. Use DescribeSecurityGroups +// to verify that the rule has been removed. // // Each rule consists of the protocol and the CIDR range or source security // group. For the TCP and UDP protocols, you must also specify the destination // port or range of ports. For the ICMP protocol, you must also specify the -// ICMP type and code. +// ICMP type and code. If the security group rule has a description, you do +// not have to specify the description to revoke the rule. // // Rule changes are propagated to instances within the security group as quickly // as possible. However, a small delay might occur. @@ -14659,27 +19394,41 @@ func (c *EC2) RevokeSecurityGroupIngressRequest(input *RevokeSecurityGroupIngres // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RevokeSecurityGroupIngress func (c *EC2) RevokeSecurityGroupIngress(input *RevokeSecurityGroupIngressInput) (*RevokeSecurityGroupIngressOutput, error) { req, out := c.RevokeSecurityGroupIngressRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// RevokeSecurityGroupIngressWithContext is the same as RevokeSecurityGroupIngress with the addition of +// the ability to pass a context and additional request options. +// +// See RevokeSecurityGroupIngress for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) RevokeSecurityGroupIngressWithContext(ctx aws.Context, input *RevokeSecurityGroupIngressInput, opts ...request.Option) (*RevokeSecurityGroupIngressOutput, error) { + req, out := c.RevokeSecurityGroupIngressRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opRunInstances = "RunInstances" // RunInstancesRequest generates a "aws/request.Request" representing the // client's request for the RunInstances operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See RunInstances for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the RunInstances method directly -// instead. +// See RunInstances for more information on using the RunInstances +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the RunInstancesRequest method. 
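
A small sketch of revoking an ingress rule under the clarified semantics above: the protocol, port range, and CIDR must match the existing rule exactly, while the rule's description may be omitted. The group ID and the flat input field names are assumptions for illustration.

package ec2examples

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// revokeSSHIngress removes a previously authorized SSH rule. The values must
// match the existing rule exactly; any description on the rule is not needed.
func revokeSSHIngress(svc *ec2.EC2) error {
	_, err := svc.RevokeSecurityGroupIngress(&ec2.RevokeSecurityGroupIngressInput{
		GroupId:    aws.String("sg-0123456789abcdef0"), // placeholder
		IpProtocol: aws.String("tcp"),
		FromPort:   aws.Int64(22),
		ToPort:     aws.Int64(22),
		CidrIp:     aws.String("203.0.113.0/24"),
	})
	return err
}
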
// req, resp := client.RunInstancesRequest(params) @@ -14730,7 +19479,7 @@ func (c *EC2) RunInstancesRequest(input *RunInstancesInput) (req *request.Reques // IPv4 range of your subnet. // // * Not all instance types support IPv6 addresses. For more information, -// see Amazon EC2 Instance Types (http://aws.amazon.com/ec2/instance-types/). +// see Instance Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). // // * If you don't specify a security group ID, we use the default security // group. For more information, see Security Groups (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html). @@ -14739,13 +19488,13 @@ func (c *EC2) RunInstancesRequest(input *RunInstancesInput) (req *request.Reques // not subscribed, the request fails. // // To ensure faster instance launches, break up large requests into smaller -// batches. For example, create 5 separate launch requests for 100 instances -// each instead of 1 launch request for 500 instances. +// batches. For example, create five separate launch requests for 100 instances +// each instead of one launch request for 500 instances. // // An instance is ready for you to use when it's in the running state. You can -// check the state of your instance using DescribeInstances. After launch, you -// can apply tags to your running instance (requires a resource ID). For more -// information, see CreateTags and Tagging Your Amazon EC2 Resources (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html). +// check the state of your instance using DescribeInstances. You can tag instances +// and EBS volumes during launch, after launch, or both. For more information, +// see CreateTags and Tagging Your Amazon EC2 Resources (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html). // // Linux instances have access to the public key of the key pair at boot. You // can use this key to provide secure access to the instance. Amazon EC2 public @@ -14767,27 +19516,41 @@ func (c *EC2) RunInstancesRequest(input *RunInstancesInput) (req *request.Reques // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RunInstances func (c *EC2) RunInstances(input *RunInstancesInput) (*Reservation, error) { req, out := c.RunInstancesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// RunInstancesWithContext is the same as RunInstances with the addition of +// the ability to pass a context and additional request options. +// +// See RunInstances for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) RunInstancesWithContext(ctx aws.Context, input *RunInstancesInput, opts ...request.Option) (*Reservation, error) { + req, out := c.RunInstancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opRunScheduledInstances = "RunScheduledInstances" // RunScheduledInstancesRequest generates a "aws/request.Request" representing the // client's request for the RunScheduledInstances operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. 
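
A sketch of the tag-on-launch behavior mentioned in the updated RunInstances documentation, assuming RunInstancesInput carries a TagSpecifications field; the AMI ID and instance settings are placeholders.

package ec2examples

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// runTaggedInstance launches a single instance and applies a Name tag at
// launch time instead of calling CreateTags afterwards.
func runTaggedInstance(svc *ec2.EC2) (*ec2.Reservation, error) {
	return svc.RunInstances(&ec2.RunInstancesInput{
		ImageId:      aws.String("ami-0123456789abcdef0"), // placeholder AMI
		InstanceType: aws.String("t2.micro"),
		MinCount:     aws.Int64(1),
		MaxCount:     aws.Int64(1),
		TagSpecifications: []*ec2.TagSpecification{{
			ResourceType: aws.String("instance"),
			Tags: []*ec2.Tag{{
				Key:   aws.String("Name"),
				Value: aws.String("example"),
			}},
		}},
	})
}
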
// -// See RunScheduledInstances for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the RunScheduledInstances method directly -// instead. +// See RunScheduledInstances for more information on using the RunScheduledInstances +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the RunScheduledInstancesRequest method. // req, resp := client.RunScheduledInstancesRequest(params) @@ -14837,27 +19600,41 @@ func (c *EC2) RunScheduledInstancesRequest(input *RunScheduledInstancesInput) (r // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RunScheduledInstances func (c *EC2) RunScheduledInstances(input *RunScheduledInstancesInput) (*RunScheduledInstancesOutput, error) { req, out := c.RunScheduledInstancesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// RunScheduledInstancesWithContext is the same as RunScheduledInstances with the addition of +// the ability to pass a context and additional request options. +// +// See RunScheduledInstances for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) RunScheduledInstancesWithContext(ctx aws.Context, input *RunScheduledInstancesInput, opts ...request.Option) (*RunScheduledInstancesOutput, error) { + req, out := c.RunScheduledInstancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opStartInstances = "StartInstances" // StartInstancesRequest generates a "aws/request.Request" representing the // client's request for the StartInstances operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See StartInstances for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the StartInstances method directly -// instead. +// See StartInstances for more information on using the StartInstances +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the StartInstancesRequest method. // req, resp := client.StartInstancesRequest(params) @@ -14886,16 +19663,20 @@ func (c *EC2) StartInstancesRequest(input *StartInstancesInput) (req *request.Re // StartInstances API operation for Amazon Elastic Compute Cloud. // -// Starts an Amazon EBS-backed AMI that you've previously stopped. +// Starts an Amazon EBS-backed instance that you've previously stopped. // // Instances that use Amazon EBS volumes as their root devices can be quickly // stopped and started. When an instance is stopped, the compute resources are -// released and you are not billed for hourly instance usage. However, your -// root partition Amazon EBS volume remains, continues to persist your data, -// and you are charged for Amazon EBS volume usage. You can restart your instance -// at any time. Each time you transition an instance from stopped to started, -// Amazon EC2 charges a full instance hour, even if transitions happen multiple -// times within a single hour. +// released and you are not billed for instance usage. However, your root partition +// Amazon EBS volume remains and continues to persist your data, and you are +// charged for Amazon EBS volume usage. You can restart your instance at any +// time. Every time you start your Windows instance, Amazon EC2 charges you +// for a full instance hour. If you stop and restart your Windows instance, +// a new instance hour begins and Amazon EC2 charges you for another full instance +// hour even if you are still within the same 60-minute period when it was stopped. +// Every time you start your Linux instance, Amazon EC2 charges a one-minute +// minimum for instance usage, and thereafter charges per second for instance +// usage. // // Before stopping an instance, make sure it is in a state from which it can // be restarted. Stopping an instance does not preserve data stored in RAM. @@ -14915,27 +19696,41 @@ func (c *EC2) StartInstancesRequest(input *StartInstancesInput) (req *request.Re // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/StartInstances func (c *EC2) StartInstances(input *StartInstancesInput) (*StartInstancesOutput, error) { req, out := c.StartInstancesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// StartInstancesWithContext is the same as StartInstances with the addition of +// the ability to pass a context and additional request options. +// +// See StartInstances for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) StartInstancesWithContext(ctx aws.Context, input *StartInstancesInput, opts ...request.Option) (*StartInstancesOutput, error) { + req, out := c.StartInstancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opStopInstances = "StopInstances" // StopInstancesRequest generates a "aws/request.Request" representing the // client's request for the StopInstances operation. 
The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See StopInstances for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the StopInstances method directly -// instead. +// See StopInstances for more information on using the StopInstances +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the StopInstancesRequest method. // req, resp := client.StopInstancesRequest(params) @@ -14966,14 +19761,17 @@ func (c *EC2) StopInstancesRequest(input *StopInstancesInput) (req *request.Requ // // Stops an Amazon EBS-backed instance. // -// We don't charge hourly usage for a stopped instance, or data transfer fees; -// however, your root partition Amazon EBS volume remains, continues to persist -// your data, and you are charged for Amazon EBS volume usage. Each time you -// transition an instance from stopped to started, Amazon EC2 charges a full -// instance hour, even if transitions happen multiple times within a single -// hour. -// -// You can't start or stop Spot instances, and you can't stop instance store-backed +// We don't charge usage for a stopped instance, or data transfer fees; however, +// your root partition Amazon EBS volume remains and continues to persist your +// data, and you are charged for Amazon EBS volume usage. Every time you start +// your Windows instance, Amazon EC2 charges you for a full instance hour. If +// you stop and restart your Windows instance, a new instance hour begins and +// Amazon EC2 charges you for another full instance hour even if you are still +// within the same 60-minute period when it was stopped. Every time you start +// your Linux instance, Amazon EC2 charges a one-minute minimum for instance +// usage, and thereafter charges per second for instance usage. +// +// You can't start or stop Spot Instances, and you can't stop instance store-backed // instances. // // When you stop an instance, we shut it down. You can restart your instance @@ -15004,27 +19802,41 @@ func (c *EC2) StopInstancesRequest(input *StopInstancesInput) (req *request.Requ // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/StopInstances func (c *EC2) StopInstances(input *StopInstancesInput) (*StopInstancesOutput, error) { req, out := c.StopInstancesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// StopInstancesWithContext is the same as StopInstances with the addition of +// the ability to pass a context and additional request options. +// +// See StopInstances for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) StopInstancesWithContext(ctx aws.Context, input *StopInstancesInput, opts ...request.Option) (*StopInstancesOutput, error) { + req, out := c.StopInstancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opTerminateInstances = "TerminateInstances" // TerminateInstancesRequest generates a "aws/request.Request" representing the // client's request for the TerminateInstances operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See TerminateInstances for usage and error information. +// See TerminateInstances for more information on using the TerminateInstances +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the TerminateInstances method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the TerminateInstancesRequest method. // req, resp := client.TerminateInstancesRequest(params) @@ -15088,27 +19900,41 @@ func (c *EC2) TerminateInstancesRequest(input *TerminateInstancesInput) (req *re // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/TerminateInstances func (c *EC2) TerminateInstances(input *TerminateInstancesInput) (*TerminateInstancesOutput, error) { req, out := c.TerminateInstancesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// TerminateInstancesWithContext is the same as TerminateInstances with the addition of +// the ability to pass a context and additional request options. +// +// See TerminateInstances for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) TerminateInstancesWithContext(ctx aws.Context, input *TerminateInstancesInput, opts ...request.Option) (*TerminateInstancesOutput, error) { + req, out := c.TerminateInstancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opUnassignIpv6Addresses = "UnassignIpv6Addresses" // UnassignIpv6AddressesRequest generates a "aws/request.Request" representing the // client's request for the UnassignIpv6Addresses operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. 
+// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See UnassignIpv6Addresses for usage and error information. +// See UnassignIpv6Addresses for more information on using the UnassignIpv6Addresses +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the UnassignIpv6Addresses method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the UnassignIpv6AddressesRequest method. // req, resp := client.UnassignIpv6AddressesRequest(params) @@ -15148,27 +19974,41 @@ func (c *EC2) UnassignIpv6AddressesRequest(input *UnassignIpv6AddressesInput) (r // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/UnassignIpv6Addresses func (c *EC2) UnassignIpv6Addresses(input *UnassignIpv6AddressesInput) (*UnassignIpv6AddressesOutput, error) { req, out := c.UnassignIpv6AddressesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// UnassignIpv6AddressesWithContext is the same as UnassignIpv6Addresses with the addition of +// the ability to pass a context and additional request options. +// +// See UnassignIpv6Addresses for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) UnassignIpv6AddressesWithContext(ctx aws.Context, input *UnassignIpv6AddressesInput, opts ...request.Option) (*UnassignIpv6AddressesOutput, error) { + req, out := c.UnassignIpv6AddressesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opUnassignPrivateIpAddresses = "UnassignPrivateIpAddresses" // UnassignPrivateIpAddressesRequest generates a "aws/request.Request" representing the // client's request for the UnassignPrivateIpAddresses operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See UnassignPrivateIpAddresses for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the UnassignPrivateIpAddresses method directly -// instead. 
+// See UnassignPrivateIpAddresses for more information on using the UnassignPrivateIpAddresses +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the UnassignPrivateIpAddressesRequest method. // req, resp := client.UnassignPrivateIpAddressesRequest(params) @@ -15210,27 +20050,41 @@ func (c *EC2) UnassignPrivateIpAddressesRequest(input *UnassignPrivateIpAddresse // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/UnassignPrivateIpAddresses func (c *EC2) UnassignPrivateIpAddresses(input *UnassignPrivateIpAddressesInput) (*UnassignPrivateIpAddressesOutput, error) { req, out := c.UnassignPrivateIpAddressesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// UnassignPrivateIpAddressesWithContext is the same as UnassignPrivateIpAddresses with the addition of +// the ability to pass a context and additional request options. +// +// See UnassignPrivateIpAddresses for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) UnassignPrivateIpAddressesWithContext(ctx aws.Context, input *UnassignPrivateIpAddressesInput, opts ...request.Option) (*UnassignPrivateIpAddressesOutput, error) { + req, out := c.UnassignPrivateIpAddressesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opUnmonitorInstances = "UnmonitorInstances" // UnmonitorInstancesRequest generates a "aws/request.Request" representing the // client's request for the UnmonitorInstances operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See UnmonitorInstances for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the UnmonitorInstances method directly -// instead. +// See UnmonitorInstances for more information on using the UnmonitorInstances +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the UnmonitorInstancesRequest method. 
// req, resp := client.UnmonitorInstancesRequest(params) @@ -15272,8 +20126,183 @@ func (c *EC2) UnmonitorInstancesRequest(input *UnmonitorInstancesInput) (req *re // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/UnmonitorInstances func (c *EC2) UnmonitorInstances(input *UnmonitorInstancesInput) (*UnmonitorInstancesOutput, error) { req, out := c.UnmonitorInstancesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// UnmonitorInstancesWithContext is the same as UnmonitorInstances with the addition of +// the ability to pass a context and additional request options. +// +// See UnmonitorInstances for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) UnmonitorInstancesWithContext(ctx aws.Context, input *UnmonitorInstancesInput, opts ...request.Option) (*UnmonitorInstancesOutput, error) { + req, out := c.UnmonitorInstancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateSecurityGroupRuleDescriptionsEgress = "UpdateSecurityGroupRuleDescriptionsEgress" + +// UpdateSecurityGroupRuleDescriptionsEgressRequest generates a "aws/request.Request" representing the +// client's request for the UpdateSecurityGroupRuleDescriptionsEgress operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateSecurityGroupRuleDescriptionsEgress for more information on using the UpdateSecurityGroupRuleDescriptionsEgress +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateSecurityGroupRuleDescriptionsEgressRequest method. +// req, resp := client.UpdateSecurityGroupRuleDescriptionsEgressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/UpdateSecurityGroupRuleDescriptionsEgress +func (c *EC2) UpdateSecurityGroupRuleDescriptionsEgressRequest(input *UpdateSecurityGroupRuleDescriptionsEgressInput) (req *request.Request, output *UpdateSecurityGroupRuleDescriptionsEgressOutput) { + op := &request.Operation{ + Name: opUpdateSecurityGroupRuleDescriptionsEgress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateSecurityGroupRuleDescriptionsEgressInput{} + } + + output = &UpdateSecurityGroupRuleDescriptionsEgressOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateSecurityGroupRuleDescriptionsEgress API operation for Amazon Elastic Compute Cloud. +// +// [EC2-VPC only] Updates the description of an egress (outbound) security group +// rule. You can replace an existing description, or add a description to a +// rule that did not have one previously. +// +// You specify the description as part of the IP permissions structure. 
You +// can remove a description for a security group rule by omitting the description +// parameter in the request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation UpdateSecurityGroupRuleDescriptionsEgress for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/UpdateSecurityGroupRuleDescriptionsEgress +func (c *EC2) UpdateSecurityGroupRuleDescriptionsEgress(input *UpdateSecurityGroupRuleDescriptionsEgressInput) (*UpdateSecurityGroupRuleDescriptionsEgressOutput, error) { + req, out := c.UpdateSecurityGroupRuleDescriptionsEgressRequest(input) + return out, req.Send() +} + +// UpdateSecurityGroupRuleDescriptionsEgressWithContext is the same as UpdateSecurityGroupRuleDescriptionsEgress with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateSecurityGroupRuleDescriptionsEgress for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) UpdateSecurityGroupRuleDescriptionsEgressWithContext(ctx aws.Context, input *UpdateSecurityGroupRuleDescriptionsEgressInput, opts ...request.Option) (*UpdateSecurityGroupRuleDescriptionsEgressOutput, error) { + req, out := c.UpdateSecurityGroupRuleDescriptionsEgressRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateSecurityGroupRuleDescriptionsIngress = "UpdateSecurityGroupRuleDescriptionsIngress" + +// UpdateSecurityGroupRuleDescriptionsIngressRequest generates a "aws/request.Request" representing the +// client's request for the UpdateSecurityGroupRuleDescriptionsIngress operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateSecurityGroupRuleDescriptionsIngress for more information on using the UpdateSecurityGroupRuleDescriptionsIngress +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateSecurityGroupRuleDescriptionsIngressRequest method. 
+// req, resp := client.UpdateSecurityGroupRuleDescriptionsIngressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/UpdateSecurityGroupRuleDescriptionsIngress +func (c *EC2) UpdateSecurityGroupRuleDescriptionsIngressRequest(input *UpdateSecurityGroupRuleDescriptionsIngressInput) (req *request.Request, output *UpdateSecurityGroupRuleDescriptionsIngressOutput) { + op := &request.Operation{ + Name: opUpdateSecurityGroupRuleDescriptionsIngress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateSecurityGroupRuleDescriptionsIngressInput{} + } + + output = &UpdateSecurityGroupRuleDescriptionsIngressOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateSecurityGroupRuleDescriptionsIngress API operation for Amazon Elastic Compute Cloud. +// +// Updates the description of an ingress (inbound) security group rule. You +// can replace an existing description, or add a description to a rule that +// did not have one previously. +// +// You specify the description as part of the IP permissions structure. You +// can remove a description for a security group rule by omitting the description +// parameter in the request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation UpdateSecurityGroupRuleDescriptionsIngress for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/UpdateSecurityGroupRuleDescriptionsIngress +func (c *EC2) UpdateSecurityGroupRuleDescriptionsIngress(input *UpdateSecurityGroupRuleDescriptionsIngressInput) (*UpdateSecurityGroupRuleDescriptionsIngressOutput, error) { + req, out := c.UpdateSecurityGroupRuleDescriptionsIngressRequest(input) + return out, req.Send() +} + +// UpdateSecurityGroupRuleDescriptionsIngressWithContext is the same as UpdateSecurityGroupRuleDescriptionsIngress with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateSecurityGroupRuleDescriptionsIngress for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) UpdateSecurityGroupRuleDescriptionsIngressWithContext(ctx aws.Context, input *UpdateSecurityGroupRuleDescriptionsIngressInput, opts ...request.Option) (*UpdateSecurityGroupRuleDescriptionsIngressOutput, error) { + req, out := c.UpdateSecurityGroupRuleDescriptionsIngressRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } // Contains the parameters for accepting the quote. @@ -15500,6 +20529,11 @@ func (s *AccountAttributeValue) SetAttributeValue(v string) *AccountAttributeVal type ActiveInstance struct { _ struct{} `type:"structure"` + // The health status of the instance. If the status of either the instance status + // check or the system status check is impaired, the health status of the instance + // is unhealthy. Otherwise, the health status is healthy. 
+ InstanceHealth *string `locationName:"instanceHealth" type:"string" enum:"InstanceHealthStatus"` + // The ID of the instance. InstanceId *string `locationName:"instanceId" type:"string"` @@ -15520,6 +20554,12 @@ func (s ActiveInstance) GoString() string { return s.String() } +// SetInstanceHealth sets the InstanceHealth field's value. +func (s *ActiveInstance) SetInstanceHealth(v string) *ActiveInstance { + s.InstanceHealth = &v + return s +} + // SetInstanceId sets the InstanceId field's value. func (s *ActiveInstance) SetInstanceId(v string) *ActiveInstance { s.InstanceId = &v @@ -15633,6 +20673,9 @@ func (s *Address) SetPublicIp(v string) *Address { type AllocateAddressInput struct { _ struct{} `type:"structure"` + // [EC2-VPC] The Elastic IP address to recover. + Address *string `type:"string"` + // Set to vpc to allocate the address for use with instances in a VPC. // // Default: The address is for use with instances in EC2-Classic. @@ -15655,6 +20698,12 @@ func (s AllocateAddressInput) GoString() string { return s.String() } +// SetAddress sets the Address field's value. +func (s *AllocateAddressInput) SetAddress(v string) *AllocateAddressInput { + s.Address = &v + return s +} + // SetDomain sets the Domain field's value. func (s *AllocateAddressInput) SetDomain(v string) *AllocateAddressInput { s.Domain = &v @@ -16216,6 +21265,83 @@ func (s AssociateDhcpOptionsOutput) GoString() string { return s.String() } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateIamInstanceProfileRequest +type AssociateIamInstanceProfileInput struct { + _ struct{} `type:"structure"` + + // The IAM instance profile. + // + // IamInstanceProfile is a required field + IamInstanceProfile *IamInstanceProfileSpecification `type:"structure" required:"true"` + + // The ID of the instance. + // + // InstanceId is a required field + InstanceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AssociateIamInstanceProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateIamInstanceProfileInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssociateIamInstanceProfileInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssociateIamInstanceProfileInput"} + if s.IamInstanceProfile == nil { + invalidParams.Add(request.NewErrParamRequired("IamInstanceProfile")) + } + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIamInstanceProfile sets the IamInstanceProfile field's value. +func (s *AssociateIamInstanceProfileInput) SetIamInstanceProfile(v *IamInstanceProfileSpecification) *AssociateIamInstanceProfileInput { + s.IamInstanceProfile = v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *AssociateIamInstanceProfileInput) SetInstanceId(v string) *AssociateIamInstanceProfileInput { + s.InstanceId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateIamInstanceProfileResult +type AssociateIamInstanceProfileOutput struct { + _ struct{} `type:"structure"` + + // Information about the IAM instance profile association. 
+ IamInstanceProfileAssociation *IamInstanceProfileAssociation `locationName:"iamInstanceProfileAssociation" type:"structure"` +} + +// String returns the string representation +func (s AssociateIamInstanceProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateIamInstanceProfileOutput) GoString() string { + return s.String() +} + +// SetIamInstanceProfileAssociation sets the IamInstanceProfileAssociation field's value. +func (s *AssociateIamInstanceProfileOutput) SetIamInstanceProfileAssociation(v *IamInstanceProfileAssociation) *AssociateIamInstanceProfileOutput { + s.IamInstanceProfileAssociation = v + return s +} + // Contains the parameters for AssociateRouteTable. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateRouteTableRequest type AssociateRouteTableInput struct { @@ -16402,6 +21528,9 @@ type AssociateVpcCidrBlockInput struct { // CIDR block. AmazonProvidedIpv6CidrBlock *bool `locationName:"amazonProvidedIpv6CidrBlock" type:"boolean"` + // An IPv4 CIDR block to associate with the VPC. + CidrBlock *string `type:"string"` + // The ID of the VPC. // // VpcId is a required field @@ -16437,6 +21566,12 @@ func (s *AssociateVpcCidrBlockInput) SetAmazonProvidedIpv6CidrBlock(v bool) *Ass return s } +// SetCidrBlock sets the CidrBlock field's value. +func (s *AssociateVpcCidrBlockInput) SetCidrBlock(v string) *AssociateVpcCidrBlockInput { + s.CidrBlock = &v + return s +} + // SetVpcId sets the VpcId field's value. func (s *AssociateVpcCidrBlockInput) SetVpcId(v string) *AssociateVpcCidrBlockInput { s.VpcId = &v @@ -16447,6 +21582,9 @@ func (s *AssociateVpcCidrBlockInput) SetVpcId(v string) *AssociateVpcCidrBlockIn type AssociateVpcCidrBlockOutput struct { _ struct{} `type:"structure"` + // Information about the IPv4 CIDR block association. + CidrBlockAssociation *VpcCidrBlockAssociation `locationName:"cidrBlockAssociation" type:"structure"` + // Information about the IPv6 CIDR block association. Ipv6CidrBlockAssociation *VpcIpv6CidrBlockAssociation `locationName:"ipv6CidrBlockAssociation" type:"structure"` @@ -16464,6 +21602,12 @@ func (s AssociateVpcCidrBlockOutput) GoString() string { return s.String() } +// SetCidrBlockAssociation sets the CidrBlockAssociation field's value. +func (s *AssociateVpcCidrBlockOutput) SetCidrBlockAssociation(v *VpcCidrBlockAssociation) *AssociateVpcCidrBlockOutput { + s.CidrBlockAssociation = v + return s +} + // SetIpv6CidrBlockAssociation sets the Ipv6CidrBlockAssociation field's value. func (s *AssociateVpcCidrBlockOutput) SetIpv6CidrBlockAssociation(v *VpcIpv6CidrBlockAssociation) *AssociateVpcCidrBlockOutput { s.Ipv6CidrBlockAssociation = v @@ -17146,12 +22290,16 @@ type AuthorizeSecurityGroupIngressInput struct { // The start of port range for the TCP and UDP protocols, or an ICMP/ICMPv6 // type number. For the ICMP/ICMPv6 type number, use -1 to specify all types. + // If you specify all ICMP/ICMPv6 types, you must specify all codes. FromPort *int64 `type:"integer"` - // The ID of the security group. Required for a nondefault VPC. + // The ID of the security group. You must specify either the security group + // ID or the security group name in the request. For security groups in a nondefault + // VPC, you must specify the security group ID. GroupId *string `type:"string"` - // [EC2-Classic, default VPC] The name of the security group. + // [EC2-Classic, default VPC] The name of the security group. 
You must specify + // either the security group ID or the security group name in the request. GroupName *string `type:"string"` // A set of IP permissions. Can be used to specify multiple rules in a single @@ -17184,7 +22332,8 @@ type AuthorizeSecurityGroupIngressInput struct { SourceSecurityGroupOwnerId *string `type:"string"` // The end of port range for the TCP and UDP protocols, or an ICMP/ICMPv6 code - // number. For the ICMP/ICMPv6 code number, use -1 to specify all codes. + // number. For the ICMP/ICMPv6 code number, use -1 to specify all codes. If + // you specify all ICMP/ICMPv6 types, you must specify all codes. ToPort *int64 `type:"integer"` } @@ -18391,6 +23540,31 @@ func (s *CancelledSpotInstanceRequest) SetState(v string) *CancelledSpotInstance return s } +// Describes an IPv4 CIDR block. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CidrBlock +type CidrBlock struct { + _ struct{} `type:"structure"` + + // The IPv4 CIDR block. + CidrBlock *string `locationName:"cidrBlock" type:"string"` +} + +// String returns the string representation +func (s CidrBlock) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CidrBlock) GoString() string { + return s.String() +} + +// SetCidrBlock sets the CidrBlock field's value. +func (s *CidrBlock) SetCidrBlock(v string) *CidrBlock { + s.CidrBlock = &v + return s +} + // Describes the ClassicLink DNS support status of a VPC. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ClassicLinkDnsSupport type ClassicLinkDnsSupport struct { @@ -18717,6 +23891,123 @@ func (s *ConversionTask) SetTags(v []*Tag) *ConversionTask { return s } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CopyFpgaImageRequest +type CopyFpgaImageInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html). + ClientToken *string `type:"string"` + + // The description for the new AFI. + Description *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The name for the new AFI. The default is the name of the source AFI. + Name *string `type:"string"` + + // The ID of the source AFI. + // + // SourceFpgaImageId is a required field + SourceFpgaImageId *string `type:"string" required:"true"` + + // The region that contains the source AFI. + // + // SourceRegion is a required field + SourceRegion *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CopyFpgaImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyFpgaImageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CopyFpgaImageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CopyFpgaImageInput"} + if s.SourceFpgaImageId == nil { + invalidParams.Add(request.NewErrParamRequired("SourceFpgaImageId")) + } + if s.SourceRegion == nil { + invalidParams.Add(request.NewErrParamRequired("SourceRegion")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *CopyFpgaImageInput) SetClientToken(v string) *CopyFpgaImageInput { + s.ClientToken = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *CopyFpgaImageInput) SetDescription(v string) *CopyFpgaImageInput { + s.Description = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CopyFpgaImageInput) SetDryRun(v bool) *CopyFpgaImageInput { + s.DryRun = &v + return s +} + +// SetName sets the Name field's value. +func (s *CopyFpgaImageInput) SetName(v string) *CopyFpgaImageInput { + s.Name = &v + return s +} + +// SetSourceFpgaImageId sets the SourceFpgaImageId field's value. +func (s *CopyFpgaImageInput) SetSourceFpgaImageId(v string) *CopyFpgaImageInput { + s.SourceFpgaImageId = &v + return s +} + +// SetSourceRegion sets the SourceRegion field's value. +func (s *CopyFpgaImageInput) SetSourceRegion(v string) *CopyFpgaImageInput { + s.SourceRegion = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CopyFpgaImageResult +type CopyFpgaImageOutput struct { + _ struct{} `type:"structure"` + + // The ID of the new AFI. + FpgaImageId *string `locationName:"fpgaImageId" type:"string"` +} + +// String returns the string representation +func (s CopyFpgaImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyFpgaImageOutput) GoString() string { + return s.String() +} + +// SetFpgaImageId sets the FpgaImageId field's value. +func (s *CopyFpgaImageOutput) SetFpgaImageId(v string) *CopyFpgaImageOutput { + s.FpgaImageId = &v + return s +} + // Contains the parameters for CopyImage. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CopyImageRequest type CopyImageInput struct { @@ -19146,6 +24437,59 @@ func (s *CreateCustomerGatewayOutput) SetCustomerGateway(v *CustomerGateway) *Cr return s } +// Contains the parameters for CreateDefaultVpc. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateDefaultVpcRequest +type CreateDefaultVpcInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` +} + +// String returns the string representation +func (s CreateDefaultVpcInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDefaultVpcInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateDefaultVpcInput) SetDryRun(v bool) *CreateDefaultVpcInput { + s.DryRun = &v + return s +} + +// Contains the output of CreateDefaultVpc. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateDefaultVpcResult +type CreateDefaultVpcOutput struct { + _ struct{} `type:"structure"` + + // Information about the VPC. 
+ Vpc *Vpc `locationName:"vpc" type:"structure"` +} + +// String returns the string representation +func (s CreateDefaultVpcOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDefaultVpcOutput) GoString() string { + return s.String() +} + +// SetVpc sets the Vpc field's value. +func (s *CreateDefaultVpcOutput) SetVpc(v *Vpc) *CreateDefaultVpcOutput { + s.Vpc = v + return s +} + // Contains the parameters for CreateDhcpOptions. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateDhcpOptionsRequest type CreateDhcpOptionsInput struct { @@ -19471,6 +24815,128 @@ func (s *CreateFlowLogsOutput) SetUnsuccessful(v []*UnsuccessfulItem) *CreateFlo return s } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateFpgaImageRequest +type CreateFpgaImageInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html). + ClientToken *string `type:"string"` + + // A description for the AFI. + Description *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The location of the encrypted design checkpoint in Amazon S3. The input must + // be a tarball. + // + // InputStorageLocation is a required field + InputStorageLocation *StorageLocation `type:"structure" required:"true"` + + // The location in Amazon S3 for the output logs. + LogsStorageLocation *StorageLocation `type:"structure"` + + // A name for the AFI. + Name *string `type:"string"` +} + +// String returns the string representation +func (s CreateFpgaImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateFpgaImageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateFpgaImageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateFpgaImageInput"} + if s.InputStorageLocation == nil { + invalidParams.Add(request.NewErrParamRequired("InputStorageLocation")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateFpgaImageInput) SetClientToken(v string) *CreateFpgaImageInput { + s.ClientToken = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *CreateFpgaImageInput) SetDescription(v string) *CreateFpgaImageInput { + s.Description = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateFpgaImageInput) SetDryRun(v bool) *CreateFpgaImageInput { + s.DryRun = &v + return s +} + +// SetInputStorageLocation sets the InputStorageLocation field's value. +func (s *CreateFpgaImageInput) SetInputStorageLocation(v *StorageLocation) *CreateFpgaImageInput { + s.InputStorageLocation = v + return s +} + +// SetLogsStorageLocation sets the LogsStorageLocation field's value. 
+func (s *CreateFpgaImageInput) SetLogsStorageLocation(v *StorageLocation) *CreateFpgaImageInput { + s.LogsStorageLocation = v + return s +} + +// SetName sets the Name field's value. +func (s *CreateFpgaImageInput) SetName(v string) *CreateFpgaImageInput { + s.Name = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateFpgaImageResult +type CreateFpgaImageOutput struct { + _ struct{} `type:"structure"` + + // The global FPGA image identifier (AGFI ID). + FpgaImageGlobalId *string `locationName:"fpgaImageGlobalId" type:"string"` + + // The FPGA image identifier (AFI ID). + FpgaImageId *string `locationName:"fpgaImageId" type:"string"` +} + +// String returns the string representation +func (s CreateFpgaImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateFpgaImageOutput) GoString() string { + return s.String() +} + +// SetFpgaImageGlobalId sets the FpgaImageGlobalId field's value. +func (s *CreateFpgaImageOutput) SetFpgaImageGlobalId(v string) *CreateFpgaImageOutput { + s.FpgaImageGlobalId = &v + return s +} + +// SetFpgaImageId sets the FpgaImageId field's value. +func (s *CreateFpgaImageOutput) SetFpgaImageId(v string) *CreateFpgaImageOutput { + s.FpgaImageId = &v + return s +} + // Contains the parameters for CreateImage. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateImageRequest type CreateImageInput struct { @@ -20357,6 +25823,115 @@ func (s *CreateNetworkInterfaceOutput) SetNetworkInterface(v *NetworkInterface) return s } +// Contains the parameters for CreateNetworkInterfacePermission. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateNetworkInterfacePermissionRequest +type CreateNetworkInterfacePermissionInput struct { + _ struct{} `type:"structure"` + + // The AWS account ID. + AwsAccountId *string `type:"string"` + + // The AWS service. Currently not supported. + AwsService *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the network interface. + // + // NetworkInterfaceId is a required field + NetworkInterfaceId *string `type:"string" required:"true"` + + // The type of permission to grant. + // + // Permission is a required field + Permission *string `type:"string" required:"true" enum:"InterfacePermissionType"` +} + +// String returns the string representation +func (s CreateNetworkInterfacePermissionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNetworkInterfacePermissionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateNetworkInterfacePermissionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateNetworkInterfacePermissionInput"} + if s.NetworkInterfaceId == nil { + invalidParams.Add(request.NewErrParamRequired("NetworkInterfaceId")) + } + if s.Permission == nil { + invalidParams.Add(request.NewErrParamRequired("Permission")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAwsAccountId sets the AwsAccountId field's value. 
+func (s *CreateNetworkInterfacePermissionInput) SetAwsAccountId(v string) *CreateNetworkInterfacePermissionInput { + s.AwsAccountId = &v + return s +} + +// SetAwsService sets the AwsService field's value. +func (s *CreateNetworkInterfacePermissionInput) SetAwsService(v string) *CreateNetworkInterfacePermissionInput { + s.AwsService = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateNetworkInterfacePermissionInput) SetDryRun(v bool) *CreateNetworkInterfacePermissionInput { + s.DryRun = &v + return s +} + +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value. +func (s *CreateNetworkInterfacePermissionInput) SetNetworkInterfaceId(v string) *CreateNetworkInterfacePermissionInput { + s.NetworkInterfaceId = &v + return s +} + +// SetPermission sets the Permission field's value. +func (s *CreateNetworkInterfacePermissionInput) SetPermission(v string) *CreateNetworkInterfacePermissionInput { + s.Permission = &v + return s +} + +// Contains the output of CreateNetworkInterfacePermission. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateNetworkInterfacePermissionResult +type CreateNetworkInterfacePermissionOutput struct { + _ struct{} `type:"structure"` + + // Information about the permission for the network interface. + InterfacePermission *NetworkInterfacePermission `locationName:"interfacePermission" type:"structure"` +} + +// String returns the string representation +func (s CreateNetworkInterfacePermissionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNetworkInterfacePermissionOutput) GoString() string { + return s.String() +} + +// SetInterfacePermission sets the InterfacePermission field's value. +func (s *CreateNetworkInterfacePermissionOutput) SetInterfacePermission(v *NetworkInterfacePermission) *CreateNetworkInterfacePermissionOutput { + s.InterfacePermission = v + return s +} + // Contains the parameters for CreatePlacementGroup. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreatePlacementGroupRequest type CreatePlacementGroupInput struct { @@ -21293,6 +26868,9 @@ type CreateVolumeInput struct { // The snapshot from which to create the volume. SnapshotId *string `type:"string"` + // The tags to apply to the volume during creation. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + // The volume type. This can be gp2 for General Purpose SSD, io1 for Provisioned // IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard // for Magnetic volumes. @@ -21366,6 +26944,12 @@ func (s *CreateVolumeInput) SetSnapshotId(v string) *CreateVolumeInput { return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateVolumeInput) SetTagSpecifications(v []*TagSpecification) *CreateVolumeInput { + s.TagSpecifications = v + return s +} + // SetVolumeType sets the VolumeType field's value. func (s *CreateVolumeInput) SetVolumeType(v string) *CreateVolumeInput { s.VolumeType = &v @@ -21781,11 +27365,7 @@ type CreateVpnConnectionInput struct { // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` - // Indicates whether the VPN connection requires static routes. If you are creating - // a VPN connection for a device that does not support BGP, you must specify - // true. - // - // Default: false + // The options for the VPN connection. 
Options *VpnConnectionOptionsSpecification `locationName:"options" type:"structure"` // The type of VPN connection (ipsec.1). @@ -22384,6 +27964,81 @@ func (s *DeleteFlowLogsOutput) SetUnsuccessful(v []*UnsuccessfulItem) *DeleteFlo return s } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteFpgaImageRequest +type DeleteFpgaImageInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the AFI. + // + // FpgaImageId is a required field + FpgaImageId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteFpgaImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFpgaImageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteFpgaImageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteFpgaImageInput"} + if s.FpgaImageId == nil { + invalidParams.Add(request.NewErrParamRequired("FpgaImageId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteFpgaImageInput) SetDryRun(v bool) *DeleteFpgaImageInput { + s.DryRun = &v + return s +} + +// SetFpgaImageId sets the FpgaImageId field's value. +func (s *DeleteFpgaImageInput) SetFpgaImageId(v string) *DeleteFpgaImageInput { + s.FpgaImageId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteFpgaImageResult +type DeleteFpgaImageOutput struct { + _ struct{} `type:"structure"` + + // Is true if the request succeeds, and an error otherwise. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s DeleteFpgaImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFpgaImageOutput) GoString() string { + return s.String() +} + +// SetReturn sets the Return field's value. +func (s *DeleteFpgaImageOutput) SetReturn(v bool) *DeleteFpgaImageOutput { + s.Return = &v + return s +} + // Contains the parameters for DeleteInternetGateway. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteInternetGatewayRequest type DeleteInternetGatewayInput struct { @@ -22812,6 +28467,93 @@ func (s DeleteNetworkInterfaceOutput) GoString() string { return s.String() } +// Contains the parameters for DeleteNetworkInterfacePermission. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteNetworkInterfacePermissionRequest +type DeleteNetworkInterfacePermissionInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // Specify true to remove the permission even if the network interface is attached + // to an instance. + Force *bool `type:"boolean"` + + // The ID of the network interface permission. 
+ // + // NetworkInterfacePermissionId is a required field + NetworkInterfacePermissionId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteNetworkInterfacePermissionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNetworkInterfacePermissionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteNetworkInterfacePermissionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteNetworkInterfacePermissionInput"} + if s.NetworkInterfacePermissionId == nil { + invalidParams.Add(request.NewErrParamRequired("NetworkInterfacePermissionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteNetworkInterfacePermissionInput) SetDryRun(v bool) *DeleteNetworkInterfacePermissionInput { + s.DryRun = &v + return s +} + +// SetForce sets the Force field's value. +func (s *DeleteNetworkInterfacePermissionInput) SetForce(v bool) *DeleteNetworkInterfacePermissionInput { + s.Force = &v + return s +} + +// SetNetworkInterfacePermissionId sets the NetworkInterfacePermissionId field's value. +func (s *DeleteNetworkInterfacePermissionInput) SetNetworkInterfacePermissionId(v string) *DeleteNetworkInterfacePermissionInput { + s.NetworkInterfacePermissionId = &v + return s +} + +// Contains the output for DeleteNetworkInterfacePermission. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteNetworkInterfacePermissionResult +type DeleteNetworkInterfacePermissionOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds, otherwise returns an error. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s DeleteNetworkInterfacePermissionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteNetworkInterfacePermissionOutput) GoString() string { + return s.String() +} + +// SetReturn sets the Return field's value. +func (s *DeleteNetworkInterfacePermissionOutput) SetReturn(v bool) *DeleteNetworkInterfacePermissionOutput { + s.Return = &v + return s +} + // Contains the parameters for DeletePlacementGroup. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeletePlacementGroupRequest type DeletePlacementGroupInput struct { @@ -23283,15 +29025,17 @@ type DeleteTagsInput struct { // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` - // The ID of the resource. For example, ami-1a2b3c4d. You can specify more than - // one resource ID. + // The IDs of one or more resources. // // Resources is a required field Resources []*string `locationName:"resourceId" type:"list" required:"true"` - // One or more tags to delete. If you omit the value parameter, we delete the - // tag regardless of its value. If you specify this parameter with an empty - // string as the value, we delete the key only if its value is an empty string. + // One or more tags to delete. If you omit this parameter, we delete all tags + // for the specified resources. Specify a tag key and an optional tag value + // to delete specific tags. If you specify a tag key without a tag value, we + // delete any tag with this key regardless of its value. 
If you specify a tag + // key with an empty string as the tag value, we delete the tag only if its + // value is an empty string. Tags []*Tag `locationName:"tag" locationNameList:"item" type:"list"` } @@ -24472,6 +30216,9 @@ type DescribeCustomerGatewaysInput struct { // is ipsec.1. // // * tag:key=value - The key/value combination of a tag assigned to the resource. + // Specify the key of the tag in the filter name and the value of the tag + // in the filter value. For example, for the tag Purpose=X, specify tag:Purpose + // for the filter name and X for the filter value. // // * tag-key - The key of a tag assigned to the resource. This filter is // independent of the tag-value filter. For example, if you use both the @@ -24564,6 +30311,9 @@ type DescribeDhcpOptionsInput struct { // * value - The value for one of the options. // // * tag:key=value - The key/value combination of a tag assigned to the resource. + // Specify the key of the tag in the filter name and the value of the tag + // in the filter value. For example, for the tag Purpose=X, specify tag:Purpose + // for the filter name and X for the filter value. // // * tag-key - The key of a tag assigned to the resource. This filter is // independent of the tag-value filter. For example, if you use both the @@ -24721,6 +30471,126 @@ func (s *DescribeEgressOnlyInternetGatewaysOutput) SetNextToken(v string) *Descr return s } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeElasticGpusRequest +type DescribeElasticGpusInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more Elastic GPU IDs. + ElasticGpuIds []*string `locationName:"ElasticGpuId" locationNameList:"item" type:"list"` + + // One or more filters. + // + // * availability-zone - The Availability Zone in which the Elastic GPU resides. + // + // * elastic-gpu-health - The status of the Elastic GPU (OK | IMPAIRED). + // + // * elastic-gpu-state - The state of the Elastic GPU (ATTACHED). + // + // * elastic-gpu-type - The type of Elastic GPU; for example, eg1.medium. + // + // * instance-id - The ID of the instance to which the Elastic GPU is associated. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return in a single call. To retrieve the + // remaining results, make another call with the returned NextToken value. This + // value can be between 5 and 1000. + MaxResults *int64 `type:"integer"` + + // The token to request the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeElasticGpusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeElasticGpusInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeElasticGpusInput) SetDryRun(v bool) *DescribeElasticGpusInput { + s.DryRun = &v + return s +} + +// SetElasticGpuIds sets the ElasticGpuIds field's value. +func (s *DescribeElasticGpusInput) SetElasticGpuIds(v []*string) *DescribeElasticGpusInput { + s.ElasticGpuIds = v + return s +} + +// SetFilters sets the Filters field's value. 
+func (s *DescribeElasticGpusInput) SetFilters(v []*Filter) *DescribeElasticGpusInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeElasticGpusInput) SetMaxResults(v int64) *DescribeElasticGpusInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeElasticGpusInput) SetNextToken(v string) *DescribeElasticGpusInput { + s.NextToken = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeElasticGpusResult +type DescribeElasticGpusOutput struct { + _ struct{} `type:"structure"` + + // Information about the Elastic GPUs. + ElasticGpuSet []*ElasticGpus `locationName:"elasticGpuSet" locationNameList:"item" type:"list"` + + // The total number of items to return. If the total number of items available + // is more than the value specified in max-items then a Next-Token will be provided + // in the output that you can use to resume pagination. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeElasticGpusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeElasticGpusOutput) GoString() string { + return s.String() +} + +// SetElasticGpuSet sets the ElasticGpuSet field's value. +func (s *DescribeElasticGpusOutput) SetElasticGpuSet(v []*ElasticGpus) *DescribeElasticGpusOutput { + s.ElasticGpuSet = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeElasticGpusOutput) SetMaxResults(v int64) *DescribeElasticGpusOutput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeElasticGpusOutput) SetNextToken(v string) *DescribeElasticGpusOutput { + s.NextToken = &v + return s +} + // Contains the parameters for DescribeExportTasks. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeExportTasksRequest type DescribeExportTasksInput struct { @@ -24872,6 +30742,253 @@ func (s *DescribeFlowLogsOutput) SetNextToken(v string) *DescribeFlowLogsOutput return s } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFpgaImageAttributeRequest +type DescribeFpgaImageAttributeInput struct { + _ struct{} `type:"structure"` + + // The AFI attribute. + // + // Attribute is a required field + Attribute *string `type:"string" required:"true" enum:"FpgaImageAttributeName"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the AFI. + // + // FpgaImageId is a required field + FpgaImageId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeFpgaImageAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFpgaImageAttributeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeFpgaImageAttributeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeFpgaImageAttributeInput"} + if s.Attribute == nil { + invalidParams.Add(request.NewErrParamRequired("Attribute")) + } + if s.FpgaImageId == nil { + invalidParams.Add(request.NewErrParamRequired("FpgaImageId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttribute sets the Attribute field's value. +func (s *DescribeFpgaImageAttributeInput) SetAttribute(v string) *DescribeFpgaImageAttributeInput { + s.Attribute = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeFpgaImageAttributeInput) SetDryRun(v bool) *DescribeFpgaImageAttributeInput { + s.DryRun = &v + return s +} + +// SetFpgaImageId sets the FpgaImageId field's value. +func (s *DescribeFpgaImageAttributeInput) SetFpgaImageId(v string) *DescribeFpgaImageAttributeInput { + s.FpgaImageId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFpgaImageAttributeResult +type DescribeFpgaImageAttributeOutput struct { + _ struct{} `type:"structure"` + + // Information about the attribute. + FpgaImageAttribute *FpgaImageAttribute `locationName:"fpgaImageAttribute" type:"structure"` +} + +// String returns the string representation +func (s DescribeFpgaImageAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFpgaImageAttributeOutput) GoString() string { + return s.String() +} + +// SetFpgaImageAttribute sets the FpgaImageAttribute field's value. +func (s *DescribeFpgaImageAttributeOutput) SetFpgaImageAttribute(v *FpgaImageAttribute) *DescribeFpgaImageAttributeOutput { + s.FpgaImageAttribute = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFpgaImagesRequest +type DescribeFpgaImagesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + // + // * create-time - The creation time of the AFI. + // + // * fpga-image-id - The FPGA image identifier (AFI ID). + // + // * fpga-image-global-id - The global FPGA image identifier (AGFI ID). + // + // * name - The name of the AFI. + // + // * owner-id - The AWS account ID of the AFI owner. + // + // * product-code - The product code. + // + // * shell-version - The version of the AWS Shell that was used to create + // the bitstream. + // + // * state - The state of the AFI (pending | failed | available | unavailable). + // + // * tag:key=value - The key/value combination of a tag assigned to the resource. + // Specify the key of the tag in the filter name and the value of the tag + // in the filter value. For example, for the tag Purpose=X, specify tag:Purpose + // for the filter name and X for the filter value. + // + // * tag-key - The key of a tag assigned to the resource. This filter is + // independent of the tag-value filter. For example, if you use both the + // filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources + // assigned both the tag key Purpose (regardless of what the tag's value + // is), and the tag value X (regardless of what the tag's key is). 
If you + // want to list only resources where Purpose is X, see the tag:key=value + // filter. + // + // * tag-value - The value of a tag assigned to the resource. This filter + // is independent of the tag-key filter. + // + // * update-time - The time of the most recent update. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // One or more AFI IDs. + FpgaImageIds []*string `locationName:"FpgaImageId" locationNameList:"item" type:"list"` + + // The maximum number of results to return in a single call. + MaxResults *int64 `min:"5" type:"integer"` + + // The token to retrieve the next page of results. + NextToken *string `min:"1" type:"string"` + + // Filters the AFI by owner. Specify an AWS account ID, self (owner is the sender + // of the request), or an AWS owner alias (valid values are amazon | aws-marketplace). + Owners []*string `locationName:"Owner" locationNameList:"Owner" type:"list"` +} + +// String returns the string representation +func (s DescribeFpgaImagesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFpgaImagesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeFpgaImagesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeFpgaImagesInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeFpgaImagesInput) SetDryRun(v bool) *DescribeFpgaImagesInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeFpgaImagesInput) SetFilters(v []*Filter) *DescribeFpgaImagesInput { + s.Filters = v + return s +} + +// SetFpgaImageIds sets the FpgaImageIds field's value. +func (s *DescribeFpgaImagesInput) SetFpgaImageIds(v []*string) *DescribeFpgaImagesInput { + s.FpgaImageIds = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeFpgaImagesInput) SetMaxResults(v int64) *DescribeFpgaImagesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeFpgaImagesInput) SetNextToken(v string) *DescribeFpgaImagesInput { + s.NextToken = &v + return s +} + +// SetOwners sets the Owners field's value. +func (s *DescribeFpgaImagesInput) SetOwners(v []*string) *DescribeFpgaImagesInput { + s.Owners = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFpgaImagesResult +type DescribeFpgaImagesOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more FPGA images. + FpgaImages []*FpgaImage `locationName:"fpgaImageSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. 
+ NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeFpgaImagesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFpgaImagesOutput) GoString() string { + return s.String() +} + +// SetFpgaImages sets the FpgaImages field's value. +func (s *DescribeFpgaImagesOutput) SetFpgaImages(v []*FpgaImage) *DescribeFpgaImagesOutput { + s.FpgaImages = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeFpgaImagesOutput) SetNextToken(v string) *DescribeFpgaImagesOutput { + s.NextToken = &v + return s +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeHostReservationOfferingsRequest type DescribeHostReservationOfferingsInput struct { _ struct{} `type:"structure"` @@ -24880,8 +30997,7 @@ type DescribeHostReservationOfferingsInput struct { // // * instance-family - The instance family of the offering (e.g., m4). // - // * payment-option - The payment option (No Upfront | Partial Upfront | - // All Upfront). + // * payment-option - The payment option (NoUpfront | PartialUpfront | AllUpfront). Filter []*Filter `locationNameList:"Filter" type:"list"` // This is the maximum duration of the reservation you'd like to purchase, specified @@ -24966,7 +31082,7 @@ type DescribeHostReservationOfferingsOutput struct { NextToken *string `locationName:"nextToken" type:"string"` // Information about the offerings. - OfferingSet []*HostOffering `locationName:"offeringSet" type:"list"` + OfferingSet []*HostOffering `locationName:"offeringSet" locationNameList:"item" type:"list"` } // String returns the string representation @@ -24999,8 +31115,7 @@ type DescribeHostReservationsInput struct { // // * instance-family - The instance family (e.g., m4). // - // * payment-option - The payment option (No Upfront | Partial Upfront | - // All Upfront). + // * payment-option - The payment option (NoUpfront | PartialUpfront | AllUpfront). // // * state - The state of the reservation (payment-pending | payment-failed // | active | retired). @@ -25058,7 +31173,7 @@ type DescribeHostReservationsOutput struct { _ struct{} `type:"structure"` // Details about the reservation's configuration. - HostReservationSet []*HostReservation `locationName:"hostReservationSet" type:"list"` + HostReservationSet []*HostReservation `locationName:"hostReservationSet" locationNameList:"item" type:"list"` // The token to use to retrieve the next page of results. This value is null // when there are no more results to return. @@ -25194,6 +31309,113 @@ func (s *DescribeHostsOutput) SetNextToken(v string) *DescribeHostsOutput { return s } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeIamInstanceProfileAssociationsRequest +type DescribeIamInstanceProfileAssociationsInput struct { + _ struct{} `type:"structure"` + + // One or more IAM instance profile associations. + AssociationIds []*string `locationName:"AssociationId" locationNameList:"AssociationId" type:"list"` + + // One or more filters. + // + // * instance-id - The ID of the instance. + // + // * state - The state of the association (associating | associated | disassociating + // | disassociated). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return in a single call. To retrieve the + // remaining results, make another call with the returned NextToken value. 
+ MaxResults *int64 `min:"5" type:"integer"` + + // The token to request the next page of results. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeIamInstanceProfileAssociationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeIamInstanceProfileAssociationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeIamInstanceProfileAssociationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeIamInstanceProfileAssociationsInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAssociationIds sets the AssociationIds field's value. +func (s *DescribeIamInstanceProfileAssociationsInput) SetAssociationIds(v []*string) *DescribeIamInstanceProfileAssociationsInput { + s.AssociationIds = v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeIamInstanceProfileAssociationsInput) SetFilters(v []*Filter) *DescribeIamInstanceProfileAssociationsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeIamInstanceProfileAssociationsInput) SetMaxResults(v int64) *DescribeIamInstanceProfileAssociationsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeIamInstanceProfileAssociationsInput) SetNextToken(v string) *DescribeIamInstanceProfileAssociationsInput { + s.NextToken = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeIamInstanceProfileAssociationsResult +type DescribeIamInstanceProfileAssociationsOutput struct { + _ struct{} `type:"structure"` + + // Information about one or more IAM instance profile associations. + IamInstanceProfileAssociations []*IamInstanceProfileAssociation `locationName:"iamInstanceProfileAssociationSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeIamInstanceProfileAssociationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeIamInstanceProfileAssociationsOutput) GoString() string { + return s.String() +} + +// SetIamInstanceProfileAssociations sets the IamInstanceProfileAssociations field's value. +func (s *DescribeIamInstanceProfileAssociationsOutput) SetIamInstanceProfileAssociations(v []*IamInstanceProfileAssociation) *DescribeIamInstanceProfileAssociationsOutput { + s.IamInstanceProfileAssociations = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeIamInstanceProfileAssociationsOutput) SetNextToken(v string) *DescribeIamInstanceProfileAssociationsOutput { + s.NextToken = &v + return s +} + // Contains the parameters for DescribeIdFormat. 
// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeIdFormatRequest type DescribeIdFormatInput struct { @@ -25557,6 +31779,9 @@ type DescribeImagesInput struct { // * state-reason-message - The message for the state change. // // * tag:key=value - The key/value combination of a tag assigned to the resource. + // Specify the key of the tag in the filter name and the value of the tag + // in the filter value. For example, for the tag Purpose=X, specify tag:Purpose + // for the filter name and X for the filter value. // // * tag-key - The key of a tag assigned to the resource. This filter is // independent of the tag-value filter. For example, if you use both the @@ -25932,7 +32157,7 @@ type DescribeInstanceAttributeOutput struct { // EC2 console, CLI, or API; otherwise, you can. DisableApiTermination *AttributeBooleanValue `locationName:"disableApiTermination" type:"structure"` - // Indicates whether the instance is optimized for EBS I/O. + // Indicates whether the instance is optimized for Amazon EBS I/O. EbsOptimized *AttributeBooleanValue `locationName:"ebsOptimized" type:"structure"` // Indicates whether enhanced networking with ENA is enabled. @@ -25964,8 +32189,8 @@ type DescribeInstanceAttributeOutput struct { RootDeviceName *AttributeValue `locationName:"rootDeviceName" type:"structure"` // Indicates whether source/destination checking is enabled. A value of true - // means checking is enabled, and false means checking is disabled. This value - // must be false for a NAT instance to perform NAT. + // means that checking is enabled, and false means that checking is disabled. + // This value must be false for a NAT instance to perform NAT. SourceDestCheck *AttributeBooleanValue `locationName:"sourceDestCheck" type:"structure"` // Indicates whether enhanced networking with the Intel 82599 Virtual Function @@ -26246,18 +32471,6 @@ type DescribeInstancesInput struct { // // * architecture - The instance architecture (i386 | x86_64). // - // * association.public-ip - The address of the Elastic IP address (IPv4) - // bound to the network interface. - // - // * association.ip-owner-id - The owner of the Elastic IP address (IPv4) - // associated with the network interface. - // - // * association.allocation-id - The allocation ID returned when you allocated - // the Elastic IP address (IPv4) for your network interface. - // - // * association.association-id - The association ID returned when the network - // interface was associated with an IPv4 address. - // // * availability-zone - The Availability Zone of the instance. // // * block-device-mapping.attach-time - The attach time for an EBS volume @@ -26343,6 +32556,18 @@ type DescribeInstancesInput struct { // * network-interface.addresses.association.ip-owner-id - The owner ID of // the private IPv4 address associated with the network interface. // + // * network-interface.association.public-ip - The address of the Elastic + // IP address (IPv4) bound to the network interface. + // + // * network-interface.association.ip-owner-id - The owner of the Elastic + // IP address (IPv4) associated with the network interface. + // + // * network-interface.association.allocation-id - The allocation ID returned + // when you allocated the Elastic IP address (IPv4) for your network interface. + // + // * network-interface.association.association-id - The association ID returned + // when the network interface was associated with an IPv4 address. 
+ // // * network-interface.attachment.attachment-id - The ID of the interface // attachment. // @@ -26396,10 +32621,10 @@ type DescribeInstancesInput struct { // | in-use). // // * network-interface.source-dest-check - Whether the network interface - // performs source/destination checking. A value of true means checking is - // enabled, and false means checking is disabled. The value must be false - // for the network interface to perform network address translation (NAT) - // in your VPC. + // performs source/destination checking. A value of true means that checking + // is enabled, and false means that checking is disabled. The value must + // be false for the network interface to perform network address translation + // (NAT) in your VPC. // // * network-interface.subnet-id - The ID of the subnet for the network interface. // @@ -26434,9 +32659,9 @@ type DescribeInstancesInput struct { // ID is created any time you launch an instance. A reservation ID has a // one-to-one relationship with an instance launch request, but can be associated // with more than one instance if you launch multiple instances using the - // same launch request. For example, if you launch one instance, you'll get + // same launch request. For example, if you launch one instance, you get // one reservation ID. If you launch ten instances using the same launch - // request, you'll also get one reservation ID. + // request, you also get one reservation ID. // // * root-device-name - The name of the root device for the instance (for // example, /dev/sda1 or /dev/xvda). @@ -26446,10 +32671,10 @@ type DescribeInstancesInput struct { // // * source-dest-check - Indicates whether the instance performs source/destination // checking. A value of true means that checking is enabled, and false means - // checking is disabled. The value must be false for the instance to perform - // network address translation (NAT) in your VPC. + // that checking is disabled. The value must be false for the instance to + // perform network address translation (NAT) in your VPC. // - // * spot-instance-request-id - The ID of the Spot instance request. + // * spot-instance-request-id - The ID of the Spot Instance request. // // * state-reason-code - The reason code for the state change. // @@ -26457,16 +32682,17 @@ type DescribeInstancesInput struct { // // * subnet-id - The ID of the subnet for the instance. // - // * tag:key=value - The key/value combination of a tag assigned to the resource, - // where tag:key is the tag's key. + // * tag:key=value - The key/value combination of a tag assigned to the resource. + // Specify the key of the tag in the filter name and the value of the tag + // in the filter value. For example, for the tag Purpose=X, specify tag:Purpose + // for the filter name and X for the filter value. // // * tag-key - The key of a tag assigned to the resource. This filter is // independent of the tag-value filter. For example, if you use both the // filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources // assigned both the tag key Purpose (regardless of what the tag's value - // is), and the tag value X (regardless of what the tag's key is). If you - // want to list only resources where Purpose is X, see the tag:key=value - // filter. + // is), and the tag value X (regardless of the tag's key). If you want to + // list only resources where Purpose is X, see the tag:key=value filter. // // * tag-value - The value of a tag assigned to the resource. This filter // is independent of the tag-key filter. 
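The tag:key=value filter documented in the hunk above maps onto the SDK's Filter type in a direct way. The following is a minimal illustrative sketch, not taken from the vendored sources, assuming default AWS credentials and region are already configured; the tag Purpose=X is simply the placeholder used in the generated comments.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// EC2 client using the default credential/region chain.
	svc := ec2.New(session.Must(session.NewSession()))

	// Filter instances by the example tag Purpose=X, mirroring the
	// tag:key=value filter described in the comments above.
	out, err := svc.DescribeInstances(&ec2.DescribeInstancesInput{
		Filters: []*ec2.Filter{{
			Name:   aws.String("tag:Purpose"),
			Values: []*string{aws.String("X")},
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range out.Reservations {
		for _, i := range r.Instances {
			fmt.Println(aws.StringValue(i.InstanceId))
		}
	}
}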
@@ -26590,6 +32816,9 @@ type DescribeInternetGatewaysInput struct { // * internet-gateway-id - The ID of the Internet gateway. // // * tag:key=value - The key/value combination of a tag assigned to the resource. + // Specify the key of the tag in the filter name and the value of the tag + // in the filter value. For example, for the tag Purpose=X, specify tag:Purpose + // for the filter name and X for the filter value. // // * tag-key - The key of a tag assigned to the resource. This filter is // independent of the tag-value filter. For example, if you use both the @@ -26860,6 +33089,22 @@ type DescribeNatGatewaysInput struct { // // * subnet-id - The ID of the subnet in which the NAT gateway resides. // + // * tag:key=value - The key/value combination of a tag assigned to the resource. + // Specify the key of the tag in the filter name and the value of the tag + // in the filter value. For example, for the tag Purpose=X, specify tag:Purpose + // for the filter name and X for the filter value. + // + // * tag-key - The key of a tag assigned to the resource. This filter is + // independent of the tag-value filter. For example, if you use both the + // filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources + // assigned both the tag key Purpose (regardless of what the tag's value + // is), and the tag value X (regardless of what the tag's key is). If you + // want to list only resources where Purpose is X, see the tag:key=value + // filter. + // + // * tag-value - The value of a tag assigned to the resource. This filter + // is independent of the tag-key filter. + // // * vpc-id - The ID of the VPC in which the NAT gateway resides. Filter []*Filter `locationNameList:"Filter" type:"list"` @@ -26997,6 +33242,9 @@ type DescribeNetworkAclsInput struct { // * network-acl-id - The ID of the network ACL. // // * tag:key=value - The key/value combination of a tag assigned to the resource. + // Specify the key of the tag in the filter name and the value of the tag + // in the filter value. For example, for the tag Purpose=X, specify tag:Purpose + // for the filter name and X for the filter value. // // * tag-key - The key of a tag assigned to the resource. This filter is // independent of the tag-value filter. For example, if you use both the @@ -27076,7 +33324,7 @@ func (s *DescribeNetworkAclsOutput) SetNetworkAcls(v []*NetworkAcl) *DescribeNet type DescribeNetworkInterfaceAttributeInput struct { _ struct{} `type:"structure"` - // The attribute of the network interface. + // The attribute of the network interface. This parameter is required. Attribute *string `locationName:"attribute" type:"string" enum:"NetworkInterfaceAttribute"` // Checks whether you have the required permissions for the action, without @@ -27193,6 +33441,107 @@ func (s *DescribeNetworkInterfaceAttributeOutput) SetSourceDestCheck(v *Attribut return s } +// Contains the parameters for DescribeNetworkInterfacePermissions. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeNetworkInterfacePermissionsRequest +type DescribeNetworkInterfacePermissionsInput struct { + _ struct{} `type:"structure"` + + // One or more filters. + // + // * network-interface-permission.network-interface-permission-id - The ID + // of the permission. + // + // * network-interface-permission.network-interface-id - The ID of the network + // interface. + // + // * network-interface-permission.aws-account-id - The AWS account ID. + // + // * network-interface-permission.aws-service - The AWS service. 
+ // + // * network-interface-permission.permission - The type of permission (INSTANCE-ATTACH + // | EIP-ASSOCIATE). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return in a single call. To retrieve the + // remaining results, make another call with the returned NextToken value. If + // this parameter is not specified, up to 50 results are returned by default. + MaxResults *int64 `type:"integer"` + + // One or more network interface permission IDs. + NetworkInterfacePermissionIds []*string `locationName:"NetworkInterfacePermissionId" type:"list"` + + // The token to request the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeNetworkInterfacePermissionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNetworkInterfacePermissionsInput) GoString() string { + return s.String() +} + +// SetFilters sets the Filters field's value. +func (s *DescribeNetworkInterfacePermissionsInput) SetFilters(v []*Filter) *DescribeNetworkInterfacePermissionsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeNetworkInterfacePermissionsInput) SetMaxResults(v int64) *DescribeNetworkInterfacePermissionsInput { + s.MaxResults = &v + return s +} + +// SetNetworkInterfacePermissionIds sets the NetworkInterfacePermissionIds field's value. +func (s *DescribeNetworkInterfacePermissionsInput) SetNetworkInterfacePermissionIds(v []*string) *DescribeNetworkInterfacePermissionsInput { + s.NetworkInterfacePermissionIds = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeNetworkInterfacePermissionsInput) SetNextToken(v string) *DescribeNetworkInterfacePermissionsInput { + s.NextToken = &v + return s +} + +// Contains the output for DescribeNetworkInterfacePermissions. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeNetworkInterfacePermissionsResult +type DescribeNetworkInterfacePermissionsOutput struct { + _ struct{} `type:"structure"` + + // The network interface permissions. + NetworkInterfacePermissions []*NetworkInterfacePermission `locationName:"networkInterfacePermissions" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeNetworkInterfacePermissionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNetworkInterfacePermissionsOutput) GoString() string { + return s.String() +} + +// SetNetworkInterfacePermissions sets the NetworkInterfacePermissions field's value. +func (s *DescribeNetworkInterfacePermissionsOutput) SetNetworkInterfacePermissions(v []*NetworkInterfacePermission) *DescribeNetworkInterfacePermissionsOutput { + s.NetworkInterfacePermissions = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeNetworkInterfacePermissionsOutput) SetNextToken(v string) *DescribeNetworkInterfacePermissionsOutput { + s.NextToken = &v + return s +} + // Contains the parameters for DescribeNetworkInterfaces. 
// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeNetworkInterfacesRequest type DescribeNetworkInterfacesInput struct { @@ -27299,6 +33648,9 @@ type DescribeNetworkInterfacesInput struct { // * subnet-id - The ID of the subnet for the network interface. // // * tag:key=value - The key/value combination of a tag assigned to the resource. + // Specify the key of the tag in the filter name and the value of the tag + // in the filter value. For example, for the tag Purpose=X, specify tag:Purpose + // for the filter name and X for the filter value. // // * tag-key - The key of a tag assigned to the resource. This filter is // independent of the tag-value filter. For example, if you use both the @@ -27684,6 +34036,9 @@ type DescribeReservedInstancesInput struct { // | payment-failed | retired). // // * tag:key=value - The key/value combination of a tag assigned to the resource. + // Specify the key of the tag in the filter name and the value of the tag + // in the filter value. For example, for the tag Purpose=X, specify tag:Purpose + // for the filter name and X for the filter value. // // * tag-key - The key of a tag assigned to the resource. This filter is // independent of the tag-value filter. For example, if you use both the @@ -28000,6 +34355,9 @@ type DescribeReservedInstancesOfferingsInput struct { // with a tenancy of dedicated is applied to instances that run in a VPC on // single-tenant hardware (i.e., Dedicated Instances). // + // Important: The host value cannot be used with this parameter. Use the default + // or dedicated values only. + // // Default: default InstanceTenancy *string `locationName:"instanceTenancy" type:"string" enum:"Tenancy"` @@ -28231,7 +34589,8 @@ type DescribeRouteTablesInput struct { // * association.subnet-id - The ID of the subnet involved in the association. // // * association.main - Indicates whether the route table is the main route - // table for the VPC (true | false). + // table for the VPC (true | false). Route tables that do not have an association + // ID are not returned in the response. // // * route-table-id - The ID of the route table. // @@ -28269,6 +34628,9 @@ type DescribeRouteTablesInput struct { // specified in a route in the table. // // * tag:key=value - The key/value combination of a tag assigned to the resource. + // Specify the key of the tag in the filter name and the value of the tag + // in the filter value. For example, for the tag Purpose=X, specify tag:Purpose + // for the filter name and X for the filter value. // // * tag-key - The key of a tag assigned to the resource. This filter is // independent of the tag-value filter. For example, if you use both the @@ -28958,7 +35320,7 @@ type DescribeSnapshotsInput struct { // // * owner-alias - Value from an Amazon-maintained list (amazon | aws-marketplace // | microsoft) of snapshot owners. Not to be confused with the user-configured - // AWS account alias, which is set from the IAM consolew. + // AWS account alias, which is set from the IAM console. // // * owner-id - The ID of the AWS account that owns the snapshot. // @@ -28972,6 +35334,9 @@ type DescribeSnapshotsInput struct { // * status - The status of the snapshot (pending | completed | error). // // * tag:key=value - The key/value combination of a tag assigned to the resource. + // Specify the key of the tag in the filter name and the value of the tag + // in the filter value. For example, for the tag Purpose=X, specify tag:Purpose + // for the filter name and X for the filter value. 
// // * tag-key - The key of a tag assigned to the resource. This filter is // independent of the tag-value filter. For example, if you use both the @@ -29596,34 +35961,34 @@ type DescribeSpotInstanceRequestsInput struct { // // * launch.key-name - The name of the key pair the instance launched with. // - // * launch.monitoring-enabled - Whether monitoring is enabled for the Spot - // instance. + // * launch.monitoring-enabled - Whether detailed monitoring is enabled for + // the Spot instance. // // * launch.ramdisk-id - The RAM disk ID. // - // * network-interface.network-interface-id - The ID of the network interface. + // * launched-availability-zone - The Availability Zone in which the bid + // is launched. // - // * network-interface.device-index - The index of the device for the network - // interface attachment on the instance. + // * network-interface.addresses.primary - Indicates whether the IP address + // is the primary private IP address. // - // * network-interface.subnet-id - The ID of the subnet for the instance. + // * network-interface.delete-on-termination - Indicates whether the network + // interface is deleted when the instance is terminated. // // * network-interface.description - A description of the network interface. // - // * network-interface.private-ip-address - The primary private IP address - // of the network interface. - // - // * network-interface.delete-on-termination - Indicates whether the network - // interface is deleted when the instance is terminated. + // * network-interface.device-index - The index of the device for the network + // interface attachment on the instance. // // * network-interface.group-id - The ID of the security group associated // with the network interface. // - // * network-interface.group-name - The name of the security group associated - // with the network interface. + // * network-interface.network-interface-id - The ID of the network interface. // - // * network-interface.addresses.primary - Indicates whether the IP address - // is the primary private IP address. + // * network-interface.private-ip-address - The primary private IP address + // of the network interface. + // + // * network-interface.subnet-id - The ID of the subnet for the instance. // // * product-description - The product description associated with the instance // (Linux/UNIX | Windows). @@ -29646,6 +36011,9 @@ type DescribeSpotInstanceRequestsInput struct { // request. // // * tag:key=value - The key/value combination of a tag assigned to the resource. + // Specify the key of the tag in the filter name and the value of the tag + // in the filter value. For example, for the tag Purpose=X, specify tag:Purpose + // for the filter name and X for the filter value. // // * tag-key - The key of a tag assigned to the resource. This filter is // independent of the tag-value filter. For example, if you use both the @@ -29660,9 +36028,6 @@ type DescribeSpotInstanceRequestsInput struct { // // * type - The type of Spot instance request (one-time | persistent). // - // * launched-availability-zone - The Availability Zone in which the bid - // is launched. - // // * valid-from - The start date of the request. // // * valid-until - The end date of the request. @@ -30033,6 +36398,9 @@ type DescribeSubnetsInput struct { // * subnet-id - The ID of the subnet. // // * tag:key=value - The key/value combination of a tag assigned to the resource. + // Specify the key of the tag in the filter name and the value of the tag + // in the filter value. 
For example, for the tag Purpose=X, specify tag:Purpose + // for the filter name and X for the filter value. // // * tag-key - The key of a tag assigned to the resource. This filter is // independent of the tag-value filter. For example, if you use both the @@ -30215,7 +36583,7 @@ func (s *DescribeTagsOutput) SetTags(v []*TagDescription) *DescribeTagsOutput { type DescribeVolumeAttributeInput struct { _ struct{} `type:"structure"` - // The instance attribute. + // The attribute of the volume. This parameter is required. Attribute *string `type:"string" enum:"VolumeAttributeName"` // Checks whether you have the required permissions for the action, without @@ -30496,6 +36864,9 @@ type DescribeVolumesInput struct { // | deleted | error). // // * tag:key=value - The key/value combination of a tag assigned to the resource. + // Specify the key of the tag in the filter name and the value of the tag + // in the filter value. For example, for the tag Purpose=X, specify tag:Purpose + // for the filter name and X for the filter value. // // * tag-key - The key of a tag assigned to the resource. This filter is // independent of the tag-value filter. For example, if you use both the @@ -30520,10 +36891,10 @@ type DescribeVolumesInput struct { // results in a single page along with a NextToken response element. The remaining // results of the initial request can be seen by sending another DescribeVolumes // request with the returned NextToken value. This value can be between 5 and - // 1000; if MaxResults is given a value larger than 1000, only 1000 results - // are returned. If this parameter is not used, then DescribeVolumes returns - // all results. You cannot specify this parameter and the volume IDs parameter - // in the same request. + // 500; if MaxResults is given a value larger than 500, only 500 results are + // returned. If this parameter is not used, then DescribeVolumes returns all + // results. You cannot specify this parameter and the volume IDs parameter in + // the same request. MaxResults *int64 `locationName:"maxResults" type:"integer"` // The NextToken value returned from a previous paginated DescribeVolumes request @@ -30576,6 +36947,105 @@ func (s *DescribeVolumesInput) SetVolumeIds(v []*string) *DescribeVolumesInput { return s } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVolumesModificationsRequest +type DescribeVolumesModificationsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. Supported filters: volume-id, modification-state, target-size, + // target-iops, target-volume-type, original-size, original-iops, original-volume-type, + // start-time. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results (up to a limit of 500) to be returned in a + // paginated request. + MaxResults *int64 `type:"integer"` + + // The nextToken value returned by a previous paginated request. + NextToken *string `type:"string"` + + // One or more volume IDs for which in-progress modifications will be described. 
+ VolumeIds []*string `locationName:"VolumeId" locationNameList:"VolumeId" type:"list"` +} + +// String returns the string representation +func (s DescribeVolumesModificationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVolumesModificationsInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeVolumesModificationsInput) SetDryRun(v bool) *DescribeVolumesModificationsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeVolumesModificationsInput) SetFilters(v []*Filter) *DescribeVolumesModificationsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeVolumesModificationsInput) SetMaxResults(v int64) *DescribeVolumesModificationsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeVolumesModificationsInput) SetNextToken(v string) *DescribeVolumesModificationsInput { + s.NextToken = &v + return s +} + +// SetVolumeIds sets the VolumeIds field's value. +func (s *DescribeVolumesModificationsInput) SetVolumeIds(v []*string) *DescribeVolumesModificationsInput { + s.VolumeIds = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVolumesModificationsResult +type DescribeVolumesModificationsOutput struct { + _ struct{} `type:"structure"` + + // Token for pagination, null if there are no more results + NextToken *string `locationName:"nextToken" type:"string"` + + // A list of returned VolumeModification objects. + VolumesModifications []*VolumeModification `locationName:"volumeModificationSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVolumesModificationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVolumesModificationsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeVolumesModificationsOutput) SetNextToken(v string) *DescribeVolumesModificationsOutput { + s.NextToken = &v + return s +} + +// SetVolumesModifications sets the VolumesModifications field's value. +func (s *DescribeVolumesModificationsOutput) SetVolumesModifications(v []*VolumeModification) *DescribeVolumesModificationsOutput { + s.VolumesModifications = v + return s +} + // Contains the output of DescribeVolumes. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeVolumesResult type DescribeVolumesOutput struct { @@ -30839,6 +37309,9 @@ type DescribeVpcClassicLinkInput struct { // (true | false). // // * tag:key=value - The key/value combination of a tag assigned to the resource. + // Specify the key of the tag in the filter name and the value of the tag + // in the filter value. For example, for the tag Purpose=X, specify tag:Purpose + // for the filter name and X for the filter value. // // * tag-key - The key of a tag assigned to the resource. This filter is // independent of the tag-value filter. For example, if you use both the @@ -31145,6 +37618,9 @@ type DescribeVpcPeeringConnectionsInput struct { // status of the VPC peering connection, if applicable. // // * tag:key=value - The key/value combination of a tag assigned to the resource. + // Specify the key of the tag in the filter name and the value of the tag + // in the filter value. 
For example, for the tag Purpose=X, specify tag:Purpose + // for the filter name and X for the filter value. // // * tag-key - The key of a tag assigned to the resource. This filter is // independent of the tag-value filter. For example, if you use both the @@ -31232,10 +37708,19 @@ type DescribeVpcsInput struct { // One or more filters. // - // * cidr - The IPv4 CIDR block of the VPC. The CIDR block you specify must - // exactly match the VPC's CIDR block for information to be returned for - // the VPC. Must contain the slash followed by one or two digits (for example, - // /28). + // * cidr - The primary IPv4 CIDR block of the VPC. The CIDR block you specify + // must exactly match the VPC's CIDR block for information to be returned + // for the VPC. Must contain the slash followed by one or two digits (for + // example, /28). + // + // * cidr-block-association.cidr-block - An IPv4 CIDR block associated with + // the VPC. + // + // * cidr-block-association.association-id - The association ID for an IPv4 + // CIDR block associated with the VPC. + // + // * cidr-block-association.state - The state of an IPv4 CIDR block associated + // with the VPC. // // * dhcp-options-id - The ID of a set of DHCP options. // @@ -31253,6 +37738,9 @@ type DescribeVpcsInput struct { // * state - The state of the VPC (pending | available). // // * tag:key=value - The key/value combination of a tag assigned to the resource. + // Specify the key of the tag in the filter name and the value of the tag + // in the filter value. For example, for the tag Purpose=X, specify tag:Purpose + // for the filter name and X for the filter value. // // * tag-key - The key of a tag assigned to the resource. This filter is // independent of the tag-value filter. For example, if you use both the @@ -31360,6 +37848,9 @@ type DescribeVpnConnectionsInput struct { // device. // // * tag:key=value - The key/value combination of a tag assigned to the resource. + // Specify the key of the tag in the filter name and the value of the tag + // in the filter value. For example, for the tag Purpose=X, specify tag:Purpose + // for the filter name and X for the filter value. // // * tag-key - The key of a tag assigned to the resource. This filter is // independent of the tag-value filter. For example, if you use both the @@ -31465,6 +37956,9 @@ type DescribeVpnGatewaysInput struct { // | deleting | deleted). // // * tag:key=value - The key/value combination of a tag assigned to the resource. + // Specify the key of the tag in the filter name and the value of the tag + // in the filter value. For example, for the tag Purpose=X, specify tag:Purpose + // for the filter name and X for the filter value. // // * tag-key - The key of a tag assigned to the resource. This filter is // independent of the tag-value filter. For example, if you use both the @@ -32290,6 +38784,69 @@ func (s DisassociateAddressOutput) GoString() string { return s.String() } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateIamInstanceProfileRequest +type DisassociateIamInstanceProfileInput struct { + _ struct{} `type:"structure"` + + // The ID of the IAM instance profile association. 
+ // + // AssociationId is a required field + AssociationId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DisassociateIamInstanceProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateIamInstanceProfileInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DisassociateIamInstanceProfileInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisassociateIamInstanceProfileInput"} + if s.AssociationId == nil { + invalidParams.Add(request.NewErrParamRequired("AssociationId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAssociationId sets the AssociationId field's value. +func (s *DisassociateIamInstanceProfileInput) SetAssociationId(v string) *DisassociateIamInstanceProfileInput { + s.AssociationId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateIamInstanceProfileResult +type DisassociateIamInstanceProfileOutput struct { + _ struct{} `type:"structure"` + + // Information about the IAM instance profile association. + IamInstanceProfileAssociation *IamInstanceProfileAssociation `locationName:"iamInstanceProfileAssociation" type:"structure"` +} + +// String returns the string representation +func (s DisassociateIamInstanceProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateIamInstanceProfileOutput) GoString() string { + return s.String() +} + +// SetIamInstanceProfileAssociation sets the IamInstanceProfileAssociation field's value. +func (s *DisassociateIamInstanceProfileOutput) SetIamInstanceProfileAssociation(v *IamInstanceProfileAssociation) *DisassociateIamInstanceProfileOutput { + s.IamInstanceProfileAssociation = v + return s +} + // Contains the parameters for DisassociateRouteTable. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateRouteTableRequest type DisassociateRouteTableInput struct { @@ -32473,6 +39030,9 @@ func (s *DisassociateVpcCidrBlockInput) SetAssociationId(v string) *Disassociate type DisassociateVpcCidrBlockOutput struct { _ struct{} `type:"structure"` + // Information about the IPv4 CIDR block association. + CidrBlockAssociation *VpcCidrBlockAssociation `locationName:"cidrBlockAssociation" type:"structure"` + // Information about the IPv6 CIDR block association. Ipv6CidrBlockAssociation *VpcIpv6CidrBlockAssociation `locationName:"ipv6CidrBlockAssociation" type:"structure"` @@ -32490,6 +39050,12 @@ func (s DisassociateVpcCidrBlockOutput) GoString() string { return s.String() } +// SetCidrBlockAssociation sets the CidrBlockAssociation field's value. +func (s *DisassociateVpcCidrBlockOutput) SetCidrBlockAssociation(v *VpcCidrBlockAssociation) *DisassociateVpcCidrBlockOutput { + s.CidrBlockAssociation = v + return s +} + // SetIpv6CidrBlockAssociation sets the Ipv6CidrBlockAssociation field's value. func (s *DisassociateVpcCidrBlockOutput) SetIpv6CidrBlockAssociation(v *VpcIpv6CidrBlockAssociation) *DisassociateVpcCidrBlockOutput { s.Ipv6CidrBlockAssociation = v @@ -32956,6 +39522,193 @@ func (s *EgressOnlyInternetGateway) SetEgressOnlyInternetGatewayId(v string) *Eg return s } +// Describes the association between an instance and an Elastic GPU. 
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ElasticGpuAssociation +type ElasticGpuAssociation struct { + _ struct{} `type:"structure"` + + // The ID of the association. + ElasticGpuAssociationId *string `locationName:"elasticGpuAssociationId" type:"string"` + + // The state of the association between the instance and the Elastic GPU. + ElasticGpuAssociationState *string `locationName:"elasticGpuAssociationState" type:"string"` + + // The time the Elastic GPU was associated with the instance. + ElasticGpuAssociationTime *string `locationName:"elasticGpuAssociationTime" type:"string"` + + // The ID of the Elastic GPU. + ElasticGpuId *string `locationName:"elasticGpuId" type:"string"` +} + +// String returns the string representation +func (s ElasticGpuAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ElasticGpuAssociation) GoString() string { + return s.String() +} + +// SetElasticGpuAssociationId sets the ElasticGpuAssociationId field's value. +func (s *ElasticGpuAssociation) SetElasticGpuAssociationId(v string) *ElasticGpuAssociation { + s.ElasticGpuAssociationId = &v + return s +} + +// SetElasticGpuAssociationState sets the ElasticGpuAssociationState field's value. +func (s *ElasticGpuAssociation) SetElasticGpuAssociationState(v string) *ElasticGpuAssociation { + s.ElasticGpuAssociationState = &v + return s +} + +// SetElasticGpuAssociationTime sets the ElasticGpuAssociationTime field's value. +func (s *ElasticGpuAssociation) SetElasticGpuAssociationTime(v string) *ElasticGpuAssociation { + s.ElasticGpuAssociationTime = &v + return s +} + +// SetElasticGpuId sets the ElasticGpuId field's value. +func (s *ElasticGpuAssociation) SetElasticGpuId(v string) *ElasticGpuAssociation { + s.ElasticGpuId = &v + return s +} + +// Describes the status of an Elastic GPU. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ElasticGpuHealth +type ElasticGpuHealth struct { + _ struct{} `type:"structure"` + + // The health status. + Status *string `locationName:"status" type:"string" enum:"ElasticGpuStatus"` +} + +// String returns the string representation +func (s ElasticGpuHealth) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ElasticGpuHealth) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *ElasticGpuHealth) SetStatus(v string) *ElasticGpuHealth { + s.Status = &v + return s +} + +// A specification for an Elastic GPU. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ElasticGpuSpecification +type ElasticGpuSpecification struct { + _ struct{} `type:"structure"` + + // The type of Elastic GPU. + // + // Type is a required field + Type *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ElasticGpuSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ElasticGpuSpecification) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ElasticGpuSpecification) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ElasticGpuSpecification"} + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetType sets the Type field's value. 
+func (s *ElasticGpuSpecification) SetType(v string) *ElasticGpuSpecification { + s.Type = &v + return s +} + +// Describes an Elastic GPU. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ElasticGpus +type ElasticGpus struct { + _ struct{} `type:"structure"` + + // The Availability Zone in the which the Elastic GPU resides. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The status of the Elastic GPU. + ElasticGpuHealth *ElasticGpuHealth `locationName:"elasticGpuHealth" type:"structure"` + + // The ID of the Elastic GPU. + ElasticGpuId *string `locationName:"elasticGpuId" type:"string"` + + // The state of the Elastic GPU. + ElasticGpuState *string `locationName:"elasticGpuState" type:"string" enum:"ElasticGpuState"` + + // The type of Elastic GPU. + ElasticGpuType *string `locationName:"elasticGpuType" type:"string"` + + // The ID of the instance to which the Elastic GPU is attached. + InstanceId *string `locationName:"instanceId" type:"string"` +} + +// String returns the string representation +func (s ElasticGpus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ElasticGpus) GoString() string { + return s.String() +} + +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *ElasticGpus) SetAvailabilityZone(v string) *ElasticGpus { + s.AvailabilityZone = &v + return s +} + +// SetElasticGpuHealth sets the ElasticGpuHealth field's value. +func (s *ElasticGpus) SetElasticGpuHealth(v *ElasticGpuHealth) *ElasticGpus { + s.ElasticGpuHealth = v + return s +} + +// SetElasticGpuId sets the ElasticGpuId field's value. +func (s *ElasticGpus) SetElasticGpuId(v string) *ElasticGpus { + s.ElasticGpuId = &v + return s +} + +// SetElasticGpuState sets the ElasticGpuState field's value. +func (s *ElasticGpus) SetElasticGpuState(v string) *ElasticGpus { + s.ElasticGpuState = &v + return s +} + +// SetElasticGpuType sets the ElasticGpuType field's value. +func (s *ElasticGpus) SetElasticGpuType(v string) *ElasticGpus { + s.ElasticGpuType = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *ElasticGpus) SetInstanceId(v string) *ElasticGpus { + s.InstanceId = &v + return s +} + // Contains the parameters for EnableVgwRoutePropagation. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableVgwRoutePropagationRequest type EnableVgwRoutePropagationInput struct { @@ -33631,6 +40384,252 @@ func (s *FlowLog) SetTrafficType(v string) *FlowLog { return s } +// Describes an Amazon FPGA image (AFI). +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/FpgaImage +type FpgaImage struct { + _ struct{} `type:"structure"` + + // The date and time the AFI was created. + CreateTime *time.Time `locationName:"createTime" type:"timestamp" timestampFormat:"iso8601"` + + // The description of the AFI. + Description *string `locationName:"description" type:"string"` + + // The global FPGA image identifier (AGFI ID). + FpgaImageGlobalId *string `locationName:"fpgaImageGlobalId" type:"string"` + + // The FPGA image identifier (AFI ID). + FpgaImageId *string `locationName:"fpgaImageId" type:"string"` + + // The name of the AFI. + Name *string `locationName:"name" type:"string"` + + // The alias of the AFI owner. Possible values include self, amazon, and aws-marketplace. + OwnerAlias *string `locationName:"ownerAlias" type:"string"` + + // The AWS account ID of the AFI owner. 
+ OwnerId *string `locationName:"ownerId" type:"string"` + + // Information about the PCI bus. + PciId *PciId `locationName:"pciId" type:"structure"` + + // The product codes for the AFI. + ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"` + + // Indicates whether the AFI is public. + Public *bool `locationName:"public" type:"boolean"` + + // The version of the AWS Shell that was used to create the bitstream. + ShellVersion *string `locationName:"shellVersion" type:"string"` + + // Information about the state of the AFI. + State *FpgaImageState `locationName:"state" type:"structure"` + + // Any tags assigned to the AFI. + Tags []*Tag `locationName:"tags" locationNameList:"item" type:"list"` + + // The time of the most recent update to the AFI. + UpdateTime *time.Time `locationName:"updateTime" type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s FpgaImage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FpgaImage) GoString() string { + return s.String() +} + +// SetCreateTime sets the CreateTime field's value. +func (s *FpgaImage) SetCreateTime(v time.Time) *FpgaImage { + s.CreateTime = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *FpgaImage) SetDescription(v string) *FpgaImage { + s.Description = &v + return s +} + +// SetFpgaImageGlobalId sets the FpgaImageGlobalId field's value. +func (s *FpgaImage) SetFpgaImageGlobalId(v string) *FpgaImage { + s.FpgaImageGlobalId = &v + return s +} + +// SetFpgaImageId sets the FpgaImageId field's value. +func (s *FpgaImage) SetFpgaImageId(v string) *FpgaImage { + s.FpgaImageId = &v + return s +} + +// SetName sets the Name field's value. +func (s *FpgaImage) SetName(v string) *FpgaImage { + s.Name = &v + return s +} + +// SetOwnerAlias sets the OwnerAlias field's value. +func (s *FpgaImage) SetOwnerAlias(v string) *FpgaImage { + s.OwnerAlias = &v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *FpgaImage) SetOwnerId(v string) *FpgaImage { + s.OwnerId = &v + return s +} + +// SetPciId sets the PciId field's value. +func (s *FpgaImage) SetPciId(v *PciId) *FpgaImage { + s.PciId = v + return s +} + +// SetProductCodes sets the ProductCodes field's value. +func (s *FpgaImage) SetProductCodes(v []*ProductCode) *FpgaImage { + s.ProductCodes = v + return s +} + +// SetPublic sets the Public field's value. +func (s *FpgaImage) SetPublic(v bool) *FpgaImage { + s.Public = &v + return s +} + +// SetShellVersion sets the ShellVersion field's value. +func (s *FpgaImage) SetShellVersion(v string) *FpgaImage { + s.ShellVersion = &v + return s +} + +// SetState sets the State field's value. +func (s *FpgaImage) SetState(v *FpgaImageState) *FpgaImage { + s.State = v + return s +} + +// SetTags sets the Tags field's value. +func (s *FpgaImage) SetTags(v []*Tag) *FpgaImage { + s.Tags = v + return s +} + +// SetUpdateTime sets the UpdateTime field's value. +func (s *FpgaImage) SetUpdateTime(v time.Time) *FpgaImage { + s.UpdateTime = &v + return s +} + +// Describes an Amazon FPGA image (AFI) attribute. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/FpgaImageAttribute +type FpgaImageAttribute struct { + _ struct{} `type:"structure"` + + // The description of the AFI. + Description *string `locationName:"description" type:"string"` + + // The ID of the AFI. 
+ FpgaImageId *string `locationName:"fpgaImageId" type:"string"` + + // One or more load permissions. + LoadPermissions []*LoadPermission `locationName:"loadPermissions" locationNameList:"item" type:"list"` + + // The name of the AFI. + Name *string `locationName:"name" type:"string"` + + // One or more product codes. + ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s FpgaImageAttribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FpgaImageAttribute) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *FpgaImageAttribute) SetDescription(v string) *FpgaImageAttribute { + s.Description = &v + return s +} + +// SetFpgaImageId sets the FpgaImageId field's value. +func (s *FpgaImageAttribute) SetFpgaImageId(v string) *FpgaImageAttribute { + s.FpgaImageId = &v + return s +} + +// SetLoadPermissions sets the LoadPermissions field's value. +func (s *FpgaImageAttribute) SetLoadPermissions(v []*LoadPermission) *FpgaImageAttribute { + s.LoadPermissions = v + return s +} + +// SetName sets the Name field's value. +func (s *FpgaImageAttribute) SetName(v string) *FpgaImageAttribute { + s.Name = &v + return s +} + +// SetProductCodes sets the ProductCodes field's value. +func (s *FpgaImageAttribute) SetProductCodes(v []*ProductCode) *FpgaImageAttribute { + s.ProductCodes = v + return s +} + +// Describes the state of the bitstream generation process for an Amazon FPGA +// image (AFI). +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/FpgaImageState +type FpgaImageState struct { + _ struct{} `type:"structure"` + + // The state. The following are the possible values: + // + // * pending - AFI bitstream generation is in progress. + // + // * available - The AFI is available for use. + // + // * failed - AFI bitstream generation failed. + // + // * unavailable - The AFI is no longer available for use. + Code *string `locationName:"code" type:"string" enum:"FpgaImageStateCode"` + + // If the state is failed, this is the error message. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s FpgaImageState) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FpgaImageState) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *FpgaImageState) SetCode(v string) *FpgaImageState { + s.Code = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *FpgaImageState) SetMessage(v string) *FpgaImageState { + s.Message = &v + return s +} + // Contains the parameters for GetConsoleOutput. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetConsoleOutputRequest type GetConsoleOutputInput struct { @@ -33887,7 +40886,7 @@ type GetHostReservationPurchasePreviewOutput struct { // The purchase information of the Dedicated Host Reservation and the Dedicated // Hosts associated with it. - Purchase []*Purchase `locationName:"purchase" type:"list"` + Purchase []*Purchase `locationName:"purchase" locationNameList:"item" type:"list"` // The potential total hourly price of the reservation per hour. TotalHourlyPrice *string `locationName:"totalHourlyPrice" type:"string"` @@ -33990,7 +40989,8 @@ type GetPasswordDataOutput struct { // The ID of the Windows instance. 
InstanceId *string `locationName:"instanceId" type:"string"` - // The password of the instance. + // The password of the instance. Returns an empty string if the password is + // not available. PasswordData *string `locationName:"passwordData" type:"string"` // The time the data was last updated. @@ -34721,6 +41721,67 @@ func (s *IamInstanceProfile) SetId(v string) *IamInstanceProfile { return s } +// Describes an association between an IAM instance profile and an instance. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/IamInstanceProfileAssociation +type IamInstanceProfileAssociation struct { + _ struct{} `type:"structure"` + + // The ID of the association. + AssociationId *string `locationName:"associationId" type:"string"` + + // The IAM instance profile. + IamInstanceProfile *IamInstanceProfile `locationName:"iamInstanceProfile" type:"structure"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The state of the association. + State *string `locationName:"state" type:"string" enum:"IamInstanceProfileAssociationState"` + + // The time the IAM instance profile was associated with the instance. + Timestamp *time.Time `locationName:"timestamp" type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s IamInstanceProfileAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IamInstanceProfileAssociation) GoString() string { + return s.String() +} + +// SetAssociationId sets the AssociationId field's value. +func (s *IamInstanceProfileAssociation) SetAssociationId(v string) *IamInstanceProfileAssociation { + s.AssociationId = &v + return s +} + +// SetIamInstanceProfile sets the IamInstanceProfile field's value. +func (s *IamInstanceProfileAssociation) SetIamInstanceProfile(v *IamInstanceProfile) *IamInstanceProfileAssociation { + s.IamInstanceProfile = v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *IamInstanceProfileAssociation) SetInstanceId(v string) *IamInstanceProfileAssociation { + s.InstanceId = &v + return s +} + +// SetState sets the State field's value. +func (s *IamInstanceProfileAssociation) SetState(v string) *IamInstanceProfileAssociation { + s.State = &v + return s +} + +// SetTimestamp sets the Timestamp field's value. +func (s *IamInstanceProfileAssociation) SetTimestamp(v time.Time) *IamInstanceProfileAssociation { + s.Timestamp = &v + return s +} + // Describes an IAM instance profile. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/IamInstanceProfileSpecification type IamInstanceProfileSpecification struct { @@ -36353,13 +43414,16 @@ type Instance struct { // The idempotency token you provided when you launched the instance, if applicable. ClientToken *string `locationName:"clientToken" type:"string"` - // Indicates whether the instance is optimized for EBS I/O. This optimization + // Indicates whether the instance is optimized for Amazon EBS I/O. This optimization // provides dedicated throughput to Amazon EBS and an optimized configuration // stack to provide optimal I/O performance. This optimization isn't available // with all instance types. Additional usage charges apply when using an EBS // Optimized instance. EbsOptimized *bool `locationName:"ebsOptimized" type:"boolean"` + // The Elastic GPU associated with the instance. 
+ ElasticGpuAssociations []*ElasticGpuAssociation `locationName:"elasticGpuAssociationSet" locationNameList:"item" type:"list"` + // Specifies whether enhanced networking with ENA is enabled. EnaSupport *bool `locationName:"enaSupport" type:"boolean"` @@ -36375,7 +43439,7 @@ type Instance struct { // The ID of the instance. InstanceId *string `locationName:"instanceId" type:"string"` - // Indicates whether this is a Spot instance or a Scheduled Instance. + // Indicates whether this is a Spot Instance or a Scheduled Instance. InstanceLifecycle *string `locationName:"instanceLifecycle" type:"string" enum:"InstanceLifecycleType"` // The instance type. @@ -36407,7 +43471,7 @@ type Instance struct { // DNS hostname can only be used inside the Amazon EC2 network. This name is // not available until the instance enters the running state. // - // [EC2-VPC] The Amazon-provided DNS server will resolve Amazon-provided private + // [EC2-VPC] The Amazon-provided DNS server resolves Amazon-provided private // DNS hostnames if you've enabled DNS resolution and DNS hostnames in your // VPC. If you are not using the Amazon-provided DNS server in your VPC, your // custom domain name servers must resolve the hostname as appropriate. @@ -36442,13 +43506,13 @@ type Instance struct { // Specifies whether to enable an instance launched in a VPC to perform NAT. // This controls whether source/destination checking is enabled on the instance. - // A value of true means checking is enabled, and false means checking is disabled. - // The value must be false for the instance to perform NAT. For more information, - // see NAT Instances (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_NAT_Instance.html) + // A value of true means that checking is enabled, and false means that checking + // is disabled. The value must be false for the instance to perform NAT. For + // more information, see NAT Instances (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_NAT_Instance.html) // in the Amazon Virtual Private Cloud User Guide. SourceDestCheck *bool `locationName:"sourceDestCheck" type:"boolean"` - // If the request is a Spot instance request, the ID of the request. + // If the request is a Spot Instance request, the ID of the request. SpotInstanceRequestId *string `locationName:"spotInstanceRequestId" type:"string"` // Specifies whether enhanced networking with the Intel 82599 Virtual Function @@ -36517,6 +43581,12 @@ func (s *Instance) SetEbsOptimized(v bool) *Instance { return s } +// SetElasticGpuAssociations sets the ElasticGpuAssociations field's value. +func (s *Instance) SetElasticGpuAssociations(v []*ElasticGpuAssociation) *Instance { + s.ElasticGpuAssociations = v + return s +} + // SetEnaSupport sets the EnaSupport field's value. func (s *Instance) SetEnaSupport(v bool) *Instance { s.EnaSupport = &v @@ -37834,7 +44904,8 @@ type IpPermission struct { _ struct{} `type:"structure"` // The start of port range for the TCP and UDP protocols, or an ICMP/ICMPv6 - // type number. A value of -1 indicates all ICMP/ICMPv6 types. + // type number. A value of -1 indicates all ICMP/ICMPv6 types. If you specify + // all ICMP/ICMPv6 types, you must specify all codes. FromPort *int64 `locationName:"fromPort" type:"integer"` // The IP protocol name (tcp, udp, icmp) or number (see Protocol Numbers (http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml)). @@ -37861,6 +44932,7 @@ type IpPermission struct { // The end of port range for the TCP and UDP protocols, or an ICMP/ICMPv6 code. 
// A value of -1 indicates all ICMP/ICMPv6 codes for the specified ICMP type. + // If you specify all ICMP/ICMPv6 types, you must specify all codes. ToPort *int64 `locationName:"toPort" type:"integer"` // One or more security group and AWS account ID pairs. @@ -37927,6 +44999,13 @@ type IpRange struct { // The IPv4 CIDR range. You can either specify a CIDR range or a source security // group, not both. To specify a single IPv4 address, use the /32 prefix. CidrIp *string `locationName:"cidrIp" type:"string"` + + // A description for the security group rule that references this IPv4 address + // range. + // + // Constraints: Up to 255 characters in length. Allowed characters are a-z, + // A-Z, 0-9, spaces, and ._-:/()#,@[]+=;{}!$* + Description *string `locationName:"description" type:"string"` } // String returns the string representation @@ -37945,6 +45024,12 @@ func (s *IpRange) SetCidrIp(v string) *IpRange { return s } +// SetDescription sets the Description field's value. +func (s *IpRange) SetDescription(v string) *IpRange { + s.Description = &v + return s +} + // Describes an IPv6 CIDR block. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/Ipv6CidrBlock type Ipv6CidrBlock struct { @@ -37978,6 +45063,13 @@ type Ipv6Range struct { // The IPv6 CIDR range. You can either specify a CIDR range or a source security // group, not both. To specify a single IPv6 address, use the /128 prefix. CidrIpv6 *string `locationName:"cidrIpv6" type:"string"` + + // A description for the security group rule that references this IPv6 address + // range. + // + // Constraints: Up to 255 characters in length. Allowed characters are a-z, + // A-Z, 0-9, spaces, and ._-:/()#,@[]+=;{}!$* + Description *string `locationName:"description" type:"string"` } // String returns the string representation @@ -37996,6 +45088,12 @@ func (s *Ipv6Range) SetCidrIpv6(v string) *Ipv6Range { return s } +// SetDescription sets the Description field's value. +func (s *Ipv6Range) SetDescription(v string) *Ipv6Range { + s.Description = &v + return s +} + // Describes a key pair. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/KeyPairInfo type KeyPairInfo struct { @@ -38267,6 +45365,259 @@ func (s *LaunchSpecification) SetUserData(v string) *LaunchSpecification { return s } +// Describes a load permission. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/LoadPermission +type LoadPermission struct { + _ struct{} `type:"structure"` + + // The name of the group. + Group *string `locationName:"group" type:"string" enum:"PermissionGroup"` + + // The AWS account ID. + UserId *string `locationName:"userId" type:"string"` +} + +// String returns the string representation +func (s LoadPermission) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoadPermission) GoString() string { + return s.String() +} + +// SetGroup sets the Group field's value. +func (s *LoadPermission) SetGroup(v string) *LoadPermission { + s.Group = &v + return s +} + +// SetUserId sets the UserId field's value. +func (s *LoadPermission) SetUserId(v string) *LoadPermission { + s.UserId = &v + return s +} + +// Describes modifications to the load permissions of an Amazon FPGA image (AFI). +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/LoadPermissionModifications +type LoadPermissionModifications struct { + _ struct{} `type:"structure"` + + // The load permissions to add. 
+ Add []*LoadPermissionRequest `locationNameList:"item" type:"list"` + + // The load permissions to remove. + Remove []*LoadPermissionRequest `locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s LoadPermissionModifications) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoadPermissionModifications) GoString() string { + return s.String() +} + +// SetAdd sets the Add field's value. +func (s *LoadPermissionModifications) SetAdd(v []*LoadPermissionRequest) *LoadPermissionModifications { + s.Add = v + return s +} + +// SetRemove sets the Remove field's value. +func (s *LoadPermissionModifications) SetRemove(v []*LoadPermissionRequest) *LoadPermissionModifications { + s.Remove = v + return s +} + +// Describes a load permission. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/LoadPermissionRequest +type LoadPermissionRequest struct { + _ struct{} `type:"structure"` + + // The name of the group. + Group *string `type:"string" enum:"PermissionGroup"` + + // The AWS account ID. + UserId *string `type:"string"` +} + +// String returns the string representation +func (s LoadPermissionRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoadPermissionRequest) GoString() string { + return s.String() +} + +// SetGroup sets the Group field's value. +func (s *LoadPermissionRequest) SetGroup(v string) *LoadPermissionRequest { + s.Group = &v + return s +} + +// SetUserId sets the UserId field's value. +func (s *LoadPermissionRequest) SetUserId(v string) *LoadPermissionRequest { + s.UserId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyFpgaImageAttributeRequest +type ModifyFpgaImageAttributeInput struct { + _ struct{} `type:"structure"` + + // The name of the attribute. + Attribute *string `type:"string" enum:"FpgaImageAttributeName"` + + // A description for the AFI. + Description *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the AFI. + // + // FpgaImageId is a required field + FpgaImageId *string `type:"string" required:"true"` + + // The load permission for the AFI. + LoadPermission *LoadPermissionModifications `type:"structure"` + + // A name for the AFI. + Name *string `type:"string"` + + // The operation type. + OperationType *string `type:"string" enum:"OperationType"` + + // One or more product codes. After you add a product code to an AFI, it can't + // be removed. This parameter is valid only when modifying the productCodes + // attribute. + ProductCodes []*string `locationName:"ProductCode" locationNameList:"ProductCode" type:"list"` + + // One or more user groups. This parameter is valid only when modifying the + // loadPermission attribute. + UserGroups []*string `locationName:"UserGroup" locationNameList:"UserGroup" type:"list"` + + // One or more AWS account IDs. This parameter is valid only when modifying + // the loadPermission attribute. 
+ UserIds []*string `locationName:"UserId" locationNameList:"UserId" type:"list"` +} + +// String returns the string representation +func (s ModifyFpgaImageAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyFpgaImageAttributeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyFpgaImageAttributeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyFpgaImageAttributeInput"} + if s.FpgaImageId == nil { + invalidParams.Add(request.NewErrParamRequired("FpgaImageId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttribute sets the Attribute field's value. +func (s *ModifyFpgaImageAttributeInput) SetAttribute(v string) *ModifyFpgaImageAttributeInput { + s.Attribute = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *ModifyFpgaImageAttributeInput) SetDescription(v string) *ModifyFpgaImageAttributeInput { + s.Description = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyFpgaImageAttributeInput) SetDryRun(v bool) *ModifyFpgaImageAttributeInput { + s.DryRun = &v + return s +} + +// SetFpgaImageId sets the FpgaImageId field's value. +func (s *ModifyFpgaImageAttributeInput) SetFpgaImageId(v string) *ModifyFpgaImageAttributeInput { + s.FpgaImageId = &v + return s +} + +// SetLoadPermission sets the LoadPermission field's value. +func (s *ModifyFpgaImageAttributeInput) SetLoadPermission(v *LoadPermissionModifications) *ModifyFpgaImageAttributeInput { + s.LoadPermission = v + return s +} + +// SetName sets the Name field's value. +func (s *ModifyFpgaImageAttributeInput) SetName(v string) *ModifyFpgaImageAttributeInput { + s.Name = &v + return s +} + +// SetOperationType sets the OperationType field's value. +func (s *ModifyFpgaImageAttributeInput) SetOperationType(v string) *ModifyFpgaImageAttributeInput { + s.OperationType = &v + return s +} + +// SetProductCodes sets the ProductCodes field's value. +func (s *ModifyFpgaImageAttributeInput) SetProductCodes(v []*string) *ModifyFpgaImageAttributeInput { + s.ProductCodes = v + return s +} + +// SetUserGroups sets the UserGroups field's value. +func (s *ModifyFpgaImageAttributeInput) SetUserGroups(v []*string) *ModifyFpgaImageAttributeInput { + s.UserGroups = v + return s +} + +// SetUserIds sets the UserIds field's value. +func (s *ModifyFpgaImageAttributeInput) SetUserIds(v []*string) *ModifyFpgaImageAttributeInput { + s.UserIds = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyFpgaImageAttributeResult +type ModifyFpgaImageAttributeOutput struct { + _ struct{} `type:"structure"` + + // Information about the attribute. + FpgaImageAttribute *FpgaImageAttribute `locationName:"fpgaImageAttribute" type:"structure"` +} + +// String returns the string representation +func (s ModifyFpgaImageAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyFpgaImageAttributeOutput) GoString() string { + return s.String() +} + +// SetFpgaImageAttribute sets the FpgaImageAttribute field's value. +func (s *ModifyFpgaImageAttributeOutput) SetFpgaImageAttribute(v *FpgaImageAttribute) *ModifyFpgaImageAttributeOutput { + s.FpgaImageAttribute = v + return s +} + // Contains the parameters for ModifyHosts. 
// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyHostsRequest type ModifyHostsInput struct { @@ -38673,7 +46024,7 @@ type ModifyInstanceAttributeInput struct { BlockDeviceMappings []*InstanceBlockDeviceMappingSpecification `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` // If the value is true, you can't terminate the instance using the Amazon EC2 - // console, CLI, or API; otherwise, you can. You cannot use this paramater for + // console, CLI, or API; otherwise, you can. You cannot use this parameter for // Spot Instances. DisableApiTermination *AttributeBooleanValue `locationName:"disableApiTermination" type:"structure"` @@ -38683,7 +46034,7 @@ type ModifyInstanceAttributeInput struct { // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` - // Specifies whether the instance is optimized for EBS I/O. This optimization + // Specifies whether the instance is optimized for Amazon EBS I/O. This optimization // provides dedicated throughput to Amazon EBS and an optimized configuration // stack to provide optimal EBS I/O performance. This optimization isn't available // with all instance types. Additional usage charges apply when using an EBS @@ -38726,8 +46077,8 @@ type ModifyInstanceAttributeInput struct { Ramdisk *AttributeValue `locationName:"ramdisk" type:"structure"` // Specifies whether source/destination checking is enabled. A value of true - // means that checking is enabled, and false means checking is disabled. This - // value must be false for a NAT instance to perform NAT. + // means that checking is enabled, and false means that checking is disabled. + // This value must be false for a NAT instance to perform NAT. SourceDestCheck *AttributeBooleanValue `type:"structure"` // Set to simple to enable enhanced networking with the Intel 82599 Virtual @@ -38741,8 +46092,8 @@ type ModifyInstanceAttributeInput struct { SriovNetSupport *AttributeValue `locationName:"sriovNetSupport" type:"structure"` // Changes the instance's user data to the specified value. If you are using - // an AWS SDK or command line tool, Base64-encoding is performed for you, and - // you can load the text from a file. Otherwise, you must provide Base64-encoded + // an AWS SDK or command line tool, base64-encoding is performed for you, and + // you can load the text from a file. Otherwise, you must provide base64-encoded // text. UserData *BlobAttributeValue `locationName:"userData" type:"structure"` @@ -39385,6 +46736,10 @@ type ModifySubnetAttributeInput struct { // subnet should be assigned an IPv6 address. This includes a network interface // that's created when launching an instance into the subnet (the instance therefore // receives an IPv6 address). + // + // If you enable the IPv6 addressing feature for your subnet, your network interface + // or instance only receives an IPv6 address if it's created using version 2016-11-15 + // or later of the Amazon EC2 API. AssignIpv6AddressOnCreation *AttributeBooleanValue `type:"structure"` // Specify true to indicate that network interfaces created in the specified @@ -39531,6 +46886,122 @@ func (s ModifyVolumeAttributeOutput) GoString() string { return s.String() } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVolumeRequest +type ModifyVolumeInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. 
If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // Target IOPS rate of the volume to be modified. + // + // Only valid for Provisioned IOPS SSD (io1) volumes. For more information about + // io1 IOPS configuration, see http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html#EBSVolumeTypes_piops + // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html#EBSVolumeTypes_piops). + // + // Default: If no IOPS value is specified, the existing value is retained. + Iops *int64 `type:"integer"` + + // Target size in GiB of the volume to be modified. Target volume size must + // be greater than or equal to than the existing size of the volume. For information + // about available EBS volume sizes, see http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html + // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html). + // + // Default: If no size is specified, the existing size is retained. + Size *int64 `type:"integer"` + + // VolumeId is a required field + VolumeId *string `type:"string" required:"true"` + + // Target EBS volume type of the volume to be modified + // + // The API does not support modifications for volume type standard. You also + // cannot change the type of a volume to standard. + // + // Default: If no type is specified, the existing type is retained. + VolumeType *string `type:"string" enum:"VolumeType"` +} + +// String returns the string representation +func (s ModifyVolumeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVolumeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyVolumeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyVolumeInput"} + if s.VolumeId == nil { + invalidParams.Add(request.NewErrParamRequired("VolumeId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyVolumeInput) SetDryRun(v bool) *ModifyVolumeInput { + s.DryRun = &v + return s +} + +// SetIops sets the Iops field's value. +func (s *ModifyVolumeInput) SetIops(v int64) *ModifyVolumeInput { + s.Iops = &v + return s +} + +// SetSize sets the Size field's value. +func (s *ModifyVolumeInput) SetSize(v int64) *ModifyVolumeInput { + s.Size = &v + return s +} + +// SetVolumeId sets the VolumeId field's value. +func (s *ModifyVolumeInput) SetVolumeId(v string) *ModifyVolumeInput { + s.VolumeId = &v + return s +} + +// SetVolumeType sets the VolumeType field's value. +func (s *ModifyVolumeInput) SetVolumeType(v string) *ModifyVolumeInput { + s.VolumeType = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVolumeResult +type ModifyVolumeOutput struct { + _ struct{} `type:"structure"` + + // A VolumeModification object. + VolumeModification *VolumeModification `locationName:"volumeModification" type:"structure"` +} + +// String returns the string representation +func (s ModifyVolumeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVolumeOutput) GoString() string { + return s.String() +} + +// SetVolumeModification sets the VolumeModification field's value. 
+func (s *ModifyVolumeOutput) SetVolumeModification(v *VolumeModification) *ModifyVolumeOutput { + s.VolumeModification = v + return s +} + // Contains the parameters for ModifyVpcAttribute. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpcAttributeRequest type ModifyVpcAttributeInput struct { @@ -40129,6 +47600,9 @@ type NatGateway struct { // The ID of the subnet in which the NAT gateway is located. SubnetId *string `locationName:"subnetId" type:"string"` + // The tags for the NAT gateway. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + // The ID of the VPC in which the NAT gateway is located. VpcId *string `locationName:"vpcId" type:"string"` } @@ -40197,6 +47671,12 @@ func (s *NatGateway) SetSubnetId(v string) *NatGateway { return s } +// SetTags sets the Tags field's value. +func (s *NatGateway) SetTags(v []*Tag) *NatGateway { + s.Tags = v + return s +} + // SetVpcId sets the VpcId field's value. func (s *NatGateway) SetVpcId(v string) *NatGateway { s.VpcId = &v @@ -40856,6 +48336,110 @@ func (s *NetworkInterfaceIpv6Address) SetIpv6Address(v string) *NetworkInterface return s } +// Describes a permission for a network interface. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/NetworkInterfacePermission +type NetworkInterfacePermission struct { + _ struct{} `type:"structure"` + + // The AWS account ID. + AwsAccountId *string `locationName:"awsAccountId" type:"string"` + + // The AWS service. + AwsService *string `locationName:"awsService" type:"string"` + + // The ID of the network interface. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + + // The ID of the network interface permission. + NetworkInterfacePermissionId *string `locationName:"networkInterfacePermissionId" type:"string"` + + // The type of permission. + Permission *string `locationName:"permission" type:"string" enum:"InterfacePermissionType"` + + // Information about the state of the permission. + PermissionState *NetworkInterfacePermissionState `locationName:"permissionState" type:"structure"` +} + +// String returns the string representation +func (s NetworkInterfacePermission) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkInterfacePermission) GoString() string { + return s.String() +} + +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *NetworkInterfacePermission) SetAwsAccountId(v string) *NetworkInterfacePermission { + s.AwsAccountId = &v + return s +} + +// SetAwsService sets the AwsService field's value. +func (s *NetworkInterfacePermission) SetAwsService(v string) *NetworkInterfacePermission { + s.AwsService = &v + return s +} + +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value. +func (s *NetworkInterfacePermission) SetNetworkInterfaceId(v string) *NetworkInterfacePermission { + s.NetworkInterfaceId = &v + return s +} + +// SetNetworkInterfacePermissionId sets the NetworkInterfacePermissionId field's value. +func (s *NetworkInterfacePermission) SetNetworkInterfacePermissionId(v string) *NetworkInterfacePermission { + s.NetworkInterfacePermissionId = &v + return s +} + +// SetPermission sets the Permission field's value. +func (s *NetworkInterfacePermission) SetPermission(v string) *NetworkInterfacePermission { + s.Permission = &v + return s +} + +// SetPermissionState sets the PermissionState field's value. 
+func (s *NetworkInterfacePermission) SetPermissionState(v *NetworkInterfacePermissionState) *NetworkInterfacePermission { + s.PermissionState = v + return s +} + +// Describes the state of a network interface permission. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/NetworkInterfacePermissionState +type NetworkInterfacePermissionState struct { + _ struct{} `type:"structure"` + + // The state of the permission. + State *string `locationName:"state" type:"string" enum:"NetworkInterfacePermissionStateCode"` + + // A status message, if applicable. + StatusMessage *string `locationName:"statusMessage" type:"string"` +} + +// String returns the string representation +func (s NetworkInterfacePermissionState) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkInterfacePermissionState) GoString() string { + return s.String() +} + +// SetState sets the State field's value. +func (s *NetworkInterfacePermissionState) SetState(v string) *NetworkInterfacePermissionState { + s.State = &v + return s +} + +// SetStatusMessage sets the StatusMessage field's value. +func (s *NetworkInterfacePermissionState) SetStatusMessage(v string) *NetworkInterfacePermissionState { + s.StatusMessage = &v + return s +} + // Describes the private IPv4 address of a network interface. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/NetworkInterfacePrivateIpAddress type NetworkInterfacePrivateIpAddress struct { @@ -40941,6 +48525,59 @@ func (s *NewDhcpConfiguration) SetValues(v []*string) *NewDhcpConfiguration { return s } +// Describes the data that identifies an Amazon FPGA image (AFI) on the PCI +// bus. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/PciId +type PciId struct { + _ struct{} `type:"structure"` + + // The ID of the device. + DeviceId *string `type:"string"` + + // The ID of the subsystem. + SubsystemId *string `type:"string"` + + // The ID of the vendor for the subsystem. + SubsystemVendorId *string `type:"string"` + + // The ID of the vendor. + VendorId *string `type:"string"` +} + +// String returns the string representation +func (s PciId) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PciId) GoString() string { + return s.String() +} + +// SetDeviceId sets the DeviceId field's value. +func (s *PciId) SetDeviceId(v string) *PciId { + s.DeviceId = &v + return s +} + +// SetSubsystemId sets the SubsystemId field's value. +func (s *PciId) SetSubsystemId(v string) *PciId { + s.SubsystemId = &v + return s +} + +// SetSubsystemVendorId sets the SubsystemVendorId field's value. +func (s *PciId) SetSubsystemVendorId(v string) *PciId { + s.SubsystemVendorId = &v + return s +} + +// SetVendorId sets the VendorId field's value. +func (s *PciId) SetVendorId(v string) *PciId { + s.VendorId = &v + return s +} + // Describes the VPC peering connection options. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/PeeringConnectionOptions type PeeringConnectionOptions struct { @@ -41048,10 +48685,13 @@ type Placement struct { // The name of the placement group the instance is in (for cluster compute instances). GroupName *string `locationName:"groupName" type:"string"` - // The ID of the Dedicted host on which the instance resides. This parameter - // is not support for the ImportInstance command. + // The ID of the Dedicated Host on which the instance resides. 
This parameter + // is not supported for the ImportInstance command. HostId *string `locationName:"hostId" type:"string"` + // Reserved for future use. + SpreadDomain *string `locationName:"spreadDomain" type:"string"` + // The tenancy of the instance (if the instance is running in a VPC). An instance // with a tenancy of dedicated runs on single-tenant hardware. The host tenancy // is not supported for the ImportInstance command. @@ -41092,6 +48732,12 @@ func (s *Placement) SetHostId(v string) *Placement { return s } +// SetSpreadDomain sets the SpreadDomain field's value. +func (s *Placement) SetSpreadDomain(v string) *Placement { + s.SpreadDomain = &v + return s +} + // SetTenancy sets the Tenancy field's value. func (s *Placement) SetTenancy(v string) *Placement { s.Tenancy = &v @@ -41223,6 +48869,13 @@ func (s *PrefixList) SetPrefixListName(v string) *PrefixList { type PrefixListId struct { _ struct{} `type:"structure"` + // A description for the security group rule that references this prefix list + // ID. + // + // Constraints: Up to 255 characters in length. Allowed characters are a-z, + // A-Z, 0-9, spaces, and ._-:/()#,@[]+=;{}!$* + Description *string `locationName:"description" type:"string"` + // The ID of the prefix. PrefixListId *string `locationName:"prefixListId" type:"string"` } @@ -41237,6 +48890,12 @@ func (s PrefixListId) GoString() string { return s.String() } +// SetDescription sets the Description field's value. +func (s *PrefixListId) SetDescription(v string) *PrefixListId { + s.Description = &v + return s +} + // SetPrefixListId sets the PrefixListId field's value. func (s *PrefixListId) SetPrefixListId(v string) *PrefixListId { s.PrefixListId = &v @@ -41761,7 +49420,7 @@ type PurchaseHostReservationOutput struct { CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"` // Describes the details of the purchase. - Purchase []*Purchase `locationName:"purchase" type:"list"` + Purchase []*Purchase `locationName:"purchase" locationNameList:"item" type:"list"` // The total hourly price of the reservation calculated per hour. TotalHourlyPrice *string `locationName:"totalHourlyPrice" type:"string"` @@ -42213,6 +49872,11 @@ type RegisterImageInput struct { // the architecture specified in the manifest file. Architecture *string `locationName:"architecture" type:"string" enum:"ArchitectureValues"` + // The billing product codes. Your account must be authorized to specify billing + // product codes. Otherwise, you can use the AWS Marketplace to bill for the + // use of an AMI. + BillingProducts []*string `locationName:"BillingProduct" locationNameList:"item" type:"list"` + // One or more block device mapping entries. BlockDeviceMappings []*BlockDeviceMapping `locationName:"BlockDeviceMapping" locationNameList:"BlockDeviceMapping" type:"list"` @@ -42298,6 +49962,12 @@ func (s *RegisterImageInput) SetArchitecture(v string) *RegisterImageInput { return s } +// SetBillingProducts sets the BillingProducts field's value. +func (s *RegisterImageInput) SetBillingProducts(v []*string) *RegisterImageInput { + s.BillingProducts = v + return s +} + // SetBlockDeviceMappings sets the BlockDeviceMappings field's value. 
func (s *RegisterImageInput) SetBlockDeviceMappings(v []*BlockDeviceMapping) *RegisterImageInput { s.BlockDeviceMappings = v @@ -42602,6 +50272,83 @@ func (s *ReleaseHostsOutput) SetUnsuccessful(v []*UnsuccessfulItem) *ReleaseHost return s } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReplaceIamInstanceProfileAssociationRequest +type ReplaceIamInstanceProfileAssociationInput struct { + _ struct{} `type:"structure"` + + // The ID of the existing IAM instance profile association. + // + // AssociationId is a required field + AssociationId *string `type:"string" required:"true"` + + // The IAM instance profile. + // + // IamInstanceProfile is a required field + IamInstanceProfile *IamInstanceProfileSpecification `type:"structure" required:"true"` +} + +// String returns the string representation +func (s ReplaceIamInstanceProfileAssociationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplaceIamInstanceProfileAssociationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplaceIamInstanceProfileAssociationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplaceIamInstanceProfileAssociationInput"} + if s.AssociationId == nil { + invalidParams.Add(request.NewErrParamRequired("AssociationId")) + } + if s.IamInstanceProfile == nil { + invalidParams.Add(request.NewErrParamRequired("IamInstanceProfile")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAssociationId sets the AssociationId field's value. +func (s *ReplaceIamInstanceProfileAssociationInput) SetAssociationId(v string) *ReplaceIamInstanceProfileAssociationInput { + s.AssociationId = &v + return s +} + +// SetIamInstanceProfile sets the IamInstanceProfile field's value. +func (s *ReplaceIamInstanceProfileAssociationInput) SetIamInstanceProfile(v *IamInstanceProfileSpecification) *ReplaceIamInstanceProfileAssociationInput { + s.IamInstanceProfile = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReplaceIamInstanceProfileAssociationResult +type ReplaceIamInstanceProfileAssociationOutput struct { + _ struct{} `type:"structure"` + + // Information about the IAM instance profile association. + IamInstanceProfileAssociation *IamInstanceProfileAssociation `locationName:"iamInstanceProfileAssociation" type:"structure"` +} + +// String returns the string representation +func (s ReplaceIamInstanceProfileAssociationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplaceIamInstanceProfileAssociationOutput) GoString() string { + return s.String() +} + +// SetIamInstanceProfileAssociation sets the IamInstanceProfileAssociation field's value. +func (s *ReplaceIamInstanceProfileAssociationOutput) SetIamInstanceProfileAssociation(v *IamInstanceProfileAssociation) *ReplaceIamInstanceProfileAssociationOutput { + s.IamInstanceProfileAssociation = v + return s +} + // Contains the parameters for ReplaceNetworkAclAssociation. 
// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ReplaceNetworkAclAssociationRequest type ReplaceNetworkAclAssociationInput struct { @@ -43117,7 +50864,7 @@ type ReportInstanceStatusInput struct { // Instances is a required field Instances []*string `locationName:"instanceId" locationNameList:"InstanceId" type:"list" required:"true"` - // One or more reason codes that describes the health state of your instance. + // One or more reason codes that describe the health state of your instance. // // * instance-stuck-in-state: My instance is stuck in a state. // @@ -43128,13 +50875,13 @@ type ReportInstanceStatusInput struct { // * password-not-available: A password is not available for my instance. // // * performance-network: My instance is experiencing performance problems - // which I believe are network related. + // that I believe are network related. // // * performance-instance-store: My instance is experiencing performance - // problems which I believe are related to the instance stores. + // problems that I believe are related to the instance stores. // // * performance-ebs-volume: My instance is experiencing performance problems - // which I believe are related to an EBS volume. + // that I believe are related to an EBS volume. // // * performance-other: My instance is experiencing performance problems. // @@ -43377,13 +51124,16 @@ type RequestSpotInstancesInput struct { // Default: 1 InstanceCount *int64 `locationName:"instanceCount" type:"integer"` + // Indicates whether a Spot instance stops or terminates when it is interrupted. + InstanceInterruptionBehavior *string `type:"string" enum:"InstanceInterruptionBehavior"` + // The instance launch group. Launch groups are Spot instances that launch together // and terminate together. // // Default: Instances are launched and terminated individually LaunchGroup *string `locationName:"launchGroup" type:"string"` - // Describes the launch specification for an instance. + // The launch specification. LaunchSpecification *RequestSpotLaunchSpecification `type:"structure"` // The maximum hourly price (bid) for any Spot instance launched to fulfill @@ -43473,6 +51223,12 @@ func (s *RequestSpotInstancesInput) SetInstanceCount(v int64) *RequestSpotInstan return s } +// SetInstanceInterruptionBehavior sets the InstanceInterruptionBehavior field's value. +func (s *RequestSpotInstancesInput) SetInstanceInterruptionBehavior(v string) *RequestSpotInstancesInput { + s.InstanceInterruptionBehavior = &v + return s +} + // SetLaunchGroup sets the LaunchGroup field's value. func (s *RequestSpotInstancesInput) SetLaunchGroup(v string) *RequestSpotInstancesInput { s.LaunchGroup = &v @@ -43572,7 +51328,9 @@ type RequestSpotLaunchSpecification struct { // The name of the key pair. KeyName *string `locationName:"keyName" type:"string"` - // Describes the monitoring of an instance. + // Indicates whether basic or detailed monitoring is enabled for the instance. + // + // Default: Disabled Monitoring *RunInstancesMonitoringEnabled `locationName:"monitoring" type:"structure"` // One or more network interfaces. If you specify a network interface, you must @@ -43585,8 +51343,12 @@ type RequestSpotLaunchSpecification struct { // The ID of the RAM disk. RamdiskId *string `locationName:"ramdiskId" type:"string"` + // One or more security group IDs. SecurityGroupIds []*string `locationName:"SecurityGroupId" locationNameList:"item" type:"list"` + // One or more security groups. 
When requesting instances in a VPC, you must + // specify the IDs of the security groups. When requesting instances in EC2-Classic, + // you can specify the names or the IDs of the security groups. SecurityGroups []*string `locationName:"SecurityGroup" locationNameList:"item" type:"list"` // The ID of the subnet in which to launch the instance. @@ -44574,6 +52336,90 @@ func (s *ReservedInstancesOffering) SetUsagePrice(v float64) *ReservedInstancesO return s } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResetFpgaImageAttributeRequest +type ResetFpgaImageAttributeInput struct { + _ struct{} `type:"structure"` + + // The attribute. + Attribute *string `type:"string" enum:"ResetFpgaImageAttributeName"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the AFI. + // + // FpgaImageId is a required field + FpgaImageId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ResetFpgaImageAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetFpgaImageAttributeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ResetFpgaImageAttributeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResetFpgaImageAttributeInput"} + if s.FpgaImageId == nil { + invalidParams.Add(request.NewErrParamRequired("FpgaImageId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttribute sets the Attribute field's value. +func (s *ResetFpgaImageAttributeInput) SetAttribute(v string) *ResetFpgaImageAttributeInput { + s.Attribute = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ResetFpgaImageAttributeInput) SetDryRun(v bool) *ResetFpgaImageAttributeInput { + s.DryRun = &v + return s +} + +// SetFpgaImageId sets the FpgaImageId field's value. +func (s *ResetFpgaImageAttributeInput) SetFpgaImageId(v string) *ResetFpgaImageAttributeInput { + s.FpgaImageId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResetFpgaImageAttributeResult +type ResetFpgaImageAttributeOutput struct { + _ struct{} `type:"structure"` + + // Is true if the request succeeds, and an error otherwise. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s ResetFpgaImageAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetFpgaImageAttributeOutput) GoString() string { + return s.String() +} + +// SetReturn sets the Return field's value. +func (s *ResetFpgaImageAttributeOutput) SetReturn(v bool) *ResetFpgaImageAttributeOutput { + s.Return = &v + return s +} + // Contains the parameters for ResetImageAttribute. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResetImageAttributeRequest type ResetImageAttributeInput struct { @@ -45556,15 +53402,18 @@ type RunInstancesInput struct { // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` - // Indicates whether the instance is optimized for EBS I/O. 
This optimization + // Indicates whether the instance is optimized for Amazon EBS I/O. This optimization // provides dedicated throughput to Amazon EBS and an optimized configuration - // stack to provide optimal EBS I/O performance. This optimization isn't available - // with all instance types. Additional usage charges apply when using an EBS-optimized - // instance. + // stack to provide optimal Amazon EBS I/O performance. This optimization isn't + // available with all instance types. Additional usage charges apply when using + // an EBS-optimized instance. // // Default: false EbsOptimized *bool `locationName:"ebsOptimized" type:"boolean"` + // An Elastic GPU to associate with the instance. + ElasticGpuSpecification []*ElasticGpuSpecification `locationNameList:"item" type:"list"` + // The IAM instance profile. IamInstanceProfile *IamInstanceProfileSpecification `locationName:"iamInstanceProfile" type:"structure"` @@ -45676,12 +53525,17 @@ type RunInstancesInput struct { // [EC2-VPC] The ID of the subnet to launch the instance into. SubnetId *string `type:"string"` + // The tags to apply to the resources during launch. You can tag instances and + // volumes. The specified tags are applied to all instances or volumes that + // are created during launch. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + // The user data to make available to the instance. For more information, see // Running Commands on Your Linux Instance at Launch (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html) // (Linux) and Adding User Data (http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ec2-instance-metadata.html#instancedata-add-user-data) - // (Windows). If you are using an AWS SDK or command line tool, Base64-encoding + // (Windows). If you are using an AWS SDK or command line tool, base64-encoding // is performed for you, and you can load the text from a file. Otherwise, you - // must provide Base64-encoded text. + // must provide base64-encoded text. UserData *string `type:"string"` } @@ -45707,6 +53561,16 @@ func (s *RunInstancesInput) Validate() error { if s.MinCount == nil { invalidParams.Add(request.NewErrParamRequired("MinCount")) } + if s.ElasticGpuSpecification != nil { + for i, v := range s.ElasticGpuSpecification { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ElasticGpuSpecification", i), err.(request.ErrInvalidParams)) + } + } + } if s.Monitoring != nil { if err := s.Monitoring.Validate(); err != nil { invalidParams.AddNested("Monitoring", err.(request.ErrInvalidParams)) @@ -45765,6 +53629,12 @@ func (s *RunInstancesInput) SetEbsOptimized(v bool) *RunInstancesInput { return s } +// SetElasticGpuSpecification sets the ElasticGpuSpecification field's value. +func (s *RunInstancesInput) SetElasticGpuSpecification(v []*ElasticGpuSpecification) *RunInstancesInput { + s.ElasticGpuSpecification = v + return s +} + // SetIamInstanceProfile sets the IamInstanceProfile field's value. func (s *RunInstancesInput) SetIamInstanceProfile(v *IamInstanceProfileSpecification) *RunInstancesInput { s.IamInstanceProfile = v @@ -45873,6 +53743,12 @@ func (s *RunInstancesInput) SetSubnetId(v string) *RunInstancesInput { return s } +// SetTagSpecifications sets the TagSpecifications field's value. 
+func (s *RunInstancesInput) SetTagSpecifications(v []*TagSpecification) *RunInstancesInput { + s.TagSpecifications = v + return s +} + // SetUserData sets the UserData field's value. func (s *RunInstancesInput) SetUserData(v string) *RunInstancesInput { s.UserData = &v @@ -47892,6 +55768,9 @@ type SpotFleetLaunchSpecification struct { // subnets, separate them using commas; for example, "subnet-a61dafcf, subnet-65ea5f08". SubnetId *string `locationName:"subnetId" type:"string"` + // The tags to apply during creation. + TagSpecifications []*SpotFleetTagSpecification `locationName:"tagSpecificationSet" locationNameList:"item" type:"list"` + // The user data to make available to the instances. If you are using an AWS // SDK or command line tool, Base64-encoding is performed for you, and you can // load the text from a file. Otherwise, you must provide Base64-encoded text. @@ -48027,6 +55906,12 @@ func (s *SpotFleetLaunchSpecification) SetSubnetId(v string) *SpotFleetLaunchSpe return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *SpotFleetLaunchSpecification) SetTagSpecifications(v []*SpotFleetTagSpecification) *SpotFleetLaunchSpecification { + s.TagSpecifications = v + return s +} + // SetUserData sets the UserData field's value. func (s *SpotFleetLaunchSpecification) SetUserData(v string) *SpotFleetLaunchSpecification { s.UserData = &v @@ -48169,11 +56054,17 @@ type SpotFleetRequestConfigData struct { // IamFleetRole is a required field IamFleetRole *string `locationName:"iamFleetRole" type:"string" required:"true"` + // Indicates whether a Spot instance stops or terminates when it is interrupted. + InstanceInterruptionBehavior *string `locationName:"instanceInterruptionBehavior" type:"string" enum:"InstanceInterruptionBehavior"` + // Information about the launch specifications for the Spot fleet request. // // LaunchSpecifications is a required field LaunchSpecifications []*SpotFleetLaunchSpecification `locationName:"launchSpecifications" locationNameList:"item" min:"1" type:"list" required:"true"` + // Indicates whether Spot fleet should replace unhealthy instances. + ReplaceUnhealthyInstances *bool `locationName:"replaceUnhealthyInstances" type:"boolean"` + // The bid price per unit hour. // // SpotPrice is a required field @@ -48285,12 +56176,24 @@ func (s *SpotFleetRequestConfigData) SetIamFleetRole(v string) *SpotFleetRequest return s } +// SetInstanceInterruptionBehavior sets the InstanceInterruptionBehavior field's value. +func (s *SpotFleetRequestConfigData) SetInstanceInterruptionBehavior(v string) *SpotFleetRequestConfigData { + s.InstanceInterruptionBehavior = &v + return s +} + // SetLaunchSpecifications sets the LaunchSpecifications field's value. func (s *SpotFleetRequestConfigData) SetLaunchSpecifications(v []*SpotFleetLaunchSpecification) *SpotFleetRequestConfigData { s.LaunchSpecifications = v return s } +// SetReplaceUnhealthyInstances sets the ReplaceUnhealthyInstances field's value. +func (s *SpotFleetRequestConfigData) SetReplaceUnhealthyInstances(v bool) *SpotFleetRequestConfigData { + s.ReplaceUnhealthyInstances = &v + return s +} + // SetSpotPrice sets the SpotPrice field's value. func (s *SpotFleetRequestConfigData) SetSpotPrice(v string) *SpotFleetRequestConfigData { s.SpotPrice = &v @@ -48327,6 +56230,41 @@ func (s *SpotFleetRequestConfigData) SetValidUntil(v time.Time) *SpotFleetReques return s } +// The tags for a Spot fleet resource. 
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/SpotFleetTagSpecification +type SpotFleetTagSpecification struct { + _ struct{} `type:"structure"` + + // The type of resource. Currently, the only resource type that is supported + // is instance. + ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"` + + // The tags. + Tags []*Tag `locationName:"tag" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s SpotFleetTagSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotFleetTagSpecification) GoString() string { + return s.String() +} + +// SetResourceType sets the ResourceType field's value. +func (s *SpotFleetTagSpecification) SetResourceType(v string) *SpotFleetTagSpecification { + s.ResourceType = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *SpotFleetTagSpecification) SetTags(v []*Tag) *SpotFleetTagSpecification { + s.Tags = v + return s +} + // Describes a Spot instance request. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/SpotInstanceRequest type SpotInstanceRequest struct { @@ -48355,6 +56293,9 @@ type SpotInstanceRequest struct { // request. InstanceId *string `locationName:"instanceId" type:"string"` + // Indicates whether a Spot instance stops or terminates when it is interrupted. + InstanceInterruptionBehavior *string `locationName:"instanceInterruptionBehavior" type:"string" enum:"InstanceInterruptionBehavior"` + // The instance launch group. Launch groups are Spot instances that launch together // and terminate together. LaunchGroup *string `locationName:"launchGroup" type:"string"` @@ -48447,6 +56388,12 @@ func (s *SpotInstanceRequest) SetInstanceId(v string) *SpotInstanceRequest { return s } +// SetInstanceInterruptionBehavior sets the InstanceInterruptionBehavior field's value. +func (s *SpotInstanceRequest) SetInstanceInterruptionBehavior(v string) *SpotInstanceRequest { + s.InstanceInterruptionBehavior = &v + return s +} + // SetLaunchGroup sets the LaunchGroup field's value. func (s *SpotInstanceRequest) SetLaunchGroup(v string) *SpotInstanceRequest { s.LaunchGroup = &v @@ -48611,6 +56558,11 @@ type SpotPlacement struct { // The name of the placement group (for cluster instances). GroupName *string `locationName:"groupName" type:"string"` + + // The tenancy of the instance (if the instance is running in a VPC). An instance + // with a tenancy of dedicated runs on single-tenant hardware. The host tenancy + // is not supported for Spot instances. + Tenancy *string `locationName:"tenancy" type:"string" enum:"Tenancy"` } // String returns the string representation @@ -48635,6 +56587,12 @@ func (s *SpotPlacement) SetGroupName(v string) *SpotPlacement { return s } +// SetTenancy sets the Tenancy field's value. +func (s *SpotPlacement) SetTenancy(v string) *SpotPlacement { + s.Tenancy = &v + return s +} + // Describes the maximum hourly price (bid) for any Spot instance launched to // fulfill the request. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/SpotPrice @@ -48940,14 +56898,16 @@ type StateReason struct { // The message for the state change. // - // * Server.SpotInstanceTermination: A Spot instance was terminated due to - // an increase in the market price. + // * Server.InsufficientInstanceCapacity: There was insufficient instance + // capacity to satisfy the launch request. 
// // * Server.InternalError: An internal error occurred during instance launch, // resulting in termination. // - // * Server.InsufficientInstanceCapacity: There was insufficient instance - // capacity to satisfy the launch request. + // * Server.ScheduledStop: The instance was stopped due to a scheduled retirement. + // + // * Server.SpotInstanceTermination: A Spot Instance was terminated due to + // an increase in the market price. // // * Client.InternalError: A client error caused the instance to terminate // on launch. @@ -48955,6 +56915,9 @@ type StateReason struct { // * Client.InstanceInitiatedShutdown: The instance was shut down using the // shutdown -h command from the instance. // + // * Client.InstanceTerminated: The instance was terminated or rebooted during + // AMI creation. + // // * Client.UserInitiatedShutdown: The instance was shut down using the Amazon // EC2 API. // @@ -49104,6 +57067,40 @@ func (s *Storage) SetS3(v *S3Storage) *Storage { return s } +// Describes a storage location in Amazon S3. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/StorageLocation +type StorageLocation struct { + _ struct{} `type:"structure"` + + // The name of the S3 bucket. + Bucket *string `type:"string"` + + // The key. + Key *string `type:"string"` +} + +// String returns the string representation +func (s StorageLocation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StorageLocation) GoString() string { + return s.String() +} + +// SetBucket sets the Bucket field's value. +func (s *StorageLocation) SetBucket(v string) *StorageLocation { + s.Bucket = &v + return s +} + +// SetKey sets the Key field's value. +func (s *StorageLocation) SetKey(v string) *StorageLocation { + s.Key = &v + return s +} + // Describes a subnet. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/Subnet type Subnet struct { @@ -49391,6 +57388,41 @@ func (s *TagDescription) SetValue(v string) *TagDescription { return s } +// The tags to apply to a resource when the resource is being created. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/TagSpecification +type TagSpecification struct { + _ struct{} `type:"structure"` + + // The type of resource to tag. Currently, the resource types that support tagging + // on creation are instance and volume. + ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"` + + // The tags to apply to the resource. + Tags []*Tag `locationName:"Tag" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s TagSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagSpecification) GoString() string { + return s.String() +} + +// SetResourceType sets the ResourceType field's value. +func (s *TagSpecification) SetResourceType(v string) *TagSpecification { + s.ResourceType = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TagSpecification) SetTags(v []*Tag) *TagSpecification { + s.Tags = v + return s +} + // Information about the Convertible Reserved Instance offering. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/TargetConfiguration type TargetConfiguration struct { @@ -49901,6 +57933,202 @@ func (s *UnsuccessfulItemError) SetMessage(v string) *UnsuccessfulItemError { return s } +// Contains the parameters for UpdateSecurityGroupRuleDescriptionsEgress. 
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/UpdateSecurityGroupRuleDescriptionsEgressRequest +type UpdateSecurityGroupRuleDescriptionsEgressInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the security group. You must specify either the security group + // ID or the security group name in the request. For security groups in a nondefault + // VPC, you must specify the security group ID. + GroupId *string `type:"string"` + + // [Default VPC] The name of the security group. You must specify either the + // security group ID or the security group name in the request. + GroupName *string `type:"string"` + + // The IP permissions for the security group rule. + // + // IpPermissions is a required field + IpPermissions []*IpPermission `locationNameList:"item" type:"list" required:"true"` +} + +// String returns the string representation +func (s UpdateSecurityGroupRuleDescriptionsEgressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSecurityGroupRuleDescriptionsEgressInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateSecurityGroupRuleDescriptionsEgressInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateSecurityGroupRuleDescriptionsEgressInput"} + if s.IpPermissions == nil { + invalidParams.Add(request.NewErrParamRequired("IpPermissions")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *UpdateSecurityGroupRuleDescriptionsEgressInput) SetDryRun(v bool) *UpdateSecurityGroupRuleDescriptionsEgressInput { + s.DryRun = &v + return s +} + +// SetGroupId sets the GroupId field's value. +func (s *UpdateSecurityGroupRuleDescriptionsEgressInput) SetGroupId(v string) *UpdateSecurityGroupRuleDescriptionsEgressInput { + s.GroupId = &v + return s +} + +// SetGroupName sets the GroupName field's value. +func (s *UpdateSecurityGroupRuleDescriptionsEgressInput) SetGroupName(v string) *UpdateSecurityGroupRuleDescriptionsEgressInput { + s.GroupName = &v + return s +} + +// SetIpPermissions sets the IpPermissions field's value. +func (s *UpdateSecurityGroupRuleDescriptionsEgressInput) SetIpPermissions(v []*IpPermission) *UpdateSecurityGroupRuleDescriptionsEgressInput { + s.IpPermissions = v + return s +} + +// Contains the output of UpdateSecurityGroupRuleDescriptionsEgress. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/UpdateSecurityGroupRuleDescriptionsEgressResult +type UpdateSecurityGroupRuleDescriptionsEgressOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, returns an error. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s UpdateSecurityGroupRuleDescriptionsEgressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSecurityGroupRuleDescriptionsEgressOutput) GoString() string { + return s.String() +} + +// SetReturn sets the Return field's value. 
+func (s *UpdateSecurityGroupRuleDescriptionsEgressOutput) SetReturn(v bool) *UpdateSecurityGroupRuleDescriptionsEgressOutput { + s.Return = &v + return s +} + +// Contains the parameters for UpdateSecurityGroupRuleDescriptionsIngress. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/UpdateSecurityGroupRuleDescriptionsIngressRequest +type UpdateSecurityGroupRuleDescriptionsIngressInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the security group. You must specify either the security group + // ID or the security group name in the request. For security groups in a nondefault + // VPC, you must specify the security group ID. + GroupId *string `type:"string"` + + // [EC2-Classic, default VPC] The name of the security group. You must specify + // either the security group ID or the security group name in the request. + GroupName *string `type:"string"` + + // The IP permissions for the security group rule. + // + // IpPermissions is a required field + IpPermissions []*IpPermission `locationNameList:"item" type:"list" required:"true"` +} + +// String returns the string representation +func (s UpdateSecurityGroupRuleDescriptionsIngressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSecurityGroupRuleDescriptionsIngressInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateSecurityGroupRuleDescriptionsIngressInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateSecurityGroupRuleDescriptionsIngressInput"} + if s.IpPermissions == nil { + invalidParams.Add(request.NewErrParamRequired("IpPermissions")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *UpdateSecurityGroupRuleDescriptionsIngressInput) SetDryRun(v bool) *UpdateSecurityGroupRuleDescriptionsIngressInput { + s.DryRun = &v + return s +} + +// SetGroupId sets the GroupId field's value. +func (s *UpdateSecurityGroupRuleDescriptionsIngressInput) SetGroupId(v string) *UpdateSecurityGroupRuleDescriptionsIngressInput { + s.GroupId = &v + return s +} + +// SetGroupName sets the GroupName field's value. +func (s *UpdateSecurityGroupRuleDescriptionsIngressInput) SetGroupName(v string) *UpdateSecurityGroupRuleDescriptionsIngressInput { + s.GroupName = &v + return s +} + +// SetIpPermissions sets the IpPermissions field's value. +func (s *UpdateSecurityGroupRuleDescriptionsIngressInput) SetIpPermissions(v []*IpPermission) *UpdateSecurityGroupRuleDescriptionsIngressInput { + s.IpPermissions = v + return s +} + +// Contains the output of UpdateSecurityGroupRuleDescriptionsIngress. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/UpdateSecurityGroupRuleDescriptionsIngressResult +type UpdateSecurityGroupRuleDescriptionsIngressOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, returns an error. 
+ Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s UpdateSecurityGroupRuleDescriptionsIngressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSecurityGroupRuleDescriptionsIngressOutput) GoString() string { + return s.String() +} + +// SetReturn sets the Return field's value. +func (s *UpdateSecurityGroupRuleDescriptionsIngressOutput) SetReturn(v bool) *UpdateSecurityGroupRuleDescriptionsIngressOutput { + s.Return = &v + return s +} + // Describes the S3 bucket for the disk image. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/UserBucket type UserBucket struct { @@ -50001,6 +58229,13 @@ func (s *UserData) SetData(v string) *UserData { type UserIdGroupPair struct { _ struct{} `type:"structure"` + // A description for the security group rule that references this user ID group + // pair. + // + // Constraints: Up to 255 characters in length. Allowed characters are a-z, + // A-Z, 0-9, spaces, and ._-:/()#,@[]+=;{}!$* + Description *string `locationName:"description" type:"string"` + // The ID of the security group. GroupId *string `locationName:"groupId" type:"string"` @@ -50036,6 +58271,12 @@ func (s UserIdGroupPair) GoString() string { return s.String() } +// SetDescription sets the Description field's value. +func (s *UserIdGroupPair) SetDescription(v string) *UserIdGroupPair { + s.Description = &v + return s +} + // SetGroupId sets the GroupId field's value. func (s *UserIdGroupPair) SetGroupId(v string) *UserIdGroupPair { s.GroupId = &v @@ -50383,6 +58624,133 @@ func (s *VolumeDetail) SetSize(v int64) *VolumeDetail { return s } +// Describes the modification status of an EBS volume. +// +// If the volume has never been modified, some element values will be null. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/VolumeModification +type VolumeModification struct { + _ struct{} `type:"structure"` + + // Modification completion or failure time. + EndTime *time.Time `locationName:"endTime" type:"timestamp" timestampFormat:"iso8601"` + + // Current state of modification. Modification state is null for unmodified + // volumes. + ModificationState *string `locationName:"modificationState" type:"string" enum:"VolumeModificationState"` + + // Original IOPS rate of the volume being modified. + OriginalIops *int64 `locationName:"originalIops" type:"integer"` + + // Original size of the volume being modified. + OriginalSize *int64 `locationName:"originalSize" type:"integer"` + + // Original EBS volume type of the volume being modified. + OriginalVolumeType *string `locationName:"originalVolumeType" type:"string" enum:"VolumeType"` + + // Modification progress from 0 to 100%. + Progress *int64 `locationName:"progress" type:"long"` + + // Modification start time + StartTime *time.Time `locationName:"startTime" type:"timestamp" timestampFormat:"iso8601"` + + // Generic status message on modification progress or failure. + StatusMessage *string `locationName:"statusMessage" type:"string"` + + // Target IOPS rate of the volume being modified. + TargetIops *int64 `locationName:"targetIops" type:"integer"` + + // Target size of the volume being modified. + TargetSize *int64 `locationName:"targetSize" type:"integer"` + + // Target EBS volume type of the volume being modified. + TargetVolumeType *string `locationName:"targetVolumeType" type:"string" enum:"VolumeType"` + + // ID of the volume being modified. 
+ VolumeId *string `locationName:"volumeId" type:"string"` +} + +// String returns the string representation +func (s VolumeModification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeModification) GoString() string { + return s.String() +} + +// SetEndTime sets the EndTime field's value. +func (s *VolumeModification) SetEndTime(v time.Time) *VolumeModification { + s.EndTime = &v + return s +} + +// SetModificationState sets the ModificationState field's value. +func (s *VolumeModification) SetModificationState(v string) *VolumeModification { + s.ModificationState = &v + return s +} + +// SetOriginalIops sets the OriginalIops field's value. +func (s *VolumeModification) SetOriginalIops(v int64) *VolumeModification { + s.OriginalIops = &v + return s +} + +// SetOriginalSize sets the OriginalSize field's value. +func (s *VolumeModification) SetOriginalSize(v int64) *VolumeModification { + s.OriginalSize = &v + return s +} + +// SetOriginalVolumeType sets the OriginalVolumeType field's value. +func (s *VolumeModification) SetOriginalVolumeType(v string) *VolumeModification { + s.OriginalVolumeType = &v + return s +} + +// SetProgress sets the Progress field's value. +func (s *VolumeModification) SetProgress(v int64) *VolumeModification { + s.Progress = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *VolumeModification) SetStartTime(v time.Time) *VolumeModification { + s.StartTime = &v + return s +} + +// SetStatusMessage sets the StatusMessage field's value. +func (s *VolumeModification) SetStatusMessage(v string) *VolumeModification { + s.StatusMessage = &v + return s +} + +// SetTargetIops sets the TargetIops field's value. +func (s *VolumeModification) SetTargetIops(v int64) *VolumeModification { + s.TargetIops = &v + return s +} + +// SetTargetSize sets the TargetSize field's value. +func (s *VolumeModification) SetTargetSize(v int64) *VolumeModification { + s.TargetSize = &v + return s +} + +// SetTargetVolumeType sets the TargetVolumeType field's value. +func (s *VolumeModification) SetTargetVolumeType(v string) *VolumeModification { + s.TargetVolumeType = &v + return s +} + +// SetVolumeId sets the VolumeId field's value. +func (s *VolumeModification) SetVolumeId(v string) *VolumeModification { + s.VolumeId = &v + return s +} + // Describes a volume status operation code. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/VolumeStatusAction type VolumeStatusAction struct { @@ -50630,9 +58998,12 @@ func (s *VolumeStatusItem) SetVolumeStatus(v *VolumeStatusInfo) *VolumeStatusIte type Vpc struct { _ struct{} `type:"structure"` - // The IPv4 CIDR block for the VPC. + // The primary IPv4 CIDR block for the VPC. CidrBlock *string `locationName:"cidrBlock" type:"string"` + // Information about the IPv4 CIDR blocks associated with the VPC. + CidrBlockAssociationSet []*VpcCidrBlockAssociation `locationName:"cidrBlockAssociationSet" locationNameList:"item" type:"list"` + // The ID of the set of DHCP options you've associated with the VPC (or default // if the default options are associated with the VPC). DhcpOptionsId *string `locationName:"dhcpOptionsId" type:"string"` @@ -50672,6 +59043,12 @@ func (s *Vpc) SetCidrBlock(v string) *Vpc { return s } +// SetCidrBlockAssociationSet sets the CidrBlockAssociationSet field's value. 
+func (s *Vpc) SetCidrBlockAssociationSet(v []*VpcCidrBlockAssociation) *Vpc { + s.CidrBlockAssociationSet = v + return s +} + // SetDhcpOptionsId sets the DhcpOptionsId field's value. func (s *Vpc) SetDhcpOptionsId(v string) *Vpc { s.DhcpOptionsId = &v @@ -50748,6 +59125,49 @@ func (s *VpcAttachment) SetVpcId(v string) *VpcAttachment { return s } +// Describes an IPv4 CIDR block associated with a VPC. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/VpcCidrBlockAssociation +type VpcCidrBlockAssociation struct { + _ struct{} `type:"structure"` + + // The association ID for the IPv4 CIDR block. + AssociationId *string `locationName:"associationId" type:"string"` + + // The IPv4 CIDR block. + CidrBlock *string `locationName:"cidrBlock" type:"string"` + + // Information about the state of the CIDR block. + CidrBlockState *VpcCidrBlockState `locationName:"cidrBlockState" type:"structure"` +} + +// String returns the string representation +func (s VpcCidrBlockAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcCidrBlockAssociation) GoString() string { + return s.String() +} + +// SetAssociationId sets the AssociationId field's value. +func (s *VpcCidrBlockAssociation) SetAssociationId(v string) *VpcCidrBlockAssociation { + s.AssociationId = &v + return s +} + +// SetCidrBlock sets the CidrBlock field's value. +func (s *VpcCidrBlockAssociation) SetCidrBlock(v string) *VpcCidrBlockAssociation { + s.CidrBlock = &v + return s +} + +// SetCidrBlockState sets the CidrBlockState field's value. +func (s *VpcCidrBlockAssociation) SetCidrBlockState(v *VpcCidrBlockState) *VpcCidrBlockAssociation { + s.CidrBlockState = v + return s +} + // Describes the state of a CIDR block. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/VpcCidrBlockState type VpcCidrBlockState struct { @@ -50952,15 +59372,15 @@ func (s *VpcIpv6CidrBlockAssociation) SetIpv6CidrBlockState(v *VpcCidrBlockState type VpcPeeringConnection struct { _ struct{} `type:"structure"` - // Information about the accepter VPC. CIDR block information is not returned - // when creating a VPC peering connection, or when describing a VPC peering - // connection that's in the initiating-request or pending-acceptance state. + // Information about the accepter VPC. CIDR block information is only returned + // when describing an active VPC peering connection. AccepterVpcInfo *VpcPeeringConnectionVpcInfo `locationName:"accepterVpcInfo" type:"structure"` // The time that an unaccepted VPC peering connection will expire. ExpirationTime *time.Time `locationName:"expirationTime" type:"timestamp" timestampFormat:"iso8601"` - // Information about the requester VPC. + // Information about the requester VPC. CIDR block information is only returned + // when describing an active VPC peering connection. RequesterVpcInfo *VpcPeeringConnectionVpcInfo `locationName:"requesterVpcInfo" type:"structure"` // The status of the VPC peering connection. @@ -51107,6 +59527,9 @@ type VpcPeeringConnectionVpcInfo struct { // The IPv4 CIDR block for the VPC. CidrBlock *string `locationName:"cidrBlock" type:"string"` + // Information about the IPv4 CIDR blocks for the VPC. + CidrBlockSet []*CidrBlock `locationName:"cidrBlockSet" locationNameList:"item" type:"list"` + // The IPv6 CIDR block for the VPC. 
Ipv6CidrBlockSet []*Ipv6CidrBlock `locationName:"ipv6CidrBlockSet" locationNameList:"item" type:"list"` @@ -51137,6 +59560,12 @@ func (s *VpcPeeringConnectionVpcInfo) SetCidrBlock(v string) *VpcPeeringConnecti return s } +// SetCidrBlockSet sets the CidrBlockSet field's value. +func (s *VpcPeeringConnectionVpcInfo) SetCidrBlockSet(v []*CidrBlock) *VpcPeeringConnectionVpcInfo { + s.CidrBlockSet = v + return s +} + // SetIpv6CidrBlockSet sets the Ipv6CidrBlockSet field's value. func (s *VpcPeeringConnectionVpcInfo) SetIpv6CidrBlockSet(v []*Ipv6CidrBlock) *VpcPeeringConnectionVpcInfo { s.Ipv6CidrBlockSet = v @@ -51166,6 +59595,12 @@ func (s *VpcPeeringConnectionVpcInfo) SetVpcId(v string) *VpcPeeringConnectionVp type VpnConnection struct { _ struct{} `type:"structure"` + // The category of the VPN connection. A value of VPN indicates an AWS VPN connection. + // A value of VPN-Classic indicates an AWS Classic VPN connection. For more + // information, see AWS Managed VPN Categories (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html#vpn-categories) + // in the Amazon Virtual Private Cloud User Guide. + Category *string `locationName:"category" type:"string"` + // The configuration information for the VPN connection's customer gateway (in // the native XML format). This element is always present in the CreateVpnConnection // response; however, it's present in the DescribeVpnConnections response only @@ -51210,6 +59645,12 @@ func (s VpnConnection) GoString() string { return s.String() } +// SetCategory sets the Category field's value. +func (s *VpnConnection) SetCategory(v string) *VpnConnection { + s.Category = &v + return s +} + // SetCustomerGatewayConfiguration sets the CustomerGatewayConfiguration field's value. func (s *VpnConnection) SetCustomerGatewayConfiguration(v string) *VpnConnection { s.CustomerGatewayConfiguration = &v @@ -51301,9 +59742,15 @@ func (s *VpnConnectionOptions) SetStaticRoutesOnly(v bool) *VpnConnectionOptions type VpnConnectionOptionsSpecification struct { _ struct{} `type:"structure"` - // Indicates whether the VPN connection uses static routes only. Static routes - // must be used for devices that don't support BGP. + // Indicate whether the VPN connection uses static routes only. If you are creating + // a VPN connection for a device that does not support BGP, you must specify + // true. + // + // Default: false StaticRoutesOnly *bool `locationName:"staticRoutesOnly" type:"boolean"` + + // The tunnel options for the VPN connection. + TunnelOptions []*VpnTunnelOptionsSpecification `locationNameList:"item" type:"list"` } // String returns the string representation @@ -51322,6 +59769,12 @@ func (s *VpnConnectionOptionsSpecification) SetStaticRoutesOnly(v bool) *VpnConn return s } +// SetTunnelOptions sets the TunnelOptions field's value. +func (s *VpnConnectionOptionsSpecification) SetTunnelOptions(v []*VpnTunnelOptionsSpecification) *VpnConnectionOptionsSpecification { + s.TunnelOptions = v + return s +} + // Describes a virtual private gateway. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/VpnGateway type VpnGateway struct { @@ -51436,6 +59889,63 @@ func (s *VpnStaticRoute) SetState(v string) *VpnStaticRoute { return s } +// The tunnel options for a VPN connection. 
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/VpnTunnelOptionsSpecification +type VpnTunnelOptionsSpecification struct { + _ struct{} `type:"structure"` + + // The pre-shared key (PSK) to establish initial authentication between the + // virtual private gateway and customer gateway. + // + // Constraints: Allowed characters are alphanumeric characters and ._. Must + // be between 8 and 64 characters in length and cannot start with zero (0). + PreSharedKey *string `type:"string"` + + // The range of inside IP addresses for the tunnel. Any specified CIDR blocks + // must be unique across all VPN connections that use the same virtual private + // gateway. + // + // Constraints: A size /30 CIDR block from the 169.254.0.0/16 range. The following + // CIDR blocks are reserved and cannot be used: + // + // * 169.254.0.0/30 + // + // * 169.254.1.0/30 + // + // * 169.254.2.0/30 + // + // * 169.254.3.0/30 + // + // * 169.254.4.0/30 + // + // * 169.254.5.0/30 + // + // * 169.254.169.252/30 + TunnelInsideCidr *string `type:"string"` +} + +// String returns the string representation +func (s VpnTunnelOptionsSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpnTunnelOptionsSpecification) GoString() string { + return s.String() +} + +// SetPreSharedKey sets the PreSharedKey field's value. +func (s *VpnTunnelOptionsSpecification) SetPreSharedKey(v string) *VpnTunnelOptionsSpecification { + s.PreSharedKey = &v + return s +} + +// SetTunnelInsideCidr sets the TunnelInsideCidr field's value. +func (s *VpnTunnelOptionsSpecification) SetTunnelInsideCidr(v string) *VpnTunnelOptionsSpecification { + s.TunnelInsideCidr = &v + return s +} + const ( // AccountAttributeNameSupportedPlatforms is a AccountAttributeName enum value AccountAttributeNameSupportedPlatforms = "supported-platforms" @@ -51671,6 +60181,19 @@ const ( DomainTypeStandard = "standard" ) +const ( + // ElasticGpuStateAttached is a ElasticGpuState enum value + ElasticGpuStateAttached = "ATTACHED" +) + +const ( + // ElasticGpuStatusOk is a ElasticGpuStatus enum value + ElasticGpuStatusOk = "OK" + + // ElasticGpuStatusImpaired is a ElasticGpuStatus enum value + ElasticGpuStatusImpaired = "IMPAIRED" +) + const ( // EventCodeInstanceReboot is a EventCode enum value EventCodeInstanceReboot = "instance-reboot" @@ -51751,6 +60274,34 @@ const ( FlowLogsResourceTypeNetworkInterface = "NetworkInterface" ) +const ( + // FpgaImageAttributeNameDescription is a FpgaImageAttributeName enum value + FpgaImageAttributeNameDescription = "description" + + // FpgaImageAttributeNameName is a FpgaImageAttributeName enum value + FpgaImageAttributeNameName = "name" + + // FpgaImageAttributeNameLoadPermission is a FpgaImageAttributeName enum value + FpgaImageAttributeNameLoadPermission = "loadPermission" + + // FpgaImageAttributeNameProductCodes is a FpgaImageAttributeName enum value + FpgaImageAttributeNameProductCodes = "productCodes" +) + +const ( + // FpgaImageStateCodePending is a FpgaImageStateCode enum value + FpgaImageStateCodePending = "pending" + + // FpgaImageStateCodeFailed is a FpgaImageStateCode enum value + FpgaImageStateCodeFailed = "failed" + + // FpgaImageStateCodeAvailable is a FpgaImageStateCode enum value + FpgaImageStateCodeAvailable = "available" + + // FpgaImageStateCodeUnavailable is a FpgaImageStateCode enum value + FpgaImageStateCodeUnavailable = "unavailable" +) + const ( // GatewayTypeIpsec1 is a GatewayType enum value GatewayTypeIpsec1 = 
"ipsec.1" @@ -51772,6 +60323,20 @@ const ( HypervisorTypeXen = "xen" ) +const ( + // IamInstanceProfileAssociationStateAssociating is a IamInstanceProfileAssociationState enum value + IamInstanceProfileAssociationStateAssociating = "associating" + + // IamInstanceProfileAssociationStateAssociated is a IamInstanceProfileAssociationState enum value + IamInstanceProfileAssociationStateAssociated = "associated" + + // IamInstanceProfileAssociationStateDisassociating is a IamInstanceProfileAssociationState enum value + IamInstanceProfileAssociationStateDisassociating = "disassociating" + + // IamInstanceProfileAssociationStateDisassociated is a IamInstanceProfileAssociationState enum value + IamInstanceProfileAssociationStateDisassociated = "disassociated" +) + const ( // ImageAttributeNameDescription is a ImageAttributeName enum value ImageAttributeNameDescription = "description" @@ -51873,6 +60438,22 @@ const ( InstanceAttributeNameEnaSupport = "enaSupport" ) +const ( + // InstanceHealthStatusHealthy is a InstanceHealthStatus enum value + InstanceHealthStatusHealthy = "healthy" + + // InstanceHealthStatusUnhealthy is a InstanceHealthStatus enum value + InstanceHealthStatusUnhealthy = "unhealthy" +) + +const ( + // InstanceInterruptionBehaviorStop is a InstanceInterruptionBehavior enum value + InstanceInterruptionBehaviorStop = "stop" + + // InstanceInterruptionBehaviorTerminate is a InstanceInterruptionBehavior enum value + InstanceInterruptionBehaviorTerminate = "terminate" +) + const ( // InstanceLifecycleTypeSpot is a InstanceLifecycleType enum value InstanceLifecycleTypeSpot = "spot" @@ -52019,6 +60600,9 @@ const ( // InstanceTypeX132xlarge is a InstanceType enum value InstanceTypeX132xlarge = "x1.32xlarge" + // InstanceTypeX1e32xlarge is a InstanceType enum value + InstanceTypeX1e32xlarge = "x1e.32xlarge" + // InstanceTypeI2Xlarge is a InstanceType enum value InstanceTypeI2Xlarge = "i2.xlarge" @@ -52031,6 +60615,24 @@ const ( // InstanceTypeI28xlarge is a InstanceType enum value InstanceTypeI28xlarge = "i2.8xlarge" + // InstanceTypeI3Large is a InstanceType enum value + InstanceTypeI3Large = "i3.large" + + // InstanceTypeI3Xlarge is a InstanceType enum value + InstanceTypeI3Xlarge = "i3.xlarge" + + // InstanceTypeI32xlarge is a InstanceType enum value + InstanceTypeI32xlarge = "i3.2xlarge" + + // InstanceTypeI34xlarge is a InstanceType enum value + InstanceTypeI34xlarge = "i3.4xlarge" + + // InstanceTypeI38xlarge is a InstanceType enum value + InstanceTypeI38xlarge = "i3.8xlarge" + + // InstanceTypeI316xlarge is a InstanceType enum value + InstanceTypeI316xlarge = "i3.16xlarge" + // InstanceTypeHi14xlarge is a InstanceType enum value InstanceTypeHi14xlarge = "hi1.4xlarge" @@ -52085,6 +60687,15 @@ const ( // InstanceTypeG28xlarge is a InstanceType enum value InstanceTypeG28xlarge = "g2.8xlarge" + // InstanceTypeG34xlarge is a InstanceType enum value + InstanceTypeG34xlarge = "g3.4xlarge" + + // InstanceTypeG38xlarge is a InstanceType enum value + InstanceTypeG38xlarge = "g3.8xlarge" + + // InstanceTypeG316xlarge is a InstanceType enum value + InstanceTypeG316xlarge = "g3.16xlarge" + // InstanceTypeCg14xlarge is a InstanceType enum value InstanceTypeCg14xlarge = "cg1.4xlarge" @@ -52116,6 +60727,14 @@ const ( InstanceTypeF116xlarge = "f1.16xlarge" ) +const ( + // InterfacePermissionTypeInstanceAttach is a InterfacePermissionType enum value + InterfacePermissionTypeInstanceAttach = "INSTANCE-ATTACH" + + // InterfacePermissionTypeEipAssociate is a InterfacePermissionType enum value + 
InterfacePermissionTypeEipAssociate = "EIP-ASSOCIATE" +) + const ( // ListingStateAvailable is a ListingState enum value ListingStateAvailable = "available" @@ -52197,6 +60816,20 @@ const ( NetworkInterfaceAttributeAttachment = "attachment" ) +const ( + // NetworkInterfacePermissionStateCodePending is a NetworkInterfacePermissionStateCode enum value + NetworkInterfacePermissionStateCodePending = "pending" + + // NetworkInterfacePermissionStateCodeGranted is a NetworkInterfacePermissionStateCode enum value + NetworkInterfacePermissionStateCodeGranted = "granted" + + // NetworkInterfacePermissionStateCodeRevoking is a NetworkInterfacePermissionStateCode enum value + NetworkInterfacePermissionStateCodeRevoking = "revoking" + + // NetworkInterfacePermissionStateCodeRevoked is a NetworkInterfacePermissionStateCode enum value + NetworkInterfacePermissionStateCodeRevoked = "revoked" +) + const ( // NetworkInterfaceStatusAvailable is a NetworkInterfaceStatus enum value NetworkInterfaceStatusAvailable = "available" @@ -52387,6 +61020,11 @@ const ( ReservedInstanceStateRetired = "retired" ) +const ( + // ResetFpgaImageAttributeNameLoadPermission is a ResetFpgaImageAttributeName enum value + ResetFpgaImageAttributeNameLoadPermission = "loadPermission" +) + const ( // ResetImageAttributeNameLaunchPermission is a ResetImageAttributeName enum value ResetImageAttributeNameLaunchPermission = "launchPermission" @@ -52673,6 +61311,20 @@ const ( VolumeAttributeNameProductCodes = "productCodes" ) +const ( + // VolumeModificationStateModifying is a VolumeModificationState enum value + VolumeModificationStateModifying = "modifying" + + // VolumeModificationStateOptimizing is a VolumeModificationState enum value + VolumeModificationStateOptimizing = "optimizing" + + // VolumeModificationStateCompleted is a VolumeModificationState enum value + VolumeModificationStateCompleted = "completed" + + // VolumeModificationStateFailed is a VolumeModificationState enum value + VolumeModificationStateFailed = "failed" +) + const ( // VolumeStateCreating is a VolumeState enum value VolumeStateCreating = "creating" diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/doc.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/doc.go new file mode 100644 index 000000000000..5471677829cb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/doc.go @@ -0,0 +1,31 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package ec2 provides the client and types for making API +// requests to Amazon Elastic Compute Cloud. +// +// Amazon Elastic Compute Cloud (Amazon EC2) provides resizable computing capacity +// in the Amazon Web Services (AWS) cloud. Using Amazon EC2 eliminates your +// need to invest in hardware up front, so you can develop and deploy applications +// faster. +// +// See https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15 for more information on this service. +// +// See ec2 package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/ec2/ +// +// Using the Client +// +// To Amazon Elastic Compute Cloud with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. 
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the Amazon Elastic Compute Cloud client EC2 for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/ec2/#New +package ec2 diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/errors.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/errors.go new file mode 100644 index 000000000000..3d61d7e357e1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/errors.go @@ -0,0 +1,3 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package ec2 diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/service.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/service.go index c289b5b04dfc..ba4433d388eb 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/service.go @@ -1,4 +1,4 @@ -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. package ec2 @@ -11,13 +11,12 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/ec2query" ) -// Amazon Elastic Compute Cloud (Amazon EC2) provides resizable computing capacity -// in the Amazon Web Services (AWS) cloud. Using Amazon EC2 eliminates your -// need to invest in hardware up front, so you can develop and deploy applications -// faster. -// The service client's operations are safe to be used concurrently. -// It is not safe to mutate any of the client's properties though. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15 +// EC2 provides the API operation methods for making requests to +// Amazon Elastic Compute Cloud. See this package's package overview docs +// for details on the service. +// +// EC2 methods are safe to use concurrently. It is not safe to +// modify mutate any of the struct's properties though. type EC2 struct { *client.Client } diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/waiters.go index 94fab6d847b9..0469f0f01a16 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/waiters.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/waiters.go @@ -1,1027 +1,1626 @@ -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. package ec2 import ( - "github.com/aws/aws-sdk-go/private/waiter" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" ) // WaitUntilBundleTaskComplete uses the Amazon EC2 API operation // DescribeBundleTasks to wait for a condition to be met before returning. -// If the condition is not meet within the max attempt window an error will +// If the condition is not met within the max attempt window, an error will // be returned. func (c *EC2) WaitUntilBundleTaskComplete(input *DescribeBundleTasksInput) error { - waiterCfg := waiter.Config{ - Operation: "DescribeBundleTasks", - Delay: 15, + return c.WaitUntilBundleTaskCompleteWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilBundleTaskCompleteWithContext is an extended version of WaitUntilBundleTaskComplete. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) WaitUntilBundleTaskCompleteWithContext(ctx aws.Context, input *DescribeBundleTasksInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilBundleTaskComplete", MaxAttempts: 40, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "pathAll", - Argument: "BundleTasks[].State", + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "BundleTasks[].State", Expected: "complete", }, { - State: "failure", - Matcher: "pathAny", - Argument: "BundleTasks[].State", + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "BundleTasks[].State", Expected: "failed", }, }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeBundleTasksInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeBundleTasksRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } + w.ApplyOptions(opts...) - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } // WaitUntilConversionTaskCancelled uses the Amazon EC2 API operation // DescribeConversionTasks to wait for a condition to be met before returning. -// If the condition is not meet within the max attempt window an error will +// If the condition is not met within the max attempt window, an error will // be returned. func (c *EC2) WaitUntilConversionTaskCancelled(input *DescribeConversionTasksInput) error { - waiterCfg := waiter.Config{ - Operation: "DescribeConversionTasks", - Delay: 15, + return c.WaitUntilConversionTaskCancelledWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilConversionTaskCancelledWithContext is an extended version of WaitUntilConversionTaskCancelled. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) WaitUntilConversionTaskCancelledWithContext(ctx aws.Context, input *DescribeConversionTasksInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilConversionTaskCancelled", MaxAttempts: 40, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "pathAll", - Argument: "ConversionTasks[].State", + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "ConversionTasks[].State", Expected: "cancelled", }, }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeConversionTasksInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeConversionTasksRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } + w.ApplyOptions(opts...) 
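The pattern repeated throughout this rewritten waiters.go is that each legacy WaitUntilXxx entry point now delegates to a WaitUntilXxxWithContext variant built on request.Waiter, using aws.BackgroundContext() and a constant polling delay. A minimal usage sketch, not part of the generated file, assuming default session credentials, a hypothetical instance ID, and the WithWaiterDelay/WithWaiterMaxAttempts options from the SDK's aws/request package:

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// New session using the default credential/region chain.
	sess := session.Must(session.NewSession())
	svc := ec2.New(sess) // the client is safe for concurrent use

	input := &ec2.DescribeInstancesInput{
		InstanceIds: []*string{aws.String("i-0123456789abcdef0")}, // hypothetical instance ID
	}

	// The pre-existing blocking form still works; per the diff it now
	// delegates to the WithContext variant with aws.BackgroundContext().
	if err := svc.WaitUntilInstanceRunning(input); err != nil {
		fmt.Println("wait failed:", err)
	}

	// WithContext variant with the polling behaviour overridden through
	// request.WaiterOption values (assumed to exist in aws/request at this revision).
	err := svc.WaitUntilInstanceRunningWithContext(aws.BackgroundContext(), input,
		request.WithWaiterDelay(request.ConstantWaiterDelay(10*time.Second)),
		request.WithWaiterMaxAttempts(20),
	)
	if err != nil {
		fmt.Println("wait failed:", err)
	}
}

Because the old entry points simply call the WithContext forms with aws.BackgroundContext(), existing callers of these waiters should keep compiling unchanged.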
- w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } // WaitUntilConversionTaskCompleted uses the Amazon EC2 API operation // DescribeConversionTasks to wait for a condition to be met before returning. -// If the condition is not meet within the max attempt window an error will +// If the condition is not met within the max attempt window, an error will // be returned. func (c *EC2) WaitUntilConversionTaskCompleted(input *DescribeConversionTasksInput) error { - waiterCfg := waiter.Config{ - Operation: "DescribeConversionTasks", - Delay: 15, + return c.WaitUntilConversionTaskCompletedWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilConversionTaskCompletedWithContext is an extended version of WaitUntilConversionTaskCompleted. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) WaitUntilConversionTaskCompletedWithContext(ctx aws.Context, input *DescribeConversionTasksInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilConversionTaskCompleted", MaxAttempts: 40, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "pathAll", - Argument: "ConversionTasks[].State", + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "ConversionTasks[].State", Expected: "completed", }, { - State: "failure", - Matcher: "pathAny", - Argument: "ConversionTasks[].State", + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "ConversionTasks[].State", Expected: "cancelled", }, { - State: "failure", - Matcher: "pathAny", - Argument: "ConversionTasks[].State", + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "ConversionTasks[].State", Expected: "cancelling", }, }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeConversionTasksInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeConversionTasksRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } + w.ApplyOptions(opts...) - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } // WaitUntilConversionTaskDeleted uses the Amazon EC2 API operation // DescribeConversionTasks to wait for a condition to be met before returning. -// If the condition is not meet within the max attempt window an error will +// If the condition is not met within the max attempt window, an error will // be returned. func (c *EC2) WaitUntilConversionTaskDeleted(input *DescribeConversionTasksInput) error { - waiterCfg := waiter.Config{ - Operation: "DescribeConversionTasks", - Delay: 15, + return c.WaitUntilConversionTaskDeletedWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilConversionTaskDeletedWithContext is an extended version of WaitUntilConversionTaskDeleted. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) WaitUntilConversionTaskDeletedWithContext(ctx aws.Context, input *DescribeConversionTasksInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilConversionTaskDeleted", MaxAttempts: 40, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "pathAll", - Argument: "ConversionTasks[].State", + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "ConversionTasks[].State", Expected: "deleted", }, }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeConversionTasksInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeConversionTasksRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } + w.ApplyOptions(opts...) - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } // WaitUntilCustomerGatewayAvailable uses the Amazon EC2 API operation // DescribeCustomerGateways to wait for a condition to be met before returning. -// If the condition is not meet within the max attempt window an error will +// If the condition is not met within the max attempt window, an error will // be returned. func (c *EC2) WaitUntilCustomerGatewayAvailable(input *DescribeCustomerGatewaysInput) error { - waiterCfg := waiter.Config{ - Operation: "DescribeCustomerGateways", - Delay: 15, + return c.WaitUntilCustomerGatewayAvailableWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilCustomerGatewayAvailableWithContext is an extended version of WaitUntilCustomerGatewayAvailable. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) WaitUntilCustomerGatewayAvailableWithContext(ctx aws.Context, input *DescribeCustomerGatewaysInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilCustomerGatewayAvailable", MaxAttempts: 40, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "pathAll", - Argument: "CustomerGateways[].State", + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "CustomerGateways[].State", Expected: "available", }, { - State: "failure", - Matcher: "pathAny", - Argument: "CustomerGateways[].State", + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "CustomerGateways[].State", Expected: "deleted", }, { - State: "failure", - Matcher: "pathAny", - Argument: "CustomerGateways[].State", + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "CustomerGateways[].State", Expected: "deleting", }, }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeCustomerGatewaysInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeCustomerGatewaysRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } + w.ApplyOptions(opts...) - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } // WaitUntilExportTaskCancelled uses the Amazon EC2 API operation // DescribeExportTasks to wait for a condition to be met before returning. -// If the condition is not meet within the max attempt window an error will +// If the condition is not met within the max attempt window, an error will // be returned. func (c *EC2) WaitUntilExportTaskCancelled(input *DescribeExportTasksInput) error { - waiterCfg := waiter.Config{ - Operation: "DescribeExportTasks", - Delay: 15, + return c.WaitUntilExportTaskCancelledWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilExportTaskCancelledWithContext is an extended version of WaitUntilExportTaskCancelled. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) WaitUntilExportTaskCancelledWithContext(ctx aws.Context, input *DescribeExportTasksInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilExportTaskCancelled", MaxAttempts: 40, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "pathAll", - Argument: "ExportTasks[].State", + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "ExportTasks[].State", Expected: "cancelled", }, }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeExportTasksInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeExportTasksRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } + w.ApplyOptions(opts...) 
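As the new doc comments state, the context passed to a WithContext waiter is used for request cancellation, so callers can bound how long the waiter polls. A short sketch under stated assumptions (a hypothetical export task ID; any context.Context satisfies aws.Context):

package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	// Give up after five minutes; cancelling the context aborts the
	// in-flight DescribeExportTasks polling requests.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()

	err := svc.WaitUntilExportTaskCompletedWithContext(ctx, &ec2.DescribeExportTasksInput{
		ExportTaskIds: []*string{aws.String("export-i-0123456789abcdef0")}, // hypothetical task ID
	})
	if err != nil {
		// A timeout or cancellation surfaces here as a waiter error.
		log.Println("export task wait ended early:", err)
	}
}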
- w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } // WaitUntilExportTaskCompleted uses the Amazon EC2 API operation // DescribeExportTasks to wait for a condition to be met before returning. -// If the condition is not meet within the max attempt window an error will +// If the condition is not met within the max attempt window, an error will // be returned. func (c *EC2) WaitUntilExportTaskCompleted(input *DescribeExportTasksInput) error { - waiterCfg := waiter.Config{ - Operation: "DescribeExportTasks", - Delay: 15, + return c.WaitUntilExportTaskCompletedWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilExportTaskCompletedWithContext is an extended version of WaitUntilExportTaskCompleted. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) WaitUntilExportTaskCompletedWithContext(ctx aws.Context, input *DescribeExportTasksInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilExportTaskCompleted", MaxAttempts: 40, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "pathAll", - Argument: "ExportTasks[].State", + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "ExportTasks[].State", Expected: "completed", }, }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeExportTasksInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeExportTasksRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } + w.ApplyOptions(opts...) - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } // WaitUntilImageAvailable uses the Amazon EC2 API operation // DescribeImages to wait for a condition to be met before returning. -// If the condition is not meet within the max attempt window an error will +// If the condition is not met within the max attempt window, an error will // be returned. func (c *EC2) WaitUntilImageAvailable(input *DescribeImagesInput) error { - waiterCfg := waiter.Config{ - Operation: "DescribeImages", - Delay: 15, + return c.WaitUntilImageAvailableWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilImageAvailableWithContext is an extended version of WaitUntilImageAvailable. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) WaitUntilImageAvailableWithContext(ctx aws.Context, input *DescribeImagesInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilImageAvailable", MaxAttempts: 40, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "pathAll", - Argument: "Images[].State", + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "Images[].State", Expected: "available", }, { - State: "failure", - Matcher: "pathAny", - Argument: "Images[].State", + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "Images[].State", Expected: "failed", }, }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeImagesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeImagesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } + w.ApplyOptions(opts...) - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } // WaitUntilImageExists uses the Amazon EC2 API operation // DescribeImages to wait for a condition to be met before returning. -// If the condition is not meet within the max attempt window an error will +// If the condition is not met within the max attempt window, an error will // be returned. func (c *EC2) WaitUntilImageExists(input *DescribeImagesInput) error { - waiterCfg := waiter.Config{ - Operation: "DescribeImages", - Delay: 15, + return c.WaitUntilImageExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilImageExistsWithContext is an extended version of WaitUntilImageExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) WaitUntilImageExistsWithContext(ctx aws.Context, input *DescribeImagesInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilImageExists", MaxAttempts: 40, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "path", - Argument: "length(Images[]) > `0`", + State: request.SuccessWaiterState, + Matcher: request.PathWaiterMatch, Argument: "length(Images[]) > `0`", Expected: true, }, { - State: "retry", - Matcher: "error", - Argument: "", + State: request.RetryWaiterState, + Matcher: request.ErrorWaiterMatch, Expected: "InvalidAMIID.NotFound", }, }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeImagesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeImagesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } + w.ApplyOptions(opts...) - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } // WaitUntilInstanceExists uses the Amazon EC2 API operation // DescribeInstances to wait for a condition to be met before returning. 
-// If the condition is not meet within the max attempt window an error will +// If the condition is not met within the max attempt window, an error will // be returned. func (c *EC2) WaitUntilInstanceExists(input *DescribeInstancesInput) error { - waiterCfg := waiter.Config{ - Operation: "DescribeInstances", - Delay: 5, + return c.WaitUntilInstanceExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilInstanceExistsWithContext is an extended version of WaitUntilInstanceExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) WaitUntilInstanceExistsWithContext(ctx aws.Context, input *DescribeInstancesInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilInstanceExists", MaxAttempts: 40, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "path", - Argument: "length(Reservations[]) > `0`", + State: request.SuccessWaiterState, + Matcher: request.PathWaiterMatch, Argument: "length(Reservations[]) > `0`", Expected: true, }, { - State: "retry", - Matcher: "error", - Argument: "", + State: request.RetryWaiterState, + Matcher: request.ErrorWaiterMatch, Expected: "InvalidInstanceID.NotFound", }, }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeInstancesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInstancesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } + w.ApplyOptions(opts...) - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } // WaitUntilInstanceRunning uses the Amazon EC2 API operation // DescribeInstances to wait for a condition to be met before returning. -// If the condition is not meet within the max attempt window an error will +// If the condition is not met within the max attempt window, an error will // be returned. func (c *EC2) WaitUntilInstanceRunning(input *DescribeInstancesInput) error { - waiterCfg := waiter.Config{ - Operation: "DescribeInstances", - Delay: 15, + return c.WaitUntilInstanceRunningWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilInstanceRunningWithContext is an extended version of WaitUntilInstanceRunning. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) WaitUntilInstanceRunningWithContext(ctx aws.Context, input *DescribeInstancesInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilInstanceRunning", MaxAttempts: 40, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "pathAll", - Argument: "Reservations[].Instances[].State.Name", + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "Reservations[].Instances[].State.Name", Expected: "running", }, { - State: "failure", - Matcher: "pathAny", - Argument: "Reservations[].Instances[].State.Name", + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "Reservations[].Instances[].State.Name", Expected: "shutting-down", }, { - State: "failure", - Matcher: "pathAny", - Argument: "Reservations[].Instances[].State.Name", + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "Reservations[].Instances[].State.Name", Expected: "terminated", }, { - State: "failure", - Matcher: "pathAny", - Argument: "Reservations[].Instances[].State.Name", + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "Reservations[].Instances[].State.Name", Expected: "stopping", }, { - State: "retry", - Matcher: "error", - Argument: "", + State: request.RetryWaiterState, + Matcher: request.ErrorWaiterMatch, Expected: "InvalidInstanceID.NotFound", }, }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeInstancesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInstancesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } + w.ApplyOptions(opts...) - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } // WaitUntilInstanceStatusOk uses the Amazon EC2 API operation // DescribeInstanceStatus to wait for a condition to be met before returning. -// If the condition is not meet within the max attempt window an error will +// If the condition is not met within the max attempt window, an error will // be returned. func (c *EC2) WaitUntilInstanceStatusOk(input *DescribeInstanceStatusInput) error { - waiterCfg := waiter.Config{ - Operation: "DescribeInstanceStatus", - Delay: 15, + return c.WaitUntilInstanceStatusOkWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilInstanceStatusOkWithContext is an extended version of WaitUntilInstanceStatusOk. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) WaitUntilInstanceStatusOkWithContext(ctx aws.Context, input *DescribeInstanceStatusInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilInstanceStatusOk", MaxAttempts: 40, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "pathAll", - Argument: "InstanceStatuses[].InstanceStatus.Status", + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "InstanceStatuses[].InstanceStatus.Status", Expected: "ok", }, { - State: "retry", - Matcher: "error", - Argument: "", + State: request.RetryWaiterState, + Matcher: request.ErrorWaiterMatch, Expected: "InvalidInstanceID.NotFound", }, }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeInstanceStatusInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInstanceStatusRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } + w.ApplyOptions(opts...) - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } // WaitUntilInstanceStopped uses the Amazon EC2 API operation // DescribeInstances to wait for a condition to be met before returning. -// If the condition is not meet within the max attempt window an error will +// If the condition is not met within the max attempt window, an error will // be returned. func (c *EC2) WaitUntilInstanceStopped(input *DescribeInstancesInput) error { - waiterCfg := waiter.Config{ - Operation: "DescribeInstances", - Delay: 15, + return c.WaitUntilInstanceStoppedWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilInstanceStoppedWithContext is an extended version of WaitUntilInstanceStopped. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) WaitUntilInstanceStoppedWithContext(ctx aws.Context, input *DescribeInstancesInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilInstanceStopped", MaxAttempts: 40, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "pathAll", - Argument: "Reservations[].Instances[].State.Name", + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "Reservations[].Instances[].State.Name", Expected: "stopped", }, { - State: "failure", - Matcher: "pathAny", - Argument: "Reservations[].Instances[].State.Name", + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "Reservations[].Instances[].State.Name", Expected: "pending", }, { - State: "failure", - Matcher: "pathAny", - Argument: "Reservations[].Instances[].State.Name", + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "Reservations[].Instances[].State.Name", Expected: "terminated", }, }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeInstancesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInstancesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } + w.ApplyOptions(opts...) - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } // WaitUntilInstanceTerminated uses the Amazon EC2 API operation // DescribeInstances to wait for a condition to be met before returning. -// If the condition is not meet within the max attempt window an error will +// If the condition is not met within the max attempt window, an error will // be returned. func (c *EC2) WaitUntilInstanceTerminated(input *DescribeInstancesInput) error { - waiterCfg := waiter.Config{ - Operation: "DescribeInstances", - Delay: 15, + return c.WaitUntilInstanceTerminatedWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilInstanceTerminatedWithContext is an extended version of WaitUntilInstanceTerminated. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) WaitUntilInstanceTerminatedWithContext(ctx aws.Context, input *DescribeInstancesInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilInstanceTerminated", MaxAttempts: 40, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "pathAll", - Argument: "Reservations[].Instances[].State.Name", + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "Reservations[].Instances[].State.Name", Expected: "terminated", }, { - State: "failure", - Matcher: "pathAny", - Argument: "Reservations[].Instances[].State.Name", + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "Reservations[].Instances[].State.Name", Expected: "pending", }, { - State: "failure", - Matcher: "pathAny", - Argument: "Reservations[].Instances[].State.Name", + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "Reservations[].Instances[].State.Name", Expected: "stopping", }, }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeInstancesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInstancesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } + w.ApplyOptions(opts...) - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } // WaitUntilKeyPairExists uses the Amazon EC2 API operation // DescribeKeyPairs to wait for a condition to be met before returning. -// If the condition is not meet within the max attempt window an error will +// If the condition is not met within the max attempt window, an error will // be returned. func (c *EC2) WaitUntilKeyPairExists(input *DescribeKeyPairsInput) error { - waiterCfg := waiter.Config{ - Operation: "DescribeKeyPairs", - Delay: 5, + return c.WaitUntilKeyPairExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilKeyPairExistsWithContext is an extended version of WaitUntilKeyPairExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) WaitUntilKeyPairExistsWithContext(ctx aws.Context, input *DescribeKeyPairsInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilKeyPairExists", MaxAttempts: 6, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "pathAll", - Argument: "length(KeyPairs[].KeyName) > `0`", + State: request.SuccessWaiterState, + Matcher: request.PathWaiterMatch, Argument: "length(KeyPairs[].KeyName) > `0`", Expected: true, }, { - State: "retry", - Matcher: "error", - Argument: "", + State: request.RetryWaiterState, + Matcher: request.ErrorWaiterMatch, Expected: "InvalidKeyPair.NotFound", }, }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeKeyPairsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeKeyPairsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } + w.ApplyOptions(opts...) - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } // WaitUntilNatGatewayAvailable uses the Amazon EC2 API operation // DescribeNatGateways to wait for a condition to be met before returning. -// If the condition is not meet within the max attempt window an error will +// If the condition is not met within the max attempt window, an error will // be returned. func (c *EC2) WaitUntilNatGatewayAvailable(input *DescribeNatGatewaysInput) error { - waiterCfg := waiter.Config{ - Operation: "DescribeNatGateways", - Delay: 15, + return c.WaitUntilNatGatewayAvailableWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilNatGatewayAvailableWithContext is an extended version of WaitUntilNatGatewayAvailable. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) WaitUntilNatGatewayAvailableWithContext(ctx aws.Context, input *DescribeNatGatewaysInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilNatGatewayAvailable", MaxAttempts: 40, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "pathAll", - Argument: "NatGateways[].State", + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "NatGateways[].State", Expected: "available", }, { - State: "failure", - Matcher: "pathAny", - Argument: "NatGateways[].State", + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "NatGateways[].State", Expected: "failed", }, { - State: "failure", - Matcher: "pathAny", - Argument: "NatGateways[].State", + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "NatGateways[].State", Expected: "deleting", }, { - State: "failure", - Matcher: "pathAny", - Argument: "NatGateways[].State", + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "NatGateways[].State", Expected: "deleted", }, { - State: "retry", - Matcher: "error", - Argument: "", + State: request.RetryWaiterState, + Matcher: request.ErrorWaiterMatch, Expected: "NatGatewayNotFound", }, }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeNatGatewaysInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeNatGatewaysRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } + w.ApplyOptions(opts...) - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } // WaitUntilNetworkInterfaceAvailable uses the Amazon EC2 API operation // DescribeNetworkInterfaces to wait for a condition to be met before returning. -// If the condition is not meet within the max attempt window an error will +// If the condition is not met within the max attempt window, an error will // be returned. func (c *EC2) WaitUntilNetworkInterfaceAvailable(input *DescribeNetworkInterfacesInput) error { - waiterCfg := waiter.Config{ - Operation: "DescribeNetworkInterfaces", - Delay: 20, + return c.WaitUntilNetworkInterfaceAvailableWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilNetworkInterfaceAvailableWithContext is an extended version of WaitUntilNetworkInterfaceAvailable. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) WaitUntilNetworkInterfaceAvailableWithContext(ctx aws.Context, input *DescribeNetworkInterfacesInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilNetworkInterfaceAvailable", MaxAttempts: 10, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(20 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "pathAll", - Argument: "NetworkInterfaces[].Status", + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "NetworkInterfaces[].Status", Expected: "available", }, { - State: "failure", - Matcher: "error", - Argument: "", + State: request.FailureWaiterState, + Matcher: request.ErrorWaiterMatch, Expected: "InvalidNetworkInterfaceID.NotFound", }, }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeNetworkInterfacesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeNetworkInterfacesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } + w.ApplyOptions(opts...) - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } // WaitUntilPasswordDataAvailable uses the Amazon EC2 API operation // GetPasswordData to wait for a condition to be met before returning. -// If the condition is not meet within the max attempt window an error will +// If the condition is not met within the max attempt window, an error will // be returned. func (c *EC2) WaitUntilPasswordDataAvailable(input *GetPasswordDataInput) error { - waiterCfg := waiter.Config{ - Operation: "GetPasswordData", - Delay: 15, + return c.WaitUntilPasswordDataAvailableWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilPasswordDataAvailableWithContext is an extended version of WaitUntilPasswordDataAvailable. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) WaitUntilPasswordDataAvailableWithContext(ctx aws.Context, input *GetPasswordDataInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilPasswordDataAvailable", MaxAttempts: 40, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "path", - Argument: "length(PasswordData) > `0`", + State: request.SuccessWaiterState, + Matcher: request.PathWaiterMatch, Argument: "length(PasswordData) > `0`", Expected: true, }, }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *GetPasswordDataInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetPasswordDataRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } + w.ApplyOptions(opts...) - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } // WaitUntilSnapshotCompleted uses the Amazon EC2 API operation // DescribeSnapshots to wait for a condition to be met before returning. 
-// If the condition is not meet within the max attempt window an error will +// If the condition is not met within the max attempt window, an error will // be returned. func (c *EC2) WaitUntilSnapshotCompleted(input *DescribeSnapshotsInput) error { - waiterCfg := waiter.Config{ - Operation: "DescribeSnapshots", - Delay: 15, + return c.WaitUntilSnapshotCompletedWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilSnapshotCompletedWithContext is an extended version of WaitUntilSnapshotCompleted. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) WaitUntilSnapshotCompletedWithContext(ctx aws.Context, input *DescribeSnapshotsInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilSnapshotCompleted", MaxAttempts: 40, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "pathAll", - Argument: "Snapshots[].State", + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "Snapshots[].State", Expected: "completed", }, }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeSnapshotsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeSnapshotsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } + w.ApplyOptions(opts...) - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } // WaitUntilSpotInstanceRequestFulfilled uses the Amazon EC2 API operation // DescribeSpotInstanceRequests to wait for a condition to be met before returning. -// If the condition is not meet within the max attempt window an error will +// If the condition is not met within the max attempt window, an error will // be returned. func (c *EC2) WaitUntilSpotInstanceRequestFulfilled(input *DescribeSpotInstanceRequestsInput) error { - waiterCfg := waiter.Config{ - Operation: "DescribeSpotInstanceRequests", - Delay: 15, + return c.WaitUntilSpotInstanceRequestFulfilledWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilSpotInstanceRequestFulfilledWithContext is an extended version of WaitUntilSpotInstanceRequestFulfilled. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) WaitUntilSpotInstanceRequestFulfilledWithContext(ctx aws.Context, input *DescribeSpotInstanceRequestsInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilSpotInstanceRequestFulfilled", MaxAttempts: 40, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "pathAll", - Argument: "SpotInstanceRequests[].Status.Code", + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "SpotInstanceRequests[].Status.Code", Expected: "fulfilled", }, { - State: "failure", - Matcher: "pathAny", - Argument: "SpotInstanceRequests[].Status.Code", + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "SpotInstanceRequests[].Status.Code", + Expected: "request-canceled-and-instance-running", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "SpotInstanceRequests[].Status.Code", Expected: "schedule-expired", }, { - State: "failure", - Matcher: "pathAny", - Argument: "SpotInstanceRequests[].Status.Code", + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "SpotInstanceRequests[].Status.Code", Expected: "canceled-before-fulfillment", }, { - State: "failure", - Matcher: "pathAny", - Argument: "SpotInstanceRequests[].Status.Code", + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "SpotInstanceRequests[].Status.Code", Expected: "bad-parameters", }, { - State: "failure", - Matcher: "pathAny", - Argument: "SpotInstanceRequests[].Status.Code", + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "SpotInstanceRequests[].Status.Code", Expected: "system-error", }, + { + State: request.RetryWaiterState, + Matcher: request.ErrorWaiterMatch, + Expected: "InvalidSpotInstanceRequestID.NotFound", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeSpotInstanceRequestsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeSpotInstanceRequestsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil }, } + w.ApplyOptions(opts...) - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } // WaitUntilSubnetAvailable uses the Amazon EC2 API operation // DescribeSubnets to wait for a condition to be met before returning. -// If the condition is not meet within the max attempt window an error will +// If the condition is not met within the max attempt window, an error will // be returned. func (c *EC2) WaitUntilSubnetAvailable(input *DescribeSubnetsInput) error { - waiterCfg := waiter.Config{ - Operation: "DescribeSubnets", - Delay: 15, + return c.WaitUntilSubnetAvailableWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilSubnetAvailableWithContext is an extended version of WaitUntilSubnetAvailable. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) WaitUntilSubnetAvailableWithContext(ctx aws.Context, input *DescribeSubnetsInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilSubnetAvailable", MaxAttempts: 40, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "pathAll", - Argument: "Subnets[].State", + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "Subnets[].State", Expected: "available", }, }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeSubnetsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeSubnetsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } + w.ApplyOptions(opts...) - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } // WaitUntilSystemStatusOk uses the Amazon EC2 API operation // DescribeInstanceStatus to wait for a condition to be met before returning. -// If the condition is not meet within the max attempt window an error will +// If the condition is not met within the max attempt window, an error will // be returned. func (c *EC2) WaitUntilSystemStatusOk(input *DescribeInstanceStatusInput) error { - waiterCfg := waiter.Config{ - Operation: "DescribeInstanceStatus", - Delay: 15, + return c.WaitUntilSystemStatusOkWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilSystemStatusOkWithContext is an extended version of WaitUntilSystemStatusOk. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) WaitUntilSystemStatusOkWithContext(ctx aws.Context, input *DescribeInstanceStatusInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilSystemStatusOk", MaxAttempts: 40, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "pathAll", - Argument: "InstanceStatuses[].SystemStatus.Status", + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "InstanceStatuses[].SystemStatus.Status", Expected: "ok", }, }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeInstanceStatusInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInstanceStatusRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } + w.ApplyOptions(opts...) - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } // WaitUntilVolumeAvailable uses the Amazon EC2 API operation // DescribeVolumes to wait for a condition to be met before returning. -// If the condition is not meet within the max attempt window an error will +// If the condition is not met within the max attempt window, an error will // be returned. 
func (c *EC2) WaitUntilVolumeAvailable(input *DescribeVolumesInput) error { - waiterCfg := waiter.Config{ - Operation: "DescribeVolumes", - Delay: 15, + return c.WaitUntilVolumeAvailableWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilVolumeAvailableWithContext is an extended version of WaitUntilVolumeAvailable. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) WaitUntilVolumeAvailableWithContext(ctx aws.Context, input *DescribeVolumesInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilVolumeAvailable", MaxAttempts: 40, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "pathAll", - Argument: "Volumes[].State", + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "Volumes[].State", Expected: "available", }, { - State: "failure", - Matcher: "pathAny", - Argument: "Volumes[].State", + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "Volumes[].State", Expected: "deleted", }, }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeVolumesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeVolumesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } + w.ApplyOptions(opts...) - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } // WaitUntilVolumeDeleted uses the Amazon EC2 API operation // DescribeVolumes to wait for a condition to be met before returning. -// If the condition is not meet within the max attempt window an error will +// If the condition is not met within the max attempt window, an error will // be returned. func (c *EC2) WaitUntilVolumeDeleted(input *DescribeVolumesInput) error { - waiterCfg := waiter.Config{ - Operation: "DescribeVolumes", - Delay: 15, + return c.WaitUntilVolumeDeletedWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilVolumeDeletedWithContext is an extended version of WaitUntilVolumeDeleted. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) WaitUntilVolumeDeletedWithContext(ctx aws.Context, input *DescribeVolumesInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilVolumeDeleted", MaxAttempts: 40, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "pathAll", - Argument: "Volumes[].State", + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "Volumes[].State", Expected: "deleted", }, { - State: "success", - Matcher: "error", - Argument: "", + State: request.SuccessWaiterState, + Matcher: request.ErrorWaiterMatch, Expected: "InvalidVolume.NotFound", }, }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeVolumesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeVolumesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } + w.ApplyOptions(opts...) - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } // WaitUntilVolumeInUse uses the Amazon EC2 API operation // DescribeVolumes to wait for a condition to be met before returning. -// If the condition is not meet within the max attempt window an error will +// If the condition is not met within the max attempt window, an error will // be returned. func (c *EC2) WaitUntilVolumeInUse(input *DescribeVolumesInput) error { - waiterCfg := waiter.Config{ - Operation: "DescribeVolumes", - Delay: 15, + return c.WaitUntilVolumeInUseWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilVolumeInUseWithContext is an extended version of WaitUntilVolumeInUse. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) WaitUntilVolumeInUseWithContext(ctx aws.Context, input *DescribeVolumesInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilVolumeInUse", MaxAttempts: 40, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "pathAll", - Argument: "Volumes[].State", + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "Volumes[].State", Expected: "in-use", }, { - State: "failure", - Matcher: "pathAny", - Argument: "Volumes[].State", + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "Volumes[].State", Expected: "deleted", }, }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeVolumesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeVolumesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } + w.ApplyOptions(opts...) - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } // WaitUntilVpcAvailable uses the Amazon EC2 API operation // DescribeVpcs to wait for a condition to be met before returning. 
-// If the condition is not meet within the max attempt window an error will +// If the condition is not met within the max attempt window, an error will // be returned. func (c *EC2) WaitUntilVpcAvailable(input *DescribeVpcsInput) error { - waiterCfg := waiter.Config{ - Operation: "DescribeVpcs", - Delay: 15, + return c.WaitUntilVpcAvailableWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilVpcAvailableWithContext is an extended version of WaitUntilVpcAvailable. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) WaitUntilVpcAvailableWithContext(ctx aws.Context, input *DescribeVpcsInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilVpcAvailable", MaxAttempts: 40, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "pathAll", - Argument: "Vpcs[].State", + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "Vpcs[].State", Expected: "available", }, }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeVpcsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeVpcsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } + w.ApplyOptions(opts...) - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } // WaitUntilVpcExists uses the Amazon EC2 API operation // DescribeVpcs to wait for a condition to be met before returning. -// If the condition is not meet within the max attempt window an error will +// If the condition is not met within the max attempt window, an error will // be returned. func (c *EC2) WaitUntilVpcExists(input *DescribeVpcsInput) error { - waiterCfg := waiter.Config{ - Operation: "DescribeVpcs", - Delay: 1, + return c.WaitUntilVpcExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilVpcExistsWithContext is an extended version of WaitUntilVpcExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) WaitUntilVpcExistsWithContext(ctx aws.Context, input *DescribeVpcsInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilVpcExists", MaxAttempts: 5, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(1 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "status", - Argument: "", + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, Expected: 200, }, { - State: "retry", - Matcher: "error", - Argument: "", + State: request.RetryWaiterState, + Matcher: request.ErrorWaiterMatch, Expected: "InvalidVpcID.NotFound", }, }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeVpcsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeVpcsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, +// WaitUntilVpcPeeringConnectionDeleted uses the Amazon EC2 API operation +// DescribeVpcPeeringConnections to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *EC2) WaitUntilVpcPeeringConnectionDeleted(input *DescribeVpcPeeringConnectionsInput) error { + return c.WaitUntilVpcPeeringConnectionDeletedWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilVpcPeeringConnectionDeletedWithContext is an extended version of WaitUntilVpcPeeringConnectionDeleted. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) WaitUntilVpcPeeringConnectionDeletedWithContext(ctx aws.Context, input *DescribeVpcPeeringConnectionsInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilVpcPeeringConnectionDeleted", + MaxAttempts: 40, + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "VpcPeeringConnections[].Status.Code", + Expected: "deleted", + }, + { + State: request.SuccessWaiterState, + Matcher: request.ErrorWaiterMatch, + Expected: "InvalidVpcPeeringConnectionID.NotFound", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeVpcPeeringConnectionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeVpcPeeringConnectionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } - return w.Wait() + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) } // WaitUntilVpcPeeringConnectionExists uses the Amazon EC2 API operation // DescribeVpcPeeringConnections to wait for a condition to be met before returning. -// If the condition is not meet within the max attempt window an error will +// If the condition is not met within the max attempt window, an error will // be returned. 
func (c *EC2) WaitUntilVpcPeeringConnectionExists(input *DescribeVpcPeeringConnectionsInput) error { - waiterCfg := waiter.Config{ - Operation: "DescribeVpcPeeringConnections", - Delay: 15, + return c.WaitUntilVpcPeeringConnectionExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilVpcPeeringConnectionExistsWithContext is an extended version of WaitUntilVpcPeeringConnectionExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) WaitUntilVpcPeeringConnectionExistsWithContext(ctx aws.Context, input *DescribeVpcPeeringConnectionsInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilVpcPeeringConnectionExists", MaxAttempts: 40, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "status", - Argument: "", + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, Expected: 200, }, { - State: "retry", - Matcher: "error", - Argument: "", + State: request.RetryWaiterState, + Matcher: request.ErrorWaiterMatch, Expected: "InvalidVpcPeeringConnectionID.NotFound", }, }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeVpcPeeringConnectionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeVpcPeeringConnectionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } + w.ApplyOptions(opts...) - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } // WaitUntilVpnConnectionAvailable uses the Amazon EC2 API operation // DescribeVpnConnections to wait for a condition to be met before returning. -// If the condition is not meet within the max attempt window an error will +// If the condition is not met within the max attempt window, an error will // be returned. func (c *EC2) WaitUntilVpnConnectionAvailable(input *DescribeVpnConnectionsInput) error { - waiterCfg := waiter.Config{ - Operation: "DescribeVpnConnections", - Delay: 15, + return c.WaitUntilVpnConnectionAvailableWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilVpnConnectionAvailableWithContext is an extended version of WaitUntilVpnConnectionAvailable. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) WaitUntilVpnConnectionAvailableWithContext(ctx aws.Context, input *DescribeVpnConnectionsInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilVpnConnectionAvailable", MaxAttempts: 40, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "pathAll", - Argument: "VpnConnections[].State", + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "VpnConnections[].State", Expected: "available", }, { - State: "failure", - Matcher: "pathAny", - Argument: "VpnConnections[].State", + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "VpnConnections[].State", Expected: "deleting", }, { - State: "failure", - Matcher: "pathAny", - Argument: "VpnConnections[].State", + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "VpnConnections[].State", Expected: "deleted", }, }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeVpnConnectionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeVpnConnectionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } + w.ApplyOptions(opts...) - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } // WaitUntilVpnConnectionDeleted uses the Amazon EC2 API operation // DescribeVpnConnections to wait for a condition to be met before returning. -// If the condition is not meet within the max attempt window an error will +// If the condition is not met within the max attempt window, an error will // be returned. func (c *EC2) WaitUntilVpnConnectionDeleted(input *DescribeVpnConnectionsInput) error { - waiterCfg := waiter.Config{ - Operation: "DescribeVpnConnections", - Delay: 15, + return c.WaitUntilVpnConnectionDeletedWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilVpnConnectionDeletedWithContext is an extended version of WaitUntilVpnConnectionDeleted. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) WaitUntilVpnConnectionDeletedWithContext(ctx aws.Context, input *DescribeVpnConnectionsInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilVpnConnectionDeleted", MaxAttempts: 40, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "pathAll", - Argument: "VpnConnections[].State", + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "VpnConnections[].State", Expected: "deleted", }, { - State: "failure", - Matcher: "pathAny", - Argument: "VpnConnections[].State", + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "VpnConnections[].State", Expected: "pending", }, }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeVpnConnectionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeVpnConnectionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } + w.ApplyOptions(opts...) - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go b/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go index 01e09341ca36..f9136852fda6 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go @@ -1,11 +1,11 @@ -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. -// Package ecr provides a client for Amazon EC2 Container Registry. package ecr import ( "time" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awsutil" "github.com/aws/aws-sdk-go/aws/request" ) @@ -14,19 +14,18 @@ const opBatchCheckLayerAvailability = "BatchCheckLayerAvailability" // BatchCheckLayerAvailabilityRequest generates a "aws/request.Request" representing the // client's request for the BatchCheckLayerAvailability operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See BatchCheckLayerAvailability for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the BatchCheckLayerAvailability method directly -// instead. +// See BatchCheckLayerAvailability for more information on using the BatchCheckLayerAvailability +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the BatchCheckLayerAvailabilityRequest method.
// req, resp := client.BatchCheckLayerAvailabilityRequest(params) @@ -70,41 +69,55 @@ func (c *ECR) BatchCheckLayerAvailabilityRequest(input *BatchCheckLayerAvailabil // API operation BatchCheckLayerAvailability for usage and error information. // // Returned Error Codes: -// * RepositoryNotFoundException +// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException" // The specified repository could not be found. Check the spelling of the specified // repository and ensure that you are performing operations on the correct registry. // -// * InvalidParameterException +// * ErrCodeInvalidParameterException "InvalidParameterException" // The specified parameter is invalid. Review the available parameters for the // API request. // -// * ServerException +// * ErrCodeServerException "ServerException" // These errors are usually caused by a server-side issue. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchCheckLayerAvailability func (c *ECR) BatchCheckLayerAvailability(input *BatchCheckLayerAvailabilityInput) (*BatchCheckLayerAvailabilityOutput, error) { req, out := c.BatchCheckLayerAvailabilityRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// BatchCheckLayerAvailabilityWithContext is the same as BatchCheckLayerAvailability with the addition of +// the ability to pass a context and additional request options. +// +// See BatchCheckLayerAvailability for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) BatchCheckLayerAvailabilityWithContext(ctx aws.Context, input *BatchCheckLayerAvailabilityInput, opts ...request.Option) (*BatchCheckLayerAvailabilityOutput, error) { + req, out := c.BatchCheckLayerAvailabilityRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opBatchDeleteImage = "BatchDeleteImage" // BatchDeleteImageRequest generates a "aws/request.Request" representing the // client's request for the BatchDeleteImage operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See BatchDeleteImage for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the BatchDeleteImage method directly -// instead. +// See BatchDeleteImage for more information on using the BatchDeleteImage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the BatchDeleteImageRequest method.
// req, resp := client.BatchDeleteImageRequest(params) @@ -151,41 +164,55 @@ func (c *ECR) BatchDeleteImageRequest(input *BatchDeleteImageInput) (req *reques // API operation BatchDeleteImage for usage and error information. // // Returned Error Codes: -// * ServerException +// * ErrCodeServerException "ServerException" // These errors are usually caused by a server-side issue. // -// * InvalidParameterException +// * ErrCodeInvalidParameterException "InvalidParameterException" // The specified parameter is invalid. Review the available parameters for the // API request. // -// * RepositoryNotFoundException +// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException" // The specified repository could not be found. Check the spelling of the specified // repository and ensure that you are performing operations on the correct registry. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchDeleteImage func (c *ECR) BatchDeleteImage(input *BatchDeleteImageInput) (*BatchDeleteImageOutput, error) { req, out := c.BatchDeleteImageRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// BatchDeleteImageWithContext is the same as BatchDeleteImage with the addition of +// the ability to pass a context and additional request options. +// +// See BatchDeleteImage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) BatchDeleteImageWithContext(ctx aws.Context, input *BatchDeleteImageInput, opts ...request.Option) (*BatchDeleteImageOutput, error) { + req, out := c.BatchDeleteImageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opBatchGetImage = "BatchGetImage" // BatchGetImageRequest generates a "aws/request.Request" representing the // client's request for the BatchGetImage operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See BatchGetImage for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the BatchGetImage method directly -// instead. +// See BatchGetImage for more information on using the BatchGetImage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the BatchGetImageRequest method.
// req, resp := client.BatchGetImageRequest(params) @@ -225,41 +252,55 @@ func (c *ECR) BatchGetImageRequest(input *BatchGetImageInput) (req *request.Requ // API operation BatchGetImage for usage and error information. // // Returned Error Codes: -// * ServerException +// * ErrCodeServerException "ServerException" // These errors are usually caused by a server-side issue. // -// * InvalidParameterException +// * ErrCodeInvalidParameterException "InvalidParameterException" // The specified parameter is invalid. Review the available parameters for the // API request. // -// * RepositoryNotFoundException +// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException" // The specified repository could not be found. Check the spelling of the specified // repository and ensure that you are performing operations on the correct registry. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchGetImage func (c *ECR) BatchGetImage(input *BatchGetImageInput) (*BatchGetImageOutput, error) { req, out := c.BatchGetImageRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// BatchGetImageWithContext is the same as BatchGetImage with the addition of +// the ability to pass a context and additional request options. +// +// See BatchGetImage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) BatchGetImageWithContext(ctx aws.Context, input *BatchGetImageInput, opts ...request.Option) (*BatchGetImageOutput, error) { + req, out := c.BatchGetImageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCompleteLayerUpload = "CompleteLayerUpload" // CompleteLayerUploadRequest generates a "aws/request.Request" representing the // client's request for the CompleteLayerUpload operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See CompleteLayerUpload for usage and error information. +// See CompleteLayerUpload for more information on using the CompleteLayerUpload +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CompleteLayerUpload method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CompleteLayerUploadRequest method.
// req, resp := client.CompleteLayerUploadRequest(params) @@ -304,58 +345,72 @@ func (c *ECR) CompleteLayerUploadRequest(input *CompleteLayerUploadInput) (req * // API operation CompleteLayerUpload for usage and error information. // // Returned Error Codes: -// * ServerException +// * ErrCodeServerException "ServerException" // These errors are usually caused by a server-side issue. // -// * InvalidParameterException +// * ErrCodeInvalidParameterException "InvalidParameterException" // The specified parameter is invalid. Review the available parameters for the // API request. // -// * RepositoryNotFoundException +// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException" // The specified repository could not be found. Check the spelling of the specified // repository and ensure that you are performing operations on the correct registry. // -// * UploadNotFoundException +// * ErrCodeUploadNotFoundException "UploadNotFoundException" // The upload could not be found, or the specified upload id is not valid for // this repository. // -// * InvalidLayerException +// * ErrCodeInvalidLayerException "InvalidLayerException" // The layer digest calculation performed by Amazon ECR upon receipt of the // image layer does not match the digest specified. // -// * LayerPartTooSmallException +// * ErrCodeLayerPartTooSmallException "LayerPartTooSmallException" // Layer parts must be at least 5 MiB in size. // -// * LayerAlreadyExistsException +// * ErrCodeLayerAlreadyExistsException "LayerAlreadyExistsException" // The image layer already exists in the associated repository. // -// * EmptyUploadException +// * ErrCodeEmptyUploadException "EmptyUploadException" // The specified layer upload does not contain any layer parts. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/CompleteLayerUpload func (c *ECR) CompleteLayerUpload(input *CompleteLayerUploadInput) (*CompleteLayerUploadOutput, error) { req, out := c.CompleteLayerUploadRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CompleteLayerUploadWithContext is the same as CompleteLayerUpload with the addition of +// the ability to pass a context and additional request options. +// +// See CompleteLayerUpload for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) CompleteLayerUploadWithContext(ctx aws.Context, input *CompleteLayerUploadInput, opts ...request.Option) (*CompleteLayerUploadOutput, error) { + req, out := c.CompleteLayerUploadRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCreateRepository = "CreateRepository" // CreateRepositoryRequest generates a "aws/request.Request" representing the // client's request for the CreateRepository operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See CreateRepository for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error.
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateRepository method directly -// instead. +// See CreateRepository for more information on using the CreateRepository +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateRepositoryRequest method. // req, resp := client.CreateRepositoryRequest(params) @@ -394,17 +449,17 @@ func (c *ECR) CreateRepositoryRequest(input *CreateRepositoryInput) (req *reques // API operation CreateRepository for usage and error information. // // Returned Error Codes: -// * ServerException +// * ErrCodeServerException "ServerException" // These errors are usually caused by a server-side issue. // -// * InvalidParameterException +// * ErrCodeInvalidParameterException "InvalidParameterException" // The specified parameter is invalid. Review the available parameters for the // API request. // -// * RepositoryAlreadyExistsException +// * ErrCodeRepositoryAlreadyExistsException "RepositoryAlreadyExistsException" // The specified repository already exists in the specified registry. // -// * LimitExceededException +// * ErrCodeLimitExceededException "LimitExceededException" // The operation did not succeed because it would have exceeded a service limit // for your account. For more information, see Amazon ECR Default Service Limits // (http://docs.aws.amazon.com/AmazonECR/latest/userguide/service_limits.html) @@ -413,27 +468,41 @@ func (c *ECR) CreateRepositoryRequest(input *CreateRepositoryInput) (req *reques // Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/CreateRepository func (c *ECR) CreateRepository(input *CreateRepositoryInput) (*CreateRepositoryOutput, error) { req, out := c.CreateRepositoryRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CreateRepositoryWithContext is the same as CreateRepository with the addition of +// the ability to pass a context and additional request options. +// +// See CreateRepository for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) CreateRepositoryWithContext(ctx aws.Context, input *CreateRepositoryInput, opts ...request.Option) (*CreateRepositoryOutput, error) { + req, out := c.CreateRepositoryRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteRepository = "DeleteRepository" // DeleteRepositoryRequest generates a "aws/request.Request" representing the // client's request for the DeleteRepository operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. 
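For callers that want to adjust a request before it is sent, the regenerated comments describe the Request/Send form: build the request, modify it, then call Send, treating the output as valid only after Send returns without error. A small sketch under the same assumptions (hypothetical repository name, ambient credentials):

```go
// Sketch of the request-then-Send pattern the new doc comments describe.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecr"
)

func main() {
	svc := ecr.New(session.Must(session.NewSession()))

	req, resp := svc.CreateRepositoryRequest(&ecr.CreateRepositoryInput{
		RepositoryName: aws.String("my-repo"), // hypothetical
	})
	// The request can be inspected or customized here before it is sent.
	if err := req.Send(); err != nil {
		fmt.Println("CreateRepository failed:", err)
		return
	}
	fmt.Println("created:", aws.StringValue(resp.Repository.RepositoryUri))
}
```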
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DeleteRepository for usage and error information. +// See DeleteRepository for more information on using the DeleteRepository +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteRepository method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteRepositoryRequest method. // req, resp := client.DeleteRepositoryRequest(params) @@ -473,45 +542,59 @@ func (c *ECR) DeleteRepositoryRequest(input *DeleteRepositoryInput) (req *reques // API operation DeleteRepository for usage and error information. // // Returned Error Codes: -// * ServerException +// * ErrCodeServerException "ServerException" // These errors are usually caused by a server-side issue. // -// * InvalidParameterException +// * ErrCodeInvalidParameterException "InvalidParameterException" // The specified parameter is invalid. Review the available parameters for the // API request. // -// * RepositoryNotFoundException +// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException" // The specified repository could not be found. Check the spelling of the specified // repository and ensure that you are performing operations on the correct registry. // -// * RepositoryNotEmptyException +// * ErrCodeRepositoryNotEmptyException "RepositoryNotEmptyException" // The specified repository contains images. To delete a repository that contains // images, you must force the deletion with the force parameter. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteRepository func (c *ECR) DeleteRepository(input *DeleteRepositoryInput) (*DeleteRepositoryOutput, error) { req, out := c.DeleteRepositoryRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteRepositoryWithContext is the same as DeleteRepository with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteRepository for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) DeleteRepositoryWithContext(ctx aws.Context, input *DeleteRepositoryInput, opts ...request.Option) (*DeleteRepositoryOutput, error) { + req, out := c.DeleteRepositoryRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteRepositoryPolicy = "DeleteRepositoryPolicy" // DeleteRepositoryPolicyRequest generates a "aws/request.Request" representing the // client's request for the DeleteRepositoryPolicy operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. 
+// value will be populated with the request's response once the request complets +// successfuly. // -// See DeleteRepositoryPolicy for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteRepositoryPolicy method directly -// instead. +// See DeleteRepositoryPolicy for more information on using the DeleteRepositoryPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteRepositoryPolicyRequest method. // req, resp := client.DeleteRepositoryPolicyRequest(params) @@ -550,45 +633,59 @@ func (c *ECR) DeleteRepositoryPolicyRequest(input *DeleteRepositoryPolicyInput) // API operation DeleteRepositoryPolicy for usage and error information. // // Returned Error Codes: -// * ServerException +// * ErrCodeServerException "ServerException" // These errors are usually caused by a server-side issue. // -// * InvalidParameterException +// * ErrCodeInvalidParameterException "InvalidParameterException" // The specified parameter is invalid. Review the available parameters for the // API request. // -// * RepositoryNotFoundException +// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException" // The specified repository could not be found. Check the spelling of the specified // repository and ensure that you are performing operations on the correct registry. // -// * RepositoryPolicyNotFoundException +// * ErrCodeRepositoryPolicyNotFoundException "RepositoryPolicyNotFoundException" // The specified repository and registry combination does not have an associated // repository policy. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteRepositoryPolicy func (c *ECR) DeleteRepositoryPolicy(input *DeleteRepositoryPolicyInput) (*DeleteRepositoryPolicyOutput, error) { req, out := c.DeleteRepositoryPolicyRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteRepositoryPolicyWithContext is the same as DeleteRepositoryPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteRepositoryPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) DeleteRepositoryPolicyWithContext(ctx aws.Context, input *DeleteRepositoryPolicyInput, opts ...request.Option) (*DeleteRepositoryPolicyOutput, error) { + req, out := c.DeleteRepositoryPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() } const opDescribeImages = "DescribeImages" // DescribeImagesRequest generates a "aws/request.Request" representing the // client's request for the DescribeImages operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DescribeImages for usage and error information. +// See DescribeImages for more information on using the DescribeImages +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeImages method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeImagesRequest method. // req, resp := client.DescribeImagesRequest(params) @@ -639,25 +736,40 @@ func (c *ECR) DescribeImagesRequest(input *DescribeImagesInput) (req *request.Re // API operation DescribeImages for usage and error information. // // Returned Error Codes: -// * ServerException +// * ErrCodeServerException "ServerException" // These errors are usually caused by a server-side issue. // -// * InvalidParameterException +// * ErrCodeInvalidParameterException "InvalidParameterException" // The specified parameter is invalid. Review the available parameters for the // API request. // -// * RepositoryNotFoundException +// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException" // The specified repository could not be found. Check the spelling of the specified // repository and ensure that you are performing operations on the correct registry. // -// * ImageNotFoundException +// * ErrCodeImageNotFoundException "ImageNotFoundException" // The image requested does not exist in the specified repository. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeImages func (c *ECR) DescribeImages(input *DescribeImagesInput) (*DescribeImagesOutput, error) { req, out := c.DescribeImagesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeImagesWithContext is the same as DescribeImages with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeImages for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) DescribeImagesWithContext(ctx aws.Context, input *DescribeImagesInput, opts ...request.Option) (*DescribeImagesOutput, error) { + req, out := c.DescribeImagesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() } // DescribeImagesPages iterates over the pages of a DescribeImages operation, @@ -677,31 +789,55 @@ func (c *ECR) DescribeImages(input *DescribeImagesInput) (*DescribeImagesOutput, // return pageNum <= 3 // }) // -func (c *ECR) DescribeImagesPages(input *DescribeImagesInput, fn func(p *DescribeImagesOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.DescribeImagesRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*DescribeImagesOutput), lastPage) - }) +func (c *ECR) DescribeImagesPages(input *DescribeImagesInput, fn func(*DescribeImagesOutput, bool) bool) error { + return c.DescribeImagesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeImagesPagesWithContext same as DescribeImagesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) DescribeImagesPagesWithContext(ctx aws.Context, input *DescribeImagesInput, fn func(*DescribeImagesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeImagesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeImagesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*DescribeImagesOutput), !p.HasNextPage()) + } + return p.Err() } const opDescribeRepositories = "DescribeRepositories" // DescribeRepositoriesRequest generates a "aws/request.Request" representing the // client's request for the DescribeRepositories operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeRepositories for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeRepositories method directly -// instead. +// See DescribeRepositories for more information on using the DescribeRepositories +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeRepositoriesRequest method. // req, resp := client.DescribeRepositoriesRequest(params) @@ -746,22 +882,37 @@ func (c *ECR) DescribeRepositoriesRequest(input *DescribeRepositoriesInput) (req // API operation DescribeRepositories for usage and error information. 
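DescribeImagesPages keeps the same behavior, but its callback type is now spelled directly as func(*DescribeImagesOutput, bool) bool and the iteration is driven by request.Pagination instead of EachPage. Existing caller code is effectively unchanged; a sketch, again with a hypothetical repository:

```go
// Sketch of the paginator callback; returning true continues to the next page.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecr"
)

func main() {
	svc := ecr.New(session.Must(session.NewSession()))

	input := &ecr.DescribeImagesInput{RepositoryName: aws.String("my-repo")}
	err := svc.DescribeImagesPages(input, func(page *ecr.DescribeImagesOutput, lastPage bool) bool {
		for _, detail := range page.ImageDetails {
			fmt.Println(aws.StringValue(detail.ImageDigest))
		}
		return true // keep paging until the service reports the last page
	})
	if err != nil {
		fmt.Println("DescribeImages failed:", err)
	}
}
```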
// // Returned Error Codes: -// * ServerException +// * ErrCodeServerException "ServerException" // These errors are usually caused by a server-side issue. // -// * InvalidParameterException +// * ErrCodeInvalidParameterException "InvalidParameterException" // The specified parameter is invalid. Review the available parameters for the // API request. // -// * RepositoryNotFoundException +// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException" // The specified repository could not be found. Check the spelling of the specified // repository and ensure that you are performing operations on the correct registry. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeRepositories func (c *ECR) DescribeRepositories(input *DescribeRepositoriesInput) (*DescribeRepositoriesOutput, error) { req, out := c.DescribeRepositoriesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeRepositoriesWithContext is the same as DescribeRepositories with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeRepositories for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) DescribeRepositoriesWithContext(ctx aws.Context, input *DescribeRepositoriesInput, opts ...request.Option) (*DescribeRepositoriesOutput, error) { + req, out := c.DescribeRepositoriesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } // DescribeRepositoriesPages iterates over the pages of a DescribeRepositories operation, @@ -781,31 +932,55 @@ func (c *ECR) DescribeRepositories(input *DescribeRepositoriesInput) (*DescribeR // return pageNum <= 3 // }) // -func (c *ECR) DescribeRepositoriesPages(input *DescribeRepositoriesInput, fn func(p *DescribeRepositoriesOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.DescribeRepositoriesRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*DescribeRepositoriesOutput), lastPage) - }) +func (c *ECR) DescribeRepositoriesPages(input *DescribeRepositoriesInput, fn func(*DescribeRepositoriesOutput, bool) bool) error { + return c.DescribeRepositoriesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeRepositoriesPagesWithContext same as DescribeRepositoriesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) DescribeRepositoriesPagesWithContext(ctx aws.Context, input *DescribeRepositoriesInput, fn func(*DescribeRepositoriesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeRepositoriesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeRepositoriesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*DescribeRepositoriesOutput), !p.HasNextPage()) + } + return p.Err() } const opGetAuthorizationToken = "GetAuthorizationToken" // GetAuthorizationTokenRequest generates a "aws/request.Request" representing the // client's request for the GetAuthorizationToken operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See GetAuthorizationToken for usage and error information. +// See GetAuthorizationToken for more information on using the GetAuthorizationToken +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetAuthorizationToken method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetAuthorizationTokenRequest method. // req, resp := client.GetAuthorizationTokenRequest(params) @@ -851,37 +1026,51 @@ func (c *ECR) GetAuthorizationTokenRequest(input *GetAuthorizationTokenInput) (r // API operation GetAuthorizationToken for usage and error information. // // Returned Error Codes: -// * ServerException +// * ErrCodeServerException "ServerException" // These errors are usually caused by a server-side issue. // -// * InvalidParameterException +// * ErrCodeInvalidParameterException "InvalidParameterException" // The specified parameter is invalid. Review the available parameters for the // API request. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetAuthorizationToken func (c *ECR) GetAuthorizationToken(input *GetAuthorizationTokenInput) (*GetAuthorizationTokenOutput, error) { req, out := c.GetAuthorizationTokenRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetAuthorizationTokenWithContext is the same as GetAuthorizationToken with the addition of +// the ability to pass a context and additional request options. +// +// See GetAuthorizationToken for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) GetAuthorizationTokenWithContext(ctx aws.Context, input *GetAuthorizationTokenInput, opts ...request.Option) (*GetAuthorizationTokenOutput, error) { + req, out := c.GetAuthorizationTokenRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() } const opGetDownloadUrlForLayer = "GetDownloadUrlForLayer" // GetDownloadUrlForLayerRequest generates a "aws/request.Request" representing the // client's request for the GetDownloadUrlForLayer operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See GetDownloadUrlForLayer for usage and error information. +// See GetDownloadUrlForLayer for more information on using the GetDownloadUrlForLayer +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetDownloadUrlForLayer method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetDownloadUrlForLayerRequest method. // req, resp := client.GetDownloadUrlForLayerRequest(params) @@ -925,49 +1114,63 @@ func (c *ECR) GetDownloadUrlForLayerRequest(input *GetDownloadUrlForLayerInput) // API operation GetDownloadUrlForLayer for usage and error information. // // Returned Error Codes: -// * ServerException +// * ErrCodeServerException "ServerException" // These errors are usually caused by a server-side issue. // -// * InvalidParameterException +// * ErrCodeInvalidParameterException "InvalidParameterException" // The specified parameter is invalid. Review the available parameters for the // API request. // -// * LayersNotFoundException +// * ErrCodeLayersNotFoundException "LayersNotFoundException" // The specified layers could not be found, or the specified layer is not valid // for this repository. // -// * LayerInaccessibleException +// * ErrCodeLayerInaccessibleException "LayerInaccessibleException" // The specified layer is not available because it is not associated with an // image. Unassociated image layers may be cleaned up at any time. // -// * RepositoryNotFoundException +// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException" // The specified repository could not be found. Check the spelling of the specified // repository and ensure that you are performing operations on the correct registry. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetDownloadUrlForLayer func (c *ECR) GetDownloadUrlForLayer(input *GetDownloadUrlForLayerInput) (*GetDownloadUrlForLayerOutput, error) { req, out := c.GetDownloadUrlForLayerRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetDownloadUrlForLayerWithContext is the same as GetDownloadUrlForLayer with the addition of +// the ability to pass a context and additional request options. +// +// See GetDownloadUrlForLayer for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
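GetAuthorizationToken gains the same context-aware variant. As an illustration (not taken from this change), the returned token is base64-encoded "user:password" material that a caller could decode for a registry login; the decoding below is a hypothetical sketch:

```go
// Sketch: fetch a registry token with the new context-aware call and split it
// into the user/password pair a docker login would expect.
package main

import (
	"context"
	"encoding/base64"
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecr"
)

func main() {
	svc := ecr.New(session.Must(session.NewSession()))

	out, err := svc.GetAuthorizationTokenWithContext(context.Background(), &ecr.GetAuthorizationTokenInput{})
	if err != nil {
		fmt.Println("GetAuthorizationToken failed:", err)
		return
	}
	for _, data := range out.AuthorizationData {
		decoded, err := base64.StdEncoding.DecodeString(aws.StringValue(data.AuthorizationToken))
		if err != nil {
			fmt.Println("bad token:", err)
			continue
		}
		parts := strings.SplitN(string(decoded), ":", 2)
		fmt.Printf("registry %s user %s\n", aws.StringValue(data.ProxyEndpoint), parts[0])
	}
}
```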
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) GetDownloadUrlForLayerWithContext(ctx aws.Context, input *GetDownloadUrlForLayerInput, opts ...request.Option) (*GetDownloadUrlForLayerOutput, error) { + req, out := c.GetDownloadUrlForLayerRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetRepositoryPolicy = "GetRepositoryPolicy" // GetRepositoryPolicyRequest generates a "aws/request.Request" representing the // client's request for the GetRepositoryPolicy operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See GetRepositoryPolicy for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetRepositoryPolicy method directly -// instead. +// See GetRepositoryPolicy for more information on using the GetRepositoryPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetRepositoryPolicyRequest method. // req, resp := client.GetRepositoryPolicyRequest(params) @@ -1006,45 +1209,59 @@ func (c *ECR) GetRepositoryPolicyRequest(input *GetRepositoryPolicyInput) (req * // API operation GetRepositoryPolicy for usage and error information. // // Returned Error Codes: -// * ServerException +// * ErrCodeServerException "ServerException" // These errors are usually caused by a server-side issue. // -// * InvalidParameterException +// * ErrCodeInvalidParameterException "InvalidParameterException" // The specified parameter is invalid. Review the available parameters for the // API request. // -// * RepositoryNotFoundException +// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException" // The specified repository could not be found. Check the spelling of the specified // repository and ensure that you are performing operations on the correct registry. // -// * RepositoryPolicyNotFoundException +// * ErrCodeRepositoryPolicyNotFoundException "RepositoryPolicyNotFoundException" // The specified repository and registry combination does not have an associated // repository policy. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetRepositoryPolicy func (c *ECR) GetRepositoryPolicy(input *GetRepositoryPolicyInput) (*GetRepositoryPolicyOutput, error) { req, out := c.GetRepositoryPolicyRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetRepositoryPolicyWithContext is the same as GetRepositoryPolicy with the addition of +// the ability to pass a context and additional request options. 
+// +// See GetRepositoryPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) GetRepositoryPolicyWithContext(ctx aws.Context, input *GetRepositoryPolicyInput, opts ...request.Option) (*GetRepositoryPolicyOutput, error) { + req, out := c.GetRepositoryPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opInitiateLayerUpload = "InitiateLayerUpload" // InitiateLayerUploadRequest generates a "aws/request.Request" representing the // client's request for the InitiateLayerUpload operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See InitiateLayerUpload for usage and error information. +// See InitiateLayerUpload for more information on using the InitiateLayerUpload +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the InitiateLayerUpload method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the InitiateLayerUploadRequest method. // req, resp := client.InitiateLayerUploadRequest(params) @@ -1087,41 +1304,55 @@ func (c *ECR) InitiateLayerUploadRequest(input *InitiateLayerUploadInput) (req * // API operation InitiateLayerUpload for usage and error information. // // Returned Error Codes: -// * ServerException +// * ErrCodeServerException "ServerException" // These errors are usually caused by a server-side issue. // -// * InvalidParameterException +// * ErrCodeInvalidParameterException "InvalidParameterException" // The specified parameter is invalid. Review the available parameters for the // API request. // -// * RepositoryNotFoundException +// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException" // The specified repository could not be found. Check the spelling of the specified // repository and ensure that you are performing operations on the correct registry. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/InitiateLayerUpload func (c *ECR) InitiateLayerUpload(input *InitiateLayerUploadInput) (*InitiateLayerUploadOutput, error) { req, out := c.InitiateLayerUploadRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// InitiateLayerUploadWithContext is the same as InitiateLayerUpload with the addition of +// the ability to pass a context and additional request options. +// +// See InitiateLayerUpload for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) InitiateLayerUploadWithContext(ctx aws.Context, input *InitiateLayerUploadInput, opts ...request.Option) (*InitiateLayerUploadOutput, error) { + req, out := c.InitiateLayerUploadRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opListImages = "ListImages" // ListImagesRequest generates a "aws/request.Request" representing the // client's request for the ListImages operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See ListImages for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListImages method directly -// instead. +// See ListImages for more information on using the ListImages +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ListImagesRequest method. // req, resp := client.ListImagesRequest(params) @@ -1172,22 +1403,37 @@ func (c *ECR) ListImagesRequest(input *ListImagesInput) (req *request.Request, o // API operation ListImages for usage and error information. // // Returned Error Codes: -// * ServerException +// * ErrCodeServerException "ServerException" // These errors are usually caused by a server-side issue. // -// * InvalidParameterException +// * ErrCodeInvalidParameterException "InvalidParameterException" // The specified parameter is invalid. Review the available parameters for the // API request. // -// * RepositoryNotFoundException +// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException" // The specified repository could not be found. Check the spelling of the specified // repository and ensure that you are performing operations on the correct registry. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/ListImages func (c *ECR) ListImages(input *ListImagesInput) (*ListImagesOutput, error) { req, out := c.ListImagesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ListImagesWithContext is the same as ListImages with the addition of +// the ability to pass a context and additional request options. +// +// See ListImages for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
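Several of the operations touched in this file make up the layer-push flow (InitiateLayerUpload, UploadLayerPart, CompleteLayerUpload). A rough, illustrative sketch of stitching them together, with a hypothetical repository and a toy blob that ignores the service's real minimum part size:

```go
// Sketch of the three-step layer upload flow; not a production uploader.
package main

import (
	"crypto/sha256"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecr"
)

func main() {
	svc := ecr.New(session.Must(session.NewSession()))
	repo := aws.String("my-repo") // hypothetical repository
	blob := []byte("example layer contents")

	initOut, err := svc.InitiateLayerUpload(&ecr.InitiateLayerUploadInput{RepositoryName: repo})
	if err != nil {
		fmt.Println("InitiateLayerUpload failed:", err)
		return
	}

	_, err = svc.UploadLayerPart(&ecr.UploadLayerPartInput{
		RepositoryName: repo,
		UploadId:       initOut.UploadId,
		PartFirstByte:  aws.Int64(0),
		PartLastByte:   aws.Int64(int64(len(blob) - 1)),
		LayerPartBlob:  blob,
	})
	if err != nil {
		fmt.Println("UploadLayerPart failed:", err)
		return
	}

	// CompleteLayerUpload verifies the digest of the assembled layer.
	digest := fmt.Sprintf("sha256:%x", sha256.Sum256(blob))
	_, err = svc.CompleteLayerUpload(&ecr.CompleteLayerUploadInput{
		RepositoryName: repo,
		UploadId:       initOut.UploadId,
		LayerDigests:   []*string{aws.String(digest)},
	})
	if err != nil {
		fmt.Println("CompleteLayerUpload failed:", err)
	}
}
```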
+func (c *ECR) ListImagesWithContext(ctx aws.Context, input *ListImagesInput, opts ...request.Option) (*ListImagesOutput, error) { + req, out := c.ListImagesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } // ListImagesPages iterates over the pages of a ListImages operation, @@ -1207,31 +1453,55 @@ func (c *ECR) ListImages(input *ListImagesInput) (*ListImagesOutput, error) { // return pageNum <= 3 // }) // -func (c *ECR) ListImagesPages(input *ListImagesInput, fn func(p *ListImagesOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.ListImagesRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*ListImagesOutput), lastPage) - }) +func (c *ECR) ListImagesPages(input *ListImagesInput, fn func(*ListImagesOutput, bool) bool) error { + return c.ListImagesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListImagesPagesWithContext same as ListImagesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) ListImagesPagesWithContext(ctx aws.Context, input *ListImagesInput, fn func(*ListImagesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListImagesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListImagesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListImagesOutput), !p.HasNextPage()) + } + return p.Err() } const opPutImage = "PutImage" // PutImageRequest generates a "aws/request.Request" representing the // client's request for the PutImage operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See PutImage for usage and error information. +// See PutImage for more information on using the PutImage +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutImage method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutImageRequest method. // req, resp := client.PutImageRequest(params) @@ -1274,26 +1544,26 @@ func (c *ECR) PutImageRequest(input *PutImageInput) (req *request.Request, outpu // API operation PutImage for usage and error information. 
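ListImages receives the same treatment, and its *PagesWithContext form shows how the context and the rebuilt paginator combine. A sketch with hypothetical names:

```go
// Sketch: context-aware pagination over image IDs with a one-minute deadline.
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecr"
)

func main() {
	svc := ecr.New(session.Must(session.NewSession()))

	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	input := &ecr.ListImagesInput{RepositoryName: aws.String("my-repo")}
	err := svc.ListImagesPagesWithContext(ctx, input,
		func(page *ecr.ListImagesOutput, lastPage bool) bool {
			for _, id := range page.ImageIds {
				fmt.Println(aws.StringValue(id.ImageTag))
			}
			return !lastPage // stop after the final page has been handled
		})
	if err != nil {
		fmt.Println("ListImages failed:", err)
	}
}
```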
// // Returned Error Codes: -// * ServerException +// * ErrCodeServerException "ServerException" // These errors are usually caused by a server-side issue. // -// * InvalidParameterException +// * ErrCodeInvalidParameterException "InvalidParameterException" // The specified parameter is invalid. Review the available parameters for the // API request. // -// * RepositoryNotFoundException +// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException" // The specified repository could not be found. Check the spelling of the specified // repository and ensure that you are performing operations on the correct registry. // -// * ImageAlreadyExistsException +// * ErrCodeImageAlreadyExistsException "ImageAlreadyExistsException" // The specified image has already been pushed, and there are no changes to // the manifest or image tag since the last push. // -// * LayersNotFoundException +// * ErrCodeLayersNotFoundException "LayersNotFoundException" // The specified layers could not be found, or the specified layer is not valid // for this repository. // -// * LimitExceededException +// * ErrCodeLimitExceededException "LimitExceededException" // The operation did not succeed because it would have exceeded a service limit // for your account. For more information, see Amazon ECR Default Service Limits // (http://docs.aws.amazon.com/AmazonECR/latest/userguide/service_limits.html) @@ -1302,27 +1572,41 @@ func (c *ECR) PutImageRequest(input *PutImageInput) (req *request.Request, outpu // Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutImage func (c *ECR) PutImage(input *PutImageInput) (*PutImageOutput, error) { req, out := c.PutImageRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// PutImageWithContext is the same as PutImage with the addition of +// the ability to pass a context and additional request options. +// +// See PutImage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) PutImageWithContext(ctx aws.Context, input *PutImageInput, opts ...request.Option) (*PutImageOutput, error) { + req, out := c.PutImageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opSetRepositoryPolicy = "SetRepositoryPolicy" // SetRepositoryPolicyRequest generates a "aws/request.Request" representing the // client's request for the SetRepositoryPolicy operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See SetRepositoryPolicy for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the SetRepositoryPolicy method directly -// instead. 
+// See SetRepositoryPolicy for more information on using the SetRepositoryPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the SetRepositoryPolicyRequest method. // req, resp := client.SetRepositoryPolicyRequest(params) @@ -1361,41 +1645,55 @@ func (c *ECR) SetRepositoryPolicyRequest(input *SetRepositoryPolicyInput) (req * // API operation SetRepositoryPolicy for usage and error information. // // Returned Error Codes: -// * ServerException +// * ErrCodeServerException "ServerException" // These errors are usually caused by a server-side issue. // -// * InvalidParameterException +// * ErrCodeInvalidParameterException "InvalidParameterException" // The specified parameter is invalid. Review the available parameters for the // API request. // -// * RepositoryNotFoundException +// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException" // The specified repository could not be found. Check the spelling of the specified // repository and ensure that you are performing operations on the correct registry. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/SetRepositoryPolicy func (c *ECR) SetRepositoryPolicy(input *SetRepositoryPolicyInput) (*SetRepositoryPolicyOutput, error) { req, out := c.SetRepositoryPolicyRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// SetRepositoryPolicyWithContext is the same as SetRepositoryPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See SetRepositoryPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) SetRepositoryPolicyWithContext(ctx aws.Context, input *SetRepositoryPolicyInput, opts ...request.Option) (*SetRepositoryPolicyOutput, error) { + req, out := c.SetRepositoryPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opUploadLayerPart = "UploadLayerPart" // UploadLayerPartRequest generates a "aws/request.Request" representing the // client's request for the UploadLayerPart operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See UploadLayerPart for usage and error information. +// See UploadLayerPart for more information on using the UploadLayerPart +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the UploadLayerPart method directly -// instead. 
+// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the UploadLayerPartRequest method. // req, resp := client.UploadLayerPartRequest(params) @@ -1438,26 +1736,26 @@ func (c *ECR) UploadLayerPartRequest(input *UploadLayerPartInput) (req *request. // API operation UploadLayerPart for usage and error information. // // Returned Error Codes: -// * ServerException +// * ErrCodeServerException "ServerException" // These errors are usually caused by a server-side issue. // -// * InvalidParameterException +// * ErrCodeInvalidParameterException "InvalidParameterException" // The specified parameter is invalid. Review the available parameters for the // API request. // -// * InvalidLayerPartException +// * ErrCodeInvalidLayerPartException "InvalidLayerPartException" // The layer part size is not valid, or the first byte specified is not consecutive // to the last byte of a previous layer part upload. // -// * RepositoryNotFoundException +// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException" // The specified repository could not be found. Check the spelling of the specified // repository and ensure that you are performing operations on the correct registry. // -// * UploadNotFoundException +// * ErrCodeUploadNotFoundException "UploadNotFoundException" // The upload could not be found, or the specified upload id is not valid for // this repository. // -// * LimitExceededException +// * ErrCodeLimitExceededException "LimitExceededException" // The operation did not succeed because it would have exceeded a service limit // for your account. For more information, see Amazon ECR Default Service Limits // (http://docs.aws.amazon.com/AmazonECR/latest/userguide/service_limits.html) @@ -1466,8 +1764,23 @@ func (c *ECR) UploadLayerPartRequest(input *UploadLayerPartInput) (req *request. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/UploadLayerPart func (c *ECR) UploadLayerPart(input *UploadLayerPartInput) (*UploadLayerPartOutput, error) { req, out := c.UploadLayerPartRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// UploadLayerPartWithContext is the same as UploadLayerPart with the addition of +// the ability to pass a context and additional request options. +// +// See UploadLayerPart for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) UploadLayerPartWithContext(ctx aws.Context, input *UploadLayerPartInput, opts ...request.Option) (*UploadLayerPartOutput, error) { + req, out := c.UploadLayerPartRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } // An object representing authorization data for an Amazon ECR registry. diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecr/doc.go b/vendor/github.com/aws/aws-sdk-go/service/ecr/doc.go new file mode 100644 index 000000000000..987aa72fa68d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ecr/doc.go @@ -0,0 +1,33 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +// Package ecr provides the client and types for making API +// requests to Amazon EC2 Container Registry. +// +// Amazon EC2 Container Registry (Amazon ECR) is a managed AWS Docker registry +// service. Customers can use the familiar Docker CLI to push, pull, and manage +// images. Amazon ECR provides a secure, scalable, and reliable registry. Amazon +// ECR supports private Docker repositories with resource-based permissions +// using AWS IAM so that specific users or Amazon EC2 instances can access repositories +// and images. Developers can use the Docker CLI to author and manage images. +// +// See https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21 for more information on this service. +// +// See ecr package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/ecr/ +// +// Using the Client +// +// To Amazon EC2 Container Registry with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the Amazon EC2 Container Registry client ECR for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/ecr/#New +package ecr diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go b/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go new file mode 100644 index 000000000000..4399e6a29570 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go @@ -0,0 +1,121 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package ecr + +const ( + + // ErrCodeEmptyUploadException for service response error code + // "EmptyUploadException". + // + // The specified layer upload does not contain any layer parts. + ErrCodeEmptyUploadException = "EmptyUploadException" + + // ErrCodeImageAlreadyExistsException for service response error code + // "ImageAlreadyExistsException". + // + // The specified image has already been pushed, and there are no changes to + // the manifest or image tag since the last push. + ErrCodeImageAlreadyExistsException = "ImageAlreadyExistsException" + + // ErrCodeImageNotFoundException for service response error code + // "ImageNotFoundException". + // + // The image requested does not exist in the specified repository. + ErrCodeImageNotFoundException = "ImageNotFoundException" + + // ErrCodeInvalidLayerException for service response error code + // "InvalidLayerException". + // + // The layer digest calculation performed by Amazon ECR upon receipt of the + // image layer does not match the digest specified. + ErrCodeInvalidLayerException = "InvalidLayerException" + + // ErrCodeInvalidLayerPartException for service response error code + // "InvalidLayerPartException". + // + // The layer part size is not valid, or the first byte specified is not consecutive + // to the last byte of a previous layer part upload. + ErrCodeInvalidLayerPartException = "InvalidLayerPartException" + + // ErrCodeInvalidParameterException for service response error code + // "InvalidParameterException". + // + // The specified parameter is invalid. Review the available parameters for the + // API request. 
+ ErrCodeInvalidParameterException = "InvalidParameterException" + + // ErrCodeLayerAlreadyExistsException for service response error code + // "LayerAlreadyExistsException". + // + // The image layer already exists in the associated repository. + ErrCodeLayerAlreadyExistsException = "LayerAlreadyExistsException" + + // ErrCodeLayerInaccessibleException for service response error code + // "LayerInaccessibleException". + // + // The specified layer is not available because it is not associated with an + // image. Unassociated image layers may be cleaned up at any time. + ErrCodeLayerInaccessibleException = "LayerInaccessibleException" + + // ErrCodeLayerPartTooSmallException for service response error code + // "LayerPartTooSmallException". + // + // Layer parts must be at least 5 MiB in size. + ErrCodeLayerPartTooSmallException = "LayerPartTooSmallException" + + // ErrCodeLayersNotFoundException for service response error code + // "LayersNotFoundException". + // + // The specified layers could not be found, or the specified layer is not valid + // for this repository. + ErrCodeLayersNotFoundException = "LayersNotFoundException" + + // ErrCodeLimitExceededException for service response error code + // "LimitExceededException". + // + // The operation did not succeed because it would have exceeded a service limit + // for your account. For more information, see Amazon ECR Default Service Limits + // (http://docs.aws.amazon.com/AmazonECR/latest/userguide/service_limits.html) + // in the Amazon EC2 Container Registry User Guide. + ErrCodeLimitExceededException = "LimitExceededException" + + // ErrCodeRepositoryAlreadyExistsException for service response error code + // "RepositoryAlreadyExistsException". + // + // The specified repository already exists in the specified registry. + ErrCodeRepositoryAlreadyExistsException = "RepositoryAlreadyExistsException" + + // ErrCodeRepositoryNotEmptyException for service response error code + // "RepositoryNotEmptyException". + // + // The specified repository contains images. To delete a repository that contains + // images, you must force the deletion with the force parameter. + ErrCodeRepositoryNotEmptyException = "RepositoryNotEmptyException" + + // ErrCodeRepositoryNotFoundException for service response error code + // "RepositoryNotFoundException". + // + // The specified repository could not be found. Check the spelling of the specified + // repository and ensure that you are performing operations on the correct registry. + ErrCodeRepositoryNotFoundException = "RepositoryNotFoundException" + + // ErrCodeRepositoryPolicyNotFoundException for service response error code + // "RepositoryPolicyNotFoundException". + // + // The specified repository and registry combination does not have an associated + // repository policy. + ErrCodeRepositoryPolicyNotFoundException = "RepositoryPolicyNotFoundException" + + // ErrCodeServerException for service response error code + // "ServerException". + // + // These errors are usually caused by a server-side issue. + ErrCodeServerException = "ServerException" + + // ErrCodeUploadNotFoundException for service response error code + // "UploadNotFoundException". + // + // The upload could not be found, or the specified upload id is not valid for + // this repository. 
+ ErrCodeUploadNotFoundException = "UploadNotFoundException" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecr/service.go b/vendor/github.com/aws/aws-sdk-go/service/ecr/service.go index 2c7904b759a2..95de12e25e68 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ecr/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ecr/service.go @@ -1,4 +1,4 @@ -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. package ecr @@ -11,15 +11,12 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) -// Amazon EC2 Container Registry (Amazon ECR) is a managed AWS Docker registry -// service. Customers can use the familiar Docker CLI to push, pull, and manage -// images. Amazon ECR provides a secure, scalable, and reliable registry. Amazon -// ECR supports private Docker repositories with resource-based permissions -// using AWS IAM so that specific users or Amazon EC2 instances can access repositories -// and images. Developers can use the Docker CLI to author and manage images. -// The service client's operations are safe to be used concurrently. -// It is not safe to mutate any of the client's properties though. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21 +// ECR provides the API operation methods for making requests to +// Amazon EC2 Container Registry. See this package's package overview docs +// for details on the service. +// +// ECR methods are safe to use concurrently. It is not safe to +// modify mutate any of the struct's properties though. type ECR struct { *client.Client } diff --git a/vendor/github.com/aws/aws-sdk-go/service/elb/api.go b/vendor/github.com/aws/aws-sdk-go/service/elb/api.go index 2fdca7da43b3..ba6c106496e2 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elb/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elb/api.go @@ -1,12 +1,12 @@ -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. -// Package elb provides a client for Elastic Load Balancing. package elb import ( "fmt" "time" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awsutil" "github.com/aws/aws-sdk-go/aws/request" ) @@ -15,19 +15,18 @@ const opAddTags = "AddTags" // AddTagsRequest generates a "aws/request.Request" representing the // client's request for the AddTags operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See AddTags for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the AddTags method directly -// instead. +// See AddTags for more information on using the AddTags +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
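// A minimal usage sketch for the vendored ECR client and error-code constants
// above, assuming default-chain credentials; the repository name is a placeholder.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecr"
)

func main() {
	// ecr.New builds a client from the session; the client is safe for concurrent use.
	svc := ecr.New(session.Must(session.NewSession()))

	// Branch on the generated ErrCode* constants rather than raw code strings.
	out, err := svc.DescribeRepositories(&ecr.DescribeRepositoriesInput{
		RepositoryNames: aws.StringSlice([]string{"example-repo"}), // placeholder
	})
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == ecr.ErrCodeRepositoryNotFoundException {
			fmt.Println("repository not found:", aerr.Message())
			return
		}
		fmt.Println("DescribeRepositories failed:", err)
		return
	}
	fmt.Println("repositories:", len(out.Repositories))
}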
// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the AddTagsRequest method. // req, resp := client.AddTagsRequest(params) @@ -63,7 +62,7 @@ func (c *ELB) AddTagsRequest(input *AddTagsInput) (req *request.Request, output // key is already associated with the load balancer, AddTags updates its value. // // For more information, see Tag Your Classic Load Balancer (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/add-remove-tags.html) -// in the Classic Load Balancers Guide. +// in the Classic Load Balancer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -73,40 +72,54 @@ func (c *ELB) AddTagsRequest(input *AddTagsInput) (req *request.Request, output // API operation AddTags for usage and error information. // // Returned Error Codes: -// * LoadBalancerNotFound +// * ErrCodeAccessPointNotFoundException "LoadBalancerNotFound" // The specified load balancer does not exist. // -// * TooManyTags +// * ErrCodeTooManyTagsException "TooManyTags" // The quota for the number of tags that can be assigned to a load balancer // has been reached. // -// * DuplicateTagKeys +// * ErrCodeDuplicateTagKeysException "DuplicateTagKeys" // A tag key was specified more than once. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancing-2012-06-01/AddTags func (c *ELB) AddTags(input *AddTagsInput) (*AddTagsOutput, error) { req, out := c.AddTagsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// AddTagsWithContext is the same as AddTags with the addition of +// the ability to pass a context and additional request options. +// +// See AddTags for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELB) AddTagsWithContext(ctx aws.Context, input *AddTagsInput, opts ...request.Option) (*AddTagsOutput, error) { + req, out := c.AddTagsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opApplySecurityGroupsToLoadBalancer = "ApplySecurityGroupsToLoadBalancer" // ApplySecurityGroupsToLoadBalancerRequest generates a "aws/request.Request" representing the // client's request for the ApplySecurityGroupsToLoadBalancer operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See ApplySecurityGroupsToLoadBalancer for usage and error information. +// See ApplySecurityGroupsToLoadBalancer for more information on using the ApplySecurityGroupsToLoadBalancer +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. 
If -// you just want the service response, call the ApplySecurityGroupsToLoadBalancer method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ApplySecurityGroupsToLoadBalancerRequest method. // req, resp := client.ApplySecurityGroupsToLoadBalancerRequest(params) @@ -140,7 +153,7 @@ func (c *ELB) ApplySecurityGroupsToLoadBalancerRequest(input *ApplySecurityGroup // associated security groups. // // For more information, see Security Groups for Load Balancers in a VPC (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-security-groups.html#elb-vpc-security-groups) -// in the Classic Load Balancers Guide. +// in the Classic Load Balancer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -150,39 +163,53 @@ func (c *ELB) ApplySecurityGroupsToLoadBalancerRequest(input *ApplySecurityGroup // API operation ApplySecurityGroupsToLoadBalancer for usage and error information. // // Returned Error Codes: -// * LoadBalancerNotFound +// * ErrCodeAccessPointNotFoundException "LoadBalancerNotFound" // The specified load balancer does not exist. // -// * InvalidConfigurationRequest +// * ErrCodeInvalidConfigurationRequestException "InvalidConfigurationRequest" // The requested configuration change is not valid. // -// * InvalidSecurityGroup +// * ErrCodeInvalidSecurityGroupException "InvalidSecurityGroup" // One or more of the specified security groups do not exist. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancing-2012-06-01/ApplySecurityGroupsToLoadBalancer func (c *ELB) ApplySecurityGroupsToLoadBalancer(input *ApplySecurityGroupsToLoadBalancerInput) (*ApplySecurityGroupsToLoadBalancerOutput, error) { req, out := c.ApplySecurityGroupsToLoadBalancerRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ApplySecurityGroupsToLoadBalancerWithContext is the same as ApplySecurityGroupsToLoadBalancer with the addition of +// the ability to pass a context and additional request options. +// +// See ApplySecurityGroupsToLoadBalancer for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELB) ApplySecurityGroupsToLoadBalancerWithContext(ctx aws.Context, input *ApplySecurityGroupsToLoadBalancerInput, opts ...request.Option) (*ApplySecurityGroupsToLoadBalancerOutput, error) { + req, out := c.ApplySecurityGroupsToLoadBalancerRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opAttachLoadBalancerToSubnets = "AttachLoadBalancerToSubnets" // AttachLoadBalancerToSubnetsRequest generates a "aws/request.Request" representing the // client's request for the AttachLoadBalancerToSubnets operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See AttachLoadBalancerToSubnets for usage and error information. +// See AttachLoadBalancerToSubnets for more information on using the AttachLoadBalancerToSubnets +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the AttachLoadBalancerToSubnets method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the AttachLoadBalancerToSubnetsRequest method. // req, resp := client.AttachLoadBalancerToSubnetsRequest(params) @@ -217,7 +244,7 @@ func (c *ELB) AttachLoadBalancerToSubnetsRequest(input *AttachLoadBalancerToSubn // The load balancer evenly distributes requests across all registered subnets. // For more information, see Add or Remove Subnets for Your Load Balancer in // a VPC (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-manage-subnets.html) -// in the Classic Load Balancers Guide. +// in the Classic Load Balancer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -227,42 +254,56 @@ func (c *ELB) AttachLoadBalancerToSubnetsRequest(input *AttachLoadBalancerToSubn // API operation AttachLoadBalancerToSubnets for usage and error information. // // Returned Error Codes: -// * LoadBalancerNotFound +// * ErrCodeAccessPointNotFoundException "LoadBalancerNotFound" // The specified load balancer does not exist. // -// * InvalidConfigurationRequest +// * ErrCodeInvalidConfigurationRequestException "InvalidConfigurationRequest" // The requested configuration change is not valid. // -// * SubnetNotFound +// * ErrCodeSubnetNotFoundException "SubnetNotFound" // One or more of the specified subnets do not exist. // -// * InvalidSubnet +// * ErrCodeInvalidSubnetException "InvalidSubnet" // The specified VPC has no associated Internet gateway. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancing-2012-06-01/AttachLoadBalancerToSubnets func (c *ELB) AttachLoadBalancerToSubnets(input *AttachLoadBalancerToSubnetsInput) (*AttachLoadBalancerToSubnetsOutput, error) { req, out := c.AttachLoadBalancerToSubnetsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// AttachLoadBalancerToSubnetsWithContext is the same as AttachLoadBalancerToSubnets with the addition of +// the ability to pass a context and additional request options. +// +// See AttachLoadBalancerToSubnets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *ELB) AttachLoadBalancerToSubnetsWithContext(ctx aws.Context, input *AttachLoadBalancerToSubnetsInput, opts ...request.Option) (*AttachLoadBalancerToSubnetsOutput, error) { + req, out := c.AttachLoadBalancerToSubnetsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opConfigureHealthCheck = "ConfigureHealthCheck" // ConfigureHealthCheckRequest generates a "aws/request.Request" representing the // client's request for the ConfigureHealthCheck operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See ConfigureHealthCheck for usage and error information. +// See ConfigureHealthCheck for more information on using the ConfigureHealthCheck +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ConfigureHealthCheck method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ConfigureHealthCheckRequest method. // req, resp := client.ConfigureHealthCheckRequest(params) @@ -296,7 +337,7 @@ func (c *ELB) ConfigureHealthCheckRequest(input *ConfigureHealthCheckInput) (req // // For more information, see Configure Health Checks for Your Load Balancer // (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-healthchecks.html) -// in the Classic Load Balancers Guide. +// in the Classic Load Balancer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -306,33 +347,47 @@ func (c *ELB) ConfigureHealthCheckRequest(input *ConfigureHealthCheckInput) (req // API operation ConfigureHealthCheck for usage and error information. // // Returned Error Codes: -// * LoadBalancerNotFound +// * ErrCodeAccessPointNotFoundException "LoadBalancerNotFound" // The specified load balancer does not exist. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancing-2012-06-01/ConfigureHealthCheck func (c *ELB) ConfigureHealthCheck(input *ConfigureHealthCheckInput) (*ConfigureHealthCheckOutput, error) { req, out := c.ConfigureHealthCheckRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ConfigureHealthCheckWithContext is the same as ConfigureHealthCheck with the addition of +// the ability to pass a context and additional request options. +// +// See ConfigureHealthCheck for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELB) ConfigureHealthCheckWithContext(ctx aws.Context, input *ConfigureHealthCheckInput, opts ...request.Option) (*ConfigureHealthCheckOutput, error) { + req, out := c.ConfigureHealthCheckRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCreateAppCookieStickinessPolicy = "CreateAppCookieStickinessPolicy" // CreateAppCookieStickinessPolicyRequest generates a "aws/request.Request" representing the // client's request for the CreateAppCookieStickinessPolicy operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See CreateAppCookieStickinessPolicy for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateAppCookieStickinessPolicy method directly -// instead. +// See CreateAppCookieStickinessPolicy for more information on using the CreateAppCookieStickinessPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateAppCookieStickinessPolicyRequest method. // req, resp := client.CreateAppCookieStickinessPolicyRequest(params) @@ -375,7 +430,7 @@ func (c *ELB) CreateAppCookieStickinessPolicyRequest(input *CreateAppCookieStick // being sticky until a new application cookie is issued. // // For more information, see Application-Controlled Session Stickiness (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-sticky-sessions.html#enable-sticky-sessions-application) -// in the Classic Load Balancers Guide. +// in the Classic Load Balancer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -385,42 +440,56 @@ func (c *ELB) CreateAppCookieStickinessPolicyRequest(input *CreateAppCookieStick // API operation CreateAppCookieStickinessPolicy for usage and error information. // // Returned Error Codes: -// * LoadBalancerNotFound +// * ErrCodeAccessPointNotFoundException "LoadBalancerNotFound" // The specified load balancer does not exist. // -// * DuplicatePolicyName +// * ErrCodeDuplicatePolicyNameException "DuplicatePolicyName" // A policy with the specified name already exists for this load balancer. // -// * TooManyPolicies +// * ErrCodeTooManyPoliciesException "TooManyPolicies" // The quota for the number of policies for this load balancer has been reached. // -// * InvalidConfigurationRequest +// * ErrCodeInvalidConfigurationRequestException "InvalidConfigurationRequest" // The requested configuration change is not valid. 
// // Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancing-2012-06-01/CreateAppCookieStickinessPolicy func (c *ELB) CreateAppCookieStickinessPolicy(input *CreateAppCookieStickinessPolicyInput) (*CreateAppCookieStickinessPolicyOutput, error) { req, out := c.CreateAppCookieStickinessPolicyRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CreateAppCookieStickinessPolicyWithContext is the same as CreateAppCookieStickinessPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See CreateAppCookieStickinessPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELB) CreateAppCookieStickinessPolicyWithContext(ctx aws.Context, input *CreateAppCookieStickinessPolicyInput, opts ...request.Option) (*CreateAppCookieStickinessPolicyOutput, error) { + req, out := c.CreateAppCookieStickinessPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCreateLBCookieStickinessPolicy = "CreateLBCookieStickinessPolicy" // CreateLBCookieStickinessPolicyRequest generates a "aws/request.Request" representing the // client's request for the CreateLBCookieStickinessPolicy operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See CreateLBCookieStickinessPolicy for usage and error information. +// See CreateLBCookieStickinessPolicy for more information on using the CreateLBCookieStickinessPolicy +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateLBCookieStickinessPolicy method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateLBCookieStickinessPolicyRequest method. // req, resp := client.CreateLBCookieStickinessPolicyRequest(params) @@ -465,7 +534,7 @@ func (c *ELB) CreateLBCookieStickinessPolicyRequest(input *CreateLBCookieStickin // cookie expiration time, which is specified in the policy configuration. // // For more information, see Duration-Based Session Stickiness (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-sticky-sessions.html#enable-sticky-sessions-duration) -// in the Classic Load Balancers Guide. +// in the Classic Load Balancer Guide. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -475,42 +544,56 @@ func (c *ELB) CreateLBCookieStickinessPolicyRequest(input *CreateLBCookieStickin // API operation CreateLBCookieStickinessPolicy for usage and error information. // // Returned Error Codes: -// * LoadBalancerNotFound +// * ErrCodeAccessPointNotFoundException "LoadBalancerNotFound" // The specified load balancer does not exist. // -// * DuplicatePolicyName +// * ErrCodeDuplicatePolicyNameException "DuplicatePolicyName" // A policy with the specified name already exists for this load balancer. // -// * TooManyPolicies +// * ErrCodeTooManyPoliciesException "TooManyPolicies" // The quota for the number of policies for this load balancer has been reached. // -// * InvalidConfigurationRequest +// * ErrCodeInvalidConfigurationRequestException "InvalidConfigurationRequest" // The requested configuration change is not valid. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancing-2012-06-01/CreateLBCookieStickinessPolicy func (c *ELB) CreateLBCookieStickinessPolicy(input *CreateLBCookieStickinessPolicyInput) (*CreateLBCookieStickinessPolicyOutput, error) { req, out := c.CreateLBCookieStickinessPolicyRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CreateLBCookieStickinessPolicyWithContext is the same as CreateLBCookieStickinessPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See CreateLBCookieStickinessPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELB) CreateLBCookieStickinessPolicyWithContext(ctx aws.Context, input *CreateLBCookieStickinessPolicyInput, opts ...request.Option) (*CreateLBCookieStickinessPolicyOutput, error) { + req, out := c.CreateLBCookieStickinessPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCreateLoadBalancer = "CreateLoadBalancer" // CreateLoadBalancerRequest generates a "aws/request.Request" representing the // client's request for the CreateLoadBalancer operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See CreateLoadBalancer for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateLoadBalancer method directly -// instead. +// See CreateLoadBalancer for more information on using the CreateLoadBalancer +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateLoadBalancerRequest method. // req, resp := client.CreateLoadBalancerRequest(params) @@ -539,7 +622,7 @@ func (c *ELB) CreateLoadBalancerRequest(input *CreateLoadBalancerInput) (req *re // CreateLoadBalancer API operation for Elastic Load Balancing. // -// Creates a Classic load balancer. +// Creates a Classic Load Balancer. // // You can add listeners, security groups, subnets, and tags when you create // your load balancer, or you can add them later using CreateLoadBalancerListeners, @@ -551,7 +634,7 @@ func (c *ELB) CreateLoadBalancerRequest(input *CreateLoadBalancerInput) (req *re // You can create up to 20 load balancers per region per account. You can request // an increase for the number of load balancers for your account. For more information, // see Limits for Your Classic Load Balancer (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-limits.html) -// in the Classic Load Balancers Guide. +// in the Classic Load Balancer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -561,68 +644,82 @@ func (c *ELB) CreateLoadBalancerRequest(input *CreateLoadBalancerInput) (req *re // API operation CreateLoadBalancer for usage and error information. // // Returned Error Codes: -// * DuplicateLoadBalancerName +// * ErrCodeDuplicateAccessPointNameException "DuplicateLoadBalancerName" // The specified load balancer name already exists for this account. // -// * TooManyLoadBalancers +// * ErrCodeTooManyAccessPointsException "TooManyLoadBalancers" // The quota for the number of load balancers has been reached. // -// * CertificateNotFound +// * ErrCodeCertificateNotFoundException "CertificateNotFound" // The specified ARN does not refer to a valid SSL certificate in AWS Identity // and Access Management (IAM) or AWS Certificate Manager (ACM). Note that if // you recently uploaded the certificate to IAM, this error might indicate that // the certificate is not fully available yet. // -// * InvalidConfigurationRequest +// * ErrCodeInvalidConfigurationRequestException "InvalidConfigurationRequest" // The requested configuration change is not valid. // -// * SubnetNotFound +// * ErrCodeSubnetNotFoundException "SubnetNotFound" // One or more of the specified subnets do not exist. // -// * InvalidSubnet +// * ErrCodeInvalidSubnetException "InvalidSubnet" // The specified VPC has no associated Internet gateway. // -// * InvalidSecurityGroup +// * ErrCodeInvalidSecurityGroupException "InvalidSecurityGroup" // One or more of the specified security groups do not exist. // -// * InvalidScheme +// * ErrCodeInvalidSchemeException "InvalidScheme" // The specified value for the schema is not valid. You can only specify a scheme // for load balancers in a VPC. // -// * TooManyTags +// * ErrCodeTooManyTagsException "TooManyTags" // The quota for the number of tags that can be assigned to a load balancer // has been reached. // -// * DuplicateTagKeys +// * ErrCodeDuplicateTagKeysException "DuplicateTagKeys" // A tag key was specified more than once. // -// * UnsupportedProtocol - +// * ErrCodeUnsupportedProtocolException "UnsupportedProtocol" +// The specified protocol or signature version is not supported. 
// // Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancing-2012-06-01/CreateLoadBalancer func (c *ELB) CreateLoadBalancer(input *CreateLoadBalancerInput) (*CreateLoadBalancerOutput, error) { req, out := c.CreateLoadBalancerRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CreateLoadBalancerWithContext is the same as CreateLoadBalancer with the addition of +// the ability to pass a context and additional request options. +// +// See CreateLoadBalancer for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELB) CreateLoadBalancerWithContext(ctx aws.Context, input *CreateLoadBalancerInput, opts ...request.Option) (*CreateLoadBalancerOutput, error) { + req, out := c.CreateLoadBalancerRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCreateLoadBalancerListeners = "CreateLoadBalancerListeners" // CreateLoadBalancerListenersRequest generates a "aws/request.Request" representing the // client's request for the CreateLoadBalancerListeners operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See CreateLoadBalancerListeners for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateLoadBalancerListeners method directly -// instead. +// See CreateLoadBalancerListeners for more information on using the CreateLoadBalancerListeners +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateLoadBalancerListenersRequest method. // req, resp := client.CreateLoadBalancerListenersRequest(params) @@ -657,7 +754,7 @@ func (c *ELB) CreateLoadBalancerListenersRequest(input *CreateLoadBalancerListen // listener. // // For more information, see Listeners for Your Classic Load Balancer (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-listener-config.html) -// in the Classic Load Balancers Guide. +// in the Classic Load Balancer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -667,49 +764,63 @@ func (c *ELB) CreateLoadBalancerListenersRequest(input *CreateLoadBalancerListen // API operation CreateLoadBalancerListeners for usage and error information. 
// // Returned Error Codes: -// * LoadBalancerNotFound +// * ErrCodeAccessPointNotFoundException "LoadBalancerNotFound" // The specified load balancer does not exist. // -// * DuplicateListener +// * ErrCodeDuplicateListenerException "DuplicateListener" // A listener already exists for the specified load balancer name and port, // but with a different instance port, protocol, or SSL certificate. // -// * CertificateNotFound +// * ErrCodeCertificateNotFoundException "CertificateNotFound" // The specified ARN does not refer to a valid SSL certificate in AWS Identity // and Access Management (IAM) or AWS Certificate Manager (ACM). Note that if // you recently uploaded the certificate to IAM, this error might indicate that // the certificate is not fully available yet. // -// * InvalidConfigurationRequest +// * ErrCodeInvalidConfigurationRequestException "InvalidConfigurationRequest" // The requested configuration change is not valid. // -// * UnsupportedProtocol - +// * ErrCodeUnsupportedProtocolException "UnsupportedProtocol" +// The specified protocol or signature version is not supported. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancing-2012-06-01/CreateLoadBalancerListeners func (c *ELB) CreateLoadBalancerListeners(input *CreateLoadBalancerListenersInput) (*CreateLoadBalancerListenersOutput, error) { req, out := c.CreateLoadBalancerListenersRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CreateLoadBalancerListenersWithContext is the same as CreateLoadBalancerListeners with the addition of +// the ability to pass a context and additional request options. +// +// See CreateLoadBalancerListeners for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELB) CreateLoadBalancerListenersWithContext(ctx aws.Context, input *CreateLoadBalancerListenersInput, opts ...request.Option) (*CreateLoadBalancerListenersOutput, error) { + req, out := c.CreateLoadBalancerListenersRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCreateLoadBalancerPolicy = "CreateLoadBalancerPolicy" // CreateLoadBalancerPolicyRequest generates a "aws/request.Request" representing the // client's request for the CreateLoadBalancerPolicy operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See CreateLoadBalancerPolicy for usage and error information. +// See CreateLoadBalancerPolicy for more information on using the CreateLoadBalancerPolicy +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateLoadBalancerPolicy method directly -// instead. 
+// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateLoadBalancerPolicyRequest method. // req, resp := client.CreateLoadBalancerPolicyRequest(params) @@ -752,45 +863,59 @@ func (c *ELB) CreateLoadBalancerPolicyRequest(input *CreateLoadBalancerPolicyInp // API operation CreateLoadBalancerPolicy for usage and error information. // // Returned Error Codes: -// * LoadBalancerNotFound +// * ErrCodeAccessPointNotFoundException "LoadBalancerNotFound" // The specified load balancer does not exist. // -// * PolicyTypeNotFound +// * ErrCodePolicyTypeNotFoundException "PolicyTypeNotFound" // One or more of the specified policy types do not exist. // -// * DuplicatePolicyName +// * ErrCodeDuplicatePolicyNameException "DuplicatePolicyName" // A policy with the specified name already exists for this load balancer. // -// * TooManyPolicies +// * ErrCodeTooManyPoliciesException "TooManyPolicies" // The quota for the number of policies for this load balancer has been reached. // -// * InvalidConfigurationRequest +// * ErrCodeInvalidConfigurationRequestException "InvalidConfigurationRequest" // The requested configuration change is not valid. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancing-2012-06-01/CreateLoadBalancerPolicy func (c *ELB) CreateLoadBalancerPolicy(input *CreateLoadBalancerPolicyInput) (*CreateLoadBalancerPolicyOutput, error) { req, out := c.CreateLoadBalancerPolicyRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CreateLoadBalancerPolicyWithContext is the same as CreateLoadBalancerPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See CreateLoadBalancerPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELB) CreateLoadBalancerPolicyWithContext(ctx aws.Context, input *CreateLoadBalancerPolicyInput, opts ...request.Option) (*CreateLoadBalancerPolicyOutput, error) { + req, out := c.CreateLoadBalancerPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteLoadBalancer = "DeleteLoadBalancer" // DeleteLoadBalancerRequest generates a "aws/request.Request" representing the // client's request for the DeleteLoadBalancer operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DeleteLoadBalancer for usage and error information. +// See DeleteLoadBalancer for more information on using the DeleteLoadBalancer +// API call, and error handling. 
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteLoadBalancer method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteLoadBalancerRequest method. // req, resp := client.DeleteLoadBalancerRequest(params) @@ -839,27 +964,41 @@ func (c *ELB) DeleteLoadBalancerRequest(input *DeleteLoadBalancerInput) (req *re // Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancing-2012-06-01/DeleteLoadBalancer func (c *ELB) DeleteLoadBalancer(input *DeleteLoadBalancerInput) (*DeleteLoadBalancerOutput, error) { req, out := c.DeleteLoadBalancerRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteLoadBalancerWithContext is the same as DeleteLoadBalancer with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteLoadBalancer for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELB) DeleteLoadBalancerWithContext(ctx aws.Context, input *DeleteLoadBalancerInput, opts ...request.Option) (*DeleteLoadBalancerOutput, error) { + req, out := c.DeleteLoadBalancerRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteLoadBalancerListeners = "DeleteLoadBalancerListeners" // DeleteLoadBalancerListenersRequest generates a "aws/request.Request" representing the // client's request for the DeleteLoadBalancerListeners operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DeleteLoadBalancerListeners for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteLoadBalancerListeners method directly -// instead. +// See DeleteLoadBalancerListeners for more information on using the DeleteLoadBalancerListeners +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteLoadBalancerListenersRequest method. 
// req, resp := client.DeleteLoadBalancerListenersRequest(params) @@ -898,33 +1037,47 @@ func (c *ELB) DeleteLoadBalancerListenersRequest(input *DeleteLoadBalancerListen // API operation DeleteLoadBalancerListeners for usage and error information. // // Returned Error Codes: -// * LoadBalancerNotFound +// * ErrCodeAccessPointNotFoundException "LoadBalancerNotFound" // The specified load balancer does not exist. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancing-2012-06-01/DeleteLoadBalancerListeners func (c *ELB) DeleteLoadBalancerListeners(input *DeleteLoadBalancerListenersInput) (*DeleteLoadBalancerListenersOutput, error) { req, out := c.DeleteLoadBalancerListenersRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteLoadBalancerListenersWithContext is the same as DeleteLoadBalancerListeners with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteLoadBalancerListeners for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELB) DeleteLoadBalancerListenersWithContext(ctx aws.Context, input *DeleteLoadBalancerListenersInput, opts ...request.Option) (*DeleteLoadBalancerListenersOutput, error) { + req, out := c.DeleteLoadBalancerListenersRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteLoadBalancerPolicy = "DeleteLoadBalancerPolicy" // DeleteLoadBalancerPolicyRequest generates a "aws/request.Request" representing the // client's request for the DeleteLoadBalancerPolicy operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DeleteLoadBalancerPolicy for usage and error information. +// See DeleteLoadBalancerPolicy for more information on using the DeleteLoadBalancerPolicy +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteLoadBalancerPolicy method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteLoadBalancerPolicyRequest method. // req, resp := client.DeleteLoadBalancerPolicyRequest(params) @@ -964,36 +1117,50 @@ func (c *ELB) DeleteLoadBalancerPolicyRequest(input *DeleteLoadBalancerPolicyInp // API operation DeleteLoadBalancerPolicy for usage and error information. 
// // Returned Error Codes: -// * LoadBalancerNotFound +// * ErrCodeAccessPointNotFoundException "LoadBalancerNotFound" // The specified load balancer does not exist. // -// * InvalidConfigurationRequest +// * ErrCodeInvalidConfigurationRequestException "InvalidConfigurationRequest" // The requested configuration change is not valid. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancing-2012-06-01/DeleteLoadBalancerPolicy func (c *ELB) DeleteLoadBalancerPolicy(input *DeleteLoadBalancerPolicyInput) (*DeleteLoadBalancerPolicyOutput, error) { req, out := c.DeleteLoadBalancerPolicyRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteLoadBalancerPolicyWithContext is the same as DeleteLoadBalancerPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteLoadBalancerPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELB) DeleteLoadBalancerPolicyWithContext(ctx aws.Context, input *DeleteLoadBalancerPolicyInput, opts ...request.Option) (*DeleteLoadBalancerPolicyOutput, error) { + req, out := c.DeleteLoadBalancerPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeregisterInstancesFromLoadBalancer = "DeregisterInstancesFromLoadBalancer" // DeregisterInstancesFromLoadBalancerRequest generates a "aws/request.Request" representing the // client's request for the DeregisterInstancesFromLoadBalancer operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DeregisterInstancesFromLoadBalancer for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeregisterInstancesFromLoadBalancer method directly -// instead. +// See DeregisterInstancesFromLoadBalancer for more information on using the DeregisterInstancesFromLoadBalancer +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeregisterInstancesFromLoadBalancerRequest method. // req, resp := client.DeregisterInstancesFromLoadBalancerRequest(params) @@ -1030,7 +1197,7 @@ func (c *ELB) DeregisterInstancesFromLoadBalancerRequest(input *DeregisterInstan // from the load balancer. 
// // For more information, see Register or De-Register EC2 Instances (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-deregister-register-instances.html) -// in the Classic Load Balancers Guide. +// in the Classic Load Balancer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1040,36 +1207,128 @@ func (c *ELB) DeregisterInstancesFromLoadBalancerRequest(input *DeregisterInstan // API operation DeregisterInstancesFromLoadBalancer for usage and error information. // // Returned Error Codes: -// * LoadBalancerNotFound +// * ErrCodeAccessPointNotFoundException "LoadBalancerNotFound" // The specified load balancer does not exist. // -// * InvalidInstance +// * ErrCodeInvalidEndPointException "InvalidInstance" // The specified endpoint is not valid. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancing-2012-06-01/DeregisterInstancesFromLoadBalancer func (c *ELB) DeregisterInstancesFromLoadBalancer(input *DeregisterInstancesFromLoadBalancerInput) (*DeregisterInstancesFromLoadBalancerOutput, error) { req, out := c.DeregisterInstancesFromLoadBalancerRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeregisterInstancesFromLoadBalancerWithContext is the same as DeregisterInstancesFromLoadBalancer with the addition of +// the ability to pass a context and additional request options. +// +// See DeregisterInstancesFromLoadBalancer for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELB) DeregisterInstancesFromLoadBalancerWithContext(ctx aws.Context, input *DeregisterInstancesFromLoadBalancerInput, opts ...request.Option) (*DeregisterInstancesFromLoadBalancerOutput, error) { + req, out := c.DeregisterInstancesFromLoadBalancerRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeAccountLimits = "DescribeAccountLimits" + +// DescribeAccountLimitsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAccountLimits operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeAccountLimits for more information on using the DescribeAccountLimits +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeAccountLimitsRequest method. 
+// req, resp := client.DescribeAccountLimitsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancing-2012-06-01/DescribeAccountLimits +func (c *ELB) DescribeAccountLimitsRequest(input *DescribeAccountLimitsInput) (req *request.Request, output *DescribeAccountLimitsOutput) { + op := &request.Operation{ + Name: opDescribeAccountLimits, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAccountLimitsInput{} + } + + output = &DescribeAccountLimitsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeAccountLimits API operation for Elastic Load Balancing. +// +// Describes the current Elastic Load Balancing resource limits for your AWS +// account. +// +// For more information, see Limits for Your Classic Load Balancer (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-limits.html) +// in the Classic Load Balancer Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Elastic Load Balancing's +// API operation DescribeAccountLimits for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancing-2012-06-01/DescribeAccountLimits +func (c *ELB) DescribeAccountLimits(input *DescribeAccountLimitsInput) (*DescribeAccountLimitsOutput, error) { + req, out := c.DescribeAccountLimitsRequest(input) + return out, req.Send() +} + +// DescribeAccountLimitsWithContext is the same as DescribeAccountLimits with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeAccountLimits for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELB) DescribeAccountLimitsWithContext(ctx aws.Context, input *DescribeAccountLimitsInput, opts ...request.Option) (*DescribeAccountLimitsOutput, error) { + req, out := c.DescribeAccountLimitsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeInstanceHealth = "DescribeInstanceHealth" // DescribeInstanceHealthRequest generates a "aws/request.Request" representing the // client's request for the DescribeInstanceHealth operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DescribeInstanceHealth for usage and error information. +// See DescribeInstanceHealth for more information on using the DescribeInstanceHealth +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. 
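// A sketch of the *WithContext variants using the DescribeAccountLimits operation
// added above; the 10-second timeout is an arbitrary choice for illustration.
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elb"
)

func main() {
	client := elb.New(session.Must(session.NewSession()))

	// context.Context satisfies aws.Context, so the in-flight HTTP request is
	// cancelled if the deadline passes before the service responds.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	out, err := client.DescribeAccountLimitsWithContext(ctx, &elb.DescribeAccountLimitsInput{})
	if err != nil {
		fmt.Println("DescribeAccountLimits failed:", err)
		return
	}
	fmt.Println(out) // the account's Classic Load Balancer limits
}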
If -// you just want the service response, call the DescribeInstanceHealth method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeInstanceHealthRequest method. // req, resp := client.DescribeInstanceHealthRequest(params) @@ -1113,36 +1372,50 @@ func (c *ELB) DescribeInstanceHealthRequest(input *DescribeInstanceHealthInput) // API operation DescribeInstanceHealth for usage and error information. // // Returned Error Codes: -// * LoadBalancerNotFound +// * ErrCodeAccessPointNotFoundException "LoadBalancerNotFound" // The specified load balancer does not exist. // -// * InvalidInstance +// * ErrCodeInvalidEndPointException "InvalidInstance" // The specified endpoint is not valid. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancing-2012-06-01/DescribeInstanceHealth func (c *ELB) DescribeInstanceHealth(input *DescribeInstanceHealthInput) (*DescribeInstanceHealthOutput, error) { req, out := c.DescribeInstanceHealthRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeInstanceHealthWithContext is the same as DescribeInstanceHealth with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeInstanceHealth for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELB) DescribeInstanceHealthWithContext(ctx aws.Context, input *DescribeInstanceHealthInput, opts ...request.Option) (*DescribeInstanceHealthOutput, error) { + req, out := c.DescribeInstanceHealthRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeLoadBalancerAttributes = "DescribeLoadBalancerAttributes" // DescribeLoadBalancerAttributesRequest generates a "aws/request.Request" representing the // client's request for the DescribeLoadBalancerAttributes operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeLoadBalancerAttributes for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeLoadBalancerAttributes method directly -// instead. +// See DescribeLoadBalancerAttributes for more information on using the DescribeLoadBalancerAttributes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
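// The *WithContext variants introduced throughout this hunk all follow the same
// pattern: build the usual input, derive a context, and the SDK cancels the
// in-flight HTTP request when that context is done. A minimal usage sketch,
// assuming default credentials and a placeholder load balancer name
// ("my-load-balancer" is not taken from this file):
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elb"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := elb.New(sess)

	// Give up on the call if it has not finished within 15 seconds.
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()

	out, err := svc.DescribeInstanceHealthWithContext(ctx, &elb.DescribeInstanceHealthInput{
		LoadBalancerName: aws.String("my-load-balancer"), // placeholder name
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, state := range out.InstanceStates {
		fmt.Printf("%s: %s\n", aws.StringValue(state.InstanceId), aws.StringValue(state.State))
	}
}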
// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeLoadBalancerAttributesRequest method. // req, resp := client.DescribeLoadBalancerAttributesRequest(params) @@ -1181,36 +1454,50 @@ func (c *ELB) DescribeLoadBalancerAttributesRequest(input *DescribeLoadBalancerA // API operation DescribeLoadBalancerAttributes for usage and error information. // // Returned Error Codes: -// * LoadBalancerNotFound +// * ErrCodeAccessPointNotFoundException "LoadBalancerNotFound" // The specified load balancer does not exist. // -// * LoadBalancerAttributeNotFound +// * ErrCodeLoadBalancerAttributeNotFoundException "LoadBalancerAttributeNotFound" // The specified load balancer attribute does not exist. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancing-2012-06-01/DescribeLoadBalancerAttributes func (c *ELB) DescribeLoadBalancerAttributes(input *DescribeLoadBalancerAttributesInput) (*DescribeLoadBalancerAttributesOutput, error) { req, out := c.DescribeLoadBalancerAttributesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeLoadBalancerAttributesWithContext is the same as DescribeLoadBalancerAttributes with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeLoadBalancerAttributes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELB) DescribeLoadBalancerAttributesWithContext(ctx aws.Context, input *DescribeLoadBalancerAttributesInput, opts ...request.Option) (*DescribeLoadBalancerAttributesOutput, error) { + req, out := c.DescribeLoadBalancerAttributesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeLoadBalancerPolicies = "DescribeLoadBalancerPolicies" // DescribeLoadBalancerPoliciesRequest generates a "aws/request.Request" representing the // client's request for the DescribeLoadBalancerPolicies operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DescribeLoadBalancerPolicies for usage and error information. +// See DescribeLoadBalancerPolicies for more information on using the DescribeLoadBalancerPolicies +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeLoadBalancerPolicies method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. 
// // // Example sending a request using the DescribeLoadBalancerPoliciesRequest method. // req, resp := client.DescribeLoadBalancerPoliciesRequest(params) @@ -1256,36 +1543,50 @@ func (c *ELB) DescribeLoadBalancerPoliciesRequest(input *DescribeLoadBalancerPol // API operation DescribeLoadBalancerPolicies for usage and error information. // // Returned Error Codes: -// * LoadBalancerNotFound +// * ErrCodeAccessPointNotFoundException "LoadBalancerNotFound" // The specified load balancer does not exist. // -// * PolicyNotFound +// * ErrCodePolicyNotFoundException "PolicyNotFound" // One or more of the specified policies do not exist. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancing-2012-06-01/DescribeLoadBalancerPolicies func (c *ELB) DescribeLoadBalancerPolicies(input *DescribeLoadBalancerPoliciesInput) (*DescribeLoadBalancerPoliciesOutput, error) { req, out := c.DescribeLoadBalancerPoliciesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeLoadBalancerPoliciesWithContext is the same as DescribeLoadBalancerPolicies with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeLoadBalancerPolicies for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELB) DescribeLoadBalancerPoliciesWithContext(ctx aws.Context, input *DescribeLoadBalancerPoliciesInput, opts ...request.Option) (*DescribeLoadBalancerPoliciesOutput, error) { + req, out := c.DescribeLoadBalancerPoliciesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeLoadBalancerPolicyTypes = "DescribeLoadBalancerPolicyTypes" // DescribeLoadBalancerPolicyTypesRequest generates a "aws/request.Request" representing the // client's request for the DescribeLoadBalancerPolicyTypes operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeLoadBalancerPolicyTypes for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeLoadBalancerPolicyTypes method directly -// instead. +// See DescribeLoadBalancerPolicyTypes for more information on using the DescribeLoadBalancerPolicyTypes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeLoadBalancerPolicyTypesRequest method. 
// req, resp := client.DescribeLoadBalancerPolicyTypesRequest(params) @@ -1335,33 +1636,47 @@ func (c *ELB) DescribeLoadBalancerPolicyTypesRequest(input *DescribeLoadBalancer // API operation DescribeLoadBalancerPolicyTypes for usage and error information. // // Returned Error Codes: -// * PolicyTypeNotFound +// * ErrCodePolicyTypeNotFoundException "PolicyTypeNotFound" // One or more of the specified policy types do not exist. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancing-2012-06-01/DescribeLoadBalancerPolicyTypes func (c *ELB) DescribeLoadBalancerPolicyTypes(input *DescribeLoadBalancerPolicyTypesInput) (*DescribeLoadBalancerPolicyTypesOutput, error) { req, out := c.DescribeLoadBalancerPolicyTypesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeLoadBalancerPolicyTypesWithContext is the same as DescribeLoadBalancerPolicyTypes with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeLoadBalancerPolicyTypes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELB) DescribeLoadBalancerPolicyTypesWithContext(ctx aws.Context, input *DescribeLoadBalancerPolicyTypesInput, opts ...request.Option) (*DescribeLoadBalancerPolicyTypesOutput, error) { + req, out := c.DescribeLoadBalancerPolicyTypesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeLoadBalancers = "DescribeLoadBalancers" // DescribeLoadBalancersRequest generates a "aws/request.Request" representing the // client's request for the DescribeLoadBalancers operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DescribeLoadBalancers for usage and error information. +// See DescribeLoadBalancers for more information on using the DescribeLoadBalancers +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeLoadBalancers method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeLoadBalancersRequest method. // req, resp := client.DescribeLoadBalancersRequest(params) @@ -1407,17 +1722,31 @@ func (c *ELB) DescribeLoadBalancersRequest(input *DescribeLoadBalancersInput) (r // API operation DescribeLoadBalancers for usage and error information. 
// // Returned Error Codes: -// * LoadBalancerNotFound +// * ErrCodeAccessPointNotFoundException "LoadBalancerNotFound" // The specified load balancer does not exist. // -// * DependencyThrottle - +// * ErrCodeDependencyThrottleException "DependencyThrottle" // // Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancing-2012-06-01/DescribeLoadBalancers func (c *ELB) DescribeLoadBalancers(input *DescribeLoadBalancersInput) (*DescribeLoadBalancersOutput, error) { req, out := c.DescribeLoadBalancersRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeLoadBalancersWithContext is the same as DescribeLoadBalancers with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeLoadBalancers for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELB) DescribeLoadBalancersWithContext(ctx aws.Context, input *DescribeLoadBalancersInput, opts ...request.Option) (*DescribeLoadBalancersOutput, error) { + req, out := c.DescribeLoadBalancersRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } // DescribeLoadBalancersPages iterates over the pages of a DescribeLoadBalancers operation, @@ -1437,31 +1766,55 @@ func (c *ELB) DescribeLoadBalancers(input *DescribeLoadBalancersInput) (*Describ // return pageNum <= 3 // }) // -func (c *ELB) DescribeLoadBalancersPages(input *DescribeLoadBalancersInput, fn func(p *DescribeLoadBalancersOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.DescribeLoadBalancersRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*DescribeLoadBalancersOutput), lastPage) - }) +func (c *ELB) DescribeLoadBalancersPages(input *DescribeLoadBalancersInput, fn func(*DescribeLoadBalancersOutput, bool) bool) error { + return c.DescribeLoadBalancersPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeLoadBalancersPagesWithContext same as DescribeLoadBalancersPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELB) DescribeLoadBalancersPagesWithContext(ctx aws.Context, input *DescribeLoadBalancersInput, fn func(*DescribeLoadBalancersOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeLoadBalancersInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeLoadBalancersRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*DescribeLoadBalancersOutput), !p.HasNextPage()) + } + return p.Err() } const opDescribeTags = "DescribeTags" // DescribeTagsRequest generates a "aws/request.Request" representing the // client's request for the DescribeTags operation. 
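// The rewritten DescribeLoadBalancersPages above now drives request.Pagination
// internally, but the exported callback keeps the same shape: return false to
// stop early, and lastPage reports whether this is the final page. A sketch of
// walking every classic load balancer, assuming default credentials and region:
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elb"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := elb.New(sess)

	input := &elb.DescribeLoadBalancersInput{PageSize: aws.Int64(20)}
	err := svc.DescribeLoadBalancersPages(input,
		func(page *elb.DescribeLoadBalancersOutput, lastPage bool) bool {
			for _, lb := range page.LoadBalancerDescriptions {
				fmt.Println(aws.StringValue(lb.LoadBalancerName), aws.StringValue(lb.DNSName))
			}
			return true // keep paging; iteration ends after the last page
		})
	if err != nil {
		log.Fatal(err)
	}
}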
The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeTags for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DescribeTags method directly -// instead. +// See DescribeTags for more information on using the DescribeTags +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeTagsRequest method. // req, resp := client.DescribeTagsRequest(params) @@ -1500,33 +1853,47 @@ func (c *ELB) DescribeTagsRequest(input *DescribeTagsInput) (req *request.Reques // API operation DescribeTags for usage and error information. // // Returned Error Codes: -// * LoadBalancerNotFound +// * ErrCodeAccessPointNotFoundException "LoadBalancerNotFound" // The specified load balancer does not exist. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancing-2012-06-01/DescribeTags func (c *ELB) DescribeTags(input *DescribeTagsInput) (*DescribeTagsOutput, error) { req, out := c.DescribeTagsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeTagsWithContext is the same as DescribeTags with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeTags for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELB) DescribeTagsWithContext(ctx aws.Context, input *DescribeTagsInput, opts ...request.Option) (*DescribeTagsOutput, error) { + req, out := c.DescribeTagsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDetachLoadBalancerFromSubnets = "DetachLoadBalancerFromSubnets" // DetachLoadBalancerFromSubnetsRequest generates a "aws/request.Request" representing the // client's request for the DetachLoadBalancerFromSubnets operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DetachLoadBalancerFromSubnets for usage and error information. +// See DetachLoadBalancerFromSubnets for more information on using the DetachLoadBalancerFromSubnets +// API call, and error handling. 
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DetachLoadBalancerFromSubnets method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DetachLoadBalancerFromSubnetsRequest method. // req, resp := client.DetachLoadBalancerFromSubnetsRequest(params) @@ -1570,36 +1937,50 @@ func (c *ELB) DetachLoadBalancerFromSubnetsRequest(input *DetachLoadBalancerFrom // API operation DetachLoadBalancerFromSubnets for usage and error information. // // Returned Error Codes: -// * LoadBalancerNotFound +// * ErrCodeAccessPointNotFoundException "LoadBalancerNotFound" // The specified load balancer does not exist. // -// * InvalidConfigurationRequest +// * ErrCodeInvalidConfigurationRequestException "InvalidConfigurationRequest" // The requested configuration change is not valid. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancing-2012-06-01/DetachLoadBalancerFromSubnets func (c *ELB) DetachLoadBalancerFromSubnets(input *DetachLoadBalancerFromSubnetsInput) (*DetachLoadBalancerFromSubnetsOutput, error) { req, out := c.DetachLoadBalancerFromSubnetsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DetachLoadBalancerFromSubnetsWithContext is the same as DetachLoadBalancerFromSubnets with the addition of +// the ability to pass a context and additional request options. +// +// See DetachLoadBalancerFromSubnets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELB) DetachLoadBalancerFromSubnetsWithContext(ctx aws.Context, input *DetachLoadBalancerFromSubnetsInput, opts ...request.Option) (*DetachLoadBalancerFromSubnetsOutput, error) { + req, out := c.DetachLoadBalancerFromSubnetsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDisableAvailabilityZonesForLoadBalancer = "DisableAvailabilityZonesForLoadBalancer" // DisableAvailabilityZonesForLoadBalancerRequest generates a "aws/request.Request" representing the // client's request for the DisableAvailabilityZonesForLoadBalancer operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DisableAvailabilityZonesForLoadBalancer for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DisableAvailabilityZonesForLoadBalancer method directly -// instead. +// See DisableAvailabilityZonesForLoadBalancer for more information on using the DisableAvailabilityZonesForLoadBalancer +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DisableAvailabilityZonesForLoadBalancerRequest method. // req, resp := client.DisableAvailabilityZonesForLoadBalancerRequest(params) @@ -1638,7 +2019,7 @@ func (c *ELB) DisableAvailabilityZonesForLoadBalancerRequest(input *DisableAvail // the traffic among its remaining Availability Zones. // // For more information, see Add or Remove Availability Zones (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-disable-az.html) -// in the Classic Load Balancers Guide. +// in the Classic Load Balancer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1648,36 +2029,50 @@ func (c *ELB) DisableAvailabilityZonesForLoadBalancerRequest(input *DisableAvail // API operation DisableAvailabilityZonesForLoadBalancer for usage and error information. // // Returned Error Codes: -// * LoadBalancerNotFound +// * ErrCodeAccessPointNotFoundException "LoadBalancerNotFound" // The specified load balancer does not exist. // -// * InvalidConfigurationRequest +// * ErrCodeInvalidConfigurationRequestException "InvalidConfigurationRequest" // The requested configuration change is not valid. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancing-2012-06-01/DisableAvailabilityZonesForLoadBalancer func (c *ELB) DisableAvailabilityZonesForLoadBalancer(input *DisableAvailabilityZonesForLoadBalancerInput) (*DisableAvailabilityZonesForLoadBalancerOutput, error) { req, out := c.DisableAvailabilityZonesForLoadBalancerRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DisableAvailabilityZonesForLoadBalancerWithContext is the same as DisableAvailabilityZonesForLoadBalancer with the addition of +// the ability to pass a context and additional request options. +// +// See DisableAvailabilityZonesForLoadBalancer for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELB) DisableAvailabilityZonesForLoadBalancerWithContext(ctx aws.Context, input *DisableAvailabilityZonesForLoadBalancerInput, opts ...request.Option) (*DisableAvailabilityZonesForLoadBalancerOutput, error) { + req, out := c.DisableAvailabilityZonesForLoadBalancerRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() } const opEnableAvailabilityZonesForLoadBalancer = "EnableAvailabilityZonesForLoadBalancer" // EnableAvailabilityZonesForLoadBalancerRequest generates a "aws/request.Request" representing the // client's request for the EnableAvailabilityZonesForLoadBalancer operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See EnableAvailabilityZonesForLoadBalancer for usage and error information. +// See EnableAvailabilityZonesForLoadBalancer for more information on using the EnableAvailabilityZonesForLoadBalancer +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the EnableAvailabilityZonesForLoadBalancer method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the EnableAvailabilityZonesForLoadBalancerRequest method. // req, resp := client.EnableAvailabilityZonesForLoadBalancerRequest(params) @@ -1713,7 +2108,7 @@ func (c *ELB) EnableAvailabilityZonesForLoadBalancerRequest(input *EnableAvailab // Zones that contain instances. // // For more information, see Add or Remove Availability Zones (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-disable-az.html) -// in the Classic Load Balancers Guide. +// in the Classic Load Balancer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1723,33 +2118,47 @@ func (c *ELB) EnableAvailabilityZonesForLoadBalancerRequest(input *EnableAvailab // API operation EnableAvailabilityZonesForLoadBalancer for usage and error information. // // Returned Error Codes: -// * LoadBalancerNotFound +// * ErrCodeAccessPointNotFoundException "LoadBalancerNotFound" // The specified load balancer does not exist. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancing-2012-06-01/EnableAvailabilityZonesForLoadBalancer func (c *ELB) EnableAvailabilityZonesForLoadBalancer(input *EnableAvailabilityZonesForLoadBalancerInput) (*EnableAvailabilityZonesForLoadBalancerOutput, error) { req, out := c.EnableAvailabilityZonesForLoadBalancerRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// EnableAvailabilityZonesForLoadBalancerWithContext is the same as EnableAvailabilityZonesForLoadBalancer with the addition of +// the ability to pass a context and additional request options. +// +// See EnableAvailabilityZonesForLoadBalancer for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELB) EnableAvailabilityZonesForLoadBalancerWithContext(ctx aws.Context, input *EnableAvailabilityZonesForLoadBalancerInput, opts ...request.Option) (*EnableAvailabilityZonesForLoadBalancerOutput, error) { + req, out := c.EnableAvailabilityZonesForLoadBalancerRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opModifyLoadBalancerAttributes = "ModifyLoadBalancerAttributes" // ModifyLoadBalancerAttributesRequest generates a "aws/request.Request" representing the // client's request for the ModifyLoadBalancerAttributes operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See ModifyLoadBalancerAttributes for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ModifyLoadBalancerAttributes method directly -// instead. +// See ModifyLoadBalancerAttributes for more information on using the ModifyLoadBalancerAttributes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ModifyLoadBalancerAttributesRequest method. // req, resp := client.ModifyLoadBalancerAttributesRequest(params) @@ -1785,7 +2194,7 @@ func (c *ELB) ModifyLoadBalancerAttributesRequest(input *ModifyLoadBalancerAttri // can modify the load balancer attribute ConnectionSettings by specifying an // idle connection timeout value for your load balancer. // -// For more information, see the following in the Classic Load Balancers Guide: +// For more information, see the following in the Classic Load Balancer Guide: // // * Cross-Zone Load Balancing (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-disable-crosszone-lb.html) // @@ -1803,39 +2212,53 @@ func (c *ELB) ModifyLoadBalancerAttributesRequest(input *ModifyLoadBalancerAttri // API operation ModifyLoadBalancerAttributes for usage and error information. // // Returned Error Codes: -// * LoadBalancerNotFound +// * ErrCodeAccessPointNotFoundException "LoadBalancerNotFound" // The specified load balancer does not exist. // -// * LoadBalancerAttributeNotFound +// * ErrCodeLoadBalancerAttributeNotFoundException "LoadBalancerAttributeNotFound" // The specified load balancer attribute does not exist. // -// * InvalidConfigurationRequest +// * ErrCodeInvalidConfigurationRequestException "InvalidConfigurationRequest" // The requested configuration change is not valid. 
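// The error codes listed for ModifyLoadBalancerAttributes (and the other
// operations in this file) are now documented against the generated ErrCode*
// constants rather than bare strings, so callers can match on them with an
// awserr.Error type assertion. A sketch that raises the idle timeout and
// classifies failures; the load balancer name is a placeholder, and the
// ConnectionSettings.IdleTimeout field is assumed from the SDK rather than
// shown in this diff:
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elb"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := elb.New(sess)

	_, err := svc.ModifyLoadBalancerAttributes(&elb.ModifyLoadBalancerAttributesInput{
		LoadBalancerName: aws.String("my-load-balancer"), // placeholder name
		LoadBalancerAttributes: &elb.LoadBalancerAttributes{
			ConnectionSettings: &elb.ConnectionSettings{IdleTimeout: aws.Int64(120)},
		},
	})
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			case elb.ErrCodeAccessPointNotFoundException:
				fmt.Println("load balancer does not exist:", aerr.Message())
			case elb.ErrCodeInvalidConfigurationRequestException:
				fmt.Println("invalid configuration change:", aerr.Message())
			default:
				fmt.Println(aerr.Code(), aerr.Message())
			}
			return
		}
		log.Fatal(err)
	}
	fmt.Println("idle timeout updated")
}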
// // Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancing-2012-06-01/ModifyLoadBalancerAttributes func (c *ELB) ModifyLoadBalancerAttributes(input *ModifyLoadBalancerAttributesInput) (*ModifyLoadBalancerAttributesOutput, error) { req, out := c.ModifyLoadBalancerAttributesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ModifyLoadBalancerAttributesWithContext is the same as ModifyLoadBalancerAttributes with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyLoadBalancerAttributes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELB) ModifyLoadBalancerAttributesWithContext(ctx aws.Context, input *ModifyLoadBalancerAttributesInput, opts ...request.Option) (*ModifyLoadBalancerAttributesOutput, error) { + req, out := c.ModifyLoadBalancerAttributesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opRegisterInstancesWithLoadBalancer = "RegisterInstancesWithLoadBalancer" // RegisterInstancesWithLoadBalancerRequest generates a "aws/request.Request" representing the // client's request for the RegisterInstancesWithLoadBalancer operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See RegisterInstancesWithLoadBalancer for usage and error information. +// See RegisterInstancesWithLoadBalancer for more information on using the RegisterInstancesWithLoadBalancer +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the RegisterInstancesWithLoadBalancer method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the RegisterInstancesWithLoadBalancerRequest method. // req, resp := client.RegisterInstancesWithLoadBalancerRequest(params) @@ -1886,7 +2309,7 @@ func (c *ELB) RegisterInstancesWithLoadBalancerRequest(input *RegisterInstancesW // To deregister instances from a load balancer, use DeregisterInstancesFromLoadBalancer. // // For more information, see Register or De-Register EC2 Instances (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-deregister-register-instances.html) -// in the Classic Load Balancers Guide. +// in the Classic Load Balancer Guide. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1896,36 +2319,50 @@ func (c *ELB) RegisterInstancesWithLoadBalancerRequest(input *RegisterInstancesW // API operation RegisterInstancesWithLoadBalancer for usage and error information. // // Returned Error Codes: -// * LoadBalancerNotFound +// * ErrCodeAccessPointNotFoundException "LoadBalancerNotFound" // The specified load balancer does not exist. // -// * InvalidInstance +// * ErrCodeInvalidEndPointException "InvalidInstance" // The specified endpoint is not valid. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancing-2012-06-01/RegisterInstancesWithLoadBalancer func (c *ELB) RegisterInstancesWithLoadBalancer(input *RegisterInstancesWithLoadBalancerInput) (*RegisterInstancesWithLoadBalancerOutput, error) { req, out := c.RegisterInstancesWithLoadBalancerRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// RegisterInstancesWithLoadBalancerWithContext is the same as RegisterInstancesWithLoadBalancer with the addition of +// the ability to pass a context and additional request options. +// +// See RegisterInstancesWithLoadBalancer for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELB) RegisterInstancesWithLoadBalancerWithContext(ctx aws.Context, input *RegisterInstancesWithLoadBalancerInput, opts ...request.Option) (*RegisterInstancesWithLoadBalancerOutput, error) { + req, out := c.RegisterInstancesWithLoadBalancerRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opRemoveTags = "RemoveTags" // RemoveTagsRequest generates a "aws/request.Request" representing the // client's request for the RemoveTags operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See RemoveTags for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the RemoveTags method directly -// instead. +// See RemoveTags for more information on using the RemoveTags +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the RemoveTagsRequest method. // req, resp := client.RemoveTagsRequest(params) @@ -1964,33 +2401,47 @@ func (c *ELB) RemoveTagsRequest(input *RemoveTagsInput) (req *request.Request, o // API operation RemoveTags for usage and error information. 
// // Returned Error Codes: -// * LoadBalancerNotFound +// * ErrCodeAccessPointNotFoundException "LoadBalancerNotFound" // The specified load balancer does not exist. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancing-2012-06-01/RemoveTags func (c *ELB) RemoveTags(input *RemoveTagsInput) (*RemoveTagsOutput, error) { req, out := c.RemoveTagsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// RemoveTagsWithContext is the same as RemoveTags with the addition of +// the ability to pass a context and additional request options. +// +// See RemoveTags for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELB) RemoveTagsWithContext(ctx aws.Context, input *RemoveTagsInput, opts ...request.Option) (*RemoveTagsOutput, error) { + req, out := c.RemoveTagsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opSetLoadBalancerListenerSSLCertificate = "SetLoadBalancerListenerSSLCertificate" // SetLoadBalancerListenerSSLCertificateRequest generates a "aws/request.Request" representing the // client's request for the SetLoadBalancerListenerSSLCertificate operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See SetLoadBalancerListenerSSLCertificate for usage and error information. +// See SetLoadBalancerListenerSSLCertificate for more information on using the SetLoadBalancerListenerSSLCertificate +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the SetLoadBalancerListenerSSLCertificate method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the SetLoadBalancerListenerSSLCertificateRequest method. // req, resp := client.SetLoadBalancerListenerSSLCertificateRequest(params) @@ -2025,7 +2476,7 @@ func (c *ELB) SetLoadBalancerListenerSSLCertificateRequest(input *SetLoadBalance // // For more information about updating your SSL certificate, see Replace the // SSL Certificate for Your Load Balancer (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-update-ssl-cert.html) -// in the Classic Load Balancers Guide. +// in the Classic Load Balancer Guide. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2035,48 +2486,62 @@ func (c *ELB) SetLoadBalancerListenerSSLCertificateRequest(input *SetLoadBalance // API operation SetLoadBalancerListenerSSLCertificate for usage and error information. // // Returned Error Codes: -// * CertificateNotFound +// * ErrCodeCertificateNotFoundException "CertificateNotFound" // The specified ARN does not refer to a valid SSL certificate in AWS Identity // and Access Management (IAM) or AWS Certificate Manager (ACM). Note that if // you recently uploaded the certificate to IAM, this error might indicate that // the certificate is not fully available yet. // -// * LoadBalancerNotFound +// * ErrCodeAccessPointNotFoundException "LoadBalancerNotFound" // The specified load balancer does not exist. // -// * ListenerNotFound +// * ErrCodeListenerNotFoundException "ListenerNotFound" // The load balancer does not have a listener configured at the specified port. // -// * InvalidConfigurationRequest +// * ErrCodeInvalidConfigurationRequestException "InvalidConfigurationRequest" // The requested configuration change is not valid. // -// * UnsupportedProtocol - +// * ErrCodeUnsupportedProtocolException "UnsupportedProtocol" +// The specified protocol or signature version is not supported. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancing-2012-06-01/SetLoadBalancerListenerSSLCertificate func (c *ELB) SetLoadBalancerListenerSSLCertificate(input *SetLoadBalancerListenerSSLCertificateInput) (*SetLoadBalancerListenerSSLCertificateOutput, error) { req, out := c.SetLoadBalancerListenerSSLCertificateRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// SetLoadBalancerListenerSSLCertificateWithContext is the same as SetLoadBalancerListenerSSLCertificate with the addition of +// the ability to pass a context and additional request options. +// +// See SetLoadBalancerListenerSSLCertificate for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELB) SetLoadBalancerListenerSSLCertificateWithContext(ctx aws.Context, input *SetLoadBalancerListenerSSLCertificateInput, opts ...request.Option) (*SetLoadBalancerListenerSSLCertificateOutput, error) { + req, out := c.SetLoadBalancerListenerSSLCertificateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opSetLoadBalancerPoliciesForBackendServer = "SetLoadBalancerPoliciesForBackendServer" // SetLoadBalancerPoliciesForBackendServerRequest generates a "aws/request.Request" representing the // client's request for the SetLoadBalancerPoliciesForBackendServer operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See SetLoadBalancerPoliciesForBackendServer for usage and error information. 
+// See SetLoadBalancerPoliciesForBackendServer for more information on using the SetLoadBalancerPoliciesForBackendServer +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the SetLoadBalancerPoliciesForBackendServer method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the SetLoadBalancerPoliciesForBackendServerRequest method. // req, resp := client.SetLoadBalancerPoliciesForBackendServerRequest(params) @@ -2118,9 +2583,9 @@ func (c *ELB) SetLoadBalancerPoliciesForBackendServerRequest(input *SetLoadBalan // // For more information about enabling back-end instance authentication, see // Configure Back-end Instance Authentication (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-create-https-ssl-load-balancer.html#configure_backendauth_clt) -// in the Classic Load Balancers Guide. For more information about Proxy Protocol, +// in the Classic Load Balancer Guide. For more information about Proxy Protocol, // see Configure Proxy Protocol Support (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-proxy-protocol.html) -// in the Classic Load Balancers Guide. +// in the Classic Load Balancer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2130,39 +2595,53 @@ func (c *ELB) SetLoadBalancerPoliciesForBackendServerRequest(input *SetLoadBalan // API operation SetLoadBalancerPoliciesForBackendServer for usage and error information. // // Returned Error Codes: -// * LoadBalancerNotFound +// * ErrCodeAccessPointNotFoundException "LoadBalancerNotFound" // The specified load balancer does not exist. // -// * PolicyNotFound +// * ErrCodePolicyNotFoundException "PolicyNotFound" // One or more of the specified policies do not exist. // -// * InvalidConfigurationRequest +// * ErrCodeInvalidConfigurationRequestException "InvalidConfigurationRequest" // The requested configuration change is not valid. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancing-2012-06-01/SetLoadBalancerPoliciesForBackendServer func (c *ELB) SetLoadBalancerPoliciesForBackendServer(input *SetLoadBalancerPoliciesForBackendServerInput) (*SetLoadBalancerPoliciesForBackendServerOutput, error) { req, out := c.SetLoadBalancerPoliciesForBackendServerRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// SetLoadBalancerPoliciesForBackendServerWithContext is the same as SetLoadBalancerPoliciesForBackendServer with the addition of +// the ability to pass a context and additional request options. +// +// See SetLoadBalancerPoliciesForBackendServer for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELB) SetLoadBalancerPoliciesForBackendServerWithContext(ctx aws.Context, input *SetLoadBalancerPoliciesForBackendServerInput, opts ...request.Option) (*SetLoadBalancerPoliciesForBackendServerOutput, error) { + req, out := c.SetLoadBalancerPoliciesForBackendServerRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opSetLoadBalancerPoliciesOfListener = "SetLoadBalancerPoliciesOfListener" // SetLoadBalancerPoliciesOfListenerRequest generates a "aws/request.Request" representing the // client's request for the SetLoadBalancerPoliciesOfListener operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See SetLoadBalancerPoliciesOfListener for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the SetLoadBalancerPoliciesOfListener method directly -// instead. +// See SetLoadBalancerPoliciesOfListener for more information on using the SetLoadBalancerPoliciesOfListener +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the SetLoadBalancerPoliciesOfListenerRequest method. // req, resp := client.SetLoadBalancerPoliciesOfListenerRequest(params) @@ -2200,7 +2679,7 @@ func (c *ELB) SetLoadBalancerPoliciesOfListenerRequest(input *SetLoadBalancerPol // Configuration (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/ssl-config-update.html), // Duration-Based Session Stickiness (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-sticky-sessions.html#enable-sticky-sessions-duration), // and Application-Controlled Session Stickiness (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-sticky-sessions.html#enable-sticky-sessions-application) -// in the Classic Load Balancers Guide. +// in the Classic Load Balancer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2210,23 +2689,38 @@ func (c *ELB) SetLoadBalancerPoliciesOfListenerRequest(input *SetLoadBalancerPol // API operation SetLoadBalancerPoliciesOfListener for usage and error information. // // Returned Error Codes: -// * LoadBalancerNotFound +// * ErrCodeAccessPointNotFoundException "LoadBalancerNotFound" // The specified load balancer does not exist. // -// * PolicyNotFound +// * ErrCodePolicyNotFoundException "PolicyNotFound" // One or more of the specified policies do not exist. 
// -// * ListenerNotFound +// * ErrCodeListenerNotFoundException "ListenerNotFound" // The load balancer does not have a listener configured at the specified port. // -// * InvalidConfigurationRequest +// * ErrCodeInvalidConfigurationRequestException "InvalidConfigurationRequest" // The requested configuration change is not valid. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancing-2012-06-01/SetLoadBalancerPoliciesOfListener func (c *ELB) SetLoadBalancerPoliciesOfListener(input *SetLoadBalancerPoliciesOfListenerInput) (*SetLoadBalancerPoliciesOfListenerOutput, error) { req, out := c.SetLoadBalancerPoliciesOfListenerRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// SetLoadBalancerPoliciesOfListenerWithContext is the same as SetLoadBalancerPoliciesOfListener with the addition of +// the ability to pass a context and additional request options. +// +// See SetLoadBalancerPoliciesOfListener for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELB) SetLoadBalancerPoliciesOfListenerWithContext(ctx aws.Context, input *SetLoadBalancerPoliciesOfListenerInput, opts ...request.Option) (*SetLoadBalancerPoliciesOfListenerOutput, error) { + req, out := c.SetLoadBalancerPoliciesOfListenerRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } // Information about the AccessLog attribute. @@ -3011,7 +3505,7 @@ type CreateLoadBalancerInput struct { // The listeners. // // For more information, see Listeners for Your Classic Load Balancer (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-listener-config.html) - // in the Classic Load Balancers Guide. + // in the Classic Load Balancer Guide. // // Listeners is a required field Listeners []*Listener `type:"list" required:"true"` @@ -3048,7 +3542,7 @@ type CreateLoadBalancerInput struct { // // For more information about tagging your load balancer, see Tag Your Classic // Load Balancer (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/add-remove-tags.html) - // in the Classic Load Balancers Guide. + // in the Classic Load Balancer Guide. Tags []*Tag `min:"1" type:"list"` } @@ -3657,6 +4151,87 @@ func (s *DeregisterInstancesFromLoadBalancerOutput) SetInstances(v []*Instance) return s } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancing-2012-06-01/DescribeAccountLimitsInput +type DescribeAccountLimitsInput struct { + _ struct{} `type:"structure"` + + // The marker for the next set of results. (You received this marker from a + // previous call.) + Marker *string `type:"string"` + + // The maximum number of results to return with this call. + PageSize *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s DescribeAccountLimitsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAccountLimitsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeAccountLimitsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeAccountLimitsInput"} + if s.PageSize != nil && *s.PageSize < 1 { + invalidParams.Add(request.NewErrParamMinValue("PageSize", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMarker sets the Marker field's value. +func (s *DescribeAccountLimitsInput) SetMarker(v string) *DescribeAccountLimitsInput { + s.Marker = &v + return s +} + +// SetPageSize sets the PageSize field's value. +func (s *DescribeAccountLimitsInput) SetPageSize(v int64) *DescribeAccountLimitsInput { + s.PageSize = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancing-2012-06-01/DescribeAccountLimitsOutput +type DescribeAccountLimitsOutput struct { + _ struct{} `type:"structure"` + + // Information about the limits. + Limits []*Limit `type:"list"` + + // The marker to use when requesting the next set of results. If there are no + // additional results, the string is empty. + NextMarker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeAccountLimitsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAccountLimitsOutput) GoString() string { + return s.String() +} + +// SetLimits sets the Limits field's value. +func (s *DescribeAccountLimitsOutput) SetLimits(v []*Limit) *DescribeAccountLimitsOutput { + s.Limits = v + return s +} + +// SetNextMarker sets the NextMarker field's value. +func (s *DescribeAccountLimitsOutput) SetNextMarker(v string) *DescribeAccountLimitsOutput { + s.NextMarker = &v + return s +} + // Contains the parameters for DescribeInstanceHealth. // Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancing-2012-06-01/DescribeEndPointStateInput type DescribeInstanceHealthInput struct { @@ -4582,11 +5157,49 @@ func (s *LBCookieStickinessPolicy) SetPolicyName(v string) *LBCookieStickinessPo return s } +// Information about an Elastic Load Balancing resource limit for your AWS account. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancing-2012-06-01/Limit +type Limit struct { + _ struct{} `type:"structure"` + + // The maximum value of the limit. + Max *string `type:"string"` + + // The name of the limit. The possible values are: + // + // * classic-listeners + // + // * classic-load-balancers + Name *string `type:"string"` +} + +// String returns the string representation +func (s Limit) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Limit) GoString() string { + return s.String() +} + +// SetMax sets the Max field's value. +func (s *Limit) SetMax(v string) *Limit { + s.Max = &v + return s +} + +// SetName sets the Name field's value. +func (s *Limit) SetName(v string) *Limit { + s.Name = &v + return s +} + // Information about a listener. // // For information about the protocols and the ports supported by Elastic Load // Balancing, see Listeners for Your Classic Load Balancer (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-listener-config.html) -// in the Classic Load Balancers Guide. +// in the Classic Load Balancer Guide. 
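Aside (illustrative, not part of the vendored file): the DescribeAccountLimitsInput/Output types above expose Marker, PageSize, and NextMarker for manual pagination. The sketch below assumes the corresponding DescribeAccountLimits method generated earlier in this file.

// Illustrative sketch only; assumes c.DescribeAccountLimits exists in this package.
package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/elb"
)

func main() {
    svc := elb.New(session.Must(session.NewSession()))

    var marker *string
    for {
        out, err := svc.DescribeAccountLimits(&elb.DescribeAccountLimitsInput{
            Marker:   marker,
            PageSize: aws.Int64(100), // PageSize has a documented minimum of 1
        })
        if err != nil {
            fmt.Println("describe account limits failed:", err)
            return
        }
        for _, l := range out.Limits {
            fmt.Printf("%s = %s\n", aws.StringValue(l.Name), aws.StringValue(l.Max))
        }
        // An empty NextMarker means there are no further results.
        if aws.StringValue(out.NextMarker) == "" {
            return
        }
        marker = out.NextMarker
    }
}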
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancing-2012-06-01/Listener type Listener struct { _ struct{} `type:"structure"` @@ -4693,11 +5306,7 @@ func (s *Listener) SetSSLCertificateId(v string) *Listener { type ListenerDescription struct { _ struct{} `type:"structure"` - // Information about a listener. - // - // For information about the protocols and the ports supported by Elastic Load - // Balancing, see Listeners for Your Classic Load Balancer (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-listener-config.html) - // in the Classic Load Balancers Guide. + // The listener. Listener *Listener `type:"structure"` // The policies. If there are no policies enabled, the list is empty. @@ -4735,7 +5344,7 @@ type LoadBalancerAttributes struct { // and delivers the information to the Amazon S3 bucket that you specify. // // For more information, see Enable Access Logs (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-access-logs.html) - // in the Classic Load Balancers Guide. + // in the Classic Load Balancer Guide. AccessLog *AccessLog `type:"structure"` // This parameter is reserved. @@ -4745,7 +5354,7 @@ type LoadBalancerAttributes struct { // the load balancer shifts traffic away from a deregistered or unhealthy instance. // // For more information, see Configure Connection Draining (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/config-conn-drain.html) - // in the Classic Load Balancers Guide. + // in the Classic Load Balancer Guide. ConnectionDraining *ConnectionDraining `type:"structure"` // If enabled, the load balancer allows the connections to remain idle (no data @@ -4754,14 +5363,14 @@ type LoadBalancerAttributes struct { // By default, Elastic Load Balancing maintains a 60-second idle connection // timeout for both front-end and back-end connections of your load balancer. // For more information, see Configure Idle Connection Timeout (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/config-idle-timeout.html) - // in the Classic Load Balancers Guide. + // in the Classic Load Balancer Guide. ConnectionSettings *ConnectionSettings `type:"structure"` // If enabled, the load balancer routes the request traffic evenly across all // instances regardless of the Availability Zones. // // For more information, see Configure Cross-Zone Load Balancing (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-disable-crosszone-lb.html) - // in the Classic Load Balancers Guide. + // in the Classic Load Balancer Guide. CrossZoneLoadBalancing *CrossZoneLoadBalancing `type:"structure"` } @@ -4849,7 +5458,7 @@ type LoadBalancerDescription struct { // The DNS name of the load balancer. // // For more information, see Configure a Custom Domain Name (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/using-domain-names-with-elb.html) - // in the Classic Load Balancers Guide. + // in the Classic Load Balancer Guide. CanonicalHostedZoneName *string `type:"string"` // The ID of the Amazon Route 53 hosted zone for the load balancer. @@ -5013,7 +5622,7 @@ func (s *LoadBalancerDescription) SetVPCId(v string) *LoadBalancerDescription { type ModifyLoadBalancerAttributesInput struct { _ struct{} `type:"structure"` - // The attributes of the load balancer. + // The attributes for the load balancer. 
// // LoadBalancerAttributes is a required field LoadBalancerAttributes *LoadBalancerAttributes `type:"structure" required:"true"` @@ -5072,7 +5681,7 @@ func (s *ModifyLoadBalancerAttributesInput) SetLoadBalancerName(v string) *Modif type ModifyLoadBalancerAttributesOutput struct { _ struct{} `type:"structure"` - // The attributes for a load balancer. + // Information about the load balancer attributes. LoadBalancerAttributes *LoadBalancerAttributes `type:"structure"` // The name of the load balancer. diff --git a/vendor/github.com/aws/aws-sdk-go/service/elb/doc.go b/vendor/github.com/aws/aws-sdk-go/service/elb/doc.go new file mode 100644 index 000000000000..92f97d7292e2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/elb/doc.go @@ -0,0 +1,55 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package elb provides the client and types for making API +// requests to Elastic Load Balancing. +// +// A load balancer distributes incoming traffic across your EC2 instances. This +// enables you to increase the availability of your application. The load balancer +// also monitors the health of its registered instances and ensures that it +// routes traffic only to healthy instances. You configure your load balancer +// to accept incoming traffic by specifying one or more listeners, which are +// configured with a protocol and port number for connections from clients to +// the load balancer and a protocol and port number for connections from the +// load balancer to the instances. +// +// Elastic Load Balancing supports two types of load balancers: Classic Load +// Balancers and Application Load Balancers (new). A Classic Load Balancer makes +// routing and load balancing decisions either at the transport layer (TCP/SSL) +// or the application layer (HTTP/HTTPS), and supports either EC2-Classic or +// a VPC. An Application Load Balancer makes routing and load balancing decisions +// at the application layer (HTTP/HTTPS), supports path-based routing, and can +// route requests to one or more ports on each EC2 instance or container instance +// in your virtual private cloud (VPC). For more information, see the Elastic +// Load Balancing User Guide (http://docs.aws.amazon.com/elasticloadbalancing/latest/userguide/what-is-load-balancing.html). +// +// This reference covers the 2012-06-01 API, which supports Classic Load Balancers. +// The 2015-12-01 API supports Application Load Balancers. +// +// To get started, create a load balancer with one or more listeners using CreateLoadBalancer. +// Register your instances with the load balancer using RegisterInstancesWithLoadBalancer. +// +// All Elastic Load Balancing operations are idempotent, which means that they +// complete at most one time. If you repeat an operation, it succeeds with a +// 200 OK response code. +// +// See https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancing-2012-06-01 for more information on this service. +// +// See elb package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/elb/ +// +// Using the Client +// +// To Elastic Load Balancing with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. 
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the Elastic Load Balancing client ELB for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/elb/#New +package elb diff --git a/vendor/github.com/aws/aws-sdk-go/service/elb/errors.go b/vendor/github.com/aws/aws-sdk-go/service/elb/errors.go new file mode 100644 index 000000000000..77ffb20ecd69 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/elb/errors.go @@ -0,0 +1,136 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package elb + +const ( + + // ErrCodeAccessPointNotFoundException for service response error code + // "LoadBalancerNotFound". + // + // The specified load balancer does not exist. + ErrCodeAccessPointNotFoundException = "LoadBalancerNotFound" + + // ErrCodeCertificateNotFoundException for service response error code + // "CertificateNotFound". + // + // The specified ARN does not refer to a valid SSL certificate in AWS Identity + // and Access Management (IAM) or AWS Certificate Manager (ACM). Note that if + // you recently uploaded the certificate to IAM, this error might indicate that + // the certificate is not fully available yet. + ErrCodeCertificateNotFoundException = "CertificateNotFound" + + // ErrCodeDependencyThrottleException for service response error code + // "DependencyThrottle". + ErrCodeDependencyThrottleException = "DependencyThrottle" + + // ErrCodeDuplicateAccessPointNameException for service response error code + // "DuplicateLoadBalancerName". + // + // The specified load balancer name already exists for this account. + ErrCodeDuplicateAccessPointNameException = "DuplicateLoadBalancerName" + + // ErrCodeDuplicateListenerException for service response error code + // "DuplicateListener". + // + // A listener already exists for the specified load balancer name and port, + // but with a different instance port, protocol, or SSL certificate. + ErrCodeDuplicateListenerException = "DuplicateListener" + + // ErrCodeDuplicatePolicyNameException for service response error code + // "DuplicatePolicyName". + // + // A policy with the specified name already exists for this load balancer. + ErrCodeDuplicatePolicyNameException = "DuplicatePolicyName" + + // ErrCodeDuplicateTagKeysException for service response error code + // "DuplicateTagKeys". + // + // A tag key was specified more than once. + ErrCodeDuplicateTagKeysException = "DuplicateTagKeys" + + // ErrCodeInvalidConfigurationRequestException for service response error code + // "InvalidConfigurationRequest". + // + // The requested configuration change is not valid. + ErrCodeInvalidConfigurationRequestException = "InvalidConfigurationRequest" + + // ErrCodeInvalidEndPointException for service response error code + // "InvalidInstance". + // + // The specified endpoint is not valid. + ErrCodeInvalidEndPointException = "InvalidInstance" + + // ErrCodeInvalidSchemeException for service response error code + // "InvalidScheme". + // + // The specified value for the schema is not valid. You can only specify a scheme + // for load balancers in a VPC. + ErrCodeInvalidSchemeException = "InvalidScheme" + + // ErrCodeInvalidSecurityGroupException for service response error code + // "InvalidSecurityGroup". + // + // One or more of the specified security groups do not exist. + ErrCodeInvalidSecurityGroupException = "InvalidSecurityGroup" + + // ErrCodeInvalidSubnetException for service response error code + // "InvalidSubnet". 
+ // + // The specified VPC has no associated Internet gateway. + ErrCodeInvalidSubnetException = "InvalidSubnet" + + // ErrCodeListenerNotFoundException for service response error code + // "ListenerNotFound". + // + // The load balancer does not have a listener configured at the specified port. + ErrCodeListenerNotFoundException = "ListenerNotFound" + + // ErrCodeLoadBalancerAttributeNotFoundException for service response error code + // "LoadBalancerAttributeNotFound". + // + // The specified load balancer attribute does not exist. + ErrCodeLoadBalancerAttributeNotFoundException = "LoadBalancerAttributeNotFound" + + // ErrCodePolicyNotFoundException for service response error code + // "PolicyNotFound". + // + // One or more of the specified policies do not exist. + ErrCodePolicyNotFoundException = "PolicyNotFound" + + // ErrCodePolicyTypeNotFoundException for service response error code + // "PolicyTypeNotFound". + // + // One or more of the specified policy types do not exist. + ErrCodePolicyTypeNotFoundException = "PolicyTypeNotFound" + + // ErrCodeSubnetNotFoundException for service response error code + // "SubnetNotFound". + // + // One or more of the specified subnets do not exist. + ErrCodeSubnetNotFoundException = "SubnetNotFound" + + // ErrCodeTooManyAccessPointsException for service response error code + // "TooManyLoadBalancers". + // + // The quota for the number of load balancers has been reached. + ErrCodeTooManyAccessPointsException = "TooManyLoadBalancers" + + // ErrCodeTooManyPoliciesException for service response error code + // "TooManyPolicies". + // + // The quota for the number of policies for this load balancer has been reached. + ErrCodeTooManyPoliciesException = "TooManyPolicies" + + // ErrCodeTooManyTagsException for service response error code + // "TooManyTags". + // + // The quota for the number of tags that can be assigned to a load balancer + // has been reached. + ErrCodeTooManyTagsException = "TooManyTags" + + // ErrCodeUnsupportedProtocolException for service response error code + // "UnsupportedProtocol". + // + // The specified protocol or signature version is not supported. + ErrCodeUnsupportedProtocolException = "UnsupportedProtocol" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/elb/service.go b/vendor/github.com/aws/aws-sdk-go/service/elb/service.go index 68d7e2ac0186..057530f6cad8 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elb/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elb/service.go @@ -1,4 +1,4 @@ -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. package elb @@ -11,36 +11,12 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/query" ) -// A load balancer distributes incoming traffic across your EC2 instances. This -// enables you to increase the availability of your application. The load balancer -// also monitors the health of its registered instances and ensures that it -// routes traffic only to healthy instances. You configure your load balancer -// to accept incoming traffic by specifying one or more listeners, which are -// configured with a protocol and port number for connections from clients to -// the load balancer and a protocol and port number for connections from the -// load balancer to the instances. +// ELB provides the API operation methods for making requests to +// Elastic Load Balancing. See this package's package overview docs +// for details on the service. 
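Aside (illustrative, not part of the vendored file): the generated docs state that one ELB client can be shared across goroutines as long as its configuration is not mutated after creation. A minimal sketch under that assumption, using the real DescribeInstanceHealth operation; the region and load balancer names are hypothetical.

// Illustrative sketch only; one client reused concurrently, never mutated.
package main

import (
    "fmt"
    "sync"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/elb"
)

func main() {
    sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
    svc := elb.New(sess)

    names := []string{"lb-a", "lb-b"} // hypothetical load balancer names
    var wg sync.WaitGroup
    for _, name := range names {
        wg.Add(1)
        go func(n string) {
            defer wg.Done()
            _, err := svc.DescribeInstanceHealth(&elb.DescribeInstanceHealthInput{
                LoadBalancerName: aws.String(n),
            })
            if err != nil {
                fmt.Println(n, "health check failed:", err)
            }
        }(name)
    }
    wg.Wait()
}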
// -// Elastic Load Balancing supports two types of load balancers: Classic load -// balancers and Application load balancers (new). A Classic load balancer makes -// routing and load balancing decisions either at the transport layer (TCP/SSL) -// or the application layer (HTTP/HTTPS), and supports either EC2-Classic or -// a VPC. An Application load balancer makes routing and load balancing decisions -// at the application layer (HTTP/HTTPS), supports path-based routing, and can -// route requests to one or more ports on each EC2 instance or container instance -// in your virtual private cloud (VPC). For more information, see the . -// -// This reference covers the 2012-06-01 API, which supports Classic load balancers. -// The 2015-12-01 API supports Application load balancers. -// -// To get started, create a load balancer with one or more listeners using CreateLoadBalancer. -// Register your instances with the load balancer using RegisterInstancesWithLoadBalancer. -// -// All Elastic Load Balancing operations are idempotent, which means that they -// complete at most one time. If you repeat an operation, it succeeds with a -// 200 OK response code. -// The service client's operations are safe to be used concurrently. -// It is not safe to mutate any of the client's properties though. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancing-2012-06-01 +// ELB methods are safe to use concurrently. It is not safe to +// modify mutate any of the struct's properties though. type ELB struct { *client.Client } diff --git a/vendor/github.com/aws/aws-sdk-go/service/elb/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/elb/waiters.go index 89fc1d85b655..bdf449d36b17 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elb/waiters.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elb/waiters.go @@ -1,94 +1,158 @@ -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. package elb import ( - "github.com/aws/aws-sdk-go/private/waiter" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" ) // WaitUntilAnyInstanceInService uses the Elastic Load Balancing API operation // DescribeInstanceHealth to wait for a condition to be met before returning. -// If the condition is not meet within the max attempt window an error will +// If the condition is not met within the max attempt window, an error will // be returned. func (c *ELB) WaitUntilAnyInstanceInService(input *DescribeInstanceHealthInput) error { - waiterCfg := waiter.Config{ - Operation: "DescribeInstanceHealth", - Delay: 15, + return c.WaitUntilAnyInstanceInServiceWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilAnyInstanceInServiceWithContext is an extended version of WaitUntilAnyInstanceInService. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *ELB) WaitUntilAnyInstanceInServiceWithContext(ctx aws.Context, input *DescribeInstanceHealthInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilAnyInstanceInService", MaxAttempts: 40, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "pathAny", - Argument: "InstanceStates[].State", + State: request.SuccessWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "InstanceStates[].State", Expected: "InService", }, }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeInstanceHealthInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInstanceHealthRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } + w.ApplyOptions(opts...) - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } // WaitUntilInstanceDeregistered uses the Elastic Load Balancing API operation // DescribeInstanceHealth to wait for a condition to be met before returning. -// If the condition is not meet within the max attempt window an error will +// If the condition is not met within the max attempt window, an error will // be returned. func (c *ELB) WaitUntilInstanceDeregistered(input *DescribeInstanceHealthInput) error { - waiterCfg := waiter.Config{ - Operation: "DescribeInstanceHealth", - Delay: 15, + return c.WaitUntilInstanceDeregisteredWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilInstanceDeregisteredWithContext is an extended version of WaitUntilInstanceDeregistered. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELB) WaitUntilInstanceDeregisteredWithContext(ctx aws.Context, input *DescribeInstanceHealthInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilInstanceDeregistered", MaxAttempts: 40, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "pathAll", - Argument: "InstanceStates[].State", + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "InstanceStates[].State", Expected: "OutOfService", }, { - State: "success", - Matcher: "error", - Argument: "", + State: request.SuccessWaiterState, + Matcher: request.ErrorWaiterMatch, Expected: "InvalidInstance", }, }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeInstanceHealthInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInstanceHealthRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } + w.ApplyOptions(opts...) - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } // WaitUntilInstanceInService uses the Elastic Load Balancing API operation // DescribeInstanceHealth to wait for a condition to be met before returning. 
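Aside (illustrative, not part of the vendored file): the rewritten waiters accept request.WaiterOption values, so callers can tune the delay and attempt count and bound the whole wait with a context. The option constructors (WithWaiterDelay, WithWaiterMaxAttempts) are assumed from the aws/request package that this file now imports.

// Illustrative sketch only; waiter options assumed from aws/request.
package main

import (
    "context"
    "fmt"
    "time"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/request"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/elb"
)

func main() {
    svc := elb.New(session.Must(session.NewSession()))

    // Overall deadline for the wait comes from the context; per-attempt
    // behaviour comes from the waiter options.
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
    defer cancel()

    err := svc.WaitUntilAnyInstanceInServiceWithContext(ctx,
        &elb.DescribeInstanceHealthInput{LoadBalancerName: aws.String("my-classic-elb")}, // hypothetical name
        request.WithWaiterDelay(request.ConstantWaiterDelay(10*time.Second)),
        request.WithWaiterMaxAttempts(30),
    )
    if err != nil {
        fmt.Println("no instance entered InService before the deadline:", err)
    }
}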
-// If the condition is not meet within the max attempt window an error will +// If the condition is not met within the max attempt window, an error will // be returned. func (c *ELB) WaitUntilInstanceInService(input *DescribeInstanceHealthInput) error { - waiterCfg := waiter.Config{ - Operation: "DescribeInstanceHealth", - Delay: 15, + return c.WaitUntilInstanceInServiceWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilInstanceInServiceWithContext is an extended version of WaitUntilInstanceInService. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELB) WaitUntilInstanceInServiceWithContext(ctx aws.Context, input *DescribeInstanceHealthInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilInstanceInService", MaxAttempts: 40, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "pathAll", - Argument: "InstanceStates[].State", + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "InstanceStates[].State", Expected: "InService", }, + { + State: request.RetryWaiterState, + Matcher: request.ErrorWaiterMatch, + Expected: "InvalidInstance", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeInstanceHealthInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInstanceHealthRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil }, } + w.ApplyOptions(opts...) - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } diff --git a/vendor/github.com/aws/aws-sdk-go/service/elbv2/api.go b/vendor/github.com/aws/aws-sdk-go/service/elbv2/api.go new file mode 100644 index 000000000000..03e1b5471ed2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/elbv2/api.go @@ -0,0 +1,7428 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package elbv2 + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAddTags = "AddTags" + +// AddTagsRequest generates a "aws/request.Request" representing the +// client's request for the AddTags operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AddTags for more information on using the AddTags +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AddTagsRequest method. 
+// req, resp := client.AddTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/AddTags +func (c *ELBV2) AddTagsRequest(input *AddTagsInput) (req *request.Request, output *AddTagsOutput) { + op := &request.Operation{ + Name: opAddTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddTagsInput{} + } + + output = &AddTagsOutput{} + req = c.newRequest(op, input, output) + return +} + +// AddTags API operation for Elastic Load Balancing. +// +// Adds the specified tags to the specified Elastic Load Balancing resource. +// You can tag your Application Load Balancers, Network Load Balancers, and +// your target groups. +// +// Each tag consists of a key and an optional value. If a resource already has +// a tag with the same key, AddTags updates its value. +// +// To list the current tags for your resources, use DescribeTags. To remove +// tags from your resources, use RemoveTags. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Elastic Load Balancing's +// API operation AddTags for usage and error information. +// +// Returned Error Codes: +// * ErrCodeDuplicateTagKeysException "DuplicateTagKeys" +// A tag key was specified more than once. +// +// * ErrCodeTooManyTagsException "TooManyTags" +// You've reached the limit on the number of tags per load balancer. +// +// * ErrCodeLoadBalancerNotFoundException "LoadBalancerNotFound" +// The specified load balancer does not exist. +// +// * ErrCodeTargetGroupNotFoundException "TargetGroupNotFound" +// The specified target group does not exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/AddTags +func (c *ELBV2) AddTags(input *AddTagsInput) (*AddTagsOutput, error) { + req, out := c.AddTagsRequest(input) + return out, req.Send() +} + +// AddTagsWithContext is the same as AddTags with the addition of +// the ability to pass a context and additional request options. +// +// See AddTags for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELBV2) AddTagsWithContext(ctx aws.Context, input *AddTagsInput, opts ...request.Option) (*AddTagsOutput, error) { + req, out := c.AddTagsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateListener = "CreateListener" + +// CreateListenerRequest generates a "aws/request.Request" representing the +// client's request for the CreateListener operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateListener for more information on using the CreateListener +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateListenerRequest method. +// req, resp := client.CreateListenerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/CreateListener +func (c *ELBV2) CreateListenerRequest(input *CreateListenerInput) (req *request.Request, output *CreateListenerOutput) { + op := &request.Operation{ + Name: opCreateListener, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateListenerInput{} + } + + output = &CreateListenerOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateListener API operation for Elastic Load Balancing. +// +// Creates a listener for the specified Application Load Balancer or Network +// Load Balancer. +// +// You can create up to 10 listeners per load balancer. +// +// To update a listener, use ModifyListener. When you are finished with a listener, +// you can delete it using DeleteListener. If you are finished with both the +// listener and the load balancer, you can delete them both using DeleteLoadBalancer. +// +// For more information, see Listeners for Your Application Load Balancers (http://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-listeners.html) +// in the Application Load Balancers Guide and Listeners for Your Network Load +// Balancers (http://docs.aws.amazon.com/elasticloadbalancing/latest/network/load-balancer-listeners.html) +// in the Network Load Balancers Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Elastic Load Balancing's +// API operation CreateListener for usage and error information. +// +// Returned Error Codes: +// * ErrCodeDuplicateListenerException "DuplicateListener" +// A listener with the specified port already exists. +// +// * ErrCodeTooManyListenersException "TooManyListeners" +// You've reached the limit on the number of listeners per load balancer. +// +// * ErrCodeTooManyCertificatesException "TooManyCertificates" +// You've reached the limit on the number of certificates per listener. +// +// * ErrCodeLoadBalancerNotFoundException "LoadBalancerNotFound" +// The specified load balancer does not exist. +// +// * ErrCodeTargetGroupNotFoundException "TargetGroupNotFound" +// The specified target group does not exist. +// +// * ErrCodeTargetGroupAssociationLimitException "TargetGroupAssociationLimit" +// You've reached the limit on the number of load balancers per target group. +// +// * ErrCodeInvalidConfigurationRequestException "InvalidConfigurationRequest" +// The requested configuration is not valid. +// +// * ErrCodeIncompatibleProtocolsException "IncompatibleProtocols" +// The specified configuration is not valid with this protocol. +// +// * ErrCodeSSLPolicyNotFoundException "SSLPolicyNotFound" +// The specified SSL policy does not exist. +// +// * ErrCodeCertificateNotFoundException "CertificateNotFound" +// The specified certificate does not exist. +// +// * ErrCodeUnsupportedProtocolException "UnsupportedProtocol" +// The specified protocol is not supported. 
+// +// * ErrCodeTooManyRegistrationsForTargetIdException "TooManyRegistrationsForTargetId" +// You've reached the limit on the number of times a target can be registered +// with a load balancer. +// +// * ErrCodeTooManyTargetsException "TooManyTargets" +// You've reached the limit on the number of targets. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/CreateListener +func (c *ELBV2) CreateListener(input *CreateListenerInput) (*CreateListenerOutput, error) { + req, out := c.CreateListenerRequest(input) + return out, req.Send() +} + +// CreateListenerWithContext is the same as CreateListener with the addition of +// the ability to pass a context and additional request options. +// +// See CreateListener for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELBV2) CreateListenerWithContext(ctx aws.Context, input *CreateListenerInput, opts ...request.Option) (*CreateListenerOutput, error) { + req, out := c.CreateListenerRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateLoadBalancer = "CreateLoadBalancer" + +// CreateLoadBalancerRequest generates a "aws/request.Request" representing the +// client's request for the CreateLoadBalancer operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateLoadBalancer for more information on using the CreateLoadBalancer +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateLoadBalancerRequest method. +// req, resp := client.CreateLoadBalancerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/CreateLoadBalancer +func (c *ELBV2) CreateLoadBalancerRequest(input *CreateLoadBalancerInput) (req *request.Request, output *CreateLoadBalancerOutput) { + op := &request.Operation{ + Name: opCreateLoadBalancer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateLoadBalancerInput{} + } + + output = &CreateLoadBalancerOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateLoadBalancer API operation for Elastic Load Balancing. +// +// Creates an Application Load Balancer or a Network Load Balancer. +// +// When you create a load balancer, you can specify security groups, subnets, +// IP address type, and tags. Otherwise, you could do so later using SetSecurityGroups, +// SetSubnets, SetIpAddressType, and AddTags. +// +// To create listeners for your load balancer, use CreateListener. To describe +// your current load balancers, see DescribeLoadBalancers. When you are finished +// with a load balancer, you can delete it using DeleteLoadBalancer. +// +// You can create up to 20 load balancers per region per account. 
You can request +// an increase for the number of load balancers for your account. For more information, +// see Limits for Your Application Load Balancer (http://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-limits.html) +// in the Application Load Balancers Guide and Limits for Your Network Load +// Balancer (http://docs.aws.amazon.com/elasticloadbalancing/latest/network/load-balancer-limits.html) +// in the Network Load Balancers Guide. +// +// For more information, see Application Load Balancers (http://docs.aws.amazon.com/elasticloadbalancing/latest/application/application-load-balancers.html) +// in the Application Load Balancers Guide and Network Load Balancers (http://docs.aws.amazon.com/elasticloadbalancing/latest/network/network-load-balancers.html) +// in the Network Load Balancers Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Elastic Load Balancing's +// API operation CreateLoadBalancer for usage and error information. +// +// Returned Error Codes: +// * ErrCodeDuplicateLoadBalancerNameException "DuplicateLoadBalancerName" +// A load balancer with the specified name already exists. +// +// * ErrCodeTooManyLoadBalancersException "TooManyLoadBalancers" +// You've reached the limit on the number of load balancers for your AWS account. +// +// * ErrCodeInvalidConfigurationRequestException "InvalidConfigurationRequest" +// The requested configuration is not valid. +// +// * ErrCodeSubnetNotFoundException "SubnetNotFound" +// The specified subnet does not exist. +// +// * ErrCodeInvalidSubnetException "InvalidSubnet" +// The specified subnet is out of available addresses. +// +// * ErrCodeInvalidSecurityGroupException "InvalidSecurityGroup" +// The specified security group does not exist. +// +// * ErrCodeInvalidSchemeException "InvalidScheme" +// The requested scheme is not valid. +// +// * ErrCodeTooManyTagsException "TooManyTags" +// You've reached the limit on the number of tags per load balancer. +// +// * ErrCodeDuplicateTagKeysException "DuplicateTagKeys" +// A tag key was specified more than once. +// +// * ErrCodeResourceInUseException "ResourceInUse" +// A specified resource is in use. +// +// * ErrCodeAllocationIdNotFoundException "AllocationIdNotFound" +// The specified allocation ID does not exist. +// +// * ErrCodeAvailabilityZoneNotSupportedException "AvailabilityZoneNotSupported" +// The specified Availability Zone is not supported. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/CreateLoadBalancer +func (c *ELBV2) CreateLoadBalancer(input *CreateLoadBalancerInput) (*CreateLoadBalancerOutput, error) { + req, out := c.CreateLoadBalancerRequest(input) + return out, req.Send() +} + +// CreateLoadBalancerWithContext is the same as CreateLoadBalancer with the addition of +// the ability to pass a context and additional request options. +// +// See CreateLoadBalancer for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *ELBV2) CreateLoadBalancerWithContext(ctx aws.Context, input *CreateLoadBalancerInput, opts ...request.Option) (*CreateLoadBalancerOutput, error) { + req, out := c.CreateLoadBalancerRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateRule = "CreateRule" + +// CreateRuleRequest generates a "aws/request.Request" representing the +// client's request for the CreateRule operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateRule for more information on using the CreateRule +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateRuleRequest method. +// req, resp := client.CreateRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/CreateRule +func (c *ELBV2) CreateRuleRequest(input *CreateRuleInput) (req *request.Request, output *CreateRuleOutput) { + op := &request.Operation{ + Name: opCreateRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateRuleInput{} + } + + output = &CreateRuleOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateRule API operation for Elastic Load Balancing. +// +// Creates a rule for the specified listener. The listener must be associated +// with an Application Load Balancer. +// +// Rules are evaluated in priority order, from the lowest value to the highest +// value. When the condition for a rule is met, the specified action is taken. +// If no conditions are met, the action for the default rule is taken. For more +// information, see Listener Rules (http://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-listeners.html#listener-rules) +// in the Application Load Balancers Guide. +// +// To view your current rules, use DescribeRules. To update a rule, use ModifyRule. +// To set the priorities of your rules, use SetRulePriorities. To delete a rule, +// use DeleteRule. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Elastic Load Balancing's +// API operation CreateRule for usage and error information. +// +// Returned Error Codes: +// * ErrCodePriorityInUseException "PriorityInUse" +// The specified priority is in use. +// +// * ErrCodeTooManyTargetGroupsException "TooManyTargetGroups" +// You've reached the limit on the number of target groups for your AWS account. +// +// * ErrCodeTooManyRulesException "TooManyRules" +// You've reached the limit on the number of rules per load balancer. +// +// * ErrCodeTargetGroupAssociationLimitException "TargetGroupAssociationLimit" +// You've reached the limit on the number of load balancers per target group. +// +// * ErrCodeIncompatibleProtocolsException "IncompatibleProtocols" +// The specified configuration is not valid with this protocol. 
+// +// * ErrCodeListenerNotFoundException "ListenerNotFound" +// The specified listener does not exist. +// +// * ErrCodeTargetGroupNotFoundException "TargetGroupNotFound" +// The specified target group does not exist. +// +// * ErrCodeInvalidConfigurationRequestException "InvalidConfigurationRequest" +// The requested configuration is not valid. +// +// * ErrCodeTooManyRegistrationsForTargetIdException "TooManyRegistrationsForTargetId" +// You've reached the limit on the number of times a target can be registered +// with a load balancer. +// +// * ErrCodeTooManyTargetsException "TooManyTargets" +// You've reached the limit on the number of targets. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/CreateRule +func (c *ELBV2) CreateRule(input *CreateRuleInput) (*CreateRuleOutput, error) { + req, out := c.CreateRuleRequest(input) + return out, req.Send() +} + +// CreateRuleWithContext is the same as CreateRule with the addition of +// the ability to pass a context and additional request options. +// +// See CreateRule for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELBV2) CreateRuleWithContext(ctx aws.Context, input *CreateRuleInput, opts ...request.Option) (*CreateRuleOutput, error) { + req, out := c.CreateRuleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateTargetGroup = "CreateTargetGroup" + +// CreateTargetGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreateTargetGroup operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateTargetGroup for more information on using the CreateTargetGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateTargetGroupRequest method. +// req, resp := client.CreateTargetGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/CreateTargetGroup +func (c *ELBV2) CreateTargetGroupRequest(input *CreateTargetGroupInput) (req *request.Request, output *CreateTargetGroupOutput) { + op := &request.Operation{ + Name: opCreateTargetGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateTargetGroupInput{} + } + + output = &CreateTargetGroupOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateTargetGroup API operation for Elastic Load Balancing. +// +// Creates a target group. +// +// To register targets with the target group, use RegisterTargets. To update +// the health check settings for the target group, use ModifyTargetGroup. To +// monitor the health of targets in the target group, use DescribeTargetHealth. 
+// +// To route traffic to the targets in a target group, specify the target group +// in an action using CreateListener or CreateRule. +// +// To delete a target group, use DeleteTargetGroup. +// +// For more information, see Target Groups for Your Application Load Balancers +// (http://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-target-groups.html) +// in the Application Load Balancers Guide or Target Groups for Your Network +// Load Balancers (http://docs.aws.amazon.com/elasticloadbalancing/latest/network/load-balancer-target-groups.html) +// in the Network Load Balancers Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Elastic Load Balancing's +// API operation CreateTargetGroup for usage and error information. +// +// Returned Error Codes: +// * ErrCodeDuplicateTargetGroupNameException "DuplicateTargetGroupName" +// A target group with the specified name already exists. +// +// * ErrCodeTooManyTargetGroupsException "TooManyTargetGroups" +// You've reached the limit on the number of target groups for your AWS account. +// +// * ErrCodeInvalidConfigurationRequestException "InvalidConfigurationRequest" +// The requested configuration is not valid. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/CreateTargetGroup +func (c *ELBV2) CreateTargetGroup(input *CreateTargetGroupInput) (*CreateTargetGroupOutput, error) { + req, out := c.CreateTargetGroupRequest(input) + return out, req.Send() +} + +// CreateTargetGroupWithContext is the same as CreateTargetGroup with the addition of +// the ability to pass a context and additional request options. +// +// See CreateTargetGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELBV2) CreateTargetGroupWithContext(ctx aws.Context, input *CreateTargetGroupInput, opts ...request.Option) (*CreateTargetGroupOutput, error) { + req, out := c.CreateTargetGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteListener = "DeleteListener" + +// DeleteListenerRequest generates a "aws/request.Request" representing the +// client's request for the DeleteListener operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteListener for more information on using the DeleteListener +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteListenerRequest method. 
+// req, resp := client.DeleteListenerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DeleteListener +func (c *ELBV2) DeleteListenerRequest(input *DeleteListenerInput) (req *request.Request, output *DeleteListenerOutput) { + op := &request.Operation{ + Name: opDeleteListener, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteListenerInput{} + } + + output = &DeleteListenerOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteListener API operation for Elastic Load Balancing. +// +// Deletes the specified listener. +// +// Alternatively, your listener is deleted when you delete the load balancer +// it is attached to using DeleteLoadBalancer. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Elastic Load Balancing's +// API operation DeleteListener for usage and error information. +// +// Returned Error Codes: +// * ErrCodeListenerNotFoundException "ListenerNotFound" +// The specified listener does not exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DeleteListener +func (c *ELBV2) DeleteListener(input *DeleteListenerInput) (*DeleteListenerOutput, error) { + req, out := c.DeleteListenerRequest(input) + return out, req.Send() +} + +// DeleteListenerWithContext is the same as DeleteListener with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteListener for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELBV2) DeleteListenerWithContext(ctx aws.Context, input *DeleteListenerInput, opts ...request.Option) (*DeleteListenerOutput, error) { + req, out := c.DeleteListenerRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteLoadBalancer = "DeleteLoadBalancer" + +// DeleteLoadBalancerRequest generates a "aws/request.Request" representing the +// client's request for the DeleteLoadBalancer operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteLoadBalancer for more information on using the DeleteLoadBalancer +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteLoadBalancerRequest method. 
+// req, resp := client.DeleteLoadBalancerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DeleteLoadBalancer +func (c *ELBV2) DeleteLoadBalancerRequest(input *DeleteLoadBalancerInput) (req *request.Request, output *DeleteLoadBalancerOutput) { + op := &request.Operation{ + Name: opDeleteLoadBalancer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteLoadBalancerInput{} + } + + output = &DeleteLoadBalancerOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteLoadBalancer API operation for Elastic Load Balancing. +// +// Deletes the specified Application Load Balancer or Network Load Balancer +// and its attached listeners. +// +// You can't delete a load balancer if deletion protection is enabled. If the +// load balancer does not exist or has already been deleted, the call succeeds. +// +// Deleting a load balancer does not affect its registered targets. For example, +// your EC2 instances continue to run and are still registered to their target +// groups. If you no longer need these EC2 instances, you can stop or terminate +// them. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Elastic Load Balancing's +// API operation DeleteLoadBalancer for usage and error information. +// +// Returned Error Codes: +// * ErrCodeLoadBalancerNotFoundException "LoadBalancerNotFound" +// The specified load balancer does not exist. +// +// * ErrCodeOperationNotPermittedException "OperationNotPermitted" +// This operation is not allowed. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DeleteLoadBalancer +func (c *ELBV2) DeleteLoadBalancer(input *DeleteLoadBalancerInput) (*DeleteLoadBalancerOutput, error) { + req, out := c.DeleteLoadBalancerRequest(input) + return out, req.Send() +} + +// DeleteLoadBalancerWithContext is the same as DeleteLoadBalancer with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteLoadBalancer for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELBV2) DeleteLoadBalancerWithContext(ctx aws.Context, input *DeleteLoadBalancerInput, opts ...request.Option) (*DeleteLoadBalancerOutput, error) { + req, out := c.DeleteLoadBalancerRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteRule = "DeleteRule" + +// DeleteRuleRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRule operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteRule for more information on using the DeleteRule +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteRuleRequest method. +// req, resp := client.DeleteRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DeleteRule +func (c *ELBV2) DeleteRuleRequest(input *DeleteRuleInput) (req *request.Request, output *DeleteRuleOutput) { + op := &request.Operation{ + Name: opDeleteRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRuleInput{} + } + + output = &DeleteRuleOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteRule API operation for Elastic Load Balancing. +// +// Deletes the specified rule. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Elastic Load Balancing's +// API operation DeleteRule for usage and error information. +// +// Returned Error Codes: +// * ErrCodeRuleNotFoundException "RuleNotFound" +// The specified rule does not exist. +// +// * ErrCodeOperationNotPermittedException "OperationNotPermitted" +// This operation is not allowed. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DeleteRule +func (c *ELBV2) DeleteRule(input *DeleteRuleInput) (*DeleteRuleOutput, error) { + req, out := c.DeleteRuleRequest(input) + return out, req.Send() +} + +// DeleteRuleWithContext is the same as DeleteRule with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteRule for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELBV2) DeleteRuleWithContext(ctx aws.Context, input *DeleteRuleInput, opts ...request.Option) (*DeleteRuleOutput, error) { + req, out := c.DeleteRuleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteTargetGroup = "DeleteTargetGroup" + +// DeleteTargetGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTargetGroup operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteTargetGroup for more information on using the DeleteTargetGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteTargetGroupRequest method. 
+// req, resp := client.DeleteTargetGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DeleteTargetGroup +func (c *ELBV2) DeleteTargetGroupRequest(input *DeleteTargetGroupInput) (req *request.Request, output *DeleteTargetGroupOutput) { + op := &request.Operation{ + Name: opDeleteTargetGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTargetGroupInput{} + } + + output = &DeleteTargetGroupOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteTargetGroup API operation for Elastic Load Balancing. +// +// Deletes the specified target group. +// +// You can delete a target group if it is not referenced by any actions. Deleting +// a target group also deletes any associated health checks. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Elastic Load Balancing's +// API operation DeleteTargetGroup for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceInUseException "ResourceInUse" +// A specified resource is in use. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DeleteTargetGroup +func (c *ELBV2) DeleteTargetGroup(input *DeleteTargetGroupInput) (*DeleteTargetGroupOutput, error) { + req, out := c.DeleteTargetGroupRequest(input) + return out, req.Send() +} + +// DeleteTargetGroupWithContext is the same as DeleteTargetGroup with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteTargetGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELBV2) DeleteTargetGroupWithContext(ctx aws.Context, input *DeleteTargetGroupInput, opts ...request.Option) (*DeleteTargetGroupOutput, error) { + req, out := c.DeleteTargetGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeregisterTargets = "DeregisterTargets" + +// DeregisterTargetsRequest generates a "aws/request.Request" representing the +// client's request for the DeregisterTargets operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeregisterTargets for more information on using the DeregisterTargets +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeregisterTargetsRequest method. 
+// req, resp := client.DeregisterTargetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DeregisterTargets +func (c *ELBV2) DeregisterTargetsRequest(input *DeregisterTargetsInput) (req *request.Request, output *DeregisterTargetsOutput) { + op := &request.Operation{ + Name: opDeregisterTargets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeregisterTargetsInput{} + } + + output = &DeregisterTargetsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeregisterTargets API operation for Elastic Load Balancing. +// +// Deregisters the specified targets from the specified target group. After +// the targets are deregistered, they no longer receive traffic from the load +// balancer. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Elastic Load Balancing's +// API operation DeregisterTargets for usage and error information. +// +// Returned Error Codes: +// * ErrCodeTargetGroupNotFoundException "TargetGroupNotFound" +// The specified target group does not exist. +// +// * ErrCodeInvalidTargetException "InvalidTarget" +// The specified target does not exist or is not in the same VPC as the target +// group. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DeregisterTargets +func (c *ELBV2) DeregisterTargets(input *DeregisterTargetsInput) (*DeregisterTargetsOutput, error) { + req, out := c.DeregisterTargetsRequest(input) + return out, req.Send() +} + +// DeregisterTargetsWithContext is the same as DeregisterTargets with the addition of +// the ability to pass a context and additional request options. +// +// See DeregisterTargets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELBV2) DeregisterTargetsWithContext(ctx aws.Context, input *DeregisterTargetsInput, opts ...request.Option) (*DeregisterTargetsOutput, error) { + req, out := c.DeregisterTargetsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeAccountLimits = "DescribeAccountLimits" + +// DescribeAccountLimitsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAccountLimits operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeAccountLimits for more information on using the DescribeAccountLimits +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeAccountLimitsRequest method. 
+// req, resp := client.DescribeAccountLimitsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DescribeAccountLimits +func (c *ELBV2) DescribeAccountLimitsRequest(input *DescribeAccountLimitsInput) (req *request.Request, output *DescribeAccountLimitsOutput) { + op := &request.Operation{ + Name: opDescribeAccountLimits, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAccountLimitsInput{} + } + + output = &DescribeAccountLimitsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeAccountLimits API operation for Elastic Load Balancing. +// +// Describes the current Elastic Load Balancing resource limits for your AWS +// account. +// +// For more information, see Limits for Your Application Load Balancers (http://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-limits.html) +// in the Application Load Balancer Guide or Limits for Your Network Load Balancers +// (http://docs.aws.amazon.com/elasticloadbalancing/latest/network/load-balancer-limits.html) +// in the Network Load Balancers Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Elastic Load Balancing's +// API operation DescribeAccountLimits for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DescribeAccountLimits +func (c *ELBV2) DescribeAccountLimits(input *DescribeAccountLimitsInput) (*DescribeAccountLimitsOutput, error) { + req, out := c.DescribeAccountLimitsRequest(input) + return out, req.Send() +} + +// DescribeAccountLimitsWithContext is the same as DescribeAccountLimits with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeAccountLimits for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELBV2) DescribeAccountLimitsWithContext(ctx aws.Context, input *DescribeAccountLimitsInput, opts ...request.Option) (*DescribeAccountLimitsOutput, error) { + req, out := c.DescribeAccountLimitsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeListeners = "DescribeListeners" + +// DescribeListenersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeListeners operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeListeners for more information on using the DescribeListeners +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeListenersRequest method. 
+// req, resp := client.DescribeListenersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DescribeListeners +func (c *ELBV2) DescribeListenersRequest(input *DescribeListenersInput) (req *request.Request, output *DescribeListenersOutput) { + op := &request.Operation{ + Name: opDescribeListeners, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeListenersInput{} + } + + output = &DescribeListenersOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeListeners API operation for Elastic Load Balancing. +// +// Describes the specified listeners or the listeners for the specified Application +// Load Balancer or Network Load Balancer. You must specify either a load balancer +// or one or more listeners. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Elastic Load Balancing's +// API operation DescribeListeners for usage and error information. +// +// Returned Error Codes: +// * ErrCodeListenerNotFoundException "ListenerNotFound" +// The specified listener does not exist. +// +// * ErrCodeLoadBalancerNotFoundException "LoadBalancerNotFound" +// The specified load balancer does not exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DescribeListeners +func (c *ELBV2) DescribeListeners(input *DescribeListenersInput) (*DescribeListenersOutput, error) { + req, out := c.DescribeListenersRequest(input) + return out, req.Send() +} + +// DescribeListenersWithContext is the same as DescribeListeners with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeListeners for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELBV2) DescribeListenersWithContext(ctx aws.Context, input *DescribeListenersInput, opts ...request.Option) (*DescribeListenersOutput, error) { + req, out := c.DescribeListenersRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeListenersPages iterates over the pages of a DescribeListeners operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeListeners method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeListeners operation. 
+// pageNum := 0 +// err := client.DescribeListenersPages(params, +// func(page *DescribeListenersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ELBV2) DescribeListenersPages(input *DescribeListenersInput, fn func(*DescribeListenersOutput, bool) bool) error { + return c.DescribeListenersPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeListenersPagesWithContext same as DescribeListenersPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELBV2) DescribeListenersPagesWithContext(ctx aws.Context, input *DescribeListenersInput, fn func(*DescribeListenersOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeListenersInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeListenersRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*DescribeListenersOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opDescribeLoadBalancerAttributes = "DescribeLoadBalancerAttributes" + +// DescribeLoadBalancerAttributesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLoadBalancerAttributes operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeLoadBalancerAttributes for more information on using the DescribeLoadBalancerAttributes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeLoadBalancerAttributesRequest method. +// req, resp := client.DescribeLoadBalancerAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DescribeLoadBalancerAttributes +func (c *ELBV2) DescribeLoadBalancerAttributesRequest(input *DescribeLoadBalancerAttributesInput) (req *request.Request, output *DescribeLoadBalancerAttributesOutput) { + op := &request.Operation{ + Name: opDescribeLoadBalancerAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeLoadBalancerAttributesInput{} + } + + output = &DescribeLoadBalancerAttributesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeLoadBalancerAttributes API operation for Elastic Load Balancing. +// +// Describes the attributes for the specified Application Load Balancer or Network +// Load Balancer. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
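+//
+// A minimal sketch of that pattern, assuming an existing ELBV2 client "svc" and
+// a load balancer ARN string "lbArn" (the "awserr" package is imported from
+// github.com/aws/aws-sdk-go/aws/awserr):
+//
+//    attrs, err := svc.DescribeLoadBalancerAttributes(&DescribeLoadBalancerAttributesInput{
+//        LoadBalancerArn: aws.String(lbArn),
+//    })
+//    if aerr, ok := err.(awserr.Error); ok && aerr.Code() == ErrCodeLoadBalancerNotFoundException {
+//        fmt.Println("load balancer not found:", aerr.Message())
+//    } else if err == nil {
+//        fmt.Println(attrs.Attributes)
+//    }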
+// +// See the AWS API reference guide for Elastic Load Balancing's +// API operation DescribeLoadBalancerAttributes for usage and error information. +// +// Returned Error Codes: +// * ErrCodeLoadBalancerNotFoundException "LoadBalancerNotFound" +// The specified load balancer does not exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DescribeLoadBalancerAttributes +func (c *ELBV2) DescribeLoadBalancerAttributes(input *DescribeLoadBalancerAttributesInput) (*DescribeLoadBalancerAttributesOutput, error) { + req, out := c.DescribeLoadBalancerAttributesRequest(input) + return out, req.Send() +} + +// DescribeLoadBalancerAttributesWithContext is the same as DescribeLoadBalancerAttributes with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeLoadBalancerAttributes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELBV2) DescribeLoadBalancerAttributesWithContext(ctx aws.Context, input *DescribeLoadBalancerAttributesInput, opts ...request.Option) (*DescribeLoadBalancerAttributesOutput, error) { + req, out := c.DescribeLoadBalancerAttributesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeLoadBalancers = "DescribeLoadBalancers" + +// DescribeLoadBalancersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLoadBalancers operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeLoadBalancers for more information on using the DescribeLoadBalancers +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeLoadBalancersRequest method. +// req, resp := client.DescribeLoadBalancersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DescribeLoadBalancers +func (c *ELBV2) DescribeLoadBalancersRequest(input *DescribeLoadBalancersInput) (req *request.Request, output *DescribeLoadBalancersOutput) { + op := &request.Operation{ + Name: opDescribeLoadBalancers, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeLoadBalancersInput{} + } + + output = &DescribeLoadBalancersOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeLoadBalancers API operation for Elastic Load Balancing. +// +// Describes the specified load balancers or all of your load balancers. +// +// To describe the listeners for a load balancer, use DescribeListeners. To +// describe the attributes for a load balancer, use DescribeLoadBalancerAttributes. 
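+//
+// For example, to look up a single load balancer by name (a sketch, assuming an
+// existing ELBV2 client "svc"; the name is a placeholder):
+//
+//    out, err := svc.DescribeLoadBalancers(&DescribeLoadBalancersInput{
+//        Names: []*string{aws.String("my-load-balancer")},
+//    })
+//    if err == nil && len(out.LoadBalancers) > 0 {
+//        fmt.Println(aws.StringValue(out.LoadBalancers[0].LoadBalancerArn))
+//    }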
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Elastic Load Balancing's +// API operation DescribeLoadBalancers for usage and error information. +// +// Returned Error Codes: +// * ErrCodeLoadBalancerNotFoundException "LoadBalancerNotFound" +// The specified load balancer does not exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DescribeLoadBalancers +func (c *ELBV2) DescribeLoadBalancers(input *DescribeLoadBalancersInput) (*DescribeLoadBalancersOutput, error) { + req, out := c.DescribeLoadBalancersRequest(input) + return out, req.Send() +} + +// DescribeLoadBalancersWithContext is the same as DescribeLoadBalancers with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeLoadBalancers for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELBV2) DescribeLoadBalancersWithContext(ctx aws.Context, input *DescribeLoadBalancersInput, opts ...request.Option) (*DescribeLoadBalancersOutput, error) { + req, out := c.DescribeLoadBalancersRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeLoadBalancersPages iterates over the pages of a DescribeLoadBalancers operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeLoadBalancers method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeLoadBalancers operation. +// pageNum := 0 +// err := client.DescribeLoadBalancersPages(params, +// func(page *DescribeLoadBalancersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ELBV2) DescribeLoadBalancersPages(input *DescribeLoadBalancersInput, fn func(*DescribeLoadBalancersOutput, bool) bool) error { + return c.DescribeLoadBalancersPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeLoadBalancersPagesWithContext same as DescribeLoadBalancersPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELBV2) DescribeLoadBalancersPagesWithContext(ctx aws.Context, input *DescribeLoadBalancersInput, fn func(*DescribeLoadBalancersOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeLoadBalancersInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeLoadBalancersRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
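+			// The paginator invokes NewRequest once per page; the shallow copy of
+			// the input above keeps the caller's DescribeLoadBalancersInput
+			// unchanged while the page marker advances on each request.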
+ return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*DescribeLoadBalancersOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opDescribeRules = "DescribeRules" + +// DescribeRulesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeRules operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeRules for more information on using the DescribeRules +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeRulesRequest method. +// req, resp := client.DescribeRulesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DescribeRules +func (c *ELBV2) DescribeRulesRequest(input *DescribeRulesInput) (req *request.Request, output *DescribeRulesOutput) { + op := &request.Operation{ + Name: opDescribeRules, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeRulesInput{} + } + + output = &DescribeRulesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeRules API operation for Elastic Load Balancing. +// +// Describes the specified rules or the rules for the specified listener. You +// must specify either a listener or one or more rules. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Elastic Load Balancing's +// API operation DescribeRules for usage and error information. +// +// Returned Error Codes: +// * ErrCodeListenerNotFoundException "ListenerNotFound" +// The specified listener does not exist. +// +// * ErrCodeRuleNotFoundException "RuleNotFound" +// The specified rule does not exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DescribeRules +func (c *ELBV2) DescribeRules(input *DescribeRulesInput) (*DescribeRulesOutput, error) { + req, out := c.DescribeRulesRequest(input) + return out, req.Send() +} + +// DescribeRulesWithContext is the same as DescribeRules with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeRules for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELBV2) DescribeRulesWithContext(ctx aws.Context, input *DescribeRulesInput, opts ...request.Option) (*DescribeRulesOutput, error) { + req, out := c.DescribeRulesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
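+	// The context is attached before the caller-supplied request options are
+	// applied, so those options can still inspect or further customize the
+	// request.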
+ return out, req.Send() +} + +const opDescribeSSLPolicies = "DescribeSSLPolicies" + +// DescribeSSLPoliciesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSSLPolicies operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeSSLPolicies for more information on using the DescribeSSLPolicies +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeSSLPoliciesRequest method. +// req, resp := client.DescribeSSLPoliciesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DescribeSSLPolicies +func (c *ELBV2) DescribeSSLPoliciesRequest(input *DescribeSSLPoliciesInput) (req *request.Request, output *DescribeSSLPoliciesOutput) { + op := &request.Operation{ + Name: opDescribeSSLPolicies, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSSLPoliciesInput{} + } + + output = &DescribeSSLPoliciesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeSSLPolicies API operation for Elastic Load Balancing. +// +// Describes the specified policies or all policies used for SSL negotiation. +// +// For more information, see Security Policies (http://docs.aws.amazon.com/elasticloadbalancing/latest/application/create-https-listener.html#describe-ssl-policies) +// in the Application Load Balancers Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Elastic Load Balancing's +// API operation DescribeSSLPolicies for usage and error information. +// +// Returned Error Codes: +// * ErrCodeSSLPolicyNotFoundException "SSLPolicyNotFound" +// The specified SSL policy does not exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DescribeSSLPolicies +func (c *ELBV2) DescribeSSLPolicies(input *DescribeSSLPoliciesInput) (*DescribeSSLPoliciesOutput, error) { + req, out := c.DescribeSSLPoliciesRequest(input) + return out, req.Send() +} + +// DescribeSSLPoliciesWithContext is the same as DescribeSSLPolicies with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeSSLPolicies for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELBV2) DescribeSSLPoliciesWithContext(ctx aws.Context, input *DescribeSSLPoliciesInput, opts ...request.Option) (*DescribeSSLPoliciesOutput, error) { + req, out := c.DescribeSSLPoliciesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opDescribeTags = "DescribeTags" + +// DescribeTagsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTags operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeTags for more information on using the DescribeTags +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeTagsRequest method. +// req, resp := client.DescribeTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DescribeTags +func (c *ELBV2) DescribeTagsRequest(input *DescribeTagsInput) (req *request.Request, output *DescribeTagsOutput) { + op := &request.Operation{ + Name: opDescribeTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTagsInput{} + } + + output = &DescribeTagsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeTags API operation for Elastic Load Balancing. +// +// Describes the tags for the specified resources. You can describe the tags +// for one or more Application Load Balancers, Network Load Balancers, and target +// groups. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Elastic Load Balancing's +// API operation DescribeTags for usage and error information. +// +// Returned Error Codes: +// * ErrCodeLoadBalancerNotFoundException "LoadBalancerNotFound" +// The specified load balancer does not exist. +// +// * ErrCodeTargetGroupNotFoundException "TargetGroupNotFound" +// The specified target group does not exist. +// +// * ErrCodeListenerNotFoundException "ListenerNotFound" +// The specified listener does not exist. +// +// * ErrCodeRuleNotFoundException "RuleNotFound" +// The specified rule does not exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DescribeTags +func (c *ELBV2) DescribeTags(input *DescribeTagsInput) (*DescribeTagsOutput, error) { + req, out := c.DescribeTagsRequest(input) + return out, req.Send() +} + +// DescribeTagsWithContext is the same as DescribeTags with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeTags for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELBV2) DescribeTagsWithContext(ctx aws.Context, input *DescribeTagsInput, opts ...request.Option) (*DescribeTagsOutput, error) { + req, out := c.DescribeTagsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opDescribeTargetGroupAttributes = "DescribeTargetGroupAttributes" + +// DescribeTargetGroupAttributesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTargetGroupAttributes operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeTargetGroupAttributes for more information on using the DescribeTargetGroupAttributes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeTargetGroupAttributesRequest method. +// req, resp := client.DescribeTargetGroupAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DescribeTargetGroupAttributes +func (c *ELBV2) DescribeTargetGroupAttributesRequest(input *DescribeTargetGroupAttributesInput) (req *request.Request, output *DescribeTargetGroupAttributesOutput) { + op := &request.Operation{ + Name: opDescribeTargetGroupAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTargetGroupAttributesInput{} + } + + output = &DescribeTargetGroupAttributesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeTargetGroupAttributes API operation for Elastic Load Balancing. +// +// Describes the attributes for the specified target group. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Elastic Load Balancing's +// API operation DescribeTargetGroupAttributes for usage and error information. +// +// Returned Error Codes: +// * ErrCodeTargetGroupNotFoundException "TargetGroupNotFound" +// The specified target group does not exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DescribeTargetGroupAttributes +func (c *ELBV2) DescribeTargetGroupAttributes(input *DescribeTargetGroupAttributesInput) (*DescribeTargetGroupAttributesOutput, error) { + req, out := c.DescribeTargetGroupAttributesRequest(input) + return out, req.Send() +} + +// DescribeTargetGroupAttributesWithContext is the same as DescribeTargetGroupAttributes with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeTargetGroupAttributes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELBV2) DescribeTargetGroupAttributesWithContext(ctx aws.Context, input *DescribeTargetGroupAttributesInput, opts ...request.Option) (*DescribeTargetGroupAttributesOutput, error) { + req, out := c.DescribeTargetGroupAttributesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opDescribeTargetGroups = "DescribeTargetGroups" + +// DescribeTargetGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTargetGroups operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeTargetGroups for more information on using the DescribeTargetGroups +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeTargetGroupsRequest method. +// req, resp := client.DescribeTargetGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DescribeTargetGroups +func (c *ELBV2) DescribeTargetGroupsRequest(input *DescribeTargetGroupsInput) (req *request.Request, output *DescribeTargetGroupsOutput) { + op := &request.Operation{ + Name: opDescribeTargetGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeTargetGroupsInput{} + } + + output = &DescribeTargetGroupsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeTargetGroups API operation for Elastic Load Balancing. +// +// Describes the specified target groups or all of your target groups. By default, +// all target groups are described. Alternatively, you can specify one of the +// following to filter the results: the ARN of the load balancer, the names +// of one or more target groups, or the ARNs of one or more target groups. +// +// To describe the targets for a target group, use DescribeTargetHealth. To +// describe the attributes of a target group, use DescribeTargetGroupAttributes. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Elastic Load Balancing's +// API operation DescribeTargetGroups for usage and error information. +// +// Returned Error Codes: +// * ErrCodeLoadBalancerNotFoundException "LoadBalancerNotFound" +// The specified load balancer does not exist. +// +// * ErrCodeTargetGroupNotFoundException "TargetGroupNotFound" +// The specified target group does not exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DescribeTargetGroups +func (c *ELBV2) DescribeTargetGroups(input *DescribeTargetGroupsInput) (*DescribeTargetGroupsOutput, error) { + req, out := c.DescribeTargetGroupsRequest(input) + return out, req.Send() +} + +// DescribeTargetGroupsWithContext is the same as DescribeTargetGroups with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeTargetGroups for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELBV2) DescribeTargetGroupsWithContext(ctx aws.Context, input *DescribeTargetGroupsInput, opts ...request.Option) (*DescribeTargetGroupsOutput, error) { + req, out := c.DescribeTargetGroupsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeTargetGroupsPages iterates over the pages of a DescribeTargetGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeTargetGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeTargetGroups operation. +// pageNum := 0 +// err := client.DescribeTargetGroupsPages(params, +// func(page *DescribeTargetGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ELBV2) DescribeTargetGroupsPages(input *DescribeTargetGroupsInput, fn func(*DescribeTargetGroupsOutput, bool) bool) error { + return c.DescribeTargetGroupsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeTargetGroupsPagesWithContext same as DescribeTargetGroupsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELBV2) DescribeTargetGroupsPagesWithContext(ctx aws.Context, input *DescribeTargetGroupsInput, fn func(*DescribeTargetGroupsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeTargetGroupsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeTargetGroupsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*DescribeTargetGroupsOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opDescribeTargetHealth = "DescribeTargetHealth" + +// DescribeTargetHealthRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTargetHealth operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeTargetHealth for more information on using the DescribeTargetHealth +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeTargetHealthRequest method. 
+// req, resp := client.DescribeTargetHealthRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DescribeTargetHealth +func (c *ELBV2) DescribeTargetHealthRequest(input *DescribeTargetHealthInput) (req *request.Request, output *DescribeTargetHealthOutput) { + op := &request.Operation{ + Name: opDescribeTargetHealth, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTargetHealthInput{} + } + + output = &DescribeTargetHealthOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeTargetHealth API operation for Elastic Load Balancing. +// +// Describes the health of the specified targets or all of your targets. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Elastic Load Balancing's +// API operation DescribeTargetHealth for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidTargetException "InvalidTarget" +// The specified target does not exist or is not in the same VPC as the target +// group. +// +// * ErrCodeTargetGroupNotFoundException "TargetGroupNotFound" +// The specified target group does not exist. +// +// * ErrCodeHealthUnavailableException "HealthUnavailable" +// The health of the specified targets could not be retrieved due to an internal +// error. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DescribeTargetHealth +func (c *ELBV2) DescribeTargetHealth(input *DescribeTargetHealthInput) (*DescribeTargetHealthOutput, error) { + req, out := c.DescribeTargetHealthRequest(input) + return out, req.Send() +} + +// DescribeTargetHealthWithContext is the same as DescribeTargetHealth with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeTargetHealth for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELBV2) DescribeTargetHealthWithContext(ctx aws.Context, input *DescribeTargetHealthInput, opts ...request.Option) (*DescribeTargetHealthOutput, error) { + req, out := c.DescribeTargetHealthRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyListener = "ModifyListener" + +// ModifyListenerRequest generates a "aws/request.Request" representing the +// client's request for the ModifyListener operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyListener for more information on using the ModifyListener +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyListenerRequest method. 
+// req, resp := client.ModifyListenerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/ModifyListener +func (c *ELBV2) ModifyListenerRequest(input *ModifyListenerInput) (req *request.Request, output *ModifyListenerOutput) { + op := &request.Operation{ + Name: opModifyListener, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyListenerInput{} + } + + output = &ModifyListenerOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyListener API operation for Elastic Load Balancing. +// +// Modifies the specified properties of the specified listener. +// +// Any properties that you do not specify retain their current values. However, +// changing the protocol from HTTPS to HTTP removes the security policy and +// SSL certificate properties. If you change the protocol from HTTP to HTTPS, +// you must add the security policy and server certificate. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Elastic Load Balancing's +// API operation ModifyListener for usage and error information. +// +// Returned Error Codes: +// * ErrCodeDuplicateListenerException "DuplicateListener" +// A listener with the specified port already exists. +// +// * ErrCodeTooManyListenersException "TooManyListeners" +// You've reached the limit on the number of listeners per load balancer. +// +// * ErrCodeTooManyCertificatesException "TooManyCertificates" +// You've reached the limit on the number of certificates per listener. +// +// * ErrCodeListenerNotFoundException "ListenerNotFound" +// The specified listener does not exist. +// +// * ErrCodeTargetGroupNotFoundException "TargetGroupNotFound" +// The specified target group does not exist. +// +// * ErrCodeTargetGroupAssociationLimitException "TargetGroupAssociationLimit" +// You've reached the limit on the number of load balancers per target group. +// +// * ErrCodeIncompatibleProtocolsException "IncompatibleProtocols" +// The specified configuration is not valid with this protocol. +// +// * ErrCodeSSLPolicyNotFoundException "SSLPolicyNotFound" +// The specified SSL policy does not exist. +// +// * ErrCodeCertificateNotFoundException "CertificateNotFound" +// The specified certificate does not exist. +// +// * ErrCodeInvalidConfigurationRequestException "InvalidConfigurationRequest" +// The requested configuration is not valid. +// +// * ErrCodeUnsupportedProtocolException "UnsupportedProtocol" +// The specified protocol is not supported. +// +// * ErrCodeTooManyRegistrationsForTargetIdException "TooManyRegistrationsForTargetId" +// You've reached the limit on the number of times a target can be registered +// with a load balancer. +// +// * ErrCodeTooManyTargetsException "TooManyTargets" +// You've reached the limit on the number of targets. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/ModifyListener +func (c *ELBV2) ModifyListener(input *ModifyListenerInput) (*ModifyListenerOutput, error) { + req, out := c.ModifyListenerRequest(input) + return out, req.Send() +} + +// ModifyListenerWithContext is the same as ModifyListener with the addition of +// the ability to pass a context and additional request options. 
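+//
+// For example, to bound the call with a timeout (a sketch, assuming an existing
+// ELBV2 client "svc", a listener ARN string "listenerArn", and the standard
+// "context" and "time" packages):
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//    _, err := svc.ModifyListenerWithContext(ctx, &ModifyListenerInput{
+//        ListenerArn: aws.String(listenerArn),
+//        Port:        aws.Int64(8443),
+//    })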
+// +// See ModifyListener for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELBV2) ModifyListenerWithContext(ctx aws.Context, input *ModifyListenerInput, opts ...request.Option) (*ModifyListenerOutput, error) { + req, out := c.ModifyListenerRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyLoadBalancerAttributes = "ModifyLoadBalancerAttributes" + +// ModifyLoadBalancerAttributesRequest generates a "aws/request.Request" representing the +// client's request for the ModifyLoadBalancerAttributes operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyLoadBalancerAttributes for more information on using the ModifyLoadBalancerAttributes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyLoadBalancerAttributesRequest method. +// req, resp := client.ModifyLoadBalancerAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/ModifyLoadBalancerAttributes +func (c *ELBV2) ModifyLoadBalancerAttributesRequest(input *ModifyLoadBalancerAttributesInput) (req *request.Request, output *ModifyLoadBalancerAttributesOutput) { + op := &request.Operation{ + Name: opModifyLoadBalancerAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyLoadBalancerAttributesInput{} + } + + output = &ModifyLoadBalancerAttributesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyLoadBalancerAttributes API operation for Elastic Load Balancing. +// +// Modifies the specified attributes of the specified Application Load Balancer +// or Network Load Balancer. +// +// If any of the specified attributes can't be modified as requested, the call +// fails. Any existing attributes that you do not modify retain their current +// values. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Elastic Load Balancing's +// API operation ModifyLoadBalancerAttributes for usage and error information. +// +// Returned Error Codes: +// * ErrCodeLoadBalancerNotFoundException "LoadBalancerNotFound" +// The specified load balancer does not exist. +// +// * ErrCodeInvalidConfigurationRequestException "InvalidConfigurationRequest" +// The requested configuration is not valid. 
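+//
+// For example, to enable deletion protection (a sketch, assuming an existing
+// ELBV2 client "svc" and a load balancer ARN string "lbArn";
+// "deletion_protection.enabled" is one of the documented load balancer
+// attribute keys):
+//
+//    _, err := svc.ModifyLoadBalancerAttributes(&ModifyLoadBalancerAttributesInput{
+//        LoadBalancerArn: aws.String(lbArn),
+//        Attributes: []*LoadBalancerAttribute{
+//            {Key: aws.String("deletion_protection.enabled"), Value: aws.String("true")},
+//        },
+//    })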
+// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/ModifyLoadBalancerAttributes +func (c *ELBV2) ModifyLoadBalancerAttributes(input *ModifyLoadBalancerAttributesInput) (*ModifyLoadBalancerAttributesOutput, error) { + req, out := c.ModifyLoadBalancerAttributesRequest(input) + return out, req.Send() +} + +// ModifyLoadBalancerAttributesWithContext is the same as ModifyLoadBalancerAttributes with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyLoadBalancerAttributes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELBV2) ModifyLoadBalancerAttributesWithContext(ctx aws.Context, input *ModifyLoadBalancerAttributesInput, opts ...request.Option) (*ModifyLoadBalancerAttributesOutput, error) { + req, out := c.ModifyLoadBalancerAttributesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyRule = "ModifyRule" + +// ModifyRuleRequest generates a "aws/request.Request" representing the +// client's request for the ModifyRule operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyRule for more information on using the ModifyRule +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyRuleRequest method. +// req, resp := client.ModifyRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/ModifyRule +func (c *ELBV2) ModifyRuleRequest(input *ModifyRuleInput) (req *request.Request, output *ModifyRuleOutput) { + op := &request.Operation{ + Name: opModifyRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyRuleInput{} + } + + output = &ModifyRuleOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyRule API operation for Elastic Load Balancing. +// +// Modifies the specified rule. +// +// Any existing properties that you do not modify retain their current values. +// +// To modify the default action, use ModifyListener. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Elastic Load Balancing's +// API operation ModifyRule for usage and error information. +// +// Returned Error Codes: +// * ErrCodeTargetGroupAssociationLimitException "TargetGroupAssociationLimit" +// You've reached the limit on the number of load balancers per target group. +// +// * ErrCodeIncompatibleProtocolsException "IncompatibleProtocols" +// The specified configuration is not valid with this protocol. 
+// +// * ErrCodeRuleNotFoundException "RuleNotFound" +// The specified rule does not exist. +// +// * ErrCodeOperationNotPermittedException "OperationNotPermitted" +// This operation is not allowed. +// +// * ErrCodeTooManyRegistrationsForTargetIdException "TooManyRegistrationsForTargetId" +// You've reached the limit on the number of times a target can be registered +// with a load balancer. +// +// * ErrCodeTooManyTargetsException "TooManyTargets" +// You've reached the limit on the number of targets. +// +// * ErrCodeTargetGroupNotFoundException "TargetGroupNotFound" +// The specified target group does not exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/ModifyRule +func (c *ELBV2) ModifyRule(input *ModifyRuleInput) (*ModifyRuleOutput, error) { + req, out := c.ModifyRuleRequest(input) + return out, req.Send() +} + +// ModifyRuleWithContext is the same as ModifyRule with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyRule for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELBV2) ModifyRuleWithContext(ctx aws.Context, input *ModifyRuleInput, opts ...request.Option) (*ModifyRuleOutput, error) { + req, out := c.ModifyRuleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyTargetGroup = "ModifyTargetGroup" + +// ModifyTargetGroupRequest generates a "aws/request.Request" representing the +// client's request for the ModifyTargetGroup operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyTargetGroup for more information on using the ModifyTargetGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyTargetGroupRequest method. +// req, resp := client.ModifyTargetGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/ModifyTargetGroup +func (c *ELBV2) ModifyTargetGroupRequest(input *ModifyTargetGroupInput) (req *request.Request, output *ModifyTargetGroupOutput) { + op := &request.Operation{ + Name: opModifyTargetGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyTargetGroupInput{} + } + + output = &ModifyTargetGroupOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyTargetGroup API operation for Elastic Load Balancing. +// +// Modifies the health checks used when evaluating the health state of the targets +// in the specified target group. +// +// To monitor the health of the targets, use DescribeTargetHealth. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Elastic Load Balancing's +// API operation ModifyTargetGroup for usage and error information. +// +// Returned Error Codes: +// * ErrCodeTargetGroupNotFoundException "TargetGroupNotFound" +// The specified target group does not exist. +// +// * ErrCodeInvalidConfigurationRequestException "InvalidConfigurationRequest" +// The requested configuration is not valid. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/ModifyTargetGroup +func (c *ELBV2) ModifyTargetGroup(input *ModifyTargetGroupInput) (*ModifyTargetGroupOutput, error) { + req, out := c.ModifyTargetGroupRequest(input) + return out, req.Send() +} + +// ModifyTargetGroupWithContext is the same as ModifyTargetGroup with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyTargetGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELBV2) ModifyTargetGroupWithContext(ctx aws.Context, input *ModifyTargetGroupInput, opts ...request.Option) (*ModifyTargetGroupOutput, error) { + req, out := c.ModifyTargetGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyTargetGroupAttributes = "ModifyTargetGroupAttributes" + +// ModifyTargetGroupAttributesRequest generates a "aws/request.Request" representing the +// client's request for the ModifyTargetGroupAttributes operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyTargetGroupAttributes for more information on using the ModifyTargetGroupAttributes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyTargetGroupAttributesRequest method. +// req, resp := client.ModifyTargetGroupAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/ModifyTargetGroupAttributes +func (c *ELBV2) ModifyTargetGroupAttributesRequest(input *ModifyTargetGroupAttributesInput) (req *request.Request, output *ModifyTargetGroupAttributesOutput) { + op := &request.Operation{ + Name: opModifyTargetGroupAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyTargetGroupAttributesInput{} + } + + output = &ModifyTargetGroupAttributesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyTargetGroupAttributes API operation for Elastic Load Balancing. +// +// Modifies the specified attributes of the specified target group. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Elastic Load Balancing's +// API operation ModifyTargetGroupAttributes for usage and error information. +// +// Returned Error Codes: +// * ErrCodeTargetGroupNotFoundException "TargetGroupNotFound" +// The specified target group does not exist. +// +// * ErrCodeInvalidConfigurationRequestException "InvalidConfigurationRequest" +// The requested configuration is not valid. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/ModifyTargetGroupAttributes +func (c *ELBV2) ModifyTargetGroupAttributes(input *ModifyTargetGroupAttributesInput) (*ModifyTargetGroupAttributesOutput, error) { + req, out := c.ModifyTargetGroupAttributesRequest(input) + return out, req.Send() +} + +// ModifyTargetGroupAttributesWithContext is the same as ModifyTargetGroupAttributes with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyTargetGroupAttributes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELBV2) ModifyTargetGroupAttributesWithContext(ctx aws.Context, input *ModifyTargetGroupAttributesInput, opts ...request.Option) (*ModifyTargetGroupAttributesOutput, error) { + req, out := c.ModifyTargetGroupAttributesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRegisterTargets = "RegisterTargets" + +// RegisterTargetsRequest generates a "aws/request.Request" representing the +// client's request for the RegisterTargets operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RegisterTargets for more information on using the RegisterTargets +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RegisterTargetsRequest method. +// req, resp := client.RegisterTargetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/RegisterTargets +func (c *ELBV2) RegisterTargetsRequest(input *RegisterTargetsInput) (req *request.Request, output *RegisterTargetsOutput) { + op := &request.Operation{ + Name: opRegisterTargets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterTargetsInput{} + } + + output = &RegisterTargetsOutput{} + req = c.newRequest(op, input, output) + return +} + +// RegisterTargets API operation for Elastic Load Balancing. +// +// Registers the specified targets with the specified target group. +// +// By default, the load balancer routes requests to registered targets using +// the protocol and port number for the target group. 
Alternatively, you can +// override the port for a target when you register it. +// +// The target must be in the virtual private cloud (VPC) that you specified +// for the target group. If the target is an EC2 instance, it must be in the +// running state when you register it. +// +// Network Load Balancers do not support the following instance types as targets: +// C1, CC1, CC2, CG1, CG2, CR1, CS1, G1, G2, HI1, HS1, M1, M2, M3, and T1. +// +// To remove a target from a target group, use DeregisterTargets. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Elastic Load Balancing's +// API operation RegisterTargets for usage and error information. +// +// Returned Error Codes: +// * ErrCodeTargetGroupNotFoundException "TargetGroupNotFound" +// The specified target group does not exist. +// +// * ErrCodeTooManyTargetsException "TooManyTargets" +// You've reached the limit on the number of targets. +// +// * ErrCodeInvalidTargetException "InvalidTarget" +// The specified target does not exist or is not in the same VPC as the target +// group. +// +// * ErrCodeTooManyRegistrationsForTargetIdException "TooManyRegistrationsForTargetId" +// You've reached the limit on the number of times a target can be registered +// with a load balancer. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/RegisterTargets +func (c *ELBV2) RegisterTargets(input *RegisterTargetsInput) (*RegisterTargetsOutput, error) { + req, out := c.RegisterTargetsRequest(input) + return out, req.Send() +} + +// RegisterTargetsWithContext is the same as RegisterTargets with the addition of +// the ability to pass a context and additional request options. +// +// See RegisterTargets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELBV2) RegisterTargetsWithContext(ctx aws.Context, input *RegisterTargetsInput, opts ...request.Option) (*RegisterTargetsOutput, error) { + req, out := c.RegisterTargetsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRemoveTags = "RemoveTags" + +// RemoveTagsRequest generates a "aws/request.Request" representing the +// client's request for the RemoveTags operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RemoveTags for more information on using the RemoveTags +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RemoveTagsRequest method. 
+// req, resp := client.RemoveTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/RemoveTags +func (c *ELBV2) RemoveTagsRequest(input *RemoveTagsInput) (req *request.Request, output *RemoveTagsOutput) { + op := &request.Operation{ + Name: opRemoveTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveTagsInput{} + } + + output = &RemoveTagsOutput{} + req = c.newRequest(op, input, output) + return +} + +// RemoveTags API operation for Elastic Load Balancing. +// +// Removes the specified tags from the specified Elastic Load Balancing resource. +// +// To list the current tags for your resources, use DescribeTags. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Elastic Load Balancing's +// API operation RemoveTags for usage and error information. +// +// Returned Error Codes: +// * ErrCodeLoadBalancerNotFoundException "LoadBalancerNotFound" +// The specified load balancer does not exist. +// +// * ErrCodeTargetGroupNotFoundException "TargetGroupNotFound" +// The specified target group does not exist. +// +// * ErrCodeListenerNotFoundException "ListenerNotFound" +// The specified listener does not exist. +// +// * ErrCodeRuleNotFoundException "RuleNotFound" +// The specified rule does not exist. +// +// * ErrCodeTooManyTagsException "TooManyTags" +// You've reached the limit on the number of tags per load balancer. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/RemoveTags +func (c *ELBV2) RemoveTags(input *RemoveTagsInput) (*RemoveTagsOutput, error) { + req, out := c.RemoveTagsRequest(input) + return out, req.Send() +} + +// RemoveTagsWithContext is the same as RemoveTags with the addition of +// the ability to pass a context and additional request options. +// +// See RemoveTags for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELBV2) RemoveTagsWithContext(ctx aws.Context, input *RemoveTagsInput, opts ...request.Option) (*RemoveTagsOutput, error) { + req, out := c.RemoveTagsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opSetIpAddressType = "SetIpAddressType" + +// SetIpAddressTypeRequest generates a "aws/request.Request" representing the +// client's request for the SetIpAddressType operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SetIpAddressType for more information on using the SetIpAddressType +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SetIpAddressTypeRequest method. 
+// req, resp := client.SetIpAddressTypeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/SetIpAddressType +func (c *ELBV2) SetIpAddressTypeRequest(input *SetIpAddressTypeInput) (req *request.Request, output *SetIpAddressTypeOutput) { + op := &request.Operation{ + Name: opSetIpAddressType, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetIpAddressTypeInput{} + } + + output = &SetIpAddressTypeOutput{} + req = c.newRequest(op, input, output) + return +} + +// SetIpAddressType API operation for Elastic Load Balancing. +// +// Sets the type of IP addresses used by the subnets of the specified Application +// Load Balancer or Network Load Balancer. +// +// Note that Network Load Balancers must use ipv4. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Elastic Load Balancing's +// API operation SetIpAddressType for usage and error information. +// +// Returned Error Codes: +// * ErrCodeLoadBalancerNotFoundException "LoadBalancerNotFound" +// The specified load balancer does not exist. +// +// * ErrCodeInvalidConfigurationRequestException "InvalidConfigurationRequest" +// The requested configuration is not valid. +// +// * ErrCodeInvalidSubnetException "InvalidSubnet" +// The specified subnet is out of available addresses. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/SetIpAddressType +func (c *ELBV2) SetIpAddressType(input *SetIpAddressTypeInput) (*SetIpAddressTypeOutput, error) { + req, out := c.SetIpAddressTypeRequest(input) + return out, req.Send() +} + +// SetIpAddressTypeWithContext is the same as SetIpAddressType with the addition of +// the ability to pass a context and additional request options. +// +// See SetIpAddressType for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELBV2) SetIpAddressTypeWithContext(ctx aws.Context, input *SetIpAddressTypeInput, opts ...request.Option) (*SetIpAddressTypeOutput, error) { + req, out := c.SetIpAddressTypeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opSetRulePriorities = "SetRulePriorities" + +// SetRulePrioritiesRequest generates a "aws/request.Request" representing the +// client's request for the SetRulePriorities operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SetRulePriorities for more information on using the SetRulePriorities +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SetRulePrioritiesRequest method. 
+// req, resp := client.SetRulePrioritiesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/SetRulePriorities +func (c *ELBV2) SetRulePrioritiesRequest(input *SetRulePrioritiesInput) (req *request.Request, output *SetRulePrioritiesOutput) { + op := &request.Operation{ + Name: opSetRulePriorities, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetRulePrioritiesInput{} + } + + output = &SetRulePrioritiesOutput{} + req = c.newRequest(op, input, output) + return +} + +// SetRulePriorities API operation for Elastic Load Balancing. +// +// Sets the priorities of the specified rules. +// +// You can reorder the rules as long as there are no priority conflicts in the +// new order. Any existing rules that you do not specify retain their current +// priority. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Elastic Load Balancing's +// API operation SetRulePriorities for usage and error information. +// +// Returned Error Codes: +// * ErrCodeRuleNotFoundException "RuleNotFound" +// The specified rule does not exist. +// +// * ErrCodePriorityInUseException "PriorityInUse" +// The specified priority is in use. +// +// * ErrCodeOperationNotPermittedException "OperationNotPermitted" +// This operation is not allowed. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/SetRulePriorities +func (c *ELBV2) SetRulePriorities(input *SetRulePrioritiesInput) (*SetRulePrioritiesOutput, error) { + req, out := c.SetRulePrioritiesRequest(input) + return out, req.Send() +} + +// SetRulePrioritiesWithContext is the same as SetRulePriorities with the addition of +// the ability to pass a context and additional request options. +// +// See SetRulePriorities for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELBV2) SetRulePrioritiesWithContext(ctx aws.Context, input *SetRulePrioritiesInput, opts ...request.Option) (*SetRulePrioritiesOutput, error) { + req, out := c.SetRulePrioritiesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opSetSecurityGroups = "SetSecurityGroups" + +// SetSecurityGroupsRequest generates a "aws/request.Request" representing the +// client's request for the SetSecurityGroups operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SetSecurityGroups for more information on using the SetSecurityGroups +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SetSecurityGroupsRequest method. 
+// req, resp := client.SetSecurityGroupsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/SetSecurityGroups
+func (c *ELBV2) SetSecurityGroupsRequest(input *SetSecurityGroupsInput) (req *request.Request, output *SetSecurityGroupsOutput) {
+ op := &request.Operation{
+ Name: opSetSecurityGroups,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &SetSecurityGroupsInput{}
+ }
+
+ output = &SetSecurityGroupsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// SetSecurityGroups API operation for Elastic Load Balancing.
+//
+// Associates the specified security groups with the specified Application Load
+// Balancer. The specified security groups override the previously associated
+// security groups.
+//
+// Note that you can't specify a security group for a Network Load Balancer.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Elastic Load Balancing's
+// API operation SetSecurityGroups for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeLoadBalancerNotFoundException "LoadBalancerNotFound"
+// The specified load balancer does not exist.
+//
+// * ErrCodeInvalidConfigurationRequestException "InvalidConfigurationRequest"
+// The requested configuration is not valid.
+//
+// * ErrCodeInvalidSecurityGroupException "InvalidSecurityGroup"
+// The specified security group does not exist.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/SetSecurityGroups
+func (c *ELBV2) SetSecurityGroups(input *SetSecurityGroupsInput) (*SetSecurityGroupsOutput, error) {
+ req, out := c.SetSecurityGroupsRequest(input)
+ return out, req.Send()
+}
+
+// SetSecurityGroupsWithContext is the same as SetSecurityGroups with the addition of
+// the ability to pass a context and additional request options.
+//
+// See SetSecurityGroups for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *ELBV2) SetSecurityGroupsWithContext(ctx aws.Context, input *SetSecurityGroupsInput, opts ...request.Option) (*SetSecurityGroupsOutput, error) {
+ req, out := c.SetSecurityGroupsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opSetSubnets = "SetSubnets"
+
+// SetSubnetsRequest generates a "aws/request.Request" representing the
+// client's request for the SetSubnets operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See SetSubnets for more information on using the SetSubnets
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
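+//
+// As an illustrative aside (not part of the generated SDK output, and using
+// the same client/params placeholders as the example below), a custom header
+// could be attached to the underlying HTTP request before it is sent; the
+// header name and value here are placeholders:
+//
+//    req, resp := client.SetSubnetsRequest(params)
+//    req.HTTPRequest.Header.Set("X-Example-Trace-Id", "abc123")
+//    if err := req.Send(); err == nil {
+//        fmt.Println(resp)
+//    }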
+// +// +// // Example sending a request using the SetSubnetsRequest method. +// req, resp := client.SetSubnetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/SetSubnets +func (c *ELBV2) SetSubnetsRequest(input *SetSubnetsInput) (req *request.Request, output *SetSubnetsOutput) { + op := &request.Operation{ + Name: opSetSubnets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetSubnetsInput{} + } + + output = &SetSubnetsOutput{} + req = c.newRequest(op, input, output) + return +} + +// SetSubnets API operation for Elastic Load Balancing. +// +// Enables the Availability Zone for the specified subnets for the specified +// Application Load Balancer. The specified subnets replace the previously enabled +// subnets. +// +// Note that you can't change the subnets for a Network Load Balancer. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Elastic Load Balancing's +// API operation SetSubnets for usage and error information. +// +// Returned Error Codes: +// * ErrCodeLoadBalancerNotFoundException "LoadBalancerNotFound" +// The specified load balancer does not exist. +// +// * ErrCodeInvalidConfigurationRequestException "InvalidConfigurationRequest" +// The requested configuration is not valid. +// +// * ErrCodeSubnetNotFoundException "SubnetNotFound" +// The specified subnet does not exist. +// +// * ErrCodeInvalidSubnetException "InvalidSubnet" +// The specified subnet is out of available addresses. +// +// * ErrCodeAllocationIdNotFoundException "AllocationIdNotFound" +// The specified allocation ID does not exist. +// +// * ErrCodeAvailabilityZoneNotSupportedException "AvailabilityZoneNotSupported" +// The specified Availability Zone is not supported. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/SetSubnets +func (c *ELBV2) SetSubnets(input *SetSubnetsInput) (*SetSubnetsOutput, error) { + req, out := c.SetSubnetsRequest(input) + return out, req.Send() +} + +// SetSubnetsWithContext is the same as SetSubnets with the addition of +// the ability to pass a context and additional request options. +// +// See SetSubnets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELBV2) SetSubnetsWithContext(ctx aws.Context, input *SetSubnetsInput, opts ...request.Option) (*SetSubnetsOutput, error) { + req, out := c.SetSubnetsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// Information about an action. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/Action +type Action struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the target group. + // + // TargetGroupArn is a required field + TargetGroupArn *string `type:"string" required:"true"` + + // The type of action. 
+ // + // Type is a required field + Type *string `type:"string" required:"true" enum:"ActionTypeEnum"` +} + +// String returns the string representation +func (s Action) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Action) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Action) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Action"} + if s.TargetGroupArn == nil { + invalidParams.Add(request.NewErrParamRequired("TargetGroupArn")) + } + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTargetGroupArn sets the TargetGroupArn field's value. +func (s *Action) SetTargetGroupArn(v string) *Action { + s.TargetGroupArn = &v + return s +} + +// SetType sets the Type field's value. +func (s *Action) SetType(v string) *Action { + s.Type = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/AddTagsInput +type AddTagsInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource. + // + // ResourceArns is a required field + ResourceArns []*string `type:"list" required:"true"` + + // The tags. Each resource can have a maximum of 10 tags. + // + // Tags is a required field + Tags []*Tag `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s AddTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AddTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddTagsInput"} + if s.ResourceArns == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArns")) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArns sets the ResourceArns field's value. +func (s *AddTagsInput) SetResourceArns(v []*string) *AddTagsInput { + s.ResourceArns = v + return s +} + +// SetTags sets the Tags field's value. +func (s *AddTagsInput) SetTags(v []*Tag) *AddTagsInput { + s.Tags = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/AddTagsOutput +type AddTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsOutput) GoString() string { + return s.String() +} + +// Information about an Availability Zone. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/AvailabilityZone +type AvailabilityZone struct { + _ struct{} `type:"structure"` + + // [Network Load Balancers] The static IP address. 
+ LoadBalancerAddresses []*LoadBalancerAddress `type:"list"` + + // The ID of the subnet. + SubnetId *string `type:"string"` + + // The name of the Availability Zone. + ZoneName *string `type:"string"` +} + +// String returns the string representation +func (s AvailabilityZone) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AvailabilityZone) GoString() string { + return s.String() +} + +// SetLoadBalancerAddresses sets the LoadBalancerAddresses field's value. +func (s *AvailabilityZone) SetLoadBalancerAddresses(v []*LoadBalancerAddress) *AvailabilityZone { + s.LoadBalancerAddresses = v + return s +} + +// SetSubnetId sets the SubnetId field's value. +func (s *AvailabilityZone) SetSubnetId(v string) *AvailabilityZone { + s.SubnetId = &v + return s +} + +// SetZoneName sets the ZoneName field's value. +func (s *AvailabilityZone) SetZoneName(v string) *AvailabilityZone { + s.ZoneName = &v + return s +} + +// Information about an SSL server certificate deployed on a load balancer. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/Certificate +type Certificate struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the certificate. + CertificateArn *string `type:"string"` +} + +// String returns the string representation +func (s Certificate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Certificate) GoString() string { + return s.String() +} + +// SetCertificateArn sets the CertificateArn field's value. +func (s *Certificate) SetCertificateArn(v string) *Certificate { + s.CertificateArn = &v + return s +} + +// Information about a cipher used in a policy. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/Cipher +type Cipher struct { + _ struct{} `type:"structure"` + + // The name of the cipher. + Name *string `type:"string"` + + // The priority of the cipher. + Priority *int64 `type:"integer"` +} + +// String returns the string representation +func (s Cipher) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Cipher) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *Cipher) SetName(v string) *Cipher { + s.Name = &v + return s +} + +// SetPriority sets the Priority field's value. +func (s *Cipher) SetPriority(v int64) *Cipher { + s.Priority = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/CreateListenerInput +type CreateListenerInput struct { + _ struct{} `type:"structure"` + + // [HTTPS listeners] The SSL server certificate. You must provide exactly one + // certificate. + Certificates []*Certificate `type:"list"` + + // The default action for the listener. For Application Load Balancers, the + // protocol of the specified target group must be HTTP or HTTPS. For Network + // Load Balancers, the protocol of the specified target group must be TCP. + // + // DefaultActions is a required field + DefaultActions []*Action `type:"list" required:"true"` + + // The Amazon Resource Name (ARN) of the load balancer. + // + // LoadBalancerArn is a required field + LoadBalancerArn *string `type:"string" required:"true"` + + // The port on which the load balancer is listening. 
+ // + // Port is a required field + Port *int64 `min:"1" type:"integer" required:"true"` + + // The protocol for connections from clients to the load balancer. For Application + // Load Balancers, the supported protocols are HTTP and HTTPS. For Network Load + // Balancers, the supported protocol is TCP. + // + // Protocol is a required field + Protocol *string `type:"string" required:"true" enum:"ProtocolEnum"` + + // [HTTPS listeners] The security policy that defines which ciphers and protocols + // are supported. The default is the current predefined security policy. + SslPolicy *string `type:"string"` +} + +// String returns the string representation +func (s CreateListenerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateListenerInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateListenerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateListenerInput"} + if s.DefaultActions == nil { + invalidParams.Add(request.NewErrParamRequired("DefaultActions")) + } + if s.LoadBalancerArn == nil { + invalidParams.Add(request.NewErrParamRequired("LoadBalancerArn")) + } + if s.Port == nil { + invalidParams.Add(request.NewErrParamRequired("Port")) + } + if s.Port != nil && *s.Port < 1 { + invalidParams.Add(request.NewErrParamMinValue("Port", 1)) + } + if s.Protocol == nil { + invalidParams.Add(request.NewErrParamRequired("Protocol")) + } + if s.DefaultActions != nil { + for i, v := range s.DefaultActions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "DefaultActions", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCertificates sets the Certificates field's value. +func (s *CreateListenerInput) SetCertificates(v []*Certificate) *CreateListenerInput { + s.Certificates = v + return s +} + +// SetDefaultActions sets the DefaultActions field's value. +func (s *CreateListenerInput) SetDefaultActions(v []*Action) *CreateListenerInput { + s.DefaultActions = v + return s +} + +// SetLoadBalancerArn sets the LoadBalancerArn field's value. +func (s *CreateListenerInput) SetLoadBalancerArn(v string) *CreateListenerInput { + s.LoadBalancerArn = &v + return s +} + +// SetPort sets the Port field's value. +func (s *CreateListenerInput) SetPort(v int64) *CreateListenerInput { + s.Port = &v + return s +} + +// SetProtocol sets the Protocol field's value. +func (s *CreateListenerInput) SetProtocol(v string) *CreateListenerInput { + s.Protocol = &v + return s +} + +// SetSslPolicy sets the SslPolicy field's value. +func (s *CreateListenerInput) SetSslPolicy(v string) *CreateListenerInput { + s.SslPolicy = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/CreateListenerOutput +type CreateListenerOutput struct { + _ struct{} `type:"structure"` + + // Information about the listener. + Listeners []*Listener `type:"list"` +} + +// String returns the string representation +func (s CreateListenerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateListenerOutput) GoString() string { + return s.String() +} + +// SetListeners sets the Listeners field's value. 
+func (s *CreateListenerOutput) SetListeners(v []*Listener) *CreateListenerOutput { + s.Listeners = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/CreateLoadBalancerInput +type CreateLoadBalancerInput struct { + _ struct{} `type:"structure"` + + // [Application Load Balancers] The type of IP addresses used by the subnets + // for your load balancer. The possible values are ipv4 (for IPv4 addresses) + // and dualstack (for IPv4 and IPv6 addresses). Internal load balancers must + // use ipv4. + IpAddressType *string `type:"string" enum:"IpAddressType"` + + // The name of the load balancer. + // + // This name must be unique per region per account, can have a maximum of 32 + // characters, must contain only alphanumeric characters or hyphens, and must + // not begin or end with a hyphen. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // The nodes of an Internet-facing load balancer have public IP addresses. The + // DNS name of an Internet-facing load balancer is publicly resolvable to the + // public IP addresses of the nodes. Therefore, Internet-facing load balancers + // can route requests from clients over the Internet. + // + // The nodes of an internal load balancer have only private IP addresses. The + // DNS name of an internal load balancer is publicly resolvable to the private + // IP addresses of the nodes. Therefore, internal load balancers can only route + // requests from clients with access to the VPC for the load balancer. + // + // The default is an Internet-facing load balancer. + Scheme *string `type:"string" enum:"LoadBalancerSchemeEnum"` + + // [Application Load Balancers] The IDs of the security groups to assign to + // the load balancer. + SecurityGroups []*string `type:"list"` + + // The IDs of the subnets to attach to the load balancer. You can specify only + // one subnet per Availability Zone. You must specify either subnets or subnet + // mappings. + // + // [Network Load Balancers] You can specify one Elastic IP address per subnet. + // + // [Application Load Balancers] You cannot specify Elastic IP addresses for + // your subnets. + SubnetMappings []*SubnetMapping `type:"list"` + + // The IDs of the subnets to attach to the load balancer. You can specify only + // one subnet per Availability Zone. You must specify either subnets or subnet + // mappings. + // + // [Application Load Balancers] You must specify subnets from at least two Availability + // Zones. + Subnets []*string `type:"list"` + + // One or more tags to assign to the load balancer. + Tags []*Tag `min:"1" type:"list"` + + // The type of load balancer to create. The default is application. + Type *string `type:"string" enum:"LoadBalancerTypeEnum"` +} + +// String returns the string representation +func (s CreateLoadBalancerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLoadBalancerInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateLoadBalancerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateLoadBalancerInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIpAddressType sets the IpAddressType field's value. +func (s *CreateLoadBalancerInput) SetIpAddressType(v string) *CreateLoadBalancerInput { + s.IpAddressType = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateLoadBalancerInput) SetName(v string) *CreateLoadBalancerInput { + s.Name = &v + return s +} + +// SetScheme sets the Scheme field's value. +func (s *CreateLoadBalancerInput) SetScheme(v string) *CreateLoadBalancerInput { + s.Scheme = &v + return s +} + +// SetSecurityGroups sets the SecurityGroups field's value. +func (s *CreateLoadBalancerInput) SetSecurityGroups(v []*string) *CreateLoadBalancerInput { + s.SecurityGroups = v + return s +} + +// SetSubnetMappings sets the SubnetMappings field's value. +func (s *CreateLoadBalancerInput) SetSubnetMappings(v []*SubnetMapping) *CreateLoadBalancerInput { + s.SubnetMappings = v + return s +} + +// SetSubnets sets the Subnets field's value. +func (s *CreateLoadBalancerInput) SetSubnets(v []*string) *CreateLoadBalancerInput { + s.Subnets = v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateLoadBalancerInput) SetTags(v []*Tag) *CreateLoadBalancerInput { + s.Tags = v + return s +} + +// SetType sets the Type field's value. +func (s *CreateLoadBalancerInput) SetType(v string) *CreateLoadBalancerInput { + s.Type = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/CreateLoadBalancerOutput +type CreateLoadBalancerOutput struct { + _ struct{} `type:"structure"` + + // Information about the load balancer. + LoadBalancers []*LoadBalancer `type:"list"` +} + +// String returns the string representation +func (s CreateLoadBalancerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLoadBalancerOutput) GoString() string { + return s.String() +} + +// SetLoadBalancers sets the LoadBalancers field's value. +func (s *CreateLoadBalancerOutput) SetLoadBalancers(v []*LoadBalancer) *CreateLoadBalancerOutput { + s.LoadBalancers = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/CreateRuleInput +type CreateRuleInput struct { + _ struct{} `type:"structure"` + + // An action. Each action has the type forward and specifies a target group. + // + // Actions is a required field + Actions []*Action `type:"list" required:"true"` + + // The conditions. Each condition specifies a field name and a single value. + // + // If the field name is host-header, you can specify a single host name (for + // example, my.example.com). A host name is case insensitive, can be up to 128 + // characters in length, and can contain any of the following characters. Note + // that you can include up to three wildcard characters. + // + // * A-Z, a-z, 0-9 + // + // * - . + // + // * * (matches 0 or more characters) + // + // * ? 
(matches exactly 1 character) + // + // If the field name is path-pattern, you can specify a single path pattern. + // A path pattern is case sensitive, can be up to 128 characters in length, + // and can contain any of the following characters. Note that you can include + // up to three wildcard characters. + // + // * A-Z, a-z, 0-9 + // + // * _ - . $ / ~ " ' @ : + + // + // * & (using &) + // + // * * (matches 0 or more characters) + // + // * ? (matches exactly 1 character) + // + // Conditions is a required field + Conditions []*RuleCondition `type:"list" required:"true"` + + // The Amazon Resource Name (ARN) of the listener. + // + // ListenerArn is a required field + ListenerArn *string `type:"string" required:"true"` + + // The priority for the rule. A listener can't have multiple rules with the + // same priority. + // + // Priority is a required field + Priority *int64 `min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s CreateRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRuleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateRuleInput"} + if s.Actions == nil { + invalidParams.Add(request.NewErrParamRequired("Actions")) + } + if s.Conditions == nil { + invalidParams.Add(request.NewErrParamRequired("Conditions")) + } + if s.ListenerArn == nil { + invalidParams.Add(request.NewErrParamRequired("ListenerArn")) + } + if s.Priority == nil { + invalidParams.Add(request.NewErrParamRequired("Priority")) + } + if s.Priority != nil && *s.Priority < 1 { + invalidParams.Add(request.NewErrParamMinValue("Priority", 1)) + } + if s.Actions != nil { + for i, v := range s.Actions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Actions", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetActions sets the Actions field's value. +func (s *CreateRuleInput) SetActions(v []*Action) *CreateRuleInput { + s.Actions = v + return s +} + +// SetConditions sets the Conditions field's value. +func (s *CreateRuleInput) SetConditions(v []*RuleCondition) *CreateRuleInput { + s.Conditions = v + return s +} + +// SetListenerArn sets the ListenerArn field's value. +func (s *CreateRuleInput) SetListenerArn(v string) *CreateRuleInput { + s.ListenerArn = &v + return s +} + +// SetPriority sets the Priority field's value. +func (s *CreateRuleInput) SetPriority(v int64) *CreateRuleInput { + s.Priority = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/CreateRuleOutput +type CreateRuleOutput struct { + _ struct{} `type:"structure"` + + // Information about the rule. + Rules []*Rule `type:"list"` +} + +// String returns the string representation +func (s CreateRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRuleOutput) GoString() string { + return s.String() +} + +// SetRules sets the Rules field's value. 
+func (s *CreateRuleOutput) SetRules(v []*Rule) *CreateRuleOutput { + s.Rules = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/CreateTargetGroupInput +type CreateTargetGroupInput struct { + _ struct{} `type:"structure"` + + // The approximate amount of time, in seconds, between health checks of an individual + // target. For Application Load Balancers, the range is 5 to 300 seconds. For + // Network Load Balancers, the supported values are 10 or 30 seconds. The default + // is 30 seconds. + HealthCheckIntervalSeconds *int64 `min:"5" type:"integer"` + + // [HTTP/HTTPS health checks] The ping path that is the destination on the targets + // for health checks. The default is /. + HealthCheckPath *string `min:"1" type:"string"` + + // The port the load balancer uses when performing health checks on targets. + // The default is traffic-port, which is the port on which each target receives + // traffic from the load balancer. + HealthCheckPort *string `type:"string"` + + // The protocol the load balancer uses when performing health checks on targets. + // The TCP protocol is supported only if the protocol of the target group is + // TCP. For Application Load Balancers, the default is HTTP. For Network Load + // Balancers, the default is TCP. + HealthCheckProtocol *string `type:"string" enum:"ProtocolEnum"` + + // The amount of time, in seconds, during which no response from a target means + // a failed health check. For Application Load Balancers, the range is 2 to + // 60 seconds and the default is 5 seconds. For Network Load Balancers, this + // is 10 seconds for TCP and HTTPS health checks and 6 seconds for HTTP health + // checks. + HealthCheckTimeoutSeconds *int64 `min:"2" type:"integer"` + + // The number of consecutive health checks successes required before considering + // an unhealthy target healthy. For Application Load Balancers, the default + // is 5. For Network Load Balancers, the default is 3. + HealthyThresholdCount *int64 `min:"2" type:"integer"` + + // [HTTP/HTTPS health checks] The HTTP codes to use when checking for a successful + // response from a target. + Matcher *Matcher `type:"structure"` + + // The name of the target group. + // + // This name must be unique per region per account, can have a maximum of 32 + // characters, must contain only alphanumeric characters or hyphens, and must + // not begin or end with a hyphen. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // The port on which the targets receive traffic. This port is used unless you + // specify a port override when registering the target. + // + // Port is a required field + Port *int64 `min:"1" type:"integer" required:"true"` + + // The protocol to use for routing traffic to the targets. For Application Load + // Balancers, the supported protocols are HTTP and HTTPS. For Network Load Balancers, + // the supported protocol is TCP. + // + // Protocol is a required field + Protocol *string `type:"string" required:"true" enum:"ProtocolEnum"` + + // The type of target that you must specify when registering targets with this + // target group. The possible values are instance (targets are specified by + // instance ID) or ip (targets are specified by IP address). The default is + // instance. Note that you can't specify targets for a target group using both + // instance IDs and IP addresses. 
+ // + // If the target type is ip, specify IP addresses from the subnets of the virtual + // private cloud (VPC) for the target group, the RFC 1918 range (10.0.0.0/8, + // 172.16.0.0/12, and 192.168.0.0/16), and the RFC 6598 range (100.64.0.0/10). + // You can't specify publicly routable IP addresses. + TargetType *string `type:"string" enum:"TargetTypeEnum"` + + // The number of consecutive health check failures required before considering + // a target unhealthy. For Application Load Balancers, the default is 2. For + // Network Load Balancers, this value must be the same as the healthy threshold + // count. + UnhealthyThresholdCount *int64 `min:"2" type:"integer"` + + // The identifier of the virtual private cloud (VPC). + // + // VpcId is a required field + VpcId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateTargetGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTargetGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateTargetGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateTargetGroupInput"} + if s.HealthCheckIntervalSeconds != nil && *s.HealthCheckIntervalSeconds < 5 { + invalidParams.Add(request.NewErrParamMinValue("HealthCheckIntervalSeconds", 5)) + } + if s.HealthCheckPath != nil && len(*s.HealthCheckPath) < 1 { + invalidParams.Add(request.NewErrParamMinLen("HealthCheckPath", 1)) + } + if s.HealthCheckTimeoutSeconds != nil && *s.HealthCheckTimeoutSeconds < 2 { + invalidParams.Add(request.NewErrParamMinValue("HealthCheckTimeoutSeconds", 2)) + } + if s.HealthyThresholdCount != nil && *s.HealthyThresholdCount < 2 { + invalidParams.Add(request.NewErrParamMinValue("HealthyThresholdCount", 2)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Port == nil { + invalidParams.Add(request.NewErrParamRequired("Port")) + } + if s.Port != nil && *s.Port < 1 { + invalidParams.Add(request.NewErrParamMinValue("Port", 1)) + } + if s.Protocol == nil { + invalidParams.Add(request.NewErrParamRequired("Protocol")) + } + if s.UnhealthyThresholdCount != nil && *s.UnhealthyThresholdCount < 2 { + invalidParams.Add(request.NewErrParamMinValue("UnhealthyThresholdCount", 2)) + } + if s.VpcId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcId")) + } + if s.Matcher != nil { + if err := s.Matcher.Validate(); err != nil { + invalidParams.AddNested("Matcher", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetHealthCheckIntervalSeconds sets the HealthCheckIntervalSeconds field's value. +func (s *CreateTargetGroupInput) SetHealthCheckIntervalSeconds(v int64) *CreateTargetGroupInput { + s.HealthCheckIntervalSeconds = &v + return s +} + +// SetHealthCheckPath sets the HealthCheckPath field's value. +func (s *CreateTargetGroupInput) SetHealthCheckPath(v string) *CreateTargetGroupInput { + s.HealthCheckPath = &v + return s +} + +// SetHealthCheckPort sets the HealthCheckPort field's value. +func (s *CreateTargetGroupInput) SetHealthCheckPort(v string) *CreateTargetGroupInput { + s.HealthCheckPort = &v + return s +} + +// SetHealthCheckProtocol sets the HealthCheckProtocol field's value. 
+func (s *CreateTargetGroupInput) SetHealthCheckProtocol(v string) *CreateTargetGroupInput { + s.HealthCheckProtocol = &v + return s +} + +// SetHealthCheckTimeoutSeconds sets the HealthCheckTimeoutSeconds field's value. +func (s *CreateTargetGroupInput) SetHealthCheckTimeoutSeconds(v int64) *CreateTargetGroupInput { + s.HealthCheckTimeoutSeconds = &v + return s +} + +// SetHealthyThresholdCount sets the HealthyThresholdCount field's value. +func (s *CreateTargetGroupInput) SetHealthyThresholdCount(v int64) *CreateTargetGroupInput { + s.HealthyThresholdCount = &v + return s +} + +// SetMatcher sets the Matcher field's value. +func (s *CreateTargetGroupInput) SetMatcher(v *Matcher) *CreateTargetGroupInput { + s.Matcher = v + return s +} + +// SetName sets the Name field's value. +func (s *CreateTargetGroupInput) SetName(v string) *CreateTargetGroupInput { + s.Name = &v + return s +} + +// SetPort sets the Port field's value. +func (s *CreateTargetGroupInput) SetPort(v int64) *CreateTargetGroupInput { + s.Port = &v + return s +} + +// SetProtocol sets the Protocol field's value. +func (s *CreateTargetGroupInput) SetProtocol(v string) *CreateTargetGroupInput { + s.Protocol = &v + return s +} + +// SetTargetType sets the TargetType field's value. +func (s *CreateTargetGroupInput) SetTargetType(v string) *CreateTargetGroupInput { + s.TargetType = &v + return s +} + +// SetUnhealthyThresholdCount sets the UnhealthyThresholdCount field's value. +func (s *CreateTargetGroupInput) SetUnhealthyThresholdCount(v int64) *CreateTargetGroupInput { + s.UnhealthyThresholdCount = &v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *CreateTargetGroupInput) SetVpcId(v string) *CreateTargetGroupInput { + s.VpcId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/CreateTargetGroupOutput +type CreateTargetGroupOutput struct { + _ struct{} `type:"structure"` + + // Information about the target group. + TargetGroups []*TargetGroup `type:"list"` +} + +// String returns the string representation +func (s CreateTargetGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTargetGroupOutput) GoString() string { + return s.String() +} + +// SetTargetGroups sets the TargetGroups field's value. +func (s *CreateTargetGroupOutput) SetTargetGroups(v []*TargetGroup) *CreateTargetGroupOutput { + s.TargetGroups = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DeleteListenerInput +type DeleteListenerInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the listener. + // + // ListenerArn is a required field + ListenerArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteListenerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteListenerInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteListenerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteListenerInput"} + if s.ListenerArn == nil { + invalidParams.Add(request.NewErrParamRequired("ListenerArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetListenerArn sets the ListenerArn field's value. 
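A minimal sketch of wiring the health-check fields of CreateTargetGroupInput together, including the Matcher HTTP codes, assuming an existing VPC. Not part of the vendored file; every literal below (name, port, VPC ID, path, thresholds, matcher range) is a placeholder chosen for illustration.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elbv2"
)

func main() {
	svc := elbv2.New(session.Must(session.NewSession()))

	in := &elbv2.CreateTargetGroupInput{}
	in.SetName("example-tg"). // placeholder name
		SetProtocol(elbv2.ProtocolEnumHttp).
		SetPort(8080).
		SetVpcId("vpc-0123456789abcdef0"). // placeholder VPC ID
		SetHealthCheckPath("/healthz").
		SetHealthCheckIntervalSeconds(30).
		SetHealthCheckTimeoutSeconds(5).
		SetHealthyThresholdCount(5).
		SetUnhealthyThresholdCount(2).
		SetMatcher((&elbv2.Matcher{}).SetHttpCode("200-299"))

	// Validate enforces the minimum values and required fields documented above.
	if err := in.Validate(); err != nil {
		fmt.Println(err)
		return
	}
	out, err := svc.CreateTargetGroup(in)
	fmt.Println(out, err)
}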
+func (s *DeleteListenerInput) SetListenerArn(v string) *DeleteListenerInput { + s.ListenerArn = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DeleteListenerOutput +type DeleteListenerOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteListenerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteListenerOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DeleteLoadBalancerInput +type DeleteLoadBalancerInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the load balancer. + // + // LoadBalancerArn is a required field + LoadBalancerArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteLoadBalancerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLoadBalancerInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteLoadBalancerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteLoadBalancerInput"} + if s.LoadBalancerArn == nil { + invalidParams.Add(request.NewErrParamRequired("LoadBalancerArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLoadBalancerArn sets the LoadBalancerArn field's value. +func (s *DeleteLoadBalancerInput) SetLoadBalancerArn(v string) *DeleteLoadBalancerInput { + s.LoadBalancerArn = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DeleteLoadBalancerOutput +type DeleteLoadBalancerOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteLoadBalancerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLoadBalancerOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DeleteRuleInput +type DeleteRuleInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the rule. + // + // RuleArn is a required field + RuleArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRuleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteRuleInput"} + if s.RuleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RuleArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRuleArn sets the RuleArn field's value. 
+func (s *DeleteRuleInput) SetRuleArn(v string) *DeleteRuleInput { + s.RuleArn = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DeleteRuleOutput +type DeleteRuleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRuleOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DeleteTargetGroupInput +type DeleteTargetGroupInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the target group. + // + // TargetGroupArn is a required field + TargetGroupArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteTargetGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTargetGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteTargetGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteTargetGroupInput"} + if s.TargetGroupArn == nil { + invalidParams.Add(request.NewErrParamRequired("TargetGroupArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTargetGroupArn sets the TargetGroupArn field's value. +func (s *DeleteTargetGroupInput) SetTargetGroupArn(v string) *DeleteTargetGroupInput { + s.TargetGroupArn = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DeleteTargetGroupOutput +type DeleteTargetGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteTargetGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTargetGroupOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DeregisterTargetsInput +type DeregisterTargetsInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the target group. + // + // TargetGroupArn is a required field + TargetGroupArn *string `type:"string" required:"true"` + + // The targets. If you specified a port override when you registered a target, + // you must specify both the target ID and the port when you deregister it. + // + // Targets is a required field + Targets []*TargetDescription `type:"list" required:"true"` +} + +// String returns the string representation +func (s DeregisterTargetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterTargetsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
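As the DeregisterTargetsInput documentation above notes, a target registered with a port override must be deregistered with both its ID and that port. A hedged sketch of that call, with placeholder identifiers and not part of the vendored file:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elbv2"
)

func main() {
	svc := elbv2.New(session.Must(session.NewSession()))

	// This target was registered with a port override, so both the instance
	// ID and the overridden port are supplied when deregistering it.
	target := (&elbv2.TargetDescription{}).
		SetId("i-0123456789abcdef0"). // placeholder instance ID
		SetPort(8443)

	in := (&elbv2.DeregisterTargetsInput{}).
		SetTargetGroupArn("arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/example/abc"). // placeholder ARN
		SetTargets([]*elbv2.TargetDescription{target})

	if err := in.Validate(); err != nil {
		fmt.Println(err)
		return
	}
	out, err := svc.DeregisterTargets(in)
	fmt.Println(out, err)
}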
+func (s *DeregisterTargetsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeregisterTargetsInput"} + if s.TargetGroupArn == nil { + invalidParams.Add(request.NewErrParamRequired("TargetGroupArn")) + } + if s.Targets == nil { + invalidParams.Add(request.NewErrParamRequired("Targets")) + } + if s.Targets != nil { + for i, v := range s.Targets { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Targets", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTargetGroupArn sets the TargetGroupArn field's value. +func (s *DeregisterTargetsInput) SetTargetGroupArn(v string) *DeregisterTargetsInput { + s.TargetGroupArn = &v + return s +} + +// SetTargets sets the Targets field's value. +func (s *DeregisterTargetsInput) SetTargets(v []*TargetDescription) *DeregisterTargetsInput { + s.Targets = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DeregisterTargetsOutput +type DeregisterTargetsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeregisterTargetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterTargetsOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DescribeAccountLimitsInput +type DescribeAccountLimitsInput struct { + _ struct{} `type:"structure"` + + // The marker for the next set of results. (You received this marker from a + // previous call.) + Marker *string `type:"string"` + + // The maximum number of results to return with this call. + PageSize *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s DescribeAccountLimitsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAccountLimitsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeAccountLimitsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeAccountLimitsInput"} + if s.PageSize != nil && *s.PageSize < 1 { + invalidParams.Add(request.NewErrParamMinValue("PageSize", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMarker sets the Marker field's value. +func (s *DescribeAccountLimitsInput) SetMarker(v string) *DescribeAccountLimitsInput { + s.Marker = &v + return s +} + +// SetPageSize sets the PageSize field's value. +func (s *DescribeAccountLimitsInput) SetPageSize(v int64) *DescribeAccountLimitsInput { + s.PageSize = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DescribeAccountLimitsOutput +type DescribeAccountLimitsOutput struct { + _ struct{} `type:"structure"` + + // Information about the limits. + Limits []*Limit `type:"list"` + + // The marker to use when requesting the next set of results. If there are no + // additional results, the string is empty. 
+ NextMarker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeAccountLimitsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAccountLimitsOutput) GoString() string { + return s.String() +} + +// SetLimits sets the Limits field's value. +func (s *DescribeAccountLimitsOutput) SetLimits(v []*Limit) *DescribeAccountLimitsOutput { + s.Limits = v + return s +} + +// SetNextMarker sets the NextMarker field's value. +func (s *DescribeAccountLimitsOutput) SetNextMarker(v string) *DescribeAccountLimitsOutput { + s.NextMarker = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DescribeListenersInput +type DescribeListenersInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Names (ARN) of the listeners. + ListenerArns []*string `type:"list"` + + // The Amazon Resource Name (ARN) of the load balancer. + LoadBalancerArn *string `type:"string"` + + // The marker for the next set of results. (You received this marker from a + // previous call.) + Marker *string `type:"string"` + + // The maximum number of results to return with this call. + PageSize *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s DescribeListenersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeListenersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeListenersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeListenersInput"} + if s.PageSize != nil && *s.PageSize < 1 { + invalidParams.Add(request.NewErrParamMinValue("PageSize", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetListenerArns sets the ListenerArns field's value. +func (s *DescribeListenersInput) SetListenerArns(v []*string) *DescribeListenersInput { + s.ListenerArns = v + return s +} + +// SetLoadBalancerArn sets the LoadBalancerArn field's value. +func (s *DescribeListenersInput) SetLoadBalancerArn(v string) *DescribeListenersInput { + s.LoadBalancerArn = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *DescribeListenersInput) SetMarker(v string) *DescribeListenersInput { + s.Marker = &v + return s +} + +// SetPageSize sets the PageSize field's value. +func (s *DescribeListenersInput) SetPageSize(v int64) *DescribeListenersInput { + s.PageSize = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DescribeListenersOutput +type DescribeListenersOutput struct { + _ struct{} `type:"structure"` + + // Information about the listeners. + Listeners []*Listener `type:"list"` + + // The marker to use when requesting the next set of results. If there are no + // additional results, the string is empty. + NextMarker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeListenersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeListenersOutput) GoString() string { + return s.String() +} + +// SetListeners sets the Listeners field's value. 
+func (s *DescribeListenersOutput) SetListeners(v []*Listener) *DescribeListenersOutput { + s.Listeners = v + return s +} + +// SetNextMarker sets the NextMarker field's value. +func (s *DescribeListenersOutput) SetNextMarker(v string) *DescribeListenersOutput { + s.NextMarker = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DescribeLoadBalancerAttributesInput +type DescribeLoadBalancerAttributesInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the load balancer. + // + // LoadBalancerArn is a required field + LoadBalancerArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeLoadBalancerAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoadBalancerAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeLoadBalancerAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeLoadBalancerAttributesInput"} + if s.LoadBalancerArn == nil { + invalidParams.Add(request.NewErrParamRequired("LoadBalancerArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLoadBalancerArn sets the LoadBalancerArn field's value. +func (s *DescribeLoadBalancerAttributesInput) SetLoadBalancerArn(v string) *DescribeLoadBalancerAttributesInput { + s.LoadBalancerArn = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DescribeLoadBalancerAttributesOutput +type DescribeLoadBalancerAttributesOutput struct { + _ struct{} `type:"structure"` + + // Information about the load balancer attributes. + Attributes []*LoadBalancerAttribute `type:"list"` +} + +// String returns the string representation +func (s DescribeLoadBalancerAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoadBalancerAttributesOutput) GoString() string { + return s.String() +} + +// SetAttributes sets the Attributes field's value. +func (s *DescribeLoadBalancerAttributesOutput) SetAttributes(v []*LoadBalancerAttribute) *DescribeLoadBalancerAttributesOutput { + s.Attributes = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DescribeLoadBalancersInput +type DescribeLoadBalancersInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Names (ARN) of the load balancers. You can specify up + // to 20 load balancers in a single call. + LoadBalancerArns []*string `type:"list"` + + // The marker for the next set of results. (You received this marker from a + // previous call.) + Marker *string `type:"string"` + + // The names of the load balancers. + Names []*string `type:"list"` + + // The maximum number of results to return with this call. + PageSize *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s DescribeLoadBalancersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoadBalancersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeLoadBalancersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeLoadBalancersInput"} + if s.PageSize != nil && *s.PageSize < 1 { + invalidParams.Add(request.NewErrParamMinValue("PageSize", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLoadBalancerArns sets the LoadBalancerArns field's value. +func (s *DescribeLoadBalancersInput) SetLoadBalancerArns(v []*string) *DescribeLoadBalancersInput { + s.LoadBalancerArns = v + return s +} + +// SetMarker sets the Marker field's value. +func (s *DescribeLoadBalancersInput) SetMarker(v string) *DescribeLoadBalancersInput { + s.Marker = &v + return s +} + +// SetNames sets the Names field's value. +func (s *DescribeLoadBalancersInput) SetNames(v []*string) *DescribeLoadBalancersInput { + s.Names = v + return s +} + +// SetPageSize sets the PageSize field's value. +func (s *DescribeLoadBalancersInput) SetPageSize(v int64) *DescribeLoadBalancersInput { + s.PageSize = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DescribeLoadBalancersOutput +type DescribeLoadBalancersOutput struct { + _ struct{} `type:"structure"` + + // Information about the load balancers. + LoadBalancers []*LoadBalancer `type:"list"` + + // The marker to use when requesting the next set of results. If there are no + // additional results, the string is empty. + NextMarker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeLoadBalancersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoadBalancersOutput) GoString() string { + return s.String() +} + +// SetLoadBalancers sets the LoadBalancers field's value. +func (s *DescribeLoadBalancersOutput) SetLoadBalancers(v []*LoadBalancer) *DescribeLoadBalancersOutput { + s.LoadBalancers = v + return s +} + +// SetNextMarker sets the NextMarker field's value. +func (s *DescribeLoadBalancersOutput) SetNextMarker(v string) *DescribeLoadBalancersOutput { + s.NextMarker = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DescribeRulesInput +type DescribeRulesInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the listener. + ListenerArn *string `type:"string"` + + // The marker for the next set of results. (You received this marker from a + // previous call.) + Marker *string `type:"string"` + + // The maximum number of results to return with this call. + PageSize *int64 `min:"1" type:"integer"` + + // The Amazon Resource Names (ARN) of the rules. + RuleArns []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeRulesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRulesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeRulesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeRulesInput"} + if s.PageSize != nil && *s.PageSize < 1 { + invalidParams.Add(request.NewErrParamMinValue("PageSize", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetListenerArn sets the ListenerArn field's value. 
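The Marker, PageSize, and NextMarker fields above follow the SDK's marker-based pagination pattern: pass the previous response's NextMarker back in as Marker until it comes back empty. A sketch of that loop, not part of the vendored file (the generated DescribeLoadBalancersPages helper wraps the same logic):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elbv2"
)

func main() {
	svc := elbv2.New(session.Must(session.NewSession()))

	in := (&elbv2.DescribeLoadBalancersInput{}).SetPageSize(20)
	for {
		out, err := svc.DescribeLoadBalancers(in)
		if err != nil {
			fmt.Println(err)
			return
		}
		for _, lb := range out.LoadBalancers {
			fmt.Println(aws.StringValue(lb.LoadBalancerName), aws.StringValue(lb.DNSName))
		}
		// An empty or absent NextMarker means there are no further pages.
		if aws.StringValue(out.NextMarker) == "" {
			return
		}
		in.SetMarker(aws.StringValue(out.NextMarker))
	}
}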
+func (s *DescribeRulesInput) SetListenerArn(v string) *DescribeRulesInput { + s.ListenerArn = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *DescribeRulesInput) SetMarker(v string) *DescribeRulesInput { + s.Marker = &v + return s +} + +// SetPageSize sets the PageSize field's value. +func (s *DescribeRulesInput) SetPageSize(v int64) *DescribeRulesInput { + s.PageSize = &v + return s +} + +// SetRuleArns sets the RuleArns field's value. +func (s *DescribeRulesInput) SetRuleArns(v []*string) *DescribeRulesInput { + s.RuleArns = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DescribeRulesOutput +type DescribeRulesOutput struct { + _ struct{} `type:"structure"` + + // The marker to use when requesting the next set of results. If there are no + // additional results, the string is empty. + NextMarker *string `type:"string"` + + // Information about the rules. + Rules []*Rule `type:"list"` +} + +// String returns the string representation +func (s DescribeRulesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRulesOutput) GoString() string { + return s.String() +} + +// SetNextMarker sets the NextMarker field's value. +func (s *DescribeRulesOutput) SetNextMarker(v string) *DescribeRulesOutput { + s.NextMarker = &v + return s +} + +// SetRules sets the Rules field's value. +func (s *DescribeRulesOutput) SetRules(v []*Rule) *DescribeRulesOutput { + s.Rules = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DescribeSSLPoliciesInput +type DescribeSSLPoliciesInput struct { + _ struct{} `type:"structure"` + + // The marker for the next set of results. (You received this marker from a + // previous call.) + Marker *string `type:"string"` + + // The names of the policies. + Names []*string `type:"list"` + + // The maximum number of results to return with this call. + PageSize *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s DescribeSSLPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSSLPoliciesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeSSLPoliciesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeSSLPoliciesInput"} + if s.PageSize != nil && *s.PageSize < 1 { + invalidParams.Add(request.NewErrParamMinValue("PageSize", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMarker sets the Marker field's value. +func (s *DescribeSSLPoliciesInput) SetMarker(v string) *DescribeSSLPoliciesInput { + s.Marker = &v + return s +} + +// SetNames sets the Names field's value. +func (s *DescribeSSLPoliciesInput) SetNames(v []*string) *DescribeSSLPoliciesInput { + s.Names = v + return s +} + +// SetPageSize sets the PageSize field's value. +func (s *DescribeSSLPoliciesInput) SetPageSize(v int64) *DescribeSSLPoliciesInput { + s.PageSize = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DescribeSSLPoliciesOutput +type DescribeSSLPoliciesOutput struct { + _ struct{} `type:"structure"` + + // The marker to use when requesting the next set of results. If there are no + // additional results, the string is empty. 
+ NextMarker *string `type:"string"` + + // Information about the policies. + SslPolicies []*SslPolicy `type:"list"` +} + +// String returns the string representation +func (s DescribeSSLPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSSLPoliciesOutput) GoString() string { + return s.String() +} + +// SetNextMarker sets the NextMarker field's value. +func (s *DescribeSSLPoliciesOutput) SetNextMarker(v string) *DescribeSSLPoliciesOutput { + s.NextMarker = &v + return s +} + +// SetSslPolicies sets the SslPolicies field's value. +func (s *DescribeSSLPoliciesOutput) SetSslPolicies(v []*SslPolicy) *DescribeSSLPoliciesOutput { + s.SslPolicies = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DescribeTagsInput +type DescribeTagsInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Names (ARN) of the resources. + // + // ResourceArns is a required field + ResourceArns []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTagsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTagsInput"} + if s.ResourceArns == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArns")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArns sets the ResourceArns field's value. +func (s *DescribeTagsInput) SetResourceArns(v []*string) *DescribeTagsInput { + s.ResourceArns = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DescribeTagsOutput +type DescribeTagsOutput struct { + _ struct{} `type:"structure"` + + // Information about the tags. + TagDescriptions []*TagDescription `type:"list"` +} + +// String returns the string representation +func (s DescribeTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTagsOutput) GoString() string { + return s.String() +} + +// SetTagDescriptions sets the TagDescriptions field's value. +func (s *DescribeTagsOutput) SetTagDescriptions(v []*TagDescription) *DescribeTagsOutput { + s.TagDescriptions = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DescribeTargetGroupAttributesInput +type DescribeTargetGroupAttributesInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the target group. + // + // TargetGroupArn is a required field + TargetGroupArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeTargetGroupAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTargetGroupAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeTargetGroupAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTargetGroupAttributesInput"} + if s.TargetGroupArn == nil { + invalidParams.Add(request.NewErrParamRequired("TargetGroupArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTargetGroupArn sets the TargetGroupArn field's value. +func (s *DescribeTargetGroupAttributesInput) SetTargetGroupArn(v string) *DescribeTargetGroupAttributesInput { + s.TargetGroupArn = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DescribeTargetGroupAttributesOutput +type DescribeTargetGroupAttributesOutput struct { + _ struct{} `type:"structure"` + + // Information about the target group attributes + Attributes []*TargetGroupAttribute `type:"list"` +} + +// String returns the string representation +func (s DescribeTargetGroupAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTargetGroupAttributesOutput) GoString() string { + return s.String() +} + +// SetAttributes sets the Attributes field's value. +func (s *DescribeTargetGroupAttributesOutput) SetAttributes(v []*TargetGroupAttribute) *DescribeTargetGroupAttributesOutput { + s.Attributes = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DescribeTargetGroupsInput +type DescribeTargetGroupsInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the load balancer. + LoadBalancerArn *string `type:"string"` + + // The marker for the next set of results. (You received this marker from a + // previous call.) + Marker *string `type:"string"` + + // The names of the target groups. + Names []*string `type:"list"` + + // The maximum number of results to return with this call. + PageSize *int64 `min:"1" type:"integer"` + + // The Amazon Resource Names (ARN) of the target groups. + TargetGroupArns []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeTargetGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTargetGroupsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeTargetGroupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTargetGroupsInput"} + if s.PageSize != nil && *s.PageSize < 1 { + invalidParams.Add(request.NewErrParamMinValue("PageSize", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLoadBalancerArn sets the LoadBalancerArn field's value. +func (s *DescribeTargetGroupsInput) SetLoadBalancerArn(v string) *DescribeTargetGroupsInput { + s.LoadBalancerArn = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *DescribeTargetGroupsInput) SetMarker(v string) *DescribeTargetGroupsInput { + s.Marker = &v + return s +} + +// SetNames sets the Names field's value. +func (s *DescribeTargetGroupsInput) SetNames(v []*string) *DescribeTargetGroupsInput { + s.Names = v + return s +} + +// SetPageSize sets the PageSize field's value. +func (s *DescribeTargetGroupsInput) SetPageSize(v int64) *DescribeTargetGroupsInput { + s.PageSize = &v + return s +} + +// SetTargetGroupArns sets the TargetGroupArns field's value. 
+func (s *DescribeTargetGroupsInput) SetTargetGroupArns(v []*string) *DescribeTargetGroupsInput { + s.TargetGroupArns = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DescribeTargetGroupsOutput +type DescribeTargetGroupsOutput struct { + _ struct{} `type:"structure"` + + // The marker to use when requesting the next set of results. If there are no + // additional results, the string is empty. + NextMarker *string `type:"string"` + + // Information about the target groups. + TargetGroups []*TargetGroup `type:"list"` +} + +// String returns the string representation +func (s DescribeTargetGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTargetGroupsOutput) GoString() string { + return s.String() +} + +// SetNextMarker sets the NextMarker field's value. +func (s *DescribeTargetGroupsOutput) SetNextMarker(v string) *DescribeTargetGroupsOutput { + s.NextMarker = &v + return s +} + +// SetTargetGroups sets the TargetGroups field's value. +func (s *DescribeTargetGroupsOutput) SetTargetGroups(v []*TargetGroup) *DescribeTargetGroupsOutput { + s.TargetGroups = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DescribeTargetHealthInput +type DescribeTargetHealthInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the target group. + // + // TargetGroupArn is a required field + TargetGroupArn *string `type:"string" required:"true"` + + // The targets. + Targets []*TargetDescription `type:"list"` +} + +// String returns the string representation +func (s DescribeTargetHealthInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTargetHealthInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeTargetHealthInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTargetHealthInput"} + if s.TargetGroupArn == nil { + invalidParams.Add(request.NewErrParamRequired("TargetGroupArn")) + } + if s.Targets != nil { + for i, v := range s.Targets { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Targets", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTargetGroupArn sets the TargetGroupArn field's value. +func (s *DescribeTargetHealthInput) SetTargetGroupArn(v string) *DescribeTargetHealthInput { + s.TargetGroupArn = &v + return s +} + +// SetTargets sets the Targets field's value. +func (s *DescribeTargetHealthInput) SetTargets(v []*TargetDescription) *DescribeTargetHealthInput { + s.Targets = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/DescribeTargetHealthOutput +type DescribeTargetHealthOutput struct { + _ struct{} `type:"structure"` + + // Information about the health of the targets. 
+ TargetHealthDescriptions []*TargetHealthDescription `type:"list"` +} + +// String returns the string representation +func (s DescribeTargetHealthOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTargetHealthOutput) GoString() string { + return s.String() +} + +// SetTargetHealthDescriptions sets the TargetHealthDescriptions field's value. +func (s *DescribeTargetHealthOutput) SetTargetHealthDescriptions(v []*TargetHealthDescription) *DescribeTargetHealthOutput { + s.TargetHealthDescriptions = v + return s +} + +// Information about an Elastic Load Balancing resource limit for your AWS account. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/Limit +type Limit struct { + _ struct{} `type:"structure"` + + // The maximum value of the limit. + Max *string `type:"string"` + + // The name of the limit. The possible values are: + // + // * application-load-balancers + // + // * listeners-per-application-load-balancer + // + // * listeners-per-network-load-balancer + // + // * network-load-balancers + // + // * rules-per-application-load-balancer + // + // * target-groups + // + // * targets-per-application-load-balancer + Name *string `type:"string"` +} + +// String returns the string representation +func (s Limit) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Limit) GoString() string { + return s.String() +} + +// SetMax sets the Max field's value. +func (s *Limit) SetMax(v string) *Limit { + s.Max = &v + return s +} + +// SetName sets the Name field's value. +func (s *Limit) SetName(v string) *Limit { + s.Name = &v + return s +} + +// Information about a listener. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/Listener +type Listener struct { + _ struct{} `type:"structure"` + + // The SSL server certificate. You must provide a certificate if the protocol + // is HTTPS. + Certificates []*Certificate `type:"list"` + + // The default actions for the listener. + DefaultActions []*Action `type:"list"` + + // The Amazon Resource Name (ARN) of the listener. + ListenerArn *string `type:"string"` + + // The Amazon Resource Name (ARN) of the load balancer. + LoadBalancerArn *string `type:"string"` + + // The port on which the load balancer is listening. + Port *int64 `min:"1" type:"integer"` + + // The protocol for connections from clients to the load balancer. + Protocol *string `type:"string" enum:"ProtocolEnum"` + + // The security policy that defines which ciphers and protocols are supported. + // The default is the current predefined security policy. + SslPolicy *string `type:"string"` +} + +// String returns the string representation +func (s Listener) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Listener) GoString() string { + return s.String() +} + +// SetCertificates sets the Certificates field's value. +func (s *Listener) SetCertificates(v []*Certificate) *Listener { + s.Certificates = v + return s +} + +// SetDefaultActions sets the DefaultActions field's value. +func (s *Listener) SetDefaultActions(v []*Action) *Listener { + s.DefaultActions = v + return s +} + +// SetListenerArn sets the ListenerArn field's value. +func (s *Listener) SetListenerArn(v string) *Listener { + s.ListenerArn = &v + return s +} + +// SetLoadBalancerArn sets the LoadBalancerArn field's value. 
+func (s *Listener) SetLoadBalancerArn(v string) *Listener { + s.LoadBalancerArn = &v + return s +} + +// SetPort sets the Port field's value. +func (s *Listener) SetPort(v int64) *Listener { + s.Port = &v + return s +} + +// SetProtocol sets the Protocol field's value. +func (s *Listener) SetProtocol(v string) *Listener { + s.Protocol = &v + return s +} + +// SetSslPolicy sets the SslPolicy field's value. +func (s *Listener) SetSslPolicy(v string) *Listener { + s.SslPolicy = &v + return s +} + +// Information about a load balancer. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/LoadBalancer +type LoadBalancer struct { + _ struct{} `type:"structure"` + + // The Availability Zones for the load balancer. + AvailabilityZones []*AvailabilityZone `type:"list"` + + // The ID of the Amazon Route 53 hosted zone associated with the load balancer. + CanonicalHostedZoneId *string `type:"string"` + + // The date and time the load balancer was created. + CreatedTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The public DNS name of the load balancer. + DNSName *string `type:"string"` + + // The type of IP addresses used by the subnets for your load balancer. The + // possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and + // IPv6 addresses). + IpAddressType *string `type:"string" enum:"IpAddressType"` + + // The Amazon Resource Name (ARN) of the load balancer. + LoadBalancerArn *string `type:"string"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string"` + + // The nodes of an Internet-facing load balancer have public IP addresses. The + // DNS name of an Internet-facing load balancer is publicly resolvable to the + // public IP addresses of the nodes. Therefore, Internet-facing load balancers + // can route requests from clients over the Internet. + // + // The nodes of an internal load balancer have only private IP addresses. The + // DNS name of an internal load balancer is publicly resolvable to the private + // IP addresses of the nodes. Therefore, internal load balancers can only route + // requests from clients with access to the VPC for the load balancer. + Scheme *string `type:"string" enum:"LoadBalancerSchemeEnum"` + + // The IDs of the security groups for the load balancer. + SecurityGroups []*string `type:"list"` + + // The state of the load balancer. + State *LoadBalancerState `type:"structure"` + + // The type of load balancer. + Type *string `type:"string" enum:"LoadBalancerTypeEnum"` + + // The ID of the VPC for the load balancer. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s LoadBalancer) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoadBalancer) GoString() string { + return s.String() +} + +// SetAvailabilityZones sets the AvailabilityZones field's value. +func (s *LoadBalancer) SetAvailabilityZones(v []*AvailabilityZone) *LoadBalancer { + s.AvailabilityZones = v + return s +} + +// SetCanonicalHostedZoneId sets the CanonicalHostedZoneId field's value. +func (s *LoadBalancer) SetCanonicalHostedZoneId(v string) *LoadBalancer { + s.CanonicalHostedZoneId = &v + return s +} + +// SetCreatedTime sets the CreatedTime field's value. +func (s *LoadBalancer) SetCreatedTime(v time.Time) *LoadBalancer { + s.CreatedTime = &v + return s +} + +// SetDNSName sets the DNSName field's value. 
+func (s *LoadBalancer) SetDNSName(v string) *LoadBalancer { + s.DNSName = &v + return s +} + +// SetIpAddressType sets the IpAddressType field's value. +func (s *LoadBalancer) SetIpAddressType(v string) *LoadBalancer { + s.IpAddressType = &v + return s +} + +// SetLoadBalancerArn sets the LoadBalancerArn field's value. +func (s *LoadBalancer) SetLoadBalancerArn(v string) *LoadBalancer { + s.LoadBalancerArn = &v + return s +} + +// SetLoadBalancerName sets the LoadBalancerName field's value. +func (s *LoadBalancer) SetLoadBalancerName(v string) *LoadBalancer { + s.LoadBalancerName = &v + return s +} + +// SetScheme sets the Scheme field's value. +func (s *LoadBalancer) SetScheme(v string) *LoadBalancer { + s.Scheme = &v + return s +} + +// SetSecurityGroups sets the SecurityGroups field's value. +func (s *LoadBalancer) SetSecurityGroups(v []*string) *LoadBalancer { + s.SecurityGroups = v + return s +} + +// SetState sets the State field's value. +func (s *LoadBalancer) SetState(v *LoadBalancerState) *LoadBalancer { + s.State = v + return s +} + +// SetType sets the Type field's value. +func (s *LoadBalancer) SetType(v string) *LoadBalancer { + s.Type = &v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *LoadBalancer) SetVpcId(v string) *LoadBalancer { + s.VpcId = &v + return s +} + +// Information about a static IP address for a load balancer. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/LoadBalancerAddress +type LoadBalancerAddress struct { + _ struct{} `type:"structure"` + + // [Network Load Balancers] The allocation ID of the Elastic IP address. + AllocationId *string `type:"string"` + + // The static IP address. + IpAddress *string `type:"string"` +} + +// String returns the string representation +func (s LoadBalancerAddress) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoadBalancerAddress) GoString() string { + return s.String() +} + +// SetAllocationId sets the AllocationId field's value. +func (s *LoadBalancerAddress) SetAllocationId(v string) *LoadBalancerAddress { + s.AllocationId = &v + return s +} + +// SetIpAddress sets the IpAddress field's value. +func (s *LoadBalancerAddress) SetIpAddress(v string) *LoadBalancerAddress { + s.IpAddress = &v + return s +} + +// Information about a load balancer attribute. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/LoadBalancerAttribute +type LoadBalancerAttribute struct { + _ struct{} `type:"structure"` + + // The name of the attribute. + // + // * access_logs.s3.enabled - [Application Load Balancers] Indicates whether + // access logs stored in Amazon S3 are enabled. The value is true or false. + // + // * access_logs.s3.bucket - [Application Load Balancers] The name of the + // S3 bucket for the access logs. This attribute is required if access logs + // in Amazon S3 are enabled. The bucket must exist in the same region as + // the load balancer and have a bucket policy that grants Elastic Load Balancing + // permission to write to the bucket. + // + // * access_logs.s3.prefix - [Application Load Balancers] The prefix for + // the location in the S3 bucket. If you don't specify a prefix, the access + // logs are stored in the root of the bucket. + // + // * deletion_protection.enabled - Indicates whether deletion protection + // is enabled. The value is true or false. 
+ // + // * idle_timeout.timeout_seconds - [Application Load Balancers] The idle + // timeout value, in seconds. The valid range is 1-4000. The default is 60 + // seconds. + Key *string `type:"string"` + + // The value of the attribute. + Value *string `type:"string"` +} + +// String returns the string representation +func (s LoadBalancerAttribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoadBalancerAttribute) GoString() string { + return s.String() +} + +// SetKey sets the Key field's value. +func (s *LoadBalancerAttribute) SetKey(v string) *LoadBalancerAttribute { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *LoadBalancerAttribute) SetValue(v string) *LoadBalancerAttribute { + s.Value = &v + return s +} + +// Information about the state of the load balancer. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/LoadBalancerState +type LoadBalancerState struct { + _ struct{} `type:"structure"` + + // The state code. The initial state of the load balancer is provisioning. After + // the load balancer is fully set up and ready to route traffic, its state is + // active. If the load balancer could not be set up, its state is failed. + Code *string `type:"string" enum:"LoadBalancerStateEnum"` + + // A description of the state. + Reason *string `type:"string"` +} + +// String returns the string representation +func (s LoadBalancerState) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoadBalancerState) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *LoadBalancerState) SetCode(v string) *LoadBalancerState { + s.Code = &v + return s +} + +// SetReason sets the Reason field's value. +func (s *LoadBalancerState) SetReason(v string) *LoadBalancerState { + s.Reason = &v + return s +} + +// Information to use when checking for a successful response from a target. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/Matcher +type Matcher struct { + _ struct{} `type:"structure"` + + // The HTTP codes. + // + // For Application Load Balancers, you can specify values between 200 and 499, + // and the default value is 200. You can specify multiple values (for example, + // "200,202") or a range of values (for example, "200-299"). + // + // For Network Load Balancers, this is 200 to 399. + // + // HttpCode is a required field + HttpCode *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Matcher) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Matcher) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Matcher) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Matcher"} + if s.HttpCode == nil { + invalidParams.Add(request.NewErrParamRequired("HttpCode")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetHttpCode sets the HttpCode field's value. +func (s *Matcher) SetHttpCode(v string) *Matcher { + s.HttpCode = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/ModifyListenerInput +type ModifyListenerInput struct { + _ struct{} `type:"structure"` + + // The SSL server certificate. 
+ Certificates []*Certificate `type:"list"` + + // The default action. For Application Load Balancers, the protocol of the specified + // target group must be HTTP or HTTPS. For Network Load Balancers, the protocol + // of the specified target group must be TCP. + DefaultActions []*Action `type:"list"` + + // The Amazon Resource Name (ARN) of the listener. + // + // ListenerArn is a required field + ListenerArn *string `type:"string" required:"true"` + + // The port for connections from clients to the load balancer. + Port *int64 `min:"1" type:"integer"` + + // The protocol for connections from clients to the load balancer. Application + // Load Balancers support HTTP and HTTPS and Network Load Balancers support + // TCP. + Protocol *string `type:"string" enum:"ProtocolEnum"` + + // The security policy that defines which protocols and ciphers are supported. + // For more information, see Security Policies (http://docs.aws.amazon.com/elasticloadbalancing/latest/application/create-https-listener.html#describe-ssl-policies) + // in the Application Load Balancers Guide. + SslPolicy *string `type:"string"` +} + +// String returns the string representation +func (s ModifyListenerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyListenerInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyListenerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyListenerInput"} + if s.ListenerArn == nil { + invalidParams.Add(request.NewErrParamRequired("ListenerArn")) + } + if s.Port != nil && *s.Port < 1 { + invalidParams.Add(request.NewErrParamMinValue("Port", 1)) + } + if s.DefaultActions != nil { + for i, v := range s.DefaultActions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "DefaultActions", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCertificates sets the Certificates field's value. +func (s *ModifyListenerInput) SetCertificates(v []*Certificate) *ModifyListenerInput { + s.Certificates = v + return s +} + +// SetDefaultActions sets the DefaultActions field's value. +func (s *ModifyListenerInput) SetDefaultActions(v []*Action) *ModifyListenerInput { + s.DefaultActions = v + return s +} + +// SetListenerArn sets the ListenerArn field's value. +func (s *ModifyListenerInput) SetListenerArn(v string) *ModifyListenerInput { + s.ListenerArn = &v + return s +} + +// SetPort sets the Port field's value. +func (s *ModifyListenerInput) SetPort(v int64) *ModifyListenerInput { + s.Port = &v + return s +} + +// SetProtocol sets the Protocol field's value. +func (s *ModifyListenerInput) SetProtocol(v string) *ModifyListenerInput { + s.Protocol = &v + return s +} + +// SetSslPolicy sets the SslPolicy field's value. +func (s *ModifyListenerInput) SetSslPolicy(v string) *ModifyListenerInput { + s.SslPolicy = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/ModifyListenerOutput +type ModifyListenerOutput struct { + _ struct{} `type:"structure"` + + // Information about the modified listeners. 
+ Listeners []*Listener `type:"list"` +} + +// String returns the string representation +func (s ModifyListenerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyListenerOutput) GoString() string { + return s.String() +} + +// SetListeners sets the Listeners field's value. +func (s *ModifyListenerOutput) SetListeners(v []*Listener) *ModifyListenerOutput { + s.Listeners = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/ModifyLoadBalancerAttributesInput +type ModifyLoadBalancerAttributesInput struct { + _ struct{} `type:"structure"` + + // The load balancer attributes. + // + // Attributes is a required field + Attributes []*LoadBalancerAttribute `type:"list" required:"true"` + + // The Amazon Resource Name (ARN) of the load balancer. + // + // LoadBalancerArn is a required field + LoadBalancerArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyLoadBalancerAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyLoadBalancerAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyLoadBalancerAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyLoadBalancerAttributesInput"} + if s.Attributes == nil { + invalidParams.Add(request.NewErrParamRequired("Attributes")) + } + if s.LoadBalancerArn == nil { + invalidParams.Add(request.NewErrParamRequired("LoadBalancerArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributes sets the Attributes field's value. +func (s *ModifyLoadBalancerAttributesInput) SetAttributes(v []*LoadBalancerAttribute) *ModifyLoadBalancerAttributesInput { + s.Attributes = v + return s +} + +// SetLoadBalancerArn sets the LoadBalancerArn field's value. +func (s *ModifyLoadBalancerAttributesInput) SetLoadBalancerArn(v string) *ModifyLoadBalancerAttributesInput { + s.LoadBalancerArn = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/ModifyLoadBalancerAttributesOutput +type ModifyLoadBalancerAttributesOutput struct { + _ struct{} `type:"structure"` + + // Information about the load balancer attributes. + Attributes []*LoadBalancerAttribute `type:"list"` +} + +// String returns the string representation +func (s ModifyLoadBalancerAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyLoadBalancerAttributesOutput) GoString() string { + return s.String() +} + +// SetAttributes sets the Attributes field's value. +func (s *ModifyLoadBalancerAttributesOutput) SetAttributes(v []*LoadBalancerAttribute) *ModifyLoadBalancerAttributesOutput { + s.Attributes = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/ModifyRuleInput +type ModifyRuleInput struct { + _ struct{} `type:"structure"` + + // The actions. The target group must use the HTTP or HTTPS protocol. + Actions []*Action `type:"list"` + + // The conditions. + Conditions []*RuleCondition `type:"list"` + + // The Amazon Resource Name (ARN) of the rule. 
+ // + // RuleArn is a required field + RuleArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyRuleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyRuleInput"} + if s.RuleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RuleArn")) + } + if s.Actions != nil { + for i, v := range s.Actions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Actions", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetActions sets the Actions field's value. +func (s *ModifyRuleInput) SetActions(v []*Action) *ModifyRuleInput { + s.Actions = v + return s +} + +// SetConditions sets the Conditions field's value. +func (s *ModifyRuleInput) SetConditions(v []*RuleCondition) *ModifyRuleInput { + s.Conditions = v + return s +} + +// SetRuleArn sets the RuleArn field's value. +func (s *ModifyRuleInput) SetRuleArn(v string) *ModifyRuleInput { + s.RuleArn = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/ModifyRuleOutput +type ModifyRuleOutput struct { + _ struct{} `type:"structure"` + + // Information about the rule. + Rules []*Rule `type:"list"` +} + +// String returns the string representation +func (s ModifyRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyRuleOutput) GoString() string { + return s.String() +} + +// SetRules sets the Rules field's value. +func (s *ModifyRuleOutput) SetRules(v []*Rule) *ModifyRuleOutput { + s.Rules = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/ModifyTargetGroupAttributesInput +type ModifyTargetGroupAttributesInput struct { + _ struct{} `type:"structure"` + + // The attributes. + // + // Attributes is a required field + Attributes []*TargetGroupAttribute `type:"list" required:"true"` + + // The Amazon Resource Name (ARN) of the target group. + // + // TargetGroupArn is a required field + TargetGroupArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyTargetGroupAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyTargetGroupAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyTargetGroupAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyTargetGroupAttributesInput"} + if s.Attributes == nil { + invalidParams.Add(request.NewErrParamRequired("Attributes")) + } + if s.TargetGroupArn == nil { + invalidParams.Add(request.NewErrParamRequired("TargetGroupArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributes sets the Attributes field's value. 
+func (s *ModifyTargetGroupAttributesInput) SetAttributes(v []*TargetGroupAttribute) *ModifyTargetGroupAttributesInput { + s.Attributes = v + return s +} + +// SetTargetGroupArn sets the TargetGroupArn field's value. +func (s *ModifyTargetGroupAttributesInput) SetTargetGroupArn(v string) *ModifyTargetGroupAttributesInput { + s.TargetGroupArn = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/ModifyTargetGroupAttributesOutput +type ModifyTargetGroupAttributesOutput struct { + _ struct{} `type:"structure"` + + // Information about the attributes. + Attributes []*TargetGroupAttribute `type:"list"` +} + +// String returns the string representation +func (s ModifyTargetGroupAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyTargetGroupAttributesOutput) GoString() string { + return s.String() +} + +// SetAttributes sets the Attributes field's value. +func (s *ModifyTargetGroupAttributesOutput) SetAttributes(v []*TargetGroupAttribute) *ModifyTargetGroupAttributesOutput { + s.Attributes = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/ModifyTargetGroupInput +type ModifyTargetGroupInput struct { + _ struct{} `type:"structure"` + + // The approximate amount of time, in seconds, between health checks of an individual + // target. For Application Load Balancers, the range is 5 to 300 seconds. For + // Network Load Balancers, the supported values are 10 or 30 seconds. + HealthCheckIntervalSeconds *int64 `min:"5" type:"integer"` + + // [HTTP/HTTPS health checks] The ping path that is the destination for the + // health check request. + HealthCheckPath *string `min:"1" type:"string"` + + // The port the load balancer uses when performing health checks on targets. + HealthCheckPort *string `type:"string"` + + // The protocol the load balancer uses when performing health checks on targets. + // The TCP protocol is supported only if the protocol of the target group is + // TCP. + HealthCheckProtocol *string `type:"string" enum:"ProtocolEnum"` + + // [HTTP/HTTPS health checks] The amount of time, in seconds, during which no + // response means a failed health check. + HealthCheckTimeoutSeconds *int64 `min:"2" type:"integer"` + + // The number of consecutive health checks successes required before considering + // an unhealthy target healthy. + HealthyThresholdCount *int64 `min:"2" type:"integer"` + + // [HTTP/HTTPS health checks] The HTTP codes to use when checking for a successful + // response from a target. + Matcher *Matcher `type:"structure"` + + // The Amazon Resource Name (ARN) of the target group. + // + // TargetGroupArn is a required field + TargetGroupArn *string `type:"string" required:"true"` + + // The number of consecutive health check failures required before considering + // the target unhealthy. For Network Load Balancers, this value must be the + // same as the healthy threshold count. + UnhealthyThresholdCount *int64 `min:"2" type:"integer"` +} + +// String returns the string representation +func (s ModifyTargetGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyTargetGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ModifyTargetGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyTargetGroupInput"} + if s.HealthCheckIntervalSeconds != nil && *s.HealthCheckIntervalSeconds < 5 { + invalidParams.Add(request.NewErrParamMinValue("HealthCheckIntervalSeconds", 5)) + } + if s.HealthCheckPath != nil && len(*s.HealthCheckPath) < 1 { + invalidParams.Add(request.NewErrParamMinLen("HealthCheckPath", 1)) + } + if s.HealthCheckTimeoutSeconds != nil && *s.HealthCheckTimeoutSeconds < 2 { + invalidParams.Add(request.NewErrParamMinValue("HealthCheckTimeoutSeconds", 2)) + } + if s.HealthyThresholdCount != nil && *s.HealthyThresholdCount < 2 { + invalidParams.Add(request.NewErrParamMinValue("HealthyThresholdCount", 2)) + } + if s.TargetGroupArn == nil { + invalidParams.Add(request.NewErrParamRequired("TargetGroupArn")) + } + if s.UnhealthyThresholdCount != nil && *s.UnhealthyThresholdCount < 2 { + invalidParams.Add(request.NewErrParamMinValue("UnhealthyThresholdCount", 2)) + } + if s.Matcher != nil { + if err := s.Matcher.Validate(); err != nil { + invalidParams.AddNested("Matcher", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetHealthCheckIntervalSeconds sets the HealthCheckIntervalSeconds field's value. +func (s *ModifyTargetGroupInput) SetHealthCheckIntervalSeconds(v int64) *ModifyTargetGroupInput { + s.HealthCheckIntervalSeconds = &v + return s +} + +// SetHealthCheckPath sets the HealthCheckPath field's value. +func (s *ModifyTargetGroupInput) SetHealthCheckPath(v string) *ModifyTargetGroupInput { + s.HealthCheckPath = &v + return s +} + +// SetHealthCheckPort sets the HealthCheckPort field's value. +func (s *ModifyTargetGroupInput) SetHealthCheckPort(v string) *ModifyTargetGroupInput { + s.HealthCheckPort = &v + return s +} + +// SetHealthCheckProtocol sets the HealthCheckProtocol field's value. +func (s *ModifyTargetGroupInput) SetHealthCheckProtocol(v string) *ModifyTargetGroupInput { + s.HealthCheckProtocol = &v + return s +} + +// SetHealthCheckTimeoutSeconds sets the HealthCheckTimeoutSeconds field's value. +func (s *ModifyTargetGroupInput) SetHealthCheckTimeoutSeconds(v int64) *ModifyTargetGroupInput { + s.HealthCheckTimeoutSeconds = &v + return s +} + +// SetHealthyThresholdCount sets the HealthyThresholdCount field's value. +func (s *ModifyTargetGroupInput) SetHealthyThresholdCount(v int64) *ModifyTargetGroupInput { + s.HealthyThresholdCount = &v + return s +} + +// SetMatcher sets the Matcher field's value. +func (s *ModifyTargetGroupInput) SetMatcher(v *Matcher) *ModifyTargetGroupInput { + s.Matcher = v + return s +} + +// SetTargetGroupArn sets the TargetGroupArn field's value. +func (s *ModifyTargetGroupInput) SetTargetGroupArn(v string) *ModifyTargetGroupInput { + s.TargetGroupArn = &v + return s +} + +// SetUnhealthyThresholdCount sets the UnhealthyThresholdCount field's value. +func (s *ModifyTargetGroupInput) SetUnhealthyThresholdCount(v int64) *ModifyTargetGroupInput { + s.UnhealthyThresholdCount = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/ModifyTargetGroupOutput +type ModifyTargetGroupOutput struct { + _ struct{} `type:"structure"` + + // Information about the target group. 
+ TargetGroups []*TargetGroup `type:"list"` +} + +// String returns the string representation +func (s ModifyTargetGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyTargetGroupOutput) GoString() string { + return s.String() +} + +// SetTargetGroups sets the TargetGroups field's value. +func (s *ModifyTargetGroupOutput) SetTargetGroups(v []*TargetGroup) *ModifyTargetGroupOutput { + s.TargetGroups = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/RegisterTargetsInput +type RegisterTargetsInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the target group. + // + // TargetGroupArn is a required field + TargetGroupArn *string `type:"string" required:"true"` + + // The targets. The default port for a target is the port for the target group. + // You can specify a port override. If a target is already registered, you can + // register it again using a different port. + // + // Targets is a required field + Targets []*TargetDescription `type:"list" required:"true"` +} + +// String returns the string representation +func (s RegisterTargetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterTargetsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RegisterTargetsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RegisterTargetsInput"} + if s.TargetGroupArn == nil { + invalidParams.Add(request.NewErrParamRequired("TargetGroupArn")) + } + if s.Targets == nil { + invalidParams.Add(request.NewErrParamRequired("Targets")) + } + if s.Targets != nil { + for i, v := range s.Targets { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Targets", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTargetGroupArn sets the TargetGroupArn field's value. +func (s *RegisterTargetsInput) SetTargetGroupArn(v string) *RegisterTargetsInput { + s.TargetGroupArn = &v + return s +} + +// SetTargets sets the Targets field's value. +func (s *RegisterTargetsInput) SetTargets(v []*TargetDescription) *RegisterTargetsInput { + s.Targets = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/RegisterTargetsOutput +type RegisterTargetsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RegisterTargetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterTargetsOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/RemoveTagsInput +type RemoveTagsInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource. + // + // ResourceArns is a required field + ResourceArns []*string `type:"list" required:"true"` + + // The tag keys for the tags to remove. 
+ // + // TagKeys is a required field + TagKeys []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s RemoveTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RemoveTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RemoveTagsInput"} + if s.ResourceArns == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArns")) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArns sets the ResourceArns field's value. +func (s *RemoveTagsInput) SetResourceArns(v []*string) *RemoveTagsInput { + s.ResourceArns = v + return s +} + +// SetTagKeys sets the TagKeys field's value. +func (s *RemoveTagsInput) SetTagKeys(v []*string) *RemoveTagsInput { + s.TagKeys = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/RemoveTagsOutput +type RemoveTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemoveTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsOutput) GoString() string { + return s.String() +} + +// Information about a rule. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/Rule +type Rule struct { + _ struct{} `type:"structure"` + + // The actions. + Actions []*Action `type:"list"` + + // The conditions. + Conditions []*RuleCondition `type:"list"` + + // Indicates whether this is the default rule. + IsDefault *bool `type:"boolean"` + + // The priority. + Priority *string `type:"string"` + + // The Amazon Resource Name (ARN) of the rule. + RuleArn *string `type:"string"` +} + +// String returns the string representation +func (s Rule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Rule) GoString() string { + return s.String() +} + +// SetActions sets the Actions field's value. +func (s *Rule) SetActions(v []*Action) *Rule { + s.Actions = v + return s +} + +// SetConditions sets the Conditions field's value. +func (s *Rule) SetConditions(v []*RuleCondition) *Rule { + s.Conditions = v + return s +} + +// SetIsDefault sets the IsDefault field's value. +func (s *Rule) SetIsDefault(v bool) *Rule { + s.IsDefault = &v + return s +} + +// SetPriority sets the Priority field's value. +func (s *Rule) SetPriority(v string) *Rule { + s.Priority = &v + return s +} + +// SetRuleArn sets the RuleArn field's value. +func (s *Rule) SetRuleArn(v string) *Rule { + s.RuleArn = &v + return s +} + +// Information about a condition for a rule. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/RuleCondition +type RuleCondition struct { + _ struct{} `type:"structure"` + + // The name of the field. The possible values are host-header and path-pattern. + Field *string `type:"string"` + + // The condition value. + // + // If the field name is host-header, you can specify a single host name (for + // example, my.example.com). A host name is case insensitive, can be up to 128 + // characters in length, and can contain any of the following characters. 
Note + // that you can include up to three wildcard characters. + // + // * A-Z, a-z, 0-9 + // + // * - . + // + // * * (matches 0 or more characters) + // + // * ? (matches exactly 1 character) + // + // If the field name is path-pattern, you can specify a single path pattern + // (for example, /img/*). A path pattern is case sensitive, can be up to 128 + // characters in length, and can contain any of the following characters. Note + // that you can include up to three wildcard characters. + // + // * A-Z, a-z, 0-9 + // + // * _ - . $ / ~ " ' @ : + + // + // * & (using &) + // + // * * (matches 0 or more characters) + // + // * ? (matches exactly 1 character) + Values []*string `type:"list"` +} + +// String returns the string representation +func (s RuleCondition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RuleCondition) GoString() string { + return s.String() +} + +// SetField sets the Field field's value. +func (s *RuleCondition) SetField(v string) *RuleCondition { + s.Field = &v + return s +} + +// SetValues sets the Values field's value. +func (s *RuleCondition) SetValues(v []*string) *RuleCondition { + s.Values = v + return s +} + +// Information about the priorities for the rules for a listener. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/RulePriorityPair +type RulePriorityPair struct { + _ struct{} `type:"structure"` + + // The rule priority. + Priority *int64 `min:"1" type:"integer"` + + // The Amazon Resource Name (ARN) of the rule. + RuleArn *string `type:"string"` +} + +// String returns the string representation +func (s RulePriorityPair) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RulePriorityPair) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RulePriorityPair) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RulePriorityPair"} + if s.Priority != nil && *s.Priority < 1 { + invalidParams.Add(request.NewErrParamMinValue("Priority", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPriority sets the Priority field's value. +func (s *RulePriorityPair) SetPriority(v int64) *RulePriorityPair { + s.Priority = &v + return s +} + +// SetRuleArn sets the RuleArn field's value. +func (s *RulePriorityPair) SetRuleArn(v string) *RulePriorityPair { + s.RuleArn = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/SetIpAddressTypeInput +type SetIpAddressTypeInput struct { + _ struct{} `type:"structure"` + + // The IP address type. The possible values are ipv4 (for IPv4 addresses) and + // dualstack (for IPv4 and IPv6 addresses). Internal load balancers must use + // ipv4. + // + // IpAddressType is a required field + IpAddressType *string `type:"string" required:"true" enum:"IpAddressType"` + + // The Amazon Resource Name (ARN) of the load balancer. + // + // LoadBalancerArn is a required field + LoadBalancerArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SetIpAddressTypeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetIpAddressTypeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *SetIpAddressTypeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetIpAddressTypeInput"} + if s.IpAddressType == nil { + invalidParams.Add(request.NewErrParamRequired("IpAddressType")) + } + if s.LoadBalancerArn == nil { + invalidParams.Add(request.NewErrParamRequired("LoadBalancerArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIpAddressType sets the IpAddressType field's value. +func (s *SetIpAddressTypeInput) SetIpAddressType(v string) *SetIpAddressTypeInput { + s.IpAddressType = &v + return s +} + +// SetLoadBalancerArn sets the LoadBalancerArn field's value. +func (s *SetIpAddressTypeInput) SetLoadBalancerArn(v string) *SetIpAddressTypeInput { + s.LoadBalancerArn = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/SetIpAddressTypeOutput +type SetIpAddressTypeOutput struct { + _ struct{} `type:"structure"` + + // The IP address type. + IpAddressType *string `type:"string" enum:"IpAddressType"` +} + +// String returns the string representation +func (s SetIpAddressTypeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetIpAddressTypeOutput) GoString() string { + return s.String() +} + +// SetIpAddressType sets the IpAddressType field's value. +func (s *SetIpAddressTypeOutput) SetIpAddressType(v string) *SetIpAddressTypeOutput { + s.IpAddressType = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/SetRulePrioritiesInput +type SetRulePrioritiesInput struct { + _ struct{} `type:"structure"` + + // The rule priorities. + // + // RulePriorities is a required field + RulePriorities []*RulePriorityPair `type:"list" required:"true"` +} + +// String returns the string representation +func (s SetRulePrioritiesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetRulePrioritiesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetRulePrioritiesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetRulePrioritiesInput"} + if s.RulePriorities == nil { + invalidParams.Add(request.NewErrParamRequired("RulePriorities")) + } + if s.RulePriorities != nil { + for i, v := range s.RulePriorities { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RulePriorities", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRulePriorities sets the RulePriorities field's value. +func (s *SetRulePrioritiesInput) SetRulePriorities(v []*RulePriorityPair) *SetRulePrioritiesInput { + s.RulePriorities = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/SetRulePrioritiesOutput +type SetRulePrioritiesOutput struct { + _ struct{} `type:"structure"` + + // Information about the rules. + Rules []*Rule `type:"list"` +} + +// String returns the string representation +func (s SetRulePrioritiesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetRulePrioritiesOutput) GoString() string { + return s.String() +} + +// SetRules sets the Rules field's value. 
+func (s *SetRulePrioritiesOutput) SetRules(v []*Rule) *SetRulePrioritiesOutput { + s.Rules = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/SetSecurityGroupsInput +type SetSecurityGroupsInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the load balancer. + // + // LoadBalancerArn is a required field + LoadBalancerArn *string `type:"string" required:"true"` + + // The IDs of the security groups. + // + // SecurityGroups is a required field + SecurityGroups []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s SetSecurityGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetSecurityGroupsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetSecurityGroupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetSecurityGroupsInput"} + if s.LoadBalancerArn == nil { + invalidParams.Add(request.NewErrParamRequired("LoadBalancerArn")) + } + if s.SecurityGroups == nil { + invalidParams.Add(request.NewErrParamRequired("SecurityGroups")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLoadBalancerArn sets the LoadBalancerArn field's value. +func (s *SetSecurityGroupsInput) SetLoadBalancerArn(v string) *SetSecurityGroupsInput { + s.LoadBalancerArn = &v + return s +} + +// SetSecurityGroups sets the SecurityGroups field's value. +func (s *SetSecurityGroupsInput) SetSecurityGroups(v []*string) *SetSecurityGroupsInput { + s.SecurityGroups = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/SetSecurityGroupsOutput +type SetSecurityGroupsOutput struct { + _ struct{} `type:"structure"` + + // The IDs of the security groups associated with the load balancer. + SecurityGroupIds []*string `type:"list"` +} + +// String returns the string representation +func (s SetSecurityGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetSecurityGroupsOutput) GoString() string { + return s.String() +} + +// SetSecurityGroupIds sets the SecurityGroupIds field's value. +func (s *SetSecurityGroupsOutput) SetSecurityGroupIds(v []*string) *SetSecurityGroupsOutput { + s.SecurityGroupIds = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/SetSubnetsInput +type SetSubnetsInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the load balancer. + // + // LoadBalancerArn is a required field + LoadBalancerArn *string `type:"string" required:"true"` + + // The IDs of the subnets. You must specify subnets from at least two Availability + // Zones. You can specify only one subnet per Availability Zone. You must specify + // either subnets or subnet mappings. + // + // The load balancer is allocated one static IP address per subnet. You cannot + // specify your own Elastic IP addresses. + SubnetMappings []*SubnetMapping `type:"list"` + + // The IDs of the subnets. You must specify subnets from at least two Availability + // Zones. You can specify only one subnet per Availability Zone. You must specify + // either subnets or subnet mappings. 
+ // + // Subnets is a required field + Subnets []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s SetSubnetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetSubnetsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetSubnetsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetSubnetsInput"} + if s.LoadBalancerArn == nil { + invalidParams.Add(request.NewErrParamRequired("LoadBalancerArn")) + } + if s.Subnets == nil { + invalidParams.Add(request.NewErrParamRequired("Subnets")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLoadBalancerArn sets the LoadBalancerArn field's value. +func (s *SetSubnetsInput) SetLoadBalancerArn(v string) *SetSubnetsInput { + s.LoadBalancerArn = &v + return s +} + +// SetSubnetMappings sets the SubnetMappings field's value. +func (s *SetSubnetsInput) SetSubnetMappings(v []*SubnetMapping) *SetSubnetsInput { + s.SubnetMappings = v + return s +} + +// SetSubnets sets the Subnets field's value. +func (s *SetSubnetsInput) SetSubnets(v []*string) *SetSubnetsInput { + s.Subnets = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/SetSubnetsOutput +type SetSubnetsOutput struct { + _ struct{} `type:"structure"` + + // Information about the subnet and Availability Zone. + AvailabilityZones []*AvailabilityZone `type:"list"` +} + +// String returns the string representation +func (s SetSubnetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetSubnetsOutput) GoString() string { + return s.String() +} + +// SetAvailabilityZones sets the AvailabilityZones field's value. +func (s *SetSubnetsOutput) SetAvailabilityZones(v []*AvailabilityZone) *SetSubnetsOutput { + s.AvailabilityZones = v + return s +} + +// Information about a policy used for SSL negotiation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/SslPolicy +type SslPolicy struct { + _ struct{} `type:"structure"` + + // The ciphers. + Ciphers []*Cipher `type:"list"` + + // The name of the policy. + Name *string `type:"string"` + + // The protocols. + SslProtocols []*string `type:"list"` +} + +// String returns the string representation +func (s SslPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SslPolicy) GoString() string { + return s.String() +} + +// SetCiphers sets the Ciphers field's value. +func (s *SslPolicy) SetCiphers(v []*Cipher) *SslPolicy { + s.Ciphers = v + return s +} + +// SetName sets the Name field's value. +func (s *SslPolicy) SetName(v string) *SslPolicy { + s.Name = &v + return s +} + +// SetSslProtocols sets the SslProtocols field's value. +func (s *SslPolicy) SetSslProtocols(v []*string) *SslPolicy { + s.SslProtocols = v + return s +} + +// Information about a subnet mapping. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/SubnetMapping +type SubnetMapping struct { + _ struct{} `type:"structure"` + + // [Network Load Balancers] The allocation ID of the Elastic IP address. + AllocationId *string `type:"string"` + + // The ID of the subnet. 
+ SubnetId *string `type:"string"` +} + +// String returns the string representation +func (s SubnetMapping) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SubnetMapping) GoString() string { + return s.String() +} + +// SetAllocationId sets the AllocationId field's value. +func (s *SubnetMapping) SetAllocationId(v string) *SubnetMapping { + s.AllocationId = &v + return s +} + +// SetSubnetId sets the SubnetId field's value. +func (s *SubnetMapping) SetSubnetId(v string) *SubnetMapping { + s.SubnetId = &v + return s +} + +// Information about a tag. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/Tag +type Tag struct { + _ struct{} `type:"structure"` + + // The key of the tag. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // The value of the tag. + Value *string `type:"string"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} + +// The tags associated with a resource. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/TagDescription +type TagDescription struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource. + ResourceArn *string `type:"string"` + + // Information about the tags. + Tags []*Tag `min:"1" type:"list"` +} + +// String returns the string representation +func (s TagDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagDescription) GoString() string { + return s.String() +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *TagDescription) SetResourceArn(v string) *TagDescription { + s.ResourceArn = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TagDescription) SetTags(v []*Tag) *TagDescription { + s.Tags = v + return s +} + +// Information about a target. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/TargetDescription +type TargetDescription struct { + _ struct{} `type:"structure"` + + // The Availability Zone where the IP address is to be registered. Specify all + // to register an IP address outside the target group VPC with all Availability + // Zones that are enabled for the load balancer. + // + // If the IP address is in a subnet of the VPC for the target group, the Availability + // Zone is automatically detected and this parameter is optional. + // + // This parameter is not supported if the target type of the target group is + // instance. + AvailabilityZone *string `type:"string"` + + // The ID of the target. 
If the target type of the target group is instance, + // specify an instance ID. If the target type is ip, specify an IP address. + // + // Id is a required field + Id *string `type:"string" required:"true"` + + // The port on which the target is listening. + Port *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s TargetDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TargetDescription) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TargetDescription) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TargetDescription"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Port != nil && *s.Port < 1 { + invalidParams.Add(request.NewErrParamMinValue("Port", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *TargetDescription) SetAvailabilityZone(v string) *TargetDescription { + s.AvailabilityZone = &v + return s +} + +// SetId sets the Id field's value. +func (s *TargetDescription) SetId(v string) *TargetDescription { + s.Id = &v + return s +} + +// SetPort sets the Port field's value. +func (s *TargetDescription) SetPort(v int64) *TargetDescription { + s.Port = &v + return s +} + +// Information about a target group. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/TargetGroup +type TargetGroup struct { + _ struct{} `type:"structure"` + + // The approximate amount of time, in seconds, between health checks of an individual + // target. + HealthCheckIntervalSeconds *int64 `min:"5" type:"integer"` + + // The destination for the health check request. + HealthCheckPath *string `min:"1" type:"string"` + + // The port to use to connect with the target. + HealthCheckPort *string `type:"string"` + + // The protocol to use to connect with the target. + HealthCheckProtocol *string `type:"string" enum:"ProtocolEnum"` + + // The amount of time, in seconds, during which no response means a failed health + // check. + HealthCheckTimeoutSeconds *int64 `min:"2" type:"integer"` + + // The number of consecutive health checks successes required before considering + // an unhealthy target healthy. + HealthyThresholdCount *int64 `min:"2" type:"integer"` + + // The Amazon Resource Names (ARN) of the load balancers that route traffic + // to this target group. + LoadBalancerArns []*string `type:"list"` + + // The HTTP codes to use when checking for a successful response from a target. + Matcher *Matcher `type:"structure"` + + // The port on which the targets are listening. + Port *int64 `min:"1" type:"integer"` + + // The protocol to use for routing traffic to the targets. + Protocol *string `type:"string" enum:"ProtocolEnum"` + + // The Amazon Resource Name (ARN) of the target group. + TargetGroupArn *string `type:"string"` + + // The name of the target group. + TargetGroupName *string `type:"string"` + + // The type of target that you must specify when registering targets with this + // target group. The possible values are instance (targets are specified by + // instance ID) or ip (targets are specified by IP address). + TargetType *string `type:"string" enum:"TargetTypeEnum"` + + // The number of consecutive health check failures required before considering + // the target unhealthy. 
+ UnhealthyThresholdCount *int64 `min:"2" type:"integer"` + + // The ID of the VPC for the targets. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s TargetGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TargetGroup) GoString() string { + return s.String() +} + +// SetHealthCheckIntervalSeconds sets the HealthCheckIntervalSeconds field's value. +func (s *TargetGroup) SetHealthCheckIntervalSeconds(v int64) *TargetGroup { + s.HealthCheckIntervalSeconds = &v + return s +} + +// SetHealthCheckPath sets the HealthCheckPath field's value. +func (s *TargetGroup) SetHealthCheckPath(v string) *TargetGroup { + s.HealthCheckPath = &v + return s +} + +// SetHealthCheckPort sets the HealthCheckPort field's value. +func (s *TargetGroup) SetHealthCheckPort(v string) *TargetGroup { + s.HealthCheckPort = &v + return s +} + +// SetHealthCheckProtocol sets the HealthCheckProtocol field's value. +func (s *TargetGroup) SetHealthCheckProtocol(v string) *TargetGroup { + s.HealthCheckProtocol = &v + return s +} + +// SetHealthCheckTimeoutSeconds sets the HealthCheckTimeoutSeconds field's value. +func (s *TargetGroup) SetHealthCheckTimeoutSeconds(v int64) *TargetGroup { + s.HealthCheckTimeoutSeconds = &v + return s +} + +// SetHealthyThresholdCount sets the HealthyThresholdCount field's value. +func (s *TargetGroup) SetHealthyThresholdCount(v int64) *TargetGroup { + s.HealthyThresholdCount = &v + return s +} + +// SetLoadBalancerArns sets the LoadBalancerArns field's value. +func (s *TargetGroup) SetLoadBalancerArns(v []*string) *TargetGroup { + s.LoadBalancerArns = v + return s +} + +// SetMatcher sets the Matcher field's value. +func (s *TargetGroup) SetMatcher(v *Matcher) *TargetGroup { + s.Matcher = v + return s +} + +// SetPort sets the Port field's value. +func (s *TargetGroup) SetPort(v int64) *TargetGroup { + s.Port = &v + return s +} + +// SetProtocol sets the Protocol field's value. +func (s *TargetGroup) SetProtocol(v string) *TargetGroup { + s.Protocol = &v + return s +} + +// SetTargetGroupArn sets the TargetGroupArn field's value. +func (s *TargetGroup) SetTargetGroupArn(v string) *TargetGroup { + s.TargetGroupArn = &v + return s +} + +// SetTargetGroupName sets the TargetGroupName field's value. +func (s *TargetGroup) SetTargetGroupName(v string) *TargetGroup { + s.TargetGroupName = &v + return s +} + +// SetTargetType sets the TargetType field's value. +func (s *TargetGroup) SetTargetType(v string) *TargetGroup { + s.TargetType = &v + return s +} + +// SetUnhealthyThresholdCount sets the UnhealthyThresholdCount field's value. +func (s *TargetGroup) SetUnhealthyThresholdCount(v int64) *TargetGroup { + s.UnhealthyThresholdCount = &v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *TargetGroup) SetVpcId(v string) *TargetGroup { + s.VpcId = &v + return s +} + +// Information about a target group attribute. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/TargetGroupAttribute +type TargetGroupAttribute struct { + _ struct{} `type:"structure"` + + // The name of the attribute. + // + // * deregistration_delay.timeout_seconds - The amount time for Elastic Load + // Balancing to wait before changing the state of a deregistering target + // from draining to unused. The range is 0-3600 seconds. The default value + // is 300 seconds. 
+ // + // * stickiness.enabled - [Application Load Balancers] Indicates whether + // sticky sessions are enabled. The value is true or false. + // + // * stickiness.type - [Application Load Balancers] The type of sticky sessions. + // The possible value is lb_cookie. + // + // * stickiness.lb_cookie.duration_seconds - [Application Load Balancers] + // The time period, in seconds, during which requests from a client should + // be routed to the same target. After this time period expires, the load + // balancer-generated cookie is considered stale. The range is 1 second to + // 1 week (604800 seconds). The default value is 1 day (86400 seconds). + Key *string `type:"string"` + + // The value of the attribute. + Value *string `type:"string"` +} + +// String returns the string representation +func (s TargetGroupAttribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TargetGroupAttribute) GoString() string { + return s.String() +} + +// SetKey sets the Key field's value. +func (s *TargetGroupAttribute) SetKey(v string) *TargetGroupAttribute { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *TargetGroupAttribute) SetValue(v string) *TargetGroupAttribute { + s.Value = &v + return s +} + +// Information about the current health of a target. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/TargetHealth +type TargetHealth struct { + _ struct{} `type:"structure"` + + // A description of the target health that provides additional details. If the + // state is healthy, a description is not provided. + Description *string `type:"string"` + + // The reason code. If the target state is healthy, a reason code is not provided. + // + // If the target state is initial, the reason code can be one of the following + // values: + // + // * Elb.RegistrationInProgress - The target is in the process of being registered + // with the load balancer. + // + // * Elb.InitialHealthChecking - The load balancer is still sending the target + // the minimum number of health checks required to determine its health status. + // + // If the target state is unhealthy, the reason code can be one of the following + // values: + // + // * Target.ResponseCodeMismatch - The health checks did not return an expected + // HTTP code. + // + // * Target.Timeout - The health check requests timed out. + // + // * Target.FailedHealthChecks - The health checks failed because the connection + // to the target timed out, the target response was malformed, or the target + // failed the health check for an unknown reason. + // + // * Elb.InternalError - The health checks failed due to an internal error. + // + // If the target state is unused, the reason code can be one of the following + // values: + // + // * Target.NotRegistered - The target is not registered with the target + // group. + // + // * Target.NotInUse - The target group is not used by any load balancer + // or the target is in an Availability Zone that is not enabled for its load + // balancer. + // + // * Target.IpUnusable - The target IP address is reserved for use by a load + // balancer. + // + // * Target.InvalidState - The target is in the stopped or terminated state. + // + // If the target state is draining, the reason code can be the following value: + // + // * Target.DeregistrationInProgress - The target is in the process of being + // deregistered and the deregistration delay period has not expired. 
+ Reason *string `type:"string" enum:"TargetHealthReasonEnum"` + + // The state of the target. + State *string `type:"string" enum:"TargetHealthStateEnum"` +} + +// String returns the string representation +func (s TargetHealth) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TargetHealth) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *TargetHealth) SetDescription(v string) *TargetHealth { + s.Description = &v + return s +} + +// SetReason sets the Reason field's value. +func (s *TargetHealth) SetReason(v string) *TargetHealth { + s.Reason = &v + return s +} + +// SetState sets the State field's value. +func (s *TargetHealth) SetState(v string) *TargetHealth { + s.State = &v + return s +} + +// Information about the health of a target. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/TargetHealthDescription +type TargetHealthDescription struct { + _ struct{} `type:"structure"` + + // The port to use to connect with the target. + HealthCheckPort *string `type:"string"` + + // The description of the target. + Target *TargetDescription `type:"structure"` + + // The health information for the target. + TargetHealth *TargetHealth `type:"structure"` +} + +// String returns the string representation +func (s TargetHealthDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TargetHealthDescription) GoString() string { + return s.String() +} + +// SetHealthCheckPort sets the HealthCheckPort field's value. +func (s *TargetHealthDescription) SetHealthCheckPort(v string) *TargetHealthDescription { + s.HealthCheckPort = &v + return s +} + +// SetTarget sets the Target field's value. +func (s *TargetHealthDescription) SetTarget(v *TargetDescription) *TargetHealthDescription { + s.Target = v + return s +} + +// SetTargetHealth sets the TargetHealth field's value. 
+func (s *TargetHealthDescription) SetTargetHealth(v *TargetHealth) *TargetHealthDescription { + s.TargetHealth = v + return s +} + +const ( + // ActionTypeEnumForward is a ActionTypeEnum enum value + ActionTypeEnumForward = "forward" +) + +const ( + // IpAddressTypeIpv4 is a IpAddressType enum value + IpAddressTypeIpv4 = "ipv4" + + // IpAddressTypeDualstack is a IpAddressType enum value + IpAddressTypeDualstack = "dualstack" +) + +const ( + // LoadBalancerSchemeEnumInternetFacing is a LoadBalancerSchemeEnum enum value + LoadBalancerSchemeEnumInternetFacing = "internet-facing" + + // LoadBalancerSchemeEnumInternal is a LoadBalancerSchemeEnum enum value + LoadBalancerSchemeEnumInternal = "internal" +) + +const ( + // LoadBalancerStateEnumActive is a LoadBalancerStateEnum enum value + LoadBalancerStateEnumActive = "active" + + // LoadBalancerStateEnumProvisioning is a LoadBalancerStateEnum enum value + LoadBalancerStateEnumProvisioning = "provisioning" + + // LoadBalancerStateEnumActiveImpaired is a LoadBalancerStateEnum enum value + LoadBalancerStateEnumActiveImpaired = "active_impaired" + + // LoadBalancerStateEnumFailed is a LoadBalancerStateEnum enum value + LoadBalancerStateEnumFailed = "failed" +) + +const ( + // LoadBalancerTypeEnumApplication is a LoadBalancerTypeEnum enum value + LoadBalancerTypeEnumApplication = "application" + + // LoadBalancerTypeEnumNetwork is a LoadBalancerTypeEnum enum value + LoadBalancerTypeEnumNetwork = "network" +) + +const ( + // ProtocolEnumHttp is a ProtocolEnum enum value + ProtocolEnumHttp = "HTTP" + + // ProtocolEnumHttps is a ProtocolEnum enum value + ProtocolEnumHttps = "HTTPS" + + // ProtocolEnumTcp is a ProtocolEnum enum value + ProtocolEnumTcp = "TCP" +) + +const ( + // TargetHealthReasonEnumElbRegistrationInProgress is a TargetHealthReasonEnum enum value + TargetHealthReasonEnumElbRegistrationInProgress = "Elb.RegistrationInProgress" + + // TargetHealthReasonEnumElbInitialHealthChecking is a TargetHealthReasonEnum enum value + TargetHealthReasonEnumElbInitialHealthChecking = "Elb.InitialHealthChecking" + + // TargetHealthReasonEnumTargetResponseCodeMismatch is a TargetHealthReasonEnum enum value + TargetHealthReasonEnumTargetResponseCodeMismatch = "Target.ResponseCodeMismatch" + + // TargetHealthReasonEnumTargetTimeout is a TargetHealthReasonEnum enum value + TargetHealthReasonEnumTargetTimeout = "Target.Timeout" + + // TargetHealthReasonEnumTargetFailedHealthChecks is a TargetHealthReasonEnum enum value + TargetHealthReasonEnumTargetFailedHealthChecks = "Target.FailedHealthChecks" + + // TargetHealthReasonEnumTargetNotRegistered is a TargetHealthReasonEnum enum value + TargetHealthReasonEnumTargetNotRegistered = "Target.NotRegistered" + + // TargetHealthReasonEnumTargetNotInUse is a TargetHealthReasonEnum enum value + TargetHealthReasonEnumTargetNotInUse = "Target.NotInUse" + + // TargetHealthReasonEnumTargetDeregistrationInProgress is a TargetHealthReasonEnum enum value + TargetHealthReasonEnumTargetDeregistrationInProgress = "Target.DeregistrationInProgress" + + // TargetHealthReasonEnumTargetInvalidState is a TargetHealthReasonEnum enum value + TargetHealthReasonEnumTargetInvalidState = "Target.InvalidState" + + // TargetHealthReasonEnumTargetIpUnusable is a TargetHealthReasonEnum enum value + TargetHealthReasonEnumTargetIpUnusable = "Target.IpUnusable" + + // TargetHealthReasonEnumElbInternalError is a TargetHealthReasonEnum enum value + TargetHealthReasonEnumElbInternalError = "Elb.InternalError" +) + +const ( + // 
TargetHealthStateEnumInitial is a TargetHealthStateEnum enum value + TargetHealthStateEnumInitial = "initial" + + // TargetHealthStateEnumHealthy is a TargetHealthStateEnum enum value + TargetHealthStateEnumHealthy = "healthy" + + // TargetHealthStateEnumUnhealthy is a TargetHealthStateEnum enum value + TargetHealthStateEnumUnhealthy = "unhealthy" + + // TargetHealthStateEnumUnused is a TargetHealthStateEnum enum value + TargetHealthStateEnumUnused = "unused" + + // TargetHealthStateEnumDraining is a TargetHealthStateEnum enum value + TargetHealthStateEnumDraining = "draining" + + // TargetHealthStateEnumUnavailable is a TargetHealthStateEnum enum value + TargetHealthStateEnumUnavailable = "unavailable" +) + +const ( + // TargetTypeEnumInstance is a TargetTypeEnum enum value + TargetTypeEnumInstance = "instance" + + // TargetTypeEnumIp is a TargetTypeEnum enum value + TargetTypeEnumIp = "ip" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/elbv2/doc.go b/vendor/github.com/aws/aws-sdk-go/service/elbv2/doc.go new file mode 100644 index 000000000000..def1b39bf8ae --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/elbv2/doc.go @@ -0,0 +1,76 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package elbv2 provides the client and types for making API +// requests to Elastic Load Balancing. +// +// A load balancer distributes incoming traffic across targets, such as your +// EC2 instances. This enables you to increase the availability of your application. +// The load balancer also monitors the health of its registered targets and +// ensures that it routes traffic only to healthy targets. You configure your +// load balancer to accept incoming traffic by specifying one or more listeners, +// which are configured with a protocol and port number for connections from +// clients to the load balancer. You configure a target group with a protocol +// and port number for connections from the load balancer to the targets, and +// with health check settings to be used when checking the health status of +// the targets. +// +// Elastic Load Balancing supports the following types of load balancers: Application +// Load Balancers, Network Load Balancers, and Classic Load Balancers. +// +// An Application Load Balancer makes routing and load balancing decisions at +// the application layer (HTTP/HTTPS). A Network Load Balancer makes routing +// and load balancing decisions at the transport layer (TCP). Both Application +// Load Balancers and Network Load Balancers can route requests to one or more +// ports on each EC2 instance or container instance in your virtual private +// cloud (VPC). +// +// A Classic Load Balancer makes routing and load balancing decisions either +// at the transport layer (TCP/SSL) or the application layer (HTTP/HTTPS), and +// supports either EC2-Classic or a VPC. For more information, see the Elastic +// Load Balancing User Guide (http://docs.aws.amazon.com/elasticloadbalancing/latest/userguide/). +// +// This reference covers the 2015-12-01 API, which supports Application Load +// Balancers and Network Load Balancers. The 2012-06-01 API supports Classic +// Load Balancers. +// +// To get started, complete the following tasks: +// +// Create a load balancer using CreateLoadBalancer. +// +// Create a target group using CreateTargetGroup. +// +// Register targets for the target group using RegisterTargets. +// +// Create one or more listeners for your load balancer using CreateListener. 
+// +// To delete a load balancer and its related resources, complete the following +// tasks: +// +// Delete the load balancer using DeleteLoadBalancer. +// +// Delete the target group using DeleteTargetGroup. +// +// All Elastic Load Balancing operations are idempotent, which means that they +// complete at most one time. If you repeat an operation, it succeeds. +// +// See https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01 for more information on this service. +// +// See elbv2 package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/elbv2/ +// +// Using the Client +// +// To contact Elastic Load Balancing with the SDK, use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the Elastic Load Balancing client ELBV2 for more +// information on creating a client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/elbv2/#New +package elbv2 diff --git a/vendor/github.com/aws/aws-sdk-go/service/elbv2/errors.go b/vendor/github.com/aws/aws-sdk-go/service/elbv2/errors.go new file mode 100644 index 000000000000..df475d81575b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/elbv2/errors.go @@ -0,0 +1,207 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package elbv2 + +const ( + + // ErrCodeAllocationIdNotFoundException for service response error code + // "AllocationIdNotFound". + // + // The specified allocation ID does not exist. + ErrCodeAllocationIdNotFoundException = "AllocationIdNotFound" + + // ErrCodeAvailabilityZoneNotSupportedException for service response error code + // "AvailabilityZoneNotSupported". + // + // The specified Availability Zone is not supported. + ErrCodeAvailabilityZoneNotSupportedException = "AvailabilityZoneNotSupported" + + // ErrCodeCertificateNotFoundException for service response error code + // "CertificateNotFound". + // + // The specified certificate does not exist. + ErrCodeCertificateNotFoundException = "CertificateNotFound" + + // ErrCodeDuplicateListenerException for service response error code + // "DuplicateListener". + // + // A listener with the specified port already exists. + ErrCodeDuplicateListenerException = "DuplicateListener" + + // ErrCodeDuplicateLoadBalancerNameException for service response error code + // "DuplicateLoadBalancerName". + // + // A load balancer with the specified name already exists. + ErrCodeDuplicateLoadBalancerNameException = "DuplicateLoadBalancerName" + + // ErrCodeDuplicateTagKeysException for service response error code + // "DuplicateTagKeys". + // + // A tag key was specified more than once. + ErrCodeDuplicateTagKeysException = "DuplicateTagKeys" + + // ErrCodeDuplicateTargetGroupNameException for service response error code + // "DuplicateTargetGroupName". + // + // A target group with the specified name already exists. + ErrCodeDuplicateTargetGroupNameException = "DuplicateTargetGroupName" + + // ErrCodeHealthUnavailableException for service response error code + // "HealthUnavailable". + // + // The health of the specified targets could not be retrieved due to an internal + // error.
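The "Using the Client" guidance in the vendored doc.go above amounts to: create a session, call New, then call operations on the returned client. A minimal sketch of that flow, assuming shared-credential configuration and a hard-coded us-west-2 region chosen only for illustration (neither comes from the vendored code):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elbv2"
)

func main() {
	// Create a session; credentials come from the environment or shared config.
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))

	// New creates an ELBV2 service client, as described in the package docs above.
	svc := elbv2.New(sess)

	// List the load balancers visible to this account/region.
	out, err := svc.DescribeLoadBalancers(&elbv2.DescribeLoadBalancersInput{})
	if err != nil {
		log.Fatal(err)
	}
	for _, lb := range out.LoadBalancers {
		fmt.Println(aws.StringValue(lb.LoadBalancerName))
	}
}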
+ ErrCodeHealthUnavailableException = "HealthUnavailable" + + // ErrCodeIncompatibleProtocolsException for service response error code + // "IncompatibleProtocols". + // + // The specified configuration is not valid with this protocol. + ErrCodeIncompatibleProtocolsException = "IncompatibleProtocols" + + // ErrCodeInvalidConfigurationRequestException for service response error code + // "InvalidConfigurationRequest". + // + // The requested configuration is not valid. + ErrCodeInvalidConfigurationRequestException = "InvalidConfigurationRequest" + + // ErrCodeInvalidSchemeException for service response error code + // "InvalidScheme". + // + // The requested scheme is not valid. + ErrCodeInvalidSchemeException = "InvalidScheme" + + // ErrCodeInvalidSecurityGroupException for service response error code + // "InvalidSecurityGroup". + // + // The specified security group does not exist. + ErrCodeInvalidSecurityGroupException = "InvalidSecurityGroup" + + // ErrCodeInvalidSubnetException for service response error code + // "InvalidSubnet". + // + // The specified subnet is out of available addresses. + ErrCodeInvalidSubnetException = "InvalidSubnet" + + // ErrCodeInvalidTargetException for service response error code + // "InvalidTarget". + // + // The specified target does not exist or is not in the same VPC as the target + // group. + ErrCodeInvalidTargetException = "InvalidTarget" + + // ErrCodeListenerNotFoundException for service response error code + // "ListenerNotFound". + // + // The specified listener does not exist. + ErrCodeListenerNotFoundException = "ListenerNotFound" + + // ErrCodeLoadBalancerNotFoundException for service response error code + // "LoadBalancerNotFound". + // + // The specified load balancer does not exist. + ErrCodeLoadBalancerNotFoundException = "LoadBalancerNotFound" + + // ErrCodeOperationNotPermittedException for service response error code + // "OperationNotPermitted". + // + // This operation is not allowed. + ErrCodeOperationNotPermittedException = "OperationNotPermitted" + + // ErrCodePriorityInUseException for service response error code + // "PriorityInUse". + // + // The specified priority is in use. + ErrCodePriorityInUseException = "PriorityInUse" + + // ErrCodeResourceInUseException for service response error code + // "ResourceInUse". + // + // A specified resource is in use. + ErrCodeResourceInUseException = "ResourceInUse" + + // ErrCodeRuleNotFoundException for service response error code + // "RuleNotFound". + // + // The specified rule does not exist. + ErrCodeRuleNotFoundException = "RuleNotFound" + + // ErrCodeSSLPolicyNotFoundException for service response error code + // "SSLPolicyNotFound". + // + // The specified SSL policy does not exist. + ErrCodeSSLPolicyNotFoundException = "SSLPolicyNotFound" + + // ErrCodeSubnetNotFoundException for service response error code + // "SubnetNotFound". + // + // The specified subnet does not exist. + ErrCodeSubnetNotFoundException = "SubnetNotFound" + + // ErrCodeTargetGroupAssociationLimitException for service response error code + // "TargetGroupAssociationLimit". + // + // You've reached the limit on the number of load balancers per target group. + ErrCodeTargetGroupAssociationLimitException = "TargetGroupAssociationLimit" + + // ErrCodeTargetGroupNotFoundException for service response error code + // "TargetGroupNotFound". + // + // The specified target group does not exist. 
+ ErrCodeTargetGroupNotFoundException = "TargetGroupNotFound" + + // ErrCodeTooManyCertificatesException for service response error code + // "TooManyCertificates". + // + // You've reached the limit on the number of certificates per listener. + ErrCodeTooManyCertificatesException = "TooManyCertificates" + + // ErrCodeTooManyListenersException for service response error code + // "TooManyListeners". + // + // You've reached the limit on the number of listeners per load balancer. + ErrCodeTooManyListenersException = "TooManyListeners" + + // ErrCodeTooManyLoadBalancersException for service response error code + // "TooManyLoadBalancers". + // + // You've reached the limit on the number of load balancers for your AWS account. + ErrCodeTooManyLoadBalancersException = "TooManyLoadBalancers" + + // ErrCodeTooManyRegistrationsForTargetIdException for service response error code + // "TooManyRegistrationsForTargetId". + // + // You've reached the limit on the number of times a target can be registered + // with a load balancer. + ErrCodeTooManyRegistrationsForTargetIdException = "TooManyRegistrationsForTargetId" + + // ErrCodeTooManyRulesException for service response error code + // "TooManyRules". + // + // You've reached the limit on the number of rules per load balancer. + ErrCodeTooManyRulesException = "TooManyRules" + + // ErrCodeTooManyTagsException for service response error code + // "TooManyTags". + // + // You've reached the limit on the number of tags per load balancer. + ErrCodeTooManyTagsException = "TooManyTags" + + // ErrCodeTooManyTargetGroupsException for service response error code + // "TooManyTargetGroups". + // + // You've reached the limit on the number of target groups for your AWS account. + ErrCodeTooManyTargetGroupsException = "TooManyTargetGroups" + + // ErrCodeTooManyTargetsException for service response error code + // "TooManyTargets". + // + // You've reached the limit on the number of targets. + ErrCodeTooManyTargetsException = "TooManyTargets" + + // ErrCodeUnsupportedProtocolException for service response error code + // "UnsupportedProtocol". + // + // The specified protocol is not supported. + ErrCodeUnsupportedProtocolException = "UnsupportedProtocol" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/elbv2/service.go b/vendor/github.com/aws/aws-sdk-go/service/elbv2/service.go new file mode 100644 index 000000000000..c3733846cace --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/elbv2/service.go @@ -0,0 +1,93 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package elbv2 + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/query" +) + +// ELBV2 provides the API operation methods for making requests to +// Elastic Load Balancing. See this package's package overview docs +// for details on the service. +// +// ELBV2 methods are safe to use concurrently. It is not safe to +// modify any of the struct's properties though. +type ELBV2 struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// Service information constants +const ( + ServiceName = "elasticloadbalancing" // Service endpoint prefix API calls made to.
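The ErrCode constants in the vendored errors.go above are meant to be compared against the Code() of an awserr.Error returned by the client. A small sketch of that pattern; the package name, helper function, and the idea of probing a single load balancer ARN are assumptions for illustration, not part of the vendored code:

package elbv2util // hypothetical package name, used only for this sketch

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/elbv2"
)

// describeByARN fetches a single load balancer and shows how the generated
// ErrCode constants are matched against the code carried by an awserr.Error.
func describeByARN(svc *elbv2.ELBV2, arn string) (*elbv2.LoadBalancer, error) {
	out, err := svc.DescribeLoadBalancers(&elbv2.DescribeLoadBalancersInput{
		LoadBalancerArns: []*string{aws.String(arn)},
	})
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			case elbv2.ErrCodeLoadBalancerNotFoundException:
				log.Printf("load balancer %s does not exist", arn)
				return nil, nil
			default:
				return nil, aerr
			}
		}
		return nil, err
	}
	if len(out.LoadBalancers) == 0 {
		return nil, nil
	}
	return out.LoadBalancers[0], nil
}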
+ EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata. +) + +// New creates a new instance of the ELBV2 client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a ELBV2 client from just a session. +// svc := elbv2.New(mySession) +// +// // Create a ELBV2 client with additional configuration +// svc := elbv2.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *ELBV2 { + c := p.ClientConfig(EndpointsID, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ELBV2 { + svc := &ELBV2{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningName: signingName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2015-12-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a ELBV2 operation and runs any +// custom request initialization. +func (c *ELBV2) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/elbv2/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/elbv2/waiters.go new file mode 100644 index 000000000000..f08f1aaf2039 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/elbv2/waiters.go @@ -0,0 +1,270 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package elbv2 + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// WaitUntilLoadBalancerAvailable uses the Elastic Load Balancing v2 API operation +// DescribeLoadBalancers to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *ELBV2) WaitUntilLoadBalancerAvailable(input *DescribeLoadBalancersInput) error { + return c.WaitUntilLoadBalancerAvailableWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilLoadBalancerAvailableWithContext is an extended version of WaitUntilLoadBalancerAvailable. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *ELBV2) WaitUntilLoadBalancerAvailableWithContext(ctx aws.Context, input *DescribeLoadBalancersInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilLoadBalancerAvailable", + MaxAttempts: 40, + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "LoadBalancers[].State.Code", + Expected: "active", + }, + { + State: request.RetryWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "LoadBalancers[].State.Code", + Expected: "provisioning", + }, + { + State: request.RetryWaiterState, + Matcher: request.ErrorWaiterMatch, + Expected: "LoadBalancerNotFound", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeLoadBalancersInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeLoadBalancersRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilLoadBalancerExists uses the Elastic Load Balancing v2 API operation +// DescribeLoadBalancers to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *ELBV2) WaitUntilLoadBalancerExists(input *DescribeLoadBalancersInput) error { + return c.WaitUntilLoadBalancerExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilLoadBalancerExistsWithContext is an extended version of WaitUntilLoadBalancerExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELBV2) WaitUntilLoadBalancerExistsWithContext(ctx aws.Context, input *DescribeLoadBalancersInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilLoadBalancerExists", + MaxAttempts: 40, + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 200, + }, + { + State: request.RetryWaiterState, + Matcher: request.ErrorWaiterMatch, + Expected: "LoadBalancerNotFound", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeLoadBalancersInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeLoadBalancersRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilLoadBalancersDeleted uses the Elastic Load Balancing v2 API operation +// DescribeLoadBalancers to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *ELBV2) WaitUntilLoadBalancersDeleted(input *DescribeLoadBalancersInput) error { + return c.WaitUntilLoadBalancersDeletedWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilLoadBalancersDeletedWithContext is an extended version of WaitUntilLoadBalancersDeleted. 
+// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELBV2) WaitUntilLoadBalancersDeletedWithContext(ctx aws.Context, input *DescribeLoadBalancersInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilLoadBalancersDeleted", + MaxAttempts: 40, + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.RetryWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "LoadBalancers[].State.Code", + Expected: "active", + }, + { + State: request.SuccessWaiterState, + Matcher: request.ErrorWaiterMatch, + Expected: "LoadBalancerNotFound", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeLoadBalancersInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeLoadBalancersRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilTargetDeregistered uses the Elastic Load Balancing v2 API operation +// DescribeTargetHealth to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *ELBV2) WaitUntilTargetDeregistered(input *DescribeTargetHealthInput) error { + return c.WaitUntilTargetDeregisteredWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilTargetDeregisteredWithContext is an extended version of WaitUntilTargetDeregistered. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELBV2) WaitUntilTargetDeregisteredWithContext(ctx aws.Context, input *DescribeTargetHealthInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilTargetDeregistered", + MaxAttempts: 40, + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.ErrorWaiterMatch, + Expected: "InvalidTarget", + }, + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "TargetHealthDescriptions[].TargetHealth.State", + Expected: "unused", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeTargetHealthInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeTargetHealthRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilTargetInService uses the Elastic Load Balancing v2 API operation +// DescribeTargetHealth to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. 
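Each waiter in this file polls DescribeLoadBalancers or DescribeTargetHealth with a constant 15-second delay for up to 40 attempts, i.e. roughly ten minutes, before returning an error. A sketch of driving one directly and, separately, of bounding it with a context while tightening the schedule through waiter options from the aws/request package; the ARN, timeout, and override values are illustrative assumptions:

package elbwait // hypothetical package name, used only for this sketch

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/service/elbv2"
)

// waitActive blocks until the load balancer reports State.Code == "active",
// using the generated waiter with its default 40 x 15s schedule.
func waitActive(svc *elbv2.ELBV2, arn string) error {
	return svc.WaitUntilLoadBalancerAvailable(&elbv2.DescribeLoadBalancersInput{
		LoadBalancerArns: []*string{aws.String(arn)},
	})
}

// waitActiveCtx does the same but bounds the wait with a context and
// overrides the polling schedule via waiter options.
func waitActiveCtx(svc *elbv2.ELBV2, arn string) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()

	return svc.WaitUntilLoadBalancerAvailableWithContext(ctx,
		&elbv2.DescribeLoadBalancersInput{LoadBalancerArns: []*string{aws.String(arn)}},
		request.WithWaiterMaxAttempts(20),
		request.WithWaiterDelay(request.ConstantWaiterDelay(10*time.Second)),
	)
}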
+func (c *ELBV2) WaitUntilTargetInService(input *DescribeTargetHealthInput) error { + return c.WaitUntilTargetInServiceWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilTargetInServiceWithContext is an extended version of WaitUntilTargetInService. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ELBV2) WaitUntilTargetInServiceWithContext(ctx aws.Context, input *DescribeTargetHealthInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilTargetInService", + MaxAttempts: 40, + Delay: request.ConstantWaiterDelay(15 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "TargetHealthDescriptions[].TargetHealth.State", + Expected: "healthy", + }, + { + State: request.RetryWaiterState, + Matcher: request.ErrorWaiterMatch, + Expected: "InvalidInstance", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeTargetHealthInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeTargetHealthRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/kms/api.go b/vendor/github.com/aws/aws-sdk-go/service/kms/api.go index 31c464d6c459..ac1c5fda8f84 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kms/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kms/api.go @@ -1,11 +1,12 @@ -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. -// Package kms provides a client for AWS Key Management Service. package kms import ( + "fmt" "time" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awsutil" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/private/protocol" @@ -16,19 +17,18 @@ const opCancelKeyDeletion = "CancelKeyDeletion" // CancelKeyDeletionRequest generates a "aws/request.Request" representing the // client's request for the CancelKeyDeletion operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See CancelKeyDeletion for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CancelKeyDeletion method directly -// instead. +// See CancelKeyDeletion for more information on using the CancelKeyDeletion +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CancelKeyDeletionRequest method. // req, resp := client.CancelKeyDeletionRequest(params) @@ -73,22 +73,22 @@ func (c *KMS) CancelKeyDeletionRequest(input *CancelKeyDeletionInput) (req *requ // API operation CancelKeyDeletion for usage and error information. // // Returned Error Codes: -// * NotFoundException +// * ErrCodeNotFoundException "NotFoundException" // The request was rejected because the specified entity or resource could not // be found. // -// * InvalidArnException +// * ErrCodeInvalidArnException "InvalidArnException" // The request was rejected because a specified ARN was not valid. // -// * DependencyTimeoutException +// * ErrCodeDependencyTimeoutException "DependencyTimeoutException" // The system timed out while trying to fulfill the request. The request can // be retried. // -// * InternalException +// * ErrCodeInternalException "InternalException" // The request was rejected because an internal exception occurred. The request // can be retried. // -// * InvalidStateException +// * ErrCodeInvalidStateException "InvalidStateException" // The request was rejected because the state of the specified resource is not // valid for this request. // @@ -99,27 +99,41 @@ func (c *KMS) CancelKeyDeletionRequest(input *CancelKeyDeletionInput) (req *requ // Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CancelKeyDeletion func (c *KMS) CancelKeyDeletion(input *CancelKeyDeletionInput) (*CancelKeyDeletionOutput, error) { req, out := c.CancelKeyDeletionRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CancelKeyDeletionWithContext is the same as CancelKeyDeletion with the addition of +// the ability to pass a context and additional request options. +// +// See CancelKeyDeletion for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *KMS) CancelKeyDeletionWithContext(ctx aws.Context, input *CancelKeyDeletionInput, opts ...request.Option) (*CancelKeyDeletionOutput, error) { + req, out := c.CancelKeyDeletionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCreateAlias = "CreateAlias" // CreateAliasRequest generates a "aws/request.Request" representing the // client's request for the CreateAlias operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See CreateAlias for usage and error information. +// See CreateAlias for more information on using the CreateAlias +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. 
If -// you just want the service response, call the CreateAlias method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateAliasRequest method. // req, resp := client.CreateAliasRequest(params) @@ -171,31 +185,31 @@ func (c *KMS) CreateAliasRequest(input *CreateAliasInput) (req *request.Request, // API operation CreateAlias for usage and error information. // // Returned Error Codes: -// * DependencyTimeoutException +// * ErrCodeDependencyTimeoutException "DependencyTimeoutException" // The system timed out while trying to fulfill the request. The request can // be retried. // -// * AlreadyExistsException +// * ErrCodeAlreadyExistsException "AlreadyExistsException" // The request was rejected because it attempted to create a resource that already // exists. // -// * NotFoundException +// * ErrCodeNotFoundException "NotFoundException" // The request was rejected because the specified entity or resource could not // be found. // -// * InvalidAliasNameException +// * ErrCodeInvalidAliasNameException "InvalidAliasNameException" // The request was rejected because the specified alias name is not valid. // -// * InternalException +// * ErrCodeInternalException "InternalException" // The request was rejected because an internal exception occurred. The request // can be retried. // -// * LimitExceededException +// * ErrCodeLimitExceededException "LimitExceededException" // The request was rejected because a limit was exceeded. For more information, // see Limits (http://docs.aws.amazon.com/kms/latest/developerguide/limits.html) // in the AWS Key Management Service Developer Guide. // -// * InvalidStateException +// * ErrCodeInvalidStateException "InvalidStateException" // The request was rejected because the state of the specified resource is not // valid for this request. // @@ -206,27 +220,41 @@ func (c *KMS) CreateAliasRequest(input *CreateAliasInput) (req *request.Request, // Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CreateAlias func (c *KMS) CreateAlias(input *CreateAliasInput) (*CreateAliasOutput, error) { req, out := c.CreateAliasRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CreateAliasWithContext is the same as CreateAlias with the addition of +// the ability to pass a context and additional request options. +// +// See CreateAlias for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *KMS) CreateAliasWithContext(ctx aws.Context, input *CreateAliasInput, opts ...request.Option) (*CreateAliasOutput, error) { + req, out := c.CreateAliasRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCreateGrant = "CreateGrant" // CreateGrantRequest generates a "aws/request.Request" representing the // client's request for the CreateGrant operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. 
+// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See CreateGrant for usage and error information. +// See CreateGrant for more information on using the CreateGrant +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateGrant method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateGrantRequest method. // req, resp := client.CreateGrantRequest(params) @@ -269,33 +297,33 @@ func (c *KMS) CreateGrantRequest(input *CreateGrantInput) (req *request.Request, // API operation CreateGrant for usage and error information. // // Returned Error Codes: -// * NotFoundException +// * ErrCodeNotFoundException "NotFoundException" // The request was rejected because the specified entity or resource could not // be found. // -// * DisabledException +// * ErrCodeDisabledException "DisabledException" // The request was rejected because the specified CMK is not enabled. // -// * DependencyTimeoutException +// * ErrCodeDependencyTimeoutException "DependencyTimeoutException" // The system timed out while trying to fulfill the request. The request can // be retried. // -// * InvalidArnException +// * ErrCodeInvalidArnException "InvalidArnException" // The request was rejected because a specified ARN was not valid. // -// * InternalException +// * ErrCodeInternalException "InternalException" // The request was rejected because an internal exception occurred. The request // can be retried. // -// * InvalidGrantTokenException +// * ErrCodeInvalidGrantTokenException "InvalidGrantTokenException" // The request was rejected because the specified grant token is not valid. // -// * LimitExceededException +// * ErrCodeLimitExceededException "LimitExceededException" // The request was rejected because a limit was exceeded. For more information, // see Limits (http://docs.aws.amazon.com/kms/latest/developerguide/limits.html) // in the AWS Key Management Service Developer Guide. // -// * InvalidStateException +// * ErrCodeInvalidStateException "InvalidStateException" // The request was rejected because the state of the specified resource is not // valid for this request. // @@ -306,27 +334,41 @@ func (c *KMS) CreateGrantRequest(input *CreateGrantInput) (req *request.Request, // Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CreateGrant func (c *KMS) CreateGrant(input *CreateGrantInput) (*CreateGrantOutput, error) { req, out := c.CreateGrantRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CreateGrantWithContext is the same as CreateGrant with the addition of +// the ability to pass a context and additional request options. +// +// See CreateGrant for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *KMS) CreateGrantWithContext(ctx aws.Context, input *CreateGrantInput, opts ...request.Option) (*CreateGrantOutput, error) { + req, out := c.CreateGrantRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCreateKey = "CreateKey" // CreateKeyRequest generates a "aws/request.Request" representing the // client's request for the CreateKey operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See CreateKey for usage and error information. +// See CreateKey for more information on using the CreateKey +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateKey method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateKeyRequest method. // req, resp := client.CreateKeyRequest(params) @@ -375,54 +417,71 @@ func (c *KMS) CreateKeyRequest(input *CreateKeyInput) (req *request.Request, out // API operation CreateKey for usage and error information. // // Returned Error Codes: -// * MalformedPolicyDocumentException +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocumentException" // The request was rejected because the specified policy is not syntactically // or semantically correct. // -// * DependencyTimeoutException +// * ErrCodeDependencyTimeoutException "DependencyTimeoutException" // The system timed out while trying to fulfill the request. The request can // be retried. // -// * InvalidArnException +// * ErrCodeInvalidArnException "InvalidArnException" // The request was rejected because a specified ARN was not valid. // -// * UnsupportedOperationException +// * ErrCodeUnsupportedOperationException "UnsupportedOperationException" // The request was rejected because a specified parameter is not supported or // a specified resource is not valid for this operation. // -// * InternalException +// * ErrCodeInternalException "InternalException" // The request was rejected because an internal exception occurred. The request // can be retried. // -// * LimitExceededException +// * ErrCodeLimitExceededException "LimitExceededException" // The request was rejected because a limit was exceeded. For more information, // see Limits (http://docs.aws.amazon.com/kms/latest/developerguide/limits.html) // in the AWS Key Management Service Developer Guide. 
// +// * ErrCodeTagException "TagException" +// The request was rejected because one or more tags are not valid. +// // Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CreateKey func (c *KMS) CreateKey(input *CreateKeyInput) (*CreateKeyOutput, error) { req, out := c.CreateKeyRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CreateKeyWithContext is the same as CreateKey with the addition of +// the ability to pass a context and additional request options. +// +// See CreateKey for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *KMS) CreateKeyWithContext(ctx aws.Context, input *CreateKeyInput, opts ...request.Option) (*CreateKeyOutput, error) { + req, out := c.CreateKeyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDecrypt = "Decrypt" // DecryptRequest generates a "aws/request.Request" representing the // client's request for the Decrypt operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See Decrypt for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the Decrypt method directly -// instead. +// See Decrypt for more information on using the Decrypt +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DecryptRequest method. // req, resp := client.DecryptRequest(params) @@ -477,33 +536,33 @@ func (c *KMS) DecryptRequest(input *DecryptInput) (req *request.Request, output // API operation Decrypt for usage and error information. // // Returned Error Codes: -// * NotFoundException +// * ErrCodeNotFoundException "NotFoundException" // The request was rejected because the specified entity or resource could not // be found. // -// * DisabledException +// * ErrCodeDisabledException "DisabledException" // The request was rejected because the specified CMK is not enabled. // -// * InvalidCiphertextException +// * ErrCodeInvalidCiphertextException "InvalidCiphertextException" // The request was rejected because the specified ciphertext has been corrupted // or is otherwise invalid. // -// * KeyUnavailableException +// * ErrCodeKeyUnavailableException "KeyUnavailableException" // The request was rejected because the specified CMK was not available. The // request can be retried. 
// -// * DependencyTimeoutException +// * ErrCodeDependencyTimeoutException "DependencyTimeoutException" // The system timed out while trying to fulfill the request. The request can // be retried. // -// * InvalidGrantTokenException +// * ErrCodeInvalidGrantTokenException "InvalidGrantTokenException" // The request was rejected because the specified grant token is not valid. // -// * InternalException +// * ErrCodeInternalException "InternalException" // The request was rejected because an internal exception occurred. The request // can be retried. // -// * InvalidStateException +// * ErrCodeInvalidStateException "InvalidStateException" // The request was rejected because the state of the specified resource is not // valid for this request. // @@ -514,27 +573,41 @@ func (c *KMS) DecryptRequest(input *DecryptInput) (req *request.Request, output // Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/Decrypt func (c *KMS) Decrypt(input *DecryptInput) (*DecryptOutput, error) { req, out := c.DecryptRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DecryptWithContext is the same as Decrypt with the addition of +// the ability to pass a context and additional request options. +// +// See Decrypt for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *KMS) DecryptWithContext(ctx aws.Context, input *DecryptInput, opts ...request.Option) (*DecryptOutput, error) { + req, out := c.DecryptRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteAlias = "DeleteAlias" // DeleteAliasRequest generates a "aws/request.Request" representing the // client's request for the DeleteAlias operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DeleteAlias for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteAlias method directly -// instead. +// See DeleteAlias for more information on using the DeleteAlias +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteAliasRequest method. // req, resp := client.DeleteAliasRequest(params) @@ -575,19 +648,19 @@ func (c *KMS) DeleteAliasRequest(input *DeleteAliasInput) (req *request.Request, // API operation DeleteAlias for usage and error information. 
// // Returned Error Codes: -// * DependencyTimeoutException +// * ErrCodeDependencyTimeoutException "DependencyTimeoutException" // The system timed out while trying to fulfill the request. The request can // be retried. // -// * NotFoundException +// * ErrCodeNotFoundException "NotFoundException" // The request was rejected because the specified entity or resource could not // be found. // -// * InternalException +// * ErrCodeInternalException "InternalException" // The request was rejected because an internal exception occurred. The request // can be retried. // -// * InvalidStateException +// * ErrCodeInvalidStateException "InvalidStateException" // The request was rejected because the state of the specified resource is not // valid for this request. // @@ -598,27 +671,41 @@ func (c *KMS) DeleteAliasRequest(input *DeleteAliasInput) (req *request.Request, // Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DeleteAlias func (c *KMS) DeleteAlias(input *DeleteAliasInput) (*DeleteAliasOutput, error) { req, out := c.DeleteAliasRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteAliasWithContext is the same as DeleteAlias with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteAlias for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *KMS) DeleteAliasWithContext(ctx aws.Context, input *DeleteAliasInput, opts ...request.Option) (*DeleteAliasOutput, error) { + req, out := c.DeleteAliasRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteImportedKeyMaterial = "DeleteImportedKeyMaterial" // DeleteImportedKeyMaterialRequest generates a "aws/request.Request" representing the // client's request for the DeleteImportedKeyMaterial operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DeleteImportedKeyMaterial for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteImportedKeyMaterial method directly -// instead. +// See DeleteImportedKeyMaterial for more information on using the DeleteImportedKeyMaterial +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteImportedKeyMaterialRequest method. 
// req, resp := client.DeleteImportedKeyMaterialRequest(params) @@ -668,26 +755,26 @@ func (c *KMS) DeleteImportedKeyMaterialRequest(input *DeleteImportedKeyMaterialI // API operation DeleteImportedKeyMaterial for usage and error information. // // Returned Error Codes: -// * InvalidArnException +// * ErrCodeInvalidArnException "InvalidArnException" // The request was rejected because a specified ARN was not valid. // -// * UnsupportedOperationException +// * ErrCodeUnsupportedOperationException "UnsupportedOperationException" // The request was rejected because a specified parameter is not supported or // a specified resource is not valid for this operation. // -// * DependencyTimeoutException +// * ErrCodeDependencyTimeoutException "DependencyTimeoutException" // The system timed out while trying to fulfill the request. The request can // be retried. // -// * NotFoundException +// * ErrCodeNotFoundException "NotFoundException" // The request was rejected because the specified entity or resource could not // be found. // -// * InternalException +// * ErrCodeInternalException "InternalException" // The request was rejected because an internal exception occurred. The request // can be retried. // -// * InvalidStateException +// * ErrCodeInvalidStateException "InvalidStateException" // The request was rejected because the state of the specified resource is not // valid for this request. // @@ -698,27 +785,41 @@ func (c *KMS) DeleteImportedKeyMaterialRequest(input *DeleteImportedKeyMaterialI // Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DeleteImportedKeyMaterial func (c *KMS) DeleteImportedKeyMaterial(input *DeleteImportedKeyMaterialInput) (*DeleteImportedKeyMaterialOutput, error) { req, out := c.DeleteImportedKeyMaterialRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteImportedKeyMaterialWithContext is the same as DeleteImportedKeyMaterial with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteImportedKeyMaterial for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *KMS) DeleteImportedKeyMaterialWithContext(ctx aws.Context, input *DeleteImportedKeyMaterialInput, opts ...request.Option) (*DeleteImportedKeyMaterialOutput, error) { + req, out := c.DeleteImportedKeyMaterialRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDescribeKey = "DescribeKey" // DescribeKeyRequest generates a "aws/request.Request" representing the // client's request for the DescribeKey operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DescribeKey for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. 
If -// you just want the service response, call the DescribeKey method directly -// instead. +// See DescribeKey for more information on using the DescribeKey +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DescribeKeyRequest method. // req, resp := client.DescribeKeyRequest(params) @@ -757,45 +858,59 @@ func (c *KMS) DescribeKeyRequest(input *DescribeKeyInput) (req *request.Request, // API operation DescribeKey for usage and error information. // // Returned Error Codes: -// * NotFoundException +// * ErrCodeNotFoundException "NotFoundException" // The request was rejected because the specified entity or resource could not // be found. // -// * InvalidArnException +// * ErrCodeInvalidArnException "InvalidArnException" // The request was rejected because a specified ARN was not valid. // -// * DependencyTimeoutException +// * ErrCodeDependencyTimeoutException "DependencyTimeoutException" // The system timed out while trying to fulfill the request. The request can // be retried. // -// * InternalException +// * ErrCodeInternalException "InternalException" // The request was rejected because an internal exception occurred. The request // can be retried. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DescribeKey func (c *KMS) DescribeKey(input *DescribeKeyInput) (*DescribeKeyOutput, error) { req, out := c.DescribeKeyRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DescribeKeyWithContext is the same as DescribeKey with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeKey for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *KMS) DescribeKeyWithContext(ctx aws.Context, input *DescribeKeyInput, opts ...request.Option) (*DescribeKeyOutput, error) { + req, out := c.DescribeKeyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDisableKey = "DisableKey" // DisableKeyRequest generates a "aws/request.Request" representing the // client's request for the DisableKey operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DisableKey for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DisableKey method directly -// instead. +// See DisableKey for more information on using the DisableKey +// API call, and error handling. 
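Every KMS operation in this file gains the same WithContext variant: the method sets the supplied context on the request, applies any extra options, and then calls Send. A sketch combining DescribeKeyWithContext with the ErrCode constants now referenced in the error documentation; the package name, helper, key alias, and timeout are assumptions for illustration:

package kmscheck // hypothetical package name, used only for this sketch

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/kms"
)

// keyExists checks for a CMK by alias, bounding the call with a context and
// mapping the generated "not found" error code to a boolean result.
func keyExists(svc *kms.KMS, alias string) (bool, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	_, err := svc.DescribeKeyWithContext(ctx, &kms.DescribeKeyInput{
		KeyId: aws.String(alias), // e.g. "alias/my-key"
	})
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == kms.ErrCodeNotFoundException {
			return false, nil
		}
		return false, fmt.Errorf("describe key: %v", err)
	}
	return true, nil
}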
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DisableKeyRequest method. // req, resp := client.DisableKeyRequest(params) @@ -840,22 +955,22 @@ func (c *KMS) DisableKeyRequest(input *DisableKeyInput) (req *request.Request, o // API operation DisableKey for usage and error information. // // Returned Error Codes: -// * NotFoundException +// * ErrCodeNotFoundException "NotFoundException" // The request was rejected because the specified entity or resource could not // be found. // -// * InvalidArnException +// * ErrCodeInvalidArnException "InvalidArnException" // The request was rejected because a specified ARN was not valid. // -// * DependencyTimeoutException +// * ErrCodeDependencyTimeoutException "DependencyTimeoutException" // The system timed out while trying to fulfill the request. The request can // be retried. // -// * InternalException +// * ErrCodeInternalException "InternalException" // The request was rejected because an internal exception occurred. The request // can be retried. // -// * InvalidStateException +// * ErrCodeInvalidStateException "InvalidStateException" // The request was rejected because the state of the specified resource is not // valid for this request. // @@ -866,27 +981,41 @@ func (c *KMS) DisableKeyRequest(input *DisableKeyInput) (req *request.Request, o // Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DisableKey func (c *KMS) DisableKey(input *DisableKeyInput) (*DisableKeyOutput, error) { req, out := c.DisableKeyRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DisableKeyWithContext is the same as DisableKey with the addition of +// the ability to pass a context and additional request options. +// +// See DisableKey for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *KMS) DisableKeyWithContext(ctx aws.Context, input *DisableKeyInput, opts ...request.Option) (*DisableKeyOutput, error) { + req, out := c.DisableKeyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDisableKeyRotation = "DisableKeyRotation" // DisableKeyRotationRequest generates a "aws/request.Request" representing the // client's request for the DisableKeyRotation operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DisableKeyRotation for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. 
If -// you just want the service response, call the DisableKeyRotation method directly -// instead. +// See DisableKeyRotation for more information on using the DisableKeyRotation +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DisableKeyRotationRequest method. // req, resp := client.DisableKeyRotationRequest(params) @@ -927,25 +1056,25 @@ func (c *KMS) DisableKeyRotationRequest(input *DisableKeyRotationInput) (req *re // API operation DisableKeyRotation for usage and error information. // // Returned Error Codes: -// * NotFoundException +// * ErrCodeNotFoundException "NotFoundException" // The request was rejected because the specified entity or resource could not // be found. // -// * DisabledException +// * ErrCodeDisabledException "DisabledException" // The request was rejected because the specified CMK is not enabled. // -// * InvalidArnException +// * ErrCodeInvalidArnException "InvalidArnException" // The request was rejected because a specified ARN was not valid. // -// * DependencyTimeoutException +// * ErrCodeDependencyTimeoutException "DependencyTimeoutException" // The system timed out while trying to fulfill the request. The request can // be retried. // -// * InternalException +// * ErrCodeInternalException "InternalException" // The request was rejected because an internal exception occurred. The request // can be retried. // -// * InvalidStateException +// * ErrCodeInvalidStateException "InvalidStateException" // The request was rejected because the state of the specified resource is not // valid for this request. // @@ -953,34 +1082,48 @@ func (c *KMS) DisableKeyRotationRequest(input *DisableKeyRotationInput) (req *re // Key State Affects Use of a Customer Master Key (http://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) // in the AWS Key Management Service Developer Guide. // -// * UnsupportedOperationException +// * ErrCodeUnsupportedOperationException "UnsupportedOperationException" // The request was rejected because a specified parameter is not supported or // a specified resource is not valid for this operation. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DisableKeyRotation func (c *KMS) DisableKeyRotation(input *DisableKeyRotationInput) (*DisableKeyRotationOutput, error) { req, out := c.DisableKeyRotationRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DisableKeyRotationWithContext is the same as DisableKeyRotation with the addition of +// the ability to pass a context and additional request options. +// +// See DisableKeyRotation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *KMS) DisableKeyRotationWithContext(ctx aws.Context, input *DisableKeyRotationInput, opts ...request.Option) (*DisableKeyRotationOutput, error) { + req, out := c.DisableKeyRotationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() } const opEnableKey = "EnableKey" // EnableKeyRequest generates a "aws/request.Request" representing the // client's request for the EnableKey operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See EnableKey for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the EnableKey method directly -// instead. +// See EnableKey for more information on using the EnableKey +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the EnableKeyRequest method. // req, resp := client.EnableKeyRequest(params) @@ -1021,27 +1164,27 @@ func (c *KMS) EnableKeyRequest(input *EnableKeyInput) (req *request.Request, out // API operation EnableKey for usage and error information. // // Returned Error Codes: -// * NotFoundException +// * ErrCodeNotFoundException "NotFoundException" // The request was rejected because the specified entity or resource could not // be found. // -// * InvalidArnException +// * ErrCodeInvalidArnException "InvalidArnException" // The request was rejected because a specified ARN was not valid. // -// * DependencyTimeoutException +// * ErrCodeDependencyTimeoutException "DependencyTimeoutException" // The system timed out while trying to fulfill the request. The request can // be retried. // -// * InternalException +// * ErrCodeInternalException "InternalException" // The request was rejected because an internal exception occurred. The request // can be retried. // -// * LimitExceededException +// * ErrCodeLimitExceededException "LimitExceededException" // The request was rejected because a limit was exceeded. For more information, // see Limits (http://docs.aws.amazon.com/kms/latest/developerguide/limits.html) // in the AWS Key Management Service Developer Guide. // -// * InvalidStateException +// * ErrCodeInvalidStateException "InvalidStateException" // The request was rejected because the state of the specified resource is not // valid for this request. // @@ -1052,27 +1195,41 @@ func (c *KMS) EnableKeyRequest(input *EnableKeyInput) (req *request.Request, out // Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/EnableKey func (c *KMS) EnableKey(input *EnableKeyInput) (*EnableKeyOutput, error) { req, out := c.EnableKeyRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// EnableKeyWithContext is the same as EnableKey with the addition of +// the ability to pass a context and additional request options. +// +// See EnableKey for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *KMS) EnableKeyWithContext(ctx aws.Context, input *EnableKeyInput, opts ...request.Option) (*EnableKeyOutput, error) { + req, out := c.EnableKeyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opEnableKeyRotation = "EnableKeyRotation" // EnableKeyRotationRequest generates a "aws/request.Request" representing the // client's request for the EnableKeyRotation operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See EnableKeyRotation for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the EnableKeyRotation method directly -// instead. +// See EnableKeyRotation for more information on using the EnableKeyRotation +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the EnableKeyRotationRequest method. // req, resp := client.EnableKeyRotationRequest(params) @@ -1113,25 +1270,25 @@ func (c *KMS) EnableKeyRotationRequest(input *EnableKeyRotationInput) (req *requ // API operation EnableKeyRotation for usage and error information. // // Returned Error Codes: -// * NotFoundException +// * ErrCodeNotFoundException "NotFoundException" // The request was rejected because the specified entity or resource could not // be found. // -// * DisabledException +// * ErrCodeDisabledException "DisabledException" // The request was rejected because the specified CMK is not enabled. // -// * InvalidArnException +// * ErrCodeInvalidArnException "InvalidArnException" // The request was rejected because a specified ARN was not valid. // -// * DependencyTimeoutException +// * ErrCodeDependencyTimeoutException "DependencyTimeoutException" // The system timed out while trying to fulfill the request. The request can // be retried. // -// * InternalException +// * ErrCodeInternalException "InternalException" // The request was rejected because an internal exception occurred. The request // can be retried. // -// * InvalidStateException +// * ErrCodeInvalidStateException "InvalidStateException" // The request was rejected because the state of the specified resource is not // valid for this request. // @@ -1139,34 +1296,48 @@ func (c *KMS) EnableKeyRotationRequest(input *EnableKeyRotationInput) (req *requ // Key State Affects Use of a Customer Master Key (http://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) // in the AWS Key Management Service Developer Guide. 
// -// * UnsupportedOperationException +// * ErrCodeUnsupportedOperationException "UnsupportedOperationException" // The request was rejected because a specified parameter is not supported or // a specified resource is not valid for this operation. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/EnableKeyRotation func (c *KMS) EnableKeyRotation(input *EnableKeyRotationInput) (*EnableKeyRotationOutput, error) { req, out := c.EnableKeyRotationRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// EnableKeyRotationWithContext is the same as EnableKeyRotation with the addition of +// the ability to pass a context and additional request options. +// +// See EnableKeyRotation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *KMS) EnableKeyRotationWithContext(ctx aws.Context, input *EnableKeyRotationInput, opts ...request.Option) (*EnableKeyRotationOutput, error) { + req, out := c.EnableKeyRotationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opEncrypt = "Encrypt" // EncryptRequest generates a "aws/request.Request" representing the // client's request for the Encrypt operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See Encrypt for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the Encrypt method directly -// instead. +// See Encrypt for more information on using the Encrypt +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the EncryptRequest method. // req, resp := client.EncryptRequest(params) @@ -1224,32 +1395,32 @@ func (c *KMS) EncryptRequest(input *EncryptInput) (req *request.Request, output // API operation Encrypt for usage and error information. // // Returned Error Codes: -// * NotFoundException +// * ErrCodeNotFoundException "NotFoundException" // The request was rejected because the specified entity or resource could not // be found. // -// * DisabledException +// * ErrCodeDisabledException "DisabledException" // The request was rejected because the specified CMK is not enabled. // -// * KeyUnavailableException +// * ErrCodeKeyUnavailableException "KeyUnavailableException" // The request was rejected because the specified CMK was not available. The // request can be retried. 
// -// * DependencyTimeoutException +// * ErrCodeDependencyTimeoutException "DependencyTimeoutException" // The system timed out while trying to fulfill the request. The request can // be retried. // -// * InvalidKeyUsageException +// * ErrCodeInvalidKeyUsageException "InvalidKeyUsageException" // The request was rejected because the specified KeySpec value is not valid. // -// * InvalidGrantTokenException +// * ErrCodeInvalidGrantTokenException "InvalidGrantTokenException" // The request was rejected because the specified grant token is not valid. // -// * InternalException +// * ErrCodeInternalException "InternalException" // The request was rejected because an internal exception occurred. The request // can be retried. // -// * InvalidStateException +// * ErrCodeInvalidStateException "InvalidStateException" // The request was rejected because the state of the specified resource is not // valid for this request. // @@ -1260,27 +1431,41 @@ func (c *KMS) EncryptRequest(input *EncryptInput) (req *request.Request, output // Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/Encrypt func (c *KMS) Encrypt(input *EncryptInput) (*EncryptOutput, error) { req, out := c.EncryptRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// EncryptWithContext is the same as Encrypt with the addition of +// the ability to pass a context and additional request options. +// +// See Encrypt for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *KMS) EncryptWithContext(ctx aws.Context, input *EncryptInput, opts ...request.Option) (*EncryptOutput, error) { + req, out := c.EncryptRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGenerateDataKey = "GenerateDataKey" // GenerateDataKeyRequest generates a "aws/request.Request" representing the // client's request for the GenerateDataKey operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See GenerateDataKey for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GenerateDataKey method directly -// instead. +// See GenerateDataKey for more information on using the GenerateDataKey +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GenerateDataKeyRequest method. 
// req, resp := client.GenerateDataKeyRequest(params) @@ -1344,7 +1529,7 @@ func (c *KMS) GenerateDataKeyRequest(input *GenerateDataKeyInput) (req *request. // data key from memory. // // To return only an encrypted copy of the data key, use GenerateDataKeyWithoutPlaintext. -// To return an arbitrary unpredictable byte string, use GenerateRandom. +// To return a random byte string that is cryptographically secure, use GenerateRandom. // // If you use the optional EncryptionContext field, you must store at least // enough information to be able to reconstruct the full encryption context @@ -1362,32 +1547,32 @@ func (c *KMS) GenerateDataKeyRequest(input *GenerateDataKeyInput) (req *request. // API operation GenerateDataKey for usage and error information. // // Returned Error Codes: -// * NotFoundException +// * ErrCodeNotFoundException "NotFoundException" // The request was rejected because the specified entity or resource could not // be found. // -// * DisabledException +// * ErrCodeDisabledException "DisabledException" // The request was rejected because the specified CMK is not enabled. // -// * KeyUnavailableException +// * ErrCodeKeyUnavailableException "KeyUnavailableException" // The request was rejected because the specified CMK was not available. The // request can be retried. // -// * DependencyTimeoutException +// * ErrCodeDependencyTimeoutException "DependencyTimeoutException" // The system timed out while trying to fulfill the request. The request can // be retried. // -// * InvalidKeyUsageException +// * ErrCodeInvalidKeyUsageException "InvalidKeyUsageException" // The request was rejected because the specified KeySpec value is not valid. // -// * InvalidGrantTokenException +// * ErrCodeInvalidGrantTokenException "InvalidGrantTokenException" // The request was rejected because the specified grant token is not valid. // -// * InternalException +// * ErrCodeInternalException "InternalException" // The request was rejected because an internal exception occurred. The request // can be retried. // -// * InvalidStateException +// * ErrCodeInvalidStateException "InvalidStateException" // The request was rejected because the state of the specified resource is not // valid for this request. // @@ -1398,27 +1583,41 @@ func (c *KMS) GenerateDataKeyRequest(input *GenerateDataKeyInput) (req *request. // Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateDataKey func (c *KMS) GenerateDataKey(input *GenerateDataKeyInput) (*GenerateDataKeyOutput, error) { req, out := c.GenerateDataKeyRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GenerateDataKeyWithContext is the same as GenerateDataKey with the addition of +// the ability to pass a context and additional request options. +// +// See GenerateDataKey for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *KMS) GenerateDataKeyWithContext(ctx aws.Context, input *GenerateDataKeyInput, opts ...request.Option) (*GenerateDataKeyOutput, error) { + req, out := c.GenerateDataKeyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() } const opGenerateDataKeyWithoutPlaintext = "GenerateDataKeyWithoutPlaintext" // GenerateDataKeyWithoutPlaintextRequest generates a "aws/request.Request" representing the // client's request for the GenerateDataKeyWithoutPlaintext operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See GenerateDataKeyWithoutPlaintext for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GenerateDataKeyWithoutPlaintext method directly -// instead. +// See GenerateDataKeyWithoutPlaintext for more information on using the GenerateDataKeyWithoutPlaintext +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GenerateDataKeyWithoutPlaintextRequest method. // req, resp := client.GenerateDataKeyWithoutPlaintextRequest(params) @@ -1471,32 +1670,32 @@ func (c *KMS) GenerateDataKeyWithoutPlaintextRequest(input *GenerateDataKeyWitho // API operation GenerateDataKeyWithoutPlaintext for usage and error information. // // Returned Error Codes: -// * NotFoundException +// * ErrCodeNotFoundException "NotFoundException" // The request was rejected because the specified entity or resource could not // be found. // -// * DisabledException +// * ErrCodeDisabledException "DisabledException" // The request was rejected because the specified CMK is not enabled. // -// * KeyUnavailableException +// * ErrCodeKeyUnavailableException "KeyUnavailableException" // The request was rejected because the specified CMK was not available. The // request can be retried. // -// * DependencyTimeoutException +// * ErrCodeDependencyTimeoutException "DependencyTimeoutException" // The system timed out while trying to fulfill the request. The request can // be retried. // -// * InvalidKeyUsageException +// * ErrCodeInvalidKeyUsageException "InvalidKeyUsageException" // The request was rejected because the specified KeySpec value is not valid. // -// * InvalidGrantTokenException +// * ErrCodeInvalidGrantTokenException "InvalidGrantTokenException" // The request was rejected because the specified grant token is not valid. // -// * InternalException +// * ErrCodeInternalException "InternalException" // The request was rejected because an internal exception occurred. The request // can be retried. // -// * InvalidStateException +// * ErrCodeInvalidStateException "InvalidStateException" // The request was rejected because the state of the specified resource is not // valid for this request. 
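// Illustrative sketch (not part of the vendored diff): the error-code doc rewrites
// above ("NotFoundException" -> ErrCodeNotFoundException, etc.) refer to exported
// ErrCode* string constants generated alongside this file, so callers can switch on
// awserr.Error codes instead of comparing raw strings. Assumes an already-configured
// *kms.KMS client named "svc" and imports of the aws, awserr, and kms packages; the
// client name and key alias are hypothetical.
//
//    out, err := svc.Encrypt(&kms.EncryptInput{
//        KeyId:     aws.String("alias/example-key"), // hypothetical alias
//        Plaintext: []byte("secret"),
//    })
//    if err != nil {
//        if aerr, ok := err.(awserr.Error); ok {
//            switch aerr.Code() {
//            case kms.ErrCodeNotFoundException, kms.ErrCodeDisabledException:
//                // not retryable: the CMK is missing or disabled
//            case kms.ErrCodeDependencyTimeoutException, kms.ErrCodeInternalException:
//                // documented above as retryable
//            }
//        }
//        fmt.Println("Encrypt failed:", err)
//        return
//    }
//    _ = out.CiphertextBlob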
// @@ -1507,27 +1706,41 @@ func (c *KMS) GenerateDataKeyWithoutPlaintextRequest(input *GenerateDataKeyWitho // Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateDataKeyWithoutPlaintext func (c *KMS) GenerateDataKeyWithoutPlaintext(input *GenerateDataKeyWithoutPlaintextInput) (*GenerateDataKeyWithoutPlaintextOutput, error) { req, out := c.GenerateDataKeyWithoutPlaintextRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GenerateDataKeyWithoutPlaintextWithContext is the same as GenerateDataKeyWithoutPlaintext with the addition of +// the ability to pass a context and additional request options. +// +// See GenerateDataKeyWithoutPlaintext for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *KMS) GenerateDataKeyWithoutPlaintextWithContext(ctx aws.Context, input *GenerateDataKeyWithoutPlaintextInput, opts ...request.Option) (*GenerateDataKeyWithoutPlaintextOutput, error) { + req, out := c.GenerateDataKeyWithoutPlaintextRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGenerateRandom = "GenerateRandom" // GenerateRandomRequest generates a "aws/request.Request" representing the // client's request for the GenerateRandom operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See GenerateRandom for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GenerateRandom method directly -// instead. +// See GenerateRandom for more information on using the GenerateRandom +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GenerateRandomRequest method. // req, resp := client.GenerateRandomRequest(params) @@ -1556,7 +1769,11 @@ func (c *KMS) GenerateRandomRequest(input *GenerateRandomInput) (req *request.Re // GenerateRandom API operation for AWS Key Management Service. // -// Generates an unpredictable byte string. +// Returns a random byte string that is cryptographically secure. +// +// For more information about entropy and random number generation, see the +// AWS Key Management Service Cryptographic Details (https://d0.awsstatic.com/whitepapers/KMS-Cryptographic-Details.pdf) +// whitepaper. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1566,38 +1783,52 @@ func (c *KMS) GenerateRandomRequest(input *GenerateRandomInput) (req *request.Re // API operation GenerateRandom for usage and error information. // // Returned Error Codes: -// * DependencyTimeoutException +// * ErrCodeDependencyTimeoutException "DependencyTimeoutException" // The system timed out while trying to fulfill the request. The request can // be retried. // -// * InternalException +// * ErrCodeInternalException "InternalException" // The request was rejected because an internal exception occurred. The request // can be retried. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateRandom func (c *KMS) GenerateRandom(input *GenerateRandomInput) (*GenerateRandomOutput, error) { req, out := c.GenerateRandomRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GenerateRandomWithContext is the same as GenerateRandom with the addition of +// the ability to pass a context and additional request options. +// +// See GenerateRandom for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *KMS) GenerateRandomWithContext(ctx aws.Context, input *GenerateRandomInput, opts ...request.Option) (*GenerateRandomOutput, error) { + req, out := c.GenerateRandomRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetKeyPolicy = "GetKeyPolicy" // GetKeyPolicyRequest generates a "aws/request.Request" representing the // client's request for the GetKeyPolicy operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See GetKeyPolicy for usage and error information. +// See GetKeyPolicy for more information on using the GetKeyPolicy +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetKeyPolicy method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetKeyPolicyRequest method. // req, resp := client.GetKeyPolicyRequest(params) @@ -1636,22 +1867,22 @@ func (c *KMS) GetKeyPolicyRequest(input *GetKeyPolicyInput) (req *request.Reques // API operation GetKeyPolicy for usage and error information. // // Returned Error Codes: -// * NotFoundException +// * ErrCodeNotFoundException "NotFoundException" // The request was rejected because the specified entity or resource could not // be found. 
// -// * InvalidArnException +// * ErrCodeInvalidArnException "InvalidArnException" // The request was rejected because a specified ARN was not valid. // -// * DependencyTimeoutException +// * ErrCodeDependencyTimeoutException "DependencyTimeoutException" // The system timed out while trying to fulfill the request. The request can // be retried. // -// * InternalException +// * ErrCodeInternalException "InternalException" // The request was rejected because an internal exception occurred. The request // can be retried. // -// * InvalidStateException +// * ErrCodeInvalidStateException "InvalidStateException" // The request was rejected because the state of the specified resource is not // valid for this request. // @@ -1662,27 +1893,41 @@ func (c *KMS) GetKeyPolicyRequest(input *GetKeyPolicyInput) (req *request.Reques // Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GetKeyPolicy func (c *KMS) GetKeyPolicy(input *GetKeyPolicyInput) (*GetKeyPolicyOutput, error) { req, out := c.GetKeyPolicyRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetKeyPolicyWithContext is the same as GetKeyPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See GetKeyPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *KMS) GetKeyPolicyWithContext(ctx aws.Context, input *GetKeyPolicyInput, opts ...request.Option) (*GetKeyPolicyOutput, error) { + req, out := c.GetKeyPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetKeyRotationStatus = "GetKeyRotationStatus" // GetKeyRotationStatusRequest generates a "aws/request.Request" representing the // client's request for the GetKeyRotationStatus operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See GetKeyRotationStatus for usage and error information. +// See GetKeyRotationStatus for more information on using the GetKeyRotationStatus +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetKeyRotationStatus method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetKeyRotationStatusRequest method. 
// req, resp := client.GetKeyRotationStatusRequest(params) @@ -1722,22 +1967,22 @@ func (c *KMS) GetKeyRotationStatusRequest(input *GetKeyRotationStatusInput) (req // API operation GetKeyRotationStatus for usage and error information. // // Returned Error Codes: -// * NotFoundException +// * ErrCodeNotFoundException "NotFoundException" // The request was rejected because the specified entity or resource could not // be found. // -// * InvalidArnException +// * ErrCodeInvalidArnException "InvalidArnException" // The request was rejected because a specified ARN was not valid. // -// * DependencyTimeoutException +// * ErrCodeDependencyTimeoutException "DependencyTimeoutException" // The system timed out while trying to fulfill the request. The request can // be retried. // -// * InternalException +// * ErrCodeInternalException "InternalException" // The request was rejected because an internal exception occurred. The request // can be retried. // -// * InvalidStateException +// * ErrCodeInvalidStateException "InvalidStateException" // The request was rejected because the state of the specified resource is not // valid for this request. // @@ -1745,34 +1990,48 @@ func (c *KMS) GetKeyRotationStatusRequest(input *GetKeyRotationStatusInput) (req // Key State Affects Use of a Customer Master Key (http://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) // in the AWS Key Management Service Developer Guide. // -// * UnsupportedOperationException +// * ErrCodeUnsupportedOperationException "UnsupportedOperationException" // The request was rejected because a specified parameter is not supported or // a specified resource is not valid for this operation. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GetKeyRotationStatus func (c *KMS) GetKeyRotationStatus(input *GetKeyRotationStatusInput) (*GetKeyRotationStatusOutput, error) { req, out := c.GetKeyRotationStatusRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetKeyRotationStatusWithContext is the same as GetKeyRotationStatus with the addition of +// the ability to pass a context and additional request options. +// +// See GetKeyRotationStatus for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *KMS) GetKeyRotationStatusWithContext(ctx aws.Context, input *GetKeyRotationStatusInput, opts ...request.Option) (*GetKeyRotationStatusOutput, error) { + req, out := c.GetKeyRotationStatusRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetParametersForImport = "GetParametersForImport" // GetParametersForImportRequest generates a "aws/request.Request" representing the // client's request for the GetParametersForImport operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See GetParametersForImport for usage and error information. 
+// See GetParametersForImport for more information on using the GetParametersForImport +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetParametersForImport method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetParametersForImportRequest method. // req, resp := client.GetParametersForImportRequest(params) @@ -1826,26 +2085,26 @@ func (c *KMS) GetParametersForImportRequest(input *GetParametersForImportInput) // API operation GetParametersForImport for usage and error information. // // Returned Error Codes: -// * InvalidArnException +// * ErrCodeInvalidArnException "InvalidArnException" // The request was rejected because a specified ARN was not valid. // -// * UnsupportedOperationException +// * ErrCodeUnsupportedOperationException "UnsupportedOperationException" // The request was rejected because a specified parameter is not supported or // a specified resource is not valid for this operation. // -// * DependencyTimeoutException +// * ErrCodeDependencyTimeoutException "DependencyTimeoutException" // The system timed out while trying to fulfill the request. The request can // be retried. // -// * NotFoundException +// * ErrCodeNotFoundException "NotFoundException" // The request was rejected because the specified entity or resource could not // be found. // -// * InternalException +// * ErrCodeInternalException "InternalException" // The request was rejected because an internal exception occurred. The request // can be retried. // -// * InvalidStateException +// * ErrCodeInvalidStateException "InvalidStateException" // The request was rejected because the state of the specified resource is not // valid for this request. // @@ -1856,27 +2115,41 @@ func (c *KMS) GetParametersForImportRequest(input *GetParametersForImportInput) // Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GetParametersForImport func (c *KMS) GetParametersForImport(input *GetParametersForImportInput) (*GetParametersForImportOutput, error) { req, out := c.GetParametersForImportRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetParametersForImportWithContext is the same as GetParametersForImport with the addition of +// the ability to pass a context and additional request options. +// +// See GetParametersForImport for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *KMS) GetParametersForImportWithContext(ctx aws.Context, input *GetParametersForImportInput, opts ...request.Option) (*GetParametersForImportOutput, error) { + req, out := c.GetParametersForImportRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() } const opImportKeyMaterial = "ImportKeyMaterial" // ImportKeyMaterialRequest generates a "aws/request.Request" representing the // client's request for the ImportKeyMaterial operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See ImportKeyMaterial for usage and error information. +// See ImportKeyMaterial for more information on using the ImportKeyMaterial +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ImportKeyMaterial method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ImportKeyMaterialRequest method. // req, resp := client.ImportKeyMaterialRequest(params) @@ -1935,26 +2208,26 @@ func (c *KMS) ImportKeyMaterialRequest(input *ImportKeyMaterialInput) (req *requ // API operation ImportKeyMaterial for usage and error information. // // Returned Error Codes: -// * InvalidArnException +// * ErrCodeInvalidArnException "InvalidArnException" // The request was rejected because a specified ARN was not valid. // -// * UnsupportedOperationException +// * ErrCodeUnsupportedOperationException "UnsupportedOperationException" // The request was rejected because a specified parameter is not supported or // a specified resource is not valid for this operation. // -// * DependencyTimeoutException +// * ErrCodeDependencyTimeoutException "DependencyTimeoutException" // The system timed out while trying to fulfill the request. The request can // be retried. // -// * NotFoundException +// * ErrCodeNotFoundException "NotFoundException" // The request was rejected because the specified entity or resource could not // be found. // -// * InternalException +// * ErrCodeInternalException "InternalException" // The request was rejected because an internal exception occurred. The request // can be retried. // -// * InvalidStateException +// * ErrCodeInvalidStateException "InvalidStateException" // The request was rejected because the state of the specified resource is not // valid for this request. // @@ -1962,49 +2235,63 @@ func (c *KMS) ImportKeyMaterialRequest(input *ImportKeyMaterialInput) (req *requ // Key State Affects Use of a Customer Master Key (http://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) // in the AWS Key Management Service Developer Guide. // -// * InvalidCiphertextException +// * ErrCodeInvalidCiphertextException "InvalidCiphertextException" // The request was rejected because the specified ciphertext has been corrupted // or is otherwise invalid. 
// -// * IncorrectKeyMaterialException +// * ErrCodeIncorrectKeyMaterialException "IncorrectKeyMaterialException" // The request was rejected because the provided key material is invalid or // is not the same key material that was previously imported into this customer // master key (CMK). // -// * ExpiredImportTokenException +// * ErrCodeExpiredImportTokenException "ExpiredImportTokenException" // The request was rejected because the provided import token is expired. Use // GetParametersForImport to retrieve a new import token and public key, use // the new public key to encrypt the key material, and then try the request // again. // -// * InvalidImportTokenException +// * ErrCodeInvalidImportTokenException "InvalidImportTokenException" // The request was rejected because the provided import token is invalid or // is associated with a different customer master key (CMK). // // Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ImportKeyMaterial func (c *KMS) ImportKeyMaterial(input *ImportKeyMaterialInput) (*ImportKeyMaterialOutput, error) { req, out := c.ImportKeyMaterialRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ImportKeyMaterialWithContext is the same as ImportKeyMaterial with the addition of +// the ability to pass a context and additional request options. +// +// See ImportKeyMaterial for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *KMS) ImportKeyMaterialWithContext(ctx aws.Context, input *ImportKeyMaterialInput, opts ...request.Option) (*ImportKeyMaterialOutput, error) { + req, out := c.ImportKeyMaterialRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opListAliases = "ListAliases" // ListAliasesRequest generates a "aws/request.Request" representing the // client's request for the ListAliases operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See ListAliases for usage and error information. +// See ListAliases for more information on using the ListAliases +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListAliases method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ListAliasesRequest method. 
// req, resp := client.ListAliasesRequest(params) @@ -2049,23 +2336,38 @@ func (c *KMS) ListAliasesRequest(input *ListAliasesInput) (req *request.Request, // API operation ListAliases for usage and error information. // // Returned Error Codes: -// * DependencyTimeoutException +// * ErrCodeDependencyTimeoutException "DependencyTimeoutException" // The system timed out while trying to fulfill the request. The request can // be retried. // -// * InvalidMarkerException +// * ErrCodeInvalidMarkerException "InvalidMarkerException" // The request was rejected because the marker that specifies where pagination // should next begin is not valid. // -// * InternalException +// * ErrCodeInternalException "InternalException" // The request was rejected because an internal exception occurred. The request // can be retried. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListAliases func (c *KMS) ListAliases(input *ListAliasesInput) (*ListAliasesOutput, error) { req, out := c.ListAliasesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ListAliasesWithContext is the same as ListAliases with the addition of +// the ability to pass a context and additional request options. +// +// See ListAliases for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *KMS) ListAliasesWithContext(ctx aws.Context, input *ListAliasesInput, opts ...request.Option) (*ListAliasesOutput, error) { + req, out := c.ListAliasesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } // ListAliasesPages iterates over the pages of a ListAliases operation, @@ -2085,31 +2387,55 @@ func (c *KMS) ListAliases(input *ListAliasesInput) (*ListAliasesOutput, error) { // return pageNum <= 3 // }) // -func (c *KMS) ListAliasesPages(input *ListAliasesInput, fn func(p *ListAliasesOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.ListAliasesRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*ListAliasesOutput), lastPage) - }) +func (c *KMS) ListAliasesPages(input *ListAliasesInput, fn func(*ListAliasesOutput, bool) bool) error { + return c.ListAliasesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListAliasesPagesWithContext same as ListAliasesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *KMS) ListAliasesPagesWithContext(ctx aws.Context, input *ListAliasesInput, fn func(*ListAliasesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListAliasesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListAliasesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListAliasesOutput), !p.HasNextPage()) + } + return p.Err() } const opListGrants = "ListGrants" // ListGrantsRequest generates a "aws/request.Request" representing the // client's request for the ListGrants operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See ListGrants for usage and error information. +// See ListGrants for more information on using the ListGrants +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListGrants method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ListGrantsRequest method. // req, resp := client.ListGrantsRequest(params) @@ -2154,26 +2480,26 @@ func (c *KMS) ListGrantsRequest(input *ListGrantsInput) (req *request.Request, o // API operation ListGrants for usage and error information. // // Returned Error Codes: -// * NotFoundException +// * ErrCodeNotFoundException "NotFoundException" // The request was rejected because the specified entity or resource could not // be found. // -// * DependencyTimeoutException +// * ErrCodeDependencyTimeoutException "DependencyTimeoutException" // The system timed out while trying to fulfill the request. The request can // be retried. // -// * InvalidMarkerException +// * ErrCodeInvalidMarkerException "InvalidMarkerException" // The request was rejected because the marker that specifies where pagination // should next begin is not valid. // -// * InvalidArnException +// * ErrCodeInvalidArnException "InvalidArnException" // The request was rejected because a specified ARN was not valid. // -// * InternalException +// * ErrCodeInternalException "InternalException" // The request was rejected because an internal exception occurred. The request // can be retried. // -// * InvalidStateException +// * ErrCodeInvalidStateException "InvalidStateException" // The request was rejected because the state of the specified resource is not // valid for this request. // @@ -2184,8 +2510,23 @@ func (c *KMS) ListGrantsRequest(input *ListGrantsInput) (req *request.Request, o // Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListGrants func (c *KMS) ListGrants(input *ListGrantsInput) (*ListGrantsResponse, error) { req, out := c.ListGrantsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ListGrantsWithContext is the same as ListGrants with the addition of +// the ability to pass a context and additional request options. +// +// See ListGrants for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *KMS) ListGrantsWithContext(ctx aws.Context, input *ListGrantsInput, opts ...request.Option) (*ListGrantsResponse, error) { + req, out := c.ListGrantsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } // ListGrantsPages iterates over the pages of a ListGrants operation, @@ -2205,31 +2546,55 @@ func (c *KMS) ListGrants(input *ListGrantsInput) (*ListGrantsResponse, error) { // return pageNum <= 3 // }) // -func (c *KMS) ListGrantsPages(input *ListGrantsInput, fn func(p *ListGrantsResponse, lastPage bool) (shouldContinue bool)) error { - page, _ := c.ListGrantsRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*ListGrantsResponse), lastPage) - }) +func (c *KMS) ListGrantsPages(input *ListGrantsInput, fn func(*ListGrantsResponse, bool) bool) error { + return c.ListGrantsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListGrantsPagesWithContext same as ListGrantsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *KMS) ListGrantsPagesWithContext(ctx aws.Context, input *ListGrantsInput, fn func(*ListGrantsResponse, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListGrantsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListGrantsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListGrantsResponse), !p.HasNextPage()) + } + return p.Err() } const opListKeyPolicies = "ListKeyPolicies" // ListKeyPoliciesRequest generates a "aws/request.Request" representing the // client's request for the ListKeyPolicies operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See ListKeyPolicies for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListKeyPolicies method directly -// instead. +// See ListKeyPolicies for more information on using the ListKeyPolicies +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. 
// // // Example sending a request using the ListKeyPoliciesRequest method. // req, resp := client.ListKeyPoliciesRequest(params) @@ -2274,22 +2639,22 @@ func (c *KMS) ListKeyPoliciesRequest(input *ListKeyPoliciesInput) (req *request. // API operation ListKeyPolicies for usage and error information. // // Returned Error Codes: -// * NotFoundException +// * ErrCodeNotFoundException "NotFoundException" // The request was rejected because the specified entity or resource could not // be found. // -// * InvalidArnException +// * ErrCodeInvalidArnException "InvalidArnException" // The request was rejected because a specified ARN was not valid. // -// * DependencyTimeoutException +// * ErrCodeDependencyTimeoutException "DependencyTimeoutException" // The system timed out while trying to fulfill the request. The request can // be retried. // -// * InternalException +// * ErrCodeInternalException "InternalException" // The request was rejected because an internal exception occurred. The request // can be retried. // -// * InvalidStateException +// * ErrCodeInvalidStateException "InvalidStateException" // The request was rejected because the state of the specified resource is not // valid for this request. // @@ -2300,8 +2665,23 @@ func (c *KMS) ListKeyPoliciesRequest(input *ListKeyPoliciesInput) (req *request. // Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListKeyPolicies func (c *KMS) ListKeyPolicies(input *ListKeyPoliciesInput) (*ListKeyPoliciesOutput, error) { req, out := c.ListKeyPoliciesRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ListKeyPoliciesWithContext is the same as ListKeyPolicies with the addition of +// the ability to pass a context and additional request options. +// +// See ListKeyPolicies for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *KMS) ListKeyPoliciesWithContext(ctx aws.Context, input *ListKeyPoliciesInput, opts ...request.Option) (*ListKeyPoliciesOutput, error) { + req, out := c.ListKeyPoliciesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } // ListKeyPoliciesPages iterates over the pages of a ListKeyPolicies operation, @@ -2321,31 +2701,55 @@ func (c *KMS) ListKeyPolicies(input *ListKeyPoliciesInput) (*ListKeyPoliciesOutp // return pageNum <= 3 // }) // -func (c *KMS) ListKeyPoliciesPages(input *ListKeyPoliciesInput, fn func(p *ListKeyPoliciesOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.ListKeyPoliciesRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*ListKeyPoliciesOutput), lastPage) - }) +func (c *KMS) ListKeyPoliciesPages(input *ListKeyPoliciesInput, fn func(*ListKeyPoliciesOutput, bool) bool) error { + return c.ListKeyPoliciesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListKeyPoliciesPagesWithContext same as ListKeyPoliciesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *KMS) ListKeyPoliciesPagesWithContext(ctx aws.Context, input *ListKeyPoliciesInput, fn func(*ListKeyPoliciesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListKeyPoliciesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListKeyPoliciesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListKeyPoliciesOutput), !p.HasNextPage()) + } + return p.Err() } const opListKeys = "ListKeys" // ListKeysRequest generates a "aws/request.Request" representing the // client's request for the ListKeys operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See ListKeys for usage and error information. +// See ListKeys for more information on using the ListKeys +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListKeys method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ListKeysRequest method. // req, resp := client.ListKeysRequest(params) @@ -2390,23 +2794,38 @@ func (c *KMS) ListKeysRequest(input *ListKeysInput) (req *request.Request, outpu // API operation ListKeys for usage and error information. // // Returned Error Codes: -// * DependencyTimeoutException +// * ErrCodeDependencyTimeoutException "DependencyTimeoutException" // The system timed out while trying to fulfill the request. The request can // be retried. // -// * InternalException +// * ErrCodeInternalException "InternalException" // The request was rejected because an internal exception occurred. The request // can be retried. // -// * InvalidMarkerException +// * ErrCodeInvalidMarkerException "InvalidMarkerException" // The request was rejected because the marker that specifies where pagination // should next begin is not valid. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListKeys func (c *KMS) ListKeys(input *ListKeysInput) (*ListKeysOutput, error) { req, out := c.ListKeysRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ListKeysWithContext is the same as ListKeys with the addition of +// the ability to pass a context and additional request options. +// +// See ListKeys for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *KMS) ListKeysWithContext(ctx aws.Context, input *ListKeysInput, opts ...request.Option) (*ListKeysOutput, error) { + req, out := c.ListKeysRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } // ListKeysPages iterates over the pages of a ListKeys operation, @@ -2426,31 +2845,146 @@ func (c *KMS) ListKeys(input *ListKeysInput) (*ListKeysOutput, error) { // return pageNum <= 3 // }) // -func (c *KMS) ListKeysPages(input *ListKeysInput, fn func(p *ListKeysOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.ListKeysRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*ListKeysOutput), lastPage) - }) +func (c *KMS) ListKeysPages(input *ListKeysInput, fn func(*ListKeysOutput, bool) bool) error { + return c.ListKeysPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListKeysPagesWithContext same as ListKeysPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *KMS) ListKeysPagesWithContext(ctx aws.Context, input *ListKeysInput, fn func(*ListKeysOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListKeysInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListKeysRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListKeysOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opListResourceTags = "ListResourceTags" + +// ListResourceTagsRequest generates a "aws/request.Request" representing the +// client's request for the ListResourceTags operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListResourceTags for more information on using the ListResourceTags +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListResourceTagsRequest method. +// req, resp := client.ListResourceTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListResourceTags +func (c *KMS) ListResourceTagsRequest(input *ListResourceTagsInput) (req *request.Request, output *ListResourceTagsOutput) { + op := &request.Operation{ + Name: opListResourceTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListResourceTagsInput{} + } + + output = &ListResourceTagsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListResourceTags API operation for AWS Key Management Service. 
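// Usage sketch, for illustration: the paginators above now take a plain
// func(*Output, bool) bool callback and drive request.Pagination internally.
// This helper collects key IDs and stops early once maxKeys have been seen;
// the package name and helper are hypothetical.
package kmsexamples

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kms"
)

func allKeyIDs(svc *kms.KMS, maxKeys int) ([]string, error) {
	var ids []string
	err := svc.ListKeysPages(&kms.ListKeysInput{Limit: aws.Int64(100)},
		func(page *kms.ListKeysOutput, lastPage bool) bool {
			for _, k := range page.Keys {
				ids = append(ids, aws.StringValue(k.KeyId))
			}
			return len(ids) < maxKeys // returning false stops the iteration early
		})
	return ids, err
}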
+// +// Returns a list of all tags for the specified customer master key (CMK). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Key Management Service's +// API operation ListResourceTags for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalException "InternalException" +// The request was rejected because an internal exception occurred. The request +// can be retried. +// +// * ErrCodeNotFoundException "NotFoundException" +// The request was rejected because the specified entity or resource could not +// be found. +// +// * ErrCodeInvalidArnException "InvalidArnException" +// The request was rejected because a specified ARN was not valid. +// +// * ErrCodeInvalidMarkerException "InvalidMarkerException" +// The request was rejected because the marker that specifies where pagination +// should next begin is not valid. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListResourceTags +func (c *KMS) ListResourceTags(input *ListResourceTagsInput) (*ListResourceTagsOutput, error) { + req, out := c.ListResourceTagsRequest(input) + return out, req.Send() +} + +// ListResourceTagsWithContext is the same as ListResourceTags with the addition of +// the ability to pass a context and additional request options. +// +// See ListResourceTags for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *KMS) ListResourceTagsWithContext(ctx aws.Context, input *ListResourceTagsInput, opts ...request.Option) (*ListResourceTagsOutput, error) { + req, out := c.ListResourceTagsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opListRetirableGrants = "ListRetirableGrants" // ListRetirableGrantsRequest generates a "aws/request.Request" representing the // client's request for the ListRetirableGrants operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See ListRetirableGrants for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListRetirableGrants method directly -// instead. +// See ListRetirableGrants for more information on using the ListRetirableGrants +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. 
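// Usage sketch, for illustration: reading a CMK's tags with the new ListResourceTags
// operation, and switching on the ErrCode* constants that the error documentation in
// this file now references. The key ID argument is supplied by the caller; the package
// name is hypothetical.
package kmsexamples

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/kms"
)

func printTags(svc *kms.KMS, keyID string) error {
	out, err := svc.ListResourceTags(&kms.ListResourceTagsInput{KeyId: aws.String(keyID)})
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			case kms.ErrCodeNotFoundException:
				fmt.Println("no such CMK:", aerr.Message())
			case kms.ErrCodeInternalException:
				fmt.Println("transient service error, retry later:", aerr.Message())
			}
		}
		return err
	}
	for _, t := range out.Tags {
		fmt.Printf("%s=%s\n", aws.StringValue(t.TagKey), aws.StringValue(t.TagValue))
	}
	return nil
}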
// // // Example sending a request using the ListRetirableGrantsRequest method. // req, resp := client.ListRetirableGrantsRequest(params) @@ -2493,49 +3027,63 @@ func (c *KMS) ListRetirableGrantsRequest(input *ListRetirableGrantsInput) (req * // API operation ListRetirableGrants for usage and error information. // // Returned Error Codes: -// * DependencyTimeoutException +// * ErrCodeDependencyTimeoutException "DependencyTimeoutException" // The system timed out while trying to fulfill the request. The request can // be retried. // -// * InvalidMarkerException +// * ErrCodeInvalidMarkerException "InvalidMarkerException" // The request was rejected because the marker that specifies where pagination // should next begin is not valid. // -// * InvalidArnException +// * ErrCodeInvalidArnException "InvalidArnException" // The request was rejected because a specified ARN was not valid. // -// * NotFoundException +// * ErrCodeNotFoundException "NotFoundException" // The request was rejected because the specified entity or resource could not // be found. // -// * InternalException +// * ErrCodeInternalException "InternalException" // The request was rejected because an internal exception occurred. The request // can be retried. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListRetirableGrants func (c *KMS) ListRetirableGrants(input *ListRetirableGrantsInput) (*ListGrantsResponse, error) { req, out := c.ListRetirableGrantsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ListRetirableGrantsWithContext is the same as ListRetirableGrants with the addition of +// the ability to pass a context and additional request options. +// +// See ListRetirableGrants for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *KMS) ListRetirableGrantsWithContext(ctx aws.Context, input *ListRetirableGrantsInput, opts ...request.Option) (*ListGrantsResponse, error) { + req, out := c.ListRetirableGrantsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opPutKeyPolicy = "PutKeyPolicy" // PutKeyPolicyRequest generates a "aws/request.Request" representing the // client's request for the PutKeyPolicy operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See PutKeyPolicy for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutKeyPolicy method directly -// instead. +// See PutKeyPolicy for more information on using the PutKeyPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutKeyPolicyRequest method. // req, resp := client.PutKeyPolicyRequest(params) @@ -2579,35 +3127,35 @@ func (c *KMS) PutKeyPolicyRequest(input *PutKeyPolicyInput) (req *request.Reques // API operation PutKeyPolicy for usage and error information. // // Returned Error Codes: -// * NotFoundException +// * ErrCodeNotFoundException "NotFoundException" // The request was rejected because the specified entity or resource could not // be found. // -// * InvalidArnException +// * ErrCodeInvalidArnException "InvalidArnException" // The request was rejected because a specified ARN was not valid. // -// * MalformedPolicyDocumentException +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocumentException" // The request was rejected because the specified policy is not syntactically // or semantically correct. // -// * DependencyTimeoutException +// * ErrCodeDependencyTimeoutException "DependencyTimeoutException" // The system timed out while trying to fulfill the request. The request can // be retried. // -// * UnsupportedOperationException +// * ErrCodeUnsupportedOperationException "UnsupportedOperationException" // The request was rejected because a specified parameter is not supported or // a specified resource is not valid for this operation. // -// * InternalException +// * ErrCodeInternalException "InternalException" // The request was rejected because an internal exception occurred. The request // can be retried. // -// * LimitExceededException +// * ErrCodeLimitExceededException "LimitExceededException" // The request was rejected because a limit was exceeded. For more information, // see Limits (http://docs.aws.amazon.com/kms/latest/developerguide/limits.html) // in the AWS Key Management Service Developer Guide. // -// * InvalidStateException +// * ErrCodeInvalidStateException "InvalidStateException" // The request was rejected because the state of the specified resource is not // valid for this request. // @@ -2618,27 +3166,41 @@ func (c *KMS) PutKeyPolicyRequest(input *PutKeyPolicyInput) (req *request.Reques // Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/PutKeyPolicy func (c *KMS) PutKeyPolicy(input *PutKeyPolicyInput) (*PutKeyPolicyOutput, error) { req, out := c.PutKeyPolicyRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// PutKeyPolicyWithContext is the same as PutKeyPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See PutKeyPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *KMS) PutKeyPolicyWithContext(ctx aws.Context, input *PutKeyPolicyInput, opts ...request.Option) (*PutKeyPolicyOutput, error) { + req, out := c.PutKeyPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opReEncrypt = "ReEncrypt" // ReEncryptRequest generates a "aws/request.Request" representing the // client's request for the ReEncrypt operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. 
+// value will be populated with the request's response once the request complets +// successfuly. // -// See ReEncrypt for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ReEncrypt method directly -// instead. +// See ReEncrypt for more information on using the ReEncrypt +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ReEncryptRequest method. // req, resp := client.ReEncryptRequest(params) @@ -2688,36 +3250,36 @@ func (c *KMS) ReEncryptRequest(input *ReEncryptInput) (req *request.Request, out // API operation ReEncrypt for usage and error information. // // Returned Error Codes: -// * NotFoundException +// * ErrCodeNotFoundException "NotFoundException" // The request was rejected because the specified entity or resource could not // be found. // -// * DisabledException +// * ErrCodeDisabledException "DisabledException" // The request was rejected because the specified CMK is not enabled. // -// * InvalidCiphertextException +// * ErrCodeInvalidCiphertextException "InvalidCiphertextException" // The request was rejected because the specified ciphertext has been corrupted // or is otherwise invalid. // -// * KeyUnavailableException +// * ErrCodeKeyUnavailableException "KeyUnavailableException" // The request was rejected because the specified CMK was not available. The // request can be retried. // -// * DependencyTimeoutException +// * ErrCodeDependencyTimeoutException "DependencyTimeoutException" // The system timed out while trying to fulfill the request. The request can // be retried. // -// * InvalidKeyUsageException +// * ErrCodeInvalidKeyUsageException "InvalidKeyUsageException" // The request was rejected because the specified KeySpec value is not valid. // -// * InvalidGrantTokenException +// * ErrCodeInvalidGrantTokenException "InvalidGrantTokenException" // The request was rejected because the specified grant token is not valid. // -// * InternalException +// * ErrCodeInternalException "InternalException" // The request was rejected because an internal exception occurred. The request // can be retried. // -// * InvalidStateException +// * ErrCodeInvalidStateException "InvalidStateException" // The request was rejected because the state of the specified resource is not // valid for this request. // @@ -2728,27 +3290,41 @@ func (c *KMS) ReEncryptRequest(input *ReEncryptInput) (req *request.Request, out // Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ReEncrypt func (c *KMS) ReEncrypt(input *ReEncryptInput) (*ReEncryptOutput, error) { req, out := c.ReEncryptRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ReEncryptWithContext is the same as ReEncrypt with the addition of +// the ability to pass a context and additional request options. 
+// +// See ReEncrypt for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *KMS) ReEncryptWithContext(ctx aws.Context, input *ReEncryptInput, opts ...request.Option) (*ReEncryptOutput, error) { + req, out := c.ReEncryptRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opRetireGrant = "RetireGrant" // RetireGrantRequest generates a "aws/request.Request" representing the // client's request for the RetireGrant operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See RetireGrant for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the RetireGrant method directly -// instead. +// See RetireGrant for more information on using the RetireGrant +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the RetireGrantRequest method. // req, resp := client.RetireGrantRequest(params) @@ -2804,25 +3380,25 @@ func (c *KMS) RetireGrantRequest(input *RetireGrantInput) (req *request.Request, // API operation RetireGrant for usage and error information. // // Returned Error Codes: -// * InvalidGrantTokenException +// * ErrCodeInvalidGrantTokenException "InvalidGrantTokenException" // The request was rejected because the specified grant token is not valid. // -// * InvalidGrantIdException +// * ErrCodeInvalidGrantIdException "InvalidGrantIdException" // The request was rejected because the specified GrantId is not valid. // -// * NotFoundException +// * ErrCodeNotFoundException "NotFoundException" // The request was rejected because the specified entity or resource could not // be found. // -// * DependencyTimeoutException +// * ErrCodeDependencyTimeoutException "DependencyTimeoutException" // The system timed out while trying to fulfill the request. The request can // be retried. // -// * InternalException +// * ErrCodeInternalException "InternalException" // The request was rejected because an internal exception occurred. The request // can be retried. // -// * InvalidStateException +// * ErrCodeInvalidStateException "InvalidStateException" // The request was rejected because the state of the specified resource is not // valid for this request. 
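// Usage sketch, for illustration: the two-step Request form with a Build handler,
// matching the "inject custom logic or configuration into the SDK's request
// lifecycle" note above. The header name and identifiers are placeholders.
package kmsexamples

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/service/kms"
)

func retireGrantWithTraceHeader(svc *kms.KMS, keyID, grantID string) error {
	req, _ := svc.RetireGrantRequest(&kms.RetireGrantInput{
		KeyId:   aws.String(keyID),
		GrantId: aws.String(grantID),
	})
	// Build handlers run after the request body is marshaled and before signing,
	// so the extra header is in place when the request is sent.
	req.Handlers.Build.PushBack(func(r *request.Request) {
		r.HTTPRequest.Header.Set("X-Example-Trace-Id", "placeholder-value")
	})
	return req.Send()
}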
// @@ -2833,27 +3409,41 @@ func (c *KMS) RetireGrantRequest(input *RetireGrantInput) (req *request.Request, // Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/RetireGrant func (c *KMS) RetireGrant(input *RetireGrantInput) (*RetireGrantOutput, error) { req, out := c.RetireGrantRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// RetireGrantWithContext is the same as RetireGrant with the addition of +// the ability to pass a context and additional request options. +// +// See RetireGrant for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *KMS) RetireGrantWithContext(ctx aws.Context, input *RetireGrantInput, opts ...request.Option) (*RetireGrantOutput, error) { + req, out := c.RetireGrantRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opRevokeGrant = "RevokeGrant" // RevokeGrantRequest generates a "aws/request.Request" representing the // client's request for the RevokeGrant operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See RevokeGrant for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the RevokeGrant method directly -// instead. +// See RevokeGrant for more information on using the RevokeGrant +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the RevokeGrantRequest method. // req, resp := client.RevokeGrantRequest(params) @@ -2895,25 +3485,25 @@ func (c *KMS) RevokeGrantRequest(input *RevokeGrantInput) (req *request.Request, // API operation RevokeGrant for usage and error information. // // Returned Error Codes: -// * NotFoundException +// * ErrCodeNotFoundException "NotFoundException" // The request was rejected because the specified entity or resource could not // be found. // -// * DependencyTimeoutException +// * ErrCodeDependencyTimeoutException "DependencyTimeoutException" // The system timed out while trying to fulfill the request. The request can // be retried. // -// * InvalidArnException +// * ErrCodeInvalidArnException "InvalidArnException" // The request was rejected because a specified ARN was not valid. // -// * InvalidGrantIdException +// * ErrCodeInvalidGrantIdException "InvalidGrantIdException" // The request was rejected because the specified GrantId is not valid. 
// -// * InternalException +// * ErrCodeInternalException "InternalException" // The request was rejected because an internal exception occurred. The request // can be retried. // -// * InvalidStateException +// * ErrCodeInvalidStateException "InvalidStateException" // The request was rejected because the state of the specified resource is not // valid for this request. // @@ -2924,27 +3514,41 @@ func (c *KMS) RevokeGrantRequest(input *RevokeGrantInput) (req *request.Request, // Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/RevokeGrant func (c *KMS) RevokeGrant(input *RevokeGrantInput) (*RevokeGrantOutput, error) { req, out := c.RevokeGrantRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// RevokeGrantWithContext is the same as RevokeGrant with the addition of +// the ability to pass a context and additional request options. +// +// See RevokeGrant for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *KMS) RevokeGrantWithContext(ctx aws.Context, input *RevokeGrantInput, opts ...request.Option) (*RevokeGrantOutput, error) { + req, out := c.RevokeGrantRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opScheduleKeyDeletion = "ScheduleKeyDeletion" // ScheduleKeyDeletionRequest generates a "aws/request.Request" representing the // client's request for the ScheduleKeyDeletion operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See ScheduleKeyDeletion for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ScheduleKeyDeletion method directly -// instead. +// See ScheduleKeyDeletion for more information on using the ScheduleKeyDeletion +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ScheduleKeyDeletionRequest method. // req, resp := client.ScheduleKeyDeletionRequest(params) @@ -2998,22 +3602,22 @@ func (c *KMS) ScheduleKeyDeletionRequest(input *ScheduleKeyDeletionInput) (req * // API operation ScheduleKeyDeletion for usage and error information. // // Returned Error Codes: -// * NotFoundException +// * ErrCodeNotFoundException "NotFoundException" // The request was rejected because the specified entity or resource could not // be found. // -// * InvalidArnException +// * ErrCodeInvalidArnException "InvalidArnException" // The request was rejected because a specified ARN was not valid. 
// -// * DependencyTimeoutException +// * ErrCodeDependencyTimeoutException "DependencyTimeoutException" // The system timed out while trying to fulfill the request. The request can // be retried. // -// * InternalException +// * ErrCodeInternalException "InternalException" // The request was rejected because an internal exception occurred. The request // can be retried. // -// * InvalidStateException +// * ErrCodeInvalidStateException "InvalidStateException" // The request was rejected because the state of the specified resource is not // valid for this request. // @@ -3024,27 +3628,261 @@ func (c *KMS) ScheduleKeyDeletionRequest(input *ScheduleKeyDeletionInput) (req * // Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ScheduleKeyDeletion func (c *KMS) ScheduleKeyDeletion(input *ScheduleKeyDeletionInput) (*ScheduleKeyDeletionOutput, error) { req, out := c.ScheduleKeyDeletionRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ScheduleKeyDeletionWithContext is the same as ScheduleKeyDeletion with the addition of +// the ability to pass a context and additional request options. +// +// See ScheduleKeyDeletion for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *KMS) ScheduleKeyDeletionWithContext(ctx aws.Context, input *ScheduleKeyDeletionInput, opts ...request.Option) (*ScheduleKeyDeletionOutput, error) { + req, out := c.ScheduleKeyDeletionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. +// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/TagResource +func (c *KMS) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for AWS Key Management Service. +// +// Adds or overwrites one or more tags for the specified customer master key +// (CMK). +// +// Each tag consists of a tag key and a tag value. 
Tag keys and tag values are +// both required, but tag values can be empty (null) strings. +// +// You cannot use the same tag key more than once per CMK. For example, consider +// a CMK with one tag whose tag key is Purpose and tag value is Test. If you +// send a TagResource request for this CMK with a tag key of Purpose and a tag +// value of Prod, it does not create a second tag. Instead, the original tag +// is overwritten with the new tag value. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Key Management Service's +// API operation TagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalException "InternalException" +// The request was rejected because an internal exception occurred. The request +// can be retried. +// +// * ErrCodeNotFoundException "NotFoundException" +// The request was rejected because the specified entity or resource could not +// be found. +// +// * ErrCodeInvalidArnException "InvalidArnException" +// The request was rejected because a specified ARN was not valid. +// +// * ErrCodeInvalidStateException "InvalidStateException" +// The request was rejected because the state of the specified resource is not +// valid for this request. +// +// For more information about how key state affects the use of a CMK, see How +// Key State Affects Use of a Customer Master Key (http://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) +// in the AWS Key Management Service Developer Guide. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// The request was rejected because a limit was exceeded. For more information, +// see Limits (http://docs.aws.amazon.com/kms/latest/developerguide/limits.html) +// in the AWS Key Management Service Developer Guide. +// +// * ErrCodeTagException "TagException" +// The request was rejected because one or more tags are not valid. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/TagResource +func (c *KMS) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *KMS) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
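// Usage sketch, for illustration: adding and removing a CMK tag with the new
// TagResource and UntagResource operations described above. The key ID and tag
// values are placeholders; the package name is hypothetical.
package kmsexamples

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kms"
)

func tagAndUntag(svc *kms.KMS, keyID string) error {
	// Adds the tag, or overwrites the value if the tag key already exists on the CMK.
	if _, err := svc.TagResource(&kms.TagResourceInput{
		KeyId: aws.String(keyID),
		Tags: []*kms.Tag{
			{TagKey: aws.String("Purpose"), TagValue: aws.String("Test")},
		},
	}); err != nil {
		return err
	}

	// Removal is by tag key only; no tag value is specified.
	_, err := svc.UntagResource(&kms.UntagResourceInput{
		KeyId:   aws.String(keyID),
		TagKeys: []*string{aws.String("Purpose")},
	})
	return err
}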
+// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagResourceRequest method. +// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UntagResource +func (c *KMS) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for AWS Key Management Service. +// +// Removes the specified tag or tags from the specified customer master key +// (CMK). +// +// To remove a tag, you specify the tag key for each tag to remove. You do not +// specify the tag value. To overwrite the tag value for an existing tag, use +// TagResource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Key Management Service's +// API operation UntagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalException "InternalException" +// The request was rejected because an internal exception occurred. The request +// can be retried. +// +// * ErrCodeNotFoundException "NotFoundException" +// The request was rejected because the specified entity or resource could not +// be found. +// +// * ErrCodeInvalidArnException "InvalidArnException" +// The request was rejected because a specified ARN was not valid. +// +// * ErrCodeInvalidStateException "InvalidStateException" +// The request was rejected because the state of the specified resource is not +// valid for this request. +// +// For more information about how key state affects the use of a CMK, see How +// Key State Affects Use of a Customer Master Key (http://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) +// in the AWS Key Management Service Developer Guide. +// +// * ErrCodeTagException "TagException" +// The request was rejected because one or more tags are not valid. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UntagResource +func (c *KMS) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *KMS) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opUpdateAlias = "UpdateAlias" // UpdateAliasRequest generates a "aws/request.Request" representing the // client's request for the UpdateAlias operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See UpdateAlias for usage and error information. +// See UpdateAlias for more information on using the UpdateAlias +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the UpdateAlias method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the UpdateAliasRequest method. // req, resp := client.UpdateAliasRequest(params) @@ -3097,19 +3935,19 @@ func (c *KMS) UpdateAliasRequest(input *UpdateAliasInput) (req *request.Request, // API operation UpdateAlias for usage and error information. // // Returned Error Codes: -// * DependencyTimeoutException +// * ErrCodeDependencyTimeoutException "DependencyTimeoutException" // The system timed out while trying to fulfill the request. The request can // be retried. // -// * NotFoundException +// * ErrCodeNotFoundException "NotFoundException" // The request was rejected because the specified entity or resource could not // be found. // -// * InternalException +// * ErrCodeInternalException "InternalException" // The request was rejected because an internal exception occurred. The request // can be retried. // -// * InvalidStateException +// * ErrCodeInvalidStateException "InvalidStateException" // The request was rejected because the state of the specified resource is not // valid for this request. // @@ -3120,27 +3958,41 @@ func (c *KMS) UpdateAliasRequest(input *UpdateAliasInput) (req *request.Request, // Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UpdateAlias func (c *KMS) UpdateAlias(input *UpdateAliasInput) (*UpdateAliasOutput, error) { req, out := c.UpdateAliasRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// UpdateAliasWithContext is the same as UpdateAlias with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateAlias for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *KMS) UpdateAliasWithContext(ctx aws.Context, input *UpdateAliasInput, opts ...request.Option) (*UpdateAliasOutput, error) { + req, out := c.UpdateAliasRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opUpdateKeyDescription = "UpdateKeyDescription" // UpdateKeyDescriptionRequest generates a "aws/request.Request" representing the // client's request for the UpdateKeyDescription operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See UpdateKeyDescription for usage and error information. +// See UpdateKeyDescription for more information on using the UpdateKeyDescription +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the UpdateKeyDescription method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the UpdateKeyDescriptionRequest method. // req, resp := client.UpdateKeyDescriptionRequest(params) @@ -3181,22 +4033,22 @@ func (c *KMS) UpdateKeyDescriptionRequest(input *UpdateKeyDescriptionInput) (req // API operation UpdateKeyDescription for usage and error information. // // Returned Error Codes: -// * NotFoundException +// * ErrCodeNotFoundException "NotFoundException" // The request was rejected because the specified entity or resource could not // be found. // -// * InvalidArnException +// * ErrCodeInvalidArnException "InvalidArnException" // The request was rejected because a specified ARN was not valid. // -// * DependencyTimeoutException +// * ErrCodeDependencyTimeoutException "DependencyTimeoutException" // The system timed out while trying to fulfill the request. The request can // be retried. // -// * InternalException +// * ErrCodeInternalException "InternalException" // The request was rejected because an internal exception occurred. The request // can be retried. // -// * InvalidStateException +// * ErrCodeInvalidStateException "InvalidStateException" // The request was rejected because the state of the specified resource is not // valid for this request. // @@ -3207,8 +4059,23 @@ func (c *KMS) UpdateKeyDescriptionRequest(input *UpdateKeyDescriptionInput) (req // Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UpdateKeyDescription func (c *KMS) UpdateKeyDescription(input *UpdateKeyDescriptionInput) (*UpdateKeyDescriptionOutput, error) { req, out := c.UpdateKeyDescriptionRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// UpdateKeyDescriptionWithContext is the same as UpdateKeyDescription with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateKeyDescription for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *KMS) UpdateKeyDescriptionWithContext(ctx aws.Context, input *UpdateKeyDescriptionInput, opts ...request.Option) (*UpdateKeyDescriptionOutput, error) { + req, out := c.UpdateKeyDescriptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } // Contains information about an alias. @@ -3417,11 +4284,9 @@ func (s CreateAliasOutput) GoString() string { type CreateGrantInput struct { _ struct{} `type:"structure"` - // The conditions under which the operations permitted by the grant are allowed. - // - // You can use this value to allow the operations permitted by the grant only - // when a specified encryption context is present. For more information, see - // Encryption Context (http://docs.aws.amazon.com/kms/latest/developerguide/encryption-context.html) + // A structure that you can use to allow certain operations in the grant only + // when the desired encryption context is present. For more information about + // encryption context, see Encryption Context (http://docs.aws.amazon.com/kms/latest/developerguide/encryption-context.html) // in the AWS Key Management Service Developer Guide. Constraints *GrantConstraints `type:"structure"` @@ -3435,10 +4300,10 @@ type CreateGrantInput struct { // grant permits. // // To specify the principal, use the Amazon Resource Name (ARN) (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // of an AWS principal. Valid AWS principals include AWS accounts (root), IAM - // users, federated users, and assumed role users. For examples of the ARN syntax - // to use for specifying a principal, see AWS Identity and Access Management - // (IAM) (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-iam) + // of an AWS principal. Valid AWS principals include AWS accounts (root), IAM + // users, IAM roles, federated users, and assumed role users. For examples of + // the ARN syntax to use for specifying a principal, see AWS Identity and Access + // Management (IAM) (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-iam) // in the Example ARNs section of the AWS General Reference. // // GranteePrincipal is a required field @@ -3617,8 +4482,8 @@ type CreateKeyInput struct { // section in the AWS Key Management Service Developer Guide. // // Use this parameter only when you include a policy in the request and you - // intend to prevent the principal making the request from making a subsequent - // PutKeyPolicy request on the CMK. + // intend to prevent the principal that is making the request from making a + // subsequent PutKeyPolicy request on the CMK. // // The default value is false. BypassPolicyLockoutSafetyCheck *bool `type:"boolean"` @@ -3651,18 +4516,18 @@ type CreateKeyInput struct { // If you specify a policy and do not set BypassPolicyLockoutSafetyCheck to // true, the policy must meet the following criteria: // - // * It must allow the principal making the CreateKey request to make a subsequent - // PutKeyPolicy request on the CMK. This reduces the likelihood that the - // CMK becomes unmanageable. 
For more information, refer to the scenario - // in the Default Key Policy (http://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) + // * It must allow the principal that is making the CreateKey request to + // make a subsequent PutKeyPolicy request on the CMK. This reduces the likelihood + // that the CMK becomes unmanageable. For more information, refer to the + // scenario in the Default Key Policy (http://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) // section in the AWS Key Management Service Developer Guide. // - // * The principal(s) specified in the key policy must exist and be visible - // to AWS KMS. When you create a new AWS principal (for example, an IAM user - // or role), you might need to enforce a delay before specifying the new - // principal in a key policy because the new principal might not immediately - // be visible to AWS KMS. For more information, see Changes that I make are - // not always immediately visible (http://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency) + // * The principals that are specified in the key policy must exist and be + // visible to AWS KMS. When you create a new AWS principal (for example, + // an IAM user or role), you might need to enforce a delay before specifying + // the new principal in a key policy because the new principal might not + // immediately be visible to AWS KMS. For more information, see Changes that + // I make are not always immediately visible (http://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency) // in the IAM User Guide. // // If you do not specify a policy, AWS KMS attaches a default key policy to @@ -3671,6 +4536,13 @@ type CreateKeyInput struct { // // The policy size limit is 32 KiB (32768 bytes). Policy *string `min:"1" type:"string"` + + // One or more tags. Each tag consists of a tag key and a tag value. Tag keys + // and tag values are both required, but tag values can be empty (null) strings. + // + // Use this parameter to tag the CMK when it is created. Alternately, you can + // omit this parameter and instead tag the CMK after it is created using TagResource. + Tags []*Tag `type:"list"` } // String returns the string representation @@ -3689,6 +4561,16 @@ func (s *CreateKeyInput) Validate() error { if s.Policy != nil && len(*s.Policy) < 1 { invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -3726,6 +4608,12 @@ func (s *CreateKeyInput) SetPolicy(v string) *CreateKeyInput { return s } +// SetTags sets the Tags field's value. +func (s *CreateKeyInput) SetTags(v []*Tag) *CreateKeyInput { + s.Tags = v + return s +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CreateKeyResponse type CreateKeyOutput struct { _ struct{} `type:"structure"` @@ -4778,7 +5666,7 @@ func (s *GenerateRandomInput) SetNumberOfBytes(v int64) *GenerateRandomInput { type GenerateRandomOutput struct { _ struct{} `type:"structure"` - // The unpredictable byte string. + // The random byte string. // // Plaintext is automatically base64 encoded/decoded by the SDK. 
Plaintext []byte `min:"1" type:"blob"` @@ -5101,29 +5989,34 @@ func (s *GetParametersForImportOutput) SetPublicKey(v []byte) *GetParametersForI return s } -// A structure for specifying the conditions under which the operations permitted -// by the grant are allowed. -// -// You can use this structure to allow the operations permitted by the grant -// only when a specified encryption context is present. For more information -// about encryption context, see Encryption Context (http://docs.aws.amazon.com/kms/latest/developerguide/encryption-context.html) +// A structure that you can use to allow certain operations in the grant only +// when the desired encryption context is present. For more information about +// encryption context, see Encryption Context (http://docs.aws.amazon.com/kms/latest/developerguide/encryption-context.html) // in the AWS Key Management Service Developer Guide. +// +// Grant constraints apply only to operations that accept encryption context +// as input. For example, the DescribeKey operation does not accept encryption +// context as input. A grant that allows the DescribeKey operation does so regardless +// of the grant constraints. In constrast, the Encrypt operation accepts encryption +// context as input. A grant that allows the Encrypt operation does so only +// when the encryption context of the Encrypt operation satisfies the grant +// constraints. // Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GrantConstraints type GrantConstraints struct { _ struct{} `type:"structure"` - // Contains a list of key-value pairs that must be present in the encryption - // context of a subsequent operation permitted by the grant. When a subsequent - // operation permitted by the grant includes an encryption context that matches - // this list, the grant allows the operation. Otherwise, the operation is not - // allowed. + // A list of key-value pairs that must be present in the encryption context + // of certain subsequent operations that the grant allows. When certain subsequent + // operations allowed by the grant include encryption context that matches this + // list, the grant allows the operation. Otherwise, the grant does not allow + // the operation. EncryptionContextEquals map[string]*string `type:"map"` - // Contains a list of key-value pairs, a subset of which must be present in - // the encryption context of a subsequent operation permitted by the grant. - // When a subsequent operation permitted by the grant includes an encryption - // context that matches this list or is a subset of this list, the grant allows - // the operation. Otherwise, the operation is not allowed. + // A list of key-value pairs, all of which must be present in the encryption + // context of certain subsequent operations that the grant allows. When certain + // subsequent operations allowed by the grant include encryption context that + // matches this list or is a superset of this list, the grant allows the operation. + // Otherwise, the grant does not allow the operation. EncryptionContextSubset map[string]*string `type:"map"` } @@ -5154,7 +6047,8 @@ func (s *GrantConstraints) SetEncryptionContextSubset(v map[string]*string) *Gra type GrantListEntry struct { _ struct{} `type:"structure"` - // The conditions under which the grant's operations are allowed. + // A list of key-value pairs that must be present in the encryption context + // of certain subsequent operations that the grant allows. 
Constraints *GrantConstraints `type:"structure"` // The date and time when the grant was created. @@ -5451,6 +6345,11 @@ type KeyMetadata struct { // KeyId is a required field KeyId *string `min:"1" type:"string" required:"true"` + // The CMK's manager. CMKs are either customer-managed or AWS-managed. For more + // information about the difference, see Customer Master Keys (http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#master_keys) + // in the AWS Key Management Service Developer Guide. + KeyManager *string `type:"string" enum:"KeyManagerType"` + // The state of the CMK. // // For more information about how key state affects the use of a CMK, see How @@ -5534,6 +6433,12 @@ func (s *KeyMetadata) SetKeyId(v string) *KeyMetadata { return s } +// SetKeyManager sets the KeyManager field's value. +func (s *KeyMetadata) SetKeyManager(v string) *KeyMetadata { + s.KeyManager = &v + return s +} + // SetKeyState sets the KeyState field's value. func (s *KeyMetadata) SetKeyState(v string) *KeyMetadata { s.KeyState = &v @@ -5562,17 +6467,17 @@ func (s *KeyMetadata) SetValidTo(v time.Time) *KeyMetadata { type ListAliasesInput struct { _ struct{} `type:"structure"` - // When paginating results, specify the maximum number of items to return in - // the response. If additional items exist beyond the number you specify, the - // Truncated element in the response is set to true. + // Use this parameter to specify the maximum number of items to return. When + // this value is present, AWS KMS does not return more than the specified number + // of items, but it might return fewer. // // This value is optional. If you include a value, it must be between 1 and // 100, inclusive. If you do not include a value, it defaults to 50. Limit *int64 `min:"1" type:"integer"` - // Use this parameter only when paginating results and only in a subsequent - // request after you receive a response with truncated results. Set it to the - // value of NextMarker from the response you just received. + // Use this parameter in a subsequent request after you receive a response with + // truncated results. Set it to the value of NextMarker from the truncated response + // you just received. Marker *string `min:"1" type:"string"` } @@ -5621,13 +6526,14 @@ type ListAliasesOutput struct { // A list of key aliases in the user's account. Aliases []*AliasListEntry `type:"list"` - // When Truncated is true, this value is present and contains the value to use - // for the Marker parameter in a subsequent pagination request. + // When Truncated is true, this element is present and contains the value to + // use for the Marker parameter in a subsequent request. NextMarker *string `min:"1" type:"string"` - // A flag that indicates whether there are more items in the list. If your results - // were truncated, you can use the Marker parameter to make a subsequent pagination - // request to retrieve more items in the list. + // A flag that indicates whether there are more items in the list. When this + // value is true, the list in this response is truncated. To retrieve more items, + // pass the value of the NextMarker element in this response to the Marker parameter + // in a subsequent request. Truncated *bool `type:"boolean"` } @@ -5673,17 +6579,17 @@ type ListGrantsInput struct { // KeyId is a required field KeyId *string `min:"1" type:"string" required:"true"` - // When paginating results, specify the maximum number of items to return in - // the response. 
If additional items exist beyond the number you specify, the - // Truncated element in the response is set to true. + // Use this parameter to specify the maximum number of items to return. When + // this value is present, AWS KMS does not return more than the specified number + // of items, but it might return fewer. // // This value is optional. If you include a value, it must be between 1 and // 100, inclusive. If you do not include a value, it defaults to 50. Limit *int64 `min:"1" type:"integer"` - // Use this parameter only when paginating results and only in a subsequent - // request after you receive a response with truncated results. Set it to the - // value of NextMarker from the response you just received. + // Use this parameter in a subsequent request after you receive a response with + // truncated results. Set it to the value of NextMarker from the truncated response + // you just received. Marker *string `min:"1" type:"string"` } @@ -5744,13 +6650,14 @@ type ListGrantsResponse struct { // A list of grants. Grants []*GrantListEntry `type:"list"` - // When Truncated is true, this value is present and contains the value to use - // for the Marker parameter in a subsequent pagination request. + // When Truncated is true, this element is present and contains the value to + // use for the Marker parameter in a subsequent request. NextMarker *string `min:"1" type:"string"` - // A flag that indicates whether there are more items in the list. If your results - // were truncated, you can use the Marker parameter to make a subsequent pagination - // request to retrieve more items in the list. + // A flag that indicates whether there are more items in the list. When this + // value is true, the list in this response is truncated. To retrieve more items, + // pass the value of the NextMarker element in this response to the Marker parameter + // in a subsequent request. Truncated *bool `type:"boolean"` } @@ -5796,9 +6703,9 @@ type ListKeyPoliciesInput struct { // KeyId is a required field KeyId *string `min:"1" type:"string" required:"true"` - // When paginating results, specify the maximum number of items to return in - // the response. If additional items exist beyond the number you specify, the - // Truncated element in the response is set to true. + // Use this parameter to specify the maximum number of items to return. When + // this value is present, AWS KMS does not return more than the specified number + // of items, but it might return fewer. // // This value is optional. If you include a value, it must be between 1 and // 1000, inclusive. If you do not include a value, it defaults to 100. @@ -5806,9 +6713,9 @@ type ListKeyPoliciesInput struct { // Currently only 1 policy can be attached to a key. Limit *int64 `min:"1" type:"integer"` - // Use this parameter only when paginating results and only in a subsequent - // request after you receive a response with truncated results. Set it to the - // value of NextMarker from the response you just received. + // Use this parameter in a subsequent request after you receive a response with + // truncated results. Set it to the value of NextMarker from the truncated response + // you just received. 
Marker *string `min:"1" type:"string"` } @@ -5866,17 +6773,18 @@ func (s *ListKeyPoliciesInput) SetMarker(v string) *ListKeyPoliciesInput { type ListKeyPoliciesOutput struct { _ struct{} `type:"structure"` - // When Truncated is true, this value is present and contains the value to use - // for the Marker parameter in a subsequent pagination request. + // When Truncated is true, this element is present and contains the value to + // use for the Marker parameter in a subsequent request. NextMarker *string `min:"1" type:"string"` // A list of policy names. Currently, there is only one policy and it is named // "Default". PolicyNames []*string `type:"list"` - // A flag that indicates whether there are more items in the list. If your results - // were truncated, you can use the Marker parameter to make a subsequent pagination - // request to retrieve more items in the list. + // A flag that indicates whether there are more items in the list. When this + // value is true, the list in this response is truncated. To retrieve more items, + // pass the value of the NextMarker element in this response to the Marker parameter + // in a subsequent request. Truncated *bool `type:"boolean"` } @@ -5912,17 +6820,17 @@ func (s *ListKeyPoliciesOutput) SetTruncated(v bool) *ListKeyPoliciesOutput { type ListKeysInput struct { _ struct{} `type:"structure"` - // When paginating results, specify the maximum number of items to return in - // the response. If additional items exist beyond the number you specify, the - // Truncated element in the response is set to true. + // Use this parameter to specify the maximum number of items to return. When + // this value is present, AWS KMS does not return more than the specified number + // of items, but it might return fewer. // // This value is optional. If you include a value, it must be between 1 and // 1000, inclusive. If you do not include a value, it defaults to 100. Limit *int64 `min:"1" type:"integer"` - // Use this parameter only when paginating results and only in a subsequent - // request after you receive a response with truncated results. Set it to the - // value of NextMarker from the response you just received. + // Use this parameter in a subsequent request after you receive a response with + // truncated results. Set it to the value of NextMarker from the truncated response + // you just received. Marker *string `min:"1" type:"string"` } @@ -5971,13 +6879,14 @@ type ListKeysOutput struct { // A list of keys. Keys []*KeyListEntry `type:"list"` - // When Truncated is true, this value is present and contains the value to use - // for the Marker parameter in a subsequent pagination request. + // When Truncated is true, this element is present and contains the value to + // use for the Marker parameter in a subsequent request. NextMarker *string `min:"1" type:"string"` - // A flag that indicates whether there are more items in the list. If your results - // were truncated, you can use the Marker parameter to make a subsequent pagination - // request to retrieve more items in the list. + // A flag that indicates whether there are more items in the list. When this + // value is true, the list in this response is truncated. To retrieve more items, + // pass the value of the NextMarker element in this response to the Marker parameter + // in a subsequent request. 
Truncated *bool `type:"boolean"` } @@ -6009,21 +6918,150 @@ func (s *ListKeysOutput) SetTruncated(v bool) *ListKeysOutput { return s } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListResourceTagsRequest +type ListResourceTagsInput struct { + _ struct{} `type:"structure"` + + // A unique identifier for the CMK whose tags you are listing. You can use the + // unique key ID or the Amazon Resource Name (ARN) of the CMK. Examples: + // + // * Unique key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // KeyId is a required field + KeyId *string `min:"1" type:"string" required:"true"` + + // Use this parameter to specify the maximum number of items to return. When + // this value is present, AWS KMS does not return more than the specified number + // of items, but it might return fewer. + // + // This value is optional. If you include a value, it must be between 1 and + // 50, inclusive. If you do not include a value, it defaults to 50. + Limit *int64 `min:"1" type:"integer"` + + // Use this parameter in a subsequent request after you receive a response with + // truncated results. Set it to the value of NextMarker from the truncated response + // you just received. + // + // Do not attempt to construct this value. Use only the value of NextMarker + // from the truncated response you just received. + Marker *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListResourceTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListResourceTagsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListResourceTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListResourceTagsInput"} + if s.KeyId == nil { + invalidParams.Add(request.NewErrParamRequired("KeyId")) + } + if s.KeyId != nil && len(*s.KeyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKeyId sets the KeyId field's value. +func (s *ListResourceTagsInput) SetKeyId(v string) *ListResourceTagsInput { + s.KeyId = &v + return s +} + +// SetLimit sets the Limit field's value. +func (s *ListResourceTagsInput) SetLimit(v int64) *ListResourceTagsInput { + s.Limit = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListResourceTagsInput) SetMarker(v string) *ListResourceTagsInput { + s.Marker = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListResourceTagsResponse +type ListResourceTagsOutput struct { + _ struct{} `type:"structure"` + + // When Truncated is true, this element is present and contains the value to + // use for the Marker parameter in a subsequent request. + // + // Do not assume or infer any information from this value. + NextMarker *string `min:"1" type:"string"` + + // A list of tags. Each tag consists of a tag key and a tag value. + Tags []*Tag `type:"list"` + + // A flag that indicates whether there are more items in the list. When this + // value is true, the list in this response is truncated. 
To retrieve more items, + // pass the value of the NextMarker element in this response to the Marker parameter + // in a subsequent request. + Truncated *bool `type:"boolean"` +} + +// String returns the string representation +func (s ListResourceTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListResourceTagsOutput) GoString() string { + return s.String() +} + +// SetNextMarker sets the NextMarker field's value. +func (s *ListResourceTagsOutput) SetNextMarker(v string) *ListResourceTagsOutput { + s.NextMarker = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *ListResourceTagsOutput) SetTags(v []*Tag) *ListResourceTagsOutput { + s.Tags = v + return s +} + +// SetTruncated sets the Truncated field's value. +func (s *ListResourceTagsOutput) SetTruncated(v bool) *ListResourceTagsOutput { + s.Truncated = &v + return s +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListRetirableGrantsRequest type ListRetirableGrantsInput struct { _ struct{} `type:"structure"` - // When paginating results, specify the maximum number of items to return in - // the response. If additional items exist beyond the number you specify, the - // Truncated element in the response is set to true. + // Use this parameter to specify the maximum number of items to return. When + // this value is present, AWS KMS does not return more than the specified number + // of items, but it might return fewer. // // This value is optional. If you include a value, it must be between 1 and // 100, inclusive. If you do not include a value, it defaults to 50. Limit *int64 `min:"1" type:"integer"` - // Use this parameter only when paginating results and only in a subsequent - // request after you receive a response with truncated results. Set it to the - // value of NextMarker from the response you just received. + // Use this parameter in a subsequent request after you receive a response with + // truncated results. Set it to the value of NextMarker from the truncated response + // you just received. Marker *string `min:"1" type:"string"` // The retiring principal for which to list grants. @@ -6101,8 +7139,8 @@ type PutKeyPolicyInput struct { // For more information, refer to the scenario in the Default Key Policy (http://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) // section in the AWS Key Management Service Developer Guide. // - // Use this parameter only when you intend to prevent the principal making the - // request from making a subsequent PutKeyPolicy request on the CMK. + // Use this parameter only when you intend to prevent the principal that is + // making the request from making a subsequent PutKeyPolicy request on the CMK. // // The default value is false. BypassPolicyLockoutSafetyCheck *bool `type:"boolean"` @@ -6123,18 +7161,18 @@ type PutKeyPolicyInput struct { // If you do not set BypassPolicyLockoutSafetyCheck to true, the policy must // meet the following criteria: // - // * It must allow the principal making the PutKeyPolicy request to make - // a subsequent PutKeyPolicy request on the CMK. This reduces the likelihood - // that the CMK becomes unmanageable. 
For more information, refer to the - // scenario in the Default Key Policy (http://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) + // * It must allow the principal that is making the PutKeyPolicy request + // to make a subsequent PutKeyPolicy request on the CMK. This reduces the + // likelihood that the CMK becomes unmanageable. For more information, refer + // to the scenario in the Default Key Policy (http://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) // section in the AWS Key Management Service Developer Guide. // - // * The principal(s) specified in the key policy must exist and be visible - // to AWS KMS. When you create a new AWS principal (for example, an IAM user - // or role), you might need to enforce a delay before specifying the new - // principal in a key policy because the new principal might not immediately - // be visible to AWS KMS. For more information, see Changes that I make are - // not always immediately visible (http://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency) + // * The principals that are specified in the key policy must exist and be + // visible to AWS KMS. When you create a new AWS principal (for example, + // an IAM user or role), you might need to enforce a delay before specifying + // the new principal in a key policy because the new principal might not + // immediately be visible to AWS KMS. For more information, see Changes that + // I make are not always immediately visible (http://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency) // in the IAM User Guide. // // The policy size limit is 32 KiB (32768 bytes). @@ -6636,6 +7674,226 @@ func (s *ScheduleKeyDeletionOutput) SetKeyId(v string) *ScheduleKeyDeletionOutpu return s } +// A key-value pair. A tag consists of a tag key and a tag value. Tag keys and +// tag values are both required, but tag values can be empty (null) strings. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/Tag +type Tag struct { + _ struct{} `type:"structure"` + + // The key of the tag. + // + // TagKey is a required field + TagKey *string `min:"1" type:"string" required:"true"` + + // The value of the tag. + // + // TagValue is a required field + TagValue *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.TagKey == nil { + invalidParams.Add(request.NewErrParamRequired("TagKey")) + } + if s.TagKey != nil && len(*s.TagKey) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TagKey", 1)) + } + if s.TagValue == nil { + invalidParams.Add(request.NewErrParamRequired("TagValue")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTagKey sets the TagKey field's value. +func (s *Tag) SetTagKey(v string) *Tag { + s.TagKey = &v + return s +} + +// SetTagValue sets the TagValue field's value. 
+func (s *Tag) SetTagValue(v string) *Tag { + s.TagValue = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/TagResourceRequest +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // A unique identifier for the CMK you are tagging. You can use the unique key + // ID or the Amazon Resource Name (ARN) of the CMK. Examples: + // + // * Unique key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // KeyId is a required field + KeyId *string `min:"1" type:"string" required:"true"` + + // One or more tags. Each tag consists of a tag key and a tag value. + // + // Tags is a required field + Tags []*Tag `type:"list" required:"true"` +} + +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.KeyId == nil { + invalidParams.Add(request.NewErrParamRequired("KeyId")) + } + if s.KeyId != nil && len(*s.KeyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKeyId sets the KeyId field's value. +func (s *TagResourceInput) SetKeyId(v string) *TagResourceInput { + s.KeyId = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TagResourceInput) SetTags(v []*Tag) *TagResourceInput { + s.Tags = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/TagResourceOutput +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UntagResourceRequest +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // A unique identifier for the CMK from which you are removing tags. You can + // use the unique key ID or the Amazon Resource Name (ARN) of the CMK. Examples: + // + // * Unique key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // KeyId is a required field + KeyId *string `min:"1" type:"string" required:"true"` + + // One or more tag keys. Specify only the tag keys, not the tag values. 
+ // + // TagKeys is a required field + TagKeys []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} + if s.KeyId == nil { + invalidParams.Add(request.NewErrParamRequired("KeyId")) + } + if s.KeyId != nil && len(*s.KeyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKeyId sets the KeyId field's value. +func (s *UntagResourceInput) SetKeyId(v string) *UntagResourceInput { + s.KeyId = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. +func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { + s.TagKeys = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UntagResourceOutput +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceOutput) GoString() string { + return s.String() +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UpdateAliasRequest type UpdateAliasInput struct { _ struct{} `type:"structure"` @@ -6853,6 +8111,14 @@ const ( GrantOperationDescribeKey = "DescribeKey" ) +const ( + // KeyManagerTypeAws is a KeyManagerType enum value + KeyManagerTypeAws = "AWS" + + // KeyManagerTypeCustomer is a KeyManagerType enum value + KeyManagerTypeCustomer = "CUSTOMER" +) + const ( // KeyStateEnabled is a KeyState enum value KeyStateEnabled = "Enabled" diff --git a/vendor/github.com/aws/aws-sdk-go/service/kms/doc.go b/vendor/github.com/aws/aws-sdk-go/service/kms/doc.go new file mode 100644 index 000000000000..0b6a33ca97f9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/kms/doc.go @@ -0,0 +1,98 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package kms provides the client and types for making API +// requests to AWS Key Management Service. +// +// AWS Key Management Service (AWS KMS) is an encryption and key management +// web service. This guide describes the AWS KMS operations that you can call +// programmatically. For general information about AWS KMS, see the AWS Key +// Management Service Developer Guide (http://docs.aws.amazon.com/kms/latest/developerguide/). +// +// AWS provides SDKs that consist of libraries and sample code for various programming +// languages and platforms (Java, Ruby, .Net, iOS, Android, etc.). The SDKs +// provide a convenient way to create programmatic access to AWS KMS and other +// AWS services. For example, the SDKs take care of tasks such as signing requests +// (see below), managing errors, and retrying requests automatically. For more +// information about the AWS SDKs, including how to download and install them, +// see Tools for Amazon Web Services (http://aws.amazon.com/tools/). +// +// We recommend that you use the AWS SDKs to make programmatic API calls to +// AWS KMS. 
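As a rough sketch of that recommendation, and of the tagging surface this update vendors in (CreateKeyInput.Tags, the Tag type, and ListResourceTags with its Marker/Truncated pagination), the calls could look like the following. This assumes credentials and a region are already configured; the session setup, key description, and tag names are illustrative and not taken from the diff.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := kms.New(sess)

	// Create a CMK and tag it in the same request using the new Tags field.
	created, err := svc.CreateKey(&kms.CreateKeyInput{
		Description: aws.String("example CMK"), // illustrative value
		Tags: []*kms.Tag{
			{TagKey: aws.String("Project"), TagValue: aws.String("minikube")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	keyID := aws.StringValue(created.KeyMetadata.KeyId)

	// Page through the CMK's tags with Marker/NextMarker until Truncated is false.
	var marker *string
	for {
		page, err := svc.ListResourceTags(&kms.ListResourceTagsInput{
			KeyId:  aws.String(keyID),
			Marker: marker,
		})
		if err != nil {
			log.Fatal(err)
		}
		for _, t := range page.Tags {
			fmt.Printf("%s=%s\n", aws.StringValue(t.TagKey), aws.StringValue(t.TagValue))
		}
		if !aws.BoolValue(page.Truncated) {
			break
		}
		marker = page.NextMarker
	}
}
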
+// +// Clients must support TLS (Transport Layer Security) 1.0. We recommend TLS +// 1.2. Clients must also support cipher suites with Perfect Forward Secrecy +// (PFS) such as Ephemeral Diffie-Hellman (DHE) or Elliptic Curve Ephemeral +// Diffie-Hellman (ECDHE). Most modern systems such as Java 7 and later support +// these modes. +// +// Signing Requests +// +// Requests must be signed by using an access key ID and a secret access key. +// We strongly recommend that you do not use your AWS account (root) access +// key ID and secret key for everyday work with AWS KMS. Instead, use the access +// key ID and secret access key for an IAM user, or you can use the AWS Security +// Token Service to generate temporary security credentials that you can use +// to sign requests. +// +// All AWS KMS operations require Signature Version 4 (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). +// +// Logging API Requests +// +// AWS KMS supports AWS CloudTrail, a service that logs AWS API calls and related +// events for your AWS account and delivers them to an Amazon S3 bucket that +// you specify. By using the information collected by CloudTrail, you can determine +// what requests were made to AWS KMS, who made the request, when it was made, +// and so on. To learn more about CloudTrail, including how to turn it on and +// find your log files, see the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/). +// +// Additional Resources +// +// For more information about credentials and request signing, see the following: +// +// * AWS Security Credentials (http://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html) +// - This topic provides general information about the types of credentials +// used for accessing AWS. +// +// * Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html) +// - This section of the IAM User Guide describes how to create and use temporary +// security credentials. +// +// * Signature Version 4 Signing Process (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html) +// - This set of topics walks you through the process of signing a request +// using an access key ID and a secret access key. +// +// Commonly Used APIs +// +// Of the APIs discussed in this guide, the following will prove the most useful +// for most applications. You will likely perform actions other than these, +// such as creating keys and assigning policies, by using the console. +// +// * Encrypt +// +// * Decrypt +// +// * GenerateDataKey +// +// * GenerateDataKeyWithoutPlaintext +// +// See https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01 for more information on this service. +// +// See kms package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/kms/ +// +// Using the Client +// +// To AWS Key Management Service with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the AWS Key Management Service client KMS for more +// information on creating client for this service. 
+// https://docs.aws.amazon.com/sdk-for-go/api/service/kms/#New +package kms diff --git a/vendor/github.com/aws/aws-sdk-go/service/kms/errors.go b/vendor/github.com/aws/aws-sdk-go/service/kms/errors.go new file mode 100644 index 000000000000..0358c94437a4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/kms/errors.go @@ -0,0 +1,154 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package kms + +const ( + + // ErrCodeAlreadyExistsException for service response error code + // "AlreadyExistsException". + // + // The request was rejected because it attempted to create a resource that already + // exists. + ErrCodeAlreadyExistsException = "AlreadyExistsException" + + // ErrCodeDependencyTimeoutException for service response error code + // "DependencyTimeoutException". + // + // The system timed out while trying to fulfill the request. The request can + // be retried. + ErrCodeDependencyTimeoutException = "DependencyTimeoutException" + + // ErrCodeDisabledException for service response error code + // "DisabledException". + // + // The request was rejected because the specified CMK is not enabled. + ErrCodeDisabledException = "DisabledException" + + // ErrCodeExpiredImportTokenException for service response error code + // "ExpiredImportTokenException". + // + // The request was rejected because the provided import token is expired. Use + // GetParametersForImport to retrieve a new import token and public key, use + // the new public key to encrypt the key material, and then try the request + // again. + ErrCodeExpiredImportTokenException = "ExpiredImportTokenException" + + // ErrCodeIncorrectKeyMaterialException for service response error code + // "IncorrectKeyMaterialException". + // + // The request was rejected because the provided key material is invalid or + // is not the same key material that was previously imported into this customer + // master key (CMK). + ErrCodeIncorrectKeyMaterialException = "IncorrectKeyMaterialException" + + // ErrCodeInternalException for service response error code + // "InternalException". + // + // The request was rejected because an internal exception occurred. The request + // can be retried. + ErrCodeInternalException = "InternalException" + + // ErrCodeInvalidAliasNameException for service response error code + // "InvalidAliasNameException". + // + // The request was rejected because the specified alias name is not valid. + ErrCodeInvalidAliasNameException = "InvalidAliasNameException" + + // ErrCodeInvalidArnException for service response error code + // "InvalidArnException". + // + // The request was rejected because a specified ARN was not valid. + ErrCodeInvalidArnException = "InvalidArnException" + + // ErrCodeInvalidCiphertextException for service response error code + // "InvalidCiphertextException". + // + // The request was rejected because the specified ciphertext has been corrupted + // or is otherwise invalid. + ErrCodeInvalidCiphertextException = "InvalidCiphertextException" + + // ErrCodeInvalidGrantIdException for service response error code + // "InvalidGrantIdException". + // + // The request was rejected because the specified GrantId is not valid. + ErrCodeInvalidGrantIdException = "InvalidGrantIdException" + + // ErrCodeInvalidGrantTokenException for service response error code + // "InvalidGrantTokenException". + // + // The request was rejected because the specified grant token is not valid. 
+ ErrCodeInvalidGrantTokenException = "InvalidGrantTokenException" + + // ErrCodeInvalidImportTokenException for service response error code + // "InvalidImportTokenException". + // + // The request was rejected because the provided import token is invalid or + // is associated with a different customer master key (CMK). + ErrCodeInvalidImportTokenException = "InvalidImportTokenException" + + // ErrCodeInvalidKeyUsageException for service response error code + // "InvalidKeyUsageException". + // + // The request was rejected because the specified KeySpec value is not valid. + ErrCodeInvalidKeyUsageException = "InvalidKeyUsageException" + + // ErrCodeInvalidMarkerException for service response error code + // "InvalidMarkerException". + // + // The request was rejected because the marker that specifies where pagination + // should next begin is not valid. + ErrCodeInvalidMarkerException = "InvalidMarkerException" + + // ErrCodeInvalidStateException for service response error code + // "InvalidStateException". + // + // The request was rejected because the state of the specified resource is not + // valid for this request. + // + // For more information about how key state affects the use of a CMK, see How + // Key State Affects Use of a Customer Master Key (http://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) + // in the AWS Key Management Service Developer Guide. + ErrCodeInvalidStateException = "InvalidStateException" + + // ErrCodeKeyUnavailableException for service response error code + // "KeyUnavailableException". + // + // The request was rejected because the specified CMK was not available. The + // request can be retried. + ErrCodeKeyUnavailableException = "KeyUnavailableException" + + // ErrCodeLimitExceededException for service response error code + // "LimitExceededException". + // + // The request was rejected because a limit was exceeded. For more information, + // see Limits (http://docs.aws.amazon.com/kms/latest/developerguide/limits.html) + // in the AWS Key Management Service Developer Guide. + ErrCodeLimitExceededException = "LimitExceededException" + + // ErrCodeMalformedPolicyDocumentException for service response error code + // "MalformedPolicyDocumentException". + // + // The request was rejected because the specified policy is not syntactically + // or semantically correct. + ErrCodeMalformedPolicyDocumentException = "MalformedPolicyDocumentException" + + // ErrCodeNotFoundException for service response error code + // "NotFoundException". + // + // The request was rejected because the specified entity or resource could not + // be found. + ErrCodeNotFoundException = "NotFoundException" + + // ErrCodeTagException for service response error code + // "TagException". + // + // The request was rejected because one or more tags are not valid. + ErrCodeTagException = "TagException" + + // ErrCodeUnsupportedOperationException for service response error code + // "UnsupportedOperationException". + // + // The request was rejected because a specified parameter is not supported or + // a specified resource is not valid for this operation. + ErrCodeUnsupportedOperationException = "UnsupportedOperationException" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/kms/service.go b/vendor/github.com/aws/aws-sdk-go/service/kms/service.go index b4688cad32b8..3ff65de5e5bd 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kms/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kms/service.go @@ -1,4 +1,4 @@ -// THIS FILE IS AUTOMATICALLY GENERATED. 
DO NOT EDIT. +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. package kms @@ -11,80 +11,12 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) -// AWS Key Management Service (AWS KMS) is an encryption and key management -// web service. This guide describes the AWS KMS operations that you can call -// programmatically. For general information about AWS KMS, see the AWS Key -// Management Service Developer Guide (http://docs.aws.amazon.com/kms/latest/developerguide/). +// KMS provides the API operation methods for making requests to +// AWS Key Management Service. See this package's package overview docs +// for details on the service. // -// AWS provides SDKs that consist of libraries and sample code for various programming -// languages and platforms (Java, Ruby, .Net, iOS, Android, etc.). The SDKs -// provide a convenient way to create programmatic access to AWS KMS and other -// AWS services. For example, the SDKs take care of tasks such as signing requests -// (see below), managing errors, and retrying requests automatically. For more -// information about the AWS SDKs, including how to download and install them, -// see Tools for Amazon Web Services (http://aws.amazon.com/tools/). -// -// We recommend that you use the AWS SDKs to make programmatic API calls to -// AWS KMS. -// -// Clients must support TLS (Transport Layer Security) 1.0. We recommend TLS -// 1.2. Clients must also support cipher suites with Perfect Forward Secrecy -// (PFS) such as Ephemeral Diffie-Hellman (DHE) or Elliptic Curve Ephemeral -// Diffie-Hellman (ECDHE). Most modern systems such as Java 7 and later support -// these modes. -// -// Signing Requests -// -// Requests must be signed by using an access key ID and a secret access key. -// We strongly recommend that you do not use your AWS account (root) access -// key ID and secret key for everyday work with AWS KMS. Instead, use the access -// key ID and secret access key for an IAM user, or you can use the AWS Security -// Token Service to generate temporary security credentials that you can use -// to sign requests. -// -// All AWS KMS operations require Signature Version 4 (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). -// -// Logging API Requests -// -// AWS KMS supports AWS CloudTrail, a service that logs AWS API calls and related -// events for your AWS account and delivers them to an Amazon S3 bucket that -// you specify. By using the information collected by CloudTrail, you can determine -// what requests were made to AWS KMS, who made the request, when it was made, -// and so on. To learn more about CloudTrail, including how to turn it on and -// find your log files, see the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/). -// -// Additional Resources -// -// For more information about credentials and request signing, see the following: -// -// * AWS Security Credentials (http://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html) -// - This topic provides general information about the types of credentials -// used for accessing AWS. -// -// * Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html) -// - This section of the IAM User Guide describes how to create and use temporary -// security credentials. 
-// -// * Signature Version 4 Signing Process (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html) -// - This set of topics walks you through the process of signing a request -// using an access key ID and a secret access key. -// -// Commonly Used APIs -// -// Of the APIs discussed in this guide, the following will prove the most useful -// for most applications. You will likely perform actions other than these, -// such as creating keys and assigning policies, by using the console. -// -// * Encrypt -// -// * Decrypt -// -// * GenerateDataKey -// -// * GenerateDataKeyWithoutPlaintext -// The service client's operations are safe to be used concurrently. -// It is not safe to mutate any of the client's properties though. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01 +// KMS methods are safe to use concurrently. It is not safe to +// modify mutate any of the struct's properties though. type KMS struct { *client.Client } diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go index 19c57ed68c83..3b8be4378aeb 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -1,11 +1,11 @@ -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. -// Package sts provides a client for AWS Security Token Service. package sts import ( "time" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awsutil" "github.com/aws/aws-sdk-go/aws/request" ) @@ -14,19 +14,18 @@ const opAssumeRole = "AssumeRole" // AssumeRoleRequest generates a "aws/request.Request" representing the // client's request for the AssumeRole operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See AssumeRole for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the AssumeRole method directly -// instead. +// See AssumeRole for more information on using the AssumeRole +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the AssumeRoleRequest method. // req, resp := client.AssumeRoleRequest(params) @@ -153,16 +152,16 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // API operation AssumeRole for usage and error information. // // Returned Error Codes: -// * MalformedPolicyDocument +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" // The request was rejected because the policy document was malformed. The error // message describes the specific error. 
// -// * PackedPolicyTooLarge +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" // The request was rejected because the policy document was too large. The error // message describes how big the policy document is, in packed form, as a percentage // of what the API allows. // -// * RegionDisabledException +// * ErrCodeRegionDisabledException "RegionDisabledException" // STS is not activated in the requested region for the account that is being // asked to generate credentials. The account administrator must use the IAM // console to activate STS in that region. For more information, see Activating @@ -172,27 +171,41 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) { req, out := c.AssumeRoleRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// AssumeRoleWithContext is the same as AssumeRole with the addition of +// the ability to pass a context and additional request options. +// +// See AssumeRole for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) AssumeRoleWithContext(ctx aws.Context, input *AssumeRoleInput, opts ...request.Option) (*AssumeRoleOutput, error) { + req, out := c.AssumeRoleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opAssumeRoleWithSAML = "AssumeRoleWithSAML" // AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the // client's request for the AssumeRoleWithSAML operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See AssumeRoleWithSAML for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the AssumeRoleWithSAML method directly -// instead. +// See AssumeRoleWithSAML for more information on using the AssumeRoleWithSAML +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the AssumeRoleWithSAMLRequest method. // req, resp := client.AssumeRoleWithSAMLRequest(params) @@ -297,31 +310,31 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // API operation AssumeRoleWithSAML for usage and error information. 
// // Returned Error Codes: -// * MalformedPolicyDocument +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" // The request was rejected because the policy document was malformed. The error // message describes the specific error. // -// * PackedPolicyTooLarge +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" // The request was rejected because the policy document was too large. The error // message describes how big the policy document is, in packed form, as a percentage // of what the API allows. // -// * IDPRejectedClaim +// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" // The identity provider (IdP) reported that authentication failed. This might // be because the claim is invalid. // // If this error is returned for the AssumeRoleWithWebIdentity operation, it // can also mean that the claim has expired or has been explicitly revoked. // -// * InvalidIdentityToken +// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" // The web identity token that was passed could not be validated by AWS. Get // a new identity token from the identity provider and then retry the request. // -// * ExpiredTokenException +// * ErrCodeExpiredTokenException "ExpiredTokenException" // The web identity token that was passed is expired or is not valid. Get a // new identity token from the identity provider and then retry the request. // -// * RegionDisabledException +// * ErrCodeRegionDisabledException "RegionDisabledException" // STS is not activated in the requested region for the account that is being // asked to generate credentials. The account administrator must use the IAM // console to activate STS in that region. For more information, see Activating @@ -331,27 +344,41 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) { req, out := c.AssumeRoleWithSAMLRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// AssumeRoleWithSAMLWithContext is the same as AssumeRoleWithSAML with the addition of +// the ability to pass a context and additional request options. +// +// See AssumeRoleWithSAML for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) AssumeRoleWithSAMLWithContext(ctx aws.Context, input *AssumeRoleWithSAMLInput, opts ...request.Option) (*AssumeRoleWithSAMLOutput, error) { + req, out := c.AssumeRoleWithSAMLRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity" // AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the // client's request for the AssumeRoleWithWebIdentity operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See AssumeRoleWithWebIdentity for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the AssumeRoleWithWebIdentity method directly -// instead. +// See AssumeRoleWithWebIdentity for more information on using the AssumeRoleWithWebIdentity +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the AssumeRoleWithWebIdentityRequest method. // req, resp := client.AssumeRoleWithWebIdentityRequest(params) @@ -478,38 +505,38 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // API operation AssumeRoleWithWebIdentity for usage and error information. // // Returned Error Codes: -// * MalformedPolicyDocument +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" // The request was rejected because the policy document was malformed. The error // message describes the specific error. // -// * PackedPolicyTooLarge +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" // The request was rejected because the policy document was too large. The error // message describes how big the policy document is, in packed form, as a percentage // of what the API allows. // -// * IDPRejectedClaim +// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" // The identity provider (IdP) reported that authentication failed. This might // be because the claim is invalid. // // If this error is returned for the AssumeRoleWithWebIdentity operation, it // can also mean that the claim has expired or has been explicitly revoked. // -// * IDPCommunicationError +// * ErrCodeIDPCommunicationErrorException "IDPCommunicationError" // The request could not be fulfilled because the non-AWS identity provider // (IDP) that was asked to verify the incoming identity token could not be reached. // This is often a transient error caused by network conditions. Retry the request // a limited number of times so that you don't exceed the request rate. If the // error persists, the non-AWS identity provider might be down or not responding. // -// * InvalidIdentityToken +// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" // The web identity token that was passed could not be validated by AWS. Get // a new identity token from the identity provider and then retry the request. // -// * ExpiredTokenException +// * ErrCodeExpiredTokenException "ExpiredTokenException" // The web identity token that was passed is expired or is not valid. Get a // new identity token from the identity provider and then retry the request. // -// * RegionDisabledException +// * ErrCodeRegionDisabledException "RegionDisabledException" // STS is not activated in the requested region for the account that is being // asked to generate credentials. The account administrator must use the IAM // console to activate STS in that region. 
For more information, see Activating @@ -519,27 +546,41 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) { req, out := c.AssumeRoleWithWebIdentityRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// AssumeRoleWithWebIdentityWithContext is the same as AssumeRoleWithWebIdentity with the addition of +// the ability to pass a context and additional request options. +// +// See AssumeRoleWithWebIdentity for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) AssumeRoleWithWebIdentityWithContext(ctx aws.Context, input *AssumeRoleWithWebIdentityInput, opts ...request.Option) (*AssumeRoleWithWebIdentityOutput, error) { + req, out := c.AssumeRoleWithWebIdentityRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage" // DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the // client's request for the DecodeAuthorizationMessage operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. // -// See DecodeAuthorizationMessage for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DecodeAuthorizationMessage method directly -// instead. +// See DecodeAuthorizationMessage for more information on using the DecodeAuthorizationMessage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DecodeAuthorizationMessageRequest method. // req, resp := client.DecodeAuthorizationMessageRequest(params) @@ -609,7 +650,7 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag // API operation DecodeAuthorizationMessage for usage and error information. // // Returned Error Codes: -// * InvalidAuthorizationMessageException +// * ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException" // The error returned if the message passed to DecodeAuthorizationMessage was // invalid. This can happen if the token contains invalid characters, such as // linebreaks. 
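The hunks above and below this point show the two changes this SDK bump makes to every generated STS operation: error codes in the doc comments now reference generated ErrCode* constants (defined in the new errors.go further down), and each operation gains a *WithContext variant that takes an aws.Context plus request.Options. The following is a minimal caller-side sketch, not part of the vendored files, assuming credentials and region are already configured in the environment; the timeout value and the use of GetCallerIdentity are illustrative only.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	// Create an STS client from a default session (assumes credentials
	// and region come from the environment or shared config).
	svc := sts.New(session.Must(session.NewSession()))

	// The new *WithContext methods accept an aws.Context; a standard
	// context.Context satisfies that interface, so the request can be
	// cancelled or time-limited by the caller.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	out, err := svc.GetCallerIdentityWithContext(ctx, &sts.GetCallerIdentityInput{})
	if err != nil {
		// Service errors can now be matched against the generated
		// ErrCode constants instead of hard-coded strings.
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == sts.ErrCodeRegionDisabledException {
			fmt.Println("STS is not activated in this region:", aerr.Message())
			return
		}
		fmt.Println("request failed:", err)
		return
	}
	fmt.Println("caller ARN:", *out.Arn)
}

As the generated comments note, passing a nil context panics; the non-context methods (e.g. GetCallerIdentity) remain available and now simply return out together with req.Send().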
@@ -617,27 +658,41 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag // Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) { req, out := c.DecodeAuthorizationMessageRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DecodeAuthorizationMessageWithContext is the same as DecodeAuthorizationMessage with the addition of +// the ability to pass a context and additional request options. +// +// See DecodeAuthorizationMessage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) DecodeAuthorizationMessageWithContext(ctx aws.Context, input *DecodeAuthorizationMessageInput, opts ...request.Option) (*DecodeAuthorizationMessageOutput, error) { + req, out := c.DecodeAuthorizationMessageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetCallerIdentity = "GetCallerIdentity" // GetCallerIdentityRequest generates a "aws/request.Request" representing the // client's request for the GetCallerIdentity operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See GetCallerIdentity for usage and error information. +// See GetCallerIdentity for more information on using the GetCallerIdentity +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetCallerIdentity method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetCallerIdentityRequest method. // req, resp := client.GetCallerIdentityRequest(params) @@ -678,27 +733,41 @@ func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *requ // Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) { req, out := c.GetCallerIdentityRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetCallerIdentityWithContext is the same as GetCallerIdentity with the addition of +// the ability to pass a context and additional request options. +// +// See GetCallerIdentity for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) GetCallerIdentityWithContext(ctx aws.Context, input *GetCallerIdentityInput, opts ...request.Option) (*GetCallerIdentityOutput, error) { + req, out := c.GetCallerIdentityRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetFederationToken = "GetFederationToken" // GetFederationTokenRequest generates a "aws/request.Request" representing the // client's request for the GetFederationToken operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See GetFederationToken for usage and error information. +// See GetFederationToken for more information on using the GetFederationToken +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetFederationToken method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetFederationTokenRequest method. // req, resp := client.GetFederationTokenRequest(params) @@ -814,16 +883,16 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // API operation GetFederationToken for usage and error information. // // Returned Error Codes: -// * MalformedPolicyDocument +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" // The request was rejected because the policy document was malformed. The error // message describes the specific error. // -// * PackedPolicyTooLarge +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" // The request was rejected because the policy document was too large. The error // message describes how big the policy document is, in packed form, as a percentage // of what the API allows. // -// * RegionDisabledException +// * ErrCodeRegionDisabledException "RegionDisabledException" // STS is not activated in the requested region for the account that is being // asked to generate credentials. The account administrator must use the IAM // console to activate STS in that region. 
For more information, see Activating @@ -833,27 +902,41 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) { req, out := c.GetFederationTokenRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetFederationTokenWithContext is the same as GetFederationToken with the addition of +// the ability to pass a context and additional request options. +// +// See GetFederationToken for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) GetFederationTokenWithContext(ctx aws.Context, input *GetFederationTokenInput, opts ...request.Option) (*GetFederationTokenOutput, error) { + req, out := c.GetFederationTokenRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetSessionToken = "GetSessionToken" // GetSessionTokenRequest generates a "aws/request.Request" representing the // client's request for the GetSessionToken operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See GetSessionToken for usage and error information. +// See GetSessionToken for more information on using the GetSessionToken +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetSessionToken method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetSessionTokenRequest method. // req, resp := client.GetSessionTokenRequest(params) @@ -937,7 +1020,7 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request. // API operation GetSessionToken for usage and error information. // // Returned Error Codes: -// * RegionDisabledException +// * ErrCodeRegionDisabledException "RegionDisabledException" // STS is not activated in the requested region for the account that is being // asked to generate credentials. The account administrator must use the IAM // console to activate STS in that region. For more information, see Activating @@ -947,8 +1030,23 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request. 
// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) { req, out := c.GetSessionTokenRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetSessionTokenWithContext is the same as GetSessionToken with the addition of +// the ability to pass a context and additional request options. +// +// See GetSessionToken for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionTokenInput, opts ...request.Option) (*GetSessionTokenOutput, error) { + req, out := c.GetSessionTokenRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } // Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleRequest @@ -980,7 +1078,7 @@ type AssumeRoleInput struct { // // The regex used to validated this parameter is a string of characters consisting // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@:\/- + // also include underscores or any of the following characters: =,.@:/- ExternalId *string `min:"2" type:"string"` // An IAM policy in JSON format. @@ -2164,9 +2262,9 @@ type GetSessionTokenInput struct { // You can find the device for an IAM user by going to the AWS Management Console // and viewing the user's security credentials. // - // The regex used to validate this parameter is a string of characters consisting + // The regex used to validated this parameter is a string of characters consisting // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@- + // also include underscores or any of the following characters: =,.@:/- SerialNumber *string `min:"9" type:"string"` // The value provided by the MFA device, if MFA is required. If any policy requires diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go new file mode 100644 index 000000000000..a43fa80555c7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go @@ -0,0 +1,72 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package sts provides the client and types for making API +// requests to AWS Security Token Service. +// +// The AWS Security Token Service (STS) is a web service that enables you to +// request temporary, limited-privilege credentials for AWS Identity and Access +// Management (IAM) users or for users that you authenticate (federated users). +// This guide provides descriptions of the STS API. For more detailed information +// about using this service, go to Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). +// +// As an alternative to using the API, you can use one of the AWS SDKs, which +// consist of libraries and sample code for various programming languages and +// platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient +// way to create programmatic access to STS. 
For example, the SDKs take care +// of cryptographically signing requests, managing errors, and retrying requests +// automatically. For information about the AWS SDKs, including how to download +// and install them, see the Tools for Amazon Web Services page (http://aws.amazon.com/tools/). +// +// For information about setting up signatures and authorization through the +// API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) +// in the AWS General Reference. For general information about the Query API, +// go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) +// in Using IAM. For information about using security tokens with other AWS +// products, go to AWS Services That Work with IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html) +// in the IAM User Guide. +// +// If you're new to AWS and need additional technical information about a specific +// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/ +// (http://aws.amazon.com/documentation/). +// +// Endpoints +// +// The AWS Security Token Service (STS) has a default endpoint of https://sts.amazonaws.com +// that maps to the US East (N. Virginia) region. Additional regions are available +// and are activated by default. For more information, see Activating and Deactivating +// AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// For information about STS endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region) +// in the AWS General Reference. +// +// Recording API requests +// +// STS supports AWS CloudTrail, which is a service that records AWS calls for +// your AWS account and delivers log files to an Amazon S3 bucket. By using +// information collected by CloudTrail, you can determine what requests were +// successfully made to STS, who made the request, when it was made, and so +// on. To learn more about CloudTrail, including how to turn it on and find +// your log files, see the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html). +// +// See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service. +// +// See sts package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/ +// +// Using the Client +// +// To AWS Security Token Service with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the AWS Security Token Service client STS for more +// information on creating client for this service. 
+// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#New +package sts diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go new file mode 100644 index 000000000000..e24884ef371a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go @@ -0,0 +1,73 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sts + +const ( + + // ErrCodeExpiredTokenException for service response error code + // "ExpiredTokenException". + // + // The web identity token that was passed is expired or is not valid. Get a + // new identity token from the identity provider and then retry the request. + ErrCodeExpiredTokenException = "ExpiredTokenException" + + // ErrCodeIDPCommunicationErrorException for service response error code + // "IDPCommunicationError". + // + // The request could not be fulfilled because the non-AWS identity provider + // (IDP) that was asked to verify the incoming identity token could not be reached. + // This is often a transient error caused by network conditions. Retry the request + // a limited number of times so that you don't exceed the request rate. If the + // error persists, the non-AWS identity provider might be down or not responding. + ErrCodeIDPCommunicationErrorException = "IDPCommunicationError" + + // ErrCodeIDPRejectedClaimException for service response error code + // "IDPRejectedClaim". + // + // The identity provider (IdP) reported that authentication failed. This might + // be because the claim is invalid. + // + // If this error is returned for the AssumeRoleWithWebIdentity operation, it + // can also mean that the claim has expired or has been explicitly revoked. + ErrCodeIDPRejectedClaimException = "IDPRejectedClaim" + + // ErrCodeInvalidAuthorizationMessageException for service response error code + // "InvalidAuthorizationMessageException". + // + // The error returned if the message passed to DecodeAuthorizationMessage was + // invalid. This can happen if the token contains invalid characters, such as + // linebreaks. + ErrCodeInvalidAuthorizationMessageException = "InvalidAuthorizationMessageException" + + // ErrCodeInvalidIdentityTokenException for service response error code + // "InvalidIdentityToken". + // + // The web identity token that was passed could not be validated by AWS. Get + // a new identity token from the identity provider and then retry the request. + ErrCodeInvalidIdentityTokenException = "InvalidIdentityToken" + + // ErrCodeMalformedPolicyDocumentException for service response error code + // "MalformedPolicyDocument". + // + // The request was rejected because the policy document was malformed. The error + // message describes the specific error. + ErrCodeMalformedPolicyDocumentException = "MalformedPolicyDocument" + + // ErrCodePackedPolicyTooLargeException for service response error code + // "PackedPolicyTooLarge". + // + // The request was rejected because the policy document was too large. The error + // message describes how big the policy document is, in packed form, as a percentage + // of what the API allows. + ErrCodePackedPolicyTooLargeException = "PackedPolicyTooLarge" + + // ErrCodeRegionDisabledException for service response error code + // "RegionDisabledException". + // + // STS is not activated in the requested region for the account that is being + // asked to generate credentials. The account administrator must use the IAM + // console to activate STS in that region. 
For more information, see Activating + // and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) + // in the IAM User Guide. + ErrCodeRegionDisabledException = "RegionDisabledException" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go index 9c4bfb838b57..1ee5839e0462 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go @@ -1,4 +1,4 @@ -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. package sts @@ -11,54 +11,12 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/query" ) -// The AWS Security Token Service (STS) is a web service that enables you to -// request temporary, limited-privilege credentials for AWS Identity and Access -// Management (IAM) users or for users that you authenticate (federated users). -// This guide provides descriptions of the STS API. For more detailed information -// about using this service, go to Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). +// STS provides the API operation methods for making requests to +// AWS Security Token Service. See this package's package overview docs +// for details on the service. // -// As an alternative to using the API, you can use one of the AWS SDKs, which -// consist of libraries and sample code for various programming languages and -// platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient -// way to create programmatic access to STS. For example, the SDKs take care -// of cryptographically signing requests, managing errors, and retrying requests -// automatically. For information about the AWS SDKs, including how to download -// and install them, see the Tools for Amazon Web Services page (http://aws.amazon.com/tools/). -// -// For information about setting up signatures and authorization through the -// API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) -// in the AWS General Reference. For general information about the Query API, -// go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) -// in Using IAM. For information about using security tokens with other AWS -// products, go to AWS Services That Work with IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html) -// in the IAM User Guide. -// -// If you're new to AWS and need additional technical information about a specific -// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/ -// (http://aws.amazon.com/documentation/). -// -// Endpoints -// -// The AWS Security Token Service (STS) has a default endpoint of https://sts.amazonaws.com -// that maps to the US East (N. Virginia) region. Additional regions are available -// and are activated by default. For more information, see Activating and Deactivating -// AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. -// -// For information about STS endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region) -// in the AWS General Reference. 
-// -// Recording API requests -// -// STS supports AWS CloudTrail, which is a service that records AWS calls for -// your AWS account and delivers log files to an Amazon S3 bucket. By using -// information collected by CloudTrail, you can determine what requests were -// successfully made to STS, who made the request, when it was made, and so -// on. To learn more about CloudTrail, including how to turn it on and find -// your log files, see the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html). -// The service client's operations are safe to be used concurrently. -// It is not safe to mutate any of the client's properties though. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 +// STS methods are safe to use concurrently. It is not safe to +// modify mutate any of the struct's properties though. type STS struct { *client.Client } diff --git a/vendor/github.com/container-storage-interface/spec/LICENSE b/vendor/github.com/container-storage-interface/spec/LICENSE new file mode 100644 index 000000000000..8dada3edaf50 --- /dev/null +++ b/vendor/github.com/container-storage-interface/spec/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go b/vendor/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go new file mode 100644 index 000000000000..6158a4870cc8 --- /dev/null +++ b/vendor/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go @@ -0,0 +1,2525 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: csi.proto + +/* +Package csi is a generated protocol buffer package. 
+ +It is generated from these files: + csi.proto + +It has these top-level messages: + GetSupportedVersionsRequest + GetSupportedVersionsResponse + Version + GetPluginInfoRequest + GetPluginInfoResponse + CreateVolumeRequest + CreateVolumeResponse + VolumeCapability + CapacityRange + VolumeInfo + DeleteVolumeRequest + DeleteVolumeResponse + ControllerPublishVolumeRequest + ControllerPublishVolumeResponse + ControllerUnpublishVolumeRequest + ControllerUnpublishVolumeResponse + ValidateVolumeCapabilitiesRequest + ValidateVolumeCapabilitiesResponse + ListVolumesRequest + ListVolumesResponse + GetCapacityRequest + GetCapacityResponse + ControllerProbeRequest + ControllerProbeResponse + ControllerGetCapabilitiesRequest + ControllerGetCapabilitiesResponse + ControllerServiceCapability + NodePublishVolumeRequest + NodePublishVolumeResponse + NodeUnpublishVolumeRequest + NodeUnpublishVolumeResponse + GetNodeIDRequest + GetNodeIDResponse + NodeProbeRequest + NodeProbeResponse + NodeGetCapabilitiesRequest + NodeGetCapabilitiesResponse + NodeServiceCapability +*/ +package csi + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type VolumeCapability_AccessMode_Mode int32 + +const ( + VolumeCapability_AccessMode_UNKNOWN VolumeCapability_AccessMode_Mode = 0 + // Can be published as read/write at one node at a time. + VolumeCapability_AccessMode_SINGLE_NODE_WRITER VolumeCapability_AccessMode_Mode = 1 + // Can be published as readonly at one node at a time. + VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY VolumeCapability_AccessMode_Mode = 2 + // Can be published as readonly at multiple nodes simultaneously. + VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY VolumeCapability_AccessMode_Mode = 3 + // Can be published at multiple nodes simultaneously. Only one of + // the node can be used as read/write. The rest will be readonly. + VolumeCapability_AccessMode_MULTI_NODE_SINGLE_WRITER VolumeCapability_AccessMode_Mode = 4 + // Can be published as read/write at multiple nodes + // simultaneously. 
+ VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER VolumeCapability_AccessMode_Mode = 5 +) + +var VolumeCapability_AccessMode_Mode_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SINGLE_NODE_WRITER", + 2: "SINGLE_NODE_READER_ONLY", + 3: "MULTI_NODE_READER_ONLY", + 4: "MULTI_NODE_SINGLE_WRITER", + 5: "MULTI_NODE_MULTI_WRITER", +} +var VolumeCapability_AccessMode_Mode_value = map[string]int32{ + "UNKNOWN": 0, + "SINGLE_NODE_WRITER": 1, + "SINGLE_NODE_READER_ONLY": 2, + "MULTI_NODE_READER_ONLY": 3, + "MULTI_NODE_SINGLE_WRITER": 4, + "MULTI_NODE_MULTI_WRITER": 5, +} + +func (x VolumeCapability_AccessMode_Mode) String() string { + return proto.EnumName(VolumeCapability_AccessMode_Mode_name, int32(x)) +} +func (VolumeCapability_AccessMode_Mode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{7, 2, 0} +} + +type ControllerServiceCapability_RPC_Type int32 + +const ( + ControllerServiceCapability_RPC_UNKNOWN ControllerServiceCapability_RPC_Type = 0 + ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME ControllerServiceCapability_RPC_Type = 1 + ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME ControllerServiceCapability_RPC_Type = 2 + ControllerServiceCapability_RPC_LIST_VOLUMES ControllerServiceCapability_RPC_Type = 3 + ControllerServiceCapability_RPC_GET_CAPACITY ControllerServiceCapability_RPC_Type = 4 +) + +var ControllerServiceCapability_RPC_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "CREATE_DELETE_VOLUME", + 2: "PUBLISH_UNPUBLISH_VOLUME", + 3: "LIST_VOLUMES", + 4: "GET_CAPACITY", +} +var ControllerServiceCapability_RPC_Type_value = map[string]int32{ + "UNKNOWN": 0, + "CREATE_DELETE_VOLUME": 1, + "PUBLISH_UNPUBLISH_VOLUME": 2, + "LIST_VOLUMES": 3, + "GET_CAPACITY": 4, +} + +func (x ControllerServiceCapability_RPC_Type) String() string { + return proto.EnumName(ControllerServiceCapability_RPC_Type_name, int32(x)) +} +func (ControllerServiceCapability_RPC_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{26, 0, 0} +} + +type NodeServiceCapability_RPC_Type int32 + +const ( + NodeServiceCapability_RPC_UNKNOWN NodeServiceCapability_RPC_Type = 0 +) + +var NodeServiceCapability_RPC_Type_name = map[int32]string{ + 0: "UNKNOWN", +} +var NodeServiceCapability_RPC_Type_value = map[string]int32{ + "UNKNOWN": 0, +} + +func (x NodeServiceCapability_RPC_Type) String() string { + return proto.EnumName(NodeServiceCapability_RPC_Type_name, int32(x)) +} +func (NodeServiceCapability_RPC_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{37, 0, 0} +} + +// ////// +// ////// +type GetSupportedVersionsRequest struct { +} + +func (m *GetSupportedVersionsRequest) Reset() { *m = GetSupportedVersionsRequest{} } +func (m *GetSupportedVersionsRequest) String() string { return proto.CompactTextString(m) } +func (*GetSupportedVersionsRequest) ProtoMessage() {} +func (*GetSupportedVersionsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type GetSupportedVersionsResponse struct { + // All the CSI versions that the Plugin supports. This field is + // REQUIRED. 
+ SupportedVersions []*Version `protobuf:"bytes,1,rep,name=supported_versions,json=supportedVersions" json:"supported_versions,omitempty"` +} + +func (m *GetSupportedVersionsResponse) Reset() { *m = GetSupportedVersionsResponse{} } +func (m *GetSupportedVersionsResponse) String() string { return proto.CompactTextString(m) } +func (*GetSupportedVersionsResponse) ProtoMessage() {} +func (*GetSupportedVersionsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *GetSupportedVersionsResponse) GetSupportedVersions() []*Version { + if m != nil { + return m.SupportedVersions + } + return nil +} + +// Specifies a version in Semantic Version 2.0 format. +// (http://semver.org/spec/v2.0.0.html) +type Version struct { + Major uint32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` + Minor uint32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` + Patch uint32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"` +} + +func (m *Version) Reset() { *m = Version{} } +func (m *Version) String() string { return proto.CompactTextString(m) } +func (*Version) ProtoMessage() {} +func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *Version) GetMajor() uint32 { + if m != nil { + return m.Major + } + return 0 +} + +func (m *Version) GetMinor() uint32 { + if m != nil { + return m.Minor + } + return 0 +} + +func (m *Version) GetPatch() uint32 { + if m != nil { + return m.Patch + } + return 0 +} + +// ////// +// ////// +type GetPluginInfoRequest struct { + // The API version assumed by the CO. This is a REQUIRED field. + Version *Version `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"` +} + +func (m *GetPluginInfoRequest) Reset() { *m = GetPluginInfoRequest{} } +func (m *GetPluginInfoRequest) String() string { return proto.CompactTextString(m) } +func (*GetPluginInfoRequest) ProtoMessage() {} +func (*GetPluginInfoRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *GetPluginInfoRequest) GetVersion() *Version { + if m != nil { + return m.Version + } + return nil +} + +type GetPluginInfoResponse struct { + // The name MUST follow reverse domain name notation format + // (https://en.wikipedia.org/wiki/Reverse_domain_name_notation). + // It SHOULD include the plugin's host company name and the plugin + // name, to minimize the possibility of collisions. It MUST be 63 + // characters or less, beginning and ending with an alphanumeric + // character ([a-z0-9A-Z]) with dashes (-), underscores (_), + // dots (.), and alphanumerics between. This field is REQUIRED. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // This field is REQUIRED. Value of this field is opaque to the CO. + VendorVersion string `protobuf:"bytes,2,opt,name=vendor_version,json=vendorVersion" json:"vendor_version,omitempty"` + // This field is OPTIONAL. Values are opaque to the CO. 
+ Manifest map[string]string `protobuf:"bytes,3,rep,name=manifest" json:"manifest,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *GetPluginInfoResponse) Reset() { *m = GetPluginInfoResponse{} } +func (m *GetPluginInfoResponse) String() string { return proto.CompactTextString(m) } +func (*GetPluginInfoResponse) ProtoMessage() {} +func (*GetPluginInfoResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *GetPluginInfoResponse) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *GetPluginInfoResponse) GetVendorVersion() string { + if m != nil { + return m.VendorVersion + } + return "" +} + +func (m *GetPluginInfoResponse) GetManifest() map[string]string { + if m != nil { + return m.Manifest + } + return nil +} + +// ////// +// ////// +type CreateVolumeRequest struct { + // The API version assumed by the CO. This field is REQUIRED. + Version *Version `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"` + // The suggested name for the storage space. This field is REQUIRED. + // It serves two purposes: + // 1) Idempotency - This name is generated by the CO to achieve + // idempotency. If `CreateVolume` fails, the volume may or may not + // be provisioned. In this case, the CO may call `CreateVolume` + // again, with the same name, to ensure the volume exists. The + // Plugin should ensure that multiple `CreateVolume` calls for the + // same name do not result in more than one piece of storage + // provisioned corresponding to that name. If a Plugin is unable to + // enforce idempotency, the CO's error recovery logic could result + // in multiple (unused) volumes being provisioned. + // 2) Suggested name - Some storage systems allow callers to specify + // an identifier by which to refer to the newly provisioned + // storage. If a storage system supports this, it can optionally + // use this name as the identifier for the new volume. + Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + // This field is OPTIONAL. This allows the CO to specify the capacity + // requirement of the volume to be provisioned. If not specified, the + // Plugin MAY choose an implementation-defined capacity range. + CapacityRange *CapacityRange `protobuf:"bytes,3,opt,name=capacity_range,json=capacityRange" json:"capacity_range,omitempty"` + // The capabilities that the provisioned volume MUST have: the Plugin + // MUST provision a volume that could satisfy ALL of the + // capabilities specified in this list. The Plugin MUST assume that + // the CO MAY use the provisioned volume later with ANY of the + // capabilities specified in this list. This also enables the CO to do + // early validation: if ANY of the specified volume capabilities are + // not supported by the Plugin, the call SHALL fail. This field is + // REQUIRED. + VolumeCapabilities []*VolumeCapability `protobuf:"bytes,4,rep,name=volume_capabilities,json=volumeCapabilities" json:"volume_capabilities,omitempty"` + // Plugin specific parameters passed in as opaque key-value pairs. + // This field is OPTIONAL. The Plugin is responsible for parsing and + // validating these parameters. COs will treat these as opaque. + Parameters map[string]string `protobuf:"bytes,5,rep,name=parameters" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // End user credentials used to authenticate/authorize volume creation + // request. 
+ // This field contains credential data, for example username and + // password. Each key must consist of alphanumeric characters, '-', + // '_' or '.'. Each value MUST contain a valid string. An SP MAY + // choose to accept binary (non-string) data by using a binary-to-text + // encoding scheme, like base64. An SP SHALL advertise the + // requirements for credentials in documentation. COs SHALL permit + // users to pass through the required credentials. This information is + // sensitive and MUST be treated as such (not logged, etc.) by the CO. + // This field is OPTIONAL. + UserCredentials map[string]string `protobuf:"bytes,6,rep,name=user_credentials,json=userCredentials" json:"user_credentials,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *CreateVolumeRequest) Reset() { *m = CreateVolumeRequest{} } +func (m *CreateVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*CreateVolumeRequest) ProtoMessage() {} +func (*CreateVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *CreateVolumeRequest) GetVersion() *Version { + if m != nil { + return m.Version + } + return nil +} + +func (m *CreateVolumeRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateVolumeRequest) GetCapacityRange() *CapacityRange { + if m != nil { + return m.CapacityRange + } + return nil +} + +func (m *CreateVolumeRequest) GetVolumeCapabilities() []*VolumeCapability { + if m != nil { + return m.VolumeCapabilities + } + return nil +} + +func (m *CreateVolumeRequest) GetParameters() map[string]string { + if m != nil { + return m.Parameters + } + return nil +} + +func (m *CreateVolumeRequest) GetUserCredentials() map[string]string { + if m != nil { + return m.UserCredentials + } + return nil +} + +type CreateVolumeResponse struct { + // Contains all attributes of the newly created volume that are + // relevant to the CO along with information required by the Plugin + // to uniquely identify the volume. This field is REQUIRED. + VolumeInfo *VolumeInfo `protobuf:"bytes,1,opt,name=volume_info,json=volumeInfo" json:"volume_info,omitempty"` +} + +func (m *CreateVolumeResponse) Reset() { *m = CreateVolumeResponse{} } +func (m *CreateVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*CreateVolumeResponse) ProtoMessage() {} +func (*CreateVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *CreateVolumeResponse) GetVolumeInfo() *VolumeInfo { + if m != nil { + return m.VolumeInfo + } + return nil +} + +// Specify a capability of a volume. +type VolumeCapability struct { + // Specifies what API the volume will be accessed using. One of the + // following fields MUST be specified. + // + // Types that are valid to be assigned to AccessType: + // *VolumeCapability_Block + // *VolumeCapability_Mount + AccessType isVolumeCapability_AccessType `protobuf_oneof:"access_type"` + // This is a REQUIRED field. 
+ AccessMode *VolumeCapability_AccessMode `protobuf:"bytes,3,opt,name=access_mode,json=accessMode" json:"access_mode,omitempty"` +} + +func (m *VolumeCapability) Reset() { *m = VolumeCapability{} } +func (m *VolumeCapability) String() string { return proto.CompactTextString(m) } +func (*VolumeCapability) ProtoMessage() {} +func (*VolumeCapability) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +type isVolumeCapability_AccessType interface { + isVolumeCapability_AccessType() +} + +type VolumeCapability_Block struct { + Block *VolumeCapability_BlockVolume `protobuf:"bytes,1,opt,name=block,oneof"` +} +type VolumeCapability_Mount struct { + Mount *VolumeCapability_MountVolume `protobuf:"bytes,2,opt,name=mount,oneof"` +} + +func (*VolumeCapability_Block) isVolumeCapability_AccessType() {} +func (*VolumeCapability_Mount) isVolumeCapability_AccessType() {} + +func (m *VolumeCapability) GetAccessType() isVolumeCapability_AccessType { + if m != nil { + return m.AccessType + } + return nil +} + +func (m *VolumeCapability) GetBlock() *VolumeCapability_BlockVolume { + if x, ok := m.GetAccessType().(*VolumeCapability_Block); ok { + return x.Block + } + return nil +} + +func (m *VolumeCapability) GetMount() *VolumeCapability_MountVolume { + if x, ok := m.GetAccessType().(*VolumeCapability_Mount); ok { + return x.Mount + } + return nil +} + +func (m *VolumeCapability) GetAccessMode() *VolumeCapability_AccessMode { + if m != nil { + return m.AccessMode + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*VolumeCapability) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _VolumeCapability_OneofMarshaler, _VolumeCapability_OneofUnmarshaler, _VolumeCapability_OneofSizer, []interface{}{ + (*VolumeCapability_Block)(nil), + (*VolumeCapability_Mount)(nil), + } +} + +func _VolumeCapability_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*VolumeCapability) + // access_type + switch x := m.AccessType.(type) { + case *VolumeCapability_Block: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Block); err != nil { + return err + } + case *VolumeCapability_Mount: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Mount); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("VolumeCapability.AccessType has unexpected type %T", x) + } + return nil +} + +func _VolumeCapability_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*VolumeCapability) + switch tag { + case 1: // access_type.block + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(VolumeCapability_BlockVolume) + err := b.DecodeMessage(msg) + m.AccessType = &VolumeCapability_Block{msg} + return true, err + case 2: // access_type.mount + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(VolumeCapability_MountVolume) + err := b.DecodeMessage(msg) + m.AccessType = &VolumeCapability_Mount{msg} + return true, err + default: + return false, nil + } +} + +func _VolumeCapability_OneofSizer(msg proto.Message) (n int) { + m := msg.(*VolumeCapability) + // access_type + switch x := m.AccessType.(type) { + case *VolumeCapability_Block: + s := proto.Size(x.Block) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case 
*VolumeCapability_Mount: + s := proto.Size(x.Mount) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Indicate that the volume will be accessed via the block device API. +type VolumeCapability_BlockVolume struct { +} + +func (m *VolumeCapability_BlockVolume) Reset() { *m = VolumeCapability_BlockVolume{} } +func (m *VolumeCapability_BlockVolume) String() string { return proto.CompactTextString(m) } +func (*VolumeCapability_BlockVolume) ProtoMessage() {} +func (*VolumeCapability_BlockVolume) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7, 0} } + +// Indicate that the volume will be accessed via the filesystem API. +type VolumeCapability_MountVolume struct { + // The filesystem type. This field is OPTIONAL. + // An empty string is equal to an unspecified field value. + FsType string `protobuf:"bytes,1,opt,name=fs_type,json=fsType" json:"fs_type,omitempty"` + // The mount options that can be used for the volume. This field is + // OPTIONAL. `mount_flags` MAY contain sensitive information. + // Therefore, the CO and the Plugin MUST NOT leak this information + // to untrusted entities. The total size of this repeated field + // SHALL NOT exceed 4 KiB. + MountFlags []string `protobuf:"bytes,2,rep,name=mount_flags,json=mountFlags" json:"mount_flags,omitempty"` +} + +func (m *VolumeCapability_MountVolume) Reset() { *m = VolumeCapability_MountVolume{} } +func (m *VolumeCapability_MountVolume) String() string { return proto.CompactTextString(m) } +func (*VolumeCapability_MountVolume) ProtoMessage() {} +func (*VolumeCapability_MountVolume) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7, 1} } + +func (m *VolumeCapability_MountVolume) GetFsType() string { + if m != nil { + return m.FsType + } + return "" +} + +func (m *VolumeCapability_MountVolume) GetMountFlags() []string { + if m != nil { + return m.MountFlags + } + return nil +} + +// Specify how a volume can be accessed. +type VolumeCapability_AccessMode struct { + // This field is REQUIRED. + Mode VolumeCapability_AccessMode_Mode `protobuf:"varint,1,opt,name=mode,enum=csi.VolumeCapability_AccessMode_Mode" json:"mode,omitempty"` +} + +func (m *VolumeCapability_AccessMode) Reset() { *m = VolumeCapability_AccessMode{} } +func (m *VolumeCapability_AccessMode) String() string { return proto.CompactTextString(m) } +func (*VolumeCapability_AccessMode) ProtoMessage() {} +func (*VolumeCapability_AccessMode) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7, 2} } + +func (m *VolumeCapability_AccessMode) GetMode() VolumeCapability_AccessMode_Mode { + if m != nil { + return m.Mode + } + return VolumeCapability_AccessMode_UNKNOWN +} + +// The capacity of the storage space in bytes. To specify an exact size, +// `required_bytes` and `limit_bytes` can be set to the same value. At +// least one of the these fields MUST be specified. +type CapacityRange struct { + // Volume must be at least this big. This field is OPTIONAL. + // A value of 0 is equal to an unspecified field value. + RequiredBytes uint64 `protobuf:"varint,1,opt,name=required_bytes,json=requiredBytes" json:"required_bytes,omitempty"` + // Volume must not be bigger than this. This field is OPTIONAL. + // A value of 0 is equal to an unspecified field value. 
+ LimitBytes uint64 `protobuf:"varint,2,opt,name=limit_bytes,json=limitBytes" json:"limit_bytes,omitempty"` +} + +func (m *CapacityRange) Reset() { *m = CapacityRange{} } +func (m *CapacityRange) String() string { return proto.CompactTextString(m) } +func (*CapacityRange) ProtoMessage() {} +func (*CapacityRange) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *CapacityRange) GetRequiredBytes() uint64 { + if m != nil { + return m.RequiredBytes + } + return 0 +} + +func (m *CapacityRange) GetLimitBytes() uint64 { + if m != nil { + return m.LimitBytes + } + return 0 +} + +// The information about a provisioned volume. +type VolumeInfo struct { + // The capacity of the volume in bytes. This field is OPTIONAL. If not + // set (value of 0), it indicates that the capacity of the volume is + // unknown (e.g., NFS share). + CapacityBytes uint64 `protobuf:"varint,1,opt,name=capacity_bytes,json=capacityBytes" json:"capacity_bytes,omitempty"` + // Contains identity information for the created volume. This field is + // REQUIRED. The identity information will be used by the CO in + // subsequent calls to refer to the provisioned volume. + Id string `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"` + // Attributes reflect static properties of a volume and MUST be passed + // to volume validation and publishing calls. + // Attributes SHALL be opaque to a CO. Attributes SHALL NOT be mutable + // and SHALL be safe for the CO to cache. Attributes SHOULD NOT + // contain sensitive information. Attributes MAY NOT uniquely identify + // a volume. A volume uniquely identified by `id` SHALL always report + // the same attributes. This field is OPTIONAL and when present MUST + // be passed to volume validation and publishing calls. + Attributes map[string]string `protobuf:"bytes,3,rep,name=attributes" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *VolumeInfo) Reset() { *m = VolumeInfo{} } +func (m *VolumeInfo) String() string { return proto.CompactTextString(m) } +func (*VolumeInfo) ProtoMessage() {} +func (*VolumeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *VolumeInfo) GetCapacityBytes() uint64 { + if m != nil { + return m.CapacityBytes + } + return 0 +} + +func (m *VolumeInfo) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *VolumeInfo) GetAttributes() map[string]string { + if m != nil { + return m.Attributes + } + return nil +} + +// ////// +// ////// +type DeleteVolumeRequest struct { + // The API version assumed by the CO. This field is REQUIRED. + Version *Version `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"` + // The ID of the volume to be deprovisioned. + // This field is REQUIRED. + VolumeId string `protobuf:"bytes,2,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + // End user credentials used to authenticate/authorize volume deletion + // request. + // This field contains credential data, for example username and + // password. Each key must consist of alphanumeric characters, '-', + // '_' or '.'. Each value MUST contain a valid string. An SP MAY + // choose to accept binary (non-string) data by using a binary-to-text + // encoding scheme, like base64. An SP SHALL advertise the + // requirements for credentials in documentation. COs SHALL permit + // users to pass through the required credentials. This information is + // sensitive and MUST be treated as such (not logged, etc.) by the CO. 
+ // This field is OPTIONAL. + UserCredentials map[string]string `protobuf:"bytes,3,rep,name=user_credentials,json=userCredentials" json:"user_credentials,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *DeleteVolumeRequest) Reset() { *m = DeleteVolumeRequest{} } +func (m *DeleteVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteVolumeRequest) ProtoMessage() {} +func (*DeleteVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *DeleteVolumeRequest) GetVersion() *Version { + if m != nil { + return m.Version + } + return nil +} + +func (m *DeleteVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *DeleteVolumeRequest) GetUserCredentials() map[string]string { + if m != nil { + return m.UserCredentials + } + return nil +} + +type DeleteVolumeResponse struct { +} + +func (m *DeleteVolumeResponse) Reset() { *m = DeleteVolumeResponse{} } +func (m *DeleteVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteVolumeResponse) ProtoMessage() {} +func (*DeleteVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +// ////// +// ////// +type ControllerPublishVolumeRequest struct { + // The API version assumed by the CO. This field is REQUIRED. + Version *Version `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"` + // The ID of the volume to be used on a node. + // This field is REQUIRED. + VolumeId string `protobuf:"bytes,2,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + // The ID of the node. This field is REQUIRED. The CO SHALL set this + // field to match the node ID returned by `GetNodeID`. + NodeId string `protobuf:"bytes,3,opt,name=node_id,json=nodeId" json:"node_id,omitempty"` + // The capability of the volume the CO expects the volume to have. + // This is a REQUIRED field. + VolumeCapability *VolumeCapability `protobuf:"bytes,4,opt,name=volume_capability,json=volumeCapability" json:"volume_capability,omitempty"` + // Whether to publish the volume in readonly mode. This field is + // REQUIRED. + Readonly bool `protobuf:"varint,5,opt,name=readonly" json:"readonly,omitempty"` + // End user credentials used to authenticate/authorize controller + // publish request. + // This field contains credential data, for example username and + // password. Each key must consist of alphanumeric characters, '-', + // '_' or '.'. Each value MUST contain a valid string. An SP MAY + // choose to accept binary (non-string) data by using a binary-to-text + // encoding scheme, like base64. An SP SHALL advertise the + // requirements for credentials in documentation. COs SHALL permit + // users to pass through the required credentials. This information is + // sensitive and MUST be treated as such (not logged, etc.) by the CO. + // This field is OPTIONAL. + UserCredentials map[string]string `protobuf:"bytes,6,rep,name=user_credentials,json=userCredentials" json:"user_credentials,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Attributes of the volume to be used on a node. This field is + // OPTIONAL and MUST match the attributes of the VolumeInfo identified + // by `volume_id`. 
+ VolumeAttributes map[string]string `protobuf:"bytes,7,rep,name=volume_attributes,json=volumeAttributes" json:"volume_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *ControllerPublishVolumeRequest) Reset() { *m = ControllerPublishVolumeRequest{} } +func (m *ControllerPublishVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*ControllerPublishVolumeRequest) ProtoMessage() {} +func (*ControllerPublishVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *ControllerPublishVolumeRequest) GetVersion() *Version { + if m != nil { + return m.Version + } + return nil +} + +func (m *ControllerPublishVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *ControllerPublishVolumeRequest) GetNodeId() string { + if m != nil { + return m.NodeId + } + return "" +} + +func (m *ControllerPublishVolumeRequest) GetVolumeCapability() *VolumeCapability { + if m != nil { + return m.VolumeCapability + } + return nil +} + +func (m *ControllerPublishVolumeRequest) GetReadonly() bool { + if m != nil { + return m.Readonly + } + return false +} + +func (m *ControllerPublishVolumeRequest) GetUserCredentials() map[string]string { + if m != nil { + return m.UserCredentials + } + return nil +} + +func (m *ControllerPublishVolumeRequest) GetVolumeAttributes() map[string]string { + if m != nil { + return m.VolumeAttributes + } + return nil +} + +type ControllerPublishVolumeResponse struct { + // The SP specific information that will be passed to the Plugin in + // the subsequent `NodePublishVolume` call for the given volume. + // This information is opaque to the CO. This field is OPTIONAL. + PublishVolumeInfo map[string]string `protobuf:"bytes,1,rep,name=publish_volume_info,json=publishVolumeInfo" json:"publish_volume_info,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *ControllerPublishVolumeResponse) Reset() { *m = ControllerPublishVolumeResponse{} } +func (m *ControllerPublishVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*ControllerPublishVolumeResponse) ProtoMessage() {} +func (*ControllerPublishVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{13} +} + +func (m *ControllerPublishVolumeResponse) GetPublishVolumeInfo() map[string]string { + if m != nil { + return m.PublishVolumeInfo + } + return nil +} + +// ////// +// ////// +type ControllerUnpublishVolumeRequest struct { + // The API version assumed by the CO. This field is REQUIRED. + Version *Version `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"` + // The ID of the volume. This field is REQUIRED. + VolumeId string `protobuf:"bytes,2,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + // The ID of the node. This field is OPTIONAL. The CO SHOULD set this + // field to match the node ID returned by `GetNodeID` or leave it + // unset. If the value is set, the SP MUST unpublish the volume from + // the specified node. If the value is unset, the SP MUST unpublish + // the volume from all nodes it is published to. + NodeId string `protobuf:"bytes,3,opt,name=node_id,json=nodeId" json:"node_id,omitempty"` + // End user credentials used to authenticate/authorize controller + // unpublish request. + // This field contains credential data, for example username and + // password. Each key must consist of alphanumeric characters, '-', + // '_' or '.'. 
Each value MUST contain a valid string. An SP MAY + // choose to accept binary (non-string) data by using a binary-to-text + // encoding scheme, like base64. An SP SHALL advertise the + // requirements for credentials in documentation. COs SHALL permit + // users to pass through the required credentials. This information is + // sensitive and MUST be treated as such (not logged, etc.) by the CO. + // This field is OPTIONAL. + UserCredentials map[string]string `protobuf:"bytes,4,rep,name=user_credentials,json=userCredentials" json:"user_credentials,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *ControllerUnpublishVolumeRequest) Reset() { *m = ControllerUnpublishVolumeRequest{} } +func (m *ControllerUnpublishVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*ControllerUnpublishVolumeRequest) ProtoMessage() {} +func (*ControllerUnpublishVolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{14} +} + +func (m *ControllerUnpublishVolumeRequest) GetVersion() *Version { + if m != nil { + return m.Version + } + return nil +} + +func (m *ControllerUnpublishVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *ControllerUnpublishVolumeRequest) GetNodeId() string { + if m != nil { + return m.NodeId + } + return "" +} + +func (m *ControllerUnpublishVolumeRequest) GetUserCredentials() map[string]string { + if m != nil { + return m.UserCredentials + } + return nil +} + +type ControllerUnpublishVolumeResponse struct { +} + +func (m *ControllerUnpublishVolumeResponse) Reset() { *m = ControllerUnpublishVolumeResponse{} } +func (m *ControllerUnpublishVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*ControllerUnpublishVolumeResponse) ProtoMessage() {} +func (*ControllerUnpublishVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{15} +} + +// ////// +// ////// +type ValidateVolumeCapabilitiesRequest struct { + // The API version assumed by the CO. This is a REQUIRED field. + Version *Version `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"` + // The ID of the volume to check. This field is REQUIRED. + VolumeId string `protobuf:"bytes,2,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + // The capabilities that the CO wants to check for the volume. This + // call SHALL return "supported" only if all the volume capabilities + // specified below are supported. This field is REQUIRED. + VolumeCapabilities []*VolumeCapability `protobuf:"bytes,3,rep,name=volume_capabilities,json=volumeCapabilities" json:"volume_capabilities,omitempty"` + // Attributes of the volume to check. This field is OPTIONAL and MUST + // match the attributes of the VolumeInfo identified by `volume_id`. 
+ VolumeAttributes map[string]string `protobuf:"bytes,4,rep,name=volume_attributes,json=volumeAttributes" json:"volume_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *ValidateVolumeCapabilitiesRequest) Reset() { *m = ValidateVolumeCapabilitiesRequest{} } +func (m *ValidateVolumeCapabilitiesRequest) String() string { return proto.CompactTextString(m) } +func (*ValidateVolumeCapabilitiesRequest) ProtoMessage() {} +func (*ValidateVolumeCapabilitiesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{16} +} + +func (m *ValidateVolumeCapabilitiesRequest) GetVersion() *Version { + if m != nil { + return m.Version + } + return nil +} + +func (m *ValidateVolumeCapabilitiesRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *ValidateVolumeCapabilitiesRequest) GetVolumeCapabilities() []*VolumeCapability { + if m != nil { + return m.VolumeCapabilities + } + return nil +} + +func (m *ValidateVolumeCapabilitiesRequest) GetVolumeAttributes() map[string]string { + if m != nil { + return m.VolumeAttributes + } + return nil +} + +type ValidateVolumeCapabilitiesResponse struct { + // True if the Plugin supports the specified capabilities for the + // given volume. This field is REQUIRED. + Supported bool `protobuf:"varint,1,opt,name=supported" json:"supported,omitempty"` + // Message to the CO if `supported` above is false. This field is + // OPTIONAL. + // An empty string is equal to an unspecified field value. + Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` +} + +func (m *ValidateVolumeCapabilitiesResponse) Reset() { *m = ValidateVolumeCapabilitiesResponse{} } +func (m *ValidateVolumeCapabilitiesResponse) String() string { return proto.CompactTextString(m) } +func (*ValidateVolumeCapabilitiesResponse) ProtoMessage() {} +func (*ValidateVolumeCapabilitiesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{17} +} + +func (m *ValidateVolumeCapabilitiesResponse) GetSupported() bool { + if m != nil { + return m.Supported + } + return false +} + +func (m *ValidateVolumeCapabilitiesResponse) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +// ////// +// ////// +type ListVolumesRequest struct { + // The API version assumed by the CO. This field is REQUIRED. + Version *Version `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"` + // If specified (non-zero value), the Plugin MUST NOT return more + // entries than this number in the response. If the actual number of + // entries is more than this number, the Plugin MUST set `next_token` + // in the response which can be used to get the next page of entries + // in the subsequent `ListVolumes` call. This field is OPTIONAL. If + // not specified (zero value), it means there is no restriction on the + // number of entries that can be returned. + MaxEntries uint32 `protobuf:"varint,2,opt,name=max_entries,json=maxEntries" json:"max_entries,omitempty"` + // A token to specify where to start paginating. Set this field to + // `next_token` returned by a previous `ListVolumes` call to get the + // next page of entries. This field is OPTIONAL. + // An empty string is equal to an unspecified field value. 
+ StartingToken string `protobuf:"bytes,3,opt,name=starting_token,json=startingToken" json:"starting_token,omitempty"` +} + +func (m *ListVolumesRequest) Reset() { *m = ListVolumesRequest{} } +func (m *ListVolumesRequest) String() string { return proto.CompactTextString(m) } +func (*ListVolumesRequest) ProtoMessage() {} +func (*ListVolumesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + +func (m *ListVolumesRequest) GetVersion() *Version { + if m != nil { + return m.Version + } + return nil +} + +func (m *ListVolumesRequest) GetMaxEntries() uint32 { + if m != nil { + return m.MaxEntries + } + return 0 +} + +func (m *ListVolumesRequest) GetStartingToken() string { + if m != nil { + return m.StartingToken + } + return "" +} + +type ListVolumesResponse struct { + Entries []*ListVolumesResponse_Entry `protobuf:"bytes,1,rep,name=entries" json:"entries,omitempty"` + // This token allows you to get the next page of entries for + // `ListVolumes` request. If the number of entries is larger than + // `max_entries`, use the `next_token` as a value for the + // `starting_token` field in the next `ListVolumes` request. This + // field is OPTIONAL. + // An empty string is equal to an unspecified field value. + NextToken string `protobuf:"bytes,2,opt,name=next_token,json=nextToken" json:"next_token,omitempty"` +} + +func (m *ListVolumesResponse) Reset() { *m = ListVolumesResponse{} } +func (m *ListVolumesResponse) String() string { return proto.CompactTextString(m) } +func (*ListVolumesResponse) ProtoMessage() {} +func (*ListVolumesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } + +func (m *ListVolumesResponse) GetEntries() []*ListVolumesResponse_Entry { + if m != nil { + return m.Entries + } + return nil +} + +func (m *ListVolumesResponse) GetNextToken() string { + if m != nil { + return m.NextToken + } + return "" +} + +type ListVolumesResponse_Entry struct { + VolumeInfo *VolumeInfo `protobuf:"bytes,1,opt,name=volume_info,json=volumeInfo" json:"volume_info,omitempty"` +} + +func (m *ListVolumesResponse_Entry) Reset() { *m = ListVolumesResponse_Entry{} } +func (m *ListVolumesResponse_Entry) String() string { return proto.CompactTextString(m) } +func (*ListVolumesResponse_Entry) ProtoMessage() {} +func (*ListVolumesResponse_Entry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19, 0} } + +func (m *ListVolumesResponse_Entry) GetVolumeInfo() *VolumeInfo { + if m != nil { + return m.VolumeInfo + } + return nil +} + +// ////// +// ////// +type GetCapacityRequest struct { + // The API version assumed by the CO. This is a REQUIRED field. + Version *Version `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"` + // If specified, the Plugin SHALL report the capacity of the storage + // that can be used to provision volumes that satisfy ALL of the + // specified `volume_capabilities`. These are the same + // `volume_capabilities` the CO will use in `CreateVolumeRequest`. + // This field is OPTIONAL. + VolumeCapabilities []*VolumeCapability `protobuf:"bytes,2,rep,name=volume_capabilities,json=volumeCapabilities" json:"volume_capabilities,omitempty"` + // If specified, the Plugin SHALL report the capacity of the storage + // that can be used to provision volumes with the given Plugin + // specific `parameters`. These are the same `parameters` the CO will + // use in `CreateVolumeRequest`. This field is OPTIONAL. 
+ Parameters map[string]string `protobuf:"bytes,3,rep,name=parameters" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *GetCapacityRequest) Reset() { *m = GetCapacityRequest{} } +func (m *GetCapacityRequest) String() string { return proto.CompactTextString(m) } +func (*GetCapacityRequest) ProtoMessage() {} +func (*GetCapacityRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } + +func (m *GetCapacityRequest) GetVersion() *Version { + if m != nil { + return m.Version + } + return nil +} + +func (m *GetCapacityRequest) GetVolumeCapabilities() []*VolumeCapability { + if m != nil { + return m.VolumeCapabilities + } + return nil +} + +func (m *GetCapacityRequest) GetParameters() map[string]string { + if m != nil { + return m.Parameters + } + return nil +} + +type GetCapacityResponse struct { + // The available capacity of the storage that can be used to + // provision volumes. If `volume_capabilities` or `parameters` is + // specified in the request, the Plugin SHALL take those into + // consideration when calculating the available capacity of the + // storage. This field is REQUIRED. + AvailableCapacity uint64 `protobuf:"varint,1,opt,name=available_capacity,json=availableCapacity" json:"available_capacity,omitempty"` +} + +func (m *GetCapacityResponse) Reset() { *m = GetCapacityResponse{} } +func (m *GetCapacityResponse) String() string { return proto.CompactTextString(m) } +func (*GetCapacityResponse) ProtoMessage() {} +func (*GetCapacityResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } + +func (m *GetCapacityResponse) GetAvailableCapacity() uint64 { + if m != nil { + return m.AvailableCapacity + } + return 0 +} + +// ////// +// ////// +type ControllerProbeRequest struct { + // The API version assumed by the CO. This is a REQUIRED field. + Version *Version `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"` +} + +func (m *ControllerProbeRequest) Reset() { *m = ControllerProbeRequest{} } +func (m *ControllerProbeRequest) String() string { return proto.CompactTextString(m) } +func (*ControllerProbeRequest) ProtoMessage() {} +func (*ControllerProbeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } + +func (m *ControllerProbeRequest) GetVersion() *Version { + if m != nil { + return m.Version + } + return nil +} + +type ControllerProbeResponse struct { +} + +func (m *ControllerProbeResponse) Reset() { *m = ControllerProbeResponse{} } +func (m *ControllerProbeResponse) String() string { return proto.CompactTextString(m) } +func (*ControllerProbeResponse) ProtoMessage() {} +func (*ControllerProbeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } + +// ////// +// ////// +type ControllerGetCapabilitiesRequest struct { + // The API version assumed by the CO. This is a REQUIRED field. 
+ Version *Version `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"` +} + +func (m *ControllerGetCapabilitiesRequest) Reset() { *m = ControllerGetCapabilitiesRequest{} } +func (m *ControllerGetCapabilitiesRequest) String() string { return proto.CompactTextString(m) } +func (*ControllerGetCapabilitiesRequest) ProtoMessage() {} +func (*ControllerGetCapabilitiesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{24} +} + +func (m *ControllerGetCapabilitiesRequest) GetVersion() *Version { + if m != nil { + return m.Version + } + return nil +} + +type ControllerGetCapabilitiesResponse struct { + // All the capabilities that the controller service supports. This + // field is OPTIONAL. + Capabilities []*ControllerServiceCapability `protobuf:"bytes,2,rep,name=capabilities" json:"capabilities,omitempty"` +} + +func (m *ControllerGetCapabilitiesResponse) Reset() { *m = ControllerGetCapabilitiesResponse{} } +func (m *ControllerGetCapabilitiesResponse) String() string { return proto.CompactTextString(m) } +func (*ControllerGetCapabilitiesResponse) ProtoMessage() {} +func (*ControllerGetCapabilitiesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{25} +} + +func (m *ControllerGetCapabilitiesResponse) GetCapabilities() []*ControllerServiceCapability { + if m != nil { + return m.Capabilities + } + return nil +} + +// Specifies a capability of the controller service. +type ControllerServiceCapability struct { + // Types that are valid to be assigned to Type: + // *ControllerServiceCapability_Rpc + Type isControllerServiceCapability_Type `protobuf_oneof:"type"` +} + +func (m *ControllerServiceCapability) Reset() { *m = ControllerServiceCapability{} } +func (m *ControllerServiceCapability) String() string { return proto.CompactTextString(m) } +func (*ControllerServiceCapability) ProtoMessage() {} +func (*ControllerServiceCapability) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } + +type isControllerServiceCapability_Type interface { + isControllerServiceCapability_Type() +} + +type ControllerServiceCapability_Rpc struct { + Rpc *ControllerServiceCapability_RPC `protobuf:"bytes,1,opt,name=rpc,oneof"` +} + +func (*ControllerServiceCapability_Rpc) isControllerServiceCapability_Type() {} + +func (m *ControllerServiceCapability) GetType() isControllerServiceCapability_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *ControllerServiceCapability) GetRpc() *ControllerServiceCapability_RPC { + if x, ok := m.GetType().(*ControllerServiceCapability_Rpc); ok { + return x.Rpc + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ControllerServiceCapability) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ControllerServiceCapability_OneofMarshaler, _ControllerServiceCapability_OneofUnmarshaler, _ControllerServiceCapability_OneofSizer, []interface{}{ + (*ControllerServiceCapability_Rpc)(nil), + } +} + +func _ControllerServiceCapability_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ControllerServiceCapability) + // type + switch x := m.Type.(type) { + case *ControllerServiceCapability_Rpc: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Rpc); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ControllerServiceCapability.Type has unexpected type %T", x) + } + return nil +} + +func _ControllerServiceCapability_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ControllerServiceCapability) + switch tag { + case 1: // type.rpc + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ControllerServiceCapability_RPC) + err := b.DecodeMessage(msg) + m.Type = &ControllerServiceCapability_Rpc{msg} + return true, err + default: + return false, nil + } +} + +func _ControllerServiceCapability_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ControllerServiceCapability) + // type + switch x := m.Type.(type) { + case *ControllerServiceCapability_Rpc: + s := proto.Size(x.Rpc) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type ControllerServiceCapability_RPC struct { + Type ControllerServiceCapability_RPC_Type `protobuf:"varint,1,opt,name=type,enum=csi.ControllerServiceCapability_RPC_Type" json:"type,omitempty"` +} + +func (m *ControllerServiceCapability_RPC) Reset() { *m = ControllerServiceCapability_RPC{} } +func (m *ControllerServiceCapability_RPC) String() string { return proto.CompactTextString(m) } +func (*ControllerServiceCapability_RPC) ProtoMessage() {} +func (*ControllerServiceCapability_RPC) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{26, 0} +} + +func (m *ControllerServiceCapability_RPC) GetType() ControllerServiceCapability_RPC_Type { + if m != nil { + return m.Type + } + return ControllerServiceCapability_RPC_UNKNOWN +} + +// ////// +// ////// +type NodePublishVolumeRequest struct { + // The API version assumed by the CO. This is a REQUIRED field. + Version *Version `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"` + // The ID of the volume to publish. This field is REQUIRED. + VolumeId string `protobuf:"bytes,2,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + // The CO SHALL set this field to the value returned by + // `ControllerPublishVolume` if the corresponding Controller Plugin + // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be + // left unset if the corresponding Controller Plugin does not have + // this capability. This is an OPTIONAL field. + PublishVolumeInfo map[string]string `protobuf:"bytes,3,rep,name=publish_volume_info,json=publishVolumeInfo" json:"publish_volume_info,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // The path to which the volume will be published. 
It MUST be an + // absolute path in the root filesystem of the process serving this + // request. The CO SHALL ensure uniqueness of target_path per volume. + // This is a REQUIRED field. + TargetPath string `protobuf:"bytes,4,opt,name=target_path,json=targetPath" json:"target_path,omitempty"` + // The capability of the volume the CO expects the volume to have. + // This is a REQUIRED field. + VolumeCapability *VolumeCapability `protobuf:"bytes,5,opt,name=volume_capability,json=volumeCapability" json:"volume_capability,omitempty"` + // Whether to publish the volume in readonly mode. This field is + // REQUIRED. + Readonly bool `protobuf:"varint,6,opt,name=readonly" json:"readonly,omitempty"` + // End user credentials used to authenticate/authorize node + // publish request. + // This field contains credential data, for example username and + // password. Each key must consist of alphanumeric characters, '-', + // '_' or '.'. Each value MUST contain a valid string. An SP MAY + // choose to accept binary (non-string) data by using a binary-to-text + // encoding scheme, like base64. An SP SHALL advertise the + // requirements for credentials in documentation. COs SHALL permit + // users to pass through the required credentials. This information is + // sensitive and MUST be treated as such (not logged, etc.) by the CO. + // This field is OPTIONAL. + UserCredentials map[string]string `protobuf:"bytes,7,rep,name=user_credentials,json=userCredentials" json:"user_credentials,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Attributes of the volume to publish. This field is OPTIONAL and + // MUST match the attributes of the VolumeInfo identified by + // `volume_id`. + VolumeAttributes map[string]string `protobuf:"bytes,8,rep,name=volume_attributes,json=volumeAttributes" json:"volume_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *NodePublishVolumeRequest) Reset() { *m = NodePublishVolumeRequest{} } +func (m *NodePublishVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*NodePublishVolumeRequest) ProtoMessage() {} +func (*NodePublishVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } + +func (m *NodePublishVolumeRequest) GetVersion() *Version { + if m != nil { + return m.Version + } + return nil +} + +func (m *NodePublishVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *NodePublishVolumeRequest) GetPublishVolumeInfo() map[string]string { + if m != nil { + return m.PublishVolumeInfo + } + return nil +} + +func (m *NodePublishVolumeRequest) GetTargetPath() string { + if m != nil { + return m.TargetPath + } + return "" +} + +func (m *NodePublishVolumeRequest) GetVolumeCapability() *VolumeCapability { + if m != nil { + return m.VolumeCapability + } + return nil +} + +func (m *NodePublishVolumeRequest) GetReadonly() bool { + if m != nil { + return m.Readonly + } + return false +} + +func (m *NodePublishVolumeRequest) GetUserCredentials() map[string]string { + if m != nil { + return m.UserCredentials + } + return nil +} + +func (m *NodePublishVolumeRequest) GetVolumeAttributes() map[string]string { + if m != nil { + return m.VolumeAttributes + } + return nil +} + +type NodePublishVolumeResponse struct { +} + +func (m *NodePublishVolumeResponse) Reset() { *m = NodePublishVolumeResponse{} } +func (m *NodePublishVolumeResponse) String() string { return proto.CompactTextString(m) } +func 
(*NodePublishVolumeResponse) ProtoMessage() {} +func (*NodePublishVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } + +// ////// +// ////// +type NodeUnpublishVolumeRequest struct { + // The API version assumed by the CO. This is a REQUIRED field. + Version *Version `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"` + // The ID of the volume. This field is REQUIRED. + VolumeId string `protobuf:"bytes,2,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + // The path at which the volume was published. It MUST be an absolute + // path in the root filesystem of the process serving this request. + // This is a REQUIRED field. + TargetPath string `protobuf:"bytes,3,opt,name=target_path,json=targetPath" json:"target_path,omitempty"` + // End user credentials used to authenticate/authorize node + // unpublish request. + // This field contains credential data, for example username and + // password. Each key must consist of alphanumeric characters, '-', + // '_' or '.'. Each value MUST contain a valid string. An SP MAY + // choose to accept binary (non-string) data by using a binary-to-text + // encoding scheme, like base64. An SP SHALL advertise the + // requirements for credentials in documentation. COs SHALL permit + // users to pass through the required credentials. This information is + // sensitive and MUST be treated as such (not logged, etc.) by the CO. + // This field is OPTIONAL. + UserCredentials map[string]string `protobuf:"bytes,4,rep,name=user_credentials,json=userCredentials" json:"user_credentials,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *NodeUnpublishVolumeRequest) Reset() { *m = NodeUnpublishVolumeRequest{} } +func (m *NodeUnpublishVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*NodeUnpublishVolumeRequest) ProtoMessage() {} +func (*NodeUnpublishVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } + +func (m *NodeUnpublishVolumeRequest) GetVersion() *Version { + if m != nil { + return m.Version + } + return nil +} + +func (m *NodeUnpublishVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *NodeUnpublishVolumeRequest) GetTargetPath() string { + if m != nil { + return m.TargetPath + } + return "" +} + +func (m *NodeUnpublishVolumeRequest) GetUserCredentials() map[string]string { + if m != nil { + return m.UserCredentials + } + return nil +} + +type NodeUnpublishVolumeResponse struct { +} + +func (m *NodeUnpublishVolumeResponse) Reset() { *m = NodeUnpublishVolumeResponse{} } +func (m *NodeUnpublishVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*NodeUnpublishVolumeResponse) ProtoMessage() {} +func (*NodeUnpublishVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } + +// ////// +// ////// +type GetNodeIDRequest struct { + // The API version assumed by the CO. This is a REQUIRED field. 
+ Version *Version `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"` +} + +func (m *GetNodeIDRequest) Reset() { *m = GetNodeIDRequest{} } +func (m *GetNodeIDRequest) String() string { return proto.CompactTextString(m) } +func (*GetNodeIDRequest) ProtoMessage() {} +func (*GetNodeIDRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } + +func (m *GetNodeIDRequest) GetVersion() *Version { + if m != nil { + return m.Version + } + return nil +} + +type GetNodeIDResponse struct { + // The ID of the node as understood by the SP which SHALL be used by + // CO in subsequent `ControllerPublishVolume`. + // This is a REQUIRED field. + NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId" json:"node_id,omitempty"` +} + +func (m *GetNodeIDResponse) Reset() { *m = GetNodeIDResponse{} } +func (m *GetNodeIDResponse) String() string { return proto.CompactTextString(m) } +func (*GetNodeIDResponse) ProtoMessage() {} +func (*GetNodeIDResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } + +func (m *GetNodeIDResponse) GetNodeId() string { + if m != nil { + return m.NodeId + } + return "" +} + +// ////// +// ////// +type NodeProbeRequest struct { + // The API version assumed by the CO. This is a REQUIRED field. + Version *Version `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"` +} + +func (m *NodeProbeRequest) Reset() { *m = NodeProbeRequest{} } +func (m *NodeProbeRequest) String() string { return proto.CompactTextString(m) } +func (*NodeProbeRequest) ProtoMessage() {} +func (*NodeProbeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } + +func (m *NodeProbeRequest) GetVersion() *Version { + if m != nil { + return m.Version + } + return nil +} + +type NodeProbeResponse struct { +} + +func (m *NodeProbeResponse) Reset() { *m = NodeProbeResponse{} } +func (m *NodeProbeResponse) String() string { return proto.CompactTextString(m) } +func (*NodeProbeResponse) ProtoMessage() {} +func (*NodeProbeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } + +// ////// +// ////// +type NodeGetCapabilitiesRequest struct { + // The API version assumed by the CO. This is a REQUIRED field. + Version *Version `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"` +} + +func (m *NodeGetCapabilitiesRequest) Reset() { *m = NodeGetCapabilitiesRequest{} } +func (m *NodeGetCapabilitiesRequest) String() string { return proto.CompactTextString(m) } +func (*NodeGetCapabilitiesRequest) ProtoMessage() {} +func (*NodeGetCapabilitiesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} } + +func (m *NodeGetCapabilitiesRequest) GetVersion() *Version { + if m != nil { + return m.Version + } + return nil +} + +type NodeGetCapabilitiesResponse struct { + // All the capabilities that the node service supports. This field + // is OPTIONAL. + Capabilities []*NodeServiceCapability `protobuf:"bytes,1,rep,name=capabilities" json:"capabilities,omitempty"` +} + +func (m *NodeGetCapabilitiesResponse) Reset() { *m = NodeGetCapabilitiesResponse{} } +func (m *NodeGetCapabilitiesResponse) String() string { return proto.CompactTextString(m) } +func (*NodeGetCapabilitiesResponse) ProtoMessage() {} +func (*NodeGetCapabilitiesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} } + +func (m *NodeGetCapabilitiesResponse) GetCapabilities() []*NodeServiceCapability { + if m != nil { + return m.Capabilities + } + return nil +} + +// Specifies a capability of the node service. 
+type NodeServiceCapability struct { + // Types that are valid to be assigned to Type: + // *NodeServiceCapability_Rpc + Type isNodeServiceCapability_Type `protobuf_oneof:"type"` +} + +func (m *NodeServiceCapability) Reset() { *m = NodeServiceCapability{} } +func (m *NodeServiceCapability) String() string { return proto.CompactTextString(m) } +func (*NodeServiceCapability) ProtoMessage() {} +func (*NodeServiceCapability) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} } + +type isNodeServiceCapability_Type interface { + isNodeServiceCapability_Type() +} + +type NodeServiceCapability_Rpc struct { + Rpc *NodeServiceCapability_RPC `protobuf:"bytes,1,opt,name=rpc,oneof"` +} + +func (*NodeServiceCapability_Rpc) isNodeServiceCapability_Type() {} + +func (m *NodeServiceCapability) GetType() isNodeServiceCapability_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *NodeServiceCapability) GetRpc() *NodeServiceCapability_RPC { + if x, ok := m.GetType().(*NodeServiceCapability_Rpc); ok { + return x.Rpc + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*NodeServiceCapability) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _NodeServiceCapability_OneofMarshaler, _NodeServiceCapability_OneofUnmarshaler, _NodeServiceCapability_OneofSizer, []interface{}{ + (*NodeServiceCapability_Rpc)(nil), + } +} + +func _NodeServiceCapability_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*NodeServiceCapability) + // type + switch x := m.Type.(type) { + case *NodeServiceCapability_Rpc: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Rpc); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("NodeServiceCapability.Type has unexpected type %T", x) + } + return nil +} + +func _NodeServiceCapability_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*NodeServiceCapability) + switch tag { + case 1: // type.rpc + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(NodeServiceCapability_RPC) + err := b.DecodeMessage(msg) + m.Type = &NodeServiceCapability_Rpc{msg} + return true, err + default: + return false, nil + } +} + +func _NodeServiceCapability_OneofSizer(msg proto.Message) (n int) { + m := msg.(*NodeServiceCapability) + // type + switch x := m.Type.(type) { + case *NodeServiceCapability_Rpc: + s := proto.Size(x.Rpc) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type NodeServiceCapability_RPC struct { + Type NodeServiceCapability_RPC_Type `protobuf:"varint,1,opt,name=type,enum=csi.NodeServiceCapability_RPC_Type" json:"type,omitempty"` +} + +func (m *NodeServiceCapability_RPC) Reset() { *m = NodeServiceCapability_RPC{} } +func (m *NodeServiceCapability_RPC) String() string { return proto.CompactTextString(m) } +func (*NodeServiceCapability_RPC) ProtoMessage() {} +func (*NodeServiceCapability_RPC) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37, 0} } + +func (m *NodeServiceCapability_RPC) GetType() NodeServiceCapability_RPC_Type { + if m != nil { + return m.Type + } + return NodeServiceCapability_RPC_UNKNOWN +} + +func init() { + proto.RegisterType((*GetSupportedVersionsRequest)(nil), 
"csi.GetSupportedVersionsRequest") + proto.RegisterType((*GetSupportedVersionsResponse)(nil), "csi.GetSupportedVersionsResponse") + proto.RegisterType((*Version)(nil), "csi.Version") + proto.RegisterType((*GetPluginInfoRequest)(nil), "csi.GetPluginInfoRequest") + proto.RegisterType((*GetPluginInfoResponse)(nil), "csi.GetPluginInfoResponse") + proto.RegisterType((*CreateVolumeRequest)(nil), "csi.CreateVolumeRequest") + proto.RegisterType((*CreateVolumeResponse)(nil), "csi.CreateVolumeResponse") + proto.RegisterType((*VolumeCapability)(nil), "csi.VolumeCapability") + proto.RegisterType((*VolumeCapability_BlockVolume)(nil), "csi.VolumeCapability.BlockVolume") + proto.RegisterType((*VolumeCapability_MountVolume)(nil), "csi.VolumeCapability.MountVolume") + proto.RegisterType((*VolumeCapability_AccessMode)(nil), "csi.VolumeCapability.AccessMode") + proto.RegisterType((*CapacityRange)(nil), "csi.CapacityRange") + proto.RegisterType((*VolumeInfo)(nil), "csi.VolumeInfo") + proto.RegisterType((*DeleteVolumeRequest)(nil), "csi.DeleteVolumeRequest") + proto.RegisterType((*DeleteVolumeResponse)(nil), "csi.DeleteVolumeResponse") + proto.RegisterType((*ControllerPublishVolumeRequest)(nil), "csi.ControllerPublishVolumeRequest") + proto.RegisterType((*ControllerPublishVolumeResponse)(nil), "csi.ControllerPublishVolumeResponse") + proto.RegisterType((*ControllerUnpublishVolumeRequest)(nil), "csi.ControllerUnpublishVolumeRequest") + proto.RegisterType((*ControllerUnpublishVolumeResponse)(nil), "csi.ControllerUnpublishVolumeResponse") + proto.RegisterType((*ValidateVolumeCapabilitiesRequest)(nil), "csi.ValidateVolumeCapabilitiesRequest") + proto.RegisterType((*ValidateVolumeCapabilitiesResponse)(nil), "csi.ValidateVolumeCapabilitiesResponse") + proto.RegisterType((*ListVolumesRequest)(nil), "csi.ListVolumesRequest") + proto.RegisterType((*ListVolumesResponse)(nil), "csi.ListVolumesResponse") + proto.RegisterType((*ListVolumesResponse_Entry)(nil), "csi.ListVolumesResponse.Entry") + proto.RegisterType((*GetCapacityRequest)(nil), "csi.GetCapacityRequest") + proto.RegisterType((*GetCapacityResponse)(nil), "csi.GetCapacityResponse") + proto.RegisterType((*ControllerProbeRequest)(nil), "csi.ControllerProbeRequest") + proto.RegisterType((*ControllerProbeResponse)(nil), "csi.ControllerProbeResponse") + proto.RegisterType((*ControllerGetCapabilitiesRequest)(nil), "csi.ControllerGetCapabilitiesRequest") + proto.RegisterType((*ControllerGetCapabilitiesResponse)(nil), "csi.ControllerGetCapabilitiesResponse") + proto.RegisterType((*ControllerServiceCapability)(nil), "csi.ControllerServiceCapability") + proto.RegisterType((*ControllerServiceCapability_RPC)(nil), "csi.ControllerServiceCapability.RPC") + proto.RegisterType((*NodePublishVolumeRequest)(nil), "csi.NodePublishVolumeRequest") + proto.RegisterType((*NodePublishVolumeResponse)(nil), "csi.NodePublishVolumeResponse") + proto.RegisterType((*NodeUnpublishVolumeRequest)(nil), "csi.NodeUnpublishVolumeRequest") + proto.RegisterType((*NodeUnpublishVolumeResponse)(nil), "csi.NodeUnpublishVolumeResponse") + proto.RegisterType((*GetNodeIDRequest)(nil), "csi.GetNodeIDRequest") + proto.RegisterType((*GetNodeIDResponse)(nil), "csi.GetNodeIDResponse") + proto.RegisterType((*NodeProbeRequest)(nil), "csi.NodeProbeRequest") + proto.RegisterType((*NodeProbeResponse)(nil), "csi.NodeProbeResponse") + proto.RegisterType((*NodeGetCapabilitiesRequest)(nil), "csi.NodeGetCapabilitiesRequest") + proto.RegisterType((*NodeGetCapabilitiesResponse)(nil), "csi.NodeGetCapabilitiesResponse") + 
proto.RegisterType((*NodeServiceCapability)(nil), "csi.NodeServiceCapability") + proto.RegisterType((*NodeServiceCapability_RPC)(nil), "csi.NodeServiceCapability.RPC") + proto.RegisterEnum("csi.VolumeCapability_AccessMode_Mode", VolumeCapability_AccessMode_Mode_name, VolumeCapability_AccessMode_Mode_value) + proto.RegisterEnum("csi.ControllerServiceCapability_RPC_Type", ControllerServiceCapability_RPC_Type_name, ControllerServiceCapability_RPC_Type_value) + proto.RegisterEnum("csi.NodeServiceCapability_RPC_Type", NodeServiceCapability_RPC_Type_name, NodeServiceCapability_RPC_Type_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Identity service + +type IdentityClient interface { + GetSupportedVersions(ctx context.Context, in *GetSupportedVersionsRequest, opts ...grpc.CallOption) (*GetSupportedVersionsResponse, error) + GetPluginInfo(ctx context.Context, in *GetPluginInfoRequest, opts ...grpc.CallOption) (*GetPluginInfoResponse, error) +} + +type identityClient struct { + cc *grpc.ClientConn +} + +func NewIdentityClient(cc *grpc.ClientConn) IdentityClient { + return &identityClient{cc} +} + +func (c *identityClient) GetSupportedVersions(ctx context.Context, in *GetSupportedVersionsRequest, opts ...grpc.CallOption) (*GetSupportedVersionsResponse, error) { + out := new(GetSupportedVersionsResponse) + err := grpc.Invoke(ctx, "/csi.Identity/GetSupportedVersions", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *identityClient) GetPluginInfo(ctx context.Context, in *GetPluginInfoRequest, opts ...grpc.CallOption) (*GetPluginInfoResponse, error) { + out := new(GetPluginInfoResponse) + err := grpc.Invoke(ctx, "/csi.Identity/GetPluginInfo", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Identity service + +type IdentityServer interface { + GetSupportedVersions(context.Context, *GetSupportedVersionsRequest) (*GetSupportedVersionsResponse, error) + GetPluginInfo(context.Context, *GetPluginInfoRequest) (*GetPluginInfoResponse, error) +} + +func RegisterIdentityServer(s *grpc.Server, srv IdentityServer) { + s.RegisterService(&_Identity_serviceDesc, srv) +} + +func _Identity_GetSupportedVersions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetSupportedVersionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IdentityServer).GetSupportedVersions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.Identity/GetSupportedVersions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IdentityServer).GetSupportedVersions(ctx, req.(*GetSupportedVersionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Identity_GetPluginInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetPluginInfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IdentityServer).GetPluginInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.Identity/GetPluginInfo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IdentityServer).GetPluginInfo(ctx, req.(*GetPluginInfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Identity_serviceDesc = grpc.ServiceDesc{ + ServiceName: "csi.Identity", + HandlerType: (*IdentityServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetSupportedVersions", + Handler: _Identity_GetSupportedVersions_Handler, + }, + { + MethodName: "GetPluginInfo", + Handler: _Identity_GetPluginInfo_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "csi.proto", +} + +// Client API for Controller service + +type ControllerClient interface { + CreateVolume(ctx context.Context, in *CreateVolumeRequest, opts ...grpc.CallOption) (*CreateVolumeResponse, error) + DeleteVolume(ctx context.Context, in *DeleteVolumeRequest, opts ...grpc.CallOption) (*DeleteVolumeResponse, error) + ControllerPublishVolume(ctx context.Context, in *ControllerPublishVolumeRequest, opts ...grpc.CallOption) (*ControllerPublishVolumeResponse, error) + ControllerUnpublishVolume(ctx context.Context, in *ControllerUnpublishVolumeRequest, opts ...grpc.CallOption) (*ControllerUnpublishVolumeResponse, error) + ValidateVolumeCapabilities(ctx context.Context, in *ValidateVolumeCapabilitiesRequest, opts ...grpc.CallOption) (*ValidateVolumeCapabilitiesResponse, error) + ListVolumes(ctx context.Context, in *ListVolumesRequest, opts ...grpc.CallOption) (*ListVolumesResponse, error) + GetCapacity(ctx context.Context, in *GetCapacityRequest, opts ...grpc.CallOption) (*GetCapacityResponse, error) + ControllerProbe(ctx context.Context, in *ControllerProbeRequest, opts ...grpc.CallOption) (*ControllerProbeResponse, error) + ControllerGetCapabilities(ctx context.Context, in *ControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*ControllerGetCapabilitiesResponse, error) +} + +type controllerClient struct { + cc *grpc.ClientConn +} + +func 
NewControllerClient(cc *grpc.ClientConn) ControllerClient { + return &controllerClient{cc} +} + +func (c *controllerClient) CreateVolume(ctx context.Context, in *CreateVolumeRequest, opts ...grpc.CallOption) (*CreateVolumeResponse, error) { + out := new(CreateVolumeResponse) + err := grpc.Invoke(ctx, "/csi.Controller/CreateVolume", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) DeleteVolume(ctx context.Context, in *DeleteVolumeRequest, opts ...grpc.CallOption) (*DeleteVolumeResponse, error) { + out := new(DeleteVolumeResponse) + err := grpc.Invoke(ctx, "/csi.Controller/DeleteVolume", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ControllerPublishVolume(ctx context.Context, in *ControllerPublishVolumeRequest, opts ...grpc.CallOption) (*ControllerPublishVolumeResponse, error) { + out := new(ControllerPublishVolumeResponse) + err := grpc.Invoke(ctx, "/csi.Controller/ControllerPublishVolume", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ControllerUnpublishVolume(ctx context.Context, in *ControllerUnpublishVolumeRequest, opts ...grpc.CallOption) (*ControllerUnpublishVolumeResponse, error) { + out := new(ControllerUnpublishVolumeResponse) + err := grpc.Invoke(ctx, "/csi.Controller/ControllerUnpublishVolume", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ValidateVolumeCapabilities(ctx context.Context, in *ValidateVolumeCapabilitiesRequest, opts ...grpc.CallOption) (*ValidateVolumeCapabilitiesResponse, error) { + out := new(ValidateVolumeCapabilitiesResponse) + err := grpc.Invoke(ctx, "/csi.Controller/ValidateVolumeCapabilities", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ListVolumes(ctx context.Context, in *ListVolumesRequest, opts ...grpc.CallOption) (*ListVolumesResponse, error) { + out := new(ListVolumesResponse) + err := grpc.Invoke(ctx, "/csi.Controller/ListVolumes", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) GetCapacity(ctx context.Context, in *GetCapacityRequest, opts ...grpc.CallOption) (*GetCapacityResponse, error) { + out := new(GetCapacityResponse) + err := grpc.Invoke(ctx, "/csi.Controller/GetCapacity", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ControllerProbe(ctx context.Context, in *ControllerProbeRequest, opts ...grpc.CallOption) (*ControllerProbeResponse, error) { + out := new(ControllerProbeResponse) + err := grpc.Invoke(ctx, "/csi.Controller/ControllerProbe", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ControllerGetCapabilities(ctx context.Context, in *ControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*ControllerGetCapabilitiesResponse, error) { + out := new(ControllerGetCapabilitiesResponse) + err := grpc.Invoke(ctx, "/csi.Controller/ControllerGetCapabilities", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Controller service + +type ControllerServer interface { + CreateVolume(context.Context, *CreateVolumeRequest) (*CreateVolumeResponse, error) + DeleteVolume(context.Context, *DeleteVolumeRequest) (*DeleteVolumeResponse, error) + ControllerPublishVolume(context.Context, *ControllerPublishVolumeRequest) (*ControllerPublishVolumeResponse, error) + ControllerUnpublishVolume(context.Context, *ControllerUnpublishVolumeRequest) (*ControllerUnpublishVolumeResponse, error) + ValidateVolumeCapabilities(context.Context, *ValidateVolumeCapabilitiesRequest) (*ValidateVolumeCapabilitiesResponse, error) + ListVolumes(context.Context, *ListVolumesRequest) (*ListVolumesResponse, error) + GetCapacity(context.Context, *GetCapacityRequest) (*GetCapacityResponse, error) + ControllerProbe(context.Context, *ControllerProbeRequest) (*ControllerProbeResponse, error) + ControllerGetCapabilities(context.Context, *ControllerGetCapabilitiesRequest) (*ControllerGetCapabilitiesResponse, error) +} + +func RegisterControllerServer(s *grpc.Server, srv ControllerServer) { + s.RegisterService(&_Controller_serviceDesc, srv) +} + +func _Controller_CreateVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).CreateVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.Controller/CreateVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).CreateVolume(ctx, req.(*CreateVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_DeleteVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).DeleteVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.Controller/DeleteVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).DeleteVolume(ctx, req.(*DeleteVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ControllerPublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerPublishVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ControllerPublishVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.Controller/ControllerPublishVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ControllerPublishVolume(ctx, req.(*ControllerPublishVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ControllerUnpublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerUnpublishVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(ControllerServer).ControllerUnpublishVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.Controller/ControllerUnpublishVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ControllerUnpublishVolume(ctx, req.(*ControllerUnpublishVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ValidateVolumeCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ValidateVolumeCapabilitiesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ValidateVolumeCapabilities(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.Controller/ValidateVolumeCapabilities", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ValidateVolumeCapabilities(ctx, req.(*ValidateVolumeCapabilitiesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ListVolumes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListVolumesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ListVolumes(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.Controller/ListVolumes", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ListVolumes(ctx, req.(*ListVolumesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_GetCapacity_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetCapacityRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).GetCapacity(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.Controller/GetCapacity", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).GetCapacity(ctx, req.(*GetCapacityRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ControllerProbe_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerProbeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ControllerProbe(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.Controller/ControllerProbe", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ControllerProbe(ctx, req.(*ControllerProbeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ControllerGetCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerGetCapabilitiesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ControllerGetCapabilities(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/csi.Controller/ControllerGetCapabilities", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ControllerGetCapabilities(ctx, req.(*ControllerGetCapabilitiesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Controller_serviceDesc = grpc.ServiceDesc{ + ServiceName: "csi.Controller", + HandlerType: (*ControllerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateVolume", + Handler: _Controller_CreateVolume_Handler, + }, + { + MethodName: "DeleteVolume", + Handler: _Controller_DeleteVolume_Handler, + }, + { + MethodName: "ControllerPublishVolume", + Handler: _Controller_ControllerPublishVolume_Handler, + }, + { + MethodName: "ControllerUnpublishVolume", + Handler: _Controller_ControllerUnpublishVolume_Handler, + }, + { + MethodName: "ValidateVolumeCapabilities", + Handler: _Controller_ValidateVolumeCapabilities_Handler, + }, + { + MethodName: "ListVolumes", + Handler: _Controller_ListVolumes_Handler, + }, + { + MethodName: "GetCapacity", + Handler: _Controller_GetCapacity_Handler, + }, + { + MethodName: "ControllerProbe", + Handler: _Controller_ControllerProbe_Handler, + }, + { + MethodName: "ControllerGetCapabilities", + Handler: _Controller_ControllerGetCapabilities_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "csi.proto", +} + +// Client API for Node service + +type NodeClient interface { + NodePublishVolume(ctx context.Context, in *NodePublishVolumeRequest, opts ...grpc.CallOption) (*NodePublishVolumeResponse, error) + NodeUnpublishVolume(ctx context.Context, in *NodeUnpublishVolumeRequest, opts ...grpc.CallOption) (*NodeUnpublishVolumeResponse, error) + GetNodeID(ctx context.Context, in *GetNodeIDRequest, opts ...grpc.CallOption) (*GetNodeIDResponse, error) + NodeProbe(ctx context.Context, in *NodeProbeRequest, opts ...grpc.CallOption) (*NodeProbeResponse, error) + NodeGetCapabilities(ctx context.Context, in *NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*NodeGetCapabilitiesResponse, error) +} + +type nodeClient struct { + cc *grpc.ClientConn +} + +func NewNodeClient(cc *grpc.ClientConn) NodeClient { + return &nodeClient{cc} +} + +func (c *nodeClient) NodePublishVolume(ctx context.Context, in *NodePublishVolumeRequest, opts ...grpc.CallOption) (*NodePublishVolumeResponse, error) { + out := new(NodePublishVolumeResponse) + err := grpc.Invoke(ctx, "/csi.Node/NodePublishVolume", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodeUnpublishVolume(ctx context.Context, in *NodeUnpublishVolumeRequest, opts ...grpc.CallOption) (*NodeUnpublishVolumeResponse, error) { + out := new(NodeUnpublishVolumeResponse) + err := grpc.Invoke(ctx, "/csi.Node/NodeUnpublishVolume", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) GetNodeID(ctx context.Context, in *GetNodeIDRequest, opts ...grpc.CallOption) (*GetNodeIDResponse, error) { + out := new(GetNodeIDResponse) + err := grpc.Invoke(ctx, "/csi.Node/GetNodeID", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodeProbe(ctx context.Context, in *NodeProbeRequest, opts ...grpc.CallOption) (*NodeProbeResponse, error) { + out := new(NodeProbeResponse) + err := grpc.Invoke(ctx, "/csi.Node/NodeProbe", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodeGetCapabilities(ctx context.Context, in *NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*NodeGetCapabilitiesResponse, error) { + out := new(NodeGetCapabilitiesResponse) + err := grpc.Invoke(ctx, "/csi.Node/NodeGetCapabilities", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Node service + +type NodeServer interface { + NodePublishVolume(context.Context, *NodePublishVolumeRequest) (*NodePublishVolumeResponse, error) + NodeUnpublishVolume(context.Context, *NodeUnpublishVolumeRequest) (*NodeUnpublishVolumeResponse, error) + GetNodeID(context.Context, *GetNodeIDRequest) (*GetNodeIDResponse, error) + NodeProbe(context.Context, *NodeProbeRequest) (*NodeProbeResponse, error) + NodeGetCapabilities(context.Context, *NodeGetCapabilitiesRequest) (*NodeGetCapabilitiesResponse, error) +} + +func RegisterNodeServer(s *grpc.Server, srv NodeServer) { + s.RegisterService(&_Node_serviceDesc, srv) +} + +func _Node_NodePublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodePublishVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodePublishVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.Node/NodePublishVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodePublishVolume(ctx, req.(*NodePublishVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodeUnpublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeUnpublishVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeUnpublishVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.Node/NodeUnpublishVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeUnpublishVolume(ctx, req.(*NodeUnpublishVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_GetNodeID_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetNodeIDRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).GetNodeID(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.Node/GetNodeID", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).GetNodeID(ctx, req.(*GetNodeIDRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodeProbe_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeProbeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeProbe(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.Node/NodeProbe", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeProbe(ctx, req.(*NodeProbeRequest)) + } + 
return interceptor(ctx, in, info, handler) +} + +func _Node_NodeGetCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeGetCapabilitiesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeGetCapabilities(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.Node/NodeGetCapabilities", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeGetCapabilities(ctx, req.(*NodeGetCapabilitiesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Node_serviceDesc = grpc.ServiceDesc{ + ServiceName: "csi.Node", + HandlerType: (*NodeServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "NodePublishVolume", + Handler: _Node_NodePublishVolume_Handler, + }, + { + MethodName: "NodeUnpublishVolume", + Handler: _Node_NodeUnpublishVolume_Handler, + }, + { + MethodName: "GetNodeID", + Handler: _Node_GetNodeID_Handler, + }, + { + MethodName: "NodeProbe", + Handler: _Node_NodeProbe_Handler, + }, + { + MethodName: "NodeGetCapabilities", + Handler: _Node_NodeGetCapabilities_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "csi.proto", +} + +func init() { proto.RegisterFile("csi.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 1993 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x59, 0x4b, 0x73, 0xe3, 0x58, + 0x15, 0xb6, 0x6c, 0xe7, 0xe1, 0xe3, 0x38, 0xed, 0x5c, 0xe7, 0xa1, 0x28, 0xfd, 0x70, 0xab, 0xa7, + 0x7b, 0x42, 0x15, 0xe3, 0xa2, 0x3c, 0x54, 0xd1, 0xe9, 0x9e, 0x19, 0x48, 0x6c, 0x4f, 0x62, 0x26, + 0x71, 0xa7, 0x14, 0xa7, 0x9b, 0x01, 0xa6, 0x84, 0x62, 0xdf, 0xa4, 0x45, 0xcb, 0x92, 0x47, 0x92, + 0x5d, 0xed, 0x3d, 0x4b, 0x16, 0xec, 0xd8, 0xc1, 0x72, 0xa0, 0x58, 0x02, 0xbf, 0x80, 0xbf, 0x00, + 0xac, 0xd9, 0xf2, 0x0f, 0xa8, 0x62, 0x43, 0xdd, 0x87, 0x64, 0x49, 0x96, 0x1c, 0x3b, 0x93, 0x1e, + 0x66, 0x27, 0x9d, 0xc7, 0x77, 0xcf, 0xe3, 0xde, 0x73, 0xce, 0x95, 0x20, 0xd7, 0x71, 0xf4, 0x4a, + 0xdf, 0xb6, 0x5c, 0x0b, 0x65, 0x3a, 0x8e, 0x2e, 0xdf, 0x83, 0x9d, 0x43, 0xec, 0x9e, 0x0d, 0xfa, + 0x7d, 0xcb, 0x76, 0x71, 0xf7, 0x25, 0xb6, 0x1d, 0xdd, 0x32, 0x1d, 0x05, 0x7f, 0x39, 0xc0, 0x8e, + 0x2b, 0xff, 0x0c, 0xee, 0xc6, 0xb3, 0x9d, 0xbe, 0x65, 0x3a, 0x18, 0x3d, 0x07, 0xe4, 0x78, 0x4c, + 0x75, 0xc8, 0xb9, 0xa2, 0x50, 0xce, 0xec, 0xe6, 0xab, 0x2b, 0x15, 0xb2, 0x16, 0x57, 0x51, 0xd6, + 0x9c, 0x28, 0x88, 0xfc, 0x19, 0x2c, 0xf1, 0x67, 0xb4, 0x0e, 0x0b, 0x3d, 0xed, 0x97, 0x96, 0x2d, + 0x0a, 0x65, 0x61, 0xb7, 0xa0, 0xb0, 0x17, 0x4a, 0xd5, 0x4d, 0xcb, 0x16, 0xd3, 0x9c, 0x4a, 0x5e, + 0x08, 0xb5, 0xaf, 0xb9, 0x9d, 0xd7, 0x62, 0x86, 0x51, 0xe9, 0x8b, 0xfc, 0x09, 0xac, 0x1f, 0x62, + 0xf7, 0xd4, 0x18, 0x5c, 0xe9, 0x66, 0xd3, 0xbc, 0xb4, 0xb8, 0x07, 0xe8, 0x09, 0x2c, 0x71, 0xbb, + 0x28, 0x76, 0xd4, 0x2c, 0x8f, 0x29, 0xff, 0x43, 0x80, 0x8d, 0x08, 0x00, 0xf7, 0x11, 0x41, 0xd6, + 0xd4, 0x7a, 0x98, 0xaa, 0xe7, 0x14, 0xfa, 0x8c, 0x1e, 0xc3, 0xea, 0x10, 0x9b, 0x5d, 0xcb, 0xf6, + 0x9c, 0xa6, 0x26, 0xe6, 0x94, 0x02, 0xa3, 0x7a, 0x6e, 0xd5, 0x61, 0xb9, 0xa7, 0x99, 0xfa, 0x25, + 0x76, 0x5c, 0x31, 0x43, 0x83, 0xb2, 0x4b, 0x57, 0x8f, 0x5d, 0xa8, 0x72, 0xc2, 0x45, 0x1b, 0xa6, + 0x6b, 0x8f, 0x14, 0x5f, 0x53, 0x7a, 0x0e, 0x85, 0x10, 0x0b, 0x15, 0x21, 0xf3, 0x06, 0x8f, 0xb8, + 0x41, 0xe4, 0x91, 0xc4, 0x64, 0xa8, 0x19, 0x03, 0xcc, 0xcd, 0x60, 0x2f, 0xcf, 0xd2, 0x4f, 0x05, + 0xf9, 
0xbf, 0x19, 0x28, 0xd5, 0x6c, 0xac, 0xb9, 0xf8, 0xa5, 0x65, 0x0c, 0x7a, 0x78, 0xce, 0xb8, + 0xf8, 0xde, 0xa7, 0x03, 0xde, 0xef, 0xc1, 0x6a, 0x47, 0xeb, 0x6b, 0x1d, 0xdd, 0x1d, 0xa9, 0xb6, + 0x66, 0x5e, 0x61, 0x9a, 0x8a, 0x7c, 0x15, 0x51, 0x88, 0x1a, 0x67, 0x29, 0x84, 0xa3, 0x14, 0x3a, + 0xc1, 0x57, 0xf4, 0x29, 0x94, 0x86, 0xd4, 0x0e, 0x95, 0xd0, 0x2f, 0x74, 0x43, 0x77, 0x75, 0xec, + 0x88, 0x59, 0x1a, 0x9c, 0x0d, 0x66, 0x02, 0xe5, 0xd7, 0x3c, 0xf6, 0x48, 0x41, 0xc3, 0x30, 0x45, + 0xc7, 0x0e, 0x3a, 0x02, 0xe8, 0x6b, 0xb6, 0xd6, 0xc3, 0x2e, 0xb6, 0x1d, 0x71, 0x21, 0x10, 0xdb, + 0x18, 0x67, 0x2b, 0xa7, 0xbe, 0x28, 0x8b, 0x6d, 0x40, 0x17, 0xfd, 0x04, 0x8a, 0x03, 0x07, 0xdb, + 0x6a, 0xc7, 0xc6, 0x5d, 0x6c, 0xba, 0xba, 0x66, 0x38, 0xe2, 0x22, 0xc5, 0xfb, 0x20, 0x11, 0xef, + 0xdc, 0xc1, 0x76, 0x6d, 0x2c, 0xcf, 0x40, 0xef, 0x0c, 0xc2, 0x54, 0xe9, 0x63, 0xb8, 0x13, 0x59, + 0x78, 0x9e, 0xcc, 0x49, 0x07, 0xb0, 0x1e, 0xb7, 0xce, 0x5c, 0xd9, 0x3f, 0x82, 0xf5, 0xb0, 0xfd, + 0x7c, 0x4f, 0x7f, 0x0f, 0xf2, 0x3c, 0x0d, 0xba, 0x79, 0x69, 0xf1, 0x1d, 0x70, 0x27, 0x10, 0x7e, + 0xba, 0x31, 0x61, 0xe8, 0x3f, 0xcb, 0xbf, 0xcb, 0x42, 0x31, 0x9a, 0x19, 0xb4, 0x07, 0x0b, 0x17, + 0x86, 0xd5, 0x79, 0xc3, 0x01, 0x1e, 0xc6, 0xe6, 0xaf, 0x72, 0x40, 0x44, 0x18, 0xf5, 0x28, 0xa5, + 0x30, 0x0d, 0xa2, 0xda, 0xb3, 0x06, 0xa6, 0x4b, 0x6d, 0x4e, 0x54, 0x3d, 0x21, 0x22, 0x63, 0x55, + 0xaa, 0x81, 0xf6, 0x21, 0xaf, 0x75, 0x3a, 0xd8, 0x71, 0xd4, 0x9e, 0xd5, 0xf5, 0xf6, 0x5e, 0x39, + 0x1e, 0x60, 0x9f, 0x0a, 0x9e, 0x58, 0x5d, 0xac, 0x80, 0xe6, 0x3f, 0x4b, 0x05, 0xc8, 0x07, 0xac, + 0x92, 0x0e, 0x21, 0x1f, 0x58, 0x09, 0x6d, 0xc1, 0xd2, 0xa5, 0xa3, 0xba, 0xa3, 0xbe, 0x77, 0xe8, + 0x17, 0x2f, 0x9d, 0xf6, 0xa8, 0x8f, 0xd1, 0x03, 0xc8, 0x53, 0x13, 0xd4, 0x4b, 0x43, 0xbb, 0x72, + 0xc4, 0x74, 0x39, 0xb3, 0x9b, 0x53, 0x80, 0x92, 0x3e, 0x25, 0x14, 0xe9, 0xdf, 0x02, 0xc0, 0x78, + 0x49, 0xb4, 0x07, 0x59, 0x6a, 0x22, 0x41, 0x59, 0xad, 0x3e, 0xbe, 0xce, 0xc4, 0x0a, 0xb5, 0x93, + 0xaa, 0xc8, 0xbf, 0x17, 0x20, 0x4b, 0x31, 0xf2, 0xb0, 0x74, 0xde, 0xfa, 0xac, 0xf5, 0xe2, 0x55, + 0xab, 0x98, 0x42, 0x9b, 0x80, 0xce, 0x9a, 0xad, 0xc3, 0xe3, 0x86, 0xda, 0x7a, 0x51, 0x6f, 0xa8, + 0xaf, 0x94, 0x66, 0xbb, 0xa1, 0x14, 0x05, 0xb4, 0x03, 0x5b, 0x41, 0xba, 0xd2, 0xd8, 0xaf, 0x37, + 0x14, 0xf5, 0x45, 0xeb, 0xf8, 0xf3, 0x62, 0x1a, 0x49, 0xb0, 0x79, 0x72, 0x7e, 0xdc, 0x6e, 0x4e, + 0xf2, 0x32, 0xe8, 0x2e, 0x88, 0x01, 0x1e, 0xc7, 0xe0, 0xb0, 0x59, 0x02, 0x1b, 0xe0, 0xb2, 0x47, + 0xce, 0x5c, 0x38, 0x28, 0xf8, 0x69, 0x20, 0x91, 0x92, 0x5f, 0x41, 0x21, 0x74, 0xf2, 0x49, 0x8d, + 0xb4, 0xf1, 0x97, 0x03, 0xdd, 0xc6, 0x5d, 0xf5, 0x62, 0xe4, 0x62, 0x87, 0x86, 0x21, 0xab, 0x14, + 0x3c, 0xea, 0x01, 0x21, 0x92, 0x98, 0x1a, 0x7a, 0x4f, 0x77, 0xb9, 0x4c, 0x9a, 0xca, 0x00, 0x25, + 0x51, 0x01, 0xf9, 0x6f, 0x02, 0xc0, 0x78, 0x53, 0x12, 0x58, 0xbf, 0xf8, 0x84, 0x60, 0x3d, 0x2a, + 0x83, 0x5d, 0x85, 0xb4, 0xde, 0xe5, 0x07, 0x22, 0xad, 0x77, 0xd1, 0x0f, 0x01, 0x34, 0xd7, 0xb5, + 0xf5, 0x8b, 0x01, 0x51, 0x61, 0xc5, 0xf8, 0x41, 0x64, 0xc3, 0x57, 0xf6, 0x7d, 0x09, 0x5e, 0x27, + 0xc6, 0x2a, 0xe4, 0x34, 0x47, 0xd8, 0x73, 0x9d, 0xc4, 0xff, 0x08, 0x50, 0xaa, 0x63, 0x03, 0xdf, + 0xb4, 0x0e, 0xef, 0x40, 0xce, 0x3b, 0xb1, 0x9e, 0x5b, 0xcb, 0xfc, 0x78, 0x76, 0x63, 0x6b, 0x58, + 0x26, 0x50, 0xc3, 0x62, 0x16, 0x9e, 0xb1, 0x86, 0xdd, 0x46, 0x11, 0xda, 0x84, 0xf5, 0xb0, 0x01, + 0xac, 0x08, 0xc9, 0x7f, 0xc9, 0xc2, 0xfd, 0x9a, 0x65, 0xba, 0xb6, 0x65, 0x18, 0xd8, 0x3e, 0x1d, + 0x5c, 0x18, 0xba, 0xf3, 0xfa, 0x1d, 0x44, 0x67, 0x0b, 0x96, 0x4c, 0xab, 0x4b, 0x59, 0x19, 0x76, + 0x9c, 0xc9, 0x6b, 0xb3, 0x8b, 
0x0e, 0x60, 0x2d, 0xda, 0x8c, 0x46, 0x62, 0x96, 0xae, 0x93, 0xd0, + 0x8a, 0x8a, 0xc3, 0x68, 0x09, 0x94, 0x60, 0xd9, 0xc6, 0x5a, 0xd7, 0x32, 0x8d, 0x91, 0xb8, 0x50, + 0x16, 0x76, 0x97, 0x15, 0xff, 0x1d, 0x75, 0x12, 0x5b, 0xcb, 0x53, 0xd6, 0x5a, 0xa6, 0x3a, 0x3f, + 0x5b, 0x86, 0xd0, 0xa5, 0xef, 0x44, 0x60, 0x7f, 0x2f, 0xd1, 0x55, 0xf6, 0x66, 0x59, 0x85, 0xbd, + 0x45, 0x77, 0x3e, 0x77, 0x74, 0x4c, 0xbe, 0x8d, 0x9d, 0x20, 0xd5, 0x60, 0x23, 0x76, 0xb9, 0xb9, + 0xb6, 0xd3, 0xdf, 0x05, 0x78, 0x90, 0xe8, 0x13, 0xef, 0x6f, 0x6f, 0xa0, 0xd4, 0x67, 0x0c, 0x35, + 0xdc, 0xe7, 0x48, 0x58, 0x9e, 0x4f, 0x0f, 0x0b, 0x9f, 0xc6, 0x42, 0x54, 0x52, 0x1d, 0x58, 0x60, + 0xd6, 0xfa, 0x51, 0xba, 0x54, 0x87, 0xcd, 0x78, 0xe1, 0xb9, 0xdc, 0xfa, 0x53, 0x1a, 0xca, 0x63, + 0x9b, 0xce, 0xcd, 0xfe, 0x37, 0x7f, 0x1e, 0x70, 0xcc, 0x7e, 0x65, 0x93, 0xd9, 0xb3, 0x48, 0xc8, + 0xe2, 0xcd, 0xfb, 0x06, 0x6b, 0xca, 0x23, 0x78, 0x38, 0xc5, 0x1a, 0x5e, 0x60, 0xfe, 0x95, 0x86, + 0x87, 0x2f, 0x35, 0x43, 0xef, 0xfa, 0x03, 0x50, 0x70, 0x86, 0xbc, 0xd5, 0x98, 0x26, 0xcc, 0xb5, + 0x99, 0x79, 0xe7, 0x5a, 0x3d, 0xee, 0x34, 0xb3, 0x1c, 0x7c, 0xc4, 0x50, 0xae, 0xf3, 0x67, 0xe6, + 0x03, 0x7d, 0x2b, 0x87, 0xf1, 0xe7, 0x20, 0x4f, 0xb3, 0x88, 0x1f, 0xc7, 0xbb, 0x90, 0xf3, 0xaf, + 0x7f, 0x14, 0x77, 0x59, 0x19, 0x13, 0x90, 0x08, 0x4b, 0x3d, 0xec, 0x38, 0xda, 0x95, 0x87, 0xef, + 0xbd, 0xca, 0xbf, 0x12, 0x00, 0x1d, 0xeb, 0x0e, 0x9f, 0xcb, 0xe6, 0xce, 0x18, 0x19, 0xd7, 0xb4, + 0xb7, 0x2a, 0x36, 0x5d, 0x5b, 0xe7, 0xa3, 0x45, 0x41, 0x81, 0x9e, 0xf6, 0xb6, 0xc1, 0x28, 0x64, + 0x96, 0x70, 0x5c, 0xcd, 0x76, 0x75, 0xf3, 0x4a, 0x75, 0xad, 0x37, 0xd8, 0xe4, 0x07, 0xa2, 0xe0, + 0x51, 0xdb, 0x84, 0x28, 0xff, 0x51, 0x80, 0x52, 0xc8, 0x0c, 0xee, 0xd6, 0x53, 0x58, 0xf2, 0xb0, + 0x59, 0x65, 0xb9, 0x4f, 0xed, 0x88, 0x11, 0xad, 0xb0, 0x24, 0x78, 0xe2, 0xe8, 0x1e, 0x80, 0x89, + 0xdf, 0xba, 0x7c, 0x51, 0xe6, 0x75, 0x8e, 0x50, 0xe8, 0x82, 0xd2, 0x1e, 0x2c, 0xb0, 0x54, 0xcc, + 0x3f, 0xa7, 0xff, 0x3a, 0x0d, 0xe8, 0x10, 0xbb, 0xfe, 0x28, 0x36, 0x67, 0xc8, 0x12, 0xf6, 0x71, + 0x7a, 0xde, 0x7d, 0x7c, 0x18, 0xba, 0x9f, 0xb1, 0x63, 0xf0, 0xbe, 0x77, 0xf7, 0x8d, 0x18, 0x37, + 0xed, 0x7a, 0xf6, 0x35, 0x2f, 0x51, 0x72, 0x1d, 0x4a, 0xa1, 0x05, 0x79, 0xe6, 0x3e, 0x00, 0xa4, + 0x0d, 0x35, 0xdd, 0xd0, 0x2e, 0x0c, 0xe6, 0x29, 0xe1, 0xf2, 0x41, 0x72, 0xcd, 0xe7, 0x78, 0x6a, + 0xf2, 0x8f, 0x60, 0x33, 0xd0, 0x2e, 0x6c, 0xeb, 0x62, 0xde, 0x82, 0x2c, 0x6f, 0xc3, 0xd6, 0x04, + 0x02, 0xaf, 0x52, 0x3f, 0x0e, 0xd6, 0x7d, 0x6e, 0xec, 0x0d, 0x6b, 0x94, 0xac, 0x07, 0xcb, 0xe2, + 0x04, 0x16, 0x77, 0xbe, 0x0e, 0x2b, 0x31, 0xc9, 0x2d, 0x47, 0x4a, 0xfc, 0x19, 0xb6, 0x87, 0x7a, + 0x27, 0x98, 0xe7, 0x90, 0x96, 0xfc, 0xdb, 0x34, 0xec, 0x4c, 0x91, 0x46, 0x4f, 0x21, 0x63, 0xf7, + 0x3b, 0xdc, 0xdc, 0xf7, 0xae, 0x03, 0xaf, 0x28, 0xa7, 0xb5, 0xa3, 0x94, 0x42, 0x54, 0xa4, 0xbf, + 0x0a, 0x90, 0x51, 0x4e, 0x6b, 0xe8, 0x63, 0xc8, 0xfa, 0x77, 0xb0, 0xd5, 0xea, 0x77, 0x66, 0x81, + 0xa8, 0x90, 0x6b, 0x9a, 0x42, 0xd5, 0x64, 0x0b, 0xb2, 0xf4, 0xd2, 0x16, 0xba, 0x40, 0x89, 0xb0, + 0x5e, 0x53, 0x1a, 0xfb, 0xed, 0x86, 0x5a, 0x6f, 0x1c, 0x37, 0xda, 0x0d, 0xf5, 0xe5, 0x8b, 0xe3, + 0xf3, 0x93, 0x46, 0x51, 0x20, 0x37, 0xa1, 0xd3, 0xf3, 0x83, 0xe3, 0xe6, 0xd9, 0x91, 0x7a, 0xde, + 0xf2, 0x9e, 0x38, 0x37, 0x8d, 0x8a, 0xb0, 0x72, 0xdc, 0x3c, 0x6b, 0x73, 0xc2, 0x59, 0x31, 0x43, + 0x28, 0x87, 0x8d, 0xb6, 0x5a, 0xdb, 0x3f, 0xdd, 0xaf, 0x35, 0xdb, 0x9f, 0x17, 0xb3, 0x07, 0x8b, + 0xcc, 0x5e, 0xf9, 0x9f, 0x0b, 0x20, 0xb6, 0xac, 0x2e, 0x7e, 0x77, 0x13, 0x6d, 0x37, 0x7e, 0xbc, + 0x61, 0xc7, 0xec, 0xfb, 0x14, 0x30, 0xc9, 0x80, 0xd9, 
0xe7, 0x1a, 0x52, 0x3e, 0x5d, 0xcd, 0xbe, + 0xc2, 0xae, 0xda, 0xd7, 0xdc, 0xd7, 0x74, 0x30, 0xce, 0x29, 0xc0, 0x48, 0xa7, 0x9a, 0xfb, 0x3a, + 0x7e, 0x7e, 0x5e, 0xb8, 0xf9, 0xfc, 0xbc, 0x18, 0x99, 0x9f, 0xbf, 0x88, 0x99, 0x47, 0xd8, 0x64, + 0x5b, 0x9d, 0xee, 0xe3, 0x6c, 0x93, 0xf3, 0x2f, 0xe2, 0x7a, 0xed, 0x32, 0xc5, 0xff, 0x70, 0x3a, + 0xfe, 0xac, 0x2d, 0xf6, 0x56, 0x26, 0xc3, 0x6f, 0xcf, 0xe4, 0xbd, 0x03, 0xdb, 0x31, 0x21, 0xe1, + 0x65, 0xec, 0xab, 0x34, 0x48, 0x84, 0xfb, 0x2e, 0x27, 0xd7, 0xc8, 0x8e, 0xcc, 0x4c, 0xec, 0x48, + 0x35, 0x71, 0x82, 0x1d, 0x9f, 0x8a, 0xff, 0xfb, 0xec, 0x7a, 0x0f, 0x76, 0x62, 0xed, 0xe0, 0x81, + 0x7c, 0x06, 0xc5, 0x43, 0xec, 0x12, 0x89, 0x66, 0x7d, 0xde, 0xfa, 0xff, 0x5d, 0x58, 0x0b, 0xe8, + 0xf2, 0x7a, 0x1f, 0x98, 0xf7, 0x85, 0xe0, 0xbc, 0x4f, 0x56, 0xa2, 0xf9, 0xbc, 0x49, 0x43, 0x2b, + 0xc1, 0x5a, 0x40, 0x97, 0x9b, 0x5e, 0x67, 0x5b, 0xe0, 0x6b, 0x36, 0xb1, 0x2f, 0x58, 0x7c, 0x92, + 0xda, 0xd7, 0x27, 0x91, 0xf6, 0xc5, 0x46, 0x2f, 0xc9, 0xcf, 0xef, 0x75, 0x8d, 0xeb, 0x0f, 0x02, + 0x6c, 0xc4, 0xca, 0xa1, 0x6a, 0xb0, 0x65, 0xdd, 0x4f, 0x06, 0x0c, 0x36, 0xab, 0x33, 0xd6, 0xab, + 0x7e, 0x10, 0xea, 0x55, 0x8f, 0xa6, 0xeb, 0x06, 0xbb, 0x54, 0x29, 0xa6, 0x4b, 0x79, 0x9d, 0xa4, + 0xfa, 0x67, 0x01, 0x96, 0x9b, 0x74, 0xa3, 0xb9, 0xa4, 0x1a, 0xae, 0xc7, 0xfd, 0x8b, 0x41, 0x65, + 0x6f, 0xac, 0x4a, 0xfa, 0x8b, 0x23, 0x3d, 0x9c, 0x22, 0xc1, 0x33, 0x97, 0x42, 0x47, 0x50, 0x08, + 0xfd, 0x96, 0x40, 0xdb, 0x71, 0xbf, 0x2a, 0x18, 0xa0, 0x94, 0xfc, 0x17, 0x43, 0x4e, 0x55, 0xbf, + 0x5a, 0x04, 0x18, 0xf7, 0x69, 0xd4, 0x80, 0x95, 0xe0, 0x37, 0x68, 0x24, 0x26, 0x7d, 0x56, 0x97, + 0xb6, 0x63, 0x38, 0xbe, 0x7d, 0x0d, 0x58, 0x09, 0x7e, 0x45, 0xe2, 0x30, 0x31, 0x5f, 0xb6, 0x38, + 0x4c, 0xec, 0x27, 0xa7, 0x14, 0xba, 0x0c, 0x0d, 0x62, 0xc1, 0x03, 0x88, 0x1e, 0xcd, 0xf0, 0xb9, + 0x44, 0x7a, 0x6f, 0x96, 0x8f, 0x07, 0x72, 0x0a, 0x19, 0xb0, 0x9d, 0x78, 0x41, 0x45, 0x8f, 0x67, + 0xba, 0x4e, 0x4b, 0x4f, 0xae, 0x13, 0xf3, 0x57, 0xb3, 0x40, 0x4a, 0xbe, 0x86, 0xa1, 0x27, 0xb3, + 0xdd, 0x1c, 0xa5, 0xf7, 0xaf, 0x95, 0xf3, 0x17, 0x3c, 0x80, 0x7c, 0xe0, 0x9a, 0x83, 0xb6, 0x26, + 0x2f, 0x3e, 0x0c, 0x52, 0x4c, 0xba, 0x11, 0x31, 0x8c, 0xc0, 0x6c, 0xce, 0x31, 0x26, 0xaf, 0x07, + 0x1c, 0x23, 0x66, 0x8c, 0x97, 0x53, 0xa8, 0x05, 0x77, 0x22, 0x73, 0x35, 0xda, 0x89, 0x66, 0x28, + 0x50, 0xde, 0xa4, 0xbb, 0xf1, 0xcc, 0xf8, 0xb4, 0x45, 0x2a, 0xd0, 0x44, 0xda, 0xe2, 0xeb, 0xdc, + 0x44, 0xda, 0x12, 0x0a, 0x99, 0x9c, 0xaa, 0xfe, 0x26, 0x03, 0x59, 0x52, 0x25, 0x50, 0x9b, 0x57, + 0xd3, 0xd0, 0x2e, 0xb9, 0x37, 0x75, 0x08, 0x91, 0xee, 0x27, 0xb1, 0x7d, 0x67, 0x7e, 0x0a, 0xa5, + 0x98, 0x46, 0x83, 0x1e, 0x5c, 0xd3, 0x0a, 0xa5, 0x72, 0xb2, 0x80, 0x8f, 0xfd, 0x11, 0xe4, 0xfc, + 0x4e, 0x83, 0x36, 0xbc, 0x0c, 0x85, 0xba, 0x96, 0xb4, 0x19, 0x25, 0x07, 0xb5, 0xfd, 0xee, 0xc1, + 0xb5, 0xa3, 0x9d, 0x88, 0x6b, 0x4f, 0x36, 0x19, 0xdf, 0xaf, 0x68, 0x7a, 0xc6, 0x7e, 0x25, 0x24, + 0xa6, 0x9c, 0x2c, 0xe0, 0x61, 0x5f, 0x2c, 0xd2, 0x9f, 0xe3, 0x1f, 0xfe, 0x2f, 0x00, 0x00, 0xff, + 0xff, 0x17, 0x25, 0x65, 0xbe, 0x29, 0x1f, 0x00, 0x00, +} diff --git a/vendor/github.com/containerd/containerd/LICENSE.code b/vendor/github.com/containerd/containerd/LICENSE.code new file mode 100644 index 000000000000..8f3fee627a45 --- /dev/null +++ b/vendor/github.com/containerd/containerd/LICENSE.code @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containerd/containerd/LICENSE.docs b/vendor/github.com/containerd/containerd/LICENSE.docs new file mode 100644 index 000000000000..2f244ac81403 --- /dev/null +++ b/vendor/github.com/containerd/containerd/LICENSE.docs @@ -0,0 +1,395 @@ +Attribution 4.0 International + +======================================================================= + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. 
Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. + +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. + + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. More_considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution 4.0 International Public License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution 4.0 International Public License ("Public License"). To the +extent this Public License may be interpreted as a contract, You are +granted the Licensed Rights in consideration of Your acceptance of +these terms and conditions, and the Licensor grants You such rights in +consideration of benefits the Licensor receives from making the +Licensed Material available under these terms and conditions. + + +Section 1 -- Definitions. + + a. 
Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + d. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + e. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + f. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + g. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + h. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + i. Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + j. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + k. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part; and + + b. produce, reproduce, and Share Adapted Material. + + 2. Exceptions and Limitations. 
For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. 
indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + 4. If You Share Adapted Material You produce, the Adapter's + License You apply must not prevent recipients of the Adapted + Material from complying with this Public License. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database; + + b. if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material; and + + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. 
However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + + +======================================================================= + +Creative Commons is not a party to its public +licenses. Notwithstanding, Creative Commons may elect to apply one of +its public licenses to material it publishes and in those instances +will be considered the “Licensor.” The text of the Creative Commons +public licenses is dedicated to the public domain under the CC0 Public +Domain Dedication. Except for the limited purpose of indicating that +material is shared under a Creative Commons public license or as +otherwise permitted by the Creative Commons policies published at +creativecommons.org/policies, Creative Commons does not authorize the +use of the trademark "Creative Commons" or any other trademark or logo +of Creative Commons without its prior written consent including, +without limitation, in connection with any unauthorized modifications +to any of its public licenses or any other arrangements, +understandings, or agreements concerning use of licensed material. For +the avoidance of doubt, this paragraph does not form part of the +public licenses. + +Creative Commons may be contacted at creativecommons.org. 
diff --git a/vendor/github.com/containerd/containerd/NOTICE b/vendor/github.com/containerd/containerd/NOTICE new file mode 100644 index 000000000000..8915f02773f5 --- /dev/null +++ b/vendor/github.com/containerd/containerd/NOTICE @@ -0,0 +1,16 @@ +Docker +Copyright 2012-2015 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.pb.go b/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.pb.go new file mode 100644 index 000000000000..90d30990317a --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.pb.go @@ -0,0 +1,2741 @@ +// Code generated by protoc-gen-gogo. +// source: github.com/containerd/containerd/api/services/containers/v1/containers.proto +// DO NOT EDIT! + +/* + Package containers is a generated protocol buffer package. + + It is generated from these files: + github.com/containerd/containerd/api/services/containers/v1/containers.proto + + It has these top-level messages: + Container + GetContainerRequest + GetContainerResponse + ListContainersRequest + ListContainersResponse + CreateContainerRequest + CreateContainerResponse + UpdateContainerRequest + UpdateContainerResponse + DeleteContainerRequest +*/ +package containers + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" +import google_protobuf1 "github.com/gogo/protobuf/types" +import google_protobuf2 "github.com/golang/protobuf/ptypes/empty" +import google_protobuf3 "github.com/gogo/protobuf/types" +import _ "github.com/gogo/protobuf/types" + +import time "time" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +import github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" + +import strings "strings" +import reflect "reflect" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type Container struct { + // ID is the user-specified identifier. + // + // This field may not be updated. + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // Labels provides an area to include arbitrary data on containers. + // + // The combined size of a key/value pair cannot exceed 4096 bytes. + // + // Note that to add a new value to this field, read the existing set and + // include the entire result in the update call. 
+ Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Image contains the reference of the image used to build the + // specification and snapshots for running this container. + // + // If this field is updated, the spec and rootfs needed to updated, as well. + Image string `protobuf:"bytes,3,opt,name=image,proto3" json:"image,omitempty"` + // Runtime specifies which runtime to use for executing this container. + Runtime *Container_Runtime `protobuf:"bytes,4,opt,name=runtime" json:"runtime,omitempty"` + // Spec to be used when creating the container. This is runtime specific. + Spec *google_protobuf1.Any `protobuf:"bytes,5,opt,name=spec" json:"spec,omitempty"` + // Snapshotter specifies the snapshotter name used for rootfs + Snapshotter string `protobuf:"bytes,6,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"` + // SnapshotKey specifies the snapshot key to use for the container's root + // filesystem. When starting a task from this container, a caller should + // look up the mounts from the snapshot service and include those on the + // task create request. + // + // Snapshots referenced in this field will not be garbage collected. + // + // This field is set to empty when the rootfs is not a snapshot. + // + // This field may be updated. + SnapshotKey string `protobuf:"bytes,7,opt,name=snapshot_key,json=snapshotKey,proto3" json:"snapshot_key,omitempty"` + // CreatedAt is the time the container was first created. + CreatedAt time.Time `protobuf:"bytes,8,opt,name=created_at,json=createdAt,stdtime" json:"created_at"` + // UpdatedAt is the last time the container was mutated. + UpdatedAt time.Time `protobuf:"bytes,9,opt,name=updated_at,json=updatedAt,stdtime" json:"updated_at"` + // Extensions allow clients to provide zero or more blobs that are directly + // associated with the container. One may provide protobuf, json, or other + // encoding formats. The primary use of this is to further decorate the + // container object with fields that may be specific to a client integration. + // + // The key portion of this map should identify a "name" for the extension + // that should be unique against other extensions. When updating extension + // data, one should only update the specified extension using field paths + // to select a specific map key. + Extensions map[string]google_protobuf1.Any `protobuf:"bytes,10,rep,name=extensions" json:"extensions" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *Container) Reset() { *m = Container{} } +func (*Container) ProtoMessage() {} +func (*Container) Descriptor() ([]byte, []int) { return fileDescriptorContainers, []int{0} } + +type Container_Runtime struct { + // Name is the name of the runtime. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Options specify additional runtime initialization options. 
+ Options *google_protobuf1.Any `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` +} + +func (m *Container_Runtime) Reset() { *m = Container_Runtime{} } +func (*Container_Runtime) ProtoMessage() {} +func (*Container_Runtime) Descriptor() ([]byte, []int) { return fileDescriptorContainers, []int{0, 1} } + +type GetContainerRequest struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (m *GetContainerRequest) Reset() { *m = GetContainerRequest{} } +func (*GetContainerRequest) ProtoMessage() {} +func (*GetContainerRequest) Descriptor() ([]byte, []int) { return fileDescriptorContainers, []int{1} } + +type GetContainerResponse struct { + Container Container `protobuf:"bytes,1,opt,name=container" json:"container"` +} + +func (m *GetContainerResponse) Reset() { *m = GetContainerResponse{} } +func (*GetContainerResponse) ProtoMessage() {} +func (*GetContainerResponse) Descriptor() ([]byte, []int) { return fileDescriptorContainers, []int{2} } + +type ListContainersRequest struct { + // Filters contains one or more filters using the syntax defined in the + // containerd filter package. + // + // The returned result will be those that match any of the provided + // filters. Expanded, containers that match the following will be + // returned: + // + // filters[0] or filters[1] or ... or filters[n-1] or filters[n] + // + // If filters is zero-length or nil, all items will be returned. + Filters []string `protobuf:"bytes,1,rep,name=filters" json:"filters,omitempty"` +} + +func (m *ListContainersRequest) Reset() { *m = ListContainersRequest{} } +func (*ListContainersRequest) ProtoMessage() {} +func (*ListContainersRequest) Descriptor() ([]byte, []int) { return fileDescriptorContainers, []int{3} } + +type ListContainersResponse struct { + Containers []Container `protobuf:"bytes,1,rep,name=containers" json:"containers"` +} + +func (m *ListContainersResponse) Reset() { *m = ListContainersResponse{} } +func (*ListContainersResponse) ProtoMessage() {} +func (*ListContainersResponse) Descriptor() ([]byte, []int) { return fileDescriptorContainers, []int{4} } + +type CreateContainerRequest struct { + Container Container `protobuf:"bytes,1,opt,name=container" json:"container"` +} + +func (m *CreateContainerRequest) Reset() { *m = CreateContainerRequest{} } +func (*CreateContainerRequest) ProtoMessage() {} +func (*CreateContainerRequest) Descriptor() ([]byte, []int) { return fileDescriptorContainers, []int{5} } + +type CreateContainerResponse struct { + Container Container `protobuf:"bytes,1,opt,name=container" json:"container"` +} + +func (m *CreateContainerResponse) Reset() { *m = CreateContainerResponse{} } +func (*CreateContainerResponse) ProtoMessage() {} +func (*CreateContainerResponse) Descriptor() ([]byte, []int) { + return fileDescriptorContainers, []int{6} +} + +// UpdateContainerRequest updates the metadata on one or more container. +// +// The operation should follow semantics described in +// https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-mask, +// unless otherwise qualified. +type UpdateContainerRequest struct { + // Container provides the target values, as declared by the mask, for the update. + // + // The ID field must be set. + Container Container `protobuf:"bytes,1,opt,name=container" json:"container"` + // UpdateMask specifies which fields to perform the update on. If empty, + // the operation applies to all fields. 
+ UpdateMask *google_protobuf3.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"` +} + +func (m *UpdateContainerRequest) Reset() { *m = UpdateContainerRequest{} } +func (*UpdateContainerRequest) ProtoMessage() {} +func (*UpdateContainerRequest) Descriptor() ([]byte, []int) { return fileDescriptorContainers, []int{7} } + +type UpdateContainerResponse struct { + Container Container `protobuf:"bytes,1,opt,name=container" json:"container"` +} + +func (m *UpdateContainerResponse) Reset() { *m = UpdateContainerResponse{} } +func (*UpdateContainerResponse) ProtoMessage() {} +func (*UpdateContainerResponse) Descriptor() ([]byte, []int) { + return fileDescriptorContainers, []int{8} +} + +type DeleteContainerRequest struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (m *DeleteContainerRequest) Reset() { *m = DeleteContainerRequest{} } +func (*DeleteContainerRequest) ProtoMessage() {} +func (*DeleteContainerRequest) Descriptor() ([]byte, []int) { return fileDescriptorContainers, []int{9} } + +func init() { + proto.RegisterType((*Container)(nil), "containerd.services.containers.v1.Container") + proto.RegisterType((*Container_Runtime)(nil), "containerd.services.containers.v1.Container.Runtime") + proto.RegisterType((*GetContainerRequest)(nil), "containerd.services.containers.v1.GetContainerRequest") + proto.RegisterType((*GetContainerResponse)(nil), "containerd.services.containers.v1.GetContainerResponse") + proto.RegisterType((*ListContainersRequest)(nil), "containerd.services.containers.v1.ListContainersRequest") + proto.RegisterType((*ListContainersResponse)(nil), "containerd.services.containers.v1.ListContainersResponse") + proto.RegisterType((*CreateContainerRequest)(nil), "containerd.services.containers.v1.CreateContainerRequest") + proto.RegisterType((*CreateContainerResponse)(nil), "containerd.services.containers.v1.CreateContainerResponse") + proto.RegisterType((*UpdateContainerRequest)(nil), "containerd.services.containers.v1.UpdateContainerRequest") + proto.RegisterType((*UpdateContainerResponse)(nil), "containerd.services.containers.v1.UpdateContainerResponse") + proto.RegisterType((*DeleteContainerRequest)(nil), "containerd.services.containers.v1.DeleteContainerRequest") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion4 + +// Client API for Containers service + +type ContainersClient interface { + Get(ctx context.Context, in *GetContainerRequest, opts ...grpc.CallOption) (*GetContainerResponse, error) + List(ctx context.Context, in *ListContainersRequest, opts ...grpc.CallOption) (*ListContainersResponse, error) + Create(ctx context.Context, in *CreateContainerRequest, opts ...grpc.CallOption) (*CreateContainerResponse, error) + Update(ctx context.Context, in *UpdateContainerRequest, opts ...grpc.CallOption) (*UpdateContainerResponse, error) + Delete(ctx context.Context, in *DeleteContainerRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) +} + +type containersClient struct { + cc *grpc.ClientConn +} + +func NewContainersClient(cc *grpc.ClientConn) ContainersClient { + return &containersClient{cc} +} + +func (c *containersClient) Get(ctx context.Context, in *GetContainerRequest, opts ...grpc.CallOption) (*GetContainerResponse, error) { + out := new(GetContainerResponse) + err := grpc.Invoke(ctx, "/containerd.services.containers.v1.Containers/Get", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *containersClient) List(ctx context.Context, in *ListContainersRequest, opts ...grpc.CallOption) (*ListContainersResponse, error) { + out := new(ListContainersResponse) + err := grpc.Invoke(ctx, "/containerd.services.containers.v1.Containers/List", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *containersClient) Create(ctx context.Context, in *CreateContainerRequest, opts ...grpc.CallOption) (*CreateContainerResponse, error) { + out := new(CreateContainerResponse) + err := grpc.Invoke(ctx, "/containerd.services.containers.v1.Containers/Create", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *containersClient) Update(ctx context.Context, in *UpdateContainerRequest, opts ...grpc.CallOption) (*UpdateContainerResponse, error) { + out := new(UpdateContainerResponse) + err := grpc.Invoke(ctx, "/containerd.services.containers.v1.Containers/Update", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *containersClient) Delete(ctx context.Context, in *DeleteContainerRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) { + out := new(google_protobuf2.Empty) + err := grpc.Invoke(ctx, "/containerd.services.containers.v1.Containers/Delete", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Containers service + +type ContainersServer interface { + Get(context.Context, *GetContainerRequest) (*GetContainerResponse, error) + List(context.Context, *ListContainersRequest) (*ListContainersResponse, error) + Create(context.Context, *CreateContainerRequest) (*CreateContainerResponse, error) + Update(context.Context, *UpdateContainerRequest) (*UpdateContainerResponse, error) + Delete(context.Context, *DeleteContainerRequest) (*google_protobuf2.Empty, error) +} + +func RegisterContainersServer(s *grpc.Server, srv ContainersServer) { + s.RegisterService(&_Containers_serviceDesc, srv) +} + +func _Containers_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetContainerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContainersServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.containers.v1.Containers/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContainersServer).Get(ctx, req.(*GetContainerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Containers_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListContainersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContainersServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.containers.v1.Containers/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContainersServer).List(ctx, req.(*ListContainersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Containers_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateContainerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContainersServer).Create(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.containers.v1.Containers/Create", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContainersServer).Create(ctx, req.(*CreateContainerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Containers_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateContainerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContainersServer).Update(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.containers.v1.Containers/Update", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContainersServer).Update(ctx, req.(*UpdateContainerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Containers_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteContainerRequest) + if err := dec(in); err != nil { + return 
nil, err + } + if interceptor == nil { + return srv.(ContainersServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.containers.v1.Containers/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContainersServer).Delete(ctx, req.(*DeleteContainerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Containers_serviceDesc = grpc.ServiceDesc{ + ServiceName: "containerd.services.containers.v1.Containers", + HandlerType: (*ContainersServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Get", + Handler: _Containers_Get_Handler, + }, + { + MethodName: "List", + Handler: _Containers_List_Handler, + }, + { + MethodName: "Create", + Handler: _Containers_Create_Handler, + }, + { + MethodName: "Update", + Handler: _Containers_Update_Handler, + }, + { + MethodName: "Delete", + Handler: _Containers_Delete_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/containerd/containerd/api/services/containers/v1/containers.proto", +} + +func (m *Container) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Container) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintContainers(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x12 + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovContainers(uint64(len(k))) + 1 + len(v) + sovContainers(uint64(len(v))) + i = encodeVarintContainers(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintContainers(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintContainers(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.Image) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintContainers(dAtA, i, uint64(len(m.Image))) + i += copy(dAtA[i:], m.Image) + } + if m.Runtime != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintContainers(dAtA, i, uint64(m.Runtime.Size())) + n1, err := m.Runtime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.Spec != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintContainers(dAtA, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if len(m.Snapshotter) > 0 { + dAtA[i] = 0x32 + i++ + i = encodeVarintContainers(dAtA, i, uint64(len(m.Snapshotter))) + i += copy(dAtA[i:], m.Snapshotter) + } + if len(m.SnapshotKey) > 0 { + dAtA[i] = 0x3a + i++ + i = encodeVarintContainers(dAtA, i, uint64(len(m.SnapshotKey))) + i += copy(dAtA[i:], m.SnapshotKey) + } + dAtA[i] = 0x42 + i++ + i = encodeVarintContainers(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt))) + n3, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + dAtA[i] = 0x4a + i++ + i = encodeVarintContainers(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt))) + n4, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + if len(m.Extensions) > 0 { + for k, _ := range m.Extensions { + dAtA[i] = 0x52 + i++ + v := m.Extensions[k] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 
+ sovContainers(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovContainers(uint64(len(k))) + msgSize + i = encodeVarintContainers(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintContainers(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintContainers(dAtA, i, uint64((&v).Size())) + n5, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + } + return i, nil +} + +func (m *Container_Runtime) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Container_Runtime) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintContainers(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if m.Options != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintContainers(dAtA, i, uint64(m.Options.Size())) + n6, err := m.Options.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + return i, nil +} + +func (m *GetContainerRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetContainerRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintContainers(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + return i, nil +} + +func (m *GetContainerResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetContainerResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintContainers(dAtA, i, uint64(m.Container.Size())) + n7, err := m.Container.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + return i, nil +} + +func (m *ListContainersRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListContainersRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Filters) > 0 { + for _, s := range m.Filters { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *ListContainersResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListContainersResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Containers) > 0 { + for _, msg := range m.Containers { + dAtA[i] = 0xa + i++ + i = encodeVarintContainers(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *CreateContainerRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateContainerRequest) 
MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintContainers(dAtA, i, uint64(m.Container.Size())) + n8, err := m.Container.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + return i, nil +} + +func (m *CreateContainerResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateContainerResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintContainers(dAtA, i, uint64(m.Container.Size())) + n9, err := m.Container.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + return i, nil +} + +func (m *UpdateContainerRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateContainerRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintContainers(dAtA, i, uint64(m.Container.Size())) + n10, err := m.Container.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + if m.UpdateMask != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintContainers(dAtA, i, uint64(m.UpdateMask.Size())) + n11, err := m.UpdateMask.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + } + return i, nil +} + +func (m *UpdateContainerResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateContainerResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintContainers(dAtA, i, uint64(m.Container.Size())) + n12, err := m.Container.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + return i, nil +} + +func (m *DeleteContainerRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteContainerRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintContainers(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + return i, nil +} + +func encodeFixed64Containers(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Containers(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintContainers(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Container) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovContainers(uint64(l)) + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = 
v + mapEntrySize := 1 + len(k) + sovContainers(uint64(len(k))) + 1 + len(v) + sovContainers(uint64(len(v))) + n += mapEntrySize + 1 + sovContainers(uint64(mapEntrySize)) + } + } + l = len(m.Image) + if l > 0 { + n += 1 + l + sovContainers(uint64(l)) + } + if m.Runtime != nil { + l = m.Runtime.Size() + n += 1 + l + sovContainers(uint64(l)) + } + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovContainers(uint64(l)) + } + l = len(m.Snapshotter) + if l > 0 { + n += 1 + l + sovContainers(uint64(l)) + } + l = len(m.SnapshotKey) + if l > 0 { + n += 1 + l + sovContainers(uint64(l)) + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt) + n += 1 + l + sovContainers(uint64(l)) + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt) + n += 1 + l + sovContainers(uint64(l)) + if len(m.Extensions) > 0 { + for k, v := range m.Extensions { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovContainers(uint64(len(k))) + 1 + l + sovContainers(uint64(l)) + n += mapEntrySize + 1 + sovContainers(uint64(mapEntrySize)) + } + } + return n +} + +func (m *Container_Runtime) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovContainers(uint64(l)) + } + if m.Options != nil { + l = m.Options.Size() + n += 1 + l + sovContainers(uint64(l)) + } + return n +} + +func (m *GetContainerRequest) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovContainers(uint64(l)) + } + return n +} + +func (m *GetContainerResponse) Size() (n int) { + var l int + _ = l + l = m.Container.Size() + n += 1 + l + sovContainers(uint64(l)) + return n +} + +func (m *ListContainersRequest) Size() (n int) { + var l int + _ = l + if len(m.Filters) > 0 { + for _, s := range m.Filters { + l = len(s) + n += 1 + l + sovContainers(uint64(l)) + } + } + return n +} + +func (m *ListContainersResponse) Size() (n int) { + var l int + _ = l + if len(m.Containers) > 0 { + for _, e := range m.Containers { + l = e.Size() + n += 1 + l + sovContainers(uint64(l)) + } + } + return n +} + +func (m *CreateContainerRequest) Size() (n int) { + var l int + _ = l + l = m.Container.Size() + n += 1 + l + sovContainers(uint64(l)) + return n +} + +func (m *CreateContainerResponse) Size() (n int) { + var l int + _ = l + l = m.Container.Size() + n += 1 + l + sovContainers(uint64(l)) + return n +} + +func (m *UpdateContainerRequest) Size() (n int) { + var l int + _ = l + l = m.Container.Size() + n += 1 + l + sovContainers(uint64(l)) + if m.UpdateMask != nil { + l = m.UpdateMask.Size() + n += 1 + l + sovContainers(uint64(l)) + } + return n +} + +func (m *UpdateContainerResponse) Size() (n int) { + var l int + _ = l + l = m.Container.Size() + n += 1 + l + sovContainers(uint64(l)) + return n +} + +func (m *DeleteContainerRequest) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovContainers(uint64(l)) + } + return n +} + +func sovContainers(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozContainers(x uint64) (n int) { + return sovContainers(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Container) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, 
this.Labels[k]) + } + mapStringForLabels += "}" + keysForExtensions := make([]string, 0, len(this.Extensions)) + for k, _ := range this.Extensions { + keysForExtensions = append(keysForExtensions, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtensions) + mapStringForExtensions := "map[string]google_protobuf1.Any{" + for _, k := range keysForExtensions { + mapStringForExtensions += fmt.Sprintf("%v: %v,", k, this.Extensions[k]) + } + mapStringForExtensions += "}" + s := strings.Join([]string{`&Container{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Labels:` + mapStringForLabels + `,`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `Runtime:` + strings.Replace(fmt.Sprintf("%v", this.Runtime), "Container_Runtime", "Container_Runtime", 1) + `,`, + `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "Any", "google_protobuf1.Any", 1) + `,`, + `Snapshotter:` + fmt.Sprintf("%v", this.Snapshotter) + `,`, + `SnapshotKey:` + fmt.Sprintf("%v", this.SnapshotKey) + `,`, + `CreatedAt:` + strings.Replace(strings.Replace(this.CreatedAt.String(), "Timestamp", "google_protobuf4.Timestamp", 1), `&`, ``, 1) + `,`, + `UpdatedAt:` + strings.Replace(strings.Replace(this.UpdatedAt.String(), "Timestamp", "google_protobuf4.Timestamp", 1), `&`, ``, 1) + `,`, + `Extensions:` + mapStringForExtensions + `,`, + `}`, + }, "") + return s +} +func (this *Container_Runtime) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Container_Runtime{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "google_protobuf1.Any", 1) + `,`, + `}`, + }, "") + return s +} +func (this *GetContainerRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetContainerRequest{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `}`, + }, "") + return s +} +func (this *GetContainerResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetContainerResponse{`, + `Container:` + strings.Replace(strings.Replace(this.Container.String(), "Container", "Container", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListContainersRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListContainersRequest{`, + `Filters:` + fmt.Sprintf("%v", this.Filters) + `,`, + `}`, + }, "") + return s +} +func (this *ListContainersResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListContainersResponse{`, + `Containers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Containers), "Container", "Container", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CreateContainerRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateContainerRequest{`, + `Container:` + strings.Replace(strings.Replace(this.Container.String(), "Container", "Container", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CreateContainerResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateContainerResponse{`, + `Container:` + strings.Replace(strings.Replace(this.Container.String(), "Container", "Container", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateContainerRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateContainerRequest{`, + `Container:` + 
strings.Replace(strings.Replace(this.Container.String(), "Container", "Container", 1), `&`, ``, 1) + `,`, + `UpdateMask:` + strings.Replace(fmt.Sprintf("%v", this.UpdateMask), "FieldMask", "google_protobuf3.FieldMask", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateContainerResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateContainerResponse{`, + `Container:` + strings.Replace(strings.Replace(this.Container.String(), "Container", "Container", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeleteContainerRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeleteContainerRequest{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `}`, + }, "") + return s +} +func valueToStringContainers(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Container) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Container: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Container: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthContainers + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthContainers + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthContainers + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > 
l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Labels == nil { + m.Labels = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthContainers + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Labels[mapkey] = mapvalue + } else { + var mapvalue string + m.Labels[mapkey] = mapvalue + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthContainers + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Image = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Runtime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthContainers + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Runtime == nil { + m.Runtime = &Container_Runtime{} + } + if err := m.Runtime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthContainers + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &google_protobuf1.Any{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Snapshotter", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen 
|= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthContainers + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Snapshotter = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SnapshotKey", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthContainers + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SnapshotKey = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthContainers + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.CreatedAt, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthContainers + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.UpdatedAt, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extensions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthContainers + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthContainers + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if 
postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Extensions == nil { + m.Extensions = make(map[string]google_protobuf1.Any) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthContainers + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthContainers + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &google_protobuf1.Any{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Extensions[mapkey] = *mapvalue + } else { + var mapvalue google_protobuf1.Any + m.Extensions[mapkey] = mapvalue + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContainers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContainers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Container_Runtime) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Runtime: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Runtime: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthContainers + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthContainers + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = &google_protobuf1.Any{} + } + if err := 
m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContainers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContainers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetContainerRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetContainerRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetContainerRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthContainers + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContainers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContainers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetContainerResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetContainerResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetContainerResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthContainers + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Container.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContainers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthContainers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListContainersRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListContainersRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListContainersRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthContainers + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Filters = append(m.Filters, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContainers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContainers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListContainersResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListContainersResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListContainersResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Containers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthContainers + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Containers = append(m.Containers, Container{}) + if err := m.Containers[len(m.Containers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContainers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContainers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + 
if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateContainerRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateContainerRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateContainerRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthContainers + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Container.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContainers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContainers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateContainerResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateContainerResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateContainerResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthContainers + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Container.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContainers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContainers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateContainerRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateContainerRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateContainerRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthContainers + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Container.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdateMask", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthContainers + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UpdateMask == nil { + m.UpdateMask = &google_protobuf3.FieldMask{} + } + if err := m.UpdateMask.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContainers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContainers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateContainerResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateContainerResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateContainerResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthContainers + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Container.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = 
postIndex + default: + iNdEx = preIndex + skippy, err := skipContainers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContainers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteContainerRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteContainerRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteContainerRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContainers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthContainers + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContainers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContainers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipContainers(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowContainers + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowContainers + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowContainers + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthContainers + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowContainers + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipContainers(dAtA[start:]) + if err != nil { + return 0, 
err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthContainers = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowContainers = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("github.com/containerd/containerd/api/services/containers/v1/containers.proto", fileDescriptorContainers) +} + +var fileDescriptorContainers = []byte{ + // 776 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcd, 0x72, 0x12, 0x5b, + 0x10, 0xce, 0x00, 0x81, 0xd0, 0xdc, 0xaa, 0x7b, 0xeb, 0x5c, 0x2e, 0x77, 0x1c, 0xab, 0x80, 0xb0, + 0xa2, 0x2c, 0x1d, 0x0c, 0x5a, 0x9a, 0x1f, 0x37, 0x21, 0x7f, 0x65, 0x99, 0x58, 0xa9, 0x51, 0x37, + 0xba, 0x88, 0x03, 0x74, 0xc8, 0xc8, 0xfc, 0x39, 0xe7, 0x40, 0x49, 0xb9, 0xd0, 0x47, 0x70, 0xe7, + 0x23, 0xf8, 0x2a, 0x59, 0xba, 0x74, 0x15, 0x13, 0x9e, 0xc4, 0x9a, 0x33, 0x33, 0xcc, 0x04, 0x06, + 0x85, 0x68, 0x76, 0xa7, 0x39, 0xfd, 0x7d, 0xfd, 0xf1, 0x75, 0xf7, 0x01, 0xd8, 0xef, 0x68, 0xec, + 0xa4, 0xd7, 0x94, 0x5b, 0x96, 0x51, 0x6b, 0x59, 0x26, 0x53, 0x35, 0x13, 0x9d, 0x76, 0xf4, 0xa8, + 0xda, 0x5a, 0x8d, 0xa2, 0xd3, 0xd7, 0x5a, 0x48, 0xc3, 0xcf, 0x69, 0xad, 0xbf, 0x12, 0x89, 0x64, + 0xdb, 0xb1, 0x98, 0x45, 0x96, 0x43, 0x9c, 0x1c, 0x60, 0xe4, 0x48, 0x56, 0x7f, 0x45, 0xca, 0x77, + 0xac, 0x8e, 0xc5, 0xb3, 0x6b, 0xee, 0xc9, 0x03, 0x4a, 0x37, 0x3a, 0x96, 0xd5, 0xd1, 0xb1, 0xc6, + 0xa3, 0x66, 0xef, 0xb8, 0xa6, 0x9a, 0x03, 0xff, 0xea, 0xe6, 0xf8, 0x15, 0x1a, 0x36, 0x0b, 0x2e, + 0xcb, 0xe3, 0x97, 0xc7, 0x1a, 0xea, 0xed, 0x23, 0x43, 0xa5, 0x5d, 0x3f, 0xa3, 0x34, 0x9e, 0xc1, + 0x34, 0x03, 0x29, 0x53, 0x0d, 0xdb, 0x4b, 0xa8, 0x7c, 0x4e, 0x43, 0x76, 0x2b, 0x90, 0x48, 0x0a, + 0x90, 0xd0, 0xda, 0xa2, 0x50, 0x16, 0xaa, 0xd9, 0x46, 0x7a, 0x78, 0x56, 0x4a, 0x3c, 0xde, 0x56, + 0x12, 0x5a, 0x9b, 0x1c, 0x42, 0x5a, 0x57, 0x9b, 0xa8, 0x53, 0x31, 0x51, 0x4e, 0x56, 0x73, 0xf5, + 0x55, 0xf9, 0x97, 0x5f, 0x55, 0x1e, 0xb1, 0xca, 0xfb, 0x1c, 0xba, 0x63, 0x32, 0x67, 0xa0, 0xf8, + 0x3c, 0x24, 0x0f, 0x8b, 0x9a, 0xa1, 0x76, 0x50, 0x4c, 0xba, 0xc5, 0x14, 0x2f, 0x20, 0x4f, 0x21, + 0xe3, 0xf4, 0x4c, 0x57, 0xa3, 0x98, 0x2a, 0x0b, 0xd5, 0x5c, 0xfd, 0xfe, 0x5c, 0x85, 0x14, 0x0f, + 0xab, 0x04, 0x24, 0xa4, 0x0a, 0x29, 0x6a, 0x63, 0x4b, 0x5c, 0xe4, 0x64, 0x79, 0xd9, 0x73, 0x43, + 0x0e, 0xdc, 0x90, 0x37, 0xcd, 0x81, 0xc2, 0x33, 0x48, 0x19, 0x72, 0xd4, 0x54, 0x6d, 0x7a, 0x62, + 0x31, 0x86, 0x8e, 0x98, 0xe6, 0xaa, 0xa2, 0x1f, 0x91, 0x65, 0xf8, 0x2b, 0x08, 0x8f, 0xba, 0x38, + 0x10, 0x33, 0x97, 0x53, 0x9e, 0xe0, 0x80, 0x6c, 0x01, 0xb4, 0x1c, 0x54, 0x19, 0xb6, 0x8f, 0x54, + 0x26, 0x2e, 0xf1, 0xa2, 0xd2, 0x44, 0xd1, 0xe7, 0x41, 0x0b, 0x1a, 0x4b, 0xa7, 0x67, 0xa5, 0x85, + 0x4f, 0xdf, 0x4b, 0x82, 0x92, 0xf5, 0x71, 0x9b, 0xcc, 0x25, 0xe9, 0xd9, 0xed, 0x80, 0x24, 0x3b, + 0x0f, 0x89, 0x8f, 0xdb, 0x64, 0xa4, 0x09, 0x80, 0xef, 0x18, 0x9a, 0x54, 0xb3, 0x4c, 0x2a, 0x02, + 0x6f, 0xda, 0xa3, 0xb9, 0xbc, 0xdc, 0x19, 0xc1, 0x79, 0xe3, 0x1a, 0x29, 0xb7, 0x8c, 0x12, 0x61, + 0x95, 0xd6, 0x20, 0x17, 0xe9, 0x2c, 0xf9, 0x07, 0x92, 0xae, 0x2d, 0x7c, 0x78, 0x14, 0xf7, 0xe8, + 0xf6, 0xb8, 0xaf, 0xea, 0x3d, 0x14, 0x13, 0x5e, 0x8f, 0x79, 0xb0, 0x9e, 0x58, 0x15, 0xa4, 0x03, + 0xc8, 0xf8, 0xbd, 0x22, 0x04, 0x52, 0xa6, 0x6a, 0xa0, 0x8f, 0xe3, 0x67, 0x22, 0x43, 0xc6, 0xb2, + 0x19, 0x97, 0x9e, 0xf8, 0x49, 0xe7, 0x82, 0x24, 0xe9, 0x19, 
0xfc, 0x3d, 0x26, 0x37, 0x46, 0xcd, + 0xad, 0xa8, 0x9a, 0x69, 0x94, 0xa1, 0xc6, 0xca, 0x1d, 0xf8, 0x77, 0x0f, 0xd9, 0xc8, 0x10, 0x05, + 0xdf, 0xf6, 0x90, 0xb2, 0x69, 0x2b, 0x52, 0x39, 0x81, 0xfc, 0xe5, 0x74, 0x6a, 0x5b, 0x26, 0x45, + 0x72, 0x08, 0xd9, 0x91, 0xc5, 0x1c, 0x96, 0xab, 0xdf, 0x9e, 0xa7, 0x11, 0xbe, 0xf1, 0x21, 0x49, + 0x65, 0x05, 0xfe, 0xdb, 0xd7, 0x68, 0x58, 0x8a, 0x06, 0xd2, 0x44, 0xc8, 0x1c, 0x6b, 0x3a, 0x43, + 0x87, 0x8a, 0x42, 0x39, 0x59, 0xcd, 0x2a, 0x41, 0x58, 0xd1, 0xa1, 0x30, 0x0e, 0xf1, 0xe5, 0x29, + 0x00, 0x61, 0x61, 0x0e, 0xbb, 0x9a, 0xbe, 0x08, 0x4b, 0xe5, 0x0d, 0x14, 0xb6, 0xf8, 0x38, 0x4f, + 0x98, 0xf7, 0xe7, 0xcd, 0xe8, 0xc2, 0xff, 0x13, 0xb5, 0xae, 0xcd, 0xf9, 0x2f, 0x02, 0x14, 0x5e, + 0xf0, 0x1d, 0xbb, 0xfe, 0x6f, 0x46, 0x36, 0x20, 0xe7, 0xed, 0x33, 0x7f, 0xcf, 0xfd, 0xa9, 0x9d, + 0x7c, 0x08, 0x76, 0xdd, 0x27, 0xff, 0x40, 0xa5, 0x5d, 0xc5, 0x7f, 0x36, 0xdc, 0xb3, 0x6b, 0xcb, + 0x84, 0xd0, 0x6b, 0xb3, 0xe5, 0x2e, 0x14, 0xb6, 0x51, 0xc7, 0x18, 0x57, 0xa6, 0x2c, 0x4b, 0xfd, + 0x3c, 0x05, 0x10, 0x0e, 0x23, 0xe9, 0x43, 0x72, 0x0f, 0x19, 0x79, 0x30, 0x83, 0x8c, 0x98, 0x95, + 0x94, 0x1e, 0xce, 0x8d, 0xf3, 0xad, 0x78, 0x0f, 0x29, 0x77, 0x2d, 0xc8, 0x2c, 0x3f, 0x67, 0xb1, + 0x2b, 0x27, 0xad, 0x5d, 0x01, 0xe9, 0x17, 0xff, 0x00, 0x69, 0x6f, 0x72, 0xc9, 0x2c, 0x24, 0xf1, + 0x0b, 0x25, 0xad, 0x5f, 0x05, 0x1a, 0x0a, 0xf0, 0x66, 0x64, 0x26, 0x01, 0xf1, 0x73, 0x3f, 0x93, + 0x80, 0x69, 0x93, 0xf8, 0x0a, 0xd2, 0xde, 0xdc, 0xcc, 0x24, 0x20, 0x7e, 0xc4, 0xa4, 0xc2, 0xc4, + 0x46, 0xec, 0xb8, 0xff, 0x90, 0x1a, 0xaf, 0x4f, 0x2f, 0x8a, 0x0b, 0xdf, 0x2e, 0x8a, 0x0b, 0x1f, + 0x87, 0x45, 0xe1, 0x74, 0x58, 0x14, 0xbe, 0x0e, 0x8b, 0xc2, 0xf9, 0xb0, 0x28, 0xbc, 0xdc, 0xfd, + 0x8d, 0x3f, 0x7d, 0x1b, 0x61, 0xd4, 0x4c, 0xf3, 0x8a, 0xf7, 0x7e, 0x04, 0x00, 0x00, 0xff, 0xff, + 0x17, 0x73, 0xba, 0x43, 0x45, 0x0a, 0x00, 0x00, +} diff --git a/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.proto b/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.proto new file mode 100644 index 000000000000..0a2311c4c47e --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.proto @@ -0,0 +1,158 @@ +syntax = "proto3"; + +package containerd.services.containers.v1; + +import "gogoproto/gogo.proto"; +import "google/protobuf/any.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/field_mask.proto"; +import "google/protobuf/timestamp.proto"; + +option go_package = "github.com/containerd/containerd/api/services/containers/v1;containers"; + +// Containers provides metadata storage for containers used in the execution +// service. +// +// The objects here provide an state-independent view of containers for use in +// management and resource pinning. From that perspective, containers do not +// have a "state" but rather this is the set of resources that will be +// considered in use by the container. +// +// From the perspective of the execution service, these objects represent the +// base parameters for creating a container process. +// +// In general, when looking to add fields for this type, first ask yourself +// whether or not the function of the field has to do with runtime execution or +// is invariant of the runtime state of the container. If it has to do with +// runtime, or changes as the "container" is started and stops, it probably +// doesn't belong on this object. 
+service Containers { + rpc Get(GetContainerRequest) returns (GetContainerResponse); + rpc List(ListContainersRequest) returns (ListContainersResponse); + rpc Create(CreateContainerRequest) returns (CreateContainerResponse); + rpc Update(UpdateContainerRequest) returns (UpdateContainerResponse); + rpc Delete(DeleteContainerRequest) returns (google.protobuf.Empty); +} + +message Container { + // ID is the user-specified identifier. + // + // This field may not be updated. + string id = 1; + + // Labels provides an area to include arbitrary data on containers. + // + // The combined size of a key/value pair cannot exceed 4096 bytes. + // + // Note that to add a new value to this field, read the existing set and + // include the entire result in the update call. + map labels = 2; + + // Image contains the reference of the image used to build the + // specification and snapshots for running this container. + // + // If this field is updated, the spec and rootfs needed to updated, as well. + string image = 3; + + message Runtime { + // Name is the name of the runtime. + string name = 1; + // Options specify additional runtime initialization options. + google.protobuf.Any options = 2; + } + // Runtime specifies which runtime to use for executing this container. + Runtime runtime = 4; + + // Spec to be used when creating the container. This is runtime specific. + google.protobuf.Any spec = 5; + + // Snapshotter specifies the snapshotter name used for rootfs + string snapshotter = 6; + + // SnapshotKey specifies the snapshot key to use for the container's root + // filesystem. When starting a task from this container, a caller should + // look up the mounts from the snapshot service and include those on the + // task create request. + // + // Snapshots referenced in this field will not be garbage collected. + // + // This field is set to empty when the rootfs is not a snapshot. + // + // This field may be updated. + string snapshot_key = 7; + + // CreatedAt is the time the container was first created. + google.protobuf.Timestamp created_at = 8 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + + // UpdatedAt is the last time the container was mutated. + google.protobuf.Timestamp updated_at = 9 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + + // Extensions allow clients to provide zero or more blobs that are directly + // associated with the container. One may provide protobuf, json, or other + // encoding formats. The primary use of this is to further decorate the + // container object with fields that may be specific to a client integration. + // + // The key portion of this map should identify a "name" for the extension + // that should be unique against other extensions. When updating extension + // data, one should only update the specified extension using field paths + // to select a specific map key. + map extensions = 10 [(gogoproto.nullable) = false]; +} + +message GetContainerRequest { + string id = 1; +} + +message GetContainerResponse { + Container container = 1 [(gogoproto.nullable) = false]; +} + +message ListContainersRequest { + // Filters contains one or more filters using the syntax defined in the + // containerd filter package. + // + // The returned result will be those that match any of the provided + // filters. Expanded, containers that match the following will be + // returned: + // + // filters[0] or filters[1] or ... or filters[n-1] or filters[n] + // + // If filters is zero-length or nil, all items will be returned. 
+ repeated string filters = 1; +} + +message ListContainersResponse { + repeated Container containers = 1 [(gogoproto.nullable) = false]; +} + +message CreateContainerRequest { + Container container = 1 [(gogoproto.nullable) = false]; +} + +message CreateContainerResponse { + Container container = 1 [(gogoproto.nullable) = false]; +} + +// UpdateContainerRequest updates the metadata on one or more container. +// +// The operation should follow semantics described in +// https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-mask, +// unless otherwise qualified. +message UpdateContainerRequest { + // Container provides the target values, as declared by the mask, for the update. + // + // The ID field must be set. + Container container = 1 [(gogoproto.nullable) = false]; + + // UpdateMask specifies which fields to perform the update on. If empty, + // the operation applies to all fields. + google.protobuf.FieldMask update_mask = 2; +} + +message UpdateContainerResponse { + Container container = 1 [(gogoproto.nullable) = false]; +} + +message DeleteContainerRequest { + string id = 1; +} diff --git a/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.pb.go b/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.pb.go new file mode 100644 index 000000000000..0f587685b945 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.pb.go @@ -0,0 +1,5812 @@ +// Code generated by protoc-gen-gogo. +// source: github.com/containerd/containerd/api/services/tasks/v1/tasks.proto +// DO NOT EDIT! + +/* + Package tasks is a generated protocol buffer package. + + It is generated from these files: + github.com/containerd/containerd/api/services/tasks/v1/tasks.proto + + It has these top-level messages: + CreateTaskRequest + CreateTaskResponse + StartRequest + StartResponse + DeleteTaskRequest + DeleteResponse + DeleteProcessRequest + GetRequest + GetResponse + ListTasksRequest + ListTasksResponse + KillRequest + ExecProcessRequest + ExecProcessResponse + ResizePtyRequest + CloseIORequest + PauseTaskRequest + ResumeTaskRequest + ListPidsRequest + ListPidsResponse + CheckpointTaskRequest + CheckpointTaskResponse + UpdateTaskRequest + MetricsRequest + MetricsResponse + WaitRequest + WaitResponse +*/ +package tasks + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/ptypes/empty" +import google_protobuf1 "github.com/gogo/protobuf/types" +import _ "github.com/gogo/protobuf/gogoproto" +import containerd_types "github.com/containerd/containerd/api/types" +import containerd_types1 "github.com/containerd/containerd/api/types" +import containerd_types2 "github.com/containerd/containerd/api/types" +import containerd_v1_types "github.com/containerd/containerd/api/types/task" +import _ "github.com/gogo/protobuf/types" + +import time "time" +import github_com_opencontainers_go_digest "github.com/opencontainers/go-digest" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +import github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type CreateTaskRequest struct { + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + // RootFS provides the pre-chroot mounts to perform in the shim before + // executing the container task. + // + // These are for mounts that cannot be performed in the user namespace. + // Typically, these mounts should be resolved from snapshots specified on + // the container object. + Rootfs []*containerd_types.Mount `protobuf:"bytes,3,rep,name=rootfs" json:"rootfs,omitempty"` + Stdin string `protobuf:"bytes,4,opt,name=stdin,proto3" json:"stdin,omitempty"` + Stdout string `protobuf:"bytes,5,opt,name=stdout,proto3" json:"stdout,omitempty"` + Stderr string `protobuf:"bytes,6,opt,name=stderr,proto3" json:"stderr,omitempty"` + Terminal bool `protobuf:"varint,7,opt,name=terminal,proto3" json:"terminal,omitempty"` + Checkpoint *containerd_types2.Descriptor `protobuf:"bytes,8,opt,name=checkpoint" json:"checkpoint,omitempty"` + Options *google_protobuf1.Any `protobuf:"bytes,9,opt,name=options" json:"options,omitempty"` +} + +func (m *CreateTaskRequest) Reset() { *m = CreateTaskRequest{} } +func (*CreateTaskRequest) ProtoMessage() {} +func (*CreateTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{0} } + +type CreateTaskResponse struct { + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` +} + +func (m *CreateTaskResponse) Reset() { *m = CreateTaskResponse{} } +func (*CreateTaskResponse) ProtoMessage() {} +func (*CreateTaskResponse) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{1} } + +type StartRequest struct { + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` +} + +func (m *StartRequest) Reset() { *m = StartRequest{} } +func (*StartRequest) ProtoMessage() {} +func (*StartRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{2} } + +type StartResponse struct { + Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` +} + +func (m *StartResponse) Reset() { *m = StartResponse{} } +func (*StartResponse) ProtoMessage() {} +func (*StartResponse) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{3} } + +type DeleteTaskRequest struct { + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` +} + +func (m *DeleteTaskRequest) Reset() { *m = DeleteTaskRequest{} } +func (*DeleteTaskRequest) ProtoMessage() {} +func (*DeleteTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{4} } + +type DeleteResponse struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` + ExitStatus uint32 `protobuf:"varint,3,opt,name=exit_status,json=exitStatus,proto3" 
json:"exit_status,omitempty"` + ExitedAt time.Time `protobuf:"bytes,4,opt,name=exited_at,json=exitedAt,stdtime" json:"exited_at"` +} + +func (m *DeleteResponse) Reset() { *m = DeleteResponse{} } +func (*DeleteResponse) ProtoMessage() {} +func (*DeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{5} } + +type DeleteProcessRequest struct { + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` +} + +func (m *DeleteProcessRequest) Reset() { *m = DeleteProcessRequest{} } +func (*DeleteProcessRequest) ProtoMessage() {} +func (*DeleteProcessRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{6} } + +type GetRequest struct { + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` +} + +func (m *GetRequest) Reset() { *m = GetRequest{} } +func (*GetRequest) ProtoMessage() {} +func (*GetRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{7} } + +type GetResponse struct { + Process *containerd_v1_types.Process `protobuf:"bytes,1,opt,name=process" json:"process,omitempty"` +} + +func (m *GetResponse) Reset() { *m = GetResponse{} } +func (*GetResponse) ProtoMessage() {} +func (*GetResponse) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{8} } + +type ListTasksRequest struct { + Filter string `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` +} + +func (m *ListTasksRequest) Reset() { *m = ListTasksRequest{} } +func (*ListTasksRequest) ProtoMessage() {} +func (*ListTasksRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{9} } + +type ListTasksResponse struct { + Tasks []*containerd_v1_types.Process `protobuf:"bytes,1,rep,name=tasks" json:"tasks,omitempty"` +} + +func (m *ListTasksResponse) Reset() { *m = ListTasksResponse{} } +func (*ListTasksResponse) ProtoMessage() {} +func (*ListTasksResponse) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{10} } + +type KillRequest struct { + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` + Signal uint32 `protobuf:"varint,3,opt,name=signal,proto3" json:"signal,omitempty"` + All bool `protobuf:"varint,4,opt,name=all,proto3" json:"all,omitempty"` +} + +func (m *KillRequest) Reset() { *m = KillRequest{} } +func (*KillRequest) ProtoMessage() {} +func (*KillRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{11} } + +type ExecProcessRequest struct { + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + Stdin string `protobuf:"bytes,2,opt,name=stdin,proto3" json:"stdin,omitempty"` + Stdout string `protobuf:"bytes,3,opt,name=stdout,proto3" json:"stdout,omitempty"` + Stderr string `protobuf:"bytes,4,opt,name=stderr,proto3" json:"stderr,omitempty"` + Terminal bool `protobuf:"varint,5,opt,name=terminal,proto3" json:"terminal,omitempty"` + // Spec for starting a process in the target container. + // + // For runc, this is a process spec, for example. 
+ Spec *google_protobuf1.Any `protobuf:"bytes,6,opt,name=spec" json:"spec,omitempty"` + // id of the exec process + ExecID string `protobuf:"bytes,7,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` +} + +func (m *ExecProcessRequest) Reset() { *m = ExecProcessRequest{} } +func (*ExecProcessRequest) ProtoMessage() {} +func (*ExecProcessRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{12} } + +type ExecProcessResponse struct { +} + +func (m *ExecProcessResponse) Reset() { *m = ExecProcessResponse{} } +func (*ExecProcessResponse) ProtoMessage() {} +func (*ExecProcessResponse) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{13} } + +type ResizePtyRequest struct { + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` + Width uint32 `protobuf:"varint,3,opt,name=width,proto3" json:"width,omitempty"` + Height uint32 `protobuf:"varint,4,opt,name=height,proto3" json:"height,omitempty"` +} + +func (m *ResizePtyRequest) Reset() { *m = ResizePtyRequest{} } +func (*ResizePtyRequest) ProtoMessage() {} +func (*ResizePtyRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{14} } + +type CloseIORequest struct { + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` + Stdin bool `protobuf:"varint,3,opt,name=stdin,proto3" json:"stdin,omitempty"` +} + +func (m *CloseIORequest) Reset() { *m = CloseIORequest{} } +func (*CloseIORequest) ProtoMessage() {} +func (*CloseIORequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{15} } + +type PauseTaskRequest struct { + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` +} + +func (m *PauseTaskRequest) Reset() { *m = PauseTaskRequest{} } +func (*PauseTaskRequest) ProtoMessage() {} +func (*PauseTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{16} } + +type ResumeTaskRequest struct { + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` +} + +func (m *ResumeTaskRequest) Reset() { *m = ResumeTaskRequest{} } +func (*ResumeTaskRequest) ProtoMessage() {} +func (*ResumeTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{17} } + +type ListPidsRequest struct { + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` +} + +func (m *ListPidsRequest) Reset() { *m = ListPidsRequest{} } +func (*ListPidsRequest) ProtoMessage() {} +func (*ListPidsRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{18} } + +type ListPidsResponse struct { + // Processes includes the process ID and additional process information + Processes []*containerd_v1_types.ProcessInfo `protobuf:"bytes,1,rep,name=processes" json:"processes,omitempty"` +} + +func (m *ListPidsResponse) Reset() { *m = ListPidsResponse{} } +func (*ListPidsResponse) ProtoMessage() {} +func (*ListPidsResponse) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{19} } + +type CheckpointTaskRequest struct { + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + ParentCheckpoint 
github_com_opencontainers_go_digest.Digest `protobuf:"bytes,2,opt,name=parent_checkpoint,json=parentCheckpoint,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"parent_checkpoint"` + Options *google_protobuf1.Any `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` +} + +func (m *CheckpointTaskRequest) Reset() { *m = CheckpointTaskRequest{} } +func (*CheckpointTaskRequest) ProtoMessage() {} +func (*CheckpointTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{20} } + +type CheckpointTaskResponse struct { + Descriptors []*containerd_types2.Descriptor `protobuf:"bytes,1,rep,name=descriptors" json:"descriptors,omitempty"` +} + +func (m *CheckpointTaskResponse) Reset() { *m = CheckpointTaskResponse{} } +func (*CheckpointTaskResponse) ProtoMessage() {} +func (*CheckpointTaskResponse) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{21} } + +type UpdateTaskRequest struct { + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + Resources *google_protobuf1.Any `protobuf:"bytes,2,opt,name=resources" json:"resources,omitempty"` +} + +func (m *UpdateTaskRequest) Reset() { *m = UpdateTaskRequest{} } +func (*UpdateTaskRequest) ProtoMessage() {} +func (*UpdateTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{22} } + +type MetricsRequest struct { + Filters []string `protobuf:"bytes,1,rep,name=filters" json:"filters,omitempty"` +} + +func (m *MetricsRequest) Reset() { *m = MetricsRequest{} } +func (*MetricsRequest) ProtoMessage() {} +func (*MetricsRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{23} } + +type MetricsResponse struct { + Metrics []*containerd_types1.Metric `protobuf:"bytes,1,rep,name=metrics" json:"metrics,omitempty"` +} + +func (m *MetricsResponse) Reset() { *m = MetricsResponse{} } +func (*MetricsResponse) ProtoMessage() {} +func (*MetricsResponse) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{24} } + +type WaitRequest struct { + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` +} + +func (m *WaitRequest) Reset() { *m = WaitRequest{} } +func (*WaitRequest) ProtoMessage() {} +func (*WaitRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{25} } + +type WaitResponse struct { + ExitStatus uint32 `protobuf:"varint,1,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` + ExitedAt time.Time `protobuf:"bytes,2,opt,name=exited_at,json=exitedAt,stdtime" json:"exited_at"` +} + +func (m *WaitResponse) Reset() { *m = WaitResponse{} } +func (*WaitResponse) ProtoMessage() {} +func (*WaitResponse) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{26} } + +func init() { + proto.RegisterType((*CreateTaskRequest)(nil), "containerd.services.tasks.v1.CreateTaskRequest") + proto.RegisterType((*CreateTaskResponse)(nil), "containerd.services.tasks.v1.CreateTaskResponse") + proto.RegisterType((*StartRequest)(nil), "containerd.services.tasks.v1.StartRequest") + proto.RegisterType((*StartResponse)(nil), "containerd.services.tasks.v1.StartResponse") + proto.RegisterType((*DeleteTaskRequest)(nil), "containerd.services.tasks.v1.DeleteTaskRequest") + proto.RegisterType((*DeleteResponse)(nil), "containerd.services.tasks.v1.DeleteResponse") + proto.RegisterType((*DeleteProcessRequest)(nil), 
"containerd.services.tasks.v1.DeleteProcessRequest") + proto.RegisterType((*GetRequest)(nil), "containerd.services.tasks.v1.GetRequest") + proto.RegisterType((*GetResponse)(nil), "containerd.services.tasks.v1.GetResponse") + proto.RegisterType((*ListTasksRequest)(nil), "containerd.services.tasks.v1.ListTasksRequest") + proto.RegisterType((*ListTasksResponse)(nil), "containerd.services.tasks.v1.ListTasksResponse") + proto.RegisterType((*KillRequest)(nil), "containerd.services.tasks.v1.KillRequest") + proto.RegisterType((*ExecProcessRequest)(nil), "containerd.services.tasks.v1.ExecProcessRequest") + proto.RegisterType((*ExecProcessResponse)(nil), "containerd.services.tasks.v1.ExecProcessResponse") + proto.RegisterType((*ResizePtyRequest)(nil), "containerd.services.tasks.v1.ResizePtyRequest") + proto.RegisterType((*CloseIORequest)(nil), "containerd.services.tasks.v1.CloseIORequest") + proto.RegisterType((*PauseTaskRequest)(nil), "containerd.services.tasks.v1.PauseTaskRequest") + proto.RegisterType((*ResumeTaskRequest)(nil), "containerd.services.tasks.v1.ResumeTaskRequest") + proto.RegisterType((*ListPidsRequest)(nil), "containerd.services.tasks.v1.ListPidsRequest") + proto.RegisterType((*ListPidsResponse)(nil), "containerd.services.tasks.v1.ListPidsResponse") + proto.RegisterType((*CheckpointTaskRequest)(nil), "containerd.services.tasks.v1.CheckpointTaskRequest") + proto.RegisterType((*CheckpointTaskResponse)(nil), "containerd.services.tasks.v1.CheckpointTaskResponse") + proto.RegisterType((*UpdateTaskRequest)(nil), "containerd.services.tasks.v1.UpdateTaskRequest") + proto.RegisterType((*MetricsRequest)(nil), "containerd.services.tasks.v1.MetricsRequest") + proto.RegisterType((*MetricsResponse)(nil), "containerd.services.tasks.v1.MetricsResponse") + proto.RegisterType((*WaitRequest)(nil), "containerd.services.tasks.v1.WaitRequest") + proto.RegisterType((*WaitResponse)(nil), "containerd.services.tasks.v1.WaitResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Tasks service + +type TasksClient interface { + // Create a task. + Create(ctx context.Context, in *CreateTaskRequest, opts ...grpc.CallOption) (*CreateTaskResponse, error) + // Start a process. + Start(ctx context.Context, in *StartRequest, opts ...grpc.CallOption) (*StartResponse, error) + // Delete a task and on disk state. + Delete(ctx context.Context, in *DeleteTaskRequest, opts ...grpc.CallOption) (*DeleteResponse, error) + DeleteProcess(ctx context.Context, in *DeleteProcessRequest, opts ...grpc.CallOption) (*DeleteResponse, error) + Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) + List(ctx context.Context, in *ListTasksRequest, opts ...grpc.CallOption) (*ListTasksResponse, error) + // Kill a task or process. 
+ Kill(ctx context.Context, in *KillRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) + Exec(ctx context.Context, in *ExecProcessRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) + ResizePty(ctx context.Context, in *ResizePtyRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) + CloseIO(ctx context.Context, in *CloseIORequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) + Pause(ctx context.Context, in *PauseTaskRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) + Resume(ctx context.Context, in *ResumeTaskRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) + ListPids(ctx context.Context, in *ListPidsRequest, opts ...grpc.CallOption) (*ListPidsResponse, error) + Checkpoint(ctx context.Context, in *CheckpointTaskRequest, opts ...grpc.CallOption) (*CheckpointTaskResponse, error) + Update(ctx context.Context, in *UpdateTaskRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) + Metrics(ctx context.Context, in *MetricsRequest, opts ...grpc.CallOption) (*MetricsResponse, error) + Wait(ctx context.Context, in *WaitRequest, opts ...grpc.CallOption) (*WaitResponse, error) +} + +type tasksClient struct { + cc *grpc.ClientConn +} + +func NewTasksClient(cc *grpc.ClientConn) TasksClient { + return &tasksClient{cc} +} + +func (c *tasksClient) Create(ctx context.Context, in *CreateTaskRequest, opts ...grpc.CallOption) (*CreateTaskResponse, error) { + out := new(CreateTaskResponse) + err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Create", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) Start(ctx context.Context, in *StartRequest, opts ...grpc.CallOption) (*StartResponse, error) { + out := new(StartResponse) + err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Start", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) Delete(ctx context.Context, in *DeleteTaskRequest, opts ...grpc.CallOption) (*DeleteResponse, error) { + out := new(DeleteResponse) + err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Delete", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) DeleteProcess(ctx context.Context, in *DeleteProcessRequest, opts ...grpc.CallOption) (*DeleteResponse, error) { + out := new(DeleteResponse) + err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/DeleteProcess", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) { + out := new(GetResponse) + err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Get", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) List(ctx context.Context, in *ListTasksRequest, opts ...grpc.CallOption) (*ListTasksResponse, error) { + out := new(ListTasksResponse) + err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/List", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) Kill(ctx context.Context, in *KillRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) { + out := new(google_protobuf.Empty) + err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Kill", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) Exec(ctx context.Context, in *ExecProcessRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) { + out := new(google_protobuf.Empty) + err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Exec", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) ResizePty(ctx context.Context, in *ResizePtyRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) { + out := new(google_protobuf.Empty) + err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/ResizePty", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) CloseIO(ctx context.Context, in *CloseIORequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) { + out := new(google_protobuf.Empty) + err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/CloseIO", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) Pause(ctx context.Context, in *PauseTaskRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) { + out := new(google_protobuf.Empty) + err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Pause", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) Resume(ctx context.Context, in *ResumeTaskRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) { + out := new(google_protobuf.Empty) + err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Resume", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) ListPids(ctx context.Context, in *ListPidsRequest, opts ...grpc.CallOption) (*ListPidsResponse, error) { + out := new(ListPidsResponse) + err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/ListPids", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) Checkpoint(ctx context.Context, in *CheckpointTaskRequest, opts ...grpc.CallOption) (*CheckpointTaskResponse, error) { + out := new(CheckpointTaskResponse) + err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Checkpoint", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) Update(ctx context.Context, in *UpdateTaskRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) { + out := new(google_protobuf.Empty) + err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Update", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) Metrics(ctx context.Context, in *MetricsRequest, opts ...grpc.CallOption) (*MetricsResponse, error) { + out := new(MetricsResponse) + err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Metrics", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) Wait(ctx context.Context, in *WaitRequest, opts ...grpc.CallOption) (*WaitResponse, error) { + out := new(WaitResponse) + err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Wait", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Tasks service + +type TasksServer interface { + // Create a task. + Create(context.Context, *CreateTaskRequest) (*CreateTaskResponse, error) + // Start a process. 
+ Start(context.Context, *StartRequest) (*StartResponse, error) + // Delete a task and on disk state. + Delete(context.Context, *DeleteTaskRequest) (*DeleteResponse, error) + DeleteProcess(context.Context, *DeleteProcessRequest) (*DeleteResponse, error) + Get(context.Context, *GetRequest) (*GetResponse, error) + List(context.Context, *ListTasksRequest) (*ListTasksResponse, error) + // Kill a task or process. + Kill(context.Context, *KillRequest) (*google_protobuf.Empty, error) + Exec(context.Context, *ExecProcessRequest) (*google_protobuf.Empty, error) + ResizePty(context.Context, *ResizePtyRequest) (*google_protobuf.Empty, error) + CloseIO(context.Context, *CloseIORequest) (*google_protobuf.Empty, error) + Pause(context.Context, *PauseTaskRequest) (*google_protobuf.Empty, error) + Resume(context.Context, *ResumeTaskRequest) (*google_protobuf.Empty, error) + ListPids(context.Context, *ListPidsRequest) (*ListPidsResponse, error) + Checkpoint(context.Context, *CheckpointTaskRequest) (*CheckpointTaskResponse, error) + Update(context.Context, *UpdateTaskRequest) (*google_protobuf.Empty, error) + Metrics(context.Context, *MetricsRequest) (*MetricsResponse, error) + Wait(context.Context, *WaitRequest) (*WaitResponse, error) +} + +func RegisterTasksServer(s *grpc.Server, srv TasksServer) { + s.RegisterService(&_Tasks_serviceDesc, srv) +} + +func _Tasks_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).Create(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/Create", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).Create(ctx, req.(*CreateTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Tasks_Start_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StartRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).Start(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/Start", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).Start(ctx, req.(*StartRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Tasks_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).Delete(ctx, req.(*DeleteTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Tasks_DeleteProcess_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteProcessRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(TasksServer).DeleteProcess(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/DeleteProcess", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).DeleteProcess(ctx, req.(*DeleteProcessRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Tasks_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).Get(ctx, req.(*GetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Tasks_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListTasksRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).List(ctx, req.(*ListTasksRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Tasks_Kill_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(KillRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).Kill(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/Kill", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).Kill(ctx, req.(*KillRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Tasks_Exec_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ExecProcessRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).Exec(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/Exec", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).Exec(ctx, req.(*ExecProcessRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Tasks_ResizePty_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ResizePtyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).ResizePty(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/ResizePty", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).ResizePty(ctx, req.(*ResizePtyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Tasks_CloseIO_Handler(srv interface{}, ctx context.Context, dec func(interface{}) 
error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CloseIORequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).CloseIO(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/CloseIO", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).CloseIO(ctx, req.(*CloseIORequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Tasks_Pause_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PauseTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).Pause(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/Pause", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).Pause(ctx, req.(*PauseTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Tasks_Resume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ResumeTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).Resume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/Resume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).Resume(ctx, req.(*ResumeTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Tasks_ListPids_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListPidsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).ListPids(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/ListPids", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).ListPids(ctx, req.(*ListPidsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Tasks_Checkpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CheckpointTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).Checkpoint(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/Checkpoint", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).Checkpoint(ctx, req.(*CheckpointTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Tasks_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).Update(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/Update", + } + handler := func(ctx context.Context, req 
interface{}) (interface{}, error) { + return srv.(TasksServer).Update(ctx, req.(*UpdateTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Tasks_Metrics_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MetricsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).Metrics(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/Metrics", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).Metrics(ctx, req.(*MetricsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Tasks_Wait_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(WaitRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).Wait(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/Wait", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).Wait(ctx, req.(*WaitRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Tasks_serviceDesc = grpc.ServiceDesc{ + ServiceName: "containerd.services.tasks.v1.Tasks", + HandlerType: (*TasksServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Create", + Handler: _Tasks_Create_Handler, + }, + { + MethodName: "Start", + Handler: _Tasks_Start_Handler, + }, + { + MethodName: "Delete", + Handler: _Tasks_Delete_Handler, + }, + { + MethodName: "DeleteProcess", + Handler: _Tasks_DeleteProcess_Handler, + }, + { + MethodName: "Get", + Handler: _Tasks_Get_Handler, + }, + { + MethodName: "List", + Handler: _Tasks_List_Handler, + }, + { + MethodName: "Kill", + Handler: _Tasks_Kill_Handler, + }, + { + MethodName: "Exec", + Handler: _Tasks_Exec_Handler, + }, + { + MethodName: "ResizePty", + Handler: _Tasks_ResizePty_Handler, + }, + { + MethodName: "CloseIO", + Handler: _Tasks_CloseIO_Handler, + }, + { + MethodName: "Pause", + Handler: _Tasks_Pause_Handler, + }, + { + MethodName: "Resume", + Handler: _Tasks_Resume_Handler, + }, + { + MethodName: "ListPids", + Handler: _Tasks_ListPids_Handler, + }, + { + MethodName: "Checkpoint", + Handler: _Tasks_Checkpoint_Handler, + }, + { + MethodName: "Update", + Handler: _Tasks_Update_Handler, + }, + { + MethodName: "Metrics", + Handler: _Tasks_Metrics_Handler, + }, + { + MethodName: "Wait", + Handler: _Tasks_Wait_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/containerd/containerd/api/services/tasks/v1/tasks.proto", +} + +func (m *CreateTaskRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateTaskRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ContainerID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) + i += copy(dAtA[i:], m.ContainerID) + } + if len(m.Rootfs) > 0 { + for _, msg := range m.Rootfs { + dAtA[i] = 0x1a + i++ + i = encodeVarintTasks(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Stdin) > 0 { + 
dAtA[i] = 0x22 + i++ + i = encodeVarintTasks(dAtA, i, uint64(len(m.Stdin))) + i += copy(dAtA[i:], m.Stdin) + } + if len(m.Stdout) > 0 { + dAtA[i] = 0x2a + i++ + i = encodeVarintTasks(dAtA, i, uint64(len(m.Stdout))) + i += copy(dAtA[i:], m.Stdout) + } + if len(m.Stderr) > 0 { + dAtA[i] = 0x32 + i++ + i = encodeVarintTasks(dAtA, i, uint64(len(m.Stderr))) + i += copy(dAtA[i:], m.Stderr) + } + if m.Terminal { + dAtA[i] = 0x38 + i++ + if m.Terminal { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Checkpoint != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintTasks(dAtA, i, uint64(m.Checkpoint.Size())) + n1, err := m.Checkpoint.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.Options != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintTasks(dAtA, i, uint64(m.Options.Size())) + n2, err := m.Options.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + return i, nil +} + +func (m *CreateTaskResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateTaskResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ContainerID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) + i += copy(dAtA[i:], m.ContainerID) + } + if m.Pid != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintTasks(dAtA, i, uint64(m.Pid)) + } + return i, nil +} + +func (m *StartRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StartRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ContainerID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) + i += copy(dAtA[i:], m.ContainerID) + } + if len(m.ExecID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID))) + i += copy(dAtA[i:], m.ExecID) + } + return i, nil +} + +func (m *StartResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StartResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Pid != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTasks(dAtA, i, uint64(m.Pid)) + } + return i, nil +} + +func (m *DeleteTaskRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteTaskRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ContainerID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) + i += copy(dAtA[i:], m.ContainerID) + } + return i, nil +} + +func (m *DeleteResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTasks(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + if m.Pid != 0 { + dAtA[i] = 0x10 + i++ + i = 
encodeVarintTasks(dAtA, i, uint64(m.Pid)) + } + if m.ExitStatus != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintTasks(dAtA, i, uint64(m.ExitStatus)) + } + dAtA[i] = 0x22 + i++ + i = encodeVarintTasks(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt))) + n3, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *DeleteProcessRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteProcessRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ContainerID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) + i += copy(dAtA[i:], m.ContainerID) + } + if len(m.ExecID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID))) + i += copy(dAtA[i:], m.ExecID) + } + return i, nil +} + +func (m *GetRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ContainerID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) + i += copy(dAtA[i:], m.ContainerID) + } + if len(m.ExecID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID))) + i += copy(dAtA[i:], m.ExecID) + } + return i, nil +} + +func (m *GetResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Process != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintTasks(dAtA, i, uint64(m.Process.Size())) + n4, err := m.Process.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + return i, nil +} + +func (m *ListTasksRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListTasksRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Filter) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTasks(dAtA, i, uint64(len(m.Filter))) + i += copy(dAtA[i:], m.Filter) + } + return i, nil +} + +func (m *ListTasksResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListTasksResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Tasks) > 0 { + for _, msg := range m.Tasks { + dAtA[i] = 0xa + i++ + i = encodeVarintTasks(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *KillRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KillRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l 
+ if len(m.ContainerID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) + i += copy(dAtA[i:], m.ContainerID) + } + if len(m.ExecID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID))) + i += copy(dAtA[i:], m.ExecID) + } + if m.Signal != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintTasks(dAtA, i, uint64(m.Signal)) + } + if m.All { + dAtA[i] = 0x20 + i++ + if m.All { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *ExecProcessRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExecProcessRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ContainerID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) + i += copy(dAtA[i:], m.ContainerID) + } + if len(m.Stdin) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTasks(dAtA, i, uint64(len(m.Stdin))) + i += copy(dAtA[i:], m.Stdin) + } + if len(m.Stdout) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTasks(dAtA, i, uint64(len(m.Stdout))) + i += copy(dAtA[i:], m.Stdout) + } + if len(m.Stderr) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintTasks(dAtA, i, uint64(len(m.Stderr))) + i += copy(dAtA[i:], m.Stderr) + } + if m.Terminal { + dAtA[i] = 0x28 + i++ + if m.Terminal { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Spec != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintTasks(dAtA, i, uint64(m.Spec.Size())) + n5, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + if len(m.ExecID) > 0 { + dAtA[i] = 0x3a + i++ + i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID))) + i += copy(dAtA[i:], m.ExecID) + } + return i, nil +} + +func (m *ExecProcessResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExecProcessResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *ResizePtyRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResizePtyRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ContainerID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) + i += copy(dAtA[i:], m.ContainerID) + } + if len(m.ExecID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID))) + i += copy(dAtA[i:], m.ExecID) + } + if m.Width != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintTasks(dAtA, i, uint64(m.Width)) + } + if m.Height != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintTasks(dAtA, i, uint64(m.Height)) + } + return i, nil +} + +func (m *CloseIORequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CloseIORequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ContainerID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) + i += copy(dAtA[i:], m.ContainerID) + } + if len(m.ExecID) > 0 { + dAtA[i] = 0x12 + 
i++ + i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID))) + i += copy(dAtA[i:], m.ExecID) + } + if m.Stdin { + dAtA[i] = 0x18 + i++ + if m.Stdin { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *PauseTaskRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PauseTaskRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ContainerID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) + i += copy(dAtA[i:], m.ContainerID) + } + return i, nil +} + +func (m *ResumeTaskRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResumeTaskRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ContainerID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) + i += copy(dAtA[i:], m.ContainerID) + } + return i, nil +} + +func (m *ListPidsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListPidsRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ContainerID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) + i += copy(dAtA[i:], m.ContainerID) + } + return i, nil +} + +func (m *ListPidsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListPidsResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Processes) > 0 { + for _, msg := range m.Processes { + dAtA[i] = 0xa + i++ + i = encodeVarintTasks(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *CheckpointTaskRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CheckpointTaskRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ContainerID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) + i += copy(dAtA[i:], m.ContainerID) + } + if len(m.ParentCheckpoint) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTasks(dAtA, i, uint64(len(m.ParentCheckpoint))) + i += copy(dAtA[i:], m.ParentCheckpoint) + } + if m.Options != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintTasks(dAtA, i, uint64(m.Options.Size())) + n6, err := m.Options.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + return i, nil +} + +func (m *CheckpointTaskResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CheckpointTaskResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Descriptors) > 0 { + for _, msg := range m.Descriptors { + dAtA[i] = 0xa + i++ + 
i = encodeVarintTasks(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *UpdateTaskRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateTaskRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ContainerID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) + i += copy(dAtA[i:], m.ContainerID) + } + if m.Resources != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintTasks(dAtA, i, uint64(m.Resources.Size())) + n7, err := m.Resources.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + return i, nil +} + +func (m *MetricsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricsRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Filters) > 0 { + for _, s := range m.Filters { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *MetricsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricsResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Metrics) > 0 { + for _, msg := range m.Metrics { + dAtA[i] = 0xa + i++ + i = encodeVarintTasks(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *WaitRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WaitRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ContainerID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) + i += copy(dAtA[i:], m.ContainerID) + } + if len(m.ExecID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID))) + i += copy(dAtA[i:], m.ExecID) + } + return i, nil +} + +func (m *WaitResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WaitResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ExitStatus != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTasks(dAtA, i, uint64(m.ExitStatus)) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintTasks(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt))) + n8, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + return i, nil +} + +func encodeFixed64Tasks(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 
40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Tasks(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintTasks(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *CreateTaskRequest) Size() (n int) { + var l int + _ = l + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sovTasks(uint64(l)) + } + if len(m.Rootfs) > 0 { + for _, e := range m.Rootfs { + l = e.Size() + n += 1 + l + sovTasks(uint64(l)) + } + } + l = len(m.Stdin) + if l > 0 { + n += 1 + l + sovTasks(uint64(l)) + } + l = len(m.Stdout) + if l > 0 { + n += 1 + l + sovTasks(uint64(l)) + } + l = len(m.Stderr) + if l > 0 { + n += 1 + l + sovTasks(uint64(l)) + } + if m.Terminal { + n += 2 + } + if m.Checkpoint != nil { + l = m.Checkpoint.Size() + n += 1 + l + sovTasks(uint64(l)) + } + if m.Options != nil { + l = m.Options.Size() + n += 1 + l + sovTasks(uint64(l)) + } + return n +} + +func (m *CreateTaskResponse) Size() (n int) { + var l int + _ = l + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sovTasks(uint64(l)) + } + if m.Pid != 0 { + n += 1 + sovTasks(uint64(m.Pid)) + } + return n +} + +func (m *StartRequest) Size() (n int) { + var l int + _ = l + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sovTasks(uint64(l)) + } + l = len(m.ExecID) + if l > 0 { + n += 1 + l + sovTasks(uint64(l)) + } + return n +} + +func (m *StartResponse) Size() (n int) { + var l int + _ = l + if m.Pid != 0 { + n += 1 + sovTasks(uint64(m.Pid)) + } + return n +} + +func (m *DeleteTaskRequest) Size() (n int) { + var l int + _ = l + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sovTasks(uint64(l)) + } + return n +} + +func (m *DeleteResponse) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovTasks(uint64(l)) + } + if m.Pid != 0 { + n += 1 + sovTasks(uint64(m.Pid)) + } + if m.ExitStatus != 0 { + n += 1 + sovTasks(uint64(m.ExitStatus)) + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt) + n += 1 + l + sovTasks(uint64(l)) + return n +} + +func (m *DeleteProcessRequest) Size() (n int) { + var l int + _ = l + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sovTasks(uint64(l)) + } + l = len(m.ExecID) + if l > 0 { + n += 1 + l + sovTasks(uint64(l)) + } + return n +} + +func (m *GetRequest) Size() (n int) { + var l int + _ = l + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sovTasks(uint64(l)) + } + l = len(m.ExecID) + if l > 0 { + n += 1 + l + sovTasks(uint64(l)) + } + return n +} + +func (m *GetResponse) Size() (n int) { + var l int + _ = l + if m.Process != nil { + l = m.Process.Size() + n += 1 + l + sovTasks(uint64(l)) + } + return n +} + +func (m *ListTasksRequest) Size() (n int) { + var l int + _ = l + l = len(m.Filter) + if l > 0 { + n += 1 + l + sovTasks(uint64(l)) + } + return n +} + +func (m *ListTasksResponse) Size() (n int) { + var l int + _ = l + if len(m.Tasks) > 0 { + for _, e := range m.Tasks { + l = e.Size() + n += 1 + l + sovTasks(uint64(l)) + } + } + return n +} + +func (m *KillRequest) Size() (n int) { + var l int + _ = l + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sovTasks(uint64(l)) + } + l = len(m.ExecID) + if l > 0 { + n += 1 + l + sovTasks(uint64(l)) + } + if m.Signal != 0 { + n += 1 + sovTasks(uint64(m.Signal)) + } + 
if m.All { + n += 2 + } + return n +} + +func (m *ExecProcessRequest) Size() (n int) { + var l int + _ = l + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sovTasks(uint64(l)) + } + l = len(m.Stdin) + if l > 0 { + n += 1 + l + sovTasks(uint64(l)) + } + l = len(m.Stdout) + if l > 0 { + n += 1 + l + sovTasks(uint64(l)) + } + l = len(m.Stderr) + if l > 0 { + n += 1 + l + sovTasks(uint64(l)) + } + if m.Terminal { + n += 2 + } + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovTasks(uint64(l)) + } + l = len(m.ExecID) + if l > 0 { + n += 1 + l + sovTasks(uint64(l)) + } + return n +} + +func (m *ExecProcessResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *ResizePtyRequest) Size() (n int) { + var l int + _ = l + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sovTasks(uint64(l)) + } + l = len(m.ExecID) + if l > 0 { + n += 1 + l + sovTasks(uint64(l)) + } + if m.Width != 0 { + n += 1 + sovTasks(uint64(m.Width)) + } + if m.Height != 0 { + n += 1 + sovTasks(uint64(m.Height)) + } + return n +} + +func (m *CloseIORequest) Size() (n int) { + var l int + _ = l + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sovTasks(uint64(l)) + } + l = len(m.ExecID) + if l > 0 { + n += 1 + l + sovTasks(uint64(l)) + } + if m.Stdin { + n += 2 + } + return n +} + +func (m *PauseTaskRequest) Size() (n int) { + var l int + _ = l + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sovTasks(uint64(l)) + } + return n +} + +func (m *ResumeTaskRequest) Size() (n int) { + var l int + _ = l + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sovTasks(uint64(l)) + } + return n +} + +func (m *ListPidsRequest) Size() (n int) { + var l int + _ = l + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sovTasks(uint64(l)) + } + return n +} + +func (m *ListPidsResponse) Size() (n int) { + var l int + _ = l + if len(m.Processes) > 0 { + for _, e := range m.Processes { + l = e.Size() + n += 1 + l + sovTasks(uint64(l)) + } + } + return n +} + +func (m *CheckpointTaskRequest) Size() (n int) { + var l int + _ = l + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sovTasks(uint64(l)) + } + l = len(m.ParentCheckpoint) + if l > 0 { + n += 1 + l + sovTasks(uint64(l)) + } + if m.Options != nil { + l = m.Options.Size() + n += 1 + l + sovTasks(uint64(l)) + } + return n +} + +func (m *CheckpointTaskResponse) Size() (n int) { + var l int + _ = l + if len(m.Descriptors) > 0 { + for _, e := range m.Descriptors { + l = e.Size() + n += 1 + l + sovTasks(uint64(l)) + } + } + return n +} + +func (m *UpdateTaskRequest) Size() (n int) { + var l int + _ = l + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sovTasks(uint64(l)) + } + if m.Resources != nil { + l = m.Resources.Size() + n += 1 + l + sovTasks(uint64(l)) + } + return n +} + +func (m *MetricsRequest) Size() (n int) { + var l int + _ = l + if len(m.Filters) > 0 { + for _, s := range m.Filters { + l = len(s) + n += 1 + l + sovTasks(uint64(l)) + } + } + return n +} + +func (m *MetricsResponse) Size() (n int) { + var l int + _ = l + if len(m.Metrics) > 0 { + for _, e := range m.Metrics { + l = e.Size() + n += 1 + l + sovTasks(uint64(l)) + } + } + return n +} + +func (m *WaitRequest) Size() (n int) { + var l int + _ = l + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sovTasks(uint64(l)) + } + l = len(m.ExecID) + if l > 0 { + n += 1 + l + sovTasks(uint64(l)) + } + return n +} + +func (m *WaitResponse) Size() (n int) { + var l int + _ = l + if m.ExitStatus != 0 { + n += 1 + sovTasks(uint64(m.ExitStatus)) + } + l = 
github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt) + n += 1 + l + sovTasks(uint64(l)) + return n +} + +func sovTasks(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozTasks(x uint64) (n int) { + return sovTasks(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *CreateTaskRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateTaskRequest{`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `Rootfs:` + strings.Replace(fmt.Sprintf("%v", this.Rootfs), "Mount", "containerd_types.Mount", 1) + `,`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, + `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, + `Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`, + `Checkpoint:` + strings.Replace(fmt.Sprintf("%v", this.Checkpoint), "Descriptor", "containerd_types2.Descriptor", 1) + `,`, + `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "google_protobuf1.Any", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CreateTaskResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateTaskResponse{`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, + `}`, + }, "") + return s +} +func (this *StartRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StartRequest{`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, + `}`, + }, "") + return s +} +func (this *StartResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StartResponse{`, + `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, + `}`, + }, "") + return s +} +func (this *DeleteTaskRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeleteTaskRequest{`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `}`, + }, "") + return s +} +func (this *DeleteResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeleteResponse{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, + `ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`, + `ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "google_protobuf3.Timestamp", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeleteProcessRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeleteProcessRequest{`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, + `}`, + }, "") + return s +} +func (this *GetRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetRequest{`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, + `}`, + }, "") + return s +} +func (this *GetResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetResponse{`, + `Process:` + strings.Replace(fmt.Sprintf("%v", this.Process), "Process", "containerd_v1_types.Process", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListTasksRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListTasksRequest{`, + `Filter:` + fmt.Sprintf("%v", this.Filter) 
+ `,`, + `}`, + }, "") + return s +} +func (this *ListTasksResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListTasksResponse{`, + `Tasks:` + strings.Replace(fmt.Sprintf("%v", this.Tasks), "Process", "containerd_v1_types.Process", 1) + `,`, + `}`, + }, "") + return s +} +func (this *KillRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&KillRequest{`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, + `Signal:` + fmt.Sprintf("%v", this.Signal) + `,`, + `All:` + fmt.Sprintf("%v", this.All) + `,`, + `}`, + }, "") + return s +} +func (this *ExecProcessRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExecProcessRequest{`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, + `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, + `Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`, + `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "Any", "google_protobuf1.Any", 1) + `,`, + `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, + `}`, + }, "") + return s +} +func (this *ExecProcessResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExecProcessResponse{`, + `}`, + }, "") + return s +} +func (this *ResizePtyRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResizePtyRequest{`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, + `Width:` + fmt.Sprintf("%v", this.Width) + `,`, + `Height:` + fmt.Sprintf("%v", this.Height) + `,`, + `}`, + }, "") + return s +} +func (this *CloseIORequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CloseIORequest{`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `}`, + }, "") + return s +} +func (this *PauseTaskRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PauseTaskRequest{`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `}`, + }, "") + return s +} +func (this *ResumeTaskRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResumeTaskRequest{`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `}`, + }, "") + return s +} +func (this *ListPidsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListPidsRequest{`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `}`, + }, "") + return s +} +func (this *ListPidsResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListPidsResponse{`, + `Processes:` + strings.Replace(fmt.Sprintf("%v", this.Processes), "ProcessInfo", "containerd_v1_types.ProcessInfo", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CheckpointTaskRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CheckpointTaskRequest{`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `ParentCheckpoint:` + fmt.Sprintf("%v", this.ParentCheckpoint) + `,`, + `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "google_protobuf1.Any", 1) + `,`, + `}`, + }, 
"") + return s +} +func (this *CheckpointTaskResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CheckpointTaskResponse{`, + `Descriptors:` + strings.Replace(fmt.Sprintf("%v", this.Descriptors), "Descriptor", "containerd_types2.Descriptor", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateTaskRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateTaskRequest{`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `Resources:` + strings.Replace(fmt.Sprintf("%v", this.Resources), "Any", "google_protobuf1.Any", 1) + `,`, + `}`, + }, "") + return s +} +func (this *MetricsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MetricsRequest{`, + `Filters:` + fmt.Sprintf("%v", this.Filters) + `,`, + `}`, + }, "") + return s +} +func (this *MetricsResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MetricsResponse{`, + `Metrics:` + strings.Replace(fmt.Sprintf("%v", this.Metrics), "Metric", "containerd_types1.Metric", 1) + `,`, + `}`, + }, "") + return s +} +func (this *WaitRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WaitRequest{`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, + `}`, + }, "") + return s +} +func (this *WaitResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WaitResponse{`, + `ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`, + `ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "google_protobuf3.Timestamp", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringTasks(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *CreateTaskRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateTaskRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTasks + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rootfs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTasks + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rootfs = append(m.Rootfs, &containerd_types.Mount{}) + if err := m.Rootfs[len(m.Rootfs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTasks + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stdin = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTasks + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stdout = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTasks + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stderr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Terminal", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Terminal = bool(v != 0) + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Checkpoint", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTasks + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Checkpoint == nil { + m.Checkpoint = &containerd_types2.Descriptor{} + } + if err := m.Checkpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTasks + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = &google_protobuf1.Any{} + } + if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTasks(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTasks + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateTaskResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateTaskResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateTaskResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTasks + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) + } + m.Pid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Pid |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTasks(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTasks + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StartRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StartRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StartRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTasks + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTasks + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExecID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTasks(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTasks + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StartResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StartResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StartResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) + } + m.Pid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Pid |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTasks(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTasks + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteTaskRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteTaskRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteTaskRequest: illegal 
tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTasks + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTasks(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTasks + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTasks + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) + } + m.Pid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Pid |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitStatus", wireType) + } + m.ExitStatus = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ExitStatus |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTasks + } + postIndex := iNdEx + msglen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTasks(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTasks + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteProcessRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteProcessRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteProcessRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTasks + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTasks + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExecID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTasks(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTasks + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + 
} + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTasks + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTasks + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExecID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTasks(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTasks + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Process", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTasks + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Process == nil { + m.Process = &containerd_v1_types.Process{} + } + if err := m.Process.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTasks(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTasks + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListTasksRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) 
+ wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListTasksRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListTasksRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTasks + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Filter = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTasks(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTasks + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListTasksResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListTasksResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListTasksResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tasks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTasks + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tasks = append(m.Tasks, &containerd_v1_types.Process{}) + if err := m.Tasks[len(m.Tasks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTasks(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTasks + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KillRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KillRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KillRequest: illegal tag %d (wire type %d)", 
fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTasks + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTasks + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExecID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Signal", wireType) + } + m.Signal = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Signal |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field All", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.All = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipTasks(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTasks + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecProcessRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecProcessRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecProcessRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTasks + } + postIndex 
:= iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTasks + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stdin = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTasks + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stdout = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTasks + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stderr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Terminal", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Terminal = bool(v != 0) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTasks + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &google_protobuf1.Any{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTasks + } + 
postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExecID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTasks(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTasks + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecProcessResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecProcessResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecProcessResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTasks(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTasks + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResizePtyRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResizePtyRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResizePtyRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTasks + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTasks + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExecID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Width", wireType) + } + m.Width 
= 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Width |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTasks(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTasks + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CloseIORequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CloseIORequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CloseIORequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTasks + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTasks + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExecID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Stdin = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipTasks(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTasks + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *PauseTaskRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PauseTaskRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PauseTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTasks + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTasks(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTasks + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResumeTaskRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResumeTaskRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResumeTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTasks + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTasks(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTasks + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListPidsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListPidsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListPidsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTasks + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTasks(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTasks + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListPidsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListPidsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListPidsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Processes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTasks + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Processes = append(m.Processes, &containerd_v1_types.ProcessInfo{}) + if err := m.Processes[len(m.Processes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTasks(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTasks + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CheckpointTaskRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := 
int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CheckpointTaskRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CheckpointTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTasks + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ParentCheckpoint", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTasks + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ParentCheckpoint = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTasks + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = &google_protobuf1.Any{} + } + if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTasks(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTasks + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CheckpointTaskResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CheckpointTaskResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CheckpointTaskResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Descriptors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTasks + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Descriptors = append(m.Descriptors, &containerd_types2.Descriptor{}) + if err := m.Descriptors[len(m.Descriptors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTasks(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTasks + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateTaskRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateTaskRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTasks + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTasks + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resources == nil { + m.Resources = &google_protobuf1.Any{} + } + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTasks(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTasks + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + 
wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTasks + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Filters = append(m.Filters, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTasks(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTasks + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTasks + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Metrics = append(m.Metrics, &containerd_types1.Metric{}) + if err := m.Metrics[len(m.Metrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTasks(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTasks + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WaitRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WaitRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WaitRequest: illegal tag %d (wire 
type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTasks + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTasks + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExecID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTasks(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTasks + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WaitResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WaitResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WaitResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitStatus", wireType) + } + m.ExitStatus = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ExitStatus |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTasks + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTasks + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTasks(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTasks + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTasks(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTasks + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTasks + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTasks + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthTasks + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTasks + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipTasks(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthTasks = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTasks = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("github.com/containerd/containerd/api/services/tasks/v1/tasks.proto", fileDescriptorTasks) +} + +var fileDescriptorTasks = []byte{ + // 1317 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x5b, 0x6f, 0x1b, 0x45, + 0x1b, 0xee, 0xfa, 0xec, 0xd7, 0x49, 0x9b, 0xec, 0x97, 0xe6, 0x33, 0x4b, 0x15, 0x87, 0xe5, 0xc6, + 0x04, 0xba, 0x4b, 0x5d, 0x54, 0x21, 0x5a, 0x21, 0x35, 0x07, 0x22, 0x0b, 0xaa, 0xa6, 0xdb, 0x72, + 0x50, 0x25, 0x14, 0xb6, 0xbb, 0x13, 0x67, 0x14, 0x7b, 0x67, 0xbb, 0x33, 0x4e, 0x1b, 0xb8, 0x80, + 0x9f, 0xd0, 0x5b, 0x6e, 0xf8, 0x3d, 0xb9, 0xe4, 0x12, 0xa1, 0x2a, 0x50, 0xff, 0x0b, 0xee, 0xd0, + 0x1c, 0x76, 0xb3, 0xb1, 0x63, 0xaf, 0x93, 0x34, 0xdc, 0xb4, 0x33, 0xb3, 0xef, 0x69, 0x9e, 0x79, + 0x0f, 0x8f, 0x03, 0xab, 0x1d, 0xcc, 0x76, 0xfb, 0xcf, 0x2c, 0x8f, 0xf4, 0x6c, 0x8f, 0x04, 0xcc, + 0xc5, 0x01, 0x8a, 0xfc, 0xf4, 0xd2, 0x0d, 0xb1, 0x4d, 0x51, 0xb4, 0x8f, 0x3d, 0x44, 0x6d, 0xe6, + 0xd2, 0x3d, 0x6a, 0xef, 0xdf, 0x92, 0x0b, 0x2b, 0x8c, 0x08, 0x23, 0xfa, 0x8d, 0x63, 0x69, 0x2b, + 0x96, 0xb4, 0xa4, 0xc0, 0xfe, 0x2d, 0xe3, 0xdd, 0x0e, 0x21, 0x9d, 0x2e, 0xb2, 0x85, 0xec, 0xb3, + 0xfe, 0x8e, 0x8d, 0x7a, 0x21, 0x3b, 0x90, 0xaa, 0xc6, 0x3b, 0xc3, 0x1f, 0xdd, 0x20, 0xfe, 0xb4, + 0xd0, 0x21, 0x1d, 0x22, 0x96, 0x36, 0x5f, 0xa9, 0xd3, 0x3b, 0x53, 0xc5, 0xcb, 0x0e, 0x42, 0x44, + 0xed, 0x1e, 0xe9, 0x07, 0x4c, 0xe9, 0x7d, 0x7a, 0x16, 0x3d, 0xc4, 0x22, 0xec, 0xa9, 0xdb, 0x19, + 0x77, 0xcf, 0xa0, 0xe9, 
0x23, 0xea, 0x45, 0x38, 0x64, 0x24, 0x52, 0xca, 0x9f, 0x9d, 0x41, 0x99, + 0x23, 0x26, 0xfe, 0x51, 0xba, 0x8d, 0x61, 0x6c, 0x18, 0xee, 0x21, 0xca, 0xdc, 0x5e, 0x28, 0x05, + 0xcc, 0xc3, 0x1c, 0xcc, 0xaf, 0x45, 0xc8, 0x65, 0xe8, 0x89, 0x4b, 0xf7, 0x1c, 0xf4, 0xbc, 0x8f, + 0x28, 0xd3, 0x5b, 0x30, 0x93, 0x98, 0xdf, 0xc6, 0x7e, 0x5d, 0x5b, 0xd6, 0x9a, 0xd5, 0xd5, 0x6b, + 0x83, 0xa3, 0x46, 0x6d, 0x2d, 0x3e, 0x6f, 0xaf, 0x3b, 0xb5, 0x44, 0xa8, 0xed, 0xeb, 0x36, 0x94, + 0x22, 0x42, 0xd8, 0x0e, 0xad, 0xe7, 0x97, 0xf3, 0xcd, 0x5a, 0xeb, 0xff, 0x56, 0xea, 0x49, 0x45, + 0x74, 0xd6, 0x03, 0x0e, 0xa6, 0xa3, 0xc4, 0xf4, 0x05, 0x28, 0x52, 0xe6, 0xe3, 0xa0, 0x5e, 0xe0, + 0xd6, 0x1d, 0xb9, 0xd1, 0x17, 0xa1, 0x44, 0x99, 0x4f, 0xfa, 0xac, 0x5e, 0x14, 0xc7, 0x6a, 0xa7, + 0xce, 0x51, 0x14, 0xd5, 0x4b, 0xc9, 0x39, 0x8a, 0x22, 0xdd, 0x80, 0x0a, 0x43, 0x51, 0x0f, 0x07, + 0x6e, 0xb7, 0x5e, 0x5e, 0xd6, 0x9a, 0x15, 0x27, 0xd9, 0xeb, 0xf7, 0x00, 0xbc, 0x5d, 0xe4, 0xed, + 0x85, 0x04, 0x07, 0xac, 0x5e, 0x59, 0xd6, 0x9a, 0xb5, 0xd6, 0x8d, 0xd1, 0xb0, 0xd6, 0x13, 0xc4, + 0x9d, 0x94, 0xbc, 0x6e, 0x41, 0x99, 0x84, 0x0c, 0x93, 0x80, 0xd6, 0xab, 0x42, 0x75, 0xc1, 0x92, + 0x68, 0x5a, 0x31, 0x9a, 0xd6, 0xfd, 0xe0, 0xc0, 0x89, 0x85, 0xcc, 0xa7, 0xa0, 0xa7, 0x91, 0xa4, + 0x21, 0x09, 0x28, 0x3a, 0x17, 0x94, 0x73, 0x90, 0x0f, 0xb1, 0x5f, 0xcf, 0x2d, 0x6b, 0xcd, 0x59, + 0x87, 0x2f, 0xcd, 0x0e, 0xcc, 0x3c, 0x66, 0x6e, 0xc4, 0x2e, 0xf2, 0x40, 0xef, 0x43, 0x19, 0xbd, + 0x44, 0xde, 0xb6, 0xb2, 0x5c, 0x5d, 0x85, 0xc1, 0x51, 0xa3, 0xb4, 0xf1, 0x12, 0x79, 0xed, 0x75, + 0xa7, 0xc4, 0x3f, 0xb5, 0x7d, 0xf3, 0x3d, 0x98, 0x55, 0x8e, 0x54, 0xfc, 0x2a, 0x16, 0xed, 0x38, + 0x96, 0x4d, 0x98, 0x5f, 0x47, 0x5d, 0x74, 0xe1, 0x8c, 0x31, 0x7f, 0xd3, 0xe0, 0xaa, 0xb4, 0x94, + 0x78, 0x5b, 0x84, 0x5c, 0xa2, 0x5c, 0x1a, 0x1c, 0x35, 0x72, 0xed, 0x75, 0x27, 0x87, 0x4f, 0x41, + 0x44, 0x6f, 0x40, 0x0d, 0xbd, 0xc4, 0x6c, 0x9b, 0x32, 0x97, 0xf5, 0x79, 0xce, 0xf1, 0x2f, 0xc0, + 0x8f, 0x1e, 0x8b, 0x13, 0xfd, 0x3e, 0x54, 0xf9, 0x0e, 0xf9, 0xdb, 0x2e, 0x13, 0x29, 0x56, 0x6b, + 0x19, 0x23, 0x0f, 0xf8, 0x24, 0x2e, 0x87, 0xd5, 0xca, 0xe1, 0x51, 0xe3, 0xca, 0xab, 0xbf, 0x1a, + 0x9a, 0x53, 0x91, 0x6a, 0xf7, 0x99, 0x49, 0x60, 0x41, 0xc6, 0xb7, 0x15, 0x11, 0x0f, 0x51, 0x7a, + 0xe9, 0xe8, 0x23, 0x80, 0x4d, 0x74, 0xf9, 0x8f, 0xbc, 0x01, 0x35, 0xe1, 0x46, 0x81, 0x7e, 0x07, + 0xca, 0xa1, 0xbc, 0xa0, 0x70, 0x31, 0x54, 0x23, 0xfb, 0xb7, 0x54, 0x99, 0xc4, 0x20, 0xc4, 0xc2, + 0xe6, 0x0a, 0xcc, 0x7d, 0x85, 0x29, 0xe3, 0x69, 0x90, 0x40, 0xb3, 0x08, 0xa5, 0x1d, 0xdc, 0x65, + 0x28, 0x92, 0xd1, 0x3a, 0x6a, 0xc7, 0x93, 0x26, 0x25, 0x9b, 0xd4, 0x46, 0x51, 0xb4, 0xf8, 0xba, + 0x26, 0x3a, 0xc6, 0x64, 0xb7, 0x52, 0xd4, 0x7c, 0xa5, 0x41, 0xed, 0x4b, 0xdc, 0xed, 0x5e, 0x36, + 0x48, 0xa2, 0xe1, 0xe0, 0x0e, 0x6f, 0x2b, 0x32, 0xb7, 0xd4, 0x8e, 0xa7, 0xa2, 0xdb, 0xed, 0x8a, + 0x8c, 0xaa, 0x38, 0x7c, 0x69, 0xfe, 0xa3, 0x81, 0xce, 0x95, 0xdf, 0x42, 0x96, 0x24, 0x3d, 0x31, + 0x77, 0x7a, 0x4f, 0xcc, 0x8f, 0xe9, 0x89, 0x85, 0xb1, 0x3d, 0xb1, 0x38, 0xd4, 0x13, 0x9b, 0x50, + 0xa0, 0x21, 0xf2, 0x44, 0x17, 0x1d, 0xd7, 0xd2, 0x84, 0x44, 0x1a, 0xa5, 0xf2, 0xd8, 0x54, 0xba, + 0x0e, 0xff, 0x3b, 0x71, 0x75, 0xf9, 0xb2, 0xe6, 0xaf, 0x1a, 0xcc, 0x39, 0x88, 0xe2, 0x1f, 0xd1, + 0x16, 0x3b, 0xb8, 0xf4, 0xa7, 0x5a, 0x80, 0xe2, 0x0b, 0xec, 0xb3, 0x5d, 0xf5, 0x52, 0x72, 0xc3, + 0xd1, 0xd9, 0x45, 0xb8, 0xb3, 0x2b, 0xab, 0x7f, 0xd6, 0x51, 0x3b, 0xf3, 0x67, 0xb8, 0xba, 0xd6, + 0x25, 0x14, 0xb5, 0x1f, 0xfe, 0x17, 0x81, 0xc9, 0xe7, 0xcc, 0x8b, 0x57, 0x90, 0x1b, 0xf3, 0x0b, + 0x98, 0xdb, 0x72, 0xfb, 0xf4, 0xc2, 0xfd, 0x73, 
0x13, 0xe6, 0x1d, 0x44, 0xfb, 0xbd, 0x0b, 0x1b, + 0xda, 0x80, 0x6b, 0xbc, 0x38, 0xb7, 0xb0, 0x7f, 0x91, 0xe4, 0x35, 0x1d, 0xd9, 0x0f, 0xa4, 0x19, + 0x55, 0xe2, 0x9f, 0x43, 0x55, 0xb5, 0x0b, 0x14, 0x97, 0xf9, 0xf2, 0xa4, 0x32, 0x6f, 0x07, 0x3b, + 0xc4, 0x39, 0x56, 0x31, 0x5f, 0x6b, 0x70, 0x7d, 0x2d, 0x99, 0xc9, 0x17, 0xe5, 0x28, 0xdb, 0x30, + 0x1f, 0xba, 0x11, 0x0a, 0xd8, 0x76, 0x8a, 0x17, 0xc8, 0xe7, 0x6b, 0xf1, 0xfe, 0xff, 0xe7, 0x51, + 0x63, 0x25, 0xc5, 0xb6, 0x48, 0x88, 0x82, 0x44, 0x9d, 0xda, 0x1d, 0x72, 0xd3, 0xc7, 0x1d, 0x44, + 0x99, 0xb5, 0x2e, 0xfe, 0x73, 0xe6, 0xa4, 0xb1, 0xb5, 0x53, 0x39, 0x43, 0x7e, 0x1a, 0xce, 0xf0, + 0x1d, 0x2c, 0x0e, 0xdf, 0x2e, 0x01, 0xae, 0x76, 0xcc, 0x04, 0x4f, 0xed, 0x90, 0x23, 0xe4, 0x25, + 0xad, 0x60, 0xfe, 0x04, 0xf3, 0x5f, 0x87, 0xfe, 0x5b, 0xe0, 0x75, 0x2d, 0xa8, 0x46, 0x88, 0x92, + 0x7e, 0xe4, 0x21, 0x2a, 0xb0, 0x1a, 0x77, 0xa9, 0x63, 0x31, 0x73, 0x05, 0xae, 0x3e, 0x90, 0x04, + 0x38, 0xf6, 0x5c, 0x87, 0xb2, 0x9c, 0x04, 0xf2, 0x2a, 0x55, 0x27, 0xde, 0xf2, 0xe4, 0x4b, 0x64, + 0x93, 0xb9, 0x50, 0x56, 0xfc, 0x59, 0xdd, 0xbb, 0x7e, 0x0a, 0x97, 0x14, 0x02, 0x4e, 0x2c, 0x68, + 0xee, 0x40, 0xed, 0x5b, 0x17, 0x5f, 0xfe, 0xec, 0x8c, 0x60, 0x46, 0xfa, 0x51, 0xb1, 0x0e, 0xf1, + 0x10, 0x6d, 0x32, 0x0f, 0xc9, 0x9d, 0x87, 0x87, 0xb4, 0x5e, 0xcf, 0x40, 0x51, 0x4c, 0x4e, 0x7d, + 0x0f, 0x4a, 0x92, 0x63, 0xea, 0xb6, 0x35, 0xe9, 0x17, 0x93, 0x35, 0xc2, 0xe9, 0x8d, 0x8f, 0xa7, + 0x57, 0x50, 0x57, 0xfb, 0x01, 0x8a, 0x82, 0x0b, 0xea, 0x2b, 0x93, 0x55, 0xd3, 0xcc, 0xd4, 0xf8, + 0x70, 0x2a, 0x59, 0xe5, 0xa1, 0x03, 0x25, 0x49, 0xb0, 0xb2, 0xae, 0x33, 0x42, 0x38, 0x8d, 0x8f, + 0xa6, 0x51, 0x48, 0x1c, 0x3d, 0x87, 0xd9, 0x13, 0x4c, 0x4e, 0x6f, 0x4d, 0xa3, 0x7e, 0x72, 0xa0, + 0x9f, 0xd1, 0xe5, 0x53, 0xc8, 0x6f, 0x22, 0xa6, 0x37, 0x27, 0x2b, 0x1d, 0xd3, 0x3d, 0xe3, 0x83, + 0x29, 0x24, 0x13, 0xdc, 0x0a, 0xbc, 0xd3, 0xea, 0xd6, 0x64, 0x95, 0x61, 0x76, 0x66, 0xd8, 0x53, + 0xcb, 0x2b, 0x47, 0x6d, 0x28, 0x70, 0xb2, 0xa5, 0x67, 0xc4, 0x96, 0x22, 0x64, 0xc6, 0xe2, 0x48, + 0x72, 0x6f, 0xf0, 0x1f, 0xeb, 0xfa, 0x16, 0x14, 0x78, 0x29, 0xe9, 0x19, 0x79, 0x38, 0x4a, 0xa4, + 0xc6, 0x5a, 0x7c, 0x0c, 0xd5, 0x84, 0x63, 0x64, 0x41, 0x31, 0x4c, 0x46, 0xc6, 0x1a, 0x7d, 0x08, + 0x65, 0xc5, 0x0e, 0xf4, 0x8c, 0xf7, 0x3e, 0x49, 0x22, 0x26, 0x18, 0x2c, 0x8a, 0x69, 0x9f, 0x15, + 0xe1, 0x30, 0x25, 0x18, 0x6b, 0xf0, 0x11, 0x94, 0xe4, 0xd8, 0xcf, 0x2a, 0x9a, 0x11, 0x72, 0x30, + 0xd6, 0x24, 0x86, 0x4a, 0x3c, 0xb9, 0xf5, 0x9b, 0xd9, 0x39, 0x92, 0x22, 0x0a, 0x86, 0x35, 0xad, + 0xb8, 0xca, 0xa8, 0x17, 0x00, 0xa9, 0x79, 0x79, 0x3b, 0x03, 0xe2, 0xd3, 0x26, 0xbf, 0xf1, 0xc9, + 0xd9, 0x94, 0x94, 0xe3, 0x47, 0x50, 0x92, 0x03, 0x31, 0x0b, 0xb6, 0x91, 0xb1, 0x39, 0x16, 0xb6, + 0x1d, 0x28, 0xab, 0xd1, 0x95, 0x95, 0x2b, 0x27, 0xa7, 0xa1, 0x71, 0x73, 0x4a, 0x69, 0x15, 0xfa, + 0xf7, 0x50, 0xe0, 0x33, 0x27, 0xab, 0x0a, 0x53, 0xf3, 0xcf, 0x58, 0x99, 0x46, 0x54, 0x9a, 0x5f, + 0xfd, 0xe6, 0xf0, 0xcd, 0xd2, 0x95, 0x3f, 0xde, 0x2c, 0x5d, 0xf9, 0x65, 0xb0, 0xa4, 0x1d, 0x0e, + 0x96, 0xb4, 0xdf, 0x07, 0x4b, 0xda, 0xdf, 0x83, 0x25, 0xed, 0xe9, 0xbd, 0xf3, 0xfd, 0x65, 0xef, + 0xae, 0x58, 0x3c, 0x2b, 0x09, 0xb8, 0x6e, 0xff, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x67, 0xc5, 0x63, + 0x32, 0x20, 0x14, 0x00, 0x00, +} diff --git a/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.proto b/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.proto new file mode 100644 index 000000000000..eb373185ebab --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.proto @@ -0,0 +1,209 @@ 
+syntax = "proto3"; + +package containerd.services.tasks.v1; + +import "google/protobuf/empty.proto"; +import "google/protobuf/any.proto"; +import "gogoproto/gogo.proto"; +import "github.com/containerd/containerd/api/types/mount.proto"; +import "github.com/containerd/containerd/api/types/metrics.proto"; +import "github.com/containerd/containerd/api/types/descriptor.proto"; +import "github.com/containerd/containerd/api/types/task/task.proto"; +import "google/protobuf/timestamp.proto"; + +option go_package = "github.com/containerd/containerd/api/services/tasks/v1;tasks"; + +service Tasks { + // Create a task. + rpc Create(CreateTaskRequest) returns (CreateTaskResponse); + + // Start a process. + rpc Start(StartRequest) returns (StartResponse); + + // Delete a task and on disk state. + rpc Delete(DeleteTaskRequest) returns (DeleteResponse); + + rpc DeleteProcess(DeleteProcessRequest) returns (DeleteResponse); + + rpc Get(GetRequest) returns (GetResponse); + + rpc List(ListTasksRequest) returns (ListTasksResponse); + + // Kill a task or process. + rpc Kill(KillRequest) returns (google.protobuf.Empty); + + rpc Exec(ExecProcessRequest) returns (google.protobuf.Empty); + + rpc ResizePty(ResizePtyRequest) returns (google.protobuf.Empty); + + rpc CloseIO(CloseIORequest) returns (google.protobuf.Empty); + + rpc Pause(PauseTaskRequest) returns (google.protobuf.Empty); + + rpc Resume(ResumeTaskRequest) returns (google.protobuf.Empty); + + rpc ListPids(ListPidsRequest) returns (ListPidsResponse); + + rpc Checkpoint(CheckpointTaskRequest) returns (CheckpointTaskResponse); + + rpc Update(UpdateTaskRequest) returns (google.protobuf.Empty); + + rpc Metrics(MetricsRequest) returns (MetricsResponse); + + rpc Wait(WaitRequest) returns (WaitResponse); +} + +message CreateTaskRequest { + string container_id = 1; + + // RootFS provides the pre-chroot mounts to perform in the shim before + // executing the container task. + // + // These are for mounts that cannot be performed in the user namespace. + // Typically, these mounts should be resolved from snapshots specified on + // the container object. + repeated containerd.types.Mount rootfs = 3; + + string stdin = 4; + string stdout = 5; + string stderr = 6; + bool terminal = 7; + + containerd.types.Descriptor checkpoint = 8; + + google.protobuf.Any options = 9; +} + +message CreateTaskResponse { + string container_id = 1; + uint32 pid = 2; +} + +message StartRequest { + string container_id = 1; + string exec_id = 2; +} + +message StartResponse { + uint32 pid = 1; +} + +message DeleteTaskRequest { + string container_id = 1; +} + +message DeleteResponse { + string id = 1; + uint32 pid = 2; + uint32 exit_status = 3; + google.protobuf.Timestamp exited_at = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; +} + +message DeleteProcessRequest { + string container_id = 1; + string exec_id = 2; +} + +message GetRequest { + string container_id = 1; + string exec_id = 2; +} + +message GetResponse { + containerd.v1.types.Process process = 1; +} + +message ListTasksRequest { + string filter = 1; +} + +message ListTasksResponse { + repeated containerd.v1.types.Process tasks = 1; +} + +message KillRequest { + string container_id = 1; + string exec_id = 2; + uint32 signal = 3; + bool all = 4; +} + +message ExecProcessRequest { + string container_id = 1; + string stdin = 2; + string stdout = 3; + string stderr = 4; + bool terminal = 5; + // Spec for starting a process in the target container. + // + // For runc, this is a process spec, for example. 
+ google.protobuf.Any spec = 6; + // id of the exec process + string exec_id = 7; +} + +message ExecProcessResponse { +} + +message ResizePtyRequest { + string container_id = 1; + string exec_id = 2; + uint32 width = 3; + uint32 height = 4; +} + +message CloseIORequest { + string container_id = 1; + string exec_id = 2; + bool stdin = 3; +} + +message PauseTaskRequest { + string container_id = 1; +} + +message ResumeTaskRequest { + string container_id = 1; +} + +message ListPidsRequest { + string container_id = 1; +} + +message ListPidsResponse { + // Processes includes the process ID and additional process information + repeated containerd.v1.types.ProcessInfo processes = 1; +} + +message CheckpointTaskRequest { + string container_id = 1; + string parent_checkpoint = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; + google.protobuf.Any options = 3; +} + +message CheckpointTaskResponse { + repeated containerd.types.Descriptor descriptors = 1; +} + +message UpdateTaskRequest { + string container_id = 1; + google.protobuf.Any resources = 2; +} + +message MetricsRequest { + repeated string filters = 1; +} + +message MetricsResponse { + repeated types.Metric metrics = 1; +} + +message WaitRequest { + string container_id = 1; + string exec_id = 2; +} + +message WaitResponse { + uint32 exit_status = 1; + google.protobuf.Timestamp exited_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; +} diff --git a/vendor/github.com/containerd/containerd/api/services/version/v1/version.pb.go b/vendor/github.com/containerd/containerd/api/services/version/v1/version.pb.go new file mode 100644 index 000000000000..c403c8431be5 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/version/v1/version.pb.go @@ -0,0 +1,466 @@ +// Code generated by protoc-gen-gogo. +// source: github.com/containerd/containerd/api/services/version/v1/version.proto +// DO NOT EDIT! + +/* + Package version is a generated protocol buffer package. + + It is generated from these files: + github.com/containerd/containerd/api/services/version/v1/version.proto + + It has these top-level messages: + VersionResponse +*/ +package version + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/ptypes/empty" +import _ "github.com/gogo/protobuf/gogoproto" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type VersionResponse struct { + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + Revision string `protobuf:"bytes,2,opt,name=revision,proto3" json:"revision,omitempty"` +} + +func (m *VersionResponse) Reset() { *m = VersionResponse{} } +func (*VersionResponse) ProtoMessage() {} +func (*VersionResponse) Descriptor() ([]byte, []int) { return fileDescriptorVersion, []int{0} } + +func init() { + proto.RegisterType((*VersionResponse)(nil), "containerd.services.version.v1.VersionResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Version service + +type VersionClient interface { + Version(ctx context.Context, in *google_protobuf.Empty, opts ...grpc.CallOption) (*VersionResponse, error) +} + +type versionClient struct { + cc *grpc.ClientConn +} + +func NewVersionClient(cc *grpc.ClientConn) VersionClient { + return &versionClient{cc} +} + +func (c *versionClient) Version(ctx context.Context, in *google_protobuf.Empty, opts ...grpc.CallOption) (*VersionResponse, error) { + out := new(VersionResponse) + err := grpc.Invoke(ctx, "/containerd.services.version.v1.Version/Version", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Version service + +type VersionServer interface { + Version(context.Context, *google_protobuf.Empty) (*VersionResponse, error) +} + +func RegisterVersionServer(s *grpc.Server, srv VersionServer) { + s.RegisterService(&_Version_serviceDesc, srv) +} + +func _Version_Version_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(google_protobuf.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VersionServer).Version(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.version.v1.Version/Version", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VersionServer).Version(ctx, req.(*google_protobuf.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +var _Version_serviceDesc = grpc.ServiceDesc{ + ServiceName: "containerd.services.version.v1.Version", + HandlerType: (*VersionServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Version", + Handler: _Version_Version_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/containerd/containerd/api/services/version/v1/version.proto", +} + +func (m *VersionResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VersionResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Version) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintVersion(dAtA, i, uint64(len(m.Version))) + i += copy(dAtA[i:], m.Version) + } + if len(m.Revision) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintVersion(dAtA, i, uint64(len(m.Revision))) + i += copy(dAtA[i:], m.Revision) + } + return i, nil +} + +func encodeFixed64Version(dAtA []byte, offset int, v 
uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Version(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintVersion(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *VersionResponse) Size() (n int) { + var l int + _ = l + l = len(m.Version) + if l > 0 { + n += 1 + l + sovVersion(uint64(l)) + } + l = len(m.Revision) + if l > 0 { + n += 1 + l + sovVersion(uint64(l)) + } + return n +} + +func sovVersion(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozVersion(x uint64) (n int) { + return sovVersion(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *VersionResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VersionResponse{`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, + `}`, + }, "") + return s +} +func valueToStringVersion(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *VersionResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVersion + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VersionResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VersionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVersion + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVersion + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowVersion + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthVersion + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Revision = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipVersion(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthVersion + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipVersion(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowVersion + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowVersion + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowVersion + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthVersion + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowVersion + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipVersion(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthVersion = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowVersion = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("github.com/containerd/containerd/api/services/version/v1/version.proto", fileDescriptorVersion) +} + +var fileDescriptorVersion = []byte{ + // 241 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4b, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, + 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x17, 0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb, + 0x97, 0xa5, 0x16, 0x15, 0x67, 0xe6, 0xe7, 0xe9, 0x97, 0x19, 0xc2, 0x98, 0x7a, 0x05, 0x45, 0xf9, + 0x25, 0xf9, 0x42, 0x72, 0x08, 0x1d, 0x7a, 0x30, 0xd5, 0x7a, 0x30, 0x25, 0x65, 0x86, 0x52, 0xd2, + 0xe9, 0xf9, 0xf9, 0xe9, 0x39, 0xa9, 0xfa, 0x60, 0xd5, 0x49, 0xa5, 0x69, 0xfa, 0xa9, 0xb9, 0x05, + 0x25, 0x95, 0x10, 0xcd, 0x52, 0x22, 0xe9, 0xf9, 0xe9, 0xf9, 0x60, 0xa6, 0x3e, 0x88, 0x05, 0x11, + 0x55, 0x72, 0xe7, 0xe2, 0x0f, 0x83, 0x18, 0x10, 0x94, 0x5a, 0x5c, 0x90, 0x9f, 0x57, 0x9c, 0x2a, + 0x24, 0xc1, 0xc5, 0x0e, 0x35, 0x53, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08, 0xc6, 0x15, 0x92, + 0xe2, 0xe2, 0x28, 0x4a, 0x2d, 0xcb, 0x04, 0x4b, 0x31, 0x81, 0xa5, 0xe0, 0x7c, 0xa3, 0x58, 0x2e, + 0x76, 0xa8, 0x41, 0x42, 0x41, 0x08, 0xa6, 
0x98, 0x1e, 0xc4, 0x49, 0x7a, 0x30, 0x27, 0xe9, 0xb9, + 0x82, 0x9c, 0x24, 0xa5, 0xaf, 0x87, 0xdf, 0x2b, 0x7a, 0x68, 0x8e, 0x72, 0x8a, 0x3a, 0xf1, 0x50, + 0x8e, 0xe1, 0xc6, 0x43, 0x39, 0x86, 0x86, 0x47, 0x72, 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, + 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0x63, 0x94, 0x03, 0xb9, 0x81, 0x6b, 0x0d, 0x65, 0x26, 0xb1, + 0x81, 0x1d, 0x67, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0xb6, 0x37, 0xd8, 0xc6, 0xa7, 0x01, 0x00, + 0x00, +} diff --git a/vendor/github.com/containerd/containerd/api/services/version/v1/version.proto b/vendor/github.com/containerd/containerd/api/services/version/v1/version.proto new file mode 100644 index 000000000000..2398fdcbedbd --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/version/v1/version.proto @@ -0,0 +1,18 @@ +syntax = "proto3"; + +package containerd.services.version.v1; + +import "google/protobuf/empty.proto"; +import "gogoproto/gogo.proto"; + +// TODO(stevvooe): Should version service actually be versioned? +option go_package = "github.com/containerd/containerd/api/services/version/v1;version"; + +service Version { + rpc Version(google.protobuf.Empty) returns (VersionResponse); +} + +message VersionResponse { + string version = 1; + string revision = 2; +} diff --git a/vendor/github.com/containerd/containerd/api/types/descriptor.pb.go b/vendor/github.com/containerd/containerd/api/types/descriptor.pb.go new file mode 100644 index 000000000000..785d0507071b --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/types/descriptor.pb.go @@ -0,0 +1,428 @@ +// Code generated by protoc-gen-gogo. +// source: github.com/containerd/containerd/api/types/descriptor.proto +// DO NOT EDIT! + +/* + Package types is a generated protocol buffer package. + + It is generated from these files: + github.com/containerd/containerd/api/types/descriptor.proto + github.com/containerd/containerd/api/types/metrics.proto + github.com/containerd/containerd/api/types/mount.proto + github.com/containerd/containerd/api/types/platform.proto + + It has these top-level messages: + Descriptor + Metric + Mount + Platform +*/ +package types + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +import github_com_opencontainers_go_digest "github.com/opencontainers/go-digest" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// Descriptor describes a blob in a content store. +// +// This descriptor can be used to reference content from an +// oci descriptor found in a manifest. 
+// See https://godoc.org/github.com/opencontainers/image-spec/specs-go/v1#Descriptor +type Descriptor struct { + MediaType string `protobuf:"bytes,1,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"` + Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,2,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` + Size_ int64 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"` +} + +func (m *Descriptor) Reset() { *m = Descriptor{} } +func (*Descriptor) ProtoMessage() {} +func (*Descriptor) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{0} } + +func init() { + proto.RegisterType((*Descriptor)(nil), "containerd.types.Descriptor") +} +func (m *Descriptor) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Descriptor) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.MediaType) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintDescriptor(dAtA, i, uint64(len(m.MediaType))) + i += copy(dAtA[i:], m.MediaType) + } + if len(m.Digest) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintDescriptor(dAtA, i, uint64(len(m.Digest))) + i += copy(dAtA[i:], m.Digest) + } + if m.Size_ != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintDescriptor(dAtA, i, uint64(m.Size_)) + } + return i, nil +} + +func encodeFixed64Descriptor(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Descriptor(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintDescriptor(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Descriptor) Size() (n int) { + var l int + _ = l + l = len(m.MediaType) + if l > 0 { + n += 1 + l + sovDescriptor(uint64(l)) + } + l = len(m.Digest) + if l > 0 { + n += 1 + l + sovDescriptor(uint64(l)) + } + if m.Size_ != 0 { + n += 1 + sovDescriptor(uint64(m.Size_)) + } + return n +} + +func sovDescriptor(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozDescriptor(x uint64) (n int) { + return sovDescriptor(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Descriptor) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Descriptor{`, + `MediaType:` + fmt.Sprintf("%v", this.MediaType) + `,`, + `Digest:` + fmt.Sprintf("%v", this.Digest) + `,`, + `Size_:` + fmt.Sprintf("%v", this.Size_) + `,`, + `}`, + }, "") + return s +} +func valueToStringDescriptor(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Descriptor) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDescriptor + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Descriptor: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Descriptor: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MediaType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDescriptor + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDescriptor + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MediaType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDescriptor + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDescriptor + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType) + } + m.Size_ = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDescriptor + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Size_ |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipDescriptor(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDescriptor + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipDescriptor(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDescriptor + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDescriptor + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDescriptor + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthDescriptor + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var 
start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDescriptor + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipDescriptor(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthDescriptor = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowDescriptor = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("github.com/containerd/containerd/api/types/descriptor.proto", fileDescriptorDescriptor) +} + +var fileDescriptorDescriptor = []byte{ + // 232 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, + 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xa7, 0xa4, 0x16, + 0x27, 0x17, 0x65, 0x16, 0x94, 0xe4, 0x17, 0xe9, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, + 0x94, 0xe9, 0x81, 0x95, 0x48, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x25, 0xf5, 0x41, 0x2c, 0x88, + 0x3a, 0xa5, 0x6e, 0x46, 0x2e, 0x2e, 0x17, 0xb8, 0x66, 0x21, 0x59, 0x2e, 0xae, 0xdc, 0xd4, 0x94, + 0xcc, 0xc4, 0x78, 0x90, 0x1e, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xce, 0x20, 0x4e, 0xb0, 0x48, 0x48, + 0x65, 0x41, 0xaa, 0x90, 0x17, 0x17, 0x5b, 0x4a, 0x66, 0x7a, 0x6a, 0x71, 0x89, 0x04, 0x13, 0x48, + 0xca, 0xc9, 0xe8, 0xc4, 0x3d, 0x79, 0x86, 0x5b, 0xf7, 0xe4, 0xb5, 0x90, 0x9c, 0x9a, 0x5f, 0x90, + 0x9a, 0x07, 0xb7, 0xbc, 0x58, 0x3f, 0x3d, 0x5f, 0x17, 0xa2, 0x45, 0xcf, 0x05, 0x4c, 0x05, 0x41, + 0x4d, 0x10, 0x12, 0xe2, 0x62, 0x29, 0xce, 0xac, 0x4a, 0x95, 0x60, 0x56, 0x60, 0xd4, 0x60, 0x0e, + 0x02, 0xb3, 0x9d, 0xbc, 0x4e, 0x3c, 0x94, 0x63, 0xb8, 0xf1, 0x50, 0x8e, 0xa1, 0xe1, 0x91, 0x1c, + 0xe3, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x18, 0x65, 0x40, + 0x7c, 0x60, 0x58, 0x83, 0xc9, 0x24, 0x36, 0xb0, 0x07, 0x8d, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, + 0x23, 0x14, 0xc9, 0x7c, 0x47, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/containerd/containerd/api/types/descriptor.proto b/vendor/github.com/containerd/containerd/api/types/descriptor.proto new file mode 100644 index 000000000000..7975ab06d922 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/types/descriptor.proto @@ -0,0 +1,18 @@ +syntax = "proto3"; + +package containerd.types; + +import "gogoproto/gogo.proto"; + +option go_package = "github.com/containerd/containerd/api/types;types"; + +// Descriptor describes a blob in a content store. +// +// This descriptor can be used to reference content from an +// oci descriptor found in a manifest. 
+// See https://godoc.org/github.com/opencontainers/image-spec/specs-go/v1#Descriptor +message Descriptor { + string media_type = 1; + string digest = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; + int64 size = 3; +} diff --git a/vendor/github.com/containerd/containerd/api/types/doc.go b/vendor/github.com/containerd/containerd/api/types/doc.go new file mode 100644 index 000000000000..ab1254f4c2be --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/types/doc.go @@ -0,0 +1 @@ +package types diff --git a/vendor/github.com/containerd/containerd/api/types/metrics.pb.go b/vendor/github.com/containerd/containerd/api/types/metrics.pb.go new file mode 100644 index 000000000000..f9aacf941aa0 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/types/metrics.pb.go @@ -0,0 +1,429 @@ +// Code generated by protoc-gen-gogo. +// source: github.com/containerd/containerd/api/types/metrics.proto +// DO NOT EDIT! + +package types + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" +import google_protobuf1 "github.com/gogo/protobuf/types" +import _ "github.com/gogo/protobuf/types" + +import time "time" + +import github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +type Metric struct { + Timestamp time.Time `protobuf:"bytes,1,opt,name=timestamp,stdtime" json:"timestamp"` + ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + Data *google_protobuf1.Any `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"` +} + +func (m *Metric) Reset() { *m = Metric{} } +func (*Metric) ProtoMessage() {} +func (*Metric) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{0} } + +func init() { + proto.RegisterType((*Metric)(nil), "containerd.types.Metric") +} +func (m *Metric) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Metric) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintMetrics(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp))) + n1, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + if len(m.ID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + if m.Data != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.Data.Size())) + n2, err := m.Data.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + return i, nil +} + +func encodeFixed64Metrics(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Metrics(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset 
+ 4 +} +func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Metric) Size() (n int) { + var l int + _ = l + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) + n += 1 + l + sovMetrics(uint64(l)) + l = len(m.ID) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + if m.Data != nil { + l = m.Data.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + return n +} + +func sovMetrics(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozMetrics(x uint64) (n int) { + return sovMetrics(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Metric) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Metric{`, + `Timestamp:` + strings.Replace(strings.Replace(this.Timestamp.String(), "Timestamp", "google_protobuf2.Timestamp", 1), `&`, ``, 1) + `,`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Data:` + strings.Replace(fmt.Sprintf("%v", this.Data), "Any", "google_protobuf1.Any", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringMetrics(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Metric) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Metric: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Metric: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Data == nil { + m.Data = &google_protobuf1.Any{} + } + if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipMetrics(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetrics + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetrics + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetrics + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthMetrics + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetrics + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipMetrics(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthMetrics = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMetrics = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("github.com/containerd/containerd/api/types/metrics.proto", fileDescriptorMetrics) +} + +var fileDescriptorMetrics = []byte{ + // 256 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x48, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, + 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xe7, 0xa6, 0x96, + 0x14, 0x65, 0x26, 0x17, 0xeb, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, 0xd4, 0xe8, 0x81, + 0xe5, 0xa5, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x92, 0xfa, 0x20, 0x16, 0x44, 0x9d, 0x94, 0x64, + 0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x3e, 0x98, 0x97, 0x54, 0x9a, 0xa6, 0x9f, 0x98, 0x57, 0x09, + 0x95, 0x92, 0x47, 0x97, 0x2a, 0xc9, 0xcc, 0x4d, 0x2d, 0x2e, 0x49, 0xcc, 
0x2d, 0x80, 0x28, 0x50, + 0xea, 0x63, 0xe4, 0x62, 0xf3, 0x05, 0xdb, 0x2a, 0xe4, 0xc4, 0xc5, 0x09, 0x97, 0x95, 0x60, 0x54, + 0x60, 0xd4, 0xe0, 0x36, 0x92, 0xd2, 0x83, 0xe8, 0xd7, 0x83, 0xe9, 0xd7, 0x0b, 0x81, 0xa9, 0x70, + 0xe2, 0x38, 0x71, 0x4f, 0x9e, 0x61, 0xc2, 0x7d, 0x79, 0xc6, 0x20, 0x84, 0x36, 0x21, 0x31, 0x2e, + 0xa6, 0xcc, 0x14, 0x09, 0x26, 0x05, 0x46, 0x0d, 0x4e, 0x27, 0xb6, 0x47, 0xf7, 0xe4, 0x99, 0x3c, + 0x5d, 0x82, 0x98, 0x32, 0x53, 0x84, 0x34, 0xb8, 0x58, 0x52, 0x12, 0x4b, 0x12, 0x25, 0x98, 0xc1, + 0xc6, 0x8a, 0x60, 0x18, 0xeb, 0x98, 0x57, 0x19, 0x04, 0x56, 0xe1, 0xe4, 0x75, 0xe2, 0xa1, 0x1c, + 0xc3, 0x8d, 0x87, 0x72, 0x0c, 0x0d, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, + 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x28, 0x03, 0xe2, 0x03, 0xd2, 0x1a, 0x4c, 0x26, 0xb1, 0x81, + 0xcd, 0x37, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xf8, 0x51, 0x36, 0x74, 0x83, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/containerd/containerd/api/types/metrics.proto b/vendor/github.com/containerd/containerd/api/types/metrics.proto new file mode 100644 index 000000000000..d1629c7eec4f --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/types/metrics.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; + +package containerd.types; + +import "gogoproto/gogo.proto"; +import "google/protobuf/any.proto"; +import "google/protobuf/timestamp.proto"; + +option go_package = "github.com/containerd/containerd/api/types;types"; + +message Metric { + google.protobuf.Timestamp timestamp = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + string id = 2; + google.protobuf.Any data = 3; +} diff --git a/vendor/github.com/containerd/containerd/api/types/mount.pb.go b/vendor/github.com/containerd/containerd/api/types/mount.pb.go new file mode 100644 index 000000000000..cc835140e317 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/types/mount.pb.go @@ -0,0 +1,474 @@ +// Code generated by protoc-gen-gogo. +// source: github.com/containerd/containerd/api/types/mount.proto +// DO NOT EDIT! + +package types + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Mount describes mounts for a container. +// +// This type is the lingua franca of ContainerD. All services provide mounts +// to be used with the container at creation time. +// +// The Mount type follows the structure of the mount syscall, including a type, +// source, target and options. +type Mount struct { + // Type defines the nature of the mount. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // Source specifies the name of the mount. Depending on mount type, this + // may be a volume name or a host path, or even ignored. + Source string `protobuf:"bytes,2,opt,name=source,proto3" json:"source,omitempty"` + // Target path in container + Target string `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` + // Options specifies zero or more fstab style mount options. 
+ Options []string `protobuf:"bytes,4,rep,name=options" json:"options,omitempty"` +} + +func (m *Mount) Reset() { *m = Mount{} } +func (*Mount) ProtoMessage() {} +func (*Mount) Descriptor() ([]byte, []int) { return fileDescriptorMount, []int{0} } + +func init() { + proto.RegisterType((*Mount)(nil), "containerd.types.Mount") +} +func (m *Mount) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Mount) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Type) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintMount(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + } + if len(m.Source) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintMount(dAtA, i, uint64(len(m.Source))) + i += copy(dAtA[i:], m.Source) + } + if len(m.Target) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintMount(dAtA, i, uint64(len(m.Target))) + i += copy(dAtA[i:], m.Target) + } + if len(m.Options) > 0 { + for _, s := range m.Options { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func encodeFixed64Mount(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Mount(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintMount(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Mount) Size() (n int) { + var l int + _ = l + l = len(m.Type) + if l > 0 { + n += 1 + l + sovMount(uint64(l)) + } + l = len(m.Source) + if l > 0 { + n += 1 + l + sovMount(uint64(l)) + } + l = len(m.Target) + if l > 0 { + n += 1 + l + sovMount(uint64(l)) + } + if len(m.Options) > 0 { + for _, s := range m.Options { + l = len(s) + n += 1 + l + sovMount(uint64(l)) + } + } + return n +} + +func sovMount(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozMount(x uint64) (n int) { + return sovMount(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Mount) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Mount{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Source:` + fmt.Sprintf("%v", this.Source) + `,`, + `Target:` + fmt.Sprintf("%v", this.Target) + `,`, + `Options:` + fmt.Sprintf("%v", this.Options) + `,`, + `}`, + }, "") + return s +} +func valueToStringMount(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Mount) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { 
+ break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Mount: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Mount: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMount + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMount + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Source = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMount + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Target = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMount + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMount(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMount + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipMount(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMount + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 
0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMount + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMount + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthMount + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMount + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipMount(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthMount = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMount = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("github.com/containerd/containerd/api/types/mount.proto", fileDescriptorMount) +} + +var fileDescriptorMount = []byte{ + // 200 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x4b, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, + 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xe7, 0xe6, 0x97, + 0xe6, 0x95, 0xe8, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, 0x54, 0xe8, 0x81, 0x65, 0xa5, + 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x92, 0xfa, 0x20, 0x16, 0x44, 0x9d, 0x52, 0x2a, 0x17, 0xab, + 0x2f, 0x48, 0x9b, 0x90, 0x10, 0x17, 0x0b, 0x48, 0x9d, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, + 0x98, 0x2d, 0x24, 0xc6, 0xc5, 0x56, 0x9c, 0x5f, 0x5a, 0x94, 0x9c, 0x2a, 0xc1, 0x04, 0x16, 0x85, + 0xf2, 0x40, 0xe2, 0x25, 0x89, 0x45, 0xe9, 0xa9, 0x25, 0x12, 0xcc, 0x10, 0x71, 0x08, 0x4f, 0x48, + 0x82, 0x8b, 0x3d, 0xbf, 0xa0, 0x24, 0x33, 0x3f, 0xaf, 0x58, 0x82, 0x45, 0x81, 0x59, 0x83, 0x33, + 0x08, 0xc6, 0x75, 0xf2, 0x3a, 0xf1, 0x50, 0x8e, 0xe1, 0xc6, 0x43, 0x39, 0x86, 0x86, 0x47, 0x72, + 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0x63, 0x94, 0x01, + 0xf1, 0x1e, 0xb4, 0x06, 0x93, 0x49, 0x6c, 0x60, 0x97, 0x1b, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, + 0xe5, 0xc7, 0x07, 0x3f, 0x1b, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/containerd/containerd/api/types/mount.proto b/vendor/github.com/containerd/containerd/api/types/mount.proto new file mode 100644 index 000000000000..031e65442478 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/types/mount.proto @@ -0,0 +1,29 @@ +syntax = "proto3"; + +package containerd.types; + +import "gogoproto/gogo.proto"; + +option go_package = "github.com/containerd/containerd/api/types;types"; + +// Mount describes mounts for a container. +// +// This type is the lingua franca of ContainerD. 
All services provide mounts +// to be used with the container at creation time. +// +// The Mount type follows the structure of the mount syscall, including a type, +// source, target and options. +message Mount { + // Type defines the nature of the mount. + string type = 1; + + // Source specifies the name of the mount. Depending on mount type, this + // may be a volume name or a host path, or even ignored. + string source = 2; + + // Target path in container + string target = 3; + + // Options specifies zero or more fstab style mount options. + repeated string options = 4; +} diff --git a/vendor/github.com/containerd/containerd/api/types/platform.pb.go b/vendor/github.com/containerd/containerd/api/types/platform.pb.go new file mode 100644 index 000000000000..0ca2afc2596c --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/types/platform.pb.go @@ -0,0 +1,412 @@ +// Code generated by protoc-gen-gogo. +// source: github.com/containerd/containerd/api/types/platform.proto +// DO NOT EDIT! + +package types + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Platform follows the structure of the OCI platform specification, from +// descriptors. +type Platform struct { + OS string `protobuf:"bytes,1,opt,name=os,proto3" json:"os,omitempty"` + Architecture string `protobuf:"bytes,2,opt,name=architecture,proto3" json:"architecture,omitempty"` + Variant string `protobuf:"bytes,3,opt,name=variant,proto3" json:"variant,omitempty"` +} + +func (m *Platform) Reset() { *m = Platform{} } +func (*Platform) ProtoMessage() {} +func (*Platform) Descriptor() ([]byte, []int) { return fileDescriptorPlatform, []int{0} } + +func init() { + proto.RegisterType((*Platform)(nil), "containerd.types.Platform") +} +func (m *Platform) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Platform) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.OS) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintPlatform(dAtA, i, uint64(len(m.OS))) + i += copy(dAtA[i:], m.OS) + } + if len(m.Architecture) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintPlatform(dAtA, i, uint64(len(m.Architecture))) + i += copy(dAtA[i:], m.Architecture) + } + if len(m.Variant) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintPlatform(dAtA, i, uint64(len(m.Variant))) + i += copy(dAtA[i:], m.Variant) + } + return i, nil +} + +func encodeFixed64Platform(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Platform(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintPlatform(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func 
(m *Platform) Size() (n int) { + var l int + _ = l + l = len(m.OS) + if l > 0 { + n += 1 + l + sovPlatform(uint64(l)) + } + l = len(m.Architecture) + if l > 0 { + n += 1 + l + sovPlatform(uint64(l)) + } + l = len(m.Variant) + if l > 0 { + n += 1 + l + sovPlatform(uint64(l)) + } + return n +} + +func sovPlatform(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozPlatform(x uint64) (n int) { + return sovPlatform(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Platform) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Platform{`, + `OS:` + fmt.Sprintf("%v", this.OS) + `,`, + `Architecture:` + fmt.Sprintf("%v", this.Architecture) + `,`, + `Variant:` + fmt.Sprintf("%v", this.Variant) + `,`, + `}`, + }, "") + return s +} +func valueToStringPlatform(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Platform) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlatform + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Platform: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Platform: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OS", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlatform + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlatform + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OS = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Architecture", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlatform + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlatform + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Architecture = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Variant", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlatform + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlatform + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Variant = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPlatform(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPlatform + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipPlatform(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlatform + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlatform + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlatform + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthPlatform + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlatform + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipPlatform(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthPlatform = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowPlatform = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("github.com/containerd/containerd/api/types/platform.proto", fileDescriptorPlatform) +} + +var fileDescriptorPlatform = []byte{ + // 203 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, + 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0x17, 0xe4, 0x24, + 0x96, 0xa4, 0xe5, 0x17, 0xe5, 0xea, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, 0x14, 0xe9, + 0x81, 0x15, 0x48, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x25, 0xf5, 0x41, 0x2c, 0x88, 0x3a, 0xa5, + 0x04, 0x2e, 0x8e, 0x00, 0xa8, 0x4e, 0x21, 0x31, 0x2e, 0xa6, 0xfc, 0x62, 0x09, 0x46, 0x05, 0x46, + 0x0d, 0x4e, 0x27, 0xb6, 0x47, 0xf7, 0xe4, 0x99, 0xfc, 0x83, 0x83, 0x98, 0xf2, 0x8b, 0x85, 0x94, + 0xb8, 0x78, 0x12, 0x8b, 0x92, 0x33, 0x32, 0x4b, 0x52, 0x93, 0x4b, 0x4a, 0x8b, 0x52, 0x25, 0x98, + 0x40, 0x2a, 0x82, 0x50, 0xc4, 0x84, 0x24, 0xb8, 0xd8, 0xcb, 0x12, 0x8b, 0x32, 0x13, 0xf3, 0x4a, + 0x24, 0x98, 0xc1, 0xd2, 0x30, 0xae, 0x93, 0xd7, 0x89, 0x87, 0x72, 0x0c, 0x37, 0x1e, 0xca, 0x31, + 0x34, 0x3c, 0x92, 0x63, 0x3c, 0xf1, 0x48, 
0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, + 0x18, 0xa3, 0x0c, 0x88, 0xf7, 0x9e, 0x35, 0x98, 0x4c, 0x62, 0x03, 0x3b, 0xda, 0x18, 0x10, 0x00, + 0x00, 0xff, 0xff, 0x97, 0xa1, 0x99, 0x56, 0x19, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/containerd/containerd/api/types/platform.proto b/vendor/github.com/containerd/containerd/api/types/platform.proto new file mode 100644 index 000000000000..b1dce06233fb --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/types/platform.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; + +package containerd.types; + +import "gogoproto/gogo.proto"; + +option go_package = "github.com/containerd/containerd/api/types;types"; + +// Platform follows the structure of the OCI platform specification, from +// descriptors. +message Platform { + string os = 1 [(gogoproto.customname) = "OS"]; + string architecture = 2; + string variant = 3; +} diff --git a/vendor/github.com/containerd/containerd/api/types/task/task.pb.go b/vendor/github.com/containerd/containerd/api/types/task/task.pb.go new file mode 100644 index 000000000000..ccc230ae7345 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/types/task/task.pb.go @@ -0,0 +1,907 @@ +// Code generated by protoc-gen-gogo. +// source: github.com/containerd/containerd/api/types/task/task.proto +// DO NOT EDIT! + +/* + Package task is a generated protocol buffer package. + + It is generated from these files: + github.com/containerd/containerd/api/types/task/task.proto + + It has these top-level messages: + Process + ProcessInfo +*/ +package task + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" +import _ "github.com/gogo/protobuf/types" +import google_protobuf2 "github.com/gogo/protobuf/types" + +import time "time" + +import github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type Status int32 + +const ( + StatusUnknown Status = 0 + StatusCreated Status = 1 + StatusRunning Status = 2 + StatusStopped Status = 3 + StatusPaused Status = 4 + StatusPausing Status = 5 +) + +var Status_name = map[int32]string{ + 0: "UNKNOWN", + 1: "CREATED", + 2: "RUNNING", + 3: "STOPPED", + 4: "PAUSED", + 5: "PAUSING", +} +var Status_value = map[string]int32{ + "UNKNOWN": 0, + "CREATED": 1, + "RUNNING": 2, + "STOPPED": 3, + "PAUSED": 4, + "PAUSING": 5, +} + +func (x Status) String() string { + return proto.EnumName(Status_name, int32(x)) +} +func (Status) EnumDescriptor() ([]byte, []int) { return fileDescriptorTask, []int{0} } + +type Process struct { + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + Pid uint32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"` + Status Status `protobuf:"varint,4,opt,name=status,proto3,enum=containerd.v1.types.Status" json:"status,omitempty"` + Stdin string `protobuf:"bytes,5,opt,name=stdin,proto3" json:"stdin,omitempty"` + Stdout string `protobuf:"bytes,6,opt,name=stdout,proto3" json:"stdout,omitempty"` + Stderr string `protobuf:"bytes,7,opt,name=stderr,proto3" json:"stderr,omitempty"` + Terminal bool `protobuf:"varint,8,opt,name=terminal,proto3" json:"terminal,omitempty"` + ExitStatus uint32 `protobuf:"varint,9,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` + ExitedAt time.Time `protobuf:"bytes,10,opt,name=exited_at,json=exitedAt,stdtime" json:"exited_at"` +} + +func (m *Process) Reset() { *m = Process{} } +func (*Process) ProtoMessage() {} +func (*Process) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{0} } + +type ProcessInfo struct { + // PID is the process ID. + Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` + // Info contains additional process information. + // + // Info varies by platform. 
+ Info *google_protobuf2.Any `protobuf:"bytes,2,opt,name=info" json:"info,omitempty"` +} + +func (m *ProcessInfo) Reset() { *m = ProcessInfo{} } +func (*ProcessInfo) ProtoMessage() {} +func (*ProcessInfo) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{1} } + +func init() { + proto.RegisterType((*Process)(nil), "containerd.v1.types.Process") + proto.RegisterType((*ProcessInfo)(nil), "containerd.v1.types.ProcessInfo") + proto.RegisterEnum("containerd.v1.types.Status", Status_name, Status_value) +} +func (m *Process) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Process) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ContainerID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID))) + i += copy(dAtA[i:], m.ContainerID) + } + if len(m.ID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTask(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + if m.Pid != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintTask(dAtA, i, uint64(m.Pid)) + } + if m.Status != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintTask(dAtA, i, uint64(m.Status)) + } + if len(m.Stdin) > 0 { + dAtA[i] = 0x2a + i++ + i = encodeVarintTask(dAtA, i, uint64(len(m.Stdin))) + i += copy(dAtA[i:], m.Stdin) + } + if len(m.Stdout) > 0 { + dAtA[i] = 0x32 + i++ + i = encodeVarintTask(dAtA, i, uint64(len(m.Stdout))) + i += copy(dAtA[i:], m.Stdout) + } + if len(m.Stderr) > 0 { + dAtA[i] = 0x3a + i++ + i = encodeVarintTask(dAtA, i, uint64(len(m.Stderr))) + i += copy(dAtA[i:], m.Stderr) + } + if m.Terminal { + dAtA[i] = 0x40 + i++ + if m.Terminal { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.ExitStatus != 0 { + dAtA[i] = 0x48 + i++ + i = encodeVarintTask(dAtA, i, uint64(m.ExitStatus)) + } + dAtA[i] = 0x52 + i++ + i = encodeVarintTask(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt))) + n1, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + return i, nil +} + +func (m *ProcessInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProcessInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Pid != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTask(dAtA, i, uint64(m.Pid)) + } + if m.Info != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintTask(dAtA, i, uint64(m.Info.Size())) + n2, err := m.Info.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + return i, nil +} + +func encodeFixed64Task(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Task(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintTask(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 
1 +} +func (m *Process) Size() (n int) { + var l int + _ = l + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + l = len(m.ID) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + if m.Pid != 0 { + n += 1 + sovTask(uint64(m.Pid)) + } + if m.Status != 0 { + n += 1 + sovTask(uint64(m.Status)) + } + l = len(m.Stdin) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + l = len(m.Stdout) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + l = len(m.Stderr) + if l > 0 { + n += 1 + l + sovTask(uint64(l)) + } + if m.Terminal { + n += 2 + } + if m.ExitStatus != 0 { + n += 1 + sovTask(uint64(m.ExitStatus)) + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt) + n += 1 + l + sovTask(uint64(l)) + return n +} + +func (m *ProcessInfo) Size() (n int) { + var l int + _ = l + if m.Pid != 0 { + n += 1 + sovTask(uint64(m.Pid)) + } + if m.Info != nil { + l = m.Info.Size() + n += 1 + l + sovTask(uint64(l)) + } + return n +} + +func sovTask(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozTask(x uint64) (n int) { + return sovTask(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Process) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Process{`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, + `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, + `Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`, + `ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`, + `ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "google_protobuf1.Timestamp", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ProcessInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ProcessInfo{`, + `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, + `Info:` + strings.Replace(fmt.Sprintf("%v", this.Info), "Any", "google_protobuf2.Any", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringTask(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Process) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Process: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Process: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + 
postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) + } + m.Pid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Pid |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + m.Status = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Status |= (Status(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stdin = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stdout = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stderr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Terminal", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Terminal = bool(v != 0) + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitStatus", wireType) + } + m.ExitStatus = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ExitStatus |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTask(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTask + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProcessInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProcessInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProcessInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) + } + m.Pid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Pid |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTask + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Info == nil { + m.Info = &google_protobuf2.Any{} + } + if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTask(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTask + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + 
} + return nil +} +func skipTask(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTask + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTask + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTask + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthTask + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTask + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipTask(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthTask = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTask = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("github.com/containerd/containerd/api/types/task/task.proto", fileDescriptorTask) +} + +var fileDescriptorTask = []byte{ + // 543 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xcf, 0x6e, 0xd3, 0x4c, + 0x14, 0xc5, 0x33, 0x6e, 0xe3, 0x24, 0xe3, 0xb6, 0x9f, 0x3f, 0x13, 0x55, 0xc6, 0x20, 0xdb, 0xea, + 0xca, 0x62, 0x61, 0x8b, 0x74, 0xc7, 0x2e, 0xff, 0x84, 0x2c, 0x24, 0x37, 0x72, 0x12, 0xb1, 0x8c, + 0x9c, 0x78, 0x62, 0x46, 0x6d, 0x66, 0x2c, 0x7b, 0x0c, 0x64, 0xc7, 0x12, 0x75, 0xc5, 0x0b, 0x74, + 0x05, 0x4f, 0xc1, 0x13, 0x64, 0xc9, 0x0a, 0xb1, 0x0a, 0xd4, 0x4f, 0x82, 0xc6, 0x76, 0xd2, 0x08, + 0xd8, 0x8c, 0xee, 0x3d, 0xbf, 0x33, 0x77, 0xee, 0x1c, 0xf8, 0x22, 0xc2, 0xec, 0x4d, 0x36, 0xb7, + 0x17, 0x74, 0xe5, 0x2c, 0x28, 0x61, 0x01, 0x26, 0x28, 0x09, 0x0f, 0xcb, 0x20, 0xc6, 0x0e, 0x5b, + 0xc7, 0x28, 0x75, 0x58, 0x90, 0x5e, 0x17, 0x87, 0x1d, 0x27, 0x94, 0x51, 0xe5, 0xd1, 0x83, 0xcb, + 0x7e, 0xfb, 0xdc, 0x2e, 0x4c, 0x5a, 0x3b, 0xa2, 0x11, 0x2d, 0xb8, 0xc3, 0xab, 0xd2, 0xaa, 0x19, + 0x11, 0xa5, 0xd1, 0x0d, 0x72, 0x8a, 0x6e, 0x9e, 0x2d, 0x1d, 0x86, 0x57, 0x28, 0x65, 0xc1, 0x2a, + 0xae, 0x0c, 0x8f, 0xff, 0x34, 0x04, 0x64, 0x5d, 0xa2, 0x8b, 0x5c, 0x80, 0x8d, 0x51, 0x42, 0x17, + 0x28, 0x4d, 0x95, 0x0e, 0x3c, 0xd9, 0x3f, 0x3a, 0xc3, 0xa1, 0x0a, 0x4c, 0x60, 0xb5, 0x7a, 0xff, + 0xe5, 0x5b, 0x43, 0xea, 0xef, 0x74, 0x77, 0xe0, 0x4b, 0x7b, 0x93, 0x1b, 0x2a, 0xe7, 0x50, 0xc0, + 0xa1, 0x2a, 0x14, 0x4e, 0x31, 0xdf, 0x1a, 0x82, 0x3b, 0xf0, 0x05, 0x1c, 0x2a, 0x32, 0x3c, 0x8a, + 0x71, 0xa8, 0x1e, 0x99, 0xc0, 0x3a, 0xf5, 
0x79, 0xa9, 0x5c, 0x42, 0x31, 0x65, 0x01, 0xcb, 0x52, + 0xf5, 0xd8, 0x04, 0xd6, 0x59, 0xe7, 0x89, 0xfd, 0x8f, 0x1f, 0xda, 0xe3, 0xc2, 0xe2, 0x57, 0x56, + 0xa5, 0x0d, 0xeb, 0x29, 0x0b, 0x31, 0x51, 0xeb, 0xfc, 0x05, 0xbf, 0x6c, 0x94, 0x73, 0x3e, 0x2a, + 0xa4, 0x19, 0x53, 0xc5, 0x42, 0xae, 0xba, 0x4a, 0x47, 0x49, 0xa2, 0x36, 0xf6, 0x3a, 0x4a, 0x12, + 0x45, 0x83, 0x4d, 0x86, 0x92, 0x15, 0x26, 0xc1, 0x8d, 0xda, 0x34, 0x81, 0xd5, 0xf4, 0xf7, 0xbd, + 0x62, 0x40, 0x09, 0xbd, 0xc7, 0x6c, 0x56, 0xed, 0xd6, 0x2a, 0x16, 0x86, 0x5c, 0x2a, 0x57, 0x51, + 0xba, 0xb0, 0xc5, 0x3b, 0x14, 0xce, 0x02, 0xa6, 0x42, 0x13, 0x58, 0x52, 0x47, 0xb3, 0xcb, 0x40, + 0xed, 0x5d, 0xa0, 0xf6, 0x64, 0x97, 0x78, 0xaf, 0xb9, 0xd9, 0x1a, 0xb5, 0x4f, 0x3f, 0x0d, 0xe0, + 0x37, 0xcb, 0x6b, 0x5d, 0x76, 0xe1, 0x42, 0xa9, 0xca, 0xd8, 0x25, 0x4b, 0xba, 0xcb, 0x06, 0x3c, + 0x64, 0x63, 0xc1, 0x63, 0x4c, 0x96, 0xb4, 0xc8, 0x51, 0xea, 0xb4, 0xff, 0x1a, 0xdf, 0x25, 0x6b, + 0xbf, 0x70, 0x3c, 0xfb, 0x0e, 0xa0, 0x58, 0x2d, 0xa6, 0xc3, 0xc6, 0xd4, 0x7b, 0xe5, 0x5d, 0xbd, + 0xf6, 0xe4, 0x9a, 0xf6, 0xff, 0xed, 0x9d, 0x79, 0x5a, 0x82, 0x29, 0xb9, 0x26, 0xf4, 0x1d, 0xe1, + 0xbc, 0xef, 0x0f, 0xbb, 0x93, 0xe1, 0x40, 0x06, 0x87, 0xbc, 0x9f, 0xa0, 0x80, 0xa1, 0x90, 0x73, + 0x7f, 0xea, 0x79, 0xae, 0xf7, 0x52, 0x16, 0x0e, 0xb9, 0x9f, 0x11, 0x82, 0x49, 0xc4, 0xf9, 0x78, + 0x72, 0x35, 0x1a, 0x0d, 0x07, 0xf2, 0xd1, 0x21, 0x1f, 0x33, 0x1a, 0xc7, 0x28, 0x54, 0x9e, 0x42, + 0x71, 0xd4, 0x9d, 0x8e, 0x87, 0x03, 0xf9, 0x58, 0x93, 0x6f, 0xef, 0xcc, 0x93, 0x12, 0x8f, 0x82, + 0x2c, 0x2d, 0xa7, 0x73, 0xca, 0xa7, 0xd7, 0x0f, 0x6f, 0x73, 0x8c, 0x49, 0xa4, 0x9d, 0x7d, 0xfc, + 0xac, 0xd7, 0xbe, 0x7e, 0xd1, 0xab, 0xdf, 0xf4, 0xd4, 0xcd, 0xbd, 0x5e, 0xfb, 0x71, 0xaf, 0xd7, + 0x3e, 0xe4, 0x3a, 0xd8, 0xe4, 0x3a, 0xf8, 0x96, 0xeb, 0xe0, 0x57, 0xae, 0x83, 0xb9, 0x58, 0xc4, + 0x70, 0xf9, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x19, 0xf7, 0x5b, 0x8f, 0x4e, 0x03, 0x00, 0x00, +} diff --git a/vendor/github.com/containerd/containerd/api/types/task/task.proto b/vendor/github.com/containerd/containerd/api/types/task/task.proto new file mode 100644 index 000000000000..5845edf292e8 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/types/task/task.proto @@ -0,0 +1,41 @@ +syntax = "proto3"; + +package containerd.v1.types; + +import "gogoproto/gogo.proto"; +import "google/protobuf/timestamp.proto"; +import "google/protobuf/any.proto"; + +enum Status { + option (gogoproto.goproto_enum_prefix) = false; + option (gogoproto.enum_customname) = "Status"; + + UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "StatusUnknown"]; + CREATED = 1 [(gogoproto.enumvalue_customname) = "StatusCreated"]; + RUNNING = 2 [(gogoproto.enumvalue_customname) = "StatusRunning"]; + STOPPED = 3 [(gogoproto.enumvalue_customname) = "StatusStopped"]; + PAUSED = 4 [(gogoproto.enumvalue_customname) = "StatusPaused"]; + PAUSING = 5 [(gogoproto.enumvalue_customname) = "StatusPausing"]; +} + +message Process { + string container_id = 1; + string id = 2; + uint32 pid = 3; + Status status = 4; + string stdin = 5; + string stdout = 6; + string stderr = 7; + bool terminal = 8; + uint32 exit_status = 9; + google.protobuf.Timestamp exited_at = 10 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; +} + +message ProcessInfo { + // PID is the process ID. + uint32 pid = 1; + // Info contains additional process information. + // + // Info varies by platform. 
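The generated Marshal/Size code above leans entirely on two hand-rolled helpers, encodeVarintTask and sovTask, which implement protobuf's base-128 varint wire encoding. A minimal standalone sketch of the same encoding, with names that are only illustrative and not part of the vendored package:

```go
package main

import "fmt"

// putUvarint appends v to buf using protobuf's base-128 varint encoding:
// low 7 bits first, high bit set on every byte except the last.
func putUvarint(buf []byte, v uint64) []byte {
	for v >= 1<<7 {
		buf = append(buf, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(buf, byte(v))
}

// uvarintLen mirrors sovTask: the number of bytes putUvarint will emit.
func uvarintLen(v uint64) (n int) {
	for {
		n++
		v >>= 7
		if v == 0 {
			return n
		}
	}
}

func main() {
	b := putUvarint(nil, 300)
	fmt.Printf("% x (len %d)\n", b, uvarintLen(300)) // ac 02 (len 2)
}
```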
+ google.protobuf.Any info = 2; +} diff --git a/vendor/github.com/containerd/containerd/containers/containers.go b/vendor/github.com/containerd/containerd/containers/containers.go new file mode 100644 index 000000000000..df4ad83c9ebf --- /dev/null +++ b/vendor/github.com/containerd/containerd/containers/containers.go @@ -0,0 +1,92 @@ +package containers + +import ( + "context" + "time" + + "github.com/gogo/protobuf/types" +) + +// Container represents the set of data pinned by a container. Unless otherwise +// noted, the resources here are considered in use by the container. +// +// The resources specified in this object are used to create tasks from the container. +type Container struct { + // ID uniquely identifies the container in a nameapace. + // + // This property is required and cannot be changed after creation. + ID string + + // Labels provide metadata extension for a contaienr. + // + // These are optional and fully mutable. + Labels map[string]string + + // Image specifies the image reference used for a container. + // + // This property is optional but immutable. + Image string + + // Runtime specifies which runtime should be used when launching container + // tasks. + // + // This property is required and immutable. + Runtime RuntimeInfo + + // Spec should carry the the runtime specification used to implement the + // container. + // + // This field is required but mutable. + Spec *types.Any + + // SnapshotKey specifies the snapshot key to use for the container's root + // filesystem. When starting a task from this container, a caller should + // look up the mounts from the snapshot service and include those on the + // task create request. + // + // This field is not required but immutable. + SnapshotKey string + + // Snapshotter specifies the snapshotter name used for rootfs + // + // This field is not required but immutable. + Snapshotter string + + // CreatedAt is the time at which the container was created. + CreatedAt time.Time + + // UpdatedAt is the time at which the container was updated. + UpdatedAt time.Time + + // Extensions stores client-specified metadata + Extensions map[string]types.Any +} + +// RuntimeInfo holds runtime specific information +type RuntimeInfo struct { + Name string + Options *types.Any +} + +// Store interacts with the underlying container storage +type Store interface { + Get(ctx context.Context, id string) (Container, error) + + // List returns containers that match one or more of the provided filters. + List(ctx context.Context, filters ...string) ([]Container, error) + + // Create a container in the store from the provided container. + Create(ctx context.Context, container Container) (Container, error) + + // Update the container with the provided container object. ID must be set. + // + // If one or more fieldpaths are provided, only the field corresponding to + // the fieldpaths will be mutated. + Update(ctx context.Context, container Container, fieldpaths ...string) (Container, error) + + // Delete a container using the id. + // + // nil will be returned on success. If the container is not known to the + // store, ErrNotFound will be returned. 
+ Delete(ctx context.Context, id string) error +} diff --git a/vendor/github.com/containerd/containerd/dialer/dialer.go b/vendor/github.com/containerd/containerd/dialer/dialer.go new file mode 100644 index 000000000000..65af69f9bcab --- /dev/null +++ b/vendor/github.com/containerd/containerd/dialer/dialer.go @@ -0,0 +1,51 @@ +package dialer + +import ( + "net" + "time" + + "github.com/pkg/errors" +) + +type dialResult struct { + c net.Conn + err error +} + +// Dialer returns a GRPC net.Conn connected to the provided address +func Dialer(address string, timeout time.Duration) (net.Conn, error) { + var ( + stopC = make(chan struct{}) + synC = make(chan *dialResult) + ) + go func() { + defer close(synC) + for { + select { + case <-stopC: + return + default: + c, err := dialer(address, timeout) + if isNoent(err) { + <-time.After(10 * time.Millisecond) + continue + } + synC <- &dialResult{c, err} + return + } + } + }() + select { + case dr := <-synC: + return dr.c, dr.err + case <-time.After(timeout): + close(stopC) + go func() { + dr := <-synC + if dr != nil { + dr.c.Close() + } + }() + return nil, errors.Errorf("dial %s: timeout", address) + } +} diff --git a/vendor/github.com/containerd/containerd/dialer/dialer_unix.go b/vendor/github.com/containerd/containerd/dialer/dialer_unix.go new file mode 100644 index 000000000000..7f8d43b03105 --- /dev/null +++ b/vendor/github.com/containerd/containerd/dialer/dialer_unix.go @@ -0,0 +1,36 @@ +// +build !windows + +package dialer + +import ( + "fmt" + "net" + "os" + "strings" + "syscall" + "time" +) + +// DialAddress returns the address with unix:// prepended to the +// provided address +func DialAddress(address string) string { + return fmt.Sprintf("unix://%s", address) +} + +func isNoent(err error) bool { + if err != nil { + if nerr, ok := err.(*net.OpError); ok { + if serr, ok := nerr.Err.(*os.SyscallError); ok { + if serr.Err == syscall.ENOENT { + return true + } + } + } + } + return false +} + +func dialer(address string, timeout time.Duration) (net.Conn, error) { + address = strings.TrimPrefix(address, "unix://") + return net.DialTimeout("unix", address, timeout) +} diff --git a/vendor/github.com/containerd/containerd/dialer/dialer_windows.go b/vendor/github.com/containerd/containerd/dialer/dialer_windows.go new file mode 100644 index 000000000000..2aac03898a35 --- /dev/null +++ b/vendor/github.com/containerd/containerd/dialer/dialer_windows.go @@ -0,0 +1,30 @@ +package dialer + +import ( + "net" + "os" + "syscall" + "time" + + winio "github.com/Microsoft/go-winio" +) + +func isNoent(err error) bool { + if err != nil { + if oerr, ok := err.(*os.PathError); ok { + if oerr.Err == syscall.ENOENT { + return true + } + } + } + return false +} + +func dialer(address string, timeout time.Duration) (net.Conn, error) { + return winio.DialPipe(address, &timeout) +} + +// DialAddress returns the dial address +func DialAddress(address string) string { + return address +} diff --git a/vendor/github.com/containerd/containerd/errdefs/errors.go b/vendor/github.com/containerd/containerd/errdefs/errors.go new file mode 100644 index 000000000000..b4d6ea860b04 --- /dev/null +++ b/vendor/github.com/containerd/containerd/errdefs/errors.go @@ -0,0 +1,62 @@ +// Package errdefs defines the common errors used throughout containerd +// packages. +// +// Use with errors.Wrap and error.Wrapf to add context to an error. +// +// To detect an error class, use the IsXXX functions to tell whether an error +// is of a certain type. 
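The dialer package above keeps retrying the platform-specific dial while the socket does not yet exist (ENOENT) and gives up once the caller's timeout elapses. A hedged usage sketch; the socket path is only an assumption for illustration:

```go
package main

import (
	"log"
	"time"

	"github.com/containerd/containerd/dialer"
)

func main() {
	// Hypothetical containerd socket path; substitute your own.
	address := "/run/containerd/containerd.sock"

	// Dialer retries while the socket is missing, up to the timeout.
	conn, err := dialer.Dialer(address, 5*time.Second)
	if err != nil {
		log.Fatalf("dial %s: %v", address, err)
	}
	defer conn.Close()

	// DialAddress is the form handed to a gRPC client ("unix://..." on Linux).
	log.Println("connected via", dialer.DialAddress(address))
}
```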
+// +// The functions ToGRPC and FromGRPC can be used to map server-side and +// client-side errors to the correct types. +package errdefs + +import "github.com/pkg/errors" + +// Definitions of common error types used throughout containerd. All containerd +// errors returned by most packages will map into one of these errors classes. +// Packages should return errors of these types when they want to instruct a +// client to take a particular action. +// +// For the most part, we just try to provide local grpc errors. Most conditions +// map very well to those defined by grpc. +var ( + ErrUnknown = errors.New("unknown") // used internally to represent a missed mapping. + ErrInvalidArgument = errors.New("invalid argument") + ErrNotFound = errors.New("not found") + ErrAlreadyExists = errors.New("already exists") + ErrFailedPrecondition = errors.New("failed precondition") + ErrUnavailable = errors.New("unavailable") + ErrNotImplemented = errors.New("not implemented") // represents not supported and unimplemented +) + +// IsInvalidArgument returns true if the error is due to an invalid argument +func IsInvalidArgument(err error) bool { + return errors.Cause(err) == ErrInvalidArgument +} + +// IsNotFound returns true if the error is due to a missing object +func IsNotFound(err error) bool { + return errors.Cause(err) == ErrNotFound +} + +// IsAlreadyExists returns true if the error is due to an already existing +// metadata item +func IsAlreadyExists(err error) bool { + return errors.Cause(err) == ErrAlreadyExists +} + +// IsFailedPrecondition returns true if an operation could not proceed to the +// lack of a particular condition +func IsFailedPrecondition(err error) bool { + return errors.Cause(err) == ErrFailedPrecondition +} + +// IsUnavailable returns true if the error is due to a resource being unavailable +func IsUnavailable(err error) bool { + return errors.Cause(err) == ErrUnavailable +} + +// IsNotImplemented returns true if the error is due to not being implemented +func IsNotImplemented(err error) bool { + return errors.Cause(err) == ErrNotImplemented +} diff --git a/vendor/github.com/containerd/containerd/errdefs/grpc.go b/vendor/github.com/containerd/containerd/errdefs/grpc.go new file mode 100644 index 000000000000..2aa2e11b4bbd --- /dev/null +++ b/vendor/github.com/containerd/containerd/errdefs/grpc.go @@ -0,0 +1,109 @@ +package errdefs + +import ( + "strings" + + "github.com/pkg/errors" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// ToGRPC will attempt to map the backend containerd error into a grpc error, +// using the original error message as a description. +// +// Further information may be extracted from certain errors depending on their +// type. +// +// If the error is unmapped, the original error will be returned to be handled +// by the regular grpc error handling stack. 
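The errdefs doc comment above describes the intended pattern: wrap one of the sentinel errors with github.com/pkg/errors so callers keep the error class while gaining context, then classify with the IsXXX helpers, which unwrap via errors.Cause. A small sketch:

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/errdefs"
	"github.com/pkg/errors"
)

// lookupContainer is a stand-in for a store lookup that misses.
func lookupContainer(id string) error {
	return errors.Wrapf(errdefs.ErrNotFound, "container %q", id)
}

func main() {
	err := lookupContainer("web")

	// The wrap keeps ErrNotFound as the cause, so classification still works.
	fmt.Println(errdefs.IsNotFound(err)) // true
	fmt.Println(err)                     // container "web": not found
}
```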
+func ToGRPC(err error) error { + if err == nil { + return nil + } + + if isGRPCError(err) { + // error has already been mapped to grpc + return err + } + + switch { + case IsInvalidArgument(err): + return status.Errorf(codes.InvalidArgument, err.Error()) + case IsNotFound(err): + return status.Errorf(codes.NotFound, err.Error()) + case IsAlreadyExists(err): + return status.Errorf(codes.AlreadyExists, err.Error()) + case IsFailedPrecondition(err): + return status.Errorf(codes.FailedPrecondition, err.Error()) + case IsUnavailable(err): + return status.Errorf(codes.Unavailable, err.Error()) + case IsNotImplemented(err): + return status.Errorf(codes.Unimplemented, err.Error()) + } + + return err +} + +// ToGRPCf maps the error to grpc error codes, assembling the formatting string +// and combining it with the target error string. +// +// This is equivalent to errors.ToGRPC(errors.Wrapf(err, format, args...)) +func ToGRPCf(err error, format string, args ...interface{}) error { + return ToGRPC(errors.Wrapf(err, format, args...)) +} + +// FromGRPC returns the underlying error from a grpc service based on the grpc error code +func FromGRPC(err error) error { + if err == nil { + return nil + } + + var cls error // divide these into error classes, becomes the cause + + switch grpc.Code(err) { + case codes.InvalidArgument: + cls = ErrInvalidArgument + case codes.AlreadyExists: + cls = ErrAlreadyExists + case codes.NotFound: + cls = ErrNotFound + case codes.Unavailable: + cls = ErrUnavailable + case codes.FailedPrecondition: + cls = ErrFailedPrecondition + case codes.Unimplemented: + cls = ErrNotImplemented + default: + cls = ErrUnknown + } + + msg := rebaseMessage(cls, err) + if msg != "" { + err = errors.Wrapf(cls, msg) + } else { + err = errors.WithStack(cls) + } + + return err +} + +// rebaseMessage removes the repeats for an error at the end of an error +// string. This will happen when taking an error over grpc then remapping it. +// +// Effectively, we just remove the string of cls from the end of err if it +// appears there. +func rebaseMessage(cls error, err error) string { + desc := grpc.ErrorDesc(err) + clss := cls.Error() + if desc == clss { + return "" + } + + return strings.TrimSuffix(desc, ": "+clss) +} + +func isGRPCError(err error) bool { + _, ok := status.FromError(err) + return ok +} diff --git a/vendor/github.com/containerd/containerd/namespaces/context.go b/vendor/github.com/containerd/containerd/namespaces/context.go new file mode 100644 index 000000000000..70871148735d --- /dev/null +++ b/vendor/github.com/containerd/containerd/namespaces/context.go @@ -0,0 +1,63 @@ +package namespaces + +import ( + "os" + + "github.com/containerd/containerd/errdefs" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +const ( + // NamespaceEnvVar is the environment variable key name + NamespaceEnvVar = "CONTAINERD_NAMESPACE" + // Default is the name of the default namespace + Default = "default" +) + +type namespaceKey struct{} + +// WithNamespace sets a given namespace on the context +func WithNamespace(ctx context.Context, namespace string) context.Context { + ctx = context.WithValue(ctx, namespaceKey{}, namespace) // set our key for namespace + + // also store on the grpc headers so it gets picked up by any clients that + // are using this. 
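ToGRPC and FromGRPC above are designed to round-trip the error class across a gRPC boundary: the server side maps the sentinel to a status code, the client side maps the code back, and the IsXXX helpers keep working. A sketch of that round trip in-process, with no real connection involved and an illustrative message string:

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/errdefs"
	"github.com/pkg/errors"
)

func main() {
	// Server side: a wrapped sentinel becomes a codes.NotFound status error.
	serverErr := errdefs.ToGRPC(errors.Wrap(errdefs.ErrNotFound, "image busybox"))

	// Client side: the status code is mapped back onto the sentinel,
	// and rebaseMessage strips the duplicated class suffix.
	clientErr := errdefs.FromGRPC(serverErr)

	fmt.Println(errdefs.IsNotFound(clientErr)) // true
	fmt.Println(clientErr)                     // image busybox: not found
}
```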
+ return withGRPCNamespaceHeader(ctx, namespace) +} + +// NamespaceFromEnv uses the namespace defined in CONTAINERD_NAMESPACE or +// default +func NamespaceFromEnv(ctx context.Context) context.Context { + namespace := os.Getenv(NamespaceEnvVar) + if namespace == "" { + namespace = Default + } + return WithNamespace(ctx, namespace) +} + +// Namespace returns the namespace from the context. +// +// The namespace is not guaranteed to be valid. +func Namespace(ctx context.Context) (string, bool) { + namespace, ok := ctx.Value(namespaceKey{}).(string) + if !ok { + return fromGRPCHeader(ctx) + } + + return namespace, ok +} + +// NamespaceRequired returns the valid namepace from the context or an error. +func NamespaceRequired(ctx context.Context) (string, error) { + namespace, ok := Namespace(ctx) + if !ok || namespace == "" { + return "", errors.Wrapf(errdefs.ErrFailedPrecondition, "namespace is required") + } + + if err := Validate(namespace); err != nil { + return "", errors.Wrap(err, "namespace validation") + } + + return namespace, nil +} diff --git a/vendor/github.com/containerd/containerd/namespaces/grpc.go b/vendor/github.com/containerd/containerd/namespaces/grpc.go new file mode 100644 index 000000000000..c18fc933d133 --- /dev/null +++ b/vendor/github.com/containerd/containerd/namespaces/grpc.go @@ -0,0 +1,44 @@ +package namespaces + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +const ( + // GRPCHeader defines the header name for specifying a containerd namespace. + GRPCHeader = "containerd-namespace" +) + +// NOTE(stevvooe): We can stub this file out if we don't want a grpc dependency here. + +func withGRPCNamespaceHeader(ctx context.Context, namespace string) context.Context { + // also store on the grpc headers so it gets picked up by any clients that + // are using this. + nsheader := metadata.Pairs(GRPCHeader, namespace) + md, ok := metadata.FromOutgoingContext(ctx) // merge with outgoing context. + if !ok { + md = nsheader + } else { + // order ensures the latest is first in this list. + md = metadata.Join(nsheader, md) + } + + return metadata.NewOutgoingContext(ctx, md) +} + +func fromGRPCHeader(ctx context.Context) (string, bool) { + // try to extract for use in grpc servers. + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + // TODO(stevvooe): Check outgoing context? + return "", false + } + + values := md[GRPCHeader] + if len(values) == 0 { + return "", false + } + + return values[0], true +} diff --git a/vendor/github.com/containerd/containerd/namespaces/store.go b/vendor/github.com/containerd/containerd/namespaces/store.go new file mode 100644 index 000000000000..68ff714bb7fb --- /dev/null +++ b/vendor/github.com/containerd/containerd/namespaces/store.go @@ -0,0 +1,21 @@ +package namespaces + +import "context" + +// Store provides introspection about namespaces. +// +// Note that these are slightly different than other objects, which are record +// oriented. A namespace is really just a name and a set of labels. Objects +// that belong to a namespace are returned when the namespace is assigned to a +// given context. +// +// +type Store interface { + Create(ctx context.Context, namespace string, labels map[string]string) error + Labels(ctx context.Context, namespace string) (map[string]string, error) + SetLabel(ctx context.Context, namespace, key, value string) error + List(ctx context.Context) ([]string, error) + + // Delete removes the namespace. The namespace must be empty to be deleted. 
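The context helpers above are the client-facing surface of the namespaces package: WithNamespace stores the name on the context and on the outgoing gRPC metadata (see grpc.go), Namespace reads it back, and NamespaceRequired additionally runs the Validate check defined in validate.go further below. A short sketch:

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/namespaces"
	"golang.org/x/net/context"
)

func main() {
	ctx := namespaces.WithNamespace(context.Background(), "minikube")

	// Plain read; falls back to the incoming gRPC header when unset.
	if ns, ok := namespaces.Namespace(ctx); ok {
		fmt.Println(ns) // minikube
	}

	// Read plus validation against the RFC 1035-style identifier rules.
	ns, err := namespaces.NamespaceRequired(ctx)
	fmt.Println(ns, err) // minikube <nil>
}
```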
+ Delete(ctx context.Context, namespace string) error +} diff --git a/vendor/github.com/containerd/containerd/namespaces/validate.go b/vendor/github.com/containerd/containerd/namespaces/validate.go new file mode 100644 index 000000000000..ff97a8cc8b55 --- /dev/null +++ b/vendor/github.com/containerd/containerd/namespaces/validate.go @@ -0,0 +1,67 @@ +// Package namespaces provides tools for working with namespaces across +// containerd. +// +// Namespaces collect resources such as containers and images, into a unique +// identifier space. This means that two applications can use the same +// identifiers and not conflict while using containerd. +// +// This package can be used to ensure that client and server functions +// correctly store the namespace on the context. +package namespaces + +import ( + "regexp" + + "github.com/containerd/containerd/errdefs" + "github.com/pkg/errors" +) + +const ( + maxLength = 76 + alpha = `[A-Za-z]` + alphanum = `[A-Za-z0-9]+` + label = alpha + alphanum + `(:?[-]+` + alpha + alphanum + `)*` +) + +var ( + // namespaceRe validates that a namespace matches valid identifiers. + // + // Rules for domains, defined in RFC 1035, section 2.3.1, are used for + // namespaces. + namespaceRe = regexp.MustCompile(reAnchor(label + reGroup("[.]"+reGroup(label)) + "*")) +) + +// Validate returns nil if the string s is a valid namespace. +// +// To allow such namespace identifiers to be used across various contexts +// safely, the character set has been restricted to that defined for domains in +// RFC 1035, section 2.3.1. This will make namespace identifiers safe for use +// across networks, filesystems and other media. +// +// The identifier specification departs from RFC 1035 in that it allows +// "labels" to start with number and only enforces a total length restriction +// of 76 characters. +// +// While the character set may be expanded in the future, namespace identifiers +// are guaranteed to be safely used as filesystem path components. +// +// For the most part, this doesn't need to be called directly when using the +// context-oriented functions. +func Validate(s string) error { + if len(s) > maxLength { + return errors.Wrapf(errdefs.ErrInvalidArgument, "namespace %q greater than maximum length (%d characters)", s, maxLength) + } + + if !namespaceRe.MatchString(s) { + return errors.Wrapf(errdefs.ErrInvalidArgument, "namespace %q must match %v", s, namespaceRe) + } + return nil +} + +func reGroup(s string) string { + return `(?:` + s + `)` +} + +func reAnchor(s string) string { + return `^` + s + `$` +} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/args.go b/vendor/github.com/containernetworking/cni/pkg/invoke/args.go index ba9d0c3b8490..39b639723051 100644 --- a/vendor/github.com/containernetworking/cni/pkg/invoke/args.go +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/args.go @@ -57,13 +57,16 @@ func (args *Args) AsEnv() []string { pluginArgsStr = stringify(args.PluginArgs) } - env = append(env, - "CNI_COMMAND="+args.Command, - "CNI_CONTAINERID="+args.ContainerID, - "CNI_NETNS="+args.NetNS, - "CNI_ARGS="+pluginArgsStr, - "CNI_IFNAME="+args.IfName, - "CNI_PATH="+args.Path) + // Ensure that the custom values are first, so any value present in + // the process environment won't override them. + env = append([]string{ + "CNI_COMMAND=" + args.Command, + "CNI_CONTAINERID=" + args.ContainerID, + "CNI_NETNS=" + args.NetNS, + "CNI_ARGS=" + pluginArgsStr, + "CNI_IFNAME=" + args.IfName, + "CNI_PATH=" + args.Path, + }, env...) 
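The AsEnv change above reverses the ordering deliberately: per its comment, the computed CNI_* values are placed ahead of the inherited environment so a stray CNI_PATH or CNI_COMMAND already present in the caller's environment cannot shadow them. The prepend pattern in isolation, with purely illustrative values:

```go
package main

import (
	"fmt"
	"os"
)

// pluginEnv builds the environment handed to a CNI plugin: the computed
// values go first, the inherited process environment follows.
func pluginEnv(command, containerID string) []string {
	return append([]string{
		"CNI_COMMAND=" + command,
		"CNI_CONTAINERID=" + containerID,
	}, os.Environ()...)
}

func main() {
	env := pluginEnv("ADD", "abc123")
	fmt.Println(env[0], env[1]) // CNI_COMMAND=ADD CNI_CONTAINERID=abc123
}
```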
return env } diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go b/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go index d1bd860d37e7..93f1e75d9fa7 100644 --- a/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go @@ -50,13 +50,9 @@ func pluginErr(err error, output []byte) error { if _, ok := err.(*exec.ExitError); ok { emsg := types.Error{} if perr := json.Unmarshal(output, &emsg); perr != nil { - return fmt.Errorf("netplugin failed but error parsing its diagnostic message %q: %v", string(output), perr) + emsg.Msg = fmt.Sprintf("netplugin failed but error parsing its diagnostic message %q: %v", string(output), perr) } - details := "" - if emsg.Details != "" { - details = fmt.Sprintf("; %v", emsg.Details) - } - return fmt.Errorf("%v%v", emsg.Msg, details) + return &emsg } return err diff --git a/vendor/github.com/containernetworking/cni/pkg/types/020/types.go b/vendor/github.com/containernetworking/cni/pkg/types/020/types.go index 666cfe93e7e7..2833aba787f7 100644 --- a/vendor/github.com/containernetworking/cni/pkg/types/020/types.go +++ b/vendor/github.com/containernetworking/cni/pkg/types/020/types.go @@ -23,9 +23,9 @@ import ( "github.com/containernetworking/cni/pkg/types" ) -const implementedSpecVersion string = "0.2.0" +const ImplementedSpecVersion string = "0.2.0" -var SupportedVersions = []string{"", "0.1.0", implementedSpecVersion} +var SupportedVersions = []string{"", "0.1.0", ImplementedSpecVersion} // Compatibility types for CNI version 0.1.0 and 0.2.0 @@ -39,7 +39,7 @@ func NewResult(data []byte) (types.Result, error) { func GetResult(r types.Result) (*Result, error) { // We expect version 0.1.0/0.2.0 results - result020, err := r.GetAsVersion(implementedSpecVersion) + result020, err := r.GetAsVersion(ImplementedSpecVersion) if err != nil { return nil, err } @@ -52,18 +52,20 @@ func GetResult(r types.Result) (*Result, error) { // Result is what gets returned from the plugin (via stdout) to the caller type Result struct { - IP4 *IPConfig `json:"ip4,omitempty"` - IP6 *IPConfig `json:"ip6,omitempty"` - DNS types.DNS `json:"dns,omitempty"` + CNIVersion string `json:"cniVersion,omitempty"` + IP4 *IPConfig `json:"ip4,omitempty"` + IP6 *IPConfig `json:"ip6,omitempty"` + DNS types.DNS `json:"dns,omitempty"` } func (r *Result) Version() string { - return implementedSpecVersion + return ImplementedSpecVersion } func (r *Result) GetAsVersion(version string) (types.Result, error) { for _, supportedVersion := range SupportedVersions { if version == supportedVersion { + r.CNIVersion = version return r, nil } } diff --git a/vendor/github.com/containernetworking/cni/pkg/types/args.go b/vendor/github.com/containernetworking/cni/pkg/types/args.go index 66dcf9eae725..bd8640fc969a 100644 --- a/vendor/github.com/containernetworking/cni/pkg/types/args.go +++ b/vendor/github.com/containernetworking/cni/pkg/types/args.go @@ -63,6 +63,12 @@ func GetKeyField(keyString string, v reflect.Value) reflect.Value { return v.Elem().FieldByName(keyString) } +// UnmarshalableArgsError is used to indicate error unmarshalling args +// from the args-string in the form "K=V;K2=V2;..." +type UnmarshalableArgsError struct { + error +} + // LoadArgs parses args from a string in the form "K=V;K2=V2;..." 
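LoadArgs, whose body (including the new UnmarshalableArgsError guard) follows below, splits the "K=V;K2=V2" string and assigns each value to the struct field of the same name, which must implement encoding.TextUnmarshaler. A sketch using a purely local, hypothetical args struct rather than the library's own argument types:

```go
package main

import (
	"fmt"

	"github.com/containernetworking/cni/pkg/types"
)

// word is a trivial encoding.TextUnmarshaler; fields lacking this interface
// now produce an UnmarshalableArgsError instead of a type-assertion panic.
type word string

func (w *word) UnmarshalText(b []byte) error { *w = word(b); return nil }

// demoArgs is hypothetical; only the exported field names matter, since
// LoadArgs matches the K of every K=V pair against them.
type demoArgs struct {
	POD word
	NS  word
}

func main() {
	var a demoArgs
	if err := types.LoadArgs("POD=web;NS=default", &a); err != nil {
		panic(err)
	}
	fmt.Println(a.POD, a.NS) // web default
}
```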
func LoadArgs(args string, container interface{}) error { if args == "" { @@ -85,8 +91,13 @@ func LoadArgs(args string, container interface{}) error { unknownArgs = append(unknownArgs, pair) continue } - - u := keyField.Addr().Interface().(encoding.TextUnmarshaler) + keyFieldIface := keyField.Addr().Interface() + u, ok := keyFieldIface.(encoding.TextUnmarshaler) + if !ok { + return UnmarshalableArgsError{fmt.Errorf( + "ARGS: cannot unmarshal into field '%s' - type '%s' does not implement encoding.TextUnmarshaler", + keyString, reflect.TypeOf(keyFieldIface))} + } err := u.UnmarshalText([]byte(valueString)) if err != nil { return fmt.Errorf("ARGS: error parsing value of pair %q: %v)", pair, err) diff --git a/vendor/github.com/containernetworking/cni/pkg/types/current/types.go b/vendor/github.com/containernetworking/cni/pkg/types/current/types.go index b89a5d3a0737..caac92ba77ca 100644 --- a/vendor/github.com/containernetworking/cni/pkg/types/current/types.go +++ b/vendor/github.com/containernetworking/cni/pkg/types/current/types.go @@ -24,9 +24,9 @@ import ( "github.com/containernetworking/cni/pkg/types/020" ) -const implementedSpecVersion string = "0.3.1" +const ImplementedSpecVersion string = "0.3.1" -var SupportedVersions = []string{"0.3.0", implementedSpecVersion} +var SupportedVersions = []string{"0.3.0", ImplementedSpecVersion} func NewResult(data []byte) (types.Result, error) { result := &Result{} @@ -37,7 +37,7 @@ func NewResult(data []byte) (types.Result, error) { } func GetResult(r types.Result) (*Result, error) { - resultCurrent, err := r.GetAsVersion(implementedSpecVersion) + resultCurrent, err := r.GetAsVersion(ImplementedSpecVersion) if err != nil { return nil, err } @@ -63,16 +63,16 @@ func convertFrom020(result types.Result) (*Result, error) { } newResult := &Result{ - DNS: oldResult.DNS, - Routes: []*types.Route{}, + CNIVersion: ImplementedSpecVersion, + DNS: oldResult.DNS, + Routes: []*types.Route{}, } if oldResult.IP4 != nil { newResult.IPs = append(newResult.IPs, &IPConfig{ - Version: "4", - Interface: -1, - Address: oldResult.IP4.IP, - Gateway: oldResult.IP4.Gateway, + Version: "4", + Address: oldResult.IP4.IP, + Gateway: oldResult.IP4.Gateway, }) for _, route := range oldResult.IP4.Routes { gw := route.GW @@ -88,10 +88,9 @@ func convertFrom020(result types.Result) (*Result, error) { if oldResult.IP6 != nil { newResult.IPs = append(newResult.IPs, &IPConfig{ - Version: "6", - Interface: -1, - Address: oldResult.IP6.IP, - Gateway: oldResult.IP6.Gateway, + Version: "6", + Address: oldResult.IP6.IP, + Gateway: oldResult.IP6.Gateway, }) for _, route := range oldResult.IP6.Routes { gw := route.GW @@ -117,6 +116,7 @@ func convertFrom030(result types.Result) (*Result, error) { if !ok { return nil, fmt.Errorf("failed to convert result") } + newResult.CNIVersion = ImplementedSpecVersion return newResult, nil } @@ -134,6 +134,7 @@ func NewResultFromResult(result types.Result) (*Result, error) { // Result is what gets returned from the plugin (via stdout) to the caller type Result struct { + CNIVersion string `json:"cniVersion,omitempty"` Interfaces []*Interface `json:"interfaces,omitempty"` IPs []*IPConfig `json:"ips,omitempty"` Routes []*types.Route `json:"routes,omitempty"` @@ -143,7 +144,8 @@ type Result struct { // Convert to the older 0.2.0 CNI spec Result type func (r *Result) convertTo020() (*types020.Result, error) { oldResult := &types020.Result{ - DNS: r.DNS, + CNIVersion: types020.ImplementedSpecVersion, + DNS: r.DNS, } for _, ip := range r.IPs { @@ -189,12 +191,13 @@ 
func (r *Result) convertTo020() (*types020.Result, error) { } func (r *Result) Version() string { - return implementedSpecVersion + return ImplementedSpecVersion } func (r *Result) GetAsVersion(version string) (types.Result, error) { switch version { - case "0.3.0", implementedSpecVersion: + case "0.3.0", ImplementedSpecVersion: + r.CNIVersion = version return r, nil case types020.SupportedVersions[0], types020.SupportedVersions[1], types020.SupportedVersions[2]: return r.convertTo020() @@ -244,12 +247,18 @@ func (i *Interface) String() string { return fmt.Sprintf("%+v", *i) } +// Int returns a pointer to the int value passed in. Used to +// set the IPConfig.Interface field. +func Int(v int) *int { + return &v +} + // IPConfig contains values necessary to configure an IP address on an interface type IPConfig struct { // IP version, either "4" or "6" Version string // Index into Result structs Interfaces list - Interface int + Interface *int Address net.IPNet Gateway net.IP } @@ -261,7 +270,7 @@ func (i *IPConfig) String() string { // JSON (un)marshallable types type ipConfig struct { Version string `json:"version"` - Interface int `json:"interface,omitempty"` + Interface *int `json:"interface,omitempty"` Address types.IPNet `json:"address"` Gateway net.IP `json:"gateway,omitempty"` } diff --git a/vendor/github.com/containernetworking/cni/pkg/types/types.go b/vendor/github.com/containernetworking/cni/pkg/types/types.go index 3263015afb7c..6412756007ed 100644 --- a/vendor/github.com/containernetworking/cni/pkg/types/types.go +++ b/vendor/github.com/containernetworking/cni/pkg/types/types.go @@ -136,7 +136,11 @@ type Error struct { } func (e *Error) Error() string { - return e.Msg + details := "" + if e.Details != "" { + details = fmt.Sprintf("; %v", e.Details) + } + return fmt.Sprintf("%v%v", e.Msg, details) } func (e *Error) Print() error { diff --git a/vendor/github.com/dchest/safefile/.travis.yml b/vendor/github.com/dchest/safefile/.travis.yml new file mode 100644 index 000000000000..354b7f8b2493 --- /dev/null +++ b/vendor/github.com/dchest/safefile/.travis.yml @@ -0,0 +1,8 @@ +language: go + +go: + - 1.1 + - 1.2 + - 1.3 + - 1.4 + - tip diff --git a/vendor/github.com/dchest/safefile/LICENSE b/vendor/github.com/dchest/safefile/LICENSE new file mode 100644 index 000000000000..e753ecd8ab70 --- /dev/null +++ b/vendor/github.com/dchest/safefile/LICENSE @@ -0,0 +1,26 @@ +Copyright (c) 2013 Dmitry Chestnykh +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials + provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/dchest/safefile/README.md b/vendor/github.com/dchest/safefile/README.md new file mode 100644 index 000000000000..b1894f35e9d2 --- /dev/null +++ b/vendor/github.com/dchest/safefile/README.md @@ -0,0 +1,44 @@ +# safefile + +[![Build Status](https://travis-ci.org/dchest/safefile.svg)](https://travis-ci.org/dchest/safefile) [![Windows Build status](https://ci.appveyor.com/api/projects/status/owlifxeekg75t2ho?svg=true)](https://ci.appveyor.com/project/dchest/safefile) + +Go package safefile implements safe "atomic" saving of files. + +Instead of truncating and overwriting the destination file, it creates a +temporary file in the same directory, writes to it, and then renames the +temporary file to the original name when calling Commit. + + +### Installation + +``` +$ go get github.com/dchest/safefile +``` + +### Documentation + + + +### Example + +```go +f, err := safefile.Create("/home/ken/report.txt", 0644) +if err != nil { + // ... +} +// Created temporary file /home/ken/sf-ppcyksu5hyw2mfec.tmp + +defer f.Close() + +_, err = io.WriteString(f, "Hello world") +if err != nil { + // ... +} +// Wrote "Hello world" to /home/ken/sf-ppcyksu5hyw2mfec.tmp + +err = f.Commit() +if err != nil { + // ... +} +// Renamed /home/ken/sf-ppcyksu5hyw2mfec.tmp to /home/ken/report.txt +``` diff --git a/vendor/github.com/dchest/safefile/appveyor.yml b/vendor/github.com/dchest/safefile/appveyor.yml new file mode 100644 index 000000000000..198fb33d6261 --- /dev/null +++ b/vendor/github.com/dchest/safefile/appveyor.yml @@ -0,0 +1,24 @@ +version: "{build}" + +os: Windows Server 2012 R2 + +clone_folder: c:\projects\src\github.com\dchest\safefile + +environment: + PATH: c:\projects\bin;%PATH% + GOPATH: c:\projects + NOTIFY_TIMEOUT: 5s + +install: + - go version + - go get golang.org/x/tools/cmd/vet + - go get -v -t ./... + +build_script: + - go tool vet -all . + - go build ./... + - go test -v -race ./... + +test: off + +deploy: off diff --git a/vendor/github.com/dchest/safefile/rename.go b/vendor/github.com/dchest/safefile/rename.go new file mode 100644 index 000000000000..3193f22036f6 --- /dev/null +++ b/vendor/github.com/dchest/safefile/rename.go @@ -0,0 +1,9 @@ +// +build !plan9,!windows windows,go1.5 + +package safefile + +import "os" + +func rename(oldname, newname string) error { + return os.Rename(oldname, newname) +} diff --git a/vendor/github.com/dchest/safefile/rename_nonatomic.go b/vendor/github.com/dchest/safefile/rename_nonatomic.go new file mode 100644 index 000000000000..8782c28db477 --- /dev/null +++ b/vendor/github.com/dchest/safefile/rename_nonatomic.go @@ -0,0 +1,51 @@ +// +build plan9 windows,!go1.5 + +// os.Rename on Windows before Go 1.5 and Plan 9 will not overwrite existing +// files, thus we cannot guarantee atomic saving of file by doing rename. +// We will have to do some voodoo to minimize data loss on those systems. 
+ +package safefile + +import ( + "os" + "path/filepath" +) + +func rename(oldname, newname string) error { + err := os.Rename(oldname, newname) + if err != nil { + // If newname exists ("original"), we will try renaming it to a + // new temporary name, then renaming oldname to the newname, + // and deleting the renamed original. If system crashes between + // renaming and deleting, the original file will still be available + // under the temporary name, so users can manually recover data. + // (No automatic recovery is possible because after crash the + // temporary name is not known.) + var origtmp string + for { + origtmp, err = makeTempName(newname, filepath.Base(newname)) + if err != nil { + return err + } + _, err = os.Stat(origtmp) + if err == nil { + continue // most likely will never happen + } + break + } + err = os.Rename(newname, origtmp) + if err != nil { + return err + } + err = os.Rename(oldname, newname) + if err != nil { + // Rename still fails, try to revert original rename, + // ignoring errors. + os.Rename(origtmp, newname) + return err + } + // Rename succeeded, now delete original file. + os.Remove(origtmp) + } + return nil +} diff --git a/vendor/github.com/dchest/safefile/safefile.go b/vendor/github.com/dchest/safefile/safefile.go new file mode 100644 index 000000000000..e7f21c1da697 --- /dev/null +++ b/vendor/github.com/dchest/safefile/safefile.go @@ -0,0 +1,197 @@ +// Copyright 2013 Dmitry Chestnykh. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package safefile implements safe "atomic" saving of files. +// +// Instead of truncating and overwriting the destination file, it creates a +// temporary file in the same directory, writes to it, and then renames the +// temporary file to the original name when calling Commit. +// +// Example: +// +// f, err := safefile.Create("/home/ken/report.txt", 0644) +// if err != nil { +// // ... +// } +// // Created temporary file /home/ken/sf-ppcyksu5hyw2mfec.tmp +// +// defer f.Close() +// +// _, err = io.WriteString(f, "Hello world") +// if err != nil { +// // ... +// } +// // Wrote "Hello world" to /home/ken/sf-ppcyksu5hyw2mfec.tmp +// +// err = f.Commit() +// if err != nil { +// // ... +// } +// // Renamed /home/ken/sf-ppcyksu5hyw2mfec.tmp to /home/ken/report.txt +// +package safefile + +import ( + "crypto/rand" + "encoding/base32" + "errors" + "io" + "os" + "path/filepath" + "strings" +) + +// ErrAlreadyCommitted error is returned when calling Commit on a file that +// has been already successfully committed. +var ErrAlreadyCommitted = errors.New("file already committed") + +type File struct { + *os.File + origName string + closeFunc func(*File) error + isClosed bool // if true, temporary file has been closed, but not renamed + isCommitted bool // if true, the file has been successfully committed +} + +func makeTempName(origname, prefix string) (tempname string, err error) { + origname = filepath.Clean(origname) + if len(origname) == 0 || origname[len(origname)-1] == filepath.Separator { + return "", os.ErrInvalid + } + // Generate 10 random bytes. + // This gives 80 bits of entropy, good enough + // for making temporary file name unpredictable. 
+ var rnd [10]byte + if _, err := rand.Read(rnd[:]); err != nil { + return "", err + } + name := prefix + "-" + strings.ToLower(base32.StdEncoding.EncodeToString(rnd[:])) + ".tmp" + return filepath.Join(filepath.Dir(origname), name), nil +} + +// Create creates a temporary file in the same directory as filename, +// which will be renamed to the given filename when calling Commit. +func Create(filename string, perm os.FileMode) (*File, error) { + for { + tempname, err := makeTempName(filename, "sf") + if err != nil { + return nil, err + } + f, err := os.OpenFile(tempname, os.O_RDWR|os.O_CREATE|os.O_EXCL, perm) + if err != nil { + if os.IsExist(err) { + continue + } + return nil, err + } + return &File{ + File: f, + origName: filename, + closeFunc: closeUncommitted, + }, nil + } +} + +// OrigName returns the original filename given to Create. +func (f *File) OrigName() string { + return f.origName +} + +// Close closes temporary file and removes it. +// If the file has been committed, Close is no-op. +func (f *File) Close() error { + return f.closeFunc(f) +} + +func closeUncommitted(f *File) error { + err0 := f.File.Close() + err1 := os.Remove(f.Name()) + f.closeFunc = closeAgainError + if err0 != nil { + return err0 + } + return err1 +} + +func closeAfterFailedRename(f *File) error { + // Remove temporary file. + // + // The note from Commit function applies here too, as we may be + // removing a different file. However, since we rely on our temporary + // names being unpredictable, this should not be a concern. + f.closeFunc = closeAgainError + return os.Remove(f.Name()) +} + +func closeCommitted(f *File) error { + // noop + return nil +} + +func closeAgainError(f *File) error { + return os.ErrInvalid +} + +// Commit safely commits data into the original file by syncing temporary +// file to disk, closing it and renaming to the original file name. +// +// In case of success, the temporary file is closed and no longer exists +// on disk. It is safe to call Close after Commit: the operation will do +// nothing. +// +// In case of error, the temporary file is still opened and exists on disk; +// it must be closed by callers by calling Close or by trying to commit again. + +// Note that when trying to Commit again after a failed Commit when the file +// has been closed, but not renamed to its original name (the new commit will +// try again to rename it), safefile cannot guarantee that the temporary file +// has not been changed, or that it is the same temporary file we were dealing +// with. However, since the temporary name is unpredictable, it is unlikely +// that this happened accidentally. If complete atomicity is needed, do not +// Commit again after error, write the file again. +func (f *File) Commit() error { + if f.isCommitted { + return ErrAlreadyCommitted + } + if !f.isClosed { + // Sync to disk. + err := f.Sync() + if err != nil { + return err + } + // Close underlying os.File. + err = f.File.Close() + if err != nil { + return err + } + f.isClosed = true + } + // Rename. + err := rename(f.Name(), f.origName) + if err != nil { + f.closeFunc = closeAfterFailedRename + return err + } + f.closeFunc = closeCommitted + f.isCommitted = true + return nil +} + +// WriteFile is a safe analog of ioutil.WriteFile. 
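WriteFile, whose body follows below, wraps the Create, Write, Commit sequence from the README example into a single call, returning before the rename if the write fails or is short. A minimal sketch; the path and contents are only illustrative:

```go
package main

import (
	"log"

	"github.com/dchest/safefile"
)

func main() {
	// Same guarantees as Create+Commit: data lands under a temporary name
	// first and only replaces the target file on success.
	if err := safefile.WriteFile("/tmp/report.txt", []byte("hello\n"), 0644); err != nil {
		log.Fatal(err)
	}
}
```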
+func WriteFile(filename string, data []byte, perm os.FileMode) error { + f, err := Create(filename, perm) + if err != nil { + return err + } + defer f.Close() + n, err := f.Write(data) + if err != nil { + return err + } + if err == nil && n < len(data) { + err = io.ErrShortWrite + return err + } + return f.Commit() +} diff --git a/vendor/github.com/docker/engine-api/LICENSE b/vendor/github.com/docker/engine-api/LICENSE deleted file mode 100644 index c157bff96a05..000000000000 --- a/vendor/github.com/docker/engine-api/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015-2016 Docker, Inc. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/docker/engine-api/client/client.go b/vendor/github.com/docker/engine-api/client/client.go deleted file mode 100644 index 8c8c6fd18280..000000000000 --- a/vendor/github.com/docker/engine-api/client/client.go +++ /dev/null @@ -1,141 +0,0 @@ -package client - -import ( - "fmt" - "net/http" - "net/url" - "os" - "path/filepath" - "strings" - - "github.com/docker/engine-api/client/transport" - "github.com/docker/go-connections/tlsconfig" -) - -// Client is the API client that performs all operations -// against a docker server. -type Client struct { - // proto holds the client protocol i.e. unix. - proto string - // addr holds the client address. - addr string - // basePath holds the path to prepend to the requests. - basePath string - // transport is the interface to send request with, it implements transport.Client. - transport transport.Client - // version of the server to talk to. - version string - // custom http headers configured by users. - customHTTPHeaders map[string]string -} - -// NewEnvClient initializes a new API client based on environment variables. -// Use DOCKER_HOST to set the url to the docker server. -// Use DOCKER_API_VERSION to set the version of the API to reach, leave empty for latest. -// Use DOCKER_CERT_PATH to load the tls certificates from. -// Use DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default. -func NewEnvClient() (*Client, error) { - var client *http.Client - if dockerCertPath := os.Getenv("DOCKER_CERT_PATH"); dockerCertPath != "" { - options := tlsconfig.Options{ - CAFile: filepath.Join(dockerCertPath, "ca.pem"), - CertFile: filepath.Join(dockerCertPath, "cert.pem"), - KeyFile: filepath.Join(dockerCertPath, "key.pem"), - InsecureSkipVerify: os.Getenv("DOCKER_TLS_VERIFY") == "", - } - tlsc, err := tlsconfig.Client(options) - if err != nil { - return nil, err - } - - client = &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: tlsc, - }, - } - } - - host := os.Getenv("DOCKER_HOST") - if host == "" { - host = DefaultDockerHost - } - return NewClient(host, os.Getenv("DOCKER_API_VERSION"), client, nil) -} - -// NewClient initializes a new API client for the given host and API version. -// It won't send any version information if the version number is empty. -// It uses the given http client as transport. -// It also initializes the custom http headers to add to each request. -func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) { - proto, addr, basePath, err := ParseHost(host) - if err != nil { - return nil, err - } - - transport, err := transport.NewTransportWithHTTP(proto, addr, client) - if err != nil { - return nil, err - } - - return &Client{ - proto: proto, - addr: addr, - basePath: basePath, - transport: transport, - version: version, - customHTTPHeaders: httpHeaders, - }, nil -} - -// getAPIPath returns the versioned request path to call the api. 
-// It appends the query parameters to the path if they are not empty. -func (cli *Client) getAPIPath(p string, query url.Values) string { - var apiPath string - if cli.version != "" { - v := strings.TrimPrefix(cli.version, "v") - apiPath = fmt.Sprintf("%s/v%s%s", cli.basePath, v, p) - } else { - apiPath = fmt.Sprintf("%s%s", cli.basePath, p) - } - - u := &url.URL{ - Path: apiPath, - } - if len(query) > 0 { - u.RawQuery = query.Encode() - } - return u.String() -} - -// ClientVersion returns the version string associated with this -// instance of the Client. Note that this value can be changed -// via the DOCKER_API_VERSION env var. -func (cli *Client) ClientVersion() string { - return cli.version -} - -// UpdateClientVersion updates the version string associated with this -// instance of the Client. -func (cli *Client) UpdateClientVersion(v string) { - cli.version = v -} - -// ParseHost verifies that the given host strings is valid. -func ParseHost(host string) (string, string, string, error) { - protoAddrParts := strings.SplitN(host, "://", 2) - if len(protoAddrParts) == 1 { - return "", "", "", fmt.Errorf("unable to parse docker host `%s`", host) - } - - var basePath string - proto, addr := protoAddrParts[0], protoAddrParts[1] - if proto == "tcp" { - parsed, err := url.Parse("tcp://" + addr) - if err != nil { - return "", "", "", err - } - addr = parsed.Host - basePath = parsed.Path - } - return proto, addr, basePath, nil -} diff --git a/vendor/github.com/docker/engine-api/client/client_darwin.go b/vendor/github.com/docker/engine-api/client/client_darwin.go deleted file mode 100644 index 4b47a178c48f..000000000000 --- a/vendor/github.com/docker/engine-api/client/client_darwin.go +++ /dev/null @@ -1,4 +0,0 @@ -package client - -// DefaultDockerHost defines os specific default if DOCKER_HOST is unset -const DefaultDockerHost = "tcp://127.0.0.1:2375" diff --git a/vendor/github.com/docker/engine-api/client/client_unix.go b/vendor/github.com/docker/engine-api/client/client_unix.go deleted file mode 100644 index 572c5f87a78f..000000000000 --- a/vendor/github.com/docker/engine-api/client/client_unix.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build linux freebsd solaris openbsd - -package client - -// DefaultDockerHost defines os specific default if DOCKER_HOST is unset -const DefaultDockerHost = "unix:///var/run/docker.sock" diff --git a/vendor/github.com/docker/engine-api/client/client_windows.go b/vendor/github.com/docker/engine-api/client/client_windows.go deleted file mode 100644 index 07c0c7a77492..000000000000 --- a/vendor/github.com/docker/engine-api/client/client_windows.go +++ /dev/null @@ -1,4 +0,0 @@ -package client - -// DefaultDockerHost defines os specific default if DOCKER_HOST is unset -const DefaultDockerHost = "npipe:////./pipe/docker_engine" diff --git a/vendor/github.com/docker/engine-api/client/container_attach.go b/vendor/github.com/docker/engine-api/client/container_attach.go deleted file mode 100644 index 1b616bf03851..000000000000 --- a/vendor/github.com/docker/engine-api/client/container_attach.go +++ /dev/null @@ -1,34 +0,0 @@ -package client - -import ( - "net/url" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// ContainerAttach attaches a connection to a container in the server. -// It returns a types.HijackedConnection with the hijacked connection -// and the a reader to get output. It's up to the called to close -// the hijacked connection by calling types.HijackedResponse.Close. 
-func (cli *Client) ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) { - query := url.Values{} - if options.Stream { - query.Set("stream", "1") - } - if options.Stdin { - query.Set("stdin", "1") - } - if options.Stdout { - query.Set("stdout", "1") - } - if options.Stderr { - query.Set("stderr", "1") - } - if options.DetachKeys != "" { - query.Set("detachKeys", options.DetachKeys) - } - - headers := map[string][]string{"Content-Type": {"text/plain"}} - return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, headers) -} diff --git a/vendor/github.com/docker/engine-api/client/container_commit.go b/vendor/github.com/docker/engine-api/client/container_commit.go deleted file mode 100644 index d5c474990664..000000000000 --- a/vendor/github.com/docker/engine-api/client/container_commit.go +++ /dev/null @@ -1,53 +0,0 @@ -package client - -import ( - "encoding/json" - "errors" - "net/url" - - distreference "github.com/docker/distribution/reference" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/reference" - "golang.org/x/net/context" -) - -// ContainerCommit applies changes into a container and creates a new tagged image. -func (cli *Client) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.ContainerCommitResponse, error) { - var repository, tag string - if options.Reference != "" { - distributionRef, err := distreference.ParseNamed(options.Reference) - if err != nil { - return types.ContainerCommitResponse{}, err - } - - if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical { - return types.ContainerCommitResponse{}, errors.New("refusing to create a tag with a digest reference") - } - - tag = reference.GetTagFromNamedRef(distributionRef) - repository = distributionRef.Name() - } - - query := url.Values{} - query.Set("container", container) - query.Set("repo", repository) - query.Set("tag", tag) - query.Set("comment", options.Comment) - query.Set("author", options.Author) - for _, change := range options.Changes { - query.Add("changes", change) - } - if options.Pause != true { - query.Set("pause", "0") - } - - var response types.ContainerCommitResponse - resp, err := cli.post(ctx, "/commit", query, options.Config, nil) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) - return response, err -} diff --git a/vendor/github.com/docker/engine-api/client/container_copy.go b/vendor/github.com/docker/engine-api/client/container_copy.go deleted file mode 100644 index d3dd0b116c0f..000000000000 --- a/vendor/github.com/docker/engine-api/client/container_copy.go +++ /dev/null @@ -1,97 +0,0 @@ -package client - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - "path/filepath" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/engine-api/types" -) - -// ContainerStatPath returns Stat information about a path inside the container filesystem. -func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (types.ContainerPathStat, error) { - query := url.Values{} - query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. 
- - urlStr := fmt.Sprintf("/containers/%s/archive", containerID) - response, err := cli.head(ctx, urlStr, query, nil) - if err != nil { - return types.ContainerPathStat{}, err - } - defer ensureReaderClosed(response) - return getContainerPathStatFromHeader(response.header) -} - -// CopyToContainer copies content into the container filesystem. -func (cli *Client) CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error { - query := url.Values{} - query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. - // Do not allow for an existing directory to be overwritten by a non-directory and vice versa. - if !options.AllowOverwriteDirWithFile { - query.Set("noOverwriteDirNonDir", "true") - } - - apiPath := fmt.Sprintf("/containers/%s/archive", container) - - response, err := cli.putRaw(ctx, apiPath, query, content, nil) - if err != nil { - return err - } - defer ensureReaderClosed(response) - - if response.statusCode != http.StatusOK { - return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) - } - - return nil -} - -// CopyFromContainer gets the content from the container and returns it as a Reader -// to manipulate it in the host. It's up to the caller to close the reader. -func (cli *Client) CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) { - query := make(url.Values, 1) - query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API. - - apiPath := fmt.Sprintf("/containers/%s/archive", container) - response, err := cli.get(ctx, apiPath, query, nil) - if err != nil { - return nil, types.ContainerPathStat{}, err - } - - if response.statusCode != http.StatusOK { - return nil, types.ContainerPathStat{}, fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) - } - - // In order to get the copy behavior right, we need to know information - // about both the source and the destination. The response headers include - // stat info about the source that we can use in deciding exactly how to - // copy it locally. Along with the stat info about the local destination, - // we have everything we need to handle the multiple possibilities there - // can be when copying a file/dir from one location to another file/dir. 
- stat, err := getContainerPathStatFromHeader(response.header) - if err != nil { - return nil, stat, fmt.Errorf("unable to get resource stat from response: %s", err) - } - return response.body, stat, err -} - -func getContainerPathStatFromHeader(header http.Header) (types.ContainerPathStat, error) { - var stat types.ContainerPathStat - - encodedStat := header.Get("X-Docker-Container-Path-Stat") - statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat)) - - err := json.NewDecoder(statDecoder).Decode(&stat) - if err != nil { - err = fmt.Errorf("unable to decode container path stat header: %s", err) - } - - return stat, err -} diff --git a/vendor/github.com/docker/engine-api/client/container_create.go b/vendor/github.com/docker/engine-api/client/container_create.go deleted file mode 100644 index 98935794dad9..000000000000 --- a/vendor/github.com/docker/engine-api/client/container_create.go +++ /dev/null @@ -1,46 +0,0 @@ -package client - -import ( - "encoding/json" - "net/url" - "strings" - - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/container" - "github.com/docker/engine-api/types/network" - "golang.org/x/net/context" -) - -type configWrapper struct { - *container.Config - HostConfig *container.HostConfig - NetworkingConfig *network.NetworkingConfig -} - -// ContainerCreate creates a new container based in the given configuration. -// It can be associated with a name, but it's not mandatory. -func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (types.ContainerCreateResponse, error) { - var response types.ContainerCreateResponse - query := url.Values{} - if containerName != "" { - query.Set("name", containerName) - } - - body := configWrapper{ - Config: config, - HostConfig: hostConfig, - NetworkingConfig: networkingConfig, - } - - serverResp, err := cli.post(ctx, "/containers/create", query, body, nil) - if err != nil { - if serverResp != nil && serverResp.statusCode == 404 && strings.Contains(err.Error(), "No such image") { - return response, imageNotFoundError{config.Image} - } - return response, err - } - - err = json.NewDecoder(serverResp.body).Decode(&response) - ensureReaderClosed(serverResp) - return response, err -} diff --git a/vendor/github.com/docker/engine-api/client/container_diff.go b/vendor/github.com/docker/engine-api/client/container_diff.go deleted file mode 100644 index f4bb3a46b99f..000000000000 --- a/vendor/github.com/docker/engine-api/client/container_diff.go +++ /dev/null @@ -1,23 +0,0 @@ -package client - -import ( - "encoding/json" - "net/url" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// ContainerDiff shows differences in a container filesystem since it was started. 
-func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]types.ContainerChange, error) { - var changes []types.ContainerChange - - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil) - if err != nil { - return changes, err - } - - err = json.NewDecoder(serverResp.body).Decode(&changes) - ensureReaderClosed(serverResp) - return changes, err -} diff --git a/vendor/github.com/docker/engine-api/client/container_exec.go b/vendor/github.com/docker/engine-api/client/container_exec.go deleted file mode 100644 index ff7e1a9d0596..000000000000 --- a/vendor/github.com/docker/engine-api/client/container_exec.go +++ /dev/null @@ -1,49 +0,0 @@ -package client - -import ( - "encoding/json" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// ContainerExecCreate creates a new exec configuration to run an exec process. -func (cli *Client) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.ContainerExecCreateResponse, error) { - var response types.ContainerExecCreateResponse - resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, config, nil) - if err != nil { - return response, err - } - err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) - return response, err -} - -// ContainerExecStart starts an exec process already created in the docker host. -func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error { - resp, err := cli.post(ctx, "/exec/"+execID+"/start", nil, config, nil) - ensureReaderClosed(resp) - return err -} - -// ContainerExecAttach attaches a connection to an exec process in the server. -// It returns a types.HijackedConnection with the hijacked connection -// and the a reader to get output. It's up to the called to close -// the hijacked connection by calling types.HijackedResponse.Close. -func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error) { - headers := map[string][]string{"Content-Type": {"application/json"}} - return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers) -} - -// ContainerExecInspect returns information about a specific exec process on the docker host. -func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) { - var response types.ContainerExecInspect - resp, err := cli.get(ctx, "/exec/"+execID+"/json", nil, nil) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) - return response, err -} diff --git a/vendor/github.com/docker/engine-api/client/container_export.go b/vendor/github.com/docker/engine-api/client/container_export.go deleted file mode 100644 index 52194f3d3422..000000000000 --- a/vendor/github.com/docker/engine-api/client/container_export.go +++ /dev/null @@ -1,20 +0,0 @@ -package client - -import ( - "io" - "net/url" - - "golang.org/x/net/context" -) - -// ContainerExport retrieves the raw contents of a container -// and returns them as an io.ReadCloser. It's up to the caller -// to close the stream. 
-func (cli *Client) ContainerExport(ctx context.Context, containerID string) (io.ReadCloser, error) { - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil) - if err != nil { - return nil, err - } - - return serverResp.body, nil -} diff --git a/vendor/github.com/docker/engine-api/client/container_inspect.go b/vendor/github.com/docker/engine-api/client/container_inspect.go deleted file mode 100644 index afd71eefcb01..000000000000 --- a/vendor/github.com/docker/engine-api/client/container_inspect.go +++ /dev/null @@ -1,65 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "io/ioutil" - "net/http" - "net/url" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// ContainerInspect returns the container information. -func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) { - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil) - if err != nil { - if serverResp.statusCode == http.StatusNotFound { - return types.ContainerJSON{}, containerNotFoundError{containerID} - } - return types.ContainerJSON{}, err - } - - var response types.ContainerJSON - err = json.NewDecoder(serverResp.body).Decode(&response) - ensureReaderClosed(serverResp) - return response, err -} - -// ContainerInspectWithRaw returns the container information and it's raw representation. -func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (types.ContainerJSON, []byte, error) { - query := url.Values{} - if getSize { - query.Set("size", "1") - } - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil) - if err != nil { - if serverResp.statusCode == http.StatusNotFound { - return types.ContainerJSON{}, nil, containerNotFoundError{containerID} - } - return types.ContainerJSON{}, nil, err - } - defer ensureReaderClosed(serverResp) - - body, err := ioutil.ReadAll(serverResp.body) - if err != nil { - return types.ContainerJSON{}, nil, err - } - - var response types.ContainerJSON - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&response) - return response, body, err -} - -func (cli *Client) containerInspectWithResponse(ctx context.Context, containerID string, query url.Values) (types.ContainerJSON, *serverResponse, error) { - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil) - if err != nil { - return types.ContainerJSON{}, serverResp, err - } - - var response types.ContainerJSON - err = json.NewDecoder(serverResp.body).Decode(&response) - return response, serverResp, err -} diff --git a/vendor/github.com/docker/engine-api/client/container_kill.go b/vendor/github.com/docker/engine-api/client/container_kill.go deleted file mode 100644 index 29f80c73adea..000000000000 --- a/vendor/github.com/docker/engine-api/client/container_kill.go +++ /dev/null @@ -1,17 +0,0 @@ -package client - -import ( - "net/url" - - "golang.org/x/net/context" -) - -// ContainerKill terminates the container process but does not remove the container from the docker host. 
-func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error { - query := url.Values{} - query.Set("signal", signal) - - resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/engine-api/client/container_list.go b/vendor/github.com/docker/engine-api/client/container_list.go deleted file mode 100644 index 87f7333dc7bf..000000000000 --- a/vendor/github.com/docker/engine-api/client/container_list.go +++ /dev/null @@ -1,56 +0,0 @@ -package client - -import ( - "encoding/json" - "net/url" - "strconv" - - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - "golang.org/x/net/context" -) - -// ContainerList returns the list of containers in the docker host. -func (cli *Client) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) { - query := url.Values{} - - if options.All { - query.Set("all", "1") - } - - if options.Limit != -1 { - query.Set("limit", strconv.Itoa(options.Limit)) - } - - if options.Since != "" { - query.Set("since", options.Since) - } - - if options.Before != "" { - query.Set("before", options.Before) - } - - if options.Size { - query.Set("size", "1") - } - - if options.Filter.Len() > 0 { - filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filter) - - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - - resp, err := cli.get(ctx, "/containers/json", query, nil) - if err != nil { - return nil, err - } - - var containers []types.Container - err = json.NewDecoder(resp.body).Decode(&containers) - ensureReaderClosed(resp) - return containers, err -} diff --git a/vendor/github.com/docker/engine-api/client/container_logs.go b/vendor/github.com/docker/engine-api/client/container_logs.go deleted file mode 100644 index 08b9b9187649..000000000000 --- a/vendor/github.com/docker/engine-api/client/container_logs.go +++ /dev/null @@ -1,52 +0,0 @@ -package client - -import ( - "io" - "net/url" - "time" - - "golang.org/x/net/context" - - "github.com/docker/engine-api/types" - timetypes "github.com/docker/engine-api/types/time" -) - -// ContainerLogs returns the logs generated by a container in an io.ReadCloser. -// It's up to the caller to close the stream. 
-func (cli *Client) ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) { - query := url.Values{} - if options.ShowStdout { - query.Set("stdout", "1") - } - - if options.ShowStderr { - query.Set("stderr", "1") - } - - if options.Since != "" { - ts, err := timetypes.GetTimestamp(options.Since, time.Now()) - if err != nil { - return nil, err - } - query.Set("since", ts) - } - - if options.Timestamps { - query.Set("timestamps", "1") - } - - if options.Details { - query.Set("details", "1") - } - - if options.Follow { - query.Set("follow", "1") - } - query.Set("tail", options.Tail) - - resp, err := cli.get(ctx, "/containers/"+container+"/logs", query, nil) - if err != nil { - return nil, err - } - return resp.body, nil -} diff --git a/vendor/github.com/docker/engine-api/client/container_pause.go b/vendor/github.com/docker/engine-api/client/container_pause.go deleted file mode 100644 index 412067a7821f..000000000000 --- a/vendor/github.com/docker/engine-api/client/container_pause.go +++ /dev/null @@ -1,10 +0,0 @@ -package client - -import "golang.org/x/net/context" - -// ContainerPause pauses the main process of a given container without terminating it. -func (cli *Client) ContainerPause(ctx context.Context, containerID string) error { - resp, err := cli.post(ctx, "/containers/"+containerID+"/pause", nil, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/engine-api/client/container_remove.go b/vendor/github.com/docker/engine-api/client/container_remove.go deleted file mode 100644 index cef4b8122089..000000000000 --- a/vendor/github.com/docker/engine-api/client/container_remove.go +++ /dev/null @@ -1,27 +0,0 @@ -package client - -import ( - "net/url" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// ContainerRemove kills and removes a container from the docker host. -func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options types.ContainerRemoveOptions) error { - query := url.Values{} - if options.RemoveVolumes { - query.Set("v", "1") - } - if options.RemoveLinks { - query.Set("link", "1") - } - - if options.Force { - query.Set("force", "1") - } - - resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/engine-api/client/container_rename.go b/vendor/github.com/docker/engine-api/client/container_rename.go deleted file mode 100644 index 0e718da7c6ef..000000000000 --- a/vendor/github.com/docker/engine-api/client/container_rename.go +++ /dev/null @@ -1,16 +0,0 @@ -package client - -import ( - "net/url" - - "golang.org/x/net/context" -) - -// ContainerRename changes the name of a given container. 
-func (cli *Client) ContainerRename(ctx context.Context, containerID, newContainerName string) error { - query := url.Values{} - query.Set("name", newContainerName) - resp, err := cli.post(ctx, "/containers/"+containerID+"/rename", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/engine-api/client/container_resize.go b/vendor/github.com/docker/engine-api/client/container_resize.go deleted file mode 100644 index b95d26b335a6..000000000000 --- a/vendor/github.com/docker/engine-api/client/container_resize.go +++ /dev/null @@ -1,29 +0,0 @@ -package client - -import ( - "net/url" - "strconv" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// ContainerResize changes the size of the tty for a container. -func (cli *Client) ContainerResize(ctx context.Context, containerID string, options types.ResizeOptions) error { - return cli.resize(ctx, "/containers/"+containerID, options.Height, options.Width) -} - -// ContainerExecResize changes the size of the tty for an exec process running inside a container. -func (cli *Client) ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error { - return cli.resize(ctx, "/exec/"+execID, options.Height, options.Width) -} - -func (cli *Client) resize(ctx context.Context, basePath string, height, width int) error { - query := url.Values{} - query.Set("h", strconv.Itoa(height)) - query.Set("w", strconv.Itoa(width)) - - resp, err := cli.post(ctx, basePath+"/resize", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/engine-api/client/container_restart.go b/vendor/github.com/docker/engine-api/client/container_restart.go deleted file mode 100644 index 1c74b18ca515..000000000000 --- a/vendor/github.com/docker/engine-api/client/container_restart.go +++ /dev/null @@ -1,19 +0,0 @@ -package client - -import ( - "net/url" - "strconv" - - "golang.org/x/net/context" -) - -// ContainerRestart stops and starts a container again. -// It makes the daemon to wait for the container to be up again for -// a specific amount of time, given the timeout. -func (cli *Client) ContainerRestart(ctx context.Context, containerID string, timeout int) error { - query := url.Values{} - query.Set("t", strconv.Itoa(timeout)) - resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/engine-api/client/container_start.go b/vendor/github.com/docker/engine-api/client/container_start.go deleted file mode 100644 index 12a979422eff..000000000000 --- a/vendor/github.com/docker/engine-api/client/container_start.go +++ /dev/null @@ -1,10 +0,0 @@ -package client - -import "golang.org/x/net/context" - -// ContainerStart sends a request to the docker daemon to start a container. -func (cli *Client) ContainerStart(ctx context.Context, containerID string) error { - resp, err := cli.post(ctx, "/containers/"+containerID+"/start", nil, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/engine-api/client/container_stats.go b/vendor/github.com/docker/engine-api/client/container_stats.go deleted file mode 100644 index 2cc67c3af173..000000000000 --- a/vendor/github.com/docker/engine-api/client/container_stats.go +++ /dev/null @@ -1,24 +0,0 @@ -package client - -import ( - "io" - "net/url" - - "golang.org/x/net/context" -) - -// ContainerStats returns near realtime stats for a given container. 
-// It's up to the caller to close the io.ReadCloser returned. -func (cli *Client) ContainerStats(ctx context.Context, containerID string, stream bool) (io.ReadCloser, error) { - query := url.Values{} - query.Set("stream", "0") - if stream { - query.Set("stream", "1") - } - - resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil) - if err != nil { - return nil, err - } - return resp.body, err -} diff --git a/vendor/github.com/docker/engine-api/client/container_stop.go b/vendor/github.com/docker/engine-api/client/container_stop.go deleted file mode 100644 index 34d786291d58..000000000000 --- a/vendor/github.com/docker/engine-api/client/container_stop.go +++ /dev/null @@ -1,18 +0,0 @@ -package client - -import ( - "net/url" - "strconv" - - "golang.org/x/net/context" -) - -// ContainerStop stops a container without terminating the process. -// The process is blocked until the container stops or the timeout expires. -func (cli *Client) ContainerStop(ctx context.Context, containerID string, timeout int) error { - query := url.Values{} - query.Set("t", strconv.Itoa(timeout)) - resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/engine-api/client/container_top.go b/vendor/github.com/docker/engine-api/client/container_top.go deleted file mode 100644 index 5ad926ae088e..000000000000 --- a/vendor/github.com/docker/engine-api/client/container_top.go +++ /dev/null @@ -1,28 +0,0 @@ -package client - -import ( - "encoding/json" - "net/url" - "strings" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// ContainerTop shows process information from within a container. -func (cli *Client) ContainerTop(ctx context.Context, containerID string, arguments []string) (types.ContainerProcessList, error) { - var response types.ContainerProcessList - query := url.Values{} - if len(arguments) > 0 { - query.Set("ps_args", strings.Join(arguments, " ")) - } - - resp, err := cli.get(ctx, "/containers/"+containerID+"/top", query, nil) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) - return response, err -} diff --git a/vendor/github.com/docker/engine-api/client/container_unpause.go b/vendor/github.com/docker/engine-api/client/container_unpause.go deleted file mode 100644 index 5c76211256cb..000000000000 --- a/vendor/github.com/docker/engine-api/client/container_unpause.go +++ /dev/null @@ -1,10 +0,0 @@ -package client - -import "golang.org/x/net/context" - -// ContainerUnpause resumes the process execution within a container -func (cli *Client) ContainerUnpause(ctx context.Context, containerID string) error { - resp, err := cli.post(ctx, "/containers/"+containerID+"/unpause", nil, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/engine-api/client/container_update.go b/vendor/github.com/docker/engine-api/client/container_update.go deleted file mode 100644 index a5a1826dc4ac..000000000000 --- a/vendor/github.com/docker/engine-api/client/container_update.go +++ /dev/null @@ -1,13 +0,0 @@ -package client - -import ( - "github.com/docker/engine-api/types/container" - "golang.org/x/net/context" -) - -// ContainerUpdate updates resources of a container -func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) error { - resp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, 
nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/engine-api/client/container_wait.go b/vendor/github.com/docker/engine-api/client/container_wait.go deleted file mode 100644 index c26ff3f37869..000000000000 --- a/vendor/github.com/docker/engine-api/client/container_wait.go +++ /dev/null @@ -1,26 +0,0 @@ -package client - -import ( - "encoding/json" - - "golang.org/x/net/context" - - "github.com/docker/engine-api/types" -) - -// ContainerWait pauses execution until a container exits. -// It returns the API status code as response of its readiness. -func (cli *Client) ContainerWait(ctx context.Context, containerID string) (int, error) { - resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", nil, nil, nil) - if err != nil { - return -1, err - } - defer ensureReaderClosed(resp) - - var res types.ContainerWaitResponse - if err := json.NewDecoder(resp.body).Decode(&res); err != nil { - return -1, err - } - - return res.StatusCode, nil -} diff --git a/vendor/github.com/docker/engine-api/client/errors.go b/vendor/github.com/docker/engine-api/client/errors.go deleted file mode 100644 index 17828bb7168b..000000000000 --- a/vendor/github.com/docker/engine-api/client/errors.go +++ /dev/null @@ -1,94 +0,0 @@ -package client - -import ( - "errors" - "fmt" -) - -// ErrConnectionFailed is an error raised when the connection between the client and the server failed. -var ErrConnectionFailed = errors.New("Cannot connect to the Docker daemon. Is the docker daemon running on this host?") - -// imageNotFoundError implements an error returned when an image is not in the docker host. -type imageNotFoundError struct { - imageID string -} - -// Error returns a string representation of an imageNotFoundError -func (i imageNotFoundError) Error() string { - return fmt.Sprintf("Error: No such image: %s", i.imageID) -} - -// IsErrImageNotFound returns true if the error is caused -// when an image is not found in the docker host. -func IsErrImageNotFound(err error) bool { - _, ok := err.(imageNotFoundError) - return ok -} - -// containerNotFoundError implements an error returned when a container is not in the docker host. -type containerNotFoundError struct { - containerID string -} - -// Error returns a string representation of a containerNotFoundError -func (e containerNotFoundError) Error() string { - return fmt.Sprintf("Error: No such container: %s", e.containerID) -} - -// IsErrContainerNotFound returns true if the error is caused -// when a container is not found in the docker host. -func IsErrContainerNotFound(err error) bool { - _, ok := err.(containerNotFoundError) - return ok -} - -// networkNotFoundError implements an error returned when a network is not in the docker host. -type networkNotFoundError struct { - networkID string -} - -// Error returns a string representation of a networkNotFoundError -func (e networkNotFoundError) Error() string { - return fmt.Sprintf("Error: No such network: %s", e.networkID) -} - -// IsErrNetworkNotFound returns true if the error is caused -// when a network is not found in the docker host. -func IsErrNetworkNotFound(err error) bool { - _, ok := err.(networkNotFoundError) - return ok -} - -// volumeNotFoundError implements an error returned when a volume is not in the docker host. 
-type volumeNotFoundError struct { - volumeID string -} - -// Error returns a string representation of a networkNotFoundError -func (e volumeNotFoundError) Error() string { - return fmt.Sprintf("Error: No such volume: %s", e.volumeID) -} - -// IsErrVolumeNotFound returns true if the error is caused -// when a volume is not found in the docker host. -func IsErrVolumeNotFound(err error) bool { - _, ok := err.(volumeNotFoundError) - return ok -} - -// unauthorizedError represents an authorization error in a remote registry. -type unauthorizedError struct { - cause error -} - -// Error returns a string representation of an unauthorizedError -func (u unauthorizedError) Error() string { - return u.cause.Error() -} - -// IsErrUnauthorized returns true if the error is caused -// when a remote registry authentication fails -func IsErrUnauthorized(err error) bool { - _, ok := err.(unauthorizedError) - return ok -} diff --git a/vendor/github.com/docker/engine-api/client/events.go b/vendor/github.com/docker/engine-api/client/events.go deleted file mode 100644 index e379ce0a2945..000000000000 --- a/vendor/github.com/docker/engine-api/client/events.go +++ /dev/null @@ -1,48 +0,0 @@ -package client - -import ( - "io" - "net/url" - "time" - - "golang.org/x/net/context" - - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - timetypes "github.com/docker/engine-api/types/time" -) - -// Events returns a stream of events in the daemon in a ReadCloser. -// It's up to the caller to close the stream. -func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (io.ReadCloser, error) { - query := url.Values{} - ref := time.Now() - - if options.Since != "" { - ts, err := timetypes.GetTimestamp(options.Since, ref) - if err != nil { - return nil, err - } - query.Set("since", ts) - } - if options.Until != "" { - ts, err := timetypes.GetTimestamp(options.Until, ref) - if err != nil { - return nil, err - } - query.Set("until", ts) - } - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToParam(options.Filters) - if err != nil { - return nil, err - } - query.Set("filters", filterJSON) - } - - serverResponse, err := cli.get(ctx, "/events", query, nil) - if err != nil { - return nil, err - } - return serverResponse.body, nil -} diff --git a/vendor/github.com/docker/engine-api/client/hijack.go b/vendor/github.com/docker/engine-api/client/hijack.go deleted file mode 100644 index dbd91ef62996..000000000000 --- a/vendor/github.com/docker/engine-api/client/hijack.go +++ /dev/null @@ -1,174 +0,0 @@ -package client - -import ( - "crypto/tls" - "errors" - "fmt" - "net" - "net/http/httputil" - "net/url" - "strings" - "time" - - "github.com/docker/engine-api/types" - "github.com/docker/go-connections/sockets" - "golang.org/x/net/context" -) - -// tlsClientCon holds tls information and a dialed connection. -type tlsClientCon struct { - *tls.Conn - rawConn net.Conn -} - -func (c *tlsClientCon) CloseWrite() error { - // Go standard tls.Conn doesn't provide the CloseWrite() method so we do it - // on its underlying connection. - if conn, ok := c.rawConn.(types.CloseWriter); ok { - return conn.CloseWrite() - } - return nil -} - -// postHijacked sends a POST request and hijacks the connection. 
-func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body interface{}, headers map[string][]string) (types.HijackedResponse, error) { - bodyEncoded, err := encodeData(body) - if err != nil { - return types.HijackedResponse{}, err - } - - req, err := cli.newRequest("POST", path, query, bodyEncoded, headers) - if err != nil { - return types.HijackedResponse{}, err - } - req.Host = cli.addr - - req.Header.Set("Connection", "Upgrade") - req.Header.Set("Upgrade", "tcp") - - conn, err := dial(cli.proto, cli.addr, cli.transport.TLSConfig()) - if err != nil { - if strings.Contains(err.Error(), "connection refused") { - return types.HijackedResponse{}, fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker daemon' running on this host?") - } - return types.HijackedResponse{}, err - } - - // When we set up a TCP connection for hijack, there could be long periods - // of inactivity (a long running command with no output) that in certain - // network setups may cause ECONNTIMEOUT, leaving the client in an unknown - // state. Setting TCP KeepAlive on the socket connection will prohibit - // ECONNTIMEOUT unless the socket connection truly is broken - if tcpConn, ok := conn.(*net.TCPConn); ok { - tcpConn.SetKeepAlive(true) - tcpConn.SetKeepAlivePeriod(30 * time.Second) - } - - clientconn := httputil.NewClientConn(conn, nil) - defer clientconn.Close() - - // Server hijacks the connection, error 'connection closed' expected - _, err = clientconn.Do(req) - - rwc, br := clientconn.Hijack() - - return types.HijackedResponse{Conn: rwc, Reader: br}, err -} - -func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) { - return tlsDialWithDialer(new(net.Dialer), network, addr, config) -} - -// We need to copy Go's implementation of tls.Dial (pkg/cryptor/tls/tls.go) in -// order to return our custom tlsClientCon struct which holds both the tls.Conn -// object _and_ its underlying raw connection. The rationale for this is that -// we need to be able to close the write end of the connection when attaching, -// which tls.Conn does not provide. -func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) { - // We want the Timeout and Deadline values from dialer to cover the - // whole process: TCP connection and TLS handshake. This means that we - // also need to start our own timers now. - timeout := dialer.Timeout - - if !dialer.Deadline.IsZero() { - deadlineTimeout := dialer.Deadline.Sub(time.Now()) - if timeout == 0 || deadlineTimeout < timeout { - timeout = deadlineTimeout - } - } - - var errChannel chan error - - if timeout != 0 { - errChannel = make(chan error, 2) - time.AfterFunc(timeout, func() { - errChannel <- errors.New("") - }) - } - - proxyDialer, err := sockets.DialerFromEnvironment(dialer) - if err != nil { - return nil, err - } - - rawConn, err := proxyDialer.Dial(network, addr) - if err != nil { - return nil, err - } - // When we set up a TCP connection for hijack, there could be long periods - // of inactivity (a long running command with no output) that in certain - // network setups may cause ECONNTIMEOUT, leaving the client in an unknown - // state. 
Setting TCP KeepAlive on the socket connection will prohibit - // ECONNTIMEOUT unless the socket connection truly is broken - if tcpConn, ok := rawConn.(*net.TCPConn); ok { - tcpConn.SetKeepAlive(true) - tcpConn.SetKeepAlivePeriod(30 * time.Second) - } - - colonPos := strings.LastIndex(addr, ":") - if colonPos == -1 { - colonPos = len(addr) - } - hostname := addr[:colonPos] - - // If no ServerName is set, infer the ServerName - // from the hostname we're connecting to. - if config.ServerName == "" { - // Make a copy to avoid polluting argument or default. - c := *config - c.ServerName = hostname - config = &c - } - - conn := tls.Client(rawConn, config) - - if timeout == 0 { - err = conn.Handshake() - } else { - go func() { - errChannel <- conn.Handshake() - }() - - err = <-errChannel - } - - if err != nil { - rawConn.Close() - return nil, err - } - - // This is Docker difference with standard's crypto/tls package: returned a - // wrapper which holds both the TLS and raw connections. - return &tlsClientCon{conn, rawConn}, nil -} - -func dial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) { - if tlsConfig != nil && proto != "unix" && proto != "npipe" { - // Notice this isn't Go standard's tls.Dial function - return tlsDial(proto, addr, tlsConfig) - } - if proto == "npipe" { - return sockets.DialPipe(addr, 32*time.Second) - } - return net.Dial(proto, addr) -} diff --git a/vendor/github.com/docker/engine-api/client/image_build.go b/vendor/github.com/docker/engine-api/client/image_build.go deleted file mode 100644 index 4165c4e9a843..000000000000 --- a/vendor/github.com/docker/engine-api/client/image_build.go +++ /dev/null @@ -1,135 +0,0 @@ -package client - -import ( - "encoding/base64" - "encoding/json" - "io" - "net/http" - "net/url" - "regexp" - "strconv" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/container" -) - -var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`) - -// ImageBuild sends request to the daemon to build images. -// The Body in the response implement an io.ReadCloser and it's up to the caller to -// close it. 
-func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) { - query, err := imageBuildOptionsToQuery(options) - if err != nil { - return types.ImageBuildResponse{}, err - } - - headers := http.Header(make(map[string][]string)) - buf, err := json.Marshal(options.AuthConfigs) - if err != nil { - return types.ImageBuildResponse{}, err - } - headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) - headers.Set("Content-Type", "application/tar") - - serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers) - if err != nil { - return types.ImageBuildResponse{}, err - } - - osType := getDockerOS(serverResp.header.Get("Server")) - - return types.ImageBuildResponse{ - Body: serverResp.body, - OSType: osType, - }, nil -} - -func imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, error) { - query := url.Values{ - "t": options.Tags, - } - if options.SuppressOutput { - query.Set("q", "1") - } - if options.RemoteContext != "" { - query.Set("remote", options.RemoteContext) - } - if options.NoCache { - query.Set("nocache", "1") - } - if options.Remove { - query.Set("rm", "1") - } else { - query.Set("rm", "0") - } - - if options.ForceRemove { - query.Set("forcerm", "1") - } - - if options.PullParent { - query.Set("pull", "1") - } - - if !container.Isolation.IsDefault(options.Isolation) { - query.Set("isolation", string(options.Isolation)) - } - - query.Set("cpusetcpus", options.CPUSetCPUs) - query.Set("cpusetmems", options.CPUSetMems) - query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10)) - query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10)) - query.Set("cpuperiod", strconv.FormatInt(options.CPUPeriod, 10)) - query.Set("memory", strconv.FormatInt(options.Memory, 10)) - query.Set("memswap", strconv.FormatInt(options.MemorySwap, 10)) - query.Set("cgroupparent", options.CgroupParent) - query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10)) - query.Set("dockerfile", options.Dockerfile) - - ulimitsJSON, err := json.Marshal(options.Ulimits) - if err != nil { - return query, err - } - query.Set("ulimits", string(ulimitsJSON)) - - buildArgsJSON, err := json.Marshal(options.BuildArgs) - if err != nil { - return query, err - } - query.Set("buildargs", string(buildArgsJSON)) - - labelsJSON, err := json.Marshal(options.Labels) - if err != nil { - return query, err - } - query.Set("labels", string(labelsJSON)) - return query, nil -} - -func getDockerOS(serverHeader string) string { - var osType string - matches := headerRegexp.FindStringSubmatch(serverHeader) - if len(matches) > 0 { - osType = matches[1] - } - return osType -} - -// convertKVStringsToMap converts ["key=value"] to {"key":"value"} -func convertKVStringsToMap(values []string) map[string]string { - result := make(map[string]string, len(values)) - for _, value := range values { - kv := strings.SplitN(value, "=", 2) - if len(kv) == 1 { - result[kv[0]] = "" - } else { - result[kv[0]] = kv[1] - } - } - - return result -} diff --git a/vendor/github.com/docker/engine-api/client/image_create.go b/vendor/github.com/docker/engine-api/client/image_create.go deleted file mode 100644 index 6dfc0391c00e..000000000000 --- a/vendor/github.com/docker/engine-api/client/image_create.go +++ /dev/null @@ -1,34 +0,0 @@ -package client - -import ( - "io" - "net/url" - - "golang.org/x/net/context" - - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/reference" -) - -// ImageCreate 
creates a new image based in the parent options. -// It returns the JSON content in the response body. -func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) { - repository, tag, err := reference.Parse(parentReference) - if err != nil { - return nil, err - } - - query := url.Values{} - query.Set("fromImage", repository) - query.Set("tag", tag) - resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) - if err != nil { - return nil, err - } - return resp.body, nil -} - -func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, registryAuth string) (*serverResponse, error) { - headers := map[string][]string{"X-Registry-Auth": {registryAuth}} - return cli.post(ctx, "/images/create", query, nil, headers) -} diff --git a/vendor/github.com/docker/engine-api/client/image_history.go b/vendor/github.com/docker/engine-api/client/image_history.go deleted file mode 100644 index b2840b5ed849..000000000000 --- a/vendor/github.com/docker/engine-api/client/image_history.go +++ /dev/null @@ -1,22 +0,0 @@ -package client - -import ( - "encoding/json" - "net/url" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// ImageHistory returns the changes in an image in history format. -func (cli *Client) ImageHistory(ctx context.Context, imageID string) ([]types.ImageHistory, error) { - var history []types.ImageHistory - serverResp, err := cli.get(ctx, "/images/"+imageID+"/history", url.Values{}, nil) - if err != nil { - return history, err - } - - err = json.NewDecoder(serverResp.body).Decode(&history) - ensureReaderClosed(serverResp) - return history, err -} diff --git a/vendor/github.com/docker/engine-api/client/image_import.go b/vendor/github.com/docker/engine-api/client/image_import.go deleted file mode 100644 index 4e8749a01d5b..000000000000 --- a/vendor/github.com/docker/engine-api/client/image_import.go +++ /dev/null @@ -1,37 +0,0 @@ -package client - -import ( - "io" - "net/url" - - "golang.org/x/net/context" - - "github.com/docker/distribution/reference" - "github.com/docker/engine-api/types" -) - -// ImageImport creates a new image based in the source options. -// It returns the JSON content in the response body. -func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) { - if ref != "" { - //Check if the given image name can be resolved - if _, err := reference.ParseNamed(ref); err != nil { - return nil, err - } - } - - query := url.Values{} - query.Set("fromSrc", source.SourceName) - query.Set("repo", ref) - query.Set("tag", options.Tag) - query.Set("message", options.Message) - for _, change := range options.Changes { - query.Add("changes", change) - } - - resp, err := cli.postRaw(ctx, "/images/create", query, source.Source, nil) - if err != nil { - return nil, err - } - return resp.body, nil -} diff --git a/vendor/github.com/docker/engine-api/client/image_inspect.go b/vendor/github.com/docker/engine-api/client/image_inspect.go deleted file mode 100644 index 859ba640869f..000000000000 --- a/vendor/github.com/docker/engine-api/client/image_inspect.go +++ /dev/null @@ -1,38 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "io/ioutil" - "net/http" - "net/url" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// ImageInspectWithRaw returns the image information and its raw representation. 
-func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string, getSize bool) (types.ImageInspect, []byte, error) { - query := url.Values{} - if getSize { - query.Set("size", "1") - } - serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", query, nil) - if err != nil { - if serverResp.statusCode == http.StatusNotFound { - return types.ImageInspect{}, nil, imageNotFoundError{imageID} - } - return types.ImageInspect{}, nil, err - } - defer ensureReaderClosed(serverResp) - - body, err := ioutil.ReadAll(serverResp.body) - if err != nil { - return types.ImageInspect{}, nil, err - } - - var response types.ImageInspect - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&response) - return response, body, err -} diff --git a/vendor/github.com/docker/engine-api/client/image_list.go b/vendor/github.com/docker/engine-api/client/image_list.go deleted file mode 100644 index 347810e663d2..000000000000 --- a/vendor/github.com/docker/engine-api/client/image_list.go +++ /dev/null @@ -1,40 +0,0 @@ -package client - -import ( - "encoding/json" - "net/url" - - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - "golang.org/x/net/context" -) - -// ImageList returns a list of images in the docker host. -func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]types.Image, error) { - var images []types.Image - query := url.Values{} - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToParam(options.Filters) - if err != nil { - return images, err - } - query.Set("filters", filterJSON) - } - if options.MatchName != "" { - // FIXME rename this parameter, to not be confused with the filters flag - query.Set("filter", options.MatchName) - } - if options.All { - query.Set("all", "1") - } - - serverResp, err := cli.get(ctx, "/images/json", query, nil) - if err != nil { - return images, err - } - - err = json.NewDecoder(serverResp.body).Decode(&images) - ensureReaderClosed(serverResp) - return images, err -} diff --git a/vendor/github.com/docker/engine-api/client/image_load.go b/vendor/github.com/docker/engine-api/client/image_load.go deleted file mode 100644 index 72f55fdc0152..000000000000 --- a/vendor/github.com/docker/engine-api/client/image_load.go +++ /dev/null @@ -1,30 +0,0 @@ -package client - -import ( - "io" - "net/url" - - "golang.org/x/net/context" - - "github.com/docker/engine-api/types" -) - -// ImageLoad loads an image in the docker host from the client host. -// It's up to the caller to close the io.ReadCloser in the -// ImageLoadResponse returned by this function. 
-func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) { - v := url.Values{} - v.Set("quiet", "0") - if quiet { - v.Set("quiet", "1") - } - headers := map[string][]string{"Content-Type": {"application/x-tar"}} - resp, err := cli.postRaw(ctx, "/images/load", v, input, headers) - if err != nil { - return types.ImageLoadResponse{}, err - } - return types.ImageLoadResponse{ - Body: resp.body, - JSON: resp.header.Get("Content-Type") == "application/json", - }, nil -} diff --git a/vendor/github.com/docker/engine-api/client/image_pull.go b/vendor/github.com/docker/engine-api/client/image_pull.go deleted file mode 100644 index e2c49ec52b14..000000000000 --- a/vendor/github.com/docker/engine-api/client/image_pull.go +++ /dev/null @@ -1,46 +0,0 @@ -package client - -import ( - "io" - "net/http" - "net/url" - - "golang.org/x/net/context" - - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/reference" -) - -// ImagePull requests the docker host to pull an image from a remote registry. -// It executes the privileged function if the operation is unauthorized -// and it tries one more time. -// It's up to the caller to handle the io.ReadCloser and close it properly. -// -// FIXME(vdemeester): there is currently used in a few way in docker/docker -// - if not in trusted content, ref is used to pass the whole reference, and tag is empty -// - if in trusted content, ref is used to pass the reference name, and tag for the digest -func (cli *Client) ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) { - repository, tag, err := reference.Parse(ref) - if err != nil { - return nil, err - } - - query := url.Values{} - query.Set("fromImage", repository) - if tag != "" && !options.All { - query.Set("tag", tag) - } - - resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) - if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { - newAuthHeader, privilegeErr := options.PrivilegeFunc() - if privilegeErr != nil { - return nil, privilegeErr - } - resp, err = cli.tryImageCreate(ctx, query, newAuthHeader) - } - if err != nil { - return nil, err - } - return resp.body, nil -} diff --git a/vendor/github.com/docker/engine-api/client/image_push.go b/vendor/github.com/docker/engine-api/client/image_push.go deleted file mode 100644 index 9c837a76d12c..000000000000 --- a/vendor/github.com/docker/engine-api/client/image_push.go +++ /dev/null @@ -1,54 +0,0 @@ -package client - -import ( - "errors" - "io" - "net/http" - "net/url" - - "golang.org/x/net/context" - - distreference "github.com/docker/distribution/reference" - "github.com/docker/engine-api/types" -) - -// ImagePush requests the docker host to push an image to a remote registry. -// It executes the privileged function if the operation is unauthorized -// and it tries one more time. -// It's up to the caller to handle the io.ReadCloser and close it properly. 
-func (cli *Client) ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) { - distributionRef, err := distreference.ParseNamed(ref) - if err != nil { - return nil, err - } - - if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical { - return nil, errors.New("cannot push a digest reference") - } - - var tag = "" - if nameTaggedRef, isNamedTagged := distributionRef.(distreference.NamedTagged); isNamedTagged { - tag = nameTaggedRef.Tag() - } - - query := url.Values{} - query.Set("tag", tag) - - resp, err := cli.tryImagePush(ctx, distributionRef.Name(), query, options.RegistryAuth) - if resp.statusCode == http.StatusUnauthorized { - newAuthHeader, privilegeErr := options.PrivilegeFunc() - if privilegeErr != nil { - return nil, privilegeErr - } - resp, err = cli.tryImagePush(ctx, distributionRef.Name(), query, newAuthHeader) - } - if err != nil { - return nil, err - } - return resp.body, nil -} - -func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, registryAuth string) (*serverResponse, error) { - headers := map[string][]string{"X-Registry-Auth": {registryAuth}} - return cli.post(ctx, "/images/"+imageID+"/push", query, nil, headers) -} diff --git a/vendor/github.com/docker/engine-api/client/image_remove.go b/vendor/github.com/docker/engine-api/client/image_remove.go deleted file mode 100644 index 47224326e0c0..000000000000 --- a/vendor/github.com/docker/engine-api/client/image_remove.go +++ /dev/null @@ -1,31 +0,0 @@ -package client - -import ( - "encoding/json" - "net/url" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// ImageRemove removes an image from the docker host. -func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]types.ImageDelete, error) { - query := url.Values{} - - if options.Force { - query.Set("force", "1") - } - if !options.PruneChildren { - query.Set("noprune", "1") - } - - resp, err := cli.delete(ctx, "/images/"+imageID, query, nil) - if err != nil { - return nil, err - } - - var dels []types.ImageDelete - err = json.NewDecoder(resp.body).Decode(&dels) - ensureReaderClosed(resp) - return dels, err -} diff --git a/vendor/github.com/docker/engine-api/client/image_save.go b/vendor/github.com/docker/engine-api/client/image_save.go deleted file mode 100644 index ecac880a32cc..000000000000 --- a/vendor/github.com/docker/engine-api/client/image_save.go +++ /dev/null @@ -1,22 +0,0 @@ -package client - -import ( - "io" - "net/url" - - "golang.org/x/net/context" -) - -// ImageSave retrieves one or more images from the docker host as an io.ReadCloser. -// It's up to the caller to store the images and close the stream. 
-func (cli *Client) ImageSave(ctx context.Context, imageIDs []string) (io.ReadCloser, error) { - query := url.Values{ - "names": imageIDs, - } - - resp, err := cli.get(ctx, "/images/get", query, nil) - if err != nil { - return nil, err - } - return resp.body, nil -} diff --git a/vendor/github.com/docker/engine-api/client/image_search.go b/vendor/github.com/docker/engine-api/client/image_search.go deleted file mode 100644 index 571ba3df3659..000000000000 --- a/vendor/github.com/docker/engine-api/client/image_search.go +++ /dev/null @@ -1,49 +0,0 @@ -package client - -import ( - "encoding/json" - "net/http" - "net/url" - - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - "github.com/docker/engine-api/types/registry" - "golang.org/x/net/context" -) - -// ImageSearch makes the docker host to search by a term in a remote registry. -// The list of results is not sorted in any fashion. -func (cli *Client) ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) { - var results []registry.SearchResult - query := url.Values{} - query.Set("term", term) - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToParam(options.Filters) - if err != nil { - return results, err - } - query.Set("filters", filterJSON) - } - - resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth) - if resp.statusCode == http.StatusUnauthorized { - newAuthHeader, privilegeErr := options.PrivilegeFunc() - if privilegeErr != nil { - return results, privilegeErr - } - resp, err = cli.tryImageSearch(ctx, query, newAuthHeader) - } - if err != nil { - return results, err - } - - err = json.NewDecoder(resp.body).Decode(&results) - ensureReaderClosed(resp) - return results, err -} - -func (cli *Client) tryImageSearch(ctx context.Context, query url.Values, registryAuth string) (*serverResponse, error) { - headers := map[string][]string{"X-Registry-Auth": {registryAuth}} - return cli.get(ctx, "/images/search", query, headers) -} diff --git a/vendor/github.com/docker/engine-api/client/image_tag.go b/vendor/github.com/docker/engine-api/client/image_tag.go deleted file mode 100644 index 490de4e5fea0..000000000000 --- a/vendor/github.com/docker/engine-api/client/image_tag.go +++ /dev/null @@ -1,38 +0,0 @@ -package client - -import ( - "errors" - "fmt" - "net/url" - - "golang.org/x/net/context" - - distreference "github.com/docker/distribution/reference" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/reference" -) - -// ImageTag tags an image in the docker host -func (cli *Client) ImageTag(ctx context.Context, imageID, ref string, options types.ImageTagOptions) error { - distributionRef, err := distreference.ParseNamed(ref) - if err != nil { - return fmt.Errorf("Error parsing reference: %q is not a valid repository/tag", ref) - } - - if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical { - return errors.New("refusing to create a tag with a digest reference") - } - - tag := reference.GetTagFromNamedRef(distributionRef) - - query := url.Values{} - query.Set("repo", distributionRef.Name()) - query.Set("tag", tag) - if options.Force { - query.Set("force", "1") - } - - resp, err := cli.post(ctx, "/images/"+imageID+"/tag", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/engine-api/client/info.go b/vendor/github.com/docker/engine-api/client/info.go deleted file mode 100644 index ff0958d65ce8..000000000000 --- 
a/vendor/github.com/docker/engine-api/client/info.go +++ /dev/null @@ -1,26 +0,0 @@ -package client - -import ( - "encoding/json" - "fmt" - "net/url" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// Info returns information about the docker server. -func (cli *Client) Info(ctx context.Context) (types.Info, error) { - var info types.Info - serverResp, err := cli.get(ctx, "/info", url.Values{}, nil) - if err != nil { - return info, err - } - defer ensureReaderClosed(serverResp) - - if err := json.NewDecoder(serverResp.body).Decode(&info); err != nil { - return info, fmt.Errorf("Error reading remote info: %v", err) - } - - return info, nil -} diff --git a/vendor/github.com/docker/engine-api/client/interface.go b/vendor/github.com/docker/engine-api/client/interface.go deleted file mode 100644 index 2c6872f534be..000000000000 --- a/vendor/github.com/docker/engine-api/client/interface.go +++ /dev/null @@ -1,79 +0,0 @@ -package client - -import ( - "io" - - "golang.org/x/net/context" - - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/container" - "github.com/docker/engine-api/types/filters" - "github.com/docker/engine-api/types/network" - "github.com/docker/engine-api/types/registry" -) - -// APIClient is an interface that clients that talk with a docker server must implement. -type APIClient interface { - ClientVersion() string - ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) - ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.ContainerCommitResponse, error) - ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (types.ContainerCreateResponse, error) - ContainerDiff(ctx context.Context, container string) ([]types.ContainerChange, error) - ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error) - ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.ContainerExecCreateResponse, error) - ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) - ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error - ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error - ContainerExport(ctx context.Context, container string) (io.ReadCloser, error) - ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error) - ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (types.ContainerJSON, []byte, error) - ContainerKill(ctx context.Context, container, signal string) error - ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) - ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) - ContainerPause(ctx context.Context, container string) error - ContainerRemove(ctx context.Context, container string, options types.ContainerRemoveOptions) error - ContainerRename(ctx context.Context, container, newContainerName string) error - ContainerResize(ctx context.Context, container string, options types.ResizeOptions) error - ContainerRestart(ctx context.Context, container string, timeout int) error - ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, 
error) - ContainerStats(ctx context.Context, container string, stream bool) (io.ReadCloser, error) - ContainerStart(ctx context.Context, container string) error - ContainerStop(ctx context.Context, container string, timeout int) error - ContainerTop(ctx context.Context, container string, arguments []string) (types.ContainerProcessList, error) - ContainerUnpause(ctx context.Context, container string) error - ContainerUpdate(ctx context.Context, container string, updateConfig container.UpdateConfig) error - ContainerWait(ctx context.Context, container string) (int, error) - CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) - CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error - Events(ctx context.Context, options types.EventsOptions) (io.ReadCloser, error) - ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) - ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) - ImageHistory(ctx context.Context, image string) ([]types.ImageHistory, error) - ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) - ImageInspectWithRaw(ctx context.Context, image string, getSize bool) (types.ImageInspect, []byte, error) - ImageList(ctx context.Context, options types.ImageListOptions) ([]types.Image, error) - ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) - ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) - ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) - ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]types.ImageDelete, error) - ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) - ImageSave(ctx context.Context, images []string) (io.ReadCloser, error) - ImageTag(ctx context.Context, image, ref string, options types.ImageTagOptions) error - Info(ctx context.Context) (types.Info, error) - NetworkConnect(ctx context.Context, networkID, container string, config *network.EndpointSettings) error - NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) - NetworkDisconnect(ctx context.Context, networkID, container string, force bool) error - NetworkInspect(ctx context.Context, networkID string) (types.NetworkResource, error) - NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) - NetworkRemove(ctx context.Context, networkID string) error - RegistryLogin(ctx context.Context, auth types.AuthConfig) (types.AuthResponse, error) - ServerVersion(ctx context.Context) (types.Version, error) - UpdateClientVersion(v string) - VolumeCreate(ctx context.Context, options types.VolumeCreateRequest) (types.Volume, error) - VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) - VolumeList(ctx context.Context, filter filters.Args) (types.VolumesListResponse, error) - VolumeRemove(ctx context.Context, volumeID string) error -} - -// Ensure that Client always implements APIClient. 
-var _ APIClient = &Client{} diff --git a/vendor/github.com/docker/engine-api/client/login.go b/vendor/github.com/docker/engine-api/client/login.go deleted file mode 100644 index 482f94789f08..000000000000 --- a/vendor/github.com/docker/engine-api/client/login.go +++ /dev/null @@ -1,28 +0,0 @@ -package client - -import ( - "encoding/json" - "net/http" - "net/url" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// RegistryLogin authenticates the docker server with a given docker registry. -// It returns UnauthorizerError when the authentication fails. -func (cli *Client) RegistryLogin(ctx context.Context, auth types.AuthConfig) (types.AuthResponse, error) { - resp, err := cli.post(ctx, "/auth", url.Values{}, auth, nil) - - if resp != nil && resp.statusCode == http.StatusUnauthorized { - return types.AuthResponse{}, unauthorizedError{err} - } - if err != nil { - return types.AuthResponse{}, err - } - - var response types.AuthResponse - err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) - return response, err -} diff --git a/vendor/github.com/docker/engine-api/client/network_connect.go b/vendor/github.com/docker/engine-api/client/network_connect.go deleted file mode 100644 index 9a402a3e6384..000000000000 --- a/vendor/github.com/docker/engine-api/client/network_connect.go +++ /dev/null @@ -1,18 +0,0 @@ -package client - -import ( - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/network" - "golang.org/x/net/context" -) - -// NetworkConnect connects a container to an existent network in the docker host. -func (cli *Client) NetworkConnect(ctx context.Context, networkID, containerID string, config *network.EndpointSettings) error { - nc := types.NetworkConnect{ - Container: containerID, - EndpointConfig: config, - } - resp, err := cli.post(ctx, "/networks/"+networkID+"/connect", nil, nc, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/engine-api/client/network_create.go b/vendor/github.com/docker/engine-api/client/network_create.go deleted file mode 100644 index c9c0b9fde772..000000000000 --- a/vendor/github.com/docker/engine-api/client/network_create.go +++ /dev/null @@ -1,25 +0,0 @@ -package client - -import ( - "encoding/json" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// NetworkCreate creates a new network in the docker host. -func (cli *Client) NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) { - networkCreateRequest := types.NetworkCreateRequest{ - NetworkCreate: options, - Name: name, - } - var response types.NetworkCreateResponse - serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil) - if err != nil { - return response, err - } - - json.NewDecoder(serverResp.body).Decode(&response) - ensureReaderClosed(serverResp) - return response, err -} diff --git a/vendor/github.com/docker/engine-api/client/network_disconnect.go b/vendor/github.com/docker/engine-api/client/network_disconnect.go deleted file mode 100644 index a3e33672fef5..000000000000 --- a/vendor/github.com/docker/engine-api/client/network_disconnect.go +++ /dev/null @@ -1,14 +0,0 @@ -package client - -import ( - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// NetworkDisconnect disconnects a container from an existent network in the docker host. 
-func (cli *Client) NetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error { - nd := types.NetworkDisconnect{Container: containerID, Force: force} - resp, err := cli.post(ctx, "/networks/"+networkID+"/disconnect", nil, nd, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/engine-api/client/network_inspect.go b/vendor/github.com/docker/engine-api/client/network_inspect.go deleted file mode 100644 index 4f81e5ce40f7..000000000000 --- a/vendor/github.com/docker/engine-api/client/network_inspect.go +++ /dev/null @@ -1,24 +0,0 @@ -package client - -import ( - "encoding/json" - "net/http" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// NetworkInspect returns the information for a specific network configured in the docker host. -func (cli *Client) NetworkInspect(ctx context.Context, networkID string) (types.NetworkResource, error) { - var networkResource types.NetworkResource - resp, err := cli.get(ctx, "/networks/"+networkID, nil, nil) - if err != nil { - if resp.statusCode == http.StatusNotFound { - return networkResource, networkNotFoundError{networkID} - } - return networkResource, err - } - err = json.NewDecoder(resp.body).Decode(&networkResource) - ensureReaderClosed(resp) - return networkResource, err -} diff --git a/vendor/github.com/docker/engine-api/client/network_list.go b/vendor/github.com/docker/engine-api/client/network_list.go deleted file mode 100644 index 813109c1802c..000000000000 --- a/vendor/github.com/docker/engine-api/client/network_list.go +++ /dev/null @@ -1,31 +0,0 @@ -package client - -import ( - "encoding/json" - "net/url" - - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - "golang.org/x/net/context" -) - -// NetworkList returns the list of networks configured in the docker host. -func (cli *Client) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) { - query := url.Values{} - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToParam(options.Filters) - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - var networkResources []types.NetworkResource - resp, err := cli.get(ctx, "/networks", query, nil) - if err != nil { - return networkResources, err - } - err = json.NewDecoder(resp.body).Decode(&networkResources) - ensureReaderClosed(resp) - return networkResources, err -} diff --git a/vendor/github.com/docker/engine-api/client/network_remove.go b/vendor/github.com/docker/engine-api/client/network_remove.go deleted file mode 100644 index 6bd674892420..000000000000 --- a/vendor/github.com/docker/engine-api/client/network_remove.go +++ /dev/null @@ -1,10 +0,0 @@ -package client - -import "golang.org/x/net/context" - -// NetworkRemove removes an existent network from the docker host. 
-func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error { - resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/engine-api/client/request.go b/vendor/github.com/docker/engine-api/client/request.go deleted file mode 100644 index cdbb0975bd28..000000000000 --- a/vendor/github.com/docker/engine-api/client/request.go +++ /dev/null @@ -1,185 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strings" - - "github.com/docker/engine-api/client/transport/cancellable" - "golang.org/x/net/context" -) - -// serverResponse is a wrapper for http API responses. -type serverResponse struct { - body io.ReadCloser - header http.Header - statusCode int -} - -// head sends an http request to the docker API using the method HEAD. -func (cli *Client) head(ctx context.Context, path string, query url.Values, headers map[string][]string) (*serverResponse, error) { - return cli.sendRequest(ctx, "HEAD", path, query, nil, headers) -} - -// getWithContext sends an http request to the docker API using the method GET with a specific go context. -func (cli *Client) get(ctx context.Context, path string, query url.Values, headers map[string][]string) (*serverResponse, error) { - return cli.sendRequest(ctx, "GET", path, query, nil, headers) -} - -// postWithContext sends an http request to the docker API using the method POST with a specific go context. -func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (*serverResponse, error) { - return cli.sendRequest(ctx, "POST", path, query, obj, headers) -} - -func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (*serverResponse, error) { - return cli.sendClientRequest(ctx, "POST", path, query, body, headers) -} - -// put sends an http request to the docker API using the method PUT. -func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (*serverResponse, error) { - return cli.sendRequest(ctx, "PUT", path, query, obj, headers) -} - -// put sends an http request to the docker API using the method PUT. -func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (*serverResponse, error) { - return cli.sendClientRequest(ctx, "PUT", path, query, body, headers) -} - -// delete sends an http request to the docker API using the method DELETE. 
-func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers map[string][]string) (*serverResponse, error) { - return cli.sendRequest(ctx, "DELETE", path, query, nil, headers) -} - -func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, obj interface{}, headers map[string][]string) (*serverResponse, error) { - var body io.Reader - - if obj != nil { - var err error - body, err = encodeData(obj) - if err != nil { - return nil, err - } - if headers == nil { - headers = make(map[string][]string) - } - headers["Content-Type"] = []string{"application/json"} - } - - return cli.sendClientRequest(ctx, method, path, query, body, headers) -} - -func (cli *Client) sendClientRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers map[string][]string) (*serverResponse, error) { - serverResp := &serverResponse{ - body: nil, - statusCode: -1, - } - - expectedPayload := (method == "POST" || method == "PUT") - if expectedPayload && body == nil { - body = bytes.NewReader([]byte{}) - } - - req, err := cli.newRequest(method, path, query, body, headers) - if cli.proto == "unix" || cli.proto == "npipe" { - // For local communications, it doesn't matter what the host is. We just - // need a valid and meaningful host name. (See #189) - req.Host = "docker" - } - req.URL.Host = cli.addr - req.URL.Scheme = cli.transport.Scheme() - - if expectedPayload && req.Header.Get("Content-Type") == "" { - req.Header.Set("Content-Type", "text/plain") - } - - resp, err := cancellable.Do(ctx, cli.transport, req) - if resp != nil { - serverResp.statusCode = resp.StatusCode - } - - if err != nil { - if isTimeout(err) || strings.Contains(err.Error(), "connection refused") || strings.Contains(err.Error(), "dial unix") { - return serverResp, ErrConnectionFailed - } - - if !cli.transport.Secure() && strings.Contains(err.Error(), "malformed HTTP response") { - return serverResp, fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err) - } - if cli.transport.Secure() && strings.Contains(err.Error(), "remote error: bad certificate") { - return serverResp, fmt.Errorf("The server probably has client authentication (--tlsverify) enabled. 
Please check your TLS client certification settings: %v", err) - } - - return serverResp, fmt.Errorf("An error occurred trying to connect: %v", err) - } - - if serverResp.statusCode < 200 || serverResp.statusCode >= 400 { - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return serverResp, err - } - if len(body) == 0 { - return serverResp, fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), req.URL) - } - return serverResp, fmt.Errorf("Error response from daemon: %s", bytes.TrimSpace(body)) - } - - serverResp.body = resp.Body - serverResp.header = resp.Header - return serverResp, nil -} - -func (cli *Client) newRequest(method, path string, query url.Values, body io.Reader, headers map[string][]string) (*http.Request, error) { - apiPath := cli.getAPIPath(path, query) - req, err := http.NewRequest(method, apiPath, body) - if err != nil { - return nil, err - } - - // Add CLI Config's HTTP Headers BEFORE we set the Docker headers - // then the user can't change OUR headers - for k, v := range cli.customHTTPHeaders { - req.Header.Set(k, v) - } - - if headers != nil { - for k, v := range headers { - req.Header[k] = v - } - } - - return req, nil -} - -func encodeData(data interface{}) (*bytes.Buffer, error) { - params := bytes.NewBuffer(nil) - if data != nil { - if err := json.NewEncoder(params).Encode(data); err != nil { - return nil, err - } - } - return params, nil -} - -func ensureReaderClosed(response *serverResponse) { - if response != nil && response.body != nil { - response.body.Close() - } -} - -func isTimeout(err error) bool { - type timeout interface { - Timeout() bool - } - e := err - switch urlErr := err.(type) { - case *url.Error: - e = urlErr.Err - } - t, ok := e.(timeout) - return ok && t.Timeout() -} diff --git a/vendor/github.com/docker/engine-api/client/transport/cancellable/LICENSE b/vendor/github.com/docker/engine-api/client/transport/cancellable/LICENSE deleted file mode 100644 index 6a66aea5eafe..000000000000 --- a/vendor/github.com/docker/engine-api/client/transport/cancellable/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/docker/engine-api/client/transport/cancellable/canceler.go b/vendor/github.com/docker/engine-api/client/transport/cancellable/canceler.go deleted file mode 100644 index 11dff60026c7..000000000000 --- a/vendor/github.com/docker/engine-api/client/transport/cancellable/canceler.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.5 - -package cancellable - -import ( - "net/http" - - "github.com/docker/engine-api/client/transport" -) - -func canceler(client transport.Sender, req *http.Request) func() { - // TODO(djd): Respect any existing value of req.Cancel. - ch := make(chan struct{}) - req.Cancel = ch - - return func() { - close(ch) - } -} diff --git a/vendor/github.com/docker/engine-api/client/transport/cancellable/canceler_go14.go b/vendor/github.com/docker/engine-api/client/transport/cancellable/canceler_go14.go deleted file mode 100644 index 8ff2845c28e8..000000000000 --- a/vendor/github.com/docker/engine-api/client/transport/cancellable/canceler_go14.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.5 - -package cancellable - -import ( - "net/http" - - "github.com/docker/engine-api/client/transport" -) - -type requestCanceler interface { - CancelRequest(*http.Request) -} - -func canceler(client transport.Sender, req *http.Request) func() { - rc, ok := client.(requestCanceler) - if !ok { - return func() {} - } - return func() { - rc.CancelRequest(req) - } -} diff --git a/vendor/github.com/docker/engine-api/client/transport/cancellable/cancellable.go b/vendor/github.com/docker/engine-api/client/transport/cancellable/cancellable.go deleted file mode 100644 index 526feb0f456f..000000000000 --- a/vendor/github.com/docker/engine-api/client/transport/cancellable/cancellable.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package cancellable provides helper function to cancel http requests. -package cancellable - -import ( - "io" - "net/http" - - "github.com/docker/engine-api/client/transport" - - "golang.org/x/net/context" -) - -func nop() {} - -var ( - testHookContextDoneBeforeHeaders = nop - testHookDoReturned = nop - testHookDidBodyClose = nop -) - -// Do sends an HTTP request with the provided transport.Sender and returns an HTTP response. -// If the client is nil, http.DefaultClient is used. -// If the context is canceled or times out, ctx.Err() will be returned. -// -// FORK INFORMATION: -// -// This function deviates from the upstream version in golang.org/x/net/context/ctxhttp by -// taking a Sender interface rather than a *http.Client directly. 
That allow us to use -// this funcion with mocked clients and hijacked connections. -func Do(ctx context.Context, client transport.Sender, req *http.Request) (*http.Response, error) { - if client == nil { - client = http.DefaultClient - } - - // Request cancelation changed in Go 1.5, see canceler.go and canceler_go14.go. - cancel := canceler(client, req) - - type responseAndError struct { - resp *http.Response - err error - } - result := make(chan responseAndError, 1) - - go func() { - resp, err := client.Do(req) - testHookDoReturned() - result <- responseAndError{resp, err} - }() - - var resp *http.Response - - select { - case <-ctx.Done(): - testHookContextDoneBeforeHeaders() - cancel() - // Clean up after the goroutine calling client.Do: - go func() { - if r := <-result; r.resp != nil && r.resp.Body != nil { - testHookDidBodyClose() - r.resp.Body.Close() - } - }() - return nil, ctx.Err() - case r := <-result: - var err error - resp, err = r.resp, r.err - if err != nil { - return resp, err - } - } - - c := make(chan struct{}) - go func() { - select { - case <-ctx.Done(): - cancel() - case <-c: - // The response's Body is closed. - } - }() - resp.Body = ¬ifyingReader{resp.Body, c} - - return resp, nil -} - -// notifyingReader is an io.ReadCloser that closes the notify channel after -// Close is called or a Read fails on the underlying ReadCloser. -type notifyingReader struct { - io.ReadCloser - notify chan<- struct{} -} - -func (r *notifyingReader) Read(p []byte) (int, error) { - n, err := r.ReadCloser.Read(p) - if err != nil && r.notify != nil { - close(r.notify) - r.notify = nil - } - return n, err -} - -func (r *notifyingReader) Close() error { - err := r.ReadCloser.Close() - if r.notify != nil { - close(r.notify) - r.notify = nil - } - return err -} diff --git a/vendor/github.com/docker/engine-api/client/transport/client.go b/vendor/github.com/docker/engine-api/client/transport/client.go deleted file mode 100644 index 13d4b3ab3de8..000000000000 --- a/vendor/github.com/docker/engine-api/client/transport/client.go +++ /dev/null @@ -1,47 +0,0 @@ -package transport - -import ( - "crypto/tls" - "net/http" -) - -// Sender is an interface that clients must implement -// to be able to send requests to a remote connection. -type Sender interface { - // Do sends request to a remote endpoint. - Do(*http.Request) (*http.Response, error) -} - -// Client is an interface that abstracts all remote connections. -type Client interface { - Sender - // Secure tells whether the connection is secure or not. - Secure() bool - // Scheme returns the connection protocol the client uses. - Scheme() string - // TLSConfig returns any TLS configuration the client uses. - TLSConfig() *tls.Config -} - -// tlsInfo returns information about the TLS configuration. -type tlsInfo struct { - tlsConfig *tls.Config -} - -// TLSConfig returns the TLS configuration. -func (t *tlsInfo) TLSConfig() *tls.Config { - return t.tlsConfig -} - -// Scheme returns protocol scheme to use. -func (t *tlsInfo) Scheme() string { - if t.tlsConfig != nil { - return "https" - } - return "http" -} - -// Secure returns true if there is a TLS configuration. 
-func (t *tlsInfo) Secure() bool { - return t.tlsConfig != nil -} diff --git a/vendor/github.com/docker/engine-api/client/transport/transport.go b/vendor/github.com/docker/engine-api/client/transport/transport.go deleted file mode 100644 index ff28af1855f6..000000000000 --- a/vendor/github.com/docker/engine-api/client/transport/transport.go +++ /dev/null @@ -1,57 +0,0 @@ -// Package transport provides function to send request to remote endpoints. -package transport - -import ( - "fmt" - "net/http" - - "github.com/docker/go-connections/sockets" -) - -// apiTransport holds information about the http transport to connect with the API. -type apiTransport struct { - *http.Client - *tlsInfo - transport *http.Transport -} - -// NewTransportWithHTTP creates a new transport based on the provided proto, address and http client. -// It uses Docker's default http transport configuration if the client is nil. -// It does not modify the client's transport if it's not nil. -func NewTransportWithHTTP(proto, addr string, client *http.Client) (Client, error) { - var transport *http.Transport - - if client != nil { - tr, ok := client.Transport.(*http.Transport) - if !ok { - return nil, fmt.Errorf("unable to verify TLS configuration, invalid transport %v", client.Transport) - } - transport = tr - } else { - transport = defaultTransport(proto, addr) - client = &http.Client{ - Transport: transport, - } - } - - return &apiTransport{ - Client: client, - tlsInfo: &tlsInfo{transport.TLSClientConfig}, - transport: transport, - }, nil -} - -// CancelRequest stops a request execution. -func (a *apiTransport) CancelRequest(req *http.Request) { - a.transport.CancelRequest(req) -} - -// defaultTransport creates a new http.Transport with Docker's -// default transport configuration. -func defaultTransport(proto, addr string) *http.Transport { - tr := new(http.Transport) - sockets.ConfigureTransport(tr, proto, addr) - return tr -} - -var _ Client = &apiTransport{} diff --git a/vendor/github.com/docker/engine-api/client/version.go b/vendor/github.com/docker/engine-api/client/version.go deleted file mode 100644 index e037551a21b0..000000000000 --- a/vendor/github.com/docker/engine-api/client/version.go +++ /dev/null @@ -1,21 +0,0 @@ -package client - -import ( - "encoding/json" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// ServerVersion returns information of the docker client and server host. -func (cli *Client) ServerVersion(ctx context.Context) (types.Version, error) { - resp, err := cli.get(ctx, "/version", nil, nil) - if err != nil { - return types.Version{}, err - } - - var server types.Version - err = json.NewDecoder(resp.body).Decode(&server) - ensureReaderClosed(resp) - return server, err -} diff --git a/vendor/github.com/docker/engine-api/client/volume_create.go b/vendor/github.com/docker/engine-api/client/volume_create.go deleted file mode 100644 index cc1e1c177231..000000000000 --- a/vendor/github.com/docker/engine-api/client/volume_create.go +++ /dev/null @@ -1,20 +0,0 @@ -package client - -import ( - "encoding/json" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// VolumeCreate creates a volume in the docker host. 
-func (cli *Client) VolumeCreate(ctx context.Context, options types.VolumeCreateRequest) (types.Volume, error) { - var volume types.Volume - resp, err := cli.post(ctx, "/volumes/create", nil, options, nil) - if err != nil { - return volume, err - } - err = json.NewDecoder(resp.body).Decode(&volume) - ensureReaderClosed(resp) - return volume, err -} diff --git a/vendor/github.com/docker/engine-api/client/volume_inspect.go b/vendor/github.com/docker/engine-api/client/volume_inspect.go deleted file mode 100644 index 4bf4a7b084ad..000000000000 --- a/vendor/github.com/docker/engine-api/client/volume_inspect.go +++ /dev/null @@ -1,24 +0,0 @@ -package client - -import ( - "encoding/json" - "net/http" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// VolumeInspect returns the information about a specific volume in the docker host. -func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) { - var volume types.Volume - resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil) - if err != nil { - if resp.statusCode == http.StatusNotFound { - return volume, volumeNotFoundError{volumeID} - } - return volume, err - } - err = json.NewDecoder(resp.body).Decode(&volume) - ensureReaderClosed(resp) - return volume, err -} diff --git a/vendor/github.com/docker/engine-api/client/volume_list.go b/vendor/github.com/docker/engine-api/client/volume_list.go deleted file mode 100644 index bb4c40d5f981..000000000000 --- a/vendor/github.com/docker/engine-api/client/volume_list.go +++ /dev/null @@ -1,32 +0,0 @@ -package client - -import ( - "encoding/json" - "net/url" - - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - "golang.org/x/net/context" -) - -// VolumeList returns the volumes configured in the docker host. -func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (types.VolumesListResponse, error) { - var volumes types.VolumesListResponse - query := url.Values{} - - if filter.Len() > 0 { - filterJSON, err := filters.ToParam(filter) - if err != nil { - return volumes, err - } - query.Set("filters", filterJSON) - } - resp, err := cli.get(ctx, "/volumes", query, nil) - if err != nil { - return volumes, err - } - - err = json.NewDecoder(resp.body).Decode(&volumes) - ensureReaderClosed(resp) - return volumes, err -} diff --git a/vendor/github.com/docker/engine-api/client/volume_remove.go b/vendor/github.com/docker/engine-api/client/volume_remove.go deleted file mode 100644 index 0dce24c79b84..000000000000 --- a/vendor/github.com/docker/engine-api/client/volume_remove.go +++ /dev/null @@ -1,10 +0,0 @@ -package client - -import "golang.org/x/net/context" - -// VolumeRemove removes a volume from the docker host. -func (cli *Client) VolumeRemove(ctx context.Context, volumeID string) error { - resp, err := cli.delete(ctx, "/volumes/"+volumeID, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/engine-api/types/auth.go b/vendor/github.com/docker/engine-api/types/auth.go deleted file mode 100644 index 056af6b84259..000000000000 --- a/vendor/github.com/docker/engine-api/types/auth.go +++ /dev/null @@ -1,22 +0,0 @@ -package types - -// AuthConfig contains authorization information for connecting to a Registry -type AuthConfig struct { - Username string `json:"username,omitempty"` - Password string `json:"password,omitempty"` - Auth string `json:"auth,omitempty"` - - // Email is an optional value associated with the username. 
- // This field is deprecated and will be removed in a later - // version of docker. - Email string `json:"email,omitempty"` - - ServerAddress string `json:"serveraddress,omitempty"` - - // IdentityToken is used to authenticate the user and get - // an access token for the registry. - IdentityToken string `json:"identitytoken,omitempty"` - - // RegistryToken is a bearer token to be sent to a registry - RegistryToken string `json:"registrytoken,omitempty"` -} diff --git a/vendor/github.com/docker/engine-api/types/blkiodev/blkio.go b/vendor/github.com/docker/engine-api/types/blkiodev/blkio.go deleted file mode 100644 index 931ae10ab1ef..000000000000 --- a/vendor/github.com/docker/engine-api/types/blkiodev/blkio.go +++ /dev/null @@ -1,23 +0,0 @@ -package blkiodev - -import "fmt" - -// WeightDevice is a structure that holds device:weight pair -type WeightDevice struct { - Path string - Weight uint16 -} - -func (w *WeightDevice) String() string { - return fmt.Sprintf("%s:%d", w.Path, w.Weight) -} - -// ThrottleDevice is a structure that holds device:rate_per_second pair -type ThrottleDevice struct { - Path string - Rate uint64 -} - -func (t *ThrottleDevice) String() string { - return fmt.Sprintf("%s:%d", t.Path, t.Rate) -} diff --git a/vendor/github.com/docker/engine-api/types/client.go b/vendor/github.com/docker/engine-api/types/client.go deleted file mode 100644 index fa3b2cfb458c..000000000000 --- a/vendor/github.com/docker/engine-api/types/client.go +++ /dev/null @@ -1,235 +0,0 @@ -package types - -import ( - "bufio" - "io" - "net" - - "github.com/docker/engine-api/types/container" - "github.com/docker/engine-api/types/filters" - "github.com/docker/go-units" -) - -// ContainerAttachOptions holds parameters to attach to a container. -type ContainerAttachOptions struct { - Stream bool - Stdin bool - Stdout bool - Stderr bool - DetachKeys string -} - -// ContainerCommitOptions holds parameters to commit changes into a container. -type ContainerCommitOptions struct { - Reference string - Comment string - Author string - Changes []string - Pause bool - Config *container.Config -} - -// ContainerExecInspect holds information returned by exec inspect. -type ContainerExecInspect struct { - ExecID string - ContainerID string - Running bool - ExitCode int -} - -// ContainerListOptions holds parameters to list containers with. -type ContainerListOptions struct { - Quiet bool - Size bool - All bool - Latest bool - Since string - Before string - Limit int - Filter filters.Args -} - -// ContainerLogsOptions holds parameters to filter logs with. -type ContainerLogsOptions struct { - ShowStdout bool - ShowStderr bool - Since string - Timestamps bool - Follow bool - Tail string - Details bool -} - -// ContainerRemoveOptions holds parameters to remove containers. -type ContainerRemoveOptions struct { - RemoveVolumes bool - RemoveLinks bool - Force bool -} - -// CopyToContainerOptions holds information -// about files to copy into a container -type CopyToContainerOptions struct { - AllowOverwriteDirWithFile bool -} - -// EventsOptions hold parameters to filter events with. -type EventsOptions struct { - Since string - Until string - Filters filters.Args -} - -// NetworkListOptions holds parameters to filter the list of networks with. -type NetworkListOptions struct { - Filters filters.Args -} - -// HijackedResponse holds connection information for a hijacked request. -type HijackedResponse struct { - Conn net.Conn - Reader *bufio.Reader -} - -// Close closes the hijacked connection and reader. 
-func (h *HijackedResponse) Close() { - h.Conn.Close() -} - -// CloseWriter is an interface that implements structs -// that close input streams to prevent from writing. -type CloseWriter interface { - CloseWrite() error -} - -// CloseWrite closes a readWriter for writing. -func (h *HijackedResponse) CloseWrite() error { - if conn, ok := h.Conn.(CloseWriter); ok { - return conn.CloseWrite() - } - return nil -} - -// ImageBuildOptions holds the information -// necessary to build images. -type ImageBuildOptions struct { - Tags []string - SuppressOutput bool - RemoteContext string - NoCache bool - Remove bool - ForceRemove bool - PullParent bool - Isolation container.Isolation - CPUSetCPUs string - CPUSetMems string - CPUShares int64 - CPUQuota int64 - CPUPeriod int64 - Memory int64 - MemorySwap int64 - CgroupParent string - ShmSize int64 - Dockerfile string - Ulimits []*units.Ulimit - BuildArgs map[string]string - AuthConfigs map[string]AuthConfig - Context io.Reader - Labels map[string]string -} - -// ImageBuildResponse holds information -// returned by a server after building -// an image. -type ImageBuildResponse struct { - Body io.ReadCloser - OSType string -} - -// ImageCreateOptions holds information to create images. -type ImageCreateOptions struct { - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry -} - -// ImageImportSource holds source information for ImageImport -type ImageImportSource struct { - Source io.Reader // Source is the data to send to the server to create this image from (mutually exclusive with SourceName) - SourceName string // SourceName is the name of the image to pull (mutually exclusive with Source) -} - -// ImageImportOptions holds information to import images from the client host. -type ImageImportOptions struct { - Tag string // Tag is the name to tag this image with. This attribute is deprecated. - Message string // Message is the message to tag the image with - Changes []string // Changes are the raw changes to apply to this image -} - -// ImageListOptions holds parameters to filter the list of images with. -type ImageListOptions struct { - MatchName string - All bool - Filters filters.Args -} - -// ImageLoadResponse returns information to the client about a load process. -type ImageLoadResponse struct { - // Body must be closed to avoid a resource leak - Body io.ReadCloser - JSON bool -} - -// ImagePullOptions holds information to pull images. -type ImagePullOptions struct { - All bool - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry - PrivilegeFunc RequestPrivilegeFunc -} - -// RequestPrivilegeFunc is a function interface that -// clients can supply to retry operations after -// getting an authorization error. -// This function returns the registry authentication -// header value in base 64 format, or an error -// if the privilege request fails. -type RequestPrivilegeFunc func() (string, error) - -//ImagePushOptions holds information to push images. -type ImagePushOptions ImagePullOptions - -// ImageRemoveOptions holds parameters to remove images. -type ImageRemoveOptions struct { - Force bool - PruneChildren bool -} - -// ImageSearchOptions holds parameters to search images with. -type ImageSearchOptions struct { - RegistryAuth string - PrivilegeFunc RequestPrivilegeFunc - Filters filters.Args -} - -// ImageTagOptions holds parameters to tag an image -type ImageTagOptions struct { - Force bool -} - -// ResizeOptions holds parameters to resize a tty. 
-// It can be used to resize container ttys and -// exec process ttys too. -type ResizeOptions struct { - Height int - Width int -} - -// VersionResponse holds version information for the client and the server -type VersionResponse struct { - Client *Version - Server *Version -} - -// ServerOK returns true when the client could connect to the docker server -// and parse the information received. It returns false otherwise. -func (v VersionResponse) ServerOK() bool { - return v.Server != nil -} diff --git a/vendor/github.com/docker/engine-api/types/configs.go b/vendor/github.com/docker/engine-api/types/configs.go deleted file mode 100644 index 7d4fcb343e33..000000000000 --- a/vendor/github.com/docker/engine-api/types/configs.go +++ /dev/null @@ -1,53 +0,0 @@ -package types - -import ( - "github.com/docker/engine-api/types/container" - "github.com/docker/engine-api/types/network" -) - -// configs holds structs used for internal communication between the -// frontend (such as an http server) and the backend (such as the -// docker daemon). - -// ContainerCreateConfig is the parameter set to ContainerCreate() -type ContainerCreateConfig struct { - Name string - Config *container.Config - HostConfig *container.HostConfig - NetworkingConfig *network.NetworkingConfig - AdjustCPUShares bool -} - -// ContainerRmConfig holds arguments for the container remove -// operation. This struct is used to tell the backend what operations -// to perform. -type ContainerRmConfig struct { - ForceRemove, RemoveVolume, RemoveLink bool -} - -// ContainerCommitConfig contains build configs for commit operation, -// and is used when making a commit with the current state of the container. -type ContainerCommitConfig struct { - Pause bool - Repo string - Tag string - Author string - Comment string - // merge container config into commit config before commit - MergeConfigs bool - Config *container.Config -} - -// ExecConfig is a small subset of the Config struct that holds the configuration -// for the exec feature of docker. -type ExecConfig struct { - User string // User that will run the command - Privileged bool // Is the container in privileged mode - Tty bool // Attach standard streams to a tty. - AttachStdin bool // Attach the standard input, makes possible user interaction - AttachStderr bool // Attach the standard output - AttachStdout bool // Attach the standard error - Detach bool // Execute in detach mode - DetachKeys string // Escape keys for detach - Cmd []string // Execution commands and args -} diff --git a/vendor/github.com/docker/engine-api/types/container/config.go b/vendor/github.com/docker/engine-api/types/container/config.go deleted file mode 100644 index 1dfc40834800..000000000000 --- a/vendor/github.com/docker/engine-api/types/container/config.go +++ /dev/null @@ -1,37 +0,0 @@ -package container - -import ( - "github.com/docker/engine-api/types/strslice" - "github.com/docker/go-connections/nat" -) - -// Config contains the configuration data about a container. -// It should hold only portable information about the container. -// Here, "portable" means "independent from the host we are running on". -// Non-portable information *should* appear in HostConfig. -// All fields added to this struct must be marked `omitempty` to keep getting -// predictable hashes from the old `v1Compatibility` configuration. 
-type Config struct { - Hostname string // Hostname - Domainname string // Domainname - User string // User that will run the command(s) inside the container - AttachStdin bool // Attach the standard input, makes possible user interaction - AttachStdout bool // Attach the standard output - AttachStderr bool // Attach the standard error - ExposedPorts map[nat.Port]struct{} `json:",omitempty"` // List of exposed ports - Tty bool // Attach standard streams to a tty, including stdin if it is not closed. - OpenStdin bool // Open stdin - StdinOnce bool // If true, close stdin after the 1 attached client disconnects. - Env []string // List of environment variable to set in the container - Cmd strslice.StrSlice // Command to run when starting the container - ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) - Image string // Name of the image as it was passed by the operator (eg. could be symbolic) - Volumes map[string]struct{} // List of volumes (mounts) used for the container - WorkingDir string // Current directory (PWD) in the command will be launched - Entrypoint strslice.StrSlice // Entrypoint to run when starting the container - NetworkDisabled bool `json:",omitempty"` // Is network disabled - MacAddress string `json:",omitempty"` // Mac Address of the container - OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile - Labels map[string]string // List of labels set to this container - StopSignal string `json:",omitempty"` // Signal to stop a container -} diff --git a/vendor/github.com/docker/engine-api/types/container/host_config.go b/vendor/github.com/docker/engine-api/types/container/host_config.go deleted file mode 100644 index 2446c1904d8a..000000000000 --- a/vendor/github.com/docker/engine-api/types/container/host_config.go +++ /dev/null @@ -1,320 +0,0 @@ -package container - -import ( - "strings" - - "github.com/docker/engine-api/types/blkiodev" - "github.com/docker/engine-api/types/strslice" - "github.com/docker/go-connections/nat" - "github.com/docker/go-units" -) - -// NetworkMode represents the container network stack. -type NetworkMode string - -// Isolation represents the isolation technology of a container. The supported -// values are platform specific -type Isolation string - -// IsDefault indicates the default isolation technology of a container. On Linux this -// is the native driver. On Windows, this is a Windows Server Container. -func (i Isolation) IsDefault() bool { - return strings.ToLower(string(i)) == "default" || string(i) == "" -} - -// IpcMode represents the container ipc stack. -type IpcMode string - -// IsPrivate indicates whether the container uses its private ipc stack. -func (n IpcMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) -} - -// IsHost indicates whether the container uses the host's ipc stack. -func (n IpcMode) IsHost() bool { - return n == "host" -} - -// IsContainer indicates whether the container uses a container's ipc stack. -func (n IpcMode) IsContainer() bool { - parts := strings.SplitN(string(n), ":", 2) - return len(parts) > 1 && parts[0] == "container" -} - -// Valid indicates whether the ipc stack is valid. -func (n IpcMode) Valid() bool { - parts := strings.Split(string(n), ":") - switch mode := parts[0]; mode { - case "", "host": - case "container": - if len(parts) != 2 || parts[1] == "" { - return false - } - default: - return false - } - return true -} - -// Container returns the name of the container ipc stack is going to be used. 
-func (n IpcMode) Container() string { - parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 { - return parts[1] - } - return "" -} - -// UsernsMode represents userns mode in the container. -type UsernsMode string - -// IsHost indicates whether the container uses the host's userns. -func (n UsernsMode) IsHost() bool { - return n == "host" -} - -// IsPrivate indicates whether the container uses the a private userns. -func (n UsernsMode) IsPrivate() bool { - return !(n.IsHost()) -} - -// Valid indicates whether the userns is valid. -func (n UsernsMode) Valid() bool { - parts := strings.Split(string(n), ":") - switch mode := parts[0]; mode { - case "", "host": - default: - return false - } - return true -} - -// CgroupSpec represents the cgroup to use for the container. -type CgroupSpec string - -// IsContainer indicates whether the container is using another container cgroup -func (c CgroupSpec) IsContainer() bool { - parts := strings.SplitN(string(c), ":", 2) - return len(parts) > 1 && parts[0] == "container" -} - -// Valid indicates whether the cgroup spec is valid. -func (c CgroupSpec) Valid() bool { - return c.IsContainer() || c == "" -} - -// Container returns the name of the container whose cgroup will be used. -func (c CgroupSpec) Container() string { - parts := strings.SplitN(string(c), ":", 2) - if len(parts) > 1 { - return parts[1] - } - return "" -} - -// UTSMode represents the UTS namespace of the container. -type UTSMode string - -// IsPrivate indicates whether the container uses its private UTS namespace. -func (n UTSMode) IsPrivate() bool { - return !(n.IsHost()) -} - -// IsHost indicates whether the container uses the host's UTS namespace. -func (n UTSMode) IsHost() bool { - return n == "host" -} - -// Valid indicates whether the UTS namespace is valid. -func (n UTSMode) Valid() bool { - parts := strings.Split(string(n), ":") - switch mode := parts[0]; mode { - case "", "host": - default: - return false - } - return true -} - -// PidMode represents the pid namespace of the container. -type PidMode string - -// IsPrivate indicates whether the container uses its own new pid namespace. -func (n PidMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) -} - -// IsHost indicates whether the container uses the host's pid namespace. -func (n PidMode) IsHost() bool { - return n == "host" -} - -// IsContainer indicates whether the container uses a container's pid namespace. -func (n PidMode) IsContainer() bool { - parts := strings.SplitN(string(n), ":", 2) - return len(parts) > 1 && parts[0] == "container" -} - -// Valid indicates whether the pid namespace is valid. -func (n PidMode) Valid() bool { - parts := strings.Split(string(n), ":") - switch mode := parts[0]; mode { - case "", "host": - case "container": - if len(parts) != 2 || parts[1] == "" { - return false - } - default: - return false - } - return true -} - -// Container returns the name of the container whose pid namespace is going to be used. -func (n PidMode) Container() string { - parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 { - return parts[1] - } - return "" -} - -// DeviceMapping represents the device mapping between the host and the container. -type DeviceMapping struct { - PathOnHost string - PathInContainer string - CgroupPermissions string -} - -// RestartPolicy represents the restart policies of the container. -type RestartPolicy struct { - Name string - MaximumRetryCount int -} - -// IsNone indicates whether the container has the "no" restart policy. 
-// This means the container will not automatically restart when exiting. -func (rp *RestartPolicy) IsNone() bool { - return rp.Name == "no" -} - -// IsAlways indicates whether the container has the "always" restart policy. -// This means the container will automatically restart regardless of the exit status. -func (rp *RestartPolicy) IsAlways() bool { - return rp.Name == "always" -} - -// IsOnFailure indicates whether the container has the "on-failure" restart policy. -// This means the container will automatically restart of exiting with a non-zero exit status. -func (rp *RestartPolicy) IsOnFailure() bool { - return rp.Name == "on-failure" -} - -// IsUnlessStopped indicates whether the container has the -// "unless-stopped" restart policy. This means the container will -// automatically restart unless user has put it to stopped state. -func (rp *RestartPolicy) IsUnlessStopped() bool { - return rp.Name == "unless-stopped" -} - -// IsSame compares two RestartPolicy to see if they are the same -func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool { - return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount -} - -// LogConfig represents the logging configuration of the container. -type LogConfig struct { - Type string - Config map[string]string -} - -// Resources contains container's resources (cgroups config, ulimits...) -type Resources struct { - // Applicable to all platforms - CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers) - Memory int64 // Memory limit (in bytes) - - // Applicable to UNIX platforms - CgroupParent string // Parent cgroup. - BlkioWeight uint16 // Block IO weight (relative weight vs. other containers) - BlkioWeightDevice []*blkiodev.WeightDevice - BlkioDeviceReadBps []*blkiodev.ThrottleDevice - BlkioDeviceWriteBps []*blkiodev.ThrottleDevice - BlkioDeviceReadIOps []*blkiodev.ThrottleDevice - BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice - CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period - CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota - CpusetCpus string // CpusetCpus 0-2, 0,1 - CpusetMems string // CpusetMems 0-2, 0,1 - Devices []DeviceMapping // List of devices to map inside the container - DiskQuota int64 // Disk limit (in bytes) - KernelMemory int64 // Kernel memory limit (in bytes) - MemoryReservation int64 // Memory soft limit (in bytes) - MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap - MemorySwappiness *int64 // Tuning container memory swappiness behaviour - OomKillDisable *bool // Whether to disable OOM Killer or not - PidsLimit int64 // Setting pids limit for a container - Ulimits []*units.Ulimit // List of ulimits to be set in the container - - // Applicable to Windows - CPUCount int64 `json:"CpuCount"` // CPU count - CPUPercent int64 `json:"CpuPercent"` // CPU percent - IOMaximumIOps uint64 // Maximum IOps for the container system drive - IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive - NetworkMaximumBandwidth uint64 // Maximum bandwidth of the network endpoint in bytes per second -} - -// UpdateConfig holds the mutable attributes of a Container. -// Those attributes can be updated at runtime. -type UpdateConfig struct { - // Contains container's resources (cgroups, ulimits) - Resources - RestartPolicy RestartPolicy -} - -// HostConfig the non-portable Config structure of a container. 
-// Here, "non-portable" means "dependent of the host we are running on". -// Portable information *should* appear in Config. -type HostConfig struct { - // Applicable to all platforms - Binds []string // List of volume bindings for this container - ContainerIDFile string // File (path) where the containerId is written - LogConfig LogConfig // Configuration of the logs for this container - NetworkMode NetworkMode // Network mode to use for the container - PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host - RestartPolicy RestartPolicy // Restart policy to be used for the container - AutoRemove bool // Automatically remove container when it exits - VolumeDriver string // Name of the volume driver used to mount volumes - VolumesFrom []string // List of volumes to take from other container - - // Applicable to UNIX platforms - CapAdd strslice.StrSlice // List of kernel capabilities to add to the container - CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container - DNS []string `json:"Dns"` // List of DNS server to lookup - DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for - DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for - ExtraHosts []string // List of extra hosts - GroupAdd []string // List of additional groups that the container process will run as - IpcMode IpcMode // IPC namespace to use for the container - Cgroup CgroupSpec // Cgroup to use for the container - Links []string // List of links (in the name:alias form) - OomScoreAdj int // Container preference for OOM-killing - PidMode PidMode // PID namespace to use for the container - Privileged bool // Is the container in privileged mode - PublishAllPorts bool // Should docker publish all exposed port for the container - ReadonlyRootfs bool // Is the container root filesystem in read-only - SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux. - StorageOpt map[string]string // Storage driver options per container. - Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container - UTSMode UTSMode // UTS namespace to use for the container - UsernsMode UsernsMode // The user namespace to use for the container - ShmSize int64 // Total shm memory usage - Sysctls map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container - - // Applicable to Windows - ConsoleSize [2]int // Initial console size - Isolation Isolation // Isolation technology of the container (eg default, hyperv) - - // Contains container's resources (cgroups, ulimits) - Resources -} diff --git a/vendor/github.com/docker/engine-api/types/container/hostconfig_unix.go b/vendor/github.com/docker/engine-api/types/container/hostconfig_unix.go deleted file mode 100644 index 4171059a476a..000000000000 --- a/vendor/github.com/docker/engine-api/types/container/hostconfig_unix.go +++ /dev/null @@ -1,81 +0,0 @@ -// +build !windows - -package container - -import "strings" - -// IsValid indicates if an isolation technology is valid -func (i Isolation) IsValid() bool { - return i.IsDefault() -} - -// IsPrivate indicates whether container uses it's private network stack. -func (n NetworkMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) -} - -// IsDefault indicates whether container uses the default network stack. -func (n NetworkMode) IsDefault() bool { - return n == "default" -} - -// NetworkName returns the name of the network stack. 
-func (n NetworkMode) NetworkName() string { - if n.IsBridge() { - return "bridge" - } else if n.IsHost() { - return "host" - } else if n.IsContainer() { - return "container" - } else if n.IsNone() { - return "none" - } else if n.IsDefault() { - return "default" - } else if n.IsUserDefined() { - return n.UserDefined() - } - return "" -} - -// IsBridge indicates whether container uses the bridge network stack -func (n NetworkMode) IsBridge() bool { - return n == "bridge" -} - -// IsHost indicates whether container uses the host network stack. -func (n NetworkMode) IsHost() bool { - return n == "host" -} - -// IsContainer indicates whether container uses a container network stack. -func (n NetworkMode) IsContainer() bool { - parts := strings.SplitN(string(n), ":", 2) - return len(parts) > 1 && parts[0] == "container" -} - -// IsNone indicates whether container isn't using a network stack. -func (n NetworkMode) IsNone() bool { - return n == "none" -} - -// ConnectedContainer is the id of the container which network this container is connected to. -func (n NetworkMode) ConnectedContainer() string { - parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 { - return parts[1] - } - return "" -} - -// IsUserDefined indicates user-created network -func (n NetworkMode) IsUserDefined() bool { - return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer() -} - -//UserDefined indicates user-created network -func (n NetworkMode) UserDefined() string { - if n.IsUserDefined() { - return string(n) - } - return "" -} diff --git a/vendor/github.com/docker/engine-api/types/container/hostconfig_windows.go b/vendor/github.com/docker/engine-api/types/container/hostconfig_windows.go deleted file mode 100644 index 0ee332ba6899..000000000000 --- a/vendor/github.com/docker/engine-api/types/container/hostconfig_windows.go +++ /dev/null @@ -1,87 +0,0 @@ -package container - -import ( - "strings" -) - -// IsDefault indicates whether container uses the default network stack. -func (n NetworkMode) IsDefault() bool { - return n == "default" -} - -// IsNone indicates whether container isn't using a network stack. -func (n NetworkMode) IsNone() bool { - return n == "none" -} - -// IsContainer indicates whether container uses a container network stack. -// Returns false as windows doesn't support this mode -func (n NetworkMode) IsContainer() bool { - return false -} - -// IsBridge indicates whether container uses the bridge network stack -// in windows it is given the name NAT -func (n NetworkMode) IsBridge() bool { - return n == "nat" -} - -// IsHost indicates whether container uses the host network stack. -// returns false as this is not supported by windows -func (n NetworkMode) IsHost() bool { - return false -} - -// IsPrivate indicates whether container uses its private network stack. -func (n NetworkMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) -} - -// ConnectedContainer is the id of the container which network this container is connected to. 
-// Returns blank string on windows -func (n NetworkMode) ConnectedContainer() string { - return "" -} - -// IsUserDefined indicates user-created network -func (n NetworkMode) IsUserDefined() bool { - return !n.IsDefault() && !n.IsNone() && !n.IsBridge() -} - -// IsHyperV indicates the use of a Hyper-V partition for isolation -func (i Isolation) IsHyperV() bool { - return strings.ToLower(string(i)) == "hyperv" -} - -// IsProcess indicates the use of process isolation -func (i Isolation) IsProcess() bool { - return strings.ToLower(string(i)) == "process" -} - -// IsValid indicates if an isolation technology is valid -func (i Isolation) IsValid() bool { - return i.IsDefault() || i.IsHyperV() || i.IsProcess() -} - -// NetworkName returns the name of the network stack. -func (n NetworkMode) NetworkName() string { - if n.IsDefault() { - return "default" - } else if n.IsBridge() { - return "nat" - } else if n.IsNone() { - return "none" - } else if n.IsUserDefined() { - return n.UserDefined() - } - - return "" -} - -//UserDefined indicates user-created network -func (n NetworkMode) UserDefined() string { - if n.IsUserDefined() { - return string(n) - } - return "" -} diff --git a/vendor/github.com/docker/engine-api/types/filters/parse.go b/vendor/github.com/docker/engine-api/types/filters/parse.go deleted file mode 100644 index 0e0d7e380541..000000000000 --- a/vendor/github.com/docker/engine-api/types/filters/parse.go +++ /dev/null @@ -1,295 +0,0 @@ -// Package filters provides helper function to parse and handle command line -// filter, used for example in docker ps or docker images commands. -package filters - -import ( - "encoding/json" - "errors" - "fmt" - "regexp" - "strings" - - "github.com/docker/engine-api/types/versions" -) - -// Args stores filter arguments as map key:{map key: bool}. -// It contains an aggregation of the map of arguments (which are in the form -// of -f 'key=value') based on the key, and stores values for the same key -// in a map with string keys and boolean values. -// e.g given -f 'label=label1=1' -f 'label=label2=2' -f 'image.name=ubuntu' -// the args will be {"image.name":{"ubuntu":true},"label":{"label1=1":true,"label2=2":true}} -type Args struct { - fields map[string]map[string]bool -} - -// NewArgs initializes a new Args struct. -func NewArgs() Args { - return Args{fields: map[string]map[string]bool{}} -} - -// ParseFlag parses the argument to the filter flag. Like -// -// `docker ps -f 'created=today' -f 'image.name=ubuntu*'` -// -// If prev map is provided, then it is appended to, and returned. By default a new -// map is created. -func ParseFlag(arg string, prev Args) (Args, error) { - filters := prev - if len(arg) == 0 { - return filters, nil - } - - if !strings.Contains(arg, "=") { - return filters, ErrBadFormat - } - - f := strings.SplitN(arg, "=", 2) - - name := strings.ToLower(strings.TrimSpace(f[0])) - value := strings.TrimSpace(f[1]) - - filters.Add(name, value) - - return filters, nil -} - -// ErrBadFormat is an error returned in case of bad format for a filter. -var ErrBadFormat = errors.New("bad format of filter (expected name=value)") - -// ToParam packs the Args into a string for easy transport from client to server. 
-func ToParam(a Args) (string, error) { - // this way we don't URL encode {}, just empty space - if a.Len() == 0 { - return "", nil - } - - buf, err := json.Marshal(a.fields) - if err != nil { - return "", err - } - return string(buf), nil -} - -// ToParamWithVersion packs the Args into a string for easy transport from client to server. -// The generated string will depend on the specified version (corresponding to the API version). -func ToParamWithVersion(version string, a Args) (string, error) { - // this way we don't URL encode {}, just empty space - if a.Len() == 0 { - return "", nil - } - - // for daemons older than v1.10, filter must be of the form map[string][]string - buf := []byte{} - err := errors.New("") - if version != "" && versions.LessThan(version, "1.22") { - buf, err = json.Marshal(convertArgsToSlice(a.fields)) - } else { - buf, err = json.Marshal(a.fields) - } - if err != nil { - return "", err - } - return string(buf), nil -} - -// FromParam unpacks the filter Args. -func FromParam(p string) (Args, error) { - if len(p) == 0 { - return NewArgs(), nil - } - - r := strings.NewReader(p) - d := json.NewDecoder(r) - - m := map[string]map[string]bool{} - if err := d.Decode(&m); err != nil { - r.Seek(0, 0) - - // Allow parsing old arguments in slice format. - // Because other libraries might be sending them in this format. - deprecated := map[string][]string{} - if deprecatedErr := d.Decode(&deprecated); deprecatedErr == nil { - m = deprecatedArgs(deprecated) - } else { - return NewArgs(), err - } - } - return Args{m}, nil -} - -// Get returns the list of values associates with a field. -// It returns a slice of strings to keep backwards compatibility with old code. -func (filters Args) Get(field string) []string { - values := filters.fields[field] - if values == nil { - return make([]string, 0) - } - slice := make([]string, 0, len(values)) - for key := range values { - slice = append(slice, key) - } - return slice -} - -// Add adds a new value to a filter field. -func (filters Args) Add(name, value string) { - if _, ok := filters.fields[name]; ok { - filters.fields[name][value] = true - } else { - filters.fields[name] = map[string]bool{value: true} - } -} - -// Del removes a value from a filter field. -func (filters Args) Del(name, value string) { - if _, ok := filters.fields[name]; ok { - delete(filters.fields[name], value) - } -} - -// Len returns the number of fields in the arguments. -func (filters Args) Len() int { - return len(filters.fields) -} - -// MatchKVList returns true if the values for the specified field matches the ones -// from the sources. -// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}}, -// field is 'label' and sources are {'label1': '1', 'label2': '2'} -// it returns true. -func (filters Args) MatchKVList(field string, sources map[string]string) bool { - fieldValues := filters.fields[field] - - //do not filter if there is no filter set or cannot determine filter - if len(fieldValues) == 0 { - return true - } - - if sources == nil || len(sources) == 0 { - return false - } - - for name2match := range fieldValues { - testKV := strings.SplitN(name2match, "=", 2) - - v, ok := sources[testKV[0]] - if !ok { - return false - } - if len(testKV) == 2 && testKV[1] != v { - return false - } - } - - return true -} - -// Match returns true if the values for the specified field matches the source string -// e.g. 
given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}}, -// field is 'image.name' and source is 'ubuntu' -// it returns true. -func (filters Args) Match(field, source string) bool { - if filters.ExactMatch(field, source) { - return true - } - - fieldValues := filters.fields[field] - for name2match := range fieldValues { - match, err := regexp.MatchString(name2match, source) - if err != nil { - continue - } - if match { - return true - } - } - return false -} - -// ExactMatch returns true if the source matches exactly one of the filters. -func (filters Args) ExactMatch(field, source string) bool { - fieldValues, ok := filters.fields[field] - //do not filter if there is no filter set or cannot determine filter - if !ok || len(fieldValues) == 0 { - return true - } - - // try to match full name value to avoid O(N) regular expression matching - if fieldValues[source] { - return true - } - return false -} - -// FuzzyMatch returns true if the source matches exactly one of the filters, -// or the source has one of the filters as a prefix. -func (filters Args) FuzzyMatch(field, source string) bool { - if filters.ExactMatch(field, source) { - return true - } - - fieldValues := filters.fields[field] - for prefix := range fieldValues { - if strings.HasPrefix(source, prefix) { - return true - } - } - return false -} - -// Include returns true if the name of the field to filter is in the filters. -func (filters Args) Include(field string) bool { - _, ok := filters.fields[field] - return ok -} - -// Validate ensures that all the fields in the filter are valid. -// It returns an error as soon as it finds an invalid field. -func (filters Args) Validate(accepted map[string]bool) error { - for name := range filters.fields { - if !accepted[name] { - return fmt.Errorf("Invalid filter '%s'", name) - } - } - return nil -} - -// WalkValues iterates over the list of filtered values for a field. -// It stops the iteration if it finds an error and it returns that error. 
-func (filters Args) WalkValues(field string, op func(value string) error) error { - if _, ok := filters.fields[field]; !ok { - return nil - } - for v := range filters.fields[field] { - if err := op(v); err != nil { - return err - } - } - return nil -} - -func deprecatedArgs(d map[string][]string) map[string]map[string]bool { - m := map[string]map[string]bool{} - for k, v := range d { - values := map[string]bool{} - for _, vv := range v { - values[vv] = true - } - m[k] = values - } - return m -} - -func convertArgsToSlice(f map[string]map[string]bool) map[string][]string { - m := map[string][]string{} - for k, v := range f { - values := []string{} - for kk := range v { - if v[kk] { - values = append(values, kk) - } - } - m[k] = values - } - return m -} diff --git a/vendor/github.com/docker/engine-api/types/network/network.go b/vendor/github.com/docker/engine-api/types/network/network.go deleted file mode 100644 index bce60f5eec44..000000000000 --- a/vendor/github.com/docker/engine-api/types/network/network.go +++ /dev/null @@ -1,52 +0,0 @@ -package network - -// Address represents an IP address -type Address struct { - Addr string - PrefixLen int -} - -// IPAM represents IP Address Management -type IPAM struct { - Driver string - Options map[string]string //Per network IPAM driver options - Config []IPAMConfig -} - -// IPAMConfig represents IPAM configurations -type IPAMConfig struct { - Subnet string `json:",omitempty"` - IPRange string `json:",omitempty"` - Gateway string `json:",omitempty"` - AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"` -} - -// EndpointIPAMConfig represents IPAM configurations for the endpoint -type EndpointIPAMConfig struct { - IPv4Address string `json:",omitempty"` - IPv6Address string `json:",omitempty"` -} - -// EndpointSettings stores the network endpoint details -type EndpointSettings struct { - // Configurations - IPAMConfig *EndpointIPAMConfig - Links []string - Aliases []string - // Operational data - NetworkID string - EndpointID string - Gateway string - IPAddress string - IPPrefixLen int - IPv6Gateway string - GlobalIPv6Address string - GlobalIPv6PrefixLen int - MacAddress string -} - -// NetworkingConfig represents the container's networking configuration for each of its interfaces -// Carries the networking configs specified in the `docker run` and `docker network connect` commands -type NetworkingConfig struct { - EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network -} diff --git a/vendor/github.com/docker/engine-api/types/reference/image_reference.go b/vendor/github.com/docker/engine-api/types/reference/image_reference.go deleted file mode 100644 index be9cf8ebed09..000000000000 --- a/vendor/github.com/docker/engine-api/types/reference/image_reference.go +++ /dev/null @@ -1,34 +0,0 @@ -package reference - -import ( - distreference "github.com/docker/distribution/reference" -) - -// Parse parses the given references and returns the repository and -// tag (if present) from it. If there is an error during parsing, it will -// return an error. -func Parse(ref string) (string, string, error) { - distributionRef, err := distreference.ParseNamed(ref) - if err != nil { - return "", "", err - } - - tag := GetTagFromNamedRef(distributionRef) - return distributionRef.Name(), tag, nil -} - -// GetTagFromNamedRef returns a tag from the specified reference. -// This function is necessary as long as the docker "server" api makes the distinction between repository -// and tags. 
-func GetTagFromNamedRef(ref distreference.Named) string { - var tag string - switch x := ref.(type) { - case distreference.Digested: - tag = x.Digest().String() - case distreference.NamedTagged: - tag = x.Tag() - default: - tag = "latest" - } - return tag -} diff --git a/vendor/github.com/docker/engine-api/types/registry/registry.go b/vendor/github.com/docker/engine-api/types/registry/registry.go deleted file mode 100644 index d2aca6f024b5..000000000000 --- a/vendor/github.com/docker/engine-api/types/registry/registry.go +++ /dev/null @@ -1,99 +0,0 @@ -package registry - -import ( - "encoding/json" - "net" -) - -// ServiceConfig stores daemon registry services configuration. -type ServiceConfig struct { - InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` - IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` - Mirrors []string -} - -// NetIPNet is the net.IPNet type, which can be marshalled and -// unmarshalled to JSON -type NetIPNet net.IPNet - -// MarshalJSON returns the JSON representation of the IPNet -func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) { - return json.Marshal((*net.IPNet)(ipnet).String()) -} - -// UnmarshalJSON sets the IPNet from a byte array of JSON -func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) { - var ipnetStr string - if err = json.Unmarshal(b, &ipnetStr); err == nil { - var cidr *net.IPNet - if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil { - *ipnet = NetIPNet(*cidr) - } - } - return -} - -// IndexInfo contains information about a registry -// -// RepositoryInfo Examples: -// { -// "Index" : { -// "Name" : "docker.io", -// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"], -// "Secure" : true, -// "Official" : true, -// }, -// "RemoteName" : "library/debian", -// "LocalName" : "debian", -// "CanonicalName" : "docker.io/debian" -// "Official" : true, -// } -// -// { -// "Index" : { -// "Name" : "127.0.0.1:5000", -// "Mirrors" : [], -// "Secure" : false, -// "Official" : false, -// }, -// "RemoteName" : "user/repo", -// "LocalName" : "127.0.0.1:5000/user/repo", -// "CanonicalName" : "127.0.0.1:5000/user/repo", -// "Official" : false, -// } -type IndexInfo struct { - // Name is the name of the registry, such as "docker.io" - Name string - // Mirrors is a list of mirrors, expressed as URIs - Mirrors []string - // Secure is set to false if the registry is part of the list of - // insecure registries. Insecure registries accept HTTP and/or accept - // HTTPS with certificates from unknown CAs. - Secure bool - // Official indicates whether this is an official registry - Official bool -} - -// SearchResult describes a search result returned from a registry -type SearchResult struct { - // StarCount indicates the number of stars this repository has - StarCount int `json:"star_count"` - // IsOfficial is true if the result is from an official repository. 
- IsOfficial bool `json:"is_official"` - // Name is the name of the repository - Name string `json:"name"` - // IsAutomated indicates whether the result is automated - IsAutomated bool `json:"is_automated"` - // Description is a textual description of the repository - Description string `json:"description"` -} - -// SearchResults lists a collection search results returned from a registry -type SearchResults struct { - // Query contains the query string that generated the search results - Query string `json:"query"` - // NumResults indicates the number of results the query returned - NumResults int `json:"num_results"` - // Results is a slice containing the actual results for the search - Results []SearchResult `json:"results"` -} diff --git a/vendor/github.com/docker/engine-api/types/seccomp.go b/vendor/github.com/docker/engine-api/types/seccomp.go deleted file mode 100644 index e0305a9e3789..000000000000 --- a/vendor/github.com/docker/engine-api/types/seccomp.go +++ /dev/null @@ -1,68 +0,0 @@ -package types - -// Seccomp represents the config for a seccomp profile for syscall restriction. -type Seccomp struct { - DefaultAction Action `json:"defaultAction"` - Architectures []Arch `json:"architectures"` - Syscalls []*Syscall `json:"syscalls"` -} - -// Arch used for additional architectures -type Arch string - -// Additional architectures permitted to be used for system calls -// By default only the native architecture of the kernel is permitted -const ( - ArchX86 Arch = "SCMP_ARCH_X86" - ArchX86_64 Arch = "SCMP_ARCH_X86_64" - ArchX32 Arch = "SCMP_ARCH_X32" - ArchARM Arch = "SCMP_ARCH_ARM" - ArchAARCH64 Arch = "SCMP_ARCH_AARCH64" - ArchMIPS Arch = "SCMP_ARCH_MIPS" - ArchMIPS64 Arch = "SCMP_ARCH_MIPS64" - ArchMIPS64N32 Arch = "SCMP_ARCH_MIPS64N32" - ArchMIPSEL Arch = "SCMP_ARCH_MIPSEL" - ArchMIPSEL64 Arch = "SCMP_ARCH_MIPSEL64" - ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32" -) - -// Action taken upon Seccomp rule match -type Action string - -// Define actions for Seccomp rules -const ( - ActKill Action = "SCMP_ACT_KILL" - ActTrap Action = "SCMP_ACT_TRAP" - ActErrno Action = "SCMP_ACT_ERRNO" - ActTrace Action = "SCMP_ACT_TRACE" - ActAllow Action = "SCMP_ACT_ALLOW" -) - -// Operator used to match syscall arguments in Seccomp -type Operator string - -// Define operators for syscall arguments in Seccomp -const ( - OpNotEqual Operator = "SCMP_CMP_NE" - OpLessThan Operator = "SCMP_CMP_LT" - OpLessEqual Operator = "SCMP_CMP_LE" - OpEqualTo Operator = "SCMP_CMP_EQ" - OpGreaterEqual Operator = "SCMP_CMP_GE" - OpGreaterThan Operator = "SCMP_CMP_GT" - OpMaskedEqual Operator = "SCMP_CMP_MASKED_EQ" -) - -// Arg used for matching specific syscall arguments in Seccomp -type Arg struct { - Index uint `json:"index"` - Value uint64 `json:"value"` - ValueTwo uint64 `json:"valueTwo"` - Op Operator `json:"op"` -} - -// Syscall is used to match a syscall in Seccomp -type Syscall struct { - Name string `json:"name"` - Action Action `json:"action"` - Args []*Arg `json:"args"` -} diff --git a/vendor/github.com/docker/engine-api/types/stats.go b/vendor/github.com/docker/engine-api/types/stats.go deleted file mode 100644 index b420ebe7f6ab..000000000000 --- a/vendor/github.com/docker/engine-api/types/stats.go +++ /dev/null @@ -1,115 +0,0 @@ -// Package types is used for API stability in the types and response to the -// consumers of the API stats endpoint. 
-package types - -import "time" - -// ThrottlingData stores CPU throttling stats of one running container -type ThrottlingData struct { - // Number of periods with throttling active - Periods uint64 `json:"periods"` - // Number of periods when the container hits its throttling limit. - ThrottledPeriods uint64 `json:"throttled_periods"` - // Aggregate time the container was throttled for in nanoseconds. - ThrottledTime uint64 `json:"throttled_time"` -} - -// CPUUsage stores All CPU stats aggregated since container inception. -type CPUUsage struct { - // Total CPU time consumed. - // Units: nanoseconds. - TotalUsage uint64 `json:"total_usage"` - // Total CPU time consumed per core. - // Units: nanoseconds. - PercpuUsage []uint64 `json:"percpu_usage"` - // Time spent by tasks of the cgroup in kernel mode. - // Units: nanoseconds. - UsageInKernelmode uint64 `json:"usage_in_kernelmode"` - // Time spent by tasks of the cgroup in user mode. - // Units: nanoseconds. - UsageInUsermode uint64 `json:"usage_in_usermode"` -} - -// CPUStats aggregates and wraps all CPU related info of container -type CPUStats struct { - CPUUsage CPUUsage `json:"cpu_usage"` - SystemUsage uint64 `json:"system_cpu_usage"` - ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` -} - -// MemoryStats aggregates All memory stats since container inception -type MemoryStats struct { - // current res_counter usage for memory - Usage uint64 `json:"usage"` - // maximum usage ever recorded. - MaxUsage uint64 `json:"max_usage"` - // TODO(vishh): Export these as stronger types. - // all the stats exported via memory.stat. - Stats map[string]uint64 `json:"stats"` - // number of times memory usage hits limits. - Failcnt uint64 `json:"failcnt"` - Limit uint64 `json:"limit"` -} - -// BlkioStatEntry is one small entity to store a piece of Blkio stats -// TODO Windows: This can be factored out -type BlkioStatEntry struct { - Major uint64 `json:"major"` - Minor uint64 `json:"minor"` - Op string `json:"op"` - Value uint64 `json:"value"` -} - -// BlkioStats stores All IO service stats for data read and write -// TODO Windows: This can be factored out -type BlkioStats struct { - // number of bytes transferred to and from the block device - IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"` - IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"` - IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"` - IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"` - IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"` - IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"` - IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"` - SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"` -} - -// NetworkStats aggregates All network stats of one container -// TODO Windows: This will require refactoring -type NetworkStats struct { - RxBytes uint64 `json:"rx_bytes"` - RxPackets uint64 `json:"rx_packets"` - RxErrors uint64 `json:"rx_errors"` - RxDropped uint64 `json:"rx_dropped"` - TxBytes uint64 `json:"tx_bytes"` - TxPackets uint64 `json:"tx_packets"` - TxErrors uint64 `json:"tx_errors"` - TxDropped uint64 `json:"tx_dropped"` -} - -// PidsStats contains the stats of a container's pids -type PidsStats struct { - // Current is the number of pids in the cgroup - Current uint64 `json:"current,omitempty"` - // Limit is the hard limit on the number of pids in the cgroup. - // A "Limit" of 0 means that there is no limit. 
- Limit uint64 `json:"limit,omitempty"` -} - -// Stats is Ultimate struct aggregating all types of stats of one container -type Stats struct { - Read time.Time `json:"read"` - PreCPUStats CPUStats `json:"precpu_stats,omitempty"` - CPUStats CPUStats `json:"cpu_stats,omitempty"` - MemoryStats MemoryStats `json:"memory_stats,omitempty"` - BlkioStats BlkioStats `json:"blkio_stats,omitempty"` - PidsStats PidsStats `json:"pids_stats,omitempty"` -} - -// StatsJSON is newly used Networks -type StatsJSON struct { - Stats - - // Networks request version >=1.21 - Networks map[string]NetworkStats `json:"networks,omitempty"` -} diff --git a/vendor/github.com/docker/engine-api/types/strslice/strslice.go b/vendor/github.com/docker/engine-api/types/strslice/strslice.go deleted file mode 100644 index bad493fb89fd..000000000000 --- a/vendor/github.com/docker/engine-api/types/strslice/strslice.go +++ /dev/null @@ -1,30 +0,0 @@ -package strslice - -import "encoding/json" - -// StrSlice represents a string or an array of strings. -// We need to override the json decoder to accept both options. -type StrSlice []string - -// UnmarshalJSON decodes the byte slice whether it's a string or an array of -// strings. This method is needed to implement json.Unmarshaler. -func (e *StrSlice) UnmarshalJSON(b []byte) error { - if len(b) == 0 { - // With no input, we preserve the existing value by returning nil and - // leaving the target alone. This allows defining default values for - // the type. - return nil - } - - p := make([]string, 0, 1) - if err := json.Unmarshal(b, &p); err != nil { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - p = append(p, s) - } - - *e = p - return nil -} diff --git a/vendor/github.com/docker/engine-api/types/time/timestamp.go b/vendor/github.com/docker/engine-api/types/time/timestamp.go deleted file mode 100644 index d3695ba723b7..000000000000 --- a/vendor/github.com/docker/engine-api/types/time/timestamp.go +++ /dev/null @@ -1,124 +0,0 @@ -package time - -import ( - "fmt" - "math" - "strconv" - "strings" - "time" -) - -// These are additional predefined layouts for use in Time.Format and Time.Parse -// with --since and --until parameters for `docker logs` and `docker events` -const ( - rFC3339Local = "2006-01-02T15:04:05" // RFC3339 with local timezone - rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone - dateWithZone = "2006-01-02Z07:00" // RFC3339 with time at 00:00:00 - dateLocal = "2006-01-02" // RFC3339 with local timezone and time at 00:00:00 -) - -// GetTimestamp tries to parse given string as golang duration, -// then RFC3339 time and finally as a Unix timestamp. If -// any of these were successful, it returns a Unix timestamp -// as string otherwise returns the given value back. -// In case of duration input, the returned timestamp is computed -// as the given reference time minus the amount of the duration. 
-func GetTimestamp(value string, reference time.Time) (string, error) { - if d, err := time.ParseDuration(value); value != "0" && err == nil { - return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil - } - - var format string - var parseInLocation bool - - // if the string has a Z or a + or three dashes use parse otherwise use parseinlocation - parseInLocation = !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3) - - if strings.Contains(value, ".") { - if parseInLocation { - format = rFC3339NanoLocal - } else { - format = time.RFC3339Nano - } - } else if strings.Contains(value, "T") { - // we want the number of colons in the T portion of the timestamp - tcolons := strings.Count(value, ":") - // if parseInLocation is off and we have a +/- zone offset (not Z) then - // there will be an extra colon in the input for the tz offset subtract that - // colon from the tcolons count - if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 { - tcolons-- - } - if parseInLocation { - switch tcolons { - case 0: - format = "2006-01-02T15" - case 1: - format = "2006-01-02T15:04" - default: - format = rFC3339Local - } - } else { - switch tcolons { - case 0: - format = "2006-01-02T15Z07:00" - case 1: - format = "2006-01-02T15:04Z07:00" - default: - format = time.RFC3339 - } - } - } else if parseInLocation { - format = dateLocal - } else { - format = dateWithZone - } - - var t time.Time - var err error - - if parseInLocation { - t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone())) - } else { - t, err = time.Parse(format, value) - } - - if err != nil { - // if there is a `-` then its an RFC3339 like timestamp otherwise assume unixtimestamp - if strings.Contains(value, "-") { - return "", err // was probably an RFC3339 like timestamp but the parser failed with an error - } - return value, nil // unixtimestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server) - } - - return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil -} - -// ParseTimestamps returns seconds and nanoseconds from a timestamp that has the -// format "%d.%09d", time.Unix(), int64(time.Nanosecond())) -// if the incoming nanosecond portion is longer or shorter than 9 digits it is -// converted to nanoseconds. The expectation is that the seconds and -// seconds will be used to create a time variable. 
For example: -// seconds, nanoseconds, err := ParseTimestamp("1136073600.000000001",0) -// if err == nil since := time.Unix(seconds, nanoseconds) -// returns seconds as def(aultSeconds) if value == "" -func ParseTimestamps(value string, def int64) (int64, int64, error) { - if value == "" { - return def, 0, nil - } - sa := strings.SplitN(value, ".", 2) - s, err := strconv.ParseInt(sa[0], 10, 64) - if err != nil { - return s, 0, err - } - if len(sa) != 2 { - return s, 0, nil - } - n, err := strconv.ParseInt(sa[1], 10, 64) - if err != nil { - return s, n, err - } - // should already be in nanoseconds but just in case convert n to nanoseonds - n = int64(float64(n) * math.Pow(float64(10), float64(9-len(sa[1])))) - return s, n, nil -} diff --git a/vendor/github.com/docker/engine-api/types/types.go b/vendor/github.com/docker/engine-api/types/types.go deleted file mode 100644 index cb2dc9ac9d77..000000000000 --- a/vendor/github.com/docker/engine-api/types/types.go +++ /dev/null @@ -1,473 +0,0 @@ -package types - -import ( - "os" - "time" - - "github.com/docker/engine-api/types/container" - "github.com/docker/engine-api/types/network" - "github.com/docker/engine-api/types/registry" - "github.com/docker/go-connections/nat" -) - -// ContainerCreateResponse contains the information returned to a client on the -// creation of a new container. -type ContainerCreateResponse struct { - // ID is the ID of the created container. - ID string `json:"Id"` - - // Warnings are any warnings encountered during the creation of the container. - Warnings []string `json:"Warnings"` -} - -// ContainerExecCreateResponse contains response of Remote API: -// POST "/containers/{name:.*}/exec" -type ContainerExecCreateResponse struct { - // ID is the exec ID. - ID string `json:"Id"` -} - -// ContainerUpdateResponse contains response of Remote API: -// POST /containers/{name:.*}/update -type ContainerUpdateResponse struct { - // Warnings are any warnings encountered during the updating of the container. - Warnings []string `json:"Warnings"` -} - -// AuthResponse contains response of Remote API: -// POST "/auth" -type AuthResponse struct { - // Status is the authentication status - Status string `json:"Status"` - - // IdentityToken is an opaque token used for authenticating - // a user after a successful login. 
- IdentityToken string `json:"IdentityToken,omitempty"` -} - -// ContainerWaitResponse contains response of Remote API: -// POST "/containers/"+containerID+"/wait" -type ContainerWaitResponse struct { - // StatusCode is the status code of the wait job - StatusCode int `json:"StatusCode"` -} - -// ContainerCommitResponse contains response of Remote API: -// POST "/commit?container="+containerID -type ContainerCommitResponse struct { - ID string `json:"Id"` -} - -// ContainerChange contains response of Remote API: -// GET "/containers/{name:.*}/changes" -type ContainerChange struct { - Kind int - Path string -} - -// ImageHistory contains response of Remote API: -// GET "/images/{name:.*}/history" -type ImageHistory struct { - ID string `json:"Id"` - Created int64 - CreatedBy string - Tags []string - Size int64 - Comment string -} - -// ImageDelete contains response of Remote API: -// DELETE "/images/{name:.*}" -type ImageDelete struct { - Untagged string `json:",omitempty"` - Deleted string `json:",omitempty"` -} - -// Image contains response of Remote API: -// GET "/images/json" -type Image struct { - ID string `json:"Id"` - ParentID string `json:"ParentId"` - RepoTags []string - RepoDigests []string - Created int64 - Size int64 - VirtualSize int64 - Labels map[string]string -} - -// GraphDriverData returns Image's graph driver config info -// when calling inspect command -type GraphDriverData struct { - Name string - Data map[string]string -} - -// RootFS returns Image's RootFS description including the layer IDs. -type RootFS struct { - Type string - Layers []string `json:",omitempty"` - BaseLayer string `json:",omitempty"` -} - -// ImageInspect contains response of Remote API: -// GET "/images/{name:.*}/json" -type ImageInspect struct { - ID string `json:"Id"` - RepoTags []string - RepoDigests []string - Parent string - Comment string - Created string - Container string - ContainerConfig *container.Config - DockerVersion string - Author string - Config *container.Config - Architecture string - Os string - Size int64 - VirtualSize int64 - GraphDriver GraphDriverData - RootFS RootFS -} - -// Port stores open ports info of container -// e.g. {"PrivatePort": 8080, "PublicPort": 80, "Type": "tcp"} -type Port struct { - IP string `json:",omitempty"` - PrivatePort int - PublicPort int `json:",omitempty"` - Type string -} - -// Container contains response of Remote API: -// GET "/containers/json" -type Container struct { - ID string `json:"Id"` - Names []string - Image string - ImageID string - Command string - Created int64 - Ports []Port - SizeRw int64 `json:",omitempty"` - SizeRootFs int64 `json:",omitempty"` - Labels map[string]string - State string - Status string - HostConfig struct { - NetworkMode string `json:",omitempty"` - } - NetworkSettings *SummaryNetworkSettings - Mounts []MountPoint -} - -// CopyConfig contains request body of Remote API: -// POST "/containers/"+containerID+"/copy" -type CopyConfig struct { - Resource string -} - -// ContainerPathStat is used to encode the header from -// GET "/containers/{name:.*}/archive" -// "Name" is the file or directory name. 
-type ContainerPathStat struct { - Name string `json:"name"` - Size int64 `json:"size"` - Mode os.FileMode `json:"mode"` - Mtime time.Time `json:"mtime"` - LinkTarget string `json:"linkTarget"` -} - -// ContainerProcessList contains response of Remote API: -// GET "/containers/{name:.*}/top" -type ContainerProcessList struct { - Processes [][]string - Titles []string -} - -// Version contains response of Remote API: -// GET "/version" -type Version struct { - Version string - APIVersion string `json:"ApiVersion"` - GitCommit string - GoVersion string - Os string - Arch string - KernelVersion string `json:",omitempty"` - Experimental bool `json:",omitempty"` - BuildTime string `json:",omitempty"` -} - -// Info contains response of Remote API: -// GET "/info" -type Info struct { - ID string - Containers int - ContainersRunning int - ContainersPaused int - ContainersStopped int - Images int - Driver string - DriverStatus [][2]string - SystemStatus [][2]string - Plugins PluginsInfo - MemoryLimit bool - SwapLimit bool - KernelMemory bool - CPUCfsPeriod bool `json:"CpuCfsPeriod"` - CPUCfsQuota bool `json:"CpuCfsQuota"` - CPUShares bool - CPUSet bool - IPv4Forwarding bool - BridgeNfIptables bool - BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` - Debug bool - NFd int - OomKillDisable bool - NGoroutines int - SystemTime string - ExecutionDriver string - LoggingDriver string - CgroupDriver string - NEventsListener int - KernelVersion string - OperatingSystem string - OSType string - Architecture string - IndexServerAddress string - RegistryConfig *registry.ServiceConfig - NCPU int - MemTotal int64 - DockerRootDir string - HTTPProxy string `json:"HttpProxy"` - HTTPSProxy string `json:"HttpsProxy"` - NoProxy string - Name string - Labels []string - ExperimentalBuild bool - ServerVersion string - ClusterStore string - ClusterAdvertise string - SecurityOptions []string -} - -// PluginsInfo is a temp struct holding Plugins name -// registered with docker daemon. It is used by Info struct -type PluginsInfo struct { - // List of Volume plugins registered - Volume []string - // List of Network plugins registered - Network []string - // List of Authorization plugins registered - Authorization []string -} - -// ExecStartCheck is a temp struct used by execStart -// Config fields is part of ExecConfig in runconfig package -type ExecStartCheck struct { - // ExecStart will first check if it's detached - Detach bool - // Check if there's a tty - Tty bool -} - -// ContainerState stores container's running state -// it's part of ContainerJSONBase and will return by "inspect" command -type ContainerState struct { - Status string - Running bool - Paused bool - Restarting bool - OOMKilled bool - Dead bool - Pid int - ExitCode int - Error string - StartedAt string - FinishedAt string -} - -// ContainerNode stores information about the node that a container -// is running on. 
It's only available in Docker Swarm -type ContainerNode struct { - ID string - IPAddress string `json:"IP"` - Addr string - Name string - Cpus int - Memory int - Labels map[string]string -} - -// ContainerJSONBase contains response of Remote API: -// GET "/containers/{name:.*}/json" -type ContainerJSONBase struct { - ID string `json:"Id"` - Created string - Path string - Args []string - State *ContainerState - Image string - ResolvConfPath string - HostnamePath string - HostsPath string - LogPath string - Node *ContainerNode `json:",omitempty"` - Name string - RestartCount int - Driver string - MountLabel string - ProcessLabel string - AppArmorProfile string - ExecIDs []string - HostConfig *container.HostConfig - GraphDriver GraphDriverData - SizeRw *int64 `json:",omitempty"` - SizeRootFs *int64 `json:",omitempty"` -} - -// ContainerJSON is newly used struct along with MountPoint -type ContainerJSON struct { - *ContainerJSONBase - Mounts []MountPoint - Config *container.Config - NetworkSettings *NetworkSettings -} - -// NetworkSettings exposes the network settings in the api -type NetworkSettings struct { - NetworkSettingsBase - DefaultNetworkSettings - Networks map[string]*network.EndpointSettings -} - -// SummaryNetworkSettings provides a summary of container's networks -// in /containers/json -type SummaryNetworkSettings struct { - Networks map[string]*network.EndpointSettings -} - -// NetworkSettingsBase holds basic information about networks -type NetworkSettingsBase struct { - Bridge string - SandboxID string - HairpinMode bool - LinkLocalIPv6Address string - LinkLocalIPv6PrefixLen int - Ports nat.PortMap - SandboxKey string - SecondaryIPAddresses []network.Address - SecondaryIPv6Addresses []network.Address -} - -// DefaultNetworkSettings holds network information -// during the 2 release deprecation period. -// It will be removed in Docker 1.11. -type DefaultNetworkSettings struct { - EndpointID string - Gateway string - GlobalIPv6Address string - GlobalIPv6PrefixLen int - IPAddress string - IPPrefixLen int - IPv6Gateway string - MacAddress string -} - -// MountPoint represents a mount point configuration inside the container. -type MountPoint struct { - Name string `json:",omitempty"` - Source string - Destination string - Driver string `json:",omitempty"` - Mode string - RW bool - Propagation string -} - -// Volume represents the configuration of a volume for the remote API -type Volume struct { - Name string // Name is the name of the volume - Driver string // Driver is the Driver name used to create the volume - Mountpoint string // Mountpoint is the location on disk of the volume - Status map[string]interface{} `json:",omitempty"` // Status provides low-level status information about the volume - Labels map[string]string // Labels is metadata specific to the volume - Scope string // Scope describes the level at which the volume exists (e.g. 
`global` for cluster-wide or `local` for machine level) -} - -// VolumesListResponse contains the response for the remote API: -// GET "/volumes" -type VolumesListResponse struct { - Volumes []*Volume // Volumes is the list of volumes being returned - Warnings []string // Warnings is a list of warnings that occurred when getting the list from the volume drivers -} - -// VolumeCreateRequest contains the response for the remote API: -// POST "/volumes/create" -type VolumeCreateRequest struct { - Name string // Name is the requested name of the volume - Driver string // Driver is the name of the driver that should be used to create the volume - DriverOpts map[string]string // DriverOpts holds the driver specific options to use for when creating the volume. - Labels map[string]string // Labels holds metadata specific to the volume being created. -} - -// NetworkResource is the body of the "get network" http response message -type NetworkResource struct { - Name string - ID string `json:"Id"` - Scope string - Driver string - EnableIPv6 bool - IPAM network.IPAM - Internal bool - Containers map[string]EndpointResource - Options map[string]string - Labels map[string]string -} - -// EndpointResource contains network resources allocated and used for a container in a network -type EndpointResource struct { - Name string - EndpointID string - MacAddress string - IPv4Address string - IPv6Address string -} - -// NetworkCreate is the expected body of the "create network" http request message -type NetworkCreate struct { - CheckDuplicate bool - Driver string - EnableIPv6 bool - IPAM network.IPAM - Internal bool - Options map[string]string - Labels map[string]string -} - -// NetworkCreateRequest is the request message sent to the server for network create call. -type NetworkCreateRequest struct { - NetworkCreate - Name string -} - -// NetworkCreateResponse is the response message sent by the server for network create call -type NetworkCreateResponse struct { - ID string `json:"Id"` - Warning string -} - -// NetworkConnect represents the data to be used to connect a container to the network -type NetworkConnect struct { - Container string - EndpointConfig *network.EndpointSettings `json:",omitempty"` -} - -// NetworkDisconnect represents the data to be used to disconnect a container from the network -type NetworkDisconnect struct { - Container string - Force bool -} diff --git a/vendor/github.com/docker/engine-api/types/versions/README.md b/vendor/github.com/docker/engine-api/types/versions/README.md deleted file mode 100644 index 76c516e6a317..000000000000 --- a/vendor/github.com/docker/engine-api/types/versions/README.md +++ /dev/null @@ -1,14 +0,0 @@ -## Legacy API type versions - -This package includes types for legacy API versions. The stable version of the API types live in `api/types/*.go`. - -Consider moving a type here when you need to keep backwards compatibility in the API. This legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal `1.19`. Types in the `v1p20` package are valid for the API version `1.20`, since the versions below that will use the legacy types in `v1p19`. - -### Package name conventions - -The package name convention is to use `v` as a prefix for the version number and `p`(patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention: - -1. We cannot use `.` because it's interpreted by the language, think of `v1.20.CallFunction`. -2. 
We cannot use `_` because golint complains abount it. The code is actually valid, but it looks probably more weird: `v1_20.CallFunction`. - -For instance, if you want to modify a type that was available in the version `1.21` of the API but it will have different fields in the version `1.22`, you want to create a new package under `api/types/versions/v1p21`. diff --git a/vendor/github.com/docker/engine-api/types/versions/compare.go b/vendor/github.com/docker/engine-api/types/versions/compare.go deleted file mode 100644 index 611d4fed66e5..000000000000 --- a/vendor/github.com/docker/engine-api/types/versions/compare.go +++ /dev/null @@ -1,62 +0,0 @@ -package versions - -import ( - "strconv" - "strings" -) - -// compare compares two version strings -// returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise. -func compare(v1, v2 string) int { - var ( - currTab = strings.Split(v1, ".") - otherTab = strings.Split(v2, ".") - ) - - max := len(currTab) - if len(otherTab) > max { - max = len(otherTab) - } - for i := 0; i < max; i++ { - var currInt, otherInt int - - if len(currTab) > i { - currInt, _ = strconv.Atoi(currTab[i]) - } - if len(otherTab) > i { - otherInt, _ = strconv.Atoi(otherTab[i]) - } - if currInt > otherInt { - return 1 - } - if otherInt > currInt { - return -1 - } - } - return 0 -} - -// LessThan checks if a version is less than another -func LessThan(v, other string) bool { - return compare(v, other) == -1 -} - -// LessThanOrEqualTo checks if a version is less than or equal to another -func LessThanOrEqualTo(v, other string) bool { - return compare(v, other) <= 0 -} - -// GreaterThan checks if a version is greater than another -func GreaterThan(v, other string) bool { - return compare(v, other) == 1 -} - -// GreaterThanOrEqualTo checks if a version is greater than or equal to another -func GreaterThanOrEqualTo(v, other string) bool { - return compare(v, other) >= 0 -} - -// Equal checks if a version is equal to another -func Equal(v, other string) bool { - return compare(v, other) == 0 -} diff --git a/vendor/github.com/docker/libnetwork/ipvs/ipvs.go b/vendor/github.com/docker/libnetwork/ipvs/ipvs.go index a285e102e36d..ebcdd808c34a 100644 --- a/vendor/github.com/docker/libnetwork/ipvs/ipvs.go +++ b/vendor/github.com/docker/libnetwork/ipvs/ipvs.go @@ -116,6 +116,13 @@ func (i *Handle) DelService(s *Service) error { return i.doCmd(s, nil, ipvsCmdDelService) } +// Flush deletes all existing services in the passed +// handle. +func (i *Handle) Flush() error { + _, err := i.doCmdWithoutAttr(ipvsCmdFlush) + return err +} + // NewDestination creates a new real server in the passed ipvs // service which should already be existing in the passed handle. 
func (i *Handle) NewDestination(s *Service, d *Destination) error { diff --git a/vendor/github.com/docker/libnetwork/ipvs/netlink.go b/vendor/github.com/docker/libnetwork/ipvs/netlink.go index b8d33dcdc4e8..2089283d14e4 100644 --- a/vendor/github.com/docker/libnetwork/ipvs/netlink.go +++ b/vendor/github.com/docker/libnetwork/ipvs/netlink.go @@ -402,6 +402,13 @@ func (i *Handle) doGetServicesCmd(svc *Service) ([]*Service, error) { return res, nil } +// doCmdWithoutAttr a simple wrapper of netlink socket execute command +func (i *Handle) doCmdWithoutAttr(cmd uint8) ([][]byte, error) { + req := newIPVSRequest(cmd) + req.Seq = atomic.AddUint32(&i.seq, 1) + return execute(i.sock, req, 0) +} + func assembleDestination(attrs []syscall.NetlinkRouteAttr) (*Destination, error) { var d Destination diff --git a/vendor/github.com/go-ini/ini/.gitignore b/vendor/github.com/go-ini/ini/.gitignore index 7adca9439c55..c5203bf6e73d 100644 --- a/vendor/github.com/go-ini/ini/.gitignore +++ b/vendor/github.com/go-ini/ini/.gitignore @@ -2,3 +2,4 @@ testdata/conf_out.ini ini.sublime-project ini.sublime-workspace testdata/conf_reflect.ini +.idea diff --git a/vendor/github.com/go-ini/ini/.travis.yml b/vendor/github.com/go-ini/ini/.travis.yml new file mode 100644 index 000000000000..65c872badfac --- /dev/null +++ b/vendor/github.com/go-ini/ini/.travis.yml @@ -0,0 +1,17 @@ +sudo: false +language: go +go: + - 1.4.x + - 1.5.x + - 1.6.x + - 1.7.x + - master + +script: + - go get golang.org/x/tools/cmd/cover + - go get github.com/smartystreets/goconvey + - go test -v -cover -race + +notifications: + email: + - u@gogs.io diff --git a/vendor/github.com/go-ini/ini/Makefile b/vendor/github.com/go-ini/ini/Makefile new file mode 100644 index 000000000000..ac034e5258f4 --- /dev/null +++ b/vendor/github.com/go-ini/ini/Makefile @@ -0,0 +1,12 @@ +.PHONY: build test bench vet + +build: vet bench + +test: + go test -v -cover -race + +bench: + go test -v -cover -race -test.bench=. -test.benchmem + +vet: + go vet diff --git a/vendor/github.com/go-ini/ini/README.md b/vendor/github.com/go-ini/ini/README.md index 0652fb7339c6..85947422d70e 100644 --- a/vendor/github.com/go-ini/ini/README.md +++ b/vendor/github.com/go-ini/ini/README.md @@ -1,4 +1,4 @@ -ini [![Build Status](https://drone.io/github.com/go-ini/ini/status.png)](https://drone.io/github.com/go-ini/ini/latest) [![](http://gocover.io/_badge/github.com/go-ini/ini)](http://gocover.io/github.com/go-ini/ini) +INI [![Build Status](https://travis-ci.org/go-ini/ini.svg?branch=master)](https://travis-ci.org/go-ini/ini) [![Sourcegraph](https://sourcegraph.com/github.com/go-ini/ini/-/badge.svg)](https://sourcegraph.com/github.com/go-ini/ini?badge) === ![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200) @@ -9,7 +9,7 @@ Package ini provides INI file read and write functionality in Go. ## Feature -- Load multiple data sources(`[]byte` or file) with overwrites. +- Load multiple data sources(`[]byte`, file and `io.ReadCloser`) with overwrites. - Read with recursion values. - Read with parent-child sections. - Read with auto-increment key names. @@ -22,16 +22,32 @@ Package ini provides INI file read and write functionality in Go. ## Installation +To use a tagged revision: + go get gopkg.in/ini.v1 +To use with latest changes: + + go get github.com/go-ini/ini + +Please add `-u` flag to update in the future. + +### Testing + +If you want to test on your machine, please apply `-t` flag: + + go get -t gopkg.in/ini.v1 + +Please add `-u` flag to update in the future. 
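Stepping back to the libnetwork/ipvs change earlier in this section: the new `Flush` method rides on the `doCmdWithoutAttr` netlink helper. The following is only a minimal sketch of how a caller might use it, assuming the existing `ipvs.New("")` constructor, a Linux host, and enough privileges to open a netlink socket; nothing here is prescribed by the diff itself.

```go
package main

import (
	"log"

	"github.com/docker/libnetwork/ipvs"
)

func main() {
	// New("") opens an IPVS netlink handle in the current network namespace.
	handle, err := ipvs.New("")
	if err != nil {
		log.Fatalf("ipvs: %v", err)
	}

	// Flush removes every virtual service in one netlink round trip,
	// instead of iterating over services and calling DelService on each.
	if err := handle.Flush(); err != nil {
		log.Fatalf("flush: %v", err)
	}
}
```

The point of the added command is simply to replace a list-then-delete loop with a single flush request.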
+ ## Getting Started ### Loading from data sources -A **Data Source** is either raw data in type `[]byte` or a file name with type `string` and you can load **as many as** data sources you want. Passing other types will simply return an error. +A **Data Source** is either raw data in type `[]byte`, a file name with type `string` or `io.ReadCloser`. You can load **as many data sources as you want**. Passing other types will simply return an error. ```go -cfg, err := ini.Load([]byte("raw data"), "filename") +cfg, err := ini.Load([]byte("raw data"), "filename", ioutil.NopCloser(bytes.NewReader([]byte("some other data")))) ``` Or start with an empty object: @@ -40,12 +56,72 @@ Or start with an empty object: cfg := ini.Empty() ``` -When you cannot decide how many data sources to load at the beginning, you still able to **Append()** them later. +When you cannot decide how many data sources to load at the beginning, you will still be able to **Append()** them later. ```go err := cfg.Append("other file", []byte("other raw data")) ``` +If you have a list of files with possibilities that some of them may not available at the time, and you don't know exactly which ones, you can use `LooseLoad` to ignore nonexistent files without returning error. + +```go +cfg, err := ini.LooseLoad("filename", "filename_404") +``` + +The cool thing is, whenever the file is available to load while you're calling `Reload` method, it will be counted as usual. + +#### Ignore cases of key name + +When you do not care about cases of section and key names, you can use `InsensitiveLoad` to force all names to be lowercased while parsing. + +```go +cfg, err := ini.InsensitiveLoad("filename") +//... + +// sec1 and sec2 are the exactly same section object +sec1, err := cfg.GetSection("Section") +sec2, err := cfg.GetSection("SecTIOn") + +// key1 and key2 are the exactly same key object +key1, err := cfg.GetKey("Key") +key2, err := cfg.GetKey("KeY") +``` + +#### MySQL-like boolean key + +MySQL's configuration allows a key without value as follows: + +```ini +[mysqld] +... +skip-host-cache +skip-name-resolve +``` + +By default, this is considered as missing value. But if you know you're going to deal with those cases, you can assign advanced load options: + +```go +cfg, err := LoadSources(LoadOptions{AllowBooleanKeys: true}, "my.cnf")) +``` + +The value of those keys are always `true`, and when you save to a file, it will keep in the same foramt as you read. + +To generate such keys in your program, you could use `NewBooleanKey`: + +```go +key, err := sec.NewBooleanKey("skip-host-cache") +``` + +#### Comment + +Take care that following format will be treated as comment: + +1. Line begins with `#` or `;` +2. Words after `#` or `;` +3. Words after section name (i.e words after `[some section name]`) + +If you want to save a value with `#` or `;`, please quote them with ``` ` ``` or ``` """ ```. + ### Working with sections To get a section, you would need to: @@ -63,7 +139,7 @@ section, err := cfg.GetSection("") When you're pretty sure the section exists, following code could make your life easier: ```go -section := cfg.Section("") +section := cfg.Section("section name") ``` What happens when the section somehow does not exist? Don't panic, it automatically creates and returns a new section to you. 
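To make the loading options above concrete, here is a small, non-authoritative sketch that combines loose loading with MySQL-style boolean keys via `LoadSources`; the file names and the `mysqld` section are made up for illustration.

```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/ini.v1"
)

func main() {
	// Loose: ignore files that do not exist yet.
	// AllowBooleanKeys: accept value-less keys such as "skip-name-resolve".
	cfg, err := ini.LoadSources(ini.LoadOptions{
		Loose:            true,
		AllowBooleanKeys: true,
	}, "my.cnf", "my.local.cnf") // my.local.cnf may be absent
	if err != nil {
		log.Fatal(err)
	}

	// Section() never fails: a missing section is created on the fly,
	// so chained lookups stay short.
	skip := cfg.Section("mysqld").Key("skip-name-resolve").MustBool(false)
	fmt.Println("skip-name-resolve:", skip)
}
```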
@@ -117,7 +193,7 @@ names := cfg.Section("").KeyStrings() To get a clone hash of keys and corresponding values: ```go -hash := cfg.GetSection("").KeysHash() +hash := cfg.Section("").KeysHash() ``` ### Working with values @@ -155,8 +231,8 @@ To get value with types: ```go // For boolean values: -// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On -// false when value is: 0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off +// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On +// false when value is: 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off v, err = cfg.Section("").Key("BOOL").Bool() v, err = cfg.Section("").Key("FLOAT64").Float64() v, err = cfg.Section("").Key("INT").Int() @@ -230,6 +306,16 @@ cfg.Section("advance").Key("two_lines").String() // how about continuation lines cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 ``` +Well, I hate continuation lines, how do I disable that? + +```go +cfg, err := ini.LoadSources(ini.LoadOptions{ + IgnoreContinuation: true, +}, "filename") +``` + +Holy crap! + Note that single quotes around values will be stripped: ```ini @@ -268,9 +354,13 @@ vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), min vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339 ``` -To auto-split value into slice: +##### Auto-split values into a slice + +To use zero value of type for invalid inputs: ```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0] vals = cfg.Section("").Key("STRINGS").Strings(",") vals = cfg.Section("").Key("FLOAT64S").Float64s(",") vals = cfg.Section("").Key("INTS").Ints(",") @@ -280,6 +370,32 @@ vals = cfg.Section("").Key("UINT64S").Uint64s(",") vals = cfg.Section("").Key("TIMES").Times(",") ``` +To exclude invalid values out of result slice: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> [2.2] +vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",") +vals = cfg.Section("").Key("INTS").ValidInts(",") +vals = cfg.Section("").Key("INT64S").ValidInt64s(",") +vals = cfg.Section("").Key("UINTS").ValidUints(",") +vals = cfg.Section("").Key("UINT64S").ValidUint64s(",") +vals = cfg.Section("").Key("TIMES").ValidTimes(",") +``` + +Or to return nothing but error when have invalid inputs: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> error +vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",") +vals = cfg.Section("").Key("INTS").StrictInts(",") +vals = cfg.Section("").Key("INT64S").StrictInt64s(",") +vals = cfg.Section("").Key("UINTS").StrictUints(",") +vals = cfg.Section("").Key("UINT64S").StrictUint64s(",") +vals = cfg.Section("").Key("TIMES").StrictTimes(",") +``` + ### Save your configuration Finally, it's time to save your configuration to somewhere. 
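As a hedged end-to-end sketch of that step (the section, key, and file name below are invented), this builds a file in memory and persists it with `SaveTo`, the file-writing counterpart of the `WriteTo` calls shown next.

```go
package main

import (
	"log"

	"gopkg.in/ini.v1"
)

func main() {
	cfg := ini.Empty()

	sec, err := cfg.NewSection("server")
	if err != nil {
		log.Fatal(err)
	}
	if _, err := sec.NewKey("http_port", "8080"); err != nil {
		log.Fatal(err)
	}

	// SaveTo writes to a named file; WriteTo/WriteToIndent (below) target any io.Writer.
	if err := cfg.SaveTo("app.ini"); err != nil {
		log.Fatal(err)
	}
}
```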
@@ -300,6 +416,12 @@ cfg.WriteTo(writer) cfg.WriteToIndent(writer, "\t") ``` +By default, spaces are used to align "=" sign between key and values, to disable that: + +```go +ini.PrettyFormat = false +``` + ## Advanced Usage ### Recursive Values @@ -341,6 +463,27 @@ CLONE_URL = https://%(IMPORT_PATH)s cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 ``` +#### Retrieve parent keys available to a child section + +```go +cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"] +``` + +### Unparseable Sections + +Sometimes, you have sections that do not contain key-value pairs but raw content, to handle such case, you can use `LoadOptions.UnparsableSections`: + +```go +cfg, err := LoadSources(LoadOptions{UnparseableSections: []string{"COMMENTS"}}, `[COMMENTS] +<1> This slide has the fuel listed in the wrong units `)) + +body := cfg.Section("COMMENTS").Body() + +/* --- start --- +<1> This slide has the fuel listed in the wrong units +------ end --- */ +``` + ### Auto-increment Key Names If key name is `-` in data source, then it would be seen as special syntax for auto-increment key name start from 1, and every section is independent on counter. @@ -425,8 +568,8 @@ Why not? ```go type Embeded struct { Dates []time.Time `delim:"|"` - Places []string - None []int + Places []string `ini:"places,omitempty"` + None []int `ini:",omitempty"` } type Author struct { @@ -461,8 +604,7 @@ GPA = 2.8 [Embeded] Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00 -Places = HangZhou,Boston -None = +places = HangZhou,Boston ``` #### Name Mapper @@ -482,7 +624,7 @@ type Info struct { } func main() { - err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("packag_name=ini")) + err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini")) // ... cfg, err := ini.Load([]byte("PACKAGE_NAME=ini")) @@ -496,6 +638,26 @@ func main() { Same rules of name mapper apply to `ini.ReflectFromWithMapper` function. +#### Value Mapper + +To expand values (e.g. from environment variables), you can use the `ValueMapper` to transform values: + +```go +type Env struct { + Foo string `ini:"foo"` +} + +func main() { + cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n") + cfg.ValueMapper = os.ExpandEnv + // ... + env := &Env{} + err = cfg.Section("env").MapTo(env) +} +``` + +This would set the value of `env.Foo` to the value of the environment variable `MY_VAR`. 
+ #### Other Notes On Map/Reflect Any embedded struct is treated as a section by default, and there is no automatic parent-child relations in map/reflect feature: diff --git a/vendor/github.com/go-ini/ini/README_ZH.md b/vendor/github.com/go-ini/ini/README_ZH.md index 27fdb32fee00..163432db9aff 100644 --- a/vendor/github.com/go-ini/ini/README_ZH.md +++ b/vendor/github.com/go-ini/ini/README_ZH.md @@ -2,7 +2,7 @@ ## 功能特性 -- 支持覆盖加载多个数据源(`[]byte` 或文件) +- 支持覆盖加载多个数据源(`[]byte`、文件和 `io.ReadCloser`) - 支持递归读取键值 - 支持读取父子分区 - 支持读取自增键名 @@ -15,16 +15,32 @@ ## 下载安装 +使用一个特定版本: + go get gopkg.in/ini.v1 +使用最新版: + + go get github.com/go-ini/ini + +如需更新请添加 `-u` 选项。 + +### 测试安装 + +如果您想要在自己的机器上运行测试,请使用 `-t` 标记: + + go get -t gopkg.in/ini.v1 + +如需更新请添加 `-u` 选项。 + ## 开始使用 ### 从数据源加载 -一个 **数据源** 可以是 `[]byte` 类型的原始数据,或 `string` 类型的文件路径。您可以加载 **任意多个** 数据源。如果您传递其它类型的数据源,则会直接返回错误。 +一个 **数据源** 可以是 `[]byte` 类型的原始数据,`string` 类型的文件路径或 `io.ReadCloser`。您可以加载 **任意多个** 数据源。如果您传递其它类型的数据源,则会直接返回错误。 ```go -cfg, err := ini.Load([]byte("raw data"), "filename") +cfg, err := ini.Load([]byte("raw data"), "filename", ioutil.NopCloser(bytes.NewReader([]byte("some other data")))) ``` 或者从一个空白的文件开始: @@ -39,6 +55,66 @@ cfg := ini.Empty() err := cfg.Append("other file", []byte("other raw data")) ``` +当您想要加载一系列文件,但是不能够确定其中哪些文件是不存在的,可以通过调用函数 `LooseLoad` 来忽略它们(`Load` 会因为文件不存在而返回错误): + +```go +cfg, err := ini.LooseLoad("filename", "filename_404") +``` + +更牛逼的是,当那些之前不存在的文件在重新调用 `Reload` 方法的时候突然出现了,那么它们会被正常加载。 + +#### 忽略键名的大小写 + +有时候分区和键的名称大小写混合非常烦人,这个时候就可以通过 `InsensitiveLoad` 将所有分区和键名在读取里强制转换为小写: + +```go +cfg, err := ini.InsensitiveLoad("filename") +//... + +// sec1 和 sec2 指向同一个分区对象 +sec1, err := cfg.GetSection("Section") +sec2, err := cfg.GetSection("SecTIOn") + +// key1 和 key2 指向同一个键对象 +key1, err := cfg.GetKey("Key") +key2, err := cfg.GetKey("KeY") +``` + +#### 类似 MySQL 配置中的布尔值键 + +MySQL 的配置文件中会出现没有具体值的布尔类型的键: + +```ini +[mysqld] +... +skip-host-cache +skip-name-resolve +``` + +默认情况下这被认为是缺失值而无法完成解析,但可以通过高级的加载选项对它们进行处理: + +```go +cfg, err := LoadSources(LoadOptions{AllowBooleanKeys: true}, "my.cnf")) +``` + +这些键的值永远为 `true`,且在保存到文件时也只会输出键名。 + +如果您想要通过程序来生成此类键,则可以使用 `NewBooleanKey`: + +```go +key, err := sec.NewBooleanKey("skip-host-cache") +``` + +#### 关于注释 + +下述几种情况的内容将被视为注释: + +1. 所有以 `#` 或 `;` 开头的行 +2. 所有在 `#` 或 `;` 之后的内容 +3. 分区标签后的文字 (即 `[分区名]` 之后的内容) + +如果你希望使用包含 `#` 或 `;` 的值,请使用 ``` ` ``` 或 ``` """ ``` 进行包覆。 + ### 操作分区(Section) 获取指定分区: @@ -56,7 +132,7 @@ section, err := cfg.GetSection("") 当您非常确定某个分区是存在的,可以使用以下简便方法: ```go -section := cfg.Section("") +section := cfg.Section("section name") ``` 如果不小心判断错了,要获取的分区其实是不存在的,那会发生什么呢?没事的,它会自动创建并返回一个对应的分区对象给您。 @@ -110,7 +186,7 @@ names := cfg.Section("").KeyStrings() 获取分区下的所有键值对的克隆: ```go -hash := cfg.GetSection("").KeysHash() +hash := cfg.Section("").KeysHash() ``` ### 操作键值(Value) @@ -148,8 +224,8 @@ yes := cfg.Section("").HasValue("test value") ```go // 布尔值的规则: -// true 当值为:1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On -// false 当值为:0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off +// true 当值为:1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On +// false 当值为:0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off v, err = cfg.Section("").Key("BOOL").Bool() v, err = cfg.Section("").Key("FLOAT64").Float64() v, err = cfg.Section("").Key("INT").Int() @@ -223,6 +299,16 @@ cfg.Section("advance").Key("two_lines").String() // how about continuation lines cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 ``` +可是我有时候觉得两行连在一起特别没劲,怎么才能不自动连接两行呢? 
+ +```go +cfg, err := ini.LoadSources(ini.LoadOptions{ + IgnoreContinuation: true, +}, "filename") +``` + +哇靠给力啊! + 需要注意的是,值两侧的单引号会被自动剔除: ```ini @@ -261,9 +347,13 @@ vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), min vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339 ``` -自动分割键值为切片(slice): +##### 自动分割键值到切片(slice) + +当存在无效输入时,使用零值代替: ```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0] vals = cfg.Section("").Key("STRINGS").Strings(",") vals = cfg.Section("").Key("FLOAT64S").Float64s(",") vals = cfg.Section("").Key("INTS").Ints(",") @@ -273,6 +363,32 @@ vals = cfg.Section("").Key("UINT64S").Uint64s(",") vals = cfg.Section("").Key("TIMES").Times(",") ``` +从结果切片中剔除无效输入: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> [2.2] +vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",") +vals = cfg.Section("").Key("INTS").ValidInts(",") +vals = cfg.Section("").Key("INT64S").ValidInt64s(",") +vals = cfg.Section("").Key("UINTS").ValidUints(",") +vals = cfg.Section("").Key("UINT64S").ValidUint64s(",") +vals = cfg.Section("").Key("TIMES").ValidTimes(",") +``` + +当存在无效输入时,直接返回错误: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> error +vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",") +vals = cfg.Section("").Key("INTS").StrictInts(",") +vals = cfg.Section("").Key("INT64S").StrictInt64s(",") +vals = cfg.Section("").Key("UINTS").StrictUints(",") +vals = cfg.Section("").Key("UINT64S").StrictUint64s(",") +vals = cfg.Section("").Key("TIMES").StrictTimes(",") +``` + ### 保存配置 终于到了这个时刻,是时候保存一下配置了。 @@ -293,9 +409,15 @@ cfg.WriteTo(writer) cfg.WriteToIndent(writer, "\t") ``` -### 高级用法 +默认情况下,空格将被用于对齐键值之间的等号以美化输出结果,以下代码可以禁用该功能: + +```go +ini.PrettyFormat = false +``` + +## 高级用法 -#### 递归读取键值 +### 递归读取键值 在获取所有键值的过程中,特殊语法 `%()s` 会被应用,其中 `` 可以是相同分区或者默认分区下的键名。字符串 `%()s` 会被相应的键值所替代,如果指定的键不存在,则会用空字符串替代。您可以最多使用 99 层的递归嵌套。 @@ -315,7 +437,7 @@ cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini ``` -#### 读取父子分区 +### 读取父子分区 您可以在分区名称中使用 `.` 来表示两个或多个分区之间的父子关系。如果某个键在子分区中不存在,则会去它的父分区中再次寻找,直到没有父分区为止。 @@ -334,7 +456,28 @@ CLONE_URL = https://%(IMPORT_PATH)s cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 ``` -#### 读取自增键名 +#### 获取上级父分区下的所有键名 + +```go +cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"] +``` + +### 无法解析的分区 + +如果遇到一些比较特殊的分区,它们不包含常见的键值对,而是没有固定格式的纯文本,则可以使用 `LoadOptions.UnparsableSections` 进行处理: + +```go +cfg, err := LoadSources(LoadOptions{UnparseableSections: []string{"COMMENTS"}}, `[COMMENTS] +<1> This slide has the fuel listed in the wrong units `)) + +body := cfg.Section("COMMENTS").Body() + +/* --- start --- +<1> This slide has the fuel listed in the wrong units +------ end --- */ +``` + +### 读取自增键名 如果数据源中的键名为 `-`,则认为该键使用了自增键名的特殊语法。计数器从 1 开始,并且分区之间是相互独立的。 @@ -416,8 +559,8 @@ p := &Person{ ```go type Embeded struct { Dates []time.Time `delim:"|"` - Places []string - None []int + Places []string `ini:"places,omitempty"` + None []int `ini:",omitempty"` } type Author struct { @@ -452,8 +595,7 @@ GPA = 2.8 [Embeded] Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00 -Places = HangZhou,Boston -None = +places = HangZhou,Boston ``` #### 名称映射器(Name Mapper) @@ -473,7 +615,7 @@ type Info struct{ } func main() { - err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, 
[]byte("packag_name=ini")) + err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini")) // ... cfg, err := ini.Load([]byte("PACKAGE_NAME=ini")) @@ -487,6 +629,26 @@ func main() { 使用函数 `ini.ReflectFromWithMapper` 时也可应用相同的规则。 +#### 值映射器(Value Mapper) + +值映射器允许使用一个自定义函数自动展开值的具体内容,例如:运行时获取环境变量: + +```go +type Env struct { + Foo string `ini:"foo"` +} + +func main() { + cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n") + cfg.ValueMapper = os.ExpandEnv + // ... + env := &Env{} + err = cfg.Section("env").MapTo(env) +} +``` + +本例中,`env.Foo` 将会是运行时所获取到环境变量 `MY_VAR` 的值。 + #### 映射/反射的其它说明 任何嵌入的结构都会被默认认作一个不同的分区,并且不会自动产生所谓的父子分区关联: diff --git a/vendor/github.com/go-ini/ini/error.go b/vendor/github.com/go-ini/ini/error.go new file mode 100644 index 000000000000..80afe7431584 --- /dev/null +++ b/vendor/github.com/go-ini/ini/error.go @@ -0,0 +1,32 @@ +// Copyright 2016 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "fmt" +) + +type ErrDelimiterNotFound struct { + Line string +} + +func IsErrDelimiterNotFound(err error) bool { + _, ok := err.(ErrDelimiterNotFound) + return ok +} + +func (err ErrDelimiterNotFound) Error() string { + return fmt.Sprintf("key-value delimiter not found: %s", err.Line) +} diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go index 8b09ad3644f9..68d73aa750c7 100644 --- a/vendor/github.com/go-ini/ini/ini.go +++ b/vendor/github.com/go-ini/ini/ini.go @@ -16,11 +16,11 @@ package ini import ( - "bufio" "bytes" "errors" "fmt" "io" + "io/ioutil" "os" "regexp" "runtime" @@ -31,25 +31,35 @@ import ( ) const ( + // Name for default section. You can use this constant or the string literal. + // In most of cases, an empty string is all you need to access the section. DEFAULT_SECTION = "DEFAULT" + // Maximum allowed depth when recursively substituing variable names. _DEPTH_VALUES = 99 - - _VERSION = "1.7.0" + _VERSION = "1.25.4" ) +// Version returns current package version literal. func Version() string { return _VERSION } var ( + // Delimiter to determine or compose a new line. + // This variable will be changed to "\r\n" automatically on Windows + // at package init time. LineBreak = "\n" // Variable regexp pattern: %(variable)s varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`) - // Write spaces around "=" to look better. + // Indicate whether to align "=" sign with spaces to produce pretty output + // or reduce all possible spaces for compact format. PrettyFormat = true + + // Explicitly write DEFAULT section header + DefaultHeader = false ) func init() { @@ -67,11 +77,12 @@ func inSlice(str string, s []string) bool { return false } -// dataSource is a interface that returns file content. +// dataSource is an interface that returns object which can be read and closed. type dataSource interface { ReadCloser() (io.ReadCloser, error) } +// sourceFile represents an object that contains content on the local file system. 
type sourceFile struct { name string } @@ -92,618 +103,24 @@ func (rc *bytesReadCloser) Close() error { return nil } +// sourceData represents an object that contains content in memory. type sourceData struct { data []byte } func (s *sourceData) ReadCloser() (io.ReadCloser, error) { - return &bytesReadCloser{bytes.NewReader(s.data)}, nil -} - -// ____ __. -// | |/ _|____ ___.__. -// | <_/ __ < | | -// | | \ ___/\___ | -// |____|__ \___ > ____| -// \/ \/\/ - -// Key represents a key under a section. -type Key struct { - s *Section - Comment string - name string - value string - isAutoIncr bool -} - -// Name returns name of key. -func (k *Key) Name() string { - return k.name -} - -// Value returns raw value of key for performance purpose. -func (k *Key) Value() string { - return k.value -} - -// String returns string representation of value. -func (k *Key) String() string { - val := k.value - if strings.Index(val, "%") == -1 { - return val - } - - for i := 0; i < _DEPTH_VALUES; i++ { - vr := varPattern.FindString(val) - if len(vr) == 0 { - break - } - - // Take off leading '%(' and trailing ')s'. - noption := strings.TrimLeft(vr, "%(") - noption = strings.TrimRight(noption, ")s") - - // Search in the same section. - nk, err := k.s.GetKey(noption) - if err != nil { - // Search again in default section. - nk, _ = k.s.f.Section("").GetKey(noption) - } - - // Substitute by new value and take off leading '%(' and trailing ')s'. - val = strings.Replace(val, vr, nk.value, -1) - } - return val -} - -// Validate accepts a validate function which can -// return modifed result as key value. -func (k *Key) Validate(fn func(string) string) string { - return fn(k.String()) -} - -// parseBool returns the boolean value represented by the string. -// -// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On, -// 0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off. -// Any other value returns an error. -func parseBool(str string) (value bool, err error) { - switch str { - case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "ON", "on", "On": - return true, nil - case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "OFF", "off", "Off": - return false, nil - } - return false, fmt.Errorf("parsing \"%s\": invalid syntax", str) -} - -// Bool returns bool type value. -func (k *Key) Bool() (bool, error) { - return parseBool(k.String()) -} - -// Float64 returns float64 type value. -func (k *Key) Float64() (float64, error) { - return strconv.ParseFloat(k.String(), 64) -} - -// Int returns int type value. -func (k *Key) Int() (int, error) { - return strconv.Atoi(k.String()) -} - -// Int64 returns int64 type value. -func (k *Key) Int64() (int64, error) { - return strconv.ParseInt(k.String(), 10, 64) -} - -// Uint returns uint type valued. -func (k *Key) Uint() (uint, error) { - u, e := strconv.ParseUint(k.String(), 10, 64) - return uint(u), e -} - -// Uint64 returns uint64 type value. -func (k *Key) Uint64() (uint64, error) { - return strconv.ParseUint(k.String(), 10, 64) -} - -// Duration returns time.Duration type value. -func (k *Key) Duration() (time.Duration, error) { - return time.ParseDuration(k.String()) -} - -// TimeFormat parses with given format and returns time.Time type value. -func (k *Key) TimeFormat(format string) (time.Time, error) { - return time.Parse(format, k.String()) -} - -// Time parses with RFC3339 format and returns time.Time type value. 
-func (k *Key) Time() (time.Time, error) { - return k.TimeFormat(time.RFC3339) -} - -// MustString returns default value if key value is empty. -func (k *Key) MustString(defaultVal string) string { - val := k.String() - if len(val) == 0 { - return defaultVal - } - return val -} - -// MustBool always returns value without error, -// it returns false if error occurs. -func (k *Key) MustBool(defaultVal ...bool) bool { - val, err := k.Bool() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustFloat64 always returns value without error, -// it returns 0.0 if error occurs. -func (k *Key) MustFloat64(defaultVal ...float64) float64 { - val, err := k.Float64() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustInt always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustInt(defaultVal ...int) int { - val, err := k.Int() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustInt64 always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustInt64(defaultVal ...int64) int64 { - val, err := k.Int64() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustUint always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustUint(defaultVal ...uint) uint { - val, err := k.Uint() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustUint64 always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustUint64(defaultVal ...uint64) uint64 { - val, err := k.Uint64() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustDuration always returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { - val, err := k.Duration() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustTimeFormat always parses with given format and returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { - val, err := k.TimeFormat(format) - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustTime always parses with RFC3339 format and returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustTime(defaultVal ...time.Time) time.Time { - return k.MustTimeFormat(time.RFC3339, defaultVal...) -} - -// In always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) In(defaultVal string, candidates []string) string { - val := k.String() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InFloat64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { - val := k.MustFloat64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InInt always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. 
-func (k *Key) InInt(defaultVal int, candidates []int) int { - val := k.MustInt() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InInt64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { - val := k.MustInt64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InUint always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InUint(defaultVal uint, candidates []uint) uint { - val := k.MustUint() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InUint64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { - val := k.MustUint64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InTimeFormat always parses with given format and returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { - val := k.MustTimeFormat(format) - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InTime always parses with RFC3339 format and returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { - return k.InTimeFormat(time.RFC3339, defaultVal, candidates) -} - -// RangeFloat64 checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { - val := k.MustFloat64() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeInt checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeInt(defaultVal, min, max int) int { - val := k.MustInt() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeInt64 checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { - val := k.MustInt64() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeTimeFormat checks if value with given format is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { - val := k.MustTimeFormat(format) - if val.Unix() < min.Unix() || val.Unix() > max.Unix() { - return defaultVal - } - return val -} - -// RangeTime checks if value with RFC3339 format is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { - return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) -} - -// Strings returns list of string devide by given delimiter. 
-func (k *Key) Strings(delim string) []string { - str := k.String() - if len(str) == 0 { - return []string{} - } - - vals := strings.Split(str, delim) - for i := range vals { - vals[i] = strings.TrimSpace(vals[i]) - } - return vals -} - -// Float64s returns list of float64 devide by given delimiter. -func (k *Key) Float64s(delim string) []float64 { - strs := k.Strings(delim) - vals := make([]float64, len(strs)) - for i := range strs { - vals[i], _ = strconv.ParseFloat(strs[i], 64) - } - return vals -} - -// Ints returns list of int devide by given delimiter. -func (k *Key) Ints(delim string) []int { - strs := k.Strings(delim) - vals := make([]int, len(strs)) - for i := range strs { - vals[i], _ = strconv.Atoi(strs[i]) - } - return vals -} - -// Int64s returns list of int64 devide by given delimiter. -func (k *Key) Int64s(delim string) []int64 { - strs := k.Strings(delim) - vals := make([]int64, len(strs)) - for i := range strs { - vals[i], _ = strconv.ParseInt(strs[i], 10, 64) - } - return vals -} - -// Uints returns list of uint devide by given delimiter. -func (k *Key) Uints(delim string) []uint { - strs := k.Strings(delim) - vals := make([]uint, len(strs)) - for i := range strs { - u, _ := strconv.ParseUint(strs[i], 10, 64) - vals[i] = uint(u) - } - return vals -} - -// Uint64s returns list of uint64 devide by given delimiter. -func (k *Key) Uint64s(delim string) []uint64 { - strs := k.Strings(delim) - vals := make([]uint64, len(strs)) - for i := range strs { - vals[i], _ = strconv.ParseUint(strs[i], 10, 64) - } - return vals -} - -// TimesFormat parses with given format and returns list of time.Time devide by given delimiter. -func (k *Key) TimesFormat(format, delim string) []time.Time { - strs := k.Strings(delim) - vals := make([]time.Time, len(strs)) - for i := range strs { - vals[i], _ = time.Parse(format, strs[i]) - } - return vals -} - -// Times parses with RFC3339 format and returns list of time.Time devide by given delimiter. -func (k *Key) Times(delim string) []time.Time { - return k.TimesFormat(time.RFC3339, delim) -} - -// SetValue changes key value. -func (k *Key) SetValue(v string) { - k.value = v -} - -// _________ __ .__ -// / _____/ ____ _____/ |_|__| ____ ____ -// \_____ \_/ __ \_/ ___\ __\ |/ _ \ / \ -// / \ ___/\ \___| | | ( <_> ) | \ -// /_______ /\___ >\___ >__| |__|\____/|___| / -// \/ \/ \/ \/ - -// Section represents a config section. -type Section struct { - f *File - Comment string - name string - keys map[string]*Key - keyList []string - keysHash map[string]string -} - -func newSection(f *File, name string) *Section { - return &Section{f, "", name, make(map[string]*Key), make([]string, 0, 10), make(map[string]string)} -} - -// Name returns name of Section. -func (s *Section) Name() string { - return s.name -} - -// NewKey creates a new key to given section. -func (s *Section) NewKey(name, val string) (*Key, error) { - if len(name) == 0 { - return nil, errors.New("error creating new key: empty key name") - } - - if s.f.BlockMode { - s.f.lock.Lock() - defer s.f.lock.Unlock() - } - - if inSlice(name, s.keyList) { - s.keys[name].value = val - return s.keys[name], nil - } - - s.keyList = append(s.keyList, name) - s.keys[name] = &Key{s, "", name, val, false} - s.keysHash[name] = val - return s.keys[name], nil -} - -// GetKey returns key in section by given name. -func (s *Section) GetKey(name string) (*Key, error) { - // FIXME: change to section level lock? 
- if s.f.BlockMode { - s.f.lock.RLock() - } - key := s.keys[name] - if s.f.BlockMode { - s.f.lock.RUnlock() - } - - if key == nil { - // Check if it is a child-section. - sname := s.name - for { - if i := strings.LastIndex(sname, "."); i > -1 { - sname = sname[:i] - sec, err := s.f.GetSection(sname) - if err != nil { - continue - } - return sec.GetKey(name) - } else { - break - } - } - return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name) - } - return key, nil + return ioutil.NopCloser(bytes.NewReader(s.data)), nil } -// HasKey returns true if section contains a key with given name. -func (s *Section) Haskey(name string) bool { - key, _ := s.GetKey(name) - return key != nil +// sourceReadCloser represents an input stream with Close method. +type sourceReadCloser struct { + reader io.ReadCloser } -// HasKey returns true if section contains given raw value. -func (s *Section) HasValue(value string) bool { - if s.f.BlockMode { - s.f.lock.RLock() - defer s.f.lock.RUnlock() - } - - for _, k := range s.keys { - if value == k.value { - return true - } - } - return false +func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) { + return s.reader, nil } -// Key assumes named Key exists in section and returns a zero-value when not. -func (s *Section) Key(name string) *Key { - key, err := s.GetKey(name) - if err != nil { - // It's OK here because the only possible error is empty key name, - // but if it's empty, this piece of code won't be executed. - key, _ = s.NewKey(name, "") - return key - } - return key -} - -// Keys returns list of keys of section. -func (s *Section) Keys() []*Key { - keys := make([]*Key, len(s.keyList)) - for i := range s.keyList { - keys[i] = s.Key(s.keyList[i]) - } - return keys -} - -// KeyStrings returns list of key names of section. -func (s *Section) KeyStrings() []string { - list := make([]string, len(s.keyList)) - copy(list, s.keyList) - return list -} - -// KeysHash returns keys hash consisting of names and values. -func (s *Section) KeysHash() map[string]string { - if s.f.BlockMode { - s.f.lock.RLock() - defer s.f.lock.RUnlock() - } - - hash := map[string]string{} - for key, value := range s.keysHash { - hash[key] = value - } - return hash -} - -// DeleteKey deletes a key from section. -func (s *Section) DeleteKey(name string) { - if s.f.BlockMode { - s.f.lock.Lock() - defer s.f.lock.Unlock() - } - - for i, k := range s.keyList { - if k == name { - s.keyList = append(s.keyList[:i], s.keyList[i+1:]...) - delete(s.keys, name) - return - } - } -} - -// ___________.__.__ -// \_ _____/|__| | ____ -// | __) | | | _/ __ \ -// | \ | | |_\ ___/ -// \___ / |__|____/\___ > -// \/ \/ - // File represents a combination of a or more INI file(s) in memory. type File struct { // Should make things safe, but sometimes doesn't matter. @@ -719,16 +136,20 @@ type File struct { // To keep data in order. sectionList []string + options LoadOptions + NameMapper + ValueMapper } // newFile initializes File object with given data sources. 
-func newFile(dataSources []dataSource) *File { +func newFile(dataSources []dataSource, opts LoadOptions) *File { return &File{ BlockMode: true, dataSources: dataSources, sections: make(map[string]*Section), sectionList: make([]string, 0, 10), + options: opts, } } @@ -738,14 +159,31 @@ func parseDataSource(source interface{}) (dataSource, error) { return sourceFile{s}, nil case []byte: return &sourceData{s}, nil + case io.ReadCloser: + return &sourceReadCloser{s}, nil default: return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s) } } -// Load loads and parses from INI data sources. -// Arguments can be mixed of file name with string type, or raw data in []byte. -func Load(source interface{}, others ...interface{}) (_ *File, err error) { +type LoadOptions struct { + // Loose indicates whether the parser should ignore nonexistent files or return error. + Loose bool + // Insensitive indicates whether the parser forces all section and key names to lowercase. + Insensitive bool + // IgnoreContinuation indicates whether to ignore continuation lines while parsing. + IgnoreContinuation bool + // AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing. + // This type of keys are mostly used in my.cnf. + AllowBooleanKeys bool + // AllowShadows indicates whether to keep track of keys with same name under same section. + AllowShadows bool + // Some INI formats allow group blocks that store a block of raw content that doesn't otherwise + // conform to key/value pairs. Specify the names of those blocks here. + UnparseableSections []string +} + +func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) { sources := make([]dataSource, len(others)+1) sources[0], err = parseDataSource(source) if err != nil { @@ -757,8 +195,36 @@ func Load(source interface{}, others ...interface{}) (_ *File, err error) { return nil, err } } - f := newFile(sources) - return f, f.Reload() + f := newFile(sources, opts) + if err = f.Reload(); err != nil { + return nil, err + } + return f, nil +} + +// Load loads and parses from INI data sources. +// Arguments can be mixed of file name with string type, or raw data in []byte. +// It will return error if list contains nonexistent files. +func Load(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{}, source, others...) +} + +// LooseLoad has exactly same functionality as Load function +// except it ignores nonexistent files instead of returning error. +func LooseLoad(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{Loose: true}, source, others...) +} + +// InsensitiveLoad has exactly same functionality as Load function +// except it forces all section and key names to be lowercased. +func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{Insensitive: true}, source, others...) +} + +// InsensitiveLoad has exactly same functionality as Load function +// except it allows have shadow keys. +func ShadowLoad(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{AllowShadows: true}, source, others...) } // Empty returns an empty file object. 
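The new `AllowShadows` option and the `ShadowLoad` shortcut defined here pair with the shadow-key methods added to `key.go` later in this diff. A minimal sketch, assuming the parser records repeated keys as shadows when the option is on (the INI content is invented):

```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/ini.v1"
)

func main() {
	data := []byte("[ssh]\nIdentityFile = ~/.ssh/id_rsa\nIdentityFile = ~/.ssh/id_ed25519\n")

	// ShadowLoad is shorthand for LoadSources(LoadOptions{AllowShadows: true}, ...).
	cfg, err := ini.ShadowLoad(data)
	if err != nil {
		log.Fatal(err)
	}

	// ValueWithShadows returns the first value followed by every shadow value,
	// so repeated keys under one section are preserved rather than overwritten.
	for _, v := range cfg.Section("ssh").Key("IdentityFile").ValueWithShadows() {
		fmt.Println(v)
	}
}
```

Without `AllowShadows`, the second `IdentityFile` line would simply overwrite the first, which is the pre-existing behaviour.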
@@ -772,6 +238,8 @@ func Empty() *File { func (f *File) NewSection(name string) (*Section, error) { if len(name) == 0 { return nil, errors.New("error creating new section: empty section name") + } else if f.options.Insensitive && name != DEFAULT_SECTION { + name = strings.ToLower(name) } if f.BlockMode { @@ -788,6 +256,18 @@ func (f *File) NewSection(name string) (*Section, error) { return f.sections[name], nil } +// NewRawSection creates a new section with an unparseable body. +func (f *File) NewRawSection(name, body string) (*Section, error) { + section, err := f.NewSection(name) + if err != nil { + return nil, err + } + + section.isRawSection = true + section.rawBody = body + return section, nil +} + // NewSections creates a list of sections. func (f *File) NewSections(names ...string) (err error) { for _, name := range names { @@ -802,6 +282,8 @@ func (f *File) NewSections(names ...string) (err error) { func (f *File) GetSection(name string) (*Section, error) { if len(name) == 0 { name = DEFAULT_SECTION + } else if f.options.Insensitive { + name = strings.ToLower(name) } if f.BlockMode { @@ -811,7 +293,7 @@ func (f *File) GetSection(name string) (*Section, error) { sec := f.sections[name] if sec == nil { - return nil, fmt.Errorf("error when getting section: section '%s' not exists", name) + return nil, fmt.Errorf("section '%s' does not exist", name) } return sec, nil } @@ -864,240 +346,6 @@ func (f *File) DeleteSection(name string) { } } -func cutComment(str string) string { - i := strings.Index(str, "#") - if i == -1 { - return str - } - return str[:i] -} - -func checkMultipleLines(buf *bufio.Reader, line, val, valQuote string) (string, error) { - isEnd := false - for { - next, err := buf.ReadString('\n') - if err != nil { - if err != io.EOF { - return "", err - } - isEnd = true - } - pos := strings.LastIndex(next, valQuote) - if pos > -1 { - val += next[:pos] - break - } - val += next - if isEnd { - return "", fmt.Errorf("error parsing line: missing closing key quote from '%s' to '%s'", line, next) - } - } - return val, nil -} - -func checkContinuationLines(buf *bufio.Reader, val string) (string, bool, error) { - isEnd := false - for { - valLen := len(val) - if valLen == 0 || val[valLen-1] != '\\' { - break - } - val = val[:valLen-1] - - next, err := buf.ReadString('\n') - if err != nil { - if err != io.EOF { - return "", isEnd, err - } - isEnd = true - } - - next = strings.TrimSpace(next) - if len(next) == 0 { - break - } - val += next - } - return val, isEnd, nil -} - -// parse parses data through an io.Reader. -func (f *File) parse(reader io.Reader) error { - buf := bufio.NewReader(reader) - - // Handle BOM-UTF8. - // http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding - mask, err := buf.Peek(3) - if err == nil && len(mask) >= 3 && mask[0] == 239 && mask[1] == 187 && mask[2] == 191 { - buf.Read(mask) - } - - count := 1 - comments := "" - isEnd := false - - section, err := f.NewSection(DEFAULT_SECTION) - if err != nil { - return err - } - - for { - line, err := buf.ReadString('\n') - line = strings.TrimSpace(line) - length := len(line) - - // Check error and ignore io.EOF just for a moment. - if err != nil { - if err != io.EOF { - return fmt.Errorf("error reading next line: %v", err) - } - // The last line of file could be an empty line. - if length == 0 { - break - } - isEnd = true - } - - // Skip empty lines. - if length == 0 { - continue - } - - switch { - case line[0] == '#' || line[0] == ';': // Comments. 
- if len(comments) == 0 { - comments = line - } else { - comments += LineBreak + line - } - continue - case line[0] == '[' && line[length-1] == ']': // New sction. - section, err = f.NewSection(strings.TrimSpace(line[1 : length-1])) - if err != nil { - return err - } - - if len(comments) > 0 { - section.Comment = comments - comments = "" - } - // Reset counter. - count = 1 - continue - } - - // Other possibilities. - var ( - i int - keyQuote string - kname string - valQuote string - val string - ) - - // Key name surrounded by quotes. - if line[0] == '"' { - if length > 6 && line[0:3] == `"""` { - keyQuote = `"""` - } else { - keyQuote = `"` - } - } else if line[0] == '`' { - keyQuote = "`" - } - if len(keyQuote) > 0 { - qLen := len(keyQuote) - pos := strings.Index(line[qLen:], keyQuote) - if pos == -1 { - return fmt.Errorf("error parsing line: missing closing key quote: %s", line) - } - pos = pos + qLen - i = strings.IndexAny(line[pos:], "=:") - if i < 0 { - return fmt.Errorf("error parsing line: key-value delimiter not found: %s", line) - } else if i == pos { - return fmt.Errorf("error parsing line: key is empty: %s", line) - } - i = i + pos - kname = line[qLen:pos] // Just keep spaces inside quotes. - } else { - i = strings.IndexAny(line, "=:") - if i < 0 { - return fmt.Errorf("error parsing line: key-value delimiter not found: %s", line) - } else if i == 0 { - return fmt.Errorf("error parsing line: key is empty: %s", line) - } - kname = strings.TrimSpace(line[0:i]) - } - - isAutoIncr := false - // Auto increment. - if kname == "-" { - isAutoIncr = true - kname = "#" + fmt.Sprint(count) - count++ - } - - lineRight := strings.TrimSpace(line[i+1:]) - lineRightLength := len(lineRight) - firstChar := "" - if lineRightLength >= 2 { - firstChar = lineRight[0:1] - } - if firstChar == "`" { - valQuote = "`" - } else if firstChar == `"` { - if lineRightLength >= 3 && lineRight[0:3] == `"""` { - valQuote = `"""` - } else { - valQuote = `"` - } - } else if firstChar == `'` { - valQuote = `'` - } - - if len(valQuote) > 0 { - qLen := len(valQuote) - pos := strings.LastIndex(lineRight[qLen:], valQuote) - // For multiple-line value check. - if pos == -1 { - if valQuote == `"` || valQuote == `'` { - return fmt.Errorf("error parsing line: single quote does not allow multiple-line value: %s", line) - } - - val = lineRight[qLen:] + "\n" - val, err = checkMultipleLines(buf, line, val, valQuote) - if err != nil { - return err - } - } else { - val = lineRight[qLen : pos+qLen] - } - } else { - val = strings.TrimSpace(cutComment(lineRight)) - val, isEnd, err = checkContinuationLines(buf, val) - if err != nil { - return err - } - } - - k, err := section.NewKey(kname, val) - if err != nil { - return err - } - k.isAutoIncr = isAutoIncr - if len(comments) > 0 { - k.Comment = comments - comments = "" - } - - if isEnd { - break - } - } - return nil -} - func (f *File) reload(s dataSource) error { r, err := s.ReadCloser() if err != nil { @@ -1112,6 +360,11 @@ func (f *File) reload(s dataSource) error { func (f *File) Reload() (err error) { for _, s := range f.dataSources { if err = f.reload(s); err != nil { + // In loose mode, we create an empty default section for nonexistent files. + if os.IsNotExist(err) && f.options.Loose { + f.parse(bytes.NewBuffer(nil)) + continue + } return err } } @@ -1135,7 +388,9 @@ func (f *File) Append(source interface{}, others ...interface{}) error { return f.Reload() } -// WriteToIndent writes file content into io.Writer with given value indention. 
+// WriteToIndent writes content into io.Writer with given indention. +// If PrettyFormat has been set to be true, +// it will align "=" sign with spaces under each section. func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) { equalSign := "=" if PrettyFormat { @@ -1155,17 +410,46 @@ func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) { } } - if i > 0 { + if i > 0 || DefaultHeader { if _, err = buf.WriteString("[" + sname + "]" + LineBreak); err != nil { return 0, err } } else { - // Write nothing if default section is empty. + // Write nothing if default section is empty if len(sec.keyList) == 0 { continue } } + if sec.isRawSection { + if _, err = buf.WriteString(sec.rawBody); err != nil { + return 0, err + } + continue + } + + // Count and generate alignment length and buffer spaces using the + // longest key. Keys may be modifed if they contain certain characters so + // we need to take that into account in our calculation. + alignLength := 0 + if PrettyFormat { + for _, kname := range sec.keyList { + keyLength := len(kname) + // First case will surround key by ` and second by """ + if strings.ContainsAny(kname, "\"=:") { + keyLength += 2 + } else if strings.Contains(kname, "`") { + keyLength += 6 + } + + if keyLength > alignLength { + alignLength = keyLength + } + } + } + alignSpaces := bytes.Repeat([]byte(" "), alignLength) + + KEY_LIST: for _, kname := range sec.keyList { key := sec.Key(kname) if len(key.Comment) > 0 { @@ -1185,26 +469,44 @@ func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) { } switch { - case key.isAutoIncr: + case key.isAutoIncrement: kname = "-" - case strings.Contains(kname, "`") || strings.Contains(kname, `"`): - kname = `"""` + kname + `"""` - case strings.Contains(kname, `=`) || strings.Contains(kname, `:`): + case strings.ContainsAny(kname, "\"=:"): kname = "`" + kname + "`" + case strings.Contains(kname, "`"): + kname = `"""` + kname + `"""` } - val := key.value - // In case key value contains "\n", "`" or "\"". - if strings.Contains(val, "\n") || strings.Contains(val, "`") || strings.Contains(val, `"`) || - strings.Contains(val, "#") { - val = `"""` + val + `"""` - } - if _, err = buf.WriteString(kname + equalSign + val + LineBreak); err != nil { - return 0, err + for _, val := range key.ValueWithShadows() { + if _, err = buf.WriteString(kname); err != nil { + return 0, err + } + + if key.isBooleanType { + if kname != sec.keyList[len(sec.keyList)-1] { + buf.WriteString(LineBreak) + } + continue KEY_LIST + } + + // Write out alignment spaces before "=" sign + if PrettyFormat { + buf.Write(alignSpaces[:alignLength-len(kname)]) + } + + // In case key value contains "\n", "`", "\"", "#" or ";" + if strings.ContainsAny(val, "\n`") { + val = `"""` + val + `"""` + } else if strings.ContainsAny(val, "#;") { + val = "`" + val + "`" + } + if _, err = buf.WriteString(equalSign + val + LineBreak); err != nil { + return 0, err + } } } - // Put a line between sections. + // Put a line between sections if _, err = buf.WriteString(LineBreak); err != nil { return 0, err } diff --git a/vendor/github.com/go-ini/ini/key.go b/vendor/github.com/go-ini/ini/key.go new file mode 100644 index 000000000000..852696f4c4fa --- /dev/null +++ b/vendor/github.com/go-ini/ini/key.go @@ -0,0 +1,703 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "errors" + "fmt" + "strconv" + "strings" + "time" +) + +// Key represents a key under a section. +type Key struct { + s *Section + name string + value string + isAutoIncrement bool + isBooleanType bool + + isShadow bool + shadows []*Key + + Comment string +} + +// newKey simply return a key object with given values. +func newKey(s *Section, name, val string) *Key { + return &Key{ + s: s, + name: name, + value: val, + } +} + +func (k *Key) addShadow(val string) error { + if k.isShadow { + return errors.New("cannot add shadow to another shadow key") + } else if k.isAutoIncrement || k.isBooleanType { + return errors.New("cannot add shadow to auto-increment or boolean key") + } + + shadow := newKey(k.s, k.name, val) + shadow.isShadow = true + k.shadows = append(k.shadows, shadow) + return nil +} + +// AddShadow adds a new shadow key to itself. +func (k *Key) AddShadow(val string) error { + if !k.s.f.options.AllowShadows { + return errors.New("shadow key is not allowed") + } + return k.addShadow(val) +} + +// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv +type ValueMapper func(string) string + +// Name returns name of key. +func (k *Key) Name() string { + return k.name +} + +// Value returns raw value of key for performance purpose. +func (k *Key) Value() string { + return k.value +} + +// ValueWithShadows returns raw values of key and its shadows if any. +func (k *Key) ValueWithShadows() []string { + if len(k.shadows) == 0 { + return []string{k.value} + } + vals := make([]string, len(k.shadows)+1) + vals[0] = k.value + for i := range k.shadows { + vals[i+1] = k.shadows[i].value + } + return vals +} + +// transformValue takes a raw value and transforms to its final string. +func (k *Key) transformValue(val string) string { + if k.s.f.ValueMapper != nil { + val = k.s.f.ValueMapper(val) + } + + // Fail-fast if no indicate char found for recursive value + if !strings.Contains(val, "%") { + return val + } + for i := 0; i < _DEPTH_VALUES; i++ { + vr := varPattern.FindString(val) + if len(vr) == 0 { + break + } + + // Take off leading '%(' and trailing ')s'. + noption := strings.TrimLeft(vr, "%(") + noption = strings.TrimRight(noption, ")s") + + // Search in the same section. + nk, err := k.s.GetKey(noption) + if err != nil { + // Search again in default section. + nk, _ = k.s.f.Section("").GetKey(noption) + } + + // Substitute by new value and take off leading '%(' and trailing ')s'. + val = strings.Replace(val, vr, nk.value, -1) + } + return val +} + +// String returns string representation of value. +func (k *Key) String() string { + return k.transformValue(k.value) +} + +// Validate accepts a validate function which can +// return modifed result as key value. +func (k *Key) Validate(fn func(string) string) string { + return fn(k.String()) +} + +// parseBool returns the boolean value represented by the string. +// +// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On, +// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off. +// Any other value returns an error. 
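key.go above adds shadow keys (duplicate keys kept side by side when AllowShadows is enabled) and keeps the recursive `%(name)s` substitution in transformValue. A minimal sketch, assuming the package's LoadSources helper:

```go
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

func main() {
	src := []byte(`
NAME  = ini
PEER  = alice
PEER  = bob
HELLO = hello %(NAME)s
`)
	cfg, err := ini.LoadSources(ini.LoadOptions{AllowShadows: true}, src)
	if err != nil {
		panic(err)
	}
	sec := cfg.Section("")
	// Duplicate keys become shadows instead of overwriting each other.
	fmt.Println(sec.Key("PEER").ValueWithShadows()) // [alice bob]
	// %(NAME)s is resolved recursively by transformValue.
	fmt.Println(sec.Key("HELLO").String()) // hello ini
}
```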
+func parseBool(str string) (value bool, err error) { + switch str { + case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On": + return true, nil + case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off": + return false, nil + } + return false, fmt.Errorf("parsing \"%s\": invalid syntax", str) +} + +// Bool returns bool type value. +func (k *Key) Bool() (bool, error) { + return parseBool(k.String()) +} + +// Float64 returns float64 type value. +func (k *Key) Float64() (float64, error) { + return strconv.ParseFloat(k.String(), 64) +} + +// Int returns int type value. +func (k *Key) Int() (int, error) { + return strconv.Atoi(k.String()) +} + +// Int64 returns int64 type value. +func (k *Key) Int64() (int64, error) { + return strconv.ParseInt(k.String(), 10, 64) +} + +// Uint returns uint type valued. +func (k *Key) Uint() (uint, error) { + u, e := strconv.ParseUint(k.String(), 10, 64) + return uint(u), e +} + +// Uint64 returns uint64 type value. +func (k *Key) Uint64() (uint64, error) { + return strconv.ParseUint(k.String(), 10, 64) +} + +// Duration returns time.Duration type value. +func (k *Key) Duration() (time.Duration, error) { + return time.ParseDuration(k.String()) +} + +// TimeFormat parses with given format and returns time.Time type value. +func (k *Key) TimeFormat(format string) (time.Time, error) { + return time.Parse(format, k.String()) +} + +// Time parses with RFC3339 format and returns time.Time type value. +func (k *Key) Time() (time.Time, error) { + return k.TimeFormat(time.RFC3339) +} + +// MustString returns default value if key value is empty. +func (k *Key) MustString(defaultVal string) string { + val := k.String() + if len(val) == 0 { + k.value = defaultVal + return defaultVal + } + return val +} + +// MustBool always returns value without error, +// it returns false if error occurs. +func (k *Key) MustBool(defaultVal ...bool) bool { + val, err := k.Bool() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatBool(defaultVal[0]) + return defaultVal[0] + } + return val +} + +// MustFloat64 always returns value without error, +// it returns 0.0 if error occurs. +func (k *Key) MustFloat64(defaultVal ...float64) float64 { + val, err := k.Float64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64) + return defaultVal[0] + } + return val +} + +// MustInt always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt(defaultVal ...int) int { + val, err := k.Int() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatInt(int64(defaultVal[0]), 10) + return defaultVal[0] + } + return val +} + +// MustInt64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt64(defaultVal ...int64) int64 { + val, err := k.Int64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatInt(defaultVal[0], 10) + return defaultVal[0] + } + return val +} + +// MustUint always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint(defaultVal ...uint) uint { + val, err := k.Uint() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatUint(uint64(defaultVal[0]), 10) + return defaultVal[0] + } + return val +} + +// MustUint64 always returns value without error, +// it returns 0 if error occurs. 
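The Must* helpers above never return an error: on a failed parse they fall back to the supplied default and also write that default back into the key. A minimal sketch, assuming the package's Load helper:

```go
package main

import (
	"fmt"
	"time"

	"github.com/go-ini/ini"
)

func main() {
	cfg, err := ini.Load([]byte("port = not-a-number\n"))
	if err != nil {
		panic(err)
	}
	key := cfg.Section("").Key("port")

	fmt.Println(key.MustInt(8080)) // 8080 (parse failed, default used)
	fmt.Println(key.Value())       // "8080" (default written back into the key)

	// Missing keys behave the same way.
	fmt.Println(cfg.Section("").Key("timeout").MustDuration(5 * time.Second)) // 5s
}
```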
+func (k *Key) MustUint64(defaultVal ...uint64) uint64 { + val, err := k.Uint64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatUint(defaultVal[0], 10) + return defaultVal[0] + } + return val +} + +// MustDuration always returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { + val, err := k.Duration() + if len(defaultVal) > 0 && err != nil { + k.value = defaultVal[0].String() + return defaultVal[0] + } + return val +} + +// MustTimeFormat always parses with given format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { + val, err := k.TimeFormat(format) + if len(defaultVal) > 0 && err != nil { + k.value = defaultVal[0].Format(format) + return defaultVal[0] + } + return val +} + +// MustTime always parses with RFC3339 format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTime(defaultVal ...time.Time) time.Time { + return k.MustTimeFormat(time.RFC3339, defaultVal...) +} + +// In always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) In(defaultVal string, candidates []string) string { + val := k.String() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InFloat64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { + val := k.MustFloat64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt(defaultVal int, candidates []int) int { + val := k.MustInt() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { + val := k.MustInt64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint(defaultVal uint, candidates []uint) uint { + val := k.MustUint() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { + val := k.MustUint64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTimeFormat always parses with given format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. 
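The In* helpers above accept the stored value only when it is one of the listed candidates, otherwise they return the default. A minimal sketch, assuming the package's Load helper:

```go
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

func main() {
	cfg, _ := ini.Load([]byte("level = warn\nworkers = 7\n"))
	sec := cfg.Section("")

	// "warn" is not a candidate, so the default "info" wins.
	fmt.Println(sec.Key("level").In("info", []string{"debug", "info", "error"}))

	// 7 is not in the candidate list, so 4 is returned.
	fmt.Println(sec.Key("workers").InInt(4, []int{1, 2, 4, 8}))
}
```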
+func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { + val := k.MustTimeFormat(format) + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTime always parses with RFC3339 format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { + return k.InTimeFormat(time.RFC3339, defaultVal, candidates) +} + +// RangeFloat64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { + val := k.MustFloat64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt(defaultVal, min, max int) int { + val := k.MustInt() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { + val := k.MustInt64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeTimeFormat checks if value with given format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { + val := k.MustTimeFormat(format) + if val.Unix() < min.Unix() || val.Unix() > max.Unix() { + return defaultVal + } + return val +} + +// RangeTime checks if value with RFC3339 format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { + return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) +} + +// Strings returns list of string divided by given delimiter. +func (k *Key) Strings(delim string) []string { + str := k.String() + if len(str) == 0 { + return []string{} + } + + vals := strings.Split(str, delim) + for i := range vals { + // vals[i] = k.transformValue(strings.TrimSpace(vals[i])) + vals[i] = strings.TrimSpace(vals[i]) + } + return vals +} + +// StringsWithShadows returns list of string divided by given delimiter. +// Shadows will also be appended if any. +func (k *Key) StringsWithShadows(delim string) []string { + vals := k.ValueWithShadows() + results := make([]string, 0, len(vals)*2) + for i := range vals { + if len(vals) == 0 { + continue + } + + results = append(results, strings.Split(vals[i], delim)...) + } + + for i := range results { + results[i] = k.transformValue(strings.TrimSpace(results[i])) + } + return results +} + +// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Float64s(delim string) []float64 { + vals, _ := k.getFloat64s(delim, true, false) + return vals +} + +// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Ints(delim string) []int { + vals, _ := k.parseInts(k.Strings(delim), true, false) + return vals +} + +// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Int64s(delim string) []int64 { + vals, _ := k.parseInt64s(k.Strings(delim), true, false) + return vals +} + +// Uints returns list of uint divided by given delimiter. 
Any invalid input will be treated as zero value. +func (k *Key) Uints(delim string) []uint { + vals, _ := k.getUints(delim, true, false) + return vals +} + +// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Uint64s(delim string) []uint64 { + vals, _ := k.getUint64s(delim, true, false) + return vals +} + +// TimesFormat parses with given format and returns list of time.Time divided by given delimiter. +// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). +func (k *Key) TimesFormat(format, delim string) []time.Time { + vals, _ := k.getTimesFormat(format, delim, true, false) + return vals +} + +// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter. +// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). +func (k *Key) Times(delim string) []time.Time { + return k.TimesFormat(time.RFC3339, delim) +} + +// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then +// it will not be included to result list. +func (k *Key) ValidFloat64s(delim string) []float64 { + vals, _ := k.getFloat64s(delim, false, false) + return vals +} + +// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will +// not be included to result list. +func (k *Key) ValidInts(delim string) []int { + vals, _ := k.parseInts(k.Strings(delim), false, false) + return vals +} + +// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer, +// then it will not be included to result list. +func (k *Key) ValidInt64s(delim string) []int64 { + vals, _ := k.parseInt64s(k.Strings(delim), false, false) + return vals +} + +// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer, +// then it will not be included to result list. +func (k *Key) ValidUints(delim string) []uint { + vals, _ := k.getUints(delim, false, false) + return vals +} + +// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned +// integer, then it will not be included to result list. +func (k *Key) ValidUint64s(delim string) []uint64 { + vals, _ := k.getUint64s(delim, false, false) + return vals +} + +// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter. +func (k *Key) ValidTimesFormat(format, delim string) []time.Time { + vals, _ := k.getTimesFormat(format, delim, false, false) + return vals +} + +// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter. +func (k *Key) ValidTimes(delim string) []time.Time { + return k.ValidTimesFormat(time.RFC3339, delim) +} + +// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictFloat64s(delim string) ([]float64, error) { + return k.getFloat64s(delim, false, true) +} + +// StrictInts returns list of int divided by given delimiter or error on first invalid input. +func (k *Key) StrictInts(delim string) ([]int, error) { + return k.parseInts(k.Strings(delim), false, true) +} + +// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictInt64s(delim string) ([]int64, error) { + return k.parseInt64s(k.Strings(delim), false, true) +} + +// StrictUints returns list of uint divided by given delimiter or error on first invalid input. 
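The Range* and list helpers above differ in how they treat out-of-range or unparseable entries: Ints keeps invalid items as zero values, ValidInts drops them, and StrictInts fails on the first one. A minimal sketch, assuming the package's Load helper:

```go
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

func main() {
	cfg, _ := ini.Load([]byte("threads = 64\nsizes = 1, two, 3\n"))
	sec := cfg.Section("")

	// 64 falls outside [1, 32], so the default 8 is returned.
	fmt.Println(sec.Key("threads").RangeInt(8, 1, 32))

	key := sec.Key("sizes")
	fmt.Println(key.Ints(","))      // [1 0 3]  invalid entries become zero values
	fmt.Println(key.ValidInts(",")) // [1 3]    invalid entries are dropped
	if _, err := key.StrictInts(","); err != nil {
		fmt.Println("strict:", err) // first invalid entry aborts the parse
	}
}
```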
+func (k *Key) StrictUints(delim string) ([]uint, error) { + return k.getUints(delim, false, true) +} + +// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictUint64s(delim string) ([]uint64, error) { + return k.getUint64s(delim, false, true) +} + +// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter +// or error on first invalid input. +func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) { + return k.getTimesFormat(format, delim, false, true) +} + +// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter +// or error on first invalid input. +func (k *Key) StrictTimes(delim string) ([]time.Time, error) { + return k.StrictTimesFormat(time.RFC3339, delim) +} + +// getFloat64s returns list of float64 divided by given delimiter. +func (k *Key) getFloat64s(delim string, addInvalid, returnOnInvalid bool) ([]float64, error) { + strs := k.Strings(delim) + vals := make([]float64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseFloat(str, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// parseInts transforms strings to ints. +func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) { + vals := make([]int, 0, len(strs)) + for _, str := range strs { + val, err := strconv.Atoi(str) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// parseInt64s transforms strings to int64s. +func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) { + vals := make([]int64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseInt(str, 10, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// getUints returns list of uint divided by given delimiter. +func (k *Key) getUints(delim string, addInvalid, returnOnInvalid bool) ([]uint, error) { + strs := k.Strings(delim) + vals := make([]uint, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseUint(str, 10, 0) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, uint(val)) + } + } + return vals, nil +} + +// getUint64s returns list of uint64 divided by given delimiter. +func (k *Key) getUint64s(delim string, addInvalid, returnOnInvalid bool) ([]uint64, error) { + strs := k.Strings(delim) + vals := make([]uint64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseUint(str, 10, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// getTimesFormat parses with given format and returns list of time.Time divided by given delimiter. +func (k *Key) getTimesFormat(format, delim string, addInvalid, returnOnInvalid bool) ([]time.Time, error) { + strs := k.Strings(delim) + vals := make([]time.Time, 0, len(strs)) + for _, str := range strs { + val, err := time.Parse(format, str) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// SetValue changes key value. 
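SetValue below keeps the section's keysHash in sync, and the WriteToIndent changes earlier in this diff align the "=" signs when PrettyFormat is on. A minimal sketch, assuming the package's Empty constructor and the WriteTo wrapper around WriteToIndent:

```go
package main

import (
	"os"

	"github.com/go-ini/ini"
)

func main() {
	cfg := ini.Empty()
	sec, _ := cfg.NewSection("server")
	sec.NewKey("host", "127.0.0.1")
	sec.NewKey("read_timeout", "30s")

	// SetValue updates both the key and the section's keysHash.
	sec.Key("host").SetValue("0.0.0.0")

	ini.PrettyFormat = true // the default; aligns "=" to the longest key
	if _, err := cfg.WriteTo(os.Stdout); err != nil {
		panic(err)
	}
	// Output (roughly):
	// [server]
	// host         = 0.0.0.0
	// read_timeout = 30s
}
```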
+func (k *Key) SetValue(v string) { + if k.s.f.BlockMode { + k.s.f.lock.Lock() + defer k.s.f.lock.Unlock() + } + + k.value = v + k.s.keysHash[k.name] = v +} diff --git a/vendor/github.com/go-ini/ini/parser.go b/vendor/github.com/go-ini/ini/parser.go new file mode 100644 index 000000000000..673ef80ca267 --- /dev/null +++ b/vendor/github.com/go-ini/ini/parser.go @@ -0,0 +1,358 @@ +// Copyright 2015 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + "unicode" +) + +type tokenType int + +const ( + _TOKEN_INVALID tokenType = iota + _TOKEN_COMMENT + _TOKEN_SECTION + _TOKEN_KEY +) + +type parser struct { + buf *bufio.Reader + isEOF bool + count int + comment *bytes.Buffer +} + +func newParser(r io.Reader) *parser { + return &parser{ + buf: bufio.NewReader(r), + count: 1, + comment: &bytes.Buffer{}, + } +} + +// BOM handles header of UTF-8, UTF-16 LE and UTF-16 BE's BOM format. +// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding +func (p *parser) BOM() error { + mask, err := p.buf.Peek(2) + if err != nil && err != io.EOF { + return err + } else if len(mask) < 2 { + return nil + } + + switch { + case mask[0] == 254 && mask[1] == 255: + fallthrough + case mask[0] == 255 && mask[1] == 254: + p.buf.Read(mask) + case mask[0] == 239 && mask[1] == 187: + mask, err := p.buf.Peek(3) + if err != nil && err != io.EOF { + return err + } else if len(mask) < 3 { + return nil + } + if mask[2] == 191 { + p.buf.Read(mask) + } + } + return nil +} + +func (p *parser) readUntil(delim byte) ([]byte, error) { + data, err := p.buf.ReadBytes(delim) + if err != nil { + if err == io.EOF { + p.isEOF = true + } else { + return nil, err + } + } + return data, nil +} + +func cleanComment(in []byte) ([]byte, bool) { + i := bytes.IndexAny(in, "#;") + if i == -1 { + return nil, false + } + return in[i:], true +} + +func readKeyName(in []byte) (string, int, error) { + line := string(in) + + // Check if key name surrounded by quotes. 
+ var keyQuote string + if line[0] == '"' { + if len(line) > 6 && string(line[0:3]) == `"""` { + keyQuote = `"""` + } else { + keyQuote = `"` + } + } else if line[0] == '`' { + keyQuote = "`" + } + + // Get out key name + endIdx := -1 + if len(keyQuote) > 0 { + startIdx := len(keyQuote) + // FIXME: fail case -> """"""name"""=value + pos := strings.Index(line[startIdx:], keyQuote) + if pos == -1 { + return "", -1, fmt.Errorf("missing closing key quote: %s", line) + } + pos += startIdx + + // Find key-value delimiter + i := strings.IndexAny(line[pos+startIdx:], "=:") + if i < 0 { + return "", -1, ErrDelimiterNotFound{line} + } + endIdx = pos + i + return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil + } + + endIdx = strings.IndexAny(line, "=:") + if endIdx < 0 { + return "", -1, ErrDelimiterNotFound{line} + } + return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil +} + +func (p *parser) readMultilines(line, val, valQuote string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := string(data) + + pos := strings.LastIndex(next, valQuote) + if pos > -1 { + val += next[:pos] + + comment, has := cleanComment([]byte(next[pos:])) + if has { + p.comment.Write(bytes.TrimSpace(comment)) + } + break + } + val += next + if p.isEOF { + return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next) + } + } + return val, nil +} + +func (p *parser) readContinuationLines(val string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := strings.TrimSpace(string(data)) + + if len(next) == 0 { + break + } + val += next + if val[len(val)-1] != '\\' { + break + } + val = val[:len(val)-1] + } + return val, nil +} + +// hasSurroundedQuote check if and only if the first and last characters +// are quotes \" or \'. +// It returns false if any other parts also contain same kind of quotes. +func hasSurroundedQuote(in string, quote byte) bool { + return len(in) > 2 && in[0] == quote && in[len(in)-1] == quote && + strings.IndexByte(in[1:], quote) == len(in)-2 +} + +func (p *parser) readValue(in []byte, ignoreContinuation bool) (string, error) { + line := strings.TrimLeftFunc(string(in), unicode.IsSpace) + if len(line) == 0 { + return "", nil + } + + var valQuote string + if len(line) > 3 && string(line[0:3]) == `"""` { + valQuote = `"""` + } else if line[0] == '`' { + valQuote = "`" + } + + if len(valQuote) > 0 { + startIdx := len(valQuote) + pos := strings.LastIndex(line[startIdx:], valQuote) + // Check for multi-line value + if pos == -1 { + return p.readMultilines(line, line[startIdx:], valQuote) + } + + return line[startIdx : pos+startIdx], nil + } + + // Won't be able to reach here if value only contains whitespace. + line = strings.TrimSpace(line) + + // Check continuation lines when desired. + if !ignoreContinuation && line[len(line)-1] == '\\' { + return p.readContinuationLines(line[:len(line)-1]) + } + + i := strings.IndexAny(line, "#;") + if i > -1 { + p.comment.WriteString(line[i:]) + line = strings.TrimSpace(line[:i]) + } + + // Trim single quotes + if hasSurroundedQuote(line, '\'') || + hasSurroundedQuote(line, '"') { + line = line[1 : len(line)-1] + } + return line, nil +} + +// parse parses data through an io.Reader. +func (f *File) parse(reader io.Reader) (err error) { + p := newParser(reader) + if err = p.BOM(); err != nil { + return fmt.Errorf("BOM: %v", err) + } + + // Ignore error because default section name is never empty string. 
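readValue, readMultilines and readContinuationLines above cover triple-quoted multi-line values, backslash continuation lines and inline `#`/`;` comments. A minimal sketch of what the parser accepts, assuming the package's Load helper:

```go
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

func main() {
	src := []byte(`
motd  = """Hello,
world"""
hosts = alpha, \
        beta
port  = 8080 ; trailing comment goes into Key.Comment
`)
	cfg, err := ini.Load(src)
	if err != nil {
		panic(err)
	}
	sec := cfg.Section("")
	fmt.Printf("%q\n", sec.Key("motd").String()) // "Hello,\nworld"
	fmt.Println(sec.Key("hosts").String())       // alpha, beta
	fmt.Println(sec.Key("port").String())        // 8080
}
```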
+ section, _ := f.NewSection(DEFAULT_SECTION) + + var line []byte + var inUnparseableSection bool + for !p.isEOF { + line, err = p.readUntil('\n') + if err != nil { + return err + } + + line = bytes.TrimLeftFunc(line, unicode.IsSpace) + if len(line) == 0 { + continue + } + + // Comments + if line[0] == '#' || line[0] == ';' { + // Note: we do not care ending line break, + // it is needed for adding second line, + // so just clean it once at the end when set to value. + p.comment.Write(line) + continue + } + + // Section + if line[0] == '[' { + // Read to the next ']' (TODO: support quoted strings) + // TODO(unknwon): use LastIndexByte when stop supporting Go1.4 + closeIdx := bytes.LastIndex(line, []byte("]")) + if closeIdx == -1 { + return fmt.Errorf("unclosed section: %s", line) + } + + name := string(line[1:closeIdx]) + section, err = f.NewSection(name) + if err != nil { + return err + } + + comment, has := cleanComment(line[closeIdx+1:]) + if has { + p.comment.Write(comment) + } + + section.Comment = strings.TrimSpace(p.comment.String()) + + // Reset aotu-counter and comments + p.comment.Reset() + p.count = 1 + + inUnparseableSection = false + for i := range f.options.UnparseableSections { + if f.options.UnparseableSections[i] == name || + (f.options.Insensitive && strings.ToLower(f.options.UnparseableSections[i]) == strings.ToLower(name)) { + inUnparseableSection = true + continue + } + } + continue + } + + if inUnparseableSection { + section.isRawSection = true + section.rawBody += string(line) + continue + } + + kname, offset, err := readKeyName(line) + if err != nil { + // Treat as boolean key when desired, and whole line is key name. + if IsErrDelimiterNotFound(err) && f.options.AllowBooleanKeys { + kname, err := p.readValue(line, f.options.IgnoreContinuation) + if err != nil { + return err + } + key, err := section.NewBooleanKey(kname) + if err != nil { + return err + } + key.Comment = strings.TrimSpace(p.comment.String()) + p.comment.Reset() + continue + } + return err + } + + // Auto increment. + isAutoIncr := false + if kname == "-" { + isAutoIncr = true + kname = "#" + strconv.Itoa(p.count) + p.count++ + } + + value, err := p.readValue(line[offset:], f.options.IgnoreContinuation) + if err != nil { + return err + } + + key, err := section.NewKey(kname, value) + if err != nil { + return err + } + key.isAutoIncrement = isAutoIncr + key.Comment = strings.TrimSpace(p.comment.String()) + p.comment.Reset() + } + return nil +} diff --git a/vendor/github.com/go-ini/ini/section.go b/vendor/github.com/go-ini/ini/section.go new file mode 100644 index 000000000000..c9fa27e9ca7c --- /dev/null +++ b/vendor/github.com/go-ini/ini/section.go @@ -0,0 +1,234 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "errors" + "fmt" + "strings" +) + +// Section represents a config section. 
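The parse loop above also handles boolean keys (a line with no delimiter, when AllowBooleanKeys is set), auto-increment "-" keys, and sections listed as unparseable, whose raw body is kept verbatim. A minimal sketch, assuming LoadSources/LoadOptions; the section and key names are illustrative:

```go
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

func main() {
	src := []byte(`
[features]
skip-host-checks
- = first
- = second

[COMMENTS]
<1><L.Slide#2> This is a tech talk.
`)
	cfg, err := ini.LoadSources(ini.LoadOptions{
		AllowBooleanKeys:    true,
		UnparseableSections: []string{"COMMENTS"},
	}, src)
	if err != nil {
		panic(err)
	}

	// Boolean keys are stored with the value "true".
	fmt.Println(cfg.Section("features").Key("skip-host-checks").MustBool(false)) // true
	// "-" keys are renamed "#1", "#2", ... by the auto-increment counter.
	fmt.Println(cfg.Section("features").KeyStrings()) // [skip-host-checks #1 #2]
	// Unparseable sections keep their raw body instead of key/value pairs.
	fmt.Println(cfg.Section("COMMENTS").Body())
}
```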
+type Section struct { + f *File + Comment string + name string + keys map[string]*Key + keyList []string + keysHash map[string]string + + isRawSection bool + rawBody string +} + +func newSection(f *File, name string) *Section { + return &Section{ + f: f, + name: name, + keys: make(map[string]*Key), + keyList: make([]string, 0, 10), + keysHash: make(map[string]string), + } +} + +// Name returns name of Section. +func (s *Section) Name() string { + return s.name +} + +// Body returns rawBody of Section if the section was marked as unparseable. +// It still follows the other rules of the INI format surrounding leading/trailing whitespace. +func (s *Section) Body() string { + return strings.TrimSpace(s.rawBody) +} + +// NewKey creates a new key to given section. +func (s *Section) NewKey(name, val string) (*Key, error) { + if len(name) == 0 { + return nil, errors.New("error creating new key: empty key name") + } else if s.f.options.Insensitive { + name = strings.ToLower(name) + } + + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + if inSlice(name, s.keyList) { + if s.f.options.AllowShadows { + if err := s.keys[name].addShadow(val); err != nil { + return nil, err + } + } else { + s.keys[name].value = val + } + return s.keys[name], nil + } + + s.keyList = append(s.keyList, name) + s.keys[name] = newKey(s, name, val) + s.keysHash[name] = val + return s.keys[name], nil +} + +// NewBooleanKey creates a new boolean type key to given section. +func (s *Section) NewBooleanKey(name string) (*Key, error) { + key, err := s.NewKey(name, "true") + if err != nil { + return nil, err + } + + key.isBooleanType = true + return key, nil +} + +// GetKey returns key in section by given name. +func (s *Section) GetKey(name string) (*Key, error) { + // FIXME: change to section level lock? + if s.f.BlockMode { + s.f.lock.RLock() + } + if s.f.options.Insensitive { + name = strings.ToLower(name) + } + key := s.keys[name] + if s.f.BlockMode { + s.f.lock.RUnlock() + } + + if key == nil { + // Check if it is a child-section. + sname := s.name + for { + if i := strings.LastIndex(sname, "."); i > -1 { + sname = sname[:i] + sec, err := s.f.GetSection(sname) + if err != nil { + continue + } + return sec.GetKey(name) + } else { + break + } + } + return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name) + } + return key, nil +} + +// HasKey returns true if section contains a key with given name. +func (s *Section) HasKey(name string) bool { + key, _ := s.GetKey(name) + return key != nil +} + +// Haskey is a backwards-compatible name for HasKey. +func (s *Section) Haskey(name string) bool { + return s.HasKey(name) +} + +// HasValue returns true if section contains given raw value. +func (s *Section) HasValue(value string) bool { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + for _, k := range s.keys { + if value == k.value { + return true + } + } + return false +} + +// Key assumes named Key exists in section and returns a zero-value when not. +func (s *Section) Key(name string) *Key { + key, err := s.GetKey(name) + if err != nil { + // It's OK here because the only possible error is empty key name, + // but if it's empty, this piece of code won't be executed. + key, _ = s.NewKey(name, "") + return key + } + return key +} + +// Keys returns list of keys of section. 
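GetKey above walks up dotted section names, so a child section such as [server.staging] inherits any key it does not override. A minimal sketch, assuming the package's Load helper:

```go
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

func main() {
	src := []byte(`
[server]
protocol = http
port     = 8080

[server.staging]
port = 9090
`)
	cfg, _ := ini.Load(src)

	child := cfg.Section("server.staging")
	fmt.Println(child.Key("port").String())     // 9090 (own key)
	fmt.Println(child.Key("protocol").String()) // http (inherited from [server])
}
```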
+func (s *Section) Keys() []*Key { + keys := make([]*Key, len(s.keyList)) + for i := range s.keyList { + keys[i] = s.Key(s.keyList[i]) + } + return keys +} + +// ParentKeys returns list of keys of parent section. +func (s *Section) ParentKeys() []*Key { + var parentKeys []*Key + sname := s.name + for { + if i := strings.LastIndex(sname, "."); i > -1 { + sname = sname[:i] + sec, err := s.f.GetSection(sname) + if err != nil { + continue + } + parentKeys = append(parentKeys, sec.Keys()...) + } else { + break + } + + } + return parentKeys +} + +// KeyStrings returns list of key names of section. +func (s *Section) KeyStrings() []string { + list := make([]string, len(s.keyList)) + copy(list, s.keyList) + return list +} + +// KeysHash returns keys hash consisting of names and values. +func (s *Section) KeysHash() map[string]string { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + hash := map[string]string{} + for key, value := range s.keysHash { + hash[key] = value + } + return hash +} + +// DeleteKey deletes a key from section. +func (s *Section) DeleteKey(name string) { + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + for i, k := range s.keyList { + if k == name { + s.keyList = append(s.keyList[:i], s.keyList[i+1:]...) + delete(s.keys, name) + return + } + } +} diff --git a/vendor/github.com/go-ini/ini/struct.go b/vendor/github.com/go-ini/ini/struct.go index c11843710103..509c682fa6df 100644 --- a/vendor/github.com/go-ini/ini/struct.go +++ b/vendor/github.com/go-ini/ini/struct.go @@ -19,6 +19,7 @@ import ( "errors" "fmt" "reflect" + "strings" "time" "unicode" ) @@ -76,10 +77,69 @@ func parseDelim(actual string) string { var reflectTime = reflect.TypeOf(time.Now()).Kind() +// setSliceWithProperType sets proper values to slice based on its type. +func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow bool) error { + var strs []string + if allowShadow { + strs = key.StringsWithShadows(delim) + } else { + strs = key.Strings(delim) + } + + numVals := len(strs) + if numVals == 0 { + return nil + } + + var vals interface{} + + sliceOf := field.Type().Elem().Kind() + switch sliceOf { + case reflect.String: + vals = strs + case reflect.Int: + vals, _ = key.parseInts(strs, true, false) + case reflect.Int64: + vals, _ = key.parseInt64s(strs, true, false) + case reflect.Uint: + vals = key.Uints(delim) + case reflect.Uint64: + vals = key.Uint64s(delim) + case reflect.Float64: + vals = key.Float64s(delim) + case reflectTime: + vals = key.Times(delim) + default: + return fmt.Errorf("unsupported type '[]%s'", sliceOf) + } + + slice := reflect.MakeSlice(field.Type(), numVals, numVals) + for i := 0; i < numVals; i++ { + switch sliceOf { + case reflect.String: + slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i])) + case reflect.Int: + slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i])) + case reflect.Int64: + slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i])) + case reflect.Uint: + slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i])) + case reflect.Uint64: + slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i])) + case reflect.Float64: + slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i])) + case reflectTime: + slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i])) + } + } + field.Set(slice) + return nil +} + // setWithProperType sets proper value to field based on its type, // but it does not return error for failing parsing, // because we want to use default value that is already assigned to strcut. 
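setSliceWithProperType and setWithProperType above drive struct mapping: the `ini` tag names the key, the `delim` tag splits slice values, and time.Duration fields are parsed natively. A minimal sketch using the package-level MapTo entry point that appears in this hunk:

```go
package main

import (
	"fmt"
	"time"

	"github.com/go-ini/ini"
)

type Server struct {
	Name     string        `ini:"name"`
	Port     int           `ini:"port"`
	Timeout  time.Duration `ini:"timeout"`
	Backends []string      `ini:"backends" delim:","`
}

func main() {
	src := []byte(`
name     = api
port     = 8080
timeout  = 30s
backends = alpha,beta,gamma
`)
	var s Server
	if err := ini.MapTo(&s, src); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", s) // {Name:api Port:8080 Timeout:30s Backends:[alpha beta gamma]}
}
```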
-func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error { +func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow bool) error { switch t.Kind() { case reflect.String: if len(key.String()) == 0 { @@ -94,20 +154,22 @@ func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim stri field.SetBool(boolVal) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: durationVal, err := key.Duration() - if err == nil { + // Skip zero value + if err == nil && int(durationVal) > 0 { field.Set(reflect.ValueOf(durationVal)) return nil } intVal, err := key.Int64() - if err != nil { + if err != nil || intVal == 0 { return nil } field.SetInt(intVal) // byte is an alias for uint8, so supporting uint8 breaks support for byte case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: durationVal, err := key.Duration() - if err == nil { + // Skip zero value + if err == nil && int(durationVal) > 0 { field.Set(reflect.ValueOf(durationVal)) return nil } @@ -118,7 +180,7 @@ func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim stri } field.SetUint(uintVal) - case reflect.Float64: + case reflect.Float32, reflect.Float64: floatVal, err := key.Float64() if err != nil { return nil @@ -131,35 +193,25 @@ func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim stri } field.Set(reflect.ValueOf(timeVal)) case reflect.Slice: - vals := key.Strings(delim) - numVals := len(vals) - if numVals == 0 { - return nil - } - - sliceOf := field.Type().Elem().Kind() - - var times []time.Time - if sliceOf == reflectTime { - times = key.Times(delim) - } - - slice := reflect.MakeSlice(field.Type(), numVals, numVals) - for i := 0; i < numVals; i++ { - switch sliceOf { - case reflectTime: - slice.Index(i).Set(reflect.ValueOf(times[i])) - default: - slice.Index(i).Set(reflect.ValueOf(vals[i])) - } - } - field.Set(slice) + return setSliceWithProperType(key, field, delim, allowShadow) default: return fmt.Errorf("unsupported type '%s'", t) } return nil } +func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool) { + opts := strings.SplitN(tag, ",", 3) + rawName = opts[0] + if len(opts) > 1 { + omitEmpty = opts[1] == "omitempty" + } + if len(opts) > 2 { + allowShadow = opts[2] == "allowshadow" + } + return rawName, omitEmpty, allowShadow +} + func (s *Section) mapTo(val reflect.Value) error { if val.Kind() == reflect.Ptr { val = val.Elem() @@ -175,7 +227,8 @@ func (s *Section) mapTo(val reflect.Value) error { continue } - fieldName := s.parseFieldName(tpField.Name, tag) + rawName, _, allowShadow := parseTagOptions(tag) + fieldName := s.parseFieldName(tpField.Name, rawName) if len(fieldName) == 0 || !field.CanSet() { continue } @@ -196,7 +249,8 @@ func (s *Section) mapTo(val reflect.Value) error { } if key, err := s.GetKey(fieldName); err == nil { - if err = setWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil { + delim := parseDelim(tpField.Tag.Get("delim")) + if err = setWithProperType(tpField.Type, key, field, delim, allowShadow); err != nil { return fmt.Errorf("error mapping field(%s): %v", fieldName, err) } } @@ -238,40 +292,81 @@ func MapTo(v, source interface{}, others ...interface{}) error { return MapToWithMapper(v, nil, source, others...) } -// reflectWithProperType does the opposite thing with setWithProperType. +// reflectSliceWithProperType does the opposite thing as setSliceWithProperType. 
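parseTagOptions above reads two extra comma-separated tag options, `omitempty` and `allowshadow`; with allowshadow a shadowed key is gathered into a slice field via StringsWithShadows. A minimal sketch, assuming LoadSources plus File.MapTo, which the package-level MapTo delegates to:

```go
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

type Config struct {
	// The third tag option switches this field to StringsWithShadows.
	Peers []string `ini:"peer,omitempty,allowshadow" delim:","`
}

func main() {
	src := []byte(`
peer = alpha
peer = beta
peer = gamma
`)
	cfg, err := ini.LoadSources(ini.LoadOptions{AllowShadows: true}, src)
	if err != nil {
		panic(err)
	}
	var c Config
	if err := cfg.MapTo(&c); err != nil {
		panic(err)
	}
	fmt.Println(c.Peers) // [alpha beta gamma]
}
```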
+func reflectSliceWithProperType(key *Key, field reflect.Value, delim string) error { + slice := field.Slice(0, field.Len()) + if field.Len() == 0 { + return nil + } + + var buf bytes.Buffer + sliceOf := field.Type().Elem().Kind() + for i := 0; i < field.Len(); i++ { + switch sliceOf { + case reflect.String: + buf.WriteString(slice.Index(i).String()) + case reflect.Int, reflect.Int64: + buf.WriteString(fmt.Sprint(slice.Index(i).Int())) + case reflect.Uint, reflect.Uint64: + buf.WriteString(fmt.Sprint(slice.Index(i).Uint())) + case reflect.Float64: + buf.WriteString(fmt.Sprint(slice.Index(i).Float())) + case reflectTime: + buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339)) + default: + return fmt.Errorf("unsupported type '[]%s'", sliceOf) + } + buf.WriteString(delim) + } + key.SetValue(buf.String()[:buf.Len()-1]) + return nil +} + +// reflectWithProperType does the opposite thing as setWithProperType. func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error { switch t.Kind() { case reflect.String: key.SetValue(field.String()) - case reflect.Bool, - reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, - reflect.Float64, - reflectTime: - key.SetValue(fmt.Sprint(field)) + case reflect.Bool: + key.SetValue(fmt.Sprint(field.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + key.SetValue(fmt.Sprint(field.Int())) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + key.SetValue(fmt.Sprint(field.Uint())) + case reflect.Float32, reflect.Float64: + key.SetValue(fmt.Sprint(field.Float())) + case reflectTime: + key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339))) case reflect.Slice: - vals := field.Slice(0, field.Len()) - if field.Len() == 0 { - return nil - } - - var buf bytes.Buffer - isTime := fmt.Sprint(field.Type()) == "[]time.Time" - for i := 0; i < field.Len(); i++ { - if isTime { - buf.WriteString(vals.Index(i).Interface().(time.Time).Format(time.RFC3339)) - } else { - buf.WriteString(fmt.Sprint(vals.Index(i))) - } - buf.WriteString(delim) - } - key.SetValue(buf.String()[:buf.Len()-1]) + return reflectSliceWithProperType(key, field, delim) default: return fmt.Errorf("unsupported type '%s'", t) } return nil } +// CR: copied from encoding/json/encode.go with modifications of time.Time support. +// TODO: add more test coverage. 
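reflectWithProperType and reflectSliceWithProperType above serialise a struct back into keys, and isEmptyValue (just below) is what lets `omitempty` fields drop out of the output. A minimal sketch, assuming the package-level ReflectFrom helper and the Empty constructor:

```go
package main

import (
	"os"

	"github.com/go-ini/ini"
)

type Note struct {
	Title string   `ini:"title"`
	Tags  []string `ini:"tags,omitempty" delim:","`
	Draft bool     `ini:"draft,omitempty"`
}

func main() {
	cfg := ini.Empty()
	n := Note{Title: "hello"} // Tags and Draft are zero values
	if err := ini.ReflectFrom(cfg, &n); err != nil {
		panic(err)
	}
	if _, err := cfg.WriteTo(os.Stdout); err != nil {
		panic(err)
	}
	// Output: "title = hello" only; tags and draft are skipped
	// because of omitempty + isEmptyValue.
}
```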
+func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflectTime: + return v.Interface().(time.Time).IsZero() + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + func (s *Section) reflectFrom(val reflect.Value) error { if val.Kind() == reflect.Ptr { val = val.Elem() @@ -287,13 +382,18 @@ func (s *Section) reflectFrom(val reflect.Value) error { continue } - fieldName := s.parseFieldName(tpField.Name, tag) + opts := strings.SplitN(tag, ",", 2) + if len(opts) == 2 && opts[1] == "omitempty" && isEmptyValue(field) { + continue + } + + fieldName := s.parseFieldName(tpField.Name, opts[0]) if len(fieldName) == 0 || !field.CanSet() { continue } if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) || - (tpField.Type.Kind() == reflect.Struct) { + (tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") { // Note: The only error here is section doesn't exist. sec, err := s.f.GetSection(fieldName) if err != nil { @@ -301,7 +401,7 @@ func (s *Section) reflectFrom(val reflect.Value) error { sec, _ = s.f.NewSection(fieldName) } if err = sec.reflectFrom(field); err != nil { - return fmt.Errorf("error reflecting field(%s): %v", fieldName, err) + return fmt.Errorf("error reflecting field (%s): %v", fieldName, err) } continue } @@ -312,7 +412,7 @@ func (s *Section) reflectFrom(val reflect.Value) error { key, _ = s.NewKey(fieldName, "") } if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil { - return fmt.Errorf("error reflecting field(%s): %v", fieldName, err) + return fmt.Errorf("error reflecting field (%s): %v", fieldName, err) } } diff --git a/vendor/github.com/go-openapi/loads/.editorconfig b/vendor/github.com/go-openapi/loads/.editorconfig new file mode 100644 index 000000000000..3152da69a5d7 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-openapi/loads/.pullapprove.yml b/vendor/github.com/go-openapi/loads/.pullapprove.yml deleted file mode 100644 index 5ec183e22442..000000000000 --- a/vendor/github.com/go-openapi/loads/.pullapprove.yml +++ /dev/null @@ -1,13 +0,0 @@ -approve_by_comment: true -approve_regex: '^(:shipit:|:\+1:|\+1|LGTM|lgtm|Approved)' -reject_regex: ^[Rr]ejected -reset_on_push: false -reviewers: - members: - - casualjim - - chancez - - frapposelli - - vburenin - - pytlesk4 - name: pullapprove - required: 1 diff --git a/vendor/github.com/go-openapi/loads/.travis.yml b/vendor/github.com/go-openapi/loads/.travis.yml new file mode 
100644 index 000000000000..b0d357e659f9 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/.travis.yml @@ -0,0 +1,16 @@ +language: go +go: +- 1.8 +install: +- go get -u github.com/stretchr/testify +- go get -u github.com/go-openapi/analysis +- go get -u github.com/go-openapi/spec +- go get -u github.com/go-openapi/swag +- go get -u gopkg.in/yaml.v2 +script: +- ./hack/coverage +after_success: +- bash <(curl -s https://codecov.io/bash) +notifications: + slack: + secure: OxkPwVp35qBTUilgWC8xykSj+sGMcj0h8IIOKD+Rflx2schZVlFfdYdyVBM+s9OqeOfvtuvnR9v1Ye2rPKAvcjWdC4LpRGUsgmItZaI6Um8Aj6+K9udCw5qrtZVfOVmRu8LieH//XznWWKdOultUuniW0MLqw5+II87Gd00RWbCGi0hk0PykHe7uK+PDA2BEbqyZ2WKKYCvfB3j+0nrFOHScXqnh0V05l2E83J4+Sgy1fsPy+1WdX58ZlNBG333ibaC1FS79XvKSmTgKRkx3+YBo97u6ZtUmJa5WZjf2OdLG3KIckGWAv6R5xgxeU31N0Ng8L332w/Edpp2O/M2bZwdnKJ8hJQikXIAQbICbr+lTDzsoNzMdEIYcHpJ5hjPbiUl3Bmd+Jnsjf5McgAZDiWIfpCKZ29tPCEkVwRsOCqkyPRMNMzHHmoja495P5jR+ODS7+J8RFg5xgcnOgpP9D4Wlhztlf5WyZMpkLxTUD+bZq2SRf50HfHFXTkfq22zPl3d1eq0yrLwh/Z/fWKkfb6SyysROL8y6s8u3dpFX1YHSg0BR6i913h4aoZw9B2BG27cafLLTwKYsp2dFo1PWl4O6u9giFJIeqwloZHLKKrwh0cBFhB7RH0I58asxkZpCH6uWjJierahmHe7iS+E6i+9oCHkOZ59hmCYNimIs3hM= diff --git a/vendor/github.com/go-openapi/loads/README.md b/vendor/github.com/go-openapi/loads/README.md index 9d5c899970ef..6dbb8342e16f 100644 --- a/vendor/github.com/go-openapi/loads/README.md +++ b/vendor/github.com/go-openapi/loads/README.md @@ -1,4 +1,4 @@ -# Loads OAI specs [![Build Status](https://ci.vmware.run/api/badges/go-openapi/loads/status.svg)](https://ci.vmware.run/go-openapi/loads) [![Coverage](https://coverage.vmware.run/badges/go-openapi/loads/coverage.svg)](https://coverage.vmware.run/go-openapi/loads) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +# Loads OAI specs [![Build Status](https://travis-ci.org/go-openapi/loads.svg?branch=master)](https://travis-ci.org/go-openapi/loads) [![codecov](https://codecov.io/gh/go-openapi/loads/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/loads) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) [![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/loads/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/loads?status.svg)](http://godoc.org/github.com/go-openapi/loads) diff --git a/vendor/github.com/go-openapi/loads/spec.go b/vendor/github.com/go-openapi/loads/spec.go index ff1ee1c9faa4..6d967389b11a 100644 --- a/vendor/github.com/go-openapi/loads/spec.go +++ b/vendor/github.com/go-openapi/loads/spec.go @@ -15,10 +15,13 @@ package loads import ( + "bytes" "encoding/json" "fmt" "net/url" + "path/filepath" + "github.com/go-openapi/analysis" "github.com/go-openapi/spec" "github.com/go-openapi/swag" @@ -39,7 +42,17 @@ type DocLoader func(string) (json.RawMessage, error) // DocMatcher represents a predicate to check if a loader matches type DocMatcher func(string) bool -var loaders = &loader{Match: func(_ string) bool { return true }, Fn: JSONDoc} +var ( + loaders *loader + defaultLoader *loader +) + +func init() { + defaultLoader = &loader{Match: func(_ string) bool { return true }, Fn: JSONDoc} + loaders = defaultLoader + spec.PathLoader = loaders.Fn + AddLoader(swag.YAMLMatcher, swag.YAMLDoc) +} // AddLoader for a document func AddLoader(predicate DocMatcher, load DocLoader) { @@ -49,7 +62,7 @@ func AddLoader(predicate DocMatcher, load DocLoader) { Fn: load, Next: prev, } - + spec.PathLoader = loaders.Fn } type loader 
struct { @@ -71,11 +84,12 @@ func JSONSpec(path string) (*Document, error) { // Document represents a swagger spec document type Document struct { // specAnalyzer - Analyzer *analysis.Spec - spec *spec.Swagger - origSpec *spec.Swagger - schema *spec.Schema - raw json.RawMessage + Analyzer *analysis.Spec + spec *spec.Swagger + specFilePath string + origSpec *spec.Swagger + schema *spec.Schema + raw json.RawMessage } // Spec loads a new spec document @@ -84,23 +98,39 @@ func Spec(path string) (*Document, error) { if err != nil { return nil, err } + var lastErr error for l := loaders.Next; l != nil; l = l.Next { if loaders.Match(specURL.Path) { b, err2 := loaders.Fn(path) if err2 != nil { - return nil, err2 + lastErr = err2 + continue + } + doc, err := Analyzed(b, "") + if err != nil { + return nil, err } - return Analyzed(b, "") + if doc != nil { + doc.specFilePath = path + } + return doc, nil } } - b, err := loaders.Fn(path) + if lastErr != nil { + return nil, lastErr + } + b, err := defaultLoader.Fn(path) if err != nil { return nil, err } - return Analyzed(b, "") -} -var swag20Schema = spec.MustLoadSwagger20Schema() + document, err := Analyzed(b, "") + if document != nil { + document.specFilePath = path + } + + return document, err +} // Analyzed creates a new analyzed spec document func Analyzed(data json.RawMessage, version string) (*Document, error) { @@ -111,40 +141,66 @@ func Analyzed(data json.RawMessage, version string) (*Document, error) { return nil, fmt.Errorf("spec version %q is not supported", version) } + raw := data + trimmed := bytes.TrimSpace(data) + if len(trimmed) > 0 { + if trimmed[0] != '{' && trimmed[0] != '[' { + yml, err := swag.BytesToYAMLDoc(trimmed) + if err != nil { + return nil, fmt.Errorf("analyzed: %v", err) + } + d, err := swag.YAMLToJSON(yml) + if err != nil { + return nil, fmt.Errorf("analyzed: %v", err) + } + raw = d + } + } + swspec := new(spec.Swagger) - if err := json.Unmarshal(data, swspec); err != nil { + if err := json.Unmarshal(raw, swspec); err != nil { return nil, err } origsqspec := new(spec.Swagger) - if err := json.Unmarshal(data, origsqspec); err != nil { + if err := json.Unmarshal(raw, origsqspec); err != nil { return nil, err } d := &Document{ Analyzer: analysis.New(swspec), - schema: swag20Schema, + schema: spec.MustLoadSwagger20Schema(), spec: swspec, - raw: data, + raw: raw, origSpec: origsqspec, } return d, nil } // Expanded expands the ref fields in the spec document and returns a new spec document -func (d *Document) Expanded() (*Document, error) { +func (d *Document) Expanded(options ...*spec.ExpandOptions) (*Document, error) { swspec := new(spec.Swagger) if err := json.Unmarshal(d.raw, swspec); err != nil { return nil, err } - if err := spec.ExpandSpec(swspec); err != nil { + + var expandOptions *spec.ExpandOptions + if len(options) > 0 { + expandOptions = options[1] + } else { + expandOptions = &spec.ExpandOptions{ + RelativeBase: filepath.Dir(d.specFilePath), + } + } + + if err := spec.ExpandSpec(swspec, expandOptions); err != nil { return nil, err } dd := &Document{ Analyzer: analysis.New(swspec), spec: swspec, - schema: swag20Schema, + schema: spec.MustLoadSwagger20Schema(), raw: d.raw, origSpec: d.origSpec, } @@ -201,3 +257,8 @@ func (d *Document) Pristine() *Document { dd, _ := Analyzed(d.Raw(), d.Version()) return dd } + +// SpecFilePath returns the file path of the spec if one is defined +func (d *Document) SpecFilePath() string { + return d.specFilePath +} diff --git a/vendor/github.com/go-openapi/spec/.drone.sec 
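The loads changes above record the spec's file path on the Document and let Expanded take spec.ExpandOptions, defaulting the relative base for $ref resolution to the directory of that file. A minimal sketch of the loading flow; the file name is illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/go-openapi/loads"
)

func main() {
	// YAML or JSON is detected automatically; the path is remembered on the document.
	doc, err := loads.Spec("swagger.yml")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(doc.Version(), doc.SpecFilePath())

	// Expanded resolves $ref pointers; with no options the relative base
	// defaults to the directory of SpecFilePath, per the code above.
	expanded, err := doc.Expanded()
	if err != nil {
		log.Fatal(err)
	}
	_ = expanded
}
```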
b/vendor/github.com/go-openapi/spec/.drone.sec deleted file mode 100644 index 60c5ebe38c08..000000000000 --- a/vendor/github.com/go-openapi/spec/.drone.sec +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkExMjhHQ00ifQ.Epk8dDFH8U1RPYIPDpajZO26L5zFJ1wnQNGWxVHHo5cXrWF148kENoZzh35FT9cAxxPS_4CeVVpf59EgvCc8bem1puuj0gBZptn-lYa7iXZdI-ESN2Te7nF5VbZfwbnI62nEikYGyxz-ozL_IFuMl-qWek4iLerF8Z_xh0MZOJ_w8Nog7qb2WQov72d997TJv5ZKjWcRYPbnsAy1q60-Cqxq3a6enhcSPXqpK46nYSXGKfHvognWBJ_pxwkEqIBPN6hE4EfNtJjMf2LFKEdYy02nbHz78d-2YZ8wIUSJ-IWIwn3GTzObdGqRed20Qf3JtWTsOespmexDrLSeo3HW6A.7XaHW-Y1jjRAWt_W.S1Adut62RLOYZc-lN02M0MGczEucch3zIr4J1UPBPnZooWzntiE5UaUz0UdhjHVszQE5hTfG-yocKD1rDQGER6qrLtnJVrCm9J3n4lHglM-xOz1eZln1XKrWcAgZnAKaKSzuAa5scPG4iTHW6RwbWi_PWm04tBJ1yazdjaVo3uvuhflwvU9if7uMPMtscrDesbBVvpG89xmeudiFjX-wjsV5oGBIjz6ukEBAMKzNDMqikNoG4SnGenpxUpjUjMkDXxiC3BC8oL2_myeIfFeEOF066DqEN3CLkqBVO25zdpWAF4Ou2jKv--mgGEb_E1aMgiSoAVBnybene0TKn2IJ8rtkyRdmWlLIRKZdDT3v775C1FPK6-tYzS7NVg9nnuvpta5PhzYNkqI1Ie74Sl0I-RFClhsdx9dLDhoFEKCx2etC4UDX9jhj2u0Y2MrL76dRGE9kEV1hL1fh6HMvS4ZAAWw3Qce4skCjcL-2YyIOHzKjgLGkZsR5cTUQwCJyacVkdHUOUKFdDGZaUzWkFyeZ1oyrlG2d52svaplpU5-vCOVbWkqUN9rOALGPTC51Ur0L7DFx29aDImhaxZqTe2t9mcdqY7VLcO3JgUiD3JKsEet7s2EDeN44MqITv9KBS8wqJW4.sRv4ov0wB0IxTHw90kJy-A \ No newline at end of file diff --git a/vendor/github.com/go-openapi/spec/.drone.yml b/vendor/github.com/go-openapi/spec/.drone.yml deleted file mode 100644 index 6d04427372d1..000000000000 --- a/vendor/github.com/go-openapi/spec/.drone.yml +++ /dev/null @@ -1,35 +0,0 @@ -clone: - path: github.com/go-openapi/spec - -matrix: - GO_VERSION: - - "1.6" - -build: - integration: - image: golang:$$GO_VERSION - pull: true - commands: - - go get -u github.com/stretchr/testify/assert - - go get -u gopkg.in/yaml.v2 - - go get -u github.com/go-openapi/swag - - go get -u github.com/go-openapi/jsonpointer - - go get -u github.com/go-openapi/jsonreference - - go test -race - - go test -v -cover -coverprofile=coverage.out -covermode=count ./... 
- -notify: - slack: - channel: bots - webhook_url: $$SLACK_URL - username: drone - -publish: - coverage: - server: https://coverage.vmware.run - token: $$GITHUB_TOKEN - # threshold: 70 - # must_increase: true - when: - matrix: - GO_VERSION: "1.6" diff --git a/vendor/github.com/go-openapi/spec/.editorconfig b/vendor/github.com/go-openapi/spec/.editorconfig new file mode 100644 index 000000000000..3152da69a5d7 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-openapi/spec/.pullapprove.yml b/vendor/github.com/go-openapi/spec/.pullapprove.yml deleted file mode 100644 index 5ec183e22442..000000000000 --- a/vendor/github.com/go-openapi/spec/.pullapprove.yml +++ /dev/null @@ -1,13 +0,0 @@ -approve_by_comment: true -approve_regex: '^(:shipit:|:\+1:|\+1|LGTM|lgtm|Approved)' -reject_regex: ^[Rr]ejected -reset_on_push: false -reviewers: - members: - - casualjim - - chancez - - frapposelli - - vburenin - - pytlesk4 - name: pullapprove - required: 1 diff --git a/vendor/github.com/go-openapi/spec/.travis.yml b/vendor/github.com/go-openapi/spec/.travis.yml new file mode 100644 index 000000000000..ea80ef2a71e0 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/.travis.yml @@ -0,0 +1,16 @@ +language: go +go: +- 1.7 +install: +- go get -u github.com/stretchr/testify +- go get -u github.com/go-openapi/swag +- go get -u gopkg.in/yaml.v2 +- go get -u github.com/go-openapi/jsonpointer +- go get -u github.com/go-openapi/jsonreference +script: +- go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./... 
+after_success: +- bash <(curl -s https://codecov.io/bash) +notifications: + slack: + secure: QUWvCkBBK09GF7YtEvHHVt70JOkdlNBG0nIKu/5qc4/nW5HP8I2w0SEf/XR2je0eED1Qe3L/AfMCWwrEj+IUZc3l4v+ju8X8R3Lomhme0Eb0jd1MTMCuPcBT47YCj0M7RON7vXtbFfm1hFJ/jLe5+9FXz0hpXsR24PJc5ZIi/ogNwkaPqG4BmndzecpSh0vc2FJPZUD9LT0I09REY/vXR0oQAalLkW0asGD5taHZTUZq/kBpsNxaAFrLM23i4mUcf33M5fjLpvx5LRICrX/57XpBrDh2TooBU6Qj3CgoY0uPRYUmSNxbVx1czNzl2JtEpb5yjoxfVPQeg0BvQM00G8LJINISR+ohrjhkZmAqchDupAX+yFrxTtORa78CtnIL6z/aTNlgwwVD8kvL/1pFA/JWYmKDmz93mV/+6wubGzNSQCstzjkFA4/iZEKewKUoRIAi/fxyscP6L/rCpmY/4llZZvrnyTqVbt6URWpopUpH4rwYqreXAtJxJsfBJIeSmUIiDIOMGkCTvyTEW3fWGmGoqWtSHLoaWDyAIGb7azb+KvfpWtEcoPFWfSWU+LGee0A/YsUhBl7ADB9A0CJEuR8q4BPpKpfLwPKSiKSAXL7zDkyjExyhtgqbSl2jS+rKIHOZNL8JkCcTP2MKMVd563C5rC5FMKqu3S9m2b6380E= diff --git a/vendor/github.com/go-openapi/spec/README.md b/vendor/github.com/go-openapi/spec/README.md index 4b2af124acd0..1d1622082fe5 100644 --- a/vendor/github.com/go-openapi/spec/README.md +++ b/vendor/github.com/go-openapi/spec/README.md @@ -1,5 +1,5 @@ -# OAI object model [![Build Status](https://ci.vmware.run/api/badges/go-openapi/spec/status.svg)](https://ci.vmware.run/go-openapi/spec) [![Coverage](https://coverage.vmware.run/badges/go-openapi/spec/coverage.svg)](https://coverage.vmware.run/go-openapi/spec) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +# OAI object model [![Build Status](https://travis-ci.org/go-openapi/spec.svg?branch=master)](https://travis-ci.org/go-openapi/spec) [![codecov](https://codecov.io/gh/go-openapi/spec/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/spec) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) [![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/spec/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/spec?status.svg)](http://godoc.org/github.com/go-openapi/spec) -The object model for OpenAPI specification documents \ No newline at end of file +The object model for OpenAPI specification documents diff --git a/vendor/github.com/go-openapi/spec/bindata.go b/vendor/github.com/go-openapi/spec/bindata.go index 294cbccf7293..9afb5df194ed 100644 --- a/vendor/github.com/go-openapi/spec/bindata.go +++ b/vendor/github.com/go-openapi/spec/bindata.go @@ -1,17 +1,3 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - // Code generated by go-bindata. 
// sources: // schemas/jsonschema-draft-04.json @@ -83,7 +69,7 @@ func (fi bindataFileInfo) Sys() interface{} { return nil } -var _jsonschemaDraft04JSON = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xcc\x57\x3b\x6f\xdb\x30\x10\xde\xfd\x2b\x04\xa5\x63\x52\xb9\x40\xa7\x6c\x45\xbb\x18\x68\xd1\x0c\xdd\x0c\x0f\xb4\x75\xb2\x19\x50\xa4\x42\x51\x85\x0d\x43\xff\xbd\xa4\xa8\x07\x29\x91\x92\x2d\xbb\x48\xb4\xc4\xe1\xbd\xbe\x3b\xde\x8b\xe7\x45\x20\xbf\x10\xc7\xe1\x73\x10\x1e\x84\xc8\x9e\xa3\xe8\x35\x67\xf4\x29\xdf\x1d\x20\x45\x9f\x19\xdf\x47\x31\x47\x89\x78\x5a\x7e\x8d\xf4\xd9\x43\xf8\xa8\x85\x3e\xe9\xff\x67\x48\xc6\x90\xef\x38\xce\x04\x66\x54\x49\x7f\x67\x1c\x02\xcd\x12\xa4\x20\x50\xad\xa2\xe3\x4e\x30\xc5\x8a\x39\x97\xdc\x1a\x71\x45\xd0\x6c\xdf\x38\x47\x27\x8b\x50\x11\xc5\x29\x03\xa5\x1c\x55\xe4\x47\x9b\x98\x62\xba\x12\x90\x2a\x7d\x5f\x7a\x24\x5c\x9f\x9f\xa5\x83\x1c\x12\xa5\xe2\x21\x0c\xca\x96\xa9\xec\xf8\xc3\x8c\xe5\x12\xd7\x5f\x58\x51\x01\x7b\xe0\x7e\x10\xb8\x66\x18\xc2\xc0\x69\x91\x4a\x8e\xe5\x25\xfa\x7f\x40\x82\x0a\x22\x96\x43\x3b\x88\x90\xdf\x0a\xea\xda\x82\x1d\x19\x91\x8b\xfa\x58\xa5\x21\xc5\x1c\x6b\x9d\x0a\x42\x50\x06\x1b\x27\x8c\x1c\xa7\x19\x81\x3f\xd2\x97\x7c\x68\x1a\x68\xe5\xc0\xba\x8d\x74\x10\x6e\x19\x23\x80\xa8\xfa\xd9\x3a\x1e\x84\xb4\x20\x44\xff\x4d\xb7\xfa\x84\x6d\x5f\x61\x27\xd4\xaf\x5c\x70\x4c\xf7\xa1\xcf\x7e\x45\x9d\x73\xcf\xc6\x65\x36\x7c\x8d\xa9\xf2\xf2\x94\x28\x28\x7e\x2b\xa0\xa1\x0a\x5e\x40\x07\x73\x61\x80\x6d\x6d\x34\x8e\xe9\xd3\x8c\xb3\x0c\xb8\xc0\xbd\xe8\xe9\xa2\xf3\x78\x53\xa3\xec\x01\x49\x18\x4f\x91\xba\xab\xb0\xe0\x38\x74\xc6\xaa\x2b\xca\x7b\x6b\x16\x58\x10\x98\xd4\xeb\x14\xb5\xeb\x7d\x96\x82\x26\x4b\xcf\xe6\x71\x2a\xcf\xb0\x4c\xcd\x2a\xf7\x3d\x6a\x9b\x74\xf3\x56\x5e\x8f\x02\xc7\x1d\x29\x72\x59\x28\xbf\x5a\x16\xfb\xc6\x4d\xfb\xe8\x58\xb3\x8c\x1b\x77\x0a\x77\x86\xa6\xb4\xb4\xf5\x64\x93\xbb\xa0\x24\x88\xe4\x1e\x84\xad\x13\x37\x21\x9c\xd2\x72\x0b\x42\x74\xfc\x09\x74\x2f\x0e\xbd\x9e\x3b\xd5\xbc\x2c\x1f\xaf\xd6\xd0\xb6\x52\xbb\xdf\x22\x21\x80\x4f\xe7\xa8\xb7\x78\xb8\xd4\x7d\x74\x07\x13\xc5\x71\x05\x05\x91\xa6\x91\xf4\x7b\x38\x3d\xe9\x1e\x6e\x1d\xab\xef\x3c\x0c\x74\xbf\x7d\xd5\x6c\xce\x89\xa5\xbe\x8d\xf7\x66\xce\xee\xd1\x86\x67\x80\x34\xad\x8f\xc3\xb3\xae\xc6\x1c\xe3\xb7\xc2\x96\xd9\xb4\x72\x0c\xf0\xab\x92\xe9\x5a\x05\xee\x5c\xb2\x87\xc6\x7f\xa9\x9b\x17\x6b\xb0\xcc\x75\x77\x96\x16\xb7\xcf\x1c\xde\x0a\xcc\x21\x1e\x53\x64\x0e\x73\x4f\x81\xbc\xb8\x07\xa6\xe6\xfa\x50\x55\xe2\x5b\x4d\xad\x4b\xb6\xb6\x81\x49\x77\xc7\xca\x68\x1a\x90\x67\xd7\x78\x3f\x3c\xba\xa3\x8e\xdd\xe8\x7b\xc0\x8a\x21\x03\x1a\x03\xdd\xdd\x11\xd1\x20\xd3\x46\x72\x55\x7d\x93\x0d\xb3\xcf\x34\x52\x46\x03\xd9\x8d\x75\xe2\x0e\x42\xbd\xb9\xdf\xe9\xdd\x34\xb6\x24\x9b\x5b\xa4\x56\x3f\x6b\xac\xd8\x01\x30\x1e\x25\xce\x3a\x77\xc6\x73\xd4\xbd\x96\xc9\xf5\x06\xbc\xca\xf8\x44\xb0\x2e\x09\x5a\xf3\xf5\x3a\x94\x7b\xb7\xa8\x9f\x7f\x17\x8e\x58\x53\xb2\x0e\xfc\xf5\x92\x8c\xc2\x4c\x49\xca\x84\xe7\x7d\x5d\xb6\x2f\x7e\x4f\x79\xba\x96\xe6\x75\xb7\x87\x9b\x0d\xdc\xb5\xbd\xae\xbb\x85\xb8\x8e\x64\x67\xd1\xe8\x18\xe5\xe2\x5f\x00\x00\x00\xff\xff\x4e\x9b\x8d\xdf\x17\x11\x00\x00") +var _jsonschemaDraft04JSON = 
[]byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xc4\x57\x3b\x6f\xdb\x3e\x10\xdf\xf3\x29\x08\x26\x63\xf2\x97\xff\x40\x27\x6f\x45\xbb\x18\x68\xd1\x0c\xdd\x0c\x0f\xb4\x75\xb2\x19\x50\xa4\x42\x51\x81\x0d\x43\xdf\xbd\xa0\xa8\x07\x29\x91\x92\x2d\xbb\x8d\x97\x28\xbc\xd7\xef\x8e\xf7\xe2\xf9\x01\x21\x84\x30\x8d\xf1\x12\xe1\x83\x52\xd9\x32\x8a\xde\x72\xc1\x5f\xf2\xdd\x01\x52\xf2\x9f\x90\xfb\x28\x96\x24\x51\x2f\x8b\x2f\x91\x39\x7b\xc4\xcf\x46\xe8\xc9\xfc\x3f\x43\x32\x86\x7c\x27\x69\xa6\xa8\xe0\x5a\xfa\x9b\x90\x80\x0c\x0b\x4a\x41\x91\x5a\x45\xc7\x9d\x50\x4e\x35\x73\x8e\x97\xc8\x20\xae\x08\x86\xed\xab\x94\xe4\xe4\x10\x2a\xa2\x3a\x65\xa0\x95\x93\x8a\xfc\xec\x12\x53\xca\x57\x0a\x52\xad\xef\xff\x1e\x89\xd6\xe7\x67\x84\x9f\x24\x24\x5a\xc5\x23\x46\x65\xcb\x54\x76\xfc\x38\x13\x39\x55\xf4\x03\x56\x5c\xc1\x1e\x64\x18\x04\xad\x19\x86\x30\x68\x5a\xa4\x78\x89\x16\x97\xe8\xff\x0e\x09\x29\x98\x5a\x0c\xed\x10\xc6\x7e\x69\xa8\x6b\x07\x76\x64\x45\x2e\xea\x63\x45\xe5\xb3\x66\x8e\x8d\x4e\x0d\x01\x95\x68\xe3\x85\x91\xd3\x34\x63\xf0\xfb\x94\x41\x3e\x34\x0d\xbc\x72\x60\xdd\x46\x1a\xe1\xad\x10\x0c\x08\xd7\x9f\xad\xe3\x08\xf3\x82\x31\xf3\x37\xdd\x9a\x13\xb1\x7d\x83\x9d\xd2\x5f\xb9\x92\x94\xef\x71\xc8\x7e\x45\x9d\x73\xcf\xd6\x65\x36\x7c\x8d\xa9\xf2\xf2\x94\x28\x38\x7d\x2f\xa0\xa1\x2a\x59\x40\x07\xf3\xc1\x02\xdb\xda\x68\x1c\x33\xa7\x99\x14\x19\x48\x45\x7b\xd1\x33\x45\x17\xf0\xa6\x46\xd9\x03\x92\x08\x99\x12\x7d\x57\xb8\x90\x14\x7b\x63\xd5\x15\xe5\xbd\x35\x2b\xaa\x18\x4c\xea\xf5\x8a\xba\xf5\x3e\x4b\x41\x93\xa5\x67\xfb\x38\x2d\x98\xa2\x19\x83\x2a\xf7\x03\x6a\x9b\x74\x0b\x56\x5e\x8f\x02\xc7\x1d\x2b\x72\xfa\x01\x3f\x5b\x16\xf7\xc6\x6d\xfb\xe4\x58\xb3\x8c\x1b\xf7\x0a\x77\x86\xa6\xb4\xb4\xf5\xe4\x92\xbb\xa0\x24\x84\xe5\x01\x84\xad\x13\x37\x21\x9c\xd2\x72\x0b\x42\x72\xfc\x01\x7c\xaf\x0e\xbd\x9e\x3b\xd5\xbc\x1c\x1f\xaf\xd6\xd0\xb6\x52\xb7\xdf\x12\xa5\x40\x4e\xe7\x68\xb0\x78\x24\xec\xe1\xe8\x0f\x26\x89\xe3\x0a\x0a\x61\x4d\x23\xe9\xf7\x70\x7e\x32\x3d\xdc\x39\xd6\xbf\xf3\x30\xd0\xfd\xf6\x55\xb3\x79\x27\x96\xfe\x6d\x82\x37\x73\xf6\x8f\x36\x3a\x03\xa4\x6d\x7d\x1c\x9e\x73\x35\xf6\x18\xbf\x15\x76\x4a\x8e\x2b\xcf\x00\xbf\x2a\x99\xae\x55\xe0\xcf\x25\x77\x68\xfc\x95\xba\x79\x75\x06\xcb\x5c\x77\x67\x69\xf1\xfb\x2c\xe1\xbd\xa0\x12\xe2\x31\x45\xf6\x30\x0f\x14\xc8\xab\x7f\x60\x4e\x27\xe0\x3f\xaf\x92\xd0\x6a\x8a\x82\xdb\xc0\xa4\xbb\x63\x65\x34\x0d\x28\xb0\x6b\x7c\x1e\x1e\xd3\x51\xc7\x6e\xf4\x33\x60\xc5\x90\x01\x8f\x81\xef\xee\x88\x68\x90\x69\x23\xb9\x8a\x2e\x69\x98\x7d\xa6\x91\x32\x1a\xc8\x6e\x9c\x13\x7f\x10\xea\xcd\xfd\x4e\xef\xa6\xb1\x25\xd9\xde\x22\x8d\xfa\x59\x63\xc5\x0d\x80\xf5\x28\xf1\xd6\xb9\x37\x9e\xa3\xee\xb5\x4c\xbe\x37\xe0\x55\xc6\x27\x82\x75\x49\xd0\xda\xe0\xb9\x1d\xca\xbf\x5b\xd4\xcf\xbf\x0b\x47\xac\x2d\x59\x07\xfe\x7a\x49\xc1\x61\xa6\x24\x17\x2a\xf0\xbe\x2e\xdb\x17\x7f\xa0\x3c\x7d\x4b\xf3\xba\xdb\xc3\xed\x06\xee\xdb\x5e\xd7\xdd\x42\x5c\x47\xb2\xb3\x68\x75\x8c\xf2\xe1\x4f\x00\x00\x00\xff\xff\x4e\x9b\x8d\xdf\x17\x11\x00\x00") func jsonschemaDraft04JSONBytes() ([]byte, error) { return bindataRead( @@ -98,12 +84,12 @@ func jsonschemaDraft04JSON() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "jsonschema-draft-04.json", size: 4375, mode: os.FileMode(420), modTime: time.Unix(1441640690, 0)} + info := bindataFileInfo{name: "jsonschema-draft-04.json", size: 4375, mode: os.FileMode(420), modTime: time.Unix(1482389892, 0)} a := &asset{bytes: bytes, info: info} return a, nil } -var _v2SchemaJSON = 
[]byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xec\x5d\xdf\x73\xdc\xb6\xf1\x7f\xcf\x5f\x81\xb9\x78\x46\xf6\x24\xd6\x39\xfe\x7e\x5f\xea\x97\x8c\x1a\x39\x89\x5a\xbb\xd2\xf8\x9c\xf6\xc1\x95\x67\x70\x24\x4e\x87\x84\x3f\x2e\x04\x29\xe9\xea\xea\x7f\xef\x02\xfc\x71\x04\x01\x90\x20\x89\x3b\x9d\x6d\x7a\xa6\x8d\x8e\x04\x16\x8b\xc5\x62\xf7\xb3\x0b\x10\xf8\xf4\x0d\x42\xb3\x94\xa6\x01\x99\xbd\x42\xb3\x33\xf4\xb7\xc5\xe5\x3f\xd0\xc2\x5b\x93\x10\xa3\x55\x9c\xa0\xc5\x1d\xbe\xb9\x21\x09\x7a\x79\xfa\x02\x9d\x5d\x5d\x9c\xce\xbe\xe7\x15\xa8\xcf\x4b\xaf\xd3\x74\xf3\x6a\x3e\x67\x79\x91\x53\x1a\xcf\x6f\x5f\xce\x99\xa8\x7b\xfa\x3b\x8b\xa3\x6f\xf3\xc2\x4f\xf2\x47\xb5\x1a\xfc\xe5\xf3\xa2\x60\x9c\xdc\xcc\xfd\x04\xaf\xd2\xe7\x2f\xfe\xbf\xa8\x5c\xd4\x4b\xb7\x1b\xc1\x54\xbc\xfc\x9d\x78\x69\xfe\x2c\x21\x7f\x66\x34\x21\xbc\xf9\x0f\xf0\x1b\x9e\x14\xad\x8b\xd7\x9c\xb3\x68\x15\x97\x7f\x6f\x70\xba\x66\x33\xf8\xfb\x5a\xd4\xc5\xbe\x4f\x53\x1a\x47\x38\xb8\x4a\xe2\x0d\x49\x52\x4a\x18\xd0\x59\xe1\x80\x11\x51\x00\xca\xa7\x24\x89\xa4\xb7\x9f\x72\x52\x1f\xef\x9f\x57\x3f\x78\x97\x12\xb2\xe2\xac\x7d\x3b\xf7\xc9\x8a\x46\x82\x2c\x9b\xdf\x92\xc8\x8f\x93\xd7\xf7\x29\x89\x18\x3c\x98\x89\xd2\x0f\xf0\xff\x0f\x39\x79\x0d\xdd\x92\xfb\x1a\xed\xb2\xdb\x2c\x4d\x68\x74\x53\xf4\x05\x9e\x93\x28\x0b\xab\x6e\x8b\x27\x30\x26\xb3\xe2\xd7\x75\x55\xcc\x27\xcc\x4b\xe8\x86\x73\xc4\xa9\xbc\x5f\x93\x6a\x0c\x6f\x49\xc2\xf9\x42\xf1\x0a\xa5\x6b\xca\x90\x1f\x7b\x59\x48\xa2\xf4\xb4\xe0\xb4\x2e\xc2\xce\xce\x8a\x52\x52\xbd\x75\xcc\x52\x9b\x8e\x14\x62\xe6\xaf\x3e\x7e\xf8\xf8\xe9\x61\x8e\x5e\xfd\x1b\xfe\x5d\x7f\xf7\xf4\xc7\x57\xf0\x97\xff\xdd\xb3\x1f\x9f\xcc\xda\xfa\xc3\x1b\x42\x4f\x23\x1c\x12\x04\x1a\x4a\x37\xcf\xf2\x1e\x11\xa1\xa0\xe8\xf5\x3d\x0e\x37\x01\x79\x85\x4e\x76\x8a\x79\x22\x73\xba\xc4\x8c\x5c\x81\x72\xf4\xe5\x76\xde\xca\x16\xa7\x8a\xb8\xce\xa1\x34\xd6\xb1\x33\xc7\x1b\x7a\xd2\x90\xb5\x50\xf8\x9a\x42\x18\xc5\x5d\x14\x7c\x43\x41\xc6\x12\x05\x0f\xde\x66\x0d\x12\x0d\xe6\xce\x50\x00\xd5\xb8\x90\xde\x5e\xbc\x7d\x8d\x78\x4f\x19\xc2\x9e\x47\x36\x29\xf1\xd1\x72\x5b\x31\xbb\xeb\x9e\x9e\x89\x90\xf8\x14\xbf\x87\xea\x2a\x1b\xa0\xdc\x7e\xe6\xf5\x67\xa3\x68\x1a\x79\x38\x42\x05\x8d\x51\x6c\x88\x29\xdf\x29\xcd\xca\x32\xec\x6a\xd6\x5e\x77\xd7\xaf\x17\x6e\xb4\x9f\x80\x5a\x82\xc2\x58\x31\x51\x94\x3d\x37\x51\x4b\x08\xdb\xc0\x43\x1b\xfd\x28\x8b\x1a\x69\x31\xe2\x65\x09\x4d\xb7\x16\xaa\x56\x96\xd4\xd6\x3f\xef\x23\x27\x5d\x25\x89\x6a\x8a\x6f\x98\x6e\x16\xe2\x24\xc1\xdb\x9d\x1e\xd0\x94\x84\xf5\x72\xc6\x06\x81\x5e\x69\x12\x1f\xaa\xda\x59\x44\xff\xcc\xc8\x45\x41\x23\x4d\x32\x22\xf1\x40\xee\xf9\x04\xc7\xc1\x79\xec\x59\x74\x49\x2a\xdd\xb0\xf0\x3a\x1d\x52\xcc\xa9\xc6\xad\xe9\x66\xcb\x2f\x24\x22\x09\x0e\x10\xaf\x9e\x84\x98\x3f\x46\x78\x19\x67\xa9\x66\xb6\x2a\x5e\x51\x3c\x2d\xcc\x7d\x55\xac\x72\xf4\x8a\xcf\xe8\xf2\x8c\xe5\xd4\x32\x78\x47\xf1\x5a\xf6\x90\x2d\x02\xd4\x7a\xc9\x52\x8e\xf2\xc0\x69\x3c\x66\xad\x1b\x8d\xd6\x0c\x06\x5c\x27\xdb\x33\x94\xab\x04\xc2\x91\x0f\x56\x87\x78\x14\x2c\xb7\x20\x5a\xf7\x24\x35\xce\xbe\x57\xa5\x3a\xa6\x75\x06\x20\x27\x4a\xa9\x57\x79\x64\x70\xed\x4b\x70\xd0\x9d\x8d\xcb\x94\x86\x33\x10\xc4\x11\x07\x04\xb5\xe7\x92\x0b\x5d\xac\xe3\x2c\x00\xcf\x40\x90\x4f\x57\x2b\x92\x00\x46\x40\xab\x24\x0e\x45\x09\x21\xa7\x53\x84\x7e\xa1\xe9\xaf\xd9\x12\xfd\x1c\xe0\xdb\x18\x74\x0f\xbd\xc5\xc9\x1f\x7e\x7c\x17\x21\x40\x16\x38\x08\xe2\x3b\xe2\x1b\x7a\x01\x6a\x14\xb2\xcb\xd5\x82\x24\xb7\xd4\x1b\x33\x8e\xdc\xeb\x0a\x62\x9c\x7b\x96\x93\x13\xa8\xb5\x5d\x8a\xe0\x32\x53\xec\xa5\x76\xea\x5a\x16\xd6\x52\x0a\xa0\x41\x30\xba\x76\x94\xca\xc2\xaa\xc2\x37\x1d\x7a\x83\x3b\x5b\x93\xf1\x53\x5e\x53\x3
2\x19\xa5\x34\x60\x60\x40\xd7\x24\x0d\xeb\x39\xfd\x0d\x73\x91\xc3\xb0\x91\x43\x48\x7d\x50\x30\xba\xda\x42\x59\x94\xa3\xba\x9c\xcb\x42\x12\x08\xda\x85\x80\x61\x0e\x91\x02\x8e\xe8\x7f\x44\xbf\x0c\x23\x9b\x25\xc1\x48\x5e\x7e\x7b\xf7\x06\x6d\x62\x0a\xfc\x00\x33\x05\x8e\xf3\x54\xb9\x9e\xca\x84\xf2\xe7\x9c\x06\xb8\x3b\x3d\x6b\x30\xe5\xe9\x58\xe6\x04\x0d\x04\xc3\x05\xde\x9e\x59\x49\xc9\xc0\x65\xce\x4c\x9b\xe5\x3d\x98\xb1\x97\x74\x5f\x9d\x4f\x46\xdd\xd7\xfb\x3c\xa1\x8d\x03\xfd\xdb\xfe\x14\xbc\xae\xd4\x45\x17\x05\xfc\x3d\x45\x17\xe9\x09\x43\x24\xf2\xe2\x2c\xc1\x37\x60\x44\x41\xe3\x32\xc6\xfd\x12\xba\x5c\x00\x28\x8e\x43\x18\x08\xba\x0c\xaa\x6a\x07\xd5\xfb\xaa\x4d\x2b\x5d\x3f\x16\x1d\x52\x42\x00\x4b\xeb\xf9\x8e\x04\x20\xeb\xdb\x3c\x84\x63\xa5\x0c\x68\xe4\xd3\x5b\xea\x67\x80\xc4\x80\x0d\x21\x21\x76\x8a\x40\x62\x5b\x14\x66\x10\xcd\x80\x8f\x4c\xca\x8a\x45\x95\x93\x32\xbc\x3c\x39\x55\xc2\xc8\x3d\x0a\xa3\xa6\x0e\x10\xa8\x5a\x11\xe3\x3d\xe5\xb0\xb8\x6d\x14\xdb\xe6\x8e\x4d\x00\x65\x92\xbe\x81\x6e\x27\xc2\x2f\x92\x49\x0a\x9f\x8d\xd1\xbc\x8c\x44\x72\x20\x04\x68\x92\xe7\xb4\xf2\xf6\x59\x81\x79\x96\x42\xcd\x61\xb0\x72\x72\x0c\xc6\x91\x3f\x29\x82\x69\xbf\x00\x86\x22\x1c\x95\x23\xe4\x86\xaa\x69\x22\xb8\x3d\xf6\xbd\x6a\xaf\x7f\xf7\x13\x02\x38\x97\x81\x9f\x15\x8e\x81\x09\x5c\x50\x0b\x56\xa5\x6e\xe9\x62\xc9\x3d\xf6\xaa\x6c\x6e\xbf\x9d\x32\x45\x79\x3d\x7b\x23\xfb\x8c\x06\x83\x6a\xac\x56\xb6\x5a\xe5\xda\xc4\xcb\x2e\x2f\xc6\xcd\xb9\xe2\xc4\x4c\xfe\xc9\x3e\x26\x70\xe1\x3a\x8e\xdd\xfa\x93\x3c\xdd\x36\x66\x88\x95\x04\x41\x48\x43\xf2\x3e\xa7\xd1\x99\x2e\xd4\xb8\xd6\x2a\xdb\x55\x42\x80\x5f\xdf\xbf\xbf\x42\x21\x40\x38\x70\xf9\x0d\x8b\xc2\xd9\xc0\x8d\xa1\xec\x09\x81\x76\x49\xa3\x81\x38\xe8\x88\xe2\x7c\x39\x3b\x24\x09\x43\xce\x10\x89\x57\x6a\x96\x48\x37\x54\xb5\x97\x0f\x52\x75\x43\x9a\xa8\x51\x70\x06\x0e\x22\xc4\xc9\x76\x54\xfc\xbd\x4c\x28\x81\x88\x35\xa7\x54\xaa\x45\x35\xf6\x8f\x16\xfc\x57\x1c\x7c\x3f\x22\xba\x37\x18\x5a\xf1\xce\x36\xa5\xd6\xa4\x59\x31\x76\xe1\xbb\x48\xfb\x14\x01\x27\xdd\xa5\x5c\xba\x64\xaf\x49\x6f\x1b\x84\xdb\x33\xc5\xdd\x22\x16\x4d\x9a\xbb\xc9\x96\x26\xf9\x3f\x88\xad\x82\x8e\x2b\xb6\xb4\x59\xf0\x16\x92\xbb\xf2\x66\x9a\xba\x5c\x78\x0b\x49\xc5\x0a\x36\x26\xb1\xb2\xee\xd2\x42\x4b\x59\x7b\x69\x52\xf3\x39\x0e\xf1\x70\x4a\x8c\xda\xb9\x8c\xe3\x80\xe0\xa8\xa9\x9e\x2b\x9c\x05\xa9\x84\xa6\x15\x46\xd5\xb4\x7d\x1b\xa7\x52\xea\x5e\xd0\x32\xc6\x48\x02\xf8\xbb\x02\x42\x47\xe4\x34\x0a\xc2\xbd\x81\xd0\x0d\xb1\xcc\x08\xee\x7c\xb4\x5e\xf9\x33\x47\x74\xe4\xe5\xd4\xe1\x84\x7c\x12\xc0\xdc\x72\x42\x2a\xde\x34\xa3\x81\xe1\xb4\xd6\x04\x2b\xd3\x65\x98\xa0\x70\xea\xad\x1d\x51\x72\x64\xb7\xb4\x93\x4e\xbb\x9a\x67\x9d\x9c\xc8\xeb\x56\x61\x2c\x4f\x29\x31\x61\xbb\x09\x05\x4b\x9e\xf0\x44\x04\x8e\xb6\xe8\x16\x07\xd4\xcf\x11\x26\x83\x60\x23\x83\x32\xb1\x2f\xc2\xa6\x93\xc2\xdc\xd4\xb3\x12\x21\x95\xa7\xec\x0f\x6e\x67\xfd\xd3\x0f\x2f\x9e\xff\xe5\xfa\xd3\xff\x3d\x3c\x7b\xf2\xdf\x8f\x4f\x8b\xf6\x9f\x3d\xe9\x67\xc1\xff\x89\x83\x8c\x18\xf2\x1c\x7b\x30\x2b\x51\x9c\x36\x40\xa8\x7e\x84\x2c\x65\xd4\x29\x25\x6d\x37\xfa\x77\x64\xd7\x95\x2e\xf5\xcb\xe5\x59\x53\xc1\x38\x22\x97\x2b\x29\x86\xe8\x31\x3a\xda\x81\xb1\xa8\xcf\xb7\x00\xbd\x23\x62\x6d\xc9\xd3\x2c\x89\x5c\x6b\x59\x1f\x1e\x14\xd5\xa7\x53\xd9\xc4\xbe\x23\xeb\x6a\xdb\x93\x54\x53\x95\x76\x53\x62\x2d\x52\x93\x93\x5f\x4a\x93\x3d\x28\xad\x68\x40\x16\x3a\x6a\xb5\x5f\xd7\x46\xbb\x6d\x6d\x21\xcb\xc2\x86\x48\x41\x89\xd5\x5b\x48\x55\xa5\x5b\x26\xef\x91\x61\x15\x49\x89\x55\xb9\x39\xcf\xa4\xe5\x4d\xcc\x5a\x9a\x77\x06\xf8\xf4\xd3\x4c\x90\xb4\x9e\x5f\xa9\x9c\x53\x91\x98\xd2\x85\x73\xca\x0e\x38\xf1\x54\x53\x92\x9b\x
71\xb1\xa2\xde\x7c\x4a\xa3\x94\xdc\xa8\x8f\x75\xe8\x1c\x95\x29\x86\xce\x09\x51\xa5\xc4\x7a\x5b\x08\x5d\xc2\xc2\x04\x35\x12\x1a\x52\xbe\xca\xc0\xf2\x04\x85\x96\x9e\x17\x07\x01\x0c\x25\x54\xf8\x59\xcb\x93\x69\x85\xbb\x51\xcb\x80\x22\xcb\x60\xc5\x82\x64\x59\x58\x4b\x29\xc4\xf7\x34\xcc\x42\x3b\x4a\x65\x61\x83\x01\xf1\x82\x8c\x81\x50\xde\xf6\x21\xa9\xd4\xd2\x73\x09\xe5\xed\xb9\x2c\x0a\x77\x70\xd9\x87\xa4\x52\xcb\x24\xcb\x37\x24\xba\x49\x2d\xf1\xef\xae\xb8\xa9\xcf\xbd\xa8\x55\xc5\x4d\xb8\xbc\xd8\x39\x69\xb7\x14\x25\x0a\x9b\x7a\x79\x61\x3f\x55\xaa\xd2\xa6\x3e\xf6\xa1\x55\x96\xd6\xd2\x92\x33\x86\x16\xe4\xea\x15\xf4\xba\x12\x59\xeb\x47\x64\xd4\x09\x98\x79\x14\x3c\xe5\xa5\x12\x06\x1b\xfa\xb8\x2b\x6f\x98\xf9\xfd\x61\x90\xe2\x99\x1f\xc9\xe9\x36\x8b\xb7\xec\x4e\x85\xe0\xa9\x70\x54\x5b\x1e\x3a\x25\x62\x25\xfc\x0e\x82\x2b\x74\xff\x9c\x67\x3d\x45\x64\xd5\xbd\x6b\x86\xe7\x8d\x35\x65\x8c\xbb\x0f\x97\xb1\xbf\xbd\xaa\xd6\xf5\xc6\x6d\x7c\xa8\xbb\x16\x69\xdf\x9f\x8c\x1b\xaf\x8f\x31\x6d\xe3\x2a\xbb\x9d\xa7\xd6\x35\xc9\xed\x2a\x58\xe7\xcb\xf7\x94\xc7\xc5\x7c\x8f\x9b\xd8\x3d\x43\x21\x8a\x2e\xd0\x25\x2f\x9d\xb1\x71\xfb\xdb\x1c\xef\x18\xd9\x31\x6e\x40\x11\x63\x04\x76\xce\x09\x83\x95\x2b\x12\xc2\x41\xec\x61\xbd\xd0\x6c\xa0\x18\xd7\xe5\x6e\xbc\x54\x53\xe0\x3e\xb9\x52\x13\xdb\x77\x6b\x22\x12\x20\x71\x82\x20\x76\xcf\xbf\x6c\xa8\xd8\xe6\x83\x55\xb6\xc7\x4b\xe4\x09\x2c\x1c\x9c\x0e\xc8\xc4\x6a\xc3\x39\xbb\x38\x6d\xc4\xb6\x8a\x1c\xb7\x57\x16\x62\x91\x2d\x17\x4d\x46\x8e\x2d\xec\xe9\x9c\xeb\x9f\xa9\x06\x1c\xcf\x44\x93\x03\x3d\xfe\x4f\x3f\xd5\x26\xa3\x3a\xd4\xa8\x1e\x3e\x36\x35\x04\xa1\x86\x90\x75\x8a\x4d\xa7\xd8\x74\x8a\x4d\x5b\x7b\x3d\xc5\xa6\x5f\x68\x6c\xfa\x4d\xfd\xbf\x25\x4e\x02\xde\x93\xed\x04\x93\x26\x98\x54\x7b\x2a\x74\x62\x42\x49\xfb\x43\x49\x82\x99\xd7\xe1\x26\xdd\x36\x57\x15\xa5\x96\x6d\x76\xbf\xb4\xb1\x25\x9a\x61\x88\xc1\x94\xe2\x49\x19\x5c\x53\xdb\xe5\xb6\x60\x38\x0a\xb6\x5c\x6f\x45\xc2\x86\xaf\x8a\x73\xa6\x78\xce\x26\x33\x7d\x33\x31\x21\xbc\xa3\x44\x78\xff\x82\x01\x7c\xcb\xad\xfe\x04\xf5\xd0\x04\xf5\x26\xa8\x37\x41\x3d\xd4\x84\x7a\xdc\xe4\x9d\xe3\x14\x4f\x68\x6f\x42\x7b\xb5\xa7\xa5\x5a\x4c\x80\x6f\x02\x7c\x3a\xde\x3f\x0f\xc0\xd7\x78\xc8\xf7\x69\x4d\x20\x10\x4d\x20\x70\x02\x81\x5d\xbd\x9e\x40\xe0\xd7\x04\x02\xf9\x27\x2c\x9f\x27\x00\x34\x7d\xb6\x59\x3c\x2d\x1e\x75\x6f\x9f\x1c\x04\x18\xb5\x4e\x4d\xfa\xd6\xb1\xd6\xb4\xa8\xe1\x1c\x62\x1e\x39\x8c\xe4\x8a\x35\x41\xc8\x69\x65\xb5\xfa\xf7\x75\x40\xae\x09\x69\xa1\x09\x69\x4d\x48\x6b\x42\x5a\xa8\x89\xb4\xa2\x38\xfa\xeb\x21\x36\xa9\xea\x3f\x1e\x19\xf4\x75\x9a\x71\xd3\x9c\x4e\x74\x16\xf4\x5a\x32\x8e\x03\x29\x9a\x96\xab\x07\x92\x33\xa0\x61\x65\x64\xaf\x1b\x18\x5a\x33\xa4\x83\x04\x2e\xef\x62\x1e\xd8\x09\x45\xd1\x3a\xd8\x57\x76\x64\xda\x7e\x4d\x7b\x06\x80\x2b\x47\x8c\x94\xd5\x8f\xe1\x04\x04\x83\xa5\x13\xd0\x73\x3a\x07\x3d\xc6\x4b\x09\x17\x5c\x7f\xe7\x3e\x1c\xb8\x68\x8f\x5d\x2d\xfb\x67\x79\xb4\xfb\x7c\xd7\x9d\xb9\x74\x1a\xad\x35\xbc\x1e\xd1\xa0\xe6\x2b\xd0\x5e\x70\x67\x50\x93\x6d\x98\xa8\xd3\x66\x0f\x68\xb1\xeb\x73\x8e\x0e\x20\x36\xa4\x45\x17\x68\x6d\x40\xbb\x4e\x20\xdd\x90\xfe\xba\xc0\x7d\xa3\xfa\x3b\x0a\x1c\xda\xb6\x2c\xf9\x97\x98\x89\x38\xe4\xa2\x88\x99\x86\x01\x49\x07\x2d\x9f\xe7\xf3\xe9\xc5\x20\xf0\x39\x40\xe6\xa3\x10\xea\x3e\x25\xbd\xef\x86\xdb\x05\x6d\x81\x81\x07\x08\xbb\x13\x28\x83\xc0\xcd\xc7\x28\x1c\x42\xea\x07\x69\xbd\x5d\xf4\xa6\xb4\xdf\x18\x06\x72\xaf\x7f\x26\x67\x3e\x6c\x23\x94\x21\x96\xcc\x18\xc6\x74\x7e\xfa\xce\xcb\x44\xdb\xc3\x1e\xa1\xd0\xcc\xa8\xca\x95\x6b\xbf\x9a\x99\xd3\x0a\x4d\x3c\xe8\x01\x95\x26\x15\x36\x06\x4f\xd5\x02\x28\x8b\x94\xd3\xa3\x89\x51\xb7\xd0\x29\
x1b\xb5\x1f\x94\x97\xfa\xb3\xfd\x7a\x32\x28\x0f\xd6\xa8\xa1\xc3\x41\xa0\xa2\xb6\x96\xb3\x09\x4d\x9d\x33\x1d\x5a\x68\xdb\x29\x3d\x77\x86\x98\xc1\x1c\x21\xe5\x7d\xea\x9e\x7b\x7d\x38\xeb\x27\x50\x9f\x72\x6c\x0e\x62\xc2\x69\x9c\x0c\x89\x4e\x12\x88\xf9\x2f\xa3\xc0\x78\x30\xe3\xe0\x23\xd8\xee\x43\xe5\x84\x54\xbd\x0c\x78\x41\x03\x26\x74\x7f\x24\x62\xb1\x02\x50\x17\xe8\x2e\x66\xb4\xfe\xba\xb0\x76\x0c\xcc\x17\x1e\x46\xbb\x38\xb0\x64\x0a\x9d\x8f\x26\x74\x7e\x14\x14\xe4\x66\xe9\xca\x6e\x4b\xcf\xde\xcc\xd9\xf1\x1a\xa3\xe6\x6a\x97\x85\x45\x1a\x75\xb3\xc3\x74\xfe\xd0\xb4\x12\xd9\x45\x69\x5a\x89\x9c\x56\x22\xa7\x95\xc8\xc7\x5b\x89\x7c\x04\xc8\x28\xf9\x24\xdd\xb5\x89\x63\x2f\x29\x2c\x69\xbe\xcb\x31\x0c\xbf\x16\x62\xa6\xf4\xb7\xe3\xd2\x42\x1d\x8d\xe1\xfe\x52\x75\x8a\x4a\x0c\xab\x77\x16\x56\x37\x4c\x98\xce\xd5\x97\x25\x2d\x87\x59\xfb\xf1\xf8\x16\x5b\x9d\xb4\x5d\x10\xa7\xed\x6e\x70\xe7\x65\x75\x7a\x3b\x04\x4a\x40\xef\x87\xd4\x04\x95\x4f\xe8\x32\x53\x0f\x6f\x1e\x0d\x02\xef\x12\xbc\xd9\xb8\x3a\xae\xfc\x58\xe6\x2a\xbf\xfc\xd3\x95\x06\xf5\xb9\x5e\xcc\xb5\xb6\x8d\x3c\x77\xd6\x19\xc0\x3f\x96\x71\xed\xb8\x7a\x76\xb8\xad\xd3\x9d\xc5\x6b\x95\xed\x5a\x62\x46\xbd\xb3\x2c\x5d\xf3\x7b\x24\xf2\xcd\xa6\x0b\xe5\xe8\xfd\x46\x0a\xcc\x8a\x30\xde\xd0\xbf\x93\xad\x1b\x5a\x31\x06\x06\x5f\x5e\x40\x60\x46\x3d\x9a\xba\xa4\x79\x85\x19\xbb\x8b\x13\xdf\x25\xcd\xb3\x0d\xe7\xd3\xa1\x28\x0b\xb2\x9e\x47\x18\xfb\x29\xf6\x89\x96\x6a\xf5\xf7\xb5\x56\xf3\xda\xc6\x79\xbf\x96\xe6\x31\x4e\xd2\x15\xbd\x75\xb9\xf5\xf9\xf8\x4c\x49\x63\x7e\x1d\x60\x0c\x1b\x28\xa2\xb1\xfd\xed\xc0\x23\x9c\x77\xbf\x7b\x88\x87\x7a\xae\x7e\xbb\xf8\x5b\xcf\x36\x6b\xe4\x42\x9c\x1f\xe5\x71\x7c\xba\x69\xb0\xd7\x87\xd5\xd1\x55\x10\xdf\x49\x77\x1c\x00\x4f\x71\x52\xdc\x27\xfb\x5b\x9f\x7b\xe9\xdc\x68\x6c\x2e\x14\x8b\x24\x18\xe7\x7b\x74\x6b\xb4\x10\x7e\x77\x7b\xcc\x83\x5e\xdb\x5e\x84\x22\xfa\xb0\xc8\x6b\x68\xa9\x29\x52\xee\xd1\x13\x8b\xbb\x87\x3f\xff\x59\xa1\x20\x8e\xc7\x9d\x15\x69\xfc\x07\xf9\xf2\x67\xc3\xa6\x10\xfa\xa1\x67\x43\x25\xdd\x69\x16\xc8\xb3\x40\x87\x91\xa7\x89\x50\xb4\xbc\xc7\x89\x80\x77\x72\x9f\xe6\xc2\xb1\xcc\x05\x35\xb0\x3b\x32\xa4\xf4\xf5\x4d\x93\x6a\x48\xbe\x30\xfc\x34\x4d\x42\xa4\x9f\x84\x8b\xe6\x28\x3a\x58\x78\x90\xbb\x2c\xb7\x2a\xdf\x3d\xea\x70\x49\xa6\xba\xe2\x59\x91\x6f\xc7\x3a\x4c\xe3\x0a\xc0\x6e\x96\x34\x5f\x4f\xef\x68\xa0\x88\x10\x9f\xf8\x28\x8d\xc5\xd9\x37\x08\x17\xf7\xf9\xe5\xf7\xb4\x06\x81\xf6\xfa\x89\x92\x37\xd9\x88\x69\xba\x3e\x38\xdd\xa9\xdc\x3b\x2f\x89\xc8\x9a\x8c\xe1\xce\x37\x6d\x1a\xce\x7a\x1d\xac\x76\x37\xeb\x20\xe1\xa7\x09\x8e\x18\xf0\xc4\x2f\xff\x48\x63\x2f\x0e\xca\xef\xd8\xc5\x75\xff\x6d\xe2\x34\xce\x7e\x9d\x79\x14\x1b\x92\x64\x0b\xc1\x9f\x30\xf9\xd1\x9d\xf2\xbb\x66\xf0\x7a\xcb\xa6\x65\x3b\x86\x89\x79\x95\xf5\x99\xc7\x6e\xa5\xab\x44\xe4\x9f\xa9\xfc\x73\x43\x37\xba\x8b\xc7\x77\x4b\x47\x82\x5c\x2b\x97\xbb\xb3\x7f\x0e\xc5\x6e\xed\x41\x58\x3f\x74\xc8\x8e\xff\xe6\xd6\x3e\x47\xdb\xfa\x4a\xf2\x7a\x27\xe1\x74\x2b\xdf\xae\xa9\xe6\x16\x1b\x67\xdb\xf7\x2a\x03\xae\xdb\x1b\xe0\xf2\x6b\xb7\xaa\x21\x65\x47\x8e\xb3\x2f\xdc\xca\x26\x5a\x76\xff\xb8\xff\xaa\xad\xea\x97\xb2\x87\xc7\xd9\x97\x6c\x6a\xbf\x9c\xb6\xa5\xdf\x50\x54\x1b\x2f\x65\xeb\x8f\xfb\x2f\x7a\x6a\x52\xdc\x6b\x6b\xf2\x17\x3c\x3b\xac\xd0\xdc\x90\xe4\xec\xcb\xb4\x9a\x18\x95\xbd\x93\xfb\x94\xe2\x3e\x1b\xd3\x0b\x51\xbf\xe7\xc9\xe9\x57\x67\xd5\x44\x88\xdc\x29\x7f\xd4\x54\x78\x19\x0c\x59\x20\x68\x7d\x54\x2a\x78\x52\xfc\xd5\xa8\x4d\x32\xbd\x3e\x2c\x97\x61\xfa\x37\xfc\x7f\x0f\xff\x0b\x00\x00\xff\xff\x31\x8b\xeb\xb6\x54\x9c\x00\x00") +var _v2SchemaJSON = 
[]byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xec\x5d\x4f\x93\xdb\x36\xb2\xbf\xfb\x53\xa0\x14\x57\xd9\xae\xd8\x92\xe3\xf7\x2e\xcf\x97\xd4\xbc\xd8\x49\x66\x37\x5e\x4f\x79\x26\xbb\x87\x78\x5c\x05\x91\x2d\x09\x09\x09\x30\x00\x38\x33\x5a\xef\x7c\xf7\x2d\xf0\x9f\x08\x02\x20\x41\x8a\xd2\xc8\x0e\x0f\xa9\x78\x28\xa0\xd1\xdd\x68\x34\x7e\xdd\xf8\xf7\xf9\x11\x42\x33\x49\x64\x04\xb3\xd7\x68\x76\x86\xfe\x76\xf9\xfe\x1f\xe8\x32\xd8\x40\x8c\xd1\x8a\x71\x74\x79\x8b\xd7\x6b\xe0\xe8\xd5\xfc\x25\x3a\xbb\x38\x9f\xcf\x9e\xab\x0a\x24\x54\xa5\x37\x52\x26\xaf\x17\x0b\x91\x17\x99\x13\xb6\xb8\x79\xb5\x10\x59\xdd\xf9\xef\x82\xd1\x6f\xf2\xc2\x8f\xf3\x4f\xb5\x1a\xea\xc7\x17\x45\x41\xc6\xd7\x8b\x90\xe3\x95\x7c\xf1\xf2\x7f\x8b\xca\x45\x3d\xb9\x4d\x32\xa6\xd8\xf2\x77\x08\x64\xfe\x8d\xc3\x9f\x29\xe1\xa0\x9a\xff\xed\x11\x42\x08\xcd\x8a\xd6\xb3\x9f\x15\x67\x74\xc5\xca\x7f\x27\x58\x6e\xc4\xec\x11\x42\xd7\x59\x5d\x1c\x86\x44\x12\x46\x71\x74\xc1\x59\x02\x5c\x12\x10\xb3\xd7\x68\x85\x23\x01\x59\x81\x04\x4b\x09\x9c\x6a\xbf\x7e\xce\x49\x7d\xba\x7b\x51\xfd\xa1\x44\xe2\xb0\x52\xac\x7d\xb3\x08\x61\x45\x68\x46\x56\x2c\x6e\x80\x86\x8c\xbf\xbd\x93\x40\x05\x61\x74\x96\x95\xbe\x7f\x84\xd0\x7d\x4e\xde\x42\xb7\xe4\xbe\x46\xbb\x14\x5b\x48\x4e\xe8\xba\x90\x05\xa1\x19\xd0\x34\xae\xc4\xce\xbe\xbc\x9a\xbf\x9c\x15\x7f\x5d\x57\xc5\x42\x10\x01\x27\x89\xe2\x48\x51\xb9\xda\x40\xd5\x87\x37\xc0\x15\x5f\x88\xad\x90\xdc\x10\x81\x42\x16\xa4\x31\x50\x39\x2f\x38\xad\xab\xb0\x53\xd8\xac\x94\x56\x6f\xc3\x84\xf4\x11\xa4\x50\xb3\xfa\xe9\xd3\x6f\x9f\x3e\xdf\x2f\xd0\xeb\x8f\x1f\x3f\x7e\xbc\xfe\xf6\xe9\xf7\xaf\x5f\x7f\xfc\x18\x7e\xfb\xec\xfb\xc7\xb3\x36\x79\x54\x43\xe8\x29\xc5\x31\x20\xc6\x11\x49\x9e\xe5\x12\x41\x66\xa0\xe8\xed\x1d\x8e\x93\x08\x5e\xa3\x27\x3b\xc3\x7c\xa2\x73\xba\xc4\x02\x2e\xb0\xdc\xf4\xe5\x76\xd1\xca\x96\xa2\x8a\x94\xcd\x21\xc9\x6c\xec\x2c\x70\x42\x9e\x34\x74\x9d\x19\x7c\xcd\x20\x9c\xea\x2e\x0a\xfe\x42\x84\xd4\x29\x04\x8c\x8a\xb4\x41\xa2\xc1\xdc\x19\x8a\x88\x90\x4a\x49\xef\xce\xdf\xbd\x45\x4a\x52\x81\x70\x10\x40\x22\x21\x44\xcb\x6d\xc5\xec\x4e\x3c\x1c\x45\xef\x57\x9a\xb5\x7d\xae\xfe\xe5\xe4\x31\x86\x90\xe0\xab\x6d\x02\x3b\x2e\xcb\x11\x90\xd9\xa8\xc6\x77\xc2\x59\x98\x06\xfd\xf9\x2e\x78\x45\x01\xa6\xa8\xa0\x71\x5c\xbe\x33\xa7\xd2\xd9\x5f\x95\xef\xd9\xd5\xac\xfd\xdc\x5d\xbf\x5e\xb8\xd1\x3e\xc7\x31\x48\xe0\x5e\x4c\x14\x65\xdf\xb8\xa8\x71\x10\x09\xa3\xc2\xc7\x02\xcb\xa2\x4e\x5a\x02\x82\x94\x13\xb9\xf5\x30\xe6\xb2\xa4\xb5\xfe\x9b\x3e\x7a\xb2\x55\xd2\xa8\x4a\xbc\x16\xb6\x71\x8e\x39\xc7\xdb\x9d\xe1\x10\x09\x71\xbd\x9c\xb3\x41\x89\xd7\xa5\x89\xdc\x57\xb5\x53\x4a\xfe\x4c\xe1\xbc\xa0\x21\x79\x0a\x1a\x0f\x70\xa7\x5c\x08\x8e\xde\xb0\xc0\x43\x24\xad\x74\x63\x0e\xb1\xd9\x90\xe1\xb0\x2d\x13\xa7\x6d\x78\xfd\x04\x14\x38\x8e\x90\xaa\xce\x63\xac\x3e\x23\xbc\x64\xa9\xb4\xf8\x03\x63\xde\xcd\xbe\x16\x13\x4a\x55\xac\x82\x12\xc6\xac\xd4\x35\xf7\x22\xd4\x3a\xff\x22\x73\x0e\x6e\x51\xa0\x75\x1e\xae\x8f\xe8\x5d\xc7\x59\xe6\xe4\x9a\x18\x8d\xd6\x1c\x53\x84\x4d\xb7\x67\x28\x37\x09\x84\x69\x88\x12\x0e\x01\x11\x80\x32\xa2\xf5\xb9\xaa\xc6\xd9\x73\x53\xab\xfb\xb4\x2e\x20\xc6\x54\x92\xa0\x9a\xf3\x69\x1a\x2f\x81\x77\x37\xae\x53\x1a\xce\x40\xc4\xa8\x82\x1c\xb5\xef\xda\x24\x7d\xb9\x61\x69\x14\xa2\x25\xa0\x90\xac\x56\xc0\x81\x4a\xb4\xe2\x2c\xce\x4a\x64\x7a\x9a\x23\xf4\x13\x91\x3f\xa7\x4b\xf4\x63\x84\x6f\x18\x87\x10\xbd\xc3\xfc\x8f\x90\xdd\x52\x44\x04\xc2\x51\xc4\x6e\x21\x74\x48\x21\x81\xc7\xe2\xfd\xea\x12\xf8\x0d\x09\xf6\xe9\x47\x35\xaf\x67\xc4\x14\xf7\x22\x27\x97\xe1\xe2\x76\x2d\x06\x8c\x4a\x1c\x48\x3f\x73\x2d\x0b\x5b\x29\x45\x24\x00\x2a\x0c\x11\xec\x94\xca\xc2\xa6\xc1\x37\x21\x43\x8
3\x3b\x5f\x97\xf1\x43\x5e\x53\x73\x19\xa5\x36\xd8\x2d\x05\x2e\x34\x0b\xeb\x39\xfc\x1d\x63\x51\x01\xbd\x3d\xbb\x90\x84\x40\x25\x59\x6d\x09\x5d\xa3\x1c\x37\xe6\x5c\x16\x9a\x40\x09\x70\xc1\xe8\x82\xf1\x35\xa6\xe4\xdf\x99\x5c\x8e\x9e\x4d\x79\xb4\x27\x2f\xbf\x7e\xf8\x05\x25\x8c\x50\xa9\x98\x29\x90\x62\x60\xea\x75\xae\x13\xca\xbf\x2b\x1a\x29\x27\x76\xd6\x20\xc6\x64\x5f\xe6\x32\x1a\x08\x87\x21\x07\x21\xbc\xb4\xe4\xe0\x32\x67\xa6\xcd\xf3\x1e\xcd\xd9\x6b\xb6\x6f\x8e\x27\xa7\xed\xdb\xe7\xbc\xcc\x1a\x07\xce\x6f\x87\x33\xf0\xba\x51\x17\x22\x66\x78\x79\x8e\xce\xe5\x13\x81\x80\x06\x2c\xe5\x78\x0d\xa1\xb2\xb8\x54\xa8\x79\x09\xbd\xbf\x3c\x47\x01\x8b\x13\x2c\xc9\x32\xaa\xaa\x1d\xd5\xee\xab\x36\xbd\x6c\xfd\x54\x6c\xc8\x08\x01\x3c\xbd\xe7\x07\x88\xb0\x24\x37\x79\x90\x28\x4a\x1d\x10\x1a\x92\x1b\x12\xa6\x38\x42\x40\xc3\x4c\x43\x62\x8e\xae\x36\xb0\x45\x71\x2a\xa4\x9a\x23\x79\x59\xb1\xa8\xf2\xa4\x0c\x60\x9f\xcc\x8d\x40\xf5\x80\xca\xa8\x99\xc3\xa7\x85\x1f\x31\x25\xa9\x82\xc5\x6d\xbd\xd8\x36\x76\x7c\x02\x28\x97\xf6\x1d\x74\x3b\x11\x7e\x91\xae\x32\xf8\x6c\xf4\xe6\x7b\x9a\xa5\x1f\x62\xc6\x21\xcf\x9a\xe5\xed\x8b\x02\xf3\x2c\x33\x33\xdf\x00\xca\xc9\x09\xb4\x04\xf5\xa5\x08\xd7\xc3\x02\x18\x66\xf1\xab\x1e\x83\x37\x4c\xcd\x12\xc1\x1d\x50\xf6\xaa\xbd\xfe\xe2\x73\x48\x38\x08\xa0\x32\x9b\x18\x44\x86\x0b\x6a\xc1\xaa\x26\x96\x2d\x96\x3c\xa0\x54\x65\x73\x87\x15\xca\x15\xe5\xf5\x94\x46\x9f\x33\x1a\x0c\x9a\xb1\x5a\xd9\x6a\x95\xcd\xcb\x7e\xec\x9a\xc5\x94\x3b\x37\x26\x31\xd7\xfc\xe4\x1f\x13\x8c\x31\x75\x9c\xba\xf7\x87\x3c\xa1\xb7\x4f\x17\x1b\x09\x82\x98\xc4\x70\x95\xd3\xe8\x4c\x48\x5a\xa6\xd6\x2a\x3d\x56\x42\x80\x9f\xaf\xae\x2e\x50\x0c\x42\xe0\x35\x34\x3c\x8a\x62\x03\x37\xba\xb2\x27\x04\xda\x25\x8d\x06\xe2\xa0\x13\x8a\xf3\xf5\xec\x10\x72\x67\x88\x90\x3d\x4b\x64\xeb\xaa\xda\x8f\xf7\x5a\x75\x47\x9a\xa8\x51\x70\x26\xd2\x38\xc6\x7c\xbb\x57\xfc\xbd\xe4\x04\x56\xa8\xa0\x54\x9a\x45\xd5\xf7\x0f\x16\xfc\x57\x1c\x3c\xdf\x23\xba\x77\x38\xda\x16\x4b\x31\x53\x6a\x4d\x9a\x15\x63\xe7\xe1\x18\x69\x9f\x22\xe0\x24\xbb\x94\x4b\x97\xee\x2d\xf9\x70\x87\x72\x7b\xe6\xc4\x33\x2a\x66\x5e\x1c\x35\x72\xe3\x2d\xda\x73\xe4\xc7\x51\x6d\xa4\xa1\x2a\x4f\xde\x94\xcb\xb2\x3e\x31\x48\xae\x82\xce\xc9\xc8\x65\xcd\xc3\xb7\x34\xb6\x2b\xdf\x58\x65\x78\x6e\x73\xac\x5e\x24\x0d\x3f\xdc\x70\x23\xc6\xda\x52\x0b\x2d\x63\x7d\xa9\x49\x2d\x54\x48\x28\xc0\x12\x9c\xe3\x63\xc9\x58\x04\x98\x36\x07\xc8\x0a\xa7\x91\xd4\xf0\xbc\xc1\xa8\xb9\x70\xd0\xc6\xa9\xb6\x78\x80\x5a\xa3\xb4\x2c\xf4\x18\x0b\x8a\x9d\xd0\xb4\x55\x10\xee\x0d\xc5\xd6\xe0\x99\x93\xdc\xa1\x04\xbb\xf1\xa7\x23\xd1\xd1\x97\x8c\x87\x13\x0a\x21\x02\xe9\x99\x25\xed\x20\xc5\x92\x66\x3c\x32\x9c\xd6\x06\xb0\x31\x5c\x86\x29\x0a\xcb\x60\x33\x12\xa5\x91\xfc\x96\x75\xd0\x59\xd7\x13\xbd\xd3\x23\x79\xdd\x2a\x90\xa6\x38\x06\x91\x39\x7f\x20\x72\x03\x1c\x2d\x01\x61\xba\x45\x37\x38\x22\x61\x8e\x71\x85\xc4\x32\x15\x28\x60\x61\x16\xb8\x3d\x29\xdc\x4d\x3d\x2f\x12\x13\x7d\xc8\x7e\x37\xee\xa8\x7f\xfa\xdb\xcb\x17\xff\x77\xfd\xf9\x7f\xee\x9f\x3d\xfe\xcf\xa7\xa7\x45\xfb\xcf\x1e\xf7\xf3\xe0\xff\xc4\x51\x0a\x8e\x4c\xcb\x01\xdc\x0a\x65\xb2\x01\x83\xed\x3d\xe4\xa9\xa3\x4e\x2d\x59\xc5\xe8\x2f\x48\x7d\x5a\x6e\x37\xbf\x5c\x9f\x35\x13\x64\x14\xfa\xef\x0b\x68\xa6\x0d\xb4\x8e\xf1\xa8\xff\xbb\x60\xf4\x03\x64\xab\x5b\x81\x65\x51\xe6\xda\xca\xfa\xf0\xb0\xac\x3e\x9c\xca\x26\x0e\x1d\xdb\x57\x5b\xbb\xb4\x9a\xa6\xb6\x9b\x1a\x6b\xd1\x9a\x9e\x7e\x33\x9a\xec\x41\x69\x45\x22\xb8\xb4\x51\xeb\x04\x77\xca\x6f\x7b\x7b\xc8\xb2\xb0\x95\x92\x25\x5b\xd0\x42\xaa\x2a\xdd\x32\x78\x4f\x0c\xab\x68\x46\x6c\xea\x6d\xf4\x5c\x5e\xde\xc4\xac\xa5\xf9\xd1\x00\x9f\x7d\x98\x65\x24\xbd\xc7\x
97\xd4\xb3\x3a\xa8\x2b\xa0\x34\x76\xf9\x65\x5f\x2d\x25\x95\x1b\xcf\xd6\xf4\x9b\x5f\x09\x95\xb0\x36\x3f\xdb\xd0\x39\x2a\x93\x1c\x9d\x03\xa2\x4a\xca\xf5\xf6\x10\xb6\x94\x89\x0b\x6a\x70\x12\x13\x49\x6e\x40\xe4\x29\x12\x2b\xbd\x80\x45\x11\x04\xaa\xc2\x8f\x56\x9e\x5c\x6b\xec\x8d\x5a\x0e\x14\x59\x06\x2b\x1e\x24\xcb\xc2\x56\x4a\x31\xbe\x23\x71\x1a\xfb\x51\x2a\x0b\x3b\x1c\x48\x10\xa5\x82\xdc\xc0\xbb\x3e\x24\x8d\x5a\x76\x2e\x09\xed\xc1\x65\x51\xb8\x83\xcb\x3e\x24\x8d\x5a\x2e\x5d\xfe\x02\x74\x2d\x3d\xf1\xef\xae\xb8\x4b\xe6\x5e\xd4\xaa\xe2\x2e\x5c\x5e\xec\x0e\xf5\x5b\x0c\xcb\x0a\xbb\xa4\x3c\xf7\x1f\x2a\x55\x69\x97\x8c\x7d\x68\x95\xa5\xad\xb4\xf4\x9c\xa5\x07\xb9\x7a\x05\xbb\xad\x50\x6f\xfb\xa0\x4e\x9b\x48\x23\x49\x92\x28\x87\x19\x3e\x32\xee\xca\x3b\x46\x7e\x7f\x18\x64\xcc\xcc\x0f\x34\xe9\x36\x8b\xb7\x6c\xa8\xa5\x5b\x54\x4c\x54\x5b\x15\x3a\xf1\x6c\x2d\xfe\x96\xc8\x0d\xba\x7b\x81\x88\xc8\x23\xab\xee\x7d\x3b\x92\xa7\x60\x29\xe3\xdc\xff\xb8\x64\xe1\xf6\xa2\x5a\x59\xdc\x6f\xeb\x45\x7d\x6a\xd1\x76\x1e\xea\xb8\xf1\xfa\x14\xd3\x36\x63\xe5\xd7\xf3\xe4\xbe\x25\xbd\x5e\x05\xeb\x73\x74\xb5\x21\x2a\x2e\x4e\xa3\x30\xdf\xbf\x43\x28\x2a\xd1\xa5\x2a\x9d\x8a\xfd\x76\xd8\x8d\xbc\x67\x65\xc7\xb8\x03\x45\xec\xa3\xb0\x37\x8a\x70\x4c\x68\x91\x51\x8e\x58\x80\xed\x4a\xf3\x81\x62\xca\x96\xbb\xf1\x52\xcd\x80\xfb\xe4\x4a\x5d\x6c\xdf\x6e\x20\x4b\x80\x30\x8e\x28\x93\xf9\xe9\x8d\x8a\x6d\xd5\x59\x65\x7b\xaa\x44\x9e\xc0\xc2\xd1\x7c\x40\x26\xd6\x1a\xce\xf9\xc5\x69\x7b\x6c\xec\xc8\x71\x7b\xe5\x21\x2e\xd3\xe5\x65\x93\x91\x53\x0b\x7b\x3a\xc7\xfa\x17\x6a\x01\xa7\x33\xd0\xf4\x40\x0f\x39\x87\xda\xe4\x54\x87\x3a\xd5\xe3\xc7\xa6\x8e\x20\xd4\x11\xb2\x4e\xb1\xe9\x14\x9b\x4e\xb1\xe9\x14\x9b\xfe\x15\x63\xd3\x47\xf5\xff\x97\x38\xe9\xcf\x14\xf8\x76\x82\x49\x13\x4c\xaa\x7d\xcd\x6c\x62\x42\x49\x87\x43\x49\x19\x33\x6f\xe3\x44\x6e\x9b\xab\x8a\x3e\x86\xaa\x99\x52\x1b\x5b\x59\x33\x02\x09\xa0\x21\xa1\x6b\x84\x6b\x66\xbb\xdc\x16\x0c\xd3\x68\xab\xec\x36\x4b\xd8\x60\x8a\x40\x31\x85\x6e\x14\x57\x13\xc2\xfb\x92\x10\xde\xbf\x88\xdc\xbc\x53\x5e\x7f\x82\x7a\x13\xd4\x9b\xa0\xde\x04\xf5\x90\x01\xf5\x94\xcb\x7b\x83\x25\x9e\xd0\xde\x84\xf6\x6a\x5f\x4b\xb3\x98\x00\xdf\x04\xf8\x6c\xbc\x7f\x19\x80\xaf\xf1\x71\x45\x22\x98\x40\xe0\x04\x02\x27\x10\xd8\x29\xf5\x04\x02\xff\x4a\x20\x30\xc1\x72\xf3\x65\x02\x40\xd7\xc1\xd1\xe2\x6b\xf1\xa9\x7b\xfb\xe4\x20\xc0\x68\x9d\xd4\xb4\xd3\x96\xb5\xa6\xd1\x41\x20\xe6\x89\xc3\x48\x65\x58\x13\x84\x9c\x56\x56\x3b\x0c\xe0\x6b\x83\x5c\x13\xd2\x9a\x90\xd6\x84\xb4\x26\xa4\x85\x0c\xa4\x45\x19\xfd\xff\x63\x6c\x52\xb5\x1f\x1e\x19\x74\x3a\xcd\xb9\x69\xce\xa6\x3a\x0f\x7a\x2d\x19\xc7\x81\x14\x5d\xcb\xd5\x03\xc9\x39\xd0\xb0\xd1\xb3\xcd\xfb\x7a\x2d\x5d\x3a\x48\xe1\xfa\x2e\xe6\x81\x42\x18\x86\xd6\xc1\xbe\xb1\x23\xd3\xf7\x34\xed\x19\x0a\x0b\xc4\x48\x44\xfd\x22\x50\xb6\x42\x58\xbb\xe5\x3d\xa7\x73\xd4\x8b\xc4\x8c\x70\x61\xec\x73\xee\xc3\x81\x8b\xf5\xe2\xd7\x52\x3e\xcf\xeb\xeb\x17\x3b\x71\x16\xda\x7d\xb8\xde\xf0\x7a\x8f\x06\x2d\xa7\x40\x7b\xc1\x9d\x41\x4d\xb6\x61\xa2\x4e\x9f\x3d\xa0\xc5\xae\xe3\x1c\x1d\x40\x6c\x48\x8b\x63\xa0\xb5\x01\xed\x8e\x02\xe9\x86\xc8\x3b\x06\xee\xdb\x4b\xde\xbd\xc0\xa1\x6f\xcb\xda\xfc\xc2\x44\x16\x87\x9c\x17\x31\xd3\x30\x20\x39\x42\xcb\x6f\xf2\xf1\xf4\x72\x10\xf8\x1c\xa0\xf3\xbd\x10\xea\x21\x35\x7d\xe8\x86\xdb\x15\xed\x81\x81\x07\x28\xbb\x13\x28\xc7\xf8\xce\x7d\x8d\xc2\x31\xb4\x7e\x94\xd6\xdb\x55\xef\x4a\xfb\xed\xc3\x40\x3e\xeb\x9f\xe9\x99\x0f\xdf\x08\x65\x88\x27\x73\x86\x31\x9d\x47\xdf\x55\x19\xba\x3d\xee\x15\x0a\xcd\x8c\xaa\x5e\xb9\xf6\x57\x33\x73\x5a\xa1\x89\x7b\x3b\xa0\xb2\xa4\xc2\xf6\xc1\x53\xb5\x00\xca\x23\xe5\xf4\x60\x6a\xb4\x2d\
x74\xea\x4e\xed\x3b\xe3\x47\xfb\xed\x82\x3d\x19\xd4\x3b\x6b\xaf\xae\x2b\x2f\x57\xb3\x82\x68\xcb\xed\x88\x2e\xe1\x5c\xd7\x26\xfa\x0a\x65\xe7\xce\x11\x33\xb4\xdd\x66\xe3\x37\xf6\xfa\x70\xd6\x4f\xa1\x21\x51\xd8\x3c\x26\x14\x4b\xc6\x87\x44\x27\x1c\x70\xf8\x9e\x46\xce\xab\x21\x07\x5f\xc1\x76\x17\x1b\x77\xb4\xda\x75\xa0\x0a\x3a\x30\xe1\xf8\x97\x32\x16\x2b\x00\x75\x85\xee\x62\x46\xef\xd3\x85\xb5\x6b\x60\xbe\xf2\x30\x7a\x8c\x0b\x4b\xa6\xd0\xf9\x64\x42\xe7\x07\x41\x41\xe3\x2c\x5d\xf9\x6d\xe9\x39\x98\x3b\x3b\x5d\x67\xd4\x5c\xed\xf2\xf0\x48\x7b\xbd\x2d\x31\xdd\x3f\x34\xad\x44\x76\x51\x9a\x56\x22\xa7\x95\xc8\x69\x25\xf2\xe1\x56\x22\x1f\x00\x32\x6a\x73\x92\xed\xe1\xc6\x7d\x9f\x49\x2c\x69\x7e\xc8\x31\x4c\x0c\xb4\xf2\x54\x3b\x79\x3b\x9e\x4d\xb4\xd1\x18\x3e\x5f\x9a\x93\xa2\x11\xc3\xda\x27\x0b\xaf\x37\x2e\x5c\x37\xfb\xeb\x9a\xd6\xc3\xac\xc3\xcc\xf8\x1e\x5b\x9d\xac\x22\x64\xb7\xed\x26\xb8\xf3\xb9\x3c\xbb\x1f\xe2\xb0\x22\x77\x43\x6a\x62\x29\x39\x59\xa6\xe6\xe5\xcd\x7b\x83\xc0\x5b\x8e\x93\x64\xac\xeb\xca\x4f\x65\xac\x4a\xbc\x1e\xcd\x82\xfa\x3c\x70\x36\xb6\xb5\xed\x79\xef\xec\x68\x00\xff\x54\xfa\xb5\xe3\xf1\xdb\xe1\xbe\xce\x76\x17\xaf\x57\xb6\x6b\x89\x05\x09\xce\x52\xb9\x01\x2a\x49\xbe\xd9\xf4\xd2\xb8\x7a\xbf\x91\x02\xf3\x22\x8c\x13\xf2\x77\xd8\x8e\x43\x8b\xe1\x54\x6e\x5e\x9d\xc7\x49\x44\x02\x22\xc7\xa4\x79\x81\x85\xb8\x65\x3c\x1c\x93\xe6\x59\xa2\xf8\x1c\x51\x95\x05\xd9\x20\x00\x21\x7e\x60\x21\x58\xa9\x56\xff\xbe\xb6\x5a\x5e\x5b\x3f\x1f\xd6\xd3\x3c\xc4\x4d\xba\x99\xb4\x63\x6e\x7d\x3e\x3d\x57\xd2\x18\x5f\x47\xe8\xc3\x06\x8a\x68\x6c\x7f\x3b\x72\x0f\xe7\xe2\x77\x77\xf1\xd0\x99\xab\xdf\x2e\xfe\xd6\xbb\xcd\x1a\xb9\x90\xd1\xaf\xf2\x38\x3d\xdb\x74\xf8\xeb\xe3\xda\xe8\x2a\x62\xb7\xda\x1b\x07\xa9\xdc\x30\x5e\xbc\x68\xfb\x6b\x9f\x97\xf1\xc6\xb1\xd8\x5c\x29\x1e\x49\x30\xc5\xf7\xde\xad\x91\x42\xf9\xdd\xed\x89\x80\x25\xbe\x37\xd7\xe7\x32\x5c\xe6\x35\xac\xd4\x0c\x2d\xf7\x90\xc4\xe3\xf5\xe3\x2f\x7f\x54\x18\x88\xe3\x61\x47\x85\x64\x7f\xc0\xd7\x3f\x1a\x92\x42\xe9\xc7\x1e\x0d\x95\x76\xa7\x51\xa0\x8f\x02\x1b\x46\x9e\x06\x42\xd1\xf2\x01\x07\x02\xde\xe9\x7d\x1a\x0b\xa7\x32\x16\xcc\xc0\xee\xc4\x90\xd2\x5f\x6f\x98\x54\x5d\xf2\x95\xe1\xa7\x69\x10\x3a\x06\xe1\x65\xb3\x17\x47\x58\x78\xd0\x45\xd6\x5b\xd5\x5f\x25\x1d\x71\x49\xa6\x7a\x64\xda\xd0\x6f\xc7\x3a\x4c\xe3\x09\xc0\x6e\x96\x2c\xa7\xa7\x77\x34\x10\x05\x08\x21\x44\x92\x65\x77\xdf\x20\x5c\xbc\xe7\x97\x3f\xf4\x1a\x45\xd6\xe7\x27\x4a\xde\x74\x27\x66\x11\x7d\x70\xba\xd3\x78\xf9\x1e\x0d\xca\xc8\x39\xde\x7c\xb3\xa6\xe1\xbc\xd7\xc1\x6a\x6f\xb3\x0e\x52\xbe\xe4\x98\x8a\x15\x70\x94\x70\x26\x59\xc0\xa2\xf2\x1c\xfb\xd9\xc5\xf9\xbc\xd5\x92\x9c\xa3\xdf\xe6\x1e\xb3\x0d\x49\xba\x87\x50\x5f\x84\xfe\xe9\xd6\xf8\xbb\xe6\xf0\x7a\xeb\xa6\x65\x3b\x86\x8b\x79\x93\xf5\x59\x20\x6e\xb4\xa7\x44\xf4\x3f\xa5\xfe\x67\x42\x12\xdb\xd3\xe7\xbb\xa5\xa3\x8c\x5c\x2b\x97\xbb\xbb\x7f\x8e\xc5\x6e\xed\x43\x5c\xbf\x74\xc8\x8f\xff\xe6\xd6\xbe\x91\xb6\xf5\x95\xe4\xed\x93\xc4\xa8\x5b\xf9\x76\x4d\x35\xb7\xd8\x8c\xb6\x7d\xaf\x72\xe0\xb6\xbd\x01\x63\x9e\x76\xab\x1a\x32\x76\xe4\x8c\x76\xc2\xad\x6c\xa2\x65\xf7\xcf\xf8\xa7\xda\x2a\xb9\x8c\x3d\x3c\xa3\x9d\x64\x33\xe5\x1a\xb5\x2d\xfb\x86\xa2\x5a\x7f\x19\x5b\x7f\xc6\x3f\xd1\x53\xd3\xe2\x41\x5b\xd3\x4f\xf0\xec\xb0\x42\x73\x43\xd2\x68\x27\xd3\x6a\x6a\x34\xf6\x4e\x1e\x52\x8b\x87\x6c\xcc\xae\x44\xfb\x9e\xa7\x51\x4f\x9d\x55\x03\x81\x8e\x67\xfc\xb4\x69\xf0\x3a\x18\xf2\x40\xd0\xf6\xa8\x34\xe3\xc9\x98\xaf\xf6\xda\x24\xd3\xeb\x60\xb9\x0e\xd3\x1f\xa9\xff\xee\x1f\xfd\x37\x00\x00\xff\xff\x69\x5d\x0a\x6a\x39\x9d\x00\x00") func v2SchemaJSONBytes() ([]byte, error) { return bindataRead( @@ -118,7 
+104,7 @@ func v2SchemaJSON() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "v2/schema.json", size: 40020, mode: os.FileMode(420), modTime: time.Unix(1446147817, 0)} + info := bindataFileInfo{name: "v2/schema.json", size: 40249, mode: os.FileMode(420), modTime: time.Unix(1482389892, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -176,7 +162,7 @@ func AssetNames() []string { // _bindata is a table, holding each asset generator, mapped to its name. var _bindata = map[string]func() (*asset, error){ "jsonschema-draft-04.json": jsonschemaDraft04JSON, - "v2/schema.json": v2SchemaJSON, + "v2/schema.json": v2SchemaJSON, } // AssetDir returns the file names below a certain @@ -218,7 +204,6 @@ type bintree struct { Func func() (*asset, error) Children map[string]*bintree } - var _bintree = &bintree{nil, map[string]*bintree{ "jsonschema-draft-04.json": &bintree{jsonschemaDraft04JSON, map[string]*bintree{}}, "v2": &bintree{nil, map[string]*bintree{ @@ -272,3 +257,4 @@ func _filePath(dir, name string) string { cannonicalName := strings.Replace(name, "\\", "/", -1) return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) } + diff --git a/vendor/github.com/go-openapi/spec/expander.go b/vendor/github.com/go-openapi/spec/expander.go index eb1490b05540..b4429a21c825 100644 --- a/vendor/github.com/go-openapi/spec/expander.go +++ b/vendor/github.com/go-openapi/spec/expander.go @@ -17,7 +17,10 @@ package spec import ( "encoding/json" "fmt" + "log" "net/url" + "os" + "path/filepath" "reflect" "strings" "sync" @@ -26,6 +29,18 @@ import ( "github.com/go-openapi/swag" ) +var ( + // Debug enables logging when SWAGGER_DEBUG env var is not empty + Debug = os.Getenv("SWAGGER_DEBUG") != "" +) + +// ExpandOptions provides options for expand. 
+type ExpandOptions struct { + RelativeBase string + SkipSchemas bool + ContinueOnError bool +} + // ResolutionCache a cache for resolving urls type ResolutionCache interface { Get(string) (interface{}, bool) @@ -37,7 +52,11 @@ type simpleCache struct { store map[string]interface{} } -var resCache = initResolutionCache() +var resCache ResolutionCache + +func init() { + resCache = initResolutionCache() +} func initResolutionCache() ResolutionCache { return &simpleCache{store: map[string]interface{}{ @@ -47,8 +66,11 @@ func initResolutionCache() ResolutionCache { } func (s *simpleCache) Get(uri string) (interface{}, bool) { + debugLog("getting %q from resolution cache", uri) s.lock.Lock() v, ok := s.store[uri] + debugLog("got %q from resolution cache: %t", uri, ok) + s.lock.Unlock() return v, ok } @@ -59,9 +81,9 @@ func (s *simpleCache) Set(uri string, data interface{}) { s.lock.Unlock() } -// ResolveRef resolves a reference against a context root -func ResolveRef(root interface{}, ref *Ref) (*Schema, error) { - resolver, err := defaultSchemaLoader(root, nil, nil) +// ResolveRefWithBase resolves a reference against a context root with preservation of base path +func ResolveRefWithBase(root interface{}, ref *Ref, opts *ExpandOptions) (*Schema, error) { + resolver, err := defaultSchemaLoader(root, nil, opts, nil) if err != nil { return nil, err } @@ -73,9 +95,19 @@ func ResolveRef(root interface{}, ref *Ref) (*Schema, error) { return result, nil } +// ResolveRef resolves a reference against a context root +func ResolveRef(root interface{}, ref *Ref) (*Schema, error) { + return ResolveRefWithBase(root, ref, nil) +} + // ResolveParameter resolves a paramter reference against a context root func ResolveParameter(root interface{}, ref Ref) (*Parameter, error) { - resolver, err := defaultSchemaLoader(root, nil, nil) + return ResolveParameterWithBase(root, ref, nil) +} + +// ResolveParameterWithBase resolves a paramter reference against a context root and base path +func ResolveParameterWithBase(root interface{}, ref Ref, opts *ExpandOptions) (*Parameter, error) { + resolver, err := defaultSchemaLoader(root, nil, opts, nil) if err != nil { return nil, err } @@ -89,7 +121,12 @@ func ResolveParameter(root interface{}, ref Ref) (*Parameter, error) { // ResolveResponse resolves response a reference against a context root func ResolveResponse(root interface{}, ref Ref) (*Response, error) { - resolver, err := defaultSchemaLoader(root, nil, nil) + return ResolveResponseWithBase(root, ref, nil) +} + +// ResolveResponseWithBase resolves response a reference against a context root and base path +func ResolveResponseWithBase(root interface{}, ref Ref, opts *ExpandOptions) (*Response, error) { + resolver, err := defaultSchemaLoader(root, nil, opts, nil) if err != nil { return nil, err } @@ -101,23 +138,72 @@ func ResolveResponse(root interface{}, ref Ref) (*Response, error) { return result, nil } +// ResolveItems resolves header and parameter items reference against a context root and base path +func ResolveItems(root interface{}, ref Ref, opts *ExpandOptions) (*Items, error) { + resolver, err := defaultSchemaLoader(root, nil, opts, nil) + if err != nil { + return nil, err + } + + result := new(Items) + if err := resolver.Resolve(&ref, result); err != nil { + return nil, err + } + return result, nil +} + +// ResolvePathItem resolves response a path item against a context root and base path +func ResolvePathItem(root interface{}, ref Ref, opts *ExpandOptions) (*PathItem, error) { + resolver, err := 
defaultSchemaLoader(root, nil, opts, nil) + if err != nil { + return nil, err + } + + result := new(PathItem) + if err := resolver.Resolve(&ref, result); err != nil { + return nil, err + } + return result, nil +} + type schemaLoader struct { loadingRef *Ref startingRef *Ref currentRef *Ref root interface{} + options *ExpandOptions cache ResolutionCache loadDoc func(string) (json.RawMessage, error) } var idPtr, _ = jsonpointer.New("/id") -var schemaPtr, _ = jsonpointer.New("/$schema") var refPtr, _ = jsonpointer.New("/$ref") -func defaultSchemaLoader(root interface{}, ref *Ref, cache ResolutionCache) (*schemaLoader, error) { +// PathLoader function to use when loading remote refs +var PathLoader func(string) (json.RawMessage, error) + +func init() { + PathLoader = func(path string) (json.RawMessage, error) { + data, err := swag.LoadFromFileOrHTTP(path) + if err != nil { + return nil, err + } + return json.RawMessage(data), nil + } +} + +func defaultSchemaLoader( + root interface{}, + ref *Ref, + expandOptions *ExpandOptions, + cache ResolutionCache) (*schemaLoader, error) { + if cache == nil { cache = resCache } + if expandOptions == nil { + expandOptions = &ExpandOptions{} + } var ptr *jsonpointer.Pointer if ref != nil { @@ -127,18 +213,16 @@ func defaultSchemaLoader(root interface{}, ref *Ref, cache ResolutionCache) (*sc currentRef := nextRef(root, ref, ptr) return &schemaLoader{ - root: root, loadingRef: ref, startingRef: ref, + currentRef: currentRef, + root: root, + options: expandOptions, cache: cache, loadDoc: func(path string) (json.RawMessage, error) { - data, err := swag.LoadFromFileOrHTTP(path) - if err != nil { - return nil, err - } - return json.RawMessage(data), nil + debugLog("fetching document at %q", path) + return PathLoader(path) }, - currentRef: currentRef, }, nil } @@ -159,6 +243,7 @@ func nextRef(startingNode interface{}, startingRef *Ref, ptr *jsonpointer.Pointe if startingRef == nil { return nil } + if ptr == nil { return startingRef } @@ -184,32 +269,111 @@ func nextRef(startingNode interface{}, startingRef *Ref, ptr *jsonpointer.Pointe refRef, _, _ := refPtr.Get(node) if refRef != nil { - rf, _ := NewRef(refRef.(string)) + var rf Ref + switch value := refRef.(type) { + case string: + rf, _ = NewRef(value) + } nw, err := ret.Inherits(rf) if err != nil { break } + nwURL := nw.GetURL() + if nwURL.Scheme == "file" || (nwURL.Scheme == "" && nwURL.Host == "") { + nwpt := filepath.ToSlash(nwURL.Path) + if filepath.IsAbs(nwpt) { + _, err := os.Stat(nwpt) + if err != nil { + nwURL.Path = filepath.Join(".", nwpt) + } + } + } + ret = nw } } + return ret } +func debugLog(msg string, args ...interface{}) { + if Debug { + log.Printf(msg, args...) 
+ } +} + +func normalizeFileRef(ref *Ref, relativeBase string) *Ref { + refURL := ref.GetURL() + debugLog("normalizing %s against %s (%s)", ref.String(), relativeBase, refURL.String()) + if strings.HasPrefix(refURL.String(), "#") { + return ref + } + + if refURL.Scheme == "file" || (refURL.Scheme == "" && refURL.Host == "") { + filePath := refURL.Path + debugLog("normalizing file path: %s", filePath) + + if !filepath.IsAbs(filepath.FromSlash(filePath)) && len(relativeBase) != 0 { + debugLog("joining %s with %s", relativeBase, filePath) + if fi, err := os.Stat(filepath.FromSlash(relativeBase)); err == nil { + if !fi.IsDir() { + relativeBase = filepath.Dir(filepath.FromSlash(relativeBase)) + } + } + filePath = filepath.Join(filepath.FromSlash(relativeBase), filepath.FromSlash(filePath)) + } + if !filepath.IsAbs(filepath.FromSlash(filePath)) { + pwd, err := os.Getwd() + if err == nil { + debugLog("joining cwd %s with %s", pwd, filePath) + filePath = filepath.Join(pwd, filepath.FromSlash(filePath)) + } + } + + debugLog("cleaning %s", filePath) + filePath = filepath.Clean(filepath.FromSlash(filePath)) + _, err := os.Stat(filepath.FromSlash(filePath)) + if err == nil { + debugLog("rewriting url %s to scheme \"\" path %s", refURL.String(), filePath) + slp := filepath.FromSlash(filePath) + if filepath.IsAbs(slp) && filepath.Separator == '\\' && len(slp) > 1 && slp[1] == ':' && ('a' <= slp[0] && slp[0] <= 'z' || 'A' <= slp[0] && slp[0] <= 'Z') { + slp = slp[2:] + } + refURL.Scheme = "" + refURL.Path = filepath.ToSlash(slp) + debugLog("new url with joined filepath: %s", refURL.String()) + *ref = MustCreateRef(refURL.String()) + } + } + + debugLog("refurl: %s", ref.GetURL().String()) + return ref +} + func (r *schemaLoader) resolveRef(currentRef, ref *Ref, node, target interface{}) error { + tgt := reflect.ValueOf(target) if tgt.Kind() != reflect.Ptr { return fmt.Errorf("resolve ref: target needs to be a pointer") } oldRef := currentRef + if currentRef != nil { + debugLog("resolve ref current %s new %s", currentRef.String(), ref.String()) + nextRef := nextRef(node, ref, currentRef.GetPointer()) + if nextRef == nil || nextRef.GetURL() == nil { + return nil + } var err error - currentRef, err = currentRef.Inherits(*nextRef(node, ref, currentRef.GetPointer())) + currentRef, err = currentRef.Inherits(*nextRef) + debugLog("resolved ref current %s", currentRef.String()) if err != nil { return err } } + if currentRef == nil { currentRef = ref } @@ -245,42 +409,71 @@ func (r *schemaLoader) resolveRef(currentRef, ref *Ref, node, target interface{} return nil } - if refURL.Scheme != "" && refURL.Host != "" { - // most definitely take the red pill - data, _, _, err := r.load(refURL) - if err != nil { - return err - } + relativeBase := "" + if r.options != nil && r.options.RelativeBase != "" { + relativeBase = r.options.RelativeBase + } + normalizeFileRef(currentRef, relativeBase) + debugLog("current ref normalized file: %s", currentRef.String()) + normalizeFileRef(ref, relativeBase) + debugLog("ref normalized file: %s", currentRef.String()) - if ((oldRef == nil && currentRef != nil) || - (oldRef != nil && currentRef == nil) || - oldRef.String() != currentRef.String()) && - ((oldRef == nil && ref != nil) || - (oldRef != nil && ref == nil) || - (oldRef.String() != ref.String())) { + data, _, _, err := r.load(currentRef.GetURL()) + if err != nil { + return err + } - return r.resolveRef(currentRef, ref, data, target) - } + if ((oldRef == nil && currentRef != nil) || + (oldRef != nil && currentRef == nil) || + 
oldRef.String() != currentRef.String()) && + ((oldRef == nil && ref != nil) || + (oldRef != nil && ref == nil) || + (oldRef.String() != ref.String())) { - var res interface{} - if currentRef.String() != "" { - res, _, err = currentRef.GetPointer().Get(data) + return r.resolveRef(currentRef, ref, data, target) + } + + var res interface{} + if currentRef.String() != "" { + res, _, err = currentRef.GetPointer().Get(data) + if err != nil { + if strings.HasPrefix(ref.String(), "#") { + if r.loadingRef != nil { + rr, er := r.loadingRef.Inherits(*ref) + if er != nil { + return er + } + refURL = rr.GetURL() + + data, _, _, err = r.load(refURL) + if err != nil { + return err + } + } else { + data = r.root + } + } + + res, _, err = ref.GetPointer().Get(data) if err != nil { return err } - } else { - res = data - } - - if err := swag.DynamicJSONToStruct(res, target); err != nil { - return err } + } else { + res = data + } + if err := swag.DynamicJSONToStruct(res, target); err != nil { + return err } + + r.currentRef = currentRef + return nil } func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) { + debugLog("loading schema from url: %s", refURL) toFetch := *refURL toFetch.Fragment = "" @@ -299,44 +492,51 @@ func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) return data, toFetch, fromCache, nil } -func (r *schemaLoader) Resolve(ref *Ref, target interface{}) error { - if err := r.resolveRef(r.currentRef, ref, r.root, target); err != nil { - return err - } - return nil +func (r *schemaLoader) Resolve(ref *Ref, target interface{}) error { + return r.resolveRef(r.currentRef, ref, r.root, target) } -type specExpander struct { - spec *Swagger - resolver *schemaLoader +func (r *schemaLoader) reset() { + ref := r.startingRef + + var ptr *jsonpointer.Pointer + if ref != nil { + ptr = ref.GetPointer() + } + + r.currentRef = nextRef(r.root, ref, ptr) } // ExpandSpec expands the references in a swagger spec -func ExpandSpec(spec *Swagger) error { - resolver, err := defaultSchemaLoader(spec, nil, nil) - if err != nil { +func ExpandSpec(spec *Swagger, options *ExpandOptions) error { + resolver, err := defaultSchemaLoader(spec, nil, options, nil) + // Just in case this ever returns an error. 
+ if shouldStopOnError(err, resolver.options) { return err } - for key, defintition := range spec.Definitions { - var def *Schema - var err error - if def, err = expandSchema(defintition, []string{"#/definitions/" + key}, resolver); err != nil { - return err + if options == nil || !options.SkipSchemas { + for key, definition := range spec.Definitions { + var def *Schema + var err error + if def, err = expandSchema(definition, []string{"#/definitions/" + key}, resolver); shouldStopOnError(err, resolver.options) { + return err + } + resolver.reset() + spec.Definitions[key] = *def } - spec.Definitions[key] = *def } for key, parameter := range spec.Parameters { - if err := expandParameter(¶meter, resolver); err != nil { + if err := expandParameter(¶meter, resolver); shouldStopOnError(err, resolver.options) { return err } spec.Parameters[key] = parameter } for key, response := range spec.Responses { - if err := expandResponse(&response, resolver); err != nil { + if err := expandResponse(&response, resolver); shouldStopOnError(err, resolver.options) { return err } spec.Responses[key] = response @@ -344,7 +544,7 @@ func ExpandSpec(spec *Swagger) error { if spec.Paths != nil { for key, path := range spec.Paths.Paths { - if err := expandPathItem(&path, resolver); err != nil { + if err := expandPathItem(&path, resolver); shouldStopOnError(err, resolver.options) { return err } spec.Paths.Paths[key] = path @@ -354,9 +554,25 @@ func ExpandSpec(spec *Swagger) error { return nil } +func shouldStopOnError(err error, opts *ExpandOptions) bool { + if err != nil && !opts.ContinueOnError { + return true + } + + if err != nil { + log.Println(err) + } + + return false +} + // ExpandSchema expands the refs in the schema object func ExpandSchema(schema *Schema, root interface{}, cache ResolutionCache) error { + return ExpandSchemaWithBasePath(schema, root, cache, nil) +} +// ExpandSchemaWithBasePath expands the refs in the schema object, base path configured through expand options +func ExpandSchemaWithBasePath(schema *Schema, root interface{}, cache ResolutionCache, opts *ExpandOptions) error { if schema == nil { return nil } @@ -367,18 +583,17 @@ func ExpandSchema(schema *Schema, root interface{}, cache ResolutionCache) error nrr, _ := NewRef(schema.ID) var rrr *Ref if nrr.String() != "" { - switch root.(type) { + switch rt := root.(type) { case *Schema: - rid, _ := NewRef(root.(*Schema).ID) + rid, _ := NewRef(rt.ID) rrr, _ = rid.Inherits(nrr) case *Swagger: - rid, _ := NewRef(root.(*Swagger).ID) + rid, _ := NewRef(rt.ID) rrr, _ = rid.Inherits(nrr) } - } - resolver, err := defaultSchemaLoader(root, rrr, cache) + resolver, err := defaultSchemaLoader(root, rrr, opts, cache) if err != nil { return err } @@ -389,7 +604,7 @@ func ExpandSchema(schema *Schema, root interface{}, cache ResolutionCache) error } var s *Schema if s, err = expandSchema(*schema, refs, resolver); err != nil { - return nil + return err } *schema = *s return nil @@ -400,7 +615,15 @@ func expandItems(target Schema, parentRefs []string, resolver *schemaLoader) (*S if target.Items.Schema != nil { t, err := expandSchema(*target.Items.Schema, parentRefs, resolver) if err != nil { - return nil, err + if target.Items.Schema.ID == "" { + target.Items.Schema.ID = target.ID + if err != nil { + t, err = expandSchema(*target.Items.Schema, parentRefs, resolver) + if err != nil { + return nil, err + } + } + } } *target.Items.Schema = *t } @@ -415,137 +638,173 @@ func expandItems(target Schema, parentRefs []string, resolver *schemaLoader) (*S return &target, 
nil } -func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader) (schema *Schema, err error) { - defer func() { - schema = &target - }() +func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader) (*Schema, error) { if target.Ref.String() == "" && target.Ref.IsRoot() { - target = *resolver.root.(*Schema) - return + debugLog("skipping expand schema for no ref and root: %v", resolver.root) + + return resolver.root.(*Schema), nil } // t is the new expanded schema var t *Schema + for target.Ref.String() != "" { - // var newTarget Schema - pRefs := strings.Join(parentRefs, ",") - pRefs += "," - if strings.Contains(pRefs, target.Ref.String()+",") { - err = nil - return + if swag.ContainsStringsCI(parentRefs, target.Ref.String()) { + return &target, nil } - if err = resolver.Resolve(&target.Ref, &t); err != nil { - return + if err := resolver.Resolve(&target.Ref, &t); shouldStopOnError(err, resolver.options) { + return &target, err + } + + if swag.ContainsStringsCI(parentRefs, target.Ref.String()) { + debugLog("ref already exists in parent") + return &target, nil } parentRefs = append(parentRefs, target.Ref.String()) - target = *t + if t != nil { + target = *t + } } - if t, err = expandItems(target, parentRefs, resolver); err != nil { - return + t, err := expandItems(target, parentRefs, resolver) + if shouldStopOnError(err, resolver.options) { + return &target, err + } + if t != nil { + target = *t } - target = *t for i := range target.AllOf { - if t, err = expandSchema(target.AllOf[i], parentRefs, resolver); err != nil { - return + t, err := expandSchema(target.AllOf[i], parentRefs, resolver) + if shouldStopOnError(err, resolver.options) { + return &target, err + } + if t != nil { + target.AllOf[i] = *t } - target.AllOf[i] = *t } for i := range target.AnyOf { - if t, err = expandSchema(target.AnyOf[i], parentRefs, resolver); err != nil { - return + t, err := expandSchema(target.AnyOf[i], parentRefs, resolver) + if shouldStopOnError(err, resolver.options) { + return &target, err } target.AnyOf[i] = *t } for i := range target.OneOf { - if t, err = expandSchema(target.OneOf[i], parentRefs, resolver); err != nil { - return + t, err := expandSchema(target.OneOf[i], parentRefs, resolver) + if shouldStopOnError(err, resolver.options) { + return &target, err + } + if t != nil { + target.OneOf[i] = *t } - target.OneOf[i] = *t } if target.Not != nil { - if t, err = expandSchema(*target.Not, parentRefs, resolver); err != nil { - return + t, err := expandSchema(*target.Not, parentRefs, resolver) + if shouldStopOnError(err, resolver.options) { + return &target, err + } + if t != nil { + *target.Not = *t } - *target.Not = *t } - for k, _ := range target.Properties { - if t, err = expandSchema(target.Properties[k], parentRefs, resolver); err != nil { - return + for k := range target.Properties { + t, err := expandSchema(target.Properties[k], parentRefs, resolver) + if shouldStopOnError(err, resolver.options) { + return &target, err + } + if t != nil { + target.Properties[k] = *t } - target.Properties[k] = *t } if target.AdditionalProperties != nil && target.AdditionalProperties.Schema != nil { - if t, err = expandSchema(*target.AdditionalProperties.Schema, parentRefs, resolver); err != nil { - return + t, err := expandSchema(*target.AdditionalProperties.Schema, parentRefs, resolver) + if shouldStopOnError(err, resolver.options) { + return &target, err + } + if t != nil { + *target.AdditionalProperties.Schema = *t } - *target.AdditionalProperties.Schema = *t } - for k, _ 
:= range target.PatternProperties { - if t, err = expandSchema(target.PatternProperties[k], parentRefs, resolver); err != nil { - return + for k := range target.PatternProperties { + t, err := expandSchema(target.PatternProperties[k], parentRefs, resolver) + if shouldStopOnError(err, resolver.options) { + return &target, err + } + if t != nil { + target.PatternProperties[k] = *t } - target.PatternProperties[k] = *t } - for k, _ := range target.Dependencies { + for k := range target.Dependencies { if target.Dependencies[k].Schema != nil { - if t, err = expandSchema(*target.Dependencies[k].Schema, parentRefs, resolver); err != nil { - return + t, err := expandSchema(*target.Dependencies[k].Schema, parentRefs, resolver) + if shouldStopOnError(err, resolver.options) { + return &target, err + } + if t != nil { + *target.Dependencies[k].Schema = *t } - *target.Dependencies[k].Schema = *t } } if target.AdditionalItems != nil && target.AdditionalItems.Schema != nil { - if t, err = expandSchema(*target.AdditionalItems.Schema, parentRefs, resolver); err != nil { - return + t, err := expandSchema(*target.AdditionalItems.Schema, parentRefs, resolver) + if shouldStopOnError(err, resolver.options) { + return &target, err + } + if t != nil { + *target.AdditionalItems.Schema = *t } - *target.AdditionalItems.Schema = *t } - for k, _ := range target.Definitions { - if t, err = expandSchema(target.Definitions[k], parentRefs, resolver); err != nil { - return + for k := range target.Definitions { + t, err := expandSchema(target.Definitions[k], parentRefs, resolver) + if shouldStopOnError(err, resolver.options) { + return &target, err + } + if t != nil { + target.Definitions[k] = *t } - target.Definitions[k] = *t } - return + return &target, nil } func expandPathItem(pathItem *PathItem, resolver *schemaLoader) error { if pathItem == nil { return nil } + if pathItem.Ref.String() != "" { if err := resolver.Resolve(&pathItem.Ref, &pathItem); err != nil { return err } + resolver.reset() + pathItem.Ref = Ref{} } for idx := range pathItem.Parameters { - if err := expandParameter(&(pathItem.Parameters[idx]), resolver); err != nil { + if err := expandParameter(&(pathItem.Parameters[idx]), resolver); shouldStopOnError(err, resolver.options) { return err } } - if err := expandOperation(pathItem.Get, resolver); err != nil { + if err := expandOperation(pathItem.Get, resolver); shouldStopOnError(err, resolver.options) { return err } - if err := expandOperation(pathItem.Head, resolver); err != nil { + if err := expandOperation(pathItem.Head, resolver); shouldStopOnError(err, resolver.options) { return err } - if err := expandOperation(pathItem.Options, resolver); err != nil { + if err := expandOperation(pathItem.Options, resolver); shouldStopOnError(err, resolver.options) { return err } - if err := expandOperation(pathItem.Put, resolver); err != nil { + if err := expandOperation(pathItem.Put, resolver); shouldStopOnError(err, resolver.options) { return err } - if err := expandOperation(pathItem.Post, resolver); err != nil { + if err := expandOperation(pathItem.Post, resolver); shouldStopOnError(err, resolver.options) { return err } - if err := expandOperation(pathItem.Patch, resolver); err != nil { + if err := expandOperation(pathItem.Patch, resolver); shouldStopOnError(err, resolver.options) { return err } - if err := expandOperation(pathItem.Delete, resolver); err != nil { + if err := expandOperation(pathItem.Delete, resolver); shouldStopOnError(err, resolver.options) { return err } return nil @@ -555,8 +814,9 @@ func 
expandOperation(op *Operation, resolver *schemaLoader) error { if op == nil { return nil } + for i, param := range op.Parameters { - if err := expandParameter(&param, resolver); err != nil { + if err := expandParameter(&param, resolver); shouldStopOnError(err, resolver.options) { return err } op.Parameters[i] = param @@ -564,11 +824,11 @@ func expandOperation(op *Operation, resolver *schemaLoader) error { if op.Responses != nil { responses := op.Responses - if err := expandResponse(responses.Default, resolver); err != nil { + if err := expandResponse(responses.Default, resolver); shouldStopOnError(err, resolver.options) { return err } for code, response := range responses.StatusCodeResponses { - if err := expandResponse(&response, resolver); err != nil { + if err := expandResponse(&response, resolver); shouldStopOnError(err, resolver.options) { return err } responses.StatusCodeResponses[code] = response @@ -582,22 +842,29 @@ func expandResponse(response *Response, resolver *schemaLoader) error { return nil } + var parentRefs []string + if response.Ref.String() != "" { - if err := resolver.Resolve(&response.Ref, response); err != nil { + parentRefs = append(parentRefs, response.Ref.String()) + if err := resolver.Resolve(&response.Ref, response); shouldStopOnError(err, resolver.options) { return err } + resolver.reset() + response.Ref = Ref{} } - if response.Schema != nil { - parentRefs := []string{response.Schema.Ref.String()} - if err := resolver.Resolve(&response.Schema.Ref, &response.Schema); err != nil { + if !resolver.options.SkipSchemas && response.Schema != nil { + parentRefs = append(parentRefs, response.Schema.Ref.String()) + debugLog("response ref: %s", response.Schema.Ref) + if err := resolver.Resolve(&response.Schema.Ref, &response.Schema); shouldStopOnError(err, resolver.options) { return err } - if s, err := expandSchema(*response.Schema, parentRefs, resolver); err != nil { + s, err := expandSchema(*response.Schema, parentRefs, resolver) + if shouldStopOnError(err, resolver.options) { return err - } else { - *response.Schema = *s } + resolver.reset() + *response.Schema = *s } return nil } @@ -606,21 +873,28 @@ func expandParameter(parameter *Parameter, resolver *schemaLoader) error { if parameter == nil { return nil } + + var parentRefs []string + if parameter.Ref.String() != "" { - if err := resolver.Resolve(&parameter.Ref, parameter); err != nil { + parentRefs = append(parentRefs, parameter.Ref.String()) + if err := resolver.Resolve(&parameter.Ref, parameter); shouldStopOnError(err, resolver.options) { return err } + resolver.reset() + parameter.Ref = Ref{} } - if parameter.Schema != nil { - parentRefs := []string{parameter.Schema.Ref.String()} - if err := resolver.Resolve(&parameter.Schema.Ref, &parameter.Schema); err != nil { + if !resolver.options.SkipSchemas && parameter.Schema != nil { + parentRefs = append(parentRefs, parameter.Schema.Ref.String()) + if err := resolver.Resolve(&parameter.Schema.Ref, &parameter.Schema); shouldStopOnError(err, resolver.options) { return err } - if s, err := expandSchema(*parameter.Schema, parentRefs, resolver); err != nil { + s, err := expandSchema(*parameter.Schema, parentRefs, resolver) + if shouldStopOnError(err, resolver.options) { return err - } else { - *parameter.Schema = *s } + resolver.reset() + *parameter.Schema = *s } return nil } diff --git a/vendor/github.com/go-openapi/spec/header.go b/vendor/github.com/go-openapi/spec/header.go index 758b845310f4..85c4d454c1e8 100644 --- a/vendor/github.com/go-openapi/spec/header.go +++
b/vendor/github.com/go-openapi/spec/header.go @@ -16,7 +16,9 @@ package spec import ( "encoding/json" + "strings" + "github.com/go-openapi/jsonpointer" "github.com/go-openapi/swag" ) @@ -30,6 +32,7 @@ type HeaderProps struct { type Header struct { CommonValidations SimpleSchema + VendorExtensible HeaderProps } @@ -158,8 +161,35 @@ func (h *Header) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &h.SimpleSchema); err != nil { return err } + if err := json.Unmarshal(data, &h.VendorExtensible); err != nil { + return err + } if err := json.Unmarshal(data, &h.HeaderProps); err != nil { return err } return nil } + +// JSONLookup look up a value by the json property name +func (p Header) JSONLookup(token string) (interface{}, error) { + if ex, ok := p.Extensions[token]; ok { + return &ex, nil + } + + r, _, err := jsonpointer.GetForToken(p.CommonValidations, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(p.SimpleSchema, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(p.HeaderProps, token) + return r, err +} diff --git a/vendor/github.com/go-openapi/spec/items.go b/vendor/github.com/go-openapi/spec/items.go index 4d57ea5ca6fc..46944fb699f3 100644 --- a/vendor/github.com/go-openapi/spec/items.go +++ b/vendor/github.com/go-openapi/spec/items.go @@ -16,7 +16,9 @@ package spec import ( "encoding/json" + "strings" + "github.com/go-openapi/jsonpointer" "github.com/go-openapi/swag" ) @@ -60,11 +62,12 @@ type CommonValidations struct { // Items a limited subset of JSON-Schema's items object. // It is used by parameter definitions that are not located in "body". 
// -// For more information: http://goo.gl/8us55a#items-object- +// For more information: http://goo.gl/8us55a#items-object type Items struct { Refable CommonValidations SimpleSchema + VendorExtensible } // NewItems creates a new instance of items @@ -197,3 +200,20 @@ func (i Items) MarshalJSON() ([]byte, error) { } return swag.ConcatJSON(b3, b1, b2), nil } + +// JSONLookup look up a value by the json property name +func (p Items) JSONLookup(token string) (interface{}, error) { + if token == "$ref" { + return &p.Ref, nil + } + + r, _, err := jsonpointer.GetForToken(p.CommonValidations, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(p.SimpleSchema, token) + return r, err +} diff --git a/vendor/github.com/go-openapi/spec/parameter.go b/vendor/github.com/go-openapi/spec/parameter.go index 8fb66d12a53f..71aee1e80638 100644 --- a/vendor/github.com/go-openapi/spec/parameter.go +++ b/vendor/github.com/go-openapi/spec/parameter.go @@ -16,6 +16,7 @@ package spec import ( "encoding/json" + "strings" "github.com/go-openapi/jsonpointer" "github.com/go-openapi/swag" @@ -100,15 +101,16 @@ func (p Parameter) JSONLookup(token string) (interface{}, error) { if token == "$ref" { return &p.Ref, nil } + r, _, err := jsonpointer.GetForToken(p.CommonValidations, token) - if err != nil { + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { return nil, err } if r != nil { return r, nil } r, _, err = jsonpointer.GetForToken(p.SimpleSchema, token) - if err != nil { + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { return nil, err } if r != nil { diff --git a/vendor/github.com/go-openapi/spec/ref.go b/vendor/github.com/go-openapi/spec/ref.go index 68631df8b4e9..4833b87e2fef 100644 --- a/vendor/github.com/go-openapi/spec/ref.go +++ b/vendor/github.com/go-openapi/spec/ref.go @@ -55,7 +55,7 @@ func (r *Ref) RemoteURI() string { } // IsValidURI returns true when the url the ref points to can be found -func (r *Ref) IsValidURI() bool { +func (r *Ref) IsValidURI(basepaths ...string) bool { if r.String() == "" { return true } @@ -81,14 +81,18 @@ func (r *Ref) IsValidURI() bool { // check for local file pth := v if r.HasURLPathOnly { - p, e := filepath.Abs(pth) + base := "." + if len(basepaths) > 0 { + base = filepath.Dir(filepath.Join(basepaths...)) + } + p, e := filepath.Abs(filepath.ToSlash(filepath.Join(base, pth))) if e != nil { return false } pth = p } - fi, err := os.Stat(pth) + fi, err := os.Stat(filepath.ToSlash(pth)) if err != nil { return false } @@ -116,25 +120,18 @@ func NewRef(refURI string) (Ref, error) { return Ref{Ref: ref}, nil } -// MustCreateRef creates a ref object but +// MustCreateRef creates a ref object but panics when refURI is invalid. +// Use the NewRef method for a version that returns an error. 
func MustCreateRef(refURI string) Ref { return Ref{Ref: jsonreference.MustCreateRef(refURI)} } -// // NewResolvedRef creates a resolved ref -// func NewResolvedRef(refURI string, data interface{}) Ref { -// return Ref{ -// Ref: jsonreference.MustCreateRef(refURI), -// Resolved: data, -// } -// } - // MarshalJSON marshals this ref into a JSON object func (r Ref) MarshalJSON() ([]byte, error) { str := r.String() if str == "" { if r.IsRoot() { - return []byte(`{"$ref":"#"}`), nil + return []byte(`{"$ref":""}`), nil } return []byte("{}"), nil } diff --git a/vendor/github.com/go-openapi/spec/response.go b/vendor/github.com/go-openapi/spec/response.go index 308cc8478fc6..a32b039eaf6d 100644 --- a/vendor/github.com/go-openapi/spec/response.go +++ b/vendor/github.com/go-openapi/spec/response.go @@ -17,6 +17,7 @@ package spec import ( "encoding/json" + "github.com/go-openapi/jsonpointer" "github.com/go-openapi/swag" ) @@ -34,6 +35,19 @@ type ResponseProps struct { type Response struct { Refable ResponseProps + VendorExtensible +} + +// JSONLookup look up a value by the json property name +func (p Response) JSONLookup(token string) (interface{}, error) { + if ex, ok := p.Extensions[token]; ok { + return &ex, nil + } + if token == "$ref" { + return &p.Ref, nil + } + r, _, err := jsonpointer.GetForToken(p.ResponseProps, token) + return r, err } // UnmarshalJSON hydrates this items instance with the data from JSON @@ -44,6 +58,9 @@ func (r *Response) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &r.Refable); err != nil { return err } + if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { + return err + } return nil } @@ -57,7 +74,11 @@ func (r Response) MarshalJSON() ([]byte, error) { if err != nil { return nil, err } - return swag.ConcatJSON(b1, b2), nil + b3, err := json.Marshal(r.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2, b3), nil } // NewResponse creates a new response instance diff --git a/vendor/github.com/go-openapi/spec/responses.go b/vendor/github.com/go-openapi/spec/responses.go index ea071ca63d85..3ab06697f2ea 100644 --- a/vendor/github.com/go-openapi/spec/responses.go +++ b/vendor/github.com/go-openapi/spec/responses.go @@ -51,7 +51,7 @@ func (r Responses) JSONLookup(token string) (interface{}, error) { } if i, err := strconv.Atoi(token); err == nil { if scr, ok := r.StatusCodeResponses[i]; ok { - return &scr, nil + return scr, nil } } return nil, fmt.Errorf("object has no field %q", token) diff --git a/vendor/github.com/go-openapi/spec/schema.go b/vendor/github.com/go-openapi/spec/schema.go index eb88f005c5b6..1cdcc163f1b6 100644 --- a/vendor/github.com/go-openapi/spec/schema.go +++ b/vendor/github.com/go-openapi/spec/schema.go @@ -201,8 +201,8 @@ func (r *SchemaURL) UnmarshalJSON(data []byte) error { type SchemaProps struct { ID string `json:"id,omitempty"` - Ref Ref `json:"-,omitempty"` - Schema SchemaURL `json:"-,omitempty"` + Ref Ref `json:"-"` + Schema SchemaURL `json:"-"` Description string `json:"description,omitempty"` Type StringOrArray `json:"type,omitempty"` Format string `json:"format,omitempty"` @@ -269,7 +269,7 @@ func (s Schema) JSONLookup(token string) (interface{}, error) { } r, _, err := jsonpointer.GetForToken(s.SchemaProps, token) - if r != nil || err != nil { + if r != nil || (err != nil && !strings.HasPrefix(err.Error(), "object has no field")) { return r, err } r, _, err = jsonpointer.GetForToken(s.SwaggerSchemaProps, token) diff --git a/vendor/github.com/go-openapi/spec/spec.go 
b/vendor/github.com/go-openapi/spec/spec.go index cc2ae56b2bed..0bb045bc06ad 100644 --- a/vendor/github.com/go-openapi/spec/spec.go +++ b/vendor/github.com/go-openapi/spec/spec.go @@ -16,6 +16,8 @@ package spec import "encoding/json" +//go:generate curl -L --progress -o ./schemas/v2/schema.json http://swagger.io/v2/schema.json +//go:generate curl -L --progress -o ./schemas/jsonschema-draft-04.json http://json-schema.org/draft-04/schema //go:generate go-bindata -pkg=spec -prefix=./schemas -ignore=.*\.md ./schemas/... //go:generate perl -pi -e s,Json,JSON,g bindata.go @@ -27,10 +29,15 @@ const ( ) var ( - jsonSchema = MustLoadJSONSchemaDraft04() - swaggerSchema = MustLoadSwagger20Schema() + jsonSchema *Schema + swaggerSchema *Schema ) +func init() { + jsonSchema = MustLoadJSONSchemaDraft04() + swaggerSchema = MustLoadSwagger20Schema() +} + // MustLoadJSONSchemaDraft04 panics when Swagger20Schema returns an error func MustLoadJSONSchemaDraft04() *Schema { d, e := JSONSchemaDraft04() diff --git a/vendor/github.com/go-openapi/spec/swagger.go b/vendor/github.com/go-openapi/spec/swagger.go index ff3ef875ed7f..23780c78a20b 100644 --- a/vendor/github.com/go-openapi/spec/swagger.go +++ b/vendor/github.com/go-openapi/spec/swagger.go @@ -77,7 +77,7 @@ type SwaggerProps struct { Host string `json:"host,omitempty"` BasePath string `json:"basePath,omitempty"` // must start with a leading "/" Paths *Paths `json:"paths"` // required - Definitions Definitions `json:"definitions"` + Definitions Definitions `json:"definitions,omitempty"` Parameters map[string]Parameter `json:"parameters,omitempty"` Responses map[string]Response `json:"responses,omitempty"` SecurityDefinitions SecurityDefinitions `json:"securityDefinitions,omitempty"` @@ -156,7 +156,7 @@ func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) { if s.Schema != nil { return json.Marshal(s.Schema) } - return nil, nil + return []byte("null"), nil } // UnmarshalJSON converts this schema object or array from a JSON structure diff --git a/vendor/github.com/go-openapi/swag/.drone.sec b/vendor/github.com/go-openapi/swag/.drone.sec deleted file mode 100644 index 9cc7e64a2cfd..000000000000 --- a/vendor/github.com/go-openapi/swag/.drone.sec +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkExMjhHQ00ifQ.darMHuSYnLhsknrnqjdXLGcyEJ5kM13yQbLGi2UWcKXpddHvPT3dMCnP5y7e27c76R2HFnvr56BqDMI-x0zyuQtjYKzSFUGjOcEhOH3OL1Hxu-Cm2z2443BS8asXNA3sEveAGQvG68jffm7CvtEtMo57wBpggI7UHbfIdLMK47s1EDpC4xanjiS1BJe5NC_Ikf0jfa6vf18oggbjxuoqSEvSNVdNRyZwG_npFaZFJzvdtehTG7GqunWjGiqBb81qNcEdzSdIZW7A_Esv4U-nOL5gGr55E9jKhv3bX4Z9ygGcSrJ3NCgR_3zRhYKkPEAOXIqQKfL6-h82BY--cHq9uw.NHL3X-1tjb8a8zF7.eRmLvOG32e7260K8rkI-HmUGG5Gb6Hu-obKKxBqHd-vVzsKnwTVJavLWktPqlXGMsnDt7MimSysNqsWemMUEviW2p3godnvjOOXTDb-RAtQ-39rvxnZ2bN8qwUVFrdiKTZD06l60yTeLW7L1psyLj50NxklFObhkpUcK5uukxLXT1SzGM9aY6_3dzW4HU9pZGQrIH1pj1UzvWIjz7iIzE1a37DHBN-FiYSASsw01v1SSIFr34gwlGcqzGfJBonffVrM4ordm3IiVm50Zvr25DrmYTKrQpJRB-KOvYxBNYDzjCaanHDyWGUGN44FUx38azHHEVBTaiOM7xwPeyCc-xTTv8WXGnL1xrhL3M_jNuwnbAjzL9X_li7KUSeYajwhGihdMZaHLYaqxh3NNnbPfYhR6sBxu8vaT1Sc4eE84QC4dV4OaAglPvrPdWL-DC7OYQyoPU8u9ggwUQHpFUzJyD549T_Tlgn-2Cw7kTe41VonH9HkoXGANDGtQCGTqTIEeFQJ3MDDucf5VteFP8_SJPfyJYxpStFt5U1AuULV9sXmpGQL_-GGFXowd0X0bHxFeo_eu1vm-oTqQQNbKRnyt5V3n4U9jhOUGnnIBy3JOG3DA2YhVJsHdlLZ9vaDpFYcxts4.SqYfES30FqVSufGbPZ6YXA \ No newline at end of file diff --git a/vendor/github.com/go-openapi/swag/.drone.yml b/vendor/github.com/go-openapi/swag/.drone.yml deleted file mode 100644 index acf10fdfbd12..000000000000 --- 
a/vendor/github.com/go-openapi/swag/.drone.yml +++ /dev/null @@ -1,32 +0,0 @@ -clone: - path: github.com/go-openapi/swag - -matrix: - GO_VERSION: - - "1.6" - -build: - integration: - image: golang:$$GO_VERSION - pull: true - commands: - - go get -u github.com/stretchr/testify - - go get -u github.com/mailru/easyjson - - go test -race - - go test -v -cover -coverprofile=coverage.out -covermode=count ./... - -notify: - slack: - channel: bots - webhook_url: $$SLACK_URL - username: drone - -publish: - coverage: - server: https://coverage.vmware.run - token: $$GITHUB_TOKEN - # threshold: 70 - # must_increase: true - when: - matrix: - GO_VERSION: "1.6" diff --git a/vendor/github.com/go-openapi/swag/.editorconfig b/vendor/github.com/go-openapi/swag/.editorconfig new file mode 100644 index 000000000000..3152da69a5d7 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-openapi/swag/.pullapprove.yml b/vendor/github.com/go-openapi/swag/.pullapprove.yml deleted file mode 100644 index 5ec183e22442..000000000000 --- a/vendor/github.com/go-openapi/swag/.pullapprove.yml +++ /dev/null @@ -1,13 +0,0 @@ -approve_by_comment: true -approve_regex: '^(:shipit:|:\+1:|\+1|LGTM|lgtm|Approved)' -reject_regex: ^[Rr]ejected -reset_on_push: false -reviewers: - members: - - casualjim - - chancez - - frapposelli - - vburenin - - pytlesk4 - name: pullapprove - required: 1 diff --git a/vendor/github.com/go-openapi/swag/.travis.yml b/vendor/github.com/go-openapi/swag/.travis.yml new file mode 100644 index 000000000000..24c69bdf36fd --- /dev/null +++ b/vendor/github.com/go-openapi/swag/.travis.yml @@ -0,0 +1,14 @@ +language: go +go: +- 1.8 +install: +- go get -u github.com/stretchr/testify +- go get -u github.com/mailru/easyjson +- go get -u gopkg.in/yaml.v2 +script: +- go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./... 
+after_success: +- bash <(curl -s https://codecov.io/bash) +notifications: + slack: + secure: QUWvCkBBK09GF7YtEvHHVt70JOkdlNBG0nIKu/5qc4/nW5HP8I2w0SEf/XR2je0eED1Qe3L/AfMCWwrEj+IUZc3l4v+ju8X8R3Lomhme0Eb0jd1MTMCuPcBT47YCj0M7RON7vXtbFfm1hFJ/jLe5+9FXz0hpXsR24PJc5ZIi/ogNwkaPqG4BmndzecpSh0vc2FJPZUD9LT0I09REY/vXR0oQAalLkW0asGD5taHZTUZq/kBpsNxaAFrLM23i4mUcf33M5fjLpvx5LRICrX/57XpBrDh2TooBU6Qj3CgoY0uPRYUmSNxbVx1czNzl2JtEpb5yjoxfVPQeg0BvQM00G8LJINISR+ohrjhkZmAqchDupAX+yFrxTtORa78CtnIL6z/aTNlgwwVD8kvL/1pFA/JWYmKDmz93mV/+6wubGzNSQCstzjkFA4/iZEKewKUoRIAi/fxyscP6L/rCpmY/4llZZvrnyTqVbt6URWpopUpH4rwYqreXAtJxJsfBJIeSmUIiDIOMGkCTvyTEW3fWGmGoqWtSHLoaWDyAIGb7azb+KvfpWtEcoPFWfSWU+LGee0A/YsUhBl7ADB9A0CJEuR8q4BPpKpfLwPKSiKSAXL7zDkyjExyhtgqbSl2jS+rKIHOZNL8JkCcTP2MKMVd563C5rC5FMKqu3S9m2b6380E= diff --git a/vendor/github.com/go-openapi/swag/README.md b/vendor/github.com/go-openapi/swag/README.md index c1d3c196ca7b..5d43728e8768 100644 --- a/vendor/github.com/go-openapi/swag/README.md +++ b/vendor/github.com/go-openapi/swag/README.md @@ -1,4 +1,4 @@ -# Swag [![Build Status](https://ci.vmware.run/api/badges/go-openapi/swag/status.svg)](https://ci.vmware.run/go-openapi/swag) [![Coverage](https://coverage.vmware.run/badges/go-openapi/swag/coverage.svg)](https://coverage.vmware.run/go-openapi/swag) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +# Swag [![Build Status](https://travis-ci.org/go-openapi/swag.svg?branch=master)](https://travis-ci.org/go-openapi/swag) [![codecov](https://codecov.io/gh/go-openapi/swag/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/swag) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) [![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/swag?status.svg)](http://godoc.org/github.com/go-openapi/swag) diff --git a/vendor/github.com/go-openapi/swag/convert.go b/vendor/github.com/go-openapi/swag/convert.go index 28d912410613..2bf5ecbba2c6 100644 --- a/vendor/github.com/go-openapi/swag/convert.go +++ b/vendor/github.com/go-openapi/swag/convert.go @@ -159,7 +159,7 @@ func FormatInt16(value int16) string { // FormatInt32 turns an int32 into a string func FormatInt32(value int32) string { - return strconv.FormatInt(int64(value), 10) + return strconv.Itoa(int(value)) } // FormatInt64 turns an int64 into a string diff --git a/vendor/github.com/go-openapi/swag/json.go b/vendor/github.com/go-openapi/swag/json.go index 6e9ec20fc67a..cb20a6a0f79a 100644 --- a/vendor/github.com/go-openapi/swag/json.go +++ b/vendor/github.com/go-openapi/swag/json.go @@ -17,6 +17,7 @@ package swag import ( "bytes" "encoding/json" + "log" "reflect" "strings" "sync" @@ -110,28 +111,40 @@ func ConcatJSON(blobs ...[]byte) []byte { if len(b) < 3 { // yep empty but also the last one, so closing this thing if i == last && a > 0 { - buf.WriteByte(closing) + if err := buf.WriteByte(closing); err != nil { + log.Println(err) + } } continue } idx = 0 if a > 0 { // we need to join with a comma for everything beyond the first non-empty item - buf.WriteByte(comma) + if err := buf.WriteByte(comma); err != nil { + log.Println(err) + } idx = 1 // this is not the first or the last so we want to drop the leading bracket } if i != last { // not the last one, strip brackets - buf.Write(b[idx : len(b)-1]) + if _, err := buf.Write(b[idx : len(b)-1]); err != nil { + log.Println(err) + } } else { // last one, strip only 
the leading bracket - buf.Write(b[idx:]) + if _, err := buf.Write(b[idx:]); err != nil { + log.Println(err) + } } a++ } // somehow it ended up being empty, so provide a default value if buf.Len() == 0 { - buf.WriteByte(opening) - buf.WriteByte(closing) + if err := buf.WriteByte(opening); err != nil { + log.Println(err) + } + if err := buf.WriteByte(closing); err != nil { + log.Println(err) + } } return buf.Bytes() } @@ -139,15 +152,23 @@ func ConcatJSON(blobs ...[]byte) []byte { // ToDynamicJSON turns an object into a properly JSON typed structure func ToDynamicJSON(data interface{}) interface{} { // TODO: convert straight to a json typed map (mergo + iterate?) - b, _ := json.Marshal(data) + b, err := json.Marshal(data) + if err != nil { + log.Println(err) + } var res interface{} - json.Unmarshal(b, &res) + if err := json.Unmarshal(b, &res); err != nil { + log.Println(err) + } return res } // FromDynamicJSON turns an object into a properly JSON typed structure func FromDynamicJSON(data, target interface{}) error { - b, _ := json.Marshal(data) + b, err := json.Marshal(data) + if err != nil { + log.Println(err) + } return json.Unmarshal(b, target) } @@ -216,6 +237,8 @@ func newNameIndex(tpe reflect.Type) nameIndex { // GetJSONNames gets all the json property names for a type func (n *NameProvider) GetJSONNames(subject interface{}) []string { + n.lock.Lock() + defer n.lock.Unlock() tpe := reflect.Indirect(reflect.ValueOf(subject)).Type() names, ok := n.index[tpe] if !ok { @@ -237,6 +260,8 @@ func (n *NameProvider) GetJSONName(subject interface{}, name string) (string, bo // GetJSONNameForType gets the json name for a go property name on a given type func (n *NameProvider) GetJSONNameForType(tpe reflect.Type, name string) (string, bool) { + n.lock.Lock() + defer n.lock.Unlock() names, ok := n.index[tpe] if !ok { names = n.makeNameIndex(tpe) @@ -246,8 +271,6 @@ func (n *NameProvider) GetJSONNameForType(tpe reflect.Type, name string) (string } func (n *NameProvider) makeNameIndex(tpe reflect.Type) nameIndex { - n.lock.Lock() - defer n.lock.Unlock() names := newNameIndex(tpe) n.index[tpe] = names return names @@ -261,6 +284,8 @@ func (n *NameProvider) GetGoName(subject interface{}, name string) (string, bool // GetGoNameForType gets the go name for a given type for a json property name func (n *NameProvider) GetGoNameForType(tpe reflect.Type, name string) (string, bool) { + n.lock.Lock() + defer n.lock.Unlock() names, ok := n.index[tpe] if !ok { names = n.makeNameIndex(tpe) diff --git a/vendor/github.com/go-openapi/swag/loading.go b/vendor/github.com/go-openapi/swag/loading.go index 6dbc31330ead..62ed1e80ab56 100644 --- a/vendor/github.com/go-openapi/swag/loading.go +++ b/vendor/github.com/go-openapi/swag/loading.go @@ -17,13 +17,25 @@ package swag import ( "fmt" "io/ioutil" + "log" "net/http" + "path/filepath" "strings" + "time" ) +// LoadHTTPTimeout the default timeout for load requests +var LoadHTTPTimeout = 30 * time.Second + // LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the path passed in func LoadFromFileOrHTTP(path string) ([]byte, error) { - return LoadStrategy(path, ioutil.ReadFile, loadHTTPBytes)(path) + return LoadStrategy(path, ioutil.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(path) +} + +// LoadFromFileOrHTTPWithTimeout loads the bytes from a file or a remote http server based on the path passed in +// timeout arg allows for per request overriding of the request timeout +func LoadFromFileOrHTTPWithTimeout(path string, timeout time.Duration) 
([]byte, error) { + return LoadStrategy(path, ioutil.ReadFile, loadHTTPBytes(timeout))(path) } // LoadStrategy returns a loader function for a given path or uri @@ -31,19 +43,32 @@ func LoadStrategy(path string, local, remote func(string) ([]byte, error)) func( if strings.HasPrefix(path, "http") { return remote } - return local + return func(pth string) ([]byte, error) { return local(filepath.FromSlash(pth)) } } -func loadHTTPBytes(path string) ([]byte, error) { - resp, err := http.Get(path) - if err != nil { - return nil, err - } - defer resp.Body.Close() +func loadHTTPBytes(timeout time.Duration) func(path string) ([]byte, error) { + return func(path string) ([]byte, error) { + client := &http.Client{Timeout: timeout} + req, err := http.NewRequest("GET", path, nil) + if err != nil { + return nil, err + } + resp, err := client.Do(req) + defer func() { + if resp != nil { + if e := resp.Body.Close(); e != nil { + log.Println(e) + } + } + }() + if err != nil { + return nil, err + } - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("could not access document at %q [%s] ", path, resp.Status) - } + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("could not access document at %q [%s] ", path, resp.Status) + } - return ioutil.ReadAll(resp.Body) + return ioutil.ReadAll(resp.Body) + } } diff --git a/vendor/github.com/go-openapi/swag/path.go b/vendor/github.com/go-openapi/swag/path.go index 273e9fbed94f..941bd0176b05 100644 --- a/vendor/github.com/go-openapi/swag/path.go +++ b/vendor/github.com/go-openapi/swag/path.go @@ -47,6 +47,9 @@ func FindInGoSearchPath(pkg string) string { // FullGoSearchPath gets the search paths for finding packages func FullGoSearchPath() string { allPaths := os.Getenv(GOPATHKey) + if allPaths == "" { + allPaths = filepath.Join(os.Getenv("HOME"), "go") + } if allPaths != "" { allPaths = strings.Join([]string{allPaths, runtime.GOROOT()}, ":") } else { diff --git a/vendor/github.com/go-openapi/swag/util.go b/vendor/github.com/go-openapi/swag/util.go index d8b54ee61ea9..40751aab441a 100644 --- a/vendor/github.com/go-openapi/swag/util.go +++ b/vendor/github.com/go-openapi/swag/util.go @@ -20,10 +20,12 @@ import ( "regexp" "sort" "strings" + "unicode" ) -// Taken from https://github.com/golang/lint/blob/1fab560e16097e5b69afb66eb93aab843ef77845/lint.go#L663-L698 +// Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769 var commonInitialisms = map[string]bool{ + "ACL": true, "API": true, "ASCII": true, "CPU": true, @@ -44,19 +46,21 @@ var commonInitialisms = map[string]bool{ "RPC": true, "SLA": true, "SMTP": true, + "SQL": true, "SSH": true, "TCP": true, "TLS": true, "TTL": true, "UDP": true, - "UUID": true, - "UID": true, "UI": true, + "UID": true, + "UUID": true, "URI": true, "URL": true, "UTF8": true, "VM": true, "XML": true, + "XMPP": true, "XSRF": true, "XSS": true, } @@ -246,6 +250,9 @@ func ToJSONName(name string) string { // ToVarName camelcases a name which can be underscored or pascal cased func ToVarName(name string) string { res := ToGoName(name) + if _, ok := commonInitialisms[res]; ok { + return lower(res) + } if len(res) <= 1 { return lower(res) } @@ -263,7 +270,18 @@ func ToGoName(name string) string { } out = append(out, uw) } - return strings.Join(out, "") + + result := strings.Join(out, "") + if len(result) > 0 { + ud := upper(result[:1]) + ru := []rune(ud) + if unicode.IsUpper(ru[0]) { + result = ud + result[1:] + } else { + result = "X" + ud + result[1:] + } + } + return result 
} // ContainsStringsCI searches a slice of strings for a case-insensitive match diff --git a/vendor/github.com/go-openapi/swag/yaml.go b/vendor/github.com/go-openapi/swag/yaml.go new file mode 100644 index 000000000000..26502f21d542 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/yaml.go @@ -0,0 +1,215 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swag + +import ( + "encoding/json" + "fmt" + "path/filepath" + "strconv" + + "github.com/mailru/easyjson/jlexer" + "github.com/mailru/easyjson/jwriter" + + yaml "gopkg.in/yaml.v2" +) + +// YAMLMatcher matches yaml +func YAMLMatcher(path string) bool { + ext := filepath.Ext(path) + return ext == ".yaml" || ext == ".yml" +} + +// YAMLToJSON converts YAML unmarshaled data into json compatible data +func YAMLToJSON(data interface{}) (json.RawMessage, error) { + jm, err := transformData(data) + if err != nil { + return nil, err + } + b, err := WriteJSON(jm) + return json.RawMessage(b), err +} + +func BytesToYAMLDoc(data []byte) (interface{}, error) { + var canary map[interface{}]interface{} // validate this is an object and not a different type + if err := yaml.Unmarshal(data, &canary); err != nil { + return nil, err + } + + var document yaml.MapSlice // preserve order that is present in the document + if err := yaml.Unmarshal(data, &document); err != nil { + return nil, err + } + return document, nil +} + +type JSONMapSlice []JSONMapItem + +func (s JSONMapSlice) MarshalJSON() ([]byte, error) { + w := &jwriter.Writer{Flags: jwriter.NilMapAsEmpty | jwriter.NilSliceAsEmpty} + s.MarshalEasyJSON(w) + return w.BuildBytes() +} + +func (s JSONMapSlice) MarshalEasyJSON(w *jwriter.Writer) { + w.RawByte('{') + + ln := len(s) + last := ln - 1 + for i := 0; i < ln; i++ { + s[i].MarshalEasyJSON(w) + if i != last { // last item + w.RawByte(',') + } + } + + w.RawByte('}') +} + +func (s *JSONMapSlice) UnmarshalJSON(data []byte) error { + l := jlexer.Lexer{Data: data} + s.UnmarshalEasyJSON(&l) + return l.Error() +} +func (s *JSONMapSlice) UnmarshalEasyJSON(in *jlexer.Lexer) { + if in.IsNull() { + in.Skip() + return + } + + var result JSONMapSlice + in.Delim('{') + for !in.IsDelim('}') { + var mi JSONMapItem + mi.UnmarshalEasyJSON(in) + result = append(result, mi) + } + *s = result +} + +type JSONMapItem struct { + Key string + Value interface{} +} + +func (s JSONMapItem) MarshalJSON() ([]byte, error) { + w := &jwriter.Writer{Flags: jwriter.NilMapAsEmpty | jwriter.NilSliceAsEmpty} + s.MarshalEasyJSON(w) + return w.BuildBytes() +} + +func (s JSONMapItem) MarshalEasyJSON(w *jwriter.Writer) { + w.String(s.Key) + w.RawByte(':') + w.Raw(WriteJSON(s.Value)) +} + +func (s *JSONMapItem) UnmarshalEasyJSON(in *jlexer.Lexer) { + key := in.UnsafeString() + in.WantColon() + value := in.Interface() + in.WantComma() + s.Key = key + s.Value = value +} +func (s *JSONMapItem) UnmarshalJSON(data []byte) error { + l := jlexer.Lexer{Data: data} + s.UnmarshalEasyJSON(&l) + return l.Error() +} + +func transformData(input 
interface{}) (out interface{}, err error) { + switch in := input.(type) { + case yaml.MapSlice: + + o := make(JSONMapSlice, len(in)) + for i, mi := range in { + var nmi JSONMapItem + switch k := mi.Key.(type) { + case string: + nmi.Key = k + case int: + nmi.Key = strconv.Itoa(k) + default: + return nil, fmt.Errorf("types don't match expect map key string or int got: %T", mi.Key) + } + + v, err := transformData(mi.Value) + if err != nil { + return nil, err + } + nmi.Value = v + o[i] = nmi + } + return o, nil + case map[interface{}]interface{}: + o := make(JSONMapSlice, 0, len(in)) + for ke, va := range in { + var nmi JSONMapItem + switch k := ke.(type) { + case string: + nmi.Key = k + case int: + nmi.Key = strconv.Itoa(k) + default: + return nil, fmt.Errorf("types don't match expect map key string or int got: %T", ke) + } + + v, err := transformData(va) + if err != nil { + return nil, err + } + nmi.Value = v + o = append(o, nmi) + } + return o, nil + case []interface{}: + len1 := len(in) + o := make([]interface{}, len1) + for i := 0; i < len1; i++ { + o[i], err = transformData(in[i]) + if err != nil { + return nil, err + } + } + return o, nil + } + return input, nil +} + +// YAMLDoc loads a yaml document from either http or a file and converts it to json +func YAMLDoc(path string) (json.RawMessage, error) { + yamlDoc, err := YAMLData(path) + if err != nil { + return nil, err + } + + data, err := YAMLToJSON(yamlDoc) + if err != nil { + return nil, err + } + + return json.RawMessage(data), nil +} + +// YAMLData loads a yaml document from either http or a file +func YAMLData(path string) (interface{}, error) { + data, err := LoadFromFileOrHTTP(path) + if err != nil { + return nil, err + } + + return BytesToYAMLDoc(data) +} diff --git a/vendor/github.com/gogo/protobuf/types/Makefile b/vendor/github.com/gogo/protobuf/types/Makefile new file mode 100644 index 000000000000..c326d2578248 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/Makefile @@ -0,0 +1,39 @@ +# Protocol Buffers for Go with Gadgets +# +# Copyright (c) 2016, The GoGo Authors. All rights reserved. +# http://github.com/gogo/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +regenerate: + go install github.com/gogo/protobuf/protoc-gen-gogotypes + go install github.com/gogo/protobuf/protoc-min-version + + protoc-min-version --version="3.0.0" --gogotypes_out=. -I=../protobuf/google/protobuf ../protobuf/google/protobuf/any.proto + protoc-min-version --version="3.0.0" --gogotypes_out=. -I=../protobuf/google/protobuf ../protobuf/google/protobuf/empty.proto + protoc-min-version --version="3.0.0" --gogotypes_out=. -I=../protobuf/google/protobuf ../protobuf/google/protobuf/timestamp.proto + protoc-min-version --version="3.0.0" --gogotypes_out=. -I=../protobuf/google/protobuf ../protobuf/google/protobuf/duration.proto + protoc-min-version --version="3.0.0" --gogotypes_out=. -I=../protobuf/google/protobuf ../protobuf/google/protobuf/struct.proto + protoc-min-version --version="3.0.0" --gogotypes_out=. -I=../protobuf/google/protobuf ../protobuf/google/protobuf/wrappers.proto + protoc-min-version --version="3.0.0" --gogotypes_out=. -I=../protobuf/google/protobuf ../protobuf/google/protobuf/field_mask.proto diff --git a/vendor/github.com/gogo/protobuf/types/any.go b/vendor/github.com/gogo/protobuf/types/any.go new file mode 100644 index 000000000000..c10caf405576 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/any.go @@ -0,0 +1,135 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +// This file implements functions to marshal proto.Message to/from +// google.protobuf.Any message. + +import ( + "fmt" + "reflect" + "strings" + + "github.com/gogo/protobuf/proto" +) + +const googleApis = "type.googleapis.com/" + +// AnyMessageName returns the name of the message contained in a google.protobuf.Any message. +// +// Note that regular type assertions should be done using the Is +// function. AnyMessageName is provided for less common use cases like filtering a +// sequence of Any messages based on a set of allowed message type names. 
+func AnyMessageName(any *Any) (string, error) { + slash := strings.LastIndex(any.TypeUrl, "/") + if slash < 0 { + return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) + } + return any.TypeUrl[slash+1:], nil +} + +// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any. +func MarshalAny(pb proto.Message) (*Any, error) { + value, err := proto.Marshal(pb) + if err != nil { + return nil, err + } + return &Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil +} + +// DynamicAny is a value that can be passed to UnmarshalAny to automatically +// allocate a proto.Message for the type specified in a google.protobuf.Any +// message. The allocated message is stored in the embedded proto.Message. +// +// Example: +// +// var x ptypes.DynamicAny +// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } +// fmt.Printf("unmarshaled message: %v", x.Message) +type DynamicAny struct { + proto.Message +} + +// Empty returns a new proto.Message of the type specified in a +// google.protobuf.Any message. It returns an error if corresponding message +// type isn't linked in. +func EmptyAny(any *Any) (proto.Message, error) { + aname, err := AnyMessageName(any) + if err != nil { + return nil, err + } + + t := proto.MessageType(aname) + if t == nil { + return nil, fmt.Errorf("any: message type %q isn't linked in", aname) + } + return reflect.New(t.Elem()).Interface().(proto.Message), nil +} + +// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any +// message and places the decoded result in pb. It returns an error if type of +// contents of Any message does not match type of pb message. +// +// pb can be a proto.Message, or a *DynamicAny. +func UnmarshalAny(any *Any, pb proto.Message) error { + if d, ok := pb.(*DynamicAny); ok { + if d.Message == nil { + var err error + d.Message, err = EmptyAny(any) + if err != nil { + return err + } + } + return UnmarshalAny(any, d.Message) + } + + aname, err := AnyMessageName(any) + if err != nil { + return err + } + + mname := proto.MessageName(pb) + if aname != mname { + return fmt.Errorf("mismatched message type: got %q want %q", aname, mname) + } + return proto.Unmarshal(any.Value, pb) +} + +// Is returns true if any value contains a given message type. +func Is(any *Any, pb proto.Message) bool { + aname, err := AnyMessageName(any) + if err != nil { + return false + } + + return aname == proto.MessageName(pb) +} diff --git a/vendor/github.com/gogo/protobuf/types/any.pb.go b/vendor/github.com/gogo/protobuf/types/any.pb.go new file mode 100644 index 000000000000..902d6d62aaeb --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/any.pb.go @@ -0,0 +1,666 @@ +// Code generated by protoc-gen-gogo. +// source: any.proto +// DO NOT EDIT! + +/* + Package types is a generated protocol buffer package. + + It is generated from these files: + any.proto + + It has these top-level messages: + Any +*/ +package types + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import bytes "bytes" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": , +// "lastName": +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +type Any struct { + // A URL/resource name whose content describes the type of the + // serialized protocol buffer message. + // + // For URLs which use the scheme `http`, `https`, or no scheme, the + // following restrictions and interpretations apply: + // + // * If no scheme is provided, `https` is assumed. + // * The last segment of the URL's path must represent the fully + // qualified name of the type (as in `path/google.protobuf.Duration`). + // The name should be in a canonical form (e.g., leading "." is + // not accepted). + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` + // Must be a valid serialized protocol buffer of the above specified type. 
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *Any) Reset() { *m = Any{} } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { return fileDescriptorAny, []int{0} } +func (*Any) XXX_WellKnownType() string { return "Any" } + +func (m *Any) GetTypeUrl() string { + if m != nil { + return m.TypeUrl + } + return "" +} + +func (m *Any) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func init() { + proto.RegisterType((*Any)(nil), "google.protobuf.Any") +} +func (this *Any) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Any) + if !ok { + that2, ok := that.(Any) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.TypeUrl != that1.TypeUrl { + if this.TypeUrl < that1.TypeUrl { + return -1 + } + return 1 + } + if c := bytes.Compare(this.Value, that1.Value); c != 0 { + return c + } + return 0 +} +func (this *Any) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Any) + if !ok { + that2, ok := that.(Any) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.TypeUrl != that1.TypeUrl { + return false + } + if !bytes.Equal(this.Value, that1.Value) { + return false + } + return true +} +func (this *Any) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&types.Any{") + s = append(s, "TypeUrl: "+fmt.Sprintf("%#v", this.TypeUrl)+",\n") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringAny(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Any) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Any) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.TypeUrl) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintAny(dAtA, i, uint64(len(m.TypeUrl))) + i += copy(dAtA[i:], m.TypeUrl) + } + if len(m.Value) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintAny(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + return i, nil +} + +func encodeFixed64Any(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Any(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintAny(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func NewPopulatedAny(r randyAny, easy bool) *Any 
{ + this := &Any{} + this.TypeUrl = string(randStringAny(r)) + v1 := r.Intn(100) + this.Value = make([]byte, v1) + for i := 0; i < v1; i++ { + this.Value[i] = byte(r.Intn(256)) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +type randyAny interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneAny(r randyAny) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringAny(r randyAny) string { + v2 := r.Intn(100) + tmps := make([]rune, v2) + for i := 0; i < v2; i++ { + tmps[i] = randUTF8RuneAny(r) + } + return string(tmps) +} +func randUnrecognizedAny(r randyAny, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldAny(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldAny(dAtA []byte, r randyAny, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateAny(dAtA, uint64(key)) + v3 := r.Int63() + if r.Intn(2) == 0 { + v3 *= -1 + } + dAtA = encodeVarintPopulateAny(dAtA, uint64(v3)) + case 1: + dAtA = encodeVarintPopulateAny(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateAny(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateAny(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateAny(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateAny(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *Any) Size() (n int) { + var l int + _ = l + l = len(m.TypeUrl) + if l > 0 { + n += 1 + l + sovAny(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovAny(uint64(l)) + } + return n +} + +func sovAny(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozAny(x uint64) (n int) { + return sovAny(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Any) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Any{`, + `TypeUrl:` + fmt.Sprintf("%v", this.TypeUrl) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func valueToStringAny(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Any) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAny + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Any: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
Any: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TypeUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAny + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAny + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TypeUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAny + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthAny + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) + if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAny(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAny + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipAny(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAny + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAny + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAny + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthAny + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAny + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipAny(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthAny = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowAny = fmt.Errorf("proto: integer overflow") +) + +func 
init() { proto.RegisterFile("any.proto", fileDescriptorAny) } + +var fileDescriptorAny = []byte{ + // 204 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4c, 0xcc, 0xab, 0xd4, + 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4f, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0x85, 0xf0, 0x92, + 0x4a, 0xd3, 0x94, 0xcc, 0xb8, 0x98, 0x1d, 0xf3, 0x2a, 0x85, 0x24, 0xb9, 0x38, 0x4a, 0x2a, 0x0b, + 0x52, 0xe3, 0x4b, 0x8b, 0x72, 0x24, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, 0xd8, 0x41, 0xfc, 0xd0, + 0xa2, 0x1c, 0x21, 0x11, 0x2e, 0xd6, 0xb2, 0xc4, 0x9c, 0xd2, 0x54, 0x09, 0x26, 0x05, 0x46, 0x0d, + 0x9e, 0x20, 0x08, 0xc7, 0xa9, 0xfe, 0xc2, 0x43, 0x39, 0x86, 0x1b, 0x0f, 0xe5, 0x18, 0x3e, 0x3c, + 0x94, 0x63, 0xfc, 0xf1, 0x50, 0x8e, 0xb1, 0xe1, 0x91, 0x1c, 0xe3, 0x8a, 0x47, 0x72, 0x8c, 0x27, + 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x8b, 0x47, 0x72, 0x0c, + 0x1f, 0x40, 0xe2, 0x8f, 0xe5, 0x18, 0xb9, 0x84, 0x93, 0xf3, 0x73, 0xf5, 0xd0, 0xac, 0x77, 0xe2, + 0x70, 0xcc, 0xab, 0x0c, 0x00, 0x71, 0x02, 0x18, 0xa3, 0x58, 0x41, 0x36, 0x16, 0x2f, 0x62, 0x62, + 0x76, 0x0f, 0x70, 0x5a, 0xc5, 0x24, 0xe7, 0x0e, 0x51, 0x1a, 0x00, 0x55, 0xaa, 0x17, 0x9e, 0x9a, + 0x93, 0xe3, 0x9d, 0x97, 0x5f, 0x9e, 0x17, 0x02, 0x52, 0x96, 0xc4, 0x06, 0x36, 0xc3, 0x18, 0x10, + 0x00, 0x00, 0xff, 0xff, 0xb7, 0x39, 0x2f, 0x89, 0xdd, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/types/doc.go b/vendor/github.com/gogo/protobuf/types/doc.go new file mode 100644 index 000000000000..ff2810af1ee0 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/doc.go @@ -0,0 +1,35 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package types contains code for interacting with well-known types. 
+*/ +package types diff --git a/vendor/github.com/gogo/protobuf/types/duration.go b/vendor/github.com/gogo/protobuf/types/duration.go new file mode 100644 index 000000000000..475d61f1db29 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/duration.go @@ -0,0 +1,100 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +// This file implements conversions between google.protobuf.Duration +// and time.Duration. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Range of a Duration in seconds, as specified in + // google/protobuf/duration.proto. This is about 10,000 years in seconds. + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// validateDuration determines whether the Duration is valid according to the +// definition in google/protobuf/duration.proto. A valid Duration +// may still be too large to fit into a time.Duration (the range of Duration +// is about 10,000 years, and the range of time.Duration is about 290). +func validateDuration(d *Duration) error { + if d == nil { + return errors.New("duration: nil Duration") + } + if d.Seconds < minSeconds || d.Seconds > maxSeconds { + return fmt.Errorf("duration: %#v: seconds out of range", d) + } + if d.Nanos <= -1e9 || d.Nanos >= 1e9 { + return fmt.Errorf("duration: %#v: nanos out of range", d) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { + return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d) + } + return nil +} + +// DurationFromProto converts a Duration to a time.Duration. DurationFromProto +// returns an error if the Duration is invalid or is too large to be +// represented in a time.Duration. 
+func DurationFromProto(p *Duration) (time.Duration, error) { + if err := validateDuration(p); err != nil { + return 0, err + } + d := time.Duration(p.Seconds) * time.Second + if int64(d/time.Second) != p.Seconds { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + if p.Nanos != 0 { + d += time.Duration(p.Nanos) + if (d < 0) != (p.Nanos < 0) { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a Duration. +func DurationProto(d time.Duration) *Duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &Duration{ + Seconds: secs, + Nanos: int32(nanos), + } +} diff --git a/vendor/github.com/gogo/protobuf/types/duration.pb.go b/vendor/github.com/gogo/protobuf/types/duration.pb.go new file mode 100644 index 000000000000..9d7fa8f3130f --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/duration.pb.go @@ -0,0 +1,500 @@ +// Code generated by protoc-gen-gogo. +// source: duration.proto +// DO NOT EDIT! + +/* + Package types is a generated protocol buffer package. + + It is generated from these files: + duration.proto + + It has these top-level messages: + Duration +*/ +package types + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// Example 1: Compute Duration from two Timestamps in pseudo code. +// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (durations.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// Example 3: Compute Duration from datetime.timedelta in Python. +// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// +type Duration struct { + // Signed seconds of the span of time. 
Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +func (m *Duration) Reset() { *m = Duration{} } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptorDuration, []int{0} } +func (*Duration) XXX_WellKnownType() string { return "Duration" } + +func (m *Duration) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Duration) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +func init() { + proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") +} +func (this *Duration) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Duration) + if !ok { + that2, ok := that.(Duration) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Seconds != that1.Seconds { + if this.Seconds < that1.Seconds { + return -1 + } + return 1 + } + if this.Nanos != that1.Nanos { + if this.Nanos < that1.Nanos { + return -1 + } + return 1 + } + return 0 +} +func (this *Duration) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Duration) + if !ok { + that2, ok := that.(Duration) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Seconds != that1.Seconds { + return false + } + if this.Nanos != that1.Nanos { + return false + } + return true +} +func (this *Duration) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&types.Duration{") + s = append(s, "Seconds: "+fmt.Sprintf("%#v", this.Seconds)+",\n") + s = append(s, "Nanos: "+fmt.Sprintf("%#v", this.Nanos)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringDuration(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Duration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Duration) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Seconds != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintDuration(dAtA, i, uint64(m.Seconds)) + } + if m.Nanos != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintDuration(dAtA, i, uint64(m.Nanos)) + } + return i, nil +} + +func encodeFixed64Duration(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] 
= uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Duration(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintDuration(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Duration) Size() (n int) { + var l int + _ = l + if m.Seconds != 0 { + n += 1 + sovDuration(uint64(m.Seconds)) + } + if m.Nanos != 0 { + n += 1 + sovDuration(uint64(m.Nanos)) + } + return n +} + +func sovDuration(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozDuration(x uint64) (n int) { + return sovDuration(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Duration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDuration + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Duration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Duration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) + } + m.Seconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDuration + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Seconds |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType) + } + m.Nanos = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDuration + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Nanos |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipDuration(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDuration + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipDuration(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDuration + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDuration + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDuration + } + if iNdEx >= l { 
+ return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthDuration + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDuration + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipDuration(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthDuration = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowDuration = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("duration.proto", fileDescriptorDuration) } + +var fileDescriptorDuration = []byte{ + // 203 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4b, 0x29, 0x2d, 0x4a, + 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4f, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0x85, 0xf0, 0x92, 0x4a, 0xd3, 0x94, 0xac, 0xb8, 0x38, 0x5c, 0xa0, 0x4a, 0x84, 0x24, + 0xb8, 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, + 0x60, 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d, + 0xd6, 0x20, 0x08, 0xc7, 0xa9, 0xfe, 0xc2, 0x43, 0x39, 0x86, 0x1b, 0x0f, 0xe5, 0x18, 0x3e, 0x3c, + 0x94, 0x63, 0x5c, 0xf1, 0x48, 0x8e, 0xf1, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, + 0x3c, 0x92, 0x63, 0x7c, 0xf1, 0x48, 0x8e, 0xe1, 0xc3, 0x23, 0x39, 0xc6, 0x15, 0x8f, 0xe5, 0x18, + 0xb9, 0x84, 0x93, 0xf3, 0x73, 0xf5, 0xd0, 0xac, 0x76, 0xe2, 0x85, 0x59, 0x1c, 0x00, 0x12, 0x09, + 0x60, 0x8c, 0x62, 0x2d, 0xa9, 0x2c, 0x48, 0x2d, 0xfe, 0xc1, 0xc8, 0xb8, 0x88, 0x89, 0xd9, 0x3d, + 0xc0, 0x69, 0x15, 0x93, 0x9c, 0x3b, 0x44, 0x4b, 0x00, 0x54, 0x8b, 0x5e, 0x78, 0x6a, 0x4e, 0x8e, + 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x65, 0x12, 0x1b, 0xd8, 0x2c, 0x63, 0x40, 0x00, 0x00, + 0x00, 0xff, 0xff, 0x9d, 0x5a, 0x25, 0xa5, 0xe6, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/types/duration_gogo.go b/vendor/github.com/gogo/protobuf/types/duration_gogo.go new file mode 100644 index 000000000000..90e7670e21d1 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/duration_gogo.go @@ -0,0 +1,100 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +import ( + "fmt" + "time" +) + +func NewPopulatedDuration(r interface { + Int63() int64 +}, easy bool) *Duration { + this := &Duration{} + maxSecs := time.Hour.Nanoseconds() / 1e9 + max := 2 * maxSecs + s := int64(r.Int63()) % max + s -= maxSecs + neg := int64(1) + if s < 0 { + neg = -1 + } + this.Seconds = s + this.Nanos = int32(neg * (r.Int63() % 1e9)) + return this +} + +func (d *Duration) String() string { + td, err := DurationFromProto(d) + if err != nil { + return fmt.Sprintf("(%v)", err) + } + return td.String() +} + +func NewPopulatedStdDuration(r interface { + Int63() int64 +}, easy bool) *time.Duration { + dur := NewPopulatedDuration(r, easy) + d, err := DurationFromProto(dur) + if err != nil { + return nil + } + return &d +} + +func SizeOfStdDuration(d time.Duration) int { + dur := DurationProto(d) + return dur.Size() +} + +func StdDurationMarshal(d time.Duration) ([]byte, error) { + size := SizeOfStdDuration(d) + buf := make([]byte, size) + _, err := StdDurationMarshalTo(d, buf) + return buf, err +} + +func StdDurationMarshalTo(d time.Duration, data []byte) (int, error) { + dur := DurationProto(d) + return dur.MarshalTo(data) +} + +func StdDurationUnmarshal(d *time.Duration, data []byte) error { + dur := &Duration{} + if err := dur.Unmarshal(data); err != nil { + return err + } + dd, err := DurationFromProto(dur) + if err != nil { + return err + } + *d = dd + return nil +} diff --git a/vendor/github.com/gogo/protobuf/types/empty.pb.go b/vendor/github.com/gogo/protobuf/types/empty.pb.go new file mode 100644 index 000000000000..4cfac13261ad --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/empty.pb.go @@ -0,0 +1,457 @@ +// Code generated by protoc-gen-gogo. +// source: empty.proto +// DO NOT EDIT! + +/* +Package types is a generated protocol buffer package. + +It is generated from these files: + empty.proto + +It has these top-level messages: + Empty +*/ +package types + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. 
A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. +type Empty struct { +} + +func (m *Empty) Reset() { *m = Empty{} } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptorEmpty, []int{0} } +func (*Empty) XXX_WellKnownType() string { return "Empty" } + +func init() { + proto.RegisterType((*Empty)(nil), "google.protobuf.Empty") +} +func (this *Empty) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Empty) + if !ok { + that2, ok := that.(Empty) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + return 0 +} +func (this *Empty) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Empty) + if !ok { + that2, ok := that.(Empty) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + return true +} +func (this *Empty) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 4) + s = append(s, "&types.Empty{") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringEmpty(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Empty) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Empty) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func encodeFixed64Empty(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Empty(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintEmpty(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func NewPopulatedEmpty(r randyEmpty, easy bool) *Empty { + this := &Empty{} + if !easy && r.Intn(10) != 0 { + } + return this +} + +type randyEmpty interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneEmpty(r randyEmpty) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringEmpty(r randyEmpty) string { + v1 := r.Intn(100) + tmps := make([]rune, v1) + for i := 0; i < v1; i++ { + tmps[i] = randUTF8RuneEmpty(r) + } + return string(tmps) +} +func randUnrecognizedEmpty(r randyEmpty, 
maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldEmpty(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldEmpty(dAtA []byte, r randyEmpty, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key)) + v2 := r.Int63() + if r.Intn(2) == 0 { + v2 *= -1 + } + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(v2)) + case 1: + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateEmpty(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *Empty) Size() (n int) { + var l int + _ = l + return n +} + +func sovEmpty(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozEmpty(x uint64) (n int) { + return sovEmpty(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Empty) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Empty{`, + `}`, + }, "") + return s +} +func valueToStringEmpty(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Empty) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEmpty + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Empty: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Empty: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipEmpty(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEmpty + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEmpty(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEmpty + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEmpty + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + 
iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEmpty + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthEmpty + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEmpty + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipEmpty(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthEmpty = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEmpty = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("empty.proto", fileDescriptorEmpty) } + +var fileDescriptorEmpty = []byte{ + // 169 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4e, 0xcd, 0x2d, 0x28, + 0xa9, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4f, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0x85, + 0xf0, 0x92, 0x4a, 0xd3, 0x94, 0xd8, 0xb9, 0x58, 0x5d, 0x41, 0xf2, 0x4e, 0x2d, 0x8c, 0x17, 0x1e, + 0xca, 0x31, 0xdc, 0x78, 0x28, 0xc7, 0xf0, 0xe1, 0xa1, 0x1c, 0xe3, 0x8f, 0x87, 0x72, 0x8c, 0x0d, + 0x8f, 0xe4, 0x18, 0x57, 0x3c, 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, + 0x07, 0x8f, 0xe4, 0x18, 0x5f, 0x3c, 0x92, 0x63, 0xf8, 0x00, 0x12, 0x7f, 0x2c, 0xc7, 0xc8, 0x25, + 0x9c, 0x9c, 0x9f, 0xab, 0x87, 0x66, 0xa0, 0x13, 0x17, 0xd8, 0xb8, 0x00, 0x10, 0x37, 0x80, 0x31, + 0x8a, 0xb5, 0xa4, 0xb2, 0x20, 0xb5, 0xf8, 0x07, 0x23, 0xe3, 0x22, 0x26, 0x66, 0xf7, 0x00, 0xa7, + 0x55, 0x4c, 0x72, 0xee, 0x10, 0xf5, 0x01, 0x50, 0xf5, 0x7a, 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, + 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x95, 0x49, 0x6c, 0x60, 0x83, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, + 0xff, 0x7c, 0xa8, 0xf0, 0xc4, 0xb6, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/types/field_mask.pb.go b/vendor/github.com/gogo/protobuf/types/field_mask.pb.go new file mode 100644 index 000000000000..3bf0ff36999a --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/field_mask.pb.go @@ -0,0 +1,738 @@ +// Code generated by protoc-gen-gogo. +// source: field_mask.proto +// DO NOT EDIT! + +/* +Package types is a generated protocol buffer package. + +It is generated from these files: + field_mask.proto + +It has these top-level messages: + FieldMask +*/ +package types + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// `FieldMask` represents a set of symbolic field paths, for example: +// +// paths: "f.a" +// paths: "f.b.d" +// +// Here `f` represents a field in some root message, `a` and `b` +// fields in the message found in `f`, and `d` a field found in the +// message in `f.b`. +// +// Field masks are used to specify a subset of fields that should be +// returned by a get operation or modified by an update operation. +// Field masks also have a custom JSON encoding (see below). +// +// # Field Masks in Projections +// +// When used in the context of a projection, a response message or +// sub-message is filtered by the API to only contain those fields as +// specified in the mask. For example, if the mask in the previous +// example is applied to a response message as follows: +// +// f { +// a : 22 +// b { +// d : 1 +// x : 2 +// } +// y : 13 +// } +// z: 8 +// +// The result will not contain specific values for fields x,y and z +// (their value will be set to the default, and omitted in proto text +// output): +// +// +// f { +// a : 22 +// b { +// d : 1 +// } +// } +// +// A repeated field is not allowed except at the last position of a +// paths string. +// +// If a FieldMask object is not present in a get operation, the +// operation applies to all fields (as if a FieldMask of all fields +// had been specified). +// +// Note that a field mask does not necessarily apply to the +// top-level response message. In case of a REST get operation, the +// field mask applies directly to the response, but in case of a REST +// list operation, the mask instead applies to each individual message +// in the returned resource list. In case of a REST custom method, +// other definitions may be used. Where the mask applies will be +// clearly documented together with its declaration in the API. In +// any case, the effect on the returned resource/resources is required +// behavior for APIs. +// +// # Field Masks in Update Operations +// +// A field mask in update operations specifies which fields of the +// targeted resource are going to be updated. The API is required +// to only change the values of the fields as specified in the mask +// and leave the others untouched. If a resource is passed in to +// describe the updated values, the API ignores the values of all +// fields not covered by the mask. +// +// If a repeated field is specified for an update operation, the existing +// repeated values in the target resource will be overwritten by the new values. +// Note that a repeated field is only allowed in the last position of a `paths` +// string. +// +// If a sub-message is specified in the last position of the field mask for an +// update operation, then the existing sub-message in the target resource is +// overwritten. 
Given the target message: +// +// f { +// b { +// d : 1 +// x : 2 +// } +// c : 1 +// } +// +// And an update message: +// +// f { +// b { +// d : 10 +// } +// } +// +// then if the field mask is: +// +// paths: "f.b" +// +// then the result will be: +// +// f { +// b { +// d : 10 +// } +// c : 1 +// } +// +// However, if the update mask was: +// +// paths: "f.b.d" +// +// then the result would be: +// +// f { +// b { +// d : 10 +// x : 2 +// } +// c : 1 +// } +// +// In order to reset a field's value to the default, the field must +// be in the mask and set to the default value in the provided resource. +// Hence, in order to reset all fields of a resource, provide a default +// instance of the resource and set all fields in the mask, or do +// not provide a mask as described below. +// +// If a field mask is not present on update, the operation applies to +// all fields (as if a field mask of all fields has been specified). +// Note that in the presence of schema evolution, this may mean that +// fields the client does not know and has therefore not filled into +// the request will be reset to their default. If this is unwanted +// behavior, a specific service may require a client to always specify +// a field mask, producing an error if not. +// +// As with get operations, the location of the resource which +// describes the updated values in the request message depends on the +// operation kind. In any case, the effect of the field mask is +// required to be honored by the API. +// +// ## Considerations for HTTP REST +// +// The HTTP kind of an update operation which uses a field mask must +// be set to PATCH instead of PUT in order to satisfy HTTP semantics +// (PUT must only be used for full updates). +// +// # JSON Encoding of Field Masks +// +// In JSON, a field mask is encoded as a single string where paths are +// separated by a comma. Fields name in each path are converted +// to/from lower-camel naming conventions. +// +// As an example, consider the following message declarations: +// +// message Profile { +// User user = 1; +// Photo photo = 2; +// } +// message User { +// string display_name = 1; +// string address = 2; +// } +// +// In proto a field mask for `Profile` may look as such: +// +// mask { +// paths: "user.display_name" +// paths: "photo" +// } +// +// In JSON, the same mask is represented as below: +// +// { +// mask: "user.displayName,photo" +// } +// +// # Field Masks and Oneof Fields +// +// Field masks treat fields in oneofs just as regular fields. Consider the +// following message: +// +// message SampleMessage { +// oneof test_oneof { +// string name = 4; +// SubMessage sub_message = 9; +// } +// } +// +// The field mask can be: +// +// mask { +// paths: "name" +// } +// +// Or: +// +// mask { +// paths: "sub_message" +// } +// +// Note that oneof type names ("test_oneof" in this case) cannot be used in +// paths. +type FieldMask struct { + // The set of field mask paths. 
+ Paths []string `protobuf:"bytes,1,rep,name=paths" json:"paths,omitempty"` +} + +func (m *FieldMask) Reset() { *m = FieldMask{} } +func (*FieldMask) ProtoMessage() {} +func (*FieldMask) Descriptor() ([]byte, []int) { return fileDescriptorFieldMask, []int{0} } + +func (m *FieldMask) GetPaths() []string { + if m != nil { + return m.Paths + } + return nil +} + +func init() { + proto.RegisterType((*FieldMask)(nil), "google.protobuf.FieldMask") +} +func (this *FieldMask) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*FieldMask) + if !ok { + that2, ok := that.(FieldMask) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if len(this.Paths) != len(that1.Paths) { + if len(this.Paths) < len(that1.Paths) { + return -1 + } + return 1 + } + for i := range this.Paths { + if this.Paths[i] != that1.Paths[i] { + if this.Paths[i] < that1.Paths[i] { + return -1 + } + return 1 + } + } + return 0 +} +func (this *FieldMask) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*FieldMask) + if !ok { + that2, ok := that.(FieldMask) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if len(this.Paths) != len(that1.Paths) { + return false + } + for i := range this.Paths { + if this.Paths[i] != that1.Paths[i] { + return false + } + } + return true +} +func (this *FieldMask) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.FieldMask{") + s = append(s, "Paths: "+fmt.Sprintf("%#v", this.Paths)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringFieldMask(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *FieldMask) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FieldMask) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Paths) > 0 { + for _, s := range m.Paths { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func encodeFixed64FieldMask(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32FieldMask(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintFieldMask(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func NewPopulatedFieldMask(r randyFieldMask, easy bool) *FieldMask { + 
this := &FieldMask{} + v1 := r.Intn(10) + this.Paths = make([]string, v1) + for i := 0; i < v1; i++ { + this.Paths[i] = string(randStringFieldMask(r)) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +type randyFieldMask interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneFieldMask(r randyFieldMask) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringFieldMask(r randyFieldMask) string { + v2 := r.Intn(100) + tmps := make([]rune, v2) + for i := 0; i < v2; i++ { + tmps[i] = randUTF8RuneFieldMask(r) + } + return string(tmps) +} +func randUnrecognizedFieldMask(r randyFieldMask, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldFieldMask(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldFieldMask(dAtA []byte, r randyFieldMask, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key)) + v3 := r.Int63() + if r.Intn(2) == 0 { + v3 *= -1 + } + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(v3)) + case 1: + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateFieldMask(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *FieldMask) Size() (n int) { + var l int + _ = l + if len(m.Paths) > 0 { + for _, s := range m.Paths { + l = len(s) + n += 1 + l + sovFieldMask(uint64(l)) + } + } + return n +} + +func sovFieldMask(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozFieldMask(x uint64) (n int) { + return sovFieldMask(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *FieldMask) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FieldMask{`, + `Paths:` + fmt.Sprintf("%v", this.Paths) + `,`, + `}`, + }, "") + return s +} +func valueToStringFieldMask(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *FieldMask) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFieldMask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FieldMask: wiretype end group for 
non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FieldMask: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFieldMask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthFieldMask + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Paths = append(m.Paths, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipFieldMask(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthFieldMask + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipFieldMask(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFieldMask + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFieldMask + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFieldMask + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthFieldMask + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFieldMask + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipFieldMask(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthFieldMask = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowFieldMask = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("field_mask.proto", fileDescriptorFieldMask) } + +var fileDescriptorFieldMask = []byte{ + // 193 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x48, 0xcb, 0x4c, 0xcd, + 0x49, 0x89, 0xcf, 0x4d, 0x2c, 0xce, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4f, 0xcf, + 0xcf, 0x4f, 0xcf, 0x49, 0x85, 0xf0, 0x92, 0x4a, 0xd3, 0x94, 0x14, 0xb9, 0x38, 0xdd, 0x40, 0x8a, + 0x7c, 0x13, 
0x8b, 0xb3, 0x85, 0x44, 0xb8, 0x58, 0x0b, 0x12, 0x4b, 0x32, 0x8a, 0x25, 0x18, 0x15, + 0x98, 0x35, 0x38, 0x83, 0x20, 0x1c, 0xa7, 0x56, 0xc6, 0x0b, 0x0f, 0xe5, 0x18, 0x6e, 0x3c, 0x94, + 0x63, 0xf8, 0xf0, 0x50, 0x8e, 0xf1, 0xc7, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, 0x8c, 0x2b, 0x1e, + 0xc9, 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x2f, + 0x1e, 0xc9, 0x31, 0x7c, 0x00, 0x89, 0x3f, 0x96, 0x63, 0xe4, 0x12, 0x4e, 0xce, 0xcf, 0xd5, 0x43, + 0xb3, 0xca, 0x89, 0x0f, 0x6e, 0x51, 0x00, 0x48, 0x28, 0x80, 0x31, 0x8a, 0xb5, 0xa4, 0xb2, 0x20, + 0xb5, 0x78, 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0x86, 0x00, 0xa8, + 0x06, 0xbd, 0xf0, 0xd4, 0x9c, 0x1c, 0xef, 0xbc, 0xfc, 0xf2, 0xbc, 0x10, 0x90, 0xb2, 0x24, 0x36, + 0xb0, 0x49, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x51, 0x31, 0x89, 0xb5, 0xd6, 0x00, 0x00, + 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/types/struct.pb.go b/vendor/github.com/gogo/protobuf/types/struct.pb.go new file mode 100644 index 000000000000..366ea48e7470 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/struct.pb.go @@ -0,0 +1,1888 @@ +// Code generated by protoc-gen-gogo. +// source: struct.proto +// DO NOT EDIT! + +/* + Package types is a generated protocol buffer package. + + It is generated from these files: + struct.proto + + It has these top-level messages: + Struct + Value + ListValue +*/ +package types + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strconv "strconv" + +import strings "strings" +import reflect "reflect" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. +type NullValue int32 + +const ( + // Null value. + NULL_VALUE NullValue = 0 +) + +var NullValue_name = map[int32]string{ + 0: "NULL_VALUE", +} +var NullValue_value = map[string]int32{ + "NULL_VALUE": 0, +} + +func (NullValue) EnumDescriptor() ([]byte, []int) { return fileDescriptorStruct, []int{0} } +func (NullValue) XXX_WellKnownType() string { return "NullValue" } + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. For example, in +// scripting languages like JS a struct is represented as an +// object. The details of that representation are described together +// with the proto support for the language. +// +// The JSON representation for `Struct` is JSON object. +type Struct struct { + // Unordered map of dynamically typed values. 
+ Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *Struct) Reset() { *m = Struct{} } +func (*Struct) ProtoMessage() {} +func (*Struct) Descriptor() ([]byte, []int) { return fileDescriptorStruct, []int{0} } +func (*Struct) XXX_WellKnownType() string { return "Struct" } + +func (m *Struct) GetFields() map[string]*Value { + if m != nil { + return m.Fields + } + return nil +} + +// `Value` represents a dynamically typed value which can be either +// null, a number, a string, a boolean, a recursive struct value, or a +// list of values. A producer of value is expected to set one of that +// variants, absence of any variant indicates an error. +// +// The JSON representation for `Value` is JSON value. +type Value struct { + // The kind of value. + // + // Types that are valid to be assigned to Kind: + // *Value_NullValue + // *Value_NumberValue + // *Value_StringValue + // *Value_BoolValue + // *Value_StructValue + // *Value_ListValue + Kind isValue_Kind `protobuf_oneof:"kind"` +} + +func (m *Value) Reset() { *m = Value{} } +func (*Value) ProtoMessage() {} +func (*Value) Descriptor() ([]byte, []int) { return fileDescriptorStruct, []int{1} } +func (*Value) XXX_WellKnownType() string { return "Value" } + +type isValue_Kind interface { + isValue_Kind() + Equal(interface{}) bool + MarshalTo([]byte) (int, error) + Size() int +} + +type Value_NullValue struct { + NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"` +} +type Value_NumberValue struct { + NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof"` +} +type Value_StringValue struct { + StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"` +} +type Value_BoolValue struct { + BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"` +} +type Value_StructValue struct { + StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,oneof"` +} +type Value_ListValue struct { + ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,oneof"` +} + +func (*Value_NullValue) isValue_Kind() {} +func (*Value_NumberValue) isValue_Kind() {} +func (*Value_StringValue) isValue_Kind() {} +func (*Value_BoolValue) isValue_Kind() {} +func (*Value_StructValue) isValue_Kind() {} +func (*Value_ListValue) isValue_Kind() {} + +func (m *Value) GetKind() isValue_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (m *Value) GetNullValue() NullValue { + if x, ok := m.GetKind().(*Value_NullValue); ok { + return x.NullValue + } + return NULL_VALUE +} + +func (m *Value) GetNumberValue() float64 { + if x, ok := m.GetKind().(*Value_NumberValue); ok { + return x.NumberValue + } + return 0 +} + +func (m *Value) GetStringValue() string { + if x, ok := m.GetKind().(*Value_StringValue); ok { + return x.StringValue + } + return "" +} + +func (m *Value) GetBoolValue() bool { + if x, ok := m.GetKind().(*Value_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (m *Value) GetStructValue() *Struct { + if x, ok := m.GetKind().(*Value_StructValue); ok { + return x.StructValue + } + return nil +} + +func (m *Value) GetListValue() *ListValue { + if x, ok := m.GetKind().(*Value_ListValue); ok { + return x.ListValue + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Value) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Value_OneofMarshaler, _Value_OneofUnmarshaler, _Value_OneofSizer, []interface{}{ + (*Value_NullValue)(nil), + (*Value_NumberValue)(nil), + (*Value_StringValue)(nil), + (*Value_BoolValue)(nil), + (*Value_StructValue)(nil), + (*Value_ListValue)(nil), + } +} + +func _Value_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Value) + // kind + switch x := m.Kind.(type) { + case *Value_NullValue: + _ = b.EncodeVarint(1<<3 | proto.WireVarint) + _ = b.EncodeVarint(uint64(x.NullValue)) + case *Value_NumberValue: + _ = b.EncodeVarint(2<<3 | proto.WireFixed64) + _ = b.EncodeFixed64(math.Float64bits(x.NumberValue)) + case *Value_StringValue: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.StringValue) + case *Value_BoolValue: + t := uint64(0) + if x.BoolValue { + t = 1 + } + _ = b.EncodeVarint(4<<3 | proto.WireVarint) + _ = b.EncodeVarint(t) + case *Value_StructValue: + _ = b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.StructValue); err != nil { + return err + } + case *Value_ListValue: + _ = b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ListValue); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Value.Kind has unexpected type %T", x) + } + return nil +} + +func _Value_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Value) + switch tag { + case 1: // kind.null_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Kind = &Value_NullValue{NullValue(x)} + return true, err + case 2: // kind.number_value + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Kind = &Value_NumberValue{math.Float64frombits(x)} + return true, err + case 3: // kind.string_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Kind = &Value_StringValue{x} + return true, err + case 4: // kind.bool_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Kind = &Value_BoolValue{x != 0} + return true, err + case 5: // kind.struct_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Struct) + err := b.DecodeMessage(msg) + m.Kind = &Value_StructValue{msg} + return true, err + case 6: // kind.list_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ListValue) + err := b.DecodeMessage(msg) + m.Kind = &Value_ListValue{msg} + return true, err + default: + return false, nil + } +} + +func _Value_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Value) + // kind + switch x := m.Kind.(type) { + case *Value_NullValue: + n += proto.SizeVarint(1<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.NullValue)) + case *Value_NumberValue: + n += proto.SizeVarint(2<<3 | proto.WireFixed64) + n += 8 + case *Value_StringValue: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.StringValue))) + n += len(x.StringValue) + case *Value_BoolValue: + n += proto.SizeVarint(4<<3 | proto.WireVarint) + n += 1 + case *Value_StructValue: + s := proto.Size(x.StructValue) + n += proto.SizeVarint(5<<3 | 
proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Value_ListValue: + s := proto.Size(x.ListValue) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. +type ListValue struct { + // Repeated field of dynamically typed values. + Values []*Value `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"` +} + +func (m *ListValue) Reset() { *m = ListValue{} } +func (*ListValue) ProtoMessage() {} +func (*ListValue) Descriptor() ([]byte, []int) { return fileDescriptorStruct, []int{2} } +func (*ListValue) XXX_WellKnownType() string { return "ListValue" } + +func (m *ListValue) GetValues() []*Value { + if m != nil { + return m.Values + } + return nil +} + +func init() { + proto.RegisterType((*Struct)(nil), "google.protobuf.Struct") + proto.RegisterType((*Value)(nil), "google.protobuf.Value") + proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue") + proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value) +} +func (x NullValue) String() string { + s, ok := NullValue_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (this *Struct) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Struct) + if !ok { + that2, ok := that.(Struct) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if len(this.Fields) != len(that1.Fields) { + return false + } + for i := range this.Fields { + if !this.Fields[i].Equal(that1.Fields[i]) { + return false + } + } + return true +} +func (this *Value) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Value) + if !ok { + that2, ok := that.(Value) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if that1.Kind == nil { + if this.Kind != nil { + return false + } + } else if this.Kind == nil { + return false + } else if !this.Kind.Equal(that1.Kind) { + return false + } + return true +} +func (this *Value_NullValue) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Value_NullValue) + if !ok { + that2, ok := that.(Value_NullValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.NullValue != that1.NullValue { + return false + } + return true +} +func (this *Value_NumberValue) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Value_NumberValue) + if !ok { + that2, ok := that.(Value_NumberValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.NumberValue != that1.NumberValue { + return false + } + return true +} +func (this *Value_StringValue) Equal(that interface{}) bool { + if that == nil { + 
if this == nil { + return true + } + return false + } + + that1, ok := that.(*Value_StringValue) + if !ok { + that2, ok := that.(Value_StringValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.StringValue != that1.StringValue { + return false + } + return true +} +func (this *Value_BoolValue) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Value_BoolValue) + if !ok { + that2, ok := that.(Value_BoolValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.BoolValue != that1.BoolValue { + return false + } + return true +} +func (this *Value_StructValue) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Value_StructValue) + if !ok { + that2, ok := that.(Value_StructValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if !this.StructValue.Equal(that1.StructValue) { + return false + } + return true +} +func (this *Value_ListValue) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Value_ListValue) + if !ok { + that2, ok := that.(Value_ListValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if !this.ListValue.Equal(that1.ListValue) { + return false + } + return true +} +func (this *ListValue) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*ListValue) + if !ok { + that2, ok := that.(ListValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if len(this.Values) != len(that1.Values) { + return false + } + for i := range this.Values { + if !this.Values[i].Equal(that1.Values[i]) { + return false + } + } + return true +} +func (this *Struct) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.Struct{") + keysForFields := make([]string, 0, len(this.Fields)) + for k := range this.Fields { + keysForFields = append(keysForFields, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForFields) + mapStringForFields := "map[string]*Value{" + for _, k := range keysForFields { + mapStringForFields += fmt.Sprintf("%#v: %#v,", k, this.Fields[k]) + } + mapStringForFields += "}" + if this.Fields != nil { + s = append(s, "Fields: "+mapStringForFields+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Value) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&types.Value{") + if this.Kind != nil { + s = append(s, "Kind: "+fmt.Sprintf("%#v", this.Kind)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Value_NullValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_NullValue{` + + `NullValue:` + fmt.Sprintf("%#v", 
this.NullValue) + `}`}, ", ") + return s +} +func (this *Value_NumberValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_NumberValue{` + + `NumberValue:` + fmt.Sprintf("%#v", this.NumberValue) + `}`}, ", ") + return s +} +func (this *Value_StringValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_StringValue{` + + `StringValue:` + fmt.Sprintf("%#v", this.StringValue) + `}`}, ", ") + return s +} +func (this *Value_BoolValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_BoolValue{` + + `BoolValue:` + fmt.Sprintf("%#v", this.BoolValue) + `}`}, ", ") + return s +} +func (this *Value_StructValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_StructValue{` + + `StructValue:` + fmt.Sprintf("%#v", this.StructValue) + `}`}, ", ") + return s +} +func (this *Value_ListValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_ListValue{` + + `ListValue:` + fmt.Sprintf("%#v", this.ListValue) + `}`}, ", ") + return s +} +func (this *ListValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.ListValue{") + if this.Values != nil { + s = append(s, "Values: "+fmt.Sprintf("%#v", this.Values)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringStruct(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Struct) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Struct) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Fields) > 0 { + for k := range m.Fields { + dAtA[i] = 0xa + i++ + v := m.Fields[k] + msgSize := 0 + if v != nil { + msgSize = v.Size() + msgSize += 1 + sovStruct(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovStruct(uint64(len(k))) + msgSize + i = encodeVarintStruct(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintStruct(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + if v != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintStruct(dAtA, i, uint64(v.Size())) + n1, err := v.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + } + } + return i, nil +} + +func (m *Value) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Value) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Kind != nil { + nn2, err := m.Kind.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn2 + } + return i, nil +} + +func (m *Value_NullValue) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x8 + i++ + i = encodeVarintStruct(dAtA, i, uint64(m.NullValue)) + return i, nil +} +func (m *Value_NumberValue) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x11 + i++ + i = encodeFixed64Struct(dAtA, i, uint64(math.Float64bits(float64(m.NumberValue)))) + return i, nil +} +func (m *Value_StringValue) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x1a + i++ + i = 
encodeVarintStruct(dAtA, i, uint64(len(m.StringValue))) + i += copy(dAtA[i:], m.StringValue) + return i, nil +} +func (m *Value_BoolValue) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x20 + i++ + if m.BoolValue { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil +} +func (m *Value_StructValue) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.StructValue != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintStruct(dAtA, i, uint64(m.StructValue.Size())) + n3, err := m.StructValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + return i, nil +} +func (m *Value_ListValue) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.ListValue != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintStruct(dAtA, i, uint64(m.ListValue.Size())) + n4, err := m.ListValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + return i, nil +} +func (m *ListValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListValue) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Values) > 0 { + for _, msg := range m.Values { + dAtA[i] = 0xa + i++ + i = encodeVarintStruct(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func encodeFixed64Struct(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Struct(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintStruct(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func NewPopulatedStruct(r randyStruct, easy bool) *Struct { + this := &Struct{} + if r.Intn(10) == 0 { + v1 := r.Intn(10) + this.Fields = make(map[string]*Value) + for i := 0; i < v1; i++ { + this.Fields[randStringStruct(r)] = NewPopulatedValue(r, easy) + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedValue(r randyStruct, easy bool) *Value { + this := &Value{} + oneofNumber_Kind := []int32{1, 2, 3, 4, 5, 6}[r.Intn(6)] + switch oneofNumber_Kind { + case 1: + this.Kind = NewPopulatedValue_NullValue(r, easy) + case 2: + this.Kind = NewPopulatedValue_NumberValue(r, easy) + case 3: + this.Kind = NewPopulatedValue_StringValue(r, easy) + case 4: + this.Kind = NewPopulatedValue_BoolValue(r, easy) + case 5: + this.Kind = NewPopulatedValue_StructValue(r, easy) + case 6: + this.Kind = NewPopulatedValue_ListValue(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedValue_NullValue(r randyStruct, easy bool) *Value_NullValue { + this := &Value_NullValue{} + this.NullValue = NullValue([]int32{0}[r.Intn(1)]) + return this +} +func NewPopulatedValue_NumberValue(r randyStruct, easy bool) *Value_NumberValue { + this := &Value_NumberValue{} + this.NumberValue = float64(r.Float64()) + if r.Intn(2) == 0 { + this.NumberValue *= -1 + } + return this +} +func 
NewPopulatedValue_StringValue(r randyStruct, easy bool) *Value_StringValue { + this := &Value_StringValue{} + this.StringValue = string(randStringStruct(r)) + return this +} +func NewPopulatedValue_BoolValue(r randyStruct, easy bool) *Value_BoolValue { + this := &Value_BoolValue{} + this.BoolValue = bool(bool(r.Intn(2) == 0)) + return this +} +func NewPopulatedValue_StructValue(r randyStruct, easy bool) *Value_StructValue { + this := &Value_StructValue{} + this.StructValue = NewPopulatedStruct(r, easy) + return this +} +func NewPopulatedValue_ListValue(r randyStruct, easy bool) *Value_ListValue { + this := &Value_ListValue{} + this.ListValue = NewPopulatedListValue(r, easy) + return this +} +func NewPopulatedListValue(r randyStruct, easy bool) *ListValue { + this := &ListValue{} + if r.Intn(10) == 0 { + v2 := r.Intn(5) + this.Values = make([]*Value, v2) + for i := 0; i < v2; i++ { + this.Values[i] = NewPopulatedValue(r, easy) + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +type randyStruct interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneStruct(r randyStruct) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringStruct(r randyStruct) string { + v3 := r.Intn(100) + tmps := make([]rune, v3) + for i := 0; i < v3; i++ { + tmps[i] = randUTF8RuneStruct(r) + } + return string(tmps) +} +func randUnrecognizedStruct(r randyStruct, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldStruct(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldStruct(dAtA []byte, r randyStruct, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateStruct(dAtA, uint64(key)) + v4 := r.Int63() + if r.Intn(2) == 0 { + v4 *= -1 + } + dAtA = encodeVarintPopulateStruct(dAtA, uint64(v4)) + case 1: + dAtA = encodeVarintPopulateStruct(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateStruct(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateStruct(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateStruct(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateStruct(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *Struct) Size() (n int) { + var l int + _ = l + if len(m.Fields) > 0 { + for k, v := range m.Fields { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovStruct(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovStruct(uint64(len(k))) + l + n += mapEntrySize + 1 + sovStruct(uint64(mapEntrySize)) + } + } + return n +} + +func (m *Value) Size() (n int) { + var l int + _ = l + if m.Kind != nil { + n += m.Kind.Size() + } + return n +} + +func (m *Value_NullValue) Size() (n int) { + var l int + _ = l + n += 1 + sovStruct(uint64(m.NullValue)) + return n +} +func (m 
*Value_NumberValue) Size() (n int) { + var l int + _ = l + n += 9 + return n +} +func (m *Value_StringValue) Size() (n int) { + var l int + _ = l + l = len(m.StringValue) + n += 1 + l + sovStruct(uint64(l)) + return n +} +func (m *Value_BoolValue) Size() (n int) { + var l int + _ = l + n += 2 + return n +} +func (m *Value_StructValue) Size() (n int) { + var l int + _ = l + if m.StructValue != nil { + l = m.StructValue.Size() + n += 1 + l + sovStruct(uint64(l)) + } + return n +} +func (m *Value_ListValue) Size() (n int) { + var l int + _ = l + if m.ListValue != nil { + l = m.ListValue.Size() + n += 1 + l + sovStruct(uint64(l)) + } + return n +} +func (m *ListValue) Size() (n int) { + var l int + _ = l + if len(m.Values) > 0 { + for _, e := range m.Values { + l = e.Size() + n += 1 + l + sovStruct(uint64(l)) + } + } + return n +} + +func sovStruct(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozStruct(x uint64) (n int) { + return sovStruct(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Struct) String() string { + if this == nil { + return "nil" + } + keysForFields := make([]string, 0, len(this.Fields)) + for k := range this.Fields { + keysForFields = append(keysForFields, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForFields) + mapStringForFields := "map[string]*Value{" + for _, k := range keysForFields { + mapStringForFields += fmt.Sprintf("%v: %v,", k, this.Fields[k]) + } + mapStringForFields += "}" + s := strings.Join([]string{`&Struct{`, + `Fields:` + mapStringForFields + `,`, + `}`, + }, "") + return s +} +func (this *Value) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `}`, + }, "") + return s +} +func (this *Value_NullValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_NullValue{`, + `NullValue:` + fmt.Sprintf("%v", this.NullValue) + `,`, + `}`, + }, "") + return s +} +func (this *Value_NumberValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_NumberValue{`, + `NumberValue:` + fmt.Sprintf("%v", this.NumberValue) + `,`, + `}`, + }, "") + return s +} +func (this *Value_StringValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_StringValue{`, + `StringValue:` + fmt.Sprintf("%v", this.StringValue) + `,`, + `}`, + }, "") + return s +} +func (this *Value_BoolValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_BoolValue{`, + `BoolValue:` + fmt.Sprintf("%v", this.BoolValue) + `,`, + `}`, + }, "") + return s +} +func (this *Value_StructValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_StructValue{`, + `StructValue:` + strings.Replace(fmt.Sprintf("%v", this.StructValue), "Struct", "Struct", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Value_ListValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_ListValue{`, + `ListValue:` + strings.Replace(fmt.Sprintf("%v", this.ListValue), "ListValue", "ListValue", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListValue{`, + `Values:` + strings.Replace(fmt.Sprintf("%v", this.Values), "Value", "Value", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringStruct(v interface{}) string { + rv 
:= reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Struct) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Struct: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Struct: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStruct + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthStruct + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Fields == nil { + m.Fields = make(map[string]*Value) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthStruct + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthStruct + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &Value{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Fields[mapkey] = mapvalue + } else { + var mapvalue *Value + m.Fields[mapkey] = mapvalue + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStruct(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + 
return nil +} +func (m *Value) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NullValue", wireType) + } + var v NullValue + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (NullValue(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Kind = &Value_NullValue{v} + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberValue", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = uint64(dAtA[iNdEx-8]) + v |= uint64(dAtA[iNdEx-7]) << 8 + v |= uint64(dAtA[iNdEx-6]) << 16 + v |= uint64(dAtA[iNdEx-5]) << 24 + v |= uint64(dAtA[iNdEx-4]) << 32 + v |= uint64(dAtA[iNdEx-3]) << 40 + v |= uint64(dAtA[iNdEx-2]) << 48 + v |= uint64(dAtA[iNdEx-1]) << 56 + m.Kind = &Value_NumberValue{float64(math.Float64frombits(v))} + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthStruct + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = &Value_StringValue{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Kind = &Value_BoolValue{b} + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StructValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStruct + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Struct{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Kind = &Value_StructValue{v} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStruct + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ListValue{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Kind = &Value_ListValue{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStruct(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStruct + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, &Value{}) + if err := m.Values[len(m.Values)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStruct(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipStruct(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStruct + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStruct + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStruct + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthStruct + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = 
iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStruct + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipStruct(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthStruct = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowStruct = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("struct.proto", fileDescriptorStruct) } + +var fileDescriptorStruct = []byte{ + // 432 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0xb1, 0x6f, 0xd3, 0x40, + 0x14, 0xc6, 0xfd, 0x9c, 0xc6, 0x22, 0xcf, 0x55, 0xa9, 0x0e, 0x09, 0xa2, 0x22, 0x1d, 0x51, 0xba, + 0x58, 0x08, 0x79, 0x08, 0x0b, 0x22, 0x2c, 0x58, 0x2a, 0xad, 0x84, 0x55, 0x19, 0x43, 0x8b, 0xc4, + 0x12, 0xe1, 0xd4, 0x8d, 0xac, 0x5e, 0xef, 0x2a, 0xfb, 0x0c, 0xca, 0x06, 0xff, 0x05, 0x33, 0x13, + 0x62, 0xe4, 0xaf, 0x60, 0xec, 0xc8, 0x88, 0x3d, 0x31, 0x76, 0xec, 0x88, 0xee, 0xce, 0x36, 0xa8, + 0x51, 0x36, 0xbf, 0xcf, 0xbf, 0xf7, 0xbd, 0xf7, 0xbd, 0xc3, 0xcd, 0x42, 0xe6, 0xe5, 0x5c, 0xfa, + 0x17, 0xb9, 0x90, 0x82, 0xdc, 0x5e, 0x08, 0xb1, 0x60, 0xa9, 0xa9, 0x92, 0xf2, 0x74, 0xfc, 0x05, + 0xd0, 0x79, 0xad, 0x09, 0x32, 0x45, 0xe7, 0x34, 0x4b, 0xd9, 0x49, 0x31, 0x84, 0x51, 0xcf, 0x73, + 0x27, 0xbb, 0xfe, 0x0d, 0xd8, 0x37, 0xa0, 0xff, 0x42, 0x53, 0x7b, 0x5c, 0xe6, 0xcb, 0xb8, 0x69, + 0xd9, 0x79, 0x85, 0xee, 0x7f, 0x32, 0xd9, 0xc6, 0xde, 0x59, 0xba, 0x1c, 0xc2, 0x08, 0xbc, 0x41, + 0xac, 0x3e, 0xc9, 0x23, 0xec, 0x7f, 0x78, 0xcf, 0xca, 0x74, 0x68, 0x8f, 0xc0, 0x73, 0x27, 0x77, + 0x57, 0xcc, 0x8f, 0xd5, 0xdf, 0xd8, 0x40, 0x4f, 0xed, 0x27, 0x30, 0xfe, 0x61, 0x63, 0x5f, 0x8b, + 0x64, 0x8a, 0xc8, 0x4b, 0xc6, 0x66, 0xc6, 0x40, 0x99, 0x6e, 0x4d, 0x76, 0x56, 0x0c, 0x0e, 0x4b, + 0xc6, 0x34, 0x7f, 0x60, 0xc5, 0x03, 0xde, 0x16, 0x64, 0x17, 0x37, 0x79, 0x79, 0x9e, 0xa4, 0xf9, + 0xec, 0xdf, 0x7c, 0x38, 0xb0, 0x62, 0xd7, 0xa8, 0x1d, 0x54, 0xc8, 0x3c, 0xe3, 0x8b, 0x06, 0xea, + 0xa9, 0xc5, 0x15, 0x64, 0x54, 0x03, 0x3d, 0x40, 0x4c, 0x84, 0x68, 0xd7, 0xd8, 0x18, 0x81, 0x77, + 0x4b, 0x8d, 0x52, 0x9a, 0x01, 0x9e, 0xb5, 0xd7, 0x6e, 0x90, 0xbe, 0x8e, 0x7a, 0x6f, 0xcd, 0x1d, + 0x1b, 0xfb, 0x72, 0x2e, 0xbb, 0x94, 0x2c, 0x2b, 0xda, 0x5e, 0x47, 0xf7, 0xae, 0xa6, 0x0c, 0xb3, + 0x42, 0x76, 0x29, 0x59, 0x5b, 0x04, 0x0e, 0x6e, 0x9c, 0x65, 0xfc, 0x64, 0x3c, 0xc5, 0x41, 0x47, + 0x10, 0x1f, 0x1d, 0x6d, 0xd6, 0xbe, 0xe8, 0xba, 0xa3, 0x37, 0xd4, 0xc3, 0xfb, 0x38, 0xe8, 0x8e, + 0x48, 0xb6, 0x10, 0x0f, 0x8f, 0xc2, 0x70, 0x76, 0xfc, 0x3c, 0x3c, 0xda, 0xdb, 0xb6, 0x82, 0xcf, + 0x70, 0x59, 0x51, 0xeb, 0x57, 0x45, 0xad, 0xab, 0x8a, 0xc2, 0x75, 0x45, 0xe1, 0x53, 0x4d, 0xe1, + 0x5b, 0x4d, 0xe1, 0x67, 0x4d, 0xe1, 0xb2, 0xa6, 0xf0, 0xbb, 0xa6, 0xf0, 0xa7, 0xa6, 0xd6, 0x55, + 0x4d, 0x01, 0xef, 0xcc, 0xc5, 0xf9, 0xcd, 0x71, 0x81, 0x6b, 0x92, 0x47, 0xaa, 0x8e, 0xe0, 0x5d, + 0x5f, 0x2e, 0x2f, 0xd2, 0xe2, 0x1a, 0xe0, 0xab, 0xdd, 0xdb, 0x8f, 0x82, 0xef, 0x36, 0xdd, 0x37, + 0x0d, 0x51, 0xbb, 0xdf, 0xdb, 0x94, 0xb1, 0x97, 0x5c, 0x7c, 0xe4, 0x6f, 0x14, 0x99, 0x38, 0xda, + 0xe9, 0xf1, 0xdf, 0x00, 0x00, 
0x00, 0xff, 0xff, 0x75, 0xc5, 0x1c, 0x3b, 0xd5, 0x02, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/types/timestamp.go b/vendor/github.com/gogo/protobuf/types/timestamp.go new file mode 100644 index 000000000000..521b62d925ec --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/timestamp.go @@ -0,0 +1,123 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +// This file implements operations on google.protobuf.Timestamp. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. + // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + maxValidSeconds = 253402300800 +) + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range +// [0001-01-01, 10000-01-01) and has a Nanos field +// in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes +// the problem. +// +// Every valid Timestamp can be represented by a time.Time, but the converse is not true. +func validateTimestamp(ts *Timestamp) error { + if ts == nil { + return errors.New("timestamp: nil Timestamp") + } + if ts.Seconds < minValidSeconds { + return fmt.Errorf("timestamp: %#v before 0001-01-01", ts) + } + if ts.Seconds >= maxValidSeconds { + return fmt.Errorf("timestamp: %#v after 10000-01-01", ts) + } + if ts.Nanos < 0 || ts.Nanos >= 1e9 { + return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts) + } + return nil +} + +// TimestampFromProto converts a google.protobuf.Timestamp proto to a time.Time. +// It returns an error if the argument is invalid. 
+// +// Unlike most Go functions, if Timestamp returns an error, the first return value +// is not the zero time.Time. Instead, it is the value obtained from the +// time.Unix function when passed the contents of the Timestamp, in the UTC +// locale. This may or may not be a meaningful time; many invalid Timestamps +// do map to valid time.Times. +// +// A nil Timestamp returns an error. The first return value in that case is +// undefined. +func TimestampFromProto(ts *Timestamp) (time.Time, error) { + // Don't return the zero value on error, because corresponds to a valid + // timestamp. Instead return whatever time.Unix gives us. + var t time.Time + if ts == nil { + t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp + } else { + t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() + } + return t, validateTimestamp(ts) +} + +// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. +// It returns an error if the resulting Timestamp is invalid. +func TimestampProto(t time.Time) (*Timestamp, error) { + seconds := t.Unix() + nanos := int32(t.Sub(time.Unix(seconds, 0))) + ts := &Timestamp{ + Seconds: seconds, + Nanos: nanos, + } + if err := validateTimestamp(ts); err != nil { + return nil, err + } + return ts, nil +} + +// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid +// Timestamps, it returns an error message in parentheses. +func TimestampString(ts *Timestamp) string { + t, err := TimestampFromProto(ts) + if err != nil { + return fmt.Sprintf("(%v)", err) + } + return t.Format(time.RFC3339Nano) +} diff --git a/vendor/github.com/gogo/protobuf/types/timestamp.pb.go b/vendor/github.com/gogo/protobuf/types/timestamp.pb.go new file mode 100644 index 000000000000..8c705a3a3048 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/timestamp.pb.go @@ -0,0 +1,504 @@ +// Code generated by protoc-gen-gogo. +// source: timestamp.proto +// DO NOT EDIT! + +/* + Package types is a generated protocol buffer package. + + It is generated from these files: + timestamp.proto + + It has these top-level messages: + Timestamp +*/ +package types + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// A Timestamp represents a point in time independent of any time zone +// or calendar, represented as seconds and fractions of seconds at +// nanosecond resolution in UTC Epoch time. It is encoded using the +// Proleptic Gregorian Calendar which extends the Gregorian calendar +// backwards to year one. It is encoded assuming all minutes are 60 +// seconds long, i.e. leap seconds are "smeared" so that no leap second +// table is needed for interpretation. Range is from +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +// By restricting to that range, we ensure that we can convert to +// and from RFC 3339 date strings. +// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). +// +// Example 1: Compute Timestamp from POSIX `time()`. 
+// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// +type Timestamp struct { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptorTimestamp, []int{0} } +func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } + +func (m *Timestamp) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Timestamp) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +func init() { + proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") +} +func (this *Timestamp) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Timestamp) + if !ok { + that2, ok := that.(Timestamp) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Seconds != that1.Seconds { + if this.Seconds < that1.Seconds { + return -1 + } + return 1 + } + if this.Nanos != that1.Nanos { + if this.Nanos < that1.Nanos { + return -1 + } + return 1 + } + return 0 +} +func (this *Timestamp) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Timestamp) + if !ok { + that2, ok := that.(Timestamp) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Seconds != that1.Seconds { + return false + } + if this.Nanos != that1.Nanos { + return false + } + return true +} +func (this *Timestamp) GoString() string { + if this == nil { + return "nil" + } + s := 
make([]string, 0, 6) + s = append(s, "&types.Timestamp{") + s = append(s, "Seconds: "+fmt.Sprintf("%#v", this.Seconds)+",\n") + s = append(s, "Nanos: "+fmt.Sprintf("%#v", this.Nanos)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringTimestamp(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Timestamp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Timestamp) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Seconds != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTimestamp(dAtA, i, uint64(m.Seconds)) + } + if m.Nanos != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintTimestamp(dAtA, i, uint64(m.Nanos)) + } + return i, nil +} + +func encodeFixed64Timestamp(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Timestamp(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintTimestamp(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Timestamp) Size() (n int) { + var l int + _ = l + if m.Seconds != 0 { + n += 1 + sovTimestamp(uint64(m.Seconds)) + } + if m.Nanos != 0 { + n += 1 + sovTimestamp(uint64(m.Nanos)) + } + return n +} + +func sovTimestamp(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozTimestamp(x uint64) (n int) { + return sovTimestamp(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Timestamp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTimestamp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Timestamp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Timestamp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) + } + m.Seconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTimestamp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Seconds |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType) + } + m.Nanos = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTimestamp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + m.Nanos |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTimestamp(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTimestamp + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTimestamp(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTimestamp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTimestamp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTimestamp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthTimestamp + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTimestamp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipTimestamp(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthTimestamp = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTimestamp = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("timestamp.proto", fileDescriptorTimestamp) } + +var fileDescriptorTimestamp = []byte{ + // 205 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2f, 0xc9, 0xcc, 0x4d, + 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4f, 0xcf, 0xcf, + 0x4f, 0xcf, 0x49, 0x85, 0xf0, 0x92, 0x4a, 0xd3, 0x94, 0xac, 0xb9, 0x38, 0x43, 0x60, 0x6a, 0x84, + 0x24, 0xb8, 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, + 0x83, 0x60, 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, + 0x0d, 0xd6, 0x20, 0x08, 0xc7, 0xa9, 0x81, 0xf1, 0xc2, 0x43, 0x39, 0x86, 0x1b, 0x0f, 0xe5, 0x18, + 0x3e, 0x3c, 0x94, 0x63, 0x5c, 0xf1, 0x48, 0x8e, 0xf1, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, + 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x7c, 0xf1, 0x48, 0x8e, 0xe1, 0xc3, 0x23, 0x39, 0xc6, 0x15, 0x8f, + 0xe5, 0x18, 0xb9, 0x84, 0x93, 0xf3, 0x73, 0xf5, 0xd0, 0x2c, 0x77, 0xe2, 0x83, 0x5b, 0x1d, 0x00, + 0x12, 0x0a, 0x60, 0x8c, 0x62, 0x2d, 0xa9, 0x2c, 0x48, 0x2d, 0xfe, 0xc1, 0xc8, 0xb8, 0x88, 0x89, + 0xd9, 0x3d, 0xc0, 0x69, 0x15, 
0x93, 0x9c, 0x3b, 0x44, 0x4f, 0x00, 0x54, 0x8f, 0x5e, 0x78, 0x6a, + 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x65, 0x12, 0x1b, 0xd8, 0x30, 0x63, 0x40, + 0x00, 0x00, 0x00, 0xff, 0xff, 0x9b, 0xa2, 0x42, 0xda, 0xea, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/types/timestamp_gogo.go b/vendor/github.com/gogo/protobuf/types/timestamp_gogo.go new file mode 100644 index 000000000000..e03fa1315830 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/timestamp_gogo.go @@ -0,0 +1,94 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +import ( + "time" +) + +func NewPopulatedTimestamp(r interface { + Int63() int64 +}, easy bool) *Timestamp { + this := &Timestamp{} + ns := int64(r.Int63()) + this.Seconds = ns / 1e9 + this.Nanos = int32(ns % 1e9) + return this +} + +func (ts *Timestamp) String() string { + return TimestampString(ts) +} + +func NewPopulatedStdTime(r interface { + Int63() int64 +}, easy bool) *time.Time { + timestamp := NewPopulatedTimestamp(r, easy) + t, err := TimestampFromProto(timestamp) + if err != nil { + return nil + } + return &t +} + +func SizeOfStdTime(t time.Time) int { + ts, err := TimestampProto(t) + if err != nil { + return 0 + } + return ts.Size() +} + +func StdTimeMarshal(t time.Time) ([]byte, error) { + size := SizeOfStdTime(t) + buf := make([]byte, size) + _, err := StdTimeMarshalTo(t, buf) + return buf, err +} + +func StdTimeMarshalTo(t time.Time, data []byte) (int, error) { + ts, err := TimestampProto(t) + if err != nil { + return 0, err + } + return ts.MarshalTo(data) +} + +func StdTimeUnmarshal(t *time.Time, data []byte) error { + ts := &Timestamp{} + if err := ts.Unmarshal(data); err != nil { + return err + } + tt, err := TimestampFromProto(ts) + if err != nil { + return err + } + *t = tt + return nil +} diff --git a/vendor/github.com/gogo/protobuf/types/wrappers.pb.go b/vendor/github.com/gogo/protobuf/types/wrappers.pb.go new file mode 100644 index 000000000000..125e35c8f4cc --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/wrappers.pb.go @@ -0,0 +1,2259 @@ +// Code generated by protoc-gen-gogo. 
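// Editorial usage sketch (not part of the vendored files; the function name is
// illustrative only). The Timestamp helpers above are the supported path between
// time.Time and the wire form: TimestampProto rejects times outside the
// [0001-01-01, 10000-01-01) range, and StdTimeUnmarshal combines Unmarshal with
// TimestampFromProto. A minimal round trip, in package-local form:
func exampleTimestampRoundTrip(t time.Time) (time.Time, error) {
	ts, err := TimestampProto(t) // errors if t falls outside the valid range
	if err != nil {
		return time.Time{}, err
	}
	data, err := ts.Marshal() // varint-encoded seconds and nanos
	if err != nil {
		return time.Time{}, err
	}
	var decoded time.Time
	if err := StdTimeUnmarshal(&decoded, data); err != nil {
		return time.Time{}, err
	}
	return decoded, nil // the same instant as t, rendered in UTC
}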
+// source: wrappers.proto +// DO NOT EDIT! + +/* +Package types is a generated protocol buffer package. + +It is generated from these files: + wrappers.proto + +It has these top-level messages: + DoubleValue + FloatValue + Int64Value + UInt64Value + Int32Value + UInt32Value + BoolValue + StringValue + BytesValue +*/ +package types + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import bytes "bytes" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +type DoubleValue struct { + // The double value. + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *DoubleValue) Reset() { *m = DoubleValue{} } +func (*DoubleValue) ProtoMessage() {} +func (*DoubleValue) Descriptor() ([]byte, []int) { return fileDescriptorWrappers, []int{0} } +func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" } + +func (m *DoubleValue) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +type FloatValue struct { + // The float value. + Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *FloatValue) Reset() { *m = FloatValue{} } +func (*FloatValue) ProtoMessage() {} +func (*FloatValue) Descriptor() ([]byte, []int) { return fileDescriptorWrappers, []int{1} } +func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" } + +func (m *FloatValue) GetValue() float32 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +type Int64Value struct { + // The int64 value. + Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *Int64Value) Reset() { *m = Int64Value{} } +func (*Int64Value) ProtoMessage() {} +func (*Int64Value) Descriptor() ([]byte, []int) { return fileDescriptorWrappers, []int{2} } +func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" } + +func (m *Int64Value) GetValue() int64 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +type UInt64Value struct { + // The uint64 value. + Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *UInt64Value) Reset() { *m = UInt64Value{} } +func (*UInt64Value) ProtoMessage() {} +func (*UInt64Value) Descriptor() ([]byte, []int) { return fileDescriptorWrappers, []int{3} } +func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" } + +func (m *UInt64Value) GetValue() uint64 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. +type Int32Value struct { + // The int32 value. 
+ Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *Int32Value) Reset() { *m = Int32Value{} } +func (*Int32Value) ProtoMessage() {} +func (*Int32Value) Descriptor() ([]byte, []int) { return fileDescriptorWrappers, []int{4} } +func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" } + +func (m *Int32Value) GetValue() int32 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +type UInt32Value struct { + // The uint32 value. + Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *UInt32Value) Reset() { *m = UInt32Value{} } +func (*UInt32Value) ProtoMessage() {} +func (*UInt32Value) Descriptor() ([]byte, []int) { return fileDescriptorWrappers, []int{5} } +func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" } + +func (m *UInt32Value) GetValue() uint32 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +type BoolValue struct { + // The bool value. + Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *BoolValue) Reset() { *m = BoolValue{} } +func (*BoolValue) ProtoMessage() {} +func (*BoolValue) Descriptor() ([]byte, []int) { return fileDescriptorWrappers, []int{6} } +func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" } + +func (m *BoolValue) GetValue() bool { + if m != nil { + return m.Value + } + return false +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +type StringValue struct { + // The string value. + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *StringValue) Reset() { *m = StringValue{} } +func (*StringValue) ProtoMessage() {} +func (*StringValue) Descriptor() ([]byte, []int) { return fileDescriptorWrappers, []int{7} } +func (*StringValue) XXX_WellKnownType() string { return "StringValue" } + +func (m *StringValue) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +type BytesValue struct { + // The bytes value. 
+ Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *BytesValue) Reset() { *m = BytesValue{} } +func (*BytesValue) ProtoMessage() {} +func (*BytesValue) Descriptor() ([]byte, []int) { return fileDescriptorWrappers, []int{8} } +func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" } + +func (m *BytesValue) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func init() { + proto.RegisterType((*DoubleValue)(nil), "google.protobuf.DoubleValue") + proto.RegisterType((*FloatValue)(nil), "google.protobuf.FloatValue") + proto.RegisterType((*Int64Value)(nil), "google.protobuf.Int64Value") + proto.RegisterType((*UInt64Value)(nil), "google.protobuf.UInt64Value") + proto.RegisterType((*Int32Value)(nil), "google.protobuf.Int32Value") + proto.RegisterType((*UInt32Value)(nil), "google.protobuf.UInt32Value") + proto.RegisterType((*BoolValue)(nil), "google.protobuf.BoolValue") + proto.RegisterType((*StringValue)(nil), "google.protobuf.StringValue") + proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue") +} +func (this *DoubleValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*DoubleValue) + if !ok { + that2, ok := that.(DoubleValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + return 0 +} +func (this *FloatValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*FloatValue) + if !ok { + that2, ok := that.(FloatValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + return 0 +} +func (this *Int64Value) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Int64Value) + if !ok { + that2, ok := that.(Int64Value) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + return 0 +} +func (this *UInt64Value) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*UInt64Value) + if !ok { + that2, ok := that.(UInt64Value) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + return 0 +} +func (this *Int32Value) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Int32Value) + if !ok { + that2, ok := that.(Int32Value) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + return 0 +} +func (this *UInt32Value) Compare(that interface{}) 
int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*UInt32Value) + if !ok { + that2, ok := that.(UInt32Value) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + return 0 +} +func (this *BoolValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*BoolValue) + if !ok { + that2, ok := that.(BoolValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if !this.Value { + return -1 + } + return 1 + } + return 0 +} +func (this *StringValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*StringValue) + if !ok { + that2, ok := that.(StringValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + return 0 +} +func (this *BytesValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*BytesValue) + if !ok { + that2, ok := that.(BytesValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if c := bytes.Compare(this.Value, that1.Value); c != 0 { + return c + } + return 0 +} +func (this *DoubleValue) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*DoubleValue) + if !ok { + that2, ok := that.(DoubleValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + return true +} +func (this *FloatValue) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*FloatValue) + if !ok { + that2, ok := that.(FloatValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + return true +} +func (this *Int64Value) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Int64Value) + if !ok { + that2, ok := that.(Int64Value) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + return true +} +func (this *UInt64Value) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*UInt64Value) + if !ok { + that2, ok := that.(UInt64Value) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return 
false + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + return true +} +func (this *Int32Value) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Int32Value) + if !ok { + that2, ok := that.(Int32Value) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + return true +} +func (this *UInt32Value) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*UInt32Value) + if !ok { + that2, ok := that.(UInt32Value) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + return true +} +func (this *BoolValue) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*BoolValue) + if !ok { + that2, ok := that.(BoolValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + return true +} +func (this *StringValue) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*StringValue) + if !ok { + that2, ok := that.(StringValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + return true +} +func (this *BytesValue) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*BytesValue) + if !ok { + that2, ok := that.(BytesValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if !bytes.Equal(this.Value, that1.Value) { + return false + } + return true +} +func (this *DoubleValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.DoubleValue{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FloatValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.FloatValue{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Int64Value) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.Int64Value{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UInt64Value) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.UInt64Value{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Int32Value) GoString() string { + if this == nil 
{ + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.Int32Value{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UInt32Value) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.UInt32Value{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *BoolValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.BoolValue{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *StringValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.StringValue{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *BytesValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.BytesValue{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringWrappers(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *DoubleValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DoubleValue) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Value != 0 { + dAtA[i] = 0x9 + i++ + i = encodeFixed64Wrappers(dAtA, i, uint64(math.Float64bits(float64(m.Value)))) + } + return i, nil +} + +func (m *FloatValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FloatValue) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Value != 0 { + dAtA[i] = 0xd + i++ + i = encodeFixed32Wrappers(dAtA, i, uint32(math.Float32bits(float32(m.Value)))) + } + return i, nil +} + +func (m *Int64Value) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Int64Value) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Value != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintWrappers(dAtA, i, uint64(m.Value)) + } + return i, nil +} + +func (m *UInt64Value) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UInt64Value) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Value != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintWrappers(dAtA, i, uint64(m.Value)) + } + return i, nil +} + +func (m *Int32Value) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Int32Value) MarshalTo(dAtA []byte) (int, error) { + var i int + _ 
= i + var l int + _ = l + if m.Value != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintWrappers(dAtA, i, uint64(m.Value)) + } + return i, nil +} + +func (m *UInt32Value) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UInt32Value) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Value != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintWrappers(dAtA, i, uint64(m.Value)) + } + return i, nil +} + +func (m *BoolValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BoolValue) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Value { + dAtA[i] = 0x8 + i++ + if m.Value { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *StringValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StringValue) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Value) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintWrappers(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + return i, nil +} + +func (m *BytesValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BytesValue) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Value) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintWrappers(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + return i, nil +} + +func encodeFixed64Wrappers(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Wrappers(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintWrappers(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func NewPopulatedDoubleValue(r randyWrappers, easy bool) *DoubleValue { + this := &DoubleValue{} + this.Value = float64(r.Float64()) + if r.Intn(2) == 0 { + this.Value *= -1 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedFloatValue(r randyWrappers, easy bool) *FloatValue { + this := &FloatValue{} + this.Value = float32(r.Float32()) + if r.Intn(2) == 0 { + this.Value *= -1 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedInt64Value(r randyWrappers, easy bool) *Int64Value { + this := &Int64Value{} + this.Value = int64(r.Int63()) + if r.Intn(2) == 0 { + this.Value *= -1 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedUInt64Value(r randyWrappers, easy bool) *UInt64Value { + this := &UInt64Value{} + this.Value = uint64(uint64(r.Uint32())) + if !easy && 
r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedInt32Value(r randyWrappers, easy bool) *Int32Value { + this := &Int32Value{} + this.Value = int32(r.Int31()) + if r.Intn(2) == 0 { + this.Value *= -1 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedUInt32Value(r randyWrappers, easy bool) *UInt32Value { + this := &UInt32Value{} + this.Value = uint32(r.Uint32()) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedBoolValue(r randyWrappers, easy bool) *BoolValue { + this := &BoolValue{} + this.Value = bool(bool(r.Intn(2) == 0)) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedStringValue(r randyWrappers, easy bool) *StringValue { + this := &StringValue{} + this.Value = string(randStringWrappers(r)) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedBytesValue(r randyWrappers, easy bool) *BytesValue { + this := &BytesValue{} + v1 := r.Intn(100) + this.Value = make([]byte, v1) + for i := 0; i < v1; i++ { + this.Value[i] = byte(r.Intn(256)) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +type randyWrappers interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneWrappers(r randyWrappers) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringWrappers(r randyWrappers) string { + v2 := r.Intn(100) + tmps := make([]rune, v2) + for i := 0; i < v2; i++ { + tmps[i] = randUTF8RuneWrappers(r) + } + return string(tmps) +} +func randUnrecognizedWrappers(r randyWrappers, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldWrappers(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldWrappers(dAtA []byte, r randyWrappers, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(key)) + v3 := r.Int63() + if r.Intn(2) == 0 { + v3 *= -1 + } + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(v3)) + case 1: + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateWrappers(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *DoubleValue) Size() (n int) { + var l int + _ = l + if m.Value != 0 { + n += 9 + } + return n +} + +func (m *FloatValue) Size() (n int) { + var l int + _ = l + if m.Value != 0 { + n += 5 + } + return n +} + +func (m *Int64Value) Size() (n int) { + var l int + _ = l + if m.Value != 0 { + n += 1 + sovWrappers(uint64(m.Value)) + } + return n +} + +func (m *UInt64Value) Size() (n int) { + var l int + _ = l + if m.Value != 0 { + n += 1 + 
sovWrappers(uint64(m.Value)) + } + return n +} + +func (m *Int32Value) Size() (n int) { + var l int + _ = l + if m.Value != 0 { + n += 1 + sovWrappers(uint64(m.Value)) + } + return n +} + +func (m *UInt32Value) Size() (n int) { + var l int + _ = l + if m.Value != 0 { + n += 1 + sovWrappers(uint64(m.Value)) + } + return n +} + +func (m *BoolValue) Size() (n int) { + var l int + _ = l + if m.Value { + n += 2 + } + return n +} + +func (m *StringValue) Size() (n int) { + var l int + _ = l + l = len(m.Value) + if l > 0 { + n += 1 + l + sovWrappers(uint64(l)) + } + return n +} + +func (m *BytesValue) Size() (n int) { + var l int + _ = l + l = len(m.Value) + if l > 0 { + n += 1 + l + sovWrappers(uint64(l)) + } + return n +} + +func sovWrappers(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozWrappers(x uint64) (n int) { + return sovWrappers(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *DoubleValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DoubleValue{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *FloatValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FloatValue{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *Int64Value) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Int64Value{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *UInt64Value) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UInt64Value{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *Int32Value) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Int32Value{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *UInt32Value) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UInt32Value{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *BoolValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BoolValue{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *StringValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StringValue{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *BytesValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BytesValue{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func valueToStringWrappers(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *DoubleValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DoubleValue: wiretype end group for non-group") + } + if fieldNum 
<= 0 { + return fmt.Errorf("proto: DoubleValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = uint64(dAtA[iNdEx-8]) + v |= uint64(dAtA[iNdEx-7]) << 8 + v |= uint64(dAtA[iNdEx-6]) << 16 + v |= uint64(dAtA[iNdEx-5]) << 24 + v |= uint64(dAtA[iNdEx-4]) << 32 + v |= uint64(dAtA[iNdEx-3]) << 40 + v |= uint64(dAtA[iNdEx-2]) << 48 + v |= uint64(dAtA[iNdEx-1]) << 56 + m.Value = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FloatValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FloatValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FloatValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 5 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint32 + if (iNdEx + 4) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 4 + v = uint32(dAtA[iNdEx-4]) + v |= uint32(dAtA[iNdEx-3]) << 8 + v |= uint32(dAtA[iNdEx-2]) << 16 + v |= uint32(dAtA[iNdEx-1]) << 24 + m.Value = float32(math.Float32frombits(v)) + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Int64Value) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Int64Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Int64Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + 
skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UInt64Value) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UInt64Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UInt64Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Int32Value) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Int32Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Int32Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UInt32Value) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UInt32Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UInt32Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch 
fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BoolValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BoolValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BoolValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Value = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StringValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StringValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StringValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWrappers + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers 
+ } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BytesValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BytesValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BytesValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthWrappers + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) + if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipWrappers(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWrappers + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWrappers + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWrappers + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthWrappers + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWrappers + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipWrappers(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + 
panic("unreachable") +} + +var ( + ErrInvalidLengthWrappers = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowWrappers = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("wrappers.proto", fileDescriptorWrappers) } + +var fileDescriptorWrappers = []byte{ + // 278 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2b, 0x2f, 0x4a, 0x2c, + 0x28, 0x48, 0x2d, 0x2a, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4f, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0x85, 0xf0, 0x92, 0x4a, 0xd3, 0x94, 0x94, 0xb9, 0xb8, 0x5d, 0xf2, 0x4b, 0x93, 0x72, + 0x52, 0xc3, 0x12, 0x73, 0x4a, 0x53, 0x85, 0x44, 0xb8, 0x58, 0xcb, 0x40, 0x0c, 0x09, 0x46, 0x05, + 0x46, 0x0d, 0xc6, 0x20, 0x08, 0x47, 0x49, 0x89, 0x8b, 0xcb, 0x2d, 0x27, 0x3f, 0xb1, 0x04, 0x8b, + 0x1a, 0x26, 0x24, 0x35, 0x9e, 0x79, 0x25, 0x66, 0x26, 0x58, 0xd4, 0x30, 0xc3, 0xd4, 0x28, 0x73, + 0x71, 0x87, 0xe2, 0x52, 0xc4, 0x82, 0x6a, 0x90, 0xb1, 0x11, 0x16, 0x35, 0xac, 0x68, 0x06, 0x61, + 0x55, 0xc4, 0x0b, 0x53, 0xa4, 0xc8, 0xc5, 0xe9, 0x94, 0x9f, 0x9f, 0x83, 0x45, 0x09, 0x07, 0x92, + 0x39, 0xc1, 0x25, 0x45, 0x99, 0x79, 0xe9, 0x58, 0x14, 0x71, 0x22, 0x39, 0xc8, 0xa9, 0xb2, 0x24, + 0xb5, 0x18, 0x8b, 0x1a, 0x1e, 0xa8, 0x1a, 0xa7, 0x76, 0xc6, 0x0b, 0x0f, 0xe5, 0x18, 0x6e, 0x3c, + 0x94, 0x63, 0xf8, 0xf0, 0x50, 0x8e, 0xf1, 0xc7, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, 0x8c, 0x2b, + 0x1e, 0xc9, 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, + 0x2f, 0x1e, 0xc9, 0x31, 0x7c, 0x00, 0x89, 0x3f, 0x96, 0x63, 0xe4, 0x12, 0x4e, 0xce, 0xcf, 0xd5, + 0x43, 0x8b, 0x0e, 0x27, 0xde, 0x70, 0x68, 0x7c, 0x05, 0x80, 0x44, 0x02, 0x18, 0xa3, 0x58, 0x4b, + 0x2a, 0x0b, 0x52, 0x8b, 0x7f, 0x30, 0x32, 0x2e, 0x62, 0x62, 0x76, 0x0f, 0x70, 0x5a, 0xc5, 0x24, + 0xe7, 0x0e, 0xd1, 0x12, 0x00, 0xd5, 0xa2, 0x17, 0x9e, 0x9a, 0x93, 0xe3, 0x9d, 0x97, 0x5f, 0x9e, + 0x17, 0x02, 0x52, 0x99, 0xc4, 0x06, 0x36, 0xcb, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x23, 0x27, + 0x6c, 0x5f, 0xfa, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go index 1fc8ae8d7038..110ae138427a 100644 --- a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go @@ -44,6 +44,7 @@ import ( "errors" "fmt" "io" + "math" "reflect" "sort" "strconv" @@ -51,6 +52,8 @@ import ( "time" "github.com/golang/protobuf/proto" + + stpb "github.com/golang/protobuf/ptypes/struct" ) // Marshaler is a configurable object for converting between @@ -70,6 +73,47 @@ type Marshaler struct { // Whether to use the original (.proto) name for fields. OrigName bool + + // A custom URL resolver to use when marshaling Any messages to JSON. + // If unset, the default resolution strategy is to extract the + // fully-qualified type name from the type URL and pass that to + // proto.MessageType(string). + AnyResolver AnyResolver +} + +// AnyResolver takes a type URL, present in an Any message, and resolves it into +// an instance of the associated message. +type AnyResolver interface { + Resolve(typeUrl string) (proto.Message, error) +} + +func defaultResolveAny(typeUrl string) (proto.Message, error) { + // Only the part of typeUrl after the last slash is relevant. 
+ mname := typeUrl + if slash := strings.LastIndex(mname, "/"); slash >= 0 { + mname = mname[slash+1:] + } + mt := proto.MessageType(mname) + if mt == nil { + return nil, fmt.Errorf("unknown message type %q", mname) + } + return reflect.New(mt.Elem()).Interface().(proto.Message), nil +} + +// JSONPBMarshaler is implemented by protobuf messages that customize the +// way they are marshaled to JSON. Messages that implement this should +// also implement JSONPBUnmarshaler so that the custom format can be +// parsed. +type JSONPBMarshaler interface { + MarshalJSONPB(*Marshaler) ([]byte, error) +} + +// JSONPBUnmarshaler is implemented by protobuf messages that customize +// the way they are unmarshaled from JSON. Messages that implement this +// should also implement JSONPBMarshaler so that the custom format can be +// produced. +type JSONPBUnmarshaler interface { + UnmarshalJSONPB(*Unmarshaler, []byte) error } // Marshal marshals a protocol buffer into JSON. @@ -89,6 +133,12 @@ func (m *Marshaler) MarshalToString(pb proto.Message) (string, error) { type int32Slice []int32 +var nonFinite = map[string]float64{ + `"NaN"`: math.NaN(), + `"Infinity"`: math.Inf(1), + `"-Infinity"`: math.Inf(-1), +} + // For sorting extensions ids to ensure stable output. func (s int32Slice) Len() int { return len(s) } func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } @@ -100,6 +150,31 @@ type wkt interface { // marshalObject writes a struct to the Writer. func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeURL string) error { + if jsm, ok := v.(JSONPBMarshaler); ok { + b, err := jsm.MarshalJSONPB(m) + if err != nil { + return err + } + if typeURL != "" { + // we are marshaling this object to an Any type + var js map[string]*json.RawMessage + if err = json.Unmarshal(b, &js); err != nil { + return fmt.Errorf("type %T produced invalid JSON: %v", v, err) + } + turl, err := json.Marshal(typeURL) + if err != nil { + return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err) + } + js["@type"] = (*json.RawMessage)(&turl) + if b, err = json.Marshal(js); err != nil { + return err + } + } + + out.write(string(b)) + return out.err + } + s := reflect.ValueOf(v).Elem() // Handle well-known types. @@ -126,8 +201,8 @@ func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeU out.write(x) out.write(`s"`) return out.err - case "Struct": - // Let marshalValue handle the `fields` map. + case "Struct", "ListValue": + // Let marshalValue handle the `Struct.fields` map or the `ListValue.values` slice. // TODO: pass the correct Properties if needed. return m.marshalValue(out, &proto.Properties{}, s.Field(0), indent) case "Timestamp": @@ -180,7 +255,7 @@ func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeU // IsNil will panic on most value kinds. switch value.Kind() { - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + case reflect.Chan, reflect.Func, reflect.Interface: if value.IsNil() { continue } @@ -208,6 +283,10 @@ func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeU if value.Len() == 0 { continue } + case reflect.Map, reflect.Ptr, reflect.Slice: + if value.IsNil() { + continue + } } } @@ -290,16 +369,17 @@ func (m *Marshaler) marshalAny(out *errWriter, any proto.Message, indent string) turl := v.Field(0).String() val := v.Field(1).Bytes() - // Only the part of type_url after the last slash is relevant. 
- mname := turl - if slash := strings.LastIndex(mname, "/"); slash >= 0 { - mname = mname[slash+1:] + var msg proto.Message + var err error + if m.AnyResolver != nil { + msg, err = m.AnyResolver.Resolve(turl) + } else { + msg, err = defaultResolveAny(turl) } - mt := proto.MessageType(mname) - if mt == nil { - return fmt.Errorf("unknown message type %q", mname) + if err != nil { + return err } - msg := reflect.New(mt.Elem()).Interface().(proto.Message) + if err := proto.Unmarshal(val, msg); err != nil { return err } @@ -371,10 +451,15 @@ func (m *Marshaler) marshalField(out *errWriter, prop *proto.Properties, v refle // marshalValue writes the value to the Writer. func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { - var err error v = reflect.Indirect(v) + // Handle nil pointer + if v.Kind() == reflect.Invalid { + out.write("null") + return out.err + } + // Handle repeated elements. if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 { out.write("[") @@ -404,9 +489,6 @@ func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v refle // Handle well-known types. // Most are handled up in marshalObject (because 99% are messages). - type wkt interface { - XXX_WellKnownType() string - } if wkt, ok := v.Interface().(wkt); ok { switch wkt.XXX_WellKnownType() { case "NullValue": @@ -494,6 +576,24 @@ func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v refle return out.err } + // Handle non-finite floats, e.g. NaN, Infinity and -Infinity. + if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + f := v.Float() + var sval string + switch { + case math.IsInf(f, 1): + sval = `"Infinity"` + case math.IsInf(f, -1): + sval = `"-Infinity"` + case math.IsNaN(f): + sval = `"NaN"` + } + if sval != "" { + out.write(sval) + return out.err + } + } + // Default handling defers to the encoding/json library. b, err := json.Marshal(v.Interface()) if err != nil { @@ -516,6 +616,12 @@ type Unmarshaler struct { // Whether to allow messages to contain unknown fields, as opposed to // failing to unmarshal. AllowUnknownFields bool + + // A custom URL resolver to use when unmarshaling Any messages from JSON. + // If unset, the default resolution strategy is to extract the + // fully-qualified type name from the type URL and pass that to + // proto.MessageType(string). + AnyResolver AnyResolver } // UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. @@ -565,34 +671,97 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe // Allocate memory for pointer fields. if targetType.Kind() == reflect.Ptr { + // If input value is "null" and target is a pointer type, then the field should be treated as not set + // UNLESS the target is structpb.Value, in which case it should be set to structpb.NullValue. + _, isJSONPBUnmarshaler := target.Interface().(JSONPBUnmarshaler) + if string(inputValue) == "null" && targetType != reflect.TypeOf(&stpb.Value{}) && !isJSONPBUnmarshaler { + return nil + } target.Set(reflect.New(targetType.Elem())) + return u.unmarshalValue(target.Elem(), inputValue, prop) } - // Handle well-known types. - type wkt interface { - XXX_WellKnownType() string + if jsu, ok := target.Addr().Interface().(JSONPBUnmarshaler); ok { + return jsu.UnmarshalJSONPB(u, []byte(inputValue)) } - if wkt, ok := target.Addr().Interface().(wkt); ok { - switch wkt.XXX_WellKnownType() { + + // Handle well-known types that are not pointers. 
+ if w, ok := target.Addr().Interface().(wkt); ok { + switch w.XXX_WellKnownType() { case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": - // "Wrappers use the same representation in JSON - // as the wrapped primitive type, except that null is allowed." - // encoding/json will turn JSON `null` into Go `nil`, - // so we don't have to do any extra work. return u.unmarshalValue(target.Field(0), inputValue, prop) case "Any": - return fmt.Errorf("unmarshaling Any not supported yet") + // Use json.RawMessage pointer type instead of value to support pre-1.8 version. + // 1.8 changed RawMessage.MarshalJSON from pointer type to value type, see + // https://github.com/golang/go/issues/14493 + var jsonFields map[string]*json.RawMessage + if err := json.Unmarshal(inputValue, &jsonFields); err != nil { + return err + } + + val, ok := jsonFields["@type"] + if !ok || val == nil { + return errors.New("Any JSON doesn't have '@type'") + } + + var turl string + if err := json.Unmarshal([]byte(*val), &turl); err != nil { + return fmt.Errorf("can't unmarshal Any's '@type': %q", *val) + } + target.Field(0).SetString(turl) + + var m proto.Message + var err error + if u.AnyResolver != nil { + m, err = u.AnyResolver.Resolve(turl) + } else { + m, err = defaultResolveAny(turl) + } + if err != nil { + return err + } + + if _, ok := m.(wkt); ok { + val, ok := jsonFields["value"] + if !ok { + return errors.New("Any JSON doesn't have 'value'") + } + + if err := u.unmarshalValue(reflect.ValueOf(m).Elem(), *val, nil); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err) + } + } else { + delete(jsonFields, "@type") + nestedProto, err := json.Marshal(jsonFields) + if err != nil { + return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err) + } + + if err = u.unmarshalValue(reflect.ValueOf(m).Elem(), nestedProto, nil); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err) + } + } + + b, err := proto.Marshal(m) + if err != nil { + return fmt.Errorf("can't marshal proto %T into Any.Value: %v", m, err) + } + target.Field(1).SetBytes(b) + + return nil case "Duration": unq, err := strconv.Unquote(string(inputValue)) if err != nil { return err } + d, err := time.ParseDuration(unq) if err != nil { return fmt.Errorf("bad Duration: %v", err) } + ns := d.Nanoseconds() s := ns / 1e9 ns %= 1e9 @@ -604,13 +773,65 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe if err != nil { return err } + t, err := time.Parse(time.RFC3339Nano, unq) if err != nil { return fmt.Errorf("bad Timestamp: %v", err) } - target.Field(0).SetInt(int64(t.Unix())) + + target.Field(0).SetInt(t.Unix()) target.Field(1).SetInt(int64(t.Nanosecond())) return nil + case "Struct": + var m map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &m); err != nil { + return fmt.Errorf("bad StructValue: %v", err) + } + + target.Field(0).Set(reflect.ValueOf(map[string]*stpb.Value{})) + for k, jv := range m { + pv := &stpb.Value{} + if err := u.unmarshalValue(reflect.ValueOf(pv).Elem(), jv, prop); err != nil { + return fmt.Errorf("bad value in StructValue for key %q: %v", k, err) + } + target.Field(0).SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(pv)) + } + return nil + case "ListValue": + var s []json.RawMessage + if err := json.Unmarshal(inputValue, &s); err != nil { + return fmt.Errorf("bad ListValue: %v", err) + } + + 
target.Field(0).Set(reflect.ValueOf(make([]*stpb.Value, len(s), len(s)))) + for i, sv := range s { + if err := u.unmarshalValue(target.Field(0).Index(i), sv, prop); err != nil { + return err + } + } + return nil + case "Value": + ivStr := string(inputValue) + if ivStr == "null" { + target.Field(0).Set(reflect.ValueOf(&stpb.Value_NullValue{})) + } else if v, err := strconv.ParseFloat(ivStr, 0); err == nil { + target.Field(0).Set(reflect.ValueOf(&stpb.Value_NumberValue{v})) + } else if v, err := strconv.Unquote(ivStr); err == nil { + target.Field(0).Set(reflect.ValueOf(&stpb.Value_StringValue{v})) + } else if v, err := strconv.ParseBool(ivStr); err == nil { + target.Field(0).Set(reflect.ValueOf(&stpb.Value_BoolValue{v})) + } else if err := json.Unmarshal(inputValue, &[]json.RawMessage{}); err == nil { + lv := &stpb.ListValue{} + target.Field(0).Set(reflect.ValueOf(&stpb.Value_ListValue{lv})) + return u.unmarshalValue(reflect.ValueOf(lv).Elem(), inputValue, prop) + } else if err := json.Unmarshal(inputValue, &map[string]json.RawMessage{}); err == nil { + sv := &stpb.Struct{} + target.Field(0).Set(reflect.ValueOf(&stpb.Value_StructValue{sv})) + return u.unmarshalValue(reflect.ValueOf(sv).Elem(), inputValue, prop) + } else { + return fmt.Errorf("unrecognized type for Value %q", ivStr) + } + return nil } } @@ -694,6 +915,26 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe } } } + // Handle proto2 extensions. + if len(jsonFields) > 0 { + if ep, ok := target.Addr().Interface().(proto.Message); ok { + for _, ext := range proto.RegisteredExtensions(ep) { + name := fmt.Sprintf("[%s]", ext.Name) + raw, ok := jsonFields[name] + if !ok { + continue + } + delete(jsonFields, name) + nv := reflect.New(reflect.TypeOf(ext.ExtensionType).Elem()) + if err := u.unmarshalValue(nv.Elem(), raw, nil); err != nil { + return err + } + if err := proto.SetExtension(ep, ext, nv.Interface()); err != nil { + return err + } + } + } + } if !u.AllowUnknownFields && len(jsonFields) > 0 { // Pick any field to be the scapegoat. var f string @@ -712,11 +953,13 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe if err := json.Unmarshal(inputValue, &slc); err != nil { return err } - len := len(slc) - target.Set(reflect.MakeSlice(targetType, len, len)) - for i := 0; i < len; i++ { - if err := u.unmarshalValue(target.Index(i), slc[i], prop); err != nil { - return err + if slc != nil { + l := len(slc) + target.Set(reflect.MakeSlice(targetType, l, l)) + for i := 0; i < l; i++ { + if err := u.unmarshalValue(target.Index(i), slc[i], prop); err != nil { + return err + } } } return nil @@ -728,33 +971,35 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe if err := json.Unmarshal(inputValue, &mp); err != nil { return err } - target.Set(reflect.MakeMap(targetType)) - var keyprop, valprop *proto.Properties - if prop != nil { - // These could still be nil if the protobuf metadata is broken somehow. - // TODO: This won't work because the fields are unexported. - // We should probably just reparse them. - //keyprop, valprop = prop.mkeyprop, prop.mvalprop - } - for ks, raw := range mp { - // Unmarshal map key. The core json library already decoded the key into a - // string, so we handle that specially. Other types were quoted post-serialization. 
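With the Struct, ListValue and Value cases added above, arbitrary JSON can now be decoded straight into the structpb well-known types. A small usage sketch, assuming the vendored jsonpb and structpb packages:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/golang/protobuf/jsonpb"
	stpb "github.com/golang/protobuf/ptypes/struct"
)

func main() {
	const in = `{"name":"minikube","replicas":3,"ready":true,"tags":["vm","local"],"meta":null}`

	// Each JSON value lands in the matching structpb.Value kind:
	// null, number, string, bool, list or nested struct.
	var s stpb.Struct
	if err := jsonpb.Unmarshal(strings.NewReader(in), &s); err != nil {
		panic(err)
	}

	fmt.Println(s.Fields["replicas"].GetNumberValue())                            // 3
	fmt.Println(s.Fields["tags"].GetListValue().GetValues()[0].GetStringValue()) // vm

	// JSON null is represented by the NullValue kind, not a nil pointer.
	_, isNull := s.Fields["meta"].GetKind().(*stpb.Value_NullValue)
	fmt.Println(isNull) // true
}
```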
- var k reflect.Value - if targetType.Key().Kind() == reflect.String { - k = reflect.ValueOf(ks) - } else { - k = reflect.New(targetType.Key()).Elem() - if err := u.unmarshalValue(k, json.RawMessage(ks), keyprop); err != nil { - return err - } + if mp != nil { + target.Set(reflect.MakeMap(targetType)) + var keyprop, valprop *proto.Properties + if prop != nil { + // These could still be nil if the protobuf metadata is broken somehow. + // TODO: This won't work because the fields are unexported. + // We should probably just reparse them. + //keyprop, valprop = prop.mkeyprop, prop.mvalprop } + for ks, raw := range mp { + // Unmarshal map key. The core json library already decoded the key into a + // string, so we handle that specially. Other types were quoted post-serialization. + var k reflect.Value + if targetType.Key().Kind() == reflect.String { + k = reflect.ValueOf(ks) + } else { + k = reflect.New(targetType.Key()).Elem() + if err := u.unmarshalValue(k, json.RawMessage(ks), keyprop); err != nil { + return err + } + } - // Unmarshal map value. - v := reflect.New(targetType.Elem()).Elem() - if err := u.unmarshalValue(v, raw, valprop); err != nil { - return err + // Unmarshal map value. + v := reflect.New(targetType.Elem()).Elem() + if err := u.unmarshalValue(v, raw, valprop); err != nil { + return err + } + target.SetMapIndex(k, v) } - target.SetMapIndex(k, v) } return nil } @@ -766,6 +1011,15 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe inputValue = inputValue[1 : len(inputValue)-1] } + // Non-finite numbers can be encoded as strings. + isFloat := targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64 + if isFloat { + if num, ok := nonFinite[string(inputValue)]; ok { + target.SetFloat(num) + return nil + } + } + // Use the encoding/json for parsing other value types. return json.Unmarshal(inputValue, target.Addr().Interface()) } diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go index 68b9b30cfaed..8b84d1b22d4c 100644 --- a/vendor/github.com/golang/protobuf/proto/encode.go +++ b/vendor/github.com/golang/protobuf/proto/encode.go @@ -174,11 +174,11 @@ func sizeFixed32(x uint64) int { // This is the format used for the sint64 protocol buffer type. func (p *Buffer) EncodeZigzag64(x uint64) error { // use signed number to get arithmetic right shift. 
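The EncodeZigzag64/sizeZigzag64 hunk that continues below only drops a redundant uint64 conversion; the zigzag mapping itself is unchanged. As a standalone sketch (not part of the vendored code), the encode/decode pair looks like this:

```go
package main

import "fmt"

// zigzag64 interleaves negative and positive values so that numbers with a
// small magnitude become small varints: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ...
func zigzag64(x int64) uint64 {
	// The signed shift yields all-ones for negative x, flipping every bit.
	return (uint64(x) << 1) ^ uint64(x>>63)
}

func unzigzag64(x uint64) int64 {
	return int64(x>>1) ^ -int64(x&1)
}

func main() {
	for _, v := range []int64{0, -1, 1, -2, 64, -64} {
		enc := zigzag64(v)
		fmt.Printf("%d -> %d -> %d\n", v, enc, unzigzag64(enc))
	}
}
```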
- return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) + return p.EncodeVarint((x << 1) ^ uint64((int64(x) >> 63))) } func sizeZigzag64(x uint64) int { - return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) + return sizeVarint((x << 1) ^ uint64((int64(x) >> 63))) } // EncodeZigzag32 writes a zigzag-encoded 32-bit integer @@ -1075,10 +1075,17 @@ func (o *Buffer) enc_map(p *Properties, base structPointer) error { func (o *Buffer) enc_exts(p *Properties, base structPointer) error { exts := structPointer_Extensions(base, p.field) - if err := encodeExtensions(exts); err != nil { + + v, mu := exts.extensionsRead() + if v == nil { + return nil + } + + mu.Lock() + defer mu.Unlock() + if err := encodeExtensionsMap(v); err != nil { return err } - v, _ := exts.extensionsRead() return o.enc_map_body(v) } diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go index 6b9b36374660..eaad21831263 100644 --- a/vendor/github.com/golang/protobuf/proto/extensions.go +++ b/vendor/github.com/golang/protobuf/proto/extensions.go @@ -154,6 +154,7 @@ type ExtensionDesc struct { Field int32 // field number Name string // fully-qualified name of extension, for text formatting Tag string // protobuf tag style + Filename string // name of the file in which the extension is defined } func (ed *ExtensionDesc) repeated() bool { diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go index ac4ddbc07598..1c225504a013 100644 --- a/vendor/github.com/golang/protobuf/proto/lib.go +++ b/vendor/github.com/golang/protobuf/proto/lib.go @@ -73,7 +73,6 @@ for a protocol buffer variable v: When the .proto file specifies `syntax="proto3"`, there are some differences: - Non-repeated fields of non-message type are values instead of pointers. - - Getters are only generated for message and oneof fields. - Enum types do not get an Enum method. The simplest way to describe this is to see an example. diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go index 61f83c1e10fa..5e14513f28c9 100644 --- a/vendor/github.com/golang/protobuf/proto/text_parser.go +++ b/vendor/github.com/golang/protobuf/proto/text_parser.go @@ -865,7 +865,7 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { return p.readStruct(fv, terminator) case reflect.Uint32: if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(uint64(x)) + fv.SetUint(x) return nil } case reflect.Uint64: diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go index 89e07ae192a5..b2af97f4a982 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any.go +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -51,6 +51,9 @@ const googleApis = "type.googleapis.com/" // function. AnyMessageName is provided for less common use cases like filtering a // sequence of Any messages based on a set of allowed message type names. 
func AnyMessageName(any *any.Any) (string, error) { + if any == nil { + return "", fmt.Errorf("message is nil") + } slash := strings.LastIndex(any.TypeUrl, "/") if slash < 0 { return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go index f2c6906b91b0..f34601723de2 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go @@ -1,12 +1,11 @@ -// Code generated by protoc-gen-go. -// source: github.com/golang/protobuf/ptypes/any/any.proto -// DO NOT EDIT! +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/any.proto /* Package any is a generated protocol buffer package. It is generated from these files: - github.com/golang/protobuf/ptypes/any/any.proto + google/protobuf/any.proto It has these top-level messages: Any @@ -63,6 +62,16 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // any.Unpack(foo) // ... // +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := ptypes.MarshalAny(foo) +// ... +// foo := &pb.Foo{} +// if err := ptypes.UnmarshalAny(any, foo); err != nil { +// ... +// } +// // The pack methods provided by protobuf library will by default use // 'type.googleapis.com/full.type.name' as the type URL and the unpack // methods only use the fully qualified type name after the last '/' @@ -132,24 +141,38 @@ func (*Any) ProtoMessage() {} func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } func (*Any) XXX_WellKnownType() string { return "Any" } +func (m *Any) GetTypeUrl() string { + if m != nil { + return m.TypeUrl + } + return "" +} + +func (m *Any) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + func init() { proto.RegisterType((*Any)(nil), "google.protobuf.Any") } -func init() { proto.RegisterFile("github.com/golang/protobuf/ptypes/any/any.proto", fileDescriptor0) } +func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 187 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xd2, 0x4f, 0xcf, 0x2c, 0xc9, - 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, - 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xcc, - 0xab, 0x04, 0x61, 0x3d, 0xb0, 0xb8, 0x10, 0x7f, 0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x1e, 0x4c, - 0x95, 0x92, 0x19, 0x17, 0xb3, 0x63, 0x5e, 0xa5, 0x90, 0x24, 0x17, 0x07, 0x48, 0x79, 0x7c, 0x69, - 0x51, 0x8e, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x3b, 0x88, 0x1f, 0x5a, 0x94, 0x23, 0x24, - 0xc2, 0xc5, 0x5a, 0x96, 0x98, 0x53, 0x9a, 0x2a, 0xc1, 0xa4, 0xc0, 0xa8, 0xc1, 0x13, 0x04, 0xe1, - 0x38, 0x15, 0x71, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x19, 0xe7, 0xc4, 0xe1, 0x98, 0x57, 0x19, - 0x00, 0xe2, 0x04, 0x30, 0x46, 0xa9, 0x12, 0xe5, 0xb8, 0x05, 0x8c, 0x8c, 0x8b, 0x98, 0x98, 0xdd, - 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x4c, 0x0b, 0x80, 0xaa, 0xd2, 0x0b, 0x4f, 0xcd, 0xc9, - 0xf1, 0xce, 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0xa9, 0x4e, 0x62, 0x03, 0x6b, 0x37, 0x06, 0x04, 0x00, - 0x00, 0xff, 0xff, 0xc6, 0x4d, 0x03, 0x23, 0xf6, 0x00, 0x00, 0x00, + // 185 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 
0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4, + 0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a, + 0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46, + 0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, + 0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce, + 0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52, + 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, + 0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, + 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce, + 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff, + 0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00, } diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto index 81dcf46ccfd2..c74866762315 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.proto +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.proto @@ -37,7 +37,6 @@ option go_package = "github.com/golang/protobuf/ptypes/any"; option java_package = "com.google.protobuf"; option java_outer_classname = "AnyProto"; option java_multiple_files = true; -option java_generate_equals_and_hash = true; option objc_class_prefix = "GPB"; // `Any` contains an arbitrary serialized protocol buffer message along with a @@ -75,6 +74,16 @@ option objc_class_prefix = "GPB"; // any.Unpack(foo) // ... // +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := ptypes.MarshalAny(foo) +// ... +// foo := &pb.Foo{} +// if err := ptypes.UnmarshalAny(any, foo); err != nil { +// ... +// } +// // The pack methods provided by protobuf library will by default use // 'type.googleapis.com/full.type.name' as the type URL and the unpack // methods only use the fully qualified type name after the last '/' diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go index 569748346d45..b2410a098ebd 100644 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -1,12 +1,11 @@ -// Code generated by protoc-gen-go. -// source: github.com/golang/protobuf/ptypes/duration/duration.proto -// DO NOT EDIT! +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/duration.proto /* Package duration is a generated protocol buffer package. It is generated from these files: - github.com/golang/protobuf/ptypes/duration/duration.proto + google/protobuf/duration.proto It has these top-level messages: Duration @@ -35,6 +34,8 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // two Timestamp values is a Duration and it can be added or subtracted // from a Timestamp. Range is approximately +-10,000 years. // +// # Examples +// // Example 1: Compute Duration from two Timestamps in pseudo code. // // Timestamp start = ...; @@ -69,10 +70,27 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // end.nanos -= 1000000000; // } // +// Example 3: Compute Duration from datetime.timedelta in Python. 
+// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +// // type Duration struct { // Signed seconds of the span of time. Must be from -315,576,000,000 - // to +315,576,000,000 inclusive. + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` // Signed fractions of a second at nanosecond resolution of the span // of time. Durations less than one second are represented with a 0 @@ -89,26 +107,38 @@ func (*Duration) ProtoMessage() {} func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } func (*Duration) XXX_WellKnownType() string { return "Duration" } -func init() { - proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") +func (m *Duration) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Duration) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 } func init() { - proto.RegisterFile("github.com/golang/protobuf/ptypes/duration/duration.proto", fileDescriptor0) + proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") } +func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor0) } + var fileDescriptor0 = []byte{ - // 189 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9, - 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, - 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0x29, - 0x2d, 0x4a, 0x2c, 0xc9, 0xcc, 0xcf, 0x83, 0x33, 0xf4, 0xc0, 0x2a, 0x84, 0xf8, 0xd3, 0xf3, 0xf3, - 0xd3, 0x73, 0x52, 0xf5, 0x60, 0xea, 0x95, 0xac, 0xb8, 0x38, 0x5c, 0xa0, 0x4a, 0x84, 0x24, 0xb8, - 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, 0x60, - 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d, 0xd6, - 0x20, 0x08, 0xc7, 0xa9, 0x86, 0x4b, 0x38, 0x39, 0x3f, 0x57, 0x0f, 0xcd, 0x48, 0x27, 0x5e, 0x98, - 0x81, 0x01, 0x20, 0x91, 0x00, 0xc6, 0x28, 0x2d, 0xe2, 0xdd, 0xbb, 0x80, 0x91, 0x71, 0x11, 0x13, - 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xb9, 0x01, 0x50, 0xa5, 0x7a, 0xe1, 0xa9, - 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x2d, 0x49, 0x6c, 0x60, 0x33, 0x8c, 0x01, - 0x01, 0x00, 0x00, 0xff, 0xff, 0x62, 0xfb, 0xb1, 0x51, 0x0e, 0x01, 0x00, 0x00, + // 190 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, + 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56, + 0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 
0x29, 0xc5, + 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e, + 0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c, + 0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56, + 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e, + 0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4, + 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78, + 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63, + 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00, } diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto index 96c1796d6594..975fce41aae0 100644 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto @@ -33,11 +33,11 @@ syntax = "proto3"; package google.protobuf; option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; option go_package = "github.com/golang/protobuf/ptypes/duration"; option java_package = "com.google.protobuf"; option java_outer_classname = "DurationProto"; option java_multiple_files = true; -option java_generate_equals_and_hash = true; option objc_class_prefix = "GPB"; // A Duration represents a signed, fixed-length span of time represented @@ -47,6 +47,8 @@ option objc_class_prefix = "GPB"; // two Timestamp values is a Duration and it can be added or subtracted // from a Timestamp. Range is approximately +-10,000 years. // +// # Examples +// // Example 1: Compute Duration from two Timestamps in pseudo code. // // Timestamp start = ...; @@ -81,11 +83,28 @@ option objc_class_prefix = "GPB"; // end.nanos -= 1000000000; // } // +// Example 3: Compute Duration from datetime.timedelta in Python. +// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +// // message Duration { // Signed seconds of the span of time. Must be from -315,576,000,000 - // to +315,576,000,000 inclusive. + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years int64 seconds = 1; // Signed fractions of a second at nanosecond resolution of the span diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go new file mode 100644 index 000000000000..e877b72c3f50 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go @@ -0,0 +1,66 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/empty.proto + +/* +Package empty is a generated protocol buffer package. 
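The Duration JSON mapping documented above pairs with the existing ptypes converters; a brief sketch of the Go round trip, assuming the vendored ptypes package (when such a value is marshaled with jsonpb it renders as "3.000001s", the form the comment above describes):

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	d := 3*time.Second + time.Microsecond

	// time.Duration -> google.protobuf.Duration (split into seconds + nanos).
	pb := ptypes.DurationProto(d)
	fmt.Println(pb.Seconds, pb.Nanos) // 3 1000

	// And back again, with range checking against the documented bounds.
	back, err := ptypes.Duration(pb)
	if err != nil {
		panic(err)
	}
	fmt.Println(back == d) // true
}
```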
+ +It is generated from these files: + google/protobuf/empty.proto + +It has these top-level messages: + Empty +*/ +package empty + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. +type Empty struct { +} + +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Empty) XXX_WellKnownType() string { return "Empty" } + +func init() { + proto.RegisterType((*Empty)(nil), "google.protobuf.Empty") +} + +func init() { proto.RegisterFile("google/protobuf/empty.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 148 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcd, 0x2d, 0x28, + 0xa9, 0xd4, 0x03, 0x73, 0x85, 0xf8, 0x21, 0x92, 0x7a, 0x30, 0x49, 0x25, 0x76, 0x2e, 0x56, 0x57, + 0x90, 0xbc, 0x53, 0x19, 0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0xbc, 0x13, 0x17, 0x58, 0x36, + 0x00, 0xc4, 0x0d, 0x60, 0x8c, 0x52, 0x4f, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, + 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0x47, 0x58, 0x53, 0x50, 0x52, 0x59, 0x90, 0x5a, 0x0c, + 0xb1, 0xed, 0x07, 0x23, 0xe3, 0x22, 0x26, 0x66, 0xf7, 0x00, 0xa7, 0x55, 0x4c, 0x72, 0xee, 0x10, + 0x13, 0x03, 0xa0, 0xea, 0xf4, 0xc2, 0x53, 0x73, 0x72, 0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40, + 0xea, 0x93, 0xd8, 0xc0, 0x06, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x64, 0xd4, 0xb3, 0xa6, + 0xb7, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto b/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto new file mode 100644 index 000000000000..03cacd233088 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto @@ -0,0 +1,52 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "github.com/golang/protobuf/ptypes/empty"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "EmptyProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option cc_enable_arenas = true; + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. +message Empty {} diff --git a/vendor/github.com/golang/protobuf/ptypes/regen.sh b/vendor/github.com/golang/protobuf/ptypes/regen.sh index 2a5b4e8bdccc..b50a9414ac24 100755 --- a/vendor/github.com/golang/protobuf/ptypes/regen.sh +++ b/vendor/github.com/golang/protobuf/ptypes/regen.sh @@ -8,14 +8,7 @@ PKG=github.com/golang/protobuf/ptypes UPSTREAM=https://github.com/google/protobuf UPSTREAM_SUBDIR=src/google/protobuf -PROTO_FILES=' - any.proto - duration.proto - empty.proto - struct.proto - timestamp.proto - wrappers.proto -' +PROTO_FILES=(any duration empty struct timestamp wrappers) function die() { echo 1>&2 $* @@ -36,31 +29,15 @@ pkgdir=$(go list -f '{{.Dir}}' $PKG) echo 1>&2 $pkgdir base=$(echo $pkgdir | sed "s,/$PKG\$,,") echo 1>&2 "base: $base" -cd $base +cd "$base" echo 1>&2 "fetching latest protos... " git clone -q $UPSTREAM $tmpdir -# Pass 1: build mapping from upstream filename to our filename. -declare -A filename_map -for f in $(cd $PKG && find * -name '*.proto'); do - echo -n 1>&2 "looking for latest version of $f... " - up=$(cd $tmpdir/$UPSTREAM_SUBDIR && find * -name $(basename $f) | grep -v /testdata/) - echo 1>&2 $up - if [ $(echo $up | wc -w) != "1" ]; then - die "not exactly one match" - fi - filename_map[$up]=$f -done -# Pass 2: copy files -for up in "${!filename_map[@]}"; do - f=${filename_map[$up]} - shortname=$(basename $f | sed 's,\.proto$,,') - cp $tmpdir/$UPSTREAM_SUBDIR/$up $PKG/$f -done -# Run protoc once per package. -for dir in $(find $PKG -name '*.proto' | xargs dirname | sort | uniq); do - echo 1>&2 "* $dir" - protoc --go_out=. $dir/*.proto +for file in ${PROTO_FILES[@]}; do + echo 1>&2 "* $file" + protoc --go_out=. 
-I$tmpdir/src $tmpdir/src/google/protobuf/$file.proto || die + cp $tmpdir/src/google/protobuf/$file.proto $PKG/$file done + echo 1>&2 "All OK" diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go new file mode 100644 index 000000000000..4cfe60818798 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go @@ -0,0 +1,380 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/struct.proto + +/* +Package structpb is a generated protocol buffer package. + +It is generated from these files: + google/protobuf/struct.proto + +It has these top-level messages: + Struct + Value + ListValue +*/ +package structpb + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. +type NullValue int32 + +const ( + // Null value. + NullValue_NULL_VALUE NullValue = 0 +) + +var NullValue_name = map[int32]string{ + 0: "NULL_VALUE", +} +var NullValue_value = map[string]int32{ + "NULL_VALUE": 0, +} + +func (x NullValue) String() string { + return proto.EnumName(NullValue_name, int32(x)) +} +func (NullValue) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (NullValue) XXX_WellKnownType() string { return "NullValue" } + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. For example, in +// scripting languages like JS a struct is represented as an +// object. The details of that representation are described together +// with the proto support for the language. +// +// The JSON representation for `Struct` is JSON object. +type Struct struct { + // Unordered map of dynamically typed values. + Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *Struct) Reset() { *m = Struct{} } +func (m *Struct) String() string { return proto.CompactTextString(m) } +func (*Struct) ProtoMessage() {} +func (*Struct) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Struct) XXX_WellKnownType() string { return "Struct" } + +func (m *Struct) GetFields() map[string]*Value { + if m != nil { + return m.Fields + } + return nil +} + +// `Value` represents a dynamically typed value which can be either +// null, a number, a string, a boolean, a recursive struct value, or a +// list of values. A producer of value is expected to set one of that +// variants, absence of any variant indicates an error. +// +// The JSON representation for `Value` is JSON value. +type Value struct { + // The kind of value. 
+ // + // Types that are valid to be assigned to Kind: + // *Value_NullValue + // *Value_NumberValue + // *Value_StringValue + // *Value_BoolValue + // *Value_StructValue + // *Value_ListValue + Kind isValue_Kind `protobuf_oneof:"kind"` +} + +func (m *Value) Reset() { *m = Value{} } +func (m *Value) String() string { return proto.CompactTextString(m) } +func (*Value) ProtoMessage() {} +func (*Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (*Value) XXX_WellKnownType() string { return "Value" } + +type isValue_Kind interface { + isValue_Kind() +} + +type Value_NullValue struct { + NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,enum=google.protobuf.NullValue,oneof"` +} +type Value_NumberValue struct { + NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,oneof"` +} +type Value_StringValue struct { + StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,oneof"` +} +type Value_BoolValue struct { + BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,oneof"` +} +type Value_StructValue struct { + StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,oneof"` +} +type Value_ListValue struct { + ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,oneof"` +} + +func (*Value_NullValue) isValue_Kind() {} +func (*Value_NumberValue) isValue_Kind() {} +func (*Value_StringValue) isValue_Kind() {} +func (*Value_BoolValue) isValue_Kind() {} +func (*Value_StructValue) isValue_Kind() {} +func (*Value_ListValue) isValue_Kind() {} + +func (m *Value) GetKind() isValue_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (m *Value) GetNullValue() NullValue { + if x, ok := m.GetKind().(*Value_NullValue); ok { + return x.NullValue + } + return NullValue_NULL_VALUE +} + +func (m *Value) GetNumberValue() float64 { + if x, ok := m.GetKind().(*Value_NumberValue); ok { + return x.NumberValue + } + return 0 +} + +func (m *Value) GetStringValue() string { + if x, ok := m.GetKind().(*Value_StringValue); ok { + return x.StringValue + } + return "" +} + +func (m *Value) GetBoolValue() bool { + if x, ok := m.GetKind().(*Value_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (m *Value) GetStructValue() *Struct { + if x, ok := m.GetKind().(*Value_StructValue); ok { + return x.StructValue + } + return nil +} + +func (m *Value) GetListValue() *ListValue { + if x, ok := m.GetKind().(*Value_ListValue); ok { + return x.ListValue + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
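The generated oneof wrappers and getters above can also be used directly when building a Value tree by hand instead of going through jsonpb; a short sketch, assuming the vendored structpb package:

```go
package main

import (
	"fmt"

	stpb "github.com/golang/protobuf/ptypes/struct"
)

func main() {
	// A Value holding the two-element list ["minikube", 1.5].
	v := &stpb.Value{
		Kind: &stpb.Value_ListValue{
			ListValue: &stpb.ListValue{
				Values: []*stpb.Value{
					{Kind: &stpb.Value_StringValue{StringValue: "minikube"}},
					{Kind: &stpb.Value_NumberValue{NumberValue: 1.5}},
				},
			},
		},
	}

	fmt.Println(v.GetListValue().GetValues()[0].GetStringValue()) // minikube
	// Getters for kinds the oneof does not currently hold return zero values.
	fmt.Println(v.GetNumberValue()) // 0
}
```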
+func (*Value) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Value_OneofMarshaler, _Value_OneofUnmarshaler, _Value_OneofSizer, []interface{}{ + (*Value_NullValue)(nil), + (*Value_NumberValue)(nil), + (*Value_StringValue)(nil), + (*Value_BoolValue)(nil), + (*Value_StructValue)(nil), + (*Value_ListValue)(nil), + } +} + +func _Value_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Value) + // kind + switch x := m.Kind.(type) { + case *Value_NullValue: + b.EncodeVarint(1<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.NullValue)) + case *Value_NumberValue: + b.EncodeVarint(2<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.NumberValue)) + case *Value_StringValue: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.StringValue) + case *Value_BoolValue: + t := uint64(0) + if x.BoolValue { + t = 1 + } + b.EncodeVarint(4<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *Value_StructValue: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.StructValue); err != nil { + return err + } + case *Value_ListValue: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ListValue); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Value.Kind has unexpected type %T", x) + } + return nil +} + +func _Value_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Value) + switch tag { + case 1: // kind.null_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Kind = &Value_NullValue{NullValue(x)} + return true, err + case 2: // kind.number_value + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Kind = &Value_NumberValue{math.Float64frombits(x)} + return true, err + case 3: // kind.string_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Kind = &Value_StringValue{x} + return true, err + case 4: // kind.bool_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Kind = &Value_BoolValue{x != 0} + return true, err + case 5: // kind.struct_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Struct) + err := b.DecodeMessage(msg) + m.Kind = &Value_StructValue{msg} + return true, err + case 6: // kind.list_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ListValue) + err := b.DecodeMessage(msg) + m.Kind = &Value_ListValue{msg} + return true, err + default: + return false, nil + } +} + +func _Value_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Value) + // kind + switch x := m.Kind.(type) { + case *Value_NullValue: + n += proto.SizeVarint(1<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.NullValue)) + case *Value_NumberValue: + n += proto.SizeVarint(2<<3 | proto.WireFixed64) + n += 8 + case *Value_StringValue: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.StringValue))) + n += len(x.StringValue) + case *Value_BoolValue: + n += proto.SizeVarint(4<<3 | proto.WireVarint) + n += 1 + case *Value_StructValue: + s := proto.Size(x.StructValue) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) 
+ n += s + case *Value_ListValue: + s := proto.Size(x.ListValue) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. +type ListValue struct { + // Repeated field of dynamically typed values. + Values []*Value `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"` +} + +func (m *ListValue) Reset() { *m = ListValue{} } +func (m *ListValue) String() string { return proto.CompactTextString(m) } +func (*ListValue) ProtoMessage() {} +func (*ListValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (*ListValue) XXX_WellKnownType() string { return "ListValue" } + +func (m *ListValue) GetValues() []*Value { + if m != nil { + return m.Values + } + return nil +} + +func init() { + proto.RegisterType((*Struct)(nil), "google.protobuf.Struct") + proto.RegisterType((*Value)(nil), "google.protobuf.Value") + proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue") + proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value) +} + +func init() { proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 417 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x41, 0x8b, 0xd3, 0x40, + 0x14, 0xc7, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa2, 0xa1, 0x7b, 0x09, + 0x22, 0x29, 0xd6, 0x8b, 0x18, 0x2f, 0x06, 0xd6, 0x5d, 0x30, 0x2c, 0x31, 0xba, 0x15, 0xbc, 0x94, + 0x26, 0x4d, 0x63, 0xe8, 0x74, 0x26, 0x24, 0x33, 0x4a, 0x8f, 0x7e, 0x0b, 0xcf, 0x1e, 0x3d, 0xfa, + 0xe9, 0x3c, 0xca, 0xcc, 0x24, 0xa9, 0xb4, 0xf4, 0x94, 0xbc, 0xf7, 0x7e, 0xef, 0x3f, 0xef, 0xff, + 0x66, 0xe0, 0x71, 0xc1, 0x58, 0x41, 0xf2, 0x49, 0x55, 0x33, 0xce, 0x52, 0xb1, 0x9a, 0x34, 0xbc, + 0x16, 0x19, 0xf7, 0x55, 0x8c, 0xef, 0xe9, 0xaa, 0xdf, 0x55, 0xc7, 0x3f, 0x11, 0x58, 0x1f, 0x15, + 0x81, 0x03, 0xb0, 0x56, 0x65, 0x4e, 0x96, 0xcd, 0x08, 0xb9, 0xa6, 0xe7, 0x4c, 0x2f, 0xfc, 0x3d, + 0xd8, 0xd7, 0xa0, 0xff, 0x4e, 0x51, 0x97, 0x94, 0xd7, 0xdb, 0xa4, 0x6d, 0x39, 0xff, 0x00, 0xce, + 0x7f, 0x69, 0x7c, 0x06, 0xe6, 0x3a, 0xdf, 0x8e, 0x90, 0x8b, 0x3c, 0x3b, 0x91, 0xbf, 0xf8, 0x39, + 0x0c, 0xbf, 0x2d, 0x88, 0xc8, 0x47, 0x86, 0x8b, 0x3c, 0x67, 0xfa, 0xe0, 0x40, 0x7c, 0x26, 0xab, + 0x89, 0x86, 0x5e, 0x1b, 0xaf, 0xd0, 0xf8, 0x8f, 0x01, 0x43, 0x95, 0xc4, 0x01, 0x00, 0x15, 0x84, + 0xcc, 0xb5, 0x80, 0x14, 0x3d, 0x9d, 0x9e, 0x1f, 0x08, 0xdc, 0x08, 0x42, 0x14, 0x7f, 0x3d, 0x48, + 0x6c, 0xda, 0x05, 0xf8, 0x02, 0xee, 0x52, 0xb1, 0x49, 0xf3, 0x7a, 0xbe, 0x3b, 0x1f, 0x5d, 0x0f, + 0x12, 0x47, 0x67, 0x7b, 0xa8, 0xe1, 0x75, 0x49, 0x8b, 0x16, 0x32, 0xe5, 0xe0, 0x12, 0xd2, 0x59, + 0x0d, 0x3d, 0x05, 0x48, 0x19, 0xeb, 0xc6, 0x38, 0x71, 0x91, 0x77, 0x47, 0x1e, 0x25, 0x73, 0x1a, + 0x78, 0xa3, 0x54, 0x44, 0xc6, 0x5b, 0x64, 0xa8, 0xac, 0x3e, 0x3c, 0xb2, 0xc7, 0x56, 0x5e, 0x64, + 0xbc, 0x77, 0x49, 0xca, 0xa6, 0xeb, 0xb5, 0x54, 0xef, 0xa1, 0xcb, 0xa8, 0x6c, 0x78, 0xef, 0x92, + 0x74, 0x41, 0x68, 0xc1, 0xc9, 0xba, 0xa4, 0xcb, 0x71, 0x00, 0x76, 0x4f, 0x60, 0x1f, 0x2c, 0x25, + 0xd6, 0xdd, 0xe8, 0xb1, 0xa5, 0xb7, 0xd4, 0xb3, 0x47, 0x60, 0xf7, 0x4b, 0xc4, 0xa7, 0x00, 0x37, + 0xb7, 0x51, 0x34, 0x9f, 0xbd, 0x8d, 0x6e, 0x2f, 0xcf, 0x06, 0xe1, 0x0f, 0x04, 0xf7, 0x33, 0xb6, + 0xd9, 0x97, 0x08, 0x1d, 0xed, 0x26, 0x96, 0x71, 0x8c, 0xbe, 0xbc, 
0x28, 0x4a, 0xfe, 0x55, 0xa4, + 0x7e, 0xc6, 0x36, 0x93, 0x82, 0x91, 0x05, 0x2d, 0x76, 0x4f, 0xb1, 0xe2, 0xdb, 0x2a, 0x6f, 0xda, + 0x17, 0x19, 0xe8, 0x4f, 0x95, 0xfe, 0x45, 0xe8, 0x97, 0x61, 0x5e, 0xc5, 0xe1, 0x6f, 0xe3, 0xc9, + 0x95, 0x16, 0x8f, 0xbb, 0xf9, 0x3e, 0xe7, 0x84, 0xbc, 0xa7, 0xec, 0x3b, 0xfd, 0x24, 0x3b, 0x53, + 0x4b, 0x49, 0xbd, 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, 0xe8, 0x1b, 0x59, 0xf8, 0xe5, 0x02, 0x00, + 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto b/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto new file mode 100644 index 000000000000..7d7808e7fbb6 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto @@ -0,0 +1,96 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "github.com/golang/protobuf/ptypes/struct;structpb"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "StructProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. For example, in +// scripting languages like JS a struct is represented as an +// object. The details of that representation are described together +// with the proto support for the language. +// +// The JSON representation for `Struct` is JSON object. +message Struct { + // Unordered map of dynamically typed values. + map fields = 1; +} + +// `Value` represents a dynamically typed value which can be either +// null, a number, a string, a boolean, a recursive struct value, or a +// list of values. 
A producer of value is expected to set one of that +// variants, absence of any variant indicates an error. +// +// The JSON representation for `Value` is JSON value. +message Value { + // The kind of value. + oneof kind { + // Represents a null value. + NullValue null_value = 1; + // Represents a double value. + double number_value = 2; + // Represents a string value. + string string_value = 3; + // Represents a boolean value. + bool bool_value = 4; + // Represents a structured value. + Struct struct_value = 5; + // Represents a repeated `Value`. + ListValue list_value = 6; + } +} + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. +enum NullValue { + // Null value. + NULL_VALUE = 0; +} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. +message ListValue { + // Repeated field of dynamically typed values. + repeated Value values = 1; +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go index 1b3657622027..47f10dbc2ccc 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go @@ -99,6 +99,15 @@ func Timestamp(ts *tspb.Timestamp) (time.Time, error) { return t, validateTimestamp(ts) } +// TimestampNow returns a google.protobuf.Timestamp for the current time. +func TimestampNow() *tspb.Timestamp { + ts, err := TimestampProto(time.Now()) + if err != nil { + panic("ptypes: time.Now() out of Timestamp range") + } + return ts +} + // TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. // It returns an error if the resulting Timestamp is invalid. func TimestampProto(t time.Time) (*tspb.Timestamp, error) { diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go index ffcc51594ac6..e23e4a25dafc 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go @@ -1,12 +1,11 @@ -// Code generated by protoc-gen-go. -// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto -// DO NOT EDIT! +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/timestamp.proto /* Package timestamp is a generated protocol buffer package. It is generated from these files: - github.com/golang/protobuf/ptypes/timestamp/timestamp.proto + google/protobuf/timestamp.proto It has these top-level messages: Timestamp @@ -40,6 +39,8 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // and from RFC 3339 date strings. // See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). // +// # Examples +// // Example 1: Compute Timestamp from POSIX `time()`. // // Timestamp timestamp; @@ -77,15 +78,36 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // // Example 5: Compute Timestamp from current time in Python. // -// now = time.time() -// seconds = int(now) -// nanos = int((now - seconds) * 10**9) -// timestamp = Timestamp(seconds=seconds, nanos=nanos) +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. 
That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required, though only UTC (as indicated by "Z") is presently supported. +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] +// method. In Python, a standard `datetime.datetime` object can be converted +// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) +// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one +// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()) +// to obtain a formatter capable of generating timestamps in this format. // // type Timestamp struct { // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from from 0001-01-01T00:00:00Z to + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to // 9999-12-31T23:59:59Z inclusive. Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` // Non-negative fractions of a second at nanosecond resolution. Negative @@ -101,27 +123,38 @@ func (*Timestamp) ProtoMessage() {} func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } -func init() { - proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") +func (m *Timestamp) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Timestamp) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 } func init() { - proto.RegisterFile("github.com/golang/protobuf/ptypes/timestamp/timestamp.proto", fileDescriptor0) + proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") } +func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor0) } + var fileDescriptor0 = []byte{ - // 194 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9, - 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, - 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2f, 0xc9, - 0xcc, 0x4d, 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0x40, 0xb0, 0xf4, 0xc0, 0x6a, 0x84, 0xf8, 0xd3, 0xf3, - 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0x60, 0x3a, 0x94, 0xac, 0xb9, 0x38, 0x43, 0x60, 0x6a, 0x84, 0x24, - 0xb8, 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, - 0x60, 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d, - 0xd6, 0x20, 0x08, 0xc7, 0xa9, 0x91, 0x91, 0x4b, 0x38, 0x39, 0x3f, 0x57, 0x0f, 0xcd, 0x50, 0x27, - 0x3e, 0xb8, 0x91, 0x01, 0x20, 0xa1, 0x00, 0xc6, 0x28, 0x6d, 0x12, 0x1c, 0xbd, 0x80, 0x91, 0xf1, - 0x07, 0x23, 0xe3, 0x22, 0x26, 0x66, 0xf7, 0x00, 0xa7, 0x55, 0x4c, 0x72, 0xee, 0x10, 0xc3, 0x03, - 0xa0, 0xca, 0xf5, 0xc2, 0x53, 0x73, 0x72, 0xbc, 0xf3, 
0xf2, 0xcb, 0xf3, 0x42, 0x40, 0xda, 0x92, - 0xd8, 0xc0, 0xe6, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x17, 0x5f, 0xb7, 0xdc, 0x17, 0x01, - 0x00, 0x00, + // 191 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d, + 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28, + 0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5, + 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89, + 0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70, + 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51, + 0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89, + 0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71, + 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a, + 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43, + 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00, } diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto index 7992a85886fb..b7cbd17502f2 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto @@ -38,7 +38,6 @@ option go_package = "github.com/golang/protobuf/ptypes/timestamp"; option java_package = "com.google.protobuf"; option java_outer_classname = "TimestampProto"; option java_multiple_files = true; -option java_generate_equals_and_hash = true; option objc_class_prefix = "GPB"; // A Timestamp represents a point in time independent of any time zone @@ -53,6 +52,8 @@ option objc_class_prefix = "GPB"; // and from RFC 3339 date strings. // See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). // +// # Examples +// // Example 1: Compute Timestamp from POSIX `time()`. // // Timestamp timestamp; @@ -90,16 +91,37 @@ option objc_class_prefix = "GPB"; // // Example 5: Compute Timestamp from current time in Python. // -// now = time.time() -// seconds = int(now) -// nanos = int((now - seconds) * 10**9) -// timestamp = Timestamp(seconds=seconds, nanos=nanos) +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required, though only UTC (as indicated by "Z") is presently supported. +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. 
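The new TimestampNow helper added to ptypes above rounds out the existing Timestamp converters; a minimal round-trip sketch, assuming the vendored ptypes package (the RFC 3339 string is the JSON rendering this comment block describes):

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// Current time as a google.protobuf.Timestamp (seconds + nanos since the Unix epoch).
	ts := ptypes.TimestampNow()

	// Back to time.Time, with validation against the documented range.
	t, err := ptypes.Timestamp(ts)
	if err != nil {
		panic(err)
	}

	// RFC 3339 form, e.g. "2017-01-15T01:30:15.01Z".
	fmt.Println(t.UTC().Format(time.RFC3339Nano))
}
```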
+// +// In JavaScript, one can convert a Date object to this format using the +// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] +// method. In Python, a standard `datetime.datetime` object can be converted +// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) +// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one +// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()) +// to obtain a formatter capable of generating timestamps in this format. // // message Timestamp { // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from from 0001-01-01T00:00:00Z to + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to // 9999-12-31T23:59:59Z inclusive. int64 seconds = 1; diff --git a/vendor/github.com/google/cadvisor/accelerators/nvidia.go b/vendor/github.com/google/cadvisor/accelerators/nvidia.go new file mode 100644 index 000000000000..054d206b2ac8 --- /dev/null +++ b/vendor/github.com/google/cadvisor/accelerators/nvidia.go @@ -0,0 +1,252 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package accelerators + +import ( + "bufio" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "time" + + info "github.com/google/cadvisor/info/v1" + + "github.com/golang/glog" + "github.com/mindprince/gonvml" +) + +type NvidiaManager struct { + sync.RWMutex + + // true if the NVML library (libnvidia-ml.so.1) was loaded successfully + nvmlInitialized bool + + // nvidiaDevices is a map from device minor number to a handle that can be used to get metrics about the device + nvidiaDevices map[int]gonvml.Device +} + +var sysFsPCIDevicesPath = "/sys/bus/pci/devices/" + +const nvidiaVendorId = "0x10de" + +// Setup initializes NVML if nvidia devices are present on the node. +func (nm *NvidiaManager) Setup() { + if !detectDevices(nvidiaVendorId) { + glog.V(4).Info("No NVIDIA devices found.") + return + } + + nm.initializeNVML() + if nm.nvmlInitialized { + return + } + go func() { + glog.V(2).Info("Starting goroutine to initialize NVML") + // TODO: use globalHousekeepingInterval + for range time.Tick(time.Minute) { + nm.initializeNVML() + if nm.nvmlInitialized { + return + } + } + }() +} + +// detectDevices returns true if a device with given pci id is present on the node. 
+func detectDevices(vendorId string) bool { + devices, err := ioutil.ReadDir(sysFsPCIDevicesPath) + if err != nil { + glog.Warningf("Error reading %q: %v", sysFsPCIDevicesPath, err) + return false + } + + for _, device := range devices { + vendorPath := filepath.Join(sysFsPCIDevicesPath, device.Name(), "vendor") + content, err := ioutil.ReadFile(vendorPath) + if err != nil { + glog.V(4).Infof("Error while reading %q: %v", vendorPath, err) + continue + } + if strings.EqualFold(strings.TrimSpace(string(content)), vendorId) { + glog.V(3).Infof("Found device with vendorId %q", vendorId) + return true + } + } + return false +} + +// initializeNVML initializes the NVML library and sets up the nvmlDevices map. +func (nm *NvidiaManager) initializeNVML() { + if err := gonvml.Initialize(); err != nil { + // This is under a logging level because otherwise we may cause + // log spam if the drivers/nvml is not installed on the system. + glog.V(4).Infof("Could not initialize NVML: %v", err) + return + } + numDevices, err := gonvml.DeviceCount() + if err != nil { + glog.Warningf("GPU metrics would not be available. Failed to get the number of nvidia devices: %v", err) + nm.Lock() + // Even though we won't have GPU metrics, the library was initialized and should be shutdown when exiting. + nm.nvmlInitialized = true + nm.Unlock() + return + } + glog.V(1).Infof("NVML initialized. Number of nvidia devices: %v", numDevices) + nm.nvidiaDevices = make(map[int]gonvml.Device, numDevices) + for i := 0; i < int(numDevices); i++ { + device, err := gonvml.DeviceHandleByIndex(uint(i)) + if err != nil { + glog.Warningf("Failed to get nvidia device handle %d: %v", i, err) + continue + } + minorNumber, err := device.MinorNumber() + if err != nil { + glog.Warningf("Failed to get nvidia device minor number: %v", err) + continue + } + nm.nvidiaDevices[int(minorNumber)] = device + } + nm.Lock() + // Doing this at the end to avoid race in accessing nvidiaDevices in GetCollector. + nm.nvmlInitialized = true + nm.Unlock() +} + +// Destroy shuts down NVML. +func (nm *NvidiaManager) Destroy() { + if nm.nvmlInitialized { + gonvml.Shutdown() + } +} + +// GetCollector returns a collector that can fetch nvidia gpu metrics for nvidia devices +// present in the devices.list file in the given devicesCgroupPath. +func (nm *NvidiaManager) GetCollector(devicesCgroupPath string) (AcceleratorCollector, error) { + nc := &NvidiaCollector{} + nm.RLock() + if !nm.nvmlInitialized || len(nm.nvidiaDevices) == 0 { + nm.RUnlock() + return nc, nil + } + nm.RUnlock() + nvidiaMinorNumbers, err := parseDevicesCgroup(devicesCgroupPath) + if err != nil { + return nc, err + } + for _, minor := range nvidiaMinorNumbers { + device, ok := nm.nvidiaDevices[minor] + if !ok { + return nc, fmt.Errorf("nvidia device minor number %d not found in cached devices", minor) + } + nc.Devices = append(nc.Devices, device) + } + return nc, nil +} + +// parseDevicesCgroup parses the devices cgroup devices.list file for the container +// and returns a list of minor numbers corresponding to NVIDIA GPU devices that the +// container is allowed to access. In cases where the container has access to all +// devices or all NVIDIA devices but the devices are not enumerated separately in +// the devices.list file, we return an empty list. +// This is defined as a variable to help in testing. 
+var parseDevicesCgroup = func(devicesCgroupPath string) ([]int, error) { + // Always return a non-nil slice + nvidiaMinorNumbers := []int{} + + devicesList := filepath.Join(devicesCgroupPath, "devices.list") + f, err := os.Open(devicesList) + if err != nil { + return nvidiaMinorNumbers, fmt.Errorf("error while opening devices cgroup file %q: %v", devicesList, err) + } + defer f.Close() + + s := bufio.NewScanner(f) + + // See https://www.kernel.org/doc/Documentation/cgroup-v1/devices.txt for the file format + for s.Scan() { + text := s.Text() + + fields := strings.Fields(text) + if len(fields) != 3 { + return nvidiaMinorNumbers, fmt.Errorf("invalid devices cgroup entry %q: must contain three whitespace-separated fields", text) + } + + // Split the second field to find out major:minor numbers + majorMinor := strings.Split(fields[1], ":") + if len(majorMinor) != 2 { + return nvidiaMinorNumbers, fmt.Errorf("invalid devices cgroup entry %q: second field should have one colon", text) + } + + // NVIDIA graphics devices are character devices with major number 195. + // https://github.com/torvalds/linux/blob/v4.13/Documentation/admin-guide/devices.txt#L2583 + if fields[0] == "c" && majorMinor[0] == "195" { + minorNumber, err := strconv.Atoi(majorMinor[1]) + if err != nil { + return nvidiaMinorNumbers, fmt.Errorf("invalid devices cgroup entry %q: minor number is not integer", text) + } + // We don't want devices like nvidiactl (195:255) and nvidia-modeset (195:254) + if minorNumber < 128 { + nvidiaMinorNumbers = append(nvidiaMinorNumbers, minorNumber) + } + // We are ignoring the "195:*" case + // where the container has access to all NVIDIA devices on the machine. + } + // We are ignoring the "*:*" case + // where the container has access to all devices on the machine. + } + return nvidiaMinorNumbers, nil +} + +type NvidiaCollector struct { + // Exposed for testing + Devices []gonvml.Device +} + +// UpdateStats updates the stats for NVIDIA GPUs (if any) attached to the container. +func (nc *NvidiaCollector) UpdateStats(stats *info.ContainerStats) error { + for _, device := range nc.Devices { + model, err := device.Name() + if err != nil { + return fmt.Errorf("error while getting gpu name: %v", err) + } + uuid, err := device.UUID() + if err != nil { + return fmt.Errorf("error while getting gpu uuid: %v", err) + } + memoryTotal, memoryUsed, err := device.MemoryInfo() + if err != nil { + return fmt.Errorf("error while getting gpu memory info: %v", err) + } + //TODO: Use housekeepingInterval + utilizationGPU, err := device.AverageGPUUtilization(10 * time.Second) + if err != nil { + return fmt.Errorf("error while getting gpu utilization: %v", err) + } + + stats.Accelerators = append(stats.Accelerators, info.AcceleratorStats{ + Make: "nvidia", + Model: model, + ID: uuid, + MemoryTotal: memoryTotal, + MemoryUsed: memoryUsed, + DutyCycle: uint64(utilizationGPU), + }) + } + return nil +} diff --git a/vendor/github.com/google/cadvisor/accelerators/types.go b/vendor/github.com/google/cadvisor/accelerators/types.go new file mode 100644 index 000000000000..d577953a045c --- /dev/null +++ b/vendor/github.com/google/cadvisor/accelerators/types.go @@ -0,0 +1,32 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package accelerators + +import info "github.com/google/cadvisor/info/v1" + +// This is supposed to store global state about an accelerator metrics collector. +// cadvisor manager will call Setup() when it starts and Destroy() when it stops. +// For each container detected by the cadvisor manager, it will call +// GetCollector() with the devices cgroup path for that container. +// GetCollector() is supposed to return an object that can update +// accelerator stats for that container. +type AcceleratorManager interface { + Setup() + Destroy() + GetCollector(deviceCgroup string) (AcceleratorCollector, error) +} + +type AcceleratorCollector interface { + UpdateStats(*info.ContainerStats) error +} diff --git a/vendor/github.com/google/cadvisor/container/common/fsHandler.go b/vendor/github.com/google/cadvisor/container/common/fsHandler.go index 451395f67986..d261c983a609 100644 --- a/vendor/github.com/google/cadvisor/container/common/fsHandler.go +++ b/vendor/github.com/google/cadvisor/container/common/fsHandler.go @@ -51,7 +51,6 @@ type realFsHandler struct { } const ( - longOp = time.Second timeout = 2 * time.Minute maxBackoffFactor = 20 ) @@ -93,10 +92,10 @@ func (fh *realFsHandler) update() error { fh.Lock() defer fh.Unlock() fh.lastUpdate = time.Now() - if rootDiskErr == nil && fh.rootfs != "" { + if rootInodeErr == nil && fh.rootfs != "" { fh.usage.InodeUsage = inodeUsage } - if rootInodeErr == nil && fh.rootfs != "" { + if rootDiskErr == nil && fh.rootfs != "" { fh.usage.TotalUsageBytes = baseUsage + extraDirUsage } if extraDiskErr == nil && fh.extraDir != "" { @@ -111,6 +110,7 @@ func (fh *realFsHandler) update() error { func (fh *realFsHandler) trackUsage() { fh.update() + longOp := time.Second for { select { case <-fh.stopChan: @@ -128,7 +128,11 @@ func (fh *realFsHandler) trackUsage() { } duration := time.Since(start) if duration > longOp { - glog.V(2).Infof("du and find on following dirs took %v: %v", duration, []string{fh.rootfs, fh.extraDir}) + // adapt longOp time so that message doesn't continue to print + // if the long duration is persistent either because of slow + // disk or lots of containers. + longOp = longOp + time.Second + glog.V(2).Infof("du and find on following dirs took %v: %v; will not log again for this container unless duration exceeds %v", duration, []string{fh.rootfs, fh.extraDir}, longOp) } } } diff --git a/vendor/github.com/google/cadvisor/container/container.go b/vendor/github.com/google/cadvisor/container/container.go index 697626b8ebeb..4550452481ef 100644 --- a/vendor/github.com/google/cadvisor/container/container.go +++ b/vendor/github.com/google/cadvisor/container/container.go @@ -35,6 +35,7 @@ const ( ContainerTypeRkt ContainerTypeSystemd ContainerTypeCrio + ContainerTypeContainerd ) // Interface for container operation handlers. 
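The accelerator collector contract defined in accelerators/types.go above can be exercised with a minimal no-op implementation. The sketch below is illustrative only: the noop types, the "noop" make string, and the cgroup path are invented for the example, and the only assumed APIs are the two interfaces plus the v1 ContainerStats/AcceleratorStats types introduced in this change:

package main

import (
	"fmt"

	"github.com/google/cadvisor/accelerators"
	info "github.com/google/cadvisor/info/v1"
)

// noopCollector satisfies AcceleratorCollector from accelerators/types.go.
type noopCollector struct{}

func (noopCollector) UpdateStats(stats *info.ContainerStats) error {
	// A real collector appends one AcceleratorStats entry per attached device.
	stats.Accelerators = append(stats.Accelerators, info.AcceleratorStats{Make: "noop"})
	return nil
}

// noopManager satisfies AcceleratorManager from accelerators/types.go.
type noopManager struct{}

func (noopManager) Setup()   {} // called once when the cadvisor manager starts
func (noopManager) Destroy() {} // called once when the cadvisor manager stops

func (noopManager) GetCollector(devicesCgroupPath string) (accelerators.AcceleratorCollector, error) {
	// called for each detected container with that container's devices cgroup path
	return noopCollector{}, nil
}

func main() {
	var m accelerators.AcceleratorManager = noopManager{}
	m.Setup()
	defer m.Destroy()

	collector, _ := m.GetCollector("/sys/fs/cgroup/devices/docker/abc")
	stats := &info.ContainerStats{}
	_ = collector.UpdateStats(stats)
	fmt.Println(len(stats.Accelerators)) // 1
}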
diff --git a/vendor/github.com/google/cadvisor/container/containerd/client.go b/vendor/github.com/google/cadvisor/container/containerd/client.go new file mode 100644 index 000000000000..182fd010b298 --- /dev/null +++ b/vendor/github.com/google/cadvisor/container/containerd/client.go @@ -0,0 +1,122 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package containerd + +import ( + "context" + "time" + + containersapi "github.com/containerd/containerd/api/services/containers/v1" + tasksapi "github.com/containerd/containerd/api/services/tasks/v1" + versionapi "github.com/containerd/containerd/api/services/version/v1" + "github.com/containerd/containerd/containers" + "github.com/containerd/containerd/dialer" + "github.com/containerd/containerd/errdefs" + pempty "github.com/golang/protobuf/ptypes/empty" + "google.golang.org/grpc" +) + +const ( + // k8sNamespace is the namespace we use to connect containerd. + k8sNamespace = "k8s.io" +) + +type client struct { + containerService containersapi.ContainersClient + taskService tasksapi.TasksClient + versionService versionapi.VersionClient +} + +type containerdClient interface { + LoadContainer(ctx context.Context, id string) (*containers.Container, error) + TaskPid(ctx context.Context, id string) (uint32, error) + Version(ctx context.Context) (string, error) +} + +// Client creates a containerd client +func Client() (containerdClient, error) { + gopts := []grpc.DialOption{ + grpc.WithInsecure(), + grpc.FailOnNonTempDialError(true), + grpc.WithDialer(dialer.Dialer), + grpc.WithBlock(), + grpc.WithTimeout(2 * time.Second), + grpc.WithBackoffMaxDelay(3 * time.Second), + } + unary, stream := newNSInterceptors(k8sNamespace) + gopts = append(gopts, + grpc.WithUnaryInterceptor(unary), + grpc.WithStreamInterceptor(stream), + ) + + conn, err := grpc.Dial(dialer.DialAddress("/var/run/containerd/containerd.sock"), gopts...) 
+ if err != nil { + return nil, err + } + c := &client{ + containerService: containersapi.NewContainersClient(conn), + taskService: tasksapi.NewTasksClient(conn), + versionService: versionapi.NewVersionClient(conn), + } + return c, err +} + +func (c *client) LoadContainer(ctx context.Context, id string) (*containers.Container, error) { + r, err := c.containerService.Get(ctx, &containersapi.GetContainerRequest{ + ID: id, + }) + if err != nil { + return nil, errdefs.FromGRPC(err) + } + return containerFromProto(r.Container), nil +} + +func (c *client) TaskPid(ctx context.Context, id string) (uint32, error) { + response, err := c.taskService.Get(ctx, &tasksapi.GetRequest{ + ContainerID: id, + }) + if err != nil { + return 0, errdefs.FromGRPC(err) + } + return response.Process.Pid, nil +} + +func (c *client) Version(ctx context.Context) (string, error) { + response, err := c.versionService.Version(ctx, &pempty.Empty{}) + if err != nil { + return "", errdefs.FromGRPC(err) + } + return response.Version, nil +} + +func containerFromProto(containerpb containersapi.Container) *containers.Container { + var runtime containers.RuntimeInfo + if containerpb.Runtime != nil { + runtime = containers.RuntimeInfo{ + Name: containerpb.Runtime.Name, + Options: containerpb.Runtime.Options, + } + } + return &containers.Container{ + ID: containerpb.ID, + Labels: containerpb.Labels, + Image: containerpb.Image, + Runtime: runtime, + Spec: containerpb.Spec, + Snapshotter: containerpb.Snapshotter, + SnapshotKey: containerpb.SnapshotKey, + Extensions: containerpb.Extensions, + } +} diff --git a/vendor/github.com/google/cadvisor/container/containerd/factory.go b/vendor/github.com/google/cadvisor/container/containerd/factory.go new file mode 100644 index 000000000000..dba43ef32e21 --- /dev/null +++ b/vendor/github.com/google/cadvisor/container/containerd/factory.go @@ -0,0 +1,148 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package containerd + +import ( + "flag" + "fmt" + "path" + "regexp" + "strings" + + "github.com/golang/glog" + "golang.org/x/net/context" + + "github.com/google/cadvisor/container" + "github.com/google/cadvisor/container/libcontainer" + "github.com/google/cadvisor/fs" + info "github.com/google/cadvisor/info/v1" + "github.com/google/cadvisor/manager/watcher" +) + +var ArgContainerdEndpoint = flag.String("containerd", "unix:///var/run/containerd.sock", "containerd endpoint") + +// The namespace under which containerd aliases are unique. +const k8sContainerdNamespace = "containerd" + +// Regexp that identifies containerd cgroups, containers started with +// --cgroup-parent have another prefix than 'containerd' +var containerdCgroupRegexp = regexp.MustCompile(`([a-z0-9]{64})`) + +type containerdFactory struct { + machineInfoFactory info.MachineInfoFactory + client containerdClient + version string + // Information about the mounted cgroup subsystems. 
+ cgroupSubsystems libcontainer.CgroupSubsystems + // Information about mounted filesystems. + fsInfo fs.FsInfo + ignoreMetrics container.MetricSet +} + +func (self *containerdFactory) String() string { + return k8sContainerdNamespace +} + +func (self *containerdFactory) NewContainerHandler(name string, inHostNamespace bool) (handler container.ContainerHandler, err error) { + client, err := Client() + if err != nil { + return + } + + metadataEnvs := []string{} + return newContainerdContainerHandler( + client, + name, + self.machineInfoFactory, + self.fsInfo, + &self.cgroupSubsystems, + inHostNamespace, + metadataEnvs, + self.ignoreMetrics, + ) +} + +// Returns the containerd ID from the full container name. +func ContainerNameToContainerdID(name string) string { + id := path.Base(name) + if matches := containerdCgroupRegexp.FindStringSubmatch(id); matches != nil { + return matches[1] + } + return id +} + +// isContainerName returns true if the cgroup with associated name +// corresponds to a containerd container. +func isContainerName(name string) bool { + // TODO: May be check with HasPrefix ContainerdNamespace + if strings.HasSuffix(name, ".mount") { + return false + } + return containerdCgroupRegexp.MatchString(path.Base(name)) +} + +// Containerd can handle and accept all containerd created containers +func (self *containerdFactory) CanHandleAndAccept(name string) (bool, bool, error) { + // if the container is not associated with containerd, we can't handle it or accept it. + if !isContainerName(name) { + return false, false, nil + } + // Check if the container is known to containerd and it is running. + id := ContainerNameToContainerdID(name) + // If container and task lookup in containerd fails then we assume + // that the container state is not known to containerd + ctx := context.Background() + _, err := self.client.LoadContainer(ctx, id) + if err != nil { + return false, false, fmt.Errorf("failed to load container: %v", err) + } + + return true, true, nil +} + +func (self *containerdFactory) DebugInfo() map[string][]string { + return map[string][]string{} +} + +// Register root container before running this function! +func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics container.MetricSet) error { + client, err := Client() + if err != nil { + return fmt.Errorf("unable to create containerd client: %v", err) + } + + containerdVersion, err := client.Version(context.Background()) + if err != nil { + return fmt.Errorf("failed to fetch containerd client version: %v", err) + } + + cgroupSubsystems, err := libcontainer.GetCgroupSubsystems() + if err != nil { + return fmt.Errorf("failed to get cgroup subsystems: %v", err) + } + + glog.V(1).Infof("Registering containerd factory") + f := &containerdFactory{ + cgroupSubsystems: cgroupSubsystems, + client: client, + fsInfo: fsInfo, + machineInfoFactory: factory, + version: containerdVersion, + ignoreMetrics: ignoreMetrics, + } + + container.RegisterContainerHandlerFactory(f, []watcher.ContainerWatchSource{watcher.Raw}) + return nil +} diff --git a/vendor/github.com/google/cadvisor/container/containerd/grpc.go b/vendor/github.com/google/cadvisor/container/containerd/grpc.go new file mode 100644 index 000000000000..ba04eb1e2387 --- /dev/null +++ b/vendor/github.com/google/cadvisor/container/containerd/grpc.go @@ -0,0 +1,49 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//This code has been taken from containerd repo to avoid large library import +package containerd + +import ( + "github.com/containerd/containerd/namespaces" + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +type namespaceInterceptor struct { + namespace string +} + +func (ni namespaceInterceptor) unary(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + _, ok := namespaces.Namespace(ctx) + if !ok { + ctx = namespaces.WithNamespace(ctx, ni.namespace) + } + return invoker(ctx, method, req, reply, cc, opts...) +} + +func (ni namespaceInterceptor) stream(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + _, ok := namespaces.Namespace(ctx) + if !ok { + ctx = namespaces.WithNamespace(ctx, ni.namespace) + } + return streamer(ctx, desc, cc, method, opts...) +} + +func newNSInterceptors(ns string) (grpc.UnaryClientInterceptor, grpc.StreamClientInterceptor) { + ni := namespaceInterceptor{ + namespace: ns, + } + return grpc.UnaryClientInterceptor(ni.unary), grpc.StreamClientInterceptor(ni.stream) +} diff --git a/vendor/github.com/google/cadvisor/container/containerd/handler.go b/vendor/github.com/google/cadvisor/container/containerd/handler.go new file mode 100644 index 000000000000..b369d9b133d1 --- /dev/null +++ b/vendor/github.com/google/cadvisor/container/containerd/handler.go @@ -0,0 +1,246 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Handler for containerd containers. +package containerd + +import ( + "encoding/json" + "fmt" + "path" + "strings" + "time" + + "github.com/opencontainers/runc/libcontainer/cgroups" + cgroupfs "github.com/opencontainers/runc/libcontainer/cgroups/fs" + libcontainerconfigs "github.com/opencontainers/runc/libcontainer/configs" + "golang.org/x/net/context" + + "github.com/google/cadvisor/container" + "github.com/google/cadvisor/container/common" + containerlibcontainer "github.com/google/cadvisor/container/libcontainer" + "github.com/google/cadvisor/fs" + info "github.com/google/cadvisor/info/v1" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +type containerdContainerHandler struct { + client containerdClient + name string + id string + aliases []string + machineInfoFactory info.MachineInfoFactory + // Absolute path to the cgroup hierarchies of this container. 
+ // (e.g.: "cpu" -> "/sys/fs/cgroup/cpu/test") + cgroupPaths map[string]string + // Manager of this container's cgroups. + cgroupManager cgroups.Manager + fsInfo fs.FsInfo + poolName string + // Time at which this container was created. + creationTime time.Time + // Metadata associated with the container. + labels map[string]string + envs map[string]string + // The container PID used to switch namespaces as required + pid int + // Image name used for this container. + image string + // The host root FS to read + rootFs string + // Filesystem handler. + ignoreMetrics container.MetricSet +} + +var _ container.ContainerHandler = &containerdContainerHandler{} + +// newContainerdContainerHandler returns a new container.ContainerHandler +func newContainerdContainerHandler( + client containerdClient, + name string, + machineInfoFactory info.MachineInfoFactory, + fsInfo fs.FsInfo, + cgroupSubsystems *containerlibcontainer.CgroupSubsystems, + inHostNamespace bool, + metadataEnvs []string, + ignoreMetrics container.MetricSet, +) (container.ContainerHandler, error) { + // Create the cgroup paths. + cgroupPaths := make(map[string]string, len(cgroupSubsystems.MountPoints)) + for key, val := range cgroupSubsystems.MountPoints { + cgroupPaths[key] = path.Join(val, name) + } + + // Generate the equivalent cgroup manager for this container. + cgroupManager := &cgroupfs.Manager{ + Cgroups: &libcontainerconfigs.Cgroup{ + Name: name, + }, + Paths: cgroupPaths, + } + + id := ContainerNameToContainerdID(name) + // We assume that if load fails then the container is not known to containerd. + ctx := context.Background() + cntr, err := client.LoadContainer(ctx, id) + if err != nil { + return nil, err + } + + var spec specs.Spec + if err := json.Unmarshal(cntr.Spec.Value, &spec); err != nil { + return nil, err + } + + taskPid, err := client.TaskPid(ctx, id) + if err != nil { + return nil, err + } + rootfs := "/" + if !inHostNamespace { + rootfs = "/rootfs" + } + + handler := &containerdContainerHandler{ + id: id, + client: client, + name: name, + machineInfoFactory: machineInfoFactory, + cgroupPaths: cgroupPaths, + cgroupManager: cgroupManager, + rootFs: rootfs, + fsInfo: fsInfo, + envs: make(map[string]string), + labels: make(map[string]string), + ignoreMetrics: ignoreMetrics, + pid: int(taskPid), + creationTime: cntr.CreatedAt, + } + // Add the name and bare ID as aliases of the container. + handler.labels = cntr.Labels + handler.image = cntr.Image + handler.aliases = []string{id, name} + for _, envVar := range spec.Process.Env { + if envVar != "" { + splits := strings.SplitN(envVar, "=", 2) + if len(splits) == 2 { + handler.envs[splits[0]] = splits[1] + } + } + } + + return handler, nil +} + +func (self *containerdContainerHandler) ContainerReference() (info.ContainerReference, error) { + return info.ContainerReference{ + Id: self.id, + Name: self.name, + Namespace: k8sContainerdNamespace, + Labels: self.labels, + Aliases: self.aliases, + }, nil +} + +func (self *containerdContainerHandler) needNet() bool { + // Since containerd does not handle networking ideally we need to return based + // on ignoreMetrics list. 
Here the assumption is the presence of cri-containerd + // label + if !self.ignoreMetrics.Has(container.NetworkUsageMetrics) { + //TODO change it to exported cri-containerd constants + return self.labels["io.cri-containerd.kind"] == "sandbox" + } + return false +} + +func (self *containerdContainerHandler) GetSpec() (info.ContainerSpec, error) { + // TODO: Since we dont collect disk usage stats for containerd, we set hasFilesystem + // to false. Revisit when we support disk usage stats for containerd + hasFilesystem := false + spec, err := common.GetSpec(self.cgroupPaths, self.machineInfoFactory, self.needNet(), hasFilesystem) + spec.Labels = self.labels + spec.Envs = self.envs + spec.Image = self.image + + return spec, err +} + +func (self *containerdContainerHandler) getFsStats(stats *info.ContainerStats) error { + mi, err := self.machineInfoFactory.GetMachineInfo() + if err != nil { + return err + } + + if !self.ignoreMetrics.Has(container.DiskIOMetrics) { + common.AssignDeviceNamesToDiskStats((*common.MachineInfoNamer)(mi), &stats.DiskIo) + } + return nil +} + +func (self *containerdContainerHandler) GetStats() (*info.ContainerStats, error) { + stats, err := containerlibcontainer.GetStats(self.cgroupManager, self.rootFs, self.pid, self.ignoreMetrics) + if err != nil { + return stats, err + } + // Clean up stats for containers that don't have their own network - this + // includes containers running in Kubernetes pods that use the network of the + // infrastructure container. This stops metrics being reported multiple times + // for each container in a pod. + if !self.needNet() { + stats.Network = info.NetworkStats{} + } + + // Get filesystem stats. + err = self.getFsStats(stats) + return stats, err +} + +func (self *containerdContainerHandler) ListContainers(listType container.ListType) ([]info.ContainerReference, error) { + return []info.ContainerReference{}, nil +} + +func (self *containerdContainerHandler) GetCgroupPath(resource string) (string, error) { + path, ok := self.cgroupPaths[resource] + if !ok { + return "", fmt.Errorf("could not find path for resource %q for container %q\n", resource, self.name) + } + return path, nil +} + +func (self *containerdContainerHandler) GetContainerLabels() map[string]string { + return self.labels +} + +func (self *containerdContainerHandler) ListProcesses(listType container.ListType) ([]int, error) { + return containerlibcontainer.GetProcesses(self.cgroupManager) +} + +func (self *containerdContainerHandler) Exists() bool { + return common.CgroupExists(self.cgroupPaths) +} + +func (self *containerdContainerHandler) Type() container.ContainerType { + return container.ContainerTypeContainerd +} + +func (self *containerdContainerHandler) Start() { +} + +func (self *containerdContainerHandler) Cleanup() { +} + +func (self *containerdContainerHandler) GetContainerIPAddress() string { + // containerd doesnt take care of networking.So it doesnt maintain networking states + return "" +} diff --git a/vendor/github.com/google/cadvisor/container/crio/client.go b/vendor/github.com/google/cadvisor/container/crio/client.go index b588552cf44a..a47a3e7725a5 100644 --- a/vendor/github.com/google/cadvisor/container/crio/client.go +++ b/vendor/github.com/google/cadvisor/container/crio/client.go @@ -24,7 +24,7 @@ import ( ) const ( - CrioSocket = "/var/run/crio.sock" + CrioSocket = "/var/run/crio/crio.sock" maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path) ) diff --git a/vendor/github.com/google/cadvisor/container/crio/factory.go 
b/vendor/github.com/google/cadvisor/container/crio/factory.go index b0151c7f12ef..0c77db69ed10 100644 --- a/vendor/github.com/google/cadvisor/container/crio/factory.go +++ b/vendor/github.com/google/cadvisor/container/crio/factory.go @@ -154,7 +154,7 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics c return fmt.Errorf("failed to get cgroup subsystems: %v", err) } - glog.Infof("Registering CRI-O factory") + glog.V(1).Infof("Registering CRI-O factory") f := &crioFactory{ client: client, cgroupSubsystems: cgroupSubsystems, diff --git a/vendor/github.com/google/cadvisor/container/crio/handler.go b/vendor/github.com/google/cadvisor/container/crio/handler.go index 6e6fa58563ef..024341da8ab2 100644 --- a/vendor/github.com/google/cadvisor/container/crio/handler.go +++ b/vendor/github.com/google/cadvisor/container/crio/handler.go @@ -18,6 +18,7 @@ package crio import ( "fmt" "path" + "path/filepath" "strconv" "strings" "time" @@ -142,6 +143,12 @@ func newCrioContainerHandler( // get device ID from root, otherwise, it's going to error out as overlay // mounts doesn't have fixed dev ids. rootfsStorageDir = strings.TrimSuffix(rootfsStorageDir, "/merged") + switch storageDriver { + case overlayStorageDriver, overlay2StorageDriver: + // overlay and overlay2 driver are the same "overlay2" driver so treat + // them the same. + rootfsStorageDir = filepath.Join(rootfsStorageDir, "diff") + } // TODO: extract object mother method handler := &crioContainerHandler{ @@ -178,7 +185,7 @@ func newCrioContainerHandler( } // TODO for env vars we wanted to show from container.Config.Env from whitelist //for _, exposedEnv := range metadataEnvs { - //glog.Infof("TODO env whitelist: %v", exposedEnv) + //glog.V(4).Infof("TODO env whitelist: %v", exposedEnv) //} return handler, nil diff --git a/vendor/github.com/google/cadvisor/container/docker/client.go b/vendor/github.com/google/cadvisor/container/docker/client.go index a27bf0c2c784..44207c4f1f84 100644 --- a/vendor/github.com/google/cadvisor/container/docker/client.go +++ b/vendor/github.com/google/cadvisor/container/docker/client.go @@ -21,7 +21,7 @@ import ( "net/http" "sync" - dclient "github.com/docker/engine-api/client" + dclient "github.com/docker/docker/client" "github.com/docker/go-connections/tlsconfig" ) diff --git a/vendor/github.com/google/cadvisor/container/docker/docker.go b/vendor/github.com/google/cadvisor/container/docker/docker.go index 93d96c620bd0..b0ed227dda0d 100644 --- a/vendor/github.com/google/cadvisor/container/docker/docker.go +++ b/vendor/github.com/google/cadvisor/container/docker/docker.go @@ -19,44 +19,60 @@ import ( "fmt" "regexp" "strconv" - "strings" - dockertypes "github.com/docker/engine-api/types" + dockertypes "github.com/docker/docker/api/types" "golang.org/x/net/context" + "time" + "github.com/google/cadvisor/info/v1" "github.com/google/cadvisor/machine" ) +const defaultTimeout = time.Second * 5 + +func defaultContext() context.Context { + ctx, _ := context.WithTimeout(context.Background(), defaultTimeout) + return ctx +} + func Status() (v1.DockerStatus, error) { client, err := Client() if err != nil { return v1.DockerStatus{}, fmt.Errorf("unable to communicate with docker daemon: %v", err) } - dockerInfo, err := client.Info(context.Background()) + dockerInfo, err := client.Info(defaultContext()) if err != nil { return v1.DockerStatus{}, err } - return StatusFromDockerInfo(dockerInfo), nil + return StatusFromDockerInfo(dockerInfo) } -func StatusFromDockerInfo(dockerInfo dockertypes.Info) 
v1.DockerStatus { +func StatusFromDockerInfo(dockerInfo dockertypes.Info) (v1.DockerStatus, error) { out := v1.DockerStatus{} - out.Version = VersionString() - out.APIVersion = APIVersionString() out.KernelVersion = machine.KernelVersion() out.OS = dockerInfo.OperatingSystem out.Hostname = dockerInfo.Name out.RootDir = dockerInfo.DockerRootDir out.Driver = dockerInfo.Driver - out.ExecDriver = dockerInfo.ExecutionDriver out.NumImages = dockerInfo.Images out.NumContainers = dockerInfo.Containers out.DriverStatus = make(map[string]string, len(dockerInfo.DriverStatus)) for _, v := range dockerInfo.DriverStatus { out.DriverStatus[v[0]] = v[1] } - return out + var err error + ver, err := VersionString() + if err != nil { + return out, err + } + out.Version = ver + ver, err = APIVersionString() + if err != nil { + return out, err + } + out.APIVersion = ver + return out, nil } func Images() ([]v1.DockerImage, error) { @@ -64,7 +80,7 @@ func Images() ([]v1.DockerImage, error) { if err != nil { return nil, fmt.Errorf("unable to communicate with docker daemon: %v", err) } - images, err := client.ImageList(context.Background(), dockertypes.ImageListOptions{All: false}) + images, err := client.ImageList(defaultContext(), dockertypes.ImageListOptions{All: false}) if err != nil { return nil, err } @@ -97,14 +113,14 @@ func ValidateInfo() (*dockertypes.Info, error) { return nil, fmt.Errorf("unable to communicate with docker daemon: %v", err) } - dockerInfo, err := client.Info(context.Background()) + dockerInfo, err := client.Info(defaultContext()) if err != nil { return nil, fmt.Errorf("failed to detect Docker info: %v", err) } // Fall back to version API if ServerVersion is not set in info. if dockerInfo.ServerVersion == "" { - version, err := client.ServerVersion(context.Background()) + version, err := client.ServerVersion(defaultContext()) if err != nil { return nil, fmt.Errorf("unable to get docker version: %v", err) } @@ -119,13 +135,6 @@ func ValidateInfo() (*dockertypes.Info, error) { return nil, fmt.Errorf("cAdvisor requires docker version %v or above but we have found version %v reported as %q", []int{1, 0, 0}, version, dockerInfo.ServerVersion) } - // Check that the libcontainer execdriver is used if the version is < 1.11 - // (execution drivers are no longer supported as of 1.11). 
- if version[0] <= 1 && version[1] <= 10 && - !strings.HasPrefix(dockerInfo.ExecutionDriver, "native") { - return nil, fmt.Errorf("docker found, but not using native exec driver") - } - if dockerInfo.Driver == "" { return nil, fmt.Errorf("failed to find docker storage driver") } @@ -134,35 +143,43 @@ func ValidateInfo() (*dockertypes.Info, error) { } func Version() ([]int, error) { - return parseVersion(VersionString(), version_re, 3) + ver, err := VersionString() + if err != nil { + return nil, err + } + return parseVersion(ver, version_re, 3) } func APIVersion() ([]int, error) { - return parseVersion(APIVersionString(), apiversion_re, 2) + ver, err := APIVersionString() + if err != nil { + return nil, err + } + return parseVersion(ver, apiversion_re, 2) } -func VersionString() string { +func VersionString() (string, error) { docker_version := "Unknown" client, err := Client() if err == nil { - version, err := client.ServerVersion(context.Background()) + version, err := client.ServerVersion(defaultContext()) if err == nil { docker_version = version.Version } } - return docker_version + return docker_version, err } -func APIVersionString() string { +func APIVersionString() (string, error) { docker_api_version := "Unknown" client, err := Client() if err == nil { - version, err := client.ServerVersion(context.Background()) + version, err := client.ServerVersion(defaultContext()) if err == nil { docker_api_version = version.APIVersion } } - return docker_api_version + return docker_api_version, err } func parseVersion(version_string string, regex *regexp.Regexp, length int) ([]int, error) { diff --git a/vendor/github.com/google/cadvisor/container/docker/factory.go b/vendor/github.com/google/cadvisor/container/docker/factory.go index cdf70c70eab0..9eb1ff526d08 100644 --- a/vendor/github.com/google/cadvisor/container/docker/factory.go +++ b/vendor/github.com/google/cadvisor/container/docker/factory.go @@ -24,7 +24,7 @@ import ( "sync" "github.com/blang/semver" - dockertypes "github.com/docker/engine-api/types" + dockertypes "github.com/docker/docker/api/types" "github.com/google/cadvisor/container" "github.com/google/cadvisor/container/libcontainer" "github.com/google/cadvisor/devicemapper" @@ -35,7 +35,7 @@ import ( dockerutil "github.com/google/cadvisor/utils/docker" "github.com/google/cadvisor/zfs" - docker "github.com/docker/engine-api/client" + docker "github.com/docker/docker/client" "github.com/golang/glog" "golang.org/x/net/context" ) @@ -340,7 +340,8 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics c glog.Errorf("devicemapper filesystem stats will not be reported: %v", err) } - status := StatusFromDockerInfo(*dockerInfo) + // Safe to ignore error - driver status should always be populated. 
+ status, _ := StatusFromDockerInfo(*dockerInfo) thinPoolName = status.DriverStatus[dockerutil.DriverStatusPoolName] } @@ -352,7 +353,7 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics c } } - glog.Infof("Registering Docker factory") + glog.V(1).Infof("Registering Docker factory") f := &dockerFactory{ cgroupSubsystems: cgroupSubsystems, client: client, diff --git a/vendor/github.com/google/cadvisor/container/docker/handler.go b/vendor/github.com/google/cadvisor/container/docker/handler.go index dab60eabe3df..541df67c99da 100644 --- a/vendor/github.com/google/cadvisor/container/docker/handler.go +++ b/vendor/github.com/google/cadvisor/container/docker/handler.go @@ -32,8 +32,8 @@ import ( dockerutil "github.com/google/cadvisor/utils/docker" "github.com/google/cadvisor/zfs" - docker "github.com/docker/engine-api/client" - dockercontainer "github.com/docker/engine-api/types/container" + dockercontainer "github.com/docker/docker/api/types/container" + docker "github.com/docker/docker/client" "github.com/golang/glog" "github.com/opencontainers/runc/libcontainer/cgroups" cgroupfs "github.com/opencontainers/runc/libcontainer/cgroups/fs" @@ -43,7 +43,8 @@ import ( const ( // The read write layers exist here. - aufsRWLayer = "diff" + aufsRWLayer = "diff" + overlay2RWLayer = "diff" // Path to the directory where docker stores log files if the json logging driver is enabled. pathToContainersDir = "containers" @@ -195,8 +196,10 @@ func newDockerContainerHandler( switch storageDriver { case aufsStorageDriver: rootfsStorageDir = path.Join(storageDir, string(aufsStorageDriver), aufsRWLayer, rwLayerID) - case overlayStorageDriver, overlay2StorageDriver: + case overlayStorageDriver: rootfsStorageDir = path.Join(storageDir, string(storageDriver), rwLayerID) + case overlay2StorageDriver: + rootfsStorageDir = path.Join(storageDir, string(storageDriver), rwLayerID, overlay2RWLayer) case zfsStorageDriver: status, err := Status() if err != nil { diff --git a/vendor/github.com/google/cadvisor/container/libcontainer/helpers.go b/vendor/github.com/google/cadvisor/container/libcontainer/helpers.go index b51477ee1745..c2194ac34ad9 100644 --- a/vendor/github.com/google/cadvisor/container/libcontainer/helpers.go +++ b/vendor/github.com/google/cadvisor/container/libcontainer/helpers.go @@ -55,19 +55,36 @@ func GetCgroupSubsystems() (CgroupSubsystems, error) { if err != nil { return CgroupSubsystems{}, err } + + return getCgroupSubsystemsHelper(allCgroups) +} + +func getCgroupSubsystemsHelper(allCgroups []cgroups.Mount) (CgroupSubsystems, error) { if len(allCgroups) == 0 { return CgroupSubsystems{}, fmt.Errorf("failed to find cgroup mounts") } // Trim the mounts to only the subsystems we care about. supportedCgroups := make([]cgroups.Mount, 0, len(allCgroups)) + recordedMountpoints := make(map[string]struct{}, len(allCgroups)) mountPoints := make(map[string]string, len(allCgroups)) for _, mount := range allCgroups { for _, subsystem := range mount.Subsystems { - if _, ok := supportedSubsystems[subsystem]; ok { + if _, ok := supportedSubsystems[subsystem]; !ok { + // Unsupported subsystem + continue + } + if _, ok := mountPoints[subsystem]; ok { + // duplicate mount for this subsystem; use the first one we saw + glog.V(5).Infof("skipping %s, already using mount at %s", mount.Mountpoint, mountPoints[subsystem]) + continue + } + if _, ok := recordedMountpoints[mount.Mountpoint]; !ok { + // avoid appending the same mount twice in e.g. 
`cpu,cpuacct` case supportedCgroups = append(supportedCgroups, mount) - mountPoints[subsystem] = mount.Mountpoint + recordedMountpoints[mount.Mountpoint] = struct{}{} } + mountPoints[subsystem] = mount.Mountpoint } } @@ -84,6 +101,7 @@ var supportedSubsystems map[string]struct{} = map[string]struct{}{ "memory": {}, "cpuset": {}, "blkio": {}, + "devices": {}, } // Get cgroup and networking stats of the specified container @@ -452,6 +470,17 @@ var numCpusFunc = getNumberOnlineCPUs func setCpuStats(s *cgroups.Stats, ret *info.ContainerStats) { ret.Cpu.Usage.User = s.CpuStats.CpuUsage.UsageInUsermode ret.Cpu.Usage.System = s.CpuStats.CpuUsage.UsageInKernelmode + ret.Cpu.Usage.Total = 0 + ret.Cpu.CFS.Periods = s.CpuStats.ThrottlingData.Periods + ret.Cpu.CFS.ThrottledPeriods = s.CpuStats.ThrottlingData.ThrottledPeriods + ret.Cpu.CFS.ThrottledTime = s.CpuStats.ThrottlingData.ThrottledTime + + if len(s.CpuStats.CpuUsage.PercpuUsage) == 0 { + // libcontainer's 'GetStats' can leave 'PercpuUsage' nil if it skipped the + // cpuacct subsystem. + return + } + numPossible := uint32(len(s.CpuStats.CpuUsage.PercpuUsage)) // Note that as of https://patchwork.kernel.org/patch/8607101/ (kernel v4.7), // the percpu usage information includes extra zero values for all additional @@ -470,15 +499,11 @@ func setCpuStats(s *cgroups.Stats, ret *info.ContainerStats) { numActual = minUint32(numPossible, numActual) ret.Cpu.Usage.PerCpu = make([]uint64, numActual) - ret.Cpu.Usage.Total = 0 for i := uint32(0); i < numActual; i++ { ret.Cpu.Usage.PerCpu[i] = s.CpuStats.CpuUsage.PercpuUsage[i] ret.Cpu.Usage.Total += s.CpuStats.CpuUsage.PercpuUsage[i] } - ret.Cpu.CFS.Periods = s.CpuStats.ThrottlingData.Periods - ret.Cpu.CFS.ThrottledPeriods = s.CpuStats.ThrottlingData.ThrottledPeriods - ret.Cpu.CFS.ThrottledTime = s.CpuStats.ThrottlingData.ThrottledTime } // Copied from @@ -509,6 +534,7 @@ func setDiskIoStats(s *cgroups.Stats, ret *info.ContainerStats) { func setMemoryStats(s *cgroups.Stats, ret *info.ContainerStats) { ret.Memory.Usage = s.MemoryStats.Usage.Usage + ret.Memory.MaxUsage = s.MemoryStats.Usage.MaxUsage ret.Memory.Failcnt = s.MemoryStats.Usage.Failcnt ret.Memory.Cache = s.MemoryStats.Stats["cache"] diff --git a/vendor/github.com/google/cadvisor/container/raw/factory.go b/vendor/github.com/google/cadvisor/container/raw/factory.go index 36d236c8dd5e..1b8a43a40776 100644 --- a/vendor/github.com/google/cadvisor/container/raw/factory.go +++ b/vendor/github.com/google/cadvisor/container/raw/factory.go @@ -83,7 +83,7 @@ func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, igno return err } - glog.Infof("Registering Raw factory") + glog.V(1).Infof("Registering Raw factory") factory := &rawFactory{ machineInfoFactory: machineInfoFactory, fsInfo: fsInfo, diff --git a/vendor/github.com/google/cadvisor/container/rkt/factory.go b/vendor/github.com/google/cadvisor/container/rkt/factory.go index f29c615ebaa1..3f79d753e0d2 100644 --- a/vendor/github.com/google/cadvisor/container/rkt/factory.go +++ b/vendor/github.com/google/cadvisor/container/rkt/factory.go @@ -86,7 +86,7 @@ func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, igno return fmt.Errorf("failed to find supported cgroup mounts for the raw factory") } - glog.Infof("Registering Rkt factory") + glog.V(1).Infof("Registering Rkt factory") factory := &rktFactory{ machineInfoFactory: machineInfoFactory, fsInfo: fsInfo, diff --git a/vendor/github.com/google/cadvisor/container/systemd/factory.go 
b/vendor/github.com/google/cadvisor/container/systemd/factory.go index 4e71d40bda6d..cb3b7c89cd34 100644 --- a/vendor/github.com/google/cadvisor/container/systemd/factory.go +++ b/vendor/github.com/google/cadvisor/container/systemd/factory.go @@ -51,7 +51,7 @@ func (f *systemdFactory) DebugInfo() map[string][]string { // Register registers the systemd container factory. func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics container.MetricSet) error { - glog.Infof("Registering systemd factory") + glog.V(1).Infof("Registering systemd factory") factory := &systemdFactory{} container.RegisterContainerHandlerFactory(factory, []watcher.ContainerWatchSource{watcher.Raw}) return nil diff --git a/vendor/github.com/google/cadvisor/fs/fs.go b/vendor/github.com/google/cadvisor/fs/fs.go index 97afba8ece2a..271b01e3562f 100644 --- a/vendor/github.com/google/cadvisor/fs/fs.go +++ b/vendor/github.com/google/cadvisor/fs/fs.go @@ -136,8 +136,8 @@ func NewFsInfo(context Context) (FsInfo, error) { fsInfo.addDockerImagesLabel(context, mounts) fsInfo.addCrioImagesLabel(context, mounts) - glog.Infof("Filesystem UUIDs: %+v", fsInfo.fsUUIDToDeviceName) - glog.Infof("Filesystem partitions: %+v", fsInfo.partitions) + glog.V(1).Infof("Filesystem UUIDs: %+v", fsInfo.fsUUIDToDeviceName) + glog.V(1).Infof("Filesystem partitions: %+v", fsInfo.partitions) fsInfo.addSystemRootLabel(mounts) return fsInfo, nil } @@ -162,7 +162,7 @@ func getFsUUIDToDeviceNameMap() (map[string]string, error) { path := filepath.Join(dir, file.Name()) target, err := os.Readlink(path) if err != nil { - glog.Infof("Failed to resolve symlink for %q", path) + glog.Warningf("Failed to resolve symlink for %q", path) continue } device, err := filepath.Abs(filepath.Join(dir, target)) @@ -438,7 +438,7 @@ func getDiskStatsMap(diskStatsFile string) (map[string]DiskStats, error) { file, err := os.Open(diskStatsFile) if err != nil { if os.IsNotExist(err) { - glog.Infof("not collecting filesystem statistics because file %q was not available", diskStatsFile) + glog.Warningf("Not collecting filesystem statistics because file %q was not found", diskStatsFile) return diskStatsMap, nil } return nil, err @@ -561,12 +561,12 @@ func GetDirDiskUsage(dir string, timeout time.Duration) (uint64, error) { return 0, fmt.Errorf("failed to exec du - %v", err) } timer := time.AfterFunc(timeout, func() { - glog.Infof("killing cmd %v due to timeout(%s)", cmd.Args, timeout.String()) + glog.Warningf("Killing cmd %v due to timeout(%s)", cmd.Args, timeout.String()) cmd.Process.Kill() }) stdoutb, souterr := ioutil.ReadAll(stdoutp) if souterr != nil { - glog.Errorf("failed to read from stdout for cmd %v - %v", cmd.Args, souterr) + glog.Errorf("Failed to read from stdout for cmd %v - %v", cmd.Args, souterr) } stderrb, _ := ioutil.ReadAll(stderrp) err = cmd.Wait() @@ -600,13 +600,14 @@ func GetDirInodeUsage(dir string, timeout time.Duration) (uint64, error) { return 0, fmt.Errorf("failed to exec cmd %v - %v; stderr: %v", findCmd.Args, err, stderr.String()) } timer := time.AfterFunc(timeout, func() { - glog.Infof("killing cmd %v due to timeout(%s)", findCmd.Args, timeout.String()) + glog.Warningf("Killing cmd %v due to timeout(%s)", findCmd.Args, timeout.String()) findCmd.Process.Kill() }) - if err := findCmd.Wait(); err != nil { + err := findCmd.Wait() + timer.Stop() + if err != nil { return 0, fmt.Errorf("cmd %v failed. 
stderr: %s; err: %v", findCmd.Args, stderr.String(), err) } - timer.Stop() return counter.bytesWritten, nil } @@ -740,7 +741,7 @@ func getBtrfsMajorMinorIds(mount *mount.Info) (int, int, error) { return 0, 0, err } - glog.Infof("btrfs mount %#v", mount) + glog.V(4).Infof("btrfs mount %#v", mount) if buf.Mode&syscall.S_IFMT == syscall.S_IFBLK { err := syscall.Stat(mount.Mountpoint, buf) if err != nil { @@ -748,8 +749,8 @@ func getBtrfsMajorMinorIds(mount *mount.Info) (int, int, error) { return 0, 0, err } - glog.Infof("btrfs dev major:minor %d:%d\n", int(major(buf.Dev)), int(minor(buf.Dev))) - glog.Infof("btrfs rdev major:minor %d:%d\n", int(major(buf.Rdev)), int(minor(buf.Rdev))) + glog.V(4).Infof("btrfs dev major:minor %d:%d\n", int(major(buf.Dev)), int(minor(buf.Dev))) + glog.V(4).Infof("btrfs rdev major:minor %d:%d\n", int(major(buf.Rdev)), int(minor(buf.Rdev))) return int(major(buf.Dev)), int(minor(buf.Dev)), nil } else { diff --git a/vendor/github.com/google/cadvisor/http/handlers.go b/vendor/github.com/google/cadvisor/http/handlers.go index a2b4055dde0a..8950072b4b9c 100644 --- a/vendor/github.com/google/cadvisor/http/handlers.go +++ b/vendor/github.com/google/cadvisor/http/handlers.go @@ -60,7 +60,7 @@ func RegisterHandlers(mux httpmux.Mux, containerManager manager.Manager, httpAut // Setup the authenticator object if httpAuthFile != "" { - glog.Infof("Using auth file %s", httpAuthFile) + glog.V(1).Infof("Using auth file %s", httpAuthFile) secrets := auth.HtpasswdFileProvider(httpAuthFile) authenticator := auth.NewBasicAuthenticator(httpAuthRealm, secrets) mux.HandleFunc(static.StaticResource, authenticator.Wrap(staticHandler)) @@ -70,7 +70,7 @@ func RegisterHandlers(mux httpmux.Mux, containerManager manager.Manager, httpAut authenticated = true } if httpAuthFile == "" && httpDigestFile != "" { - glog.Infof("Using digest file %s", httpDigestFile) + glog.V(1).Infof("Using digest file %s", httpDigestFile) secrets := auth.HtdigestFileProvider(httpDigestFile) authenticator := auth.NewDigestAuthenticator(httpDigestRealm, secrets) mux.HandleFunc(static.StaticResource, authenticator.Wrap(staticHandler)) diff --git a/vendor/github.com/google/cadvisor/info/v1/container.go b/vendor/github.com/google/cadvisor/info/v1/container.go index 038db84147a3..8d0c28e0bb88 100644 --- a/vendor/github.com/google/cadvisor/info/v1/container.go +++ b/vendor/github.com/google/cadvisor/info/v1/container.go @@ -307,7 +307,7 @@ type CpuStats struct { } type PerDiskStats struct { - Device string `json:"-"` + Device string `json:"device"` Major uint64 `json:"major"` Minor uint64 `json:"minor"` Stats map[string]uint64 `json:"stats"` @@ -330,6 +330,10 @@ type MemoryStats struct { // Units: Bytes. Usage uint64 `json:"usage"` + // Maximum memory usage recorded. + // Units: Bytes. + MaxUsage uint64 `json:"max_usage"` + // Number of bytes of page cache memory. // Units: Bytes. Cache uint64 `json:"cache"` @@ -516,6 +520,29 @@ type FsStats struct { WeightedIoTime uint64 `json:"weighted_io_time"` } +type AcceleratorStats struct { + // Make of the accelerator (nvidia, amd, google etc.) + Make string `json:"make"` + + // Model of the accelerator (tesla-p100, tesla-k80 etc.) + Model string `json:"model"` + + // ID of the accelerator. + ID string `json:"id"` + + // Total accelerator memory. + // unit: bytes + MemoryTotal uint64 `json:"memory_total"` + + // Total accelerator memory allocated. 
+ // unit: bytes + MemoryUsed uint64 `json:"memory_used"` + + // Percent of time over the past sample period during which + // the accelerator was actively processing. + DutyCycle uint64 `json:"duty_cycle"` +} + type ContainerStats struct { // The time of this stat point. Timestamp time.Time `json:"timestamp"` @@ -530,6 +557,9 @@ type ContainerStats struct { // Task load stats TaskStats LoadStats `json:"task_stats,omitempty"` + // Metrics for Accelerators. Each Accelerator corresponds to one element in the array. + Accelerators []AcceleratorStats `json:"accelerators,omitempty"` + // Custom metrics from all collectors CustomMetrics map[string][]MetricVal `json:"custom_metrics,omitempty"` } diff --git a/vendor/github.com/google/cadvisor/info/v2/container.go b/vendor/github.com/google/cadvisor/info/v2/container.go index ce102ec897b7..d32f571cb674 100644 --- a/vendor/github.com/google/cadvisor/info/v2/container.go +++ b/vendor/github.com/google/cadvisor/info/v2/container.go @@ -146,6 +146,8 @@ type ContainerStats struct { Filesystem *FilesystemStats `json:"filesystem,omitempty"` // Task load statistics Load *v1.LoadStats `json:"load_stats,omitempty"` + // Metrics for Accelerators. Each Accelerator corresponds to one element in the array. + Accelerators []v1.AcceleratorStats `json:"accelerators,omitempty"` // Custom Metrics CustomMetrics map[string][]v1.MetricVal `json:"custom_metrics,omitempty"` } @@ -234,6 +236,9 @@ type RequestOptions struct { Count int `json:"count"` // Whether to include stats for child subcontainers. Recursive bool `json:"recursive"` + // Update stats if they are older than MaxAge + // nil indicates no update, and 0 will always trigger an update. + MaxAge *time.Duration `json:"max_age"` } type ProcessInfo struct { diff --git a/vendor/github.com/google/cadvisor/info/v2/conversion.go b/vendor/github.com/google/cadvisor/info/v2/conversion.go index 1c0f91f8e5d4..97fb463b9fac 100644 --- a/vendor/github.com/google/cadvisor/info/v2/conversion.go +++ b/vendor/github.com/google/cadvisor/info/v2/conversion.go @@ -142,6 +142,9 @@ func ContainerStatsFromV1(containerName string, spec *v1.ContainerSpec, stats [] if spec.HasCustomMetrics { stat.CustomMetrics = val.CustomMetrics } + if len(val.Accelerators) > 0 { + stat.Accelerators = val.Accelerators + } // TODO(rjnagal): Handle load stats. 
newStats = append(newStats, stat) } @@ -203,9 +206,6 @@ func InstCpuStats(last, cur *v1.ContainerStats) (*CpuInstStats, error) { return nil, fmt.Errorf("different number of cpus") } timeDelta := cur.Timestamp.Sub(last.Timestamp) - if timeDelta <= 100*time.Millisecond { - return nil, fmt.Errorf("time delta unexpectedly small") - } // Nanoseconds to gain precision and avoid having zero seconds if the // difference between the timestamps is just under a second timeDeltaNs := uint64(timeDelta.Nanoseconds()) diff --git a/vendor/github.com/google/cadvisor/machine/info.go b/vendor/github.com/google/cadvisor/machine/info.go index a58321d595a6..be90f17e96ce 100644 --- a/vendor/github.com/google/cadvisor/machine/info.go +++ b/vendor/github.com/google/cadvisor/machine/info.go @@ -22,7 +22,6 @@ import ( "path/filepath" "strconv" "strings" - "syscall" "github.com/google/cadvisor/fs" info "github.com/google/cadvisor/info/v1" @@ -31,6 +30,8 @@ import ( "github.com/google/cadvisor/utils/sysinfo" "github.com/golang/glog" + + "golang.org/x/sys/unix" ) const hugepagesDirectory = "/sys/kernel/mm/hugepages/" @@ -48,7 +49,7 @@ func getInfoFromFiles(filePaths string) string { return strings.TrimSpace(string(id)) } } - glog.Infof("Couldn't collect info from any of the files in %q", filePaths) + glog.Warningf("Couldn't collect info from any of the files in %q", filePaths) return "" } @@ -189,19 +190,11 @@ func ContainerOsVersion() string { } func KernelVersion() string { - uname := &syscall.Utsname{} + uname := &unix.Utsname{} - if err := syscall.Uname(uname); err != nil { + if err := unix.Uname(uname); err != nil { return "Unknown" } - release := make([]byte, len(uname.Release)) - i := 0 - for _, c := range uname.Release { - release[i] = byte(c) - i++ - } - release = release[:bytes.IndexByte(release, 0)] - - return string(release) + return string(uname.Release[:bytes.IndexByte(uname.Release[:], 0)]) } diff --git a/vendor/github.com/google/cadvisor/machine/machine.go b/vendor/github.com/google/cadvisor/machine/machine.go index 7992216dd3ca..bd07414456ca 100644 --- a/vendor/github.com/google/cadvisor/machine/machine.go +++ b/vendor/github.com/google/cadvisor/machine/machine.go @@ -24,7 +24,6 @@ import ( // s390/s390x changes "runtime" - "syscall" info "github.com/google/cadvisor/info/v1" "github.com/google/cadvisor/utils" @@ -32,6 +31,8 @@ import ( "github.com/google/cadvisor/utils/sysinfo" "github.com/golang/glog" + + "golang.org/x/sys/unix" ) var ( @@ -265,18 +266,13 @@ func addNode(nodes *[]info.Node, id int) (int, error) { // s390/s390x changes func getMachineArch() (string, error) { - uname := syscall.Utsname{} - err := syscall.Uname(&uname) + uname := unix.Utsname{} + err := unix.Uname(&uname) if err != nil { return "", err } - var arch string - for _, val := range uname.Machine { - arch += string(int(val)) - } - - return arch, nil + return string(uname.Machine[:]), nil } // arm32 chanes diff --git a/vendor/github.com/google/cadvisor/manager/container.go b/vendor/github.com/google/cadvisor/manager/container.go index 4924db5b5e6b..8193bb5e3759 100644 --- a/vendor/github.com/google/cadvisor/manager/container.go +++ b/vendor/github.com/google/cadvisor/manager/container.go @@ -29,6 +29,7 @@ import ( "sync" "time" + "github.com/google/cadvisor/accelerators" "github.com/google/cadvisor/cache/memory" "github.com/google/cadvisor/collector" "github.com/google/cadvisor/container" @@ -39,6 +40,7 @@ import ( units "github.com/docker/go-units" "github.com/golang/glog" + "k8s.io/utils/clock" ) // Housekeeping interval. 
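The hunks above introduce the v1 AcceleratorStats type, the Accelerators slice on ContainerStats, and the pointer-valued MaxAge field on the v2 RequestOptions (nil means never force an update, a zero duration always triggers one). The stand-alone sketch below shows how a caller of the vendored packages might populate these; the maxAge helper and all sample values are illustrative and not part of this change.

package main

import (
	"fmt"
	"time"

	info "github.com/google/cadvisor/info/v1"
	v2 "github.com/google/cadvisor/info/v2"
)

// maxAge is an illustrative helper: MaxAge is a pointer so that nil
// ("never force an update") is distinguishable from 0 ("always update").
func maxAge(d time.Duration) *time.Duration { return &d }

func main() {
	// Accept cached stats up to 2s old; anything older triggers on-demand housekeeping.
	opts := v2.RequestOptions{
		Count:     1,
		Recursive: false,
		MaxAge:    maxAge(2 * time.Second),
	}

	// One AcceleratorStats entry per attached device ends up in ContainerStats.Accelerators.
	acc := info.AcceleratorStats{
		Make:        "nvidia",
		Model:       "tesla-k80",
		ID:          "GPU-0",  // illustrative ID
		MemoryTotal: 12 << 30, // bytes
		MemoryUsed:  4 << 30,  // bytes
		DutyCycle:   37,       // percent of the last sample period
	}
	fmt.Printf("options: %+v\naccelerator: %+v\n", opts, acc)
}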
@@ -64,8 +66,11 @@ type containerData struct { housekeepingInterval time.Duration maxHousekeepingInterval time.Duration allowDynamicHousekeeping bool - lastUpdatedTime time.Time + infoLastUpdatedTime time.Time + statsLastUpdatedTime time.Time lastErrorTime time.Time + // used to track time + clock clock.Clock // Decay value used for load average smoothing. Interval length of 10 seconds is used. loadDecay float64 @@ -76,8 +81,14 @@ type containerData struct { // Tells the container to stop. stop chan bool + // Tells the container to immediately collect stats + onDemandChan chan chan struct{} + // Runs custom metric collectors. collectorManager collector.CollectorManager + + // nvidiaCollector updates stats for Nvidia GPUs attached to the container. + nvidiaCollector accelerators.AcceleratorCollector } // jitter returns a time.Duration between duration and duration + maxFactor * duration, @@ -106,16 +117,43 @@ func (c *containerData) Stop() error { } func (c *containerData) allowErrorLogging() bool { - if time.Since(c.lastErrorTime) > time.Minute { - c.lastErrorTime = time.Now() + if c.clock.Since(c.lastErrorTime) > time.Minute { + c.lastErrorTime = c.clock.Now() return true } return false } +// OnDemandHousekeeping performs housekeeping on the container and blocks until it has completed. +// It is designed to be used in conjunction with periodic housekeeping, and will cause the timer for +// periodic housekeeping to reset. This should be used sparingly, as calling OnDemandHousekeeping frequently +// can have serious performance costs. +func (c *containerData) OnDemandHousekeeping(maxAge time.Duration) { + if c.clock.Since(c.statsLastUpdatedTime) > maxAge { + housekeepingFinishedChan := make(chan struct{}) + c.onDemandChan <- housekeepingFinishedChan + select { + case <-c.stop: + case <-housekeepingFinishedChan: + } + } +} + +// notifyOnDemand notifies all calls to OnDemandHousekeeping that housekeeping is finished +func (c *containerData) notifyOnDemand() { + for { + select { + case finishedChan := <-c.onDemandChan: + close(finishedChan) + default: + return + } + } +} + func (c *containerData) GetInfo(shouldUpdateSubcontainers bool) (*containerInfo, error) { // Get spec and subcontainers. - if time.Since(c.lastUpdatedTime) > 5*time.Second { + if c.clock.Since(c.infoLastUpdatedTime) > 5*time.Second { err := c.updateSpec() if err != nil { return nil, err @@ -126,7 +164,7 @@ func (c *containerData) GetInfo(shouldUpdateSubcontainers bool) (*containerInfo, return nil, err } } - c.lastUpdatedTime = time.Now() + c.infoLastUpdatedTime = c.clock.Now() } // Make a copy of the info for the user. 
c.lock.Lock() @@ -306,7 +344,7 @@ func (c *containerData) GetProcessList(cadvisorContainer string, inHostNamespace return processes, nil } -func newContainerData(containerName string, memoryCache *memory.InMemoryCache, handler container.ContainerHandler, logUsage bool, collectorManager collector.CollectorManager, maxHousekeepingInterval time.Duration, allowDynamicHousekeeping bool) (*containerData, error) { +func newContainerData(containerName string, memoryCache *memory.InMemoryCache, handler container.ContainerHandler, logUsage bool, collectorManager collector.CollectorManager, maxHousekeepingInterval time.Duration, allowDynamicHousekeeping bool, clock clock.Clock) (*containerData, error) { if memoryCache == nil { return nil, fmt.Errorf("nil memory storage") } @@ -328,6 +366,8 @@ func newContainerData(containerName string, memoryCache *memory.InMemoryCache, h loadAvg: -1.0, // negative value indicates uninitialized. stop: make(chan bool, 1), collectorManager: collectorManager, + onDemandChan: make(chan chan struct{}, 100), + clock: clock, } cont.info.ContainerReference = ref @@ -337,8 +377,7 @@ func newContainerData(containerName string, memoryCache *memory.InMemoryCache, h // Create cpu load reader. loadReader, err := cpuload.New() if err != nil { - // TODO(rjnagal): Promote to warning once we support cpu load inside namespaces. - glog.Infof("Could not initialize cpu load reader for %q: %s", ref.Name, err) + glog.Warningf("Could not initialize cpu load reader for %q: %s", ref.Name, err) } else { cont.loadReader = loadReader } @@ -358,7 +397,7 @@ func newContainerData(containerName string, memoryCache *memory.InMemoryCache, h } // Determine when the next housekeeping should occur. -func (self *containerData) nextHousekeeping(lastHousekeeping time.Time) time.Time { +func (self *containerData) nextHousekeepingInterval() time.Duration { if self.allowDynamicHousekeeping { var empty time.Time stats, err := self.memoryCache.RecentStats(self.info.Name, empty, empty, 2) @@ -381,7 +420,7 @@ func (self *containerData) nextHousekeeping(lastHousekeeping time.Time) time.Tim } } - return lastHousekeeping.Add(jitter(self.housekeepingInterval, 1.0)) + return jitter(self.housekeepingInterval, 1.0) } // TODO(vmarmol): Implement stats collecting as a custom collector. @@ -407,24 +446,19 @@ func (c *containerData) housekeeping() { // Housekeep every second. glog.V(3).Infof("Start housekeeping for container %q\n", c.info.Name) - lastHousekeeping := time.Now() + houseKeepingTimer := c.clock.NewTimer(0 * time.Second) + defer houseKeepingTimer.Stop() for { - select { - case <-c.stop: - // Stop housekeeping when signaled. + if !c.housekeepingTick(houseKeepingTimer.C(), longHousekeeping) { return - default: - // Perform housekeeping. - start := time.Now() - c.housekeepingTick() - - // Log if housekeeping took too long. - duration := time.Since(start) - if duration >= longHousekeeping { - glog.V(3).Infof("[%s] Housekeeping took %s", c.info.Name, duration) + } + // Stop and drain the timer so that it is safe to reset it + if !houseKeepingTimer.Stop() { + select { + case <-houseKeepingTimer.C(): + default: } } - // Log usage if asked to do so. 
if c.logUsage { const numSamples = 60 @@ -432,7 +466,7 @@ func (c *containerData) housekeeping() { stats, err := c.memoryCache.RecentStats(c.info.Name, empty, empty, numSamples) if err != nil { if c.allowErrorLogging() { - glog.Infof("[%s] Failed to get recent stats for logging usage: %v", c.info.Name, err) + glog.Warningf("[%s] Failed to get recent stats for logging usage: %v", c.info.Name, err) } } else if len(stats) < numSamples { // Ignore, not enough stats yet. @@ -448,29 +482,39 @@ func (c *containerData) housekeeping() { instantUsageInCores := float64(stats[numSamples-1].Cpu.Usage.Total-stats[numSamples-2].Cpu.Usage.Total) / float64(stats[numSamples-1].Timestamp.Sub(stats[numSamples-2].Timestamp).Nanoseconds()) usageInCores := float64(usageCpuNs) / float64(stats[numSamples-1].Timestamp.Sub(stats[0].Timestamp).Nanoseconds()) usageInHuman := units.HumanSize(float64(usageMemory)) + // Don't set verbosity since this is already protected by the logUsage flag. glog.Infof("[%s] %.3f cores (average: %.3f cores), %s of memory", c.info.Name, instantUsageInCores, usageInCores, usageInHuman) } } - - next := c.nextHousekeeping(lastHousekeeping) - - // Schedule the next housekeeping. Sleep until that time. - if time.Now().Before(next) { - time.Sleep(next.Sub(time.Now())) - } else { - next = time.Now() - } - lastHousekeeping = next + houseKeepingTimer.Reset(c.nextHousekeepingInterval()) } } -func (c *containerData) housekeepingTick() { +func (c *containerData) housekeepingTick(timer <-chan time.Time, longHousekeeping time.Duration) bool { + select { + case <-c.stop: + // Stop housekeeping when signaled. + return false + case finishedChan := <-c.onDemandChan: + // notify the calling function once housekeeping has completed + defer close(finishedChan) + case <-timer: + } + start := c.clock.Now() err := c.updateStats() if err != nil { if c.allowErrorLogging() { - glog.Infof("Failed to update stats for container \"%s\": %s", c.info.Name, err) + glog.Warning("Failed to update stats for container \"%s\": %s", c.info.Name, err) } } + // Log if housekeeping took too long. + duration := c.clock.Since(start) + if duration >= longHousekeeping { + glog.V(3).Infof("[%s] Housekeeping took %s", c.info.Name, duration) + } + c.notifyOnDemand() + c.statsLastUpdatedTime = c.clock.Now() + return true } func (c *containerData) updateSpec() error { @@ -546,7 +590,7 @@ func (c *containerData) updateStats() error { var customStatsErr error cm := c.collectorManager.(*collector.GenericCollectorManager) if len(cm.Collectors) > 0 { - if cm.NextCollectionTime.Before(time.Now()) { + if cm.NextCollectionTime.Before(c.clock.Now()) { customStats, err := c.updateCustomStats() if customStats != nil { stats.CustomMetrics = customStats @@ -557,6 +601,12 @@ func (c *containerData) updateStats() error { } } + var nvidiaStatsErr error + if c.nvidiaCollector != nil { + // This updates the Accelerators field of the stats struct + nvidiaStatsErr = c.nvidiaCollector.UpdateStats(stats) + } + ref, err := c.handler.ContainerReference() if err != nil { // Ignore errors if the container is dead. 
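The container.go hunks above replace the sleep-based housekeeping loop with a timer plus an on-demand path: OnDemandHousekeeping queues a fresh chan struct{} on onDemandChan and blocks until housekeepingTick closes it, while notifyOnDemand drains any waiters that queued up during the tick. The sketch below reproduces that channel idiom in isolation; the worker/requestSync names are illustrative, and the real code additionally injects a clock for testability.

package main

import (
	"fmt"
	"time"
)

// worker mimics containerData: periodic collection on a timer, plus an
// on-demand path where each request carries its own "done" channel.
type worker struct {
	stop     chan bool
	onDemand chan chan struct{}
}

// requestSync mirrors OnDemandHousekeeping: enqueue a done channel, then
// block until the worker closes it (or the worker is stopped).
func (w *worker) requestSync() {
	done := make(chan struct{})
	w.onDemand <- done
	select {
	case <-w.stop:
	case <-done:
	}
}

// notifyWaiters mirrors notifyOnDemand: close every queued done channel
// without blocking, so all pending callers are released by one tick.
func (w *worker) notifyWaiters() {
	for {
		select {
		case done := <-w.onDemand:
			close(done)
		default:
			return
		}
	}
}

func (w *worker) run(interval time.Duration) {
	timer := time.NewTimer(0) // fire immediately, like the NewTimer(0 * time.Second) above
	defer timer.Stop()
	for {
		var picked chan struct{}
		select {
		case <-w.stop:
			return
		case picked = <-w.onDemand:
		case <-timer.C:
		}
		fmt.Println("collecting stats") // stand-in for updateStats()
		if picked != nil {
			close(picked) // release the caller that triggered this tick
		}
		w.notifyWaiters() // release anyone else who queued up meanwhile
		// Stop and drain before Reset, as the vendored loop does, so a stale
		// expiry cannot cause an extra tick.
		if !timer.Stop() {
			select {
			case <-timer.C:
			default:
			}
		}
		timer.Reset(interval)
	}
}

func main() {
	w := &worker{stop: make(chan bool), onDemand: make(chan chan struct{}, 100)}
	go w.run(time.Second)
	w.requestSync() // blocks until the next collection has completed
	fmt.Println("on-demand collection finished")
	close(w.stop)
}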
@@ -572,6 +622,9 @@ func (c *containerData) updateStats() error { if statsErr != nil { return statsErr } + if nvidiaStatsErr != nil { + return nvidiaStatsErr + } return customStatsErr } diff --git a/vendor/github.com/google/cadvisor/manager/manager.go b/vendor/github.com/google/cadvisor/manager/manager.go index cafac0828a85..08955833bff0 100644 --- a/vendor/github.com/google/cadvisor/manager/manager.go +++ b/vendor/github.com/google/cadvisor/manager/manager.go @@ -18,6 +18,7 @@ package manager import ( "flag" "fmt" + "net/http" "os" "path" "strconv" @@ -25,9 +26,11 @@ import ( "sync" "time" + "github.com/google/cadvisor/accelerators" "github.com/google/cadvisor/cache/memory" "github.com/google/cadvisor/collector" "github.com/google/cadvisor/container" + "github.com/google/cadvisor/container/containerd" "github.com/google/cadvisor/container/crio" "github.com/google/cadvisor/container/docker" "github.com/google/cadvisor/container/raw" @@ -45,10 +48,9 @@ import ( "github.com/google/cadvisor/utils/sysfs" "github.com/google/cadvisor/version" - "net/http" - "github.com/golang/glog" "github.com/opencontainers/runc/libcontainer/cgroups" + "k8s.io/utils/clock" ) var globalHousekeepingInterval = flag.Duration("global_housekeeping_interval", 1*time.Minute, "Interval between global housekeepings") @@ -146,15 +148,21 @@ func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingIn if err != nil { return nil, err } - glog.Infof("cAdvisor running in container: %q", selfContainer) + glog.V(2).Infof("cAdvisor running in container: %q", selfContainer) - dockerStatus, err := docker.Status() - if err != nil { - glog.Warningf("Unable to connect to Docker: %v", err) + var ( + dockerStatus info.DockerStatus + rktPath string + ) + if tempDockerStatus, err := docker.Status(); err != nil { + glog.V(5).Infof("Docker not connected: %v", err) + } else { + dockerStatus = tempDockerStatus } - rktPath, err := rkt.RktPath() - if err != nil { - glog.Warningf("unable to connect to Rkt api service: %v", err) + if tmpRktPath, err := rkt.RktPath(); err != nil { + glog.V(5).Infof("Rkt not connected: %v", err) + } else { + rktPath = tmpRktPath } crioClient, err := crio.Client() @@ -163,7 +171,7 @@ func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingIn } crioInfo, err := crioClient.Info() if err != nil { - glog.Warningf("unable to connect to CRI-O api service: %v", err) + glog.V(5).Infof("CRI-O not connected: %v", err) } context := fs.Context{ @@ -206,6 +214,7 @@ func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingIn containerWatchers: []watcher.ContainerWatcher{}, eventsChannel: eventsChannel, collectorHttpClient: collectorHttpClient, + nvidiaManager: &accelerators.NvidiaManager{}, } machineInfo, err := machine.Info(sysfs, fsInfo, inHostNamespace) @@ -213,13 +222,13 @@ func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingIn return nil, err } newManager.machineInfo = *machineInfo - glog.Infof("Machine: %+v", newManager.machineInfo) + glog.V(1).Infof("Machine: %+v", newManager.machineInfo) versionInfo, err := getVersionInfo() if err != nil { return nil, err } - glog.Infof("Version: %+v", *versionInfo) + glog.V(1).Infof("Version: %+v", *versionInfo) newManager.eventHandler = events.NewEventManager(parseEventsStoragePolicy()) return newManager, nil @@ -251,18 +260,19 @@ type manager struct { containerWatchers []watcher.ContainerWatcher eventsChannel chan watcher.ContainerEvent collectorHttpClient *http.Client + nvidiaManager 
accelerators.AcceleratorManager } // Start the container manager. func (self *manager) Start() error { err := docker.Register(self, self.fsInfo, self.ignoreMetrics) if err != nil { - glog.Warningf("Docker container factory registration failed: %v.", err) + glog.V(5).Infof("Registration of the Docker container factory failed: %v.", err) } err = rkt.Register(self, self.fsInfo, self.ignoreMetrics) if err != nil { - glog.Warningf("Registration of the rkt container factory failed: %v", err) + glog.V(5).Infof("Registration of the rkt container factory failed: %v", err) } else { watcher, err := rktwatcher.NewRktContainerWatcher() if err != nil { @@ -271,14 +281,19 @@ func (self *manager) Start() error { self.containerWatchers = append(self.containerWatchers, watcher) } + err = containerd.Register(self, self.fsInfo, self.ignoreMetrics) + if err != nil { + glog.V(5).Infof("Registration of the containerd container factory failed: %v", err) + } + err = crio.Register(self, self.fsInfo, self.ignoreMetrics) if err != nil { - glog.Warningf("Registration of the crio container factory failed: %v", err) + glog.V(5).Infof("Registration of the crio container factory failed: %v", err) } err = systemd.Register(self, self.fsInfo, self.ignoreMetrics) if err != nil { - glog.Warningf("Registration of the systemd container factory failed: %v", err) + glog.V(5).Infof("Registration of the systemd container factory failed: %v", err) } err = raw.Register(self, self.fsInfo, self.ignoreMetrics) @@ -303,17 +318,20 @@ func (self *manager) Start() error { return nil } + // Setup collection of nvidia GPU metrics if any of them are attached to the machine. + self.nvidiaManager.Setup() + // Create root and then recover all containers. err = self.createContainer("/", watcher.Raw) if err != nil { return err } - glog.Infof("Starting recovery of all containers") + glog.V(2).Infof("Starting recovery of all containers") err = self.detectSubcontainers("/") if err != nil { return err } - glog.Infof("Recovery completed") + glog.V(2).Infof("Recovery completed") // Watch for new container. quitWatcher := make(chan error) @@ -332,6 +350,7 @@ func (self *manager) Start() error { } func (self *manager) Stop() error { + defer self.nvidiaManager.Destroy() // Stop and wait on all quit channels. for i, c := range self.quitChannels { // Send the exit signal and wait on the thread to exit (by closing the channel). @@ -695,6 +714,18 @@ func (self *manager) getRequestedContainers(containerName string, options v2.Req default: return containersMap, fmt.Errorf("invalid request type %q", options.IdType) } + if options.MaxAge != nil { + // update stats for all containers in containersMap + var waitGroup sync.WaitGroup + waitGroup.Add(len(containersMap)) + for _, container := range containersMap { + go func(cont *containerData) { + cont.OnDemandHousekeeping(*options.MaxAge) + waitGroup.Done() + }(container) + } + waitGroup.Wait() + } return containersMap, nil } @@ -792,6 +823,8 @@ func (m *manager) Exists(containerName string) bool { func (m *manager) GetProcessList(containerName string, options v2.RequestOptions) ([]v2.ProcessInfo, error) { // override recursive. Only support single container listing. options.Recursive = false + // override MaxAge. ProcessList does not require updated stats. 
+ options.MaxAge = nil conts, err := m.getRequestedContainers(containerName, options) if err != nil { return nil, err @@ -816,29 +849,25 @@ func (m *manager) registerCollectors(collectorConfigs map[string]string, cont *c if err != nil { return fmt.Errorf("failed to read config file %q for config %q, container %q: %v", k, v, cont.info.Name, err) } - glog.V(3).Infof("Got config from %q: %q", v, configFile) + glog.V(4).Infof("Got config from %q: %q", v, configFile) if strings.HasPrefix(k, "prometheus") || strings.HasPrefix(k, "Prometheus") { newCollector, err := collector.NewPrometheusCollector(k, configFile, *applicationMetricsCountLimit, cont.handler, m.collectorHttpClient) if err != nil { - glog.Infof("failed to create collector for container %q, config %q: %v", cont.info.Name, k, err) - return err + return fmt.Errorf("failed to create collector for container %q, config %q: %v", cont.info.Name, k, err) } err = cont.collectorManager.RegisterCollector(newCollector) if err != nil { - glog.Infof("failed to register collector for container %q, config %q: %v", cont.info.Name, k, err) - return err + return fmt.Errorf("failed to register collector for container %q, config %q: %v", cont.info.Name, k, err) } } else { newCollector, err := collector.NewCollector(k, configFile, *applicationMetricsCountLimit, cont.handler, m.collectorHttpClient) if err != nil { - glog.Infof("failed to create collector for container %q, config %q: %v", cont.info.Name, k, err) - return err + return fmt.Errorf("failed to create collector for container %q, config %q: %v", cont.info.Name, k, err) } err = cont.collectorManager.RegisterCollector(newCollector) if err != nil { - glog.Infof("failed to register collector for container %q, config %q: %v", cont.info.Name, k, err) - return err + return fmt.Errorf("failed to register collector for container %q, config %q: %v", cont.info.Name, k, err) } } } @@ -907,17 +936,26 @@ func (m *manager) createContainerLocked(containerName string, watchSource watche } logUsage := *logCadvisorUsage && containerName == m.cadvisorContainer - cont, err := newContainerData(containerName, m.memoryCache, handler, logUsage, collectorManager, m.maxHousekeepingInterval, m.allowDynamicHousekeeping) + cont, err := newContainerData(containerName, m.memoryCache, handler, logUsage, collectorManager, m.maxHousekeepingInterval, m.allowDynamicHousekeeping, clock.RealClock{}) if err != nil { return err } + devicesCgroupPath, err := handler.GetCgroupPath("devices") + if err != nil { + glog.Warningf("Error getting devices cgroup path: %v", err) + } else { + cont.nvidiaCollector, err = m.nvidiaManager.GetCollector(devicesCgroupPath) + if err != nil { + glog.V(4).Infof("GPU metrics may be unavailable/incomplete for container %q: %v", cont.info.Name, err) + } + } // Add collectors labels := handler.GetContainerLabels() collectorConfigs := collector.GetCollectorConfigs(labels) err = m.registerCollectors(collectorConfigs, cont) if err != nil { - glog.Infof("failed to register collectors for %q: %v", containerName, err) + glog.Warningf("Failed to register collectors for %q: %v", containerName, err) } // Add the container name and all its aliases. The aliases must be within the namespace of the factory. 
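When RequestOptions.MaxAge is set, getRequestedContainers (above) refreshes every matched container concurrently with a sync.WaitGroup and only returns once all OnDemandHousekeeping calls have finished. A generic sketch of that fan-out, with refreshAll and refresh standing in for the cadvisor methods (both names are illustrative):

package main

import (
	"fmt"
	"sync"
	"time"
)

// container stands in for cadvisor's *containerData; refresh stands in for
// OnDemandHousekeeping(maxAge).
type container struct{ name string }

func (c *container) refresh(maxAge time.Duration) {
	time.Sleep(10 * time.Millisecond) // pretend to collect stats
	fmt.Printf("%s refreshed (maxAge=%v)\n", c.name, maxAge)
}

// refreshAll mirrors the MaxAge branch in getRequestedContainers: update every
// container concurrently and return only once all updates have finished.
func refreshAll(containers map[string]*container, maxAge time.Duration) {
	var wg sync.WaitGroup
	wg.Add(len(containers))
	for _, cont := range containers {
		go func(c *container) {
			defer wg.Done()
			c.refresh(maxAge)
		}(cont) // pass the loop variable explicitly, as the vendored code does
	}
	wg.Wait()
}

func main() {
	containers := map[string]*container{
		"/":           {name: "/"},
		"/docker/abc": {name: "/docker/abc"},
	}
	refreshAll(containers, 2*time.Second)
}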
@@ -1137,7 +1175,7 @@ func (self *manager) watchForNewContainers(quit chan error) error { } func (self *manager) watchForNewOoms() error { - glog.Infof("Started watching for new ooms in manager") + glog.V(2).Infof("Started watching for new ooms in manager") outStream := make(chan *oomparser.OomInstance, 10) oomLog, err := oomparser.New() if err != nil { @@ -1305,8 +1343,14 @@ func getVersionInfo() (*info.VersionInfo, error) { kernel_version := machine.KernelVersion() container_os := machine.ContainerOsVersion() - docker_version := docker.VersionString() - docker_api_version := docker.APIVersionString() + docker_version, err := docker.VersionString() + if err != nil { + return nil, err + } + docker_api_version, err := docker.APIVersionString() + if err != nil { + return nil, err + } return &info.VersionInfo{ KernelVersion: kernel_version, diff --git a/vendor/github.com/google/cadvisor/manager/watcher/rkt/rkt.go b/vendor/github.com/google/cadvisor/manager/watcher/rkt/rkt.go index d54c628886a0..4c54d9b94e07 100644 --- a/vendor/github.com/google/cadvisor/manager/watcher/rkt/rkt.go +++ b/vendor/github.com/google/cadvisor/manager/watcher/rkt/rkt.go @@ -53,7 +53,7 @@ func (self *rktContainerWatcher) Stop() error { } func (self *rktContainerWatcher) detectRktContainers(events chan watcher.ContainerEvent) { - glog.Infof("starting detectRktContainers thread") + glog.V(1).Infof("Starting detectRktContainers thread") ticker := time.Tick(10 * time.Second) curpods := make(map[string]*rktapi.Pod) @@ -92,7 +92,7 @@ func (self *rktContainerWatcher) syncRunningPods(pods []*rktapi.Pod, events chan for id, pod := range curpods { if _, ok := newpods[id]; !ok { for _, cgroup := range podToCgroup(pod) { - glog.Infof("cgroup to delete = %v", cgroup) + glog.V(2).Infof("cgroup to delete = %v", cgroup) self.sendDestroyEvent(cgroup, events) } } diff --git a/vendor/github.com/google/cadvisor/metrics/prometheus.go b/vendor/github.com/google/cadvisor/metrics/prometheus.go index 2c766d66489c..2dd7747b8341 100644 --- a/vendor/github.com/google/cadvisor/metrics/prometheus.go +++ b/vendor/github.com/google/cadvisor/metrics/prometheus.go @@ -226,11 +226,19 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo }, }, { name: "container_memory_usage_bytes", - help: "Current memory usage in bytes.", + help: "Current memory usage in bytes, including all memory regardless of when it was accessed", valueType: prometheus.GaugeValue, getValues: func(s *info.ContainerStats) metricValues { return metricValues{{value: float64(s.Memory.Usage)}} }, + }, + { + name: "container_memory_max_usage_bytes", + help: "Maximum memory usage recorded in bytes", + valueType: prometheus.GaugeValue, + getValues: func(s *info.ContainerStats) metricValues { + return metricValues{{value: float64(s.Memory.MaxUsage)}} + }, }, { name: "container_memory_working_set_bytes", help: "Current working set in bytes.", @@ -263,6 +271,51 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo }, } }, + }, { + name: "container_accelerator_memory_total_bytes", + help: "Total accelerator memory.", + valueType: prometheus.GaugeValue, + extraLabels: []string{"make", "model", "acc_id"}, + getValues: func(s *info.ContainerStats) metricValues { + values := make(metricValues, 0, len(s.Accelerators)) + for _, value := range s.Accelerators { + values = append(values, metricValue{ + value: float64(value.MemoryTotal), + labels: []string{value.Make, value.Model, value.ID}, + }) + } + return values + }, + }, { + name: 
"container_accelerator_memory_used_bytes", + help: "Total accelerator memory allocated.", + valueType: prometheus.GaugeValue, + extraLabels: []string{"make", "model", "acc_id"}, + getValues: func(s *info.ContainerStats) metricValues { + values := make(metricValues, 0, len(s.Accelerators)) + for _, value := range s.Accelerators { + values = append(values, metricValue{ + value: float64(value.MemoryUsed), + labels: []string{value.Make, value.Model, value.ID}, + }) + } + return values + }, + }, { + name: "container_accelerator_duty_cycle", + help: "Percent of time over the past sample period during which the accelerator was actively processing.", + valueType: prometheus.GaugeValue, + extraLabels: []string{"make", "model", "acc_id"}, + getValues: func(s *info.ContainerStats) metricValues { + values := make(metricValues, 0, len(s.Accelerators)) + for _, value := range s.Accelerators { + values = append(values, metricValue{ + value: float64(value.DutyCycle), + labels: []string{value.Make, value.Model, value.ID}, + }) + } + return values + }, }, { name: "container_fs_inodes_free", help: "Number of available Inodes", @@ -767,11 +820,19 @@ func (c *PrometheusCollector) collectContainersInfo(ch chan<- prometheus.Metric) glog.Warningf("Couldn't get containers: %s", err) return } + rawLabels := map[string]struct{}{} + for _, container := range containers { + for l := range c.containerLabelsFunc(container) { + rawLabels[l] = struct{}{} + } + } for _, container := range containers { - labels, values := []string{}, []string{} - for l, v := range c.containerLabelsFunc(container) { + values := make([]string, 0, len(rawLabels)) + labels := make([]string, 0, len(rawLabels)) + containerLabels := c.containerLabelsFunc(container) + for l := range rawLabels { labels = append(labels, sanitizeLabelName(l)) - values = append(values, v) + values = append(values, containerLabels[l]) } // Container spec @@ -794,6 +855,8 @@ func (c *PrometheusCollector) collectContainersInfo(ch chan<- prometheus.Metric) ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, specMemoryValue(container.Spec.Memory.Limit), values...) desc = prometheus.NewDesc("container_spec_memory_swap_limit_bytes", "Memory swap limit for the container.", labels, nil) ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, specMemoryValue(container.Spec.Memory.SwapLimit), values...) + desc = prometheus.NewDesc("container_spec_memory_reservation_limit_bytes", "Memory reservation limit for the container.", labels, nil) + ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, specMemoryValue(container.Spec.Memory.Reservation), values...) } // Now for the actual metrics diff --git a/vendor/github.com/google/cadvisor/pages/static/assets.go b/vendor/github.com/google/cadvisor/pages/static/assets.go index 0aa57596c3c1..156737f2bd43 100644 --- a/vendor/github.com/google/cadvisor/pages/static/assets.go +++ b/vendor/github.com/google/cadvisor/pages/static/assets.go @@ -15,12 +15,13 @@ // Code generated by go-bindata. // sources: -// pages/assets/js/bootstrap-3.1.1.min.js +// pages/assets/js/bootstrap-4.0.0-beta.2.min.js // pages/assets/js/containers.js // pages/assets/js/gcharts.js // pages/assets/js/google-jsapi.js -// pages/assets/js/jquery-1.10.2.min.js -// pages/assets/styles/bootstrap-3.1.1.min.css +// pages/assets/js/jquery-3.0.0.min.js +// pages/assets/js/popper.min.js +// pages/assets/styles/bootstrap-4.0.0-beta.2.min.css // pages/assets/styles/bootstrap-theme-3.1.1.min.css // pages/assets/styles/containers.css // DO NOT EDIT! 
@@ -90,22 +91,22 @@ func (fi bindataFileInfo) Sys() interface{} { return nil }
-var _pagesAssetsJsBootstrap311MinJs = []byte("…") // gzipped go-bindata payload for bootstrap-3.1.1.min.js, elided
+var _pagesAssetsJsBootstrap400Beta2MinJs = []byte("…") // gzipped go-bindata payload for bootstrap-4.0.0-beta.2.min.js, elided; the source is truncated partway through this blob
7\x24\x26\x9b\x3c\xaf\x66\xe2\x7c\x9a\x23\xdc\x13\x85\x15\x2c\xd0\x3c\xa7\x0f\xce\x38\xcd\x01\xa2\xf3\x79\x89\xd8\x0f\x88\xd3\xfb\x0a\xfa\x50\xc3\xf9\x0e\x71\x32\xa0\xa0\x42\x71\x6c\x41\x05\xcb\xcd\x7a\x4d\x0b\x56\x76\x7c\x66\x7a\xfb\x40\x69\x8e\x52\x12\x16\xa0\x82\xb8\xf4\x2d\x9f\x04\x0c\xd9\xed\x70\xba\xdd\x32\x10\x11\x9a\x09\x0a\x51\x41\x7e\x9e\x8e\x97\x68\xf6\xe9\x58\x50\xd9\xfa\x2b\x04\x09\xc4\x35\x0d\x2f\xf9\x71\xc4\xfc\x38\x2a\x0a\xbb\x2e\x28\xa3\xfc\xeb\x06\x71\x94\x44\x0d\xc3\x12\xc8\x5d\x2b\x12\x7c\x5b\x4e\x21\x4d\x08\xff\x2f\x4f\xe8\x60\x90\x46\x66\x94\x21\x05\x87\x01\x92\x7f\x07\x31\xff\x39\xc2\xf3\xb0\x4f\xd0\x43\xef\x0a\x2d\xc6\x5f\xd6\x61\x01\x22\x86\x4a\x16\xe6\x00\xb0\x65\x41\x1f\x7a\xbc\x6e\x5c\x14\xb4\x08\x51\xc4\xe8\xcd\x7a\xad\xc9\xe3\xde\x9b\xb8\x77\xb1\x16\x07\x3e\x78\xb3\x57\xee\xbd\x09\x7a\xeb\x82\x7e\xc6\x19\xca\x7a\x7c\xa0\xbc\x34\xe7\xa5\xf7\x1b\xd6\x43\x5f\xd6\x68\xc6\xac\x9a\x62\xef\x4d\x10\xbd\xe1\x14\x4d\x63\x66\x91\xe0\x10\x40\x14\xcd\x49\x84\x56\x9b\x9c\x13\x55\x7b\x23\x92\x12\xa6\x91\x77\x8b\x42\x30\x18\x84\x28\x42\x9f\x39\xe6\x97\x6b\x34\xc3\x69\x7e\xdb\xa4\x2d\xd3\x84\x84\x00\xc0\xb4\x0a\x01\x2c\x5a\x7c\x55\x23\xe7\x60\x80\x43\x6b\xad\x21\x02\x90\x88\x32\x48\x00\x64\x15\xa4\xce\x97\xe0\xc9\x82\x4d\xd4\x46\x49\x5a\x1b\xa2\xba\x06\x40\x7b\xfb\x66\x94\x94\xac\xd8\xf0\xd3\x97\x30\xc8\xa2\xbb\x3b\x51\x77\x77\x97\x20\x4e\x19\x2c\x74\x93\x44\x3c\x48\x73\x54\xb0\x00\x92\x84\x2f\xcd\x2d\x9b\x42\x9c\x3c\x1d\x9f\x5d\x5c\x8f\xe3\x60\x96\xd3\x12\x45\xf7\x65\xa4\x60\x44\xf1\x89\x2a\xcf\x9c\x8a\xd3\xe3\x1f\xef\x4e\x8e\x26\x47\x77\x47\x97\xa7\x1c\x00\xcf\x3e\x99\xfa\x48\x1c\xec\x74\x8d\x03\x3e\xc3\xa7\xa3\xb3\xf1\xd5\x24\xd6\xfd\x7e\x77\x74\x32\x8e\x83\x79\x9a\xa1\x00\x5e\xff\x70\xf1\x73\x1c\x94\x4b\xfa\x10\x34\xc6\xea\xb2\x5e\x4e\x7e\xa3\x3b\x85\x67\x09\xab\x34\x39\x31\xab\xa0\xf7\x9c\x44\x62\xa8\x89\x7d\x78\x58\xc2\xb6\x5b\xa7\x05\xc1\xce\x50\x22\xcb\x16\x88\x5d\x51\xaa\xc9\x5b\xc8\xc0\x48\x96\xab\xd3\x7c\xcc\xdb\x1b\x73\x54\x08\x11\xe7\xb4\x27\x52\x5c\xb8\x2c\x04\x7a\xa0\x2c\x04\xba\xf1\x02\xad\xe8\x67\xc3\x14\x11\xa8\x20\x89\x32\x5c\xae\x9d\xf1\x80\x27\x14\x49\xc0\x93\x94\xa5\xa1\x33\x2c\x18\x98\x05\x96\x1c\xa7\x9e\xb2\xa0\x58\x90\x34\x47\x9b\xb4\x69\x6c\x19\xf9\xc9\x71\xc8\x80\xc3\x37\x09\x97\xbd\x12\x14\x12\x70\x3b\x9c\x02\x88\xb7\x5b\xf1\x93\x01\xb9\x82\x25\x0b\x83\x28\xd8\xa3\x91\xd8\x3c\x05\x23\x46\xd0\x5a\x17\xcf\x20\x50\x24\x57\x0c\x47\x02\x83\x80\xc5\xac\x6b\x32\x49\x00\x24\xa2\x45\x67\xe1\x3c\xad\xf1\xa5\x18\x89\x4f\x25\xe4\x71\x9e\x96\x65\x48\x23\x8e\x3d\x00\x96\xdd\x87\x58\x7c\xb3\x4c\x4b\xfd\x01\x47\x3d\x70\x28\x4a\xb9\xb8\x50\x76\x8a\x0b\xa8\x66\x15\xd1\x5d\x86\x4a\x56\xd0\x47\xb3\x8e\x10\x81\x0a\x78\x49\x4a\x78\xf0\xcf\x43\x10\xcb\x9d\x6b\x7e\x25\xd0\xa1\x51\x9a\xb4\x58\x48\x86\x58\x3a\x5b\x86\xf5\x22\xa9\x15\x3c\xd1\x73\x0f\xb9\x3c\x12\xdd\xfd\xfa\x6f\x1b\x54\x3c\x9e\x12\x86\x8a\x79\x3a\xb3\xf0\xab\xa6\x3d\x62\x18\x88\xb7\xd6\x20\x01\x62\x9b\xb9\xcc\x04\xcb\x04\x8b\xa3\x1a\x5a\x88\x37\x2a\xb7\xdb\xb0\x4c\x38\x95\x66\x0a\xac\x05\xc4\x99\x03\x94\x44\x21\x48\x12\x2e\x62\x97\xb7\x64\x2a\xa1\x2b\x39\x40\x29\x76\x9e\xe0\x72\x85\xcb\x32\xf1\xb0\x58\x7b\xad\x85\x40\xbf\x96\x27\x4a\x9d\xaf\x90\x13\x39\xd1\x83\x6a\xb6\x82\x05\xa7\x9a\x9b\x3c\x87\xb7\x4f\x9f\xd0\x63\x1c\xfc\x34\xbe\xba\x3e\xbd\x38\x17\xa2\x4d\x9b\xab\x06\xb6\x6a\x17\x54\xd5\x94\xd3\xdb\xd0\xc2\xc4\x5a\x6c\xa0\x44\x2c\xb3\x4d\xd1\xe0\xd3\xc9\xe9\xf5\xc7\xd3\xeb\xeb\xf8\xcd\xad\x20\x66\x99\x9a\x89\xa2\x62\xd3\x37\x55\xa4\x20\x60\xda\x98\x6d\xc8\xd7\x2e\x05\x92\xf5\xdc\xb2\x69\x92\xb6\xf6\x4b\x57\x
45\xc7\x16\xf1\x4e\x4d\x29\xa1\x9c\x9b\xe7\x78\xc6\x92\xb6\xb4\xa0\x5b\x25\xb0\xdd\x6e\x25\xf9\x51\xee\xa1\xfa\xf7\x1b\xc6\x28\x69\x90\xfd\xa3\xe3\xc9\xe9\x4f\xe3\x38\x48\x67\x0c\x7f\x46\x01\xfc\x70\x33\x99\x5c\x9c\xc7\xc1\x3d\x23\x01\xfc\xee\xe2\xf8\xe6\x3a\x0e\xe6\x74\xb6\x29\x83\x0a\x96\xc9\x93\x58\x9d\xc9\xc5\xf7\xdf\x9f\x8d\xef\x8e\x8f\xae\xae\x2e\x26\x7a\x79\x18\x5d\x2c\x72\xf4\x9f\xa6\x9b\xe9\x1b\x68\x01\xbb\x50\x1a\xa8\xe4\x50\xa7\xe7\x97\x37\x93\x38\xc0\x64\xbd\x61\x01\xd4\xe3\x89\x9a\x03\x8a\xf8\x88\x04\x33\xe9\xe4\x3c\xb2\xd1\x9a\xf5\xc8\xf1\xdf\x7d\x38\xbb\xb9\xb2\xc0\xc5\x6c\x3c\xe0\xbd\xfb\x7c\x53\xf8\x9a\xf9\xbd\xf8\x92\x9a\x7b\x6b\x63\xfa\x43\x48\xf8\x3f\x85\x3a\x95\xa6\xb9\x9a\x10\x97\x91\xb5\x96\x9c\x14\x73\x19\xab\x90\x0d\xd0\xf6\x67\x42\x0a\x2e\x23\xb1\xb2\x1a\x9a\x0a\xdd\x2e\x28\xd2\x0c\x53\x7e\x64\x69\x24\x04\x09\x5e\x13\xcd\xb8\xf8\x88\x32\x41\x2e\xdd\x96\x0c\xe9\xc4\x91\xdc\x19\x00\x18\x67\x21\x28\x2f\x91\xe8\x3d\x4d\x10\x17\xf0\x54\x8f\x0a\x86\x77\x99\xf2\xd6\x52\x97\x60\x9b\x46\x2a\x3c\x57\x4a\x2c\xe5\x5d\xd8\x0a\x01\x2e\xd3\xfb\x1c\x65\x5c\xce\x2f\x76\xd4\xd1\x68\xc6\x9b\x3c\xc3\x25\xe3\xf2\x0f\x4b\x31\x29\x9b\x5f\xef\x86\x50\x6a\xe7\xc8\x4c\x3f\xe9\xbf\x60\xfa\x10\x85\xb4\xa6\xcd\xc1\x6c\x99\x92\x05\x0a\x40\x45\x23\x81\x57\x21\xe0\x7b\x79\x50\x55\x64\x30\x70\xda\x8a\x4a\x47\x95\x49\x0b\x9c\xee\xaf\x0b\x54\x96\x28\x0b\xe0\x4b\x3a\x06\x90\x79\xf6\x47\xe2\x54\x73\x75\xbf\x4a\xec\x50\xe7\xd6\x2f\x77\xfc\x5e\x0c\xa7\xe6\x23\xba\xbb\x91\x94\x3b\x6c\x6e\xd3\x05\x0b\x31\x67\x3a\x72\xca\x8a\xeb\x60\xce\x75\x04\xc7\xf9\x23\x79\x03\x6d\xf2\x06\xe7\x3c\x2a\x42\x08\x1d\x89\xb3\xc5\xcc\x46\x9a\x32\x48\x6d\x77\xc4\xe5\x2e\x7b\x9b\x25\x9d\x03\x42\xf1\x15\x75\xf5\xd9\x57\x55\xc0\x43\xee\xa5\xae\xc6\xe1\xcd\xc2\x70\xd1\x44\x8c\xd9\x43\xfd\x9e\x1f\xb8\x92\xde\x2c\xd3\x52\x6b\x1c\xfc\x74\x8b\x11\xba\xc8\x27\xba\x83\x6f\xff\x53\x9e\x03\x4c\xc0\xe1\x3f\xbe\x95\x8a\x1f\x93\xa4\x06\x54\xff\x17\x19\xe2\xd2\xc3\x10\x67\x69\x41\x37\x25\xca\x39\x4b\xe4\x68\x56\xff\xc6\x09\x17\x7d\x09\xa4\x86\x55\xa6\xc9\x13\xe6\x4d\x7e\x4e\xf3\xf8\x9f\xd1\xb7\xf0\x13\x7a\xbc\xa7\x69\x91\xc5\xfd\x21\x2c\x73\x9c\xa1\xb8\x7f\x00\xd7\xe9\xa6\x44\x71\xb0\xa4\x9f\x51\x11\xc0\x87\x22\x5d\xc7\xfd\x61\x05\x73\xeb\xdb\x20\x24\x9b\xd5\x3d\x2a\xb6\xf7\x52\xe5\x07\x41\xdd\x54\xa0\xca\x02\xd5\x62\x10\xaa\x82\x6d\x29\xac\x8d\x20\xd0\x3d\x84\xb2\xc0\x6a\x44\x74\x66\x1a\xa8\xe0\x32\x79\x3a\x1f\xff\xfb\x24\x0e\x08\xfa\xc2\x02\x78\x79\x35\xfe\x29\x0e\x38\x4e\x06\xf0\x6c\xfc\xdd\x24\x0e\x72\x34\x67\x01\xbc\x3a\xfd\xfe\x87\x49\x1c\x08\xa3\x76\x50\xc1\x59\xf2\x74\x7d\x76\xca\xb5\x33\x31\x82\x60\x0f\x43\xfe\x5b\xfe\xe4\xbf\x7e\x1c\xff\x72\x72\xf1\xf3\x79\x1c\x7c\x42\x8f\x19\x7d\x20\xbc\xec\xe3\xc5\xcd\xf5\x78\x7c\x3e\x19\x5f\xc5\xc1\x8a\x2f\x20\xd7\x8a\x0a\x53\x73\x36\x3e\xe2\x0c\x5d\xd4\xe4\x28\xfd\x2c\x5a\x9d\x5c\xdc\x1c\xff\x20\xcc\x54\x8c\x6e\x66\x4b\x44\x44\xeb\x67\x17\x47\x27\x16\xa7\xce\x69\x2a\x34\x4e\xbd\x2f\x16\x67\xef\x94\x01\xda\xb0\x15\xdc\x24\x4f\xc7\x47\x57\x62\x2c\xb1\xb5\xe9\x4d\xd1\xc7\x99\xb9\x5e\x19\x0d\xbe\x8f\x19\x5a\xed\xcb\x75\x52\x2b\xe8\x56\xc9\xf5\x94\x6b\xee\xd6\xd8\x3b\xe0\xd6\xc8\xfd\x38\x9d\x8c\x3f\x36\x6a\x82\x0a\x66\xb5\x6c\x66\x64\x21\x59\x70\x27\x3f\x50\xa5\x91\xfb\xa1\x6a\xad\x59\xca\xc7\x75\x27\x87\x10\xb5\x47\x07\x7b\x91\x77\x60\xe7\x27\xa7\xc7\x47\x93\x8b\xab\x6b\xe7\x2b\x92\xe1\x59\xca\x68\x51\x06\x52\xc2\x53\xeb\x26\x05\x3c\xb1\x7a\x53\xd8\xb3\x7e\xed\x33\x3a\x55\xa0\x57\x1c\x52\x89\x82\x05\xce\x90\x75\x06\xa7\x6f\x2a\x38\xf7\xca\x59\x54\xd8\x4a\x94\xa4\xc5\x87\x57\x0a\xa6\xa4\xf8\x94\x3e\
x59\x76\x99\x5c\x99\xb1\xc5\xc1\x34\x70\x79\xc9\x0f\x50\x96\xf4\x0f\x4c\xc9\x75\x8e\x33\x4c\x16\xa6\x48\x20\xa4\xb2\x10\xdb\xdf\xca\x4b\x8c\xda\x64\x20\xcd\x6d\x9c\xf2\xba\x0c\x93\x6b\x71\xb7\xc3\xa9\x19\x9e\x5e\xad\x71\x5d\xef\x91\xd6\xb2\xa8\x5e\x6d\xeb\xf3\x34\xcb\x84\x1e\xcd\xa5\x18\x44\x50\x51\x86\x40\x88\x98\xf3\x84\xb6\x45\xcc\x79\xc4\x37\xd3\x5e\xc3\xc6\x1c\xb5\x9d\x42\x6c\x4b\xb8\x8c\x38\x56\x80\x0a\xca\xef\x7e\x5e\x22\xf2\x13\x2e\xf1\xbd\x2b\xa3\xf6\x8d\x05\x78\x89\xb3\x0c\x11\x8f\x10\x82\xcb\x30\x88\x3f\xcb\x4f\x03\x30\x18\x04\x12\x32\xe8\x27\x1e\x89\xb6\x2c\xc3\x40\xc0\xe2\x1c\xb3\x47\x0e\x2e\x20\xf8\x08\x42\x31\x16\x8e\x7b\xaf\x98\x03\x47\x6a\xf9\x1d\xdf\x59\xd7\xec\xb3\xdd\x86\xcd\x7d\x1f\x6a\xf1\xa2\xbd\x01\xe6\x8c\xf0\xf5\x1f\x0c\x76\x58\x15\xc2\xd2\x7f\x55\xe0\xb6\x2b\xb7\x70\xf6\x38\xcb\x51\xd8\x1f\x02\x00\x67\x39\x4a\x8b\x53\x85\xaf\xa1\x8b\xbe\xc0\x87\xce\x7c\x5a\xe2\xfb\x67\xa7\x75\xd0\xfc\x7e\x30\x08\x5f\xdd\x1d\x70\x30\x3d\xaa\x9b\xea\xbb\xdd\x0d\x06\x8d\xd6\x92\x12\x31\xd3\x51\x7d\x65\x50\x6f\xf3\x35\x4b\x19\x3a\x34\x1b\x6d\xa1\x5a\x6c\x0a\x41\x74\x8f\xd5\x1a\x76\x0c\x04\x88\x7d\x66\xb4\xcb\x40\xe4\x3b\xfc\x5d\x9b\x6d\x91\x52\x21\xd0\x48\x29\xd5\x1c\xee\x53\x86\x56\xa7\x24\x43\x5f\x42\x4f\xa3\xd2\xc2\x1d\xb2\xf7\x16\x49\x52\x37\x0e\xfb\x07\xdb\x2d\x7b\x37\x04\x5c\xaf\x6a\x20\x2e\x68\x0d\x85\x12\x14\xce\x22\x4e\x3e\x61\x5b\xa6\xe1\xfa\x62\xc8\x40\x05\xa4\xae\x85\xe7\x21\x4e\x92\x84\x01\x5b\xd4\x16\x28\x1f\x02\xf8\x99\xe2\xac\x67\x61\x1b\x50\x17\x99\xec\x3d\x3e\x94\xa7\x3c\x96\x07\x65\x64\x9f\x9d\x12\x5a\xe3\xbf\x65\x53\x50\xf1\xe5\xf5\x29\x0e\xad\x81\xcf\xe7\x21\xe6\xc2\x5c\xa7\x42\x61\xa8\x62\x8b\x5e\x2b\x32\x6a\x95\x20\x0f\x9d\xf6\x10\x75\x83\xec\x4e\x99\xa6\xde\x2f\xa2\xfe\x2d\x5a\xac\x0f\x59\x4d\xcf\x7d\x6a\x0d\x49\x50\x84\xbe\x30\x44\xb2\xf0\xa9\x82\x29\x9f\x5c\x19\x35\xae\x5d\x38\x8f\x82\xb9\xb0\x67\xce\x3d\x64\xbb\x2d\x7d\x5a\x18\xab\xd0\x5c\xcb\x81\x1e\x0a\x4b\x49\x38\x8b\x94\xec\xe5\xb3\x51\xb2\xe8\x4e\x09\x64\x21\xe2\x62\xb6\x12\x41\x39\xc2\xd8\x3d\x08\x7c\x19\x0c\x42\x7f\xfb\xb5\x1c\xe7\xef\x42\x62\x1b\x52\xfa\x85\x82\x17\xd2\x9d\x1f\x5e\xe2\xa2\x1c\x0f\x25\x82\xad\x96\x2c\x2d\x58\x80\x49\xcf\xd0\x08\xfd\x87\xda\x91\xae\xb9\x6b\x79\xd1\x3e\x28\xcc\xe0\x3f\x73\x98\xf6\x60\x20\x28\x9f\xbe\xe4\x75\x2b\x9b\xc0\x89\xef\x3a\xd8\x3b\x0b\xf8\xcf\xc3\xe1\x1e\x6b\x13\xa5\x4a\x92\x25\xbd\x01\x49\xe3\x96\xbe\xff\x56\x58\xb6\xb6\x0c\x7d\x61\x69\x81\xd2\xb7\xd8\x28\x45\x42\xc1\x8a\x58\xba\x38\x4f\x57\x08\x80\xf2\x01\x8b\x4b\xca\xe8\x61\x89\x67\x4b\xf0\x34\x4b\x4b\xd4\xfb\xf6\x2f\x71\x5b\x97\x94\xf8\xcc\x4b\x43\x30\xba\x2f\x50\xfa\x69\x24\x81\xff\xda\x09\x2c\xf9\xab\x02\xd6\x0e\x0f\x72\x92\x95\x3e\x01\x86\xe8\xf9\xac\xb5\xf6\x71\x46\xd1\x2a\xfd\x84\x8e\x8a\x22\x7d\x0c\x85\xd1\x7a\x9d\x16\x88\xb0\xb0\x96\x65\x38\x59\x75\x48\x40\x84\x79\xc3\x17\x73\x61\x06\xaf\xbb\xfb\xf0\x78\x82\x0b\x24\xfa\x6a\xdc\x83\x29\xba\x9e\x24\x89\xa4\x60\x10\xab\x1f\x9c\x8e\xc1\xd2\x47\xaa\x11\x80\x45\xe2\xa3\xc9\x9c\x5e\x87\x78\x30\x18\x26\x49\x52\x6e\xb7\x64\x30\x28\x93\x24\x29\x80\xe1\x6c\x6a\x4f\xb9\x12\xa5\x09\x2c\x1a\x49\xe3\x5a\x58\xee\x85\x75\xc7\x87\xfb\x07\xf1\x01\x00\xff\x5f\xbb\x17\x25\x84\xed\x1f\x24\x49\x42\x0f\x1d\xca\xea\x19\xd1\x34\xb6\x21\xe8\x54\xac\x89\x92\x29\x38\x49\x6b\xdd\xab\x70\x09\xb8\x9b\x47\x81\x8e\x05\x79\x19\xf3\xe3\xcb\xa6\xef\x6b\x24\x3f\x1a\xc3\xa7\x02\xe5\x29\x43\xd9\x44\x20\x69\xcc\x60\xa6\xf7\x29\x26\x70\x5e\xd0\x55\x5c\x42\x46\x63\x5c\x01\xd7\x0d\xc3\xb6\x4d\xe9\xcb\x70\x00\x0b\x31\xbf\x12\xb1\x23\x41\x9b\x4f\x35\x21\xf6\xdd\x82\xd4\x9c\xb3\x49\xae\x6b\x4e\xd4
\xae\x72\xa7\xe6\x9a\x1d\x37\xba\x74\x54\x0b\x0b\x9e\x36\xa2\xd9\x12\xe7\x59\x81\xc8\xad\x77\x89\xa7\x23\x21\xfb\x12\x10\xa5\x59\xd6\x68\x58\x1e\x21\xc1\x56\x7d\x7b\x06\x0b\x48\x61\x2a\x3d\x57\xf2\x97\x8a\x24\x70\xee\xdb\xd2\x1c\xc0\xbb\x84\x6c\xb7\xb9\xb6\x2f\xb6\x0f\x52\xc8\x38\x23\x5a\xf8\xbe\xbe\x03\x70\x95\x68\x67\x83\x86\x44\xc8\x0f\x49\x7d\xdc\x0e\x43\x9c\x6c\x22\xae\xe8\xc2\x22\xd9\xc8\x13\x48\x93\xa5\x28\x01\xb1\xa8\x14\x1a\xb2\xa8\x15\x47\x92\xd7\x8a\x22\x00\xef\xf8\x42\xdd\x59\xe6\xad\x4d\x6d\x3d\x6e\x69\x5d\x42\xbe\xe9\x71\x4a\xe9\xdc\xba\xd6\xa7\x20\xbc\x83\xd4\x7f\xef\x3a\x18\xe4\x83\xc1\x5d\x4b\x45\x48\xfa\x43\xb8\x52\xeb\x63\x58\x84\x14\x7c\xba\x30\x30\xbc\x93\xc8\xb1\x6e\x1c\x84\xe6\x39\xb8\xb3\xce\x01\x95\xe7\x60\xce\xcf\xc1\xa2\x02\xa3\xdd\xd7\x90\x1d\xf6\xdd\x8d\x3c\x6f\xe0\x30\x14\x2b\x66\x50\xab\xe0\x32\x86\x74\x4f\xe1\xbb\x86\xc2\xdc\xaa\xe4\xb2\x97\x03\x8d\x15\xc4\xee\x3b\x4d\x7e\x7e\xee\x1a\x16\xf9\xbd\xa0\x17\xec\x15\x3e\x9c\x96\x2d\xfa\xce\x91\xfc\x46\xfc\x8b\x85\x51\xd2\x51\xa2\xfd\xfe\x55\x86\x48\xa4\x1e\x0a\xb1\x06\x15\x1c\x76\x5e\xa9\xfe\xcb\x70\x08\x40\x1c\x76\x0e\xa7\xb9\x18\x75\xb9\x47\xc5\xef\xa4\x52\x6b\x00\x0c\xd2\x28\x29\xba\xaa\x20\xdd\x61\xf7\x66\xaf\xb6\x7b\x73\xc1\xb1\x21\x4b\x3a\xf5\x00\x8c\x02\x2a\x7c\x3e\x82\xc4\xf8\xc9\x0d\x06\xe6\x8b\x12\x32\x89\xa6\x45\x12\x48\x0b\xa0\x05\x77\xc8\xe2\x32\x12\x24\x88\x9f\xe4\xda\xaa\x4e\xa5\xcb\x5f\xd3\xae\x4e\xa4\x35\x5d\x5a\x23\xad\x66\x00\x96\x8a\x87\x39\x96\xad\x9e\x0a\x79\x93\x54\xfb\xf4\x99\x1a\x7c\x5b\x4c\x5b\x3e\x3e\x6f\xce\x69\x6f\x85\xd8\x92\x66\x3d\x92\xae\x50\xa6\x5d\x75\xde\x80\x11\x87\x0f\x41\x25\x7a\x2a\x2d\x7d\x33\xc4\xe6\xdc\x62\xbd\x19\xc2\xc8\x4f\xa3\x3b\x3e\xf8\xa3\x35\x3e\xce\xf1\xec\xd3\x0f\xd2\xb3\xb1\xa5\x0f\xe2\x1d\x3e\x10\x7c\x05\xc4\x02\x69\xe7\x26\x14\x62\x73\xa3\xc6\x4f\x6a\xe1\x9c\x4e\x6d\x38\x54\xbe\x50\xa9\xb3\x7d\x02\x58\xee\x5c\x73\x23\x61\x2e\xe9\xaf\xc7\xdd\x4d\x1b\xc5\x02\x30\xe2\x73\x4d\xcd\xc4\x85\x1a\xdf\xc6\x38\x6d\xe3\x2f\x00\x4c\x01\xcc\xd5\x18\x15\x42\xf1\xcd\xca\xa5\x3b\x90\x2b\xf6\x55\xe2\xbe\x9c\xfe\x86\x3b\x11\x28\x3f\x52\x2d\x76\x7c\xd4\x4b\xc5\xe5\x09\xed\xbe\x3c\x99\x35\x2f\x4f\xb2\xa8\x36\x1b\x72\xb6\xe9\xd9\x51\xbe\x9c\xd2\x0d\x55\x35\xe1\x58\x86\x5d\x8a\xa6\x9a\xbb\xe2\x54\xd4\x7b\x08\xb5\x11\x00\x8c\xe6\x1d\x6b\xcb\x20\xd3\xfb\x56\xd9\xb7\x14\x6d\x78\xef\x2d\xc5\xfc\x75\xb7\x14\x14\xb6\xdb\xad\xe0\xbc\x0a\x01\x9c\xf9\x6e\x29\x68\x9e\xa7\xeb\x12\x99\x5b\x0a\xf3\x1b\x9b\xdb\x09\x9a\x3c\xc9\xcb\x98\xb8\x3f\x84\x52\x18\x8f\x03\xe9\x16\xaa\xca\xeb\xbb\x05\x5d\xad\xef\x10\x34\x25\x0c\xc4\x45\x45\xed\x8f\x15\x39\x7d\xf1\xf2\x73\x59\x41\xdc\x9a\x1f\x84\xf5\x77\x89\x33\xd4\x2a\x3f\x19\x9f\xc7\xca\x18\xe8\xd6\x75\x9b\xef\x15\x88\x6d\xbe\x5f\xda\xa3\x0a\xe0\xf1\xc5\xd9\xd9\xd1\xa5\xf0\x55\xab\xdb\x93\x65\xa7\xe7\xdf\x9b\x52\x4e\xb2\x0c\xec\x49\x0d\x9c\xc9\x6b\x8e\x9f\x4f\x4f\x26\x3f\xc4\xc1\x03\xce\xd8\x32\x80\x3f\x8c\xa5\xb5\x7f\x89\xd4\x45\xc8\x46\x5b\xdf\xaf\xe3\x20\xe2\xfd\xc2\x5e\x64\x37\xbc\xc3\xb5\xc1\x8c\x6a\xfa\xa6\x82\x99\xd7\xa2\x8d\x1d\x8b\xb6\x25\x2e\xd8\x76\xe8\xda\xa9\xe0\x85\x06\x68\xc5\xc9\x84\x5a\xd6\x50\xd1\x3a\x87\x78\xbb\x2c\xd0\x3c\x09\xfe\xf4\x66\x8f\x45\x38\xdb\x7b\x13\x4c\x61\x27\xa8\xe5\xab\xeb\x7c\xf1\x06\xd4\x3e\xdf\x9c\xe1\x6d\x1c\x67\x05\x58\x24\xc3\x51\xf1\x0e\x1b\x35\x49\xc7\x0e\xd0\x84\x93\x7f\x98\x76\xd3\x6a\x0a\x46\x9c\x7a\xf5\x93\x44\x3b\x13\xcc\x71\xce\x50\xc1\x35\x4e\xed\xe0\xab\xc5\x60\x7b\xf2\xd1\x7a\x53\x2e\x43\x0a\x2a\x59\x25\x11\xbe\x69\x09\xe1\x65\x87\x66\x2d\x2f\x95\x02\x1b\xb7\x2c\x55\x0a\x54\x9b\x9d\xd3\x2c\x3b\x2a\x70\x7
a\x44\xb2\x63\x8d\x50\x92\x55\xb8\xf6\xaf\xf6\xa0\x1a\x36\x4d\xb9\xbe\x6a\xf4\xf2\x87\x32\xed\x67\x09\x6e\x9b\xf6\x33\x8f\xf7\x48\xb7\x50\xb9\x94\x8e\x71\x72\x7a\xfc\x64\x86\xca\x27\x8d\x63\x72\x08\x2a\x98\x89\xbf\x3a\x8c\x52\xb5\x28\xde\x40\xcd\xc1\x60\x87\xa3\x82\xea\x53\x71\x55\x48\x47\x46\x8f\x93\x0b\x38\x18\x84\x61\xd1\xb4\x1c\x58\xf5\xc0\xa8\x5f\xa1\xf5\xa7\x96\xe5\xae\x01\xd0\x7b\xbe\xdd\x86\x85\x34\x57\x03\xd8\xe7\x2c\x3b\xa4\x89\xcd\x11\xc1\x60\x40\x5b\x43\xb7\xd8\xb7\x14\xef\x73\x39\xdc\x91\x8c\xa3\xf0\x8b\x84\x29\x80\xfd\xd4\xab\x78\x80\xa7\x42\x48\x29\x3b\x58\xb5\xa0\x88\x01\x80\x74\xbb\xb5\x46\x07\xe5\xc0\x85\x10\x37\xab\x8f\xf2\x09\x5e\x21\x52\x8a\x9d\x18\xb5\x86\x63\xcb\xbc\xcb\x48\x93\x34\x4b\xe0\x35\x85\xa7\xe7\xdf\x37\x6e\xa3\x54\x90\xc5\x6c\x9a\x0c\x3d\x38\xa9\x56\xb4\xd6\x4f\x1c\x84\xed\xe8\xf8\x04\x44\x29\x63\x85\xf2\x61\x41\x5f\xd6\x29\xc9\x84\x13\xcb\x50\xf5\xcd\x15\x00\x7b\xed\xc3\xfe\x50\x4e\x38\x6b\x60\xef\x73\x73\xe4\xd3\xf1\xcc\xd2\x9d\xba\x74\x01\x65\x9e\x19\x07\x01\x64\x9e\xc1\x1c\x08\x59\xcd\xb3\xdd\x12\x25\xce\x41\xc5\x91\xa2\x53\x99\x93\x88\x34\x4f\x82\x72\x56\xd0\x3c\x0f\xf6\xc2\xd9\xed\x70\xda\xf0\x63\x9f\x71\x41\x7c\x86\xc2\x03\xe0\xd9\x4f\xaf\x92\x96\xed\x50\x7e\xba\xb6\xd4\x29\xbe\x9d\x4f\xf7\x82\xf5\x97\x40\x4a\xd3\x99\x10\x00\xf9\x41\x5f\x3a\x36\x89\x97\x1e\xf4\x17\x9e\x73\x62\x9d\x27\x2e\x07\xec\x3e\x4f\x04\xc0\x3e\xe9\x38\x4f\x0d\xdb\x96\x7d\x20\x0c\x21\x71\x17\x00\x37\x16\x80\x73\x91\x0f\x74\x43\xb8\xa6\x77\x9c\x63\x44\xd8\x15\x9a\xb1\x10\xdc\x62\xb9\x30\xb5\x3e\xdd\xb8\xa6\x6b\x8d\xd7\x7f\xb0\x3a\xcf\xa1\x5b\xae\xf0\xb1\xeb\xac\x01\xcd\x29\x25\x5b\xec\x84\xb3\xf9\x64\x1b\xe8\x35\x8c\x33\x14\x9c\xb3\xb9\x7f\x9c\x2a\x51\xef\xe9\xea\x3c\xdf\x07\x40\x71\xd4\xce\x03\xee\x88\xaf\x2f\x3a\x7b\xaf\x3c\xf7\xf5\x51\x95\xd2\x25\xa8\x46\x1d\xa8\x11\x04\xdd\x6e\xe1\x87\x2f\x3b\x92\xb3\x1d\x47\x32\x9e\x49\x0d\x2b\x6b\x4d\xd2\xbd\xaa\xf5\x4a\x78\x8c\x7f\xf7\x3a\x07\x3d\x02\x3a\xaf\xcf\x94\x78\xb3\xfb\x42\xcd\x91\x0d\x9d\x0b\x34\x77\x68\xf2\x42\x2c\x7b\xd5\x85\x18\xe5\xa3\x33\xee\xad\xda\xba\xa8\x0b\xba\xee\xca\x52\x71\x57\x96\xb9\xa7\xdd\xa7\x35\x75\x52\xa2\x59\x24\x84\x78\x70\xa8\xfe\x88\x67\x91\x94\xe3\x75\xb3\x52\xa8\xf3\x13\x3f\x48\xc4\x5c\x47\xa5\x15\xc9\xe4\x11\xfb\xc0\x61\x48\x7c\x92\x23\xf4\x86\x34\xb6\xe1\xa2\x5f\xff\x8b\x0b\x08\x83\x81\xbf\x99\xdb\xe1\x14\x80\x98\x18\x93\xb0\xdb\xb7\xbe\x97\x2e\x92\x6e\xf9\x5d\x94\x2b\x14\x08\xde\xec\x79\x9a\x11\x22\x7a\xad\x9b\x13\x65\x6f\x2e\x9a\xca\xb2\x88\x5a\x62\x3b\x24\x5c\x2c\x16\x55\x1a\x42\x6d\x6a\x43\x00\xbc\x25\x53\xae\x37\xcb\x0d\xed\x6a\xa0\x61\x19\x97\x2e\xc0\xc6\x4a\xd6\x26\x51\x23\x62\xc9\x27\x0d\x77\x43\x8b\x5c\xc1\x3e\xee\xa0\x58\x18\x54\x15\xec\x18\xf5\xeb\xe2\x66\x8c\x13\xf7\xa1\x0a\x97\x91\x51\x85\xb0\x2d\x01\xbe\xca\x3c\x58\x9a\x38\x8c\x22\x29\x8d\x89\x30\x6d\x9c\xae\x52\x9b\x98\x7c\xb6\x41\xe5\x8c\x50\x0c\x06\xa9\xd1\x29\xde\x72\xe1\x7e\xcb\x19\xbf\x76\xc0\x04\xc2\xce\xa4\xd0\x87\x53\xe1\x42\x4a\xd1\xe8\x81\xeb\xa2\x22\x22\x18\xe8\x7e\x08\x2c\x00\x80\x6d\xfb\x62\xa7\xd5\xaf\xb8\x65\x2f\xb3\xfa\x31\x69\xf5\xe3\xf0\x9c\x7a\x0a\xa7\x5d\xfc\x87\x1b\xa8\xa8\x30\x50\xe1\x6e\x03\x55\xde\x34\x50\x39\x0a\xac\xe3\x1d\x1b\x1c\x89\x3b\xf5\x68\xb6\x29\xf8\xd1\x9a\x38\x37\xb8\x83\x41\x87\xd7\x2f\x76\xb6\x79\xa7\x61\x12\xb5\xcf\x65\xc3\x88\x05\xcb\x84\x69\x54\x39\xd4\x3e\xbf\xb1\x0a\xd0\x01\xa3\xac\xd3\xc4\x55\xba\xb6\xad\x36\xa0\xd7\xb6\x95\xbd\xce\xb6\x85\x61\xbb\xdd\x0a\x66\x55\x08\xe0\xc6\xfe\xac\x03\x95\x48\x0b\x8f\x82\x3a\xb7\x42\x56\xd0\x75\x46\x1f\x48\xaf\x40\xff\xb5\xc1\x05\xea\x5d\x52\x2e\x72\x47\xbf\x
5a\xe9\x16\xd6\xba\x28\xa2\xc5\x02\x04\x40\x85\xad\x07\xfa\x5b\xe1\xda\x7b\x5f\x46\xf5\x6f\x2a\x5c\x7d\xb1\x38\x75\xd2\x98\x96\x27\x56\xf0\x6a\xf0\xed\xbf\x6e\xff\x3c\xdc\x7e\xf3\x97\x00\xc0\x65\xf2\x54\x1b\xbb\x82\x3d\xda\x30\x71\xf1\x12\xcb\x50\xa5\x7e\x69\xa3\x19\xff\x2d\xf0\x4c\x59\xbb\xcc\x6f\x9f\x19\x4c\x0f\xcf\xf2\x78\x55\xde\x1f\x16\xb4\x72\x38\xe8\x86\xbf\xb9\x74\xa1\x37\x6b\x3f\xac\x30\x88\x9d\x9c\x5e\x1f\x7d\x38\x1b\x9f\xc4\x75\x88\x84\x3d\x1b\x78\x72\x75\x71\x79\x73\x19\x8b\x95\xdc\xac\x03\xf8\x71\x7c\x7e\xa3\x7c\x64\x75\x8b\xfb\x2b\x44\x36\xda\x47\x96\xd7\x4b\x3f\x59\xb7\x5a\xf8\xc9\x0a\x03\xdb\x0e\x1b\x9a\xd9\x9e\xe9\x1b\xf8\xdd\xc5\xd5\xc7\xbb\xe3\x1f\x4e\xcf\x4e\xe2\xc0\x0c\xbe\x37\xa7\xc5\x4a\x76\x62\x95\x8a\x1e\x02\x78\x7e\xf4\xd3\x87\xa3\xab\xbb\xf3\xa3\x9f\xe2\x20\x22\xe9\xe7\xfb\xb4\xd8\x27\xe9\xe7\x00\xfe\x74\x7a\x7d\xfa\xe1\x4c\x5e\xb1\x5e\x37\xbf\xeb\xd5\x3f\x31\x43\xab\x98\x50\x16\x46\x7a\x2d\x80\xf4\xc8\x9d\x5c\x5c\xc6\x01\xa3\xeb\x7d\xe9\xc8\x02\x27\x17\x97\xca\x89\x79\xbd\x2f\x62\xf4\x3f\x5c\x4c\x26\x17\x1f\xe3\xe0\x9e\x32\x46\x57\x1a\x4c\x96\xca\xa8\x7c\x59\x21\xa3\xf6\xe7\xc9\x93\x8c\x32\x8f\x87\x70\x9e\x63\xe9\x2d\x7e\x67\x0a\x8d\xaf\xb8\xb2\xd4\xea\xf3\x03\x02\x09\x6d\xb9\x7b\x2f\xbc\x76\xc6\x54\x85\x14\x7b\xcd\x89\xf2\xa8\xbc\xc0\xc3\x15\x69\xd9\x93\xaf\x52\x5d\xfb\x11\x91\x8d\x26\x5e\xb5\x3b\xe1\xb9\x58\x6e\x05\x95\x21\x86\x66\x4c\x16\x19\x98\x2e\x97\xd6\x45\x92\xb6\xed\x5e\x0b\x8f\xdd\xab\x56\x5b\xb5\xd4\xaf\x77\x69\xa7\x81\x6a\x16\x69\x14\x07\x9a\xa4\xa6\x96\x98\xd8\xa4\xc5\x96\x82\x88\x8d\x7c\xc6\x57\xc0\x69\xd2\x18\x91\xd2\xe8\x4e\x78\x1d\xf1\x55\x29\x43\xc0\x85\x12\xc5\xe2\x9b\x9e\x14\x76\xdb\x95\xe5\x79\x21\xa5\x1e\x58\x6a\x1d\xda\x71\xa0\xe8\x17\x3b\xf4\x66\xea\x2a\xc2\x23\x57\x9a\x9a\x45\xf2\xe8\x02\xcb\xf1\xab\x35\x0f\x7d\x5a\x85\x5e\xb8\x03\x46\x5e\xf0\x03\x61\x74\xd3\x1e\xae\x06\x93\xd0\x43\x8f\x84\xd4\x42\x16\x58\xdb\x57\x05\x88\x42\x28\xf0\x2a\x87\xb0\xbe\x13\x92\xbc\x89\xea\xd3\x0d\x2c\xf9\x30\xb8\xa7\xd9\x63\xe0\x18\x11\x29\x09\x65\xd4\x81\x0c\xc7\x10\x88\x8e\x22\x42\xe9\xba\x69\x51\xd1\xa1\x5b\x0d\x5d\xb2\x1d\xb1\xe5\x5a\xbb\xdc\x95\xb2\xc5\x53\x85\x18\x50\x6e\x63\xbb\xc2\x6c\xad\xbb\xf9\xe7\xb0\x04\xd2\x6e\xb3\x78\xa5\x7e\x88\x3d\x66\x0c\x3a\x9f\x87\xd4\x17\xce\x65\x1f\x67\xf1\x5b\x19\x0b\xec\xdd\xd4\x76\x75\xc5\x50\x55\xd4\x72\xd8\xdc\x72\x21\x05\x2f\xa2\xcd\x3a\x4b\x19\xf2\x78\x6d\xef\xa4\x07\xcf\x77\x5b\xce\x96\x28\xdb\xe4\xe8\x46\xb4\x1f\x02\xde\xd7\x8b\x9d\x2b\x7d\x9e\x84\x4b\x29\xeb\x39\x0e\x8b\x9e\xc0\x63\x14\x95\x8c\xae\x2f\x0b\xba\x4e\x17\xa9\x6c\x59\xf8\x0d\x2a\x33\xbd\x1c\xc7\x2b\x54\x64\xe9\x55\x50\x8b\x55\x91\xea\xaa\xbd\x69\x4a\xd4\xef\x74\x31\xed\x6a\x69\x22\xb2\x42\x10\x3d\x30\x8b\x38\xfb\x49\xa7\xc0\xd9\x57\x91\xc1\x91\x85\x35\x02\xaf\x85\x1a\xb9\x11\x64\x81\xab\x44\x95\xe3\x28\xc8\xc1\xf4\x60\x2e\xf3\x74\xd6\x1a\x8a\x2d\xd4\x5a\xf3\xd7\x9e\x84\x90\x24\x59\x24\xf9\xe6\xc8\xca\x69\xd4\xa4\x69\x5c\x3d\xcf\xa2\xc9\xc5\x25\x7c\x01\xd9\x12\x5a\xb8\x80\x1e\x9f\x9f\x00\x10\xbf\xfc\x13\xc3\xc0\xf5\x12\xdb\x98\xfc\x12\xa3\x45\x9d\x51\x41\xca\x23\x41\x7d\x83\x65\x56\xc9\x22\x91\x9d\x36\x8b\xa7\x6a\x14\xe8\x3a\x4b\x37\xb3\xb5\x7e\x29\x3c\x1c\x92\x68\x4e\xbc\x88\xa9\x00\x5c\x47\x14\x53\x0a\x59\xa3\xa1\xd0\x54\x81\xed\xf6\x49\x28\xf9\xb1\x2e\x4a\x3c\x1d\x2b\x8d\xe7\x69\xad\x37\x3d\xae\x99\x80\x2e\x0a\x01\x5c\xd1\x0c\xcf\x31\x2a\xca\x58\x4b\x3b\x44\x0a\x35\x4f\x88\x08\x5e\x1e\x3b\x6d\xf3\x2a\x2b\xb1\x8b\x4b\x5d\xc4\x55\x8c\x69\x50\xe6\xb9\xba\x66\x8f\x39\x4a\x4c\x63\x7d\xf7\x8b\x4a\xa4\xb2\x68\x07\x0b\xbe\x4a\x7b\x27\xae\x73\x0f\xe6\xaa\
x59\x5b\x45\x3f\x64\xc2\x56\xc0\x99\xba\xcc\x6a\x24\x72\x03\xf8\x7d\x72\x30\x24\xaf\x53\xbd\xc9\x2b\x55\x6f\x52\xab\xde\x8e\xc0\xd2\x72\x5a\x66\xdb\xed\xb7\x9c\x34\x4b\x5f\xe4\xc1\x20\x94\xfa\x43\x20\xca\x78\xe7\xdb\xed\x5f\x13\x53\x0f\x40\x9d\x28\xce\xbd\xe1\x73\xef\x82\xf9\x0a\x0d\x47\xe5\x3b\x6d\xd8\x19\x95\xda\xc8\x5d\x74\x12\x21\x72\x5b\x4e\x01\xa4\x09\x92\x7f\x99\xb5\xce\x9b\x82\x15\xaf\xae\x54\xa4\xbb\xbc\x6d\xa2\xf2\x68\x4b\x79\xaa\x68\xcb\x6d\x83\x41\x3f\x64\x7c\x6e\x52\x21\x4b\xf4\xdc\x06\x83\x97\xfa\x6c\x6f\xb7\x6a\x5d\xac\x6f\xed\x75\x19\x0c\x50\x1d\x7b\x5e\x40\x13\x54\x6b\x6e\x91\x6a\xfe\xcf\x35\x4b\x98\x2b\x0b\x80\x16\x0f\xe6\x00\xce\x3b\x32\xce\x84\xaf\xf2\xa9\xf7\x89\x47\xf3\x79\xb7\x7c\xc4\x17\x73\xb7\x00\x14\xcc\xd3\xbc\x44\x01\xc7\xe0\xcc\xb5\xe3\xd7\xc2\x4f\xe1\xad\xf0\x08\x3f\x52\x91\x86\x39\x50\xe2\x4f\x07\x32\xb4\x8d\x76\x70\x97\xab\x97\x31\xf8\x60\x41\xc6\x95\xa3\x17\x80\x64\xbb\x65\x8a\xd1\x9c\xd3\x0c\x89\xee\x94\x13\xd2\x8f\x52\xa7\xf6\x39\x96\x89\x98\x9f\x7e\xae\x31\x41\x6e\xf0\x76\xfb\x56\x06\xa7\x77\xa2\xc8\x60\xf0\xed\x37\x35\x46\x6c\xb7\x2f\x47\xad\xd0\xe7\xcb\xef\x13\x4c\x38\x81\xd2\x2a\x90\x96\xdf\x3b\x14\x1f\x73\x6f\xb7\x8b\xe7\x4b\x8d\x87\xf8\x15\x9d\x10\x6f\xb7\xdf\xfc\x25\xb1\x68\x83\x3d\x3f\xce\x30\xfb\x02\xa2\xef\x40\xf4\x2d\x5a\x61\x4c\x9f\xc4\xc8\x10\x8e\x62\x0e\xf8\x86\xca\x7b\x3f\xed\x30\xaf\x89\x84\x15\x46\xa0\x8f\xd2\xe8\xdb\x7f\xb5\x07\x53\xbc\x1f\x0e\x06\xc5\xfe\x3e\xfc\xf3\xd0\x29\x7e\x57\xfb\xde\x0f\x06\xc5\xde\x1e\x2c\xde\x0d\x07\x83\xb0\x48\x86\x00\x96\xb7\xc5\x54\xeb\x00\x55\xa5\xc3\xad\xec\x49\xd6\x99\x36\xea\x31\x37\x33\x72\xb8\x89\x21\x64\xce\x14\x50\xe9\xdd\xa8\x33\x46\x08\x5a\xa3\xdc\xf3\xd2\x3f\xdc\xfa\x39\x6f\x02\x72\x41\xb1\x0b\xf8\x4e\x98\x4a\xd3\x6e\x53\xe9\x32\x6a\x9a\xa1\x1a\xc6\xd2\x45\xc7\x49\xea\xfe\x9a\xcb\x5a\xcf\x7d\xe6\xda\xc9\x84\xe3\xb1\x68\xcb\x32\x70\xf1\x26\x6a\x7e\xe6\xfd\x6e\x87\x5d\xf7\x85\x27\x6d\xd1\xe9\xf0\x21\x8f\x4d\x23\x05\x83\x67\x00\xb5\x31\xab\xd1\x7f\xab\x33\xcb\x54\xdb\xee\xd6\x6b\xaa\x5d\xbc\xce\x54\x9b\x7a\xa6\x53\xc1\x45\x15\x02\xd7\x6b\x4d\x59\x50\x57\x34\x4b\x65\xa6\x84\xe8\xbe\x8c\xd4\x2f\xc7\x03\xf1\x3e\x9d\x7d\xca\x0a\xba\x8e\xfb\x43\x27\x3d\x82\x38\x0a\x22\x4f\xc2\x92\x3e\x08\x13\x57\x6a\x01\x7b\x92\x1c\x78\x12\x22\xc8\x36\xac\x04\x09\xbc\x29\xcb\xfe\x95\xdb\xa6\x59\x6b\x80\x6d\x27\x44\x55\xe1\xfa\x39\x5a\x85\xb6\x93\xa3\x2a\x16\x99\x2d\x4e\xcf\x55\xea\x20\x6c\x57\x5d\x8d\xaf\x4f\xff\x63\x1c\x07\x05\x2a\xf1\x7f\xdb\x1d\xab\x9d\x57\x29\xa4\x94\x65\x57\xa5\x90\xb2\xc0\xcc\x81\xd0\x80\xda\xa8\xdb\x06\x15\xe1\x76\x1c\xe3\x35\xa8\xe0\xdd\x9b\x75\x17\xa8\xdb\xae\x00\xee\x68\xb9\xd3\x0c\x2d\xea\x9b\xae\x98\xc7\x57\x17\x67\x67\x1f\x8e\xae\xee\x3e\x8e\x8f\xae\x6f\xae\x64\xd6\x87\x2c\xcd\xf7\xa5\x43\xcc\x7d\x5a\xec\xaf\x50\x5a\x6e\x0a\x14\xc0\x0f\x47\xc7\x3f\x72\x4d\x4d\x83\xe8\x5d\x0f\x20\xd7\xc1\x74\x29\x5d\x23\xb2\x23\x2b\xa0\x30\x4e\x1f\x9d\x5d\x7c\x1f\x07\x72\x44\xfb\x19\x4e\x73\xba\xdb\x0b\x53\x4e\x4d\x27\xa1\xea\x4a\xe5\x65\xa0\xbe\x3b\xfd\xf7\xf1\xc9\xdd\xf1\xc5\xf9\x64\x7c\x3e\x89\x83\x68\x8e\xbf\xa0\x6c\x9f\xd1\x35\xec\xa9\xbf\xa5\xe1\x16\xf6\x22\x5c\xee\x8b\x12\xd8\x8b\x4a\x86\x67\x9f\x1e\x39\x58\x00\xaf\x27\xa7\xc7\x3f\xfe\x62\x35\x61\x57\x2a\x9b\x95\x1c\xea\x55\x6d\x95\x96\xa3\x2d\x84\x3d\xfc\x59\x47\xd1\x97\x26\x20\xd0\x06\x5e\xb9\x4a\x96\xae\xce\x05\x01\xbe\x8e\x56\x7e\x01\xbd\x21\x8d\xa0\x5a\x7e\x00\xec\x1c\x09\x1f\x68\xf6\x78\xf1\x19\x15\xf3\x9c\x3e\x38\x3e\xaa\x78\x41\x68\x81\x3e\xa8\x56\x84\x0b\x77\x5d\x49\x0b\xbc\xc0\x24\xcd\xf9\xd7\x97\x69\x26\x42\x30\xb4\x1b\x9a\x41\x96\x9f\x71\xc6
\x96\xc9\x50\x98\x80\x37\x3e\xd7\xc7\x4d\xcb\x04\xdc\x0a\x4a\x94\x03\xee\x70\x77\x64\xa0\x82\x9b\x86\xbf\xa3\x1b\x37\xbe\xcb\xe1\xd1\xe9\x01\x3c\x7d\x55\x8c\xcf\x52\xa6\x1d\xac\xc3\xe6\x9b\x1e\xbf\x43\x73\x4f\xe8\x78\x27\xb6\x82\xf0\x2a\x8f\x37\x99\x49\x13\x08\xdc\xed\xdb\x6e\x71\x97\xe6\xd0\xd8\x66\xbd\x25\x22\x0d\xd6\xb5\xde\x17\x3b\x66\xaa\x5d\x98\x66\xbf\x6e\x4a\x76\x22\x30\x4c\x04\x3f\x18\xe5\x83\x2b\x1a\x8e\xf7\x0e\x3f\xe9\x56\x5b\xe3\x72\x96\xae\x55\x60\x97\x55\x7c\x25\x08\xa8\x2e\xf6\x99\xee\xcc\x35\xad\x4a\xba\x37\x8b\xec\xa3\x0d\x3d\xd8\x41\x24\x32\x30\xc1\x4b\x43\xfb\x50\xa8\x06\x5b\x74\xd2\x8d\x2c\x20\x0d\x0f\x21\xf5\x41\x4d\x83\x61\x23\x7b\xa3\x9d\x8e\xdb\xfa\x98\x6b\x1f\x1d\x47\x65\x28\x2f\x65\xd5\x32\x2c\xe9\x83\xae\xf7\xc4\x4e\x11\x09\x61\xe5\x93\xe4\xda\x12\xdc\x34\x5c\xfc\x5a\xb8\xcd\x7c\x17\xd3\xb0\x0b\xe3\x1b\x08\xdf\xc4\xcb\xe7\xbd\xfc\x9a\x88\x38\x18\xf4\xfd\x98\x68\xc5\xed\x29\x7a\x33\xd2\x52\xfe\xd7\x1f\xb2\x51\xb1\xeb\x98\xbd\x1a\x0d\x2d\xe9\x77\x3e\x0f\xf3\x48\x89\x02\x1e\x0c\xf5\x79\x01\x7a\x4d\xf2\x0d\x3c\xf6\xa0\xa6\x00\x6a\xe1\x26\x80\xc5\x0b\x1d\xd7\xbc\x47\xe1\x8e\x63\xc9\x47\xce\xef\x04\xe6\xf8\x5d\xdb\xbe\x1d\x9a\xec\xa5\x35\xb8\xd0\x55\x36\x5f\x91\x51\x4e\x72\xd7\x3a\x96\x07\xea\xd5\x74\x19\x56\x83\x15\xc9\x05\xd8\xe1\xf0\xe6\xb9\xca\x50\xac\xce\x2a\xd9\xc5\xd8\x9c\xa2\x26\x6b\xb3\x2b\x3d\x27\xd6\xaa\x6e\xb0\x30\x79\x17\xb2\x51\xe9\xf6\x6f\x3a\x6e\x44\x5c\xc2\xc9\xe1\x5f\xed\x6a\xb7\xcb\x9f\x6e\xe3\xd0\x88\x0e\xa2\x20\x8c\x26\xbf\xe1\x88\xb9\xd7\x64\xb5\x21\xa5\x99\xf2\xb0\xae\x31\xe9\xca\x93\x24\x11\xbf\xc7\x67\xe3\x8f\xe3\xf3\xc9\xdd\xf9\xc5\xc9\x78\xbb\x75\x98\x47\x94\xae\xd7\x88\x64\xc7\x4b\x9c\xfb\x33\xea\xb8\xce\x9e\x02\x2f\xf3\xf4\x31\x09\xee\x73\x3a\xfb\x14\x34\x60\x24\x82\x36\xcd\x58\xca\x61\xa3\xd5\xa0\xd8\xd1\x09\x5d\x27\x43\x88\x07\x83\xaf\x71\x18\xb6\xdd\x7f\xb5\xed\x9a\x6b\x0d\x66\x6d\xc8\x9c\x16\x33\xf4\x9d\xb4\x36\x28\x82\xe7\xb2\xfd\x73\x0f\xdf\xb7\xd3\x84\x83\x27\xd2\x6c\x9b\xb4\xef\x32\x89\x37\xb2\xc8\x65\x6a\xf5\x05\x73\x35\xc2\x87\x6d\x16\xe9\xa1\x2d\x74\x17\xe5\xa0\x0a\xa3\xed\x49\x76\x5e\xce\x75\x91\x56\xc9\x9a\xd5\x2f\x68\x1f\x09\xfd\x45\x92\x24\x44\xb1\xda\xed\xb6\xf6\x26\x76\x8a\x5d\x37\xe3\x65\xca\x19\xb2\xe6\xce\x3a\x8e\x84\xb5\x56\xad\x92\xe3\x77\xf9\xc3\xce\xdc\x2d\x86\xc9\x39\x5b\xae\x35\x58\x1f\xc1\x0e\xf3\xa8\xa1\xf5\x39\xb7\x91\xc2\xec\x84\x8c\xdd\xdd\x9b\x15\x59\xca\xb8\xa0\xd2\xb4\xda\x88\x7c\x1d\x0c\xa7\xd1\x9f\x99\xa4\xc5\xed\x5e\x32\xc9\x43\x27\x28\x33\x8f\xa4\xf2\xeb\x4f\xfe\x62\x53\x41\x91\x03\x26\xb6\x3e\x16\x83\x92\x5f\xcb\xb1\x18\x66\xb3\x73\x18\x1d\xe7\x9e\x50\x82\x9a\xc7\xde\x63\xba\x56\x67\xbe\x8e\x53\xe9\x8e\xbc\xeb\x12\xc4\x5a\x62\xae\xcb\xf4\x95\xa4\x1b\xdd\x15\x88\xf7\x2f\x28\x3d\x07\x16\xae\x05\xaa\xd4\x16\xa5\xbb\xc2\x50\xb4\x6f\xbb\x5c\x1b\xd9\x87\x1e\x4e\x9b\xa3\x68\x4e\x67\xb9\x77\xd4\xac\x54\xa7\x0c\xf7\x71\x45\x60\xd8\x45\xbb\xf1\x26\xbf\x78\x96\x29\x1c\xca\xff\xe3\x20\x18\xd9\xa9\xaf\x3c\x67\xc3\x0c\xee\xe9\x59\x69\x0f\xd7\x6d\x99\x91\x77\xbd\x40\x93\xe1\xcf\x41\x73\x9a\x32\x03\xf0\x79\xba\x42\xc9\x32\xd2\xb6\x08\x4e\xd9\xdb\x0b\xe5\xa6\x52\x68\xd5\x0a\x96\x34\xa1\x8d\xfd\x7f\x91\x9e\x62\xaf\xa9\x5f\x0d\x38\xec\xd2\x0e\x0e\x62\xad\x52\xb4\xbd\x4e\x07\x83\x30\x28\x59\xca\xf0\x4c\xa4\xc2\x6d\x2d\xef\x61\x9b\x27\xc4\xa4\xa6\x1c\xb0\x68\xf1\x37\x33\x5d\xdf\x02\x34\x39\x5c\x5f\x67\x22\x93\xee\xc7\x3a\x6b\x8e\xcc\x44\x66\x45\xb1\x59\x52\x9d\x8f\x9d\xb0\x1d\x69\xf4\xab\x46\x62\x90\x06\x3a\xd5\x78\xd4\x85\xf6\xae\x37\xb9\xb4\xe0\xbb\x5c\xd4\x3d\x5b\xa1\xc8\xb1\xcc\x42\x50\x7d\x5d\x2e\x8f\x65\xfd\xbc\xc0\x4
b\xe6\xde\xc5\x4a\xc5\x13\x02\x54\xe7\x45\x50\x23\xe2\x47\xd5\x96\x1d\xfd\x84\xb2\x29\xca\xc8\xd7\x6b\xde\x77\xdd\x0c\x46\x33\x11\x27\x25\xa1\x46\xfd\x0e\x91\x78\x30\x60\x46\xa7\x72\x49\xf0\x5a\x1a\x75\xce\xd0\x5c\x77\xef\xca\xc4\x22\xee\x0a\x74\x89\xda\x83\x41\xff\x99\x86\xaf\xf8\xb8\x76\xb4\xac\xe8\xa3\x4b\x6c\xdb\x14\xb2\x7b\xd0\x41\x8b\x6f\xb4\xbb\x0f\x02\xd1\x8d\x6b\x1d\x69\x2f\xbf\x2b\xc3\x76\xc4\xa2\x8d\xba\xb4\x0e\x16\xe5\x68\xce\xf6\x58\x24\xbc\x67\xdf\xa9\x27\xb8\x30\x21\x48\xce\xd7\xaf\x72\x18\x53\xe0\xb5\x53\x1e\x1a\x06\xbf\x63\xbc\xc6\x3a\xe0\x1f\x11\x3f\x55\xb3\xc8\x31\x8b\x36\x1d\xd3\xc5\xeb\x42\xfa\x2e\x4f\x5c\xad\x7a\x96\x0f\xca\x04\x1b\x32\xef\xa7\xaa\x50\x2e\xc2\x9c\x48\x60\x9d\xe5\xdb\xad\x82\xa5\xf7\x0b\xb8\x4e\x8b\x12\x7d\x97\xd3\x94\x85\x05\xd8\x63\x1d\x58\xc1\x09\xd8\x2c\x72\x0d\xb2\x2f\x1e\xfc\x2a\x2d\x16\x98\xb4\xc7\x2e\xcb\x7d\x43\x77\x6a\xcc\xc8\xdd\x52\x67\xe0\xfb\xbb\x07\xee\x1a\x8b\xff\x37\x0d\xbc\x7b\xc5\x55\x66\x2d\xf7\x10\x78\xb0\x81\x4b\x12\xc6\x17\xc1\x8f\x12\xba\xda\x8b\x17\xe4\x59\xbc\xc0\x60\x6f\x07\xc1\xa8\x29\x86\xf7\x68\x3c\x8b\xf3\x56\x1e\x36\x71\x17\xec\x1b\x24\x18\xf9\x82\xc3\xb0\x8a\x62\xf2\x0d\x1f\x03\xdb\x90\xd2\x6c\xce\x8b\xcf\x7b\x01\xec\x05\x7b\xcf\x21\x8b\x7f\xb8\x0d\x8c\x78\x76\xb4\x2e\x46\x34\x06\xeb\x36\x56\xe9\x88\x8b\xdd\xfb\xe8\xef\xd4\xf5\x54\xf1\xac\x13\xdb\xbd\x4e\xca\x98\xe2\x12\xc3\x1d\x94\xda\x27\x41\x8e\x98\x23\x34\xb6\x6f\xbb\xe0\x0e\x63\x05\xd0\xcf\x4a\x75\xd1\xff\x48\xe4\x1b\xd9\xd7\x8c\x57\x0c\xd0\xa4\x78\x70\xda\x55\x12\x8c\x6a\x17\xa2\x67\x62\xcd\xe0\x0b\x1e\x61\x28\xdb\x8f\x30\x68\x0b\x5d\xe1\x58\x99\x70\xc3\x3d\x15\x3c\x1f\x80\x56\x3f\x19\xa4\xe2\xc9\x0a\xcf\x33\x0e\xea\xba\xb1\x7c\x9d\x8f\x5b\xf9\x4a\x1f\x37\x0e\x1f\x12\x25\x3d\x15\xe2\xe2\x47\x24\x73\x5e\xd2\x07\x5e\xfc\xbf\x36\xea\x6c\xd6\xe9\x9d\xa0\x3d\x9d\xe4\xc2\x3e\x17\x42\x56\x28\x67\xa7\x42\x38\x3b\x8d\x2c\xc7\x95\xe6\xb6\xd7\xe1\x63\x6e\x52\x2b\xd2\x95\xd4\x6a\x14\x1c\x05\xda\x61\xdb\x04\xbd\x05\x47\x57\xe3\x56\xb1\xf0\xb0\xf2\xc5\xc2\xa5\x72\x28\xf2\x3a\x45\x5c\x72\xb9\x7e\x10\xfe\x2b\xab\x54\x7d\xa0\xfc\xc4\x1c\x8a\x8d\xdb\x09\xc8\xb1\x65\xcf\x01\xa3\xcd\xae\x57\x33\x64\xb8\x82\xed\x6d\xd1\x06\xf7\x7a\x5b\x6c\x5e\x1b\x18\xd7\x6e\xb7\x82\x9b\x2a\x04\x6e\xd6\xfb\xaf\x09\x8c\x63\x94\xe6\x0c\xaf\xcb\xaf\x09\x8c\x53\xdf\xca\x27\x2f\xee\xcb\xc8\xfc\xb6\x9f\xbe\xb0\xe3\xe1\xc2\xff\xdc\xfe\xed\x6f\x25\xb8\x2f\xf7\x15\xe8\xdf\xfe\x76\xbd\x17\xc0\x60\x11\x08\x9f\xcc\x94\xe0\x95\xf0\x60\xb1\x9c\x35\x18\x5a\xad\xb9\x9a\x13\xeb\x53\x0f\x19\x66\x39\x6a\xa5\xa0\xb2\x03\x9c\x94\x35\xa4\xfe\x24\x43\x79\xfa\x58\x87\x44\x49\x4a\x04\x02\xb8\x64\xab\xdc\x76\x0c\x51\xe7\xc2\xf7\x46\x46\xed\x92\x6c\x2a\xad\x1e\xfd\x41\x57\x20\x80\xca\x81\x13\x15\xed\x01\xd7\x6d\xcf\x53\xce\x72\x66\x9f\x2e\xdb\x7d\xa4\x22\x9b\x8a\xf4\x9e\x38\xba\x99\x5c\xc4\x41\xba\x61\x54\x04\x8f\x89\xc8\xb1\xc6\x2b\x1c\x8d\x18\x32\xe7\xbd\x0e\xe1\x0c\x51\x2f\x71\x7f\x58\xaf\xed\x9b\x77\x19\xfe\xdc\x13\xbc\xab\xde\xd5\x5e\x41\x73\x54\xff\x7c\x6f\xc3\xa4\x45\x41\x1f\x82\xf7\xef\xde\x66\xf8\xf3\x7b\xcf\xc7\xfb\x42\xfb\x30\x00\xe2\xdf\x37\xf5\xbe\x88\x0c\xd3\x3d\xe9\xe6\xa6\xf7\x53\xef\xd2\x50\xee\x8a\x48\x06\xa9\xb6\xa3\x7f\x60\x2f\xbf\x98\xb5\x09\x7d\xab\x17\xb8\x7f\xe0\x5b\xc8\x79\x8e\xd7\x32\x64\xd0\x0e\x48\xbc\xb8\x99\xc4\x01\xdd\x30\x19\x99\x67\x07\x66\xe2\x56\x60\x26\x76\x02\x33\x71\x23\x30\x13\xc3\xd3\xf3\xeb\xf1\xd5\x64\x7c\x12\x07\x98\x94\xa8\x60\x48\x3c\x47\xe2\x86\x6b\xe2\x96\x1f\x90\x29\x13\x43\x11\x85\x7c\x3c\x5f\xf7\x24\x8a\x88\x06\xec\xf4\x81\xb9\x4b\x9e\x26\x17\x17\x67\x93\xd3\xcb\x38\xa8\xcf\xa8\x
2a\xba\x3b\x3d\x3f\x17\x5e\x25\xee\xce\xc1\xa3\xab\x2b\xde\x44\x24\x77\xba\x82\x8b\xe4\xe9\x87\x8b\x9f\x38\xa4\x7a\xa1\xc6\x79\xdf\xcd\x9d\x2e\xfc\x78\x74\x7e\x73\x74\x16\x07\xab\x94\x6c\xd2\x3c\xa8\xe0\xaa\xf3\x71\x0e\x54\xdf\xe4\x8e\xa5\xbf\x7d\xed\x54\xc0\x54\x9a\x6d\xfd\x5b\xf4\x2c\x9e\x03\xa8\x95\x6f\x99\xb0\x7d\x22\x31\x2b\xd1\xe1\x32\xed\x00\xc5\x86\x83\xcb\x33\xe1\x8a\x0c\x3b\x97\x7f\x25\x6a\x45\x1c\xae\x7c\x8f\x68\xac\x22\x19\x33\xe0\x7b\x82\xa2\x9e\x5d\x05\x57\xda\xe5\x76\x27\xe0\x01\x07\x94\x5c\x56\x97\xed\x02\x6f\x14\xd4\x1f\x77\x64\x48\xd6\x80\xa0\x4e\xa6\xa0\xf2\x4c\x38\x21\x42\x5c\xae\xf8\x71\xfc\x8b\xb4\xe3\xba\x66\x44\x93\x36\xcb\x7e\x87\xab\xd1\x40\xf3\x9b\x3a\xac\xef\x44\xbd\x55\x5e\x07\xf6\x75\x76\x20\xd2\x8d\xe2\xc6\x66\x47\x33\x69\xe8\xf4\x97\x73\x70\x5c\xfe\x8c\xd9\xf2\xc8\xae\x0c\xc1\x21\x8e\xee\xc4\x89\x0a\xc5\x06\x63\x10\xe3\xe8\x4e\x1c\x24\x5d\x60\xbc\x78\x95\x2d\x6e\x81\xd8\x04\xaf\x4d\xb8\xaa\x65\xb4\x9b\xab\x7c\x46\x8e\x01\x53\xcc\xcf\x6a\x50\x4a\x56\xfa\x06\xcf\x74\xcc\xd4\xbb\x94\x2b\xdf\xe5\xb8\x9b\x84\xde\x3e\x0c\xbe\x97\x13\x9c\xbb\x70\xdf\xe6\x19\xab\xac\x73\xb7\xd3\x02\x1f\xff\x34\x3e\x9f\xf8\xe1\xeb\x18\x24\x25\xff\xc9\x10\x00\xd7\x63\xb2\x3e\x3c\xc6\xd2\xc9\x70\xfb\x22\xa1\xc6\x59\x3b\xc9\x4c\xfb\xd1\x1c\xeb\xb8\xb7\x5e\x68\xd0\x07\xfe\xb7\x87\x20\xba\xe4\xa1\x2e\x69\xf9\x0e\x68\x9a\xc0\xb7\x6c\x77\x06\x3c\x79\xab\x94\x98\x17\x6c\xdc\x07\x6c\xd4\xdd\x53\xd0\x7e\xd1\x3a\xb8\xcc\x51\x5a\xa2\xde\xa6\x44\x3d\xde\x43\x8f\x92\x9e\x92\x4c\x7b\xaa\x8d\x32\x68\xba\x7b\xb5\x77\x51\x3c\x3c\x6d\x7c\xee\x45\xbd\x3c\x09\xc7\x94\x30\x81\xc2\xb5\xa7\x8e\xa6\x01\x4f\xcd\xa1\xd6\x1e\x39\xe6\x9e\xd9\x04\xa3\xd8\x90\x11\x7d\x20\xa8\x38\xe9\x30\x12\x3b\x8b\x2b\x53\xe7\x76\x48\xe9\xe6\x1a\x40\x49\xfa\xbe\x93\x07\x73\xa9\xbf\xdc\x9c\x9e\xb4\xa7\x7d\x7e\xf4\x71\x0c\x46\x69\xe3\xf6\x0e\x67\x01\xd4\x6f\xd3\xec\xb8\xdf\xcb\x50\x39\x2b\xf0\x3d\xca\xee\x1f\x6b\xf8\x52\xb0\x05\x66\x79\xf9\xa8\x6b\x12\x23\x44\xa9\xcc\x92\xe6\x82\x63\xae\x9c\x1c\xf8\x14\x96\x49\x57\xb0\x9d\xce\xb0\xa3\x85\x94\x43\x6f\xa9\x4a\xbd\x21\xf2\xab\xb8\xcb\x18\x7b\xe1\xa1\x95\x12\xf0\x88\xb1\x74\xb6\x14\x8b\xb6\x54\xa4\x27\xcd\xb2\xba\x54\x05\x66\xe8\xcc\x7a\xfd\x03\xfd\xd8\x87\x6a\xd4\x08\x55\x87\x8e\x61\x41\x07\x3d\x36\xa1\xc0\x48\x2c\x43\x66\x08\x92\x97\x81\x30\xf9\x3e\xe3\xd7\xa3\x11\xa7\x25\xdb\xad\x5c\x71\x73\xa7\xd6\x26\x55\x1a\x73\x3b\x0e\x86\x16\xd7\xbc\x01\xe8\x0e\x39\x4d\xa1\x15\x88\x38\xf3\x44\x1d\xea\xff\xed\x45\x91\x45\x95\x8a\x47\xbc\x47\xcb\xf4\x33\xa6\x85\x03\xd2\x92\x52\x2b\x28\xa4\xac\xf8\x49\xf5\x1c\xdf\x45\x42\xfc\xaa\x2a\x48\xc9\xb1\x30\x34\xc5\x6e\xd0\xb1\xf6\x8e\x35\x4d\xf4\x93\x04\xd5\xb8\x30\x18\x98\x37\x94\xa5\x3a\x67\xe0\x8e\xc5\x9b\xa3\xe2\x49\x12\x4a\xe4\x0d\xbb\xd3\xf4\xf3\xdf\x09\x8b\xa2\x8b\xf4\xf2\x56\xef\x37\xc7\x94\x75\x87\xdc\x8f\x64\x8e\x07\x27\x41\x5c\xfb\x2c\xb2\xe8\x6e\x8e\xbf\xd4\xf7\x61\xd6\xbb\x99\x16\x2f\x19\xb1\x36\x67\xe1\x82\x47\x1b\x7f\x3a\xa8\xea\x39\x80\x24\x49\x92\x4d\x74\x71\x33\x11\x9d\xda\xbc\xfe\x45\xd7\x7f\x82\x29\x5a\x42\x84\x73\xf3\x27\x2a\xbd\x77\x7e\x8b\x8e\x3b\x3f\x1a\xdd\x59\x90\x27\x37\x57\x47\xfc\x0f\x10\x2f\x42\x29\x5f\xec\x70\xf1\x54\x86\xa1\x16\xa1\x2d\x9e\x63\x2f\xc2\x95\xb3\xe5\x58\x54\xaf\x6b\x9f\x2f\x10\x5f\xad\xc1\x00\x3b\x5e\x5e\xf6\x2f\xc7\x50\x89\x85\xdb\x11\x17\x7c\xc8\x04\xaf\xe5\xd2\xf0\xa2\xdd\xce\x58\x36\xe1\x06\xc2\x43\xa9\x9d\x39\xd2\x3f\xfc\x93\xf1\xb9\xc9\x22\x40\x6a\xb1\x81\xf8\x64\x06\x75\xb1\xdb\x45\x69\x0a\x00\xfd\x29\x36\xb6\xdb\x50\xd8\x98\xec\x2b\xe5\xdf\xed\xb8\xec\x0a\xc1\xf4\x88\x4a\xb7\x0b\x69\x34\x9c\xd6\xae\x2a\
xcd\x7a\xa1\xd2\xed\xa8\x17\x0a\xa0\xa8\xff\x7a\x1c\xc7\x5f\x79\xa1\xed\x53\x01\x01\xc7\xee\x76\xfa\x88\xaf\x49\x0d\xb1\x72\x45\x24\x8f\x31\xce\x79\xf9\x44\x9c\x17\x26\x9f\x16\x80\x2b\x0f\x73\x6d\xbe\xcd\xef\xd5\x23\x0c\x09\x0d\x6a\x8b\xd8\x7e\xb0\xc7\x44\x9b\x0e\xb4\x67\x3c\x46\x26\xd5\x7f\x18\x0f\x2e\x9d\x25\x5a\x59\x77\xea\x88\x0d\x86\xd7\x42\x78\x35\x72\x4d\x67\x4e\x85\xe6\x50\x47\x5a\x20\x52\x45\x5a\x2e\x62\x32\x38\xe4\x2e\x72\xac\x09\x6a\xb3\xac\x45\x82\xac\x71\x06\x38\x3a\x88\x58\x3c\x75\x1c\xd4\xb8\xdc\xe6\xbb\x9f\x30\x52\x53\x5c\xb2\x55\xde\x7e\xf5\x82\x08\x9f\x79\xed\x35\xba\xdd\x12\x95\x48\x11\x1c\x62\x91\x82\xaf\x7e\x71\x0a\x97\x21\x03\xdb\x2d\x8b\xd0\x6a\xcd\x1e\x43\x2d\x57\x84\x04\xc4\x2c\x62\xe8\x0b\x0b\x65\xf2\x40\xf1\xfe\x15\x88\xd9\x2d\x3e\x0c\x78\x9f\x41\x1c\xf0\xb2\x40\xdc\x15\xe8\xbd\x62\xbe\xf7\xd9\x1d\x19\xc7\xf3\x9e\x83\x66\xe4\xfb\xc2\x0a\x16\x18\x83\xbf\x78\xa7\xf1\x39\x09\x52\x7c\x73\xd8\x2a\xa9\x25\x47\xbf\xc8\x28\x80\x00\x64\x7c\xe4\xae\xb4\xe8\x0b\x93\x59\xde\x32\x37\x41\xf1\x54\x7c\x67\x9b\x44\x76\x39\xde\xe9\x3e\x95\x6e\x5e\xae\x73\xcc\xc2\xa0\x17\x80\x68\x4e\x8b\xb1\x7b\x3b\x2d\x2d\xd8\x26\xa0\x9e\x00\x87\x2f\xf3\x51\x79\x48\xb9\xcc\xc9\x62\xe4\x01\x6d\x34\xf4\x7b\x16\xaa\x2c\x2c\x48\x3f\x8d\xd8\xc3\xf3\x90\xf4\x93\x64\x11\x49\x93\x95\x46\x32\xce\xe0\x15\xbd\x3b\xf4\x75\x6a\xd9\xe8\x7c\xd5\xda\x01\xb5\x7c\x59\x43\xd2\xa4\xd7\xd9\xd0\xc5\xcd\x64\xd4\x5c\x09\xfc\xd2\x19\x2b\x93\x83\x7e\x79\xaf\x7c\xf1\x77\x52\xaa\xe1\xdf\x55\x4e\xe7\x1e\x4b\x00\x69\x1a\x02\x3c\x4f\x52\x6a\xc7\xd3\x3a\x64\xa5\x31\x0c\x1b\x8f\xbd\xf9\x6f\xe6\x78\x01\x9f\x8c\x09\x59\x99\x16\x2d\x9b\x7d\x60\x5c\x5a\xb9\x14\x28\x09\x8f\x40\x55\xfd\xd3\x83\xa6\xd6\x99\x7a\xcd\x21\x0d\x77\x7c\xa1\x60\xb6\x5b\x7d\xfb\xc0\x59\x91\x89\xdf\xf2\xeb\xa0\xbe\x5e\xe0\x8b\xfa\x08\x76\xea\xb6\xaa\xa5\x20\x90\x6c\x4a\xe2\xc2\x4e\xaa\xda\x52\xdb\x46\x21\x49\xa4\x73\xb0\xd7\x34\x87\x01\x30\x89\x49\xfe\x08\xd3\x9f\xcc\x6a\xc2\x64\x14\x94\x2b\x8f\x18\x4b\xba\xc9\x9f\x71\xa8\x44\x98\xb8\x16\x55\x86\x52\x28\x7c\xd6\x84\xc7\xb9\x84\x2d\x5c\x68\xf1\xf5\xd0\x2d\x96\x85\x71\xe8\xd8\xe7\x88\x65\x9c\xf3\x81\x43\xa2\x71\x5d\xdc\x70\x70\x19\xd3\xfe\x2d\x8c\x49\x87\x75\x23\xde\x57\x25\x1b\xe2\x75\x52\x8b\xd7\xc4\x3c\xcc\xe0\x69\x16\xc4\xba\x5a\x22\x80\x38\xd4\xff\x6f\x21\x00\xdd\xb0\x9d\x18\x70\xa0\xc2\x19\x3c\xa6\xe0\xed\xf6\xe5\x1b\x79\x71\x33\x79\x6e\x1f\x39\x81\xfb\x9a\x7d\x14\x7a\xa4\x76\xaf\x6d\x6d\x23\x2f\xb6\xbd\x6f\xf9\x36\x7a\x66\xe3\x5c\xb0\xa8\x84\x3a\xac\x87\x75\x98\xab\xb3\x72\xf5\x03\xc3\xee\x82\xb2\xa9\xb2\xc1\xf5\x87\x4a\x0e\x91\x77\x10\x3b\xa3\x8c\x9a\xaf\x80\x85\xaf\xcb\x5e\xd6\x4e\x5e\x06\xf4\xf2\x86\x44\xfe\x95\x3c\x89\x88\x7d\xf5\x0b\xf2\x85\xd0\x3f\x2a\xcf\x2b\x64\x44\x0a\x38\xe2\x7b\xf1\x57\xa2\xfe\x8f\x18\xbd\x16\x24\x39\xf4\x3d\x5e\x26\x96\x9d\xc9\xc7\x46\xcc\xdf\x89\xf9\xcb\xf9\xf8\x2b\xd3\xab\xad\x3c\xb8\xdf\x66\x4a\x4f\x95\x31\xdb\x4a\x44\x30\x09\x92\x90\xd9\x4f\x55\xd1\xd5\xe3\x2d\x9a\xf6\x1d\x8b\xde\x2d\x9a\x72\x0e\x74\x8b\xa6\x8d\xd2\x5a\xe0\x14\xc3\x73\xd4\xef\x17\x6b\x07\x90\x24\x4c\x65\xd1\x16\xd7\xc0\x01\x88\x56\x29\x9b\x2d\xc3\xd4\xbc\x25\x40\x38\x86\x5b\xef\xee\x38\xea\x00\x89\x7e\xa5\x98\x84\x86\x4d\xed\xb0\x40\x79\x32\xe4\x37\x4d\x06\x5d\xe6\x4e\x9f\x75\x94\xd5\x36\x33\x50\x0b\x0b\x46\x01\xed\x90\xe7\x9b\xd6\x12\x92\x78\xad\xc3\x62\xee\x82\x34\xb9\xdc\xfb\xcb\xbe\xe9\x34\x50\x89\x35\x9b\x26\x02\xa1\x2d\xfb\x6d\xce\x46\x3d\x97\x04\x41\x99\xaa\x05\x8d\xef\xf8\x80\x80\xdf\xfc\x16\x21\x69\xbb\x7f\xe9\x8b\x64\x00\xb1\x27\x73\xd9\x60\xc0\x44\xc2\x1f\xb2\xdd\xf6\xdf\xaa\x0b\x2e\x37\xab\xb8\x88
\x6c\x36\xac\x43\x3d\x34\x88\x3d\x9e\x5f\xe6\xc6\xba\x23\xbf\xd9\x1f\x91\xe0\xec\xef\xf1\xf8\xdd\xcc\x00\x9e\x1f\x7d\x1c\x77\x41\xb1\xba\x39\xc5\x90\xbb\x06\x61\x2d\x95\xf9\x46\x68\x11\x5d\x4d\x67\x35\x98\xbe\xf8\xeb\x02\xc5\xaf\xc9\x04\x94\xb7\x5f\xf5\x53\x8e\x4c\xab\x97\xb9\x47\xad\x5e\xfb\x26\x5e\xbb\xdd\x0a\xae\xaa\x10\xc0\x3b\x4f\x32\x9a\x35\x5d\x2b\x8b\x99\xf4\x5a\x32\xbf\xeb\x84\x34\x65\x87\xd7\x92\x02\xb5\xbc\x96\xdc\x7c\xfb\x73\xc3\xdd\xac\x9b\x03\xed\x9a\x63\x54\x17\xe5\x24\xa1\x18\x4b\x1c\x04\x1d\x8e\x38\x7a\x60\x2f\x77\xc4\x59\x7e\xdb\xf8\x76\x7f\x89\xd2\x4c\x3a\xe2\x2c\xbf\x7d\xef\x69\x7c\x5f\x18\x16\x5d\x3f\x9d\x0a\xc0\xdc\x3f\x2d\xbe\xeb\xf0\xc9\x0c\xbc\xdb\x05\xab\x12\x09\xcf\x77\x65\x65\x99\x9c\x4e\xce\xc6\x71\x10\x35\x06\x0a\xeb\x1c\x28\xce\x08\x85\x07\x8f\xed\xa7\x43\x5a\x7e\x3a\xc4\xf1\xd3\x21\x0d\x3f\x1d\xe2\xf5\xd3\x21\x0d\x3f\x1d\xe2\xf1\xd3\x21\x5e\x3f\x1d\xd2\xe9\xa7\x43\x3a\xfd\x74\x88\xf3\xa6\x1f\xb6\x3c\x61\x32\xeb\xa8\xc9\x1c\x94\xea\xe6\xaf\x58\x08\x53\x70\x09\xe4\x1b\x72\x15\x0d\x33\xa8\xae\x64\xe7\x49\xd6\xf6\x40\x99\x3f\x6b\xc7\x6c\x98\xe6\xf4\xdb\x74\x0b\xeb\xb2\x93\xef\xf8\xef\x61\xd2\xd4\x3b\x28\x4d\x9a\xf3\x3f\xcc\xa4\x39\xff\x03\x4c\x9a\xb3\x48\x60\xa8\xcf\x94\xf9\xcc\x77\xda\xfb\x1f\xb6\x17\xb6\x69\x07\x5d\xd6\x76\xd0\xa5\xb6\x83\xce\xed\x4f\xba\x16\x69\x87\xb1\x42\x9d\xce\x40\xa4\x79\xdc\x6d\x3f\x54\xa0\x87\x9e\xb2\xe7\x6c\x88\x0a\x4c\x8e\xf7\x77\x96\x1a\xcb\xaf\x90\x1a\xdb\x2f\x47\xfc\x56\xf9\x46\x53\x5f\xbf\x7c\x63\x65\x66\x55\x32\x8e\xbc\x2d\xda\x25\xe3\x64\xdd\x32\x4e\xcd\x93\xfe\x9e\x32\x4e\xf6\xf7\x78\xe0\xf7\xf7\x96\x71\xf4\x52\xbd\x4c\xc6\xd9\xbc\x5c\xc6\x21\xaf\x97\x71\xb2\x2a\x9c\xb7\x84\x9c\xbf\xfb\xe3\x28\x1e\x1d\x52\x3d\x30\x58\xae\x1f\xb9\x98\xa3\xc5\x1a\x6c\x5e\x87\x38\x18\x42\x89\x20\xda\x9f\x58\x46\x0a\x8b\x77\x7f\x69\xfd\x86\x84\xd2\x91\x0d\xac\x71\xc1\x56\xd0\x9e\x67\x80\x53\xf5\xf8\xed\xd1\x64\x1c\x07\xc2\xbc\x90\x32\x61\x19\xb6\x46\x24\x03\x63\x62\x35\xc8\x46\xa5\xf3\x5a\x73\x1c\xe4\x34\xcd\x1c\x08\x3b\xa5\x5c\x9e\x3c\x9d\x5c\x5d\x5c\x8a\xfc\x04\xa7\x93\xf1\x47\xeb\xbd\x10\xcc\xd0\x4a\x3e\x3c\x22\x6a\xe5\x63\x1f\x8d\xb7\x3e\xe4\xf3\xa0\x6a\x98\x48\x3a\x59\xcb\xe7\xa5\x2f\x7f\xd1\xe9\xde\xca\xf5\xa3\x79\xae\x71\xfa\xc6\x7c\x12\xa9\x6f\xe0\xf9\xd1\x4f\x77\x67\xa7\xd7\x93\xbb\xef\xaf\x2e\x6e\x2e\x65\x76\x36\xd8\x8b\x72\x5c\xb2\xfd\x45\x41\x37\x6b\x0d\x72\xfe\xe3\xb5\xac\xdd\xcf\x31\xf9\x24\x4b\xf5\x5b\x22\xbc\x54\x0e\x58\x34\xa5\x8b\xeb\x46\x1a\xd3\xb1\x9e\x1f\xb1\xe6\xd8\x7a\x99\xa4\xb1\x06\x2a\xd9\x9d\x05\xa0\x42\x38\x84\x40\x76\xf1\xdd\x77\xd7\xe3\x49\x1c\xc8\xbd\x0f\xe0\xe5\x85\xbc\x9d\x8d\x83\x35\x95\xda\x70\x67\xa6\xb9\x96\x21\xd1\xcd\xb9\x60\x92\xca\xc9\x65\xd4\xfc\x3f\xf8\x70\x71\xf2\x8b\x32\xdd\xc9\xc0\x8f\x43\x19\xba\x1a\xbf\xf4\xd1\x62\x6d\xfb\x77\x53\x89\x4b\xe4\x54\x4c\xd5\xac\x3d\x17\xd8\xf7\xba\xe1\xea\x75\x7f\x06\xd0\x5d\x6d\x9d\xac\x4e\x65\x45\xbf\xd5\xa9\xf1\xe4\x27\x56\x81\x32\xb4\xc9\x78\xfc\x56\x1e\x20\x19\x44\x9d\x0c\x4d\xf0\xbc\xb3\x52\xe2\x86\x25\x55\xe1\x64\xbe\x04\x4d\x38\xba\x5b\x17\x74\x86\xca\x52\xe6\x2b\x13\x6d\x14\x68\x5e\xa0\x72\x59\xbb\x3c\x2a\x08\xa0\xb2\xe5\x11\x5f\xb6\x3c\xf5\x51\x67\x0a\x79\xcf\xe8\xcc\x4d\xbb\x53\x1a\xc9\xad\x3c\x9c\x45\x06\x8f\x66\x91\x44\x31\xce\x4f\x05\xd1\xd1\x4e\x67\x7a\x9d\x25\x95\x39\x24\xb1\xa7\x14\x16\x09\x4e\x92\xa4\x6e\xae\x7e\x7e\xf9\x5a\x27\xde\x09\x41\x3c\x1c\xbd\x74\x43\x9c\x85\x6f\xb4\x25\x4b\xc5\x53\x0e\xed\x17\x87\x35\xd2\x01\x2e\xae\x58\x19\x3e\x4c\xac\x15\xdd\x99\x55\x1a\xcf\x43\xaa\x82\xac\xa8\xca\x28\xad\x9f\x17\x26\x9d\x91\xdf\xe2\x8d\x18\x11\xfe\xb7\xdd\xa
6\x91\x7c\x68\x5c\x99\x6a\x6f\xc5\xa3\x72\x78\x1a\x82\x88\xd1\xf5\x5e\x01\xa9\x79\x4c\x41\x38\xab\x9a\x77\xaf\x7d\xe2\x50\x05\xa2\x92\x16\xcc\x8e\xf9\xb4\xee\x00\x6f\x87\xd3\x7d\x74\x3b\x9c\x56\x9e\xbb\x5a\xe9\x2a\xa6\x96\x59\xbe\x99\x8d\xc4\x6c\x98\x59\x6b\x5d\x7a\x30\x55\xd9\x48\x5e\x9f\x04\xac\x66\x0b\xa0\xf3\x68\xcc\xe7\x61\x10\x35\x60\x5d\xf2\xd3\x3a\x6e\xe3\x76\x45\x3b\x53\x98\x21\x2e\x56\x99\x46\x2b\xdb\x6f\x59\x21\x56\xdb\x43\x79\xf7\x59\xd7\x19\xbf\x3a\x6c\xeb\x5c\xdc\x33\xf7\x87\x5e\xc3\xba\xb8\x18\xd1\x69\x89\x54\x3a\x14\x64\x25\x2a\x92\x92\x35\xce\x02\x30\x92\x0f\xf6\xd5\xae\xb3\xf2\x4e\xac\x05\x28\xde\xee\xd3\xe5\x49\xf0\xa7\x60\xaf\xd0\xa8\xe4\x37\x78\x53\x9d\x36\xcc\x3e\x85\x5d\x2a\x4b\x63\xed\x93\x44\x51\x08\x1f\xf1\x58\xa7\x0b\xf4\xcb\x85\xe5\x5c\xd9\xa8\x37\xb9\xb6\xdc\xde\xd5\xd2\xbe\x68\x00\x4e\x92\x8b\xed\xf6\x63\xca\x96\xd1\x2a\xfd\x12\x36\xa2\xd0\x2d\x20\xd8\x99\x09\xc3\x86\x32\x91\xc4\x72\xf8\xaf\x19\x52\xbd\x26\x76\x0a\x07\xd9\x82\x77\x19\xba\x02\x85\x25\x81\x10\x03\x51\x54\xbf\x2b\xe3\x87\x4b\x3e\x5d\xb6\x27\x91\x1d\xa2\x4e\xea\x48\x7c\x4f\x78\xec\xa1\x7d\x03\x6f\x2f\x81\xfd\x16\xb3\xbd\x5e\xfd\x24\xd1\xb9\xdf\x2c\x8e\xf5\x3e\x71\x2f\x26\xf5\x31\xbb\x75\x7e\x99\xac\xec\xd3\x51\xfb\xe4\xf5\x93\x04\x6b\xb7\x2f\x2d\x87\x86\x56\x70\x47\xfb\x8b\xc1\x80\xbd\x73\xce\xf9\xed\x70\xaa\x5b\xa8\x4b\xde\x0f\x81\xb3\x85\xed\xd3\x6e\x85\x81\x88\xeb\xc5\x10\x8c\xf4\x0d\x4e\x99\x38\xcd\x99\xa7\x2c\xf6\xf7\x47\xc0\x3b\x05\x77\xf6\x25\x1f\xcf\x7b\xb7\x0d\x51\xe8\xd5\x0d\x1b\x60\x7b\x07\xd3\xed\xb6\x39\x43\x5e\x0a\x5a\xcb\xd4\xec\x55\xe5\x3e\xd0\xf5\x9e\x4b\x18\x67\x15\x8c\xfc\xa6\x66\x6f\x85\x16\x19\xb2\xaa\x3d\x81\x60\x00\x46\x24\x21\x2e\x2f\xad\x99\x10\xda\xd3\xb9\x99\x15\x55\x92\x9a\xec\x14\xbe\xd9\xe3\x55\xcb\x02\xcd\x4d\xd9\x9b\xaa\x7e\x32\x53\x1b\x07\x60\x00\xc0\x08\xd7\xd7\xfd\xb9\x2b\xc0\x81\xc3\x10\x1b\x8f\x96\x5a\xb8\x53\x39\x8f\x2d\x69\x4f\xe5\xea\xaf\xed\x5b\x79\x24\x55\x01\x00\xb1\xa7\x10\xc4\xa1\xaf\x18\x6a\xef\xd7\x32\xd4\xa2\xa9\xd6\x1c\x80\x08\x79\x0e\x5d\x89\xb5\x21\x9a\x76\x74\xff\xa2\x26\x55\x03\xc6\x81\xd4\xea\xc8\xd7\x6c\x27\xbf\xd5\x3e\xaf\x69\xa4\xd5\x3c\x4f\xc6\x41\x99\xf4\x86\xef\x7e\xe3\xd5\x7e\x57\x82\xd2\xa2\xc9\x52\xf7\xea\xd8\x77\xcc\x58\x2a\x48\x7e\x9b\x75\x07\xb7\xad\x3b\xb6\xb4\xe0\x7b\x79\x47\xdd\x5f\xd5\x71\x6d\xc4\xff\xea\x8e\xdb\x96\x88\x53\x7b\x45\x7a\x02\xfc\x4a\xeb\x0d\xb6\xad\x37\xe4\x0f\xb7\xde\x60\x61\xe4\x20\x4e\x1e\x02\x2b\x89\x5f\x1a\x39\xba\x3a\xf4\x79\x20\x34\x9e\xf4\x59\x46\x5a\xbf\x56\xf6\x3f\x45\x00\x09\x27\x80\xf5\x5e\xdd\x92\x69\x77\xe8\x3d\x86\xfa\xc1\x5a\x19\x8c\xf0\x87\x86\xde\x93\xee\xd0\xfb\x95\xc7\xd2\x19\xcd\xb9\xfc\x74\x0f\x49\xf3\x49\x01\x96\xde\xfb\x1e\x14\x10\xc5\xee\x73\x02\xa6\xc8\x7e\x4c\x40\x14\x76\xa6\xda\x67\xe9\xbd\x6d\x15\xc1\x96\x55\xe4\x25\x76\x0f\xe8\x7b\xbe\xb5\xf3\xee\x86\xd6\xad\x3b\x66\x88\x17\x18\x42\x5a\xc6\x13\x59\x70\x77\x73\x16\x07\xef\x7b\x39\xee\xbd\xef\x99\xaa\x1d\xe9\xf9\xf9\x62\x4c\x61\xcf\x2d\x5c\xe3\x3c\x6f\x97\xf2\xde\x45\x22\xff\x67\x6d\x20\x35\x88\x1a\x94\x7a\x35\xf6\x7d\xaf\xf9\xd2\xab\xb1\x16\xa5\x5e\x63\x08\xab\x99\xa2\xb1\x7f\x54\x2a\xa6\xad\xad\x72\xa7\xcf\x84\x12\xf6\x1b\x29\xce\x7e\x73\xc2\xdc\x5d\x39\xe8\xb0\xa6\xb8\x9e\x64\xa0\x16\x90\xf5\x22\x90\xd4\x3b\x60\x0a\xf3\x76\x96\x47\xcd\x56\x69\x93\x2d\xdd\x0e\xa7\x70\xb9\x3b\x13\x89\xf5\x72\x20\x9e\x87\xb9\xec\x67\x96\x04\x37\x67\x41\x92\x24\xb9\x98\xa1\xb0\x18\xd1\xa8\xc6\x21\xfd\xf7\x28\x6d\x50\x9d\x5c\xbf\x61\x00\x00\x4c\x93\xf4\x36\xad\x05\x47\x65\x0c\xd1\xd1\x2e\x44\x3e\xa5\xb5\xf3\xd9\x55\x00\x33\x0b\xde\x97\x6b\x3f\xad\xa4\xca\x
ae\xc2\x03\x35\xcb\xdc\x78\x72\x3e\xea\xba\x0c\xc0\x7e\xe6\x8d\x21\x19\x0c\xfa\x9b\x8e\x14\xe8\x4b\xf1\x08\x11\x0a\x97\xd2\x90\xe0\x15\xe2\xb4\x3a\x9d\xeb\x4b\xc3\x36\x6f\xb4\xe7\x7e\x32\x6e\x67\x10\xb6\xa7\x5e\x36\xa6\xde\x82\x4e\x2b\x15\x0d\x68\xa5\x74\xf7\xe7\x2a\x2d\x41\x35\x52\x5e\xbf\xf5\xa0\x0b\x58\x58\x98\x0c\xe7\x20\x9e\xcb\x1c\xe2\xe9\x57\x98\x0f\x38\xa1\xf0\x19\x03\xc4\xa3\x5d\x1e\x61\x16\x12\x58\x28\x63\x0c\xcc\xa5\xc9\x6b\x99\x84\xa9\x46\x3b\x52\xa3\x5d\xfd\x96\x93\x85\x80\x20\x96\x69\xaf\xb4\x88\x45\x8d\x30\xc5\x31\x7e\x96\x88\xac\x99\x9d\x71\x33\xe2\x25\xda\xa5\x73\xd0\xa4\x17\xd0\xc6\xc3\x9e\xf2\xe8\x8e\x99\x06\x8e\xe9\x6a\x9d\x23\xbe\xe3\x70\x09\x67\xb0\x00\xd5\x68\x39\x18\xcc\x0e\x45\x73\xde\x60\x9b\xcd\xae\x60\x9b\x4d\x08\xa0\x1e\x8c\x2d\x8f\x61\x7d\x43\x99\xfa\x3a\x6f\x2c\x23\x4c\x85\xd0\x43\xc4\x53\x08\xcd\x76\xd4\xb2\x08\x84\xcc\x85\xb0\x5e\x6f\xb9\x59\x56\x2f\x3d\x16\x8f\x65\xe5\x7c\x70\x79\x47\xa3\x50\xb0\x07\xb1\x5b\xee\xe5\x68\x41\x65\x7e\x1f\xe2\x8b\xff\x95\x22\xa9\x78\x9b\xf7\x00\x54\xfa\x0d\xe5\x3a\xaf\x6b\xab\xf5\xe6\xd5\xab\x6e\xdd\x1b\x5d\x6c\xb5\x3e\x04\xb0\x38\x0c\xeb\xe4\xa9\xea\x99\x5f\xab\x27\x99\x51\x20\x6e\x39\x85\x69\x74\x70\x79\x00\x3f\x5a\xf6\xe2\xd9\x64\xda\x96\x01\x14\xa9\x5e\x26\xce\x83\xc8\xf5\x2a\x8b\x85\xd5\xbb\xde\xdc\x81\x96\xf2\x53\xaf\xc7\xf3\x2f\x1d\x57\xe9\x60\x90\x86\xa0\x82\x6c\x87\x10\xff\x82\x0c\x64\x46\x88\x87\x65\x82\x2d\xef\x30\x7e\xc6\xdd\x2c\x62\xe6\x21\x3a\x17\xca\x9f\x3b\x8c\xec\xc8\x1d\x46\x5e\x26\x9c\x13\x9d\x3b\x8c\xd4\xc2\x39\xfb\x0d\xc2\x39\x27\xe5\xdd\xb9\xbf\x48\x33\xf7\x17\x7d\xcd\xcb\x64\xed\x67\x4b\x1b\xaf\x90\x09\x29\x4f\xa7\xb4\xe2\xeb\x96\xb4\x3f\x31\x75\x8e\x64\x9d\xd6\xc5\xcf\x8a\xd6\xa2\x61\xe6\x19\x4d\xe5\xbc\x20\xf7\x7c\x6e\x2b\xb4\x23\xb7\xd5\x9b\xb2\xf7\xff\xa7\x9f\xd3\xeb\x59\x81\xd7\x4c\xe7\xb7\x2a\x7b\xb2\xc7\x48\xfd\xdf\x5b\x6d\x4a\xd6\xbb\x47\x3d\x4c\x66\xf9\x26\x43\x59\xef\x1e\xcd\x69\x81\x7a\xfe\x66\x22\x93\xfd\x4a\x4c\x43\x06\x96\x59\x51\x4d\x22\xa5\xa6\xfc\x15\x49\xbc\x64\xb7\xc3\xe9\xbb\x6f\x06\x03\x76\x7b\x30\x7d\xf7\xd7\xed\x56\xc4\xda\x0b\xcb\x92\x78\xec\xf3\xf6\x60\xca\xeb\xbe\x99\xbe\x3b\xd8\x6e\x79\xf9\xfb\xe4\xcf\xaf\x9f\x53\xca\x7a\x39\x4a\x4b\xa6\x27\xf5\xf9\x20\xfa\x6b\x74\xd0\xbb\xdf\xf0\xf2\xb2\xec\xb1\x65\x4a\x7a\x9f\x05\x9e\x05\xa0\x12\x29\xc5\x6f\x18\xce\x93\x12\xb2\xe8\x28\x47\x05\x4b\x52\xc8\xa2\x0f\xe2\x35\xca\x24\x87\x2c\x3a\x4e\x0b\xe1\xb8\x94\x2c\xf9\x0f\x9a\xe7\xe9\xba\x44\xc9\x0c\xb2\xe8\x44\x49\xc3\xc9\x06\xb2\x48\xe6\x5d\xcf\x20\x8b\x2e\xe5\xb5\x7c\x72\x07\x59\x74\xad\x15\xe2\x64\x01\x59\x34\x49\xef\x93\x15\xff\x5f\xfa\xaf\x25\x73\x8e\xdd\x4f\x15\xfc\x47\x28\x3d\x7c\xc1\xe8\x1f\xde\xbe\xfd\x53\xaf\xa4\x9b\x62\x86\x3e\xa6\xeb\x35\x26\x8b\x9b\xab\xb3\xe4\x5e\xcf\x39\x5a\x61\x12\xfd\x5a\x46\xab\x74\xfd\x7f\x02\x00\x00\xff\xff\x83\xb8\xb6\x8d\x84\xc5\x00\x00") -func pagesAssetsJsBootstrap311MinJsBytes() ([]byte, error) { +func pagesAssetsJsBootstrap400Beta2MinJsBytes() ([]byte, error) { return bindataRead( - _pagesAssetsJsBootstrap311MinJs, - "pages/assets/js/bootstrap-3.1.1.min.js", + _pagesAssetsJsBootstrap400Beta2MinJs, + "pages/assets/js/bootstrap-4.0.0-beta.2.min.js", ) } -func pagesAssetsJsBootstrap311MinJs() (*asset, error) { - bytes, err := pagesAssetsJsBootstrap311MinJsBytes() +func pagesAssetsJsBootstrap400Beta2MinJs() (*asset, error) { + bytes, err := pagesAssetsJsBootstrap400Beta2MinJsBytes() if err != nil { return nil, err } - info := bindataFileInfo{name: "pages/assets/js/bootstrap-3.1.1.min.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + info := 
bindataFileInfo{name: "pages/assets/js/bootstrap-4.0.0-beta.2.min.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}

@@ -170,42 +171,62 @@ func pagesAssetsJsGoogleJsapiJs() (*asset, error) {
 	return a, nil
 }

-var _pagesAssetsJsJquery1102MinJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff [… remainder of the escaped gzip byte literal for _pagesAssetsJsJquery1102MinJs omitted; the generated byte data continues …]
x37\x69\x76\x4b\x5d\x43\x10\x14\xb9\xbe\x90\xae\x00\xa0\xf2\x3c\xa7\xd2\x40\xef\xb7\x30\x8f\xeb\x5a\x52\xb5\x90\xcf\xec\x22\xcf\x43\xee\xd3\xa7\xae\xf2\x53\x77\xba\x74\xc0\x73\x06\x56\x0a\x32\xc7\xc4\x2e\xb3\xee\x03\xcf\x74\x4a\xf6\x9a\x19\x96\x1b\x43\x3d\xb5\xe6\x47\x18\x2f\x72\xcf\x73\xd6\x9d\xa1\xe4\xde\x17\x2c\x29\xd3\xe2\x9e\xcb\xec\xe6\xd9\x14\xd8\x92\x9d\x41\xb6\x7a\x3f\x19\x4e\x5b\xed\x08\x46\xd2\x2e\x87\xd1\x8e\xcb\xc9\xc7\xdd\x4d\x6c\xe9\x55\xc6\xaf\x56\xd8\x2c\xa7\x2a\x2e\x4b\x4e\x24\x60\x00\xa0\x4a\x68\x1e\x5a\xb2\xad\xb0\xe7\x19\x1b\x65\xd3\xc4\x99\x19\x92\xab\xe0\xd3\x85\x3b\xac\x34\x52\xef\xf6\x41\x1b\xd4\x75\xe4\x0c\xa3\x51\x56\xd7\x89\x44\xbf\x77\x6a\xb0\x26\x2a\x16\x30\xbf\x12\xa8\x38\xd9\x59\xdf\xda\x95\x83\x0c\xed\x9b\x05\xbc\x41\x04\x16\xa9\x07\xba\xa6\x99\x3c\x6e\x12\xf2\x1c\x38\xa3\x4e\x57\xf3\x1f\x83\x77\x87\x6b\x6c\xb8\x99\x9c\x1d\x4c\x72\xf4\x9b\x4e\xcf\xea\x83\x86\xd5\xf3\xd0\x8e\x6e\xac\x36\xc9\x86\xee\x5a\xf3\xb4\x9d\xde\x3e\x18\x84\xf0\x56\xe1\xc0\x95\x7c\x9a\x42\x5e\x9c\x72\xcb\xca\x39\xe4\x01\x67\xf7\x57\x47\x5f\x2d\xe7\x86\x9c\x7b\x10\x78\x1e\xc7\x84\xfb\x59\xf5\x4e\x75\x8d\x16\xe1\xb3\xe0\x39\xb1\xe6\x80\x5a\xf8\xdb\x4e\xff\xa5\x64\xd4\x7a\x0d\xcf\xf6\xe0\xee\x0f\xf7\x60\xa0\x2e\x08\xf5\x4b\x56\x1d\x72\x41\x05\xe1\xbe\xba\xb3\xa2\xae\x91\x79\xa4\x19\x58\x00\x02\xcd\x57\xc2\xad\xf0\x3d\x3b\x43\x02\x92\xb3\x8b\xa7\x5f\x51\xd7\xce\xde\x4c\x7b\x5d\xb7\x8f\xfa\xf0\xcf\x48\xa9\x20\x5f\x29\x07\x9c\xc2\xf3\x9c\xbd\xcf\x0b\xe5\xce\x09\xb6\xc4\x59\xf5\x13\x18\xbe\xa3\x4c\xdb\x0f\xaf\x68\x1f\x3d\xc9\xd3\xd7\xac\xf4\x6a\xb2\x91\xdb\xf9\x40\x0f\xf6\x2d\x1d\x8b\xc3\x62\x90\xa2\x71\xff\x01\x93\x35\x3d\x2c\xd6\x94\x52\x94\x0d\xef\xe8\x4c\xb0\xe7\xe9\x7c\xeb\x56\xba\x91\xb1\xfb\xba\x5e\xeb\xaa\x54\xc7\xea\x9a\xe1\x26\x6d\x95\xf9\xe8\x40\xb7\x51\x3a\x99\xc4\x58\x42\xa0\x9f\x55\x3f\x94\xc5\x3e\xd9\xc0\x0d\x1f\x6f\x45\xb1\xdf\xb3\x14\x61\xac\x36\x01\x4d\x6f\xe6\xe1\xca\xc2\xaf\x72\x2c\x15\x45\xe6\xb0\x38\x74\x3b\x10\x7c\xa3\x22\x55\x2c\xee\x8e\x93\x03\x71\xd5\xde\x75\x31\xb1\x62\xb8\x1c\x48\x89\x49\x45\x73\xcf\x3b\x44\x79\xac\x22\x79\x5a\x9e\x1b\x07\xdc\x66\xed\x97\xd1\x81\xb4\xb8\xbf\x2f\xa1\xe1\xd7\x6a\xd8\xca\xd2\x57\xf7\x79\x43\x60\x99\xe4\xd0\xf4\xe7\x1f\x54\x66\x39\x30\xcf\x43\xce\xde\xff\x45\x4f\x97\x5c\x70\xf3\xac\x5b\xd1\x97\x33\x99\xb6\xf0\xa0\x67\x19\xf6\xbc\xdc\xf3\xb2\x68\x13\x8f\xac\x3e\xcd\xe4\x68\xd6\x9e\x87\xe4\x83\xf2\x7c\x20\xe7\x44\xe7\x06\xdc\xc8\x65\x1d\x08\x6b\xb2\xfc\x01\x8c\x83\x87\x19\x45\x57\xd7\xba\x55\x3c\x99\xed\xd0\x40\x10\x28\x20\x55\x83\x7e\x84\x34\x53\xd1\x3a\x3b\xa2\xf3\x18\x68\x92\x9a\xca\x47\xc2\xa5\x1c\xba\x75\x55\xda\xf9\xfe\xd2\x32\xa3\x4b\x85\x90\x69\xc3\xed\xd4\x7e\x3d\x35\x60\xb6\xa2\xee\x12\x62\xdd\x46\x50\x7b\x15\x2a\x76\x56\x72\xf9\x5e\xeb\xbe\xd7\x75\xef\xd5\x52\x65\xeb\x43\x07\x9f\xaa\xb6\x39\x4d\x4b\x55\x76\x2e\x72\x90\x74\x78\x0b\xde\x05\xad\x22\xae\xc1\x9b\x5d\x02\xef\x13\xf3\x57\x87\x52\xee\x11\xdd\xb1\x42\x53\xf9\x5d\x3d\x19\x2d\xda\xe6\xa2\xc4\xaa\xf0\xeb\xdd\x8e\xa5\x59\x22\xd8\x68\xcd\xc8\x61\x3d\x14\x58\xd7\xfd\x77\x85\x07\x32\x8b\x14\x80\x4b\x9a\x74\x53\xdf\xdf\xfe\x4a\x33\x39\x6d\x89\x48\x68\xa6\xfc\x72\x4a\x8a\xd0\x70\xba\xb3\x96\xea\x88\x95\x53\xa2\x2a\x5e\xd7\x99\xe9\x33\xd6\xf0\xac\x07\x96\x77\xfe\x58\xcc\xa0\xd3\x76\x3b\x41\x14\x9c\xfe\x7e\x22\x2a\x56\x9d\x35\x44\x84\xbb\xe0\x6c\x2b\x7f\x5f\x54\xc2\x2c\x99\xe7\xf5\xdf\x7b\x4b\x48\x58\x07\xae\x66\x3a\x2f\x39\x79\x74\xd0\xc9\xfb\x54\x0d\x39\x48\x36\x0d\x56\x0a\xa0\xcb\xf3\x0e\xb6\x0d\x84\xc3\xb4\x80\xb1\xae\x5d\xe5\xf2\xef\xb4\xb7\x9d\x80\xfd\xd4\xe2\xe0\x00\xe8\x0d
\xd0\x6c\x5d\x83\xa5\x84\xb6\x14\xe9\x55\xd9\x85\x7c\x70\x54\xcc\xfd\xf3\x8a\x01\xd9\x17\xb2\xbf\x12\x6a\xf2\xd6\x3f\x84\x2b\x2b\xac\xac\x25\xd2\xc0\x5a\xa5\x88\xca\x58\xa9\xb6\x10\x3c\x66\x3d\xae\x28\x3c\xa2\x92\x28\xb3\x0d\x38\xb6\xd1\x01\x24\x21\x4a\x1b\xa0\x3f\xe9\x7b\xa1\x0f\xed\xbd\xd0\x18\x2a\xf5\xbc\x42\x5f\x69\x81\x17\x45\x4b\xd8\x55\xfa\xc6\x08\xb9\xf8\xc1\xa1\x9b\xf9\xa2\xb1\xf0\x88\x36\x96\xcc\x07\xd9\xa1\xb1\xb6\x84\xb9\xe5\x09\xdc\x32\x21\xe6\xda\xf1\xcc\xc3\xc8\x22\x29\xda\x0b\x17\x16\xfd\x88\xe9\x10\x83\xa4\xa0\xcc\x58\xdd\xad\xb3\xa3\x12\xe5\x64\xf1\xa2\x32\x46\x1d\x56\x22\xad\xa8\xb9\xdf\x27\xd3\xf6\x58\x20\x70\x51\x1a\x2a\x36\xf8\xf4\x81\x3d\x68\xd5\x55\x23\xa9\x81\x0a\x64\x5c\x95\xfa\x06\x8f\xc6\x29\x4f\xbf\x6a\x93\x2c\x78\x26\x8c\xda\x44\x50\x81\x89\xa0\x65\x9f\xee\x17\xd3\x29\xe6\xb4\x8c\x44\x4c\x94\xba\x23\xe2\x71\xa7\x7d\x6c\x49\x12\xf3\x48\x0b\xbf\x2a\x57\x5a\xee\x22\x0f\x6a\xa2\x2e\x49\x52\x5f\x6d\x30\x6b\x4b\xb4\x1f\x2d\x22\x80\x30\x7f\xc7\x44\xf2\x57\xf6\x40\x1d\xa7\x7d\x26\x95\x0e\xae\x1e\x9a\x07\x08\xfc\x15\x30\xa5\xc1\xab\x02\x37\xc9\xc5\x5f\xd9\xc3\xd5\xad\x0a\x39\x71\xb5\x4a\xf8\x8a\xe5\x12\x96\xaf\x56\xa2\xcc\xe5\xa7\x1e\xee\xbb\x82\x9d\xff\xc3\x36\xa9\xd8\x95\x6e\xe3\x0a\xa2\x08\xb3\x54\x67\x00\x0a\x53\x26\xab\x3e\x5e\x89\x6c\xc7\xde\x8a\x64\xb7\xbf\xba\xcb\xd8\xfd\xd5\xfd\x36\x5b\x6d\x5d\xcb\x57\x91\x98\x75\x0c\x4e\x0d\xe9\x96\x46\x77\x6f\xb5\x4d\xca\x2b\xf9\xe7\x55\x91\xb2\xab\x0f\xec\x41\xfe\x97\xcf\x83\x2a\xe0\x46\xda\x51\x01\xb2\x09\xd1\x0a\x0d\xc3\x2c\xc2\x93\xb9\x26\xc2\x37\x95\x87\xdd\x63\x20\x7c\xdd\x0a\x26\xac\x69\x88\x05\x4b\xa6\x5f\x0a\x85\x5c\xa9\x9f\xea\x6a\x95\x67\x8c\x8b\x9f\xf5\xef\x3f\xaf\xd6\x65\xb1\xd3\x4b\x7a\xa5\xdc\x9e\x7f\xd6\xbf\xff\xbc\xda\x27\x1b\xf6\x33\xfc\xfd\xe7\x55\xb5\x2a\x19\xe3\x3f\xeb\xdf\x7f\x5e\x89\x42\x97\xfa\x83\xe1\xf5\x10\x61\x45\xb9\x46\x69\x80\x0c\xad\xb6\x17\x83\x39\x80\xa6\x8d\x6a\x97\xfb\xba\xd7\xa0\x17\x6e\x61\x6a\x48\x38\x92\x42\x9e\x2d\xfa\x5d\xd7\x0b\x18\xeb\xb6\x48\x1f\x88\xae\xb3\xab\x6c\x82\xc0\x5b\xa5\x5a\x95\x45\x9e\x7f\xc3\xd6\x42\x31\xaf\xbd\x84\x19\x9e\xaa\x5c\xaa\x8c\x95\xcb\x4e\x80\xab\xcd\x61\x9a\xda\xda\xff\xd9\xab\xfd\x5d\xb1\xef\x55\x0e\xef\x83\xba\xbb\x3c\xd6\xfb\x0c\x63\xe2\x30\xbf\x07\xb7\x40\xa9\xa1\x41\x22\xcd\xad\xad\x08\xaa\x47\x3d\x01\x41\x2e\x3b\x07\x80\x54\xd7\x15\x55\xa6\x66\x06\xb2\xe6\x5e\x15\xce\x83\x67\x5e\x15\x3e\x0f\x3e\xf5\xaa\xf0\x59\x30\x53\x70\xa4\x0f\xe4\xe0\x94\x17\x49\x1a\x9c\x0c\x67\x00\x57\x76\xab\xdb\xee\x4e\x67\x7c\x69\xeb\xaf\x28\xd9\x7f\x21\x69\x52\x85\xfc\x64\x76\x6c\x09\x70\xbb\x54\x84\x89\x33\xb7\x05\xb8\xa4\x27\xff\x70\xb5\xa0\xd9\x6d\xc8\x6d\x7e\x28\x47\x9b\xb4\xea\xa4\xbd\x66\x65\x89\x10\xb5\x8f\xd0\x94\xf6\xc4\x3a\x6b\xa2\x38\x08\xb7\x21\x70\x12\x3e\xd6\x88\xa5\x83\x55\x94\xa4\x56\xc3\x7a\x5e\xa7\x4e\x30\xa6\x8c\x2a\x14\xbb\x36\x18\xcd\x56\x1f\x42\xd4\x3d\x77\x7d\x31\x44\xfa\x98\xee\xc9\xd6\xf8\xea\x75\x25\x6e\xe2\xe2\xa6\x21\xb7\x6c\x5d\x94\xec\xc0\xd5\xe2\xd8\x34\x4a\x9f\x62\xd6\x24\x4a\x4b\x20\x49\xe2\x2a\xe3\x49\xfe\xa5\x16\x59\xc8\x76\x94\xe2\xd8\x64\xc5\x8d\x5c\xfc\x6c\x77\xc8\x7b\x71\x38\xb5\xe8\xcd\x68\x8b\x5b\xc9\xaa\x75\xc0\x10\x4e\x94\x9c\x95\x91\xac\x7a\xab\x6b\x50\x2e\xcf\x76\xab\xc1\xa9\x69\xf0\xa2\x0c\x07\x4c\x01\xca\x14\x19\x20\xf0\xb9\xbc\x5a\xdf\x0c\x92\x61\x92\x5d\xe0\x7d\xce\xf9\x26\x50\xb5\x58\x22\x1d\x9a\xd8\x6f\xad\xc8\x74\xa8\x3c\x65\x63\xb9\x3c\x6f\x34\x19\xee\x03\x72\xe6\xb8\x39\x53\xc1\x2a\xdd\x2f\x48\x26\xc4\x42\xf2\x0a\x76\x30\x27\x63\x6a\xa6\xe8\xa6\x0c\x6e\x32\x2d\x0d\x4f\xd5\xcb\x8c\xc0\xf0\x45\x0e\x44\x0d\x61\x54\x0d\x25\xa
1\xea\x2a\xe3\x95\x90\xa7\x61\xb1\x36\xab\x11\x22\x08\x32\x09\x2a\x00\x05\x79\xbd\x55\xa0\xda\x2d\x04\x18\x4b\x4d\xc4\x40\xc2\xf9\xfc\xd2\xd6\xc4\xa0\x4d\x92\xb4\xbf\x0d\x3c\x4a\x84\x06\x7a\xa9\x1f\x7a\xeb\xa0\x3d\xa5\x7f\x18\x2c\x4e\x98\x89\xa0\x10\x9a\x56\x51\x5d\x20\x5c\xc9\xd6\x01\xaa\x14\x55\xa8\x6d\x86\xdb\x43\x99\xaa\x11\x99\xd7\xba\x96\x1b\xe4\x5e\x87\xfa\xb5\x28\x35\x08\x74\x86\x7b\x02\x20\x70\xec\x33\xf3\x68\x5d\xfd\x76\x3a\x1f\x6e\x50\x08\x32\xc6\x5c\xa9\xf4\x47\x78\x24\x99\xa1\x0f\x85\xe7\xb1\x66\xcf\x17\x62\x71\x69\xd6\x33\x41\xd8\x08\x07\x13\x9e\xb3\x34\x41\x7f\x31\x24\x44\x92\x01\x8f\xf3\xdf\xe9\xc9\xf9\xc8\xba\xbe\x0c\xaa\x35\x91\xbf\x7b\xdc\x14\x61\xbe\xa2\xcc\xd4\x89\x41\xc1\x8b\x5f\xe6\x1a\x9b\x3b\xbb\x63\xba\xfd\x47\xa6\x58\x76\x04\x72\x9d\x35\xda\x99\x37\x9c\x80\x02\x92\x73\x58\x06\x2e\x3c\x17\x77\xac\x74\x15\x61\x94\xb3\xe4\x8e\x99\x64\xc0\xf9\x43\x4b\x83\x01\xf7\x1f\xd3\x53\xef\xb8\x10\xc4\x48\x8c\x02\xa1\x59\x89\x31\x3b\x79\x25\x0f\xc8\xe8\xe0\x8c\x86\x68\x1d\x2d\x43\xac\x29\x1e\xf0\x2e\xce\x54\x48\x71\xa7\x0b\x4a\x8f\xcc\x9d\x5f\x6a\x77\xd2\xa2\xe5\x8c\x09\xef\x38\xf8\xf1\xd0\xc1\x44\x97\x11\x98\xf0\xa1\x09\x04\x44\x7c\xd3\x51\xdb\x94\x4d\xaa\x3d\x62\xfd\x1d\xd4\xde\x87\xfd\xc7\x1c\x81\x2a\x1a\x63\xe8\xcc\x83\xb6\xae\x24\xd5\x5b\x58\xf1\x96\xfe\x2f\xaa\x52\x49\x0e\xef\x4b\x56\x55\x26\xc1\x25\x67\x53\xd7\x52\x30\x60\x77\xdf\x99\x36\xb7\x47\xad\xda\xf5\x5d\xb2\xbe\x8f\x14\x87\xdc\x97\x3d\x09\xc4\xa2\xb4\x22\x21\x94\xc4\xed\x0d\x18\xbc\xae\xec\x6e\xb6\x19\xc6\xfb\xc4\x4c\xf2\x2f\xb7\x06\x98\x61\x36\x2f\xd4\x4e\x9c\x19\xdc\x97\x21\x70\x43\x1e\x39\x94\xfb\x75\x76\xea\x80\xe1\x17\x05\xeb\xb6\x3d\x07\xc8\x69\xb4\xc0\xba\xd3\xfa\x9a\xe3\x1a\xb9\x66\x0c\x83\x82\x84\xa9\x9e\x11\xa3\x07\xf9\x1f\x2e\xac\xd6\x71\xa8\xef\xed\x8c\xc1\x78\x7b\x40\xa6\x82\x09\x5e\x06\x32\xf5\xfd\x32\x90\xfd\x97\x62\x82\x7b\x6e\x15\x38\x44\x68\x9c\xca\xaa\x6b\xcb\xc2\xd6\x24\x0e\x17\x5a\xf5\x79\xaf\xaf\x58\x57\x1d\xf0\x7f\x51\xbf\xfd\x35\x6f\x63\x45\x02\x3d\xdd\xa7\x98\x4c\x79\xe5\x49\xa4\x3a\xf8\xcb\xaf\x87\x4a\xe8\x9a\x52\xc0\x76\x9d\x68\xf4\x6c\x27\x8c\x35\x78\x5e\xcb\x70\xa1\x47\x1b\x9a\x77\xcd\x74\x00\x60\xea\xd7\x12\x44\xe8\x8d\x22\x36\x47\x66\x43\x51\x91\x60\x4c\x91\x88\x0b\xb3\x61\xac\x8f\xb4\x90\xca\xac\x8c\x6d\xda\xda\xee\x36\x41\xdc\xde\xca\x9f\xed\xb6\x36\xc3\x78\x5b\xce\x00\x6c\x25\x49\x61\x91\x92\xea\xf5\x9d\x51\x69\x3c\x3e\xf8\x33\xe8\xb7\xf6\xed\x59\x3f\xed\x7d\x7b\x09\xa9\x77\x73\x60\x11\x5e\x0e\xa5\xfc\xf1\x5e\x6a\xd0\x04\xef\x0e\xc5\x12\x74\x40\xdc\x26\x86\xd6\xa9\xf0\x38\x66\x0f\xc4\x1f\x6c\xe2\xd1\x8d\xaa\x27\x06\x13\x67\x74\x67\xf5\x37\xaf\xe6\xb9\xda\xdd\x6b\xce\x55\xc5\xf5\xb5\x2c\x99\xe2\xc8\x6c\xf6\x69\xcc\xaf\x71\x46\xca\x9e\x01\xcb\xd9\xa2\x09\xd2\x62\xfc\xbe\x74\x1f\xd6\x6c\x31\x3c\x90\x45\x3c\x82\x35\xe0\xde\xf8\xc9\x44\x07\x80\xef\xd3\xe8\x4c\xa9\x61\xc7\x67\x4d\x16\x9c\x4e\xb9\x2c\x38\x46\xdf\x9b\xb2\x67\xe1\x20\x7a\x37\x6b\x93\x9e\x3a\x36\x21\x10\x30\xed\x3c\xac\x0d\x3e\x9d\x07\x49\x83\xc8\x5b\x25\x98\x68\x53\x61\xdd\xa9\xcb\xb0\x22\xcf\x38\x4a\xa0\x76\x06\x57\x60\x61\x1b\xf2\x9a\x6c\xad\xef\xca\x2e\x95\x90\x84\xd2\x2c\x44\x19\x95\xc4\x87\xac\x4c\x7b\x67\x41\x54\x8f\x91\x90\x64\x19\x18\x31\x09\x1c\xe8\x27\xd5\x03\x4c\x32\xa5\x1c\xca\x68\x21\x16\xe6\xe6\x55\x27\xb3\x5d\x80\x4d\x27\xe6\x54\xc5\x32\xab\x68\x46\xb2\x31\x23\xa5\x23\xc2\x7e\xb1\x5e\xcb\xa5\xbc\x70\xc3\x41\x43\x32\x65\x3c\x52\x19\xa3\x92\x6a\x60\x34\x35\xea\x36\xd8\x41\x51\x8b\xca\x18\xc9\xc0\x4b\xa0\xc1\x0d\x29\xf8\x18\xdf\x6a\x8b\x1e\x2c\x53\x12\x49\x2b\x17\xeb\xf5\x25\x4f\x08\xed\xfa\x30\xa4\xb9\xc1\x9c\x
df\x6c\xd8\xce\xe5\xd1\x4a\x24\x47\x34\xd4\x11\xa9\xd9\xb0\x14\x24\x61\xa7\xec\x98\xb8\xbe\x3b\xb1\x3e\x05\xdd\x27\xd2\xc9\xdb\x49\xa7\x04\x81\x79\xb9\x04\x68\x20\xbf\xb7\xe1\x68\xbd\x46\x05\xe1\x84\x45\x45\xdc\x07\x22\x4d\x81\x72\xcd\xc1\x5d\x8a\xa9\x67\xe0\x03\x3c\x94\x40\xb1\x52\xd2\x42\xfc\xc1\xe2\xd8\x38\x88\xb5\xab\x73\x26\x5c\xb1\xe5\x9f\x8f\xd6\x67\x04\x05\x4c\xfb\x94\xdb\xd5\x7d\xa5\xcd\xb8\xc6\x4c\xb0\x8d\xbb\x6a\x6b\xf6\x7d\x5e\xa1\xb6\xd5\x08\x84\x89\x78\x51\x09\x7a\xfd\xde\x8f\xde\x07\x7f\x5a\x46\x4b\x9f\xc4\x4f\x9f\x5c\x93\x5c\x1b\x3f\xaa\x83\xa5\xaa\x25\x48\xa0\x30\xf8\x3b\x17\x59\x5e\xbf\xcc\x73\x8c\xaf\xc9\x41\xd0\x8b\x06\x62\x64\x25\xe8\x69\xb5\xcd\xf2\xb4\x64\x1c\x42\x2e\x2b\x33\xd8\x4a\x3e\x73\x76\x84\x30\x71\xb2\xd2\xc0\x99\x35\xfd\x60\x16\xeb\x8c\x8f\xdc\xec\x42\x38\x85\x78\xfc\x9a\xd7\x68\x45\xfb\x96\xff\x75\x17\x42\xa4\xe7\xc5\xbf\x3f\x54\xdb\xb7\x22\x59\x7d\x50\x36\xaa\x5a\xda\x6e\xcd\xb9\xf2\xcd\x9f\x2d\x32\x7d\xc1\x02\x44\x05\xe8\x38\x93\x48\xc4\x6a\x09\xba\xfb\x96\x75\x54\x69\xbb\x90\x56\xf2\x30\x02\xf9\xad\xab\x11\xe9\xa0\x13\xd9\xcd\x3c\x34\x57\x79\xc1\x9d\x4b\x10\x9b\x58\x03\xbd\xca\x6c\xde\xc2\xde\x1b\xdc\x6a\xcd\x02\x46\xf8\xf9\x1d\x46\x66\x8a\x8e\x48\xd1\xd1\x76\x38\xa5\x45\x4f\x10\x79\x71\xf4\xe5\xe8\xe8\x95\x70\x02\xe2\x51\x75\xa3\x6f\x08\x2f\x46\x85\x78\x83\xa1\xae\xb5\xef\x2f\x03\x75\xb3\x22\xca\xcf\x44\xe6\x1f\x59\x78\x2e\x0b\x67\x63\x37\x37\x39\x8e\xc9\x3a\x16\xe4\xe1\xd0\x5d\xbb\x28\x97\x3f\x68\xab\xd3\xb3\xd3\x90\x55\x5e\x54\xac\xba\xe0\x03\xaa\x2e\xc2\xb4\x02\x4c\x68\x55\x61\x57\xed\x48\x3c\x50\x16\xc2\x3a\x28\xc5\xa4\xaf\x6d\x8b\xb1\xf1\xa4\x35\x17\x69\xc2\xad\xfe\x6a\xc3\x96\xf1\x82\x7b\x1e\x77\x28\x15\x0b\x4e\x6d\xd7\x56\x50\x6b\xce\x6f\x7a\x81\x20\x92\x30\xd1\xfa\x45\x0e\xb7\x54\x0d\x23\x45\x68\xef\x23\x7d\x23\xe4\x5b\x0d\x3e\x88\x13\x86\x31\x3e\x49\x7e\x5a\xdf\xf7\xb5\xe8\xdf\x29\xdf\x9f\xff\xa2\x0b\x35\xd7\x82\x6b\x81\x83\x42\x2e\x03\x78\x95\x8d\x39\x10\x84\x23\xd7\xba\x59\x77\x76\xe9\xb0\x4f\x72\x21\x20\x5a\x83\xb9\x6e\xcb\xff\xf5\xb7\x03\x2b\x1f\x42\x16\xcd\xe2\x40\x43\x70\xa0\x73\x2b\xb1\x72\x34\x8b\xad\x59\x09\x35\x2c\x97\x95\x40\x18\x8e\xab\x97\x79\x8e\xcc\x92\x06\xd3\xf9\x99\x25\x74\x4b\xa3\x8d\x75\x10\xbe\xf7\x0c\x8e\xe0\x18\xec\xee\xf3\x66\x71\xc0\x94\x73\xfc\x8e\x95\x1b\x85\xe6\xc1\xf7\x0e\x5b\x3b\x7d\x88\x6c\xcc\xa4\x95\x58\x85\x95\xf9\x22\xb1\x6f\xf3\x1a\xc0\xbd\x3c\xdc\x8d\x3f\xbe\x56\x6a\xb2\x3b\x65\x44\x17\x0c\xde\x5b\x7d\x21\xee\xc7\xdd\xdb\xeb\xab\x88\xd3\xe2\x8a\xc1\x05\xfe\x5a\xed\x79\x16\x30\xb2\xed\x31\x6b\x0c\xb9\xab\xa6\xf6\xe3\x3d\x31\xe6\xb2\xc6\x4e\x07\x1a\x0a\xed\x2c\xa4\x8f\x8b\x71\x01\x7f\x9a\x81\x9f\x61\x57\xa1\x8b\xdb\x12\x70\xb0\x5c\xf2\x0f\x1c\x2b\x0a\x32\x4e\x38\x48\x46\xda\x82\xa9\x70\xe5\xd7\xb7\xd9\x6d\x0e\x3e\x11\x8d\x3a\x6d\x2e\x66\x96\x5f\xb3\xe2\x50\x59\x05\x64\xf9\x97\x79\xfe\xe8\x60\x46\xda\xf8\xa3\x22\x17\x5a\xfa\xb8\x19\xb0\xdb\x83\x29\x90\xb5\x7d\xe4\xe4\x0d\xda\x85\xe2\x95\x7a\xb9\xb0\x62\xfa\x2b\x42\xac\xc7\xa8\x9e\x1a\x6c\x45\x9a\x26\x0c\x37\xa4\x3d\xea\x1f\xad\xc7\xbe\x2c\x54\x16\x32\x34\xc1\x1f\xe8\x83\x88\x9b\x81\x4f\x8d\x0b\xae\x27\xaa\x50\xa7\x08\x6d\x93\x94\x0d\x57\xab\x12\x0d\xcc\x6e\x8d\x62\xc2\x7c\xe8\xa0\xec\x7e\x85\x9b\x73\x41\xe8\x1a\x2e\x7b\xb2\x6d\x4e\x3b\xe5\x0f\xc4\xff\x55\xd1\x7d\xda\xcd\xee\xc2\x94\x2b\x3b\x12\x65\x58\x31\xfd\x4c\xd3\x8e\x58\xc5\xc0\x18\x86\xa6\xd4\xf1\x44\xf5\xde\x05\x71\x27\xb1\xce\x95\x1b\x49\x67\xae\x04\x04\xb1\x82\x8c\x1a\x79\xc8\x6c\x79\x7b\xd6\x40\x25\x99\x2f\xc9\xf3\xb2\x62\x08\x9b\x3a\x2c\xea\x02\xf7\x43\x53\x8c\x28\xe0\x2d\x62\xd1\xa2\x14\xc1\xcb\x9a\
xba\x01\x2f\x04\x82\xd8\x2c\xd8\x55\x41\x57\x45\x6b\x8d\x32\x87\x4b\x73\xac\xf0\xbb\xa3\xc7\x4c\x49\x18\x0e\xa3\x32\x0e\xa2\x38\xe8\x67\x41\x8c\x1c\xfd\x4d\xc9\xf6\xbd\xeb\xce\xba\xdb\x7a\x7b\x7e\xf2\x8d\xc4\x9a\x69\x36\x1e\xf2\x40\x5f\x96\xcd\x22\x6e\x90\x5c\xa1\xa2\xe0\x16\xf6\x39\x59\x2a\xf5\xef\xbc\x97\x5e\xd7\xce\x11\x15\xd8\xcf\x2a\x89\x98\x31\x30\x76\x56\xa1\xf6\xce\x6c\x52\xf4\xec\x43\xb2\x76\xab\x8c\x9e\x2a\x91\xbe\x6f\x9f\xc1\xb5\xd0\xd6\x26\xc5\xfd\x51\x79\x1e\x73\x74\xc8\x49\x68\x87\x59\x57\x68\xdb\xb8\x7c\x2d\x7a\xe1\xb0\xad\xe0\x32\xa2\x0d\x51\xa5\xe7\x92\x91\x31\x77\x45\xc7\x11\xc6\xfc\xbb\x54\x26\x78\x5c\x85\x58\x11\xc3\x38\xc1\x63\xf5\x58\xf1\xd5\x28\x15\x5d\xe1\x33\xb0\x56\xa1\x00\x2a\x0d\xa0\x76\xdf\x34\xa0\x0b\x15\xad\x51\x50\x3b\xa5\x35\x5e\x7a\xac\x69\xfb\x36\x4e\x88\xdc\xa3\xba\xd1\x79\x4a\xa5\xa2\x3b\xa6\xb6\x6d\x90\xed\xda\xc5\x84\x53\x76\xf1\x5a\x1b\x65\x62\xda\xbb\xb2\x40\x87\x64\x30\x80\x8e\x07\x9f\x91\x50\xc6\xa4\xd6\x52\xc1\xad\xfd\x82\xba\xc9\xed\x6d\x59\x27\xa5\xc8\x56\x39\xab\x93\x2a\x4b\x59\x9d\x1c\xd2\xac\xa8\x6f\xd3\xac\x5e\x25\xfc\x2e\xa9\x6a\x08\xae\x2e\xff\xe4\x59\x25\xea\x94\x89\x24\xcb\xab\x7a\x9d\x6d\x56\x09\x84\x16\x91\x8f\x87\x92\xd5\xeb\xa2\x10\xac\xac\xb7\x2c\x49\xe5\x0f\x84\x33\xa9\x77\x49\xf9\xa1\xde\x31\xf9\x81\x27\x77\x75\x71\x10\xfb\x83\xa8\x4d\x60\xcb\xba\x62\x30\x15\x75\x75\xd8\xed\x92\xf2\xa1\x16\xd9\x8e\xd5\x77\x59\xca\x0a\x97\x6c\x04\xbd\xbe\xfa\xf5\x6f\x92\xae\x5a\xa6\x13\xea\xa2\x10\xce\xe8\x7a\x99\x4e\xb0\x7b\xbd\x21\x3b\x41\x8d\x2d\xf7\x0b\x14\x06\xee\x64\x2b\x26\x2e\x8e\x96\xcb\xea\xfa\x26\x76\x89\x9b\xb9\x98\x3c\x48\x2e\x70\x59\x4d\xae\xc9\x9d\xa0\xd7\x2f\x50\xe8\xa8\x48\x87\x65\xbd\x2a\xf2\x1a\xa2\x7c\xd7\xdb\xb2\xce\x76\x9b\x5a\xf9\xdc\xe5\x19\x87\xfe\x26\xf5\x3e\x29\x93\x1d\x46\x28\x5a\xde\x07\xf1\x04\x47\xef\x6f\xe2\xa7\x78\x79\x7d\x73\xbd\xc9\xc8\x2d\x54\xa6\xbf\x5c\x93\xa3\x7c\x85\x8b\xc1\xae\x33\x72\x2f\x5f\x6a\xef\x4f\xe1\xf2\x7e\xb2\xb8\x26\xef\x54\xbb\x41\xb5\x2a\xb3\xbd\xa8\x95\x4f\xab\x6c\x05\x5f\x67\xe4\x95\xb0\xc2\x29\xde\x16\xc7\x1a\xc4\x8f\xe0\xe8\xf7\x9d\xa0\xd7\x5a\xb4\xbd\xac\x9e\xa2\x30\x88\xde\xd3\xb8\xa6\xcb\xea\xa9\xf1\x5d\xf4\x65\x0d\x1f\x64\x0d\x4f\xea\xe5\x35\x0a\x83\x5f\x93\xbb\xa4\x66\xab\x5d\x82\x55\x63\xd7\x19\xf9\x52\x7e\x16\xe5\x81\x2d\xaf\x91\xff\x14\x5f\x93\xb7\x6a\x42\x9e\xbe\x70\x50\x18\x2c\xa3\x57\xaf\x5f\xbe\x7b\xb9\x8c\xea\xe9\x14\xd7\x32\x21\x5e\xc6\xf2\xf9\x66\x59\x3d\x7d\x72\xbd\x21\x2f\x05\x35\x11\x81\xa2\x39\x71\x5f\x28\x4e\xee\x6a\x77\xc8\x45\xb6\xcf\x19\xfd\xc4\x3c\x7d\x72\xe3\x12\xf7\xc5\xb5\xfa\x7e\xe3\xc6\x24\x67\x1b\xc6\x53\x55\x6a\x9d\xb1\x3c\xad\x98\x50\x79\xba\xb7\x98\xc8\xc5\x50\x79\x76\xc9\x5e\x7d\x86\x87\x98\xc0\xec\xab\x4f\x4a\x64\xa2\xbe\x9a\x67\xc9\xd0\xb2\x44\xd7\x6f\xee\x99\x71\xdb\x2b\x67\x62\x22\xca\x20\x7a\x46\xba\x7b\x6a\xe4\xe2\xe8\x2c\xf0\x68\x65\x5d\x15\xf9\x48\xde\x36\xe3\xaa\xc8\x01\x94\x55\xe9\xf6\xcd\x6e\x2b\x0d\xa2\xe7\x67\xe5\x45\xa9\xdb\x2b\x6f\x46\x1a\x6d\x2d\x49\x2c\x5f\x6b\xfb\x22\xb9\x30\x9a\x11\xd7\x25\xae\x1b\xc3\x18\x7f\x7e\xa1\x2e\xc8\x70\xf5\x4d\x19\x71\x43\x7e\x15\x34\x05\x0f\xb6\xd7\x82\xfe\x2a\x3e\xe6\x46\x93\xc5\x4b\x70\x69\x86\xfe\x53\xf5\x9c\x15\x9c\xbc\x34\x77\xde\xc9\x07\xb9\x89\xe5\x83\x19\x27\x3c\xab\x7d\x0e\xdf\xe5\xac\x43\x89\x2d\xbc\xa6\x7d\x49\xab\xb8\x40\xce\xf6\x23\x66\x5e\x42\xd3\xa1\xba\x09\x1c\x75\xdc\x93\x0a\x7c\x8a\xcc\x5d\x44\x08\x9d\xb1\x54\x67\xce\x08\x7a\xe8\xef\xd8\x11\x68\x3d\x60\x32\x94\xad\x0a\x3b\x8b\xcf\x29\xd9\x1a\xa8\xf8\x82\x85\x92\x9f\x16\xbb\x6f\x13\x9e\xed\x47\x6f\x77\x86\x93\xe3\x2c\x16\x5a\x5d\xcf\x47\xd2\x3e
\x1f\x26\x19\xbc\xff\x8d\x11\x12\xe0\x45\x7f\x0d\x99\x8a\x7f\xbb\x2f\xd9\xff\x43\x1d\xcc\x78\xc5\x4a\xf1\x05\x28\xa2\xe4\xa9\xd6\xa3\x86\x65\x77\x95\x8e\xea\x7f\xd8\xdb\x33\xfd\xe9\x20\xe1\xac\x79\x23\x56\x4c\xd6\xe2\xa2\x95\xd9\xff\x1f\x8d\xf6\xe8\xa3\x06\x8f\xba\x3e\x76\x02\x18\x45\x68\x6a\x0b\xd9\x0e\xb6\x49\x46\xb5\x3c\x45\x19\x4d\x42\x54\xb2\x2c\xc6\x8b\x6c\x32\xc1\x9a\xea\xe3\xd6\x0a\xd9\x37\x61\xbc\x11\x88\x63\x4c\x78\xaf\xe3\x08\xe2\x15\xb5\x82\x2f\xde\xdf\x1b\x84\x63\xcf\xfb\x45\x40\x51\xe2\xaa\x03\xc2\x1d\xd4\x31\xb8\x0d\xa7\x2f\x7c\x3e\xbf\x39\x1c\x4c\x53\x88\x18\x0c\x43\x19\xab\x44\x22\xc6\x20\x8d\x3b\x0d\xe9\xc7\xc1\x38\x18\x48\xc6\x0c\xa7\x6f\x03\x54\xbf\x3b\xbd\x4f\x8b\x36\xac\xd9\x30\x44\x93\xb9\x9a\x54\xd9\xcf\xa9\x3c\x7a\xb7\xd3\xd9\x20\x44\x2b\x5c\x5b\x77\x29\xe4\xb5\x91\x65\x38\x73\x08\x73\xad\x5e\x45\xc8\x02\x6d\xd3\x22\x99\xaa\x31\x8d\x1e\x54\x6a\xc4\xe8\x42\x85\x91\x14\xbb\x0b\x0c\xf5\x25\xac\xa8\xa3\x8c\x2a\x1c\x07\x0e\x62\x67\x72\xbc\xee\xda\x09\x8b\xfb\xe8\xe0\x25\xe4\xdd\x25\x4b\x6d\x20\xc0\x8d\x80\x48\x80\xe0\xbc\xe0\x8c\x48\x9d\xeb\xfa\x9d\x25\x15\x74\x2e\x1c\x4d\x9e\xb7\x1b\xcf\x75\x76\xcf\xaa\xe7\x3d\x58\x39\x5f\x8a\x08\xdd\x6a\x67\x5c\xa6\xfc\x76\xe5\xf9\x86\xa3\x79\x3c\x88\x1b\x81\xf1\x49\x99\x38\xaa\x6e\xdf\x09\xe2\xbe\x78\x32\xbf\x79\x71\xfd\xe4\xd9\x8d\x8b\xc1\x6b\xa9\x27\x88\x6c\x85\x90\x30\x55\x67\x71\x67\x87\x3b\x07\x20\x8e\x58\x13\x44\x19\x5e\x70\x3a\xb3\x2e\x48\x68\xb8\x46\x04\x83\x03\x88\x3d\x7a\x98\xe8\x0e\xf7\x6f\xa9\x37\x26\x5c\x16\x1f\x7e\x0e\x0b\x51\x8f\xe7\x22\xb6\xb4\x22\x6e\x30\xec\xb1\x8f\xc5\x69\x5d\x30\x9b\x48\x4c\x26\x31\xc9\xf4\xc3\x22\x93\x1c\xa5\xe7\x95\x56\xdd\xfa\x8a\x30\x2d\xb5\xb7\xb1\x1a\xe9\x47\x0a\x46\x60\xcc\x69\x63\x42\xc9\xca\xe2\x86\x38\x33\x4c\xec\xe8\xf7\x26\x7b\x43\x94\x65\xe4\x45\x79\x61\xeb\x80\x0b\xea\x5e\x33\xa0\x33\x3e\x9f\xa9\xcb\xda\xf2\x07\x10\x82\x28\x75\x4f\xeb\xa4\x43\x72\x72\xa0\x33\xb2\xea\xc9\xb7\xf7\x4a\xc3\xb2\xa6\xab\xe9\x9c\x6c\x29\x8b\x66\x31\xd9\xf4\xe3\x9b\x6e\x81\x89\xda\xd4\xb5\x83\xe6\x37\x74\x35\x22\x00\xdf\x4a\x7c\xdb\x0b\xfd\x01\xd7\x56\x62\xcf\xfb\x4e\x43\xf4\xf6\xb1\x48\xcb\x2d\x97\xbf\xf7\xd9\x6f\xa8\xc4\x8b\x0d\xd8\x8d\xce\x62\x6a\x7b\x25\x81\x86\x50\xec\x72\x10\x83\x64\xdd\xa2\xea\x9b\x7c\xb0\xbe\xca\x0b\xe5\xf4\xe8\xdf\x1e\xb2\x3c\x6d\x99\x40\x75\x8e\x9c\xd1\x3f\xc4\x99\x13\x47\x43\x2e\x26\x25\xcd\x6d\x21\x97\xdc\x16\xb9\x25\x45\xea\x9c\xaf\xff\x3f\xd2\xde\xb6\xcb\x6d\x1b\x49\x1b\xfe\xbe\xbf\xa2\x85\xf5\x72\x01\x0b\xad\x97\x1e\xef\x73\xf6\xa1\x06\xe6\x71\xda\x79\x9b\x89\x1d\xc7\x6e\x4f\x9c\xa1\x39\x39\xb4\x04\xb9\x19\xb3\x41\x85\x84\xdc\xdd\x11\xf5\xdf\xef\x83\x2a\x00\x04\x29\xb6\xb3\x7b\xdf\x1f\xec\x16\x41\x10\x04\xf1\x5a\x55\xa8\xba\xae\x52\xd4\xcc\xf4\x26\x4c\xaa\xdc\x0e\xd4\x6f\x34\x2d\xbb\x7d\x82\x7f\xa7\x19\xaf\x44\xee\x16\x9f\xf5\xd3\xfd\x6a\x0f\x61\x46\x25\xdf\x4f\x84\xd8\x3a\xb6\x19\x58\xfd\x0a\x3e\x59\xc0\xc8\xa8\xcc\xf2\x8c\x36\xac\x9c\x7f\xa3\x69\x11\xec\x3c\xc0\x40\xec\xda\x22\xdd\x67\xbc\xe0\x7b\x8b\xa1\x8a\x6c\xe2\x79\xea\x5e\x77\xbe\x1c\x7e\x28\xd6\x31\xe7\xbf\x6b\x66\x86\xc0\xaa\xf2\xf5\xc9\x4d\x51\x9f\x5c\x5c\x8e\xf3\x38\x22\xa1\xe3\x4b\xc1\x09\x42\x18\x7c\xfd\x39\x2f\x09\xeb\xed\x9a\x0d\x2f\xc0\x48\x35\x6b\xea\x75\x72\x37\xfb\x55\x7e\xce\xcb\xb7\x75\x89\x09\x2c\xbe\x9b\x75\x4f\x52\x53\x3c\xa0\x0c\xe1\xdf\x4b\x34\xe2\x99\x4b\xbf\xb2\x20\xb4\x80\x5b\xc8\xde\xc0\xfa\xcb\xd8\xaa\xb4\x14\x73\xbd\xfd\x28\x34\x9f\xfc\xa0\x07\x88\xf3\xe1\x1e\x87\x30\x3c\xac\xb7\xf5\xa1\xa1\x2b\x30\x67\x87\xb2\x19\x27\xba\x06\xd3\xe3\x17\x29\xad\x61\xab\x91\x7d\x21\x74\x14\x31\xcc\xab\x15\xf6\x4
1\x16\xcb\xce\xa8\xf1\x9d\xee\xe1\x31\x81\x63\x23\xb5\x88\x7f\x43\xbc\x3e\x08\x74\x67\x53\x32\x27\x53\xeb\xd4\x1c\x14\xf4\x7b\x60\x1d\xf9\xda\x6d\x1b\xe8\x2c\xd6\xc1\xfb\x39\xcf\xc9\x74\x99\xc5\xa7\x78\x90\xf6\x0d\x61\xa9\xbf\xea\xc1\xe9\xd8\x40\x04\x93\x69\x9d\x31\xd8\x54\xdc\x68\x51\xbd\xd1\xc2\x27\xba\xa3\x37\xd1\x69\x9d\xf5\xc7\x52\x60\xdd\x79\xa1\x3d\xd2\x64\xbf\x73\x4c\xbf\x75\x90\x17\x7d\xb0\xe6\x0e\xfe\x82\xe7\xa2\x73\x80\xaa\x18\x6f\x44\x65\x61\x30\xcc\x1c\x69\x98\xa3\x7f\xce\x1d\x28\x41\xde\xe1\xd3\xc0\x37\xa9\xb3\x42\x9d\x35\x0c\xa9\xa4\x8c\xf0\x00\x40\xd3\x4e\x78\xb0\x3b\x67\xdf\xdd\x4b\x71\x93\xc7\xb4\xc0\x11\xd9\xf3\xa2\xc8\x71\xea\x75\x06\xd9\x23\xb7\xcc\x7a\x2c\x20\xbf\xfb\x51\x0f\xc0\x44\x57\x27\x9f\x0d\x2d\xa1\x84\x7e\x00\xf7\x98\x07\x82\x44\xc8\x8d\x1c\x45\x3a\x0c\xeb\x3b\x74\x5c\x3e\xd6\x05\xa7\x36\xdf\x59\x38\x88\x90\x3e\x30\x84\xe6\x9d\x17\x86\xd1\x5f\x86\x03\xc4\x17\xcc\x8e\x6e\x61\x32\xc2\x43\x14\x69\x98\xd1\x18\x65\x29\xef\x74\x42\xbf\xd3\x54\x33\xf8\x6d\x93\xf8\xef\x60\x2e\x8c\x3b\x8f\x0e\xa1\x12\xaa\x07\x12\x79\x47\x55\x2e\x64\xf7\xbb\x87\xb0\xe6\xd9\xd0\xa3\x48\x76\x0b\x07\x2c\x57\x00\x4f\xaf\xbb\x44\xc0\x47\x0f\xc5\x96\xe0\x16\x8b\xad\xcf\x2d\x7e\xc0\xa5\x13\xbc\x70\xc6\x98\x9a\x59\x8b\x83\x23\xbf\xd6\x1e\x03\x4b\x06\x34\xd8\x00\x47\x05\xdf\xfd\x19\xe1\x50\xa9\x4d\x73\x29\xf0\xc9\x3b\xeb\x75\x22\x54\xe2\xcb\xf5\xc4\xf0\x1d\xc6\x59\x17\x8b\xe0\x6e\xc6\x34\xa8\x65\xdb\x12\x87\x6d\xd5\xc1\xbf\xf7\x01\x12\xfb\x78\x89\xec\xe8\xcf\xfb\x70\x95\xba\xaa\x62\x82\xbf\x88\xd3\x94\x4d\x92\xfd\x49\x78\x28\xb1\xc4\xd6\x81\xd2\xa5\x3e\x03\xa5\x91\x80\xee\x48\x9c\xe8\xf6\xac\x2c\x63\x12\x88\x71\x23\x2e\xe7\x83\x93\x16\xd9\x3f\x69\x07\xb3\xfe\x1d\xce\xdf\xca\x6f\x5f\xb8\xc8\xe4\x4f\x85\x93\x57\x6b\x01\xc8\xbf\x5e\x6c\xc2\x7d\xd3\x6c\x99\x77\x10\x97\xcb\x52\x9d\x51\xc5\xf8\xb5\x07\x80\x50\x78\x5c\xfb\xd0\x61\x6d\xd1\x3f\x4a\xfd\x46\x87\xa1\x7c\x9e\x67\x22\x80\x4d\x3e\xd9\x07\x8c\x2c\xf8\xd0\x16\x61\xba\xea\xb1\xd1\x1c\x5c\x01\x70\xda\xed\x4e\x4c\x9e\xc1\xe2\x6e\x1e\x1e\x26\x77\x0f\x82\xca\xd1\xd8\x9d\x1d\xfc\x60\xc2\x13\xad\xb6\x95\x6e\x09\xae\x80\xd8\x8e\x41\x10\xf3\x44\xf5\xfc\xc7\x2b\xae\x58\xd2\xb8\xf3\x0d\x7f\x3a\xd6\x18\xc9\xc2\xdc\x0b\x5c\x56\xe0\xdc\x44\x0d\x94\x43\xc5\x12\x7f\xa2\x26\x33\xde\xb0\xb8\xe9\x56\xb0\xaf\x60\xbb\x19\x4c\x1b\xd0\x22\x07\xf3\xc6\xcf\x16\x16\x52\x49\x9e\xa8\x92\x27\x11\xe5\xdc\x88\x71\x5e\xce\x18\xec\xac\x1c\xf1\x8e\xc6\x96\x05\xe4\x32\x0b\xd0\xab\x27\x4e\xfd\x22\x7f\x35\xfb\xa6\xfb\xc0\x29\x79\x4a\x58\x52\x99\xfa\x85\x74\xec\x31\x7d\xde\x5f\x32\xfc\x2a\xc4\x9f\xeb\x9e\x86\x5d\x89\xe7\x3d\x83\x0e\xe3\x13\xfa\xd0\x9a\x7c\x92\x6e\x5b\x07\xcd\x16\x3d\xaa\xc7\x7e\xc2\xe0\x6b\x98\xdd\x9f\xbe\x81\x80\xe7\xc6\xfc\x85\xa9\xb3\x70\xc3\xc1\x6c\x5b\x66\x38\x4c\xa7\x39\x33\x03\x23\x8a\x7e\x34\x62\x24\x8e\x91\x62\x4b\x35\xe0\xb9\xdb\x71\xd5\xb4\x2d\x16\x00\x6e\x9a\x58\xe6\x48\x59\x66\x68\xbd\xe8\x4a\x01\xc7\x49\xd8\xad\x3b\x9f\x4d\xac\x51\x20\xff\x7a\xfa\xc5\x05\x58\x51\x6a\x3e\x29\xa3\x08\x66\x59\x60\x4b\xa9\x45\x23\x0a\xa4\x88\xac\x8e\xbc\x27\xb2\x3f\x14\x9b\xc7\x7b\xb0\x58\x42\x3a\x2d\x66\x2b\x36\x66\x7f\xe1\x1b\x00\x1d\x73\x62\xca\xee\xe9\xf5\xea\x1a\x5d\x9c\x2a\x21\xd3\xeb\x8c\x57\x6d\x0b\xf4\xe4\xac\xe7\x5c\x28\xee\x60\xfc\xd2\x8a\x31\x37\xe2\x37\xbc\x0a\xfc\x41\xaa\x2c\xae\x98\x77\x19\xbd\xb5\x03\xaa\x62\xec\x00\xad\xb8\xed\x49\x81\x27\x72\x9f\x25\xc8\x2e\x85\xd7\xe7\xab\x2f\xe9\xf3\x7c\x2d\x9e\xe9\xb4\xcc\xda\xf6\x99\xf6\x38\x27\xbc\x09\x46\xe5\x3a\x5d\x66\xd3\xea\x21\x85\x7f\xba\x4e\x2f\x8c\x1e\xbb\x4e\x17\x59\xc0\x39\xdb\x88\x66\x56\xe6\x76\xb4\xae\x
fa\x80\xde\x5f\x30\x45\x54\x2c\x8a\x36\x16\x45\x7f\x68\x2c\xbe\xf7\x9f\x93\x2e\x32\xd6\x93\x49\x40\xd6\x65\x87\x4a\x58\xf1\x7b\x22\x44\xd9\xb6\x77\xbe\xd0\xc4\x9f\x43\x4c\x04\x7c\x4f\xef\xe6\x22\x6e\xe2\x26\x94\xc8\x0b\x81\xa1\xc4\x43\x75\x2c\xf8\xbe\x60\xed\xda\x8b\x30\x6b\x5a\x64\xdc\xc9\xde\x51\x34\xd9\x8f\x29\x75\x55\x6f\x6a\xef\xcd\x5a\xe5\xc6\x41\x13\xba\x12\xf0\x26\xd4\x5d\x04\xf1\x3c\x65\xe1\x4a\xd0\xf4\x0a\xeb\xdd\x5a\x35\x62\xdb\xf5\x02\xc2\x6d\x3d\xd4\xba\x15\x63\xab\x26\x8a\xb6\xfd\xd2\x42\x61\xc8\x8d\x3a\xbb\x96\xd8\xd3\xd5\x6f\x34\xdd\xf8\x88\x22\xfe\x95\x66\x30\x21\xec\xb1\xb9\xd8\xa4\xd7\xd3\x69\x06\x9c\x94\x93\xba\x6d\xcf\x97\x30\xfc\xdd\xd1\x6b\x65\x14\x5b\xc0\xd1\x0f\x16\xdf\x6a\xb0\xf8\xba\xa5\xa7\x3f\xec\x2b\x16\xcc\xff\x1c\xa6\x7d\xc3\xcc\x36\x73\x28\x82\xd7\x37\x88\x82\xef\x34\xce\x2a\xd4\x38\x95\xdb\xab\x9c\x9e\xd7\xe0\xe2\xb0\x45\x5e\x3b\x35\xc2\xa8\xeb\x04\x68\x04\x21\x59\xc0\xa6\xe1\x58\xc0\xf7\xc2\x12\x06\x03\xf0\xcd\x28\x07\x32\xdf\x0e\x21\x71\x4e\xf4\x9b\x26\x63\xab\x06\xd7\x10\x0a\xba\x4c\x00\x2b\xa4\x90\x75\x59\xa8\xb4\xcc\x8c\x00\x13\x45\xfb\xb4\xca\x10\x74\xca\xe9\x17\xcc\x0b\xdd\x5d\x4a\x5a\x67\xc9\xc0\x91\x18\xf8\x17\xfa\xd2\xb8\xf9\xae\xdc\x4b\xe3\xff\x66\x8a\xee\xa2\xa9\xcc\x15\x5f\x3b\xda\x66\x53\x01\x27\x6d\xa8\xa1\xd8\x0e\xc2\xc6\x49\x2a\x2d\x59\xac\x1c\xdc\x11\xdf\xb9\xa6\x67\xc7\x23\x77\x5a\xfc\x03\x46\xd8\xdf\xf2\x3b\x7a\xd8\xd7\x65\x2c\x81\x46\x3f\x26\xdf\x7e\x7d\x45\x00\x72\x12\x63\xce\xed\x20\xe0\x79\x73\xaf\xd6\xf1\x64\xc9\x2d\x5c\xe9\x64\xc9\x89\xbe\xae\xab\xdb\x86\xc4\xe0\x55\x3a\x0c\x3c\xb8\xad\xf3\xdd\xd0\x99\xea\xff\x96\xb1\xcb\x96\xd5\x67\xeb\x72\xb4\x33\xd6\x1a\xe4\x71\xf1\x1f\x32\x10\xb1\x99\xfc\x9d\x2e\x58\x27\x6e\xae\x4e\x5d\x13\x8d\xf2\xd3\xb3\xf4\xb9\xc2\xb9\x1e\x1a\xbd\xbb\xd8\xd4\x11\xfb\xbd\xe3\x8d\x08\x18\xfd\xbd\x2a\x28\x7b\xe9\x9d\x37\x9f\xb7\xb3\xe2\xd1\x4e\xcf\x5a\x6f\x1a\xe0\x7b\xb3\x63\x8c\xf7\xe2\x9f\x12\x67\x0d\xda\x12\x8a\x3a\x6d\xcd\x2f\x72\x78\x5a\x32\x2c\x85\xe4\xea\xe0\xf9\x45\xd9\xca\xb9\x29\x27\xaa\xeb\x24\x16\xeb\xc0\x64\xcc\xb0\xf6\xa3\x0c\x06\xbd\x7a\xaf\x1e\x1e\x0a\xea\x74\x28\xe8\x3e\xe3\x12\x8b\xf1\x55\x7b\xd5\x7f\xd9\xc0\x3f\x19\x3a\x9a\xb2\x11\xc7\xfd\x41\x44\x21\x6e\x30\x6d\xdb\x59\x82\xbd\x5a\x64\xc1\x11\x02\x8f\x34\x36\x33\xdf\xea\x39\x28\x5f\x69\xfe\x5a\xf3\x9f\x35\x7f\xa4\xc5\x3c\x2f\x77\xd7\xf9\x7b\x9a\xfe\x8b\x65\x8f\xdf\xb3\x79\xc1\xbf\xd7\x62\x5e\xed\xf2\x75\xa1\xef\xdf\x37\x8f\xc5\xfb\xe6\x31\xde\x64\x73\xfe\x07\x78\x44\xe8\x6a\xd7\xd6\xc5\xc7\x6b\xdd\x7e\xa8\xb4\xae\x6e\xda\x52\x6e\x35\x7b\x34\xe7\xef\xe0\xb6\x32\x62\x31\xec\xb6\x34\x99\x9c\xaf\x53\x99\x67\x6c\x36\x65\x73\xfe\xd6\xdc\xbe\xc9\xeb\x8f\x85\x9a\xf3\x7f\x74\xfe\x21\xff\xa2\x64\x7a\x3b\x25\x8c\xce\x1e\xb3\x47\xd6\x35\xe4\x97\x91\xdb\xc9\x64\x77\xc7\xd2\xfc\xfc\x8f\xff\xc8\xa6\x2e\xdf\xdf\xc2\x7c\xe9\xf4\x3c\x63\xc2\x66\xb7\x19\xbe\xd5\xe2\xf0\xd5\x8f\xcf\x7f\x89\xc9\x87\xb2\x5a\x7f\x22\x47\xfe\x93\x16\x87\x5d\xd5\x14\xe0\x38\x41\xf2\x0f\x4d\x55\xee\xb5\x24\xfc\x73\xd1\x14\x1f\x8a\xb2\xd0\xf7\x31\xb9\x2e\x36\x1b\xa9\x08\x60\xa7\x95\xf9\x7d\xf7\xf0\xdf\xb5\x38\x94\x52\x6b\x59\x5b\xec\xee\x78\xc1\xb7\x95\xd2\x3f\x03\xfc\x7b\xfc\x64\xb1\x38\xf2\x7f\x6a\x91\x92\xab\x6a\x47\x38\x79\x0d\xa0\xf0\x9c\x7c\x05\x0d\x45\x38\xf9\x41\x6e\x35\xc9\xb8\x54\x22\x25\x3f\xcb\x0f\x9f\x0a\x73\xf7\x47\xc2\xc9\x8b\xea\x0f\xc2\xc9\x4d\x43\xb2\x4e\x4f\xd4\x1d\x1d\x8a\xe5\x76\xf6\x4c\x43\xf6\x0c\x0a\xa0\x6b\x9e\x69\xb3\x6a\xe8\xea\xed\x6e\xe7\x84\xba\xa9\xb6\x7e\x83\x4b\x23\xf7\x6a\x5e\x08\xa9\x4e\x65\x18\x60\x32\x92\x2a\x2d\xb2\xa9\xe2\xc3\x37\x38\x59\xbb\x53\
xc2\xd4\xf0\x14\x4e\xb7\xad\xe4\x44\x59\xfa\xa7\xbb\xd9\xba\x69\x8c\xc8\x6d\x1b\x8d\xe0\xf9\xd3\x17\x94\xaa\xae\xe8\x7a\x48\x77\xe3\x1c\xe1\x17\xc0\x9e\x12\x52\xb7\x39\x3e\xc3\x5a\x40\x24\x57\x8d\x38\xf1\x80\x98\x95\x67\x22\x08\x27\xae\xca\x8d\xaf\x09\x57\xc2\x66\x9c\xd9\x34\xae\x13\x78\xa2\x6d\xf1\x03\x30\xfa\x90\x0e\x72\x09\x42\x18\x82\xff\x0f\x6e\x44\x91\x52\xb4\x66\x5f\x7e\x2d\x2f\x15\xad\xbb\xd0\x40\xc6\x58\x8c\xaf\xa4\x85\x80\xc7\x39\x55\x51\x14\xbe\x7f\x52\x04\x28\xbb\x83\xc2\x8a\x44\xc5\xd8\xc6\x75\xd0\xc6\xcc\x06\x8b\x18\x4d\xea\xc1\xa6\xd1\xc1\x5b\x4e\x3e\x84\x8c\xa4\x8e\xb4\x84\x4e\x6c\x73\x91\x18\xcb\x62\x3d\x47\xef\x60\x63\x5d\x37\x43\xf0\xb6\x3f\x39\x52\x1d\x68\x5c\xe2\x70\x34\xd2\x95\xa3\xcf\x74\xb8\xf0\x0e\x4b\xed\x35\x28\x91\x45\x17\x80\x52\x3c\x6d\x40\x66\xca\x53\x95\x36\x59\xe6\x07\xa2\xb9\xe2\x93\x65\xa0\x38\xe6\x9e\x44\x7d\x82\x1e\x2d\xf0\x89\x8e\x8f\xca\x3f\xc7\x8e\xdc\x24\x8d\x12\xc6\x36\xd7\xd5\xed\xc8\xea\x5d\x2b\xfc\x28\x38\x2f\xbb\x2e\x36\x63\x5e\x16\x36\x0f\x73\x0c\x96\x23\x5b\xe5\x29\x57\xa5\x4c\xac\xc7\xbe\x79\x31\xb5\x3b\xa0\x79\x01\x7d\x60\x37\x54\xf6\x2d\x89\xdb\x19\xec\x83\xee\x12\x9f\x3d\x0e\x1c\x79\xd7\x4d\xe3\x39\xe6\x60\xe9\x1f\xb2\xfa\xb8\x75\xc8\x1d\x7f\xff\x0c\xda\xb5\xcd\xec\x39\x7d\x08\x9a\x1d\xc9\x92\xc4\x0a\x88\x94\xd6\x4d\xf3\x12\xb8\x04\xe3\xc3\xba\x2a\xf7\x37\x0a\xa0\xfa\xe2\xc9\x82\x6f\x8b\xb2\xfc\xd1\xbe\x6b\xd2\x5b\x44\x27\x0b\x5e\x16\x4a\x7e\xe7\xaf\xaa\x2e\x1b\xd0\x1e\xe0\x8f\xdd\x75\xae\x20\xde\xeb\xb6\xd8\x54\xb7\xf0\xeb\x0f\x24\x24\x32\xbf\xaa\xea\x06\xf0\x95\xd6\x0d\x80\x70\x34\xf1\x81\x6c\xcb\x2a\xd7\x24\xf0\xe9\x5a\x37\xcd\x37\x26\x2d\x21\xee\x17\x89\x09\x0c\x09\xbc\x38\x72\xb8\x38\x89\x4f\x45\x3c\x3b\x64\x71\x92\x03\xec\xde\x9e\x33\x2e\x8e\xaf\x1e\xc2\xb6\x08\x39\x6e\x14\x03\xd0\x42\xc8\x05\x9e\xa3\x38\x76\xa1\xc2\xa0\x92\xd3\xde\xb5\xd0\x8a\xee\x79\xc9\x8c\x42\x04\x37\x3a\x3e\xb4\xe0\xb2\xcc\x90\xfc\xcc\x2d\xe5\x4d\x47\xef\xd4\x80\xfe\xd0\x58\x5e\xa7\xc9\x92\x17\x96\x0b\xaa\x8a\xf7\xa9\x02\x68\xe2\xdc\xbb\x94\x07\xf1\x4e\x22\x87\x07\xff\x66\x15\x70\x54\xda\x6a\x41\xab\x74\x99\x4d\x97\xec\x71\x95\x5e\x64\x53\x20\x73\x82\x86\xa3\xdd\x4c\x62\x3c\x17\x8e\x4d\x92\xf1\x89\x0b\xb3\x35\xab\xae\xa3\x98\x34\x85\x17\xcd\xcb\xfc\x25\x05\x14\x6b\x77\x63\x22\x44\x6e\xbf\x0b\x47\x10\x36\x48\x3d\x15\x64\x77\x47\x7a\x08\x09\xa5\xcc\x6b\xb0\x73\xbd\x41\x4e\x12\x62\xf1\xb1\xc1\xa7\xb7\x83\x38\xfe\x90\xaf\x3f\x7d\xac\xab\xbd\xda\x10\xc0\xd7\x4d\x55\x26\x48\xa1\xae\x65\x0d\xc0\x0b\x4d\x47\x99\xd5\xc0\xe7\x35\x9e\x32\xab\x60\x0c\x5a\x94\x01\xf2\x16\x3c\x58\x5b\x7f\x84\x35\x3b\xd8\x71\x3e\x32\x4c\x1e\xee\xf7\xce\x18\xfa\xa7\x3d\x6e\xc7\xc7\xff\xa4\xdf\x07\x5d\x9d\x77\x5d\xbd\x30\x9a\x36\xcf\x2d\xb8\x64\x8e\xd3\x57\x41\x98\x00\x51\x40\x21\x63\x7b\x02\x0e\xa6\xfe\x8e\x99\xfe\xae\x01\x17\x99\x58\x8a\xa1\x3a\xa1\x95\x08\x7a\x39\xb7\x31\xac\x0b\x34\x1b\xbe\xdc\xdf\xc8\xba\x58\xd3\x8a\x25\x55\xdb\x2e\xe2\x9c\xc5\xb9\x59\x64\xc0\x6a\x7d\x59\xdd\xec\xf6\x5a\x6e\xa0\x87\x12\xfa\x3a\x00\x60\xd2\x7d\xd2\x99\x5e\x4e\xaa\xc1\xa1\x83\x1d\xf9\xcf\x7a\x9c\x81\xc8\xea\xe2\x75\xdb\xe2\xbe\x50\x8a\x26\x69\x10\x27\x09\xe1\x25\xe0\x6c\x82\x2a\xd6\xb6\xa6\xc5\x62\x1d\xcc\xb8\x6e\x82\x50\xe2\x4c\x46\x5f\x92\x57\x80\x69\x22\xd8\x34\x98\x91\x53\xd1\xb2\x50\xb2\x28\x7a\xab\x1d\xdf\x16\x9c\xb7\xef\x67\xc0\x0d\xc4\x2b\xb1\x9f\xdd\x14\xea\x67\xb8\xc8\xcd\x45\x7e\x87\x17\x5d\x7a\x90\xea\x9e\x13\xa5\xf9\x16\x5b\x86\x4b\x2b\xc2\x67\x2a\x1e\x3c\x95\x33\xc6\xcb\x23\x8b\xf3\x21\x66\x9f\xc3\xb0\x7d\x63\x65\x81\xd7\xa3\x74\x28\xb2\x97\xed\x7f\xd9\xdc\x0f\xb4\x2b\xce\xf5\x32\x8a\xf6\x51\x64
\xe6\x0c\x38\x33\xec\x61\x48\x85\xcd\x36\xf9\x63\xd8\x6e\x46\x9f\x00\x78\x9f\x7a\xaf\x10\xa2\xca\x8c\xff\x1c\x8d\x72\x70\x13\xd6\x23\xf8\x29\xfa\x35\x87\x34\xc6\xb1\x0c\x41\xcc\xae\xf2\xa6\xf8\x43\xba\x6d\x49\xde\x90\xd8\xb4\xeb\x7e\xb6\x2b\xee\x24\xc0\x12\x4e\xcd\x72\xe2\x1e\x28\xc2\x92\x4d\x93\xc2\xd8\x2f\x13\x64\x61\x8a\xcb\xf0\x14\x67\x18\xee\xf2\x0f\x47\x65\xd0\xd9\xa9\x93\x17\xb9\xbe\x36\x7d\x44\x17\xbc\x4e\x97\xd9\x39\x55\x80\x42\x38\xa5\x35\x32\x1b\x98\x95\x2c\xd6\x01\x3d\xcb\x29\x31\x83\x50\x42\x08\x5a\x27\xe4\x03\x52\xfe\xc4\x8e\xf2\x87\xb0\xe4\x49\x6c\xf9\xa7\xc0\x41\x77\x19\x2f\xc0\xa0\x0e\x62\xf2\x93\xa7\xd5\xaa\x9a\x8a\x0b\x46\x50\xe3\xb2\x27\x8f\x34\x9f\x76\xf2\xd1\xf4\x9f\x1a\x58\x0a\x16\xb0\x0e\xd4\x09\xf5\x45\xbb\xcc\xe7\x9d\x54\xbf\xb3\xec\x44\xfd\x87\x5c\xe9\x93\xd3\x07\x6c\x7d\x31\xff\x94\xfc\x8c\x44\x59\xf8\x1c\x8b\xc3\x8a\x8c\x96\xdd\xa5\x4e\x4e\x6a\xfe\xe5\xb2\x3b\x69\xcf\xb7\x6b\x33\xe8\x2b\x93\x53\x84\x6d\x27\x67\x08\xd3\x09\x25\xc5\xee\x0a\x45\x10\xee\x64\xce\x3c\x30\xec\x7d\xa8\xee\xde\x14\x7f\x14\xea\x63\x14\xd9\xda\x9c\x5b\x2c\x9d\xa0\x92\x36\x0b\xb1\x52\x68\xb1\xa5\x8b\xa7\xa2\x68\x5b\x8b\x27\x01\x72\x44\x81\x6b\x31\xd8\x39\xe9\xe2\x69\x70\xd7\xe2\x73\xc2\x9c\x4a\x75\x30\x6b\x0a\x6f\x95\x2a\x56\x35\xec\xcd\x23\xf5\x7a\x2d\xcb\xc2\x28\xe1\x6d\x5b\x80\xa5\xa7\x2b\xa6\x08\x17\xf1\x82\xb5\xed\xc2\x89\xc5\xc5\xd4\x0d\xc0\xb6\xa5\xf9\xd8\x90\xe3\x35\xaf\x18\x4c\x99\xae\x71\xcb\xc0\x64\x92\x73\x25\xbe\xd5\xa9\xcc\x42\xd2\x59\x25\xf6\x28\x41\xf2\x4e\xd7\x89\x22\x73\xe7\x95\x16\xf4\x95\x6e\xdb\x3b\x4a\xfe\x8a\x41\x78\x67\xf0\x3f\xbe\x58\xfc\xe7\xe2\x3f\xcf\x70\xe1\x33\xbf\x90\x62\xcd\xfc\x9c\x3f\x25\x0c\x5a\x99\x58\x7a\x30\xe2\x35\xa2\x18\x34\xf5\xb3\x49\x71\x63\x9a\x23\x37\x75\x76\x76\xab\xab\x8a\xea\xe1\xea\xc8\x38\xd4\x20\x5d\x64\xfd\x30\xbf\xb6\xed\x25\x76\x26\x3a\xf7\x3c\xd7\xb3\xdb\xba\xd0\x92\x92\xbf\x4e\x36\x15\x70\x64\x9d\x5d\xeb\x9b\xf2\xe9\x5f\xf1\x7f\x0c\x69\x00\x37\xaa\xb2\x82\x73\x96\xae\x15\x5e\x69\x8b\x4c\x48\x19\xe3\xd0\x5a\x42\x31\xae\xba\x16\xdd\xf7\xa2\xbf\xee\x4e\xce\x78\x64\xff\x9b\xe0\xf0\x03\xe2\x87\x4d\x9b\xa8\x74\x91\x05\x0a\xe2\x2a\x40\x84\xb7\xce\x82\xf5\xb1\x63\x8a\x93\xd6\x42\x81\x73\xe1\x94\xbe\x2e\x90\x36\x4e\x98\xf9\x60\x91\xf2\x6b\xdd\x02\x31\x9e\xba\x69\x14\x45\xef\xec\x80\x3d\x35\x0d\x30\xa3\x7c\xdd\xe6\x3b\x2a\xf9\x4f\x9a\x9f\xea\x4a\x8d\xa3\xaa\x3c\xb2\xd8\xff\x8e\x47\x08\xe3\xba\x98\xbd\x3a\x8a\x60\x9a\xba\x0f\x76\xf7\x93\xdc\x49\x64\xff\x0f\xb3\xb7\x60\xe6\xfd\x8b\x21\x09\x9e\xd5\x4c\x9c\xd8\x86\xf4\x73\x36\xf1\xa4\xb9\x3a\x41\xe7\x7b\xdb\x2e\x14\xd0\x4f\xc2\xfd\x2b\x19\x6c\x67\x36\xc0\xd2\xb1\xe1\xe1\x25\x72\xe0\x27\xb3\xc5\xf2\x71\x30\x93\xd1\x0e\x36\x7b\xb4\x64\x53\x42\x62\x0d\x4a\x18\x21\x0f\x12\x50\xbb\x25\x01\x0f\xfb\x83\x77\x72\xe4\x9b\x77\xe2\x9c\x66\x09\x01\x1b\x21\x75\xdf\x45\xa6\xcb\xc5\xe2\xb1\x9e\x12\x66\x5e\xc0\x2b\x01\x7e\xab\x58\xb5\xb6\x55\xfe\x17\x21\x2b\x35\x33\x8a\x98\x58\x72\xaa\x9f\x8a\xa5\x49\x02\x39\x3a\x8a\x88\x3d\x07\xad\x8b\x1b\xda\x1d\x2d\x3e\x42\x17\xbc\x28\x3a\x39\x3f\x88\x22\x7a\x7a\xa6\x40\xf0\x4d\xce\x44\x83\x60\xc2\x93\xba\x6b\x25\xea\x2a\x23\x1e\x75\xa7\x7d\xbd\xd7\x15\x2c\xae\x00\xf9\xc2\x46\xb6\xf6\x6d\xae\x9e\x81\xd8\xae\xa5\x2f\x60\xb3\x03\xa3\x5e\xbf\xcb\x6f\xba\x1b\x63\xfc\x95\x6e\x0a\x76\x83\xfe\xe0\x2d\x8b\x85\x32\xca\xee\xb9\x33\x30\xfe\xac\x79\x2a\xdd\xb6\x8a\xe6\xc3\x0c\xc1\x54\xc2\xf3\x4e\x90\x5e\x5e\x59\x23\x26\x40\x32\xa8\xd9\x2e\xb8\x74\x2c\x84\x60\x84\x2c\xd1\xe6\xf8\xbf\x9a\xd9\xc1\xbc\xa6\xb5\xd5\x16\xba\x0d\xa8\x46\xb4\x0b\xff\x4a\xca\x52\x95\xc1\xae\x10\x3b\x9e\x4a\x6b\x5d\xd
8\xd5\x9e\xe6\x0b\x7b\xa2\x41\x18\xb3\x20\x61\x86\xc6\xd6\x31\xa9\x74\xf1\x74\xb8\xa2\x04\x29\xb8\x39\xf7\x7c\xdc\x5d\x3f\x7d\x07\x25\xfe\x08\xd9\x1a\x67\xfb\x32\x62\x94\x74\x26\x31\x39\xb4\x77\x8d\x2c\x51\x9e\x38\xd5\x55\x14\x2c\xc4\xa3\x2c\x72\x93\xd1\x4f\x82\xd3\x85\x0e\x4b\x13\xba\xd4\xcc\x18\x2b\xd8\x98\x9f\xb8\xf8\xc4\x56\x82\x39\xf5\x67\xf2\x9d\x24\xa7\x3a\x13\x07\x3c\x6c\x8c\x4f\xdd\xc9\x8d\x3c\x73\x38\x02\x27\xff\x09\x40\x95\x0a\x30\xc5\x63\xa3\xe6\x5b\x01\x11\xdd\x9d\x8a\x54\x1a\x31\xaa\xce\xcc\x0b\x2a\x70\xd3\xaf\xd2\xfa\xfc\x02\xfe\x2e\x82\x08\xe5\x23\x7f\x1b\xc4\x0b\xd0\x41\xdd\x8c\xae\x2c\x2a\xe5\xa8\x12\xd7\x4a\xcc\xff\xe3\x62\x31\xff\xc8\x77\x4a\xcc\xdf\xa7\xef\xb3\x47\x73\xbe\x35\x3f\xeb\xe4\xbd\x9a\x7f\xe4\x1b\x85\xc1\x95\x88\x7b\xd8\x5a\x6a\x86\xe2\x26\xff\x28\xdb\x5a\x36\x52\xb7\xdb\xa2\x94\x10\x6c\x79\xad\xbe\xc0\xbb\xd8\x7e\x92\xf7\x1f\xa5\x62\xf3\xa2\x0f\xf3\xd3\xb8\x80\x88\x51\x64\x37\x08\x60\xa4\x16\xff\xc6\x66\x44\x3b\xa3\xe9\xf6\x7e\xd2\x43\x67\x35\xa3\xc7\x6d\x96\xe1\x1e\xcf\x68\xa4\x75\xd5\xea\x36\x61\x99\xf4\x60\x40\xd0\x84\x77\x1c\x43\x0d\x0a\x90\x65\x4d\x47\xf6\xce\x9e\x14\x20\x26\x4e\x9c\x45\xaf\x68\x28\x89\x1d\x13\x05\x61\x51\x74\xad\xc6\x90\xe9\xa2\x68\xb2\x51\x41\xa8\xbf\x3d\x25\xb2\xa7\xf9\x13\xef\x5c\xc5\xd8\x91\xf5\x3f\xae\x2f\x8b\xc0\x3b\x81\x8a\xb3\xaf\xe8\xa9\xc4\xfc\x8d\x7b\x54\x9e\xe8\x09\xae\x46\x62\x2a\x0e\xe6\x23\x62\x0d\xdf\xc2\x91\xab\xb7\x8b\x29\xd9\x2a\x4e\xde\xd7\xef\x15\x31\x8b\x72\x3c\x92\x55\x8d\x67\x45\x2f\x3c\xdc\xa2\xa1\x8b\xc5\x08\x88\x95\xf3\x03\xec\x7f\xde\xe0\xb4\x4f\xb3\x44\x53\x07\xfc\xa6\x13\xb3\xa1\xf2\x22\x2d\xac\x25\x38\x13\xc0\x86\x2a\xdf\xbe\xfe\xfe\xb2\xba\xd9\x55\x0a\x85\xb2\x29\x11\x64\x3a\x72\x47\xb3\x23\x5a\xf7\xd0\xfa\xa2\x04\x1e\x6b\xbf\x91\x5a\x17\xea\x23\xf2\x1a\x05\xd7\x33\x5d\xe7\x1b\x58\x57\x73\x60\x06\x72\xed\x69\xa6\x9d\xc3\xb2\xb1\xbc\x42\xaf\xca\xbc\x50\x96\x0c\x4d\x32\x66\x97\x1a\x19\x0a\x54\x15\xf5\x63\x06\x63\xa3\xd0\x51\xf4\x68\x1d\x8c\xbc\xbf\x80\x64\x1f\x15\xad\xb9\x4c\xeb\x8c\xab\xc0\x66\x5e\x58\x02\xae\x28\x70\x95\x5f\x2b\x4e\xa6\x84\x1d\x3b\x65\xf8\xe3\xd0\x75\xaa\x6f\xbd\xd7\xbe\x6e\x81\xb0\x67\x76\xdf\x83\x6a\xdb\x9d\x1f\x95\x49\x4d\xa5\xd9\x92\x4d\x69\x53\x92\x92\xe9\x29\x24\x5b\x91\xe8\x98\x10\x36\x25\x19\xe1\x05\x52\x72\x77\xae\x52\xaa\x6d\xdd\x03\x13\xef\x66\xa5\x19\x83\xd8\xbe\xe0\x83\x0b\xf3\xc1\x9a\xf9\xd7\x14\x50\x9c\x4e\x8b\x0c\x4b\xb4\x75\x25\x1f\xca\x7d\x7d\x06\xa8\x8c\x67\x16\xaa\xf1\xcc\x61\x34\x9e\x95\x55\xbe\x39\xab\x65\x53\xfc\x21\xcf\x90\x87\xe0\x0c\x31\xe4\xcf\x00\x9e\xf4\x6c\xf3\xa1\xc4\x1f\x00\x90\xbc\xa9\x6e\x15\xfe\xda\xef\xf0\xaf\x11\x65\xce\x3c\xa6\xf2\x99\x83\x51\x3e\xeb\x20\x97\xcf\x3a\x98\xe5\x33\x44\x9d\x3c\xb3\x21\xde\x1d\x06\x30\x94\xeb\xb0\x80\xcd\x8f\xfd\xee\x4c\xd6\x75\x55\x9f\x05\x9c\xb3\x7d\x52\x89\x53\x8f\x59\x9d\x8d\xc3\xa1\x9f\x1c\x80\x2c\xf0\x28\x02\xfa\xce\x06\x5a\x29\x87\x3c\x6e\x01\xe0\xf4\xa9\xa3\xc5\xb5\xf9\xc0\x2f\x80\xd4\x75\x5f\x6c\x64\x8a\xee\xa3\xa9\x06\x56\x32\x40\x89\x7e\x90\xdb\xdf\x63\x10\x22\xe2\x3d\x1c\xde\xec\xd5\xc9\x23\x83\x07\xb6\x5b\xff\x04\xeb\x48\x0c\xfe\x1c\xea\x10\xf0\x26\xcc\x08\xe1\x7b\xf5\xc0\x53\x3d\xc0\x91\x61\x0b\x26\xc1\xeb\xc9\x63\xf0\xbd\x75\x09\x9a\xcb\xb6\x35\x69\x40\x30\x8e\xbb\xe8\x8d\xe2\xf7\x8a\x7f\x56\xc2\xa1\xb3\x7f\x30\x3b\x68\x32\xe7\x77\x4a\xcc\xff\x7d\xf6\xf8\xd1\x9c\xdf\x2a\x31\xa7\x69\x12\x65\xec\x57\x91\xfe\x2b\xca\x1e\xcf\xf9\x15\xec\x97\xb3\xc7\x09\x8b\xd3\xb3\xf7\x3a\x7b\x4c\xd3\x7f\x99\x25\x32\x7b\xcc\xde\xd7\xc9\xa3\xf9\xc7\x1b\x7e\x69\xb7\xd4\xfc\x43\xb5\xd7\x6d\xbe\xdb\x99\x7f\xe7\x8d\xae\x6a\xb3\xff\x
ce\xa6\xe7\xd0\x77\x0d\xe2\x47\x94\xb0\x23\xb7\xb7\xc5\xe6\xa3\xd4\x2c\x7e\x34\xe7\x2f\xed\xe3\xdf\x7e\x7d\xd5\x7e\xf7\xf5\xb3\xe7\xec\xd1\x9c\x7f\x32\x69\xef\xe7\xef\xe7\x73\xfe\x35\xdc\x4e\xdf\xdf\xce\xa6\xe7\xd9\x34\x06\xe6\xe2\xf9\xfb\xb9\xa9\xc6\x3c\xf9\xf7\x18\xb9\x8c\x63\xfa\x7e\x33\x65\x2d\x6b\xd9\x9c\xbf\x31\x5f\xb8\x55\x33\x33\x7d\xf8\x33\x65\x24\x99\xdf\xe0\xff\xe7\x4a\x90\xc7\x73\xe2\x88\x79\xc8\x63\x1b\x60\x78\x0f\xb0\xdd\xb5\xdc\x5a\x2b\xfc\x0f\x8a\x99\xb4\x93\xd0\xfa\x9c\x30\x7e\xaf\x20\xa7\x20\x84\xdf\x2b\x61\xaf\x8e\x37\x4a\x7c\xad\xd0\x60\x77\xaf\xfa\x1e\x91\xc0\x43\xda\x2d\x6d\xdf\x85\x12\x69\xb0\x78\xa9\x11\x14\x52\x5c\xde\x35\xd7\x02\xaa\xea\xf6\x9b\x05\xaf\x84\xee\xbf\x24\xa4\x48\xcd\x56\x43\xbf\x23\xc5\x2c\x6a\x48\x2d\x2a\x74\x61\x23\x53\xb0\xc2\xa7\x8b\xcc\x08\xe4\xb5\x3f\xde\x07\x17\x6e\x8e\xb4\x07\x12\xa4\xb7\x34\x63\xb3\xbd\x42\xc2\x47\xc5\x58\x3c\xbc\x67\xd1\xd9\x82\xf8\x90\xdf\x87\x47\x16\xa6\xe9\x73\x21\x85\x10\xbf\xa9\xae\x21\x1a\x5a\xe2\xfd\xbd\x57\xb1\xd3\x12\x18\x02\xdc\xbe\x03\x27\x18\x69\x4f\xd5\xb0\x8f\xac\x45\x89\x54\x9b\xfe\xa4\x70\xd8\x74\xeb\xb6\xcd\x8d\xc8\xb9\xce\x92\x3c\x99\xd0\xbd\x58\xb3\x58\xc7\x54\xcd\x9c\xab\x57\xe3\xbf\x6a\xcd\x78\x63\xfe\x9b\x2c\x8d\x7c\xbd\xf7\xfe\x7b\x61\xe6\x74\x91\xb1\xb6\x9d\x54\x29\x79\x4c\xb2\x28\x6a\x60\xec\x04\x91\x46\x43\x36\x9d\xc1\x9e\x3c\xdb\x96\xb9\xfe\x11\x23\x94\x81\xaf\xce\x6f\x1b\x8a\xa9\xb4\xc8\x2c\x0b\x08\xad\xd2\x22\x4b\x64\x5c\xb7\x2d\xad\xc5\xe1\xc8\x58\x5a\x64\x42\x41\xa4\xb8\x53\xa0\x02\x2e\x86\xc9\x82\x1b\xdd\x8a\xdb\x03\x73\x33\xdc\x4f\xcc\xec\xa3\x88\x93\x51\xf4\x46\x39\x9b\xdf\x1b\x35\x8e\x00\xbb\x0a\x0d\xf4\x70\xaf\x84\x18\x14\x7b\xfa\x75\xd6\x49\xa1\x25\xd2\x84\x16\x1e\x5d\xaa\xe4\xd2\xd3\x85\x49\x9f\xba\x80\x23\xa7\xc1\xb8\x4c\x3c\x80\x69\xac\xa2\x68\x8c\x0a\x94\xe6\x82\xbc\xfa\xf1\xcd\x15\x61\xbc\x09\xfc\xb2\x47\x5c\xf9\xf2\xc0\x8d\xef\x5a\xdf\x94\xe8\xd6\x17\xab\x23\x9b\x6d\x2a\x25\x69\x28\x35\x56\xdd\x5a\xca\x1b\x0c\xcc\x2c\x92\x3b\x4a\x10\x7a\xc3\xbb\xa6\x81\xdc\xd7\xc8\xef\xae\x5e\xfc\x00\x56\x32\x40\xcd\x2c\xd0\xf9\x6a\xb6\xae\x6e\x76\xa5\xd4\x92\xd6\x51\xd4\xdf\x1c\xec\x31\x7a\xcd\xab\xb6\x4d\x81\xbe\x65\x57\xa9\x06\x9c\x63\xb9\xe6\x32\x03\x40\x6b\x70\x74\xf3\xba\x35\x8c\x16\x9d\xd7\x9a\x70\xfb\x1b\x94\x6d\xf3\xf3\xd2\xbe\xc7\x5e\x7e\x6d\xf6\x63\x97\x6b\x0f\x1e\x10\xee\x4a\xaa\x4d\x5f\x33\x1f\xdd\x92\xc7\xb6\xa1\xc1\xf9\x3d\xb2\xb8\xc7\x0b\x5e\xe6\x8d\x7e\x51\x6d\x8a\x6d\x21\x37\xf1\xe1\xc8\xa5\xce\x3f\x02\x75\x7b\x30\xb8\x63\xe8\x84\x7b\x15\x3a\x54\x16\xcd\x0f\xd5\x3a\x2f\xe3\x4b\x2b\x8c\xdd\xa8\x74\x99\x31\xef\x4c\xb9\xe0\xbb\xba\x32\x35\x07\xaf\xd8\xc9\xc2\xb9\x5b\x7a\xe4\x55\xec\x45\x33\x30\x8b\x35\xf0\x48\xcc\xef\xce\x6f\x6f\x6f\xcf\xb7\x55\x7d\x73\xbe\xaf\x4b\x14\x8b\x37\x2b\x20\xf5\x32\xba\xe2\xdb\xab\x6f\xce\xff\x9b\x70\xf4\x6f\x6d\xe2\x03\x79\x4c\xe2\xe7\x8a\x03\x48\x09\x44\x1e\xcd\x77\x46\xb8\x25\x18\xa0\x8f\x29\x38\x48\xee\xcc\x75\xef\x4d\x37\x25\x3f\x83\x0c\x77\xe6\xfe\x6f\x0d\x38\x76\x05\x19\x4c\x8a\xcd\xf1\x5b\xfe\x39\xb7\x2e\xa3\x01\x42\xdc\xc1\x94\x69\x9e\x9e\xe3\xeb\xe0\x4d\x73\x2c\x09\x9e\x9e\x1f\xb9\x1b\x14\xdf\x00\x44\x0e\x3e\x42\x5c\xe2\xbb\x17\x3f\x10\x5b\xf7\x70\xf0\xb8\xca\xb8\xb4\xbf\xbd\xf9\xf1\x25\xbe\xf7\xb3\xac\xb5\xac\xe1\xbb\xa1\x62\x24\x7e\x03\x93\x9e\xc3\x97\x82\x29\x99\x98\xe6\xc5\x4b\x53\x0a\x89\xed\xe0\x36\x85\xd8\x74\xf3\xbd\x2e\xf9\xdd\x8b\x1f\x8e\x3c\x58\xb6\xb0\x97\x5d\x0f\x01\x24\xee\xd1\x8f\x83\xfd\xee\x01\xf1\x28\xf9\x55\x51\x58\x1c\xfb\xeb\x21\xe3\x9a\xc5\xbf\x2a\xda\x4f\x05\x6c\x3e\x93\xf0\xaa\x96\xd6\x52\xf9\x9d\xa2\xcf\x14\x83\xc4\xab\x3a\
x57\xcd\xae\xaa\xb5\x49\xfc\xcd\x26\x0e\x2c\x63\xa7\x90\xcb\xb0\x83\x4a\x8e\x84\x1b\x42\xc1\xe2\x7b\x12\x57\x0e\xe1\x18\xbe\x2e\xfb\x1d\x3d\x1c\xb9\x62\x7c\x2b\x76\x0e\x0f\xb5\x6d\x77\x7c\xd3\x5d\x46\x11\xdd\x06\x61\x2e\x5b\xab\x46\xb1\xe4\x8e\x6e\x3d\x4b\x12\xbf\x16\x77\xb3\xe7\x72\x2b\xeb\x5a\x6e\x28\x83\xc0\xf4\xcb\xbc\x2c\x3f\xe4\xeb\x4f\x0d\x25\x95\x5a\xcb\xb3\x1b\x79\x53\xd5\xf7\x84\xf1\x1b\xb1\x9b\x35\x3a\xd7\xfb\xe6\xd2\xa2\x16\xf2\x7b\xb3\x6f\x7e\x36\xff\x7d\x10\x0b\x7e\x2b\x08\x72\xb7\xc8\x0d\xe1\x97\xe2\x50\xcb\x7c\x73\xff\x46\x1b\x89\x71\xc1\x3f\x4a\xfd\xda\x8e\x89\xef\x00\x92\xeb\xd4\xfd\xd4\x08\x05\x17\x42\x88\x0f\xc8\x2a\xbc\x66\x87\xb5\x38\x1c\x1d\xd9\xa0\xb8\xb2\xd2\x4b\xce\xd8\x3a\xd5\xa7\x30\x0d\x42\xa7\x17\xd9\x51\x8b\x75\x3a\x88\xe5\xcc\x8e\x3d\xb5\x5d\xa3\xda\xae\x8f\xa6\x4e\xcf\xca\xb2\x5f\xad\x66\xc4\xfa\x01\x95\x4a\x72\x8b\xc9\xd9\x98\x2f\xf9\x7d\x2f\x1b\x7d\xf2\x21\xa1\xad\xb9\x57\x05\xb7\x11\x7d\x68\x5b\x2a\xc5\xe7\x54\x65\xf0\x5f\xdb\x4a\x7e\x9f\xca\x4c\x68\xb7\xda\x1a\xcd\xa1\x2e\x36\xf2\x45\x71\x83\x9c\x31\x23\x8b\xa2\x29\x64\x37\xbb\xb1\x39\x84\x74\xcf\x76\x9d\x33\xde\xb6\x00\x7e\x7b\xf1\xf4\x03\xb8\xcc\x5b\xef\xc7\x1b\xb3\xf0\xa6\x37\xc8\xe1\xa8\xb3\x0c\x55\xc6\xcb\x59\x5e\xde\xe6\xf7\x0d\x95\xe9\xa5\xed\xf3\x01\xf6\x37\xcf\x3f\x98\x71\x7e\xea\x42\x2c\xdb\xf6\xd6\xe5\xdc\x47\xd1\x7e\x06\x19\xa9\x66\xfc\x13\x5d\x70\xf7\x9d\x60\x1e\xb8\x9e\xed\xea\xea\xa6\x68\x24\xbd\xec\xb6\x2a\xf1\x71\x96\x6f\x36\xfc\x72\xd6\xe0\xc6\x21\x2e\x61\x6b\xe4\x97\x33\xd0\xf2\xc4\xe5\x6c\x9b\x17\x25\xdf\xcd\xf6\x75\x29\x28\x95\x6d\x0b\x3f\xdb\xf6\x5e\xb1\x69\x18\xdd\x7e\xa7\x78\x78\xf9\x49\x71\x58\xe1\xa7\x64\x3e\x27\x8c\xef\x30\x52\x5b\xcd\x6e\xa4\xbe\xae\x36\x6d\xab\x6c\xa4\xc4\xce\xa7\x60\x16\xbe\xeb\x44\x2c\x67\xbf\xef\x92\x30\x9e\xf0\x61\x41\x97\x90\x8c\xe3\xa8\xdb\xcd\xd6\x75\xd5\x34\xcf\xab\x9b\xbc\x40\xe8\x7e\x27\x8d\x43\xfd\x07\x02\x39\xef\x65\x17\x13\x88\x25\xa9\xd3\x65\x26\x84\x80\xcf\x88\xa2\x3a\xbd\xb0\x57\x17\x99\x29\x2f\xfd\x4b\xd6\xb6\x94\x5c\x6b\xbd\x8b\x51\x72\x5e\x66\x09\xf9\xef\x05\x89\xc9\x93\x27\x7f\x21\xe0\xd0\x63\x36\xb9\x41\x36\x28\xad\x97\x0f\xde\x8e\xe1\xd6\xbb\x59\xb0\x09\x76\x28\x9c\x5e\x4c\x73\xf9\x6c\x8b\x08\x67\x67\xc4\x4b\xd3\xc8\x81\x61\x87\xf1\xdf\xcd\x52\xc9\x77\x5c\xf1\x4b\xc6\x71\xa6\xdb\x91\x72\xb9\x2a\xc5\xce\x82\x19\xf0\x32\x8a\x16\x70\x58\x82\x7b\xfc\x74\xda\x91\xd5\x38\xa5\x3b\x10\x46\x7c\x5f\xe2\x9f\xbe\x5b\x30\xdf\xcd\xae\xf3\xc6\x85\x13\x4d\x5e\xda\xbd\x1e\xb3\x32\x5e\x09\x68\xfc\x5e\x2e\x98\x5c\xf6\xb3\xec\xfd\xa9\xa0\x1f\x54\x17\x59\x15\x91\x98\x24\x84\x4d\xed\x57\xda\x18\x11\xbc\x82\x9e\xcb\xd7\xd7\xd2\x61\xdd\xe3\x40\xbd\x55\x23\xa7\x31\xb7\x8a\x93\x47\xcb\x5f\x05\x99\x7e\x56\xd3\x29\x8b\xab\xe9\xe8\x6b\x88\xcf\x61\x0a\x2f\xb6\x4e\xd8\x81\x73\x85\x50\xfa\x81\x00\x96\xcb\xd9\x70\x7d\xa2\xe4\xfb\xed\xb9\xcb\x73\xfe\xa6\x50\x6b\x49\xf8\xc9\x93\x20\x5b\xe9\xfc\xe3\x97\x0a\x79\x59\x29\x79\xfe\xc2\x8c\x6e\xd2\xe5\x66\x8c\xd3\x6e\xbc\x74\xed\x68\xae\x02\x51\x69\x82\x24\x01\x2a\x4c\x63\xe3\x6f\xb2\x05\x9c\x9b\x2c\x84\xf7\x4a\x61\x7c\xec\x81\x67\x20\x52\x91\x70\xaa\x02\x2a\xdb\xce\x06\x13\x35\x69\xff\x4e\x96\x3c\x78\x67\x6a\xd4\xa6\x89\x99\xae\x61\x72\x42\xf8\x19\x99\x3e\x57\x53\xb2\x3a\xfb\x5d\x2c\x66\x0b\x38\x69\x64\x71\x57\x8c\x51\xba\x58\xa7\x33\xed\x66\x88\x3b\xd9\xb0\x91\xfa\x16\xdc\xdf\x46\x84\xad\x2d\xdd\xcd\x30\x1a\xdc\xc8\xc7\x30\x68\xba\x4b\x8c\xa9\xd8\xf2\x4b\xbe\x63\x96\x69\x01\xa7\x8e\x9f\x3b\x76\x89\x65\xab\x5b\x41\xe0\x27\xf1\x15\x39\xd8\x55\x34\x5e\x72\x58\x3e\xe3\x25\x77\x0b\x6d\xbc\x3c\xb2\xcb\xb4\xc8\xe8\xce\x55\x62\x2f\x7e
\x37\x62\x8b\x9d\xa0\xec\x70\x39\xeb\x76\x70\xb1\x34\xd3\x72\x33\x98\x80\x10\xd5\x9e\x5e\xf2\x5d\x66\x86\x26\x48\xc8\xa6\xd5\x75\x71\x23\xab\xbd\x36\x4a\x10\x6d\x44\x23\xf5\x15\x26\x84\xe6\x7e\x57\x6b\x62\x33\x03\xda\xb3\x7b\x92\xa1\xd1\xe3\x83\x58\xf2\xfd\xac\x31\xf2\xfe\x3d\xff\xe4\x48\xe1\x5f\xa2\x78\x00\x3b\x19\x83\x80\xa7\xb3\x97\xab\x4f\xf4\x7c\xc9\x5f\xb2\x23\x86\xfc\xc1\x15\x79\x59\x9d\x79\x89\x8c\x04\x7e\x4c\x9f\xfa\xca\xff\x9a\xdf\xf3\xcf\xfc\x96\x5f\xf1\x97\x42\xad\x2e\x26\x42\x7c\x88\x22\xfa\x41\x5c\xf0\x26\x8a\xc0\xd5\xd2\x55\x1f\xe8\xe0\x35\xcf\x45\xd1\xb6\x84\xf0\x5e\xfb\xc8\xa7\x8b\xe4\x49\xbc\xe0\x6b\x21\x9f\x8a\x8b\xc5\x22\x8a\xfe\xb2\x58\x3c\x95\x6d\xfb\x97\xc5\x13\x21\x84\xe4\x75\x14\xd1\x5b\xf1\x42\xd1\x1d\xbf\x04\xdf\xc4\x5b\xf1\xa3\xb9\xb8\xe5\x97\x7c\xcd\xf8\x3a\xa1\x83\xc9\x7d\x25\x2e\x67\x27\x52\x13\x25\x3f\xe4\x8d\xf6\xd3\x99\x30\x7e\x35\xb6\x0e\x88\x2b\xc6\x1f\x78\xde\x4c\x5b\xff\x98\x9d\xc3\xe2\x8a\x31\x7e\x81\x15\x6d\x5b\xf2\xdd\xd7\xcf\x9e\x9b\xbd\x01\x97\xc9\xe4\xa5\x20\xaa\x72\x0e\x31\xb1\xfd\x1e\x4c\xd5\x37\xae\x22\x31\x7d\x29\x6e\x41\x56\x90\xfc\x5e\xdc\xe2\xd2\xf8\x59\xdc\xe2\xb6\xcd\xd7\x62\xf2\x99\xb1\x98\x7e\x16\x2f\xb9\xd9\xb4\x27\x2f\x59\x14\xd1\x97\x82\x48\xd4\x15\x17\x4f\x81\x23\x50\x2c\xcc\xf6\xe3\x64\x0e\x21\xfd\x4f\xa3\x5b\x08\xaa\xda\xf6\xa5\xd9\xe5\xf9\x3a\xb9\x36\x3a\x6b\x55\x7e\xc6\x00\xa0\x2d\x4f\xef\xf9\x4b\x7e\x99\xb1\xd8\xdc\x30\x12\xb6\x4b\xbf\xe4\x2f\xf9\xe7\xac\x2b\xd4\xc8\x47\xf4\xc6\x88\xb3\xba\x3f\xa4\xd7\x49\x4f\x61\x8d\x43\x55\xd6\x8c\x71\xbe\x4e\xee\x63\x53\xd2\xc7\xd9\xb6\xa8\x65\x50\x7e\xc6\x4c\x49\x74\x30\x3b\x3a\xa5\xd8\xce\x90\xf3\x73\xb7\xa5\x75\xfc\x53\x83\x0d\xad\xda\x91\x80\x50\xfe\x12\x44\x54\xa3\xfd\x3c\x0c\x46\x8e\xee\xae\x9a\x2b\x4e\x40\x67\x62\xf0\xcc\x1b\x50\xf8\x1e\x0a\x4a\x08\x9e\x71\x11\xa5\xc7\xee\xdc\x36\x05\x8f\x5a\x4e\x76\x55\x33\x72\x8a\x6e\x44\xd7\xbe\x47\x0c\xaf\xc6\xe3\xde\x30\x58\x44\x54\x6d\x5b\x70\xc7\x18\xc4\x47\x8c\x22\xaa\x33\x8a\x54\x68\x0f\xa9\xb9\x5b\xb6\x8a\x63\x1f\x48\xe2\xc5\x98\x73\x26\x98\x7d\x9c\x66\x0b\x5e\x99\x7e\xed\xb6\xca\x03\x79\x6c\x86\xf3\x3e\x5d\x64\x6c\x3f\x43\x93\x9a\xd9\xfc\x2d\xf1\xbc\x90\x5e\x98\x36\x1b\xd4\xc8\x9c\xe9\xed\x48\x2c\x84\x85\x32\x8b\x7d\x69\xe4\xea\x32\x6d\xb2\x28\x32\xff\x77\xc1\xeb\x7b\x6f\xc0\x6b\x1c\x37\x84\x59\x66\xd3\x45\x56\xa8\xb3\x9a\xe5\x50\x23\x90\xb8\x0f\xbe\x30\x34\x88\x4d\xf6\x16\xfe\xa8\x53\x9c\xd3\x06\xdc\x36\xcc\x8d\x8c\x1d\x72\xd1\xb8\x12\x21\xba\xa6\x61\xc7\x5c\xe4\x6d\x5b\xb8\xb1\x93\x27\x34\x9f\xe0\x37\x1b\x29\xdc\x55\x24\x67\x08\x33\x10\xfa\x60\xfe\xf8\x30\x0a\x80\x51\xed\xd6\x61\x83\x5a\x4b\x19\x82\x81\xa5\xcb\x8c\x75\xb4\x53\x41\x5d\xd9\x3e\xcd\x87\xda\x59\xef\x5b\xf2\x6c\x55\x89\xb5\xeb\x0a\x17\xbd\x6c\xda\xb1\xb3\x45\xa1\xd9\x01\xa3\x72\x55\x3a\x92\x0e\x6a\xd3\xa4\x8c\x22\xe0\xd8\x35\x55\xfc\x06\x54\xf2\x28\xa2\x5a\x84\x09\xc0\x13\xe6\x3e\x01\x90\x02\x2a\x1e\xbc\x1e\xc0\x0a\x60\x84\x54\xac\x12\xa5\x3f\x5c\x43\x49\xa0\x8c\xa2\x72\x62\x6e\x41\xd4\xb1\xd8\xa7\x25\xf4\x43\x95\xb5\xed\x3e\x25\x8f\xe1\x27\x9f\xe4\xcc\xef\xfd\x7b\x53\x5e\x23\x8a\xf0\x0c\xaa\x41\xd1\xbd\x02\x63\xa1\x2b\x02\x04\x0e\x5f\x0a\x5c\x31\xd3\xb3\x42\x4c\x16\x89\xc9\x56\x64\xf1\x1e\x4d\xaf\x93\x05\x06\x32\xa4\x8b\x8c\xaf\xbb\x51\x65\x3a\x20\x18\x59\x39\xe4\x34\xaf\xcf\xa3\x48\xa6\x2e\x1a\x38\x63\x5a\x00\x3e\x11\x7c\x98\xd9\x58\xf1\xda\xee\xa7\x3b\x7f\x42\x0d\xab\x77\x4c\xc0\xb8\x52\xdb\x85\x19\xc5\x86\x3c\xd9\xc5\x66\x3f\xc5\x2e\x6c\x00\x52\xbd\xae\x6e\xce\xc8\xd4\x7c\x8a\xae\x4c\x2b\x1c\x8f\xc7\x7e\x39\x8d\x33\xfd\xc1\xa4\xd6\xc7\x63\xcf\x7e\xe1\xed\x60\xb8\x04\x59\x8
[go-bindata payload elided: remainder of the preceding gzip-compressed asset byte literal, ending in "\x31\xab\x8d\xd1\xac\x6b\x01\x00")]
+var _pagesAssetsJsJquery300MinJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff...
[go-bindata payload elided: gzip-compressed (\x1f\x8b magic) jquery-3.0.0.min.js bytes, continuing as escaped hex]
13\x0d\xea\x2c\x84\xb7\x13\xbe\x5a\x7a\x74\x16\xa2\x53\xc4\x59\x46\x81\x45\xf9\xd3\xb8\xc9\x17\x2b\xf7\xac\x0d\x27\xc4\x8b\x45\xd0\x34\x89\xbe\x7d\x81\x19\x6c\x5b\x8e\xf3\x82\x5b\xc2\xf9\x40\x4e\x0b\x25\x02\x9d\x03\x7f\xa0\x72\xe3\x44\xee\x2c\x3c\x0b\x10\xee\x8e\x5a\xf0\x46\x0c\x67\xad\x85\x46\x61\x41\x99\x17\xfa\xa6\xc9\xff\x0a\x02\xa8\xbb\x0b\x4f\xc4\x19\x89\x34\xed\xa1\xef\x21\x37\x06\x82\x51\x35\xa4\x45\x5a\xeb\x01\x58\xb2\x99\xff\x20\x37\xef\xac\x2f\x1f\x68\xd6\x7c\x95\xd7\xac\xcc\x69\xd6\xfc\x40\xf3\x84\x35\x3f\xf0\x49\x64\x79\xc8\x1a\xe1\x9e\xa6\x01\x1d\xee\x1f\x7f\xf8\x0a\x01\x6e\x7e\x76\xb3\xbe\x84\x6b\xc8\x90\xe3\x01\x7e\xbe\x2a\x84\x93\x19\xf9\x68\x3f\xd2\x32\x37\xcd\xc0\x34\xff\x21\xef\xf6\xec\x9c\xee\x19\x1a\x67\x51\xc1\xab\xbb\x96\xae\xba\x96\x9c\x2b\x63\x1e\xd8\x7b\x56\x55\x34\x61\x38\x10\x78\x07\xc4\x1f\xa5\x90\x56\x7f\xa9\x72\x12\x9d\x10\x18\x20\x1e\x1d\xc7\xc2\xf1\x13\xb4\xa8\x85\x69\xf9\x61\x00\x46\x42\xe0\x0c\xb5\x4e\xc1\xd3\x0f\x62\x19\x28\xf2\x24\x3d\xe3\x0f\x35\x12\x46\xfd\xb1\x28\xea\x98\xa3\x9e\x02\xed\x22\x96\xae\x30\x64\xff\x99\xa6\xb5\x23\x9f\x07\x7b\x03\x3c\x8c\xcd\x96\xee\x62\x21\x2b\x86\x9c\x1c\x60\xa0\x02\xd4\x34\x56\xf7\x02\xee\xfc\x67\x22\x70\xca\x20\xff\x8b\x65\xd3\xfc\x30\x82\x5f\xaf\xf4\x41\xbe\xa8\x26\x10\x46\x45\xc4\xe0\x7a\xea\xe0\x0d\xc7\x37\xd2\x55\xff\xc8\x83\xf5\xab\xef\xbe\x91\x26\xa7\x5f\x17\x34\x62\x91\x81\xdf\x20\xfc\x7f\xe8\x74\x66\xe1\xbd\xfa\x4d\xd7\x9a\x85\x5a\x23\x2c\xf6\x87\x8c\xd5\xa0\xd6\x12\x89\xcf\x6f\xf8\x8e\x68\x1a\xc8\x2e\x99\x5f\x3d\xc5\x34\x67\xd1\xd8\x8b\x9f\x1d\x15\x6f\xc2\xb2\xc8\x32\x77\xb0\xd8\xb2\x1d\x70\x6f\x7c\xee\x7d\x7b\xb2\xef\x74\x22\xa3\xea\xb7\xd8\x3e\x6f\xc7\x8a\x75\x32\x7e\x9c\x22\x94\x26\x54\xe9\x09\x09\xd7\x5a\x2c\x0d\xa2\x71\xf3\xe8\xc4\xc8\x6c\x09\xf7\xa1\xdb\xab\x34\xbf\x0a\xd1\x5b\xa8\x76\x8b\x43\x6f\xeb\xe3\xd9\x12\xaa\xee\x4c\xfa\x07\x31\x74\x79\xc9\xf1\xb9\x05\xd1\x84\x67\x4b\x50\xc7\xb7\x12\xd7\xea\xef\xf4\x55\x74\x05\xe4\x58\x3b\x12\xe0\xe0\xec\xae\xb0\x63\x27\x7b\x39\x7c\x08\x4e\x2b\xe5\x7d\x57\x67\x0d\x10\x80\x15\x35\x0e\x71\xe2\x46\x4e\xa4\x62\x88\x6e\x7d\xbc\xc5\x2a\x49\xb3\x00\x60\x2e\x75\x76\xae\xea\x07\x72\x52\x37\x00\x6f\x31\x9c\x91\x8b\x5b\xfc\xe3\xd4\xf6\x1a\x5e\x72\x0b\xd7\x4f\xfa\xfb\x6c\xae\x5d\x81\xf7\x80\xfa\x13\xec\xea\xb4\xb2\x65\x00\x61\xa1\xdc\xc1\x9f\xe6\x3f\xd9\x10\x8f\xb1\x85\x5f\xb2\xc2\x3f\xe9\xc1\xce\xc1\x11\xd3\xd4\x69\xe4\xe9\xb5\x8d\xdc\xbd\x9c\x5a\xfc\xa3\x88\x48\xa8\xfb\x97\x1c\x96\x20\x81\x23\xc5\xf9\xc2\x51\x4b\x6f\xa7\x8f\xf5\x7c\x58\x2a\x4a\x07\x38\x2c\xf2\x38\x4d\x8e\x25\xc8\x37\xe0\xaa\x1c\xe1\xa0\xc5\x15\xab\x2f\x05\xf3\x14\x17\x4d\x30\x02\xe5\x3f\xf9\x2c\x4a\x26\x62\x5e\x69\x77\xc1\x69\xad\x00\xf9\x24\x5c\x0f\xe3\xc1\x8e\xf3\x44\x68\x18\xc7\x94\x8d\x43\xb9\x6b\x17\x26\x02\x28\xc1\xdd\xca\xa0\x33\xce\x68\x36\x38\x7f\x35\xf8\x30\xee\x55\x8b\x69\x18\xb2\xaa\xba\x24\xff\xee\x1b\x6a\x9a\x60\x42\x58\x1b\x98\x66\x97\x25\x74\xbb\x3b\x1a\xde\x57\x47\x5c\xd9\x54\xe2\x15\x87\x08\xf7\x57\x9e\x6e\xe8\x04\xe8\x5c\x62\x36\xb8\xd2\x1b\x83\xc2\x60\x2f\x02\xb3\xd3\xbd\x06\xe8\x34\x0c\x9a\xcd\xa9\x4e\x12\x80\x3c\x5a\x1b\xb0\x54\x65\xd1\x27\x00\x8b\x5b\xed\xab\xc8\xf5\x02\xdf\x09\x06\x32\xdd\x0b\xaa\xc3\x32\xfe\x4b\xe4\x05\x5e\xe8\xfb\xad\xa5\x4f\x11\x47\x0d\x5a\x68\x65\x2b\x42\xbf\x03\xad\x92\xd8\x93\x75\x8e\x52\x39\x9f\xb2\xa5\xd5\x2b\x5a\xd3\x3f\xbe\x55\xfa\x49\x91\x91\x3c\xf5\xfe\x04\x9c\x7a\xe2\xc5\x7f\x06\x9b\xcb\x9f\xf0\x2f\xf2\xf7\x9f\x52\xf3\xe1\x24\xd4\x1e\xae\x37\x6d\xb3\xf1\xd4\xb3\x8f\x9e\x41\xa0\x37\xef\xe5\xe2\x5f\xbe\x2e\x1f\x7f\xa6\xc5\xea\xae\xcb\x23\x1c\x29\xc0\x1b\
xd2\xac\x62\x20\xf2\xe7\xdc\x23\x47\x81\xc2\x61\x14\x78\xc5\xe2\x27\xeb\x9c\xce\x0d\xc3\x9d\x53\xe7\x9f\x9d\x6a\xc6\xdf\xde\x7c\xf7\xad\x50\x1e\x00\x38\xd6\xcc\x08\x7e\x3d\xd3\x1a\xec\x41\x6e\xac\x5c\x04\x26\x15\xc4\x88\x68\x4d\x17\x9c\x74\x51\x1a\xbd\xff\xc2\xc6\xe2\x99\x69\x8c\x9d\x24\x84\x63\xe5\xc5\x08\x9d\x2b\x67\x84\x82\x39\x0c\xc9\x33\x2b\x44\x9a\x4f\xc8\x5f\x34\xd8\x16\xc7\x45\x48\x86\x1e\x13\x43\x4d\xf8\x35\xb5\x90\x9d\x21\xba\x4c\xb4\x28\x6a\x9a\x9f\xb5\xb7\x16\x47\xc3\x32\x83\xcd\xf9\x8b\x2d\x36\xaf\xea\x83\xdc\x4f\xaf\xc6\x45\xd0\xe9\x17\x49\x1e\x48\x27\x93\xbf\x7e\xa8\xd6\x9f\xc7\xb5\xfe\x7a\xb1\xda\x9f\x07\xd5\x02\x51\xa3\xe9\x0c\x9c\x35\x32\x8c\x07\x20\xaf\x90\x71\xc2\x39\xe4\x58\x53\xa4\x1d\xac\xb0\xd0\x8c\xd1\xae\x7f\xe0\x14\xfe\x05\xb0\x4c\x2c\x14\xc1\x62\xed\x8e\x7d\xf6\xb3\x48\xc1\x86\x9c\x44\xbe\xb4\x95\x81\x90\xb8\xf8\x38\xdb\xcc\x09\x50\xfd\x56\x44\xf8\x03\x90\xc7\x78\x09\x74\x51\xe7\x81\x4d\x80\x92\xb8\xb1\x1a\xa0\x6b\x29\x00\xfd\x14\x21\xfc\xab\xb0\xce\xf3\x22\x9f\x9f\xc2\x3f\x03\x5c\x8c\xbb\x80\x67\xcb\x4e\xc1\x52\x45\x89\x3a\x17\xac\x08\x14\x0a\x3c\xab\x46\x37\x0b\x48\x13\x1c\x21\x6a\x91\xf3\x56\x3c\x9f\x89\x51\xc1\xbc\x43\xc3\xc8\x81\x8c\xad\x20\xa7\x0b\x53\x1d\x07\xf7\x4a\xfa\x90\xe7\xd7\x8b\xe9\x02\xb6\x7f\xb7\x63\x1c\x04\x50\x2b\x5d\xc7\xe2\x73\x1e\x4f\xa4\xf0\x59\x98\x86\xd3\xe1\xc5\xfd\x79\x4b\x12\xd4\xba\x59\x10\xf0\xa6\x80\xed\xfe\xc8\x8e\x6c\xfa\xc4\xe6\xe3\xeb\x34\xb1\x02\x02\x86\x89\xf1\x93\x81\xe6\x06\x14\x32\x70\x44\x7e\xee\x4e\x2d\x1c\x9a\x26\x78\x9f\x1e\x1e\x28\x21\x72\x79\x2e\x6d\x67\xe8\xea\x39\x21\x42\x4e\xd4\xdf\x51\x41\x60\xbf\x16\x47\xec\xbc\x53\xe8\x04\xe1\xf8\x78\xfb\xc2\x61\x31\x29\x6d\xc8\x25\x1a\x8f\xba\x08\x5b\x10\x6c\xab\x33\xde\x26\xa5\xfd\x2b\x64\xe3\xac\x5f\xd5\xf9\x11\xd2\x64\x0a\xb6\x6c\x4d\x6c\xc5\xb5\x91\xe6\x9d\x18\x9d\xc8\x08\x39\x5a\x85\xd1\x62\x01\x26\x9f\x16\xef\x08\x91\xba\xba\xca\xed\xb0\x5e\x16\x61\x79\x34\xc5\x70\xf7\x87\xbb\x5b\x81\x04\xec\x43\x67\x91\x69\xc2\xee\x85\x8b\x24\x10\x46\x58\x1c\x63\xf4\x7d\x9d\x8e\x42\x2f\xa7\x1e\x72\x18\xeb\x0e\xf1\x88\x45\x08\x01\x09\x76\x53\x1d\xe2\x93\xb8\xa6\xba\x2c\x5c\x1f\x8b\x6a\x34\xbc\xe4\x05\xdd\x32\x87\xbe\x88\xcd\x34\xc6\x53\x53\xab\x24\xfa\xf9\x5c\xdd\x61\x4f\xa8\x11\x59\x22\xfc\x39\x9f\x40\xcc\x31\xc9\x19\xc4\xdf\x86\xae\x5a\x5b\x85\xea\xba\x70\x1d\x1d\xe5\xe6\x4c\x02\xfb\x10\x32\xba\xed\xb5\x1e\x42\x81\xdc\x09\x58\xae\x21\x15\x0a\xfe\x6a\xe5\xf8\x16\x06\x55\x9d\x1e\x34\xfa\xad\x33\x01\x9b\x1f\xde\x7e\x93\x95\x84\x19\xa3\xe5\x3f\x3e\x58\x8f\x84\x49\x01\xf1\xd8\xf3\x27\x65\x7a\x3a\xd5\xb7\xc2\x6c\x28\x5c\x12\x07\x05\x4e\x06\xea\x5d\x5b\x1d\xf6\x17\x8b\xa8\x69\xd8\x80\xcf\x8e\xb1\x17\xfb\x7c\x1b\x5c\x5e\x38\xb1\x10\x60\x8d\x24\xf7\xa3\x16\xe0\x4b\x22\x84\xd8\x4b\x7c\x4c\x07\xc0\x2a\x83\xc5\x00\x44\xf2\x33\x61\x3e\xc7\xf2\x0d\x80\x70\xdb\x73\x61\x5b\x4b\x17\xc9\x05\x9d\x30\x88\x52\x72\xe3\xcd\x17\xbe\xcb\xe9\xad\xe8\x7a\x63\x37\x68\x13\xcd\x2d\xd7\xf1\xd8\x97\x3e\x24\x6c\xa2\x79\x83\x6e\x64\xcc\x3b\x1c\x50\x32\x8e\x28\x0c\xe1\x88\x11\x69\x90\x65\xcc\x29\x9d\x1b\x08\x34\xdc\xff\xcb\xbf\xee\xa2\x0c\x87\x94\x78\xc6\xdb\xe2\x60\x60\xe3\x87\x34\xd9\xd6\x06\x36\x3e\x2f\xea\xba\xd8\x1b\xd8\xf8\x9a\xc5\xb5\xe1\x0f\x02\xc4\x0e\x63\xa4\x07\x4d\x43\xb1\x91\x17\xb9\x20\xe8\xec\xaa\x7e\x9f\x41\x70\x37\x08\x3d\xde\x18\x13\x5f\x39\x98\x75\x1a\x7c\x23\x67\xd5\x98\xb3\x6c\x5d\x75\xa5\x1d\xc2\xb6\x36\x64\x49\x43\x04\xa6\x1d\x73\xf8\x9d\x22\x2d\x4e\xc8\xa9\x05\x6e\x3d\x16\x5c\x53\xe2\xc5\xbe\x6a\xde\x8b\x7d\xdc\x3f\x92\x40\x84\x05\x0a\xbb\x28\xb0\x80\x89\x91\x5e\x5a\xcb\xcd\x2b\xea\x99\xad\x9e\xaa
\x8d\xe9\x59\x27\xc8\x0a\x27\xe4\xf9\x12\x6f\x49\x74\xee\xf5\xf4\x2a\xb2\xc3\x63\x69\xe9\x7e\xea\xb5\xab\xff\x50\x9e\x17\xa0\xfa\x90\x12\x0e\x15\x3b\x4e\xb7\x86\xe0\xb0\xd6\x82\x0c\xdf\x42\x2c\x7d\x2f\xf0\x5d\xc3\x70\x8c\xc3\x93\x81\xf0\x1d\x19\x25\x35\x0d\x4f\x98\x11\xb2\x33\xcd\x79\x8a\x4c\x33\xa0\x42\xe3\xb7\x6b\x42\x78\xeb\xb8\x33\xcd\x3b\xef\x63\x1f\x0c\xa3\x4f\x3b\xb2\x6b\x1a\xfe\x8a\xd5\x1d\xf8\x1d\x99\xa7\x4d\xb3\x5a\x47\xc5\x55\x4c\xe2\xa6\x31\xec\x4f\x0d\x7c\x77\x43\x62\x5c\x8a\x99\x81\xce\xde\xcd\x77\x4a\xde\x19\xcf\x08\xb1\x62\xde\xef\x9b\x54\xaa\xbd\xc5\xa6\xb9\x58\x24\x1d\x35\xc3\x4f\xcc\x3b\x32\xbf\x6b\x1a\x5e\xf5\x92\x9f\x5c\xde\xca\x77\xef\xe6\x16\xff\x9d\xaf\xd0\x75\xe8\x3d\xf7\x9d\x39\xff\x8b\x23\xbe\x69\xec\x63\x9e\xd6\x64\x87\x23\xbb\xaa\x69\x59\x93\x3b\x1c\xd9\x2c\x8f\x08\x18\x18\x83\xdd\x46\x42\x61\xd5\xd5\x92\x6c\xa9\xee\x82\x77\x0c\x5f\x91\x66\xcf\x04\x8e\xaf\x75\x2e\xda\x65\x0e\xb8\xd4\x0d\x8a\xe8\xfd\x20\x3e\x49\x38\x32\x52\x03\x2f\x08\x12\x3c\x03\x0d\x3c\xf1\x87\xc2\x67\x76\x60\x2d\x4e\x58\x23\xc8\x8a\xf0\xce\x40\x18\xfa\x40\xd8\x20\xf4\x68\x4a\x47\xb6\x34\x9c\x66\x14\xca\x13\x4b\x9c\xf4\xbe\x2b\xe2\xdb\x64\x1d\xcf\xe7\x28\x02\x27\xe0\x30\x49\xef\x33\x06\x5e\x65\xa2\xe1\xae\xc3\x81\x6b\x75\x3d\xe0\xcb\x00\x70\x2d\x70\x57\xa4\x0d\xa1\x69\x84\xd1\xbb\x17\x73\x90\x1b\x55\x42\x20\x74\x9c\x21\xc4\x81\xa3\x4d\x1d\x51\x2b\x42\xaa\xde\x2d\x78\x8a\x40\x8e\x68\x71\xa6\xb5\x28\xbe\x60\x41\xf8\x6a\x0d\x0b\xa9\x14\xec\x3e\xb2\xec\xc6\x25\x6e\x6e\x98\x70\x70\xce\x87\x38\xea\x0f\x4f\x19\xe8\x53\x6a\x67\x75\xb5\x2d\x1e\x27\xb6\x59\x4a\xc5\xb9\x04\xd4\xe5\x36\x8d\xa6\xae\xd2\x65\x1e\xd4\xe2\xba\x48\x92\x6c\xea\xcc\x32\x82\xa2\xc8\x18\xd5\xef\x34\x5d\x49\x92\xf3\x86\x2d\xa9\x56\xce\x1b\x50\xcf\xe3\x83\x32\x92\xad\xb8\xa5\xf8\x55\x05\xd5\xab\x28\xdb\x76\xa7\xc1\x8e\x0a\xce\x5b\x39\x6b\x6a\xc0\x7d\x13\xd8\x1d\xdd\x51\x72\x33\xb4\x5d\x1a\x6a\xaa\xa1\x9b\x14\x67\xbc\xf8\xb3\x66\x73\x63\xb9\xce\x8e\x3e\xd0\x86\x85\x7b\x8a\xaa\xb0\x4c\x0f\xf5\x4d\x8a\xf7\x94\x9c\x84\xe7\x37\xc7\x5b\x61\x43\x45\x32\xda\x1f\xb3\x3a\x3d\x64\x8c\x7c\xa4\x9e\x3e\x7a\x61\x60\xa3\x8f\x61\xe4\xe3\x7a\xcb\x68\x24\x0a\x81\x41\xa9\x48\x97\x8f\x3e\x0e\x8b\xcc\xf1\x9e\x77\x89\xb7\x61\x91\x25\x65\x71\x3c\x88\x6c\xdd\x9b\x56\xa2\x2e\x07\x05\x6a\xbe\x1f\x65\xa5\xf0\xa8\x67\x8d\x1c\xef\xe3\x71\xd6\xdb\xba\x94\xd9\xcb\x17\x13\x65\x7e\x95\x06\x8b\x8e\xb7\xc4\x86\x81\x0d\xc3\x6f\xd7\x7b\x6a\x17\x87\x1a\x7a\x42\xc4\x73\x5a\xe4\x78\x4f\x6d\x28\xcd\x3f\xd5\x71\x51\xd4\xfc\x41\xf5\x18\x9e\xa9\xb8\xd1\xd8\xc3\x65\x36\x8d\xa0\xc4\x16\x5e\xa3\x1e\x25\xe5\xba\x73\x83\x8e\xe1\xbf\x1c\x8e\xef\x3c\xde\xdb\x85\x60\x8d\xc2\x6f\x0a\x72\xa6\x6b\x1a\x47\x3e\x72\x27\x82\x21\xa9\x1a\x3c\x1f\x8f\xe4\x77\xc2\xae\xce\xed\xf4\x3d\x29\xc8\x84\xc3\x61\xe4\xd8\x91\x83\x35\x40\xaf\x12\x35\x85\xb7\xd1\x3a\x9c\xcf\x91\xd8\xe9\xd4\x0b\x7d\x6c\x24\x59\x11\xd0\xec\xcb\x07\x9a\x19\x60\xa9\x2c\xd0\x4f\x30\x4e\x43\xc2\x20\xef\xc0\x81\xba\x31\xff\xe4\x6e\x1e\xe7\x6b\xcd\x90\xee\xfe\x92\xb7\x1c\xe5\xed\x0b\x67\x24\x90\x18\x5b\x21\xfe\xd7\x25\x4d\x00\x75\x23\xe5\x89\x66\x89\x8b\xbe\xb3\xf9\x6d\xb1\xce\x85\x79\x41\x4c\xa8\x97\xfb\x38\x16\x0e\x2a\x63\x34\x79\x35\x10\x23\xa4\x26\x66\x8f\x63\xcd\x6d\x4d\xec\x3b\x31\xea\xe2\x73\x1f\xa8\x90\x59\xc5\x08\x9d\x12\x02\xde\x1f\x06\x01\xbb\xc6\xb6\xcf\x51\xfa\xc0\x11\xec\x96\x58\x77\xf2\xc4\x8e\x51\xd3\x78\x02\x48\xd1\xb9\x7b\xfc\x94\xec\xa9\xb7\xf5\x9b\x66\x4f\x6d\x05\xd5\x38\xd1\x8c\x88\x53\x7e\xaa\x96\xf6\xb6\xde\x67\xdf\x97\x4c\x99\x58\xa0\x79\xca\xcf\xd7\x3b\x30\xf5\xd7\xfc\xcc\x24\x24\xe9\xdd\x2f\xaf\xfb\x01\x26\x8
3\x10\xfc\x09\xc9\x74\x05\xe5\x44\x77\x6d\x4a\x0c\x43\x88\x00\xa4\xbf\x0f\x35\xc2\xb7\xec\x09\xce\x44\x88\x4d\x91\x8d\x4a\xf0\xc5\xe8\x82\x30\xee\xbd\x5c\xba\xbd\x88\x40\x65\x47\x6a\x24\xc6\x38\x02\xe5\x46\x50\xf1\x51\x5e\x17\xd4\x34\xef\x88\x46\x51\xc6\xa3\x13\x3f\xe6\x3d\xce\xa9\x35\x9c\xfa\x18\x61\x43\xa0\x3e\x03\x6e\x63\x0a\x6a\x25\x08\x87\xe8\x74\xa7\xf5\x25\xf1\xee\x78\x5f\x32\xb5\x8a\xca\xe7\xa2\x81\x3a\x7b\xd5\xb8\x23\x6d\xb2\x76\x76\xa6\x90\x14\x5d\x86\xc2\x80\x0c\x83\xb7\x45\x17\x60\x21\x24\x67\x29\x2a\x3a\x5c\x38\x1d\x1d\x0e\x4e\x05\x08\x9a\x33\x4c\x56\x11\xf2\xb0\xd1\xc7\xca\x1b\xe7\x91\x11\xe3\x6a\x20\x65\x06\x14\x10\xc2\x85\x70\x11\xfa\x45\x56\xe4\x8c\xef\x2f\xfe\x0b\x6b\x3a\x5b\xa2\xd1\x5b\x07\x44\xca\xab\x28\x0e\x06\x96\xed\x7c\xfd\x69\xc9\xe8\x8b\xa7\xdb\x9b\xee\xd9\xc0\x85\x9d\x17\x50\xfd\x17\xa2\x14\x01\xab\xd7\x0b\x35\x0f\xac\xce\x2d\x71\x42\x96\x7c\xca\xc7\xc1\x81\x2b\x7e\xee\xdd\xb1\xf7\x37\xb8\x96\x07\xe8\xbe\x38\x56\xac\x39\x14\x69\x5e\xb3\xb2\x09\x85\x45\xf1\x9e\xe5\xc7\x26\x2a\x69\xd2\x44\x65\x71\x40\x4d\x98\xa5\xe1\xdd\x0d\x3e\x42\x19\xef\x9d\xed\x5f\x23\xce\x85\xd9\x96\x3d\x47\x0d\xd2\xd0\xd1\x03\xd5\x83\x1a\x74\x9f\x1f\xb5\xcf\xab\x81\x9f\x2d\x21\x23\xee\x18\x82\x81\xbb\xd8\x5e\xa7\xa8\x1d\x7a\x5f\xd2\x7d\x73\x01\xd5\x8b\xb7\xc3\x5b\xcb\xee\x3a\xe9\x74\xc6\xc9\x86\x20\x91\x8c\x9a\x26\xc4\x4a\xec\x8c\xfa\x4b\xcd\x00\x41\xfd\x5b\xa8\x3f\xf0\xb6\x3e\x8e\x35\x92\x2a\x8d\xa5\x51\x4c\x24\xfc\xdb\x10\xc2\x5c\x8b\x11\xce\x7f\x77\x75\x39\x32\xc1\x34\xcf\xaf\xb7\x42\x9e\x3b\xc2\x51\x97\x57\xbe\x6a\x5d\xe1\xb4\x34\x38\x2b\x67\xe4\x91\x76\x9b\x7a\xd6\x05\x57\xa2\xba\xbb\xfd\xd8\x34\xad\x84\x53\xcb\x53\x37\x93\xa5\x85\xec\x22\x8e\x2d\x0a\xda\x35\x53\x8a\x80\x2d\x66\x76\x72\x4c\x23\x92\xc0\x0f\xf8\x48\x83\xf7\x12\x7e\xe6\x73\xd0\x85\x3a\x17\x69\xb0\x07\x96\xd7\xc0\xb6\x4b\xa3\x05\x86\x23\xb8\x80\x6d\x65\x1a\x39\x89\x03\xcc\x39\x9d\x1b\x2f\x0d\xe2\x0e\xe8\xc7\x94\xf2\xfb\x8f\xef\x95\x58\x11\x49\x0f\xfd\xa1\xbd\x85\xa0\x32\xa5\x50\x88\xc7\x21\x89\xd5\x17\x50\xb3\x51\x9e\x09\x40\x34\x77\xc1\x5a\xac\xa4\x58\x04\x62\x16\x03\x0d\x47\x03\xc5\x56\x4a\xee\x45\xe7\x2b\xd4\x34\xda\x1b\x78\xf8\xb3\x12\x72\x2f\x9b\x14\xf7\xd6\xea\x6d\xa0\xbe\x21\x89\xe1\x09\xfa\xa3\xe4\xfd\x12\xd3\x56\x97\x69\x92\xb0\x12\x42\xb7\x8b\x20\xf4\xae\x4a\xe2\x84\x3c\x18\xf7\x2b\x36\x5c\x53\xeb\x91\xe1\x6a\x39\xae\x04\x3a\xc5\x40\xfa\x9d\x9b\x61\xf8\x78\x37\xbe\x74\xdb\x2d\x16\x68\x4b\x8e\xf2\xf0\x0c\xbc\x9d\x8f\xa4\x8f\xfc\x03\xd9\x7a\x2b\x1f\x17\xc4\xda\x7a\xcf\x65\x2c\x1b\xe9\xf6\xc4\x56\x7e\x4f\x10\xce\xc1\x9b\xbd\xea\x1c\x38\x8c\xa0\x99\x97\xfb\x4d\x73\x6a\x71\x4e\x2c\xe6\x66\x76\xc4\x32\x96\xf0\x33\xed\xfd\x81\x39\x99\x1d\xa4\x79\x04\xd7\x49\x4d\x93\xe3\x8b\x65\xef\x48\x2f\x7f\x06\x7f\x9a\x39\x2e\xca\x34\x81\x3a\x0e\xe2\xe6\x26\xc2\x72\x81\x9d\x10\xf3\x25\x72\xc4\x82\x61\xb5\xd4\x0e\xc3\xba\x07\x04\x07\x16\xfe\x82\x7b\x04\x69\x21\x8b\x30\x47\xe8\xd5\x81\x86\xcc\x29\xa4\x4f\x17\xdb\x40\x2d\x3f\x14\xad\x3d\x49\xbd\x9c\xcf\x8f\x7c\xe2\x64\xd1\xbe\x1b\xdd\x17\xc5\x31\xaf\xc9\x12\x67\xfc\x84\x38\x1e\x4c\x53\x3e\xf4\x5e\x08\x0a\x9c\xa0\x19\xdf\xb7\x4d\x73\xae\x97\x61\x9a\x13\xba\x1a\x39\x4e\x10\xc2\x19\x4f\xe0\xf3\xcc\x7f\x55\x75\x77\x08\xdf\x29\x00\x57\x00\x3b\xfc\x40\xc4\x74\x70\x6c\xe1\xee\x95\x89\xc3\xa8\xbf\xf3\x39\x5e\xe2\x3b\xe4\x48\xea\xe3\x0e\xa4\xfe\xb0\x1c\x62\x73\xf2\x51\x72\xbe\x6f\xf2\x3a\xf9\x8f\xed\xd2\xfe\xc2\xcd\x34\x07\x7b\xd6\x34\xf5\xfd\x84\x4e\xff\x19\xd0\xa6\xb1\xf5\xbf\x82\x5b\x74\xfa\x10\xd8\x46\xbf\x03\xb6\x02\x00\xa0\xc5\x2d\xd9\x42\x30\xa7\x51\xbc\x9c\xcd\xc6\x46\xc6\x5c\xc1\xd0\x66\x
63\x5b\xae\x63\x5f\x6f\x36\x76\x83\x0c\x34\x37\x2c\xfe\xf4\x0c\x19\x70\xd5\x40\xf6\xe7\x8e\x97\xee\xc8\xde\x8b\x7d\x3c\x63\xa6\x79\x98\x11\x72\x67\x2b\xe8\x6f\x1a\x10\x92\xf2\xa5\x85\xef\x62\xed\xb7\xa6\x39\xdb\x0a\x20\xbe\xb3\x3b\x18\x46\x4d\x13\x99\xa6\xc8\x57\x75\x91\x00\x2d\xe3\xfa\x1a\xd4\x90\x9a\x66\xd6\x7f\x07\xb8\x1e\x38\x5a\xd2\xcb\x8c\xe0\x66\xb1\xc0\x99\x14\xd7\x70\x40\x17\x4f\x3d\x68\xa2\x75\x62\x9a\xb3\x7d\x7f\xab\xc8\x29\x58\x5a\x46\xc5\x63\xce\xb3\xab\x67\x55\xa0\xc0\x1d\xe2\x94\xfb\xa3\xd4\x55\xae\x2c\x8a\xf3\x3e\x87\xba\x34\x81\xbd\xd8\x76\x6a\x1f\x39\x3f\x99\x53\xa4\x96\xb4\xbb\xa3\xc8\xe7\x1c\x38\x00\x54\x67\x4b\xb4\x1e\x5f\xda\xa7\x00\x95\x5d\x6e\x43\x34\x72\x25\x80\xd2\x00\xb7\xd0\x12\xe1\x4e\x99\x68\xcb\xc6\xe2\xf4\x09\xd4\x8b\x94\xee\x14\xdf\x09\x20\x57\x96\xc6\x41\xa3\xcb\x0b\x84\x77\xc4\x12\x5b\x41\xb8\x55\x50\xad\x81\x2d\xa3\x27\x10\xbe\x2f\x25\x8a\x63\x18\xed\x52\xa5\xe0\x96\x73\x24\x24\xc0\xc2\xdb\xf6\xb8\x25\xe0\x27\x53\x2f\xd4\xd4\x42\xbd\x10\xb4\x3d\x82\x1e\xbc\x21\x18\x80\xb8\x08\x98\xdd\xd9\x87\x92\xbd\x92\x23\x6e\x9a\xc1\xab\x66\xbb\x1c\x88\x75\x42\xa7\x6d\xd7\x3f\x89\x79\x2a\x3d\x17\xde\x71\x72\x5c\xb1\x07\x56\x4c\xb6\x5e\xc8\xf9\x03\xd3\x9c\x05\x76\x5a\x7d\x5f\x16\x07\x9a\x40\x88\x81\x37\x75\x71\x38\xb0\xc8\xe2\x68\xc0\x0e\x8f\x65\xc9\xf2\x5a\x76\x2c\xb6\x59\xc6\xf6\x5a\xdc\x79\x2b\xe9\x8f\xf6\x4a\x04\xa7\x93\x15\x7e\xb5\xdf\xb3\x28\xa5\x35\x9b\xac\x39\xb0\xcb\x6e\x63\x40\x81\xfe\x55\x6c\x9c\x64\xb0\x71\xac\x40\x36\xf2\x5d\xb0\x23\x09\x0e\x6c\x7e\xe6\x90\x04\x7e\x30\x23\x96\x35\x5e\x99\xa4\xdb\xa2\xbe\x30\x4a\x15\xc5\x9b\x26\x51\xbd\x45\xf2\xd0\x96\x43\x4a\xb5\x1b\x61\x4e\x0a\x82\xcd\xc4\x31\xab\x09\x53\x46\x81\x56\x00\x36\xd4\x2c\xaf\x5f\x09\xe2\x9d\xf3\x41\x70\x59\xa8\x8d\xd0\x42\xa8\x77\x00\x68\x1f\x8a\xaa\x56\x2b\x66\x9a\xc3\xf7\xc1\x0a\x62\xd5\x1c\x28\xc2\x88\xd9\xbc\xac\x51\xc0\x81\x9a\x9f\x77\x29\x09\x86\x98\x00\xef\x08\xb5\x45\x40\x09\x70\xc0\x6c\x9a\x3b\x5d\x65\xc0\x32\x80\x2d\xd0\x3d\xe4\x53\x5b\xb8\xe0\x7f\x41\x56\x52\x13\x6f\x37\x23\xc2\x7f\xcb\x8e\xec\x06\xa6\xbd\x20\xd6\x93\xce\x96\xf4\x6a\x55\xad\xb3\xde\xaf\xff\xae\x0b\x88\x0d\xda\xa2\x48\xc8\x39\xa4\x49\xe7\xa9\xc5\xc2\x9b\x7d\x0a\x3b\x22\x22\x20\x41\x61\x24\xea\x30\xdc\xdc\xb8\x32\x7a\x81\x4e\xe2\x31\x9f\x93\xcd\x1e\xf3\x49\x34\xa0\x13\xdc\xd2\x62\xc2\x2d\x83\x74\x47\xb0\x03\xbb\x3f\xe9\xdb\x45\x24\xc9\x10\xa6\xbb\x2e\x84\x29\xc2\xa2\xc2\xb8\x73\x3c\x1f\x77\xa8\x71\x2b\x7d\x87\x73\x88\x70\x76\xda\x4a\xb4\xdd\xa2\xee\xa4\xb5\xca\x6d\xf0\xbb\xa5\x02\xe5\xfc\x0f\x3c\x9e\x03\x61\xcd\x21\x65\xb4\xae\xd3\x4a\x7c\xa5\x0d\xd8\xb6\xd7\x24\xc4\x14\x9f\x58\x7e\xdc\x33\xa5\xbf\x37\xd6\xe7\x03\x1d\xba\xb1\x45\x82\xc6\x05\x28\xe5\x12\xbe\x2f\xd2\x9c\x66\x50\x7f\xa7\x2c\x30\x95\x36\xb8\xe6\xf9\x60\xf1\xf3\x14\x8f\xfa\x23\xd5\xc2\x8b\x43\x95\xb7\xbe\xbf\x33\xba\xc7\x32\xad\xd5\xb3\xd4\x68\x04\xd9\x72\x8b\xe3\x74\xda\x11\x84\xd7\x69\x68\xfa\x2e\x75\xc0\xf3\xbe\x2d\x8f\x30\xd4\x62\x89\x28\x9c\x53\x56\xd0\xc8\x39\xe5\xc5\xe7\xc7\x40\x2a\x46\xca\x08\x29\x27\x49\xf8\x4f\xcc\xc2\x8c\x10\xce\x09\x0b\xef\x10\x36\x64\x1f\x4c\x45\x2c\x22\xa4\xe0\xd9\xaa\xc5\x03\xca\xc5\x80\x94\x34\x37\x5a\x1c\x64\xc7\xf2\x43\x6d\x90\x41\x1b\x3c\xf7\xa0\x09\xfe\xe1\x72\x0b\xc5\xb1\x36\x5a\x0c\xfb\xf2\x52\x1b\x86\x12\xc4\x73\x74\x00\x55\x0a\x8c\x20\xd4\x2c\x79\x49\xd3\xfc\x5c\x9e\x89\x52\x66\x33\xe8\x00\x64\x91\x3d\xe8\x64\xd2\xd3\x56\xfb\x12\x25\x61\x83\xc2\x21\x1e\xb0\xb8\x28\xd9\x31\x17\x33\xaf\xe3\xc4\xe1\xb1\xae\xd0\x31\x95\xb8\x91\x63\xab\x01\x90\x81\xc6\xe1\xe0\x8b\x2d\x1a\x05\xb1\x4a\x57\x0e\xb5\x6d\x2b\xf4\xf8\x3b\x2a\xe6\
x4c\x41\x79\x52\xad\x9c\x37\x38\xa5\x6d\x2e\x75\xd0\xec\xf3\xaa\x06\x3a\x01\x03\x27\x42\x22\xb3\x6b\x51\x30\x1d\xeb\xdc\xe8\x0c\xbb\xaf\x3c\xea\x80\xea\xb0\xc8\x26\x3e\xa4\x95\x3c\x73\xbe\x17\x27\x10\x8b\x48\xe7\xb9\xb0\xfb\xd4\x34\xbd\x32\xd9\x59\xa2\x18\x4b\x3f\x39\xfc\x4c\x73\x1f\xa8\xf3\xa8\x9a\x14\x07\xbc\x5a\x2b\xd3\xfc\x58\x9c\x12\xf0\xa6\x07\xb9\x97\x5f\xfa\x93\xc1\xe9\xd6\x57\xc0\xc5\x80\x60\xa0\xc3\x77\x91\x05\x5c\x97\xb2\xa8\xcb\x32\x78\x97\xd7\x4d\x62\x0a\x70\x20\xd8\x44\x60\x3d\xd5\x59\x29\xd2\xd3\x3d\x7b\x53\xd3\xfd\x81\x88\x19\x55\xaf\x9c\x5c\xcd\x8b\x47\x4b\x1c\xe7\x42\x21\xa5\x47\x04\x70\x18\x0d\x71\x01\xe8\x09\x9e\xa1\x5b\x72\xd2\x1c\x45\x39\x32\x19\x9f\x2f\x03\x9f\xbf\x29\xe2\x49\x7c\xff\x00\x0d\x24\x32\xbc\x49\xf7\x47\x18\xbb\x33\x5b\xe1\x21\x75\x71\x6e\x9d\x7a\x0e\x2f\xeb\x4b\xc0\xf1\x40\x31\x35\xcd\x99\x4c\xee\x5a\x81\xc0\x46\x23\x1a\xa6\xc5\x23\x12\xe6\x3f\x69\xf8\x7c\x5c\x1f\x6a\xfa\x8c\x56\x12\x6d\x4f\xcd\xd2\x7f\xd2\x89\x0f\xcc\xf2\xef\xf5\x66\xaa\xa8\x25\x21\xec\xbc\xb7\x60\x60\x03\x2e\x6f\x68\x56\xff\x9d\xbd\xe7\x67\x51\x00\xc7\x06\x38\x96\x0a\xf9\x5e\xcf\xba\x03\x6c\x4b\xf3\x84\x45\x6f\x8b\x23\xc4\x1d\xe1\x5f\xea\x32\x93\xa5\x22\x56\xd3\x34\xe3\x4f\xb0\x18\xdf\x6f\x69\x05\x85\xf6\xac\xa6\x32\xcb\x81\x26\xec\x17\xf5\xf0\x4f\xfe\x00\x4a\x6b\x32\xf5\x21\x65\x8f\xfc\xd7\x08\xb7\xb4\x34\x64\x7b\xe5\x17\x7c\x3b\xce\x96\xf8\x4e\x64\xba\x63\xef\xd5\x17\x19\xc1\xa9\x7b\x12\x1d\xca\x52\x96\xd7\xbf\xf4\x8f\xd0\x4c\x11\xc7\x15\x13\x5f\xc5\x23\x7c\x95\xf2\xe6\xaf\x22\xed\x05\x0e\x1c\xde\xb1\xb0\x64\x2c\xff\xa5\x7f\x84\x12\x02\x29\x68\xe3\xaf\x0b\x29\x26\x16\x2f\xdd\xf7\xc7\x6d\x3a\xc9\xd1\x29\xda\x74\x3d\x72\xc2\x0a\xf9\x4d\xb3\xa2\x2a\x52\x15\xf8\x2c\x76\xa5\x9d\xa4\xad\x26\xc2\xed\x1f\x1d\x6a\x77\x73\xd1\x95\xd7\xf5\xc7\xeb\x51\x5d\x2b\x33\x70\x57\xce\x73\x33\x70\x3f\x76\x3e\x31\x03\xf7\xb9\xb3\x74\x64\x41\x01\x05\x4a\xa0\xca\x01\x04\x75\x60\x01\xa2\x79\xbe\x03\x4b\xc7\x80\xe7\xe2\x81\x95\x06\x86\xc7\x8c\xd1\x07\xa6\x3e\x1f\x6b\x43\x4d\xa2\xcc\x2e\xdf\x44\x01\xf9\x22\x8b\xa8\x24\x38\xd3\x47\xa7\xcd\x88\xed\xa1\x3e\x39\x0d\xc8\x81\x00\x2b\x51\x89\x13\x48\xca\xf4\x6c\xa6\x95\xff\x46\xcc\xc6\xb8\x18\xa2\x24\x74\x8c\x57\xa7\x69\x22\xe3\xd8\x46\x1c\xd7\x76\x37\x57\xe0\xbe\xa6\x69\xe4\x14\x92\xb8\x63\xc0\x74\xe9\xef\xb4\x58\x1b\xcb\x32\xe0\xdd\xf9\x4c\x6b\x71\x10\x78\x4c\xa9\x2b\x29\xaf\x45\xb4\x53\x1a\x84\xef\x2d\x2e\xf2\x73\xb9\xd8\xa5\xec\x78\xc5\x0b\xc4\xf1\x25\xfb\x17\xd0\xa7\x3d\xc7\x99\xfc\x4b\x37\x29\x8a\x26\x8a\xf4\x99\xc2\xa5\x45\x47\x3c\xbd\x10\xed\x47\x3d\x57\xeb\x46\xdd\x0c\xcd\x0d\xdb\x98\x6b\x49\x4e\x9f\x84\x7b\xae\x08\x47\x1d\xeb\x0a\x08\x6a\xf2\x02\x45\xc6\xb9\x00\xff\x81\x14\x09\x9c\x19\xc7\xe0\x61\x9b\x5e\x72\x0e\x18\xcc\x04\x83\xdb\x5b\xba\xf7\xee\x55\xc1\xa9\x48\x80\x83\x4e\x8b\x30\x54\xdc\x70\x48\x1e\xa5\x37\xb5\x8b\x17\x0e\x03\x45\x66\x1c\x0a\xbd\x69\xa9\x0e\xf2\x1b\xe8\x7c\xb8\x33\x5a\x32\xda\x04\x65\x13\x16\x59\xc3\xf6\x01\x8b\x9a\x6d\xd9\xa4\xfb\xa4\x01\x9a\xb3\xc9\xd2\xfc\xae\xe1\x58\xb1\x39\xd0\x92\xee\x91\x75\x59\x4d\xe4\x5a\x38\xb0\x44\x9b\x9b\x17\x37\x49\x8a\x5f\xf2\x06\xc4\x25\x69\x73\x0b\x7a\x37\xcd\x2d\xaf\xed\x26\xc5\x9f\x53\x72\x23\xaf\xf6\x36\xd5\xb5\xe5\x3a\xde\x3b\xe2\x37\x64\x53\x5d\xab\x1b\x3f\x1b\xdd\xa4\xf8\x0b\x4a\x6e\xde\xd5\xe5\x91\x6d\x6e\x2c\xfb\x1a\xdd\xe0\x57\xfc\xc3\xa6\xba\xbe\x9d\x59\xae\xb3\xf1\xbe\x78\xf5\xf2\xed\xcb\x8d\xd7\x2c\x16\xa8\xe1\x1f\xfc\x8d\xcf\x9f\x5f\x6c\xaa\xeb\x67\xba\xf1\xc8\x97\x74\x40\x1a\x0a\x2f\x55\xfc\x98\x30\x10\x27\xb1\xcf\xdd\xa1\x05\xfa\x0d\xb5\x51\x97\x06\x72\x4b\xcb\x78\x01\xca\x1c\x06\xa6\xc8\x5b
\xfa\x4d\x43\x1d\xcd\x53\xca\xeb\x61\xf0\x20\xd8\x4d\x96\x40\x88\x97\x42\xdf\xcd\x8d\x1b\x63\x2e\x69\x4c\xad\xa6\xbf\xf4\x0a\x68\xe4\x0b\x29\xd1\x95\x58\xb1\xb3\x51\x73\xd5\x7e\xf5\x56\xbe\xa3\x48\xe4\xb3\x16\xf4\x5a\xff\x4a\x27\x85\x18\x38\x15\x7e\x1d\x46\xf1\x31\x39\x7b\x32\x94\x55\x5b\xb1\xa6\x93\x8e\x70\x42\x84\x5e\x46\x80\x63\x84\x77\x24\xee\xe5\xd6\x52\x1e\xa9\xa4\x3c\x38\xe9\x2f\x85\xd6\xdd\xc6\xd8\x49\xd7\x87\x4b\x1c\x91\x9d\xc7\xfc\xb1\xd2\x87\x7e\x61\x16\x60\x86\x79\x1e\x2f\xf4\x51\xfb\xcb\xb0\x53\x5b\xf2\x8b\xd6\xa9\x94\xe8\xee\xac\xb6\x08\xff\x22\xfb\x08\xce\x0e\xba\xa9\xf8\x6a\x14\x2b\xe4\x42\x00\xc5\xde\x85\x79\x68\x9a\xbb\xd1\xf1\x14\x28\x38\x25\x5d\xe4\x4b\x47\x16\x10\xda\x6a\x86\xba\x97\x86\x77\x90\x9f\xe9\x37\xce\x64\xe8\xf6\x5c\x53\xdd\xfb\x9b\xa6\x12\x1a\x10\x75\x0d\xe9\xf9\x38\x10\xdb\x96\x2f\x5c\xaf\xa7\xb2\xc4\xfb\xde\x1e\x35\x27\xfb\xc5\x0a\xdf\x83\x4f\x63\x5c\x0d\xdd\xa0\xdc\xc3\x2d\x43\xd5\x34\xfb\x17\xab\x09\xe3\xba\x7b\xd3\x9c\xe9\x17\xf4\xa6\xf9\xb9\x1c\xf1\xbd\xe6\xa9\x7d\x88\x66\x3a\x5f\x23\xd4\x66\xf7\x16\x43\xeb\xca\x34\x2d\x08\xa2\x7b\xaf\x89\xd6\x18\x8e\x41\x7f\xc4\x42\x08\xe1\xbf\x51\x2b\x56\x07\x05\x74\x68\x0f\x27\xd9\x3d\xb5\x38\x76\x5c\xfa\x23\xdd\x8b\xd9\x0a\x83\x6d\x6b\x4c\x98\xbe\x21\x57\x10\x4f\xbe\xd7\x29\xd1\xcd\x75\x62\x84\xe3\xa6\x89\xa4\xc0\x6b\x2b\x3d\xc5\xe5\xd4\x62\x9a\xc2\xc6\x6b\x00\x16\x15\x5a\x6e\x9d\xdd\xee\xd7\xd9\x7c\x8e\x76\x84\x61\xbe\x5f\x73\x88\xd0\x52\x0a\xd5\x01\x6b\x87\x67\x4b\x08\x11\x94\x72\x4e\x48\xe8\xb4\x6c\x71\x4e\xad\x5d\x5f\x23\x42\x38\x54\xa6\xb2\x99\x8f\x77\x38\x43\x22\xfe\x1a\x04\x7c\x21\x5b\x6f\xdb\x7b\xd6\x1d\x8d\x51\x74\x70\x8b\xff\x42\x11\x5f\xcf\x75\x76\x9b\xca\xce\x6c\x79\x55\x4a\x79\x64\xa7\x2b\x8f\xcc\xba\xad\xb8\x1b\x2a\x3e\x0d\x74\xa3\xef\xf0\x0e\x42\x3e\xd9\x55\x19\xba\xa5\xfd\x2b\x7b\xa0\xd9\x8f\x65\xc6\xf3\xa8\x67\x91\x88\x9c\x03\xaf\xbf\x57\xaa\xe9\x6c\xd9\x5e\x51\x08\x0d\x70\xd7\x8b\x54\x35\x94\xf2\xf7\xf3\xe8\x65\x98\x91\x60\xe0\xd4\x1b\x39\x14\xc7\x64\xb9\x96\xa1\x6d\x22\xa9\x71\x19\xcf\xe7\x28\x6c\x9a\xd5\x4c\xf7\xa6\x0d\xc4\x4c\xc6\x68\x0e\xfb\x3b\x17\xc1\xc2\xa2\x41\x54\x58\x2b\x1c\x8c\x30\x1a\x6b\xe7\x22\xd0\xc4\xe1\x45\xb5\xb5\x19\xd4\x31\xd0\xaa\xd5\x82\x35\x51\xdd\x7e\x4e\x57\x77\x9a\x0e\x55\xab\x26\xe8\x37\x8a\x8d\xdb\x67\xab\x17\xb7\x37\xcf\x9e\xbf\x30\x84\xef\xd8\x33\xfa\xa7\x23\x67\xa4\xe0\x98\x0e\x95\x52\x00\x6f\x7d\x40\xa3\x5d\xf8\xdd\x19\x2b\xb7\x88\xd9\xd3\x3d\xe1\xaf\x86\x1f\x84\x89\xa8\x88\x12\x60\x51\x24\x84\xcb\xa0\xca\xb4\xe5\x3b\x2a\xe7\x28\x54\xba\xb3\x8d\xc7\xf1\xe6\xbe\xa2\x56\xec\x45\x3e\x4e\xbc\xc8\x87\xf6\x03\xf0\x42\x8b\x84\x10\x39\x6e\x1a\x51\x1a\x74\xd2\x44\x85\x17\x2a\xfa\xeb\xa0\x22\xb8\x9e\x82\x93\x68\xdb\xfb\xb0\x80\x1e\x69\x9b\x33\xd1\xe2\xfc\x14\xd4\x4a\xf0\x2c\x35\x4d\x50\x81\xd4\x96\x74\x2b\x4c\x42\xf2\x33\xc3\x2e\x3d\xc6\x58\x04\x4a\xd6\x03\xfa\x1c\x80\xb1\xe3\x3b\xac\x10\xf4\x9e\x05\x3c\xa6\xb1\xf5\x23\xd8\xec\x83\x03\xea\xd0\xfb\xb9\x37\x8e\x85\x4f\xea\x88\xeb\xad\xaa\xd5\x17\xe6\x45\xbe\x3b\x22\xb9\x20\x24\xd8\xf0\xde\x0e\xf4\x63\xd4\xbd\xdd\x5a\x6f\x40\xd2\x76\x6d\xe8\xfd\xa2\xd9\x53\x5b\xfa\xab\x22\xff\xce\xa9\xf3\x08\x82\xa7\x4c\xfa\x48\xed\x88\x6d\xcd\x14\xee\x83\x19\xd1\x44\x0c\x60\x99\x67\x6c\x08\x48\xcf\x0d\xb7\xa9\x2b\x02\x55\x08\xfd\x63\xa9\xb4\x0c\xd1\x91\xd1\x19\x85\xba\x92\x97\x1c\x63\xf8\x1d\x7d\xfb\x6c\xfc\xa9\x69\x84\xd4\x4e\x57\x01\xa4\xbd\x35\x20\x3d\xb3\x8d\x42\x2d\x16\x8a\x68\x13\x7a\xda\x7f\xa3\x23\x46\x68\x30\x3c\x49\x16\x8d\x9a\x5f\x4d\x7c\xfb\x6c\xfc\x49\xd1\x6f\x5f\x76\x13\xbb\x1e\xea\xc3\x51\x21\x39\x3f\x94\xec\xff\x13\x5d\x4b\xf3\x8
a\x95\xf5\xe7\x20\x16\xe6\x38\x6b\xe0\x12\x96\x77\x54\x48\x8c\xff\xe3\x7e\x42\xcb\x3a\x02\x1f\x7d\x18\x37\x2c\xb4\xe6\xf9\x92\xc5\xf5\x50\x5e\xfe\xff\xac\xb9\x41\x70\x11\xde\xf4\x99\xa3\xca\xce\x8f\x38\xc4\x2f\x90\x27\x99\x10\x87\x79\x81\x8f\x20\x9a\xc1\x38\x2a\x89\x35\x3a\xca\x28\x84\x22\xe0\x6c\xf6\x40\x75\x75\xc8\x0d\x4e\x1c\x1e\x9a\xa9\x94\x94\xae\x98\x26\x95\xbe\x3e\x08\x09\x5c\xea\x04\x62\x14\x9c\x88\x98\xb4\x09\x02\x1a\x66\x60\x36\xcb\x8f\xb7\x3f\xb8\xc3\x05\xa8\x48\x9b\x42\xd0\x06\x11\x34\xbb\x66\x20\x37\x34\xa1\x3e\x8b\xb4\xaf\x38\x96\x5e\x7f\x73\xd2\x8b\x05\x35\xcd\xd9\x4b\xda\xfb\xd4\x9d\xed\xa9\xd7\x29\x31\xd3\x0f\x29\x31\xfb\xe8\x44\xc9\x58\x45\x99\x22\xf0\x54\x09\xd7\xa9\x8a\xa7\x90\xe3\x08\xc5\x38\x56\xa3\xc8\x64\xe3\x05\x0b\xc4\x82\xe9\x7a\xa7\x7c\xa7\x90\xa5\x66\x95\xdf\x06\x12\xc2\x14\x92\x13\x3b\x1c\x6e\xb7\x2e\x63\x23\x49\x3a\x0c\xfd\x88\x2a\x19\xab\xd7\x59\x13\x7d\x00\xd2\x3b\xde\x65\x04\xde\xeb\x5e\xdf\x59\x6e\xed\xdb\xe5\xf9\xc8\x60\x8f\x49\xdb\x42\xd9\x17\xa9\x4e\x2e\x43\x79\xb4\x98\x2a\x8b\x67\x21\x6a\x85\x51\xbd\x2d\x1c\x43\x3c\x19\x0a\x6d\xf1\x4f\xf2\xd1\xc0\xfa\xd6\x72\x0c\x81\x2f\xd4\xd7\x97\xb0\x9b\x0d\xd8\xd4\x86\x9a\x80\x97\x59\xe6\x18\xda\x64\x4c\x88\xd6\x46\x0e\xa0\xe9\xc0\xa0\x49\x44\x29\x02\x4f\x36\x71\x17\x03\x6d\xb1\xc2\x09\x59\xae\x93\x5b\x12\xaf\x13\x4e\x64\x42\xa0\xc5\x58\x33\x80\x15\x1b\x82\x93\x5d\xa5\xc5\xbc\xc4\x47\x5e\xe0\x5b\x21\xc2\x4a\x85\x30\xc2\xa1\xf0\xfd\x3e\xd8\x9b\x9a\x4b\xe7\xa8\x77\xe9\x4c\xc9\xcd\xbb\x3d\x2d\x93\x34\xbf\xc1\xdf\x8c\x6d\x27\x95\xbd\xa4\x3b\x3b\x3c\x21\x61\x34\x39\x57\x36\x93\xdf\x52\x72\xbe\x9a\xa3\x88\x15\x1d\x83\x98\xb2\x47\x2d\x42\x4d\x68\x17\x07\x96\xb3\x12\xe4\x42\x14\x89\xde\x7e\x51\xec\x0f\xc7\x9a\x45\x6f\xc0\xac\x2e\x40\xed\x5a\x57\x26\xef\x28\xf6\x40\x5c\x3c\xa6\xe8\x94\x4a\x43\xa8\xb0\xaa\xde\xb2\xa7\x9a\x18\x41\xf1\xb4\xa8\xd2\xdf\xd2\x3c\x71\x82\xa2\x8c\x58\xb9\x08\x8a\xa7\xf5\x41\x86\x59\x73\x54\xac\xbb\xb5\xb4\x9c\x72\xc0\xfa\x6c\x2d\x06\xef\xd0\x63\x5d\xac\x45\x31\x67\x75\x78\x5a\x1f\x68\x14\xf1\x9a\xf8\x73\x5d\x1c\x9c\xd5\x7f\xad\x21\x26\x9a\xf3\xe9\xf2\xbf\x0c\x9c\xea\xba\xdc\x06\x2e\x87\x8a\xec\x5b\xe9\x85\x5a\x48\x4d\x86\x03\x4b\xd1\x3a\x24\xc6\xea\xbf\x0c\xa1\x08\x5a\x1c\x70\x42\x8c\xe7\x07\x61\x57\x6e\x8b\xce\x7c\xcd\xe2\x1a\x33\x62\x7c\xa2\x3e\x43\xcb\x58\x0d\x58\x64\x02\x43\x55\x62\x40\x77\x62\x2d\xaf\x96\xca\xfb\xa5\xf3\x0a\x5b\x4e\xa7\x83\x0b\xce\x76\x10\x20\xfe\x5c\xbf\x1e\x34\xef\x71\x7a\x21\x61\x9d\x76\x46\x76\xaa\x4f\x01\x0d\xef\x92\xb2\x38\xe6\xd1\x17\x59\x7a\x20\x86\xf4\x97\xce\x57\x80\xcf\xd6\x50\x83\x7d\xba\x88\x81\x0b\xd8\xe5\x25\xb0\x08\x30\x5b\xc3\x7a\x08\x21\xd3\xcd\xe1\xed\x39\x28\xc0\x42\x2e\xe5\x9a\xfd\xf7\xe1\x69\x2d\x02\xdb\x39\x4b\x58\xcd\xe5\x3a\x63\x71\xed\x2c\x3e\xfb\xec\xb3\xcf\xb4\xc5\x5e\x4a\x68\x58\xc0\x8a\x1f\x34\xe0\xa1\x01\xf8\xf9\x64\x86\xd8\x64\xdd\x52\xa7\x9a\x57\x85\x02\x9f\x0e\xe9\x13\xcb\x54\x60\xbf\x89\xb3\x3f\xb0\x20\xf6\x60\x50\x3c\xbd\x01\x40\xfd\x81\x65\xe9\x05\x57\xd1\x3c\x2b\x6b\x31\xd4\xf8\x4d\xbf\xa6\x17\x72\xc6\x1c\x29\x8b\xba\xbe\xe9\x60\xe8\x42\xde\xa4\x6d\x11\x6a\x2d\x2d\x68\xc3\x77\xf4\x12\x97\x07\xf3\xda\xdb\x6b\x85\x4d\xf3\x2d\xb0\x4d\x21\xe8\xa0\xc3\xc6\x55\x3a\x18\x20\x05\xb2\x02\xd4\x34\xa1\x17\xf8\xd8\xe0\x10\x9e\x0c\xa4\xfb\xe7\x7c\x21\x68\x55\x6b\xa6\xb4\x08\xe1\x59\x61\x8f\xc7\x6c\x21\xd3\xfc\x46\x9e\xac\x09\x32\xcd\xaf\xa9\x8a\xbe\x05\x5a\xfd\x5b\xb9\x3f\x18\xd9\xda\xfb\x34\xff\x19\x5e\x62\xfe\x42\x9f\xc4\x4b\xff\x5d\xfb\xaa\xca\x91\x04\xf3\x91\x3c\xca\x9c\xe2\x5b\xa4\x97\x61\x58\x2b\x15\x23\x4d\xc9\x2b\x71\x93\xb9\x61\x38\x5a\x10\xf1\x
ef\x07\x72\xda\xd3\xc0\x27\x55\x4f\x09\x59\x32\xa2\x8d\x94\x34\xd6\xd2\x13\x94\xd3\xc5\xed\x20\x01\xba\xa0\xbd\x2f\xf6\xee\x3f\xc0\x26\x23\x2f\x72\xd6\x80\x14\xd8\x72\x67\x8b\xd0\x63\xd4\x47\xf6\x1c\xdd\xe0\x1f\x78\xf2\x62\x71\x83\xdf\x50\x72\xea\x80\xd8\xe8\xa1\xf8\x21\xad\xd2\x20\xcd\xd2\xfa\xbd\x63\x6c\xd3\x28\x62\xb9\x81\x15\x5e\x94\x66\xb9\x2d\x7e\x4b\xc9\x29\x63\x75\xcd\xca\x37\x07\x1a\xf2\x0d\x62\x2c\x0d\x1c\x17\x79\xfd\xb3\xd8\x4c\xc6\x27\xcb\xa5\xd1\xe2\x1f\x29\xf1\x8c\x9f\x59\x70\x97\xd6\x06\x36\xbe\x29\x7e\x33\xb0\xb1\xaf\x0c\x1f\xff\x44\x2f\x60\x10\x09\x57\xdd\xac\xfd\x4c\x25\x4b\x40\x39\x57\xfa\x13\xed\xed\x1e\x94\xf7\xa6\x25\xa7\x99\x7e\x3c\x1c\x14\xcd\x34\x57\xf1\x11\x56\x08\x87\xe4\x47\x7a\xee\xac\x06\xfc\xfb\xfe\x48\xbd\xd0\x9f\x07\x78\x5c\x6f\xbf\x60\xbf\x50\x5d\xfd\x33\xac\xe0\x3a\xba\xf2\xe8\xc8\x9b\xda\x20\x89\xfc\x2c\x9c\x0f\x51\x84\x83\xbe\xa6\x7f\xd2\x51\x5c\x36\xa5\xcb\xdc\x07\x06\x73\x21\xb4\xf2\x9e\x3e\x59\x4b\x1c\x79\xcf\xfd\x85\x15\x36\xcd\x12\xa1\xb9\x15\x81\x81\x3b\x58\xb3\x3b\x5a\x9d\xff\xa2\x13\x7a\xd9\x64\x29\xad\x84\x43\x42\x88\x15\xb9\x12\xdd\x19\x8e\x42\x97\x06\x72\x3f\x71\x0c\x11\xc3\x13\x9c\x67\xac\x9c\xe5\x3a\xbe\xfd\x64\x1d\xcf\xc9\x73\x64\x08\x24\xa7\xac\xa0\x93\x79\xe7\x65\x20\x9c\x87\x60\x44\x3d\x5b\x42\x18\x89\xc8\xb5\xba\x1a\x55\xe6\x45\xef\x92\x40\x62\x4d\x63\x58\x48\xd5\x3e\x3b\x2f\x20\xbb\x29\xf2\xcf\x8d\x9f\x45\x8c\x51\x51\x0e\x39\x7a\x47\x26\xeb\xee\xbf\xce\xce\x7a\xfe\xe1\xba\x27\x82\xfd\x3f\x1b\xa3\x3c\x22\x10\x5b\x4c\x24\x32\x64\x88\x9f\xcc\x3d\x21\x31\xf0\xc6\xd0\x21\x70\x03\xcf\x56\xb8\xbf\xd5\xf8\xa6\xb3\x76\x74\x63\xc7\x8a\x48\x62\x9a\x56\x61\x9f\xa1\x7b\x0b\x35\x4d\xdc\xbb\x85\xe0\x2c\x18\x36\x38\x05\x62\x48\xf3\x9e\x98\x50\xcf\x10\x17\xe7\xc6\x3c\x38\x07\xfe\xa0\x03\x7e\x9f\x77\x19\x1c\x81\xbd\xce\x0a\x5a\x83\xb5\xe4\x12\xc7\x73\x05\x38\x1c\xc3\x4e\x01\x08\x00\xd4\x1c\xe0\x4d\x13\x1b\x86\x55\x25\xfc\xd0\x9c\x0a\xbe\xe3\xeb\xf7\xce\xe9\xdc\xab\x1e\xc8\xd2\x24\xc1\x07\x93\x65\xc8\xcc\x1d\x2f\x08\xe6\xf1\xa1\x6b\xac\x0c\x27\x04\xbd\xa9\xce\x35\x83\x73\xa2\x79\xba\x07\x5d\x88\xaf\x6a\x56\xc2\x03\x68\x9d\x0a\x75\xbd\xec\xb8\xef\x5f\xe3\x34\xcb\xbe\x93\xdd\xe0\xaf\x19\x7b\xfa\x4b\x59\x3c\xaa\xe7\x37\xdb\x32\xcd\xef\xe0\xad\xc7\x48\xb3\x25\xce\xd2\x9c\xfd\xb5\x7b\x2b\xfa\x0a\x04\x4d\x00\x0f\x87\x2d\x15\xda\x0b\x8f\x69\x54\x3c\xc2\xd3\x6f\x5f\x41\x34\x28\xfe\x54\x14\x7b\x50\xe1\x53\x3b\xde\x39\x19\x31\x9f\x5c\x3e\x7f\x55\x05\xf3\x6c\xb4\x18\x96\x6e\xe2\x9a\x58\xdc\xf7\x7e\x3c\x92\x6c\xfe\xf7\xe8\x5d\x2e\xbd\xe6\xb8\x03\x6f\xc7\xde\xf6\x52\xf2\x43\x77\xd2\x81\x56\xee\xe0\x34\x4e\x01\x2f\xfd\x42\xad\x2d\x12\x3e\x86\xd5\xe2\x81\xfb\x0b\xed\x75\xab\x19\x36\x87\x6e\x62\x9a\x46\xc2\x6a\x23\xcd\xaf\x12\x4d\x7f\xc1\x62\x24\x91\x1e\x85\x66\x2b\x1c\x21\xe4\x32\x67\xe7\x05\xbe\x63\xc5\x9d\x1d\x9b\xe6\xec\x0d\xa0\x94\x75\x28\x2e\x44\xc8\x34\x45\x10\x3d\x2b\x24\x71\xb7\x83\x62\x62\xe4\xb0\xee\x06\xc2\x82\xf9\xe7\xe4\xbf\xc4\x25\x2a\x49\xd6\x16\xce\xc1\x7b\xfd\xb9\xa3\x8f\x6d\xef\xe8\x03\x9d\xd3\x88\x4d\x63\xc8\xfb\x29\x50\xc3\xe8\x5d\x91\xf5\x34\xa2\xc1\xe9\x0c\x3e\x18\x62\xa4\xf9\x96\x95\x29\xc8\x68\x4d\xd3\xa8\x46\xf3\x40\x40\x9e\x9a\x48\x4f\x75\x7c\x35\xc1\x2a\xcc\xdd\xf1\x2f\x9d\xae\x29\x84\x78\x80\xea\xc2\x8e\x16\x40\x02\xc8\x27\xc0\xe1\x0f\xae\xef\xff\x64\x55\x87\x4b\xa9\xaf\xe0\x12\xf7\x5d\xeb\xdc\x6d\x48\xc4\x16\x21\x70\xc4\x51\xee\x69\x26\x5d\x71\x80\x4f\xc7\xb7\x14\x32\xbd\xa5\x02\x1b\xc1\x0e\x6e\x9a\xd0\xb5\x06\xe8\x85\xc9\xcb\xfb\x65\xd3\xa4\xd5\xeb\x34\x4f\x6b\x06\xc8\xae\x69\x96\x8e\x70\x28\xde\x31\xdd\x9e\x21\x48\x6e\x03\xcb\x93\xc8\x3f\
x63\x8e\xb5\xb1\x91\x31\xa2\x51\x7b\x49\x79\x56\x9b\xfd\x43\xce\xd5\xb9\x4b\x1c\x7e\x12\x03\xa7\x05\xfa\x47\x3f\xb0\xb0\xae\xba\x50\x6a\x7c\xa7\x25\xac\xfe\x9c\xc3\x41\x9a\x27\x7d\x16\x0b\x09\x52\xcf\x7d\xa6\xa6\xc5\x61\xfc\xe9\x8d\x16\xa8\xb5\x23\xd8\xba\x3c\x2d\x3a\x73\x76\x3a\xf0\x7c\x13\x99\xe6\xb7\xf2\x5e\x21\x32\x4d\x89\x84\x23\xfc\xc7\xce\x91\x18\x69\x76\xa7\xc9\xf9\x06\x93\x2e\x6d\x2c\xd6\xd3\x0a\xa0\xbd\xaa\x0e\x11\x30\x92\xd4\x1c\xdc\x60\xa0\x48\x42\x9c\x28\x91\xbb\x9a\x6f\x8d\xcf\x24\xdf\x53\xab\xb0\xcf\x79\x07\x3c\x81\xf3\x45\xcf\x2c\x0d\x1c\x04\xfa\xef\x6b\xeb\xd7\x62\x72\xc2\x39\xd3\xb5\x80\x59\x3e\xf5\x65\x9c\x65\x3b\x31\xe3\x1f\xae\xa4\x45\xe2\xec\xd2\x44\x3c\x92\x8f\x37\x0c\xac\xb8\x39\xc3\xc0\x92\x0f\x94\xe4\xc0\xb9\x74\xa6\x03\x40\x3a\xe7\x20\x28\x6e\x2a\xfa\xc5\xd5\xef\x05\xe1\x72\xe8\xd4\x72\x94\x76\x6e\xe0\x1b\x2a\x83\xb4\x2b\x03\x39\x5e\xe8\xaf\xa3\xdb\x4f\xe0\xf2\x88\x79\x94\x93\x24\x91\xcf\xeb\x8f\xbd\xc8\x6f\x9a\xd8\x8b\x16\xcf\xe1\x77\xa9\x39\x54\x6a\xf1\xd7\x9d\xec\x50\xe1\xbf\xbe\x6b\x1c\xff\x90\x7f\x82\x93\xee\xc1\xc5\xc9\x18\xe5\x5c\x16\x84\x8e\xd8\x3b\x3e\x12\x4e\x47\xa6\xb1\x35\xf6\x05\x2b\xc6\x1c\x49\x7a\x88\xf5\xa6\x7a\xc9\x2d\x03\x81\x54\xec\x05\x5e\xe2\xfb\x3d\xa4\x79\x89\x2f\x0e\x0d\x35\x9c\xb8\x1d\xbb\x57\x0d\x5d\xdd\x61\x52\x88\x9c\x1e\x4c\x5b\xd0\xa4\x3a\xf7\x80\x38\x8c\x2b\xf8\xeb\x80\x10\x56\xca\x7c\xec\xf1\xea\x57\xda\x2b\xdc\x42\x8c\x59\x2d\x5f\x5b\xda\x6f\x1f\x19\xcb\xc9\xaf\x14\xeb\xf9\x86\x8a\xb9\xbf\x52\xcc\xcb\x4d\x58\x42\xe2\x58\xf9\x6b\xce\xd8\x5e\xe9\x55\x1f\xca\xe2\x40\x42\xa5\xb4\x54\xa5\x79\x42\xe0\xa2\x52\x3c\xf7\x4e\x28\x84\xde\x14\x38\x47\xa9\x48\xa0\xd4\x41\x69\x59\xab\xcb\x8c\x47\xa2\x74\x9d\x95\xb6\x28\xcb\x23\x12\x89\x47\xf0\xf9\x14\x8f\x4e\xc2\xb0\x3f\x09\x5b\x1c\x1e\xcb\x73\xe1\xab\x18\xe5\x41\x40\x4e\xd7\xdd\x0e\xce\xa8\x44\x87\xc2\x9b\x8a\xbc\xe4\xd2\xcb\x74\xdd\xef\xd3\x5b\x5c\x1e\x27\x42\x21\xe0\xf0\xf7\x1a\xd3\x27\xc0\x8e\x8e\x82\xd8\x93\x71\x25\x8b\x8a\x80\xb5\x1d\xcc\x99\xa7\xcd\xa5\xaf\xee\x34\xc6\x05\xaf\x29\x5e\xe2\xd5\x74\x9a\xbc\xa8\x13\xb5\xaa\x3b\x91\xe2\x91\x58\x6a\x56\x17\xfd\xec\xa3\xeb\x60\xde\xbf\x0d\xeb\xab\x6a\x76\x90\xf2\x70\xfd\x53\xaf\x0a\x22\xcc\xbc\x54\xfd\x2a\x9a\xab\x69\x82\x13\x09\x37\xec\xfc\x7b\x5e\x9a\xd4\x2e\x5d\xf8\xda\x6f\xf1\x19\xf4\x6a\x40\xaa\xa7\x61\xbd\x3e\x72\xea\x6c\x25\x46\xa7\xa6\x5c\x9b\xce\x5b\x00\x10\x9d\xbc\xcf\xda\x75\x9a\x52\x71\xe5\x9f\x3d\x51\xab\xaf\x1c\x1b\xc8\xcc\xe2\x40\x91\x69\xee\x30\xab\xa3\x98\x61\x4b\x7c\xc7\xe2\x3b\x28\x59\x04\xa6\x29\xd8\x18\x4e\x89\xb9\x81\xb3\x3c\x3b\x2d\x41\x24\xfe\x04\xb3\xda\xd5\x7f\xf6\xc5\xa2\xc8\xb9\xd8\xf5\x51\x27\x75\xbe\x5c\x94\xf6\xc1\x93\x73\x8f\x3e\x27\x47\x01\xb4\xf8\xa3\xd3\x21\xa5\xc1\x50\x20\x6d\x0e\x61\xf0\xc0\xee\x63\x30\xf9\x76\x05\x8e\xfc\xdf\x16\x07\x32\xf1\x19\x8e\xd4\xd3\x78\xcc\xa3\x81\xf0\x4d\x08\x1f\x06\x7a\x21\x53\x1d\x54\xea\xe2\x80\x65\x4e\x9c\xb3\xa1\xd3\xda\x1c\x2d\xae\x1e\xf9\x99\x77\x9e\x66\x7f\xba\x00\xa1\x43\x58\x54\x16\xbd\x86\xc7\xef\xbf\x42\x37\xcf\x35\x83\x1b\x03\xca\x1a\x10\xa8\xfd\x89\x9c\x81\x24\xee\x16\x88\x9c\x84\x3f\x6c\x1a\xe0\x20\xc0\x61\x20\x7c\x89\x08\x07\x61\x4d\xb5\x2d\x1e\x9b\x6d\x1a\x31\xf4\xec\x06\x47\x01\xb9\xe9\xfd\x40\x3e\xd3\x7c\x85\xb0\xc0\x42\xa7\x20\x00\xc7\x76\x42\xf2\xd4\xc5\x4b\x2c\xd9\xfd\x91\x55\xf5\x4b\xc5\x21\xbe\x2e\x85\xe3\xa7\xc9\xef\x16\x0b\x90\x33\x88\xa6\xc0\x02\xd1\x53\x50\x74\x7e\xa0\x19\x12\xaf\x35\x98\x18\x69\xea\x69\x71\xa0\x93\x19\xd3\xb1\x37\xa8\x52\x56\x6d\x11\xa6\x1c\xe0\xc1\x1c\xa4\xaf\x23\x09\xce\x23\x2a\x9f\xa4\x88\x99\x0a\x2d\xc1\x40\xca\x5d\x04\x29\x40\x9e\x2f
\x02\x14\x12\xa0\x05\x30\xf3\x94\x94\x64\x1e\xfa\x84\x79\x9a\xb8\xc3\x27\x54\x8b\x72\x6b\x31\x5b\x32\xb0\x84\x49\xc9\x24\x3f\x93\xfb\x7e\x6c\x83\x29\x05\x26\xeb\x2e\xb0\x6b\x7e\xf0\xb1\x52\xf0\x0d\x9e\x8f\xec\xb0\xc8\x43\x5a\x0f\x92\x8c\x6b\x03\xe4\x07\x4b\x9c\x74\xd7\x4f\x9d\xe3\x39\xf0\x0d\xce\xbc\xd8\x17\xe8\x2f\xe4\x87\x74\xa7\x48\x17\x69\x8e\xfa\x82\x49\xe1\x71\xa7\xdf\x27\xa9\xff\x14\x24\x69\x8a\x27\xe0\x6f\x78\x2f\x34\xc6\x73\x4e\x8c\x14\x8a\xbd\xc5\x87\x01\x9f\x1c\x01\x15\xd2\x39\x07\xc1\x46\xfc\xc4\x01\x0d\x7c\xff\x00\x80\x49\x11\xf2\xd0\xe1\x2f\x78\x2c\xc6\x02\x5d\x24\xf6\x31\x87\xc4\xc8\x34\xad\xfe\x85\x2c\xf1\x96\x24\x9a\x2b\x5e\xac\xbf\xe8\x1e\x53\xfb\x32\x4d\xb3\xb5\x38\x0d\xd6\x7f\x99\xcf\xf1\xde\x16\x31\xe7\x74\xf0\x99\xfa\xd6\x17\x5a\x2c\x70\xef\xc7\x18\xba\x2a\xe7\xbe\x69\x92\xa1\x6f\x60\xd4\x4a\x8f\x80\x32\x06\x02\x84\xbd\x0f\x38\x08\x85\x81\x72\x70\x01\xc4\xb9\x14\x24\x43\x92\xf0\x54\x29\x76\x25\x70\x78\xe0\x5c\xc6\x3a\xb8\x06\xdf\x9d\x86\x63\x88\x09\x14\x06\x7c\xf0\x3c\x23\x9c\x84\x99\xdd\x6b\xc6\x5f\xf7\x5e\xe4\xa3\xb0\xc8\xeb\x34\x3f\xb2\xf5\x81\xcc\x96\x6d\xee\x45\x3e\xb9\x37\xcd\x7b\x20\x63\x7b\x7a\x2e\x42\x6d\x1a\x5b\x29\x99\xf0\x9f\x8f\xd3\xa6\x39\xfb\x9c\x23\x74\xca\xc6\xae\xe8\x4d\xd3\x0a\xed\xe2\x81\x95\x71\x56\x3c\x12\xaf\xe8\x9e\x71\xff\xf8\x8b\xf6\xfc\x4f\x1f\xef\xa0\x33\x9d\x77\x47\xb1\xda\x3b\x50\x7c\xec\xa0\xa5\xe7\x11\xc1\xf2\x7e\xcc\x39\xf6\x1e\x29\xef\x78\x39\xf7\x8e\xec\x1c\x2b\xa5\xe0\xea\x0d\x82\x94\x9c\xfb\x73\xdd\x4d\x57\x24\x0a\x21\x84\xb0\x65\xa4\x39\xc7\xd4\x50\x6b\xd3\xc8\xb7\x85\x10\xaa\x8b\x96\xc4\x11\xbc\x9b\x74\xf4\x2a\x24\x4d\x9c\xb1\x4b\xc1\x7f\x03\x44\x6b\xd2\xc0\xa8\xe8\x3c\x3f\xee\x5a\xa4\x0d\xfa\x8e\x74\x29\x78\x47\xfa\x71\x71\x8a\xf1\x0e\x04\x27\x9d\x07\xcb\x41\x8f\x40\xef\x53\xcd\x2a\x88\x2b\xbb\x55\xe8\x6e\x06\xa6\xc0\x59\xcb\xd7\x97\xf7\x96\xbe\xbe\x5e\x7a\xca\x4a\x4f\xf9\xa7\x9e\xf2\xdc\x6f\x11\xc2\x29\x99\xad\x7a\x50\xcf\x11\x1f\xfd\xbd\xab\xba\x90\xe6\x57\xf7\xa6\x69\x1d\xc8\xbd\x3c\x34\x90\x73\xaf\x7b\x18\x57\x68\x01\x9f\xd4\x2d\x06\x9f\x9d\xd8\x34\x2d\x55\x80\xcc\x0e\x08\x1f\x4c\x53\x5b\xdd\xff\x1f\x71\xef\xda\xe4\xb6\xad\xac\x0b\x7f\xdf\xbf\x42\xc2\xeb\x97\x45\x58\x18\x8d\xc6\x49\x9d\xda\x9b\x0a\xc2\x72\x1c\x67\x25\x6b\xd9\x71\x56\xec\xac\xe4\x14\xcd\x9d\x22\x78\x91\xa8\x91\x44\x99\xd2\xdc\x32\xd4\x7f\x3f\x85\x6e\x5c\x49\x8e\x93\xbd\x77\x9d\x3a\x1f\xec\x11\x41\x12\x24\x71\x69\x74\x37\xba\x9f\x67\xd8\xb8\x87\xae\x53\x1d\xc9\x5c\x0c\x09\x23\x73\x9c\xf7\x73\xc6\x3f\x93\x33\x83\xe2\x47\xac\x45\x78\x88\xe5\x0c\x89\x16\xac\x60\x3b\xca\xe0\xf2\x4f\xf2\x63\xe4\xf4\xa9\x95\xda\x79\x80\xbd\x53\xa9\xef\xeb\x12\xf5\x97\x2f\xbc\x40\xef\x8d\x18\x89\x79\x87\xd7\xc8\x31\x3b\x03\xa4\xb4\xeb\x57\x02\x12\x32\x25\x07\x00\x72\xd0\xb7\xef\x2a\x8a\x31\xc6\xb2\x3f\xf0\x02\x2e\x0d\x51\xca\x72\x45\xda\x03\x50\xac\x15\x33\x1c\x1b\x79\xda\xf3\x43\x15\xe8\x78\x42\x5b\x19\x7c\x4f\xf4\xb1\x92\x92\x14\x0a\xc2\x8a\xda\x7b\x8b\xd4\xbe\x6a\x45\xf1\x8d\xbb\x2e\x54\x4f\xcd\x53\x06\xcc\xcf\xa5\x02\xfd\x10\x00\x02\x6b\xbf\xfd\x7a\xb8\xb0\xc0\x4a\x75\x0d\x18\x0a\x18\x26\xe3\x60\x5c\xbb\x38\xd8\x23\xe3\x55\x83\x8c\x80\x96\x75\xa6\x3e\x19\xb4\x94\xab\xda\xcf\x84\xa3\x50\xed\x3b\x89\xae\xab\x60\xd3\x96\x3b\x3b\x37\x1b\xec\x2a\xa9\x36\xcc\x36\xc6\x08\xb9\x40\x44\xfa\x4b\x5b\x02\x9e\x78\x7e\x75\x51\x48\x53\x9b\xd5\x7c\x83\x2b\xef\xd1\x1a\xd4\x35\x18\xd4\xba\x3c\x59\xa5\xf3\xf6\x66\x1f\x5a\x27\xd0\xda\x61\x0f\x0c\x33\x96\x6c\x58\xc5\x64\x87\x54\x5f\x5d\x05\x41\x1d\xe7\x91\x14\x12\xc3\x8b\xae\x98\xec\xd1\xb5\x07\xf2\x9d\x01\x7e\x01\x9b\x5e\x01\xa7\xf4\xda\x40\x6d\x23\xe
2\x40\xc6\x0e\xe0\xe9\x76\xb3\x0f\x04\x65\xcd\xe1\xe4\x94\x4d\x17\xec\x51\x05\xa1\xbe\x06\x95\x34\x7a\x3c\x33\x54\x4e\xa3\x81\xfd\x7b\x66\x39\x65\x3a\xd5\x52\xf9\x4e\xeb\xf2\x18\x09\x53\xf8\x0e\x4d\xac\x28\x67\xa6\x39\x23\xd3\xe0\xba\x0d\xa3\xdc\x34\x27\xc3\x56\x8a\x92\x94\x29\x48\x45\x79\xec\x92\x87\x9a\xad\x37\x65\xf7\x87\x19\xdb\x48\x4b\x0e\x28\xd6\xd5\xcf\xb9\xf7\x05\xa0\x1d\xa9\x13\xf8\x01\xa6\xed\x4d\x6f\x29\x88\x07\x56\x60\xa6\xe9\x08\x1f\xef\x82\x15\x5c\xc4\xbd\xee\x8d\xc0\xbb\x52\xba\xf9\xee\x98\x3d\xc2\xa7\x0b\x13\xd1\x65\xba\x3e\xc7\xae\xbf\x72\x12\x64\xc2\xbf\xde\xb1\x4c\xa4\x94\x46\x6b\x97\x1f\x53\x17\x2b\x4b\x53\xae\x7f\x1b\x30\x2d\xf0\x2d\x36\x22\xbc\x1e\x6d\x11\xea\x2b\x80\xde\x64\x33\xca\xe0\x86\x65\x4c\xdf\x6e\x74\x42\x9f\xe9\x0b\x52\x5f\x29\xc4\x6e\xb9\xba\xd9\x06\x2d\x2d\xf5\x64\x38\x41\xe1\x52\xde\xca\xb7\xbb\x7f\x50\x77\x82\x9b\xba\xb0\xc4\x94\xbb\xec\x10\x5e\xb3\x35\x20\xd0\xf8\x7c\xa9\xea\x1b\xc0\xb6\x0f\x02\xf7\x50\xa3\x11\x6d\x28\xdb\x58\x5e\x5b\x75\x85\x3e\x56\xf4\xb6\xaa\x54\xfe\xd6\x2f\xa7\xa9\xdf\x14\xe1\xad\x2a\x95\xbf\x8d\x7c\x51\x65\x78\x64\xec\x8d\x5d\xd9\x86\x66\xd2\xd4\x4c\xcf\xb0\x6c\x5f\xef\xa2\x0d\x43\x2a\x01\xb7\x01\xe4\xca\xb1\x39\xb7\x73\x63\xe0\xd8\x24\xa0\x6b\xc1\x1e\xb5\xba\x1e\x3d\x92\xe7\x24\x4a\x46\x10\x5d\x94\x23\xc9\x4e\x8a\x10\x09\x10\x94\x37\x2e\x0b\x73\x65\xdf\x32\xbb\x27\x2d\xa7\x67\x7e\x4e\xcf\x4c\x55\xdf\xf7\x23\x7a\x54\xc1\x34\x56\x3c\x00\x68\x32\x44\x19\xb7\x84\xfe\x4b\x37\xa0\x6d\x01\xf9\x9f\xbd\x08\xfa\x9c\x83\xd5\xe3\x5a\x1e\x79\xca\xfd\x43\x04\x47\xf2\x8a\x0c\xc5\x86\xc0\x30\x63\x35\x08\xa3\xa4\x16\xa9\x3d\xee\x13\x86\xc4\xfe\xf2\xa0\xeb\xc8\x68\xe4\x9f\x80\x59\xad\xa3\x04\x8f\x87\xb2\x2c\x06\x78\x0e\x28\x4b\xb2\x20\x18\x61\x9e\x71\x05\x65\x46\xa3\x47\x3d\x58\xa2\xbc\xeb\xa6\x79\x10\x88\x1e\x61\x26\xd0\xfd\x5a\xa9\x96\x69\xb9\x89\x97\x6a\xf6\x29\x07\x76\x25\x08\x84\xc3\xcb\x5a\xdd\xcf\x9b\xaa\x8a\x0b\x23\x0c\xf9\x22\xd2\xfb\x67\x26\xd1\xd2\x9e\x05\xeb\x5a\x1f\xc8\x65\x17\x4d\x78\xf9\x95\x47\xb7\x12\xa7\x38\xb1\xc5\x69\x34\x7e\x89\x91\xed\x7a\x33\xaf\xc0\xf1\x1b\x04\xea\xc7\x14\x77\x86\x42\x75\xc8\xd1\x0c\x2b\xe6\xcd\xb6\xe0\x85\x99\x4f\xcc\xfe\xf4\x49\x59\x3c\xf1\xd1\x6c\x0b\x2a\x2b\x6e\xb6\x85\xf5\xbf\xc9\xca\xd4\x23\x7b\x1c\x1b\xaa\x9c\x9e\xa5\x98\xf6\x7c\xe4\x55\x56\x94\x1f\x9a\xa7\xf3\x79\x11\x6c\x05\x83\x6e\x8b\x8c\x82\x1e\x6e\x76\xb4\xd9\x42\x63\x92\x4b\x65\x4d\x2a\x16\x30\x47\xcb\xd0\x6c\x90\x8b\x33\xcb\x54\xda\xb0\x3a\xf7\xd4\x26\x20\xef\x1b\x42\x19\x72\xd5\x40\xe3\x22\xc1\x75\x8f\xa7\x06\x95\x90\x6b\x04\xd6\x61\xfe\x90\x63\x15\x5d\x86\xa5\x06\x90\x46\x88\x95\xaa\xde\xd7\xc7\x35\xa1\x72\xf0\x80\x14\x0d\xa7\x0b\x7a\xb6\x54\xab\x78\x9e\xaf\x58\xd9\x75\x95\xea\x23\xc0\xf0\xb0\x19\xb8\x2b\xe5\x4b\xc5\xa6\x55\x17\xb1\x15\xed\xaf\x7d\xde\x14\x19\x4b\xbb\x97\x97\x2f\xb5\x26\x88\x22\x5d\x84\xb9\x79\x9b\x51\xae\x11\xcc\x12\x76\xf9\x46\x44\x10\x64\x2a\xb3\xf8\x34\xca\x90\x32\x9e\x3c\x8c\x2f\x31\x95\xd2\xc8\xc6\x9c\x7b\xcc\x24\xd0\xf2\x20\xaa\x8f\x90\x09\x6a\xdc\xdd\xb8\x68\x23\xbc\x93\xfc\x1f\x5e\x3d\x08\x0a\xc0\x8e\xa2\x96\x95\x10\x92\x40\x57\x23\x17\x1a\xc3\xdc\xde\x84\xcb\xbe\xc9\x2c\x2a\x2f\x2e\x96\xb4\x92\xb7\x48\xa9\xac\xd2\x43\x8c\x63\x36\x08\xe0\x94\x9e\x50\x52\x61\x86\x02\x39\xba\xb0\x53\x73\xca\x04\x9f\x5e\xb1\x4a\x83\xfb\x95\xec\x8a\xd2\xe5\x54\x04\x41\x2e\xc5\xce\x08\xf9\x0c\xf6\xfc\xa8\xeb\x50\x35\x6f\x68\xa8\x5d\x3e\xd7\xa6\x2c\x77\xdb\x4a\xea\xbb\x89\x6e\x57\x92\x02\x97\x85\xdf\xcc\xa9\xdf\xce\x45\x5c\x58\xed\x08\x4c\x03\x3d\x26\x81\x29\xd4\x63\xf0\x91\x9d\x0b\x58\xd7\xd8\xae\xf8\xd7\xc9\xca\x94\x16\x9c\xb0\x8d\x2a\x
b0\x51\x05\x36\xaa\x4a\x19\x91\x6d\x29\x52\x33\xd2\xe5\x18\x83\x02\xdb\x96\xb2\x16\xd3\x8e\x02\xda\x11\x9d\x77\x8b\xa5\xf8\x6a\x05\x89\x11\x45\x22\xd2\x20\x90\xff\xab\x97\xf5\x0e\x1c\xe1\xa4\x87\xbb\xfe\xa8\x33\xf5\xb6\xc3\x95\x47\x86\xa1\xcf\x85\xa1\x37\xa6\xbf\x29\xae\xc9\x8c\xaa\x7d\x22\xd2\xa5\xfa\xeb\xae\x4c\xde\xfe\x17\xfa\xc3\xbb\x6e\x8c\x8a\x21\x1f\x0f\x20\xc4\xe9\xad\xc5\xd8\x4a\x84\x02\x5a\x12\x2b\x76\x63\xe6\x8f\xdb\xba\x28\xbf\x6d\xee\xf6\xd1\x4a\x28\x3f\x11\x65\x50\xf8\xcb\x01\x8a\xe0\xfd\x55\xd1\x07\x24\x89\x90\xc5\xea\x33\x29\x93\x82\xf7\x87\xbd\x0d\x25\xc2\x3a\xce\x50\xfe\xee\xe6\xe4\x9c\x80\x9a\xf0\x84\xaa\xc8\x9e\x53\xd5\x9d\xff\x3c\xb4\x7e\x28\xd5\xf5\x57\x0a\x2d\xa2\xe1\xf3\x70\x34\xf2\x24\xb5\xfe\xe1\xbe\xd0\x85\x3c\x19\xd8\x4c\xc7\x8b\x61\x4c\x58\x3f\xf0\x52\x7c\x95\x9b\x71\x37\x9b\xd1\x8c\x43\xc8\x6b\x16\xaa\xe0\x57\x9c\xb6\xb9\x19\x56\x17\x17\xec\x8a\x2e\x73\xe3\xe5\x53\x0e\xf5\xe6\x10\x82\x77\x59\x79\x9a\x1d\xf5\xd1\x47\x76\x56\x2f\xa1\x55\x16\xed\x8f\xcf\x5a\x40\xde\xf1\x7c\xde\xfc\xea\x0b\xe7\xb4\xfb\x59\x02\xe2\x18\x51\x28\x8a\x90\xea\x1b\x41\xf5\xf6\x2e\xc3\x08\x71\xe6\xac\xfb\xfc\xf1\xb8\x6d\xee\xa2\xff\xb5\x58\xb0\x2a\x3b\x9e\xa2\x17\x8b\x85\xdd\x3c\xf8\x72\xb1\x50\x0b\x6e\x51\x6e\xb3\x87\x1e\xd3\xb6\xe1\x91\x93\xd5\xc5\xae\xba\x21\x0d\x2f\x11\x09\xa4\xda\x01\x69\xee\x08\x78\x87\x81\xde\x59\x3f\x3d\x67\x7d\x2e\x35\xdc\x62\xf0\xfe\x19\xc6\x07\x99\x3d\x01\x05\xef\xd6\xdf\x27\x7d\x0a\x51\x9e\x89\xe1\x29\xc4\xce\x20\x48\x70\xfa\x59\xc0\x7a\xdc\x34\x24\x94\x2e\x15\xb8\x81\xc5\x47\xd3\xd8\xf1\xef\xf6\x9c\x20\xca\x21\x00\xd0\xb1\x46\xda\x01\x88\x57\x5d\x16\x3c\x57\x40\x1d\x65\xc1\x3e\xf3\x8e\xea\x5e\x4e\x4e\x44\xa3\x9e\x28\xc8\x7b\xd6\xcc\xe1\xc7\xbf\xf4\x79\x6e\x9e\xa4\x01\xe2\xb7\x82\xed\x04\x57\xb0\xc9\xd9\xe9\xd4\x7e\x0f\xd9\x9c\x4b\x4f\x67\x92\xe5\x9f\x0d\x2c\xc0\x5b\x9f\xdc\xb9\x67\x16\xce\xe1\xbf\x40\x9a\x66\x6f\x7a\x8a\xb7\x70\xf0\x5e\xbe\x77\xc8\xba\x93\xe5\x2a\xfe\x05\x92\x2f\xfd\x3b\xfe\x79\x21\xff\xd0\x01\x58\x37\xf7\x68\x47\x0c\xfe\x44\x0c\xd6\xe8\x41\x47\x2b\x84\x0a\x77\xdd\x4b\x47\xee\xba\x50\xea\x74\xd0\x86\x18\xc5\xd4\x4b\xeb\x82\xdd\x7b\x07\x9e\x5a\x0a\x69\x1d\xef\x15\x6f\x45\x64\xc0\xdf\x9d\x30\x09\x14\xe9\x3c\xc7\xf8\x70\xaf\x4d\x80\x26\xb8\xb4\x61\x6b\xa5\x1b\xbe\x57\xf0\xd2\x84\xad\x09\x4a\xe3\x22\x0a\x33\x9f\xd9\x40\xb0\x7c\x46\xe4\x28\xc6\x4a\x56\xa6\x12\x05\xf1\x01\x55\x68\x62\x47\xa8\xa0\xe0\x0a\x56\x3d\xd3\x8f\x67\xff\xa6\x90\xf0\x63\x4d\xc5\x0b\xb9\x56\xba\x05\x22\x04\xf6\x7e\xec\xd3\x41\x83\x83\x6d\xea\x8e\xcd\x20\x50\x23\x16\xe9\x14\x01\xce\x44\x03\x04\xaa\x35\x50\x8d\xdb\xa5\xbb\x53\x37\xa0\x7a\x50\x14\x94\x7a\x46\x48\xcd\x08\x81\xfa\xc6\xc6\x5f\x7f\xc3\x4e\x48\x35\xd9\x98\xb1\x52\xef\x1b\x10\xe4\xaa\x70\x72\x5e\x22\x2c\xed\x10\xa7\x24\x87\x11\xba\x15\xfd\x4d\x5f\xe6\x8b\x3f\xd0\xb2\x7b\xbd\x99\xab\xfd\x4b\xa7\x32\x34\xca\x0d\x44\xd8\x70\xf4\x20\xcb\x9d\x7a\xe9\xcb\x8f\x77\xb3\xcb\x15\x1d\xd5\x20\x76\x42\x05\x0c\x9a\x0e\x5c\x42\x91\x6f\xe4\x7a\xc4\x71\xbd\xe1\x6b\x82\xd6\xa5\x02\x2a\xeb\x5b\xa5\x0c\xfe\x87\x0d\x25\x15\xcd\xa9\x6a\x89\x57\x40\x80\xa0\xce\x57\x94\x95\x3a\xc1\x6c\xaf\x36\x89\x11\x89\x07\xa5\x5b\xa7\x81\x45\x3a\xc4\xe6\x02\xfe\xa6\x46\x5d\x98\x75\xf2\x8c\x2c\xf2\x45\xd2\x61\x88\x7a\xda\x17\x49\xb8\x79\xff\x79\x91\xd4\x03\x4f\xfd\xbc\x48\x72\xd2\x33\x12\xac\xfe\xbb\xfa\x3e\xc9\xd2\xae\xcb\xd2\xbe\x68\x1a\xbc\xdf\x7f\x4f\x34\x4d\x9e\x90\x34\x82\xdb\x17\x80\xa5\x13\x80\x01\x6c\xf8\x8d\x48\x3d\x31\xf2\xd7\xc5\x44\x06\x81\x83\x7f\x51\x24\xc8\x8b\x81\x93\xf2\xa0\xe7\x7b\x26\x30\x6e\x7a\x3c\x10\xa5\x27\x40\xc8\x29\x13\x10\x9f\x4b\
x1c\x37\x27\x84\x13\xfe\xb0\x3f\x49\xed\x7b\x41\xa3\xbd\xd0\x30\x35\x1a\xd4\x86\x76\x5d\x33\x2c\x04\x00\xad\xb6\xac\xe2\x45\x74\x71\x25\xa7\xbc\x6a\x9d\xe8\x91\x54\x4d\x4b\x22\xb2\x3e\xed\xb6\xdf\x35\x2d\x61\x24\xdf\x66\xc7\x23\x89\xf0\xaf\xbc\x99\xc8\xae\xf3\x56\x5e\x10\xd5\x4e\x30\x87\x5e\x90\x9f\xf8\xac\xcc\x4b\x59\x35\xdb\xf5\xc2\x8b\xe8\xf0\x98\xf0\x74\x8d\xd0\x5a\x0c\x55\xac\x7e\x98\xc8\x48\xdd\x02\x20\xa3\xfd\x9b\xff\xea\x53\x7a\xd6\x87\xea\x29\xc2\x48\x5b\x66\xc5\xbb\xfd\xf6\x81\x30\xb2\xcb\xee\xdf\xc0\x14\x91\xcd\x54\x6e\xb7\x2a\xd1\x47\x1d\xfd\xa4\xc2\x12\x18\x69\x9b\xbb\xf7\x87\x6c\x2f\xcb\x9b\xad\xfa\x75\x73\x2c\xdf\x66\x07\xc2\x48\xd5\x66\xbb\xf2\x1b\x4c\x31\x60\x3a\xc5\xe0\x75\x81\x58\xbb\xae\x85\x23\x17\x79\x3d\x88\x11\x02\xc1\x5b\x2f\xc1\x64\x73\x83\xfe\x0e\xc2\x6d\x16\x97\x6d\xde\x34\xba\x82\xe3\x9f\x10\x27\x38\xe3\x93\xf0\x80\x4e\xdc\x75\x5d\xc5\xbf\xb9\x9c\x40\x30\x38\x68\xd7\x11\xe2\x93\xe7\x65\x45\xf1\x4a\x9e\x1b\x0b\x7d\xf3\x70\xe1\xc1\xcf\xdf\x73\xf6\xd1\xa7\xc5\x8a\x14\x5c\x8a\xde\x4e\x3f\x22\xcc\x3c\x90\xf5\x4f\x42\x65\x38\x2b\x84\xa1\xb1\x54\xf3\x8c\x3e\x0e\xda\x44\x2f\x56\x20\xaf\x6a\xc5\x21\x55\xf2\x4f\x22\xcc\xa5\xa9\x2e\x25\x4b\xee\x6c\xb8\x93\x09\x99\x1d\x44\x58\xd2\x99\x6c\xbe\xc7\x95\x43\xfb\x24\x92\x95\xbc\xdd\x21\xf6\x9e\x90\x59\x05\xd7\x41\x3e\x76\x31\xe3\x78\xb4\x5c\xf3\x83\x08\x0b\xca\xca\x29\xe7\x6b\x15\x08\x37\x68\x5d\xb6\xa6\xe7\xb3\x07\x13\xa0\xd2\x52\xff\x2f\xb7\xaf\xf3\x94\x3f\x69\xe2\xe9\x20\xcb\xdd\xb3\x23\xa5\xf8\xd2\xdf\x42\xc8\xff\xdb\x4e\xc1\xc3\xb1\xae\xf9\xfa\xe2\x8a\x16\xbc\x30\xc8\x3e\xe6\x0c\xfb\x9f\x74\x15\x1a\xe0\xfd\xae\x72\xf6\x1e\xd4\xe7\x2f\x9f\x62\x85\x14\x2e\x58\x17\xcf\x63\xe4\x72\x76\x06\xbf\xf2\x49\x78\xbd\x45\xa3\xfe\x0e\xc4\x48\x57\xe7\xb6\xab\x9d\xb7\xf4\xba\x3a\x37\x5d\x0d\x08\xf9\xf4\xfc\x04\x13\x25\x0e\x3c\x18\x76\x7e\xef\xf2\x9c\x3e\xa2\xba\xa8\x1e\x05\x8b\xf9\x48\x17\x0b\x5e\xa1\x92\x58\xce\xd7\xd9\x11\xdf\x44\xd0\xb8\xf4\xbe\x4b\x6a\xf0\xf6\xcb\x85\xda\xf9\xb6\x28\xd5\x41\x60\xda\x4f\x83\xae\x71\xfb\x01\x41\xf0\xab\xa5\x90\x27\xbf\xff\x6e\x16\xb4\xdf\x7f\x27\x06\xd4\xf8\xe8\x89\xbb\x41\x91\xe9\x68\xd1\x75\x19\xea\xa5\x84\x44\xae\x17\xd9\xaf\x17\x24\x23\x45\xd4\x0d\xf5\x59\xe3\x33\x96\x2f\x96\x82\xcb\x21\x97\xc9\x21\xe7\x8f\xfb\x42\x8d\xfb\xfe\x68\x0f\xd5\x70\x87\xd9\x80\x43\xde\x8c\x6b\x01\xe3\x59\xb3\x8a\x2d\x0d\x8f\x98\x52\x2c\x5b\xc1\x2f\x3f\xb6\x97\x2b\x5f\x4b\xbc\xcd\xb6\x4f\xc9\x13\x8d\x01\xb2\x84\x0c\xad\x27\xa6\x7b\xc1\x7b\xc3\x6e\xcc\x17\xaa\x14\xbb\x72\x39\x00\xab\x81\xd8\x8a\x22\xf6\x07\xa0\x1e\xa2\xb7\xd9\x36\xa4\x34\xca\x98\x26\x11\x2b\x39\x21\x91\xcd\x82\x52\xd3\xa5\x8c\xcb\x99\x3c\xe1\xc7\x6d\x94\x18\xb7\x81\x1b\xa1\xe5\x18\x86\x91\x72\x06\xca\xde\xcc\x66\x84\x9c\x29\x65\x52\xff\xba\xcd\xb6\x4e\x80\xb6\xa2\x1a\xe9\x17\x8f\xe3\x07\xa6\x10\xd0\xab\xd4\x48\xe1\x82\xdd\xda\x51\x58\x32\x02\xa6\x17\xe4\x58\x41\x5d\x68\x89\x95\x5a\xb2\x5a\xfc\x14\xf7\x65\xca\x91\x37\x29\x3f\xfb\x1a\xab\xe1\x6b\x84\x39\x17\x30\x6e\xed\x4b\xd0\x38\x8f\xa4\xb9\xa6\xbc\x2b\xa3\xa9\x1a\x5a\x3c\xb6\x40\x33\xad\x98\xdb\x72\xd9\x6e\xb9\x4e\x94\x71\x86\x93\xc9\x87\x84\x7d\xbb\xbf\xa6\xe6\xaa\x97\x71\x31\x87\xa7\x10\x10\x7d\x10\xa1\xc2\x97\xca\x28\x3d\x9f\x15\xb1\xd5\x13\xb5\xaa\x61\xab\xa9\x58\x8f\x20\x78\x7c\x55\x70\xc5\x95\x77\xea\xc2\xd0\x8f\x43\xb4\xf8\x9a\xaf\xc0\x93\x10\x01\xc1\xc7\x2a\xae\x66\x57\x91\x0d\xf0\x84\xf4\x8e\xea\xab\x45\x5c\x47\xab\xb8\x82\x10\xd5\x1a\x36\x6a\xeb\x0a\x4d\xdd\x94\x85\xd6\x13\xd5\x75\x05\xb0\x80\x06\xc1\x34\x37\x94\x1c\x41\x10\x4e\x73\x57\xf3\xd4\x27\xba\x6e\xfa\x4d\xe8\x9e\x61\x44\xb3\xca\x12\xaa\x91
\xc9\xda\x30\x57\xd3\x81\xad\xcc\xf0\x58\x2a\x0a\x0c\x61\x80\xfa\xd6\x83\x5c\x2b\x37\x12\xca\x6b\x18\x39\x2f\xae\x4b\x9d\xc2\xe2\x86\xb3\x5a\x9e\xfa\x82\x97\xd2\x86\x0d\x0b\xab\xde\x5b\xb4\x19\x3b\x0e\x55\xad\x48\x0b\x4d\x59\x05\x62\x08\x36\xa7\x80\x6e\x48\x41\x21\x00\x48\xb1\xd7\x19\xfc\xe2\x8a\xb2\xea\x7c\xf6\xb4\x6e\xe5\x96\xb3\x7e\xc0\x9e\x36\xec\x4d\xc3\x74\xe8\x44\x80\x06\x1b\x64\xe8\x18\xd5\x56\x23\x78\x3a\xdf\x11\x66\xba\x69\x41\x80\x9e\xcf\xd6\xf3\x08\x46\x8e\xff\x44\xc0\x1a\x78\x52\x98\x0c\x60\x5f\xd5\xc0\x8e\x49\xb3\x27\x91\x76\x2c\x52\x25\x91\x8f\xca\x82\x57\xf4\x14\xf0\xa7\xd3\x4c\x12\xc0\x3d\xf1\xec\x72\x69\xe6\x96\x02\x9b\x63\x43\x66\x09\x39\xf0\x5d\x92\x47\x15\x49\x0c\x14\x64\x3c\x29\xbb\xae\x48\xd9\x81\x6f\x51\xc6\x0a\xa6\x90\x62\x63\xe4\x51\x8a\x04\xfb\xe4\x9c\x33\x70\xc8\x70\x81\xa5\x07\xb2\xfc\x61\x51\x02\xf4\x49\x6b\x5e\xf3\x92\xcb\xca\x99\x34\xce\xcb\x5e\xea\xae\x7b\x3c\x3d\x2a\x6b\xf4\x30\x1b\xd0\xfb\xc9\x81\x72\xb0\xaa\xd9\x1c\x94\xb2\x20\x08\x3f\xf1\x83\xf3\x4c\x76\xe0\x9f\xe6\x18\x97\x40\xd9\x27\xc5\x60\x46\xd9\x35\x77\x6e\x8d\x50\xd5\x96\x2d\x3d\x3b\x30\xc1\x85\x4b\x45\x22\x3c\xfa\x81\x03\x1b\x72\x60\x06\x01\x10\x02\xd5\xc7\x0f\xf8\x6a\xbc\x8a\x5f\x44\x5f\x30\xa7\x0d\xf8\x27\x4b\x87\xc7\x5c\xee\x24\xee\x5c\x14\x8f\x92\xa0\x7d\xfa\x53\x12\x34\x74\x07\x19\xfe\x23\x14\xd9\x4c\x28\x92\x07\xc0\x92\x55\x64\x11\x90\x51\xaa\x65\x70\x22\xd2\xc8\x9d\xca\x39\x03\xc7\xc6\x7e\x40\x9a\x75\x40\x08\xac\xaa\xeb\xa6\x7b\xdd\xfa\x5d\x67\x7e\xaa\xad\xaf\x92\xe5\x8a\xd7\x0a\x3d\x90\x55\x10\x4c\xf7\x73\x4d\xd3\xa2\x62\x2e\x7e\xad\xf7\x45\x73\x07\x81\xd4\x10\x18\xc5\xf7\x1e\x51\x5c\xd7\x1d\x98\xee\xf0\xcd\xec\x20\x97\xb9\x35\x5f\x3b\x02\x8e\x2e\xd7\xcb\x5e\x49\x83\x92\x0c\xf0\x76\xd6\xcb\x9a\x73\x1e\x96\x3e\xf6\x49\xd7\x01\xd0\x27\x5e\x57\xbb\x38\x49\x5d\x57\xab\xaa\xf0\xc5\xba\x2e\xa3\x67\x6b\x06\x84\x6b\xde\xa0\x1d\xf0\x39\xce\x2d\x9c\x0b\x7c\xf5\xf5\x55\xbc\x89\xf6\x86\xe9\x4e\x7e\xcb\x4e\x33\x95\xad\x9f\xa0\x29\xd3\xa4\x7e\x6b\xcd\x9d\x46\x28\xdb\x05\xc1\x4e\xb5\xe9\x9a\xe5\x94\xed\xf8\x75\x10\xac\x93\xeb\xd4\x39\x13\x04\xbf\x84\x6b\xea\xb2\x5e\x79\xb7\x98\x52\xdc\x6e\x1e\xf2\x60\x59\x97\x10\xbe\xfc\x41\xf6\xae\x18\x21\xa6\x08\xa9\xec\x69\xbd\x01\x15\x04\xf6\xb7\x7a\x5e\x33\x3f\xc0\xd6\x5a\xae\xc9\xe7\xa6\xbf\x84\x25\xed\xba\x6b\x74\xab\x19\xed\xae\x4c\x0e\xd0\x8c\xde\x28\x00\x12\xc3\x52\x7e\x5a\x0d\x84\xfd\xd7\x29\x8c\x4e\xcb\xa3\x68\xe6\x3a\x3f\x30\x59\x43\x38\x76\x4a\x0d\x78\x53\x45\x4d\x3d\x7a\xae\xa3\xe2\x90\x78\x22\x9e\xc2\x08\x49\x67\x96\xb3\x9c\xa1\x77\xbd\x47\xf9\xb1\x38\xd3\x65\xef\xf9\x61\x81\xd8\x6d\x82\x0e\x90\x36\x07\xc2\xb6\xcf\x37\xf3\x24\x0e\xbb\xae\x5b\xbe\xe8\x49\x63\x1c\xaa\xc2\xef\x15\x6b\xe7\x53\xa1\x69\x52\xeb\x76\x52\xb9\x27\x63\x75\xe6\x80\xf0\x69\xd7\x4e\x22\x57\x8c\x09\x2c\x1f\x13\xb5\xa2\x4c\xf4\x62\x32\x69\xcb\x63\xfd\x47\x39\xc1\x9c\xab\x09\xb0\x08\x4d\x0a\xb1\xc5\x1f\xc0\x8e\x50\x34\x77\x7b\xfc\x75\x73\xc0\xbf\xd2\x08\x9b\x18\x42\x85\x89\xe6\x50\x98\x58\xbe\x85\x89\xe5\x58\x98\x20\xf3\xc6\x04\x57\xf8\xc9\xf1\x46\xec\xea\xd3\xe4\xba\x7c\x80\x7a\xaf\xcb\x87\x43\x5b\x1e\x8f\xf2\xc7\xcd\x61\xe2\xd0\x27\x13\x27\x11\x78\x6c\xc3\xda\x77\xc0\xdb\x1d\x82\x81\xc7\x7a\x81\xf6\x2e\x2c\x87\x0a\x88\x2f\xd7\xac\x36\xaa\xd5\x46\xba\x77\x2d\x3f\xed\x33\x9d\x6b\xbf\x55\xaa\x09\xf6\x73\x43\x01\x62\x06\xbc\xa2\xaa\xb1\x39\x69\xf6\x9a\x68\xaa\xde\x4f\x32\x7b\x06\x93\x5d\xf3\x75\xf8\x88\x1c\x57\x86\x8f\x0a\xe9\xa8\x5c\xf2\xa8\xb1\x01\xe1\x6f\x6d\x2b\xb1\xae\xc6\x73\x28\x98\x21\x02\xf2\xb9\x18\xe9\x79\x39\x20\x4e\x44\x7d\xe9\xe6\xd0\x4f\x83\x55\xe0\x8e\x3d\x81\x8b\xe6\x8a\x4
d\x1a\x28\x98\xa0\x4b\xb9\xe0\x0f\x59\x5a\x33\x1c\x8d\xcc\xbd\x96\x85\x65\xd7\x2d\xe8\xec\x0a\x00\x66\x91\xee\xf2\xbf\xff\xe0\x8b\xab\x65\x19\x7b\xd5\x97\x34\x0a\x8b\x51\xa6\x28\xfb\x36\x2a\x23\x41\x56\x00\x96\x0a\xa8\x5d\x27\xc1\xb3\xf9\xb6\xc9\x31\x46\xfa\xc6\x04\x29\xb0\x5b\x69\x21\xc7\x52\xe5\x02\x17\xfb\x6f\x6f\xdf\x0c\x11\xfb\xc0\xf7\x25\xba\x6e\x10\x8e\xa5\xb3\xfd\x41\x13\x04\x24\xca\x9c\x83\x38\xca\xe6\xdf\xbe\x7b\xfb\x93\xac\xb0\xa5\x58\xf1\x77\x6d\xb3\x7b\x0f\xb7\x83\x36\x56\xde\x9f\x2e\xef\x77\x5b\x42\x15\xcc\x64\x41\x1f\x35\xcf\xf5\xd9\x62\x00\x4e\x01\x3b\x4c\x6d\x6d\x1f\xbf\x79\xf8\x90\xad\xa4\xe5\x17\x12\xa8\xb2\x2d\xdb\xb6\x69\x9d\xbc\xa8\x76\x0e\x25\x21\xf9\x61\x7f\x9b\x6d\xeb\x62\xf2\xdb\xdb\x37\xd1\x84\xcc\x80\xed\x03\x9a\xe1\x4e\x7e\x6d\xf2\x31\x7d\x76\xc9\xee\xc1\x35\x10\x7f\xdc\x5f\xae\xd8\x83\x52\x4a\x71\x06\xab\x0d\xa7\xae\xde\x65\xab\xb2\x6b\xcb\x63\x79\xea\xaa\x7a\x5b\xc2\x0e\xd4\x1f\x9f\xdd\xaa\xba\x2e\x1f\x56\xe5\x9e\x5e\xd6\xd6\x3d\xfd\x52\xf4\x22\xfc\x46\xd3\xe9\xd5\x64\x11\x0e\xf8\x25\x2b\xe9\x63\xde\x75\x77\x7a\x57\x83\xc6\x45\x98\xc9\x11\x20\x6b\x9c\x91\x84\xcc\x86\xa4\x1c\x66\x57\xa6\x8c\x45\x44\xa4\xb2\x95\x12\x56\x6a\x1c\x7a\x4d\x19\x9e\x77\x9d\xbe\x73\xca\x79\x0b\x2b\xa9\x7c\x89\x02\x23\x82\xfd\xe8\x35\x41\xcd\xf3\x4a\xa8\x4e\x24\x65\x8a\x35\xc2\x98\xc9\x76\x7c\x7c\xf3\x14\x60\x2b\xc7\xe3\x96\x7c\xea\x40\x11\xd2\x48\x2c\x8b\x44\x47\x7d\xa5\xbc\xdc\xe7\x4d\x51\xfe\xf2\xf3\x0f\xaf\x9a\xdd\xa1\xd9\x23\x93\xde\x8c\x70\x32\x1b\x39\xe3\x9b\xe6\xc3\xd6\x05\x0c\xae\xf9\xe6\xd3\x4d\xd9\x3e\xa8\x75\xfc\xa7\x6d\x56\xef\x4d\x7c\xa5\x6e\x7c\x0f\xd3\x03\x23\xcc\x40\xc7\x65\xd6\x63\x61\x5a\xd1\x49\xae\x79\x29\xc2\x9c\x41\x12\x8d\x70\x40\x9e\x0a\xa5\xfd\x06\x84\xf6\x42\x4d\x8f\x65\x5b\x67\xdb\xfa\x8f\x31\x3c\x3f\xd5\xa2\xa1\x72\xc8\xa9\x0b\xf1\x3b\x28\xe4\x49\xbb\x45\x23\x15\x9c\x46\x30\x72\x31\x98\x45\xc5\x2a\xa0\xf7\xae\x54\x53\xca\xba\x1f\xb2\xd8\x55\xa9\x95\xb7\xf5\x4c\x75\xcc\xeb\xa0\x3a\xe3\x28\xf2\x12\xf9\x65\x73\xc9\x36\x56\xce\xac\xfa\x18\x92\x48\x1b\xfd\x84\x06\xc1\x1f\x6a\x30\x7b\xfe\x24\xa9\x5d\x3d\x08\x8b\x89\x8b\x67\x73\x0d\xff\x3e\x35\x9c\x14\x94\x9e\xa9\xff\x71\xee\xa8\x72\x1d\x68\x3e\x91\x13\x06\x4c\xf4\x7c\x65\x39\x8d\xd1\x4f\x96\x8f\xf8\xc9\x1e\xe5\x87\x44\x68\xe2\x28\x12\x48\x8b\x85\x7f\x2f\x18\xf9\xd8\x7e\xdc\x13\xb9\x16\x46\x23\x97\xe6\xe3\x97\x22\xdc\xaa\x96\xc8\xdf\x08\x7e\xf9\xff\xbf\x58\x5c\xae\xd8\x2b\xc1\x2f\xff\xbf\xf9\xf3\x67\x97\xec\x5b\xc1\x2f\xc3\x24\x0e\x52\xfa\x3b\x4f\xfe\x33\x48\x9f\x5f\xb2\xd7\x20\x6f\xe6\xcf\x63\x1a\x25\x93\x8f\xa7\xf4\x79\x98\xfc\xa7\xac\x31\x7d\x4e\x9f\x5d\xae\x76\xec\x3b\xbd\x23\x2e\x9a\x9b\x53\x97\x1d\x0e\xf2\xdf\xc5\xf1\xd4\xb4\x52\x78\xcd\x67\x17\x30\xec\x8e\x75\xb3\x07\x19\x26\xc5\x59\x77\x57\x17\x40\x7f\xf7\xec\x92\xfd\x4d\xdd\xfe\xb7\xd7\x1f\xba\xef\x5f\xbf\xfc\x96\x3e\xbb\x64\xdf\xcb\xb2\x8f\x97\x1f\x2f\x2f\xd9\x0f\x82\x3f\x9e\xd9\xdf\xe1\xff\x7f\x08\x4e\x9e\x5f\x12\x9d\x22\x4c\x9e\x13\xca\xde\x8c\x44\x42\x65\x84\x2e\xdf\x08\xd8\x76\xe5\x27\xfc\x6b\xa5\xe1\x5b\x77\xd7\xcd\x8f\x02\x1b\xae\x2e\x26\xd8\x57\x3e\x6b\xa9\xf3\x96\x17\xac\xea\x47\x21\xf8\x9e\xf6\xfe\x46\x50\x4e\xf5\x76\x08\xaf\x92\x52\x5a\x45\x64\x46\x38\xe7\x45\xb2\x48\xe3\xb0\xe0\x85\xc1\x49\xeb\x3a\xf2\x9c\x30\xcc\x6e\xcb\x20\x81\x35\x49\xa9\xc9\x0e\xc8\x29\x8d\xfa\xe7\xc0\x34\xcb\xdd\x64\xbc\x1f\xfb\xf2\x1e\xf1\x60\x32\xce\xf9\xdf\x85\x6d\x88\x55\xb8\xc6\xf3\xb5\x81\x77\x49\xd6\x29\x46\xb8\xa2\x34\x4a\xd6\x98\xef\xe0\x0c\x76\x75\xcb\x86\xaf\x55\x24\xf8\x53\x71\xd2\x9b\xae\xab\xba\xae\x4c\x36\x69\x5c\xc5\xd3\xb0\xe6\x1b\xaa\x02\x89\xa2\x10\x29\x
8d\xa5\xa5\x67\x13\x1f\x36\x94\xad\xe4\x7f\xd3\x2b\x7a\xa6\xac\xd6\x4b\xf0\xca\xbd\x38\x59\xa4\xb4\xeb\xa6\x25\x64\x77\x04\xc1\x0a\x46\x80\xfd\xee\x77\xfd\x24\x44\xde\xce\xb3\x4d\x76\xff\xbe\x3c\x9d\xea\xfd\xea\x38\xaf\xb6\xd9\x49\x65\x73\x19\x1a\xeb\x1c\x57\x18\xeb\x33\x4e\xf2\x34\x08\xc2\xb0\x4c\xf2\x34\xce\xa2\xa2\xeb\xc2\x82\x3f\x9e\x29\x4d\xf2\x14\x4e\x5a\xf9\xea\xd0\x35\x4e\x17\x48\x9f\xe2\x00\x36\xfe\x34\x9e\x6f\xce\xb3\xb9\xda\x79\x3e\xb2\x9a\x67\xf6\xe3\x94\xe1\x4c\x9e\x03\x80\xac\xfc\xd4\xda\xf8\x5e\x4c\xba\x73\x01\x48\xa2\xd9\x7c\x57\xef\x94\xd5\x0f\x9e\xe5\x9f\xcb\xe3\xa1\xd9\x1f\xcb\xef\xcb\xac\x28\xdb\x90\x28\xd8\xf3\x8b\x0f\x48\x7f\x24\xc7\x63\x41\xcd\x7a\xba\x06\x0e\x7b\x08\x08\x97\xff\xa3\x68\x2b\x28\x7d\xac\x4d\x6f\x94\x74\x29\xda\x32\xbb\x86\x8c\xe9\x64\x91\xd6\xfb\x49\x4e\x2b\x78\x2d\x58\x7d\x2c\xf1\x56\x8e\x2e\x8b\x1a\x79\x9a\xe4\xb7\xdd\x96\x2d\x24\x61\x95\x33\x32\x21\x33\x79\x22\xa5\x8f\x15\x2f\x55\x8d\x2b\xc8\x84\x2f\xe9\x19\xf2\xbf\x57\xf2\x09\x26\xa8\xa4\x9a\xe2\xa7\x07\x81\x7d\x95\x8a\xb2\x3c\xa9\x52\xdb\xb0\xff\xec\x0f\x6f\x9b\xc8\x2f\x07\xfa\xb5\xdb\xa8\x6a\x5e\x41\x0b\x5c\x27\x57\x29\x32\x73\xc0\xaa\xe9\xbc\x2a\xdd\x24\xab\xfe\x1e\xbf\xf7\x29\xab\x74\x59\xf1\x6b\xdd\x1d\x7a\xa7\x13\xa0\x3b\xa5\xb1\x0c\x6d\xff\x5d\x5d\x6e\x8b\x63\x52\x21\x9d\xc4\x48\x79\xca\x05\x05\x86\x0d\x60\x64\x94\xaf\xf8\x1d\xac\x6c\x41\x10\x0a\xee\x16\x80\x75\xa1\x3f\x01\xb2\x75\x2b\xe6\x3c\x5e\x3e\x16\x47\x49\x25\xbb\xc4\xe8\x54\x04\xb8\xf5\xeb\x20\xa8\x21\x4e\x47\x76\xcb\x8a\x6f\x92\x1a\xba\xa1\x4a\xbb\x6e\x93\x90\xe7\xf0\x93\x4d\x57\xd4\x21\x88\x02\xb7\x62\xe9\x5a\x84\xeb\xe4\x2a\x55\x70\x77\xb6\x8a\xb5\xec\x49\x53\x0b\x1c\x51\xfa\xb8\x02\x98\xb5\x78\x05\xe4\x52\x91\xfc\x0f\xb2\x6b\x00\x1c\x52\x5e\xc3\xae\x4d\x4f\xca\x5a\xa9\x33\xb0\x56\x48\x98\x2d\x7f\x05\x41\x96\x90\xd3\xba\x6d\xee\x8e\x24\xa5\x82\xaf\x42\xad\x05\x4a\x95\x1e\x8f\x95\x9e\xbe\x35\x2b\xe4\xf1\x94\x9d\xca\xc8\xd3\xc4\x19\xfc\x89\x56\xf1\x36\x22\x3f\x36\x13\xec\xc2\x23\xa0\x69\xb4\xcd\x4e\x0e\xc7\x19\x99\x9c\x1a\xd9\x0a\x67\xbd\x7f\xac\xeb\x39\xde\x80\x95\x43\x98\x6c\xfa\x48\x9c\x1d\xbc\xc9\x2c\x3f\xd5\xb7\x65\xb4\x60\xdb\xec\x78\x7a\xdb\x14\x75\x55\x97\x05\x64\x95\x9e\x32\xc8\x2e\x75\xc5\x4c\xf4\x78\xd3\x6e\x23\xb5\xee\x30\xf0\x85\x90\xbf\xbd\xfe\x40\x58\x7d\x7c\xd3\xe4\xd9\x36\xfa\x4e\xab\x20\x02\x11\x4c\xf2\x66\x4b\x19\x72\x0b\x01\xe1\x64\xdb\xc8\xf7\x00\xbe\x15\x29\x57\x8e\x0f\xfb\x5c\xb1\x44\xcb\x49\x8d\xcc\xc7\xd9\xe1\xb0\xad\xd1\x96\xba\xbc\xbf\xb8\xbb\xbb\xbb\xa8\x9a\x76\x77\x71\xd3\x6e\x51\x3f\x2d\x96\x93\x7c\x2d\x1b\xe6\xc4\x7f\xf9\xf0\xdd\xc5\xbf\x13\x26\x6d\xb8\xc3\x49\x65\xe7\xfd\x43\x20\xfb\x08\x9a\x41\x07\xa9\x8c\x12\x64\x2b\xc0\x12\xf9\x93\xb0\x7b\x79\xec\x3d\x69\xb7\x65\x13\x63\x39\xb1\xcd\x11\xd0\x7a\x9d\x0b\x64\x89\xba\x62\x93\xdd\x66\x8a\x45\xe6\xac\xdf\xfd\x18\x3d\xca\x3a\x2f\x3f\x8a\xfb\xdd\xf6\xa3\xb8\xc4\x47\x5e\x7e\x14\xf2\xef\x25\xd6\x77\xf9\x51\xc8\xbf\x1f\xc5\xe5\x99\xf9\x73\x08\x6f\x26\xba\xf0\xb7\xb7\x6f\x88\xfa\x0a\x5d\xf4\xa1\xbc\x3f\xe9\xd7\xd2\x65\x7f\x7f\xff\xee\x47\x7c\x03\x35\x9b\x65\x0b\xc0\x2b\x92\x08\xcd\x41\x34\x06\x27\xf0\xcd\xc0\x33\x0a\x87\xb2\x16\x12\xc9\xbb\xd1\x7c\x54\xc5\xf2\xc3\x23\x6b\xaa\x9e\x99\xb3\xa8\x60\xcf\xeb\xae\xba\x3f\x45\xd3\xc5\xd9\x8c\x8d\x9b\x27\xc2\x02\x45\xfc\x4e\x84\xb0\x74\xf9\xab\x15\x65\x82\x46\xef\x44\xe8\x97\x02\x71\x8c\x2c\xb0\xac\x49\x6f\x45\xf8\x83\xa0\x50\xf8\xa1\xcd\xf6\xc7\x43\xd3\x9e\x64\xe1\xdf\x55\x61\x2f\xf3\x78\xcc\x07\x3f\xe0\x3d\xcc\x61\x7d\xec\x49\x57\x76\xcd\xb6\x6a\x7b\xc3\xbc\xd4\xcd\x21\x7c\x84\x14\xea\x03\x6f\xe6\xea\xb3\xbb\xae\x61\
x9f\xec\x21\x6c\x35\x58\x70\xa4\x83\x32\x84\x68\xdc\x86\x07\x1a\xe9\xfd\x95\xa3\x97\x18\xcf\x4e\xbc\x9d\xbf\xca\xb6\x5b\x91\xe5\xd7\xc7\x90\x34\xfb\xbc\x9c\xec\xca\x5d\xd3\x3e\x10\xca\x6e\x78\x33\x97\x93\xf6\xe6\xf8\x0a\xd8\xfa\x1f\xcf\xec\x56\x8a\xfe\x3b\xf9\xdf\x3d\x27\x48\x4e\x5b\x16\x84\x3d\xf0\xc7\xb6\xcc\x8a\x87\xf7\x30\xc5\x81\x37\xde\x5f\x2b\x47\x40\xa9\xe4\x52\x81\x8b\xda\x9a\x3e\xae\xf9\xe3\xd9\x04\x4e\xbc\x16\x98\x95\xba\xa2\x74\x9d\x88\x21\x3f\x06\x17\xc9\x8b\xf4\x2c\xf8\x3a\xc9\x7a\x67\xce\x9e\x35\x20\xd0\x1a\x10\x67\xf9\x3e\x2f\xb7\x5b\xff\x95\x8e\x23\x46\xd5\xb5\x8a\x5c\x85\x9d\xc6\x9f\x11\x67\x68\xf0\x01\xce\xa0\xc2\xe7\x5c\x43\xba\xd4\xdd\xe0\x6d\x46\x8a\xba\x2e\x63\xb7\x49\x06\x6b\x14\xc6\xd3\x34\xb7\x65\xdb\xd6\x45\xf9\x56\xa9\x1a\x63\xd1\xa8\xf6\x31\x8d\x51\x49\x78\xa6\x6b\xb0\x5d\x34\xde\xca\x80\x0a\x71\x4d\x1f\x74\x96\x72\x96\x3c\xa8\x6e\x75\x13\xd8\x04\xda\xb8\x37\x89\x48\x79\x72\x03\xf9\x2b\x89\x48\x53\x9f\xce\x25\x13\x72\xd0\x8f\x04\x29\x76\xdd\xbd\x43\xe5\x5a\xce\xe1\xc2\x50\x50\xf6\x32\x5c\xe8\x50\x94\x33\x98\xec\x47\x03\x35\xf0\x40\x59\x33\xbf\x69\xb7\x3c\x0c\x45\xd7\xc1\xcf\xae\x53\xf2\x9c\xce\x08\xa1\xc6\xb8\xfa\x5e\x30\x47\x88\xcf\xc8\xe5\x25\x91\xf7\xc2\xbe\x40\x3e\xdf\x95\xa7\x75\x53\x74\x5d\xae\x08\xe5\x1a\x53\x82\x97\xb0\xc6\x2a\x2a\x3c\xb4\x07\x60\x03\xd0\xa7\xed\x0b\x42\x52\x15\x9b\xd1\xcc\xf3\xb6\x39\x1e\xbf\x6d\x76\x59\xbd\xa7\x8f\x9b\x71\x53\x48\x2e\xa1\x1b\xb4\x86\xe0\x63\x98\x3a\xc0\x3f\xcc\xab\x84\xbf\xe9\x7d\xcf\x4c\x1a\x52\xcd\xf1\x34\xc5\xb4\x7c\xe7\xc4\x06\xca\xd5\xaa\xfc\x07\x7d\xf4\xeb\x91\xa2\xaf\xae\xd4\x57\xc1\x16\x92\x5d\xd6\x6c\xa0\x95\x31\x17\xf4\x75\xea\x06\xae\xdd\x10\x78\x28\xdb\xb4\xcd\x0a\x80\x87\xcf\xb6\x94\xb2\x1f\xa5\xcc\x63\x0d\xcb\xd9\x03\x65\xd7\x5a\x7d\x7c\x58\x6e\xf5\x0e\x9c\x7c\x22\x2e\xa9\x6c\x1b\x04\x0b\x40\xae\xc1\x55\x7c\x36\x03\xbd\xdd\x73\xec\x13\x90\x66\xa7\xac\x3d\xd9\xfe\xc3\x3f\x3e\xa0\x35\x6b\x20\x66\x4a\x91\x0a\x4d\xff\xa6\x16\x72\xbc\x94\x32\xd5\xbe\x66\x78\xbc\x82\xc8\x0d\xef\xa6\xf8\x89\x06\x01\x4c\xdf\x66\xee\xac\xf1\x10\xcf\x64\xf7\x59\xff\x74\xc1\x07\x2c\x1e\xd5\x7a\xf8\xc7\xbc\xc7\x37\x82\x91\x19\x91\xb6\xe3\x5e\xbd\x21\xaa\xc5\x3a\x29\x91\x32\xd3\xfc\xd5\x8c\x87\xb7\xc2\xa0\x83\x93\x80\x44\x24\x26\x74\xa6\xfa\x41\x45\x9a\xe3\x11\xa0\x1c\x67\xf9\xba\xd4\x34\xb9\x15\xaf\x2c\x9f\xa1\x60\xe4\xd9\x15\xa1\x6c\x3f\x5e\x21\xf9\x9d\x93\xd9\x8d\x98\xcd\x26\xb3\xbd\x9e\x6f\x15\xfe\xac\x2b\xad\x62\x01\x2c\x83\xab\x73\x81\x7a\xfd\x30\xef\x0b\xc1\x90\xfc\x50\x5d\xe8\x6b\x2e\xde\xd7\xfb\xbc\x24\x6c\x70\x27\xec\xd9\x9c\xb2\xd5\xe7\x2a\xf9\xb1\xd9\x97\x17\x6f\xe5\x90\x26\xf6\x6a\x4a\x99\x33\x90\x6d\x67\xca\x23\xa7\xc7\xd4\x2e\x5e\xee\x96\xd1\xf1\x27\x79\x06\x1a\xf3\x6a\xa1\x6c\xec\x86\x97\xa0\xbd\x11\x57\x64\x80\x9d\xd4\xcc\x95\x5e\x97\xf8\x67\xd2\xf8\xc9\x33\x33\x65\x29\xf8\xc5\x31\x61\x13\x32\xfb\x87\x98\x91\xe5\xe4\x13\x5f\xcc\x17\x57\x24\x22\x84\x46\xb6\x1a\x80\x54\x00\x9b\x79\x27\x45\x72\x33\x5f\xe3\x72\x45\x47\xde\x77\xc7\xcc\xe9\x64\x87\xbc\x86\xcd\x1c\x99\x8a\xde\x97\xfb\x02\xe1\xe6\xcd\x21\x86\x41\x1c\xd8\x03\x6b\x28\xc7\x46\xbc\x36\xd1\x23\x0f\x4a\x66\x43\x25\xf7\x9c\xc0\x11\x61\xc8\x58\xdb\x58\xf4\x0b\xf6\x80\x08\x19\xcd\x5c\x29\xf2\xb2\x04\x10\x31\x1a\x74\xca\x53\x56\xf2\x1f\xa5\x5a\xa4\xe4\x06\xac\xf1\x0f\x73\xab\x1f\xf0\x2b\x29\x2b\x3e\xf5\xa4\x02\x50\x2e\x25\x0f\xac\x49\x3d\x49\xd3\xcc\x41\x31\x97\x3d\x70\xc2\x24\xbe\xaf\x17\xb0\x27\xfb\x14\x54\x9f\xfe\x0e\xa2\x2e\x27\xf4\xcc\xcc\xbd\x14\xa5\xf4\x35\x9f\x5e\xb1\x72\x7e\x94\x46\xc7\x2d\x7b\x49\xad\x70\x85\x95\x12\x0c\xa4\xc9\x1f\xcb\x97\xe1\xc5\x15\xfb\x83
\x9e\x31\x2a\x13\x8e\xa4\xb5\x63\xb4\x3f\xe2\xc4\xa6\xbf\x44\x3f\x8d\x71\xdc\x80\x02\x77\xcb\xee\xd8\x3d\xcf\x97\xd7\x5d\x17\x5e\xf3\x29\x6c\x00\xf7\x32\x12\x6b\xd9\x5c\x6a\x7b\x78\xc5\xd7\x52\x1a\x31\xaf\xb1\xc4\xd7\x8b\xf8\xcb\x68\xc1\x36\x5c\x7c\xcd\x5f\x2c\x16\x41\x20\xbe\xfa\x62\xb1\xe8\xba\x2f\x16\x5f\x72\xce\x05\x93\xbd\x7c\xcb\x7f\x12\x61\xc3\x1e\x00\x26\xe5\x96\xff\x53\x1e\xdc\xb2\x07\x80\x3a\x89\xc3\xde\x54\xbf\xe3\x0f\x63\xce\x8c\x37\xd9\xf1\x64\x26\x37\xa1\xec\x6e\x4c\x2a\xf0\x3b\xca\x9e\xb8\x5f\x4e\x62\x73\x9b\x9a\xd1\xfc\x8e\x52\xf6\x02\x5f\xb4\xeb\xc8\xf7\xaf\x5f\x7e\x2b\x0d\x6a\x14\xe4\xf1\x3d\x27\xfb\x46\x53\x10\x44\xea\x7b\xb0\xf4\xb4\xd3\x2f\x12\x85\xf7\xfc\x16\xf4\x95\x92\xed\xf8\x2d\xca\xc7\x3d\xbf\xc5\xc1\xc6\x36\x7c\xba\x47\x89\x7b\xcf\xa6\x22\x08\xee\xbb\x4e\x8e\x5e\x65\xa6\x0a\x08\x4f\x17\x7c\x41\x29\xcc\x77\xd0\x7a\xb8\x30\x3f\x81\x1e\x28\xcc\xbb\xee\x5e\x2a\x1c\x6c\x13\x1f\x3d\x2c\x9d\x03\x4b\x76\xec\x9e\x3d\xa4\x34\x3a\xba\x60\x3a\x07\x39\x50\xef\xd9\x3e\xb5\x95\x4a\xf5\x2b\xbc\x91\x3a\xb3\xea\x4c\x6f\x88\x6f\x62\x1c\xe4\xca\xf4\x8d\xe0\xe8\x35\xbe\xa3\x1c\xf3\x6c\x13\xef\x22\x59\xdd\x09\xa0\xfe\x9c\x87\xa4\x54\xd6\x14\xf6\x66\xcb\x2b\x35\x1b\xcd\x8c\xb9\xb8\xd0\xeb\x2e\x6c\x8e\x8d\xad\xba\x0d\x44\xdc\x69\x0d\xf9\x01\x94\x62\x69\x76\x3d\x95\xa5\xd6\xea\x54\x1f\x96\x33\x02\x56\x1a\x85\x7b\xde\x83\xb1\x39\xae\x0f\xeb\x7b\x74\x80\x8f\x25\x38\xf5\xc2\xe0\x56\xe5\x89\x30\x72\x68\x8e\xa7\x21\x0a\x7c\x7f\x4f\xdc\xcb\x70\xef\x79\x7b\x21\x06\x16\x82\xb4\x0a\x00\x3b\xd7\x86\x15\x9a\x4d\x16\x74\x07\xec\xc5\x8c\xa9\x80\x30\x2d\x8e\xa3\x12\x5d\x10\x39\x53\x92\x2c\x02\xac\x90\xfe\xa6\x51\x10\x40\x70\xa6\xac\x55\x93\x0c\x8f\x85\xc8\xa9\x67\xba\x8f\x42\x77\x84\x79\x9c\x6e\x0b\x06\x8b\xb9\xe3\x73\xb8\x32\x5e\x89\x2b\xa6\xbd\x33\x10\xc9\xd1\xdb\x4f\xba\x6b\xb3\xc3\xcb\xed\x48\x44\xb3\xab\xa4\xc3\x72\xd5\xcf\x8f\x40\x3c\x07\x1b\x86\x9c\x2c\x52\x8c\x07\x56\xf0\xca\x03\xa2\x6a\x3a\x2f\x3f\x85\x0b\xea\x70\xcb\xe9\xcb\xfc\x64\x23\x8f\x7e\x52\xd7\xcc\xc4\xf8\xc6\x14\x60\x5e\xa1\x91\x97\x21\x1f\xa7\xd2\x9f\x91\x96\x53\xbe\xe1\xa0\xd4\xb2\x19\x1b\x32\x42\xcc\xd4\x50\xc6\x8f\x6c\x93\x1f\xf6\xfb\x71\x56\xe3\xbf\x90\x33\xe0\xa4\x87\x98\xaa\xfc\xe4\x10\xfa\xf9\x24\x01\x93\x07\x90\x73\x61\x5c\xce\xa1\x4d\xef\x8f\xf3\xb9\xea\xb7\x30\xa3\x91\x70\x18\x15\x29\xbe\xfd\x68\xfc\xb0\xf7\xde\xcb\xa7\x03\x72\x72\xff\xf5\xe5\x53\x84\x1f\x6f\x4e\x23\x7c\xd4\xcd\x7e\xf0\x30\x8f\x0d\x10\xba\x35\xcc\xe8\x7c\xdf\x9c\x42\x22\x9a\xe2\x81\x0c\x39\x6e\x6d\x26\x8d\x21\x39\xd4\x7b\x78\x9a\xba\x9c\x9e\x2d\xc6\x98\x4a\x58\x3d\x1c\xcb\x9b\xa2\x39\x6a\xe4\xc5\xe1\x2b\x4c\x7b\x17\x02\x1d\x95\x1c\x25\x30\x03\xc6\x4e\x8d\x55\x32\x0d\xb3\x39\x92\xe4\x00\xbe\x7f\xd7\xe9\x43\xa4\x7f\xf9\x0c\x2b\x04\x3c\xc6\xdb\xc2\xb8\x5f\xb7\x2e\x8c\x80\x54\x1a\x1c\x90\xf9\x6c\xfe\xdb\xdb\x37\xdf\x9f\x4e\x07\xa5\x8d\x29\xfd\x41\xd0\xc7\x33\x7a\x73\x7e\x16\xfc\x71\x01\xc0\x08\x57\x2f\x5e\x7c\x11\xbd\x58\x7c\x79\x66\xef\x45\x7f\x9f\xe4\x7e\xdd\x86\x74\x29\x75\xab\xf6\xc8\xa7\xd3\xf7\x22\x08\xc8\x5d\x7d\x5a\xbf\x6a\xcb\xa2\xdc\x9f\xea\x6c\x7b\x24\xf5\x7e\xf2\x5e\xb0\x06\x6e\xe4\xef\x05\x5c\xa6\x5e\xd6\xe8\x21\xe1\x20\x8e\x83\x15\xa8\x0a\xca\x9a\xbb\x4e\x56\x3c\x15\x9e\x0d\xab\x5d\xb2\x1e\x55\xaf\x1b\x65\xcb\x85\x7a\xbd\xba\x0a\xd7\x40\xd0\x18\x62\x20\x1e\x13\x60\xdb\x0a\xd4\xcb\xe4\xd1\xb1\x84\x60\x51\x48\x44\x3c\x1e\xef\x9a\xb6\x90\x12\xe0\x7e\xdd\xa2\x3b\xd1\xee\x05\xb8\x85\xeb\x64\x95\x72\xa7\x20\x59\xa5\x4b\x61\x9c\x1b\x41\xb0\x9e\xf7\x1d\x23\x63\x65\xa1\xbd\x45\x3e\xd3\xf9\xc2\xae\x2b\x13\xf2\xdb\x85\xea\xa0\xb2\xb8\x00\x42\xce\xb4\xeb\xc
2\xd1\x72\x4e\xfc\x1e\x55\xe0\x9e\xf0\xda\x25\x5d\x0f\x35\xef\x15\x03\xaa\xcd\x65\x3e\xb6\x16\x38\x23\x27\x07\xaf\x5f\xc1\xd7\xf3\x66\xbf\x6d\x32\xfc\x01\xda\x09\xfc\x02\x5d\x15\x7e\x81\xca\x07\x6a\x0e\xc6\x9a\x41\x98\x21\x53\x6a\x38\x50\x51\xaf\xb5\x86\x1e\x29\xf5\x06\x4a\x07\xb0\x5f\x6b\xa5\x91\xc4\x55\xb8\x60\xea\x4a\x1a\xc9\x5e\xc4\x72\xb6\x76\x94\x1f\x79\xe2\x67\x91\xe8\xa2\xb4\xeb\x46\x2f\x43\xa7\x2c\xb0\x8d\xac\xcd\x8e\x8b\xb2\x9e\xe1\x0c\x1d\x89\x19\x72\xae\x2c\xef\x4f\xf1\xa3\xa8\xf7\x59\xfb\x10\xd9\xe2\x73\xf4\x08\xee\x5b\xff\xc2\x33\x5b\xcf\x47\xfd\x75\x21\x85\x24\x09\xd3\x92\x79\x48\x99\xdb\x9e\x79\xa8\xbf\xd6\xa6\x3e\x9b\x36\x8e\x6d\x6b\x17\xd1\x68\x7b\x3b\x9d\x26\x15\xd1\xb5\xa3\x84\x03\x3f\xd3\xa8\xb5\x91\x07\x41\x01\xc0\xcc\x2c\x97\xcf\xc7\xde\x42\x23\x63\x8d\xf6\x85\xf0\x0c\x59\xdc\x65\x45\x70\x27\x6d\x76\xd4\x8a\xd8\x06\xcd\x8e\xfa\x3c\x70\xb3\xc1\x63\xf2\x50\x67\xa3\x78\x6e\xe9\xd0\x1d\x7e\x99\x3b\x03\x00\x07\x41\x2f\x48\x73\xd4\x3d\x38\x6e\xfb\x7a\x6e\x65\xb3\x61\x81\x97\xa8\x9d\x09\xbb\xad\xc0\x26\xde\xce\xc3\x13\xe5\x65\xbe\x1b\x2d\xbf\xbf\xb0\x67\xbc\x0d\x0a\xf5\xb4\xcb\x8f\x22\x8c\x23\x59\x6b\x27\x2f\xa4\x58\x0c\xbb\x12\xde\x56\x02\x6c\x0a\xa8\x6a\xc6\x97\x7a\xd4\xa1\x5e\xdf\x66\x72\xa1\x65\xd9\x68\x63\x19\x15\xcc\x5b\x73\x0d\xd7\x33\xba\x59\xb0\xe1\xc0\xe1\x32\xbd\xa2\x6c\xd8\xa8\x08\xb1\x22\x95\x3b\xdb\x98\x56\x18\x8f\x3e\x03\xb6\x30\x3d\x27\xa2\xca\xe9\x59\x3e\x2d\x89\xa5\x6a\x41\xbe\xc2\xda\xbe\x26\x14\xa3\x7a\x1e\xd5\x76\x53\x94\xa9\x2e\x7d\x85\xc7\xec\xd8\xe6\x51\x26\x45\xf3\x99\xce\x9b\x7d\x48\xe4\x14\x99\x28\x33\xc8\x17\x52\x42\x07\x0e\xea\x50\x7b\x96\x05\x41\x15\x3a\x42\x05\xcd\xb3\x2f\x17\x5f\xc2\x12\x86\x87\xf2\x53\x0b\x70\x36\x78\xa0\x37\x42\x2a\x7c\x9f\x1b\xb1\x2a\x08\xe6\x83\xe0\x49\xca\x7e\x11\xfc\x32\xe4\xf4\x63\x1c\xc6\x3c\xe8\x9e\xd1\xee\x63\x8c\x21\x89\xce\x78\x94\xa6\xc6\x21\x22\xb9\xda\x95\xc0\x7d\xa6\x83\xde\xa4\x18\x72\x9c\x7c\x10\x18\xdf\x0d\x86\x0f\x66\x44\xcc\xc8\xef\xe8\xf4\xf2\x14\xe3\x0c\x82\x2e\xb2\xd1\x71\x21\x9f\x01\x5b\x51\x07\xc2\xbc\xcc\x93\x1e\x65\x97\x98\xc3\x45\x1a\x24\xed\x17\xe5\x71\x83\x35\x91\xc6\xe4\xa6\xdd\x92\x68\x90\x65\x26\x94\x4f\x0b\x9c\x8f\xe2\x7f\xea\x7c\xb4\xcf\x04\xe7\x60\x10\x10\xf9\x17\xb3\x90\xd7\x5d\x47\xf0\x2b\x80\xfc\xd7\x8b\xe8\xd0\xee\x79\xfd\x0d\xba\x45\x7b\x51\x7a\xfe\x49\x1a\xf7\x0a\x42\xa9\xc3\x7a\x25\x6c\x1d\x8b\x64\x9d\x72\xf9\x9f\x71\x4b\xfe\x82\x6e\xc9\x59\x69\x2e\xd7\x4d\x06\x4d\xe5\x78\x3f\x75\xd3\x69\x87\xa5\xba\x1c\x63\x00\x61\x51\xb7\xf1\x01\x6a\x6a\xe1\x9e\x61\xca\x87\xfb\x37\x2b\x1b\x1a\x5a\xce\xc8\xe4\x2e\x3b\x4e\xf6\xcd\x69\x22\xc7\x12\xf8\x34\x56\xc9\x22\x3d\x33\xbf\x61\x38\x1a\xb7\x00\x7f\x5d\xa6\x4c\xfe\xe7\xc1\xfd\x73\x13\x8c\x7d\x66\xc5\x08\x92\xb4\x11\x1f\x55\x0c\x61\xd4\x16\x58\x24\x2c\x69\x84\xd5\x41\x28\x25\x7c\xbc\xdf\xf4\x79\xaf\x29\xe5\x58\xbe\x39\xae\xc3\x92\x02\x23\x9d\xd7\x33\x15\x95\xb3\x74\x05\x06\xd6\x8a\x57\x96\x90\x42\x0b\x1c\x08\xd7\xc6\x7d\x89\xef\x3f\x00\x56\x08\xbc\xf6\x00\xcc\x8c\x17\xf3\x7a\x77\x40\x1b\x0b\xc6\xda\xc8\x4d\xa1\x1c\x97\xd2\x08\xb0\x78\x3b\x0e\xc5\xf5\x57\x72\x58\x7e\xfd\xd5\x25\xfe\x71\x0f\x08\x7b\x81\x82\xd4\xd8\x03\x4a\xc5\x3e\x43\x5e\x04\x6c\xe7\x42\x1d\x7d\xa7\x83\x93\x64\x6d\x31\x20\xd5\x98\x4d\xd2\xe5\x68\x1a\xb9\xda\x4e\x9d\x5e\x51\xbb\x85\xea\xd2\xb4\x8e\xb5\x46\x1c\x8a\xbf\xfa\xfd\x40\x76\xd5\xdb\xe7\x11\xd9\xb1\x94\x67\x70\x67\xa7\x30\x81\xd6\xb8\xc5\x23\x86\xf2\xb1\xa4\x34\x12\xbc\xa0\xac\xe2\xaf\x34\xf7\x3f\x5b\xf1\x69\x1e\x04\x49\xca\xaa\x38\xe9\x3f\xa2\x4a\xae\x52\x0a\x3c\x8b\x9f\x10\x9a\x5e\xb0\x15\x0c\x86\x95\xa1\xb0\x6b\xc3\x15\xb5\x
32\xbc\x9d\xef\xca\x76\x55\x86\xb2\x3a\xd7\x0e\xd3\xde\x03\xd0\x95\x9e\x44\xa2\x61\x2b\x64\xdd\x58\xf3\xcc\x85\x10\xb0\x90\xe3\x98\xb0\x56\xf0\x83\x08\x35\xc7\xee\x9a\x52\x26\x8d\x75\x3c\x5a\x30\x79\xdc\x0f\xf5\xf5\xb7\xba\x23\x31\x82\x9a\x0b\x84\x26\x9c\xfc\xf4\xee\xfd\x07\x39\x35\x4d\xaa\x83\x1c\xf7\x03\x5f\x8a\x14\x95\x3d\x77\x0a\x06\x4f\xa8\x28\x12\xda\x03\xef\xcf\xe8\x63\x65\x67\x2e\x5b\xcd\xe5\xd5\x61\x11\xcb\x25\xb5\xa8\x6f\xe5\x7a\xaa\x2c\x71\x67\x54\x86\x19\xa5\x90\xba\x1b\x16\x68\x36\xeb\xe9\x9e\x07\x81\xef\xa6\x5a\x0d\x4c\x63\x0f\x5f\xb1\xea\x3a\x27\x34\x09\x34\x68\xc1\xb2\x14\x18\x3c\x94\xe3\xc2\x38\xc3\xec\x9e\x18\xb3\x9e\x3a\xd6\x73\xf3\xb9\x0e\x43\xcf\x95\xc8\xac\xf7\x7c\x48\xa8\xd8\xcf\x30\xf1\x4d\x7e\x58\xe1\x32\x3a\x62\xa8\x2b\xa0\xc4\x62\xdc\xcf\xb5\x6a\xcb\x43\x68\x50\x3c\x3d\x4f\x8a\x92\x15\xb0\xf8\x20\xb0\xbe\x9e\xfe\xac\x55\xc6\x38\x24\x69\xbc\x83\x9f\x4f\xe3\x23\xd9\x50\x08\x87\x03\x58\xf1\x56\x13\xca\xb6\x5c\x4a\x5a\xb6\xe3\x8f\xe7\x25\x91\x3a\x7c\x9d\x6b\xd2\x0e\x4d\xd0\xa1\xaf\xe6\x44\x13\xfd\x13\xca\xd6\x7c\xab\xde\x22\x44\xac\x5d\x55\x35\xf8\x46\x59\x6d\x0b\xb6\x40\x5e\xc8\x36\x3c\xb4\x24\xd9\x8a\xbf\xa3\xaa\xef\x01\x27\x8e\x5f\x53\xd8\xc7\xab\xdd\xf5\xfb\xe6\xd4\x40\x8a\x27\xdb\xc4\x61\xc1\xb7\xe6\x2d\x42\x60\x80\x04\x7e\xff\x52\x4a\x8d\xb2\x3a\xd1\x28\x5c\x0d\x99\x7a\x4b\xb7\xa8\x96\x45\x83\xa9\x05\x0e\x6d\xa1\xc1\xcc\x73\x0f\x07\x78\x4d\xa9\x06\x63\x16\x73\xc0\x49\x0d\x77\xf2\x2f\x1e\x5d\xac\xe5\xff\xb3\x95\xbd\x44\xbe\x08\x5c\x23\x7f\xa8\xe3\x8b\x35\xfc\x91\x8b\x2f\xb9\x39\x4a\x81\x2c\x4d\xf7\x58\x1a\xfa\xf5\x7e\xa5\x9f\xbb\xa3\xd1\x16\x5a\x6b\x87\xa4\x54\x8e\x9b\xb2\xe9\x77\x2e\xfd\x1c\xc4\x82\xd5\xbe\xc1\x3d\x37\xe6\x69\x83\xb1\x8c\xb5\xce\xcd\xe0\xd1\xb8\xb0\x42\xe7\x1b\x1b\x9c\x18\x37\xc7\xcc\x86\x3c\x3e\xe5\xfb\x91\x1d\x55\x3d\xc5\x2e\x09\x88\xb2\x3e\x4b\x7d\x2e\xf5\x2a\x75\xa0\x64\x36\x2c\x14\x4e\xce\x26\x7b\x3c\x35\x87\x08\xba\x7b\x56\xce\x0f\xd9\xaa\xfc\xdf\xf8\xce\x17\xf9\x3c\x87\xda\x3f\x34\x07\x26\x1b\x39\x2a\x54\x5b\xc3\x55\xbf\xf5\xae\x7a\x03\xd4\x96\x11\xd4\xb6\xc0\xeb\x17\xe7\x33\x33\x04\xee\x8e\xf4\xa9\x2b\xe3\x7d\xd5\x88\xa5\xcc\x24\xdb\xb1\x82\xfb\x75\xe8\x60\x62\x33\x96\x71\xe0\xe7\xee\x24\x8b\x05\xf2\xf9\x8f\x35\x4b\x14\xaa\xb4\x00\xec\x94\x9f\xd0\x77\x28\x5b\xcb\x29\x0d\x29\xfb\x26\xcc\xe4\xe3\x51\x52\x53\x08\xf1\xcd\xcc\x59\xaa\x5f\x0b\x1b\x4a\xcd\x3d\xb8\x1e\x79\x3a\x3f\x34\x07\xc3\xdb\x4d\xbd\xe6\x1a\x5e\x2b\x9b\xca\x5e\x7c\xa6\xd8\x05\x38\xea\xa1\xfe\x0b\xf3\x89\x48\xb5\xf5\x41\x0a\x59\x53\xaf\x1a\xf9\x58\x7d\xff\x52\x60\x32\xc5\x74\xc4\x33\x73\xbf\xf8\xbf\x94\x95\x31\x68\x2f\xed\x09\x87\x28\x0d\x2d\xc2\x46\xa4\x1d\xf8\xc5\xbd\x1b\xb5\x98\xed\xba\x36\x3b\xfb\x20\xbd\x86\x75\x2e\x22\xce\x88\x22\xcc\xb0\xd4\x61\xb9\x1a\x8f\x4f\xa4\xe4\x79\x97\x48\x59\xbe\x1c\xa0\xeb\x16\x9f\xa1\x16\xb5\xdc\xf4\x4b\xc7\x0f\xaf\x32\x6a\x33\x1a\x57\x3c\x8b\xfe\xa3\xcf\xf6\x54\xf1\xcc\x9d\x42\x2e\x5b\x71\x5c\xc5\x55\x22\xd2\x28\x4b\x8a\x14\x62\xdb\xc3\x2a\xae\x0c\xef\x5e\x98\xc7\x95\x3b\x7b\xa2\x92\xe5\x71\x19\x55\xee\xbc\xa3\x70\x2f\x2f\x81\x5c\xb4\x18\xe0\x01\xf6\x71\x99\xe5\xf2\x0b\x2b\xc0\xe7\x29\x8a\x81\x2a\xf7\x50\xdf\x97\xdb\x9f\x54\x67\x31\x3f\x99\xd3\xcd\x71\x55\xdc\xe8\x82\x32\x4d\x0a\x9f\x53\xb4\x17\xec\x1a\x91\x88\x14\x68\x6c\xa3\xfc\x4c\x9d\x4e\x55\xec\xe5\x86\x3f\x19\xc6\xb9\xe6\xf3\x1f\xe2\x20\xe3\x4d\x86\xf5\x16\xd4\x76\x32\xcb\xb4\xeb\x25\x12\x8c\x90\x88\x34\x37\x27\x28\x3e\xf7\xa0\x75\xa1\xa7\x0b\xa7\xa7\xad\x53\x98\xf7\xdb\x4d\x6a\xe0\x0e\xd8\xb4\xd1\xdb\x4b\xb9\xd2\xe6\x80\x87\x8a\x0c\x00\x15\x06\x2f\x6b\x96\xbb\x48\x13\xd0\x1b\x2d\xb3\
x3f\x88\x00\xf3\xe1\xe9\x41\x24\x68\x0c\x51\xfa\x76\xe5\xc5\xcf\xa1\xb1\x48\xcc\x07\xa7\x91\x15\xd4\x7d\x89\x9d\x10\x94\xb0\x70\xd9\x7f\x80\xba\xa2\xc7\x62\x1c\x56\x23\x12\xde\xd0\xfd\x08\xb0\x8a\xc0\x12\x6d\xb6\x5b\x79\x3f\xab\xbc\x23\x7d\x81\xa6\xeb\xc7\x0b\xfc\x23\xfb\x70\xaa\xb3\x35\x60\xa0\xe3\xec\x97\x1f\xbf\xa6\x86\x8c\x12\xf1\x2f\xd6\xf4\x2c\x2d\x81\xb8\x8c\x74\xb0\x00\x40\x19\xf7\x12\x7e\x45\xed\x7a\x91\xfc\xfd\x5c\xad\xf3\x65\x2a\x1b\x9c\xe5\xb0\x2b\x34\xb8\xa5\x77\x43\x55\x99\x3b\xe8\x99\x69\x28\x84\x3f\xe1\x4f\x40\xd5\x52\xf1\x20\xdc\xec\x9f\xb8\xcb\xdc\x03\x28\xab\xbd\xc1\x15\x3b\x8f\x27\xcf\x9f\x13\xb5\x07\x27\x0b\x04\xcb\xba\x4e\x96\xb1\x5c\x4d\xdd\x75\xb3\x2d\x7e\x2e\xb3\xe2\xc1\xd3\x58\x33\x80\x58\xcd\x8a\x87\x5f\xb3\xfa\x34\x9b\x45\xea\x08\x58\x10\x40\xab\x82\xfc\x30\xee\x65\x8b\x69\x23\xf5\xef\xef\xdf\xfd\xc8\x9d\xa8\xe4\xd6\x24\xb1\xf1\x6f\x18\xd1\x4f\xb1\xc6\x0c\x02\x08\x07\x01\xfe\x9d\x67\xbb\x42\xff\x0e\x09\x06\xe2\x12\x96\xa4\x23\xcc\xd6\xad\x52\x5f\xfe\x25\x78\x36\xdf\xfc\x53\x5e\xc9\x7e\x95\xbf\x9f\xd9\x81\xbf\x6f\x5e\x35\xfb\x6a\x5b\xe7\x27\x3e\xa6\x6e\xcf\x9f\xc9\x95\x03\x94\xdf\x67\xfc\x57\x81\x34\x09\xaa\x2e\x73\x46\x1d\xfe\x4b\x50\xd6\x9e\x99\x00\xc0\x1a\x55\x26\x6f\x6b\x65\x31\x5d\xfe\xdb\xff\x09\x00\x00\xff\xff\x29\xff\x13\x14\x83\x52\x01\x00") -func pagesAssetsJsJquery1102MinJsBytes() ([]byte, error) { +func pagesAssetsJsJquery300MinJsBytes() ([]byte, error) { return bindataRead( - _pagesAssetsJsJquery1102MinJs, - "pages/assets/js/jquery-1.10.2.min.js", + _pagesAssetsJsJquery300MinJs, + "pages/assets/js/jquery-3.0.0.min.js", ) } -func pagesAssetsJsJquery1102MinJs() (*asset, error) { - bytes, err := pagesAssetsJsJquery1102MinJsBytes() +func pagesAssetsJsJquery300MinJs() (*asset, error) { + bytes, err := pagesAssetsJsJquery300MinJsBytes() if err != nil { return nil, err } - info := bindataFileInfo{name: "pages/assets/js/jquery-1.10.2.min.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + info := bindataFileInfo{name: "pages/assets/js/jquery-3.0.0.min.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} a := &asset{bytes: bytes, info: info} return a, nil } -var _pagesAssetsStylesBootstrap311MinCss = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\xbd\xeb\xaf\xe3\x38\xb2\x27\xf8\xfd\xfe\x15\xea\x2c\x24\xaa\xb2\xd3\x56\x49\x7e\x3f\x50\x67\x7b\xb6\x67\x30\xd3\xc0\xf4\xfd\xb2\xfd\x61\x80\xea\xdc\x05\x2d\xd1\xb6\xba\x64\x49\x57\x92\x4f\x9e\x2c\x8f\xff\xf7\x85\xf8\x66\x30\x28\xc9\xce\xcc\xc2\x2e\x70\xfb\xa0\x2b\xcf\x21\x7f\x0c\x06\x23\x82\x0c\x32\xc4\xc7\xcf\x7f\xfe\xd3\xbf\x05\x7f\x0e\xfe\xcf\xb2\x6c\x9b\xb6\x26\x55\xf0\x3a\x0f\xe3\x30\x0e\x7e\x3a\xb7\x6d\xb5\xfb\xf9\xe7\x13\x6d\x0f\x32\x2f\x4c\xca\xcb\x87\x0e\xfd\xd7\xb2\xfa\x52\x67\xa7\x73\x1b\xcc\xa2\x38\x9e\xce\xa2\x78\x11\xfc\xe3\x73\xd6\xb6\xb4\x9e\x04\x7f\x2b\x92\xb0\x03\xfd\xcf\x2c\xa1\x45\x43\xd3\xe0\x5a\xa4\xb4\x0e\xfe\xfe\xb7\x7f\x70\xa2\x4d\x47\x35\x6b\xcf\xd7\x43\x47\xef\xe7\xf6\xf3\xa1\xf9\x59\x55\xf1\xf3\x21\x2f\x0f\x3f\x5f\x48\xd3\xd2\xfa\xe7\xff\xf9\xb7\xbf\xfe\xb7\x7f\xff\xbf\xfe\x5b\x57\xe5\xcf\xff\xf6\x6f\x3f\xff\xf9\x4f\x41\x51\xd6\x17\x92\x67\xbf\xd3\x30\x69\x9a\x8e\xd5\x28\x8c\x82\xff\xcd\x68\x8b\xea\x82\xff\x1d\x9c\xb2\x36\xcc\xca\x9f\x15\x36\xf8\xf3\xcf\xe7\xf6\x92\xdf\x8e\x65\xd1\x4e\x8f\xe4\x92\xe5\x5f\x76\x0d\x29\x9a\x69\x43\xeb\xec\xb8\x9f\x5e\x9a\x69\x4b\xdf\xda\x69\x93\xfd\x4e\xa7\x24\xfd\xd7\xb5\x69\x77\x71\x14\xbd\xdf\x4f\x3f\xd3\xc3\x6f\x59\x8b\xe7\xde\x0f\x65\xfa\xe5\x76\x21\xf5\x29\x2b\x76\xd1\x9d\xd4\x6d\x96\xe4\x74\x42\x9a\x2c\xa5\x93\x94\xb6\x24\xcb\x9b\xc9\x31\x3b\x25\xa4\x6a\xb3\xb2\xe8\x7e\xbd\xd6\x74\x72\x2c\xcb\x4e\x4a\x67\x4a\xd2\xee\x9f\x53\x5d\x5e\xab\xc9\x85\x64\xc5\xa4\x20\xaf\x93\x86\x26\x0c\xdc\x5c\x2f\x17\x52\x7f\xb9\xa5\x59\x53\xe5\xe4\xcb\xee\x90\x97\xc9\x6f\x77\x72\x4d\xb3\x72\x92\x90\xe2\x95\x34\x93\xaa\x2e\x4f\x35\x6d\x9a\xc9\x6b\x96\xd2\x52\x21\xb3\x22\xcf\x0a\x3a\x65\x05\xf6\xaf\xb4\xe3\x8a\xe4\x53\x92\x67\xa7\x62\x77\x20\x0d\xed\x72\x39\xa1\x5d\x51\xb6\x3f\xfd\x9a\x94\x45\x5b\x97\x79\xf3\xe9\x83\x22\x51\x94\x05\xdd\x9f\x69\xa7\xdf\x5d\x74\xff\xf5\x9c\xa5\x29\x2d\x3e\x4d\x5a\x7a\xa9\x72\xd2\x52\x0b\x77\x27\xb7\x03\x49\x7e\xeb\x9a\x51\xa4\xbb\x28\x88\xee\x64\x47\x92\x36\x7b\xa5\x13\xb2\x3b\x97\xaf\xb4\xbe\x95\xd7\xb6\xab\xb4\x93\xd1\xe1\x50\xff\xda\x66\x6d\x4e\x3f\xdd\x0e\x65\x9d\xd2\x7a\x7a\x28\xdb\xb6\xbc\xec\xe2\xea\x2d\x48\xcb\xb6\xa5\xe9\xfd\x30\x69\xda\xba\x2c\x4e\x5c\x5d\x9f\x39\x1b\xeb\x28\xba\xa7\xc7\x82\xa7\x35\xed\x97\x9c\xee\xb2\x96\xe4\x59\x72\x3f\xc7\x22\x31\xfb\x9d\xee\x66\xf4\xb2\x17\x1a\x09\x57\x6b\x7a\x09\xa2\xfb\x85\xd4\xbf\x99\x2c\xfe\x70\x3c\x46\xfb\xa4\xcc\xcb\x7a\xf7\x43\x14\x45\xf7\xe6\x42\xf2\xdc\x20\xb1\x89\xde\xdf\x9b\xeb\x61\xd2\x5c\x2b\x23\x75\xbd\x7c\xbf\x67\x72\x95\x62\xd9\x57\x65\x93\x75\xaa\xda\xd5\x34\x27\x5d\x7b\xbd\xc2\xee\x28\xb5\x65\xb5\x9b\x86\x4b\x7a\xe9\x68\xdf\x44\xa3\xa7\xe1\xac\x4b\xc9\x2e\x27\x21\x8d\x5d\x74\x6f\x5e\x4f\x4c\x2f\xbb\xba\x2c\xdb\x0f\xb7\x4e\x80\xc7\xbc\xfc\xbc\xe3\x4a\xb8\x73\x23\x92\x56\x17\xd3\x4b\xb0\x88\xaa\xb7\xfb\xb9\xbe\x4d\x2f\xe5\xef\xd3\x43\xf9\xd6\xf1\x9b\x15\xa7\x5d\xa7\x57\x5a\xb4\x5d\xd2\xde\x93\xac\x54\x5c\xd5\x54\xd7\x44\xae\x6d\x79\x4f\xca\x94\x4e\x7e\x3b\xa4\x93\xaa\xa6\x93\x86\x5c\x2a\xab\xf3\x5c\xca\xa2\x6c\x2a\x92\xd0\x89\xfa\x6d\xaf\x65\x15\xd3\xcb\xfd\x70\x6d\xdb\xb2\x98\x64\x45\x75\x6d\x27\x65\xd5\x72\x33\x6f\x68\x4e\x93\x76\xd2\x75\x27\x52\x53\x72\xe3\x6a\xc8\x8a\x33\xad\xb3\x96\x51\x50\x7f\xa8\x7e\xc5\x29\x69\xf6\x5e\xb3\x26\x3b\xe4\x54\xd6\xc0\x49\xde\x58\x0f\x6d\x6b\x52\x34\xc7\xb2\xbe\x70\xcb\x14\x88\xae\xeb\x07\x8c\x91\x5f\xdb\x2f\x15\xfd\x85\x27\x7f\x9a\x18\x49\x35\x6d\x68\x6b\xa5\x34\xd7\xc3\x25\x6b\x3f\xdd\xe4\x08\x40\xaa\x8a\x92\x9a\x14\x09\xdd\xf1\xf2\xfb\xe4\x5a\x37\x65\xbd\xab\xca\xa
c\x68\x69\x2d\x2a\xfb\x35\xcd\x1a\x72\xc8\x69\xfa\xc9\xac\x56\x25\xde\x44\xa1\x94\x1e\xc9\x35\x6f\x45\xa1\xdd\x8e\xe9\xee\x58\x26\xd7\x66\x9a\x15\x05\xad\x39\x27\x6e\xba\x32\x93\x7d\x45\xd2\xb4\x53\x67\x74\x67\xd0\x9b\x69\x9b\x7c\xdc\xbb\x1b\xad\x49\xce\x34\xf9\xed\x50\xbe\xd9\x8d\x26\x69\x56\x76\xfd\x50\xd9\x86\xea\x92\x6f\x90\x3e\x2f\x51\x5c\x2f\x07\x5a\x7f\xda\xed\xa4\x54\x18\x53\xd3\xa6\xca\x8a\xa9\xa9\x70\x0f\xba\xbc\xb6\x36\xfa\x26\x18\x66\x16\x67\x0a\x9f\x92\x3a\x39\xa3\xc2\xef\xf4\x7c\xcc\x68\x9e\xee\xfb\xec\x5d\x16\x7c\xa8\x3b\x20\x1c\x68\xde\x79\xc2\x34\xe9\x98\xc8\x91\xc6\xfa\x0a\xa4\x34\x29\x6b\xd2\x8d\x13\x58\x6b\x98\x99\xb2\xe6\x34\xb4\x95\xca\xed\x86\xc2\xa6\xcc\xb3\x34\x68\xb2\xfc\x95\xd6\xaa\x2b\x04\xb3\x4a\x2b\x26\x9c\x2f\xe9\x25\x08\x57\x33\xf6\xcf\xba\x1b\x47\x72\x7a\xa2\x45\x8a\xd9\x88\xea\x70\x76\x27\x97\xfd\xd2\x19\x69\xdb\xce\x5c\xe5\x08\x9d\x94\x79\x4e\xaa\x86\xee\xe4\x2f\x7b\x91\xd1\xf5\x7b\x41\x3f\x9d\xb4\xe7\x9b\xae\xef\x2f\x17\x9a\x66\x24\xa8\xea\xac\x68\x6f\x7f\xe6\x9d\xb3\x39\x93\xb4\xfc\xcc\x9a\xfc\xa7\xec\x52\x95\x75\x4b\x8a\xd6\x18\x88\x8d\x44\x63\xb4\x66\x5d\xba\x22\x35\x2d\x5a\x13\xd0\x29\x10\xa3\x77\x27\x13\xc2\x46\x88\x96\xa6\xbc\x5a\xad\x80\x1d\x9b\x86\x70\xf7\xf7\xeb\xb9\xa6\xc7\x4f\x3b\x72\x6c\x69\x7d\x13\x36\xb0\x7b\x17\xfc\xf4\x2e\x20\x6d\x5b\xff\xd4\xe5\x7e\x08\xde\x7d\x78\x67\x7a\x2c\x2f\x9a\x65\x0b\x38\x23\xfc\x7f\xff\xf2\xee\x5f\xe4\x95\x34\x49\x9d\x55\xed\xee\x9d\x28\x39\x51\x99\x3f\xbc\x73\x88\xbd\xeb\xc6\xe0\x09\x73\xda\xff\x71\x2d\x5b\xea\x1a\xc3\x0f\xdb\xed\x76\x5f\x91\x13\x9d\x1e\x6a\x4a\x7e\x9b\x66\x45\x37\xd1\xd8\x91\xd7\x32\x4b\xef\x6d\x37\x9d\x50\x7e\x99\xa9\x6f\xca\x67\x18\x53\xa6\xe1\x7b\x5b\x4f\x3a\x37\xe3\x2b\xdf\xe5\x5d\xc8\xdb\xf4\x73\x96\xb6\x67\x36\xbb\x31\x64\x5a\x4d\xce\xb3\xc9\x79\x7e\x2b\xeb\xea\x4c\x8a\x66\x37\xdf\x7f\xce\xd2\xf2\x73\xb3\x9b\xdf\x79\x86\x41\x95\x35\x4b\x10\x15\x43\xb3\xed\x7b\x8f\x06\xe1\xb0\x20\xaf\x07\x52\xdb\xf3\x89\x90\x71\x1f\xb4\xe9\x44\xfe\x76\x36\x48\x4c\x85\xc1\x00\x42\x87\xb6\x78\x09\x13\x52\xd3\x76\x12\xa6\x75\x59\x5d\xab\x17\x23\x4d\x5a\x72\x5b\x56\x53\xcc\xe0\xee\x61\x4e\x0e\x34\x47\x64\xde\x4d\x10\xc2\xfe\xde\x60\x92\xe1\x82\xe7\x48\x9a\x06\xed\x79\xe2\x24\xa5\x48\x2d\x69\x9a\x1a\x54\xee\x7f\xbe\x21\xe3\x97\x31\x36\xc3\x91\xcf\xc8\x42\x53\xef\xbb\x03\x3d\x96\x35\x9d\x08\xa3\xfb\xc6\xd4\xf5\xd4\x9a\x79\xff\xd5\x2c\x5c\x1a\x33\x67\x52\x4d\xcf\xd9\xe9\x9c\x77\x63\x8b\x10\x7e\x7d\x3a\x90\x9f\xa2\x09\xfb\xf9\xc0\x67\xd1\xe6\xe4\xe2\xdd\xff\xa0\xf9\x2b\xed\x66\x52\xc1\xbf\xd3\x2b\x7d\x37\x51\x7f\x4f\xfe\x4b\x9d\x91\x7c\x62\x4c\xdd\x8d\x49\xc7\xa2\x7a\xb3\x66\x68\x71\xb8\x98\x6d\x96\xeb\x78\x31\x97\x83\xcc\x7c\x3e\xdf\xa3\x96\xc4\x47\xfe\x89\x35\xa3\xd0\x93\x14\x93\x37\x73\xaa\xc2\xeb\x95\x29\x66\xd5\x22\xed\x2e\x27\x38\x3f\x2c\x66\x9b\x43\x42\xf6\x70\x40\xe2\xf3\x67\x3e\x4b\x9e\x90\x1d\x73\xf4\xb2\xc8\x8c\xac\x16\xdb\x95\x53\xc4\x18\xc3\x04\x5e\xce\xae\xdb\x73\x56\x88\x29\xf4\x5e\xa6\x2d\xab\xb7\xa0\x1b\xe9\x03\xa9\x0e\x3e\x97\xa8\xb3\xe2\xc4\x9b\x2f\x91\xd3\xf2\x78\x6c\x68\xbb\x9b\xce\xaa\x37\x30\xc7\x8c\xd8\xe0\x00\xe6\xb6\x97\x2c\x4d\x73\x7a\x0f\xb3\xcb\x69\x5a\xd3\xa6\x2a\x8b\xa6\x9b\xf1\x87\xed\xf9\x7a\x39\x14\x24\xcb\x5f\xb2\xcb\xc9\xf8\x33\x20\x3c\x21\x21\x75\x79\x6d\x68\xce\x67\x0e\x2f\x61\xd6\xd2\x4b\x4f\x0e\x2b\x65\xaf\x81\xf6\xf6\x30\xb5\x37\xa7\x10\x9c\x9d\x4e\xb9\x54\x76\xb3\x69\x37\xcb\xb9\x36\xbb\x55\xf5\xc6\xb3\x15\x4b\xca\x5d\xf5\x18\x0e\x6a\x2d\x7b\xb4\x03\xef\xed\xfa\x3a\xa2\xaa\x0f\x74\x3e\x8c\xaf\x15\x48\x9e\x07\xe1\xac\x09\x28\x69\xe8\x34\x2b\xba\x29\xd1\x7e\x20\x1b\x5d\xd7\x
0d\x09\x21\xc9\xea\x44\x8f\x59\x82\xa7\x65\xf4\xbe\x5b\x28\x70\xbd\x76\x83\xe1\x6e\x16\x55\x6f\x62\x7a\x21\x17\x62\x2c\x49\x4d\x21\xf4\xc0\x69\x36\x97\x52\x7a\x0f\x9b\x7a\x5a\x16\xf9\x97\x9b\x5a\x07\x91\x43\x53\xe6\xd7\x96\xee\x05\x63\x95\x5a\x60\xc4\xaa\x96\xdd\x34\x36\xe6\x30\xd1\x1e\x2c\x6f\xf6\x49\x9e\x55\xbb\x9a\x26\xad\x1a\x1f\x14\x2f\xf7\x73\xcc\xfd\xd0\xe4\xbc\x98\x9c\x97\x93\xf3\x6a\x12\x9e\xe3\x49\x78\x9e\x4d\xc2\xf3\x7c\x12\x9e\x17\x93\xf0\xbc\x9c\x84\xe7\x95\xbf\xc7\x8a\x29\xce\x32\x8a\x80\xc6\xe3\xbd\xb5\x14\xb9\x9f\xe3\x80\xad\x0b\x27\xe7\x99\xfc\x65\x2e\x7f\x59\xc8\x5f\x96\xf2\x97\x95\xf8\x25\x54\xc5\x42\x55\x2e\x54\x05\x43\x55\x32\x54\x45\x43\x55\xf6\x1c\x07\xa1\xaa\x32\x54\x75\x86\xaa\xd2\x50\xd5\x1a\xaa\x6a\x43\x5d\x6f\xa8\x2b\x0e\x75\xcd\xa1\xae\x3a\xd4\x75\x87\xba\xf2\xd0\x58\xfe\x0a\xe9\x2c\xa0\x74\xe4\xf8\xb9\xdd\x6e\xef\x4c\xe2\x4c\x11\x21\x57\x46\x78\x9e\x0f\x58\x54\xcc\x16\xa8\xb1\x23\x23\x43\x44\x8e\x90\xb5\xd4\xcc\xa6\x21\x22\x0a\x31\x69\xe9\x96\x9b\x7e\x69\xf9\xfe\xce\x6c\x84\x59\x4f\x28\x2d\x68\x65\x72\x1f\xfb\xb8\x5f\x38\x3a\x34\x54\x88\xd8\xc1\x2a\x80\x7a\x0b\x31\x15\x86\xb8\x36\x57\x2e\xf7\xeb\x8e\x7b\x26\x7b\x23\x71\xde\x8d\x6a\x5c\x15\x66\x2a\xe3\x98\x6b\xc6\x88\x8c\x2c\x58\x3b\x3a\x3e\x8c\xd4\x78\xd3\xa5\x32\x71\xdc\x6c\x5f\x7a\x17\xd2\x31\x52\x3b\xdf\x50\x29\xb7\x10\x44\x01\x93\x4d\x98\x77\x53\x4f\x64\x10\x31\x4a\xae\xe4\x9f\xc2\xc4\x66\x4e\x07\x5c\xc8\x35\xc3\x4f\x97\xac\x10\x43\xdb\x7a\xb5\xa9\xde\x3e\xdc\x78\x05\x46\x4b\xe2\xea\xed\x7e\x17\xb2\x72\xa2\x37\xcb\xf7\xf7\x24\x6b\xa9\x19\x28\x12\xeb\xe1\x90\xf9\xd2\x9c\x1e\x45\xb4\x80\x7b\xb2\xee\x6f\x91\xc5\x02\x9a\x66\x1e\x4b\x10\x99\x09\xed\x96\xf9\x66\x2e\x4f\x11\xd9\xff\xba\x36\x6d\x76\xfc\x62\xe6\x8b\x24\x01\xb8\x5c\xbb\x25\x89\xd1\x93\x78\x72\x55\x67\x2c\xce\x67\x4d\x14\xee\xc4\xca\x14\x61\x34\x39\x8b\x89\xd6\x31\x91\xc5\x9b\x6b\x92\xd0\x46\x4d\x1a\xe6\xc9\x7a\x35\x4f\x65\x71\x91\x69\x17\x9f\x1d\x96\x8b\x59\x22\x8a\x67\xc5\xb1\x54\x65\xe3\x75\xb4\x39\xca\xb2\x5d\x0e\x28\xb8\x58\xce\x56\xb2\xde\xcf\xa4\x2e\xb2\xe2\x24\xf3\x36\x64\x95\xce\x0f\xb2\xac\xc8\xb4\x8b\xaf\x56\xcb\x58\xd5\x9b\x92\xe2\xa4\xb3\xc8\x76\xb1\x58\xcc\x64\x69\x9e\x67\x17\xde\x2c\xe6\xcb\xf9\xe2\x1e\x1e\x4e\x50\x60\xcc\x1f\x3b\x5e\x5a\x89\x51\x17\x10\x04\x5d\xac\x94\xe7\xe1\xa4\xa4\xe9\x82\xd2\xe3\x31\x4a\x37\x9c\xa0\x2d\x56\x17\x9b\xc4\x74\x76\x98\x33\x82\x4c\xbe\x08\xb5\x2d\x4d\x8f\x6b\x4e\xcd\x10\xb4\x0b\x24\xc7\x74\xdb\xf9\xda\xc3\x49\x49\x1c\x99\x92\x24\xc7\x0d\x9d\x73\x6a\xb6\xe8\x11\xec\x9a\x26\x87\x25\x23\x28\x74\x80\x60\x66\x29\x4d\x29\xa7\x67\x29\xc3\x85\xd2\xc5\x61\x7b\xd8\xde\x43\xb6\x00\xe4\xeb\x4d\x39\xad\x92\x43\xc1\x56\xfb\xfe\x45\x54\xbd\x05\x51\x60\xcc\x30\xcc\xe8\xaf\x31\xb7\xb8\xe6\x93\x32\x37\xc7\xe5\x08\x1b\x94\xaf\x79\xc0\x80\xdd\x7f\xaf\x79\x50\xb2\xdf\x75\x39\x01\x8d\xee\x61\x9e\x35\xed\xf4\x5a\xb0\xc1\x20\x55\xfc\x75\x1d\x7f\xd7\x0d\x43\x8d\x1e\x27\xba\xe5\x27\x4b\xe0\x93\xad\x01\xac\x64\x8a\xe5\x4e\x97\x6c\x2c\xd4\x85\x5f\xf2\x0c\x8f\xc9\x5b\x44\x97\x7a\x3a\xc4\xc7\x9f\x2e\xe5\x9e\xf6\xb6\xbe\x13\xe0\x3d\x6d\x27\x69\x7a\xc3\xe7\xad\xf7\xb4\x75\x83\xe7\x6a\x8c\xe6\x8d\xe9\x19\x6f\xd3\x7c\x7a\x2e\xeb\xec\xf7\xb2\x68\x49\x1e\x74\xb4\xf2\x92\xb4\x6c\xa4\x94\x73\xbb\x55\xa7\xc3\x24\xa7\xa4\xe6\xc9\x70\xd0\x74\x26\x76\x0c\xa0\x12\x69\x9e\x67\x55\x93\x35\xfb\xcf\xe7\xac\xa5\x2c\x90\xd4\x89\xf4\x73\x4d\xaa\x3b\xac\xde\xe6\x3b\xde\x74\x8d\x37\xc3\x32\x13\xf6\x7b\x4a\x5a\x32\x2d\xeb\xec\x94\x15\x24\x9f\x8a\x4f\x0c\x22\xda\x7a\xa6\x79\x85\x18\x1c\x5f\x2b\x05\x7c\x30\xce\x8a\xac\xcd\x48\x9e\x35\x17\xc3\x99\x6c\xa3\xf7\
x7b\x10\x58\xbe\x56\x15\xad\x13\xd2\xd0\xbb\x11\xab\x91\xf3\xd9\xce\x2c\x03\x63\x06\xc4\x9c\x24\xf4\x85\xeb\x70\xa9\xed\x5f\xda\x80\x69\xfd\x9a\x70\x50\xed\x72\xd2\xb4\xd3\xe4\x9c\xe5\xa9\x11\x1c\x0a\xae\xb9\x27\xa3\x34\x33\x9c\x9e\x60\x00\xc5\x57\x28\x23\x85\x3b\x54\x23\x41\xf8\x56\x7b\xf5\x65\x7d\x27\x19\x58\x6f\x77\x82\x75\xaa\x94\xb1\x08\x58\x33\x92\x1e\x9a\x19\x2a\x54\xf6\xe3\x3f\xd9\xd7\xc6\x7f\x46\xd1\x7f\x89\x7e\xbc\x87\x1a\x3f\xad\xe9\x2b\xad\x1b\x93\x44\x58\x5d\xf3\x5c\xf8\x75\xbb\x97\xc5\x66\xc7\x13\xdd\x5b\xae\x96\x64\x37\x34\x94\x62\xe9\x2b\x72\xcc\x1d\x63\xc3\xdb\x5e\x83\x29\x80\xc1\xa8\x78\x84\x63\x12\xb1\x20\x18\x8d\x70\x04\x11\x8f\xb0\x51\x09\x4b\xb6\x79\x58\xb3\xb7\x65\x1c\xe2\x6f\x58\x1f\x09\x13\xd1\xd3\xac\x3e\x12\x26\xc4\xb0\xa0\xce\x76\x02\x66\x47\x3f\x1a\x16\xea\x0a\xc8\x8d\xd2\x92\x34\xad\xbb\x29\x82\x77\xbe\x6b\xcc\x38\x3d\xdd\x63\xe0\xdb\xda\xdf\x69\x91\x97\x93\xbf\x97\x05\x49\xca\xc9\x5f\xcb\xa2\x29\x73\xd2\x4c\xde\xfd\xb5\xbc\xd6\x19\xad\x83\x7f\xa7\x9f\xdf\xe9\xaf\x6e\x8c\x96\x1a\x7f\x66\xd5\x5b\xb0\xb0\x46\x9b\x6e\x04\x93\xd3\x92\xf5\x6c\xb9\xa0\x58\x48\x63\x7b\x9c\x1d\x17\xc8\x50\xec\x86\x34\xee\xbf\x1d\xd2\x71\xb5\xe1\xf3\x32\x16\x81\xb3\x88\xce\xab\x37\x33\x9a\x9f\x15\x0d\x6d\x83\x28\x98\xc6\x6c\xaa\x60\x44\x0a\xc3\xd9\xf2\x03\xfb\x4e\x69\x8f\x47\x92\x99\x2d\x1b\x55\xc1\xda\xc4\x1c\x76\xe7\xfe\x28\xcf\xe7\xb2\x4e\x79\xf8\x7a\x27\x82\xd8\x79\xce\x13\x3b\x29\x88\xb4\xee\xef\x81\x40\xe2\xb2\xfb\x41\xa2\x43\x49\x92\x20\xa2\xac\x6a\x1a\x58\xda\x8b\x90\xa8\xa2\xfd\x81\xd4\x54\x51\x55\xd3\x29\x57\x12\x64\xc4\xf8\x64\x02\xaa\x8d\xee\x61\x57\xac\x49\xea\x32\xcf\x59\x50\xfb\x42\xde\xa4\x40\xe6\xdd\xf4\x4c\xf9\xed\xe9\x97\x1d\x87\xdd\xc3\xce\xfa\x49\x56\x50\x15\x39\xaa\x55\xb0\xc9\x9a\x01\xb1\x04\x6b\x48\x8d\xdd\xd9\x4d\x97\xd4\x33\xf5\xd0\x75\x89\xf4\x25\x73\xf7\x6e\x81\xed\x76\x86\x16\xd8\xae\x3d\x05\xe2\x59\x14\xa1\x25\xe2\x98\x17\xd1\x19\xd3\x63\x7e\xcd\xd2\x6f\xd6\xda\xb0\x2e\x3f\x5b\x73\x98\x69\xac\x6d\x55\x00\xa7\x1c\x99\x94\xf9\xf4\xad\x99\xc6\x13\xf6\x5b\x73\x91\xbf\x5d\x52\xf9\x5b\x7e\x92\xbf\xbd\x35\xd3\x99\xc2\xcd\x14\x6e\xa6\x70\x33\x85\x9b\x2b\xdc\x5c\xe1\xe6\x0a\x37\x57\xb8\x85\xc2\x2d\x14\x6e\xa1\x70\x0b\x85\x5b\x2a\xdc\x52\xe1\x96\x0a\xb7\x54\xb8\x95\xc2\xad\x14\x6e\xa5\x70\x2b\x85\x5b\x2b\xdc\x5a\xe1\xd6\x0a\xb7\x56\xb8\x8d\xc2\x6d\x14\x6e\xa3\x70\x1b\x85\xdb\x2a\xdc\x56\xe1\xb6\x0a\xb7\x55\xb8\x38\xd2\x82\x8e\xb4\xa4\x23\x2d\xea\x48\x63\x0d\xa5\x18\x5a\x31\xd4\xa2\xf5\x12\x6b\xc5\xc4\x5a\x33\xb1\x56\x4d\x3c\xbb\xb9\xdb\x47\x3a\x5b\x35\x02\xa6\xe3\x6c\xcb\xb6\x18\x6d\x13\x5a\xeb\x5a\xaf\x5a\x73\x5a\x37\x5a\xfa\x5a\xbe\x5a\x82\x86\x8c\x0c\x11\xb0\x16\x1a\x8b\x82\xbb\x91\xaa\x83\xd2\x3a\x35\x96\x7d\x33\x0e\x57\xfc\x7f\x6b\x23\x37\x12\xb9\x9b\x79\x38\x17\xff\xd3\xb9\x5b\x35\x0e\xe8\xb4\x8d\x48\x5b\xad\x10\x72\x6b\x91\xb9\xdc\x20\xd4\x56\x32\xd3\xe0\x6e\x29\xd2\x16\x18\x73\x0b\x91\x39\xc7\x78\x9b\x8b\xcc\x99\xc1\x9b\x12\x00\xc6\x9b\x94\x03\xc6\x1a\x9b\xb3\xc4\xb3\x9b\xd0\xad\x29\x3f\x9e\x15\x8b\x2c\x54\x88\x1c\x12\x09\x08\x2a\x49\x06\xd9\x0a\x84\x29\x4e\x96\xb1\x11\x19\xa8\x4c\x19\x62\x2d\x10\xa8\x60\x19\x62\x25\x11\x90\xf7\xa5\xc8\x40\x45\xcc\x10\x0b\x81\x40\xe5\xcc\x10\x73\x81\x98\x41\xce\x95\xc8\xbc\x9c\x4b\xc9\x79\x19\x97\x72\x8b\x8c\xe4\xe6\xdc\x69\x83\xf7\x3d\x5b\x19\x5d\x4e\xcc\x73\x3c\xba\xe8\x10\x11\x47\x78\x54\xd1\x9c\xa7\x5b\x0e\xb0\x35\xd1\x9c\xa7\x1b\x9e\xee\x51\x44\x73\x9e\xae\x39\xc0\xa3\x87\xe6\x3c\x5d\x09\x00\xe4\x7a\xc9\xd3\x3d\x5a\x68\xce\xd3\x05\x07\x78\x94\xd0\x9c\xa7\x73\x0e\x98\x41\x9e\xa5\xa0\xbc\x3c\x0b\x79\x79\x59\x16\xd2\xd2\x0a
\xe0\x9f\x23\x3b\x15\x58\x8b\x7f\x53\x13\x12\x12\x5b\x10\x54\x25\x12\x1a\x59\x50\x54\x37\x02\xba\xb5\x90\xa6\x92\x04\x60\x63\x01\x50\x6d\x09\xe4\xda\x42\xa2\x6a\x13\xc8\x95\x8d\x74\xdb\xba\xb4\x00\xa8\x22\x05\x72\x61\x21\x51\x8d\x0a\xe4\xdc\x42\xce\xdc\x96\x02\x15\xf4\xb4\xd4\xd6\x44\x4f\x43\xa3\xd1\xa1\x28\x7b\x1a\xa4\x27\x3a\x7a\x2a\xa3\x27\x2b\x7a\x3a\xa2\x27\x1c\x7a\x4a\xa1\x27\x0d\x7a\x5a\x60\xf8\x7d\xc3\xad\x33\xaf\xed\xb8\x37\x9e\x0a\xdd\x1b\x2b\xe6\x75\x6f\x8c\xbe\xd7\xbd\x75\x7c\x40\xf7\xd6\x71\xe9\x75\x6f\x5d\x63\xbc\xee\xad\x6b\x33\x74\x6f\x9d\x44\xbc\xee\xad\x13\x9c\xd7\xbd\x75\xf2\x85\xee\xad\x93\xbe\xd7\xbd\x75\x4d\xf5\xb9\xb7\xe6\xe2\x75\x6f\x2a\xcb\xef\xde\x14\xc4\xef\xde\x24\xc4\x71\x6f\x32\xc3\xef\xde\x24\xc2\xef\xde\x24\xc2\x71\x6f\x32\xc3\xef\xde\x24\xc2\xef\xde\x24\xc2\x71\x6f\x32\xc3\xef\xde\x94\x5c\x7c\xee\x4d\x02\x80\x7b\x63\xc9\xa8\x7b\x53\x39\x5e\xf7\xa6\x10\x5e\xf7\x26\x11\xd0\xbd\xc9\x74\xaf\x7b\x93\x00\xaf\x7b\x93\x00\xe8\xde\x64\xba\xd7\xbd\x49\x80\xd7\xbd\x49\x00\x74\x6f\x32\xdd\xeb\xde\x94\x38\x3c\xee\x4d\xe6\xdb\xee\xad\xb9\x0c\xba\x37\x03\x32\xe4\xde\x0c\xe8\x90\x7b\xd3\x50\x8f\x7b\xd3\x80\x21\xf7\xa6\x91\x43\xee\x4d\x23\x3d\xee\x4d\x03\x86\xdc\x9b\x46\x0e\xb9\x37\x8d\xf4\xb8\x37\x0d\x18\x72\x6f\x86\x7c\xfb\xdd\x9b\x06\x42\xf7\xd6\x1b\xbe\x30\x17\xf7\x7a\xf9\xae\x17\xe8\x7a\x09\xae\x17\xd9\x7a\x19\xad\x17\xca\x7a\x29\xac\x17\xbb\xc6\x62\xd6\x58\xab\xb2\xa5\xa8\xe3\xdf\x78\x2a\xf4\x6f\xac\x98\xd7\xbf\x31\xfa\x5e\xff\xd6\xf1\x01\xfd\x5b\xc7\xa5\xd7\xbf\x75\x8d\xf1\xfa\xb7\xae\xcd\xd0\xbf\x75\x12\xf1\xfa\xb7\x4e\x70\x5e\xff\xd6\xc9\x17\xfa\xb7\x4e\xfa\x5e\xff\xd6\x35\xd5\xe7\xdf\x2e\xa9\xd7\xbf\xa9\x2c\xbf\x7f\x53\x10\xbf\x7f\x93\x10\xc7\xbf\xc9\x0c\xbf\x7f\x93\x08\xbf\x7f\x93\x08\xc7\xbf\xc9\x0c\xbf\x7f\x93\x08\xbf\x7f\x93\x08\xc7\xbf\xc9\x0c\xbf\x7f\x53\x72\xf1\xf9\x37\x09\x00\xfe\x8d\x25\xa3\xfe\x4d\xe5\x78\xfd\x9b\x42\x78\xfd\x9b\x44\x40\xff\x26\xd3\xbd\xfe\x4d\x02\xbc\xfe\x4d\x02\xa0\x7f\x93\xe9\x5e\xff\x26\x01\x5e\xff\x26\x01\xd0\xbf\xc9\x74\xaf\x7f\x53\xe2\xf0\xf8\x37\x99\x6f\xfb\xb7\x4b\x3a\xe8\xdf\x0c\xc8\x90\x7f\x33\xa0\x43\xfe\x4d\x43\x3d\xfe\x4d\x03\x86\xfc\x9b\x46\x0e\xf9\x37\x8d\xf4\xf8\x37\x0d\x18\xf2\x6f\x1a\x39\xe4\xdf\x34\xd2\xe3\xdf\x34\x60\xc8\xbf\x19\xf2\xed\xf7\x6f\x1a\x38\xc2\xbf\x19\xd1\x76\x33\x66\xad\xa3\xd2\x3a\xee\xac\x23\xcb\x3a\x76\xac\xa3\xc3\x3a\xfe\xab\x23\xbc\x3a\x86\x6b\x84\x68\x8d\x08\x2c\x0f\xb0\x42\x07\xc7\x53\xa1\x83\x63\xc5\xbc\x0e\x8e\xd1\xf7\x3a\xb8\x8e\x0f\xe8\xe0\x3a\x2e\xbd\x0e\xae\x6b\x8c\xd7\xc1\x75\x6d\x86\x0e\xae\x93\x88\xd7\xc1\x75\x82\xf3\x3a\xb8\x4e\xbe\xd0\xc1\x75\xd2\xf7\x3a\xb8\xae\xa9\x3e\x07\x97\x9f\xbc\x0e\x4e\x65\xf9\x1d\x9c\x82\xf8\x1d\x9c\x84\x38\x0e\x4e\x66\xf8\x1d\x9c\x44\xf8\x1d\x9c\x44\x38\x0e\x4e\x66\xf8\x1d\x9c\x44\xf8\x1d\x9c\x44\x38\x0e\x4e\x66\xf8\x1d\x9c\x92\x8b\xcf\xc1\x49\x00\x70\x70\x2c\x19\x75\x70\x2a\xc7\xeb\xe0\x14\xc2\xeb\xe0\x24\x02\x3a\x38\x99\xee\x75\x70\x12\xe0\x75\x70\x12\x00\x1d\x9c\x4c\xf7\x3a\x38\x09\xf0\x3a\x38\x09\x80\x0e\x4e\xa6\x7b\x1d\x9c\x12\x87\xc7\xc1\xc9\x7c\xdb\xc1\xe5\xa7\x41\x07\x67\x40\x86\x1c\x9c\x01\x1d\x72\x70\x1a\xea\x71\x70\x1a\x30\xe4\xe0\x34\x72\xc8\xc1\x69\xa4\xc7\xc1\x69\xc0\x90\x83\xd3\xc8\x21\x07\xa7\x91\x1e\x07\xa7\x01\x43\x0e\xce\x90\x6f\xbf\x83\xd3\x40\xc7\xc1\xb5\xea\x63\xb9\x71\xe8\xa2\xef\xc3\xfb\xbd\x3d\x23\x9b\x8c\x19\x15\x83\x02\xb2\xad\x8f\x83\x5e\xd8\x79\xbe\x97\xb6\x7e\x51\x47\xc8\x5e\xda\x43\x99\x7e\x01\x49\xc7\xb2\x6c\x41\x92\x2a\x98\xba\x05\x53\xb7\xa0\xde\xce\xb1\xf1\x6f\x90\x00\x87\x7f\xda\xb2\xf2\x1c\x0c\x49\xd3\x14\x69\x01\x3c\x3c\xc
4\xdb\x0b\x76\xe4\xcd\x50\x2a\xe2\xee\x84\x8f\x92\xda\xee\x98\xd5\x72\x7b\x9b\xd1\xea\xa4\xcc\xd9\x31\xc7\x21\x1c\xcb\xb6\xf3\xbc\x24\x7b\x6b\x4e\x47\xd6\x9c\x8e\xaf\x39\x35\x4e\x29\xee\xa2\xbb\xa9\xbc\x8f\xec\xbf\x66\x3e\x2a\xad\x40\x9e\x54\x44\xcf\xb8\x89\x93\x88\x49\x59\xa4\xec\x56\x0e\xc4\xc6\xcc\x4c\xc7\xda\xcc\x4c\xc7\xee\x50\xb2\x69\x1f\x59\x2c\x13\xb1\xca\xa5\xea\x13\xea\x0c\x25\x7e\x80\x12\xa2\xb0\xe6\xe9\x3c\xb7\x75\x3a\xcf\x6d\x1c\x42\x33\xed\xa1\x89\xe4\x19\x2d\xfb\x06\xdc\x6b\x2e\xec\x5b\x34\xc4\xe8\x34\xd3\x32\x6b\xda\x3a\xab\x0c\xe6\x76\x45\x7b\xe6\x06\xf7\x53\x99\xa6\x1f\x0c\x5e\x07\x91\xe8\x29\xdc\x6d\xf7\x23\x2b\x63\xbb\xc4\x35\x01\xf1\xa7\xaa\x01\xcf\x46\xc9\xb2\x9d\x54\x7c\xd4\x0d\x92\x32\xff\x35\xc9\x49\xd3\xfc\xf9\x97\x6e\x94\xfe\xa4\x77\x4e\x34\x2d\x69\xb3\x64\xcf\x27\xfa\x6c\x43\xb6\x7d\x00\x3a\x29\xf3\xeb\xa5\xb8\xcb\xf3\xc4\x16\x95\x89\x3c\x5b\xfc\x2c\x6d\x9a\xe7\xee\x50\x97\x86\xe2\xea\x13\x77\xe8\x85\x39\xda\x22\x60\x8e\xd6\xbc\x97\x9a\x93\xa3\x6d\xd6\x43\x4d\x24\x23\x6e\x01\xc9\x11\xd4\x90\x1c\x48\xcd\xf1\x4e\x48\x0e\xa4\xd6\xa3\x71\xd4\x52\xb4\x88\xc4\x49\x59\x0f\xea\x3c\x02\x65\x41\xbc\xb6\x09\x50\x18\xc3\x74\xd3\xfd\x60\x16\x20\x8e\x87\x60\x26\x00\xb3\x0c\x1b\x80\x59\x86\x11\x78\x09\xba\x59\x86\x19\x78\x08\xca\x74\xcc\x10\x90\x2c\xa9\x3b\x24\xcb\x21\xe8\xda\x02\x92\xe5\x10\xc4\x84\x2b\x4e\xdb\x78\xad\xc1\x3a\x81\xe3\x37\x87\x11\x30\x1b\xe3\x37\x08\x00\x43\x99\x8e\xe8\x36\x59\x61\x16\x91\x15\xc7\x12\x33\x07\x2b\xdd\xb0\x05\x2b\xdd\x30\x04\x9c\xce\xd9\x43\xe7\x8c\xd2\x61\x89\x98\xf2\x61\xba\x54\x14\x4c\xb7\xe9\xb8\x3a\x87\xe9\x36\x1d\x54\x70\xfc\x34\x94\x57\xdb\xfa\x84\x94\x5f\xd5\x43\x18\x03\xe0\x57\xb2\x89\xc1\x18\x4d\x16\x74\x7e\x9c\x63\x1a\x16\xc7\xae\x30\x25\xc3\x2c\x43\xcf\x30\xcb\x50\xb5\x97\xa0\x9b\x65\x28\xdc\x43\x50\xa6\x63\x6a\x47\xb2\xa4\xc6\x90\x2c\x87\xa0\xab\x7f\x24\xcb\x21\x88\x7a\x00\x7e\x8a\xcd\x6b\x05\xd6\xc9\x36\xbf\x21\x8c\x80\xd9\x18\xbf\x39\x00\x18\xca\x34\x39\xce\x92\x04\xb3\x08\x7e\x70\x0e\x33\x08\x90\x63\xd8\x03\xc8\x31\xcc\xc1\x47\xcd\xc9\x31\x8c\x01\xa7\x26\x92\x31\x53\x70\x73\xa4\xe2\xdc\x1c\x48\xcd\xb5\x03\x37\x07\x52\x43\x05\xca\xcf\x1e\x7a\xad\xc0\x3c\x8f\xe8\x37\x82\x61\x94\x05\xf1\x9b\x80\x8d\x42\xe7\x01\x87\x24\x49\x12\x1d\x01\x57\x21\x82\xf5\x6a\xcd\x02\xe0\x9c\xae\xbe\x23\xc3\xbf\xfe\x67\x7b\x7c\x8d\xfd\xf7\xe2\xe4\x9c\x4a\x79\x13\x3b\xf2\xd9\x7d\x85\x2a\x95\x1f\x38\xe9\x92\xc8\xb5\x2d\xcf\x19\xdb\x21\xcc\x81\x07\x52\x7b\xee\xaa\x50\x37\x72\x29\x2a\xac\x40\x67\xed\x6d\x79\x4d\xce\x77\x87\xed\x97\x50\xc6\x40\xc0\xe1\x4a\x0f\x10\x5b\xc0\x20\x20\x77\x1d\x86\x80\xdc\x05\x59\x5f\x75\xe9\x98\xea\xfa\x40\xc6\x5a\x0d\x3b\x9e\xe8\x29\xe7\x2c\x4f\xfd\xb2\x41\x57\x79\x66\x30\xc0\xcb\x1c\xba\x82\x7d\xb4\xa4\x16\xe7\xa3\x25\xb5\x8c\x9f\xe6\xf6\xe1\x92\x5a\x1b\x66\xc9\x9b\x75\x1e\xef\x41\x49\x1b\x47\x27\x1f\x13\xf4\x63\x05\x0d\x39\x3f\x56\xd0\x10\xf3\x93\xac\x3e\x5a\xd0\x10\xb2\x71\x7c\xd4\x3a\x0f\x39\x4a\xc8\x72\x6d\xaf\x89\xf4\x75\x5a\x97\x81\xc7\x0b\x62\x35\x3e\xd2\x64\xbb\x20\xb8\x94\x34\xba\xeb\xcb\xf9\xf4\x29\x2d\x79\xc8\x4c\xdf\x92\xa3\xbf\x7b\x46\xf2\x12\x3e\xfb\x94\x9a\x31\xe6\x43\x3a\x9e\x2b\x33\x66\x31\x08\xc7\xda\x27\xc2\x8c\x93\x74\xfa\xe0\x2a\x72\x98\x7e\xd9\xfd\xdc\xf9\xed\x66\x9e\x8b\x84\x4c\x26\x96\xe0\x9e\x8e\x75\x14\xf5\xdd\xcb\xf8\x8d\x2e\x10\x73\xee\xa4\x9c\x60\x57\x57\xca\x8b\x47\x16\xec\x64\xa0\x12\x9f\x8c\x40\xff\x73\xbb\xef\xbf\x03\xf3\x98\xe5\xf4\x13\xb8\x4f\xd7\xaa\xb9\x38\xc1\x7c\x43\x6f\xe2\x32\xbb\x5f\x2f\xd7\xbc\xcd\xaa\x9c\x7e\x12\xd7\x84\xfd\xda\x69\xeb\x93\xef\x22\x4b\x56\x27\xbf\xa7\xcb\xbd\x79\xd3\x4d\x
57\x6d\xfd\x6e\x57\x7b\x95\xd7\xb6\xba\xb6\xf8\x19\x4a\x26\xca\xb5\x7d\x6a\x72\xf8\x52\xb5\xe5\x72\x79\x0f\x8f\x65\x7d\x99\x8a\x8b\x84\xfd\xa6\xaf\x0e\x1a\x1a\x57\x40\xad\xaa\xb7\x20\x9e\x3d\x51\xa9\xef\x6e\x2e\x9d\x9a\x5d\xc8\x49\x5c\xd4\x30\xf2\x4c\xa6\x7d\x6f\xa8\x7d\x22\xb5\x2b\xdb\xfd\xdf\x3c\x92\x1a\xad\x97\x1f\xb0\xd3\xab\x5e\x2c\x72\x23\x98\xbe\x56\xb0\xac\xcd\x7b\xbf\x82\x30\x5e\x36\x13\x4d\xdc\xc9\xdb\x7f\x0b\x22\xb6\xea\x84\xd9\x99\xd4\x76\x3f\xac\x56\xe4\x48\xb7\xca\xee\xa2\x67\x84\x34\x89\x82\x28\xd8\xc8\x8c\x38\x9a\x4d\xe2\xf5\x72\x32\x9b\xcf\x27\xe1\xea\x21\x09\xf6\x12\x02\x8d\xe1\xb7\xe7\x56\x39\x49\xe8\xb9\xcc\x53\x7d\x9b\xcc\x76\xbb\xdd\x97\x15\x49\xb2\xf6\xcb\x2e\x06\x85\xba\x89\x34\xeb\x91\x9e\x82\x4e\x1d\xea\x26\xdc\xb1\x65\x8c\x2b\x82\xed\xf4\x9a\x92\xb4\x2c\xf2\x2f\x9f\x26\xd2\xe9\x68\x68\x60\x77\x31\x71\xb1\x45\x51\xb6\x53\x92\xe7\xe5\x67\x9a\x22\xdd\x81\x52\x6a\x34\x53\x5e\x67\x68\x13\x7a\xfc\x02\x5e\x76\x49\x8a\x01\x4d\x49\x4b\x3f\x59\xf7\x90\x74\xfd\x5b\x34\x99\xdf\x2e\xeb\x2e\x73\xee\x21\x1b\x02\x27\xa1\x1c\xf1\xe0\x1d\x7f\xfa\x28\xa4\x79\xa1\x58\xcf\x15\x5d\xf6\x71\x49\xfe\x29\x95\xd5\x11\x30\xe7\xa7\x6b\x0a\x30\x67\xb8\x87\xf7\x9f\x81\xcb\x9d\x05\x29\xd7\x51\xf1\x0c\x71\xe3\x0b\x96\xaf\xaa\x45\x6f\x63\x56\xd9\x08\x05\xed\xf8\x8c\xeb\x57\xac\x63\xc4\x46\x2b\x3f\x42\x81\x7e\xd4\xa2\x35\x84\x37\xd5\xb2\x17\x35\x3a\x2c\x8c\xb8\xb3\x06\xb9\xe3\x2d\x82\xdf\x88\xf9\x05\x91\xe3\xe4\x2a\x6a\xfe\xd8\xcf\xd8\x47\x87\x53\xec\x8a\x1c\xb1\x0f\xa3\x42\x66\x15\x46\xc7\xc3\xc4\x6c\xf6\x4b\x56\xc0\x49\x10\xf5\x9a\xe9\xb2\x34\x96\xe6\xc2\x91\x7e\xed\x9a\x4c\x3f\x48\xdb\x0e\x36\x48\x70\x23\xf0\xe6\x48\xd1\x62\x00\x49\xb8\x37\x53\x4a\xde\x1d\x7f\xee\x21\x1f\x01\x9b\x8b\x1c\x53\xe6\x46\xaf\x64\xb3\x15\x78\x23\xc3\xcc\x71\xf3\x4b\xf7\x72\x08\x31\xef\xc2\xa9\x5b\xa3\x4e\xa7\x73\x35\xc8\x49\xf8\x04\x4e\xdb\x1c\x42\xe2\xce\x4c\x96\x9a\x9f\x64\xea\x62\x65\x30\xcf\xae\xf5\xd1\x77\xd8\xa9\x8b\xf3\x00\xf7\xce\xdd\x16\x2b\xc8\x3e\xa0\x6f\x16\xef\x12\x20\xfb\xf9\xc9\xc7\xbe\x26\xc4\xd9\x3f\x93\x66\x7a\xa4\x34\xed\x9c\x80\x7b\xb2\xdc\xce\x07\xae\xc4\x3e\x54\xbe\x98\x85\x6c\x88\xf0\x17\x40\xea\x51\x17\x7f\xb2\x9d\x09\xdd\x1a\x42\x2c\x19\xf7\xd8\x54\x90\x4d\xff\xcc\xa9\x20\xf4\x1d\x7b\xe4\x8e\xbd\x8e\x1f\xf1\x0d\x26\x08\xcf\x34\xaf\xf8\xb8\x34\xb1\x33\x24\x8b\x62\xc8\xb7\xf2\xc4\xf8\x68\xe3\xa5\xcd\x23\x50\x35\x0c\xa1\x25\x54\x47\xb0\xee\xdd\xb3\xb1\x96\x98\xed\x39\x15\xc7\x7f\xef\xe9\x66\x0f\x3f\xe8\x4c\x8f\x5f\x0d\xf8\xf4\xfc\xae\x9b\xca\xff\xb0\x5a\x1f\xe2\xd5\xe6\xe1\x29\x9d\x51\x16\x70\xcd\x0d\x9e\x4d\x24\xa6\x24\x4d\xcb\xc2\x96\xf9\x1e\x15\xac\xff\x93\xa2\x57\x22\xda\xae\x11\x9d\x8a\x4f\x01\xae\xe9\xa9\x0c\xc4\xf4\x54\x9e\x61\x7a\x1a\x6f\x99\x9e\x0d\xb5\x4c\xcf\x29\x01\x4c\x4f\x5c\xbd\x68\x63\x7b\x4c\x8f\xe3\xff\x18\xd3\x43\xf9\xf1\x2c\x32\x96\xf1\xd7\x9a\x5e\x12\x91\x78\x75\x78\xce\xf4\x78\x59\xc0\xb5\xd7\xf4\x84\x0c\x51\xc1\xfa\xbf\x6c\x79\x25\xe2\x98\x9e\xa9\x53\x5a\xd7\x65\xed\x1a\x9e\x48\x46\xcc\x4e\xe4\x18\x46\x27\xb1\x96\xc9\x99\x30\xcb\xe0\x00\x1a\x98\x9b\xb8\xab\xd3\x44\xf6\x18\x1b\x47\xff\x31\xc6\x86\x70\x83\x9a\x1a\xbf\x4b\xf4\x2b\x4d\x8d\x6e\x16\x9b\xf9\x93\xa6\xc6\xca\x5a\x3c\x7b\x0d\x4d\xc8\x0f\x15\xaa\xff\xe3\x99\x47\x1a\x8e\x99\x49\x5d\x5a\x20\xbe\xf1\xc9\xfd\xce\xa3\x0d\xd0\xb9\x8f\x5d\xcd\xc2\x97\xf8\xd2\x4c\x54\xb7\x9e\x77\x3f\x3d\x87\xfd\x19\x1f\x62\x21\x64\xae\x1f\x47\x04\x2e\x3d\xeb\x8f\x3b\x42\x13\x86\xa9\x2c\xaa\x9c\x21\x76\xb7\xd3\x18\x82\x86\xe2\x5e\x6c\xea\xe6\x39\x13\xab\x88\xd5\x63\xa1\x98\x47\x55\x2a\x7a\xb6\x4d\x15\xae\xa3\x31\x59\x61\x77\x89\x46\xf0\x12\xc4\
xd1\x1c\x60\xab\x5d\x94\xa7\xbe\x45\xad\x73\x8d\x6a\x04\xaa\x1a\x33\x11\xe5\x1b\x66\x45\x41\xe3\xea\x50\x38\x3a\x3a\xf9\xa6\x24\xad\x72\x6a\xa4\xc4\x8b\xa8\xf1\xd2\x5b\xb2\x6f\x89\xea\x48\x5e\xc4\x5d\x91\x06\x0c\x31\x78\x33\x03\x25\x38\x09\x37\x0e\x33\x70\x07\x19\x4e\x01\x0c\x0f\x90\xf5\x81\x4e\xed\x55\x8a\x7b\xdb\x36\xc2\xc0\x68\x1b\xd8\x9b\x37\x63\x1d\xda\x62\xdc\xc8\x01\x63\x14\xce\xf2\xc3\x13\xdb\xb0\x63\x19\x63\x23\xcf\xe6\xed\x7c\xc8\x95\x8b\x4f\xc6\xc5\xfd\xa1\xec\x6b\x43\xeb\x29\x5f\x46\x72\x8e\x58\x54\x14\x49\x6d\xdc\x44\x98\xc0\x84\x2a\x3e\x5b\xb0\x5f\xc5\x3e\x49\x9d\x12\x9a\x29\xdf\xe1\xfb\x05\xab\x55\xec\xeb\x50\xbc\xdc\x8c\x6f\x62\xe8\x13\x2b\x06\xaf\x26\x97\xfa\x61\x42\x8f\xf2\x7a\xe6\x08\xf3\xea\x2d\x58\x02\x3f\x1f\xcf\xf0\x79\x8b\x0f\xcb\xf8\x0a\x65\x98\x85\x71\xd6\x1f\x2c\x62\x36\x8d\xc4\x7e\x85\x09\x4e\xe9\x2b\x2d\xda\x86\xf3\x2e\x23\xbf\xe1\x6a\xb9\x3f\x66\x39\xbb\x8b\x35\xaf\xce\xe4\x27\x91\xf1\xcb\xca\xf8\x1e\x01\xde\xd3\x82\xef\x6b\x31\x46\xa7\xe2\xd1\xba\xdb\xd0\x6b\x3a\x60\xba\x92\x24\x89\x55\xde\x50\x9f\x4a\xd2\x06\xa4\x92\x0c\x75\xc9\x34\xb5\xb5\xb9\xac\x68\x11\xb0\x77\x9e\xd2\xf2\x73\x37\xba\x9e\x4e\x39\x1d\xcf\x23\x3d\x74\x3f\x70\x56\x95\x76\x3f\xf7\x6f\xc8\x03\x6a\x53\x56\x05\xb6\xee\x65\xea\xb0\x0d\x48\xe4\x04\x25\x86\x08\x58\x97\x17\x99\x03\x84\x11\x12\x9a\xbc\xab\x2c\x83\x3c\xcf\x1c\x22\xef\x92\xd0\xe4\x11\xa1\x1b\xf4\x45\xee\x50\x05\x98\xe6\x24\x34\xec\xad\x21\x1c\x57\x83\x1c\x42\x9e\xe9\x01\x41\x78\x20\xe9\x89\xf6\x3f\x61\x30\x9f\xcf\x79\xa1\x07\x1e\x3c\x80\xc1\x90\xe5\x9a\x1e\x52\x8b\x8a\xa9\x58\x99\x64\x28\x43\x26\x99\x12\x12\x69\x23\x0c\x7f\x14\xa7\xf3\xd9\x7a\x75\x88\x01\xa7\xb3\xcd\x92\x6e\xe8\xfd\x1b\xf2\xd0\xd3\xf9\x24\x31\xbb\xf3\x89\xd4\x11\x9d\x4f\x20\x27\x28\x31\x44\xc0\xa3\x3b\x9f\x5f\x47\x68\xe7\x73\xc9\xf7\x77\x3e\xbf\xbe\xf1\xce\xe7\xd2\x1f\xe8\x1a\x7d\x9a\x43\x3b\x9f\x53\xc3\x50\xe7\xb3\xed\x00\xe9\x7c\x63\xfb\x01\xe8\x82\xb2\x18\x7e\x70\xae\x2b\x07\x1e\x5a\xc1\x6d\x7b\x99\x1c\x36\xcb\x04\xd4\xbe\x48\x08\x5d\x24\x16\x15\x53\xc3\x32\xc9\xd0\x8a\x4c\x32\x45\x25\xd2\x46\xf4\x80\x51\x9c\x2e\xd6\x64\xb1\x58\x43\x39\x6d\x37\x8b\xf9\xf6\xfe\x0d\x79\xe8\xe9\x85\x92\x98\xdd\x0b\x45\xea\x88\x5e\xa8\xce\xb7\x60\xc4\x10\x01\x8f\xee\x85\x7e\x1d\xa1\xbd\xd0\x25\xdf\xdf\x0b\xfd\xfa\xc6\x7b\xa1\x4b\x7f\xa0\x8f\xf4\x69\x0e\xed\x85\x4e\x0d\x43\xbd\xd0\xb6\x03\xa4\x17\x8e\xed\x07\xa0\x17\xca\x62\xfe\x5e\x68\xbe\x57\xe4\xe9\x82\x87\x24\x4a\x29\xac\x7a\x75\xd8\xa4\x44\x93\x30\x75\xcb\xfe\x36\x94\xc1\xfe\x36\xc5\xd3\x25\x8c\x30\xf9\x61\xd6\xe6\xdb\xc3\x3c\x85\x7d\x6e\xb6\xda\x92\x43\x72\xff\xfa\xaa\x7b\xba\x1a\x23\x63\xf7\xb3\x2e\x69\x44\x27\xe3\x27\x88\x1c\x1a\x50\x7e\xa3\xfb\x16\x26\x7c\xb4\x57\x01\xaa\xfd\x5d\x0a\x53\x21\xde\x99\x00\xd9\x01\x3b\xc7\xf5\x81\xf6\x21\x9b\xf0\x50\x07\x32\xf4\x8a\xf5\x9e\x31\x26\x0c\xbb\x8e\x28\xe3\xef\x3a\xe0\xc5\x2e\xdc\x44\x8f\x11\x49\x17\xb0\x6a\x4a\xc9\x6c\xbe\xb2\xa8\x98\x3a\x94\x49\x86\x02\x64\x92\x29\x21\x91\x36\xc2\x9c\x47\x71\x4a\xd3\x6d\x32\xdb\x00\x4e\xd3\xe5\x66\x19\xcf\xee\xdf\x90\x87\x9e\x5e\x25\x89\xd9\x1d\x4b\xa4\x8e\xe8\x5b\xea\xb0\x16\x46\x0c\x11\xf0\xe8\x4e\xe6\xd7\x11\xda\xd5\x5c\xf2\xfd\xbd\xcd\xaf\x6f\xbc\xcf\xb9\xf4\x07\x7a\x47\x9f\xe6\xd0\xce\xe7\xd4\x30\xd4\xff\x6c\x3b\xc0\xd6\x70\x23\xfb\x01\x5c\xc9\x89\x62\xfe\x5e\x68\x3f\x7c\x87\x9b\x76\xba\x5d\xce\x17\x70\x0d\x99\x2e\xe6\xc7\x39\x31\x89\x58\x6b\x74\x9e\x62\x2e\xab\x79\x8a\xb5\xd4\x65\x49\x63\x02\x18\x63\x98\x9c\xcd\x67\x33\xf8\xf5\x9d\x24\xb3\xed\x6c\x79\xff\x56\x0c\xf4\x05\x50\x38\x29\x10\x3f\x61\x89\x63\xc2\x27\xe2\x68\x1c\x42\xc9\x95\xea\xf8\xd8\x89
\x47\x2d\x78\xe4\x04\xd2\x1e\x08\x9c\x78\x14\xec\x09\x9b\x40\xe2\x43\x31\x0d\xaf\xae\xf0\x98\x09\x20\x3f\x18\x32\x31\xf5\x8e\x1e\x00\x1e\x65\xf0\xa0\xb3\xc9\x52\xfe\xce\x96\x67\xc5\x6f\x60\x85\xd7\xbf\x1f\xd1\x7d\x2e\x47\x92\x99\xa8\xdf\x2c\x29\x75\x09\x23\xec\x8d\x31\xd2\xfb\x4e\xcf\x43\x81\x58\xc0\x90\x61\x70\xec\x6f\xc3\x48\x0c\x86\xed\xaf\xe0\xe6\x55\x45\x03\x94\xc6\x3e\x48\xde\x7f\x1f\x12\x22\xae\x81\xee\x84\x71\x34\xba\xbf\xb8\xec\x6f\xb7\x5b\xff\xc7\x81\x69\x2e\xfc\x30\xff\x06\x9f\x9f\x5e\xba\xbf\x6e\xdf\x62\x03\x20\x5f\xef\x5c\x4c\xf2\xcd\xc5\x26\xff\xfc\xe6\x48\x46\xf4\xad\x31\x89\xbf\x35\x80\x77\xfe\xf1\xe1\x49\xda\xd8\x07\x7f\xf7\x30\x93\xfc\x76\x6c\xef\x25\x8c\x0c\x12\x1f\x0d\x6a\xf6\x86\x01\x6b\xdb\xf9\xf5\x70\xc9\xda\x4f\x1a\x6b\x1d\x9a\xa1\x0d\xf5\xe5\xf1\x97\xfb\x8d\x4c\xfb\xfb\x3b\x49\xe9\x4d\x7e\x0f\x89\xb0\x43\x18\x22\x93\x1d\x89\x08\x3a\xd1\x90\x7a\xdf\x9f\xcd\xa9\x86\x59\x71\x33\x4e\x12\x24\x65\x9e\x93\xaa\xd1\x7b\xa8\xb9\x81\xc9\xe4\x0e\x6d\x9f\x3f\x92\x59\xdd\x54\xd3\x7d\x10\x48\xa8\xc7\x7d\x25\x1d\x69\x01\xc7\x06\xe1\x7c\xc9\x5f\x8f\xdf\xf7\xe4\xdd\xff\x22\xde\x9b\x4b\xa8\xf5\xf2\xdc\x8f\xff\x3d\xff\x52\x9d\xb3\xa4\x2c\x9a\xe0\x7f\x90\xfc\x98\x67\xc5\xa9\xf9\x71\xdf\xd4\xc9\xee\x5a\xe7\x3f\x85\xe1\xcf\x1d\xba\xf9\xf9\xa4\x60\xd3\xb3\x84\x4d\x6b\x7a\xba\xe6\xa4\x0e\x69\xd9\x7e\x78\xbc\xc8\xff\xf1\x43\x46\x8f\xd9\xdb\x87\xe0\x58\xd6\x17\xd2\xfe\xf4\x23\xbd\x1c\x68\x9a\xd2\x74\xda\xcd\x12\x3a\x1d\xff\xf8\x61\x32\x9e\xe2\xe7\xf2\x78\xd4\xb4\xba\xbf\x1e\x2a\xde\xb6\x46\xe9\xb6\xbe\xd2\x87\x19\x68\x5e\x4f\x3f\x68\xc0\xff\xa3\x00\x22\x5f\x53\x6f\x5e\x4f\x3f\x7e\xb8\x87\x0a\x8b\x98\x81\x38\xe1\xb6\x47\x3f\x66\x8f\xd0\x9f\xfb\x2e\x61\xff\x6b\xef\xfa\x83\x6c\x57\xee\x52\x96\xed\x99\x4d\xc7\x0b\xf6\x40\x2a\x69\x68\xca\xbf\x21\x97\xcd\x1b\xc4\x9c\x6a\xf2\xa5\x49\x48\x4e\x8d\x16\x4d\x49\xd3\xd2\x3a\x6b\x7e\x83\x6f\x4b\xbe\xfb\xe7\x8c\xbc\x33\x81\x55\x7e\x6d\x10\xd0\xc1\x02\xd1\x6b\x5d\x22\xa0\x88\x24\x16\xec\x92\x15\x28\xb1\x59\x3c\xb3\x70\x49\x5e\x5e\x53\x04\xb7\x8a\x62\xbb\xda\xe2\x95\xe6\x65\x45\x11\xe8\x3a\xda\xda\xcd\xa0\x45\x92\x39\x2f\x69\x32\xe0\xd1\x02\x9e\x72\xd2\x20\x3c\xd2\x08\xd4\x7d\xb9\x36\x59\x82\xe2\xec\xb6\xf0\x53\x3b\x28\x70\x6e\x01\xcf\x94\xd4\x2d\x8a\x5b\xda\x04\x5b\x52\xa3\xb0\x95\x03\x9b\xd2\x4b\xd5\x7e\x41\xc1\x6b\x0b\x7c\x6d\x28\x4e\x73\x63\xc1\x8e\x59\x7e\x41\x61\xb6\xac\xdb\xf3\x34\x27\xf5\x09\x51\x0b\x8d\xe2\x08\x40\x51\x50\xec\xd0\xcb\x1a\x54\x36\xc0\x70\x4a\xc4\x9e\x69\x14\xdb\x82\xae\xe9\xa5\x7c\xc5\x99\x5b\x58\xc0\xdf\xcb\xf2\x32\xcd\x0a\x14\xb9\x74\x91\xe5\x15\x67\xd1\xd6\x4b\x79\x3c\xa2\x28\x5b\x21\x4d\x76\x2a\x08\x62\xae\x34\x8a\x6d\x95\x24\xe5\x09\x45\x01\x8d\xd4\xa4\x41\x25\x3d\xb3\xd5\x71\x2e\x2f\xa8\x60\x66\x31\xb4\x03\x1c\x66\x6b\xa3\xcd\x3c\xd4\x80\x3e\x4a\x82\x74\x76\x1a\xcd\x6c\x6d\x74\xeb\xd1\xbc\x24\xe9\x94\xe4\xa8\x9c\x67\x4b\x14\x8e\x42\x6d\x95\x5c\x2b\x2f\xd0\xd6\x4a\x56\x1c\xca\x37\x14\xb7\x01\x63\x26\xf9\x32\x4d\xb2\x3a\xf1\x88\x69\x0b\xec\xb1\xa2\x04\x6d\xd2\x3c\x02\xc0\x63\x4d\x71\x3d\xce\x6d\x05\xb1\x17\xcf\x3d\x72\x9a\xdb\x4a\xea\x1c\x16\x0a\xb3\x95\x74\xcc\x09\x6a\x68\xf3\x05\x1c\xc4\xd2\xea\x5c\x16\x14\x1d\x42\xe7\xb6\x8a\x5e\xcb\xfc\x7a\xa1\xbe\x1e\x31\x5f\x61\xe0\x4e\xad\x28\x7a\x8d\xa1\xaf\x15\x8a\xb5\xb5\xf5\x1f\x75\x52\xa6\xa8\xa2\xe6\xb6\xa2\x0e\xc4\x8b\x5c\x80\x61\x0d\x17\xd6\x22\x86\x28\x54\x4c\x0b\x5b\x43\x87\x12\x1f\xd6\x16\x73\x07\x76\x21\x35\x0e\xb5\xb5\x54\xd5\x59\x81\xda\xc6\xc2\x56\x50\x42\x2e\xb4\x26\x28\xd0\x56\x4e\x37\xe3\x40\x61\x6b\xc0\x62\x8e\x76\xb3\x85\xad\x90\xac\x25\x39\xee\x5b\x17\x60\x58\xeb\x96\x8
e\x62\x8a\x84\xa0\x97\x91\x8b\xe6\x4b\x10\x0c\x6c\xeb\x86\x6d\x2a\xe4\x4b\x28\x0c\x3c\x43\xc0\x7c\x5f\x22\x0a\x9f\x23\xf0\xda\xcb\xf6\x02\x41\xff\xeb\xda\xb4\xd9\x11\xf5\xe5\xcb\xa5\xd3\xf7\x51\xd8\x0a\x8c\x65\x29\x2d\x5a\x7f\x0b\xe1\xc8\xc7\xd0\x7e\x9e\xc1\x44\x81\x24\xb4\x1b\xfd\xa7\xaf\x59\x4a\x91\x99\x21\x8d\x96\x60\x7a\x96\x25\xed\xb5\x46\xbb\xd6\xca\xd6\xe2\x85\x54\xd3\xce\xcc\x71\x49\xaf\x80\x62\xd2\x4e\x6e\x28\x70\x0e\x5c\x15\x6e\xc0\x2b\x5b\x17\x34\xcd\x70\x18\x98\xa2\x9d\x89\xa7\x2d\xb6\x0e\xd8\x8e\x61\x14\x67\x4b\xdf\x37\x5f\x59\x6d\xc0\x94\x8f\x56\xd3\x03\x49\x7e\xfb\x4c\x6a\xb4\x9f\xad\xb6\x40\x4b\x4d\xdb\x8b\x5f\x47\x60\xfc\xeb\x81\xc6\x8e\x07\x44\x61\xb6\x7e\x2a\x72\x6d\xd0\x96\xad\xe7\xa0\x65\x25\x3a\x92\xaf\x17\x60\x18\xaa\xbd\xfc\x2d\xdd\xa6\xf7\xc1\xe1\x64\x9a\x56\xbd\x70\x5b\x5f\xf4\x5f\x34\x41\xed\x64\xbd\x81\xfa\x7f\xad\x4b\xff\x30\xb3\xde\xa2\x70\x6f\x2f\xdc\x44\xce\xd2\x8d\xcd\x24\x51\x6c\xec\x2e\xcd\xfc\xe0\x19\x32\x83\xf6\xa3\xe7\x60\x52\xee\x47\xda\xfa\xfb\x8f\x2b\x6d\xba\x65\xb6\x1f\xbf\x04\xa3\xd2\xb1\xf4\x63\x81\x0a\x93\x9a\xd2\xa2\x39\x97\xb8\xe4\xd6\x58\x03\xfd\x53\xb8\xcd\x06\x36\xb1\x07\x0b\x67\x11\x45\x0f\x78\x6b\xab\x90\xd4\x75\xf9\xd9\x6b\x1f\xdb\x18\x01\x7b\xad\x63\x3b\x43\xd0\xf8\x0c\x69\x3b\x47\xa0\xbe\xa9\xd7\x76\xe1\x0e\x7e\xbe\xc9\xe7\x76\x09\xe4\xdc\x64\xbf\xd3\xe9\xf1\x9a\xa3\x6b\x9d\xed\x0a\x43\x37\x17\xe2\x81\x83\x5e\xf8\x96\xe4\xe4\x42\xfa\x0c\x2a\x06\x8b\xfa\x53\x86\x0a\x3a\x06\x6b\xfa\x9c\x12\x6c\xca\x1a\x83\x15\xfd\x31\x43\xbd\x40\x1c\x01\xa7\xf2\x85\xb2\x80\x1a\x0a\x5d\x3a\xd0\x24\x2f\xd1\x31\x33\x06\x01\x00\xf1\x35\xd4\xdf\xf4\x35\x1c\xb1\x0b\x9c\x2c\x18\xb3\x48\x4e\x8b\x14\x0d\x41\xc4\x20\x0e\x50\x93\x22\x2d\xb1\x80\x41\x0c\xa2\x00\x49\x79\xb9\x50\xd4\x01\xc7\x20\x14\x70\x21\xa7\x82\xe2\xc0\x19\x3a\x56\xa2\xf6\x1d\x83\x88\x80\x04\x7b\x2c\x3c\x06\x71\x81\x9a\xb6\x9f\xa9\x87\x0b\x38\x11\x28\xab\x8a\x1d\x9e\xc0\x63\x3b\x71\x0c\xe7\xd1\x39\xfb\x1c\xe4\x53\x31\x88\x12\x08\xb8\xcf\x78\x40\xa8\x40\x74\x1f\x79\x6e\x06\x2d\x01\x57\xa6\xac\x84\x3e\xf6\x83\x95\x81\x21\x84\x14\xf3\x90\x31\x88\x20\x1c\xae\x79\x7e\x2e\x6b\x94\x6d\x10\x45\x38\x50\xb4\xb7\xc7\x20\x8a\x90\x74\xcd\x3a\x66\x09\x69\x51\xc9\x81\x60\x42\x7b\xbe\x5e\x0e\x8d\xc7\x3a\x40\x24\x41\x60\x7d\xc6\x01\x82\x09\x67\x52\xa4\xde\x31\x38\x06\x01\x05\x06\xf6\x8c\xee\x31\x08\x2a\x30\xac\x87\xe1\xad\x8b\xf4\xb1\x0b\x62\x0a\xdc\x13\x0d\xb8\x8e\x18\x84\x17\xac\x42\x3e\xf6\x41\x9c\xc1\x2a\x83\x37\x03\x84\x1c\xac\x12\xde\xe6\xd8\x7a\x3d\xe5\xe5\x01\xd5\x3f\x08\x3d\x7c\xae\x69\x81\x46\x65\x63\x10\x76\x68\x49\xf3\x1b\xb6\x48\x8f\x41\xc0\x41\x9c\xe6\xc1\x80\xb6\x1a\x0f\x75\x46\x8f\x09\xc1\xfb\x37\x08\x38\x74\x7e\x91\xcf\x5b\x30\x30\x88\x39\xa4\xa4\x39\x1f\x4a\x7c\x82\x1a\x83\xc8\x43\x45\x2a\x5a\x27\x79\x86\xaa\x01\x84\x1f\x58\x5c\xda\x1b\x49\x8e\x41\x14\x82\x7d\xe3\xc5\x60\x20\x02\x71\x2e\x71\x6f\x03\x22\x10\xd5\xb5\x39\x57\x68\x08\x36\x06\x21\x88\x6b\x83\x37\xdc\x96\xfe\xe9\x80\x37\xd9\x96\x7b\x53\xe2\xa3\x35\x08\x28\x74\xb0\xe9\xe1\xcb\x94\x1d\xe1\x3a\xe0\x0e\x01\x84\x15\x60\x11\xcf\x3c\x29\x06\x01\x06\x59\x8c\x9f\x5b\xc4\xf0\x73\x3f\xde\x5b\xc7\x02\x67\xad\x6d\xeb\xec\x70\x6d\xd1\x10\x5e\x0c\x82\x0d\x6e\x21\x6f\x6d\x40\x5d\x05\x5b\xfc\x52\x54\x69\x4b\x38\x91\xab\x48\x81\x03\x61\x30\x9c\x7f\xd3\xf5\x8e\x16\x20\xea\xa0\xf0\xf8\x78\x04\x22\x0f\x79\x79\xc2\xbf\x06\xc4\xab\x18\xc6\x4a\xd1\x28\x6d\xbc\x82\xa1\xd7\x93\xe7\xa3\x41\x0c\xc2\x13\x05\xfd\x3c\xfd\x9c\x15\x69\xf9\x19\x05\xc3\xe9\x49\x52\xe2\xa3\x00\x0c\x53\x10\x34\xac\x10\x83\x28\x85\x6f\x7a\x01\x82\x14\x1d\x35\xbc\x56\x10\xdd\xbb\x54\x9e\xee\x
05\xc2\x13\xf4\xcd\x07\x04\x71\x89\x86\xe2\xd6\xb1\x86\x6a\x29\xab\xea\xcb\x34\x45\xbf\x7a\xd2\x18\x84\x26\x04\xda\xdb\xaa\x35\x8c\x8f\x33\xb8\xf7\xdb\x52\x0c\x43\x15\x9a\x3c\x8a\x5e\x62\x68\x9f\x26\x40\xb4\x22\xa9\x69\x9a\xb5\xdd\x9c\x13\xe7\xdc\xd6\x1b\xdb\x93\x70\xc4\x87\x15\x18\xaf\xb8\xb6\x39\xad\x51\x37\x00\x42\x15\x67\x4a\x3c\x23\xd5\xc6\x99\xfa\x57\x35\x6d\x1a\x5c\xc8\x20\x48\x41\x49\xed\x75\x1c\x20\x44\xc1\x70\xbe\xb1\x08\x04\x28\xda\xf2\xb3\x87\x57\x30\x42\xb6\xa4\x45\x07\x45\x10\x96\x68\x52\x6f\xdc\x33\x06\x51\x89\x73\x1f\x14\xf4\xaf\xeb\xa1\xcd\xda\x1c\x1f\x96\x41\x3c\xa2\x61\x1b\xbe\x9a\x96\xd6\x1e\xd2\xd0\xdf\x5d\xd9\x8c\x31\x3f\xa0\xba\xdd\x42\xb7\xd7\xa1\x97\xd3\x18\xc5\x42\x7f\xd7\x61\x57\x1e\x2c\x74\x72\x1d\x76\xed\xc1\x82\xb9\x61\x59\x7d\x61\xb3\xd5\xa9\xe7\x93\x47\xbc\x85\x83\xe2\x29\x6b\x5a\xbe\xc5\xcc\x5f\x06\x7c\xfe\xc8\xcb\x6b\xda\xf7\x21\x31\x06\x11\x07\x5e\xc0\xfb\x39\x31\xde\x6e\x40\xcf\xa3\xec\xa5\xb5\xcc\xd3\xfb\xb6\xf0\x23\x2e\xa5\xd3\x94\x26\x59\x7a\x2d\xb1\x6d\x14\x74\x16\x75\x8a\x4a\x48\x4d\xdb\xbe\x8b\x54\x22\xbd\x87\xc9\x7a\xc5\xb1\x7a\xf3\xdc\xa9\x60\x3c\x71\xb7\x90\x57\x26\xec\xad\x7b\xc8\x17\xe8\x4d\x0a\xe6\x75\xf0\x28\xe2\xae\x36\x1a\x63\xf7\xe3\x81\x4d\xc8\xe0\xfa\x82\xc8\x00\x5c\x68\x71\xf5\xdc\x7c\xc7\x76\xc4\x89\x9d\x70\xbf\xb3\xcf\x18\x6f\x5d\x9a\xbe\x05\x8f\xed\xe8\x34\x2f\xf3\xd4\xcf\x25\xaf\xe0\x65\x89\xea\x9a\xf1\x99\xb8\xee\x9a\x7d\x67\x95\xbb\x78\xe4\x8d\xa5\xfa\x5e\x88\xbe\x73\xc8\xf8\xbd\xc7\x46\xaa\x75\x31\x01\xbb\xc3\x60\xc4\xc5\xc8\xfc\x3a\xa3\x78\x06\x2f\x36\x00\x97\x37\xf5\xc2\x0c\xa6\xbb\x15\x82\xdc\x3f\x78\x28\xdf\x80\xc8\x43\xf6\x10\x2e\xb3\x00\xf9\x18\x2e\x17\x35\xbf\xff\xd0\xc2\x06\x61\x9a\x75\x23\x5d\x2d\xef\x48\x8c\xd5\x4d\x27\xbb\x2d\x13\x2d\xdc\x4f\x87\x1c\x2f\xe1\x17\xa7\xdb\x84\x5f\xf2\xec\x85\xe0\x77\x67\xef\xe6\xd5\x5b\xc0\xee\x45\x4d\x72\x4a\xea\xdd\xa1\x6c\xcf\xfd\xdb\xac\x9c\x3b\xad\xe7\xf3\x39\x72\x21\x08\xc6\x82\xdc\x15\x8b\xe4\x70\xc3\xc5\xf6\xb9\xee\xd5\x81\xb3\xee\x07\xb3\x17\xf1\x04\x9b\x4d\x55\xbe\xda\x46\x60\x75\x2a\x03\xe7\x46\x67\x5b\xbb\x70\x3b\xa3\x44\xb9\x43\xaf\xe2\xb0\xf6\x6e\x3b\x9c\xc9\x2d\xbf\x08\x6f\x3a\xcb\xc3\x9d\x01\x80\xbb\x84\x7b\xea\x19\x4b\x0c\x6d\x61\xef\xfe\x6f\xfc\xfe\x11\xb1\xd2\xaf\xea\xf2\x94\xa5\xbb\xff\xfa\xbf\xfe\xd6\x65\xfd\x83\x4d\xa2\xca\xfa\x12\xfe\x3d\x4b\xea\xb2\x29\x8f\x6d\x78\xea\xba\x2a\x2d\xda\x9f\x68\xc1\x38\xf9\xe5\x48\xf2\x86\x7e\xd8\x63\xf7\xb4\x76\x33\xbb\x17\x30\xa0\x81\x5d\xa9\x0c\x42\xbc\xe3\x9f\xe8\x87\xaa\xff\xed\xd5\x7e\x5f\x1b\xd6\x01\xc4\xc3\xc9\x02\x02\x7a\x2b\x9f\xba\x0d\xf5\xa8\xde\x9d\xcb\xb0\x17\xd9\x0a\xec\xc4\xda\xfd\xa1\x47\xec\x63\xf6\x46\xd3\xbd\xc5\x54\xb4\x57\x57\x01\xf1\x5b\x84\xe4\xe0\xbd\xdd\x46\x77\x63\xe0\x81\x42\xf3\x8c\x43\xd7\x2a\xe0\x8e\x71\x12\x16\xe4\xf5\x40\xea\x29\xab\x53\xdc\x37\xa4\x0f\xbc\x08\x94\xf5\xea\x2b\x78\xc9\x41\xbb\x40\xe5\x7a\xdf\xe9\x4a\x2c\x6e\x06\x2b\xb3\x79\xef\x6a\x63\xaa\x53\xf7\xb4\xb9\xcf\x11\xf5\x5e\xeb\x24\x6a\xab\xf9\xce\x62\x9b\x38\x62\x18\x3d\x70\x9f\x99\xdc\xf5\xa6\x76\x73\x7f\xbb\x9c\x37\x20\x1b\x65\xd1\x89\x88\xe7\x3e\x33\x45\x90\xed\x96\xc7\x2a\x10\xdb\xe8\x9d\x5a\xb4\x03\x87\x54\xcc\xd3\x0a\x08\x2d\x27\x9b\xa7\x1a\xc7\x35\xb0\x42\x30\x9b\xa7\x9a\x87\x50\xb0\x52\x4e\x3e\x4b\x0e\x07\x8a\xc9\x23\x3a\xb2\x03\xcc\x9c\x16\x8e\x61\xd6\x1c\x37\x14\x90\x1d\xcb\xf8\x08\x64\xad\x13\x1d\x3d\x07\xfa\x57\xb4\x94\x91\x85\xde\x6e\x26\x0f\x30\xb4\x65\x99\x1f\x48\x6d\xe7\x2e\x41\x6e\x00\x59\x50\xe9\xc6\x05\x7f\x37\xa8\x78\x01\x32\x0c\xc8\x4c\x41\xc8\xbd\x58\xe4\xac\xb7\xd4\x25\x43\x86\xa8\x8b\xb2\xfd\xc9\x7c\xf2\xe8\x03\x4f\
xd1\xef\xd5\xf0\x04\x38\x77\xfd\x70\x43\x0f\x31\x99\x3a\x34\x9e\x51\x02\x57\xef\xf9\x91\x0f\x56\x2e\xde\x23\xe6\xeb\x26\xc9\x89\x31\xbb\x07\x39\x4e\xcd\xba\x22\x57\x0e\x96\x55\xc3\x99\xbb\x83\x06\x1c\x75\x2d\x45\x19\xb2\x32\x20\x3f\x1e\x0b\x80\x80\x11\x3a\xe3\xa3\xca\x80\x8a\x04\x35\xf3\x79\x70\x20\x15\xd8\xb3\xb1\x12\x40\x34\xdf\x48\x35\xa2\x26\xe3\xd9\x24\xc7\xa8\xbe\x52\xe2\xce\x99\x50\x77\x34\xc3\xcf\x8e\xa2\x03\xcf\x0b\x1f\x63\x20\xd4\x3a\xb9\xb4\x31\xde\x8f\xe0\x1e\x68\xe3\x76\xc8\x69\x7e\x1a\x20\xc3\xa6\x27\x36\x9d\x78\x66\x11\xf2\xf0\xfd\x47\xdc\x33\xd7\xc7\x80\x3e\xa4\xf8\xc8\x39\x44\x39\x81\x41\xc6\x90\xfc\x04\x66\x37\x7c\xf2\xb0\x14\x0c\x82\x79\x8e\x7a\x3b\x4b\x4d\x6d\x7a\x68\x44\x81\xa0\x72\xf7\x79\x21\xaf\x7b\xf2\x4f\x26\x5e\x80\xae\xc1\x94\xd4\xb8\xab\xd4\x7a\xbd\x51\xbe\xf5\xc8\x8f\x99\x8d\x20\xab\x09\x79\xe1\xd0\xdb\x21\xb9\xa3\xda\xd1\x4f\xc7\xeb\x3e\xd9\x2b\x21\x7a\xc1\xec\x7a\x06\xe0\xf1\x87\x47\xbc\x9e\xc1\x0e\xce\x1e\x7a\xdc\xcd\xcd\x37\x40\xb1\xe0\xc7\x88\x91\x0d\x1b\x8d\xfc\xac\xf4\xf9\x9f\x9e\xf1\xcd\x60\xc6\x1d\x49\xc7\x8c\x7d\x98\x96\xbe\x81\x57\x41\xc9\xf6\x89\xbb\xc7\xdf\x3c\x41\xeb\x19\x4f\xf4\x90\xbe\x1c\xa7\xe4\xca\xcc\xeb\xa6\x9e\x54\x14\xdf\x25\x9d\x51\xfd\xd4\x1f\x7b\x61\xd0\x1c\x20\xf8\x93\x83\x39\xf9\x52\x5e\x5b\xb1\xf0\xd4\x87\xb0\xd9\xd7\xc0\x5d\x43\x2b\x52\x93\x96\xa2\x94\x9d\xd1\xcc\xce\xb1\x26\x25\x6c\x70\xb2\x38\x99\x26\x34\xcf\x25\x3b\xef\xfd\x15\x18\x93\x6a\xf3\xd4\xec\xaf\x29\x69\x89\x50\x98\x38\x5f\xdb\x7c\x62\x05\x5e\xdc\x8b\x9f\xc7\x81\xf5\xe5\xcf\xf6\xf9\x58\x73\x56\xec\x5f\xd5\x71\xf1\xfa\x05\x68\x50\xf9\x35\xc9\x49\xd3\xfc\xf9\x97\xa4\xcc\xa7\xd6\x4d\xd3\x43\xe7\x95\x0d\x1a\xf0\x99\x14\x87\x2f\xb5\x48\x32\x43\xba\xde\xc7\x7d\x6d\xe2\xec\x84\xb9\x49\x7f\xe2\xe4\x3a\x77\xc2\xf7\x43\x3a\x51\x33\x15\x7e\xd7\x07\x6c\x3c\xcc\x7b\x30\x6e\x13\x46\x00\xd1\x86\x0c\xbd\x94\xe3\xe1\xcb\x8b\x72\x39\x1b\x05\x95\xbc\xf9\x9e\xe5\xe9\x15\x4f\x0f\xda\x27\xa8\x91\x45\xa0\xc8\xcc\x27\x8d\xf4\x8d\x03\x7e\x73\xeb\x72\x07\xcc\x0d\x42\x60\x95\xdf\xe3\xb1\x27\x0f\xeb\x1e\xcc\x28\x63\x1b\xd5\x8c\xa1\x57\xa5\x3c\x7c\x79\x51\x23\x8d\xcd\xc7\x5b\xbf\x2d\x78\xc4\xd3\x83\x7e\xd0\xd8\x86\x44\xe6\x1a\x1b\x62\x3e\xcc\x8f\xf9\x07\x57\xd7\x6f\x21\x14\x47\x4c\x83\x9c\x4a\x1f\x2d\x03\x9e\x0f\x79\x66\x72\x3b\x2c\x09\xe9\x65\xdf\x63\xf7\xbf\x7b\xc2\x95\xee\x0b\x21\x43\x17\xc5\xf7\x9f\xf1\x37\xde\x51\x75\xaf\xbb\xc7\x9f\x92\x1c\xf9\x86\x2a\xc2\xab\x7e\x73\x6d\x68\x84\x40\x6e\x21\xf1\x51\xcb\x4f\xc3\xd7\xb3\x20\xb7\xb1\x38\xe4\xb0\x67\x2c\x7a\x41\xf0\x41\x62\x1e\xb9\xef\x99\x3b\xd8\x2f\x9e\xbb\x76\xed\xcd\x06\x13\x56\x31\x1d\x1c\x42\x18\xeb\xcd\x01\x30\x98\x96\xbb\x68\x18\xe4\x19\x17\x03\x1c\xa4\x63\x2e\x6b\x7c\x8b\x98\x87\xa3\x54\xbd\x72\x75\x9e\x32\xf7\x2b\xcb\x5c\xf3\xb8\x34\x7d\xb9\x48\x2b\x47\x8a\x61\x04\x76\x50\x4f\x4e\xa0\x10\x89\x99\x8e\x33\x1b\xcf\xba\xe9\xc9\x90\x5e\x9f\xfc\xe0\x0b\xfe\x70\x90\x44\x3e\xbc\xa8\xce\x1d\xa1\x1f\xca\x51\x2f\x85\x6c\xf6\xc0\x70\x2c\x12\x82\x7c\x42\xc0\xb0\xf2\x3b\x0e\x9a\x27\x3e\x90\xa0\x79\xee\xd7\x95\x6f\xd5\xdb\x6f\xe0\x21\x17\x84\xf5\x27\xcd\x13\x91\x49\x41\x5e\x9d\x77\x8b\xc0\xc2\x0a\x6c\x56\x61\x65\x5e\xf2\xac\x67\x75\x27\xbe\x3c\x73\xdc\x0b\x19\x42\x82\xa5\xcd\x52\xf0\x65\x6d\x8f\x50\x7f\x3f\xf2\x51\xbe\x73\x75\x92\x94\xf1\x61\xdf\xda\x1f\xe0\xe4\xda\x35\xf6\x6d\x2e\xc0\x37\x3f\xf4\x6e\x0d\xc0\xbe\xe1\x17\xe4\x35\x10\x5f\xe9\x27\xe6\x1f\x06\x23\x2a\x49\xbc\x81\xd6\xe3\xd3\xe1\x3e\x0b\x56\xba\x20\xaf\xd3\x6f\xb7\x8f\x46\xea\xe2\x25\xbb\x9c\x6e\x3a\x70\xaa\x8c\x63\xda\x92\x43\x63\x8f\x32\xe6\x4c\x23\x4d\x53\x0d\xeb\x0c\xc9\x7d\x12\x59\x14
\x52\x26\x2a\xa1\x2f\xc4\xee\x1c\x3d\x5b\x08\x7a\xdf\xea\x71\x26\x3a\xc1\x82\xef\xcc\x02\xb5\x71\x15\xdc\xe0\xd5\xa1\x34\xe0\xff\x81\x2d\x31\xf6\xd7\x60\xa9\x86\x42\xdd\x3c\xcb\xb8\x7a\x9e\xc4\x77\x26\x6e\x69\x9a\x82\x30\x9b\xd7\xec\xc4\x93\x15\x9a\x67\xf6\x8b\x0e\x7e\x19\x01\x0f\x5b\x7b\x91\xaf\x88\x56\x9f\xad\x7d\x07\xf5\x42\x6e\xee\xdc\xd4\xd6\xf6\xd2\x54\x36\x20\x30\xbc\xf7\x41\x6f\xdc\xe8\xdd\xe7\xe0\x69\x43\x6f\xb0\xad\xaf\x51\x30\x30\x34\x0a\xad\xb7\xaa\x38\xf3\x6d\x5f\xfb\x5d\xcb\xf2\x21\xa0\x95\x79\x71\xe6\x7b\x8a\xb0\x7f\x3e\x21\xc3\x17\xd2\xdb\xe5\xc7\xf4\xb9\x3f\xb8\xd1\x76\x7f\x61\x57\x8d\x72\x5e\xaa\x2c\xcf\xc1\xc8\x64\x67\xe8\xb6\x42\xdd\x49\xc4\xc7\x3c\xbb\x81\x5d\xb2\x36\x00\x34\xce\x49\x36\x5b\xe4\x66\x3a\xfb\xf0\xfc\x1b\xed\x3a\x02\x4d\x4b\x92\xdf\xf0\xde\xaa\xb3\x0c\x96\xd9\xdb\xcf\xee\x77\x23\xdf\x68\x71\x1f\x1e\x14\x9e\x1d\x0b\xbe\xc7\x10\xf0\x58\xcf\x1f\xdd\xe1\x0d\xd1\x78\x47\xcf\xe7\x07\x84\xfe\x7e\x31\xa6\x4f\x7c\x87\x41\xe0\x5b\x0f\x00\x7f\x40\x23\xd1\x4e\xdf\x92\xc3\x54\xec\xca\x7b\x61\x7f\x54\xa4\x80\x17\x7f\x5a\x18\x31\xfb\x77\x27\xbc\x8c\x09\x68\xa1\xf0\x73\xec\x33\x1f\xab\xf8\xa6\x3b\x64\x2a\x6d\xbc\x1a\xb9\x8c\x9c\x37\x5b\xd9\xa6\xcb\xbe\xb9\xd0\xe0\xb6\x40\x64\xa4\x1b\xde\x4a\x28\x36\x83\x1a\xe3\xa7\xda\x36\xa8\xee\x55\xed\x66\x8e\xea\xa5\xf8\x8e\x4d\x39\x03\x9d\xbe\xed\x5e\xb3\x26\x3b\xe4\x14\x6e\xc0\x58\x1a\x7b\x32\xf8\x2e\x8d\xa5\x25\x4e\xef\x74\x0f\x7d\xda\x37\xe2\x9b\x2b\x66\xcb\xe5\x44\xfe\x3f\x8c\xf5\x3b\x7a\x8a\x9d\x26\xa9\xcb\x3c\xef\x16\x28\x6d\x79\x4d\xce\x4e\x4b\xd8\xc5\xb1\x12\xfc\x65\xc4\x08\x64\x89\xc1\x78\xa5\x16\xec\x1e\x05\xbb\x34\x60\xad\xce\x05\xb5\xcc\x0a\xff\xc4\xcf\xc9\x91\xa2\xdd\x1b\x41\x5d\x23\x55\xef\xcb\x17\xcb\x3d\x35\xf1\x17\x42\xd7\xd8\xa1\x96\x8a\x02\x77\x7b\xbb\x6a\x5b\x56\x01\x2c\xa8\x76\xb4\xf2\x27\x47\xfb\x31\xf6\xae\x57\x28\xaf\x81\xcf\x7e\x77\xf6\x26\x29\xc9\x0a\x5a\xbf\xd8\xd6\x38\xd1\x39\xd3\x63\x7e\xcd\x52\x7f\xfe\x8b\xcb\x9b\xaf\xac\x61\xce\xf0\xf9\xd5\xbd\xf3\x46\x6b\x8f\x59\xfc\x7f\x81\xeb\x08\x38\xfc\xbb\xab\xb5\x9b\x75\x18\x06\x6c\xe6\x61\xdd\x6a\xd8\xf4\x0d\x6a\x30\xe2\xee\x9a\x12\x6a\x15\x70\x0b\xb8\xb5\x7b\xdb\x3c\xb0\x33\x8f\x86\xd9\x19\xa8\x6a\x98\xc5\x9b\xb5\xdd\xdb\x16\x86\x87\xa4\xe8\x7a\x30\xf2\x62\x91\x88\x0d\xef\xd8\x91\x38\xd4\xa4\x48\xcd\xc5\xb2\x0a\x9b\x2c\xfb\x3f\x06\x33\x1f\x60\xb8\x08\x9b\xa2\xe1\x41\x75\x9a\x3f\xca\x32\x28\xcf\x17\x6d\x74\x81\x45\x74\xe2\x02\xb8\x55\xda\x30\xf7\x65\x63\x2d\x72\xb9\x51\xcf\xb3\x8f\x9b\x99\x81\xfd\x0e\xb2\xe9\x32\x58\xa0\x23\x36\x7c\x64\xa7\xb7\x8d\xe3\x32\x37\xd8\x09\xac\xe1\xf3\x1d\x8f\x05\x1c\x40\x93\xdc\xcd\xd6\x56\x76\x10\x8a\x2b\xa0\xe0\xf9\x0a\xae\x80\xd9\x4c\x2b\xd8\xfd\xf2\x12\x3b\xb5\x69\x72\x1f\x35\x61\x43\x26\x8b\x31\xbd\x58\xe8\xc2\x9a\x24\xa9\x7a\x74\x58\x71\xb7\x0e\x97\xd5\x5b\x30\x55\x61\x3d\x91\x2d\x22\x83\xc6\xd3\xd3\xb1\xf1\xd9\xd9\x7a\x72\x1e\xda\xb2\xe6\x4d\xc5\x9e\xd6\xab\xb5\xc9\x9b\x0a\x99\xc1\x99\x98\xb2\x1c\x3e\x06\xb9\x9b\x01\x99\x1f\xb6\x1e\xfa\xee\x37\x05\xae\x74\xbf\xb7\xf6\x71\xc2\x9a\x3f\x19\x42\x05\xce\xf1\x1a\xeb\xa3\x9b\xdc\x82\x39\x03\xb2\xf5\xd7\x78\x73\x64\x39\xaa\x18\x18\x23\xfa\xb1\x4e\x8c\xd2\x78\xef\x65\xd8\xac\x3a\xcb\x71\x02\x82\x46\x87\x10\xc1\x67\xb0\x32\xf7\x1b\xd5\x12\x31\x2a\x20\x2e\xeb\x40\x8b\xf9\x55\x03\x79\x51\x7d\xb8\x01\xec\x04\x8c\x66\x0f\x99\x4d\xf1\x13\x57\xc6\x90\x65\x60\xb4\xc3\x28\xeb\xcb\xa8\x47\xde\xdd\xe8\xf9\x98\x09\x31\xbe\x4e\x1b\x78\xc5\x64\x78\xf2\x3c\xe9\x9d\x59\x7f\x43\x52\x43\x23\xf8\x08\xb7\x5f\xd6\x17\xeb\x45\xfd\x51\xef\xcb\x7b\x3e\xe9\xbb\x34\xe1\x76\x08\xe4\x40\x35\x1b\x6b\xc6\x10\x34\x3
e\xee\xd8\x1b\x44\x60\x0c\x46\x17\x49\xac\x87\xf9\x9f\x69\x05\xfb\x7e\x3e\x01\x54\xc5\xe7\xf2\x5e\x59\xf1\x61\x73\xe0\xa3\xd2\x68\x0e\xb0\x4f\xfa\x28\x4f\xf8\x67\x7d\x63\x80\x77\x83\x59\x9a\xc8\x99\x34\xd3\x23\xa5\x69\x37\x6a\xd9\x0a\x54\xc9\x7c\x96\x77\x1f\x76\x3e\x8e\x61\xb9\xa1\xad\x71\xd6\xe9\x2e\x0d\xe1\x0c\x7d\x0f\xa6\xef\xe6\xc8\x17\xb9\x2b\xbd\xd1\x07\x02\x0c\x26\x1e\x1a\x1c\xc1\x50\xec\x0f\x83\xf4\x7c\xfd\x1f\x8c\x81\xe0\xeb\x43\xbc\xca\xe7\x37\x27\xcb\x29\xa9\xfe\x92\xec\x1f\x6b\x0c\x70\xc8\x5f\x08\x32\xcb\xc4\x6e\x54\x26\x8e\x90\x52\x6f\x8d\x55\x6a\xe1\x96\xb2\xe6\x8d\xf4\xad\xb5\xf0\x4b\x17\xdf\xbf\xe2\x34\x09\xb9\x9f\xe0\x74\x78\xc5\x99\x4f\x5b\x3c\x8c\xb3\x0f\x63\xd9\x84\xbc\xa8\x2f\x63\x71\x9b\xee\x07\x3e\xd4\xb7\xee\x7e\x60\x69\xb0\x64\x10\xd8\xf5\x7a\x00\x08\xe6\x31\x38\xc6\xfe\x12\xc7\x3e\x7a\x0e\x3c\x87\xe5\x21\xc7\x24\x3b\x82\x35\x35\x71\x79\x00\x3b\xd0\x12\xf0\xb9\xdc\xb8\x95\xe0\xa9\x96\x74\xe4\xec\x80\xec\x20\x6a\x04\x83\x0f\x7d\xfc\x1c\xb0\x03\x46\xcf\xbc\x44\x60\x1c\x6e\x0c\x97\xbe\x1d\x00\x6c\xc7\xdc\x53\x76\x61\x1d\x9c\x90\x2f\xe0\x89\x6f\xc9\xfe\x02\x03\xbc\x5a\x0b\x49\xe4\x5d\xbe\x41\xfa\xc6\x22\xd3\x2d\xbe\xd9\x6c\xbc\xc5\x9d\x00\x1e\x04\x30\x67\xf6\x50\xb7\x66\x82\x37\xf6\x44\x0c\x60\xc6\xa8\x71\x68\x07\x05\x63\xc8\xd8\x4b\x39\xec\xe8\x91\x6a\x7a\x56\x5e\xe3\x3a\xf7\xe8\x35\xd8\x63\x65\xbf\xd9\x30\x80\xd7\x31\x6a\x6c\x18\x28\xfa\x6c\xfb\xbe\xf1\x28\xe2\xa9\x64\xdc\xd0\x32\x58\xf8\xe9\x46\x3e\x3d\x08\x79\xdb\x6a\x3e\xa8\xd9\x67\x94\xfa\xe9\x46\xc3\x7e\x14\x3a\x2b\x5e\x69\xdd\x60\x0f\x81\xce\x66\x33\xe0\xca\xa3\x4d\xf7\x03\x8b\xe2\xae\x5c\xee\xcd\xf2\x02\x81\x24\x71\xcc\xf0\x67\x72\xcc\xec\x21\x2d\xd3\x8f\xf7\xf1\x05\xfd\xf8\x18\xec\x40\x33\x70\x3f\xfe\x74\x4b\x70\x3f\xde\x8b\x1a\xc1\xe0\x43\xdb\x12\x06\x8c\xc0\xe7\xc7\x07\x70\x63\xb8\xf4\x75\xa1\xc5\x62\xf1\xa4\x5d\x60\x7e\x1c\xe9\x1c\xfd\x7e\xdc\x03\xf2\xf9\xa9\x61\xfa\xbd\x7e\x9c\xbd\xa7\xeb\x29\xee\xf8\x71\x08\x40\xfc\x78\x1c\x75\x3f\xfd\xea\x04\x7e\xbc\x07\x33\x46\x8d\x03\x7e\x9c\xdb\xd7\xde\x68\xee\xa0\x1f\x47\xaa\xf1\x8d\xc2\x20\x1a\xfb\xd0\xf8\xd6\x13\xe4\x15\x5b\x33\x9f\xe9\x2d\xc3\x33\x8e\x81\x61\x68\xf4\x8c\xe3\xb1\xb2\xdf\x6c\xc0\x1a\x3b\xe3\x78\xbc\xe8\xb3\xed\xfb\xc6\xe3\xdd\xf8\x19\xc7\x33\x85\x9f\x6e\xe4\xd3\xc3\xa5\xb7\xad\xe6\x8c\xa3\xcf\x28\xdd\x19\x07\x7f\x05\xbc\xa6\x24\x4d\xea\xeb\xe5\xa0\xbe\x82\x6c\x64\xc4\x1b\xd9\xf4\x02\xef\x58\xf4\x5d\x92\x87\x7d\x9b\xd3\x55\x99\x5b\xc4\xcc\x60\x27\xc0\x7c\xcc\x33\xe7\x62\xcd\x9f\xff\x19\x45\x24\x7a\xa7\xe2\xf3\xec\xb2\x8b\xbd\x9e\xb7\x59\x24\xe4\x8e\x22\x53\x3c\x15\x39\x65\x05\xfb\xf2\x8a\xc7\x5b\x41\x48\x55\xde\x31\x19\x99\x97\x72\x98\xad\xd2\xf4\xdc\x56\x81\xdc\xce\xe8\xec\x84\xa6\x22\xfd\x17\x68\xed\x9d\x43\x6c\x9e\x9d\xd9\x7d\x17\x1a\x8a\x97\xdd\x1f\xd9\xfb\xec\x9e\x69\xb0\x18\xb7\x0e\x59\xc0\x56\x59\x99\xac\x85\x76\x78\x75\xf4\xe5\x10\x20\x0b\xf2\x60\x9c\xc6\x70\x58\x30\xf2\x18\x07\x3d\x21\xcb\xbe\x1b\x29\xdc\x5a\x75\xdf\x77\x35\x89\xe7\x10\x79\xe2\x05\x29\x80\x3d\x1f\x3f\xe6\x1c\x02\x8b\x3b\x18\xf4\x8c\x81\x1b\x49\xed\xaa\x42\x33\xb0\xb6\x98\x85\x7a\xb2\xb1\x56\x59\x45\x79\xd3\xf4\x15\x00\x23\xb6\xd5\xa2\x87\x2d\x9c\x6d\xf6\x66\x85\x6a\x38\x75\x9b\x68\x65\x61\x0d\xb1\x01\x48\x6b\x4c\x5f\x81\xa7\xf7\x92\x45\x8e\xb6\xf4\xf5\x40\x43\xb5\xe8\x81\x16\x5d\xc7\x34\x3f\x39\x83\x89\x48\xe3\xe3\xc9\xc0\xa1\x4f\x97\x96\xbf\x3b\x23\xf9\x48\x7f\x32\x3b\xea\xca\xdf\x87\x57\x68\xdd\xbe\x6e\xec\x66\x0f\xf5\xe4\x55\x4f\x4f\x86\x75\x37\x17\x57\x86\x3c\xcd\x92\xa1\xe7\x18\xae\x4b\xaa\x47\x84\x6e\xfe\x80\x08\xe7\x7e\x11\xce\xd1\xba\xbd\x22\x74\xb2\x87\x44\x38\xef\x
11\xa1\xac\x5b\x6f\x98\xc0\x9d\x24\x9c\x23\x38\xdb\xd2\x05\x95\x00\xf7\x97\x2c\x43\xb4\x83\xff\xce\x98\xee\x73\xd6\x5c\x4d\x8f\x5c\xf2\xec\x6e\x9d\xe6\xdf\x60\x74\xfd\x46\xdf\x96\x29\x8f\x9f\x92\xe3\x85\xc3\x82\xbe\xb5\xba\x45\xfc\x4f\xd6\x28\x63\xbb\x82\x02\x57\x35\x7d\xcd\xca\x6b\x63\x14\x50\x49\x46\x21\xbe\x4b\x43\x00\xc0\x50\x65\x27\xd9\x2d\x71\x06\x28\x37\x83\xd5\x32\x38\x6c\x61\x83\x14\xff\x30\x6e\xab\x4a\x29\x29\x9c\xd1\x4b\x10\xae\xba\xff\xcc\xe9\xc5\xe8\x51\xeb\xe5\x7b\xeb\x30\xfe\xda\x77\x18\x5f\xdd\x8d\x6c\x1d\x72\x18\xbe\x23\xe0\x40\x1a\xca\x58\xb1\x55\x1e\xce\x96\xf4\x22\x98\xfe\xf5\x5c\xd3\xe3\x27\x29\x2a\x33\x69\xdc\xed\xcc\x42\x1a\x55\x99\x71\x0b\x67\x14\x76\xec\xa1\x1e\xb0\xe5\x9d\xdd\x0b\xc7\x05\xe5\xce\xfc\xe4\x6e\x76\x41\xa0\xe7\xbb\x1e\x9b\xcc\x5a\x20\xa4\x09\x20\xcb\xb3\xbc\x3f\xd5\xe4\x8b\xa4\x55\xd5\xd9\x85\xd4\x5f\x90\x0a\xe5\xd1\x17\x0b\x87\xd5\x69\x67\x79\x43\x2e\xd1\x3a\x26\xaa\x09\xcd\x35\x49\x68\x83\xe1\x96\xc9\x61\xb3\x4c\x00\x0e\xab\xd6\xce\xf2\x55\xbb\x58\x6c\xd3\xc5\x42\x92\xcb\x8a\x63\x89\xd5\x79\x48\xa2\x94\x9a\x20\xac\x42\x23\xdd\xdb\xc8\xf8\x10\xa5\x4b\x49\x48\x3c\x02\x88\x05\x90\x22\x92\x2e\x28\xc0\x61\x75\xda\x59\xde\xcf\x2e\xc9\x76\x1d\x1f\x95\x79\x90\xe2\x84\xc6\x41\xd2\xed\x72\xbe\x00\x30\xd4\x88\xcc\x1c\x5f\x9d\xc9\x76\x1e\xcd\xba\xe5\x17\x49\x4f\xd4\xb3\x95\x45\xdf\xe6\x6f\x5e\x86\x33\xaf\xde\x82\xb5\x7b\xd3\x05\x1c\x11\x8c\x1e\x68\x0f\x0e\xbe\xce\x8e\x8c\x0b\x63\x6e\xf2\x60\x23\x9e\xed\x1a\xd8\xc6\x02\xd6\x30\x7f\x8f\xe6\xed\xee\xeb\xd1\x7c\x3b\x82\x44\x5a\xbb\x49\x98\x6b\xea\x1c\x10\x11\xd5\x70\xf9\xcb\xbf\x9e\x1a\x82\x48\xc8\x1c\x31\x3f\xbd\x9e\xb5\xf4\x22\xa7\xe7\x9c\xa8\x79\x08\x4e\x4d\xea\x45\xde\x6d\xc4\xc2\x11\x9e\xdd\x93\x45\xcd\xb5\x1e\x9b\x31\xfc\xeb\x7a\x39\x94\x6d\x6d\x5c\xc8\x32\x77\xf7\x6e\xb0\x24\x4e\x3c\x2b\xce\xb4\xce\x5a\x9f\x4b\x55\xe4\x82\x73\x3c\x31\xfe\x0a\xcf\xf1\xcd\x22\x60\x42\xe1\x76\x21\xb0\xb9\x7b\x16\x03\x7b\x9b\x45\xd1\xdd\xdc\x71\xad\xdb\x80\x5c\x98\x62\xf0\xa0\x8a\xdc\xc0\xd5\x97\x22\x5c\xca\xdf\x8f\x0b\x48\x91\x62\x7b\x47\x1c\x49\xf1\x7d\xc3\x1b\x77\xb3\xe5\x82\x4d\xe3\x51\x06\xad\xa9\x99\xf9\x68\x86\xd8\x34\xb2\x8a\x6c\x9e\x5d\x29\x6a\xb9\xac\xe6\x6c\xff\x11\x7b\x73\xb1\x20\x59\xee\xb9\x87\xde\xdd\x54\x33\x83\x3b\x8c\xcd\x33\xe4\x4f\x4f\xd1\xcc\x67\x36\x58\x6c\x4c\x3c\x31\x92\xe7\x41\x38\x6b\x02\x4a\x1a\x3a\xcd\x8a\x69\x79\x6d\xf7\xfd\xd9\x46\x93\x5e\xb2\xcb\x69\xa2\xff\x0c\xe4\xf9\x7b\x6d\xc4\xe6\xe6\x65\xe3\xf2\x75\xa2\x0b\xa9\xde\xaa\x53\xf8\xcc\xca\x48\x91\xa1\x28\xfc\x3e\x01\x5d\x7f\x98\x90\x8a\x05\xa7\x8c\xed\xf5\x7b\xf3\x2b\x05\xc9\x69\xdd\xde\xac\x53\x0a\x0f\x1e\x4e\xc3\xa2\x58\x8c\x6a\x70\x5e\xd8\xdb\xc9\x40\x7f\xe2\x20\xfe\x0f\x8f\x3b\x82\x31\x5a\x40\x5e\xaa\x89\xf8\xe5\xea\xec\x92\x54\x90\x8f\xd6\x85\xaa\x4b\xc5\xc4\x34\xcd\x9a\x4b\xd6\xb0\xa9\xe8\xcd\x36\xdd\x39\x8e\x0a\x42\xf6\x44\xab\x6f\xec\xed\x7c\x89\xd8\x4f\xc7\xfa\x39\xd6\xa8\x9e\xf9\x47\x7a\x3c\x46\x29\xdc\x3f\x95\xae\xe8\x36\x59\x29\xbd\x24\xeb\xd5\x3c\x05\xa4\x82\x73\x6d\xde\x9f\xa9\xbc\x24\x9d\x1d\xe6\x10\x6a\x8a\x54\x06\x83\x0e\xcb\x45\xe7\x4e\x79\x8e\x67\x9e\x92\x6e\x69\x7a\x5c\x03\xde\x0e\x09\xdd\x1c\xd5\x94\x79\x1e\xaf\xa3\xcd\xd1\xa4\x83\x33\x46\x56\x34\xa6\x56\x7d\x28\x57\x8b\xe5\x6c\xb5\x95\xa8\x9e\xf9\x4c\x72\xdc\xd0\x39\x60\xec\x48\xe8\x21\x49\x24\x63\x1b\xb2\x4a\xe7\x07\x40\x0a\xe7\xed\xb8\xa6\xf1\x61\x09\xa1\x08\x7b\xab\xd5\x32\xd6\x42\xf3\xce\x7b\x8e\xb3\x94\xa6\x30\xa4\xd6\xf1\x96\x2a\xb1\x91\xed\x62\xb1\x98\xd9\x94\x70\xe6\xe8\xe2\xb0\x4d\x22\x80\x44\x78\xdb\x2c\xe6\xcb\xf9\xe2\xfe\x17\x39\x80\xfd\x46\xbf\x1c\x6b\x72\xa1\x4d\
x50\xd5\xe5\xa9\xa6\x4d\x33\x3d\xb0\x63\x5c\x75\x56\xd1\xe6\x76\xac\xcb\x8b\xc9\xba\x32\xee\x05\x5b\x64\xdf\xdb\x12\xcd\x8d\x82\xe8\x7e\xff\xcb\x77\xa4\x1d\x4a\x8a\x37\x78\xb5\x89\x79\x2e\x0a\x1b\x91\x46\x7f\x29\x18\xda\x24\xef\x3c\x94\xe4\xdd\x03\xef\x22\x35\xff\xec\x53\xae\x73\x2f\xa9\x7a\x8e\x8b\xdd\xd7\xd1\x73\x3b\xe3\x4c\x4f\x58\x3c\xcb\x52\x6f\x94\xb3\xa7\x79\x53\x63\x73\xbe\xf5\xda\xd4\x58\x24\xe2\x1e\x59\xc3\x82\x70\xc5\x3d\xe0\xde\x9b\x63\x88\x86\x1b\x4a\x1a\xd8\xc2\x72\x0e\x9f\xc8\xca\x3a\xb9\x90\x7a\xaa\x9e\xd8\x59\x2c\x53\x7a\x9a\x20\x27\x0c\x96\x1f\x82\xd9\xf2\xfd\xc4\xf0\x45\xce\xdf\xcb\xe8\xbd\xa7\xa4\x3f\x67\x0d\x68\x80\xbf\x3f\xb8\x67\xcb\xfe\xff\xc4\x31\xb3\x3f\xd6\x33\x17\x6c\xea\x26\x75\x22\x66\x14\x40\x49\x52\x27\xa4\xc8\xf8\x6b\xfb\x3b\x6c\x08\x08\x66\x4d\xc0\x65\x10\x64\xc5\x31\x2b\xb2\x96\xee\x1f\x2e\x61\xf7\xa5\x11\x4b\xf8\x7e\xfb\xc2\x08\xfc\xa7\x9d\x7d\x5f\x8e\x81\x0a\x07\xc2\x21\x03\xfa\x83\xa5\xff\x53\x79\x7f\xa8\xf2\x86\xa3\x4b\x03\xfa\x43\x08\xfc\xa7\x0a\xff\x50\x15\x0e\x46\xea\x06\x34\xe8\x96\xff\x4f\x05\x7e\x6f\x05\xb2\xc0\xce\x84\xff\x33\x3d\x94\xe9\x17\x67\x62\xfc\x7b\x59\x5e\x76\xb1\x8d\x0c\xf8\x3f\xf0\xac\x91\x00\x61\x0f\x50\x89\x3b\x74\x79\x3d\xe5\xe1\x5f\x34\x69\xe1\x4d\x39\x3c\xef\x4c\x49\xb7\x66\x96\x27\xc5\xa3\x20\x0a\x34\xe5\x17\xfe\x5a\x1f\x3b\x4e\x6b\x1f\x41\x8a\x20\x86\x1f\xa8\xb5\x0e\x2e\x69\xcc\x34\xcf\x9a\x16\x7e\x06\x74\xee\xb4\xd4\x01\xc8\x1b\xb2\x22\xb0\x4b\xdf\x61\xb8\xf2\xf1\x0b\x2e\x91\x8b\x0e\x1f\x89\x36\x39\x1c\x8c\x7a\x0b\x63\x68\xcf\x0a\xa4\xe9\x1e\xe6\x82\x17\x55\x0c\x6c\x52\xc1\xf7\xcd\x38\x15\xc9\x78\xac\xf5\x89\x11\xc7\x7c\xb4\x43\xb7\xdc\x20\x78\x34\x1a\xea\xc4\x38\xc9\xe1\x64\x06\x30\x41\x99\xa2\x11\xbf\x72\x4a\xa9\xe0\x99\x23\xfc\x47\xbe\xb6\x8a\xa7\x59\x7d\x31\x6f\x97\xbc\xc8\xf0\xd5\x2e\xb3\xbf\xd5\x2e\x16\x2f\x67\x5e\xa1\x0d\xb0\xfc\x44\x39\xd6\x96\x21\x25\xc9\xb8\xd8\x78\x7e\x3b\x0d\x3d\xcc\x6c\x7f\x21\x0f\xa7\xe6\x91\x01\x1a\xd3\xf4\xb8\x76\x2c\x5a\x2d\x22\xac\xd0\x1c\xa2\x2b\x1e\xd6\x73\x9b\x89\x13\xf0\xe3\x9e\x96\xa7\x24\xe0\xb3\x40\x95\x6f\x9f\x32\xf2\xb6\x27\xa2\xdb\x64\xe5\xaf\xc6\xdb\x11\x00\x60\x88\x1d\xbb\x63\xf4\x76\x07\xc9\xab\xbd\x93\x5f\xc4\x4a\x21\x79\xb6\x78\xb0\x62\x96\x58\x23\x59\xbc\x13\x69\xa4\x5b\xda\x03\x7a\x5e\x5d\x5d\x69\xaf\x70\x58\xa6\xad\x28\x5f\x1b\x92\x05\x9d\x1f\x91\x71\x90\xd1\xf0\x6b\xc9\xc8\xed\xe5\xe2\x11\xfd\x08\x16\x6d\xfd\x88\x78\x31\xa4\x2d\x17\x07\x56\xf4\x16\x1b\x88\x59\xe4\x17\x69\x1e\x4a\xc0\x8f\x7b\x5e\x51\x82\x80\x57\x4a\x32\xdf\x92\x90\xbf\x3d\xe4\x38\x4b\x12\x7f\x35\x7e\x8d\xd9\x80\x21\x76\x1e\xd0\x9b\xe4\xd5\xd2\x9b\x0c\xa7\x43\xf2\x62\x49\x60\x85\xb5\xb1\x66\xb2\x90\x38\xd2\x4c\xac\xbc\x17\xf6\xbc\xd2\x78\x79\xaf\x90\x44\xb6\x25\x1c\x6f\x5b\xe8\x21\x49\x50\x95\x71\x2a\x7e\x8d\x59\xf9\x03\xbc\x3c\xa0\x2f\xc9\xa8\xa5\x2f\xf9\x81\xc1\x27\xb0\x9e\x6b\x3f\x96\xc8\x6c\xcf\xba\xb9\x40\x4d\x28\xc1\xe3\x55\xf7\xb0\x22\x85\x73\x71\x89\x2f\x3a\x8f\x4d\x93\x7b\xbf\x22\x62\x81\x6d\x1e\x87\x8f\x41\x1c\x3e\xb2\x63\xda\x5e\x90\x60\x98\x2f\xab\xcc\x4f\x9e\x32\x43\x4a\xcb\x7b\x5b\x4f\xef\x65\x3c\x3d\x1b\x1d\x07\xf6\x5f\x1a\x75\xa3\x57\xe8\x8a\x43\x6c\xe0\x53\x23\x2f\xd7\x66\x6d\x4e\xfb\xf4\x1b\x99\xdf\x1d\x56\xee\x17\x4b\x83\x8c\x3a\x8f\x04\x32\x8f\x65\xd9\x1a\xf7\x5a\x19\x62\x19\xf8\x0a\x63\xdf\x6e\x84\x5c\x83\xee\x13\x14\xbe\x8d\x55\xf0\xf3\xe2\x5f\x09\x46\x08\xc4\x19\x46\x6e\xee\x15\x7e\x7b\xe7\x59\x21\x87\x8c\xb9\x7c\x73\x48\x7a\xd6\x76\x3d\x77\xa8\x8c\xb2\x0a\xab\x7e\xbd\xd4\x73\xab\x77\x9f\xf9\x18\xb3\x0c\x1c\x2b\x70\x69\x9c\x1f\xfb\xa4\xea\x5b\xde\xaa\xb7\x66\x45\x8b\xd8\x25\xce\x13\xeb
\xaf\x69\x4d\x9b\xaa\x2c\x1a\xb6\x9d\x88\xa5\x78\xd5\xca\x72\xed\x17\x7c\x3c\x94\xec\x97\x3b\x9c\x72\xde\x15\xf8\x48\xc5\x38\x04\x5f\xda\x4e\x4a\x76\x4a\x6d\x19\x4d\x9b\x7e\x0b\xc6\xbf\xae\x1e\xbb\x5c\x37\x12\xfe\x11\x1c\x7f\x4d\x3d\x0f\xb6\xfd\xfc\x07\xc9\xb8\xa7\x9e\x07\xdb\xfe\x8d\x38\x7e\xac\x9e\xdb\x77\x33\x71\xf3\xc5\xa6\xef\x68\xe1\xbe\x6a\x1e\x36\xbc\x6f\xc1\xef\x57\x54\xf3\xb0\xd9\xfd\x21\xf2\xf5\x57\xf3\xb0\xd1\xfd\x21\xf2\x3d\x23\x1e\xd0\x77\xe2\xc4\x68\xcb\x18\xde\xcc\x57\x9a\x60\xa9\x91\x67\x5f\x06\x66\x33\x90\xaa\x68\xaf\x99\x50\x9b\xde\x7f\xec\xa8\xdc\xc7\xf9\xd7\xd4\x61\x95\xea\xe6\x86\xdf\x9b\xd5\xe7\xeb\x78\xa4\xc5\x23\xc7\xe1\xaf\x92\x6a\x9f\xd7\x78\xa0\xc5\xdf\x84\xd5\x47\xea\xe8\x3f\x5e\xf6\x35\x96\xfc\xb5\x5d\xf0\x2b\xaa\x78\xcc\xc6\xbe\x9e\xd1\xa7\xab\x78\xcc\xc2\xbe\xbb\x44\x7b\x9c\xc3\x43\xf6\xf5\xdd\x25\x8a\x79\x05\xcf\x50\xad\x4c\x58\x87\x08\x3e\x82\xa5\x8a\x93\x63\xf0\x79\xf3\x2e\x78\xed\xbe\x31\xc6\x95\x4d\x1e\x2d\x61\xaf\x38\xed\x0a\xa7\x3c\x8b\xfa\x45\x0c\x81\xf2\xa1\x19\x1f\x21\x3e\x7b\x78\x69\xeb\x97\x91\xc3\xd0\x93\x04\x34\xbc\x13\xc0\x57\xd5\x37\x8a\x80\x86\x77\x56\xf5\x55\xf5\x8d\x22\x80\x88\x63\x9c\xb3\x7c\x92\x00\x22\x8e\x67\xeb\x1b\x45\x00\x11\xc7\xb3\xf5\xe1\x04\xe0\x83\xa6\xc3\xd6\x3a\x62\xc0\x79\xae\x3c\x6a\x6a\x4f\xd5\x36\xa6\x3c\x6a\x68\x4f\xd5\x36\xa6\x3c\x6a\x66\x5f\x27\xc9\x9e\xf2\xa8\x91\x7d\x9d\x24\x47\xd5\x66\x98\xd8\xd7\x49\x32\x45\x7c\x8e\x7a\x05\xb9\x5f\xb0\xf6\x20\xff\x84\x64\xfb\x09\x38\xa2\x79\xbe\xbe\x51\x04\x06\xd8\x3b\x7f\x6d\xfb\x20\x81\x01\xf6\x1e\xa9\x0f\x27\xe0\x3e\x19\x37\x50\xbf\x39\x33\x79\x46\xbc\x7d\xe5\x1d\xd3\x7b\xba\xb6\x31\xe5\xfb\x79\x7b\x46\xb4\x7d\xe5\xfb\x79\x7b\xa4\x36\xb4\xfc\x80\x1e\x9d\x59\x9d\xfb\x49\x45\xc6\xc5\x7d\x9b\xd1\xac\xfc\x00\xfd\x4e\x86\xdc\xfe\x04\x9f\xf7\xc5\xa8\x7c\xb4\x89\xa9\xc3\x6e\x2e\x52\x7d\xcd\xc2\x5b\x8b\x62\x05\x75\x75\xcb\x60\x60\x7e\x31\x1b\x9a\xe2\xda\x14\xc5\x37\x23\x64\x66\x8a\xe1\xc6\x54\xec\x79\xa5\x98\x03\xd5\xcd\x06\xd8\xc5\x46\x06\xe0\x05\x08\x47\xef\x25\x1b\xfc\x98\x35\x96\xe6\x48\x21\x0e\xd3\x7b\x54\x34\x08\x45\x75\xff\x02\x7a\x4e\xd4\xc2\x78\x24\xf3\xf8\xde\xb4\x5e\xb2\x0f\x0a\xa7\x97\xe4\xb3\xf2\xb1\x89\xaa\x73\x22\xc8\x91\x4c\x80\xf1\x19\x4f\xff\xae\x30\xf4\xb0\x67\x3f\xe5\x47\x4d\xa8\x8f\xe4\xd3\x56\x64\x11\xe5\xa7\x31\x90\x93\xa1\x26\xc0\x27\x9f\xfe\x0d\x58\xe8\x81\xd3\x1e\xb2\x0f\x0a\xc7\x4f\xef\x59\xc9\xd8\x14\xd5\x39\x07\xe4\x74\x2a\xc0\x78\xe4\x33\xb0\xfb\x09\x3d\xf7\xda\x4f\xf9\x41\x11\xf5\x92\x7c\x56\x4a\x36\x51\x79\x96\x00\x39\x24\x6b\x43\x3c\x32\x1a\xd8\x6a\x84\x9e\xbe\xed\x25\xfc\xa0\x88\xfa\x28\x3e\x2b\x21\x49\xf3\x33\xcd\xf3\x9b\xf1\x44\xac\xb9\x9d\x7d\x17\x6f\x9f\x3a\x00\x6b\xba\x49\x3a\xef\x7e\x1e\x3f\x19\x3b\xb0\xcf\x66\x00\xca\xdb\x15\xb0\xcd\xf5\xff\x71\x2d\x5b\xe4\x31\x05\x5b\x6b\xe0\xf0\x29\x2f\x3f\xcd\xf5\x76\x9c\xd9\xc2\x79\x37\x8f\xdd\x98\xc1\x70\xcd\xc5\xba\xe3\xc0\x86\xb1\x00\x1f\x3f\xd6\x6f\xbe\x42\xd8\x73\x65\x87\xf7\xd2\xa8\x28\x8a\xf8\xe9\x5c\x6b\xb7\x51\x14\x30\x47\x59\x56\x24\xc9\xda\x2f\xbb\x70\xb6\x3f\x66\x79\x4b\xeb\x1d\xc9\xab\x33\xf9\x49\xa4\xff\x32\x8b\x3e\x08\x3e\xe4\x95\x38\xfc\x0f\x6b\xdb\x97\xaa\xa1\xff\x46\x16\x5d\xd9\x12\xaf\x6c\x19\x7d\xb8\x1f\xae\x6d\x5b\x16\xf2\x46\x03\x79\xc9\x29\xa4\xa4\x6d\x69\x17\xa9\xbd\x30\xc6\x5b\x44\xa4\xaa\x28\xa9\x49\x91\xc8\x93\x18\x97\x32\x25\xf9\xb4\xac\x68\x01\x8f\xa7\x88\x3c\xeb\x4a\x1b\x3d\xaf\x65\x97\x5f\x18\x8f\xd3\xf2\x57\x7b\xf7\xe0\xa5\x50\xbe\x71\x46\xbf\xb7\x2d\xb7\x9f\xc1\x87\x43\x97\xd1\xe0\x3b\xc0\x7b\xe3\xb5\x46\xc6\x58\x78\x24\x29\x0d\x44\x03\xd2\x8c\xe4\xe5\xe9\x66\x9d\x72\x3e\x96\xf
5\x85\x5f\x95\x9b\x93\x96\xfe\x14\x4d\xa6\xb3\xe5\xfb\x0f\xfb\xe9\xa5\xe9\xcd\xef\x2d\xeb\x9e\xa2\x76\xaa\x0c\xc2\xb9\xb8\x53\xa4\xbc\xb6\xfb\xe9\xa5\xfc\xdd\x82\xab\xbf\x11\x6c\x69\x21\x4b\x1f\xce\x00\xe1\x08\x29\xa1\xac\x78\x48\x3e\x51\x9f\x70\x22\x9f\x64\xba\x8e\x60\x55\xe2\x1e\xcf\x71\x1e\x7b\x94\x27\x86\x58\x31\x71\x85\x2f\x52\x6e\xec\x19\x1d\x7d\x33\x93\x91\x6a\x0e\x42\xb3\x0f\xee\x68\x83\xef\x4a\x9c\x57\x6f\xc1\x16\x0c\x81\x70\x53\xa2\x0f\x63\x70\x9b\x67\xd5\x4e\x5f\xcd\xf3\xe6\x18\xef\xb4\x03\xa7\x75\x59\xc1\xb7\x75\xc7\xf6\x98\x05\xf2\x54\x66\x37\xde\x40\xfa\xac\x97\xdc\xe4\x00\x13\xe1\xe3\x8b\x56\xa1\x2a\x96\x15\xb7\x31\xa3\x92\x28\x06\x9e\xcb\xec\xdd\x68\xf9\x03\x7b\xe3\x69\x69\xbe\xa4\x1e\xaf\xd4\x2d\x40\xda\x2c\x38\x4d\x79\x87\x8b\xf9\xa8\xfb\x4c\x83\xcc\x3d\x93\xce\xbe\x56\x49\x53\x35\x8e\xed\x16\x75\xac\x4c\x39\x25\xc3\x26\xc5\x9a\x17\xbe\xf8\x65\xba\xf2\x80\xdd\x6a\xc9\x3c\xb8\x71\xc7\x03\x77\x47\xf8\x02\x9b\x37\xdc\xae\x22\x08\x0f\x6d\xf1\x31\x34\x1e\x3e\x63\xaa\x76\x0f\xad\x45\x48\x39\xb9\xfe\x46\x49\xf0\xbb\xc5\xdc\x32\xcc\x8d\x7f\xd4\xbf\xda\x97\x30\xf7\xbc\x60\x66\x75\x72\x9e\xb5\x8a\xf4\xa5\x1e\xec\xaa\xae\x80\x5d\x81\x04\x3a\x36\xda\xd1\xd4\x93\xaa\x7d\x3d\xcd\x03\x92\x15\x34\xf2\xd1\xbe\x79\xc7\x07\xf6\xce\xdf\x76\x3b\x33\x78\xcf\x25\xdf\x5b\x8e\x0f\xdb\xb2\xcc\xdb\xcc\xe8\x84\xe4\xd0\x94\xf9\xb5\xa5\xd6\xa3\xd6\xe0\x78\x21\x7b\x87\x3d\xcb\xbb\xae\x21\x1f\xce\xef\xb9\x0f\x24\x0e\x17\xfb\x11\xdd\x4f\x70\x62\xf5\xbb\x2d\x0e\xdf\x9a\xf8\xb6\xb4\xae\x4b\x9a\xce\x0d\x2b\x5d\xb2\x4b\x5b\x14\xd4\x3d\xb7\x69\x82\xc5\x61\x50\x09\x16\x6f\x67\x1b\xa4\x7b\x28\x9b\xa7\x46\xb9\xed\xf9\x29\x4f\xb3\xc2\xbe\x06\x6d\x16\xc1\x3b\xff\x36\x43\x37\xa8\x8c\x3c\xfb\x67\xbc\x99\x6e\x9e\x87\x94\x8c\x90\xba\x2e\x3f\x23\xaa\x07\x17\xbe\x44\xf6\xdc\x16\xd9\x27\xce\x4f\xb6\xb2\x4e\x6e\x29\x26\x00\x55\xd9\xa3\xf9\x32\x7a\x6f\xdf\xa6\x6e\x0c\x9a\x9c\x05\xf9\xe0\xb0\xb5\xf5\xd8\x1c\xea\x8d\xba\x18\x8d\x81\x0a\xbf\x8a\x3e\xb3\x1e\x6f\x05\xea\x68\xe8\x93\x35\xa0\xd4\x59\x48\x34\x7a\xbf\xb7\x5f\xdf\x64\x66\xee\xad\xc9\xaa\x8d\x6f\x0d\xc0\xea\xc3\xa4\x25\xab\x03\xaf\xf3\xfb\xeb\x8b\x64\x8d\x7b\xe3\xfb\x25\x5a\x9d\x7c\x3d\xd3\xad\x70\xb4\x31\x38\x95\xd9\x2b\x50\xa4\x3a\xd4\x24\xcc\x3a\xbf\x41\x15\x5e\xbd\x79\x4d\x62\x4c\x25\x55\x59\xb1\xf7\x28\xdc\xbe\x69\xb0\x6f\xce\x86\x62\x3d\x46\x8b\xc7\x67\xd5\xe8\xb2\x5e\x99\x7e\xdb\x76\xd6\xec\x7e\x27\xcf\x24\xb3\x67\x32\xe7\x4e\x40\xd9\xbb\x60\xdf\x66\x02\x2a\x2f\x16\x77\x8b\x8f\x00\xd9\x37\x9b\xd6\x17\x92\x2b\x61\x3a\x9e\x82\x4f\xc1\x65\xae\xef\x50\xbf\xcc\x77\xfd\x81\x9d\xef\xba\x00\x2b\x1f\x4e\xd4\xac\x27\x45\x16\xf6\x1d\xaf\x0b\xb0\x80\x5f\xc0\x05\xfc\x06\x0f\x9a\xac\xbb\x9f\x9e\x69\xe7\xa1\xfb\x01\x6a\x50\x43\x46\xa0\xcd\x4e\x2f\x48\x74\x14\x22\xe0\xaf\xc1\x0a\xc4\x4b\xc8\x0c\x7d\x02\xfe\xde\x91\x63\x8b\x5a\xad\x3d\x7f\xf8\x3a\xff\x62\x57\x09\x0e\xc0\xc4\x2e\x93\x82\x29\x1b\xc7\xef\x28\x13\x2f\xa7\xbc\xb3\x6c\x44\x12\xc6\x87\xa5\x38\x76\x3a\xae\x6c\x8e\x3b\xc2\x1b\x17\xe6\xea\x54\xcb\x62\xd9\x7c\x4f\xdc\xc9\x60\xf2\x6e\x30\x22\xf8\x57\xdc\x06\xef\xf6\x5a\xb7\x7b\xc7\xe2\x46\x33\xc7\x6e\xaa\xb5\xac\x5f\xb6\xdc\x72\x3b\xbc\xc5\x56\xbf\x89\xc1\x80\x0f\xea\xb0\x9c\x8e\x79\x67\xb0\x91\x0e\x84\x80\x33\x82\xb4\x9c\x77\x4c\x56\xbf\x90\x5a\xf4\x00\x33\x56\x93\x39\x85\xf1\xda\x36\x0e\x00\xe1\x23\xb7\xd1\x54\x2b\x03\x2a\x5c\x09\xd1\xc3\x0b\xd2\x6c\xb1\x8e\xf2\x6b\x7b\x90\x39\xab\xe9\x5d\x79\xa8\x6c\xf9\xb0\x76\x9f\xb6\xb9\x30\x41\x3d\xa6\xcf\x37\x44\x60\x24\x7b\x95\x6d\xf0\x81\x34\x5a\x5c\xbf\xf2\x40\xf5\x3c\x38\xa2\x0d\xe3\x1e\x26\xa4\x2e\xaf\x0d\x76\xa1\xbc\xce\x13\x
13\x72\x77\x5d\x0c\xaf\xab\x31\x5f\xbf\xb7\x0b\xbf\x84\xec\x30\x9e\xe5\x82\x5d\x7a\x48\xf0\x4c\x5e\x31\x28\xae\xdf\x0d\x98\x43\x1e\xc8\xc7\x2b\xe7\xd7\xf5\xa2\x39\xfc\xe6\x5e\xcb\x83\xb8\x34\xc4\xd9\x5f\x27\xbd\xa0\x6f\xad\x9b\x5a\xd5\xf4\x15\x5e\xb2\x83\x53\xbc\xc9\x3d\x6d\x0f\x10\xf6\x4d\x7b\xfa\x14\xd0\x91\xbb\x09\xbf\x8d\xe5\x33\xc2\xb2\xe3\xf8\x08\x30\x8b\xc4\x99\x12\x13\x04\x5f\x6b\x78\x63\xf9\x2c\xa0\xaf\x16\x81\x33\x88\x01\x5c\xd7\x03\xea\x32\x1f\x9a\xfa\xa9\x85\x87\x90\xc9\xf2\xfd\x98\x20\xba\xf9\x99\x60\xf0\x9a\x4e\xf7\xd3\x00\xbc\x35\x74\xf5\xc1\x65\x9c\x8b\x60\xf4\x45\x5b\x4c\xe0\x8c\x8d\x69\xd3\x96\xd5\x4f\x76\x9c\x23\x88\x3e\xf8\x32\xa3\x28\x8a\x3f\x04\x9d\xf0\x3e\x8c\xb8\x1d\xab\x2d\x03\x26\xf2\x09\xa4\x3f\xf1\xd0\x34\x49\xd6\xb4\xa2\xa4\xdd\xf1\x7f\xa6\x6f\x52\xba\x55\x5d\x9e\xb2\x74\xf7\x5f\xff\xd7\xdf\xba\x2a\xff\x21\x63\xc3\xe1\xdf\xb3\xa4\x2e\x9b\xf2\xd8\x86\xaa\xfa\xa6\x25\x75\xfb\xd7\xae\x21\x4d\x5b\xff\xf2\xe3\x0f\x9b\x88\xff\xef\xc7\x49\x40\x8b\xd4\xc8\x88\x74\xc6\x7f\x17\x85\xff\xf1\xa5\xa2\xbf\xc4\x98\xa4\x0d\x23\x62\xa1\x65\x15\x34\xfd\x16\xc2\xe7\xb2\xf0\xcb\x7f\xf9\x95\xc2\x17\xf4\xa1\x42\xfe\x00\xe1\x47\x3e\xe1\x6f\x1e\x10\xbe\xfa\xe8\x05\xd3\xf9\xf7\x2f\x15\xee\x76\xba\x18\x8c\xda\x8c\x0a\x76\xc1\x5a\xc4\x9b\xab\xdd\xb8\xe4\xb2\x20\x32\xc1\xf8\xaa\x32\x4f\xf9\x97\xea\xcc\x10\xc9\x99\xbe\xd6\x25\x9f\x4e\x8c\x43\x72\x8b\xc3\x87\xa6\x6e\x12\x21\x17\xa4\xcb\x3d\xfe\x88\xe1\x83\xed\xc0\x59\x55\x73\x35\x2f\xbd\xf1\x4d\xe7\x0d\x12\x2b\xf5\x1e\x8a\x83\x92\xbe\xc9\x58\x5e\xf5\x86\xdd\xf7\xac\x16\x9c\xc8\x14\x8e\x8d\xc9\x47\x72\xc9\xf2\x2f\xbb\x86\xd6\xd9\xb1\x97\x0b\xf8\xee\xe3\x8f\xff\x9c\x45\xf3\xed\x8f\xbd\xb2\x40\xcb\x90\x1f\x2d\xef\x94\x66\x09\x69\xcb\xba\x41\xb4\x2b\x97\x1b\xec\xe9\x02\x39\x4f\x56\xb1\x87\xe5\x5e\x86\xc2\xc1\xe4\x79\x1e\xbd\x87\x6f\x46\x8e\x78\x00\x0b\x61\xc9\x7d\x0e\x6b\x6a\x2e\x24\x63\x43\xe6\x86\x88\x75\xac\xa3\x63\xb4\x68\x77\xd3\xed\x76\x8b\x3d\x00\x60\x3e\x35\x67\xbc\x6b\xe2\xff\xb2\x6c\x84\x6a\x82\x7f\x22\x8f\x40\x19\x43\x9a\xd5\x79\x8d\x16\xc9\xc9\x91\x8a\x07\x88\xa6\xcc\x8c\xa6\xcc\x7c\xb7\x8f\x98\xba\x96\x6f\x22\x38\x4a\xe3\x73\x8b\xa5\x9c\xd8\x77\xbf\x99\x7b\x2e\x74\xe8\x68\x6f\x3e\xa9\x61\x5d\x3f\x68\xe2\xbf\xe9\x4c\x81\x33\xcd\xbe\xc6\xdc\xcc\xb2\xec\x8b\xfc\xa8\x07\x41\xbe\xc3\xb0\xe6\xed\xde\x63\xfb\xfe\xdc\xb0\xc3\xb9\xd3\xf7\x97\x4e\xdf\xb7\x5f\x5a\x99\x5b\xeb\x15\xa5\x58\x86\x9d\xa9\xf5\xd9\xcc\xe8\x53\xc6\x03\x31\x78\x4f\x36\x77\xf1\xde\xc3\x24\xa7\xa4\x3e\x66\x6f\x62\x30\x98\xe8\x04\xb6\xf4\x9a\xe8\x47\x53\x34\x42\xa5\x40\xc8\xf4\x98\x5f\xb3\xd4\x05\x8a\x74\x01\xef\xd6\x75\x12\xa2\xd6\x78\x93\xb0\x73\xd4\xd3\x73\x59\x67\xbf\x77\xc5\xf2\x80\x27\xf0\x9b\x41\x24\xbc\x0f\x23\xc8\x1c\xda\x4e\xb6\x65\x7e\x20\x9a\x63\x33\xcd\x80\xf1\xab\x3d\xe4\x73\x48\x2f\x3a\xcd\x2a\xe8\x47\x09\x52\x05\x79\x55\x05\xba\xdf\x75\xb2\xc9\x83\xf8\xd3\xca\x14\x1f\x73\x01\x46\xa6\xda\x50\xb9\xeb\x0a\x82\x55\xba\x80\xb3\xc7\xe9\x14\x88\xff\xa5\xb2\xe4\x76\x2d\x23\x5f\x25\x09\x90\xf9\x65\x54\xc1\xac\x44\x77\x4d\x2e\x87\x61\xb6\xbd\xfc\xde\x63\x42\x1e\x83\x31\x0c\x63\x84\x31\x20\x8a\x1e\xa5\x54\x43\x5d\xb6\x8a\x50\x9d\x78\xa4\x6f\x49\xd9\x95\x2b\x26\x44\x21\xaf\x4e\x28\xbb\x43\xd9\x9e\xef\x21\x1f\x20\xc5\x77\x66\x3b\x0a\x3a\xe2\x25\x1d\xf3\xba\x59\x63\xb3\xd7\x9f\xb2\x4b\x55\xd6\x2d\x29\xda\xbb\x71\x69\xad\x7e\xb1\xc1\xcc\x3f\x67\x29\xb5\xe2\x12\x66\x66\x73\x2e\x3f\xdb\x5c\x99\xb9\x59\x21\xbe\xee\xde\x8c\x0f\xbe\x72\x6b\x14\x1b\xb6\x19\xf1\x6e\x08\xdb\x45\x3f\x47\x01\xd9\xbb\x01\x5c\x38\xba\xf7\x3e\xe5\xad\x76\x6b\x31\xb6\x53\x5a\x78\x18\xdf\x3b\xfc\x98\x6c\
x93\xe3\x31\x7b\x03\xbb\x4a\xee\x7f\x99\x5e\x9a\xe9\x6b\x46\x3f\x77\x30\x31\x5e\xa7\xf4\x35\x4b\x28\x77\x2c\xf7\x50\xb4\x75\xfa\xd6\x4c\xd4\xef\xcd\x45\xff\x7e\x49\xf5\xef\xf9\xc9\x27\x52\xff\xe3\xff\x9a\xbe\x57\xe2\xac\x53\x61\x40\xde\xdb\xda\xda\x9b\x37\xad\xcb\xcf\x26\xa1\xb3\xd9\x9c\x36\xf5\x97\x4b\x68\x9e\x1b\x05\xbd\x9b\x1f\x84\x1f\x56\xad\xda\x6e\x63\xab\x55\xcd\x65\x64\xab\x0c\xa0\xd3\x2a\x98\xe7\x6f\x55\x73\x31\x5b\xe5\x94\x1b\x6e\x15\xdf\x16\x01\x5b\x15\xc7\xdd\x0c\xd1\x68\xd6\x25\x1d\xd9\x2c\x03\xe8\x34\x0b\xe6\xf9\x9b\x75\x49\xcd\x66\x39\xe5\x86\x9b\x15\xb3\xbd\x04\x46\x03\x0c\x4b\xed\x6f\x80\x01\x74\x1a\x00\xf3\xfc\x0d\xc8\x4f\x66\x03\x9c\x72\xde\x06\x38\xbd\x85\x77\x6b\xd3\x5e\x41\x47\x7b\xd8\x52\x05\xc5\xe6\xf2\x00\xc5\x01\x2b\x11\x24\x0d\x3d\x0d\x93\x54\x1a\x12\x85\xfd\x43\x89\x1e\x92\xaa\x3a\x2b\xda\x81\x11\x87\x63\x3c\x45\xfa\x55\x6f\x63\x1d\xed\x23\xd9\x7e\x03\x60\x60\xd3\x06\xb0\xd2\x1e\x33\x10\x4d\x10\x82\xe9\x6d\xf4\xfd\xdf\xfe\xdf\x00\x00\x00\xff\xff\xdf\xbf\xcf\xa1\x7a\x86\x01\x00") +var _pagesAssetsJsPopperMinJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x7c\x6b\x97\xdb\xb6\xb5\xe8\xf7\xfe\x0a\xea\x9c\x96\x00\x2c\x88\x23\xa5\x37\xf7\xdc\x4b\x19\xd1\x72\x9c\x71\xe3\xd4\xf6\xb8\xf6\xb8\x4e\x4a\x73\xc5\x1c\x11\x1a\x21\xa6\x08\x16\x80\xe6\x51\x49\xff\xfd\x2c\xbc\x48\x50\xe2\x8c\xd3\x7e\xf0\x88\xc4\x63\x63\xef\x8d\xfd\x06\xe8\xb3\x27\x7f\x88\x9e\xf3\xe6\x5e\xb0\xeb\xb5\x8a\xe0\x73\x14\xbd\xa0\x25\x15\x6c\xc9\xa3\x7f\xb0\x1b\x5e\xf1\xe8\x9b\xe9\xec\x7f\xfe\x10\xfd\xc0\xa4\x12\xec\x6a\xab\x68\x19\x6d\xeb\x92\x8a\x48\xad\x69\xf4\xfa\xe5\x65\xf4\x8a\x2d\x69\x2d\x69\x04\x2b\xf7\xa0\xa8\xd8\xc8\xa8\x10\x34\x2a\x54\xb4\x56\xaa\x49\xcf\xce\x78\x43\x6b\xc9\xb7\x62\x49\x13\x2e\xae\xcf\xdc\x50\x79\xf6\xfa\xe5\x25\x4a\xfe\x10\x3d\x39\x83\xab\x6d\xbd\x54\x8c\xd7\x90\x62\x85\x76\x80\x5f\xfd\x46\x97\x0a\x10\xa2\xee\x1b\xca\x57\x11\xbd\x6b\xb8\x50\x32\x8e\x81\x5e\x7d\xc5\x6a\x5a\x82\x91\xef\xdc\xf0\x72\x5b\xd1\x85\xfd\x49\xdc\x50\xa2\x20\x4a\x81\x07\xdb\x41\xb2\xb3\xe3\xd8\xfe\x26\xc5\xa6\x5c\xd8\x47\xa8\x50\x4a\x93\xb7\xbc\x69\xa8\xd0\x93\x0f\x08\xaa\x35\x93\xb8\xc5\x0c\xed\xc0\x56\xd2\x48\x33\x62\xa9\xc0\xdc\xb7\x47\x14\x52\xb4\x13\x54\x6d\x45\x1d\xd1\x38\x06\x99\x45\x3e\x7a\xe1\x06\xe4\x80\x10\xb2\x3b\x24\x8a\xbf\x57\x82\xd5\xd7\xc9\xb2\xa8\x2a\x48\xd1\xa1\x85\xa0\x2c\xd5\x6c\x05\x67\x23\x42\x68\x52\xf3\x92\x5e\xde\x37\x14\x59\xa8\x59\x3e\xbf\x29\x44\xc4\xc9\x35\x55\xcf\xf9\xa6\xd1\xbb\xf0\x5e\xdd\x57\x14\x52\x5c\x6f\xab\x0a\xcd\xdd\xea\x6a\xc1\x33\x95\xa7\xbc\x83\xcc\x3b\xdc\xc0\x8f\x97\xaf\x5f\x69\x54\x2c\xfc\x37\xc5\x86\x2e\x68\x4a\x93\xa6\x10\xb4\x56\x6f\x78\x49\xf7\x7b\x9a\xac\xb9\x54\xdd\xf4\x5a\x4f\x67\x2b\x38\xf2\xa8\x44\x25\x5f\x6e\x37\xb4\x56\xc9\x15\x2f\xef\xe7\xf2\x96\xa9\xe5\x1a\x76\x10\xd1\x6e\x59\x48\x6a\x97\x4a\xcd\xe3\xf7\x17\x3f\xfc\x02\x52\xcf\x9e\x84\xdf\xd6\x54\xfc\xd0\x03\x62\x86\xfd\xb7\x07\x1c\x8c\x35\xbd\x07\x4d\x3a\x23\x0a\x52\x84\x05\x61\x09\xbf\xa1\x62\x55\xf1\x5b\xdc\x04\x2f\x3f\x63\x19\xbc\xfd\xe2\xf9\x71\x06\x8b\xad\xe2\x7b\xb9\x14\xbc\xaa\xd0\x59\xa2\xa8\x54\x50\x8c\xe5\xb8\x41\x0b\x9a\xd6\x50\x73\x27\xd8\x06\xa1\xa9\xb5\x9c\xa6\x71\x4c\x13\xbe\x5a\x49\xaa\xde\x1a\xfe\x60\x46\x78\x1c\xf3\x96\x50\xbf\x04\x8b\x63\x4b\xe2\x88\x10\xfd\x6c\x28\xd7\xcf\x8b\x89\xde\xcb\x0c\x5c\xfe\x00\x30\xb8\x7c\xf6\xfd\xab\x73\x90\x27\xac\x2e\xe9\xdd\xc5\x0a\x76\x70\x50\x1c\x03\xa9\x0a\xc5\x96\x7a\x6b\x14\xe4\x18\x34\x5c\x32\x23\xb3\x68\x21\x20\x47\x29\x4f\xe9\xe2\x98\x71\x9e\x5b\xe7\x15\xd5\x3f\x69\xf9\x40\x47\x47\x5c\xe3\x89\x53\x81\x00\x38\x2a\x5a\x0a\x54\x1c
\xc3\x56\x4e\xd4\x7e\x2f\x20\x4d\x56\x4c\x48\x0f\xee\xf9\x9a\x55\x25\xd2\x32\x14\xb0\x4d\x06\xf2\xaf\xc5\xd1\x88\x58\x27\x55\x0b\x9a\x4a\x18\x36\x04\x53\xcb\x56\xf0\x47\x74\xbf\x1f\x75\x92\xbf\xdf\x8f\x94\xfe\x77\xac\x0a\xd1\x43\x84\x3a\x0d\xa1\xc9\x92\x6f\xf4\x52\x9e\x53\x6f\x1d\x33\xa1\x42\xb1\x5e\x3c\xf9\xe1\xe2\xf9\x87\xd7\xe7\x6f\x2e\x7f\x7d\x7b\xf1\xfe\xe5\xe5\xcb\x8b\x37\xbf\xbe\xb8\x78\xf5\xea\xe2\xe3\xcb\x37\x7f\xd1\x7b\xbc\xa0\xa9\xc2\x35\xe1\x0b\x95\x52\x5c\x90\x76\xb9\xa5\xa0\x85\xa2\xef\x8a\xfa\x9a\x42\x34\x2f\x12\x49\xd5\x7b\x55\x08\x05\x19\x9e\x22\x6c\xde\xcf\xeb\x12\xd6\x78\x8a\x0c\x2a\x15\x29\x34\x2a\x1b\x5e\x3f\xab\x97\x54\x2a\x2e\x9e\xf3\x5a\x15\xac\xa6\x62\xce\x56\x90\x8e\x08\xa9\xe2\x58\xe9\x9f\xfd\x9e\x25\x4b\xdb\x29\x61\x8d\x3c\xa5\x0d\xac\xd0\xa2\x4a\x05\xac\x2c\xc4\x15\xd1\x8c\xf6\x72\xb7\x32\x9a\xba\x28\xa1\x7d\xc0\x0a\xa5\x9a\x99\x12\x2a\x64\x1a\x02\x26\x17\xdd\xce\xcf\x9e\x16\xe2\xda\x10\x24\x93\x8a\xd6\xd7\x6a\x1d\xc7\x37\x9c\x95\xd1\x74\x44\x48\xdb\x95\xcd\xf2\x45\xf8\x92\x02\xc5\x1b\x80\x39\x31\xbf\x5a\x32\x16\xc0\xea\xd4\x25\x6f\x40\xea\x9e\x5f\xd1\x95\x02\x98\x85\xd2\xc5\x56\xd0\x8a\x16\x21\x84\xed\xf7\xad\x64\x31\x8b\x4f\x4d\xbe\x22\xd6\x58\x9c\x8c\xb0\x6b\xb1\xfa\xda\x0d\xd9\xef\x6b\xcf\x12\x91\xf1\xfc\xe0\x8d\x87\x7e\x6e\x39\x50\x59\x31\xb3\x22\xf2\xcd\xef\xe3\xc1\x37\x79\x1c\x87\x6f\x98\x91\x02\x2a\x6c\x58\x80\x70\x6d\x5f\x2a\x4d\xb3\xb6\x4b\x7c\x31\x99\xa5\xb3\x79\x6b\xba\x14\x6f\xc6\x84\x3d\x11\x58\x5b\x31\xa5\xf8\xc6\xbf\xe9\x19\x63\x52\x9b\x67\xe3\x74\xdd\x4b\x87\xec\x2a\x44\x16\xdc\x39\x7e\x1b\xee\xa6\x40\x33\x1c\x33\x62\x5f\x09\xe1\x0b\xf0\x4e\x03\x01\x29\xf8\xde\x2c\x03\x3c\x0a\x4d\x21\x24\x7d\x51\xf1\x42\x41\x9a\x81\x2b\x2e\x4a\x2a\xc0\x98\x8f\xc1\x47\x56\xaa\x35\xc8\xf1\x6c\x8a\xc6\xc3\x83\x58\x6f\x50\x87\xd8\x46\x23\x86\x39\x66\xad\xb6\xff\x04\x55\x06\xac\x95\x04\x63\x9a\x63\x95\x39\x59\x30\x6f\x3c\x03\xcb\x8a\x69\x93\xee\xde\x82\x91\xbc\x37\x92\x51\x88\x16\xbd\x01\x63\x96\x81\x4d\x21\xae\x59\x0d\xc6\x10\xfc\x48\x0d\x91\xda\xb0\x2c\x80\x95\x39\xc3\x00\xf4\xd8\x38\xc7\x90\xd4\x71\x08\xe5\x69\x48\xcc\x1a\x5a\x16\x53\xd2\xf3\x68\x58\x91\x87\x2c\x0c\xe6\x44\xe3\x19\xc7\x27\x5e\x58\x79\xad\xdc\xad\x0d\x02\xe9\xa6\x45\x05\x1b\x96\x21\x7c\xab\x19\xaa\xdb\x2d\x67\x5d\xf3\xa1\xc3\x67\x19\x18\x51\x49\xe1\xee\x80\x29\xde\x19\x01\x49\x9d\xd0\xd0\xc4\x00\xc1\x56\x9e\x52\x2b\x63\x34\xb1\x4b\x1e\x02\xd2\xae\x3b\x37\xb6\x3b\x68\x1d\xd4\x68\x23\x25\xee\x77\xda\x3e\x5e\x53\xf5\x3d\xdf\xd6\x25\xab\xaf\x9f\x9b\xdd\x79\x47\x97\x0a\x5a\x13\xa3\x45\x9c\x86\x22\x4e\xbd\x88\xcf\xb9\x13\x69\xcc\xbd\x08\x63\xde\x89\x36\xe6\xad\x30\x1f\x96\x85\x09\x09\xd0\xee\x40\x2b\x49\xa3\xaf\xad\x29\xc8\x4e\x03\x4c\x2d\x5c\xac\x78\x93\x9a\xb5\x1c\xcb\x1c\xe0\x89\xeb\x76\x0c\xf6\x4b\x4f\xcc\xd0\x03\x6e\xc8\x50\x74\xb3\x86\x28\xdd\x1d\xb0\x24\x8d\x65\x9d\x8e\x6f\xac\x44\x7e\xb4\xaf\xc2\x01\x17\x16\x78\x49\x1a\xc7\xce\x6e\xe4\x8f\xee\x5d\xf8\x15\x85\x41\xae\x22\x3e\x3c\x30\xa0\x26\x12\x6f\xda\x16\x3b\x65\x52\x6a\xd6\x57\xfb\xfd\xc6\x6e\xc6\xb5\x09\x61\xe6\xd5\x84\xac\xe0\x35\x06\x77\x00\xe1\x8d\x7b\xbe\xd7\x26\xc4\xa2\x38\x21\x15\x16\x0e\x89\x09\xd9\x78\x73\xb6\x84\x22\xd8\xe0\x2d\xa4\x98\x5b\xa8\xcc\xc8\x24\x16\x1d\xfd\x5d\x68\x81\x1b\xa2\x45\x01\x4b\x72\x0d\x39\xc2\x25\xd1\xd1\x1c\x2e\x74\x90\x81\xf0\x8a\x04\xca\x5f\x24\x56\xf7\x2f\x79\x63\xc8\xd1\x7a\x8f\x37\x43\x23\xb4\xda\x75\x43\xd6\x64\x09\x77\x7a\xc7\x1a\xcd\x94\x89\x34\x7f\x57\xd8\xec\x67\x63\x78\x3a\x91\xf6\x67\xe3\xb6\xd3\xed\x84\xdf\xc7\xa6\x15\x5f\xcd\xac\x75\x62\x55\xf9\x92\x37\x64\x8a\xfd\x9b\x5e\x92\x4c\xf1\x88\xc5\xb1\xb
0\x44\x6f\xfb\x98\xb5\x93\x0c\x4e\x57\x43\x9d\x1a\x86\xee\x9d\xaf\x0d\x8a\x64\x35\xd9\xe2\xb5\xdf\x51\xf7\x66\xf0\x24\x9b\xc9\x15\x5e\x3b\xb1\x70\x2f\x1d\x52\xdb\x3e\x52\x57\x6e\x7b\x20\x5b\xf0\xce\x83\x97\x28\xe5\x84\x90\x32\x08\x0c\xcb\x30\xdc\x83\x6b\x52\xc1\x35\xe6\x08\xe1\x75\xb7\xa7\x57\x61\x78\xf6\xb8\x53\xe4\x64\x6b\x5c\x04\x66\xe4\x27\xa8\x42\x89\xc6\xb7\xac\x2e\xf9\x6d\xc2\xea\x9a\x0a\x27\xe3\x53\xad\xc9\xdd\x38\x2b\x9c\xbd\x81\x5e\xc4\xa7\x5a\x8e\x0a\xa8\x10\x6e\x7a\xde\x4d\x12\xb3\xc7\xc2\x2a\xdb\x98\x07\x0c\xb7\x3b\xed\x94\xb3\xed\x31\xdc\xb6\xfb\xcd\xfc\x4e\xd7\x87\x79\x2b\xcb\x32\x90\xe5\x5b\x4f\x37\x7b\x28\x2c\x3d\x8e\x1d\x16\xa3\x59\x0a\x56\xec\x8e\x96\x36\x62\xa6\x61\xc4\xbc\xdf\xdf\x1e\x47\xf5\xf7\xc6\x6d\x31\xec\x84\xa7\xb1\xd4\x4c\x2d\xee\x53\x6d\x1e\x6c\x14\x6a\xa2\x95\x1b\x46\x6f\x75\xfa\xa8\x41\x0b\xd4\x90\x2b\x28\xd1\x5c\xdb\x31\x33\xb7\x98\x3b\xaf\x65\x73\x02\x33\x68\x01\x0b\xa2\x33\x09\x85\x10\x6e\x11\x2e\x5a\x52\xe2\x18\x16\x5f\xdb\x51\x84\x52\x60\x37\xc4\x42\xfc\xea\x84\xb4\x20\xc2\x45\x99\x5b\x58\x60\x69\x71\xf7\x1c\x0a\x17\x1f\xdd\x42\x89\x2c\xe1\x2b\xb2\x86\x5a\xab\x57\x4e\xe7\xf0\x92\xac\xac\x36\xce\x1b\x6b\xdc\x2b\xa3\x1c\x55\xb0\xbf\x8d\xd3\x11\xb2\x19\x9b\x4e\xdc\x38\xeb\x5f\x59\x75\xa9\xc2\x1d\x6f\xac\xda\x90\xe5\xd8\xf6\x5a\xfb\xdf\x90\xca\x5b\x31\x3f\x99\xe1\xc6\x7b\x93\xc6\xab\x1a\x6b\xd7\x32\xcf\xdd\xf6\x9d\x87\x8a\x61\x8d\x87\x76\x29\x96\x86\x36\xf1\x7d\x12\xe4\xbc\x37\x3e\x4e\xc1\xb5\x9d\x29\xc8\xb7\xbf\x2f\xe6\xfb\x36\x8c\x7b\xbf\xcd\xd3\xa9\xe6\xeb\x64\x66\xbc\x8a\x4f\xdd\x80\x4e\x29\x41\x1b\xa5\xd3\xb9\x15\xaa\x7b\xa8\x17\x14\xb8\x6e\xf5\x65\x37\x68\xf1\x94\xe1\x71\x63\xdd\x96\xf5\xef\xed\x40\xcb\x0a\x65\x7f\x4f\x4c\xa4\x77\xfc\xc3\x70\x5b\xee\x29\xf7\x70\xb0\x02\xee\x46\x2b\xbb\x5f\x4d\xcf\x8b\xb6\xa0\x0f\xb8\x24\x17\xa6\x68\x91\x7c\xa1\xf7\x12\x4a\x94\x6c\x8a\x26\xa8\xc9\xf4\x42\x93\x2f\xf4\x3e\xa5\x07\x2c\x33\x9a\xe3\x5d\x21\x68\x91\x9e\x43\xfd\x82\x0e\xe8\x80\x12\xc9\x85\x3a\xaa\xe6\xf8\x4d\x4a\xf4\xe0\x09\x35\x3f\x07\xed\x89\xca\x64\xc5\x2a\x45\x45\x6f\xa5\xfe\x5e\xb3\xd3\xbd\xfe\x8e\xf0\xd0\xe8\xc5\x31\xeb\x5a\x7e\x74\xee\x04\x57\x64\xfa\xb4\x70\x7b\xbd\x28\xb2\x69\xae\x29\x4b\x4b\xf7\x80\x57\x84\x26\xb2\xa9\x98\x82\x60\x02\x50\x36\xcb\x3d\xf4\x6a\x0c\x57\x0b\x30\x01\xe3\x55\x0a\x40\x60\x45\x2e\xac\x50\x79\x5b\x55\x42\xfd\xe2\x27\x6d\xf5\xe6\x07\x83\x5f\x75\x74\x9c\x96\x6c\x10\xe6\xa1\x8b\x52\x9d\xba\xf5\x02\x72\xdf\x6e\xc3\x58\x6d\xec\x07\x3a\xb5\xde\x0d\xce\x32\x31\xaf\xb6\xfc\x6e\xff\x7b\xd1\xca\xb8\xb5\xca\xfd\x90\x65\xcc\x5b\x23\x5d\x77\xc4\xdc\x75\xc4\xd8\x48\x0d\x08\x1b\xda\x5a\xe9\xb5\x5e\xc2\x0b\xa7\xcd\x14\xb5\xf8\x03\xdb\x02\x0e\x5d\x52\x24\x68\x53\x15\x4b\x0a\xcf\xf4\x94\xbd\x99\xbe\xb7\xa3\xf6\x8a\x37\x67\xd7\x78\x40\xe0\x54\x46\xf3\x30\xb8\x7d\xef\xf7\x81\x13\x1e\xee\xe0\x34\x77\x11\xac\xe6\x7d\x47\x37\xeb\x6b\x09\x6b\x95\x49\x10\x57\x9f\x71\xc4\x58\x32\x82\x0a\x8d\xf6\x82\x62\x61\xe8\xf1\x34\x4a\xdd\x50\xd9\x44\xcc\xd0\x59\xea\x06\x0b\x11\x68\x13\x6e\x42\xfb\x42\x37\xda\xe7\xd4\x77\xb6\x6c\xcd\x9a\x9c\xa8\xac\xc9\xc7\x2a\x2b\xf3\xb3\x6f\x26\xcc\xfc\xe0\x3a\x93\x39\xd1\x51\x83\x5c\xa8\x4c\xe6\x13\x96\x15\x79\xaa\xb2\x3b\x28\x51\x8e\x83\xbd\xb8\xec\x29\xd4\x33\x21\x8a\xfb\xa4\x11\x5c\x71\x75\xdf\xd0\x64\xc5\xea\x72\x61\x7f\x6c\x31\xd3\x69\x97\xd2\xec\xe9\x80\xfc\xe0\x59\xc8\x56\x70\x08\xc4\x4b\xcd\x82\xd6\xbe\x75\x4d\x43\xf6\x80\x66\x2a\xd7\x41\xe7\xc1\x27\x10\x1a\xc3\xc7\xc7\xb5\x80\x3d\xab\x43\xcd\x79\x0e\x5d\x86\x69\xeb\x03\xd6\x46\x1b\xaf\xaf\x4d\xa7\xac\xd8\x92\xc2\x29\xfe\x41\x47\x27\x75\xb1\xa1\x00\x33\xd4\x42\xac\x93\x
15\x17\xe7\xc5\x72\xdd\x21\xaa\xd0\x4e\x65\x5d\x11\x38\x8f\xe3\x25\xaf\x25\xaf\x68\x72\x5b\x88\x1a\x82\xcf\x1b\x5e\xb2\x15\xa3\x22\xf1\x63\x3e\x47\x4c\x46\x25\x6d\x04\x5d\x16\x8a\x96\x38\xda\x4a\x1a\x05\xc3\xea\xcf\x23\xe0\x69\xed\x81\xde\xef\x55\xb2\xaa\xe7\x2a\xa1\x75\x71\x55\xd1\x32\x8e\x29\x64\x3a\xdc\xe3\x4e\xc7\x64\xd2\xd8\xda\xf2\xf2\xa4\x09\xe1\xae\x45\xd0\x15\x15\xb4\x5e\xd2\xde\xb8\xb6\x55\x5b\x0f\x06\x39\x56\x08\x1d\x10\x0e\xbc\xde\x1b\x68\xeb\x68\x6a\xcd\x64\x22\x55\xa1\x68\xc2\xe4\x0f\x54\x2a\xc1\xef\x69\xe9\x93\xdd\x1d\xab\xa5\x2a\xea\x25\x4d\x4d\x6d\x5b\x6a\x93\x24\x75\x92\x54\x08\xc1\x6f\xdf\x77\xaf\xca\x95\xfb\xcd\xdb\xaa\x62\x4d\x43\xcb\x74\x34\xc3\x0e\xa1\x74\x77\x38\xcc\xe9\x00\xd2\x17\xb0\x43\x00\x9b\x47\x4b\xa2\x7d\x0e\xc8\xa0\x89\x31\x06\xda\xd7\x92\x1b\x3b\x89\x37\x9a\x12\xd9\x75\xe0\x81\x15\x1e\x01\x8a\x7b\x50\xfc\x9e\xc9\x44\xa3\x9f\x5c\xe9\x04\xb4\x10\x8c\x4a\x1f\x47\x3f\x36\xba\x29\x4a\x9d\xad\x6a\x34\xb9\x60\xd7\xac\x2e\xaa\xb7\x2d\xba\x74\x10\x43\xb7\xbb\xef\x61\x88\xe0\x10\x01\xc1\x74\x74\x32\x3f\xf1\xb1\x2c\x01\xc5\x95\xe4\xd5\x56\x51\x80\x29\x79\x6e\xa1\xb6\x58\x62\x8a\x70\x6f\xa7\x9f\x9b\xfa\x64\xb9\xe8\x11\xc5\xeb\x0f\x4d\x59\x28\xed\x74\x52\x38\x34\x9c\x8c\xa6\xf8\x68\x86\xed\x31\x21\x74\x27\x5c\x5f\x7a\x76\x87\x26\x92\x6f\xe8\x89\xd3\xd6\x61\x99\x56\x4a\xe3\xb3\x9d\x16\x04\x65\x72\x6d\xde\x7a\x75\x8a\x8f\x7a\xe2\x8a\x0b\x68\x9d\x4b\x36\x9a\x61\xb0\x91\x00\x83\x8f\xf4\xea\x0b\xd3\x36\xf9\x35\xff\x17\xc0\xe0\x02\xe4\x26\xe4\x5b\xae\x0b\xf1\x4c\xc1\x29\x4a\x14\xff\xa0\x79\xf5\xbc\x90\x14\xa2\x31\x75\x76\x61\xa6\xed\xfe\x74\x5e\x3f\x55\xce\xef\x4f\x66\xf3\x7a\x3c\xf6\x2e\x5b\x65\x75\x8e\x05\x61\x0b\x00\xc6\x6c\xcc\x53\x5b\x9e\x1c\x38\x4e\xea\x15\x84\x12\xa3\x22\x99\xc8\xbd\x41\x14\x87\xa0\xd4\xdd\x11\xf3\x16\x76\x2e\x6b\x50\x03\x35\xab\xbf\x1c\xef\x22\x28\x9a\xa6\xba\x37\x6a\x07\xb4\xad\x08\x44\x27\x11\x74\xc3\x6f\xe8\x33\xaf\x88\x10\xdc\x4d\x5a\xb9\x01\x28\x54\x03\x8b\xa3\x09\xed\x08\x00\x03\x3d\x9d\x4c\x0d\xf5\x2a\xde\x0c\x76\x64\x1f\x21\x50\xa2\xa8\xe5\x8a\x8b\x0d\x40\x39\x01\x7e\xd9\x92\x49\xbd\xbb\xe7\x37\xb4\x56\xaf\x98\x54\xb4\xa6\x42\x42\xd4\x17\x25\x8b\xff\x45\xed\x38\x10\xc7\xe1\x02\xdd\x69\x80\x1b\x67\x4e\x16\x42\xfa\x2d\xb4\x8e\xc1\xdf\x3f\x98\x20\x77\xa7\x5f\x2a\x29\xe9\xaa\xd8\x56\xea\xef\x8c\xde\xa6\x36\xa5\xea\x20\xfc\xd8\xd5\x2f\x6d\x56\xd0\xa6\x6b\x34\xac\x99\x88\xd3\xc3\x96\x00\x28\x9d\x37\x49\x51\x96\x3d\xd2\x8d\xcf\xda\x35\x85\x94\xec\x86\xa6\xa3\xe9\x01\x61\xb1\xdf\xff\x08\x6b\xd8\x84\xc7\x1e\x6e\x71\xcc\x92\x66\x2b\xd7\xb0\x09\x54\xe1\x59\x87\x1a\x4f\xb6\x46\x67\x4d\xc1\x8c\x30\xac\xe9\x3e\x5d\x11\x08\x2a\xd9\xbf\x28\xc0\xbd\xe1\x7d\x24\x5c\x79\xad\x0e\x4e\x0d\x7e\x84\x02\xfb\xc2\xeb\xd1\x54\x9e\x84\xa9\xad\x44\x6d\x83\x33\x97\x44\x60\x9e\x50\x8d\x84\x3c\xb7\xba\xad\x25\x3a\x70\x40\x2f\x21\xda\x05\xa2\xdf\x1b\xba\xdf\x07\xe6\x87\x3c\x83\x8f\xd8\x6d\x7c\xec\x3f\xe4\x72\x4d\xcb\x6d\x45\xad\x29\x0b\x33\xfb\xd7\x3d\xab\x64\x18\x65\x85\xe9\x01\x5e\xa9\x90\x60\x84\x55\x9f\xe2\xd3\xe8\x81\xa2\x1d\x1d\x86\xe8\x59\xd8\x87\x78\x40\xfd\x06\xa2\x4d\xc4\xf1\x32\x24\xcb\xdb\x26\xcf\x5a\x37\xee\x88\xbb\x33\x1c\x1c\xdf\xbd\x7b\x84\xbb\x71\x0c\x97\xda\xa7\x57\xcf\x6a\xb6\x29\xf4\xf0\x17\xa2\xd8\x50\x38\xc4\xbe\x80\xbd\xe4\xf5\xe0\x3e\x98\xbe\x90\xcd\x1f\x82\x33\x64\x30\x22\x84\xc6\xf1\x88\xc9\x37\xc5\x1b\x18\x9e\x1f\x20\x14\xc7\x4c\xbe\x60\x35\x33\xde\xa3\x9b\xfe\x8b\xdd\xa5\x30\xbf\x54\xe8\x94\xd9\x6d\x6a\x05\xc0\xdc\x85\xe6\x2e\xa0\xf6\x41\xb4\x2d\x4c\xe3\x36\x62\x77\x09\xc6\x40\xe8\x1e\xc7\x1f\xa0\xca\x78\xae\x4d\x2a\x23\xa0\xb9\x03\xda\xd1\x5a\xa3\
xc6\x75\x00\xce\xf3\x31\x0b\x5d\xd1\x6f\xff\x26\x8e\x1a\xc2\x7c\x34\x33\x81\x29\x3d\xb1\xd4\x5c\xc7\xde\x92\xaa\xa0\x05\x1b\x74\xc2\x25\x5f\xf4\x13\xca\xe3\xb8\x39\x74\xa8\x5e\x7b\x9d\x07\xc5\x35\x19\x8d\x58\x1c\x0f\xb8\xe1\xd6\x41\xeb\x69\x3a\xde\xd6\xa3\xba\x80\x34\x31\x35\xdc\xa7\xcc\xfe\xda\xa2\xeb\xa8\x2d\x92\x80\xcf\x60\xac\xc6\xe0\x33\x98\x1f\x05\xc9\xe6\xdc\xe8\x33\x18\x83\xc8\x7b\x2e\x1d\x24\x0b\xfa\xcf\x2d\x13\xb4\x8c\xae\xee\x23\x30\x16\xbd\xde\x3a\x32\x2b\x44\x8a\x47\xb7\x5c\x7c\xc1\xd1\x15\x8d\xe4\x56\x50\xdd\xc0\xea\x65\xb5\x2d\x69\xc4\x54\x74\x45\x57\x5c\x50\x3b\x7b\x04\xd0\xe1\x34\xf9\xfc\x6b\x20\x7b\xb4\x2e\xdd\xd1\x8e\x54\x85\xd0\x19\x97\xfd\xb5\x6d\xba\x37\x0d\x8e\xd1\xfe\xf9\x1f\x9c\x7a\x86\x27\x7e\x33\x1d\x74\x14\x5d\x96\x42\x75\x0a\x5e\xf8\x58\x83\x8f\x67\x28\x59\xf2\x7a\x59\x28\xd8\x36\x4e\x31\x47\xc1\x5d\x0c\x96\x08\x7a\x43\x85\x8e\x51\x52\xd6\x21\xf6\xf7\xbe\x1b\xaa\x09\x4d\x36\xe6\xc8\xe4\x0c\xc2\x45\xfa\x69\xb2\xff\x34\x46\x8b\x4f\xe5\x93\x4f\x89\xfe\x8b\x60\xf2\x04\x9d\x21\x2c\xc8\xb8\xd6\x38\x35\xa4\xce\xbe\xc9\xcd\xc6\x89\xae\x06\xc5\x56\x50\x67\x49\x4d\x57\xa5\xfa\x13\x70\x65\x3f\xe9\x2f\x69\x34\xee\x6e\xc6\x9f\x1a\x90\x4a\xc2\xe7\x57\x82\x16\x5f\xec\xe5\x8b\x3f\xb9\xbb\x1a\x7f\x12\x20\x75\xce\x2e\x95\x84\xd9\xbb\x17\x25\x59\x42\xd9\x92\x55\x66\x2a\x3f\x9b\x4d\xa7\x4f\xc4\xc1\xd4\x4b\xd7\x9a\xfd\xcd\x7e\x0f\x6e\x4c\xf1\xb2\x41\xae\x50\xea\x86\x17\xc4\x0f\x59\xfc\x04\x1f\x3a\x59\xfb\x3d\xa5\xe9\xf4\xab\xd3\x1f\xab\x80\x17\x0e\xe5\x36\x86\x6b\x77\xe3\x5f\xc7\xbb\x91\x4d\xf1\x34\xff\x6a\x79\x80\x21\xdc\xb4\x25\xa4\x33\xf8\x69\xbc\xff\x34\x41\x67\x0f\x96\xcd\x68\xa2\x04\xdb\x40\xed\x21\x64\xb0\x4b\x97\xb0\x19\x48\x96\x27\xf6\x12\x90\xa4\x85\xd0\x52\x81\xf7\x9f\xe4\x19\x3a\x20\x34\x6f\x32\x99\xc7\xb1\xa9\x47\xea\xc7\x6e\xb3\xb1\x0e\x1e\xfb\x4a\x7b\x61\xb3\x8a\x48\xd2\xa6\x10\x3a\xd4\xd7\x6a\x7a\xbb\x66\x8a\x46\xb2\x29\x96\x14\x4a\x64\xee\x68\x1d\xa7\xba\x45\xb4\xe4\x9b\x4d\x11\x41\x8c\x22\x9d\x2b\xd2\xa2\x4c\x5c\xc2\x5b\x92\xb3\x4f\xf2\x09\xfe\x24\x9f\xec\x3f\xc9\xf1\x19\x2e\x88\x41\x45\x2e\xb2\x26\x4f\xb3\xa6\x55\x02\xd9\x6a\x46\x66\xd0\xb4\x4c\x2a\x51\x36\xcd\x73\x84\x8f\xda\x66\x79\xee\x47\x7b\x08\x72\x3c\x43\x28\xef\x24\xa8\x38\xe2\x6a\xb7\x57\xd0\x5a\xe0\x91\x48\x05\x3a\x2d\xc4\x34\x64\x34\x0b\x8b\x4f\xe5\x76\x49\x07\x2b\x93\xc0\x98\x90\x8c\xb6\x69\x83\xe1\xb2\xd9\xfe\x31\xc0\x60\x12\xec\xbb\x42\x0b\xd8\x1b\x49\x94\x5e\x67\x8a\x29\x4a\x9b\xa3\xae\xb1\xed\x9b\xe9\x3e\xea\x89\x54\xe8\x80\xb3\xfc\x41\x41\xd1\xc6\xa1\xb6\x67\xc3\x5a\x58\x8a\x81\xa0\x44\xe3\x4d\x07\x7c\x93\x66\xcb\x07\xe3\xfc\x60\x9d\xa9\x7c\x4c\xf8\x13\x08\x26\x96\x34\x36\x99\xe5\xe6\x86\x02\xb2\x70\x03\x0b\xfb\x97\xe0\xba\x01\x66\x44\xb9\x7c\x14\xd7\xbd\x34\x57\xb4\xa7\x9b\x52\x87\xc8\x3e\xbd\x95\x44\x04\xe1\x43\x49\xea\xa3\x92\x5c\xeb\xb8\x3e\xc0\x31\x43\x8b\x6c\xcc\xf0\x34\x4f\xff\x05\x19\x6e\xb0\xc4\x25\x72\x7a\x45\x08\x29\x17\xd0\x1d\x0a\xf0\x6c\x9a\xbb\x73\x86\x09\xe1\xd9\x2c\x47\xbe\xe2\xf8\xd0\xb0\xb1\x1f\xe6\xae\xa7\x98\x41\xbe\xc3\x8c\x32\x87\x7e\x6e\x90\x0b\x1b\xec\x11\xdd\xc0\x40\x07\x4d\xa7\xe9\x36\xa3\x6f\x30\x3d\xf8\x34\xf5\x57\xf2\xba\x50\xeb\x64\xc3\x6a\xfc\xb3\x7d\x5c\x55\x9c\x0b\xfc\x93\x6b\x2f\xee\xf0\xdf\xc8\x50\x52\x69\xed\xd2\xf0\xfd\x45\x6f\xcf\xf0\x3f\x48\x06\xce\xcb\x6b\x0a\x30\xb8\x14\xac\xd4\x89\x1e\x06\x2f\x98\xa0\x2b\x7e\x07\x72\xfc\x47\x32\xc5\x94\x92\xe9\x9c\xd2\xa7\xff\x70\x72\x36\xa7\x74\x4c\x66\x88\xad\xe0\xdf\xe2\x78\xfa\x94\xd4\xc5\x0d\xbb\x2e\x14\x17\xc9\x56\x52\xf1\xec\x5a\xdb\x47\x2f\xbc\xff\xc8\x28\xcd\x11\xda\xfd\x91\xcc\xac\xf5\xb7\x77\xeb\xb0\xa2\xe4\x6f
\x71\xec\x2c\xe7\x5b\xc1\x37\x4c\x52\xcc\x29\x51\x74\x71\x5a\x98\xef\x74\x2a\xb8\x18\xa9\x74\x80\xaf\xd5\xa0\x0f\x24\x11\x54\xf2\xea\x86\x42\x94\xa8\x35\xad\x61\x38\xc3\x28\x86\xb6\x88\xe8\x70\x48\xff\xdd\x65\x24\x55\x97\x6c\x43\xf9\x56\x0d\xc2\xc4\x7f\xd4\x50\x31\xa3\x24\xe8\x75\xe0\x7c\x49\x91\x99\xd8\xd0\xa8\x79\xc7\xb3\xa2\x69\xfe\x4e\x85\x64\xbc\xee\x8c\xeb\xeb\xf7\x2f\xcf\xa3\xd9\x14\x20\x84\xd9\x01\xd7\x01\xcc\xf6\xee\x1a\xa4\x91\xaf\xab\xf1\x55\xa4\x10\x52\x6b\xc1\x6f\xa3\x9a\xde\x46\x97\xf7\x0d\x3d\x17\x82\x0b\x08\x9e\x17\x75\xcd\x55\xb4\x2c\xaa\x4a\x9b\xd9\xaa\x90\x32\x2a\x64\x54\xb4\x14\x02\x74\xc0\xa2\x87\x73\x78\xc7\x54\x2f\xe6\xc5\x90\x1f\x55\x3a\x4c\x9d\x83\xdb\x12\x07\x4f\x68\xbd\xdd\x50\xa1\x83\x3e\x12\xbe\xec\xf7\xa3\x19\x36\x07\xd5\x2b\x76\xbd\xb5\xfd\xa3\x29\x06\x37\x45\xb5\xa5\x40\xc7\x6c\xa6\x5a\x79\x2b\x98\x72\x7d\x08\xbb\x88\xd8\x8a\xec\x5b\xc1\x1b\x2a\xd4\x3d\xa4\x98\x9b\x53\x14\xde\x86\x6c\x5d\xc5\xb5\x77\x41\x48\xc7\x9f\x50\x75\xf5\x65\xcc\x11\x66\xa6\x4d\x27\xc2\xea\x70\x80\x08\x37\x7d\x8e\xea\x90\xd8\x87\x50\x3a\x90\xa4\x8b\x87\x70\x50\x78\x67\x50\x4f\x39\xee\x88\x4c\x47\x53\x1c\x52\xa8\xdf\x3d\x45\x26\x2b\x4e\x4d\x29\x9a\x63\x7a\xc0\x92\xfa\x53\x2f\x9d\x35\x5f\xd7\xfb\x7d\x28\x87\x6d\x65\x0a\x73\x32\x9b\xf3\x93\x18\x72\xce\xc7\x63\xe4\x07\x31\x8d\xaa\x0a\x82\x49\x9e\x63\x85\x1c\xf4\xae\xbc\xbe\x2e\xe4\xc5\x6d\xed\x69\xb0\x17\x81\x35\x2b\xe2\x18\xd2\x8c\xe9\xcc\x84\xe5\x5d\x85\xfc\x80\x4b\x4a\x32\x73\xe0\x38\xb1\xa1\x2e\xb6\xa7\x8f\xf6\x67\xa2\x43\x5e\x93\x14\xb5\xbd\x36\x41\xd2\x2d\xb6\xcf\x9e\x28\xfa\x5e\x1f\xcb\xd8\x56\x3b\xc2\x9d\x1a\x86\x2f\x5d\xab\x9f\x68\x6c\xb1\x1d\x62\x4f\x42\x6c\x8b\xed\xce\x71\x41\x49\xe9\x63\xe0\x3f\x23\x5c\x51\xb2\x7b\xf1\xea\xe5\xdb\x14\xac\x2a\xd6\x00\xfc\xfc\xd5\xc5\xf3\xbf\x7e\x7c\xf9\xfe\x3c\x05\xcb\x8a\x2f\xbf\xdc\x32\x49\x01\x7e\x7e\xf1\xe1\xcd\xe5\xf9\xbb\xb0\x93\x6f\x6b\x45\x45\x37\xe6\x80\x57\xc3\xda\xa0\x60\x10\xb0\x99\x42\xb6\xf8\xfd\xd7\xfa\x16\xe1\x4b\xba\x3b\xcc\x6b\x9b\x29\x63\x85\x86\x0a\x0e\x03\x26\x44\x67\x3e\x54\xaa\xa3\x6c\xbb\x76\xa9\x3f\x3a\x58\x30\xf6\x8d\x70\x97\x87\xdb\xd7\xe4\xca\x1c\xce\xac\x99\x44\xfd\x72\x19\xb1\x97\xbe\x54\xf2\x83\x8d\xbf\x25\x16\xbd\x5c\x7d\x17\x94\x12\xd3\xd1\x0c\xb7\x25\x5c\xfd\xd2\x2b\x30\xa4\x59\x7e\x38\x2a\x8c\xdb\xab\xcb\xbf\xfd\x73\x4b\xc5\xfd\x42\x7b\xba\x94\x87\xe5\x3e\x6d\x0b\x99\xef\x66\xba\x9b\x3d\x50\x1c\x27\xbb\x03\xee\x1d\x14\x1f\x21\x1d\x94\x36\x45\xf7\x8c\x06\x52\x69\x8a\x76\xf5\x29\xfc\x8c\xe6\x27\x8c\xe8\xf5\xee\xf7\xbb\x43\x08\x7a\x21\x7a\xdd\xe9\xce\x84\x37\xfd\x2a\x6b\xef\x68\x7b\x98\xae\x47\xcf\xbb\x75\x2e\x9d\xd2\x03\x1e\xc6\xf7\xf1\x33\x6f\x97\x72\x4f\x94\x4f\xb9\x8f\x70\x1b\x3e\xb2\x0a\xcf\x90\x54\xc2\xeb\x57\xbc\x28\x51\x1c\xfb\x47\x58\x07\x41\x57\xed\x63\xb1\xba\xab\xa0\xe1\xda\x55\x71\xfc\x7a\x56\xf8\xdc\x8d\xbb\x86\xf4\x98\xd0\x2b\x25\xcd\x1b\x57\xa7\xb5\x18\x0c\x57\x78\x07\x4a\x50\xa4\x69\x73\x2b\x6d\xdc\x33\x73\x49\x00\xd8\x65\x01\xb6\x66\xfa\x54\x91\xde\x38\x0b\xa8\xd5\xe1\x70\xc0\x76\x52\x69\xc5\xfc\xe1\x59\x6f\x87\x66\x0d\xe1\xfb\x30\x88\x97\x83\x0b\x0f\x55\xb5\x1f\x86\xf1\xae\x07\x23\xd7\xee\x0c\x76\xd7\xb3\x69\xf2\x41\xb1\x4a\x92\xf0\x84\x81\xf4\x83\xc1\xc5\x75\xc5\xaf\x8a\xca\xd5\xa9\x91\xfb\xf4\xc4\x4c\xc3\xab\x20\xf8\x96\xa4\xa4\xba\xc1\x2b\x04\xd9\xb5\x5d\x6d\x34\x8b\x7b\xbb\xa1\x9d\xde\x51\xf9\xdd\x9c\xdd\xb9\xc3\x9d\x90\x98\x03\xf6\x87\x44\xfd\xd6\x56\x42\xd3\x9d\x5c\xb3\x95\x4a\x77\x46\x80\xd3\xd9\x74\x8a\x69\xb7\xca\xaa\x1e\x88\xdd\xc2\xc4\x81\x13\xd5\xcf\x09\x4c\x8e\xd1\xbf\x7a\xc1\x74\x3e\xdd\xdd\x08\x77\x69\x86\x20\xa1\x98\x37\xa4\xee\x9
2\x0e\x97\x9c\xb5\xee\x4a\xbb\xbd\xde\xd1\x7d\x49\x64\xff\xa4\xbe\xd0\x0d\x47\x87\xf2\xb8\x22\x3b\xe3\xc2\xd2\xc6\x98\x9c\x12\x8b\xac\xd4\x81\x7f\x5d\xf6\x5a\xc6\x22\x2b\xf2\x49\x93\x15\x39\x0a\xcf\x3c\x9d\xed\xb4\xe6\xaa\xc1\x95\x76\xdd\xed\xf5\xf3\xc3\xc1\x9d\x94\x7a\xbe\x7d\x73\xc2\xb7\xbf\xf8\x11\xd3\x03\x6e\x84\xd9\xbf\x0b\xf7\x01\x8b\x9f\xf4\xe7\x47\x98\xdd\xdd\x11\x57\xa7\x67\x9b\xf6\xcb\x0d\x1f\x93\xfa\x13\x95\x79\xd0\xd4\xf9\x06\x53\x25\x84\x9c\x08\xc8\x91\x3f\xd9\xbe\x3f\x9d\x8d\x87\x26\x63\xe5\x4f\x4a\x31\x47\xf3\x10\x11\xc2\xe6\xce\x3d\x27\x8d\x60\x5c\x30\x75\x6f\x2a\x26\x7d\xee\x61\x2d\xcc\x82\x6d\x0a\x71\x7f\x22\x47\x9c\x34\x19\x6d\x53\x48\xfd\xfc\x94\x65\x34\x8f\xe3\x91\x4a\xa8\x5c\x16\x0d\xfd\xc8\xd4\xfa\x9d\x47\xc5\xd0\xf0\x13\xd4\xe3\xb0\x1e\x87\x74\x64\x69\xef\x51\x73\xa4\x63\xbd\x25\xd7\xa8\x0d\xad\xd3\xa5\x97\xb4\x2f\x35\x35\x69\x32\xde\x43\xe1\xbb\xaf\xa0\x50\x93\x5f\xa1\x9e\x63\x50\x98\xc0\x10\xb2\xbb\xb0\xd5\xde\xba\x42\x2d\x86\x1c\xd7\xe8\x70\x78\xe4\x1a\x43\xab\x59\xa6\xe0\x92\xf9\x20\xac\x2f\xf6\x14\x2d\x40\x4b\x25\x48\x81\xe3\x2b\x98\x37\xad\x8c\xca\x4c\xe5\xe6\x64\xf7\xf4\xdc\xd9\x64\xb9\xd8\x6f\x55\xda\xae\x21\x7a\xa5\x77\xa7\x71\x39\x76\xbb\x9e\x7e\x8b\x4f\x64\x2f\xed\xdf\x91\x3c\xe0\x2f\x94\x36\x97\xfc\x9a\xaa\x35\x15\x5e\xb2\xff\xcf\xef\x32\x23\xde\x14\x68\x19\x77\x02\xa3\xad\x47\xe0\xfc\x42\x5b\x73\x64\x67\x04\xf9\x19\x37\xde\x54\xf4\xd1\x6f\x79\x66\x6e\xeb\x35\x0b\x47\x65\x67\x4d\x4b\xdd\xd8\xb7\x1f\xcd\x83\x97\x7a\x78\x26\xf3\xa7\x02\x32\x6d\x3c\x74\x20\x7f\xc4\xd9\xac\xcc\x89\xeb\x9d\x70\x6d\x44\x30\xcf\xca\xfc\x3b\xdd\x24\x1f\x9d\x20\xb5\x0c\x6b\x4b\x62\xee\x66\x78\xce\x7d\xfb\x98\x4d\xf0\xc7\x09\xa6\x3e\xfc\x22\x54\xe2\xf0\xcc\x59\x83\x03\x18\x84\xfb\x72\x7c\xa1\xb1\xd6\x99\xa3\xfb\xb2\x8a\xad\x20\x90\xe6\xcb\xc5\xce\x83\xd5\x26\xf1\xad\xc9\x89\x99\x48\x4c\x14\xf9\x9e\x56\x74\xa9\xb8\xd0\x0c\x1e\xd5\x1d\x68\x7b\x25\xd4\x7c\xee\x75\x32\x71\xe0\x13\xa8\x7e\x49\xf3\xe3\xb3\x77\x6f\x5e\xbe\xf9\x4b\x1a\x7d\x36\x14\x78\xfc\x3e\x47\x9b\xad\x54\xd1\x15\x8d\x96\x6b\x56\x95\x11\x5f\x45\x4c\xc9\xc8\x42\x8d\xdc\xa0\x11\x40\x98\xba\x03\xd1\x87\x45\x26\x30\x50\xa6\x4e\xeb\x64\xae\x24\x4d\x20\x73\x85\x97\xa9\x9e\x86\x74\x32\x25\x10\xae\x48\x71\x5a\x8e\x5c\xe9\xc6\xe0\xe3\x15\x73\x15\x57\xf1\x57\xfc\xd6\xdf\x64\xc0\x6b\x3d\x24\x14\xbb\x6b\xdd\x70\xe5\x3f\x64\x71\xba\xb8\x25\xaf\x60\x8d\xb2\x2a\x9f\x97\xd9\x75\x3e\xd9\x3e\x95\xd9\x26\x1f\x92\xa3\x4d\x3e\x21\xba\x6f\x02\xed\x40\x84\x70\x99\x6d\xf2\xf1\xf6\x3b\x99\x5d\x3f\x30\x63\x4c\xec\x90\x89\x1e\x32\x60\x26\x96\x27\x93\xac\xcf\xb8\xb2\xf3\xca\xac\xca\xcf\xbe\x99\x6c\xcf\xbe\xc1\xb7\x44\x0d\xf8\x20\x7c\x1f\xde\x63\xbc\xed\xbe\xde\x59\x99\xaf\x8d\xf0\xf9\x51\xb7\xff\x28\x69\xd5\xfb\x28\x09\xdf\x90\xab\xc9\x10\xbd\xf7\x93\x73\xaf\x96\x37\xe4\x27\xf8\x2b\x94\x59\x95\x4f\xb6\xf8\x06\xe1\xa9\xa6\xc6\x48\x4e\x7b\xec\x1a\x90\x67\x3a\x08\x64\x3a\xc3\x69\x28\x64\x78\x83\x4d\x45\x4f\x68\xf3\x06\x6f\xac\xad\x66\x78\x8d\x01\x40\x98\x69\xbd\xc4\xd4\xdb\xbb\xec\x6e\x62\xa6\xe7\xc0\x5e\x95\xf2\xaa\xfa\x7f\xbf\xe2\xbe\xd9\x0a\x7e\x79\x40\x47\xcd\xf9\x45\xa8\x94\x6c\x05\x69\xe2\xee\x61\xc5\x71\x78\x79\x8a\x90\x81\x3b\x4a\x7d\x6d\xe6\xff\x89\x43\x1f\x08\x2b\x90\xb9\xda\xf3\x90\xf6\xd4\xe4\x0e\x32\x84\x1f\x54\xb0\x59\xbe\xdf\x03\x80\x1b\x92\xe5\xfe\x4c\x4a\x25\x57\x74\x5d\xdc\x30\x2e\xec\xe1\x54\x54\xd1\xc4\x94\x0e\x1a\x92\x31\x5c\xe7\xc1\x21\x95\xee\xea\x8a\x05\x0d\xf9\x27\x64\xe8\xb8\xfb\xb8\xa4\x60\x46\xe1\xd1\xd4\x0f\xf4\x67\x5b\x0d\xe9\x16\x9e\x77\xb7\xd3\x4f\xfc\xae\xc4\xa5\xd9\x25\x36\x22\x44\xee\xf7\x8d\x2b\x2c\x10\x42\xca\xf1\x
2c\xd8\x9b\xaf\x32\xc5\xec\x42\x71\x1a\xfe\x54\x64\xe8\x82\xd8\x8a\xfc\x8c\x37\xa4\x2d\x8d\xb3\x38\x5e\xc1\xc2\xde\x0a\x47\xdf\xad\xa0\xbd\x5d\x8f\xf6\xfb\x2e\xba\x70\x43\x4c\xfb\x53\x3d\xc2\x0e\xde\xef\x7d\x49\xdc\x0d\xb0\xa6\xc4\x02\x51\xbc\xd1\x03\xba\x72\xb8\x1b\xa3\xdb\x0d\x0c\x37\x18\xaf\x49\x08\xdb\x7e\xeb\x81\xf0\x92\xf4\x90\x72\xdf\x69\x21\x7c\x4d\x02\x20\xe6\x8b\x11\x84\xb7\xa4\xbf\x38\x6f\x61\x5f\xf5\xc8\x5c\x1f\xd1\xb4\xec\x11\x70\x7d\x8c\xed\x16\xdf\x7e\xc5\xbf\x33\x6d\x6f\x46\x23\x65\x34\xe7\xef\x85\x60\xa6\x5e\x23\xe3\x18\xde\xda\xef\xae\xdd\x97\x1e\x66\x69\xdd\xe4\xce\x98\x85\x59\x7b\x74\x3c\xe8\xda\xb5\x75\xa3\xb6\x68\x0e\x37\xfb\xfd\xd5\x7e\x7f\x6f\x5d\xb9\x53\x51\x32\x9a\x62\xd3\x61\xaf\x20\x34\x59\x39\x9e\xe5\x08\xdf\xc7\x31\x14\xe4\xaf\x50\xa0\xfe\x05\x48\x36\x86\xc2\xdc\x3a\x17\x29\x00\x03\x56\xd7\x7d\x11\x78\x2c\x3e\xef\x07\xb5\xfa\xf1\x1b\x87\xc8\xdc\x26\x1c\xb4\x3a\x14\xdb\x8a\x9d\x0d\x10\x0f\xd8\xab\x88\x2f\xe4\x3d\x1a\xf4\xb5\x5f\xce\x1c\xb0\x31\x5e\xde\x08\xfe\x4f\x68\x04\x67\xff\x59\xc2\xd8\x79\xe5\x9a\x30\x4f\xa8\x20\xac\x97\x1f\x3e\xee\x95\x39\x32\x79\x23\xe9\x84\xe5\xe4\xd6\x48\x70\x49\xbb\xe7\x85\x73\x22\x32\x9e\x4f\xa0\x5c\x98\x9e\xa3\x28\x30\x4f\xa7\xfd\xbd\xbc\x83\x03\xd7\x3a\xc9\x52\x87\x42\x3a\x98\x5b\xb3\x92\x7a\xde\xfc\xbf\x47\xa3\xe0\xc7\x62\x38\x0d\x05\x60\x70\x94\x37\x1e\x47\x71\x6a\xd0\xbc\x70\x72\x39\x0c\xf5\xb4\xfe\x75\xb2\x80\xbd\x2c\x57\x6c\xe8\x01\x05\xee\x41\x7b\x27\xff\xdd\xc9\x53\xa3\xf3\xfb\xbd\xfd\xe2\xe4\x3b\x67\x17\xf4\xbb\xe2\xcd\x77\x5e\xf3\xf5\xbb\xe9\x78\xea\xec\x89\xa1\x76\x6a\xc0\x6b\xda\x3a\x3a\xec\xbb\x39\x62\x4d\xba\x5b\xc9\x19\xb8\x9b\xf0\xad\x9a\xf0\xd5\xa4\x43\x03\xe4\x04\x00\x13\x62\x1a\x68\xb3\x47\xa0\xcd\x7e\x17\xb4\xd1\x2c\x4c\xe8\x97\xf6\x3b\x0e\x73\x5b\xb3\xdd\xc1\x6f\x7f\x5f\x86\x7e\x67\x12\x97\x7b\x5c\x9f\xfa\x80\xe6\xdf\xd8\x8e\xe0\xba\x68\xb8\x13\xd7\xcd\xf6\xd9\x72\x49\x2b\x2a\x8c\x7d\x9b\xb7\x75\xef\xe6\xf8\x5a\x40\x17\x43\x1f\xcd\xf9\x1c\xd9\x7a\x60\xb4\xe1\x37\xb4\x8c\x14\x8f\x3e\x87\xf4\x7e\xee\x2e\xf7\x14\x75\x19\xdd\xb2\xaa\x8a\x6a\xae\xec\xbd\x9e\x46\xeb\x3d\x2d\x23\x56\x47\xab\xad\xda\x0a\x1a\xdd\xd8\x43\x34\xa9\x83\x71\x5b\xd6\x4a\x7e\x93\xfe\xe2\xbc\xc4\x25\x2e\xba\xab\xfd\xcd\x42\x1d\x13\x90\x36\xb8\x22\x43\x15\x0c\xbc\x22\xd7\xb0\x42\x78\x43\x76\xfe\x4e\x6b\x5a\xb7\xd7\x5b\x0f\x78\xed\x3e\x51\xf9\x19\xd6\xce\x4d\x29\xde\x98\x37\xe3\x87\xdc\x47\x2a\xfa\xdd\x3b\x20\xfb\x11\x8b\x6e\xb1\xfe\xeb\x80\x97\x24\xf0\x32\xdc\x7f\x01\xe2\x33\xc3\x6d\x50\x2d\x60\xad\x99\x70\x91\xf9\x15\xe9\x5f\x99\xd5\x7a\x51\x06\xe0\x96\x8b\x89\xff\xfa\x6e\xec\x3f\x3f\x4d\xcd\x27\xa9\x58\x76\x70\xb7\x7a\x94\xb1\x32\x63\xf7\x25\x6a\x6a\xbf\x4e\xc5\x45\x1c\x5f\xa1\x4d\x76\x95\x13\xbb\x4a\x55\x28\xfa\xe7\x12\x82\xb1\x1c\x83\xe6\x0e\x47\x60\x5c\xda\x87\x29\x02\x78\x93\x2d\x73\x32\xc5\x9b\x6c\x6b\x7e\x12\xbd\x67\xcf\xd7\x45\x7d\x4d\x49\x80\x63\xf7\x71\xe3\x6d\x1f\xd1\x59\x3a\xc3\xf7\x3d\xa4\x66\xe9\x6c\x6e\x80\x96\x4f\x6e\x2d\x58\xf9\xe4\xbe\x0f\x78\x39\x06\x1a\x8b\xad\x39\x58\x3e\x27\xbb\xff\x0a\xee\x2c\xff\x57\x1a\x58\xc9\xe0\xab\xa0\x4e\x0d\x9d\x9b\x3b\xef\xe9\x66\x7b\x73\xcf\x77\x6f\xda\x86\x36\xa0\x7f\x1f\xf6\x1f\xc5\xf4\xfd\x31\xc6\xaf\x1d\x4b\xdb\x68\x8a\xef\xba\x1d\xbe\xf7\xdb\x79\xc0\x9d\xba\x79\x7d\xff\xff\x8f\x5a\x6c\x47\xd3\x2f\x83\x6e\xd9\xe3\xfc\xdb\x60\x6f\x9f\xe0\x30\x4d\x89\xe3\xf0\xfc\xa2\x4f\x4d\x7b\xd2\xf5\x0b\xec\x4f\x1a\xa0\xda\x9e\x1d\xa4\xfd\xb3\xd6\xf0\x23\xc9\x0b\x58\x63\x85\x29\xc2\x0d\xb9\x81\x3c\xbc\xfc\xa1\x9b\x31\xff\xfa\x17\x15\x27\x43\xfc\x67\x14\xed\xa5\xb8\xfe\xb5\xc8\xde\x95\x76\xdc\x20\xfc\x0b\x54\
xb8\xd3\xec\xee\x0b\x08\xf3\xa9\xcb\xc9\xbe\x59\x13\x72\x38\x1c\xf0\x8a\x1e\xd0\xfc\x0f\x67\x67\xff\x1d\xd9\xff\x65\xea\x75\xd1\x34\xac\xbe\xfe\xf0\xee\x15\x71\xd5\x85\x0d\xab\x93\xdf\x64\xb2\x29\x9a\x3f\xfc\x6f\x00\x00\x00\xff\xff\x10\xa0\xcc\x28\xf4\x4a\x00\x00") -func pagesAssetsStylesBootstrap311MinCssBytes() ([]byte, error) { +func pagesAssetsJsPopperMinJsBytes() ([]byte, error) { return bindataRead( - _pagesAssetsStylesBootstrap311MinCss, - "pages/assets/styles/bootstrap-3.1.1.min.css", + _pagesAssetsJsPopperMinJs, + "pages/assets/js/popper.min.js", ) } -func pagesAssetsStylesBootstrap311MinCss() (*asset, error) { - bytes, err := pagesAssetsStylesBootstrap311MinCssBytes() +func pagesAssetsJsPopperMinJs() (*asset, error) { + bytes, err := pagesAssetsJsPopperMinJsBytes() if err != nil { return nil, err } - info := bindataFileInfo{name: "pages/assets/styles/bootstrap-3.1.1.min.css", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + info := bindataFileInfo{name: "pages/assets/js/popper.min.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _pagesAssetsStylesBootstrap400Beta2MinCss = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\xbd\x6b\x8f\xe3\x38\x92\x28\xfa\xfd\xfc\x0a\x6d\x16\x0a\x5d\x39\x6d\xb9\x24\xf9\x99\x4e\x54\x63\x66\x1a\xb3\x38\x03\x4c\xef\x87\x9d\xb3\xc0\x05\xfa\xd4\x05\xf4\xa0\x6d\x4d\x49\x96\xae\x24\x57\x2a\xc7\xf0\xfe\xf6\x0b\xbe\x24\x3e\x82\x94\xe4\x74\x76\xcf\x01\xce\xd6\x4e\xa7\x4c\x05\x23\x82\x8c\x60\x44\x90\x22\x83\x9f\xff\xf0\x6f\xff\xc3\xf9\x83\xf3\xe7\xa2\x68\xea\xa6\x0a\x4b\xe7\xfb\x72\xee\xcd\x3d\x37\x42\x4d\x38\x0f\x9c\x4f\xc7\xa6\x29\xeb\xdd\xe7\xcf\x07\xd4\x44\x1c\x66\x1e\x17\xf9\x23\xae\xf5\x73\x51\xbe\x56\xe9\xe1\xd8\x38\x81\xe7\xfb\x6e\xe0\xf9\x1b\xe7\x7f\x1d\x91\x80\xed\x4f\xe7\xe6\x58\x54\xb5\x11\xf8\x25\x6d\x1a\x54\xcd\x9c\xbf\x9e\xe2\x39\x06\xfa\x5b\x1a\xa3\x53\x8d\x12\xe7\x7c\x4a\x50\xe5\xfc\xf2\xd7\xff\x25\xb0\x90\x36\xc7\x73\x84\x89\x7f\x6e\x5e\xa2\xfa\x73\xc7\xcf\xe7\x28\x2b\xa2\xcf\x79\x58\x37\xa8\xfa\xfc\xb7\xbf\xfe\xfc\x97\xff\xf8\xfb\x5f\x30\x7f\x9f\x77\x55\x51\x34\x17\xd7\x8d\xb2\x33\xda\x7d\xf0\xbc\x4d\xb4\xdf\x3f\xbb\x6e\x7a\x4a\xd2\x43\xb1\xfb\xb0\x5e\xfb\xde\x3e\x78\x76\xdd\xf2\x5c\x95\x19\xda\x7d\x58\xef\x97\x41\xec\xe3\x82\xf4\xf4\x6d\xf7\x01\x6d\x17\x68\x1b\x3f\xbb\x6e\x85\x92\xdd\x87\x24\x5e\xac\x96\xab\x67\xd7\x2d\xaa\xf0\x74\x40\xbb\x0f\xfb\x64\x83\xfc\xe5\xb3\xeb\xbe\xa2\x2c\x2b\x5e\x76\x1f\xf6\xfb\xd8\xf7\x36\xcf\xae\x7b\xa8\x10\x3a\xed\x3e\x04\xdb\x70\x43\x6a\x34\x28\xcc\x76\x1f\x02\x2f\x7e\x7a\xc2\xaf\xe3\xd7\xf0\xb4\xfb\xe0\x6f\xc2\x20\xda\x3e\xbb\xee\xcb\x31\x6d\x30\x3a\xc2\xdb\xa1\x0a\x5f\x77\x1f\xb6\xeb\x2d\x7a\x5a\xb3\x9f\x6e\x12\x56\xdf\x76\x1f\x16\xcb\x45\xb8\xf4\x30\x73\x55\x9a\x87\xd5\xab\xd0\xa0\x1a\xc5\xc5\x29\x21\x65\x5d\xcd\xfa\x1c\xc7\xa8\xae\x05\x2e\xd2\xd3\xbe\x10\xc9\x86\xd5\x29\x3d\x1d\x04\xb6\x13\xdc\xae\x4a\x68\x69\x86\xc5\xb5\xfb\xb0\xdf\xee\x9f\xf6\x21\x01\x90\x18\x89\x2a\x14\x7e\x2b\x8b\xf4\xd4\xb8\x6d\xbd\x53\x4a\xea\x7c\xb7\xda\xac\xcb\x56\x2e\xcd\x93\xdd\x66\xbd\x55\x4b\xb3\xc3\xee\xe9\x29\x50\x4b\xdb\x6c\xe7\x07\x9e\x47\x8a\xf7\xc5\xa9\x71\xf7\x61\x9e\x66\xaf\x6e\x1d\x9e\x6a\xb7\x46\x55\xba\xdf\xb9\x61\x59\x66\xc8\xad\x5f\xeb\x06\xe5\xb3\x3f\x67\xe9\xe9\xdb\x2f\x61\xfc\x77\xf2\xf3\xdf\x8b\x53\x33\x7b\xf8\x3b\x3a\x14\xc8\xf9\xaf\xbf\x3e\xcc\xfe\xb3\x88\x8a\xa6\x98\x3d\xfc\x4f\x94\x7d\x47\x4d\x1a\x87\xce\x7f\xa0\x33\x7a\x98\xfd\xa9\x4a\xc3\x6c\xd6\x23\x9d\x3d\xfc\x09\x23\x75\x7e\x2e\xb2\xa2\x72\xfe\x92\x17\xff\x48\x1
f\x7a\x3c\x7a\xc1\xdf\x5f\xf3\xa8\xc8\x1e\x14\x26\xf3\xe2\x54\xd4\x65\x18\xa3\xdd\xc3\xdf\xff\xfd\x97\xe2\x54\xb8\xff\x89\x0e\xe7\x2c\xac\x1e\x66\xbf\xa0\x53\x56\xcc\x7e\x29\x4e\x61\x5c\xcc\x7e\x2e\x4e\x75\x91\x85\xf5\xec\xe1\x6f\x69\x84\xaa\xb0\x49\x8b\x93\x83\xe1\x1f\x66\x0f\x3f\x17\xe7\x2a\x45\x95\xf3\x1f\xe8\xe5\x61\xd6\x21\xbc\xfe\x31\x47\x49\x1a\x3a\x65\x95\x9e\x9a\xcb\x1f\x66\xbb\x5d\xb8\xc7\x63\x68\xb7\x8b\xd0\xbe\xa8\xd0\xa5\x41\x6d\xe3\xd6\xc7\x30\x29\x5e\x76\xa7\xe2\x84\xfe\x2d\xcd\xcb\xa2\x6a\xc2\x53\xf3\x1c\x15\x2d\xfc\xe6\x1a\xce\xc2\xdd\xf7\xb4\x4e\x1b\x94\x50\x04\x09\x8a\x0b\xca\xce\x8e\x8c\xc5\x2c\x3d\xa1\x6b\x18\x45\xd5\xaf\x4d\xda\x64\xe8\x2b\x23\x7b\x89\x8b\x53\x83\x4e\xcd\xee\xc1\xf9\xf4\xe0\x84\x4d\x53\x7d\x22\xef\x1f\x9d\x87\xc7\x87\x6b\x59\xa1\x0b\x51\x6f\x97\xf6\x45\x59\x21\xf7\xa5\x0a\x4b\x81\x70\x94\x15\xf1\xb7\xff\xef\x5c\x34\x68\x86\xa1\xa3\xa2\x4a\x50\xb5\xf3\xcb\xd6\xa9\x8b\x2c\x4d\x9c\x0f\x4f\x4f\x4f\xcf\x65\x78\x40\x54\x35\xdc\xf4\x54\xa7\x09\xda\x85\xdf\x8b\x34\xb9\x36\x47\x14\x26\x97\x24\xad\xcb\x2c\x7c\xdd\x35\x61\x94\x21\x17\x17\xa1\xca\x3d\x54\xc5\xb9\xbc\xa6\xf9\x61\xd6\x54\x17\x53\xfd\x63\x30\x3b\x2e\x66\xe5\xa5\xa8\xca\x63\x78\xaa\x77\x8b\xe7\x97\x34\x29\x5e\xea\xdd\x82\xbe\x12\x2b\x92\xe6\xb2\x7a\xf3\x53\xf8\x3d\x0a\xab\x8e\x32\xee\xcc\xeb\x3c\x0a\x93\x03\xd4\x02\xcf\xf3\xae\x73\xc2\x1c\x7b\xe9\xc6\x45\x96\x85\x65\x8d\x76\xfc\x41\xe8\x10\x0a\xe9\x34\xc9\x8c\x3f\x1d\x2f\x51\x18\x7f\xc3\x0d\x3a\x25\xb8\x6a\x51\x11\x6b\xa1\xd5\x71\x29\x76\x94\xf4\x95\x85\xa2\x23\xc0\x59\x92\x24\x02\x96\x2b\xa0\x4c\x44\x63\xd2\x7f\x62\x43\xc1\x78\x8f\x8a\xf6\x7a\x6c\xf2\xec\x22\xa8\xfb\xae\x1f\x3e\xcf\x58\x51\xdc\x23\x22\xa6\xc3\x9f\xfb\xab\x67\xf7\x05\x45\xdf\xd2\xc6\xa5\x7a\x99\xfe\x13\xb9\x61\xf2\x8f\x73\xdd\xec\x7c\xcf\xfb\xf8\xec\xe6\xb5\xe5\x4d\xf1\x1d\x55\xfb\xac\x78\x71\xeb\xe6\x35\x43\xbb\x3a\xae\x8a\x2c\x8b\xc2\xaa\x47\x1a\x96\xee\x31\x3d\x1c\x89\xa9\x62\x9d\xd3\x54\xe1\xa9\x2e\xc3\x0a\x9d\x9a\xeb\x1f\x31\x96\xef\x29\x7a\xc1\x8d\xbc\xbc\xa4\x49\x73\xdc\x25\xe8\x7b\x1a\x23\x97\xfc\xb8\x86\x55\x93\xc6\x19\x9a\x85\x58\x2d\x66\x49\x1a\x66\xc5\x61\xb6\x4f\x0f\x71\x58\x62\xe5\xc7\x8f\xe7\x0a\xcd\xf6\x45\x81\xfb\x85\x2a\xd7\xec\x48\xb4\x6b\x96\x87\xe9\x69\x76\x0a\xbf\xcf\x6a\x14\x63\xe0\x4e\x1f\x88\x52\x5f\xa3\x22\x79\xbd\xe4\x61\x75\x48\x4f\x3b\xef\x59\xec\xae\x7f\x25\xbb\x45\xf8\xc2\x9d\xbf\xf3\x2b\x94\xd3\x9f\x2f\x54\x7c\x4b\xcf\x53\xc4\xb9\x7a\x66\xfa\x17\xf8\xc1\x2a\x78\x7a\x26\x92\x0b\xb3\xf4\x70\xda\x65\x68\xdf\x3c\x83\x8a\x7a\xfd\xb5\x09\xa3\xf4\x94\xa0\xf6\xcb\x83\xeb\x3f\x7c\xdd\xed\x8b\xf8\x5c\x5f\x8a\x73\x83\x91\xef\x3c\x41\x05\x8f\x95\xa8\x71\xcc\xb6\x60\x95\x7b\x66\x2c\x78\xcf\x5c\x25\x88\xb9\x8a\x32\x74\x3d\xfa\x33\x3a\x8e\x8f\xcb\xd9\x71\x35\x3b\xae\x59\x9f\xbb\x4d\x51\xee\xbc\x67\xf6\x23\x2a\x9a\xa6\xc8\x77\xf3\x55\x85\xf2\x6b\x69\x03\xc1\xdd\x40\x2d\x5d\x12\x36\xa1\x5b\x54\xe9\x21\x3d\x85\x99\x4b\xed\xde\x4c\xb0\x81\x66\x4b\x29\x2b\x3d\x04\xe1\x24\x45\xd3\xa0\xe4\x79\x10\x20\x3e\x57\x75\x51\xed\x8e\x28\x2b\x9f\xbb\x21\x48\x18\xf5\xae\x61\x92\x54\xa8\xae\x2f\x7a\x03\x98\x58\xc9\xa8\x39\x15\x55\x1e\x66\x92\x24\xd3\xd3\x11\x55\x69\x73\x4d\xb2\x59\x91\xcd\xce\xd9\x60\x7f\x14\x99\x53\x60\x58\xe7\x8c\xc1\x1d\x52\xc9\xe9\xeb\x75\x1c\x25\xcd\x45\xd4\xa0\x8d\xe7\x5d\x93\xe4\x02\xc8\x80\x13\xc1\x6a\xb3\xf3\x04\x37\xd0\x8d\x18\xc7\x73\x08\xe9\x64\x7f\xba\x08\xad\x49\x9b\x30\x4b\xe3\x6b\x34\xab\x9b\xaa\x38\x1d\x24\x72\x51\x91\x25\xa8\xba\xd6\x79\x98\x31\xfb\x44\x14\x7b\xeb\x7d\xbc\xd6\xe7\x68\x56\x9f\xcb\x4b\x59\xd4\x29\xe9\xe8\x0a\x65\x61\x93\x7e\x47\xc2\x00\x
d8\xac\x3e\x4a\xbd\xe4\x3d\x7f\x47\xd8\x3c\x84\x19\xd3\xf1\x28\xac\x11\x71\x84\xf5\x39\xba\xb0\xd6\xb8\xf3\x60\x85\xf2\x2b\xc6\x8d\x7b\xcf\x9d\xe3\x5f\xe1\x85\xa9\x3f\x8b\xd1\x54\x29\x63\x87\xa1\x0f\x16\xc1\x70\x99\x14\xc8\xad\xbf\xa5\xe5\xae\x88\xfe\x81\xe2\xa6\xbe\x86\xbb\x23\x1e\x0e\x3d\xb1\xd5\x3a\x5a\x98\x55\xea\x1a\xee\x4e\x45\xf3\xe9\xd7\x63\x85\xf6\x5f\x1f\xe9\x33\x1f\x9a\x5f\x1f\x19\x16\xa6\x1a\x20\xcb\x76\x04\x74\x5c\xcf\xec\x30\x22\xc3\x6f\x25\xd5\x9b\x90\x6b\x5c\x24\x68\xf6\x2d\x4a\x70\x10\x31\xab\xc3\xbc\x94\xdc\x53\x17\x3c\xf5\x61\x94\x68\xf7\xb0\x45\xa8\xd0\xd0\x18\xe8\x2d\x4f\x78\x6e\x0a\xab\x7b\xba\x52\xaf\xa1\xa9\x72\x9a\x1f\x2e\x8a\x4a\xe5\x69\x92\x64\x88\x8f\x6c\x3e\x60\xb1\x8a\x7d\x3f\x90\x56\x93\x49\xcc\xe3\xa5\x23\x7e\x4c\x93\x04\x9d\xae\xbf\x56\x45\x86\xbe\x44\xe7\xa6\x29\x4e\x5f\x67\xe1\x2c\xac\x50\x38\xa3\x3f\x67\xe9\xa9\x3c\x37\xac\xcb\x5e\x4b\xf4\x85\x4c\x58\xbe\x3e\xce\xb2\x30\x42\xd9\xac\x46\x19\x8a\x9b\x59\x7d\xce\xf1\x34\x62\x86\x3b\x1f\x57\xbe\x10\x4f\x5c\x9c\xe3\xa3\x1b\x12\x6f\xb6\xcb\xc3\x53\x5a\x9e\x33\x22\x93\x67\xe3\x9b\xab\x3d\xb2\xb9\x32\x47\x7a\x29\xc3\x24\x49\x4f\x07\xd2\xbd\xf3\x0d\x31\x01\xbc\x88\xdb\x05\x5a\xca\x94\x99\xcd\x64\x54\x0f\xc3\xd0\xb9\x24\x8a\xa3\x15\xaf\xcd\xf1\x22\x80\x71\xdb\x46\x5a\xdb\xb9\xe4\xf4\x44\x46\x36\xb1\x33\xa0\x4f\xa0\x7d\xc7\xdb\x51\x85\x49\x7a\xae\xb1\x61\x22\xc5\x8a\xbe\xe1\x10\x8a\x99\x67\x5e\xb4\x2a\x5b\x07\xeb\x85\xc3\x07\x2e\xa9\xe1\x56\xb8\x7d\xa4\x45\x57\x51\x38\xb3\xa2\x6c\x68\x0c\xc1\xa4\xd1\x49\x01\x8c\x17\xf8\x40\xe9\x75\x96\x97\x40\x46\x5d\x24\x74\xd1\xdc\x25\x7b\x4b\xe9\xd2\x7e\x23\x76\x67\x5f\x54\x39\xd5\x3c\xa6\x34\xa8\x46\xcd\xd7\x19\xfd\x51\x9f\xa3\x3c\x6d\xbe\x72\x05\xc3\xf1\x9f\x43\xdf\x30\x0d\xbc\xf0\x66\x87\x65\x89\xc2\x2a\x3c\xc5\x68\x47\x5f\x5d\x25\xb8\xdd\xce\xcd\x8b\x7f\xb2\xce\x49\x4f\x27\x54\xcd\x44\x72\xc6\xd7\x8c\x01\xe0\x3d\x13\x90\xf6\x82\x2b\xdc\xce\x03\x06\x17\xe9\x1b\x8a\x39\x3e\xa2\xf8\x5b\x54\xb4\x5f\x67\x42\x21\x16\x7f\xf1\x15\x8e\x7e\x9f\x3b\xc4\x22\x9a\x24\x6c\x90\x84\x02\x17\x34\x69\x8e\xdc\xac\x88\xc3\x4c\x7a\x95\x17\xa7\xe6\x28\x95\x60\x40\xb0\x0f\xb3\xb4\x6e\x70\xc4\xdd\xe9\x87\x6c\x84\x2a\x44\xb4\x81\xdb\x94\xeb\x3e\x45\x59\x52\xa3\xe6\x92\xa7\x27\x1a\xe0\xee\xbc\x9e\xdf\xe7\x4e\xb9\xd8\x54\xc0\xbb\x66\xe8\x80\x4e\x89\x1c\xb8\x3e\xd3\x8a\x24\x08\xcf\xc3\xd6\x15\x7e\xaa\xa8\x64\x97\x2e\x98\x54\x5a\x00\x68\xe7\xb3\x6c\xfa\xc5\xd9\x21\x8d\x53\xae\x65\x55\x1c\x48\x54\x63\x72\xbe\xb4\xcb\x4e\xe7\x3c\x42\x15\xd6\x08\xd6\x6b\x44\xea\x6e\x5d\x62\xae\xa8\x9a\x1a\x00\x8b\x73\x23\x03\x5e\x18\x8b\xb8\x4b\x19\xf6\x1a\x85\x55\x7c\xfc\xca\x47\xbc\x5b\xec\xf7\x35\x6a\x76\x2e\x59\xa5\xd0\xc5\x24\x8c\x1b\x56\xb3\x27\x47\x0b\xdc\x18\x03\x66\x32\x6b\x26\xd8\xde\x17\x42\x3a\x41\x88\xf5\x75\xf6\x69\x86\xdc\x73\x99\x15\x61\xc2\xdb\x83\x05\xd1\x75\xb1\x79\x64\x16\xe7\x06\x9b\x08\xc8\x44\x5e\x99\x7b\xe8\x5e\x62\x45\x74\xd3\x06\xe5\xd7\x06\xe5\x65\x16\x36\x48\x9e\xfd\xfe\x4a\xfd\xd2\x57\xa9\x54\x9c\xa2\x1e\xfd\xd9\xfc\x18\xcc\xe6\xc7\xc5\x6c\x7e\x5c\xce\xe6\xc7\xd5\x6c\x7e\x5c\xcf\x8c\xd1\xbb\xae\x59\x90\x31\x64\x81\xdf\x4a\x9b\xa9\x04\xb2\xa2\x11\xfa\x47\x5f\x08\x0a\x03\x6a\xf6\x31\x4f\xc7\x40\x2c\xa7\xa5\x0b\x3c\xfd\x17\x15\x7a\xc3\xc0\x97\xb3\xe3\xf2\xa2\x6a\xfa\x15\x37\xe7\xb8\x92\xca\x03\xf6\x62\x8d\x1b\x25\xcf\xb2\xae\xf3\x0c\x85\x89\x0e\x2d\xb5\x69\xe1\x79\xd7\x39\xeb\x4d\x57\xe4\x7c\x0d\x40\xaa\xad\xef\x6b\x8a\x6d\x5b\xcd\x21\x2a\xe6\xba\x62\x0f\x2c\x27\xd6\x15\x3b\x69\x31\xae\xee\xb1\x12\x63\x30\x5f\x98\x25\x88\x61\x18\xb7\x5d\xdc\xac\x13\xd8\x6e\x4d\xa3\x3a\x44\xe1\x27\x6f\x86\xff\xcd\xfd\
xc7\xeb\x9c\xcc\x05\x66\xc0\x8c\x40\x9d\xe9\x5e\xe7\x79\x58\x7d\x9b\xe1\xff\x74\xae\x63\x1e\x60\x7a\xfa\x9c\x36\xde\x6f\xd1\xe2\x3a\x27\x63\xe2\x7c\x22\x7e\x25\xe9\x02\x1c\x3a\xa3\x79\x26\x2f\x05\x97\x43\xa1\xe9\x18\x9b\x02\x4b\x06\x1d\x3c\x46\x35\x30\x1a\x30\x66\x61\xdd\xb8\xf1\x31\xcd\x92\x47\xde\x9f\x15\x1d\x25\x65\x7b\x9d\xa7\xa7\xb4\x49\xc3\x2c\xad\x73\xa1\x3f\x9e\xbc\x8f\xcf\x4a\x28\x70\x2e\x4b\x54\xc5\x61\x8d\xae\x73\x6d\x72\x06\xcc\x34\x25\x9d\xef\x2b\xb8\x74\xe1\x44\x71\x30\xb2\x20\xa4\x80\x0f\xa8\xdc\x2d\x46\x75\x6b\x8d\xff\x3b\xf0\xfc\xa5\xf3\xbf\x3d\xef\x4f\xde\xc3\x75\x9e\xe6\x07\x77\x9f\x9d\x53\x3c\xcf\x94\xbc\x95\x68\xd6\x09\x54\x73\x3c\xe7\xd1\x29\x4c\x33\x41\xc2\x44\x33\xc1\x75\x8b\x67\x70\xc1\xec\x59\x8e\x10\x19\x02\xd2\x6f\x74\x62\x19\x66\x99\x33\x0f\x6a\x07\x85\x35\x72\xd3\x13\x76\x37\xaa\x1b\x95\x18\x63\x73\x05\x58\xc2\xf4\xa5\x8b\x27\x0e\x90\x5d\x94\x46\x50\x07\xcd\x43\x6e\x59\xbe\x72\x37\xdb\x27\x4c\xf2\x9a\xf5\x9b\x97\xac\x31\x31\xa1\xcb\x2b\x94\x3b\xf3\xa5\xac\x3b\x02\x87\x51\xb2\xf4\x97\x1b\x48\x26\xf4\x43\x04\x24\x80\x6b\xf8\x93\x44\xc4\x53\xe2\x0c\x0d\x19\xf7\x0b\xdf\xa2\x64\x3c\x67\x44\x29\x34\xb6\xd8\x5a\x98\xca\x16\xe6\xea\x5b\x94\x38\x22\x05\x4f\x1c\x2d\x9e\x62\x84\x36\x9e\x47\xe6\xa1\xf2\x58\x19\x9a\x95\x82\x8c\x52\x96\x30\x36\x47\xe9\x16\x7d\x0a\x31\xd0\x51\xe2\xa2\x84\x3a\x39\x9a\x97\x15\x72\xe9\xac\x97\x4c\x02\xb1\x9a\x33\x6d\x5c\x2c\xbd\xb2\xed\xa6\xcc\xee\x2b\x9b\x1c\x5f\xe7\x78\x10\x87\x29\x0e\xd0\xf5\xb8\x92\xd9\x29\x7f\x55\x76\x51\x36\x35\x91\xa4\x44\xb2\x65\x24\xfc\x15\xd7\x90\xc8\x60\x62\xdf\x46\x3e\xf5\xe1\x2f\xf9\x08\xf5\x78\x11\xc8\xf6\x63\x71\x85\x79\xbc\x02\x95\xc8\x37\x2a\x43\xa5\x4d\x60\xa8\x44\x3e\x61\x19\x2a\x3d\xad\x0d\x95\xe8\x17\x2e\x43\x2d\xdf\xa7\x0c\xf6\x2f\x99\x9d\x7b\x87\x9e\x9b\x57\xc5\x4b\xa7\x79\x6e\x5e\xbb\xfb\x0c\xb5\x78\xb6\xc3\xcb\xf0\xef\x67\xfe\x82\x7c\xc5\xd9\xe1\xff\x3c\x2b\x3f\x25\x52\xae\x48\x9d\xd0\x22\x25\xd7\xf9\xa9\x70\x0f\xe7\xa6\x41\x55\x2d\x7b\x28\x4f\x59\x16\x14\x00\x7f\x9a\xc7\x45\x36\x13\x0b\x7e\x8d\xb3\xb0\xae\xff\xf0\x25\x2e\x32\xf7\xeb\x45\xee\x08\x4f\xee\x05\xef\x4a\x6b\x63\x50\x9f\xfd\xf1\xd8\x5f\xfe\x3b\xa0\x7f\xd9\x9f\x05\xfd\xb3\xa4\x7f\x56\xf4\xcf\x9a\xfe\xd9\xd0\x3f\x5b\xfa\xe7\x89\xfe\xc1\xbd\x48\x9f\xb2\x03\xff\xcb\x69\xe1\x27\xaf\x7f\x14\x4a\x83\xee\xb1\x7f\x5a\x74\x4f\xcb\xee\x69\xd5\x3d\xad\xbb\xa7\x4d\xf7\xb4\xed\x9e\x9e\xba\xa7\x9e\x9f\x3c\xe1\x7f\x39\x3f\xf8\xc9\xeb\x1f\x85\xd2\xa0\x7b\xec\x9f\x16\xdd\xd3\xb2\x7b\x5a\x75\x4f\xeb\xee\x69\xd3\x3d\x6d\xbb\xa7\xa7\xee\xa9\xe7\xa7\xce\xf9\x5f\xce\x0f\x7e\xf2\xfa\x47\xa1\x34\xe8\x1e\xfb\xa7\x45\xf7\xb4\xec\x9e\x56\xdd\xd3\xba\x7b\xda\x74\x4f\xdb\xee\xe9\xa9\x7b\xea\xf9\x69\x33\xfe\x97\xf3\xd3\xf6\xea\xd1\xf6\x1a\xd2\xf6\x4a\xd2\x76\x7a\xd2\x76\xaa\xd2\x76\xda\xd2\x76\x0a\xd3\x76\x3a\xd3\x76\x6a\xd3\x76\x9a\xd3\x76\xca\xd3\x52\xfd\x01\x56\xa7\xc5\xb9\x78\x7a\xea\x7c\xbd\x30\xcc\x6d\x23\x9f\xa8\xfd\xa5\x1b\xb5\x65\x85\xf6\xa8\xaa\x50\x42\x3d\x80\x47\x07\x6f\x14\xd6\x29\xf9\x7e\xdf\x81\x11\x2e\xbe\xa3\x9d\x4f\x01\x0e\x55\xf1\xb2\xf3\x95\x20\xe6\xda\x69\x7d\x87\x9f\x2c\x74\x12\xf3\x22\xff\xa2\x95\x98\xdd\xe1\x38\x68\xa0\x4b\x46\x9e\x8c\x60\x3b\x5f\x90\xff\xfb\xf8\x0c\x14\xf5\xf5\xbb\x32\x8a\x24\x90\x91\xf8\xeb\xf9\x1a\xff\xdf\x46\xc0\x22\x94\x09\x4d\xe9\x0a\x29\x9e\x85\x8c\x27\x58\x09\x08\xf0\x8f\xbe\x66\xb0\x62\x55\x96\x72\x95\xc5\x42\x6f\x80\x50\xd6\x23\xe8\x0b\x29\x9e\x95\x8c\x67\xe9\xeb\x4d\x10\xca\x7a\x3c\x7d\x21\xc5\xb3\x96\xf1\xac\x3c\x01\xc1\x4a\x5a\xd3\x59\x71\x31\x6e\x94\x2a\x80\x0c\x56\x90\x10\x56\x8a\x14\xb6\x32\x9e\x35\x20\x85\x35\x24\x85
\xb5\x22\x85\x27\x19\xcf\x46\x94\xc2\x46\x92\xc2\x86\x4b\xc1\xf7\x14\x35\x02\xc4\xb0\x85\xc4\xb0\x55\xc4\xe0\x2b\xfa\xf8\x04\xc8\xe1\x09\x92\xc3\x93\x22\x07\x5f\xd5\x49\x4f\x94\x04\xb0\xbc\x76\x9d\xd3\xf8\x6a\x9f\x56\x75\xd3\x8f\x5a\x3a\x0b\x71\xfd\x67\xfe\xc0\xe1\x7c\x15\x86\x83\x74\x10\x81\x0a\x11\x30\x88\x80\x43\x2c\x54\x88\x05\x83\x58\x70\x88\xa5\x0a\xb1\x64\x10\x4b\x0e\xb1\x52\x21\x56\x0c\x62\xc5\x21\xd6\x2a\xc4\x9a\x41\xac\x39\xc4\x46\x85\xd8\x30\x88\x0d\x87\xd8\xaa\x10\x5b\x06\xb1\xe5\x10\x4f\x2a\xc4\x13\x83\x78\xea\x7a\xcc\xd3\xba\xcc\xe3\x7d\xe6\x75\x40\x7a\xbf\x76\x1d\xdb\xf7\xbd\xd6\xb5\x3e\xef\x5b\x1f\x77\x2e\x59\x2f\x74\xfd\x8b\x18\xcf\x08\x83\x85\xbd\x0f\xa4\xf7\xa2\x2d\x62\x00\x0b\x09\x80\x98\x1c\xf6\x66\x29\xbd\x11\x6d\x09\x03\x58\x49\x00\xa2\x91\x60\x00\x6b\x09\x80\xd8\x02\xf6\x66\x23\xbf\xd1\xf9\xde\x4a\x00\x6b\x9d\xef\x27\x09\x60\x23\xf0\xed\x7b\x72\x9f\xe8\x8c\xfb\x72\xaf\x09\xc3\xca\x16\xde\x63\xa7\xfe\x9e\x9e\x8e\x85\x0c\x6f\x75\x76\x38\xa8\xb9\x87\xbf\xc3\x11\xd1\x9d\x5c\x1e\x0e\xa9\xa6\x7b\x3d\x1c\x7e\xdd\xc9\xf1\xe1\xf8\xed\x4e\xbe\x0f\x07\x80\xd3\xdd\x1f\x0e\x16\xef\xe4\x01\x71\xb4\x79\x27\x27\x88\xc3\xd5\xe9\x7e\x90\xc4\xd2\x77\x72\x85\x24\x18\xbf\x93\x37\x24\xd1\xfc\x8d\x0e\xb1\xce\x47\xfb\x44\x69\x7c\x99\xdc\xa2\x34\x78\x4c\x9e\x51\x1a\x16\x26\xe7\x28\x8d\x02\x93\x7f\x94\xf4\xdb\xe4\x22\x25\xcd\x35\x79\x49\x49\x51\x4d\x8e\x52\x52\x41\x93\xaf\x94\x94\xcb\xe4\x2e\x65\x5d\x32\x7b\x4c\x59\x4f\xcc\x4e\x53\xd6\x01\x8b\xdf\xac\x73\x57\x76\x13\x9e\xf8\x6a\xc8\xab\x12\xf1\x0e\x38\x56\x22\x5d\x93\x6f\x25\x52\x1d\x70\xaf\x44\xa8\x03\x1e\x96\xc8\xd4\xe4\x64\x89\x2c\x07\xfc\x2c\x11\xe5\x80\xab\x25\x92\x34\x79\x5b\x2a\xc1\x01\x87\x4b\xc5\x67\xf0\xb9\xd6\xe5\x31\x3c\xb3\x7f\x4f\xa7\xcb\xd6\x0d\xde\xea\x74\xf3\xe4\x3e\x4e\x37\x4f\xee\xe6\x74\xf3\xe4\x16\xa7\x9b\x27\x77\x73\xba\x79\x72\x37\xa7\x9b\x27\xb7\x38\xdd\x3c\xb9\x9b\xd3\xcd\x93\xbb\x39\xdd\x3c\xb9\xc5\xe9\x92\x05\xb5\x3b\x39\x5d\xb2\x22\x77\x27\xa7\x4b\x96\xf4\x6e\x74\xba\x79\x32\xda\xe9\x4a\xe3\xcb\xe4\x74\xa5\xc1\x63\x72\xba\xd2\xb0\x30\x39\x5d\x69\x14\x98\x9c\xae\xa4\xdf\x26\xa7\x2b\x69\xae\xc9\xe9\x4a\x8a\x6a\x72\xba\x92\x0a\x9a\x9c\xae\xa4\x5c\x26\xa7\x2b\xeb\x92\xd9\xe9\xca\x7a\x62\x76\xba\xb2\x0e\x58\x9c\x6e\x9e\x18\x9d\x2e\x11\xb0\xdd\xe9\x12\xf1\x0e\x38\x5d\x22\x5d\x93\xd3\x25\x52\x1d\x70\xba\x44\xa8\x03\x4e\x97\xc8\xd4\xe4\x74\x89\x2c\x07\x9c\x2e\x11\xe5\x80\xd3\x25\x92\x34\x39\x5d\x2a\xc1\x01\xa7\x4b\xc5\x37\xde\xe9\xf6\x9f\x97\x32\x37\x3b\xbc\xa7\xd3\x65\x1f\x0f\xde\xea\x74\xb3\xc3\x7d\x9c\x6e\x76\xb8\x9b\xd3\xcd\x0e\xb7\x38\xdd\xec\x70\x37\xa7\x9b\x1d\xee\xe6\x74\xb3\xc3\x2d\x4e\x37\x3b\xdc\xcd\xe9\x66\x87\xbb\x39\xdd\xec\x70\x8b\xd3\x25\x5f\xd5\xee\xe4\x74\xc9\x67\xb9\x3b\x39\x5d\xf2\x5d\xef\x46\xa7\x9b\x1d\x46\x3b\x5d\x69\x7c\x99\x9c\xae\x34\x78\x4c\x4e\x57\x1a\x16\x26\xa7\x2b\x8d\x02\x93\xd3\x95\xf4\xdb\xe4\x74\x25\xcd\x35\x39\x5d\x49\x51\x4d\x4e\x57\x52\x41\x93\xd3\x95\x94\xcb\xe4\x74\x65\x5d\x32\x3b\x5d\x59\x4f\xcc\x4e\x57\xd6\x01\x8b\xd3\xcd\x0e\x46\xa7\x4b\x04\x6c\x77\xba\x44\xbc\x03\x4e\x97\x48\xd7\xe4\x74\x89\x54\x07\x9c\x2e\x11\xea\x80\xd3\x25\x32\x35\x39\x5d\x22\xcb\x01\xa7\x4b\x44\x39\xe0\x74\x89\x24\x4d\x4e\x97\x4a\x70\xc0\xe9\x52\xf1\x8d\x77\xba\xc2\xf6\x8c\xcc\x6d\xdf\xf5\x4b\x6a\x7b\x9f\x8f\xa9\xed\x9d\xbe\xa7\xb6\xf7\xfb\xa4\xda\xde\xf4\x55\xb5\xbd\xdf\x87\xd5\xf6\x7e\xdf\x56\xdb\x9b\x3e\xaf\xb6\xf7\xfb\xc2\xda\xde\xef\x23\x6b\x7b\xd3\x77\xd6\xf6\x8e\x9f\x5a\xdb\x3b\x7e\x6d\x6d\xdf\xf0\xc1\xb5\xcd\x46\x7b\x5d\x69\x7c\x99\xbc\xae\x34\x78\x4c\x5e\x57\x1a\x16\x26\xaf\x2b\x8d\x02\x93\xd7\x95\xf
4\xdb\xe4\x75\x25\xcd\x35\x79\x5d\x49\x51\x4d\x5e\x57\x52\x41\x93\xd7\x95\x94\xcb\xe4\x75\x65\x5d\x32\x7b\x5d\x59\x4f\xcc\x5e\x57\xd6\x01\x8b\xd7\x6d\x33\xa3\xd7\x25\x02\xb6\x7b\x5d\x22\xde\x01\xaf\x4b\xa4\x6b\xf2\xba\x44\xaa\x03\x5e\x97\x08\x75\xc0\xeb\x12\x99\x9a\xbc\x2e\x91\xe5\x80\xd7\x25\xa2\x1c\xf0\xba\x44\x92\x26\xaf\x4b\x25\x38\xe0\x75\xa9\xf8\x4c\x5e\x97\xa5\x46\x30\x1f\x3a\x82\xce\x00\x58\x76\xa9\x82\x19\x14\xba\x2d\xbe\xf4\x8c\xa1\x72\xb4\xa8\x29\x4a\xf8\x34\xc1\x07\xf4\x84\x62\xb4\xef\x50\x1e\x51\x48\x52\x29\xa8\x47\x93\x08\x6b\xca\xf1\xec\xc0\x88\x25\x2a\x92\xd7\x1f\xc9\x7f\x2f\x02\x55\x23\x3c\xcf\x1d\x01\x9e\xad\x67\x79\x1e\xea\x5c\x48\xfa\x80\x7f\x08\x4d\x5e\x90\x3d\xf9\x72\x3e\x08\x20\x19\x84\x44\x74\x7a\x2e\x09\x53\x75\xda\x65\x10\x12\xd6\x97\x52\xaf\xf1\x28\xa0\x6c\xbb\x96\x35\x55\x5a\x62\x78\xdc\x5f\x4e\x53\xed\x4e\xcd\xd1\x2d\xf6\x6e\xf3\x5a\xa2\x4f\x45\x92\x3c\xea\x1d\x23\x9e\x01\xf1\x56\x8f\x1c\x13\x39\x7a\xdc\xe3\xa1\x27\x91\xed\x95\x37\x7d\x6d\x96\x76\x67\x26\xff\xfc\xa9\x6f\x59\x57\x02\xe5\xeb\x88\xb6\x49\xd8\x4b\x8b\x72\x22\xd7\x33\xf1\xf3\xe1\x69\x1f\x27\x63\xaa\x0a\xac\xd8\x80\x20\xee\x64\x12\x5d\x3a\xa1\x99\x5a\x20\x90\x10\xca\x20\x8c\x49\x92\xec\x51\x00\x32\xdd\x67\x2b\x32\xb5\x38\xde\x27\x41\xb2\x1e\x53\xd9\xd4\x66\x0d\x0c\xe2\x51\x26\xc3\x12\x26\xcd\xe4\x9f\x62\x8b\x79\x09\x88\x6b\x81\xd6\x71\x04\xb3\xcc\x32\x31\x99\x5a\x1b\xf9\xc9\x3e\x1a\x51\xd5\xd8\x56\x19\x08\xd4\x3e\x89\x44\x7a\xda\x17\x33\xe1\x59\x40\x4c\x7f\x82\x28\x10\x5a\x21\x98\x4b\x92\x58\xca\xd4\xba\x30\x4a\x12\xb4\x1a\xa8\x67\x6a\x9a\x08\x01\x31\x25\x23\x67\x19\xad\x66\xf2\x4f\x01\x77\x57\x02\x67\xd4\x41\x28\x0a\x41\x46\x79\xaa\x2c\x53\x1b\xf7\x7b\xb4\x0d\xfd\xe1\xaa\xa6\x66\x2a\x40\x06\xee\x04\x12\x34\x53\xd7\x4c\xfa\x25\x20\xe7\x05\x20\xa2\x55\x6c\x52\x54\x96\xff\xcb\xd8\x4a\x3f\xf2\xa2\xcd\x60\x4d\x53\x23\x65\x18\x90\x35\x89\x00\xc9\xe8\x33\x13\x7f\x08\x98\xd9\x6f\x10\x4b\xb2\x4f\xf6\x08\x64\x93\xe6\x33\x33\xb5\x0f\xc5\x28\xde\xc3\x56\x47\xa8\x68\x6a\x9e\x04\x02\xf1\x25\xa3\x4f\xc2\xea\xdb\x4c\x78\x96\xa4\x87\x7f\x82\x46\x66\x1d\x6f\x63\x58\x45\x49\x6a\x36\xa3\x85\x79\x8a\xa2\x08\xee\x92\xbe\x9e\x59\x6e\x3d\x04\x68\x18\x24\xe4\x61\xdc\xa4\xdf\xd1\x4c\xfa\x25\x60\xe6\x05\x00\x22\x93\xdf\x95\x98\xa1\xf5\xa7\xba\x6e\x23\x0a\x53\x9b\x65\x98\xf1\xcc\x3a\x73\x12\xd5\x90\x3e\xc3\xa1\xcd\x84\x63\x63\xac\x70\x11\x2c\xb6\x0b\xa4\xa0\x23\xba\x25\xe0\x5b\x3e\xad\xbc\x15\x74\x40\x8e\xc6\x60\x0a\x4a\x39\x30\xc3\xac\x8d\xe1\x4b\x84\x77\x64\xe5\x74\x9a\xa3\xf2\x53\x8e\xe4\xa0\xa6\x10\x48\x43\x0c\xea\x01\x30\xb7\x46\x7d\xc1\x6a\x35\xe3\xff\x13\x63\x3f\x01\xf5\x84\x30\x50\xc6\x86\xe5\xdc\x2d\x11\xf6\x0b\x34\x9b\x15\x59\x21\xa4\xc8\x2b\x54\x97\xc5\xa9\x4e\xbf\xe3\x28\xdc\x9c\x56\xa1\x3b\x17\xd7\xb2\x64\x32\x3c\x35\x41\x97\x50\x86\x9c\x96\xc3\x4e\x81\x24\x5e\x81\xb2\xcd\xe0\x22\x5c\xf9\x98\x92\x33\x1f\x7d\xf2\x19\x88\x15\x63\xd7\x03\x2d\xda\xac\x37\x70\x8b\x72\x4b\xa2\x88\xdf\xb8\x45\x79\x32\xa5\x45\x4f\x4f\x3e\xdc\xa2\xec\xf0\x2f\xd3\xa2\xec\x30\xa5\x45\xbe\xff\xf4\x04\x37\xa9\xcd\xfe\x65\x9a\xd4\x66\xe6\x26\x69\xd0\xff\x2a\x5c\x9b\x8d\xd4\xbe\xa8\x72\x37\x2e\x4e\x4d\x55\x58\xfa\xb8\x9f\x76\x93\xa5\x06\x87\xad\x38\x28\x69\xf6\x0c\x69\xf5\x8c\x96\x5d\x31\xd5\x69\x1e\x1e\x90\x9e\x45\x2c\x4b\xcb\x5d\x9f\x54\xa9\x05\x0e\xb0\xc7\x28\x59\x26\xf0\x11\x6a\xf1\x0c\xbb\x68\xc9\xc5\x83\xec\xce\xdc\x5f\xd5\xb3\x3e\x8d\xa8\xf6\x4e\xee\xa5\x1d\xe9\x77\xd4\x96\xe1\x29\xd1\x4d\xac\x7e\xb2\x58\xed\x65\x96\x76\x69\x54\xe7\x48\xbe\x67\xeb\x45\xc9\x7e\xdf\x25\x67\xf2\xc4\xcc\xa7\x9e\x83\xff\xd1\xb3\xde\xcc\x8f\xfb\xc1\x82\xda\xf8\x
00\x7b\x0c\xa5\x05\x5d\x7a\x99\xf2\xdc\xb8\x65\x16\xc6\xe8\x48\xd2\xcc\x5d\xe4\x44\x55\x45\x19\xc6\x69\xf3\x4a\x8e\xe1\x8b\x08\x70\x0f\xdc\x5a\xf7\x6d\x95\xa7\x57\x49\xd2\x1a\x2b\x7f\x32\x93\x8a\x7f\xad\x50\x98\x14\xa7\xec\xf5\x2b\x14\xd6\xd2\x90\xa3\xc7\x48\x13\x4b\xc9\x78\x49\x2a\x32\xac\xfc\x3c\x93\x5b\x7e\xce\x9a\xb4\xcc\xd0\xd7\x47\x9e\x7b\x27\x0e\xb3\xf8\x53\x40\x35\xd1\xf9\xd1\x09\xca\xf6\x11\x44\x45\x34\x82\x76\xcc\xf7\x30\x3b\xa3\x31\xca\x21\xb7\x92\x24\xcc\x91\x5b\xe8\x92\x0c\x69\x4a\xfa\x4e\xf2\x55\x81\x40\xd1\x4c\x62\x62\x02\x33\xc2\x2d\x1f\xe1\x3f\x3a\xd8\xbb\xa8\xd9\xcc\x20\x10\x25\x5b\xa2\x6a\x05\x54\x92\xd8\x3d\xe9\x54\x07\x69\x8a\x00\x7a\x8a\x99\x21\x9a\x75\x0e\xd0\x0c\x06\x89\x06\x30\xd5\xf9\x76\x33\x44\x95\x66\xa0\x92\xd2\xc3\x2d\xe0\xfc\x70\xac\x58\xed\x45\x35\xb7\x8e\x24\xd9\x32\x0b\xd3\x53\x83\xda\xe6\x2d\x14\x54\x6b\x3d\xc6\x94\x51\x83\x0b\x64\x4f\x60\xbe\xa2\x6c\x1d\xcf\xc4\xab\x5c\x9c\x1d\x66\xa3\xe0\xea\x7c\x36\xa7\xc6\x82\xa4\x96\x73\xb3\xc3\x4f\x63\xea\x8d\xae\x24\x82\x85\x49\x52\x9c\xf4\x9a\xe2\xef\xa8\x39\x19\x51\x45\x8d\x52\xb9\xce\x6f\xe0\xd5\x52\x69\x80\x57\x5c\x73\x02\xaf\x83\xc9\x05\xac\x72\x50\xd9\xb4\xb3\x32\x96\x5b\x81\x2d\xe6\xc0\x1d\x35\x0b\x9b\x61\xf0\x81\x99\x4a\x06\x09\x32\x63\x1c\x35\x27\x9b\x39\xd7\x18\x9f\xe8\x0e\x4c\xf5\xb5\x1e\xb2\x22\x01\x88\xba\x75\x3e\xda\x0d\xf9\xf3\xad\x2f\x39\x22\x7d\x3c\xda\x86\x8c\x7d\x58\x8c\x1d\x39\x92\x7c\x09\x33\x70\xb6\xa5\x21\xe9\x2e\x34\xe9\x42\xe4\x6e\x91\x6e\x76\x78\x9b\x74\xfb\xfa\x6f\x97\x6e\x76\x98\x10\x64\x6c\x37\xba\x70\x09\x71\x20\xbd\x15\x7b\x4d\x3c\x88\x31\x31\x0f\xcf\x7b\x45\x40\xdf\x25\x91\x8a\x96\x47\x85\x24\x77\xe0\xf4\x58\x6e\x94\xee\xa7\x2d\x33\x8a\x96\x27\xa2\xc7\x44\x52\x61\x02\x39\x28\xc0\x76\x4b\xc9\x53\x85\xfa\x73\x1e\x44\x3a\x42\x21\x0b\xa1\x94\x4c\x5f\xda\x7b\x39\x7f\x05\xd3\x6f\x35\xc5\xb4\x58\x8d\xe6\x38\xed\x38\x0e\xa3\xba\xc8\xce\x0d\xd2\x65\xa3\x24\xa1\x91\x04\xc6\x51\x91\xcc\x6c\xb6\x7c\xb1\xb4\x03\x79\x26\x40\xad\x2e\xd0\x60\x30\xd9\xef\x75\xfe\x3d\xcc\xd2\xc4\xdd\x23\x94\xe0\x48\x42\x4a\x99\x08\xb0\xae\x1b\x74\xbe\x52\x47\xae\xf6\xe0\xe8\x9a\xa2\xc0\xfa\x0e\xf4\x06\xf9\xa6\x8d\xe7\xa6\xff\x74\x49\xfe\xe4\xdd\xea\x59\x22\xc9\x77\x5e\x79\xbd\x6a\xc8\x49\xba\x09\x33\xfe\x08\xe7\x62\x4d\x94\x45\x26\x5b\x4b\x6f\xe6\xaf\x37\xb3\xf5\xd3\x6c\xbe\x7d\x84\x3d\x51\x7c\xae\x9b\x22\x77\xb9\x6d\x20\x81\x7e\xaa\xcc\x49\x84\xe2\x97\x90\x3d\x86\x0d\x56\x3a\xa9\xf6\x0e\x86\x91\xec\x15\x29\x57\xd6\x2e\x79\xcf\xc2\xac\xb0\x44\xd7\x30\x43\xfc\xe5\x30\x5b\x06\x48\x9d\x39\x36\x0d\xb6\xcd\x61\x85\x6e\x25\x73\x58\x98\xf1\xff\x56\x34\x6f\x36\x00\xc7\x54\xca\xd0\x52\x1d\x9b\x15\xac\x43\x36\xdc\x33\x3a\xe6\xf1\x75\x4c\x64\xf4\x6e\x1d\xa4\x62\xae\xc2\xc7\x9a\x32\x63\x54\x2d\x53\xd7\x07\x3f\x6a\xa6\xc1\x40\xac\xaf\xbb\x33\x54\xbc\xc0\x2a\xca\x1d\xa1\x4c\xf7\xbf\xf5\xd7\x49\x1a\x87\x4d\x51\x99\xba\x54\xc2\xb3\x1b\x40\x62\x58\x36\x37\x28\xe3\x48\x16\x13\x54\xc7\x55\x4a\x6f\xdf\x78\x03\x93\x02\x1a\x43\x97\x91\xc4\xb9\x06\x66\xc8\xbb\x2e\x9e\x83\xb9\xe8\xeb\xef\xcc\x95\xed\x86\x65\x2c\x0b\x3c\x05\xe6\x9b\x58\x11\x2e\x75\x11\x58\xea\x12\xe4\x9a\x59\xb2\x9b\x34\x95\xf6\x0d\xd6\x2a\x3d\xdd\xc9\x23\xd2\xab\xab\x7a\x84\xff\x67\xf8\xc4\x20\xf0\x66\xab\xc5\x34\x9f\xc8\x5a\xa8\xdb\xdc\xee\x85\xd5\x64\x9a\xa0\x24\x93\xc7\x80\x14\x15\xe6\x7d\x6c\x62\xc9\xe4\x1d\x95\xd7\x63\xd8\x1b\xe3\x21\x25\x50\xab\xd6\x09\xdd\x0c\xfb\x48\x86\xea\xbf\x35\x7d\x04\xfc\xa4\x06\x6b\xf4\x95\x16\xac\x83\xa0\xe3\x7c\xa6\x85\xc2\xb4\x7a\xa3\x7c\xe7\x78\x6a\xf6\x6a\x13\x7c\x28\xab\x72\x8b\x17\x35\x56\xbd\xc0\xea\xac\x39\xa9\x8e\xef\xb7\x79\xd2\x41\
x34\xa6\x4f\xd0\xb0\xd2\x8e\x66\x73\xb2\x37\x1d\x81\xc8\xd0\x75\xb2\xe7\x50\xf1\x4c\xf4\xa8\xb6\xea\x76\x83\x34\x9e\x8d\x09\x5e\x75\x0c\x9a\x29\x7e\x75\x94\x31\xd4\xe9\xdf\x60\xe5\x88\xde\x2b\x93\xdb\xe1\x65\x09\x72\xc5\x44\x55\xbc\x38\xfd\xd2\x84\x5c\xd4\x41\xd2\x99\x6d\x8c\x4e\x0d\xaa\x9e\xc9\x0f\x92\x8a\xbc\x66\x45\x12\x03\xe2\xf8\x14\x36\x7c\x5b\xf2\x7a\x89\x95\xe5\x0b\x5d\x86\xdb\x30\xc0\x99\x70\x4a\x2b\x8c\xbf\xf1\xc2\x7f\x9c\xeb\x26\xdd\xbf\xba\x3c\xd7\x38\x2b\x86\x17\x21\xa4\x56\xd1\x15\xa4\xd1\xec\x99\xce\x77\xdd\x5f\x04\x63\x98\x57\x3f\x6b\x4b\xcb\x1f\xc2\x91\x33\xc3\x82\x86\x11\xa1\xf0\xfd\xc5\x90\xe0\x5c\xac\x29\xac\x03\x5e\x7a\xa2\x66\x15\xfa\x7d\x74\x41\x3a\x81\x67\x4c\xce\x0d\x77\xb3\x69\xb1\xcb\x02\xad\xac\x71\x75\xab\x72\x10\x6d\xb6\x42\xa5\x2f\x79\xa9\xf8\x65\xe3\xfe\x3b\x75\xa4\xb5\x07\x8c\x7e\xb2\xeb\x89\xba\x09\x9b\x34\x7e\x1e\xb1\x62\x17\x80\x87\x3e\x50\xdb\x30\x61\x29\xa4\x8f\x61\xdd\x05\x35\x8a\x3a\x77\x33\x14\xd2\xed\xd7\x2b\x59\xa6\x07\x39\x50\x2f\x52\x14\x6e\xa8\xe2\x7a\x24\xdd\x7d\x43\x86\x34\x7c\x39\x18\xdf\x82\x70\xae\x51\xc5\x63\x37\x32\x49\x21\x77\x1e\x01\xa5\xb5\x5e\xa8\x15\x68\x7b\x43\xc4\x4f\x95\xb7\x6d\x62\x19\xdc\x55\xa2\x44\x38\x64\xc7\x88\xb8\x85\x64\x26\xed\x3b\x01\xde\x76\xfb\x4e\xd4\x77\x44\x10\xdc\x9b\xe2\x47\xba\xad\x0f\xbc\x59\x0e\xbf\x9f\x0b\xa0\xca\x5d\x72\x93\xf6\x8a\x60\x54\xfd\xae\x09\x8c\x8d\xff\xba\xf0\x2d\x11\xf3\xf5\xea\xda\x7f\x58\xe1\xaf\xd9\x47\x8a\xae\xf2\xe3\x9c\x6f\x9d\x1d\x06\xdd\x51\xd0\x0b\xb8\x21\xe8\x1a\xca\x4c\xf1\x2b\xa1\x7a\x74\x4e\x48\x3f\x2e\x15\xd8\x30\x57\x2e\xfa\x8e\x4e\x4d\xdd\x77\x0d\x3f\x30\x62\xdf\x25\xca\x2e\x36\x94\x23\x31\x5a\x28\x61\x91\x6f\x28\x34\xe1\x5a\x3f\x25\xea\x4e\x58\xcf\x5b\x07\x71\x2c\xe1\x12\x84\xd6\xa1\x1f\x0e\x88\x04\x91\x31\x89\x75\xf8\x24\xc9\x75\x28\x3b\x09\xbe\xa1\xd5\xe3\x65\x3d\xb2\xca\x8e\x57\xa9\x8f\xc5\xcb\x4f\x72\x2b\xaa\xa2\x4c\x8a\x17\xec\x0a\x0e\x87\x0c\x0d\xf6\x75\x10\xc7\x5a\x0b\x56\x71\xb4\x1f\xab\xf8\xbc\x17\xbb\x43\x36\x76\x8a\x6c\xcf\x91\xb2\x41\x8b\x5f\xf6\x22\xe2\x19\xa3\x2b\x9b\x60\x13\x6d\x97\x0a\xb6\x75\xbc\x59\x6d\x12\x05\x9b\xa8\x2d\x3d\x89\x61\x7d\xf1\x17\xcb\x99\xbf\x0c\x66\xfe\xca\x03\xda\xaa\xe8\x4c\x8f\xd8\xa2\x35\x13\x7a\x60\x82\xde\x8c\xad\x04\x68\x8e\xd0\x9a\x29\xba\x43\xbb\x59\xed\xfb\xf5\x1a\x6d\xd6\x56\xdd\x81\x7b\x94\x1e\x5b\x1a\xd8\x8b\x4e\x2f\xaf\x87\xd7\x50\x05\x2c\x63\x34\x27\xf0\xb7\xdb\xc5\x56\xc1\xe5\xa3\x0d\x5a\x2c\x25\x5c\x92\xde\x30\xf4\x93\x96\x34\x95\x16\xaa\x1a\xc3\x50\x5a\xf4\x65\x74\xab\xa7\x68\xcb\xa8\x2a\x90\xae\xf0\x56\x4c\xd1\x14\xda\xad\x6a\x5f\xc7\x9b\xe5\xc2\xee\x5e\x81\x5e\x4c\x4f\xfb\x62\x80\xd8\x26\x0c\x22\x4d\xb0\xa4\xb0\x47\x31\x46\x43\xfc\xc5\x76\xa9\x8d\x53\xdf\xdf\x84\xdb\xa8\x47\x24\xaa\x07\x41\x3c\x62\x4a\xbe\x98\xf9\xeb\x60\xe6\x6f\x97\x72\xb3\x14\xcd\x20\xd8\x2c\x6a\x31\xae\x9d\x13\x74\x62\x04\x3c\xa0\x10\x94\xf3\x49\xda\x40\xba\x50\x65\xdc\xdb\x78\x1b\xbb\xcf\x81\xfa\x8d\x9d\x95\xbb\x74\xa8\x7d\x70\xcf\x67\xec\x7b\x1b\x85\x20\x2d\x94\xb0\xc8\x4a\x01\xe3\x42\x5e\xb8\xf5\x3c\x05\x57\xb2\x78\x42\x9e\x27\xe1\x12\xf5\x82\xa3\x1f\xa1\x1a\xab\xd5\xcc\x7f\x5a\xcc\x36\x5a\x0b\x15\xe5\xe0\x28\x2d\xfa\x31\xba\xd5\x13\x54\x64\x5c\x15\x40\x4b\xba\x56\xc0\x8a\x02\xf7\x35\xed\x56\xa5\x05\xf1\xfa\x69\xe5\xd9\xcd\x06\xd0\x8b\xf4\xb8\xa1\x5d\x2f\xe9\xc2\xa1\x2a\x5a\xb6\x9a\xd8\x23\x19\x63\x3a\xe2\x6d\xb0\x58\x2c\x14\x54\x51\x12\xf8\x0b\x4f\x44\x25\x2a\x09\x43\x3e\x69\x45\x4f\x6e\x9d\xa2\x22\x0c\xa1\x45\x43\xc6\x36\x78\x82\x82\x8c\xaa\x01\xe8\x07\x6f\xc1\x14\x3b\x42\xfb\x53\xed\xe4\xc0\xdf\x07\x89\x5d\x3d\xf4\x0e\x24\x47\xea\x06\x8c\x88\x74\xdb
\x9c\x54\x28\xe0\x18\x65\x42\x02\xb4\x46\x2a\xa6\x24\x44\x1e\x5a\x09\x98\x44\xdd\xa0\xa8\x47\xa8\xc6\x72\x3b\x0b\x96\x4f\xb3\x40\x8c\xac\x28\x36\x59\x37\x28\x42\x9b\xf1\x18\xd7\xda\x09\x9a\x31\xa6\x02\xa0\x18\x8c\xfb\x49\x66\x83\x74\xa5\x66\xa2\x93\xa7\x64\xc0\xbf\x40\xbd\x37\x7c\x40\x72\xb1\x5c\x84\x4b\x55\x0d\x69\x61\x8f\x62\x54\x44\xba\x08\x36\x81\xe6\x17\x93\xc0\x0f\x96\x3d\x22\xd9\x64\x54\xdf\x46\x68\xc5\x2a\x98\xad\xb6\xb3\xf5\x52\x6e\x94\x66\x2d\xaa\x6f\x36\x85\x18\xd7\xca\x49\x96\x62\x10\x1e\xb4\x13\x98\xf3\x49\xd1\x06\xe9\x40\x2d\x4c\xf2\x43\xdf\x6e\x25\xf4\x5e\xe3\x97\x3f\x2b\x0b\x22\x7c\x11\xc0\x7a\x08\x00\x3e\xab\x65\x5c\x37\x50\x48\x8d\x5b\x35\x19\x58\x8b\x50\x70\x8a\x8a\xa4\x92\xbb\x69\x15\x45\xc5\x2f\xeb\x97\x4a\xa2\x53\xb5\xf1\xbd\x08\x77\xcd\x04\x95\x9b\x56\x15\xd0\x3e\xad\x89\xd3\x96\x5a\x8c\x02\x9a\xba\xd4\xc2\xf9\xd0\x96\x5c\xf8\xd2\xc2\x5b\x75\x51\x5c\x8d\xd0\x88\x8d\xd1\xc6\xc1\x35\x0e\x0d\x2b\xa4\x8f\x6f\x5e\xa7\xd1\xa9\xc0\x5a\x09\xac\xdb\x8c\xef\x51\x53\x37\xdd\xa0\x99\x6f\x58\xc4\x01\x9a\x3a\x45\x3b\x2d\x02\x9b\xbe\x98\xd3\xf1\x22\x2f\xea\xf0\x85\x8c\xb7\x6a\xa7\xb8\xf6\xa1\x90\x1a\xe5\x67\x87\xd6\x53\x14\x9c\xa0\x66\xbe\x65\x25\x48\xc5\x6f\xd0\x49\x75\x65\x68\x7c\x2f\xc2\x5d\x73\x8b\x3e\xde\xba\x4c\xa4\x35\x71\x8a\x2e\x5a\x04\x34\x75\xb9\x88\xf3\x21\x2e\x1b\xf1\x85\x93\xb7\xaa\xa1\xb8\xd6\x22\xd2\x19\xb5\xb6\x34\xb4\x78\x23\x22\x84\x14\xf0\x0d\x6b\x4d\x12\x6a\x58\xf7\xe4\xb5\xa7\xf1\xfd\x06\x74\xc6\x0d\x5a\x77\xdb\x42\x94\xdc\xac\x49\x21\xa2\x59\x18\x93\x17\xa4\x38\x17\xca\xc2\x14\x5f\x8c\x79\xab\xce\x89\xeb\x37\x0a\xa9\x31\x6a\x37\xb8\x26\xa4\xe0\x84\x34\xef\x4d\xab\x59\x2a\x7e\x58\xfd\xb4\xd5\xad\xf1\xbd\x08\x77\xcd\x0d\x4a\x78\xf3\x52\x97\xd6\xc4\x29\xaa\x68\x11\xd0\xd4\x25\x2f\xce\x87\xbc\xf4\xc5\xd7\x7c\xde\xaa\x88\xe2\x32\x91\x4c\x69\x8c\x1e\x0e\xae\x3c\xc9\x28\x21\x35\x7c\xcb\x7a\x99\x82\x1d\x56\x42\x75\xfd\x6c\x7c\x07\x82\xbd\x72\x83\x0a\xde\xba\x98\xa6\x36\x6f\x8a\x02\x5a\x24\x33\x75\x51\x8d\xb3\x21\x2d\xae\xf1\x85\xa5\x37\x1b\x42\x61\x2d\x4a\x22\x24\xab\x1f\xcf\xaa\x35\x7d\x81\x4b\x42\x0a\x29\xe0\x9b\x16\xe5\x64\xec\xb0\x02\x2a\x8b\x74\xe3\x3b\x10\xea\x95\x1b\xd4\xef\xc6\x15\x3b\xa5\x69\xb0\xf2\xdd\x22\x97\xe9\x8b\x77\xfd\x40\xe8\x17\xf1\xf8\x42\xd6\x5b\x15\x50\x5c\xfb\x12\xe9\x8c\x31\x7f\x83\x8b\x69\x22\x42\xd8\xf8\xdd\xb8\xf2\x27\x21\x36\xd9\x3d\x71\x25\x70\x7c\xaf\x01\x5d\x71\x93\xcd\xbb\x65\x59\x50\x6e\xd6\x14\x7b\x67\x11\xc5\xc4\xe5\xc1\x2c\x3d\x7d\xbb\xa8\xdb\xfc\xa6\xae\x6e\x61\x24\xb2\x06\x79\xde\x6a\x1d\x2d\x9e\xd5\xad\x6b\xe7\x53\x82\x2a\xdc\xe6\x31\xd9\x47\x6c\xd4\xe4\xef\x0c\xa7\x5e\xad\xe0\x8a\x62\xa7\xf4\x7b\xc4\x08\x22\xf5\x13\xc3\x49\x57\x23\x71\x05\xa8\xcf\x3e\x40\xd2\x7f\x90\x3a\x87\x3b\xe6\x38\xe8\x69\xd4\xb9\x40\xa3\x4f\x68\x73\x97\x3c\x19\x18\x25\xd9\xe1\x69\x4c\x01\x26\xc0\xfc\x28\x80\x8b\xe7\xc6\xc8\x09\x72\xb2\xb1\xf7\xd7\xe6\xb5\x44\x5f\xa2\x73\xd3\x14\xa7\xaf\x3d\xf4\x4c\x78\x59\xa1\x1a\x35\x86\x77\xf5\x39\xca\x53\xf1\xa5\xb8\xad\x7e\xbe\x0f\x13\xd4\xed\x43\xf4\xc4\xad\x98\xac\x90\xee\xa3\xc4\xed\x0e\x2b\x0a\x4e\xc6\xd8\x45\x48\x10\x15\x17\x59\x16\x96\x35\x92\xce\xe4\xf5\xc5\x14\x5e\x3e\xc3\xd3\x54\x86\xd7\x2c\xd7\x5a\xf1\x72\x25\xe9\x17\x87\xa0\xa8\x34\x3b\x5a\x78\x76\xa5\x6f\x83\x66\xf2\xf2\xba\x34\x71\xbb\x63\x9a\x24\xe8\x24\xb6\x96\xc2\x38\xf3\x05\xdb\x34\x7a\xed\x4c\xc6\x8c\x3c\x9d\x4b\x1d\xf1\x55\x35\x2b\xbb\x5d\xb8\x6f\x50\x65\xdb\x22\xef\xf5\xdc\x88\x9b\xaf\xe7\xc1\x6a\xa5\xef\x3f\x66\xa5\x7c\x4f\xf4\xc3\x83\x98\x82\x7e\xbe\x40\x39\xdd\x97\xdb\x69\x21\xdd\xca\xdc\x95\x43\xa9\x85\xba\x84\x45\xec\x37\xa5\x0e\x56\xd1\x9b\x8
7\xf2\xb2\x79\xe5\x8d\x54\xf6\x8e\x77\xb0\x39\x3a\x9d\x6d\xa7\x29\x69\x85\xee\x50\xa5\xef\x79\x9e\x7c\xae\x72\x9f\x15\x61\xb3\xc3\x60\xcf\xc2\x35\x3b\x9e\x90\x8c\x89\x59\x02\xde\x85\xbb\x39\xcb\xc5\xe2\x39\x6a\xaa\xa7\x67\x39\xb6\x10\x76\x58\x13\xfc\x59\x5a\x37\x2c\xfd\x9f\x96\x29\x0f\xf6\x10\x83\xf9\xf3\xc4\xbc\xaf\xfe\x4a\x3f\xab\x49\x33\x43\x50\x95\x72\x94\x5e\xb3\x9c\x15\x60\x4d\x04\x6a\xfe\x0e\xaa\xe7\xdd\xa0\x71\xba\xba\x0e\xaa\x1e\xd4\xcc\x51\x2a\x98\xa4\xdf\xd3\x04\x55\x17\xa5\xbd\x9d\xda\xa8\x76\xc0\x7e\xb1\x43\x87\x36\x6d\x90\x25\x61\xab\xe2\x41\x7c\xea\x42\xe2\x0c\x85\xd5\x2e\x2a\x9a\xa3\xb6\xdd\xdf\xa8\x9a\xec\x5c\x18\xb4\xfb\xbf\x57\x45\x1c\x7e\x08\x39\x11\x25\x26\xf9\x2e\x77\xb9\x50\xde\x86\xb0\xf6\xb7\x7e\xa4\x85\x10\xa6\x51\xc0\x26\x41\x12\xc2\x2e\x56\x93\xc9\xb0\x9d\xe7\xc2\xf8\x19\x49\x84\x7f\x98\x94\x89\xf4\x11\x84\x4c\xe6\xa6\x8f\x32\xd2\x70\x83\xfc\x52\x0f\x71\x44\x61\x22\x8c\x27\x2a\x6e\x25\x12\x99\x0f\x24\x9b\x53\x8e\x9e\x33\x16\x75\xb1\x0a\x81\xc9\x4c\x88\x51\xf8\x90\xb4\xe4\xdb\xa1\xd9\x1f\xc9\x50\x57\x0f\xe5\x08\xc5\xa6\x33\x59\x3a\x29\x21\x28\x22\x2f\x58\x92\x29\x8d\xbe\x70\x44\xcd\x97\x8e\xa8\xd1\x5f\x46\xd4\x54\x09\x55\x02\x4c\x35\xb9\x4b\x08\x8c\xd5\xa5\xe9\x01\x84\x7e\xe8\xbd\x10\xd8\xf6\xe4\x75\xac\x06\x64\x02\x0e\x90\x59\x07\x3f\xfe\xa8\x74\x61\x5f\xa8\x49\xd8\xe9\x1f\xc1\x5a\xc2\x2b\xb3\x76\x80\x44\x81\xb7\x03\x18\x60\x36\x8c\x30\x72\x36\x2c\x96\x33\xa9\x6c\x69\x67\x34\x45\x91\x45\x61\x75\x8f\x1c\x57\xf2\x61\xb1\xba\x09\xab\x46\x3b\x2b\x46\xde\x93\x57\x12\x79\xf3\x61\x41\x45\xa0\x78\x2a\xb9\x23\xb7\x7b\xb9\xf1\x31\xcd\x12\x3a\xb9\xdc\x65\xa1\x5c\xa0\xfa\xa2\xc7\x8b\xec\xdb\x3d\x0d\xb1\x80\x53\xf5\x55\x66\xc8\x89\xc4\x9b\xa2\xa4\x6e\xb8\x63\x43\x76\xbc\xca\x4b\x8d\x72\x4f\x48\xef\x07\x49\xf5\x55\x4f\xac\x41\x8b\x1c\xe1\x66\x9a\x18\x92\xde\xa9\xfc\x30\x69\xf5\x31\x20\x0c\x30\x42\x66\xd4\x74\x0d\x88\x88\x61\xb3\x75\xbf\xda\x4d\xaa\x3d\x18\x85\x42\x5d\x7f\xb8\x8f\xf0\x18\x69\x9b\x08\x75\x45\xbc\x59\x4a\x3f\xaa\xad\x70\xeb\x32\x4b\x1b\x25\x79\xdc\x7c\xb5\x0e\xa4\xc4\xa9\x34\xcc\x63\xa5\x36\x44\x86\xa8\x4e\x9b\xb3\x1b\xaa\xf3\xa9\xfc\x48\x36\x95\xf4\xae\x2c\x18\xdd\xac\x94\x75\x02\xb6\x16\x61\xa5\x99\x1d\x46\xd2\x84\x48\x6a\x14\x3b\xaf\xdf\x99\xbe\x24\xad\x50\x4c\xfc\x6f\x5c\x64\xe7\xfc\xf4\x0c\x97\x2a\x07\x70\xa9\xad\x14\xcf\xdf\xf6\x76\x72\xca\x19\x5c\x88\x37\x67\x84\x9b\xb8\xa8\xcb\x1d\x80\x3f\x36\xba\x9b\x9f\x46\xb8\xad\x9f\x46\xb8\x2d\x15\x46\x76\x5b\x38\xe0\xc7\x5e\x4b\x3d\x07\x6d\x8a\x1e\x86\x8d\x8e\xc5\xde\xa8\x91\x88\xc5\x5e\x5c\x6c\x46\x60\xa4\x35\x55\xc8\xd9\x6c\xc4\xa0\x49\xd0\xcd\x94\x91\xd0\xdd\xcc\x33\x88\xf6\x46\x3b\x7d\x03\x2e\x83\xc1\xbe\x9f\x44\xde\xc5\x76\x03\x82\xfa\x35\x09\x9b\x90\xb5\x82\x2d\x22\xd6\x5f\x09\x66\x47\x58\x24\x24\x79\x04\xa2\xa2\xfd\x3a\x1b\x07\x8f\xd1\x17\x36\x60\xc1\x4b\x4d\xa5\x63\xa8\x4a\x49\x02\x2b\x4b\x64\x4d\x06\xdb\x42\xb6\xec\xe2\x3d\x3e\x83\x07\x97\xc5\x68\xd0\x3e\xa3\x1a\x93\xdf\xa0\x6e\x2a\xd4\xc4\x47\xc9\xc0\xf2\x32\xd1\xf2\x09\x54\x95\x44\x1a\x3a\x0f\xdd\x9c\xa2\x9f\x5f\xf9\xd2\xfc\xaa\xfb\xc5\x28\xa8\x37\x5f\x7a\x16\x7a\xdd\x74\xc6\x0c\xc1\x66\x47\x66\x00\x79\x9e\xb6\xb0\x50\x9b\x0d\x65\x34\x16\xb3\x22\xbc\x39\xa7\x84\xad\xd9\xc3\xa6\x08\xe0\x75\x72\xad\x5b\xdd\xc3\x88\x5e\x82\xd6\x0b\xb4\x6a\x17\x43\x3e\x06\x7b\x26\x7c\x6d\x4d\xca\x7e\xe9\x88\x9e\xa0\x62\xe0\x82\xa9\x91\xf7\x8a\x00\xed\x19\xce\x97\x7e\x4b\x46\x74\x9d\xcc\x84\x1c\xe9\x83\x39\xd1\x01\xc6\xa1\x44\xe0\xb7\xa4\xfa\x1e\x60\xdc\xf6\x61\x6c\x30\xd9\x37\xc1\x07\x1b\x69\x3b\x1c\xb3\xc8\xe2\xaa\xf5\xe0\x40\x1c\x33\xec\xa6\x0d\x32\x2d\xec\x50\x63\x82\xb1\x
38\x46\x4e\xbc\x0d\xe8\x46\x52\x55\xc1\x04\x67\x37\xa6\xc6\xbd\xe6\x91\xc3\x3d\x7f\x91\x96\xf8\x87\x05\x2b\xaf\x1b\x8c\x30\xa8\x63\xc5\x32\x4d\x05\xc6\x57\x51\xba\x72\x92\xbc\xa6\xb1\xaa\x05\xa9\xf7\x5a\x3a\x11\xa5\xf0\xe3\x98\x3e\xbf\x88\x9f\x5e\x14\x35\xb0\x2f\xf4\x8e\x88\x7a\x7a\xd3\xe3\x81\x6b\xdc\xf0\xfd\x05\xfa\x47\x55\x08\x8e\x4c\xde\x80\xd5\x46\x08\x16\x0a\x74\xfa\x97\x7a\x8c\xd3\xbf\xb3\x85\x37\xae\x12\x8b\x1b\xd8\xd2\x3b\xf5\x0e\xe6\xe1\x22\x67\xfa\x87\x5a\x7e\x97\xc1\xd3\x2f\x6a\xab\x33\xe2\x49\x56\x58\x28\x1f\x6d\x7b\x4d\x4b\xa5\x53\x1a\xcb\xc8\x1b\xc4\x3f\xc0\x33\xa8\x16\xf6\x3a\xec\x5b\xc6\xc8\x16\x4e\x63\x6b\x1a\x43\xba\xe6\x2a\xd9\xdb\xde\xfc\x05\x29\x4f\x4f\x42\x5c\xa8\x2d\x63\xc9\x9f\xc2\xa8\x9e\xfa\x62\x5a\x62\x29\x83\x28\x30\x89\xe3\xac\xbb\x7e\x77\x51\x98\x07\x57\xde\x91\xf0\x04\xd9\xf2\xa4\x8e\x38\x73\x67\xc0\x4d\x7a\xdd\x96\x81\x55\xdd\xfe\x85\x03\x5c\x4c\x68\x36\x98\x07\x0c\xa4\x47\x95\xe2\xd6\xa6\x44\x8b\x64\x63\x6c\x0a\xff\x40\x3a\x25\x9f\x6c\xf7\xcd\x7b\x1a\x46\x20\xf1\x2b\xdf\xd5\x35\x9c\xa3\x4f\xda\x21\xc2\x37\x73\xd1\x3d\x22\xe0\xb7\x76\xfc\x9e\x6b\x22\xd1\x43\x7d\xae\xff\x4e\xe9\xf0\xa0\x2b\xec\xc5\xd2\x0a\x95\x28\xc4\xc0\xec\x49\x7c\xd7\x35\x97\x4e\x9c\x1c\x7d\xfe\x44\x3c\xe7\xca\xfb\xe8\xac\xbc\x8f\x7d\xbf\xb1\x50\xdc\x92\xec\x10\x9e\x50\x0d\xd7\x1f\x37\x94\xb4\x6d\xaf\xe7\x2a\xfb\xf4\x90\x84\x4d\xb8\x23\xbf\x3f\xd7\xdf\x0f\x3f\xb6\x79\xf6\x1c\x1f\xc3\xaa\x46\xcd\x97\x73\xb3\xdf\xce\x3e\x2e\x7e\xae\xbf\x1f\x9c\x36\xcf\x4e\xf5\x97\x1f\x8e\x4d\x53\xee\x3e\x7f\x7e\x79\x79\x99\xbf\x2c\xe6\x45\x75\xf8\x1c\x78\x9e\x87\x6b\xfe\xe0\x7c\x4f\xd1\xcb\x9f\x8b\xf6\xcb\x0f\x78\xf4\x6c\x9d\xed\x0f\x1f\x17\x7f\xf9\xb8\xf8\xb9\x0c\x9b\xa3\xb3\x4f\xb3\xec\xcb\x0f\x1f\x83\xc5\x7e\xbf\xff\xc1\x49\xbe\xfc\xf0\xcb\x7a\xbe\x5a\x2f\xe7\x9b\x55\xe6\x2e\xe6\xab\x27\x67\x31\x5f\xfb\x81\xeb\xcf\x57\x8b\x2d\xfe\xef\xea\x6f\x9e\xb3\x9c\x07\x6b\x27\x98\x3f\x6d\x96\xce\x66\x1e\xac\x9c\xad\x13\xcc\xfd\xa7\xc5\x3f\x7f\xf8\x4c\x11\x63\xaa\x1f\x17\x7f\x79\x78\x1c\xdb\x45\xd8\x2c\x35\xa8\xca\xd3\x53\xd8\xd8\x06\xaa\xf9\x70\xef\x6f\xd9\x83\x4b\x67\x29\xf6\x60\xdd\x54\xc5\x37\x24\xf7\xa1\xe7\x04\xc7\xa5\xb9\x3b\xc8\xa4\x6e\xb4\xba\x89\xba\x6a\xaa\xf9\x2f\xa6\x68\xee\xd2\x71\x97\x82\xaa\xc5\x69\x15\x67\xc8\xa9\xbe\xfc\xb0\xf8\x41\x56\x39\xb3\xca\x50\xee\x6b\xb7\x6e\x42\xdc\xac\x09\x0b\x59\xe3\xbe\xe5\x18\x29\x69\x69\x59\x95\x5d\x62\xca\xe8\x1f\xa8\xfd\xa3\x01\x1b\x8f\xfa\xa4\x5c\xf0\xa6\x7b\x8f\xba\xfb\x86\x3d\xef\xe3\xb3\xf9\xaa\x4e\x2d\x61\xa8\x3f\x67\x0b\x55\xf2\x82\x95\x7d\x05\x0a\xce\x7e\x2a\xec\x8a\xc2\x9e\xd2\x79\xdf\x11\xb6\x02\x6d\xd4\x62\xb1\xa0\xe3\x2b\x70\xbc\xbf\x91\x31\xf6\xcf\xdc\x73\xb0\x49\x5a\x1c\x97\xba\xf9\x71\x3a\x2f\xe1\x54\x74\xe7\x29\xed\x05\x83\x5f\xd8\x96\xad\xe3\x7b\xe5\xd4\xfb\x7a\xb9\x1f\x0c\xcb\x12\x85\x55\x78\x8a\x91\xe0\x06\xd5\x42\xe5\xb7\x22\x7e\x68\x0f\xb8\x76\x89\x2e\x58\x67\xf2\x55\xac\x12\x8e\xfe\x8e\x36\xbe\x9d\x8f\x2c\x80\xc3\x89\x4d\x15\xf2\x63\xf7\x88\xa9\x61\x0f\xab\x2e\x5e\x4d\xac\x07\xa5\x14\xc8\xad\x73\xeb\xc5\x80\xcf\x13\xee\x14\xed\x27\xd0\x9b\xd5\x47\x29\x2b\xbc\x25\x7e\xbf\x71\x34\x6a\xdf\x0d\xb4\x84\xf2\x17\x61\xf3\xed\x92\x06\xf7\xd3\x90\xef\x3c\x20\x94\x17\x12\xd6\xcb\xb1\xb6\x92\xbd\x5f\x3d\x65\xe1\xd1\xe1\xa1\x85\xda\x6a\x38\x2f\xa1\x81\x63\x4c\xef\x99\xdf\xd1\xa9\x6c\x45\x5e\x4d\x31\x5f\xa3\xcc\xd5\xef\x18\x9b\xf6\x37\x5e\x8f\x5e\x84\x87\x2e\x2e\xc8\xc2\xd3\xe1\x13\x3a\x3d\xca\x1b\x6f\xbb\x6d\xc1\x3f\x1f\x8b\xa2\x46\xd8\x0a\xa2\xf9\x7c\xfe\x00\xe3\xe0\x97\x1f\xc0\xe2\x20\x1f\xe0\xfb\x95\x8d\x67\xa6\x92\xe4\x99\x4b\x66\xad\x4c\x05\xee\x2d\xa7\
xb7\x7e\xc6\xc0\xda\x48\xa9\xb1\x8d\xe8\x03\x9d\xc9\x7b\xa4\xeb\xc7\x3f\x57\xc5\x4b\x8d\x1e\xae\xf3\x53\xf8\xfd\x1e\xdb\xe6\xe4\xbc\xe9\xc0\x6d\xc5\xd2\xbe\x77\x42\x96\x1e\x56\xb2\xee\x77\x25\x5a\xc2\x41\xf9\x0a\x45\xf7\xdb\x96\x4f\x9b\x03\xcd\x4d\xa7\x7f\x30\x40\x13\x46\xb5\xfc\xa1\x5d\xec\xf8\x24\x49\x7a\x30\x87\x3c\x91\x0d\xd9\x72\xdb\xe8\x7a\x91\x0c\x46\x1a\x66\x4d\x66\x6e\x58\x7e\x95\xbf\xa0\x68\x8b\xec\x7c\xdc\xe8\xd4\xc4\xbe\x51\xde\xd0\x5e\x92\xdd\x28\x55\x36\xa7\xfb\x0b\x34\xd5\xd2\x79\xb7\x9e\xf6\xd2\xfb\x92\xec\x88\xee\xe9\x41\xfc\xcf\xe5\xed\xdd\x13\x2e\xdc\x4f\x12\x2a\x44\x87\x3a\xf9\x1e\xb5\xf1\x0c\x04\x33\x07\xd3\xf7\xa6\x60\xdc\x65\x9a\x65\xba\xfc\x21\xd1\x29\x90\xdd\x6e\x60\xe1\x1d\x3d\x55\xd8\xe1\x1a\xb5\xbe\x84\xa1\x71\x74\x28\xa8\xea\xd0\x57\x7d\xed\xfb\x2a\xc5\x42\xf7\x65\xa5\x38\x7e\xd7\x50\xb9\x65\x85\xf6\xa8\xaa\x50\xc2\x97\xde\x49\x69\x14\xd6\x29\xee\xa4\x1e\x8c\xd8\xde\xef\x68\xe7\x53\x80\x43\x55\xbc\xec\x7c\x88\x62\x13\x46\x7c\x07\xd8\x4f\xe4\x47\x19\x9e\xd4\xa3\x5d\x12\x0c\xd3\x08\x65\x23\xfd\x29\xfc\x1e\x85\xd5\x9b\x36\x5a\x0c\x6f\x0a\x9e\x76\xd5\x04\xdb\xdf\xa6\xed\x73\x23\x9f\x29\xdc\x08\x35\x2f\x08\x9d\x4c\x36\x2f\x0a\xab\x9f\xe6\xb8\x46\x98\x9e\x50\x35\xd3\x8b\xdc\x7d\x76\x4e\xa7\x4c\x05\x7f\x97\xe6\xf1\xb6\xb8\x51\x85\xc3\x5a\x30\x8a\x94\x43\x56\x3f\x80\x63\x56\x3f\x80\x97\x7c\x07\xce\x66\x9a\x0f\xb7\xc8\xac\x09\x26\xb4\x2f\x1b\x70\x31\x18\x70\x9a\x03\x1d\xb9\xb5\xf2\x06\x77\xca\x78\x11\x2c\xd0\xd0\x0d\xf1\x62\x25\xc3\x69\x3a\x76\x9f\x09\xdd\x1d\x2d\x91\x32\xde\x9f\x23\x4b\x13\x14\xe5\x4a\xd0\x71\xb7\x3b\xcc\x69\xb2\x30\x24\xf8\x17\x8c\x0c\xf9\x3d\x64\x67\xc6\x6f\xe4\xe1\xed\x21\x1f\x65\x2b\x6d\x6f\x86\x76\xcb\x08\x74\xfc\xd7\x70\x4c\xca\xea\xfb\x75\xc7\x20\xb0\xa1\x28\x23\x2f\x1d\xa1\x8e\x0c\xd4\x4d\xe3\xc2\x70\x0b\x0c\x9b\x52\xcd\x57\xc2\xba\xf6\x1c\x38\x91\xc7\xd6\x3a\xc4\x13\x79\x7d\x2b\xfb\xd5\x04\xfb\xfa\x32\x16\x96\x23\x5f\xa8\xd5\xcd\xea\x56\x9b\x15\xb9\x50\x8b\x71\x4e\x27\xbe\x64\x1f\x8d\x66\xf6\xc0\x97\xcc\x00\x0e\xa8\xb9\xe5\x26\x2f\x15\xf7\x45\xbf\xe8\x8a\x1d\x83\x03\x0b\xa7\x1e\x15\x51\xc9\x39\xa2\x09\x01\x2c\x44\x55\xbc\xa8\xe6\xa1\x2a\x5e\xec\x78\x8c\x23\x99\x4f\x81\x26\x55\xa7\x7d\x7a\x91\xa6\xaf\xf4\x44\x8b\x1d\x89\xc1\x04\x01\xdf\xf1\x64\x5b\x70\xab\x0a\xc8\x2e\x4e\x94\x99\x50\x60\x61\x5a\x3b\x50\x2e\x18\xf2\x7f\x4b\xf3\xb2\xa8\x9a\xf0\xd4\x48\x26\x5d\x28\x36\x19\xae\x2e\xe0\x62\x86\x6b\xa0\xe7\xb8\x05\x92\x23\x1f\x1d\x1c\x3e\xca\x8b\xad\x2d\x5d\xa8\x62\x13\x19\x3c\xe6\x80\x41\xb7\x59\x6f\x00\xdd\xcf\x13\x4b\x8f\x4b\x2f\x6f\x1e\x74\x9b\xf5\x16\x24\xfc\x9b\x0e\xba\x3c\xb9\xcf\xa0\x93\xf1\x4c\x1e\x74\xd6\xea\x63\x07\x9d\x8a\xe4\x8d\x83\x6e\xb2\x0a\xdc\x34\xe8\x04\xa6\x7f\xbf\x41\x27\x30\x31\x66\xd0\x61\xf0\xb7\x0d\xba\xa7\x27\x1f\xd0\xfd\xec\x60\xe9\x71\xe9\xe5\xcd\x83\xee\xe9\x29\x00\x09\xff\xa6\x83\x2e\x3b\xdc\x67\xd0\xc9\x78\x26\x0f\x3a\x6b\xf5\xb1\x83\x4e\x45\xf2\xc6\x41\x37\x59\x05\x6e\x1a\x74\x02\xd3\xbf\xdf\xa0\x13\x98\x18\x33\xe8\x30\xf8\xdb\x06\x9d\xef\x3f\x3d\x01\xca\xdf\x66\x96\x2e\x97\x5e\xde\x3c\xea\xfc\xc0\xf3\x40\xca\xbf\xe9\xb0\x6b\xb3\xfb\x0c\x3b\x19\xcf\xe4\x61\x67\xad\x3e\x76\xd8\xa9\x48\xde\x38\xec\x26\xeb\xc0\x4d\xc3\x4e\x60\xfa\xf7\x1b\x76\x02\x13\x63\x86\x1d\x06\x9f\x32\xec\xe4\xea\xbf\xa5\x7e\x9b\x25\x38\x79\x08\xcb\xd5\xef\x32\x6a\xde\x32\x64\xde\x3e\x5e\xee\x39\x58\xa6\xf4\xf3\x2d\xc3\xe4\xf7\x1f\x23\x53\x06\xc8\xa4\xd1\xc1\xab\x66\x74\xc3\x85\xb4\x10\x4a\x17\xef\xc5\xe4\x4d\x4f\x8f\xb6\x0a\xca\x8a\x10\x04\x21\xe6\xdb\x19\x85\x58\xd6\x0e\xbd\xe2\x6a\x54\x45\x2b\x67\x32\xa4\x89\xc3\xcd\x28\x42\xea\xe7\x28\x11\xc3\xc2
\x8a\x81\x7e\x31\xe8\x3f\xa8\x8c\x61\x56\xfc\x2c\x33\x04\x5a\x1f\x8b\x17\x1b\x20\xf8\x39\x67\x94\x88\xb8\x4a\x42\xc2\x91\xbf\x77\x49\x79\xc0\x06\xd0\xd1\xd5\xc1\x37\xed\x80\x93\xf6\x28\x2d\xf0\xff\xff\x30\x62\x6b\x13\xb0\x55\x90\xf1\xed\xb0\xff\x9f\xaf\x1e\x7f\x60\xef\x68\x34\xf5\xe5\x87\xa0\x2b\xc8\xd2\x13\x8a\xc3\xf2\xcb\x0f\x84\xeb\xae\x38\x4f\x1b\x54\x65\x69\x9e\x36\x5f\x7e\xf0\x3d\xba\x2b\x6a\xe9\x6c\x8e\x41\xf0\xcb\xd2\xf1\x57\xf4\x6f\xb0\x38\x06\x01\xb0\xc5\x0e\xee\x27\xd4\x36\x53\x06\x04\x86\x77\xc2\x49\x82\x25\x35\xac\x63\x87\x81\x8c\x18\xd6\x49\x58\x7d\x03\xed\x4b\xf7\xe9\x13\x86\x52\xc8\x03\x00\x6a\x22\x59\x18\x99\xd1\x90\x04\xab\xd5\x8c\xff\x4f\xec\x3d\x73\x6d\x1b\x4b\x03\xc6\x44\x22\xb6\x19\x45\x0d\x34\x29\x12\x9e\xc0\x8a\xc7\x68\x58\x2c\x24\x15\xbb\x62\x81\x94\xcc\x8a\x0e\x67\xfc\x48\x0c\x33\x0c\x98\x12\x45\x3c\x80\x41\x91\x20\x7c\x43\x57\xfc\xab\x5a\x15\xcc\xb4\xd3\xff\xe7\x77\xb3\x2d\x72\x67\x29\xa6\x65\xd4\x10\x91\xec\x8b\x45\xc4\x90\x51\x81\x20\xf4\x61\x1d\x87\x55\xf2\xa6\x8f\xe7\x23\xbf\x6c\xf6\xb3\x64\xef\xf9\xa5\xa8\x12\x1a\x19\x46\x15\x0a\xbf\xb9\xf8\xf7\xc8\x94\x98\xdd\xae\x9d\xa1\x8c\x98\x81\x31\x25\x26\x6e\xf1\x4f\xc7\x4a\x3e\xeb\xe6\xa9\x07\xd1\x08\xd4\x9c\x7c\x70\xd5\x0e\x9b\x39\x42\x39\xcb\x83\x38\x9c\x3b\x63\xe4\x1e\x1f\x8d\x6e\x7f\x52\x4f\x27\xdb\xbf\xb3\xe5\x0d\x91\x29\x03\x27\x3c\x45\xda\x6e\x54\x24\xaf\x83\x1b\x48\xf8\xd7\x52\x5f\xaa\xda\xa4\x4d\x86\xd4\x9d\xe2\x1b\x01\xa0\x3e\x47\x12\x0c\xd9\x7c\xc3\x77\xa5\xea\x3b\x45\x09\x4e\xd4\x36\x62\x3b\x61\xa8\xc1\x7d\x61\x1d\xd4\x8f\xfd\xa3\xb4\x0b\x5d\x6e\x09\x4b\xca\xd8\x7d\x14\xe6\x3b\xc9\x03\x30\xd7\x81\xa6\xba\xa2\x26\x7a\x8b\xc7\x67\xd3\x5e\x33\x55\x63\x25\xea\x90\x52\x31\x81\x91\xfd\x88\xec\x4b\xb5\xeb\xf8\x65\xfb\xe8\x00\x45\x9e\xe3\x49\x08\x7f\x14\xf4\x67\xac\x0a\x77\x3d\xbc\x2f\x8a\xc6\xdc\x23\x63\x7b\x40\xce\x7c\x6a\x68\x3e\x25\x05\x28\x77\xb7\x09\xd2\x83\x9a\xab\x17\x49\xad\xa7\xbb\xfe\xe4\x03\xae\xf3\x35\x24\x50\x57\x4e\x69\x41\x8f\x87\x72\x50\x25\xbd\xb1\x4c\x82\xec\xe2\xb2\xd3\x90\xb0\xb1\xda\x69\x7e\x70\xb1\xee\x66\xe1\xeb\xe0\x56\xe2\x7e\x2f\x08\xdd\x22\x02\x8f\xc4\x34\x3f\x08\x69\xb1\x9e\x87\xb4\x47\xe0\xa3\x29\x4a\xa0\xa6\x6a\xcc\x74\x14\x46\xc3\x66\xa5\x46\x5b\x03\x10\x84\xec\x98\x99\x28\x60\xd3\x8c\x64\x13\x24\xe4\x4b\xbf\xe3\x39\x1a\x8e\xdb\xa1\x2e\x55\x56\x29\x7f\x55\xb6\x96\x0d\x09\x3d\x63\xfa\x42\x1a\xb0\x8c\x46\x8a\x94\xa3\xda\x2b\x25\xbb\x18\x29\xd1\xd9\x1a\xdd\xf0\x9d\x8f\xed\xc7\xc7\x67\xf1\x79\xb2\xcf\x97\xb6\x6c\x09\x1c\x76\x4a\x2c\x59\x60\xcc\x30\xe3\x98\x9e\x12\x7f\x27\x29\x31\x03\x78\xa3\x98\x28\x6b\xa3\xe5\x04\xd0\xb4\x75\x31\xec\xde\x84\xda\x3f\x4a\x7c\x33\x23\xa0\x64\x5b\x50\xeb\x98\xc2\x93\x49\x99\x3c\x6c\x48\x1d\xd9\x7e\x58\x76\xcc\x8e\xc4\xc2\xec\xc2\x44\x8e\x74\x8f\x31\x31\xcf\x85\x05\xa3\xb9\x89\x37\xe1\x00\x1b\x38\x84\xa9\x38\x65\xaf\xa0\x47\x94\x6c\x3f\x5c\x63\x1c\xff\x13\x82\xd4\x61\x3a\xc3\x42\x9c\x16\x99\x8a\x04\x47\xa6\xd2\x15\x18\x03\x72\x62\xdd\x03\xab\xd6\xde\xd9\xbd\xd1\x0a\xe2\xea\x58\x67\xbc\x53\xb3\x56\x83\xb6\x8c\x05\xde\x03\xd6\x8c\x61\xb8\xf0\xc3\x42\xf4\xb7\x1b\x17\xe7\x53\xb3\x5b\x3c\x2b\x3f\x15\xa8\x43\x58\x76\xfb\x22\xf5\x22\x90\x47\xdb\xce\x44\xfa\x61\x09\x4f\x09\x93\xb8\x3a\xe7\xd1\x1d\xcf\xa7\x74\xf1\xaa\x1e\xed\xf9\x74\x57\xe7\xd0\xfd\x0c\xd2\x39\x1d\x55\x3d\x7b\x9e\x49\x2c\xfd\xa3\x5a\xd0\x9d\xc3\xb1\x6e\x9c\xb5\x7f\x9e\x51\x92\xbb\x77\xdb\x33\x3f\x3f\x8c\xa0\x4f\xa6\x46\x1d\x17\xc6\xfb\x7c\xde\x8c\x89\xdd\xca\x23\xd7\x51\x8e\x72\xf0\x93\x38\x65\x78\x48\x4f\xa4\xda\xb0\xa0\x95\x7d\xd1\x9a\xb8\x40\xa9\x94\xe1\x01\x69\x73\x1b\x87\x16\x6b\xf3\x3f\x6f\xd
c\xd9\x1c\x8b\x8d\xea\xc9\x89\x16\xbf\xa7\x66\x37\xaa\x36\xef\xab\x51\x60\x3d\x2a\x62\xef\x33\xea\x8c\x38\x34\x62\xb8\xa9\xbd\xc7\xcf\x97\x47\x45\x0a\xb2\xfe\x41\x27\x0e\x47\x9e\xce\xb9\x0a\x48\xcd\x2b\x4f\xd0\xc1\x30\x68\x62\xe6\x97\xad\x72\xe8\x2e\x58\x0d\x5d\x7d\x65\x38\xb4\x28\xf3\xc6\x97\xd3\xfa\x82\x31\x77\x63\x8d\x32\x1f\x4a\x5f\xb0\x61\x40\xf6\xbe\x08\x5d\xa3\x4e\xb5\xc7\xec\x48\x9f\xaf\x60\x8c\xb6\x61\x60\x52\xfc\xc5\x80\xde\x2f\xb8\x52\xc2\xd4\x26\x8e\x02\x80\x9a\x0e\x20\x91\xab\x73\xb0\xbb\x26\x5d\xac\x05\x23\xbc\xa5\xb7\x82\x21\x2b\x61\x64\xff\x26\x9b\x31\x68\x32\xa8\x63\x0a\x93\x83\xdd\xeb\xe0\xfe\xc2\xdd\xb5\x54\xcf\x85\x4b\x29\x3b\x37\x6a\xca\x4e\x20\x45\x27\x70\x8b\x8c\x72\xb2\x20\x0a\x6b\x44\xaf\x8d\x83\xbd\x28\x66\x96\x9e\xff\x55\x3e\xc5\x47\xcd\xc9\x61\x6d\xd1\x0d\x06\x3f\xc1\xc7\x10\x90\x45\x18\x75\xcb\xc3\x5a\x73\xa9\x6b\x2d\x63\x25\xb9\xf9\xa9\x43\x52\xa5\x79\x58\xbd\x8e\x3b\x81\x27\x55\xf9\xf5\x58\xa1\xfd\xd7\xee\x02\x10\xe0\x95\x76\x49\xe3\xe8\x7b\x6b\xd6\x41\x1c\x73\x72\xdd\x5d\xfb\x63\xee\xd6\xd7\x2a\x41\x5c\xaa\x2f\x6f\xe5\x73\x1d\x6f\x56\x9b\xa4\x23\x29\xdf\xc3\x6f\xbb\x75\x5d\xa9\x02\xf2\x28\xbd\xba\x95\x43\x1f\x6d\xd0\x62\xc9\xc9\x89\xf7\xb3\xdb\x2e\xe9\x16\xe1\x21\xde\x84\xf2\x9b\x19\xf3\x37\xe1\x36\xe2\x84\x94\x7b\xbc\x7d\xdf\x37\xde\xda\xac\x54\x81\xd8\x93\x5f\xc9\x97\x34\xf9\xfe\x58\x0e\x93\xc5\x13\xf2\x3c\x4e\x4e\xbe\xdf\xd9\x76\xab\xaf\x5c\x03\xe2\x4f\x7a\x73\x6b\x07\x46\x49\xe0\x2f\x3a\xf6\xa4\xdb\x7f\x0d\xdd\xc7\xaf\xdd\xed\x2b\x40\xcc\x89\x2f\x6e\xee\xba\x10\x79\x48\xe8\x88\x6a\xe0\x80\x6f\x77\x29\x6b\x07\x0f\x77\x5b\x57\x7e\xb3\xd6\x25\x81\x1f\x2c\xaf\xf3\x7f\x9c\xf3\xa8\x68\x2a\x21\xc9\x73\x60\x98\x34\x05\xe0\xa2\xbf\x61\x92\xb4\xb0\xcf\x43\x75\xaa\x4b\x4c\x95\x38\x30\x81\xa7\x51\x5b\xfa\xd4\x74\x09\xd7\x79\x98\xa1\xaa\x01\xbc\xc6\xa8\x4f\x3b\x7e\xef\x65\x27\x1d\xea\x23\x44\xc9\x47\x81\x7e\xf8\xb2\xf3\xa8\xfc\xa5\x76\x63\xeb\xc6\xe3\xdc\xba\x49\x5a\xe7\x69\x5d\xa7\x51\x86\x9c\x79\x9c\x15\xb5\x29\xa5\x45\xff\x59\xc0\xd0\x1c\x90\xb4\xe2\xdb\x3c\x6f\xe9\x6d\x81\x4b\xc5\x3f\xc4\x31\x5a\x69\xf1\x7b\xb4\x4d\x42\xec\xf3\x24\x54\xce\xb1\x12\x43\x15\x06\xfa\xb4\x8f\x13\x1d\x54\x6c\x7f\xc7\x41\xb0\x59\x05\x1c\x50\x73\x6c\xcb\xf5\x32\x5c\x82\x41\xf5\x06\x6d\x91\x7a\x5f\x73\x92\x24\x7b\xa4\x23\x83\x59\x8c\xf7\x49\x90\xac\x75\x60\x80\xc9\x00\x2d\xfc\xc5\xa2\x03\x95\xbd\x9a\xbf\x5a\x6d\x82\x25\x34\xe6\x97\x28\x49\x54\x16\xe3\x05\x5a\xc7\x91\x82\x0a\x66\x30\xf2\x93\x7d\xa4\x81\x42\x7d\x18\x05\xc8\xef\xd8\x13\x5d\x9a\x17\xaf\x96\x6b\xe0\x23\xe5\x87\xc4\x47\xf1\xde\x57\xe5\x8b\xd0\x0a\x45\x22\x1e\x98\xb1\x30\x4a\x12\x6c\xcc\x04\x38\x88\xab\x75\x10\xf7\x9d\xa6\xf8\xb3\xed\x6a\xbd\xf4\xa0\x4e\xdb\xef\xf7\x8b\x38\x51\xef\xe1\xde\x23\x14\x85\x0a\x2a\x98\xb7\xfd\x1e\x6d\x43\x5f\x05\x05\xd8\x5b\x2d\x16\x7b\xaf\x63\x4f\x76\x66\x9b\xc0\x8f\x41\x91\xee\xb7\xc9\x46\x13\xe9\x7e\x15\x0b\x22\xa5\x98\x0c\xcc\xf9\x91\x17\x6d\x14\x48\x80\xb7\xe5\x93\x1f\xf8\x9b\xde\x5c\x08\x9e\x6c\xeb\x6f\xfd\x6d\x00\xb1\x86\xf0\x3f\x95\xb5\x64\x9f\xec\x91\x84\x08\xe6\x0c\xc5\x28\xde\xaf\x65\x40\x80\xb1\xf5\x16\xff\xeb\x1b\xd0\xbb\x31\x3f\xf2\x51\x00\x39\xd9\x64\x9d\x6c\x93\x27\x75\x14\xac\xe3\x6d\x1c\x8a\x78\x0c\x43\xe0\x29\x8a\x22\x24\xc1\x41\x9a\xb6\xf4\x56\xde\xea\xfa\x47\xbe\x44\xf9\x0d\xbd\xee\xab\x30\x47\xb5\x53\x56\xc5\xa1\x42\x75\xed\x46\x61\xe5\xd6\x4d\x95\x96\xa8\xbe\xec\xab\x22\xbf\x40\x69\x33\x7d\x9a\x56\xa7\x29\xc0\xb7\x9e\xe3\x5d\xaf\x7f\x7c\x47\xdc\x73\x8e\x71\x78\x4d\x4c\xcc\x4d\xaa\x5e\x34\x2a\xcc\x7b\x37\xf0\x07\x7a\xfb\x82\xa6\xd8\xae\xfb\xdd\x9e\x31\xe5\x36\xa8\x31\xcb\x58\x32\x
9f\xac\xff\x13\x7d\x27\x1a\xbd\x54\xda\x3d\xe0\x46\xa2\x53\xf3\x69\xb9\x4a\xd0\x61\x06\xec\x73\x5b\x3d\x3a\xc1\xea\xe3\x4c\x70\xf0\xda\xef\x95\xf7\xd1\x50\xd3\xfc\x66\xa3\xe0\x50\x7e\x3f\xea\xa7\xe7\xfb\x8c\x20\x52\x0b\xc3\x53\x9a\x87\x0d\x4a\xba\xe5\x7a\x5a\x80\xb5\x07\xd2\x44\xc7\xe7\x17\x6a\x3b\xe9\x69\x9f\x9e\xd2\x06\x3d\x4f\xae\x71\x9d\x93\xc8\x6d\xb2\x16\xd8\xae\x05\x63\x38\xd5\xbd\x46\xec\x4b\xe8\x55\xd8\x9b\xf2\x5b\xa7\xd8\xb8\xaa\xfb\x62\xdc\x90\xd4\x16\xb7\x28\xc8\x29\x89\xf4\xfb\x74\x4d\x38\x78\xdc\x6e\x78\x2b\x45\xef\x22\xf2\xf1\xd7\xe6\x1a\x30\xcb\xeb\xef\xec\x1e\x60\x73\x72\x42\x05\xcb\xf8\x35\x5a\x5b\x28\x4d\x73\x2c\x8d\x5b\x88\xd5\xf7\x05\xbd\xdf\x7e\x3b\xcb\x96\x3a\x6d\x9b\xd7\x1d\x3e\x64\x6a\x0d\x01\x55\xc2\xba\x95\x4d\x81\x15\x2e\x2e\x56\xb1\x8c\x4e\x4b\x49\x76\x82\xaa\x78\x99\xd6\xdc\xe3\xc3\x82\x80\x7a\x9f\x9d\xeb\xa3\xb6\xfb\x4c\xb9\x04\x44\xde\xd0\xa0\x4f\xea\x54\x7c\xb7\x6c\xce\x04\xf1\xdc\xb2\xd9\x12\x32\x1a\x63\xa7\x58\x6c\x36\x15\x9a\x10\xcc\xe8\x85\x65\xe3\xf0\x9b\xd1\x30\x35\x33\xbf\xa7\x37\x0c\xd8\x89\x31\x24\x03\x40\xca\x37\x12\x43\xbb\xd9\xd4\xd0\xc8\x10\xdf\x2c\x6f\x27\x36\xd7\xaf\x00\x87\x14\x93\xf2\xa0\x28\x26\xe9\x30\x0d\xf1\xf8\xb9\x27\x9b\x66\xea\x2d\xe8\x50\x98\x98\x37\xd0\xb0\xa1\x32\xca\xaf\x87\xb0\x4a\x50\x45\x34\x08\x26\xfb\x22\x53\x1f\xb0\xd9\xb3\x85\xad\x01\x39\xaa\x70\x76\x49\x72\x3e\x24\x49\xb2\xce\xd3\x51\x8f\x9c\xa0\xb3\xb9\x38\xd0\x06\x8a\xc0\xc8\x3a\x84\xdf\x8c\xc6\x2c\x41\xf6\xde\x2e\x3f\x09\xc9\x00\x90\xbc\x3c\x69\x6a\x37\x5b\x58\x30\x32\x34\x24\x39\x09\x6a\x60\xf5\x9c\xf1\x20\xc9\x8d\x75\x98\x86\x78\xd4\xca\x05\x5b\xa4\xd0\x99\xc7\xb5\x4d\x2c\xeb\x98\x0d\x08\x8c\xb2\x22\x2f\xad\x82\x12\xaa\xdb\x20\x64\x23\x69\x6a\x25\x5b\x62\x81\xf9\x18\x90\x8f\x00\x32\x60\x1e\x19\x75\xd9\x3c\xd2\xee\xd1\xb0\x8e\x5f\xc0\x21\x6b\x35\x3a\xe7\x0c\x81\x89\x6b\x10\xbf\x19\x8d\x51\x50\xfc\xbd\x55\x56\x32\x92\x01\x20\x49\x62\xb6\x76\x6f\x43\xdf\xcc\xf0\x80\xd0\x64\xa8\x81\xef\x7a\x8c\x07\x39\x4d\x3a\xed\x30\x0d\xf1\xd8\x95\x2d\xba\x88\xa5\xb3\x4f\xeb\x9b\xd8\x86\xb0\x1b\x91\x18\x65\xc6\x5e\x5b\x45\x26\xa1\xb0\xc3\x48\x02\x33\xb7\x98\x2e\xc6\x99\xb8\x19\x90\x97\x04\x64\x17\x17\xe7\x40\x12\x17\xeb\x2a\x0d\xef\xc8\xc5\x3e\xba\xae\xa7\xf3\x4e\xaa\x9b\x78\x06\x70\x9b\x50\x18\x45\x45\xdf\x5a\x25\x25\x22\xb0\x82\xc8\x03\xcb\xd4\x56\xb6\x34\x69\x60\x65\x40\x4c\x22\xcc\xc0\xa0\x62\xf4\xe5\x41\x45\x3b\x09\x90\xfe\x88\x95\x4f\xb6\xc8\x09\x29\x58\xf5\xcd\xac\x57\x2a\x66\x03\x02\xcb\x60\xaa\xbe\x0d\x0d\xa5\xae\xba\x0d\x42\x0e\x26\x4c\xad\x64\x4b\xb4\x30\x1f\x83\x83\xa8\x52\x93\x3d\x1b\xc2\x08\x46\x5d\x0e\x23\x68\xf7\xb0\xaf\x64\x34\x5f\x29\x99\x44\x4a\x1b\xa5\x56\xea\x15\xb4\xfa\x7e\x96\x6e\x62\xe0\xd1\x45\x97\xee\x56\x00\xbf\x6c\x1d\x8f\xe4\x92\xee\x6e\x19\x98\xaf\x18\x41\x3e\x7d\xa7\x3f\x94\xa9\x8f\x07\xaf\xde\x74\x48\x36\xab\x2b\xeb\x12\xf6\x89\x8f\x2d\xa3\x78\x86\xac\xa2\x9e\xe9\x96\x8d\xeb\x3c\x2f\x92\x30\x73\x8b\x12\x9d\x2e\xca\x4a\x30\x7b\xd7\xaf\xe1\xec\xd3\x16\x25\x03\x67\x8a\xf8\x94\xdf\xf7\x56\xfd\xdd\x58\x94\x79\x65\x9d\x59\xb8\x90\x83\xd0\x99\xef\xc3\x04\x39\x8c\x9f\x24\x0d\xb3\xe2\x70\x21\x2b\x9d\x94\x38\x6f\x00\x29\xda\x17\x55\xee\xcc\x17\xb5\x83\xc2\x1a\xb9\xc5\xb9\x79\x16\x20\x6f\x85\x98\x0d\x90\xd0\x5e\x53\x3c\x59\xd8\xa0\x4f\xde\xcc\x0d\x56\x1f\x1f\x9f\x2d\xef\x78\x33\x69\xfe\x72\xa9\x99\x76\xcc\x9e\x09\xad\xf7\x28\x8a\x8f\xe1\xec\xa4\xe8\xb6\x5d\x4f\xf3\x92\x57\x96\xca\x44\x22\xae\x2f\xd1\xd1\xf5\x4a\x72\x58\x93\xdd\x92\x41\xae\x75\x01\x2f\x2c\xa7\xa8\xd8\xfa\xfb\x6f\x71\x18\x58\xe1\x42\xbd\x68\xc5\x60\x06\xb2\xb4\xdc\xf5\x29\x85\x07\x0e\x00\x07\xda\xf1\
x5f\xb2\xc1\x50\x55\x58\x17\x13\x48\xaa\xa2\xbc\x75\x84\x2c\xa1\xc8\xdd\xf3\x34\xfc\x64\x64\x88\xb7\xbb\x28\xaf\xb1\x46\x5d\x44\x03\x43\xdf\xb3\x83\xa8\xf7\x5c\x7c\x7f\x53\xb2\x72\x72\x8a\xcb\x78\x75\x82\xfc\x4d\xc9\xba\xa1\x14\xde\x00\x2a\x37\xdb\x51\x2c\xa3\x70\x84\x8c\x9e\x6f\x73\xc4\xff\x8a\xc3\x02\x3a\x89\xac\x5d\x3a\xde\xc9\xa0\x48\x5e\x2d\x37\xd2\x0e\x9e\x81\x26\x07\xed\x28\x2a\x76\x4a\xf6\xbd\xbe\x98\xa1\x53\x02\x27\xca\xc2\x2f\x20\x11\xc9\xe7\x6d\xbb\x15\x7e\x91\xd9\x9f\x80\xdb\x7b\xc5\x0d\xd6\x7c\xe5\x1a\xa8\x23\xde\xe2\x2c\x9d\xf5\x93\xeb\xd4\x71\x55\x64\x59\x14\x56\x6e\x8e\xc2\xfa\x6c\xbe\xa0\xe5\xe9\xe9\xe9\xa9\x6c\x99\xe9\x5a\x61\x73\xc5\x84\x45\x9e\x3b\xdf\x43\xf1\x59\x76\x14\x49\xa6\x51\xc8\x3b\xed\x79\xbd\xfe\x2c\x3c\x45\x65\xea\x5c\x00\x5d\x60\x50\x5b\x62\x4d\x5a\x27\x13\xd1\x6f\x69\x9d\x79\x53\x14\x59\x93\x96\x96\xbb\x47\x7d\x6f\xa3\xde\x39\xd9\xdd\x64\x44\xa2\x94\x7d\x98\xa7\xd9\xeb\x0e\x3b\xfa\x0c\xb9\xf5\x6b\xdd\xa0\x7c\xf6\xe7\x2c\x3d\x7d\xfb\x25\x8c\xff\x4e\x7e\xfe\x7b\x71\x6a\x66\x0f\x7f\x47\x87\x02\x39\xff\xf5\xd7\x87\xd9\x7f\x16\x51\xd1\x14\xb3\x87\xff\x89\xb2\xef\xa8\x49\xe3\xd0\xf9\x0f\x74\x46\x0f\xb3\x3f\x55\x69\x98\xcd\xea\xf0\x54\xbb\x35\xaa\xd2\xfd\xec\xe1\x4f\x18\xa9\xf3\x33\xb6\x54\xce\x5f\xf2\xe2\x1f\xe9\x43\x8f\x47\x2f\xf8\xfb\x6b\x1e\x15\xd9\x03\x0b\xa7\xd8\x91\x8b\x2a\x0f\xb3\xc1\x2b\xfd\x85\x0f\x63\x58\x91\xc4\xdf\xd4\x0e\x81\xe1\x91\x18\x7c\xf5\x05\xbd\xef\x24\x65\x19\x6a\xb0\xfb\xc0\x26\x0a\x6b\x3c\x63\x88\xa4\x95\x20\x19\x25\xa4\x12\x15\x4a\xda\xa6\x4c\x8a\xe8\xae\x68\x52\x91\x8e\x73\x6d\xd7\x38\x98\xb1\x42\x30\xe6\x4c\xe6\x8a\x15\x7f\xea\x5e\x38\xf3\xb0\xaa\x8a\x17\x40\x27\xa0\xab\x47\x57\x82\xe6\x63\xdb\x22\x23\xb1\x5c\x73\x64\xba\x83\x85\xbf\xa0\xf2\x23\xc6\xa0\xe7\x39\xaa\x5d\xf6\xe8\xe2\xd6\xff\xda\xba\x65\x16\xc6\x28\x47\xa7\xe6\xff\xfd\xd2\x14\xe5\xd7\x19\x04\xda\x60\x97\xc9\x2c\x0e\xb6\xbe\xde\x04\x84\xac\x21\x26\xbc\xbc\xb3\xfa\xcf\x2b\x53\x31\xf3\x2e\x1a\xa0\xd0\xf5\xa4\x74\xa0\x64\x51\xb6\x52\xf2\x7b\xda\x75\x9d\x64\x1c\xd2\xda\x67\x7d\x9f\x0a\x71\xfa\xa3\x38\x25\x46\x12\xee\x56\x9a\xe7\xaf\x9b\x0a\x38\xa2\xfc\xc7\x20\xb5\x75\x2d\xbb\xff\x8f\x76\x2e\x3f\x2b\x3c\x1d\xb7\xb5\x73\x45\x1a\x6a\xf7\x12\x1b\x3f\xa6\x77\xa5\x1e\xa6\x41\xc2\xe4\x3e\xa6\xca\x03\x77\x32\x3b\x98\x7a\x8b\xfa\x32\xb4\xb6\x6e\xa6\x20\xbc\x9f\xd9\x97\xc6\x1b\x90\x5b\xfb\x59\x22\x32\x5d\x8f\x3d\xde\xcf\xca\x27\xeb\xc9\xdd\x8c\x49\xc1\x9d\x8c\xdf\xdc\xa4\xc8\x04\xa5\xad\x83\x31\x00\xef\x5e\x16\xa0\xdf\x80\xda\xda\xbd\x02\x89\xae\x73\x95\x44\x41\xa3\xb4\x59\xeb\x67\x12\x0f\x03\xbd\xec\xa6\xa7\x13\xaa\x84\x68\x82\xa4\x12\xee\x62\xba\x45\xd9\x3a\x5b\x42\xab\x9b\x19\xe9\x07\x70\xc0\x79\x88\x69\x83\x57\x51\x92\xd5\x0a\xd3\xbe\x62\x6d\x9e\xb3\xd6\x23\x96\x8e\x55\x1c\x76\xfd\xdf\xc0\xe5\xff\x9c\xc0\xe5\x37\x99\x6a\x77\x3a\x36\x2d\xf6\x99\x6f\x85\x7b\xd7\xe7\x4b\x00\x11\xbb\x1b\x72\xa6\x15\x1b\xc3\x22\x99\xce\xa4\x20\xc9\x40\xc3\x34\xe6\x09\xf7\x06\x86\x47\x56\xc2\x66\x88\x3d\x1a\xe3\x31\x00\x14\xc7\x63\x4a\x02\x80\x89\x68\x3b\x93\x0b\x63\xd7\xa3\xb2\xa9\x98\x55\xc1\x4d\xa8\xc8\x4d\xb5\x95\x35\x1b\x7e\x20\xe8\x93\x3d\x1f\xf3\x8b\xb7\xb4\x6a\x24\x73\x1d\x59\x96\x64\x6a\xab\x67\x85\xda\x2a\x0b\x24\x54\x47\xa5\xb1\xb6\x7a\xbc\x6f\xc7\x6b\xdd\xc7\x19\x24\x09\x94\x3e\x11\x96\x78\x86\xad\x3f\x38\xae\xff\x38\x8a\x65\xba\xa3\x6c\x14\x9f\x3c\x18\x06\x80\x69\x30\x2c\x2d\x46\x4c\xd0\x69\x25\x20\x36\xe1\x57\x03\xe2\xe9\xb8\xa7\xe9\xb5\x21\x96\x1e\x60\xcf\x46\x63\x30\xe6\x96\x84\x44\x22\x90\x69\xda\x7e\x2b\xcb\x8c\x15\x40\x53\xc4\xa0\xfe\x36\xf5\x1e\x2d\x01\xa0\x17\x29\x47\x46\x05\x87\x26\x1e
\xe3\xf5\xb9\x9b\x78\x00\xd0\x6c\xe2\x21\xc8\x66\x8a\x42\xab\x73\x0f\x23\x01\x65\xee\x71\x03\xf2\x69\x2a\x6d\x9a\xb7\x0c\x31\x68\xa3\x32\x62\x82\xa3\xd9\x9e\x69\x5a\x7d\x3b\xd7\x3c\xf9\x89\x36\xb6\xa4\x59\xd4\x6d\x7a\x3d\x5e\x0e\x50\x57\x12\x9e\x86\x14\x5b\x9e\xeb\x4d\xd6\x6c\x87\x43\xf3\x3c\x95\x63\x7a\x0e\xae\x62\x9f\x7e\xac\xbc\x8f\x60\x88\x18\x78\x5a\xae\x3b\x0f\x9c\x87\xe9\x1f\x28\xf6\x1b\xfc\x6f\x64\x7b\xd9\xdc\x16\x80\x25\x73\x5b\x79\xc5\x7b\xc2\x38\x96\x27\xb8\x06\xf4\xda\x04\x77\x32\xea\x69\x63\x18\x9e\x1a\xdb\x99\xb3\x51\x80\x66\xd0\x46\x9f\x44\x4d\xed\xb4\xe1\x7b\x1b\xc3\xd2\x5c\x1e\x70\x8c\x6f\x19\xba\x63\x3b\x5f\xef\x41\xc6\xcf\xd0\xc0\x15\x17\x0f\xc4\x61\xab\xa5\xab\x05\x92\xe5\x74\x1f\xc1\x84\x7d\x11\xda\x09\x61\x68\x66\x48\x46\x8c\xe5\x93\x5f\x84\xff\xd9\xf3\x84\x2e\x46\xa7\x09\x5d\x88\xe9\x3a\x15\x9b\x01\x25\x06\xe9\xcd\x4c\xf2\x0a\x37\x5f\x3a\xf4\x42\x52\x92\x15\xe7\x1a\x65\xfa\xf7\xbe\xfe\x1d\x5b\x8b\x31\x7d\x53\x27\x67\x80\xb4\x6d\x16\x7d\x65\xfb\x91\x19\xb2\x0c\x30\xfa\xf3\x9f\x40\xd1\xbe\x8d\x62\x4d\xf7\x38\x18\x36\x48\x8c\x7a\x0b\x6d\x9d\xe0\x15\xf9\x2b\xac\x1f\xfb\x30\x46\xee\xf7\xb4\x4e\xa3\x34\x4b\x9b\x57\xbe\x41\xc1\xf2\x8a\xd7\x2e\x51\x55\x97\x28\xa6\xd7\xf6\x7a\x74\x81\x4b\x2b\x52\xba\xd2\x3d\xa1\xb6\x99\x29\x65\x65\x85\xbe\x2b\x65\x86\xdb\xc1\xc7\xe1\x32\xf9\x21\xa8\xbe\x52\x84\xb5\x1d\x42\xa9\x14\xd1\xa9\x8c\x65\x8f\xc8\xff\xf3\x09\xde\x22\x82\xcb\xaf\x7f\xac\xcf\x65\x59\x54\x4d\xed\x7c\xfa\xa4\xe1\x60\x0b\x16\x65\x85\x6a\x54\x7d\x47\xee\x22\x79\x74\x8a\xca\xf9\x64\x03\xa0\xe9\xff\x7e\x9b\x96\x2d\x12\x6a\x4a\xc1\xe6\x75\x2f\xaf\x57\x26\x41\x08\xbd\xca\x06\xe6\xd6\xde\x99\x78\xc8\x18\xfa\x93\xbc\x7a\x97\x2e\xbd\x5f\x03\x16\x09\x61\xd3\xd6\x6d\xdd\x7b\x53\xcf\x99\xe4\x67\xef\x38\xd7\xd2\x73\xee\x6f\xdc\x75\xb7\xb4\x60\x91\x50\x36\x6d\x5d\xe7\x8a\x7d\xd7\xa1\xc7\xf1\x63\x55\x64\xaa\x91\xe0\xc5\x36\x3b\xd1\xef\x0f\xfa\x7d\xce\x4a\x33\x37\xb1\xfa\x68\xff\x40\x20\xed\x5f\x84\x9a\xdd\xed\x67\x04\x5f\xd2\xfd\xa4\x70\xcf\x18\x6b\x92\x97\xe3\x12\xd3\x74\x7b\xb3\x9e\xc5\x2f\xe8\xb0\x20\x84\x54\xc7\x3a\xa7\x7d\xd0\x0c\xbe\x26\xf7\x95\x18\x58\x1d\xbc\x3f\x3d\x10\xb6\xa5\x90\x67\x61\xb3\xa6\x78\xc6\x7b\xfa\x45\xe9\x36\x86\xde\x74\xb9\xca\xf0\x45\x2a\xce\x3e\xcd\xb2\x2f\x3f\x7c\x0c\x16\xfb\xfd\xfe\x07\xf9\x36\x96\xad\xb3\x15\x2f\x5a\x49\xbe\xfc\xf0\xcb\x6a\x1e\xac\x1c\x2f\x73\x97\x0e\xfd\xe7\xcf\x57\x2e\xfe\x5f\x40\xff\xe7\xb0\xbf\x2e\x2b\xff\x27\x70\x43\x8a\x59\x30\xff\x62\x6d\x0d\xe6\x1b\xd2\x56\x7f\xbe\xc2\xed\x74\x84\xf6\x91\x67\x5e\xbe\x74\xc9\x3f\x6b\x5b\xd3\x53\x92\xc6\x61\x53\x54\x35\x60\x48\x94\xad\x86\x64\x4a\xab\x7e\x85\x5b\x4d\xb0\x2f\x23\xcc\x06\x7c\x14\x9e\xa7\x6b\xff\xa8\x64\x67\xff\xa8\xa6\x5d\x05\x5b\xe6\x64\xa9\x6d\x2f\x9d\x27\xed\xa5\xeb\x7e\xf1\x5d\x57\xfd\xd8\x5a\xf4\x73\xfc\x4a\x2d\x20\x0c\xe1\xdf\xc4\x8c\xe0\xbe\x39\x35\x64\xf7\x18\x74\xc4\x1c\xbe\x6c\x07\xe6\x7c\x60\x5d\xc2\x15\x85\x32\x90\xc9\xb8\x4b\xcf\x21\x2f\x4d\x98\x29\xd3\x89\x20\xb4\xa7\x87\x9d\x9f\x7f\x27\xda\x3c\x70\x36\x9c\x0a\xef\x07\x6a\x58\x92\x44\x08\x26\xcd\xc5\x0a\xc2\xb3\x83\x75\xac\xe2\xc2\xfe\x1b\x72\xa7\x70\xb8\x33\x03\xe1\xfb\xb6\x2b\x56\xb4\xb9\xb0\xeb\x9c\x7a\x4a\x9e\x49\xf2\x62\xc8\x30\xd9\x5f\x10\xc9\x6b\x34\x45\xa9\x02\x37\x45\xa9\xc3\xe5\x69\x92\x64\x1a\x5e\x5a\xaa\x43\xb3\x05\x5d\x95\x0b\x52\x0a\xf0\x80\x9b\x03\x57\x11\x5e\x19\xea\x41\x0d\x60\xe5\x62\x8d\xe8\xd0\x1d\x0e\x37\x1d\xd2\x17\xc0\x43\x01\xbe\x3b\x4c\x22\x14\x51\xc7\x6d\x4a\x45\xa9\xd0\xed\x0f\x36\x9b\xd2\x50\xaa\x94\xf5\x03\xce\x52\xa1\x89\x3a\x4d\x30\xa9\x52\x67\x07\x72\x4d\xc9\x25\x35\xda\xca\xc1\x5c\xa1\xc8\x4
4\x97\xa6\x8d\x54\xe8\x92\x63\xa5\xa6\x94\x91\x2a\x51\xe9\x78\x29\xff\x6d\x24\x47\x92\x41\x2a\xe4\xf8\x11\x49\x53\x22\x48\x95\xa2\x7a\x54\x52\x28\x32\xd1\xa5\x29\x1e\x15\xba\xec\x90\x9f\x29\xbd\xa3\x4a\x56\x39\xec\xd7\x97\x98\x88\xd2\xc4\x8d\x0a\x51\x7a\x58\xcd\x94\xef\x44\xa5\x29\x1f\x5a\xeb\x0a\x8c\xcd\x24\xe9\x18\xb5\x66\x56\xdf\x00\x58\x9a\x8a\x51\x6f\xa4\x70\x04\x8b\xff\x36\x4a\x93\x24\x59\x54\xa5\x79\x4c\x1b\x83\xe9\x55\x20\x85\xd8\x52\x87\x17\x5e\x4a\xd5\xc8\x5a\xdb\x45\x4f\x74\x4d\xf7\x81\xeb\xa0\xae\xc7\x81\x3d\xe0\x65\x53\x94\x1d\x00\xcd\xa0\x01\x00\xd1\x45\x3d\x4f\x49\xe8\x01\x00\xb2\xaf\x10\x9e\x9a\x4c\x03\x00\x25\x2b\x8a\x1d\x20\xf5\x81\x00\x58\x67\xf7\x80\x1c\x24\x00\xb8\x60\xae\xe4\xf3\x7e\xaa\xa9\xea\x2a\x70\x0b\x23\x81\x6b\xd6\x85\x83\x53\xc3\x20\x9f\x56\x53\x8d\x02\x87\xed\x46\xb5\x92\xf2\x4e\x19\xd1\x1c\x9c\x0f\x46\x39\xf1\xa1\x3a\x10\xbb\xee\xa3\xa3\x48\x46\xad\x8e\xa0\x1e\x75\xf5\x4d\x81\xd5\x94\xbf\xe3\x9a\x6a\xaf\xc2\xb3\xd4\xd7\x44\x4b\x11\x7c\x5f\x09\x00\x37\x7c\x37\x89\x70\xf5\xb3\x3d\xb5\x0f\x84\xbd\x12\x7b\x62\xb0\xde\x70\xc2\x1f\x88\xc6\xd8\x5b\x4f\x8c\x74\x6c\x6d\xee\xc9\x90\x4f\x50\x13\x7b\x6a\x12\x81\x38\xad\xe2\x0c\x29\x82\x5b\x79\x1f\x21\x58\x4f\xbd\xa0\x44\x04\x8a\x33\x14\x56\xfb\xb4\xe5\x21\xae\xfc\x49\x8f\xbc\xc5\x11\xd3\x51\x8a\x54\x13\x17\xcf\x30\xa4\xd5\x7e\x11\x67\xe2\xd2\xd0\x57\x99\xa9\x43\x20\x34\x3a\x06\xa7\xf4\x32\xb8\x0c\x07\x00\x34\x61\x94\xf5\x14\xc9\x2f\x00\xc0\xad\x8a\x17\x19\x08\x97\x40\x80\x31\xca\x32\x05\x12\x17\xc9\xa0\x78\x9e\x74\xd3\xcd\xe8\x42\x0f\x68\x38\x84\x72\x18\x95\x00\x20\x60\x34\x9f\xb8\x49\xdc\x3a\x1f\x92\x58\x9d\x8f\x11\x5a\x07\x35\x5a\x6e\x75\x3e\x2c\xba\x3a\x1f\x96\x1e\x87\x19\x23\xc0\x0e\x76\x94\x0c\xeb\xfc\x6d\x62\xec\xfb\xe4\x5e\x92\x04\x44\xb9\x59\x6f\x99\x28\xf3\xc1\xc1\x97\x8f\x1a\x7f\xf9\xe4\x21\x98\x8f\x18\x85\xf9\x88\x81\x98\x4f\x18\x8b\xf9\xa4\xe1\x98\xbf\x71\x44\xe6\x77\x1f\x94\x96\x43\x6a\x89\x9b\x1d\x86\x44\x99\x1d\xc6\x88\xb2\x83\x1a\x2d\xca\xec\x30\x2c\xca\xec\x30\x2c\x4a\x0e\x33\x46\x94\x1d\xec\x28\x51\x66\x87\xb7\x89\xb2\xef\x93\x77\x14\xa5\x4f\xce\x01\x10\x59\xb6\xd9\x90\x2c\xdb\x6c\x8c\x2c\x3b\xa8\xd1\xb2\x6c\xb3\x61\x59\xb6\xd9\xb0\x2c\x39\xcc\x18\x59\x76\xb0\xa3\x64\xd9\x66\x6f\x93\x65\xdf\x27\x77\x93\xe5\x3c\xc1\x13\x92\x53\xa3\x74\x9d\x22\x35\x26\x71\x02\x79\x31\x54\x51\x7b\xbb\x47\xad\x48\x7b\x14\xee\x21\x0d\x51\xb1\xdf\xc2\xff\x14\xfd\x32\xa0\xb0\xa9\xfa\x75\x8e\xf2\x08\x87\xf3\xa8\x2e\x8b\x53\x9d\x7e\x87\xae\x85\x81\x76\x8e\x91\xc5\xd1\x3e\xcf\x86\xb6\x89\x43\x45\xab\xdd\xdb\xc6\x62\x56\x21\x50\x55\xab\x38\x5a\x09\xf9\xac\x37\xd3\x01\x49\x01\x50\x9e\x92\xdc\xd2\xc0\x8b\x22\xfa\x07\x8a\x1b\xe0\xc5\xf7\x34\x41\xc5\xf0\xb7\x49\xb6\x78\x0c\xad\x13\x77\x17\xef\xee\x3c\xbd\x49\x6e\xe0\x47\xaf\x4f\xfd\xda\xb8\xb0\x88\xbb\x0c\xe6\xdb\xd5\xc6\x5f\x2e\x3e\x02\xd5\xfc\xb5\xa9\xda\x6a\x3d\x0f\x56\x50\x95\x65\xf4\xba\x00\x6b\x6c\x40\x70\x3f\x7a\xf5\x41\x70\xfa\x41\x8d\x7c\x07\xc1\xb6\x06\xc8\x4f\x21\x19\x1c\x35\x4d\x85\x62\x8d\xc8\x5b\x9a\xba\x02\x42\x45\xdf\x98\xb1\xa9\xef\x7b\xc6\xdc\x0a\x7d\x47\x55\x8d\x0c\x0c\xf2\xd7\x56\x46\x75\x20\x89\x61\x1b\x09\x19\x62\xa8\x01\x46\x42\x2f\x55\x28\x5c\x88\xdb\xdd\xbd\xa8\xe2\x03\x5e\x30\x04\xf4\xb6\x28\x05\x05\x2d\x04\x91\xa8\xaf\x04\x3e\xf4\xe6\x76\x64\x4d\xed\xb4\x00\x5c\xe7\xca\x97\x33\x9a\x41\xe3\x22\x7f\x64\x23\x65\x02\x56\x30\x43\x83\x02\xa4\x63\x46\xa7\xe4\xa2\x65\x7b\x18\xc2\x2a\x81\xe8\x38\xe9\x47\x93\x0b\xf4\x4d\xd0\x82\x59\x05\xd0\xf1\xb2\xac\x20\x17\x30\x93\x88\x05\xb3\x94\x53\xc4\x46\x20\x24\x2b\x0b\x0a\xfe\x24\xad\x9b\x2a\x8d\xce\x0d\x1a\x24\x41\xeb\x8b\x
14\x84\x1d\x16\xaa\x10\x85\xa3\x7d\x02\x62\x38\x79\x8a\x09\xa5\x24\x3d\x8a\x50\x16\x9f\x86\x0e\x99\xf9\x53\xc5\x26\x7e\x01\x33\xa0\xd4\x65\x26\x22\xec\x3e\x97\x29\x28\xf5\xcf\x65\x12\x52\xf3\xd7\x34\xde\x8f\x15\x6a\xe2\xa3\xde\x93\xa4\xd8\x80\x54\x7b\xcb\x71\x1a\x86\x18\x09\x1a\xc0\x71\x26\x55\xb3\xca\x08\x1c\x63\x3d\x62\x48\x52\xb6\x61\x26\x23\x55\xa5\xd5\xe3\x35\x48\xcc\x3c\xce\x64\xc4\xda\x28\xeb\x31\xeb\x43\x4d\x46\x6d\x1c\x68\x32\x05\x75\x98\xf5\x04\xc0\xb1\x06\xd1\x30\x8d\xb4\x5e\x9a\x8a\x8e\x88\xf2\x84\xf5\xa4\x23\x60\xd2\x94\x1a\x65\x7b\xb2\xff\xba\xc7\x4a\xd3\xdd\x13\xf5\xc3\x2f\x34\x9c\xb8\x8a\xf2\x46\xc2\xa6\x28\x9d\x80\x0e\xd6\x3a\x82\xcf\xa6\x72\x04\xab\xa4\x6f\x02\x4e\x48\xe1\x7a\x8c\xa0\xb6\x11\x7c\xaa\xaa\x09\x28\x0d\xba\x46\xb0\x9a\x14\x8d\xe0\xd4\x6d\x83\x80\xd5\x68\x20\x08\x5e\xb3\x7d\x60\x7d\xaa\x88\x5e\xea\x55\x58\xf6\x04\xaf\x2e\x78\xf3\xca\x23\x95\x41\x7e\xb7\x30\xaf\xce\xef\x1f\xe9\x51\xf6\xde\x35\xd8\xeb\xd8\xfe\x0d\xe2\xbd\x3a\x7f\x7b\xc8\x47\x96\x8b\xef\x12\xf5\x31\x6e\xde\x3d\xf0\xcb\xdf\x31\xf6\xab\xf3\x77\x09\xff\xb0\x52\xbc\x53\x04\x58\xe7\xef\x1f\x04\xd6\xf9\x7b\xc7\x81\x9a\x4c\xef\x11\x0a\xaa\xc2\x7c\x6b\x34\x08\x48\xf1\xcd\x01\x21\x16\xdf\x3b\xc5\x84\xf9\x7b\x85\x85\x9a\xb0\xee\x19\x19\xaa\x42\xbb\x57\x70\x08\x08\xef\x6e\xf1\x21\x34\x06\xef\x1e\x22\x02\x83\xf0\x3d\xa2\x44\x40\x6b\xee\x15\x28\xe2\x16\xdc\x37\x56\xd4\x34\xf1\x4e\xe1\xa2\xaa\x84\x77\x88\x18\x01\xfd\xbb\x47\xd0\x08\xda\x8f\xbb\xc5\x8d\x80\x32\xdc\x1e\x3a\x5a\x3e\x75\x12\xcc\x79\x72\xb7\xd8\x31\x4f\xee\x1f\x3b\x52\xf6\xde\x35\x76\xec\xd8\xfe\x0d\x62\xc7\x3c\x79\x7b\xec\x48\xbe\x4f\xdf\x25\x76\x64\xdc\xbc\x77\xec\x98\x27\xef\x18\x3b\xe6\xc9\xbb\xc4\x8e\x58\x29\xde\x29\x76\xcc\x93\xf7\x8f\x1d\xf3\xe4\x9d\x63\x47\x5d\xa6\xf7\x88\x1d\x55\x61\xbe\x35\x76\x04\xa4\xf8\xe6\xd8\x11\x8b\xef\x7d\x62\x47\xd2\xa7\xef\x11\x3b\xea\xc2\xba\x67\xec\xa8\x0a\xed\x5e\xb1\x23\x20\xbc\xbb\xc5\x8e\xd0\x18\xbc\x7b\xec\x08\x0c\xc2\x77\x88\x1d\x21\xad\xb9\x57\xec\x88\x5b\x70\xd7\xd8\x51\xd7\xc4\x3b\xc5\x8e\xaa\x12\xde\x21\x76\x04\xf4\xef\x1e\xb1\x23\x68\x3f\xee\x15\x3b\x42\xca\x70\xd7\xd8\x91\xef\xad\xa2\x6a\x76\xb8\x5b\xec\x98\x1d\xee\x1f\x3b\x52\xf6\xde\x35\x76\xec\xd8\xfe\x0d\x62\xc7\xec\xf0\xf6\xd8\x91\x6c\x88\xbb\x4b\xec\xc8\xb8\x79\xef\xd8\x31\x3b\xbc\x63\xec\x98\x1d\xde\x25\x76\xc4\x4a\xf1\x4e\xb1\x63\x76\x78\xff\xd8\x31\x3b\xbc\x73\xec\xa8\xcb\xf4\x1e\xb1\xa3\x2a\xcc\xb7\xc6\x8e\x80\x14\xdf\x1c\x3b\x62\xf1\xbd\x4f\xec\x48\xfa\xf4\x3d\x62\x47\x5d\x58\xf7\x8c\x1d\x55\xa1\xdd\x2b\x76\x04\x84\x77\xb7\xd8\x11\x1a\x83\x77\x8f\x1d\x81\x41\xf8\x0e\xb1\x23\xa4\x35\xf7\x8a\x1d\x71\x0b\xee\x1a\x3b\xea\x9a\x78\xa7\xd8\x51\x55\xc2\x3b\xc4\x8e\x80\xfe\xdd\x23\x76\x04\xed\xc7\xbd\x62\x47\x48\x19\xee\x1a\x3b\x76\x9b\xb9\x09\xea\x36\xbb\x5b\xf0\xd8\x66\xf7\x0f\x1e\x29\x7b\xef\x1a\x3c\x76\x6c\xff\x06\xc1\x63\x9b\xbd\x3d\x78\x24\x3b\xf0\xef\x12\x3c\x32\x6e\xde\x3b\x78\x6c\xb3\x77\x0c\x1e\xdb\xec\x5d\x82\x47\xac\x14\xef\x14\x3c\xb6\xd9\xfb\x07\x8f\x6d\xf6\xce\xc1\xa3\x2e\xd3\x7b\x04\x8f\xaa\x30\xdf\x1a\x3c\x02\x52\x7c\x73\xf0\x88\xc5\xf7\x3e\xc1\x23\xe9\xd3\xf7\x08\x1e\x75\x61\xdd\x33\x78\x54\x85\x76\xaf\xe0\x11\x10\xde\xdd\x82\x47\x68\x0c\xde\x3d\x78\x04\x06\xe1\x3b\x04\x8f\x90\xd6\xdc\x2b\x78\xc4\x2d\xb8\x6b\xf0\xa8\x6b\xe2\x9d\x82\x47\x55\x09\xef\x10\x3c\x02\xfa\x77\x8f\xe0\x11\xb4\x1f\xf7\x0a\x1e\x21\x65\x78\x43\xf0\x38\x27\x57\x19\xd3\x53\xfe\xf4\x56\x63\xfc\x28\xc7\x11\x18\x80\x66\x34\x10\xee\x3d\xd6\x41\xc8\x49\x2a\x0a\x01\x1f\xde\x02\x77\x55\xe2\x9a\x75\x3e\xcc\x40\x9d\x8f\xe1\x81\x9f\x0e\x07\xd9\xb0\x7e\xa1\xc7\xb5\xf3\x64\
x98\x8f\x3c\x19\xc3\x07\x3f\xda\x3c\x96\x8f\x7e\xb5\x97\x48\xe3\x30\xcc\x47\x76\x18\xc3\x07\x3f\x97\x3b\x96\x0f\x61\xe6\x80\xab\xb7\xd9\x30\x23\x38\x7e\x1f\x66\x84\x1f\x2a\x85\x19\x99\xf3\x33\x65\xd8\x02\x34\x69\xdc\x9f\x31\xa3\xbf\x45\x7c\x1d\x28\x3f\x81\xa7\x9f\xc9\x03\xc1\xf9\x69\x35\xfd\xfc\x1a\x08\x4e\x6e\xe7\x55\x2e\xeb\x05\x01\xeb\x26\x8d\xbf\x09\xf7\xba\xf2\x9c\xa1\xb4\x5c\x18\x88\x42\x9b\xe4\x37\xd7\x39\xc1\x4e\xaf\x3a\xb4\xdd\x0e\xac\x5d\x96\xb5\xf0\x78\x5d\x7e\xd5\x9c\x5c\x7d\xf8\x5a\xe1\x85\x27\xa5\x59\x35\xb4\x82\xa6\x56\x55\x1a\xf0\xf8\x78\x99\xd3\x27\x99\x73\xb9\xaa\xda\x6c\xd6\xa4\x9e\x83\xc0\xbb\x5e\xe7\x75\xe5\x16\xa7\xec\x15\x38\x5b\xc8\xf4\xb2\x4f\xd8\xe7\x0b\xd7\x95\x69\x47\x2b\x9f\xc9\x8d\x52\x78\xaa\xc6\xd2\xaa\x7b\x8f\xca\x7d\x56\x78\x4a\xd3\x25\x8a\xc6\xd0\x6e\x19\x36\xc7\x5d\x7a\xaa\x51\xf3\x69\xe5\x7d\x7c\x7c\x06\x0b\xfb\x53\x8b\x8c\x55\x97\xe4\x63\x0a\xa3\x0c\xed\xd8\xbd\xf3\xc0\x1b\xf2\xa4\x2a\xb3\x78\x65\x36\x6b\x13\x79\xee\x9a\x42\x12\x5b\x67\x88\xb6\x85\xa6\x2d\xd4\xaf\xe4\xd2\x9b\x40\x72\x9b\xca\x3f\xaf\xf3\x17\x37\x58\x5d\x58\x46\xd1\x95\x94\x53\xe5\xc5\x5d\x79\x17\x7e\x61\xad\xf2\x66\xc3\xeb\x6c\xd4\x3a\xbe\xc7\x2b\xf9\x9e\x5c\xeb\x88\x29\xf1\x84\xa5\x2b\xe5\xd5\xca\xbb\x74\x57\xec\x2a\xaf\x36\x5d\xad\x8d\x5a\x0b\x13\x13\x8e\x8e\x8a\x2f\x73\xca\x4a\x7f\x39\x9c\x06\x70\xec\x00\x4c\x28\x5c\xef\xc2\xaf\xc3\x95\xca\x1b\xd7\x9b\xcd\xf3\xd7\xee\xb5\x9e\x83\x2a\xaf\x08\x48\xdb\x83\x00\xf9\xa7\xf2\x48\xc5\x03\xa5\x9e\xca\x33\x15\x95\x9e\x77\x2a\x77\x7d\xce\xa9\x9e\x47\x27\x6f\x5c\x9f\x90\xf1\xa5\xfb\x5e\x74\xb8\x8a\xc0\xb5\x3d\x9c\x78\x87\xb2\xc2\xb8\x82\x91\xdf\xf4\xa5\x83\x66\x2a\x52\xe1\x2e\x67\xb9\x09\x41\xd7\x04\xa0\x05\x01\xa1\x17\x48\x2d\x00\x1a\x10\x10\x5a\x81\xd2\x00\x80\x7f\x05\x1f\xe7\x1f\x60\x5f\x41\x49\xd9\xd7\xb9\x5f\x70\xee\x7d\x9d\xf9\x05\x21\xb6\x10\x99\xd7\xa0\x2a\x02\xd5\xf6\x50\x2c\x99\xa5\xce\xba\x82\x8d\xa7\x69\xd5\x39\x57\x10\xd2\x3c\x98\x1a\xe3\xcb\x8e\x71\xa8\xdf\x97\x84\xd8\x52\x62\x1d\xea\xf8\x25\xa1\xb5\x54\x98\x87\x7a\x5e\xc1\xc8\xd9\x87\xba\x5e\x41\x4a\x1b\x00\xf4\xfd\x8a\x37\x61\xa1\x37\x60\x45\xc8\xad\xc4\x06\x68\x50\x15\x81\x6a\x7b\x28\x96\xe8\x55\x67\x5e\xc1\xc6\x98\xd7\x00\x33\x15\x21\x4d\x14\xab\x80\x95\xae\xd7\xdf\x4b\x2a\xbd\x20\x06\xa6\x7c\xed\xdf\xeb\x16\xa6\x24\x16\xa6\x6c\x05\x18\xc0\xc4\x94\x91\x86\x09\xb2\x31\x65\xa6\x21\xd3\x8d\x4c\xe9\xfa\xfd\xcd\x16\xda\xf8\x2d\x89\x95\x29\x5f\x7b\x20\x83\x99\x29\x89\x99\x29\x5b\x01\xd0\x64\x67\xca\x48\xc3\x69\x34\x34\x65\xa6\xa1\x35\x58\x9a\xd2\x0d\xe4\x1b\x3a\x94\x66\x04\x84\x64\x20\x37\x03\x68\x45\x40\xc8\x05\x6a\x2b\x80\x46\xa8\x18\x4d\xd6\xa6\xcc\x34\xa4\xb0\xb9\x29\xdd\x45\xd7\x04\x75\x44\x97\xc4\xde\x94\xaf\x3d\x08\x68\x70\x4a\x62\x70\xca\x56\x00\x83\x2d\x4e\x19\x69\xf8\x0c\x26\xa7\xcc\x34\x94\xa0\xcd\x29\xdd\x65\xcf\x3d\x24\x81\x25\xa1\xb7\x94\xf9\x87\x44\xb0\x24\xe4\x96\x6a\x0b\x20\x19\xa8\x38\x8d\x76\xa7\xcc\x34\xb4\x06\xc3\x53\xba\xab\xae\x1d\xda\xd8\x26\x96\xa7\x7c\xed\x41\x40\xd3\x53\x12\xd3\x53\xb6\x02\x18\x6c\x7b\xca\x48\xc3\x67\x30\x3e\x65\xa6\xa1\x04\xad\x0f\x3b\x8c\xc0\x2c\xa7\xba\x60\x93\x37\xe4\x35\x31\x77\x02\x1c\x69\x84\x06\x5b\x71\xd8\x56\x82\xad\xba\xd0\x55\xb1\xa2\x20\x66\xd6\x1c\x0d\x3c\x83\x91\x93\x36\x29\xc0\xe6\x65\x84\x1c\x4f\xfc\x8d\xd1\x1c\x7e\x47\xf8\x11\x80\xc0\x98\x8e\x01\xb6\x12\x20\x1c\xd9\x81\x38\x0d\xf1\x1d\x88\x16\x8a\xf2\xea\xa1\x40\x0f\x03\x70\xaa\xc3\xe1\x1e\x83\x6e\x25\x68\x4b\xd0\x07\x62\xb7\x85\x7e\x20\x01\x63\x00\x58\x0f\xc4\x80\xf8\x3d\x27\x3f\x18\x09\x32\xe0\x56\x02\x36\xc7\x83\x20\x6e\x4b\x54\x08\xa2\x37\xc5\x86\xb5\x3d\x3c\xc4\xaf\x39
\xed\xa1\x20\x91\xc1\xb6\x12\xac\x31\x54\x04\x31\x9b\x03\x46\x10\xb9\x21\x6c\xac\x87\x22\x47\x0c\xc0\x69\x0f\xc7\x8f\x0c\xba\x95\xa0\x2d\x51\x24\x88\xdd\x16\x4b\x82\x04\x8c\x11\x65\x6d\x0f\x2a\xf1\x6b\x4e\x7d\x28\xb4\x64\xb0\xad\x04\x6b\x0c\x30\x41\xcc\xe6\x30\x13\x44\x6e\x08\x36\x89\x71\x31\xc5\x9b\xd4\x04\x95\xaf\x12\x14\x18\x75\x32\xc8\x56\x86\x84\x63\x4f\x18\xab\x21\x02\x85\x11\x43\x71\x28\xb1\x26\xd6\x50\x94\x1a\x9e\xf2\x55\x02\x35\x07\xa4\x0c\xbc\x95\xc1\x2d\x61\x29\x8c\xdf\x16\x9c\xc2\x24\x8c\x21\x2a\x31\x2b\xb6\x28\x95\x1a\xa0\xf2\x55\x82\x34\xc6\xaa\x0c\xba\x95\xa1\xcd\x11\x2b\x8c\xdd\x12\xb7\xc2\x04\x4c\xd1\x2b\xb1\x2f\x96\x00\x96\x1a\xa2\xf2\x55\x02\x34\x85\xb1\x0c\xb8\x95\x81\x8d\xc1\x2c\x8c\xdb\x1c\xd2\xc2\xe8\x0d\x81\x2d\x31\x2e\xd6\xd8\x96\xda\xa1\xf2\x55\x02\x35\x47\xb8\x0c\xbc\x95\xc1\x2d\x71\x2e\x8c\xdf\x16\xed\xc2\x24\x8c\x31\x2f\xb1\x34\x96\xb0\x97\x9a\xa4\xf2\x55\x02\x34\x05\xbf\x0c\xb8\x95\x81\x8d\x21\x30\x8c\xdb\x1c\x08\xc3\xe8\x0d\xe1\x70\x3d\x18\x11\x33\x08\x6e\x9f\x47\xc4\xc5\x7d\x8d\x56\xad\x61\x8c\x8e\x2d\x54\xcc\x31\xb2\x85\x10\x14\x29\x5b\xbe\x74\xe5\x6e\x9e\x98\x43\x65\xfc\x8e\x70\x26\x00\x81\xa1\x32\x03\x6c\x25\x40\x38\x54\x06\x71\x1a\x42\x65\x10\x2d\x14\x2a\xe7\xc9\x40\xa8\x8c\x01\x38\xd5\xe1\x50\x99\x41\xb7\x12\xb4\x25\x54\x06\xb1\xdb\x42\x65\x90\x80\x31\x54\xce\x13\x7b\xa8\x8c\xdf\x73\xf2\x83\xa1\x32\x03\x6e\x25\x60\x73\xa8\x0c\xe2\xb6\x84\xca\x20\x7a\x53\xa8\x9c\x27\xd6\x50\x19\xbf\xe6\xb4\x87\x42\x65\x06\xdb\x4a\xb0\xc6\x50\x19\xc4\x6c\x0e\x95\x41\xe4\x86\x50\x39\x4f\x06\x42\x65\x0c\xc0\x69\x0f\x87\xca\x0c\xba\x95\xa0\x2d\xa1\x32\x88\xdd\x16\x2a\x83\x04\x8c\xa1\x72\x9e\x58\x43\x65\xfc\x9a\x53\x1f\x0a\x95\x19\x6c\x2b\xc1\x1a\x43\x65\x10\xb3\x39\x54\x06\x91\x1b\x42\x65\x62\x5c\x4c\xa1\x32\x35\x41\xe5\xab\x04\x05\x86\xca\x0c\xb2\x95\x21\xe1\x50\x19\xc6\x6a\x08\x95\x61\xc4\x50\xa8\x4c\xac\x89\x35\x54\xa6\x86\xa7\x7c\x95\x40\xcd\xa1\x32\x03\x6f\x65\x70\x4b\xa8\x0c\xe3\xb7\x85\xca\x30\x09\x63\xa8\x4c\xcc\x8a\x2d\x54\xa6\x06\xa8\x7c\x95\x20\x8d\xa1\x32\x83\x6e\x65\x68\x73\xa8\x0c\x63\xb7\x84\xca\x30\x01\x53\xa8\x4c\xec\x8b\x25\x54\xa6\x86\xa8\x7c\x95\x00\x4d\xa1\x32\x03\x6e\x65\x60\x63\xa8\x0c\xe3\x36\x87\xca\x30\x7a\x43\xa8\x4c\x8c\x8b\x35\x54\xa6\x76\xa8\x7c\x95\x40\xcd\xa1\x32\x03\x6f\x65\x70\x4b\xa8\x0c\xe3\xb7\x85\xca\x30\x09\x63\xa8\x4c\x2c\x8d\x25\x54\xa6\x26\xa9\x7c\x95\x00\x4d\xa1\x32\x03\x6e\x65\x60\x63\xa8\x0c\xe3\x36\x87\xca\x30\x7a\x43\xa8\xcc\x4f\x23\x9b\x43\x65\x06\xc1\xed\xf3\x88\x50\xb9\xaf\xd1\xaa\x35\x8c\xa1\xb2\x85\x8a\x39\x54\xb6\x10\x1a\x19\x2a\xf3\xcd\x58\xb9\x9b\x1d\xcc\xa1\x32\x7e\x47\x38\x13\x80\xc0\x50\x99\x01\xb6\x12\x20\x1c\x2a\x83\x38\x0d\xa1\x32\x88\x16\x0a\x95\xb3\xc3\x40\xa8\x8c\x01\x38\xd5\xe1\x50\x99\x41\xb7\x12\xb4\x25\x54\x06\xb1\xdb\x42\x65\x90\x80\x31\x54\xce\x0e\xf6\x50\x19\xbf\xe7\xe4\x07\x43\x65\x06\xdc\x4a\xc0\xe6\x50\x19\xc4\x6d\x09\x95\x41\xf4\xa6\x50\x39\x3b\x58\x43\x65\xfc\x9a\xd3\x1e\x0a\x95\x19\x6c\x2b\xc1\x1a\x43\x65\x10\xb3\x39\x54\x06\x91\x1b\x42\xe5\xec\x30\x10\x2a\x63\x00\x4e\x7b\x38\x54\x66\xd0\xad\x04\x6d\x09\x95\x41\xec\xb6\x50\x19\x24\x60\x0c\x95\xb3\x83\x35\x54\xc6\xaf\x39\xf5\xa1\x50\x99\xc1\xb6\x12\xac\x31\x54\x06\x31\x9b\x43\x65\x10\xb9\x21\x54\x26\xc6\xc5\x14\x2a\x53\x13\x54\xbe\x4a\x50\x60\xa8\xcc\x20\x5b\x19\x12\x0e\x95\x61\xac\x86\x50\x19\x46\x0c\x85\xca\xc4\x9a\x58\x43\x65\x6a\x78\xca\x57\x09\xd4\x1c\x2a\x33\xf0\x56\x06\xb7\x84\xca\x30\x7e\x5b\xa8\x0c\x93\x30\x86\xca\xc4\xac\xd8\x42\x65\x6a\x80\xca\x57\x09\xd2\x18\x2a\x33\xe8\x56\x86\x36\x87\xca\x30\x76\x4b\xa8\x0c\x13\x30\x85\xca\xc4\xbe\x58\x4
2\x65\x6a\x88\xca\x57\x09\xd0\x14\x2a\x33\xe0\x56\x06\x36\x86\xca\x30\x6e\x73\xa8\x0c\xa3\x37\x84\xca\xc4\xb8\x58\x43\x65\x6a\x87\xca\x57\x09\xd4\x1c\x2a\x33\xf0\x56\x06\xb7\x84\xca\x30\x7e\x5b\xa8\x0c\x93\x30\x86\xca\xc4\xd2\x58\x42\x65\x6a\x92\xca\x57\x09\xd0\x14\x2a\x33\xe0\x56\x06\x36\x86\xca\x30\x6e\x73\xa8\x0c\xa3\x37\x84\xca\xfc\xf0\xb5\x39\x54\x66\x10\xdc\x3e\x8f\x08\x95\xfb\x1a\xad\x5a\xc3\x18\x2a\x5b\xa8\x98\x43\x65\x0b\xa1\x91\xa1\x72\x77\x5e\x20\x77\xdb\xcc\x1c\x2b\xb7\x19\x8b\x6b\x05\x20\x30\x56\x6e\xf9\x6e\x58\x11\x10\x8e\x95\x41\x9c\x86\x58\x19\x44\x0b\xc5\xca\x6d\x36\x10\x2b\xb7\x19\x8b\x66\x05\x48\x73\xac\xdc\xf2\xed\xb1\x22\xb4\x25\x56\x06\xb1\xdb\x62\x65\x90\x80\x31\x56\x6e\x33\x7b\xac\xdc\x66\x2c\x9e\x15\x00\x8d\xb1\x72\xcb\xf7\xce\x8a\xc0\xe6\x58\x19\xc4\x6d\x89\x95\x41\xf4\xa6\x58\xb9\xcd\xac\xb1\x72\x9b\xb1\x88\x56\x80\x33\xc5\xca\x2d\xdf\x58\x2b\xc2\x1a\x63\x65\x10\xb3\x39\x56\x06\x91\x1b\x62\xe5\x36\x1b\x88\x95\xdb\x8c\x45\xb3\x02\xa4\x39\x56\x6e\xf9\x7e\x5b\x11\xda\x12\x2b\x83\xd8\x6d\xb1\x32\x48\xc0\x18\x2b\xb7\x99\x35\x56\x6e\x33\x16\xd1\x0a\x70\xa6\x58\xb9\xe5\xdb\x71\x45\x58\x63\xac\x0c\x62\x36\xc7\xca\x20\x72\x43\xac\x4c\x8c\x8b\x29\x56\xa6\x26\xa8\x7c\x95\xa0\xc0\x58\xb9\xe5\xbb\x75\x25\x48\x38\x56\x86\xb1\x1a\x62\x65\x18\x31\x14\x2b\x13\x6b\x62\x8d\x95\xa9\xe1\x29\x5f\x25\x50\x73\xac\xdc\xf2\xed\xbb\x12\xb8\x25\x56\x86\xf1\xdb\x62\x65\x98\x84\x31\x56\x26\x66\xc5\x16\x2b\x53\x03\x54\xbe\x4a\x90\xc6\x58\xb9\xe5\x7b\x7b\x25\x68\x73\xac\x0c\x63\xb7\xc4\xca\x30\x01\x53\xac\x4c\xec\x8b\x25\x56\xa6\x86\xa8\x7c\x95\x00\x4d\xb1\x72\xcb\x37\xfe\x4a\xc0\xc6\x58\x19\xc6\x6d\x8e\x95\x61\xf4\x86\x58\x99\x18\x17\x6b\xac\x4c\xed\x50\xf9\x2a\x81\x9a\x63\xe5\x96\xef\x07\x96\xc0\x2d\xb1\x32\x8c\xdf\x16\x2b\xc3\x24\x8c\xb1\x32\xb1\x34\x96\x58\x99\x9a\xa4\xf2\x55\x02\x34\xc5\xca\x2d\xdf\x2e\x2c\x01\x1b\x63\x65\x18\xb7\x39\x56\x86\xd1\x1b\x62\x65\x7e\xd6\xdc\x1c\x2b\xb7\x59\x1f\xc5\xca\xd0\xa6\x58\xb9\x15\xf6\x0f\x2b\x35\x8c\xb1\xb2\x85\x8a\x39\x56\xb6\x10\x02\x63\xe5\x79\x83\xda\xc6\x65\x79\x06\x2e\xe4\x07\x3d\xa1\xad\xa5\x1e\x60\xa0\x2c\xe3\x8b\x7e\x5a\x51\x83\x6c\xaa\xf3\x29\x0e\x1b\x74\x51\x0f\x3f\x92\xb7\x5d\x21\xca\xb2\xb4\xac\xd3\x1a\x38\x00\xc9\x10\x91\xb3\xbd\x02\x6b\xea\x01\x5f\xf2\x8a\x1e\xee\x15\xa0\xb4\x13\xbe\xe4\x1d\x3b\x48\x2f\xc0\x69\x67\xe4\xcd\xfb\xb7\x49\x2d\x7e\x0a\x7c\x88\xa1\xee\x24\xf8\x20\x4f\x7d\x52\x7a\x1b\x5b\x96\xcd\x32\xa4\x1a\x3f\x15\x3e\xc4\x57\x77\x32\x7c\x90\xaf\x3e\xe1\xe9\x44\xbe\xf8\x97\x09\x2a\xbd\xc3\x38\xbe\xba\x93\xe2\x83\x7c\xf5\xc9\xb4\x26\xf2\xd5\x4d\x03\x49\x3d\x7e\x6a\x7c\x88\xb1\xee\xe4\xf8\x20\x63\x7d\xa2\x06\x2b\x63\xac\x15\xc5\x0b\xaa\xe2\xb0\x46\x17\x36\x5a\xc2\x53\xbd\x2f\xaa\x7c\xd7\xbd\xd0\xf0\x9f\xcb\x12\xae\xd2\xbd\xd0\xf5\x3d\x2c\xd3\x26\xcc\xd2\x7f\x6a\x75\xfa\x37\x62\xa5\x7d\x71\x6a\xdc\x17\x72\x12\xd5\xcd\xe8\x79\xf9\xbe\x64\xb7\xf0\x3c\x13\x30\x3d\xef\x2b\x41\x2f\xcd\xd0\x51\x91\x25\x12\xec\x06\x80\x25\xec\xc5\x14\xac\x6e\x5e\x33\xb4\xa3\x25\x5a\x23\x89\xe5\xb8\xc4\x45\x56\x54\xbb\x0f\xfb\xfd\x5e\x03\x28\xab\x34\x0f\xab\x57\x0e\xe2\x79\x9b\x48\x82\x0a\x25\x30\x7a\x1e\x7a\xa6\x14\x1e\xb1\xc5\xea\x31\xac\x83\x58\x67\xa4\x46\x71\x71\x4a\x04\x4a\xdb\xf5\x16\x3d\xad\x75\x4a\x1d\xa0\x4c\xab\x2f\x96\xa8\xad\xe3\xcd\x6a\x93\xe8\xd4\xce\x71\x8c\xea\x9a\x43\x05\xdb\x70\xb3\x5c\x01\xb4\x28\x98\x42\x89\x15\x4a\x74\x7c\xb4\x41\x8b\xa5\x46\x27\x3d\xed\x8b\x0e\x64\x13\x06\xd1\x56\x27\x82\x61\x64\x0a\xa4\x44\x46\xef\x6f\xc2\x6d\xa4\x4b\x2f\xac\x4e\xe9\xe9\xd0\xcb\x2f\xf6\xbd\x8d\x4e\x81\x81\xc9\x44\x78\xa1\x44\x27\x59\x3c\x21\x59\x9d\x
08\x6c\x12\x9e\x0e\x02\x50\xbc\x58\x41\xbd\x45\xa1\x64\x2a\xac\x4c\x22\x12\x25\x81\xbf\xd0\x89\xd0\x31\xc3\x9b\xb2\xdd\x3f\xed\x43\x9d\x06\x01\x92\x49\xd0\x22\xb9\x19\x21\xf2\xd0\x0a\x68\x46\xf5\x8d\x83\x2c\x96\x8b\x70\xe9\x41\x8d\xa8\xbe\xa9\x4d\xa8\xbe\x29\xd2\x48\x02\x3f\xd0\x85\x9d\x9f\x1b\x94\x18\xd5\x97\xc2\x1c\xd3\x04\x91\x61\xb9\xf3\x3e\x7b\x4e\xf8\x4c\xa1\x89\x69\x29\xc3\x0a\x9d\x1a\xea\xe2\xeb\x63\x98\x14\x2f\xf4\xc0\x7f\x14\xc6\xdf\x0e\x24\x81\x90\xab\x43\xf7\x59\x0b\x58\x4e\x81\x0b\xf9\x9b\x66\x69\xf3\xca\xd3\x0c\x88\x4c\xa4\x27\x00\x8e\x06\x17\x02\xd8\xff\xf8\xfc\x87\x0f\x4e\x5d\x9c\xab\x18\xfd\x12\x96\x65\x7a\x3a\xfc\xd7\x7f\xfe\xed\x4b\x54\x14\x4d\xdd\x54\x61\x39\xcf\xd3\xd3\x3c\xae\xeb\x79\x1e\x96\xce\x1f\x3e\xff\xff\x01\x00\x00\xff\xff\xc4\xaa\x95\x3d\x6f\xf1\x01\x00") + +func pagesAssetsStylesBootstrap400Beta2MinCssBytes() ([]byte, error) { + return bindataRead( + _pagesAssetsStylesBootstrap400Beta2MinCss, + "pages/assets/styles/bootstrap-4.0.0-beta.2.min.css", + ) +} + +func pagesAssetsStylesBootstrap400Beta2MinCss() (*asset, error) { + bytes, err := pagesAssetsStylesBootstrap400Beta2MinCssBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "pages/assets/styles/bootstrap-4.0.0-beta.2.min.css", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -302,14 +323,15 @@ func AssetNames() []string { // _bindata is a table, holding each asset generator, mapped to its name. var _bindata = map[string]func() (*asset, error){ - "pages/assets/js/bootstrap-3.1.1.min.js": pagesAssetsJsBootstrap311MinJs, - "pages/assets/js/containers.js": pagesAssetsJsContainersJs, - "pages/assets/js/gcharts.js": pagesAssetsJsGchartsJs, - "pages/assets/js/google-jsapi.js": pagesAssetsJsGoogleJsapiJs, - "pages/assets/js/jquery-1.10.2.min.js": pagesAssetsJsJquery1102MinJs, - "pages/assets/styles/bootstrap-3.1.1.min.css": pagesAssetsStylesBootstrap311MinCss, - "pages/assets/styles/bootstrap-theme-3.1.1.min.css": pagesAssetsStylesBootstrapTheme311MinCss, - "pages/assets/styles/containers.css": pagesAssetsStylesContainersCss, + "pages/assets/js/bootstrap-4.0.0-beta.2.min.js": pagesAssetsJsBootstrap400Beta2MinJs, + "pages/assets/js/containers.js": pagesAssetsJsContainersJs, + "pages/assets/js/gcharts.js": pagesAssetsJsGchartsJs, + "pages/assets/js/google-jsapi.js": pagesAssetsJsGoogleJsapiJs, + "pages/assets/js/jquery-3.0.0.min.js": pagesAssetsJsJquery300MinJs, + "pages/assets/js/popper.min.js": pagesAssetsJsPopperMinJs, + "pages/assets/styles/bootstrap-4.0.0-beta.2.min.css": pagesAssetsStylesBootstrap400Beta2MinCss, + "pages/assets/styles/bootstrap-theme-3.1.1.min.css": pagesAssetsStylesBootstrapTheme311MinCss, + "pages/assets/styles/containers.css": pagesAssetsStylesContainersCss, } // AssetDir returns the file names below a certain @@ -356,16 +378,17 @@ var _bintree = &bintree{nil, map[string]*bintree{ "pages": {nil, map[string]*bintree{ "assets": {nil, map[string]*bintree{ "js": {nil, map[string]*bintree{ - "bootstrap-3.1.1.min.js": {pagesAssetsJsBootstrap311MinJs, map[string]*bintree{}}, - "containers.js": {pagesAssetsJsContainersJs, map[string]*bintree{}}, - "gcharts.js": {pagesAssetsJsGchartsJs, map[string]*bintree{}}, - "google-jsapi.js": {pagesAssetsJsGoogleJsapiJs, map[string]*bintree{}}, - "jquery-1.10.2.min.js": {pagesAssetsJsJquery1102MinJs, map[string]*bintree{}}, + "bootstrap-4.0.0-beta.2.min.js": {pagesAssetsJsBootstrap400Beta2MinJs, map[string]*bintree{}}, + "containers.js": {pagesAssetsJsContainersJs, 
map[string]*bintree{}}, + "gcharts.js": {pagesAssetsJsGchartsJs, map[string]*bintree{}}, + "google-jsapi.js": {pagesAssetsJsGoogleJsapiJs, map[string]*bintree{}}, + "jquery-3.0.0.min.js": {pagesAssetsJsJquery300MinJs, map[string]*bintree{}}, + "popper.min.js": {pagesAssetsJsPopperMinJs, map[string]*bintree{}}, }}, "styles": {nil, map[string]*bintree{ - "bootstrap-3.1.1.min.css": {pagesAssetsStylesBootstrap311MinCss, map[string]*bintree{}}, - "bootstrap-theme-3.1.1.min.css": {pagesAssetsStylesBootstrapTheme311MinCss, map[string]*bintree{}}, - "containers.css": {pagesAssetsStylesContainersCss, map[string]*bintree{}}, + "bootstrap-4.0.0-beta.2.min.css": {pagesAssetsStylesBootstrap400Beta2MinCss, map[string]*bintree{}}, + "bootstrap-theme-3.1.1.min.css": {pagesAssetsStylesBootstrapTheme311MinCss, map[string]*bintree{}}, + "containers.css": {pagesAssetsStylesContainersCss, map[string]*bintree{}}, }}, }}, }}, diff --git a/vendor/github.com/google/cadvisor/pages/static/static.go b/vendor/github.com/google/cadvisor/pages/static/static.go index cac3776af89a..21a3ed4bc952 100644 --- a/vendor/github.com/google/cadvisor/pages/static/static.go +++ b/vendor/github.com/google/cadvisor/pages/static/static.go @@ -28,25 +28,27 @@ import ( const StaticResource = "/static/" -var bootstrapJs, _ = Asset("pages/assets/js/bootstrap-3.1.1.min.js") +var popper, _ = Asset("pages/assets/js/popper.min.js") +var bootstrapJs, _ = Asset("pages/assets/js/bootstrap-4.0.0-beta.2.min.js") var containersJs, _ = Asset("pages/assets/js/containers.js") var gchartsJs, _ = Asset("pages/assets/js/gcharts.js") var googleJsapiJs, _ = Asset("pages/assets/js/google-jsapi.js") -var jqueryJs, _ = Asset("pages/assets/js/jquery-1.10.2.min.js") +var jqueryJs, _ = Asset("pages/assets/js/jquery-3.0.0.min.js") -var bootstrapCss, _ = Asset("pages/assets/styles/bootstrap-3.1.1.min.css") +var bootstrapCss, _ = Asset("pages/assets/styles/bootstrap-4.0.0-beta.2.min.css") var bootstrapThemeCss, _ = Asset("pages/assets/styles/bootstrap-theme-3.1.1.min.css") var containersCss, _ = Asset("pages/assets/styles/containers.css") var staticFiles = map[string][]byte{ - "bootstrap-3.1.1.min.css": bootstrapCss, - "bootstrap-3.1.1.min.js": bootstrapJs, - "bootstrap-theme-3.1.1.min.css": bootstrapThemeCss, - "containers.css": containersCss, - "containers.js": containersJs, - "gcharts.js": gchartsJs, - "google-jsapi.js": googleJsapiJs, - "jquery-1.10.2.min.js": jqueryJs, + "popper.min.js": popper, + "bootstrap-4.0.0-beta.2.min.css": bootstrapCss, + "bootstrap-4.0.0-beta.2.min.js": bootstrapJs, + "bootstrap-theme-3.1.1.min.css": bootstrapThemeCss, + "containers.css": containersCss, + "containers.js": containersJs, + "gcharts.js": gchartsJs, + "google-jsapi.js": googleJsapiJs, + "jquery-3.0.0.min.js": jqueryJs, } func HandleRequest(w http.ResponseWriter, u *url.URL) { diff --git a/vendor/github.com/google/cadvisor/pages/templates.go b/vendor/github.com/google/cadvisor/pages/templates.go index 75ffc94df86c..45f1c1840029 100644 --- a/vendor/github.com/google/cadvisor/pages/templates.go +++ b/vendor/github.com/google/cadvisor/pages/templates.go @@ -83,7 +83,7 @@ func (fi bindataFileInfo) Sys() interface{} { return nil } -var _pagesAssetsHtmlContainersHtml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x5a\xcd\x72\xdb\x38\x12\x3e\x4b\x4f\xd1\xc3\xda\xc3\x6c\x55\x48\xc5\x49\x2e\x9b\x95\x55\xa5\x51\x92\x1d\xed\x38\xb6\xcb\xb2\x67\x6a\x8e\x10\xd9\x22\x11\x83\x00\x07\x00\x25\x6b\x5d\x7e\xf7\x2d\x00\xa4\xc4\x5f\xc9\x7f\x95\x8c\x2e\x96\x08\x74\xf7\xd7\x5f\x77\x03\x0d\xc2\xe3\x9f\x7c\x7f\x08\x30\x13\xd9\x56\xd2\x38\xd1\xf0\xee\xed\xc9\x07\xf8\x8f\x10\x31\x43\x98\xf3\x30\x80\x29\x63\x70\x65\x86\x14\x5c\xa1\x42\xb9\xc6\x28\x18\x0e\x01\xce\x68\x88\x5c\x61\x04\x39\x8f\x50\x82\x4e\x10\xa6\x19\x09\x13\x2c\x47\xde\xc0\xef\x28\x15\x15\x1c\xde\x05\x6f\xe1\x67\x33\xc1\x2b\x86\xbc\x7f\xfe\x7b\x08\xb0\x15\x39\xa4\x64\x0b\x5c\x68\xc8\x15\x82\x4e\xa8\x82\x15\x65\x08\x78\x17\x62\xa6\x81\x72\x08\x45\x9a\x31\x4a\x78\x88\xb0\xa1\x3a\xb1\x66\x0a\x25\xc1\x10\xe0\xcf\x42\x85\x58\x6a\x42\x39\x10\x08\x45\xb6\x05\xb1\xaa\xce\x03\xa2\x0d\x5e\xf3\x49\xb4\xce\x3e\x8e\x46\x9b\xcd\x26\x20\x16\x6b\x20\x64\x3c\x62\x6e\x9e\x1a\x9d\xcd\x67\x9f\xcf\x17\x9f\xfd\x77\xc1\x5b\x23\x71\xc3\x19\x2a\x05\x12\xff\xca\xa9\xc4\x08\x96\x5b\x20\x59\xc6\x68\x48\x96\x0c\x81\x91\x0d\x08\x09\x24\x96\x88\x11\x68\x61\xd0\x6e\x24\xd5\x94\xc7\x6f\x40\x89\x95\xde\x10\x89\x43\x80\x88\x2a\x2d\xe9\x32\xd7\x35\xaa\x4a\x6c\x54\xd5\x26\x08\x0e\x84\x83\x37\x5d\xc0\x7c\xe1\xc1\x2f\xd3\xc5\x7c\xf1\x66\x08\xf0\xc7\xfc\xfa\xd7\x8b\x9b\x6b\xf8\x63\x7a\x75\x35\x3d\xbf\x9e\x7f\x5e\xc0\xc5\x15\xcc\x2e\xce\x3f\xcd\xaf\xe7\x17\xe7\x0b\xb8\xf8\x02\xd3\xf3\x3f\xe1\xb7\xf9\xf9\xa7\x37\x80\x54\x27\x28\x01\xef\x32\x69\xf0\x0b\x09\xd4\x90\x68\xe2\x06\xb0\x40\xac\x01\x58\x09\x07\x48\x65\x18\xd2\x15\x0d\x81\x11\x1e\xe7\x24\x46\x88\xc5\x1a\x25\xa7\x3c\x86\x0c\x65\x4a\x95\x09\xa5\x02\xc2\xa3\x21\x00\xa3\x29\xd5\x44\xdb\x27\x2d\xa7\x82\xa1\xef\x4f\x86\xc3\x71\xa2\x53\x36\x19\x02\x8c\x13\x24\xd1\xc4\x86\x60\xac\xa9\x66\x38\x09\xa7\xd1\x9a\x2a\x21\xc1\x87\xfb\xfb\xe0\x13\x55\x19\x23\xdb\x73\x92\xe2\xc3\xc3\x78\xe4\xa6\xb8\xe9\x3f\xf9\x3e\x9c\x11\x8d\x4a\xdb\x4c\xa0\x0c\x23\x83\x00\x52\xca\xe9\x8a\x62\x04\xb3\xc5\x02\x8c\x35\x3b\x9b\x51\x7e\x0b\x12\xd9\xa9\xa7\xf4\x96\xa1\x4a\x10\xb5\x07\x89\xc4\xd5\xa9\x77\x7f\x1f\x5c\x09\xa1\x1f\x1e\x94\xc1\x1d\x8e\x96\x42\x68\xa5\x25\xc9\xfc\xf7\xc1\x49\x70\x12\xa4\x94\x07\xa1\x52\xde\x64\xb8\xb7\x7c\x91\x19\x0f\x09\x33\xce\xa5\xf8\x52\x3b\x56\x49\x8f\xb5\xa7\x68\x0c\x05\x37\xc9\x8e\x52\xb5\x00\x1f\xa4\xea\xbf\x64\x4d\x16\xa1\xa4\x99\xde\x7b\xa2\xdc\x6f\x25\xc3\xb6\x9d\x6f\x7f\xe5\x28\xb7\xfe\x49\x70\xf2\x36\x78\x67\x11\x7f\x53\xde\x64\x3c\x72\x32\x8f\x50\xd0\x45\x71\xbf\x0a\xbd\xcd\xf0\xd4\xd3\x78\xa7\x47\xdf\xc8\x9a\xb8\xa7\x5e\xb7\xe6\xd8\xae\x4f\xfe\x37\x45\x32\xda\x50\xf9\x6c\x9d\x15\x5a\x5f\x09\x64\x98\x10\xa9\xdb\xda\xc6\xa3\xb2\x1e\xc6\x4b\x11\x6d\x0b\x03\x11\x5d\x43\xc8\x88\x52\xa7\xde\x0e\x89\xcb\x3b\x5f\x25\x62\x13\x12\x85\x1e\x4c\x8a\x75\x6c\x4c\x9a\xb9\xe1\xed\x85\x99\xaf\x52\xff\xe4\x9d\x07\x34\x3a\xf5\x98\x88\x85\xb7\x13\x1b\x91\xdd\xd7\x9a\xbd\x52\x64\x32\x1c\x54\x07\x32\x12\xa3\x6f\xc0\xa2\x34\x43\xa6\x92\x4f\x26\xed\x82\x4d\x4e\x8c\xdc\x28\xa2\x6b\xf3\x57\xb0\x52\x7c\x29\x91\x44\xa1\xcc\xd3\xa5\x93\xbe\xbf\x97\x84\xc7\x08\xff\xc8\x88\x44\xae\x67\x3b\x37\x3f\x9e\x42\x70\x59\x7f\xa6\x1e\x1e\xac\x41\x46\x27\x15\x67\x9b\x92\xc1\x19\xe5\xb7\x0f\x0f\xde\xa4\x63\xe8\x1a\xef\xb4\x41\x47\x26\xe3\x11\xa3\x05\x00\xe4\x91\x51\x3c\x1e\x09\xb6\x27\xc5\x02\x77\x3f\xee\xef\xe9\x0a\x82\xb9\x72\xa4\x1e\xe1\x0a\x8a\xcf\x38\xf9\xb0\x07\x19\x04\xa3\x48\x84\xb7\x86\xb1\x4f\xf6\x2f\xec\x7d\x72\x60\x92\x0f\x3d\xa6\x1d\xb8\x2a\x90\x45\xbe\x0c\xab\x8c\xbc\x2c\x76\xef\x27\x35\x7d\xe3\x51\xf2\xbe\x1a\xb
8\x8a\x30\xa3\x4a\xfb\xb1\x14\x79\xd6\x88\x9c\xaa\x28\xb0\x61\x6b\x22\x1c\xd4\x92\xb3\x36\xbf\x0c\x56\xdb\x88\x4f\x35\xa6\x36\x88\xb5\xf9\xfb\x08\x36\x82\x57\x61\xad\x9f\x42\xc7\xa0\x8b\xc1\x42\x13\x9d\xbf\x06\x81\x9f\x24\x5d\xa3\x04\xa7\xaf\x49\x60\xce\x8e\xf2\xe7\x52\x43\x59\x71\xcb\x5f\x03\x9f\x4b\x79\xa7\x06\x3a\x28\x1a\xab\x8c\xf0\xd2\x8a\x51\xe3\x33\xb2\x44\x66\xb9\xab\xea\x0e\x7e\xc3\xad\xa1\xce\x4c\x9f\x40\x73\xf0\x77\xc2\x72\x5b\xb9\xcd\xba\xa8\xb3\xe6\x9c\xdd\x63\x1b\x3c\x0f\xda\x42\x0b\x49\x62\x1c\x2f\xe5\xa4\x00\x64\x54\xf5\x91\x35\xd8\x73\x65\xcd\xb7\xb8\xea\x47\xf5\x54\xbe\x2a\xfa\xdb\x7c\x55\x07\xeb\x7c\x0d\x76\x74\x0d\xc6\xa3\x9c\x59\x6f\x4a\x26\x8b\x07\x7d\xd9\xda\x55\xe3\xce\xab\x79\x4a\x62\x3c\x9e\xa1\xb0\xfb\xf4\xa7\x2a\x54\x3e\x26\x67\x9d\x6a\x97\xac\x95\x91\x2a\x2e\xa7\xcd\xec\x17\x2e\x4f\x7c\x6a\x65\xcc\xbe\x55\x9b\x65\x42\xb8\x94\xfb\xdf\xc7\x7c\xbb\x42\x25\x72\x19\xa2\x9a\xae\x09\x65\xa6\x6d\x7e\x85\x1a\x9c\x2b\xc1\x6c\xeb\xd9\xa8\x3f\x67\x72\x96\xe5\x55\x63\xbd\x89\x56\x61\xa2\x37\x7f\x80\x84\x9a\xae\x4d\x93\x5e\x58\xf4\x6d\x6f\x0a\x19\xe1\xc8\xdc\x77\x6f\x32\xbb\xbc\x71\xe1\xdf\x6b\x2c\x16\xef\x0c\x43\x03\x27\x38\x33\xcd\xf2\xce\xf1\xc3\x26\x0f\xd5\x51\x42\xa4\x89\x63\x99\xa3\x99\xa4\x5c\xbb\x87\x6d\x63\x50\x53\x93\x73\xba\x53\xa3\xaa\x6a\xda\xc8\xab\x41\xec\xf0\xe5\x2b\xb9\x7b\x25\x77\xbe\x92\x3b\xb0\xaa\x1a\x1e\xcd\x44\xdd\xa1\xbd\xc5\x7e\x9f\x42\xf1\x22\x97\xd4\xed\xcb\xdd\x99\x32\x26\x36\xe6\x40\x22\xda\x41\x32\x16\x1a\x06\x21\xf8\x4a\xc2\x84\x72\x9c\xf3\x95\x08\xce\xf3\xd4\xca\x95\x6b\x4c\x1b\x7d\xb9\xd4\xec\x7e\x3b\x27\xbe\x62\x2a\xe4\xf6\xfb\x26\xbc\xb3\x79\x20\xe7\xdd\x84\xc0\xbd\x2d\xb0\x6a\x5e\x4e\x6f\x45\x59\xb3\x02\xe8\xff\xf0\x80\xe1\xfe\xa4\x29\xe4\x6f\x38\xd5\x07\xe4\x9f\x93\x55\x85\x9e\x57\x2a\x94\xae\x22\x69\x3b\x7d\xb4\x46\x7a\xdd\x2d\x24\x5f\xe0\xe8\x62\x43\xb2\xd7\x5a\xe4\x36\x24\xeb\x5c\x16\xda\x1e\x57\xac\x3e\xc3\xeb\x8a\xf4\x11\xcf\x9b\xa5\x57\x78\x57\xeb\x42\x9f\xbd\x99\xdd\x28\xd3\x1a\xf5\x77\xe2\xb6\xf2\x8a\xfa\xcb\x24\x4d\x89\xdc\x1e\x68\x03\xcc\x2c\x63\x81\xf2\xb8\xdd\x08\xd4\xa7\x15\xc5\x7c\xb1\x46\xb9\xa6\xb8\x39\xdc\x1e\x54\x3b\x84\xdc\x20\xf6\x63\x92\xc7\xe8\xd5\x55\x9a\xd3\xec\xae\x65\xf8\x21\xde\x5c\x4a\x11\xa2\x52\xc7\xba\x9d\xaa\x3b\x59\x29\xe2\x6b\x91\x3d\xca\xa1\x9e\x3e\xe3\x3b\xba\x69\x5b\x8e\xc7\x38\xd8\xe1\x4d\xc3\xc0\x87\xc9\xb5\xd0\x84\x41\x99\x87\x1f\x6c\x66\x56\xf8\x09\xb3\xdc\xd7\x66\x8a\xef\x02\x6f\x5f\x6a\xec\x49\x81\xf2\xd5\x93\x51\x35\xbb\xbc\x81\x33\x41\x22\x98\xae\x51\x1e\xd0\xc7\x04\x89\xea\x8a\x76\x6f\xa4\xaa\xc8\x2c\x26\xc8\xec\x11\x5a\xf6\x2a\xcb\x50\xfa\x66\xff\xef\xc4\xd7\xad\xf2\x17\x89\xe4\x36\x12\x1b\xde\xa7\xd3\xa9\x5a\x96\xd3\x7a\x95\xb6\x53\xe3\xe8\xee\xfc\x1d\xd3\xa4\xdc\xa8\xbf\x53\xa6\xa4\xd6\xdc\xf1\x30\x2c\xe5\xa8\xf1\xa4\x02\x40\x8a\x0d\x74\x1f\x78\x0e\x86\xb0\x31\xad\xbd\x1c\xff\xcb\x9e\x2d\x6b\xae\x4a\x11\x4b\xb4\x2f\x50\xa1\xf5\xe9\x9a\xe8\x2f\x89\x84\xea\x0f\x3f\x32\x07\x55\xe9\x95\xeb\x88\x1b\x48\x84\xf6\x1d\x15\x9d\x9a\xa1\xbe\x57\x29\xe9\x0b\xce\xb6\xde\xe4\x57\xa1\xa1\x0c\x98\x3b\x24\x77\x48\xb6\xd9\x7c\x0a\x5c\xca\x57\xa2\x01\x36\x14\x2c\x7a\x0e\xda\x99\x60\xd1\x63\xe1\x0e\x06\x9d\xb8\xbb\x1f\xb6\x23\xf7\xde\xab\x66\x97\xc6\xbb\xe6\xea\xf3\xc4\xa2\x3c\x47\xbd\x11\xf2\xf6\x89\x55\x39\x78\x79\x39\x16\x86\x8b\xcd\xfe\x29\x85\x38\x68\x8e\x46\x52\x64\x26\xf9\xdb\x05\xb2\xcc\xb5\x16\xbb\x78\x2d\x35\x87\xa5\xe6\x7e\x84\x2b\x92\x33\x0d\xa5\x9c\xaf\x45\x1c\x33\xf4\x8a\xf7\xd9\x4e\xc8\xf1\xcc\x1d\x4a\x5f\x21\xc3\xd0\x1e\x01\x76\xc6\x20\x22\x9a\x14\xa2\x15\x0c\x40\x24\x25\x7e\x42\x54\x26\xb2\x3c\x
3b\xf5\xb4\xcc\xb1\x78\x88\x77\x19\xe1\x11\x46\xa7\xde\x8a\x30\x85\x1d\x29\xe6\xd2\xab\xdb\x70\x19\xeb\xee\xfc\xaa\x25\x66\x48\x24\x56\xe6\x0e\xca\x4c\x70\x9e\xb5\x58\xca\x59\xb7\x49\xaf\x49\xb0\x9f\x22\xcf\x3d\x90\xc2\x78\xec\xbe\x5b\xc7\x6c\x77\xc9\x30\x5a\x6e\x0f\x32\xd6\xce\xf9\xe2\xf5\xd0\x81\xb4\x7d\xca\x82\x9c\x48\x91\xc7\x49\x96\xeb\xf6\x2a\xb8\x5b\x96\x4b\x78\xcb\xad\x46\xd5\xde\xbe\x9f\x61\xf6\xb3\x94\xc2\xbe\x3e\x6e\x6d\x01\xa5\x2d\xb4\x33\xfa\x8d\x35\x9c\x6f\x54\xe8\x17\xf5\xc3\xb6\xcc\x2f\x94\xa1\xda\x2a\x8d\xe9\xe3\x3b\xc8\xd5\x4e\xc6\xed\x7d\x9d\x4d\x64\xbf\xa6\x9e\x65\x6a\x96\x2b\x2d\xd2\xaf\xa8\x25\x0d\x9f\xca\xc7\x91\xc5\x6a\x70\x88\x81\xa9\xbb\xe1\x36\x79\x0c\x85\xf5\xe6\x8a\x75\x28\x57\x1a\xbd\x94\x75\xc2\x4f\x9d\x9e\xa3\xf9\x30\x68\x1e\x36\x3b\x6e\x41\x7e\x58\x6a\x74\xdc\x9d\x1c\xcb\x8e\xc7\x35\x55\x19\x98\xbe\xd9\xb6\x35\x1f\x9b\xeb\x05\xe5\x59\xae\x6b\xad\x6e\xf5\x86\xc4\x8f\xdc\x45\x9c\x1f\x8a\x9c\x6b\xaf\x73\xff\xde\x6d\xdd\x5d\x72\x56\x7d\x8f\xdc\x9a\xb0\x1c\x4f\x4f\xde\x36\x20\xf7\x2f\x34\x9d\x08\x6b\xdd\x60\x43\x53\xf7\x02\xf8\x4c\x0e\x5d\x33\x72\x94\xc6\xa2\x8d\xf8\x7b\x32\x59\x6b\xb5\x9c\x15\x29\x18\xab\x98\x59\x32\x11\xde\x36\x19\x68\xef\x8f\xcd\x9e\xfc\x15\xc3\xd2\xb3\x74\x77\x0c\x56\x87\x2a\x03\x87\xaf\xd2\x4b\x61\xa5\x89\xd4\x97\x24\xc6\x9f\xef\xef\x83\xdd\x0d\xaa\xbb\x71\x7e\x03\xe6\x59\xed\xfc\x6d\x1f\xb5\x8e\x5b\xf6\xa9\xbb\xca\xb5\x5f\xcb\x7b\x5d\xfb\xdf\x47\xe6\x13\x49\xb2\x71\xd7\x23\xc6\x4c\xfd\x26\xa6\x98\x54\xbf\xb9\x77\x17\xf6\xe3\x91\xfb\xd7\x96\xff\x07\x00\x00\xff\xff\x4b\x13\x4b\x6c\x3d\x25\x00\x00") +var _pagesAssetsHtmlContainersHtml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x5a\xcd\x72\xdb\x38\x12\x3e\x4b\x4f\xd1\xc3\xda\xc3\x6c\x55\x48\xd9\x89\x2f\x9b\x95\x55\xa5\x51\x92\x1d\xed\x38\x76\xca\xb2\x67\x6a\x8e\x20\xd9\x22\x11\x43\x04\x06\x00\x25\x6b\x5d\x7e\xf7\x2d\x00\xa4\xc4\x5f\x29\xfe\xa9\x64\x74\xb1\x44\xa0\xbb\xbf\xfe\xba\x1b\x68\x10\x1e\xff\xe4\xfb\x43\x80\x19\x17\x5b\x49\x93\x54\xc3\xdb\x93\xd3\x33\xf8\x0f\xe7\x09\x43\x98\x67\x51\x00\x53\xc6\xe0\xda\x0c\x29\xb8\x46\x85\x72\x8d\x71\x30\x1c\x02\x5c\xd0\x08\x33\x85\x31\xe4\x59\x8c\x12\x74\x8a\x30\x15\x24\x4a\xb1\x1c\x79\x03\xbf\xa3\x54\x94\x67\xf0\x36\x38\x81\x9f\xcd\x04\xaf\x18\xf2\xfe\xf9\xef\x21\xc0\x96\xe7\xb0\x22\x5b\xc8\xb8\x86\x5c\x21\xe8\x94\x2a\x58\x52\x86\x80\xf7\x11\x0a\x0d\x34\x83\x88\xaf\x04\xa3\x24\x8b\x10\x36\x54\xa7\xd6\x4c\xa1\x24\x18\x02\xfc\x59\xa8\xe0\xa1\x26\x34\x03\x02\x11\x17\x5b\xe0\xcb\xea\x3c\x20\xda\xe0\x35\x9f\x54\x6b\xf1\x7e\x34\xda\x6c\x36\x01\xb1\x58\x03\x2e\x93\x11\x73\xf3\xd4\xe8\x62\x3e\xfb\x78\xb9\xf8\xe8\xbf\x0d\x4e\x8c\xc4\x6d\xc6\x50\x29\x90\xf8\x57\x4e\x25\xc6\x10\x6e\x81\x08\xc1\x68\x44\x42\x86\xc0\xc8\x06\xb8\x04\x92\x48\xc4\x18\x34\x37\x68\x37\x92\x6a\x9a\x25\x6f\x40\xf1\xa5\xde\x10\x89\x43\x80\x98\x2a\x2d\x69\x98\xeb\x1a\x55\x25\x36\xaa\x6a\x13\x78\x06\x24\x03\x6f\xba\x80\xf9\xc2\x83\x5f\xa6\x8b\xf9\xe2\xcd\x10\xe0\x8f\xf9\xcd\xaf\x57\xb7\x37\xf0\xc7\xf4\xfa\x7a\x7a\x79\x33\xff\xb8\x80\xab\x6b\x98\x5d\x5d\x7e\x98\xdf\xcc\xaf\x2e\x17\x70\xf5\x09\xa6\x97\x7f\xc2\x6f\xf3\xcb\x0f\x6f\x00\xa9\x4e\x51\x02\xde\x0b\x69\xf0\x73\x09\xd4\x90\x68\xe2\x06\xb0\x40\xac\x01\x58\x72\x07\x48\x09\x8c\xe8\x92\x46\xc0\x48\x96\xe4\x24\x41\x48\xf8\x1a\x65\x46\xb3\x04\x04\xca\x15\x55\x26\x94\x0a\x48\x16\x0f\x01\x18\x5d\x51\x4d\xb4\x7d\xd2\x72\x2a\x18\xfa\xfe\x64\x38\x1c\xa7\x7a\xc5\x26\x43\x80\x71\x8a\x24\x9e\xd8\x10\x8c\x35\xd5\x0c\x27\xd1\x34\x5e\x53\xc5\x25\xf8\xf0\xf0\x10\x7c\xa0\x4a\x30\xb2\xbd\x24\x2b\x7c\x7c\x1c\x8f\xdc\x14\x37\x5d\x45\x92\x0a\x0d\x4a
\x46\xe7\xde\xc3\x43\x70\xcd\xb9\x7e\x7c\x54\xc6\x72\x34\x12\x5c\x08\x94\xc1\x8a\x66\xc1\x57\xe5\x4d\xc6\x23\x37\xb9\x90\xfc\xc9\xf7\xe1\x82\x68\x54\xda\xe6\x10\x65\x18\x1b\xec\xb0\xa2\x19\x5d\x52\x8c\x61\xb6\x58\x80\xc1\x69\x67\x33\x9a\xdd\x81\x44\x76\xee\x29\xbd\x65\xa8\x52\x44\xed\x41\x2a\x71\xd9\xb6\x1b\x72\xae\x95\x96\x44\xf8\x67\xc1\x49\x70\xe2\x87\xa8\x49\xf0\xd6\xe2\x88\x94\xf2\x26\xc3\x3d\x80\x2b\x61\x28\x22\xcc\xb0\xb3\xc2\x97\x9a\xb3\x4a\xfc\x77\xc1\x69\x70\xda\xb2\xf6\x14\x8d\x11\xcf\x4c\xb5\xa0\x54\x2d\xc0\x07\x19\xfb\x2f\x59\x93\x85\x0b\xc8\xce\x93\x43\x01\xfa\xfa\x57\x8e\x72\xeb\xbf\x33\x2c\xf5\x85\xe9\x90\xfc\x01\xa2\xfb\x35\xe9\xad\xc0\x73\x4f\xe3\xbd\x1e\x7d\x25\x6b\xe2\x9e\x7a\xdd\x06\x12\xbb\xcc\xf9\x5f\x15\x11\xb4\xa1\xf2\xd9\x3a\x2b\xe4\xbe\x12\xc8\x28\x25\x52\xb7\xb5\x8d\x47\x65\x59\x8d\x43\x1e\x6f\x0b\x03\x31\x5d\x43\xc4\x88\x52\xe7\xde\x0e\x89\xcb\x3e\x5f\xa5\x7c\x13\x11\x85\x1e\x4c\x8a\xe5\x70\x4c\x9a\x19\xe2\xed\x85\x99\xaf\x56\xfe\xe9\x5b\x0f\x68\x7c\xee\x31\x9e\x70\x6f\x27\x36\x22\xbb\xaf\x35\x7b\xa5\xc8\x64\x38\xa8\x0e\x08\x92\xa0\x6f\xc0\xa2\x34\x43\x66\x41\x38\x9d\xb4\xeb\x3e\x3d\x35\x72\xa3\x98\xae\xcd\x5f\xce\x4a\xf1\x50\x22\x89\x23\x99\xaf\x42\x27\xfd\xf0\x20\x49\x96\x20\xfc\x43\x10\x89\x99\x9e\xed\xdc\x7c\x7f\x0e\xc1\x97\xfa\x33\xf5\xf8\x68\x0d\x32\x3a\xa9\x38\xdb\x94\x0c\x2e\x68\x76\xf7\xf8\xe8\x4d\x3a\x86\x6e\xf0\x5e\x1b\x74\x64\x32\x1e\x31\x5a\x00\xc0\x2c\x36\x8a\xc7\x23\xce\xf6\xa4\x58\xe0\xee\xc7\xc3\x03\x5d\x42\x30\x57\x8e\xd4\x23\x5c\x41\xf1\x19\xa7\x67\x7b\x90\x41\x30\x8a\x79\x74\x67\x18\xfb\x60\xff\xc2\xde\x27\x07\x26\x3d\xeb\x31\xed\xc0\x55\x81\x2c\xf2\x30\xaa\x32\xf2\xb2\xd8\xbd\x9b\xd4\xf4\x8d\x47\xe9\xbb\x6a\xe0\x2a\xc2\x8c\x2a\xed\x27\x92\xe7\xa2\x11\x39\x55\x51\x60\xc3\xd6\x44\x38\xa8\x25\x67\x6d\x7e\x19\xac\xb6\x11\x9f\x6a\x5c\xd9\x20\xd6\xe6\xef\x23\xd8\x08\x5e\x85\xb5\x7e\x0a\x1d\x83\x2e\x06\x0b\x4d\x74\xfe\x1a\x04\x7e\x90\x74\x8d\x12\x9c\xbe\x26\x81\x39\x3b\xca\x9f\x4b\x0d\x65\xc5\x2d\x7f\x0d\x7c\x2e\xe5\x9d\x1a\xe8\xa0\x68\xac\x04\xc9\x4a\x2b\x46\x8d\xcf\x48\x88\xcc\x72\x57\xd5\x1d\xfc\x86\x5b\x43\x9d\x99\x3e\x81\xe6\xe0\xef\x84\xe5\xb6\x72\x9b\x75\x51\x67\xcd\x39\xbb\xc7\x36\x78\x1e\xb4\x85\xe6\x92\x24\x38\x0e\xe5\xa4\x00\x64\x54\xf5\x91\x35\xd8\x73\x65\xcd\xb7\xb8\xea\x47\xf5\x54\xbe\x2a\xfa\xdb\x7c\x55\x07\xeb\x7c\x0d\x76\x74\x0d\xc6\xa3\x9c\x59\x6f\x4a\x26\x8b\x07\x7d\xd9\xda\x55\xe3\xce\xab\xf9\x8a\x24\x78\x3c\x43\x61\xf7\xe9\x4f\x55\xa8\x7c\x4c\xce\x3a\xd5\x2e\x59\x2b\x23\x55\x5c\x4e\x9b\xd9\x2f\x5c\x9e\xf8\xd4\xca\x98\x7d\xab\x36\xcb\x84\x30\x94\xfb\xdf\xc7\x7c\xbb\x46\xc5\x73\x19\xa1\x9a\xae\x09\x65\xa6\xfb\x7e\x85\x1a\x9c\x2b\xce\x6c\x07\xdb\xa8\x3f\x67\x72\x26\xf2\xaa\xb1\xde\x44\xab\x30\xd1\x9b\x3f\x40\x22\x4d\xd7\xa6\xd7\x2f\x2c\xfa\xb6\xc5\x05\x41\x32\x64\xee\xbb\x37\x99\x7d\xb9\x75\xe1\xdf\x6b\x2c\x16\x6f\x81\x91\x81\x13\x5c\x98\x9e\x7b\xe7\xf8\x61\x93\x87\xea\x28\x25\xd2\xc4\xb1\xcc\x51\x21\x69\xa6\xdd\xc3\xb6\x31\xa8\xa9\xc9\x33\xba\x53\xa3\xaa\x6a\xda\xc8\xab\x41\xec\xf0\xe5\x33\xb9\x7f\x25\x77\x3e\x93\x7b\xb0\xaa\x1a\x1e\xcd\x78\xdd\xa1\xbd\xc5\x7e\x9f\x22\xfe\x22\x97\xd4\xdd\xcb\xdd\x99\x32\xc6\x37\xe6\x74\xc2\xdb\x41\x32\x16\x1a\x06\x21\xf8\x4c\xa2\x94\x66\x38\xcf\x96\x3c\xb8\xcc\x57\x56\xae\x5c\x63\xda\xe8\xcb\xa5\x66\xf7\xdb\x39\xf1\x19\x57\x5c\x6e\xbf\x6f\xc2\x3b\x9b\x07\x72\xde\x4d\x08\xdc\x4b\x07\xab\xe6\xe5\xf4\x56\x94\x35\x2b\x80\xfe\x0f\x0f\x18\xee\x4f\x9a\x42\xfe\x36\xa3\xfa\x80\xfc\x73\xb2\xaa\xd0\xf3\x4a\x85\xd2\x55\x24\x6d\xa7\x8f\xd6\x48\xaf\xbb\x85\xe4\x0b\x1c\x5d\x6c\x88\x78\xad\x45\x6e\x43\x44\xe7\xb
2\xd0\xf6\xb8\x62\xf5\x19\x5e\x57\xa4\x8f\x78\xde\x2c\xbd\xc2\xbb\x5a\x17\xfa\xec\xcd\xec\x56\x99\xd6\xa8\xbf\x13\xb7\x95\x57\xd4\x9f\x90\x74\x45\xe4\xf6\x40\x1b\x60\x66\x19\x0b\x34\x4b\xda\x8d\x40\x7d\x5a\x51\xcc\x57\x6b\x94\x6b\x8a\x9b\xc3\xed\x41\xb5\x43\xc8\x0d\x62\x3f\x21\x79\x82\x5e\x5d\xa5\x39\xcd\xee\x5a\x86\x1f\xe2\xcd\x17\xc9\x23\x54\xea\x58\xb7\x53\x75\x47\x94\x22\xbe\xe6\xe2\x9b\x1c\xea\xe9\x33\xbe\xa3\x9b\xb6\xe5\xf8\x16\x07\x3b\xbc\x69\x18\x38\x9b\xdc\x70\x4d\x18\x94\x79\x78\x66\x33\xb3\xc2\x4f\x24\x72\x5f\x9b\x29\xbe\x0b\xbc\x7d\xa9\xb1\x27\x05\xca\x17\x50\x46\xd5\xec\xcb\x2d\x5c\x70\x12\xc3\x74\x8d\xf2\x80\x3e\xc6\x49\x5c\x57\xb4\x7b\x2f\x55\x45\x66\x31\x81\xb0\x47\x68\xd9\xab\x4c\xa0\xf4\xcd\xfe\xdf\x89\xaf\x5b\xe5\x2f\x12\xc9\x5d\xcc\x37\x59\x9f\x4e\xa7\x2a\x2c\xa7\xf5\x2a\x6d\xa7\xc6\xd1\xdd\xf9\x3b\xa6\x49\xb9\x51\x7f\xa7\x4c\x59\x59\x73\xc7\xc3\x10\xca\x51\xe3\x49\x05\x80\xe4\x1b\xe8\x3e\xf0\x1c\x0c\x61\x63\x5a\x7b\x39\xfe\x97\x3d\x5b\xd6\x5c\x95\x3c\x91\x68\x5f\xa3\x42\xeb\xd3\x35\xd1\x0f\x89\x84\xea\x0f\x3f\x36\x07\x55\xe9\x95\xeb\x88\x1b\x48\xb9\xf6\x1d\x15\x9d\x9a\xa1\xbe\x57\x29\xe9\xf3\x8c\x6d\xbd\xc9\xaf\x5c\x43\x19\x30\x77\x48\xee\x90\x6c\xb3\xf9\x14\xb8\x34\x5b\xf2\x06\xd8\x88\xb3\xf8\x39\x68\x67\x9c\xc5\xdf\x0a\x77\x30\xe8\xc4\xdd\xfd\xb0\x1d\xb9\x77\x5e\x35\xbb\x34\xde\x37\x57\x9f\x27\x16\xe5\x25\xea\x0d\x97\x77\x4f\xac\xca\xc1\xcb\xcb\xb1\x30\x5c\x6c\xf6\x4f\x29\xc4\x41\x73\x34\x96\x5c\x98\xe4\x6f\x17\x48\x98\x6b\xcd\x77\xf1\x0a\x75\x06\xa1\xce\xfc\x18\x97\x24\x67\x1a\x4a\x39\x5f\xf3\x24\x61\xe8\x15\xef\xb3\x9d\x90\xe3\x39\x73\x28\x7d\x85\x0c\x23\x7b\x04\xd8\x19\x83\x98\x68\x52\x88\x56\x30\x00\x91\x94\xf8\x29\x51\x82\x8b\x5c\x9c\x7b\x5a\xe6\x58\x3c\xc4\x7b\x41\xb2\x18\xe3\x73\x6f\x49\x98\xc2\x8e\x14\x73\xe9\xd5\x6d\xb8\x8c\x75\x77\x7e\xd5\x12\x33\x22\x12\x2b\x73\x07\x65\x26\x38\xcf\x5a\x2c\xe5\xac\xdb\xa4\xd7\x24\xd8\x5f\x61\x96\x7b\x20\xb9\xf1\xd8\x7d\xb7\x8e\xd9\xee\x92\x61\x1c\x6e\x0f\x32\xd6\xce\xf9\xe2\xf5\xd0\x81\xb4\x7d\xca\x82\x9c\x4a\x9e\x27\xa9\xc8\x75\x7b\x15\xdc\x2d\xcb\x25\xbc\x70\xab\x51\xb5\xb7\xef\x67\x98\xfd\x28\x25\xb7\xaf\x8f\x5b\x5b\x40\x69\x0b\xed\x8c\x7e\x63\x0d\xe7\x1b\x15\xfa\x49\xfd\xb0\x2d\xf3\x13\x65\xa8\xb6\x4a\xe3\xea\xdb\x3b\xc8\xe5\x4e\xc6\xed\x7d\x9d\x4d\x64\xbf\xa6\x9e\x65\x6a\x96\x2b\xcd\x57\x9f\x51\x4b\x1a\x3d\x95\x8f\x23\x8b\xd5\xe0\x10\x03\x53\x77\x51\x6e\xf2\x18\x0a\xeb\xcd\x15\xeb\x50\xae\x34\x7a\x29\xeb\x84\xbf\x72\x7a\x8e\xe6\xc3\xa0\x79\xd8\xec\xb8\x05\xf9\x61\xa9\xd1\x71\x77\x72\x2c\x3b\xbe\xad\xa9\x12\x60\xfa\x66\xdb\xd6\xbc\x6f\xae\x17\x34\x13\xb9\xae\xb5\xba\xd5\x1b\x12\x3f\x76\x17\x71\x7e\xc4\xf3\x4c\x7b\x9d\xfb\xf7\x6e\xeb\xee\x92\xb3\xea\x7b\xe4\xd6\x84\xe5\x78\x7e\x7a\xd2\x80\xdc\xbf\xd0\x74\x22\xac\x75\x83\x0d\x4d\xdd\x0b\xe0\x33\x39\x74\xcd\xc8\x51\x1a\x8b\x36\xe2\xef\xc9\x64\xad\xd5\x72\x56\x24\x67\xac\x62\x26\x64\x3c\xba\x6b\x32\xd0\xde\x1f\x9b\x3d\xf9\x2b\x86\xa5\x67\xe9\xee\x18\xac\x0e\x55\x06\x0e\x5f\xa5\x97\xc2\x4a\x13\xa9\xbf\x90\x04\x7f\x7e\x78\x08\x76\x37\xa8\xee\xc6\xf9\x0d\x98\x67\xb5\xf3\xb7\x7d\xd4\x3a\x6e\xd9\xa7\xee\x2a\xd7\x7e\x2d\xef\x75\xed\x3f\x31\x99\x4f\x2c\xc9\xc6\x5d\x8f\x18\x33\xf5\x9b\x98\x62\x52\xfd\xe6\xde\x5d\xd8\x8f\x47\xee\x3f\x64\xfe\x1f\x00\x00\xff\xff\x17\x71\xfc\x09\x84\x25\x00\x00") func pagesAssetsHtmlContainersHtmlBytes() ([]byte, error) { return bindataRead( diff --git a/vendor/github.com/google/cadvisor/utils/cpuload/cpuload.go b/vendor/github.com/google/cadvisor/utils/cpuload/cpuload.go index e536d90be1c0..f3d29b8dd05e 100644 --- 
a/vendor/github.com/google/cadvisor/utils/cpuload/cpuload.go +++ b/vendor/github.com/google/cadvisor/utils/cpuload/cpuload.go @@ -41,6 +41,6 @@ func New() (CpuLoadReader, error) { if err != nil { return nil, fmt.Errorf("failed to create a netlink based cpuload reader: %v", err) } - glog.V(3).Info("Using a netlink-based load reader") + glog.V(4).Info("Using a netlink-based load reader") return reader, nil } diff --git a/vendor/github.com/google/cadvisor/utils/docker/docker.go b/vendor/github.com/google/cadvisor/utils/docker/docker.go index d59f6f1fd966..e19097d6c7a8 100644 --- a/vendor/github.com/google/cadvisor/utils/docker/docker.go +++ b/vendor/github.com/google/cadvisor/utils/docker/docker.go @@ -19,7 +19,7 @@ import ( "os" "strings" - dockertypes "github.com/docker/engine-api/types" + dockertypes "github.com/docker/docker/api/types" ) const ( diff --git a/vendor/github.com/google/cadvisor/utils/oomparser/oomparser.go b/vendor/github.com/google/cadvisor/utils/oomparser/oomparser.go index 184cdd73fda2..a73243f2e3e8 100644 --- a/vendor/github.com/google/cadvisor/utils/oomparser/oomparser.go +++ b/vendor/github.com/google/cadvisor/utils/oomparser/oomparser.go @@ -143,7 +143,7 @@ func (glogAdapter) Infof(format string, args ...interface{}) { glog.V(4).Infof(format, args) } func (glogAdapter) Warningf(format string, args ...interface{}) { - glog.Infof(format, args) + glog.V(2).Infof(format, args) } func (glogAdapter) Errorf(format string, args ...interface{}) { glog.Warningf(format, args) diff --git a/vendor/github.com/google/cadvisor/validate/validate.go b/vendor/github.com/google/cadvisor/validate/validate.go index 06c8ba45f5da..c73311cf64ac 100644 --- a/vendor/github.com/google/cadvisor/validate/validate.go +++ b/vendor/github.com/google/cadvisor/validate/validate.go @@ -190,7 +190,7 @@ func validateDockerInfo() (string, string) { return Unsupported, fmt.Sprintf("Docker setup is invalid: %v", err) } - desc := fmt.Sprintf("Docker exec driver is %s. Storage driver is %s.\n", info.ExecutionDriver, info.Driver) + desc := fmt.Sprintf("Storage driver is %s.\n", info.Driver) return Recommended, desc } diff --git a/vendor/github.com/gophercloud/gophercloud/.gitignore b/vendor/github.com/gophercloud/gophercloud/.gitignore index ead84456eb0e..df9048a010a9 100644 --- a/vendor/github.com/gophercloud/gophercloud/.gitignore +++ b/vendor/github.com/gophercloud/gophercloud/.gitignore @@ -1 +1,2 @@ **/*.swp +.idea diff --git a/vendor/github.com/gophercloud/gophercloud/.travis.yml b/vendor/github.com/gophercloud/gophercloud/.travis.yml index 5d1486901dd3..59c419495272 100644 --- a/vendor/github.com/gophercloud/gophercloud/.travis.yml +++ b/vendor/github.com/gophercloud/gophercloud/.travis.yml @@ -12,6 +12,8 @@ go: env: global: - secure: "xSQsAG5wlL9emjbCdxzz/hYQsSpJ/bABO1kkbwMSISVcJ3Nk0u4ywF+LS4bgeOnwPfmFvNTOqVDu3RwEvMeWXSI76t1piCPcObutb2faKLVD/hLoAS76gYX+Z8yGWGHrSB7Do5vTPj1ERe2UljdrnsSeOXzoDwFxYRaZLX4bBOB4AyoGvRniil5QXPATiA1tsWX1VMicj8a4F8X+xeESzjt1Q5Iy31e7vkptu71bhvXCaoo5QhYwT+pLR9dN0S1b7Ro0KVvkRefmr1lUOSYd2e74h6Lc34tC1h3uYZCS4h47t7v5cOXvMNxinEj2C51RvbjvZI1RLVdkuAEJD1Iz4+Ote46nXbZ//6XRZMZz/YxQ13l7ux1PFjgEB6HAapmF5Xd8PRsgeTU9LRJxpiTJ3P5QJ3leS1va8qnziM5kYipj/Rn+V8g2ad/rgkRox9LSiR9VYZD2Pe45YCb1mTKSl2aIJnV7nkOqsShY5LNB4JZSg7xIffA+9YVDktw8dJlATjZqt7WvJJ49g6A61mIUV4C15q2JPGKTkZzDiG81NtmS7hFa7k0yaE2ELgYocbcuyUcAahhxntYTC0i23nJmEHVNiZmBO3u7EgpWe4KGVfumU+lt12tIn5b3dZRBBUk3QakKKozSK1QPHGpk/AZGrhu7H6l8to6IICKWtDcyMPQ=" +before_script: +- go vet ./... 
script: - ./script/coverage - ./script/format diff --git a/vendor/github.com/gophercloud/gophercloud/.zuul.yaml b/vendor/github.com/gophercloud/gophercloud/.zuul.yaml new file mode 100644 index 000000000000..c259d03e184f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/.zuul.yaml @@ -0,0 +1,12 @@ +- project: + name: gophercloud/gophercloud + check: + jobs: + - gophercloud-unittest + - gophercloud-acceptance-test + recheck-mitaka: + jobs: + - gophercloud-acceptance-test-mitaka + recheck-pike: + jobs: + - gophercloud-acceptance-test-pike diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/doc.go new file mode 100644 index 000000000000..a78d3d0482c2 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/doc.go @@ -0,0 +1,86 @@ +/* +Package volumeactions provides information and interaction with volumes in the +OpenStack Block Storage service. A volume is a detachable block storage +device, akin to a USB hard drive. + +Example of Attaching a Volume to an Instance + + attachOpts := volumeactions.AttachOpts{ + MountPoint: "/mnt", + Mode: "rw", + InstanceUUID: server.ID, + } + + err := volumeactions.Attach(client, volume.ID, attachOpts).ExtractErr() + if err != nil { + panic(err) + } + + detachOpts := volumeactions.DetachOpts{ + AttachmentID: volume.Attachments[0].AttachmentID, + } + + err = volumeactions.Detach(client, volume.ID, detachOpts).ExtractErr() + if err != nil { + panic(err) + } + + +Example of Creating an Image from a Volume + + uploadImageOpts := volumeactions.UploadImageOpts{ + ImageName: "my_vol", + Force: true, + } + + volumeImage, err := volumeactions.UploadImage(client, volume.ID, uploadImageOpts).Extract() + if err != nil { + panic(err) + } + + fmt.Printf("%+v\n", volumeImage) + +Example of Extending a Volume's Size + + extendOpts := volumeactions.ExtendSizeOpts{ + NewSize: 100, + } + + err := volumeactions.ExtendSize(client, volume.ID, extendOpts).ExtractErr() + if err != nil { + panic(err) + } + +Example of Initializing a Volume Connection + + connectOpts := &volumeactions.InitializeConnectionOpts{ + IP: "127.0.0.1", + Host: "stack", + Initiator: "iqn.1994-05.com.redhat:17cf566367d2", + Multipath: gophercloud.Disabled, + Platform: "x86_64", + OSType: "linux2", + } + + connectionInfo, err := volumeactions.InitializeConnection(client, volume.ID, connectOpts).Extract() + if err != nil { + panic(err) + } + + fmt.Printf("%+v\n", connectionInfo["data"]) + + terminateOpts := &volumeactions.InitializeConnectionOpts{ + IP: "127.0.0.1", + Host: "stack", + Initiator: "iqn.1994-05.com.redhat:17cf566367d2", + Multipath: gophercloud.Disabled, + Platform: "x86_64", + OSType: "linux2", + } + + err = volumeactions.TerminateConnection(client, volume.ID, terminateOpts).ExtractErr() + if err != nil { + panic(err) + } +*/ +package volumeactions diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/requests.go new file mode 100644 index 000000000000..a3916c77c167 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/requests.go @@ -0,0 +1,263 @@ +package volumeactions + +import ( + "github.com/gophercloud/gophercloud" +) + +// AttachOptsBuilder allows extensions to add 
additional parameters to the +// Attach request. +type AttachOptsBuilder interface { + ToVolumeAttachMap() (map[string]interface{}, error) +} + +// AttachMode describes the attachment mode for volumes. +type AttachMode string + +// These constants determine how a volume is attached. +const ( + ReadOnly AttachMode = "ro" + ReadWrite AttachMode = "rw" +) + +// AttachOpts contains options for attaching a Volume. +type AttachOpts struct { + // The mountpoint of this volume. + MountPoint string `json:"mountpoint,omitempty"` + + // The nova instance ID, can't set simultaneously with HostName. + InstanceUUID string `json:"instance_uuid,omitempty"` + + // The hostname of baremetal host, can't set simultaneously with InstanceUUID. + HostName string `json:"host_name,omitempty"` + + // Mount mode of this volume. + Mode AttachMode `json:"mode,omitempty"` +} + +// ToVolumeAttachMap assembles a request body based on the contents of a +// AttachOpts. +func (opts AttachOpts) ToVolumeAttachMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "os-attach") +} + +// Attach will attach a volume based on the values in AttachOpts. +func Attach(client *gophercloud.ServiceClient, id string, opts AttachOptsBuilder) (r AttachResult) { + b, err := opts.ToVolumeAttachMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(attachURL(client, id), b, nil, &gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + return +} + +// BeginDetach will mark the volume as detaching. +func BeginDetaching(client *gophercloud.ServiceClient, id string) (r BeginDetachingResult) { + b := map[string]interface{}{"os-begin_detaching": make(map[string]interface{})} + _, r.Err = client.Post(beginDetachingURL(client, id), b, nil, &gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + return +} + +// DetachOptsBuilder allows extensions to add additional parameters to the +// Detach request. +type DetachOptsBuilder interface { + ToVolumeDetachMap() (map[string]interface{}, error) +} + +// DetachOpts contains options for detaching a Volume. +type DetachOpts struct { + // AttachmentID is the ID of the attachment between a volume and instance. + AttachmentID string `json:"attachment_id,omitempty"` +} + +// ToVolumeDetachMap assembles a request body based on the contents of a +// DetachOpts. +func (opts DetachOpts) ToVolumeDetachMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "os-detach") +} + +// Detach will detach a volume based on volume ID. +func Detach(client *gophercloud.ServiceClient, id string, opts DetachOptsBuilder) (r DetachResult) { + b, err := opts.ToVolumeDetachMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(detachURL(client, id), b, nil, &gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + return +} + +// Reserve will reserve a volume based on volume ID. +func Reserve(client *gophercloud.ServiceClient, id string) (r ReserveResult) { + b := map[string]interface{}{"os-reserve": make(map[string]interface{})} + _, r.Err = client.Post(reserveURL(client, id), b, nil, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201, 202}, + }) + return +} + +// Unreserve will unreserve a volume based on volume ID. 
+func Unreserve(client *gophercloud.ServiceClient, id string) (r UnreserveResult) { + b := map[string]interface{}{"os-unreserve": make(map[string]interface{})} + _, r.Err = client.Post(unreserveURL(client, id), b, nil, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201, 202}, + }) + return +} + +// InitializeConnectionOptsBuilder allows extensions to add additional parameters to the +// InitializeConnection request. +type InitializeConnectionOptsBuilder interface { + ToVolumeInitializeConnectionMap() (map[string]interface{}, error) +} + +// InitializeConnectionOpts hosts options for InitializeConnection. +// The fields are specific to the storage driver in use and the destination +// attachment. +type InitializeConnectionOpts struct { + IP string `json:"ip,omitempty"` + Host string `json:"host,omitempty"` + Initiator string `json:"initiator,omitempty"` + Wwpns []string `json:"wwpns,omitempty"` + Wwnns string `json:"wwnns,omitempty"` + Multipath *bool `json:"multipath,omitempty"` + Platform string `json:"platform,omitempty"` + OSType string `json:"os_type,omitempty"` +} + +// ToVolumeInitializeConnectionMap assembles a request body based on the contents of a +// InitializeConnectionOpts. +func (opts InitializeConnectionOpts) ToVolumeInitializeConnectionMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "connector") + return map[string]interface{}{"os-initialize_connection": b}, err +} + +// InitializeConnection initializes an iSCSI connection by volume ID. +func InitializeConnection(client *gophercloud.ServiceClient, id string, opts InitializeConnectionOptsBuilder) (r InitializeConnectionResult) { + b, err := opts.ToVolumeInitializeConnectionMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(initializeConnectionURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201, 202}, + }) + return +} + +// TerminateConnectionOptsBuilder allows extensions to add additional parameters to the +// TerminateConnection request. +type TerminateConnectionOptsBuilder interface { + ToVolumeTerminateConnectionMap() (map[string]interface{}, error) +} + +// TerminateConnectionOpts hosts options for TerminateConnection. +type TerminateConnectionOpts struct { + IP string `json:"ip,omitempty"` + Host string `json:"host,omitempty"` + Initiator string `json:"initiator,omitempty"` + Wwpns []string `json:"wwpns,omitempty"` + Wwnns string `json:"wwnns,omitempty"` + Multipath *bool `json:"multipath,omitempty"` + Platform string `json:"platform,omitempty"` + OSType string `json:"os_type,omitempty"` +} + +// ToVolumeTerminateConnectionMap assembles a request body based on the contents of a +// TerminateConnectionOpts. +func (opts TerminateConnectionOpts) ToVolumeTerminateConnectionMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "connector") + return map[string]interface{}{"os-terminate_connection": b}, err +} + +// TerminateConnection terminates an iSCSI connection by volume ID. +func TerminateConnection(client *gophercloud.ServiceClient, id string, opts TerminateConnectionOptsBuilder) (r TerminateConnectionResult) { + b, err := opts.ToVolumeTerminateConnectionMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(teminateConnectionURL(client, id), b, nil, &gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + return +} + +// ExtendSizeOptsBuilder allows extensions to add additional parameters to the +// ExtendSize request. 
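For reviewers unfamiliar with these bindings, here is a minimal sketch of how the reserve/attach/detach helpers added above fit together. It assumes `client` is an initialized Block Storage *gophercloud.ServiceClient, and the device path and IDs are placeholders. The exact sequence Cinder expects between reserve and attach (e.g. connection initialization) is driver-specific, so this only shows how the Go functions are invoked, not a canonical workflow.

package volumedemo

import (
	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions"
)

// attachAndDetach reserves a volume, attaches it to an instance, and later
// marks it as detaching and detaches it, using the helpers defined above.
func attachAndDetach(client *gophercloud.ServiceClient, volumeID, serverID, attachmentID string) error {
	// Reserve the volume before attaching it.
	if err := volumeactions.Reserve(client, volumeID).ExtractErr(); err != nil {
		return err
	}

	attachOpts := volumeactions.AttachOpts{
		MountPoint:   "/dev/vdb", // placeholder device path
		Mode:         volumeactions.ReadWrite,
		InstanceUUID: serverID,
	}
	if err := volumeactions.Attach(client, volumeID, attachOpts).ExtractErr(); err != nil {
		// Roll back the reservation if the attach call fails.
		_ = volumeactions.Unreserve(client, volumeID).ExtractErr()
		return err
	}

	// Later, mark the volume as detaching and then detach it.
	if err := volumeactions.BeginDetaching(client, volumeID).ExtractErr(); err != nil {
		return err
	}
	detachOpts := volumeactions.DetachOpts{AttachmentID: attachmentID}
	return volumeactions.Detach(client, volumeID, detachOpts).ExtractErr()
}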
+type ExtendSizeOptsBuilder interface { + ToVolumeExtendSizeMap() (map[string]interface{}, error) +} + +// ExtendSizeOpts contains options for extending the size of an existing Volume. +// This object is passed to the volumes.ExtendSize function. +type ExtendSizeOpts struct { + // NewSize is the new size of the volume, in GB. + NewSize int `json:"new_size" required:"true"` +} + +// ToVolumeExtendSizeMap assembles a request body based on the contents of an +// ExtendSizeOpts. +func (opts ExtendSizeOpts) ToVolumeExtendSizeMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "os-extend") +} + +// ExtendSize will extend the size of the volume based on the provided information. +// This operation does not return a response body. +func ExtendSize(client *gophercloud.ServiceClient, id string, opts ExtendSizeOptsBuilder) (r ExtendSizeResult) { + b, err := opts.ToVolumeExtendSizeMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(extendSizeURL(client, id), b, nil, &gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + return +} + +// UploadImageOptsBuilder allows extensions to add additional parameters to the +// UploadImage request. +type UploadImageOptsBuilder interface { + ToVolumeUploadImageMap() (map[string]interface{}, error) +} + +// UploadImageOpts contains options for uploading a Volume to image storage. +type UploadImageOpts struct { + // Container format, may be bare, ofv, ova, etc. + ContainerFormat string `json:"container_format,omitempty"` + + // Disk format, may be raw, qcow2, vhd, vdi, vmdk, etc. + DiskFormat string `json:"disk_format,omitempty"` + + // The name of image that will be stored in glance. + ImageName string `json:"image_name,omitempty"` + + // Force image creation, usable if volume attached to instance. + Force bool `json:"force,omitempty"` +} + +// ToVolumeUploadImageMap assembles a request body based on the contents of a +// UploadImageOpts. +func (opts UploadImageOpts) ToVolumeUploadImageMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "os-volume_upload_image") +} + +// UploadImage will upload an image based on the values in UploadImageOptsBuilder. +func UploadImage(client *gophercloud.ServiceClient, id string, opts UploadImageOptsBuilder) (r UploadImageResult) { + b, err := opts.ToVolumeUploadImageMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(uploadURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/results.go new file mode 100644 index 000000000000..9815f0c26ac3 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/results.go @@ -0,0 +1,186 @@ +package volumeactions + +import ( + "encoding/json" + "time" + + "github.com/gophercloud/gophercloud" +) + +// AttachResult contains the response body and error from an Attach request. +type AttachResult struct { + gophercloud.ErrResult +} + +// BeginDetachingResult contains the response body and error from a BeginDetach +// request. +type BeginDetachingResult struct { + gophercloud.ErrResult +} + +// DetachResult contains the response body and error from a Detach request. 
+type DetachResult struct { + gophercloud.ErrResult +} + +// UploadImageResult contains the response body and error from an UploadImage +// request. +type UploadImageResult struct { + gophercloud.Result +} + +// ReserveResult contains the response body and error from a Reserve request. +type ReserveResult struct { + gophercloud.ErrResult +} + +// UnreserveResult contains the response body and error from an Unreserve +// request. +type UnreserveResult struct { + gophercloud.ErrResult +} + +// TerminateConnectionResult contains the response body and error from a +// TerminateConnection request. +type TerminateConnectionResult struct { + gophercloud.ErrResult +} + +// InitializeConnectionResult contains the response body and error from an +// InitializeConnection request. +type InitializeConnectionResult struct { + gophercloud.Result +} + +// ExtendSizeResult contains the response body and error from an ExtendSize request. +type ExtendSizeResult struct { + gophercloud.ErrResult +} + +// Extract will get the connection information out of the +// InitializeConnectionResult object. +// +// This will be a generic map[string]interface{} and the results will be +// dependent on the type of connection made. +func (r InitializeConnectionResult) Extract() (map[string]interface{}, error) { + var s struct { + ConnectionInfo map[string]interface{} `json:"connection_info"` + } + err := r.ExtractInto(&s) + return s.ConnectionInfo, err +} + +// ImageVolumeType contains volume type information obtained from UploadImage +// action. +type ImageVolumeType struct { + // The ID of a volume type. + ID string `json:"id"` + + // Human-readable display name for the volume type. + Name string `json:"name"` + + // Human-readable description for the volume type. + Description string `json:"display_description"` + + // Flag for public access. + IsPublic bool `json:"is_public"` + + // Extra specifications for volume type. + ExtraSpecs map[string]interface{} `json:"extra_specs"` + + // ID of quality of service specs. + QosSpecsID string `json:"qos_specs_id"` + + // Flag for deletion status of volume type. + Deleted bool `json:"deleted"` + + // The date when volume type was deleted. + DeletedAt time.Time `json:"-"` + + // The date when volume type was created. + CreatedAt time.Time `json:"-"` + + // The date when this volume was last updated. + UpdatedAt time.Time `json:"-"` +} + +func (r *ImageVolumeType) UnmarshalJSON(b []byte) error { + type tmp ImageVolumeType + var s struct { + tmp + CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at"` + UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at"` + DeletedAt gophercloud.JSONRFC3339MilliNoZ `json:"deleted_at"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = ImageVolumeType(s.tmp) + + r.CreatedAt = time.Time(s.CreatedAt) + r.UpdatedAt = time.Time(s.UpdatedAt) + r.DeletedAt = time.Time(s.DeletedAt) + + return err +} + +// VolumeImage contains information about volume uploaded to an image service. +type VolumeImage struct { + // The ID of a volume an image is created from. + VolumeID string `json:"id"` + + // Container format, may be bare, ofv, ova, etc. + ContainerFormat string `json:"container_format"` + + // Disk format, may be raw, qcow2, vhd, vdi, vmdk, etc. + DiskFormat string `json:"disk_format"` + + // Human-readable description for the volume. + Description string `json:"display_description"` + + // The ID of the created image. + ImageID string `json:"image_id"` + + // Human-readable display name for the image. 
+ ImageName string `json:"image_name"` + + // Size of the volume in GB. + Size int `json:"size"` + + // Current status of the volume. + Status string `json:"status"` + + // The date when this volume was last updated. + UpdatedAt time.Time `json:"-"` + + // Volume type object of used volume. + VolumeType ImageVolumeType `json:"volume_type"` +} + +func (r *VolumeImage) UnmarshalJSON(b []byte) error { + type tmp VolumeImage + var s struct { + tmp + UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = VolumeImage(s.tmp) + + r.UpdatedAt = time.Time(s.UpdatedAt) + + return err +} + +// Extract will get an object with info about the uploaded image out of the +// UploadImageResult object. +func (r UploadImageResult) Extract() (VolumeImage, error) { + var s struct { + VolumeImage VolumeImage `json:"os-volume_upload_image"` + } + err := r.ExtractInto(&s) + return s.VolumeImage, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/urls.go new file mode 100644 index 000000000000..5efd2b25c05b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/urls.go @@ -0,0 +1,39 @@ +package volumeactions + +import "github.com/gophercloud/gophercloud" + +func attachURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("volumes", id, "action") +} + +func beginDetachingURL(c *gophercloud.ServiceClient, id string) string { + return attachURL(c, id) +} + +func detachURL(c *gophercloud.ServiceClient, id string) string { + return attachURL(c, id) +} + +func uploadURL(c *gophercloud.ServiceClient, id string) string { + return attachURL(c, id) +} + +func reserveURL(c *gophercloud.ServiceClient, id string) string { + return attachURL(c, id) +} + +func unreserveURL(c *gophercloud.ServiceClient, id string) string { + return attachURL(c, id) +} + +func initializeConnectionURL(c *gophercloud.ServiceClient, id string) string { + return attachURL(c, id) +} + +func teminateConnectionURL(c *gophercloud.ServiceClient, id string) string { + return attachURL(c, id) +} + +func extendSizeURL(c *gophercloud.ServiceClient, id string) string { + return attachURL(c, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/apiversions/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/apiversions/doc.go deleted file mode 100644 index e3af39f513a4..000000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/apiversions/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package apiversions provides information and interaction with the different -// API versions for the OpenStack Block Storage service, code-named Cinder. -package apiversions diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/apiversions/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/apiversions/requests.go deleted file mode 100644 index 725c13a76150..000000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/apiversions/requests.go +++ /dev/null @@ -1,20 +0,0 @@ -package apiversions - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// List lists all the Cinder API versions available to end-users. 
-func List(c *gophercloud.ServiceClient) pagination.Pager { - return pagination.NewPager(c, listURL(c), func(r pagination.PageResult) pagination.Page { - return APIVersionPage{pagination.SinglePageBase(r)} - }) -} - -// Get will retrieve the volume type with the provided ID. To extract the volume -// type from the result, call the Extract method on the GetResult. -func Get(client *gophercloud.ServiceClient, v string) (r GetResult) { - _, r.Err = client.Get(getURL(client, v), &r.Body, nil) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/apiversions/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/apiversions/results.go deleted file mode 100644 index f510c6d103ab..000000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/apiversions/results.go +++ /dev/null @@ -1,49 +0,0 @@ -package apiversions - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// APIVersion represents an API version for Cinder. -type APIVersion struct { - ID string `json:"id"` // unique identifier - Status string `json:"status"` // current status - Updated string `json:"updated"` // date last updated -} - -// APIVersionPage is the page returned by a pager when traversing over a -// collection of API versions. -type APIVersionPage struct { - pagination.SinglePageBase -} - -// IsEmpty checks whether an APIVersionPage struct is empty. -func (r APIVersionPage) IsEmpty() (bool, error) { - is, err := ExtractAPIVersions(r) - return len(is) == 0, err -} - -// ExtractAPIVersions takes a collection page, extracts all of the elements, -// and returns them a slice of APIVersion structs. It is effectively a cast. -func ExtractAPIVersions(r pagination.Page) ([]APIVersion, error) { - var s struct { - Versions []APIVersion `json:"versions"` - } - err := (r.(APIVersionPage)).ExtractInto(&s) - return s.Versions, err -} - -// GetResult represents the result of a get operation. -type GetResult struct { - gophercloud.Result -} - -// Extract is a function that accepts a result and extracts an API version resource. 
-func (r GetResult) Extract() (*APIVersion, error) { - var s struct { - Version *APIVersion `json:"version"` - } - err := r.ExtractInto(&s) - return s.Version, err -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/apiversions/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/apiversions/urls.go deleted file mode 100644 index d1861ac196d8..000000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/apiversions/urls.go +++ /dev/null @@ -1,18 +0,0 @@ -package apiversions - -import ( - "net/url" - "strings" - - "github.com/gophercloud/gophercloud" -) - -func getURL(c *gophercloud.ServiceClient, version string) string { - return c.ServiceURL(strings.TrimRight(version, "/") + "/") -} - -func listURL(c *gophercloud.ServiceClient) string { - u, _ := url.Parse(c.ServiceURL("")) - u.Path = "/" - return u.String() -} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/doc.go similarity index 100% rename from vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/doc.go rename to vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/doc.go diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/requests.go new file mode 100644 index 000000000000..18c9cb272ecb --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/requests.go @@ -0,0 +1,182 @@ +package volumes + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToVolumeCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains options for creating a Volume. This object is passed to +// the volumes.Create function. For more information about these parameters, +// see the Volume object. +type CreateOpts struct { + // The size of the volume, in GB + Size int `json:"size" required:"true"` + // The availability zone + AvailabilityZone string `json:"availability_zone,omitempty"` + // ConsistencyGroupID is the ID of a consistency group + ConsistencyGroupID string `json:"consistencygroup_id,omitempty"` + // The volume description + Description string `json:"description,omitempty"` + // One or more metadata key and value pairs to associate with the volume + Metadata map[string]string `json:"metadata,omitempty"` + // The volume name + Name string `json:"name,omitempty"` + // the ID of the existing volume snapshot + SnapshotID string `json:"snapshot_id,omitempty"` + // SourceReplica is a UUID of an existing volume to replicate with + SourceReplica string `json:"source_replica,omitempty"` + // the ID of the existing volume + SourceVolID string `json:"source_volid,omitempty"` + // The ID of the image from which you want to create the volume. + // Required to create a bootable volume. + ImageID string `json:"imageRef,omitempty"` + // The associated volume type + VolumeType string `json:"volume_type,omitempty"` +} + +// ToVolumeCreateMap assembles a request body based on the contents of a +// CreateOpts. 
+func (opts CreateOpts) ToVolumeCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "volume") +} + +// Create will create a new Volume based on the values in CreateOpts. To extract +// the Volume object from the response, call the Extract method on the +// CreateResult. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToVolumeCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + return +} + +// Delete will delete the existing Volume with the provided ID. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, id), nil) + return +} + +// Get retrieves the Volume with the provided ID. To extract the Volume object +// from the response, call the Extract method on the GetResult. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} + +// ListOptsBuilder allows extensions to add additional parameters to the List +// request. +type ListOptsBuilder interface { + ToVolumeListQuery() (string, error) +} + +// ListOpts holds options for listing Volumes. It is passed to the volumes.List +// function. +type ListOpts struct { + // admin-only option. Set it to true to see all tenant volumes. + AllTenants bool `q:"all_tenants"` + // List only volumes that contain Metadata. + Metadata map[string]string `q:"metadata"` + // List only volumes that have Name as the display name. + Name string `q:"name"` + // List only volumes that have a status of Status. + Status string `q:"status"` +} + +// ToVolumeListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToVolumeListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns Volumes optionally limited by the conditions provided in ListOpts. +func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(client) + if opts != nil { + query, err := opts.ToVolumeListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return VolumePage{pagination.SinglePageBase(r)} + }) +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToVolumeUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts contain options for updating an existing Volume. This object is passed +// to the volumes.Update function. For more information about the parameters, see +// the Volume object. +type UpdateOpts struct { + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + Metadata map[string]string `json:"metadata,omitempty"` +} + +// ToVolumeUpdateMap assembles a request body based on the contents of an +// UpdateOpts. +func (opts UpdateOpts) ToVolumeUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "volume") +} + +// Update will update the Volume with provided information. To extract the updated +// Volume from the response, call the Extract method on the UpdateResult. 
+func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToVolumeUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Put(updateURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// IDFromName is a convienience function that returns a server's ID given its name. +func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { + count := 0 + id := "" + pages, err := List(client, nil).AllPages() + if err != nil { + return "", err + } + + all, err := ExtractVolumes(pages) + if err != nil { + return "", err + } + + for _, s := range all { + if s.Name == name { + count++ + id = s.ID + } + } + + switch count { + case 0: + return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "volume"} + case 1: + return id, nil + default: + return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: count, ResourceType: "volume"} + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/results.go new file mode 100644 index 000000000000..5ebe36a33854 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/results.go @@ -0,0 +1,159 @@ +package volumes + +import ( + "encoding/json" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Attachment represents a Volume Attachment record +type Attachment struct { + AttachedAt time.Time `json:"-"` + AttachmentID string `json:"attachment_id"` + Device string `json:"device"` + HostName string `json:"host_name"` + ID string `json:"id"` + ServerID string `json:"server_id"` + VolumeID string `json:"volume_id"` +} + +// UnmarshalJSON is our unmarshalling helper +func (r *Attachment) UnmarshalJSON(b []byte) error { + type tmp Attachment + var s struct { + tmp + AttachedAt gophercloud.JSONRFC3339MilliNoZ `json:"attached_at"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = Attachment(s.tmp) + + r.AttachedAt = time.Time(s.AttachedAt) + + return err +} + +// Volume contains all the information associated with an OpenStack Volume. +type Volume struct { + // Unique identifier for the volume. + ID string `json:"id"` + // Current status of the volume. + Status string `json:"status"` + // Size of the volume in GB. + Size int `json:"size"` + // AvailabilityZone is which availability zone the volume is in. + AvailabilityZone string `json:"availability_zone"` + // The date when this volume was created. + CreatedAt time.Time `json:"-"` + // The date when this volume was last updated + UpdatedAt time.Time `json:"-"` + // Instances onto which the volume is attached. + Attachments []Attachment `json:"attachments"` + // Human-readable display name for the volume. + Name string `json:"name"` + // Human-readable description for the volume. + Description string `json:"description"` + // The type of volume to create, either SATA or SSD. + VolumeType string `json:"volume_type"` + // The ID of the snapshot from which the volume was created + SnapshotID string `json:"snapshot_id"` + // The ID of another block storage volume from which the current volume was created + SourceVolID string `json:"source_volid"` + // Arbitrary key-value pairs defined by the user. + Metadata map[string]string `json:"metadata"` + // UserID is the id of the user who created the volume. 
+ UserID string `json:"user_id"` + // Indicates whether this is a bootable volume. + Bootable string `json:"bootable"` + // Encrypted denotes if the volume is encrypted. + Encrypted bool `json:"encrypted"` + // ReplicationStatus is the status of replication. + ReplicationStatus string `json:"replication_status"` + // ConsistencyGroupID is the consistency group ID. + ConsistencyGroupID string `json:"consistencygroup_id"` + // Multiattach denotes if the volume is multi-attach capable. + Multiattach bool `json:"multiattach"` +} + +// UnmarshalJSON another unmarshalling function +func (r *Volume) UnmarshalJSON(b []byte) error { + type tmp Volume + var s struct { + tmp + CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at"` + UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = Volume(s.tmp) + + r.CreatedAt = time.Time(s.CreatedAt) + r.UpdatedAt = time.Time(s.UpdatedAt) + + return err +} + +// VolumePage is a pagination.pager that is returned from a call to the List function. +type VolumePage struct { + pagination.SinglePageBase +} + +// IsEmpty returns true if a ListResult contains no Volumes. +func (r VolumePage) IsEmpty() (bool, error) { + volumes, err := ExtractVolumes(r) + return len(volumes) == 0, err +} + +// ExtractVolumes extracts and returns Volumes. It is used while iterating over a volumes.List call. +func ExtractVolumes(r pagination.Page) ([]Volume, error) { + var s []Volume + err := ExtractVolumesInto(r, &s) + return s, err +} + +type commonResult struct { + gophercloud.Result +} + +// Extract will get the Volume object out of the commonResult object. +func (r commonResult) Extract() (*Volume, error) { + var s Volume + err := r.ExtractInto(&s) + return &s, err +} + +// ExtractInto converts our response data into a volume struct +func (r commonResult) ExtractInto(v interface{}) error { + return r.Result.ExtractIntoStructPtr(v, "volume") +} + +// ExtractVolumesInto similar to ExtractInto but operates on a `list` of volumes +func ExtractVolumesInto(r pagination.Page, v interface{}) error { + return r.(VolumePage).Result.ExtractIntoSlicePtr(v, "volumes") +} + +// CreateResult contains the response body and error from a Create request. +type CreateResult struct { + commonResult +} + +// GetResult contains the response body and error from a Get request. +type GetResult struct { + commonResult +} + +// UpdateResult contains the response body and error from an Update request. +type UpdateResult struct { + commonResult +} + +// DeleteResult contains the response body and error from a Delete request. 
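Unlike the volumeactions package, the new v3 volumes package ships no usage examples in this change, so here is a small sketch of the IDFromName and Update helpers defined above. `client` is assumed to be a Block Storage v3 service client, and the names and description are placeholders.

package volumedemo

import (
	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes"
)

// renameVolume looks a volume up by display name and updates its name and
// description via the v3 volumes bindings defined above.
func renameVolume(client *gophercloud.ServiceClient, oldName, newName string) (*volumes.Volume, error) {
	id, err := volumes.IDFromName(client, oldName)
	if err != nil {
		return nil, err
	}

	updateOpts := volumes.UpdateOpts{
		Name:        newName,
		Description: "renamed via gophercloud", // placeholder description
	}
	return volumes.Update(client, id, updateOpts).Extract()
}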
+type DeleteResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/urls.go new file mode 100644 index 000000000000..170724905abe --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/urls.go @@ -0,0 +1,23 @@ +package volumes + +import "github.com/gophercloud/gophercloud" + +func createURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("volumes") +} + +func listURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("volumes", "detail") +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("volumes", id) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return deleteURL(c, id) +} + +func updateURL(c *gophercloud.ServiceClient, id string) string { + return deleteURL(c, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/util.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/util.go new file mode 100644 index 000000000000..e86c1b4b4ee5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/util.go @@ -0,0 +1,22 @@ +package volumes + +import ( + "github.com/gophercloud/gophercloud" +) + +// WaitForStatus will continually poll the resource, checking for a particular +// status. It will do this for the amount of seconds defined. +func WaitForStatus(c *gophercloud.ServiceClient, id, status string, secs int) error { + return gophercloud.WaitFor(secs, func() (bool, error) { + current, err := Get(c, id).Extract() + if err != nil { + return false, err + } + + if current.Status == status { + return true, nil + } + + return false, nil + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/client.go b/vendor/github.com/gophercloud/gophercloud/openstack/client.go index e1c699fcbfa3..c796795b8ee9 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/client.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/client.go @@ -4,6 +4,8 @@ import ( "fmt" "net/url" "reflect" + "regexp" + "strings" "github.com/gophercloud/gophercloud" tokens2 "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens" @@ -12,8 +14,13 @@ import ( ) const ( - v20 = "v2.0" - v30 = "v3.0" + // v2 represents Keystone v2. + // It should never increase beyond 2.0. + v2 = "v2.0" + + // v3 represents Keystone v3. + // The version can be anything from v3 to v3.x. 
+ v3 = "v3" ) /* @@ -35,24 +42,25 @@ func NewClient(endpoint string) (*gophercloud.ProviderClient, error) { if err != nil { return nil, err } - hadPath := u.Path != "" - u.Path, u.RawQuery, u.Fragment = "", "", "" - base := u.String() - endpoint = gophercloud.NormalizeURL(endpoint) - base = gophercloud.NormalizeURL(base) + u.RawQuery, u.Fragment = "", "" - if hadPath { - return &gophercloud.ProviderClient{ - IdentityBase: base, - IdentityEndpoint: endpoint, - }, nil + var base string + versionRe := regexp.MustCompile("v[0-9.]+/?") + if version := versionRe.FindString(u.Path); version != "" { + base = strings.Replace(u.String(), version, "", -1) + } else { + base = u.String() } + endpoint = gophercloud.NormalizeURL(endpoint) + base = gophercloud.NormalizeURL(base) + return &gophercloud.ProviderClient{ IdentityBase: base, - IdentityEndpoint: "", + IdentityEndpoint: endpoint, }, nil + } /* @@ -92,8 +100,8 @@ func AuthenticatedClient(options gophercloud.AuthOptions) (*gophercloud.Provider // supported at the provided endpoint. func Authenticate(client *gophercloud.ProviderClient, options gophercloud.AuthOptions) error { versions := []*utils.Version{ - {ID: v20, Priority: 20, Suffix: "/v2.0/"}, - {ID: v30, Priority: 30, Suffix: "/v3/"}, + {ID: v2, Priority: 20, Suffix: "/v2.0/"}, + {ID: v3, Priority: 30, Suffix: "/v3/"}, } chosen, endpoint, err := utils.ChooseVersion(client, versions) @@ -102,9 +110,9 @@ func Authenticate(client *gophercloud.ProviderClient, options gophercloud.AuthOp } switch chosen.ID { - case v20: + case v2: return v2auth(client, endpoint, options, gophercloud.EndpointOpts{}) - case v30: + case v3: return v3auth(client, endpoint, &options, gophercloud.EndpointOpts{}) default: // The switch statement must be out of date from the versions list. @@ -241,6 +249,13 @@ func NewIdentityV3(client *gophercloud.ProviderClient, eo gophercloud.EndpointOp } } + // Ensure endpoint still has a suffix of v3. + // This is because EndpointLocator might have found a versionless + // endpoint and requests will fail unless targeted at /v3. + if !strings.HasSuffix(endpoint, "v3/") { + endpoint = endpoint + "v3/" + } + return &gophercloud.ServiceClient{ ProviderClient: client, Endpoint: endpoint, @@ -293,8 +308,12 @@ func NewBlockStorageV2(client *gophercloud.ProviderClient, eo gophercloud.Endpoi return initClientOpts(client, eo, "volumev2") } -// NewSharedFileSystemV2 creates a ServiceClient that may be used to access the -// v2 shared file system service. +// NewBlockStorageV3 creates a ServiceClient that may be used to access the v3 block storage service. +func NewBlockStorageV3(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "volumev3") +} + +// NewSharedFileSystemV2 creates a ServiceClient that may be used to access the v2 shared file system service. func NewSharedFileSystemV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { return initClientOpts(client, eo, "sharev2") } @@ -331,3 +350,11 @@ func NewImageServiceV2(client *gophercloud.ProviderClient, eo gophercloud.Endpoi sc.ResourceBase = sc.Endpoint + "v2/" return sc, err } + +// NewLoadBalancerV2 creates a ServiceClient that may be used to access the v2 +// load balancer service. 
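The NewBlockStorageV3 constructor added above is the entry point callers would use to reach the new v3 volumes package. As a rough end-to-end sketch (not part of this change): credentials are assumed to come from the standard OS_* environment variables via openstack.AuthOptionsFromEnv, and the region name is a placeholder.

package main

import (
	"fmt"

	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack"
	"github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes"
)

func main() {
	// Credentials are read from the usual OS_* environment variables.
	opts, err := openstack.AuthOptionsFromEnv()
	if err != nil {
		panic(err)
	}
	provider, err := openstack.AuthenticatedClient(opts)
	if err != nil {
		panic(err)
	}

	// NewBlockStorageV3 resolves the "volumev3" service from the catalog.
	client, err := openstack.NewBlockStorageV3(provider, gophercloud.EndpointOpts{
		Region: "RegionOne", // placeholder region name
	})
	if err != nil {
		panic(err)
	}

	// Create a 1 GB volume and wait up to 60 seconds for it to become available.
	vol, err := volumes.Create(client, volumes.CreateOpts{Size: 1, Name: "demo-vol"}).Extract()
	if err != nil {
		panic(err)
	}
	if err := volumes.WaitForStatus(client, vol.ID, "available", 60); err != nil {
		panic(err)
	}

	// List all available volumes.
	pages, err := volumes.List(client, volumes.ListOpts{Status: "available"}).AllPages()
	if err != nil {
		panic(err)
	}
	all, err := volumes.ExtractVolumes(pages)
	if err != nil {
		panic(err)
	}
	for _, v := range all {
		fmt.Printf("%s %s %dGB\n", v.ID, v.Name, v.Size)
	}
}

The WaitForStatus helper vendored above polls Get until the volume reaches the requested status or the timeout expires, which is why no manual sleep loop is needed here.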
+func NewLoadBalancerV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + sc, err := initClientOpts(client, eo, "load-balancer") + sc.ResourceBase = sc.Endpoint + "v2.0/" + return sc, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/doc.go index 2661cfac2094..a7bc15c3e5cc 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/doc.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/doc.go @@ -41,5 +41,23 @@ Example to Create a Flavor if err != nil { panic(err) } + +Example to List Flavor Access + + flavorID := "e91758d6-a54a-4778-ad72-0c73a1cb695b" + + allPages, err := flavors.ListAccesses(computeClient, flavorID).AllPages() + if err != nil { + panic(err) + } + + allAccesses, err := flavors.ExtractAccesses(allPages) + if err != nil { + panic(err) + } + + for _, access := range allAccesses { + fmt.Printf("%+v", access) + } */ package flavors diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/requests.go index 317fd8530c37..6eb3678b2bd2 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/requests.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/requests.go @@ -43,7 +43,6 @@ const ( the Marker for the current call. */ type ListOpts struct { - // ChangesSince, if provided, instructs List to return only those things which // have changed since the timestamp provided. ChangesSince string `q:"changes-since"` @@ -154,6 +153,15 @@ func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { return } +// ListAccesses retrieves the tenants which have access to a flavor. +func ListAccesses(client *gophercloud.ServiceClient, id string) pagination.Pager { + url := accessURL(client, id) + + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return AccessPage{pagination.SinglePageBase(r)} + }) +} + // IDFromName is a convienience function that returns a flavor's ID given its // name. func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/results.go index fda11d3e06af..fb5c335b8eac 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/results.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/results.go @@ -12,6 +12,8 @@ type commonResult struct { gophercloud.Result } +// CreateResult is the response of a Get operations. Call its Extract method to +// interpret it as a Flavor. type CreateResult struct { commonResult } @@ -63,7 +65,7 @@ type Flavor struct { VCPUs int `json:"vcpus"` // IsPublic indicates whether the flavor is public. - IsPublic bool `json:"is_public"` + IsPublic bool `json:"os-flavor-access:is_public"` } func (r *Flavor) UnmarshalJSON(b []byte) error { @@ -131,3 +133,32 @@ func ExtractFlavors(r pagination.Page) ([]Flavor, error) { err := (r.(FlavorPage)).ExtractInto(&s) return s.Flavors, err } + +// AccessPage contains a single page of all FlavorAccess entries for a flavor. 
+type AccessPage struct { + pagination.SinglePageBase +} + +// IsEmpty indicates whether an AccessPage is empty. +func (page AccessPage) IsEmpty() (bool, error) { + v, err := ExtractAccesses(page) + return len(v) == 0, err +} + +// ExtractAccesses interprets a page of results as a slice of FlavorAccess. +func ExtractAccesses(r pagination.Page) ([]FlavorAccess, error) { + var s struct { + FlavorAccesses []FlavorAccess `json:"flavor_access"` + } + err := (r.(AccessPage)).ExtractInto(&s) + return s.FlavorAccesses, err +} + +// FlavorAccess represents an ACL of tenant access to a specific Flavor. +type FlavorAccess struct { + // FlavorID is the unique ID of the flavor. + FlavorID string `json:"flavor_id"` + + // TenantID is the unique ID of the tenant. + TenantID string `json:"tenant_id"` +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/urls.go index 518d05b36996..04d33bf1279f 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/urls.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/urls.go @@ -19,3 +19,7 @@ func createURL(client *gophercloud.ServiceClient) string { func deleteURL(client *gophercloud.ServiceClient, id string) string { return client.ServiceURL("flavors", id) } + +func accessURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("flavors", id, "os-flavor-access") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/requests.go index c445f0ede0ef..626eb63e9104 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/requests.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/requests.go @@ -515,7 +515,7 @@ func (opts ResizeOpts) ToServerResizeMap() (map[string]interface{}, error) { // Note that this implies rebuilding it. // // Unfortunately, one cannot pass rebuild parameters to the resize function. -// When the resize completes, the server will be in RESIZE_VERIFY state. +// When the resize completes, the server will be in VERIFY_RESIZE state. // While in this state, you can explore the use of the new server's // configuration. If you like it, call ConfirmResize() to commit the resize // permanently. Otherwise, call RevertResize() to restore the old configuration. diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/results.go index 7ef80e92e2e7..c6c1ff43f7b3 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/results.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/results.go @@ -211,6 +211,16 @@ type Server struct { // SecurityGroups includes the security groups that this instance has applied // to it. SecurityGroups []map[string]interface{} `json:"security_groups"` + + // Fault contains failure information about a server. 
+ Fault Fault `json:"fault"` +} + +type Fault struct { + Code int `json:"code"` + Created time.Time `json:"created"` + Details string `json:"details"` + Message string `json:"message"` } func (r *Server) UnmarshalJSON(b []byte) error { diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external/doc.go new file mode 100644 index 000000000000..b8261684e700 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external/doc.go @@ -0,0 +1,46 @@ +/* +Package external provides information and interaction with the external +extension for the OpenStack Networking service. + +Example to List Networks with External Information + + type NetworkWithExternalExt struct { + networks.Network + external.NetworkExternalExt + } + + var allNetworks []NetworkWithExternalExt + + allPages, err := networks.List(networkClient, nil).AllPages() + if err != nil { + panic(err) + } + + err = networks.ExtractNetworksInto(allPages, &allNetworks) + if err != nil { + panic(err) + } + + for _, network := range allNetworks { + fmt.Println("%+v\n", network) + } + +Example to Create a Network with External Information + + iTrue := true + networkCreateOpts := networks.CreateOpts{ + Name: "private", + AdminStateUp: &iTrue, + } + + createOpts := external.CreateOptsExt{ + networkCreateOpts, + &iTrue, + } + + network, err := networks.Create(networkClient, createOpts).Extract() + if err != nil { + panic(err) + } +*/ +package external diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external/requests.go new file mode 100644 index 000000000000..f28e574612c2 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external/requests.go @@ -0,0 +1,56 @@ +package external + +import ( + "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" +) + +// CreateOptsExt is the structure used when creating new external network +// resources. It embeds networks.CreateOpts and so inherits all of its required +// and optional fields, with the addition of the External field. +type CreateOptsExt struct { + networks.CreateOptsBuilder + External *bool `json:"router:external,omitempty"` +} + +// ToNetworkCreateMap adds the router:external options to the base network +// creation options. +func (opts CreateOptsExt) ToNetworkCreateMap() (map[string]interface{}, error) { + base, err := opts.CreateOptsBuilder.ToNetworkCreateMap() + if err != nil { + return nil, err + } + + if opts.External == nil { + return base, nil + } + + networkMap := base["network"].(map[string]interface{}) + networkMap["router:external"] = opts.External + + return base, nil +} + +// UpdateOptsExt is the structure used when updating existing external network +// resources. It embeds networks.UpdateOpts and so inherits all of its required +// and optional fields, with the addition of the External field. +type UpdateOptsExt struct { + networks.UpdateOptsBuilder + External *bool `json:"router:external,omitempty"` +} + +// ToNetworkUpdateMap casts an UpdateOpts struct to a map. 
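The external package's doc comment above only demonstrates listing and creating networks with the router:external decoration; updating an existing network follows the same wrapping pattern via UpdateOptsExt. A minimal sketch, assuming the networks package's Update and UpdateOpts (part of the same library but not shown in this diff) and a placeholder network ID:

package networkdemo

import (
	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external"
	"github.com/gophercloud/gophercloud/openstack/networking/v2/networks"
)

// markExternal flips the router:external flag on an existing network by
// wrapping a plain networks.UpdateOpts in external.UpdateOptsExt.
func markExternal(client *gophercloud.ServiceClient, networkID string, isExternal bool) (*networks.Network, error) {
	updateOpts := external.UpdateOptsExt{
		UpdateOptsBuilder: networks.UpdateOpts{},
		External:          &isExternal,
	}
	return networks.Update(client, networkID, updateOpts).Extract()
}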
+func (opts UpdateOptsExt) ToNetworkUpdateMap() (map[string]interface{}, error) { + base, err := opts.UpdateOptsBuilder.ToNetworkUpdateMap() + if err != nil { + return nil, err + } + + if opts.External == nil { + return base, nil + } + + networkMap := base["network"].(map[string]interface{}) + networkMap["router:external"] = opts.External + + return base, nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external/results.go new file mode 100644 index 000000000000..7cbbffdcf8a0 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external/results.go @@ -0,0 +1,8 @@ +package external + +// NetworkExternalExt represents a decorated form of a Network with based on the +// "external-net" extension. +type NetworkExternalExt struct { + // Specifies whether the network is an external network or not. + External bool `json:"router:external"` +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/results.go index d6dbb350e66c..e19c8e74c47b 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/results.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/results.go @@ -8,7 +8,16 @@ import ( // GatewayInfo represents the information of an external gateway for any // particular network router. type GatewayInfo struct { - NetworkID string `json:"network_id"` + NetworkID string `json:"network_id"` + EnableSNAT *bool `json:"enable_snat,omitempty"` + ExternalFixedIPs []ExternalFixedIP `json:"external_fixed_ips,omitempty"` +} + +// ExternalFixedIP is the IP address and subnet ID of the external gateway of a +// router. +type ExternalFixedIP struct { + IPAddress string `json:"ip_address"` + SubnetID string `json:"subnet_id"` } // Route is a possible route in a router. diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/doc.go deleted file mode 100644 index bad3324b5ed0..000000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/doc.go +++ /dev/null @@ -1,59 +0,0 @@ -/* -Package members provides information and interaction with Members of the -Load Balancer as a Service extension for the OpenStack Networking service. 
- -Example to List Members - - listOpts := members.ListOpts{ - ProtocolPort: 80, - } - - allPages, err := members.List(networkClient, listOpts).AllPages() - if err != nil { - panic(err) - } - - allMembers, err := members.ExtractMembers(allPages) - if err != nil { - panic(err) - } - - for _, member := range allMembers { - fmt.Printf("%+v\n", member) - } - -Example to Create a Member - - createOpts := members.CreateOpts{ - Address: "192.168.2.14", - ProtocolPort: 80, - PoolID: "0b266a12-0fdf-4434-bd11-649d84e54bd5" - } - - member, err := members.Create(networkClient, createOpts).Extract() - if err != nil { - panic(err) - } - -Example to Update a Member - - memberID := "46592c54-03f7-40ef-9cdf-b1fcf2775ddf" - - updateOpts := members.UpdateOpts{ - AdminStateUp: gophercloud.Disabled, - } - - member, err := members.Update(networkClient, memberID, updateOpts).Extract() - if err != nil { - panic(err) - } - -Example to Delete a Member - - memberID := "46592c54-03f7-40ef-9cdf-b1fcf2775ddf" - err := members.Delete(networkClient, memberID).ExtractErr() - if err != nil { - panic(err) - } -*/ -package members diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/requests.go deleted file mode 100644 index 1a3128844405..000000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/requests.go +++ /dev/null @@ -1,124 +0,0 @@ -package members - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// ListOpts allows the filtering and sorting of paginated collections through -// the API. Filtering is achieved by passing in struct field values that map to -// the floating IP attributes you want to see returned. SortKey allows you to -// sort by a particular network attribute. SortDir sets the direction, and is -// either `asc' or `desc'. Marker and Limit are used for pagination. -type ListOpts struct { - Status string `q:"status"` - Weight int `q:"weight"` - AdminStateUp *bool `q:"admin_state_up"` - TenantID string `q:"tenant_id"` - PoolID string `q:"pool_id"` - Address string `q:"address"` - ProtocolPort int `q:"protocol_port"` - ID string `q:"id"` - Limit int `q:"limit"` - Marker string `q:"marker"` - SortKey string `q:"sort_key"` - SortDir string `q:"sort_dir"` -} - -// List returns a Pager which allows you to iterate over a collection of -// members. It accepts a ListOpts struct, which allows you to filter and sort -// the returned collection for greater efficiency. -// -// Default policy settings return only those members that are owned by the -// tenant who submits the request, unless an admin user submits the request. -func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager { - q, err := gophercloud.BuildQueryString(&opts) - if err != nil { - return pagination.Pager{Err: err} - } - u := rootURL(c) + q.String() - return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page { - return MemberPage{pagination.LinkedPageBase{PageResult: r}} - }) -} - -// CreateOptsBuilder allows extensions to add additional parameters to the -// Create request. -type CreateOptsBuilder interface { - ToLBMemberCreateMap() (map[string]interface{}, error) -} - -// CreateOpts contains all the values needed to create a new pool member. -type CreateOpts struct { - // Address is the IP address of the member. 
- Address string `json:"address" required:"true"` - - // ProtocolPort is the port on which the application is hosted. - ProtocolPort int `json:"protocol_port" required:"true"` - - // PoolID is the pool to which this member will belong. - PoolID string `json:"pool_id" required:"true"` - - // TenantID is only required if the caller has an admin role and wants - // to create a pool for another tenant. - TenantID string `json:"tenant_id,omitempty"` -} - -// ToLBMemberCreateMap builds a request body from CreateOpts. -func (opts CreateOpts) ToLBMemberCreateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "member") -} - -// Create accepts a CreateOpts struct and uses the values to create a new -// load balancer pool member. -func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { - b, err := opts.ToLBMemberCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) - return -} - -// Get retrieves a particular pool member based on its unique ID. -func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { - _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) - return -} - -// UpdateOptsBuilder allows extensions to add additional parameters to the -// Update request. -type UpdateOptsBuilder interface { - ToLBMemberUpdateMap() (map[string]interface{}, error) -} - -// UpdateOpts contains the values used when updating a pool member. -type UpdateOpts struct { - // The administrative state of the member, which is up (true) or down (false). - AdminStateUp *bool `json:"admin_state_up,omitempty"` -} - -// ToLBMemberUpdateMap builds a request body from UpdateOpts. -func (opts UpdateOpts) ToLBMemberUpdateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "member") -} - -// Update allows members to be updated. -func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { - b, err := opts.ToLBMemberUpdateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200, 201, 202}, - }) - return -} - -// Delete will permanently delete a particular member based on its unique ID. -func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { - _, r.Err = c.Delete(resourceURL(c, id), nil) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/results.go deleted file mode 100644 index 804dbe8445f8..000000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/results.go +++ /dev/null @@ -1,109 +0,0 @@ -package members - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// Member represents the application running on a backend server. -type Member struct { - // Status is the status of the member. Indicates whether the member - // is operational. - Status string - - // Weight is the weight of member. - Weight int - - // AdminStateUp is the administrative state of the member, which is up - // (true) or down (false). - AdminStateUp bool `json:"admin_state_up"` - - // TenantID is the owner of the member. - TenantID string `json:"tenant_id"` - - // PoolID is the pool to which the member belongs. - PoolID string `json:"pool_id"` - - // Address is the IP address of the member. 
- Address string - - // ProtocolPort is the port on which the application is hosted. - ProtocolPort int `json:"protocol_port"` - - // ID is the unique ID for the member. - ID string -} - -// MemberPage is the page returned by a pager when traversing over a -// collection of pool members. -type MemberPage struct { - pagination.LinkedPageBase -} - -// NextPageURL is invoked when a paginated collection of members has reached -// the end of a page and the pager seeks to traverse over a new one. In order -// to do this, it needs to construct the next page's URL. -func (r MemberPage) NextPageURL() (string, error) { - var s struct { - Links []gophercloud.Link `json:"members_links"` - } - err := r.ExtractInto(&s) - if err != nil { - return "", err - } - return gophercloud.ExtractNextURL(s.Links) -} - -// IsEmpty checks whether a MemberPage struct is empty. -func (r MemberPage) IsEmpty() (bool, error) { - is, err := ExtractMembers(r) - return len(is) == 0, err -} - -// ExtractMembers accepts a Page struct, specifically a MemberPage struct, -// and extracts the elements into a slice of Member structs. In other words, -// a generic collection is mapped into a relevant slice. -func ExtractMembers(r pagination.Page) ([]Member, error) { - var s struct { - Members []Member `json:"members"` - } - err := (r.(MemberPage)).ExtractInto(&s) - return s.Members, err -} - -type commonResult struct { - gophercloud.Result -} - -// Extract is a function that accepts a result and extracts a member. -func (r commonResult) Extract() (*Member, error) { - var s struct { - Member *Member `json:"member"` - } - err := r.ExtractInto(&s) - return s.Member, err -} - -// CreateResult represents the result of a create operation. Call its Extract -// method to interpret it as a Member. -type CreateResult struct { - commonResult -} - -// GetResult represents the result of a get operation. Call its Extract -// method to interpret it as a Member. -type GetResult struct { - commonResult -} - -// UpdateResult represents the result of an update operation. Call its Extract -// method to interpret it as a Member. -type UpdateResult struct { - commonResult -} - -// DeleteResult represents the result of a delete operation. Call its -// ExtractErr method to determine if the result succeeded or failed. 
-type DeleteResult struct { - gophercloud.ErrResult -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/urls.go deleted file mode 100644 index e2248f81f45b..000000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/urls.go +++ /dev/null @@ -1,16 +0,0 @@ -package members - -import "github.com/gophercloud/gophercloud" - -const ( - rootPath = "lb" - resourcePath = "members" -) - -func rootURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL(rootPath, resourcePath) -} - -func resourceURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL(rootPath, resourcePath, id) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/doc.go deleted file mode 100644 index b5c0f29f0546..000000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/doc.go +++ /dev/null @@ -1,63 +0,0 @@ -/* -Package monitors provides information and interaction with the Monitors -of the Load Balancer as a Service extension for the OpenStack Networking -Service. - -Example to List Monitors - - listOpts: monitors.ListOpts{ - Type: "HTTP", - } - - allPages, err := monitors.List(networkClient, listOpts).AllPages() - if err != nil { - panic(err) - } - - allMonitors, err := monitors.ExtractMonitors(allPages) - if err != nil { - panic(err) - } - - for _, monitor := range allMonitors { - fmt.Printf("%+v\n", monitor) - } - -Example to Create a Monitor - - createOpts := monitors.CreateOpts{ - Type: "HTTP", - Delay: 20, - Timeout: 20, - MaxRetries: 5, - URLPath: "/check", - ExpectedCodes: "200-299", - } - - monitor, err := monitors.Create(networkClient, createOpts).Extract() - if err != nil { - panic(err) - } - -Example to Update a Monitor - - monitorID := "681aed03-aadb-43ae-aead-b9016375650a" - - updateOpts := monitors.UpdateOpts{ - Timeout: 30, - } - - monitor, err := monitors.Update(networkClient, monitorID, updateOpts).Extract() - if err != nil { - panic(err) - } - -Example to Delete a Member - - monitorID := "681aed03-aadb-43ae-aead-b9016375650a" - err := monitors.Delete(networkClient, monitorID).ExtractErr() - if err != nil { - panic(err) - } -*/ -package monitors diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/requests.go deleted file mode 100644 index 9ed0c769c900..000000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/requests.go +++ /dev/null @@ -1,227 +0,0 @@ -package monitors - -import ( - "fmt" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// ListOpts allows the filtering and sorting of paginated collections through -// the API. Filtering is achieved by passing in struct field values that map to -// the floating IP attributes you want to see returned. SortKey allows you to -// sort by a particular network attribute. SortDir sets the direction, and is -// either `asc' or `desc'. Marker and Limit are used for pagination. 
-type ListOpts struct { - ID string `q:"id"` - TenantID string `q:"tenant_id"` - Type string `q:"type"` - Delay int `q:"delay"` - Timeout int `q:"timeout"` - MaxRetries int `q:"max_retries"` - HTTPMethod string `q:"http_method"` - URLPath string `q:"url_path"` - ExpectedCodes string `q:"expected_codes"` - AdminStateUp *bool `q:"admin_state_up"` - Status string `q:"status"` - Limit int `q:"limit"` - Marker string `q:"marker"` - SortKey string `q:"sort_key"` - SortDir string `q:"sort_dir"` -} - -// List returns a Pager which allows you to iterate over a collection of -// monitors. It accepts a ListOpts struct, which allows you to filter and sort -// the returned collection for greater efficiency. -// -// Default policy settings return only those monitors that are owned by the -// tenant who submits the request, unless an admin user submits the request. -func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager { - q, err := gophercloud.BuildQueryString(&opts) - if err != nil { - return pagination.Pager{Err: err} - } - u := rootURL(c) + q.String() - return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page { - return MonitorPage{pagination.LinkedPageBase{PageResult: r}} - }) -} - -// MonitorType is the type for all the types of LB monitors. -type MonitorType string - -// Constants that represent approved monitoring types. -const ( - TypePING MonitorType = "PING" - TypeTCP MonitorType = "TCP" - TypeHTTP MonitorType = "HTTP" - TypeHTTPS MonitorType = "HTTPS" -) - -// CreateOptsBuilder allows extensions to add additional parameters to the -// Create request. -type CreateOptsBuilder interface { - ToLBMonitorCreateMap() (map[string]interface{}, error) -} - -// CreateOpts contains all the values needed to create a new health monitor. -type CreateOpts struct { - // MonitorType is the type of probe, which is PING, TCP, HTTP, or HTTPS, - // that is sent by the load balancer to verify the member state. - Type MonitorType `json:"type" required:"true"` - - // Delay is the time, in seconds, between sending probes to members. - Delay int `json:"delay" required:"true"` - - // Timeout is the maximum number of seconds for a monitor to wait for a ping - // reply before it times out. The value must be less than the delay value. - Timeout int `json:"timeout" required:"true"` - - // MaxRetries is the number of permissible ping failures before changing the - // member's status to INACTIVE. Must be a number between 1 and 10. - MaxRetries int `json:"max_retries" required:"true"` - - // URLPath is the URI path that will be accessed if monitor type - // is HTTP or HTTPS. Required for HTTP(S) types. - URLPath string `json:"url_path,omitempty"` - - // HTTPMethod is the HTTP method used for requests by the monitor. If this - // attribute is not specified, it defaults to "GET". Required for HTTP(S) - // types. - HTTPMethod string `json:"http_method,omitempty"` - - // ExpectedCodes is the expected HTTP codes for a passing HTTP(S) monitor - // You can either specify a single status like "200", or a range like - // "200-202". Required for HTTP(S) types. - ExpectedCodes string `json:"expected_codes,omitempty"` - - // TenantID is only required if the caller has an admin role and wants - // to create a pool for another tenant. - TenantID string `json:"tenant_id,omitempty"` - - // AdminStateUp denotes whether the monitor is administratively up or down. - AdminStateUp *bool `json:"admin_state_up,omitempty"` -} - -// ToLBMonitorCreateMap builds a request body from CreateOpts. 
-func (opts CreateOpts) ToLBMonitorCreateMap() (map[string]interface{}, error) { - if opts.Type == TypeHTTP || opts.Type == TypeHTTPS { - if opts.URLPath == "" { - err := gophercloud.ErrMissingInput{} - err.Argument = "monitors.CreateOpts.URLPath" - return nil, err - } - if opts.ExpectedCodes == "" { - err := gophercloud.ErrMissingInput{} - err.Argument = "monitors.CreateOpts.ExpectedCodes" - return nil, err - } - } - if opts.Delay < opts.Timeout { - err := gophercloud.ErrInvalidInput{} - err.Argument = "monitors.CreateOpts.Delay/monitors.CreateOpts.Timeout" - err.Info = "Delay must be greater than or equal to timeout" - return nil, err - } - return gophercloud.BuildRequestBody(opts, "health_monitor") -} - -// Create is an operation which provisions a new health monitor. There are -// different types of monitor you can provision: PING, TCP or HTTP(S). Below -// are examples of how to create each one. -// -// Here is an example config struct to use when creating a PING or TCP monitor: -// -// CreateOpts{Type: TypePING, Delay: 20, Timeout: 10, MaxRetries: 3} -// CreateOpts{Type: TypeTCP, Delay: 20, Timeout: 10, MaxRetries: 3} -// -// Here is an example config struct to use when creating a HTTP(S) monitor: -// -// CreateOpts{Type: TypeHTTP, Delay: 20, Timeout: 10, MaxRetries: 3, -// HttpMethod: "HEAD", ExpectedCodes: "200"} -// -func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { - b, err := opts.ToLBMonitorCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) - return -} - -// Get retrieves a particular health monitor based on its unique ID. -func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { - _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) - return -} - -// UpdateOptsBuilder allows extensions to add additional parameters to the -// Update request. -type UpdateOptsBuilder interface { - ToLBMonitorUpdateMap() (map[string]interface{}, error) -} - -// UpdateOpts contains all the values needed to update an existing monitor. -// Attributes not listed here but appear in CreateOpts are immutable and cannot -// be updated. -type UpdateOpts struct { - // Delay is the time, in seconds, between sending probes to members. - Delay int `json:"delay,omitempty"` - - // Timeout is the maximum number of seconds for a monitor to wait for a ping - // reply before it times out. The value must be less than the delay value. - Timeout int `json:"timeout,omitempty"` - - // MaxRetries is the number of permissible ping failures before changing the - // member's status to INACTIVE. Must be a number between 1 and 10. - MaxRetries int `json:"max_retries,omitempty"` - - // URLPath is the URI path that will be accessed if monitor type - // is HTTP or HTTPS. - URLPath string `json:"url_path,omitempty"` - - // HTTPMethod is the HTTP method used for requests by the monitor. If this - // attribute is not specified, it defaults to "GET". - HTTPMethod string `json:"http_method,omitempty"` - - // ExpectedCodes is the expected HTTP codes for a passing HTTP(S) monitor - // You can either specify a single status like "200", or a range like - // "200-202". - ExpectedCodes string `json:"expected_codes,omitempty"` - - // AdminStateUp denotes whether the monitor is administratively up or down. - AdminStateUp *bool `json:"admin_state_up,omitempty"` -} - -// ToLBMonitorUpdateMap builds a request body from UpdateOpts. 
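// A sketch of creating an HTTP health monitor with this (now removed) package.
// It mirrors the constraints enforced by ToLBMonitorCreateMap above: HTTP(S)
// monitors must set URLPath and ExpectedCodes, and Delay may not be smaller
// than Timeout. networkClient and the values below are placeholders.
createOpts := monitors.CreateOpts{
	Type:          monitors.TypeHTTP,
	Delay:         20,
	Timeout:       10,
	MaxRetries:    3,
	HTTPMethod:    "GET",
	URLPath:       "/healthz",
	ExpectedCodes: "200-299",
}

monitor, err := monitors.Create(networkClient, createOpts).Extract()
if err != nil {
	panic(err)
}
_ = monitor.ID // e.g. hand this to pools.AssociateMonitor afterwards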
-func (opts UpdateOpts) ToLBMonitorUpdateMap() (map[string]interface{}, error) { - if opts.Delay > 0 && opts.Timeout > 0 && opts.Delay < opts.Timeout { - err := gophercloud.ErrInvalidInput{} - err.Argument = "monitors.CreateOpts.Delay/monitors.CreateOpts.Timeout" - err.Value = fmt.Sprintf("%d/%d", opts.Delay, opts.Timeout) - err.Info = "Delay must be greater than or equal to timeout" - return nil, err - } - return gophercloud.BuildRequestBody(opts, "health_monitor") -} - -// Update is an operation which modifies the attributes of the specified -// monitor. -func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { - b, err := opts.ToLBMonitorUpdateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200, 202}, - }) - return -} - -// Delete will permanently delete a particular monitor based on its unique ID. -func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { - _, r.Err = c.Delete(resourceURL(c, id), nil) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/results.go deleted file mode 100644 index cc99f7cced70..000000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/results.go +++ /dev/null @@ -1,141 +0,0 @@ -package monitors - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// Monitor represents a load balancer health monitor. A health monitor is used -// to determine whether or not back-end members of the VIP's pool are usable -// for processing a request. A pool can have several health monitors associated -// with it. There are different types of health monitors supported: -// -// PING: used to ping the members using ICMP. -// TCP: used to connect to the members using TCP. -// HTTP: used to send an HTTP request to the member. -// HTTPS: used to send a secure HTTP request to the member. -// -// When a pool has several monitors associated with it, each member of the pool -// is monitored by all these monitors. If any monitor declares the member as -// unhealthy, then the member status is changed to INACTIVE and the member -// won't participate in its pool's load balancing. In other words, ALL monitors -// must declare the member to be healthy for it to stay ACTIVE. -type Monitor struct { - // ID is the unique ID for the Monitor. - ID string - - // Name is the monitor name. Does not have to be unique. - Name string - - // TenantID is the owner of the Monitor. - TenantID string `json:"tenant_id"` - - // Type is the type of probe sent by the load balancer to verify the member - // state, which is PING, TCP, HTTP, or HTTPS. - Type string - - // Delay is the time, in seconds, between sending probes to members. - Delay int - - // Timeout is the maximum number of seconds for a monitor to wait for a - // connection to be established before it times out. This value must be less - // than the delay value. - Timeout int - - // MaxRetries is the number of allowed connection failures before changing the - // status of the member to INACTIVE. A valid value is from 1 to 10. - MaxRetries int `json:"max_retries"` - - // HTTPMethod is the HTTP method that the monitor uses for requests. 
- HTTPMethod string `json:"http_method"` - - // URLPath is the HTTP path of the request sent by the monitor to test the - // health of a member. Must be a string beginning with a forward slash (/). - URLPath string `json:"url_path"` - - // ExpectedCodes is the expected HTTP codes for a passing HTTP(S) monitor. - ExpectedCodes string `json:"expected_codes"` - - // AdminStateUp is the administrative state of the health monitor, which is up - // (true) or down (false). - AdminStateUp bool `json:"admin_state_up"` - - // Status is the status of the health monitor. Indicates whether the health - // monitor is operational. - Status string -} - -// MonitorPage is the page returned by a pager when traversing over a -// collection of health monitors. -type MonitorPage struct { - pagination.LinkedPageBase -} - -// NextPageURL is invoked when a paginated collection of monitors has reached -// the end of a page and the pager seeks to traverse over a new one. In order -// to do this, it needs to construct the next page's URL. -func (r MonitorPage) NextPageURL() (string, error) { - var s struct { - Links []gophercloud.Link `json:"health_monitors_links"` - } - err := r.ExtractInto(&s) - if err != nil { - return "", err - } - - return gophercloud.ExtractNextURL(s.Links) -} - -// IsEmpty checks whether a PoolPage struct is empty. -func (r MonitorPage) IsEmpty() (bool, error) { - is, err := ExtractMonitors(r) - return len(is) == 0, err -} - -// ExtractMonitors accepts a Page struct, specifically a MonitorPage struct, -// and extracts the elements into a slice of Monitor structs. In other words, -// a generic collection is mapped into a relevant slice. -func ExtractMonitors(r pagination.Page) ([]Monitor, error) { - var s struct { - Monitors []Monitor `json:"health_monitors"` - } - err := (r.(MonitorPage)).ExtractInto(&s) - return s.Monitors, err -} - -type commonResult struct { - gophercloud.Result -} - -// Extract is a function that accepts a result and extracts a monitor. -func (r commonResult) Extract() (*Monitor, error) { - var s struct { - Monitor *Monitor `json:"health_monitor"` - } - err := r.ExtractInto(&s) - return s.Monitor, err -} - -// CreateResult represents the result of a create operation. Call its Extract -// method to interpret it as a Monitor. -type CreateResult struct { - commonResult -} - -// GetResult represents the result of a get operation. Call its Extract -// method to interpret it as a Monitor. -type GetResult struct { - commonResult -} - -// UpdateResult represents the result of an update operation. Call its Extract -// method to interpret it as a Monitor. -type UpdateResult struct { - commonResult -} - -// DeleteResult represents the result of a delete operation. Call its Extract -// method to determine if the request succeeded or failed. 
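// Monitors can also be consumed page by page instead of collecting everything
// with AllPages. This sketch combines the MonitorPage/ExtractMonitors helpers
// defined above with the pagination package; networkClient is assumed to be a
// Neutron service client.
err := monitors.List(networkClient, monitors.ListOpts{Type: "HTTP"}).EachPage(
	func(page pagination.Page) (bool, error) {
		monitorList, err := monitors.ExtractMonitors(page)
		if err != nil {
			return false, err
		}
		for _, m := range monitorList {
			_ = m.ID // inspect each monitor here
		}
		return true, nil
	})
if err != nil {
	panic(err)
}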
-type DeleteResult struct { - gophercloud.ErrResult -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/urls.go deleted file mode 100644 index e9d90fcc5692..000000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/urls.go +++ /dev/null @@ -1,16 +0,0 @@ -package monitors - -import "github.com/gophercloud/gophercloud" - -const ( - rootPath = "lb" - resourcePath = "health_monitors" -) - -func rootURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL(rootPath, resourcePath) -} - -func resourceURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL(rootPath, resourcePath, id) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/doc.go deleted file mode 100644 index 25c4204dc930..000000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/doc.go +++ /dev/null @@ -1,81 +0,0 @@ -/* -Package pools provides information and interaction with the Pools of the -Load Balancing as a Service extension for the OpenStack Networking service. - -Example to List Pools - - listOpts := pools.ListOpts{ - SubnetID: "d9bd223b-f1a9-4f98-953b-df977b0f902d", - } - - allPages, err := pools.List(networkClient, listOpts).AllPages() - if err != nil { - panic(err) - } - - allPools, err := pools.ExtractPools(allPages) - if err != nil { - panic(err) - } - - for _, pool := range allPools { - fmt.Printf("%+v\n", pool) - } - -Example to Create a Pool - - createOpts := pools.CreateOpts{ - LBMethod: pools.LBMethodRoundRobin, - Protocol: "HTTP", - Name: "Example pool", - SubnetID: "1981f108-3c48-48d2-b908-30f7d28532c9", - Provider: "haproxy", - } - - pool, err := pools.Create(networkClient, createOpts).Extract() - if err != nil { - panic(err) - } - -Example to Update a Pool - - poolID := "166db5e6-c72a-4d77-8776-3573e27ae271" - - updateOpts := pools.UpdateOpts{ - LBMethod: pools.LBMethodLeastConnections, - } - - pool, err := pools.Update(networkClient, poolID, updateOpts).Extract() - if err != nil { - panic(err) - } - -Example to Delete a Pool - - poolID := "166db5e6-c72a-4d77-8776-3573e27ae271" - err := pools.Delete(networkClient, poolID).ExtractErr() - if err != nil { - panic(err) - } - -Example to Associate a Monitor to a Pool - - poolID := "166db5e6-c72a-4d77-8776-3573e27ae271" - monitorID := "8bbfbe1c-6faa-4d97-abdb-0df6c90df70b" - - pool, err := pools.AssociateMonitor(networkClient, poolID, monitorID).Extract() - if err != nil { - panic(err) - } - -Example to Disassociate a Monitor from a Pool - - poolID := "166db5e6-c72a-4d77-8776-3573e27ae271" - monitorID := "8bbfbe1c-6faa-4d97-abdb-0df6c90df70b" - - pool, err := pools.DisassociateMonitor(networkClient, poolID, monitorID).Extract() - if err != nil { - panic(err) - } -*/ -package pools diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/requests.go deleted file mode 100644 index b3593548d371..000000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/requests.go +++ /dev/null @@ -1,175 +0,0 @@ -package pools - -import ( - 
"github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// ListOpts allows the filtering and sorting of paginated collections through -// the API. Filtering is achieved by passing in struct field values that map to -// the floating IP attributes you want to see returned. SortKey allows you to -// sort by a particular network attribute. SortDir sets the direction, and is -// either `asc' or `desc'. Marker and Limit are used for pagination. -type ListOpts struct { - Status string `q:"status"` - LBMethod string `q:"lb_method"` - Protocol string `q:"protocol"` - SubnetID string `q:"subnet_id"` - TenantID string `q:"tenant_id"` - AdminStateUp *bool `q:"admin_state_up"` - Name string `q:"name"` - ID string `q:"id"` - VIPID string `q:"vip_id"` - Limit int `q:"limit"` - Marker string `q:"marker"` - SortKey string `q:"sort_key"` - SortDir string `q:"sort_dir"` -} - -// List returns a Pager which allows you to iterate over a collection of -// pools. It accepts a ListOpts struct, which allows you to filter and sort -// the returned collection for greater efficiency. -// -// Default policy settings return only those pools that are owned by the -// tenant who submits the request, unless an admin user submits the request. -func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager { - q, err := gophercloud.BuildQueryString(&opts) - if err != nil { - return pagination.Pager{Err: err} - } - u := rootURL(c) + q.String() - return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page { - return PoolPage{pagination.LinkedPageBase{PageResult: r}} - }) -} - -// LBMethod is a type used for possible load balancing methods. -type LBMethod string - -// LBProtocol is a type used for possible load balancing protocols. -type LBProtocol string - -// Supported attributes for create/update operations. -const ( - LBMethodRoundRobin LBMethod = "ROUND_ROBIN" - LBMethodLeastConnections LBMethod = "LEAST_CONNECTIONS" - - ProtocolTCP LBProtocol = "TCP" - ProtocolHTTP LBProtocol = "HTTP" - ProtocolHTTPS LBProtocol = "HTTPS" -) - -// CreateOptsBuilder allows extensions to add additional parameters to the -// Create request. -type CreateOptsBuilder interface { - ToLBPoolCreateMap() (map[string]interface{}, error) -} - -// CreateOpts contains all the values needed to create a new pool. -type CreateOpts struct { - // Name of the pool. - Name string `json:"name" required:"true"` - - // Protocol used by the pool members, you can use either - // ProtocolTCP, ProtocolHTTP, or ProtocolHTTPS. - Protocol LBProtocol `json:"protocol" required:"true"` - - // TenantID is only required if the caller has an admin role and wants - // to create a pool for another tenant. - TenantID string `json:"tenant_id,omitempty"` - - // SubnetID is the network on which the members of the pool will be located. - // Only members that are on this network can be added to the pool. - SubnetID string `json:"subnet_id,omitempty"` - - // LBMethod is the algorithm used to distribute load between the members of - // the pool. The current specification supports LBMethodRoundRobin and - // LBMethodLeastConnections as valid values for this attribute. - LBMethod LBMethod `json:"lb_method" required:"true"` - - // Provider of the pool. - Provider string `json:"provider,omitempty"` -} - -// ToLBPoolCreateMap builds a request body based on CreateOpts. 
-func (opts CreateOpts) ToLBPoolCreateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "pool") -} - -// Create accepts a CreateOptsBuilder and uses the values to create a new -// load balancer pool. -func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { - b, err := opts.ToLBPoolCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) - return -} - -// Get retrieves a particular pool based on its unique ID. -func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { - _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) - return -} - -// UpdateOptsBuilder allows extensions to add additional parameters ot the -// Update request. -type UpdateOptsBuilder interface { - ToLBPoolUpdateMap() (map[string]interface{}, error) -} - -// UpdateOpts contains the values used when updating a pool. -type UpdateOpts struct { - // Name of the pool. - Name string `json:"name,omitempty"` - - // LBMethod is the algorithm used to distribute load between the members of - // the pool. The current specification supports LBMethodRoundRobin and - // LBMethodLeastConnections as valid values for this attribute. - LBMethod LBMethod `json:"lb_method,omitempty"` -} - -// ToLBPoolUpdateMap builds a request body based on UpdateOpts. -func (opts UpdateOpts) ToLBPoolUpdateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "pool") -} - -// Update allows pools to be updated. -func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { - b, err := opts.ToLBPoolUpdateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - return -} - -// Delete will permanently delete a particular pool based on its unique ID. -func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { - _, r.Err = c.Delete(resourceURL(c, id), nil) - return -} - -// AssociateMonitor will associate a health monitor with a particular pool. -// Once associated, the health monitor will start monitoring the members of the -// pool and will deactivate these members if they are deemed unhealthy. A -// member can be deactivated (status set to INACTIVE) if any of health monitors -// finds it unhealthy. -func AssociateMonitor(c *gophercloud.ServiceClient, poolID, monitorID string) (r AssociateResult) { - b := map[string]interface{}{"health_monitor": map[string]string{"id": monitorID}} - _, r.Err = c.Post(associateURL(c, poolID), b, &r.Body, nil) - return -} - -// DisassociateMonitor will disassociate a health monitor with a particular -// pool. When dissociation is successful, the health monitor will no longer -// check for the health of the members of the pool. 
-func DisassociateMonitor(c *gophercloud.ServiceClient, poolID, monitorID string) (r AssociateResult) { - _, r.Err = c.Delete(disassociateURL(c, poolID, monitorID), nil) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/results.go deleted file mode 100644 index c2bae82d56ea..000000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/results.go +++ /dev/null @@ -1,137 +0,0 @@ -package pools - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// Pool represents a logical set of devices, such as web servers, that you -// group together to receive and process traffic. The load balancing function -// chooses a member of the pool according to the configured load balancing -// method to handle the new requests or connections received on the VIP address. -// There is only one pool per virtual IP. -type Pool struct { - // Status of the pool. Indicates whether the pool is operational. - Status string - - // LBMethod is the load-balancer algorithm, which is round-robin, - // least-connections, and so on. This value, which must be supported, is - // dependent on the provider. - LBMethod string `json:"lb_method"` - - // Protocol of the pool, which is TCP, HTTP, or HTTPS. - Protocol string - - // Description for the pool. - Description string - - // MonitorIDs are the IDs of associated monitors which check the health of - // the pool members. - MonitorIDs []string `json:"health_monitors"` - - // SubnetID is the network on which the members of the pool will be located. - // Only members that are on this network can be added to the pool. - SubnetID string `json:"subnet_id"` - - // TenantID is the owner of the pool. - TenantID string `json:"tenant_id"` - - // AdminStateUp is the administrative state of the pool, which is up - // (true) or down (false). - AdminStateUp bool `json:"admin_state_up"` - - // Name of the pool. - Name string - - // MemberIDs is the list of member IDs that belong to the pool. - MemberIDs []string `json:"members"` - - // ID is the unique ID for the pool. - ID string - - // VIPID is the ID of the virtual IP associated with this pool. - VIPID string `json:"vip_id"` - - // The provider. - Provider string -} - -// PoolPage is the page returned by a pager when traversing over a -// collection of pools. -type PoolPage struct { - pagination.LinkedPageBase -} - -// NextPageURL is invoked when a paginated collection of pools has reached -// the end of a page and the pager seeks to traverse over a new one. In order -// to do this, it needs to construct the next page's URL. -func (r PoolPage) NextPageURL() (string, error) { - var s struct { - Links []gophercloud.Link `json:"pools_links"` - } - err := r.ExtractInto(&s) - if err != nil { - return "", err - } - return gophercloud.ExtractNextURL(s.Links) -} - -// IsEmpty checks whether a PoolPage struct is empty. -func (r PoolPage) IsEmpty() (bool, error) { - is, err := ExtractPools(r) - return len(is) == 0, err -} - -// ExtractPools accepts a Page struct, specifically a PoolPage struct, -// and extracts the elements into a slice of Router structs. In other words, -// a generic collection is mapped into a relevant slice. 
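// A sketch tying the pools and monitors packages together: create a pool and
// then attach an existing health monitor to it with AssociateMonitor above.
// The subnet and monitor IDs are placeholders.
createOpts := pools.CreateOpts{
	Name:     "web_pool",
	Protocol: pools.ProtocolHTTP,
	LBMethod: pools.LBMethodRoundRobin,
	SubnetID: "subnet-id",
}

pool, err := pools.Create(networkClient, createOpts).Extract()
if err != nil {
	panic(err)
}

pool, err = pools.AssociateMonitor(networkClient, pool.ID, "monitor-id").Extract()
if err != nil {
	panic(err)
}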
-func ExtractPools(r pagination.Page) ([]Pool, error) { - var s struct { - Pools []Pool `json:"pools"` - } - err := (r.(PoolPage)).ExtractInto(&s) - return s.Pools, err -} - -type commonResult struct { - gophercloud.Result -} - -// Extract is a function that accepts a result and extracts a router. -func (r commonResult) Extract() (*Pool, error) { - var s struct { - Pool *Pool `json:"pool"` - } - err := r.ExtractInto(&s) - return s.Pool, err -} - -// CreateResult represents the result of a create operation. Call its Extract -// method to interpret it as a Pool. -type CreateResult struct { - commonResult -} - -// GetResult represents the result of a get operation. Call its Extract -// method to interpret it as a Pool. -type GetResult struct { - commonResult -} - -// UpdateResult represents the result of an update operation. Call its Extract -// method to interpret it as a Pool. -type UpdateResult struct { - commonResult -} - -// DeleteResult represents the result of a delete operation. Call its -// ExtractErr method to interpret it as a Pool. -type DeleteResult struct { - gophercloud.ErrResult -} - -// AssociateResult represents the result of an association operation. Call its Extract -// method to interpret it as a Pool. -type AssociateResult struct { - commonResult -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/urls.go deleted file mode 100644 index fe3601bbec95..000000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/urls.go +++ /dev/null @@ -1,25 +0,0 @@ -package pools - -import "github.com/gophercloud/gophercloud" - -const ( - rootPath = "lb" - resourcePath = "pools" - monitorPath = "health_monitors" -) - -func rootURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL(rootPath, resourcePath) -} - -func resourceURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL(rootPath, resourcePath, id) -} - -func associateURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL(rootPath, resourcePath, id, monitorPath) -} - -func disassociateURL(c *gophercloud.ServiceClient, poolID, monitorID string) string { - return c.ServiceURL(rootPath, resourcePath, poolID, monitorPath, monitorID) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/doc.go deleted file mode 100644 index 7fd861044b67..000000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/doc.go +++ /dev/null @@ -1,65 +0,0 @@ -/* -Package vips provides information and interaction with the Virtual IPs of the -Load Balancing as a Service extension for the OpenStack Networking service. 
- -Example to List Virtual IPs - - listOpts := vips.ListOpts{ - SubnetID: "d9bd223b-f1a9-4f98-953b-df977b0f902d", - } - - allPages, err := vips.List(networkClient, listOpts).AllPages() - if err != nil { - panic(err) - } - - allVIPs, err := vips.ExtractVIPs(allPages) - if err != nil { - panic(err) - } - - for _, vip := range allVIPs { - fmt.Printf("%+v\n", vip) - } - -Example to Create a Virtual IP - - createOpts := vips.CreateOpts{ - Protocol: "HTTP", - Name: "NewVip", - AdminStateUp: gophercloud.Enabled, - SubnetID: "8032909d-47a1-4715-90af-5153ffe39861", - PoolID: "61b1f87a-7a21-4ad3-9dda-7f81d249944f", - ProtocolPort: 80, - Persistence: &vips.SessionPersistence{Type: "SOURCE_IP"}, - } - - vip, err := vips.Create(networkClient, createOpts).Extract() - if err != nil { - panic(err) - } - -Example to Update a Virtual IP - - vipID := "93f1bad4-0423-40a8-afac-3fc541839912" - - i1000 := 1000 - updateOpts := vips.UpdateOpts{ - ConnLimit: &i1000, - Persistence: &vips.SessionPersistence{Type: "SOURCE_IP"}, - } - - vip, err := vips.Update(networkClient, vipID, updateOpts).Extract() - if err != nil { - panic(err) - } - -Example to Delete a Virtual IP - - vipID := "93f1bad4-0423-40a8-afac-3fc541839912" - err := vips.Delete(networkClient, vipID).ExtractErr() - if err != nil { - panic(err) - } -*/ -package vips diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/requests.go deleted file mode 100644 index 53b81bfdb927..000000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/requests.go +++ /dev/null @@ -1,180 +0,0 @@ -package vips - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// ListOpts allows the filtering and sorting of paginated collections through -// the API. Filtering is achieved by passing in struct field values that map to -// the floating IP attributes you want to see returned. SortKey allows you to -// sort by a particular network attribute. SortDir sets the direction, and is -// either `asc' or `desc'. Marker and Limit are used for pagination. -type ListOpts struct { - ID string `q:"id"` - Name string `q:"name"` - AdminStateUp *bool `q:"admin_state_up"` - Status string `q:"status"` - TenantID string `q:"tenant_id"` - SubnetID string `q:"subnet_id"` - Address string `q:"address"` - PortID string `q:"port_id"` - Protocol string `q:"protocol"` - ProtocolPort int `q:"protocol_port"` - ConnectionLimit int `q:"connection_limit"` - Limit int `q:"limit"` - Marker string `q:"marker"` - SortKey string `q:"sort_key"` - SortDir string `q:"sort_dir"` -} - -// List returns a Pager which allows you to iterate over a collection of -// Virtual IPs. It accepts a ListOpts struct, which allows you to filter and -// sort the returned collection for greater efficiency. -// -// Default policy settings return only those virtual IPs that are owned by the -// tenant who submits the request, unless an admin user submits the request. 
-func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager { - q, err := gophercloud.BuildQueryString(&opts) - if err != nil { - return pagination.Pager{Err: err} - } - u := rootURL(c) + q.String() - return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page { - return VIPPage{pagination.LinkedPageBase{PageResult: r}} - }) -} - -// CreateOptsBuilder allows extensions to add additional parameters to the -// Create Request. -type CreateOptsBuilder interface { - ToVIPCreateMap() (map[string]interface{}, error) -} - -// CreateOpts contains all the values needed to create a new virtual IP. -type CreateOpts struct { - // Name is the human-readable name for the VIP. Does not have to be unique. - Name string `json:"name" required:"true"` - - // SubnetID is the network on which to allocate the VIP's address. A tenant - // can only create VIPs on networks authorized by policy (e.g. networks that - // belong to them or networks that are shared). - SubnetID string `json:"subnet_id" required:"true"` - - // Protocol - can either be TCP, HTTP or HTTPS. - Protocol string `json:"protocol" required:"true"` - - // ProtocolPort is the port on which to listen for client traffic. - ProtocolPort int `json:"protocol_port" required:"true"` - - // PoolID is the ID of the pool with which the VIP is associated. - PoolID string `json:"pool_id" required:"true"` - - // TenantID is only required if the caller has an admin role and wants - // to create a pool for another tenant. - TenantID string `json:"tenant_id,omitempty"` - - // Address is the IP address of the VIP. - Address string `json:"address,omitempty"` - - // Description is the human-readable description for the VIP. - Description string `json:"description,omitempty"` - - // Persistence is the the of session persistence to use. - // Omit this field to prevent session persistence. - Persistence *SessionPersistence `json:"session_persistence,omitempty"` - - // ConnLimit is the maximum number of connections allowed for the VIP. - ConnLimit *int `json:"connection_limit,omitempty"` - - // AdminStateUp is the administrative state of the VIP. A valid value is - // true (UP) or false (DOWN). - AdminStateUp *bool `json:"admin_state_up,omitempty"` -} - -// ToVIPCreateMap builds a request body from CreateOpts. -func (opts CreateOpts) ToVIPCreateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "vip") -} - -// Create is an operation which provisions a new virtual IP based on the -// configuration defined in the CreateOpts struct. Once the request is -// validated and progress has started on the provisioning process, a -// CreateResult will be returned. -// -// Please note that the PoolID should refer to a pool that is not already -// associated with another vip. If the pool is already used by another vip, -// then the operation will fail with a 409 Conflict error will be returned. -// -// Users with an admin role can create VIPs on behalf of other tenants by -// specifying a TenantID attribute different than their own. -func Create(c *gophercloud.ServiceClient, opts CreateOpts) (r CreateResult) { - b, err := opts.ToVIPCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) - return -} - -// Get retrieves a particular virtual IP based on its unique ID. 
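// A sketch of repointing an existing VIP at a different pool via UpdateOpts.
// Unlike CreateOpts, the updatable fields are pointers so that zero values can
// be distinguished from "leave unchanged". The IDs and limit are placeholders.
newPoolID := "replacement-pool-id"
connLimit := 500

vip, err := vips.Update(networkClient, "vip-id", vips.UpdateOpts{
	PoolID:    &newPoolID,
	ConnLimit: &connLimit,
}).Extract()
if err != nil {
	panic(err)
}
_ = vip.Status // Status reports whether the VIP is operational after the update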
-func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { - _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) - return -} - -// UpdateOptsBuilder allows extensions to add additional parameters to the -// Update request. -type UpdateOptsBuilder interface { - ToVIPUpdateMap() (map[string]interface{}, error) -} - -// UpdateOpts contains all the values needed to update an existing virtual IP. -// Attributes not listed here but appear in CreateOpts are immutable and cannot -// be updated. -type UpdateOpts struct { - // Name is the human-readable name for the VIP. Does not have to be unique. - Name *string `json:"name,omitempty"` - - // PoolID is the ID of the pool with which the VIP is associated. - PoolID *string `json:"pool_id,omitempty"` - - // Description is the human-readable description for the VIP. - Description *string `json:"description,omitempty"` - - // Persistence is the the of session persistence to use. - // Omit this field to prevent session persistence. - Persistence *SessionPersistence `json:"session_persistence,omitempty"` - - // ConnLimit is the maximum number of connections allowed for the VIP. - ConnLimit *int `json:"connection_limit,omitempty"` - - // AdminStateUp is the administrative state of the VIP. A valid value is - // true (UP) or false (DOWN). - AdminStateUp *bool `json:"admin_state_up,omitempty"` -} - -// ToVIPUpdateMap builds a request body based on UpdateOpts. -func (opts UpdateOpts) ToVIPUpdateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "vip") -} - -// Update is an operation which modifies the attributes of the specified VIP. -func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { - b, err := opts.ToVIPUpdateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200, 202}, - }) - return -} - -// Delete will permanently delete a particular virtual IP based on its unique ID. -func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { - _, r.Err = c.Delete(resourceURL(c, id), nil) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/results.go deleted file mode 100644 index cb0994a7b250..000000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/results.go +++ /dev/null @@ -1,156 +0,0 @@ -package vips - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// SessionPersistence represents the session persistence feature of the load -// balancing service. It attempts to force connections or requests in the same -// session to be processed by the same member as long as it is ative. Three -// types of persistence are supported: -// -// SOURCE_IP: With this mode, all connections originating from the same source -// IP address, will be handled by the same member of the pool. -// HTTP_COOKIE: With this persistence mode, the load balancing function will -// create a cookie on the first request from a client. Subsequent -// requests containing the same cookie value will be handled by -// the same member of the pool. -// APP_COOKIE: With this persistence mode, the load balancing function will -// rely on a cookie established by the backend application. 
All -// requests carrying the same cookie value will be handled by the -// same member of the pool. -type SessionPersistence struct { - // Type is the type of persistence mode. - Type string `json:"type"` - - // CookieName is the name of cookie if persistence mode is set appropriately. - CookieName string `json:"cookie_name,omitempty"` -} - -// VirtualIP is the primary load balancing configuration object that specifies -// the virtual IP address and port on which client traffic is received, as well -// as other details such as the load balancing method to be use, protocol, etc. -// This entity is sometimes known in LB products under the name of a "virtual -// server", a "vserver" or a "listener". -type VirtualIP struct { - // ID is the unique ID for the VIP. - ID string `json:"id"` - - // TenantID is the owner of the VIP. - TenantID string `json:"tenant_id"` - - // Name is the human-readable name for the VIP. Does not have to be unique. - Name string `json:"name"` - - // Description is the human-readable description for the VIP. - Description string `json:"description"` - - // SubnetID is the ID of the subnet on which to allocate the VIP address. - SubnetID string `json:"subnet_id"` - - // Address is the IP address of the VIP. - Address string `json:"address"` - - // Protocol of the VIP address. A valid value is TCP, HTTP, or HTTPS. - Protocol string `json:"protocol"` - - // ProtocolPort is the port on which to listen to client traffic that is - // associated with the VIP address. A valid value is from 0 to 65535. - ProtocolPort int `json:"protocol_port"` - - // PoolID is the ID of the pool with which the VIP is associated. - PoolID string `json:"pool_id"` - - // PortID is the ID of the port which belongs to the load balancer. - PortID string `json:"port_id"` - - // Persistence indicates whether connections in the same session will be - // processed by the same pool member or not. - Persistence SessionPersistence `json:"session_persistence"` - - // ConnLimit is the maximum number of connections allowed for the VIP. - // Default is -1, meaning no limit. - ConnLimit int `json:"connection_limit"` - - // AdminStateUp is the administrative state of the VIP. A valid value is - // true (UP) or false (DOWN). - AdminStateUp bool `json:"admin_state_up"` - - // Status is the status of the VIP. Indicates whether the VIP is operational. - Status string `json:"status"` -} - -// VIPPage is the page returned by a pager when traversing over a -// collection of virtual IPs. -type VIPPage struct { - pagination.LinkedPageBase -} - -// NextPageURL is invoked when a paginated collection of routers has reached -// the end of a page and the pager seeks to traverse over a new one. In order -// to do this, it needs to construct the next page's URL. -func (r VIPPage) NextPageURL() (string, error) { - var s struct { - Links []gophercloud.Link `json:"vips_links"` - } - err := r.ExtractInto(&s) - if err != nil { - return "", err - } - return gophercloud.ExtractNextURL(s.Links) -} - -// IsEmpty checks whether a VIPPage struct is empty. -func (r VIPPage) IsEmpty() (bool, error) { - is, err := ExtractVIPs(r) - return len(is) == 0, err -} - -// ExtractVIPs accepts a Page struct, specifically a VIPPage struct, -// and extracts the elements into a slice of VirtualIP structs. In other words, -// a generic collection is mapped into a relevant slice. 
-func ExtractVIPs(r pagination.Page) ([]VirtualIP, error) { - var s struct { - VIPs []VirtualIP `json:"vips"` - } - err := (r.(VIPPage)).ExtractInto(&s) - return s.VIPs, err -} - -type commonResult struct { - gophercloud.Result -} - -// Extract is a function that accepts a result and extracts a VirtualIP. -func (r commonResult) Extract() (*VirtualIP, error) { - var s struct { - VirtualIP *VirtualIP `json:"vip" json:"vip"` - } - err := r.ExtractInto(&s) - return s.VirtualIP, err -} - -// CreateResult represents the result of a create operation. Call its Extract -// method to interpret it as a VirtualIP -type CreateResult struct { - commonResult -} - -// GetResult represents the result of a get operation. Call its Extract -// method to interpret it as a VirtualIP -type GetResult struct { - commonResult -} - -// UpdateResult represents the result of an update operation. Call its Extract -// method to interpret it as a VirtualIP -type UpdateResult struct { - commonResult -} - -// DeleteResult represents the result of a delete operation. Call its -// ExtractErr method to determine if the request succeeded or failed. -type DeleteResult struct { - gophercloud.ErrResult -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/urls.go deleted file mode 100644 index 584a1cf680c9..000000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/urls.go +++ /dev/null @@ -1,16 +0,0 @@ -package vips - -import "github.com/gophercloud/gophercloud" - -const ( - rootPath = "lb" - resourcePath = "vips" -) - -func rootURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL(rootPath, resourcePath) -} - -func resourceURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL(rootPath, resourcePath, id) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/doc.go new file mode 100644 index 000000000000..e768b71f8203 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/doc.go @@ -0,0 +1,65 @@ +/* +Package networks contains functionality for working with Neutron network +resources. A network is an isolated virtual layer-2 broadcast domain that is +typically reserved for the tenant who created it (unless you configure the +network to be shared). Tenants can create multiple networks until the +thresholds per-tenant quota is reached. + +In the v2.0 Networking API, the network is the main entity. Ports and subnets +are always associated with a network. 
+ +Example to List Networks + + listOpts := networks.ListOpts{ + TenantID: "a99e9b4e620e4db09a2dfb6e42a01e66", + } + + allPages, err := networks.List(networkClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allNetworks, err := networks.ExtractNetworks(allPages) + if err != nil { + panic(err) + } + + for _, network := range allNetworks { + fmt.Printf("%+v", network) + } + +Example to Create a Network + + iTrue := true + createOpts := networks.CreateOpts{ + Name: "network_1", + AdminStateUp: &iTrue, + } + + network, err := networks.Create(networkClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Update a Network + + networkID := "484cda0e-106f-4f4b-bb3f-d413710bbe78" + + updateOpts := networks.UpdateOpts{ + Name: "new_name", + } + + network, err := networks.Update(networkClient, networkID, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Network + + networkID := "484cda0e-106f-4f4b-bb3f-d413710bbe78" + err := networks.Delete(networkClient, networkID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package networks diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/requests.go new file mode 100644 index 000000000000..5b61b247192b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/requests.go @@ -0,0 +1,165 @@ +package networks + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToNetworkListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the network attributes you want to see returned. SortKey allows you to sort +// by a particular network attribute. SortDir sets the direction, and is either +// `asc' or `desc'. Marker and Limit are used for pagination. +type ListOpts struct { + Status string `q:"status"` + Name string `q:"name"` + AdminStateUp *bool `q:"admin_state_up"` + TenantID string `q:"tenant_id"` + Shared *bool `q:"shared"` + ID string `q:"id"` + Marker string `q:"marker"` + Limit int `q:"limit"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// ToNetworkListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToNetworkListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns a Pager which allows you to iterate over a collection of +// networks. It accepts a ListOpts struct, which allows you to filter and sort +// the returned collection for greater efficiency. +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(c) + if opts != nil { + query, err := opts.ToNetworkListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return NetworkPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Get retrieves a specific network based on its unique ID. 
+func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = c.Get(getURL(c, id), &r.Body, nil) + return +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToNetworkCreateMap() (map[string]interface{}, error) +} + +// CreateOpts represents options used to create a network. +type CreateOpts struct { + AdminStateUp *bool `json:"admin_state_up,omitempty"` + Name string `json:"name,omitempty"` + Shared *bool `json:"shared,omitempty"` + TenantID string `json:"tenant_id,omitempty"` +} + +// ToNetworkCreateMap builds a request body from CreateOpts. +func (opts CreateOpts) ToNetworkCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "network") +} + +// Create accepts a CreateOpts struct and creates a new network using the values +// provided. This operation does not actually require a request body, i.e. the +// CreateOpts struct argument can be empty. +// +// The tenant ID that is contained in the URI is the tenant that creates the +// network. An admin user, however, has the option of specifying another tenant +// ID in the CreateOpts struct. +func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToNetworkCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(createURL(c), b, &r.Body, nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToNetworkUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts represents options used to update a network. +type UpdateOpts struct { + AdminStateUp *bool `json:"admin_state_up,omitempty"` + Name string `json:"name,omitempty"` + Shared *bool `json:"shared,omitempty"` +} + +// ToNetworkUpdateMap builds a request body from UpdateOpts. +func (opts UpdateOpts) ToNetworkUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "network") +} + +// Update accepts a UpdateOpts struct and updates an existing network using the +// values provided. For more information, see the Create function. +func Update(c *gophercloud.ServiceClient, networkID string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToNetworkUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Put(updateURL(c, networkID), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201}, + }) + return +} + +// Delete accepts a unique ID and deletes the network associated with it. +func Delete(c *gophercloud.ServiceClient, networkID string) (r DeleteResult) { + _, r.Err = c.Delete(deleteURL(c, networkID), nil) + return +} + +// IDFromName is a convenience function that returns a network's ID, given +// its name. 
+func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { + count := 0 + id := "" + pages, err := List(client, nil).AllPages() + if err != nil { + return "", err + } + + all, err := ExtractNetworks(pages) + if err != nil { + return "", err + } + + for _, s := range all { + if s.Name == name { + count++ + id = s.ID + } + } + + switch count { + case 0: + return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "network"} + case 1: + return id, nil + default: + return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: count, ResourceType: "network"} + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/results.go new file mode 100644 index 000000000000..ffd0259f1d2f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/results.go @@ -0,0 +1,111 @@ +package networks + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a network resource. +func (r commonResult) Extract() (*Network, error) { + var s Network + err := r.ExtractInto(&s) + return &s, err +} + +func (r commonResult) ExtractInto(v interface{}) error { + return r.Result.ExtractIntoStructPtr(v, "network") +} + +// CreateResult represents the result of a create operation. Call its Extract +// method to interpret it as a Network. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. Call its Extract +// method to interpret it as a Network. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. Call its Extract +// method to interpret it as a Network. +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. Call its +// ExtractErr method to determine if the request succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// Network represents, well, a network. +type Network struct { + // UUID for the network + ID string `json:"id"` + + // Human-readable name for the network. Might not be unique. + Name string `json:"name"` + + // The administrative state of network. If false (down), the network does not + // forward packets. + AdminStateUp bool `json:"admin_state_up"` + + // Indicates whether network is currently operational. Possible values include + // `ACTIVE', `DOWN', `BUILD', or `ERROR'. Plug-ins might define additional + // values. + Status string `json:"status"` + + // Subnets associated with this network. + Subnets []string `json:"subnets"` + + // Owner of network. + TenantID string `json:"tenant_id"` + + // Specifies whether the network resource can be accessed by any tenant. + Shared bool `json:"shared"` +} + +// NetworkPage is the page returned by a pager when traversing over a +// collection of networks. +type NetworkPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of networks has reached +// the end of a page and the pager seeks to traverse over a new one. In order +// to do this, it needs to construct the next page's URL. 
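// A sketch using the new IDFromName helper added above to resolve a network by
// name and then fetch its full representation; the network name is a placeholder.
networkID, err := networks.IDFromName(networkClient, "network_1")
if err != nil {
	panic(err)
}

network, err := networks.Get(networkClient, networkID).Extract()
if err != nil {
	panic(err)
}
_ = network.Subnets // subnet IDs attached to the network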
+func (r NetworkPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"networks_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a NetworkPage struct is empty. +func (r NetworkPage) IsEmpty() (bool, error) { + is, err := ExtractNetworks(r) + return len(is) == 0, err +} + +// ExtractNetworks accepts a Page struct, specifically a NetworkPage struct, +// and extracts the elements into a slice of Network structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractNetworks(r pagination.Page) ([]Network, error) { + var s []Network + err := ExtractNetworksInto(r, &s) + return s, err +} + +func ExtractNetworksInto(r pagination.Page, v interface{}) error { + return r.(NetworkPage).Result.ExtractIntoSlicePtr(v, "networks") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/urls.go new file mode 100644 index 000000000000..4a8fb1dc7d3c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/urls.go @@ -0,0 +1,31 @@ +package networks + +import "github.com/gophercloud/gophercloud" + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("networks", id) +} + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("networks") +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return resourceURL(c, id) +} + +func listURL(c *gophercloud.ServiceClient) string { + return rootURL(c) +} + +func createURL(c *gophercloud.ServiceClient) string { + return rootURL(c) +} + +func updateURL(c *gophercloud.ServiceClient, id string) string { + return resourceURL(c, id) +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return resourceURL(c, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/results.go index c50da6dfffdf..ebef98d5de0e 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/results.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/results.go @@ -11,11 +11,13 @@ type commonResult struct { // Extract is a function that accepts a result and extracts a port resource. func (r commonResult) Extract() (*Port, error) { - var s struct { - Port *Port `json:"port"` - } + var s Port err := r.ExtractInto(&s) - return s.Port, err + return &s, err +} + +func (r commonResult) ExtractInto(v interface{}) error { + return r.Result.ExtractIntoStructPtr(v, "port") } // CreateResult represents the result of a create operation. Call its Extract @@ -128,9 +130,11 @@ func (r PortPage) IsEmpty() (bool, error) { // and extracts the elements into a slice of Port structs. In other words, // a generic collection is mapped into a relevant slice. 
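
The new ExtractNetworksInto (and the matching ExtractPortsInto below) exists so callers can decode the raw "networks" payload into their own types instead of the fixed Network struct. A hedged sketch, assuming an authenticated client; NetworkWithExt and the "router:external" field are illustrative stand-ins for whatever extension attributes a deployment actually returns:

package example

import (
    "github.com/gophercloud/gophercloud"
    "github.com/gophercloud/gophercloud/openstack/networking/v2/networks"
)

// NetworkWithExt embeds the vendored Network and adds one extra field that a
// Neutron extension might report alongside the core attributes.
type NetworkWithExt struct {
    networks.Network
    External bool `json:"router:external"`
}

// listWithExt pages through every network and decodes the results, extension
// field included, via ExtractNetworksInto rather than ExtractNetworks.
func listWithExt(client *gophercloud.ServiceClient) ([]NetworkWithExt, error) {
    pages, err := networks.List(client, nil).AllPages()
    if err != nil {
        return nil, err
    }
    var all []NetworkWithExt
    err = networks.ExtractNetworksInto(pages, &all)
    return all, err
}
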
func ExtractPorts(r pagination.Page) ([]Port, error) { - var s struct { - Ports []Port `json:"ports"` - } - err := (r.(PortPage)).ExtractInto(&s) - return s.Ports, err + var s []Port + err := ExtractPortsInto(r, &s) + return s, err +} + +func ExtractPortsInto(r pagination.Page, v interface{}) error { + return r.(PortPage).Result.ExtractIntoSlicePtr(v, "ports") } diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/utils/choose_version.go b/vendor/github.com/gophercloud/gophercloud/openstack/utils/choose_version.go index c605d084444d..27da19f91a8d 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/utils/choose_version.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/utils/choose_version.go @@ -68,11 +68,6 @@ func ChooseVersion(client *gophercloud.ProviderClient, recognized []*Version) (* return nil, "", err } - byID := make(map[string]*Version) - for _, version := range recognized { - byID[version.ID] = version - } - var highest *Version var endpoint string @@ -84,20 +79,22 @@ func ChooseVersion(client *gophercloud.ProviderClient, recognized []*Version) (* } } - if matching, ok := byID[value.ID]; ok { - // Prefer a version that exactly matches the provided endpoint. - if href == identityEndpoint { - if href == "" { - return nil, "", fmt.Errorf("Endpoint missing in version %s response from %s", value.ID, client.IdentityBase) + for _, version := range recognized { + if strings.Contains(value.ID, version.ID) { + // Prefer a version that exactly matches the provided endpoint. + if href == identityEndpoint { + if href == "" { + return nil, "", fmt.Errorf("Endpoint missing in version %s response from %s", value.ID, client.IdentityBase) + } + return version, href, nil } - return matching, href, nil - } - // Otherwise, find the highest-priority version with a whitelisted status. - if goodStatus[strings.ToLower(value.Status)] { - if highest == nil || matching.Priority > highest.Priority { - highest = matching - endpoint = href + // Otherwise, find the highest-priority version with a whitelisted status. + if goodStatus[strings.ToLower(value.Status)] { + if highest == nil || version.Priority > highest.Priority { + highest = version + endpoint = href + } } } } diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/pager.go b/vendor/github.com/gophercloud/gophercloud/pagination/pager.go index 6f1609ef2e3c..7c65926b725e 100644 --- a/vendor/github.com/gophercloud/gophercloud/pagination/pager.go +++ b/vendor/github.com/gophercloud/gophercloud/pagination/pager.go @@ -22,7 +22,6 @@ var ( // Depending on the pagination strategy of a particular resource, there may be an additional subinterface that the result type // will need to implement. type Page interface { - // NextPageURL generates the URL for the page of data that follows this collection. // Return "" if no such page exists. 
NextPageURL() (string, error) diff --git a/vendor/github.com/gophercloud/gophercloud/params.go b/vendor/github.com/gophercloud/gophercloud/params.go index 6afc8f8b7215..687af3dc0ce6 100644 --- a/vendor/github.com/gophercloud/gophercloud/params.go +++ b/vendor/github.com/gophercloud/gophercloud/params.go @@ -347,12 +347,21 @@ func BuildQueryString(opts interface{}) (*url.URL, error) { params.Add(tags[0], v.Index(i).String()) } } + case reflect.Map: + if v.Type().Key().Kind() == reflect.String && v.Type().Elem().Kind() == reflect.String { + var s []string + for _, k := range v.MapKeys() { + value := v.MapIndex(k).String() + s = append(s, fmt.Sprintf("'%s':'%s'", k.String(), value)) + } + params.Add(tags[0], fmt.Sprintf("{%s}", strings.Join(s, ", "))) + } } } else { // Otherwise, the field is not set. if len(tags) == 2 && tags[1] == "required" { // And the field is required. Return an error. - return nil, fmt.Errorf("Required query parameter [%s] not set.", f.Name) + return &url.URL{}, fmt.Errorf("Required query parameter [%s] not set.", f.Name) } } } diff --git a/vendor/github.com/gophercloud/gophercloud/provider_client.go b/vendor/github.com/gophercloud/gophercloud/provider_client.go index f88682381dda..01b3010739ae 100644 --- a/vendor/github.com/gophercloud/gophercloud/provider_client.go +++ b/vendor/github.com/gophercloud/gophercloud/provider_client.go @@ -145,10 +145,6 @@ func (client *ProviderClient) Request(method, url string, options *RequestOpts) } req.Header.Set("Accept", applicationJSON) - for k, v := range client.AuthenticatedHeaders() { - req.Header.Add(k, v) - } - // Set the User-Agent header req.Header.Set("User-Agent", client.UserAgent.Join()) @@ -162,6 +158,11 @@ func (client *ProviderClient) Request(method, url string, options *RequestOpts) } } + // get latest token from client + for k, v := range client.AuthenticatedHeaders() { + req.Header.Set(k, v) + } + // Set connection parameter to close the connection immediately when we've got the response req.Close = true diff --git a/vendor/github.com/gophercloud/gophercloud/service_client.go b/vendor/github.com/gophercloud/gophercloud/service_client.go index 1160fefa7c2c..d1a48fea35b3 100644 --- a/vendor/github.com/gophercloud/gophercloud/service_client.go +++ b/vendor/github.com/gophercloud/gophercloud/service_client.go @@ -114,6 +114,8 @@ func (client *ServiceClient) setMicroversionHeader(opts *RequestOpts) { opts.MoreHeaders["X-OpenStack-Nova-API-Version"] = client.Microversion case "sharev2": opts.MoreHeaders["X-OpenStack-Manila-API-Version"] = client.Microversion + case "volume": + opts.MoreHeaders["X-OpenStack-Volume-API-Version"] = client.Microversion } if client.Type != "" { diff --git a/vendor/github.com/jmespath/go-jmespath/Makefile b/vendor/github.com/jmespath/go-jmespath/Makefile index ad17bf0012e8..a828d2848f0d 100644 --- a/vendor/github.com/jmespath/go-jmespath/Makefile +++ b/vendor/github.com/jmespath/go-jmespath/Makefile @@ -35,7 +35,7 @@ buildfuzz: go-fuzz-build github.com/jmespath/go-jmespath/fuzz fuzz: buildfuzz - go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/corpus + go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/testdata bench: go test -bench . 
-cpuprofile cpu.out diff --git a/vendor/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/jmespath/go-jmespath/api.go index 67df3fc1c878..9cfa988bc5b8 100644 --- a/vendor/github.com/jmespath/go-jmespath/api.go +++ b/vendor/github.com/jmespath/go-jmespath/api.go @@ -1,5 +1,42 @@ package jmespath +import "strconv" + +// JmesPath is the epresentation of a compiled JMES path query. A JmesPath is +// safe for concurrent use by multiple goroutines. +type JMESPath struct { + ast ASTNode + intr *treeInterpreter +} + +// Compile parses a JMESPath expression and returns, if successful, a JMESPath +// object that can be used to match against data. +func Compile(expression string) (*JMESPath, error) { + parser := NewParser() + ast, err := parser.Parse(expression) + if err != nil { + return nil, err + } + jmespath := &JMESPath{ast: ast, intr: newInterpreter()} + return jmespath, nil +} + +// MustCompile is like Compile but panics if the expression cannot be parsed. +// It simplifies safe initialization of global variables holding compiled +// JMESPaths. +func MustCompile(expression string) *JMESPath { + jmespath, err := Compile(expression) + if err != nil { + panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error()) + } + return jmespath +} + +// Search evaluates a JMESPath expression against input data and returns the result. +func (jp *JMESPath) Search(data interface{}) (interface{}, error) { + return jp.intr.Execute(jp.ast, data) +} + // Search evaluates a JMESPath expression against input data and returns the result. func Search(expression string, data interface{}) (interface{}, error) { intr := newInterpreter() diff --git a/vendor/github.com/jmespath/go-jmespath/functions.go b/vendor/github.com/jmespath/go-jmespath/functions.go index 8a3f2ef0dce8..9b7cd89b4bcc 100644 --- a/vendor/github.com/jmespath/go-jmespath/functions.go +++ b/vendor/github.com/jmespath/go-jmespath/functions.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "math" + "reflect" "sort" "strconv" "strings" @@ -124,197 +125,197 @@ type functionCaller struct { func newFunctionCaller() *functionCaller { caller := &functionCaller{} caller.functionTable = map[string]functionEntry{ - "length": functionEntry{ + "length": { name: "length", arguments: []argSpec{ - argSpec{types: []jpType{jpString, jpArray, jpObject}}, + {types: []jpType{jpString, jpArray, jpObject}}, }, handler: jpfLength, }, - "starts_with": functionEntry{ + "starts_with": { name: "starts_with", arguments: []argSpec{ - argSpec{types: []jpType{jpString}}, - argSpec{types: []jpType{jpString}}, + {types: []jpType{jpString}}, + {types: []jpType{jpString}}, }, handler: jpfStartsWith, }, - "abs": functionEntry{ + "abs": { name: "abs", arguments: []argSpec{ - argSpec{types: []jpType{jpNumber}}, + {types: []jpType{jpNumber}}, }, handler: jpfAbs, }, - "avg": functionEntry{ + "avg": { name: "avg", arguments: []argSpec{ - argSpec{types: []jpType{jpArrayNumber}}, + {types: []jpType{jpArrayNumber}}, }, handler: jpfAvg, }, - "ceil": functionEntry{ + "ceil": { name: "ceil", arguments: []argSpec{ - argSpec{types: []jpType{jpNumber}}, + {types: []jpType{jpNumber}}, }, handler: jpfCeil, }, - "contains": functionEntry{ + "contains": { name: "contains", arguments: []argSpec{ - argSpec{types: []jpType{jpArray, jpString}}, - argSpec{types: []jpType{jpAny}}, + {types: []jpType{jpArray, jpString}}, + {types: []jpType{jpAny}}, }, handler: jpfContains, }, - "ends_with": functionEntry{ + "ends_with": { name: "ends_with", arguments: []argSpec{ - argSpec{types: 
[]jpType{jpString}}, - argSpec{types: []jpType{jpString}}, + {types: []jpType{jpString}}, + {types: []jpType{jpString}}, }, handler: jpfEndsWith, }, - "floor": functionEntry{ + "floor": { name: "floor", arguments: []argSpec{ - argSpec{types: []jpType{jpNumber}}, + {types: []jpType{jpNumber}}, }, handler: jpfFloor, }, - "map": functionEntry{ + "map": { name: "amp", arguments: []argSpec{ - argSpec{types: []jpType{jpExpref}}, - argSpec{types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + {types: []jpType{jpArray}}, }, handler: jpfMap, hasExpRef: true, }, - "max": functionEntry{ + "max": { name: "max", arguments: []argSpec{ - argSpec{types: []jpType{jpArrayNumber, jpArrayString}}, + {types: []jpType{jpArrayNumber, jpArrayString}}, }, handler: jpfMax, }, - "merge": functionEntry{ + "merge": { name: "merge", arguments: []argSpec{ - argSpec{types: []jpType{jpObject}, variadic: true}, + {types: []jpType{jpObject}, variadic: true}, }, handler: jpfMerge, }, - "max_by": functionEntry{ + "max_by": { name: "max_by", arguments: []argSpec{ - argSpec{types: []jpType{jpArray}}, - argSpec{types: []jpType{jpExpref}}, + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, }, handler: jpfMaxBy, hasExpRef: true, }, - "sum": functionEntry{ + "sum": { name: "sum", arguments: []argSpec{ - argSpec{types: []jpType{jpArrayNumber}}, + {types: []jpType{jpArrayNumber}}, }, handler: jpfSum, }, - "min": functionEntry{ + "min": { name: "min", arguments: []argSpec{ - argSpec{types: []jpType{jpArrayNumber, jpArrayString}}, + {types: []jpType{jpArrayNumber, jpArrayString}}, }, handler: jpfMin, }, - "min_by": functionEntry{ + "min_by": { name: "min_by", arguments: []argSpec{ - argSpec{types: []jpType{jpArray}}, - argSpec{types: []jpType{jpExpref}}, + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, }, handler: jpfMinBy, hasExpRef: true, }, - "type": functionEntry{ + "type": { name: "type", arguments: []argSpec{ - argSpec{types: []jpType{jpAny}}, + {types: []jpType{jpAny}}, }, handler: jpfType, }, - "keys": functionEntry{ + "keys": { name: "keys", arguments: []argSpec{ - argSpec{types: []jpType{jpObject}}, + {types: []jpType{jpObject}}, }, handler: jpfKeys, }, - "values": functionEntry{ + "values": { name: "values", arguments: []argSpec{ - argSpec{types: []jpType{jpObject}}, + {types: []jpType{jpObject}}, }, handler: jpfValues, }, - "sort": functionEntry{ + "sort": { name: "sort", arguments: []argSpec{ - argSpec{types: []jpType{jpArrayString, jpArrayNumber}}, + {types: []jpType{jpArrayString, jpArrayNumber}}, }, handler: jpfSort, }, - "sort_by": functionEntry{ + "sort_by": { name: "sort_by", arguments: []argSpec{ - argSpec{types: []jpType{jpArray}}, - argSpec{types: []jpType{jpExpref}}, + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, }, handler: jpfSortBy, hasExpRef: true, }, - "join": functionEntry{ + "join": { name: "join", arguments: []argSpec{ - argSpec{types: []jpType{jpString}}, - argSpec{types: []jpType{jpArrayString}}, + {types: []jpType{jpString}}, + {types: []jpType{jpArrayString}}, }, handler: jpfJoin, }, - "reverse": functionEntry{ + "reverse": { name: "reverse", arguments: []argSpec{ - argSpec{types: []jpType{jpArray, jpString}}, + {types: []jpType{jpArray, jpString}}, }, handler: jpfReverse, }, - "to_array": functionEntry{ + "to_array": { name: "to_array", arguments: []argSpec{ - argSpec{types: []jpType{jpAny}}, + {types: []jpType{jpAny}}, }, handler: jpfToArray, }, - "to_string": functionEntry{ + "to_string": { name: "to_string", arguments: []argSpec{ - argSpec{types: 
[]jpType{jpAny}}, + {types: []jpType{jpAny}}, }, handler: jpfToString, }, - "to_number": functionEntry{ + "to_number": { name: "to_number", arguments: []argSpec{ - argSpec{types: []jpType{jpAny}}, + {types: []jpType{jpAny}}, }, handler: jpfToNumber, }, - "not_null": functionEntry{ + "not_null": { name: "not_null", arguments: []argSpec{ - argSpec{types: []jpType{jpAny}, variadic: true}, + {types: []jpType{jpAny}, variadic: true}, }, handler: jpfNotNull, }, @@ -357,7 +358,7 @@ func (a *argSpec) typeCheck(arg interface{}) error { return nil } case jpArray: - if _, ok := arg.([]interface{}); ok { + if isSliceType(arg) { return nil } case jpObject: @@ -409,8 +410,9 @@ func jpfLength(arguments []interface{}) (interface{}, error) { arg := arguments[0] if c, ok := arg.(string); ok { return float64(utf8.RuneCountInString(c)), nil - } else if c, ok := arg.([]interface{}); ok { - return float64(len(c)), nil + } else if isSliceType(arg) { + v := reflect.ValueOf(arg) + return float64(v.Len()), nil } else if c, ok := arg.(map[string]interface{}); ok { return float64(len(c)), nil } diff --git a/vendor/github.com/jmespath/go-jmespath/parser.go b/vendor/github.com/jmespath/go-jmespath/parser.go index c8f4bcebd87d..1240a1755215 100644 --- a/vendor/github.com/jmespath/go-jmespath/parser.go +++ b/vendor/github.com/jmespath/go-jmespath/parser.go @@ -353,7 +353,7 @@ func (p *Parser) nud(token token) (ASTNode, error) { case tFlatten: left := ASTNode{ nodeType: ASTFlatten, - children: []ASTNode{ASTNode{nodeType: ASTIdentity}}, + children: []ASTNode{{nodeType: ASTIdentity}}, } right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) if err != nil { @@ -378,7 +378,7 @@ func (p *Parser) nud(token token) (ASTNode, error) { } return ASTNode{ nodeType: ASTProjection, - children: []ASTNode{ASTNode{nodeType: ASTIdentity}, right}, + children: []ASTNode{{nodeType: ASTIdentity}, right}, }, nil } else { return p.parseMultiSelectList() diff --git a/vendor/github.com/mailru/easyjson/buffer/pool.go b/vendor/github.com/mailru/easyjson/buffer/pool.go index 4de4a51db562..07fb4bc1f7bf 100644 --- a/vendor/github.com/mailru/easyjson/buffer/pool.go +++ b/vendor/github.com/mailru/easyjson/buffer/pool.go @@ -179,18 +179,25 @@ func (b *Buffer) DumpTo(w io.Writer) (written int, err error) { } // BuildBytes creates a single byte slice with all the contents of the buffer. Data is -// copied if it does not fit in a single chunk. -func (b *Buffer) BuildBytes() []byte { +// copied if it does not fit in a single chunk. You can optionally provide one byte +// slice as argument that it will try to reuse. +func (b *Buffer) BuildBytes(reuse ...[]byte) []byte { if len(b.bufs) == 0 { - ret := b.Buf b.toPool = nil b.Buf = nil - return ret } - ret := make([]byte, 0, b.Size()) + var ret []byte + size := b.Size() + + // If we got a buffer as argument and it is big enought, reuse it. + if len(reuse) == 1 && cap(reuse[0]) >= size { + ret = reuse[0][:0] + } else { + ret = make([]byte, 0, size) + } for _, buf := range b.bufs { ret = append(ret, buf...) putBuf(buf) @@ -205,3 +212,59 @@ func (b *Buffer) BuildBytes() []byte { return ret } + +type readCloser struct { + offset int + bufs [][]byte +} + +func (r *readCloser) Read(p []byte) (n int, err error) { + for _, buf := range r.bufs { + // Copy as much as we can. + x := copy(p[n:], buf[r.offset:]) + n += x // Increment how much we filled. + + // Did we empty the whole buffer? + if r.offset+x == len(buf) { + // On to the next buffer. + r.offset = 0 + r.bufs = r.bufs[1:] + + // We can release this buffer. 
+ putBuf(buf) + } else { + r.offset += x + } + + if n == len(p) { + break + } + } + // No buffers left or nothing read? + if len(r.bufs) == 0 { + err = io.EOF + } + return +} + +func (r *readCloser) Close() error { + // Release all remaining buffers. + for _, buf := range r.bufs { + putBuf(buf) + } + // In case Close gets called multiple times. + r.bufs = nil + + return nil +} + +// ReadCloser creates an io.ReadCloser with all the contents of the buffer. +func (b *Buffer) ReadCloser() io.ReadCloser { + ret := &readCloser{0, append(b.bufs, b.Buf)} + + b.bufs = nil + b.toPool = nil + b.Buf = nil + + return ret +} diff --git a/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go b/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go new file mode 100644 index 000000000000..ff7b27c5b203 --- /dev/null +++ b/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go @@ -0,0 +1,24 @@ +// This file will only be included to the build if neither +// easyjson_nounsafe nor appengine build tag is set. See README notes +// for more details. + +//+build !easyjson_nounsafe +//+build !appengine + +package jlexer + +import ( + "reflect" + "unsafe" +) + +// bytesToStr creates a string pointing at the slice to avoid copying. +// +// Warning: the string returned by the function should be used with care, as the whole input data +// chunk may be either blocked from being freed by GC because of a single string or the buffer.Data +// may be garbage-collected even when the string exists. +func bytesToStr(data []byte) string { + h := (*reflect.SliceHeader)(unsafe.Pointer(&data)) + shdr := reflect.StringHeader{Data: h.Data, Len: h.Len} + return *(*string)(unsafe.Pointer(&shdr)) +} diff --git a/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go b/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go new file mode 100644 index 000000000000..864d1be67638 --- /dev/null +++ b/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go @@ -0,0 +1,13 @@ +// This file is included to the build if any of the buildtags below +// are defined. Refer to README notes for more details. + +//+build easyjson_nounsafe appengine + +package jlexer + +// bytesToStr creates a string normally from []byte +// +// Note that this method is roughly 1.5x slower than using the 'unsafe' method. +func bytesToStr(data []byte) string { + return string(data) +} diff --git a/vendor/github.com/mailru/easyjson/jlexer/lexer.go b/vendor/github.com/mailru/easyjson/jlexer/lexer.go index d700c0a32fb5..e81f1031b062 100644 --- a/vendor/github.com/mailru/easyjson/jlexer/lexer.go +++ b/vendor/github.com/mailru/easyjson/jlexer/lexer.go @@ -5,12 +5,14 @@ package jlexer import ( + "encoding/base64" + "errors" "fmt" "io" - "reflect" "strconv" + "unicode" + "unicode/utf16" "unicode/utf8" - "unsafe" ) // tokenKind determines type of a token. @@ -45,11 +47,13 @@ type Lexer struct { firstElement bool // Whether current element is the first in array or an object. wantSep byte // A comma or a colon character, which need to occur before a token. - err error // Error encountered during lexing, if any. + UseMultipleErrors bool // If we want to use multiple errors. + fatalError error // Fatal error occurred during lexing. It is usually a syntax error. + multipleErrors []*LexerError // Semantic errors occurred during lexing. Marshalling will be continued after finding this errors. } -// fetchToken scans the input for the next token. -func (r *Lexer) fetchToken() { +// FetchToken scans the input for the next token. 
+func (r *Lexer) FetchToken() { r.token.kind = tokenUndef r.start = r.pos @@ -147,7 +151,7 @@ func (r *Lexer) fetchToken() { return } } - r.err = io.EOF + r.fatalError = io.EOF return } @@ -199,17 +203,6 @@ func (r *Lexer) fetchFalse() { } } -// bytesToStr creates a string pointing at the slice to avoid copying. -// -// Warning: the string returned by the function should be used with care, as the whole input data -// chunk may be either blocked from being freed by GC because of a single string or the buffer.Data -// may be garbage-collected even when the string exists. -func bytesToStr(data []byte) string { - h := (*reflect.SliceHeader)(unsafe.Pointer(&data)) - shdr := reflect.StringHeader{h.Data, h.Len} - return *(*string)(unsafe.Pointer(&shdr)) -} - // fetchNumber scans a number literal token. func (r *Lexer) fetchNumber() { hasE := false @@ -265,6 +258,33 @@ func findStringLen(data []byte) (hasEscapes bool, length int) { return false, len(data) } +// getu4 decodes \uXXXX from the beginning of s, returning the hex value, +// or it returns -1. +func getu4(s []byte) rune { + if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { + return -1 + } + var val rune + for i := 2; i < len(s) && i < 6; i++ { + var v byte + c := s[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + v = c - '0' + case 'a', 'b', 'c', 'd', 'e', 'f': + v = c - 'a' + 10 + case 'A', 'B', 'C', 'D', 'E', 'F': + v = c - 'A' + 10 + default: + return -1 + } + + val <<= 4 + val |= rune(v) + } + return val +} + // processEscape processes a single escape sequence and returns number of bytes processed. func (r *Lexer) processEscape(data []byte) (int, error) { if len(data) < 2 { @@ -292,39 +312,28 @@ func (r *Lexer) processEscape(data []byte) (int, error) { r.token.byteValue = append(r.token.byteValue, '\t') return 2, nil case 'u': - default: - return 0, fmt.Errorf("syntax error") - } - - var val rune - - for i := 2; i < len(data) && i < 6; i++ { - var v byte - c = data[i] - switch c { - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - v = c - '0' - case 'a', 'b', 'c', 'd', 'e', 'f': - v = c - 'a' + 10 - case 'A', 'B', 'C', 'D', 'E', 'F': - v = c - 'A' + 10 - default: - return 0, fmt.Errorf("syntax error") + rr := getu4(data) + if rr < 0 { + return 0, errors.New("syntax error") } - val <<= 4 - val |= rune(v) - } - - l := utf8.RuneLen(val) - if l == -1 { - return 0, fmt.Errorf("invalid unicode escape") + read := 6 + if utf16.IsSurrogate(rr) { + rr1 := getu4(data[read:]) + if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { + read += 6 + rr = dec + } else { + rr = unicode.ReplacementChar + } + } + var d [4]byte + s := utf8.EncodeRune(d[:], rr) + r.token.byteValue = append(r.token.byteValue, d[:s]...) + return read, nil } - var d [4]byte - utf8.EncodeRune(d[:], val) - r.token.byteValue = append(r.token.byteValue, d[:l]...) - return 6, nil + return 0, errors.New("syntax error") } // fetchString scans a string literal token. @@ -368,11 +377,11 @@ func (r *Lexer) fetchString() { // scanToken scans the next token if no token is currently available in the lexer. func (r *Lexer) scanToken() { - if r.token.kind != tokenUndef || r.err != nil { + if r.token.kind != tokenUndef || r.fatalError != nil { return } - r.fetchToken() + r.FetchToken() } // consume resets the current token to allow scanning the next one. @@ -383,20 +392,20 @@ func (r *Lexer) consume() { // Ok returns true if no error (including io.EOF) was encountered during scanning. 
func (r *Lexer) Ok() bool { - return r.err == nil + return r.fatalError == nil } const maxErrorContextLen = 13 func (r *Lexer) errParse(what string) { - if r.err == nil { + if r.fatalError == nil { var str string if len(r.Data)-r.pos <= maxErrorContextLen { str = string(r.Data) } else { str = string(r.Data[r.pos:r.pos+maxErrorContextLen-3]) + "..." } - r.err = &LexerError{ + r.fatalError = &LexerError{ Reason: what, Offset: r.pos, Data: str, @@ -409,36 +418,64 @@ func (r *Lexer) errSyntax() { } func (r *Lexer) errInvalidToken(expected string) { - if r.err == nil { - var str string - if len(r.token.byteValue) <= maxErrorContextLen { - str = string(r.token.byteValue) - } else { - str = string(r.token.byteValue[:maxErrorContextLen-3]) + "..." + if r.fatalError != nil { + return + } + if r.UseMultipleErrors { + r.pos = r.start + r.consume() + r.SkipRecursive() + switch expected { + case "[": + r.token.delimValue = ']' + r.token.kind = tokenDelim + case "{": + r.token.delimValue = '}' + r.token.kind = tokenDelim } - r.err = &LexerError{ + r.addNonfatalError(&LexerError{ Reason: fmt.Sprintf("expected %s", expected), - Offset: r.pos, - Data: str, - } + Offset: r.start, + Data: string(r.Data[r.start:r.pos]), + }) + return } + + var str string + if len(r.token.byteValue) <= maxErrorContextLen { + str = string(r.token.byteValue) + } else { + str = string(r.token.byteValue[:maxErrorContextLen-3]) + "..." + } + r.fatalError = &LexerError{ + Reason: fmt.Sprintf("expected %s", expected), + Offset: r.pos, + Data: str, + } +} + +func (r *Lexer) GetPos() int { + return r.pos } // Delim consumes a token and verifies that it is the given delimiter. func (r *Lexer) Delim(c byte) { if r.token.kind == tokenUndef && r.Ok() { - r.fetchToken() + r.FetchToken() } + if !r.Ok() || r.token.delimValue != c { + r.consume() // errInvalidToken can change token if UseMultipleErrors is enabled. r.errInvalidToken(string([]byte{c})) + } else { + r.consume() } - r.consume() } // IsDelim returns true if there was no scanning error and next token is the given delimiter. func (r *Lexer) IsDelim(c byte) bool { if r.token.kind == tokenUndef && r.Ok() { - r.fetchToken() + r.FetchToken() } return !r.Ok() || r.token.delimValue == c } @@ -446,7 +483,7 @@ func (r *Lexer) IsDelim(c byte) bool { // Null verifies that the next token is null and consumes it. func (r *Lexer) Null() { if r.token.kind == tokenUndef && r.Ok() { - r.fetchToken() + r.FetchToken() } if !r.Ok() || r.token.kind != tokenNull { r.errInvalidToken("null") @@ -457,7 +494,7 @@ func (r *Lexer) Null() { // IsNull returns true if the next token is a null keyword. func (r *Lexer) IsNull() bool { if r.token.kind == tokenUndef && r.Ok() { - r.fetchToken() + r.FetchToken() } return r.Ok() && r.token.kind == tokenNull } @@ -465,7 +502,7 @@ func (r *Lexer) IsNull() bool { // Skip skips a single token. func (r *Lexer) Skip() { if r.token.kind == tokenUndef && r.Ok() { - r.fetchToken() + r.FetchToken() } r.consume() } @@ -476,7 +513,6 @@ func (r *Lexer) Skip() { // Note: no syntax validation is performed on the skipped data. 
func (r *Lexer) SkipRecursive() { r.scanToken() - var start, end byte if r.token.delimValue == '{' { @@ -505,7 +541,7 @@ func (r *Lexer) SkipRecursive() { return } case c == '\\' && inQuotes: - wasEscape = true + wasEscape = !wasEscape continue case c == '"' && inQuotes: inQuotes = wasEscape @@ -515,7 +551,11 @@ func (r *Lexer) SkipRecursive() { wasEscape = false } r.pos = len(r.Data) - r.err = io.EOF + r.fatalError = &LexerError{ + Reason: "EOF reached while skipping array/object or token", + Offset: r.pos, + Data: string(r.Data[r.pos:]), + } } // Raw fetches the next item recursively as a data slice @@ -527,48 +567,107 @@ func (r *Lexer) Raw() []byte { return r.Data[r.start:r.pos] } -// UnsafeString returns the string value if the token is a string literal. -// -// Warning: returned string may point to the input buffer, so the string should not outlive -// the input buffer. Intended pattern of usage is as an argument to a switch statement. -func (r *Lexer) UnsafeString() string { +// IsStart returns whether the lexer is positioned at the start +// of an input string. +func (r *Lexer) IsStart() bool { + return r.pos == 0 +} + +// Consumed reads all remaining bytes from the input, publishing an error if +// there is anything but whitespace remaining. +func (r *Lexer) Consumed() { + if r.pos > len(r.Data) || !r.Ok() { + return + } + + for _, c := range r.Data[r.pos:] { + if c != ' ' && c != '\t' && c != '\r' && c != '\n' { + r.AddError(&LexerError{ + Reason: "invalid character '" + string(c) + "' after top-level value", + Offset: r.pos, + Data: string(r.Data[r.pos:]), + }) + return + } + + r.pos++ + r.start++ + } +} + +func (r *Lexer) unsafeString() (string, []byte) { if r.token.kind == tokenUndef && r.Ok() { - r.fetchToken() + r.FetchToken() } if !r.Ok() || r.token.kind != tokenString { r.errInvalidToken("string") - return "" + return "", nil } - + bytes := r.token.byteValue ret := bytesToStr(r.token.byteValue) r.consume() + return ret, bytes +} + +// UnsafeString returns the string value if the token is a string literal. +// +// Warning: returned string may point to the input buffer, so the string should not outlive +// the input buffer. Intended pattern of usage is as an argument to a switch statement. +func (r *Lexer) UnsafeString() string { + ret, _ := r.unsafeString() + return ret +} + +// UnsafeBytes returns the byte slice if the token is a string literal. +func (r *Lexer) UnsafeBytes() []byte { + _, ret := r.unsafeString() return ret } // String reads a string literal. func (r *Lexer) String() string { if r.token.kind == tokenUndef && r.Ok() { - r.fetchToken() + r.FetchToken() } if !r.Ok() || r.token.kind != tokenString { r.errInvalidToken("string") return "" - } ret := string(r.token.byteValue) r.consume() return ret } +// Bytes reads a string literal and base64 decodes it into a byte slice. +func (r *Lexer) Bytes() []byte { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + if !r.Ok() || r.token.kind != tokenString { + r.errInvalidToken("string") + return nil + } + ret := make([]byte, base64.StdEncoding.DecodedLen(len(r.token.byteValue))) + len, err := base64.StdEncoding.Decode(ret, r.token.byteValue) + if err != nil { + r.fatalError = &LexerError{ + Reason: err.Error(), + } + return nil + } + + r.consume() + return ret[:len] +} + // Bool reads a true or false boolean keyword. 
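
Taken together, the lexer changes split error handling in two: syntax problems stay fatal (fatalError), while value-level problems can be recorded as non-fatal LexerErrors when UseMultipleErrors is set, and are retrieved through GetNonFatalErrors further down in this file. A rough sketch of driving the lexer by hand; parseQuotedInt and its sample input are illustrative, not part of the vendored code:

package example

import (
    "github.com/mailru/easyjson/jlexer"
)

// parseQuotedInt decodes a JSON value like "123" into an int32. With
// UseMultipleErrors set, a malformed number is recorded with its offset and
// offending data instead of aborting the whole parse.
func parseQuotedInt(data []byte) (int32, []*jlexer.LexerError, error) {
    in := jlexer.Lexer{Data: data, UseMultipleErrors: true}
    n := in.Int32Str() // reads a string-wrapped integer; defined later in this file
    in.Consumed()      // added above: reports anything but trailing whitespace
    return n, in.GetNonFatalErrors(), in.Error()
}
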
func (r *Lexer) Bool() bool { if r.token.kind == tokenUndef && r.Ok() { - r.fetchToken() + r.FetchToken() } if !r.Ok() || r.token.kind != tokenBool { r.errInvalidToken("bool") return false - } ret := r.token.boolValue r.consume() @@ -577,12 +676,11 @@ func (r *Lexer) Bool() bool { func (r *Lexer) number() string { if r.token.kind == tokenUndef && r.Ok() { - r.fetchToken() + r.FetchToken() } if !r.Ok() || r.token.kind != tokenNumber { r.errInvalidToken("number") return "" - } ret := bytesToStr(r.token.byteValue) r.consume() @@ -597,9 +695,11 @@ func (r *Lexer) Uint8() uint8 { n, err := strconv.ParseUint(s, 10, 8) if err != nil { - r.err = &LexerError{ + r.addNonfatalError(&LexerError{ + Offset: r.start, Reason: err.Error(), - } + Data: s, + }) } return uint8(n) } @@ -612,9 +712,11 @@ func (r *Lexer) Uint16() uint16 { n, err := strconv.ParseUint(s, 10, 16) if err != nil { - r.err = &LexerError{ + r.addNonfatalError(&LexerError{ + Offset: r.start, Reason: err.Error(), - } + Data: s, + }) } return uint16(n) } @@ -627,9 +729,11 @@ func (r *Lexer) Uint32() uint32 { n, err := strconv.ParseUint(s, 10, 32) if err != nil { - r.err = &LexerError{ + r.addNonfatalError(&LexerError{ + Offset: r.start, Reason: err.Error(), - } + Data: s, + }) } return uint32(n) } @@ -642,9 +746,11 @@ func (r *Lexer) Uint64() uint64 { n, err := strconv.ParseUint(s, 10, 64) if err != nil { - r.err = &LexerError{ + r.addNonfatalError(&LexerError{ + Offset: r.start, Reason: err.Error(), - } + Data: s, + }) } return n } @@ -661,9 +767,11 @@ func (r *Lexer) Int8() int8 { n, err := strconv.ParseInt(s, 10, 8) if err != nil { - r.err = &LexerError{ + r.addNonfatalError(&LexerError{ + Offset: r.start, Reason: err.Error(), - } + Data: s, + }) } return int8(n) } @@ -676,9 +784,11 @@ func (r *Lexer) Int16() int16 { n, err := strconv.ParseInt(s, 10, 16) if err != nil { - r.err = &LexerError{ + r.addNonfatalError(&LexerError{ + Offset: r.start, Reason: err.Error(), - } + Data: s, + }) } return int16(n) } @@ -691,9 +801,11 @@ func (r *Lexer) Int32() int32 { n, err := strconv.ParseInt(s, 10, 32) if err != nil { - r.err = &LexerError{ + r.addNonfatalError(&LexerError{ + Offset: r.start, Reason: err.Error(), - } + Data: s, + }) } return int32(n) } @@ -706,9 +818,11 @@ func (r *Lexer) Int64() int64 { n, err := strconv.ParseInt(s, 10, 64) if err != nil { - r.err = &LexerError{ + r.addNonfatalError(&LexerError{ + Offset: r.start, Reason: err.Error(), - } + Data: s, + }) } return n } @@ -718,61 +832,69 @@ func (r *Lexer) Int() int { } func (r *Lexer) Uint8Str() uint8 { - s := r.UnsafeString() + s, b := r.unsafeString() if !r.Ok() { return 0 } n, err := strconv.ParseUint(s, 10, 8) if err != nil { - r.err = &LexerError{ + r.addNonfatalError(&LexerError{ + Offset: r.start, Reason: err.Error(), - } + Data: string(b), + }) } return uint8(n) } func (r *Lexer) Uint16Str() uint16 { - s := r.UnsafeString() + s, b := r.unsafeString() if !r.Ok() { return 0 } n, err := strconv.ParseUint(s, 10, 16) if err != nil { - r.err = &LexerError{ + r.addNonfatalError(&LexerError{ + Offset: r.start, Reason: err.Error(), - } + Data: string(b), + }) } return uint16(n) } func (r *Lexer) Uint32Str() uint32 { - s := r.UnsafeString() + s, b := r.unsafeString() if !r.Ok() { return 0 } n, err := strconv.ParseUint(s, 10, 32) if err != nil { - r.err = &LexerError{ + r.addNonfatalError(&LexerError{ + Offset: r.start, Reason: err.Error(), - } + Data: string(b), + }) } return uint32(n) } func (r *Lexer) Uint64Str() uint64 { - s := r.UnsafeString() + s, b := r.unsafeString() if 
!r.Ok() { return 0 } n, err := strconv.ParseUint(s, 10, 64) if err != nil { - r.err = &LexerError{ + r.addNonfatalError(&LexerError{ + Offset: r.start, Reason: err.Error(), - } + Data: string(b), + }) } return n } @@ -782,61 +904,69 @@ func (r *Lexer) UintStr() uint { } func (r *Lexer) Int8Str() int8 { - s := r.UnsafeString() + s, b := r.unsafeString() if !r.Ok() { return 0 } n, err := strconv.ParseInt(s, 10, 8) if err != nil { - r.err = &LexerError{ + r.addNonfatalError(&LexerError{ + Offset: r.start, Reason: err.Error(), - } + Data: string(b), + }) } return int8(n) } func (r *Lexer) Int16Str() int16 { - s := r.UnsafeString() + s, b := r.unsafeString() if !r.Ok() { return 0 } n, err := strconv.ParseInt(s, 10, 16) if err != nil { - r.err = &LexerError{ + r.addNonfatalError(&LexerError{ + Offset: r.start, Reason: err.Error(), - } + Data: string(b), + }) } return int16(n) } func (r *Lexer) Int32Str() int32 { - s := r.UnsafeString() + s, b := r.unsafeString() if !r.Ok() { return 0 } n, err := strconv.ParseInt(s, 10, 32) if err != nil { - r.err = &LexerError{ + r.addNonfatalError(&LexerError{ + Offset: r.start, Reason: err.Error(), - } + Data: string(b), + }) } return int32(n) } func (r *Lexer) Int64Str() int64 { - s := r.UnsafeString() + s, b := r.unsafeString() if !r.Ok() { return 0 } n, err := strconv.ParseInt(s, 10, 64) if err != nil { - r.err = &LexerError{ + r.addNonfatalError(&LexerError{ + Offset: r.start, Reason: err.Error(), - } + Data: string(b), + }) } return n } @@ -853,9 +983,11 @@ func (r *Lexer) Float32() float32 { n, err := strconv.ParseFloat(s, 32) if err != nil { - r.err = &LexerError{ + r.addNonfatalError(&LexerError{ + Offset: r.start, Reason: err.Error(), - } + Data: s, + }) } return float32(n) } @@ -868,27 +1000,53 @@ func (r *Lexer) Float64() float64 { n, err := strconv.ParseFloat(s, 64) if err != nil { - r.err = &LexerError{ + r.addNonfatalError(&LexerError{ + Offset: r.start, Reason: err.Error(), - } + Data: s, + }) } return n } func (r *Lexer) Error() error { - return r.err + return r.fatalError } func (r *Lexer) AddError(e error) { - if r.err == nil { - r.err = e + if r.fatalError == nil { + r.fatalError = e } } +func (r *Lexer) AddNonFatalError(e error) { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Data: string(r.Data[r.start:r.pos]), + Reason: e.Error(), + }) +} + +func (r *Lexer) addNonfatalError(err *LexerError) { + if r.UseMultipleErrors { + // We don't want to add errors with the same offset. + if len(r.multipleErrors) != 0 && r.multipleErrors[len(r.multipleErrors)-1].Offset == err.Offset { + return + } + r.multipleErrors = append(r.multipleErrors, err) + return + } + r.fatalError = err +} + +func (r *Lexer) GetNonFatalErrors() []*LexerError { + return r.multipleErrors +} + // Interface fetches an interface{} analogous to the 'encoding/json' package. func (r *Lexer) Interface() interface{} { if r.token.kind == tokenUndef && r.Ok() { - r.fetchToken() + r.FetchToken() } if !r.Ok() { diff --git a/vendor/github.com/mailru/easyjson/jwriter/writer.go b/vendor/github.com/mailru/easyjson/jwriter/writer.go index 907675f9ca51..7b55293a0f86 100644 --- a/vendor/github.com/mailru/easyjson/jwriter/writer.go +++ b/vendor/github.com/mailru/easyjson/jwriter/writer.go @@ -2,6 +2,7 @@ package jwriter import ( + "encoding/base64" "io" "strconv" "unicode/utf8" @@ -9,10 +10,22 @@ import ( "github.com/mailru/easyjson/buffer" ) +// Flags describe various encoding options. 
The behavior may be actually implemented in the encoder, but +// Flags field in Writer is used to set and pass them around. +type Flags int + +const ( + NilMapAsEmpty Flags = 1 << iota // Encode nil map as '{}' rather than 'null'. + NilSliceAsEmpty // Encode nil slice as '[]' rather than 'null'. +) + // Writer is a JSON writer. type Writer struct { - Error error - Buffer buffer.Buffer + Flags Flags + + Error error + Buffer buffer.Buffer + NoEscapeHTML bool } // Size returns the size of the data that was written out. @@ -25,13 +38,24 @@ func (w *Writer) DumpTo(out io.Writer) (written int, err error) { return w.Buffer.DumpTo(out) } -// BuildBytes returns writer data as a single byte slice. -func (w *Writer) BuildBytes() ([]byte, error) { +// BuildBytes returns writer data as a single byte slice. You can optionally provide one byte slice +// as argument that it will try to reuse. +func (w *Writer) BuildBytes(reuse ...[]byte) ([]byte, error) { + if w.Error != nil { + return nil, w.Error + } + + return w.Buffer.BuildBytes(reuse...), nil +} + +// ReadCloser returns an io.ReadCloser that can be used to read the data. +// ReadCloser also resets the buffer. +func (w *Writer) ReadCloser() (io.ReadCloser, error) { if w.Error != nil { return nil, w.Error } - return w.Buffer.BuildBytes(), nil + return w.Buffer.ReadCloser(), nil } // RawByte appends raw binary data to the buffer. @@ -44,7 +68,7 @@ func (w *Writer) RawString(s string) { w.Buffer.AppendString(s) } -// RawByte appends raw binary data to the buffer or sets the error if it is given. Useful for +// Raw appends raw binary data to the buffer or sets the error if it is given. Useful for // calling with results of MarshalJSON-like functions. func (w *Writer) Raw(data []byte, err error) { switch { @@ -59,6 +83,34 @@ func (w *Writer) Raw(data []byte, err error) { } } +// RawText encloses raw binary data in quotes and appends in to the buffer. +// Useful for calling with results of MarshalText-like functions. +func (w *Writer) RawText(data []byte, err error) { + switch { + case w.Error != nil: + return + case err != nil: + w.Error = err + case len(data) > 0: + w.String(string(data)) + default: + w.RawString("null") + } +} + +// Base64Bytes appends data to the buffer after base64 encoding it +func (w *Writer) Base64Bytes(data []byte) { + if data == nil { + w.Buffer.AppendString("null") + return + } + w.Buffer.AppendByte('"') + dst := make([]byte, base64.StdEncoding.EncodedLen(len(data))) + base64.StdEncoding.Encode(dst, data) + w.Buffer.AppendBytes(dst) + w.Buffer.AppendByte('"') +} + func (w *Writer) Uint8(n uint8) { w.Buffer.EnsureSpace(3) w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) @@ -200,6 +252,16 @@ func (w *Writer) Bool(v bool) { const chars = "0123456789abcdef" +func isNotEscapedSingleChar(c byte, escapeHTML bool) bool { + // Note: might make sense to use a table if there are more chars to escape. With 4 chars + // it benchmarks the same. 
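
On the writer side, the additions above mostly give callers more control over output: Flags for nil map/slice handling, NoEscapeHTML to skip escaping of <, > and &, BuildBytes with an optional reuse buffer, and Base64Bytes for []byte fields. A small hand-driven sketch; the field names and values are illustrative only:

package example

import "github.com/mailru/easyjson/jwriter"

// writeDoc emits {"html":"<b>hi</b>","blob":"AQI="} without HTML escaping,
// reusing a caller-provided buffer for the final slice if it is big enough.
func writeDoc(scratch []byte) ([]byte, error) {
    var w jwriter.Writer
    w.NoEscapeHTML = true
    w.RawByte('{')
    w.String("html")
    w.RawByte(':')
    w.String("<b>hi</b>")
    w.RawByte(',')
    w.String("blob")
    w.RawByte(':')
    w.Base64Bytes([]byte{0x01, 0x02})
    w.RawByte('}')
    return w.BuildBytes(scratch)
}
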
+ if escapeHTML { + return c != '<' && c != '>' && c != '&' && c != '\\' && c != '"' && c >= 0x20 && c < utf8.RuneSelf + } else { + return c != '\\' && c != '"' && c >= 0x20 && c < utf8.RuneSelf + } +} + func (w *Writer) String(s string) { w.Buffer.AppendByte('"') @@ -209,39 +271,32 @@ func (w *Writer) String(s string) { p := 0 // last non-escape symbol for i := 0; i < len(s); { - // single-with character - if c := s[i]; c < utf8.RuneSelf { - var escape byte + c := s[i] + + if isNotEscapedSingleChar(c, !w.NoEscapeHTML) { + // single-width character, no escaping is required + i++ + continue + } else if c < utf8.RuneSelf { + // single-with character, need to escape + w.Buffer.AppendString(s[p:i]) switch c { case '\t': - escape = 't' + w.Buffer.AppendString(`\t`) case '\r': - escape = 'r' + w.Buffer.AppendString(`\r`) case '\n': - escape = 'n' + w.Buffer.AppendString(`\n`) case '\\': - escape = '\\' + w.Buffer.AppendString(`\\`) case '"': - escape = '"' - case '<', '>': - // do nothing + w.Buffer.AppendString(`\"`) default: - if c >= 0x20 { - // no escaping is required - i++ - continue - } - } - if escape != 0 { - w.Buffer.AppendString(s[p:i]) - w.Buffer.AppendByte('\\') - w.Buffer.AppendByte(escape) - } else { - w.Buffer.AppendString(s[p:i]) w.Buffer.AppendString(`\u00`) w.Buffer.AppendByte(chars[c>>4]) w.Buffer.AppendByte(chars[c&0xf]) } + i++ p = i continue diff --git a/vendor/github.com/mindprince/gonvml/LICENSE b/vendor/github.com/mindprince/gonvml/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/mindprince/gonvml/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/mindprince/gonvml/Makefile b/vendor/github.com/mindprince/gonvml/Makefile new file mode 100644 index 000000000000..b329e964d41f --- /dev/null +++ b/vendor/github.com/mindprince/gonvml/Makefile @@ -0,0 +1,20 @@ +# Copyright 2017 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +PKG=github.com/mindprince/gonvml + +.PHONY: build +build: + docker run -v $(shell pwd):/go/src/$(PKG) --workdir=/go/src/$(PKG) golang:1.8 go build cmd/example/example.go + diff --git a/vendor/github.com/mindprince/gonvml/NVML_NOTICE b/vendor/github.com/mindprince/gonvml/NVML_NOTICE new file mode 100644 index 000000000000..3fc131329d8a --- /dev/null +++ b/vendor/github.com/mindprince/gonvml/NVML_NOTICE @@ -0,0 +1,32 @@ +Copyright 1993-2016 NVIDIA Corporation. All rights reserved. + +NOTICE TO USER: + +This source code is subject to NVIDIA ownership rights under U.S. and +international Copyright laws. Users and possessors of this source code +are hereby granted a nonexclusive, royalty-free license to use this code +in individual and commercial software. + +NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE +CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR +IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH +REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. +IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, +OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE +OR PERFORMANCE OF THIS SOURCE CODE. + +U.S. Government End Users. This source code is a "commercial item" as +that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of +"commercial computer software" and "commercial computer software +documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) +and is provided to the U.S. Government only as a commercial end item. +Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through +227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the +source code with only those rights set forth herein. + +Any use of this source code in individual and commercial software must +include, in the user documentation and internal comments to the code, +the above Disclaimer and U.S. Government End Users Notice. diff --git a/vendor/github.com/mindprince/gonvml/README.md b/vendor/github.com/mindprince/gonvml/README.md new file mode 100644 index 000000000000..91bbf5740789 --- /dev/null +++ b/vendor/github.com/mindprince/gonvml/README.md @@ -0,0 +1,19 @@ +Go Bindings for NVML +-------------------- + +[NVML or NVIDIA Management +Library](https://developer.nvidia.com/nvidia-management-library-nvml) is a +C-based API that can be used for monitoring NVIDIA GPU devices. It's closed +source but can be downloaded as part of the [GPU Deployment +Kit](https://developer.nvidia.com/gpu-deployment-kit). + +The [NVML API +Reference](http://docs.nvidia.com/deploy/nvml-api/nvml-api-reference.html) +describe various methods that are available as part of NVML. + +The `nvml.h` file is included in this repository so that we don't depend on +the presence of NVML in the build environment. + +The `bindings.go` file is the cgo bridge which calls the NVML functions. The +cgo preamble in `bindings.go` uses `dlopen` to dynamically load NVML and makes +its functions available. diff --git a/vendor/github.com/mindprince/gonvml/bindings.go b/vendor/github.com/mindprince/gonvml/bindings.go new file mode 100644 index 000000000000..ae1711d85e10 --- /dev/null +++ b/vendor/github.com/mindprince/gonvml/bindings.go @@ -0,0 +1,431 @@ +/* +Copyright 2017 Google Inc. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package gonvml + +// #cgo LDFLAGS: -ldl +/* +#include <stddef.h> +#include <dlfcn.h> +#include <stdlib.h> + +#include "nvml.h" + +// nvmlHandle is the handle for dynamically loaded libnvidia-ml.so +void *nvmlHandle; + +nvmlReturn_t (*nvmlInitFunc)(void); + +nvmlReturn_t (*nvmlShutdownFunc)(void); + +const char* (*nvmlErrorStringFunc)(nvmlReturn_t result); +const char* nvmlErrorString(nvmlReturn_t result) { + if (nvmlErrorStringFunc == NULL) { + return "nvmlErrorString Function Not Found"; + } + return nvmlErrorStringFunc(result); +} + +nvmlReturn_t (*nvmlSystemGetDriverVersionFunc)(char *version, unsigned int length); +nvmlReturn_t nvmlSystemGetDriverVersion(char *version, unsigned int length) { + if (nvmlSystemGetDriverVersionFunc == NULL) { + return NVML_ERROR_FUNCTION_NOT_FOUND; + } + return nvmlSystemGetDriverVersionFunc(version, length); +} + +nvmlReturn_t (*nvmlDeviceGetCountFunc)(unsigned int *deviceCount); +nvmlReturn_t nvmlDeviceGetCount(unsigned int *deviceCount) { + if (nvmlDeviceGetCountFunc == NULL) { + return NVML_ERROR_FUNCTION_NOT_FOUND; + } + return nvmlDeviceGetCountFunc(deviceCount); +} + +nvmlReturn_t (*nvmlDeviceGetHandleByIndexFunc)(unsigned int index, nvmlDevice_t *device); +nvmlReturn_t nvmlDeviceGetHandleByIndex(unsigned int index, nvmlDevice_t *device) { + if (nvmlDeviceGetHandleByIndexFunc == NULL) { + return NVML_ERROR_FUNCTION_NOT_FOUND; + } + return nvmlDeviceGetHandleByIndexFunc(index, device); +} + +nvmlReturn_t (*nvmlDeviceGetMinorNumberFunc)(nvmlDevice_t device, unsigned int *minorNumber); +nvmlReturn_t nvmlDeviceGetMinorNumber(nvmlDevice_t device, unsigned int *minorNumber) { + if (nvmlDeviceGetMinorNumberFunc == NULL) { + return NVML_ERROR_FUNCTION_NOT_FOUND; + } + return nvmlDeviceGetMinorNumberFunc(device, minorNumber); +} + +nvmlReturn_t (*nvmlDeviceGetUUIDFunc)(nvmlDevice_t device, char *uuid, unsigned int length); +nvmlReturn_t nvmlDeviceGetUUID(nvmlDevice_t device, char *uuid, unsigned int length) { + if (nvmlDeviceGetUUIDFunc == NULL) { + return NVML_ERROR_FUNCTION_NOT_FOUND; + } + return nvmlDeviceGetUUIDFunc(device, uuid, length); +} + +nvmlReturn_t (*nvmlDeviceGetNameFunc)(nvmlDevice_t device, char *name, unsigned int length); +nvmlReturn_t nvmlDeviceGetName(nvmlDevice_t device, char *name, unsigned int length) { + if (nvmlDeviceGetNameFunc == NULL) { + return NVML_ERROR_FUNCTION_NOT_FOUND; + } + return nvmlDeviceGetNameFunc(device, name, length); +} + +nvmlReturn_t (*nvmlDeviceGetMemoryInfoFunc)(nvmlDevice_t device, nvmlMemory_t *memory); +nvmlReturn_t nvmlDeviceGetMemoryInfo(nvmlDevice_t device, nvmlMemory_t *memory) { + if (nvmlDeviceGetMemoryInfoFunc == NULL) { + return NVML_ERROR_FUNCTION_NOT_FOUND; + } + return nvmlDeviceGetMemoryInfoFunc(device, memory); +} + +nvmlReturn_t (*nvmlDeviceGetUtilizationRatesFunc)(nvmlDevice_t device, nvmlUtilization_t *utilization); +nvmlReturn_t nvmlDeviceGetUtilizationRates(nvmlDevice_t device, nvmlUtilization_t *utilization) { + if (nvmlDeviceGetUtilizationRatesFunc == NULL) { + return
NVML_ERROR_FUNCTION_NOT_FOUND; + } + return nvmlDeviceGetUtilizationRatesFunc(device, utilization); +} + +nvmlReturn_t (*nvmlDeviceGetPowerUsageFunc)(nvmlDevice_t device, unsigned int *power); +nvmlReturn_t nvmlDeviceGetPowerUsage(nvmlDevice_t device, unsigned int *power) { + if (nvmlDeviceGetPowerUsageFunc == NULL) { + return NVML_ERROR_FUNCTION_NOT_FOUND; + } + return nvmlDeviceGetPowerUsageFunc(device, power); +} + +nvmlReturn_t (*nvmlDeviceGetSamplesFunc)(nvmlDevice_t device, nvmlSamplingType_t type, unsigned long long lastSeenTimeStamp, nvmlValueType_t *sampleValType, unsigned int *sampleCount, nvmlSample_t *samples); + +// Loads the "libnvidia-ml.so.1" shared library. +// Loads all symbols needed and initializes NVML. +// Call this before calling any other methods. +nvmlReturn_t nvmlInit_dl(void) { + nvmlHandle = dlopen("libnvidia-ml.so.1", RTLD_LAZY); + if (nvmlHandle == NULL) { + return NVML_ERROR_LIBRARY_NOT_FOUND; + } + nvmlInitFunc = dlsym(nvmlHandle, "nvmlInit_v2"); + if (nvmlInitFunc == NULL) { + return NVML_ERROR_FUNCTION_NOT_FOUND; + } + nvmlShutdownFunc = dlsym(nvmlHandle, "nvmlShutdown"); + if (nvmlShutdownFunc == NULL) { + return NVML_ERROR_FUNCTION_NOT_FOUND; + } + nvmlErrorStringFunc = dlsym(nvmlHandle, "nvmlErrorString"); + if (nvmlErrorStringFunc == NULL) { + return NVML_ERROR_FUNCTION_NOT_FOUND; + } + nvmlSystemGetDriverVersionFunc = dlsym(nvmlHandle, "nvmlSystemGetDriverVersion"); + if (nvmlSystemGetDriverVersionFunc == NULL) { + return NVML_ERROR_FUNCTION_NOT_FOUND; + } + nvmlDeviceGetCountFunc = dlsym(nvmlHandle, "nvmlDeviceGetCount_v2"); + if (nvmlDeviceGetCountFunc == NULL) { + return NVML_ERROR_FUNCTION_NOT_FOUND; + } + nvmlDeviceGetHandleByIndexFunc = dlsym(nvmlHandle, "nvmlDeviceGetHandleByIndex_v2"); + if (nvmlDeviceGetHandleByIndexFunc == NULL) { + return NVML_ERROR_FUNCTION_NOT_FOUND; + } + nvmlDeviceGetMinorNumberFunc = dlsym(nvmlHandle, "nvmlDeviceGetMinorNumber"); + if (nvmlDeviceGetMinorNumberFunc == NULL) { + return NVML_ERROR_FUNCTION_NOT_FOUND; + } + nvmlDeviceGetUUIDFunc = dlsym(nvmlHandle, "nvmlDeviceGetUUID"); + if (nvmlDeviceGetUUIDFunc == NULL) { + return NVML_ERROR_FUNCTION_NOT_FOUND; + } + nvmlDeviceGetNameFunc = dlsym(nvmlHandle, "nvmlDeviceGetName"); + if (nvmlDeviceGetNameFunc == NULL) { + return NVML_ERROR_FUNCTION_NOT_FOUND; + } + nvmlDeviceGetMemoryInfoFunc = dlsym(nvmlHandle, "nvmlDeviceGetMemoryInfo"); + if (nvmlDeviceGetMemoryInfoFunc == NULL) { + return NVML_ERROR_FUNCTION_NOT_FOUND; + } + nvmlDeviceGetUtilizationRatesFunc = dlsym(nvmlHandle, "nvmlDeviceGetUtilizationRates"); + if (nvmlDeviceGetUtilizationRatesFunc == NULL) { + return NVML_ERROR_FUNCTION_NOT_FOUND; + } + nvmlDeviceGetPowerUsageFunc = dlsym(nvmlHandle, "nvmlDeviceGetPowerUsage"); + if (nvmlDeviceGetPowerUsageFunc == NULL) { + return NVML_ERROR_FUNCTION_NOT_FOUND; + } + nvmlDeviceGetSamplesFunc = dlsym(nvmlHandle, "nvmlDeviceGetSamples"); + if (nvmlDeviceGetSamplesFunc == NULL) { + return NVML_ERROR_FUNCTION_NOT_FOUND; + } + nvmlReturn_t result = nvmlInitFunc(); + if (result != NVML_SUCCESS) { + dlclose(nvmlHandle); + nvmlHandle = NULL; + return result; + } + return NVML_SUCCESS; +} + +// Shuts down NVML and decrements the reference count on the dynamically loaded +// "libnvidia-ml.so.1" library. +// Call this once NVML is no longer being used. 
+nvmlReturn_t nvmlShutdown_dl(void) { + if (nvmlHandle == NULL) { + return NVML_SUCCESS; + } + if (nvmlShutdownFunc == NULL) { + return NVML_ERROR_FUNCTION_NOT_FOUND; + } + nvmlReturn_t r = nvmlShutdownFunc(); + if (r != NVML_SUCCESS) { + return r; + } + return (dlclose(nvmlHandle) ? NVML_ERROR_UNKNOWN : NVML_SUCCESS); +} + +// This function is here because the API provided by NVML is not very user +// friendly. This function can be used to get average utilization.gpu or +// power.draw. +// +// `device`: The identifier of the target device. +// `type`: Type of sampling event. Only NVML_TOTAL_POWER_SAMPLES and NVML_GPU_UTILIZATION_SAMPLES are supported. +// `lastSeenTimeStamp`: Return average using samples with timestamp greater than this timestamp. Unix epoch in microseconds. +// `averageUsage`: Reference in which average is returned. +// +// In my experiments, I found that NVML_GPU_UTILIZATION_SAMPLES buffer stores +// 100 samples that are uniformly spread with ~6 samples per second. So the +// buffer stores last ~16s of data. +// NVML_TOTAL_POWER_SAMPLES buffer stores 120 samples, but in different runs I +// noticed them to be non-uniformly separated. Sometimes 120 samples only +// consisted of 10s of data and sometimes they were spread over 60s. +// +nvmlReturn_t nvmlDeviceGetAverageUsage(nvmlDevice_t device, nvmlSamplingType_t type, unsigned long long lastSeenTimeStamp, unsigned int* averageUsage) { + if (nvmlHandle == NULL) { + return NVML_ERROR_LIBRARY_NOT_FOUND; + } + if (nvmlDeviceGetSamplesFunc == NULL) { + return NVML_ERROR_FUNCTION_NOT_FOUND; + } + + // We don't really use this because both the metrics we support + // averagePowerUsage and averageGPUUtilization are unsigned int. + nvmlValueType_t sampleValType; + + // This will be set to the number of samples that can be queried. We would + // need to allocate an array of this size to store the samples. + unsigned int sampleCount; + + // Invoking this method with `samples` set to NULL sets the sampleCount. + nvmlReturn_t r = nvmlDeviceGetSamplesFunc(device, type, lastSeenTimeStamp, &sampleValType, &sampleCount, NULL); + if (r != NVML_SUCCESS) { + return r; + } + + // Allocate memory to store sampleCount samples. + // In my experiments, the sampleCount at this stage was always 120 for + // NVML_TOTAL_POWER_SAMPLES and 100 for NVML_GPU_UTILIZATION_SAMPLES + nvmlSample_t* samples = (nvmlSample_t*) malloc(sampleCount * sizeof(nvmlSample_t)); + + r = nvmlDeviceGetSamplesFunc(device, type, lastSeenTimeStamp, &sampleValType, &sampleCount, samples); + if (r != NVML_SUCCESS) { + free(samples); + return r; + } + + int i = 0; + unsigned int sum = 0; + for (; i < sampleCount; i++) { + sum += samples[i].sampleValue.uiVal; + } + *averageUsage = sum/sampleCount; + + free(samples); + return r; +} +*/ +import "C" + +import ( + "errors" + "fmt" + "time" +) + +const ( + szDriver = C.NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE + szName = C.NVML_DEVICE_NAME_BUFFER_SIZE + szUUID = C.NVML_DEVICE_UUID_BUFFER_SIZE +) + +var errLibraryNotLoaded = errors.New("could not load NVML library") + +// Initialize initializes NVML. +// Call this before calling any other methods. +func Initialize() error { + return errorString(C.nvmlInit_dl()) +} + +// Shutdown shuts down NVML. +// Call this once NVML is no longer being used. +func Shutdown() error { + return errorString(C.nvmlShutdown_dl()) +} + +// errorString takes a nvmlReturn_t and converts it into a golang error. +// It uses a nvml method to convert to a user friendly error message.
+func errorString(ret C.nvmlReturn_t) error { + if ret == C.NVML_SUCCESS { + return nil + } + // We need to special case this because if nvml library is not found + // nvmlErrorString() method will not work. + if ret == C.NVML_ERROR_LIBRARY_NOT_FOUND || C.nvmlHandle == nil { + return errLibraryNotLoaded + } + err := C.GoString(C.nvmlErrorString(ret)) + return fmt.Errorf("nvml: %v", err) +} + +// SystemDriverVersion returns the driver version on the system. +func SystemDriverVersion() (string, error) { + if C.nvmlHandle == nil { + return "", errLibraryNotLoaded + } + var driver [szDriver]C.char + r := C.nvmlSystemGetDriverVersion(&driver[0], szDriver) + return C.GoString(&driver[0]), errorString(r) +} + +// DeviceCount returns the number of nvidia devices on the system. +func DeviceCount() (uint, error) { + if C.nvmlHandle == nil { + return 0, errLibraryNotLoaded + } + var n C.uint + r := C.nvmlDeviceGetCount(&n) + return uint(n), errorString(r) +} + +// Device is the handle for the device. +// This handle is obtained by calling DeviceHandleByIndex(). +type Device struct { + dev C.nvmlDevice_t +} + +// DeviceHandleByIndex returns the device handle for a particular index. +// The indices range from 0 to DeviceCount()-1. The order in which NVML +// enumerates devices has no guarantees of consistency between reboots. +func DeviceHandleByIndex(idx uint) (Device, error) { + if C.nvmlHandle == nil { + return Device{}, errLibraryNotLoaded + } + var dev C.nvmlDevice_t + r := C.nvmlDeviceGetHandleByIndex(C.uint(idx), &dev) + return Device{dev}, errorString(r) +} + +// MinorNumber returns the minor number for the device. +// The minor number for the device is such that the Nvidia device node +// file for each GPU will have the form /dev/nvidia[minor number]. +func (d Device) MinorNumber() (uint, error) { + if C.nvmlHandle == nil { + return 0, errLibraryNotLoaded + } + var n C.uint + r := C.nvmlDeviceGetMinorNumber(d.dev, &n) + return uint(n), errorString(r) +} + +// UUID returns the globally unique immutable UUID associated with this device. +func (d Device) UUID() (string, error) { + if C.nvmlHandle == nil { + return "", errLibraryNotLoaded + } + var uuid [szUUID]C.char + r := C.nvmlDeviceGetUUID(d.dev, &uuid[0], szUUID) + return C.GoString(&uuid[0]), errorString(r) +} + +// Name returns the product name of the device. +func (d Device) Name() (string, error) { + if C.nvmlHandle == nil { + return "", errLibraryNotLoaded + } + var name [szName]C.char + r := C.nvmlDeviceGetName(d.dev, &name[0], szName) + return C.GoString(&name[0]), errorString(r) +} + +// MemoryInfo returns the total and used memory (in bytes) of the device. +func (d Device) MemoryInfo() (uint64, uint64, error) { + if C.nvmlHandle == nil { + return 0, 0, errLibraryNotLoaded + } + var memory C.nvmlMemory_t + r := C.nvmlDeviceGetMemoryInfo(d.dev, &memory) + return uint64(memory.total), uint64(memory.used), errorString(r) +} + +// UtilizationRates returns the percent of time over the past sample period during which: +// utilization.gpu: one or more kernels were executing on the GPU. +// utilization.memory: global (device) memory was being read or written.
+func (d Device) UtilizationRates() (uint, uint, error) { + if C.nvmlHandle == nil { + return 0, 0, errLibraryNotLoaded + } + var utilization C.nvmlUtilization_t + r := C.nvmlDeviceGetUtilizationRates(d.dev, &utilization) + return uint(utilization.gpu), uint(utilization.memory), errorString(r) +} + +// PowerUsage returns the power usage for this GPU and its associated circuitry +// in milliwatts. The reading is accurate to within +/- 5% of current power draw. +func (d Device) PowerUsage() (uint, error) { + if C.nvmlHandle == nil { + return 0, errLibraryNotLoaded + } + var n C.uint + r := C.nvmlDeviceGetPowerUsage(d.dev, &n) + return uint(n), errorString(r) +} + +// AveragePowerUsage returns the power usage for this GPU and its associated circuitry +// in milliwatts averaged over the samples collected in the last `since` duration. +func (d Device) AveragePowerUsage(since time.Duration) (uint, error) { + if C.nvmlHandle == nil { + return 0, errLibraryNotLoaded + } + lastTs := C.ulonglong(time.Now().Add(-1*since).UnixNano() / 1000) + var n C.uint + r := C.nvmlDeviceGetAverageUsage(d.dev, C.NVML_TOTAL_POWER_SAMPLES, lastTs, &n) + return uint(n), errorString(r) +} + +// AverageGPUUtilization returns the utilization.gpu metric (percent of time +// one or more kernels were executing on the GPU) averaged over the samples +// collected in the last `since` duration. +func (d Device) AverageGPUUtilization(since time.Duration) (uint, error) { + if C.nvmlHandle == nil { + return 0, errLibraryNotLoaded + } + lastTs := C.ulonglong(time.Now().Add(-1*since).UnixNano() / 1000) + var n C.uint + r := C.nvmlDeviceGetAverageUsage(d.dev, C.NVML_GPU_UTILIZATION_SAMPLES, lastTs, &n) + return uint(n), errorString(r) +} diff --git a/vendor/github.com/mindprince/gonvml/nvml.h b/vendor/github.com/mindprince/gonvml/nvml.h new file mode 100644 index 000000000000..6febac22b602 --- /dev/null +++ b/vendor/github.com/mindprince/gonvml/nvml.h @@ -0,0 +1,5605 @@ +/* + * Copyright 1993-2016 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO USER: + * + * This source code is subject to NVIDIA ownership rights under U.S. and + * international Copyright laws. Users and possessors of this source code + * are hereby granted a nonexclusive, royalty-free license to use this code + * in individual and commercial software. + * + * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE + * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR + * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH + * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, + * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS + * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE + * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE + * OR PERFORMANCE OF THIS SOURCE CODE. + * + * U.S. Government End Users. This source code is a "commercial item" as + * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of + * "commercial computer software" and "commercial computer software + * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) + * and is provided to the U.S. Government only as a commercial end item. + * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through + * 227.7202-4 (JUNE 1995), all U.S.
Government End Users acquire the + * source code with only those rights set forth herein. + * + * Any use of this source code in individual and commercial software must + * include, in the user documentation and internal comments to the code, + * the above Disclaimer and U.S. Government End Users Notice. + */ + +/* +NVML API Reference + +The NVIDIA Management Library (NVML) is a C-based programmatic interface for monitoring and +managing various states within NVIDIA Tesla &tm; GPUs. It is intended to be a platform for building +3rd party applications, and is also the underlying library for the NVIDIA-supported nvidia-smi +tool. NVML is thread-safe so it is safe to make simultaneous NVML calls from multiple threads. + +API Documentation + +Supported platforms: +- Windows: Windows Server 2008 R2 64bit, Windows Server 2012 R2 64bit, Windows 7 64bit, Windows 8 64bit, Windows 10 64bit +- Linux: 32-bit and 64-bit +- Hypervisors: Windows Server 2008R2/2012 Hyper-V 64bit, Citrix XenServer 6.2 SP1+, VMware ESX 5.1/5.5 + +Supported products: +- Full Support + - All Tesla products, starting with the Fermi architecture + - All Quadro products, starting with the Fermi architecture + - All GRID products, starting with the Kepler architecture + - Selected GeForce Titan products +- Limited Support + - All Geforce products, starting with the Fermi architecture + +The NVML library can be found at \%ProgramW6432\%\\"NVIDIA Corporation"\\NVSMI\\ on Windows. It is +not be added to the system path by default. To dynamically link to NVML, add this path to the PATH +environmental variable. To dynamically load NVML, call LoadLibrary with this path. + +On Linux the NVML library will be found on the standard library path. For 64 bit Linux, both the 32 bit +and 64 bit NVML libraries will be installed. + +Online documentation for this library is available at http://docs.nvidia.com/deploy/nvml-api/index.html +*/ + +#ifndef __nvml_nvml_h__ +#define __nvml_nvml_h__ + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * On Windows, set up methods for DLL export + * define NVML_STATIC_IMPORT when using nvml_loader library + */ +#if defined _WINDOWS + #if !defined NVML_STATIC_IMPORT + #if defined NVML_LIB_EXPORT + #define DECLDIR __declspec(dllexport) + #else + #define DECLDIR __declspec(dllimport) + #endif + #else + #define DECLDIR + #endif +#else + #define DECLDIR +#endif + +/** + * NVML API versioning support + */ +#define NVML_API_VERSION 8 +#define NVML_API_VERSION_STR "8" +#define nvmlInit nvmlInit_v2 +#define nvmlDeviceGetPciInfo nvmlDeviceGetPciInfo_v2 +#define nvmlDeviceGetCount nvmlDeviceGetCount_v2 +#define nvmlDeviceGetHandleByIndex nvmlDeviceGetHandleByIndex_v2 +#define nvmlDeviceGetHandleByPciBusId nvmlDeviceGetHandleByPciBusId_v2 + +/***************************************************************************************************/ +/** @defgroup nvmlDeviceStructs Device Structs + * @{ + */ +/***************************************************************************************************/ + +/** + * Special constant that some fields take when they are not available. + * Used when only part of the struct is not available. + * + * Each structure explicitly states when to check for this value. + */ +#define NVML_VALUE_NOT_AVAILABLE (-1) + +typedef struct nvmlDevice_st* nvmlDevice_t; + +/** + * Buffer size guaranteed to be large enough for pci bus id + */ +#define NVML_DEVICE_PCI_BUS_ID_BUFFER_SIZE 16 + +/** + * PCI information about a GPU device. 
+ */ +typedef struct nvmlPciInfo_st +{ + char busId[NVML_DEVICE_PCI_BUS_ID_BUFFER_SIZE]; //!< The tuple domain:bus:device.function PCI identifier (& NULL terminator) + unsigned int domain; //!< The PCI domain on which the device's bus resides, 0 to 0xffff + unsigned int bus; //!< The bus on which the device resides, 0 to 0xff + unsigned int device; //!< The device's id on the bus, 0 to 31 + unsigned int pciDeviceId; //!< The combined 16-bit device id and 16-bit vendor id + + // Added in NVML 2.285 API + unsigned int pciSubSystemId; //!< The 32-bit Sub System Device ID + + // NVIDIA reserved for internal use only + unsigned int reserved0; + unsigned int reserved1; + unsigned int reserved2; + unsigned int reserved3; +} nvmlPciInfo_t; + +/** + * Detailed ECC error counts for a device. + * + * @deprecated Different GPU families can have different memory error counters + * See \ref nvmlDeviceGetMemoryErrorCounter + */ +typedef struct nvmlEccErrorCounts_st +{ + unsigned long long l1Cache; //!< L1 cache errors + unsigned long long l2Cache; //!< L2 cache errors + unsigned long long deviceMemory; //!< Device memory errors + unsigned long long registerFile; //!< Register file errors +} nvmlEccErrorCounts_t; + +/** + * Utilization information for a device. + * Each sample period may be between 1 second and 1/6 second, depending on the product being queried. + */ +typedef struct nvmlUtilization_st +{ + unsigned int gpu; //!< Percent of time over the past sample period during which one or more kernels was executing on the GPU + unsigned int memory; //!< Percent of time over the past sample period during which global (device) memory was being read or written +} nvmlUtilization_t; + +/** + * Memory allocation information for a device. + */ +typedef struct nvmlMemory_st +{ + unsigned long long total; //!< Total installed FB memory (in bytes) + unsigned long long free; //!< Unallocated FB memory (in bytes) + unsigned long long used; //!< Allocated FB memory (in bytes). Note that the driver/GPU always sets aside a small amount of memory for bookkeeping +} nvmlMemory_t; + +/** + * BAR1 Memory allocation Information for a device + */ +typedef struct nvmlBAR1Memory_st +{ + unsigned long long bar1Total; //!< Total BAR1 Memory (in bytes) + unsigned long long bar1Free; //!< Unallocated BAR1 Memory (in bytes) + unsigned long long bar1Used; //!< Allocated Used Memory (in bytes) +}nvmlBAR1Memory_t; + +/** + * Information about running compute processes on the GPU + */ +typedef struct nvmlProcessInfo_st +{ + unsigned int pid; //!< Process ID + unsigned long long usedGpuMemory; //!< Amount of used GPU memory in bytes. + //! Under WDDM, \ref NVML_VALUE_NOT_AVAILABLE is always reported + //! 
because Windows KMD manages all the memory and not the NVIDIA driver +} nvmlProcessInfo_t; + +/** + * Enum to represent type of bridge chip + */ +typedef enum nvmlBridgeChipType_enum +{ + NVML_BRIDGE_CHIP_PLX = 0, + NVML_BRIDGE_CHIP_BRO4 = 1 +}nvmlBridgeChipType_t; + +/** + * Maximum number of NvLink links supported + */ +#define NVML_NVLINK_MAX_LINKS 6 + +/** + * Enum to represent the NvLink utilization counter packet units + */ +typedef enum nvmlNvLinkUtilizationCountUnits_enum +{ + NVML_NVLINK_COUNTER_UNIT_CYCLES = 0, // count by cycles + NVML_NVLINK_COUNTER_UNIT_PACKETS = 1, // count by packets + NVML_NVLINK_COUNTER_UNIT_BYTES = 2, // count by bytes + + // this must be last + NVML_NVLINK_COUNTER_UNIT_COUNT +} nvmlNvLinkUtilizationCountUnits_t; + +/** + * Enum to represent the NvLink utilization counter packet types to count + * ** this is ONLY applicable with the units as packets or bytes + * ** as specified in \a nvmlNvLinkUtilizationCountUnits_t + * ** all packet filter descriptions are target GPU centric + * ** these can be "OR'd" together + */ +typedef enum nvmlNvLinkUtilizationCountPktTypes_enum +{ + NVML_NVLINK_COUNTER_PKTFILTER_NOP = 0x1, // no operation packets + NVML_NVLINK_COUNTER_PKTFILTER_READ = 0x2, // read packets + NVML_NVLINK_COUNTER_PKTFILTER_WRITE = 0x4, // write packets + NVML_NVLINK_COUNTER_PKTFILTER_RATOM = 0x8, // reduction atomic requests + NVML_NVLINK_COUNTER_PKTFILTER_NRATOM = 0x10, // non-reduction atomic requests + NVML_NVLINK_COUNTER_PKTFILTER_FLUSH = 0x20, // flush requests + NVML_NVLINK_COUNTER_PKTFILTER_RESPDATA = 0x40, // responses with data + NVML_NVLINK_COUNTER_PKTFILTER_RESPNODATA = 0x80, // responses without data + NVML_NVLINK_COUNTER_PKTFILTER_ALL = 0xFF // all packets +} nvmlNvLinkUtilizationCountPktTypes_t; + +/** + * Struct to define the NVLINK counter controls + */ +typedef struct nvmlNvLinkUtilizationControl_st +{ + nvmlNvLinkUtilizationCountUnits_t units; + nvmlNvLinkUtilizationCountPktTypes_t pktfilter; +} nvmlNvLinkUtilizationControl_t; + +/** + * Enum to represent NvLink queryable capabilities + */ +typedef enum nvmlNvLinkCapability_enum +{ + NVML_NVLINK_CAP_P2P_SUPPORTED = 0, // P2P over NVLink is supported + NVML_NVLINK_CAP_SYSMEM_ACCESS = 1, // Access to system memory is supported + NVML_NVLINK_CAP_P2P_ATOMICS = 2, // P2P atomics are supported + NVML_NVLINK_CAP_SYSMEM_ATOMICS= 3, // System memory atomics are supported + NVML_NVLINK_CAP_SLI_BRIDGE = 4, // SLI is supported over this link + NVML_NVLINK_CAP_VALID = 5, // Link is supported on this device + // should be last + NVML_NVLINK_CAP_COUNT +} nvmlNvLinkCapability_t; + +/** + * Enum to represent NvLink queryable error counters + */ +typedef enum nvmlNvLinkErrorCounter_enum +{ + NVML_NVLINK_ERROR_DL_REPLAY = 0, // Data link transmit replay error counter + NVML_NVLINK_ERROR_DL_RECOVERY = 1, // Data link transmit recovery error counter + NVML_NVLINK_ERROR_DL_CRC_FLIT = 2, // Data link receive flow control digit CRC error counter + NVML_NVLINK_ERROR_DL_CRC_DATA = 3, // Data link receive data CRC error counter + + // this must be last + NVML_NVLINK_ERROR_COUNT +} nvmlNvLinkErrorCounter_t; + +/** + * Represents level relationships within a system between two GPUs + * The enums are spaced to allow for future relationships + */ +typedef enum nvmlGpuLevel_enum +{ + NVML_TOPOLOGY_INTERNAL = 0, // e.g. 
Tesla K80 + NVML_TOPOLOGY_SINGLE = 10, // all devices that only need traverse a single PCIe switch + NVML_TOPOLOGY_MULTIPLE = 20, // all devices that need not traverse a host bridge + NVML_TOPOLOGY_HOSTBRIDGE = 30, // all devices that are connected to the same host bridge + NVML_TOPOLOGY_CPU = 40, // all devices that are connected to the same CPU but possibly multiple host bridges + NVML_TOPOLOGY_SYSTEM = 50, // all devices in the system + + // there is purposefully no COUNT here because of the need for spacing above +} nvmlGpuTopologyLevel_t; + +/* P2P Capability Index Status*/ +typedef enum nvmlGpuP2PStatus_enum +{ + NVML_P2P_STATUS_OK = 0, + NVML_P2P_STATUS_CHIPSET_NOT_SUPPORED, + NVML_P2P_STATUS_GPU_NOT_SUPPORTED, + NVML_P2P_STATUS_IOH_TOPOLOGY_NOT_SUPPORTED, + NVML_P2P_STATUS_DISABLED_BY_REGKEY, + NVML_P2P_STATUS_NOT_SUPPORTED, + NVML_P2P_STATUS_UNKNOWN + +} nvmlGpuP2PStatus_t; + +/* P2P Capability Index*/ +typedef enum nvmlGpuP2PCapsIndex_enum +{ + NVML_P2P_CAPS_INDEX_READ = 0, + NVML_P2P_CAPS_INDEX_WRITE, + NVML_P2P_CAPS_INDEX_NVLINK, + NVML_P2P_CAPS_INDEX_ATOMICS, + NVML_P2P_CAPS_INDEX_PROP, + NVML_P2P_CAPS_INDEX_UNKNOWN +}nvmlGpuP2PCapsIndex_t; + +/** + * Maximum limit on Physical Bridges per Board + */ +#define NVML_MAX_PHYSICAL_BRIDGE (128) + +/** + * Information about the Bridge Chip Firmware + */ +typedef struct nvmlBridgeChipInfo_st +{ + nvmlBridgeChipType_t type; //!< Type of Bridge Chip + unsigned int fwVersion; //!< Firmware Version. 0=Version is unavailable +}nvmlBridgeChipInfo_t; + +/** + * This structure stores the complete Hierarchy of the Bridge Chip within the board. The immediate + * bridge is stored at index 0 of bridgeInfoList, parent to immediate bridge is at index 1 and so forth. + */ +typedef struct nvmlBridgeChipHierarchy_st +{ + unsigned char bridgeCount; //!< Number of Bridge Chips on the Board + nvmlBridgeChipInfo_t bridgeChipInfo[NVML_MAX_PHYSICAL_BRIDGE]; //!< Hierarchy of Bridge Chips on the board +}nvmlBridgeChipHierarchy_t; + +/** + * Represents Type of Sampling Event + */ +typedef enum nvmlSamplingType_enum +{ + NVML_TOTAL_POWER_SAMPLES = 0, //!< To represent total power drawn by GPU + NVML_GPU_UTILIZATION_SAMPLES = 1, //!< To represent percent of time during which one or more kernels was executing on the GPU + NVML_MEMORY_UTILIZATION_SAMPLES = 2, //!< To represent percent of time during which global (device) memory was being read or written + NVML_ENC_UTILIZATION_SAMPLES = 3, //!< To represent percent of time during which NVENC remains busy + NVML_DEC_UTILIZATION_SAMPLES = 4, //!< To represent percent of time during which NVDEC remains busy + NVML_PROCESSOR_CLK_SAMPLES = 5, //!< To represent processor clock samples + NVML_MEMORY_CLK_SAMPLES = 6, //!< To represent memory clock samples + + // Keep this last + NVML_SAMPLINGTYPE_COUNT +}nvmlSamplingType_t; + +/** + * Represents the queryable PCIe utilization counters + */ +typedef enum nvmlPcieUtilCounter_enum +{ + NVML_PCIE_UTIL_TX_BYTES = 0, // 1KB granularity + NVML_PCIE_UTIL_RX_BYTES = 1, // 1KB granularity + + // Keep this last + NVML_PCIE_UTIL_COUNT +} nvmlPcieUtilCounter_t; + +/** + * Represents the type for sample value returned + */ +typedef enum nvmlValueType_enum +{ + NVML_VALUE_TYPE_DOUBLE = 0, + NVML_VALUE_TYPE_UNSIGNED_INT = 1, + NVML_VALUE_TYPE_UNSIGNED_LONG = 2, + NVML_VALUE_TYPE_UNSIGNED_LONG_LONG = 3, + NVML_VALUE_TYPE_SIGNED_LONG_LONG = 4, + + // Keep this last + NVML_VALUE_TYPE_COUNT +}nvmlValueType_t; + + +/** + * Union to represent different types of Value + */ +typedef union 
nvmlValue_st +{ + double dVal; //!< If the value is double + unsigned int uiVal; //!< If the value is unsigned int + unsigned long ulVal; //!< If the value is unsigned long + unsigned long long ullVal; //!< If the value is unsigned long long + signed long long sllVal; //!< If the value is signed long long +}nvmlValue_t; + +/** + * Information for Sample + */ +typedef struct nvmlSample_st +{ + unsigned long long timeStamp; //!< CPU Timestamp in microseconds + nvmlValue_t sampleValue; //!< Sample Value +}nvmlSample_t; + +/** + * Represents type of perf policy for which violation times can be queried + */ +typedef enum nvmlPerfPolicyType_enum +{ + NVML_PERF_POLICY_POWER = 0, //!< How long did power violations cause the GPU to be below application clocks + NVML_PERF_POLICY_THERMAL = 1, //!< How long did thermal violations cause the GPU to be below application clocks + NVML_PERF_POLICY_SYNC_BOOST = 2, //!< How long did sync boost cause the GPU to be below application clocks + NVML_PERF_POLICY_BOARD_LIMIT = 3, //!< How long did the board limit cause the GPU to be below application clocks + NVML_PERF_POLICY_LOW_UTILIZATION = 4, //!< How long did low utilization cause the GPU to be below application clocks + NVML_PERF_POLICY_RELIABILITY = 5, //!< How long did the board reliability limit cause the GPU to be below application clocks + + NVML_PERF_POLICY_TOTAL_APP_CLOCKS = 10, //!< Total time the GPU was held below application clocks by any limiter (0 - 5 above) + NVML_PERF_POLICY_TOTAL_BASE_CLOCKS = 11, //!< Total time the GPU was held below base clocks + + // Keep this last + NVML_PERF_POLICY_COUNT +}nvmlPerfPolicyType_t; + +/** + * Struct to hold perf policy violation status data + */ +typedef struct nvmlViolationTime_st +{ + unsigned long long referenceTime; //!< referenceTime represents CPU timestamp in microseconds + unsigned long long violationTime; //!< violationTime in Nanoseconds +}nvmlViolationTime_t; + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlDeviceEnumvs Device Enums + * @{ + */ +/***************************************************************************************************/ + +/** + * Generic enable/disable enum. + */ +typedef enum nvmlEnableState_enum +{ + NVML_FEATURE_DISABLED = 0, //!< Feature disabled + NVML_FEATURE_ENABLED = 1 //!< Feature enabled +} nvmlEnableState_t; + +//! Generic flag used to specify the default behavior of some functions. See description of particular functions for details. +#define nvmlFlagDefault 0x00 +//! Generic flag used to force some behavior. See description of particular functions for details. +#define nvmlFlagForce 0x01 + +/** + * * The Brand of the GPU + * */ +typedef enum nvmlBrandType_enum +{ + NVML_BRAND_UNKNOWN = 0, + NVML_BRAND_QUADRO = 1, + NVML_BRAND_TESLA = 2, + NVML_BRAND_NVS = 3, + NVML_BRAND_GRID = 4, + NVML_BRAND_GEFORCE = 5, + + // Keep this last + NVML_BRAND_COUNT +} nvmlBrandType_t; + +/** + * Temperature thresholds. + */ +typedef enum nvmlTemperatureThresholds_enum +{ + NVML_TEMPERATURE_THRESHOLD_SHUTDOWN = 0, // Temperature at which the GPU will shut down + // for HW protection + NVML_TEMPERATURE_THRESHOLD_SLOWDOWN = 1, // Temperature at which the GPU will begin slowdown + // Keep this last + NVML_TEMPERATURE_THRESHOLD_COUNT +} nvmlTemperatureThresholds_t; + +/** + * Temperature sensors. 
+ */ +typedef enum nvmlTemperatureSensors_enum +{ + NVML_TEMPERATURE_GPU = 0, //!< Temperature sensor for the GPU die + + // Keep this last + NVML_TEMPERATURE_COUNT +} nvmlTemperatureSensors_t; + +/** + * Compute mode. + * + * NVML_COMPUTEMODE_EXCLUSIVE_PROCESS was added in CUDA 4.0. + * Earlier CUDA versions supported a single exclusive mode, + * which is equivalent to NVML_COMPUTEMODE_EXCLUSIVE_THREAD in CUDA 4.0 and beyond. + */ +typedef enum nvmlComputeMode_enum +{ + NVML_COMPUTEMODE_DEFAULT = 0, //!< Default compute mode -- multiple contexts per device + NVML_COMPUTEMODE_EXCLUSIVE_THREAD = 1, //!< Support Removed + NVML_COMPUTEMODE_PROHIBITED = 2, //!< Compute-prohibited mode -- no contexts per device + NVML_COMPUTEMODE_EXCLUSIVE_PROCESS = 3, //!< Compute-exclusive-process mode -- only one context per device, usable from multiple threads at a time + + // Keep this last + NVML_COMPUTEMODE_COUNT +} nvmlComputeMode_t; + +/** + * ECC bit types. + * + * @deprecated See \ref nvmlMemoryErrorType_t for a more flexible type + */ +#define nvmlEccBitType_t nvmlMemoryErrorType_t + +/** + * Single bit ECC errors + * + * @deprecated Mapped to \ref NVML_MEMORY_ERROR_TYPE_CORRECTED + */ +#define NVML_SINGLE_BIT_ECC NVML_MEMORY_ERROR_TYPE_CORRECTED + +/** + * Double bit ECC errors + * + * @deprecated Mapped to \ref NVML_MEMORY_ERROR_TYPE_UNCORRECTED + */ +#define NVML_DOUBLE_BIT_ECC NVML_MEMORY_ERROR_TYPE_UNCORRECTED + +/** + * Memory error types + */ +typedef enum nvmlMemoryErrorType_enum +{ + /** + * A memory error that was corrected + * + * For ECC errors, these are single bit errors + * For Texture memory, these are errors fixed by resend + */ + NVML_MEMORY_ERROR_TYPE_CORRECTED = 0, + /** + * A memory error that was not corrected + * + * For ECC errors, these are double bit errors + * For Texture memory, these are errors where the resend fails + */ + NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1, + + + // Keep this last + NVML_MEMORY_ERROR_TYPE_COUNT //!< Count of memory error types + +} nvmlMemoryErrorType_t; + +/** + * ECC counter types. + * + * Note: Volatile counts are reset each time the driver loads. On Windows this is once per boot. On Linux this can be more frequent. + * On Linux the driver unloads when no active clients exist. If persistence mode is enabled or there is always a driver + * client active (e.g. X11), then Linux also sees per-boot behavior. If not, volatile counts are reset each time a compute app + * is run. + */ +typedef enum nvmlEccCounterType_enum +{ + NVML_VOLATILE_ECC = 0, //!< Volatile counts are reset each time the driver loads. + NVML_AGGREGATE_ECC = 1, //!< Aggregate counts persist across reboots (i.e. for the lifetime of the device) + + // Keep this last + NVML_ECC_COUNTER_TYPE_COUNT //!< Count of memory counter types +} nvmlEccCounterType_t; + +/** + * Clock types. + * + * All speeds are in Mhz. + */ +typedef enum nvmlClockType_enum +{ + NVML_CLOCK_GRAPHICS = 0, //!< Graphics clock domain + NVML_CLOCK_SM = 1, //!< SM clock domain + NVML_CLOCK_MEM = 2, //!< Memory clock domain + NVML_CLOCK_VIDEO = 3, //!< Video encoder/decoder clock domain + + // Keep this last + NVML_CLOCK_COUNT //usedGpuMemory is not supported + + + unsigned long long time; //!< Amount of time in ms during which the compute context was active. 
The time is reported as 0 if + //!< the process is not terminated + + unsigned long long startTime; //!< CPU Timestamp in usec representing start time for the process + + unsigned int isRunning; //!< Flag to represent if the process is running (1 for running, 0 for terminated) + + unsigned int reserved[5]; //!< Reserved for future use +} nvmlAccountingStats_t; + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlVgpuConstants Vgpu Constants + * @{ + */ +/***************************************************************************************************/ + +/** + * Buffer size guaranteed to be large enough for \ref nvmlVgpuTypeGetLicense + */ +#define NVML_GRID_LICENSE_BUFFER_SIZE 128 + +#define NVML_VGPU_NAME_BUFFER_SIZE 64 + +#define NVML_MAX_VGPU_TYPES_PER_PGPU 17 + +#define NVML_MAX_VGPU_INSTANCES_PER_PGPU 24 + +#define NVML_GRID_LICENSE_FEATURE_MAX_COUNT 3 + +#define NVML_GRID_LICENSE_INFO_MAX_LENGTH 128 + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlVgpuEnum Vgpu Enum + * @{ + */ +/***************************************************************************************************/ + +/*! + * Types of VM identifiers + */ +typedef enum nvmlVgpuVmIdType { + NVML_VGPU_VM_ID_DOMAIN_ID = 0, //!< VM ID represents DOMAIN ID + NVML_VGPU_VM_ID_UUID = 1, //!< VM ID represents UUID +} nvmlVgpuVmIdType_t; + +// vGPU GUEST info state. +typedef enum nvmlVgpuGuestInfoState_enum +{ + NVML_VGPU_INSTANCE_GUEST_INFO_STATE_UNINITIALIZED = 0, //= 0 and < \a unitCount + * @param unit Reference in which to return the unit handle + * + * @return + * - \ref NVML_SUCCESS if \a unit has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a index is invalid or \a unit is NULL + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlUnitGetHandleByIndex(unsigned int index, nvmlUnit_t *unit); + +/** + * Retrieves the static information associated with a unit. + * + * For S-class products. + * + * See \ref nvmlUnitInfo_t for details on available unit info. + * + * @param unit The identifier of the target unit + * @param info Reference in which to return the unit information + * + * @return + * - \ref NVML_SUCCESS if \a info has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit is invalid or \a info is NULL + */ +nvmlReturn_t DECLDIR nvmlUnitGetUnitInfo(nvmlUnit_t unit, nvmlUnitInfo_t *info); + +/** + * Retrieves the LED state associated with this unit. + * + * For S-class products. + * + * See \ref nvmlLedState_t for details on allowed states. + * + * @param unit The identifier of the target unit + * @param state Reference in which to return the current LED state + * + * @return + * - \ref NVML_SUCCESS if \a state has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit is invalid or \a state is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlUnitSetLedState() + */ +nvmlReturn_t DECLDIR nvmlUnitGetLedState(nvmlUnit_t unit, nvmlLedState_t *state); + +/** + * Retrieves the PSU stats for the unit. + * + * For S-class products. 
+ * + * See \ref nvmlPSUInfo_t for details on available PSU info. + * + * @param unit The identifier of the target unit + * @param psu Reference in which to return the PSU information + * + * @return + * - \ref NVML_SUCCESS if \a psu has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit is invalid or \a psu is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlUnitGetPsuInfo(nvmlUnit_t unit, nvmlPSUInfo_t *psu); + +/** + * Retrieves the temperature readings for the unit, in degrees C. + * + * For S-class products. + * + * Depending on the product, readings may be available for intake (type=0), + * exhaust (type=1) and board (type=2). + * + * @param unit The identifier of the target unit + * @param type The type of reading to take + * @param temp Reference in which to return the intake temperature + * + * @return + * - \ref NVML_SUCCESS if \a temp has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit or \a type is invalid or \a temp is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlUnitGetTemperature(nvmlUnit_t unit, unsigned int type, unsigned int *temp); + +/** + * Retrieves the fan speed readings for the unit. + * + * For S-class products. + * + * See \ref nvmlUnitFanSpeeds_t for details on available fan speed info. + * + * @param unit The identifier of the target unit + * @param fanSpeeds Reference in which to return the fan speed information + * + * @return + * - \ref NVML_SUCCESS if \a fanSpeeds has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit is invalid or \a fanSpeeds is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlUnitGetFanSpeedInfo(nvmlUnit_t unit, nvmlUnitFanSpeeds_t *fanSpeeds); + +/** + * Retrieves the set of GPU devices that are attached to the specified unit. + * + * For S-class products. + * + * The \a deviceCount argument is expected to be set to the size of the input \a devices array. + * + * @param unit The identifier of the target unit + * @param deviceCount Reference in which to provide the \a devices array size, and + * to return the number of attached GPU devices + * @param devices Reference in which to return the references to the attached GPU devices + * + * @return + * - \ref NVML_SUCCESS if \a deviceCount and \a devices have been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a deviceCount indicates that the \a devices array is too small + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit is invalid, either of \a deviceCount or \a devices is NULL + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlUnitGetDevices(nvmlUnit_t unit, unsigned int *deviceCount, nvmlDevice_t *devices); + +/** + * Retrieves the IDs and firmware versions for any Host Interface Cards (HICs) in the system. + * + * For S-class products. 
+ * + * The \a hwbcCount argument is expected to be set to the size of the input \a hwbcEntries array. + * The HIC must be connected to an S-class system for it to be reported by this function. + * + * @param hwbcCount Size of hwbcEntries array + * @param hwbcEntries Array holding information about hwbc + * + * @return + * - \ref NVML_SUCCESS if \a hwbcCount and \a hwbcEntries have been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if either \a hwbcCount or \a hwbcEntries is NULL + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a hwbcCount indicates that the \a hwbcEntries array is too small + */ +nvmlReturn_t DECLDIR nvmlSystemGetHicVersion(unsigned int *hwbcCount, nvmlHwbcEntry_t *hwbcEntries); +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlDeviceQueries Device Queries + * This chapter describes that queries that NVML can perform against each device. + * In each case the device is identified with an nvmlDevice_t handle. This handle is obtained by + * calling one of \ref nvmlDeviceGetHandleByIndex(), \ref nvmlDeviceGetHandleBySerial(), + * \ref nvmlDeviceGetHandleByPciBusId(). or \ref nvmlDeviceGetHandleByUUID(). + * @{ + */ +/***************************************************************************************************/ + + /** + * Retrieves the number of compute devices in the system. A compute device is a single GPU. + * + * For all products. + * + * Note: New nvmlDeviceGetCount_v2 (default in NVML 5.319) returns count of all devices in the system + * even if nvmlDeviceGetHandleByIndex_v2 returns NVML_ERROR_NO_PERMISSION for such device. + * Update your code to handle this error, or use NVML 4.304 or older nvml header file. + * For backward binary compatibility reasons _v1 version of the API is still present in the shared + * library. + * Old _v1 version of nvmlDeviceGetCount doesn't count devices that NVML has no permission to talk to. + * + * @param deviceCount Reference in which to return the number of accessible devices + * + * @return + * - \ref NVML_SUCCESS if \a deviceCount has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a deviceCount is NULL + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetCount(unsigned int *deviceCount); + +/** + * Acquire the handle for a particular device, based on its index. + * + * For all products. + * + * Valid indices are derived from the \a accessibleDevices count returned by + * \ref nvmlDeviceGetCount(). For example, if \a accessibleDevices is 2 the valid indices + * are 0 and 1, corresponding to GPU 0 and GPU 1. + * + * The order in which NVML enumerates devices has no guarantees of consistency between reboots. For that reason it + * is recommended that devices be looked up by their PCI ids or UUID. See + * \ref nvmlDeviceGetHandleByUUID() and \ref nvmlDeviceGetHandleByPciBusId(). + * + * Note: The NVML index may not correlate with other APIs, such as the CUDA device index. + * + * Starting from NVML 5, this API causes NVML to initialize the target GPU + * NVML may initialize additional GPUs if: + * - The target GPU is an SLI slave + * + * Note: New nvmlDeviceGetCount_v2 (default in NVML 5.319) returns count of all devices in the system + * even if nvmlDeviceGetHandleByIndex_v2 returns NVML_ERROR_NO_PERMISSION for such device. 
+ * Update your code to handle this error, or use NVML 4.304 or older nvml header file. + * For backward binary compatibility reasons _v1 version of the API is still present in the shared + * library. + * Old _v1 version of nvmlDeviceGetCount doesn't count devices that NVML has no permission to talk to. + * + * This means that nvmlDeviceGetHandleByIndex_v2 and _v1 can return different devices for the same index. + * If you don't touch macros that map old (_v1) versions to _v2 versions at the top of the file you don't + * need to worry about that. + * + * @param index The index of the target GPU, >= 0 and < \a accessibleDevices + * @param device Reference in which to return the device handle + * + * @return + * - \ref NVML_SUCCESS if \a device has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a index is invalid or \a device is NULL + * - \ref NVML_ERROR_INSUFFICIENT_POWER if any attached devices have improperly attached external power cables + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to talk to this device + * - \ref NVML_ERROR_IRQ_ISSUE if NVIDIA kernel detected an interrupt issue with the attached GPUs + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetIndex + * @see nvmlDeviceGetCount + */ +nvmlReturn_t DECLDIR nvmlDeviceGetHandleByIndex(unsigned int index, nvmlDevice_t *device); + +/** + * Acquire the handle for a particular device, based on its board serial number. + * + * For Fermi &tm; or newer fully supported devices. + * + * This number corresponds to the value printed directly on the board, and to the value returned by + * \ref nvmlDeviceGetSerial(). + * + * @deprecated Since more than one GPU can exist on a single board this function is deprecated in favor + * of \ref nvmlDeviceGetHandleByUUID. + * For dual GPU boards this function will return NVML_ERROR_INVALID_ARGUMENT. + * + * Starting from NVML 5, this API causes NVML to initialize the target GPU + * NVML may initialize additional GPUs as it searches for the target GPU + * + * @param serial The board serial number of the target GPU + * @param device Reference in which to return the device handle + * + * @return + * - \ref NVML_SUCCESS if \a device has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a serial is invalid, \a device is NULL or more than one + * device has the same serial (dual GPU boards) + * - \ref NVML_ERROR_NOT_FOUND if \a serial does not match a valid device on the system + * - \ref NVML_ERROR_INSUFFICIENT_POWER if any attached devices have improperly attached external power cables + * - \ref NVML_ERROR_IRQ_ISSUE if NVIDIA kernel detected an interrupt issue with the attached GPUs + * - \ref NVML_ERROR_GPU_IS_LOST if any GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetSerial + * @see nvmlDeviceGetHandleByUUID + */ +nvmlReturn_t DECLDIR nvmlDeviceGetHandleBySerial(const char *serial, nvmlDevice_t *device); + +/** + * Acquire the handle for a particular device, based on its globally unique immutable UUID associated with each device. + * + * For all products. 
+ * + * @param uuid The UUID of the target GPU + * @param device Reference in which to return the device handle + * + * Starting from NVML 5, this API causes NVML to initialize the target GPU + * NVML may initialize additional GPUs as it searches for the target GPU + * + * @return + * - \ref NVML_SUCCESS if \a device has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a uuid is invalid or \a device is null + * - \ref NVML_ERROR_NOT_FOUND if \a uuid does not match a valid device on the system + * - \ref NVML_ERROR_INSUFFICIENT_POWER if any attached devices have improperly attached external power cables + * - \ref NVML_ERROR_IRQ_ISSUE if NVIDIA kernel detected an interrupt issue with the attached GPUs + * - \ref NVML_ERROR_GPU_IS_LOST if any GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetUUID + */ +nvmlReturn_t DECLDIR nvmlDeviceGetHandleByUUID(const char *uuid, nvmlDevice_t *device); + +/** + * Acquire the handle for a particular device, based on its PCI bus id. + * + * For all products. + * + * This value corresponds to the nvmlPciInfo_t::busId returned by \ref nvmlDeviceGetPciInfo(). + * + * Starting from NVML 5, this API causes NVML to initialize the target GPU + * NVML may initialize additional GPUs if: + * - The target GPU is an SLI slave + * + * \note NVML 4.304 and older version of nvmlDeviceGetHandleByPciBusId"_v1" returns NVML_ERROR_NOT_FOUND + * instead of NVML_ERROR_NO_PERMISSION. + * + * @param pciBusId The PCI bus id of the target GPU + * @param device Reference in which to return the device handle + * + * @return + * - \ref NVML_SUCCESS if \a device has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a pciBusId is invalid or \a device is NULL + * - \ref NVML_ERROR_NOT_FOUND if \a pciBusId does not match a valid device on the system + * - \ref NVML_ERROR_INSUFFICIENT_POWER if the attached device has improperly attached external power cables + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to talk to this device + * - \ref NVML_ERROR_IRQ_ISSUE if NVIDIA kernel detected an interrupt issue with the attached GPUs + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetHandleByPciBusId(const char *pciBusId, nvmlDevice_t *device); + +/** + * Retrieves the name of this device. + * + * For all products. + * + * The name is an alphanumeric string that denotes a particular product, e.g. Tesla &tm; C2070. It will not + * exceed 64 characters in length (including the NULL terminator). See \ref + * nvmlConstants::NVML_DEVICE_NAME_BUFFER_SIZE. 
+ * + * @param device The identifier of the target device + * @param name Reference in which to return the product name + * @param length The maximum allowed length of the string returned in \a name + * + * @return + * - \ref NVML_SUCCESS if \a name has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a name is NULL + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetName(nvmlDevice_t device, char *name, unsigned int length); + +/** + * Retrieves the brand of this device. + * + * For all products. + * + * The type is a member of \ref nvmlBrandType_t defined above. + * + * @param device The identifier of the target device + * @param type Reference in which to return the product brand type + * + * @return + * - \ref NVML_SUCCESS if \a name has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a type is NULL + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetBrand(nvmlDevice_t device, nvmlBrandType_t *type); + +/** + * Retrieves the NVML index of this device. + * + * For all products. + * + * Valid indices are derived from the \a accessibleDevices count returned by + * \ref nvmlDeviceGetCount(). For example, if \a accessibleDevices is 2 the valid indices + * are 0 and 1, corresponding to GPU 0 and GPU 1. + * + * The order in which NVML enumerates devices has no guarantees of consistency between reboots. For that reason it + * is recommended that devices be looked up by their PCI ids or GPU UUID. See + * \ref nvmlDeviceGetHandleByPciBusId() and \ref nvmlDeviceGetHandleByUUID(). + * + * Note: The NVML index may not correlate with other APIs, such as the CUDA device index. + * + * @param device The identifier of the target device + * @param index Reference in which to return the NVML index of the device + * + * @return + * - \ref NVML_SUCCESS if \a index has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a index is NULL + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetHandleByIndex() + * @see nvmlDeviceGetCount() + */ +nvmlReturn_t DECLDIR nvmlDeviceGetIndex(nvmlDevice_t device, unsigned int *index); + +/** + * Retrieves the globally unique board serial number associated with this device's board. + * + * For all products with an inforom. + * + * The serial number is an alphanumeric string that will not exceed 30 characters (including the NULL terminator). + * This number matches the serial number tag that is physically attached to the board. See \ref + * nvmlConstants::NVML_DEVICE_SERIAL_BUFFER_SIZE. 
+ * + * @param device The identifier of the target device + * @param serial Reference in which to return the board/module serial number + * @param length The maximum allowed length of the string returned in \a serial + * + * @return + * - \ref NVML_SUCCESS if \a serial has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a serial is NULL + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetSerial(nvmlDevice_t device, char *serial, unsigned int length); + +/** + * Retrieves an array of unsigned ints (sized to cpuSetSize) of bitmasks with the ideal CPU affinity for the device + * For example, if processors 0, 1, 32, and 33 are ideal for the device and cpuSetSize == 2, + * result[0] = 0x3, result[1] = 0x3 + * + * For Kepler &tm; or newer fully supported devices. + * Supported on Linux only. + * + * @param device The identifier of the target device + * @param cpuSetSize The size of the cpuSet array that is safe to access + * @param cpuSet Array reference in which to return a bitmask of CPUs, 64 CPUs per + * unsigned long on 64-bit machines, 32 on 32-bit machines + * + * @return + * - \ref NVML_SUCCESS if \a cpuAffinity has been filled + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, cpuSetSize == 0, or cpuSet is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetCpuAffinity(nvmlDevice_t device, unsigned int cpuSetSize, unsigned long *cpuSet); + +/** + * Sets the ideal affinity for the calling thread and device using the guidelines + * given in nvmlDeviceGetCpuAffinity(). Note, this is a change as of version 8.0. + * Older versions set the affinity for a calling process and all children. + * Currently supports up to 64 processors. + * + * For Kepler &tm; or newer fully supported devices. + * Supported on Linux only. + * + * @param device The identifier of the target device + * + * @return + * - \ref NVML_SUCCESS if the calling process has been successfully bound + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceSetCpuAffinity(nvmlDevice_t device); + +/** + * Clear all affinity bindings for the calling thread. Note, this is a change as of version + * 8.0 as older versions cleared the affinity for a calling process and all children. + * + * For Kepler &tm; or newer fully supported devices. + * Supported on Linux only. 
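+ *
+ * An illustrative sketch pairing this call with \ref nvmlDeviceSetCpuAffinity (the worker
+ * function is hypothetical; assumes a valid \a device handle):
+ * \code
+ * // Bind the calling thread to the CPUs nearest the GPU, do the latency-sensitive work,
+ * // then remove the binding again.
+ * if (nvmlDeviceSetCpuAffinity(device) == NVML_SUCCESS) {
+ *     run_pinned_workload();                 // hypothetical application function
+ *     nvmlDeviceClearCpuAffinity(device);
+ * }
+ * \endcode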
+ * + * @param device The identifier of the target device + * + * @return + * - \ref NVML_SUCCESS if the calling process has been successfully unbound + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceClearCpuAffinity(nvmlDevice_t device); + +/** + * Retrieve the common ancestor for two devices + * For all products. + * Supported on Linux only. + * + * @param device1 The identifier of the first device + * @param device2 The identifier of the second device + * @param pathInfo A \ref nvmlGpuTopologyLevel_t that gives the path type + * + * @return + * - \ref NVML_SUCCESS if \a pathInfo has been set + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device1, or \a device2 is invalid, or \a pathInfo is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device or OS does not support this feature + * - \ref NVML_ERROR_UNKNOWN an error has occurred in underlying topology discovery + */ +nvmlReturn_t DECLDIR nvmlDeviceGetTopologyCommonAncestor(nvmlDevice_t device1, nvmlDevice_t device2, nvmlGpuTopologyLevel_t *pathInfo); + +/** + * Retrieve the set of GPUs that are nearest to a given device at a specific interconnectivity level + * For all products. + * Supported on Linux only. + * + * @param device The identifier of the first device + * @param level The \ref nvmlGpuTopologyLevel_t level to search for other GPUs + * @param count When zero, is set to the number of matching GPUs such that \a deviceArray + * can be malloc'd. When non-zero, \a deviceArray will be filled with \a count + * number of device handles. + * @param deviceArray An array of device handles for GPUs found at \a level + * + * @return + * - \ref NVML_SUCCESS if \a deviceArray or \a count (if initially zero) has been set + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a level, or \a count is invalid, or \a deviceArray is NULL with a non-zero \a count + * - \ref NVML_ERROR_NOT_SUPPORTED if the device or OS does not support this feature + * - \ref NVML_ERROR_UNKNOWN an error has occurred in underlying topology discovery + */ +nvmlReturn_t DECLDIR nvmlDeviceGetTopologyNearestGpus(nvmlDevice_t device, nvmlGpuTopologyLevel_t level, unsigned int *count, nvmlDevice_t *deviceArray); + +/** + * Retrieve the set of GPUs that have a CPU affinity with the given CPU number + * For all products. + * Supported on Linux only. + * + * @param cpuNumber The CPU number + * @param count When zero, is set to the number of matching GPUs such that \a deviceArray + * can be malloc'd. When non-zero, \a deviceArray will be filled with \a count + * number of device handles. 
+ * @param deviceArray An array of device handles for GPUs found with affinity to \a cpuNumber + * + * @return + * - \ref NVML_SUCCESS if \a deviceArray or \a count (if initially zero) has been set + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a cpuNumber, or \a count is invalid, or \a deviceArray is NULL with a non-zero \a count + * - \ref NVML_ERROR_NOT_SUPPORTED if the device or OS does not support this feature + * - \ref NVML_ERROR_UNKNOWN an error has occurred in underlying topology discovery + */ +nvmlReturn_t DECLDIR nvmlSystemGetTopologyGpuSet(unsigned int cpuNumber, unsigned int *count, nvmlDevice_t *deviceArray); + +/** + * Retrieve the status for a given p2p capability index between a given pair of GPU + * + * @param device1 The first device + * @param device2 The second device + * @param p2pIndex p2p Capability Index being looked for between \a device1 and \a device2 + * @param p2pStatus Reference in which to return the status of the \a p2pIndex + * between \a device1 and \a device2 + * @return + * - \ref NVML_SUCCESS if \a p2pStatus has been populated + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device1 or \a device2 or \a p2pIndex is invalid or \a p2pStatus is NULL + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetP2PStatus(nvmlDevice_t device1, nvmlDevice_t device2, nvmlGpuP2PCapsIndex_t p2pIndex,nvmlGpuP2PStatus_t *p2pStatus); + +/** + * Retrieves the globally unique immutable UUID associated with this device, as a 5 part hexadecimal string, + * that augments the immutable, board serial identifier. + * + * For all products. + * + * The UUID is a globally unique identifier. It is the only available identifier for pre-Fermi-architecture products. + * It does NOT correspond to any identifier printed on the board. It will not exceed 80 characters in length + * (including the NULL terminator). See \ref nvmlConstants::NVML_DEVICE_UUID_BUFFER_SIZE. + * + * @param device The identifier of the target device + * @param uuid Reference in which to return the GPU UUID + * @param length The maximum allowed length of the string returned in \a uuid + * + * @return + * - \ref NVML_SUCCESS if \a uuid has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a uuid is NULL + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetUUID(nvmlDevice_t device, char *uuid, unsigned int length); + +/** + * Retrieves minor number for the device. The minor number for the device is such that the Nvidia device node file for + * each GPU will have the form /dev/nvidia[minor number]. + * + * For all products. 
+ * Supported only for Linux + * + * @param device The identifier of the target device + * @param minorNumber Reference in which to return the minor number for the device + * @return + * - \ref NVML_SUCCESS if the minor number is successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a minorNumber is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetMinorNumber(nvmlDevice_t device, unsigned int *minorNumber); + +/** + * Retrieves the device board part number which is programmed into the board's InfoROM + * + * For all products. + * + * @param device Identifier of the target device + * @param partNumber Reference to the buffer to return + * @param length Length of the buffer reference + * + * @return + * - \ref NVML_SUCCESS if \a partNumber has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_NOT_SUPPORTED if the needed VBIOS fields have not been filled + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a partNumber is NULL + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetBoardPartNumber(nvmlDevice_t device, char* partNumber, unsigned int length); + +/** + * Retrieves the version information for the device's infoROM object. + * + * For all products with an inforom. + * + * Fermi and higher parts have non-volatile on-board memory for persisting device info, such as aggregate + * ECC counts. The version of the data structures in this memory may change from time to time. It will not + * exceed 16 characters in length (including the NULL terminator). + * See \ref nvmlConstants::NVML_DEVICE_INFOROM_VERSION_BUFFER_SIZE. + * + * See \ref nvmlInforomObject_t for details on the available infoROM objects. + * + * @param device The identifier of the target device + * @param object The target infoROM object + * @param version Reference in which to return the infoROM version + * @param length The maximum allowed length of the string returned in \a version + * + * @return + * - \ref NVML_SUCCESS if \a version has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a version is NULL + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have an infoROM + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetInforomImageVersion + */ +nvmlReturn_t DECLDIR nvmlDeviceGetInforomVersion(nvmlDevice_t device, nvmlInforomObject_t object, char *version, unsigned int length); + +/** + * Retrieves the global infoROM image version + * + * For all products with an inforom. + * + * Image version, just like VBIOS version, uniquely describes the exact version of the infoROM flashed on the board + * in contrast to infoROM object version which is only an indicator of supported features.
+ * Version string will not exceed 16 characters in length (including the NULL terminator). + * See \ref nvmlConstants::NVML_DEVICE_INFOROM_VERSION_BUFFER_SIZE. + * + * @param device The identifier of the target device + * @param version Reference in which to return the infoROM image version + * @param length The maximum allowed length of the string returned in \a version + * + * @return + * - \ref NVML_SUCCESS if \a version has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a version is NULL + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have an infoROM + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetInforomVersion + */ +nvmlReturn_t DECLDIR nvmlDeviceGetInforomImageVersion(nvmlDevice_t device, char *version, unsigned int length); + +/** + * Retrieves the checksum of the configuration stored in the device's infoROM. + * + * For all products with an inforom. + * + * Can be used to make sure that two GPUs have the exact same configuration. + * Current checksum takes into account configuration stored in PWR and ECC infoROM objects. + * Checksum can change between driver releases or when user changes configuration (e.g. disable/enable ECC) + * + * @param device The identifier of the target device + * @param checksum Reference in which to return the infoROM configuration checksum + * + * @return + * - \ref NVML_SUCCESS if \a checksum has been set + * - \ref NVML_ERROR_CORRUPTED_INFOROM if the device's checksum couldn't be retrieved due to infoROM corruption + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a checksum is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetInforomConfigurationChecksum(nvmlDevice_t device, unsigned int *checksum); + +/** + * Reads the infoROM from the flash and verifies the checksums. + * + * For all products with an inforom. + * + * @param device The identifier of the target device + * + * @return + * - \ref NVML_SUCCESS if infoROM is not corrupted + * - \ref NVML_ERROR_CORRUPTED_INFOROM if the device's infoROM is corrupted + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceValidateInforom(nvmlDevice_t device); + +/** + * Retrieves the display mode for the device. + * + * For all products. + * + * This method indicates whether a physical display (e.g. monitor) is currently connected to + * any of the device's connectors. + * + * See \ref nvmlEnableState_t for details on allowed modes. 
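+ *
+ * An illustrative sketch (NVML_FEATURE_ENABLED is assumed to be the \ref nvmlEnableState_t
+ * value meaning "enabled"):
+ * \code
+ * nvmlEnableState_t display;
+ * if (nvmlDeviceGetDisplayMode(device, &display) == NVML_SUCCESS)
+ *     printf("Physical display connected: %s\n",
+ *            display == NVML_FEATURE_ENABLED ? "yes" : "no");
+ * \endcode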
+ * + * @param device The identifier of the target device + * @param display Reference in which to return the display mode + * + * @return + * - \ref NVML_SUCCESS if \a display has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a display is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetDisplayMode(nvmlDevice_t device, nvmlEnableState_t *display); + +/** + * Retrieves the display active state for the device. + * + * For all products. + * + * This method indicates whether a display is initialized on the device. + * For example whether X Server is attached to this device and has allocated memory for the screen. + * + * Display can be active even when no monitor is physically attached. + * + * See \ref nvmlEnableState_t for details on allowed modes. + * + * @param device The identifier of the target device + * @param isActive Reference in which to return the display active state + * + * @return + * - \ref NVML_SUCCESS if \a isActive has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a isActive is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetDisplayActive(nvmlDevice_t device, nvmlEnableState_t *isActive); + +/** + * Retrieves the persistence mode associated with this device. + * + * For all products. + * For Linux only. + * + * When driver persistence mode is enabled the driver software state is not torn down when the last + * client disconnects. By default this feature is disabled. + * + * See \ref nvmlEnableState_t for details on allowed modes. + * + * @param device The identifier of the target device + * @param mode Reference in which to return the current driver persistence mode + * + * @return + * - \ref NVML_SUCCESS if \a mode has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceSetPersistenceMode() + */ +nvmlReturn_t DECLDIR nvmlDeviceGetPersistenceMode(nvmlDevice_t device, nvmlEnableState_t *mode); + +/** + * Retrieves the PCI attributes of this device. + * + * For all products. + * + * See \ref nvmlPciInfo_t for details on the available PCI info. 
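+ *
+ * A minimal usage sketch, assuming a valid \a device handle:
+ * \code
+ * nvmlPciInfo_t pci;
+ * if (nvmlDeviceGetPciInfo(device, &pci) == NVML_SUCCESS)
+ *     printf("PCI bus id: %s\n", pci.busId);   // same id accepted by nvmlDeviceGetHandleByPciBusId()
+ * \endcode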
+ * + * @param device The identifier of the target device + * @param pci Reference in which to return the PCI info + * + * @return + * - \ref NVML_SUCCESS if \a pci has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pci is NULL + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetPciInfo(nvmlDevice_t device, nvmlPciInfo_t *pci); + +/** + * Retrieves the maximum PCIe link generation possible with this device and system + * + * I.E. for a generation 2 PCIe device attached to a generation 1 PCIe bus the max link generation this function will + * report is generation 1. + * + * For Fermi &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param maxLinkGen Reference in which to return the max PCIe link generation + * + * @return + * - \ref NVML_SUCCESS if \a maxLinkGen has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a maxLinkGen is null + * - \ref NVML_ERROR_NOT_SUPPORTED if PCIe link information is not available + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetMaxPcieLinkGeneration(nvmlDevice_t device, unsigned int *maxLinkGen); + +/** + * Retrieves the maximum PCIe link width possible with this device and system + * + * I.E. for a device with a 16x PCIe bus width attached to a 8x PCIe system bus this function will report + * a max link width of 8. + * + * For Fermi &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param maxLinkWidth Reference in which to return the max PCIe link generation + * + * @return + * - \ref NVML_SUCCESS if \a maxLinkWidth has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a maxLinkWidth is null + * - \ref NVML_ERROR_NOT_SUPPORTED if PCIe link information is not available + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetMaxPcieLinkWidth(nvmlDevice_t device, unsigned int *maxLinkWidth); + +/** + * Retrieves the current PCIe link generation + * + * For Fermi &tm; or newer fully supported devices. 
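+ *
+ * An illustrative sketch combining the current and maximum link-generation queries, assuming a
+ * valid \a device handle:
+ * \code
+ * unsigned int currGen = 0, maxGen = 0;
+ * if (nvmlDeviceGetCurrPcieLinkGeneration(device, &currGen) == NVML_SUCCESS &&
+ *     nvmlDeviceGetMaxPcieLinkGeneration(device, &maxGen) == NVML_SUCCESS)
+ *     printf("PCIe generation: %u (max supported: %u)\n", currGen, maxGen);
+ * \endcode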
+ * + * @param device The identifier of the target device + * @param currLinkGen Reference in which to return the current PCIe link generation + * + * @return + * - \ref NVML_SUCCESS if \a currLinkGen has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a currLinkGen is null + * - \ref NVML_ERROR_NOT_SUPPORTED if PCIe link information is not available + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetCurrPcieLinkGeneration(nvmlDevice_t device, unsigned int *currLinkGen); + +/** + * Retrieves the current PCIe link width + * + * For Fermi &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param currLinkWidth Reference in which to return the current PCIe link generation + * + * @return + * - \ref NVML_SUCCESS if \a currLinkWidth has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a currLinkWidth is null + * - \ref NVML_ERROR_NOT_SUPPORTED if PCIe link information is not available + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetCurrPcieLinkWidth(nvmlDevice_t device, unsigned int *currLinkWidth); + +/** + * Retrieve PCIe utilization information. + * This function is querying a byte counter over a 20ms interval and thus is the + * PCIe throughput over that interval. + * + * For Maxwell &tm; or newer fully supported devices. + * + * This method is not supported in virtual machines running virtual GPU (vGPU). + * + * @param device The identifier of the target device + * @param counter The specific counter that should be queried \ref nvmlPcieUtilCounter_t + * @param value Reference in which to return throughput in KB/s + * + * @return + * - \ref NVML_SUCCESS if \a value has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a counter is invalid, or \a value is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetPcieThroughput(nvmlDevice_t device, nvmlPcieUtilCounter_t counter, unsigned int *value); + +/** + * Retrieve the PCIe replay counter. + * + * For Kepler &tm; or newer fully supported devices. 
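+ *
+ * A minimal usage sketch, assuming a valid \a device handle:
+ * \code
+ * unsigned int replays = 0;
+ * if (nvmlDeviceGetPcieReplayCounter(device, &replays) == NVML_SUCCESS)
+ *     printf("PCIe replay counter: %u\n", replays);
+ * \endcode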
+ * + * @param device The identifier of the target device + * @param value Reference in which to return the counter's value + * + * @return + * - \ref NVML_SUCCESS if \a value and \a rollover have been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a value or \a rollover are NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetPcieReplayCounter(nvmlDevice_t device, unsigned int *value); + +/** + * Retrieves the current clock speeds for the device. + * + * For Fermi &tm; or newer fully supported devices. + * + * See \ref nvmlClockType_t for details on available clock information. + * + * @param device The identifier of the target device + * @param type Identify which clock domain to query + * @param clock Reference in which to return the clock speed in MHz + * + * @return + * - \ref NVML_SUCCESS if \a clock has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clock is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device cannot report the specified clock + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetClockInfo(nvmlDevice_t device, nvmlClockType_t type, unsigned int *clock); + +/** + * Retrieves the maximum clock speeds for the device. + * + * For Fermi &tm; or newer fully supported devices. + * + * See \ref nvmlClockType_t for details on available clock information. + * + * \note On GPUs from Fermi family current P0 clocks (reported by \ref nvmlDeviceGetClockInfo) can differ from max clocks + * by few MHz. + * + * @param device The identifier of the target device + * @param type Identify which clock domain to query + * @param clock Reference in which to return the clock speed in MHz + * + * @return + * - \ref NVML_SUCCESS if \a clock has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clock is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device cannot report the specified clock + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetMaxClockInfo(nvmlDevice_t device, nvmlClockType_t type, unsigned int *clock); + +/** + * Retrieves the current setting of a clock that applications will use unless an overspec situation occurs. + * Can be changed using \ref nvmlDeviceSetApplicationsClocks. + * + * For Kepler &tm; or newer fully supported devices. 
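+ *
+ * An illustrative sketch (NVML_CLOCK_MEM and NVML_CLOCK_GRAPHICS are assumed members of
+ * \ref nvmlClockType_t):
+ * \code
+ * unsigned int memMHz = 0, gfxMHz = 0;
+ * if (nvmlDeviceGetApplicationsClock(device, NVML_CLOCK_MEM, &memMHz) == NVML_SUCCESS &&
+ *     nvmlDeviceGetApplicationsClock(device, NVML_CLOCK_GRAPHICS, &gfxMHz) == NVML_SUCCESS)
+ *     printf("Applications clocks: %u MHz memory, %u MHz graphics\n", memMHz, gfxMHz);
+ * \endcode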
+ * + * @param device The identifier of the target device + * @param clockType Identify which clock domain to query + * @param clockMHz Reference in which to return the clock in MHz + * + * @return + * - \ref NVML_SUCCESS if \a clockMHz has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clockMHz is NULL or \a clockType is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetApplicationsClock(nvmlDevice_t device, nvmlClockType_t clockType, unsigned int *clockMHz); + +/** + * Retrieves the default applications clock that GPU boots with or + * defaults to after \ref nvmlDeviceResetApplicationsClocks call. + * + * For Kepler &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param clockType Identify which clock domain to query + * @param clockMHz Reference in which to return the default clock in MHz + * + * @return + * - \ref NVML_SUCCESS if \a clockMHz has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clockMHz is NULL or \a clockType is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * \see nvmlDeviceGetApplicationsClock + */ +nvmlReturn_t DECLDIR nvmlDeviceGetDefaultApplicationsClock(nvmlDevice_t device, nvmlClockType_t clockType, unsigned int *clockMHz); + +/** + * Resets the application clock to the default value + * + * This is the applications clock that will be used after system reboot or driver reload. + * Default value is constant, but the current value can be changed using \ref nvmlDeviceSetApplicationsClocks. + * + * On Pascal and newer hardware, if clocks were previously locked with \ref nvmlDeviceSetApplicationsClocks, + * this call will unlock clocks. This returns clocks to their default behavior of automatically boosting above + * base clocks as thermal limits allow. + * + * @see nvmlDeviceGetApplicationsClock + * @see nvmlDeviceSetApplicationsClocks + * + * For Fermi &tm; or newer non-GeForce fully supported devices and Maxwell or newer GeForce devices. + * + * @param device The identifier of the target device + * + * @return + * - \ref NVML_SUCCESS if new settings were successfully set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceResetApplicationsClocks(nvmlDevice_t device); + +/** + * Retrieves the clock speed for the clock specified by the clock type and clock ID. + * + * For Kepler &tm; or newer fully supported devices.
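+ *
+ * An illustrative sketch (NVML_CLOCK_SM and NVML_CLOCK_ID_CURRENT are assumed members of
+ * \ref nvmlClockType_t and \ref nvmlClockId_t respectively):
+ * \code
+ * unsigned int smMHz = 0;
+ * if (nvmlDeviceGetClock(device, NVML_CLOCK_SM, NVML_CLOCK_ID_CURRENT, &smMHz) == NVML_SUCCESS)
+ *     printf("Current SM clock: %u MHz\n", smMHz);
+ * \endcode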
+ * + * @param device The identifier of the target device + * @param clockType Identify which clock domain to query + * @param clockId Identify which clock in the domain to query + * @param clockMHz Reference in which to return the clock in MHz + * + * @return + * - \ref NVML_SUCCESS if \a clockMHz has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clockMHz is NULL or \a clockType is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetClock(nvmlDevice_t device, nvmlClockType_t clockType, nvmlClockId_t clockId, unsigned int *clockMHz); + +/** + * Retrieves the customer defined maximum boost clock speed specified by the given clock type. + * + * For Pascal &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param clockType Identify which clock domain to query + * @param clockMHz Reference in which to return the clock in MHz + * + * @return + * - \ref NVML_SUCCESS if \a clockMHz has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clockMHz is NULL or \a clockType is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device or the \a clockType on this device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetMaxCustomerBoostClock(nvmlDevice_t device, nvmlClockType_t clockType, unsigned int *clockMHz); + +/** + * Retrieves the list of possible memory clocks that can be used as an argument for \ref nvmlDeviceSetApplicationsClocks. + * + * For Kepler &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param count Reference in which to provide the \a clocksMHz array size, and + * to return the number of elements + * @param clocksMHz Reference in which to return the clock in MHz + * + * @return + * - \ref NVML_SUCCESS if \a count and \a clocksMHz have been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a count is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a count is too small (\a count is set to the number of + * required elements) + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceSetApplicationsClocks + * @see nvmlDeviceGetSupportedGraphicsClocks + */ +nvmlReturn_t DECLDIR nvmlDeviceGetSupportedMemoryClocks(nvmlDevice_t device, unsigned int *count, unsigned int *clocksMHz); + +/** + * Retrieves the list of possible graphics clocks that can be used as an argument for \ref nvmlDeviceSetApplicationsClocks. + * + * For Kepler &tm; or newer fully supported devices. 
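+ *
+ * An illustrative sketch of the count/array pattern shared with \ref nvmlDeviceGetSupportedMemoryClocks
+ * (the array capacity is arbitrary, and memMHz is assumed to hold a memory clock previously returned by
+ * \ref nvmlDeviceGetSupportedMemoryClocks):
+ * \code
+ * unsigned int clocks[128];
+ * unsigned int count = 128;                  // in: capacity of clocks[]; out: number of entries
+ * nvmlReturn_t st = nvmlDeviceGetSupportedGraphicsClocks(device, memMHz, &count, clocks);
+ * if (st == NVML_SUCCESS) {
+ *     for (unsigned int i = 0; i < count; i++)
+ *         printf("Supported graphics clock: %u MHz\n", clocks[i]);
+ * } else if (st == NVML_ERROR_INSUFFICIENT_SIZE) {
+ *     printf("Array too small, %u entries required\n", count);
+ * }
+ * \endcode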
+ * + * @param device The identifier of the target device + * @param memoryClockMHz Memory clock for which to return possible graphics clocks + * @param count Reference in which to provide the \a clocksMHz array size, and + * to return the number of elements + * @param clocksMHz Reference in which to return the clocks in MHz + * + * @return + * - \ref NVML_SUCCESS if \a count and \a clocksMHz have been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_NOT_FOUND if the specified \a memoryClockMHz is not a supported frequency + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clocksMHz is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a count is too small + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceSetApplicationsClocks + * @see nvmlDeviceGetSupportedMemoryClocks + */ +nvmlReturn_t DECLDIR nvmlDeviceGetSupportedGraphicsClocks(nvmlDevice_t device, unsigned int memoryClockMHz, unsigned int *count, unsigned int *clocksMHz); + +/** + * Retrieve the current state of Auto Boosted clocks on a device and store it in \a isEnabled + * + * For Kepler &tm; or newer fully supported devices. + * + * Auto Boosted clocks are enabled by default on some hardware, allowing the GPU to run at higher clock rates + * to maximize performance as thermal limits allow. + * + * On Pascal and newer hardware, Auto Boosted clocks are controlled through application clocks. + * Use \ref nvmlDeviceSetApplicationsClocks and \ref nvmlDeviceResetApplicationsClocks to control Auto Boost + * behavior. + * + * @param device The identifier of the target device + * @param isEnabled Where to store the current state of Auto Boosted clocks of the target device + * @param defaultIsEnabled Where to store the default Auto Boosted clocks behavior of the target device that the device will + * revert to when no applications are using the GPU + * + * @return + * - \ref NVML_SUCCESS If \a isEnabled has been set with the Auto Boosted clocks state of \a device + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a isEnabled is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support Auto Boosted clocks + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + */ +nvmlReturn_t DECLDIR nvmlDeviceGetAutoBoostedClocksEnabled(nvmlDevice_t device, nvmlEnableState_t *isEnabled, nvmlEnableState_t *defaultIsEnabled); + +/** + * Try to set the current state of Auto Boosted clocks on a device. + * + * For Kepler &tm; or newer fully supported devices. + * + * Auto Boosted clocks are enabled by default on some hardware, allowing the GPU to run at higher clock rates + * to maximize performance as thermal limits allow. Auto Boosted clocks should be disabled if fixed clock + * rates are desired. + * + * Non-root users may use this API by default but can be restricted by root from using this API by calling + * \ref nvmlDeviceSetAPIRestriction with apiType=NVML_RESTRICTED_API_SET_AUTO_BOOSTED_CLOCKS. + * Note: Persistence Mode is required to modify current Auto Boost settings, therefore it must be enabled.
+ * + * On Pascal and newer hardware, Auto Boosted clocks are controlled through application clocks. + * Use \ref nvmlDeviceSetApplicationsClocks and \ref nvmlDeviceResetApplicationsClocks to control Auto Boost + * behavior. + * + * @param device The identifier of the target device + * @param enabled What state to try to set Auto Boosted clocks of the target device to + * + * @return + * - \ref NVML_SUCCESS If the Auto Boosted clocks were successfully set to the state specified by \a enabled + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support Auto Boosted clocks + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + */ +nvmlReturn_t DECLDIR nvmlDeviceSetAutoBoostedClocksEnabled(nvmlDevice_t device, nvmlEnableState_t enabled); + +/** + * Try to set the default state of Auto Boosted clocks on a device. This is the default state that Auto Boosted clocks will + * return to when no compute running processes (e.g. CUDA application which have an active context) are running + * + * For Kepler &tm; or newer non-GeForce fully supported devices and Maxwell or newer GeForce devices. + * Requires root/admin permissions. + * + * Auto Boosted clocks are enabled by default on some hardware, allowing the GPU to run at higher clock rates + * to maximize performance as thermal limits allow. Auto Boosted clocks should be disabled if fixed clock + * rates are desired. + * + * On Pascal and newer hardware, Auto Boosted clocks are controlled through application clocks. + * Use \ref nvmlDeviceSetApplicationsClocks and \ref nvmlDeviceResetApplicationsClocks to control Auto Boost + * behavior. + * + * @param device The identifier of the target device + * @param enabled What state to try to set default Auto Boosted clocks of the target device to + * @param flags Flags that change the default behavior. Currently Unused. + * + * @return + * - \ref NVML_SUCCESS If the Auto Boosted clock's default state was successfully set to the state specified by \a enabled + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_NO_PERMISSION If the calling user does not have permission to change Auto Boosted clock's default state. + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support Auto Boosted clocks + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + */ +nvmlReturn_t DECLDIR nvmlDeviceSetDefaultAutoBoostedClocksEnabled(nvmlDevice_t device, nvmlEnableState_t enabled, unsigned int flags); + + +/** + * Retrieves the intended operating speed of the device's fan. + * + * Note: The reported speed is the intended fan speed. If the fan is physically blocked and unable to spin, the + * output will not match the actual fan speed. + * + * For all discrete products with dedicated fans. + * + * The fan speed is expressed as a percent of the maximum, i.e. full speed is 100%. 
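+ *
+ * A minimal usage sketch, assuming a valid \a device handle:
+ * \code
+ * unsigned int speedPct = 0;
+ * nvmlReturn_t st = nvmlDeviceGetFanSpeed(device, &speedPct);
+ * if (st == NVML_SUCCESS)
+ *     printf("Fan speed: %u%%\n", speedPct);
+ * else if (st == NVML_ERROR_NOT_SUPPORTED)
+ *     printf("Device has no dedicated fan\n");
+ * \endcode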
+ * + * @param device The identifier of the target device + * @param speed Reference in which to return the fan speed percentage + * + * @return + * - \ref NVML_SUCCESS if \a speed has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a speed is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have a fan + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetFanSpeed(nvmlDevice_t device, unsigned int *speed); + +/** + * Retrieves the current temperature readings for the device, in degrees C. + * + * For all products. + * + * See \ref nvmlTemperatureSensors_t for details on available temperature sensors. + * + * @param device The identifier of the target device + * @param sensorType Flag that indicates which sensor reading to retrieve + * @param temp Reference in which to return the temperature reading + * + * @return + * - \ref NVML_SUCCESS if \a temp has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a sensorType is invalid or \a temp is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have the specified sensor + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetTemperature(nvmlDevice_t device, nvmlTemperatureSensors_t sensorType, unsigned int *temp); + +/** + * Retrieves the temperature threshold for the GPU with the specified threshold type in degrees C. + * + * For Kepler &tm; or newer fully supported devices. + * + * See \ref nvmlTemperatureThresholds_t for details on available temperature thresholds. + * + * @param device The identifier of the target device + * @param thresholdType The type of threshold value queried + * @param temp Reference in which to return the temperature reading + * @return + * - \ref NVML_SUCCESS if \a temp has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a thresholdType is invalid or \a temp is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have a temperature sensor or is unsupported + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetTemperatureThreshold(nvmlDevice_t device, nvmlTemperatureThresholds_t thresholdType, unsigned int *temp); + +/** + * Retrieves the current performance state for the device. + * + * For Fermi &tm; or newer fully supported devices. + * + * See \ref nvmlPstates_t for details on allowed performance states. 
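+ *
+ * An illustrative sketch (it assumes the \ref nvmlPstates_t enumerators map directly to the
+ * P-state number, e.g. NVML_PSTATE_0 == 0):
+ * \code
+ * nvmlPstates_t pState;
+ * if (nvmlDeviceGetPerformanceState(device, &pState) == NVML_SUCCESS)
+ *     printf("Performance state: P%d\n", (int)pState);
+ * \endcode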
+ * + * @param device The identifier of the target device + * @param pState Reference in which to return the performance state reading + * + * @return + * - \ref NVML_SUCCESS if \a pState has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pState is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetPerformanceState(nvmlDevice_t device, nvmlPstates_t *pState); + +/** + * Retrieves current clocks throttling reasons. + * + * For all fully supported products. + * + * \note More than one bit can be enabled at the same time. Multiple reasons can be affecting clocks at once. + * + * @param device The identifier of the target device + * @param clocksThrottleReasons Reference in which to return bitmask of active clocks throttle + * reasons + * + * @return + * - \ref NVML_SUCCESS if \a clocksThrottleReasons has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clocksThrottleReasons is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlClocksThrottleReasons + * @see nvmlDeviceGetSupportedClocksThrottleReasons + */ +nvmlReturn_t DECLDIR nvmlDeviceGetCurrentClocksThrottleReasons(nvmlDevice_t device, unsigned long long *clocksThrottleReasons); + +/** + * Retrieves bitmask of supported clocks throttle reasons that can be returned by + * \ref nvmlDeviceGetCurrentClocksThrottleReasons + * + * For all fully supported products. + * + * This method is not supported in virtual machines running virtual GPU (vGPU). + * + * @param device The identifier of the target device + * @param supportedClocksThrottleReasons Reference in which to return bitmask of supported + * clocks throttle reasons + * + * @return + * - \ref NVML_SUCCESS if \a supportedClocksThrottleReasons has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a supportedClocksThrottleReasons is NULL + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlClocksThrottleReasons + * @see nvmlDeviceGetCurrentClocksThrottleReasons + */ +nvmlReturn_t DECLDIR nvmlDeviceGetSupportedClocksThrottleReasons(nvmlDevice_t device, unsigned long long *supportedClocksThrottleReasons); + +/** + * Deprecated: Use \ref nvmlDeviceGetPerformanceState. This function exposes an incorrect generalization. + * + * Retrieve the current performance state for the device. + * + * For Fermi &tm; or newer fully supported devices. + * + * See \ref nvmlPstates_t for details on allowed performance states. 
+ * + * @param device The identifier of the target device + * @param pState Reference in which to return the performance state reading + * + * @return + * - \ref NVML_SUCCESS if \a pState has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pState is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetPowerState(nvmlDevice_t device, nvmlPstates_t *pState); + +/** + * This API has been deprecated. + * + * Retrieves the power management mode associated with this device. + * + * For products from the Fermi family. + * - Requires \a NVML_INFOROM_POWER version 3.0 or higher. + * + * For products from the Kepler or newer families. + * - Does not require \a NVML_INFOROM_POWER object. + * + * This flag indicates whether any power management algorithm is currently active on the device. An + * enabled state does not necessarily mean the device is being actively throttled -- only that + * the driver will do so if the appropriate conditions are met. + * + * See \ref nvmlEnableState_t for details on allowed modes. + * + * @param device The identifier of the target device + * @param mode Reference in which to return the current power management mode + * + * @return + * - \ref NVML_SUCCESS if \a mode has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetPowerManagementMode(nvmlDevice_t device, nvmlEnableState_t *mode); + +/** + * Retrieves the power management limit associated with this device. + * + * For Fermi &tm; or newer fully supported devices. + * + * The power limit defines the upper boundary for the card's power draw. If + * the card's total power draw reaches this limit the power management algorithm kicks in. + * + * This reading is only available if power management mode is supported. + * See \ref nvmlDeviceGetPowerManagementMode. + * + * @param device The identifier of the target device + * @param limit Reference in which to return the power management limit in milliwatts + * + * @return + * - \ref NVML_SUCCESS if \a limit has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a limit is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetPowerManagementLimit(nvmlDevice_t device, unsigned int *limit); + +/** + * Retrieves information about possible values of power management limits on this device. + * + * For Kepler &tm; or newer fully supported devices.
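+ *
+ * An illustrative sketch combining the constraint query with the current limit, assuming a
+ * valid \a device handle:
+ * \code
+ * unsigned int minMw = 0, maxMw = 0, curMw = 0;
+ * if (nvmlDeviceGetPowerManagementLimitConstraints(device, &minMw, &maxMw) == NVML_SUCCESS &&
+ *     nvmlDeviceGetPowerManagementLimit(device, &curMw) == NVML_SUCCESS)
+ *     printf("Power limit: %u mW (allowed range %u-%u mW)\n", curMw, minMw, maxMw);
+ * \endcode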
+ * + * @param device The identifier of the target device + * @param minLimit Reference in which to return the minimum power management limit in milliwatts + * @param maxLimit Reference in which to return the maximum power management limit in milliwatts + * + * @return + * - \ref NVML_SUCCESS if \a minLimit and \a maxLimit have been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a minLimit or \a maxLimit is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceSetPowerManagementLimit + */ +nvmlReturn_t DECLDIR nvmlDeviceGetPowerManagementLimitConstraints(nvmlDevice_t device, unsigned int *minLimit, unsigned int *maxLimit); + +/** + * Retrieves default power management limit on this device, in milliwatts. + * Default power management limit is a power management limit that the device boots with. + * + * For Kepler &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param defaultLimit Reference in which to return the default power management limit in milliwatts + * + * @return + * - \ref NVML_SUCCESS if \a defaultLimit has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a defaultLimit is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetPowerManagementDefaultLimit(nvmlDevice_t device, unsigned int *defaultLimit); + +/** + * Retrieves power usage for this GPU in milliwatts and its associated circuitry (e.g. memory) + * + * For Fermi &tm; or newer fully supported devices. + * + * On Fermi and Kepler GPUs the reading is accurate to within +/- 5% of current power draw. + * + * It is only available if power management mode is supported. See \ref nvmlDeviceGetPowerManagementMode. + * + * @param device The identifier of the target device + * @param power Reference in which to return the power usage information + * + * @return + * - \ref NVML_SUCCESS if \a power has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a power is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support power readings + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetPowerUsage(nvmlDevice_t device, unsigned int *power); + +/** + * Retrieves total energy consumption for this GPU in millijoules (mJ) since the driver was last reloaded + * + * For newer than Pascal &tm; fully supported devices. 
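+ *
+ * An illustrative sketch pairing instantaneous power with cumulative energy, assuming a valid
+ * \a device handle:
+ * \code
+ * unsigned int powerMw = 0;
+ * unsigned long long energyMj = 0;
+ * if (nvmlDeviceGetPowerUsage(device, &powerMw) == NVML_SUCCESS &&
+ *     nvmlDeviceGetTotalEnergyConsumption(device, &energyMj) == NVML_SUCCESS)
+ *     printf("Power: %u mW, energy since driver load: %llu mJ\n", powerMw, energyMj);
+ * \endcode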
+ * + * @param device The identifier of the target device + * @param energy Reference in which to return the energy consumption information + * + * @return + * - \ref NVML_SUCCESS if \a energy has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a energy is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support energy readings + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetTotalEnergyConsumption(nvmlDevice_t device, unsigned long long *energy); + +/** + * Get the effective power limit that the driver enforces after taking into account all limiters + * + * Note: This can be different from the \ref nvmlDeviceGetPowerManagementLimit if other limits are set elsewhere + * This includes the out of band power limit interface + * + * For Kepler &tm; or newer fully supported devices. + * + * @param device The device to communicate with + * @param limit Reference in which to return the power management limit in milliwatts + * + * @return + * - \ref NVML_SUCCESS if \a limit has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a limit is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetEnforcedPowerLimit(nvmlDevice_t device, unsigned int *limit); + +/** + * Retrieves the current GOM and pending GOM (the one that GPU will switch to after reboot). + * + * For GK110 M-class and X-class Tesla &tm; products from the Kepler family. + * Modes \ref NVML_GOM_LOW_DP and \ref NVML_GOM_ALL_ON are supported on fully supported GeForce products. + * Not supported on Quadro ® and Tesla &tm; C-class products. + * + * @param device The identifier of the target device + * @param current Reference in which to return the current GOM + * @param pending Reference in which to return the pending GOM + * + * @return + * - \ref NVML_SUCCESS if \a mode has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a current or \a pending is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlGpuOperationMode_t + * @see nvmlDeviceSetGpuOperationMode + */ +nvmlReturn_t DECLDIR nvmlDeviceGetGpuOperationMode(nvmlDevice_t device, nvmlGpuOperationMode_t *current, nvmlGpuOperationMode_t *pending); + +/** + * Retrieves the amount of used, free and total memory available on the device, in bytes. + * + * For all products. + * + * Enabling ECC reduces the amount of total available memory, due to the extra required parity bits. + * Under WDDM most device memory is allocated and managed on startup by Windows. + * + * Under Linux and Windows TCC, the reported amount of used memory is equal to the sum of memory allocated + * by all active channels on the device. 
+ * + * See \ref nvmlMemory_t for details on available memory info. + * + * @param device The identifier of the target device + * @param memory Reference in which to return the memory information + * + * @return + * - \ref NVML_SUCCESS if \a memory has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a memory is NULL + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetMemoryInfo(nvmlDevice_t device, nvmlMemory_t *memory); + +/** + * Retrieves the current compute mode for the device. + * + * For all products. + * + * See \ref nvmlComputeMode_t for details on allowed compute modes. + * + * @param device The identifier of the target device + * @param mode Reference in which to return the current compute mode + * + * @return + * - \ref NVML_SUCCESS if \a mode has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceSetComputeMode() + */ +nvmlReturn_t DECLDIR nvmlDeviceGetComputeMode(nvmlDevice_t device, nvmlComputeMode_t *mode); + +/** + * Retrieves the CUDA compute capability of the device. + * + * For all products. + * + * Returns the major and minor compute capability version numbers of the + * device. The major and minor versions are equivalent to the + * CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR and + * CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR attributes that would be + * returned by CUDA's cuDeviceGetAttribute(). + * + * @param device The identifier of the target device + * @param major Reference in which to return the major CUDA compute capability + * @param minor Reference in which to return the minor CUDA compute capability + * + * @return + * - \ref NVML_SUCCESS if \a major and \a minor have been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a major or \a minor are NULL + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetCudaComputeCapability(nvmlDevice_t device, int *major, int *minor); + +/** + * Retrieves the current and pending ECC modes for the device. + * + * For Fermi &tm; or newer fully supported devices. + * Only applicable to devices with ECC. + * Requires \a NVML_INFOROM_ECC version 1.0 or higher. + * + * Changing ECC modes requires a reboot. The "pending" ECC mode refers to the target mode following + * the next reboot. + * + * See \ref nvmlEnableState_t for details on allowed modes. 
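+ *
+ * A minimal sketch, assuming \a dev is a valid device handle:
+ * @code
+ * nvmlEnableState_t current, pending;
+ * nvmlReturn_t r = nvmlDeviceGetEccMode(dev, &current, &pending);
+ * if (r == NVML_SUCCESS)
+ *     printf("ECC: current=%s, pending=%s\n",
+ *            current == NVML_FEATURE_ENABLED ? "on" : "off",
+ *            pending == NVML_FEATURE_ENABLED ? "on" : "off");
+ * else if (r == NVML_ERROR_NOT_SUPPORTED)
+ *     printf("device does not support ECC\n");
+ * @endcode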
+ * + * @param device The identifier of the target device + * @param current Reference in which to return the current ECC mode + * @param pending Reference in which to return the pending ECC mode + * + * @return + * - \ref NVML_SUCCESS if \a current and \a pending have been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or either \a current or \a pending is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceSetEccMode() + */ +nvmlReturn_t DECLDIR nvmlDeviceGetEccMode(nvmlDevice_t device, nvmlEnableState_t *current, nvmlEnableState_t *pending); + +/** + * Retrieves the device boardId from 0-N. + * Devices with the same boardId indicate GPUs connected to the same PLX. Use in conjunction with + * \ref nvmlDeviceGetMultiGpuBoard() to decide if they are on the same board as well. + * The boardId returned is a unique ID for the current configuration. Uniqueness and ordering across + * reboots and system configurations is not guaranteed (i.e. if a Tesla K40c returns 0x100 and + * the two GPUs on a Tesla K10 in the same system returns 0x200 it is not guaranteed they will + * always return those values but they will always be different from each other). + * + * + * For Fermi &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param boardId Reference in which to return the device's board ID + * + * @return + * - \ref NVML_SUCCESS if \a boardId has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a boardId is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetBoardId(nvmlDevice_t device, unsigned int *boardId); + +/** + * Retrieves whether the device is on a Multi-GPU Board + * Devices that are on multi-GPU boards will set \a multiGpuBool to a non-zero value. + * + * For Fermi &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param multiGpuBool Reference in which to return a zero or non-zero value + * to indicate whether the device is on a multi GPU board + * + * @return + * - \ref NVML_SUCCESS if \a multiGpuBool has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a multiGpuBool is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetMultiGpuBoard(nvmlDevice_t device, unsigned int *multiGpuBool); + +/** + * Retrieves the total ECC error counts for the device. + * + * For Fermi &tm; or newer fully supported devices. + * Only applicable to devices with ECC. + * Requires \a NVML_INFOROM_ECC version 1.0 or higher. + * Requires ECC Mode to be enabled. 
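+ *
+ * A minimal sketch reading the volatile uncorrected total described below, assuming \a dev is a valid
+ * device handle with ECC enabled:
+ * @code
+ * unsigned long long eccCount = 0;
+ * if (nvmlDeviceGetTotalEccErrors(dev, NVML_MEMORY_ERROR_TYPE_UNCORRECTED,
+ *                                 NVML_VOLATILE_ECC, &eccCount) == NVML_SUCCESS)
+ *     printf("uncorrected ECC errors since driver load: %llu\n", eccCount);
+ * @endcode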
+ * + * The total error count is the sum of errors across each of the separate memory systems, i.e. the total set of + * errors across the entire device. + * + * See \ref nvmlMemoryErrorType_t for a description of available error types.\n + * See \ref nvmlEccCounterType_t for a description of available counter types. + * + * @param device The identifier of the target device + * @param errorType Flag that specifies the type of the errors. + * @param counterType Flag that specifies the counter-type of the errors. + * @param eccCounts Reference in which to return the specified ECC errors + * + * @return + * - \ref NVML_SUCCESS if \a eccCounts has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a errorType or \a counterType is invalid, or \a eccCounts is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceClearEccErrorCounts() + */ +nvmlReturn_t DECLDIR nvmlDeviceGetTotalEccErrors(nvmlDevice_t device, nvmlMemoryErrorType_t errorType, nvmlEccCounterType_t counterType, unsigned long long *eccCounts); + +/** + * Retrieves the detailed ECC error counts for the device. + * + * @deprecated This API supports only a fixed set of ECC error locations + * On different GPU architectures different locations are supported + * See \ref nvmlDeviceGetMemoryErrorCounter + * + * For Fermi &tm; or newer fully supported devices. + * Only applicable to devices with ECC. + * Requires \a NVML_INFOROM_ECC version 2.0 or higher to report aggregate location-based ECC counts. + * Requires \a NVML_INFOROM_ECC version 1.0 or higher to report all other ECC counts. + * Requires ECC Mode to be enabled. + * + * Detailed errors provide separate ECC counts for specific parts of the memory system. + * + * Reports zero for unsupported ECC error counters when a subset of ECC error counters are supported. + * + * See \ref nvmlMemoryErrorType_t for a description of available bit types.\n + * See \ref nvmlEccCounterType_t for a description of available counter types.\n + * See \ref nvmlEccErrorCounts_t for a description of provided detailed ECC counts. + * + * @param device The identifier of the target device + * @param errorType Flag that specifies the type of the errors. + * @param counterType Flag that specifies the counter-type of the errors. + * @param eccCounts Reference in which to return the specified ECC errors + * + * @return + * - \ref NVML_SUCCESS if \a eccCounts has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a errorType or \a counterType is invalid, or \a eccCounts is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceClearEccErrorCounts() + */ +nvmlReturn_t DECLDIR nvmlDeviceGetDetailedEccErrors(nvmlDevice_t device, nvmlMemoryErrorType_t errorType, nvmlEccCounterType_t counterType, nvmlEccErrorCounts_t *eccCounts); + +/** + * Retrieves the requested memory error counter for the device. + * + * For Fermi &tm; or newer fully supported devices. 
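+ *
+ * A minimal sketch querying the aggregate corrected-error count for device (FB) memory, assuming \a dev
+ * is a valid device handle that meets the requirements listed below:
+ * @code
+ * unsigned long long fbCorrected = 0;
+ * if (nvmlDeviceGetMemoryErrorCounter(dev, NVML_MEMORY_ERROR_TYPE_CORRECTED, NVML_AGGREGATE_ECC,
+ *                                     NVML_MEMORY_LOCATION_DEVICE_MEMORY, &fbCorrected) == NVML_SUCCESS)
+ *     printf("aggregate corrected ECC errors in FB memory: %llu\n", fbCorrected);
+ * @endcode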
+ * Requires \a NVML_INFOROM_ECC version 2.0 or higher to report aggregate location-based memory error counts. + * Requires \a NVML_INFOROM_ECC version 1.0 or higher to report all other memory error counts. + * + * Only applicable to devices with ECC. + * + * Requires ECC Mode to be enabled. + * + * See \ref nvmlMemoryErrorType_t for a description of available memory error types.\n + * See \ref nvmlEccCounterType_t for a description of available counter types.\n + * See \ref nvmlMemoryLocation_t for a description of available counter locations.\n + * + * @param device The identifier of the target device + * @param errorType Flag that specifies the type of error. + * @param counterType Flag that specifies the counter-type of the errors. + * @param locationType Specifies the location of the counter. + * @param count Reference in which to return the ECC counter + * + * @return + * - \ref NVML_SUCCESS if \a count has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a errorType, \a counterType or \a locationType is + * invalid, or \a count is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support ECC error reporting in the specified memory + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetMemoryErrorCounter(nvmlDevice_t device, nvmlMemoryErrorType_t errorType, + nvmlEccCounterType_t counterType, + nvmlMemoryLocation_t locationType, unsigned long long *count); + +/** + * Retrieves the current utilization rates for the device's major subsystems. + * + * For Fermi &tm; or newer fully supported devices. + * + * See \ref nvmlUtilization_t for details on available utilization rates. + * + * \note During driver initialization when ECC is enabled one can see high GPU and Memory Utilization readings. + * This is caused by ECC Memory Scrubbing mechanism that is performed during driver initialization. + * + * @param device The identifier of the target device + * @param utilization Reference in which to return the utilization information + * + * @return + * - \ref NVML_SUCCESS if \a utilization has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a utilization is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetUtilizationRates(nvmlDevice_t device, nvmlUtilization_t *utilization); + +/** + * Retrieves the current utilization and sampling size in microseconds for the Encoder + * + * For Kepler &tm; or newer fully supported devices.
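+ *
+ * A minimal sketch sampling overall GPU/memory utilization together with encoder utilization, assuming
+ * \a dev is a valid device handle:
+ * @code
+ * nvmlUtilization_t util;
+ * unsigned int encUtil = 0, samplingUs = 0;
+ * if (nvmlDeviceGetUtilizationRates(dev, &util) == NVML_SUCCESS)
+ *     printf("gpu %u%%, memory %u%%\n", util.gpu, util.memory);
+ * if (nvmlDeviceGetEncoderUtilization(dev, &encUtil, &samplingUs) == NVML_SUCCESS)
+ *     printf("encoder %u%% (sampled over %u us)\n", encUtil, samplingUs);
+ * @endcode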
+ * + * @param device The identifier of the target device + * @param utilization Reference to an unsigned int for encoder utilization info + * @param samplingPeriodUs Reference to an unsigned int for the sampling period in US + * + * @return + * - \ref NVML_SUCCESS if \a utilization has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a utilization is NULL, or \a samplingPeriodUs is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetEncoderUtilization(nvmlDevice_t device, unsigned int *utilization, unsigned int *samplingPeriodUs); + +/** + * Retrieves the current capacity of the device's encoder, in macroblocks per second. + * + * For Maxwell &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param encoderQueryType Type of encoder to query + * @param encoderCapacity Reference to an unsigned int for the encoder capacity + * + * @return + * - \ref NVML_SUCCESS if \a encoderCapacity is fetched + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a encoderCapacity is NULL, or \a device or \a encoderQueryType + * are invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if device does not support the encoder specified in \a encodeQueryType + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetEncoderCapacity (nvmlDevice_t device, nvmlEncoderType_t encoderQueryType, unsigned int *encoderCapacity); + +/** + * Retrieves the current encoder statistics for a given device. + * + * For Maxwell &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param sessionCount Reference to an unsigned int for count of active encoder sessions + * @param averageFps Reference to an unsigned int for trailing average FPS of all active sessions + * @param averageLatency Reference to an unsigned int for encode latency in microseconds + * + * @return + * - \ref NVML_SUCCESS if \a sessionCount, \a averageFps and \a averageLatency is fetched + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a sessionCount, or \a device or \a averageFps, + * or \a averageLatency is NULL + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetEncoderStats (nvmlDevice_t device, unsigned int *sessionCount, + unsigned int *averageFps, unsigned int *averageLatency); + +/** + * Retrieves information about active encoder sessions on a target device. + * + * An array of active encoder sessions is returned in the caller-supplied buffer pointed at by \a sessionInfos. The + * array elememt count is passed in \a sessionCount, and \a sessionCount is used to return the number of sessions + * written to the buffer. 
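+ *
+ * A minimal sketch of the size-query pattern described below, assuming \a dev is a valid device handle
+ * and malloc/free from <stdlib.h> are available:
+ * @code
+ * unsigned int sessionCount = 0;
+ * nvmlReturn_t r = nvmlDeviceGetEncoderSessions(dev, &sessionCount, NULL);   /* size query */
+ * if ((r == NVML_SUCCESS || r == NVML_ERROR_INSUFFICIENT_SIZE) && sessionCount > 0) {
+ *     nvmlEncoderSessionInfo_t *infos = malloc(sessionCount * sizeof(*infos));
+ *     if (infos && nvmlDeviceGetEncoderSessions(dev, &sessionCount, infos) == NVML_SUCCESS)
+ *         printf("%u active encoder session(s)\n", sessionCount);
+ *     free(infos);
+ * }
+ * @endcode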
+ * + * If the supplied buffer is not large enough to accomodate the active session array, the function returns + * NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlEncoderSessionInfo_t array required in \a sessionCount. + * To query the number of active encoder sessions, call this function with *sessionCount = 0. The code will return + * NVML_SUCCESS with number of active encoder sessions updated in *sessionCount. + * + * For Maxwell &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param sessionCount Reference to caller supplied array size, and returns the number of sessions. + * @param sessionInfos Reference in which to return the session information + * + * @return + * - \ref NVML_SUCCESS if \a sessionInfos is fetched + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a sessionCount is too small, array element count is returned in \a sessionCount + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a sessionCount is NULL. + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetEncoderSessions(nvmlDevice_t device, unsigned int *sessionCount, nvmlEncoderSessionInfo_t *sessionInfos); + +/** + * Retrieves the current utilization and sampling size in microseconds for the Decoder + * + * For Kepler &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param utilization Reference to an unsigned int for decoder utilization info + * @param samplingPeriodUs Reference to an unsigned int for the sampling period in US + * + * @return + * - \ref NVML_SUCCESS if \a utilization has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a utilization is NULL, or \a samplingPeriodUs is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetDecoderUtilization(nvmlDevice_t device, unsigned int *utilization, unsigned int *samplingPeriodUs); + +/** + * Retrieves the current and pending driver model for the device. + * + * For Fermi &tm; or newer fully supported devices. + * For windows only. + * + * On Windows platforms the device driver can run in either WDDM or WDM (TCC) mode. If a display is attached + * to the device it must run in WDDM mode. TCC mode is preferred if a display is not attached. + * + * See \ref nvmlDriverModel_t for details on available driver models. 
+ * + * @param device The identifier of the target device + * @param current Reference in which to return the current driver model + * @param pending Reference in which to return the pending driver model + * + * @return + * - \ref NVML_SUCCESS if either \a current and/or \a pending have been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or both \a current and \a pending are NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the platform is not windows + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceSetDriverModel() + */ +nvmlReturn_t DECLDIR nvmlDeviceGetDriverModel(nvmlDevice_t device, nvmlDriverModel_t *current, nvmlDriverModel_t *pending); + +/** + * Get VBIOS version of the device. + * + * For all products. + * + * The VBIOS version may change from time to time. It will not exceed 32 characters in length + * (including the NULL terminator). See \ref nvmlConstants::NVML_DEVICE_VBIOS_VERSION_BUFFER_SIZE. + * + * @param device The identifier of the target device + * @param version Reference to which to return the VBIOS version + * @param length The maximum allowed length of the string returned in \a version + * + * @return + * - \ref NVML_SUCCESS if \a version has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a version is NULL + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetVbiosVersion(nvmlDevice_t device, char *version, unsigned int length); + +/** + * Get Bridge Chip Information for all the bridge chips on the board. + * + * For all fully supported products. + * Only applicable to multi-GPU products. + * + * @param device The identifier of the target device + * @param bridgeHierarchy Reference to the returned bridge chip Hierarchy + * + * @return + * - \ref NVML_SUCCESS if bridge chip exists + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a bridgeInfo is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if bridge chip not supported on the device + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + */ +nvmlReturn_t DECLDIR nvmlDeviceGetBridgeChipInfo(nvmlDevice_t device, nvmlBridgeChipHierarchy_t *bridgeHierarchy); + +/** + * Get information about processes with a compute context on a device + * + * For Fermi &tm; or newer fully supported devices. + * + * This function returns information only about compute running processes (e.g. CUDA application which have + * active context). Any graphics applications (e.g. using OpenGL, DirectX) won't be listed by this function. + * + * To query the current number of running compute processes, call this function with *infoCount = 0. The + * return code will be NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if none are running. For this call + * \a infos is allowed to be NULL. + * + * The usedGpuMemory field returned is all of the memory used by the application. 
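+ *
+ * A minimal sketch of the *infoCount = 0 size query described above, assuming \a dev is a valid device
+ * handle and malloc/free from <stdlib.h> are available:
+ * @code
+ * unsigned int i, infoCount = 0;
+ * nvmlReturn_t r = nvmlDeviceGetComputeRunningProcesses(dev, &infoCount, NULL);   /* size query */
+ * if (r == NVML_ERROR_INSUFFICIENT_SIZE && infoCount > 0) {
+ *     infoCount += 8;   /* headroom in case new processes start in the meantime */
+ *     nvmlProcessInfo_t *infos = malloc(infoCount * sizeof(*infos));
+ *     if (infos && nvmlDeviceGetComputeRunningProcesses(dev, &infoCount, infos) == NVML_SUCCESS)
+ *         for (i = 0; i < infoCount; i++)
+ *             printf("pid %u uses %llu bytes\n", infos[i].pid, infos[i].usedGpuMemory);
+ *     free(infos);
+ * }
+ * @endcode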
+ * + * Keep in mind that information returned by this call is dynamic and the number of elements might change in + * time. Allocate more space for \a infos table in case new compute processes are spawned. + * + * @param device The identifier of the target device + * @param infoCount Reference in which to provide the \a infos array size, and + * to return the number of returned elements + * @param infos Reference in which to return the process information + * + * @return + * - \ref NVML_SUCCESS if \a infoCount and \a infos have been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a infoCount indicates that the \a infos array is too small + * \a infoCount will contain minimal amount of space necessary for + * the call to complete + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, either of \a infoCount or \a infos is NULL + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see \ref nvmlSystemGetProcessName + */ +nvmlReturn_t DECLDIR nvmlDeviceGetComputeRunningProcesses(nvmlDevice_t device, unsigned int *infoCount, nvmlProcessInfo_t *infos); + +/** + * Get information about processes with a graphics context on a device + * + * For Kepler &tm; or newer fully supported devices. + * + * This function returns information only about graphics based processes + * (eg. applications using OpenGL, DirectX) + * + * To query the current number of running graphics processes, call this function with *infoCount = 0. The + * return code will be NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if none are running. For this call + * \a infos is allowed to be NULL. + * + * The usedGpuMemory field returned is all of the memory used by the application. + * + * Keep in mind that information returned by this call is dynamic and the number of elements might change in + * time. Allocate more space for \a infos table in case new graphics processes are spawned. + * + * @param device The identifier of the target device + * @param infoCount Reference in which to provide the \a infos array size, and + * to return the number of returned elements + * @param infos Reference in which to return the process information + * + * @return + * - \ref NVML_SUCCESS if \a infoCount and \a infos have been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a infoCount indicates that the \a infos array is too small + * \a infoCount will contain minimal amount of space necessary for + * the call to complete + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, either of \a infoCount or \a infos is NULL + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see \ref nvmlSystemGetProcessName + */ +nvmlReturn_t DECLDIR nvmlDeviceGetGraphicsRunningProcesses(nvmlDevice_t device, unsigned int *infoCount, nvmlProcessInfo_t *infos); + +/** + * Check if the GPU devices are on the same physical board. + * + * For all fully supported products. + * + * @param device1 The first GPU device + * @param device2 The second GPU device + * @param onSameBoard Reference in which to return the status. + * Non-zero indicates that the GPUs are on the same board. 
+ * + * @return + * - \ref NVML_SUCCESS if \a onSameBoard has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a dev1 or \a dev2 are invalid or \a onSameBoard is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this check is not supported by the device + * - \ref NVML_ERROR_GPU_IS_LOST if the either GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceOnSameBoard(nvmlDevice_t device1, nvmlDevice_t device2, int *onSameBoard); + +/** + * Retrieves the root/admin permissions on the target API. See \a nvmlRestrictedAPI_t for the list of supported APIs. + * If an API is restricted only root users can call that API. See \a nvmlDeviceSetAPIRestriction to change current permissions. + * + * For all fully supported products. + * + * @param device The identifier of the target device + * @param apiType Target API type for this operation + * @param isRestricted Reference in which to return the current restriction + * NVML_FEATURE_ENABLED indicates that the API is root-only + * NVML_FEATURE_DISABLED indicates that the API is accessible to all users + * + * @return + * - \ref NVML_SUCCESS if \a isRestricted has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a apiType incorrect or \a isRestricted is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device or the device does not support + * the feature that is being queried (E.G. Enabling/disabling Auto Boosted clocks is + * not supported by the device) + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlRestrictedAPI_t + */ +nvmlReturn_t DECLDIR nvmlDeviceGetAPIRestriction(nvmlDevice_t device, nvmlRestrictedAPI_t apiType, nvmlEnableState_t *isRestricted); + +/** + * Gets recent samples for the GPU. + * + * For Kepler &tm; or newer fully supported devices. + * + * Based on type, this method can be used to fetch the power, utilization or clock samples maintained in the buffer by + * the driver. + * + * Power, Utilization and Clock samples are returned as type "unsigned int" for the union nvmlValue_t. + * + * To get the size of samples that user needs to allocate, the method is invoked with samples set to NULL. + * The returned samplesCount will provide the number of samples that can be queried. The user needs to + * allocate the buffer with size as samplesCount * sizeof(nvmlSample_t). + * + * lastSeenTimeStamp represents CPU timestamp in microseconds. Set it to 0 to fetch all the samples maintained by the + * underlying buffer. Set lastSeenTimeStamp to one of the timeStamps retrieved from the date of the previous query + * to get more recent samples. + * + * This method fetches the number of entries which can be accommodated in the provided samples array, and the + * reference samplesCount is updated to indicate how many samples were actually retrieved. The advantage of using this + * method for samples in contrast to polling via existing methods is to get get higher frequency data at lower polling cost. + * + * @param device The identifier for the target device + * @param type Type of sampling event + * @param lastSeenTimeStamp Return only samples with timestamp greater than lastSeenTimeStamp. 
+ * @param sampleValType Output parameter to represent the type of sample value as described in nvmlSampleVal_t + * @param sampleCount Reference to provide the number of elements which can be queried in samples array + * @param samples Reference in which samples are returned + + * @return + * - \ref NVML_SUCCESS if samples are successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a sampleCount is NULL or + * reference to \a sampleCount is 0 for non null \a samples + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_NOT_FOUND if sample entries are not found + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetSamples(nvmlDevice_t device, nvmlSamplingType_t type, unsigned long long lastSeenTimeStamp, + nvmlValueType_t *sampleValType, unsigned int *sampleCount, nvmlSample_t *samples); + +/** + * Gets Total, Available and Used size of BAR1 memory. + * + * BAR1 is used to map the FB (device memory) so that it can be directly accessed by the CPU or by 3rd party + * devices (peer-to-peer on the PCIE bus). + * + * For Kepler &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param bar1Memory Reference in which BAR1 memory + * information is returned. + * + * @return + * - \ref NVML_SUCCESS if BAR1 memory is successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a bar1Memory is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + */ +nvmlReturn_t DECLDIR nvmlDeviceGetBAR1MemoryInfo(nvmlDevice_t device, nvmlBAR1Memory_t *bar1Memory); + + +/** + * Gets the duration of time during which the device was throttled (lower than requested clocks) due to power + * or thermal constraints. + * + * The method is important to users who are trying to understand if their GPUs throttle at any point during their applications. The + * difference in violation times at two different reference times gives the indication of GPU throttling event. + * + * Violation for thermal capping is not supported at this time. + * + * For Kepler &tm; or newer fully supported devices.
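+ *
+ * A minimal sketch comparing the power-cap violation time at two reference points, as suggested above,
+ * assuming \a dev is a valid device handle:
+ * @code
+ * nvmlViolationTime_t before, after;
+ * if (nvmlDeviceGetViolationStatus(dev, NVML_PERF_POLICY_POWER, &before) == NVML_SUCCESS) {
+ *     /* ... run the workload of interest ... */
+ *     if (nvmlDeviceGetViolationStatus(dev, NVML_PERF_POLICY_POWER, &after) == NVML_SUCCESS &&
+ *         after.violationTime > before.violationTime)
+ *         printf("GPU was throttled by the power cap during this interval\n");
+ * }
+ * @endcode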
+ * + * @param device The identifier of the target device + * @param perfPolicyType Represents Performance policy which can trigger GPU throttling + * @param violTime Reference to which violation time related information is returned + * + * + * @return + * - \ref NVML_SUCCESS if violation time is successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a perfPolicyType is invalid, or \a violTime is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * + */ +nvmlReturn_t DECLDIR nvmlDeviceGetViolationStatus(nvmlDevice_t device, nvmlPerfPolicyType_t perfPolicyType, nvmlViolationTime_t *violTime); + +/** + * @} + */ + +/** @addtogroup nvmlAccountingStats + * @{ + */ + +/** + * Queries the state of per process accounting mode. + * + * For Kepler &tm; or newer fully supported devices. + * + * See \ref nvmlDeviceGetAccountingStats for more details. + * See \ref nvmlDeviceSetAccountingMode + * + * @param device The identifier of the target device + * @param mode Reference in which to return the current accounting mode + * + * @return + * - \ref NVML_SUCCESS if the mode has been successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode are NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetAccountingMode(nvmlDevice_t device, nvmlEnableState_t *mode); + +/** + * Queries process's accounting stats. + * + * For Kepler &tm; or newer fully supported devices. + * + * Accounting stats capture GPU utilization and other statistics across the lifetime of a process. + * Accounting stats can be queried during life time of the process and after its termination. + * The time field in \ref nvmlAccountingStats_t is reported as 0 during the lifetime of the process and + * updated to actual running time after its termination. + * Accounting stats are kept in a circular buffer, newly created processes overwrite information about old + * processes. + * + * See \ref nvmlAccountingStats_t for description of each returned metric. + * List of processes that can be queried can be retrieved from \ref nvmlDeviceGetAccountingPids. + * + * @note Accounting Mode needs to be on. See \ref nvmlDeviceGetAccountingMode. + * @note Only compute and graphics applications stats can be queried. Monitoring applications stats can't be + * queried since they don't contribute to GPU utilization. + * @note In case of pid collision stats of only the latest process (that terminated last) will be reported + * + * @warning On Kepler devices per process statistics are accurate only if there's one process running on a GPU. 
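+ *
+ * A minimal sketch, assuming \a dev is a valid device handle with accounting mode enabled and \a pid is
+ * a process id previously returned by \ref nvmlDeviceGetAccountingPids:
+ * @code
+ * nvmlAccountingStats_t stats;
+ * if (nvmlDeviceGetAccountingStats(dev, pid, &stats) == NVML_SUCCESS)
+ *     printf("pid %u: gpu utilization %u%%, peak memory %llu bytes\n",
+ *            pid, stats.gpuUtilization, stats.maxMemoryUsage);
+ * @endcode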
+ * + * @param device The identifier of the target device + * @param pid Process Id of the target process to query stats for + * @param stats Reference in which to return the process's accounting stats + * + * @return + * - \ref NVML_SUCCESS if stats have been successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a stats are NULL + * - \ref NVML_ERROR_NOT_FOUND if process stats were not found + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature or accounting mode is disabled + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetAccountingBufferSize + */ +nvmlReturn_t DECLDIR nvmlDeviceGetAccountingStats(nvmlDevice_t device, unsigned int pid, nvmlAccountingStats_t *stats); + +/** + * Queries list of processes that can be queried for accounting stats. The list of processes returned + * can be in running or terminated state. + * + * For Kepler &tm; or newer fully supported devices. + * + * To just query the number of processes ready to be queried, call this function with *count = 0 and + * pids=NULL. The return code will be NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if list is empty. + * + * For more details see \ref nvmlDeviceGetAccountingStats. + * + * @note In case of PID collision some processes might not be accessible before the circular buffer is full. + * + * @param device The identifier of the target device + * @param count Reference in which to provide the \a pids array size, and + * to return the number of elements ready to be queried + * @param pids Reference in which to return list of process ids + * + * @return + * - \ref NVML_SUCCESS if pids were successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a count is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature or accounting mode is disabled + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a count is too small (\a count is set to + * expected value) + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetAccountingBufferSize + */ +nvmlReturn_t DECLDIR nvmlDeviceGetAccountingPids(nvmlDevice_t device, unsigned int *count, unsigned int *pids); + +/** + * Returns the number of processes that the circular buffer with accounting pids can hold. + * + * For Kepler &tm; or newer fully supported devices. + * + * This is the maximum number of processes that accounting information will be stored for before information + * about oldest processes will get overwritten by information about new processes. + * + * @param device The identifier of the target device + * @param bufferSize Reference in which to provide the size (in number of elements) + * of the circular buffer for accounting stats. 
+ * + * @return + * - \ref NVML_SUCCESS if buffer size was successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a bufferSize is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature or accounting mode is disabled + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetAccountingStats + * @see nvmlDeviceGetAccountingPids + */ +nvmlReturn_t DECLDIR nvmlDeviceGetAccountingBufferSize(nvmlDevice_t device, unsigned int *bufferSize); + +/** @} */ + +/** @addtogroup nvmlDeviceQueries + * @{ + */ + +/** + * Returns the list of retired pages by source, including pages that are pending retirement + * The address information provided from this API is the hardware address of the page that was retired. Note + * that this does not match the virtual address used in CUDA, but will match the address information in XID 63 + * + * For Kepler &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param cause Filter page addresses by cause of retirement + * @param pageCount Reference in which to provide the \a addresses buffer size, and + * to return the number of retired pages that match \a cause + * Set to 0 to query the size without allocating an \a addresses buffer + * @param addresses Buffer to write the page addresses into + * + * @return + * - \ref NVML_SUCCESS if \a pageCount was populated and \a addresses was filled + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a pageCount indicates the buffer is not large enough to store all the + * matching page addresses. \a pageCount is set to the needed size. + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a pageCount is NULL, \a cause is invalid, or + * \a addresses is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetRetiredPages(nvmlDevice_t device, nvmlPageRetirementCause_t cause, + unsigned int *pageCount, unsigned long long *addresses); + +/** + * Check if any pages are pending retirement and need a reboot to fully retire. + * + * For Kepler &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param isPending Reference in which to return the pending status + * + * @return + * - \ref NVML_SUCCESS if \a isPending was populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a isPending is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetRetiredPagesPendingStatus(nvmlDevice_t device, nvmlEnableState_t *isPending); + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlUnitCommands Unit Commands + * This chapter describes NVML operations that change the state of the unit. For S-class products. + * Each of these requires root/admin access. 
Non-admin users will see an NVML_ERROR_NO_PERMISSION + * error code when invoking any of these methods. + * @{ + */ +/***************************************************************************************************/ + +/** + * Set the LED state for the unit. The LED can be either green (0) or amber (1). + * + * For S-class products. + * Requires root/admin permissions. + * + * This operation takes effect immediately. + * + * + * Current S-Class products don't provide unique LEDs for each unit. As such, both front + * and back LEDs will be toggled in unison regardless of which unit is specified with this command. + * + * See \ref nvmlLedColor_t for available colors. + * + * @param unit The identifier of the target unit + * @param color The target LED color + * + * @return + * - \ref NVML_SUCCESS if the LED color has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit or \a color is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlUnitGetLedState() + */ +nvmlReturn_t DECLDIR nvmlUnitSetLedState(nvmlUnit_t unit, nvmlLedColor_t color); + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlDeviceCommands Device Commands + * This chapter describes NVML operations that change the state of the device. + * Each of these requires root/admin access. Non-admin users will see an NVML_ERROR_NO_PERMISSION + * error code when invoking any of these methods. + * @{ + */ +/***************************************************************************************************/ + +/** + * Set the persistence mode for the device. + * + * For all products. + * For Linux only. + * Requires root/admin permissions. + * + * The persistence mode determines whether the GPU driver software is torn down after the last client + * exits. + * + * This operation takes effect immediately. It is not persistent across reboots. After each reboot the + * persistence mode is reset to "Disabled". + * + * See \ref nvmlEnableState_t for available modes. + * + * @param device The identifier of the target device + * @param mode The target persistence mode + * + * @return + * - \ref NVML_SUCCESS if the persistence mode was set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetPersistenceMode() + */ +nvmlReturn_t DECLDIR nvmlDeviceSetPersistenceMode(nvmlDevice_t device, nvmlEnableState_t mode); + +/** + * Set the compute mode for the device. + * + * For all products. + * Requires root/admin permissions. + * + * The compute mode determines whether a GPU can be used for compute operations and whether it can + * be shared across contexts. + * + * This operation takes effect immediately. Under Linux it is not persistent across reboots and + * always resets to "Default". 
Under windows it is persistent. + * + * Under windows compute mode may only be set to DEFAULT when running in WDDM + * + * See \ref nvmlComputeMode_t for details on available compute modes. + * + * @param device The identifier of the target device + * @param mode The target compute mode + * + * @return + * - \ref NVML_SUCCESS if the compute mode was set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetComputeMode() + */ +nvmlReturn_t DECLDIR nvmlDeviceSetComputeMode(nvmlDevice_t device, nvmlComputeMode_t mode); + +/** + * Set the ECC mode for the device. + * + * For Kepler &tm; or newer fully supported devices. + * Only applicable to devices with ECC. + * Requires \a NVML_INFOROM_ECC version 1.0 or higher. + * Requires root/admin permissions. + * + * The ECC mode determines whether the GPU enables its ECC support. + * + * This operation takes effect after the next reboot. + * + * See \ref nvmlEnableState_t for details on available modes. + * + * @param device The identifier of the target device + * @param ecc The target ECC mode + * + * @return + * - \ref NVML_SUCCESS if the ECC mode was set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a ecc is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetEccMode() + */ +nvmlReturn_t DECLDIR nvmlDeviceSetEccMode(nvmlDevice_t device, nvmlEnableState_t ecc); + +/** + * Clear the ECC error and other memory error counts for the device. + * + * For Kepler &tm; or newer fully supported devices. + * Only applicable to devices with ECC. + * Requires \a NVML_INFOROM_ECC version 2.0 or higher to clear aggregate location-based ECC counts. + * Requires \a NVML_INFOROM_ECC version 1.0 or higher to clear all other ECC counts. + * Requires root/admin permissions. + * Requires ECC Mode to be enabled. + * + * Sets all of the specified ECC counters to 0, including both detailed and total counts. + * + * This operation takes effect immediately. + * + * See \ref nvmlMemoryErrorType_t for details on available counter types. + * + * @param device The identifier of the target device + * @param counterType Flag that indicates which type of errors should be cleared. 
+ * + * @return + * - \ref NVML_SUCCESS if the error counts were cleared + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a counterType is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see + * - nvmlDeviceGetDetailedEccErrors() + * - nvmlDeviceGetTotalEccErrors() + */ +nvmlReturn_t DECLDIR nvmlDeviceClearEccErrorCounts(nvmlDevice_t device, nvmlEccCounterType_t counterType); + +/** + * Set the driver model for the device. + * + * For Fermi &tm; or newer fully supported devices. + * For windows only. + * Requires root/admin permissions. + * + * On Windows platforms the device driver can run in either WDDM or WDM (TCC) mode. If a display is attached + * to the device it must run in WDDM mode. + * + * It is possible to force the change to WDM (TCC) while the display is still attached with a force flag (nvmlFlagForce). + * This should only be done if the host is subsequently powered down and the display is detached from the device + * before the next reboot. + * + * This operation takes effect after the next reboot. + * + * Windows driver model may only be set to WDDM when running in DEFAULT compute mode. + * + * Change driver model to WDDM is not supported when GPU doesn't support graphics acceleration or + * will not support it after reboot. See \ref nvmlDeviceSetGpuOperationMode. + * + * See \ref nvmlDriverModel_t for details on available driver models. + * See \ref nvmlFlagDefault and \ref nvmlFlagForce + * + * @param device The identifier of the target device + * @param driverModel The target driver model + * @param flags Flags that change the default behavior + * + * @return + * - \ref NVML_SUCCESS if the driver model has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a driverModel is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the platform is not windows or the device does not support this feature + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetDriverModel() + */ +nvmlReturn_t DECLDIR nvmlDeviceSetDriverModel(nvmlDevice_t device, nvmlDriverModel_t driverModel, unsigned int flags); + +/** + * Set clocks that applications will lock to. + * + * Sets the clocks that compute and graphics applications will be running at. + * e.g. CUDA driver requests these clocks during context creation which means this property + * defines clocks at which CUDA applications will be running unless some overspec event + * occurs (e.g. over power, over thermal or external HW brake). + * + * Can be used as a setting to request constant performance. + * + * On Pascal and newer hardware, this will automatically disable automatic boosting of clocks. 
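+ *
+ * A minimal sketch that locks application clocks to one supported memory/graphics combination, assuming
+ * \a dev is a valid device handle, the caller has root/admin privileges, and the fixed-size buffers are
+ * large enough for this device (on K80-class parts see also the auto-boost note below):
+ * @code
+ * unsigned int memClocks[32], grClocks[128];
+ * unsigned int memCount = 32, grCount = 128;
+ * if (nvmlDeviceGetSupportedMemoryClocks(dev, &memCount, memClocks) == NVML_SUCCESS && memCount > 0 &&
+ *     nvmlDeviceGetSupportedGraphicsClocks(dev, memClocks[0], &grCount, grClocks) == NVML_SUCCESS && grCount > 0)
+ * {
+ *     /* any graphics clock returned for memClocks[0] forms a valid combination with it */
+ *     nvmlDeviceSetApplicationsClocks(dev, memClocks[0], grClocks[0]);
+ * }
+ * @endcode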
+ * + * On K80 and newer Kepler and Maxwell GPUs, users desiring fixed performance should also call + * \ref nvmlDeviceSetAutoBoostedClocksEnabled to prevent clocks from automatically boosting + * above the clock value being set. + * + * For Kepler &tm; or newer non-GeForce fully supported devices and Maxwell or newer GeForce devices. + * Requires root/admin permissions. + * + * See \ref nvmlDeviceGetSupportedMemoryClocks and \ref nvmlDeviceGetSupportedGraphicsClocks + * for details on how to list available clocks combinations. + * + * After system reboot or driver reload applications clocks go back to their default value. + * See \ref nvmlDeviceResetApplicationsClocks. + * + * @param device The identifier of the target device + * @param memClockMHz Requested memory clock in MHz + * @param graphicsClockMHz Requested graphics clock in MHz + * + * @return + * - \ref NVML_SUCCESS if new settings were successfully set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a memClockMHz and \a graphicsClockMHz + * is not a valid clock combination + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceSetApplicationsClocks(nvmlDevice_t device, unsigned int memClockMHz, unsigned int graphicsClockMHz); + +/** + * Set new power limit of this device. + * + * For Kepler &tm; or newer fully supported devices. + * Requires root/admin permissions. + * + * See \ref nvmlDeviceGetPowerManagementLimitConstraints to check the allowed ranges of values. + * + * \note Limit is not persistent across reboots or driver unloads. + * Enable persistent mode to prevent driver from unloading when no application is using the device. + * + * @param device The identifier of the target device + * @param limit Power management limit in milliwatts to set + * + * @return + * - \ref NVML_SUCCESS if \a limit has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a defaultLimit is out of range + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetPowerManagementLimitConstraints + * @see nvmlDeviceGetPowerManagementDefaultLimit + */ +nvmlReturn_t DECLDIR nvmlDeviceSetPowerManagementLimit(nvmlDevice_t device, unsigned int limit); + +/** + * Sets new GOM. See \a nvmlGpuOperationMode_t for details. + * + * For GK110 M-class and X-class Tesla &tm; products from the Kepler family. + * Modes \ref NVML_GOM_LOW_DP and \ref NVML_GOM_ALL_ON are supported on fully supported GeForce products. + * Not supported on Quadro ® and Tesla &tm; C-class products. + * Requires root/admin permissions. + * + * Changing GOMs requires a reboot. + * The reboot requirement might be removed in the future. + * + * Compute only GOMs don't support graphics acceleration. Under windows switching to these GOMs when + * pending driver model is WDDM is not supported. See \ref nvmlDeviceSetDriverModel. 
+ * + * @param device The identifier of the target device + * @param mode Target GOM + * + * @return + * - \ref NVML_SUCCESS if \a mode has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode incorrect + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support GOM or specific mode + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlGpuOperationMode_t + * @see nvmlDeviceGetGpuOperationMode + */ +nvmlReturn_t DECLDIR nvmlDeviceSetGpuOperationMode(nvmlDevice_t device, nvmlGpuOperationMode_t mode); + +/** + * Changes the root/admin restrictions on certain APIs. See \a nvmlRestrictedAPI_t for the list of supported APIs. + * This method can be used by a root/admin user to give non-root/admin access to certain otherwise-restricted APIs. + * The new setting lasts for the lifetime of the NVIDIA driver; it is not persistent. See \a nvmlDeviceGetAPIRestriction + * to query the current restriction settings. + * + * For Kepler &tm; or newer fully supported devices. + * Requires root/admin permissions. + * + * @param device The identifier of the target device + * @param apiType Target API type for this operation + * @param isRestricted The target restriction + * + * @return + * - \ref NVML_SUCCESS if \a isRestricted has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a apiType incorrect + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support changing API restrictions or the device does not support + * the feature that api restrictions are being set for (E.G. Enabling/disabling auto + * boosted clocks is not supported by the device) + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlRestrictedAPI_t + */ +nvmlReturn_t DECLDIR nvmlDeviceSetAPIRestriction(nvmlDevice_t device, nvmlRestrictedAPI_t apiType, nvmlEnableState_t isRestricted); + +/** + * @} + */ + +/** @addtogroup nvmlAccountingStats + * @{ + */ + +/** + * Enables or disables per process accounting. + * + * For Kepler &tm; or newer fully supported devices. + * Requires root/admin permissions. + * + * @note This setting is not persistent and will default to disabled after driver unloads. + * Enable persistence mode to be sure the setting doesn't switch off to disabled. + * + * @note Enabling accounting mode has no negative impact on the GPU performance. + * + * @note Disabling accounting clears all accounting pids information.
+ * + * See \ref nvmlDeviceGetAccountingMode + * See \ref nvmlDeviceGetAccountingStats + * See \ref nvmlDeviceClearAccountingPids + * + * @param device The identifier of the target device + * @param mode The target accounting mode + * + * @return + * - \ref NVML_SUCCESS if the new mode has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a mode are invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceSetAccountingMode(nvmlDevice_t device, nvmlEnableState_t mode); + +/** + * Clears accounting information about all processes that have already terminated. + * + * For Kepler &tm; or newer fully supported devices. + * Requires root/admin permissions. + * + * See \ref nvmlDeviceGetAccountingMode + * See \ref nvmlDeviceGetAccountingStats + * See \ref nvmlDeviceSetAccountingMode + * + * @param device The identifier of the target device + * + * @return + * - \ref NVML_SUCCESS if accounting information has been cleared + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device are invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceClearAccountingPids(nvmlDevice_t device); + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup NvLink NvLink Methods + * This chapter describes methods that NVML can perform on NVLINK enabled devices. + * @{ + */ +/***************************************************************************************************/ + +/** + * Retrieves the state of the device's NvLink for the link specified + * + * For Pascal &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param link Specifies the NvLink link to be queried + * @param isActive \a nvmlEnableState_t where NVML_FEATURE_ENABLED indicates that + * the link is active and NVML_FEATURE_DISABLED indicates it + * is inactive + * + * @return + * - \ref NVML_SUCCESS if \a isActive has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a link is invalid or \a isActive is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkState(nvmlDevice_t device, unsigned int link, nvmlEnableState_t *isActive); + +/** + * Retrieves the version of the device's NvLink for the link specified + * + * For Pascal &tm; or newer fully supported devices. 
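As a usage sketch of the two accounting calls above (the device handle is assumed to come from `nvmlDeviceGetHandleByIndex` as in the earlier example; `NVML_FEATURE_ENABLED` is the `nvmlEnableState_t` value referenced in this header):

```c
#include <nvml.h>

/* Sketch: turn on per-process accounting and clear stale entries (root only). */
static nvmlReturn_t enable_accounting(nvmlDevice_t dev)
{
    nvmlReturn_t st = nvmlDeviceSetAccountingMode(dev, NVML_FEATURE_ENABLED);
    if (st != NVML_SUCCESS)
        return st;
    /* Drop records of processes that have already exited. */
    return nvmlDeviceClearAccountingPids(dev);
}
```

Because the setting is not persistent, callers typically pair this with enabling persistence mode, as the note above suggests.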
+ * + * @param device The identifier of the target device + * @param link Specifies the NvLink link to be queried + * @param version Requested NvLink version + * + * @return + * - \ref NVML_SUCCESS if \a version has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a link is invalid or \a version is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkVersion(nvmlDevice_t device, unsigned int link, unsigned int *version); + +/** + * Retrieves the requested capability from the device's NvLink for the link specified + * Please refer to the \a nvmlNvLinkCapability_t structure for the specific caps that can be queried + * The return value should be treated as a boolean. + * + * For Pascal &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param link Specifies the NvLink link to be queried + * @param capability Specifies the \a nvmlNvLinkCapability_t to be queried + * @param capResult A boolean for the queried capability indicating that feature is available + * + * @return + * - \ref NVML_SUCCESS if \a capResult has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a link, or \a capability is invalid or \a capResult is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkCapability(nvmlDevice_t device, unsigned int link, + nvmlNvLinkCapability_t capability, unsigned int *capResult); + +/** + * Retrieves the PCI information for the remote node on a NvLink link + * Note: pciSubSystemId is not filled in this function and is indeterminate + * + * For Pascal &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param link Specifies the NvLink link to be queried + * @param pci \a nvmlPciInfo_t of the remote node for the specified link + * + * @return + * - \ref NVML_SUCCESS if \a pci has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a link is invalid or \a pci is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkRemotePciInfo(nvmlDevice_t device, unsigned int link, nvmlPciInfo_t *pci); + +/** + * Retrieves the specified error counter value + * Please refer to \a nvmlNvLinkErrorCounter_t for error counters that are available + * + * For Pascal &tm; or newer fully supported devices. 
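A sketch of walking a device's NVLink links with the queries above; `NVML_NVLINK_MAX_LINKS` and the `busId` member of `nvmlPciInfo_t` are assumed from their definitions earlier in this header:

```c
#include <stdio.h>
#include <nvml.h>

/* Sketch: report state, version, and remote PCI address of each active NVLink link. */
static void dump_nvlinks(nvmlDevice_t dev)
{
    for (unsigned int link = 0; link < NVML_NVLINK_MAX_LINKS; link++) {
        nvmlEnableState_t active;
        unsigned int version;
        nvmlPciInfo_t pci;

        if (nvmlDeviceGetNvLinkState(dev, link, &active) != NVML_SUCCESS)
            continue;                      /* link absent or feature unsupported */
        if (active != NVML_FEATURE_ENABLED)
            continue;                      /* link present but inactive */
        if (nvmlDeviceGetNvLinkVersion(dev, link, &version) == NVML_SUCCESS &&
            nvmlDeviceGetNvLinkRemotePciInfo(dev, link, &pci) == NVML_SUCCESS)
            printf("link %u: NvLink v%u -> %s\n", link, version, pci.busId);
    }
}
```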
+ * + * @param device The identifier of the target device + * @param link Specifies the NvLink link to be queried + * @param counter Specifies the NvLink counter to be queried + * @param counterValue Returned counter value + * + * @return + * - \ref NVML_SUCCESS if \a counter has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a link, or \a counter is invalid or \a counterValue is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkErrorCounter(nvmlDevice_t device, unsigned int link, + nvmlNvLinkErrorCounter_t counter, unsigned long long *counterValue); + +/** + * Resets all error counters to zero + * Please refer to \a nvmlNvLinkErrorCounter_t for the list of error counters that are reset + * + * For Pascal &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param link Specifies the NvLink link to be queried + * + * @return + * - \ref NVML_SUCCESS if the reset is successful + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a link is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceResetNvLinkErrorCounters(nvmlDevice_t device, unsigned int link); + +/** + * Set the NVLINK utilization counter control information for the specified counter, 0 or 1. + * Please refer to \a nvmlNvLinkUtilizationControl_t for the structure definition. Performs a reset + * of the counters if the reset parameter is non-zero. + * + * For Pascal &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param counter Specifies the counter that should be set (0 or 1). + * @param link Specifies the NvLink link to be queried + * @param control A reference to the \a nvmlNvLinkUtilizationControl_t to set + * @param reset Resets the counters on set if non-zero + * + * @return + * - \ref NVML_SUCCESS if the control has been set successfully + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a counter, \a link, or \a control is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceSetNvLinkUtilizationControl(nvmlDevice_t device, unsigned int link, unsigned int counter, + nvmlNvLinkUtilizationControl_t *control, unsigned int reset); + +/** + * Get the NVLINK utilization counter control information for the specified counter, 0 or 1. + * Please refer to \a nvmlNvLinkUtilizationControl_t for the structure definition + * + * For Pascal &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param counter Specifies the counter that should be set (0 or 1). 
+ * @param link Specifies the NvLink link to be queried + * @param control A reference to the \a nvmlNvLinkUtilizationControl_t to place information + * + * @return + * - \ref NVML_SUCCESS if the control has been set successfully + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a counter, \a link, or \a control is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkUtilizationControl(nvmlDevice_t device, unsigned int link, unsigned int counter, + nvmlNvLinkUtilizationControl_t *control); + + +/** + * Retrieve the NVLINK utilization counter based on the current control for a specified counter. + * In general it is good practice to use \a nvmlDeviceSetNvLinkUtilizationControl + * before reading the utilization counters as they have no default state + * + * For Pascal &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param link Specifies the NvLink link to be queried + * @param counter Specifies the counter that should be read (0 or 1). + * @param rxcounter Receive counter return value + * @param txcounter Transmit counter return value + * + * @return + * - \ref NVML_SUCCESS if \a rxcounter and \a txcounter have been successfully set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a counter, or \a link is invalid or \a rxcounter or \a txcounter are NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkUtilizationCounter(nvmlDevice_t device, unsigned int link, unsigned int counter, + unsigned long long *rxcounter, unsigned long long *txcounter); + +/** + * Freeze the NVLINK utilization counters + * Both the receive and transmit counters are operated on by this function + * + * For Pascal &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param link Specifies the NvLink link to be queried + * @param counter Specifies the counter that should be frozen (0 or 1). + * @param freeze NVML_FEATURE_ENABLED = freeze the receive and transmit counters + * NVML_FEATURE_DISABLED = unfreeze the receive and transmit counters + * + * @return + * - \ref NVML_SUCCESS if counters were successfully frozen or unfrozen + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a link, \a counter, or \a freeze is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceFreezeNvLinkUtilizationCounter (nvmlDevice_t device, unsigned int link, + unsigned int counter, nvmlEnableState_t freeze); + +/** + * Reset the NVLINK utilization counters + * Both the receive and transmit counters are operated on by this function + * + * For Pascal &tm; or newer fully supported devices. 
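Putting the utilization-counter calls together as a sketch; the zero-initialized `nvmlNvLinkUtilizationControl_t` is only a placeholder, since real code would fill in the count units and packet filter defined alongside that structure:

```c
#include <stdio.h>
#include <nvml.h>

/* Sketch: program counter 0 on one link, then read a frozen rx/tx pair. */
static void sample_nvlink_counter(nvmlDevice_t dev, unsigned int link)
{
    nvmlNvLinkUtilizationControl_t ctl = {0};   /* placeholder control settings */
    unsigned long long rx = 0, tx = 0;

    /* Configure counter 0 and reset it so the next read starts from zero. */
    if (nvmlDeviceSetNvLinkUtilizationControl(dev, link, 0, &ctl, 1) != NVML_SUCCESS)
        return;

    /* ... workload generates NVLink traffic here ... */

    /* Freeze before reading so rx and tx form a consistent pair, then unfreeze. */
    nvmlDeviceFreezeNvLinkUtilizationCounter(dev, link, 0, NVML_FEATURE_ENABLED);
    if (nvmlDeviceGetNvLinkUtilizationCounter(dev, link, 0, &rx, &tx) == NVML_SUCCESS)
        printf("link %u counter 0: rx=%llu tx=%llu\n", link, rx, tx);
    nvmlDeviceFreezeNvLinkUtilizationCounter(dev, link, 0, NVML_FEATURE_DISABLED);
}
```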
+ * + * @param device The identifier of the target device + * @param link Specifies the NvLink link to be reset + * @param counter Specifies the counter that should be reset (0 or 1) + * + * @return + * - \ref NVML_SUCCESS if counters were successfully reset + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a link, or \a counter is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceResetNvLinkUtilizationCounter (nvmlDevice_t device, unsigned int link, unsigned int counter); + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlEvents Event Handling Methods + * This chapter describes methods that NVML can perform against each device to register and wait for + * some event to occur. + * @{ + */ +/***************************************************************************************************/ + +/** + * Create an empty set of events. + * Event set should be freed by \ref nvmlEventSetFree + * + * For Fermi &tm; or newer fully supported devices. + * @param set Reference in which to return the event handle + * + * @return + * - \ref NVML_SUCCESS if the event has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a set is NULL + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlEventSetFree + */ +nvmlReturn_t DECLDIR nvmlEventSetCreate(nvmlEventSet_t *set); + +/** + * Starts recording of events on a specified devices and add the events to specified \ref nvmlEventSet_t + * + * For Fermi &tm; or newer fully supported devices. + * Ecc events are available only on ECC enabled devices (see \ref nvmlDeviceGetTotalEccErrors) + * Power capping events are available only on Power Management enabled devices (see \ref nvmlDeviceGetPowerManagementMode) + * + * For Linux only. + * + * \b IMPORTANT: Operations on \a set are not thread safe + * + * This call starts recording of events on specific device. + * All events that occurred before this call are not recorded. + * Checking if some event occurred can be done with \ref nvmlEventSetWait + * + * If function reports NVML_ERROR_UNKNOWN, event set is in undefined state and should be freed. + * If function reports NVML_ERROR_NOT_SUPPORTED, event set can still be used. None of the requested eventTypes + * are registered in that case. 
+ * + * @param device The identifier of the target device + * @param eventTypes Bitmask of \ref nvmlEventType to record + * @param set Set to which add new event types + * + * @return + * - \ref NVML_SUCCESS if the event has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a eventTypes is invalid or \a set is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the platform does not support this feature or some of requested event types + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlEventType + * @see nvmlDeviceGetSupportedEventTypes + * @see nvmlEventSetWait + * @see nvmlEventSetFree + */ +nvmlReturn_t DECLDIR nvmlDeviceRegisterEvents(nvmlDevice_t device, unsigned long long eventTypes, nvmlEventSet_t set); + +/** + * Returns information about events supported on device + * + * For Fermi &tm; or newer fully supported devices. + * + * Events are not supported on Windows. So this function returns an empty mask in \a eventTypes on Windows. + * + * @param device The identifier of the target device + * @param eventTypes Reference in which to return bitmask of supported events + * + * @return + * - \ref NVML_SUCCESS if the eventTypes has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a eventType is NULL + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlEventType + * @see nvmlDeviceRegisterEvents + */ +nvmlReturn_t DECLDIR nvmlDeviceGetSupportedEventTypes(nvmlDevice_t device, unsigned long long *eventTypes); + +/** + * Waits on events and delivers events + * + * For Fermi &tm; or newer fully supported devices. + * + * If some events are ready to be delivered at the time of the call, function returns immediately. + * If there are no events ready to be delivered, function sleeps till event arrives + * but not longer than specified timeout. This function in certain conditions can return before + * specified timeout passes (e.g. when interrupt arrives) + * + * In case of xid error, the function returns the most recent xid error type seen by the system. If there are multiple + * xid errors generated before nvmlEventSetWait is invoked then the last seen xid error type is returned for all + * xid error events. + * + * @param set Reference to set of events to wait on + * @param data Reference in which to return event data + * @param timeoutms Maximum amount of wait time in milliseconds for registered event + * + * @return + * - \ref NVML_SUCCESS if the data has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a data is NULL + * - \ref NVML_ERROR_TIMEOUT if no event arrived in specified timeout or interrupt arrived + * - \ref NVML_ERROR_GPU_IS_LOST if a GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlEventType + * @see nvmlDeviceRegisterEvents + */ +nvmlReturn_t DECLDIR nvmlEventSetWait(nvmlEventSet_t set, nvmlEventData_t * data, unsigned int timeoutms); + +/** + * Releases events in the set + * + * For Fermi &tm; or newer fully supported devices. 
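A sketch of the register-and-wait flow described above (Linux only); the Xid event bit (`nvmlEventTypeXidCriticalError`) and the `eventType`/`eventData` members of `nvmlEventData_t` are assumed from their definitions earlier in this header:

```c
#include <stdio.h>
#include <nvml.h>

/* Sketch: block for up to 10 s waiting for a critical Xid error on one device. */
static void wait_for_xid(nvmlDevice_t dev)
{
    nvmlEventSet_t set;
    nvmlEventData_t data;

    if (nvmlEventSetCreate(&set) != NVML_SUCCESS)
        return;
    if (nvmlDeviceRegisterEvents(dev, nvmlEventTypeXidCriticalError, set) == NVML_SUCCESS) {
        nvmlReturn_t st = nvmlEventSetWait(set, &data, 10000 /* ms */);
        if (st == NVML_SUCCESS)
            printf("event 0x%llx, xid=%llu\n", data.eventType, data.eventData);
        else if (st == NVML_ERROR_TIMEOUT)
            printf("no event within timeout\n");
    }
    nvmlEventSetFree(set);   /* always release the set */
}
```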
+ * + * @param set Reference to events to be released + * + * @return + * - \ref NVML_SUCCESS if the event has been successfully released + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceRegisterEvents + */ +nvmlReturn_t DECLDIR nvmlEventSetFree(nvmlEventSet_t set); + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlZPI Drain states + * This chapter describes methods that NVML can perform against each device to control their drain state + * and recognition by NVML and NVIDIA kernel driver. These methods can be used with out-of-band tools to + * power on/off GPUs, enable robust reset scenarios, etc. + * @{ + */ +/***************************************************************************************************/ + +/** + * Modify the drain state of a GPU. This method forces a GPU to no longer accept new incoming requests. + * Any new NVML process will no longer see this GPU. Persistence mode for this GPU must be turned off before + * this call is made. + * Must be called as administrator. + * For Linux only. + * + * For Pascal &tm; or newer fully supported devices. + * Some Kepler devices supported. + * + * @param pciInfo The PCI address of the GPU drain state to be modified + * @param newState The drain state that should be entered, see \ref nvmlEnableState_t + * + * @return + * - \ref NVML_SUCCESS if counters were successfully reset + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a nvmlIndex or \a newState is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_NO_PERMISSION if the calling process has insufficient permissions to perform operation + * - \ref NVML_ERROR_IN_USE if the device has persistence mode turned on + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceModifyDrainState (nvmlPciInfo_t *pciInfo, nvmlEnableState_t newState); + +/** + * Query the drain state of a GPU. This method is used to check if a GPU is in a currently draining + * state. + * For Linux only. + * + * For Pascal &tm; or newer fully supported devices. + * Some Kepler devices supported. + * + * @param pciInfo The PCI address of the GPU drain state to be queried + * @param currentState The current drain state for this GPU, see \ref nvmlEnableState_t + * + * @return + * - \ref NVML_SUCCESS if counters were successfully reset + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a nvmlIndex or \a currentState is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceQueryDrainState (nvmlPciInfo_t *pciInfo, nvmlEnableState_t *currentState); + +/** + * This method will remove the specified GPU from the view of both NVML and the NVIDIA kernel driver + * as long as no other processes are attached. If other processes are attached, this call will return + * NVML_ERROR_IN_USE and the GPU will be returned to its original "draining" state. 
Note: the + * only situation where a process can still be attached after nvmlDeviceModifyDrainState() is called + * to initiate the draining state is if that process was using, and is still using, a GPU before the + * call was made. Also note, persistence mode counts as an attachment to the GPU thus it must be disabled + * prior to this call. + * + * For long-running NVML processes please note that this will change the enumeration of current GPUs. + * For example, if there are four GPUs present and GPU1 is removed, the new enumeration will be 0-2. + * Also, device handles after the removed GPU will not be valid and must be re-established. + * Must be run as administrator. + * For Linux only. + * + * For Pascal &tm; or newer fully supported devices. + * Some Kepler devices supported. + * + * @param pciInfo The PCI address of the GPU to be removed + * + * @return + * - \ref NVML_SUCCESS if counters were successfully reset + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a nvmlIndex is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_IN_USE if the device is still in use and cannot be removed + */ +nvmlReturn_t DECLDIR nvmlDeviceRemoveGpu (nvmlPciInfo_t *pciInfo); + +/** + * Request the OS and the NVIDIA kernel driver to rediscover a portion of the PCI subsystem looking for GPUs that + * were previously removed. The portion of the PCI tree can be narrowed by specifying a domain, bus, and device. + * If all are zeroes then the entire PCI tree will be searched. Please note that for long-running NVML processes + * the enumeration will change based on how many GPUs are discovered and where they are inserted in bus order. + * + * In addition, all newly discovered GPUs will be initialized and their ECC scrubbed which may take several seconds + * per GPU. Also, all device handles are no longer guaranteed to be valid post discovery. + * + * Must be run as administrator. + * For Linux only. + * + * For Pascal &tm; or newer fully supported devices. + * Some Kepler devices supported. + * + * @param pciInfo The PCI tree to be searched. Only the domain, bus, and device + * fields are used in this call. + * + * @return + * - \ref NVML_SUCCESS if counters were successfully reset + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a pciInfo is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the operating system does not support this feature + * - \ref NVML_ERROR_OPERATING_SYSTEM if the operating system is denying this feature + * - \ref NVML_ERROR_NO_PERMISSION if the calling process has insufficient permissions to perform operation + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceDiscoverGpus (nvmlPciInfo_t *pciInfo); + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlFieldValueQueries Field Value Queries + * This chapter describes NVML operations that are associated with retrieving Field Values from NVML + * @{ + */ +/***************************************************************************************************/ + +/** + * Request values for a list of fields for a device. This API allows multiple fields to be queried at once. 
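A sketch of the drain-then-remove sequence these functions describe (administrator, Linux only); the PCI address is assumed to come from `nvmlDeviceGetPciInfo`, which this header declares elsewhere:

```c
#include <nvml.h>

/* Sketch: drain a GPU, verify the drain state, then detach it from the driver. */
static nvmlReturn_t drain_and_remove(nvmlDevice_t dev)
{
    nvmlPciInfo_t pci;
    nvmlEnableState_t draining;
    nvmlReturn_t st;

    st = nvmlDeviceGetPciInfo(dev, &pci);          /* these APIs key off the PCI address */
    if (st != NVML_SUCCESS)
        return st;

    st = nvmlDeviceModifyDrainState(&pci, NVML_FEATURE_ENABLED);   /* start draining */
    if (st != NVML_SUCCESS)
        return st;                                  /* e.g. persistence mode still enabled */

    if (nvmlDeviceQueryDrainState(&pci, &draining) == NVML_SUCCESS &&
        draining == NVML_FEATURE_ENABLED)
        st = nvmlDeviceRemoveGpu(&pci);             /* NVML_ERROR_IN_USE if still attached */

    return st;
}
```

The GPU can later be brought back with `nvmlDeviceDiscoverGpus`, passing a zeroed `nvmlPciInfo_t` to rescan the whole PCI tree or one narrowed to the relevant domain/bus/device.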
+ * If any of the underlying fieldIds are populated by the same driver call, the results for those field IDs + * will be populated from a single call rather than making a driver call for each fieldId. + * + * @param device The device handle of the GPU to request field values for + * @param valuesCount Number of entries in values that should be retrieved + * @param values Array of \a valuesCount structures to hold field values. + * Each value's fieldId must be populated prior to this call + * + * @return + * - \ref NVML_SUCCESS if any values in \a values were populated. Note that you must + * check the nvmlReturn field of each value for each individual + * status + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a values is NULL + */ +nvmlReturn_t DECLDIR nvmlDeviceGetFieldValues(nvmlDevice_t device, int valuesCount, nvmlFieldValue_t *values); + + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlGridQueries Grid Queries + * This chapter describes NVML operations that are associated with NVIDIA GRID products. + * @{ + */ +/***************************************************************************************************/ + +/** + * This method is used to get the virtualization mode corresponding to the GPU. + * + * For Kepler &tm; or newer fully supported devices. + * + * @param device Identifier of the target device + * @param pVirtualMode Reference to virtualization mode. One of NVML_GPU_VIRTUALIZATION_? + * + * @return + * - \ref NVML_SUCCESS if \a pVirtualMode is fetched + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pVirtualMode is NULL + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetVirtualizationMode(nvmlDevice_t device, nvmlGpuVirtualizationMode_t *pVirtualMode); + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlGridCommands Grid Commands + * This chapter describes NVML operations that are associated with NVIDIA GRID products. + * @{ + */ +/***************************************************************************************************/ + +/** + * This method is used to set the virtualization mode corresponding to the GPU. + * + * For Kepler &tm; or newer fully supported devices. + * + * @param device Identifier of the target device + * @param virtualMode virtualization mode. One of NVML_GPU_VIRTUALIZATION_? + * + * @return + * - \ref NVML_SUCCESS if \a pVirtualMode is set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pVirtualMode is NULL + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_NOT_SUPPORTED if setting of virtualization mode is not supported. + * - \ref NVML_ERROR_NO_PERMISSION if setting of virtualization mode is not allowed for this client. 
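As a sketch of the batched query above, relying only on the `fieldId` and `nvmlReturn` members called out in the description; the `NVML_FI_DEV_ECC_CURRENT`/`NVML_FI_DEV_ECC_PENDING` identifiers are assumed from the field-ID defines earlier in this header:

```c
#include <stdio.h>
#include <string.h>
#include <nvml.h>

/* Sketch: batch two field queries into a single driver round trip where possible. */
static void query_fields(nvmlDevice_t dev)
{
    nvmlFieldValue_t values[2];
    memset(values, 0, sizeof(values));
    values[0].fieldId = NVML_FI_DEV_ECC_CURRENT;     /* assumed field identifiers */
    values[1].fieldId = NVML_FI_DEV_ECC_PENDING;

    if (nvmlDeviceGetFieldValues(dev, 2, values) != NVML_SUCCESS)
        return;

    /* NVML_SUCCESS above only means "some" entries were populated --
     * each entry carries its own per-field status. */
    for (int i = 0; i < 2; i++)
        printf("field %u: status %d\n", values[i].fieldId, (int)values[i].nvmlReturn);
}
```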
+ */ +nvmlReturn_t DECLDIR nvmlDeviceSetVirtualizationMode(nvmlDevice_t device, nvmlGpuVirtualizationMode_t virtualMode); + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlVgpu vGPU Management + * @{ + * + * Set of APIs supporting GRID vGPU + */ +/***************************************************************************************************/ + +/** + * Retrieve the supported vGPU types on a physical GPU (device). + * + * An array of supported vGPU types for the physical GPU indicated by \a device is returned in the caller-supplied buffer + * pointed at by \a vgpuTypeIds. The element count of nvmlVgpuTypeId_t array is passed in \a vgpuCount, and \a vgpuCount + * is used to return the number of vGPU types written to the buffer. + * + * If the supplied buffer is not large enough to accomodate the vGPU type array, the function returns + * NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlVgpuTypeId_t array required in \a vgpuCount. + * To query the number of vGPU types supported for the GPU, call this function with *vgpuCount = 0. + * The code will return NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if no vGPU types are supported. + * + * @param device The identifier of the target device + * @param vgpuCount Pointer to caller-supplied array size, and returns number of vGPU types + * @param vgpuTypeIds Pointer to caller-supplied array in which to return list of vGPU types + * + * @return + * - \ref NVML_SUCCESS successful completion + * - \ref NVML_ERROR_INSUFFICIENT_SIZE \a vgpuTypeIds buffer is too small, array element count is returned in \a vgpuCount + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuCount is NULL or \a device is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the device + * - \ref NVML_ERROR_VGPU_ECC_NOT_SUPPORTED if ECC is enabled on the device + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetSupportedVgpus(nvmlDevice_t device, unsigned int *vgpuCount, nvmlVgpuTypeId_t *vgpuTypeIds); + +/** + * Retrieve the currently creatable vGPU types on a physical GPU (device). + * + * An array of creatable vGPU types for the physical GPU indicated by \a device is returned in the caller-supplied buffer + * pointed at by \a vgpuTypeIds. The element count of nvmlVgpuTypeId_t array is passed in \a vgpuCount, and \a vgpuCount + * is used to return the number of vGPU types written to the buffer. + * + * The creatable vGPU types for a device may differ over time, as there may be restrictions on what type of vGPU types + * can concurrently run on a device. For example, if only one vGPU type is allowed at a time on a device, then the creatable + * list will be restricted to whatever vGPU type is already running on the device. + * + * If the supplied buffer is not large enough to accomodate the vGPU type array, the function returns + * NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlVgpuTypeId_t array required in \a vgpuCount. + * To query the number of vGPU types createable for the GPU, call this function with *vgpuCount = 0. + * The code will return NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if no vGPU types are creatable. 
+ * + * @param device The identifier of the target device + * @param vgpuCount Pointer to caller-supplied array size, and returns number of vGPU types + * @param vgpuTypeIds Pointer to caller-supplied array in which to return list of vGPU types + * + * @return + * - \ref NVML_SUCCESS successful completion + * - \ref NVML_ERROR_INSUFFICIENT_SIZE \a vgpuTypeIds buffer is too small, array element count is returned in \a vgpuCount + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuCount is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the device + * - \ref NVML_ERROR_VGPU_ECC_NOT_SUPPORTED if ECC is enabled on the device + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetCreatableVgpus(nvmlDevice_t device, unsigned int *vgpuCount, nvmlVgpuTypeId_t *vgpuTypeIds); + +/** + * Retrieve the class of a vGPU type. It will not exceed 64 characters in length (including the NUL terminator). + * See \ref nvmlConstants::NVML_DEVICE_NAME_BUFFER_SIZE. + * + * For Kepler &tm; or newer fully supported devices. + * + * @param vgpuTypeId Handle to vGPU type + * @param vgpuTypeClass Pointer to string array to return class in + * @param size Size of string + * + * @return + * - \ref NVML_SUCCESS successful completion + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a vgpuTypeClass is NULL + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuTypeGetClass(nvmlVgpuTypeId_t vgpuTypeId, char *vgpuTypeClass, unsigned int *size); + +/** + * Retrieve the vGPU type name. + * + * The name is an alphanumeric string that denotes a particular vGPU, e.g. GRID M60-2Q. It will not + * exceed 64 characters in length (including the NUL terminator). See \ref + * nvmlConstants::NVML_DEVICE_NAME_BUFFER_SIZE. + * + * For Kepler &tm; or newer fully supported devices. + * + * @param vgpuTypeId Handle to vGPU type + * @param vgpuTypeName Pointer to buffer to return name + * @param size Size of buffer + * + * @return + * - \ref NVML_SUCCESS successful completion + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a name is NULL + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuTypeGetName(nvmlVgpuTypeId_t vgpuTypeId, char *vgpuTypeName, unsigned int *size); + +/** + * Retrieve the device ID of a vGPU type. + * + * For Kepler &tm; or newer fully supported devices. + * + * @param vgpuTypeId Handle to vGPU type + * @param deviceID Device ID and vendor ID of the device contained in single 32 bit value + * @param subsystemID Subsytem ID and subsytem vendor ID of the device contained in single 32 bit value + * + * @return + * - \ref NVML_SUCCESS successful completion + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a deviceId or \a subsystemID are NULL + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuTypeGetDeviceID(nvmlVgpuTypeId_t vgpuTypeId, unsigned long long *deviceID, unsigned long long *subsystemID); + +/** + * Retrieve the vGPU framebuffer size in bytes. + * + * For Kepler &tm; or newer fully supported devices. 
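The two-call sizing pattern these queries describe, as a sketch (error handling trimmed; the name query declared above and `NVML_DEVICE_NAME_BUFFER_SIZE` are used for readable output):

```c
#include <stdio.h>
#include <stdlib.h>
#include <nvml.h>

/* Sketch: list the vGPU types that can currently be created on a device. */
static void list_creatable_vgpus(nvmlDevice_t dev)
{
    unsigned int count = 0;
    nvmlReturn_t st = nvmlDeviceGetCreatableVgpus(dev, &count, NULL);

    if (st == NVML_SUCCESS)
        return;                                   /* nothing creatable right now */
    if (st != NVML_ERROR_INSUFFICIENT_SIZE || count == 0)
        return;                                   /* real error */

    nvmlVgpuTypeId_t *ids = malloc(count * sizeof(*ids));
    if (ids && nvmlDeviceGetCreatableVgpus(dev, &count, ids) == NVML_SUCCESS) {
        for (unsigned int i = 0; i < count; i++) {
            char name[NVML_DEVICE_NAME_BUFFER_SIZE];
            unsigned int size = sizeof(name);
            if (nvmlVgpuTypeGetName(ids[i], name, &size) == NVML_SUCCESS)
                printf("creatable vGPU type %u: %s\n", (unsigned int)ids[i], name);
        }
    }
    free(ids);
}
```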
+ * + * @param vgpuTypeId Handle to vGPU type + * @param fbSize Pointer to framebuffer size in bytes + * + * @return + * - \ref NVML_SUCCESS successful completion + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a fbSize is NULL + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuTypeGetFramebufferSize(nvmlVgpuTypeId_t vgpuTypeId, unsigned long long *fbSize); + +/** + * Retrieve count of vGPU's supported display heads. + * + * For Kepler &tm; or newer fully supported devices. + * + * @param vgpuTypeId Handle to vGPU type + * @param numDisplayHeads Pointer to number of display heads + * + * @return + * - \ref NVML_SUCCESS successful completion + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a numDisplayHeads is NULL + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuTypeGetNumDisplayHeads(nvmlVgpuTypeId_t vgpuTypeId, unsigned int *numDisplayHeads); + +/** + * Retrieve vGPU display head's maximum supported resolution. + * + * For Kepler &tm; or newer fully supported devices. + * + * @param vgpuTypeId Handle to vGPU type + * @param displayIndex Zero-based index of display head + * @param xdim Pointer to maximum number of pixels in X dimension + * @param ydim Pointer to maximum number of pixels in Y dimension + * + * @return + * - \ref NVML_SUCCESS successful completion + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a xdim or \a ydim are NULL, or \a displayIndex + * is out of range. + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuTypeGetResolution(nvmlVgpuTypeId_t vgpuTypeId, unsigned int displayIndex, unsigned int *xdim, unsigned int *ydim); + +/** + * Retrieve license requirements for a vGPU type + * + * The license type and version required to run the specified vGPU type is returned as an alphanumeric string, in the form + * ",", for example "GRID-Virtual-PC,2.0". If a vGPU is runnable with* more than one type of license, + * the licenses are delimited by a semicolon, for example "GRID-Virtual-PC,2.0;GRID-Virtual-WS,2.0;GRID-Virtual-WS-Ext,2.0". + * + * The total length of the returned string will not exceed 128 characters, including the NUL terminator. + * See \ref nvmlVgpuConstants::NVML_GRID_LICENSE_BUFFER_SIZE. + * + * For Kepler &tm; or newer fully supported devices. + * + * @param vgpuTypeId Handle to vGPU type + * @param vgpuTypeLicenseString Pointer to buffer to return license info + * @param size Size of \a vgpuTypeLicenseString buffer + * + * @return + * - \ref NVML_SUCCESS successful completion + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a vgpuTypeLicenseString is NULL + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuTypeGetLicense(nvmlVgpuTypeId_t vgpuTypeId, char *vgpuTypeLicenseString, unsigned int size); + +/** + * Retrieve the static frame rate limit value of the vGPU type + * + * For Kepler &tm; or newer fully supported devices. 
+ * + * @param vgpuTypeId Handle to vGPU type + * @param frameRateLimit Reference to return the frame rate limit value + * @return + * - \ref NVML_SUCCESS successful completion + * - \ref NVML_ERROR_NOT_SUPPORTED if frame rate limiter is turned off for the vGPU type + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a frameRateLimit is NULL + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuTypeGetFrameRateLimit(nvmlVgpuTypeId_t vgpuTypeId, unsigned int *frameRateLimit); + +/** + * Retrieve the maximum number of vGPU instances creatable on a device for given vGPU type + * + * For Kepler &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param vgpuTypeId Handle to vGPU type + * @param vgpuInstanceCount Pointer to get the max number of vGPU instances + * that can be created on a deicve for given vgpuTypeId + * @return + * - \ref NVML_SUCCESS successful completion + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid or is not supported on target device, + * or \a vgpuInstanceCount is NULL + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuTypeGetMaxInstances(nvmlDevice_t device, nvmlVgpuTypeId_t vgpuTypeId, unsigned int *vgpuInstanceCount); + +/** + * Retrieve the active vGPU instances on a device. + * + * An array of active vGPU instances is returned in the caller-supplied buffer pointed at by \a vgpuInstances. The + * array elememt count is passed in \a vgpuCount, and \a vgpuCount is used to return the number of vGPU instances + * written to the buffer. + * + * If the supplied buffer is not large enough to accomodate the vGPU instance array, the function returns + * NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlVgpuInstance_t array required in \a vgpuCount. + * To query the number of active vGPU instances, call this function with *vgpuCount = 0. The code will return + * NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if no vGPU Types are supported. + * + * For Kepler &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param vgpuCount Pointer which passes in the array size as well as get + * back the number of types + * @param vgpuInstances Pointer to array in which to return list of vGPU instances + * + * @return + * - \ref NVML_SUCCESS successful completion + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a vgpuCount is NULL + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small + * - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the device + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetActiveVgpus(nvmlDevice_t device, unsigned int *vgpuCount, nvmlVgpuInstance_t *vgpuInstances); + +/** + * Retrieve the VM ID associated with a vGPU instance. + * + * The VM ID is returned as a string, not exceeding 80 characters in length (including the NUL terminator). + * See \ref nvmlConstants::NVML_DEVICE_UUID_BUFFER_SIZE. + * + * The format of the VM ID varies by platform, and is indicated by the type identifier returned in \a vmIdType. + * + * For Kepler &tm; or newer fully supported devices. 
+ * + * @param vgpuInstance Identifier of the target vGPU instance + * @param vmId Pointer to caller-supplied buffer to hold VM ID + * @param size Size of buffer in bytes + * @param vmIdType Pointer to hold VM ID type + * + * @return + * - \ref NVML_SUCCESS successful completion + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is invalid, or \a vmId or \a vmIdType are NULL + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuInstanceGetVmID(nvmlVgpuInstance_t vgpuInstance, char *vmId, unsigned int size, nvmlVgpuVmIdType_t *vmIdType); + +/** + * Retrieve the UUID of a vGPU instance. + * + * The UUID is a globally unique identifier associated with the vGPU, and is returned as a 5-part hexadecimal string, + * not exceeding 80 characters in length (including the NULL terminator). + * See \ref nvmlConstants::NVML_DEVICE_UUID_BUFFER_SIZE. + * + * For Kepler &tm; or newer fully supported devices. + * + * @param vgpuInstance Identifier of the target vGPU instance + * @param uuid Pointer to caller-supplied buffer to hold vGPU UUID + * @param size Size of buffer in bytes + * + * @return + * - \ref NVML_SUCCESS successful completion + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is invalid, or \a uuid is NULL + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuInstanceGetUUID(nvmlVgpuInstance_t vgpuInstance, char *uuid, unsigned int size); + +/** + * Retrieve the NVIDIA driver version installed in the VM associated with a vGPU. + * + * The version is returned as an alphanumeric string in the caller-supplied buffer \a version. The length of the version + * string will not exceed 80 characters in length (including the NUL terminator). + * See \ref nvmlConstants::NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE. + * + * nvmlVgpuInstanceGetVmDriverVersion() may be called at any time for a vGPU instance. The guest VM driver version is + * returned as "Unknown" if no NVIDIA driver is installed in the VM, or the VM has not yet booted to the point where the + * NVIDIA driver is loaded and initialized. + * + * For Kepler &tm; or newer fully supported devices. + * + * @param vgpuInstance Identifier of the target vGPU instance + * @param version Caller-supplied buffer to return driver version string + * @param length Size of \a version buffer + * + * @return + * - \ref NVML_SUCCESS if \a version has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is invalid + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuInstanceGetVmDriverVersion(nvmlVgpuInstance_t vgpuInstance, char* version, unsigned int length); + +/** + * Retrieve the framebuffer usage in bytes. + * + * Framebuffer usage is the amont of vGPU framebuffer memory that is currently in use by the VM. + * + * For Kepler &tm; or newer fully supported devices. 
+ * + * @param vgpuInstance The identifier of the target instance + * @param fbUsage Pointer to framebuffer usage in bytes + * + * @return + * - \ref NVML_SUCCESS successful completion + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is invalid, or \a fbUsage is NULL + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuInstanceGetFbUsage(nvmlVgpuInstance_t vgpuInstance, unsigned long long *fbUsage); + +/** + * Retrieve the current licensing state of the vGPU instance. + * + * If the vGPU is currently licensed, \a licensed is set to 1, otherwise it is set to 0. + * + * For Kepler &tm; or newer fully supported devices. + * + * @param vgpuInstance Identifier of the target vGPU instance + * @param licensed Reference to return the licensing status + * + * @return + * - \ref NVML_SUCCESS if \a licensed has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is invalid, or \a licensed is NULL + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuInstanceGetLicenseStatus(nvmlVgpuInstance_t vgpuInstance, unsigned int *licensed); + +/** + * Retrieve the vGPU type of a vGPU instance. + * + * Returns the vGPU type ID of vgpu assigned to the vGPU instance. + * + * For Kepler &tm; or newer fully supported devices. + * + * @param vgpuInstance Identifier of the target vGPU instance + * @param vgpuTypeId Reference to return the vgpuTypeId + * + * @return + * - \ref NVML_SUCCESS if \a vgpuTypeId has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is invalid, or \a vgpuTypeId is NULL + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuInstanceGetType(nvmlVgpuInstance_t vgpuInstance, nvmlVgpuTypeId_t *vgpuTypeId); + +/** + * Retrieve the frame rate limit set for the vGPU instance. + * + * Returns the value of the frame rate limit set for the vGPU instance + * + * For Kepler &tm; or newer fully supported devices. + * + * @param vgpuInstance Identifier of the target vGPU instance + * @param frameRateLimit Reference to return the frame rate limit + * + * @return + * - \ref NVML_SUCCESS if \a frameRateLimit has been set + * - \ref NVML_ERROR_NOT_SUPPORTED if frame rate limiter is turned off for the vGPU type + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is invalid, or \a frameRateLimit is NULL + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuInstanceGetFrameRateLimit(nvmlVgpuInstance_t vgpuInstance, unsigned int *frameRateLimit); + +/** + * Retrieve the encoder Capacity of a vGPU instance, in macroblocks per second. + * + * For Maxwell &tm; or newer fully supported devices. 
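A sketch combining the instance enumeration from `nvmlDeviceGetActiveVgpus` with the per-instance queries above (the same sizing pattern applies; `nvmlVgpuInstance_t` values are assumed printable as unsigned integers):

```c
#include <stdio.h>
#include <stdlib.h>
#include <nvml.h>

/* Sketch: print framebuffer usage and license status for each active vGPU. */
static void dump_active_vgpus(nvmlDevice_t dev)
{
    unsigned int count = 0;
    if (nvmlDeviceGetActiveVgpus(dev, &count, NULL) != NVML_ERROR_INSUFFICIENT_SIZE || count == 0)
        return;                                    /* no active instances (or error) */

    nvmlVgpuInstance_t *vgpus = malloc(count * sizeof(*vgpus));
    if (vgpus && nvmlDeviceGetActiveVgpus(dev, &count, vgpus) == NVML_SUCCESS) {
        for (unsigned int i = 0; i < count; i++) {
            unsigned long long fb = 0;
            unsigned int licensed = 0;
            nvmlVgpuInstanceGetFbUsage(vgpus[i], &fb);
            nvmlVgpuInstanceGetLicenseStatus(vgpus[i], &licensed);
            printf("vGPU %u: fb=%llu bytes, licensed=%u\n",
                   (unsigned int)vgpus[i], fb, licensed);
        }
    }
    free(vgpus);
}
```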
+ * + * @param vgpuInstance Identifier of the target vGPU instance + * @param encoderCapacity Reference to an unsigned int for the encoder capacity + * + * @return + * - \ref NVML_SUCCESS if \a encoderCapacity has been retrived + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is invalid, or \a encoderQueryType is invalid + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuInstanceGetEncoderCapacity(nvmlVgpuInstance_t vgpuInstance, unsigned int *encoderCapacity); + +/** + * Set the encoder Capacity of a vGPU instance, in macroblocks per second. + * + * For Maxwell &tm; or newer fully supported devices. + * + * @param vgpuInstance Identifier of the target vGPU instance + * @param encoderCapacity Unsigned int for the encoder capacity value + * + * @return + * - \ref NVML_SUCCESS if \a encoderCapacity has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is invalid + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuInstanceSetEncoderCapacity(nvmlVgpuInstance_t vgpuInstance, unsigned int encoderCapacity); + +/** + * Retrieves current utilization for vGPUs on a physical GPU (device). + * + * For Kepler &tm; or newer fully supported devices. + * + * Reads recent utilization of GPU SM (3D/Compute), framebuffer, video encoder, and video decoder for vGPU instances running + * on a device. Utilization values are returned as an array of utilization sample structures in the caller-supplied buffer + * pointed at by \a utilizationSamples. One utilization sample structure is returned per vGPU instance, and includes the + * CPU timestamp at which the samples were recorded. Individual utilization values are returned as "unsigned int" values + * in nvmlValue_t unions. The function sets the caller-supplied \a sampleValType to NVML_VALUE_TYPE_UNSIGNED_INT to + * indicate the returned value type. + * + * To read utilization values, first determine the size of buffer required to hold the samples by invoking the function with + * \a utilizationSamples set to NULL. The function will return NVML_ERROR_INSUFFICIENT_SIZE, with the current vGPU instance + * count in \a vgpuInstanceSamplesCount, or NVML_SUCCESS if the current vGPU instance count is zero. The caller should allocate + * a buffer of size vgpuInstanceSamplesCount * sizeof(nvmlVgpuInstanceUtilizationSample_t). Invoke the function again with + * the allocated buffer passed in \a utilizationSamples, and \a vgpuInstanceSamplesCount set to the number of entries the + * buffer is sized for. + * + * On successful return, the function updates \a vgpuInstanceSampleCount with the number of vGPU utilization sample + * structures that were actually written. This may differ from a previously read value as vGPU instances are created or + * destroyed. + * + * lastSeenTimeStamp represents the CPU timestamp in microseconds at which utilization samples were last read. Set it to 0 + * to read utilization based on all the samples maintained by the driver's internal sample buffer. Set lastSeenTimeStamp + * to a timeStamp retrieved from a previous query to read utilization since the previous query. + * + * @param device The identifier for the target device + * @param lastSeenTimeStamp Return only samples with timestamp greater than lastSeenTimeStamp. 
+ * @param sampleValType Pointer to caller-supplied buffer to hold the type of returned sample values + * @param vgpuInstanceSamplesCount Pointer to caller-supplied array size, and returns number of vGPU instances + * @param utilizationSamples Pointer to caller-supplied buffer in which vGPU utilization samples are returned + + * @return + * - \ref NVML_SUCCESS if utilization samples are successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a vgpuInstanceSamplesCount or \a sampleValType is + * NULL, or a sample count of 0 is passed with a non-NULL \a utilizationSamples + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if supplied \a vgpuInstanceSamplesCount is too small to return samples for all + * vGPU instances currently executing on the device + * - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the device + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_NOT_FOUND if sample entries are not found + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetVgpuUtilization(nvmlDevice_t device, unsigned long long lastSeenTimeStamp, + nvmlValueType_t *sampleValType, unsigned int *vgpuInstanceSamplesCount, + nvmlVgpuInstanceUtilizationSample_t *utilizationSamples); + +/** + * Retrieves current utilization for processes running on vGPUs on a physical GPU (device). + * + * For Maxwell &tm; or newer fully supported devices. + * + * Reads recent utilization of GPU SM (3D/Compute), framebuffer, video encoder, and video decoder for processes running on + * vGPU instances active on a device. Utilization values are returned as an array of utilization sample structures in the + * caller-supplied buffer pointed at by \a utilizationSamples. One utilization sample structure is returned per process running + * on vGPU instances, that had some non-zero utilization during the last sample period. It includes the CPU timestamp at which + * the samples were recorded. Individual utilization values are returned as "unsigned int" values. + * + * To read utilization values, first determine the size of buffer required to hold the samples by invoking the function with + * \a utilizationSamples set to NULL. The function will return NVML_ERROR_INSUFFICIENT_SIZE, with the current vGPU instance + * count in \a vgpuProcessSamplesCount. The caller should allocate a buffer of size + * vgpuProcessSamplesCount * sizeof(nvmlVgpuProcessUtilizationSample_t). Invoke the function again with + * the allocated buffer passed in \a utilizationSamples, and \a vgpuProcessSamplesCount set to the number of entries the + * buffer is sized for. + * + * On successful return, the function updates \a vgpuSubProcessSampleCount with the number of vGPU sub process utilization sample + * structures that were actually written. This may differ from a previously read value depending on the number of processes that are active + * in any given sample period. + * + * lastSeenTimeStamp represents the CPU timestamp in microseconds at which utilization samples were last read. Set it to 0 + * to read utilization based on all the samples maintained by the driver's internal sample buffer. Set lastSeenTimeStamp + * to a timeStamp retrieved from a previous query to read utilization since the previous query. 
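The NULL-buffer sizing step and the `lastSeenTimeStamp` handoff described above, as a sketch; the `vgpuInstance`, `timeStamp`, and `smUtil` members of `nvmlVgpuInstanceUtilizationSample_t` are assumed from the structure's definition earlier in this header:

```c
#include <stdio.h>
#include <stdlib.h>
#include <nvml.h>

/* Sketch: poll per-vGPU utilization, carrying the timestamp between calls. */
static unsigned long long poll_vgpu_utilization(nvmlDevice_t dev, unsigned long long lastSeen)
{
    nvmlValueType_t valType;
    unsigned int count = 0;

    /* Pass 1: NULL buffer to learn how many samples to allocate. */
    nvmlReturn_t st = nvmlDeviceGetVgpuUtilization(dev, lastSeen, &valType, &count, NULL);
    if (st != NVML_ERROR_INSUFFICIENT_SIZE || count == 0)
        return lastSeen;                        /* no vGPU instances, or error */

    nvmlVgpuInstanceUtilizationSample_t *samples = malloc(count * sizeof(*samples));
    if (samples &&
        nvmlDeviceGetVgpuUtilization(dev, lastSeen, &valType, &count, samples) == NVML_SUCCESS) {
        for (unsigned int i = 0; i < count; i++) {
            printf("vGPU %u: sm=%u%%\n",
                   (unsigned int)samples[i].vgpuInstance, samples[i].smUtil.uiVal);
            if (samples[i].timeStamp > lastSeen)
                lastSeen = samples[i].timeStamp;  /* feed into the next query */
        }
    }
    free(samples);
    return lastSeen;
}
```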
+ * + * @param device The identifier for the target device + * @param lastSeenTimeStamp Return only samples with timestamp greater than lastSeenTimeStamp. + * @param vgpuProcessSamplesCount Pointer to caller-supplied array size, and returns number of processes running on vGPU instances + * @param utilizationSamples Pointer to caller-supplied buffer in which vGPU sub process utilization samples are returned + + * @return + * - \ref NVML_SUCCESS if utilization samples are successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a vgpuProcessSamplesCount or a sample count of 0 is + * passed with a non-NULL \a utilizationSamples + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if supplied \a vgpuProcessSamplesCount is too small to return samples for all + * vGPU instances currently executing on the device + * - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the device + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_NOT_FOUND if sample entries are not found + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetVgpuProcessUtilization(nvmlDevice_t device, unsigned long long lastSeenTimeStamp, + unsigned int *vgpuProcessSamplesCount, + nvmlVgpuProcessUtilizationSample_t *utilizationSamples); +/** + * Retrieve the GRID licensable features. + * + * Identifies whether the system supports GRID Software Licensing. If it does, return the list of licensable feature(s) + * and their current license status. + * + * @param device Identifier of the target device + * @param pGridLicensableFeatures Pointer to structure in which GRID licensable features are returned + * + * @return + * - \ref NVML_SUCCESS if licensable features are successfully retrieved + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a pGridLicensableFeatures is NULL + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetGridLicensableFeatures(nvmlDevice_t device, nvmlGridLicensableFeatures_t *pGridLicensableFeatures); + +/** + * Retrieves the current encoder statistics of a vGPU Instance + * + * For Maxwell &tm; or newer fully supported devices. + * + * @param vgpuInstance Identifier of the target vGPU instance + * @param sessionCount Reference to an unsigned int for count of active encoder sessions + * @param averageFps Reference to an unsigned int for trailing average FPS of all active sessions + * @param averageLatency Reference to an unsigned int for encode latency in microseconds + * + * @return + * - \ref NVML_SUCCESS if \a sessionCount, \a averageFps and \a averageLatency is fetched + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a sessionCount , or \a averageFps or \a averageLatency is NULL + * or \a vgpuInstance is invalid. + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuInstanceGetEncoderStats(nvmlVgpuInstance_t vgpuInstance, unsigned int *sessionCount, + unsigned int *averageFps, unsigned int *averageLatency); + +/** + * Retrieves information about all active encoder sessions on a vGPU Instance. + * + * An array of active encoder sessions is returned in the caller-supplied buffer pointed at by \a sessionInfo. 
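And a minimal sketch of the aggregate encoder-statistics query just declared:

```c
#include <stdio.h>
#include <nvml.h>

/* Sketch: report aggregate encoder activity for one vGPU instance. */
static void show_encoder_stats(nvmlVgpuInstance_t vgpu)
{
    unsigned int sessions = 0, fps = 0, latencyUs = 0;
    if (nvmlVgpuInstanceGetEncoderStats(vgpu, &sessions, &fps, &latencyUs) == NVML_SUCCESS)
        printf("encoder: %u session(s), avg %u fps, avg latency %u us\n",
               sessions, fps, latencyUs);
}
```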
+ * array element count is passed in \a sessionCount, and \a sessionCount is used to return the number of sessions
+ * written to the buffer.
+ *
+ * If the supplied buffer is not large enough to accommodate the active session array, the function returns
+ * NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlEncoderSessionInfo_t array required in \a sessionCount.
+ * To query the number of active encoder sessions, call this function with *sessionCount = 0. The code will return
+ * NVML_SUCCESS with number of active encoder sessions updated in *sessionCount.
+ *
+ * For Maxwell &tm; or newer fully supported devices.
+ *
+ * @param vgpuInstance Identifier of the target vGPU instance
+ * @param sessionCount Reference to caller supplied array size, and returns
+ * the number of sessions.
+ * @param sessionInfo Reference to caller supplied array in which the list
+ * of session information is returned.
+ *
+ * @return
+ * - \ref NVML_SUCCESS if \a sessionInfo is fetched
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a sessionCount is too small, array element count is
+ * returned in \a sessionCount
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a sessionCount is NULL or \a vgpuInstance is invalid.
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlVgpuInstanceGetEncoderSessions(nvmlVgpuInstance_t vgpuInstance, unsigned int *sessionCount, nvmlEncoderSessionInfo_t *sessionInfo);
+
+/**
+ * Retrieves the current utilization and process ID
+ *
+ * For Maxwell &tm; or newer fully supported devices.
+ *
+ * Reads recent utilization of GPU SM (3D/Compute), framebuffer, video encoder, and video decoder for processes running.
+ * Utilization values are returned as an array of utilization sample structures in the caller-supplied buffer pointed at
+ * by \a utilization. One utilization sample structure is returned per running process that had some non-zero utilization
+ * during the last sample period. It includes the CPU timestamp at which the samples were recorded. Individual utilization values
+ * are returned as "unsigned int" values.
+ *
+ * To read utilization values, first determine the size of buffer required to hold the samples by invoking the function with
+ * \a utilization set to NULL. The caller should allocate a buffer of size
+ * processSamplesCount * sizeof(nvmlProcessUtilizationSample_t). Invoke the function again with the allocated buffer passed
+ * in \a utilization, and \a processSamplesCount set to the number of entries the buffer is sized for.
+ *
+ * On successful return, the function updates \a processSamplesCount with the number of process utilization sample
+ * structures that were actually written. This may differ from a previously read value as instances are created or
+ * destroyed.
+ *
+ * lastSeenTimeStamp represents the CPU timestamp in microseconds at which utilization samples were last read. Set it to 0
+ * to read utilization based on all the samples maintained by the driver's internal sample buffer. Set lastSeenTimeStamp
+ * to a timeStamp retrieved from a previous query to read utilization since the previous query.
+ * + * @param device The identifier of the target device + * @param utilization Pointer to caller-supplied buffer in which guest process utilization samples are returned + * @param processSamplesCount Pointer to caller-supplied array size, and returns number of processes running + * @param lastSeenTimeStamp Return only samples with timestamp greater than lastSeenTimeStamp. + + * @return + * - \ref NVML_SUCCESS if \a utilization has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a utilization is NULL, or \a samplingPeriodUs is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetProcessUtilization(nvmlDevice_t device, nvmlProcessUtilizationSample_t *utilization, + unsigned int *processSamplesCount, unsigned long long lastSeenTimeStamp); + +/** @} */ + +/** + * NVML API versioning support + */ +#if defined(__NVML_API_VERSION_INTERNAL) +#undef nvmlDeviceGetPciInfo +#undef nvmlDeviceGetCount +#undef nvmlDeviceGetHandleByIndex +#undef nvmlDeviceGetHandleByPciBusId +#undef nvmlInit +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/vendor/github.com/mitchellh/go-wordwrap/LICENSE.md b/vendor/github.com/mitchellh/go-wordwrap/LICENSE.md new file mode 100644 index 000000000000..229851590442 --- /dev/null +++ b/vendor/github.com/mitchellh/go-wordwrap/LICENSE.md @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/go-wordwrap/README.md b/vendor/github.com/mitchellh/go-wordwrap/README.md new file mode 100644 index 000000000000..60ae3117008d --- /dev/null +++ b/vendor/github.com/mitchellh/go-wordwrap/README.md @@ -0,0 +1,39 @@ +# go-wordwrap + +`go-wordwrap` (Golang package: `wordwrap`) is a package for Go that +automatically wraps words into multiple lines. The primary use case for this +is in formatting CLI output, but of course word wrapping is a generally useful +thing to do. + +## Installation and Usage + +Install using `go get github.com/mitchellh/go-wordwrap`. 
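
For the CLI-output use case described above, here is a minimal sketch of a typical call; the help text and the 40-column limit are illustrative assumptions for this example and are not taken from the package documentation (the vendored README shows its own short example below):

```go
package main

import (
	"fmt"

	"github.com/mitchellh/go-wordwrap"
)

func main() {
	// Hypothetical CLI help text, wrapped at 40 columns before printing.
	help := "start provisions and boots a local single-node cluster inside a VM for day-to-day development"
	fmt.Println(wordwrap.WrapString(help, 40))
}
```

Because wrapping only happens at whitespace, any single word longer than the limit is left intact, as the vendored wordwrap.go later in this diff makes explicit.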
+ +Full documentation is available at +http://godoc.org/github.com/mitchellh/go-wordwrap + +Below is an example of its usage ignoring errors: + +```go +wrapped := wordwrap.WrapString("foo bar baz", 3) +fmt.Println(wrapped) +``` + +Would output: + +``` +foo +bar +baz +``` + +## Word Wrap Algorithm + +This library doesn't use any clever algorithm for word wrapping. The wrapping +is actually very naive: whenever there is whitespace or an explicit linebreak. +The goal of this library is for word wrapping CLI output, so the input is +typically pretty well controlled human language. Because of this, the naive +approach typically works just fine. + +In the future, we'd like to make the algorithm more advanced. We would do +so without breaking the API. diff --git a/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go b/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go new file mode 100644 index 000000000000..ac67205bc2e5 --- /dev/null +++ b/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go @@ -0,0 +1,73 @@ +package wordwrap + +import ( + "bytes" + "unicode" +) + +// WrapString wraps the given string within lim width in characters. +// +// Wrapping is currently naive and only happens at white-space. A future +// version of the library will implement smarter wrapping. This means that +// pathological cases can dramatically reach past the limit, such as a very +// long word. +func WrapString(s string, lim uint) string { + // Initialize a buffer with a slightly larger size to account for breaks + init := make([]byte, 0, len(s)) + buf := bytes.NewBuffer(init) + + var current uint + var wordBuf, spaceBuf bytes.Buffer + + for _, char := range s { + if char == '\n' { + if wordBuf.Len() == 0 { + if current+uint(spaceBuf.Len()) > lim { + current = 0 + } else { + current += uint(spaceBuf.Len()) + spaceBuf.WriteTo(buf) + } + spaceBuf.Reset() + } else { + current += uint(spaceBuf.Len() + wordBuf.Len()) + spaceBuf.WriteTo(buf) + spaceBuf.Reset() + wordBuf.WriteTo(buf) + wordBuf.Reset() + } + buf.WriteRune(char) + current = 0 + } else if unicode.IsSpace(char) { + if spaceBuf.Len() == 0 || wordBuf.Len() > 0 { + current += uint(spaceBuf.Len() + wordBuf.Len()) + spaceBuf.WriteTo(buf) + spaceBuf.Reset() + wordBuf.WriteTo(buf) + wordBuf.Reset() + } + + spaceBuf.WriteRune(char) + } else { + + wordBuf.WriteRune(char) + + if current+uint(spaceBuf.Len()+wordBuf.Len()) > lim && uint(wordBuf.Len()) < lim { + buf.WriteRune('\n') + current = 0 + spaceBuf.Reset() + } + } + } + + if wordBuf.Len() == 0 { + if current+uint(spaceBuf.Len()) <= lim { + spaceBuf.WriteTo(buf) + } + } else { + spaceBuf.WriteTo(buf) + wordBuf.WriteTo(buf) + } + + return buf.String() +} diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml index 024e28466edc..588ceca183f4 100644 --- a/vendor/github.com/pkg/errors/.travis.yml +++ b/vendor/github.com/pkg/errors/.travis.yml @@ -4,6 +4,7 @@ go: - 1.4.3 - 1.5.4 - 1.6.2 + - 1.7.1 - tip script: diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go index 75780c99130d..842ee80456db 100644 --- a/vendor/github.com/pkg/errors/errors.go +++ b/vendor/github.com/pkg/errors/errors.go @@ -14,13 +14,18 @@ // Adding context to an error // // The errors.Wrap function returns a new error that adds context to the -// original error. For example +// original error by recording a stack trace at the point Wrap is called, +// and the supplied message. 
For example // // _, err := ioutil.ReadAll(r) // if err != nil { // return errors.Wrap(err, "read failed") // } //
+// If additional control is required the errors.WithStack and errors.WithMessage
+// functions destructure errors.Wrap into its component operations of annotating
+// an error with a stack trace and a message, respectively.
+//
 // Retrieving the cause of an error // // Using errors.Wrap constructs a stack of errors, adding context to the
@@ -127,8 +132,22 @@ func (f *fundamental) Format(s fmt.State, verb rune) { return } fallthrough
- case 's', 'q':
+ case 's':
 io.WriteString(s, f.msg)
+ case 'q':
+ fmt.Fprintf(s, "%q", f.msg)
+ }
+}
+
+// WithStack annotates err with a stack trace at the point WithStack was called.
+// If err is nil, WithStack returns nil.
+func WithStack(err error) error {
+ if err == nil {
+ return nil
+ }
+ return &withStack{
+ err,
+ callers(),
 } }
@@ -155,7 +174,8 @@ func (w *withStack) Format(s fmt.State, verb rune) { } }
-// Wrap returns an error annotating err with message.
+// Wrap returns an error annotating err with a stack trace
+// at the point Wrap is called, and the supplied message.
 // If err is nil, Wrap returns nil. func Wrap(err error, message string) error { if err == nil {
@@ -171,7 +191,8 @@ func Wrap(err error, message string) error { } }
-// Wrapf returns an error annotating err with the format specifier.
+// Wrapf returns an error annotating err with a stack trace
+// at the point Wrapf is called, and the format specifier.
 // If err is nil, Wrapf returns nil. func Wrapf(err error, format string, args ...interface{}) error { if err == nil {
@@ -187,6 +208,18 @@ func Wrapf(err error, format string, args ...interface{}) error { } }
+// WithMessage annotates err with a new message.
+// If err is nil, WithMessage returns nil.
+func WithMessage(err error, message string) error { + if err == nil { + return nil + } + return &withMessage{ + cause: err, + msg: message, + } +} + type withMessage struct { cause error msg string diff --git a/vendor/github.com/r2d4/external-storage/lib/controller/controller.go b/vendor/github.com/r2d4/external-storage/lib/controller/controller.go index 1be73111db88..25c5c63e2ba4 100644 --- a/vendor/github.com/r2d4/external-storage/lib/controller/controller.go +++ b/vendor/github.com/r2d4/external-storage/lib/controller/controller.go @@ -41,7 +41,7 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/tools/reference" - "k8s.io/kubernetes/pkg/api/v1/helper" + "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/util/goroutinemap" utilversion "k8s.io/kubernetes/pkg/util/version" ) @@ -535,14 +535,8 @@ func (ctrl *ProvisionController) isOnlyRecordUpdate(oldClaim, newClaim *v1.Persi // removeRecord returns a claim with its leader election record annotation and // ResourceVersion set blank func (ctrl *ProvisionController) removeRecord(claim *v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) { - clone, err := scheme.Scheme.DeepCopy(claim) - if err != nil { - return nil, fmt.Errorf("Error cloning claim: %v", err) - } - claimClone, ok := clone.(*v1.PersistentVolumeClaim) - if !ok { - return nil, fmt.Errorf("Unexpected claim cast error: %v", claimClone) - } + var claimClone *v1.PersistentVolumeClaim + claim.DeepCopyInto(claimClone) if claimClone.Annotations == nil { claimClone.Annotations = make(map[string]string) diff --git a/vendor/github.com/rackspace/gophercloud/.travis.yml b/vendor/github.com/rackspace/gophercloud/.travis.yml deleted file mode 100644 index 42c095de0bc5..000000000000 --- a/vendor/github.com/rackspace/gophercloud/.travis.yml +++ /dev/null @@ -1,20 +0,0 @@ -language: go -sudo: false -install: - - go get golang.org/x/crypto/ssh - - go get -v -tags 'fixtures acceptance' ./... -go: - - 1.4 - - 1.5 - - tip -env: - - COVERALLS_TOKEN=2k7PTU3xa474Hymwgdj6XjqenNfGTNkO8 -before_install: - - go get github.com/axw/gocov/gocov - - go get github.com/mattn/goveralls - - go get github.com/pierrre/gotestcover - - if ! go get github.com/golang/tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi -script: - - $HOME/gopath/bin/gotestcover -v -tags=fixtures -coverprofile=cover.out ./... -after_success: - - $HOME/gopath/bin/goveralls -service=travis-ci -coverprofile=cover.out diff --git a/vendor/github.com/rackspace/gophercloud/CONTRIBUTING.md b/vendor/github.com/rackspace/gophercloud/CONTRIBUTING.md deleted file mode 100644 index 73c95bd82634..000000000000 --- a/vendor/github.com/rackspace/gophercloud/CONTRIBUTING.md +++ /dev/null @@ -1,275 +0,0 @@ -# Contributing to gophercloud - -- [Getting started](#getting-started) -- [Tests](#tests) -- [Style guide](#basic-style-guide) -- [5 ways to get involved](#5-ways-to-get-involved) - -## Setting up your git workspace - -As a contributor you will need to setup your workspace in a slightly different -way than just downloading it. Here are the basic installation instructions: - -1. Configure your `$GOPATH` and run `go get` as described in the main -[README](/README.md#how-to-install) but add `-tags "fixtures acceptance"` to -get dependencies for unit and acceptance tests. - -2. Move into the directory that houses your local repository: - - ```bash - cd ${GOPATH}/src/github.com/rackspace/gophercloud - ``` - -3. Fork the `rackspace/gophercloud` repository and update your remote refs. 
You -will need to rename the `origin` remote branch to `upstream`, and add your -fork as `origin` instead: - - ```bash - git remote rename origin upstream - git remote add origin git@github.com//gophercloud - ``` - -4. Checkout the latest development branch: - - ```bash - git checkout master - ``` - -5. If you're working on something (discussed more in detail below), you will -need to checkout a new feature branch: - - ```bash - git checkout -b my-new-feature - ``` - -Another thing to bear in mind is that you will need to add a few extra -environment variables for acceptance tests - this is documented in our -[acceptance tests readme](/acceptance). - -## Tests - -When working on a new or existing feature, testing will be the backbone of your -work since it helps uncover and prevent regressions in the codebase. There are -two types of test we use in gophercloud: unit tests and acceptance tests, which -are both described below. - -### Unit tests - -Unit tests are the fine-grained tests that establish and ensure the behaviour -of individual units of functionality. We usually test on an -operation-by-operation basis (an operation typically being an API action) with -the use of mocking to set up explicit expectations. Each operation will set up -its HTTP response expectation, and then test how the system responds when fed -this controlled, pre-determined input. - -To make life easier, we've introduced a bunch of test helpers to simplify the -process of testing expectations with assertions: - -```go -import ( - "testing" - - "github.com/rackspace/gophercloud/testhelper" -) - -func TestSomething(t *testing.T) { - result, err := Operation() - - testhelper.AssertEquals(t, "foo", result.Bar) - testhelper.AssertNoErr(t, err) -} - -func TestSomethingElse(t *testing.T) { - testhelper.CheckEquals(t, "expected", "actual") -} -``` - -`AssertEquals` and `AssertNoErr` will throw a fatal error if a value does not -match an expected value or if an error has been declared, respectively. You can -also use `CheckEquals` and `CheckNoErr` for the same purpose; the only difference -being that `t.Errorf` is raised rather than `t.Fatalf`. 
- -Here is a truncated example of mocked HTTP responses: - -```go -import ( - "testing" - - th "github.com/rackspace/gophercloud/testhelper" - fake "github.com/rackspace/gophercloud/testhelper/client" -) - -func TestGet(t *testing.T) { - // Setup the HTTP request multiplexer and server - th.SetupHTTP() - defer th.TeardownHTTP() - - th.Mux.HandleFunc("/networks/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) { - // Test we're using the correct HTTP method - th.TestMethod(t, r, "GET") - - // Test we're setting the auth token - th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) - - // Set the appropriate headers for our mocked response - w.Header().Add("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - - // Set the HTTP body - fmt.Fprintf(w, ` -{ - "network": { - "status": "ACTIVE", - "name": "private-network", - "admin_state_up": true, - "tenant_id": "4fd44f30292945e481c7b8a0c8908869", - "shared": true, - "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22" - } -} - `) - }) - - // Call our API operation - network, err := Get(fake.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22").Extract() - - // Assert no errors and equality - th.AssertNoErr(t, err) - th.AssertEquals(t, n.Status, "ACTIVE") -} -``` - -### Acceptance tests - -As we've already mentioned, unit tests have a very narrow and confined focus - -they test small units of behaviour. Acceptance tests on the other hand have a -far larger scope: they are fully functional tests that test the entire API of a -service in one fell swoop. They don't care about unit isolation or mocking -expectations, they instead do a full run-through and consequently test how the -entire system _integrates_ together. When an API satisfies expectations, it -proves by default that the requirements for a contract have been met. - -Please be aware that acceptance tests will hit a live API - and may incur -service charges from your provider. Although most tests handle their own -teardown procedures, it is always worth manually checking that resources are -deleted after the test suite finishes. - -### Running tests - -To run all tests: - -```bash -go test -tags fixtures ./... -``` - -To run all tests with verbose output: - -```bash -go test -v -tags fixtures ./... -``` - -To run tests that match certain [build tags](): - -```bash -go test -tags "fixtures foo bar" ./... -``` - -To run tests for a particular sub-package: - -```bash -cd ./path/to/package && go test -tags fixtures . -``` - -## Basic style guide - -We follow the standard formatting recommendations and language idioms set out -in the [Effective Go](https://golang.org/doc/effective_go.html) guide. It's -definitely worth reading - but the relevant sections are -[formatting](https://golang.org/doc/effective_go.html#formatting) -and [names](https://golang.org/doc/effective_go.html#names). - -## 5 ways to get involved - -There are five main ways you can get involved in our open-source project, and -each is described briefly below. Once you've made up your mind and decided on -your fix, you will need to follow the same basic steps that all submissions are -required to adhere to: - -1. [fork](https://help.github.com/articles/fork-a-repo/) the `rackspace/gophercloud` repository -2. checkout a [new branch](https://github.com/Kunena/Kunena-Forum/wiki/Create-a-new-branch-with-git-and-manage-branches) -3. submit your branch as a [pull request](https://help.github.com/articles/creating-a-pull-request/) - -### 1. 
Providing feedback - -On of the easiest ways to get readily involved in our project is to let us know -about your experiences using our SDK. Feedback like this is incredibly useful -to us, because it allows us to refine and change features based on what our -users want and expect of us. There are a bunch of ways to get in contact! You -can [ping us](https://developer.rackspace.com/support/) via e-mail, talk to us on irc -(#rackspace-dev on freenode), [tweet us](https://twitter.com/rackspace), or -submit an issue on our [bug tracker](/issues). Things you might like to tell us -are: - -* how easy was it to start using our SDK? -* did it meet your expectations? If not, why not? -* did our documentation help or hinder you? -* what could we improve in general? - -### 2. Fixing bugs - -If you want to start fixing open bugs, we'd really appreciate that! Bug fixing -is central to any project. The best way to get started is by heading to our -[bug tracker](https://github.com/rackspace/gophercloud/issues) and finding open -bugs that you think nobody is working on. It might be useful to comment on the -thread to see the current state of the issue and if anybody has made any -breakthroughs on it so far. - -### 3. Improving documentation - -We have three forms of documentation: - -* short README documents that briefly introduce a topic -* reference documentation on [godoc.org](http://godoc.org) that is automatically -generated from source code comments -* user documentation on our [homepage](http://gophercloud.io) that includes -getting started guides, installation guides and code samples - -If you feel that a certain section could be improved - whether it's to clarify -ambiguity, correct a technical mistake, or to fix a grammatical error - please -feel entitled to do so! We welcome doc pull requests with the same childlike -enthusiasm as any other contribution! - -### 4. Optimizing existing features - -If you would like to improve or optimize an existing feature, please be aware -that we adhere to [semantic versioning](http://semver.org) - which means that -we cannot introduce breaking changes to the API without a major version change -(v1.x -> v2.x). Making that leap is a big step, so we encourage contributors to -refactor rather than rewrite. Running tests will prevent regression and avoid -the possibility of breaking somebody's current implementation. - -Another tip is to keep the focus of your work as small as possible - try not to -introduce a change that affects lots and lots of files because it introduces -added risk and increases the cognitive load on the reviewers checking your -work. Change-sets which are easily understood and will not negatively impact -users are more likely to be integrated quickly. - -Lastly, if you're seeking to optimize a particular operation, you should try to -demonstrate a negative performance impact - perhaps using go's inbuilt -[benchmark capabilities](http://dave.cheney.net/2013/06/30/how-to-write-benchmarks-in-go). - -### 5. Working on a new feature - -If you've found something we've left out, definitely feel free to start work on -introducing that feature. It's always useful to open an issue or submit a pull -request early on to indicate your intent to a core contributor - this enables -quick/early feedback and can help steer you in the right direction by avoiding -known issues. It might also help you avoid losing time implementing something -that might not ever work. 
One tip is to prefix your Pull Request issue title -with [wip] - then people know it's a work in progress. - -You must ensure that all of your work is well tested - both in terms of unit -and acceptance tests. Untested code will not be merged because it introduces -too much of a risk to end-users. - -Happy hacking! diff --git a/vendor/github.com/rackspace/gophercloud/CONTRIBUTORS.md b/vendor/github.com/rackspace/gophercloud/CONTRIBUTORS.md deleted file mode 100644 index 63beb30b20df..000000000000 --- a/vendor/github.com/rackspace/gophercloud/CONTRIBUTORS.md +++ /dev/null @@ -1,13 +0,0 @@ -Contributors -============ - -| Name | Email | -| ---- | ----- | -| Samuel A. Falvo II | -| Glen Campbell | -| Jesse Noller | -| Jon Perritt | -| Ash Wilson | -| Jamie Hannaford | -| Don Schenck | don.schenck@rackspace.com> -| Joe Topjian | diff --git a/vendor/github.com/rackspace/gophercloud/LICENSE b/vendor/github.com/rackspace/gophercloud/LICENSE deleted file mode 100644 index fbbbc9e4cbad..000000000000 --- a/vendor/github.com/rackspace/gophercloud/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Copyright 2012-2013 Rackspace, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); you may not use -this file except in compliance with the License. You may obtain a copy of the -License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software distributed -under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -CONDITIONS OF ANY KIND, either express or implied. See the License for the -specific language governing permissions and limitations under the License. - ------- - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS diff --git a/vendor/github.com/rackspace/gophercloud/README.md b/vendor/github.com/rackspace/gophercloud/README.md deleted file mode 100644 index 4b0042b3b703..000000000000 --- a/vendor/github.com/rackspace/gophercloud/README.md +++ /dev/null @@ -1,160 +0,0 @@ -# Gophercloud: an OpenStack SDK for Go -[![Build Status](https://travis-ci.org/rackspace/gophercloud.svg?branch=master)](https://travis-ci.org/rackspace/gophercloud) [![Coverage Status](https://coveralls.io/repos/rackspace/gophercloud/badge.png)](https://coveralls.io/r/rackspace/gophercloud) - -Gophercloud is a flexible SDK that allows you to consume and work with OpenStack -clouds in a simple and idiomatic way using golang. Many services are supported, -including Compute, Block Storage, Object Storage, Networking, and Identity. -Each service API is backed with getting started guides, code samples, reference -documentation, unit tests and acceptance tests. - -## Useful links - -* [Gophercloud homepage](http://gophercloud.io) -* [Reference documentation](http://godoc.org/github.com/rackspace/gophercloud) -* [Getting started guides](http://gophercloud.io/docs) -* [Effective Go](https://golang.org/doc/effective_go.html) - -## How to install - -Before installing, you need to ensure that your [GOPATH environment variable](https://golang.org/doc/code.html#GOPATH) -is pointing to an appropriate directory where you want to install Gophercloud: - -```bash -mkdir $HOME/go -export GOPATH=$HOME/go -``` - -To protect yourself against changes in your dependencies, we highly recommend choosing a -[dependency management solution](https://github.com/golang/go/wiki/PackageManagementTools) for -your projects, such as [godep](https://github.com/tools/godep). Once this is set up, you can install -Gophercloud as a dependency like so: - -```bash -go get github.com/rackspace/gophercloud - -# Edit your code to import relevant packages from "github.com/rackspace/gophercloud" - -godep save ./... -``` - -This will install all the source files you need into a `Godeps/_workspace` directory, which is -referenceable from your own source files when you use the `godep go` command. 
- -## Getting started - -### Credentials - -Because you'll be hitting an API, you will need to retrieve your OpenStack -credentials and either store them as environment variables or in your local Go -files. The first method is recommended because it decouples credential -information from source code, allowing you to push the latter to your version -control system without any security risk. - -You will need to retrieve the following: - -* username -* password -* tenant name or tenant ID -* a valid Keystone identity URL - -For users that have the OpenStack dashboard installed, there's a shortcut. If -you visit the `project/access_and_security` path in Horizon and click on the -"Download OpenStack RC File" button at the top right hand corner, you will -download a bash file that exports all of your access details to environment -variables. To execute the file, run `source admin-openrc.sh` and you will be -prompted for your password. - -### Authentication - -Once you have access to your credentials, you can begin plugging them into -Gophercloud. The next step is authentication, and this is handled by a base -"Provider" struct. To get one, you can either pass in your credentials -explicitly, or tell Gophercloud to use environment variables: - -```go -import ( - "github.com/rackspace/gophercloud" - "github.com/rackspace/gophercloud/openstack" - "github.com/rackspace/gophercloud/openstack/utils" -) - -// Option 1: Pass in the values yourself -opts := gophercloud.AuthOptions{ - IdentityEndpoint: "https://my-openstack.com:5000/v2.0", - Username: "{username}", - Password: "{password}", - TenantID: "{tenant_id}", -} - -// Option 2: Use a utility function to retrieve all your environment variables -opts, err := openstack.AuthOptionsFromEnv() -``` - -Once you have the `opts` variable, you can pass it in and get back a -`ProviderClient` struct: - -```go -provider, err := openstack.AuthenticatedClient(opts) -``` - -The `ProviderClient` is the top-level client that all of your OpenStack services -derive from. The provider contains all of the authentication details that allow -your Go code to access the API - such as the base URL and token ID. - -### Provision a server - -Once we have a base Provider, we inject it as a dependency into each OpenStack -service. In order to work with the Compute API, we need a Compute service -client; which can be created like so: - -```go -client, err := openstack.NewComputeV2(provider, gophercloud.EndpointOpts{ - Region: os.Getenv("OS_REGION_NAME"), -}) -``` - -We then use this `client` for any Compute API operation we want. In our case, -we want to provision a new server - so we invoke the `Create` method and pass -in the flavor ID (hardware specification) and image ID (operating system) we're -interested in: - -```go -import "github.com/rackspace/gophercloud/openstack/compute/v2/servers" - -server, err := servers.Create(client, servers.CreateOpts{ - Name: "My new server!", - FlavorRef: "flavor_id", - ImageRef: "image_id", -}).Extract() -``` - -If you are unsure about what images and flavors are, you can read our [Compute -Getting Started guide](http://gophercloud.io/docs/compute). The above code -sample creates a new server with the parameters, and embodies the new resource -in the `server` variable (a -[`servers.Server`](http://godoc.org/github.com/rackspace/gophercloud) struct). - -### Next steps - -Cool! You've handled authentication, got your `ProviderClient` and provisioned -a new server. You're now ready to use more OpenStack services. 
- -* [Getting started with Compute](http://gophercloud.io/docs/compute) -* [Getting started with Object Storage](http://gophercloud.io/docs/object-storage) -* [Getting started with Networking](http://gophercloud.io/docs/networking) -* [Getting started with Block Storage](http://gophercloud.io/docs/block-storage) -* [Getting started with Identity](http://gophercloud.io/docs/identity) - -## Contributing - -Engaging the community and lowering barriers for contributors is something we -care a lot about. For this reason, we've taken the time to write a [contributing -guide](./CONTRIBUTING.md) for folks interested in getting involved in our project. -If you're not sure how you can get involved, feel free to submit an issue or -[contact us](https://developer.rackspace.com/support/). You don't need to be a -Go expert - all members of the community are welcome! - -## Help and feedback - -If you're struggling with something or have spotted a potential bug, feel free -to submit an issue to our [bug tracker](/issues) or [contact us directly](https://developer.rackspace.com/support/). diff --git a/vendor/github.com/rackspace/gophercloud/UPGRADING.md b/vendor/github.com/rackspace/gophercloud/UPGRADING.md deleted file mode 100644 index 76a94d570329..000000000000 --- a/vendor/github.com/rackspace/gophercloud/UPGRADING.md +++ /dev/null @@ -1,338 +0,0 @@ -# Upgrading to v1.0.0 - -With the arrival of this new major version increment, the unfortunate news is -that breaking changes have been introduced to existing services. The API -has been completely rewritten from the ground up to make the library more -extensible, maintainable and easy-to-use. - -Below we've compiled upgrade instructions for the various services that -existed before. If you have a specific issue that is not addressed below, -please [submit an issue](/issues/new) or -[e-mail our support team](https://developer.rackspace.com/support/). - -* [Authentication](#authentication) -* [Servers](#servers) - * [List servers](#list-servers) - * [Get server details](#get-server-details) - * [Create server](#create-server) - * [Resize server](#resize-server) - * [Reboot server](#reboot-server) - * [Update server](#update-server) - * [Rebuild server](#rebuild-server) - * [Change admin password](#change-admin-password) - * [Delete server](#delete-server) - * [Rescue server](#rescue-server) -* [Images and flavors](#images-and-flavors) - * [List images](#list-images) - * [List flavors](#list-flavors) - * [Create/delete image](#createdelete-image) -* [Other](#other) - * [List keypairs](#list-keypairs) - * [Create/delete keypair](#createdelete-keypair) - * [List IP addresses](#list-ip-addresses) - -# Authentication - -One of the major differences that this release introduces is the level of -sub-packaging to differentiate between services and providers. You now have -the option of authenticating with OpenStack and other providers (like Rackspace). 
- -To authenticate with a vanilla OpenStack installation, you can either specify -your credentials like this: - -```go -import ( - "github.com/rackspace/gophercloud" - "github.com/rackspace/gophercloud/openstack" -) - -opts := gophercloud.AuthOptions{ - IdentityEndpoint: "https://my-openstack.com:5000/v2.0", - Username: "{username}", - Password: "{password}", - TenantID: "{tenant_id}", -} -``` - -Or have them pulled in through environment variables, like this: - -```go -opts, err := openstack.AuthOptionsFromEnv() -``` - -Once you have your `AuthOptions` struct, you pass it in to get back a `Provider`, -like so: - -```go -provider, err := openstack.AuthenticatedClient(opts) -``` - -This provider is the top-level structure that all services are created from. - -# Servers - -Before you can interact with the Compute API, you need to retrieve a -`gophercloud.ServiceClient`. To do this: - -```go -// Define your region, etc. -opts := gophercloud.EndpointOpts{Region: "RegionOne"} - -client, err := openstack.NewComputeV2(provider, opts) -``` - -## List servers - -All operations that involve API collections (servers, flavors, images) now use -the `pagination.Pager` interface. This interface represents paginated entities -that can be iterated over. - -Once you have a Pager, you can then pass a callback function into its `EachPage` -method, and this will allow you to traverse over the collection and execute -arbitrary functionality. So, an example with list servers: - -```go -import ( - "fmt" - "github.com/rackspace/gophercloud/pagination" - "github.com/rackspace/gophercloud/openstack/compute/v2/servers" -) - -// We have the option of filtering the server list. If we want the full -// collection, leave it as an empty struct or nil -opts := servers.ListOpts{Name: "server_1"} - -// Retrieve a pager (i.e. a paginated collection) -pager := servers.List(client, opts) - -// Define an anonymous function to be executed on each page's iteration -err := pager.EachPage(func(page pagination.Page) (bool, error) { - serverList, err := servers.ExtractServers(page) - - // `s' will be a servers.Server struct - for _, s := range serverList { - fmt.Printf("We have a server. 
ID=%s, Name=%s", s.ID, s.Name) - } -}) -``` - -## Get server details - -```go -import "github.com/rackspace/gophercloud/openstack/compute/v2/servers" - -// Get the HTTP result -response := servers.Get(client, "server_id") - -// Extract a Server struct from the response -server, err := response.Extract() -``` - -## Create server - -```go -import "github.com/rackspace/gophercloud/openstack/compute/v2/servers" - -// Define our options -opts := servers.CreateOpts{ - Name: "new_server", - FlavorRef: "flavorID", - ImageRef: "imageID", -} - -// Get our response -response := servers.Create(client, opts) - -// Extract -server, err := response.Extract() -``` - -## Change admin password - -```go -import "github.com/rackspace/gophercloud/openstack/compute/v2/servers" - -result := servers.ChangeAdminPassword(client, "server_id", "newPassword_&123") -``` - -## Resize server - -```go -import "github.com/rackspace/gophercloud/openstack/compute/v2/servers" - -result := servers.Resize(client, "server_id", "new_flavor_id") -``` - -## Reboot server - -```go -import "github.com/rackspace/gophercloud/openstack/compute/v2/servers" - -// You have a choice of two reboot methods: servers.SoftReboot or servers.HardReboot -result := servers.Reboot(client, "server_id", servers.SoftReboot) -``` - -## Update server - -```go -import "github.com/rackspace/gophercloud/openstack/compute/v2/servers" - -opts := servers.UpdateOpts{Name: "new_name"} - -server, err := servers.Update(client, "server_id", opts).Extract() -``` - -## Rebuild server - -```go -import "github.com/rackspace/gophercloud/openstack/compute/v2/servers" - -// You have the option of specifying additional options -opts := RebuildOpts{ - Name: "new_name", - AdminPass: "admin_password", - ImageID: "image_id", - Metadata: map[string]string{"owner": "me"}, -} - -result := servers.Rebuild(client, "server_id", opts) - -// You can extract a servers.Server struct from the HTTP response -server, err := result.Extract() -``` - -## Delete server - -```go -import "github.com/rackspace/gophercloud/openstack/compute/v2/servers" - -response := servers.Delete(client, "server_id") -``` - -## Rescue server - -The server rescue extension for Compute is not currently supported. - -# Images and flavors - -## List images - -As with listing servers (see above), you first retrieve a Pager, and then pass -in a callback over each page: - -```go -import ( - "github.com/rackspace/gophercloud/pagination" - "github.com/rackspace/gophercloud/openstack/compute/v2/images" -) - -// We have the option of filtering the image list. If we want the full -// collection, leave it as an empty struct -opts := images.ListOpts{ChangesSince: "2014-01-01T01:02:03Z", Name: "Ubuntu 12.04"} - -// Retrieve a pager (i.e. a paginated collection) -pager := images.List(client, opts) - -// Define an anonymous function to be executed on each page's iteration -err := pager.EachPage(func(page pagination.Page) (bool, error) { - imageList, err := images.ExtractImages(page) - - for _, i := range imageList { - // "i" will be an images.Image - } -}) -``` - -## List flavors - -```go -import ( - "github.com/rackspace/gophercloud/pagination" - "github.com/rackspace/gophercloud/openstack/compute/v2/flavors" -) - -// We have the option of filtering the flavor list. If we want the full -// collection, leave it as an empty struct -opts := flavors.ListOpts{ChangesSince: "2014-01-01T01:02:03Z", MinRAM: 4} - -// Retrieve a pager (i.e. 
a paginated collection) -pager := flavors.List(client, opts) - -// Define an anonymous function to be executed on each page's iteration -err := pager.EachPage(func(page pagination.Page) (bool, error) { - flavorList, err := networks.ExtractFlavors(page) - - for _, f := range flavorList { - // "f" will be a flavors.Flavor - } -}) -``` - -## Create/delete image - -Image management has been shifted to Glance, but unfortunately this service is -not supported as of yet. You can, however, list Compute images like so: - -```go -import "github.com/rackspace/gophercloud/openstack/compute/v2/images" - -// Retrieve a pager (i.e. a paginated collection) -pager := images.List(client, opts) - -// Define an anonymous function to be executed on each page's iteration -err := pager.EachPage(func(page pagination.Page) (bool, error) { - imageList, err := images.ExtractImages(page) - - for _, i := range imageList { - // "i" will be an images.Image - } -}) -``` - -# Other - -## List keypairs - -```go -import "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs" - -// Retrieve a pager (i.e. a paginated collection) -pager := keypairs.List(client, opts) - -// Define an anonymous function to be executed on each page's iteration -err := pager.EachPage(func(page pagination.Page) (bool, error) { - keyList, err := keypairs.ExtractKeyPairs(page) - - for _, k := range keyList { - // "k" will be a keypairs.KeyPair - } -}) -``` - -## Create/delete keypairs - -To create a new keypair, you need to specify its name and, optionally, a -pregenerated OpenSSH-formatted public key. - -```go -import "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs" - -opts := keypairs.CreateOpts{ - Name: "new_key", - PublicKey: "...", -} - -response := keypairs.Create(client, opts) - -key, err := response.Extract() -``` - -To delete an existing keypair: - -```go -response := keypairs.Delete(client, "keypair_id") -``` - -## List IP addresses - -This operation is not currently supported. diff --git a/vendor/github.com/rackspace/gophercloud/auth_options.go b/vendor/github.com/rackspace/gophercloud/auth_options.go deleted file mode 100644 index 07ace1366ba3..000000000000 --- a/vendor/github.com/rackspace/gophercloud/auth_options.go +++ /dev/null @@ -1,55 +0,0 @@ -package gophercloud - -/* -AuthOptions stores information needed to authenticate to an OpenStack cluster. -You can populate one manually, or use a provider's AuthOptionsFromEnv() function -to read relevant information from the standard environment variables. Pass one -to a provider's AuthenticatedClient function to authenticate and obtain a -ProviderClient representing an active session on that provider. - -Its fields are the union of those recognized by each identity implementation and -provider. -*/ -type AuthOptions struct { - // IdentityEndpoint specifies the HTTP endpoint that is required to work with - // the Identity API of the appropriate version. While it's ultimately needed by - // all of the identity services, it will often be populated by a provider-level - // function. - IdentityEndpoint string - - // Username is required if using Identity V2 API. Consult with your provider's - // control panel to discover your account's username. In Identity V3, either - // UserID or a combination of Username and DomainID or DomainName are needed. - Username, UserID string - - // Exactly one of Password or APIKey is required for the Identity V2 and V3 - // APIs. 
Consult with your provider's control panel to discover your account's - // preferred method of authentication. - Password, APIKey string - - // At most one of DomainID and DomainName must be provided if using Username - // with Identity V3. Otherwise, either are optional. - DomainID, DomainName string - - // The TenantID and TenantName fields are optional for the Identity V2 API. - // Some providers allow you to specify a TenantName instead of the TenantId. - // Some require both. Your provider's authentication policies will determine - // how these fields influence authentication. - TenantID, TenantName string - - // AllowReauth should be set to true if you grant permission for Gophercloud to - // cache your credentials in memory, and to allow Gophercloud to attempt to - // re-authenticate automatically if/when your token expires. If you set it to - // false, it will not cache these settings, but re-authentication will not be - // possible. This setting defaults to false. - // - // NOTE: The reauth function will try to re-authenticate endlessly if left unchecked. - // The way to limit the number of attempts is to provide a custom HTTP client to the provider client - // and provide a transport that implements the RoundTripper interface and stores the number of failed retries. - // For an example of this, see here: https://github.com/rackspace/rack/blob/1.0.0/auth/clients.go#L311 - AllowReauth bool - - // TokenID allows users to authenticate (possibly as another user) with an - // authentication token ID. - TokenID string -} diff --git a/vendor/github.com/rackspace/gophercloud/auth_results.go b/vendor/github.com/rackspace/gophercloud/auth_results.go deleted file mode 100644 index 856a23382e90..000000000000 --- a/vendor/github.com/rackspace/gophercloud/auth_results.go +++ /dev/null @@ -1,14 +0,0 @@ -package gophercloud - -import "time" - -// AuthResults [deprecated] is a leftover type from the v0.x days. It was -// intended to describe common functionality among identity service results, but -// is not actually used anywhere. -type AuthResults interface { - // TokenID returns the token's ID value from the authentication response. - TokenID() (string, error) - - // ExpiresAt retrieves the token's expiration time. - ExpiresAt() (time.Time, error) -} diff --git a/vendor/github.com/rackspace/gophercloud/doc.go b/vendor/github.com/rackspace/gophercloud/doc.go deleted file mode 100644 index fb81a9d8f174..000000000000 --- a/vendor/github.com/rackspace/gophercloud/doc.go +++ /dev/null @@ -1,67 +0,0 @@ -/* -Package gophercloud provides a multi-vendor interface to OpenStack-compatible -clouds. The library has a three-level hierarchy: providers, services, and -resources. - -Provider structs represent the service providers that offer and manage a -collection of services. Examples of providers include: OpenStack, Rackspace, -HP. These are defined like so: - - opts := gophercloud.AuthOptions{ - IdentityEndpoint: "https://my-openstack.com:5000/v2.0", - Username: "{username}", - Password: "{password}", - TenantID: "{tenant_id}", - } - - provider, err := openstack.AuthenticatedClient(opts) - -Service structs are specific to a provider and handle all of the logic and -operations for a particular OpenStack service. Examples of services include: -Compute, Object Storage, Block Storage. 
In order to define one, you need to -pass in the parent provider, like so: - - opts := gophercloud.EndpointOpts{Region: "RegionOne"} - - client := openstack.NewComputeV2(provider, opts) - -Resource structs are the domain models that services make use of in order -to work with and represent the state of API resources: - - server, err := servers.Get(client, "{serverId}").Extract() - -Intermediate Result structs are returned for API operations, which allow -generic access to the HTTP headers, response body, and any errors associated -with the network transaction. To turn a result into a usable resource struct, -you must call the Extract method which is chained to the response, or an -Extract function from an applicable extension: - - result := servers.Get(client, "{serverId}") - - // Attempt to extract the disk configuration from the OS-DCF disk config - // extension: - config, err := diskconfig.ExtractGet(result) - -All requests that enumerate a collection return a Pager struct that is used to -iterate through the results one page at a time. Use the EachPage method on that -Pager to handle each successive Page in a closure, then use the appropriate -extraction method from that request's package to interpret that Page as a slice -of results: - - err := servers.List(client, nil).EachPage(func (page pagination.Page) (bool, error) { - s, err := servers.ExtractServers(page) - if err != nil { - return false, err - } - - // Handle the []servers.Server slice. - - // Return "false" or an error to prematurely stop fetching new pages. - return true, nil - }) - -This top-level package contains utility functions and data types that are used -throughout the provider and service packages. Of particular note for end users -are the AuthOptions and EndpointOpts structs. -*/ -package gophercloud diff --git a/vendor/github.com/rackspace/gophercloud/endpoint_search.go b/vendor/github.com/rackspace/gophercloud/endpoint_search.go deleted file mode 100644 index 5189431212c1..000000000000 --- a/vendor/github.com/rackspace/gophercloud/endpoint_search.go +++ /dev/null @@ -1,92 +0,0 @@ -package gophercloud - -import "errors" - -var ( - // ErrServiceNotFound is returned when no service in a service catalog matches - // the provided EndpointOpts. This is generally returned by provider service - // factory methods like "NewComputeV2()" and can mean that a service is not - // enabled for your account. - ErrServiceNotFound = errors.New("No suitable service could be found in the service catalog.") - - // ErrEndpointNotFound is returned when no available endpoints match the - // provided EndpointOpts. This is also generally returned by provider service - // factory methods, and usually indicates that a region was specified - // incorrectly. - ErrEndpointNotFound = errors.New("No suitable endpoint could be found in the service catalog.") -) - -// Availability indicates to whom a specific service endpoint is accessible: -// the internet at large, internal networks only, or only to administrators. -// Different identity services use different terminology for these. Identity v2 -// lists them as different kinds of URLs within the service catalog ("adminURL", -// "internalURL", and "publicURL"), while v3 lists them as "Interfaces" in an -// endpoint's response. -type Availability string - -const ( - // AvailabilityAdmin indicates that an endpoint is only available to - // administrators. - AvailabilityAdmin Availability = "admin" - - // AvailabilityPublic indicates that an endpoint is available to everyone on - // the internet. 
- AvailabilityPublic Availability = "public" - - // AvailabilityInternal indicates that an endpoint is only available within - // the cluster's internal network. - AvailabilityInternal Availability = "internal" -) - -// EndpointOpts specifies search criteria used by queries against an -// OpenStack service catalog. The options must contain enough information to -// unambiguously identify one, and only one, endpoint within the catalog. -// -// Usually, these are passed to service client factory functions in a provider -// package, like "rackspace.NewComputeV2()". -type EndpointOpts struct { - // Type [required] is the service type for the client (e.g., "compute", - // "object-store"). Generally, this will be supplied by the service client - // function, but a user-given value will be honored if provided. - Type string - - // Name [optional] is the service name for the client (e.g., "nova") as it - // appears in the service catalog. Services can have the same Type but a - // different Name, which is why both Type and Name are sometimes needed. - Name string - - // Region [required] is the geographic region in which the endpoint resides, - // generally specifying which datacenter should house your resources. - // Required only for services that span multiple regions. - Region string - - // Availability [optional] is the visibility of the endpoint to be returned. - // Valid types include the constants AvailabilityPublic, AvailabilityInternal, - // or AvailabilityAdmin from this package. - // - // Availability is not required, and defaults to AvailabilityPublic. Not all - // providers or services offer all Availability options. - Availability Availability -} - -/* -EndpointLocator is an internal function to be used by provider implementations. - -It provides an implementation that locates a single endpoint from a service -catalog for a specific ProviderClient based on user-provided EndpointOpts. The -provider then uses it to discover related ServiceClients. -*/ -type EndpointLocator func(EndpointOpts) (string, error) - -// ApplyDefaults is an internal method to be used by provider implementations. -// -// It sets EndpointOpts fields if not already set, including a default type. -// Currently, EndpointOpts.Availability defaults to the public endpoint. -func (eo *EndpointOpts) ApplyDefaults(t string) { - if eo.Type == "" { - eo.Type = t - } - if eo.Availability == "" { - eo.Availability = AvailabilityPublic - } -} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/auth_env.go b/vendor/github.com/rackspace/gophercloud/openstack/auth_env.go deleted file mode 100644 index 2d226d6090c0..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/auth_env.go +++ /dev/null @@ -1,61 +0,0 @@ -package openstack - -import ( - "fmt" - "os" - - "github.com/rackspace/gophercloud" -) - -var nilOptions = gophercloud.AuthOptions{} - -// ErrNoAuthUrl, ErrNoUsername, and ErrNoPassword errors indicate of the required OS_AUTH_URL, OS_USERNAME, or OS_PASSWORD -// environment variables, respectively, remain undefined. See the AuthOptions() function for more details. 
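Since endpoint_search.go is deleted in this hunk, here is a minimal sketch of how EndpointOpts and the Availability constants were typically fed to a service factory; the region value is an illustrative placeholder, not something taken from this diff.

package main

import (
	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack"
)

// newInternalObjectStore asks the catalog for the object-store endpoint that
// is only reachable on the provider's internal network.
func newInternalObjectStore(provider *gophercloud.ProviderClient) (*gophercloud.ServiceClient, error) {
	eo := gophercloud.EndpointOpts{
		Region:       "RegionOne", // placeholder region
		Availability: gophercloud.AvailabilityInternal,
	}
	// NewObjectStorageV1 calls eo.ApplyDefaults("object-store"), so the
	// caller does not need to set Type explicitly.
	return openstack.NewObjectStorageV1(provider, eo)
}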
-var ( - ErrNoAuthURL = fmt.Errorf("Environment variable OS_AUTH_URL needs to be set.") - ErrNoUsername = fmt.Errorf("Environment variable OS_USERNAME, OS_USERID, or OS_TOKEN needs to be set.") - ErrNoPassword = fmt.Errorf("Environment variable OS_PASSWORD or OS_TOKEN needs to be set.") -) - -// AuthOptionsFromEnv fills out an AuthOptions structure from the environment -// variables: OS_AUTH_URL, OS_USERNAME, OS_USERID, OS_PASSWORD, OS_TENANT_ID, -// OS_TENANT_NAME, OS_DOMAIN_ID, OS_DOMAIN_NAME, OS_TOKEN. It checks that -// (1) OS_AUTH_URL is set, (2) OS_USERNAME, OS_USERID, or OS_TOKEN is set, -// (3) OS_PASSWORD or OS_TOKEN is set. -func AuthOptionsFromEnv() (gophercloud.AuthOptions, error) { - authURL := os.Getenv("OS_AUTH_URL") - username := os.Getenv("OS_USERNAME") - userID := os.Getenv("OS_USERID") - password := os.Getenv("OS_PASSWORD") - tenantID := os.Getenv("OS_TENANT_ID") - tenantName := os.Getenv("OS_TENANT_NAME") - domainID := os.Getenv("OS_DOMAIN_ID") - domainName := os.Getenv("OS_DOMAIN_NAME") - tokenID := os.Getenv("OS_TOKEN") - - if authURL == "" { - return nilOptions, ErrNoAuthURL - } - - if username == "" && userID == "" && tokenID == "" { - return nilOptions, ErrNoUsername - } - - if password == "" && tokenID == "" { - return nilOptions, ErrNoPassword - } - - ao := gophercloud.AuthOptions{ - IdentityEndpoint: authURL, - UserID: userID, - Username: username, - Password: password, - TenantID: tenantID, - TenantName: tenantName, - DomainID: domainID, - DomainName: domainName, - TokenID: tokenID, - } - - return ao, nil -} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/requests.go deleted file mode 100644 index 3e9243ac4957..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/requests.go +++ /dev/null @@ -1,236 +0,0 @@ -package volumes - -import ( - "fmt" - - "github.com/rackspace/gophercloud" - "github.com/rackspace/gophercloud/pagination" -) - -// CreateOptsBuilder allows extensions to add additional parameters to the -// Create request. -type CreateOptsBuilder interface { - ToVolumeCreateMap() (map[string]interface{}, error) -} - -// CreateOpts contains options for creating a Volume. This object is passed to -// the volumes.Create function. For more information about these parameters, -// see the Volume object. -type CreateOpts struct { - // OPTIONAL - Availability string - // OPTIONAL - Description string - // OPTIONAL - Metadata map[string]string - // OPTIONAL - Name string - // REQUIRED - Size int - // OPTIONAL - SnapshotID, SourceVolID, ImageID string - // OPTIONAL - VolumeType string -} - -// ToVolumeCreateMap assembles a request body based on the contents of a -// CreateOpts. 
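For context on the removed auth_env.go, a short sketch of the environment-driven flow, assuming the OS_* variables are already exported; the region name is a placeholder.

package main

import (
	"log"

	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack"
)

func main() {
	// Reads OS_AUTH_URL, OS_USERNAME/OS_USERID/OS_TOKEN, OS_PASSWORD and the
	// optional tenant/domain variables; returns ErrNoAuthURL, ErrNoUsername,
	// or ErrNoPassword when a required one is missing.
	opts, err := openstack.AuthOptionsFromEnv()
	if err != nil {
		log.Fatal(err)
	}

	// Authenticates against the newest identity version the endpoint offers.
	provider, err := openstack.AuthenticatedClient(opts)
	if err != nil {
		log.Fatal(err)
	}

	// "RegionOne" is a placeholder region name.
	compute, err := openstack.NewComputeV2(provider, gophercloud.EndpointOpts{Region: "RegionOne"})
	if err != nil {
		log.Fatal(err)
	}
	_ = compute
}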
-func (opts CreateOpts) ToVolumeCreateMap() (map[string]interface{}, error) { - v := make(map[string]interface{}) - - if opts.Size == 0 { - return nil, fmt.Errorf("Required CreateOpts field 'Size' not set.") - } - v["size"] = opts.Size - - if opts.Availability != "" { - v["availability_zone"] = opts.Availability - } - if opts.Description != "" { - v["display_description"] = opts.Description - } - if opts.ImageID != "" { - v["imageRef"] = opts.ImageID - } - if opts.Metadata != nil { - v["metadata"] = opts.Metadata - } - if opts.Name != "" { - v["display_name"] = opts.Name - } - if opts.SourceVolID != "" { - v["source_volid"] = opts.SourceVolID - } - if opts.SnapshotID != "" { - v["snapshot_id"] = opts.SnapshotID - } - if opts.VolumeType != "" { - v["volume_type"] = opts.VolumeType - } - - return map[string]interface{}{"volume": v}, nil -} - -// Create will create a new Volume based on the values in CreateOpts. To extract -// the Volume object from the response, call the Extract method on the -// CreateResult. -func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult { - var res CreateResult - - reqBody, err := opts.ToVolumeCreateMap() - if err != nil { - res.Err = err - return res - } - - _, res.Err = client.Post(createURL(client), reqBody, &res.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200, 201}, - }) - return res -} - -// Delete will delete the existing Volume with the provided ID. -func Delete(client *gophercloud.ServiceClient, id string) DeleteResult { - var res DeleteResult - _, res.Err = client.Delete(deleteURL(client, id), nil) - return res -} - -// Get retrieves the Volume with the provided ID. To extract the Volume object -// from the response, call the Extract method on the GetResult. -func Get(client *gophercloud.ServiceClient, id string) GetResult { - var res GetResult - _, res.Err = client.Get(getURL(client, id), &res.Body, nil) - return res -} - -// ListOptsBuilder allows extensions to add additional parameters to the List -// request. -type ListOptsBuilder interface { - ToVolumeListQuery() (string, error) -} - -// ListOpts holds options for listing Volumes. It is passed to the volumes.List -// function. -type ListOpts struct { - // admin-only option. Set it to true to see all tenant volumes. - AllTenants bool `q:"all_tenants"` - // List only volumes that contain Metadata. - Metadata map[string]string `q:"metadata"` - // List only volumes that have Name as the display name. - Name string `q:"name"` - // List only volumes that have a status of Status. - Status string `q:"status"` -} - -// ToVolumeListQuery formats a ListOpts into a query string. -func (opts ListOpts) ToVolumeListQuery() (string, error) { - q, err := gophercloud.BuildQueryString(opts) - if err != nil { - return "", err - } - return q.String(), nil -} - -// List returns Volumes optionally limited by the conditions provided in ListOpts. -func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { - url := listURL(client) - if opts != nil { - query, err := opts.ToVolumeListQuery() - if err != nil { - return pagination.Pager{Err: err} - } - url += query - } - createPage := func(r pagination.PageResult) pagination.Page { - return ListResult{pagination.SinglePageBase(r)} - } - - return pagination.NewPager(client, url, createPage) -} - -// UpdateOptsBuilder allows extensions to add additional parameters to the -// Update request. 
-type UpdateOptsBuilder interface { - ToVolumeUpdateMap() (map[string]interface{}, error) -} - -// UpdateOpts contain options for updating an existing Volume. This object is passed -// to the volumes.Update function. For more information about the parameters, see -// the Volume object. -type UpdateOpts struct { - // OPTIONAL - Name string - // OPTIONAL - Description string - // OPTIONAL - Metadata map[string]string -} - -// ToVolumeUpdateMap assembles a request body based on the contents of an -// UpdateOpts. -func (opts UpdateOpts) ToVolumeUpdateMap() (map[string]interface{}, error) { - v := make(map[string]interface{}) - - if opts.Description != "" { - v["display_description"] = opts.Description - } - if opts.Metadata != nil { - v["metadata"] = opts.Metadata - } - if opts.Name != "" { - v["display_name"] = opts.Name - } - - return map[string]interface{}{"volume": v}, nil -} - -// Update will update the Volume with provided information. To extract the updated -// Volume from the response, call the Extract method on the UpdateResult. -func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) UpdateResult { - var res UpdateResult - - reqBody, err := opts.ToVolumeUpdateMap() - if err != nil { - res.Err = err - return res - } - - _, res.Err = client.Put(updateURL(client, id), reqBody, &res.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - return res -} - -// IDFromName is a convienience function that returns a server's ID given its name. -func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { - volumeCount := 0 - volumeID := "" - if name == "" { - return "", fmt.Errorf("A volume name must be provided.") - } - pager := List(client, nil) - pager.EachPage(func(page pagination.Page) (bool, error) { - volumeList, err := ExtractVolumes(page) - if err != nil { - return false, err - } - - for _, s := range volumeList { - if s.Name == name { - volumeCount++ - volumeID = s.ID - } - } - return true, nil - }) - - switch volumeCount { - case 0: - return "", fmt.Errorf("Unable to find volume: %s", name) - case 1: - return volumeID, nil - default: - return "", fmt.Errorf("Found %d volumes matching %s", volumeCount, name) - } -} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/results.go b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/results.go deleted file mode 100644 index 2fd4ef14eae0..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/results.go +++ /dev/null @@ -1,113 +0,0 @@ -package volumes - -import ( - "github.com/rackspace/gophercloud" - "github.com/rackspace/gophercloud/pagination" - - "github.com/mitchellh/mapstructure" -) - -// Volume contains all the information associated with an OpenStack Volume. -type Volume struct { - // Current status of the volume. - Status string `mapstructure:"status"` - - // Human-readable display name for the volume. - Name string `mapstructure:"display_name"` - - // Instances onto which the volume is attached. - Attachments []map[string]interface{} `mapstructure:"attachments"` - - // This parameter is no longer used. - AvailabilityZone string `mapstructure:"availability_zone"` - - // Indicates whether this is a bootable volume. - Bootable string `mapstructure:"bootable"` - - // The date when this volume was created. - CreatedAt string `mapstructure:"created_at"` - - // Human-readable description for the volume. 
- Description string `mapstructure:"display_description"` - - // The type of volume to create, either SATA or SSD. - VolumeType string `mapstructure:"volume_type"` - - // The ID of the snapshot from which the volume was created - SnapshotID string `mapstructure:"snapshot_id"` - - // The ID of another block storage volume from which the current volume was created - SourceVolID string `mapstructure:"source_volid"` - - // Arbitrary key-value pairs defined by the user. - Metadata map[string]string `mapstructure:"metadata"` - - // Unique identifier for the volume. - ID string `mapstructure:"id"` - - // Size of the volume in GB. - Size int `mapstructure:"size"` -} - -// CreateResult contains the response body and error from a Create request. -type CreateResult struct { - commonResult -} - -// GetResult contains the response body and error from a Get request. -type GetResult struct { - commonResult -} - -// DeleteResult contains the response body and error from a Delete request. -type DeleteResult struct { - gophercloud.ErrResult -} - -// ListResult is a pagination.pager that is returned from a call to the List function. -type ListResult struct { - pagination.SinglePageBase -} - -// IsEmpty returns true if a ListResult contains no Volumes. -func (r ListResult) IsEmpty() (bool, error) { - volumes, err := ExtractVolumes(r) - if err != nil { - return true, err - } - return len(volumes) == 0, nil -} - -// ExtractVolumes extracts and returns Volumes. It is used while iterating over a volumes.List call. -func ExtractVolumes(page pagination.Page) ([]Volume, error) { - var response struct { - Volumes []Volume `json:"volumes"` - } - - err := mapstructure.Decode(page.(ListResult).Body, &response) - return response.Volumes, err -} - -// UpdateResult contains the response body and error from an Update request. -type UpdateResult struct { - commonResult -} - -type commonResult struct { - gophercloud.Result -} - -// Extract will get the Volume object out of the commonResult object. 
-func (r commonResult) Extract() (*Volume, error) { - if r.Err != nil { - return nil, r.Err - } - - var res struct { - Volume *Volume `json:"volume"` - } - - err := mapstructure.Decode(r.Body, &res) - - return res.Volume, err -} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/urls.go deleted file mode 100644 index 29629a1af8d6..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/urls.go +++ /dev/null @@ -1,23 +0,0 @@ -package volumes - -import "github.com/rackspace/gophercloud" - -func createURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL("volumes") -} - -func listURL(c *gophercloud.ServiceClient) string { - return createURL(c) -} - -func deleteURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL("volumes", id) -} - -func getURL(c *gophercloud.ServiceClient, id string) string { - return deleteURL(c, id) -} - -func updateURL(c *gophercloud.ServiceClient, id string) string { - return deleteURL(c, id) -} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/util.go b/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/util.go deleted file mode 100644 index 1dda695ea091..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/util.go +++ /dev/null @@ -1,22 +0,0 @@ -package volumes - -import ( - "github.com/rackspace/gophercloud" -) - -// WaitForStatus will continually poll the resource, checking for a particular -// status. It will do this for the amount of seconds defined. -func WaitForStatus(c *gophercloud.ServiceClient, id, status string, secs int) error { - return gophercloud.WaitFor(secs, func() (bool, error) { - current, err := Get(c, id).Extract() - if err != nil { - return false, err - } - - if current.Status == status { - return true, nil - } - - return false, nil - }) -} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/client.go b/vendor/github.com/rackspace/gophercloud/openstack/client.go deleted file mode 100644 index a323e9b2cafd..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/client.go +++ /dev/null @@ -1,368 +0,0 @@ -package openstack - -import ( - "fmt" - "net/url" - "strconv" - "strings" - - "github.com/rackspace/gophercloud" - tokens2 "github.com/rackspace/gophercloud/openstack/identity/v2/tokens" - tokens3 "github.com/rackspace/gophercloud/openstack/identity/v3/tokens" - "github.com/rackspace/gophercloud/openstack/utils" -) - -const ( - v20 = "v2.0" - v30 = "v3.0" -) - -// NewClient prepares an unauthenticated ProviderClient instance. -// Most users will probably prefer using the AuthenticatedClient function instead. -// This is useful if you wish to explicitly control the version of the identity service that's used for authentication explicitly, -// for example. 
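Pulling the removed volumes requests, results, and util files together, a create, wait, delete round trip looked roughly like this; the size, name, and 300-second timeout are illustrative values.

package main

import (
	"log"

	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes"
)

func createScratchVolume(bs *gophercloud.ServiceClient) error {
	// Size is the only required CreateOpts field.
	vol, err := volumes.Create(bs, volumes.CreateOpts{Size: 10, Name: "scratch"}).Extract()
	if err != nil {
		return err
	}

	// Poll until the volume reports "available", for at most 300 seconds.
	if err := volumes.WaitForStatus(bs, vol.ID, "available", 300); err != nil {
		return err
	}
	log.Printf("volume %s is ready", vol.ID)

	// DeleteResult only carries an error, so inspect Err directly.
	if res := volumes.Delete(bs, vol.ID); res.Err != nil {
		return res.Err
	}
	return nil
}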
-func NewClient(endpoint string) (*gophercloud.ProviderClient, error) { - u, err := url.Parse(endpoint) - if err != nil { - return nil, err - } - - u.RawQuery, u.Fragment = "", "" - - // Base is url with path - endpoint = gophercloud.NormalizeURL(endpoint) - base := gophercloud.NormalizeURL(u.String()) - - path := u.Path - if !strings.HasSuffix(path, "/") { - path = path + "/" - } - - parts := strings.Split(path[0:len(path)-1], "/") - for index, version := range parts { - if 2 <= len(version) && len(version) <= 4 && strings.HasPrefix(version, "v") { - _, err := strconv.ParseFloat(version[1:], 64) - if err == nil { - // post version suffixes in path are not supported - // version must be on the last index - if index < len(parts)-1 { - return nil, fmt.Errorf("Path suffixes (after version) are not supported.") - } - switch version { - case "v2.0", "v3": - // valid version found, strip from base - return &gophercloud.ProviderClient{ - IdentityBase: base[0 : len(base)-len(version)-1], - IdentityEndpoint: endpoint, - }, nil - default: - return nil, fmt.Errorf("Invalid identity endpoint version %v. Supported versions: v2.0, v3", version) - } - } - } - } - - return &gophercloud.ProviderClient{ - IdentityBase: base, - IdentityEndpoint: "", - }, nil -} - -// AuthenticatedClient logs in to an OpenStack cloud found at the identity endpoint specified by options, acquires a token, and -// returns a Client instance that's ready to operate. -// It first queries the root identity endpoint to determine which versions of the identity service are supported, then chooses -// the most recent identity service available to proceed. -func AuthenticatedClient(options gophercloud.AuthOptions) (*gophercloud.ProviderClient, error) { - client, err := NewClient(options.IdentityEndpoint) - if err != nil { - return nil, err - } - - err = Authenticate(client, options) - if err != nil { - return nil, err - } - return client, nil -} - -// Authenticate or re-authenticate against the most recent identity service supported at the provided endpoint. -func Authenticate(client *gophercloud.ProviderClient, options gophercloud.AuthOptions) error { - versions := []*utils.Version{ - {ID: v20, Priority: 20, Suffix: "/v2.0/"}, - {ID: v30, Priority: 30, Suffix: "/v3/"}, - } - - chosen, endpoint, err := utils.ChooseVersion(client, versions) - if err != nil { - return err - } - - switch chosen.ID { - case v20: - return v2auth(client, endpoint, options) - case v30: - return v3auth(client, endpoint, options) - default: - // The switch statement must be out of date from the versions list. - return fmt.Errorf("Unrecognized identity version: %s", chosen.ID) - } -} - -// AuthenticateV2 explicitly authenticates against the identity v2 endpoint. 
-func AuthenticateV2(client *gophercloud.ProviderClient, options gophercloud.AuthOptions) error { - return v2auth(client, "", options) -} - -func v2auth(client *gophercloud.ProviderClient, endpoint string, options gophercloud.AuthOptions) error { - v2Client := NewIdentityV2(client) - if endpoint != "" { - v2Client.Endpoint = endpoint - } - - result := tokens2.Create(v2Client, tokens2.AuthOptions{AuthOptions: options}) - - token, err := result.ExtractToken() - if err != nil { - return err - } - - catalog, err := result.ExtractServiceCatalog() - if err != nil { - return err - } - - if options.AllowReauth { - client.ReauthFunc = func() error { - client.TokenID = "" - return v2auth(client, endpoint, options) - } - } - client.TokenID = token.ID - client.EndpointLocator = func(opts gophercloud.EndpointOpts) (string, error) { - return V2EndpointURL(catalog, opts) - } - - return nil -} - -// AuthenticateV3 explicitly authenticates against the identity v3 service. -func AuthenticateV3(client *gophercloud.ProviderClient, options gophercloud.AuthOptions) error { - return v3auth(client, "", options) -} - -func v3auth(client *gophercloud.ProviderClient, endpoint string, options gophercloud.AuthOptions) error { - // Override the generated service endpoint with the one returned by the version endpoint. - v3Client := NewIdentityV3(client) - if endpoint != "" { - v3Client.Endpoint = endpoint - } - - // copy the auth options to a local variable that we can change. `options` - // needs to stay as-is for reauth purposes - v3Options := options - - var scope *tokens3.Scope - if options.TenantID != "" { - scope = &tokens3.Scope{ - ProjectID: options.TenantID, - } - v3Options.TenantID = "" - v3Options.TenantName = "" - } else { - if options.TenantName != "" { - scope = &tokens3.Scope{ - ProjectName: options.TenantName, - DomainID: options.DomainID, - DomainName: options.DomainName, - } - v3Options.TenantName = "" - } - } - - result := tokens3.Create(v3Client, tokens3.AuthOptions{AuthOptions: v3Options}, scope) - - token, err := result.ExtractToken() - if err != nil { - return err - } - - catalog, err := result.ExtractServiceCatalog() - if err != nil { - return err - } - - client.TokenID = token.ID - - if options.AllowReauth { - client.ReauthFunc = func() error { - client.TokenID = "" - return v3auth(client, endpoint, options) - } - } - client.EndpointLocator = func(opts gophercloud.EndpointOpts) (string, error) { - return V3EndpointURL(catalog, opts) - } - - return nil -} - -// NewIdentityV2 creates a ServiceClient that may be used to interact with the v2 identity service. -func NewIdentityV2(client *gophercloud.ProviderClient) *gophercloud.ServiceClient { - v2Endpoint := client.IdentityBase + "v2.0/" - - return &gophercloud.ServiceClient{ - ProviderClient: client, - Endpoint: v2Endpoint, - } -} - -// NewIdentityV3 creates a ServiceClient that may be used to access the v3 identity service. 
-func NewIdentityV3(client *gophercloud.ProviderClient) *gophercloud.ServiceClient { - v3Endpoint := client.IdentityBase + "v3/" - - return &gophercloud.ServiceClient{ - ProviderClient: client, - Endpoint: v3Endpoint, - } -} - -func NewIdentityAdminV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - eo.ApplyDefaults("identity") - eo.Availability = gophercloud.AvailabilityAdmin - - url, err := client.EndpointLocator(eo) - if err != nil { - return nil, err - } - - // Force using v2 API - if strings.Contains(url, "/v3") { - url = strings.Replace(url, "/v3", "/v2.0", -1) - } - - return &gophercloud.ServiceClient{ProviderClient: client, Endpoint: url}, nil -} - -func NewIdentityAdminV3(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - eo.ApplyDefaults("identity") - eo.Availability = gophercloud.AvailabilityAdmin - - url, err := client.EndpointLocator(eo) - if err != nil { - return nil, err - } - - // Force using v3 API - if strings.Contains(url, "/v2.0") { - url = strings.Replace(url, "/v2.0", "/v3", -1) - } - - return &gophercloud.ServiceClient{ProviderClient: client, Endpoint: url}, nil -} - -// NewObjectStorageV1 creates a ServiceClient that may be used with the v1 object storage package. -func NewObjectStorageV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - eo.ApplyDefaults("object-store") - url, err := client.EndpointLocator(eo) - if err != nil { - return nil, err - } - return &gophercloud.ServiceClient{ProviderClient: client, Endpoint: url}, nil -} - -// NewComputeV2 creates a ServiceClient that may be used with the v2 compute package. -func NewComputeV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - eo.ApplyDefaults("compute") - url, err := client.EndpointLocator(eo) - if err != nil { - return nil, err - } - return &gophercloud.ServiceClient{ProviderClient: client, Endpoint: url}, nil -} - -// NewNetworkV2 creates a ServiceClient that may be used with the v2 network package. -func NewNetworkV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - eo.ApplyDefaults("network") - url, err := client.EndpointLocator(eo) - if err != nil { - return nil, err - } - return &gophercloud.ServiceClient{ - ProviderClient: client, - Endpoint: url, - ResourceBase: url + "v2.0/", - }, nil -} - -// NewBlockStorageV1 creates a ServiceClient that may be used to access the v1 block storage service. -func NewBlockStorageV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - eo.ApplyDefaults("volume") - url, err := client.EndpointLocator(eo) - if err != nil { - return nil, err - } - return &gophercloud.ServiceClient{ProviderClient: client, Endpoint: url}, nil -} - -// NewBlockStorageV2 creates a ServiceClient that may be used to access the v2 block storage service. -func NewBlockStorageV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - eo.ApplyDefaults("volumev2") - url, err := client.EndpointLocator(eo) - if err != nil { - return nil, err - } - return &gophercloud.ServiceClient{ProviderClient: client, Endpoint: url}, nil -} - -// NewCDNV1 creates a ServiceClient that may be used to access the OpenStack v1 -// CDN service. 
-func NewCDNV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - eo.ApplyDefaults("cdn") - url, err := client.EndpointLocator(eo) - if err != nil { - return nil, err - } - return &gophercloud.ServiceClient{ProviderClient: client, Endpoint: url}, nil -} - -// NewOrchestrationV1 creates a ServiceClient that may be used to access the v1 orchestration service. -func NewOrchestrationV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - eo.ApplyDefaults("orchestration") - url, err := client.EndpointLocator(eo) - if err != nil { - return nil, err - } - return &gophercloud.ServiceClient{ProviderClient: client, Endpoint: url}, nil -} - -// NewDBV1 creates a ServiceClient that may be used to access the v1 DB service. -func NewDBV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - eo.ApplyDefaults("database") - url, err := client.EndpointLocator(eo) - if err != nil { - return nil, err - } - return &gophercloud.ServiceClient{ProviderClient: client, Endpoint: url}, nil -} - -// NewImageServiceV2 creates a ServiceClient that may be used to access the v2 image service. -func NewImageServiceV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - eo.ApplyDefaults("image") - url, err := client.EndpointLocator(eo) - if err != nil { - return nil, err - } - return &gophercloud.ServiceClient{ProviderClient: client, - Endpoint: url, - ResourceBase: url + "v2/"}, nil -} - -// NewTelemetryV2 creates a ServiceClient that may be used to access the v2 telemetry service. -func NewTelemetryV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - eo.ApplyDefaults("metering") - url, err := client.EndpointLocator(eo) - if err != nil { - return nil, err - } - return &gophercloud.ServiceClient{ProviderClient: client, Endpoint: url}, nil -} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/requests.go deleted file mode 100644 index a15c98d39bc7..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/requests.go +++ /dev/null @@ -1,120 +0,0 @@ -package bootfromvolume - -import ( - "errors" - "strconv" - - "github.com/rackspace/gophercloud" - "github.com/rackspace/gophercloud/openstack/compute/v2/servers" -) - -// SourceType represents the type of medium being used to create the volume. -type SourceType string - -const ( - Volume SourceType = "volume" - Snapshot SourceType = "snapshot" - Image SourceType = "image" - Blank SourceType = "blank" -) - -// BlockDevice is a structure with options for booting a server instance -// from a volume. The volume may be created from an image, snapshot, or another -// volume. -type BlockDevice struct { - // BootIndex [optional] is the boot index. It defaults to 0. - BootIndex int `json:"boot_index"` - - // DeleteOnTermination [optional] specifies whether or not to delete the attached volume - // when the server is deleted. Defaults to `false`. - DeleteOnTermination bool `json:"delete_on_termination"` - - // DestinationType [optional] is the type that gets created. Possible values are "volume" - // and "local". - DestinationType string `json:"destination_type"` - - // GuestFormat [optional] specifies the format of the block device. 
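client.go above hangs every service factory off the same ProviderClient; a compact sketch of reusing one authenticated provider for several services, with a placeholder region.

package main

import (
	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack"
)

func buildClients(provider *gophercloud.ProviderClient) error {
	eo := gophercloud.EndpointOpts{Region: "RegionOne"} // placeholder region

	// Each factory applies its own service-type default ("compute", "network", "volume", ...).
	if _, err := openstack.NewComputeV2(provider, eo); err != nil {
		return err
	}
	// NewNetworkV2 also sets ResourceBase to <endpoint>v2.0/.
	if _, err := openstack.NewNetworkV2(provider, eo); err != nil {
		return err
	}
	// Block storage v1 resolves the "volume" entry in the catalog.
	_, err := openstack.NewBlockStorageV1(provider, eo)
	return err
}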
- GuestFormat string `json:"guest_format"` - - // SourceType [required] must be one of: "volume", "snapshot", "image". - SourceType SourceType `json:"source_type"` - - // UUID [required] is the unique identifier for the volume, snapshot, or image (see above) - UUID string `json:"uuid"` - - // VolumeSize [optional] is the size of the volume to create (in gigabytes). - VolumeSize int `json:"volume_size"` -} - -// CreateOptsExt is a structure that extends the server `CreateOpts` structure -// by allowing for a block device mapping. -type CreateOptsExt struct { - servers.CreateOptsBuilder - BlockDevice []BlockDevice `json:"block_device_mapping_v2,omitempty"` -} - -// ToServerCreateMap adds the block device mapping option to the base server -// creation options. -func (opts CreateOptsExt) ToServerCreateMap() (map[string]interface{}, error) { - base, err := opts.CreateOptsBuilder.ToServerCreateMap() - if err != nil { - return nil, err - } - - if len(opts.BlockDevice) == 0 { - return nil, errors.New("Required fields UUID and SourceType not set.") - } - - serverMap := base["server"].(map[string]interface{}) - - blockDevice := make([]map[string]interface{}, len(opts.BlockDevice)) - - for i, bd := range opts.BlockDevice { - if string(bd.SourceType) == "" { - return nil, errors.New("SourceType must be one of: volume, image, snapshot.") - } - - blockDevice[i] = make(map[string]interface{}) - - blockDevice[i]["source_type"] = bd.SourceType - blockDevice[i]["boot_index"] = strconv.Itoa(bd.BootIndex) - blockDevice[i]["delete_on_termination"] = strconv.FormatBool(bd.DeleteOnTermination) - if bd.VolumeSize > 0 { - blockDevice[i]["volume_size"] = strconv.Itoa(bd.VolumeSize) - } - if bd.UUID != "" { - blockDevice[i]["uuid"] = bd.UUID - } - if bd.DestinationType != "" { - blockDevice[i]["destination_type"] = bd.DestinationType - } - if bd.GuestFormat != "" { - blockDevice[i]["guest_format"] = bd.GuestFormat - } - - } - serverMap["block_device_mapping_v2"] = blockDevice - - return base, nil -} - -// Create requests the creation of a server from the given block device mapping. -func Create(client *gophercloud.ServiceClient, opts servers.CreateOptsBuilder) servers.CreateResult { - var res servers.CreateResult - - reqBody, err := opts.ToServerCreateMap() - if err != nil { - res.Err = err - return res - } - - // Delete imageName and flavorName that come from ToServerCreateMap(). - // As of Liberty, Boot From Volume is failing if they are passed. - delete(reqBody["server"].(map[string]interface{}), "imageName") - delete(reqBody["server"].(map[string]interface{}), "flavorName") - - _, res.Err = client.Post(createURL(client), reqBody, &res.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200, 202}, - }) - return res -} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/results.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/results.go deleted file mode 100644 index f60329f0f385..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/results.go +++ /dev/null @@ -1,10 +0,0 @@ -package bootfromvolume - -import ( - os "github.com/rackspace/gophercloud/openstack/compute/v2/servers" -) - -// CreateResult temporarily contains the response from a Create call. 
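The bootfromvolume extension deleted here wraps the base server options with a block-device mapping. A sketch under the assumption that servers.CreateOpts (from the sibling servers package, not shown in this hunk) accepts Name and FlavorRef; IDs and sizes are placeholders.

package main

import (
	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume"
	"github.com/rackspace/gophercloud/openstack/compute/v2/servers"
)

func bootFromImageBackedVolume(compute *gophercloud.ServiceClient) (*servers.Server, error) {
	// Base options come from the servers package; depending on its validation
	// in this vendored revision, an image reference may also be required there.
	base := servers.CreateOpts{
		Name:      "bfv-example", // placeholder
		FlavorRef: "flavor-id",   // placeholder
	}

	opts := bootfromvolume.CreateOptsExt{
		CreateOptsBuilder: base,
		BlockDevice: []bootfromvolume.BlockDevice{{
			SourceType:          bootfromvolume.Image,
			UUID:                "image-uuid", // placeholder
			DestinationType:     "volume",
			VolumeSize:          10,
			DeleteOnTermination: true,
		}},
	}

	// Posts to os-volumes_boot with the block_device_mapping_v2 payload.
	return bootfromvolume.Create(compute, opts).Extract()
}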
-type CreateResult struct { - os.CreateResult -} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/urls.go deleted file mode 100644 index 0cffe25ffdcb..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/urls.go +++ /dev/null @@ -1,7 +0,0 @@ -package bootfromvolume - -import "github.com/rackspace/gophercloud" - -func createURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL("os-volumes_boot") -} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/doc.go deleted file mode 100644 index 80785faca9fb..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package diskconfig provides information and interaction with the Disk -// Config extension that works with the OpenStack Compute service. -package diskconfig diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/requests.go deleted file mode 100644 index 7407e0d175ee..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/requests.go +++ /dev/null @@ -1,114 +0,0 @@ -package diskconfig - -import ( - "errors" - - "github.com/rackspace/gophercloud/openstack/compute/v2/servers" -) - -// DiskConfig represents one of the two possible settings for the DiskConfig option when creating, -// rebuilding, or resizing servers: Auto or Manual. -type DiskConfig string - -const ( - // Auto builds a server with a single partition the size of the target flavor disk and - // automatically adjusts the filesystem to fit the entire partition. Auto may only be used with - // images and servers that use a single EXT3 partition. - Auto DiskConfig = "AUTO" - - // Manual builds a server using whatever partition scheme and filesystem are present in the source - // image. If the target flavor disk is larger, the remaining space is left unpartitioned. This - // enables images to have non-EXT3 filesystems, multiple partitions, and so on, and enables you - // to manage the disk configuration. It also results in slightly shorter boot times. - Manual DiskConfig = "MANUAL" -) - -// ErrInvalidDiskConfig is returned if an invalid string is specified for a DiskConfig option. -var ErrInvalidDiskConfig = errors.New("DiskConfig must be either diskconfig.Auto or diskconfig.Manual.") - -// Validate ensures that a DiskConfig contains an appropriate value. -func (config DiskConfig) validate() error { - switch config { - case Auto, Manual: - return nil - default: - return ErrInvalidDiskConfig - } -} - -// CreateOptsExt adds a DiskConfig option to the base CreateOpts. -type CreateOptsExt struct { - servers.CreateOptsBuilder - - // DiskConfig [optional] controls how the created server's disk is partitioned. - DiskConfig DiskConfig `json:"OS-DCF:diskConfig,omitempty"` -} - -// ToServerCreateMap adds the diskconfig option to the base server creation options. 
-func (opts CreateOptsExt) ToServerCreateMap() (map[string]interface{}, error) { - base, err := opts.CreateOptsBuilder.ToServerCreateMap() - if err != nil { - return nil, err - } - - if string(opts.DiskConfig) == "" { - return base, nil - } - - serverMap := base["server"].(map[string]interface{}) - serverMap["OS-DCF:diskConfig"] = string(opts.DiskConfig) - - return base, nil -} - -// RebuildOptsExt adds a DiskConfig option to the base RebuildOpts. -type RebuildOptsExt struct { - servers.RebuildOptsBuilder - - // DiskConfig [optional] controls how the rebuilt server's disk is partitioned. - DiskConfig DiskConfig -} - -// ToServerRebuildMap adds the diskconfig option to the base server rebuild options. -func (opts RebuildOptsExt) ToServerRebuildMap() (map[string]interface{}, error) { - err := opts.DiskConfig.validate() - if err != nil { - return nil, err - } - - base, err := opts.RebuildOptsBuilder.ToServerRebuildMap() - if err != nil { - return nil, err - } - - serverMap := base["rebuild"].(map[string]interface{}) - serverMap["OS-DCF:diskConfig"] = string(opts.DiskConfig) - - return base, nil -} - -// ResizeOptsExt adds a DiskConfig option to the base server resize options. -type ResizeOptsExt struct { - servers.ResizeOptsBuilder - - // DiskConfig [optional] controls how the resized server's disk is partitioned. - DiskConfig DiskConfig -} - -// ToServerResizeMap adds the diskconfig option to the base server creation options. -func (opts ResizeOptsExt) ToServerResizeMap() (map[string]interface{}, error) { - err := opts.DiskConfig.validate() - if err != nil { - return nil, err - } - - base, err := opts.ResizeOptsBuilder.ToServerResizeMap() - if err != nil { - return nil, err - } - - serverMap := base["resize"].(map[string]interface{}) - serverMap["OS-DCF:diskConfig"] = string(opts.DiskConfig) - - return base, nil -} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/results.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/results.go deleted file mode 100644 index 10ec2dafcb8d..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig/results.go +++ /dev/null @@ -1,60 +0,0 @@ -package diskconfig - -import ( - "github.com/mitchellh/mapstructure" - "github.com/rackspace/gophercloud" - "github.com/rackspace/gophercloud/openstack/compute/v2/servers" - "github.com/rackspace/gophercloud/pagination" -) - -func commonExtract(result gophercloud.Result) (*DiskConfig, error) { - var resp struct { - Server struct { - DiskConfig string `mapstructure:"OS-DCF:diskConfig"` - } `mapstructure:"server"` - } - - err := mapstructure.Decode(result.Body, &resp) - if err != nil { - return nil, err - } - - config := DiskConfig(resp.Server.DiskConfig) - return &config, nil -} - -// ExtractGet returns the disk configuration from a servers.Get call. -func ExtractGet(result servers.GetResult) (*DiskConfig, error) { - return commonExtract(result.Result) -} - -// ExtractUpdate returns the disk configuration from a servers.Update call. -func ExtractUpdate(result servers.UpdateResult) (*DiskConfig, error) { - return commonExtract(result.Result) -} - -// ExtractRebuild returns the disk configuration from a servers.Rebuild call. -func ExtractRebuild(result servers.RebuildResult) (*DiskConfig, error) { - return commonExtract(result.Result) -} - -// ExtractDiskConfig returns the DiskConfig setting for a specific server acquired from an -// servers.ExtractServers call, while iterating through a Pager. 
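The diskconfig extension removed above decorates the base create, rebuild, and resize options. A sketch assuming the sibling servers package (not shown in this hunk) for the base options and the Create call; IDs are placeholders.

package main

import (
	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig"
	"github.com/rackspace/gophercloud/openstack/compute/v2/servers"
)

func createWithAutoDiskConfig(compute *gophercloud.ServiceClient) (*servers.Server, error) {
	base := servers.CreateOpts{
		Name:      "auto-disk-example", // placeholder
		ImageRef:  "image-uuid",        // placeholder
		FlavorRef: "flavor-id",         // placeholder
	}

	// The wrapper adds OS-DCF:diskConfig=AUTO to the create request body.
	opts := diskconfig.CreateOptsExt{
		CreateOptsBuilder: base,
		DiskConfig:        diskconfig.Auto,
	}

	return servers.Create(compute, opts).Extract()
}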
-func ExtractDiskConfig(page pagination.Page, index int) (*DiskConfig, error) { - casted := page.(servers.ServerPage).Body - - type server struct { - DiskConfig string `mapstructure:"OS-DCF:diskConfig"` - } - var response struct { - Servers []server `mapstructure:"servers"` - } - - err := mapstructure.Decode(casted, &response) - if err != nil { - return nil, err - } - - config := DiskConfig(response.Servers[index].DiskConfig) - return &config, nil -} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/doc.go deleted file mode 100644 index 22f68d80e529..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package volumeattach provides the ability to attach and detach volumes -// to instances -package volumeattach diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/requests.go deleted file mode 100644 index b4ebedea86ff..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/requests.go +++ /dev/null @@ -1,75 +0,0 @@ -package volumeattach - -import ( - "errors" - - "github.com/rackspace/gophercloud" - "github.com/rackspace/gophercloud/pagination" -) - -// List returns a Pager that allows you to iterate over a collection of VolumeAttachments. -func List(client *gophercloud.ServiceClient, serverId string) pagination.Pager { - return pagination.NewPager(client, listURL(client, serverId), func(r pagination.PageResult) pagination.Page { - return VolumeAttachmentsPage{pagination.SinglePageBase(r)} - }) -} - -// CreateOptsBuilder describes struct types that can be accepted by the Create call. Notable, the -// CreateOpts struct in this package does. -type CreateOptsBuilder interface { - ToVolumeAttachmentCreateMap() (map[string]interface{}, error) -} - -// CreateOpts specifies volume attachment creation or import parameters. -type CreateOpts struct { - // Device is the device that the volume will attach to the instance as. Omit for "auto" - Device string - - // VolumeID is the ID of the volume to attach to the instance - VolumeID string -} - -// ToVolumeAttachmentCreateMap constructs a request body from CreateOpts. -func (opts CreateOpts) ToVolumeAttachmentCreateMap() (map[string]interface{}, error) { - if opts.VolumeID == "" { - return nil, errors.New("Missing field required for volume attachment creation: VolumeID") - } - - volumeAttachment := make(map[string]interface{}) - volumeAttachment["volumeId"] = opts.VolumeID - if opts.Device != "" { - volumeAttachment["device"] = opts.Device - } - - return map[string]interface{}{"volumeAttachment": volumeAttachment}, nil -} - -// Create requests the creation of a new volume attachment on the server -func Create(client *gophercloud.ServiceClient, serverId string, opts CreateOptsBuilder) CreateResult { - var res CreateResult - - reqBody, err := opts.ToVolumeAttachmentCreateMap() - if err != nil { - res.Err = err - return res - } - - _, res.Err = client.Post(createURL(client, serverId), reqBody, &res.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - return res -} - -// Get returns public data about a previously created VolumeAttachment. 
-func Get(client *gophercloud.ServiceClient, serverId, aId string) GetResult { - var res GetResult - _, res.Err = client.Get(getURL(client, serverId, aId), &res.Body, nil) - return res -} - -// Delete requests the deletion of a previous stored VolumeAttachment from the server. -func Delete(client *gophercloud.ServiceClient, serverId, aId string) DeleteResult { - var res DeleteResult - _, res.Err = client.Delete(deleteURL(client, serverId, aId), nil) - return res -} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/results.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/results.go deleted file mode 100644 index 26be39e4f706..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/results.go +++ /dev/null @@ -1,84 +0,0 @@ -package volumeattach - -import ( - "github.com/mitchellh/mapstructure" - "github.com/rackspace/gophercloud" - "github.com/rackspace/gophercloud/pagination" -) - -// VolumeAttach controls the attachment of a volume to an instance. -type VolumeAttachment struct { - // ID is a unique id of the attachment - ID string `mapstructure:"id"` - - // Device is what device the volume is attached as - Device string `mapstructure:"device"` - - // VolumeID is the ID of the attached volume - VolumeID string `mapstructure:"volumeId"` - - // ServerID is the ID of the instance that has the volume attached - ServerID string `mapstructure:"serverId"` -} - -// VolumeAttachmentsPage stores a single, only page of VolumeAttachments -// results from a List call. -type VolumeAttachmentsPage struct { - pagination.SinglePageBase -} - -// IsEmpty determines whether or not a VolumeAttachmentsPage is empty. -func (page VolumeAttachmentsPage) IsEmpty() (bool, error) { - va, err := ExtractVolumeAttachments(page) - return len(va) == 0, err -} - -// ExtractVolumeAttachments interprets a page of results as a slice of -// VolumeAttachments. -func ExtractVolumeAttachments(page pagination.Page) ([]VolumeAttachment, error) { - casted := page.(VolumeAttachmentsPage).Body - var response struct { - VolumeAttachments []VolumeAttachment `mapstructure:"volumeAttachments"` - } - - err := mapstructure.WeakDecode(casted, &response) - - return response.VolumeAttachments, err -} - -type VolumeAttachmentResult struct { - gophercloud.Result -} - -// Extract is a method that attempts to interpret any VolumeAttachment resource -// response as a VolumeAttachment struct. -func (r VolumeAttachmentResult) Extract() (*VolumeAttachment, error) { - if r.Err != nil { - return nil, r.Err - } - - var res struct { - VolumeAttachment *VolumeAttachment `json:"volumeAttachment" mapstructure:"volumeAttachment"` - } - - err := mapstructure.Decode(r.Body, &res) - return res.VolumeAttachment, err -} - -// CreateResult is the response from a Create operation. Call its Extract method to interpret it -// as a VolumeAttachment. -type CreateResult struct { - VolumeAttachmentResult -} - -// GetResult is the response from a Get operation. Call its Extract method to interpret it -// as a VolumeAttachment. -type GetResult struct { - VolumeAttachmentResult -} - -// DeleteResult is the response from a Delete operation. Call its Extract method to determine if -// the call succeeded or failed. 
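Rounding out the volumeattach files above, attaching a volume and then listing a server's attachments looked roughly like this; server and volume IDs are supplied by the caller.

package main

import (
	"log"

	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach"
	"github.com/rackspace/gophercloud/pagination"
)

func attachAndList(compute *gophercloud.ServiceClient, serverID, volumeID string) error {
	// Omitting Device lets the compute service choose the device name.
	att, err := volumeattach.Create(compute, serverID, volumeattach.CreateOpts{
		VolumeID: volumeID,
	}).Extract()
	if err != nil {
		return err
	}
	log.Printf("attached %s as %s", att.VolumeID, att.Device)

	// Attachments are returned as a single page.
	return volumeattach.List(compute, serverID).EachPage(func(page pagination.Page) (bool, error) {
		list, err := volumeattach.ExtractVolumeAttachments(page)
		if err != nil {
			return false, err
		}
		for _, a := range list {
			log.Printf("server %s: volume %s on %s", a.ServerID, a.VolumeID, a.Device)
		}
		return true, nil
	})
}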
-type DeleteResult struct { - gophercloud.ErrResult -} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/urls.go deleted file mode 100644 index 9d9d1786db3e..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/urls.go +++ /dev/null @@ -1,25 +0,0 @@ -package volumeattach - -import "github.com/rackspace/gophercloud" - -const resourcePath = "os-volume_attachments" - -func resourceURL(c *gophercloud.ServiceClient, serverId string) string { - return c.ServiceURL("servers", serverId, resourcePath) -} - -func listURL(c *gophercloud.ServiceClient, serverId string) string { - return resourceURL(c, serverId) -} - -func createURL(c *gophercloud.ServiceClient, serverId string) string { - return resourceURL(c, serverId) -} - -func getURL(c *gophercloud.ServiceClient, serverId, aId string) string { - return c.ServiceURL("servers", serverId, resourcePath, aId) -} - -func deleteURL(c *gophercloud.ServiceClient, serverId, aId string) string { - return getURL(c, serverId, aId) -} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/doc.go deleted file mode 100644 index 5822e1bcf6d2..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -// Package flavors provides information and interaction with the flavor API -// resource in the OpenStack Compute service. -// -// A flavor is an available hardware configuration for a server. Each flavor -// has a unique combination of disk space, memory capacity and priority for CPU -// time. -package flavors diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/requests.go deleted file mode 100644 index 59123aaf76f3..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/requests.go +++ /dev/null @@ -1,103 +0,0 @@ -package flavors - -import ( - "fmt" - - "github.com/rackspace/gophercloud" - "github.com/rackspace/gophercloud/pagination" -) - -// ListOptsBuilder allows extensions to add additional parameters to the -// List request. -type ListOptsBuilder interface { - ToFlavorListQuery() (string, error) -} - -// ListOpts helps control the results returned by the List() function. -// For example, a flavor with a minDisk field of 10 will not be returned if you specify MinDisk set to 20. -// Typically, software will use the last ID of the previous call to List to set the Marker for the current call. -type ListOpts struct { - - // ChangesSince, if provided, instructs List to return only those things which have changed since the timestamp provided. - ChangesSince string `q:"changes-since"` - - // MinDisk and MinRAM, if provided, elides flavors which do not meet your criteria. - MinDisk int `q:"minDisk"` - MinRAM int `q:"minRam"` - - // Marker and Limit control paging. - // Marker instructs List where to start listing from. - Marker string `q:"marker"` - - // Limit instructs List to refrain from sending excessively large lists of flavors. - Limit int `q:"limit"` -} - -// ToFlavorListQuery formats a ListOpts into a query string. 
-func (opts ListOpts) ToFlavorListQuery() (string, error) { - q, err := gophercloud.BuildQueryString(opts) - if err != nil { - return "", err - } - return q.String(), nil -} - -// ListDetail instructs OpenStack to provide a list of flavors. -// You may provide criteria by which List curtails its results for easier processing. -// See ListOpts for more details. -func ListDetail(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { - url := listURL(client) - if opts != nil { - query, err := opts.ToFlavorListQuery() - if err != nil { - return pagination.Pager{Err: err} - } - url += query - } - createPage := func(r pagination.PageResult) pagination.Page { - return FlavorPage{pagination.LinkedPageBase{PageResult: r}} - } - - return pagination.NewPager(client, url, createPage) -} - -// Get instructs OpenStack to provide details on a single flavor, identified by its ID. -// Use ExtractFlavor to convert its result into a Flavor. -func Get(client *gophercloud.ServiceClient, id string) GetResult { - var res GetResult - _, res.Err = client.Get(getURL(client, id), &res.Body, nil) - return res -} - -// IDFromName is a convienience function that returns a flavor's ID given its name. -func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { - flavorCount := 0 - flavorID := "" - if name == "" { - return "", fmt.Errorf("A flavor name must be provided.") - } - pager := ListDetail(client, nil) - pager.EachPage(func(page pagination.Page) (bool, error) { - flavorList, err := ExtractFlavors(page) - if err != nil { - return false, err - } - - for _, f := range flavorList { - if f.Name == name { - flavorCount++ - flavorID = f.ID - } - } - return true, nil - }) - - switch flavorCount { - case 0: - return "", fmt.Errorf("Unable to find flavor: %s", name) - case 1: - return flavorID, nil - default: - return "", fmt.Errorf("Found %d flavors matching %s", flavorCount, name) - } -} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/results.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/results.go deleted file mode 100644 index 8dddd705c93d..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/results.go +++ /dev/null @@ -1,122 +0,0 @@ -package flavors - -import ( - "errors" - "reflect" - - "github.com/mitchellh/mapstructure" - "github.com/rackspace/gophercloud" - "github.com/rackspace/gophercloud/pagination" -) - -// ErrCannotInterpret is returned by an Extract call if the response body doesn't have the expected structure. -var ErrCannotInterpet = errors.New("Unable to interpret a response body.") - -// GetResult temporarily holds the response from a Get call. -type GetResult struct { - gophercloud.Result -} - -// Extract provides access to the individual Flavor returned by the Get function. -func (gr GetResult) Extract() (*Flavor, error) { - if gr.Err != nil { - return nil, gr.Err - } - - var result struct { - Flavor Flavor `mapstructure:"flavor"` - } - - cfg := &mapstructure.DecoderConfig{ - DecodeHook: defaulter, - Result: &result, - } - decoder, err := mapstructure.NewDecoder(cfg) - if err != nil { - return nil, err - } - err = decoder.Decode(gr.Body) - return &result.Flavor, err -} - -// Flavor records represent (virtual) hardware configurations for server resources in a region. -type Flavor struct { - // The Id field contains the flavor's unique identifier. 
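For the flavors package being removed, a typical lookup combined ListDetail with ExtractFlavors inside the pager callback; the 2048 MB minimum-RAM filter is illustrative.

package main

import (
	"log"

	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack/compute/v2/flavors"
	"github.com/rackspace/gophercloud/pagination"
)

func firstFlavorWithTwoGB(compute *gophercloud.ServiceClient) (string, error) {
	var id string

	// Only flavors with at least 2048 MB of RAM are returned by the API.
	err := flavors.ListDetail(compute, flavors.ListOpts{MinRAM: 2048}).EachPage(
		func(page pagination.Page) (bool, error) {
			fs, err := flavors.ExtractFlavors(page)
			if err != nil {
				return false, err
			}
			for _, f := range fs {
				log.Printf("flavor %s: %d vCPUs, %d MB RAM, %d GB disk", f.Name, f.VCPUs, f.RAM, f.Disk)
				if id == "" {
					id = f.ID // remember the first match
				}
			}
			return true, nil
		})
	return id, err
}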
- // For example, this identifier will be useful when specifying which hardware configuration to use for a new server instance. - ID string `mapstructure:"id"` - - // The Disk and RA< fields provide a measure of storage space offered by the flavor, in GB and MB, respectively. - Disk int `mapstructure:"disk"` - RAM int `mapstructure:"ram"` - - // The Name field provides a human-readable moniker for the flavor. - Name string `mapstructure:"name"` - - RxTxFactor float64 `mapstructure:"rxtx_factor"` - - // Swap indicates how much space is reserved for swap. - // If not provided, this field will be set to 0. - Swap int `mapstructure:"swap"` - - // VCPUs indicates how many (virtual) CPUs are available for this flavor. - VCPUs int `mapstructure:"vcpus"` -} - -// FlavorPage contains a single page of the response from a List call. -type FlavorPage struct { - pagination.LinkedPageBase -} - -// IsEmpty determines if a page contains any results. -func (p FlavorPage) IsEmpty() (bool, error) { - flavors, err := ExtractFlavors(p) - if err != nil { - return true, err - } - return len(flavors) == 0, nil -} - -// NextPageURL uses the response's embedded link reference to navigate to the next page of results. -func (p FlavorPage) NextPageURL() (string, error) { - type resp struct { - Links []gophercloud.Link `mapstructure:"flavors_links"` - } - - var r resp - err := mapstructure.Decode(p.Body, &r) - if err != nil { - return "", err - } - - return gophercloud.ExtractNextURL(r.Links) -} - -func defaulter(from, to reflect.Kind, v interface{}) (interface{}, error) { - if (from == reflect.String) && (to == reflect.Int) { - return 0, nil - } - return v, nil -} - -// ExtractFlavors provides access to the list of flavors in a page acquired from the List operation. -func ExtractFlavors(page pagination.Page) ([]Flavor, error) { - casted := page.(FlavorPage).Body - var container struct { - Flavors []Flavor `mapstructure:"flavors"` - } - - cfg := &mapstructure.DecoderConfig{ - DecodeHook: defaulter, - Result: &container, - } - decoder, err := mapstructure.NewDecoder(cfg) - if err != nil { - return container.Flavors, err - } - err = decoder.Decode(casted) - if err != nil { - return container.Flavors, err - } - - return container.Flavors, nil -} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/urls.go deleted file mode 100644 index 683c107dcbe9..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/urls.go +++ /dev/null @@ -1,13 +0,0 @@ -package flavors - -import ( - "github.com/rackspace/gophercloud" -) - -func getURL(client *gophercloud.ServiceClient, id string) string { - return client.ServiceURL("flavors", id) -} - -func listURL(client *gophercloud.ServiceClient) string { - return client.ServiceURL("flavors", "detail") -} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/images/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/images/doc.go deleted file mode 100644 index 0edaa3f02556..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/images/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -// Package images provides information and interaction with the image API -// resource in the OpenStack Compute service. -// -// An image is a collection of files used to create or rebuild a server. -// Operators provide a number of pre-built OS images by default. 
You may also -// create custom images from cloud servers you have launched. -package images diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/images/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/images/requests.go deleted file mode 100644 index 1e021ad4cd05..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/images/requests.go +++ /dev/null @@ -1,109 +0,0 @@ -package images - -import ( - "fmt" - - "github.com/rackspace/gophercloud" - "github.com/rackspace/gophercloud/pagination" -) - -// ListOptsBuilder allows extensions to add additional parameters to the -// List request. -type ListOptsBuilder interface { - ToImageListQuery() (string, error) -} - -// ListOpts contain options for limiting the number of Images returned from a call to ListDetail. -type ListOpts struct { - // When the image last changed status (in date-time format). - ChangesSince string `q:"changes-since"` - // The number of Images to return. - Limit int `q:"limit"` - // UUID of the Image at which to set a marker. - Marker string `q:"marker"` - // The name of the Image. - Name string `q:"name"` - // The name of the Server (in URL format). - Server string `q:"server"` - // The current status of the Image. - Status string `q:"status"` - // The value of the type of image (e.g. BASE, SERVER, ALL) - Type string `q:"type"` -} - -// ToImageListQuery formats a ListOpts into a query string. -func (opts ListOpts) ToImageListQuery() (string, error) { - q, err := gophercloud.BuildQueryString(opts) - if err != nil { - return "", err - } - return q.String(), nil -} - -// ListDetail enumerates the available images. -func ListDetail(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { - url := listDetailURL(client) - if opts != nil { - query, err := opts.ToImageListQuery() - if err != nil { - return pagination.Pager{Err: err} - } - url += query - } - - createPage := func(r pagination.PageResult) pagination.Page { - return ImagePage{pagination.LinkedPageBase{PageResult: r}} - } - - return pagination.NewPager(client, url, createPage) -} - -// Get acquires additional detail about a specific image by ID. -// Use ExtractImage() to interpret the result as an openstack Image. -func Get(client *gophercloud.ServiceClient, id string) GetResult { - var result GetResult - _, result.Err = client.Get(getURL(client, id), &result.Body, nil) - return result -} - -// Delete deletes the specified image ID. -func Delete(client *gophercloud.ServiceClient, id string) DeleteResult { - var result DeleteResult - _, result.Err = client.Delete(deleteURL(client, id), nil) - return result -} - -// IDFromName is a convienience function that returns an image's ID given its name. 
-func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { - imageCount := 0 - imageID := "" - if name == "" { - return "", fmt.Errorf("An image name must be provided.") - } - pager := ListDetail(client, &ListOpts{ - Name: name, - }) - pager.EachPage(func(page pagination.Page) (bool, error) { - imageList, err := ExtractImages(page) - if err != nil { - return false, err - } - - for _, i := range imageList { - if i.Name == name { - imageCount++ - imageID = i.ID - } - } - return true, nil - }) - - switch imageCount { - case 0: - return "", fmt.Errorf("Unable to find image: %s", name) - case 1: - return imageID, nil - default: - return "", fmt.Errorf("Found %d images matching %s", imageCount, name) - } -} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/images/results.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/images/results.go deleted file mode 100644 index 482e7d6f6ea1..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/images/results.go +++ /dev/null @@ -1,97 +0,0 @@ -package images - -import ( - "github.com/mitchellh/mapstructure" - "github.com/rackspace/gophercloud" - "github.com/rackspace/gophercloud/pagination" -) - -// GetResult temporarily stores a Get response. -type GetResult struct { - gophercloud.Result -} - -// DeleteResult represents the result of an image.Delete operation. -type DeleteResult struct { - gophercloud.ErrResult -} - -// Extract interprets a GetResult as an Image. -func (gr GetResult) Extract() (*Image, error) { - if gr.Err != nil { - return nil, gr.Err - } - - var decoded struct { - Image Image `mapstructure:"image"` - } - - err := mapstructure.Decode(gr.Body, &decoded) - return &decoded.Image, err -} - -// Image is used for JSON (un)marshalling. -// It provides a description of an OS image. -type Image struct { - // ID contains the image's unique identifier. - ID string - - Created string - - // MinDisk and MinRAM specify the minimum resources a server must provide to be able to install the image. - MinDisk int - MinRAM int - - // Name provides a human-readable moniker for the OS image. - Name string - - // The Progress and Status fields indicate image-creation status. - // Any usable image will have 100% progress. - Progress int - Status string - - Updated string - - Metadata map[string]string -} - -// ImagePage contains a single page of results from a List operation. -// Use ExtractImages to convert it into a slice of usable structs. -type ImagePage struct { - pagination.LinkedPageBase -} - -// IsEmpty returns true if a page contains no Image results. -func (page ImagePage) IsEmpty() (bool, error) { - images, err := ExtractImages(page) - if err != nil { - return true, err - } - return len(images) == 0, nil -} - -// NextPageURL uses the response's embedded link reference to navigate to the next page of results. -func (page ImagePage) NextPageURL() (string, error) { - type resp struct { - Links []gophercloud.Link `mapstructure:"images_links"` - } - - var r resp - err := mapstructure.Decode(page.Body, &r) - if err != nil { - return "", err - } - - return gophercloud.ExtractNextURL(r.Links) -} - -// ExtractImages converts a page of List results into a slice of usable Image structs. 
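// A similar hedged sketch for the images package deleted here: look an image up by name and
// fetch its details (ListDetail plus ExtractImages would be used instead to walk every image).
// The client and the image name "ubuntu-16.04" are illustrative assumptions.
package example

import (
	"fmt"

	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack/compute/v2/images"
)

// showImage resolves an image by name and prints its status and minimum requirements.
func showImage(client *gophercloud.ServiceClient) error {
	id, err := images.IDFromName(client, "ubuntu-16.04")
	if err != nil {
		return err
	}
	image, err := images.Get(client, id).Extract()
	if err != nil {
		return err
	}
	fmt.Printf("%s (%s): status=%s minDisk=%dGB minRAM=%dMB\n", image.Name, image.ID, image.Status, image.MinDisk, image.MinRAM)
	return nil
}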
-func ExtractImages(page pagination.Page) ([]Image, error) { - casted := page.(ImagePage).Body - var results struct { - Images []Image `mapstructure:"images"` - } - - err := mapstructure.Decode(casted, &results) - return results.Images, err -} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/images/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/images/urls.go deleted file mode 100644 index b1bf1038fb08..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/images/urls.go +++ /dev/null @@ -1,15 +0,0 @@ -package images - -import "github.com/rackspace/gophercloud" - -func listDetailURL(client *gophercloud.ServiceClient) string { - return client.ServiceURL("images", "detail") -} - -func getURL(client *gophercloud.ServiceClient, id string) string { - return client.ServiceURL("images", id) -} - -func deleteURL(client *gophercloud.ServiceClient, id string) string { - return client.ServiceURL("images", id) -} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/doc.go deleted file mode 100644 index fe4567120c7d..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Package servers provides information and interaction with the server API -// resource in the OpenStack Compute service. -// -// A server is a virtual machine instance in the compute system. In order for -// one to be provisioned, a valid flavor and image are required. -package servers diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/fixtures.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/fixtures.go deleted file mode 100644 index 85cea70a8831..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/fixtures.go +++ /dev/null @@ -1,692 +0,0 @@ -// +build fixtures - -package servers - -import ( - "fmt" - "net/http" - "testing" - - th "github.com/rackspace/gophercloud/testhelper" - "github.com/rackspace/gophercloud/testhelper/client" -) - -// ServerListBody contains the canned body of a servers.List response. 
-const ServerListBody = ` -{ - "servers": [ - { - "status": "ACTIVE", - "updated": "2014-09-25T13:10:10Z", - "hostId": "29d3c8c896a45aa4c34e52247875d7fefc3d94bbcc9f622b5d204362", - "OS-EXT-SRV-ATTR:host": "devstack", - "addresses": { - "private": [ - { - "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:7c:1b:2b", - "version": 4, - "addr": "10.0.0.32", - "OS-EXT-IPS:type": "fixed" - } - ] - }, - "links": [ - { - "href": "http://104.130.131.164:8774/v2/fcad67a6189847c4aecfa3c81a05783b/servers/ef079b0c-e610-4dfb-b1aa-b49f07ac48e5", - "rel": "self" - }, - { - "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/servers/ef079b0c-e610-4dfb-b1aa-b49f07ac48e5", - "rel": "bookmark" - } - ], - "key_name": null, - "image": { - "id": "f90f6034-2570-4974-8351-6b49732ef2eb", - "links": [ - { - "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/images/f90f6034-2570-4974-8351-6b49732ef2eb", - "rel": "bookmark" - } - ] - }, - "OS-EXT-STS:task_state": null, - "OS-EXT-STS:vm_state": "active", - "OS-EXT-SRV-ATTR:instance_name": "instance-0000001e", - "OS-SRV-USG:launched_at": "2014-09-25T13:10:10.000000", - "OS-EXT-SRV-ATTR:hypervisor_hostname": "devstack", - "flavor": { - "id": "1", - "links": [ - { - "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/flavors/1", - "rel": "bookmark" - } - ] - }, - "id": "ef079b0c-e610-4dfb-b1aa-b49f07ac48e5", - "security_groups": [ - { - "name": "default" - } - ], - "OS-SRV-USG:terminated_at": null, - "OS-EXT-AZ:availability_zone": "nova", - "user_id": "9349aff8be7545ac9d2f1d00999a23cd", - "name": "herp", - "created": "2014-09-25T13:10:02Z", - "tenant_id": "fcad67a6189847c4aecfa3c81a05783b", - "OS-DCF:diskConfig": "MANUAL", - "os-extended-volumes:volumes_attached": [], - "accessIPv4": "", - "accessIPv6": "", - "progress": 0, - "OS-EXT-STS:power_state": 1, - "config_drive": "", - "metadata": {} - }, - { - "status": "ACTIVE", - "updated": "2014-09-25T13:04:49Z", - "hostId": "29d3c8c896a45aa4c34e52247875d7fefc3d94bbcc9f622b5d204362", - "OS-EXT-SRV-ATTR:host": "devstack", - "addresses": { - "private": [ - { - "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:9e:89:be", - "version": 4, - "addr": "10.0.0.31", - "OS-EXT-IPS:type": "fixed" - } - ] - }, - "links": [ - { - "href": "http://104.130.131.164:8774/v2/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba", - "rel": "self" - }, - { - "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba", - "rel": "bookmark" - } - ], - "key_name": null, - "image": { - "id": "f90f6034-2570-4974-8351-6b49732ef2eb", - "links": [ - { - "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/images/f90f6034-2570-4974-8351-6b49732ef2eb", - "rel": "bookmark" - } - ] - }, - "OS-EXT-STS:task_state": null, - "OS-EXT-STS:vm_state": "active", - "OS-EXT-SRV-ATTR:instance_name": "instance-0000001d", - "OS-SRV-USG:launched_at": "2014-09-25T13:04:49.000000", - "OS-EXT-SRV-ATTR:hypervisor_hostname": "devstack", - "flavor": { - "id": "1", - "links": [ - { - "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/flavors/1", - "rel": "bookmark" - } - ] - }, - "id": "9e5476bd-a4ec-4653-93d6-72c93aa682ba", - "security_groups": [ - { - "name": "default" - } - ], - "OS-SRV-USG:terminated_at": null, - "OS-EXT-AZ:availability_zone": "nova", - "user_id": "9349aff8be7545ac9d2f1d00999a23cd", - "name": "derp", - "created": "2014-09-25T13:04:41Z", - "tenant_id": "fcad67a6189847c4aecfa3c81a05783b", - "OS-DCF:diskConfig": "MANUAL", 
- "os-extended-volumes:volumes_attached": [], - "accessIPv4": "", - "accessIPv6": "", - "progress": 0, - "OS-EXT-STS:power_state": 1, - "config_drive": "", - "metadata": {} - } - ] -} -` - -// SingleServerBody is the canned body of a Get request on an existing server. -const SingleServerBody = ` -{ - "server": { - "status": "ACTIVE", - "updated": "2014-09-25T13:04:49Z", - "hostId": "29d3c8c896a45aa4c34e52247875d7fefc3d94bbcc9f622b5d204362", - "OS-EXT-SRV-ATTR:host": "devstack", - "addresses": { - "private": [ - { - "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:9e:89:be", - "version": 4, - "addr": "10.0.0.31", - "OS-EXT-IPS:type": "fixed" - } - ] - }, - "links": [ - { - "href": "http://104.130.131.164:8774/v2/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba", - "rel": "self" - }, - { - "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba", - "rel": "bookmark" - } - ], - "key_name": null, - "image": { - "id": "f90f6034-2570-4974-8351-6b49732ef2eb", - "links": [ - { - "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/images/f90f6034-2570-4974-8351-6b49732ef2eb", - "rel": "bookmark" - } - ] - }, - "OS-EXT-STS:task_state": null, - "OS-EXT-STS:vm_state": "active", - "OS-EXT-SRV-ATTR:instance_name": "instance-0000001d", - "OS-SRV-USG:launched_at": "2014-09-25T13:04:49.000000", - "OS-EXT-SRV-ATTR:hypervisor_hostname": "devstack", - "flavor": { - "id": "1", - "links": [ - { - "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/flavors/1", - "rel": "bookmark" - } - ] - }, - "id": "9e5476bd-a4ec-4653-93d6-72c93aa682ba", - "security_groups": [ - { - "name": "default" - } - ], - "OS-SRV-USG:terminated_at": null, - "OS-EXT-AZ:availability_zone": "nova", - "user_id": "9349aff8be7545ac9d2f1d00999a23cd", - "name": "derp", - "created": "2014-09-25T13:04:41Z", - "tenant_id": "fcad67a6189847c4aecfa3c81a05783b", - "OS-DCF:diskConfig": "MANUAL", - "os-extended-volumes:volumes_attached": [], - "accessIPv4": "", - "accessIPv6": "", - "progress": 0, - "OS-EXT-STS:power_state": 1, - "config_drive": "", - "metadata": {} - } -} -` - -const ServerPasswordBody = ` -{ - "password": "xlozO3wLCBRWAa2yDjCCVx8vwNPypxnypmRYDa/zErlQ+EzPe1S/Gz6nfmC52mOlOSCRuUOmG7kqqgejPof6M7bOezS387zjq4LSvvwp28zUknzy4YzfFGhnHAdai3TxUJ26pfQCYrq8UTzmKF2Bq8ioSEtVVzM0A96pDh8W2i7BOz6MdoiVyiev/I1K2LsuipfxSJR7Wdke4zNXJjHHP2RfYsVbZ/k9ANu+Nz4iIH8/7Cacud/pphH7EjrY6a4RZNrjQskrhKYed0YERpotyjYk1eDtRe72GrSiXteqCM4biaQ5w3ruS+AcX//PXk3uJ5kC7d67fPXaVz4WaQRYMg==" -} -` - -var ( - // ServerHerp is a Server struct that should correspond to the first result in ServerListBody. 
- ServerHerp = Server{ - Status: "ACTIVE", - Updated: "2014-09-25T13:10:10Z", - HostID: "29d3c8c896a45aa4c34e52247875d7fefc3d94bbcc9f622b5d204362", - Addresses: map[string]interface{}{ - "private": []interface{}{ - map[string]interface{}{ - "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:7c:1b:2b", - "version": float64(4), - "addr": "10.0.0.32", - "OS-EXT-IPS:type": "fixed", - }, - }, - }, - Links: []interface{}{ - map[string]interface{}{ - "href": "http://104.130.131.164:8774/v2/fcad67a6189847c4aecfa3c81a05783b/servers/ef079b0c-e610-4dfb-b1aa-b49f07ac48e5", - "rel": "self", - }, - map[string]interface{}{ - "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/servers/ef079b0c-e610-4dfb-b1aa-b49f07ac48e5", - "rel": "bookmark", - }, - }, - Image: map[string]interface{}{ - "id": "f90f6034-2570-4974-8351-6b49732ef2eb", - "links": []interface{}{ - map[string]interface{}{ - "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/images/f90f6034-2570-4974-8351-6b49732ef2eb", - "rel": "bookmark", - }, - }, - }, - Flavor: map[string]interface{}{ - "id": "1", - "links": []interface{}{ - map[string]interface{}{ - "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/flavors/1", - "rel": "bookmark", - }, - }, - }, - ID: "ef079b0c-e610-4dfb-b1aa-b49f07ac48e5", - UserID: "9349aff8be7545ac9d2f1d00999a23cd", - Name: "herp", - Created: "2014-09-25T13:10:02Z", - TenantID: "fcad67a6189847c4aecfa3c81a05783b", - Metadata: map[string]interface{}{}, - SecurityGroups: []map[string]interface{}{ - map[string]interface{}{ - "name": "default", - }, - }, - } - - // ServerDerp is a Server struct that should correspond to the second server in ServerListBody. - ServerDerp = Server{ - Status: "ACTIVE", - Updated: "2014-09-25T13:04:49Z", - HostID: "29d3c8c896a45aa4c34e52247875d7fefc3d94bbcc9f622b5d204362", - Addresses: map[string]interface{}{ - "private": []interface{}{ - map[string]interface{}{ - "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:9e:89:be", - "version": float64(4), - "addr": "10.0.0.31", - "OS-EXT-IPS:type": "fixed", - }, - }, - }, - Links: []interface{}{ - map[string]interface{}{ - "href": "http://104.130.131.164:8774/v2/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba", - "rel": "self", - }, - map[string]interface{}{ - "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba", - "rel": "bookmark", - }, - }, - Image: map[string]interface{}{ - "id": "f90f6034-2570-4974-8351-6b49732ef2eb", - "links": []interface{}{ - map[string]interface{}{ - "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/images/f90f6034-2570-4974-8351-6b49732ef2eb", - "rel": "bookmark", - }, - }, - }, - Flavor: map[string]interface{}{ - "id": "1", - "links": []interface{}{ - map[string]interface{}{ - "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/flavors/1", - "rel": "bookmark", - }, - }, - }, - ID: "9e5476bd-a4ec-4653-93d6-72c93aa682ba", - UserID: "9349aff8be7545ac9d2f1d00999a23cd", - Name: "derp", - Created: "2014-09-25T13:04:41Z", - TenantID: "fcad67a6189847c4aecfa3c81a05783b", - Metadata: map[string]interface{}{}, - SecurityGroups: []map[string]interface{}{ - map[string]interface{}{ - "name": "default", - }, - }, - } -) - -// HandleServerCreationSuccessfully sets up the test server to respond to a server creation request -// with a given response. 
-func HandleServerCreationSuccessfully(t *testing.T, response string) { - th.Mux.HandleFunc("/servers", func(w http.ResponseWriter, r *http.Request) { - th.TestMethod(t, r, "POST") - th.TestHeader(t, r, "X-Auth-Token", client.TokenID) - th.TestJSONRequest(t, r, `{ - "server": { - "name": "derp", - "imageRef": "f90f6034-2570-4974-8351-6b49732ef2eb", - "flavorRef": "1" - } - }`) - - w.WriteHeader(http.StatusAccepted) - w.Header().Add("Content-Type", "application/json") - fmt.Fprintf(w, response) - }) -} - -// HandleServerListSuccessfully sets up the test server to respond to a server List request. -func HandleServerListSuccessfully(t *testing.T) { - th.Mux.HandleFunc("/servers/detail", func(w http.ResponseWriter, r *http.Request) { - th.TestMethod(t, r, "GET") - th.TestHeader(t, r, "X-Auth-Token", client.TokenID) - - w.Header().Add("Content-Type", "application/json") - r.ParseForm() - marker := r.Form.Get("marker") - switch marker { - case "": - fmt.Fprintf(w, ServerListBody) - case "9e5476bd-a4ec-4653-93d6-72c93aa682ba": - fmt.Fprintf(w, `{ "servers": [] }`) - default: - t.Fatalf("/servers/detail invoked with unexpected marker=[%s]", marker) - } - }) -} - -// HandleServerDeletionSuccessfully sets up the test server to respond to a server deletion request. -func HandleServerDeletionSuccessfully(t *testing.T) { - th.Mux.HandleFunc("/servers/asdfasdfasdf", func(w http.ResponseWriter, r *http.Request) { - th.TestMethod(t, r, "DELETE") - th.TestHeader(t, r, "X-Auth-Token", client.TokenID) - - w.WriteHeader(http.StatusNoContent) - }) -} - -// HandleServerForceDeletionSuccessfully sets up the test server to respond to a server force deletion -// request. -func HandleServerForceDeletionSuccessfully(t *testing.T) { - th.Mux.HandleFunc("/servers/asdfasdfasdf/action", func(w http.ResponseWriter, r *http.Request) { - th.TestMethod(t, r, "POST") - th.TestHeader(t, r, "X-Auth-Token", client.TokenID) - th.TestJSONRequest(t, r, `{ "forceDelete": "" }`) - - w.WriteHeader(http.StatusAccepted) - }) -} - -// HandleServerGetSuccessfully sets up the test server to respond to a server Get request. -func HandleServerGetSuccessfully(t *testing.T) { - th.Mux.HandleFunc("/servers/1234asdf", func(w http.ResponseWriter, r *http.Request) { - th.TestMethod(t, r, "GET") - th.TestHeader(t, r, "X-Auth-Token", client.TokenID) - th.TestHeader(t, r, "Accept", "application/json") - - fmt.Fprintf(w, SingleServerBody) - }) -} - -// HandleServerUpdateSuccessfully sets up the test server to respond to a server Update request. -func HandleServerUpdateSuccessfully(t *testing.T) { - th.Mux.HandleFunc("/servers/1234asdf", func(w http.ResponseWriter, r *http.Request) { - th.TestMethod(t, r, "PUT") - th.TestHeader(t, r, "X-Auth-Token", client.TokenID) - th.TestHeader(t, r, "Accept", "application/json") - th.TestHeader(t, r, "Content-Type", "application/json") - th.TestJSONRequest(t, r, `{ "server": { "name": "new-name" } }`) - - fmt.Fprintf(w, SingleServerBody) - }) -} - -// HandleAdminPasswordChangeSuccessfully sets up the test server to respond to a server password -// change request. 
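// A hedged sketch of how these canned handlers are typically driven from a test in the same
// package (built with the "fixtures" tag). th.SetupHTTP, th.TeardownHTTP and
// client.ServiceClient() are assumed to exist in the testhelper packages already imported by
// this file; the server ID "1234asdf" matches the route HandleServerGetSuccessfully registers.
package servers

import (
	"testing"

	th "github.com/rackspace/gophercloud/testhelper"
	"github.com/rackspace/gophercloud/testhelper/client"
)

func TestGetServerSketch(t *testing.T) {
	th.SetupHTTP()          // assumed helper: starts the shared httptest server behind th.Mux
	defer th.TeardownHTTP() // assumed helper: shuts it down when the test finishes

	HandleServerGetSuccessfully(t) // serves SingleServerBody for GET /servers/1234asdf

	srv, err := Get(client.ServiceClient(), "1234asdf").Extract()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if srv.ID != ServerDerp.ID {
		t.Errorf("got server %q, want %q", srv.ID, ServerDerp.ID)
	}
}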
-func HandleAdminPasswordChangeSuccessfully(t *testing.T) { - th.Mux.HandleFunc("/servers/1234asdf/action", func(w http.ResponseWriter, r *http.Request) { - th.TestMethod(t, r, "POST") - th.TestHeader(t, r, "X-Auth-Token", client.TokenID) - th.TestJSONRequest(t, r, `{ "changePassword": { "adminPass": "new-password" } }`) - - w.WriteHeader(http.StatusAccepted) - }) -} - -// HandleRebootSuccessfully sets up the test server to respond to a reboot request with success. -func HandleRebootSuccessfully(t *testing.T) { - th.Mux.HandleFunc("/servers/1234asdf/action", func(w http.ResponseWriter, r *http.Request) { - th.TestMethod(t, r, "POST") - th.TestHeader(t, r, "X-Auth-Token", client.TokenID) - th.TestJSONRequest(t, r, `{ "reboot": { "type": "SOFT" } }`) - - w.WriteHeader(http.StatusAccepted) - }) -} - -// HandleRebuildSuccessfully sets up the test server to respond to a rebuild request with success. -func HandleRebuildSuccessfully(t *testing.T, response string) { - th.Mux.HandleFunc("/servers/1234asdf/action", func(w http.ResponseWriter, r *http.Request) { - th.TestMethod(t, r, "POST") - th.TestHeader(t, r, "X-Auth-Token", client.TokenID) - th.TestJSONRequest(t, r, ` - { - "rebuild": { - "name": "new-name", - "adminPass": "swordfish", - "imageRef": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/images/f90f6034-2570-4974-8351-6b49732ef2eb", - "accessIPv4": "1.2.3.4" - } - } - `) - - w.WriteHeader(http.StatusAccepted) - w.Header().Add("Content-Type", "application/json") - fmt.Fprintf(w, response) - }) -} - -// HandleServerRescueSuccessfully sets up the test server to respond to a server Rescue request. -func HandleServerRescueSuccessfully(t *testing.T) { - th.Mux.HandleFunc("/servers/1234asdf/action", func(w http.ResponseWriter, r *http.Request) { - th.TestMethod(t, r, "POST") - th.TestHeader(t, r, "X-Auth-Token", client.TokenID) - th.TestJSONRequest(t, r, `{ "rescue": { "adminPass": "1234567890" } }`) - - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{ "adminPass": "1234567890" }`)) - }) -} - -// HandleMetadatumGetSuccessfully sets up the test server to respond to a metadatum Get request. -func HandleMetadatumGetSuccessfully(t *testing.T) { - th.Mux.HandleFunc("/servers/1234asdf/metadata/foo", func(w http.ResponseWriter, r *http.Request) { - th.TestMethod(t, r, "GET") - th.TestHeader(t, r, "X-Auth-Token", client.TokenID) - th.TestHeader(t, r, "Accept", "application/json") - - w.WriteHeader(http.StatusOK) - w.Header().Add("Content-Type", "application/json") - w.Write([]byte(`{ "meta": {"foo":"bar"}}`)) - }) -} - -// HandleMetadatumCreateSuccessfully sets up the test server to respond to a metadatum Create request. -func HandleMetadatumCreateSuccessfully(t *testing.T) { - th.Mux.HandleFunc("/servers/1234asdf/metadata/foo", func(w http.ResponseWriter, r *http.Request) { - th.TestMethod(t, r, "PUT") - th.TestHeader(t, r, "X-Auth-Token", client.TokenID) - th.TestJSONRequest(t, r, `{ - "meta": { - "foo": "bar" - } - }`) - - w.WriteHeader(http.StatusOK) - w.Header().Add("Content-Type", "application/json") - w.Write([]byte(`{ "meta": {"foo":"bar"}}`)) - }) -} - -// HandleMetadatumDeleteSuccessfully sets up the test server to respond to a metadatum Delete request. 
-func HandleMetadatumDeleteSuccessfully(t *testing.T) { - th.Mux.HandleFunc("/servers/1234asdf/metadata/foo", func(w http.ResponseWriter, r *http.Request) { - th.TestMethod(t, r, "DELETE") - th.TestHeader(t, r, "X-Auth-Token", client.TokenID) - - w.WriteHeader(http.StatusNoContent) - }) -} - -// HandleMetadataGetSuccessfully sets up the test server to respond to a metadata Get request. -func HandleMetadataGetSuccessfully(t *testing.T) { - th.Mux.HandleFunc("/servers/1234asdf/metadata", func(w http.ResponseWriter, r *http.Request) { - th.TestMethod(t, r, "GET") - th.TestHeader(t, r, "X-Auth-Token", client.TokenID) - th.TestHeader(t, r, "Accept", "application/json") - - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{ "metadata": {"foo":"bar", "this":"that"}}`)) - }) -} - -// HandleMetadataResetSuccessfully sets up the test server to respond to a metadata Create request. -func HandleMetadataResetSuccessfully(t *testing.T) { - th.Mux.HandleFunc("/servers/1234asdf/metadata", func(w http.ResponseWriter, r *http.Request) { - th.TestMethod(t, r, "PUT") - th.TestHeader(t, r, "X-Auth-Token", client.TokenID) - th.TestJSONRequest(t, r, `{ - "metadata": { - "foo": "bar", - "this": "that" - } - }`) - - w.WriteHeader(http.StatusOK) - w.Header().Add("Content-Type", "application/json") - w.Write([]byte(`{ "metadata": {"foo":"bar", "this":"that"}}`)) - }) -} - -// HandleMetadataUpdateSuccessfully sets up the test server to respond to a metadata Update request. -func HandleMetadataUpdateSuccessfully(t *testing.T) { - th.Mux.HandleFunc("/servers/1234asdf/metadata", func(w http.ResponseWriter, r *http.Request) { - th.TestMethod(t, r, "POST") - th.TestHeader(t, r, "X-Auth-Token", client.TokenID) - th.TestJSONRequest(t, r, `{ - "metadata": { - "foo": "baz", - "this": "those" - } - }`) - - w.WriteHeader(http.StatusOK) - w.Header().Add("Content-Type", "application/json") - w.Write([]byte(`{ "metadata": {"foo":"baz", "this":"those"}}`)) - }) -} - -// ListAddressesExpected represents an expected repsonse from a ListAddresses request. -var ListAddressesExpected = map[string][]Address{ - "public": []Address{ - Address{ - Version: 4, - Address: "80.56.136.39", - }, - Address{ - Version: 6, - Address: "2001:4800:790e:510:be76:4eff:fe04:82a8", - }, - }, - "private": []Address{ - Address{ - Version: 4, - Address: "10.880.3.154", - }, - }, -} - -// HandleAddressListSuccessfully sets up the test server to respond to a ListAddresses request. -func HandleAddressListSuccessfully(t *testing.T) { - th.Mux.HandleFunc("/servers/asdfasdfasdf/ips", func(w http.ResponseWriter, r *http.Request) { - th.TestMethod(t, r, "GET") - th.TestHeader(t, r, "X-Auth-Token", client.TokenID) - - w.Header().Add("Content-Type", "application/json") - fmt.Fprintf(w, `{ - "addresses": { - "public": [ - { - "version": 4, - "addr": "50.56.176.35" - }, - { - "version": 6, - "addr": "2001:4800:780e:510:be76:4eff:fe04:84a8" - } - ], - "private": [ - { - "version": 4, - "addr": "10.180.3.155" - } - ] - } - }`) - }) -} - -// ListNetworkAddressesExpected represents an expected repsonse from a ListAddressesByNetwork request. -var ListNetworkAddressesExpected = []Address{ - Address{ - Version: 4, - Address: "50.56.176.35", - }, - Address{ - Version: 6, - Address: "2001:4800:780e:510:be76:4eff:fe04:84a8", - }, -} - -// HandleNetworkAddressListSuccessfully sets up the test server to respond to a ListAddressesByNetwork request. 
-func HandleNetworkAddressListSuccessfully(t *testing.T) { - th.Mux.HandleFunc("/servers/asdfasdfasdf/ips/public", func(w http.ResponseWriter, r *http.Request) { - th.TestMethod(t, r, "GET") - th.TestHeader(t, r, "X-Auth-Token", client.TokenID) - - w.Header().Add("Content-Type", "application/json") - fmt.Fprintf(w, `{ - "public": [ - { - "version": 4, - "addr": "50.56.176.35" - }, - { - "version": 6, - "addr": "2001:4800:780e:510:be76:4eff:fe04:84a8" - } - ] - }`) - }) -} - -// HandleCreateServerImageSuccessfully sets up the test server to respond to a TestCreateServerImage request. -func HandleCreateServerImageSuccessfully(t *testing.T) { - th.Mux.HandleFunc("/servers/serverimage/action", func(w http.ResponseWriter, r *http.Request) { - th.TestMethod(t, r, "POST") - th.TestHeader(t, r, "X-Auth-Token", client.TokenID) - w.Header().Add("Location", "https://0.0.0.0/images/xxxx-xxxxx-xxxxx-xxxx") - w.WriteHeader(http.StatusAccepted) - }) -} - -// HandlePasswordGetSuccessfully sets up the test server to respond to a password Get request. -func HandlePasswordGetSuccessfully(t *testing.T) { - th.Mux.HandleFunc("/servers/1234asdf/os-server-password", func(w http.ResponseWriter, r *http.Request) { - th.TestMethod(t, r, "GET") - th.TestHeader(t, r, "X-Auth-Token", client.TokenID) - th.TestHeader(t, r, "Accept", "application/json") - - fmt.Fprintf(w, ServerPasswordBody) - }) -} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/requests.go deleted file mode 100644 index e6490539ce3e..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/requests.go +++ /dev/null @@ -1,872 +0,0 @@ -package servers - -import ( - "encoding/base64" - "encoding/json" - "errors" - "fmt" - - "github.com/rackspace/gophercloud" - "github.com/rackspace/gophercloud/openstack/compute/v2/flavors" - "github.com/rackspace/gophercloud/openstack/compute/v2/images" - "github.com/rackspace/gophercloud/pagination" -) - -// ListOptsBuilder allows extensions to add additional parameters to the -// List request. -type ListOptsBuilder interface { - ToServerListQuery() (string, error) -} - -// ListOpts allows the filtering and sorting of paginated collections through -// the API. Filtering is achieved by passing in struct field values that map to -// the server attributes you want to see returned. Marker and Limit are used -// for pagination. -type ListOpts struct { - // A time/date stamp for when the server last changed status. - ChangesSince string `q:"changes-since"` - - // Name of the image in URL format. - Image string `q:"image"` - - // Name of the flavor in URL format. - Flavor string `q:"flavor"` - - // Name of the server as a string; can be queried with regular expressions. - // Realize that ?name=bob returns both bob and bobb. If you need to match bob - // only, you can use a regular expression matching the syntax of the - // underlying database server implemented for Compute. - Name string `q:"name"` - - // Value of the status of the server so that you can filter on "ACTIVE" for example. - Status string `q:"status"` - - // Name of the host as a string. - Host string `q:"host"` - - // UUID of the server at which you want to set a marker. - Marker string `q:"marker"` - - // Integer value for the limit of values to return. - Limit int `q:"limit"` - - // Bool to show all tenants - AllTenants bool `q:"all_tenants"` -} - -// ToServerListQuery formats a ListOpts into a query string. 
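// A minimal sketch of listing servers through ListOpts and the pager returned by List, assuming
// an authenticated compute *gophercloud.ServiceClient; the "ACTIVE" status filter and page size
// are illustrative.
package example

import (
	"fmt"

	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack/compute/v2/servers"
	"github.com/rackspace/gophercloud/pagination"
)

// listActiveServers walks every page of results and prints each server's ID, name and status.
func listActiveServers(client *gophercloud.ServiceClient) error {
	pager := servers.List(client, servers.ListOpts{Status: "ACTIVE", Limit: 50})
	return pager.EachPage(func(page pagination.Page) (bool, error) {
		list, err := servers.ExtractServers(page)
		if err != nil {
			return false, err
		}
		for _, s := range list {
			fmt.Printf("%s\t%s\t%s\n", s.ID, s.Name, s.Status)
		}
		return true, nil
	})
}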
-func (opts ListOpts) ToServerListQuery() (string, error) { - q, err := gophercloud.BuildQueryString(opts) - if err != nil { - return "", err - } - return q.String(), nil -} - -// List makes a request against the API to list servers accessible to you. -func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { - url := listDetailURL(client) - - if opts != nil { - query, err := opts.ToServerListQuery() - if err != nil { - return pagination.Pager{Err: err} - } - url += query - } - - createPageFn := func(r pagination.PageResult) pagination.Page { - return ServerPage{pagination.LinkedPageBase{PageResult: r}} - } - - return pagination.NewPager(client, url, createPageFn) -} - -// CreateOptsBuilder describes struct types that can be accepted by the Create call. -// The CreateOpts struct in this package does. -type CreateOptsBuilder interface { - ToServerCreateMap() (map[string]interface{}, error) -} - -// Network is used within CreateOpts to control a new server's network attachments. -type Network struct { - // UUID of a nova-network to attach to the newly provisioned server. - // Required unless Port is provided. - UUID string - - // Port of a neutron network to attach to the newly provisioned server. - // Required unless UUID is provided. - Port string - - // FixedIP [optional] specifies a fixed IPv4 address to be used on this network. - FixedIP string -} - -// Personality is an array of files that are injected into the server at launch. -type Personality []*File - -// File is used within CreateOpts and RebuildOpts to inject a file into the server at launch. -// File implements the json.Marshaler interface, so when a Create or Rebuild operation is requested, -// json.Marshal will call File's MarshalJSON method. -type File struct { - // Path of the file - Path string - // Contents of the file. Maximum content size is 255 bytes. - Contents []byte -} - -// MarshalJSON marshals the escaped file, base64 encoding the contents. -func (f *File) MarshalJSON() ([]byte, error) { - file := struct { - Path string `json:"path"` - Contents string `json:"contents"` - }{ - Path: f.Path, - Contents: base64.StdEncoding.EncodeToString(f.Contents), - } - return json.Marshal(file) -} - -// CreateOpts specifies server creation parameters. -type CreateOpts struct { - // Name [required] is the name to assign to the newly launched server. - Name string - - // ImageRef [optional; required if ImageName is not provided] is the ID or full - // URL to the image that contains the server's OS and initial state. - // Also optional if using the boot-from-volume extension. - ImageRef string - - // ImageName [optional; required if ImageRef is not provided] is the name of the - // image that contains the server's OS and initial state. - // Also optional if using the boot-from-volume extension. - ImageName string - - // FlavorRef [optional; required if FlavorName is not provided] is the ID or - // full URL to the flavor that describes the server's specs. - FlavorRef string - - // FlavorName [optional; required if FlavorRef is not provided] is the name of - // the flavor that describes the server's specs. - FlavorName string - - // SecurityGroups [optional] lists the names of the security groups to which this server should belong. - SecurityGroups []string - - // UserData [optional] contains configuration information or scripts to use upon launch. - // Create will base64-encode it for you. - UserData []byte - - // AvailabilityZone [optional] in which to launch the server. 
- AvailabilityZone string - - // Networks [optional] dictates how this server will be attached to available networks. - // By default, the server will be attached to all isolated networks for the tenant. - Networks []Network - - // Metadata [optional] contains key-value pairs (up to 255 bytes each) to attach to the server. - Metadata map[string]string - - // Personality [optional] includes files to inject into the server at launch. - // Create will base64-encode file contents for you. - Personality Personality - - // ConfigDrive [optional] enables metadata injection through a configuration drive. - ConfigDrive bool - - // AdminPass [optional] sets the root user password. If not set, a randomly-generated - // password will be created and returned in the response. - AdminPass string - - // AccessIPv4 [optional] specifies an IPv4 address for the instance. - AccessIPv4 string - - // AccessIPv6 [optional] specifies an IPv6 address for the instance. - AccessIPv6 string -} - -// ToServerCreateMap assembles a request body based on the contents of a CreateOpts. -func (opts CreateOpts) ToServerCreateMap() (map[string]interface{}, error) { - server := make(map[string]interface{}) - - server["name"] = opts.Name - server["imageRef"] = opts.ImageRef - server["imageName"] = opts.ImageName - server["flavorRef"] = opts.FlavorRef - server["flavorName"] = opts.FlavorName - - if opts.UserData != nil { - encoded := base64.StdEncoding.EncodeToString(opts.UserData) - server["user_data"] = &encoded - } - if opts.ConfigDrive { - server["config_drive"] = "true" - } - if opts.AvailabilityZone != "" { - server["availability_zone"] = opts.AvailabilityZone - } - if opts.Metadata != nil { - server["metadata"] = opts.Metadata - } - if opts.AdminPass != "" { - server["adminPass"] = opts.AdminPass - } - if opts.AccessIPv4 != "" { - server["accessIPv4"] = opts.AccessIPv4 - } - if opts.AccessIPv6 != "" { - server["accessIPv6"] = opts.AccessIPv6 - } - - if len(opts.SecurityGroups) > 0 { - securityGroups := make([]map[string]interface{}, len(opts.SecurityGroups)) - for i, groupName := range opts.SecurityGroups { - securityGroups[i] = map[string]interface{}{"name": groupName} - } - server["security_groups"] = securityGroups - } - - if len(opts.Networks) > 0 { - networks := make([]map[string]interface{}, len(opts.Networks)) - for i, net := range opts.Networks { - networks[i] = make(map[string]interface{}) - if net.UUID != "" { - networks[i]["uuid"] = net.UUID - } - if net.Port != "" { - networks[i]["port"] = net.Port - } - if net.FixedIP != "" { - networks[i]["fixed_ip"] = net.FixedIP - } - } - server["networks"] = networks - } - - if len(opts.Personality) > 0 { - server["personality"] = opts.Personality - } - - return map[string]interface{}{"server": server}, nil -} - -// Create requests a server to be provisioned to the user in the current tenant. -func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult { - var res CreateResult - - reqBody, err := opts.ToServerCreateMap() - if err != nil { - res.Err = err - return res - } - - // If ImageRef isn't provided, use ImageName to ascertain the image ID. 
- if reqBody["server"].(map[string]interface{})["imageRef"].(string) == "" { - imageName := reqBody["server"].(map[string]interface{})["imageName"].(string) - if imageName == "" { - res.Err = errors.New("One and only one of ImageRef and ImageName must be provided.") - return res - } - imageID, err := images.IDFromName(client, imageName) - if err != nil { - res.Err = err - return res - } - reqBody["server"].(map[string]interface{})["imageRef"] = imageID - } - delete(reqBody["server"].(map[string]interface{}), "imageName") - - // If FlavorRef isn't provided, use FlavorName to ascertain the flavor ID. - if reqBody["server"].(map[string]interface{})["flavorRef"].(string) == "" { - flavorName := reqBody["server"].(map[string]interface{})["flavorName"].(string) - if flavorName == "" { - res.Err = errors.New("One and only one of FlavorRef and FlavorName must be provided.") - return res - } - flavorID, err := flavors.IDFromName(client, flavorName) - if err != nil { - res.Err = err - return res - } - reqBody["server"].(map[string]interface{})["flavorRef"] = flavorID - } - delete(reqBody["server"].(map[string]interface{}), "flavorName") - - _, res.Err = client.Post(listURL(client), reqBody, &res.Body, nil) - return res -} - -// Delete requests that a server previously provisioned be removed from your account. -func Delete(client *gophercloud.ServiceClient, id string) DeleteResult { - var res DeleteResult - _, res.Err = client.Delete(deleteURL(client, id), nil) - return res -} - -func ForceDelete(client *gophercloud.ServiceClient, id string) ActionResult { - var req struct { - ForceDelete string `json:"forceDelete"` - } - - var res ActionResult - _, res.Err = client.Post(actionURL(client, id), req, nil, nil) - return res - -} - -// Get requests details on a single server, by ID. -func Get(client *gophercloud.ServiceClient, id string) GetResult { - var result GetResult - _, result.Err = client.Get(getURL(client, id), &result.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200, 203}, - }) - return result -} - -// UpdateOptsBuilder allows extensions to add additional attributes to the Update request. -type UpdateOptsBuilder interface { - ToServerUpdateMap() map[string]interface{} -} - -// UpdateOpts specifies the base attributes that may be updated on an existing server. -type UpdateOpts struct { - // Name [optional] changes the displayed name of the server. - // The server host name will *not* change. - // Server names are not constrained to be unique, even within the same tenant. - Name string - - // AccessIPv4 [optional] provides a new IPv4 address for the instance. - AccessIPv4 string - - // AccessIPv6 [optional] provides a new IPv6 address for the instance. - AccessIPv6 string -} - -// ToServerUpdateMap formats an UpdateOpts structure into a request body. -func (opts UpdateOpts) ToServerUpdateMap() map[string]interface{} { - server := make(map[string]string) - if opts.Name != "" { - server["name"] = opts.Name - } - if opts.AccessIPv4 != "" { - server["accessIPv4"] = opts.AccessIPv4 - } - if opts.AccessIPv6 != "" { - server["accessIPv6"] = opts.AccessIPv6 - } - return map[string]interface{}{"server": server} -} - -// Update requests that various attributes of the indicated server be changed. 
-func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) UpdateResult { - var result UpdateResult - reqBody := opts.ToServerUpdateMap() - _, result.Err = client.Put(updateURL(client, id), reqBody, &result.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - return result -} - -// ChangeAdminPassword alters the administrator or root password for a specified server. -func ChangeAdminPassword(client *gophercloud.ServiceClient, id, newPassword string) ActionResult { - var req struct { - ChangePassword struct { - AdminPass string `json:"adminPass"` - } `json:"changePassword"` - } - - req.ChangePassword.AdminPass = newPassword - - var res ActionResult - _, res.Err = client.Post(actionURL(client, id), req, nil, nil) - return res -} - -// ErrArgument errors occur when an argument supplied to a package function -// fails to fall within acceptable values. For example, the Reboot() function -// expects the "how" parameter to be one of HardReboot or SoftReboot. These -// constants are (currently) strings, leading someone to wonder if they can pass -// other string values instead, perhaps in an effort to break the API of their -// provider. Reboot() returns this error in this situation. -// -// Function identifies which function was called/which function is generating -// the error. -// Argument identifies which formal argument was responsible for producing the -// error. -// Value provides the value as it was passed into the function. -type ErrArgument struct { - Function, Argument string - Value interface{} -} - -// Error yields a useful diagnostic for debugging purposes. -func (e *ErrArgument) Error() string { - return fmt.Sprintf("Bad argument in call to %s, formal parameter %s, value %#v", e.Function, e.Argument, e.Value) -} - -func (e *ErrArgument) String() string { - return e.Error() -} - -// RebootMethod describes the mechanisms by which a server reboot can be requested. -type RebootMethod string - -// These constants determine how a server should be rebooted. -// See the Reboot() function for further details. -const ( - SoftReboot RebootMethod = "SOFT" - HardReboot RebootMethod = "HARD" - OSReboot = SoftReboot - PowerCycle = HardReboot -) - -// Reboot requests that a given server reboot. -// Two methods exist for rebooting a server: -// -// HardReboot (aka PowerCycle) restarts the server instance by physically cutting power to the machine, or if a VM, -// terminating it at the hypervisor level. -// It's done. Caput. Full stop. -// Then, after a brief while, power is restored or the VM instance restarted. -// -// SoftReboot (aka OSReboot) simply tells the OS to restart under its own procedures. -// E.g., in Linux, asking it to enter runlevel 6, or executing "sudo shutdown -r now", or by asking Windows to restart the machine. 
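// A minimal sketch of the reboot call documented above, assuming an authenticated compute
// *gophercloud.ServiceClient and an existing server ID.
package example

import (
	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack/compute/v2/servers"
)

// restartServer asks the guest OS to restart itself; pass servers.HardReboot to power-cycle the
// instance instead.
func restartServer(client *gophercloud.ServiceClient, id string) error {
	return servers.Reboot(client, id, servers.SoftReboot).Err
}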
-func Reboot(client *gophercloud.ServiceClient, id string, how RebootMethod) ActionResult { - var res ActionResult - - if (how != SoftReboot) && (how != HardReboot) { - res.Err = &ErrArgument{ - Function: "Reboot", - Argument: "how", - Value: how, - } - return res - } - - reqBody := struct { - C map[string]string `json:"reboot"` - }{ - map[string]string{"type": string(how)}, - } - - _, res.Err = client.Post(actionURL(client, id), reqBody, nil, nil) - return res -} - -// RebuildOptsBuilder is an interface that allows extensions to override the -// default behaviour of rebuild options -type RebuildOptsBuilder interface { - ToServerRebuildMap() (map[string]interface{}, error) -} - -// RebuildOpts represents the configuration options used in a server rebuild -// operation -type RebuildOpts struct { - // Required. The ID of the image you want your server to be provisioned on - ImageID string - - // Name to set the server to - Name string - - // Required. The server's admin password - AdminPass string - - // AccessIPv4 [optional] provides a new IPv4 address for the instance. - AccessIPv4 string - - // AccessIPv6 [optional] provides a new IPv6 address for the instance. - AccessIPv6 string - - // Metadata [optional] contains key-value pairs (up to 255 bytes each) to attach to the server. - Metadata map[string]string - - // Personality [optional] includes files to inject into the server at launch. - // Rebuild will base64-encode file contents for you. - Personality Personality -} - -// ToServerRebuildMap formats a RebuildOpts struct into a map for use in JSON -func (opts RebuildOpts) ToServerRebuildMap() (map[string]interface{}, error) { - var err error - server := make(map[string]interface{}) - - if opts.AdminPass == "" { - err = fmt.Errorf("AdminPass is required") - } - - if opts.ImageID == "" { - err = fmt.Errorf("ImageID is required") - } - - if err != nil { - return server, err - } - - server["name"] = opts.Name - server["adminPass"] = opts.AdminPass - server["imageRef"] = opts.ImageID - - if opts.AccessIPv4 != "" { - server["accessIPv4"] = opts.AccessIPv4 - } - - if opts.AccessIPv6 != "" { - server["accessIPv6"] = opts.AccessIPv6 - } - - if opts.Metadata != nil { - server["metadata"] = opts.Metadata - } - - if len(opts.Personality) > 0 { - server["personality"] = opts.Personality - } - - return map[string]interface{}{"rebuild": server}, nil -} - -// Rebuild will reprovision the server according to the configuration options -// provided in the RebuildOpts struct. -func Rebuild(client *gophercloud.ServiceClient, id string, opts RebuildOptsBuilder) RebuildResult { - var result RebuildResult - - if id == "" { - result.Err = fmt.Errorf("ID is required") - return result - } - - reqBody, err := opts.ToServerRebuildMap() - if err != nil { - result.Err = err - return result - } - - _, result.Err = client.Post(actionURL(client, id), reqBody, &result.Body, nil) - return result -} - -// ResizeOptsBuilder is an interface that allows extensions to override the default structure of -// a Resize request. -type ResizeOptsBuilder interface { - ToServerResizeMap() (map[string]interface{}, error) -} - -// ResizeOpts represents the configuration options used to control a Resize operation. -type ResizeOpts struct { - // FlavorRef is the ID of the flavor you wish your server to become. - FlavorRef string -} - -// ToServerResizeMap formats a ResizeOpts as a map that can be used as a JSON request body for the -// Resize request. 
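// A minimal sketch of rebuilding a server with RebuildOpts; ImageID and AdminPass are the two
// fields ToServerRebuildMap requires, and the values shown are illustrative.
package example

import (
	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack/compute/v2/servers"
)

// reimageServer reprovisions an existing server onto a new image and name.
func reimageServer(client *gophercloud.ServiceClient, serverID, imageID string) error {
	_, err := servers.Rebuild(client, serverID, servers.RebuildOpts{
		ImageID:   imageID,
		AdminPass: "swordfish", // illustrative; use a real secret in practice
		Name:      "rebuilt-node",
	}).Extract()
	return err
}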
-func (opts ResizeOpts) ToServerResizeMap() (map[string]interface{}, error) { - resize := map[string]interface{}{ - "flavorRef": opts.FlavorRef, - } - - return map[string]interface{}{"resize": resize}, nil -} - -// Resize instructs the provider to change the flavor of the server. -// Note that this implies rebuilding it. -// Unfortunately, one cannot pass rebuild parameters to the resize function. -// When the resize completes, the server will be in RESIZE_VERIFY state. -// While in this state, you can explore the use of the new server's configuration. -// If you like it, call ConfirmResize() to commit the resize permanently. -// Otherwise, call RevertResize() to restore the old configuration. -func Resize(client *gophercloud.ServiceClient, id string, opts ResizeOptsBuilder) ActionResult { - var res ActionResult - reqBody, err := opts.ToServerResizeMap() - if err != nil { - res.Err = err - return res - } - - _, res.Err = client.Post(actionURL(client, id), reqBody, nil, nil) - return res -} - -// ConfirmResize confirms a previous resize operation on a server. -// See Resize() for more details. -func ConfirmResize(client *gophercloud.ServiceClient, id string) ActionResult { - var res ActionResult - - reqBody := map[string]interface{}{"confirmResize": nil} - _, res.Err = client.Post(actionURL(client, id), reqBody, nil, &gophercloud.RequestOpts{ - OkCodes: []int{201, 202, 204}, - }) - return res -} - -// RevertResize cancels a previous resize operation on a server. -// See Resize() for more details. -func RevertResize(client *gophercloud.ServiceClient, id string) ActionResult { - var res ActionResult - reqBody := map[string]interface{}{"revertResize": nil} - _, res.Err = client.Post(actionURL(client, id), reqBody, nil, nil) - return res -} - -// RescueOptsBuilder is an interface that allows extensions to override the -// default structure of a Rescue request. -type RescueOptsBuilder interface { - ToServerRescueMap() (map[string]interface{}, error) -} - -// RescueOpts represents the configuration options used to control a Rescue -// option. -type RescueOpts struct { - // AdminPass is the desired administrative password for the instance in - // RESCUE mode. If it's left blank, the server will generate a password. - AdminPass string -} - -// ToServerRescueMap formats a RescueOpts as a map that can be used as a JSON -// request body for the Rescue request. -func (opts RescueOpts) ToServerRescueMap() (map[string]interface{}, error) { - server := make(map[string]interface{}) - if opts.AdminPass != "" { - server["adminPass"] = opts.AdminPass - } - return map[string]interface{}{"rescue": server}, nil -} - -// Rescue instructs the provider to place the server into RESCUE mode. -func Rescue(client *gophercloud.ServiceClient, id string, opts RescueOptsBuilder) RescueResult { - var result RescueResult - - if id == "" { - result.Err = fmt.Errorf("ID is required") - return result - } - reqBody, err := opts.ToServerRescueMap() - if err != nil { - result.Err = err - return result - } - - _, result.Err = client.Post(actionURL(client, id), reqBody, &result.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - - return result -} - -// ResetMetadataOptsBuilder allows extensions to add additional parameters to the -// Reset request. -type ResetMetadataOptsBuilder interface { - ToMetadataResetMap() (map[string]interface{}, error) -} - -// MetadataOpts is a map that contains key-value pairs. 
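// A minimal sketch of the resize workflow described above: request the new flavor, wait for
// RESIZE_VERIFY, then confirm (or revert). The flavor reference is illustrative and the polling
// step is left as a comment.
package example

import (
	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack/compute/v2/servers"
)

// resizeAndConfirm moves a server to a new flavor and commits the change.
func resizeAndConfirm(client *gophercloud.ServiceClient, id, flavorRef string) error {
	if res := servers.Resize(client, id, servers.ResizeOpts{FlavorRef: flavorRef}); res.Err != nil {
		return res.Err
	}
	// ... poll servers.Get(client, id) until Status reaches RESIZE_VERIFY ...

	// Commit the new flavor; call servers.RevertResize(client, id) instead to roll back.
	return servers.ConfirmResize(client, id).Err
}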
-type MetadataOpts map[string]string - -// ToMetadataResetMap assembles a body for a Reset request based on the contents of a MetadataOpts. -func (opts MetadataOpts) ToMetadataResetMap() (map[string]interface{}, error) { - return map[string]interface{}{"metadata": opts}, nil -} - -// ToMetadataUpdateMap assembles a body for an Update request based on the contents of a MetadataOpts. -func (opts MetadataOpts) ToMetadataUpdateMap() (map[string]interface{}, error) { - return map[string]interface{}{"metadata": opts}, nil -} - -// ResetMetadata will create multiple new key-value pairs for the given server ID. -// Note: Using this operation will erase any already-existing metadata and create -// the new metadata provided. To keep any already-existing metadata, use the -// UpdateMetadatas or UpdateMetadata function. -func ResetMetadata(client *gophercloud.ServiceClient, id string, opts ResetMetadataOptsBuilder) ResetMetadataResult { - var res ResetMetadataResult - metadata, err := opts.ToMetadataResetMap() - if err != nil { - res.Err = err - return res - } - _, res.Err = client.Put(metadataURL(client, id), metadata, &res.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - return res -} - -// Metadata requests all the metadata for the given server ID. -func Metadata(client *gophercloud.ServiceClient, id string) GetMetadataResult { - var res GetMetadataResult - _, res.Err = client.Get(metadataURL(client, id), &res.Body, nil) - return res -} - -// UpdateMetadataOptsBuilder allows extensions to add additional parameters to the -// Create request. -type UpdateMetadataOptsBuilder interface { - ToMetadataUpdateMap() (map[string]interface{}, error) -} - -// UpdateMetadata updates (or creates) all the metadata specified by opts for the given server ID. -// This operation does not affect already-existing metadata that is not specified -// by opts. -func UpdateMetadata(client *gophercloud.ServiceClient, id string, opts UpdateMetadataOptsBuilder) UpdateMetadataResult { - var res UpdateMetadataResult - metadata, err := opts.ToMetadataUpdateMap() - if err != nil { - res.Err = err - return res - } - _, res.Err = client.Post(metadataURL(client, id), metadata, &res.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - return res -} - -// MetadatumOptsBuilder allows extensions to add additional parameters to the -// Create request. -type MetadatumOptsBuilder interface { - ToMetadatumCreateMap() (map[string]interface{}, string, error) -} - -// MetadatumOpts is a map of length one that contains a key-value pair. -type MetadatumOpts map[string]string - -// ToMetadatumCreateMap assembles a body for a Create request based on the contents of a MetadataumOpts. -func (opts MetadatumOpts) ToMetadatumCreateMap() (map[string]interface{}, string, error) { - if len(opts) != 1 { - return nil, "", errors.New("CreateMetadatum operation must have 1 and only 1 key-value pair.") - } - metadatum := map[string]interface{}{"meta": opts} - var key string - for k := range metadatum["meta"].(MetadatumOpts) { - key = k - } - return metadatum, key, nil -} - -// CreateMetadatum will create or update the key-value pair with the given key for the given server ID. 
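// A minimal sketch of the metadata helpers above: UpdateMetadata merges keys into the existing
// set, ResetMetadata replaces the whole map, and CreateMetadatum sets a single key. Keys and
// values are illustrative.
package example

import (
	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack/compute/v2/servers"
)

// tagServer demonstrates the non-destructive and destructive metadata calls side by side.
func tagServer(client *gophercloud.ServiceClient, id string) error {
	// Merge: metadata keys not mentioned here are preserved.
	if res := servers.UpdateMetadata(client, id, servers.MetadataOpts{"role": "worker"}); res.Err != nil {
		return res.Err
	}
	// Single key: MetadatumOpts must contain exactly one pair.
	if res := servers.CreateMetadatum(client, id, servers.MetadatumOpts{"owner": "ops"}); res.Err != nil {
		return res.Err
	}
	// Replace: after this call only "env" remains on the server.
	return servers.ResetMetadata(client, id, servers.MetadataOpts{"env": "staging"}).Err
}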
-func CreateMetadatum(client *gophercloud.ServiceClient, id string, opts MetadatumOptsBuilder) CreateMetadatumResult { - var res CreateMetadatumResult - metadatum, key, err := opts.ToMetadatumCreateMap() - if err != nil { - res.Err = err - return res - } - - _, res.Err = client.Put(metadatumURL(client, id, key), metadatum, &res.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - return res -} - -// Metadatum requests the key-value pair with the given key for the given server ID. -func Metadatum(client *gophercloud.ServiceClient, id, key string) GetMetadatumResult { - var res GetMetadatumResult - _, res.Err = client.Request("GET", metadatumURL(client, id, key), gophercloud.RequestOpts{ - JSONResponse: &res.Body, - }) - return res -} - -// DeleteMetadatum will delete the key-value pair with the given key for the given server ID. -func DeleteMetadatum(client *gophercloud.ServiceClient, id, key string) DeleteMetadatumResult { - var res DeleteMetadatumResult - _, res.Err = client.Delete(metadatumURL(client, id, key), nil) - return res -} - -// ListAddresses makes a request against the API to list the servers IP addresses. -func ListAddresses(client *gophercloud.ServiceClient, id string) pagination.Pager { - createPageFn := func(r pagination.PageResult) pagination.Page { - return AddressPage{pagination.SinglePageBase(r)} - } - return pagination.NewPager(client, listAddressesURL(client, id), createPageFn) -} - -// ListAddressesByNetwork makes a request against the API to list the servers IP addresses -// for the given network. -func ListAddressesByNetwork(client *gophercloud.ServiceClient, id, network string) pagination.Pager { - createPageFn := func(r pagination.PageResult) pagination.Page { - return NetworkAddressPage{pagination.SinglePageBase(r)} - } - return pagination.NewPager(client, listAddressesByNetworkURL(client, id, network), createPageFn) -} - -type CreateImageOpts struct { - // Name [required] of the image/snapshot - Name string - // Metadata [optional] contains key-value pairs (up to 255 bytes each) to attach to the created image. - Metadata map[string]string -} - -type CreateImageOptsBuilder interface { - ToServerCreateImageMap() (map[string]interface{}, error) -} - -// ToServerCreateImageMap formats a CreateImageOpts structure into a request body. -func (opts CreateImageOpts) ToServerCreateImageMap() (map[string]interface{}, error) { - var err error - img := make(map[string]interface{}) - if opts.Name == "" { - return nil, fmt.Errorf("Cannot create a server image without a name") - } - img["name"] = opts.Name - if opts.Metadata != nil { - img["metadata"] = opts.Metadata - } - createImage := make(map[string]interface{}) - createImage["createImage"] = img - return createImage, err -} - -// CreateImage makes a request against the nova API to schedule an image to be created of the server -func CreateImage(client *gophercloud.ServiceClient, serverId string, opts CreateImageOptsBuilder) CreateImageResult { - var res CreateImageResult - reqBody, err := opts.ToServerCreateImageMap() - if err != nil { - res.Err = err - return res - } - response, err := client.Post(actionURL(client, serverId), reqBody, nil, &gophercloud.RequestOpts{ - OkCodes: []int{202}, - }) - res.Err = err - res.Header = response.Header - return res -} - -// IDFromName is a convienience function that returns a server's ID given its name. 
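// A minimal sketch of snapshotting a server with CreateImage and recovering the new image's ID
// from the Location header via ExtractImageID; the snapshot name is illustrative.
package example

import (
	"fmt"

	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/openstack/compute/v2/servers"
)

// snapshotServer schedules an image of the given server and returns the resulting image ID.
func snapshotServer(client *gophercloud.ServiceClient, serverID string) (string, error) {
	res := servers.CreateImage(client, serverID, servers.CreateImageOpts{
		Name:     "nightly-snapshot",
		Metadata: map[string]string{"source": serverID},
	})
	if res.Err != nil {
		return "", res.Err
	}
	imageID, err := res.ExtractImageID()
	if err != nil {
		return "", err
	}
	fmt.Println("image scheduled:", imageID)
	return imageID, nil
}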
-func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { - serverCount := 0 - serverID := "" - if name == "" { - return "", fmt.Errorf("A server name must be provided.") - } - pager := List(client, nil) - pager.EachPage(func(page pagination.Page) (bool, error) { - serverList, err := ExtractServers(page) - if err != nil { - return false, err - } - - for _, s := range serverList { - if s.Name == name { - serverCount++ - serverID = s.ID - } - } - return true, nil - }) - - switch serverCount { - case 0: - return "", fmt.Errorf("Unable to find server: %s", name) - case 1: - return serverID, nil - default: - return "", fmt.Errorf("Found %d servers matching %s", serverCount, name) - } -} - -// GetPassword makes a request against the nova API to get the encrypted administrative password. -func GetPassword(client *gophercloud.ServiceClient, serverId string) GetPasswordResult { - var res GetPasswordResult - _, res.Err = client.Request("GET", passwordURL(client, serverId), gophercloud.RequestOpts{ - JSONResponse: &res.Body, - }) - return res -} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/results.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/results.go deleted file mode 100644 index 406f68935eae..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/results.go +++ /dev/null @@ -1,415 +0,0 @@ -package servers - -import ( - "crypto/rsa" - "encoding/base64" - "fmt" - "net/url" - "path" - "reflect" - - "github.com/mitchellh/mapstructure" - "github.com/rackspace/gophercloud" - "github.com/rackspace/gophercloud/pagination" -) - -type serverResult struct { - gophercloud.Result -} - -// Extract interprets any serverResult as a Server, if possible. -func (r serverResult) Extract() (*Server, error) { - if r.Err != nil { - return nil, r.Err - } - - var response struct { - Server Server `mapstructure:"server"` - } - - config := &mapstructure.DecoderConfig{ - DecodeHook: toMapFromString, - Result: &response, - } - decoder, err := mapstructure.NewDecoder(config) - if err != nil { - return nil, err - } - - err = decoder.Decode(r.Body) - if err != nil { - return nil, err - } - - return &response.Server, nil -} - -// CreateResult temporarily contains the response from a Create call. -type CreateResult struct { - serverResult -} - -// GetResult temporarily contains the response from a Get call. -type GetResult struct { - serverResult -} - -// UpdateResult temporarily contains the response from an Update call. -type UpdateResult struct { - serverResult -} - -// DeleteResult temporarily contains the response from a Delete call. -type DeleteResult struct { - gophercloud.ErrResult -} - -// RebuildResult temporarily contains the response from a Rebuild call. -type RebuildResult struct { - serverResult -} - -// ActionResult represents the result of server action operations, like reboot -type ActionResult struct { - gophercloud.ErrResult -} - -// RescueResult represents the result of a server rescue operation -type RescueResult struct { - ActionResult -} - -// CreateImageResult represents the result of an image creation operation -type CreateImageResult struct { - gophercloud.Result -} - -// GetPasswordResult represent the result of a get os-server-password operation. -type GetPasswordResult struct { - gophercloud.Result -} - -// ExtractPassword gets the encrypted password. -// If privateKey != nil the password is decrypted with the private key. 
-// If privateKey == nil the encrypted password is returned and can be decrypted with: -// echo '' | base64 -D | openssl rsautl -decrypt -inkey -func (r GetPasswordResult) ExtractPassword(privateKey *rsa.PrivateKey) (string, error) { - - if r.Err != nil { - return "", r.Err - } - - var response struct { - Password string `mapstructure:"password"` - } - - err := mapstructure.Decode(r.Body, &response) - if err == nil && privateKey != nil && response.Password != "" { - return decryptPassword(response.Password, privateKey) - } - return response.Password, err -} - -func decryptPassword(encryptedPassword string, privateKey *rsa.PrivateKey) (string, error) { - b64EncryptedPassword := make([]byte, base64.StdEncoding.DecodedLen(len(encryptedPassword))) - - n, err := base64.StdEncoding.Decode(b64EncryptedPassword, []byte(encryptedPassword)) - if err != nil { - return "", fmt.Errorf("Failed to base64 decode encrypted password: %s", err) - } - password, err := rsa.DecryptPKCS1v15(nil, privateKey, b64EncryptedPassword[0:n]) - if err != nil { - return "", fmt.Errorf("Failed to decrypt password: %s", err) - } - - return string(password), nil -} - -// ExtractImageID gets the ID of the newly created server image from the header -func (res CreateImageResult) ExtractImageID() (string, error) { - if res.Err != nil { - return "", res.Err - } - // Get the image id from the header - u, err := url.ParseRequestURI(res.Header.Get("Location")) - if err != nil { - return "", fmt.Errorf("Failed to parse the image id: %s", err.Error()) - } - imageId := path.Base(u.Path) - if imageId == "." || imageId == "/" { - return "", fmt.Errorf("Failed to parse the ID of newly created image: %s", u) - } - return imageId, nil -} - -// Extract interprets any RescueResult as an AdminPass, if possible. -func (r RescueResult) Extract() (string, error) { - if r.Err != nil { - return "", r.Err - } - - var response struct { - AdminPass string `mapstructure:"adminPass"` - } - - err := mapstructure.Decode(r.Body, &response) - return response.AdminPass, err -} - -// Server exposes only the standard OpenStack fields corresponding to a given server on the user's account. -type Server struct { - // ID uniquely identifies this server amongst all other servers, including those not accessible to the current tenant. - ID string - - // TenantID identifies the tenant owning this server resource. - TenantID string `mapstructure:"tenant_id"` - - // UserID uniquely identifies the user account owning the tenant. - UserID string `mapstructure:"user_id"` - - // Name contains the human-readable name for the server. - Name string - - // Updated and Created contain ISO-8601 timestamps of when the state of the server last changed, and when it was created. - Updated string - Created string - - HostID string - - // Status contains the current operational status of the server, such as IN_PROGRESS or ACTIVE. - Status string - - // Progress ranges from 0..100. - // A request made against the server completes only once Progress reaches 100. - Progress int - - // AccessIPv4 and AccessIPv6 contain the IP addresses of the server, suitable for remote access for administration. - AccessIPv4, AccessIPv6 string - - // Image refers to a JSON object, which itself indicates the OS image used to deploy the server. - Image map[string]interface{} - - // Flavor refers to a JSON object, which itself indicates the hardware configuration of the deployed server. - Flavor map[string]interface{} - - // Addresses includes a list of all IP addresses assigned to the server, keyed by pool. 
- Addresses map[string]interface{} - - // Metadata includes a list of all user-specified key-value pairs attached to the server. - Metadata map[string]interface{} - - // Links includes HTTP references to the itself, useful for passing along to other APIs that might want a server reference. - Links []interface{} - - // KeyName indicates which public key was injected into the server on launch. - KeyName string `json:"key_name" mapstructure:"key_name"` - - // AdminPass will generally be empty (""). However, it will contain the administrative password chosen when provisioning a new server without a set AdminPass setting in the first place. - // Note that this is the ONLY time this field will be valid. - AdminPass string `json:"adminPass" mapstructure:"adminPass"` - - // SecurityGroups includes the security groups that this instance has applied to it - SecurityGroups []map[string]interface{} `json:"security_groups" mapstructure:"security_groups"` -} - -// ServerPage abstracts the raw results of making a List() request against the API. -// As OpenStack extensions may freely alter the response bodies of structures returned to the client, you may only safely access the -// data provided through the ExtractServers call. -type ServerPage struct { - pagination.LinkedPageBase -} - -// IsEmpty returns true if a page contains no Server results. -func (page ServerPage) IsEmpty() (bool, error) { - servers, err := ExtractServers(page) - if err != nil { - return true, err - } - return len(servers) == 0, nil -} - -// NextPageURL uses the response's embedded link reference to navigate to the next page of results. -func (page ServerPage) NextPageURL() (string, error) { - type resp struct { - Links []gophercloud.Link `mapstructure:"servers_links"` - } - - var r resp - err := mapstructure.Decode(page.Body, &r) - if err != nil { - return "", err - } - - return gophercloud.ExtractNextURL(r.Links) -} - -// ExtractServers interprets the results of a single page from a List() call, producing a slice of Server entities. -func ExtractServers(page pagination.Page) ([]Server, error) { - casted := page.(ServerPage).Body - - var response struct { - Servers []Server `mapstructure:"servers"` - } - - config := &mapstructure.DecoderConfig{ - DecodeHook: toMapFromString, - Result: &response, - } - decoder, err := mapstructure.NewDecoder(config) - if err != nil { - return nil, err - } - - err = decoder.Decode(casted) - - return response.Servers, err -} - -// MetadataResult contains the result of a call for (potentially) multiple key-value pairs. -type MetadataResult struct { - gophercloud.Result -} - -// GetMetadataResult temporarily contains the response from a metadata Get call. -type GetMetadataResult struct { - MetadataResult -} - -// ResetMetadataResult temporarily contains the response from a metadata Reset call. -type ResetMetadataResult struct { - MetadataResult -} - -// UpdateMetadataResult temporarily contains the response from a metadata Update call. -type UpdateMetadataResult struct { - MetadataResult -} - -// MetadatumResult contains the result of a call for individual a single key-value pair. -type MetadatumResult struct { - gophercloud.Result -} - -// GetMetadatumResult temporarily contains the response from a metadatum Get call. -type GetMetadatumResult struct { - MetadatumResult -} - -// CreateMetadatumResult temporarily contains the response from a metadatum Create call. 
-type CreateMetadatumResult struct { - MetadatumResult -} - -// DeleteMetadatumResult temporarily contains the response from a metadatum Delete call. -type DeleteMetadatumResult struct { - gophercloud.ErrResult -} - -// Extract interprets any MetadataResult as a Metadata, if possible. -func (r MetadataResult) Extract() (map[string]string, error) { - if r.Err != nil { - return nil, r.Err - } - - var response struct { - Metadata map[string]string `mapstructure:"metadata"` - } - - err := mapstructure.Decode(r.Body, &response) - return response.Metadata, err -} - -// Extract interprets any MetadatumResult as a Metadatum, if possible. -func (r MetadatumResult) Extract() (map[string]string, error) { - if r.Err != nil { - return nil, r.Err - } - - var response struct { - Metadatum map[string]string `mapstructure:"meta"` - } - - err := mapstructure.Decode(r.Body, &response) - return response.Metadatum, err -} - -func toMapFromString(from reflect.Kind, to reflect.Kind, data interface{}) (interface{}, error) { - if (from == reflect.String) && (to == reflect.Map) { - return map[string]interface{}{}, nil - } - return data, nil -} - -// Address represents an IP address. -type Address struct { - Version int `mapstructure:"version"` - Address string `mapstructure:"addr"` -} - -// AddressPage abstracts the raw results of making a ListAddresses() request against the API. -// As OpenStack extensions may freely alter the response bodies of structures returned -// to the client, you may only safely access the data provided through the ExtractAddresses call. -type AddressPage struct { - pagination.SinglePageBase -} - -// IsEmpty returns true if an AddressPage contains no networks. -func (r AddressPage) IsEmpty() (bool, error) { - addresses, err := ExtractAddresses(r) - if err != nil { - return true, err - } - return len(addresses) == 0, nil -} - -// ExtractAddresses interprets the results of a single page from a ListAddresses() call, -// producing a map of addresses. -func ExtractAddresses(page pagination.Page) (map[string][]Address, error) { - casted := page.(AddressPage).Body - - var response struct { - Addresses map[string][]Address `mapstructure:"addresses"` - } - - err := mapstructure.Decode(casted, &response) - if err != nil { - return nil, err - } - - return response.Addresses, err -} - -// NetworkAddressPage abstracts the raw results of making a ListAddressesByNetwork() request against the API. -// As OpenStack extensions may freely alter the response bodies of structures returned -// to the client, you may only safely access the data provided through the ExtractAddresses call. -type NetworkAddressPage struct { - pagination.SinglePageBase -} - -// IsEmpty returns true if a NetworkAddressPage contains no addresses. -func (r NetworkAddressPage) IsEmpty() (bool, error) { - addresses, err := ExtractNetworkAddresses(r) - if err != nil { - return true, err - } - return len(addresses) == 0, nil -} - -// ExtractNetworkAddresses interprets the results of a single page from a ListAddressesByNetwork() call, -// producing a slice of addresses. 
-func ExtractNetworkAddresses(page pagination.Page) ([]Address, error) { - casted := page.(NetworkAddressPage).Body - - var response map[string][]Address - err := mapstructure.Decode(casted, &response) - if err != nil { - return nil, err - } - - var key string - for k := range response { - key = k - } - - return response[key], err -} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/urls.go deleted file mode 100644 index d51fcbe6cfa5..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/urls.go +++ /dev/null @@ -1,51 +0,0 @@ -package servers - -import "github.com/rackspace/gophercloud" - -func createURL(client *gophercloud.ServiceClient) string { - return client.ServiceURL("servers") -} - -func listURL(client *gophercloud.ServiceClient) string { - return createURL(client) -} - -func listDetailURL(client *gophercloud.ServiceClient) string { - return client.ServiceURL("servers", "detail") -} - -func deleteURL(client *gophercloud.ServiceClient, id string) string { - return client.ServiceURL("servers", id) -} - -func getURL(client *gophercloud.ServiceClient, id string) string { - return deleteURL(client, id) -} - -func updateURL(client *gophercloud.ServiceClient, id string) string { - return deleteURL(client, id) -} - -func actionURL(client *gophercloud.ServiceClient, id string) string { - return client.ServiceURL("servers", id, "action") -} - -func metadatumURL(client *gophercloud.ServiceClient, id, key string) string { - return client.ServiceURL("servers", id, "metadata", key) -} - -func metadataURL(client *gophercloud.ServiceClient, id string) string { - return client.ServiceURL("servers", id, "metadata") -} - -func listAddressesURL(client *gophercloud.ServiceClient, id string) string { - return client.ServiceURL("servers", id, "ips") -} - -func listAddressesByNetworkURL(client *gophercloud.ServiceClient, id, network string) string { - return client.ServiceURL("servers", id, "ips", network) -} - -func passwordURL(client *gophercloud.ServiceClient, id string) string { - return client.ServiceURL("servers", id, "os-server-password") -} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/util.go b/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/util.go deleted file mode 100644 index e6baf74165bb..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers/util.go +++ /dev/null @@ -1,20 +0,0 @@ -package servers - -import "github.com/rackspace/gophercloud" - -// WaitForStatus will continually poll a server until it successfully transitions to a specified -// status. It will do this for at most the number of seconds specified. 
-func WaitForStatus(c *gophercloud.ServiceClient, id, status string, secs int) error { - return gophercloud.WaitFor(secs, func() (bool, error) { - current, err := Get(c, id).Extract() - if err != nil { - return false, err - } - - if current.Status == status { - return true, nil - } - - return false, nil - }) -} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/endpoint_location.go b/vendor/github.com/rackspace/gophercloud/openstack/endpoint_location.go deleted file mode 100644 index 29d02c43f929..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/endpoint_location.go +++ /dev/null @@ -1,91 +0,0 @@ -package openstack - -import ( - "fmt" - - "github.com/rackspace/gophercloud" - tokens2 "github.com/rackspace/gophercloud/openstack/identity/v2/tokens" - tokens3 "github.com/rackspace/gophercloud/openstack/identity/v3/tokens" -) - -// V2EndpointURL discovers the endpoint URL for a specific service from a ServiceCatalog acquired -// during the v2 identity service. The specified EndpointOpts are used to identify a unique, -// unambiguous endpoint to return. It's an error both when multiple endpoints match the provided -// criteria and when none do. The minimum that can be specified is a Type, but you will also often -// need to specify a Name and/or a Region depending on what's available on your OpenStack -// deployment. -func V2EndpointURL(catalog *tokens2.ServiceCatalog, opts gophercloud.EndpointOpts) (string, error) { - // Extract Endpoints from the catalog entries that match the requested Type, Name if provided, and Region if provided. - var endpoints = make([]tokens2.Endpoint, 0, 1) - for _, entry := range catalog.Entries { - if (entry.Type == opts.Type) && (opts.Name == "" || entry.Name == opts.Name) { - for _, endpoint := range entry.Endpoints { - if opts.Region == "" || endpoint.Region == opts.Region { - endpoints = append(endpoints, endpoint) - } - } - } - } - - // Report an error if the options were ambiguous. - if len(endpoints) > 1 { - return "", fmt.Errorf("Discovered %d matching endpoints: %#v", len(endpoints), endpoints) - } - - // Extract the appropriate URL from the matching Endpoint. - for _, endpoint := range endpoints { - switch opts.Availability { - case gophercloud.AvailabilityPublic: - return gophercloud.NormalizeURL(endpoint.PublicURL), nil - case gophercloud.AvailabilityInternal: - return gophercloud.NormalizeURL(endpoint.InternalURL), nil - case gophercloud.AvailabilityAdmin: - return gophercloud.NormalizeURL(endpoint.AdminURL), nil - default: - return "", fmt.Errorf("Unexpected availability in endpoint query: %s", opts.Availability) - } - } - - // Report an error if there were no matching endpoints. - return "", gophercloud.ErrEndpointNotFound -} - -// V3EndpointURL discovers the endpoint URL for a specific service from a Catalog acquired -// during the v3 identity service. The specified EndpointOpts are used to identify a unique, -// unambiguous endpoint to return. It's an error both when multiple endpoints match the provided -// criteria and when none do. The minimum that can be specified is a Type, but you will also often -// need to specify a Name and/or a Region depending on what's available on your OpenStack -// deployment. -func V3EndpointURL(catalog *tokens3.ServiceCatalog, opts gophercloud.EndpointOpts) (string, error) { - // Extract Endpoints from the catalog entries that match the requested Type, Interface, - // Name if provided, and Region if provided. 
- var endpoints = make([]tokens3.Endpoint, 0, 1) - for _, entry := range catalog.Entries { - if (entry.Type == opts.Type) && (opts.Name == "" || entry.Name == opts.Name) { - for _, endpoint := range entry.Endpoints { - if opts.Availability != gophercloud.AvailabilityAdmin && - opts.Availability != gophercloud.AvailabilityPublic && - opts.Availability != gophercloud.AvailabilityInternal { - return "", fmt.Errorf("Unexpected availability in endpoint query: %s", opts.Availability) - } - if (opts.Availability == gophercloud.Availability(endpoint.Interface)) && - (opts.Region == "" || endpoint.Region == opts.Region) { - endpoints = append(endpoints, endpoint) - } - } - } - } - - // Report an error if the options were ambiguous. - if len(endpoints) > 1 { - return "", fmt.Errorf("Discovered %d matching endpoints: %#v", len(endpoints), endpoints) - } - - // Extract the URL from the matching Endpoint. - for _, endpoint := range endpoints { - return gophercloud.NormalizeURL(endpoint.URL), nil - } - - // Report an error if there were no matching endpoints. - return "", gophercloud.ErrEndpointNotFound -} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/doc.go deleted file mode 100644 index 0c2d49d5670e..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -// Package tenants provides information and interaction with the -// tenants API resource for the OpenStack Identity service. -// -// See http://developer.openstack.org/api-ref-identity-v2.html#identity-auth-v2 -// and http://developer.openstack.org/api-ref-identity-v2.html#admin-tenants -// for more information. -package tenants diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/fixtures.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/fixtures.go deleted file mode 100644 index 7f044ac3b2d4..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/fixtures.go +++ /dev/null @@ -1,65 +0,0 @@ -// +build fixtures - -package tenants - -import ( - "fmt" - "net/http" - "testing" - - th "github.com/rackspace/gophercloud/testhelper" - "github.com/rackspace/gophercloud/testhelper/client" -) - -// ListOutput provides a single page of Tenant results. -const ListOutput = ` -{ - "tenants": [ - { - "id": "1234", - "name": "Red Team", - "description": "The team that is red", - "enabled": true - }, - { - "id": "9876", - "name": "Blue Team", - "description": "The team that is blue", - "enabled": false - } - ] -} -` - -// RedTeam is a Tenant fixture. -var RedTeam = Tenant{ - ID: "1234", - Name: "Red Team", - Description: "The team that is red", - Enabled: true, -} - -// BlueTeam is a Tenant fixture. -var BlueTeam = Tenant{ - ID: "9876", - Name: "Blue Team", - Description: "The team that is blue", - Enabled: false, -} - -// ExpectedTenantSlice is the slice of tenants expected to be returned from ListOutput. -var ExpectedTenantSlice = []Tenant{RedTeam, BlueTeam} - -// HandleListTenantsSuccessfully creates an HTTP handler at `/tenants` on the test handler mux that -// responds with a list of two tenants. 
-func HandleListTenantsSuccessfully(t *testing.T) { - th.Mux.HandleFunc("/tenants", func(w http.ResponseWriter, r *http.Request) { - th.TestMethod(t, r, "GET") - th.TestHeader(t, r, "Accept", "application/json") - th.TestHeader(t, r, "X-Auth-Token", client.TokenID) - - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - fmt.Fprintf(w, ListOutput) - }) -} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/requests.go deleted file mode 100644 index 5a359f5c9e27..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/requests.go +++ /dev/null @@ -1,33 +0,0 @@ -package tenants - -import ( - "github.com/rackspace/gophercloud" - "github.com/rackspace/gophercloud/pagination" -) - -// ListOpts filters the Tenants that are returned by the List call. -type ListOpts struct { - // Marker is the ID of the last Tenant on the previous page. - Marker string `q:"marker"` - - // Limit specifies the page size. - Limit int `q:"limit"` -} - -// List enumerates the Tenants to which the current token has access. -func List(client *gophercloud.ServiceClient, opts *ListOpts) pagination.Pager { - createPage := func(r pagination.PageResult) pagination.Page { - return TenantPage{pagination.LinkedPageBase{PageResult: r}} - } - - url := listURL(client) - if opts != nil { - q, err := gophercloud.BuildQueryString(opts) - if err != nil { - return pagination.Pager{Err: err} - } - url += q.String() - } - - return pagination.NewPager(client, url, createPage) -} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/results.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/results.go deleted file mode 100644 index c1220c384bcb..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/results.go +++ /dev/null @@ -1,62 +0,0 @@ -package tenants - -import ( - "github.com/mitchellh/mapstructure" - "github.com/rackspace/gophercloud" - "github.com/rackspace/gophercloud/pagination" -) - -// Tenant is a grouping of users in the identity service. -type Tenant struct { - // ID is a unique identifier for this tenant. - ID string `mapstructure:"id"` - - // Name is a friendlier user-facing name for this tenant. - Name string `mapstructure:"name"` - - // Description is a human-readable explanation of this Tenant's purpose. - Description string `mapstructure:"description"` - - // Enabled indicates whether or not a tenant is active. - Enabled bool `mapstructure:"enabled"` -} - -// TenantPage is a single page of Tenant results. -type TenantPage struct { - pagination.LinkedPageBase -} - -// IsEmpty determines whether or not a page of Tenants contains any results. -func (page TenantPage) IsEmpty() (bool, error) { - tenants, err := ExtractTenants(page) - if err != nil { - return false, err - } - return len(tenants) == 0, nil -} - -// NextPageURL extracts the "next" link from the tenants_links section of the result. -func (page TenantPage) NextPageURL() (string, error) { - type resp struct { - Links []gophercloud.Link `mapstructure:"tenants_links"` - } - - var r resp - err := mapstructure.Decode(page.Body, &r) - if err != nil { - return "", err - } - - return gophercloud.ExtractNextURL(r.Links) -} - -// ExtractTenants returns a slice of Tenants contained in a single page of results. 
-func ExtractTenants(page pagination.Page) ([]Tenant, error) { - casted := page.(TenantPage).Body - var response struct { - Tenants []Tenant `mapstructure:"tenants"` - } - - err := mapstructure.Decode(casted, &response) - return response.Tenants, err -} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/urls.go deleted file mode 100644 index 1dd6ce023f9e..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tenants/urls.go +++ /dev/null @@ -1,7 +0,0 @@ -package tenants - -import "github.com/rackspace/gophercloud" - -func listURL(client *gophercloud.ServiceClient) string { - return client.ServiceURL("tenants") -} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/doc.go deleted file mode 100644 index 31cacc5e17bd..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// Package tokens provides information and interaction with the token API -// resource for the OpenStack Identity service. -// For more information, see: -// http://developer.openstack.org/api-ref-identity-v2.html#identity-auth-v2 -package tokens diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/errors.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/errors.go deleted file mode 100644 index 3dfdc08dbedd..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/errors.go +++ /dev/null @@ -1,30 +0,0 @@ -package tokens - -import ( - "errors" - "fmt" -) - -var ( - // ErrUserIDProvided is returned if you attempt to authenticate with a UserID. - ErrUserIDProvided = unacceptedAttributeErr("UserID") - - // ErrAPIKeyProvided is returned if you attempt to authenticate with an APIKey. - ErrAPIKeyProvided = unacceptedAttributeErr("APIKey") - - // ErrDomainIDProvided is returned if you attempt to authenticate with a DomainID. - ErrDomainIDProvided = unacceptedAttributeErr("DomainID") - - // ErrDomainNameProvided is returned if you attempt to authenticate with a DomainName. - ErrDomainNameProvided = unacceptedAttributeErr("DomainName") - - // ErrUsernameRequired is returned if you attempt to authenticate without a Username. - ErrUsernameRequired = errors.New("You must supply a Username in your AuthOptions.") - - // ErrPasswordRequired is returned if you don't provide a password. - ErrPasswordRequired = errors.New("Please supply a Password in your AuthOptions.") -) - -func unacceptedAttributeErr(attribute string) error { - return fmt.Errorf("The base Identity V2 API does not accept authentication by %s", attribute) -} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/fixtures.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/fixtures.go deleted file mode 100644 index 6245259e1c5e..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/fixtures.go +++ /dev/null @@ -1,195 +0,0 @@ -// +build fixtures - -package tokens - -import ( - "fmt" - "net/http" - "testing" - "time" - - "github.com/rackspace/gophercloud/openstack/identity/v2/tenants" - th "github.com/rackspace/gophercloud/testhelper" - thclient "github.com/rackspace/gophercloud/testhelper/client" -) - -// ExpectedToken is the token that should be parsed from TokenCreationResponse. 
-var ExpectedToken = &Token{ - ID: "aaaabbbbccccdddd", - ExpiresAt: time.Date(2014, time.January, 31, 15, 30, 58, 0, time.UTC), - Tenant: tenants.Tenant{ - ID: "fc394f2ab2df4114bde39905f800dc57", - Name: "test", - Description: "There are many tenants. This one is yours.", - Enabled: true, - }, -} - -// ExpectedServiceCatalog is the service catalog that should be parsed from TokenCreationResponse. -var ExpectedServiceCatalog = &ServiceCatalog{ - Entries: []CatalogEntry{ - CatalogEntry{ - Name: "inscrutablewalrus", - Type: "something", - Endpoints: []Endpoint{ - Endpoint{ - PublicURL: "http://something0:1234/v2/", - Region: "region0", - }, - Endpoint{ - PublicURL: "http://something1:1234/v2/", - Region: "region1", - }, - }, - }, - CatalogEntry{ - Name: "arbitrarypenguin", - Type: "else", - Endpoints: []Endpoint{ - Endpoint{ - PublicURL: "http://else0:4321/v3/", - Region: "region0", - }, - }, - }, - }, -} - -// ExpectedUser is the token that should be parsed from TokenGetResponse. -var ExpectedUser = &User{ - ID: "a530fefc3d594c4ba2693a4ecd6be74e", - Name: "apiserver", - Roles: []Role{{"member"}, {"service"}}, - UserName: "apiserver", -} - -// TokenCreationResponse is a JSON response that contains ExpectedToken and ExpectedServiceCatalog. -const TokenCreationResponse = ` -{ - "access": { - "token": { - "issued_at": "2014-01-30T15:30:58.000000Z", - "expires": "2014-01-31T15:30:58Z", - "id": "aaaabbbbccccdddd", - "tenant": { - "description": "There are many tenants. This one is yours.", - "enabled": true, - "id": "fc394f2ab2df4114bde39905f800dc57", - "name": "test" - } - }, - "serviceCatalog": [ - { - "endpoints": [ - { - "publicURL": "http://something0:1234/v2/", - "region": "region0" - }, - { - "publicURL": "http://something1:1234/v2/", - "region": "region1" - } - ], - "type": "something", - "name": "inscrutablewalrus" - }, - { - "endpoints": [ - { - "publicURL": "http://else0:4321/v3/", - "region": "region0" - } - ], - "type": "else", - "name": "arbitrarypenguin" - } - ] - } -} -` - -// TokenGetResponse is a JSON response that contains ExpectedToken and ExpectedUser. -const TokenGetResponse = ` -{ - "access": { - "token": { - "issued_at": "2014-01-30T15:30:58.000000Z", - "expires": "2014-01-31T15:30:58Z", - "id": "aaaabbbbccccdddd", - "tenant": { - "description": "There are many tenants. This one is yours.", - "enabled": true, - "id": "fc394f2ab2df4114bde39905f800dc57", - "name": "test" - } - }, - "serviceCatalog": [], - "user": { - "id": "a530fefc3d594c4ba2693a4ecd6be74e", - "name": "apiserver", - "roles": [ - { - "name": "member" - }, - { - "name": "service" - } - ], - "roles_links": [], - "username": "apiserver" - } - } -}` - -// HandleTokenPost expects a POST against a /tokens handler, ensures that the request body has been -// constructed properly given certain auth options, and returns the result. -func HandleTokenPost(t *testing.T, requestJSON string) { - th.Mux.HandleFunc("/tokens", func(w http.ResponseWriter, r *http.Request) { - th.TestMethod(t, r, "POST") - th.TestHeader(t, r, "Content-Type", "application/json") - th.TestHeader(t, r, "Accept", "application/json") - if requestJSON != "" { - th.TestJSONRequest(t, r, requestJSON) - } - - w.WriteHeader(http.StatusOK) - fmt.Fprintf(w, TokenCreationResponse) - }) -} - -// HandleTokenGet expects a Get against a /tokens handler, ensures that the request body has been -// constructed properly given certain auth options, and returns the result. 
-func HandleTokenGet(t *testing.T, token string) { - th.Mux.HandleFunc("/tokens/"+token, func(w http.ResponseWriter, r *http.Request) { - th.TestMethod(t, r, "GET") - th.TestHeader(t, r, "Accept", "application/json") - th.TestHeader(t, r, "X-Auth-Token", thclient.TokenID) - - w.WriteHeader(http.StatusOK) - fmt.Fprintf(w, TokenGetResponse) - }) -} - -// IsSuccessful ensures that a CreateResult was successful and contains the correct token and -// service catalog. -func IsSuccessful(t *testing.T, result CreateResult) { - token, err := result.ExtractToken() - th.AssertNoErr(t, err) - th.CheckDeepEquals(t, ExpectedToken, token) - - serviceCatalog, err := result.ExtractServiceCatalog() - th.AssertNoErr(t, err) - th.CheckDeepEquals(t, ExpectedServiceCatalog, serviceCatalog) -} - -// GetIsSuccessful ensures that a GetResult was successful and contains the correct token and -// User Info. -func GetIsSuccessful(t *testing.T, result GetResult) { - token, err := result.ExtractToken() - th.AssertNoErr(t, err) - th.CheckDeepEquals(t, ExpectedToken, token) - - user, err := result.ExtractUser() - th.AssertNoErr(t, err) - th.CheckDeepEquals(t, ExpectedUser, user) -} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/requests.go deleted file mode 100644 index 1f514386d0d7..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/requests.go +++ /dev/null @@ -1,99 +0,0 @@ -package tokens - -import ( - "fmt" - - "github.com/rackspace/gophercloud" -) - -// AuthOptionsBuilder describes any argument that may be passed to the Create call. -type AuthOptionsBuilder interface { - - // ToTokenCreateMap assembles the Create request body, returning an error if parameters are - // missing or inconsistent. - ToTokenCreateMap() (map[string]interface{}, error) -} - -// AuthOptions wraps a gophercloud AuthOptions in order to adhere to the AuthOptionsBuilder -// interface. -type AuthOptions struct { - gophercloud.AuthOptions -} - -// WrapOptions embeds a root AuthOptions struct in a package-specific one. -func WrapOptions(original gophercloud.AuthOptions) AuthOptions { - return AuthOptions{AuthOptions: original} -} - -// ToTokenCreateMap converts AuthOptions into nested maps that can be serialized into a JSON -// request. -func (auth AuthOptions) ToTokenCreateMap() (map[string]interface{}, error) { - // Error out if an unsupported auth option is present. - if auth.UserID != "" { - return nil, ErrUserIDProvided - } - if auth.APIKey != "" { - return nil, ErrAPIKeyProvided - } - if auth.DomainID != "" { - return nil, ErrDomainIDProvided - } - if auth.DomainName != "" { - return nil, ErrDomainNameProvided - } - - // Populate the request map. 
- authMap := make(map[string]interface{}) - - if auth.Username != "" { - if auth.Password != "" { - authMap["passwordCredentials"] = map[string]interface{}{ - "username": auth.Username, - "password": auth.Password, - } - } else { - return nil, ErrPasswordRequired - } - } else if auth.TokenID != "" { - authMap["token"] = map[string]interface{}{ - "id": auth.TokenID, - } - } else { - return nil, fmt.Errorf("You must provide either username/password or tenantID/token values.") - } - - if auth.TenantID != "" { - authMap["tenantId"] = auth.TenantID - } - if auth.TenantName != "" { - authMap["tenantName"] = auth.TenantName - } - - return map[string]interface{}{"auth": authMap}, nil -} - -// Create authenticates to the identity service and attempts to acquire a Token. -// If successful, the CreateResult -// Generally, rather than interact with this call directly, end users should call openstack.AuthenticatedClient(), -// which abstracts all of the gory details about navigating service catalogs and such. -func Create(client *gophercloud.ServiceClient, auth AuthOptionsBuilder) CreateResult { - request, err := auth.ToTokenCreateMap() - if err != nil { - return CreateResult{gophercloud.Result{Err: err}} - } - - var result CreateResult - _, result.Err = client.Post(CreateURL(client), request, &result.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200, 203}, - }) - return result -} - -// Validates and retrieves information for user's token. -func Get(client *gophercloud.ServiceClient, token string) GetResult { - var result GetResult - _, result.Err = client.Get(GetURL(client, token), &result.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200, 203}, - }) - return result -} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/results.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/results.go deleted file mode 100644 index 67c577b8d0da..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/results.go +++ /dev/null @@ -1,170 +0,0 @@ -package tokens - -import ( - "time" - - "github.com/mitchellh/mapstructure" - "github.com/rackspace/gophercloud" - "github.com/rackspace/gophercloud/openstack/identity/v2/tenants" -) - -// Token provides only the most basic information related to an authentication token. -type Token struct { - // ID provides the primary means of identifying a user to the OpenStack API. - // OpenStack defines this field as an opaque value, so do not depend on its content. - // It is safe, however, to compare for equality. - ID string - - // ExpiresAt provides a timestamp in ISO 8601 format, indicating when the authentication token becomes invalid. - // After this point in time, future API requests made using this authentication token will respond with errors. - // Either the caller will need to reauthenticate manually, or more preferably, the caller should exploit automatic re-authentication. - // See the AuthOptions structure for more details. - ExpiresAt time.Time - - // Tenant provides information about the tenant to which this token grants access. - Tenant tenants.Tenant -} - -// Authorization need user info which can get from token authentication's response -type Role struct { - Name string `mapstructure:"name"` -} -type User struct { - ID string `mapstructure:"id"` - Name string `mapstructure:"name"` - UserName string `mapstructure:"username"` - Roles []Role `mapstructure:"roles"` -} - -// Endpoint represents a single API endpoint offered by a service. 
-// It provides the public and internal URLs, if supported, along with a region specifier, again if provided. -// The significance of the Region field will depend upon your provider. -// -// In addition, the interface offered by the service will have version information associated with it -// through the VersionId, VersionInfo, and VersionList fields, if provided or supported. -// -// In all cases, fields which aren't supported by the provider and service combined will assume a zero-value (""). -type Endpoint struct { - TenantID string `mapstructure:"tenantId"` - PublicURL string `mapstructure:"publicURL"` - InternalURL string `mapstructure:"internalURL"` - AdminURL string `mapstructure:"adminURL"` - Region string `mapstructure:"region"` - VersionID string `mapstructure:"versionId"` - VersionInfo string `mapstructure:"versionInfo"` - VersionList string `mapstructure:"versionList"` -} - -// CatalogEntry provides a type-safe interface to an Identity API V2 service catalog listing. -// Each class of service, such as cloud DNS or block storage services, will have a single -// CatalogEntry representing it. -// -// Note: when looking for the desired service, try, whenever possible, to key off the type field. -// Otherwise, you'll tie the representation of the service to a specific provider. -type CatalogEntry struct { - // Name will contain the provider-specified name for the service. - Name string `mapstructure:"name"` - - // Type will contain a type string if OpenStack defines a type for the service. - // Otherwise, for provider-specific services, the provider may assign their own type strings. - Type string `mapstructure:"type"` - - // Endpoints will let the caller iterate over all the different endpoints that may exist for - // the service. - Endpoints []Endpoint `mapstructure:"endpoints"` -} - -// ServiceCatalog provides a view into the service catalog from a previous, successful authentication. -type ServiceCatalog struct { - Entries []CatalogEntry -} - -// CreateResult defers the interpretation of a created token. -// Use ExtractToken() to interpret it as a Token, or ExtractServiceCatalog() to interpret it as a service catalog. -type CreateResult struct { - gophercloud.Result -} - -// GetResult is the deferred response from a Get call, which is the same with a Created token. -// Use ExtractUser() to interpret it as a User. -type GetResult struct { - CreateResult -} - -// ExtractToken returns the just-created Token from a CreateResult. -func (result CreateResult) ExtractToken() (*Token, error) { - if result.Err != nil { - return nil, result.Err - } - - var response struct { - Access struct { - Token struct { - Expires string `mapstructure:"expires"` - ID string `mapstructure:"id"` - Tenant tenants.Tenant `mapstructure:"tenant"` - } `mapstructure:"token"` - } `mapstructure:"access"` - } - - err := mapstructure.Decode(result.Body, &response) - if err != nil { - return nil, err - } - - expiresTs, err := time.Parse(gophercloud.RFC3339Milli, response.Access.Token.Expires) - if err != nil { - return nil, err - } - - return &Token{ - ID: response.Access.Token.ID, - ExpiresAt: expiresTs, - Tenant: response.Access.Token.Tenant, - }, nil -} - -// ExtractServiceCatalog returns the ServiceCatalog that was generated along with the user's Token. 
-func (result CreateResult) ExtractServiceCatalog() (*ServiceCatalog, error) { - if result.Err != nil { - return nil, result.Err - } - - var response struct { - Access struct { - Entries []CatalogEntry `mapstructure:"serviceCatalog"` - } `mapstructure:"access"` - } - - err := mapstructure.Decode(result.Body, &response) - if err != nil { - return nil, err - } - - return &ServiceCatalog{Entries: response.Access.Entries}, nil -} - -// createErr quickly packs an error in a CreateResult. -func createErr(err error) CreateResult { - return CreateResult{gophercloud.Result{Err: err}} -} - -// ExtractUser returns the User from a GetResult. -func (result GetResult) ExtractUser() (*User, error) { - if result.Err != nil { - return nil, result.Err - } - - var response struct { - Access struct { - User User `mapstructure:"user"` - } `mapstructure:"access"` - } - - err := mapstructure.Decode(result.Body, &response) - if err != nil { - return nil, err - } - - return &response.Access.User, nil -} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/urls.go deleted file mode 100644 index ee1393299f3e..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/urls.go +++ /dev/null @@ -1,13 +0,0 @@ -package tokens - -import "github.com/rackspace/gophercloud" - -// CreateURL generates the URL used to create new Tokens. -func CreateURL(client *gophercloud.ServiceClient) string { - return client.ServiceURL("tokens") -} - -// GetURL generates the URL used to Validate Tokens. -func GetURL(client *gophercloud.ServiceClient, token string) string { - return client.ServiceURL("tokens", token) -} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/doc.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/doc.go deleted file mode 100644 index 76ff5f473873..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Package tokens provides information and interaction with the token API -// resource for the OpenStack Identity service. -// -// For more information, see: -// http://developer.openstack.org/api-ref-identity-v3.html#tokens-v3 -package tokens diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/errors.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/errors.go deleted file mode 100644 index a9e1d5c2f5ce..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/errors.go +++ /dev/null @@ -1,72 +0,0 @@ -package tokens - -import ( - "errors" - "fmt" -) - -func unacceptedAttributeErr(attribute string) error { - return fmt.Errorf("The base Identity V3 API does not accept authentication by %s", attribute) -} - -func redundantWithTokenErr(attribute string) error { - return fmt.Errorf("%s may not be provided when authenticating with a TokenID", attribute) -} - -func redundantWithUserID(attribute string) error { - return fmt.Errorf("%s may not be provided when authenticating with a UserID", attribute) -} - -var ( - // ErrAPIKeyProvided indicates that an APIKey was provided but can't be used. - ErrAPIKeyProvided = unacceptedAttributeErr("APIKey") - - // ErrTenantIDProvided indicates that a TenantID was provided but can't be used. - ErrTenantIDProvided = unacceptedAttributeErr("TenantID") - - // ErrTenantNameProvided indicates that a TenantName was provided but can't be used. 
- ErrTenantNameProvided = unacceptedAttributeErr("TenantName") - - // ErrUsernameWithToken indicates that a Username was provided, but token authentication is being used instead. - ErrUsernameWithToken = redundantWithTokenErr("Username") - - // ErrUserIDWithToken indicates that a UserID was provided, but token authentication is being used instead. - ErrUserIDWithToken = redundantWithTokenErr("UserID") - - // ErrDomainIDWithToken indicates that a DomainID was provided, but token authentication is being used instead. - ErrDomainIDWithToken = redundantWithTokenErr("DomainID") - - // ErrDomainNameWithToken indicates that a DomainName was provided, but token authentication is being used instead.s - ErrDomainNameWithToken = redundantWithTokenErr("DomainName") - - // ErrUsernameOrUserID indicates that neither username nor userID are specified, or both are at once. - ErrUsernameOrUserID = errors.New("Exactly one of Username and UserID must be provided for password authentication") - - // ErrDomainIDWithUserID indicates that a DomainID was provided, but unnecessary because a UserID is being used. - ErrDomainIDWithUserID = redundantWithUserID("DomainID") - - // ErrDomainNameWithUserID indicates that a DomainName was provided, but unnecessary because a UserID is being used. - ErrDomainNameWithUserID = redundantWithUserID("DomainName") - - // ErrDomainIDOrDomainName indicates that a username was provided, but no domain to scope it. - // It may also indicate that both a DomainID and a DomainName were provided at once. - ErrDomainIDOrDomainName = errors.New("You must provide exactly one of DomainID or DomainName to authenticate by Username") - - // ErrMissingPassword indicates that no password and no token were provided and no token is available. - ErrMissingPassword = errors.New("You must provide a password or a token to authenticate") - - // ErrScopeDomainIDOrDomainName indicates that a domain ID or Name was required in a Scope, but not present. - ErrScopeDomainIDOrDomainName = errors.New("You must provide exactly one of DomainID or DomainName in a Scope with ProjectName") - - // ErrScopeProjectIDOrProjectName indicates that both a ProjectID and a ProjectName were provided in a Scope. - ErrScopeProjectIDOrProjectName = errors.New("You must provide at most one of ProjectID or ProjectName in a Scope") - - // ErrScopeProjectIDAlone indicates that a ProjectID was provided with other constraints in a Scope. - ErrScopeProjectIDAlone = errors.New("ProjectID must be supplied alone in a Scope") - - // ErrScopeDomainName indicates that a DomainName was provided alone in a Scope. - ErrScopeDomainName = errors.New("DomainName must be supplied with a ProjectName or ProjectID in a Scope.") - - // ErrScopeEmpty indicates that no credentials were provided in a Scope. - ErrScopeEmpty = errors.New("You must provide either a Project or Domain in a Scope") -) diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/requests.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/requests.go deleted file mode 100644 index b32e694da81d..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/requests.go +++ /dev/null @@ -1,285 +0,0 @@ -package tokens - -import ( - "net/http" - - "github.com/rackspace/gophercloud" -) - -// Scope allows a created token to be limited to a specific domain or project. 
-type Scope struct { - ProjectID string - ProjectName string - DomainID string - DomainName string -} - -func subjectTokenHeaders(c *gophercloud.ServiceClient, subjectToken string) map[string]string { - return map[string]string{ - "X-Subject-Token": subjectToken, - } -} - -// AuthOptionsV3er describes any argument that may be passed to the Create call. -type AuthOptionsV3er interface { - - // ToTokenCreateMap assembles the Create request body, returning an error if parameters are - // missing or inconsistent. - ToAuthOptionsV3Map(c *gophercloud.ServiceClient, scope *Scope) (map[string]interface{}, error) -} - -// AuthOptions wraps a gophercloud AuthOptions in order to adhere to the AuthOptionsV3er -// interface. -type AuthOptions struct { - gophercloud.AuthOptions -} - -func (options AuthOptions) ToAuthOptionsV3Map(c *gophercloud.ServiceClient, scope *Scope) (map[string]interface{}, error) { - // tokens3.Create logic - - // Populate the request structure based on the provided arguments. Create and return an error - // if insufficient or incompatible information is present. - authMap := make(map[string]interface{}) - - // Test first for unrecognized arguments. - if options.APIKey != "" { - return nil, ErrAPIKeyProvided - } - if options.TenantID != "" { - return nil, ErrTenantIDProvided - } - if options.TenantName != "" { - return nil, ErrTenantNameProvided - } - - if options.Password == "" { - if options.TokenID != "" { - c.TokenID = options.TokenID - } - if c.TokenID != "" { - // Because we aren't using password authentication, it's an error to also provide any of the user-based authentication - // parameters. - if options.Username != "" { - return nil, ErrUsernameWithToken - } - if options.UserID != "" { - return nil, ErrUserIDWithToken - } - - // Configure the request for Token authentication. - authMap["identity"] = map[string]interface{}{ - "methods": []string{"token"}, - "token": map[string]interface{}{ - "id": c.TokenID, - }, - } - - } else { - // If no password or token ID are available, authentication can't continue. - return nil, ErrMissingPassword - } - } else { - // Password authentication. - - // At least one of Username and UserID must be specified. - if options.Username == "" && options.UserID == "" { - return nil, ErrUsernameOrUserID - } - - if options.Username != "" { - // If Username is provided, UserID may not be provided. - if options.UserID != "" { - return nil, ErrUsernameOrUserID - } - - // Either DomainID or DomainName must also be specified. - if options.DomainID == "" && options.DomainName == "" { - return nil, ErrDomainIDOrDomainName - } - - if options.DomainID != "" { - if options.DomainName != "" { - return nil, ErrDomainIDOrDomainName - } - - // Configure the request for Username and Password authentication with a DomainID. - authMap["identity"] = map[string]interface{}{ - "methods": []string{"password"}, - "password" : map[string]interface{}{ - "user": map[string]interface{}{ - "name": &options.Username, - "password": options.Password, - "domain": map[string]interface{}{ - "id": &options.DomainID, - }, - }, - }, - } - - } - - if options.DomainName != "" { - // Configure the request for Username and Password authentication with a DomainName. 
- authMap["identity"] = map[string]interface{}{ - "methods": []string{"password"}, - "password": map[string]interface{}{ - "user": map[string]interface{}{ - "name": &options.Username, - "password": options.Password, - "domain": map[string]interface{}{ - "name": &options.DomainName, - }, - }, - }, - } - - } - } - - if options.UserID != "" { - // If UserID is specified, neither DomainID nor DomainName may be. - if options.DomainID != "" { - return nil, ErrDomainIDWithUserID - } - if options.DomainName != "" { - return nil, ErrDomainNameWithUserID - } - - // Configure the request for UserID and Password authentication. - authMap["identity"] = map[string]interface{}{ - "methods": []string{"password"}, - "password" : map[string]interface{}{ - "user": map[string]interface{}{ - "id": &options.UserID, - "password": options.Password, - }, - }, - } - - } - } - - // Add a "scope" element if a Scope has been provided. - if scope != nil { - if scope.ProjectName != "" { - // ProjectName provided: either DomainID or DomainName must also be supplied. - // ProjectID may not be supplied. - if scope.DomainID == "" && scope.DomainName == "" { - return nil, ErrScopeDomainIDOrDomainName - } - if scope.ProjectID != "" { - return nil, ErrScopeProjectIDOrProjectName - } - - if scope.DomainID != "" { - // ProjectName + DomainID - authMap["scope"] = map[string]interface{}{ - "project": map[string]interface{}{ - "domain": map[string]interface{}{ - "id": &scope.DomainID, - }, - "name": &scope.ProjectName, - }, - } - } - - if scope.DomainName != "" { - // ProjectName + DomainName - authMap["scope"] = map[string]interface{}{ - "project": map[string]interface{}{ - "domain": map[string]interface{}{ - "name": &scope.DomainName, - }, - "name": &scope.ProjectName, - }, - } - } - } else if scope.ProjectID != "" { - // ProjectID provided. ProjectName, DomainID, and DomainName may not be provided. - if scope.DomainID != "" { - return nil, ErrScopeProjectIDAlone - } - if scope.DomainName != "" { - return nil, ErrScopeProjectIDAlone - } - - // ProjectID - authMap["scope"] = map[string]interface{}{ - "project": map[string]interface{}{ - "id": &scope.ProjectID, - }, - } - } else if scope.DomainID != "" { - // DomainID provided. ProjectID, ProjectName, and DomainName may not be provided. - if scope.DomainName != "" { - return nil, ErrScopeDomainIDOrDomainName - } - - // DomainID - authMap["scope"] = map[string]interface{}{ - "domain": map[string]interface{}{ - "id": &scope.DomainID, - }, - } - } else if scope.DomainName != "" { - return nil, ErrScopeDomainName - } else { - return nil, ErrScopeEmpty - } - } - return map[string]interface{}{"auth": authMap}, nil -} - -// Create authenticates and either generates a new token, or changes the Scope of an existing token. -func Create(c *gophercloud.ServiceClient, options AuthOptionsV3er, scope *Scope) CreateResult { - request, err := options.ToAuthOptionsV3Map(c, scope) - if err != nil { - return CreateResult{commonResult{gophercloud.Result{Err: err}}} - } - - var result CreateResult - var response *http.Response - response, result.Err = c.Post(tokenURL(c), request, &result.Body, nil) - if result.Err != nil { - return result - } - result.Header = response.Header - return result -} - -// Get validates and retrieves information about another token. 
-func Get(c *gophercloud.ServiceClient, token string) GetResult { - var result GetResult - var response *http.Response - response, result.Err = c.Get(tokenURL(c), &result.Body, &gophercloud.RequestOpts{ - MoreHeaders: subjectTokenHeaders(c, token), - OkCodes: []int{200, 203}, - }) - if result.Err != nil { - return result - } - result.Header = response.Header - return result -} - -// Validate determines if a specified token is valid or not. -func Validate(c *gophercloud.ServiceClient, token string) (bool, error) { - response, err := c.Request("HEAD", tokenURL(c), gophercloud.RequestOpts{ - MoreHeaders: subjectTokenHeaders(c, token), - OkCodes: []int{204, 404}, - }) - if err != nil { - return false, err - } - - return response.StatusCode == 204, nil -} - -// Revoke immediately makes specified token invalid. -func Revoke(c *gophercloud.ServiceClient, token string) RevokeResult { - var res RevokeResult - _, res.Err = c.Delete(tokenURL(c), &gophercloud.RequestOpts{ - MoreHeaders: subjectTokenHeaders(c, token), - }) - return res -} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/results.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/results.go deleted file mode 100644 index d134f7d4d074..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/results.go +++ /dev/null @@ -1,139 +0,0 @@ -package tokens - -import ( - "time" - - "github.com/mitchellh/mapstructure" - "github.com/rackspace/gophercloud" -) - -// Endpoint represents a single API endpoint offered by a service. -// It matches either a public, internal or admin URL. -// If supported, it contains a region specifier, again if provided. -// The significance of the Region field will depend upon your provider. -type Endpoint struct { - ID string `mapstructure:"id"` - Region string `mapstructure:"region"` - Interface string `mapstructure:"interface"` - URL string `mapstructure:"url"` -} - -// CatalogEntry provides a type-safe interface to an Identity API V3 service catalog listing. -// Each class of service, such as cloud DNS or block storage services, could have multiple -// CatalogEntry representing it (one by interface type, e.g public, admin or internal). -// -// Note: when looking for the desired service, try, whenever possible, to key off the type field. -// Otherwise, you'll tie the representation of the service to a specific provider. -type CatalogEntry struct { - - // Service ID - ID string `mapstructure:"id"` - - // Name will contain the provider-specified name for the service. - Name string `mapstructure:"name"` - - // Type will contain a type string if OpenStack defines a type for the service. - // Otherwise, for provider-specific services, the provider may assign their own type strings. - Type string `mapstructure:"type"` - - // Endpoints will let the caller iterate over all the different endpoints that may exist for - // the service. - Endpoints []Endpoint `mapstructure:"endpoints"` -} - -// ServiceCatalog provides a view into the service catalog from a previous, successful authentication. -type ServiceCatalog struct { - Entries []CatalogEntry -} - -// commonResult is the deferred result of a Create or a Get call. -type commonResult struct { - gophercloud.Result -} - -// Extract is a shortcut for ExtractToken. -// This function is deprecated and still present for backward compatibility. -func (r commonResult) Extract() (*Token, error) { - return r.ExtractToken() -} - -// ExtractToken interprets a commonResult as a Token. 
-func (r commonResult) ExtractToken() (*Token, error) { - if r.Err != nil { - return nil, r.Err - } - - var response struct { - Token struct { - ExpiresAt string `mapstructure:"expires_at"` - } `mapstructure:"token"` - } - - var token Token - - // Parse the token itself from the stored headers. - token.ID = r.Header.Get("X-Subject-Token") - - err := mapstructure.Decode(r.Body, &response) - if err != nil { - return nil, err - } - - // Attempt to parse the timestamp. - token.ExpiresAt, err = time.Parse(gophercloud.RFC3339Milli, response.Token.ExpiresAt) - - return &token, err -} - -// ExtractServiceCatalog returns the ServiceCatalog that was generated along with the user's Token. -func (result CreateResult) ExtractServiceCatalog() (*ServiceCatalog, error) { - if result.Err != nil { - return nil, result.Err - } - - var response struct { - Token struct { - Entries []CatalogEntry `mapstructure:"catalog"` - } `mapstructure:"token"` - } - - err := mapstructure.Decode(result.Body, &response) - if err != nil { - return nil, err - } - - return &ServiceCatalog{Entries: response.Token.Entries}, nil -} - -// CreateResult defers the interpretation of a created token. -// Use ExtractToken() to interpret it as a Token, or ExtractServiceCatalog() to interpret it as a service catalog. -type CreateResult struct { - commonResult -} - -// createErr quickly creates a CreateResult that reports an error. -func createErr(err error) CreateResult { - return CreateResult{ - commonResult: commonResult{Result: gophercloud.Result{Err: err}}, - } -} - -// GetResult is the deferred response from a Get call. -type GetResult struct { - commonResult -} - -// RevokeResult is the deferred response from a Revoke call. -type RevokeResult struct { - commonResult -} - -// Token is a string that grants a user access to a controlled set of services in an OpenStack provider. -// Each Token is valid for a set length of time. -type Token struct { - // ID is the issued token. - ID string - - // ExpiresAt is the timestamp at which this token will no longer be accepted. - ExpiresAt time.Time -} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/urls.go b/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/urls.go deleted file mode 100644 index 360b60a82fba..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/urls.go +++ /dev/null @@ -1,7 +0,0 @@ -package tokens - -import "github.com/rackspace/gophercloud" - -func tokenURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL("auth", "tokens") -} diff --git a/vendor/github.com/rackspace/gophercloud/openstack/utils/choose_version.go b/vendor/github.com/rackspace/gophercloud/openstack/utils/choose_version.go deleted file mode 100644 index b697ba8160ad..000000000000 --- a/vendor/github.com/rackspace/gophercloud/openstack/utils/choose_version.go +++ /dev/null @@ -1,114 +0,0 @@ -package utils - -import ( - "fmt" - "strings" - - "github.com/rackspace/gophercloud" -) - -// Version is a supported API version, corresponding to a vN package within the appropriate service. -type Version struct { - ID string - Suffix string - Priority int -} - -var goodStatus = map[string]bool{ - "current": true, - "supported": true, - "stable": true, -} - -// ChooseVersion queries the base endpoint of an API to choose the most recent non-experimental alternative from a service's -// published versions. -// It returns the highest-Priority Version among the alternatives that are provided, as well as its corresponding endpoint. 
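// Illustrative sketch, not part of this patch: how the v3 tokens helpers removed
// above were typically driven. Only identifiers visible in this hunk (Create,
// ExtractToken, ExtractServiceCatalog, Validate, Revoke, Scope, AuthOptionsV3er)
// are used; identityClient, authOpts and scope are assumed to be built elsewhere.
package tokensketch

import (
	"fmt"

	"github.com/rackspace/gophercloud"
	tokens3 "github.com/rackspace/gophercloud/openstack/identity/v3/tokens"
)

func issueCheckRevoke(identityClient *gophercloud.ServiceClient, authOpts tokens3.AuthOptionsV3er, scope *tokens3.Scope) error {
	// Create POSTs the assembled "auth" map to /auth/tokens and defers
	// interpretation of the response.
	result := tokens3.Create(identityClient, authOpts, scope)

	// ExtractToken reads the X-Subject-Token header and parses expires_at.
	token, err := result.ExtractToken()
	if err != nil {
		return err
	}

	// ExtractServiceCatalog decodes the "catalog" element of the same body.
	catalog, err := result.ExtractServiceCatalog()
	if err != nil {
		return err
	}
	fmt.Printf("token expires %s; %d catalog entries\n", token.ExpiresAt, len(catalog.Entries))

	// Validate issues a HEAD request; a 204 means the token is still good.
	valid, err := tokens3.Validate(identityClient, token.ID)
	if err != nil {
		return err
	}
	if !valid {
		return fmt.Errorf("token is no longer valid")
	}

	// Revoke invalidates the token immediately via DELETE.
	return tokens3.Revoke(identityClient, token.ID).Err
}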
-func ChooseVersion(client *gophercloud.ProviderClient, recognized []*Version) (*Version, string, error) { - type linkResp struct { - Href string `json:"href"` - Rel string `json:"rel"` - } - - type valueResp struct { - ID string `json:"id"` - Status string `json:"status"` - Links []linkResp `json:"links"` - } - - type versionsResp struct { - Values []valueResp `json:"values"` - } - - type response struct { - Versions versionsResp `json:"versions"` - } - - normalize := func(endpoint string) string { - if !strings.HasSuffix(endpoint, "/") { - return endpoint + "/" - } - return endpoint - } - identityEndpoint := normalize(client.IdentityEndpoint) - - // If a full endpoint is specified, check version suffixes for a match first. - for _, v := range recognized { - if strings.HasSuffix(identityEndpoint, v.Suffix) { - return v, identityEndpoint, nil - } - } - - var resp response - _, err := client.Request("GET", client.IdentityBase, gophercloud.RequestOpts{ - JSONResponse: &resp, - OkCodes: []int{200, 300}, - }) - - if err != nil { - return nil, "", err - } - - byID := make(map[string]*Version) - for _, version := range recognized { - byID[version.ID] = version - } - - var highest *Version - var endpoint string - - for _, value := range resp.Versions.Values { - href := "" - for _, link := range value.Links { - if link.Rel == "self" { - href = normalize(link.Href) - } - } - - if matching, ok := byID[value.ID]; ok { - // Prefer a version that exactly matches the provided endpoint. - if href == identityEndpoint { - if href == "" { - return nil, "", fmt.Errorf("Endpoint missing in version %s response from %s", value.ID, client.IdentityBase) - } - return matching, href, nil - } - - // Otherwise, find the highest-priority version with a whitelisted status. - if goodStatus[strings.ToLower(value.Status)] { - if highest == nil || matching.Priority > highest.Priority { - highest = matching - endpoint = href - } - } - } - } - - if highest == nil { - return nil, "", fmt.Errorf("No supported version available from endpoint %s", client.IdentityBase) - } - if endpoint == "" { - return nil, "", fmt.Errorf("Endpoint missing in version %s response from %s", highest.ID, client.IdentityBase) - } - - return highest, endpoint, nil -} diff --git a/vendor/github.com/rackspace/gophercloud/pagination/http.go b/vendor/github.com/rackspace/gophercloud/pagination/http.go deleted file mode 100644 index 1b3fe94ab9f2..000000000000 --- a/vendor/github.com/rackspace/gophercloud/pagination/http.go +++ /dev/null @@ -1,60 +0,0 @@ -package pagination - -import ( - "encoding/json" - "io/ioutil" - "net/http" - "net/url" - "strings" - - "github.com/rackspace/gophercloud" -) - -// PageResult stores the HTTP response that returned the current page of results. -type PageResult struct { - gophercloud.Result - url.URL -} - -// PageResultFrom parses an HTTP response as JSON and returns a PageResult containing the -// results, interpreting it as JSON if the content type indicates. 
-func PageResultFrom(resp *http.Response) (PageResult, error) { - var parsedBody interface{} - - defer resp.Body.Close() - rawBody, err := ioutil.ReadAll(resp.Body) - if err != nil { - return PageResult{}, err - } - - if strings.HasPrefix(resp.Header.Get("Content-Type"), "application/json") { - err = json.Unmarshal(rawBody, &parsedBody) - if err != nil { - return PageResult{}, err - } - } else { - parsedBody = rawBody - } - - return PageResultFromParsed(resp, parsedBody), err -} - -// PageResultFromParsed constructs a PageResult from an HTTP response that has already had its -// body parsed as JSON (and closed). -func PageResultFromParsed(resp *http.Response, body interface{}) PageResult { - return PageResult{ - Result: gophercloud.Result{ - Body: body, - Header: resp.Header, - }, - URL: *resp.Request.URL, - } -} - -// Request performs an HTTP request and extracts the http.Response from the result. -func Request(client *gophercloud.ServiceClient, headers map[string]string, url string) (*http.Response, error) { - return client.Request("GET", url, gophercloud.RequestOpts{ - MoreHeaders: headers, - OkCodes: []int{200, 204}, - }) -} diff --git a/vendor/github.com/rackspace/gophercloud/pagination/linked.go b/vendor/github.com/rackspace/gophercloud/pagination/linked.go deleted file mode 100644 index e9bd8dec9767..000000000000 --- a/vendor/github.com/rackspace/gophercloud/pagination/linked.go +++ /dev/null @@ -1,67 +0,0 @@ -package pagination - -import "fmt" - -// LinkedPageBase may be embedded to implement a page that provides navigational "Next" and "Previous" links within its result. -type LinkedPageBase struct { - PageResult - - // LinkPath lists the keys that should be traversed within a response to arrive at the "next" pointer. - // If any link along the path is missing, an empty URL will be returned. - // If any link results in an unexpected value type, an error will be returned. - // When left as "nil", []string{"links", "next"} will be used as a default. - LinkPath []string -} - -// NextPageURL extracts the pagination structure from a JSON response and returns the "next" link, if one is present. -// It assumes that the links are available in a "links" element of the top-level response object. -// If this is not the case, override NextPageURL on your result type. -func (current LinkedPageBase) NextPageURL() (string, error) { - var path []string - var key string - - if current.LinkPath == nil { - path = []string{"links", "next"} - } else { - path = current.LinkPath - } - - submap, ok := current.Body.(map[string]interface{}) - if !ok { - return "", fmt.Errorf("Expected an object, but was %#v", current.Body) - } - - for { - key, path = path[0], path[1:len(path)] - - value, ok := submap[key] - if !ok { - return "", nil - } - - if len(path) > 0 { - submap, ok = value.(map[string]interface{}) - if !ok { - return "", fmt.Errorf("Expected an object, but was %#v", value) - } - } else { - if value == nil { - // Actual null element. - return "", nil - } - - url, ok := value.(string) - if !ok { - return "", fmt.Errorf("Expected a string, but was %#v", value) - } - - return url, nil - } - } -} - -// GetBody returns the linked page's body. This method is needed to satisfy the -// Page interface. 
-func (current LinkedPageBase) GetBody() interface{} { - return current.Body -} diff --git a/vendor/github.com/rackspace/gophercloud/pagination/marker.go b/vendor/github.com/rackspace/gophercloud/pagination/marker.go deleted file mode 100644 index f355afc54b57..000000000000 --- a/vendor/github.com/rackspace/gophercloud/pagination/marker.go +++ /dev/null @@ -1,40 +0,0 @@ -package pagination - -// MarkerPage is a stricter Page interface that describes additional functionality required for use with NewMarkerPager. -// For convenience, embed the MarkedPageBase struct. -type MarkerPage interface { - Page - - // LastMarker returns the last "marker" value on this page. - LastMarker() (string, error) -} - -// MarkerPageBase is a page in a collection that's paginated by "limit" and "marker" query parameters. -type MarkerPageBase struct { - PageResult - - // Owner is a reference to the embedding struct. - Owner MarkerPage -} - -// NextPageURL generates the URL for the page of results after this one. -func (current MarkerPageBase) NextPageURL() (string, error) { - currentURL := current.URL - - mark, err := current.Owner.LastMarker() - if err != nil { - return "", err - } - - q := currentURL.Query() - q.Set("marker", mark) - currentURL.RawQuery = q.Encode() - - return currentURL.String(), nil -} - -// GetBody returns the linked page's body. This method is needed to satisfy the -// Page interface. -func (current MarkerPageBase) GetBody() interface{} { - return current.Body -} diff --git a/vendor/github.com/rackspace/gophercloud/pagination/null.go b/vendor/github.com/rackspace/gophercloud/pagination/null.go deleted file mode 100644 index ae57e1886c93..000000000000 --- a/vendor/github.com/rackspace/gophercloud/pagination/null.go +++ /dev/null @@ -1,20 +0,0 @@ -package pagination - -// nullPage is an always-empty page that trivially satisfies all Page interfacts. -// It's useful to be returned along with an error. -type nullPage struct{} - -// NextPageURL always returns "" to indicate that there are no more pages to return. -func (p nullPage) NextPageURL() (string, error) { - return "", nil -} - -// IsEmpty always returns true to prevent iteration over nullPages. -func (p nullPage) IsEmpty() (bool, error) { - return true, nil -} - -// LastMark always returns "" because the nullPage contains no items to have a mark. -func (p nullPage) LastMark() (string, error) { - return "", nil -} diff --git a/vendor/github.com/rackspace/gophercloud/pagination/pager.go b/vendor/github.com/rackspace/gophercloud/pagination/pager.go deleted file mode 100644 index 6f1ca046ff5d..000000000000 --- a/vendor/github.com/rackspace/gophercloud/pagination/pager.go +++ /dev/null @@ -1,238 +0,0 @@ -package pagination - -import ( - "errors" - "fmt" - "net/http" - "reflect" - "strings" - - "github.com/rackspace/gophercloud" -) - -var ( - // ErrPageNotAvailable is returned from a Pager when a next or previous page is requested, but does not exist. - ErrPageNotAvailable = errors.New("The requested page does not exist.") -) - -// Page must be satisfied by the result type of any resource collection. -// It allows clients to interact with the resource uniformly, regardless of whether or not or how it's paginated. -// Generally, rather than implementing this interface directly, implementors should embed one of the concrete PageBase structs, -// instead. -// Depending on the pagination strategy of a particular resource, there may be an additional subinterface that the result type -// will need to implement. 
-type Page interface { - - // NextPageURL generates the URL for the page of data that follows this collection. - // Return "" if no such page exists. - NextPageURL() (string, error) - - // IsEmpty returns true if this Page has no items in it. - IsEmpty() (bool, error) - - // GetBody returns the Page Body. This is used in the `AllPages` method. - GetBody() interface{} -} - -// Pager knows how to advance through a specific resource collection, one page at a time. -type Pager struct { - client *gophercloud.ServiceClient - - initialURL string - - createPage func(r PageResult) Page - - Err error - - // Headers supplies additional HTTP headers to populate on each paged request. - Headers map[string]string -} - -// NewPager constructs a manually-configured pager. -// Supply the URL for the first page, a function that requests a specific page given a URL, and a function that counts a page. -func NewPager(client *gophercloud.ServiceClient, initialURL string, createPage func(r PageResult) Page) Pager { - return Pager{ - client: client, - initialURL: initialURL, - createPage: createPage, - } -} - -// WithPageCreator returns a new Pager that substitutes a different page creation function. This is -// useful for overriding List functions in delegation. -func (p Pager) WithPageCreator(createPage func(r PageResult) Page) Pager { - return Pager{ - client: p.client, - initialURL: p.initialURL, - createPage: createPage, - } -} - -func (p Pager) fetchNextPage(url string) (Page, error) { - resp, err := Request(p.client, p.Headers, url) - if err != nil { - return nil, err - } - - remembered, err := PageResultFrom(resp) - if err != nil { - return nil, err - } - - return p.createPage(remembered), nil -} - -// EachPage iterates over each page returned by a Pager, yielding one at a time to a handler function. -// Return "false" from the handler to prematurely stop iterating. -func (p Pager) EachPage(handler func(Page) (bool, error)) error { - if p.Err != nil { - return p.Err - } - currentURL := p.initialURL - for { - currentPage, err := p.fetchNextPage(currentURL) - if err != nil { - return err - } - - empty, err := currentPage.IsEmpty() - if err != nil { - return err - } - if empty { - return nil - } - - ok, err := handler(currentPage) - if err != nil { - return err - } - if !ok { - return nil - } - - currentURL, err = currentPage.NextPageURL() - if err != nil { - return err - } - if currentURL == "" { - return nil - } - } -} - -// AllPages returns all the pages from a `List` operation in a single page, -// allowing the user to retrieve all the pages at once. -func (p Pager) AllPages() (Page, error) { - // pagesSlice holds all the pages until they get converted into as Page Body. - var pagesSlice []interface{} - // body will contain the final concatenated Page body. - var body reflect.Value - - // Grab a test page to ascertain the page body type. - testPage, err := p.fetchNextPage(p.initialURL) - if err != nil { - return nil, err - } - // Store the page type so we can use reflection to create a new mega-page of - // that type. - pageType := reflect.TypeOf(testPage) - - // if it's a single page, just return the testPage (first page) - if _, found := pageType.FieldByName("SinglePageBase"); found { - return testPage, nil - } - - // Switch on the page body type. Recognized types are `map[string]interface{}`, - // `[]byte`, and `[]interface{}`. - switch testPage.GetBody().(type) { - case map[string]interface{}: - // key is the map key for the page body if the body type is `map[string]interface{}`. 
- var key string - // Iterate over the pages to concatenate the bodies. - err := p.EachPage(func(page Page) (bool, error) { - b := page.GetBody().(map[string]interface{}) - for k := range b { - // If it's a linked page, we don't want the `links`, we want the other one. - if !strings.HasSuffix(k, "links") { - key = k - } - } - switch keyType := b[key].(type) { - case map[string]interface{}: - pagesSlice = append(pagesSlice, keyType) - case []interface{}: - pagesSlice = append(pagesSlice, b[key].([]interface{})...) - default: - return false, fmt.Errorf("Unsupported page body type: %+v", keyType) - } - return true, nil - }) - if err != nil { - return nil, err - } - // Set body to value of type `map[string]interface{}` - body = reflect.MakeMap(reflect.MapOf(reflect.TypeOf(key), reflect.TypeOf(pagesSlice))) - body.SetMapIndex(reflect.ValueOf(key), reflect.ValueOf(pagesSlice)) - case []byte: - // Iterate over the pages to concatenate the bodies. - err := p.EachPage(func(page Page) (bool, error) { - b := page.GetBody().([]byte) - pagesSlice = append(pagesSlice, b) - // seperate pages with a comma - pagesSlice = append(pagesSlice, []byte{10}) - return true, nil - }) - if err != nil { - return nil, err - } - if len(pagesSlice) > 0 { - // Remove the trailing comma. - pagesSlice = pagesSlice[:len(pagesSlice)-1] - } - var b []byte - // Combine the slice of slices in to a single slice. - for _, slice := range pagesSlice { - b = append(b, slice.([]byte)...) - } - // Set body to value of type `bytes`. - body = reflect.New(reflect.TypeOf(b)).Elem() - body.SetBytes(b) - case []interface{}: - // Iterate over the pages to concatenate the bodies. - err := p.EachPage(func(page Page) (bool, error) { - b := page.GetBody().([]interface{}) - pagesSlice = append(pagesSlice, b...) - return true, nil - }) - if err != nil { - return nil, err - } - // Set body to value of type `[]interface{}` - body = reflect.MakeSlice(reflect.TypeOf(pagesSlice), len(pagesSlice), len(pagesSlice)) - for i, s := range pagesSlice { - body.Index(i).Set(reflect.ValueOf(s)) - } - default: - return nil, fmt.Errorf("Page body has unrecognized type.") - } - - // Each `Extract*` function is expecting a specific type of page coming back, - // otherwise the type assertion in those functions will fail. pageType is needed - // to create a type in this method that has the same type that the `Extract*` - // function is expecting and set the Body of that object to the concatenated - // pages. - page := reflect.New(pageType) - // Set the page body to be the concatenated pages. - page.Elem().FieldByName("Body").Set(body) - // Set any additional headers that were pass along. The `objectstorage` pacakge, - // for example, passes a Content-Type header. - h := make(http.Header) - for k, v := range p.Headers { - h.Add(k, v) - } - page.Elem().FieldByName("Header").Set(reflect.ValueOf(h)) - // Type assert the page to a Page interface so that the type assertion in the - // `Extract*` methods will work. - return page.Elem().Interface().(Page), err -} diff --git a/vendor/github.com/rackspace/gophercloud/pagination/pkg.go b/vendor/github.com/rackspace/gophercloud/pagination/pkg.go deleted file mode 100644 index 912daea36426..000000000000 --- a/vendor/github.com/rackspace/gophercloud/pagination/pkg.go +++ /dev/null @@ -1,4 +0,0 @@ -/* -Package pagination contains utilities and convenience structs that implement common pagination idioms within OpenStack APIs. 
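// Illustrative sketch, not part of this patch: consuming the Pager removed above.
// Only NewPager, EachPage, PageResult, Page and GetBody from this hunk are used;
// listURL and pageFromResult stand in for what a concrete resource package would
// normally supply.
package pagersketch

import (
	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/pagination"
)

func countItems(client *gophercloud.ServiceClient, listURL string,
	pageFromResult func(pagination.PageResult) pagination.Page) (int, error) {

	pager := pagination.NewPager(client, listURL, pageFromResult)

	total := 0
	// EachPage keeps fetching until NextPageURL returns "", the page is empty,
	// or the handler returns false or an error.
	err := pager.EachPage(func(page pagination.Page) (bool, error) {
		if items, ok := page.GetBody().([]interface{}); ok {
			total += len(items)
		}
		return true, nil // keep paging
	})
	return total, err
}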
-*/ -package pagination diff --git a/vendor/github.com/rackspace/gophercloud/pagination/single.go b/vendor/github.com/rackspace/gophercloud/pagination/single.go deleted file mode 100644 index f78d4ab5d926..000000000000 --- a/vendor/github.com/rackspace/gophercloud/pagination/single.go +++ /dev/null @@ -1,15 +0,0 @@ -package pagination - -// SinglePageBase may be embedded in a Page that contains all of the results from an operation at once. -type SinglePageBase PageResult - -// NextPageURL always returns "" to indicate that there are no more pages to return. -func (current SinglePageBase) NextPageURL() (string, error) { - return "", nil -} - -// GetBody returns the single page's body. This method is needed to satisfy the -// Page interface. -func (current SinglePageBase) GetBody() interface{} { - return current.Body -} diff --git a/vendor/github.com/rackspace/gophercloud/params.go b/vendor/github.com/rackspace/gophercloud/params.go deleted file mode 100644 index 4d0f1e6e0281..000000000000 --- a/vendor/github.com/rackspace/gophercloud/params.go +++ /dev/null @@ -1,271 +0,0 @@ -package gophercloud - -import ( - "fmt" - "net/url" - "reflect" - "strconv" - "strings" - "time" -) - -// EnabledState is a convenience type, mostly used in Create and Update -// operations. Because the zero value of a bool is FALSE, we need to use a -// pointer instead to indicate zero-ness. -type EnabledState *bool - -// Convenience vars for EnabledState values. -var ( - iTrue = true - iFalse = false - - Enabled EnabledState = &iTrue - Disabled EnabledState = &iFalse -) - -// IntToPointer is a function for converting integers into integer pointers. -// This is useful when passing in options to operations. -func IntToPointer(i int) *int { - return &i -} - -/* -MaybeString is an internal function to be used by request methods in individual -resource packages. - -It takes a string that might be a zero value and returns either a pointer to its -address or nil. This is useful for allowing users to conveniently omit values -from an options struct by leaving them zeroed, but still pass nil to the JSON -serializer so they'll be omitted from the request body. -*/ -func MaybeString(original string) *string { - if original != "" { - return &original - } - return nil -} - -/* -MaybeInt is an internal function to be used by request methods in individual -resource packages. - -Like MaybeString, it accepts an int that may or may not be a zero value, and -returns either a pointer to its address or nil. It's intended to hint that the -JSON serializer should omit its field. -*/ -func MaybeInt(original int) *int { - if original != 0 { - return &original - } - return nil -} - -var t time.Time - -func isZero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Func, reflect.Map, reflect.Slice: - return v.IsNil() - case reflect.Array: - z := true - for i := 0; i < v.Len(); i++ { - z = z && isZero(v.Index(i)) - } - return z - case reflect.Struct: - if v.Type() == reflect.TypeOf(t) { - if v.Interface().(time.Time).IsZero() { - return true - } - return false - } - z := true - for i := 0; i < v.NumField(); i++ { - z = z && isZero(v.Field(i)) - } - return z - } - // Compare other types directly: - z := reflect.Zero(v.Type()) - return v.Interface() == z.Interface() -} - -/* -BuildQueryString is an internal function to be used by request methods in -individual resource packages. - -It accepts a tagged structure and expands it into a URL struct. Field names are -converted into query parameters based on a "q" tag. 
For example: - - type struct Something { - Bar string `q:"x_bar"` - Baz int `q:"lorem_ipsum"` - } - - instance := Something{ - Bar: "AAA", - Baz: "BBB", - } - -will be converted into "?x_bar=AAA&lorem_ipsum=BBB". - -The struct's fields may be strings, integers, or boolean values. Fields left at -their type's zero value will be omitted from the query. -*/ -func BuildQueryString(opts interface{}) (*url.URL, error) { - optsValue := reflect.ValueOf(opts) - if optsValue.Kind() == reflect.Ptr { - optsValue = optsValue.Elem() - } - - optsType := reflect.TypeOf(opts) - if optsType.Kind() == reflect.Ptr { - optsType = optsType.Elem() - } - - params := url.Values{} - - if optsValue.Kind() == reflect.Struct { - for i := 0; i < optsValue.NumField(); i++ { - v := optsValue.Field(i) - f := optsType.Field(i) - qTag := f.Tag.Get("q") - - // if the field has a 'q' tag, it goes in the query string - if qTag != "" { - tags := strings.Split(qTag, ",") - - // if the field is set, add it to the slice of query pieces - if !isZero(v) { - switch v.Kind() { - case reflect.String: - params.Add(tags[0], v.String()) - case reflect.Int: - params.Add(tags[0], strconv.FormatInt(v.Int(), 10)) - case reflect.Bool: - params.Add(tags[0], strconv.FormatBool(v.Bool())) - case reflect.Slice: - switch v.Type().Elem() { - case reflect.TypeOf(0): - for i := 0; i < v.Len(); i++ { - params.Add(tags[0], strconv.FormatInt(v.Index(i).Int(), 10)) - } - default: - for i := 0; i < v.Len(); i++ { - params.Add(tags[0], v.Index(i).String()) - } - } - } - } else { - // Otherwise, the field is not set. - if len(tags) == 2 && tags[1] == "required" { - // And the field is required. Return an error. - return nil, fmt.Errorf("Required query parameter [%s] not set.", f.Name) - } - } - } - } - - return &url.URL{RawQuery: params.Encode()}, nil - } - // Return an error if the underlying type of 'opts' isn't a struct. - return nil, fmt.Errorf("Options type is not a struct.") -} - -/* -BuildHeaders is an internal function to be used by request methods in -individual resource packages. - -It accepts an arbitrary tagged structure and produces a string map that's -suitable for use as the HTTP headers of an outgoing request. Field names are -mapped to header names based in "h" tags. - - type struct Something { - Bar string `h:"x_bar"` - Baz int `h:"lorem_ipsum"` - } - - instance := Something{ - Bar: "AAA", - Baz: "BBB", - } - -will be converted into: - - map[string]string{ - "x_bar": "AAA", - "lorem_ipsum": "BBB", - } - -Untagged fields and fields left at their zero values are skipped. Integers, -booleans and string values are supported. 
-*/ -func BuildHeaders(opts interface{}) (map[string]string, error) { - optsValue := reflect.ValueOf(opts) - if optsValue.Kind() == reflect.Ptr { - optsValue = optsValue.Elem() - } - - optsType := reflect.TypeOf(opts) - if optsType.Kind() == reflect.Ptr { - optsType = optsType.Elem() - } - - optsMap := make(map[string]string) - if optsValue.Kind() == reflect.Struct { - for i := 0; i < optsValue.NumField(); i++ { - v := optsValue.Field(i) - f := optsType.Field(i) - hTag := f.Tag.Get("h") - - // if the field has a 'h' tag, it goes in the header - if hTag != "" { - tags := strings.Split(hTag, ",") - - // if the field is set, add it to the slice of query pieces - if !isZero(v) { - switch v.Kind() { - case reflect.String: - optsMap[tags[0]] = v.String() - case reflect.Int: - optsMap[tags[0]] = strconv.FormatInt(v.Int(), 10) - case reflect.Bool: - optsMap[tags[0]] = strconv.FormatBool(v.Bool()) - } - } else { - // Otherwise, the field is not set. - if len(tags) == 2 && tags[1] == "required" { - // And the field is required. Return an error. - return optsMap, fmt.Errorf("Required header not set.") - } - } - } - - } - return optsMap, nil - } - // Return an error if the underlying type of 'opts' isn't a struct. - return optsMap, fmt.Errorf("Options type is not a struct.") -} - -// IDSliceToQueryString takes a slice of elements and converts them into a query -// string. For example, if name=foo and slice=[]int{20, 40, 60}, then the -// result would be `?name=20&name=40&name=60' -func IDSliceToQueryString(name string, ids []int) string { - str := "" - for k, v := range ids { - if k == 0 { - str += "?" - } else { - str += "&" - } - str += fmt.Sprintf("%s=%s", name, strconv.Itoa(v)) - } - return str -} - -// IntWithinRange returns TRUE if an integer falls within a defined range, and -// FALSE if not. -func IntWithinRange(val, min, max int) bool { - return val > min && val < max -} diff --git a/vendor/github.com/rackspace/gophercloud/provider_client.go b/vendor/github.com/rackspace/gophercloud/provider_client.go deleted file mode 100644 index 08fd0ca84f55..000000000000 --- a/vendor/github.com/rackspace/gophercloud/provider_client.go +++ /dev/null @@ -1,331 +0,0 @@ -package gophercloud - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "strings" -) - -// DefaultUserAgent is the default User-Agent string set in the request header. -const DefaultUserAgent = "gophercloud/1.0.0" - -// UserAgent represents a User-Agent header. -type UserAgent struct { - // prepend is the slice of User-Agent strings to prepend to DefaultUserAgent. - // All the strings to prepend are accumulated and prepended in the Join method. - prepend []string -} - -// Prepend prepends a user-defined string to the default User-Agent string. Users -// may pass in one or more strings to prepend. -func (ua *UserAgent) Prepend(s ...string) { - ua.prepend = append(s, ua.prepend...) -} - -// Join concatenates all the user-defined User-Agend strings with the default -// Gophercloud User-Agent string. -func (ua *UserAgent) Join() string { - uaSlice := append(ua.prepend, DefaultUserAgent) - return strings.Join(uaSlice, " ") -} - -// ProviderClient stores details that are required to interact with any -// services within a specific provider's API. -// -// Generally, you acquire a ProviderClient by calling the NewClient method in -// the appropriate provider's child package, providing whatever authentication -// credentials are required. 
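// Illustrative sketch, not part of this patch: how the "q" and "h" struct tags
// removed above were consumed by BuildQueryString and BuildHeaders. The ListOpts
// type and the X-Auth-Token header choice here are hypothetical.
package paramsketch

import (
	"fmt"

	"github.com/rackspace/gophercloud"
)

// Zero-valued tagged fields are omitted; a ",required" suffix turns that
// omission into an error instead.
type ListOpts struct {
	Name  string `q:"name"`
	Limit int    `q:"limit"`
	Token string `h:"X-Auth-Token,required"`
}

func buildRequestPieces() error {
	opts := ListOpts{Name: "web", Limit: 10, Token: "abc123"}

	// Yields "?limit=10&name=web" (only the q-tagged, non-zero fields).
	query, err := gophercloud.BuildQueryString(opts)
	if err != nil {
		return err
	}

	// Yields map[string]string{"X-Auth-Token": "abc123"} (only h-tagged fields).
	headers, err := gophercloud.BuildHeaders(opts)
	if err != nil {
		return err
	}

	fmt.Println(query.String(), headers)
	return nil
}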
-type ProviderClient struct { - // IdentityBase is the base URL used for a particular provider's identity - // service - it will be used when issuing authenticatation requests. It - // should point to the root resource of the identity service, not a specific - // identity version. - IdentityBase string - - // IdentityEndpoint is the identity endpoint. This may be a specific version - // of the identity service. If this is the case, this endpoint is used rather - // than querying versions first. - IdentityEndpoint string - - // TokenID is the ID of the most recently issued valid token. - TokenID string - - // EndpointLocator describes how this provider discovers the endpoints for - // its constituent services. - EndpointLocator EndpointLocator - - // HTTPClient allows users to interject arbitrary http, https, or other transit behaviors. - HTTPClient http.Client - - // UserAgent represents the User-Agent header in the HTTP request. - UserAgent UserAgent - - // ReauthFunc is the function used to re-authenticate the user if the request - // fails with a 401 HTTP response code. This a needed because there may be multiple - // authentication functions for different Identity service versions. - ReauthFunc func() error -} - -// AuthenticatedHeaders returns a map of HTTP headers that are common for all -// authenticated service requests. -func (client *ProviderClient) AuthenticatedHeaders() map[string]string { - if client.TokenID == "" { - return map[string]string{} - } - return map[string]string{"X-Auth-Token": client.TokenID} -} - -// RequestOpts customizes the behavior of the provider.Request() method. -type RequestOpts struct { - // JSONBody, if provided, will be encoded as JSON and used as the body of the HTTP request. The - // content type of the request will default to "application/json" unless overridden by MoreHeaders. - // It's an error to specify both a JSONBody and a RawBody. - JSONBody interface{} - // RawBody contains an io.ReadSeeker that will be consumed by the request directly. No content-type - // will be set unless one is provided explicitly by MoreHeaders. - RawBody io.ReadSeeker - - // JSONResponse, if provided, will be populated with the contents of the response body parsed as - // JSON. - JSONResponse interface{} - // OkCodes contains a list of numeric HTTP status codes that should be interpreted as success. If - // the response has a different code, an error will be returned. - OkCodes []int - - // MoreHeaders specifies additional HTTP headers to be provide on the request. If a header is - // provided with a blank value (""), that header will be *omitted* instead: use this to suppress - // the default Accept header or an inferred Content-Type, for example. - MoreHeaders map[string]string -} - -func (opts *RequestOpts) setBody(body interface{}) { - if v, ok := (body).(io.ReadSeeker); ok { - opts.RawBody = v - } else if body != nil { - opts.JSONBody = body - } -} - -// UnexpectedResponseCodeError is returned by the Request method when a response code other than -// those listed in OkCodes is encountered. -type UnexpectedResponseCodeError struct { - URL string - Method string - Expected []int - Actual int - Body []byte -} - -func (err *UnexpectedResponseCodeError) Error() string { - return fmt.Sprintf( - "Expected HTTP response code %v when accessing [%s %s], but got %d instead\n%s", - err.Expected, err.Method, err.URL, err.Actual, err.Body, - ) -} - -var applicationJSON = "application/json" - -// Request performs an HTTP request using the ProviderClient's current HTTPClient. 
An authentication -// header will automatically be provided. -func (client *ProviderClient) Request(method, url string, options RequestOpts) (*http.Response, error) { - var body io.ReadSeeker - var contentType *string - - // Derive the content body by either encoding an arbitrary object as JSON, or by taking a provided - // io.ReadSeeker as-is. Default the content-type to application/json. - if options.JSONBody != nil { - if options.RawBody != nil { - panic("Please provide only one of JSONBody or RawBody to gophercloud.Request().") - } - - rendered, err := json.Marshal(options.JSONBody) - if err != nil { - return nil, err - } - - body = bytes.NewReader(rendered) - contentType = &applicationJSON - } - - if options.RawBody != nil { - body = options.RawBody - } - - // Construct the http.Request. - req, err := http.NewRequest(method, url, body) - if err != nil { - return nil, err - } - - // Populate the request headers. Apply options.MoreHeaders last, to give the caller the chance to - // modify or omit any header. - if contentType != nil { - req.Header.Set("Content-Type", *contentType) - } - req.Header.Set("Accept", applicationJSON) - - for k, v := range client.AuthenticatedHeaders() { - req.Header.Add(k, v) - } - - // Set the User-Agent header - req.Header.Set("User-Agent", client.UserAgent.Join()) - - if options.MoreHeaders != nil { - for k, v := range options.MoreHeaders { - if v != "" { - req.Header.Set(k, v) - } else { - req.Header.Del(k) - } - } - } - - // Set connection parameter to close the connection immediately when we've got the response - req.Close = true - - // Issue the request. - resp, err := client.HTTPClient.Do(req) - if err != nil { - return nil, err - } - - if resp.StatusCode == http.StatusUnauthorized { - if client.ReauthFunc != nil { - err = client.ReauthFunc() - if err != nil { - return nil, fmt.Errorf("Error trying to re-authenticate: %s", err) - } - if options.RawBody != nil { - options.RawBody.Seek(0, 0) - } - resp.Body.Close() - resp, err = client.Request(method, url, options) - if err != nil { - return nil, fmt.Errorf("Successfully re-authenticated, but got error executing request: %s", err) - } - - return resp, nil - } - } - - // Allow default OkCodes if none explicitly set - if options.OkCodes == nil { - options.OkCodes = defaultOkCodes(method) - } - - // Validate the HTTP response status. - var ok bool - for _, code := range options.OkCodes { - if resp.StatusCode == code { - ok = true - break - } - } - if !ok { - body, _ := ioutil.ReadAll(resp.Body) - resp.Body.Close() - return resp, &UnexpectedResponseCodeError{ - URL: url, - Method: method, - Expected: options.OkCodes, - Actual: resp.StatusCode, - Body: body, - } - } - - // Parse the response body as JSON, if requested to do so. 
- if options.JSONResponse != nil { - defer resp.Body.Close() - if err := json.NewDecoder(resp.Body).Decode(options.JSONResponse); err != nil { - return nil, err - } - } - - return resp, nil -} - -func defaultOkCodes(method string) []int { - switch { - case method == "GET": - return []int{200} - case method == "POST": - return []int{201, 202} - case method == "PUT": - return []int{201, 202} - case method == "PATCH": - return []int{200, 204} - case method == "DELETE": - return []int{202, 204} - } - - return []int{} -} - -func (client *ProviderClient) Get(url string, JSONResponse *interface{}, opts *RequestOpts) (*http.Response, error) { - if opts == nil { - opts = &RequestOpts{} - } - if JSONResponse != nil { - opts.JSONResponse = JSONResponse - } - return client.Request("GET", url, *opts) -} - -func (client *ProviderClient) Post(url string, body interface{}, JSONResponse *interface{}, opts *RequestOpts) (*http.Response, error) { - if opts == nil { - opts = &RequestOpts{} - } - - opts.setBody(body) - - if JSONResponse != nil { - opts.JSONResponse = JSONResponse - } - - return client.Request("POST", url, *opts) -} - -func (client *ProviderClient) Put(url string, body interface{}, JSONResponse *interface{}, opts *RequestOpts) (*http.Response, error) { - if opts == nil { - opts = &RequestOpts{} - } - - opts.setBody(body) - - if JSONResponse != nil { - opts.JSONResponse = JSONResponse - } - - return client.Request("PUT", url, *opts) -} - -func (client *ProviderClient) Patch(url string, JSONBody interface{}, JSONResponse *interface{}, opts *RequestOpts) (*http.Response, error) { - if opts == nil { - opts = &RequestOpts{} - } - - if v, ok := (JSONBody).(io.ReadSeeker); ok { - opts.RawBody = v - } else if JSONBody != nil { - opts.JSONBody = JSONBody - } - - if JSONResponse != nil { - opts.JSONResponse = JSONResponse - } - - return client.Request("PATCH", url, *opts) -} - -func (client *ProviderClient) Delete(url string, opts *RequestOpts) (*http.Response, error) { - if opts == nil { - opts = &RequestOpts{} - } - - return client.Request("DELETE", url, *opts) -} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/auth_env.go b/vendor/github.com/rackspace/gophercloud/rackspace/auth_env.go deleted file mode 100644 index 5852c3ce7384..000000000000 --- a/vendor/github.com/rackspace/gophercloud/rackspace/auth_env.go +++ /dev/null @@ -1,57 +0,0 @@ -package rackspace - -import ( - "fmt" - "os" - - "github.com/rackspace/gophercloud" -) - -var nilOptions = gophercloud.AuthOptions{} - -// ErrNoAuthUrl, ErrNoUsername, and ErrNoPassword errors indicate of the -// required RS_AUTH_URL, RS_USERNAME, or RS_PASSWORD environment variables, -// respectively, remain undefined. See the AuthOptions() function for more details. -var ( - ErrNoAuthURL = fmt.Errorf("Environment variable RS_AUTH_URL or OS_AUTH_URL need to be set.") - ErrNoUsername = fmt.Errorf("Environment variable RS_USERNAME or OS_USERNAME need to be set.") - ErrNoPassword = fmt.Errorf("Environment variable RS_API_KEY or RS_PASSWORD needs to be set.") -) - -func prefixedEnv(base string) string { - value := os.Getenv("RS_" + base) - if value == "" { - value = os.Getenv("OS_" + base) - } - return value -} - -// AuthOptionsFromEnv fills out an identity.AuthOptions structure with the -// settings found on the various Rackspace RS_* environment variables. 
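// Illustrative sketch, not part of this patch: calling the ProviderClient Request
// method removed above with an explicit RequestOpts. The url value and the
// X-Request-Id header are placeholders.
package clientsketch

import (
	"github.com/rackspace/gophercloud"
)

func fetchJSON(pc *gophercloud.ProviderClient, url string) (map[string]interface{}, error) {
	var body map[string]interface{}

	// JSONResponse asks Request to decode the response body; OkCodes replaces
	// the per-method defaults; a MoreHeaders entry with an empty value would
	// delete a default header rather than set one.
	_, err := pc.Request("GET", url, gophercloud.RequestOpts{
		JSONResponse: &body,
		OkCodes:      []int{200, 203},
		MoreHeaders:  map[string]string{"X-Request-Id": "example"},
	})
	return body, err
}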
-func AuthOptionsFromEnv() (gophercloud.AuthOptions, error) { - authURL := prefixedEnv("AUTH_URL") - username := prefixedEnv("USERNAME") - password := prefixedEnv("PASSWORD") - apiKey := prefixedEnv("API_KEY") - - if authURL == "" { - return nilOptions, ErrNoAuthURL - } - - if username == "" { - return nilOptions, ErrNoUsername - } - - if password == "" && apiKey == "" { - return nilOptions, ErrNoPassword - } - - ao := gophercloud.AuthOptions{ - IdentityEndpoint: authURL, - Username: username, - Password: password, - APIKey: apiKey, - } - - return ao, nil -} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes/delegate.go b/vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes/delegate.go deleted file mode 100644 index 438349410a6d..000000000000 --- a/vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes/delegate.go +++ /dev/null @@ -1,75 +0,0 @@ -package volumes - -import ( - "fmt" - - "github.com/rackspace/gophercloud" - os "github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes" - "github.com/rackspace/gophercloud/pagination" -) - -type CreateOpts struct { - os.CreateOpts -} - -func (opts CreateOpts) ToVolumeCreateMap() (map[string]interface{}, error) { - if opts.Size < 75 || opts.Size > 1024 { - return nil, fmt.Errorf("Size field must be between 75 and 1024") - } - - return opts.CreateOpts.ToVolumeCreateMap() -} - -// Create will create a new Volume based on the values in CreateOpts. To extract -// the Volume object from the response, call the Extract method on the -// CreateResult. -func Create(client *gophercloud.ServiceClient, opts os.CreateOptsBuilder) CreateResult { - return CreateResult{os.Create(client, opts)} -} - -// Delete will delete the existing Volume with the provided ID. -func Delete(client *gophercloud.ServiceClient, id string) os.DeleteResult { - return os.Delete(client, id) -} - -// Get retrieves the Volume with the provided ID. To extract the Volume object -// from the response, call the Extract method on the GetResult. -func Get(client *gophercloud.ServiceClient, id string) GetResult { - return GetResult{os.Get(client, id)} -} - -// List returns volumes optionally limited by the conditions provided in ListOpts. -func List(client *gophercloud.ServiceClient) pagination.Pager { - return os.List(client, os.ListOpts{}) -} - -// UpdateOpts contain options for updating an existing Volume. This object is passed -// to the volumes.Update function. For more information about the parameters, see -// the Volume object. -type UpdateOpts struct { - // OPTIONAL - Name string - // OPTIONAL - Description string -} - -// ToVolumeUpdateMap assembles a request body based on the contents of an -// UpdateOpts. -func (opts UpdateOpts) ToVolumeUpdateMap() (map[string]interface{}, error) { - v := make(map[string]interface{}) - - if opts.Description != "" { - v["display_description"] = opts.Description - } - if opts.Name != "" { - v["display_name"] = opts.Name - } - - return map[string]interface{}{"volume": v}, nil -} - -// Update will update the Volume with provided information. To extract the updated -// Volume from the response, call the Extract method on the UpdateResult. 
-func Update(client *gophercloud.ServiceClient, id string, opts os.UpdateOptsBuilder) UpdateResult { - return UpdateResult{os.Update(client, id, opts)} -} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes/doc.go deleted file mode 100644 index b2be25c53817..000000000000 --- a/vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package volumes provides information and interaction with the volume -// API resource for the Rackspace Block Storage service. -package volumes diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes/results.go b/vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes/results.go deleted file mode 100644 index c7c2cc498412..000000000000 --- a/vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes/results.go +++ /dev/null @@ -1,66 +0,0 @@ -package volumes - -import ( - os "github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes" - "github.com/rackspace/gophercloud/pagination" - - "github.com/mitchellh/mapstructure" -) - -// Volume wraps an Openstack volume -type Volume os.Volume - -// CreateResult represents the result of a create operation -type CreateResult struct { - os.CreateResult -} - -// GetResult represents the result of a get operation -type GetResult struct { - os.GetResult -} - -// UpdateResult represents the result of an update operation -type UpdateResult struct { - os.UpdateResult -} - -func commonExtract(resp interface{}, err error) (*Volume, error) { - if err != nil { - return nil, err - } - - var respStruct struct { - Volume *Volume `json:"volume"` - } - - err = mapstructure.Decode(resp, &respStruct) - - return respStruct.Volume, err -} - -// Extract will get the Volume object out of the GetResult object. -func (r GetResult) Extract() (*Volume, error) { - return commonExtract(r.Body, r.Err) -} - -// Extract will get the Volume object out of the CreateResult object. -func (r CreateResult) Extract() (*Volume, error) { - return commonExtract(r.Body, r.Err) -} - -// Extract will get the Volume object out of the UpdateResult object. -func (r UpdateResult) Extract() (*Volume, error) { - return commonExtract(r.Body, r.Err) -} - -// ExtractVolumes extracts and returns Volumes. It is used while iterating over a volumes.List call. -func ExtractVolumes(page pagination.Page) ([]Volume, error) { - var response struct { - Volumes []Volume `json:"volumes"` - } - - err := mapstructure.Decode(page.(os.ListResult).Body, &response) - - return response.Volumes, err -} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/client.go b/vendor/github.com/rackspace/gophercloud/rackspace/client.go deleted file mode 100644 index 09e9006c939a..000000000000 --- a/vendor/github.com/rackspace/gophercloud/rackspace/client.go +++ /dev/null @@ -1,234 +0,0 @@ -package rackspace - -import ( - "fmt" - - "github.com/rackspace/gophercloud" - os "github.com/rackspace/gophercloud/openstack" - "github.com/rackspace/gophercloud/openstack/utils" - tokens2 "github.com/rackspace/gophercloud/rackspace/identity/v2/tokens" -) - -const ( - // RackspaceUSIdentity is an identity endpoint located in the United States. - RackspaceUSIdentity = "https://identity.api.rackspacecloud.com/v2.0/" - - // RackspaceUKIdentity is an identity endpoint located in the UK. 
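// Illustrative sketch, not part of this patch: the Rackspace volumes delegate
// removed above wraps the OpenStack package but enforces a 75-1024 GB size range.
// The Size value and volumeID parameter are placeholders; fields of os.CreateOpts
// other than Size are not shown in this hunk.
package volumesketch

import (
	"github.com/rackspace/gophercloud"
	osvolumes "github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes"
	raxvolumes "github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes"
)

func createAndRename(client *gophercloud.ServiceClient, volumeID string) error {
	// Sizes outside 75-1024 are rejected by ToVolumeCreateMap before any
	// request is sent.
	createOpts := raxvolumes.CreateOpts{CreateOpts: osvolumes.CreateOpts{Size: 100}}
	if _, err := raxvolumes.Create(client, createOpts).Extract(); err != nil {
		return err
	}

	// UpdateOpts maps Name/Description onto display_name/display_description.
	_, err := raxvolumes.Update(client, volumeID, raxvolumes.UpdateOpts{Name: "renamed"}).Extract()
	return err
}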
- RackspaceUKIdentity = "https://lon.identity.api.rackspacecloud.com/v2.0/" -) - -const ( - v20 = "v2.0" -) - -// NewClient creates a client that's prepared to communicate with the Rackspace API, but is not -// yet authenticated. Most users will probably prefer using the AuthenticatedClient function -// instead. -// -// Provide the base URL of the identity endpoint you wish to authenticate against as "endpoint". -// Often, this will be either RackspaceUSIdentity or RackspaceUKIdentity. -func NewClient(endpoint string) (*gophercloud.ProviderClient, error) { - if endpoint == "" { - return os.NewClient(RackspaceUSIdentity) - } - return os.NewClient(endpoint) -} - -// AuthenticatedClient logs in to Rackspace with the provided credentials and constructs a -// ProviderClient that's ready to operate. -// -// If the provided AuthOptions does not specify an explicit IdentityEndpoint, it will default to -// the canonical, production Rackspace US identity endpoint. -func AuthenticatedClient(options gophercloud.AuthOptions) (*gophercloud.ProviderClient, error) { - client, err := NewClient(options.IdentityEndpoint) - if err != nil { - return nil, err - } - - err = Authenticate(client, options) - if err != nil { - return nil, err - } - return client, nil -} - -// Authenticate or re-authenticate against the most recent identity service supported at the -// provided endpoint. -func Authenticate(client *gophercloud.ProviderClient, options gophercloud.AuthOptions) error { - versions := []*utils.Version{ - &utils.Version{ID: v20, Priority: 20, Suffix: "/v2.0/"}, - } - - chosen, endpoint, err := utils.ChooseVersion(client, versions) - if err != nil { - return err - } - - switch chosen.ID { - case v20: - return v2auth(client, endpoint, options) - default: - // The switch statement must be out of date from the versions list. - return fmt.Errorf("Unrecognized identity version: %s", chosen.ID) - } -} - -// AuthenticateV2 explicitly authenticates with v2 of the identity service. -func AuthenticateV2(client *gophercloud.ProviderClient, options gophercloud.AuthOptions) error { - return v2auth(client, "", options) -} - -func v2auth(client *gophercloud.ProviderClient, endpoint string, options gophercloud.AuthOptions) error { - v2Client := NewIdentityV2(client) - if endpoint != "" { - v2Client.Endpoint = endpoint - } - - result := tokens2.Create(v2Client, tokens2.WrapOptions(options)) - - token, err := result.ExtractToken() - if err != nil { - return err - } - - catalog, err := result.ExtractServiceCatalog() - if err != nil { - return err - } - - if options.AllowReauth { - client.ReauthFunc = func() error { - return AuthenticateV2(client, options) - } - } - client.TokenID = token.ID - client.EndpointLocator = func(opts gophercloud.EndpointOpts) (string, error) { - return os.V2EndpointURL(catalog, opts) - } - - return nil -} - -// NewIdentityV2 creates a ServiceClient that may be used to access the v2 identity service. -func NewIdentityV2(client *gophercloud.ProviderClient) *gophercloud.ServiceClient { - v2Endpoint := client.IdentityBase + "v2.0/" - - return &gophercloud.ServiceClient{ - ProviderClient: client, - Endpoint: v2Endpoint, - } -} - -// NewComputeV2 creates a ServiceClient that may be used to access the v2 compute service. 
-func NewComputeV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - eo.ApplyDefaults("compute") - url, err := client.EndpointLocator(eo) - if err != nil { - return nil, err - } - - return &gophercloud.ServiceClient{ - ProviderClient: client, - Endpoint: url, - }, nil -} - -// NewObjectCDNV1 creates a ServiceClient that may be used with the Rackspace v1 CDN. -func NewObjectCDNV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - eo.ApplyDefaults("rax:object-cdn") - url, err := client.EndpointLocator(eo) - if err != nil { - return nil, err - } - return &gophercloud.ServiceClient{ProviderClient: client, Endpoint: url}, nil -} - -// NewObjectStorageV1 creates a ServiceClient that may be used with the Rackspace v1 object storage package. -func NewObjectStorageV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return os.NewObjectStorageV1(client, eo) -} - -// NewBlockStorageV1 creates a ServiceClient that can be used to access the -// Rackspace Cloud Block Storage v1 API. -func NewBlockStorageV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - eo.ApplyDefaults("volume") - url, err := client.EndpointLocator(eo) - if err != nil { - return nil, err - } - - return &gophercloud.ServiceClient{ProviderClient: client, Endpoint: url}, nil -} - -// NewLBV1 creates a ServiceClient that can be used to access the Rackspace -// Cloud Load Balancer v1 API. -func NewLBV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - eo.ApplyDefaults("rax:load-balancer") - url, err := client.EndpointLocator(eo) - if err != nil { - return nil, err - } - return &gophercloud.ServiceClient{ProviderClient: client, Endpoint: url}, nil -} - -// NewNetworkV2 creates a ServiceClient that can be used to access the Rackspace -// Networking v2 API. -func NewNetworkV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - eo.ApplyDefaults("network") - url, err := client.EndpointLocator(eo) - if err != nil { - return nil, err - } - return &gophercloud.ServiceClient{ProviderClient: client, Endpoint: url}, nil -} - -// NewCDNV1 creates a ServiceClient that may be used to access the Rackspace v1 -// CDN service. -func NewCDNV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - eo.ApplyDefaults("rax:cdn") - url, err := client.EndpointLocator(eo) - if err != nil { - return nil, err - } - return &gophercloud.ServiceClient{ProviderClient: client, Endpoint: url}, nil -} - -// NewOrchestrationV1 creates a ServiceClient that may be used to access the v1 orchestration service. -func NewOrchestrationV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - eo.ApplyDefaults("orchestration") - url, err := client.EndpointLocator(eo) - if err != nil { - return nil, err - } - return &gophercloud.ServiceClient{ProviderClient: client, Endpoint: url}, nil -} - -// NewRackConnectV3 creates a ServiceClient that may be used to access the v3 RackConnect service. 
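// Illustrative sketch, not part of this patch: the bootstrap path through the
// rackspace package removed above, from RS_*/OS_* environment variables to a
// compute ServiceClient. Only constructors shown in this hunk are used; the zero
// EndpointOpts leaves endpoint selection to ApplyDefaults("compute").
package bootstrapsketch

import (
	"github.com/rackspace/gophercloud"
	"github.com/rackspace/gophercloud/rackspace"
)

func computeClient() (*gophercloud.ServiceClient, error) {
	// Reads RS_AUTH_URL/RS_USERNAME/RS_PASSWORD (or RS_API_KEY), falling back
	// to the OS_* equivalents.
	opts, err := rackspace.AuthOptionsFromEnv()
	if err != nil {
		return nil, err
	}

	// Authenticates against v2.0 identity and wires up TokenID and
	// EndpointLocator (plus ReauthFunc when AllowReauth is set).
	provider, err := rackspace.AuthenticatedClient(opts)
	if err != nil {
		return nil, err
	}

	// Resolves the "compute" endpoint from the service catalog.
	return rackspace.NewComputeV2(provider, gophercloud.EndpointOpts{})
}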
-func NewRackConnectV3(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - eo.ApplyDefaults("rax:rackconnect") - url, err := client.EndpointLocator(eo) - if err != nil { - return nil, err - } - return &gophercloud.ServiceClient{ProviderClient: client, Endpoint: url}, nil -} - -// NewDBV1 creates a ServiceClient that may be used to access the v1 DB service. -func NewDBV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - eo.ApplyDefaults("rax:database") - url, err := client.EndpointLocator(eo) - if err != nil { - return nil, err - } - return &gophercloud.ServiceClient{ProviderClient: client, Endpoint: url}, nil -} - -// NewAutoScaleV1 creates a ServiceClient that may be used to access the v1 Auto Scale service. -func NewAutoScaleV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - eo.ApplyDefaults("rax:autoscale") - url, err := client.EndpointLocator(eo) - if err != nil { - return nil, err - } - return &gophercloud.ServiceClient{ProviderClient: client, Endpoint: url}, nil -} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/delegate.go b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/delegate.go deleted file mode 100644 index 7810d156a0fb..000000000000 --- a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/delegate.go +++ /dev/null @@ -1,116 +0,0 @@ -package servers - -import ( - "github.com/rackspace/gophercloud" - os "github.com/rackspace/gophercloud/openstack/compute/v2/servers" - "github.com/rackspace/gophercloud/pagination" -) - -// List makes a request against the API to list servers accessible to you. -func List(client *gophercloud.ServiceClient, opts os.ListOptsBuilder) pagination.Pager { - return os.List(client, opts) -} - -// Create requests a server to be provisioned to the user in the current tenant. -func Create(client *gophercloud.ServiceClient, opts os.CreateOptsBuilder) os.CreateResult { - return os.Create(client, opts) -} - -// Update requests an existing server to be updated with the supplied options. -func Update(client *gophercloud.ServiceClient, id string, opts os.UpdateOptsBuilder) os.UpdateResult { - return os.Update(client, id, opts) -} - -// Delete requests that a server previously provisioned be removed from your account. -func Delete(client *gophercloud.ServiceClient, id string) os.DeleteResult { - return os.Delete(client, id) -} - -// Get requests details on a single server, by ID. -func Get(client *gophercloud.ServiceClient, id string) os.GetResult { - return os.Get(client, id) -} - -// ChangeAdminPassword alters the administrator or root password for a specified server. -func ChangeAdminPassword(client *gophercloud.ServiceClient, id, newPassword string) os.ActionResult { - return os.ChangeAdminPassword(client, id, newPassword) -} - -// Reboot requests that a given server reboot. Two methods exist for rebooting a server: -// -// os.HardReboot (aka PowerCycle) restarts the server instance by physically cutting power to the -// machine, or if a VM, terminating it at the hypervisor level. It's done. Caput. Full stop. Then, -// after a brief wait, power is restored or the VM instance restarted. -// -// os.SoftReboot (aka OSReboot) simply tells the OS to restart under its own procedures. E.g., in -// Linux, asking it to enter runlevel 6, or executing "sudo shutdown -r now", or by asking Windows to restart the machine. 
-func Reboot(client *gophercloud.ServiceClient, id string, how os.RebootMethod) os.ActionResult { - return os.Reboot(client, id, how) -} - -// Rebuild will reprovision the server according to the configuration options provided in the -// RebuildOpts struct. -func Rebuild(client *gophercloud.ServiceClient, id string, opts os.RebuildOptsBuilder) os.RebuildResult { - return os.Rebuild(client, id, opts) -} - -// Resize instructs the provider to change the flavor of the server. -// Note that this implies rebuilding it. -// Unfortunately, one cannot pass rebuild parameters to the resize function. -// When the resize completes, the server will be in RESIZE_VERIFY state. -// While in this state, you can explore the use of the new server's configuration. -// If you like it, call ConfirmResize() to commit the resize permanently. -// Otherwise, call RevertResize() to restore the old configuration. -func Resize(client *gophercloud.ServiceClient, id string, opts os.ResizeOptsBuilder) os.ActionResult { - return os.Resize(client, id, opts) -} - -// ConfirmResize confirms a previous resize operation on a server. -// See Resize() for more details. -func ConfirmResize(client *gophercloud.ServiceClient, id string) os.ActionResult { - return os.ConfirmResize(client, id) -} - -// WaitForStatus will continually poll a server until it successfully transitions to a specified -// status. It will do this for at most the number of seconds specified. -func WaitForStatus(c *gophercloud.ServiceClient, id, status string, secs int) error { - return os.WaitForStatus(c, id, status, secs) -} - -// ExtractServers interprets the results of a single page from a List() call, producing a slice of Server entities. -func ExtractServers(page pagination.Page) ([]os.Server, error) { - return os.ExtractServers(page) -} - -// ListAddresses makes a request against the API to list the servers IP addresses. -func ListAddresses(client *gophercloud.ServiceClient, id string) pagination.Pager { - return os.ListAddresses(client, id) -} - -// ExtractAddresses interprets the results of a single page from a ListAddresses() call, producing a map of Address slices. -func ExtractAddresses(page pagination.Page) (map[string][]os.Address, error) { - return os.ExtractAddresses(page) -} - -// ListAddressesByNetwork makes a request against the API to list the servers IP addresses -// for the given network. -func ListAddressesByNetwork(client *gophercloud.ServiceClient, id, network string) pagination.Pager { - return os.ListAddressesByNetwork(client, id, network) -} - -// ExtractNetworkAddresses interprets the results of a single page from a ListAddressesByNetwork() call, producing a map of Address slices. -func ExtractNetworkAddresses(page pagination.Page) ([]os.Address, error) { - return os.ExtractNetworkAddresses(page) -} - -// Metadata requests all the metadata for the given server ID. -func Metadata(client *gophercloud.ServiceClient, id string) os.GetMetadataResult { - return os.Metadata(client, id) -} - -// UpdateMetadata updates (or creates) all the metadata specified by opts for the given server ID. -// This operation does not affect already-existing metadata that is not specified -// by opts. 
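// Illustrative sketch, not part of this patch: walking the servers delegate
// removed above and soft-rebooting each server. ListOpts and the Server.ID field
// come from the embedded OpenStack servers package and are assumed here;
// everything else is visible in this hunk.
package serversketch

import (
	"github.com/rackspace/gophercloud"
	osservers "github.com/rackspace/gophercloud/openstack/compute/v2/servers"
	"github.com/rackspace/gophercloud/pagination"
	raxservers "github.com/rackspace/gophercloud/rackspace/compute/v2/servers"
)

func softRebootAll(compute *gophercloud.ServiceClient) error {
	return raxservers.List(compute, osservers.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
		servers, err := raxservers.ExtractServers(page)
		if err != nil {
			return false, err
		}
		for _, s := range servers {
			// SoftReboot asks the guest OS to restart itself.
			if res := raxservers.Reboot(compute, s.ID, osservers.SoftReboot); res.Err != nil {
				return false, res.Err
			}
			// Poll for up to 300 seconds until the server is ACTIVE again.
			if err := raxservers.WaitForStatus(compute, s.ID, "ACTIVE", 300); err != nil {
				return false, err
			}
		}
		return true, nil
	})
}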
-func UpdateMetadata(client *gophercloud.ServiceClient, id string, opts os.UpdateMetadataOptsBuilder) os.UpdateMetadataResult { - return os.UpdateMetadata(client, id, opts) -} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/doc.go deleted file mode 100644 index c9f77f6945df..000000000000 --- a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package servers provides information and interaction with the server -// API resource for the Rackspace Cloud Servers service. -package servers diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/fixtures.go b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/fixtures.go deleted file mode 100644 index 75cccd04181f..000000000000 --- a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/fixtures.go +++ /dev/null @@ -1,574 +0,0 @@ -// +build fixtures - -package servers - -import ( - os "github.com/rackspace/gophercloud/openstack/compute/v2/servers" -) - -// ListOutput is the recorded output of a Rackspace servers.List request. -const ListOutput = ` -{ - "servers": [ - { - "OS-DCF:diskConfig": "MANUAL", - "OS-EXT-STS:power_state": 1, - "OS-EXT-STS:task_state": null, - "OS-EXT-STS:vm_state": "active", - "accessIPv4": "1.2.3.4", - "accessIPv6": "1111:4822:7818:121:2000:9b5e:7438:a2d0", - "addresses": { - "private": [ - { - "addr": "10.208.230.113", - "version": 4 - } - ], - "public": [ - { - "addr": "2001:4800:7818:101:2000:9b5e:7428:a2d0", - "version": 6 - }, - { - "addr": "104.130.131.164", - "version": 4 - } - ] - }, - "created": "2014-09-23T12:34:58Z", - "flavor": { - "id": "performance1-8", - "links": [ - { - "href": "https://dfw.servers.api.rackspacecloud.com/111111/flavors/performance1-8", - "rel": "bookmark" - } - ] - }, - "hostId": "e8951a524bc465b0898aeac7674da6fe1495e253ae1ea17ddb2c2475", - "id": "59818cee-bc8c-44eb-8073-673ee65105f7", - "image": { - "id": "255df5fb-e3d4-45a3-9a07-c976debf7c14", - "links": [ - { - "href": "https://dfw.servers.api.rackspacecloud.com/111111/images/255df5fb-e3d4-45a3-9a07-c976debf7c14", - "rel": "bookmark" - } - ] - }, - "key_name": "mykey", - "links": [ - { - "href": "https://dfw.servers.api.rackspacecloud.com/v2/111111/servers/59818cee-bc8c-44eb-8073-673ee65105f7", - "rel": "self" - }, - { - "href": "https://dfw.servers.api.rackspacecloud.com/111111/servers/59818cee-bc8c-44eb-8073-673ee65105f7", - "rel": "bookmark" - } - ], - "metadata": {}, - "name": "devstack", - "progress": 100, - "status": "ACTIVE", - "tenant_id": "111111", - "updated": "2014-09-23T12:38:19Z", - "user_id": "14ae7bb21d81422694655f3cc30f2930" - }, - { - "OS-DCF:diskConfig": "MANUAL", - "OS-EXT-STS:power_state": 1, - "OS-EXT-STS:task_state": null, - "OS-EXT-STS:vm_state": "active", - "accessIPv4": "1.1.2.3", - "accessIPv6": "2222:4444:7817:101:be76:4eff:f0e5:9e02", - "addresses": { - "private": [ - { - "addr": "10.10.20.30", - "version": 4 - } - ], - "public": [ - { - "addr": "1.1.2.3", - "version": 4 - }, - { - "addr": "2222:4444:7817:101:be76:4eff:f0e5:9e02", - "version": 6 - } - ] - }, - "created": "2014-07-21T19:32:55Z", - "flavor": { - "id": "performance1-2", - "links": [ - { - "href": "https://dfw.servers.api.rackspacecloud.com/111111/flavors/performance1-2", - "rel": "bookmark" - } - ] - }, - "hostId": "f859679906d6b1a38c1bd516b78f4dcc7d5fcf012578fa3ce460716c", - "id": 
"25f1c7f5-e00a-4715-b354-16e24b2f4630", - "image": { - "id": "bb02b1a3-bc77-4d17-ab5b-421d89850fca", - "links": [ - { - "href": "https://dfw.servers.api.rackspacecloud.com/111111/images/bb02b1a3-bc77-4d17-ab5b-421d89850fca", - "rel": "bookmark" - } - ] - }, - "key_name": "otherkey", - "links": [ - { - "href": "https://dfw.servers.api.rackspacecloud.com/v2/111111/servers/25f1c7f5-e00a-4715-b355-16e24b2f4630", - "rel": "self" - }, - { - "href": "https://dfw.servers.api.rackspacecloud.com/111111/servers/25f1c7f5-e00a-4715-b355-16e24b2f4630", - "rel": "bookmark" - } - ], - "metadata": {}, - "name": "peril-dfw", - "progress": 100, - "status": "ACTIVE", - "tenant_id": "111111", - "updated": "2014-07-21T19:34:24Z", - "user_id": "14ae7bb21d81422694655f3cc30f2930" - } - ] -} -` - -// GetOutput is the recorded output of a Rackspace servers.Get request. -const GetOutput = ` -{ - "server": { - "OS-DCF:diskConfig": "AUTO", - "OS-EXT-STS:power_state": 1, - "OS-EXT-STS:task_state": null, - "OS-EXT-STS:vm_state": "active", - "accessIPv4": "1.2.4.8", - "accessIPv6": "2001:4800:6666:105:2a0f:c056:f594:7777", - "addresses": { - "private": [ - { - "addr": "10.20.40.80", - "version": 4 - } - ], - "public": [ - { - "addr": "1.2.4.8", - "version": 4 - }, - { - "addr": "2001:4800:6666:105:2a0f:c056:f594:7777", - "version": 6 - } - ] - }, - "created": "2014-10-21T14:42:16Z", - "flavor": { - "id": "performance1-1", - "links": [ - { - "href": "https://dfw.servers.api.rackspacecloud.com/111111/flavors/performance1-1", - "rel": "bookmark" - } - ] - }, - "hostId": "430d2ae02de0a7af77012c94778145eccf67e75b1fac0528aa10d4a7", - "id": "8c65cb68-0681-4c30-bc88-6b83a8a26aee", - "image": { - "id": "e19a734c-c7e6-443a-830c-242209c4d65d", - "links": [ - { - "href": "https://dfw.servers.api.rackspacecloud.com/111111/images/e19a734c-c7e6-443a-830c-242209c4d65d", - "rel": "bookmark" - } - ] - }, - "key_name": null, - "links": [ - { - "href": "https://dfw.servers.api.rackspacecloud.com/v2/111111/servers/8c65cb68-0681-4c30-bc88-6b83a8a26aee", - "rel": "self" - }, - { - "href": "https://dfw.servers.api.rackspacecloud.com/111111/servers/8c65cb68-0681-4c30-bc88-6b83a8a26aee", - "rel": "bookmark" - } - ], - "metadata": {}, - "name": "Gophercloud-pxpGGuey", - "progress": 100, - "status": "ACTIVE", - "tenant_id": "111111", - "updated": "2014-10-21T14:42:57Z", - "user_id": "14ae7bb21d81423694655f4dd30f2930" - } -} -` - -// UpdateOutput is the recorded output of a Rackspace servers.Update request. 
-const UpdateOutput = ` -{ - "server": { - "OS-DCF:diskConfig": "AUTO", - "OS-EXT-STS:power_state": 1, - "OS-EXT-STS:task_state": null, - "OS-EXT-STS:vm_state": "active", - "accessIPv4": "1.2.4.8", - "accessIPv6": "2001:4800:6666:105:2a0f:c056:f594:7777", - "addresses": { - "private": [ - { - "addr": "10.20.40.80", - "version": 4 - } - ], - "public": [ - { - "addr": "1.2.4.8", - "version": 4 - }, - { - "addr": "2001:4800:6666:105:2a0f:c056:f594:7777", - "version": 6 - } - ] - }, - "created": "2014-10-21T14:42:16Z", - "flavor": { - "id": "performance1-1", - "links": [ - { - "href": "https://dfw.servers.api.rackspacecloud.com/111111/flavors/performance1-1", - "rel": "bookmark" - } - ] - }, - "hostId": "430d2ae02de0a7af77012c94778145eccf67e75b1fac0528aa10d4a7", - "id": "8c65cb68-0681-4c30-bc88-6b83a8a26aee", - "image": { - "id": "e19a734c-c7e6-443a-830c-242209c4d65d", - "links": [ - { - "href": "https://dfw.servers.api.rackspacecloud.com/111111/images/e19a734c-c7e6-443a-830c-242209c4d65d", - "rel": "bookmark" - } - ] - }, - "key_name": null, - "links": [ - { - "href": "https://dfw.servers.api.rackspacecloud.com/v2/111111/servers/8c65cb68-0681-4c30-bc88-6b83a8a26aee", - "rel": "self" - }, - { - "href": "https://dfw.servers.api.rackspacecloud.com/111111/servers/8c65cb68-0681-4c30-bc88-6b83a8a26aee", - "rel": "bookmark" - } - ], - "metadata": {}, - "name": "test-server-updated", - "progress": 100, - "status": "ACTIVE", - "tenant_id": "111111", - "updated": "2014-10-21T14:42:57Z", - "user_id": "14ae7bb21d81423694655f4dd30f2930" - } -} -` - -// CreateOutput contains a sample of Rackspace's response to a Create call. -const CreateOutput = ` -{ - "server": { - "OS-DCF:diskConfig": "AUTO", - "adminPass": "v7tADqbE5pr9", - "id": "bb63327b-6a2f-34bc-b0ef-4b6d97ea637e", - "links": [ - { - "href": "https://dfw.servers.api.rackspacecloud.com/v2/111111/servers/bb63327b-6a2f-34bc-b0ef-4b6d97ea637e", - "rel": "self" - }, - { - "href": "https://dfw.servers.api.rackspacecloud.com/111111/servers/bb63327b-6a2f-34bc-b0ef-4b6d97ea637e", - "rel": "bookmark" - } - ] - } -} -` - -// DevstackServer is the expected first result from parsing ListOutput. 
-var DevstackServer = os.Server{ - ID: "59818cee-bc8c-44eb-8073-673ee65105f7", - Name: "devstack", - TenantID: "111111", - UserID: "14ae7bb21d81422694655f3cc30f2930", - HostID: "e8951a524bc465b0898aeac7674da6fe1495e253ae1ea17ddb2c2475", - Updated: "2014-09-23T12:38:19Z", - Created: "2014-09-23T12:34:58Z", - AccessIPv4: "1.2.3.4", - AccessIPv6: "1111:4822:7818:121:2000:9b5e:7438:a2d0", - Progress: 100, - Status: "ACTIVE", - Image: map[string]interface{}{ - "id": "255df5fb-e3d4-45a3-9a07-c976debf7c14", - "links": []interface{}{ - map[string]interface{}{ - "href": "https://dfw.servers.api.rackspacecloud.com/111111/images/255df5fb-e3d4-45a3-9a07-c976debf7c14", - "rel": "bookmark", - }, - }, - }, - Flavor: map[string]interface{}{ - "id": "performance1-8", - "links": []interface{}{ - map[string]interface{}{ - "href": "https://dfw.servers.api.rackspacecloud.com/111111/flavors/performance1-8", - "rel": "bookmark", - }, - }, - }, - Addresses: map[string]interface{}{ - "private": []interface{}{ - map[string]interface{}{ - "addr": "10.20.30.40", - "version": float64(4.0), - }, - }, - "public": []interface{}{ - map[string]interface{}{ - "addr": "1111:4822:7818:121:2000:9b5e:7438:a2d0", - "version": float64(6.0), - }, - map[string]interface{}{ - "addr": "1.2.3.4", - "version": float64(4.0), - }, - }, - }, - Metadata: map[string]interface{}{}, - Links: []interface{}{ - map[string]interface{}{ - "href": "https://dfw.servers.api.rackspacecloud.com/v2/111111/servers/59918cee-bd9d-44eb-8173-673ee75105f7", - "rel": "self", - }, - map[string]interface{}{ - "href": "https://dfw.servers.api.rackspacecloud.com/v2/111111/servers/59818cee-bc8c-44eb-8073-673ee65105f7", - "rel": "bookmark", - }, - }, - KeyName: "mykey", - AdminPass: "", -} - -// PerilServer is the expected second result from parsing ListOutput. 
-var PerilServer = os.Server{ - ID: "25f1c7f5-e00a-4715-b354-16e24b2f4630", - Name: "peril-dfw", - TenantID: "111111", - UserID: "14ae7bb21d81422694655f3cc30f2930", - HostID: "f859679906d6b1a38c1bd516b78f4dcc7d5fcf012578fa3ce460716c", - Updated: "2014-07-21T19:34:24Z", - Created: "2014-07-21T19:32:55Z", - AccessIPv4: "1.1.2.3", - AccessIPv6: "2222:4444:7817:101:be76:4eff:f0e5:9e02", - Progress: 100, - Status: "ACTIVE", - Image: map[string]interface{}{ - "id": "bb02b1a3-bc77-4d17-ab5b-421d89850fca", - "links": []interface{}{ - map[string]interface{}{ - "href": "https://dfw.servers.api.rackspacecloud.com/111111/images/bb02b1a3-bc77-4d17-ab5b-421d89850fca", - "rel": "bookmark", - }, - }, - }, - Flavor: map[string]interface{}{ - "id": "performance1-2", - "links": []interface{}{ - map[string]interface{}{ - "href": "https://dfw.servers.api.rackspacecloud.com/111111/flavors/performance1-2", - "rel": "bookmark", - }, - }, - }, - Addresses: map[string]interface{}{ - "private": []interface{}{ - map[string]interface{}{ - "addr": "10.10.20.30", - "version": float64(4.0), - }, - }, - "public": []interface{}{ - map[string]interface{}{ - "addr": "2222:4444:7817:101:be76:4eff:f0e5:9e02", - "version": float64(6.0), - }, - map[string]interface{}{ - "addr": "1.1.2.3", - "version": float64(4.0), - }, - }, - }, - Metadata: map[string]interface{}{}, - Links: []interface{}{ - map[string]interface{}{ - "href": "https://dfw.servers.api.rackspacecloud.com/v2/111111/servers/25f1c7f5-e00a-4715-b355-16e24b2f4630", - "rel": "self", - }, - map[string]interface{}{ - "href": "https://dfw.servers.api.rackspacecloud.com/v2/111111/servers/25f1c7f5-e00a-4715-b355-16e24b2f4630", - "rel": "bookmark", - }, - }, - KeyName: "otherkey", - AdminPass: "", -} - -// GophercloudServer is the expected result from parsing GetOutput. 
-var GophercloudServer = os.Server{ - ID: "8c65cb68-0681-4c30-bc88-6b83a8a26aee", - Name: "Gophercloud-pxpGGuey", - TenantID: "111111", - UserID: "14ae7bb21d81423694655f4dd30f2930", - HostID: "430d2ae02de0a7af77012c94778145eccf67e75b1fac0528aa10d4a7", - Updated: "2014-10-21T14:42:57Z", - Created: "2014-10-21T14:42:16Z", - AccessIPv4: "1.2.4.8", - AccessIPv6: "2001:4800:6666:105:2a0f:c056:f594:7777", - Progress: 100, - Status: "ACTIVE", - Image: map[string]interface{}{ - "id": "e19a734c-c7e6-443a-830c-242209c4d65d", - "links": []interface{}{ - map[string]interface{}{ - "href": "https://dfw.servers.api.rackspacecloud.com/111111/images/e19a734c-c7e6-443a-830c-242209c4d65d", - "rel": "bookmark", - }, - }, - }, - Flavor: map[string]interface{}{ - "id": "performance1-1", - "links": []interface{}{ - map[string]interface{}{ - "href": "https://dfw.servers.api.rackspacecloud.com/111111/flavors/performance1-1", - "rel": "bookmark", - }, - }, - }, - Addresses: map[string]interface{}{ - "private": []interface{}{ - map[string]interface{}{ - "addr": "10.20.40.80", - "version": float64(4.0), - }, - }, - "public": []interface{}{ - map[string]interface{}{ - "addr": "2001:4800:6666:105:2a0f:c056:f594:7777", - "version": float64(6.0), - }, - map[string]interface{}{ - "addr": "1.2.4.8", - "version": float64(4.0), - }, - }, - }, - Metadata: map[string]interface{}{}, - Links: []interface{}{ - map[string]interface{}{ - "href": "https://dfw.servers.api.rackspacecloud.com/v2/111111/servers/8c65cb68-0681-4c30-bc88-6b83a8a26aee", - "rel": "self", - }, - map[string]interface{}{ - "href": "https://dfw.servers.api.rackspacecloud.com/111111/servers/8c65cb68-0681-4c30-bc88-6b83a8a26aee", - "rel": "bookmark", - }, - }, - KeyName: "", - AdminPass: "", -} - -// GophercloudUpdatedServer is the expected result from parsing UpdateOutput. 
-var GophercloudUpdatedServer = os.Server{ - ID: "8c65cb68-0681-4c30-bc88-6b83a8a26aee", - Name: "test-server-updated", - TenantID: "111111", - UserID: "14ae7bb21d81423694655f4dd30f2930", - HostID: "430d2ae02de0a7af77012c94778145eccf67e75b1fac0528aa10d4a7", - Updated: "2014-10-21T14:42:57Z", - Created: "2014-10-21T14:42:16Z", - AccessIPv4: "1.2.4.8", - AccessIPv6: "2001:4800:6666:105:2a0f:c056:f594:7777", - Progress: 100, - Status: "ACTIVE", - Image: map[string]interface{}{ - "id": "e19a734c-c7e6-443a-830c-242209c4d65d", - "links": []interface{}{ - map[string]interface{}{ - "href": "https://dfw.servers.api.rackspacecloud.com/111111/images/e19a734c-c7e6-443a-830c-242209c4d65d", - "rel": "bookmark", - }, - }, - }, - Flavor: map[string]interface{}{ - "id": "performance1-1", - "links": []interface{}{ - map[string]interface{}{ - "href": "https://dfw.servers.api.rackspacecloud.com/111111/flavors/performance1-1", - "rel": "bookmark", - }, - }, - }, - Addresses: map[string]interface{}{ - "private": []interface{}{ - map[string]interface{}{ - "addr": "10.20.40.80", - "version": float64(4.0), - }, - }, - "public": []interface{}{ - map[string]interface{}{ - "addr": "2001:4800:6666:105:2a0f:c056:f594:7777", - "version": float64(6.0), - }, - map[string]interface{}{ - "addr": "1.2.4.8", - "version": float64(4.0), - }, - }, - }, - Metadata: map[string]interface{}{}, - Links: []interface{}{ - map[string]interface{}{ - "href": "https://dfw.servers.api.rackspacecloud.com/v2/111111/servers/8c65cb68-0681-4c30-bc88-6b83a8a26aee", - "rel": "self", - }, - map[string]interface{}{ - "href": "https://dfw.servers.api.rackspacecloud.com/111111/servers/8c65cb68-0681-4c30-bc88-6b83a8a26aee", - "rel": "bookmark", - }, - }, - KeyName: "", - AdminPass: "", -} - -// CreatedServer is the partial Server struct that can be parsed from CreateOutput. -var CreatedServer = os.Server{ - ID: "bb63327b-6a2f-34bc-b0ef-4b6d97ea637e", - AdminPass: "v7tADqbE5pr9", - Links: []interface{}{}, -} - -// ExpectedServerSlice is the collection of servers, in order, that should be parsed from ListOutput. -var ExpectedServerSlice = []os.Server{DevstackServer, PerilServer} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/requests.go b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/requests.go deleted file mode 100644 index d4472a08088f..000000000000 --- a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/requests.go +++ /dev/null @@ -1,178 +0,0 @@ -package servers - -import ( - "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume" - "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/diskconfig" - os "github.com/rackspace/gophercloud/openstack/compute/v2/servers" -) - -// CreateOpts specifies all of the options that Rackspace accepts in its Create request, including -// the union of all extensions that Rackspace supports. -type CreateOpts struct { - // Name [required] is the name to assign to the newly launched server. - Name string - - // ImageRef [optional; required if ImageName is not provided] is the ID or full - // URL to the image that contains the server's OS and initial state. - // Also optional if using the boot-from-volume extension. - ImageRef string - - // ImageName [optional; required if ImageRef is not provided] is the name of the - // image that contains the server's OS and initial state. - // Also optional if using the boot-from-volume extension. 
- ImageName string - - // FlavorRef [optional; required if FlavorName is not provided] is the ID or - // full URL to the flavor that describes the server's specs. - FlavorRef string - - // FlavorName [optional; required if FlavorRef is not provided] is the name of - // the flavor that describes the server's specs. - FlavorName string - - // SecurityGroups [optional] lists the names of the security groups to which this server should belong. - SecurityGroups []string - - // UserData [optional] contains configuration information or scripts to use upon launch. - // Create will base64-encode it for you. - UserData []byte - - // AvailabilityZone [optional] in which to launch the server. - AvailabilityZone string - - // Networks [optional] dictates how this server will be attached to available networks. - // By default, the server will be attached to all isolated networks for the tenant. - Networks []os.Network - - // Metadata [optional] contains key-value pairs (up to 255 bytes each) to attach to the server. - Metadata map[string]string - - // Personality [optional] includes files to inject into the server at launch. - // Create will base64-encode file contents for you. - Personality os.Personality - - // ConfigDrive [optional] enables metadata injection through a configuration drive. - ConfigDrive bool - - // AdminPass [optional] sets the root user password. If not set, a randomly-generated - // password will be created and returned in the response. - AdminPass string - - // Rackspace-specific extensions begin here. - - // KeyPair [optional] specifies the name of the SSH KeyPair to be injected into the newly launched - // server. See the "keypairs" extension in OpenStack compute v2. - KeyPair string - - // DiskConfig [optional] controls how the created server's disk is partitioned. See the "diskconfig" - // extension in OpenStack compute v2. - DiskConfig diskconfig.DiskConfig - - // BlockDevice [optional] will create the server from a volume, which is created from an image, - // a snapshot, or another volume. - BlockDevice []bootfromvolume.BlockDevice -} - -// ToServerCreateMap constructs a request body using all of the OpenStack extensions that are -// active on Rackspace. -func (opts CreateOpts) ToServerCreateMap() (map[string]interface{}, error) { - base := os.CreateOpts{ - Name: opts.Name, - ImageRef: opts.ImageRef, - ImageName: opts.ImageName, - FlavorRef: opts.FlavorRef, - FlavorName: opts.FlavorName, - SecurityGroups: opts.SecurityGroups, - UserData: opts.UserData, - AvailabilityZone: opts.AvailabilityZone, - Networks: opts.Networks, - Metadata: opts.Metadata, - Personality: opts.Personality, - ConfigDrive: opts.ConfigDrive, - AdminPass: opts.AdminPass, - } - - drive := diskconfig.CreateOptsExt{ - CreateOptsBuilder: base, - DiskConfig: opts.DiskConfig, - } - - res, err := drive.ToServerCreateMap() - if err != nil { - return nil, err - } - - if len(opts.BlockDevice) != 0 { - bfv := bootfromvolume.CreateOptsExt{ - CreateOptsBuilder: drive, - BlockDevice: opts.BlockDevice, - } - - res, err = bfv.ToServerCreateMap() - if err != nil { - return nil, err - } - } - - // key_name doesn't actually come from the extension (or at least isn't documented there) so - // we need to add it manually. - serverMap := res["server"].(map[string]interface{}) - if opts.KeyPair != "" { - serverMap["key_name"] = opts.KeyPair - } - - return res, nil -} - -// RebuildOpts represents all of the configuration options used in a server rebuild operation that -// are supported by Rackspace. 
-type RebuildOpts struct { - // Required. The ID of the image you want your server to be provisioned on - ImageID string - - // Name to set the server to - Name string - - // Required. The server's admin password - AdminPass string - - // AccessIPv4 [optional] provides a new IPv4 address for the instance. - AccessIPv4 string - - // AccessIPv6 [optional] provides a new IPv6 address for the instance. - AccessIPv6 string - - // Metadata [optional] contains key-value pairs (up to 255 bytes each) to attach to the server. - Metadata map[string]string - - // Personality [optional] includes files to inject into the server at launch. - // Rebuild will base64-encode file contents for you. - Personality os.Personality - - // Rackspace-specific stuff begins here. - - // DiskConfig [optional] controls how the created server's disk is partitioned. See the "diskconfig" - // extension in OpenStack compute v2. - DiskConfig diskconfig.DiskConfig -} - -// ToServerRebuildMap constructs a request body using all of the OpenStack extensions that are -// active on Rackspace. -func (opts RebuildOpts) ToServerRebuildMap() (map[string]interface{}, error) { - base := os.RebuildOpts{ - ImageID: opts.ImageID, - Name: opts.Name, - AdminPass: opts.AdminPass, - AccessIPv4: opts.AccessIPv4, - AccessIPv6: opts.AccessIPv6, - Metadata: opts.Metadata, - Personality: opts.Personality, - } - - drive := diskconfig.RebuildOptsExt{ - RebuildOptsBuilder: base, - DiskConfig: opts.DiskConfig, - } - - return drive.ToServerRebuildMap() -} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/volumeattach/delegate.go b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/volumeattach/delegate.go deleted file mode 100644 index c6003e0e5b94..000000000000 --- a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/volumeattach/delegate.go +++ /dev/null @@ -1,27 +0,0 @@ -package volumeattach - -import ( - "github.com/rackspace/gophercloud" - os "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach" - "github.com/rackspace/gophercloud/pagination" -) - -// List returns a Pager that allows you to iterate over a collection of VolumeAttachments. -func List(client *gophercloud.ServiceClient, serverID string) pagination.Pager { - return os.List(client, serverID) -} - -// Create requests the creation of a new volume attachment on the server -func Create(client *gophercloud.ServiceClient, serverID string, opts os.CreateOptsBuilder) os.CreateResult { - return os.Create(client, serverID, opts) -} - -// Get returns public data about a previously created VolumeAttachment. -func Get(client *gophercloud.ServiceClient, serverID, aID string) os.GetResult { - return os.Get(client, serverID, aID) -} - -// Delete requests the deletion of a previous stored VolumeAttachment from the server. 
-func Delete(client *gophercloud.ServiceClient, serverID, aID string) os.DeleteResult { - return os.Delete(client, serverID, aID) -} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/volumeattach/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/volumeattach/doc.go deleted file mode 100644 index 2164908e3a89..000000000000 --- a/vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/volumeattach/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package volumeattach provides the ability to attach and detach volume -// to instances to Rackspace servers -package volumeattach diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/tokens/delegate.go b/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/tokens/delegate.go deleted file mode 100644 index 4f9885af03ce..000000000000 --- a/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/tokens/delegate.go +++ /dev/null @@ -1,60 +0,0 @@ -package tokens - -import ( - "errors" - - "github.com/rackspace/gophercloud" - os "github.com/rackspace/gophercloud/openstack/identity/v2/tokens" -) - -var ( - // ErrPasswordProvided is returned if both a password and an API key are provided to Create. - ErrPasswordProvided = errors.New("Please provide either a password or an API key.") -) - -// AuthOptions wraps the OpenStack AuthOptions struct to be able to customize the request body -// when API key authentication is used. -type AuthOptions struct { - os.AuthOptions -} - -// WrapOptions embeds a root AuthOptions struct in a package-specific one. -func WrapOptions(original gophercloud.AuthOptions) AuthOptions { - return AuthOptions{AuthOptions: os.WrapOptions(original)} -} - -// ToTokenCreateMap serializes an AuthOptions into a request body. If an API key is provided, it -// will be used, otherwise -func (auth AuthOptions) ToTokenCreateMap() (map[string]interface{}, error) { - if auth.APIKey == "" { - return auth.AuthOptions.ToTokenCreateMap() - } - - // Verify that other required attributes are present. - if auth.Username == "" { - return nil, os.ErrUsernameRequired - } - - authMap := make(map[string]interface{}) - - authMap["RAX-KSKEY:apiKeyCredentials"] = map[string]interface{}{ - "username": auth.Username, - "apiKey": auth.APIKey, - } - - if auth.TenantID != "" { - authMap["tenantId"] = auth.TenantID - } - if auth.TenantName != "" { - authMap["tenantName"] = auth.TenantName - } - - return map[string]interface{}{"auth": authMap}, nil -} - -// Create authenticates to Rackspace's identity service and attempts to acquire a Token. Rather -// than interact with this service directly, users should generally call -// rackspace.AuthenticatedClient(). -func Create(client *gophercloud.ServiceClient, auth AuthOptions) os.CreateResult { - return os.Create(client, auth) -} diff --git a/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/tokens/doc.go b/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/tokens/doc.go deleted file mode 100644 index 44043e5e13fe..000000000000 --- a/vendor/github.com/rackspace/gophercloud/rackspace/identity/v2/tokens/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package tokens provides information and interaction with the token -// API resource for the Rackspace Identity service. 
-package tokens diff --git a/vendor/github.com/rackspace/gophercloud/results.go b/vendor/github.com/rackspace/gophercloud/results.go deleted file mode 100644 index 27fd1b60fa21..000000000000 --- a/vendor/github.com/rackspace/gophercloud/results.go +++ /dev/null @@ -1,153 +0,0 @@ -package gophercloud - -import ( - "encoding/json" - "net/http" - "reflect" - - "github.com/mitchellh/mapstructure" -) - -/* -Result is an internal type to be used by individual resource packages, but its -methods will be available on a wide variety of user-facing embedding types. - -It acts as a base struct that other Result types, returned from request -functions, can embed for convenience. All Results capture basic information -from the HTTP transaction that was performed, including the response body, -HTTP headers, and any errors that happened. - -Generally, each Result type will have an Extract method that can be used to -further interpret the result's payload in a specific context. Extensions or -providers can then provide additional extraction functions to pull out -provider- or extension-specific information as well. -*/ -type Result struct { - // Body is the payload of the HTTP response from the server. In most cases, - // this will be the deserialized JSON structure. - Body interface{} - - // Header contains the HTTP header structure from the original response. - Header http.Header - - // Err is an error that occurred during the operation. It's deferred until - // extraction to make it easier to chain the Extract call. - Err error -} - -// PrettyPrintJSON creates a string containing the full response body as -// pretty-printed JSON. It's useful for capturing test fixtures and for -// debugging extraction bugs. If you include its output in an issue related to -// a buggy extraction function, we will all love you forever. -func (r Result) PrettyPrintJSON() string { - pretty, err := json.MarshalIndent(r.Body, "", " ") - if err != nil { - panic(err.Error()) - } - return string(pretty) -} - -// ErrResult is an internal type to be used by individual resource packages, but -// its methods will be available on a wide variety of user-facing embedding -// types. -// -// It represents results that only contain a potential error and -// nothing else. Usually, if the operation executed successfully, the Err field -// will be nil; otherwise it will be stocked with a relevant error. Use the -// ExtractErr method -// to cleanly pull it out. -type ErrResult struct { - Result -} - -// ExtractErr is a function that extracts error information, or nil, from a result. -func (r ErrResult) ExtractErr() error { - return r.Err -} - -/* -HeaderResult is an internal type to be used by individual resource packages, but -its methods will be available on a wide variety of user-facing embedding types. - -It represents a result that only contains an error (possibly nil) and an -http.Header. This is used, for example, by the objectstorage packages in -openstack, because most of the operations don't return response bodies, but do -have relevant information in headers. -*/ -type HeaderResult struct { - Result -} - -// ExtractHeader will return the http.Header and error from the HeaderResult. -// -// header, err := objects.Create(client, "my_container", objects.CreateOpts{}).ExtractHeader() -func (hr HeaderResult) ExtractHeader() (http.Header, error) { - return hr.Header, hr.Err -} - -// DecodeHeader is a function that decodes a header (usually of type map[string]interface{}) to -// another type (usually a struct). 
This function is used by the objectstorage package to give -// users access to response headers without having to query a map. A DecodeHookFunction is used, -// because OpenStack-based clients return header values as arrays (Go slices). -func DecodeHeader(from, to interface{}) error { - config := &mapstructure.DecoderConfig{ - DecodeHook: func(from, to reflect.Kind, data interface{}) (interface{}, error) { - if from == reflect.Slice { - return data.([]string)[0], nil - } - return data, nil - }, - Result: to, - WeaklyTypedInput: true, - } - decoder, err := mapstructure.NewDecoder(config) - if err != nil { - return err - } - if err := decoder.Decode(from); err != nil { - return err - } - return nil -} - -// RFC3339Milli describes a common time format used by some API responses. -const RFC3339Milli = "2006-01-02T15:04:05.999999Z" - -// Time format used in cloud orchestration -const STACK_TIME_FMT = "2006-01-02T15:04:05" - -/* -Link is an internal type to be used in packages of collection resources that are -paginated in a certain way. - -It's a response substructure common to many paginated collection results that is -used to point to related pages. Usually, the one we care about is the one with -Rel field set to "next". -*/ -type Link struct { - Href string `mapstructure:"href"` - Rel string `mapstructure:"rel"` -} - -/* -ExtractNextURL is an internal function useful for packages of collection -resources that are paginated in a certain way. - -It attempts attempts to extract the "next" URL from slice of Link structs, or -"" if no such URL is present. -*/ -func ExtractNextURL(links []Link) (string, error) { - var url string - - for _, l := range links { - if l.Rel == "next" { - url = l.Href - } - } - - if url == "" { - return "", nil - } - - return url, nil -} diff --git a/vendor/github.com/rackspace/gophercloud/service_client.go b/vendor/github.com/rackspace/gophercloud/service_client.go deleted file mode 100644 index 3490da05f272..000000000000 --- a/vendor/github.com/rackspace/gophercloud/service_client.go +++ /dev/null @@ -1,32 +0,0 @@ -package gophercloud - -import "strings" - -// ServiceClient stores details required to interact with a specific service API implemented by a provider. -// Generally, you'll acquire these by calling the appropriate `New` method on a ProviderClient. -type ServiceClient struct { - // ProviderClient is a reference to the provider that implements this service. - *ProviderClient - - // Endpoint is the base URL of the service's API, acquired from a service catalog. - // It MUST end with a /. - Endpoint string - - // ResourceBase is the base URL shared by the resources within a service's API. It should include - // the API version and, like Endpoint, MUST end with a / if set. If not set, the Endpoint is used - // as-is, instead. - ResourceBase string -} - -// ResourceBaseURL returns the base URL of any resources used by this service. It MUST end with a /. -func (client *ServiceClient) ResourceBaseURL() string { - if client.ResourceBase != "" { - return client.ResourceBase - } - return client.Endpoint -} - -// ServiceURL constructs a URL for a resource belonging to this provider. 
-func (client *ServiceClient) ServiceURL(parts ...string) string { - return client.ResourceBaseURL() + strings.Join(parts, "/") -} diff --git a/vendor/github.com/rackspace/gophercloud/testhelper/client/fake.go b/vendor/github.com/rackspace/gophercloud/testhelper/client/fake.go deleted file mode 100644 index 5b69b058f1fe..000000000000 --- a/vendor/github.com/rackspace/gophercloud/testhelper/client/fake.go +++ /dev/null @@ -1,17 +0,0 @@ -package client - -import ( - "github.com/rackspace/gophercloud" - "github.com/rackspace/gophercloud/testhelper" -) - -// Fake token to use. -const TokenID = "cbc36478b0bd8e67e89469c7749d4127" - -// ServiceClient returns a generic service client for use in tests. -func ServiceClient() *gophercloud.ServiceClient { - return &gophercloud.ServiceClient{ - ProviderClient: &gophercloud.ProviderClient{TokenID: TokenID}, - Endpoint: testhelper.Endpoint(), - } -} diff --git a/vendor/github.com/rackspace/gophercloud/testhelper/convenience.go b/vendor/github.com/rackspace/gophercloud/testhelper/convenience.go deleted file mode 100644 index f5659172ea79..000000000000 --- a/vendor/github.com/rackspace/gophercloud/testhelper/convenience.go +++ /dev/null @@ -1,370 +0,0 @@ -package testhelper - -import ( - "bytes" - "encoding/json" - "fmt" - "path/filepath" - "reflect" - "runtime" - "strings" - "testing" -) - -const ( - logBodyFmt = "\033[1;31m%s %s\033[0m" - greenCode = "\033[0m\033[1;32m" - yellowCode = "\033[0m\033[1;33m" - resetCode = "\033[0m\033[1;31m" -) - -func prefix(depth int) string { - _, file, line, _ := runtime.Caller(depth) - return fmt.Sprintf("Failure in %s, line %d:", filepath.Base(file), line) -} - -func green(str interface{}) string { - return fmt.Sprintf("%s%#v%s", greenCode, str, resetCode) -} - -func yellow(str interface{}) string { - return fmt.Sprintf("%s%#v%s", yellowCode, str, resetCode) -} - -func logFatal(t *testing.T, str string) { - t.Fatalf(logBodyFmt, prefix(3), str) -} - -func logError(t *testing.T, str string) { - t.Errorf(logBodyFmt, prefix(3), str) -} - -type diffLogger func([]string, interface{}, interface{}) - -type visit struct { - a1 uintptr - a2 uintptr - typ reflect.Type -} - -// Recursively visits the structures of "expected" and "actual". The diffLogger function will be -// invoked with each different value encountered, including the reference path that was followed -// to get there. -func deepDiffEqual(expected, actual reflect.Value, visited map[visit]bool, path []string, logDifference diffLogger) { - defer func() { - // Fall back to the regular reflect.DeepEquals function. - if r := recover(); r != nil { - var e, a interface{} - if expected.IsValid() { - e = expected.Interface() - } - if actual.IsValid() { - a = actual.Interface() - } - - if !reflect.DeepEqual(e, a) { - logDifference(path, e, a) - } - } - }() - - if !expected.IsValid() && actual.IsValid() { - logDifference(path, nil, actual.Interface()) - return - } - if expected.IsValid() && !actual.IsValid() { - logDifference(path, expected.Interface(), nil) - return - } - if !expected.IsValid() && !actual.IsValid() { - return - } - - hard := func(k reflect.Kind) bool { - switch k { - case reflect.Array, reflect.Map, reflect.Slice, reflect.Struct: - return true - } - return false - } - - if expected.CanAddr() && actual.CanAddr() && hard(expected.Kind()) { - addr1 := expected.UnsafeAddr() - addr2 := actual.UnsafeAddr() - - if addr1 > addr2 { - addr1, addr2 = addr2, addr1 - } - - if addr1 == addr2 { - // References are identical. 
We can short-circuit - return - } - - typ := expected.Type() - v := visit{addr1, addr2, typ} - if visited[v] { - // Already visited. - return - } - - // Remember this visit for later. - visited[v] = true - } - - switch expected.Kind() { - case reflect.Array: - for i := 0; i < expected.Len(); i++ { - hop := append(path, fmt.Sprintf("[%d]", i)) - deepDiffEqual(expected.Index(i), actual.Index(i), visited, hop, logDifference) - } - return - case reflect.Slice: - if expected.IsNil() != actual.IsNil() { - logDifference(path, expected.Interface(), actual.Interface()) - return - } - if expected.Len() == actual.Len() && expected.Pointer() == actual.Pointer() { - return - } - for i := 0; i < expected.Len(); i++ { - hop := append(path, fmt.Sprintf("[%d]", i)) - deepDiffEqual(expected.Index(i), actual.Index(i), visited, hop, logDifference) - } - return - case reflect.Interface: - if expected.IsNil() != actual.IsNil() { - logDifference(path, expected.Interface(), actual.Interface()) - return - } - deepDiffEqual(expected.Elem(), actual.Elem(), visited, path, logDifference) - return - case reflect.Ptr: - deepDiffEqual(expected.Elem(), actual.Elem(), visited, path, logDifference) - return - case reflect.Struct: - for i, n := 0, expected.NumField(); i < n; i++ { - field := expected.Type().Field(i) - hop := append(path, "."+field.Name) - deepDiffEqual(expected.Field(i), actual.Field(i), visited, hop, logDifference) - } - return - case reflect.Map: - if expected.IsNil() != actual.IsNil() { - logDifference(path, expected.Interface(), actual.Interface()) - return - } - if expected.Len() == actual.Len() && expected.Pointer() == actual.Pointer() { - return - } - - var keys []reflect.Value - if expected.Len() >= actual.Len() { - keys = expected.MapKeys() - } else { - keys = actual.MapKeys() - } - - for _, k := range keys { - expectedValue := expected.MapIndex(k) - actualValue := expected.MapIndex(k) - - if !expectedValue.IsValid() { - logDifference(path, nil, actual.Interface()) - return - } - if !actualValue.IsValid() { - logDifference(path, expected.Interface(), nil) - return - } - - hop := append(path, fmt.Sprintf("[%v]", k)) - deepDiffEqual(expectedValue, actualValue, visited, hop, logDifference) - } - return - case reflect.Func: - if expected.IsNil() != actual.IsNil() { - logDifference(path, expected.Interface(), actual.Interface()) - } - return - default: - if expected.Interface() != actual.Interface() { - logDifference(path, expected.Interface(), actual.Interface()) - } - } -} - -func deepDiff(expected, actual interface{}, logDifference diffLogger) { - if expected == nil || actual == nil { - logDifference([]string{}, expected, actual) - return - } - - expectedValue := reflect.ValueOf(expected) - actualValue := reflect.ValueOf(actual) - - if expectedValue.Type() != actualValue.Type() { - logDifference([]string{}, expected, actual) - return - } - deepDiffEqual(expectedValue, actualValue, map[visit]bool{}, []string{}, logDifference) -} - -// AssertEquals compares two arbitrary values and performs a comparison. 
If the -// comparison fails, a fatal error is raised that will fail the test -func AssertEquals(t *testing.T, expected, actual interface{}) { - if expected != actual { - logFatal(t, fmt.Sprintf("expected %s but got %s", green(expected), yellow(actual))) - } -} - -// CheckEquals is similar to AssertEquals, except with a non-fatal error -func CheckEquals(t *testing.T, expected, actual interface{}) { - if expected != actual { - logError(t, fmt.Sprintf("expected %s but got %s", green(expected), yellow(actual))) - } -} - -// AssertErr is a convenience function for checking that an error occurred -func AssertErr(t *testing.T, e error) { - if e == nil { - logFatal(t, fmt.Sprintf("expected an error but none occurred")) - } -} - -// CheckErr is a convenience function for checking that an error occurred, -// except with a non-fatal error -func CheckErr(t *testing.T, e error) { - if e == nil { - logError(t, fmt.Sprintf("expected an error but none occurred")) - } -} - -// AssertDeepEquals - like Equals - performs a comparison - but on more complex -// structures that requires deeper inspection -func AssertDeepEquals(t *testing.T, expected, actual interface{}) { - pre := prefix(2) - - differed := false - deepDiff(expected, actual, func(path []string, expected, actual interface{}) { - differed = true - t.Errorf("\033[1;31m%sat %s expected %s, but got %s\033[0m", - pre, - strings.Join(path, ""), - green(expected), - yellow(actual)) - }) - if differed { - logFatal(t, "The structures were different.") - } -} - -// CheckDeepEquals is similar to AssertDeepEquals, except with a non-fatal error -func CheckDeepEquals(t *testing.T, expected, actual interface{}) { - pre := prefix(2) - - deepDiff(expected, actual, func(path []string, expected, actual interface{}) { - t.Errorf("\033[1;31m%s at %s expected %s, but got %s\033[0m", - pre, - strings.Join(path, ""), - green(expected), - yellow(actual)) - }) -} - -func isByteArrayEquals(t *testing.T, expectedBytes []byte, actualBytes []byte) bool { - return bytes.Equal(expectedBytes, actualBytes) -} - -// AssertByteArrayEquals a convenience function for checking whether two byte arrays are equal -func AssertByteArrayEquals(t *testing.T, expectedBytes []byte, actualBytes []byte) { - if !isByteArrayEquals(t, expectedBytes, actualBytes) { - logFatal(t, "The bytes differed.") - } -} - -// CheckByteArrayEquals a convenience function for silent checking whether two byte arrays are equal -func CheckByteArrayEquals(t *testing.T, expectedBytes []byte, actualBytes []byte) { - if !isByteArrayEquals(t, expectedBytes, actualBytes) { - logError(t, "The bytes differed.") - } -} - -// isJSONEquals is a utility function that implements JSON comparison for AssertJSONEquals and -// CheckJSONEquals. -func isJSONEquals(t *testing.T, expectedJSON string, actual interface{}) bool { - var parsedExpected, parsedActual interface{} - err := json.Unmarshal([]byte(expectedJSON), &parsedExpected) - if err != nil { - t.Errorf("Unable to parse expected value as JSON: %v", err) - return false - } - - jsonActual, err := json.Marshal(actual) - AssertNoErr(t, err) - err = json.Unmarshal(jsonActual, &parsedActual) - AssertNoErr(t, err) - - if !reflect.DeepEqual(parsedExpected, parsedActual) { - prettyExpected, err := json.MarshalIndent(parsedExpected, "", " ") - if err != nil { - t.Logf("Unable to pretty-print expected JSON: %v\n%s", err, expectedJSON) - } else { - // We can't use green() here because %#v prints prettyExpected as a byte array literal, which - // is... unhelpful. 
Converting it to a string first leaves "\n" uninterpreted for some reason. - t.Logf("Expected JSON:\n%s%s%s", greenCode, prettyExpected, resetCode) - } - - prettyActual, err := json.MarshalIndent(actual, "", " ") - if err != nil { - t.Logf("Unable to pretty-print actual JSON: %v\n%#v", err, actual) - } else { - // We can't use yellow() for the same reason. - t.Logf("Actual JSON:\n%s%s%s", yellowCode, prettyActual, resetCode) - } - - return false - } - return true -} - -// AssertJSONEquals serializes a value as JSON, parses an expected string as JSON, and ensures that -// both are consistent. If they aren't, the expected and actual structures are pretty-printed and -// shown for comparison. -// -// This is useful for comparing structures that are built as nested map[string]interface{} values, -// which are a pain to construct as literals. -func AssertJSONEquals(t *testing.T, expectedJSON string, actual interface{}) { - if !isJSONEquals(t, expectedJSON, actual) { - logFatal(t, "The generated JSON structure differed.") - } -} - -// CheckJSONEquals is similar to AssertJSONEquals, but nonfatal. -func CheckJSONEquals(t *testing.T, expectedJSON string, actual interface{}) { - if !isJSONEquals(t, expectedJSON, actual) { - logError(t, "The generated JSON structure differed.") - } -} - -// AssertNoErr is a convenience function for checking whether an error value is -// an actual error -func AssertNoErr(t *testing.T, e error) { - if e != nil { - logFatal(t, fmt.Sprintf("unexpected error %s", yellow(e.Error()))) - } -} - -// AssertNotNil is a convenience function for checking whether given value is not nil -func AssertNotNil(t *testing.T, actual interface{}) { - if actual == nil || !reflect.ValueOf(actual).Elem().IsValid() { - logFatal(t, fmt.Sprintf("Not nil expexted, but was %v", actual)) - } -} - -// CheckNoErr is similar to AssertNoErr, except with a non-fatal error -func CheckNoErr(t *testing.T, e error) { - if e != nil { - logError(t, fmt.Sprintf("unexpected error %s", yellow(e.Error()))) - } -} diff --git a/vendor/github.com/rackspace/gophercloud/testhelper/doc.go b/vendor/github.com/rackspace/gophercloud/testhelper/doc.go deleted file mode 100644 index 25b4dfebbbe3..000000000000 --- a/vendor/github.com/rackspace/gophercloud/testhelper/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -/* -Package testhelper container methods that are useful for writing unit tests. -*/ -package testhelper diff --git a/vendor/github.com/rackspace/gophercloud/testhelper/http_responses.go b/vendor/github.com/rackspace/gophercloud/testhelper/http_responses.go deleted file mode 100644 index e1f1f9ac0e89..000000000000 --- a/vendor/github.com/rackspace/gophercloud/testhelper/http_responses.go +++ /dev/null @@ -1,91 +0,0 @@ -package testhelper - -import ( - "encoding/json" - "io/ioutil" - "net/http" - "net/http/httptest" - "net/url" - "reflect" - "testing" -) - -var ( - // Mux is a multiplexer that can be used to register handlers. - Mux *http.ServeMux - - // Server is an in-memory HTTP server for testing. - Server *httptest.Server -) - -// SetupHTTP prepares the Mux and Server. -func SetupHTTP() { - Mux = http.NewServeMux() - Server = httptest.NewServer(Mux) -} - -// TeardownHTTP releases HTTP-related resources. -func TeardownHTTP() { - Server.Close() -} - -// Endpoint returns a fake endpoint that will actually target the Mux. -func Endpoint() string { - return Server.URL + "/" -} - -// TestFormValues ensures that all the URL parameters given to the http.Request are the same as values. 
-func TestFormValues(t *testing.T, r *http.Request, values map[string]string) { - want := url.Values{} - for k, v := range values { - want.Add(k, v) - } - - r.ParseForm() - if !reflect.DeepEqual(want, r.Form) { - t.Errorf("Request parameters = %v, want %v", r.Form, want) - } -} - -// TestMethod checks that the Request has the expected method (e.g. GET, POST). -func TestMethod(t *testing.T, r *http.Request, expected string) { - if expected != r.Method { - t.Errorf("Request method = %v, expected %v", r.Method, expected) - } -} - -// TestHeader checks that the header on the http.Request matches the expected value. -func TestHeader(t *testing.T, r *http.Request, header string, expected string) { - if actual := r.Header.Get(header); expected != actual { - t.Errorf("Header %s = %s, expected %s", header, actual, expected) - } -} - -// TestBody verifies that the request body matches an expected body. -func TestBody(t *testing.T, r *http.Request, expected string) { - b, err := ioutil.ReadAll(r.Body) - if err != nil { - t.Errorf("Unable to read body: %v", err) - } - str := string(b) - if expected != str { - t.Errorf("Body = %s, expected %s", str, expected) - } -} - -// TestJSONRequest verifies that the JSON payload of a request matches an expected structure, without asserting things about -// whitespace or ordering. -func TestJSONRequest(t *testing.T, r *http.Request, expected string) { - b, err := ioutil.ReadAll(r.Body) - if err != nil { - t.Errorf("Unable to read request body: %v", err) - } - - var actualJSON interface{} - err = json.Unmarshal(b, &actualJSON) - if err != nil { - t.Errorf("Unable to parse request body as JSON: %v", err) - } - - CheckJSONEquals(t, expected, actualJSON) -} diff --git a/vendor/github.com/rackspace/gophercloud/util.go b/vendor/github.com/rackspace/gophercloud/util.go deleted file mode 100644 index 3d6a4e490755..000000000000 --- a/vendor/github.com/rackspace/gophercloud/util.go +++ /dev/null @@ -1,82 +0,0 @@ -package gophercloud - -import ( - "errors" - "net/url" - "path/filepath" - "strings" - "time" -) - -// WaitFor polls a predicate function, once per second, up to a timeout limit. -// It usually does this to wait for a resource to transition to a certain state. -// Resource packages will wrap this in a more convenient function that's -// specific to a certain resource, but it can also be useful on its own. -func WaitFor(timeout int, predicate func() (bool, error)) error { - start := time.Now().Second() - for { - // Force a 1s sleep - time.Sleep(1 * time.Second) - - // If a timeout is set, and that's been exceeded, shut it down - if timeout >= 0 && time.Now().Second()-start >= timeout { - return errors.New("A timeout occurred") - } - - // Execute the function - satisfied, err := predicate() - if err != nil { - return err - } - if satisfied { - return nil - } - } -} - -// NormalizeURL is an internal function to be used by provider clients. -// -// It ensures that each endpoint URL has a closing `/`, as expected by -// ServiceClient's methods. -func NormalizeURL(url string) string { - if !strings.HasSuffix(url, "/") { - return url + "/" - } - return url -} - -// NormalizePathURL is used to convert rawPath to a fqdn, using basePath as -// a reference in the filesystem, if necessary. basePath is assumed to contain -// either '.' when first used, or the file:// type fqdn of the parent resource. -// e.g. 
myFavScript.yaml => file://opt/lib/myFavScript.yaml -func NormalizePathURL(basePath, rawPath string) (string, error) { - u, err := url.Parse(rawPath) - if err != nil { - return "", err - } - // if a scheme is defined, it must be a fqdn already - if u.Scheme != "" { - return u.String(), nil - } - // if basePath is a url, then child resources are assumed to be relative to it - bu, err := url.Parse(basePath) - if err != nil { - return "", err - } - var basePathSys, absPathSys string - if bu.Scheme != "" { - basePathSys = filepath.FromSlash(bu.Path) - absPathSys = filepath.Join(basePathSys, rawPath) - bu.Path = filepath.ToSlash(absPathSys) - return bu.String(), nil - } - - absPathSys = filepath.Join(basePath, rawPath) - u.Path = filepath.ToSlash(absPathSys) - if err != nil { - return "", err - } - u.Scheme = "file" - return u.String(), nil - -} diff --git a/vendor/golang.org/x/sys/unix/dev_darwin.go b/vendor/golang.org/x/sys/unix/dev_darwin.go index 7d101d5272d2..8d1dc0fa3d9a 100644 --- a/vendor/golang.org/x/sys/unix/dev_darwin.go +++ b/vendor/golang.org/x/sys/unix/dev_darwin.go @@ -20,5 +20,5 @@ func Minor(dev uint64) uint32 { // Mkdev returns a Darwin device number generated from the given major and minor // components. func Mkdev(major, minor uint32) uint64 { - return uint64((major << 24) | minor) + return (uint64(major) << 24) | uint64(minor) } diff --git a/vendor/golang.org/x/sys/unix/dev_dragonfly.go b/vendor/golang.org/x/sys/unix/dev_dragonfly.go new file mode 100644 index 000000000000..8502f202ce31 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/dev_dragonfly.go @@ -0,0 +1,30 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Functions to access/create device major and minor numbers matching the +// encoding used in Dragonfly's sys/types.h header. +// +// The information below is extracted and adapted from sys/types.h: +// +// Minor gives a cookie instead of an index since in order to avoid changing the +// meanings of bits 0-15 or wasting time and space shifting bits 16-31 for +// devices that don't use them. + +package unix + +// Major returns the major component of a DragonFlyBSD device number. +func Major(dev uint64) uint32 { + return uint32((dev >> 8) & 0xff) +} + +// Minor returns the minor component of a DragonFlyBSD device number. +func Minor(dev uint64) uint32 { + return uint32(dev & 0xffff00ff) +} + +// Mkdev returns a DragonFlyBSD device number generated from the given major and +// minor components. +func Mkdev(major, minor uint32) uint64 { + return (uint64(major) << 8) | uint64(minor) +} diff --git a/vendor/golang.org/x/sys/unix/dev_freebsd.go b/vendor/golang.org/x/sys/unix/dev_freebsd.go new file mode 100644 index 000000000000..eba3b4bd387a --- /dev/null +++ b/vendor/golang.org/x/sys/unix/dev_freebsd.go @@ -0,0 +1,30 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Functions to access/create device major and minor numbers matching the +// encoding used in FreeBSD's sys/types.h header. +// +// The information below is extracted and adapted from sys/types.h: +// +// Minor gives a cookie instead of an index since in order to avoid changing the +// meanings of bits 0-15 or wasting time and space shifting bits 16-31 for +// devices that don't use them. + +package unix + +// Major returns the major component of a FreeBSD device number. 
+func Major(dev uint64) uint32 { + return uint32((dev >> 8) & 0xff) +} + +// Minor returns the minor component of a FreeBSD device number. +func Minor(dev uint64) uint32 { + return uint32(dev & 0xffff00ff) +} + +// Mkdev returns a FreeBSD device number generated from the given major and +// minor components. +func Mkdev(major, minor uint32) uint64 { + return (uint64(major) << 8) | uint64(minor) +} diff --git a/vendor/golang.org/x/sys/unix/dev_linux.go b/vendor/golang.org/x/sys/unix/dev_linux.go index c902c39e812e..d165d6f3085f 100644 --- a/vendor/golang.org/x/sys/unix/dev_linux.go +++ b/vendor/golang.org/x/sys/unix/dev_linux.go @@ -34,9 +34,9 @@ func Minor(dev uint64) uint32 { // Mkdev returns a Linux device number generated from the given major and minor // components. func Mkdev(major, minor uint32) uint64 { - dev := uint64((major & 0x00000fff) << 8) - dev |= uint64((major & 0xfffff000) << 32) - dev |= uint64((minor & 0x000000ff) << 0) - dev |= uint64((minor & 0xffffff00) << 12) + dev := (uint64(major) & 0x00000fff) << 8 + dev |= (uint64(major) & 0xfffff000) << 32 + dev |= (uint64(minor) & 0x000000ff) << 0 + dev |= (uint64(minor) & 0xffffff00) << 12 return dev } diff --git a/vendor/golang.org/x/sys/unix/dev_netbsd.go b/vendor/golang.org/x/sys/unix/dev_netbsd.go index 08db58ee3d13..b4a203d0c542 100644 --- a/vendor/golang.org/x/sys/unix/dev_netbsd.go +++ b/vendor/golang.org/x/sys/unix/dev_netbsd.go @@ -22,8 +22,8 @@ func Minor(dev uint64) uint32 { // Mkdev returns a NetBSD device number generated from the given major and minor // components. func Mkdev(major, minor uint32) uint64 { - dev := uint64((major << 8) & 0x000fff00) - dev |= uint64((minor << 12) & 0xfff00000) - dev |= uint64((minor << 0) & 0x000000ff) + dev := (uint64(major) << 8) & 0x000fff00 + dev |= (uint64(minor) << 12) & 0xfff00000 + dev |= (uint64(minor) << 0) & 0x000000ff return dev } diff --git a/vendor/golang.org/x/sys/unix/dev_openbsd.go b/vendor/golang.org/x/sys/unix/dev_openbsd.go new file mode 100644 index 000000000000..f3430c42ff00 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/dev_openbsd.go @@ -0,0 +1,29 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Functions to access/create device major and minor numbers matching the +// encoding used in OpenBSD's sys/types.h header. + +package unix + +// Major returns the major component of an OpenBSD device number. +func Major(dev uint64) uint32 { + return uint32((dev & 0x0000ff00) >> 8) +} + +// Minor returns the minor component of an OpenBSD device number. +func Minor(dev uint64) uint32 { + minor := uint32((dev & 0x000000ff) >> 0) + minor |= uint32((dev & 0xffff0000) >> 8) + return minor +} + +// Mkdev returns an OpenBSD device number generated from the given major and minor +// components. +func Mkdev(major, minor uint32) uint64 { + dev := (uint64(major) << 8) & 0x0000ff00 + dev |= (uint64(minor) << 8) & 0xffff0000 + dev |= (uint64(minor) << 0) & 0x000000ff + return dev +} diff --git a/vendor/golang.org/x/sys/unix/env_unix.go b/vendor/golang.org/x/sys/unix/env_unix.go index 45e281a047dc..2e06b33f2e95 100644 --- a/vendor/golang.org/x/sys/unix/env_unix.go +++ b/vendor/golang.org/x/sys/unix/env_unix.go @@ -1,4 +1,4 @@ -// Copyright 2010 The Go Authors. All rights reserved. +// Copyright 2010 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
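The golang.org/x/sys/unix hunks above share one fix: Mkdev now converts the major and minor components to uint64 before shifting, because shifting a uint32 by 24 or 32 bits overflows (or zeroes out) inside the 32-bit type and silently drops the high bits of the device number. The following is a minimal, hypothetical sketch of that difference for the Linux encoding; mkdevOld and mkdevNew are illustrative names, not part of the package.

package main

import "fmt"

// mkdevOld reproduces the previous vendored Linux Mkdev: each shift is
// evaluated in uint32 before the conversion to uint64, so a uint32 shifted
// left by 32 is always 0 and high bits of major/minor are lost.
func mkdevOld(major, minor uint32) uint64 {
	dev := uint64((major & 0x00000fff) << 8)
	dev |= uint64((major & 0xfffff000) << 32)
	dev |= uint64((minor & 0x000000ff) << 0)
	dev |= uint64((minor & 0xffffff00) << 12)
	return dev
}

// mkdevNew mirrors the patched version: widen to uint64 first, then shift,
// so every bit of major and minor lands in the 64-bit device number.
func mkdevNew(major, minor uint32) uint64 {
	dev := (uint64(major) & 0x00000fff) << 8
	dev |= (uint64(major) & 0xfffff000) << 32
	dev |= (uint64(minor) & 0x000000ff) << 0
	dev |= (uint64(minor) & 0xffffff00) << 12
	return dev
}

func main() {
	// A major number wider than 12 bits exposes the difference.
	fmt.Printf("old: %#x\n", mkdevOld(0x12345, 0x89)) // upper major bits dropped
	fmt.Printf("new: %#x\n", mkdevNew(0x12345, 0x89)) // full 64-bit dev number
}

Running the sketch shows the old form discarding everything above the low 12 bits of major (and the upper minor bits likewise), which is the behavior the darwin, dragonfly, freebsd, linux, netbsd, and openbsd dev_*.go changes in this update correct.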
diff --git a/vendor/golang.org/x/sys/unix/env_unset.go b/vendor/golang.org/x/sys/unix/env_unset.go index 9222262559b8..c44fdc4afab4 100644 --- a/vendor/golang.org/x/sys/unix/env_unset.go +++ b/vendor/golang.org/x/sys/unix/env_unset.go @@ -1,4 +1,4 @@ -// Copyright 2014 The Go Authors. All rights reserved. +// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/golang.org/x/sys/unix/gccgo.go b/vendor/golang.org/x/sys/unix/gccgo.go index 94c823212476..40bed3fa80a2 100644 --- a/vendor/golang.org/x/sys/unix/gccgo.go +++ b/vendor/golang.org/x/sys/unix/gccgo.go @@ -1,4 +1,4 @@ -// Copyright 2015 The Go Authors. All rights reserved. +// Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -8,7 +8,7 @@ package unix import "syscall" -// We can't use the gc-syntax .s files for gccgo. On the plus side +// We can't use the gc-syntax .s files for gccgo. On the plus side // much of the functionality can be written directly in Go. //extern gccgoRealSyscall diff --git a/vendor/golang.org/x/sys/unix/gccgo_c.c b/vendor/golang.org/x/sys/unix/gccgo_c.c index 07f6be0392e2..99a774f2bef3 100644 --- a/vendor/golang.org/x/sys/unix/gccgo_c.c +++ b/vendor/golang.org/x/sys/unix/gccgo_c.c @@ -1,4 +1,4 @@ -// Copyright 2015 The Go Authors. All rights reserved. +// Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go index bffe1a77db57..251a977a8113 100644 --- a/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go @@ -1,4 +1,4 @@ -// Copyright 2015 The Go Authors. All rights reserved. +// Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/golang.org/x/sys/unix/gccgo_linux_sparc64.go b/vendor/golang.org/x/sys/unix/gccgo_linux_sparc64.go deleted file mode 100644 index 56332692c421..000000000000 --- a/vendor/golang.org/x/sys/unix/gccgo_linux_sparc64.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build gccgo,linux,sparc64 - -package unix - -import "syscall" - -//extern sysconf -func realSysconf(name int) int64 - -func sysconf(name int) (n int64, err syscall.Errno) { - r := realSysconf(name) - if r < 0 { - return 0, syscall.GetErrno() - } - return r, 0 -} diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh index c3a08092687e..00b7ce7ac1f4 100755 --- a/vendor/golang.org/x/sys/unix/mkall.sh +++ b/vendor/golang.org/x/sys/unix/mkall.sh @@ -142,7 +142,6 @@ openbsd_386) mkerrors="$mkerrors -m32" mksyscall="./mksyscall.pl -l32 -openbsd" mksysctl="./mksysctl_openbsd.pl" - zsysctl="zsysctl_openbsd.go" mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; @@ -150,7 +149,6 @@ openbsd_amd64) mkerrors="$mkerrors -m64" mksyscall="./mksyscall.pl -openbsd" mksysctl="./mksysctl_openbsd.pl" - zsysctl="zsysctl_openbsd.go" mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; @@ -158,7 +156,6 @@ openbsd_arm) mkerrors="$mkerrors" mksyscall="./mksyscall.pl -l32 -openbsd -arm" mksysctl="./mksysctl_openbsd.pl" - zsysctl="zsysctl_openbsd.go" mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl" # Let the type of C char be signed for making the bare syscall # API consistent across platforms. diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 2c434f7ae756..9a799b2f5b8d 100755 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -17,8 +17,8 @@ if test -z "$GOARCH" -o -z "$GOOS"; then fi # Check that we are using the new build system if we should -if [[ "$GOOS" -eq "linux" ]] && [[ "$GOARCH" != "sparc64" ]]; then - if [[ "$GOLANG_SYS_BUILD" -ne "docker" ]]; then +if [[ "$GOOS" = "linux" ]] && [[ "$GOARCH" != "sparc64" ]]; then + if [[ "$GOLANG_SYS_BUILD" != "docker" ]]; then echo 1>&2 "In the new build system, mkerrors should not be called directly." echo 1>&2 "See README.md" exit 1 @@ -27,7 +27,7 @@ fi CC=${CC:-cc} -if [[ "$GOOS" -eq "solaris" ]]; then +if [[ "$GOOS" = "solaris" ]]; then # Assumes GNU versions of utilities in PATH. 
export PATH=/usr/gnu/bin:$PATH fi @@ -84,6 +84,7 @@ includes_FreeBSD=' #include #include #include +#include #include #include #include @@ -183,6 +184,7 @@ struct ltchars { #include #include #include +#include #include #include @@ -283,6 +285,7 @@ includes_SunOS=' #include #include #include +#include #include #include #include @@ -347,6 +350,7 @@ ccflags="$@" $2 !~ /^EXPR_/ && $2 ~ /^E[A-Z0-9_]+$/ || $2 ~ /^B[0-9_]+$/ || + $2 ~ /^(OLD|NEW)DEV$/ || $2 == "BOTHER" || $2 ~ /^CI?BAUD(EX)?$/ || $2 == "IBSHIFT" || @@ -417,7 +421,9 @@ ccflags="$@" $2 ~ /^(VM|VMADDR)_/ || $2 ~ /^(TASKSTATS|TS)_/ || $2 ~ /^GENL_/ || + $2 ~ /^UTIME_/ || $2 ~ /^XATTR_(CREATE|REPLACE)/ || + $2 ~ /^WDIOC_/ || $2 !~ "WMESGLEN" && $2 ~ /^W[A-Z0-9]+$/ || $2 ~ /^BLK[A-Z]*(GET$|SET$|BUF$|PART$|SIZE)/ {printf("\t%s = C.%s\n", $2, $2)} diff --git a/vendor/golang.org/x/sys/unix/mkpost.go b/vendor/golang.org/x/sys/unix/mkpost.go index d3ff659bb36b..dbdfd0a3f196 100644 --- a/vendor/golang.org/x/sys/unix/mkpost.go +++ b/vendor/golang.org/x/sys/unix/mkpost.go @@ -56,6 +56,11 @@ func main() { removeFieldsRegex := regexp.MustCompile(`X__glibc\S*`) b = removeFieldsRegex.ReplaceAll(b, []byte("_")) + // Convert [65]int8 to [65]byte in Utsname members to simplify + // conversion to string; see golang.org/issue/20753 + convertUtsnameRegex := regexp.MustCompile(`((Sys|Node|Domain)name|Release|Version|Machine)(\s+)\[(\d+)\]u?int8`) + b = convertUtsnameRegex.ReplaceAll(b, []byte("$1$3[$4]byte")) + // We refuse to export private fields on s390x if goarch == "s390x" && goos == "linux" { // Remove cgo padding fields diff --git a/vendor/golang.org/x/sys/unix/pagesize_unix.go b/vendor/golang.org/x/sys/unix/pagesize_unix.go new file mode 100644 index 000000000000..83c85e0196fa --- /dev/null +++ b/vendor/golang.org/x/sys/unix/pagesize_unix.go @@ -0,0 +1,15 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +// For Unix, get the pagesize from the runtime. + +package unix + +import "syscall" + +func Getpagesize() int { + return syscall.Getpagesize() +} diff --git a/vendor/golang.org/x/sys/unix/race.go b/vendor/golang.org/x/sys/unix/race.go index 3c7627eb5ce9..61712b51c965 100644 --- a/vendor/golang.org/x/sys/unix/race.go +++ b/vendor/golang.org/x/sys/unix/race.go @@ -1,4 +1,4 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/golang.org/x/sys/unix/race0.go b/vendor/golang.org/x/sys/unix/race0.go index f8678e0d2156..dd0820431e9a 100644 --- a/vendor/golang.org/x/sys/unix/race0.go +++ b/vendor/golang.org/x/sys/unix/race0.go @@ -1,4 +1,4 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_linux.go b/vendor/golang.org/x/sys/unix/sockcmsg_linux.go index d9ff4731a2ec..6079eb4ac1e0 100644 --- a/vendor/golang.org/x/sys/unix/sockcmsg_linux.go +++ b/vendor/golang.org/x/sys/unix/sockcmsg_linux.go @@ -1,4 +1,4 @@ -// Copyright 2011 The Go Authors. All rights reserved. +// Copyright 2011 The Go Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/golang.org/x/sys/unix/syscall.go b/vendor/golang.org/x/sys/unix/syscall.go index e2ee36452a8f..ed0a6af79ba5 100644 --- a/vendor/golang.org/x/sys/unix/syscall.go +++ b/vendor/golang.org/x/sys/unix/syscall.go @@ -5,10 +5,10 @@ // +build darwin dragonfly freebsd linux netbsd openbsd solaris // Package unix contains an interface to the low-level operating system -// primitives. OS details vary depending on the underlying system, and +// primitives. OS details vary depending on the underlying system, and // by default, godoc will display OS-specific documentation for the current -// system. If you want godoc to display OS documentation for another -// system, set $GOOS and $GOARCH to the desired system. For example, if +// system. If you want godoc to display OS documentation for another +// system, set $GOOS and $GOARCH to the desired system. For example, if // you want to view documentation for freebsd/arm on linux/amd64, set $GOOS // to freebsd and $GOARCH to arm. // The primary use of this package is inside other packages that provide a more @@ -49,21 +49,3 @@ func BytePtrFromString(s string) (*byte, error) { // Single-word zero for use when we need a valid pointer to 0 bytes. // See mkunix.pl. var _zero uintptr - -func (ts *Timespec) Unix() (sec int64, nsec int64) { - return int64(ts.Sec), int64(ts.Nsec) -} - -func (tv *Timeval) Unix() (sec int64, nsec int64) { - return int64(tv.Sec), int64(tv.Usec) * 1000 -} - -func (ts *Timespec) Nano() int64 { - return int64(ts.Sec)*1e9 + int64(ts.Nsec) -} - -func (tv *Timeval) Nano() int64 { - return int64(tv.Sec)*1e9 + int64(tv.Usec)*1000 -} - -func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 } diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index c2846b32d6cd..4119edd7307e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -34,7 +34,7 @@ func Getgroups() (gids []int, err error) { return nil, nil } - // Sanity check group count. Max is 16 on BSD. + // Sanity check group count. Max is 16 on BSD. if n < 0 || n > 1000 { return nil, EINVAL } @@ -607,6 +607,15 @@ func Futimes(fd int, tv []Timeval) error { //sys fcntl(fd int, cmd int, arg int) (val int, err error) +//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) + +func Poll(fds []PollFd, timeout int) (n int, err error) { + if len(fds) == 0 { + return poll(nil, 0, timeout) + } + return poll(&fds[0], len(fds), timeout) +} + // TODO: wrap // Acct(name nil-string) (err error) // Gethostuuid(uuid *byte, timeout *Timespec) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index ad74a11fb3ea..f6a8fccad157 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -54,7 +54,7 @@ func nametomib(name string) (mib []_C_int, err error) { // NOTE(rsc): It seems strange to set the buffer to have // size CTL_MAXNAME+2 but use only CTL_MAXNAME - // as the size. I don't know why the +2 is here, but the + // as the size. I don't know why the +2 is here, but the // kernel uses +2 for its own implementation of this function. 
// I am scared that if we don't include the +2 here, the kernel // will silently write 2 words farther than we specify @@ -377,7 +377,6 @@ func IoctlGetTermios(fd int, req uint) (*Termios, error) { // Searchfs // Delete // Copyfile -// Poll // Watchevent // Waitevent // Modwatch diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go index c172a3da5a3a..b3ac109a2f42 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go @@ -11,27 +11,18 @@ import ( "unsafe" ) -func Getpagesize() int { return 4096 } - -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = int32(nsec / 1e9) - ts.Nsec = int32(nsec % 1e9) - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: int32(sec), Nsec: int32(nsec)} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Usec = int32(nsec % 1e9 / 1e3) - tv.Sec = int32(nsec / 1e9) - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: int32(sec), Usec: int32(usec)} } //sysnb gettimeofday(tp *Timeval) (sec int32, usec int32, err error) func Gettimeofday(tv *Timeval) (err error) { // The tv passed to gettimeofday must be non-nil - // but is otherwise unused. The answers come back + // but is otherwise unused. The answers come back // in the two registers. sec, usec, err := gettimeofday(tv) tv.Sec = int32(sec) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go index c6c99c13a70a..75219444a8d2 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go @@ -11,27 +11,18 @@ import ( "unsafe" ) -func Getpagesize() int { return 4096 } - -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = nsec / 1e9 - ts.Nsec = nsec % 1e9 - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Usec = int32(nsec % 1e9 / 1e3) - tv.Sec = int64(nsec / 1e9) - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: int32(usec)} } //sysnb gettimeofday(tp *Timeval) (sec int64, usec int32, err error) func Gettimeofday(tv *Timeval) (err error) { // The tv passed to gettimeofday must be non-nil - // but is otherwise unused. The answers come back + // but is otherwise unused. The answers come back // in the two registers. 
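For callers nothing changes on darwin: the exported Gettimeofday wrapper still fills in the Timeval it is given; it now just builds the Timeval with the shared setTimeval helper after copying the register results back. A hedged usage sketch (import path and error handling are the usual ones, nothing specific to this diff):

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	var tv unix.Timeval
	// tv must be non-nil; the wrapper copies the sec/usec values that the
	// raw gettimeofday call returns in registers back into it.
	if err := unix.Gettimeofday(&tv); err != nil {
		log.Fatal(err)
	}
	fmt.Println(tv.Sec, tv.Usec)
}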
sec, usec, err := gettimeofday(tv) tv.Sec = sec diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go index d286cf408d80..47ab66485969 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go @@ -9,27 +9,18 @@ import ( "unsafe" ) -func Getpagesize() int { return 4096 } - -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = int32(nsec / 1e9) - ts.Nsec = int32(nsec % 1e9) - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: int32(sec), Nsec: int32(nsec)} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Usec = int32(nsec % 1e9 / 1e3) - tv.Sec = int32(nsec / 1e9) - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: int32(sec), Usec: int32(usec)} } //sysnb gettimeofday(tp *Timeval) (sec int32, usec int32, err error) func Gettimeofday(tv *Timeval) (err error) { // The tv passed to gettimeofday must be non-nil - // but is otherwise unused. The answers come back + // but is otherwise unused. The answers come back // in the two registers. sec, usec, err := gettimeofday(tv) tv.Sec = int32(sec) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go index c33905cdcd96..d6d962801426 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go @@ -11,27 +11,18 @@ import ( "unsafe" ) -func Getpagesize() int { return 16384 } - -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = nsec / 1e9 - ts.Nsec = nsec % 1e9 - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Usec = int32(nsec % 1e9 / 1e3) - tv.Sec = int64(nsec / 1e9) - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: int32(usec)} } //sysnb gettimeofday(tp *Timeval) (sec int64, usec int32, err error) func Gettimeofday(tv *Timeval) (err error) { // The tv passed to gettimeofday must be non-nil - // but is otherwise unused. The answers come back + // but is otherwise unused. The answers come back // in the two registers. 
sec, usec, err := gettimeofday(tv) tv.Sec = sec diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index 3a483373dce4..fee06839fd41 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -257,7 +257,6 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { // Searchfs // Delete // Copyfile -// Poll // Watchevent // Waitevent // Modwatch @@ -403,7 +402,6 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { // Pread_nocancel // Pwrite_nocancel // Waitid_nocancel -// Poll_nocancel // Msgsnd_nocancel // Msgrcv_nocancel // Sem_wait_nocancel diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go index da7cb7982cdd..9babb31ea751 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go @@ -11,21 +11,12 @@ import ( "unsafe" ) -func Getpagesize() int { return 4096 } - -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = nsec / 1e9 - ts.Nsec = nsec % 1e9 - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Usec = nsec % 1e9 / 1e3 - tv.Sec = int64(nsec / 1e9) - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} } func SetKevent(k *Kevent_t, fd, mode, flags int) { diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index d26e52eaefe1..8f7ab16d1fb8 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -32,7 +32,7 @@ func nametomib(name string) (mib []_C_int, err error) { // NOTE(rsc): It seems strange to set the buffer to have // size CTL_MAXNAME+2 but use only CTL_MAXNAME - // as the size. I don't know why the +2 is here, but the + // as the size. I don't know why the +2 is here, but the // kernel uses +2 for its own implementation of this function. 
// I am scared that if we don't include the +2 here, the kernel // will silently write 2 words farther than we specify @@ -550,7 +550,6 @@ func IoctlGetTermios(fd int, req uint) (*Termios, error) { // Searchfs // Delete // Copyfile -// Poll // Watchevent // Waitevent // Modwatch diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go index 6a0cd804d882..21e03958cdc5 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go @@ -11,21 +11,12 @@ import ( "unsafe" ) -func Getpagesize() int { return 4096 } - -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = int32(nsec / 1e9) - ts.Nsec = int32(nsec % 1e9) - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: int32(sec), Nsec: int32(nsec)} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Usec = int32(nsec % 1e9 / 1e3) - tv.Sec = int32(nsec / 1e9) - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: int32(sec), Usec: int32(usec)} } func SetKevent(k *Kevent_t, fd, mode, flags int) { diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go index e142540efa42..9c945a657965 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go @@ -11,21 +11,12 @@ import ( "unsafe" ) -func Getpagesize() int { return 4096 } - -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = nsec / 1e9 - ts.Nsec = nsec % 1e9 - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Usec = nsec % 1e9 / 1e3 - tv.Sec = int64(nsec / 1e9) - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} } func SetKevent(k *Kevent_t, fd, mode, flags int) { diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go index 5504cb125594..5cd6243f2a06 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go @@ -11,21 +11,12 @@ import ( "unsafe" ) -func Getpagesize() int { return 4096 } - -func TimespecToNsec(ts Timespec) int64 { return ts.Sec*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = nsec / 1e9 - ts.Nsec = int32(nsec % 1e9) - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: int32(nsec)} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Usec = int32(nsec % 1e9 / 1e3) - tv.Sec = nsec / 1e9 - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: int32(usec)} } func SetKevent(k *Kevent_t, fd, mode, flags int) { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 1b7d59d8963b..b98a7e1544d4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -255,7 +255,7 @@ func Getgroups() (gids []int, err error) { return nil, nil } - // Sanity check group count. Max is 1<<16 on Linux. + // Sanity check group count. 
Max is 1<<16 on Linux. if n < 0 || n > 1<<20 { return nil, EINVAL } @@ -290,8 +290,8 @@ type WaitStatus uint32 // 0x7F (stopped), or a signal number that caused an exit. // The 0x80 bit is whether there was a core dump. // An extra number (exit code, signal causing a stop) -// is in the high bits. At least that's the idea. -// There are various irregularities. For example, the +// is in the high bits. At least that's the idea. +// There are various irregularities. For example, the // "continued" status is 0xFFFF, distinguishing itself // from stopped via the core dump bit. @@ -926,7 +926,7 @@ func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from msg.Namelen = uint32(SizeofSockaddrAny) var iov Iovec if len(p) > 0 { - iov.Base = (*byte)(unsafe.Pointer(&p[0])) + iov.Base = &p[0] iov.SetLen(len(p)) } var dummy byte @@ -941,7 +941,7 @@ func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from iov.Base = &dummy iov.SetLen(1) } - msg.Control = (*byte)(unsafe.Pointer(&oob[0])) + msg.Control = &oob[0] msg.SetControllen(len(oob)) } msg.Iov = &iov @@ -974,11 +974,11 @@ func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) } } var msg Msghdr - msg.Name = (*byte)(unsafe.Pointer(ptr)) + msg.Name = (*byte)(ptr) msg.Namelen = uint32(salen) var iov Iovec if len(p) > 0 { - iov.Base = (*byte)(unsafe.Pointer(&p[0])) + iov.Base = &p[0] iov.SetLen(len(p)) } var dummy byte @@ -993,7 +993,7 @@ func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) iov.Base = &dummy iov.SetLen(1) } - msg.Control = (*byte)(unsafe.Pointer(&oob[0])) + msg.Control = &oob[0] msg.SetControllen(len(oob)) } msg.Iov = &iov @@ -1023,7 +1023,7 @@ func ptracePeek(req int, pid int, addr uintptr, out []byte) (count int, err erro var buf [sizeofPtr]byte - // Leading edge. PEEKTEXT/PEEKDATA don't require aligned + // Leading edge. 
PEEKTEXT/PEEKDATA don't require aligned // access (PEEKUSER warns that it might), but if we don't // align our reads, we might straddle an unmapped page // boundary and not get the bytes leading up to the page @@ -1262,6 +1262,7 @@ func Getpgrp() (pid int) { //sys PivotRoot(newroot string, putold string) (err error) = SYS_PIVOT_ROOT //sysnb prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) = SYS_PRLIMIT64 //sys Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) +//sys Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) = SYS_PSELECT6 //sys read(fd int, p []byte) (n int, err error) //sys Removexattr(path string, attr string) (err error) //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go index 2b881b9793b4..4774fa363e83 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go @@ -14,21 +14,12 @@ import ( "unsafe" ) -func Getpagesize() int { return 4096 } - -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = int32(nsec / 1e9) - ts.Nsec = int32(nsec % 1e9) - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: int32(sec), Nsec: int32(nsec)} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Sec = int32(nsec / 1e9) - tv.Usec = int32(nsec % 1e9 / 1e3) - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: int32(sec), Usec: int32(usec)} } //sysnb pipe(p *[2]_C_int) (err error) @@ -185,9 +176,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // On x86 Linux, all the socket calls go through an extra indirection, // I think because the 5-register system call interface can't handle -// the 6-argument calls like sendto and recvfrom. Instead the +// the 6-argument calls like sendto and recvfrom. Instead the // arguments to the underlying system call are the number below -// and a pointer to an array of uintptr. We hide the pointer in the +// and a pointer to an array of uintptr. We hide the pointer in the // socketcall assembly to avoid allocation on every system call. 
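A conceptual sketch of the socketcall indirection the comment above describes, assuming linux/386. The real package does the equivalent in assembly precisely to avoid allocating the argument array on every call; socketcallSketch and its parameter names are hypothetical, not part of the vendored code:

// +build linux,386

package sketch

import (
	"syscall"
	"unsafe"

	"golang.org/x/sys/unix"
)

// socketcallSketch packs up to six socket-call arguments into an array and
// issues a single SYS_SOCKETCALL system call with the sub-call number and a
// pointer to that array, mirroring the indirection described above.
func socketcallSketch(call int, a0, a1, a2, a3, a4, a5 uintptr) (uintptr, error) {
	args := [6]uintptr{a0, a1, a2, a3, a4, a5}
	r1, _, errno := syscall.Syscall(unix.SYS_SOCKETCALL, uintptr(call),
		uintptr(unsafe.Pointer(&args[0])), 0)
	if errno != 0 {
		return 0, errno
	}
	return r1, nil
}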
const ( diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go index 9516a3fd7ef6..3707f6b7c933 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -69,8 +69,6 @@ func Gettimeofday(tv *Timeval) (err error) { return nil } -func Getpagesize() int { return 4096 } - func Time(t *Time_t) (tt Time_t, err error) { var tv Timeval errno := gettimeofday(&tv) @@ -85,19 +83,12 @@ func Time(t *Time_t) (tt Time_t, err error) { //sys Utime(path string, buf *Utimbuf) (err error) -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = nsec / 1e9 - ts.Nsec = nsec % 1e9 - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Sec = nsec / 1e9 - tv.Usec = nsec % 1e9 / 1e3 - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} } //sysnb pipe(p *[2]_C_int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go index 71d87022899a..226be100f50b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go @@ -11,21 +11,12 @@ import ( "unsafe" ) -func Getpagesize() int { return 4096 } - -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = int32(nsec / 1e9) - ts.Nsec = int32(nsec % 1e9) - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: int32(sec), Nsec: int32(nsec)} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Sec = int32(nsec / 1e9) - tv.Usec = int32(nsec % 1e9 / 1e3) - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: int32(sec), Usec: int32(usec)} } func Pipe(p []int) (err error) { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index 4a136396cdd0..9a8e6e411797 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -21,7 +21,12 @@ package unix //sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 //sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK -//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS_PSELECT6 + +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + ts := Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} + return Pselect(nfd, r, w, e, &ts, nil) +} + //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys Setfsgid(gid int) (err error) //sys Setfsuid(uid int) (err error) @@ -66,23 +71,14 @@ func Lstat(path string, stat *Stat_t) (err error) { //sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) -func Getpagesize() int { return 65536 } - //sysnb Gettimeofday(tv *Timeval) (err error) -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func 
NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = nsec / 1e9 - ts.Nsec = nsec % 1e9 - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Sec = nsec / 1e9 - tv.Usec = nsec % 1e9 / 1e3 - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} } func Time(t *Time_t) (Time_t, error) { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go index 73318e5c6433..cdda11a9fabf 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go @@ -23,7 +23,12 @@ package unix //sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 //sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK -//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS_PSELECT6 + +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + ts := Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} + return Pselect(nfd, r, w, e, &ts, nil) +} + //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys Setfsgid(gid int) (err error) //sys Setfsuid(uid int) (err error) @@ -55,8 +60,6 @@ package unix //sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) -func Getpagesize() int { return 65536 } - //sysnb Gettimeofday(tv *Timeval) (err error) func Time(t *Time_t) (tt Time_t, err error) { @@ -73,19 +76,12 @@ func Time(t *Time_t) (tt Time_t, err error) { //sys Utime(path string, buf *Utimbuf) (err error) -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = nsec / 1e9 - ts.Nsec = nsec % 1e9 - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Sec = nsec / 1e9 - tv.Usec = nsec % 1e9 / 1e3 - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} } func Pipe(p []int) (err error) { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go index b83d93fdffe1..a114ba8cb33e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go @@ -99,19 +99,12 @@ func Seek(fd int, offset int64, whence int) (off int64, err error) { return } -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = int32(nsec / 1e9) - ts.Nsec = int32(nsec % 1e9) - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: int32(sec), Nsec: int32(nsec)} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Sec = int32(nsec / 1e9) - tv.Usec = int32(nsec % 1e9 / 1e3) - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: int32(sec), Usec: int32(usec)} } //sysnb pipe2(p *[2]_C_int, flags int) (err error) @@ -235,5 +228,3 @@ func Poll(fds []PollFd, timeout int) (n int, err error) { } return 
poll(&fds[0], len(fds), timeout) } - -func Getpagesize() int { return 4096 } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go index 60770f627c67..7cae936c458a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go @@ -28,7 +28,7 @@ package unix //sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 //sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK -//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) +//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys Setfsgid(gid int) (err error) //sys Setfsuid(uid int) (err error) @@ -61,26 +61,17 @@ package unix //sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) -func Getpagesize() int { return 65536 } - //sysnb Gettimeofday(tv *Timeval) (err error) //sysnb Time(t *Time_t) (tt Time_t, err error) //sys Utime(path string, buf *Utimbuf) (err error) -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = nsec / 1e9 - ts.Nsec = nsec % 1e9 - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Sec = nsec / 1e9 - tv.Usec = nsec % 1e9 / 1e3 - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} } func (r *PtraceRegs) PC() uint64 { return r.Nip } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go index 1708a4bbf9ac..e96a40cb210e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go @@ -46,8 +46,6 @@ import ( //sysnb getgroups(n int, list *_Gid_t) (nn int, err error) //sysnb setgroups(n int, list *_Gid_t) (err error) -func Getpagesize() int { return 4096 } - //sysnb Gettimeofday(tv *Timeval) (err error) func Time(t *Time_t) (tt Time_t, err error) { @@ -64,19 +62,12 @@ func Time(t *Time_t) (tt Time_t, err error) { //sys Utime(path string, buf *Utimbuf) (err error) -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = nsec / 1e9 - ts.Nsec = nsec % 1e9 - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Sec = nsec / 1e9 - tv.Usec = nsec % 1e9 / 1e3 - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} } //sysnb pipe2(p *[2]_C_int, flags int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go index 20b7454d7703..012a3285ef93 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go @@ -6,11 +6,6 @@ package unix -import ( - "sync/atomic" - "syscall" -) - //sys EpollWait(epfd int, events 
[]EpollEvent, msec int) (n int, err error) //sys Dup2(oldfd int, newfd int) (err error) //sys Fchown(fd int, uid int, gid int) (err error) @@ -63,21 +58,6 @@ import ( //sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) -func sysconf(name int) (n int64, err syscall.Errno) - -// pageSize caches the value of Getpagesize, since it can't change -// once the system is booted. -var pageSize int64 // accessed atomically - -func Getpagesize() int { - n := atomic.LoadInt64(&pageSize) - if n == 0 { - n, _ = sysconf(_SC_PAGESIZE) - atomic.StoreInt64(&pageSize, n) - } - return int(n) -} - func Ioperm(from int, num int, on int) (err error) { return ENOSYS } @@ -102,19 +82,12 @@ func Time(t *Time_t) (tt Time_t, err error) { //sys Utime(path string, buf *Utimbuf) (err error) -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = nsec / 1e9 - ts.Nsec = nsec % 1e9 - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Sec = nsec / 1e9 - tv.Usec = int32(nsec % 1e9 / 1e3) - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: int32(usec)} } func (r *PtraceRegs) PC() uint64 { return r.Tpc } diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go index e12966845914..1caa5b3266fb 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -422,7 +422,6 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e // ntp_adjtime // pmc_control // pmc_get_info -// poll // pollts // preadv // profil diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go index afaca09838af..24f74e58ce22 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go @@ -6,21 +6,12 @@ package unix -func Getpagesize() int { return 4096 } - -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = int64(nsec / 1e9) - ts.Nsec = int32(nsec % 1e9) - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: int32(nsec)} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Usec = int32(nsec % 1e9 / 1e3) - tv.Sec = int64(nsec / 1e9) - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: int32(usec)} } func SetKevent(k *Kevent_t, fd, mode, flags int) { diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go index a6ff04ce5bd2..6878bf7ff902 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go @@ -6,21 +6,12 @@ package unix -func Getpagesize() int { return 4096 } - -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = int64(nsec / 1e9) - ts.Nsec = int64(nsec % 1e9) - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} } -func NsecToTimeval(nsec int64) (tv Timeval) { - 
nsec += 999 // round up to microsecond - tv.Usec = int32(nsec % 1e9 / 1e3) - tv.Sec = int64(nsec / 1e9) - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: int32(usec)} } func SetKevent(k *Kevent_t, fd, mode, flags int) { diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go index 68a6969b285b..dbbfcf71dbd6 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go @@ -6,21 +6,12 @@ package unix -func Getpagesize() int { return 4096 } - -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = int64(nsec / 1e9) - ts.Nsec = int32(nsec % 1e9) - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: int32(nsec)} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Usec = int32(nsec % 1e9 / 1e3) - tv.Sec = int64(nsec / 1e9) - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: int32(usec)} } func SetKevent(k *Kevent_t, fd, mode, flags int) { diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index 408e63081c06..03a0fac61dd6 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -243,7 +243,6 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { // nfssvc // nnpfspioctl // openat -// poll // preadv // profil // pwritev diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go index a66ddc59ce9e..994964a9164d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go @@ -6,21 +6,12 @@ package unix -func Getpagesize() int { return 4096 } - -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = int64(nsec / 1e9) - ts.Nsec = int32(nsec % 1e9) - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: int32(nsec)} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Usec = int32(nsec % 1e9 / 1e3) - tv.Sec = int64(nsec / 1e9) - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: int32(usec)} } func SetKevent(k *Kevent_t, fd, mode, flags int) { diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go index 0776c1faf98b..649e67fccc58 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go @@ -6,21 +6,12 @@ package unix -func Getpagesize() int { return 4096 } - -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = nsec / 1e9 - ts.Nsec = nsec % 1e9 - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Usec = nsec % 1e9 / 1e3 - tv.Sec = nsec / 1e9 - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} } func SetKevent(k *Kevent_t, fd, mode, flags int) { diff --git 
a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go index 14ddaf3f33c5..59844f504110 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go @@ -6,23 +6,12 @@ package unix -import "syscall" - -func Getpagesize() int { return syscall.Getpagesize() } - -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = int64(nsec / 1e9) - ts.Nsec = int32(nsec % 1e9) - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: int32(nsec)} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Usec = int32(nsec % 1e9 / 1e3) - tv.Sec = int64(nsec / 1e9) - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: int32(usec)} } func SetKevent(k *Kevent_t, fd, mode, flags int) { diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index 0d4e5c4e6c4c..3ab9e07c8c03 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -13,7 +13,6 @@ package unix import ( - "sync/atomic" "syscall" "unsafe" ) @@ -167,7 +166,7 @@ func Getwd() (wd string, err error) { func Getgroups() (gids []int, err error) { n, err := getgroups(0, nil) - // Check for error and sanity check group count. Newer versions of + // Check for error and sanity check group count. Newer versions of // Solaris allow up to 1024 (NGROUPS_MAX). if n < 0 || n > 1024 { if err != nil { @@ -351,7 +350,7 @@ func Futimesat(dirfd int, path string, tv []Timeval) error { } // Solaris doesn't have an futimes function because it allows NULL to be -// specified as the path for futimesat. However, Go doesn't like +// specified as the path for futimesat. However, Go doesn't like // NULL-style string interfaces, so this simple wrapper is provided. 
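The Futimes wrapper defined just after this note takes the usual {atime, mtime} pair. A hypothetical usage sketch combining it with the NsecToTimeval helper that this diff consolidates into timestruct.go (the file path below is illustrative only):

package main

import (
	"log"
	"os"
	"time"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.Open("/tmp/example") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	now := time.Now().UnixNano()
	tv := []unix.Timeval{
		unix.NsecToTimeval(now), // access time
		unix.NsecToTimeval(now), // modification time
	}
	if err := unix.Futimes(int(f.Fd()), tv); err != nil {
		log.Fatal(err)
	}
}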
func Futimes(fd int, tv []Timeval) error { if tv == nil { @@ -515,6 +514,24 @@ func Acct(path string) (err error) { return acct(pathp) } +//sys __makedev(version int, major uint, minor uint) (val uint64) + +func Mkdev(major, minor uint32) uint64 { + return __makedev(NEWDEV, uint(major), uint(minor)) +} + +//sys __major(version int, dev uint64) (val uint) + +func Major(dev uint64) uint32 { + return uint32(__major(NEWDEV, dev)) +} + +//sys __minor(version int, dev uint64) (val uint) + +func Minor(dev uint64) uint32 { + return uint32(__minor(NEWDEV, dev)) +} + /* * Expose the ioctl function */ @@ -561,6 +578,15 @@ func IoctlGetTermio(fd int, req uint) (*Termio, error) { return &value, err } +//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) + +func Poll(fds []PollFd, timeout int) (n int, err error) { + if len(fds) == 0 { + return poll(nil, 0, timeout) + } + return poll(&fds[0], len(fds), timeout) +} + /* * Exposed directly */ @@ -613,6 +639,7 @@ func IoctlGetTermio(fd int, req uint) (*Termio, error) { //sys Mlock(b []byte) (err error) //sys Mlockall(flags int) (err error) //sys Mprotect(b []byte, prot int) (err error) +//sys Msync(b []byte, flags int) (err error) //sys Munlock(b []byte) (err error) //sys Munlockall() (err error) //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) @@ -699,18 +726,3 @@ func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, e func Munmap(b []byte) (err error) { return mapper.Munmap(b) } - -//sys sysconf(name int) (n int64, err error) - -// pageSize caches the value of Getpagesize, since it can't change -// once the system is booted. -var pageSize int64 // accessed atomically - -func Getpagesize() int { - n := atomic.LoadInt64(&pageSize) - if n == 0 { - n, _ = sysconf(_SC_PAGESIZE) - atomic.StoreInt64(&pageSize, n) - } - return int(n) -} diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go index 5aff62c3bbea..9d4e7a678f6a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go @@ -6,19 +6,12 @@ package unix -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = nsec / 1e9 - ts.Nsec = nsec % 1e9 - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Usec = nsec % 1e9 / 1e3 - tv.Sec = int64(nsec / 1e9) - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} } func (iov *Iovec) SetLen(length int) { diff --git a/vendor/golang.org/x/sys/unix/timestruct.go b/vendor/golang.org/x/sys/unix/timestruct.go new file mode 100644 index 000000000000..139fbbebbbda --- /dev/null +++ b/vendor/golang.org/x/sys/unix/timestruct.go @@ -0,0 +1,62 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package unix + +// TimespecToNsec converts a Timespec value into a number of +// nanoseconds since the Unix epoch. +func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } + +// NsecToTimespec takes a number of nanoseconds since the Unix epoch +// and returns the corresponding Timespec value. 
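Before the NsecToTimespec implementation that continues below, a quick round-trip sketch of the consolidated conversion helpers in the new timestruct.go, including the negative-nanosecond normalization they now perform (the inputs are illustrative; the old per-arch code would have produced a Timespec with a negative Nsec here):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Negative nanosecond counts are normalized so Nsec stays in [0, 1e9).
	ts := unix.NsecToTimespec(-1)
	fmt.Println(ts.Sec, ts.Nsec) // -1 999999999

	// Timeval conversion still rounds up to the next microsecond.
	tv := unix.NsecToTimeval(1500)
	fmt.Println(tv.Sec, tv.Usec) // 0 2

	// The Nano accessor, also moved into timestruct.go, recovers the count.
	fmt.Println(ts.Nano()) // -1
}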
+func NsecToTimespec(nsec int64) Timespec { + sec := nsec / 1e9 + nsec = nsec % 1e9 + if nsec < 0 { + nsec += 1e9 + sec-- + } + return setTimespec(sec, nsec) +} + +// TimevalToNsec converts a Timeval value into a number of nanoseconds +// since the Unix epoch. +func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 } + +// NsecToTimeval takes a number of nanoseconds since the Unix epoch +// and returns the corresponding Timeval value. +func NsecToTimeval(nsec int64) Timeval { + nsec += 999 // round up to microsecond + usec := nsec % 1e9 / 1e3 + sec := nsec / 1e9 + if usec < 0 { + usec += 1e6 + sec-- + } + return setTimeval(sec, usec) +} + +// Unix returns ts as the number of seconds and nanoseconds elapsed since the +// Unix epoch. +func (ts *Timespec) Unix() (sec int64, nsec int64) { + return int64(ts.Sec), int64(ts.Nsec) +} + +// Unix returns tv as the number of seconds and nanoseconds elapsed since the +// Unix epoch. +func (tv *Timeval) Unix() (sec int64, nsec int64) { + return int64(tv.Sec), int64(tv.Usec) * 1000 +} + +// Nano returns ts as the number of nanoseconds elapsed since the Unix epoch. +func (ts *Timespec) Nano() int64 { + return int64(ts.Sec)*1e9 + int64(ts.Nsec) +} + +// Nano returns tv as the number of nanoseconds elapsed since the Unix epoch. +func (tv *Timeval) Nano() int64 { + return int64(tv.Sec)*1e9 + int64(tv.Usec)*1000 +} diff --git a/vendor/golang.org/x/sys/unix/types_darwin.go b/vendor/golang.org/x/sys/unix/types_darwin.go index 415124d4a1a7..3a938ace6856 100644 --- a/vendor/golang.org/x/sys/unix/types_darwin.go +++ b/vendor/golang.org/x/sys/unix/types_darwin.go @@ -19,6 +19,7 @@ package unix #define _DARWIN_USE_64_BIT_INODE #include #include +#include #include #include #include @@ -252,3 +253,20 @@ const ( AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW ) + +// poll + +type PollFd C.struct_pollfd + +const ( + POLLERR = C.POLLERR + POLLHUP = C.POLLHUP + POLLIN = C.POLLIN + POLLNVAL = C.POLLNVAL + POLLOUT = C.POLLOUT + POLLPRI = C.POLLPRI + POLLRDBAND = C.POLLRDBAND + POLLRDNORM = C.POLLRDNORM + POLLWRBAND = C.POLLWRBAND + POLLWRNORM = C.POLLWRNORM +) diff --git a/vendor/golang.org/x/sys/unix/types_dragonfly.go b/vendor/golang.org/x/sys/unix/types_dragonfly.go index 80b27781d1e1..78ce32a90a7a 100644 --- a/vendor/golang.org/x/sys/unix/types_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/types_dragonfly.go @@ -17,6 +17,7 @@ package unix #define KERNEL #include #include +#include #include #include #include @@ -247,3 +248,20 @@ const ( AT_FDCWD = C.AT_FDCWD AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW ) + +// poll + +type PollFd C.struct_pollfd + +const ( + POLLERR = C.POLLERR + POLLHUP = C.POLLHUP + POLLIN = C.POLLIN + POLLNVAL = C.POLLNVAL + POLLOUT = C.POLLOUT + POLLPRI = C.POLLPRI + POLLRDBAND = C.POLLRDBAND + POLLRDNORM = C.POLLRDNORM + POLLWRBAND = C.POLLWRBAND + POLLWRNORM = C.POLLWRNORM +) diff --git a/vendor/golang.org/x/sys/unix/types_freebsd.go b/vendor/golang.org/x/sys/unix/types_freebsd.go index 934fd7ace22d..7340004ef039 100644 --- a/vendor/golang.org/x/sys/unix/types_freebsd.go +++ b/vendor/golang.org/x/sys/unix/types_freebsd.go @@ -17,6 +17,7 @@ package unix #define KERNEL #include #include +#include #include #include #include @@ -367,6 +368,24 @@ const ( AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW ) +// poll + +type PollFd C.struct_pollfd + +const ( + POLLERR = C.POLLERR + POLLHUP = C.POLLHUP + POLLIN = C.POLLIN + POLLINIGNEOF = C.POLLINIGNEOF + POLLNVAL = C.POLLNVAL + POLLOUT = 
C.POLLOUT + POLLPRI = C.POLLPRI + POLLRDBAND = C.POLLRDBAND + POLLRDNORM = C.POLLRDNORM + POLLWRBAND = C.POLLWRBAND + POLLWRNORM = C.POLLWRNORM +) + // Capabilities type CapRights C.struct_cap_rights diff --git a/vendor/golang.org/x/sys/unix/types_netbsd.go b/vendor/golang.org/x/sys/unix/types_netbsd.go index cb95c80aabcb..9097245a2bb4 100644 --- a/vendor/golang.org/x/sys/unix/types_netbsd.go +++ b/vendor/golang.org/x/sys/unix/types_netbsd.go @@ -17,6 +17,7 @@ package unix #define KERNEL #include #include +#include #include #include #include @@ -234,6 +235,23 @@ const ( AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW ) +// poll + +type PollFd C.struct_pollfd + +const ( + POLLERR = C.POLLERR + POLLHUP = C.POLLHUP + POLLIN = C.POLLIN + POLLNVAL = C.POLLNVAL + POLLOUT = C.POLLOUT + POLLPRI = C.POLLPRI + POLLRDBAND = C.POLLRDBAND + POLLRDNORM = C.POLLRDNORM + POLLWRBAND = C.POLLWRBAND + POLLWRNORM = C.POLLWRNORM +) + // Sysctl type Sysctlnode C.struct_sysctlnode diff --git a/vendor/golang.org/x/sys/unix/types_openbsd.go b/vendor/golang.org/x/sys/unix/types_openbsd.go index 392da69bf0a1..44ea7c028c35 100644 --- a/vendor/golang.org/x/sys/unix/types_openbsd.go +++ b/vendor/golang.org/x/sys/unix/types_openbsd.go @@ -17,6 +17,7 @@ package unix #define KERNEL #include #include +#include #include #include #include @@ -249,3 +250,20 @@ const ( AT_FDCWD = C.AT_FDCWD AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW ) + +// poll + +type PollFd C.struct_pollfd + +const ( + POLLERR = C.POLLERR + POLLHUP = C.POLLHUP + POLLIN = C.POLLIN + POLLNVAL = C.POLLNVAL + POLLOUT = C.POLLOUT + POLLPRI = C.POLLPRI + POLLRDBAND = C.POLLRDBAND + POLLRDNORM = C.POLLRDNORM + POLLWRBAND = C.POLLWRBAND + POLLWRNORM = C.POLLWRNORM +) diff --git a/vendor/golang.org/x/sys/unix/types_solaris.go b/vendor/golang.org/x/sys/unix/types_solaris.go index 393c7f04fb68..f77715544254 100644 --- a/vendor/golang.org/x/sys/unix/types_solaris.go +++ b/vendor/golang.org/x/sys/unix/types_solaris.go @@ -24,6 +24,7 @@ package unix #include #include #include +#include #include #include #include @@ -256,10 +257,6 @@ type BpfTimeval C.struct_bpf_timeval type BpfHdr C.struct_bpf_hdr -// sysconf information - -const _SC_PAGESIZE = C._SC_PAGESIZE - // Terminal handling type Termios C.struct_termios @@ -267,3 +264,20 @@ type Termios C.struct_termios type Termio C.struct_termio type Winsize C.struct_winsize + +// poll + +type PollFd C.struct_pollfd + +const ( + POLLERR = C.POLLERR + POLLHUP = C.POLLHUP + POLLIN = C.POLLIN + POLLNVAL = C.POLLNVAL + POLLOUT = C.POLLOUT + POLLPRI = C.POLLPRI + POLLRDBAND = C.POLLRDBAND + POLLRDNORM = C.POLLRDNORM + POLLWRBAND = C.POLLWRBAND + POLLWRNORM = C.POLLWRNORM +) diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go index 1d3eec44d492..adf5eef0f8e9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go @@ -981,6 +981,49 @@ const ( MAP_STACK = 0x400 MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 + MNT_ACLS = 0x8000000 + MNT_ASYNC = 0x40 + MNT_AUTOMOUNTED = 0x200000000 + MNT_BYFSID = 0x8000000 + MNT_CMDFLAGS = 0xd0f0000 + MNT_DEFEXPORTED = 0x200 + MNT_DELEXPORT = 0x20000 + MNT_EXKERB = 0x800 + MNT_EXPORTANON = 0x400 + MNT_EXPORTED = 0x100 + MNT_EXPUBLIC = 0x20000000 + MNT_EXRDONLY = 0x80 + MNT_FORCE = 0x80000 + MNT_GJOURNAL = 0x2000000 + MNT_IGNORE = 0x800000 + MNT_LAZY = 0x3 + MNT_LOCAL = 0x1000 + MNT_MULTILABEL = 0x4000000 + MNT_NFS4ACLS = 0x10 + MNT_NOATIME = 0x10000000 + MNT_NOCLUSTERR = 0x40000000 + 
MNT_NOCLUSTERW = 0x80000000 + MNT_NOEXEC = 0x4 + MNT_NONBUSY = 0x4000000 + MNT_NOSUID = 0x8 + MNT_NOSYMFOLLOW = 0x400000 + MNT_NOWAIT = 0x2 + MNT_QUOTA = 0x2000 + MNT_RDONLY = 0x1 + MNT_RELOAD = 0x40000 + MNT_ROOTFS = 0x4000 + MNT_SNAPSHOT = 0x1000000 + MNT_SOFTDEP = 0x200000 + MNT_SUIDDIR = 0x100000 + MNT_SUJ = 0x100000000 + MNT_SUSPEND = 0x4 + MNT_SYNCHRONOUS = 0x2 + MNT_UNION = 0x20 + MNT_UPDATE = 0x10000 + MNT_UPDATEMASK = 0x2d8d0807e + MNT_USER = 0x8000 + MNT_VISFLAGMASK = 0x3fef0ffff + MNT_WAIT = 0x1 MSG_CMSG_CLOEXEC = 0x40000 MSG_COMPAT = 0x8000 MSG_CTRUNC = 0x20 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go index ac094f9cf35a..360caff4f9a3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go @@ -982,6 +982,49 @@ const ( MAP_STACK = 0x400 MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 + MNT_ACLS = 0x8000000 + MNT_ASYNC = 0x40 + MNT_AUTOMOUNTED = 0x200000000 + MNT_BYFSID = 0x8000000 + MNT_CMDFLAGS = 0xd0f0000 + MNT_DEFEXPORTED = 0x200 + MNT_DELEXPORT = 0x20000 + MNT_EXKERB = 0x800 + MNT_EXPORTANON = 0x400 + MNT_EXPORTED = 0x100 + MNT_EXPUBLIC = 0x20000000 + MNT_EXRDONLY = 0x80 + MNT_FORCE = 0x80000 + MNT_GJOURNAL = 0x2000000 + MNT_IGNORE = 0x800000 + MNT_LAZY = 0x3 + MNT_LOCAL = 0x1000 + MNT_MULTILABEL = 0x4000000 + MNT_NFS4ACLS = 0x10 + MNT_NOATIME = 0x10000000 + MNT_NOCLUSTERR = 0x40000000 + MNT_NOCLUSTERW = 0x80000000 + MNT_NOEXEC = 0x4 + MNT_NONBUSY = 0x4000000 + MNT_NOSUID = 0x8 + MNT_NOSYMFOLLOW = 0x400000 + MNT_NOWAIT = 0x2 + MNT_QUOTA = 0x2000 + MNT_RDONLY = 0x1 + MNT_RELOAD = 0x40000 + MNT_ROOTFS = 0x4000 + MNT_SNAPSHOT = 0x1000000 + MNT_SOFTDEP = 0x200000 + MNT_SUIDDIR = 0x100000 + MNT_SUJ = 0x100000000 + MNT_SUSPEND = 0x4 + MNT_SYNCHRONOUS = 0x2 + MNT_UNION = 0x20 + MNT_UPDATE = 0x10000 + MNT_UPDATEMASK = 0x2d8d0807e + MNT_USER = 0x8000 + MNT_VISFLAGMASK = 0x3fef0ffff + MNT_WAIT = 0x1 MSG_CMSG_CLOEXEC = 0x40000 MSG_COMPAT = 0x8000 MSG_CTRUNC = 0x20 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go index c5c6f13e530e..87deda950e87 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go @@ -989,6 +989,49 @@ const ( MAP_STACK = 0x400 MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 + MNT_ACLS = 0x8000000 + MNT_ASYNC = 0x40 + MNT_AUTOMOUNTED = 0x200000000 + MNT_BYFSID = 0x8000000 + MNT_CMDFLAGS = 0xd0f0000 + MNT_DEFEXPORTED = 0x200 + MNT_DELEXPORT = 0x20000 + MNT_EXKERB = 0x800 + MNT_EXPORTANON = 0x400 + MNT_EXPORTED = 0x100 + MNT_EXPUBLIC = 0x20000000 + MNT_EXRDONLY = 0x80 + MNT_FORCE = 0x80000 + MNT_GJOURNAL = 0x2000000 + MNT_IGNORE = 0x800000 + MNT_LAZY = 0x3 + MNT_LOCAL = 0x1000 + MNT_MULTILABEL = 0x4000000 + MNT_NFS4ACLS = 0x10 + MNT_NOATIME = 0x10000000 + MNT_NOCLUSTERR = 0x40000000 + MNT_NOCLUSTERW = 0x80000000 + MNT_NOEXEC = 0x4 + MNT_NONBUSY = 0x4000000 + MNT_NOSUID = 0x8 + MNT_NOSYMFOLLOW = 0x400000 + MNT_NOWAIT = 0x2 + MNT_QUOTA = 0x2000 + MNT_RDONLY = 0x1 + MNT_RELOAD = 0x40000 + MNT_ROOTFS = 0x4000 + MNT_SNAPSHOT = 0x1000000 + MNT_SOFTDEP = 0x200000 + MNT_SUIDDIR = 0x100000 + MNT_SUJ = 0x100000000 + MNT_SUSPEND = 0x4 + MNT_SYNCHRONOUS = 0x2 + MNT_UNION = 0x20 + MNT_UPDATE = 0x10000 + MNT_UPDATEMASK = 0x2d8d0807e + MNT_USER = 0x8000 + MNT_VISFLAGMASK = 0x3fef0ffff + MNT_WAIT = 0x1 MSG_CMSG_CLOEXEC = 0x40000 MSG_COMPAT = 0x8000 MSG_CTRUNC = 0x20 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go 
b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 33ced1ae4042..bb8a7724bc4e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -36,7 +36,7 @@ const ( AF_KEY = 0xf AF_LLC = 0x1a AF_LOCAL = 0x1 - AF_MAX = 0x2b + AF_MAX = 0x2c AF_MPLS = 0x1c AF_NETBEUI = 0xd AF_NETLINK = 0x10 @@ -51,6 +51,7 @@ const ( AF_ROUTE = 0x10 AF_RXRPC = 0x21 AF_SECURITY = 0xe + AF_SMC = 0x2b AF_SNA = 0x16 AF_TIPC = 0x1e AF_UNIX = 0x1 @@ -129,6 +130,7 @@ const ( ARPHRD_TUNNEL = 0x300 ARPHRD_TUNNEL6 = 0x301 ARPHRD_VOID = 0xffff + ARPHRD_VSOCKMON = 0x33a ARPHRD_X25 = 0x10f B0 = 0x0 B1000000 = 0x1008 @@ -392,6 +394,7 @@ const ( ETH_P_FIP = 0x8914 ETH_P_HDLC = 0x19 ETH_P_HSR = 0x892f + ETH_P_IBOE = 0x8915 ETH_P_IEEE802154 = 0xf6 ETH_P_IEEEPUP = 0xa00 ETH_P_IEEEPUPAT = 0xa01 @@ -453,6 +456,8 @@ const ( FF1 = 0x8000 FFDLY = 0x8000 FLUSHO = 0x1000 + FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 + FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 @@ -657,8 +662,10 @@ const ( IPV6_2292PKTOPTIONS = 0x6 IPV6_2292RTHDR = 0x5 IPV6_ADDRFORM = 0x1 + IPV6_ADDR_PREFERENCES = 0x48 IPV6_ADD_MEMBERSHIP = 0x14 IPV6_AUTHHDR = 0xa + IPV6_AUTOFLOWLABEL = 0x46 IPV6_CHECKSUM = 0x7 IPV6_DONTFRAG = 0x3e IPV6_DROP_MEMBERSHIP = 0x15 @@ -671,12 +678,14 @@ const ( IPV6_JOIN_GROUP = 0x14 IPV6_LEAVE_ANYCAST = 0x1c IPV6_LEAVE_GROUP = 0x15 + IPV6_MINHOPCOUNT = 0x49 IPV6_MTU = 0x18 IPV6_MTU_DISCOVER = 0x17 IPV6_MULTICAST_HOPS = 0x12 IPV6_MULTICAST_IF = 0x11 IPV6_MULTICAST_LOOP = 0x13 IPV6_NEXTHOP = 0x9 + IPV6_ORIGDSTADDR = 0x4a IPV6_PATHMTU = 0x3d IPV6_PKTINFO = 0x32 IPV6_PMTUDISC_DO = 0x2 @@ -687,8 +696,10 @@ const ( IPV6_PMTUDISC_WANT = 0x1 IPV6_RECVDSTOPTS = 0x3a IPV6_RECVERR = 0x19 + IPV6_RECVFRAGSIZE = 0x4d IPV6_RECVHOPLIMIT = 0x33 IPV6_RECVHOPOPTS = 0x35 + IPV6_RECVORIGDSTADDR = 0x4a IPV6_RECVPATHMTU = 0x3c IPV6_RECVPKTINFO = 0x31 IPV6_RECVRTHDR = 0x38 @@ -702,7 +713,9 @@ const ( IPV6_RXDSTOPTS = 0x3b IPV6_RXHOPOPTS = 0x36 IPV6_TCLASS = 0x43 + IPV6_TRANSPARENT = 0x4b IPV6_UNICAST_HOPS = 0x10 + IPV6_UNICAST_IF = 0x4c IPV6_V6ONLY = 0x1a IPV6_XFRM_POLICY = 0x23 IP_ADD_MEMBERSHIP = 0x23 @@ -745,6 +758,7 @@ const ( IP_PMTUDISC_PROBE = 0x3 IP_PMTUDISC_WANT = 0x1 IP_RECVERR = 0xb + IP_RECVFRAGSIZE = 0x19 IP_RECVOPTS = 0x6 IP_RECVORIGDSTADDR = 0x14 IP_RECVRETOPTS = 0x7 @@ -782,6 +796,7 @@ const ( KEYCTL_NEGATE = 0xd KEYCTL_READ = 0xb KEYCTL_REJECT = 0x13 + KEYCTL_RESTRICT_KEYRING = 0x1d KEYCTL_REVOKE = 0x3 KEYCTL_SEARCH = 0xa KEYCTL_SESSION_TO_PARENT = 0x12 @@ -915,6 +930,7 @@ const ( MS_SILENT = 0x8000 MS_SLAVE = 0x80000 MS_STRICTATIME = 0x1000000 + MS_SUBMOUNT = 0x4000000 MS_SYNC = 0x4 MS_SYNCHRONOUS = 0x10 MS_UNBINDABLE = 0x20000 @@ -929,6 +945,7 @@ const ( NETLINK_DNRTMSG = 0xe NETLINK_DROP_MEMBERSHIP = 0x2 NETLINK_ECRYPTFS = 0x13 + NETLINK_EXT_ACK = 0xb NETLINK_FIB_LOOKUP = 0xa NETLINK_FIREWALL = 0x3 NETLINK_GENERIC = 0x10 @@ -947,6 +964,7 @@ const ( NETLINK_RX_RING = 0x6 NETLINK_SCSITRANSPORT = 0x12 NETLINK_SELINUX = 0x7 + NETLINK_SMC = 0x16 NETLINK_SOCK_DIAG = 0x4 NETLINK_TX_RING = 0x7 NETLINK_UNUSED = 0x1 @@ -967,8 +985,10 @@ const ( NLMSG_NOOP = 0x1 NLMSG_OVERRUN = 0x4 NLM_F_ACK = 0x4 + NLM_F_ACK_TLVS = 0x200 NLM_F_APPEND = 0x800 NLM_F_ATOMIC = 0x400 + NLM_F_CAPPED = 0x100 NLM_F_CREATE = 0x400 NLM_F_DUMP = 0x300 NLM_F_DUMP_FILTERED = 0x20 @@ -1025,6 +1045,7 @@ const ( PACKET_FANOUT_EBPF = 0x7 PACKET_FANOUT_FLAG_DEFRAG = 0x8000 PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 + 
PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 PACKET_FANOUT_HASH = 0x0 PACKET_FANOUT_LB = 0x1 PACKET_FANOUT_QM = 0x5 @@ -1256,7 +1277,7 @@ const ( RLIMIT_RTTIME = 0xf RLIMIT_SIGPENDING = 0xb RLIMIT_STACK = 0x3 - RLIM_INFINITY = -0x1 + RLIM_INFINITY = 0xffffffffffffffff RTAX_ADVMSS = 0x8 RTAX_CC_ALGO = 0x10 RTAX_CWND = 0x7 @@ -1281,7 +1302,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x19 + RTA_MAX = 0x1a RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1325,6 +1346,7 @@ const ( RTM_DELLINK = 0x11 RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d + RTM_DELNETCONF = 0x51 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1333,6 +1355,7 @@ const ( RTM_DELTFILTER = 0x2d RTM_F_CLONED = 0x200 RTM_F_EQUALIZE = 0x400 + RTM_F_FIB_MATCH = 0x2000 RTM_F_LOOKUP_TABLE = 0x1000 RTM_F_NOTIFY = 0x100 RTM_F_PREFIX = 0x800 @@ -1354,10 +1377,11 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x5f + RTM_MAX = 0x63 RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 + RTM_NEWCACHEREPORT = 0x60 RTM_NEWLINK = 0x10 RTM_NEWMDB = 0x54 RTM_NEWNDUSEROPT = 0x44 @@ -1372,8 +1396,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x14 - RTM_NR_MSGTYPES = 0x50 + RTM_NR_FAMILIES = 0x15 + RTM_NR_MSGTYPES = 0x54 RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1384,6 +1408,7 @@ const ( RTNH_F_OFFLOAD = 0x8 RTNH_F_ONLINK = 0x4 RTNH_F_PERVASIVE = 0x2 + RTNH_F_UNRESOLVED = 0x20 RTN_MAX = 0xb RTPROT_BABEL = 0x2a RTPROT_BIRD = 0xc @@ -1414,6 +1439,7 @@ const ( SCM_TIMESTAMP = 0x1d SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 SCM_WIFI_STATUS = 0x29 SECCOMP_MODE_DISABLED = 0x0 @@ -1552,6 +1578,7 @@ const ( SO_BSDCOMPAT = 0xe SO_BUSY_POLL = 0x2e SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b @@ -1560,11 +1587,13 @@ const ( SO_ERROR = 0x4 SO_GET_FILTER = 0x1a SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 SO_KEEPALIVE = 0x9 SO_LINGER = 0xd SO_LOCK_FILTER = 0x2c SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 SO_NOFCS = 0x2b SO_NO_CHECK = 0xb SO_OOBINLINE = 0xa @@ -1572,6 +1601,7 @@ const ( SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 + SO_PEERGROUPS = 0x3b SO_PEERNAME = 0x1c SO_PEERSEC = 0x1f SO_PRIORITY = 0xc @@ -1668,6 +1698,7 @@ const ( TCP_CORK = 0x3 TCP_DEFER_ACCEPT = 0x9 TCP_FASTOPEN = 0x17 + TCP_FASTOPEN_CONNECT = 0x1e TCP_INFO = 0xb TCP_KEEPCNT = 0x6 TCP_KEEPIDLE = 0x4 @@ -1727,6 +1758,7 @@ const ( TIOCGPKT = 0x80045438 TIOCGPTLCK = 0x80045439 TIOCGPTN = 0x80045430 + TIOCGPTPEER = 0x5441 TIOCGRS485 = 0x542e TIOCGSERIAL = 0x541e TIOCGSID = 0x5429 @@ -1810,6 +1842,8 @@ const ( TUNSETVNETHDRSZ = 0x400454d8 TUNSETVNETLE = 0x400454dc UMOUNT_NOFOLLOW = 0x8 + UTIME_NOW = 0x3fffffff + UTIME_OMIT = 0x3ffffffe VDISCARD = 0xd VEOF = 0x4 VEOL = 0xb @@ -1839,6 +1873,17 @@ const ( WALL = 0x40000000 WCLONE = 0x80000000 WCONTINUED = 0x8 + WDIOC_GETBOOTSTATUS = 0x80045702 + WDIOC_GETPRETIMEOUT = 0x80045709 + WDIOC_GETSTATUS = 0x80045701 + WDIOC_GETSUPPORT = 0x80285700 + WDIOC_GETTEMP = 0x80045703 + WDIOC_GETTIMELEFT = 0x8004570a + WDIOC_GETTIMEOUT = 0x80045707 + WDIOC_KEEPALIVE = 0x80045705 + WDIOC_SETOPTIONS = 0x80045704 + WDIOC_SETPRETIMEOUT = 0xc0045708 + WDIOC_SETTIMEOUT = 0xc0045706 WEXITED = 0x4 WNOHANG = 0x1 WNOTHREAD = 0x20000000 @@ -2019,7 +2064,6 @@ const ( SIGTSTP = syscall.Signal(0x14) SIGTTIN = syscall.Signal(0x15) 
SIGTTOU = syscall.Signal(0x16) - SIGUNUSED = syscall.Signal(0x1f) SIGURG = syscall.Signal(0x17) SIGUSR1 = syscall.Signal(0xa) SIGUSR2 = syscall.Signal(0xc) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index a6618fc1181b..cf0b2249f778 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -36,7 +36,7 @@ const ( AF_KEY = 0xf AF_LLC = 0x1a AF_LOCAL = 0x1 - AF_MAX = 0x2b + AF_MAX = 0x2c AF_MPLS = 0x1c AF_NETBEUI = 0xd AF_NETLINK = 0x10 @@ -51,6 +51,7 @@ const ( AF_ROUTE = 0x10 AF_RXRPC = 0x21 AF_SECURITY = 0xe + AF_SMC = 0x2b AF_SNA = 0x16 AF_TIPC = 0x1e AF_UNIX = 0x1 @@ -129,6 +130,7 @@ const ( ARPHRD_TUNNEL = 0x300 ARPHRD_TUNNEL6 = 0x301 ARPHRD_VOID = 0xffff + ARPHRD_VSOCKMON = 0x33a ARPHRD_X25 = 0x10f B0 = 0x0 B1000000 = 0x1008 @@ -392,6 +394,7 @@ const ( ETH_P_FIP = 0x8914 ETH_P_HDLC = 0x19 ETH_P_HSR = 0x892f + ETH_P_IBOE = 0x8915 ETH_P_IEEE802154 = 0xf6 ETH_P_IEEEPUP = 0xa00 ETH_P_IEEEPUPAT = 0xa01 @@ -453,6 +456,8 @@ const ( FF1 = 0x8000 FFDLY = 0x8000 FLUSHO = 0x1000 + FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 + FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 @@ -657,8 +662,10 @@ const ( IPV6_2292PKTOPTIONS = 0x6 IPV6_2292RTHDR = 0x5 IPV6_ADDRFORM = 0x1 + IPV6_ADDR_PREFERENCES = 0x48 IPV6_ADD_MEMBERSHIP = 0x14 IPV6_AUTHHDR = 0xa + IPV6_AUTOFLOWLABEL = 0x46 IPV6_CHECKSUM = 0x7 IPV6_DONTFRAG = 0x3e IPV6_DROP_MEMBERSHIP = 0x15 @@ -671,12 +678,14 @@ const ( IPV6_JOIN_GROUP = 0x14 IPV6_LEAVE_ANYCAST = 0x1c IPV6_LEAVE_GROUP = 0x15 + IPV6_MINHOPCOUNT = 0x49 IPV6_MTU = 0x18 IPV6_MTU_DISCOVER = 0x17 IPV6_MULTICAST_HOPS = 0x12 IPV6_MULTICAST_IF = 0x11 IPV6_MULTICAST_LOOP = 0x13 IPV6_NEXTHOP = 0x9 + IPV6_ORIGDSTADDR = 0x4a IPV6_PATHMTU = 0x3d IPV6_PKTINFO = 0x32 IPV6_PMTUDISC_DO = 0x2 @@ -687,8 +696,10 @@ const ( IPV6_PMTUDISC_WANT = 0x1 IPV6_RECVDSTOPTS = 0x3a IPV6_RECVERR = 0x19 + IPV6_RECVFRAGSIZE = 0x4d IPV6_RECVHOPLIMIT = 0x33 IPV6_RECVHOPOPTS = 0x35 + IPV6_RECVORIGDSTADDR = 0x4a IPV6_RECVPATHMTU = 0x3c IPV6_RECVPKTINFO = 0x31 IPV6_RECVRTHDR = 0x38 @@ -702,7 +713,9 @@ const ( IPV6_RXDSTOPTS = 0x3b IPV6_RXHOPOPTS = 0x36 IPV6_TCLASS = 0x43 + IPV6_TRANSPARENT = 0x4b IPV6_UNICAST_HOPS = 0x10 + IPV6_UNICAST_IF = 0x4c IPV6_V6ONLY = 0x1a IPV6_XFRM_POLICY = 0x23 IP_ADD_MEMBERSHIP = 0x23 @@ -745,6 +758,7 @@ const ( IP_PMTUDISC_PROBE = 0x3 IP_PMTUDISC_WANT = 0x1 IP_RECVERR = 0xb + IP_RECVFRAGSIZE = 0x19 IP_RECVOPTS = 0x6 IP_RECVORIGDSTADDR = 0x14 IP_RECVRETOPTS = 0x7 @@ -782,6 +796,7 @@ const ( KEYCTL_NEGATE = 0xd KEYCTL_READ = 0xb KEYCTL_REJECT = 0x13 + KEYCTL_RESTRICT_KEYRING = 0x1d KEYCTL_REVOKE = 0x3 KEYCTL_SEARCH = 0xa KEYCTL_SESSION_TO_PARENT = 0x12 @@ -915,6 +930,7 @@ const ( MS_SILENT = 0x8000 MS_SLAVE = 0x80000 MS_STRICTATIME = 0x1000000 + MS_SUBMOUNT = 0x4000000 MS_SYNC = 0x4 MS_SYNCHRONOUS = 0x10 MS_UNBINDABLE = 0x20000 @@ -929,6 +945,7 @@ const ( NETLINK_DNRTMSG = 0xe NETLINK_DROP_MEMBERSHIP = 0x2 NETLINK_ECRYPTFS = 0x13 + NETLINK_EXT_ACK = 0xb NETLINK_FIB_LOOKUP = 0xa NETLINK_FIREWALL = 0x3 NETLINK_GENERIC = 0x10 @@ -947,6 +964,7 @@ const ( NETLINK_RX_RING = 0x6 NETLINK_SCSITRANSPORT = 0x12 NETLINK_SELINUX = 0x7 + NETLINK_SMC = 0x16 NETLINK_SOCK_DIAG = 0x4 NETLINK_TX_RING = 0x7 NETLINK_UNUSED = 0x1 @@ -967,8 +985,10 @@ const ( NLMSG_NOOP = 0x1 NLMSG_OVERRUN = 0x4 NLM_F_ACK = 0x4 + NLM_F_ACK_TLVS = 0x200 NLM_F_APPEND = 0x800 NLM_F_ATOMIC = 0x400 + 
NLM_F_CAPPED = 0x100 NLM_F_CREATE = 0x400 NLM_F_DUMP = 0x300 NLM_F_DUMP_FILTERED = 0x20 @@ -1025,6 +1045,7 @@ const ( PACKET_FANOUT_EBPF = 0x7 PACKET_FANOUT_FLAG_DEFRAG = 0x8000 PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 + PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 PACKET_FANOUT_HASH = 0x0 PACKET_FANOUT_LB = 0x1 PACKET_FANOUT_QM = 0x5 @@ -1166,7 +1187,7 @@ const ( PR_SET_NO_NEW_PRIVS = 0x26 PR_SET_PDEATHSIG = 0x1 PR_SET_PTRACER = 0x59616d61 - PR_SET_PTRACER_ANY = -0x1 + PR_SET_PTRACER_ANY = 0xffffffffffffffff PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c PR_SET_THP_DISABLE = 0x29 @@ -1257,7 +1278,7 @@ const ( RLIMIT_RTTIME = 0xf RLIMIT_SIGPENDING = 0xb RLIMIT_STACK = 0x3 - RLIM_INFINITY = -0x1 + RLIM_INFINITY = 0xffffffffffffffff RTAX_ADVMSS = 0x8 RTAX_CC_ALGO = 0x10 RTAX_CWND = 0x7 @@ -1282,7 +1303,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x19 + RTA_MAX = 0x1a RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1326,6 +1347,7 @@ const ( RTM_DELLINK = 0x11 RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d + RTM_DELNETCONF = 0x51 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1334,6 +1356,7 @@ const ( RTM_DELTFILTER = 0x2d RTM_F_CLONED = 0x200 RTM_F_EQUALIZE = 0x400 + RTM_F_FIB_MATCH = 0x2000 RTM_F_LOOKUP_TABLE = 0x1000 RTM_F_NOTIFY = 0x100 RTM_F_PREFIX = 0x800 @@ -1355,10 +1378,11 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x5f + RTM_MAX = 0x63 RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 + RTM_NEWCACHEREPORT = 0x60 RTM_NEWLINK = 0x10 RTM_NEWMDB = 0x54 RTM_NEWNDUSEROPT = 0x44 @@ -1373,8 +1397,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x14 - RTM_NR_MSGTYPES = 0x50 + RTM_NR_FAMILIES = 0x15 + RTM_NR_MSGTYPES = 0x54 RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1385,6 +1409,7 @@ const ( RTNH_F_OFFLOAD = 0x8 RTNH_F_ONLINK = 0x4 RTNH_F_PERVASIVE = 0x2 + RTNH_F_UNRESOLVED = 0x20 RTN_MAX = 0xb RTPROT_BABEL = 0x2a RTPROT_BIRD = 0xc @@ -1415,6 +1440,7 @@ const ( SCM_TIMESTAMP = 0x1d SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 SCM_WIFI_STATUS = 0x29 SECCOMP_MODE_DISABLED = 0x0 @@ -1553,6 +1579,7 @@ const ( SO_BSDCOMPAT = 0xe SO_BUSY_POLL = 0x2e SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b @@ -1561,11 +1588,13 @@ const ( SO_ERROR = 0x4 SO_GET_FILTER = 0x1a SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 SO_KEEPALIVE = 0x9 SO_LINGER = 0xd SO_LOCK_FILTER = 0x2c SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 SO_NOFCS = 0x2b SO_NO_CHECK = 0xb SO_OOBINLINE = 0xa @@ -1573,6 +1602,7 @@ const ( SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 + SO_PEERGROUPS = 0x3b SO_PEERNAME = 0x1c SO_PEERSEC = 0x1f SO_PRIORITY = 0xc @@ -1669,6 +1699,7 @@ const ( TCP_CORK = 0x3 TCP_DEFER_ACCEPT = 0x9 TCP_FASTOPEN = 0x17 + TCP_FASTOPEN_CONNECT = 0x1e TCP_INFO = 0xb TCP_KEEPCNT = 0x6 TCP_KEEPIDLE = 0x4 @@ -1728,6 +1759,7 @@ const ( TIOCGPKT = 0x80045438 TIOCGPTLCK = 0x80045439 TIOCGPTN = 0x80045430 + TIOCGPTPEER = 0x5441 TIOCGRS485 = 0x542e TIOCGSERIAL = 0x541e TIOCGSID = 0x5429 @@ -1811,6 +1843,8 @@ const ( TUNSETVNETHDRSZ = 0x400454d8 TUNSETVNETLE = 0x400454dc UMOUNT_NOFOLLOW = 0x8 + UTIME_NOW = 0x3fffffff + UTIME_OMIT = 0x3ffffffe VDISCARD = 0xd VEOF = 0x4 VEOL = 0xb @@ -1840,6 +1874,17 @@ const ( WALL = 0x40000000 WCLONE = 0x80000000 WCONTINUED = 0x8 + WDIOC_GETBOOTSTATUS = 0x80045702 
+ WDIOC_GETPRETIMEOUT = 0x80045709 + WDIOC_GETSTATUS = 0x80045701 + WDIOC_GETSUPPORT = 0x80285700 + WDIOC_GETTEMP = 0x80045703 + WDIOC_GETTIMELEFT = 0x8004570a + WDIOC_GETTIMEOUT = 0x80045707 + WDIOC_KEEPALIVE = 0x80045705 + WDIOC_SETOPTIONS = 0x80045704 + WDIOC_SETPRETIMEOUT = 0xc0045708 + WDIOC_SETTIMEOUT = 0xc0045706 WEXITED = 0x4 WNOHANG = 0x1 WNOTHREAD = 0x20000000 @@ -2020,7 +2065,6 @@ const ( SIGTSTP = syscall.Signal(0x14) SIGTTIN = syscall.Signal(0x15) SIGTTOU = syscall.Signal(0x16) - SIGUNUSED = syscall.Signal(0x1f) SIGURG = syscall.Signal(0x17) SIGUSR1 = syscall.Signal(0xa) SIGUSR2 = syscall.Signal(0xc) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 1adff0b28cc4..57cfcf3fe02d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -36,7 +36,7 @@ const ( AF_KEY = 0xf AF_LLC = 0x1a AF_LOCAL = 0x1 - AF_MAX = 0x2b + AF_MAX = 0x2c AF_MPLS = 0x1c AF_NETBEUI = 0xd AF_NETLINK = 0x10 @@ -51,6 +51,7 @@ const ( AF_ROUTE = 0x10 AF_RXRPC = 0x21 AF_SECURITY = 0xe + AF_SMC = 0x2b AF_SNA = 0x16 AF_TIPC = 0x1e AF_UNIX = 0x1 @@ -129,6 +130,7 @@ const ( ARPHRD_TUNNEL = 0x300 ARPHRD_TUNNEL6 = 0x301 ARPHRD_VOID = 0xffff + ARPHRD_VSOCKMON = 0x33a ARPHRD_X25 = 0x10f B0 = 0x0 B1000000 = 0x1008 @@ -392,6 +394,7 @@ const ( ETH_P_FIP = 0x8914 ETH_P_HDLC = 0x19 ETH_P_HSR = 0x892f + ETH_P_IBOE = 0x8915 ETH_P_IEEE802154 = 0xf6 ETH_P_IEEEPUP = 0xa00 ETH_P_IEEEPUPAT = 0xa01 @@ -453,6 +456,8 @@ const ( FF1 = 0x8000 FFDLY = 0x8000 FLUSHO = 0x1000 + FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 + FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 @@ -657,8 +662,10 @@ const ( IPV6_2292PKTOPTIONS = 0x6 IPV6_2292RTHDR = 0x5 IPV6_ADDRFORM = 0x1 + IPV6_ADDR_PREFERENCES = 0x48 IPV6_ADD_MEMBERSHIP = 0x14 IPV6_AUTHHDR = 0xa + IPV6_AUTOFLOWLABEL = 0x46 IPV6_CHECKSUM = 0x7 IPV6_DONTFRAG = 0x3e IPV6_DROP_MEMBERSHIP = 0x15 @@ -671,12 +678,14 @@ const ( IPV6_JOIN_GROUP = 0x14 IPV6_LEAVE_ANYCAST = 0x1c IPV6_LEAVE_GROUP = 0x15 + IPV6_MINHOPCOUNT = 0x49 IPV6_MTU = 0x18 IPV6_MTU_DISCOVER = 0x17 IPV6_MULTICAST_HOPS = 0x12 IPV6_MULTICAST_IF = 0x11 IPV6_MULTICAST_LOOP = 0x13 IPV6_NEXTHOP = 0x9 + IPV6_ORIGDSTADDR = 0x4a IPV6_PATHMTU = 0x3d IPV6_PKTINFO = 0x32 IPV6_PMTUDISC_DO = 0x2 @@ -687,8 +696,10 @@ const ( IPV6_PMTUDISC_WANT = 0x1 IPV6_RECVDSTOPTS = 0x3a IPV6_RECVERR = 0x19 + IPV6_RECVFRAGSIZE = 0x4d IPV6_RECVHOPLIMIT = 0x33 IPV6_RECVHOPOPTS = 0x35 + IPV6_RECVORIGDSTADDR = 0x4a IPV6_RECVPATHMTU = 0x3c IPV6_RECVPKTINFO = 0x31 IPV6_RECVRTHDR = 0x38 @@ -702,7 +713,9 @@ const ( IPV6_RXDSTOPTS = 0x3b IPV6_RXHOPOPTS = 0x36 IPV6_TCLASS = 0x43 + IPV6_TRANSPARENT = 0x4b IPV6_UNICAST_HOPS = 0x10 + IPV6_UNICAST_IF = 0x4c IPV6_V6ONLY = 0x1a IPV6_XFRM_POLICY = 0x23 IP_ADD_MEMBERSHIP = 0x23 @@ -745,6 +758,7 @@ const ( IP_PMTUDISC_PROBE = 0x3 IP_PMTUDISC_WANT = 0x1 IP_RECVERR = 0xb + IP_RECVFRAGSIZE = 0x19 IP_RECVOPTS = 0x6 IP_RECVORIGDSTADDR = 0x14 IP_RECVRETOPTS = 0x7 @@ -782,6 +796,7 @@ const ( KEYCTL_NEGATE = 0xd KEYCTL_READ = 0xb KEYCTL_REJECT = 0x13 + KEYCTL_RESTRICT_KEYRING = 0x1d KEYCTL_REVOKE = 0x3 KEYCTL_SEARCH = 0xa KEYCTL_SESSION_TO_PARENT = 0x12 @@ -914,6 +929,7 @@ const ( MS_SILENT = 0x8000 MS_SLAVE = 0x80000 MS_STRICTATIME = 0x1000000 + MS_SUBMOUNT = 0x4000000 MS_SYNC = 0x4 MS_SYNCHRONOUS = 0x10 MS_UNBINDABLE = 0x20000 @@ -928,6 +944,7 @@ const ( NETLINK_DNRTMSG = 0xe NETLINK_DROP_MEMBERSHIP = 0x2 
NETLINK_ECRYPTFS = 0x13 + NETLINK_EXT_ACK = 0xb NETLINK_FIB_LOOKUP = 0xa NETLINK_FIREWALL = 0x3 NETLINK_GENERIC = 0x10 @@ -946,6 +963,7 @@ const ( NETLINK_RX_RING = 0x6 NETLINK_SCSITRANSPORT = 0x12 NETLINK_SELINUX = 0x7 + NETLINK_SMC = 0x16 NETLINK_SOCK_DIAG = 0x4 NETLINK_TX_RING = 0x7 NETLINK_UNUSED = 0x1 @@ -966,8 +984,10 @@ const ( NLMSG_NOOP = 0x1 NLMSG_OVERRUN = 0x4 NLM_F_ACK = 0x4 + NLM_F_ACK_TLVS = 0x200 NLM_F_APPEND = 0x800 NLM_F_ATOMIC = 0x400 + NLM_F_CAPPED = 0x100 NLM_F_CREATE = 0x400 NLM_F_DUMP = 0x300 NLM_F_DUMP_FILTERED = 0x20 @@ -1024,6 +1044,7 @@ const ( PACKET_FANOUT_EBPF = 0x7 PACKET_FANOUT_FLAG_DEFRAG = 0x8000 PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 + PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 PACKET_FANOUT_HASH = 0x0 PACKET_FANOUT_LB = 0x1 PACKET_FANOUT_QM = 0x5 @@ -1261,7 +1282,7 @@ const ( RLIMIT_RTTIME = 0xf RLIMIT_SIGPENDING = 0xb RLIMIT_STACK = 0x3 - RLIM_INFINITY = -0x1 + RLIM_INFINITY = 0xffffffffffffffff RTAX_ADVMSS = 0x8 RTAX_CC_ALGO = 0x10 RTAX_CWND = 0x7 @@ -1286,7 +1307,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x19 + RTA_MAX = 0x1a RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1330,6 +1351,7 @@ const ( RTM_DELLINK = 0x11 RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d + RTM_DELNETCONF = 0x51 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1338,6 +1360,7 @@ const ( RTM_DELTFILTER = 0x2d RTM_F_CLONED = 0x200 RTM_F_EQUALIZE = 0x400 + RTM_F_FIB_MATCH = 0x2000 RTM_F_LOOKUP_TABLE = 0x1000 RTM_F_NOTIFY = 0x100 RTM_F_PREFIX = 0x800 @@ -1359,10 +1382,11 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x5f + RTM_MAX = 0x63 RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 + RTM_NEWCACHEREPORT = 0x60 RTM_NEWLINK = 0x10 RTM_NEWMDB = 0x54 RTM_NEWNDUSEROPT = 0x44 @@ -1377,8 +1401,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x14 - RTM_NR_MSGTYPES = 0x50 + RTM_NR_FAMILIES = 0x15 + RTM_NR_MSGTYPES = 0x54 RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1389,6 +1413,7 @@ const ( RTNH_F_OFFLOAD = 0x8 RTNH_F_ONLINK = 0x4 RTNH_F_PERVASIVE = 0x2 + RTNH_F_UNRESOLVED = 0x20 RTN_MAX = 0xb RTPROT_BABEL = 0x2a RTPROT_BIRD = 0xc @@ -1419,6 +1444,7 @@ const ( SCM_TIMESTAMP = 0x1d SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 SCM_WIFI_STATUS = 0x29 SECCOMP_MODE_DISABLED = 0x0 @@ -1557,6 +1583,7 @@ const ( SO_BSDCOMPAT = 0xe SO_BUSY_POLL = 0x2e SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b @@ -1565,11 +1592,13 @@ const ( SO_ERROR = 0x4 SO_GET_FILTER = 0x1a SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 SO_KEEPALIVE = 0x9 SO_LINGER = 0xd SO_LOCK_FILTER = 0x2c SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 SO_NOFCS = 0x2b SO_NO_CHECK = 0xb SO_OOBINLINE = 0xa @@ -1577,6 +1606,7 @@ const ( SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 + SO_PEERGROUPS = 0x3b SO_PEERNAME = 0x1c SO_PEERSEC = 0x1f SO_PRIORITY = 0xc @@ -1673,6 +1703,7 @@ const ( TCP_CORK = 0x3 TCP_DEFER_ACCEPT = 0x9 TCP_FASTOPEN = 0x17 + TCP_FASTOPEN_CONNECT = 0x1e TCP_INFO = 0xb TCP_KEEPCNT = 0x6 TCP_KEEPIDLE = 0x4 @@ -1732,6 +1763,7 @@ const ( TIOCGPKT = 0x80045438 TIOCGPTLCK = 0x80045439 TIOCGPTN = 0x80045430 + TIOCGPTPEER = 0x5441 TIOCGRS485 = 0x542e TIOCGSERIAL = 0x541e TIOCGSID = 0x5429 @@ -1815,6 +1847,8 @@ const ( TUNSETVNETHDRSZ = 0x400454d8 TUNSETVNETLE = 0x400454dc UMOUNT_NOFOLLOW = 0x8 
+ UTIME_NOW = 0x3fffffff + UTIME_OMIT = 0x3ffffffe VDISCARD = 0xd VEOF = 0x4 VEOL = 0xb @@ -1844,6 +1878,17 @@ const ( WALL = 0x40000000 WCLONE = 0x80000000 WCONTINUED = 0x8 + WDIOC_GETBOOTSTATUS = 0x80045702 + WDIOC_GETPRETIMEOUT = 0x80045709 + WDIOC_GETSTATUS = 0x80045701 + WDIOC_GETSUPPORT = 0x80285700 + WDIOC_GETTEMP = 0x80045703 + WDIOC_GETTIMELEFT = 0x8004570a + WDIOC_GETTIMEOUT = 0x80045707 + WDIOC_KEEPALIVE = 0x80045705 + WDIOC_SETOPTIONS = 0x80045704 + WDIOC_SETPRETIMEOUT = 0xc0045708 + WDIOC_SETTIMEOUT = 0xc0045706 WEXITED = 0x4 WNOHANG = 0x1 WNOTHREAD = 0x20000000 @@ -2024,7 +2069,6 @@ const ( SIGTSTP = syscall.Signal(0x14) SIGTTIN = syscall.Signal(0x15) SIGTTOU = syscall.Signal(0x16) - SIGUNUSED = syscall.Signal(0x1f) SIGURG = syscall.Signal(0x17) SIGUSR1 = syscall.Signal(0xa) SIGUSR2 = syscall.Signal(0xc) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index c0ecd47c2a54..b6e5b090ecf9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -36,7 +36,7 @@ const ( AF_KEY = 0xf AF_LLC = 0x1a AF_LOCAL = 0x1 - AF_MAX = 0x2b + AF_MAX = 0x2c AF_MPLS = 0x1c AF_NETBEUI = 0xd AF_NETLINK = 0x10 @@ -51,6 +51,7 @@ const ( AF_ROUTE = 0x10 AF_RXRPC = 0x21 AF_SECURITY = 0xe + AF_SMC = 0x2b AF_SNA = 0x16 AF_TIPC = 0x1e AF_UNIX = 0x1 @@ -129,6 +130,7 @@ const ( ARPHRD_TUNNEL = 0x300 ARPHRD_TUNNEL6 = 0x301 ARPHRD_VOID = 0xffff + ARPHRD_VSOCKMON = 0x33a ARPHRD_X25 = 0x10f B0 = 0x0 B1000000 = 0x1008 @@ -393,6 +395,7 @@ const ( ETH_P_FIP = 0x8914 ETH_P_HDLC = 0x19 ETH_P_HSR = 0x892f + ETH_P_IBOE = 0x8915 ETH_P_IEEE802154 = 0xf6 ETH_P_IEEEPUP = 0xa00 ETH_P_IEEEPUPAT = 0xa01 @@ -441,6 +444,7 @@ const ( EXTA = 0xe EXTB = 0xf EXTPROC = 0x10000 + EXTRA_MAGIC = 0x45585401 FALLOC_FL_COLLAPSE_RANGE = 0x8 FALLOC_FL_INSERT_RANGE = 0x20 FALLOC_FL_KEEP_SIZE = 0x1 @@ -454,6 +458,8 @@ const ( FF1 = 0x8000 FFDLY = 0x8000 FLUSHO = 0x1000 + FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 + FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 @@ -658,8 +664,10 @@ const ( IPV6_2292PKTOPTIONS = 0x6 IPV6_2292RTHDR = 0x5 IPV6_ADDRFORM = 0x1 + IPV6_ADDR_PREFERENCES = 0x48 IPV6_ADD_MEMBERSHIP = 0x14 IPV6_AUTHHDR = 0xa + IPV6_AUTOFLOWLABEL = 0x46 IPV6_CHECKSUM = 0x7 IPV6_DONTFRAG = 0x3e IPV6_DROP_MEMBERSHIP = 0x15 @@ -672,12 +680,14 @@ const ( IPV6_JOIN_GROUP = 0x14 IPV6_LEAVE_ANYCAST = 0x1c IPV6_LEAVE_GROUP = 0x15 + IPV6_MINHOPCOUNT = 0x49 IPV6_MTU = 0x18 IPV6_MTU_DISCOVER = 0x17 IPV6_MULTICAST_HOPS = 0x12 IPV6_MULTICAST_IF = 0x11 IPV6_MULTICAST_LOOP = 0x13 IPV6_NEXTHOP = 0x9 + IPV6_ORIGDSTADDR = 0x4a IPV6_PATHMTU = 0x3d IPV6_PKTINFO = 0x32 IPV6_PMTUDISC_DO = 0x2 @@ -688,8 +698,10 @@ const ( IPV6_PMTUDISC_WANT = 0x1 IPV6_RECVDSTOPTS = 0x3a IPV6_RECVERR = 0x19 + IPV6_RECVFRAGSIZE = 0x4d IPV6_RECVHOPLIMIT = 0x33 IPV6_RECVHOPOPTS = 0x35 + IPV6_RECVORIGDSTADDR = 0x4a IPV6_RECVPATHMTU = 0x3c IPV6_RECVPKTINFO = 0x31 IPV6_RECVRTHDR = 0x38 @@ -703,7 +715,9 @@ const ( IPV6_RXDSTOPTS = 0x3b IPV6_RXHOPOPTS = 0x36 IPV6_TCLASS = 0x43 + IPV6_TRANSPARENT = 0x4b IPV6_UNICAST_HOPS = 0x10 + IPV6_UNICAST_IF = 0x4c IPV6_V6ONLY = 0x1a IPV6_XFRM_POLICY = 0x23 IP_ADD_MEMBERSHIP = 0x23 @@ -746,6 +760,7 @@ const ( IP_PMTUDISC_PROBE = 0x3 IP_PMTUDISC_WANT = 0x1 IP_RECVERR = 0xb + IP_RECVFRAGSIZE = 0x19 IP_RECVOPTS = 0x6 IP_RECVORIGDSTADDR = 0x14 IP_RECVRETOPTS = 0x7 @@ -783,6 +798,7 @@ const ( KEYCTL_NEGATE = 0xd KEYCTL_READ = 
0xb KEYCTL_REJECT = 0x13 + KEYCTL_RESTRICT_KEYRING = 0x1d KEYCTL_REVOKE = 0x3 KEYCTL_SEARCH = 0xa KEYCTL_SESSION_TO_PARENT = 0x12 @@ -915,6 +931,7 @@ const ( MS_SILENT = 0x8000 MS_SLAVE = 0x80000 MS_STRICTATIME = 0x1000000 + MS_SUBMOUNT = 0x4000000 MS_SYNC = 0x4 MS_SYNCHRONOUS = 0x10 MS_UNBINDABLE = 0x20000 @@ -929,6 +946,7 @@ const ( NETLINK_DNRTMSG = 0xe NETLINK_DROP_MEMBERSHIP = 0x2 NETLINK_ECRYPTFS = 0x13 + NETLINK_EXT_ACK = 0xb NETLINK_FIB_LOOKUP = 0xa NETLINK_FIREWALL = 0x3 NETLINK_GENERIC = 0x10 @@ -947,6 +965,7 @@ const ( NETLINK_RX_RING = 0x6 NETLINK_SCSITRANSPORT = 0x12 NETLINK_SELINUX = 0x7 + NETLINK_SMC = 0x16 NETLINK_SOCK_DIAG = 0x4 NETLINK_TX_RING = 0x7 NETLINK_UNUSED = 0x1 @@ -967,8 +986,10 @@ const ( NLMSG_NOOP = 0x1 NLMSG_OVERRUN = 0x4 NLM_F_ACK = 0x4 + NLM_F_ACK_TLVS = 0x200 NLM_F_APPEND = 0x800 NLM_F_ATOMIC = 0x400 + NLM_F_CAPPED = 0x100 NLM_F_CREATE = 0x400 NLM_F_DUMP = 0x300 NLM_F_DUMP_FILTERED = 0x20 @@ -1025,6 +1046,7 @@ const ( PACKET_FANOUT_EBPF = 0x7 PACKET_FANOUT_FLAG_DEFRAG = 0x8000 PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 + PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 PACKET_FANOUT_HASH = 0x0 PACKET_FANOUT_LB = 0x1 PACKET_FANOUT_QM = 0x5 @@ -1166,7 +1188,7 @@ const ( PR_SET_NO_NEW_PRIVS = 0x26 PR_SET_PDEATHSIG = 0x1 PR_SET_PTRACER = 0x59616d61 - PR_SET_PTRACER_ANY = -0x1 + PR_SET_PTRACER_ANY = 0xffffffffffffffff PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c PR_SET_THP_DISABLE = 0x29 @@ -1246,7 +1268,7 @@ const ( RLIMIT_RTTIME = 0xf RLIMIT_SIGPENDING = 0xb RLIMIT_STACK = 0x3 - RLIM_INFINITY = -0x1 + RLIM_INFINITY = 0xffffffffffffffff RTAX_ADVMSS = 0x8 RTAX_CC_ALGO = 0x10 RTAX_CWND = 0x7 @@ -1271,7 +1293,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x19 + RTA_MAX = 0x1a RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1315,6 +1337,7 @@ const ( RTM_DELLINK = 0x11 RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d + RTM_DELNETCONF = 0x51 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1323,6 +1346,7 @@ const ( RTM_DELTFILTER = 0x2d RTM_F_CLONED = 0x200 RTM_F_EQUALIZE = 0x400 + RTM_F_FIB_MATCH = 0x2000 RTM_F_LOOKUP_TABLE = 0x1000 RTM_F_NOTIFY = 0x100 RTM_F_PREFIX = 0x800 @@ -1344,10 +1368,11 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x5f + RTM_MAX = 0x63 RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 + RTM_NEWCACHEREPORT = 0x60 RTM_NEWLINK = 0x10 RTM_NEWMDB = 0x54 RTM_NEWNDUSEROPT = 0x44 @@ -1362,8 +1387,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x14 - RTM_NR_MSGTYPES = 0x50 + RTM_NR_FAMILIES = 0x15 + RTM_NR_MSGTYPES = 0x54 RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1374,6 +1399,7 @@ const ( RTNH_F_OFFLOAD = 0x8 RTNH_F_ONLINK = 0x4 RTNH_F_PERVASIVE = 0x2 + RTNH_F_UNRESOLVED = 0x20 RTN_MAX = 0xb RTPROT_BABEL = 0x2a RTPROT_BIRD = 0xc @@ -1404,6 +1430,7 @@ const ( SCM_TIMESTAMP = 0x1d SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 SCM_WIFI_STATUS = 0x29 SECCOMP_MODE_DISABLED = 0x0 @@ -1542,6 +1569,7 @@ const ( SO_BSDCOMPAT = 0xe SO_BUSY_POLL = 0x2e SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b @@ -1550,11 +1578,13 @@ const ( SO_ERROR = 0x4 SO_GET_FILTER = 0x1a SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 SO_KEEPALIVE = 0x9 SO_LINGER = 0xd SO_LOCK_FILTER = 0x2c SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 SO_NOFCS = 0x2b SO_NO_CHECK = 0xb 
SO_OOBINLINE = 0xa @@ -1562,6 +1592,7 @@ const ( SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 + SO_PEERGROUPS = 0x3b SO_PEERNAME = 0x1c SO_PEERSEC = 0x1f SO_PRIORITY = 0xc @@ -1658,6 +1689,7 @@ const ( TCP_CORK = 0x3 TCP_DEFER_ACCEPT = 0x9 TCP_FASTOPEN = 0x17 + TCP_FASTOPEN_CONNECT = 0x1e TCP_INFO = 0xb TCP_KEEPCNT = 0x6 TCP_KEEPIDLE = 0x4 @@ -1717,6 +1749,7 @@ const ( TIOCGPKT = 0x80045438 TIOCGPTLCK = 0x80045439 TIOCGPTN = 0x80045430 + TIOCGPTPEER = 0x5441 TIOCGRS485 = 0x542e TIOCGSERIAL = 0x541e TIOCGSID = 0x5429 @@ -1800,6 +1833,8 @@ const ( TUNSETVNETHDRSZ = 0x400454d8 TUNSETVNETLE = 0x400454dc UMOUNT_NOFOLLOW = 0x8 + UTIME_NOW = 0x3fffffff + UTIME_OMIT = 0x3ffffffe VDISCARD = 0xd VEOF = 0x4 VEOL = 0xb @@ -1829,6 +1864,17 @@ const ( WALL = 0x40000000 WCLONE = 0x80000000 WCONTINUED = 0x8 + WDIOC_GETBOOTSTATUS = 0x80045702 + WDIOC_GETPRETIMEOUT = 0x80045709 + WDIOC_GETSTATUS = 0x80045701 + WDIOC_GETSUPPORT = 0x80285700 + WDIOC_GETTEMP = 0x80045703 + WDIOC_GETTIMELEFT = 0x8004570a + WDIOC_GETTIMEOUT = 0x80045707 + WDIOC_KEEPALIVE = 0x80045705 + WDIOC_SETOPTIONS = 0x80045704 + WDIOC_SETPRETIMEOUT = 0xc0045708 + WDIOC_SETTIMEOUT = 0xc0045706 WEXITED = 0x4 WNOHANG = 0x1 WNOTHREAD = 0x20000000 @@ -2009,7 +2055,6 @@ const ( SIGTSTP = syscall.Signal(0x14) SIGTTIN = syscall.Signal(0x15) SIGTTOU = syscall.Signal(0x16) - SIGUNUSED = syscall.Signal(0x1f) SIGURG = syscall.Signal(0x17) SIGUSR1 = syscall.Signal(0xa) SIGUSR2 = syscall.Signal(0xc) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 900f568af1cb..0113e1f6ab95 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -36,7 +36,7 @@ const ( AF_KEY = 0xf AF_LLC = 0x1a AF_LOCAL = 0x1 - AF_MAX = 0x2b + AF_MAX = 0x2c AF_MPLS = 0x1c AF_NETBEUI = 0xd AF_NETLINK = 0x10 @@ -51,6 +51,7 @@ const ( AF_ROUTE = 0x10 AF_RXRPC = 0x21 AF_SECURITY = 0xe + AF_SMC = 0x2b AF_SNA = 0x16 AF_TIPC = 0x1e AF_UNIX = 0x1 @@ -129,6 +130,7 @@ const ( ARPHRD_TUNNEL = 0x300 ARPHRD_TUNNEL6 = 0x301 ARPHRD_VOID = 0xffff + ARPHRD_VSOCKMON = 0x33a ARPHRD_X25 = 0x10f B0 = 0x0 B1000000 = 0x1008 @@ -392,6 +394,7 @@ const ( ETH_P_FIP = 0x8914 ETH_P_HDLC = 0x19 ETH_P_HSR = 0x892f + ETH_P_IBOE = 0x8915 ETH_P_IEEE802154 = 0xf6 ETH_P_IEEEPUP = 0xa00 ETH_P_IEEEPUPAT = 0xa01 @@ -453,6 +456,8 @@ const ( FF1 = 0x8000 FFDLY = 0x8000 FLUSHO = 0x2000 + FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 + FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 @@ -657,8 +662,10 @@ const ( IPV6_2292PKTOPTIONS = 0x6 IPV6_2292RTHDR = 0x5 IPV6_ADDRFORM = 0x1 + IPV6_ADDR_PREFERENCES = 0x48 IPV6_ADD_MEMBERSHIP = 0x14 IPV6_AUTHHDR = 0xa + IPV6_AUTOFLOWLABEL = 0x46 IPV6_CHECKSUM = 0x7 IPV6_DONTFRAG = 0x3e IPV6_DROP_MEMBERSHIP = 0x15 @@ -671,12 +678,14 @@ const ( IPV6_JOIN_GROUP = 0x14 IPV6_LEAVE_ANYCAST = 0x1c IPV6_LEAVE_GROUP = 0x15 + IPV6_MINHOPCOUNT = 0x49 IPV6_MTU = 0x18 IPV6_MTU_DISCOVER = 0x17 IPV6_MULTICAST_HOPS = 0x12 IPV6_MULTICAST_IF = 0x11 IPV6_MULTICAST_LOOP = 0x13 IPV6_NEXTHOP = 0x9 + IPV6_ORIGDSTADDR = 0x4a IPV6_PATHMTU = 0x3d IPV6_PKTINFO = 0x32 IPV6_PMTUDISC_DO = 0x2 @@ -687,8 +696,10 @@ const ( IPV6_PMTUDISC_WANT = 0x1 IPV6_RECVDSTOPTS = 0x3a IPV6_RECVERR = 0x19 + IPV6_RECVFRAGSIZE = 0x4d IPV6_RECVHOPLIMIT = 0x33 IPV6_RECVHOPOPTS = 0x35 + IPV6_RECVORIGDSTADDR = 0x4a IPV6_RECVPATHMTU = 0x3c IPV6_RECVPKTINFO = 0x31 IPV6_RECVRTHDR = 0x38 @@ -702,7 +713,9 @@ 
const ( IPV6_RXDSTOPTS = 0x3b IPV6_RXHOPOPTS = 0x36 IPV6_TCLASS = 0x43 + IPV6_TRANSPARENT = 0x4b IPV6_UNICAST_HOPS = 0x10 + IPV6_UNICAST_IF = 0x4c IPV6_V6ONLY = 0x1a IPV6_XFRM_POLICY = 0x23 IP_ADD_MEMBERSHIP = 0x23 @@ -745,6 +758,7 @@ const ( IP_PMTUDISC_PROBE = 0x3 IP_PMTUDISC_WANT = 0x1 IP_RECVERR = 0xb + IP_RECVFRAGSIZE = 0x19 IP_RECVOPTS = 0x6 IP_RECVORIGDSTADDR = 0x14 IP_RECVRETOPTS = 0x7 @@ -782,6 +796,7 @@ const ( KEYCTL_NEGATE = 0xd KEYCTL_READ = 0xb KEYCTL_REJECT = 0x13 + KEYCTL_RESTRICT_KEYRING = 0x1d KEYCTL_REVOKE = 0x3 KEYCTL_SEARCH = 0xa KEYCTL_SESSION_TO_PARENT = 0x12 @@ -915,6 +930,7 @@ const ( MS_SILENT = 0x8000 MS_SLAVE = 0x80000 MS_STRICTATIME = 0x1000000 + MS_SUBMOUNT = 0x4000000 MS_SYNC = 0x4 MS_SYNCHRONOUS = 0x10 MS_UNBINDABLE = 0x20000 @@ -929,6 +945,7 @@ const ( NETLINK_DNRTMSG = 0xe NETLINK_DROP_MEMBERSHIP = 0x2 NETLINK_ECRYPTFS = 0x13 + NETLINK_EXT_ACK = 0xb NETLINK_FIB_LOOKUP = 0xa NETLINK_FIREWALL = 0x3 NETLINK_GENERIC = 0x10 @@ -947,6 +964,7 @@ const ( NETLINK_RX_RING = 0x6 NETLINK_SCSITRANSPORT = 0x12 NETLINK_SELINUX = 0x7 + NETLINK_SMC = 0x16 NETLINK_SOCK_DIAG = 0x4 NETLINK_TX_RING = 0x7 NETLINK_UNUSED = 0x1 @@ -967,8 +985,10 @@ const ( NLMSG_NOOP = 0x1 NLMSG_OVERRUN = 0x4 NLM_F_ACK = 0x4 + NLM_F_ACK_TLVS = 0x200 NLM_F_APPEND = 0x800 NLM_F_ATOMIC = 0x400 + NLM_F_CAPPED = 0x100 NLM_F_CREATE = 0x400 NLM_F_DUMP = 0x300 NLM_F_DUMP_FILTERED = 0x20 @@ -1025,6 +1045,7 @@ const ( PACKET_FANOUT_EBPF = 0x7 PACKET_FANOUT_FLAG_DEFRAG = 0x8000 PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 + PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 PACKET_FANOUT_HASH = 0x0 PACKET_FANOUT_LB = 0x1 PACKET_FANOUT_QM = 0x5 @@ -1258,7 +1279,7 @@ const ( RLIMIT_RTTIME = 0xf RLIMIT_SIGPENDING = 0xb RLIMIT_STACK = 0x3 - RLIM_INFINITY = -0x1 + RLIM_INFINITY = 0xffffffffffffffff RTAX_ADVMSS = 0x8 RTAX_CC_ALGO = 0x10 RTAX_CWND = 0x7 @@ -1283,7 +1304,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x19 + RTA_MAX = 0x1a RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1327,6 +1348,7 @@ const ( RTM_DELLINK = 0x11 RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d + RTM_DELNETCONF = 0x51 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1335,6 +1357,7 @@ const ( RTM_DELTFILTER = 0x2d RTM_F_CLONED = 0x200 RTM_F_EQUALIZE = 0x400 + RTM_F_FIB_MATCH = 0x2000 RTM_F_LOOKUP_TABLE = 0x1000 RTM_F_NOTIFY = 0x100 RTM_F_PREFIX = 0x800 @@ -1356,10 +1379,11 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x5f + RTM_MAX = 0x63 RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 + RTM_NEWCACHEREPORT = 0x60 RTM_NEWLINK = 0x10 RTM_NEWMDB = 0x54 RTM_NEWNDUSEROPT = 0x44 @@ -1374,8 +1398,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x14 - RTM_NR_MSGTYPES = 0x50 + RTM_NR_FAMILIES = 0x15 + RTM_NR_MSGTYPES = 0x54 RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1386,6 +1410,7 @@ const ( RTNH_F_OFFLOAD = 0x8 RTNH_F_ONLINK = 0x4 RTNH_F_PERVASIVE = 0x2 + RTNH_F_UNRESOLVED = 0x20 RTN_MAX = 0xb RTPROT_BABEL = 0x2a RTPROT_BIRD = 0xc @@ -1416,6 +1441,7 @@ const ( SCM_TIMESTAMP = 0x1d SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 SCM_WIFI_STATUS = 0x29 SECCOMP_MODE_DISABLED = 0x0 @@ -1554,6 +1580,7 @@ const ( SO_BSDCOMPAT = 0xe SO_BUSY_POLL = 0x2e SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b @@ -1562,11 +1589,13 @@ const ( SO_ERROR = 0x1007 SO_GET_FILTER 
= 0x1a SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 SO_KEEPALIVE = 0x8 SO_LINGER = 0x80 SO_LOCK_FILTER = 0x2c SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 SO_NOFCS = 0x2b SO_NO_CHECK = 0xb SO_OOBINLINE = 0x100 @@ -1574,6 +1603,7 @@ const ( SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 + SO_PEERGROUPS = 0x3b SO_PEERNAME = 0x1c SO_PEERSEC = 0x1e SO_PRIORITY = 0xc @@ -1670,6 +1700,7 @@ const ( TCP_CORK = 0x3 TCP_DEFER_ACCEPT = 0x9 TCP_FASTOPEN = 0x17 + TCP_FASTOPEN_CONNECT = 0x1e TCP_INFO = 0xb TCP_KEEPCNT = 0x6 TCP_KEEPIDLE = 0x4 @@ -1728,6 +1759,7 @@ const ( TIOCGPKT = 0x40045438 TIOCGPTLCK = 0x40045439 TIOCGPTN = 0x40045430 + TIOCGPTPEER = 0x20005441 TIOCGRS485 = 0x4020542e TIOCGSERIAL = 0x5484 TIOCGSID = 0x7416 @@ -1814,6 +1846,8 @@ const ( TUNSETVNETHDRSZ = 0x800454d8 TUNSETVNETLE = 0x800454dc UMOUNT_NOFOLLOW = 0x8 + UTIME_NOW = 0x3fffffff + UTIME_OMIT = 0x3ffffffe VDISCARD = 0xd VEOF = 0x10 VEOL = 0x11 @@ -1844,6 +1878,17 @@ const ( WALL = 0x40000000 WCLONE = 0x80000000 WCONTINUED = 0x8 + WDIOC_GETBOOTSTATUS = 0x40045702 + WDIOC_GETPRETIMEOUT = 0x40045709 + WDIOC_GETSTATUS = 0x40045701 + WDIOC_GETSUPPORT = 0x40285700 + WDIOC_GETTEMP = 0x40045703 + WDIOC_GETTIMELEFT = 0x4004570a + WDIOC_GETTIMEOUT = 0x40045707 + WDIOC_KEEPALIVE = 0x40045705 + WDIOC_SETOPTIONS = 0x40045704 + WDIOC_SETPRETIMEOUT = 0xc0045708 + WDIOC_SETTIMEOUT = 0xc0045706 WEXITED = 0x4 WNOHANG = 0x1 WNOTHREAD = 0x20000000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 4148f27730c0..6857657a5006 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -36,7 +36,7 @@ const ( AF_KEY = 0xf AF_LLC = 0x1a AF_LOCAL = 0x1 - AF_MAX = 0x2b + AF_MAX = 0x2c AF_MPLS = 0x1c AF_NETBEUI = 0xd AF_NETLINK = 0x10 @@ -51,6 +51,7 @@ const ( AF_ROUTE = 0x10 AF_RXRPC = 0x21 AF_SECURITY = 0xe + AF_SMC = 0x2b AF_SNA = 0x16 AF_TIPC = 0x1e AF_UNIX = 0x1 @@ -129,6 +130,7 @@ const ( ARPHRD_TUNNEL = 0x300 ARPHRD_TUNNEL6 = 0x301 ARPHRD_VOID = 0xffff + ARPHRD_VSOCKMON = 0x33a ARPHRD_X25 = 0x10f B0 = 0x0 B1000000 = 0x1008 @@ -392,6 +394,7 @@ const ( ETH_P_FIP = 0x8914 ETH_P_HDLC = 0x19 ETH_P_HSR = 0x892f + ETH_P_IBOE = 0x8915 ETH_P_IEEE802154 = 0xf6 ETH_P_IEEEPUP = 0xa00 ETH_P_IEEEPUPAT = 0xa01 @@ -453,6 +456,8 @@ const ( FF1 = 0x8000 FFDLY = 0x8000 FLUSHO = 0x2000 + FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 + FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 @@ -657,8 +662,10 @@ const ( IPV6_2292PKTOPTIONS = 0x6 IPV6_2292RTHDR = 0x5 IPV6_ADDRFORM = 0x1 + IPV6_ADDR_PREFERENCES = 0x48 IPV6_ADD_MEMBERSHIP = 0x14 IPV6_AUTHHDR = 0xa + IPV6_AUTOFLOWLABEL = 0x46 IPV6_CHECKSUM = 0x7 IPV6_DONTFRAG = 0x3e IPV6_DROP_MEMBERSHIP = 0x15 @@ -671,12 +678,14 @@ const ( IPV6_JOIN_GROUP = 0x14 IPV6_LEAVE_ANYCAST = 0x1c IPV6_LEAVE_GROUP = 0x15 + IPV6_MINHOPCOUNT = 0x49 IPV6_MTU = 0x18 IPV6_MTU_DISCOVER = 0x17 IPV6_MULTICAST_HOPS = 0x12 IPV6_MULTICAST_IF = 0x11 IPV6_MULTICAST_LOOP = 0x13 IPV6_NEXTHOP = 0x9 + IPV6_ORIGDSTADDR = 0x4a IPV6_PATHMTU = 0x3d IPV6_PKTINFO = 0x32 IPV6_PMTUDISC_DO = 0x2 @@ -687,8 +696,10 @@ const ( IPV6_PMTUDISC_WANT = 0x1 IPV6_RECVDSTOPTS = 0x3a IPV6_RECVERR = 0x19 + IPV6_RECVFRAGSIZE = 0x4d IPV6_RECVHOPLIMIT = 0x33 IPV6_RECVHOPOPTS = 0x35 + IPV6_RECVORIGDSTADDR = 0x4a IPV6_RECVPATHMTU = 0x3c IPV6_RECVPKTINFO = 0x31 IPV6_RECVRTHDR = 0x38 @@ -702,7 +713,9 @@ const ( 
IPV6_RXDSTOPTS = 0x3b IPV6_RXHOPOPTS = 0x36 IPV6_TCLASS = 0x43 + IPV6_TRANSPARENT = 0x4b IPV6_UNICAST_HOPS = 0x10 + IPV6_UNICAST_IF = 0x4c IPV6_V6ONLY = 0x1a IPV6_XFRM_POLICY = 0x23 IP_ADD_MEMBERSHIP = 0x23 @@ -745,6 +758,7 @@ const ( IP_PMTUDISC_PROBE = 0x3 IP_PMTUDISC_WANT = 0x1 IP_RECVERR = 0xb + IP_RECVFRAGSIZE = 0x19 IP_RECVOPTS = 0x6 IP_RECVORIGDSTADDR = 0x14 IP_RECVRETOPTS = 0x7 @@ -782,6 +796,7 @@ const ( KEYCTL_NEGATE = 0xd KEYCTL_READ = 0xb KEYCTL_REJECT = 0x13 + KEYCTL_RESTRICT_KEYRING = 0x1d KEYCTL_REVOKE = 0x3 KEYCTL_SEARCH = 0xa KEYCTL_SESSION_TO_PARENT = 0x12 @@ -915,6 +930,7 @@ const ( MS_SILENT = 0x8000 MS_SLAVE = 0x80000 MS_STRICTATIME = 0x1000000 + MS_SUBMOUNT = 0x4000000 MS_SYNC = 0x4 MS_SYNCHRONOUS = 0x10 MS_UNBINDABLE = 0x20000 @@ -929,6 +945,7 @@ const ( NETLINK_DNRTMSG = 0xe NETLINK_DROP_MEMBERSHIP = 0x2 NETLINK_ECRYPTFS = 0x13 + NETLINK_EXT_ACK = 0xb NETLINK_FIB_LOOKUP = 0xa NETLINK_FIREWALL = 0x3 NETLINK_GENERIC = 0x10 @@ -947,6 +964,7 @@ const ( NETLINK_RX_RING = 0x6 NETLINK_SCSITRANSPORT = 0x12 NETLINK_SELINUX = 0x7 + NETLINK_SMC = 0x16 NETLINK_SOCK_DIAG = 0x4 NETLINK_TX_RING = 0x7 NETLINK_UNUSED = 0x1 @@ -967,8 +985,10 @@ const ( NLMSG_NOOP = 0x1 NLMSG_OVERRUN = 0x4 NLM_F_ACK = 0x4 + NLM_F_ACK_TLVS = 0x200 NLM_F_APPEND = 0x800 NLM_F_ATOMIC = 0x400 + NLM_F_CAPPED = 0x100 NLM_F_CREATE = 0x400 NLM_F_DUMP = 0x300 NLM_F_DUMP_FILTERED = 0x20 @@ -1025,6 +1045,7 @@ const ( PACKET_FANOUT_EBPF = 0x7 PACKET_FANOUT_FLAG_DEFRAG = 0x8000 PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 + PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 PACKET_FANOUT_HASH = 0x0 PACKET_FANOUT_LB = 0x1 PACKET_FANOUT_QM = 0x5 @@ -1166,7 +1187,7 @@ const ( PR_SET_NO_NEW_PRIVS = 0x26 PR_SET_PDEATHSIG = 0x1 PR_SET_PTRACER = 0x59616d61 - PR_SET_PTRACER_ANY = -0x1 + PR_SET_PTRACER_ANY = 0xffffffffffffffff PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c PR_SET_THP_DISABLE = 0x29 @@ -1258,7 +1279,7 @@ const ( RLIMIT_RTTIME = 0xf RLIMIT_SIGPENDING = 0xb RLIMIT_STACK = 0x3 - RLIM_INFINITY = -0x1 + RLIM_INFINITY = 0xffffffffffffffff RTAX_ADVMSS = 0x8 RTAX_CC_ALGO = 0x10 RTAX_CWND = 0x7 @@ -1283,7 +1304,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x19 + RTA_MAX = 0x1a RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1327,6 +1348,7 @@ const ( RTM_DELLINK = 0x11 RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d + RTM_DELNETCONF = 0x51 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1335,6 +1357,7 @@ const ( RTM_DELTFILTER = 0x2d RTM_F_CLONED = 0x200 RTM_F_EQUALIZE = 0x400 + RTM_F_FIB_MATCH = 0x2000 RTM_F_LOOKUP_TABLE = 0x1000 RTM_F_NOTIFY = 0x100 RTM_F_PREFIX = 0x800 @@ -1356,10 +1379,11 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x5f + RTM_MAX = 0x63 RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 + RTM_NEWCACHEREPORT = 0x60 RTM_NEWLINK = 0x10 RTM_NEWMDB = 0x54 RTM_NEWNDUSEROPT = 0x44 @@ -1374,8 +1398,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x14 - RTM_NR_MSGTYPES = 0x50 + RTM_NR_FAMILIES = 0x15 + RTM_NR_MSGTYPES = 0x54 RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1386,6 +1410,7 @@ const ( RTNH_F_OFFLOAD = 0x8 RTNH_F_ONLINK = 0x4 RTNH_F_PERVASIVE = 0x2 + RTNH_F_UNRESOLVED = 0x20 RTN_MAX = 0xb RTPROT_BABEL = 0x2a RTPROT_BIRD = 0xc @@ -1416,6 +1441,7 @@ const ( SCM_TIMESTAMP = 0x1d SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 SCM_WIFI_STATUS = 0x29 
SECCOMP_MODE_DISABLED = 0x0 @@ -1554,6 +1580,7 @@ const ( SO_BSDCOMPAT = 0xe SO_BUSY_POLL = 0x2e SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b @@ -1562,11 +1589,13 @@ const ( SO_ERROR = 0x1007 SO_GET_FILTER = 0x1a SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 SO_KEEPALIVE = 0x8 SO_LINGER = 0x80 SO_LOCK_FILTER = 0x2c SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 SO_NOFCS = 0x2b SO_NO_CHECK = 0xb SO_OOBINLINE = 0x100 @@ -1574,6 +1603,7 @@ const ( SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 + SO_PEERGROUPS = 0x3b SO_PEERNAME = 0x1c SO_PEERSEC = 0x1e SO_PRIORITY = 0xc @@ -1670,6 +1700,7 @@ const ( TCP_CORK = 0x3 TCP_DEFER_ACCEPT = 0x9 TCP_FASTOPEN = 0x17 + TCP_FASTOPEN_CONNECT = 0x1e TCP_INFO = 0xb TCP_KEEPCNT = 0x6 TCP_KEEPIDLE = 0x4 @@ -1728,6 +1759,7 @@ const ( TIOCGPKT = 0x40045438 TIOCGPTLCK = 0x40045439 TIOCGPTN = 0x40045430 + TIOCGPTPEER = 0x20005441 TIOCGRS485 = 0x4020542e TIOCGSERIAL = 0x5484 TIOCGSID = 0x7416 @@ -1814,6 +1846,8 @@ const ( TUNSETVNETHDRSZ = 0x800454d8 TUNSETVNETLE = 0x800454dc UMOUNT_NOFOLLOW = 0x8 + UTIME_NOW = 0x3fffffff + UTIME_OMIT = 0x3ffffffe VDISCARD = 0xd VEOF = 0x10 VEOL = 0x11 @@ -1844,6 +1878,17 @@ const ( WALL = 0x40000000 WCLONE = 0x80000000 WCONTINUED = 0x8 + WDIOC_GETBOOTSTATUS = 0x40045702 + WDIOC_GETPRETIMEOUT = 0x40045709 + WDIOC_GETSTATUS = 0x40045701 + WDIOC_GETSUPPORT = 0x40285700 + WDIOC_GETTEMP = 0x40045703 + WDIOC_GETTIMELEFT = 0x4004570a + WDIOC_GETTIMEOUT = 0x40045707 + WDIOC_KEEPALIVE = 0x40045705 + WDIOC_SETOPTIONS = 0x40045704 + WDIOC_SETPRETIMEOUT = 0xc0045708 + WDIOC_SETTIMEOUT = 0xc0045706 WEXITED = 0x4 WNOHANG = 0x1 WNOTHREAD = 0x20000000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 2310beebd480..14f7e0e056eb 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -36,7 +36,7 @@ const ( AF_KEY = 0xf AF_LLC = 0x1a AF_LOCAL = 0x1 - AF_MAX = 0x2b + AF_MAX = 0x2c AF_MPLS = 0x1c AF_NETBEUI = 0xd AF_NETLINK = 0x10 @@ -51,6 +51,7 @@ const ( AF_ROUTE = 0x10 AF_RXRPC = 0x21 AF_SECURITY = 0xe + AF_SMC = 0x2b AF_SNA = 0x16 AF_TIPC = 0x1e AF_UNIX = 0x1 @@ -129,6 +130,7 @@ const ( ARPHRD_TUNNEL = 0x300 ARPHRD_TUNNEL6 = 0x301 ARPHRD_VOID = 0xffff + ARPHRD_VSOCKMON = 0x33a ARPHRD_X25 = 0x10f B0 = 0x0 B1000000 = 0x1008 @@ -392,6 +394,7 @@ const ( ETH_P_FIP = 0x8914 ETH_P_HDLC = 0x19 ETH_P_HSR = 0x892f + ETH_P_IBOE = 0x8915 ETH_P_IEEE802154 = 0xf6 ETH_P_IEEEPUP = 0xa00 ETH_P_IEEEPUPAT = 0xa01 @@ -453,6 +456,8 @@ const ( FF1 = 0x8000 FFDLY = 0x8000 FLUSHO = 0x2000 + FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 + FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 @@ -657,8 +662,10 @@ const ( IPV6_2292PKTOPTIONS = 0x6 IPV6_2292RTHDR = 0x5 IPV6_ADDRFORM = 0x1 + IPV6_ADDR_PREFERENCES = 0x48 IPV6_ADD_MEMBERSHIP = 0x14 IPV6_AUTHHDR = 0xa + IPV6_AUTOFLOWLABEL = 0x46 IPV6_CHECKSUM = 0x7 IPV6_DONTFRAG = 0x3e IPV6_DROP_MEMBERSHIP = 0x15 @@ -671,12 +678,14 @@ const ( IPV6_JOIN_GROUP = 0x14 IPV6_LEAVE_ANYCAST = 0x1c IPV6_LEAVE_GROUP = 0x15 + IPV6_MINHOPCOUNT = 0x49 IPV6_MTU = 0x18 IPV6_MTU_DISCOVER = 0x17 IPV6_MULTICAST_HOPS = 0x12 IPV6_MULTICAST_IF = 0x11 IPV6_MULTICAST_LOOP = 0x13 IPV6_NEXTHOP = 0x9 + IPV6_ORIGDSTADDR = 0x4a IPV6_PATHMTU = 0x3d IPV6_PKTINFO = 0x32 IPV6_PMTUDISC_DO = 0x2 @@ -687,8 +696,10 @@ const ( 
IPV6_PMTUDISC_WANT = 0x1 IPV6_RECVDSTOPTS = 0x3a IPV6_RECVERR = 0x19 + IPV6_RECVFRAGSIZE = 0x4d IPV6_RECVHOPLIMIT = 0x33 IPV6_RECVHOPOPTS = 0x35 + IPV6_RECVORIGDSTADDR = 0x4a IPV6_RECVPATHMTU = 0x3c IPV6_RECVPKTINFO = 0x31 IPV6_RECVRTHDR = 0x38 @@ -702,7 +713,9 @@ const ( IPV6_RXDSTOPTS = 0x3b IPV6_RXHOPOPTS = 0x36 IPV6_TCLASS = 0x43 + IPV6_TRANSPARENT = 0x4b IPV6_UNICAST_HOPS = 0x10 + IPV6_UNICAST_IF = 0x4c IPV6_V6ONLY = 0x1a IPV6_XFRM_POLICY = 0x23 IP_ADD_MEMBERSHIP = 0x23 @@ -745,6 +758,7 @@ const ( IP_PMTUDISC_PROBE = 0x3 IP_PMTUDISC_WANT = 0x1 IP_RECVERR = 0xb + IP_RECVFRAGSIZE = 0x19 IP_RECVOPTS = 0x6 IP_RECVORIGDSTADDR = 0x14 IP_RECVRETOPTS = 0x7 @@ -782,6 +796,7 @@ const ( KEYCTL_NEGATE = 0xd KEYCTL_READ = 0xb KEYCTL_REJECT = 0x13 + KEYCTL_RESTRICT_KEYRING = 0x1d KEYCTL_REVOKE = 0x3 KEYCTL_SEARCH = 0xa KEYCTL_SESSION_TO_PARENT = 0x12 @@ -915,6 +930,7 @@ const ( MS_SILENT = 0x8000 MS_SLAVE = 0x80000 MS_STRICTATIME = 0x1000000 + MS_SUBMOUNT = 0x4000000 MS_SYNC = 0x4 MS_SYNCHRONOUS = 0x10 MS_UNBINDABLE = 0x20000 @@ -929,6 +945,7 @@ const ( NETLINK_DNRTMSG = 0xe NETLINK_DROP_MEMBERSHIP = 0x2 NETLINK_ECRYPTFS = 0x13 + NETLINK_EXT_ACK = 0xb NETLINK_FIB_LOOKUP = 0xa NETLINK_FIREWALL = 0x3 NETLINK_GENERIC = 0x10 @@ -947,6 +964,7 @@ const ( NETLINK_RX_RING = 0x6 NETLINK_SCSITRANSPORT = 0x12 NETLINK_SELINUX = 0x7 + NETLINK_SMC = 0x16 NETLINK_SOCK_DIAG = 0x4 NETLINK_TX_RING = 0x7 NETLINK_UNUSED = 0x1 @@ -967,8 +985,10 @@ const ( NLMSG_NOOP = 0x1 NLMSG_OVERRUN = 0x4 NLM_F_ACK = 0x4 + NLM_F_ACK_TLVS = 0x200 NLM_F_APPEND = 0x800 NLM_F_ATOMIC = 0x400 + NLM_F_CAPPED = 0x100 NLM_F_CREATE = 0x400 NLM_F_DUMP = 0x300 NLM_F_DUMP_FILTERED = 0x20 @@ -1025,6 +1045,7 @@ const ( PACKET_FANOUT_EBPF = 0x7 PACKET_FANOUT_FLAG_DEFRAG = 0x8000 PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 + PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 PACKET_FANOUT_HASH = 0x0 PACKET_FANOUT_LB = 0x1 PACKET_FANOUT_QM = 0x5 @@ -1166,7 +1187,7 @@ const ( PR_SET_NO_NEW_PRIVS = 0x26 PR_SET_PDEATHSIG = 0x1 PR_SET_PTRACER = 0x59616d61 - PR_SET_PTRACER_ANY = -0x1 + PR_SET_PTRACER_ANY = 0xffffffffffffffff PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c PR_SET_THP_DISABLE = 0x29 @@ -1258,7 +1279,7 @@ const ( RLIMIT_RTTIME = 0xf RLIMIT_SIGPENDING = 0xb RLIMIT_STACK = 0x3 - RLIM_INFINITY = -0x1 + RLIM_INFINITY = 0xffffffffffffffff RTAX_ADVMSS = 0x8 RTAX_CC_ALGO = 0x10 RTAX_CWND = 0x7 @@ -1283,7 +1304,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x19 + RTA_MAX = 0x1a RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1327,6 +1348,7 @@ const ( RTM_DELLINK = 0x11 RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d + RTM_DELNETCONF = 0x51 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1335,6 +1357,7 @@ const ( RTM_DELTFILTER = 0x2d RTM_F_CLONED = 0x200 RTM_F_EQUALIZE = 0x400 + RTM_F_FIB_MATCH = 0x2000 RTM_F_LOOKUP_TABLE = 0x1000 RTM_F_NOTIFY = 0x100 RTM_F_PREFIX = 0x800 @@ -1356,10 +1379,11 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x5f + RTM_MAX = 0x63 RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 + RTM_NEWCACHEREPORT = 0x60 RTM_NEWLINK = 0x10 RTM_NEWMDB = 0x54 RTM_NEWNDUSEROPT = 0x44 @@ -1374,8 +1398,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x14 - RTM_NR_MSGTYPES = 0x50 + RTM_NR_FAMILIES = 0x15 + RTM_NR_MSGTYPES = 0x54 RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1386,6 +1410,7 @@ const ( RTNH_F_OFFLOAD = 0x8 RTNH_F_ONLINK = 0x4 RTNH_F_PERVASIVE = 0x2 + 
RTNH_F_UNRESOLVED = 0x20 RTN_MAX = 0xb RTPROT_BABEL = 0x2a RTPROT_BIRD = 0xc @@ -1416,6 +1441,7 @@ const ( SCM_TIMESTAMP = 0x1d SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 SCM_WIFI_STATUS = 0x29 SECCOMP_MODE_DISABLED = 0x0 @@ -1554,6 +1580,7 @@ const ( SO_BSDCOMPAT = 0xe SO_BUSY_POLL = 0x2e SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b @@ -1562,11 +1589,13 @@ const ( SO_ERROR = 0x1007 SO_GET_FILTER = 0x1a SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 SO_KEEPALIVE = 0x8 SO_LINGER = 0x80 SO_LOCK_FILTER = 0x2c SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 SO_NOFCS = 0x2b SO_NO_CHECK = 0xb SO_OOBINLINE = 0x100 @@ -1574,6 +1603,7 @@ const ( SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 + SO_PEERGROUPS = 0x3b SO_PEERNAME = 0x1c SO_PEERSEC = 0x1e SO_PRIORITY = 0xc @@ -1670,6 +1700,7 @@ const ( TCP_CORK = 0x3 TCP_DEFER_ACCEPT = 0x9 TCP_FASTOPEN = 0x17 + TCP_FASTOPEN_CONNECT = 0x1e TCP_INFO = 0xb TCP_KEEPCNT = 0x6 TCP_KEEPIDLE = 0x4 @@ -1728,6 +1759,7 @@ const ( TIOCGPKT = 0x40045438 TIOCGPTLCK = 0x40045439 TIOCGPTN = 0x40045430 + TIOCGPTPEER = 0x20005441 TIOCGRS485 = 0x4020542e TIOCGSERIAL = 0x5484 TIOCGSID = 0x7416 @@ -1814,6 +1846,8 @@ const ( TUNSETVNETHDRSZ = 0x800454d8 TUNSETVNETLE = 0x800454dc UMOUNT_NOFOLLOW = 0x8 + UTIME_NOW = 0x3fffffff + UTIME_OMIT = 0x3ffffffe VDISCARD = 0xd VEOF = 0x10 VEOL = 0x11 @@ -1844,6 +1878,17 @@ const ( WALL = 0x40000000 WCLONE = 0x80000000 WCONTINUED = 0x8 + WDIOC_GETBOOTSTATUS = 0x40045702 + WDIOC_GETPRETIMEOUT = 0x40045709 + WDIOC_GETSTATUS = 0x40045701 + WDIOC_GETSUPPORT = 0x40285700 + WDIOC_GETTEMP = 0x40045703 + WDIOC_GETTIMELEFT = 0x4004570a + WDIOC_GETTIMEOUT = 0x40045707 + WDIOC_KEEPALIVE = 0x40045705 + WDIOC_SETOPTIONS = 0x40045704 + WDIOC_SETPRETIMEOUT = 0xc0045708 + WDIOC_SETTIMEOUT = 0xc0045706 WEXITED = 0x4 WNOHANG = 0x1 WNOTHREAD = 0x20000000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 44191b0c2091..f795862d87e2 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -36,7 +36,7 @@ const ( AF_KEY = 0xf AF_LLC = 0x1a AF_LOCAL = 0x1 - AF_MAX = 0x2b + AF_MAX = 0x2c AF_MPLS = 0x1c AF_NETBEUI = 0xd AF_NETLINK = 0x10 @@ -51,6 +51,7 @@ const ( AF_ROUTE = 0x10 AF_RXRPC = 0x21 AF_SECURITY = 0xe + AF_SMC = 0x2b AF_SNA = 0x16 AF_TIPC = 0x1e AF_UNIX = 0x1 @@ -129,6 +130,7 @@ const ( ARPHRD_TUNNEL = 0x300 ARPHRD_TUNNEL6 = 0x301 ARPHRD_VOID = 0xffff + ARPHRD_VSOCKMON = 0x33a ARPHRD_X25 = 0x10f B0 = 0x0 B1000000 = 0x1008 @@ -392,6 +394,7 @@ const ( ETH_P_FIP = 0x8914 ETH_P_HDLC = 0x19 ETH_P_HSR = 0x892f + ETH_P_IBOE = 0x8915 ETH_P_IEEE802154 = 0xf6 ETH_P_IEEEPUP = 0xa00 ETH_P_IEEEPUPAT = 0xa01 @@ -453,6 +456,8 @@ const ( FF1 = 0x8000 FFDLY = 0x8000 FLUSHO = 0x2000 + FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 + FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 @@ -657,8 +662,10 @@ const ( IPV6_2292PKTOPTIONS = 0x6 IPV6_2292RTHDR = 0x5 IPV6_ADDRFORM = 0x1 + IPV6_ADDR_PREFERENCES = 0x48 IPV6_ADD_MEMBERSHIP = 0x14 IPV6_AUTHHDR = 0xa + IPV6_AUTOFLOWLABEL = 0x46 IPV6_CHECKSUM = 0x7 IPV6_DONTFRAG = 0x3e IPV6_DROP_MEMBERSHIP = 0x15 @@ -671,12 +678,14 @@ const ( IPV6_JOIN_GROUP = 0x14 IPV6_LEAVE_ANYCAST = 0x1c IPV6_LEAVE_GROUP = 0x15 + IPV6_MINHOPCOUNT = 0x49 IPV6_MTU = 0x18 
IPV6_MTU_DISCOVER = 0x17 IPV6_MULTICAST_HOPS = 0x12 IPV6_MULTICAST_IF = 0x11 IPV6_MULTICAST_LOOP = 0x13 IPV6_NEXTHOP = 0x9 + IPV6_ORIGDSTADDR = 0x4a IPV6_PATHMTU = 0x3d IPV6_PKTINFO = 0x32 IPV6_PMTUDISC_DO = 0x2 @@ -687,8 +696,10 @@ const ( IPV6_PMTUDISC_WANT = 0x1 IPV6_RECVDSTOPTS = 0x3a IPV6_RECVERR = 0x19 + IPV6_RECVFRAGSIZE = 0x4d IPV6_RECVHOPLIMIT = 0x33 IPV6_RECVHOPOPTS = 0x35 + IPV6_RECVORIGDSTADDR = 0x4a IPV6_RECVPATHMTU = 0x3c IPV6_RECVPKTINFO = 0x31 IPV6_RECVRTHDR = 0x38 @@ -702,7 +713,9 @@ const ( IPV6_RXDSTOPTS = 0x3b IPV6_RXHOPOPTS = 0x36 IPV6_TCLASS = 0x43 + IPV6_TRANSPARENT = 0x4b IPV6_UNICAST_HOPS = 0x10 + IPV6_UNICAST_IF = 0x4c IPV6_V6ONLY = 0x1a IPV6_XFRM_POLICY = 0x23 IP_ADD_MEMBERSHIP = 0x23 @@ -745,6 +758,7 @@ const ( IP_PMTUDISC_PROBE = 0x3 IP_PMTUDISC_WANT = 0x1 IP_RECVERR = 0xb + IP_RECVFRAGSIZE = 0x19 IP_RECVOPTS = 0x6 IP_RECVORIGDSTADDR = 0x14 IP_RECVRETOPTS = 0x7 @@ -782,6 +796,7 @@ const ( KEYCTL_NEGATE = 0xd KEYCTL_READ = 0xb KEYCTL_REJECT = 0x13 + KEYCTL_RESTRICT_KEYRING = 0x1d KEYCTL_REVOKE = 0x3 KEYCTL_SEARCH = 0xa KEYCTL_SESSION_TO_PARENT = 0x12 @@ -915,6 +930,7 @@ const ( MS_SILENT = 0x8000 MS_SLAVE = 0x80000 MS_STRICTATIME = 0x1000000 + MS_SUBMOUNT = 0x4000000 MS_SYNC = 0x4 MS_SYNCHRONOUS = 0x10 MS_UNBINDABLE = 0x20000 @@ -929,6 +945,7 @@ const ( NETLINK_DNRTMSG = 0xe NETLINK_DROP_MEMBERSHIP = 0x2 NETLINK_ECRYPTFS = 0x13 + NETLINK_EXT_ACK = 0xb NETLINK_FIB_LOOKUP = 0xa NETLINK_FIREWALL = 0x3 NETLINK_GENERIC = 0x10 @@ -947,6 +964,7 @@ const ( NETLINK_RX_RING = 0x6 NETLINK_SCSITRANSPORT = 0x12 NETLINK_SELINUX = 0x7 + NETLINK_SMC = 0x16 NETLINK_SOCK_DIAG = 0x4 NETLINK_TX_RING = 0x7 NETLINK_UNUSED = 0x1 @@ -967,8 +985,10 @@ const ( NLMSG_NOOP = 0x1 NLMSG_OVERRUN = 0x4 NLM_F_ACK = 0x4 + NLM_F_ACK_TLVS = 0x200 NLM_F_APPEND = 0x800 NLM_F_ATOMIC = 0x400 + NLM_F_CAPPED = 0x100 NLM_F_CREATE = 0x400 NLM_F_DUMP = 0x300 NLM_F_DUMP_FILTERED = 0x20 @@ -1025,6 +1045,7 @@ const ( PACKET_FANOUT_EBPF = 0x7 PACKET_FANOUT_FLAG_DEFRAG = 0x8000 PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 + PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 PACKET_FANOUT_HASH = 0x0 PACKET_FANOUT_LB = 0x1 PACKET_FANOUT_QM = 0x5 @@ -1258,7 +1279,7 @@ const ( RLIMIT_RTTIME = 0xf RLIMIT_SIGPENDING = 0xb RLIMIT_STACK = 0x3 - RLIM_INFINITY = -0x1 + RLIM_INFINITY = 0xffffffffffffffff RTAX_ADVMSS = 0x8 RTAX_CC_ALGO = 0x10 RTAX_CWND = 0x7 @@ -1283,7 +1304,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x19 + RTA_MAX = 0x1a RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1327,6 +1348,7 @@ const ( RTM_DELLINK = 0x11 RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d + RTM_DELNETCONF = 0x51 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1335,6 +1357,7 @@ const ( RTM_DELTFILTER = 0x2d RTM_F_CLONED = 0x200 RTM_F_EQUALIZE = 0x400 + RTM_F_FIB_MATCH = 0x2000 RTM_F_LOOKUP_TABLE = 0x1000 RTM_F_NOTIFY = 0x100 RTM_F_PREFIX = 0x800 @@ -1356,10 +1379,11 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x5f + RTM_MAX = 0x63 RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 + RTM_NEWCACHEREPORT = 0x60 RTM_NEWLINK = 0x10 RTM_NEWMDB = 0x54 RTM_NEWNDUSEROPT = 0x44 @@ -1374,8 +1398,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x14 - RTM_NR_MSGTYPES = 0x50 + RTM_NR_FAMILIES = 0x15 + RTM_NR_MSGTYPES = 0x54 RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1386,6 +1410,7 @@ const ( RTNH_F_OFFLOAD = 0x8 RTNH_F_ONLINK = 0x4 RTNH_F_PERVASIVE = 0x2 + RTNH_F_UNRESOLVED = 
0x20 RTN_MAX = 0xb RTPROT_BABEL = 0x2a RTPROT_BIRD = 0xc @@ -1416,6 +1441,7 @@ const ( SCM_TIMESTAMP = 0x1d SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 SCM_WIFI_STATUS = 0x29 SECCOMP_MODE_DISABLED = 0x0 @@ -1554,6 +1580,7 @@ const ( SO_BSDCOMPAT = 0xe SO_BUSY_POLL = 0x2e SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b @@ -1562,11 +1589,13 @@ const ( SO_ERROR = 0x1007 SO_GET_FILTER = 0x1a SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 SO_KEEPALIVE = 0x8 SO_LINGER = 0x80 SO_LOCK_FILTER = 0x2c SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 SO_NOFCS = 0x2b SO_NO_CHECK = 0xb SO_OOBINLINE = 0x100 @@ -1574,6 +1603,7 @@ const ( SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 + SO_PEERGROUPS = 0x3b SO_PEERNAME = 0x1c SO_PEERSEC = 0x1e SO_PRIORITY = 0xc @@ -1670,6 +1700,7 @@ const ( TCP_CORK = 0x3 TCP_DEFER_ACCEPT = 0x9 TCP_FASTOPEN = 0x17 + TCP_FASTOPEN_CONNECT = 0x1e TCP_INFO = 0xb TCP_KEEPCNT = 0x6 TCP_KEEPIDLE = 0x4 @@ -1728,6 +1759,7 @@ const ( TIOCGPKT = 0x40045438 TIOCGPTLCK = 0x40045439 TIOCGPTN = 0x40045430 + TIOCGPTPEER = 0x20005441 TIOCGRS485 = 0x4020542e TIOCGSERIAL = 0x5484 TIOCGSID = 0x7416 @@ -1814,6 +1846,8 @@ const ( TUNSETVNETHDRSZ = 0x800454d8 TUNSETVNETLE = 0x800454dc UMOUNT_NOFOLLOW = 0x8 + UTIME_NOW = 0x3fffffff + UTIME_OMIT = 0x3ffffffe VDISCARD = 0xd VEOF = 0x10 VEOL = 0x11 @@ -1844,6 +1878,17 @@ const ( WALL = 0x40000000 WCLONE = 0x80000000 WCONTINUED = 0x8 + WDIOC_GETBOOTSTATUS = 0x40045702 + WDIOC_GETPRETIMEOUT = 0x40045709 + WDIOC_GETSTATUS = 0x40045701 + WDIOC_GETSUPPORT = 0x40285700 + WDIOC_GETTEMP = 0x40045703 + WDIOC_GETTIMELEFT = 0x4004570a + WDIOC_GETTIMEOUT = 0x40045707 + WDIOC_KEEPALIVE = 0x40045705 + WDIOC_SETOPTIONS = 0x40045704 + WDIOC_SETPRETIMEOUT = 0xc0045708 + WDIOC_SETTIMEOUT = 0xc0045706 WEXITED = 0x4 WNOHANG = 0x1 WNOTHREAD = 0x20000000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 6772a59beb7a..2544c4b6326b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -36,7 +36,7 @@ const ( AF_KEY = 0xf AF_LLC = 0x1a AF_LOCAL = 0x1 - AF_MAX = 0x2b + AF_MAX = 0x2c AF_MPLS = 0x1c AF_NETBEUI = 0xd AF_NETLINK = 0x10 @@ -51,6 +51,7 @@ const ( AF_ROUTE = 0x10 AF_RXRPC = 0x21 AF_SECURITY = 0xe + AF_SMC = 0x2b AF_SNA = 0x16 AF_TIPC = 0x1e AF_UNIX = 0x1 @@ -129,6 +130,7 @@ const ( ARPHRD_TUNNEL = 0x300 ARPHRD_TUNNEL6 = 0x301 ARPHRD_VOID = 0xffff + ARPHRD_VSOCKMON = 0x33a ARPHRD_X25 = 0x10f B0 = 0x0 B1000000 = 0x17 @@ -392,6 +394,7 @@ const ( ETH_P_FIP = 0x8914 ETH_P_HDLC = 0x19 ETH_P_HSR = 0x892f + ETH_P_IBOE = 0x8915 ETH_P_IEEE802154 = 0xf6 ETH_P_IEEEPUP = 0xa00 ETH_P_IEEEPUPAT = 0xa01 @@ -453,6 +456,8 @@ const ( FF1 = 0x4000 FFDLY = 0x4000 FLUSHO = 0x800000 + FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 + FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 @@ -657,8 +662,10 @@ const ( IPV6_2292PKTOPTIONS = 0x6 IPV6_2292RTHDR = 0x5 IPV6_ADDRFORM = 0x1 + IPV6_ADDR_PREFERENCES = 0x48 IPV6_ADD_MEMBERSHIP = 0x14 IPV6_AUTHHDR = 0xa + IPV6_AUTOFLOWLABEL = 0x46 IPV6_CHECKSUM = 0x7 IPV6_DONTFRAG = 0x3e IPV6_DROP_MEMBERSHIP = 0x15 @@ -671,12 +678,14 @@ const ( IPV6_JOIN_GROUP = 0x14 IPV6_LEAVE_ANYCAST = 0x1c IPV6_LEAVE_GROUP = 0x15 + IPV6_MINHOPCOUNT = 0x49 IPV6_MTU = 0x18 IPV6_MTU_DISCOVER = 
0x17 IPV6_MULTICAST_HOPS = 0x12 IPV6_MULTICAST_IF = 0x11 IPV6_MULTICAST_LOOP = 0x13 IPV6_NEXTHOP = 0x9 + IPV6_ORIGDSTADDR = 0x4a IPV6_PATHMTU = 0x3d IPV6_PKTINFO = 0x32 IPV6_PMTUDISC_DO = 0x2 @@ -687,8 +696,10 @@ const ( IPV6_PMTUDISC_WANT = 0x1 IPV6_RECVDSTOPTS = 0x3a IPV6_RECVERR = 0x19 + IPV6_RECVFRAGSIZE = 0x4d IPV6_RECVHOPLIMIT = 0x33 IPV6_RECVHOPOPTS = 0x35 + IPV6_RECVORIGDSTADDR = 0x4a IPV6_RECVPATHMTU = 0x3c IPV6_RECVPKTINFO = 0x31 IPV6_RECVRTHDR = 0x38 @@ -702,7 +713,9 @@ const ( IPV6_RXDSTOPTS = 0x3b IPV6_RXHOPOPTS = 0x36 IPV6_TCLASS = 0x43 + IPV6_TRANSPARENT = 0x4b IPV6_UNICAST_HOPS = 0x10 + IPV6_UNICAST_IF = 0x4c IPV6_V6ONLY = 0x1a IPV6_XFRM_POLICY = 0x23 IP_ADD_MEMBERSHIP = 0x23 @@ -745,6 +758,7 @@ const ( IP_PMTUDISC_PROBE = 0x3 IP_PMTUDISC_WANT = 0x1 IP_RECVERR = 0xb + IP_RECVFRAGSIZE = 0x19 IP_RECVOPTS = 0x6 IP_RECVORIGDSTADDR = 0x14 IP_RECVRETOPTS = 0x7 @@ -782,6 +796,7 @@ const ( KEYCTL_NEGATE = 0xd KEYCTL_READ = 0xb KEYCTL_REJECT = 0x13 + KEYCTL_RESTRICT_KEYRING = 0x1d KEYCTL_REVOKE = 0x3 KEYCTL_SEARCH = 0xa KEYCTL_SESSION_TO_PARENT = 0x12 @@ -914,6 +929,7 @@ const ( MS_SILENT = 0x8000 MS_SLAVE = 0x80000 MS_STRICTATIME = 0x1000000 + MS_SUBMOUNT = 0x4000000 MS_SYNC = 0x4 MS_SYNCHRONOUS = 0x10 MS_UNBINDABLE = 0x20000 @@ -928,6 +944,7 @@ const ( NETLINK_DNRTMSG = 0xe NETLINK_DROP_MEMBERSHIP = 0x2 NETLINK_ECRYPTFS = 0x13 + NETLINK_EXT_ACK = 0xb NETLINK_FIB_LOOKUP = 0xa NETLINK_FIREWALL = 0x3 NETLINK_GENERIC = 0x10 @@ -946,6 +963,7 @@ const ( NETLINK_RX_RING = 0x6 NETLINK_SCSITRANSPORT = 0x12 NETLINK_SELINUX = 0x7 + NETLINK_SMC = 0x16 NETLINK_SOCK_DIAG = 0x4 NETLINK_TX_RING = 0x7 NETLINK_UNUSED = 0x1 @@ -968,8 +986,10 @@ const ( NLMSG_NOOP = 0x1 NLMSG_OVERRUN = 0x4 NLM_F_ACK = 0x4 + NLM_F_ACK_TLVS = 0x200 NLM_F_APPEND = 0x800 NLM_F_ATOMIC = 0x400 + NLM_F_CAPPED = 0x100 NLM_F_CREATE = 0x400 NLM_F_DUMP = 0x300 NLM_F_DUMP_FILTERED = 0x20 @@ -1026,6 +1046,7 @@ const ( PACKET_FANOUT_EBPF = 0x7 PACKET_FANOUT_FLAG_DEFRAG = 0x8000 PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 + PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 PACKET_FANOUT_HASH = 0x0 PACKET_FANOUT_LB = 0x1 PACKET_FANOUT_QM = 0x5 @@ -1168,7 +1189,7 @@ const ( PR_SET_NO_NEW_PRIVS = 0x26 PR_SET_PDEATHSIG = 0x1 PR_SET_PTRACER = 0x59616d61 - PR_SET_PTRACER_ANY = -0x1 + PR_SET_PTRACER_ANY = 0xffffffffffffffff PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c PR_SET_THP_DISABLE = 0x29 @@ -1314,7 +1335,7 @@ const ( RLIMIT_RTTIME = 0xf RLIMIT_SIGPENDING = 0xb RLIMIT_STACK = 0x3 - RLIM_INFINITY = -0x1 + RLIM_INFINITY = 0xffffffffffffffff RTAX_ADVMSS = 0x8 RTAX_CC_ALGO = 0x10 RTAX_CWND = 0x7 @@ -1339,7 +1360,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x19 + RTA_MAX = 0x1a RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1383,6 +1404,7 @@ const ( RTM_DELLINK = 0x11 RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d + RTM_DELNETCONF = 0x51 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1391,6 +1413,7 @@ const ( RTM_DELTFILTER = 0x2d RTM_F_CLONED = 0x200 RTM_F_EQUALIZE = 0x400 + RTM_F_FIB_MATCH = 0x2000 RTM_F_LOOKUP_TABLE = 0x1000 RTM_F_NOTIFY = 0x100 RTM_F_PREFIX = 0x800 @@ -1412,10 +1435,11 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x5f + RTM_MAX = 0x63 RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 + RTM_NEWCACHEREPORT = 0x60 RTM_NEWLINK = 0x10 RTM_NEWMDB = 0x54 RTM_NEWNDUSEROPT = 0x44 @@ -1430,8 +1454,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x14 - RTM_NR_MSGTYPES = 
0x50 + RTM_NR_FAMILIES = 0x15 + RTM_NR_MSGTYPES = 0x54 RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1442,6 +1466,7 @@ const ( RTNH_F_OFFLOAD = 0x8 RTNH_F_ONLINK = 0x4 RTNH_F_PERVASIVE = 0x2 + RTNH_F_UNRESOLVED = 0x20 RTN_MAX = 0xb RTPROT_BABEL = 0x2a RTPROT_BIRD = 0xc @@ -1472,6 +1497,7 @@ const ( SCM_TIMESTAMP = 0x1d SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 SCM_WIFI_STATUS = 0x29 SECCOMP_MODE_DISABLED = 0x0 @@ -1610,6 +1636,7 @@ const ( SO_BSDCOMPAT = 0xe SO_BUSY_POLL = 0x2e SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b @@ -1618,11 +1645,13 @@ const ( SO_ERROR = 0x4 SO_GET_FILTER = 0x1a SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 SO_KEEPALIVE = 0x9 SO_LINGER = 0xd SO_LOCK_FILTER = 0x2c SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 SO_NOFCS = 0x2b SO_NO_CHECK = 0xb SO_OOBINLINE = 0xa @@ -1630,6 +1659,7 @@ const ( SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 + SO_PEERGROUPS = 0x3b SO_PEERNAME = 0x1c SO_PEERSEC = 0x1f SO_PRIORITY = 0xc @@ -1724,6 +1754,7 @@ const ( TCP_CORK = 0x3 TCP_DEFER_ACCEPT = 0x9 TCP_FASTOPEN = 0x17 + TCP_FASTOPEN_CONNECT = 0x1e TCP_INFO = 0xb TCP_KEEPCNT = 0x6 TCP_KEEPIDLE = 0x4 @@ -1780,6 +1811,7 @@ const ( TIOCGPKT = 0x40045438 TIOCGPTLCK = 0x40045439 TIOCGPTN = 0x40045430 + TIOCGPTPEER = 0x20005441 TIOCGRS485 = 0x542e TIOCGSERIAL = 0x541e TIOCGSID = 0x5429 @@ -1872,6 +1904,8 @@ const ( TUNSETVNETHDRSZ = 0x800454d8 TUNSETVNETLE = 0x800454dc UMOUNT_NOFOLLOW = 0x8 + UTIME_NOW = 0x3fffffff + UTIME_OMIT = 0x3ffffffe VDISCARD = 0x10 VEOF = 0x4 VEOL = 0x6 @@ -1901,6 +1935,17 @@ const ( WALL = 0x40000000 WCLONE = 0x80000000 WCONTINUED = 0x8 + WDIOC_GETBOOTSTATUS = 0x40045702 + WDIOC_GETPRETIMEOUT = 0x40045709 + WDIOC_GETSTATUS = 0x40045701 + WDIOC_GETSUPPORT = 0x40285700 + WDIOC_GETTEMP = 0x40045703 + WDIOC_GETTIMELEFT = 0x4004570a + WDIOC_GETTIMEOUT = 0x40045707 + WDIOC_KEEPALIVE = 0x40045705 + WDIOC_SETOPTIONS = 0x40045704 + WDIOC_SETPRETIMEOUT = 0xc0045708 + WDIOC_SETTIMEOUT = 0xc0045706 WEXITED = 0x4 WNOHANG = 0x1 WNOTHREAD = 0x20000000 @@ -2081,7 +2126,6 @@ const ( SIGTSTP = syscall.Signal(0x14) SIGTTIN = syscall.Signal(0x15) SIGTTOU = syscall.Signal(0x16) - SIGUNUSED = syscall.Signal(0x1f) SIGURG = syscall.Signal(0x17) SIGUSR1 = syscall.Signal(0xa) SIGUSR2 = syscall.Signal(0xc) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index ad29c3d3d463..133bdf584797 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -36,7 +36,7 @@ const ( AF_KEY = 0xf AF_LLC = 0x1a AF_LOCAL = 0x1 - AF_MAX = 0x2b + AF_MAX = 0x2c AF_MPLS = 0x1c AF_NETBEUI = 0xd AF_NETLINK = 0x10 @@ -51,6 +51,7 @@ const ( AF_ROUTE = 0x10 AF_RXRPC = 0x21 AF_SECURITY = 0xe + AF_SMC = 0x2b AF_SNA = 0x16 AF_TIPC = 0x1e AF_UNIX = 0x1 @@ -129,6 +130,7 @@ const ( ARPHRD_TUNNEL = 0x300 ARPHRD_TUNNEL6 = 0x301 ARPHRD_VOID = 0xffff + ARPHRD_VSOCKMON = 0x33a ARPHRD_X25 = 0x10f B0 = 0x0 B1000000 = 0x17 @@ -392,6 +394,7 @@ const ( ETH_P_FIP = 0x8914 ETH_P_HDLC = 0x19 ETH_P_HSR = 0x892f + ETH_P_IBOE = 0x8915 ETH_P_IEEE802154 = 0xf6 ETH_P_IEEEPUP = 0xa00 ETH_P_IEEEPUPAT = 0xa01 @@ -453,6 +456,8 @@ const ( FF1 = 0x4000 FFDLY = 0x4000 FLUSHO = 0x800000 + FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 + FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 
FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 @@ -657,8 +662,10 @@ const ( IPV6_2292PKTOPTIONS = 0x6 IPV6_2292RTHDR = 0x5 IPV6_ADDRFORM = 0x1 + IPV6_ADDR_PREFERENCES = 0x48 IPV6_ADD_MEMBERSHIP = 0x14 IPV6_AUTHHDR = 0xa + IPV6_AUTOFLOWLABEL = 0x46 IPV6_CHECKSUM = 0x7 IPV6_DONTFRAG = 0x3e IPV6_DROP_MEMBERSHIP = 0x15 @@ -671,12 +678,14 @@ const ( IPV6_JOIN_GROUP = 0x14 IPV6_LEAVE_ANYCAST = 0x1c IPV6_LEAVE_GROUP = 0x15 + IPV6_MINHOPCOUNT = 0x49 IPV6_MTU = 0x18 IPV6_MTU_DISCOVER = 0x17 IPV6_MULTICAST_HOPS = 0x12 IPV6_MULTICAST_IF = 0x11 IPV6_MULTICAST_LOOP = 0x13 IPV6_NEXTHOP = 0x9 + IPV6_ORIGDSTADDR = 0x4a IPV6_PATHMTU = 0x3d IPV6_PKTINFO = 0x32 IPV6_PMTUDISC_DO = 0x2 @@ -687,8 +696,10 @@ const ( IPV6_PMTUDISC_WANT = 0x1 IPV6_RECVDSTOPTS = 0x3a IPV6_RECVERR = 0x19 + IPV6_RECVFRAGSIZE = 0x4d IPV6_RECVHOPLIMIT = 0x33 IPV6_RECVHOPOPTS = 0x35 + IPV6_RECVORIGDSTADDR = 0x4a IPV6_RECVPATHMTU = 0x3c IPV6_RECVPKTINFO = 0x31 IPV6_RECVRTHDR = 0x38 @@ -702,7 +713,9 @@ const ( IPV6_RXDSTOPTS = 0x3b IPV6_RXHOPOPTS = 0x36 IPV6_TCLASS = 0x43 + IPV6_TRANSPARENT = 0x4b IPV6_UNICAST_HOPS = 0x10 + IPV6_UNICAST_IF = 0x4c IPV6_V6ONLY = 0x1a IPV6_XFRM_POLICY = 0x23 IP_ADD_MEMBERSHIP = 0x23 @@ -745,6 +758,7 @@ const ( IP_PMTUDISC_PROBE = 0x3 IP_PMTUDISC_WANT = 0x1 IP_RECVERR = 0xb + IP_RECVFRAGSIZE = 0x19 IP_RECVOPTS = 0x6 IP_RECVORIGDSTADDR = 0x14 IP_RECVRETOPTS = 0x7 @@ -782,6 +796,7 @@ const ( KEYCTL_NEGATE = 0xd KEYCTL_READ = 0xb KEYCTL_REJECT = 0x13 + KEYCTL_RESTRICT_KEYRING = 0x1d KEYCTL_REVOKE = 0x3 KEYCTL_SEARCH = 0xa KEYCTL_SESSION_TO_PARENT = 0x12 @@ -914,6 +929,7 @@ const ( MS_SILENT = 0x8000 MS_SLAVE = 0x80000 MS_STRICTATIME = 0x1000000 + MS_SUBMOUNT = 0x4000000 MS_SYNC = 0x4 MS_SYNCHRONOUS = 0x10 MS_UNBINDABLE = 0x20000 @@ -928,6 +944,7 @@ const ( NETLINK_DNRTMSG = 0xe NETLINK_DROP_MEMBERSHIP = 0x2 NETLINK_ECRYPTFS = 0x13 + NETLINK_EXT_ACK = 0xb NETLINK_FIB_LOOKUP = 0xa NETLINK_FIREWALL = 0x3 NETLINK_GENERIC = 0x10 @@ -946,6 +963,7 @@ const ( NETLINK_RX_RING = 0x6 NETLINK_SCSITRANSPORT = 0x12 NETLINK_SELINUX = 0x7 + NETLINK_SMC = 0x16 NETLINK_SOCK_DIAG = 0x4 NETLINK_TX_RING = 0x7 NETLINK_UNUSED = 0x1 @@ -968,8 +986,10 @@ const ( NLMSG_NOOP = 0x1 NLMSG_OVERRUN = 0x4 NLM_F_ACK = 0x4 + NLM_F_ACK_TLVS = 0x200 NLM_F_APPEND = 0x800 NLM_F_ATOMIC = 0x400 + NLM_F_CAPPED = 0x100 NLM_F_CREATE = 0x400 NLM_F_DUMP = 0x300 NLM_F_DUMP_FILTERED = 0x20 @@ -1026,6 +1046,7 @@ const ( PACKET_FANOUT_EBPF = 0x7 PACKET_FANOUT_FLAG_DEFRAG = 0x8000 PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 + PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 PACKET_FANOUT_HASH = 0x0 PACKET_FANOUT_LB = 0x1 PACKET_FANOUT_QM = 0x5 @@ -1168,7 +1189,7 @@ const ( PR_SET_NO_NEW_PRIVS = 0x26 PR_SET_PDEATHSIG = 0x1 PR_SET_PTRACER = 0x59616d61 - PR_SET_PTRACER_ANY = -0x1 + PR_SET_PTRACER_ANY = 0xffffffffffffffff PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c PR_SET_THP_DISABLE = 0x29 @@ -1314,7 +1335,7 @@ const ( RLIMIT_RTTIME = 0xf RLIMIT_SIGPENDING = 0xb RLIMIT_STACK = 0x3 - RLIM_INFINITY = -0x1 + RLIM_INFINITY = 0xffffffffffffffff RTAX_ADVMSS = 0x8 RTAX_CC_ALGO = 0x10 RTAX_CWND = 0x7 @@ -1339,7 +1360,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x19 + RTA_MAX = 0x1a RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1383,6 +1404,7 @@ const ( RTM_DELLINK = 0x11 RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d + RTM_DELNETCONF = 0x51 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1391,6 +1413,7 @@ const ( RTM_DELTFILTER = 0x2d RTM_F_CLONED = 0x200 RTM_F_EQUALIZE = 0x400 + RTM_F_FIB_MATCH = 0x2000 RTM_F_LOOKUP_TABLE 
= 0x1000 RTM_F_NOTIFY = 0x100 RTM_F_PREFIX = 0x800 @@ -1412,10 +1435,11 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x5f + RTM_MAX = 0x63 RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 + RTM_NEWCACHEREPORT = 0x60 RTM_NEWLINK = 0x10 RTM_NEWMDB = 0x54 RTM_NEWNDUSEROPT = 0x44 @@ -1430,8 +1454,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x14 - RTM_NR_MSGTYPES = 0x50 + RTM_NR_FAMILIES = 0x15 + RTM_NR_MSGTYPES = 0x54 RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1442,6 +1466,7 @@ const ( RTNH_F_OFFLOAD = 0x8 RTNH_F_ONLINK = 0x4 RTNH_F_PERVASIVE = 0x2 + RTNH_F_UNRESOLVED = 0x20 RTN_MAX = 0xb RTPROT_BABEL = 0x2a RTPROT_BIRD = 0xc @@ -1472,6 +1497,7 @@ const ( SCM_TIMESTAMP = 0x1d SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 SCM_WIFI_STATUS = 0x29 SECCOMP_MODE_DISABLED = 0x0 @@ -1610,6 +1636,7 @@ const ( SO_BSDCOMPAT = 0xe SO_BUSY_POLL = 0x2e SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b @@ -1618,11 +1645,13 @@ const ( SO_ERROR = 0x4 SO_GET_FILTER = 0x1a SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 SO_KEEPALIVE = 0x9 SO_LINGER = 0xd SO_LOCK_FILTER = 0x2c SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 SO_NOFCS = 0x2b SO_NO_CHECK = 0xb SO_OOBINLINE = 0xa @@ -1630,6 +1659,7 @@ const ( SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 + SO_PEERGROUPS = 0x3b SO_PEERNAME = 0x1c SO_PEERSEC = 0x1f SO_PRIORITY = 0xc @@ -1724,6 +1754,7 @@ const ( TCP_CORK = 0x3 TCP_DEFER_ACCEPT = 0x9 TCP_FASTOPEN = 0x17 + TCP_FASTOPEN_CONNECT = 0x1e TCP_INFO = 0xb TCP_KEEPCNT = 0x6 TCP_KEEPIDLE = 0x4 @@ -1780,6 +1811,7 @@ const ( TIOCGPKT = 0x40045438 TIOCGPTLCK = 0x40045439 TIOCGPTN = 0x40045430 + TIOCGPTPEER = 0x20005441 TIOCGRS485 = 0x542e TIOCGSERIAL = 0x541e TIOCGSID = 0x5429 @@ -1872,6 +1904,8 @@ const ( TUNSETVNETHDRSZ = 0x800454d8 TUNSETVNETLE = 0x800454dc UMOUNT_NOFOLLOW = 0x8 + UTIME_NOW = 0x3fffffff + UTIME_OMIT = 0x3ffffffe VDISCARD = 0x10 VEOF = 0x4 VEOL = 0x6 @@ -1901,6 +1935,17 @@ const ( WALL = 0x40000000 WCLONE = 0x80000000 WCONTINUED = 0x8 + WDIOC_GETBOOTSTATUS = 0x40045702 + WDIOC_GETPRETIMEOUT = 0x40045709 + WDIOC_GETSTATUS = 0x40045701 + WDIOC_GETSUPPORT = 0x40285700 + WDIOC_GETTEMP = 0x40045703 + WDIOC_GETTIMELEFT = 0x4004570a + WDIOC_GETTIMEOUT = 0x40045707 + WDIOC_KEEPALIVE = 0x40045705 + WDIOC_SETOPTIONS = 0x40045704 + WDIOC_SETPRETIMEOUT = 0xc0045708 + WDIOC_SETTIMEOUT = 0xc0045706 WEXITED = 0x4 WNOHANG = 0x1 WNOTHREAD = 0x20000000 @@ -2081,7 +2126,6 @@ const ( SIGTSTP = syscall.Signal(0x14) SIGTTIN = syscall.Signal(0x15) SIGTTOU = syscall.Signal(0x16) - SIGUNUSED = syscall.Signal(0x1f) SIGURG = syscall.Signal(0x17) SIGUSR1 = syscall.Signal(0xa) SIGUSR2 = syscall.Signal(0xc) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 3b5707548a97..b921fb17a498 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -36,7 +36,7 @@ const ( AF_KEY = 0xf AF_LLC = 0x1a AF_LOCAL = 0x1 - AF_MAX = 0x2b + AF_MAX = 0x2c AF_MPLS = 0x1c AF_NETBEUI = 0xd AF_NETLINK = 0x10 @@ -51,6 +51,7 @@ const ( AF_ROUTE = 0x10 AF_RXRPC = 0x21 AF_SECURITY = 0xe + AF_SMC = 0x2b AF_SNA = 0x16 AF_TIPC = 0x1e AF_UNIX = 0x1 @@ -129,6 +130,7 @@ const ( ARPHRD_TUNNEL = 0x300 ARPHRD_TUNNEL6 = 0x301 ARPHRD_VOID = 0xffff + 
ARPHRD_VSOCKMON = 0x33a ARPHRD_X25 = 0x10f B0 = 0x0 B1000000 = 0x1008 @@ -392,6 +394,7 @@ const ( ETH_P_FIP = 0x8914 ETH_P_HDLC = 0x19 ETH_P_HSR = 0x892f + ETH_P_IBOE = 0x8915 ETH_P_IEEE802154 = 0xf6 ETH_P_IEEEPUP = 0xa00 ETH_P_IEEEPUPAT = 0xa01 @@ -453,6 +456,8 @@ const ( FF1 = 0x8000 FFDLY = 0x8000 FLUSHO = 0x1000 + FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 + FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 @@ -657,8 +662,10 @@ const ( IPV6_2292PKTOPTIONS = 0x6 IPV6_2292RTHDR = 0x5 IPV6_ADDRFORM = 0x1 + IPV6_ADDR_PREFERENCES = 0x48 IPV6_ADD_MEMBERSHIP = 0x14 IPV6_AUTHHDR = 0xa + IPV6_AUTOFLOWLABEL = 0x46 IPV6_CHECKSUM = 0x7 IPV6_DONTFRAG = 0x3e IPV6_DROP_MEMBERSHIP = 0x15 @@ -671,12 +678,14 @@ const ( IPV6_JOIN_GROUP = 0x14 IPV6_LEAVE_ANYCAST = 0x1c IPV6_LEAVE_GROUP = 0x15 + IPV6_MINHOPCOUNT = 0x49 IPV6_MTU = 0x18 IPV6_MTU_DISCOVER = 0x17 IPV6_MULTICAST_HOPS = 0x12 IPV6_MULTICAST_IF = 0x11 IPV6_MULTICAST_LOOP = 0x13 IPV6_NEXTHOP = 0x9 + IPV6_ORIGDSTADDR = 0x4a IPV6_PATHMTU = 0x3d IPV6_PKTINFO = 0x32 IPV6_PMTUDISC_DO = 0x2 @@ -687,8 +696,10 @@ const ( IPV6_PMTUDISC_WANT = 0x1 IPV6_RECVDSTOPTS = 0x3a IPV6_RECVERR = 0x19 + IPV6_RECVFRAGSIZE = 0x4d IPV6_RECVHOPLIMIT = 0x33 IPV6_RECVHOPOPTS = 0x35 + IPV6_RECVORIGDSTADDR = 0x4a IPV6_RECVPATHMTU = 0x3c IPV6_RECVPKTINFO = 0x31 IPV6_RECVRTHDR = 0x38 @@ -702,7 +713,9 @@ const ( IPV6_RXDSTOPTS = 0x3b IPV6_RXHOPOPTS = 0x36 IPV6_TCLASS = 0x43 + IPV6_TRANSPARENT = 0x4b IPV6_UNICAST_HOPS = 0x10 + IPV6_UNICAST_IF = 0x4c IPV6_V6ONLY = 0x1a IPV6_XFRM_POLICY = 0x23 IP_ADD_MEMBERSHIP = 0x23 @@ -745,6 +758,7 @@ const ( IP_PMTUDISC_PROBE = 0x3 IP_PMTUDISC_WANT = 0x1 IP_RECVERR = 0xb + IP_RECVFRAGSIZE = 0x19 IP_RECVOPTS = 0x6 IP_RECVORIGDSTADDR = 0x14 IP_RECVRETOPTS = 0x7 @@ -782,6 +796,7 @@ const ( KEYCTL_NEGATE = 0xd KEYCTL_READ = 0xb KEYCTL_REJECT = 0x13 + KEYCTL_RESTRICT_KEYRING = 0x1d KEYCTL_REVOKE = 0x3 KEYCTL_SEARCH = 0xa KEYCTL_SESSION_TO_PARENT = 0x12 @@ -914,6 +929,7 @@ const ( MS_SILENT = 0x8000 MS_SLAVE = 0x80000 MS_STRICTATIME = 0x1000000 + MS_SUBMOUNT = 0x4000000 MS_SYNC = 0x4 MS_SYNCHRONOUS = 0x10 MS_UNBINDABLE = 0x20000 @@ -928,6 +944,7 @@ const ( NETLINK_DNRTMSG = 0xe NETLINK_DROP_MEMBERSHIP = 0x2 NETLINK_ECRYPTFS = 0x13 + NETLINK_EXT_ACK = 0xb NETLINK_FIB_LOOKUP = 0xa NETLINK_FIREWALL = 0x3 NETLINK_GENERIC = 0x10 @@ -946,6 +963,7 @@ const ( NETLINK_RX_RING = 0x6 NETLINK_SCSITRANSPORT = 0x12 NETLINK_SELINUX = 0x7 + NETLINK_SMC = 0x16 NETLINK_SOCK_DIAG = 0x4 NETLINK_TX_RING = 0x7 NETLINK_UNUSED = 0x1 @@ -966,8 +984,10 @@ const ( NLMSG_NOOP = 0x1 NLMSG_OVERRUN = 0x4 NLM_F_ACK = 0x4 + NLM_F_ACK_TLVS = 0x200 NLM_F_APPEND = 0x800 NLM_F_ATOMIC = 0x400 + NLM_F_CAPPED = 0x100 NLM_F_CREATE = 0x400 NLM_F_DUMP = 0x300 NLM_F_DUMP_FILTERED = 0x20 @@ -1024,6 +1044,7 @@ const ( PACKET_FANOUT_EBPF = 0x7 PACKET_FANOUT_FLAG_DEFRAG = 0x8000 PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 + PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 PACKET_FANOUT_HASH = 0x0 PACKET_FANOUT_LB = 0x1 PACKET_FANOUT_QM = 0x5 @@ -1165,7 +1186,7 @@ const ( PR_SET_NO_NEW_PRIVS = 0x26 PR_SET_PDEATHSIG = 0x1 PR_SET_PTRACER = 0x59616d61 - PR_SET_PTRACER_ANY = -0x1 + PR_SET_PTRACER_ANY = 0xffffffffffffffff PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c PR_SET_THP_DISABLE = 0x29 @@ -1318,7 +1339,7 @@ const ( RLIMIT_RTTIME = 0xf RLIMIT_SIGPENDING = 0xb RLIMIT_STACK = 0x3 - RLIM_INFINITY = -0x1 + RLIM_INFINITY = 0xffffffffffffffff RTAX_ADVMSS = 0x8 RTAX_CC_ALGO = 0x10 RTAX_CWND = 0x7 @@ -1343,7 +1364,7 @@ const ( 
RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x19 + RTA_MAX = 0x1a RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1387,6 +1408,7 @@ const ( RTM_DELLINK = 0x11 RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d + RTM_DELNETCONF = 0x51 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1395,6 +1417,7 @@ const ( RTM_DELTFILTER = 0x2d RTM_F_CLONED = 0x200 RTM_F_EQUALIZE = 0x400 + RTM_F_FIB_MATCH = 0x2000 RTM_F_LOOKUP_TABLE = 0x1000 RTM_F_NOTIFY = 0x100 RTM_F_PREFIX = 0x800 @@ -1416,10 +1439,11 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x5f + RTM_MAX = 0x63 RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 + RTM_NEWCACHEREPORT = 0x60 RTM_NEWLINK = 0x10 RTM_NEWMDB = 0x54 RTM_NEWNDUSEROPT = 0x44 @@ -1434,8 +1458,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x14 - RTM_NR_MSGTYPES = 0x50 + RTM_NR_FAMILIES = 0x15 + RTM_NR_MSGTYPES = 0x54 RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1446,6 +1470,7 @@ const ( RTNH_F_OFFLOAD = 0x8 RTNH_F_ONLINK = 0x4 RTNH_F_PERVASIVE = 0x2 + RTNH_F_UNRESOLVED = 0x20 RTN_MAX = 0xb RTPROT_BABEL = 0x2a RTPROT_BIRD = 0xc @@ -1476,6 +1501,7 @@ const ( SCM_TIMESTAMP = 0x1d SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 SCM_WIFI_STATUS = 0x29 SECCOMP_MODE_DISABLED = 0x0 @@ -1614,6 +1640,7 @@ const ( SO_BSDCOMPAT = 0xe SO_BUSY_POLL = 0x2e SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b @@ -1622,11 +1649,13 @@ const ( SO_ERROR = 0x4 SO_GET_FILTER = 0x1a SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 SO_KEEPALIVE = 0x9 SO_LINGER = 0xd SO_LOCK_FILTER = 0x2c SO_MARK = 0x24 SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 SO_NOFCS = 0x2b SO_NO_CHECK = 0xb SO_OOBINLINE = 0xa @@ -1634,6 +1663,7 @@ const ( SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 + SO_PEERGROUPS = 0x3b SO_PEERNAME = 0x1c SO_PEERSEC = 0x1f SO_PRIORITY = 0xc @@ -1730,6 +1760,7 @@ const ( TCP_CORK = 0x3 TCP_DEFER_ACCEPT = 0x9 TCP_FASTOPEN = 0x17 + TCP_FASTOPEN_CONNECT = 0x1e TCP_INFO = 0xb TCP_KEEPCNT = 0x6 TCP_KEEPIDLE = 0x4 @@ -1789,6 +1820,7 @@ const ( TIOCGPKT = 0x80045438 TIOCGPTLCK = 0x80045439 TIOCGPTN = 0x80045430 + TIOCGPTPEER = 0x5441 TIOCGRS485 = 0x542e TIOCGSERIAL = 0x541e TIOCGSID = 0x5429 @@ -1872,6 +1904,8 @@ const ( TUNSETVNETHDRSZ = 0x400454d8 TUNSETVNETLE = 0x400454dc UMOUNT_NOFOLLOW = 0x8 + UTIME_NOW = 0x3fffffff + UTIME_OMIT = 0x3ffffffe VDISCARD = 0xd VEOF = 0x4 VEOL = 0xb @@ -1901,6 +1935,17 @@ const ( WALL = 0x40000000 WCLONE = 0x80000000 WCONTINUED = 0x8 + WDIOC_GETBOOTSTATUS = 0x80045702 + WDIOC_GETPRETIMEOUT = 0x80045709 + WDIOC_GETSTATUS = 0x80045701 + WDIOC_GETSUPPORT = 0x80285700 + WDIOC_GETTEMP = 0x80045703 + WDIOC_GETTIMELEFT = 0x8004570a + WDIOC_GETTIMEOUT = 0x80045707 + WDIOC_KEEPALIVE = 0x80045705 + WDIOC_SETOPTIONS = 0x80045704 + WDIOC_SETPRETIMEOUT = 0xc0045708 + WDIOC_SETTIMEOUT = 0xc0045706 WEXITED = 0x4 WNOHANG = 0x1 WNOTHREAD = 0x20000000 @@ -2081,7 +2126,6 @@ const ( SIGTSTP = syscall.Signal(0x14) SIGTTIN = syscall.Signal(0x15) SIGTTOU = syscall.Signal(0x16) - SIGUNUSED = syscall.Signal(0x1f) SIGURG = syscall.Signal(0x17) SIGUSR1 = syscall.Signal(0xa) SIGUSR2 = syscall.Signal(0xc) diff --git a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go index 81e83d78fcef..09eedb009353 100644 --- 
a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go @@ -664,6 +664,8 @@ const ( MS_OLDSYNC = 0x0 MS_SYNC = 0x4 M_FLUSH = 0x86 + NAME_MAX = 0xff + NEWDEV = 0x1 NL0 = 0x0 NL1 = 0x100 NLDLY = 0x100 @@ -672,6 +674,9 @@ const ( OFDEL = 0x80 OFILL = 0x40 OLCUC = 0x2 + OLDDEV = 0x0 + ONBITSMAJOR = 0x7 + ONBITSMINOR = 0x8 ONLCR = 0x4 ONLRET = 0x20 ONOCR = 0x10 @@ -1105,6 +1110,7 @@ const ( VEOL = 0x5 VEOL2 = 0x6 VERASE = 0x2 + VERASE2 = 0x11 VINTR = 0x0 VKILL = 0x3 VLNEXT = 0xf diff --git a/vendor/golang.org/x/sys/unix/zptrace386_linux.go b/vendor/golang.org/x/sys/unix/zptrace386_linux.go new file mode 100644 index 000000000000..2d21c49e1265 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zptrace386_linux.go @@ -0,0 +1,80 @@ +// Code generated by linux/mkall.go generatePtracePair(386, amd64). DO NOT EDIT. + +// +build linux +// +build 386 amd64 + +package unix + +import "unsafe" + +// PtraceRegs386 is the registers used by 386 binaries. +type PtraceRegs386 struct { + Ebx int32 + Ecx int32 + Edx int32 + Esi int32 + Edi int32 + Ebp int32 + Eax int32 + Xds int32 + Xes int32 + Xfs int32 + Xgs int32 + Orig_eax int32 + Eip int32 + Xcs int32 + Eflags int32 + Esp int32 + Xss int32 +} + +// PtraceGetRegs386 fetches the registers used by 386 binaries. +func PtraceGetRegs386(pid int, regsout *PtraceRegs386) error { + return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) +} + +// PtraceSetRegs386 sets the registers used by 386 binaries. +func PtraceSetRegs386(pid int, regs *PtraceRegs386) error { + return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) +} + +// PtraceRegsAmd64 is the registers used by amd64 binaries. +type PtraceRegsAmd64 struct { + R15 uint64 + R14 uint64 + R13 uint64 + R12 uint64 + Rbp uint64 + Rbx uint64 + R11 uint64 + R10 uint64 + R9 uint64 + R8 uint64 + Rax uint64 + Rcx uint64 + Rdx uint64 + Rsi uint64 + Rdi uint64 + Orig_rax uint64 + Rip uint64 + Cs uint64 + Eflags uint64 + Rsp uint64 + Ss uint64 + Fs_base uint64 + Gs_base uint64 + Ds uint64 + Es uint64 + Fs uint64 + Gs uint64 +} + +// PtraceGetRegsAmd64 fetches the registers used by amd64 binaries. +func PtraceGetRegsAmd64(pid int, regsout *PtraceRegsAmd64) error { + return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) +} + +// PtraceSetRegsAmd64 sets the registers used by amd64 binaries. +func PtraceSetRegsAmd64(pid int, regs *PtraceRegsAmd64) error { + return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) +} diff --git a/vendor/golang.org/x/sys/unix/zptracearm_linux.go b/vendor/golang.org/x/sys/unix/zptracearm_linux.go new file mode 100644 index 000000000000..faf23bbed97e --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zptracearm_linux.go @@ -0,0 +1,41 @@ +// Code generated by linux/mkall.go generatePtracePair(arm, arm64). DO NOT EDIT. + +// +build linux +// +build arm arm64 + +package unix + +import "unsafe" + +// PtraceRegsArm is the registers used by arm binaries. +type PtraceRegsArm struct { + Uregs [18]uint32 +} + +// PtraceGetRegsArm fetches the registers used by arm binaries. +func PtraceGetRegsArm(pid int, regsout *PtraceRegsArm) error { + return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) +} + +// PtraceSetRegsArm sets the registers used by arm binaries. +func PtraceSetRegsArm(pid int, regs *PtraceRegsArm) error { + return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) +} + +// PtraceRegsArm64 is the registers used by arm64 binaries. 
+type PtraceRegsArm64 struct { + Regs [31]uint64 + Sp uint64 + Pc uint64 + Pstate uint64 +} + +// PtraceGetRegsArm64 fetches the registers used by arm64 binaries. +func PtraceGetRegsArm64(pid int, regsout *PtraceRegsArm64) error { + return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) +} + +// PtraceSetRegsArm64 sets the registers used by arm64 binaries. +func PtraceSetRegsArm64(pid int, regs *PtraceRegsArm64) error { + return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) +} diff --git a/vendor/golang.org/x/sys/unix/zptracemips_linux.go b/vendor/golang.org/x/sys/unix/zptracemips_linux.go new file mode 100644 index 000000000000..c431131e63cf --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zptracemips_linux.go @@ -0,0 +1,50 @@ +// Code generated by linux/mkall.go generatePtracePair(mips, mips64). DO NOT EDIT. + +// +build linux +// +build mips mips64 + +package unix + +import "unsafe" + +// PtraceRegsMips is the registers used by mips binaries. +type PtraceRegsMips struct { + Regs [32]uint64 + Lo uint64 + Hi uint64 + Epc uint64 + Badvaddr uint64 + Status uint64 + Cause uint64 +} + +// PtraceGetRegsMips fetches the registers used by mips binaries. +func PtraceGetRegsMips(pid int, regsout *PtraceRegsMips) error { + return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) +} + +// PtraceSetRegsMips sets the registers used by mips binaries. +func PtraceSetRegsMips(pid int, regs *PtraceRegsMips) error { + return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) +} + +// PtraceRegsMips64 is the registers used by mips64 binaries. +type PtraceRegsMips64 struct { + Regs [32]uint64 + Lo uint64 + Hi uint64 + Epc uint64 + Badvaddr uint64 + Status uint64 + Cause uint64 +} + +// PtraceGetRegsMips64 fetches the registers used by mips64 binaries. +func PtraceGetRegsMips64(pid int, regsout *PtraceRegsMips64) error { + return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) +} + +// PtraceSetRegsMips64 sets the registers used by mips64 binaries. +func PtraceSetRegsMips64(pid int, regs *PtraceRegsMips64) error { + return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) +} diff --git a/vendor/golang.org/x/sys/unix/zptracemipsle_linux.go b/vendor/golang.org/x/sys/unix/zptracemipsle_linux.go new file mode 100644 index 000000000000..dc3d6d37322a --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zptracemipsle_linux.go @@ -0,0 +1,50 @@ +// Code generated by linux/mkall.go generatePtracePair(mipsle, mips64le). DO NOT EDIT. + +// +build linux +// +build mipsle mips64le + +package unix + +import "unsafe" + +// PtraceRegsMipsle is the registers used by mipsle binaries. +type PtraceRegsMipsle struct { + Regs [32]uint64 + Lo uint64 + Hi uint64 + Epc uint64 + Badvaddr uint64 + Status uint64 + Cause uint64 +} + +// PtraceGetRegsMipsle fetches the registers used by mipsle binaries. +func PtraceGetRegsMipsle(pid int, regsout *PtraceRegsMipsle) error { + return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) +} + +// PtraceSetRegsMipsle sets the registers used by mipsle binaries. +func PtraceSetRegsMipsle(pid int, regs *PtraceRegsMipsle) error { + return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) +} + +// PtraceRegsMips64le is the registers used by mips64le binaries. +type PtraceRegsMips64le struct { + Regs [32]uint64 + Lo uint64 + Hi uint64 + Epc uint64 + Badvaddr uint64 + Status uint64 + Cause uint64 +} + +// PtraceGetRegsMips64le fetches the registers used by mips64le binaries. 
+func PtraceGetRegsMips64le(pid int, regsout *PtraceRegsMips64le) error { + return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) +} + +// PtraceSetRegsMips64le sets the registers used by mips64le binaries. +func PtraceSetRegsMips64le(pid int, regs *PtraceRegsMips64le) error { + return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go index 10491e9ed32d..9fb1b31f4838 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go @@ -408,6 +408,17 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index 5f1f6bfef717..1e0fb46b01af 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -408,6 +408,17 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go index 7a40974594f7..e1026a88a5bc 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go @@ -408,6 +408,17 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 07c6ebc9f4c9..37fb210a08a1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -408,6 +408,17 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT 
+ func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go index 7fa205cd03b1..75761477d5e5 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -266,6 +266,17 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Madvise(b []byte, behav int) (err error) { var _p0 unsafe.Pointer if len(b) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index 1a0bb4cb0e2f..8bcecfb9b688 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -266,6 +266,17 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Madvise(b []byte, behav int) (err error) { var _p0 unsafe.Pointer if len(b) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index ac1e8e013645..61c0cf99bb32 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -266,6 +266,17 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Madvise(b []byte, behav int) (err error) { var _p0 unsafe.Pointer if len(b) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index 2b4e6acf042e..ffd01073c142 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -266,6 +266,17 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Madvise(b []byte, behav int) (err error) { var _p0 unsafe.Pointer if len(b) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index 38c1bbdf96d7..85a2907e5dd6 100644 --- 
a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -1035,6 +1035,17 @@ func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func read(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1446,14 +1457,8 @@ func Mlock(b []byte) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1462,8 +1467,14 @@ func Munlock(b []byte) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } @@ -1472,14 +1483,14 @@ func Mlockall(flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Msync(b []byte, flags int) (err error) { +func Munlock(b []byte) (err error) { var _p0 unsafe.Pointer if len(b) > 0 { _p0 = unsafe.Pointer(&b[0]) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index dc8fe0a84d53..8e2be97d36f8 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -1035,6 +1035,17 @@ func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func read(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1446,14 +1457,8 @@ func Mlock(b []byte) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = 
unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1462,8 +1467,14 @@ func Munlock(b []byte) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } @@ -1472,14 +1483,14 @@ func Mlockall(flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Msync(b []byte, flags int) (err error) { +func Munlock(b []byte) (err error) { var _p0 unsafe.Pointer if len(b) > 0 { _p0 = unsafe.Pointer(&b[0]) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index 4d2804278503..5ff0637fde4d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -1035,6 +1035,17 @@ func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func read(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1446,14 +1457,8 @@ func Mlock(b []byte) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1462,8 +1467,14 @@ func Munlock(b []byte) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } @@ -1472,14 +1483,14 @@ func Mlockall(flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Msync(b []byte, flags int) (err error) { +func Munlock(b []byte) (err error) { var _p0 unsafe.Pointer if len(b) > 0 { _p0 = unsafe.Pointer(&b[0]) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MSYNC, 
uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index 20ad4b6c9bc0..40760110f35c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -1035,6 +1035,17 @@ func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func read(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1446,14 +1457,8 @@ func Mlock(b []byte) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1462,8 +1467,14 @@ func Munlock(b []byte) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } @@ -1472,14 +1483,14 @@ func Mlockall(flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Msync(b []byte, flags int) (err error) { +func Munlock(b []byte) (err error) { var _p0 unsafe.Pointer if len(b) > 0 { _p0 = unsafe.Pointer(&b[0]) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1667,17 +1678,6 @@ func Seek(fd int, offset int64, whence int) (off int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) written = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index 
9f194dc4adfa..984e56173381 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -1035,6 +1035,17 @@ func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func read(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1446,14 +1457,8 @@ func Mlock(b []byte) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1462,8 +1467,14 @@ func Munlock(b []byte) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } @@ -1472,14 +1483,14 @@ func Mlockall(flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Msync(b []byte, flags int) (err error) { +func Munlock(b []byte) (err error) { var _p0 unsafe.Pointer if len(b) > 0 { _p0 = unsafe.Pointer(&b[0]) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index 4fde3ef08a7c..f98194e24599 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -1035,6 +1035,17 @@ func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func read(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1446,14 +1457,8 @@ func Mlock(b []byte) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = 
unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1462,8 +1467,14 @@ func Munlock(b []byte) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } @@ -1472,14 +1483,14 @@ func Mlockall(flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Msync(b []byte, flags int) (err error) { +func Munlock(b []byte) (err error) { var _p0 unsafe.Pointer if len(b) > 0 { _p0 = unsafe.Pointer(&b[0]) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1677,17 +1688,6 @@ func Seek(fd int, offset int64, whence int) (off int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) written = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index f6463423c454..f30267019bfe 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -1035,6 +1035,17 @@ func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func read(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1446,14 +1457,8 @@ func Mlock(b []byte) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1462,8 
+1467,14 @@ func Munlock(b []byte) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } @@ -1472,14 +1483,14 @@ func Mlockall(flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Msync(b []byte, flags int) (err error) { +func Munlock(b []byte) (err error) { var _p0 unsafe.Pointer if len(b) > 0 { _p0 = unsafe.Pointer(&b[0]) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1677,17 +1688,6 @@ func Seek(fd int, offset int64, whence int) (off int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) written = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index 964591e5e8c8..f18c5e4a76eb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -1035,6 +1035,17 @@ func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func read(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1446,14 +1457,8 @@ func Mlock(b []byte) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1462,8 +1467,14 @@ func Munlock(b []byte) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if 
len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } @@ -1472,14 +1483,14 @@ func Mlockall(flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Msync(b []byte, flags int) (err error) { +func Munlock(b []byte) (err error) { var _p0 unsafe.Pointer if len(b) > 0 { _p0 = unsafe.Pointer(&b[0]) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index 204ab1ae3811..bc268243cf1c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -1035,6 +1035,17 @@ func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func read(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1446,14 +1457,8 @@ func Mlock(b []byte) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1462,8 +1467,14 @@ func Munlock(b []byte) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } @@ -1472,14 +1483,14 @@ func Mlockall(flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Msync(b []byte, flags int) (err error) { +func Munlock(b []byte) (err error) { var _p0 unsafe.Pointer if len(b) > 0 { _p0 = unsafe.Pointer(&b[0]) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1734,7 +1745,7 @@ func Seek(fd int, offset int64, whence int) (off int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), 
uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + r0, _, e1 := Syscall6(SYS__NEWSELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index a8a2b0b0a359..8d874cbcdcc5 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -1035,6 +1035,17 @@ func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func read(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1446,14 +1457,8 @@ func Mlock(b []byte) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1462,8 +1467,14 @@ func Munlock(b []byte) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } @@ -1472,14 +1483,14 @@ func Mlockall(flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Msync(b []byte, flags int) (err error) { +func Munlock(b []byte) (err error) { var _p0 unsafe.Pointer if len(b) > 0 { _p0 = unsafe.Pointer(&b[0]) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1734,7 +1745,7 @@ func Seek(fd int, offset int64, whence int) (off int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + r0, _, e1 := Syscall6(SYS__NEWSELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index 
b6ff9e39238b..169321273d2d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -1035,6 +1035,17 @@ func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func read(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1446,14 +1457,8 @@ func Mlock(b []byte) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1462,8 +1467,14 @@ func Munlock(b []byte) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } @@ -1472,14 +1483,14 @@ func Mlockall(flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Msync(b []byte, flags int) (err error) { +func Munlock(b []byte) (err error) { var _p0 unsafe.Pointer if len(b) > 0 { _p0 = unsafe.Pointer(&b[0]) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index db99fd0c992d..cfdea854ff50 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -266,6 +266,17 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Madvise(b []byte, behav int) (err error) { var _p0 unsafe.Pointer if len(b) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index 7b6c2c87e6ed..244a3c761858 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -266,6 +266,17 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { // THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Madvise(b []byte, behav int) (err error) { var _p0 unsafe.Pointer if len(b) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index 0f4cc3b5284b..e891adc3a86b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -266,6 +266,17 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Madvise(b []byte, behav int) (err error) { var _p0 unsafe.Pointer if len(b) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 7baea87c7b0d..f48beb0917b2 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -266,6 +266,17 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Madvise(b []byte, behav int) (err error) { var _p0 unsafe.Pointer if len(b) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 0d69ce6b5217..44a3faf77fa5 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -266,6 +266,17 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Madvise(b []byte, behav int) (err error) { var _p0 unsafe.Pointer if len(b) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index 41572c26e419..1563752dd5e8 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -266,6 +266,17 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT + func Madvise(b []byte, behav int) (err error) { var _p0 unsafe.Pointer if len(b) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index 4287133d0754..1d452764987d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -25,7 +25,11 @@ import ( //go:cgo_import_dynamic libc___xnet_recvmsg __xnet_recvmsg "libsocket.so" //go:cgo_import_dynamic libc___xnet_sendmsg __xnet_sendmsg "libsocket.so" //go:cgo_import_dynamic libc_acct acct "libc.so" +//go:cgo_import_dynamic libc___makedev __makedev "libc.so" +//go:cgo_import_dynamic libc___major __major "libc.so" +//go:cgo_import_dynamic libc___minor __minor "libc.so" //go:cgo_import_dynamic libc_ioctl ioctl "libc.so" +//go:cgo_import_dynamic libc_poll poll "libc.so" //go:cgo_import_dynamic libc_access access "libc.so" //go:cgo_import_dynamic libc_adjtime adjtime "libc.so" //go:cgo_import_dynamic libc_chdir chdir "libc.so" @@ -75,6 +79,7 @@ import ( //go:cgo_import_dynamic libc_mlock mlock "libc.so" //go:cgo_import_dynamic libc_mlockall mlockall "libc.so" //go:cgo_import_dynamic libc_mprotect mprotect "libc.so" +//go:cgo_import_dynamic libc_msync msync "libc.so" //go:cgo_import_dynamic libc_munlock munlock "libc.so" //go:cgo_import_dynamic libc_munlockall munlockall "libc.so" //go:cgo_import_dynamic libc_nanosleep nanosleep "libc.so" @@ -129,7 +134,6 @@ import ( //go:cgo_import_dynamic libc_getpeername getpeername "libsocket.so" //go:cgo_import_dynamic libc_setsockopt setsockopt "libsocket.so" //go:cgo_import_dynamic libc_recvfrom recvfrom "libsocket.so" -//go:cgo_import_dynamic libc_sysconf sysconf "libc.so" //go:linkname procpipe libc_pipe //go:linkname procgetsockname libc_getsockname @@ -146,7 +150,11 @@ import ( //go:linkname proc__xnet_recvmsg libc___xnet_recvmsg //go:linkname proc__xnet_sendmsg libc___xnet_sendmsg //go:linkname procacct libc_acct +//go:linkname proc__makedev libc___makedev +//go:linkname proc__major libc___major +//go:linkname proc__minor libc___minor //go:linkname procioctl libc_ioctl +//go:linkname procpoll libc_poll //go:linkname procAccess libc_access //go:linkname procAdjtime libc_adjtime //go:linkname procChdir libc_chdir @@ -196,6 +204,7 @@ import ( //go:linkname procMlock libc_mlock //go:linkname procMlockall libc_mlockall //go:linkname procMprotect libc_mprotect +//go:linkname procMsync libc_msync //go:linkname procMunlock libc_munlock //go:linkname procMunlockall libc_munlockall //go:linkname procNanosleep libc_nanosleep @@ -250,7 +259,6 @@ import ( //go:linkname procgetpeername libc_getpeername //go:linkname procsetsockopt libc_setsockopt //go:linkname procrecvfrom libc_recvfrom -//go:linkname procsysconf libc_sysconf var ( procpipe, @@ -268,7 +276,11 @@ var ( proc__xnet_recvmsg, proc__xnet_sendmsg, procacct, + proc__makedev, + proc__major, + proc__minor, procioctl, + procpoll, procAccess, procAdjtime, procChdir, @@ -318,6 +330,7 @@ var ( procMlock, procMlockall, procMprotect, + procMsync, procMunlock, procMunlockall, procNanosleep, @@ -371,8 +384,7 @@ var ( proc__xnet_getsockopt, procgetpeername, procsetsockopt, - procrecvfrom, - procsysconf syscallFunc + procrecvfrom syscallFunc ) func pipe(p *[2]_C_int) (n int, err error) { @@ -522,6 +534,24 @@ func acct(path *byte) (err error) { return } +func __makedev(version int, major uint, minor uint) (val uint64) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&proc__makedev)), 3, 
uintptr(version), uintptr(major), uintptr(minor), 0, 0, 0) + val = uint64(r0) + return +} + +func __major(version int, dev uint64) (val uint) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&proc__major)), 2, uintptr(version), uintptr(dev), 0, 0, 0, 0) + val = uint(r0) + return +} + +func __minor(version int, dev uint64) (val uint) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&proc__minor)), 2, uintptr(version), uintptr(dev), 0, 0, 0, 0) + val = uint(r0) + return +} + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0) if e1 != 0 { @@ -530,6 +560,15 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpoll)), 3, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout), 0, 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1020,6 +1059,18 @@ func Mprotect(b []byte, prot int) (err error) { return } +func Msync(b []byte, flags int) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMsync)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(flags), 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + func Munlock(b []byte) (err error) { var _p0 *byte if len(b) > 0 { @@ -1589,12 +1640,3 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl } return } - -func sysconf(name int) (n int64, err error) { - r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procsysconf)), 1, uintptr(name), 0, 0, 0, 0, 0) - n = int64(r0) - if e1 != 0 { - err = e1 - } - return -} diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zsysctl_openbsd.go rename to vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go new file mode 100644 index 000000000000..83bb935b91c5 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go @@ -0,0 +1,270 @@ +// mksysctl_openbsd.pl +// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT + +package unix + +type mibentry struct { + ctlname string + ctloid []_C_int +} + +var sysctlMib = []mibentry{ + {"ddb.console", []_C_int{9, 6}}, + {"ddb.log", []_C_int{9, 7}}, + {"ddb.max_line", []_C_int{9, 3}}, + {"ddb.max_width", []_C_int{9, 2}}, + {"ddb.panic", []_C_int{9, 5}}, + {"ddb.radix", []_C_int{9, 1}}, + {"ddb.tab_stop_width", []_C_int{9, 4}}, + {"ddb.trigger", []_C_int{9, 8}}, + {"fs.posix.setuid", []_C_int{3, 1, 1}}, + {"hw.allowpowerdown", []_C_int{6, 22}}, + {"hw.byteorder", []_C_int{6, 4}}, + {"hw.cpuspeed", []_C_int{6, 12}}, + {"hw.diskcount", []_C_int{6, 10}}, + {"hw.disknames", []_C_int{6, 8}}, + {"hw.diskstats", []_C_int{6, 9}}, + {"hw.machine", []_C_int{6, 1}}, + {"hw.model", []_C_int{6, 2}}, + {"hw.ncpu", []_C_int{6, 3}}, + {"hw.ncpufound", []_C_int{6, 21}}, + {"hw.pagesize", []_C_int{6, 7}}, + {"hw.physmem", []_C_int{6, 19}}, + {"hw.product", []_C_int{6, 15}}, + {"hw.serialno", []_C_int{6, 17}}, + {"hw.setperf", []_C_int{6, 13}}, + {"hw.usermem", []_C_int{6, 20}}, + {"hw.uuid", []_C_int{6, 18}}, + {"hw.vendor", []_C_int{6, 14}}, + {"hw.version", 
[]_C_int{6, 16}}, + {"kern.arandom", []_C_int{1, 37}}, + {"kern.argmax", []_C_int{1, 8}}, + {"kern.boottime", []_C_int{1, 21}}, + {"kern.bufcachepercent", []_C_int{1, 72}}, + {"kern.ccpu", []_C_int{1, 45}}, + {"kern.clockrate", []_C_int{1, 12}}, + {"kern.consdev", []_C_int{1, 75}}, + {"kern.cp_time", []_C_int{1, 40}}, + {"kern.cp_time2", []_C_int{1, 71}}, + {"kern.cryptodevallowsoft", []_C_int{1, 53}}, + {"kern.domainname", []_C_int{1, 22}}, + {"kern.file", []_C_int{1, 73}}, + {"kern.forkstat", []_C_int{1, 42}}, + {"kern.fscale", []_C_int{1, 46}}, + {"kern.fsync", []_C_int{1, 33}}, + {"kern.hostid", []_C_int{1, 11}}, + {"kern.hostname", []_C_int{1, 10}}, + {"kern.intrcnt.nintrcnt", []_C_int{1, 63, 1}}, + {"kern.job_control", []_C_int{1, 19}}, + {"kern.malloc.buckets", []_C_int{1, 39, 1}}, + {"kern.malloc.kmemnames", []_C_int{1, 39, 3}}, + {"kern.maxclusters", []_C_int{1, 67}}, + {"kern.maxfiles", []_C_int{1, 7}}, + {"kern.maxlocksperuid", []_C_int{1, 70}}, + {"kern.maxpartitions", []_C_int{1, 23}}, + {"kern.maxproc", []_C_int{1, 6}}, + {"kern.maxthread", []_C_int{1, 25}}, + {"kern.maxvnodes", []_C_int{1, 5}}, + {"kern.mbstat", []_C_int{1, 59}}, + {"kern.msgbuf", []_C_int{1, 48}}, + {"kern.msgbufsize", []_C_int{1, 38}}, + {"kern.nchstats", []_C_int{1, 41}}, + {"kern.netlivelocks", []_C_int{1, 76}}, + {"kern.nfiles", []_C_int{1, 56}}, + {"kern.ngroups", []_C_int{1, 18}}, + {"kern.nosuidcoredump", []_C_int{1, 32}}, + {"kern.nprocs", []_C_int{1, 47}}, + {"kern.nselcoll", []_C_int{1, 43}}, + {"kern.nthreads", []_C_int{1, 26}}, + {"kern.numvnodes", []_C_int{1, 58}}, + {"kern.osrelease", []_C_int{1, 2}}, + {"kern.osrevision", []_C_int{1, 3}}, + {"kern.ostype", []_C_int{1, 1}}, + {"kern.osversion", []_C_int{1, 27}}, + {"kern.pool_debug", []_C_int{1, 77}}, + {"kern.posix1version", []_C_int{1, 17}}, + {"kern.proc", []_C_int{1, 66}}, + {"kern.random", []_C_int{1, 31}}, + {"kern.rawpartition", []_C_int{1, 24}}, + {"kern.saved_ids", []_C_int{1, 20}}, + {"kern.securelevel", []_C_int{1, 9}}, + {"kern.seminfo", []_C_int{1, 61}}, + {"kern.shminfo", []_C_int{1, 62}}, + {"kern.somaxconn", []_C_int{1, 28}}, + {"kern.sominconn", []_C_int{1, 29}}, + {"kern.splassert", []_C_int{1, 54}}, + {"kern.stackgap_random", []_C_int{1, 50}}, + {"kern.sysvipc_info", []_C_int{1, 51}}, + {"kern.sysvmsg", []_C_int{1, 34}}, + {"kern.sysvsem", []_C_int{1, 35}}, + {"kern.sysvshm", []_C_int{1, 36}}, + {"kern.timecounter.choice", []_C_int{1, 69, 4}}, + {"kern.timecounter.hardware", []_C_int{1, 69, 3}}, + {"kern.timecounter.tick", []_C_int{1, 69, 1}}, + {"kern.timecounter.timestepwarnings", []_C_int{1, 69, 2}}, + {"kern.tty.maxptys", []_C_int{1, 44, 6}}, + {"kern.tty.nptys", []_C_int{1, 44, 7}}, + {"kern.tty.tk_cancc", []_C_int{1, 44, 4}}, + {"kern.tty.tk_nin", []_C_int{1, 44, 1}}, + {"kern.tty.tk_nout", []_C_int{1, 44, 2}}, + {"kern.tty.tk_rawcc", []_C_int{1, 44, 3}}, + {"kern.tty.ttyinfo", []_C_int{1, 44, 5}}, + {"kern.ttycount", []_C_int{1, 57}}, + {"kern.userasymcrypto", []_C_int{1, 60}}, + {"kern.usercrypto", []_C_int{1, 52}}, + {"kern.usermount", []_C_int{1, 30}}, + {"kern.version", []_C_int{1, 4}}, + {"kern.vnode", []_C_int{1, 13}}, + {"kern.watchdog.auto", []_C_int{1, 64, 2}}, + {"kern.watchdog.period", []_C_int{1, 64, 1}}, + {"net.bpf.bufsize", []_C_int{4, 31, 1}}, + {"net.bpf.maxbufsize", []_C_int{4, 31, 2}}, + {"net.inet.ah.enable", []_C_int{4, 2, 51, 1}}, + {"net.inet.ah.stats", []_C_int{4, 2, 51, 2}}, + {"net.inet.carp.allow", []_C_int{4, 2, 112, 1}}, + {"net.inet.carp.log", []_C_int{4, 2, 112, 3}}, + 
{"net.inet.carp.preempt", []_C_int{4, 2, 112, 2}}, + {"net.inet.carp.stats", []_C_int{4, 2, 112, 4}}, + {"net.inet.divert.recvspace", []_C_int{4, 2, 258, 1}}, + {"net.inet.divert.sendspace", []_C_int{4, 2, 258, 2}}, + {"net.inet.divert.stats", []_C_int{4, 2, 258, 3}}, + {"net.inet.esp.enable", []_C_int{4, 2, 50, 1}}, + {"net.inet.esp.stats", []_C_int{4, 2, 50, 4}}, + {"net.inet.esp.udpencap", []_C_int{4, 2, 50, 2}}, + {"net.inet.esp.udpencap_port", []_C_int{4, 2, 50, 3}}, + {"net.inet.etherip.allow", []_C_int{4, 2, 97, 1}}, + {"net.inet.etherip.stats", []_C_int{4, 2, 97, 2}}, + {"net.inet.gre.allow", []_C_int{4, 2, 47, 1}}, + {"net.inet.gre.wccp", []_C_int{4, 2, 47, 2}}, + {"net.inet.icmp.bmcastecho", []_C_int{4, 2, 1, 2}}, + {"net.inet.icmp.errppslimit", []_C_int{4, 2, 1, 3}}, + {"net.inet.icmp.maskrepl", []_C_int{4, 2, 1, 1}}, + {"net.inet.icmp.rediraccept", []_C_int{4, 2, 1, 4}}, + {"net.inet.icmp.redirtimeout", []_C_int{4, 2, 1, 5}}, + {"net.inet.icmp.stats", []_C_int{4, 2, 1, 7}}, + {"net.inet.icmp.tstamprepl", []_C_int{4, 2, 1, 6}}, + {"net.inet.igmp.stats", []_C_int{4, 2, 2, 1}}, + {"net.inet.ip.arpqueued", []_C_int{4, 2, 0, 36}}, + {"net.inet.ip.encdebug", []_C_int{4, 2, 0, 12}}, + {"net.inet.ip.forwarding", []_C_int{4, 2, 0, 1}}, + {"net.inet.ip.ifq.congestion", []_C_int{4, 2, 0, 30, 4}}, + {"net.inet.ip.ifq.drops", []_C_int{4, 2, 0, 30, 3}}, + {"net.inet.ip.ifq.len", []_C_int{4, 2, 0, 30, 1}}, + {"net.inet.ip.ifq.maxlen", []_C_int{4, 2, 0, 30, 2}}, + {"net.inet.ip.maxqueue", []_C_int{4, 2, 0, 11}}, + {"net.inet.ip.mforwarding", []_C_int{4, 2, 0, 31}}, + {"net.inet.ip.mrtproto", []_C_int{4, 2, 0, 34}}, + {"net.inet.ip.mrtstats", []_C_int{4, 2, 0, 35}}, + {"net.inet.ip.mtu", []_C_int{4, 2, 0, 4}}, + {"net.inet.ip.mtudisc", []_C_int{4, 2, 0, 27}}, + {"net.inet.ip.mtudisctimeout", []_C_int{4, 2, 0, 28}}, + {"net.inet.ip.multipath", []_C_int{4, 2, 0, 32}}, + {"net.inet.ip.portfirst", []_C_int{4, 2, 0, 7}}, + {"net.inet.ip.porthifirst", []_C_int{4, 2, 0, 9}}, + {"net.inet.ip.porthilast", []_C_int{4, 2, 0, 10}}, + {"net.inet.ip.portlast", []_C_int{4, 2, 0, 8}}, + {"net.inet.ip.redirect", []_C_int{4, 2, 0, 2}}, + {"net.inet.ip.sourceroute", []_C_int{4, 2, 0, 5}}, + {"net.inet.ip.stats", []_C_int{4, 2, 0, 33}}, + {"net.inet.ip.ttl", []_C_int{4, 2, 0, 3}}, + {"net.inet.ipcomp.enable", []_C_int{4, 2, 108, 1}}, + {"net.inet.ipcomp.stats", []_C_int{4, 2, 108, 2}}, + {"net.inet.ipip.allow", []_C_int{4, 2, 4, 1}}, + {"net.inet.ipip.stats", []_C_int{4, 2, 4, 2}}, + {"net.inet.mobileip.allow", []_C_int{4, 2, 55, 1}}, + {"net.inet.pfsync.stats", []_C_int{4, 2, 240, 1}}, + {"net.inet.pim.stats", []_C_int{4, 2, 103, 1}}, + {"net.inet.tcp.ackonpush", []_C_int{4, 2, 6, 13}}, + {"net.inet.tcp.always_keepalive", []_C_int{4, 2, 6, 22}}, + {"net.inet.tcp.baddynamic", []_C_int{4, 2, 6, 6}}, + {"net.inet.tcp.drop", []_C_int{4, 2, 6, 19}}, + {"net.inet.tcp.ecn", []_C_int{4, 2, 6, 14}}, + {"net.inet.tcp.ident", []_C_int{4, 2, 6, 9}}, + {"net.inet.tcp.keepidle", []_C_int{4, 2, 6, 3}}, + {"net.inet.tcp.keepinittime", []_C_int{4, 2, 6, 2}}, + {"net.inet.tcp.keepintvl", []_C_int{4, 2, 6, 4}}, + {"net.inet.tcp.mssdflt", []_C_int{4, 2, 6, 11}}, + {"net.inet.tcp.reasslimit", []_C_int{4, 2, 6, 18}}, + {"net.inet.tcp.rfc1323", []_C_int{4, 2, 6, 1}}, + {"net.inet.tcp.rfc3390", []_C_int{4, 2, 6, 17}}, + {"net.inet.tcp.rstppslimit", []_C_int{4, 2, 6, 12}}, + {"net.inet.tcp.sack", []_C_int{4, 2, 6, 10}}, + {"net.inet.tcp.sackholelimit", []_C_int{4, 2, 6, 20}}, + {"net.inet.tcp.slowhz", []_C_int{4, 2, 6, 5}}, + 
{"net.inet.tcp.stats", []_C_int{4, 2, 6, 21}}, + {"net.inet.tcp.synbucketlimit", []_C_int{4, 2, 6, 16}}, + {"net.inet.tcp.syncachelimit", []_C_int{4, 2, 6, 15}}, + {"net.inet.udp.baddynamic", []_C_int{4, 2, 17, 2}}, + {"net.inet.udp.checksum", []_C_int{4, 2, 17, 1}}, + {"net.inet.udp.recvspace", []_C_int{4, 2, 17, 3}}, + {"net.inet.udp.sendspace", []_C_int{4, 2, 17, 4}}, + {"net.inet.udp.stats", []_C_int{4, 2, 17, 5}}, + {"net.inet6.divert.recvspace", []_C_int{4, 24, 86, 1}}, + {"net.inet6.divert.sendspace", []_C_int{4, 24, 86, 2}}, + {"net.inet6.divert.stats", []_C_int{4, 24, 86, 3}}, + {"net.inet6.icmp6.errppslimit", []_C_int{4, 24, 30, 14}}, + {"net.inet6.icmp6.mtudisc_hiwat", []_C_int{4, 24, 30, 16}}, + {"net.inet6.icmp6.mtudisc_lowat", []_C_int{4, 24, 30, 17}}, + {"net.inet6.icmp6.nd6_debug", []_C_int{4, 24, 30, 18}}, + {"net.inet6.icmp6.nd6_delay", []_C_int{4, 24, 30, 8}}, + {"net.inet6.icmp6.nd6_maxnudhint", []_C_int{4, 24, 30, 15}}, + {"net.inet6.icmp6.nd6_mmaxtries", []_C_int{4, 24, 30, 10}}, + {"net.inet6.icmp6.nd6_prune", []_C_int{4, 24, 30, 6}}, + {"net.inet6.icmp6.nd6_umaxtries", []_C_int{4, 24, 30, 9}}, + {"net.inet6.icmp6.nd6_useloopback", []_C_int{4, 24, 30, 11}}, + {"net.inet6.icmp6.nodeinfo", []_C_int{4, 24, 30, 13}}, + {"net.inet6.icmp6.rediraccept", []_C_int{4, 24, 30, 2}}, + {"net.inet6.icmp6.redirtimeout", []_C_int{4, 24, 30, 3}}, + {"net.inet6.ip6.accept_rtadv", []_C_int{4, 24, 17, 12}}, + {"net.inet6.ip6.auto_flowlabel", []_C_int{4, 24, 17, 17}}, + {"net.inet6.ip6.dad_count", []_C_int{4, 24, 17, 16}}, + {"net.inet6.ip6.dad_pending", []_C_int{4, 24, 17, 49}}, + {"net.inet6.ip6.defmcasthlim", []_C_int{4, 24, 17, 18}}, + {"net.inet6.ip6.forwarding", []_C_int{4, 24, 17, 1}}, + {"net.inet6.ip6.forwsrcrt", []_C_int{4, 24, 17, 5}}, + {"net.inet6.ip6.hdrnestlimit", []_C_int{4, 24, 17, 15}}, + {"net.inet6.ip6.hlim", []_C_int{4, 24, 17, 3}}, + {"net.inet6.ip6.log_interval", []_C_int{4, 24, 17, 14}}, + {"net.inet6.ip6.maxdynroutes", []_C_int{4, 24, 17, 48}}, + {"net.inet6.ip6.maxfragpackets", []_C_int{4, 24, 17, 9}}, + {"net.inet6.ip6.maxfrags", []_C_int{4, 24, 17, 41}}, + {"net.inet6.ip6.maxifdefrouters", []_C_int{4, 24, 17, 47}}, + {"net.inet6.ip6.maxifprefixes", []_C_int{4, 24, 17, 46}}, + {"net.inet6.ip6.mforwarding", []_C_int{4, 24, 17, 42}}, + {"net.inet6.ip6.mrtproto", []_C_int{4, 24, 17, 8}}, + {"net.inet6.ip6.mtudisctimeout", []_C_int{4, 24, 17, 50}}, + {"net.inet6.ip6.multicast_mtudisc", []_C_int{4, 24, 17, 44}}, + {"net.inet6.ip6.multipath", []_C_int{4, 24, 17, 43}}, + {"net.inet6.ip6.neighborgcthresh", []_C_int{4, 24, 17, 45}}, + {"net.inet6.ip6.redirect", []_C_int{4, 24, 17, 2}}, + {"net.inet6.ip6.rr_prune", []_C_int{4, 24, 17, 22}}, + {"net.inet6.ip6.sourcecheck", []_C_int{4, 24, 17, 10}}, + {"net.inet6.ip6.sourcecheck_logint", []_C_int{4, 24, 17, 11}}, + {"net.inet6.ip6.use_deprecated", []_C_int{4, 24, 17, 21}}, + {"net.inet6.ip6.v6only", []_C_int{4, 24, 17, 24}}, + {"net.key.sadb_dump", []_C_int{4, 30, 1}}, + {"net.key.spd_dump", []_C_int{4, 30, 2}}, + {"net.mpls.ifq.congestion", []_C_int{4, 33, 3, 4}}, + {"net.mpls.ifq.drops", []_C_int{4, 33, 3, 3}}, + {"net.mpls.ifq.len", []_C_int{4, 33, 3, 1}}, + {"net.mpls.ifq.maxlen", []_C_int{4, 33, 3, 2}}, + {"net.mpls.mapttl_ip", []_C_int{4, 33, 5}}, + {"net.mpls.mapttl_ip6", []_C_int{4, 33, 6}}, + {"net.mpls.maxloop_inkernel", []_C_int{4, 33, 4}}, + {"net.mpls.ttl", []_C_int{4, 33, 2}}, + {"net.pflow.stats", []_C_int{4, 34, 1}}, + {"net.pipex.enable", []_C_int{4, 35, 1}}, + {"vm.anonmin", []_C_int{2, 7}}, + 
{"vm.loadavg", []_C_int{2, 2}}, + {"vm.maxslp", []_C_int{2, 10}}, + {"vm.nkmempages", []_C_int{2, 6}}, + {"vm.psstrings", []_C_int{2, 3}}, + {"vm.swapencrypt.enable", []_C_int{2, 5, 0}}, + {"vm.swapencrypt.keyscreated", []_C_int{2, 5, 1}}, + {"vm.swapencrypt.keysdeleted", []_C_int{2, 5, 2}}, + {"vm.uspace", []_C_int{2, 11}}, + {"vm.uvmexp", []_C_int{2, 4}}, + {"vm.vmmeter", []_C_int{2, 1}}, + {"vm.vnodemin", []_C_int{2, 9}}, + {"vm.vtextmin", []_C_int{2, 8}}, +} diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go new file mode 100644 index 000000000000..83bb935b91c5 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go @@ -0,0 +1,270 @@ +// mksysctl_openbsd.pl +// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT + +package unix + +type mibentry struct { + ctlname string + ctloid []_C_int +} + +var sysctlMib = []mibentry{ + {"ddb.console", []_C_int{9, 6}}, + {"ddb.log", []_C_int{9, 7}}, + {"ddb.max_line", []_C_int{9, 3}}, + {"ddb.max_width", []_C_int{9, 2}}, + {"ddb.panic", []_C_int{9, 5}}, + {"ddb.radix", []_C_int{9, 1}}, + {"ddb.tab_stop_width", []_C_int{9, 4}}, + {"ddb.trigger", []_C_int{9, 8}}, + {"fs.posix.setuid", []_C_int{3, 1, 1}}, + {"hw.allowpowerdown", []_C_int{6, 22}}, + {"hw.byteorder", []_C_int{6, 4}}, + {"hw.cpuspeed", []_C_int{6, 12}}, + {"hw.diskcount", []_C_int{6, 10}}, + {"hw.disknames", []_C_int{6, 8}}, + {"hw.diskstats", []_C_int{6, 9}}, + {"hw.machine", []_C_int{6, 1}}, + {"hw.model", []_C_int{6, 2}}, + {"hw.ncpu", []_C_int{6, 3}}, + {"hw.ncpufound", []_C_int{6, 21}}, + {"hw.pagesize", []_C_int{6, 7}}, + {"hw.physmem", []_C_int{6, 19}}, + {"hw.product", []_C_int{6, 15}}, + {"hw.serialno", []_C_int{6, 17}}, + {"hw.setperf", []_C_int{6, 13}}, + {"hw.usermem", []_C_int{6, 20}}, + {"hw.uuid", []_C_int{6, 18}}, + {"hw.vendor", []_C_int{6, 14}}, + {"hw.version", []_C_int{6, 16}}, + {"kern.arandom", []_C_int{1, 37}}, + {"kern.argmax", []_C_int{1, 8}}, + {"kern.boottime", []_C_int{1, 21}}, + {"kern.bufcachepercent", []_C_int{1, 72}}, + {"kern.ccpu", []_C_int{1, 45}}, + {"kern.clockrate", []_C_int{1, 12}}, + {"kern.consdev", []_C_int{1, 75}}, + {"kern.cp_time", []_C_int{1, 40}}, + {"kern.cp_time2", []_C_int{1, 71}}, + {"kern.cryptodevallowsoft", []_C_int{1, 53}}, + {"kern.domainname", []_C_int{1, 22}}, + {"kern.file", []_C_int{1, 73}}, + {"kern.forkstat", []_C_int{1, 42}}, + {"kern.fscale", []_C_int{1, 46}}, + {"kern.fsync", []_C_int{1, 33}}, + {"kern.hostid", []_C_int{1, 11}}, + {"kern.hostname", []_C_int{1, 10}}, + {"kern.intrcnt.nintrcnt", []_C_int{1, 63, 1}}, + {"kern.job_control", []_C_int{1, 19}}, + {"kern.malloc.buckets", []_C_int{1, 39, 1}}, + {"kern.malloc.kmemnames", []_C_int{1, 39, 3}}, + {"kern.maxclusters", []_C_int{1, 67}}, + {"kern.maxfiles", []_C_int{1, 7}}, + {"kern.maxlocksperuid", []_C_int{1, 70}}, + {"kern.maxpartitions", []_C_int{1, 23}}, + {"kern.maxproc", []_C_int{1, 6}}, + {"kern.maxthread", []_C_int{1, 25}}, + {"kern.maxvnodes", []_C_int{1, 5}}, + {"kern.mbstat", []_C_int{1, 59}}, + {"kern.msgbuf", []_C_int{1, 48}}, + {"kern.msgbufsize", []_C_int{1, 38}}, + {"kern.nchstats", []_C_int{1, 41}}, + {"kern.netlivelocks", []_C_int{1, 76}}, + {"kern.nfiles", []_C_int{1, 56}}, + {"kern.ngroups", []_C_int{1, 18}}, + {"kern.nosuidcoredump", []_C_int{1, 32}}, + {"kern.nprocs", []_C_int{1, 47}}, + {"kern.nselcoll", []_C_int{1, 43}}, + {"kern.nthreads", []_C_int{1, 26}}, + {"kern.numvnodes", []_C_int{1, 58}}, + {"kern.osrelease", []_C_int{1, 2}}, + {"kern.osrevision", []_C_int{1, 
3}}, + {"kern.ostype", []_C_int{1, 1}}, + {"kern.osversion", []_C_int{1, 27}}, + {"kern.pool_debug", []_C_int{1, 77}}, + {"kern.posix1version", []_C_int{1, 17}}, + {"kern.proc", []_C_int{1, 66}}, + {"kern.random", []_C_int{1, 31}}, + {"kern.rawpartition", []_C_int{1, 24}}, + {"kern.saved_ids", []_C_int{1, 20}}, + {"kern.securelevel", []_C_int{1, 9}}, + {"kern.seminfo", []_C_int{1, 61}}, + {"kern.shminfo", []_C_int{1, 62}}, + {"kern.somaxconn", []_C_int{1, 28}}, + {"kern.sominconn", []_C_int{1, 29}}, + {"kern.splassert", []_C_int{1, 54}}, + {"kern.stackgap_random", []_C_int{1, 50}}, + {"kern.sysvipc_info", []_C_int{1, 51}}, + {"kern.sysvmsg", []_C_int{1, 34}}, + {"kern.sysvsem", []_C_int{1, 35}}, + {"kern.sysvshm", []_C_int{1, 36}}, + {"kern.timecounter.choice", []_C_int{1, 69, 4}}, + {"kern.timecounter.hardware", []_C_int{1, 69, 3}}, + {"kern.timecounter.tick", []_C_int{1, 69, 1}}, + {"kern.timecounter.timestepwarnings", []_C_int{1, 69, 2}}, + {"kern.tty.maxptys", []_C_int{1, 44, 6}}, + {"kern.tty.nptys", []_C_int{1, 44, 7}}, + {"kern.tty.tk_cancc", []_C_int{1, 44, 4}}, + {"kern.tty.tk_nin", []_C_int{1, 44, 1}}, + {"kern.tty.tk_nout", []_C_int{1, 44, 2}}, + {"kern.tty.tk_rawcc", []_C_int{1, 44, 3}}, + {"kern.tty.ttyinfo", []_C_int{1, 44, 5}}, + {"kern.ttycount", []_C_int{1, 57}}, + {"kern.userasymcrypto", []_C_int{1, 60}}, + {"kern.usercrypto", []_C_int{1, 52}}, + {"kern.usermount", []_C_int{1, 30}}, + {"kern.version", []_C_int{1, 4}}, + {"kern.vnode", []_C_int{1, 13}}, + {"kern.watchdog.auto", []_C_int{1, 64, 2}}, + {"kern.watchdog.period", []_C_int{1, 64, 1}}, + {"net.bpf.bufsize", []_C_int{4, 31, 1}}, + {"net.bpf.maxbufsize", []_C_int{4, 31, 2}}, + {"net.inet.ah.enable", []_C_int{4, 2, 51, 1}}, + {"net.inet.ah.stats", []_C_int{4, 2, 51, 2}}, + {"net.inet.carp.allow", []_C_int{4, 2, 112, 1}}, + {"net.inet.carp.log", []_C_int{4, 2, 112, 3}}, + {"net.inet.carp.preempt", []_C_int{4, 2, 112, 2}}, + {"net.inet.carp.stats", []_C_int{4, 2, 112, 4}}, + {"net.inet.divert.recvspace", []_C_int{4, 2, 258, 1}}, + {"net.inet.divert.sendspace", []_C_int{4, 2, 258, 2}}, + {"net.inet.divert.stats", []_C_int{4, 2, 258, 3}}, + {"net.inet.esp.enable", []_C_int{4, 2, 50, 1}}, + {"net.inet.esp.stats", []_C_int{4, 2, 50, 4}}, + {"net.inet.esp.udpencap", []_C_int{4, 2, 50, 2}}, + {"net.inet.esp.udpencap_port", []_C_int{4, 2, 50, 3}}, + {"net.inet.etherip.allow", []_C_int{4, 2, 97, 1}}, + {"net.inet.etherip.stats", []_C_int{4, 2, 97, 2}}, + {"net.inet.gre.allow", []_C_int{4, 2, 47, 1}}, + {"net.inet.gre.wccp", []_C_int{4, 2, 47, 2}}, + {"net.inet.icmp.bmcastecho", []_C_int{4, 2, 1, 2}}, + {"net.inet.icmp.errppslimit", []_C_int{4, 2, 1, 3}}, + {"net.inet.icmp.maskrepl", []_C_int{4, 2, 1, 1}}, + {"net.inet.icmp.rediraccept", []_C_int{4, 2, 1, 4}}, + {"net.inet.icmp.redirtimeout", []_C_int{4, 2, 1, 5}}, + {"net.inet.icmp.stats", []_C_int{4, 2, 1, 7}}, + {"net.inet.icmp.tstamprepl", []_C_int{4, 2, 1, 6}}, + {"net.inet.igmp.stats", []_C_int{4, 2, 2, 1}}, + {"net.inet.ip.arpqueued", []_C_int{4, 2, 0, 36}}, + {"net.inet.ip.encdebug", []_C_int{4, 2, 0, 12}}, + {"net.inet.ip.forwarding", []_C_int{4, 2, 0, 1}}, + {"net.inet.ip.ifq.congestion", []_C_int{4, 2, 0, 30, 4}}, + {"net.inet.ip.ifq.drops", []_C_int{4, 2, 0, 30, 3}}, + {"net.inet.ip.ifq.len", []_C_int{4, 2, 0, 30, 1}}, + {"net.inet.ip.ifq.maxlen", []_C_int{4, 2, 0, 30, 2}}, + {"net.inet.ip.maxqueue", []_C_int{4, 2, 0, 11}}, + {"net.inet.ip.mforwarding", []_C_int{4, 2, 0, 31}}, + {"net.inet.ip.mrtproto", []_C_int{4, 2, 0, 34}}, + {"net.inet.ip.mrtstats", []_C_int{4, 
2, 0, 35}}, + {"net.inet.ip.mtu", []_C_int{4, 2, 0, 4}}, + {"net.inet.ip.mtudisc", []_C_int{4, 2, 0, 27}}, + {"net.inet.ip.mtudisctimeout", []_C_int{4, 2, 0, 28}}, + {"net.inet.ip.multipath", []_C_int{4, 2, 0, 32}}, + {"net.inet.ip.portfirst", []_C_int{4, 2, 0, 7}}, + {"net.inet.ip.porthifirst", []_C_int{4, 2, 0, 9}}, + {"net.inet.ip.porthilast", []_C_int{4, 2, 0, 10}}, + {"net.inet.ip.portlast", []_C_int{4, 2, 0, 8}}, + {"net.inet.ip.redirect", []_C_int{4, 2, 0, 2}}, + {"net.inet.ip.sourceroute", []_C_int{4, 2, 0, 5}}, + {"net.inet.ip.stats", []_C_int{4, 2, 0, 33}}, + {"net.inet.ip.ttl", []_C_int{4, 2, 0, 3}}, + {"net.inet.ipcomp.enable", []_C_int{4, 2, 108, 1}}, + {"net.inet.ipcomp.stats", []_C_int{4, 2, 108, 2}}, + {"net.inet.ipip.allow", []_C_int{4, 2, 4, 1}}, + {"net.inet.ipip.stats", []_C_int{4, 2, 4, 2}}, + {"net.inet.mobileip.allow", []_C_int{4, 2, 55, 1}}, + {"net.inet.pfsync.stats", []_C_int{4, 2, 240, 1}}, + {"net.inet.pim.stats", []_C_int{4, 2, 103, 1}}, + {"net.inet.tcp.ackonpush", []_C_int{4, 2, 6, 13}}, + {"net.inet.tcp.always_keepalive", []_C_int{4, 2, 6, 22}}, + {"net.inet.tcp.baddynamic", []_C_int{4, 2, 6, 6}}, + {"net.inet.tcp.drop", []_C_int{4, 2, 6, 19}}, + {"net.inet.tcp.ecn", []_C_int{4, 2, 6, 14}}, + {"net.inet.tcp.ident", []_C_int{4, 2, 6, 9}}, + {"net.inet.tcp.keepidle", []_C_int{4, 2, 6, 3}}, + {"net.inet.tcp.keepinittime", []_C_int{4, 2, 6, 2}}, + {"net.inet.tcp.keepintvl", []_C_int{4, 2, 6, 4}}, + {"net.inet.tcp.mssdflt", []_C_int{4, 2, 6, 11}}, + {"net.inet.tcp.reasslimit", []_C_int{4, 2, 6, 18}}, + {"net.inet.tcp.rfc1323", []_C_int{4, 2, 6, 1}}, + {"net.inet.tcp.rfc3390", []_C_int{4, 2, 6, 17}}, + {"net.inet.tcp.rstppslimit", []_C_int{4, 2, 6, 12}}, + {"net.inet.tcp.sack", []_C_int{4, 2, 6, 10}}, + {"net.inet.tcp.sackholelimit", []_C_int{4, 2, 6, 20}}, + {"net.inet.tcp.slowhz", []_C_int{4, 2, 6, 5}}, + {"net.inet.tcp.stats", []_C_int{4, 2, 6, 21}}, + {"net.inet.tcp.synbucketlimit", []_C_int{4, 2, 6, 16}}, + {"net.inet.tcp.syncachelimit", []_C_int{4, 2, 6, 15}}, + {"net.inet.udp.baddynamic", []_C_int{4, 2, 17, 2}}, + {"net.inet.udp.checksum", []_C_int{4, 2, 17, 1}}, + {"net.inet.udp.recvspace", []_C_int{4, 2, 17, 3}}, + {"net.inet.udp.sendspace", []_C_int{4, 2, 17, 4}}, + {"net.inet.udp.stats", []_C_int{4, 2, 17, 5}}, + {"net.inet6.divert.recvspace", []_C_int{4, 24, 86, 1}}, + {"net.inet6.divert.sendspace", []_C_int{4, 24, 86, 2}}, + {"net.inet6.divert.stats", []_C_int{4, 24, 86, 3}}, + {"net.inet6.icmp6.errppslimit", []_C_int{4, 24, 30, 14}}, + {"net.inet6.icmp6.mtudisc_hiwat", []_C_int{4, 24, 30, 16}}, + {"net.inet6.icmp6.mtudisc_lowat", []_C_int{4, 24, 30, 17}}, + {"net.inet6.icmp6.nd6_debug", []_C_int{4, 24, 30, 18}}, + {"net.inet6.icmp6.nd6_delay", []_C_int{4, 24, 30, 8}}, + {"net.inet6.icmp6.nd6_maxnudhint", []_C_int{4, 24, 30, 15}}, + {"net.inet6.icmp6.nd6_mmaxtries", []_C_int{4, 24, 30, 10}}, + {"net.inet6.icmp6.nd6_prune", []_C_int{4, 24, 30, 6}}, + {"net.inet6.icmp6.nd6_umaxtries", []_C_int{4, 24, 30, 9}}, + {"net.inet6.icmp6.nd6_useloopback", []_C_int{4, 24, 30, 11}}, + {"net.inet6.icmp6.nodeinfo", []_C_int{4, 24, 30, 13}}, + {"net.inet6.icmp6.rediraccept", []_C_int{4, 24, 30, 2}}, + {"net.inet6.icmp6.redirtimeout", []_C_int{4, 24, 30, 3}}, + {"net.inet6.ip6.accept_rtadv", []_C_int{4, 24, 17, 12}}, + {"net.inet6.ip6.auto_flowlabel", []_C_int{4, 24, 17, 17}}, + {"net.inet6.ip6.dad_count", []_C_int{4, 24, 17, 16}}, + {"net.inet6.ip6.dad_pending", []_C_int{4, 24, 17, 49}}, + {"net.inet6.ip6.defmcasthlim", []_C_int{4, 24, 17, 18}}, + 
{"net.inet6.ip6.forwarding", []_C_int{4, 24, 17, 1}}, + {"net.inet6.ip6.forwsrcrt", []_C_int{4, 24, 17, 5}}, + {"net.inet6.ip6.hdrnestlimit", []_C_int{4, 24, 17, 15}}, + {"net.inet6.ip6.hlim", []_C_int{4, 24, 17, 3}}, + {"net.inet6.ip6.log_interval", []_C_int{4, 24, 17, 14}}, + {"net.inet6.ip6.maxdynroutes", []_C_int{4, 24, 17, 48}}, + {"net.inet6.ip6.maxfragpackets", []_C_int{4, 24, 17, 9}}, + {"net.inet6.ip6.maxfrags", []_C_int{4, 24, 17, 41}}, + {"net.inet6.ip6.maxifdefrouters", []_C_int{4, 24, 17, 47}}, + {"net.inet6.ip6.maxifprefixes", []_C_int{4, 24, 17, 46}}, + {"net.inet6.ip6.mforwarding", []_C_int{4, 24, 17, 42}}, + {"net.inet6.ip6.mrtproto", []_C_int{4, 24, 17, 8}}, + {"net.inet6.ip6.mtudisctimeout", []_C_int{4, 24, 17, 50}}, + {"net.inet6.ip6.multicast_mtudisc", []_C_int{4, 24, 17, 44}}, + {"net.inet6.ip6.multipath", []_C_int{4, 24, 17, 43}}, + {"net.inet6.ip6.neighborgcthresh", []_C_int{4, 24, 17, 45}}, + {"net.inet6.ip6.redirect", []_C_int{4, 24, 17, 2}}, + {"net.inet6.ip6.rr_prune", []_C_int{4, 24, 17, 22}}, + {"net.inet6.ip6.sourcecheck", []_C_int{4, 24, 17, 10}}, + {"net.inet6.ip6.sourcecheck_logint", []_C_int{4, 24, 17, 11}}, + {"net.inet6.ip6.use_deprecated", []_C_int{4, 24, 17, 21}}, + {"net.inet6.ip6.v6only", []_C_int{4, 24, 17, 24}}, + {"net.key.sadb_dump", []_C_int{4, 30, 1}}, + {"net.key.spd_dump", []_C_int{4, 30, 2}}, + {"net.mpls.ifq.congestion", []_C_int{4, 33, 3, 4}}, + {"net.mpls.ifq.drops", []_C_int{4, 33, 3, 3}}, + {"net.mpls.ifq.len", []_C_int{4, 33, 3, 1}}, + {"net.mpls.ifq.maxlen", []_C_int{4, 33, 3, 2}}, + {"net.mpls.mapttl_ip", []_C_int{4, 33, 5}}, + {"net.mpls.mapttl_ip6", []_C_int{4, 33, 6}}, + {"net.mpls.maxloop_inkernel", []_C_int{4, 33, 4}}, + {"net.mpls.ttl", []_C_int{4, 33, 2}}, + {"net.pflow.stats", []_C_int{4, 34, 1}}, + {"net.pipex.enable", []_C_int{4, 35, 1}}, + {"vm.anonmin", []_C_int{2, 7}}, + {"vm.loadavg", []_C_int{2, 2}}, + {"vm.maxslp", []_C_int{2, 10}}, + {"vm.nkmempages", []_C_int{2, 6}}, + {"vm.psstrings", []_C_int{2, 3}}, + {"vm.swapencrypt.enable", []_C_int{2, 5, 0}}, + {"vm.swapencrypt.keyscreated", []_C_int{2, 5, 1}}, + {"vm.swapencrypt.keysdeleted", []_C_int{2, 5, 2}}, + {"vm.uspace", []_C_int{2, 11}}, + {"vm.uvmexp", []_C_int{2, 4}}, + {"vm.vmmeter", []_C_int{2, 1}}, + {"vm.vnodemin", []_C_int{2, 9}}, + {"vm.vtextmin", []_C_int{2, 8}}, +} diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index cef4fed02c57..95ab12903ec5 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -385,4 +385,6 @@ const ( SYS_PKEY_MPROTECT = 380 SYS_PKEY_ALLOC = 381 SYS_PKEY_FREE = 382 + SYS_STATX = 383 + SYS_ARCH_PRCTL = 384 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 49bfa1270a88..c5dabf2e4518 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -338,4 +338,5 @@ const ( SYS_PKEY_MPROTECT = 329 SYS_PKEY_ALLOC = 330 SYS_PKEY_FREE = 331 + SYS_STATX = 332 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 97b182ef5b0e..ab7fa5fd3939 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -358,4 +358,5 @@ const ( SYS_PKEY_MPROTECT = 394 SYS_PKEY_ALLOC = 395 SYS_PKEY_FREE = 396 + SYS_STATX = 397 ) diff --git 
a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 640784357fef..b1c6b4bd3b39 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -282,4 +282,5 @@ const ( SYS_PKEY_MPROTECT = 288 SYS_PKEY_ALLOC = 289 SYS_PKEY_FREE = 290 + SYS_STATX = 291 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 939567c0997f..2e9aa7a3e7ec 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -371,4 +371,5 @@ const ( SYS_PKEY_MPROTECT = 4363 SYS_PKEY_ALLOC = 4364 SYS_PKEY_FREE = 4365 + SYS_STATX = 4366 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 09db959690a0..92827635aae4 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -331,4 +331,5 @@ const ( SYS_PKEY_MPROTECT = 5323 SYS_PKEY_ALLOC = 5324 SYS_PKEY_FREE = 5325 + SYS_STATX = 5326 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index d1b872a09bd4..45bd3fd6c84b 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -331,4 +331,5 @@ const ( SYS_PKEY_MPROTECT = 5323 SYS_PKEY_ALLOC = 5324 SYS_PKEY_FREE = 5325 + SYS_STATX = 5326 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index 82ba20f28b4e..62ccac4b7421 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -371,4 +371,5 @@ const ( SYS_PKEY_MPROTECT = 4363 SYS_PKEY_ALLOC = 4364 SYS_PKEY_FREE = 4365 + SYS_STATX = 4366 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index 8944448aee20..dfe5dab67eee 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -366,4 +366,5 @@ const ( SYS_PREADV2 = 380 SYS_PWRITEV2 = 381 SYS_KEXEC_FILE_LOAD = 382 + SYS_STATX = 383 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 90a039be4fdb..eca97f738b73 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -366,4 +366,5 @@ const ( SYS_PREADV2 = 380 SYS_PWRITEV2 = 381 SYS_KEXEC_FILE_LOAD = 382 + SYS_STATX = 383 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index aab0cdb1838b..8ea18e6c254e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -306,6 +306,8 @@ const ( SYS_COPY_FILE_RANGE = 375 SYS_PREADV2 = 376 SYS_PWRITEV2 = 377 + SYS_S390_GUARDED_STORAGE = 378 + SYS_STATX = 379 SYS_SELECT = 142 SYS_GETRLIMIT = 191 SYS_LCHOWN = 198 diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go index e61d78a54ff5..4667c7b277d1 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go @@ -460,3 +460,22 @@ const ( AT_SYMLINK_FOLLOW = 0x40 AT_SYMLINK_NOFOLLOW = 
0x20 ) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 2619155ff8c5..3f33b18fc740 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -470,3 +470,22 @@ const ( AT_SYMLINK_FOLLOW = 0x40 AT_SYMLINK_NOFOLLOW = 0x20 ) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go index 4dca0d4db265..463a28ba6f39 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go @@ -461,3 +461,22 @@ const ( AT_SYMLINK_FOLLOW = 0x40 AT_SYMLINK_NOFOLLOW = 0x20 ) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index f2881fd14277..1ec20a0025c5 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -1,6 +1,7 @@ +// cgo -godefs types_darwin.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ // +build arm64,darwin -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs types_darwin.go package unix @@ -469,3 +470,22 @@ const ( AT_SYMLINK_FOLLOW = 0x40 AT_SYMLINK_NOFOLLOW = 0x20 ) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go index 67c6bf883cbf..ab515c3e1a43 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go @@ -446,3 +446,22 @@ const ( AT_FDCWD = 0xfffafdcd AT_SYMLINK_NOFOLLOW = 0x1 ) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index 5b28bcbbac9e..18f7816009c1 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -516,6 +516,26 @@ const ( AT_SYMLINK_NOFOLLOW = 0x200 ) +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLINIGNEOF = 0x2000 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) + type CapRights struct { Rights [2]uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index c65d89e4972a..dd0db2a5eab6 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -519,6 +519,26 @@ const ( AT_SYMLINK_NOFOLLOW = 0x200 ) +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLINIGNEOF = 0x2000 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) + type CapRights struct { Rights [2]uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index 42c0a502cf8f..473d3dcf0846 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -519,6 +519,26 @@ const ( AT_SYMLINK_NOFOLLOW = 0x200 ) +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLINIGNEOF = 0x2000 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) + type CapRights struct { Rights [2]uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 7b36896ea3d7..c6de94269d79 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -425,7 +425,7 @@ const ( IFLA_LINKINFO = 0x12 IFLA_NET_NS_PID = 0x13 IFLA_IFALIAS = 0x14 - IFLA_MAX = 0x2b + IFLA_MAX = 0x2c RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -621,12 +621,12 @@ type 
Sysinfo_t struct { } type Utsname struct { - Sysname [65]int8 - Nodename [65]int8 - Release [65]int8 - Version [65]int8 - Machine [65]int8 - Domainname [65]int8 + Sysname [65]byte + Nodename [65]byte + Release [65]byte + Version [65]byte + Machine [65]byte + Domainname [65]byte } type Ustat_t struct { @@ -673,8 +673,6 @@ const RNDGETENTCNT = 0x80045200 const PERF_IOC_FLAG_GROUP = 0x1 -const _SC_PAGESIZE = 0x1e - type Termios struct { Iflag uint32 Oflag uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index e54fa9847d84..4ea42dfc2ea7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -429,7 +429,7 @@ const ( IFLA_LINKINFO = 0x12 IFLA_NET_NS_PID = 0x13 IFLA_IFALIAS = 0x14 - IFLA_MAX = 0x2b + IFLA_MAX = 0x2c RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -637,12 +637,12 @@ type Sysinfo_t struct { } type Utsname struct { - Sysname [65]int8 - Nodename [65]int8 - Release [65]int8 - Version [65]int8 - Machine [65]int8 - Domainname [65]int8 + Sysname [65]byte + Nodename [65]byte + Release [65]byte + Version [65]byte + Machine [65]byte + Domainname [65]byte } type Ustat_t struct { @@ -691,8 +691,6 @@ const RNDGETENTCNT = 0x80045200 const PERF_IOC_FLAG_GROUP = 0x1 -const _SC_PAGESIZE = 0x1e - type Termios struct { Iflag uint32 Oflag uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index bff6ce25805b..f86d68388224 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -429,7 +429,7 @@ const ( IFLA_LINKINFO = 0x12 IFLA_NET_NS_PID = 0x13 IFLA_IFALIAS = 0x14 - IFLA_MAX = 0x2b + IFLA_MAX = 0x2c RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -609,12 +609,12 @@ type Sysinfo_t struct { } type Utsname struct { - Sysname [65]uint8 - Nodename [65]uint8 - Release [65]uint8 - Version [65]uint8 - Machine [65]uint8 - Domainname [65]uint8 + Sysname [65]byte + Nodename [65]byte + Release [65]byte + Version [65]byte + Machine [65]byte + Domainname [65]byte } type Ustat_t struct { @@ -662,8 +662,6 @@ const RNDGETENTCNT = 0x80045200 const PERF_IOC_FLAG_GROUP = 0x1 -const _SC_PAGESIZE = 0x1e - type Termios struct { Iflag uint32 Oflag uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index a3d0cc4a1568..45c10b74295c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -430,7 +430,7 @@ const ( IFLA_LINKINFO = 0x12 IFLA_NET_NS_PID = 0x13 IFLA_IFALIAS = 0x14 - IFLA_MAX = 0x2b + IFLA_MAX = 0x2c RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -615,12 +615,12 @@ type Sysinfo_t struct { } type Utsname struct { - Sysname [65]int8 - Nodename [65]int8 - Release [65]int8 - Version [65]int8 - Machine [65]int8 - Domainname [65]int8 + Sysname [65]byte + Nodename [65]byte + Release [65]byte + Version [65]byte + Machine [65]byte + Domainname [65]byte } type Ustat_t struct { @@ -670,8 +670,6 @@ const RNDGETENTCNT = 0x80045200 const PERF_IOC_FLAG_GROUP = 0x1 -const _SC_PAGESIZE = 0x1e - type Termios struct { Iflag uint32 Oflag uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 8fa6603fb8ed..4cc0a1c91f7d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ 
b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -428,7 +428,7 @@ const ( IFLA_LINKINFO = 0x12 IFLA_NET_NS_PID = 0x13 IFLA_IFALIAS = 0x14 - IFLA_MAX = 0x2b + IFLA_MAX = 0x2c RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -614,12 +614,12 @@ type Sysinfo_t struct { } type Utsname struct { - Sysname [65]int8 - Nodename [65]int8 - Release [65]int8 - Version [65]int8 - Machine [65]int8 - Domainname [65]int8 + Sysname [65]byte + Nodename [65]byte + Release [65]byte + Version [65]byte + Machine [65]byte + Domainname [65]byte } type Ustat_t struct { @@ -667,8 +667,6 @@ const RNDGETENTCNT = 0x40045200 const PERF_IOC_FLAG_GROUP = 0x1 -const _SC_PAGESIZE = 0x1e - type Termios struct { Iflag uint32 Oflag uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index 3e5fc62524e2..d9df08789f93 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -430,7 +430,7 @@ const ( IFLA_LINKINFO = 0x12 IFLA_NET_NS_PID = 0x13 IFLA_IFALIAS = 0x14 - IFLA_MAX = 0x2b + IFLA_MAX = 0x2c RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -618,12 +618,12 @@ type Sysinfo_t struct { } type Utsname struct { - Sysname [65]int8 - Nodename [65]int8 - Release [65]int8 - Version [65]int8 - Machine [65]int8 - Domainname [65]int8 + Sysname [65]byte + Nodename [65]byte + Release [65]byte + Version [65]byte + Machine [65]byte + Domainname [65]byte } type Ustat_t struct { @@ -672,8 +672,6 @@ const RNDGETENTCNT = 0x40045200 const PERF_IOC_FLAG_GROUP = 0x1 -const _SC_PAGESIZE = 0x1e - type Termios struct { Iflag uint32 Oflag uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index f9bd1ab0cf89..15e6b4b4b1bc 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -430,7 +430,7 @@ const ( IFLA_LINKINFO = 0x12 IFLA_NET_NS_PID = 0x13 IFLA_IFALIAS = 0x14 - IFLA_MAX = 0x2b + IFLA_MAX = 0x2c RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -618,12 +618,12 @@ type Sysinfo_t struct { } type Utsname struct { - Sysname [65]int8 - Nodename [65]int8 - Release [65]int8 - Version [65]int8 - Machine [65]int8 - Domainname [65]int8 + Sysname [65]byte + Nodename [65]byte + Release [65]byte + Version [65]byte + Machine [65]byte + Domainname [65]byte } type Ustat_t struct { @@ -672,8 +672,6 @@ const RNDGETENTCNT = 0x40045200 const PERF_IOC_FLAG_GROUP = 0x1 -const _SC_PAGESIZE = 0x1e - type Termios struct { Iflag uint32 Oflag uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index 74c542132701..b6c2d32dd839 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -428,7 +428,7 @@ const ( IFLA_LINKINFO = 0x12 IFLA_NET_NS_PID = 0x13 IFLA_IFALIAS = 0x14 - IFLA_MAX = 0x2b + IFLA_MAX = 0x2c RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -614,12 +614,12 @@ type Sysinfo_t struct { } type Utsname struct { - Sysname [65]int8 - Nodename [65]int8 - Release [65]int8 - Version [65]int8 - Machine [65]int8 - Domainname [65]int8 + Sysname [65]byte + Nodename [65]byte + Release [65]byte + Version [65]byte + Machine [65]byte + Domainname [65]byte } type Ustat_t struct { @@ -667,8 +667,6 @@ const RNDGETENTCNT = 0x40045200 const PERF_IOC_FLAG_GROUP = 0x1 -const 
_SC_PAGESIZE = 0x1e - type Termios struct { Iflag uint32 Oflag uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 2b0b18e94b76..3803e1062bac 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -431,7 +431,7 @@ const ( IFLA_LINKINFO = 0x12 IFLA_NET_NS_PID = 0x13 IFLA_IFALIAS = 0x14 - IFLA_MAX = 0x2b + IFLA_MAX = 0x2c RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -625,12 +625,12 @@ type Sysinfo_t struct { } type Utsname struct { - Sysname [65]uint8 - Nodename [65]uint8 - Release [65]uint8 - Version [65]uint8 - Machine [65]uint8 - Domainname [65]uint8 + Sysname [65]byte + Nodename [65]byte + Release [65]byte + Version [65]byte + Machine [65]byte + Domainname [65]byte } type Ustat_t struct { @@ -680,8 +680,6 @@ const RNDGETENTCNT = 0x40045200 const PERF_IOC_FLAG_GROUP = 0x1 -const _SC_PAGESIZE = 0x1e - type Termios struct { Iflag uint32 Oflag uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index b2b59992d41b..7ef31fe21304 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -431,7 +431,7 @@ const ( IFLA_LINKINFO = 0x12 IFLA_NET_NS_PID = 0x13 IFLA_IFALIAS = 0x14 - IFLA_MAX = 0x2b + IFLA_MAX = 0x2c RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -625,12 +625,12 @@ type Sysinfo_t struct { } type Utsname struct { - Sysname [65]uint8 - Nodename [65]uint8 - Release [65]uint8 - Version [65]uint8 - Machine [65]uint8 - Domainname [65]uint8 + Sysname [65]byte + Nodename [65]byte + Release [65]byte + Version [65]byte + Machine [65]byte + Domainname [65]byte } type Ustat_t struct { @@ -680,8 +680,6 @@ const RNDGETENTCNT = 0x40045200 const PERF_IOC_FLAG_GROUP = 0x1 -const _SC_PAGESIZE = 0x1e - type Termios struct { Iflag uint32 Oflag uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index 5e0aa6636cba..cb194f47178a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -430,7 +430,7 @@ const ( IFLA_LINKINFO = 0x12 IFLA_NET_NS_PID = 0x13 IFLA_IFALIAS = 0x14 - IFLA_MAX = 0x2b + IFLA_MAX = 0x2c RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -642,12 +642,12 @@ type Sysinfo_t struct { } type Utsname struct { - Sysname [65]int8 - Nodename [65]int8 - Release [65]int8 - Version [65]int8 - Machine [65]int8 - Domainname [65]int8 + Sysname [65]byte + Nodename [65]byte + Release [65]byte + Version [65]byte + Machine [65]byte + Domainname [65]byte } type Ustat_t struct { @@ -697,8 +697,6 @@ const RNDGETENTCNT = 0x80045200 const PERF_IOC_FLAG_GROUP = 0x1 -const _SC_PAGESIZE = 0x1e - type Termios struct { Iflag uint32 Oflag uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 22bdab961453..9dbbb1ce5255 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -601,12 +601,12 @@ type Sysinfo_t struct { } type Utsname struct { - Sysname [65]int8 - Nodename [65]int8 - Release [65]int8 - Version [65]int8 - Machine [65]int8 - Domainname [65]int8 + Sysname [65]byte + Nodename [65]byte + Release [65]byte + Version [65]byte + Machine [65]byte + Domainname [65]byte } type 
Ustat_t struct { @@ -652,8 +652,6 @@ type Sigset_t struct { X__val [16]uint64 } -const _SC_PAGESIZE = 0x1e - type Termios struct { Iflag uint32 Oflag uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go index 42f99c0a3096..dfe446bff7d7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go @@ -1,5 +1,5 @@ -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs types_netbsd.go +// cgo -godefs types_netbsd.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. // +build 386,netbsd @@ -387,6 +387,25 @@ const ( AT_SYMLINK_NOFOLLOW = 0x200 ) +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) + type Sysctlnode struct { Flags uint32 Num int32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go index ff290ba06974..1498c23c228b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go @@ -1,5 +1,5 @@ -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs types_netbsd.go +// cgo -godefs types_netbsd.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. // +build amd64,netbsd @@ -394,6 +394,25 @@ const ( AT_SYMLINK_NOFOLLOW = 0x200 ) +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) + type Sysctlnode struct { Flags uint32 Num int32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go index 66dbd7c0502f..d6711ce17050 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go @@ -1,5 +1,5 @@ -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs types_netbsd.go +// cgo -godefs types_netbsd.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. // +build arm,netbsd @@ -392,6 +392,25 @@ const ( AT_SYMLINK_NOFOLLOW = 0x200 ) +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) + type Sysctlnode struct { Flags uint32 Num int32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go index 20fc9f450cb5..af295c3d0c92 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go @@ -1,5 +1,5 @@ -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs types_openbsd.go +// cgo -godefs types_openbsd.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. 
// +build 386,openbsd @@ -444,3 +444,22 @@ const ( AT_FDCWD = -0x64 AT_SYMLINK_NOFOLLOW = 0x2 ) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go index 46fe9490c8fd..ae153e70c015 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go @@ -1,5 +1,5 @@ -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs types_openbsd.go +// cgo -godefs types_openbsd.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. // +build amd64,openbsd @@ -451,3 +451,22 @@ const ( AT_FDCWD = -0x64 AT_SYMLINK_NOFOLLOW = 0x2 ) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go index 62e1f7c04d14..35bb6195bf55 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go @@ -1,5 +1,5 @@ -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs types_openbsd.go +// cgo -godefs types_openbsd.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. // +build arm,openbsd @@ -437,3 +437,22 @@ const ( AT_FDCWD = -0x64 AT_SYMLINK_NOFOLLOW = 0x2 ) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go index 92336f9f9236..d4454524867a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go @@ -263,11 +263,11 @@ type FdSet struct { } type Utsname struct { - Sysname [257]int8 - Nodename [257]int8 - Release [257]int8 - Version [257]int8 - Machine [257]int8 + Sysname [257]byte + Nodename [257]byte + Release [257]byte + Version [257]byte + Machine [257]byte } type Ustat_t struct { @@ -413,8 +413,6 @@ type BpfHdr struct { Pad_cgo_0 [2]byte } -const _SC_PAGESIZE = 0xb - type Termios struct { Iflag uint32 Oflag uint32 @@ -440,3 +438,22 @@ type Winsize struct { Xpixel uint16 Ypixel uint16 } + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) diff --git a/vendor/google.golang.org/api/cloudkms/v1/cloudkms-api.json b/vendor/google.golang.org/api/cloudkms/v1/cloudkms-api.json deleted file mode 100644 index 2ef074320eed..000000000000 --- a/vendor/google.golang.org/api/cloudkms/v1/cloudkms-api.json +++ /dev/null @@ -1,1558 +0,0 @@ -{ - "batchPath": "batch", - "title": "Google Cloud Key Management Service (KMS) API", - "ownerName": "Google", - "resources": { - "projects": { - "resources": { - "locations": { 
- "resources": { - "keyRings": { - "resources": { - "cryptoKeys": { - "resources": { - "cryptoKeyVersions": { - "methods": { - "list": { - "response": { - "$ref": "ListCryptoKeyVersionsResponse" - }, - "parameterOrder": [ - "parent" - ], - "httpMethod": "GET", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "parameters": { - "pageToken": { - "location": "query", - "description": "Optional pagination token, returned earlier via\nListCryptoKeyVersionsResponse.next_page_token.", - "type": "string" - }, - "pageSize": { - "format": "int32", - "description": "Optional limit on the number of CryptoKeyVersions to\ninclude in the response. Further CryptoKeyVersions can\nsubsequently be obtained by including the\nListCryptoKeyVersionsResponse.next_page_token in a subsequent request.\nIf unspecified, the server will pick an appropriate default.", - "type": "integer", - "location": "query" - }, - "parent": { - "description": "Required. The resource name of the CryptoKey to list, in the format\n`projects/*/locations/*/keyRings/*/cryptoKeys/*`.", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", - "location": "path" - } - }, - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions", - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.list", - "path": "v1/{+parent}/cryptoKeyVersions", - "description": "Lists CryptoKeyVersions." - }, - "create": { - "httpMethod": "POST", - "parameterOrder": [ - "parent" - ], - "response": { - "$ref": "CryptoKeyVersion" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "parameters": { - "parent": { - "description": "Required. The name of the CryptoKey associated with\nthe CryptoKeyVersions.", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", - "location": "path" - } - }, - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions", - "path": "v1/{+parent}/cryptoKeyVersions", - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.create", - "request": { - "$ref": "CryptoKeyVersion" - }, - "description": "Create a new CryptoKeyVersion in a CryptoKey.\n\nThe server will assign the next sequential id. If unset,\nstate will be set to\nENABLED." 
- }, - "destroy": { - "response": { - "$ref": "CryptoKeyVersion" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "POST", - "parameters": { - "name": { - "description": "The resource name of the CryptoKeyVersion to destroy.", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+/cryptoKeyVersions/[^/]+$", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions/{cryptoKeyVersionsId}:destroy", - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.destroy", - "path": "v1/{+name}:destroy", - "description": "Schedule a CryptoKeyVersion for destruction.\n\nUpon calling this method, CryptoKeyVersion.state will be set to\nDESTROY_SCHEDULED\nand destroy_time will be set to a time 24\nhours in the future, at which point the state\nwill be changed to\nDESTROYED, and the key\nmaterial will be irrevocably destroyed.\n\nBefore the destroy_time is reached,\nRestoreCryptoKeyVersion may be called to reverse the process.", - "request": { - "$ref": "DestroyCryptoKeyVersionRequest" - } - }, - "restore": { - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.restore", - "path": "v1/{+name}:restore", - "description": "Restore a CryptoKeyVersion in the\nDESTROY_SCHEDULED,\nstate.\n\nUpon restoration of the CryptoKeyVersion, state\nwill be set to DISABLED,\nand destroy_time will be cleared.", - "request": { - "$ref": "RestoreCryptoKeyVersionRequest" - }, - "response": { - "$ref": "CryptoKeyVersion" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "POST", - "parameters": { - "name": { - "description": "The resource name of the CryptoKeyVersion to restore.", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+/cryptoKeyVersions/[^/]+$", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions/{cryptoKeyVersionsId}:restore" - }, - "get": { - "response": { - "$ref": "CryptoKeyVersion" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "GET", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "parameters": { - "name": { - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+/cryptoKeyVersions/[^/]+$", - "location": "path", - "description": "The name of the CryptoKeyVersion to get.", - "type": "string", - "required": true - } - }, - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions/{cryptoKeyVersionsId}", - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.get", - "path": "v1/{+name}", - "description": "Returns metadata for a given CryptoKeyVersion." - }, - "patch": { - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.patch", - "path": "v1/{+name}", - "description": "Update a CryptoKeyVersion's metadata.\n\nstate may be changed between\nENABLED and\nDISABLED using this\nmethod. 
See DestroyCryptoKeyVersion and RestoreCryptoKeyVersion to\nmove between other states.", - "request": { - "$ref": "CryptoKeyVersion" - }, - "response": { - "$ref": "CryptoKeyVersion" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "PATCH", - "parameters": { - "updateMask": { - "location": "query", - "format": "google-fieldmask", - "description": "Required list of fields to be updated in this request.", - "type": "string" - }, - "name": { - "description": "Output only. The resource name for this CryptoKeyVersion in the format\n`projects/*/locations/*/keyRings/*/cryptoKeys/*/cryptoKeyVersions/*`.", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+/cryptoKeyVersions/[^/]+$", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions/{cryptoKeyVersionsId}" - } - } - } - }, - "methods": { - "patch": { - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.patch", - "path": "v1/{+name}", - "description": "Update a CryptoKey.", - "request": { - "$ref": "CryptoKey" - }, - "response": { - "$ref": "CryptoKey" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "PATCH", - "parameters": { - "updateMask": { - "location": "query", - "format": "google-fieldmask", - "description": "Required list of fields to be updated in this request.", - "type": "string" - }, - "name": { - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", - "location": "path", - "description": "Output only. The resource name for this CryptoKey in the format\n`projects/*/locations/*/keyRings/*/cryptoKeys/*`.", - "type": "string", - "required": true - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}" - }, - "get": { - "response": { - "$ref": "CryptoKey" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "GET", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "parameters": { - "name": { - "description": "The name of the CryptoKey to get.", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", - "location": "path" - } - }, - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}", - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.get", - "path": "v1/{+name}", - "description": "Returns metadata for a given CryptoKey, as well as its\nprimary CryptoKeyVersion." - }, - "testIamPermissions": { - "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. 
This operation\nmay \"fail open\" without warning.", - "request": { - "$ref": "TestIamPermissionsRequest" - }, - "httpMethod": "POST", - "parameterOrder": [ - "resource" - ], - "response": { - "$ref": "TestIamPermissionsResponse" - }, - "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:testIamPermissions", - "path": "v1/{+resource}:testIamPermissions", - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.testIamPermissions" - }, - "decrypt": { - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.decrypt", - "path": "v1/{+name}:decrypt", - "request": { - "$ref": "DecryptRequest" - }, - "description": "Decrypts data that was protected by Encrypt.", - "response": { - "$ref": "DecryptResponse" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "POST", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "parameters": { - "name": { - "description": "Required. The resource name of the CryptoKey to use for decryption.\nThe server will choose the appropriate version.", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", - "location": "path" - } - }, - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:decrypt" - }, - "list": { - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys", - "path": "v1/{+parent}/cryptoKeys", - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.list", - "description": "Lists CryptoKeys.", - "httpMethod": "GET", - "parameterOrder": [ - "parent" - ], - "response": { - "$ref": "ListCryptoKeysResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "parameters": { - "parent": { - "location": "path", - "description": "Required. The resource name of the KeyRing to list, in the format\n`projects/*/locations/*/keyRings/*`.", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$" - }, - "pageToken": { - "location": "query", - "description": "Optional pagination token, returned earlier via\nListCryptoKeysResponse.next_page_token.", - "type": "string" - }, - "pageSize": { - "location": "query", - "format": "int32", - "description": "Optional limit on the number of CryptoKeys to include in the\nresponse. Further CryptoKeys can subsequently be obtained by\nincluding the ListCryptoKeysResponse.next_page_token in a subsequent\nrequest. If unspecified, the server will pick an appropriate default.", - "type": "integer" - } - } - }, - "encrypt": { - "response": { - "$ref": "EncryptResponse" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "POST", - "parameters": { - "name": { - "description": "Required. 
The resource name of the CryptoKey or CryptoKeyVersion\nto use for encryption.\n\nIf a CryptoKey is specified, the server will use its\nprimary version.", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/.+$", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:encrypt", - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.encrypt", - "path": "v1/{+name}:encrypt", - "description": "Encrypts data, so that it can only be recovered by a call to Decrypt.", - "request": { - "$ref": "EncryptRequest" - } - }, - "setIamPolicy": { - "path": "v1/{+resource}:setIamPolicy", - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.setIamPolicy", - "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.", - "request": { - "$ref": "SetIamPolicyRequest" - }, - "httpMethod": "POST", - "parameterOrder": [ - "resource" - ], - "response": { - "$ref": "Policy" - }, - "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:setIamPolicy" - }, - "create": { - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.create", - "path": "v1/{+parent}/cryptoKeys", - "description": "Create a new CryptoKey within a KeyRing.\n\nCryptoKey.purpose is required.", - "request": { - "$ref": "CryptoKey" - }, - "response": { - "$ref": "CryptoKey" - }, - "parameterOrder": [ - "parent" - ], - "httpMethod": "POST", - "parameters": { - "cryptoKeyId": { - "description": "Required. It must be unique within a KeyRing and match the regular\nexpression `[a-zA-Z0-9_-]{1,63}`", - "type": "string", - "location": "query" - }, - "parent": { - "description": "Required. 
The name of the KeyRing associated with the\nCryptoKeys.", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys" - }, - "updatePrimaryVersion": { - "response": { - "$ref": "CryptoKey" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "POST", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "parameters": { - "name": { - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", - "location": "path", - "description": "The resource name of the CryptoKey to update.", - "type": "string", - "required": true - } - }, - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:updatePrimaryVersion", - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.updatePrimaryVersion", - "path": "v1/{+name}:updatePrimaryVersion", - "request": { - "$ref": "UpdateCryptoKeyPrimaryVersionRequest" - }, - "description": "Update the version of a CryptoKey that will be used in Encrypt" - }, - "getIamPolicy": { - "httpMethod": "GET", - "parameterOrder": [ - "resource" - ], - "response": { - "$ref": "Policy" - }, - "parameters": { - "resource": { - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", - "location": "path", - "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", - "type": "string", - "required": true - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:getIamPolicy", - "path": "v1/{+resource}:getIamPolicy", - "id": "cloudkms.projects.locations.keyRings.cryptoKeys.getIamPolicy", - "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset." - } - } - } - }, - "methods": { - "list": { - "path": "v1/{+parent}/keyRings", - "id": "cloudkms.projects.locations.keyRings.list", - "description": "Lists KeyRings.", - "httpMethod": "GET", - "parameterOrder": [ - "parent" - ], - "response": { - "$ref": "ListKeyRingsResponse" - }, - "parameters": { - "pageSize": { - "format": "int32", - "description": "Optional limit on the number of KeyRings to include in the\nresponse. Further KeyRings can subsequently be obtained by\nincluding the ListKeyRingsResponse.next_page_token in a subsequent\nrequest. If unspecified, the server will pick an appropriate default.", - "type": "integer", - "location": "query" - }, - "parent": { - "location": "path", - "description": "Required. 
The resource name of the location associated with the\nKeyRings, in the format `projects/*/locations/*`.", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/locations/[^/]+$" - }, - "pageToken": { - "location": "query", - "description": "Optional pagination token, returned earlier via\nListKeyRingsResponse.next_page_token.", - "type": "string" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings" - }, - "create": { - "httpMethod": "POST", - "parameterOrder": [ - "parent" - ], - "response": { - "$ref": "KeyRing" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "parameters": { - "parent": { - "pattern": "^projects/[^/]+/locations/[^/]+$", - "location": "path", - "description": "Required. The resource name of the location associated with the\nKeyRings, in the format `projects/*/locations/*`.", - "type": "string", - "required": true - }, - "keyRingId": { - "description": "Required. It must be unique within a location and match the regular\nexpression `[a-zA-Z0-9_-]{1,63}`", - "type": "string", - "location": "query" - } - }, - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings", - "path": "v1/{+parent}/keyRings", - "id": "cloudkms.projects.locations.keyRings.create", - "request": { - "$ref": "KeyRing" - }, - "description": "Create a new KeyRing in a given Project and Location." - }, - "setIamPolicy": { - "response": { - "$ref": "Policy" - }, - "parameterOrder": [ - "resource" - ], - "httpMethod": "POST", - "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}:setIamPolicy", - "id": "cloudkms.projects.locations.keyRings.setIamPolicy", - "path": "v1/{+resource}:setIamPolicy", - "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.", - "request": { - "$ref": "SetIamPolicyRequest" - } - }, - "getIamPolicy": { - "response": { - "$ref": "Policy" - }, - "parameterOrder": [ - "resource" - ], - "httpMethod": "GET", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", - "location": "path" - } - }, - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}:getIamPolicy", - "id": "cloudkms.projects.locations.keyRings.getIamPolicy", - "path": "v1/{+resource}:getIamPolicy", - "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset." 
- }, - "get": { - "httpMethod": "GET", - "parameterOrder": [ - "name" - ], - "response": { - "$ref": "KeyRing" - }, - "parameters": { - "name": { - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", - "location": "path", - "description": "The name of the KeyRing to get.", - "type": "string", - "required": true - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}", - "path": "v1/{+name}", - "id": "cloudkms.projects.locations.keyRings.get", - "description": "Returns metadata for a given KeyRing." - }, - "testIamPermissions": { - "id": "cloudkms.projects.locations.keyRings.testIamPermissions", - "path": "v1/{+resource}:testIamPermissions", - "request": { - "$ref": "TestIamPermissionsRequest" - }, - "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.", - "response": { - "$ref": "TestIamPermissionsResponse" - }, - "parameterOrder": [ - "resource" - ], - "httpMethod": "POST", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", - "location": "path" - } - }, - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}:testIamPermissions" - } - } - } - }, - "methods": { - "get": { - "id": "cloudkms.projects.locations.get", - "path": "v1/{+name}", - "description": "Get information about a location.", - "response": { - "$ref": "Location" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "GET", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "parameters": { - "name": { - "location": "path", - "description": "Resource name for the location.", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/locations/[^/]+$" - } - }, - "flatPath": "v1/projects/{projectsId}/locations/{locationsId}" - }, - "list": { - "description": "Lists information about the supported locations for this service.", - "response": { - "$ref": "ListLocationsResponse" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "GET", - "parameters": { - "pageToken": { - "location": "query", - "description": "The standard list page token.", - "type": "string" - }, - "name": { - "pattern": "^projects/[^/]+$", - "location": "path", - "description": "The resource that owns the locations collection, if applicable.", - "type": "string", - "required": true - }, - "pageSize": { - "format": "int32", - "description": "The standard list page size.", - "type": "integer", - "location": "query" - }, - "filter": { - "description": "The standard list filter.", - "type": "string", - "location": "query" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "flatPath": "v1/projects/{projectsId}/locations", - "id": "cloudkms.projects.locations.list", - "path": "v1/{+name}/locations" - } - } - } - } - } - }, - "parameters": { - "upload_protocol": { - "location": "query", - "description": "Upload protocol for 
media (e.g. \"raw\", \"multipart\").", - "type": "string" - }, - "prettyPrint": { - "description": "Returns response with indentations and line breaks.", - "default": "true", - "type": "boolean", - "location": "query" - }, - "fields": { - "description": "Selector specifying which fields to include in a partial response.", - "type": "string", - "location": "query" - }, - "uploadType": { - "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", - "type": "string", - "location": "query" - }, - "callback": { - "description": "JSONP", - "type": "string", - "location": "query" - }, - "$.xgafv": { - "enumDescriptions": [ - "v1 error format", - "v2 error format" - ], - "location": "query", - "enum": [ - "1", - "2" - ], - "description": "V1 error format.", - "type": "string" - }, - "alt": { - "enum": [ - "json", - "media", - "proto" - ], - "type": "string", - "enumDescriptions": [ - "Responses with Content-Type of application/json", - "Media download with context-dependent Content-Type", - "Responses with Content-Type of application/x-protobuf" - ], - "location": "query", - "description": "Data format for response.", - "default": "json" - }, - "key": { - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", - "type": "string", - "location": "query" - }, - "access_token": { - "location": "query", - "description": "OAuth access token.", - "type": "string" - }, - "quotaUser": { - "location": "query", - "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", - "type": "string" - }, - "pp": { - "location": "query", - "description": "Pretty-print response.", - "default": "true", - "type": "boolean" - }, - "oauth_token": { - "description": "OAuth 2.0 token for the current user.", - "type": "string", - "location": "query" - }, - "bearer_token": { - "location": "query", - "description": "OAuth bearer token.", - "type": "string" - } - }, - "version": "v1", - "baseUrl": "https://cloudkms.googleapis.com/", - "servicePath": "", - "description": "Manages encryption for your cloud services the same way you do on-premises. You can generate, use, rotate, and destroy AES256 encryption keys.", - "kind": "discovery#restDescription", - "basePath": "", - "id": "cloudkms:v1", - "documentationLink": "https://cloud.google.com/kms/", - "revision": "20170725", - "discoveryVersion": "v1", - "version_module": "True", - "schemas": { - "SetIamPolicyRequest": { - "properties": { - "updateMask": { - "format": "google-fieldmask", - "description": "OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only\nthe fields in the mask will be modified. If no mask is provided, the\nfollowing default mask is used:\npaths: \"bindings, etag\"\nThis field is only used by Cloud IAM.", - "type": "string" - }, - "policy": { - "$ref": "Policy", - "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of\nthe policy is limited to a few 10s of KB. An empty policy is a\nvalid policy but certain Cloud Platform services (such as Projects)\nmight reject them." 
- } - }, - "id": "SetIamPolicyRequest", - "description": "Request message for `SetIamPolicy` method.", - "type": "object" - }, - "DecryptRequest": { - "description": "Request message for KeyManagementService.Decrypt.", - "type": "object", - "properties": { - "additionalAuthenticatedData": { - "format": "byte", - "description": "Optional data that must match the data originally supplied in\nEncryptRequest.additional_authenticated_data.", - "type": "string" - }, - "ciphertext": { - "format": "byte", - "description": "Required. The encrypted data originally returned in\nEncryptResponse.ciphertext.", - "type": "string" - } - }, - "id": "DecryptRequest" - }, - "Location": { - "properties": { - "locationId": { - "description": "The canonical id for this location. For example: `\"us-east1\"`.", - "type": "string" - }, - "metadata": { - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - }, - "description": "Service-specific metadata. For example the available capacity at the given\nlocation.", - "type": "object" - }, - "labels": { - "additionalProperties": { - "type": "string" - }, - "description": "Cross-service attributes for the location. For example\n\n {\"cloud.googleapis.com/region\": \"us-east1\"}", - "type": "object" - }, - "name": { - "description": "Resource name for the location, which may vary between implementations.\nFor example: `\"projects/example-project/locations/us-east1\"`", - "type": "string" - } - }, - "id": "Location", - "description": "A resource that represents Google Cloud Platform location.", - "type": "object" - }, - "ListCryptoKeysResponse": { - "description": "Response message for KeyManagementService.ListCryptoKeys.", - "type": "object", - "properties": { - "nextPageToken": { - "description": "A token to retrieve next page of results. Pass this value in\nListCryptoKeysRequest.page_token to retrieve the next page of results.", - "type": "string" - }, - "totalSize": { - "format": "int32", - "description": "The total number of CryptoKeys that matched the query.", - "type": "integer" - }, - "cryptoKeys": { - "description": "The list of CryptoKeys.", - "items": { - "$ref": "CryptoKey" - }, - "type": "array" - } - }, - "id": "ListCryptoKeysResponse" - }, - "Condition": { - "description": "A condition to be met.", - "type": "object", - "properties": { - "iam": { - "enumDescriptions": [ - "Default non-attribute.", - "Either principal or (if present) authority selector.", - "The principal (even if an authority selector is present), which\nmust only be used for attribution, not authorization.", - "An approver (distinct from the requester) that has authorized this\nrequest.\nWhen used with IN, the condition indicates that one of the approvers\nassociated with the request matches the specified principal, or is a\nmember of the specified group. Approvers can only grant additional\naccess, and are thus only used in a strictly positive context\n(e.g. ALLOW/IN or DENY/NOT_IN).", - "What types of justifications have been supplied with this request.\nString values should match enum names from tech.iam.JustificationType,\ne.g. \"MANUAL_STRING\". It is not permitted to grant access based on\nthe *absence* of a justification, so justification conditions can only\nbe used in a \"positive\" context (e.g., ALLOW/IN or DENY/NOT_IN).\n\nMultiple justifications, e.g., a Buganizer ID and a manually-entered\nreason, are normal and supported." 
- ], - "enum": [ - "NO_ATTR", - "AUTHORITY", - "ATTRIBUTION", - "APPROVER", - "JUSTIFICATION_TYPE" - ], - "description": "Trusted attributes supplied by the IAM system.", - "type": "string" - }, - "values": { - "description": "The objects of the condition. This is mutually exclusive with 'value'.", - "items": { - "type": "string" - }, - "type": "array" - }, - "op": { - "enumDescriptions": [ - "Default no-op.", - "DEPRECATED. Use IN instead.", - "DEPRECATED. Use NOT_IN instead.", - "The condition is true if the subject (or any element of it if it is\na set) matches any of the supplied values.", - "The condition is true if the subject (or every element of it if it is\na set) matches none of the supplied values.", - "Subject is discharged" - ], - "enum": [ - "NO_OP", - "EQUALS", - "NOT_EQUALS", - "IN", - "NOT_IN", - "DISCHARGED" - ], - "description": "An operator to apply the subject with.", - "type": "string" - }, - "svc": { - "description": "Trusted attributes discharged by the service.", - "type": "string" - }, - "sys": { - "enum": [ - "NO_ATTR", - "REGION", - "SERVICE", - "NAME", - "IP" - ], - "description": "Trusted attributes supplied by any service that owns resources and uses\nthe IAM system for access control.", - "type": "string", - "enumDescriptions": [ - "Default non-attribute type", - "Region of the resource", - "Service name", - "Resource name", - "IP address of the caller" - ] - }, - "value": { - "description": "DEPRECATED. Use 'values' instead.", - "type": "string" - } - }, - "id": "Condition" - }, - "CounterOptions": { - "properties": { - "field": { - "description": "The field value to attribute.", - "type": "string" - }, - "metric": { - "description": "The metric to update.", - "type": "string" - } - }, - "id": "CounterOptions", - "description": "Options for counters", - "type": "object" - }, - "AuditLogConfig": { - "properties": { - "exemptedMembers": { - "description": "Specifies the identities that do not cause logging for this type of\npermission.\nFollows the same format of Binding.members.", - "items": { - "type": "string" - }, - "type": "array" - }, - "logType": { - "enum": [ - "LOG_TYPE_UNSPECIFIED", - "ADMIN_READ", - "DATA_WRITE", - "DATA_READ" - ], - "description": "The log type that this config enables.", - "type": "string", - "enumDescriptions": [ - "Default case. Should never be this.", - "Admin reads. Example: CloudIAM getIamPolicy", - "Data writes. Example: CloudSQL Users create", - "Data reads. Example: CloudSQL Users list" - ] - } - }, - "id": "AuditLogConfig", - "description": "Provides the configuration for logging a type of permissions.\nExample:\n\n {\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:foo@gmail.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n }\n ]\n }\n\nThis enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting\nfoo@gmail.com from DATA_READ logging.", - "type": "object" - }, - "DecryptResponse": { - "description": "Response message for KeyManagementService.Decrypt.", - "type": "object", - "properties": { - "plaintext": { - "format": "byte", - "description": "The decrypted data originally supplied in EncryptRequest.plaintext.", - "type": "string" - } - }, - "id": "DecryptResponse" - }, - "TestIamPermissionsRequest": { - "description": "Request message for `TestIamPermissions` method.", - "type": "object", - "properties": { - "permissions": { - "description": "The set of permissions to check for the `resource`. 
Permissions with\nwildcards (such as '*' or 'storage.*') are not allowed. For more\ninformation see\n[IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "id": "TestIamPermissionsRequest" - }, - "EncryptResponse": { - "properties": { - "ciphertext": { - "format": "byte", - "description": "The encrypted data.", - "type": "string" - }, - "name": { - "description": "The resource name of the CryptoKeyVersion used in encryption.", - "type": "string" - } - }, - "id": "EncryptResponse", - "description": "Response message for KeyManagementService.Encrypt.", - "type": "object" - }, - "KeyRing": { - "description": "A KeyRing is a toplevel logical grouping of CryptoKeys.", - "type": "object", - "properties": { - "createTime": { - "format": "google-datetime", - "description": "Output only. The time at which this KeyRing was created.", - "type": "string" - }, - "name": { - "description": "Output only. The resource name for the KeyRing in the format\n`projects/*/locations/*/keyRings/*`.", - "type": "string" - } - }, - "id": "KeyRing" - }, - "ListLocationsResponse": { - "description": "The response message for Locations.ListLocations.", - "type": "object", - "properties": { - "nextPageToken": { - "description": "The standard List next-page token.", - "type": "string" - }, - "locations": { - "description": "A list of locations that matches the specified filter in the request.", - "items": { - "$ref": "Location" - }, - "type": "array" - } - }, - "id": "ListLocationsResponse" - }, - "Policy": { - "description": "Defines an Identity and Access Management (IAM) policy. It is used to\nspecify access control policies for Cloud Platform resources.\n\n\nA `Policy` consists of a list of `bindings`. A `Binding` binds a list of\n`members` to a `role`, where the members can be user accounts, Google groups,\nGoogle domains, and service accounts. A `role` is a named list of permissions\ndefined by IAM.\n\n**Example**\n\n {\n \"bindings\": [\n {\n \"role\": \"roles/owner\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-other-app@appspot.gserviceaccount.com\",\n ]\n },\n {\n \"role\": \"roles/viewer\",\n \"members\": [\"user:sean@example.com\"]\n }\n ]\n }\n\nFor a description of IAM and its features, see the\n[IAM developer's guide](https://cloud.google.com/iam).", - "type": "object", - "properties": { - "iamOwned": { - "type": "boolean" - }, - "rules": { - "description": "If more than one rule is specified, the rules are applied in the following\nmanner:\n- All matching LOG rules are always applied.\n- If any DENY/DENY_WITH_LOG rule matches, permission is denied.\n Logging will be applied if one or more matching rule requires logging.\n- Otherwise, if any ALLOW/ALLOW_WITH_LOG rule matches, permission is\n granted.\n Logging will be applied if one or more matching rule requires logging.\n- Otherwise, if no rule applies, permission is denied.", - "items": { - "$ref": "Rule" - }, - "type": "array" - }, - "version": { - "format": "int32", - "description": "Version of the `Policy`. 
The default version is 0.", - "type": "integer" - }, - "auditConfigs": { - "description": "Specifies cloud audit logging configuration for this policy.", - "items": { - "$ref": "AuditConfig" - }, - "type": "array" - }, - "bindings": { - "description": "Associates a list of `members` to a `role`.\n`bindings` with no members will result in an error.", - "items": { - "$ref": "Binding" - }, - "type": "array" - }, - "etag": { - "format": "byte", - "description": "`etag` is used for optimistic concurrency control as a way to help\nprevent simultaneous updates of a policy from overwriting each other.\nIt is strongly suggested that systems make use of the `etag` in the\nread-modify-write cycle to perform policy updates in order to avoid race\nconditions: An `etag` is returned in the response to `getIamPolicy`, and\nsystems are expected to put that etag in the request to `setIamPolicy` to\nensure that their change will be applied to the same version of the policy.\n\nIf no `etag` is provided in the call to `setIamPolicy`, then the existing\npolicy is overwritten blindly.", - "type": "string" - } - }, - "id": "Policy" - }, - "RestoreCryptoKeyVersionRequest": { - "properties": {}, - "id": "RestoreCryptoKeyVersionRequest", - "description": "Request message for KeyManagementService.RestoreCryptoKeyVersion.", - "type": "object" - }, - "UpdateCryptoKeyPrimaryVersionRequest": { - "properties": { - "cryptoKeyVersionId": { - "description": "The id of the child CryptoKeyVersion to use as primary.", - "type": "string" - } - }, - "id": "UpdateCryptoKeyPrimaryVersionRequest", - "description": "Request message for KeyManagementService.UpdateCryptoKeyPrimaryVersion.", - "type": "object" - }, - "ListKeyRingsResponse": { - "description": "Response message for KeyManagementService.ListKeyRings.", - "type": "object", - "properties": { - "nextPageToken": { - "description": "A token to retrieve next page of results. 
Pass this value in\nListKeyRingsRequest.page_token to retrieve the next page of results.", - "type": "string" - }, - "totalSize": { - "format": "int32", - "description": "The total number of KeyRings that matched the query.", - "type": "integer" - }, - "keyRings": { - "description": "The list of KeyRings.", - "items": { - "$ref": "KeyRing" - }, - "type": "array" - } - }, - "id": "ListKeyRingsResponse" - }, - "DataAccessOptions": { - "description": "Write a Data Access (Gin) log", - "type": "object", - "properties": {}, - "id": "DataAccessOptions" - }, - "AuditConfig": { - "description": "Specifies the audit configuration for a service.\nThe configuration determines which permission types are logged, and what\nidentities, if any, are exempted from logging.\nAn AuditConfig must have one or more AuditLogConfigs.\n\nIf there are AuditConfigs for both `allServices` and a specific service,\nthe union of the two AuditConfigs is used for that service: the log_types\nspecified in each AuditConfig are enabled, and the exempted_members in each\nAuditConfig are exempted.\n\nExample Policy with multiple AuditConfigs:\n\n {\n \"audit_configs\": [\n {\n \"service\": \"allServices\"\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:foo@gmail.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n },\n {\n \"log_type\": \"ADMIN_READ\",\n }\n ]\n },\n {\n \"service\": \"fooservice.googleapis.com\"\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n },\n {\n \"log_type\": \"DATA_WRITE\",\n \"exempted_members\": [\n \"user:bar@gmail.com\"\n ]\n }\n ]\n }\n ]\n }\n\nFor fooservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ\nlogging. It also exempts foo@gmail.com from DATA_READ logging, and\nbar@gmail.com from DATA_WRITE logging.", - "type": "object", - "properties": { - "exemptedMembers": { - "items": { - "type": "string" - }, - "type": "array" - }, - "service": { - "description": "Specifies a service that will be enabled for audit logging.\nFor example, `storage.googleapis.com`, `cloudsql.googleapis.com`.\n`allServices` is a special value that covers all services.", - "type": "string" - }, - "auditLogConfigs": { - "description": "The configuration for logging of each type of permission.\nNext ID: 4", - "items": { - "$ref": "AuditLogConfig" - }, - "type": "array" - } - }, - "id": "AuditConfig" - }, - "CryptoKeyVersion": { - "description": "A CryptoKeyVersion represents an individual cryptographic key, and the\nassociated key material.\n\nIt can be used for cryptographic operations either directly, or via its\nparent CryptoKey, in which case the server will choose the appropriate\nversion for the operation.", - "type": "object", - "properties": { - "destroyEventTime": { - "format": "google-datetime", - "description": "Output only. The time this CryptoKeyVersion's key material was\ndestroyed. Only present if state is\nDESTROYED.", - "type": "string" - }, - "destroyTime": { - "format": "google-datetime", - "description": "Output only. The time this CryptoKeyVersion's key material is scheduled\nfor destruction. Only present if state is\nDESTROY_SCHEDULED.", - "type": "string" - }, - "createTime": { - "format": "google-datetime", - "description": "Output only. 
The time at which this CryptoKeyVersion was created.", - "type": "string" - }, - "state": { - "description": "The current state of the CryptoKeyVersion.", - "type": "string", - "enumDescriptions": [ - "Not specified.", - "This version may be used in Encrypt and\nDecrypt requests.", - "This version may not be used, but the key material is still available,\nand the version can be placed back into the ENABLED state.", - "This version is destroyed, and the key material is no longer stored.\nA version may not leave this state once entered.", - "This version is scheduled for destruction, and will be destroyed soon.\nCall\nRestoreCryptoKeyVersion\nto put it back into the DISABLED state." - ], - "enum": [ - "CRYPTO_KEY_VERSION_STATE_UNSPECIFIED", - "ENABLED", - "DISABLED", - "DESTROYED", - "DESTROY_SCHEDULED" - ] - }, - "name": { - "description": "Output only. The resource name for this CryptoKeyVersion in the format\n`projects/*/locations/*/keyRings/*/cryptoKeys/*/cryptoKeyVersions/*`.", - "type": "string" - } - }, - "id": "CryptoKeyVersion" - }, - "CloudAuditOptions": { - "description": "Write a Cloud Audit log", - "type": "object", - "properties": { - "logName": { - "enumDescriptions": [ - "Default. Should not be used.", - "Corresponds to \"cloudaudit.googleapis.com/activity\"", - "Corresponds to \"cloudaudit.googleapis.com/data_access\"" - ], - "enum": [ - "UNSPECIFIED_LOG_NAME", - "ADMIN_ACTIVITY", - "DATA_ACCESS" - ], - "description": "The log_name to populate in the Cloud Audit Record.", - "type": "string" - } - }, - "id": "CloudAuditOptions" - }, - "Binding": { - "description": "Associates `members` with a `role`.", - "type": "object", - "properties": { - "condition": { - "$ref": "Expr", - "description": "The condition that is associated with this binding.\nNOTE: an unsatisfied condition will not allow user access via current\nbinding. Different bindings, including their conditions, are examined\nindependently.\nThis field is GOOGLE_INTERNAL." - }, - "members": { - "description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. For example, `alice@gmail.com` or `joe@example.com`.\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n\n* `domain:{domain}`: A Google Apps domain name that represents all the\n users of that domain. For example, `google.com` or `example.com`.\n\n", - "items": { - "type": "string" - }, - "type": "array" - }, - "role": { - "description": "Role that is assigned to `members`.\nFor example, `roles/viewer`, `roles/editor`, or `roles/owner`.\nRequired", - "type": "string" - } - }, - "id": "Binding" - }, - "Expr": { - "description": "Represents an expression text. 
Example:\n\n title: \"User account presence\"\n description: \"Determines whether the request has a user account\"\n expression: \"size(request.user) \u003e 0\"", - "type": "object", - "properties": { - "location": { - "description": "An optional string indicating the location of the expression for error\nreporting, e.g. a file name and a position in the file.", - "type": "string" - }, - "title": { - "description": "An optional title for the expression, i.e. a short string describing\nits purpose. This can be used e.g. in UIs which allow to enter the\nexpression.", - "type": "string" - }, - "description": { - "description": "An optional description of the expression. This is a longer text which\ndescribes the expression, e.g. when hovered over it in a UI.", - "type": "string" - }, - "expression": { - "description": "Textual representation of an expression in\nCommon Expression Language syntax.\n\nThe application context of the containing message determines which\nwell-known feature set of CEL is supported.", - "type": "string" - } - }, - "id": "Expr" - }, - "EncryptRequest": { - "description": "Request message for KeyManagementService.Encrypt.", - "type": "object", - "properties": { - "additionalAuthenticatedData": { - "format": "byte", - "description": "Optional data that, if specified, must also be provided during decryption\nthrough DecryptRequest.additional_authenticated_data. Must be no\nlarger than 64KiB.", - "type": "string" - }, - "plaintext": { - "format": "byte", - "description": "Required. The data to encrypt. Must be no larger than 64KiB.", - "type": "string" - } - }, - "id": "EncryptRequest" - }, - "ListCryptoKeyVersionsResponse": { - "description": "Response message for KeyManagementService.ListCryptoKeyVersions.", - "type": "object", - "properties": { - "nextPageToken": { - "description": "A token to retrieve next page of results. 
Pass this value in\nListCryptoKeyVersionsRequest.page_token to retrieve the next page of\nresults.", - "type": "string" - }, - "totalSize": { - "format": "int32", - "description": "The total number of CryptoKeyVersions that matched the\nquery.", - "type": "integer" - }, - "cryptoKeyVersions": { - "description": "The list of CryptoKeyVersions.", - "items": { - "$ref": "CryptoKeyVersion" - }, - "type": "array" - } - }, - "id": "ListCryptoKeyVersionsResponse" - }, - "TestIamPermissionsResponse": { - "properties": { - "permissions": { - "description": "A subset of `TestPermissionsRequest.permissions` that the caller is\nallowed.", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "id": "TestIamPermissionsResponse", - "description": "Response message for `TestIamPermissions` method.", - "type": "object" - }, - "DestroyCryptoKeyVersionRequest": { - "properties": {}, - "id": "DestroyCryptoKeyVersionRequest", - "description": "Request message for KeyManagementService.DestroyCryptoKeyVersion.", - "type": "object" - }, - "Rule": { - "description": "A rule to be applied in a Policy.", - "type": "object", - "properties": { - "description": { - "description": "Human-readable description of the rule.", - "type": "string" - }, - "conditions": { - "description": "Additional restrictions that must be met", - "items": { - "$ref": "Condition" - }, - "type": "array" - }, - "logConfig": { - "description": "The config returned to callers of tech.iam.IAM.CheckPolicy for any entries\nthat match the LOG action.", - "items": { - "$ref": "LogConfig" - }, - "type": "array" - }, - "in": { - "description": "If one or more 'in' clauses are specified, the rule matches if\nthe PRINCIPAL/AUTHORITY_SELECTOR is in at least one of these entries.", - "items": { - "type": "string" - }, - "type": "array" - }, - "permissions": { - "description": "A permission is a string of form '\u003cservice\u003e.\u003cresource type\u003e.\u003cverb\u003e'\n(e.g., 'storage.buckets.list'). A value of '*' matches all permissions,\nand a verb part of '*' (e.g., 'storage.buckets.*') matches all verbs.", - "items": { - "type": "string" - }, - "type": "array" - }, - "action": { - "description": "Required", - "type": "string", - "enumDescriptions": [ - "Default no action.", - "Matching 'Entries' grant access.", - "Matching 'Entries' grant access and the caller promises to log\nthe request per the returned log_configs.", - "Matching 'Entries' deny access.", - "Matching 'Entries' deny access and the caller promises to log\nthe request per the returned log_configs.", - "Matching 'Entries' tell IAM.Check callers to generate logs." - ], - "enum": [ - "NO_ACTION", - "ALLOW", - "ALLOW_WITH_LOG", - "DENY", - "DENY_WITH_LOG", - "LOG" - ] - }, - "notIn": { - "description": "If one or more 'not_in' clauses are specified, the rule matches\nif the PRINCIPAL/AUTHORITY_SELECTOR is in none of the entries.\nThe format for in and not_in entries is the same as for members in a\nBinding (see google/iam/v1/policy.proto).", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "id": "Rule" - }, - "CryptoKey": { - "description": "A CryptoKey represents a logical key that can be used for cryptographic\noperations.\n\nA CryptoKey is made up of one or more versions, which\nrepresent the actual key material used in cryptographic operations.", - "type": "object", - "properties": { - "createTime": { - "format": "google-datetime", - "description": "Output only. 
The time at which this CryptoKey was created.", - "type": "string" - }, - "rotationPeriod": { - "format": "google-duration", - "description": "next_rotation_time will be advanced by this period when the service\nautomatically rotates a key. Must be at least one day.\n\nIf rotation_period is set, next_rotation_time must also be set.", - "type": "string" - }, - "primary": { - "$ref": "CryptoKeyVersion", - "description": "Output only. A copy of the \"primary\" CryptoKeyVersion that will be used\nby Encrypt when this CryptoKey is given\nin EncryptRequest.name.\n\nThe CryptoKey's primary version can be updated via\nUpdateCryptoKeyPrimaryVersion." - }, - "name": { - "description": "Output only. The resource name for this CryptoKey in the format\n`projects/*/locations/*/keyRings/*/cryptoKeys/*`.", - "type": "string" - }, - "purpose": { - "enum": [ - "CRYPTO_KEY_PURPOSE_UNSPECIFIED", - "ENCRYPT_DECRYPT" - ], - "description": "The immutable purpose of this CryptoKey. Currently, the only acceptable\npurpose is ENCRYPT_DECRYPT.", - "type": "string", - "enumDescriptions": [ - "Not specified.", - "CryptoKeys with this purpose may be used with\nEncrypt and\nDecrypt." - ] - }, - "nextRotationTime": { - "format": "google-datetime", - "description": "At next_rotation_time, the Key Management Service will automatically:\n\n1. Create a new version of this CryptoKey.\n2. Mark the new version as primary.\n\nKey rotations performed manually via\nCreateCryptoKeyVersion and\nUpdateCryptoKeyPrimaryVersion\ndo not affect next_rotation_time.", - "type": "string" - } - }, - "id": "CryptoKey" - }, - "LogConfig": { - "description": "Specifies what kind of log the caller must write\nIncrement a streamz counter with the specified metric and field names.\n\nMetric names should start with a '/', generally be lowercase-only,\nand end in \"_count\". Field names should not contain an initial slash.\nThe actual exported metric names will have \"/iam/policy\" prepended.\n\nField names correspond to IAM request parameters and field values are\ntheir respective values.\n\nAt present the only supported field names are\n - \"iam_principal\", corresponding to IAMContext.principal;\n - \"\" (empty string), resulting in one aggretated counter with no field.\n\nExamples:\n counter { metric: \"/debug_access_count\" field: \"iam_principal\" }\n ==\u003e increment counter /iam/policy/backend_debug_access_count\n {iam_principal=[value of IAMContext.principal]}\n\nAt this time we do not support:\n* multiple field names (though this may be supported in the future)\n* decrementing the counter\n* incrementing it by anything other than 1", - "type": "object", - "properties": { - "counter": { - "description": "Counter options.", - "$ref": "CounterOptions" - }, - "dataAccess": { - "$ref": "DataAccessOptions", - "description": "Data access options." 
- }, - "cloudAudit": { - "description": "Cloud audit options.", - "$ref": "CloudAuditOptions" - } - }, - "id": "LogConfig" - } - }, - "icons": { - "x32": "http://www.google.com/images/icons/product/search-32.gif", - "x16": "http://www.google.com/images/icons/product/search-16.gif" - }, - "protocol": "rest", - "canonicalName": "Cloud KMS", - "auth": { - "oauth2": { - "scopes": { - "https://www.googleapis.com/auth/cloud-platform": { - "description": "View and manage your data across Google Cloud Platform services" - } - } - } - }, - "rootUrl": "https://cloudkms.googleapis.com/", - "ownerDomain": "google.com", - "name": "cloudkms" -} diff --git a/vendor/google.golang.org/api/cloudkms/v1/cloudkms-gen.go b/vendor/google.golang.org/api/cloudkms/v1/cloudkms-gen.go deleted file mode 100644 index cc65a01d4a30..000000000000 --- a/vendor/google.golang.org/api/cloudkms/v1/cloudkms-gen.go +++ /dev/null @@ -1,5157 +0,0 @@ -// Package cloudkms provides access to the Google Cloud Key Management Service (KMS) API. -// -// See https://cloud.google.com/kms/ -// -// Usage example: -// -// import "google.golang.org/api/cloudkms/v1" -// ... -// cloudkmsService, err := cloudkms.New(oauthHttpClient) -package cloudkms - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - context "golang.org/x/net/context" - ctxhttp "golang.org/x/net/context/ctxhttp" - gensupport "google.golang.org/api/gensupport" - googleapi "google.golang.org/api/googleapi" - "io" - "net/http" - "net/url" - "strconv" - "strings" -) - -// Always reference these packages, just in case the auto-generated code -// below doesn't. -var _ = bytes.NewBuffer -var _ = strconv.Itoa -var _ = fmt.Sprintf -var _ = json.NewDecoder -var _ = io.Copy -var _ = url.Parse -var _ = gensupport.MarshalJSON -var _ = googleapi.Version -var _ = errors.New -var _ = strings.Replace -var _ = context.Canceled -var _ = ctxhttp.Do - -const apiId = "cloudkms:v1" -const apiName = "cloudkms" -const apiVersion = "v1" -const basePath = "https://cloudkms.googleapis.com/" - -// OAuth2 scopes used by this API. 
-const ( - // View and manage your data across Google Cloud Platform services - CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" -) - -func New(client *http.Client) (*Service, error) { - if client == nil { - return nil, errors.New("client is nil") - } - s := &Service{client: client, BasePath: basePath} - s.Projects = NewProjectsService(s) - return s, nil -} - -type Service struct { - client *http.Client - BasePath string // API endpoint base URL - UserAgent string // optional additional User-Agent fragment - - Projects *ProjectsService -} - -func (s *Service) userAgent() string { - if s.UserAgent == "" { - return googleapi.UserAgent - } - return googleapi.UserAgent + " " + s.UserAgent -} - -func NewProjectsService(s *Service) *ProjectsService { - rs := &ProjectsService{s: s} - rs.Locations = NewProjectsLocationsService(s) - return rs -} - -type ProjectsService struct { - s *Service - - Locations *ProjectsLocationsService -} - -func NewProjectsLocationsService(s *Service) *ProjectsLocationsService { - rs := &ProjectsLocationsService{s: s} - rs.KeyRings = NewProjectsLocationsKeyRingsService(s) - return rs -} - -type ProjectsLocationsService struct { - s *Service - - KeyRings *ProjectsLocationsKeyRingsService -} - -func NewProjectsLocationsKeyRingsService(s *Service) *ProjectsLocationsKeyRingsService { - rs := &ProjectsLocationsKeyRingsService{s: s} - rs.CryptoKeys = NewProjectsLocationsKeyRingsCryptoKeysService(s) - return rs -} - -type ProjectsLocationsKeyRingsService struct { - s *Service - - CryptoKeys *ProjectsLocationsKeyRingsCryptoKeysService -} - -func NewProjectsLocationsKeyRingsCryptoKeysService(s *Service) *ProjectsLocationsKeyRingsCryptoKeysService { - rs := &ProjectsLocationsKeyRingsCryptoKeysService{s: s} - rs.CryptoKeyVersions = NewProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsService(s) - return rs -} - -type ProjectsLocationsKeyRingsCryptoKeysService struct { - s *Service - - CryptoKeyVersions *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsService -} - -func NewProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsService(s *Service) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsService { - rs := &ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsService{s: s} - return rs -} - -type ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsService struct { - s *Service -} - -// AuditConfig: Specifies the audit configuration for a service. -// The configuration determines which permission types are logged, and -// what -// identities, if any, are exempted from logging. -// An AuditConfig must have one or more AuditLogConfigs. -// -// If there are AuditConfigs for both `allServices` and a specific -// service, -// the union of the two AuditConfigs is used for that service: the -// log_types -// specified in each AuditConfig are enabled, and the exempted_members -// in each -// AuditConfig are exempted. 
-// -// Example Policy with multiple AuditConfigs: -// -// { -// "audit_configs": [ -// { -// "service": "allServices" -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// "exempted_members": [ -// "user:foo@gmail.com" -// ] -// }, -// { -// "log_type": "DATA_WRITE", -// }, -// { -// "log_type": "ADMIN_READ", -// } -// ] -// }, -// { -// "service": "fooservice.googleapis.com" -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// }, -// { -// "log_type": "DATA_WRITE", -// "exempted_members": [ -// "user:bar@gmail.com" -// ] -// } -// ] -// } -// ] -// } -// -// For fooservice, this policy enables DATA_READ, DATA_WRITE and -// ADMIN_READ -// logging. It also exempts foo@gmail.com from DATA_READ logging, -// and -// bar@gmail.com from DATA_WRITE logging. -type AuditConfig struct { - // AuditLogConfigs: The configuration for logging of each type of - // permission. - // Next ID: 4 - AuditLogConfigs []*AuditLogConfig `json:"auditLogConfigs,omitempty"` - - ExemptedMembers []string `json:"exemptedMembers,omitempty"` - - // Service: Specifies a service that will be enabled for audit - // logging. - // For example, `storage.googleapis.com`, - // `cloudsql.googleapis.com`. - // `allServices` is a special value that covers all services. - Service string `json:"service,omitempty"` - - // ForceSendFields is a list of field names (e.g. "AuditLogConfigs") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "AuditLogConfigs") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *AuditConfig) MarshalJSON() ([]byte, error) { - type noMethod AuditConfig - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// AuditLogConfig: Provides the configuration for logging a type of -// permissions. -// Example: -// -// { -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// "exempted_members": [ -// "user:foo@gmail.com" -// ] -// }, -// { -// "log_type": "DATA_WRITE", -// } -// ] -// } -// -// This enables 'DATA_READ' and 'DATA_WRITE' logging, while -// exempting -// foo@gmail.com from DATA_READ logging. -type AuditLogConfig struct { - // ExemptedMembers: Specifies the identities that do not cause logging - // for this type of - // permission. - // Follows the same format of Binding.members. - ExemptedMembers []string `json:"exemptedMembers,omitempty"` - - // LogType: The log type that this config enables. - // - // Possible values: - // "LOG_TYPE_UNSPECIFIED" - Default case. Should never be this. - // "ADMIN_READ" - Admin reads. Example: CloudIAM getIamPolicy - // "DATA_WRITE" - Data writes. Example: CloudSQL Users create - // "DATA_READ" - Data reads. Example: CloudSQL Users list - LogType string `json:"logType,omitempty"` - - // ForceSendFields is a list of field names (e.g. 
"ExemptedMembers") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "ExemptedMembers") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *AuditLogConfig) MarshalJSON() ([]byte, error) { - type noMethod AuditLogConfig - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Binding: Associates `members` with a `role`. -type Binding struct { - // Condition: The condition that is associated with this binding. - // NOTE: an unsatisfied condition will not allow user access via - // current - // binding. Different bindings, including their conditions, are - // examined - // independently. - // This field is GOOGLE_INTERNAL. - Condition *Expr `json:"condition,omitempty"` - - // Members: Specifies the identities requesting access for a Cloud - // Platform resource. - // `members` can have the following values: - // - // * `allUsers`: A special identifier that represents anyone who is - // on the internet; with or without a Google account. - // - // * `allAuthenticatedUsers`: A special identifier that represents - // anyone - // who is authenticated with a Google account or a service - // account. - // - // * `user:{emailid}`: An email address that represents a specific - // Google - // account. For example, `alice@gmail.com` or `joe@example.com`. - // - // - // * `serviceAccount:{emailid}`: An email address that represents a - // service - // account. For example, - // `my-other-app@appspot.gserviceaccount.com`. - // - // * `group:{emailid}`: An email address that represents a Google - // group. - // For example, `admins@example.com`. - // - // - // * `domain:{domain}`: A Google Apps domain name that represents all - // the - // users of that domain. For example, `google.com` or - // `example.com`. - // - // - Members []string `json:"members,omitempty"` - - // Role: Role that is assigned to `members`. - // For example, `roles/viewer`, `roles/editor`, or - // `roles/owner`. - // Required - Role string `json:"role,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Condition") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Condition") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. 
It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Binding) MarshalJSON() ([]byte, error) { - type noMethod Binding - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// CloudAuditOptions: Write a Cloud Audit log -type CloudAuditOptions struct { - // LogName: The log_name to populate in the Cloud Audit Record. - // - // Possible values: - // "UNSPECIFIED_LOG_NAME" - Default. Should not be used. - // "ADMIN_ACTIVITY" - Corresponds to - // "cloudaudit.googleapis.com/activity" - // "DATA_ACCESS" - Corresponds to - // "cloudaudit.googleapis.com/data_access" - LogName string `json:"logName,omitempty"` - - // ForceSendFields is a list of field names (e.g. "LogName") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "LogName") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *CloudAuditOptions) MarshalJSON() ([]byte, error) { - type noMethod CloudAuditOptions - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Condition: A condition to be met. -type Condition struct { - // Iam: Trusted attributes supplied by the IAM system. - // - // Possible values: - // "NO_ATTR" - Default non-attribute. - // "AUTHORITY" - Either principal or (if present) authority selector. - // "ATTRIBUTION" - The principal (even if an authority selector is - // present), which - // must only be used for attribution, not authorization. - // "APPROVER" - An approver (distinct from the requester) that has - // authorized this - // request. - // When used with IN, the condition indicates that one of the - // approvers - // associated with the request matches the specified principal, or is - // a - // member of the specified group. Approvers can only grant - // additional - // access, and are thus only used in a strictly positive context - // (e.g. ALLOW/IN or DENY/NOT_IN). - // "JUSTIFICATION_TYPE" - What types of justifications have been - // supplied with this request. - // String values should match enum names from - // tech.iam.JustificationType, - // e.g. "MANUAL_STRING". It is not permitted to grant access based - // on - // the *absence* of a justification, so justification conditions can - // only - // be used in a "positive" context (e.g., ALLOW/IN or - // DENY/NOT_IN). - // - // Multiple justifications, e.g., a Buganizer ID and a - // manually-entered - // reason, are normal and supported. - Iam string `json:"iam,omitempty"` - - // Op: An operator to apply the subject with. - // - // Possible values: - // "NO_OP" - Default no-op. - // "EQUALS" - DEPRECATED. Use IN instead. - // "NOT_EQUALS" - DEPRECATED. Use NOT_IN instead. 
- // "IN" - The condition is true if the subject (or any element of it - // if it is - // a set) matches any of the supplied values. - // "NOT_IN" - The condition is true if the subject (or every element - // of it if it is - // a set) matches none of the supplied values. - // "DISCHARGED" - Subject is discharged - Op string `json:"op,omitempty"` - - // Svc: Trusted attributes discharged by the service. - Svc string `json:"svc,omitempty"` - - // Sys: Trusted attributes supplied by any service that owns resources - // and uses - // the IAM system for access control. - // - // Possible values: - // "NO_ATTR" - Default non-attribute type - // "REGION" - Region of the resource - // "SERVICE" - Service name - // "NAME" - Resource name - // "IP" - IP address of the caller - Sys string `json:"sys,omitempty"` - - // Value: DEPRECATED. Use 'values' instead. - Value string `json:"value,omitempty"` - - // Values: The objects of the condition. This is mutually exclusive with - // 'value'. - Values []string `json:"values,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Iam") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Iam") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Condition) MarshalJSON() ([]byte, error) { - type noMethod Condition - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// CounterOptions: Options for counters -type CounterOptions struct { - // Field: The field value to attribute. - Field string `json:"field,omitempty"` - - // Metric: The metric to update. - Metric string `json:"metric,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Field") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Field") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *CounterOptions) MarshalJSON() ([]byte, error) { - type noMethod CounterOptions - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// CryptoKey: A CryptoKey represents a logical key that can be used for -// cryptographic -// operations. 
-// -// A CryptoKey is made up of one or more versions, which -// represent the actual key material used in cryptographic operations. -type CryptoKey struct { - // CreateTime: Output only. The time at which this CryptoKey was - // created. - CreateTime string `json:"createTime,omitempty"` - - // Name: Output only. The resource name for this CryptoKey in the - // format - // `projects/*/locations/*/keyRings/*/cryptoKeys/*`. - Name string `json:"name,omitempty"` - - // NextRotationTime: At next_rotation_time, the Key Management Service - // will automatically: - // - // 1. Create a new version of this CryptoKey. - // 2. Mark the new version as primary. - // - // Key rotations performed manually via - // CreateCryptoKeyVersion and - // UpdateCryptoKeyPrimaryVersion - // do not affect next_rotation_time. - NextRotationTime string `json:"nextRotationTime,omitempty"` - - // Primary: Output only. A copy of the "primary" CryptoKeyVersion that - // will be used - // by Encrypt when this CryptoKey is given - // in EncryptRequest.name. - // - // The CryptoKey's primary version can be updated - // via - // UpdateCryptoKeyPrimaryVersion. - Primary *CryptoKeyVersion `json:"primary,omitempty"` - - // Purpose: The immutable purpose of this CryptoKey. Currently, the only - // acceptable - // purpose is ENCRYPT_DECRYPT. - // - // Possible values: - // "CRYPTO_KEY_PURPOSE_UNSPECIFIED" - Not specified. - // "ENCRYPT_DECRYPT" - CryptoKeys with this purpose may be used - // with - // Encrypt and - // Decrypt. - Purpose string `json:"purpose,omitempty"` - - // RotationPeriod: next_rotation_time will be advanced by this period - // when the service - // automatically rotates a key. Must be at least one day. - // - // If rotation_period is set, next_rotation_time must also be set. - RotationPeriod string `json:"rotationPeriod,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreateTime") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreateTime") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *CryptoKey) MarshalJSON() ([]byte, error) { - type noMethod CryptoKey - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// CryptoKeyVersion: A CryptoKeyVersion represents an individual -// cryptographic key, and the -// associated key material. -// -// It can be used for cryptographic operations either directly, or via -// its -// parent CryptoKey, in which case the server will choose the -// appropriate -// version for the operation. -type CryptoKeyVersion struct { - // CreateTime: Output only. The time at which this CryptoKeyVersion was - // created. 
- CreateTime string `json:"createTime,omitempty"` - - // DestroyEventTime: Output only. The time this CryptoKeyVersion's key - // material was - // destroyed. Only present if state is - // DESTROYED. - DestroyEventTime string `json:"destroyEventTime,omitempty"` - - // DestroyTime: Output only. The time this CryptoKeyVersion's key - // material is scheduled - // for destruction. Only present if state is - // DESTROY_SCHEDULED. - DestroyTime string `json:"destroyTime,omitempty"` - - // Name: Output only. The resource name for this CryptoKeyVersion in the - // format - // `projects/*/locations/*/keyRings/*/cryptoKeys/*/cryptoKeyVersio - // ns/*`. - Name string `json:"name,omitempty"` - - // State: The current state of the CryptoKeyVersion. - // - // Possible values: - // "CRYPTO_KEY_VERSION_STATE_UNSPECIFIED" - Not specified. - // "ENABLED" - This version may be used in Encrypt and - // Decrypt requests. - // "DISABLED" - This version may not be used, but the key material is - // still available, - // and the version can be placed back into the ENABLED state. - // "DESTROYED" - This version is destroyed, and the key material is no - // longer stored. - // A version may not leave this state once entered. - // "DESTROY_SCHEDULED" - This version is scheduled for destruction, - // and will be destroyed soon. - // Call - // RestoreCryptoKeyVersion - // to put it back into the DISABLED state. - State string `json:"state,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreateTime") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreateTime") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *CryptoKeyVersion) MarshalJSON() ([]byte, error) { - type noMethod CryptoKeyVersion - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// DataAccessOptions: Write a Data Access (Gin) log -type DataAccessOptions struct { -} - -// DecryptRequest: Request message for KeyManagementService.Decrypt. -type DecryptRequest struct { - // AdditionalAuthenticatedData: Optional data that must match the data - // originally supplied in - // EncryptRequest.additional_authenticated_data. - AdditionalAuthenticatedData string `json:"additionalAuthenticatedData,omitempty"` - - // Ciphertext: Required. The encrypted data originally returned - // in - // EncryptResponse.ciphertext. - Ciphertext string `json:"ciphertext,omitempty"` - - // ForceSendFields is a list of field names (e.g. - // "AdditionalAuthenticatedData") to unconditionally include in API - // requests. By default, fields with empty values are omitted from API - // requests. 
However, any non-pointer, non-interface field appearing in - // ForceSendFields will be sent to the server regardless of whether the - // field is empty or not. This may be used to include empty fields in - // Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. - // "AdditionalAuthenticatedData") to include in API requests with the - // JSON null value. By default, fields with empty values are omitted - // from API requests. However, any field with an empty value appearing - // in NullFields will be sent to the server as null. It is an error if a - // field in this list has a non-empty value. This may be used to include - // null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *DecryptRequest) MarshalJSON() ([]byte, error) { - type noMethod DecryptRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// DecryptResponse: Response message for KeyManagementService.Decrypt. -type DecryptResponse struct { - // Plaintext: The decrypted data originally supplied in - // EncryptRequest.plaintext. - Plaintext string `json:"plaintext,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Plaintext") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Plaintext") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *DecryptResponse) MarshalJSON() ([]byte, error) { - type noMethod DecryptResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// DestroyCryptoKeyVersionRequest: Request message for -// KeyManagementService.DestroyCryptoKeyVersion. -type DestroyCryptoKeyVersionRequest struct { -} - -// EncryptRequest: Request message for KeyManagementService.Encrypt. -type EncryptRequest struct { - // AdditionalAuthenticatedData: Optional data that, if specified, must - // also be provided during decryption - // through DecryptRequest.additional_authenticated_data. Must be - // no - // larger than 64KiB. - AdditionalAuthenticatedData string `json:"additionalAuthenticatedData,omitempty"` - - // Plaintext: Required. The data to encrypt. Must be no larger than - // 64KiB. - Plaintext string `json:"plaintext,omitempty"` - - // ForceSendFields is a list of field names (e.g. - // "AdditionalAuthenticatedData") to unconditionally include in API - // requests. By default, fields with empty values are omitted from API - // requests. However, any non-pointer, non-interface field appearing in - // ForceSendFields will be sent to the server regardless of whether the - // field is empty or not. This may be used to include empty fields in - // Patch requests. 
- ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. - // "AdditionalAuthenticatedData") to include in API requests with the - // JSON null value. By default, fields with empty values are omitted - // from API requests. However, any field with an empty value appearing - // in NullFields will be sent to the server as null. It is an error if a - // field in this list has a non-empty value. This may be used to include - // null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *EncryptRequest) MarshalJSON() ([]byte, error) { - type noMethod EncryptRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// EncryptResponse: Response message for KeyManagementService.Encrypt. -type EncryptResponse struct { - // Ciphertext: The encrypted data. - Ciphertext string `json:"ciphertext,omitempty"` - - // Name: The resource name of the CryptoKeyVersion used in encryption. - Name string `json:"name,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Ciphertext") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Ciphertext") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *EncryptResponse) MarshalJSON() ([]byte, error) { - type noMethod EncryptResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Expr: Represents an expression text. Example: -// -// title: "User account presence" -// description: "Determines whether the request has a user account" -// expression: "size(request.user) > 0" -type Expr struct { - // Description: An optional description of the expression. This is a - // longer text which - // describes the expression, e.g. when hovered over it in a UI. - Description string `json:"description,omitempty"` - - // Expression: Textual representation of an expression in - // Common Expression Language syntax. - // - // The application context of the containing message determines - // which - // well-known feature set of CEL is supported. - Expression string `json:"expression,omitempty"` - - // Location: An optional string indicating the location of the - // expression for error - // reporting, e.g. a file name and a position in the file. - Location string `json:"location,omitempty"` - - // Title: An optional title for the expression, i.e. a short string - // describing - // its purpose. This can be used e.g. in UIs which allow to enter - // the - // expression. - Title string `json:"title,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Description") to - // unconditionally include in API requests. 
By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Description") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Expr) MarshalJSON() ([]byte, error) { - type noMethod Expr - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// KeyRing: A KeyRing is a toplevel logical grouping of CryptoKeys. -type KeyRing struct { - // CreateTime: Output only. The time at which this KeyRing was created. - CreateTime string `json:"createTime,omitempty"` - - // Name: Output only. The resource name for the KeyRing in the - // format - // `projects/*/locations/*/keyRings/*`. - Name string `json:"name,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreateTime") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreateTime") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *KeyRing) MarshalJSON() ([]byte, error) { - type noMethod KeyRing - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ListCryptoKeyVersionsResponse: Response message for -// KeyManagementService.ListCryptoKeyVersions. -type ListCryptoKeyVersionsResponse struct { - // CryptoKeyVersions: The list of CryptoKeyVersions. - CryptoKeyVersions []*CryptoKeyVersion `json:"cryptoKeyVersions,omitempty"` - - // NextPageToken: A token to retrieve next page of results. Pass this - // value in - // ListCryptoKeyVersionsRequest.page_token to retrieve the next page - // of - // results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // TotalSize: The total number of CryptoKeyVersions that matched - // the - // query. - TotalSize int64 `json:"totalSize,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CryptoKeyVersions") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. 
However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CryptoKeyVersions") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *ListCryptoKeyVersionsResponse) MarshalJSON() ([]byte, error) { - type noMethod ListCryptoKeyVersionsResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ListCryptoKeysResponse: Response message for -// KeyManagementService.ListCryptoKeys. -type ListCryptoKeysResponse struct { - // CryptoKeys: The list of CryptoKeys. - CryptoKeys []*CryptoKey `json:"cryptoKeys,omitempty"` - - // NextPageToken: A token to retrieve next page of results. Pass this - // value in - // ListCryptoKeysRequest.page_token to retrieve the next page of - // results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // TotalSize: The total number of CryptoKeys that matched the query. - TotalSize int64 `json:"totalSize,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CryptoKeys") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CryptoKeys") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ListCryptoKeysResponse) MarshalJSON() ([]byte, error) { - type noMethod ListCryptoKeysResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ListKeyRingsResponse: Response message for -// KeyManagementService.ListKeyRings. -type ListKeyRingsResponse struct { - // KeyRings: The list of KeyRings. - KeyRings []*KeyRing `json:"keyRings,omitempty"` - - // NextPageToken: A token to retrieve next page of results. Pass this - // value in - // ListKeyRingsRequest.page_token to retrieve the next page of results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // TotalSize: The total number of KeyRings that matched the query. - TotalSize int64 `json:"totalSize,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. 
"KeyRings") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "KeyRings") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ListKeyRingsResponse) MarshalJSON() ([]byte, error) { - type noMethod ListKeyRingsResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ListLocationsResponse: The response message for -// Locations.ListLocations. -type ListLocationsResponse struct { - // Locations: A list of locations that matches the specified filter in - // the request. - Locations []*Location `json:"locations,omitempty"` - - // NextPageToken: The standard List next-page token. - NextPageToken string `json:"nextPageToken,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Locations") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Locations") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ListLocationsResponse) MarshalJSON() ([]byte, error) { - type noMethod ListLocationsResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Location: A resource that represents Google Cloud Platform location. -type Location struct { - // Labels: Cross-service attributes for the location. For example - // - // {"cloud.googleapis.com/region": "us-east1"} - Labels map[string]string `json:"labels,omitempty"` - - // LocationId: The canonical id for this location. For example: - // "us-east1". - LocationId string `json:"locationId,omitempty"` - - // Metadata: Service-specific metadata. For example the available - // capacity at the given - // location. - Metadata googleapi.RawMessage `json:"metadata,omitempty"` - - // Name: Resource name for the location, which may vary between - // implementations. - // For example: "projects/example-project/locations/us-east1" - Name string `json:"name,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. 
- googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Labels") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Labels") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Location) MarshalJSON() ([]byte, error) { - type noMethod Location - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// LogConfig: Specifies what kind of log the caller must write -// Increment a streamz counter with the specified metric and field -// names. -// -// Metric names should start with a '/', generally be -// lowercase-only, -// and end in "_count". Field names should not contain an initial -// slash. -// The actual exported metric names will have "/iam/policy" -// prepended. -// -// Field names correspond to IAM request parameters and field values -// are -// their respective values. -// -// At present the only supported field names are -// - "iam_principal", corresponding to IAMContext.principal; -// - "" (empty string), resulting in one aggretated counter with no -// field. -// -// Examples: -// counter { metric: "/debug_access_count" field: "iam_principal" } -// ==> increment counter /iam/policy/backend_debug_access_count -// {iam_principal=[value of -// IAMContext.principal]} -// -// At this time we do not support: -// * multiple field names (though this may be supported in the future) -// * decrementing the counter -// * incrementing it by anything other than 1 -type LogConfig struct { - // CloudAudit: Cloud audit options. - CloudAudit *CloudAuditOptions `json:"cloudAudit,omitempty"` - - // Counter: Counter options. - Counter *CounterOptions `json:"counter,omitempty"` - - // DataAccess: Data access options. - DataAccess *DataAccessOptions `json:"dataAccess,omitempty"` - - // ForceSendFields is a list of field names (e.g. "CloudAudit") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CloudAudit") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
- NullFields []string `json:"-"` -} - -func (s *LogConfig) MarshalJSON() ([]byte, error) { - type noMethod LogConfig - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Policy: Defines an Identity and Access Management (IAM) policy. It is -// used to -// specify access control policies for Cloud Platform resources. -// -// -// A `Policy` consists of a list of `bindings`. A `Binding` binds a list -// of -// `members` to a `role`, where the members can be user accounts, Google -// groups, -// Google domains, and service accounts. A `role` is a named list of -// permissions -// defined by IAM. -// -// **Example** -// -// { -// "bindings": [ -// { -// "role": "roles/owner", -// "members": [ -// "user:mike@example.com", -// "group:admins@example.com", -// "domain:google.com", -// -// "serviceAccount:my-other-app@appspot.gserviceaccount.com", -// ] -// }, -// { -// "role": "roles/viewer", -// "members": ["user:sean@example.com"] -// } -// ] -// } -// -// For a description of IAM and its features, see the -// [IAM developer's guide](https://cloud.google.com/iam). -type Policy struct { - // AuditConfigs: Specifies cloud audit logging configuration for this - // policy. - AuditConfigs []*AuditConfig `json:"auditConfigs,omitempty"` - - // Bindings: Associates a list of `members` to a `role`. - // `bindings` with no members will result in an error. - Bindings []*Binding `json:"bindings,omitempty"` - - // Etag: `etag` is used for optimistic concurrency control as a way to - // help - // prevent simultaneous updates of a policy from overwriting each - // other. - // It is strongly suggested that systems make use of the `etag` in - // the - // read-modify-write cycle to perform policy updates in order to avoid - // race - // conditions: An `etag` is returned in the response to `getIamPolicy`, - // and - // systems are expected to put that etag in the request to - // `setIamPolicy` to - // ensure that their change will be applied to the same version of the - // policy. - // - // If no `etag` is provided in the call to `setIamPolicy`, then the - // existing - // policy is overwritten blindly. - Etag string `json:"etag,omitempty"` - - IamOwned bool `json:"iamOwned,omitempty"` - - // Rules: If more than one rule is specified, the rules are applied in - // the following - // manner: - // - All matching LOG rules are always applied. - // - If any DENY/DENY_WITH_LOG rule matches, permission is denied. - // Logging will be applied if one or more matching rule requires - // logging. - // - Otherwise, if any ALLOW/ALLOW_WITH_LOG rule matches, permission is - // granted. - // Logging will be applied if one or more matching rule requires - // logging. - // - Otherwise, if no rule applies, permission is denied. - Rules []*Rule `json:"rules,omitempty"` - - // Version: Version of the `Policy`. The default version is 0. - Version int64 `json:"version,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "AuditConfigs") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. 
- ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "AuditConfigs") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Policy) MarshalJSON() ([]byte, error) { - type noMethod Policy - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// RestoreCryptoKeyVersionRequest: Request message for -// KeyManagementService.RestoreCryptoKeyVersion. -type RestoreCryptoKeyVersionRequest struct { -} - -// Rule: A rule to be applied in a Policy. -type Rule struct { - // Action: Required - // - // Possible values: - // "NO_ACTION" - Default no action. - // "ALLOW" - Matching 'Entries' grant access. - // "ALLOW_WITH_LOG" - Matching 'Entries' grant access and the caller - // promises to log - // the request per the returned log_configs. - // "DENY" - Matching 'Entries' deny access. - // "DENY_WITH_LOG" - Matching 'Entries' deny access and the caller - // promises to log - // the request per the returned log_configs. - // "LOG" - Matching 'Entries' tell IAM.Check callers to generate logs. - Action string `json:"action,omitempty"` - - // Conditions: Additional restrictions that must be met - Conditions []*Condition `json:"conditions,omitempty"` - - // Description: Human-readable description of the rule. - Description string `json:"description,omitempty"` - - // In: If one or more 'in' clauses are specified, the rule matches - // if - // the PRINCIPAL/AUTHORITY_SELECTOR is in at least one of these entries. - In []string `json:"in,omitempty"` - - // LogConfig: The config returned to callers of tech.iam.IAM.CheckPolicy - // for any entries - // that match the LOG action. - LogConfig []*LogConfig `json:"logConfig,omitempty"` - - // NotIn: If one or more 'not_in' clauses are specified, the rule - // matches - // if the PRINCIPAL/AUTHORITY_SELECTOR is in none of the entries. - // The format for in and not_in entries is the same as for members in - // a - // Binding (see google/iam/v1/policy.proto). - NotIn []string `json:"notIn,omitempty"` - - // Permissions: A permission is a string of form '..' - // (e.g., 'storage.buckets.list'). A value of '*' matches all - // permissions, - // and a verb part of '*' (e.g., 'storage.buckets.*') matches all verbs. - Permissions []string `json:"permissions,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Action") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Action") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
- NullFields []string `json:"-"` -} - -func (s *Rule) MarshalJSON() ([]byte, error) { - type noMethod Rule - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// SetIamPolicyRequest: Request message for `SetIamPolicy` method. -type SetIamPolicyRequest struct { - // Policy: REQUIRED: The complete policy to be applied to the - // `resource`. The size of - // the policy is limited to a few 10s of KB. An empty policy is a - // valid policy but certain Cloud Platform services (such as - // Projects) - // might reject them. - Policy *Policy `json:"policy,omitempty"` - - // UpdateMask: OPTIONAL: A FieldMask specifying which fields of the - // policy to modify. Only - // the fields in the mask will be modified. If no mask is provided, - // the - // following default mask is used: - // paths: "bindings, etag" - // This field is only used by Cloud IAM. - UpdateMask string `json:"updateMask,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Policy") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Policy") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *SetIamPolicyRequest) MarshalJSON() ([]byte, error) { - type noMethod SetIamPolicyRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// TestIamPermissionsRequest: Request message for `TestIamPermissions` -// method. -type TestIamPermissionsRequest struct { - // Permissions: The set of permissions to check for the `resource`. - // Permissions with - // wildcards (such as '*' or 'storage.*') are not allowed. For - // more - // information see - // [IAM - // Overview](https://cloud.google.com/iam/docs/overview#permissions). - Permissions []string `json:"permissions,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Permissions") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Permissions") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
- NullFields []string `json:"-"` -} - -func (s *TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { - type noMethod TestIamPermissionsRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// TestIamPermissionsResponse: Response message for `TestIamPermissions` -// method. -type TestIamPermissionsResponse struct { - // Permissions: A subset of `TestPermissionsRequest.permissions` that - // the caller is - // allowed. - Permissions []string `json:"permissions,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Permissions") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Permissions") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { - type noMethod TestIamPermissionsResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// UpdateCryptoKeyPrimaryVersionRequest: Request message for -// KeyManagementService.UpdateCryptoKeyPrimaryVersion. -type UpdateCryptoKeyPrimaryVersionRequest struct { - // CryptoKeyVersionId: The id of the child CryptoKeyVersion to use as - // primary. - CryptoKeyVersionId string `json:"cryptoKeyVersionId,omitempty"` - - // ForceSendFields is a list of field names (e.g. "CryptoKeyVersionId") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CryptoKeyVersionId") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *UpdateCryptoKeyPrimaryVersionRequest) MarshalJSON() ([]byte, error) { - type noMethod UpdateCryptoKeyPrimaryVersionRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// method id "cloudkms.projects.locations.get": - -type ProjectsLocationsGetCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Get information about a location. 
-func (r *ProjectsLocationsService) Get(name string) *ProjectsLocationsGetCall { - c := &ProjectsLocationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsLocationsGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsGetCall) Context(ctx context.Context) *ProjectsLocationsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.get" call. -// Exactly one of *Location or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Location.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsLocationsGetCall) Do(opts ...googleapi.CallOption) (*Location, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Location{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Get information about a location.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}", - // "httpMethod": "GET", - // "id": "cloudkms.projects.locations.get", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "Resource name for the location.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+name}", - // "response": { - // "$ref": "Location" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform" - // ] - // } - -} - -// method id "cloudkms.projects.locations.list": - -type ProjectsLocationsListCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Lists information about the supported locations for this -// service. -func (r *ProjectsLocationsService) List(name string) *ProjectsLocationsListCall { - c := &ProjectsLocationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - return c -} - -// Filter sets the optional parameter "filter": The standard list -// filter. -func (c *ProjectsLocationsListCall) Filter(filter string) *ProjectsLocationsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// PageSize sets the optional parameter "pageSize": The standard list -// page size. -func (c *ProjectsLocationsListCall) PageSize(pageSize int64) *ProjectsLocationsListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c -} - -// PageToken sets the optional parameter "pageToken": The standard list -// page token. -func (c *ProjectsLocationsListCall) PageToken(pageToken string) *ProjectsLocationsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsListCall) Fields(s ...googleapi.Field) *ProjectsLocationsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsLocationsListCall) IfNoneMatch(entityTag string) *ProjectsLocationsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *ProjectsLocationsListCall) Context(ctx context.Context) *ProjectsLocationsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}/locations") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.list" call. -// Exactly one of *ListLocationsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ListLocationsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsLocationsListCall) Do(opts ...googleapi.CallOption) (*ListLocationsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ListLocationsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Lists information about the supported locations for this service.", - // "flatPath": "v1/projects/{projectsId}/locations", - // "httpMethod": "GET", - // "id": "cloudkms.projects.locations.list", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "filter": { - // "description": "The standard list filter.", - // "location": "query", - // "type": "string" - // }, - // "name": { - // "description": "The resource that owns the locations collection, if applicable.", - // "location": "path", - // "pattern": "^projects/[^/]+$", - // "required": true, - // "type": "string" - // }, - // "pageSize": { - // "description": "The standard list page size.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "The standard list page token.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "v1/{+name}/locations", - // "response": { - // "$ref": "ListLocationsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. 
-// The provided context supersedes any context provided to the Context method. -func (c *ProjectsLocationsListCall) Pages(ctx context.Context, f func(*ListLocationsResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "cloudkms.projects.locations.keyRings.create": - -type ProjectsLocationsKeyRingsCreateCall struct { - s *Service - parent string - keyring *KeyRing - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Create: Create a new KeyRing in a given Project and Location. -func (r *ProjectsLocationsKeyRingsService) Create(parent string, keyring *KeyRing) *ProjectsLocationsKeyRingsCreateCall { - c := &ProjectsLocationsKeyRingsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - c.keyring = keyring - return c -} - -// KeyRingId sets the optional parameter "keyRingId": Required. It must -// be unique within a location and match the regular -// expression `[a-zA-Z0-9_-]{1,63}` -func (c *ProjectsLocationsKeyRingsCreateCall) KeyRingId(keyRingId string) *ProjectsLocationsKeyRingsCreateCall { - c.urlParams_.Set("keyRingId", keyRingId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsKeyRingsCreateCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsCreateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsCreateCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsCreateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsKeyRingsCreateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsCreateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.keyring) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/keyRings") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.create" call. -// Exactly one of *KeyRing or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *KeyRing.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. 
-func (c *ProjectsLocationsKeyRingsCreateCall) Do(opts ...googleapi.CallOption) (*KeyRing, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &KeyRing{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Create a new KeyRing in a given Project and Location.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings", - // "httpMethod": "POST", - // "id": "cloudkms.projects.locations.keyRings.create", - // "parameterOrder": [ - // "parent" - // ], - // "parameters": { - // "keyRingId": { - // "description": "Required. It must be unique within a location and match the regular\nexpression `[a-zA-Z0-9_-]{1,63}`", - // "location": "query", - // "type": "string" - // }, - // "parent": { - // "description": "Required. The resource name of the location associated with the\nKeyRings, in the format `projects/*/locations/*`.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+parent}/keyRings", - // "request": { - // "$ref": "KeyRing" - // }, - // "response": { - // "$ref": "KeyRing" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform" - // ] - // } - -} - -// method id "cloudkms.projects.locations.keyRings.get": - -type ProjectsLocationsKeyRingsGetCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns metadata for a given KeyRing. -func (r *ProjectsLocationsKeyRingsService) Get(name string) *ProjectsLocationsKeyRingsGetCall { - c := &ProjectsLocationsKeyRingsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsKeyRingsGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsLocationsKeyRingsGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsKeyRingsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsGetCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *ProjectsLocationsKeyRingsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.get" call. -// Exactly one of *KeyRing or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *KeyRing.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsLocationsKeyRingsGetCall) Do(opts ...googleapi.CallOption) (*KeyRing, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &KeyRing{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns metadata for a given KeyRing.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}", - // "httpMethod": "GET", - // "id": "cloudkms.projects.locations.keyRings.get", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "The name of the KeyRing to get.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+name}", - // "response": { - // "$ref": "KeyRing" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform" - // ] - // } - -} - -// method id "cloudkms.projects.locations.keyRings.getIamPolicy": - -type ProjectsLocationsKeyRingsGetIamPolicyCall struct { - s *Service - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// GetIamPolicy: Gets the access control policy for a resource. -// Returns an empty policy if the resource exists and does not have a -// policy -// set. -func (r *ProjectsLocationsKeyRingsService) GetIamPolicy(resource string) *ProjectsLocationsKeyRingsGetIamPolicyCall { - c := &ProjectsLocationsKeyRingsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.resource = resource - return c -} - -// Fields allows partial responses to be retrieved. 
See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsKeyRingsGetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsGetIamPolicyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsLocationsKeyRingsGetIamPolicyCall) IfNoneMatch(entityTag string) *ProjectsLocationsKeyRingsGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsGetIamPolicyCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsGetIamPolicyCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsKeyRingsGetIamPolicyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:getIamPolicy") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsLocationsKeyRingsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Policy{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}:getIamPolicy", - // "httpMethod": "GET", - // "id": "cloudkms.projects.locations.keyRings.getIamPolicy", - // "parameterOrder": [ - // "resource" - // ], - // "parameters": { - // "resource": { - // "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+resource}:getIamPolicy", - // "response": { - // "$ref": "Policy" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform" - // ] - // } - -} - -// method id "cloudkms.projects.locations.keyRings.list": - -type ProjectsLocationsKeyRingsListCall struct { - s *Service - parent string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Lists KeyRings. -func (r *ProjectsLocationsKeyRingsService) List(parent string) *ProjectsLocationsKeyRingsListCall { - c := &ProjectsLocationsKeyRingsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - return c -} - -// PageSize sets the optional parameter "pageSize": Optional limit on -// the number of KeyRings to include in the -// response. Further KeyRings can subsequently be obtained by -// including the ListKeyRingsResponse.next_page_token in a -// subsequent -// request. If unspecified, the server will pick an appropriate -// default. -func (c *ProjectsLocationsKeyRingsListCall) PageSize(pageSize int64) *ProjectsLocationsKeyRingsListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c -} - -// PageToken sets the optional parameter "pageToken": Optional -// pagination token, returned earlier -// via -// ListKeyRingsResponse.next_page_token. -func (c *ProjectsLocationsKeyRingsListCall) PageToken(pageToken string) *ProjectsLocationsKeyRingsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsKeyRingsListCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. 
Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsLocationsKeyRingsListCall) IfNoneMatch(entityTag string) *ProjectsLocationsKeyRingsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsListCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsKeyRingsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/keyRings") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.list" call. -// Exactly one of *ListKeyRingsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ListKeyRingsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsLocationsKeyRingsListCall) Do(opts ...googleapi.CallOption) (*ListKeyRingsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ListKeyRingsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Lists KeyRings.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings", - // "httpMethod": "GET", - // "id": "cloudkms.projects.locations.keyRings.list", - // "parameterOrder": [ - // "parent" - // ], - // "parameters": { - // "pageSize": { - // "description": "Optional limit on the number of KeyRings to include in the\nresponse. Further KeyRings can subsequently be obtained by\nincluding the ListKeyRingsResponse.next_page_token in a subsequent\nrequest. 
If unspecified, the server will pick an appropriate default.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "Optional pagination token, returned earlier via\nListKeyRingsResponse.next_page_token.", - // "location": "query", - // "type": "string" - // }, - // "parent": { - // "description": "Required. The resource name of the location associated with the\nKeyRings, in the format `projects/*/locations/*`.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+parent}/keyRings", - // "response": { - // "$ref": "ListKeyRingsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *ProjectsLocationsKeyRingsListCall) Pages(ctx context.Context, f func(*ListKeyRingsResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "cloudkms.projects.locations.keyRings.setIamPolicy": - -type ProjectsLocationsKeyRingsSetIamPolicyCall struct { - s *Service - resource string - setiampolicyrequest *SetIamPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any -// existing policy. -func (r *ProjectsLocationsKeyRingsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsLocationsKeyRingsSetIamPolicyCall { - c := &ProjectsLocationsKeyRingsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.resource = resource - c.setiampolicyrequest = setiampolicyrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsKeyRingsSetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsSetIamPolicyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsSetIamPolicyCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsSetIamPolicyCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *ProjectsLocationsKeyRingsSetIamPolicyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:setIamPolicy") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsLocationsKeyRingsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Policy{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}:setIamPolicy", - // "httpMethod": "POST", - // "id": "cloudkms.projects.locations.keyRings.setIamPolicy", - // "parameterOrder": [ - // "resource" - // ], - // "parameters": { - // "resource": { - // "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+resource}:setIamPolicy", - // "request": { - // "$ref": "SetIamPolicyRequest" - // }, - // "response": { - // "$ref": "Policy" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform" - // ] - // } - -} - -// method id "cloudkms.projects.locations.keyRings.testIamPermissions": - -type ProjectsLocationsKeyRingsTestIamPermissionsCall struct { - s *Service - resource string - testiampermissionsrequest *TestIamPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. 
-// If the resource does not exist, this will return an empty set -// of -// permissions, not a NOT_FOUND error. -// -// Note: This operation is designed to be used for building -// permission-aware -// UIs and command-line tools, not for authorization checking. This -// operation -// may "fail open" without warning. -func (r *ProjectsLocationsKeyRingsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsLocationsKeyRingsTestIamPermissionsCall { - c := &ProjectsLocationsKeyRingsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.resource = resource - c.testiampermissionsrequest = testiampermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsKeyRingsTestIamPermissionsCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsTestIamPermissionsCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsKeyRingsTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:testIamPermissions") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.testIamPermissions" call. -// Exactly one of *TestIamPermissionsResponse or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *TestIamPermissionsResponse.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsLocationsKeyRingsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestIamPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}:testIamPermissions", - // "httpMethod": "POST", - // "id": "cloudkms.projects.locations.keyRings.testIamPermissions", - // "parameterOrder": [ - // "resource" - // ], - // "parameters": { - // "resource": { - // "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+resource}:testIamPermissions", - // "request": { - // "$ref": "TestIamPermissionsRequest" - // }, - // "response": { - // "$ref": "TestIamPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform" - // ] - // } - -} - -// method id "cloudkms.projects.locations.keyRings.cryptoKeys.create": - -type ProjectsLocationsKeyRingsCryptoKeysCreateCall struct { - s *Service - parent string - cryptokey *CryptoKey - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Create: Create a new CryptoKey within a KeyRing. -// -// CryptoKey.purpose is required. -func (r *ProjectsLocationsKeyRingsCryptoKeysService) Create(parent string, cryptokey *CryptoKey) *ProjectsLocationsKeyRingsCryptoKeysCreateCall { - c := &ProjectsLocationsKeyRingsCryptoKeysCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - c.cryptokey = cryptokey - return c -} - -// CryptoKeyId sets the optional parameter "cryptoKeyId": Required. It -// must be unique within a KeyRing and match the regular -// expression `[a-zA-Z0-9_-]{1,63}` -func (c *ProjectsLocationsKeyRingsCryptoKeysCreateCall) CryptoKeyId(cryptoKeyId string) *ProjectsLocationsKeyRingsCryptoKeysCreateCall { - c.urlParams_.Set("cryptoKeyId", cryptoKeyId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsKeyRingsCryptoKeysCreateCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsCryptoKeysCreateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *ProjectsLocationsKeyRingsCryptoKeysCreateCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsCryptoKeysCreateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsKeyRingsCryptoKeysCreateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsCryptoKeysCreateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.cryptokey) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/cryptoKeys") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.cryptoKeys.create" call. -// Exactly one of *CryptoKey or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *CryptoKey.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsLocationsKeyRingsCryptoKeysCreateCall) Do(opts ...googleapi.CallOption) (*CryptoKey, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &CryptoKey{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Create a new CryptoKey within a KeyRing.\n\nCryptoKey.purpose is required.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys", - // "httpMethod": "POST", - // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.create", - // "parameterOrder": [ - // "parent" - // ], - // "parameters": { - // "cryptoKeyId": { - // "description": "Required. It must be unique within a KeyRing and match the regular\nexpression `[a-zA-Z0-9_-]{1,63}`", - // "location": "query", - // "type": "string" - // }, - // "parent": { - // "description": "Required. 
The name of the KeyRing associated with the\nCryptoKeys.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+parent}/cryptoKeys", - // "request": { - // "$ref": "CryptoKey" - // }, - // "response": { - // "$ref": "CryptoKey" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform" - // ] - // } - -} - -// method id "cloudkms.projects.locations.keyRings.cryptoKeys.decrypt": - -type ProjectsLocationsKeyRingsCryptoKeysDecryptCall struct { - s *Service - name string - decryptrequest *DecryptRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Decrypt: Decrypts data that was protected by Encrypt. -func (r *ProjectsLocationsKeyRingsCryptoKeysService) Decrypt(name string, decryptrequest *DecryptRequest) *ProjectsLocationsKeyRingsCryptoKeysDecryptCall { - c := &ProjectsLocationsKeyRingsCryptoKeysDecryptCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - c.decryptrequest = decryptrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsKeyRingsCryptoKeysDecryptCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsCryptoKeysDecryptCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsCryptoKeysDecryptCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsCryptoKeysDecryptCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsKeyRingsCryptoKeysDecryptCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsCryptoKeysDecryptCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.decryptrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:decrypt") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.cryptoKeys.decrypt" call. -// Exactly one of *DecryptResponse or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *DecryptResponse.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsLocationsKeyRingsCryptoKeysDecryptCall) Do(opts ...googleapi.CallOption) (*DecryptResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &DecryptResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Decrypts data that was protected by Encrypt.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:decrypt", - // "httpMethod": "POST", - // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.decrypt", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "Required. The resource name of the CryptoKey to use for decryption.\nThe server will choose the appropriate version.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+name}:decrypt", - // "request": { - // "$ref": "DecryptRequest" - // }, - // "response": { - // "$ref": "DecryptResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform" - // ] - // } - -} - -// method id "cloudkms.projects.locations.keyRings.cryptoKeys.encrypt": - -type ProjectsLocationsKeyRingsCryptoKeysEncryptCall struct { - s *Service - name string - encryptrequest *EncryptRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Encrypt: Encrypts data, so that it can only be recovered by a call to -// Decrypt. -func (r *ProjectsLocationsKeyRingsCryptoKeysService) Encrypt(name string, encryptrequest *EncryptRequest) *ProjectsLocationsKeyRingsCryptoKeysEncryptCall { - c := &ProjectsLocationsKeyRingsCryptoKeysEncryptCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - c.encryptrequest = encryptrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsKeyRingsCryptoKeysEncryptCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsCryptoKeysEncryptCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsCryptoKeysEncryptCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsCryptoKeysEncryptCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *ProjectsLocationsKeyRingsCryptoKeysEncryptCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsCryptoKeysEncryptCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.encryptrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:encrypt") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.cryptoKeys.encrypt" call. -// Exactly one of *EncryptResponse or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *EncryptResponse.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsLocationsKeyRingsCryptoKeysEncryptCall) Do(opts ...googleapi.CallOption) (*EncryptResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &EncryptResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Encrypts data, so that it can only be recovered by a call to Decrypt.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:encrypt", - // "httpMethod": "POST", - // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.encrypt", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "Required. 
The resource name of the CryptoKey or CryptoKeyVersion\nto use for encryption.\n\nIf a CryptoKey is specified, the server will use its\nprimary version.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/.+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+name}:encrypt", - // "request": { - // "$ref": "EncryptRequest" - // }, - // "response": { - // "$ref": "EncryptResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform" - // ] - // } - -} - -// method id "cloudkms.projects.locations.keyRings.cryptoKeys.get": - -type ProjectsLocationsKeyRingsCryptoKeysGetCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns metadata for a given CryptoKey, as well as its -// primary CryptoKeyVersion. -func (r *ProjectsLocationsKeyRingsCryptoKeysService) Get(name string) *ProjectsLocationsKeyRingsCryptoKeysGetCall { - c := &ProjectsLocationsKeyRingsCryptoKeysGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsKeyRingsCryptoKeysGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsCryptoKeysGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsLocationsKeyRingsCryptoKeysGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsKeyRingsCryptoKeysGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsCryptoKeysGetCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsCryptoKeysGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsKeyRingsCryptoKeysGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsCryptoKeysGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.cryptoKeys.get" call. -// Exactly one of *CryptoKey or error will be non-nil. Any non-2xx -// status code is an error. 
Response headers are in either -// *CryptoKey.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsLocationsKeyRingsCryptoKeysGetCall) Do(opts ...googleapi.CallOption) (*CryptoKey, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &CryptoKey{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns metadata for a given CryptoKey, as well as its\nprimary CryptoKeyVersion.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}", - // "httpMethod": "GET", - // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.get", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "The name of the CryptoKey to get.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+name}", - // "response": { - // "$ref": "CryptoKey" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform" - // ] - // } - -} - -// method id "cloudkms.projects.locations.keyRings.cryptoKeys.getIamPolicy": - -type ProjectsLocationsKeyRingsCryptoKeysGetIamPolicyCall struct { - s *Service - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// GetIamPolicy: Gets the access control policy for a resource. -// Returns an empty policy if the resource exists and does not have a -// policy -// set. -func (r *ProjectsLocationsKeyRingsCryptoKeysService) GetIamPolicy(resource string) *ProjectsLocationsKeyRingsCryptoKeysGetIamPolicyCall { - c := &ProjectsLocationsKeyRingsCryptoKeysGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.resource = resource - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsKeyRingsCryptoKeysGetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsCryptoKeysGetIamPolicyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsLocationsKeyRingsCryptoKeysGetIamPolicyCall) IfNoneMatch(entityTag string) *ProjectsLocationsKeyRingsCryptoKeysGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. 
Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsCryptoKeysGetIamPolicyCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsCryptoKeysGetIamPolicyCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsKeyRingsCryptoKeysGetIamPolicyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsCryptoKeysGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:getIamPolicy") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.cryptoKeys.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsLocationsKeyRingsCryptoKeysGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Policy{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:getIamPolicy", - // "httpMethod": "GET", - // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.getIamPolicy", - // "parameterOrder": [ - // "resource" - // ], - // "parameters": { - // "resource": { - // "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+resource}:getIamPolicy", - // "response": { - // "$ref": "Policy" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform" - // ] - // } - -} - -// method id "cloudkms.projects.locations.keyRings.cryptoKeys.list": - -type ProjectsLocationsKeyRingsCryptoKeysListCall struct { - s *Service - parent string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Lists CryptoKeys. -func (r *ProjectsLocationsKeyRingsCryptoKeysService) List(parent string) *ProjectsLocationsKeyRingsCryptoKeysListCall { - c := &ProjectsLocationsKeyRingsCryptoKeysListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - return c -} - -// PageSize sets the optional parameter "pageSize": Optional limit on -// the number of CryptoKeys to include in the -// response. Further CryptoKeys can subsequently be obtained -// by -// including the ListCryptoKeysResponse.next_page_token in a -// subsequent -// request. If unspecified, the server will pick an appropriate -// default. -func (c *ProjectsLocationsKeyRingsCryptoKeysListCall) PageSize(pageSize int64) *ProjectsLocationsKeyRingsCryptoKeysListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c -} - -// PageToken sets the optional parameter "pageToken": Optional -// pagination token, returned earlier -// via -// ListCryptoKeysResponse.next_page_token. -func (c *ProjectsLocationsKeyRingsCryptoKeysListCall) PageToken(pageToken string) *ProjectsLocationsKeyRingsCryptoKeysListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsKeyRingsCryptoKeysListCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsCryptoKeysListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. 
This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsLocationsKeyRingsCryptoKeysListCall) IfNoneMatch(entityTag string) *ProjectsLocationsKeyRingsCryptoKeysListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsCryptoKeysListCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsCryptoKeysListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsKeyRingsCryptoKeysListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsCryptoKeysListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/cryptoKeys") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.cryptoKeys.list" call. -// Exactly one of *ListCryptoKeysResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ListCryptoKeysResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsLocationsKeyRingsCryptoKeysListCall) Do(opts ...googleapi.CallOption) (*ListCryptoKeysResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ListCryptoKeysResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Lists CryptoKeys.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys", - // "httpMethod": "GET", - // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.list", - // "parameterOrder": [ - // "parent" - // ], - // "parameters": { - // "pageSize": { - // "description": "Optional limit on the number of CryptoKeys to include in the\nresponse. Further CryptoKeys can subsequently be obtained by\nincluding the ListCryptoKeysResponse.next_page_token in a subsequent\nrequest. 
If unspecified, the server will pick an appropriate default.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "Optional pagination token, returned earlier via\nListCryptoKeysResponse.next_page_token.", - // "location": "query", - // "type": "string" - // }, - // "parent": { - // "description": "Required. The resource name of the KeyRing to list, in the format\n`projects/*/locations/*/keyRings/*`.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+parent}/cryptoKeys", - // "response": { - // "$ref": "ListCryptoKeysResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *ProjectsLocationsKeyRingsCryptoKeysListCall) Pages(ctx context.Context, f func(*ListCryptoKeysResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "cloudkms.projects.locations.keyRings.cryptoKeys.patch": - -type ProjectsLocationsKeyRingsCryptoKeysPatchCall struct { - s *Service - name string - cryptokey *CryptoKey - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Patch: Update a CryptoKey. -func (r *ProjectsLocationsKeyRingsCryptoKeysService) Patch(name string, cryptokey *CryptoKey) *ProjectsLocationsKeyRingsCryptoKeysPatchCall { - c := &ProjectsLocationsKeyRingsCryptoKeysPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - c.cryptokey = cryptokey - return c -} - -// UpdateMask sets the optional parameter "updateMask": Required list of -// fields to be updated in this request. -func (c *ProjectsLocationsKeyRingsCryptoKeysPatchCall) UpdateMask(updateMask string) *ProjectsLocationsKeyRingsCryptoKeysPatchCall { - c.urlParams_.Set("updateMask", updateMask) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsKeyRingsCryptoKeysPatchCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsCryptoKeysPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsCryptoKeysPatchCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsCryptoKeysPatchCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *ProjectsLocationsKeyRingsCryptoKeysPatchCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsCryptoKeysPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.cryptokey) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.cryptoKeys.patch" call. -// Exactly one of *CryptoKey or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *CryptoKey.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsLocationsKeyRingsCryptoKeysPatchCall) Do(opts ...googleapi.CallOption) (*CryptoKey, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &CryptoKey{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Update a CryptoKey.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}", - // "httpMethod": "PATCH", - // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.patch", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "Output only. 
The resource name for this CryptoKey in the format\n`projects/*/locations/*/keyRings/*/cryptoKeys/*`.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", - // "required": true, - // "type": "string" - // }, - // "updateMask": { - // "description": "Required list of fields to be updated in this request.", - // "format": "google-fieldmask", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "v1/{+name}", - // "request": { - // "$ref": "CryptoKey" - // }, - // "response": { - // "$ref": "CryptoKey" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform" - // ] - // } - -} - -// method id "cloudkms.projects.locations.keyRings.cryptoKeys.setIamPolicy": - -type ProjectsLocationsKeyRingsCryptoKeysSetIamPolicyCall struct { - s *Service - resource string - setiampolicyrequest *SetIamPolicyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any -// existing policy. -func (r *ProjectsLocationsKeyRingsCryptoKeysService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsLocationsKeyRingsCryptoKeysSetIamPolicyCall { - c := &ProjectsLocationsKeyRingsCryptoKeysSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.resource = resource - c.setiampolicyrequest = setiampolicyrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsKeyRingsCryptoKeysSetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsCryptoKeysSetIamPolicyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsCryptoKeysSetIamPolicyCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsCryptoKeysSetIamPolicyCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsKeyRingsCryptoKeysSetIamPolicyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsCryptoKeysSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:setIamPolicy") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.cryptoKeys.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. 
Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsLocationsKeyRingsCryptoKeysSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Policy{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Sets the access control policy on the specified resource. Replaces any\nexisting policy.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:setIamPolicy", - // "httpMethod": "POST", - // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.setIamPolicy", - // "parameterOrder": [ - // "resource" - // ], - // "parameters": { - // "resource": { - // "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+resource}:setIamPolicy", - // "request": { - // "$ref": "SetIamPolicyRequest" - // }, - // "response": { - // "$ref": "Policy" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform" - // ] - // } - -} - -// method id "cloudkms.projects.locations.keyRings.cryptoKeys.testIamPermissions": - -type ProjectsLocationsKeyRingsCryptoKeysTestIamPermissionsCall struct { - s *Service - resource string - testiampermissionsrequest *TestIamPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -// If the resource does not exist, this will return an empty set -// of -// permissions, not a NOT_FOUND error. -// -// Note: This operation is designed to be used for building -// permission-aware -// UIs and command-line tools, not for authorization checking. This -// operation -// may "fail open" without warning. -func (r *ProjectsLocationsKeyRingsCryptoKeysService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsLocationsKeyRingsCryptoKeysTestIamPermissionsCall { - c := &ProjectsLocationsKeyRingsCryptoKeysTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.resource = resource - c.testiampermissionsrequest = testiampermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *ProjectsLocationsKeyRingsCryptoKeysTestIamPermissionsCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsCryptoKeysTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsCryptoKeysTestIamPermissionsCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsCryptoKeysTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsKeyRingsCryptoKeysTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsCryptoKeysTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+resource}:testIamPermissions") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.cryptoKeys.testIamPermissions" call. -// Exactly one of *TestIamPermissionsResponse or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *TestIamPermissionsResponse.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsLocationsKeyRingsCryptoKeysTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestIamPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a NOT_FOUND error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. 
This operation\nmay \"fail open\" without warning.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:testIamPermissions", - // "httpMethod": "POST", - // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.testIamPermissions", - // "parameterOrder": [ - // "resource" - // ], - // "parameters": { - // "resource": { - // "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+resource}:testIamPermissions", - // "request": { - // "$ref": "TestIamPermissionsRequest" - // }, - // "response": { - // "$ref": "TestIamPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform" - // ] - // } - -} - -// method id "cloudkms.projects.locations.keyRings.cryptoKeys.updatePrimaryVersion": - -type ProjectsLocationsKeyRingsCryptoKeysUpdatePrimaryVersionCall struct { - s *Service - name string - updatecryptokeyprimaryversionrequest *UpdateCryptoKeyPrimaryVersionRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// UpdatePrimaryVersion: Update the version of a CryptoKey that will be -// used in Encrypt -func (r *ProjectsLocationsKeyRingsCryptoKeysService) UpdatePrimaryVersion(name string, updatecryptokeyprimaryversionrequest *UpdateCryptoKeyPrimaryVersionRequest) *ProjectsLocationsKeyRingsCryptoKeysUpdatePrimaryVersionCall { - c := &ProjectsLocationsKeyRingsCryptoKeysUpdatePrimaryVersionCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - c.updatecryptokeyprimaryversionrequest = updatecryptokeyprimaryversionrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsKeyRingsCryptoKeysUpdatePrimaryVersionCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsCryptoKeysUpdatePrimaryVersionCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsCryptoKeysUpdatePrimaryVersionCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsCryptoKeysUpdatePrimaryVersionCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsKeyRingsCryptoKeysUpdatePrimaryVersionCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsCryptoKeysUpdatePrimaryVersionCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.updatecryptokeyprimaryversionrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:updatePrimaryVersion") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.cryptoKeys.updatePrimaryVersion" call. -// Exactly one of *CryptoKey or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *CryptoKey.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsLocationsKeyRingsCryptoKeysUpdatePrimaryVersionCall) Do(opts ...googleapi.CallOption) (*CryptoKey, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &CryptoKey{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Update the version of a CryptoKey that will be used in Encrypt", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}:updatePrimaryVersion", - // "httpMethod": "POST", - // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.updatePrimaryVersion", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "The resource name of the CryptoKey to update.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+name}:updatePrimaryVersion", - // "request": { - // "$ref": "UpdateCryptoKeyPrimaryVersionRequest" - // }, - // "response": { - // "$ref": "CryptoKey" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform" - // ] - // } - -} - -// method id "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.create": - -type ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsCreateCall struct { - s *Service - parent string - cryptokeyversion *CryptoKeyVersion - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Create: Create a new CryptoKeyVersion in a CryptoKey. -// -// The server will assign the next sequential id. If unset, -// state will be set to -// ENABLED. -func (r *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsService) Create(parent string, cryptokeyversion *CryptoKeyVersion) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsCreateCall { - c := &ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - c.cryptokeyversion = cryptokeyversion - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsCreateCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsCreateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsCreateCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsCreateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsCreateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsCreateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.cryptokeyversion) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/cryptoKeyVersions") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.create" call. -// Exactly one of *CryptoKeyVersion or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *CryptoKeyVersion.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsCreateCall) Do(opts ...googleapi.CallOption) (*CryptoKeyVersion, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &CryptoKeyVersion{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Create a new CryptoKeyVersion in a CryptoKey.\n\nThe server will assign the next sequential id. If unset,\nstate will be set to\nENABLED.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions", - // "httpMethod": "POST", - // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.create", - // "parameterOrder": [ - // "parent" - // ], - // "parameters": { - // "parent": { - // "description": "Required. 
The name of the CryptoKey associated with\nthe CryptoKeyVersions.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+parent}/cryptoKeyVersions", - // "request": { - // "$ref": "CryptoKeyVersion" - // }, - // "response": { - // "$ref": "CryptoKeyVersion" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform" - // ] - // } - -} - -// method id "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.destroy": - -type ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsDestroyCall struct { - s *Service - name string - destroycryptokeyversionrequest *DestroyCryptoKeyVersionRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Destroy: Schedule a CryptoKeyVersion for destruction. -// -// Upon calling this method, CryptoKeyVersion.state will be set -// to -// DESTROY_SCHEDULED -// and destroy_time will be set to a time 24 -// hours in the future, at which point the state -// will be changed to -// DESTROYED, and the key -// material will be irrevocably destroyed. -// -// Before the destroy_time is reached, -// RestoreCryptoKeyVersion may be called to reverse the process. -func (r *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsService) Destroy(name string, destroycryptokeyversionrequest *DestroyCryptoKeyVersionRequest) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsDestroyCall { - c := &ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsDestroyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - c.destroycryptokeyversionrequest = destroycryptokeyversionrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsDestroyCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsDestroyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsDestroyCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsDestroyCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsDestroyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsDestroyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.destroycryptokeyversionrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:destroy") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.destroy" call. -// Exactly one of *CryptoKeyVersion or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *CryptoKeyVersion.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsDestroyCall) Do(opts ...googleapi.CallOption) (*CryptoKeyVersion, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &CryptoKeyVersion{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Schedule a CryptoKeyVersion for destruction.\n\nUpon calling this method, CryptoKeyVersion.state will be set to\nDESTROY_SCHEDULED\nand destroy_time will be set to a time 24\nhours in the future, at which point the state\nwill be changed to\nDESTROYED, and the key\nmaterial will be irrevocably destroyed.\n\nBefore the destroy_time is reached,\nRestoreCryptoKeyVersion may be called to reverse the process.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions/{cryptoKeyVersionsId}:destroy", - // "httpMethod": "POST", - // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.destroy", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "The resource name of the CryptoKeyVersion to destroy.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+/cryptoKeyVersions/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+name}:destroy", - // "request": { - // "$ref": "DestroyCryptoKeyVersionRequest" - // }, - // "response": { - // "$ref": "CryptoKeyVersion" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform" - // ] - // } - -} - -// method id "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.get": - -type ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetCall struct { - s *Service - name string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns metadata for a given CryptoKeyVersion. 
-func (r *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsService) Get(name string) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetCall { - c := &ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.get" call. -// Exactly one of *CryptoKeyVersion or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *CryptoKeyVersion.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsGetCall) Do(opts ...googleapi.CallOption) (*CryptoKeyVersion, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &CryptoKeyVersion{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns metadata for a given CryptoKeyVersion.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions/{cryptoKeyVersionsId}", - // "httpMethod": "GET", - // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.get", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "The name of the CryptoKeyVersion to get.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+/cryptoKeyVersions/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+name}", - // "response": { - // "$ref": "CryptoKeyVersion" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform" - // ] - // } - -} - -// method id "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.list": - -type ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall struct { - s *Service - parent string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Lists CryptoKeyVersions. -func (r *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsService) List(parent string) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall { - c := &ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.parent = parent - return c -} - -// PageSize sets the optional parameter "pageSize": Optional limit on -// the number of CryptoKeyVersions to -// include in the response. Further CryptoKeyVersions can -// subsequently be obtained by including -// the -// ListCryptoKeyVersionsResponse.next_page_token in a subsequent -// request. -// If unspecified, the server will pick an appropriate default. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall) PageSize(pageSize int64) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall { - c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) - return c -} - -// PageToken sets the optional parameter "pageToken": Optional -// pagination token, returned earlier -// via -// ListCryptoKeyVersionsResponse.next_page_token. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall) PageToken(pageToken string) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall) IfNoneMatch(entityTag string) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/cryptoKeyVersions") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "parent": c.parent, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.list" call. -// Exactly one of *ListCryptoKeyVersionsResponse or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *ListCryptoKeyVersionsResponse.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall) Do(opts ...googleapi.CallOption) (*ListCryptoKeyVersionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ListCryptoKeyVersionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Lists CryptoKeyVersions.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions", - // "httpMethod": "GET", - // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.list", - // "parameterOrder": [ - // "parent" - // ], - // "parameters": { - // "pageSize": { - // "description": "Optional limit on the number of CryptoKeyVersions to\ninclude in the response. Further CryptoKeyVersions can\nsubsequently be obtained by including the\nListCryptoKeyVersionsResponse.next_page_token in a subsequent request.\nIf unspecified, the server will pick an appropriate default.", - // "format": "int32", - // "location": "query", - // "type": "integer" - // }, - // "pageToken": { - // "description": "Optional pagination token, returned earlier via\nListCryptoKeyVersionsResponse.next_page_token.", - // "location": "query", - // "type": "string" - // }, - // "parent": { - // "description": "Required. The resource name of the CryptoKey to list, in the format\n`projects/*/locations/*/keyRings/*/cryptoKeys/*`.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+parent}/cryptoKeyVersions", - // "response": { - // "$ref": "ListCryptoKeyVersionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsListCall) Pages(ctx context.Context, f func(*ListCryptoKeyVersionsResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.patch": - -type ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsPatchCall struct { - s *Service - name string - cryptokeyversion *CryptoKeyVersion - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Patch: Update a CryptoKeyVersion's metadata. -// -// state may be changed between -// ENABLED and -// DISABLED using this -// method. See DestroyCryptoKeyVersion and RestoreCryptoKeyVersion -// to -// move between other states. 
-func (r *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsService) Patch(name string, cryptokeyversion *CryptoKeyVersion) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsPatchCall { - c := &ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - c.cryptokeyversion = cryptokeyversion - return c -} - -// UpdateMask sets the optional parameter "updateMask": Required list of -// fields to be updated in this request. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsPatchCall) UpdateMask(updateMask string) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsPatchCall { - c.urlParams_.Set("updateMask", updateMask) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsPatchCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsPatchCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsPatchCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsPatchCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.cryptokeyversion) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.patch" call. -// Exactly one of *CryptoKeyVersion or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *CryptoKeyVersion.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsPatchCall) Do(opts ...googleapi.CallOption) (*CryptoKeyVersion, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &CryptoKeyVersion{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Update a CryptoKeyVersion's metadata.\n\nstate may be changed between\nENABLED and\nDISABLED using this\nmethod. See DestroyCryptoKeyVersion and RestoreCryptoKeyVersion to\nmove between other states.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions/{cryptoKeyVersionsId}", - // "httpMethod": "PATCH", - // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.patch", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "Output only. The resource name for this CryptoKeyVersion in the format\n`projects/*/locations/*/keyRings/*/cryptoKeys/*/cryptoKeyVersions/*`.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+/cryptoKeyVersions/[^/]+$", - // "required": true, - // "type": "string" - // }, - // "updateMask": { - // "description": "Required list of fields to be updated in this request.", - // "format": "google-fieldmask", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "v1/{+name}", - // "request": { - // "$ref": "CryptoKeyVersion" - // }, - // "response": { - // "$ref": "CryptoKeyVersion" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform" - // ] - // } - -} - -// method id "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.restore": - -type ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRestoreCall struct { - s *Service - name string - restorecryptokeyversionrequest *RestoreCryptoKeyVersionRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Restore: Restore a CryptoKeyVersion in -// the -// DESTROY_SCHEDULED, -// state. -// -// Upon restoration of the CryptoKeyVersion, state -// will be set to DISABLED, -// and destroy_time will be cleared. -func (r *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsService) Restore(name string, restorecryptokeyversionrequest *RestoreCryptoKeyVersionRequest) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRestoreCall { - c := &ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRestoreCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.name = name - c.restorecryptokeyversionrequest = restorecryptokeyversionrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRestoreCall) Fields(s ...googleapi.Field) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRestoreCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRestoreCall) Context(ctx context.Context) *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRestoreCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRestoreCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRestoreCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.restorecryptokeyversionrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:restore") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "name": c.name, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.restore" call. -// Exactly one of *CryptoKeyVersion or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *CryptoKeyVersion.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsRestoreCall) Do(opts ...googleapi.CallOption) (*CryptoKeyVersion, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &CryptoKeyVersion{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Restore a CryptoKeyVersion in the\nDESTROY_SCHEDULED,\nstate.\n\nUpon restoration of the CryptoKeyVersion, state\nwill be set to DISABLED,\nand destroy_time will be cleared.", - // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/keyRings/{keyRingsId}/cryptoKeys/{cryptoKeysId}/cryptoKeyVersions/{cryptoKeyVersionsId}:restore", - // "httpMethod": "POST", - // "id": "cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions.restore", - // "parameterOrder": [ - // "name" - // ], - // "parameters": { - // "name": { - // "description": "The resource name of the CryptoKeyVersion to restore.", - // "location": "path", - // "pattern": "^projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+/cryptoKeyVersions/[^/]+$", - // "required": true, - // "type": "string" - // } - // }, - // "path": "v1/{+name}:restore", - // "request": { - // "$ref": "RestoreCryptoKeyVersionRequest" - // }, - // "response": { - // "$ref": "CryptoKeyVersion" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform" - // ] - // } - -} diff --git a/vendor/google.golang.org/api/compute/v0.alpha/compute-api.json b/vendor/google.golang.org/api/compute/v0.alpha/compute-api.json index 00938eb6b097..5c3f16e3ba40 100644 --- a/vendor/google.golang.org/api/compute/v0.alpha/compute-api.json +++ b/vendor/google.golang.org/api/compute/v0.alpha/compute-api.json @@ -1,11 +1,11 @@ { "kind": "discovery#restDescription", - "etag": "\"YWOzh2SDasdU84ArJnpYek-OMdg/2ZSRpzMc2ShKN_p23rokDFE50Rk\"", + "etag": "\"YWOzh2SDasdU84ArJnpYek-OMdg/CwN9bOpaV-xhJ_YHqke_sIbhsB0\"", "discoveryVersion": "v1", "id": "compute:alpha", "name": "compute", "version": "alpha", - "revision": "20170721", + "revision": "20170905", "title": "Compute Engine API", "description": "Creates and runs virtual machines on Google Cloud Platform.", "ownerDomain": "google.com", @@ -182,6 +182,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -213,6 +282,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -513,6 +651,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -544,6 +751,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -672,7 +948,7 @@ }, "index": { "type": "integer", - "description": "Assigns a zero-based index to this disk, where 0 is reserved for the boot disk. For example, if you have many disks attached to an instance, each disk would have a unique index number. If not specified, the server will choose an appropriate value.", + "description": "[Output Only] A zero-based index to this disk, where 0 is reserved for the boot disk. If you have many disks attached to an instance, each disk would have a unique index number.", "format": "int32" }, "initializeParams": { @@ -959,6 +1235,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -990,6 +1335,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -1158,6 +1572,22 @@ "description": "The minimum number of replicas that the autoscaler can scale down to. This cannot be less than 0. If not provided, autoscaler will choose a default value depending on maximum number of instances allowed.", "format": "int32" }, + "mode": { + "type": "string", + "description": "Defines operating mode for this policy.", + "enum": [ + "OFF", + "ON", + "ONLY_DOWN", + "ONLY_UP" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ] + }, "queueBasedScaling": { "$ref": "AutoscalingPolicyQueueBasedScaling", "description": "Configuration parameters of autoscaling based on queuing system." @@ -1304,6 +1734,11 @@ "description": "The max number of simultaneous connections for the group. Can be used with either CONNECTION or UTILIZATION balancing modes. For CONNECTION mode, either maxConnections or maxConnectionsPerInstance must be set.\n\nThis cannot be used for internal load balancing.", "format": "int32" }, + "maxConnectionsPerEndpoint": { + "type": "integer", + "description": "The max number of simultaneous connections that a single backend network endpoint can handle. This is used to calculate the capacity of the group. Can be used in either CONNECTION or UTILIZATION balancing modes. For CONNECTION mode, either maxConnections or maxConnectionsPerEndpoint must be set.\n\nThis cannot be used for internal load balancing.", + "format": "int32" + }, "maxConnectionsPerInstance": { "type": "integer", "description": "The max number of simultaneous connections that a single backend instance can handle. This is used to calculate the capacity of the group. Can be used in either CONNECTION or UTILIZATION balancing modes. For CONNECTION mode, either maxConnections or maxConnectionsPerInstance must be set.\n\nThis cannot be used for internal load balancing.", @@ -1314,6 +1749,11 @@ "description": "The max requests per second (RPS) of the group. Can be used with either RATE or UTILIZATION balancing modes, but required if RATE mode. For RATE mode, either maxRate or maxRatePerInstance must be set.\n\nThis cannot be used for internal load balancing.", "format": "int32" }, + "maxRatePerEndpoint": { + "type": "number", + "description": "The max requests per second (RPS) that a single backend network endpoint can handle. This is used to calculate the capacity of the group. Can be used in either balancing mode. For RATE mode, either maxRate or maxRatePerEndpoint must be set.\n\nThis cannot be used for internal load balancing.", + "format": "float" + }, "maxRatePerInstance": { "type": "number", "description": "The max requests per second (RPS) that a single backend instance can handle. This is used to calculate the capacity of the group. Can be used in either balancing mode. For RATE mode, either maxRate or maxRatePerInstance must be set.\n\nThis cannot be used for internal load balancing.", @@ -1419,6 +1859,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." 
+ }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -1465,10 +1974,8 @@ "type": "boolean", "description": "If true, enable Cloud CDN for this BackendService.\n\nWhen the load balancing scheme is INTERNAL, this field is not used." }, - "failoverRatio": { - "type": "number", - "description": "The value of the field must be in [0, 1]. If set, 'backends[].failover' must be set. They together define the fallback behavior of the primary backend: if the ratio of the healthy VMs in the primary backend is at or below this number, traffic arriving at the load-balanced IP will be directed to the failover backend.\n\nIn case where 'failoverRatio' is not set or all the VMs in the backup backend are unhealthy, the traffic will be directed back to the primary backend in the \"force\" mode, where traffic will be spread to the healthy VMs with the best effort, or to all VMs when no VM is healthy.\n\nThis field is only used with l4 load balancing.", - "format": "float" + "failoverPolicy": { + "$ref": "BackendServiceFailoverPolicy" }, "fingerprint": { "type": "string", @@ -1477,7 +1984,7 @@ }, "healthChecks": { "type": "array", - "description": "The list of URLs to the HttpHealthCheck or HttpsHealthCheck resource for health checking this BackendService. Currently at most one health check can be specified, and a health check is required for GCE backend services. 
A health check must not be specified for GAE app backend and Cloud Function backend.\n\nFor internal load balancing, a URL to a HealthCheck resource must be specified instead.", + "description": "The list of URLs to the HttpHealthCheck or HttpsHealthCheck resource for health checking this BackendService. Currently at most one health check can be specified, and a health check is required for Compute Engine backend services. A health check must not be specified for App Engine backend and Cloud Function backend.\n\nFor internal load balancing, a URL to a HealthCheck resource must be specified instead.", "items": { "type": "string" } @@ -1497,6 +2004,7 @@ }, "loadBalancingScheme": { "type": "string", + "description": "Indicates whether the backend service will be used with internal or external load balancing. A backend service created for one type of load balancing cannot be used with the other. Possible values are INTERNAL and EXTERNAL.", "enum": [ "EXTERNAL", "INTERNAL", @@ -1608,6 +2116,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -1634,6 +2211,25 @@ } } }, + "BackendServiceFailoverPolicy": { + "id": "BackendServiceFailoverPolicy", + "type": "object", + "properties": { + "disableConnectionDrainOnFailover": { + "type": "boolean", + "description": "On failover or failback, this field indicates whether connection drain will be honored. Setting this to true has the following effect: connections to the old active pool are not drained. 
Connections to the new active pool use the timeout of 10 min (currently fixed). Setting to false has the following effect: both old and new connections will have a drain timeout of 10 min.\n\nThis can be set to true only if the protocol is TCP.\n\nThe default is false." + }, + "dropTrafficIfUnhealthy": { + "type": "boolean", + "description": "This option is used only when no healthy VMs are detected in the primary and backup instance groups. When set to true, traffic is dropped. When set to false, new connections are sent across all VMs in the primary group.\n\nThe default is false." + }, + "failoverRatio": { + "type": "number", + "description": "The value of the field must be in [0, 1]. If the ratio of the healthy VMs in the primary backend is at or below this number, traffic arriving at the load-balanced IP will be directed to the failover backend.\n\nIn case where 'failoverRatio' is not set or all the VMs in the backup backend are unhealthy, the traffic will be directed back to the primary backend in the \"force\" mode, where traffic will be spread to the healthy VMs with the best effort, or to all VMs when no VM is healthy.\n\nThis field is only used with l4 load balancing.", + "format": "float" + } + } + }, "BackendServiceGroupHealth": { "id": "BackendServiceGroupHealth", "type": "object", @@ -1699,6 +2295,84 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." 
+ } + } + } + } + }, + "BackendServiceReference": { + "id": "BackendServiceReference", + "type": "object", + "properties": { + "backendService": { + "type": "string" } } }, @@ -1967,6 +2641,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -1998,6 +2741,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. 
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -2447,6 +3259,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -2478,6 +3359,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -2574,6 +3524,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -2605,6 +3624,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -3009,6 +4097,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -3075,6 +4232,11 @@ "type": "string", "description": "An optional description of this resource. Provide this property when you create the resource." }, + "fingerprint": { + "type": "string", + "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a ForwardingRule. Include the fingerprint in patch request to ensure that you do not overwrite changes that were applied from another concurrent request.\n\nTo see the latest fingerprint, make a get() request to retrieve a ForwardingRule.", + "format": "byte" + }, "id": { "type": "string", "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", @@ -3214,6 +4376,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -3245,6 +4476,84 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "ForwardingRuleReference": { + "id": "ForwardingRuleReference", + "type": "object", + "properties": { + "forwardingRule": { + "type": "string" } } }, @@ -3412,6 +4721,20 @@ "type": "string", "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence." }, + "portSpecification": { + "type": "string", + "description": "Specifies how port is selected for health checking, can be one of following values:\nUSE_FIXED_PORT: The port number in\nport\nis used for health checking.\nUSE_NAMED_PORT: The\nportName\nis used for health checking.\nUSE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking.\n\n\nIf not specified, HTTP2 health check follows behavior specified in\nport\nand\nportName\nfields.", + "enum": [ + "USE_FIXED_PORT", + "USE_NAMED_PORT", + "USE_SERVING_PORT" + ], + "enumDescriptions": [ + "", + "", + "" + ] + }, "proxyHeader": { "type": "string", "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.", @@ -3451,6 +4774,20 @@ "type": "string", "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence." }, + "portSpecification": { + "type": "string", + "description": "Specifies how port is selected for health checking, can be one of following values:\nUSE_FIXED_PORT: The port number in\nport\nis used for health checking.\nUSE_NAMED_PORT: The\nportName\nis used for health checking.\nUSE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking.\n\n\nIf not specified, HTTP health check follows behavior specified in\nport\nand\nportName\nfields.", + "enum": [ + "USE_FIXED_PORT", + "USE_NAMED_PORT", + "USE_SERVING_PORT" + ], + "enumDescriptions": [ + "", + "", + "" + ] + }, "proxyHeader": { "type": "string", "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.", @@ -3490,6 +4827,20 @@ "type": "string", "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence." }, + "portSpecification": { + "type": "string", + "description": "Specifies how port is selected for health checking, can be one of following values:\nUSE_FIXED_PORT: The port number in\nport\nis used for health checking.\nUSE_NAMED_PORT: The\nportName\nis used for health checking.\nUSE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. 
For other backends, the port or named port specified in the Backend Service is used for health checking.\n\n\nIf not specified, HTTPS health check follows behavior specified in\nport\nand\nportName\nfields.", + "enum": [ + "USE_FIXED_PORT", + "USE_NAMED_PORT", + "USE_SERVING_PORT" + ], + "enumDescriptions": [ + "", + "", + "" + ] + }, "proxyHeader": { "type": "string", "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.", @@ -3634,6 +4985,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -3678,6 +5098,40 @@ } } }, + "HealthStatusForNetworkEndpoint": { + "id": "HealthStatusForNetworkEndpoint", + "type": "object", + "properties": { + "backendService": { + "$ref": "BackendServiceReference", + "description": "URL of the backend service associated with the health state of the network endpoint." + }, + "forwardingRule": { + "$ref": "ForwardingRuleReference", + "description": "URL of the forwarding rule associated with the health state of the network endpoint." + }, + "healthCheck": { + "$ref": "HealthCheckReference", + "description": "URL of the health check associated with the health state of the network endpoint." 
+ }, + "healthState": { + "type": "string", + "description": "Health state of the network endpoint determined based on the health checks configured.", + "enum": [ + "DRAINING", + "HEALTHY", + "UNHEALTHY", + "UNKNOWN" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ] + } + } + }, "Host": { "id": "Host", "type": "object", @@ -3787,6 +5241,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -3818,6 +5341,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -3932,6 +5524,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -3963,6 +5624,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -4225,6 +5955,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -4323,6 +6122,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -4528,6 +6396,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -4548,6 +6485,10 @@ "type": "string", "description": "[Output Only] Creation timestamp in RFC3339 text format." }, + "deletionProtection": { + "type": "boolean", + "description": "Whether the resource should be protected against deletion." + }, "description": { "type": "string", "description": "An optional description of this resource. Provide this property when you create the resource." @@ -4625,7 +6566,7 @@ }, "networkInterfaces": { "type": "array", - "description": "An array of configurations for this interface. This specifies how this interface is configured to interact with other network services, such as connecting to the internet. Only one interface is supported per instance.", + "description": "An array of network configurations for this instance. These specify how interfaces are configured to interact with other network services, such as connecting to the internet. 
Multiple interfaces are supported per instance.", "items": { "$ref": "NetworkInterface" } @@ -4715,41 +6656,110 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "InstanceGroup": { - "id": "InstanceGroup", - "type": "object", - "properties": { - "creationTimestamp": { - "type": "string", - "description": "[Output Only] The creation timestamp for this instance group in RFC3339 text format." - }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." - }, - "fingerprint": { - "type": "string", - "description": "[Output Only] The fingerprint of the named ports. The system uses this fingerprint to detect conflicts when multiple users change the named ports concurrently.", - "format": "byte" - }, - "id": { - "type": "string", - "description": "[Output Only] A unique identifier for this instance group, generated by the server.", - "format": "uint64" - }, - "kind": { - "type": "string", - "description": "[Output Only] The resource type, which is always compute#instanceGroup for instance groups.", - "default": "compute#instanceGroup" }, - "name": { - "type": "string", - "description": "The name of the instance group. The name must be 1-63 characters long, and comply with RFC1035.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "annotations": { + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." 
+ } + } + } + } + }, + "InstanceGroup": { + "id": "InstanceGroup", + "type": "object", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] The creation timestamp for this instance group in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "fingerprint": { + "type": "string", + "description": "[Output Only] The fingerprint of the named ports. The system uses this fingerprint to detect conflicts when multiple users change the named ports concurrently.", + "format": "byte" + }, + "id": { + "type": "string", + "description": "[Output Only] A unique identifier for this instance group, generated by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] The resource type, which is always compute#instanceGroup for instance groups.", + "default": "compute#instanceGroup" + }, + "name": { + "type": "string", + "description": "The name of the instance group. The name must be 1-63 characters long, and comply with RFC1035.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "annotations": { "required": [ "compute.instanceGroupManagers.insert" ] @@ -4817,6 +6827,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -4848,6 +6927,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." 
+ }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -4959,7 +7107,7 @@ }, "serviceAccount": { "type": "string", - "description": "Service account will be used as credentials for all operations performed by managed instance group on instances. The service accounts needs all permissions required to create and delete instances. When not specified, the service account {projectNumber}@cloudservices.gserviceaccount.com will be used." + "description": "[Output Only] The service account to be used as credentials for all operations performed by the managed instance group on instances. The service accounts needs all permissions required to create and delete instances. By default, the service account {projectNumber}@cloudservices.gserviceaccount.com is used." }, "statefulPolicy": { "$ref": "InstanceGroupManagerStatefulPolicy", @@ -5162,6 +7310,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -5212,6 +7429,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -5448,6 +7734,75 @@ "nextPageToken": { "type": "string", "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -5655,6 +8010,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -5817,6 +8241,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -5848,6 +8341,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -6025,6 +8587,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -6244,6 +8875,13 @@ "type": "boolean", "description": "Administrative status of the interconnect. When this is set to ?true?, the Interconnect is functional and may carry traffic (assuming there are functional InterconnectAttachments and other requirements are satisfied). When set to ?false?, no packets will be carried over this Interconnect and no BGP routes will be exchanged over it. By default, it is set to ?true?." }, + "circuitInfos": { + "type": "array", + "description": "[Output Only] List of CircuitInfo objects, that describe the individual circuits in this LAG.", + "items": { + "$ref": "InterconnectCircuitInfo" + } + }, "connectionAuthorization": { "type": "string", "description": "[Output Only] URL to retrieve the Letter Of Authority and Customer Facility Assignment (LOA-CFA) documentation relating to this Interconnect. This documentation authorizes the facility provider to connect to the specified crossconnect ports." @@ -6290,9 +8928,11 @@ "interconnectType": { "type": "string", "enum": [ + "IT_PARTNER", "IT_PRIVATE" ], "enumDescriptions": [ + "", "" ] }, @@ -6363,7 +9003,7 @@ "InterconnectAttachment": { "id": "InterconnectAttachment", "type": "object", - "description": "Protocol definitions for Mixer API to support InterconnectAttachment. Next available tag: 14", + "description": "Protocol definitions for Mixer API to support InterconnectAttachment. Next available tag: 18", "properties": { "cloudRouterIpAddress": { "type": "string", @@ -6462,6 +9102,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." 
+ }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -6493,6 +9202,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -6590,6 +9368,25 @@ } } }, + "InterconnectCircuitInfo": { + "id": "InterconnectCircuitInfo", + "type": "object", + "description": "Describes a single physical circuit between the Customer and Google. CircuitInfo objects are created by Google, so all fields are output only. Next id: 4", + "properties": { + "customerDemarcId": { + "type": "string", + "description": "Customer-side demarc ID for this circuit. This will only be set if it was provided by the Customer to Google during circuit turn-up." + }, + "googleCircuitId": { + "type": "string", + "description": "Google-assigned unique ID for this circuit. Assigned at circuit turn-up." + }, + "googleDemarcId": { + "type": "string", + "description": "Google-side demarc ID for this circuit. Assigned at circuit turn-up and provided by Google to the customer in the LOA." + } + } + }, "InterconnectList": { "id": "InterconnectList", "type": "object", @@ -6618,6 +9415,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." 
+ }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -6731,6 +9597,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -6756,6 +9691,10 @@ "" ] }, + "region": { + "type": "string", + "description": "URL for the region of this location." + }, "regionKey": { "type": "string", "description": "Scope key for the region of this location." @@ -6873,6 +9812,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -6931,6 +9939,87 @@ } } }, + "LicenseCode": { + "id": "LicenseCode", + "type": "object", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "[Output Only] Description of this License Code." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#licenseCode for licenses.", + "default": "compute#licenseCode" + }, + "licenseAlias": { + "type": "array", + "description": "[Output Only] URL and description aliases of Licenses with the same License Code.", + "items": { + "$ref": "LicenseCodeLicenseAlias" + } + }, + "name": { + "type": "string", + "description": "[Output Only] Name of the resource. The name is 1-20 characters long and must be a valid 64 bit integer.", + "pattern": "[0-9]{0,20}?", + "annotations": { + "required": [ + "compute.licenses.insert" + ] + } + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "state": { + "type": "string", + "description": "[Output Only] Current state of this License Code.", + "enum": [ + "DISABLED", + "ENABLED", + "RESTRICTED", + "STATE_UNSPECIFIED", + "TERMINATED" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "" + ] + }, + "transferable": { + "type": "boolean", + "description": "[Output Only] If true, the license will remain attached when creating images or snapshots from disks. Otherwise, the license is not transferred." 
+ } + } + }, + "LicenseCodeLicenseAlias": { + "id": "LicenseCodeLicenseAlias", + "type": "object", + "properties": { + "description": { + "type": "string", + "description": "[Output Only] Description of this License Code." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] URL of license corresponding to this License Code." + } + } + }, "LicenseResourceRequirements": { "id": "LicenseResourceRequirements", "type": "object", @@ -6969,6 +10058,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -6984,6 +10142,10 @@ "counter": { "$ref": "LogConfigCounterOptions", "description": "Counter options." + }, + "dataAccess": { + "$ref": "LogConfigDataAccessOptions", + "description": "Data access options." } } }, @@ -7015,7 +10177,7 @@ "LogConfigCounterOptions": { "id": "LogConfigCounterOptions", "type": "object", - "description": "Options for counters", + "description": "Increment a streamz counter with the specified metric and field names.\n\nMetric names should start with a '/', generally be lowercase-only, and end in \"_count\". Field names should not contain an initial slash. 
The actual exported metric names will have \"/iam/policy\" prepended.\n\nField names correspond to IAM request parameters and field values are their respective values.\n\nAt present the only supported field names are - \"iam_principal\", corresponding to IAMContext.principal; - \"\" (empty string), resulting in one aggregated counter with no field.\n\nExamples: counter { metric: \"/debug_access_count\" field: \"iam_principal\" } ==\u003e increment counter /iam/policy/backend_debug_access_count {iam_principal=[value of IAMContext.principal]}\n\nAt this time we do not support: * multiple field names (though this may be supported in the future) * decrementing the counter * incrementing it by anything other than 1", "properties": { "field": { "type": "string", @@ -7027,6 +10189,25 @@ } } }, + "LogConfigDataAccessOptions": { + "id": "LogConfigDataAccessOptions", + "type": "object", + "description": "Write a Data Access (Gin) log", + "properties": { + "logMode": { + "type": "string", + "description": "Whether Gin logging should happen in a fail-closed manner at the caller. This is relevant only in the LocalIAM implementation, for now.", + "enum": [ + "LOG_FAIL_CLOSED", + "LOG_MODE_UNSPECIFIED" + ], + "enumDescriptions": [ + "", + "" + ] + } + } + }, "MachineType": { "id": "MachineType", "type": "object", @@ -7121,6 +10302,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code."
+ } + } } } }, @@ -7152,6 +10402,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -7377,36 +10696,6 @@ "$ref": "ManagedInstanceOverrideDiskOverride" } }, - "metadata": { - "type": "array", - "description": "Metadata overrides defined for this instance.", - "items": { - "type": "object", - "properties": { - "key": { - "type": "string", - "description": "Key for the metadata entry. Keys must conform to the following regexp: [a-zA-Z0-9-_]+, and be less than 128 bytes in length. This is reflected as part of a URL in the metadata server. Additionally, to avoid ambiguity, keys must not conflict with any other metadata keys for the project.", - "pattern": "[a-zA-Z0-9-_]{1,128}", - "annotations": { - "required": [ - "compute.instances.insert", - "compute.projects.setCommonInstanceMetadata" - ] - } - }, - "value": { - "type": "string", - "description": "Value for the metadata entry. These are free-form strings, and only have meaning as interpreted by the image running in the instance. 
The only restriction placed on values is that their size must be less than or equal to 262144 bytes (256 KiB).", - "annotations": { - "required": [ - "compute.instances.insert", - "compute.projects.setCommonInstanceMetadata" - ] - } - } - } - } - }, "origin": { "type": "string", "description": "[Output Only] Indicates where does the override come from.", @@ -7619,157 +10908,769 @@ } } }, - "NetworkInterface": { - "id": "NetworkInterface", + "NetworkEndpoint": { + "id": "NetworkEndpoint", "type": "object", - "description": "A network interface resource attached to an instance.", + "description": "The network endpoint.", "properties": { - "accessConfigs": { - "type": "array", - "description": "An array of configurations for this interface. Currently, only one access config, ONE_TO_ONE_NAT, is supported. If there are no accessConfigs specified, then this instance will have no external internet access.", - "items": { - "$ref": "AccessConfig" - } - }, - "aliasIpRanges": { - "type": "array", - "description": "An array of alias IP ranges for this network interface. Can only be specified for network interfaces on subnet-mode networks.", - "items": { - "$ref": "AliasIpRange" - } - }, - "fingerprint": { - "type": "string", - "description": "Fingerprint hash of contents stored in this network interface. This field will be ignored when inserting an Instance or adding a NetworkInterface. An up-to-date fingerprint must be provided in order to update the NetworkInterface.", - "format": "byte" - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of the resource. Always compute#networkInterface for network interfaces.", - "default": "compute#networkInterface" - }, - "name": { - "type": "string", - "description": "[Output Only] The name of the network interface, generated by the server. For network devices, these are eth0, eth1, etc." - }, - "network": { + "instance": { "type": "string", - "description": "URL of the network resource for this instance. When creating an instance, if neither the network nor the subnetwork is specified, the default network global/networks/default is used; if the network is not specified but the subnetwork is specified, the network is inferred.\n\nThis field is optional when creating a firewall rule. If not specified when creating a firewall rule, the default network global/networks/default is used.\n\nIf you specify this property, you can specify the network as a full or partial URL. For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project/global/networks/network \n- projects/project/global/networks/network \n- global/networks/default" + "description": "The name for a specific VM instance that the IP address belongs to. This is required for network endpoints of type GCE_VM_IP and GCE_VM_IP_PORT. The instance must be in the same zone of network endpoint group.\n\nThe name must be 1-63 characters long, and comply with RFC1035." }, - "networkIP": { + "ipAddress": { "type": "string", - "description": "An IPv4 internal network address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system." + "description": "Optional IPv4 address of network endpoint. The IP address must belong to a VM in GCE (either the primary IP or as part of an aliased IP range). If the IP address is not specified, then the primary IP address for the VM instance in the network that the network endpoint group belongs to will be used." 
}, - "subnetwork": { - "type": "string", - "description": "The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not provide this property. If the network is in auto subnet mode, providing the subnetwork is optional. If the network is in custom subnet mode, then this field should be specified. If you specify this property, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project/regions/region/subnetworks/subnetwork \n- regions/region/subnetworks/subnetwork" + "port": { + "type": "integer", + "description": "Optional port number of network endpoint. If not specified and the NetworkEndpointGroup.network_endpoint_type is GCE_IP_PORT, the defaultPort for the network endpoint group will be used.", + "format": "int32" } } }, - "NetworkList": { - "id": "NetworkList", + "NetworkEndpointGroup": { + "id": "NetworkEndpointGroup", "type": "object", - "description": "Contains a list of networks.", + "description": "Represents a collection of network endpoints.", "properties": { - "id": { + "creationTimestamp": { "type": "string", - "description": "[Output Only] Unique identifier for the resource; defined by the server." - }, - "items": { - "type": "array", - "description": "A list of Network resources.", - "items": { - "$ref": "Network" - } + "description": "[Output Only] Creation timestamp in RFC3339 text format." }, - "kind": { + "description": { "type": "string", - "description": "[Output Only] Type of resource. Always compute#networkList for lists of networks.", - "default": "compute#networkList" + "description": "An optional description of this resource. Provide this property when you create the resource." }, - "nextPageToken": { + "id": { "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" }, - "selfLink": { + "kind": { "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "NetworkPeering": { - "id": "NetworkPeering", - "type": "object", - "description": "A network peering attached to a network resource. The message includes the peering name, peer network, peering state, and a flag indicating whether Google Compute Engine should automatically create routes for the peering.", - "properties": { - "autoCreateRoutes": { - "type": "boolean", - "description": "Whether full mesh connectivity is created and managed automatically. When it is set to true, Google Compute Engine will automatically create and manage the routes between two networks when the state is ACTIVE. Otherwise, user needs to create routes manually to route packets to peer network." + "description": "[Output Only] Type of the resource. Always compute#networkEndpointGroup for network endpoint group.", + "default": "compute#networkEndpointGroup" }, - "name": { - "type": "string", - "description": "Name of this peering. Provided by the client when the peering is created. The name must comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all the following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash." + "loadBalancer": { + "$ref": "NetworkEndpointGroupLbNetworkEndpointGroup", + "description": "This field is only valid when the network endpoint group type is LOAD_BALANCING." }, - "network": { + "name": { "type": "string", - "description": "The URL of the peer network. It can be either full URL or partial URL. The peer network may belong to a different project. If the partial URL does not contain project, it is assumed that the peer network is in the same project as the current network." + "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash." }, - "state": { + "networkEndpointType": { "type": "string", - "description": "[Output Only] State for the peering.", + "description": "Type of network endpoints in this network endpoint group. Only supported values for LOAD_BALANCING are GCE_VM_IP or GCE_VM_IP_PORT.", "enum": [ - "ACTIVE", - "INACTIVE" + "GCE_VM_IP_PORT" ], "enumDescriptions": [ - "", "" ] }, - "stateDetails": { + "selfLink": { "type": "string", - "description": "[Output Only] Details about the current state of the peering." - } - } - }, - "NetworkRoutingConfig": { - "id": "NetworkRoutingConfig", - "type": "object", - "description": "A routing configuration attached to a network resource. The message includes the list of routers associated with the network, and a flag indicating the type of routing behavior to enforce network-wide.", - "properties": { - "routingMode": { + "description": "[Output Only] Server-defined URL for the resource." + }, + "size": { + "type": "integer", + "description": "[Output only] Number of network endpoints in the network endpoint group.", + "format": "int32" + }, + "type": { "type": "string", - "description": "The network-wide routing mode to use. If set to REGIONAL, this network's cloud routers will only advertise routes with subnetworks of this network in the same region as the router. If set to GLOBAL, this network's cloud routers will advertise routes with all subnetworks of this network, across regions.", + "description": "Specify the type of this network endpoint group. Only LOAD_BALANCING is valid for now.", "enum": [ - "GLOBAL", - "REGIONAL" + "LOAD_BALANCING" ], "enumDescriptions": [ - "", "" ] } } }, - "NetworksAddPeeringRequest": { - "id": "NetworksAddPeeringRequest", + "NetworkEndpointGroupAggregatedList": { + "id": "NetworkEndpointGroupAggregatedList", "type": "object", "properties": { - "autoCreateRoutes": { - "type": "boolean", - "description": "Whether Google Compute Engine manages the routes automatically." + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." 
}, - "name": { + "items": { + "type": "object", + "description": "A list of NetworkEndpointGroupsScopedList resources.", + "additionalProperties": { + "$ref": "NetworkEndpointGroupsScopedList", + "description": "The name of the scope that contains this set of network endpoint groups." + } + }, + "kind": { "type": "string", - "description": "Name of the peering, which should conform to RFC1035." + "description": "[Output Only] The resource type, which is always compute#networkEndpointGroupAggregatedList for aggregated lists of network endpoint groups.", + "default": "compute#networkEndpointGroupAggregatedList" }, - "peerNetwork": { + "nextPageToken": { "type": "string", - "description": "URL of the peer network. It can be either full URL or partial URL. The peer network may belong to a different project. If the partial URL does not contain project, it is assumed that the peer network is in the same project as the current network." - } - } - }, - "NetworksRemovePeeringRequest": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." 
+ } + } + } + } + }, + "NetworkEndpointGroupLbNetworkEndpointGroup": { + "id": "NetworkEndpointGroupLbNetworkEndpointGroup", + "type": "object", + "description": "Load balancing specific fields for network endpoint group of type LOAD_BALANCING.", + "properties": { + "defaultPort": { + "type": "integer", + "description": "The default port used if the port number is not specified in the network endpoint. If the network endpoint type is GCE_VM_IP, this field must not be specified.", + "format": "int32" + }, + "network": { + "type": "string", + "description": "The URL of the network to which all network endpoints in the NEG belong. Uses \"default\" project network if unspecified." + }, + "subnetwork": { + "type": "string", + "description": "Optional URL of the subnetwork to which all network endpoints in the NEG belong." + }, + "zone": { + "type": "string", + "description": "[Output Only] The URL of the zone where the network endpoint group is located." + } + } + }, + "NetworkEndpointGroupList": { + "id": "NetworkEndpointGroupList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." + }, + "items": { + "type": "array", + "description": "A list of NetworkEndpointGroup resources.", + "items": { + "$ref": "NetworkEndpointGroup" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] The resource type, which is always compute#networkEndpointGroupList for network endpoint group lists.", + "default": "compute#networkEndpointGroupList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "NetworkEndpointGroupsAttachEndpointsRequest": { + "id": "NetworkEndpointGroupsAttachEndpointsRequest", + "type": "object", + "properties": { + "networkEndpoints": { + "type": "array", + "description": "The list of network endpoints to be attached.", + "items": { + "$ref": "NetworkEndpoint" + } + } + } + }, + "NetworkEndpointGroupsDetachEndpointsRequest": { + "id": "NetworkEndpointGroupsDetachEndpointsRequest", + "type": "object", + "properties": { + "networkEndpoints": { + "type": "array", + "description": "The list of network endpoints to be detached.", + "items": { + "$ref": "NetworkEndpoint" + } + } + } + }, + "NetworkEndpointGroupsListEndpointsRequest": { + "id": "NetworkEndpointGroupsListEndpointsRequest", + "type": "object", + "properties": { + "healthStatus": { + "type": "string", + "description": "Optional query parameter for showing the health status of each network endpoint. Valid options are SKIP or SHOW. If you don't specify this parameter, the health status of network endpoints will not be provided.", + "enum": [ + "SHOW", + "SKIP" + ], + "enumDescriptions": [ + "", + "" + ] + } + } + }, + "NetworkEndpointGroupsListNetworkEndpoints": { + "id": "NetworkEndpointGroupsListNetworkEndpoints", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." + }, + "items": { + "type": "array", + "description": "A list of NetworkEndpointWithHealthStatus resources.", + "items": { + "$ref": "NetworkEndpointWithHealthStatus" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] The resource type, which is always compute#networkEndpointGroupsListNetworkEndpoints for the list of network endpoints in the specified network endpoint group.", + "default": "compute#networkEndpointGroupsListNetworkEndpoints" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "NetworkEndpointGroupsScopedList": { + "id": "NetworkEndpointGroupsScopedList", + "type": "object", + "properties": { + "networkEndpointGroups": { + "type": "array", + "description": "[Output Only] The list of network endpoint groups that are contained in this scope.", + "items": { + "$ref": "NetworkEndpointGroup" + } + }, + "warning": { + "type": "object", + "description": "[Output Only] An informational warning that replaces the list of network endpoint groups when the list is empty.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. 
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "NetworkEndpointWithHealthStatus": { + "id": "NetworkEndpointWithHealthStatus", + "type": "object", + "properties": { + "healths": { + "type": "array", + "description": "[Output only] The health status of network endpoint;", + "items": { + "$ref": "HealthStatusForNetworkEndpoint" + } + }, + "networkEndpoint": { + "$ref": "NetworkEndpoint", + "description": "[Output only] The network endpoint;" + } + } + }, + "NetworkInterface": { + "id": "NetworkInterface", + "type": "object", + "description": "A network interface resource attached to an instance.", + "properties": { + "accessConfigs": { + "type": "array", + "description": "An array of configurations for this interface. Currently, only one access config, ONE_TO_ONE_NAT, is supported. If there are no accessConfigs specified, then this instance will have no external internet access.", + "items": { + "$ref": "AccessConfig" + } + }, + "aliasIpRanges": { + "type": "array", + "description": "An array of alias IP ranges for this network interface. Can only be specified for network interfaces on subnet-mode networks.", + "items": { + "$ref": "AliasIpRange" + } + }, + "fingerprint": { + "type": "string", + "description": "Fingerprint hash of contents stored in this network interface. This field will be ignored when inserting an Instance or adding a NetworkInterface. An up-to-date fingerprint must be provided in order to update the NetworkInterface.", + "format": "byte" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#networkInterface for network interfaces.", + "default": "compute#networkInterface" + }, + "name": { + "type": "string", + "description": "[Output Only] The name of the network interface, generated by the server. For network devices, these are eth0, eth1, etc." + }, + "network": { + "type": "string", + "description": "URL of the network resource for this instance. When creating an instance, if neither the network nor the subnetwork is specified, the default network global/networks/default is used; if the network is not specified but the subnetwork is specified, the network is inferred.\n\nThis field is optional when creating a firewall rule. If not specified when creating a firewall rule, the default network global/networks/default is used.\n\nIf you specify this property, you can specify the network as a full or partial URL. 
For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project/global/networks/network \n- projects/project/global/networks/network \n- global/networks/default" + }, + "networkIP": { + "type": "string", + "description": "An IPv4 internal network address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system." + }, + "subnetwork": { + "type": "string", + "description": "The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not provide this property. If the network is in auto subnet mode, providing the subnetwork is optional. If the network is in custom subnet mode, then this field should be specified. If you specify this property, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project/regions/region/subnetworks/subnetwork \n- regions/region/subnetworks/subnetwork" + } + } + }, + "NetworkList": { + "id": "NetworkList", + "type": "object", + "description": "Contains a list of networks.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." + }, + "items": { + "type": "array", + "description": "A list of Network resources.", + "items": { + "$ref": "Network" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#networkList for lists of networks.", + "default": "compute#networkList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "NetworkPeering": { + "id": "NetworkPeering", + "type": "object", + "description": "A network peering attached to a network resource. The message includes the peering name, peer network, peering state, and a flag indicating whether Google Compute Engine should automatically create routes for the peering.", + "properties": { + "autoCreateRoutes": { + "type": "boolean", + "description": "Whether full mesh connectivity is created and managed automatically. When it is set to true, Google Compute Engine will automatically create and manage the routes between two networks when the state is ACTIVE. Otherwise, user needs to create routes manually to route packets to peer network." + }, + "name": { + "type": "string", + "description": "Name of this peering. Provided by the client when the peering is created. The name must comply with RFC1035. Specifically, the name must be 1-63 characters long and match regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all the following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash." + }, + "network": { + "type": "string", + "description": "The URL of the peer network. It can be either full URL or partial URL. The peer network may belong to a different project. If the partial URL does not contain project, it is assumed that the peer network is in the same project as the current network." + }, + "state": { + "type": "string", + "description": "[Output Only] State for the peering.", + "enum": [ + "ACTIVE", + "INACTIVE" + ], + "enumDescriptions": [ + "", + "" + ] + }, + "stateDetails": { + "type": "string", + "description": "[Output Only] Details about the current state of the peering." + } + } + }, + "NetworkRoutingConfig": { + "id": "NetworkRoutingConfig", + "type": "object", + "description": "A routing configuration attached to a network resource. The message includes the list of routers associated with the network, and a flag indicating the type of routing behavior to enforce network-wide.", + "properties": { + "routingMode": { + "type": "string", + "description": "The network-wide routing mode to use. If set to REGIONAL, this network's cloud routers will only advertise routes with subnetworks of this network in the same region as the router. If set to GLOBAL, this network's cloud routers will advertise routes with all subnetworks of this network, across regions.", + "enum": [ + "GLOBAL", + "REGIONAL" + ], + "enumDescriptions": [ + "", + "" + ] + } + } + }, + "NetworksAddPeeringRequest": { + "id": "NetworksAddPeeringRequest", + "type": "object", + "properties": { + "autoCreateRoutes": { + "type": "boolean", + "description": "Whether Google Compute Engine manages the routes automatically." 
+ }, + "name": { + "type": "string", + "description": "Name of the peering, which should conform to RFC1035." + }, + "peerNetwork": { + "type": "string", + "description": "URL of the peer network. It can be either full URL or partial URL. The peer network may belong to a different project. If the partial URL does not contain project, it is assumed that the peer network is in the same project as the current network." + } + } + }, + "NetworksRemovePeeringRequest": { "id": "NetworksRemovePeeringRequest", "type": "object", "properties": { @@ -8012,6 +11913,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -8043,6 +12013,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -8240,6 +12279,20 @@ "type": "string", "description": "[Output Only] Creation timestamp in RFC3339 text format." }, + "defaultNetworkTier": { + "type": "string", + "description": "This signifies the default network tier used for configuring resources of the project and can only take the following values: PREMIUM, STANDARD. Initially the default network tier is PREMIUM.", + "enum": [ + "PREMIUM", + "SELECT", + "STANDARD" + ], + "enumDescriptions": [ + "", + "", + "" + ] + }, "defaultServiceAccount": { "type": "string", "description": "[Output Only] Default service account used by VMs running in this project." 
@@ -8333,7 +12386,7 @@ }, "resources": { "type": "array", - "description": "Serive resources (a.k.a service projects) attached to this project as their shared VPC host.", + "description": "Service resources (a.k.a service projects) attached to this project as their shared VPC host.", "items": { "$ref": "XpnResourceId" } @@ -8350,6 +12403,26 @@ } } }, + "ProjectsSetDefaultNetworkTierRequest": { + "id": "ProjectsSetDefaultNetworkTierRequest", + "type": "object", + "properties": { + "networkTier": { + "type": "string", + "description": "Default network tier to be set.", + "enum": [ + "PREMIUM", + "SELECT", + "STANDARD" + ], + "enumDescriptions": [ + "", + "", + "" + ] + } + } + }, "ProjectsSetDefaultServiceAccountRequest": { "id": "ProjectsSetDefaultServiceAccountRequest", "type": "object", @@ -8390,6 +12463,7 @@ "INSTANCE_GROUPS", "INSTANCE_GROUP_MANAGERS", "INSTANCE_TEMPLATES", + "INTERCONNECTS", "IN_USE_ADDRESSES", "LOCAL_SSD_TOTAL_GB", "NETWORKS", @@ -8461,6 +12535,7 @@ "", "", "", + "", "" ] }, @@ -8573,19 +12648,88 @@ "items": { "$ref": "Autoscaler" } - }, - "kind": { - "type": "string", - "description": "Type of resource.", - "default": "compute#regionAutoscalerList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#regionAutoscalerList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. 
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -8616,6 +12760,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -8658,6 +12871,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -8703,6 +12985,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -8806,6 +13157,75 @@ "nextPageToken": { "type": "string", "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -8906,6 +13326,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -8978,6 +13467,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -9240,6 +13798,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -9294,6 +13921,13 @@ ] } }, + "nats": { + "type": "array", + "description": "List of Nat services created in this router. The maximum number of Nat services within a Router is 3 for Alpha.", + "items": { + "$ref": "RouterNat" + } + }, "network": { "type": "string", "description": "URI of the network to which this router belongs.", @@ -9357,6 +13991,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -9519,6 +14222,169 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "RouterNat": { + "id": "RouterNat", + "type": "object", + "description": "Represents a Nat resource. It enables the VMs within the specified subnetworks to access Internet without external IP addresses. It specifies a list of subnetworks (and the ranges within) that want to use NAT. Customers can also provide the external IPs that would be used for NAT. GCP would auto-allocate ephemeral IPs if no external IPs are provided.", + "properties": { + "autoAllocatedNatIps": { + "type": "array", + "description": "[Output Only] List of IPs allocated automatically by GCP for this Nat service. They will be raw IP strings like \"179.12.26.133\". They are ephemeral IPs allocated from the IP blocks managed by the NAT manager. This list can grow and shrink based on the number of VMs configured to use NAT.", + "items": { + "type": "string" + } + }, + "name": { + "type": "string", + "description": "Unique name of this Nat service. The name must be 1-63 characters long and comply with RFC1035.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "natIpAllocateOption": { + "type": "string", + "description": "Specify the NatIpAllocateOption. If it is AUTO_ONLY, then nat_ip should be empty.", + "enum": [ + "AUTO_ONLY", + "MANUAL_ONLY" + ], + "enumDescriptions": [ + "", + "" + ] + }, + "natIps": { + "type": "array", + "description": "List of URLs of the IP resources used for this Nat service. These IPs must be valid static external IP addresses assigned to the project. max_length is subject to change post alpha.", + "items": { + "type": "string" + } + }, + "sourceSubnetworkIpRangesToNat": { + "type": "string", + "description": "Specify the Nat option. If this field contains ALL_SUBNETWORKS_ALL_IP_RANGES or ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not be any other Router.Nat section in any Router for this network in this region.", + "enum": [ + "ALL_SUBNETWORKS_ALL_IP_RANGES", + "ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES", + "LIST_OF_SUBNETWORKS" + ], + "enumDescriptions": [ + "", + "", + "" + ] + }, + "subnetworks": { + "type": "array", + "description": "List of Subnetwork resources whose traffic should be translated by NAT Gateway. It is used only when LIST_OF_SUBNETWORKS is selected for the SubnetworkIpRangeToNatOption above.", + "items": { + "$ref": "RouterNatSubnetworkToNat" + } + } + } + }, + "RouterNatSubnetworkToNat": { + "id": "RouterNatSubnetworkToNat", + "type": "object", + "description": "Defines the IP ranges that want to use NAT for a subnetwork.", + "properties": { + "name": { + "type": "string", + "description": "URL for the subnetwork resource to use NAT." + }, + "secondaryIpRangeNames": { + "type": "array", + "description": "List of the secondary ranges of the Subnetwork that are allowed to use NAT. This can be populated only if \"LIST_OF_SECONDARY_IP_RANGES\" is one of the values in source_ip_ranges_to_nat.", + "items": { + "type": "string" + } + }, + "sourceIpRangesToNats": { + "type": "array", + "description": "Specify the options for NAT ranges in the Subnetwork. 
All usages of single value are valid except NAT_IP_RANGE_OPTION_UNSPECIFIED. The only valid option with multiple values is: [\"PRIMARY_IP_RANGE\", \"LIST_OF_SECONDARY_IP_RANGES\"] Default: [ALL_IP_RANGES]", + "items": { + "type": "string", + "enum": [ + "ALL_IP_RANGES", + "LIST_OF_SECONDARY_IP_RANGES", + "PRIMARY_IP_RANGE" + ], + "enumDescriptions": [ + "", + "", + "" + ] + } } } }, @@ -9546,6 +14412,12 @@ "$ref": "RouterStatusBgpPeerStatus" } }, + "natStatus": { + "type": "array", + "items": { + "$ref": "RouterStatusNatStatus" + } + }, "network": { "type": "string", "description": "URI of the network to which this router belongs." @@ -9612,6 +14484,48 @@ } } }, + "RouterStatusNatStatus": { + "id": "RouterStatusNatStatus", + "type": "object", + "description": "Status of a NAT contained in this router.", + "properties": { + "autoAllocatedNatIps": { + "type": "array", + "description": "List of IPs auto-allocated for NAT. Example: [\"1.1.1.1\", \"129.2.16.89\"]", + "items": { + "type": "string" + } + }, + "minExtraNatIpsNeeded": { + "type": "integer", + "description": "The number of extra IPs to allocate. This will be greater than 0 only if user-specified IPs are NOT enough to allow all configured VMs to use NAT. This value is meaningful only when auto-allocation of NAT IPs is *not* used.", + "format": "int32" + }, + "name": { + "type": "string", + "description": "Unique name of this NAT." + }, + "numVmEndpointsWithNatMappings": { + "type": "integer", + "description": "Number of VM endpoints (i.e., Nics) that can use NAT.", + "format": "int32" + }, + "userAllocatedNatIpResources": { + "type": "array", + "description": "List of fully qualified URLs of reserved IP address resources.", + "items": { + "type": "string" + } + }, + "userAllocatedNatIps": { + "type": "array", + "description": "List of IPs user-allocated for NAT. They will be raw IP strings like \"179.12.26.133\".", + "items": { + "type": "string" + } + } + } + }, "RouterStatusResponse": { "id": "RouterStatusResponse", "type": "object", @@ -9797,6 +14711,20 @@ "type": "string", "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence." }, + "portSpecification": { + "type": "string", + "description": "Specifies how port is selected for health checking, can be one of following values:\nUSE_FIXED_PORT: The port number in\nport\nis used for health checking.\nUSE_NAMED_PORT: The\nportName\nis used for health checking.\nUSE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. For other backends, the port or named port specified in the Backend Service is used for health checking.\n\n\nIf not specified, SSL health check follows behavior specified in\nport\nand\nportName\nfields.", + "enum": [ + "USE_FIXED_PORT", + "USE_NAMED_PORT", + "USE_SERVING_PORT" + ], + "enumDescriptions": [ + "", + "", + "" + ] + }, "proxyHeader": { "type": "string", "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.", @@ -9846,32 +14774,6 @@ } } }, - "SecurityPoliciesList": { - "id": "SecurityPoliciesList", - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] Unique identifier for the resource; defined by the server." 
- }, - "items": { - "type": "array", - "description": "A list of SecurityPolicy resources.", - "items": { - "$ref": "SecurityPolicy" - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#securityPoliciesList for listsof securityPolicies", - "default": "compute#securityPoliciesList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - } - } - }, "SecurityPolicy": { "id": "SecurityPolicy", "type": "object", @@ -9911,10 +14813,105 @@ "items": { "$ref": "SecurityPolicyRule" } - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for the resource." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + } + } + }, + "SecurityPolicyList": { + "id": "SecurityPolicyList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." + }, + "items": { + "type": "array", + "description": "A list of SecurityPolicy resources.", + "items": { + "$ref": "SecurityPolicy" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#securityPolicyList for listsof securityPolicies", + "default": "compute#securityPolicyList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -9942,7 +14939,7 @@ }, "kind": { "type": "string", - "description": "[Output only] Type of the resource. Always compute#rulefor security policies", + "description": "[Output only] Type of the resource. Always compute#securityPolicyRule for security policy rules", "default": "compute#securityPolicyRule" }, "match": { @@ -9965,6 +14962,10 @@ "type": "object", "description": "Represents a match condition that incoming traffic is evaluated against. Exactly one field must be specified.", "properties": { + "config": { + "$ref": "SecurityPolicyRuleMatcherConfig", + "description": "The configuration options available when specifying versioned_expr. This field must be specified if versioned_expr is specified and cannot be specified if versioned_expr is not specified." + }, "srcIpRanges": { "type": "array", "description": "CIDR IP address range. Only IPv4 is supported.", @@ -9978,6 +14979,31 @@ "items": { "type": "string" } + }, + "versionedExpr": { + "type": "string", + "description": "Preconfigured versioned expression. If this field is specified, config must also be specified. Available preconfigured expressions along with their requirements are: SRC_IPS_V1 - must specify the corresponding src_ip_range field in config.", + "enum": [ + "SRC_IPS_V1", + "VERSIONED_EXPR_UNSPECIFIED" + ], + "enumDescriptions": [ + "", + "" + ] + } + } + }, + "SecurityPolicyRuleMatcherConfig": { + "id": "SecurityPolicyRuleMatcherConfig", + "type": "object", + "properties": { + "srcIpRanges": { + "type": "array", + "description": "CIDR IP address range. Only IPv4 is supported.", + "items": { + "type": "string" + } } } }, @@ -10177,92 +15203,508 @@ "items": { "$ref": "Snapshot" } - }, - "kind": { - "type": "string", - "description": "Type of resource.", - "default": "compute#snapshotList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "SslCertificate": { - "id": "SslCertificate", - "type": "object", - "description": "An SslCertificate resource. This resource provides a mechanism to upload an SSL key and certificate to the load balancer to serve secure connections from the user.", - "properties": { - "certificate": { - "type": "string", - "description": "A local certificate file. The certificate must be in PEM format. The certificate chain must be no greater than 5 certs long. The chain must include at least one intermediate cert." 
- }, - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." - }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." - }, - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64" - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of the resource. Always compute#sslCertificate for SSL certificates.", - "default": "compute#sslCertificate" - }, - "name": { - "type": "string", - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" - }, - "privateKey": { - "type": "string", - "description": "A write-only private key in PEM format. Only insert requests will include this field." - }, - "selfLink": { - "type": "string", - "description": "[Output only] Server-defined URL for the resource." + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#snapshotList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "SslCertificate": { + "id": "SslCertificate", + "type": "object", + "description": "An SslCertificate resource. This resource provides a mechanism to upload an SSL key and certificate to the load balancer to serve secure connections from the user.", + "properties": { + "certificate": { + "type": "string", + "description": "A local certificate file. The certificate must be in PEM format. The certificate chain must be no greater than 5 certs long. The chain must include at least one intermediate cert." + }, + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#sslCertificate for SSL certificates.", + "default": "compute#sslCertificate" + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "privateKey": { + "type": "string", + "description": "A write-only private key in PEM format. Only insert requests will include this field." + }, + "selfLink": { + "type": "string", + "description": "[Output only] Server-defined URL for the resource." + } + } + }, + "SslCertificateList": { + "id": "SslCertificateList", + "type": "object", + "description": "Contains a list of SslCertificate resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." + }, + "items": { + "type": "array", + "description": "A list of SslCertificate resources.", + "items": { + "$ref": "SslCertificate" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#sslCertificateList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. 
Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "SslPoliciesList": { + "id": "SslPoliciesList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." + }, + "items": { + "type": "array", + "description": "A list of SslPolicy resources.", + "items": { + "$ref": "SslPolicy" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#sslPoliciesList for lists of sslPolicies.", + "default": "compute#sslPoliciesList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "SslPoliciesListAvailableFeaturesResponse": { + "id": "SslPoliciesListAvailableFeaturesResponse", + "type": "object", + "properties": { + "features": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "SslPolicy": { + "id": "SslPolicy", + "type": "object", + "description": "A SSL policy specifies the server-side support for SSL features. This can be attached to a TargetHttpsProxy or a TargetSslProxy. This affects connections between clients and the HTTPS or SSL proxy load balancer. They do not affect the connection between the load balancers and the backends.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "customFeatures": { + "type": "array", + "description": "List of features enabled when the selected profile is CUSTOM. The\n- method returns the set of features that can be specified in this list. This field must be empty if the profile is not CUSTOM.", + "items": { + "type": "string" + } + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "enabledFeatures": { + "type": "array", + "description": "[Output Only] The list of features enabled in the SSL policy.", + "items": { + "type": "string" + } + }, + "fingerprint": { + "type": "string", + "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a SslPolicy. 
An up-to-date fingerprint must be provided in order to update the SslPolicy.", + "format": "byte" + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output only] Type of the resource. Always compute#sslPolicyfor SSL policies.", + "default": "compute#sslPolicy" + }, + "minTlsVersion": { + "type": "string", + "description": "The minimum version of SSL protocol that can be used by the clients to establish a connection with the load balancer. This can be one of TLS_1_0, TLS_1_1, TLS_1_2, TLS_1_3.", + "enum": [ + "TLS_1_0", + "TLS_1_1", + "TLS_1_2", + "TLS_1_3" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ] + }, + "name": { + "type": "string", + "description": "Name of the resource. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "profile": { + "type": "string", + "description": "Profile specifies the set of SSL features that can be used by the load balancer when negotiating SSL with clients. This can be one of COMPATIBLE, MODERN, RESTRICTED, or CUSTOM. If using CUSTOM, the set of SSL features to enable must be specified in the customFeatures field.", + "enum": [ + "COMPATIBLE", + "CUSTOM", + "MODERN", + "RESTRICTED" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ] + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "warnings": { + "type": "array", + "description": "[Output Only] If potential misconfigurations are detected for this SSL policy, this field will be populated with warning messages.", + "items": { + "type": "object", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } } } }, - "SslCertificateList": { - "id": "SslCertificateList", + "SslPolicyReference": { + "id": "SslPolicyReference", "type": "object", - "description": "Contains a list of SslCertificate resources.", "properties": { - "id": { - "type": "string", - "description": "[Output Only] Unique identifier for the resource; defined by the server." - }, - "items": { - "type": "array", - "description": "A list of SslCertificate resources.", - "items": { - "$ref": "SslCertificate" - } - }, - "kind": { - "type": "string", - "description": "Type of resource.", - "default": "compute#sslCertificateList" - }, - "nextPageToken": { + "sslPolicy": { "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." + "description": "URL of the SSL policy resource. Set this to empty string to clear any existing SSL policy associated with the target proxy resource." } } }, @@ -10364,6 +15806,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -10395,6 +15906,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -10527,6 +16107,20 @@ "type": "string", "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence." }, + "portSpecification": { + "type": "string", + "description": "Specifies how port is selected for health checking, can be one of following values:\nUSE_FIXED_PORT: The port number in\nport\nis used for health checking.\nUSE_NAMED_PORT: The\nportName\nis used for health checking.\nUSE_SERVING_PORT: For NetworkEndpointGroup, the port specified for each network endpoint is used for health checking. 
For other backends, the port or named port specified in the Backend Service is used for health checking.\n\n\nIf not specified, TCP health check follows behavior specified in\nport\nand\nportName\nfields.", + "enum": [ + "USE_FIXED_PORT", + "USE_NAMED_PORT", + "USE_SERVING_PORT" + ], + "enumDescriptions": [ + "", + "", + "" + ] + }, "proxyHeader": { "type": "string", "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.", @@ -10634,6 +16228,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -10727,6 +16390,10 @@ "type": "string" } }, + "sslPolicy": { + "type": "string", + "description": "URL of SslPolicy resource that will be associated with the TargetHttpsProxy resource. If not set, the TargetHttpsProxy resource will not have any SSL policy configured." + }, "urlMap": { "type": "string", "description": "A fully-qualified or valid partial URL to the UrlMap resource that defines the mapping from URL to the BackendService. For example, the following are all valid URLs for specifying a URL map: \n- https://www.googleapis.compute/v1/projects/project/global/urlMaps/url-map \n- projects/project/global/urlMaps/url-map \n- global/urlMaps/url-map" @@ -10761,6 +16428,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." 
+ }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -10844,6 +16580,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -10875,6 +16680,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -11067,6 +16941,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -11115,6 +17058,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -11360,6 +17372,10 @@ "items": { "type": "string" } + }, + "sslPolicy": { + "type": "string", + "description": "URL of SslPolicy resource that will be associated with the TargetSslProxy resource. If not set, the TargetSslProxy resource will not have any SSL policy configured." } } }, @@ -11391,6 +17407,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -11500,6 +17585,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -11533,6 +17687,18 @@ "description": "[Output Only] Type of resource. Always compute#targetVpnGateway for target VPN gateways.", "default": "compute#targetVpnGateway" }, + "labelFingerprint": { + "type": "string", + "description": "A fingerprint for the labels being applied to this TargetVpnGateway, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels.\n\nTo see the latest fingerprint, make a get() request to retrieve an TargetVpnGateway.", + "format": "byte" + }, + "labels": { + "type": "object", + "description": "Labels to apply to this TargetVpnGateway resource. These can be later modified by the setLabels method. Each label key/value must comply with RFC1035. Label values may be empty.", + "additionalProperties": { + "type": "string" + } + }, "name": { "type": "string", "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", @@ -11613,6 +17779,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." 
+ }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -11644,6 +17879,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -11888,6 +18192,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -11969,6 +18342,116 @@ } } }, + "UsableSubnetwork": { + "id": "UsableSubnetwork", + "type": "object", + "description": "Subnetwork which the current user has compute.subnetworks.use permission on.", + "properties": { + "subnetwork": { + "type": "string", + "description": "Subnetwork URL." + } + } + }, + "UsableSubnetworksAggregatedList": { + "id": "UsableSubnetworksAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." 
+ }, + "items": { + "type": "array", + "description": "[Output] A list of usable subnetwork URLs.", + "items": { + "$ref": "UsableSubnetwork" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#usableSubnetworksAggregatedList for aggregated lists of usable subnetworks.", + "default": "compute#usableSubnetworksAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, "UsageExportLocation": { "id": "UsageExportLocation", "type": "object", @@ -12146,6 +18629,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -12177,6 +18729,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -12289,6 +18910,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -12403,6 +19093,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." 
+ } + } } } }, @@ -13192,8 +19951,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "testIamPermissions": { @@ -13465,7 +20223,7 @@ "type": "string", "description": "Name of the resource for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "pattern": "[a-z0-9](?:[-a-z0-9_]{0,61}[a-z0-9])?", "location": "path" } }, @@ -13602,8 +20360,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "setIamPolicy": { @@ -13623,7 +20380,7 @@ "type": "string", "description": "Name of the resource for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "pattern": "[a-z0-9](?:[-a-z0-9_]{0,61}[a-z0-9])?", "location": "path" } }, @@ -13659,7 +20416,7 @@ "type": "string", "description": "Name of the resource for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", "location": "path" } }, @@ -14085,8 +20842,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "setSecurityPolicy": { @@ -14146,7 +20902,7 @@ "type": "string", "description": "Name of the resource for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", "location": "path" } }, @@ -14228,7 +20984,7 @@ "type": "string", "description": "Name of the resource for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", "location": "path" } }, @@ -15121,8 +21877,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "testIamPermissions": { @@ -15142,7 +21897,7 @@ "type": "string", "description": "Name of the resource for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", "location": "path" } }, @@ -15294,86 +22049,192 @@ "region", "forwardingRule" ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "id": "compute.forwardingRules.get", - "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", - "httpMethod": "GET", - "description": "Returns the specified ForwardingRule resource.", - "parameters": { - "forwardingRule": { - "type": "string", - "description": "Name of the ForwardingRule resource to return.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "pattern": 
"[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region", - "forwardingRule" - ], - "response": { - "$ref": "ForwardingRule" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "id": "compute.forwardingRules.insert", - "path": "{project}/regions/{region}/forwardingRules", - "httpMethod": "POST", - "description": "Creates a ForwardingRule resource in the specified project and region using the data included in the request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "requestId": { - "type": "string", - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query" - } - }, - "parameterOrder": [ - "project", - "region" - ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.forwardingRules.get", + "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", + "httpMethod": "GET", + "description": "Returns the specified ForwardingRule resource.", + "parameters": { + "forwardingRule": { + "type": "string", + "description": "Name of the ForwardingRule resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "forwardingRule" + ], + "response": { + "$ref": "ForwardingRule" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.forwardingRules.insert", + "path": "{project}/regions/{region}/forwardingRules", + "httpMethod": "POST", + "description": "Creates a ForwardingRule resource in the specified project and region using the data included in the request.", + "parameters": { + 
"project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "request": { + "$ref": "ForwardingRule" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.forwardingRules.list", + "path": "{project}/regions/{region}/forwardingRules", + "httpMethod": "GET", + "description": "Retrieves a list of ForwardingRule resources available to the specified project and region.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", + "default": "500", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "response": { + "$ref": "ForwardingRuleList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "id": "compute.forwardingRules.patch", + "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", + "httpMethod": "PATCH", + "description": "Updates the specified forwarding rule with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. Currently, you can only patch the network_tier field.", + "parameters": { + "forwardingRule": { + "type": "string", + "description": "Name of the ForwardingRule resource to patch.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + } + }, + "parameterOrder": [ + "project", + "region", + "forwardingRule" + ], "request": { "$ref": "ForwardingRule" }, @@ -15385,113 +22246,6 @@ "https://www.googleapis.com/auth/compute" ] }, - "list": { - "id": "compute.forwardingRules.list", - "path": "{project}/regions/{region}/forwardingRules", - "httpMethod": "GET", - "description": "Retrieves a list of ForwardingRule resources available to the specified project and region.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "region" - ], - "response": { - "$ref": "ForwardingRuleList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "patch": { - "id": "compute.forwardingRules.patch", - "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", - "httpMethod": "PATCH", - "description": "Updates the specified forwarding rule with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. Currently, you can only patch the network_tier field.", - "parameters": { - "forwardingRule": { - "type": "string", - "description": "Name of the ForwardingRule resource to patch.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "requestId": { - "type": "string", - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query" - } - }, - "parameterOrder": [ - "project", - "region", - "forwardingRule" - ], - "request": { - "$ref": "ForwardingRule" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, "setLabels": { "id": "compute.forwardingRules.setLabels", "path": "{project}/regions/{region}/forwardingRules/{resource}/setLabels", @@ -15846,7 +22600,7 @@ "type": "string", "description": "Name of the resource for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", "location": "path" } }, @@ -16062,8 +22816,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "setLabels": { @@ -16160,7 +22913,7 @@ "type": "string", "description": "Name of the resource for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", "location": "path" } }, @@ -16542,8 +23295,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "testIamPermissions": { @@ -16563,7 +23315,7 @@ "type": "string", "description": "Name of the resource for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", "location": "path" } }, @@ -17342,8 +24094,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "testIamPermissions": { @@ -17363,7 +24114,7 @@ "type": "string", "description": "Name of the resource for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", "location": "path" } }, @@ -17620,8 +24371,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "testIamPermissions": { @@ -17641,7 +24391,7 @@ "type": "string", "description": "Name of the resource for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", "location": "path" } }, @@ -17766,7 +24516,7 @@ }, "requestId": { "type": "string", - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and then the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query" } }, @@ -17870,7 +24620,7 @@ "type": "string", "description": "Name of the resource for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "pattern": "[a-z0-9](?:[-a-z0-9_]{0,61}[a-z0-9])?", "location": "path" } }, @@ -17994,7 +24744,7 @@ "type": "string", "description": "Name of the resource for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "pattern": "[a-z0-9](?:[-a-z0-9_]{0,61}[a-z0-9])?", "location": "path" } }, @@ -18066,7 +24816,7 @@ "type": "string", "description": "Name of the resource for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", "location": "path" } }, @@ -18663,8 +25413,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "recreateInstances": { @@ -19067,7 +25816,7 @@ }, "requestId": { "type": "string", - "description": "begin_interface: MixerMutationRequestBuilder Request ID to support idempotency.", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query" }, "zone": { @@ -19753,7 +26502,7 @@ "type": "string", "description": "Name of the resource for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", "location": "path" } }, @@ -20497,6 +27246,58 @@ "https://www.googleapis.com/auth/compute" ] }, + "setDeletionProtection": { + "id": "compute.instances.setDeletionProtection", + "path": "{project}/zones/{zone}/instances/{resource}/setDeletionProtection", + "httpMethod": "POST", + "description": "Sets deletion protection on the instance.", + "parameters": { + "deletionProtection": { + "type": "boolean", + "description": "Whether the resource should be protected against deletion.", + "default": "true", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, + "resource": { + "type": "string", + "description": "Name of the resource for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "resource" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "setDiskAutoDelete": { "id": "compute.instances.setDiskAutoDelete", "path": "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete", @@ -21670,6 +28471,50 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "setIamPolicy": { + "id": "compute.interconnectAttachments.setIamPolicy", + "path": "{project}/regions/{region}/interconnectAttachments/{resource}/setIamPolicy", + "httpMethod": "POST", + "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "The name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "resource": { + "type": "string", + "description": "Name of the resource for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "resource" + ], + "request": { + "$ref": "Policy" + }, + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "testIamPermissions": { "id": "compute.interconnectAttachments.testIamPermissions", "path": "{project}/regions/{region}/interconnectAttachments/{resource}/testIamPermissions", @@ -21819,7 +28664,7 @@ "type": "string", "description": "Name of the resource for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", "location": "path" } }, @@ -21915,6 +28760,40 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "getIamPolicy": { + "id": "compute.interconnects.getIamPolicy", + "path": "{project}/global/interconnects/{resource}/getIamPolicy", + "httpMethod": "GET", + "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "resource": { + "type": "string", + "description": "Name of the resource for this request.", + "required": true, + "pattern": "[a-z0-9](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "resource" + ], + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "insert": { "id": "compute.interconnects.insert", "path": "{project}/global/interconnects", @@ -22035,8 +28914,43 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" + ] + }, + "setIamPolicy": { + "id": "compute.interconnects.setIamPolicy", + "path": "{project}/global/interconnects/{resource}/setIamPolicy", + "httpMethod": "POST", + "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "resource": { + "type": "string", + "description": "Name of the resource for this request.", + "required": true, + "pattern": "[a-z0-9](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "resource" + ], + "request": { + "$ref": "Policy" + }, + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" ] }, "testIamPermissions": { @@ -22056,7 +28970,7 @@ "type": "string", "description": "Name of the resource for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", "location": "path" } }, @@ -22078,6 +28992,44 @@ } } }, + "licenseCodes": { + "methods": { + "get": { + "id": "compute.licenseCodes.get", + "path": "{project}/global/licenseCodes/{licenseCode}", + "httpMethod": "GET", + "description": "Return a specified license code. License codes are mirrored across all projects that have permissions to read the License Code.", + "parameters": { + "licenseCode": { + "type": "string", + "description": "Number corresponding to the License code resource to return.", + "required": true, + "pattern": "[0-9]{0,61}?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "licenseCode" + ], + "response": { + "$ref": "LicenseCode" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, "licenses": { "methods": { "delete": { @@ -22099,6 +29051,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -22164,7 +29121,7 @@ "type": "string", "description": "Name of the resource for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "pattern": "[a-z0-9](?:[-a-z0-9_]{0,61}[a-z0-9])?", "location": "path" } }, @@ -22258,7 +29215,285 @@ "project" ], "response": { - "$ref": "LicensesListResponse" + "$ref": "LicensesListResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "setIamPolicy": { + "id": "compute.licenses.setIamPolicy", + "path": "{project}/global/licenses/{resource}/setIamPolicy", + "httpMethod": "POST", + "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "resource": { + "type": "string", + "description": "Name of the resource for this request.", + "required": true, + "pattern": "[a-z0-9](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "resource" + ], + "request": { + "$ref": "Policy" + }, + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "testIamPermissions": { + "id": "compute.licenses.testIamPermissions", + "path": "{project}/global/licenses/{resource}/testIamPermissions", + "httpMethod": "POST", + "description": "Returns permissions that a caller has on the specified resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "resource": { + "type": "string", + "description": "Name of the resource for this request.", + "required": true, + "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "resource" + ], + "request": { + "$ref": "TestPermissionsRequest" + }, + "response": { + "$ref": "TestPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "machineTypes": { + "methods": { + "aggregatedList": { + "id": "compute.machineTypes.aggregatedList", + "path": "{project}/aggregated/machineTypes", + "httpMethod": "GET", + "description": "Retrieves an aggregated list of machine types.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). 
The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "default": "500", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "MachineTypeAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "get": { + "id": "compute.machineTypes.get", + "path": "{project}/zones/{zone}/machineTypes/{machineType}", + "httpMethod": "GET", + "description": "Returns the specified machine type. 
Get a list of available machine types by making a list() request.", + "parameters": { + "machineType": { + "type": "string", + "description": "Name of the machine type to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "machineType" + ], + "response": { + "$ref": "MachineType" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "list": { + "id": "compute.machineTypes.list", + "path": "{project}/zones/{zone}/machineTypes", + "httpMethod": "GET", + "description": "Retrieves a list of machine types available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "default": "500", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "response": { + "$ref": "MachineTypeList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "networkEndpointGroups": { + "methods": { + "aggregatedList": { + "id": "compute.networkEndpointGroups.aggregatedList", + "path": "{project}/aggregated/networkEndpointGroups", + "httpMethod": "GET", + "description": "Retrieves the list of network endpoint groups and sorts them by zone.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "default": "500", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "NetworkEndpointGroupAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -22266,12 +29501,18 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "setIamPolicy": { - "id": "compute.licenses.setIamPolicy", - "path": "{project}/global/licenses/{resource}/setIamPolicy", + "attachNetworkEndpoints": { + "id": "compute.networkEndpointGroups.attachNetworkEndpoints", + "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints", "httpMethod": "POST", - "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + "description": "Attach a list of network endpoints to the specified network endpoint group.", "parameters": { + "networkEndpointGroup": { + "type": "string", + "description": "The name of the network endpoint group where you are attaching network endpoints to. It should comply with RFC1035.", + "required": true, + "location": "path" + }, "project": { "type": "string", "description": "Project ID for this request.", @@ -22279,35 +29520,46 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, - "resource": { + "requestId": { "type": "string", - "description": "Name of the resource for this request.", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, + "zone": { + "type": "string", + "description": "The name of the zone where the network endpoint group is located. 
It should comply with RFC1035.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", "location": "path" } }, "parameterOrder": [ "project", - "resource" + "zone", + "networkEndpointGroup" ], "request": { - "$ref": "Policy" + "$ref": "NetworkEndpointGroupsAttachEndpointsRequest" }, "response": { - "$ref": "Policy" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] }, - "testIamPermissions": { - "id": "compute.licenses.testIamPermissions", - "path": "{project}/global/licenses/{resource}/testIamPermissions", - "httpMethod": "POST", - "description": "Returns permissions that a caller has on the specified resource.", + "delete": { + "id": "compute.networkEndpointGroups.delete", + "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", + "httpMethod": "DELETE", + "description": "Deletes the specified network endpoint group. The network endpoints in the NEG and the VM instances they belong to are not terminated when the NEG is deleted. Note that the NEG cannot be deleted if there are backend services referencing it.", "parameters": { + "networkEndpointGroup": { + "type": "string", + "description": "The name of the network endpoint group to delete. It should comply with RFC1035.", + "required": true, + "location": "path" + }, "project": { "type": "string", "description": "Project ID for this request.", @@ -22315,62 +29567,89 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, - "resource": { + "requestId": { "type": "string", - "description": "Name of the resource for this request.", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, + "zone": { + "type": "string", + "description": "The name of the zone where the network endpoint group is located. 
It should comply with RFC1035.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", "location": "path" } }, "parameterOrder": [ "project", - "resource" + "zone", + "networkEndpointGroup" ], - "request": { - "$ref": "TestPermissionsRequest" - }, "response": { - "$ref": "TestPermissionsResponse" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] - } - } - }, - "machineTypes": { - "methods": { - "aggregatedList": { - "id": "compute.machineTypes.aggregatedList", - "path": "{project}/aggregated/machineTypes", - "httpMethod": "GET", - "description": "Retrieves an aggregated list of machine types.", + }, + "detachNetworkEndpoints": { + "id": "compute.networkEndpointGroups.detachNetworkEndpoints", + "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints", + "httpMethod": "POST", + "description": "Detach a list of network endpoints from the specified network endpoint group.", "parameters": { - "filter": { + "networkEndpointGroup": { "type": "string", - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" + "description": "The name of the network endpoint group where you are removing network endpoints. It should comply with RFC1035.", + "required": true, + "location": "path" }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" }, - "orderBy": { + "requestId": { "type": "string", - "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query" }, - "pageToken": { + "zone": { "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" + "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "networkEndpointGroup" + ], + "request": { + "$ref": "NetworkEndpointGroupsDetachEndpointsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.networkEndpointGroups.get", + "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", + "httpMethod": "GET", + "description": "Returns the specified network endpoint group. Get a list of available network endpoint groups by making a list() request.", + "parameters": { + "networkEndpointGroup": { + "type": "string", + "description": "The name of the network endpoint group. It should comply with RFC1035.", + "required": true, + "location": "path" }, "project": { "type": "string", @@ -22378,13 +29657,21 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", + "required": true, + "location": "path" } }, "parameterOrder": [ - "project" + "project", + "zone", + "networkEndpointGroup" ], "response": { - "$ref": "MachineTypeAggregatedList" + "$ref": "NetworkEndpointGroup" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -22392,18 +29679,74 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "get": { - "id": "compute.machineTypes.get", - "path": "{project}/zones/{zone}/machineTypes/{machineType}", - "httpMethod": "GET", - "description": "Returns the specified machine type. 
Get a list of available machine types by making a list() request.", + "insert": { + "id": "compute.networkEndpointGroups.insert", + "path": "{project}/zones/{zone}/networkEndpointGroups", + "httpMethod": "POST", + "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request.", "parameters": { - "machineType": { + "project": { "type": "string", - "description": "Name of the machine type to return.", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, + "zone": { + "type": "string", + "description": "The name of the zone where you want to create the network endpoint group. It should comply with RFC1035.", "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "request": { + "$ref": "NetworkEndpointGroup" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.networkEndpointGroups.list", + "path": "{project}/zones/{zone}/networkEndpointGroups", + "httpMethod": "GET", + "description": "Retrieves the list of network endpoint groups that are located in the specified project and zone.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "default": "500", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" }, "project": { "type": "string", @@ -22414,19 +29757,17 @@ }, "zone": { "type": "string", - "description": "The name of the zone for this request.", + "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" } }, "parameterOrder": [ "project", - "zone", - "machineType" + "zone" ], "response": { - "$ref": "MachineType" + "$ref": "NetworkEndpointGroupList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -22434,11 +29775,11 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "list": { - "id": "compute.machineTypes.list", - "path": "{project}/zones/{zone}/machineTypes", - "httpMethod": "GET", - "description": "Retrieves a list of machine types available to the specified project.", + "listNetworkEndpoints": { + "id": "compute.networkEndpointGroups.listNetworkEndpoints", + "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints", + "httpMethod": "POST", + "description": "List the network endpoints in the specified network endpoint group.", "parameters": { "filter": { "type": "string", @@ -22453,6 +29794,12 @@ "minimum": "0", "location": "query" }, + "networkEndpointGroup": { + "type": "string", + "description": "The name of the network endpoint group from which you want to generate a list of included network endpoints. It should comply with RFC1035.", + "required": true, + "location": "path" + }, "orderBy": { "type": "string", "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", @@ -22470,6 +29817,50 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "zone": { + "type": "string", + "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "networkEndpointGroup" + ], + "request": { + "$ref": "NetworkEndpointGroupsListEndpointsRequest" + }, + "response": { + "$ref": "NetworkEndpointGroupsListNetworkEndpoints" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "testIamPermissions": { + "id": "compute.networkEndpointGroups.testIamPermissions", + "path": "{project}/zones/{zone}/networkEndpointGroups/{resource}/testIamPermissions", + "httpMethod": "POST", + "description": "Returns permissions that a caller has on the specified resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "resource": { + "type": "string", + "description": "Name of the resource for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, "zone": { "type": "string", "description": "The name of the zone for this request.", @@ -22480,10 +29871,14 @@ }, "parameterOrder": [ "project", - "zone" + "zone", + "resource" ], + "request": { + "$ref": "TestPermissionsRequest" + }, "response": { - "$ref": "MachineTypeList" + "$ref": "TestPermissionsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -22812,8 +30207,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "removePeering": { @@ -22912,7 +30306,7 @@ "type": "string", "description": "Name of the resource for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", "location": "path" } }, @@ -23303,6 +30697,39 @@ "https://www.googleapis.com/auth/compute" ] }, + "setDefaultNetworkTier": { + "id": "compute.projects.setDefaultNetworkTier", + "path": "{project}/setDefaultNetworkTier", + "httpMethod": "POST", + "description": "Sets the default network tier of the project. The default network tier is used when an address/forwardingRule/instance is created without specifying the network tier field.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "ProjectsSetDefaultNetworkTierRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "setDefaultServiceAccount": { "id": "compute.projects.setDefaultServiceAccount", "path": "{project}/setDefaultServiceAccount", @@ -23590,7 +31017,7 @@ }, "requestId": { "type": "string", - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and then the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query" } }, @@ -23606,8 +31033,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "testIamPermissions": { @@ -23683,7 +31109,7 @@ }, "requestId": { "type": "string", - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and then the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.", + "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query" } }, @@ -23982,8 +31408,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "testIamPermissions": { @@ -24587,7 +32012,7 @@ }, "requestId": { "type": "string", - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and then the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query" }, "sourceImage": { @@ -25339,8 +32764,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "recreateInstances": { @@ -26531,8 +33955,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "preview": { @@ -26849,7 +34272,7 @@ "type": "string", "description": "Name of the resource for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", "location": "path" } }, @@ -26926,7 +34349,7 @@ }, "securityPolicy": { "type": "string", - "description": "Name of the security policy to update.", + "description": "Name of the security policy to get.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -27019,7 +34442,7 @@ "project" ], "response": { - "$ref": "SecurityPoliciesList" + "$ref": "SecurityPolicyList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -27065,8 +34488,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "testIamPermissions": { @@ -27086,7 +34508,7 @@ "type": "string", "description": "Name of the resource for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", "location": "path" } }, @@ -27199,7 +34621,7 @@ "type": "string", "description": "Name of the resource for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "pattern": "[a-z0-9](?:[-a-z0-9_]{0,61}[a-z0-9])?", "location": "path" } }, @@ -27282,7 +34704,7 @@ "type": "string", "description": "Name of the resource for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "pattern": "[a-z0-9](?:[-a-z0-9_]{0,61}[a-z0-9])?", "location": "path" } }, @@ -27354,7 +34776,7 @@ "type": "string", "description": "Name of the resource for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", "location": "path" } }, @@ -27549,7 +34971,289 @@ "type": "string", "description": "Name of the resource for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "resource" + ], + "request": { + "$ref": "TestPermissionsRequest" + }, + "response": { + "$ref": "TestPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "sslPolicies": { + "methods": { + "delete": { + "id": "compute.sslPolicies.delete", + "path": "{project}/global/sslPolicies/{sslPolicy}", + "httpMethod": "DELETE", + "description": "Deletes the specified SSL policy. 
The SSL policy resource can be deleted only if it is not in use by any TargetHttpsProxy or TargetSslProxy resources.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, + "sslPolicy": { + "type": "string", + "description": "Name of the SSL policy to delete. The name must be 1-63 characters long, and comply with RFC1035.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "sslPolicy" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.sslPolicies.get", + "path": "{project}/global/sslPolicies/{sslPolicy}", + "httpMethod": "GET", + "description": "List all of the ordered rules present in a single specified policy.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "sslPolicy": { + "type": "string", + "description": "Name of the SSL policy to update. The name must be 1-63 characters long, and comply with RFC1035.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "sslPolicy" + ], + "response": { + "$ref": "SslPolicy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.sslPolicies.insert", + "path": "{project}/global/sslPolicies", + "httpMethod": "POST", + "description": "Returns the specified SSL policy resource. Get a list of available SSL policies by making a list() request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "SslPolicy" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.sslPolicies.list", + "path": "{project}/global/sslPolicies", + "httpMethod": "GET", + "description": "List all the SSL policies that have been configured for the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "default": "500", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "SslPoliciesList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "listAvailableFeatures": { + "id": "compute.sslPolicies.listAvailableFeatures", + "path": "{project}/global/sslPolicies/listAvailableFeatures", + "httpMethod": "GET", + "description": "Lists all features that can be specified in the SSL policy when using custom profile.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "default": "500", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "SslPoliciesListAvailableFeaturesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "id": "compute.sslPolicies.patch", + "path": "{project}/global/sslPolicies/{sslPolicy}", + "httpMethod": "PATCH", + "description": "Patches the specified SSL policy with the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, + "sslPolicy": { + "type": "string", + "description": "Name of the SSL policy to update. 
The name must be 1-63 characters long, and comply with RFC1035.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "sslPolicy" + ], + "request": { + "$ref": "SslPolicy" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "testIamPermissions": { + "id": "compute.sslPolicies.testIamPermissions", + "path": "{project}/global/sslPolicies/{resource}/testIamPermissions", + "httpMethod": "POST", + "description": "Returns permissions that a caller has on the specified resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "resource": { + "type": "string", + "description": "Name of the resource for this request.", + "required": true, + "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", "location": "path" } }, @@ -27899,6 +35603,55 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "listUsable": { + "id": "compute.subnetworks.listUsable", + "path": "{project}/aggregated/subnetworks/listUsable", + "httpMethod": "GET", + "description": "Retrieves an aggregated list of usable subnetworks.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "default": "500", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". 
This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "UsableSubnetworksAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "patch": { "id": "compute.subnetworks.patch", "path": "{project}/regions/{region}/subnetworks/{subnetwork}", @@ -27945,8 +35698,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "setIamPolicy": { @@ -28237,7 +35989,243 @@ "project" ], "response": { - "$ref": "TargetHttpProxyList" + "$ref": "TargetHttpProxyList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "setUrlMap": { + "id": "compute.targetHttpProxies.setUrlMap", + "path": "{project}/targetHttpProxies/{targetHttpProxy}/setUrlMap", + "httpMethod": "POST", + "description": "Changes the URL map for TargetHttpProxy.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, + "targetHttpProxy": { + "type": "string", + "description": "Name of the TargetHttpProxy to set a URL map for.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "targetHttpProxy" + ], + "request": { + "$ref": "UrlMapReference" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "testIamPermissions": { + "id": "compute.targetHttpProxies.testIamPermissions", + "path": "{project}/global/targetHttpProxies/{resource}/testIamPermissions", + "httpMethod": "POST", + "description": "Returns permissions that a caller has on the specified resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "resource": { + "type": "string", + "description": "Name of the resource for this request.", + "required": true, + "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "resource" + ], + "request": { + "$ref": "TestPermissionsRequest" + }, + "response": { + "$ref": "TestPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "targetHttpsProxies": { + "methods": { + "delete": { + "id": "compute.targetHttpsProxies.delete", + "path": "{project}/global/targetHttpsProxies/{targetHttpsProxy}", + "httpMethod": "DELETE", + "description": "Deletes the specified TargetHttpsProxy resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, + "targetHttpsProxy": { + "type": "string", + "description": "Name of the TargetHttpsProxy resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "targetHttpsProxy" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.targetHttpsProxies.get", + "path": "{project}/global/targetHttpsProxies/{targetHttpsProxy}", + "httpMethod": "GET", + "description": "Returns the specified TargetHttpsProxy resource. Get a list of available target HTTPS proxies by making a list() request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "targetHttpsProxy": { + "type": "string", + "description": "Name of the TargetHttpsProxy resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "targetHttpsProxy" + ], + "response": { + "$ref": "TargetHttpsProxy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.targetHttpsProxies.insert", + "path": "{project}/global/targetHttpsProxies", + "httpMethod": "POST", + "description": "Creates a TargetHttpsProxy resource in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "TargetHttpsProxy" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.targetHttpsProxies.list", + "path": "{project}/global/targetHttpsProxies", + "httpMethod": "GET", + "description": "Retrieves the list of TargetHttpsProxy resources available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "default": "500", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "TargetHttpsProxyList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -28245,11 +36233,11 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "setUrlMap": { - "id": "compute.targetHttpProxies.setUrlMap", - "path": "{project}/targetHttpProxies/{targetHttpProxy}/setUrlMap", + "setQuicOverride": { + "id": "compute.targetHttpsProxies.setQuicOverride", + "path": "{project}/global/targetHttpsProxies/{targetHttpsProxy}/setQuicOverride", "httpMethod": "POST", - "description": "Changes the URL map for TargetHttpProxy.", + "description": "Sets the QUIC override policy for TargetHttpsProxy.", "parameters": { "project": { "type": "string", @@ -28263,20 +36251,19 @@ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query" }, - "targetHttpProxy": { + "targetHttpsProxy": { "type": "string", - "description": "Name of the TargetHttpProxy to set a URL map for.", + "description": "Name of the TargetHttpsProxy resource to set the QUIC override policy for. 
The name should conform to RFC1035.", "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" } }, "parameterOrder": [ "project", - "targetHttpProxy" + "targetHttpsProxy" ], "request": { - "$ref": "UrlMapReference" + "$ref": "TargetHttpsProxiesSetQuicOverrideRequest" }, "response": { "$ref": "Operation" @@ -28286,52 +36273,11 @@ "https://www.googleapis.com/auth/compute" ] }, - "testIamPermissions": { - "id": "compute.targetHttpProxies.testIamPermissions", - "path": "{project}/global/targetHttpProxies/{resource}/testIamPermissions", + "setSslCertificates": { + "id": "compute.targetHttpsProxies.setSslCertificates", + "path": "{project}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates", "httpMethod": "POST", - "description": "Returns permissions that a caller has on the specified resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "resource": { - "type": "string", - "description": "Name of the resource for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "resource" - ], - "request": { - "$ref": "TestPermissionsRequest" - }, - "response": { - "$ref": "TestPermissionsResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - } - } - }, - "targetHttpsProxies": { - "methods": { - "delete": { - "id": "compute.targetHttpsProxies.delete", - "path": "{project}/global/targetHttpsProxies/{targetHttpsProxy}", - "httpMethod": "DELETE", - "description": "Deletes the specified TargetHttpsProxy resource.", + "description": "Replaces SslCertificates for TargetHttpsProxy.", "parameters": { "project": { "type": "string", @@ -28347,40 +36293,7 @@ }, "targetHttpsProxy": { "type": "string", - "description": "Name of the TargetHttpsProxy resource to delete.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "targetHttpsProxy" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "get": { - "id": "compute.targetHttpsProxies.get", - "path": "{project}/global/targetHttpsProxies/{targetHttpsProxy}", - "httpMethod": "GET", - "description": "Returns the specified TargetHttpsProxy resource. 
Get a list of available target HTTPS proxies by making a list() request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "targetHttpsProxy": { - "type": "string", - "description": "Name of the TargetHttpsProxy resource to return.", + "description": "Name of the TargetHttpsProxy resource to set an SslCertificates resource for.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -28390,123 +36303,8 @@ "project", "targetHttpsProxy" ], - "response": { - "$ref": "TargetHttpsProxy" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "id": "compute.targetHttpsProxies.insert", - "path": "{project}/global/targetHttpsProxies", - "httpMethod": "POST", - "description": "Creates a TargetHttpsProxy resource in the specified project using the data included in the request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "requestId": { - "type": "string", - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query" - } - }, - "parameterOrder": [ - "project" - ], - "request": { - "$ref": "TargetHttpsProxy" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "list": { - "id": "compute.targetHttpsProxies.list", - "path": "{project}/global/targetHttpsProxies", - "httpMethod": "GET", - "description": "Retrieves the list of TargetHttpsProxy resources available to the specified project.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "TargetHttpsProxyList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "setQuicOverride": { - "id": "compute.targetHttpsProxies.setQuicOverride", - "path": "{project}/global/targetHttpsProxies/{targetHttpsProxy}/setQuicOverride", - "httpMethod": "POST", - "description": "Sets the QUIC override policy for TargetHttpsProxy.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "targetHttpsProxy": { - "type": "string", - "description": "Name of the TargetHttpsProxy resource to set the QUIC override policy for. 
The name should conform to RFC1035.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "targetHttpsProxy" - ], "request": { - "$ref": "TargetHttpsProxiesSetQuicOverrideRequest" + "$ref": "TargetHttpsProxiesSetSslCertificatesRequest" }, "response": { "$ref": "Operation" @@ -28516,11 +36314,11 @@ "https://www.googleapis.com/auth/compute" ] }, - "setSslCertificates": { - "id": "compute.targetHttpsProxies.setSslCertificates", - "path": "{project}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates", + "setSslPolicy": { + "id": "compute.targetHttpsProxies.setSslPolicy", + "path": "{project}/global/targetHttpsProxies/{targetHttpsProxy}/setSslPolicy", "httpMethod": "POST", - "description": "Replaces SslCertificates for TargetHttpsProxy.", + "description": "Sets the SSL policy for TargetHttpsProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the HTTPS proxy load balancer. They do not affect the connection between the load balancer and the backends.", "parameters": { "project": { "type": "string", @@ -28536,9 +36334,8 @@ }, "targetHttpsProxy": { "type": "string", - "description": "Name of the TargetHttpsProxy resource to set an SslCertificates resource for.", + "description": "Name of the TargetHttpsProxy resource whose SSL policy is to be set. The name must be 1-63 characters long, and comply with RFC1035.", "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" } }, @@ -28547,7 +36344,7 @@ "targetHttpsProxy" ], "request": { - "$ref": "TargetHttpsProxiesSetSslCertificatesRequest" + "$ref": "SslPolicyReference" }, "response": { "$ref": "Operation" @@ -28615,7 +36412,7 @@ "type": "string", "description": "Name of the resource for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", "location": "path" } }, @@ -29780,6 +37577,46 @@ "https://www.googleapis.com/auth/compute" ] }, + "setSslPolicy": { + "id": "compute.targetSslProxies.setSslPolicy", + "path": "{project}/global/targetSslProxies/{targetSslProxy}/setSslPolicy", + "httpMethod": "POST", + "description": "Sets the SSL policy for TargetSslProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the SSL proxy load balancer. They do not affect the connection between the load balancer and the backends.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, + "targetSslProxy": { + "type": "string", + "description": "Name of the TargetSslProxy resource whose SSL policy is to be set. The name must be 1-63 characters long, and comply with RFC1035.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "targetSslProxy" + ], + "request": { + "$ref": "SslPolicyReference" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "testIamPermissions": { "id": "compute.targetSslProxies.testIamPermissions", "path": "{project}/global/targetSslProxies/{resource}/testIamPermissions", @@ -29797,7 +37634,7 @@ "type": "string", "description": "Name of the resource for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", "location": "path" } }, @@ -30074,7 +37911,7 @@ "type": "string", "description": "Name of the resource for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", "location": "path" } }, @@ -30615,8 +38452,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "testIamPermissions": { @@ -30636,7 +38472,7 @@ "type": "string", "description": "Name of the resource for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", "location": "path" } }, diff --git a/vendor/google.golang.org/api/compute/v0.alpha/compute-gen.go b/vendor/google.golang.org/api/compute/v0.alpha/compute-gen.go index f44bc995f5e4..8badd5a9f9f3 100644 --- a/vendor/google.golang.org/api/compute/v0.alpha/compute-gen.go +++ b/vendor/google.golang.org/api/compute/v0.alpha/compute-gen.go @@ -97,8 +97,10 @@ func New(client *http.Client) (*Service, error) { s.InterconnectAttachments = NewInterconnectAttachmentsService(s) s.InterconnectLocations = NewInterconnectLocationsService(s) s.Interconnects = NewInterconnectsService(s) + s.LicenseCodes = NewLicenseCodesService(s) s.Licenses = NewLicensesService(s) s.MachineTypes = NewMachineTypesService(s) + s.NetworkEndpointGroups = NewNetworkEndpointGroupsService(s) s.Networks = NewNetworksService(s) s.Projects = NewProjectsService(s) s.RegionAutoscalers = NewRegionAutoscalersService(s) @@ -115,6 +117,7 @@ func New(client *http.Client) (*Service, error) { s.SecurityPolicies = NewSecurityPoliciesService(s) s.Snapshots = NewSnapshotsService(s) s.SslCertificates = NewSslCertificatesService(s) + s.SslPolicies = NewSslPoliciesService(s) s.Subnetworks = NewSubnetworksService(s) s.TargetHttpProxies = NewTargetHttpProxiesService(s) s.TargetHttpsProxies = NewTargetHttpsProxiesService(s) @@ -187,10 +190,14 @@ type Service struct { Interconnects *InterconnectsService + LicenseCodes *LicenseCodesService + Licenses *LicensesService MachineTypes *MachineTypesService + NetworkEndpointGroups *NetworkEndpointGroupsService + Networks *NetworksService Projects *ProjectsService @@ -223,6 +230,8 @@ type Service struct { SslCertificates *SslCertificatesService + SslPolicies *SslPoliciesService + Subnetworks 
*SubnetworksService TargetHttpProxies *TargetHttpProxiesService @@ -489,6 +498,15 @@ type InterconnectsService struct { s *Service } +func NewLicenseCodesService(s *Service) *LicenseCodesService { + rs := &LicenseCodesService{s: s} + return rs +} + +type LicenseCodesService struct { + s *Service +} + func NewLicensesService(s *Service) *LicensesService { rs := &LicensesService{s: s} return rs @@ -507,6 +525,15 @@ type MachineTypesService struct { s *Service } +func NewNetworkEndpointGroupsService(s *Service) *NetworkEndpointGroupsService { + rs := &NetworkEndpointGroupsService{s: s} + return rs +} + +type NetworkEndpointGroupsService struct { + s *Service +} + func NewNetworksService(s *Service) *NetworksService { rs := &NetworksService{s: s} return rs @@ -651,6 +678,15 @@ type SslCertificatesService struct { s *Service } +func NewSslPoliciesService(s *Service) *SslPoliciesService { + rs := &SslPoliciesService{s: s} + return rs +} + +type SslPoliciesService struct { + s *Service +} + func NewSubnetworksService(s *Service) *SubnetworksService { rs := &SubnetworksService{s: s} return rs @@ -883,6 +919,9 @@ type AcceleratorTypeAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *AcceleratorTypeAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -910,6 +949,102 @@ func (s *AcceleratorTypeAggregatedList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// AcceleratorTypeAggregatedListWarning: [Output Only] Informational +// warning message. +type AcceleratorTypeAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*AcceleratorTypeAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AcceleratorTypeAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod AcceleratorTypeAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type AcceleratorTypeAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AcceleratorTypeAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod AcceleratorTypeAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // AcceleratorTypeList: Contains a list of accelerator types. type AcceleratorTypeList struct { // Id: [Output Only] Unique identifier for the resource; defined by the @@ -934,6 +1069,9 @@ type AcceleratorTypeList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *AcceleratorTypeListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -961,6 +1099,102 @@ func (s *AcceleratorTypeList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// AcceleratorTypeListWarning: [Output Only] Informational warning +// message. +type AcceleratorTypeListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*AcceleratorTypeListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AcceleratorTypeListWarning) MarshalJSON() ([]byte, error) { + type noMethod AcceleratorTypeListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type AcceleratorTypeListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AcceleratorTypeListWarningData) MarshalJSON() ([]byte, error) { + type noMethod AcceleratorTypeListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type AcceleratorTypesScopedList struct { // AcceleratorTypes: [Output Only] List of accelerator types contained // in this scope. @@ -1329,6 +1563,9 @@ type AddressAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *AddressAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -1356,91 +1593,285 @@ func (s *AddressAggregatedList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// AddressList: Contains a list of addresses. -type AddressList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of Address resources. - Items []*Address `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#addressList for - // lists of addresses. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *AddressList) MarshalJSON() ([]byte, error) { - type noMethod AddressList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type AddressesScopedList struct { - // Addresses: [Output Only] List of addresses contained in this scope. - Addresses []*Address `json:"addresses,omitempty"` - - // Warning: [Output Only] Informational warning which replaces the list - // of addresses when the list is empty. 
- Warning *AddressesScopedListWarning `json:"warning,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Addresses") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Addresses") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *AddressesScopedList) MarshalJSON() ([]byte, error) { - type noMethod AddressesScopedList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// AddressesScopedListWarning: [Output Only] Informational warning which -// replaces the list of addresses when the list is empty. -type AddressesScopedListWarning struct { +// AddressAggregatedListWarning: [Output Only] Informational warning +// message. +type AddressAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*AddressAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *AddressAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod AddressAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type AddressAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AddressAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod AddressAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// AddressList: Contains a list of addresses. +type AddressList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of Address resources. + Items []*Address `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#addressList for + // lists of addresses. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *AddressListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AddressList) MarshalJSON() ([]byte, error) { + type noMethod AddressList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// AddressListWarning: [Output Only] Informational warning message. +type AddressListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*AddressListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AddressListWarning) MarshalJSON() ([]byte, error) { + type noMethod AddressListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type AddressListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. 
Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AddressListWarningData) MarshalJSON() ([]byte, error) { + type noMethod AddressListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type AddressesScopedList struct { + // Addresses: [Output Only] List of addresses contained in this scope. + Addresses []*Address `json:"addresses,omitempty"` + + // Warning: [Output Only] Informational warning which replaces the list + // of addresses when the list is empty. + Warning *AddressesScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Addresses") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Addresses") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AddressesScopedList) MarshalJSON() ([]byte, error) { + type noMethod AddressesScopedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// AddressesScopedListWarning: [Output Only] Informational warning which +// replaces the list of addresses when the list is empty. +type AddressesScopedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -1623,10 +2054,9 @@ type AttachedDisk struct { // disk_size_gb in InitializeParams. DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"` - // Index: Assigns a zero-based index to this disk, where 0 is reserved - // for the boot disk. 
For example, if you have many disks attached to an - // instance, each disk would have a unique index number. If not - // specified, the server will choose an appropriate value. + // Index: [Output Only] A zero-based index to this disk, where 0 is + // reserved for the boot disk. If you have many disks attached to an + // instance, each disk would have a unique index number. Index int64 `json:"index,omitempty"` // InitializeParams: [Input Only] Specifies the parameters for a new @@ -2075,6 +2505,9 @@ type AutoscalerAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *AutoscalerAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -2102,6 +2535,102 @@ func (s *AutoscalerAggregatedList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// AutoscalerAggregatedListWarning: [Output Only] Informational warning +// message. +type AutoscalerAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*AutoscalerAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *AutoscalerAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod AutoscalerAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type AutoscalerAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AutoscalerAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod AutoscalerAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // AutoscalerList: Contains a list of Autoscaler resources. type AutoscalerList struct { // Id: [Output Only] Unique identifier for the resource; defined by the @@ -2126,6 +2655,9 @@ type AutoscalerList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *AutoscalerListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -2153,6 +2685,101 @@ func (s *AutoscalerList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// AutoscalerListWarning: [Output Only] Informational warning message. +type AutoscalerListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*AutoscalerListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AutoscalerListWarning) MarshalJSON() ([]byte, error) { + type noMethod AutoscalerListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type AutoscalerListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AutoscalerListWarningData) MarshalJSON() ([]byte, error) { + type noMethod AutoscalerListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type AutoscalerStatusDetails struct { // Message: The status message. Message string `json:"message,omitempty"` @@ -2368,6 +2995,15 @@ type AutoscalingPolicy struct { // instances allowed. MinNumReplicas int64 `json:"minNumReplicas,omitempty"` + // Mode: Defines operating mode for this policy. + // + // Possible values: + // "OFF" + // "ON" + // "ONLY_DOWN" + // "ONLY_UP" + Mode string `json:"mode,omitempty"` + // QueueBasedScaling: Configuration parameters of autoscaling based on // queuing system. QueueBasedScaling *AutoscalingPolicyQueueBasedScaling `json:"queueBasedScaling,omitempty"` @@ -2775,6 +3411,15 @@ type Backend struct { // This cannot be used for internal load balancing. MaxConnections int64 `json:"maxConnections,omitempty"` + // MaxConnectionsPerEndpoint: The max number of simultaneous connections + // that a single backend network endpoint can handle. This is used to + // calculate the capacity of the group. Can be used in either CONNECTION + // or UTILIZATION balancing modes. For CONNECTION mode, either + // maxConnections or maxConnectionsPerEndpoint must be set. + // + // This cannot be used for internal load balancing. + MaxConnectionsPerEndpoint int64 `json:"maxConnectionsPerEndpoint,omitempty"` + // MaxConnectionsPerInstance: The max number of simultaneous connections // that a single backend instance can handle. This is used to calculate // the capacity of the group. Can be used in either CONNECTION or @@ -2792,6 +3437,14 @@ type Backend struct { // This cannot be used for internal load balancing. MaxRate int64 `json:"maxRate,omitempty"` + // MaxRatePerEndpoint: The max requests per second (RPS) that a single + // backend network endpoint can handle. This is used to calculate the + // capacity of the group. Can be used in either balancing mode. For RATE + // mode, either maxRate or maxRatePerEndpoint must be set. + // + // This cannot be used for internal load balancing. + MaxRatePerEndpoint float64 `json:"maxRatePerEndpoint,omitempty"` + // MaxRatePerInstance: The max requests per second (RPS) that a single // backend instance can handle. This is used to calculate the capacity // of the group. Can be used in either balancing mode. For RATE mode, @@ -2834,6 +3487,7 @@ func (s *Backend) UnmarshalJSON(data []byte) error { type noMethod Backend var s1 struct { CapacityScaler gensupport.JSONFloat64 `json:"capacityScaler"` + MaxRatePerEndpoint gensupport.JSONFloat64 `json:"maxRatePerEndpoint"` MaxRatePerInstance gensupport.JSONFloat64 `json:"maxRatePerInstance"` MaxUtilization gensupport.JSONFloat64 `json:"maxUtilization"` *noMethod @@ -2843,6 +3497,7 @@ func (s *Backend) UnmarshalJSON(data []byte) error { return err } s.CapacityScaler = float64(s1.CapacityScaler) + s.MaxRatePerEndpoint = float64(s1.MaxRatePerEndpoint) s.MaxRatePerInstance = float64(s1.MaxRatePerInstance) s.MaxUtilization = float64(s1.MaxUtilization) return nil @@ -2979,6 +3634,9 @@ type BackendBucketList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. 
+ Warning *BackendBucketListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -3006,6 +3664,102 @@ func (s *BackendBucketList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// BackendBucketListWarning: [Output Only] Informational warning +// message. +type BackendBucketListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*BackendBucketListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendBucketListWarning) MarshalJSON() ([]byte, error) { + type noMethod BackendBucketListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type BackendBucketListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendBucketListWarningData) MarshalJSON() ([]byte, error) { + type noMethod BackendBucketListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // BackendService: A BackendService resource. This resource defines a // group of backend virtual machines and their serving capacity. type BackendService struct { @@ -3042,20 +3796,7 @@ type BackendService struct { // When the load balancing scheme is INTERNAL, this field is not used. EnableCDN bool `json:"enableCDN,omitempty"` - // FailoverRatio: The value of the field must be in [0, 1]. If set, - // 'backends[].failover' must be set. They together define the fallback - // behavior of the primary backend: if the ratio of the healthy VMs in - // the primary backend is at or below this number, traffic arriving at - // the load-balanced IP will be directed to the failover backend. - // - // In case where 'failoverRatio' is not set or all the VMs in the backup - // backend are unhealthy, the traffic will be directed back to the - // primary backend in the "force" mode, where traffic will be spread to - // the healthy VMs with the best effort, or to all VMs when no VM is - // healthy. - // - // This field is only used with l4 load balancing. - FailoverRatio float64 `json:"failoverRatio,omitempty"` + FailoverPolicy *BackendServiceFailoverPolicy `json:"failoverPolicy,omitempty"` // Fingerprint: Fingerprint of this resource. A hash of the contents // stored in this object. This field is used in optimistic locking. This @@ -3066,8 +3807,9 @@ type BackendService struct { // HealthChecks: The list of URLs to the HttpHealthCheck or // HttpsHealthCheck resource for health checking this BackendService. // Currently at most one health check can be specified, and a health - // check is required for GCE backend services. A health check must not - // be specified for GAE app backend and Cloud Function backend. + // check is required for Compute Engine backend services. A health check + // must not be specified for App Engine backend and Cloud Function + // backend. // // For internal load balancing, a URL to a HealthCheck resource must be // specified instead. @@ -3083,6 +3825,11 @@ type BackendService struct { // for backend services. Kind string `json:"kind,omitempty"` + // LoadBalancingScheme: Indicates whether the backend service will be + // used with internal or external load balancing. A backend service + // created for one type of load balancing cannot be used with the other. + // Possible values are INTERNAL and EXTERNAL. 
+ // // Possible values: // "EXTERNAL" // "INTERNAL" @@ -3193,20 +3940,6 @@ func (s *BackendService) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -func (s *BackendService) UnmarshalJSON(data []byte) error { - type noMethod BackendService - var s1 struct { - FailoverRatio gensupport.JSONFloat64 `json:"failoverRatio"` - *noMethod - } - s1.noMethod = (*noMethod)(s) - if err := json.Unmarshal(data, &s1); err != nil { - return err - } - s.FailoverRatio = float64(s1.FailoverRatio) - return nil -} - // BackendServiceAggregatedList: Contains a list of // BackendServicesScopedList. type BackendServiceAggregatedList struct { @@ -3231,6 +3964,9 @@ type BackendServiceAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *BackendServiceAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -3258,6 +3994,102 @@ func (s *BackendServiceAggregatedList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// BackendServiceAggregatedListWarning: [Output Only] Informational +// warning message. +type BackendServiceAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*BackendServiceAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *BackendServiceAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod BackendServiceAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type BackendServiceAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendServiceAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod BackendServiceAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // BackendServiceCdnPolicy: Message containing Cloud CDN configuration // for a backend service. type BackendServiceCdnPolicy struct { @@ -3302,6 +4134,81 @@ func (s *BackendServiceCdnPolicy) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type BackendServiceFailoverPolicy struct { + // DisableConnectionDrainOnFailover: On failover or failback, this field + // indicates whether connection drain will be honored. Setting this to + // true has the following effect: connections to the old active pool are + // not drained. Connections to the new active pool use the timeout of 10 + // min (currently fixed). Setting to false has the following effect: + // both old and new connections will have a drain timeout of 10 + // min. + // + // This can be set to true only if the protocol is TCP. + // + // The default is false. + DisableConnectionDrainOnFailover bool `json:"disableConnectionDrainOnFailover,omitempty"` + + // DropTrafficIfUnhealthy: This option is used only when no healthy VMs + // are detected in the primary and backup instance groups. When set to + // true, traffic is dropped. When set to false, new connections are sent + // across all VMs in the primary group. + // + // The default is false. + DropTrafficIfUnhealthy bool `json:"dropTrafficIfUnhealthy,omitempty"` + + // FailoverRatio: The value of the field must be in [0, 1]. 
If the ratio + // of the healthy VMs in the primary backend is at or below this number, + // traffic arriving at the load-balanced IP will be directed to the + // failover backend. + // + // In case where 'failoverRatio' is not set or all the VMs in the backup + // backend are unhealthy, the traffic will be directed back to the + // primary backend in the "force" mode, where traffic will be spread to + // the healthy VMs with the best effort, or to all VMs when no VM is + // healthy. + // + // This field is only used with l4 load balancing. + FailoverRatio float64 `json:"failoverRatio,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "DisableConnectionDrainOnFailover") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. + // "DisableConnectionDrainOnFailover") to include in API requests with + // the JSON null value. By default, fields with empty values are omitted + // from API requests. However, any field with an empty value appearing + // in NullFields will be sent to the server as null. It is an error if a + // field in this list has a non-empty value. This may be used to include + // null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendServiceFailoverPolicy) MarshalJSON() ([]byte, error) { + type noMethod BackendServiceFailoverPolicy + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *BackendServiceFailoverPolicy) UnmarshalJSON(data []byte) error { + type noMethod BackendServiceFailoverPolicy + var s1 struct { + FailoverRatio gensupport.JSONFloat64 `json:"failoverRatio"` + *noMethod + } + s1.noMethod = (*noMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.FailoverRatio = float64(s1.FailoverRatio) + return nil +} + type BackendServiceGroupHealth struct { HealthStatus []*HealthStatus `json:"healthStatus,omitempty"` @@ -3395,6 +4302,9 @@ type BackendServiceList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *BackendServiceListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -3422,6 +4332,129 @@ func (s *BackendServiceList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// BackendServiceListWarning: [Output Only] Informational warning +// message. +type BackendServiceListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*BackendServiceListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendServiceListWarning) MarshalJSON() ([]byte, error) { + type noMethod BackendServiceListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type BackendServiceListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendServiceListWarningData) MarshalJSON() ([]byte, error) { + type noMethod BackendServiceListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type BackendServiceReference struct { + BackendService string `json:"backendService,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BackendService") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BackendService") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *BackendServiceReference) MarshalJSON() ([]byte, error) { + type noMethod BackendServiceReference + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type BackendServicesScopedList struct { // BackendServices: List of BackendServices contained in this scope. BackendServices []*BackendService `json:"backendServices,omitempty"` @@ -3828,6 +4861,9 @@ type CommitmentAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *CommitmentAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -3855,6 +4891,102 @@ func (s *CommitmentAggregatedList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// CommitmentAggregatedListWarning: [Output Only] Informational warning +// message. +type CommitmentAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. 
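// Editor's illustrative sketch (not part of the vendored generated client):
// every struct above repeats the same ForceSendFields/NullFields contract.
// The mirror type below shows what that contract does to the wire format,
// using the same gensupport.MarshalJSON helper the generated MarshalJSON
// methods above already call; the type and field names here are hypothetical,
// and the import path is assumed to match what this generated file imports.
package main

import (
	"fmt"

	"google.golang.org/api/gensupport"
)

type patchBody struct {
	Description     string   `json:"description,omitempty"`
	FailoverRatio   float64  `json:"failoverRatio,omitempty"`
	ForceSendFields []string `json:"-"`
	NullFields      []string `json:"-"`
}

func main() {
	p := patchBody{
		// Both fields hold their zero values, so plain encoding/json with
		// omitempty would drop them from a Patch request entirely.
		ForceSendFields: []string{"FailoverRatio"}, // send the zero explicitly
		NullFields:      []string{"Description"},   // send an explicit JSON null
	}
	b, err := gensupport.MarshalJSON(p, p.ForceSendFields, p.NullFields)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // roughly: {"description":null,"failoverRatio":0}
}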
+ // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*CommitmentAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *CommitmentAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod CommitmentAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type CommitmentAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *CommitmentAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod CommitmentAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // CommitmentList: Contains a list of Commitment resources. type CommitmentList struct { // Id: [Output Only] Unique identifier for the resource; defined by the @@ -3879,6 +5011,9 @@ type CommitmentList struct { // SelfLink: [Output Only] Server-defined URL for this resource. 
SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *CommitmentListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -3906,6 +5041,101 @@ func (s *CommitmentList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// CommitmentListWarning: [Output Only] Informational warning message. +type CommitmentListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*CommitmentListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *CommitmentListWarning) MarshalJSON() ([]byte, error) { + type noMethod CommitmentListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type CommitmentListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *CommitmentListWarningData) MarshalJSON() ([]byte, error) { + type noMethod CommitmentListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type CommitmentsScopedList struct { // Commitments: [Output Only] List of commitments contained in this // scope. @@ -4528,6 +5758,9 @@ type DiskAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *DiskAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -4555,6 +5788,102 @@ func (s *DiskAggregatedList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// DiskAggregatedListWarning: [Output Only] Informational warning +// message. +type DiskAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*DiskAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DiskAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod DiskAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type DiskAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DiskAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod DiskAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // DiskList: A list of Disk resources. type DiskList struct { // Id: [Output Only] Unique identifier for the resource; defined by the @@ -4579,6 +5908,9 @@ type DiskList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *DiskListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -4606,6 +5938,101 @@ func (s *DiskList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// DiskListWarning: [Output Only] Informational warning message. +type DiskListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*DiskListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DiskListWarning) MarshalJSON() ([]byte, error) { + type noMethod DiskListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type DiskListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
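// Editor's illustrative sketch (not part of the vendored generated client):
// the *ListWarning / *ListWarningData types above all share the same
// Code/Message/Data shape described in their comments. This standalone
// snippet decodes a made-up payload shaped like the documented
// `"data": [ { "key": "scope", "value": "zones/us-east1-d" } ]` example into
// hypothetical mirror types, to show how a caller might surface a warning.
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

type warningData struct {
	Key   string `json:"key"`
	Value string `json:"value"`
}

type listWarning struct {
	Code    string         `json:"code"`
	Message string         `json:"message"`
	Data    []*warningData `json:"data"`
}

func main() {
	// Example payload assembled from the documented fields; not a real response.
	payload := []byte(`{
		"code": "NO_RESULTS_ON_PAGE",
		"message": "No results on this page.",
		"data": [{"key": "scope", "value": "zones/us-east1-d"}]
	}`)

	var w listWarning
	if err := json.Unmarshal(payload, &w); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("warning %s: %s\n", w.Code, w.Message)
	for _, d := range w.Data {
		fmt.Printf("  %s=%s\n", d.Key, d.Value) // scope=zones/us-east1-d
	}
}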
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DiskListWarningData) MarshalJSON() ([]byte, error) { + type noMethod DiskListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type DiskMoveRequest struct { // DestinationZone: The URL of the destination zone to move the disk. // This can be a full or partial URL. For example, the following are all @@ -4738,6 +6165,9 @@ type DiskTypeAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *DiskTypeAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -4765,6 +6195,102 @@ func (s *DiskTypeAggregatedList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// DiskTypeAggregatedListWarning: [Output Only] Informational warning +// message. +type DiskTypeAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*DiskTypeAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *DiskTypeAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod DiskTypeAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type DiskTypeAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DiskTypeAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod DiskTypeAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // DiskTypeList: Contains a list of disk types. type DiskTypeList struct { // Id: [Output Only] Unique identifier for the resource; defined by the @@ -4789,6 +6315,9 @@ type DiskTypeList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *DiskTypeListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -4816,6 +6345,101 @@ func (s *DiskTypeList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// DiskTypeListWarning: [Output Only] Informational warning message. +type DiskTypeListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*DiskTypeListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DiskTypeListWarning) MarshalJSON() ([]byte, error) { + type noMethod DiskTypeListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type DiskTypeListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DiskTypeListWarningData) MarshalJSON() ([]byte, error) { + type noMethod DiskTypeListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type DiskTypesScopedList struct { // DiskTypes: [Output Only] List of disk types contained in this scope. DiskTypes []*DiskType `json:"diskTypes,omitempty"` @@ -5465,6 +7089,9 @@ type FirewallList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *FirewallListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -5492,6 +7119,101 @@ func (s *FirewallList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// FirewallListWarning: [Output Only] Informational warning message. +type FirewallListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*FirewallListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *FirewallListWarning) MarshalJSON() ([]byte, error) { + type noMethod FirewallListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type FirewallListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. 
For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *FirewallListWarningData) MarshalJSON() ([]byte, error) { + type noMethod FirewallListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // FixedOrPercent: Encapsulates numeric value that can be either // absolute or relative. type FixedOrPercent struct { @@ -5581,6 +7303,16 @@ type ForwardingRule struct { // property when you create the resource. Description string `json:"description,omitempty"` + // Fingerprint: Fingerprint of this resource. A hash of the contents + // stored in this object. This field is used in optimistic locking. This + // field will be ignored when inserting a ForwardingRule. Include the + // fingerprint in patch request to ensure that you do not overwrite + // changes that were applied from another concurrent request. + // + // To see the latest fingerprint, make a get() request to retrieve a + // ForwardingRule. + Fingerprint string `json:"fingerprint,omitempty"` + // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` @@ -5791,6 +7523,9 @@ type ForwardingRuleAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *ForwardingRuleAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -5818,6 +7553,102 @@ func (s *ForwardingRuleAggregatedList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ForwardingRuleAggregatedListWarning: [Output Only] Informational +// warning message. +type ForwardingRuleAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
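// Editor's illustrative sketch (not part of the vendored generated client):
// the Fingerprint field added to ForwardingRule above is an optimistic-locking
// token: read the resource, echo its fingerprint back in the patch, and expect
// the server to reject the patch if the resource changed in between. The types
// and functions below are hypothetical stand-ins for that server-side check,
// included only to make the read-modify-write cycle concrete.
package main

import (
	"errors"
	"fmt"
)

type rule struct {
	Description string
	Fingerprint string
}

var errStaleFingerprint = errors.New("fingerprint does not match current resource")

// patchRule mimics the documented contract: the caller must send the
// fingerprint it last read, otherwise a concurrent change is not overwritten.
func patchRule(stored *rule, newDescription, fingerprint string) error {
	if fingerprint != stored.Fingerprint {
		return errStaleFingerprint
	}
	stored.Description = newDescription
	stored.Fingerprint = "fp-2" // the server would issue a fresh fingerprint
	return nil
}

func main() {
	stored := &rule{Description: "old", Fingerprint: "fp-1"}
	fmt.Println(patchRule(stored, "new", "fp-1"))   // <nil>
	fmt.Println(patchRule(stored, "newer", "fp-1")) // stale: fingerprint is now fp-2
}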
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*ForwardingRuleAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ForwardingRuleAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod ForwardingRuleAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ForwardingRuleAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ForwardingRuleAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod ForwardingRuleAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ForwardingRuleList: Contains a list of ForwardingRule resources. type ForwardingRuleList struct { // Id: [Output Only] Unique identifier for the resource; defined by the @@ -5841,6 +7672,9 @@ type ForwardingRuleList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *ForwardingRuleListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -5868,6 +7702,129 @@ func (s *ForwardingRuleList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ForwardingRuleListWarning: [Output Only] Informational warning +// message. +type ForwardingRuleListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*ForwardingRuleListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *ForwardingRuleListWarning) MarshalJSON() ([]byte, error) { + type noMethod ForwardingRuleListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ForwardingRuleListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ForwardingRuleListWarningData) MarshalJSON() ([]byte, error) { + type noMethod ForwardingRuleListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ForwardingRuleReference struct { + ForwardingRule string `json:"forwardingRule,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ForwardingRule") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ForwardingRule") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *ForwardingRuleReference) MarshalJSON() ([]byte, error) { + type noMethod ForwardingRuleReference + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type ForwardingRulesScopedList struct { // ForwardingRules: List of forwarding rules contained in this scope. 
ForwardingRules []*ForwardingRule `json:"forwardingRules,omitempty"` @@ -6133,6 +8090,33 @@ type HTTP2HealthCheck struct { // both port and port_name are defined, port takes precedence. PortName string `json:"portName,omitempty"` + // PortSpecification: Specifies how port is selected for health + // checking, can be one of following values: + // USE_FIXED_PORT: The port number in + // port + // is used for health checking. + // USE_NAMED_PORT: The + // portName + // is used for health checking. + // USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for + // each network endpoint is used for health checking. For other + // backends, the port or named port specified in the Backend Service is + // used for health checking. + // + // + // If not specified, HTTP2 health check follows behavior specified + // in + // port + // and + // portName + // fields. + // + // Possible values: + // "USE_FIXED_PORT" + // "USE_NAMED_PORT" + // "USE_SERVING_PORT" + PortSpecification string `json:"portSpecification,omitempty"` + // ProxyHeader: Specifies the type of proxy header to append before // sending data to the backend, either NONE or PROXY_V1. The default is // NONE. @@ -6188,6 +8172,33 @@ type HTTPHealthCheck struct { // both port and port_name are defined, port takes precedence. PortName string `json:"portName,omitempty"` + // PortSpecification: Specifies how port is selected for health + // checking, can be one of following values: + // USE_FIXED_PORT: The port number in + // port + // is used for health checking. + // USE_NAMED_PORT: The + // portName + // is used for health checking. + // USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for + // each network endpoint is used for health checking. For other + // backends, the port or named port specified in the Backend Service is + // used for health checking. + // + // + // If not specified, HTTP health check follows behavior specified + // in + // port + // and + // portName + // fields. + // + // Possible values: + // "USE_FIXED_PORT" + // "USE_NAMED_PORT" + // "USE_SERVING_PORT" + PortSpecification string `json:"portSpecification,omitempty"` + // ProxyHeader: Specifies the type of proxy header to append before // sending data to the backend, either NONE or PROXY_V1. The default is // NONE. @@ -6243,6 +8254,33 @@ type HTTPSHealthCheck struct { // both port and port_name are defined, port takes precedence. PortName string `json:"portName,omitempty"` + // PortSpecification: Specifies how port is selected for health + // checking, can be one of following values: + // USE_FIXED_PORT: The port number in + // port + // is used for health checking. + // USE_NAMED_PORT: The + // portName + // is used for health checking. + // USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for + // each network endpoint is used for health checking. For other + // backends, the port or named port specified in the Backend Service is + // used for health checking. + // + // + // If not specified, HTTPS health check follows behavior specified + // in + // port + // and + // portName + // fields. + // + // Possible values: + // "USE_FIXED_PORT" + // "USE_NAMED_PORT" + // "USE_SERVING_PORT" + PortSpecification string `json:"portSpecification,omitempty"` + // ProxyHeader: Specifies the type of proxy header to append before // sending data to the backend, either NONE or PROXY_V1. The default is // NONE. @@ -6411,6 +8449,9 @@ type HealthCheckList struct { // SelfLink: [Output Only] Server-defined URL for this resource. 
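// Editor's illustrative sketch (not part of the vendored generated client):
// the new PortSpecification field above selects which port a health check
// probes. The helper below restates the documented USE_FIXED_PORT /
// USE_NAMED_PORT / USE_SERVING_PORT choices for the simple (non-NEG) case;
// the function and parameter names are hypothetical.
package main

import "fmt"

func describeProbeTarget(portSpecification string, port int64, portName string) string {
	switch portSpecification {
	case "USE_FIXED_PORT":
		return fmt.Sprintf("probe numeric port %d", port)
	case "USE_NAMED_PORT":
		return fmt.Sprintf("probe named port %q", portName)
	case "USE_SERVING_PORT":
		return "probe each endpoint's serving port (or the backend service port)"
	default:
		// Unset: fall back to the legacy port/portName behavior, where a
		// numeric port takes precedence over a named port.
		if port != 0 {
			return fmt.Sprintf("probe numeric port %d", port)
		}
		return fmt.Sprintf("probe named port %q", portName)
	}
}

func main() {
	fmt.Println(describeProbeTarget("USE_NAMED_PORT", 0, "http"))
	fmt.Println(describeProbeTarget("", 8080, "http"))
}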
SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *HealthCheckListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -6438,6 +8479,101 @@ func (s *HealthCheckList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// HealthCheckListWarning: [Output Only] Informational warning message. +type HealthCheckListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*HealthCheckListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HealthCheckListWarning) MarshalJSON() ([]byte, error) { + type noMethod HealthCheckListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type HealthCheckListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HealthCheckListWarningData) MarshalJSON() ([]byte, error) { + type noMethod HealthCheckListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // HealthCheckReference: A full or valid partial URL to a health check. // For example, the following are valid URLs: // - @@ -6510,6 +8646,53 @@ func (s *HealthStatus) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type HealthStatusForNetworkEndpoint struct { + // BackendService: URL of the backend service associated with the health + // state of the network endpoint. + BackendService *BackendServiceReference `json:"backendService,omitempty"` + + // ForwardingRule: URL of the forwarding rule associated with the health + // state of the network endpoint. + ForwardingRule *ForwardingRuleReference `json:"forwardingRule,omitempty"` + + // HealthCheck: URL of the health check associated with the health state + // of the network endpoint. + HealthCheck *HealthCheckReference `json:"healthCheck,omitempty"` + + // HealthState: Health state of the network endpoint determined based on + // the health checks configured. + // + // Possible values: + // "DRAINING" + // "HEALTHY" + // "UNHEALTHY" + // "UNKNOWN" + HealthState string `json:"healthState,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BackendService") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BackendService") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *HealthStatusForNetworkEndpoint) MarshalJSON() ([]byte, error) { + type noMethod HealthStatusForNetworkEndpoint + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type Host struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. 
@@ -6637,6 +8820,9 @@ type HostAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *HostAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -6664,6 +8850,102 @@ func (s *HostAggregatedList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// HostAggregatedListWarning: [Output Only] Informational warning +// message. +type HostAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*HostAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HostAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod HostAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type HostAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). 
+ Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HostAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod HostAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // HostList: Contains a list of hosts. type HostList struct { // Id: [Output Only] Unique identifier for the resource; defined by the @@ -6688,6 +8970,9 @@ type HostList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *HostListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -6715,6 +9000,101 @@ func (s *HostList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// HostListWarning: [Output Only] Informational warning message. +type HostListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*HostListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HostListWarning) MarshalJSON() ([]byte, error) { + type noMethod HostListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type HostListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HostListWarningData) MarshalJSON() ([]byte, error) { + type noMethod HostListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // HostRule: UrlMaps A host-matching rule for a URL. If matched, will // use the named PathMatcher to select the BackendService. type HostRule struct { @@ -6852,6 +9232,9 @@ type HostTypeAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *HostTypeAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -6879,6 +9262,102 @@ func (s *HostTypeAggregatedList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// HostTypeAggregatedListWarning: [Output Only] Informational warning +// message. +type HostTypeAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. 
For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*HostTypeAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HostTypeAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod HostTypeAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type HostTypeAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HostTypeAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod HostTypeAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // HostTypeList: Contains a list of host types. type HostTypeList struct { // Id: [Output Only] Unique identifier for the resource; defined by the @@ -6903,6 +9382,9 @@ type HostTypeList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *HostTypeListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -6930,6 +9412,101 @@ func (s *HostTypeList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// HostTypeListWarning: [Output Only] Informational warning message. +type HostTypeListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*HostTypeListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *HostTypeListWarning) MarshalJSON() ([]byte, error) { + type noMethod HostTypeListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type HostTypeListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HostTypeListWarningData) MarshalJSON() ([]byte, error) { + type noMethod HostTypeListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type HostTypesScopedList struct { // HostTypes: [Output Only] List of host types contained in this scope. HostTypes []*HostType `json:"hostTypes,omitempty"` @@ -7298,6 +9875,9 @@ type HttpHealthCheckList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *HttpHealthCheckListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -7325,6 +9905,102 @@ func (s *HttpHealthCheckList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// HttpHealthCheckListWarning: [Output Only] Informational warning +// message. +type HttpHealthCheckListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*HttpHealthCheckListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HttpHealthCheckListWarning) MarshalJSON() ([]byte, error) { + type noMethod HttpHealthCheckListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type HttpHealthCheckListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HttpHealthCheckListWarningData) MarshalJSON() ([]byte, error) { + type noMethod HttpHealthCheckListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // HttpsHealthCheck: An HttpsHealthCheck resource. This resource defines // a template for how individual instances should be checked for health, // via HTTPS. @@ -7438,6 +10114,9 @@ type HttpsHealthCheckList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *HttpsHealthCheckListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -7465,6 +10144,102 @@ func (s *HttpsHealthCheckList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// HttpsHealthCheckListWarning: [Output Only] Informational warning +// message. +type HttpsHealthCheckListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*HttpsHealthCheckListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *HttpsHealthCheckListWarning) MarshalJSON() ([]byte, error) { + type noMethod HttpsHealthCheckListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type HttpsHealthCheckListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HttpsHealthCheckListWarningData) MarshalJSON() ([]byte, error) { + type noMethod HttpsHealthCheckListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Image: An Image resource. type Image struct { // ArchiveSizeBytes: Size of the image tar.gz archive stored in Google @@ -7717,6 +10492,9 @@ type ImageList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *ImageListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -7744,6 +10522,101 @@ func (s *ImageList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ImageListWarning: [Output Only] Informational warning message. +type ImageListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*ImageListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ImageListWarning) MarshalJSON() ([]byte, error) { + type noMethod ImageListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ImageListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ImageListWarningData) MarshalJSON() ([]byte, error) { + type noMethod ImageListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Instance: An Instance resource. type Instance struct { // CanIpForward: Allows this instance to send and receive packets with @@ -7759,6 +10632,10 @@ type Instance struct { // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` + // DeletionProtection: Whether the resource should be protected against + // deletion. + DeletionProtection bool `json:"deletionProtection,omitempty"` + // Description: An optional description of this resource. Provide this // property when you create the resource. Description string `json:"description,omitempty"` @@ -7870,10 +10747,10 @@ type Instance struct { // be a dash. Name string `json:"name,omitempty"` - // NetworkInterfaces: An array of configurations for this interface. - // This specifies how this interface is configured to interact with - // other network services, such as connecting to the internet. Only one - // interface is supported per instance. + // NetworkInterfaces: An array of network configurations for this + // instance. These specify how interfaces are configured to interact + // with other network services, such as connecting to the internet. + // Multiple interfaces are supported per instance. NetworkInterfaces []*NetworkInterface `json:"networkInterfaces,omitempty"` // Scheduling: Sets the scheduling options for this instance. @@ -7975,6 +10852,9 @@ type InstanceAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *InstanceAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -8002,6 +10882,102 @@ func (s *InstanceAggregatedList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InstanceAggregatedListWarning: [Output Only] Informational warning +// message. +type InstanceAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InstanceAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. 
+ Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod InstanceAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod InstanceAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type InstanceGroup struct { // CreationTimestamp: [Output Only] The creation timestamp for this // instance group in RFC3339 text format. @@ -8115,6 +11091,9 @@ type InstanceGroupAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *InstanceGroupAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. 
googleapi.ServerResponse `json:"-"` @@ -8142,6 +11121,102 @@ func (s *InstanceGroupAggregatedList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InstanceGroupAggregatedListWarning: [Output Only] Informational +// warning message. +type InstanceGroupAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InstanceGroupAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceGroupAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // InstanceGroupList: A list of InstanceGroup resources. type InstanceGroupList struct { // Id: [Output Only] Unique identifier for the resource; defined by the @@ -8166,6 +11241,9 @@ type InstanceGroupList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *InstanceGroupListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -8193,6 +11271,102 @@ func (s *InstanceGroupList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InstanceGroupListWarning: [Output Only] Informational warning +// message. +type InstanceGroupListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InstanceGroupListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupListWarning) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceGroupListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupListWarningData) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // InstanceGroupManager: An Instance Group Manager resource. type InstanceGroupManager struct { Activities *InstanceGroupManagerActivities `json:"activities,omitempty"` @@ -8274,11 +11448,11 @@ type InstanceGroupManager struct { // server defines this URL. SelfLink string `json:"selfLink,omitempty"` - // ServiceAccount: Service account will be used as credentials for all - // operations performed by managed instance group on instances. The - // service accounts needs all permissions required to create and delete - // instances. When not specified, the service account - // {projectNumber}@cloudservices.gserviceaccount.com will be used. + // ServiceAccount: [Output Only] The service account to be used as + // credentials for all operations performed by the managed instance + // group on instances. The service accounts needs all permissions + // required to create and delete instances. By default, the service + // account {projectNumber}@cloudservices.gserviceaccount.com is used. 
ServiceAccount string `json:"serviceAccount,omitempty"` // StatefulPolicy: Stateful configuration for this Instanced Group @@ -8503,6 +11677,9 @@ type InstanceGroupManagerAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *InstanceGroupManagerAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -8530,6 +11707,102 @@ func (s *InstanceGroupManagerAggregatedList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InstanceGroupManagerAggregatedListWarning: [Output Only] +// Informational warning message. +type InstanceGroupManagerAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InstanceGroupManagerAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupManagerAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupManagerAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceGroupManagerAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. 
Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupManagerAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupManagerAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type InstanceGroupManagerAutoHealingPolicy struct { // HealthCheck: The URL for the health check that signals autohealing. HealthCheck string `json:"healthCheck,omitempty"` @@ -8599,6 +11872,9 @@ type InstanceGroupManagerList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *InstanceGroupManagerListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -8626,6 +11902,102 @@ func (s *InstanceGroupManagerList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InstanceGroupManagerListWarning: [Output Only] Informational warning +// message. +type InstanceGroupManagerListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InstanceGroupManagerListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. 
+ Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupManagerListWarning) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupManagerListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceGroupManagerListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupManagerListWarningData) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupManagerListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type InstanceGroupManagerPendingActionsSummary struct { // Creating: [Output Only] The number of instances in the managed // instance group that are pending to be created. @@ -9019,6 +12391,9 @@ type InstanceGroupManagersListPerInstanceConfigsResp struct { // the results. NextPageToken string `json:"nextPageToken,omitempty"` + // Warning: [Output Only] Informational warning message. 
+ Warning *InstanceGroupManagersListPerInstanceConfigsRespWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -9046,6 +12421,102 @@ func (s *InstanceGroupManagersListPerInstanceConfigsResp) MarshalJSON() ([]byte, return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InstanceGroupManagersListPerInstanceConfigsRespWarning: [Output Only] +// Informational warning message. +type InstanceGroupManagersListPerInstanceConfigsRespWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InstanceGroupManagersListPerInstanceConfigsRespWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupManagersListPerInstanceConfigsRespWarning) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupManagersListPerInstanceConfigsRespWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceGroupManagersListPerInstanceConfigsRespWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). 
+ Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupManagersListPerInstanceConfigsRespWarningData) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupManagersListPerInstanceConfigsRespWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type InstanceGroupManagersRecreateInstancesRequest struct { // Instances: The URLs of one or more instances to recreate. This can be // a full URL or a partial URL, such as @@ -9438,6 +12909,9 @@ type InstanceGroupsListInstances struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *InstanceGroupsListInstancesWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -9465,6 +12939,102 @@ func (s *InstanceGroupsListInstances) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InstanceGroupsListInstancesWarning: [Output Only] Informational +// warning message. +type InstanceGroupsListInstancesWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InstanceGroupsListInstancesWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupsListInstancesWarning) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupsListInstancesWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceGroupsListInstancesWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupsListInstancesWarningData) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupsListInstancesWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type InstanceGroupsListInstancesRequest struct { // InstanceState: A filter for the state of the instances in the // instance group. Valid options are ALL or RUNNING. If you do not @@ -9715,6 +13285,9 @@ type InstanceList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *InstanceListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. 
googleapi.ServerResponse `json:"-"` @@ -9742,6 +13315,101 @@ func (s *InstanceList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InstanceListWarning: [Output Only] Informational warning message. +type InstanceListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InstanceListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceListWarning) MarshalJSON() ([]byte, error) { + type noMethod InstanceListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceListWarningData) MarshalJSON() ([]byte, error) { + type noMethod InstanceListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // InstanceListReferrers: Contains a list of instance referrers. type InstanceListReferrers struct { // Id: [Output Only] Unique identifier for the resource; defined by the @@ -9766,6 +13434,9 @@ type InstanceListReferrers struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *InstanceListReferrersWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -9793,6 +13464,102 @@ func (s *InstanceListReferrers) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InstanceListReferrersWarning: [Output Only] Informational warning +// message. +type InstanceListReferrersWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InstanceListReferrersWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceListReferrersWarning) MarshalJSON() ([]byte, error) { + type noMethod InstanceListReferrersWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceListReferrersWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceListReferrersWarningData) MarshalJSON() ([]byte, error) { + type noMethod InstanceListReferrersWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type InstanceMoveRequest struct { // DestinationZone: The URL of the destination zone to move the // instance. This can be a full or partial URL. For example, the @@ -10044,6 +13811,9 @@ type InstanceTemplateList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *InstanceTemplateListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -10071,6 +13841,102 @@ func (s *InstanceTemplateList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InstanceTemplateListWarning: [Output Only] Informational warning +// message. +type InstanceTemplateListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InstanceTemplateListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceTemplateListWarning) MarshalJSON() ([]byte, error) { + type noMethod InstanceTemplateListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceTemplateListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceTemplateListWarningData) MarshalJSON() ([]byte, error) { + type noMethod InstanceTemplateListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type InstanceWithNamedPorts struct { // Instance: [Output Only] The URL of the instance. Instance string `json:"instance,omitempty"` @@ -10445,6 +14311,10 @@ type Interconnect struct { // over it. By default, it is set to ?true?. AdminEnabled bool `json:"adminEnabled,omitempty"` + // CircuitInfos: [Output Only] List of CircuitInfo objects, that + // describe the individual circuits in this LAG. + CircuitInfos []*InterconnectCircuitInfo `json:"circuitInfos,omitempty"` + // ConnectionAuthorization: [Output Only] URL to retrieve the Letter Of // Authority and Customer Facility Assignment (LOA-CFA) documentation // relating to this Interconnect. This documentation authorizes the @@ -10485,6 +14355,7 @@ type Interconnect struct { InterconnectAttachments []string `json:"interconnectAttachments,omitempty"` // Possible values: + // "IT_PARTNER" // "IT_PRIVATE" InterconnectType string `json:"interconnectType,omitempty"` @@ -10569,7 +14440,7 @@ func (s *Interconnect) MarshalJSON() ([]byte, error) { } // InterconnectAttachment: Protocol definitions for Mixer API to support -// InterconnectAttachment. Next available tag: 14 +// InterconnectAttachment. Next available tag: 18 type InterconnectAttachment struct { // CloudRouterIpAddress: [Output Only] IPv4 address + prefix length to // be configured on Cloud Router Interface for this interconnect @@ -10694,6 +14565,9 @@ type InterconnectAttachmentAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *InterconnectAttachmentAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -10721,6 +14595,102 @@ func (s *InterconnectAttachmentAggregatedList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InterconnectAttachmentAggregatedListWarning: [Output Only] +// Informational warning message. +type InterconnectAttachmentAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. 
+ // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InterconnectAttachmentAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectAttachmentAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod InterconnectAttachmentAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InterconnectAttachmentAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectAttachmentAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod InterconnectAttachmentAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // InterconnectAttachmentList: Response to the list request, and // contains a list of interconnect attachments. 
type InterconnectAttachmentList struct { @@ -10747,6 +14717,9 @@ type InterconnectAttachmentList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *InterconnectAttachmentListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -10774,6 +14747,102 @@ func (s *InterconnectAttachmentList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InterconnectAttachmentListWarning: [Output Only] Informational +// warning message. +type InterconnectAttachmentListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InterconnectAttachmentListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectAttachmentListWarning) MarshalJSON() ([]byte, error) { + type noMethod InterconnectAttachmentListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InterconnectAttachmentListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. 
Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectAttachmentListWarningData) MarshalJSON() ([]byte, error) { + type noMethod InterconnectAttachmentListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // InterconnectAttachmentPrivateInfo: Private information for an // interconnect attachment when this belongs to an interconnect of type // IT_PRIVATE. @@ -10936,6 +15005,47 @@ func (s *InterconnectAttachmentsScopedListWarningData) MarshalJSON() ([]byte, er return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InterconnectCircuitInfo: Describes a single physical circuit between +// the Customer and Google. CircuitInfo objects are created by Google, +// so all fields are output only. Next id: 4 +type InterconnectCircuitInfo struct { + // CustomerDemarcId: Customer-side demarc ID for this circuit. This will + // only be set if it was provided by the Customer to Google during + // circuit turn-up. + CustomerDemarcId string `json:"customerDemarcId,omitempty"` + + // GoogleCircuitId: Google-assigned unique ID for this circuit. Assigned + // at circuit turn-up. + GoogleCircuitId string `json:"googleCircuitId,omitempty"` + + // GoogleDemarcId: Google-side demarc ID for this circuit. Assigned at + // circuit turn-up and provided by Google to the customer in the LOA. + GoogleDemarcId string `json:"googleDemarcId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CustomerDemarcId") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CustomerDemarcId") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. 
This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectCircuitInfo) MarshalJSON() ([]byte, error) { + type noMethod InterconnectCircuitInfo + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // InterconnectList: Response to the list request, and contains a list // of interconnects. type InterconnectList struct { @@ -10961,6 +15071,9 @@ type InterconnectList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *InterconnectListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -10988,6 +15101,101 @@ func (s *InterconnectList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InterconnectListWarning: [Output Only] Informational warning message. +type InterconnectListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InterconnectListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectListWarning) MarshalJSON() ([]byte, error) { + type noMethod InterconnectListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InterconnectListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. 
For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectListWarningData) MarshalJSON() ([]byte, error) { + type noMethod InterconnectListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // InterconnectLocation: Protocol definitions for Mixer API to support // InterconnectLocation. type InterconnectLocation struct { @@ -11106,6 +15314,9 @@ type InterconnectLocationList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *InterconnectLocationListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -11133,6 +15344,102 @@ func (s *InterconnectLocationList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InterconnectLocationListWarning: [Output Only] Informational warning +// message. +type InterconnectLocationListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. 
+ // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InterconnectLocationListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectLocationListWarning) MarshalJSON() ([]byte, error) { + type noMethod InterconnectLocationListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InterconnectLocationListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectLocationListWarningData) MarshalJSON() ([]byte, error) { + type noMethod InterconnectLocationListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // InterconnectLocationRegionInfo: Information about any potential // InterconnectAttachments between an Interconnect at a specific // InterconnectLocation, and a specific Cloud Region. 
@@ -11148,6 +15455,9 @@ type InterconnectLocationRegionInfo struct { // "LP_LOCAL_REGION" LocationPresence string `json:"locationPresence,omitempty"` + // Region: URL for the region of this location. + Region string `json:"region,omitempty"` + // RegionKey: Scope key for the region of this location. RegionKey string `json:"regionKey,omitempty"` @@ -11289,6 +15599,9 @@ type IpOwnerList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *IpOwnerListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -11316,6 +15629,101 @@ func (s *IpOwnerList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// IpOwnerListWarning: [Output Only] Informational warning message. +type IpOwnerListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*IpOwnerListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *IpOwnerListWarning) MarshalJSON() ([]byte, error) { + type noMethod IpOwnerListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type IpOwnerListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. 
Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *IpOwnerListWarningData) MarshalJSON() ([]byte, error) { + type noMethod IpOwnerListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // License: A license resource. type License struct { // ChargesUseFee: [Output Only] Deprecated. This field no longer @@ -11383,6 +15791,107 @@ func (s *License) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type LicenseCode struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: [Output Only] Description of this License Code. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of resource. Always compute#licenseCode for + // licenses. + Kind string `json:"kind,omitempty"` + + // LicenseAlias: [Output Only] URL and description aliases of Licenses + // with the same License Code. + LicenseAlias []*LicenseCodeLicenseAlias `json:"licenseAlias,omitempty"` + + // Name: [Output Only] Name of the resource. The name is 1-20 characters + // long and must be a valid 64 bit integer. + Name string `json:"name,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // State: [Output Only] Current state of this License Code. + // + // Possible values: + // "DISABLED" + // "ENABLED" + // "RESTRICTED" + // "STATE_UNSPECIFIED" + // "TERMINATED" + State string `json:"state,omitempty"` + + // Transferable: [Output Only] If true, the license will remain attached + // when creating images or snapshots from disks. Otherwise, the license + // is not transferred. + Transferable bool `json:"transferable,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *LicenseCode) MarshalJSON() ([]byte, error) { + type noMethod LicenseCode + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type LicenseCodeLicenseAlias struct { + // Description: [Output Only] Description of this License Code. + Description string `json:"description,omitempty"` + + // SelfLink: [Output Only] URL of license corresponding to this License + // Code. + SelfLink string `json:"selfLink,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Description") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Description") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *LicenseCodeLicenseAlias) MarshalJSON() ([]byte, error) { + type noMethod LicenseCodeLicenseAlias + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type LicenseResourceRequirements struct { // MinGuestCpuCount: Minimum number of guest cpus required to use the // Instance. Enforced at Instance creation and Instance start. @@ -11435,6 +15944,9 @@ type LicensesListResponse struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *LicensesListResponseWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -11462,15 +15974,43 @@ func (s *LicensesListResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// LogConfig: Specifies what kind of log the caller must write -type LogConfig struct { - // CloudAudit: Cloud audit options. - CloudAudit *LogConfigCloudAuditOptions `json:"cloudAudit,omitempty"` +// LicensesListResponseWarning: [Output Only] Informational warning +// message. +type LicensesListResponseWarning struct { + // Code: [Output Only] A warning code, if applicable. 
For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` - // Counter: Counter options. - Counter *LogConfigCounterOptions `json:"counter,omitempty"` + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*LicensesListResponseWarningData `json:"data,omitempty"` - // ForceSendFields is a list of field names (e.g. "CloudAudit") to + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -11478,8 +16018,8 @@ type LogConfig struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CloudAudit") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -11487,25 +16027,96 @@ type LogConfig struct { NullFields []string `json:"-"` } -func (s *LogConfig) MarshalJSON() ([]byte, error) { - type noMethod LogConfig +func (s *LicensesListResponseWarning) MarshalJSON() ([]byte, error) { + type noMethod LicensesListResponseWarning raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// LogConfigCloudAuditOptions: Write a Cloud Audit log -type LogConfigCloudAuditOptions struct { - // AuthorizationLoggingOptions: Information used by the Cloud Audit - // Logging pipeline. - AuthorizationLoggingOptions *AuthorizationLoggingOptions `json:"authorizationLoggingOptions,omitempty"` - - // LogName: The log_name to populate in the Cloud Audit Record. - // - // Possible values: - // "ADMIN_ACTIVITY" - // "DATA_ACCESS" - // "UNSPECIFIED_LOG_NAME" - LogName string `json:"logName,omitempty"` +type LicensesListResponseWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. 
Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *LicensesListResponseWarningData) MarshalJSON() ([]byte, error) { + type noMethod LicensesListResponseWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// LogConfig: Specifies what kind of log the caller must write +type LogConfig struct { + // CloudAudit: Cloud audit options. + CloudAudit *LogConfigCloudAuditOptions `json:"cloudAudit,omitempty"` + + // Counter: Counter options. + Counter *LogConfigCounterOptions `json:"counter,omitempty"` + + // DataAccess: Data access options. + DataAccess *LogConfigDataAccessOptions `json:"dataAccess,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CloudAudit") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CloudAudit") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *LogConfig) MarshalJSON() ([]byte, error) { + type noMethod LogConfig + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// LogConfigCloudAuditOptions: Write a Cloud Audit log +type LogConfigCloudAuditOptions struct { + // AuthorizationLoggingOptions: Information used by the Cloud Audit + // Logging pipeline. + AuthorizationLoggingOptions *AuthorizationLoggingOptions `json:"authorizationLoggingOptions,omitempty"` + + // LogName: The log_name to populate in the Cloud Audit Record. 
+ // + // Possible values: + // "ADMIN_ACTIVITY" + // "DATA_ACCESS" + // "UNSPECIFIED_LOG_NAME" + LogName string `json:"logName,omitempty"` // ForceSendFields is a list of field names (e.g. // "AuthorizationLoggingOptions") to unconditionally include in API @@ -11532,7 +16143,29 @@ func (s *LogConfigCloudAuditOptions) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// LogConfigCounterOptions: Options for counters +// LogConfigCounterOptions: Increment a streamz counter with the +// specified metric and field names. +// +// Metric names should start with a '/', generally be lowercase-only, +// and end in "_count". Field names should not contain an initial slash. +// The actual exported metric names will have "/iam/policy" +// prepended. +// +// Field names correspond to IAM request parameters and field values are +// their respective values. +// +// At present the only supported field names are - "iam_principal", +// corresponding to IAMContext.principal; - "" (empty string), resulting +// in one aggretated counter with no field. +// +// Examples: counter { metric: "/debug_access_count" field: +// "iam_principal" } ==> increment counter +// /iam/policy/backend_debug_access_count {iam_principal=[value of +// IAMContext.principal]} +// +// At this time we do not support: * multiple field names (though this +// may be supported in the future) * decrementing the counter * +// incrementing it by anything other than 1 type LogConfigCounterOptions struct { // Field: The field value to attribute. Field string `json:"field,omitempty"` @@ -11563,6 +16196,40 @@ func (s *LogConfigCounterOptions) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// LogConfigDataAccessOptions: Write a Data Access (Gin) log +type LogConfigDataAccessOptions struct { + // LogMode: Whether Gin logging should happen in a fail-closed manner at + // the caller. This is relevant only in the LocalIAM implementation, for + // now. + // + // Possible values: + // "LOG_FAIL_CLOSED" + // "LOG_MODE_UNSPECIFIED" + LogMode string `json:"logMode,omitempty"` + + // ForceSendFields is a list of field names (e.g. "LogMode") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "LogMode") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *LogConfigDataAccessOptions) MarshalJSON() ([]byte, error) { + type noMethod LogConfigDataAccessOptions + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // MachineType: A Machine Type resource. type MachineType struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text @@ -11667,6 +16334,9 @@ type MachineTypeAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. 
SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *MachineTypeAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -11694,6 +16364,102 @@ func (s *MachineTypeAggregatedList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// MachineTypeAggregatedListWarning: [Output Only] Informational warning +// message. +type MachineTypeAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*MachineTypeAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *MachineTypeAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod MachineTypeAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type MachineTypeAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. 
+ Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *MachineTypeAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod MachineTypeAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // MachineTypeList: Contains a list of machine types. type MachineTypeList struct { // Id: [Output Only] Unique identifier for the resource; defined by the @@ -11718,6 +16484,9 @@ type MachineTypeList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *MachineTypeListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -11745,6 +16514,101 @@ func (s *MachineTypeList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// MachineTypeListWarning: [Output Only] Informational warning message. +type MachineTypeListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*MachineTypeListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *MachineTypeListWarning) MarshalJSON() ([]byte, error) { + type noMethod MachineTypeListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type MachineTypeListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *MachineTypeListWarningData) MarshalJSON() ([]byte, error) { + type noMethod MachineTypeListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type MachineTypesScopedList struct { // MachineTypes: [Output Only] List of machine types contained in this // scope. @@ -12079,9 +16943,6 @@ type ManagedInstanceOverride struct { // Disks: Disk overrides defined for this instance Disks []*ManagedInstanceOverrideDiskOverride `json:"disks,omitempty"` - // Metadata: Metadata overrides defined for this instance. - Metadata []*ManagedInstanceOverrideMetadata `json:"metadata,omitempty"` - // Origin: [Output Only] Indicates where does the override come from. // // Possible values: @@ -12112,43 +16973,6 @@ func (s *ManagedInstanceOverride) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ManagedInstanceOverrideMetadata struct { - // Key: Key for the metadata entry. Keys must conform to the following - // regexp: [a-zA-Z0-9-_]+, and be less than 128 bytes in length. This is - // reflected as part of a URL in the metadata server. 
Additionally, to - // avoid ambiguity, keys must not conflict with any other metadata keys - // for the project. - Key string `json:"key,omitempty"` - - // Value: Value for the metadata entry. These are free-form strings, and - // only have meaning as interpreted by the image running in the - // instance. The only restriction placed on values is that their size - // must be less than or equal to 262144 bytes (256 KiB). - Value string `json:"value,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Key") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ManagedInstanceOverrideMetadata) MarshalJSON() ([]byte, error) { - type noMethod ManagedInstanceOverrideMetadata - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - type ManagedInstanceOverrideDiskOverride struct { // DeviceName: The name of the device on the VM DeviceName string `json:"deviceName,omitempty"` @@ -12436,287 +17260,29 @@ func (s *Network) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NetworkInterface: A network interface resource attached to an -// instance. -type NetworkInterface struct { - // AccessConfigs: An array of configurations for this interface. - // Currently, only one access config, ONE_TO_ONE_NAT, is supported. If - // there are no accessConfigs specified, then this instance will have no - // external internet access. - AccessConfigs []*AccessConfig `json:"accessConfigs,omitempty"` - - // AliasIpRanges: An array of alias IP ranges for this network - // interface. Can only be specified for network interfaces on - // subnet-mode networks. - AliasIpRanges []*AliasIpRange `json:"aliasIpRanges,omitempty"` - - // Fingerprint: Fingerprint hash of contents stored in this network - // interface. This field will be ignored when inserting an Instance or - // adding a NetworkInterface. An up-to-date fingerprint must be provided - // in order to update the NetworkInterface. - Fingerprint string `json:"fingerprint,omitempty"` - - // Kind: [Output Only] Type of the resource. Always - // compute#networkInterface for network interfaces. - Kind string `json:"kind,omitempty"` - - // Name: [Output Only] The name of the network interface, generated by - // the server. For network devices, these are eth0, eth1, etc. - Name string `json:"name,omitempty"` - - // Network: URL of the network resource for this instance. When creating - // an instance, if neither the network nor the subnetwork is specified, - // the default network global/networks/default is used; if the network - // is not specified but the subnetwork is specified, the network is - // inferred. - // - // This field is optional when creating a firewall rule. 
If not - // specified when creating a firewall rule, the default network - // global/networks/default is used. - // - // If you specify this property, you can specify the network as a full - // or partial URL. For example, the following are all valid URLs: - // - - // https://www.googleapis.com/compute/v1/projects/project/global/networks/network - // - projects/project/global/networks/network - // - global/networks/default - Network string `json:"network,omitempty"` - - // NetworkIP: An IPv4 internal network address to assign to the instance - // for this network interface. If not specified by the user, an unused - // internal IP is assigned by the system. - NetworkIP string `json:"networkIP,omitempty"` - - // Subnetwork: The URL of the Subnetwork resource for this instance. If - // the network resource is in legacy mode, do not provide this property. - // If the network is in auto subnet mode, providing the subnetwork is - // optional. If the network is in custom subnet mode, then this field - // should be specified. If you specify this property, you can specify - // the subnetwork as a full or partial URL. For example, the following - // are all valid URLs: - // - - // https://www.googleapis.com/compute/v1/projects/project/regions/region/subnetworks/subnetwork - // - regions/region/subnetworks/subnetwork - Subnetwork string `json:"subnetwork,omitempty"` - - // ForceSendFields is a list of field names (e.g. "AccessConfigs") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "AccessConfigs") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *NetworkInterface) MarshalJSON() ([]byte, error) { - type noMethod NetworkInterface - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// NetworkList: Contains a list of networks. -type NetworkList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of Network resources. - Items []*Network `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#networkList for - // lists of networks. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. 
- googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *NetworkList) MarshalJSON() ([]byte, error) { - type noMethod NetworkList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// NetworkPeering: A network peering attached to a network resource. The -// message includes the peering name, peer network, peering state, and a -// flag indicating whether Google Compute Engine should automatically -// create routes for the peering. -type NetworkPeering struct { - // AutoCreateRoutes: Whether full mesh connectivity is created and - // managed automatically. When it is set to true, Google Compute Engine - // will automatically create and manage the routes between two networks - // when the state is ACTIVE. Otherwise, user needs to create routes - // manually to route packets to peer network. - AutoCreateRoutes bool `json:"autoCreateRoutes,omitempty"` - - // Name: Name of this peering. Provided by the client when the peering - // is created. The name must comply with RFC1035. Specifically, the name - // must be 1-63 characters long and match regular expression - // [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a - // lowercase letter, and all the following characters must be a dash, - // lowercase letter, or digit, except the last character, which cannot - // be a dash. - Name string `json:"name,omitempty"` - - // Network: The URL of the peer network. It can be either full URL or - // partial URL. The peer network may belong to a different project. If - // the partial URL does not contain project, it is assumed that the peer - // network is in the same project as the current network. - Network string `json:"network,omitempty"` - - // State: [Output Only] State for the peering. - // - // Possible values: - // "ACTIVE" - // "INACTIVE" - State string `json:"state,omitempty"` - - // StateDetails: [Output Only] Details about the current state of the - // peering. - StateDetails string `json:"stateDetails,omitempty"` - - // ForceSendFields is a list of field names (e.g. "AutoCreateRoutes") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "AutoCreateRoutes") to - // include in API requests with the JSON null value. 
By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *NetworkPeering) MarshalJSON() ([]byte, error) { - type noMethod NetworkPeering - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// NetworkRoutingConfig: A routing configuration attached to a network -// resource. The message includes the list of routers associated with -// the network, and a flag indicating the type of routing behavior to -// enforce network-wide. -type NetworkRoutingConfig struct { - // RoutingMode: The network-wide routing mode to use. If set to - // REGIONAL, this network's cloud routers will only advertise routes - // with subnetworks of this network in the same region as the router. If - // set to GLOBAL, this network's cloud routers will advertise routes - // with all subnetworks of this network, across regions. +// NetworkEndpoint: The network endpoint. +type NetworkEndpoint struct { + // Instance: The name for a specific VM instance that the IP address + // belongs to. This is required for network endpoints of type GCE_VM_IP + // and GCE_VM_IP_PORT. The instance must be in the same zone of network + // endpoint group. // - // Possible values: - // "GLOBAL" - // "REGIONAL" - RoutingMode string `json:"routingMode,omitempty"` - - // ForceSendFields is a list of field names (e.g. "RoutingMode") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "RoutingMode") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *NetworkRoutingConfig) MarshalJSON() ([]byte, error) { - type noMethod NetworkRoutingConfig - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type NetworksAddPeeringRequest struct { - // AutoCreateRoutes: Whether Google Compute Engine manages the routes - // automatically. - AutoCreateRoutes bool `json:"autoCreateRoutes,omitempty"` - - // Name: Name of the peering, which should conform to RFC1035. - Name string `json:"name,omitempty"` - - // PeerNetwork: URL of the peer network. It can be either full URL or - // partial URL. The peer network may belong to a different project. If - // the partial URL does not contain project, it is assumed that the peer - // network is in the same project as the current network. - PeerNetwork string `json:"peerNetwork,omitempty"` - - // ForceSendFields is a list of field names (e.g. "AutoCreateRoutes") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. 
However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "AutoCreateRoutes") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} + // The name must be 1-63 characters long, and comply with RFC1035. + Instance string `json:"instance,omitempty"` -func (s *NetworksAddPeeringRequest) MarshalJSON() ([]byte, error) { - type noMethod NetworksAddPeeringRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // IpAddress: Optional IPv4 address of network endpoint. The IP address + // must belong to a VM in GCE (either the primary IP or as part of an + // aliased IP range). If the IP address is not specified, then the + // primary IP address for the VM instance in the network that the + // network endpoint group belongs to will be used. + IpAddress string `json:"ipAddress,omitempty"` -type NetworksRemovePeeringRequest struct { - // Name: Name of the peering, which should conform to RFC1035. - Name string `json:"name,omitempty"` + // Port: Optional port number of network endpoint. If not specified and + // the NetworkEndpointGroup.network_endpoint_type is GCE_IP_PORT, the + // defaultPort for the network endpoint group will be used. + Port int64 `json:"port,omitempty"` - // ForceSendFields is a list of field names (e.g. "Name") to + // ForceSendFields is a list of field names (e.g. "Instance") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -12724,8 +17290,8 @@ type NetworksRemovePeeringRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Name") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Instance") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -12733,118 +17299,70 @@ type NetworksRemovePeeringRequest struct { NullFields []string `json:"-"` } -func (s *NetworksRemovePeeringRequest) MarshalJSON() ([]byte, error) { - type noMethod NetworksRemovePeeringRequest +func (s *NetworkEndpoint) MarshalJSON() ([]byte, error) { + type noMethod NetworkEndpoint raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Operation: An Operation resource, used to manage asynchronous API -// requests. -type Operation struct { - // ClientOperationId: [Output Only] Reserved for future use. - ClientOperationId string `json:"clientOperationId,omitempty"` - - // CreationTimestamp: [Deprecated] This field is deprecated. 
+// NetworkEndpointGroup: Represents a collection of network endpoints. +type NetworkEndpointGroup struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` - // Description: [Output Only] A textual description of the operation, - // which is set when the operation is created. + // Description: An optional description of this resource. Provide this + // property when you create the resource. Description string `json:"description,omitempty"` - // EndTime: [Output Only] The time that this operation was completed. - // This value is in RFC3339 text format. - EndTime string `json:"endTime,omitempty"` - - // Error: [Output Only] If errors are generated during processing of the - // operation, this field will be populated. - Error *OperationError `json:"error,omitempty"` - - // HttpErrorMessage: [Output Only] If the operation fails, this field - // contains the HTTP error message that was returned, such as NOT FOUND. - HttpErrorMessage string `json:"httpErrorMessage,omitempty"` - - // HttpErrorStatusCode: [Output Only] If the operation fails, this field - // contains the HTTP error status code that was returned. For example, a - // 404 means the resource was not found. - HttpErrorStatusCode int64 `json:"httpErrorStatusCode,omitempty"` - // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` - // InsertTime: [Output Only] The time that this operation was requested. - // This value is in RFC3339 text format. - InsertTime string `json:"insertTime,omitempty"` - - // Kind: [Output Only] Type of the resource. Always compute#operation - // for Operation resources. + // Kind: [Output Only] Type of the resource. Always + // compute#networkEndpointGroup for network endpoint group. Kind string `json:"kind,omitempty"` - // Name: [Output Only] Name of the resource. - Name string `json:"name,omitempty"` - - // OperationType: [Output Only] The type of operation, such as insert, - // update, or delete, and so on. - OperationType string `json:"operationType,omitempty"` + // LoadBalancer: This field is only valid when the network endpoint + // group type is LOAD_BALANCING. + LoadBalancer *NetworkEndpointGroupLbNetworkEndpointGroup `json:"loadBalancer,omitempty"` - // Progress: [Output Only] An optional progress indicator that ranges - // from 0 to 100. There is no requirement that this be linear or support - // any granularity of operations. This should not be used to guess when - // the operation will be complete. This number should monotonically - // increase as the operation progresses. - Progress int64 `json:"progress,omitempty"` + // Name: Name of the resource; provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` - // Region: [Output Only] The URL of the region where the operation - // resides. Only available when performing regional operations. - Region string `json:"region,omitempty"` + // NetworkEndpointType: Type of network endpoints in this network + // endpoint group. 
Only supported values for LOAD_BALANCING are + // GCE_VM_IP or GCE_VM_IP_PORT. + // + // Possible values: + // "GCE_VM_IP_PORT" + NetworkEndpointType string `json:"networkEndpointType,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // StartTime: [Output Only] The time that this operation was started by - // the server. This value is in RFC3339 text format. - StartTime string `json:"startTime,omitempty"` + // Size: [Output only] Number of network endpoints in the network + // endpoint group. + Size int64 `json:"size,omitempty"` - // Status: [Output Only] The status of the operation, which can be one - // of the following: PENDING, RUNNING, or DONE. + // Type: Specify the type of this network endpoint group. Only + // LOAD_BALANCING is valid for now. // // Possible values: - // "DONE" - // "PENDING" - // "RUNNING" - Status string `json:"status,omitempty"` - - // StatusMessage: [Output Only] An optional textual description of the - // current status of the operation. - StatusMessage string `json:"statusMessage,omitempty"` - - // TargetId: [Output Only] The unique target ID, which identifies a - // specific incarnation of the target resource. - TargetId uint64 `json:"targetId,omitempty,string"` - - // TargetLink: [Output Only] The URL of the resource that the operation - // modifies. For operations related to creating a snapshot, this points - // to the persistent disk that the snapshot was created from. - TargetLink string `json:"targetLink,omitempty"` - - // User: [Output Only] User who requested the operation, for example: - // user@example.com. - User string `json:"user,omitempty"` - - // Warnings: [Output Only] If warning messages are generated during - // processing of the operation, this field will be populated. - Warnings []*OperationWarnings `json:"warnings,omitempty"` - - // Zone: [Output Only] The URL of the zone where the operation resides. - // Only available when performing per-zone operations. - Zone string `json:"zone,omitempty"` + // "LOAD_BALANCING" + Type string `json:"type,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "ClientOperationId") + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -12852,7 +17370,7 @@ type Operation struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ClientOperationId") to + // NullFields is a list of field names (e.g. "CreationTimestamp") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. 
However, any field // with an empty value appearing in NullFields will be sent to the @@ -12862,54 +17380,44 @@ type Operation struct { NullFields []string `json:"-"` } -func (s *Operation) MarshalJSON() ([]byte, error) { - type noMethod Operation +func (s *NetworkEndpointGroup) MarshalJSON() ([]byte, error) { + type noMethod NetworkEndpointGroup raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// OperationError: [Output Only] If errors are generated during -// processing of the operation, this field will be populated. -type OperationError struct { - // Errors: [Output Only] The array of errors encountered while - // processing this operation. - Errors []*OperationErrorErrors `json:"errors,omitempty"` +type NetworkEndpointGroupAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` - // ForceSendFields is a list of field names (e.g. "Errors") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` + // Items: A list of NetworkEndpointGroupsScopedList resources. + Items map[string]NetworkEndpointGroupsScopedList `json:"items,omitempty"` - // NullFields is a list of field names (e.g. "Errors") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} + // Kind: [Output Only] The resource type, which is always + // compute#networkEndpointGroupAggregatedList for aggregated lists of + // network endpoint groups. + Kind string `json:"kind,omitempty"` -func (s *OperationError) MarshalJSON() ([]byte, error) { - type noMethod OperationError - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` -type OperationErrorErrors struct { - // Code: [Output Only] The error type identifier for this error. - Code string `json:"code,omitempty"` + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` - // Location: [Output Only] Indicates the field in the request that - // caused the error. This property is optional. - Location string `json:"location,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *NetworkEndpointGroupAggregatedListWarning `json:"warning,omitempty"` - // Message: [Output Only] An optional, human-readable error message. - Message string `json:"message,omitempty"` + // ServerResponse contains the HTTP response code and headers from the + // server. 
+ googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Code") to + // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -12917,7 +17425,7 @@ type OperationErrorErrors struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Code") to include in API + // NullFields is a list of field names (e.g. "Id") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -12926,13 +17434,15 @@ type OperationErrorErrors struct { NullFields []string `json:"-"` } -func (s *OperationErrorErrors) MarshalJSON() ([]byte, error) { - type noMethod OperationErrorErrors +func (s *NetworkEndpointGroupAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod NetworkEndpointGroupAggregatedList raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type OperationWarnings struct { +// NetworkEndpointGroupAggregatedListWarning: [Output Only] +// Informational warning message. +type NetworkEndpointGroupAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -12960,7 +17470,7 @@ type OperationWarnings struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*OperationWarningsData `json:"data,omitempty"` + Data []*NetworkEndpointGroupAggregatedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -12983,13 +17493,13 @@ type OperationWarnings struct { NullFields []string `json:"-"` } -func (s *OperationWarnings) MarshalJSON() ([]byte, error) { - type noMethod OperationWarnings +func (s *NetworkEndpointGroupAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod NetworkEndpointGroupAggregatedListWarning raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type OperationWarningsData struct { +type NetworkEndpointGroupAggregatedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -13020,40 +17530,33 @@ type OperationWarningsData struct { NullFields []string `json:"-"` } -func (s *OperationWarningsData) MarshalJSON() ([]byte, error) { - type noMethod OperationWarningsData +func (s *NetworkEndpointGroupAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod NetworkEndpointGroupAggregatedListWarningData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type OperationAggregatedList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id string `json:"id,omitempty"` - - // Items: [Output Only] A map of scoped operation lists. 
- Items map[string]OperationsScopedList `json:"items,omitempty"` +// NetworkEndpointGroupLbNetworkEndpointGroup: Load balancing specific +// fields for network endpoint group of type LOAD_BALANCING. +type NetworkEndpointGroupLbNetworkEndpointGroup struct { + // DefaultPort: The default port used if the port number is not + // specified in the network endpoint. If the network endpoint type is + // GCE_VM_IP, this field must not be specified. + DefaultPort int64 `json:"defaultPort,omitempty"` - // Kind: [Output Only] Type of resource. Always - // compute#operationAggregatedList for aggregated lists of operations. - Kind string `json:"kind,omitempty"` + // Network: The URL of the network to which all network endpoints in the + // NEG belong. Uses "default" project network if unspecified. + Network string `json:"network,omitempty"` - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` + // Subnetwork: Optional URL of the subnetwork to which all network + // endpoints in the NEG belong. + Subnetwork string `json:"subnetwork,omitempty"` - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` + // Zone: [Output Only] The URL of the zone where the network endpoint + // group is located. + Zone string `json:"zone,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "DefaultPort") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -13061,32 +17564,31 @@ type OperationAggregatedList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "DefaultPort") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *OperationAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod OperationAggregatedList +func (s *NetworkEndpointGroupLbNetworkEndpointGroup) MarshalJSON() ([]byte, error) { + type noMethod NetworkEndpointGroupLbNetworkEndpointGroup raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// OperationList: Contains a list of Operation resources. -type OperationList struct { - // Id: [Output Only] The unique identifier for the resource. 
This - // identifier is defined by the server. +type NetworkEndpointGroupList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. Id string `json:"id,omitempty"` - // Items: [Output Only] A list of Operation resources. - Items []*Operation `json:"items,omitempty"` + // Items: A list of NetworkEndpointGroup resources. + Items []*NetworkEndpointGroup `json:"items,omitempty"` - // Kind: [Output Only] Type of resource. Always compute#operations for - // Operations resource. + // Kind: [Output Only] The resource type, which is always + // compute#networkEndpointGroupList for network endpoint group lists. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -13100,6 +17602,9 @@ type OperationList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *NetworkEndpointGroupListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -13121,46 +17626,15 @@ type OperationList struct { NullFields []string `json:"-"` } -func (s *OperationList) MarshalJSON() ([]byte, error) { - type noMethod OperationList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type OperationsScopedList struct { - // Operations: [Output Only] List of operations contained in this scope. - Operations []*Operation `json:"operations,omitempty"` - - // Warning: [Output Only] Informational warning which replaces the list - // of operations when the list is empty. - Warning *OperationsScopedListWarning `json:"warning,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Operations") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Operations") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *OperationsScopedList) MarshalJSON() ([]byte, error) { - type noMethod OperationsScopedList +func (s *NetworkEndpointGroupList) MarshalJSON() ([]byte, error) { + type noMethod NetworkEndpointGroupList raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// OperationsScopedListWarning: [Output Only] Informational warning -// which replaces the list of operations when the list is empty. -type OperationsScopedListWarning struct { +// NetworkEndpointGroupListWarning: [Output Only] Informational warning +// message. +type NetworkEndpointGroupListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. 
@@ -13188,7 +17662,7 @@ type OperationsScopedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*OperationsScopedListWarningData `json:"data,omitempty"` + Data []*NetworkEndpointGroupListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -13211,13 +17685,13 @@ type OperationsScopedListWarning struct { NullFields []string `json:"-"` } -func (s *OperationsScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod OperationsScopedListWarning +func (s *NetworkEndpointGroupListWarning) MarshalJSON() ([]byte, error) { + type noMethod NetworkEndpointGroupListWarning raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type OperationsScopedListWarningData struct { +type NetworkEndpointGroupListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -13248,38 +17722,17 @@ type OperationsScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *OperationsScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod OperationsScopedListWarningData +func (s *NetworkEndpointGroupListWarningData) MarshalJSON() ([]byte, error) { + type noMethod NetworkEndpointGroupListWarningData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// PathMatcher: A matcher for the path portion of the URL. The -// BackendService from the longest-matched rule will serve the URL. If -// no rule was matched, the default service will be used. -type PathMatcher struct { - // DefaultService: The full or partial URL to the BackendService - // resource. This will be used if none of the pathRules defined by this - // PathMatcher is matched by the URL's path portion. For example, the - // following are all valid URLs to a BackendService resource: - // - - // https://www.googleapis.com/compute/v1/projects/project/global/backendServices/backendService - // - compute/v1/projects/project/global/backendServices/backendService - // - // - global/backendServices/backendService - DefaultService string `json:"defaultService,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // Name: The name to which this PathMatcher is referred by the HostRule. - Name string `json:"name,omitempty"` - - // PathRules: The list of path rules. - PathRules []*PathRule `json:"pathRules,omitempty"` +type NetworkEndpointGroupsAttachEndpointsRequest struct { + // NetworkEndpoints: The list of network endpoints to be attached. + NetworkEndpoints []*NetworkEndpoint `json:"networkEndpoints,omitempty"` - // ForceSendFields is a list of field names (e.g. "DefaultService") to + // ForceSendFields is a list of field names (e.g. "NetworkEndpoints") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -13287,7 +17740,7 @@ type PathMatcher struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. 
"DefaultService") to + // NullFields is a list of field names (e.g. "NetworkEndpoints") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -13297,26 +17750,17 @@ type PathMatcher struct { NullFields []string `json:"-"` } -func (s *PathMatcher) MarshalJSON() ([]byte, error) { - type noMethod PathMatcher +func (s *NetworkEndpointGroupsAttachEndpointsRequest) MarshalJSON() ([]byte, error) { + type noMethod NetworkEndpointGroupsAttachEndpointsRequest raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// PathRule: A path-matching rule for a URL. If matched, will use the -// specified BackendService to handle the traffic arriving at this URL. -type PathRule struct { - // Paths: The list of path patterns to match. Each must start with / and - // the only place a * is allowed is at the end following a /. The string - // fed to the path matcher does not include any text after the first ? - // or #, and those chars are not allowed here. - Paths []string `json:"paths,omitempty"` +type NetworkEndpointGroupsDetachEndpointsRequest struct { + // NetworkEndpoints: The list of network endpoints to be detached. + NetworkEndpoints []*NetworkEndpoint `json:"networkEndpoints,omitempty"` - // Service: The URL of the BackendService resource if this rule is - // matched. - Service string `json:"service,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Paths") to + // ForceSendFields is a list of field names (e.g. "NetworkEndpoints") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -13324,29 +17768,34 @@ type PathRule struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Paths") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "NetworkEndpoints") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *PathRule) MarshalJSON() ([]byte, error) { - type noMethod PathRule +func (s *NetworkEndpointGroupsDetachEndpointsRequest) MarshalJSON() ([]byte, error) { + type noMethod NetworkEndpointGroupsDetachEndpointsRequest raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type PerInstanceConfig struct { - // Instance: The URL of the instance. Serves as a merge key during - // UpdatePerInstanceConfigs operation. 
- Instance string `json:"instance,omitempty"` - - Override *ManagedInstanceOverride `json:"override,omitempty"` +type NetworkEndpointGroupsListEndpointsRequest struct { + // HealthStatus: Optional query parameter for showing the health status + // of each network endpoint. Valid options are SKIP or SHOW. If you + // don't specifiy this parameter, the health status of network endpoints + // will not be provided. + // + // Possible values: + // "SHOW" + // "SKIP" + HealthStatus string `json:"healthStatus,omitempty"` - // ForceSendFields is a list of field names (e.g. "Instance") to + // ForceSendFields is a list of field names (e.g. "HealthStatus") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -13354,85 +17803,50 @@ type PerInstanceConfig struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Instance") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "HealthStatus") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *PerInstanceConfig) MarshalJSON() ([]byte, error) { - type noMethod PerInstanceConfig +func (s *NetworkEndpointGroupsListEndpointsRequest) MarshalJSON() ([]byte, error) { + type noMethod NetworkEndpointGroupsListEndpointsRequest raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Policy: Defines an Identity and Access Management (IAM) policy. It is -// used to specify access control policies for Cloud Platform -// resources. -// -// -// -// A `Policy` consists of a list of `bindings`. A `Binding` binds a list -// of `members` to a `role`, where the members can be user accounts, -// Google groups, Google domains, and service accounts. A `role` is a -// named list of permissions defined by IAM. -// -// **Example** -// -// { "bindings": [ { "role": "roles/owner", "members": [ -// "user:mike@example.com", "group:admins@example.com", -// "domain:google.com", -// "serviceAccount:my-other-app@appspot.gserviceaccount.com", ] }, { -// "role": "roles/viewer", "members": ["user:sean@example.com"] } ] -// } -// -// For a description of IAM and its features, see the [IAM developer's -// guide](https://cloud.google.com/iam). -type Policy struct { - // AuditConfigs: Specifies cloud audit logging configuration for this - // policy. - AuditConfigs []*AuditConfig `json:"auditConfigs,omitempty"` - - // Bindings: Associates a list of `members` to a `role`. `bindings` with - // no members will result in an error. - Bindings []*Binding `json:"bindings,omitempty"` +type NetworkEndpointGroupsListNetworkEndpoints struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. 
+ Id string `json:"id,omitempty"` - // Etag: `etag` is used for optimistic concurrency control as a way to - // help prevent simultaneous updates of a policy from overwriting each - // other. It is strongly suggested that systems make use of the `etag` - // in the read-modify-write cycle to perform policy updates in order to - // avoid race conditions: An `etag` is returned in the response to - // `getIamPolicy`, and systems are expected to put that etag in the - // request to `setIamPolicy` to ensure that their change will be applied - // to the same version of the policy. - // - // If no `etag` is provided in the call to `setIamPolicy`, then the - // existing policy is overwritten blindly. - Etag string `json:"etag,omitempty"` + // Items: A list of NetworkEndpointWithHealthStatus resources. + Items []*NetworkEndpointWithHealthStatus `json:"items,omitempty"` - IamOwned bool `json:"iamOwned,omitempty"` + // Kind: [Output Only] The resource type, which is always + // compute#networkEndpointGroupsListNetworkEndpoints for the list of + // network endpoints in the specified network endpoint group. + Kind string `json:"kind,omitempty"` - // Rules: If more than one rule is specified, the rules are applied in - // the following manner: - All matching LOG rules are always applied. - - // If any DENY/DENY_WITH_LOG rule matches, permission is denied. Logging - // will be applied if one or more matching rule requires logging. - - // Otherwise, if any ALLOW/ALLOW_WITH_LOG rule matches, permission is - // granted. Logging will be applied if one or more matching rule - // requires logging. - Otherwise, if no rule applies, permission is - // denied. - Rules []*Rule `json:"rules,omitempty"` + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` - // Version: Version of the `Policy`. The default version is 0. - Version int64 `json:"version,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *NetworkEndpointGroupsListNetworkEndpointsWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "AuditConfigs") to + // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -13440,109 +17854,95 @@ type Policy struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AuditConfigs") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *Policy) MarshalJSON() ([]byte, error) { - type noMethod Policy +func (s *NetworkEndpointGroupsListNetworkEndpoints) MarshalJSON() ([]byte, error) { + type noMethod NetworkEndpointGroupsListNetworkEndpoints raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Project: A Project resource. Projects can only be created in the -// Google Cloud Platform Console. Unless marked otherwise, values can -// only be modified in the console. -type Project struct { - // CommonInstanceMetadata: Metadata key/value pairs available to all - // instances contained in this project. See Custom metadata for more - // information. - CommonInstanceMetadata *Metadata `json:"commonInstanceMetadata,omitempty"` - - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // DefaultServiceAccount: [Output Only] Default service account used by - // VMs running in this project. - DefaultServiceAccount string `json:"defaultServiceAccount,omitempty"` - - // Description: An optional textual description of the resource. - Description string `json:"description,omitempty"` - - // EnabledFeatures: Restricted features enabled for use on this project. - EnabledFeatures []string `json:"enabledFeatures,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. This is not the project ID, and - // is just a unique ID used by Compute Engine to identify resources. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of the resource. Always compute#project for - // projects. - Kind string `json:"kind,omitempty"` - - // Name: The project ID. For example: my-example-project. Use the - // project ID to make requests to Compute Engine. - Name string `json:"name,omitempty"` - - // Quotas: [Output Only] Quotas assigned to this project. - Quotas []*Quota `json:"quotas,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // UsageExportLocation: The naming prefix for daily usage reports and - // the Google Cloud Storage bucket where they are stored. - UsageExportLocation *UsageExportLocation `json:"usageExportLocation,omitempty"` - - // XpnProjectStatus: [Output Only] The role this project has in a shared - // VPC configuration. Currently only HOST projects are differentiated. +// NetworkEndpointGroupsListNetworkEndpointsWarning: [Output Only] +// Informational warning message. +type NetworkEndpointGroupsListNetworkEndpointsWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
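The NextPageToken comments above describe the standard list-pagination contract: pass the previous response's token as pageToken until an empty token comes back. A minimal, runnable sketch of that loop follows; the endpointPage type and listPage pager are hypothetical stand-ins backed by an in-memory slice, not calls into the vendored compute client.

package main

import (
	"fmt"
	"strconv"
)

type endpointPage struct {
	Items         []string // stands in for []*NetworkEndpointWithHealthStatus
	NextPageToken string   // empty string means there are no further pages
}

var allEndpoints = []string{"ep-0", "ep-1", "ep-2", "ep-3", "ep-4"}

// listPage is a hypothetical pager: it interprets the token as an offset and
// returns at most maxResults items plus the token for the next request.
func listPage(pageToken string, maxResults int) endpointPage {
	start := 0
	if pageToken != "" {
		start, _ = strconv.Atoi(pageToken)
	}
	end := start + maxResults
	if end >= len(allEndpoints) {
		return endpointPage{Items: allEndpoints[start:]}
	}
	return endpointPage{Items: allEndpoints[start:end], NextPageToken: strconv.Itoa(end)}
}

func main() {
	// Keep passing the previous response's NextPageToken as pageToken until
	// it comes back empty, exactly as the doc comments describe.
	token := ""
	for {
		page := listPage(token, 2)
		for _, ep := range page.Items {
			fmt.Println(ep)
		}
		if page.NextPageToken == "" {
			break
		}
		token = page.NextPageToken
	}
}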
// // Possible values: - // "HOST" - // "UNSPECIFIED_XPN_PROJECT_STATUS" - XpnProjectStatus string `json:"xpnProjectStatus,omitempty"` + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*NetworkEndpointGroupsListNetworkEndpointsWarningData `json:"data,omitempty"` - // ForceSendFields is a list of field names (e.g. - // "CommonInstanceMetadata") to unconditionally include in API requests. - // By default, fields with empty values are omitted from API requests. - // However, any non-pointer, non-interface field appearing in - // ForceSendFields will be sent to the server regardless of whether the - // field is empty or not. This may be used to include empty fields in - // Patch requests. + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CommonInstanceMetadata") - // to include in API requests with the JSON null value. By default, - // fields with empty values are omitted from API requests. However, any - // field with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *Project) MarshalJSON() ([]byte, error) { - type noMethod Project +func (s *NetworkEndpointGroupsListNetworkEndpointsWarning) MarshalJSON() ([]byte, error) { + type noMethod NetworkEndpointGroupsListNetworkEndpointsWarning raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ProjectsDisableXpnResourceRequest struct { - // XpnResource: Service resource (a.k.a service project) ID. 
- XpnResource *XpnResourceId `json:"xpnResource,omitempty"` +type NetworkEndpointGroupsListNetworkEndpointsWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` - // ForceSendFields is a list of field names (e.g. "XpnResource") to + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -13550,71 +17950,93 @@ type ProjectsDisableXpnResourceRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "XpnResource") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *ProjectsDisableXpnResourceRequest) MarshalJSON() ([]byte, error) { - type noMethod ProjectsDisableXpnResourceRequest +func (s *NetworkEndpointGroupsListNetworkEndpointsWarningData) MarshalJSON() ([]byte, error) { + type noMethod NetworkEndpointGroupsListNetworkEndpointsWarningData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ProjectsEnableXpnResourceRequest struct { - // XpnResource: Service resource (a.k.a service project) ID. - XpnResource *XpnResourceId `json:"xpnResource,omitempty"` +type NetworkEndpointGroupsScopedList struct { + // NetworkEndpointGroups: [Output Only] The list of network endpoint + // groups that are contained in this scope. + NetworkEndpointGroups []*NetworkEndpointGroup `json:"networkEndpointGroups,omitempty"` - // ForceSendFields is a list of field names (e.g. "XpnResource") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // Warning: [Output Only] An informational warning that replaces the + // list of network endpoint groups when the list is empty. + Warning *NetworkEndpointGroupsScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
+ // "NetworkEndpointGroups") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "XpnResource") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "NetworkEndpointGroups") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *ProjectsEnableXpnResourceRequest) MarshalJSON() ([]byte, error) { - type noMethod ProjectsEnableXpnResourceRequest +func (s *NetworkEndpointGroupsScopedList) MarshalJSON() ([]byte, error) { + type noMethod NetworkEndpointGroupsScopedList raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ProjectsGetXpnResources struct { - // Kind: [Output Only] Type of resource. Always - // compute#projectsGetXpnResources for lists of service resources (a.k.a - // service projects) - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` +// NetworkEndpointGroupsScopedListWarning: [Output Only] An +// informational warning that replaces the list of network endpoint +// groups when the list is empty. +type NetworkEndpointGroupsScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` - // Resources: Serive resources (a.k.a service projects) attached to this - // project as their shared VPC host. 
- Resources []*XpnResourceId `json:"resources,omitempty"` + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*NetworkEndpointGroupsScopedListWarningData `json:"data,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` - // ForceSendFields is a list of field names (e.g. "Kind") to + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -13622,7 +18044,7 @@ type ProjectsGetXpnResources struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Kind") to include in API + // NullFields is a list of field names (e.g. "Code") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -13631,19 +18053,27 @@ type ProjectsGetXpnResources struct { NullFields []string `json:"-"` } -func (s *ProjectsGetXpnResources) MarshalJSON() ([]byte, error) { - type noMethod ProjectsGetXpnResources +func (s *NetworkEndpointGroupsScopedListWarning) MarshalJSON() ([]byte, error) { + type noMethod NetworkEndpointGroupsScopedListWarning raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ProjectsListXpnHostsRequest struct { - // Organization: Optional organization ID managed by Cloud Resource - // Manager, for which to list shared VPC host projects. If not - // specified, the organization will be inferred from the project. - Organization string `json:"organization,omitempty"` +type NetworkEndpointGroupsScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` - // ForceSendFields is a list of field names (e.g. "Organization") to + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -13651,26 +18081,29 @@ type ProjectsListXpnHostsRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Organization") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. 
However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *ProjectsListXpnHostsRequest) MarshalJSON() ([]byte, error) { - type noMethod ProjectsListXpnHostsRequest +func (s *NetworkEndpointGroupsScopedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod NetworkEndpointGroupsScopedListWarningData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ProjectsSetDefaultServiceAccountRequest struct { - // Email: Email address of the service account. - Email string `json:"email,omitempty"` +type NetworkEndpointWithHealthStatus struct { + // Healths: [Output only] The health status of network endpoint; + Healths []*HealthStatusForNetworkEndpoint `json:"healths,omitempty"` - // ForceSendFields is a list of field names (e.g. "Email") to + // NetworkEndpoint: [Output only] The network endpoint; + NetworkEndpoint *NetworkEndpoint `json:"networkEndpoint,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Healths") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -13678,8 +18111,8 @@ type ProjectsSetDefaultServiceAccountRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Email") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Healths") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -13687,226 +18120,109 @@ type ProjectsSetDefaultServiceAccountRequest struct { NullFields []string `json:"-"` } -func (s *ProjectsSetDefaultServiceAccountRequest) MarshalJSON() ([]byte, error) { - type noMethod ProjectsSetDefaultServiceAccountRequest +func (s *NetworkEndpointWithHealthStatus) MarshalJSON() ([]byte, error) { + type noMethod NetworkEndpointWithHealthStatus raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Quota: A quotas entry. -type Quota struct { - // Limit: [Output Only] Quota limit for this metric. - Limit float64 `json:"limit,omitempty"` +// NetworkInterface: A network interface resource attached to an +// instance. +type NetworkInterface struct { + // AccessConfigs: An array of configurations for this interface. + // Currently, only one access config, ONE_TO_ONE_NAT, is supported. If + // there are no accessConfigs specified, then this instance will have no + // external internet access. + AccessConfigs []*AccessConfig `json:"accessConfigs,omitempty"` - // Metric: [Output Only] Name of the quota metric. 
- // - // Possible values: - // "AMD_S9300_GPUS" - // "AUTOSCALERS" - // "BACKEND_BUCKETS" - // "BACKEND_SERVICES" - // "COMMITMENTS" - // "CPUS" - // "CPUS_ALL_REGIONS" - // "DISKS_TOTAL_GB" - // "FIREWALLS" - // "FORWARDING_RULES" - // "HEALTH_CHECKS" - // "IMAGES" - // "INSTANCES" - // "INSTANCE_GROUPS" - // "INSTANCE_GROUP_MANAGERS" - // "INSTANCE_TEMPLATES" - // "IN_USE_ADDRESSES" - // "LOCAL_SSD_TOTAL_GB" - // "NETWORKS" - // "NVIDIA_K80_GPUS" - // "NVIDIA_P100_GPUS" - // "PREEMPTIBLE_CPUS" - // "PREEMPTIBLE_LOCAL_SSD_GB" - // "REGIONAL_AUTOSCALERS" - // "REGIONAL_INSTANCE_GROUP_MANAGERS" - // "ROUTERS" - // "ROUTES" - // "SECURITY_POLICIES" - // "SECURITY_POLICY_RULES" - // "SNAPSHOTS" - // "SSD_TOTAL_GB" - // "SSL_CERTIFICATES" - // "STATIC_ADDRESSES" - // "SUBNETWORKS" - // "TARGET_HTTPS_PROXIES" - // "TARGET_HTTP_PROXIES" - // "TARGET_INSTANCES" - // "TARGET_POOLS" - // "TARGET_SSL_PROXIES" - // "TARGET_TCP_PROXIES" - // "TARGET_VPN_GATEWAYS" - // "URL_MAPS" - // "VPN_TUNNELS" - Metric string `json:"metric,omitempty"` - - // Usage: [Output Only] Current usage of this metric. - Usage float64 `json:"usage,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Limit") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Limit") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Quota) MarshalJSON() ([]byte, error) { - type noMethod Quota - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -func (s *Quota) UnmarshalJSON(data []byte) error { - type noMethod Quota - var s1 struct { - Limit gensupport.JSONFloat64 `json:"limit"` - Usage gensupport.JSONFloat64 `json:"usage"` - *noMethod - } - s1.noMethod = (*noMethod)(s) - if err := json.Unmarshal(data, &s1); err != nil { - return err - } - s.Limit = float64(s1.Limit) - s.Usage = float64(s1.Usage) - return nil -} - -// Reference: Represents a reference to a resource. -type Reference struct { - // Kind: [Output Only] Type of the resource. Always compute#reference - // for references. - Kind string `json:"kind,omitempty"` - - // ReferenceType: A description of the reference type with no implied - // semantics. Possible values include: - // - MEMBER_OF - ReferenceType string `json:"referenceType,omitempty"` - - // Referrer: URL of the resource which refers to the target. - Referrer string `json:"referrer,omitempty"` - - // Target: URL of the resource to which this reference points. - Target string `json:"target,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Kind") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. 
However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Kind") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Reference) MarshalJSON() ([]byte, error) { - type noMethod Reference - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Region: Region resource. -type Region struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Deprecated: [Output Only] The deprecation status associated with this - // region. - Deprecated *DeprecationStatus `json:"deprecated,omitempty"` - - // Description: [Output Only] Textual description of the resource. - Description string `json:"description,omitempty"` + // AliasIpRanges: An array of alias IP ranges for this network + // interface. Can only be specified for network interfaces on + // subnet-mode networks. + AliasIpRanges []*AliasIpRange `json:"aliasIpRanges,omitempty"` - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` + // Fingerprint: Fingerprint hash of contents stored in this network + // interface. This field will be ignored when inserting an Instance or + // adding a NetworkInterface. An up-to-date fingerprint must be provided + // in order to update the NetworkInterface. + Fingerprint string `json:"fingerprint,omitempty"` - // Kind: [Output Only] Type of the resource. Always compute#region for - // regions. + // Kind: [Output Only] Type of the resource. Always + // compute#networkInterface for network interfaces. Kind string `json:"kind,omitempty"` - // Name: [Output Only] Name of the resource. + // Name: [Output Only] The name of the network interface, generated by + // the server. For network devices, these are eth0, eth1, etc. Name string `json:"name,omitempty"` - // Quotas: [Output Only] Quotas assigned to this region. - Quotas []*Quota `json:"quotas,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // Status: [Output Only] Status of the region, either UP or DOWN. + // Network: URL of the network resource for this instance. When creating + // an instance, if neither the network nor the subnetwork is specified, + // the default network global/networks/default is used; if the network + // is not specified but the subnetwork is specified, the network is + // inferred. // - // Possible values: - // "DOWN" - // "UP" - Status string `json:"status,omitempty"` + // This field is optional when creating a firewall rule. If not + // specified when creating a firewall rule, the default network + // global/networks/default is used. + // + // If you specify this property, you can specify the network as a full + // or partial URL. 
For example, the following are all valid URLs: + // - + // https://www.googleapis.com/compute/v1/projects/project/global/networks/network + // - projects/project/global/networks/network + // - global/networks/default + Network string `json:"network,omitempty"` - // Zones: [Output Only] A list of zones available in this region, in the - // form of resource URLs. - Zones []string `json:"zones,omitempty"` + // NetworkIP: An IPv4 internal network address to assign to the instance + // for this network interface. If not specified by the user, an unused + // internal IP is assigned by the system. + NetworkIP string `json:"networkIP,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Subnetwork: The URL of the Subnetwork resource for this instance. If + // the network resource is in legacy mode, do not provide this property. + // If the network is in auto subnet mode, providing the subnetwork is + // optional. If the network is in custom subnet mode, then this field + // should be specified. If you specify this property, you can specify + // the subnetwork as a full or partial URL. For example, the following + // are all valid URLs: + // - + // https://www.googleapis.com/compute/v1/projects/project/regions/region/subnetworks/subnetwork + // - regions/region/subnetworks/subnetwork + Subnetwork string `json:"subnetwork,omitempty"` - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "AccessConfigs") to + // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "AccessConfigs") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *Region) MarshalJSON() ([]byte, error) { - type noMethod Region +func (s *NetworkInterface) MarshalJSON() ([]byte, error) { + type noMethod NetworkInterface raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RegionAutoscalerList: Contains a list of autoscalers. -type RegionAutoscalerList struct { +// NetworkList: Contains a list of networks. +type NetworkList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of Autoscaler resources. 
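The NetworkInterface comments above accept the network and subnetwork as either full or partial URLs. The tiny helpers below just assemble the documented partial forms; the project, region, and resource names in main are placeholders.

package main

import "fmt"

// networkURL builds the partial "projects/.../global/networks/..." form; the
// full form only differs by the "https://www.googleapis.com/compute/v1/" prefix.
func networkURL(project, network string) string {
	return fmt.Sprintf("projects/%s/global/networks/%s", project, network)
}

// subnetworkURL builds the partial "regions/.../subnetworks/..." form used for
// instances on custom-subnet-mode networks.
func subnetworkURL(region, subnetwork string) string {
	return fmt.Sprintf("regions/%s/subnetworks/%s", region, subnetwork)
}

func main() {
	fmt.Println(networkURL("my-project", "my-network"))
	fmt.Println("global/networks/default") // shortest accepted form for the default network
	fmt.Println(subnetworkURL("us-central1", "my-subnet"))
}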
- Items []*Autoscaler `json:"items,omitempty"` + // Items: A list of Network resources. + Items []*Network `json:"items,omitempty"` - // Kind: Type of resource. + // Kind: [Output Only] Type of resource. Always compute#networkList for + // lists of networks. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -13920,6 +18236,9 @@ type RegionAutoscalerList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *NetworkListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -13941,40 +18260,48 @@ type RegionAutoscalerList struct { NullFields []string `json:"-"` } -func (s *RegionAutoscalerList) MarshalJSON() ([]byte, error) { - type noMethod RegionAutoscalerList +func (s *NetworkList) MarshalJSON() ([]byte, error) { + type noMethod NetworkList raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionDiskTypeList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of DiskType resources. - Items []*DiskType `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always - // compute#regionDiskTypeList for region disk types. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` +// NetworkListWarning: [Output Only] Informational warning message. +type NetworkListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*NetworkListWarningData `json:"data,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. 
"Code") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -13982,7 +18309,7 @@ type RegionDiskTypeList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API + // NullFields is a list of field names (e.g. "Code") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -13991,18 +18318,27 @@ type RegionDiskTypeList struct { NullFields []string `json:"-"` } -func (s *RegionDiskTypeList) MarshalJSON() ([]byte, error) { - type noMethod RegionDiskTypeList +func (s *NetworkListWarning) MarshalJSON() ([]byte, error) { + type noMethod NetworkListWarning raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionDisksResizeRequest struct { - // SizeGb: The new size of the regional persistent disk, which is - // specified in GB. - SizeGb int64 `json:"sizeGb,omitempty,string"` +type NetworkListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` - // ForceSendFields is a list of field names (e.g. "SizeGb") to + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -14010,7 +18346,7 @@ type RegionDisksResizeRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "SizeGb") to include in API + // NullFields is a list of field names (e.g. "Key") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -14019,40 +18355,51 @@ type RegionDisksResizeRequest struct { NullFields []string `json:"-"` } -func (s *RegionDisksResizeRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionDisksResizeRequest +func (s *NetworkListWarningData) MarshalJSON() ([]byte, error) { + type noMethod NetworkListWarningData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RegionInstanceGroupList: Contains a list of InstanceGroup resources. -type RegionInstanceGroupList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of InstanceGroup resources. 
- Items []*InstanceGroup `json:"items,omitempty"` +// NetworkPeering: A network peering attached to a network resource. The +// message includes the peering name, peer network, peering state, and a +// flag indicating whether Google Compute Engine should automatically +// create routes for the peering. +type NetworkPeering struct { + // AutoCreateRoutes: Whether full mesh connectivity is created and + // managed automatically. When it is set to true, Google Compute Engine + // will automatically create and manage the routes between two networks + // when the state is ACTIVE. Otherwise, user needs to create routes + // manually to route packets to peer network. + AutoCreateRoutes bool `json:"autoCreateRoutes,omitempty"` - // Kind: The resource type. - Kind string `json:"kind,omitempty"` + // Name: Name of this peering. Provided by the client when the peering + // is created. The name must comply with RFC1035. Specifically, the name + // must be 1-63 characters long and match regular expression + // [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a + // lowercase letter, and all the following characters must be a dash, + // lowercase letter, or digit, except the last character, which cannot + // be a dash. + Name string `json:"name,omitempty"` - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` + // Network: The URL of the peer network. It can be either full URL or + // partial URL. The peer network may belong to a different project. If + // the partial URL does not contain project, it is assumed that the peer + // network is in the same project as the current network. + Network string `json:"network,omitempty"` - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` + // State: [Output Only] State for the peering. + // + // Possible values: + // "ACTIVE" + // "INACTIVE" + State string `json:"state,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // StateDetails: [Output Only] Details about the current state of the + // peering. + StateDetails string `json:"stateDetails,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "AutoCreateRoutes") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -14060,29 +18407,39 @@ type RegionInstanceGroupList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. 
"AutoCreateRoutes") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *RegionInstanceGroupList) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupList +func (s *NetworkPeering) MarshalJSON() ([]byte, error) { + type noMethod NetworkPeering raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RegionInstanceGroupManagerDeleteInstanceConfigReq: -// RegionInstanceGroupManagers.deletePerInstanceConfigs -type RegionInstanceGroupManagerDeleteInstanceConfigReq struct { - // Instances: The list of instances for which we want to delete - // per-instance configs on this managed instance group. - Instances []string `json:"instances,omitempty"` +// NetworkRoutingConfig: A routing configuration attached to a network +// resource. The message includes the list of routers associated with +// the network, and a flag indicating the type of routing behavior to +// enforce network-wide. +type NetworkRoutingConfig struct { + // RoutingMode: The network-wide routing mode to use. If set to + // REGIONAL, this network's cloud routers will only advertise routes + // with subnetworks of this network in the same region as the router. If + // set to GLOBAL, this network's cloud routers will advertise routes + // with all subnetworks of this network, across regions. + // + // Possible values: + // "GLOBAL" + // "REGIONAL" + RoutingMode string `json:"routingMode,omitempty"` - // ForceSendFields is a list of field names (e.g. "Instances") to + // ForceSendFields is a list of field names (e.g. "RoutingMode") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -14090,52 +18447,36 @@ type RegionInstanceGroupManagerDeleteInstanceConfigReq struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Instances") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "RoutingMode") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *RegionInstanceGroupManagerDeleteInstanceConfigReq) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagerDeleteInstanceConfigReq +func (s *NetworkRoutingConfig) MarshalJSON() ([]byte, error) { + type noMethod NetworkRoutingConfig raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RegionInstanceGroupManagerList: Contains a list of managed instance -// groups. 
-type RegionInstanceGroupManagerList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of InstanceGroupManager resources. - Items []*InstanceGroupManager `json:"items,omitempty"` - - // Kind: [Output Only] The resource type, which is always - // compute#instanceGroupManagerList for a list of managed instance - // groups that exist in th regional scope. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` +type NetworksAddPeeringRequest struct { + // AutoCreateRoutes: Whether Google Compute Engine manages the routes + // automatically. + AutoCreateRoutes bool `json:"autoCreateRoutes,omitempty"` - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` + // Name: Name of the peering, which should conform to RFC1035. + Name string `json:"name,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // PeerNetwork: URL of the peer network. It can be either full URL or + // partial URL. The peer network may belong to a different project. If + // the partial URL does not contain project, it is assumed that the peer + // network is in the same project as the current network. + PeerNetwork string `json:"peerNetwork,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "AutoCreateRoutes") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -14143,37 +18484,7 @@ type RegionInstanceGroupManagerList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *RegionInstanceGroupManagerList) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagerList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// RegionInstanceGroupManagerUpdateInstanceConfigReq: -// RegionInstanceGroupManagers.updatePerInstanceConfigs -type RegionInstanceGroupManagerUpdateInstanceConfigReq struct { - // PerInstanceConfigs: The list of per-instance configs to insert or - // patch on this managed instance group. - PerInstanceConfigs []*PerInstanceConfig `json:"perInstanceConfigs,omitempty"` - - // ForceSendFields is a list of field names (e.g. "PerInstanceConfigs") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. 
However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "PerInstanceConfigs") to + // NullFields is a list of field names (e.g. "AutoCreateRoutes") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -14183,19 +18494,17 @@ type RegionInstanceGroupManagerUpdateInstanceConfigReq struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupManagerUpdateInstanceConfigReq) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagerUpdateInstanceConfigReq +func (s *NetworksAddPeeringRequest) MarshalJSON() ([]byte, error) { + type noMethod NetworksAddPeeringRequest raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionInstanceGroupManagersAbandonInstancesRequest struct { - // Instances: The URLs of one or more instances to abandon. This can be - // a full URL or a partial URL, such as - // zones/[ZONE]/instances/[INSTANCE_NAME]. - Instances []string `json:"instances,omitempty"` +type NetworksRemovePeeringRequest struct { + // Name: Name of the peering, which should conform to RFC1035. + Name string `json:"name,omitempty"` - // ForceSendFields is a list of field names (e.g. "Instances") to + // ForceSendFields is a list of field names (e.g. "Name") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -14203,8 +18512,8 @@ type RegionInstanceGroupManagersAbandonInstancesRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Instances") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -14212,155 +18521,126 @@ type RegionInstanceGroupManagersAbandonInstancesRequest struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupManagersAbandonInstancesRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagersAbandonInstancesRequest +func (s *NetworksRemovePeeringRequest) MarshalJSON() ([]byte, error) { + type noMethod NetworksRemovePeeringRequest raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RegionInstanceGroupManagersApplyUpdatesRequest: -// InstanceGroupManagers.applyUpdatesToInstances -type RegionInstanceGroupManagersApplyUpdatesRequest struct { - // Instances: The list of instances for which we want to apply changes - // on this managed instance group. - Instances []string `json:"instances,omitempty"` +// Operation: An Operation resource, used to manage asynchronous API +// requests. +type Operation struct { + // ClientOperationId: [Output Only] Reserved for future use. 
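For orientation, a rough sketch of how the ForceSendFields / NullFields convention documented above plays out for one of the new peering request types. The import path of this generated package is an assumption (it may be vendored under a different version), and only fields shown in this diff are used.

// Sketch: how ForceSendFields shapes the JSON body, per the field comments
// above. The import path below is an assumption about where this vendored,
// generated package lives.
package main

import (
	"encoding/json"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v0.beta" // assumed path
)

func main() {
	req := &compute.NetworksAddPeeringRequest{
		Name:        "example-peering",               // must conform to RFC1035
		PeerNetwork: "global/networks/other-network", // full or partial URL of the peer network
		// AutoCreateRoutes is false (the zero value), so it would normally be
		// omitted; listing it in ForceSendFields sends it explicitly.
		ForceSendFields: []string{"AutoCreateRoutes"},
	}
	body, err := json.Marshal(req) // invokes the generated MarshalJSON via gensupport
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(body))
	// Roughly: {"autoCreateRoutes":false,"name":"example-peering","peerNetwork":"global/networks/other-network"}
}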
+ ClientOperationId string `json:"clientOperationId,omitempty"` - // MaximalAction: The maximal action that should be perfomed on the - // instances. By default REPLACE. - // - // Possible values: - // "NONE" - // "REFRESH" - // "REPLACE" - // "RESTART" - MaximalAction string `json:"maximalAction,omitempty"` + // CreationTimestamp: [Deprecated] This field is deprecated. + CreationTimestamp string `json:"creationTimestamp,omitempty"` - // MinimalAction: The minimal action that should be perfomed on the - // instances. By default NONE. - // - // Possible values: - // "NONE" - // "REFRESH" - // "REPLACE" - // "RESTART" - MinimalAction string `json:"minimalAction,omitempty"` + // Description: [Output Only] A textual description of the operation, + // which is set when the operation is created. + Description string `json:"description,omitempty"` - // ForceSendFields is a list of field names (e.g. "Instances") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` + // EndTime: [Output Only] The time that this operation was completed. + // This value is in RFC3339 text format. + EndTime string `json:"endTime,omitempty"` - // NullFields is a list of field names (e.g. "Instances") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} + // Error: [Output Only] If errors are generated during processing of the + // operation, this field will be populated. + Error *OperationError `json:"error,omitempty"` -func (s *RegionInstanceGroupManagersApplyUpdatesRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagersApplyUpdatesRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // HttpErrorMessage: [Output Only] If the operation fails, this field + // contains the HTTP error message that was returned, such as NOT FOUND. + HttpErrorMessage string `json:"httpErrorMessage,omitempty"` -type RegionInstanceGroupManagersDeleteInstancesRequest struct { - // Instances: The URLs of one or more instances to delete. This can be a - // full URL or a partial URL, such as - // zones/[ZONE]/instances/[INSTANCE_NAME]. - Instances []string `json:"instances,omitempty"` + // HttpErrorStatusCode: [Output Only] If the operation fails, this field + // contains the HTTP error status code that was returned. For example, a + // 404 means the resource was not found. + HttpErrorStatusCode int64 `json:"httpErrorStatusCode,omitempty"` - // ForceSendFields is a list of field names (e.g. "Instances") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. 
- ForceSendFields []string `json:"-"` + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` - // NullFields is a list of field names (e.g. "Instances") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} + // InsertTime: [Output Only] The time that this operation was requested. + // This value is in RFC3339 text format. + InsertTime string `json:"insertTime,omitempty"` -func (s *RegionInstanceGroupManagersDeleteInstancesRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagersDeleteInstancesRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // Kind: [Output Only] Type of the resource. Always compute#operation + // for Operation resources. + Kind string `json:"kind,omitempty"` -type RegionInstanceGroupManagersListInstanceConfigsResp struct { - // Items: [Output Only] The list of PerInstanceConfig. - Items []*PerInstanceConfig `json:"items,omitempty"` + // Name: [Output Only] Name of the resource. + Name string `json:"name,omitempty"` - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` + // OperationType: [Output Only] The type of operation, such as insert, + // update, or delete, and so on. + OperationType string `json:"operationType,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Progress: [Output Only] An optional progress indicator that ranges + // from 0 to 100. There is no requirement that this be linear or support + // any granularity of operations. This should not be used to guess when + // the operation will be complete. This number should monotonically + // increase as the operation progresses. + Progress int64 `json:"progress,omitempty"` - // ForceSendFields is a list of field names (e.g. "Items") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` + // Region: [Output Only] The URL of the region where the operation + // resides. Only available when performing regional operations. + Region string `json:"region,omitempty"` - // NullFields is a list of field names (e.g. "Items") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. 
It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` -func (s *RegionInstanceGroupManagersListInstanceConfigsResp) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagersListInstanceConfigsResp - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // StartTime: [Output Only] The time that this operation was started by + // the server. This value is in RFC3339 text format. + StartTime string `json:"startTime,omitempty"` -type RegionInstanceGroupManagersListInstancesResponse struct { - // ManagedInstances: List of managed instances. - ManagedInstances []*ManagedInstance `json:"managedInstances,omitempty"` + // Status: [Output Only] The status of the operation, which can be one + // of the following: PENDING, RUNNING, or DONE. + // + // Possible values: + // "DONE" + // "PENDING" + // "RUNNING" + Status string `json:"status,omitempty"` - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` + // StatusMessage: [Output Only] An optional textual description of the + // current status of the operation. + StatusMessage string `json:"statusMessage,omitempty"` + + // TargetId: [Output Only] The unique target ID, which identifies a + // specific incarnation of the target resource. + TargetId uint64 `json:"targetId,omitempty,string"` + + // TargetLink: [Output Only] The URL of the resource that the operation + // modifies. For operations related to creating a snapshot, this points + // to the persistent disk that the snapshot was created from. + TargetLink string `json:"targetLink,omitempty"` + + // User: [Output Only] User who requested the operation, for example: + // user@example.com. + User string `json:"user,omitempty"` + + // Warnings: [Output Only] If warning messages are generated during + // processing of the operation, this field will be populated. + Warnings []*OperationWarnings `json:"warnings,omitempty"` + + // Zone: [Output Only] The URL of the zone where the operation resides. + // Only available when performing per-zone operations. + Zone string `json:"zone,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "ManagedInstances") to - // unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "ClientOperationId") + // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ManagedInstances") to + // NullFields is a list of field names (e.g. 
"ClientOperationId") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -14370,19 +18650,20 @@ type RegionInstanceGroupManagersListInstancesResponse struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupManagersListInstancesResponse) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagersListInstancesResponse +func (s *Operation) MarshalJSON() ([]byte, error) { + type noMethod Operation raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionInstanceGroupManagersRecreateRequest struct { - // Instances: The URLs of one or more instances to recreate. This can be - // a full URL or a partial URL, such as - // zones/[ZONE]/instances/[INSTANCE_NAME]. - Instances []string `json:"instances,omitempty"` +// OperationError: [Output Only] If errors are generated during +// processing of the operation, this field will be populated. +type OperationError struct { + // Errors: [Output Only] The array of errors encountered while + // processing this operation. + Errors []*OperationErrorErrors `json:"errors,omitempty"` - // ForceSendFields is a list of field names (e.g. "Instances") to + // ForceSendFields is a list of field names (e.g. "Errors") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -14390,8 +18671,8 @@ type RegionInstanceGroupManagersRecreateRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Instances") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Errors") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -14399,51 +18680,81 @@ type RegionInstanceGroupManagersRecreateRequest struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupManagersRecreateRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagersRecreateRequest +func (s *OperationError) MarshalJSON() ([]byte, error) { + type noMethod OperationError raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionInstanceGroupManagersSetAutoHealingRequest struct { - AutoHealingPolicies []*InstanceGroupManagerAutoHealingPolicy `json:"autoHealingPolicies,omitempty"` +type OperationErrorErrors struct { + // Code: [Output Only] The error type identifier for this error. + Code string `json:"code,omitempty"` - // ForceSendFields is a list of field names (e.g. "AutoHealingPolicies") - // to unconditionally include in API requests. By default, fields with + // Location: [Output Only] Indicates the field in the request that + // caused the error. This property is optional. + Location string `json:"location,omitempty"` + + // Message: [Output Only] An optional, human-readable error message. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"Code") to + // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AutoHealingPolicies") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *RegionInstanceGroupManagersSetAutoHealingRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagersSetAutoHealingRequest +func (s *OperationErrorErrors) MarshalJSON() ([]byte, error) { + type noMethod OperationErrorErrors raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionInstanceGroupManagersSetTargetPoolsRequest struct { - // Fingerprint: Fingerprint of the target pools information, which is a - // hash of the contents. This field is used for optimistic locking when - // you update the target pool entries. This field is optional. - Fingerprint string `json:"fingerprint,omitempty"` +type OperationWarnings struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` - // TargetPools: The URL of all TargetPool resources to which instances - // in the instanceGroup field are added. The target pools automatically - // apply to all of the instances in the managed instance group. - TargetPools []string `json:"targetPools,omitempty"` + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*OperationWarningsData `json:"data,omitempty"` - // ForceSendFields is a list of field names (e.g. "Fingerprint") to + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. 
By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -14451,27 +18762,36 @@ type RegionInstanceGroupManagersSetTargetPoolsRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Fingerprint") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *RegionInstanceGroupManagersSetTargetPoolsRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagersSetTargetPoolsRequest +func (s *OperationWarnings) MarshalJSON() ([]byte, error) { + type noMethod OperationWarnings raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionInstanceGroupManagersSetTemplateRequest struct { - // InstanceTemplate: URL of the InstanceTemplate resource from which all - // new instances will be created. - InstanceTemplate string `json:"instanceTemplate,omitempty"` +type OperationWarningsData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` - // ForceSendFields is a list of field names (e.g. "InstanceTemplate") to + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -14479,31 +18799,31 @@ type RegionInstanceGroupManagersSetTemplateRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "InstanceTemplate") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
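The Operation type above is what mutating calls in this client return; a polling helper along the lines below is the usual way to wait for DONE and surface OperationError details. The *compute.Service wiring, the GlobalOperations.Get call, and the import path are assumptions about the rest of the generated package, not part of this hunk.

// Sketch of waiting on a global Operation, based on the Status, Error, and
// HttpErrorStatusCode fields defined above. GlobalOperations.Get and the
// import path are assumptions about the surrounding generated client.
package sketch

import (
	"fmt"
	"time"

	compute "google.golang.org/api/compute/v0.beta" // assumed path
)

// waitForGlobalOperation polls until the operation reaches DONE, then reports
// any errors collected in OperationError. Zonal and regional operations would
// use the ZoneOperations / RegionOperations services instead (see the Zone and
// Region fields on Operation).
func waitForGlobalOperation(svc *compute.Service, project string, op *compute.Operation) error {
	for op.Status != "DONE" {
		time.Sleep(2 * time.Second)
		var err error
		op, err = svc.GlobalOperations.Get(project, op.Name).Do()
		if err != nil {
			return err
		}
	}
	if op.Error != nil {
		for _, e := range op.Error.Errors {
			fmt.Printf("operation %s failed: %s: %s\n", op.Name, e.Code, e.Message)
		}
		return fmt.Errorf("operation %s failed (HTTP %d)", op.Name, op.HttpErrorStatusCode)
	}
	return nil
}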
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *RegionInstanceGroupManagersSetTemplateRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagersSetTemplateRequest +func (s *OperationWarningsData) MarshalJSON() ([]byte, error) { + type noMethod OperationWarningsData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionInstanceGroupsListInstances struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. +type OperationAggregatedList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. Id string `json:"id,omitempty"` - // Items: A list of InstanceWithNamedPorts resources. - Items []*InstanceWithNamedPorts `json:"items,omitempty"` + // Items: [Output Only] A map of scoped operation lists. + Items map[string]OperationsScopedList `json:"items,omitempty"` - // Kind: The resource type. + // Kind: [Output Only] Type of resource. Always + // compute#operationAggregatedList for aggregated lists of operations. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -14517,6 +18837,9 @@ type RegionInstanceGroupsListInstances struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *OperationAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -14538,28 +18861,49 @@ type RegionInstanceGroupsListInstances struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupsListInstances) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupsListInstances +func (s *OperationAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod OperationAggregatedList raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionInstanceGroupsListInstancesRequest struct { - // InstanceState: Instances in which state should be returned. Valid - // options are: 'ALL', 'RUNNING'. By default, it lists all instances. +// OperationAggregatedListWarning: [Output Only] Informational warning +// message. +type OperationAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. // // Possible values: - // "ALL" - // "RUNNING" - InstanceState string `json:"instanceState,omitempty"` + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` - // PortName: Name of port user is interested in. It is optional. 
If it - // is set, only information about this ports will be returned. If it is - // not set, all the named ports will be returned. Always lists all - // instances. - PortName string `json:"portName,omitempty"` + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*OperationAggregatedListWarningData `json:"data,omitempty"` - // ForceSendFields is a list of field names (e.g. "InstanceState") to + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -14567,34 +18911,36 @@ type RegionInstanceGroupsListInstancesRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "InstanceState") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *RegionInstanceGroupsListInstancesRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupsListInstancesRequest +func (s *OperationAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod OperationAggregatedListWarning raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionInstanceGroupsSetNamedPortsRequest struct { - // Fingerprint: The fingerprint of the named ports information for this - // instance group. Use this optional property to prevent conflicts when - // multiple users change the named ports settings concurrently. Obtain - // the fingerprint with the instanceGroups.get method. Then, include the - // fingerprint in your request to ensure that you do not overwrite - // changes that were applied from another concurrent request. - Fingerprint string `json:"fingerprint,omitempty"` +type OperationAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` - // NamedPorts: The list of named ports to set for this instance group. - NamedPorts []*NamedPort `json:"namedPorts,omitempty"` + // Value: [Output Only] A warning data value corresponding to the key. 
+ Value string `json:"value,omitempty"` - // ForceSendFields is a list of field names (e.g. "Fingerprint") to + // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -14602,32 +18948,32 @@ type RegionInstanceGroupsSetNamedPortsRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Fingerprint") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *RegionInstanceGroupsSetNamedPortsRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupsSetNamedPortsRequest +func (s *OperationAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod OperationAggregatedListWarningData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RegionList: Contains a list of region resources. -type RegionList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. +// OperationList: Contains a list of Operation resources. +type OperationList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. Id string `json:"id,omitempty"` - // Items: A list of Region resources. - Items []*Region `json:"items,omitempty"` + // Items: [Output Only] A list of Operation resources. + Items []*Operation `json:"items,omitempty"` - // Kind: [Output Only] Type of resource. Always compute#regionList for - // lists of regions. + // Kind: [Output Only] Type of resource. Always compute#operations for + // Operations resource. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -14641,6 +18987,9 @@ type RegionList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *OperationListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -14662,68 +19011,48 @@ type RegionList struct { NullFields []string `json:"-"` } -func (s *RegionList) MarshalJSON() ([]byte, error) { - type noMethod RegionList +func (s *OperationList) MarshalJSON() ([]byte, error) { + type noMethod OperationList raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionSetLabelsRequest struct { - // LabelFingerprint: The fingerprint of the previous set of labels for - // this resource, used to detect conflicts. 
The fingerprint is initially - // generated by Compute Engine and changes after every request to modify - // or update labels. You must always provide an up-to-date fingerprint - // hash in order to update or change labels. Make a get() request to the - // resource to get the latest fingerprint. - LabelFingerprint string `json:"labelFingerprint,omitempty"` +// OperationListWarning: [Output Only] Informational warning message. +type OperationListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` - // Labels: The labels to set for this resource. - Labels map[string]string `json:"labels,omitempty"` + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*OperationListWarningData `json:"data,omitempty"` - // ForceSendFields is a list of field names (e.g. "LabelFingerprint") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "LabelFingerprint") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *RegionSetLabelsRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionSetLabelsRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// ResourceCommitment: Commitment for a particular resource (a -// Commitment is composed of one or more of these). -type ResourceCommitment struct { - // Amount: The amount of the resource purchased (in a type-dependent - // unit, such as bytes). For vCPUs, this can just be an integer. For - // memory, this must be provided in MB. Memory must be a multiple of 256 - // MB, with up to 6.5GB of memory per every vCPU. - Amount int64 `json:"amount,omitempty,string"` - - // Type: Type of resource for which this commitment applies. Possible - // values are VCPU and MEMORY - // - // Possible values: - // "LOCAL_SSD" - // "MEMORY" - // "UNSPECIFIED" - // "VCPU" - Type string `json:"type,omitempty"` + // Message: [Output Only] A human-readable description of the warning + // code. 
+ Message string `json:"message,omitempty"` - // ForceSendFields is a list of field names (e.g. "Amount") to + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -14731,7 +19060,7 @@ type ResourceCommitment struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Amount") to include in API + // NullFields is a list of field names (e.g. "Code") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -14740,18 +19069,27 @@ type ResourceCommitment struct { NullFields []string `json:"-"` } -func (s *ResourceCommitment) MarshalJSON() ([]byte, error) { - type noMethod ResourceCommitment +func (s *OperationListWarning) MarshalJSON() ([]byte, error) { + type noMethod OperationListWarning raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ResourceGroupReference struct { - // Group: A URI referencing one of the instance groups listed in the - // backend service. - Group string `json:"group,omitempty"` +type OperationListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` - // ForceSendFields is a list of field names (e.g. "Group") to + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -14759,7 +19097,7 @@ type ResourceGroupReference struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Group") to include in API + // NullFields is a list of field names (e.g. "Key") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -14768,140 +19106,46 @@ type ResourceGroupReference struct { NullFields []string `json:"-"` } -func (s *ResourceGroupReference) MarshalJSON() ([]byte, error) { - type noMethod ResourceGroupReference +func (s *OperationListWarningData) MarshalJSON() ([]byte, error) { + type noMethod OperationListWarningData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Route: Represents a Route resource. A route specifies how certain -// packets should be handled by the network. 
Routes are associated with -// instances by tags and the set of routes for a particular instance is -// called its routing table. -// -// For each packet leaving an instance, the system searches that -// instance's routing table for a single best matching route. Routes -// match packets by destination IP address, preferring smaller or more -// specific ranges over larger ones. If there is a tie, the system -// selects the route with the smallest priority value. If there is still -// a tie, it uses the layer three and four packet headers to select just -// one of the remaining matching routes. The packet is then forwarded as -// specified by the nextHop field of the winning route - either to -// another instance destination, an instance gateway, or a Google -// Compute Engine-operated gateway. -// -// Packets that do not match any route in the sending instance's routing -// table are dropped. -type Route struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // DestRange: The destination range of outgoing packets that this route - // applies to. Only IPv4 is supported. - DestRange string `json:"destRange,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of this resource. Always compute#routes for - // Route resources. - Kind string `json:"kind,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // Network: Fully-qualified URL of the network that this route applies - // to. - Network string `json:"network,omitempty"` - - // NextHopGateway: The URL to a gateway that should handle matching - // packets. You can only specify the internet gateway using a full or - // partial valid URL: - // projects//global/gateways/default-internet-gateway - NextHopGateway string `json:"nextHopGateway,omitempty"` - - // NextHopInstance: The URL to an instance that should handle matching - // packets. You can specify this as a full or partial URL. For - // example: - // https://www.googleapis.com/compute/v1/projects/project/zones/ - // zone/instances/ - NextHopInstance string `json:"nextHopInstance,omitempty"` - - // NextHopIp: The network IP address of an instance that should handle - // matching packets. Only IPv4 is supported. - NextHopIp string `json:"nextHopIp,omitempty"` - - // NextHopNetwork: The URL of the local network if it should handle - // matching packets. - NextHopNetwork string `json:"nextHopNetwork,omitempty"` - - // NextHopPeering: [Output Only] The network peering name that should - // handle matching packets, which should conform to RFC1035. - NextHopPeering string `json:"nextHopPeering,omitempty"` - - // NextHopVpnTunnel: The URL to a VpnTunnel that should handle matching - // packets. 
- NextHopVpnTunnel string `json:"nextHopVpnTunnel,omitempty"` - - // Priority: The priority of this route. Priority is used to break ties - // in cases where there is more than one matching route of equal prefix - // length. In the case of two routes with equal prefix length, the one - // with the lowest-numbered priority value wins. Default value is 1000. - // Valid range is 0 through 65535. - Priority int64 `json:"priority,omitempty"` - - // SelfLink: [Output Only] Server-defined fully-qualified URL for this - // resource. - SelfLink string `json:"selfLink,omitempty"` - - // Tags: A list of instance tags to which this route applies. - Tags []string `json:"tags,omitempty"` - - // Warnings: [Output Only] If potential misconfigurations are detected - // for this route, this field will be populated with warning messages. - Warnings []*RouteWarnings `json:"warnings,omitempty"` +type OperationsScopedList struct { + // Operations: [Output Only] List of operations contained in this scope. + Operations []*Operation `json:"operations,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Warning: [Output Only] Informational warning which replaces the list + // of operations when the list is empty. + Warning *OperationsScopedListWarning `json:"warning,omitempty"` - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "Operations") to + // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Operations") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *Route) MarshalJSON() ([]byte, error) { - type noMethod Route +func (s *OperationsScopedList) MarshalJSON() ([]byte, error) { + type noMethod OperationsScopedList raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouteWarnings struct { +// OperationsScopedListWarning: [Output Only] Informational warning +// which replaces the list of operations when the list is empty. +type OperationsScopedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. 
@@ -14929,7 +19173,7 @@ type RouteWarnings struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*RouteWarningsData `json:"data,omitempty"` + Data []*OperationsScopedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -14952,13 +19196,13 @@ type RouteWarnings struct { NullFields []string `json:"-"` } -func (s *RouteWarnings) MarshalJSON() ([]byte, error) { - type noMethod RouteWarnings +func (s *OperationsScopedListWarning) MarshalJSON() ([]byte, error) { + type noMethod OperationsScopedListWarning raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouteWarningsData struct { +type OperationsScopedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -14989,40 +19233,38 @@ type RouteWarningsData struct { NullFields []string `json:"-"` } -func (s *RouteWarningsData) MarshalJSON() ([]byte, error) { - type noMethod RouteWarningsData +func (s *OperationsScopedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod OperationsScopedListWarningData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RouteList: Contains a list of Route resources. -type RouteList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of Route resources. - Items []*Route `json:"items,omitempty"` - - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` +// PathMatcher: A matcher for the path portion of the URL. The +// BackendService from the longest-matched rule will serve the URL. If +// no rule was matched, the default service will be used. +type PathMatcher struct { + // DefaultService: The full or partial URL to the BackendService + // resource. This will be used if none of the pathRules defined by this + // PathMatcher is matched by the URL's path portion. For example, the + // following are all valid URLs to a BackendService resource: + // - + // https://www.googleapis.com/compute/v1/projects/project/global/backendServices/backendService + // - compute/v1/projects/project/global/backendServices/backendService + // + // - global/backendServices/backendService + DefaultService string `json:"defaultService,omitempty"` - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` + // Name: The name to which this PathMatcher is referred by the HostRule. + Name string `json:"name,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. 
- googleapi.ServerResponse `json:"-"` + // PathRules: The list of path rules. + PathRules []*PathRule `json:"pathRules,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "DefaultService") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -15030,75 +19272,36 @@ type RouteList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "DefaultService") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *RouteList) MarshalJSON() ([]byte, error) { - type noMethod RouteList +func (s *PathMatcher) MarshalJSON() ([]byte, error) { + type noMethod PathMatcher raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Router: Router resource. -type Router struct { - // Bgp: BGP information specific to this router. - Bgp *RouterBgp `json:"bgp,omitempty"` - - // BgpPeers: BGP information that needs to be configured into the - // routing stack to establish the BGP peering. It must specify peer ASN - // and either interface name, IP, or peer IP. Please refer to RFC4273. - BgpPeers []*RouterBgpPeer `json:"bgpPeers,omitempty"` - - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Interfaces: Router interfaces. Each interface requires either one - // linked resource (e.g. linkedVpnTunnel), or IP address and IP address - // range (e.g. ipRange), or both. - Interfaces []*RouterInterface `json:"interfaces,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#router for - // routers. - Kind string `json:"kind,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. 
- Name string `json:"name,omitempty"` - - // Network: URI of the network to which this router belongs. - Network string `json:"network,omitempty"` - - // Region: [Output Only] URI of the region where the router resides. - Region string `json:"region,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` +// PathRule: A path-matching rule for a URL. If matched, will use the +// specified BackendService to handle the traffic arriving at this URL. +type PathRule struct { + // Paths: The list of path patterns to match. Each must start with / and + // the only place a * is allowed is at the end following a /. The string + // fed to the path matcher does not include any text after the first ? + // or #, and those chars are not allowed here. + Paths []string `json:"paths,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Service: The URL of the BackendService resource if this rule is + // matched. + Service string `json:"service,omitempty"` - // ForceSendFields is a list of field names (e.g. "Bgp") to + // ForceSendFields is a list of field names (e.g. "Paths") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -15106,7 +19309,7 @@ type Router struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Bgp") to include in API + // NullFields is a list of field names (e.g. "Paths") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -15115,23 +19318,20 @@ type Router struct { NullFields []string `json:"-"` } -func (s *Router) MarshalJSON() ([]byte, error) { - type noMethod Router +func (s *PathRule) MarshalJSON() ([]byte, error) { + type noMethod PathRule raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RouterAdvertisedPrefix: Description-tagged prefixes for the router to -// advertise. -type RouterAdvertisedPrefix struct { - // Description: User-specified description for the prefix. - Description string `json:"description,omitempty"` +type PerInstanceConfig struct { + // Instance: The URL of the instance. Serves as a merge key during + // UpdatePerInstanceConfigs operation. + Instance string `json:"instance,omitempty"` - // Prefix: The prefix to advertise. The value must be a CIDR-formatted - // string. - Prefix string `json:"prefix,omitempty"` + Override *ManagedInstanceOverride `json:"override,omitempty"` - // ForceSendFields is a list of field names (e.g. "Description") to + // ForceSendFields is a list of field names (e.g. "Instance") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -15139,49 +19339,85 @@ type RouterAdvertisedPrefix struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Description") to include - // in API requests with the JSON null value. 
By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Instance") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *RouterAdvertisedPrefix) MarshalJSON() ([]byte, error) { - type noMethod RouterAdvertisedPrefix +func (s *PerInstanceConfig) MarshalJSON() ([]byte, error) { + type noMethod PerInstanceConfig raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RouterAggregatedList: Contains a list of routers. -type RouterAggregatedList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` +// Policy: Defines an Identity and Access Management (IAM) policy. It is +// used to specify access control policies for Cloud Platform +// resources. +// +// +// +// A `Policy` consists of a list of `bindings`. A `Binding` binds a list +// of `members` to a `role`, where the members can be user accounts, +// Google groups, Google domains, and service accounts. A `role` is a +// named list of permissions defined by IAM. +// +// **Example** +// +// { "bindings": [ { "role": "roles/owner", "members": [ +// "user:mike@example.com", "group:admins@example.com", +// "domain:google.com", +// "serviceAccount:my-other-app@appspot.gserviceaccount.com", ] }, { +// "role": "roles/viewer", "members": ["user:sean@example.com"] } ] +// } +// +// For a description of IAM and its features, see the [IAM developer's +// guide](https://cloud.google.com/iam). +type Policy struct { + // AuditConfigs: Specifies cloud audit logging configuration for this + // policy. + AuditConfigs []*AuditConfig `json:"auditConfigs,omitempty"` - // Items: A list of Router resources. - Items map[string]RoutersScopedList `json:"items,omitempty"` + // Bindings: Associates a list of `members` to a `role`. `bindings` with + // no members will result in an error. + Bindings []*Binding `json:"bindings,omitempty"` - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` + // Etag: `etag` is used for optimistic concurrency control as a way to + // help prevent simultaneous updates of a policy from overwriting each + // other. It is strongly suggested that systems make use of the `etag` + // in the read-modify-write cycle to perform policy updates in order to + // avoid race conditions: An `etag` is returned in the response to + // `getIamPolicy`, and systems are expected to put that etag in the + // request to `setIamPolicy` to ensure that their change will be applied + // to the same version of the policy. + // + // If no `etag` is provided in the call to `setIamPolicy`, then the + // existing policy is overwritten blindly. + Etag string `json:"etag,omitempty"` - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. 
Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` + IamOwned bool `json:"iamOwned,omitempty"` - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` + // Rules: If more than one rule is specified, the rules are applied in + // the following manner: - All matching LOG rules are always applied. - + // If any DENY/DENY_WITH_LOG rule matches, permission is denied. Logging + // will be applied if one or more matching rule requires logging. - + // Otherwise, if any ALLOW/ALLOW_WITH_LOG rule matches, permission is + // granted. Logging will be applied if one or more matching rule + // requires logging. - Otherwise, if no rule applies, permission is + // denied. + Rules []*Rule `json:"rules,omitempty"` + + // Version: Version of the `Policy`. The default version is 0. + Version int64 `json:"version,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "AuditConfigs") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -15189,130 +19425,120 @@ type RouterAggregatedList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "AuditConfigs") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *RouterAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod RouterAggregatedList +func (s *Policy) MarshalJSON() ([]byte, error) { + type noMethod Policy raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouterBgp struct { - // AdvertiseMode: User-specified flag to indicate which mode to use for - // advertisement. - // - // Possible values: - // "CUSTOM" - // "DEFAULT" - AdvertiseMode string `json:"advertiseMode,omitempty"` +// Project: A Project resource. Projects can only be created in the +// Google Cloud Platform Console. Unless marked otherwise, values can +// only be modified in the console. +type Project struct { + // CommonInstanceMetadata: Metadata key/value pairs available to all + // instances contained in this project. See Custom metadata for more + // information. + CommonInstanceMetadata *Metadata `json:"commonInstanceMetadata,omitempty"` - // AdvertisedGroups: User-specified list of prefix groups to advertise - // in custom mode. This field can only be populated if advertise_mode is - // CUSTOM and is advertised to all peers of the router. These groups - // will be advertised in addition to any specified prefixes. 
Leave this - // field blank to advertise no custom groups. + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // DefaultNetworkTier: This signifies the default network tier used for + // configuring resources of the project and can only take the following + // values: PREMIUM, STANDARD. Initially the default network tier is + // PREMIUM. // // Possible values: - // "ALL_SUBNETS" - AdvertisedGroups []string `json:"advertisedGroups,omitempty"` + // "PREMIUM" + // "SELECT" + // "STANDARD" + DefaultNetworkTier string `json:"defaultNetworkTier,omitempty"` - // AdvertisedPrefixs: User-specified list of individual prefixes to - // advertise in custom mode. This field can only be populated if - // advertise_mode is CUSTOM and is advertised to all peers of the - // router. These prefixes will be advertised in addition to any - // specified groups. Leave this field blank to advertise no custom - // prefixes. - AdvertisedPrefixs []*RouterAdvertisedPrefix `json:"advertisedPrefixs,omitempty"` + // DefaultServiceAccount: [Output Only] Default service account used by + // VMs running in this project. + DefaultServiceAccount string `json:"defaultServiceAccount,omitempty"` - // Asn: Local BGP Autonomous System Number (ASN). Must be an RFC6996 - // private ASN, either 16-bit or 32-bit. The value will be fixed for - // this router resource. All VPN tunnels that link to this router will - // have the same local ASN. - Asn int64 `json:"asn,omitempty"` + // Description: An optional textual description of the resource. + Description string `json:"description,omitempty"` - // ForceSendFields is a list of field names (e.g. "AdvertiseMode") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` + // EnabledFeatures: Restricted features enabled for use on this project. + EnabledFeatures []string `json:"enabledFeatures,omitempty"` - // NullFields is a list of field names (e.g. "AdvertiseMode") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. This is not the project ID, and + // is just a unique ID used by Compute Engine to identify resources. + Id uint64 `json:"id,omitempty,string"` -func (s *RouterBgp) MarshalJSON() ([]byte, error) { - type noMethod RouterBgp - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // Kind: [Output Only] Type of the resource. Always compute#project for + // projects. + Kind string `json:"kind,omitempty"` -type RouterBgpPeer struct { - // AdvertiseMode: User-specified flag to indicate which mode to use for - // advertisement. - // - // Possible values: - // "CUSTOM" - // "DEFAULT" - AdvertiseMode string `json:"advertiseMode,omitempty"` + // Name: The project ID. 
For example: my-example-project. Use the + // project ID to make requests to Compute Engine. + Name string `json:"name,omitempty"` - // AdvertisedGroups: User-specified list of prefix groups to advertise - // in custom mode. This field can only be populated if advertise_mode is - // CUSTOM and overrides the list defined for the router (in Bgp - // message). These groups will be advertised in addition to any - // specified prefixes. Leave this field blank to advertise no custom - // groups. - // - // Possible values: - // "ALL_SUBNETS" - AdvertisedGroups []string `json:"advertisedGroups,omitempty"` + // Quotas: [Output Only] Quotas assigned to this project. + Quotas []*Quota `json:"quotas,omitempty"` - // AdvertisedPrefixs: User-specified list of individual prefixes to - // advertise in custom mode. This field can only be populated if - // advertise_mode is CUSTOM and overrides the list defined for the - // router (in Bgp message). These prefixes will be advertised in - // addition to any specified groups. Leave this field blank to advertise - // no custom prefixes. - AdvertisedPrefixs []*RouterAdvertisedPrefix `json:"advertisedPrefixs,omitempty"` + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` - // AdvertisedRoutePriority: The priority of routes advertised to this - // BGP peer. In the case where there is more than one matching route of - // maximum length, the routes with lowest priority value win. - AdvertisedRoutePriority int64 `json:"advertisedRoutePriority,omitempty"` + // UsageExportLocation: The naming prefix for daily usage reports and + // the Google Cloud Storage bucket where they are stored. + UsageExportLocation *UsageExportLocation `json:"usageExportLocation,omitempty"` - // InterfaceName: Name of the interface the BGP peer is associated with. - InterfaceName string `json:"interfaceName,omitempty"` + // XpnProjectStatus: [Output Only] The role this project has in a shared + // VPC configuration. Currently only HOST projects are differentiated. + // + // Possible values: + // "HOST" + // "UNSPECIFIED_XPN_PROJECT_STATUS" + XpnProjectStatus string `json:"xpnProjectStatus,omitempty"` - // IpAddress: IP address of the interface inside Google Cloud Platform. - // Only IPv4 is supported. - IpAddress string `json:"ipAddress,omitempty"` + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` - // Name: Name of this BGP peer. The name must be 1-63 characters long - // and comply with RFC1035. - Name string `json:"name,omitempty"` + // ForceSendFields is a list of field names (e.g. + // "CommonInstanceMetadata") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` - // PeerAsn: Peer BGP Autonomous System Number (ASN). For VPN use case, - // this value can be different for every tunnel. - PeerAsn int64 `json:"peerAsn,omitempty"` + // NullFields is a list of field names (e.g. "CommonInstanceMetadata") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. 
However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} - // PeerIpAddress: IP address of the BGP interface outside Google cloud. - // Only IPv4 is supported. - PeerIpAddress string `json:"peerIpAddress,omitempty"` +func (s *Project) MarshalJSON() ([]byte, error) { + type noMethod Project + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} - // ForceSendFields is a list of field names (e.g. "AdvertiseMode") to +type ProjectsDisableXpnResourceRequest struct { + // XpnResource: Service resource (a.k.a service project) ID. + XpnResource *XpnResourceId `json:"xpnResource,omitempty"` + + // ForceSendFields is a list of field names (e.g. "XpnResource") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -15320,7 +19546,7 @@ type RouterBgpPeer struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AdvertiseMode") to include + // NullFields is a list of field names (e.g. "XpnResource") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -15329,37 +19555,17 @@ type RouterBgpPeer struct { NullFields []string `json:"-"` } -func (s *RouterBgpPeer) MarshalJSON() ([]byte, error) { - type noMethod RouterBgpPeer +func (s *ProjectsDisableXpnResourceRequest) MarshalJSON() ([]byte, error) { + type noMethod ProjectsDisableXpnResourceRequest raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouterInterface struct { - // IpRange: IP address and range of the interface. The IP range must be - // in the RFC3927 link-local IP space. The value must be a - // CIDR-formatted string, for example: 169.254.0.1/30. NOTE: Do not - // truncate the address as it represents the IP address of the - // interface. - IpRange string `json:"ipRange,omitempty"` - - // LinkedInterconnectAttachment: URI of the linked interconnect - // attachment. It must be in the same region as the router. Each - // interface can have at most one linked resource and it could either be - // a VPN Tunnel or an interconnect attachment. - LinkedInterconnectAttachment string `json:"linkedInterconnectAttachment,omitempty"` - - // LinkedVpnTunnel: URI of the linked VPN tunnel. It must be in the same - // region as the router. Each interface can have at most one linked - // resource and it could either be a VPN Tunnel or an interconnect - // attachment. - LinkedVpnTunnel string `json:"linkedVpnTunnel,omitempty"` - - // Name: Name of this interface entry. The name must be 1-63 characters - // long and comply with RFC1035. - Name string `json:"name,omitempty"` +type ProjectsEnableXpnResourceRequest struct { + // XpnResource: Service resource (a.k.a service project) ID. + XpnResource *XpnResourceId `json:"xpnResource,omitempty"` - // ForceSendFields is a list of field names (e.g. "IpRange") to + // ForceSendFields is a list of field names (e.g. "XpnResource") to // unconditionally include in API requests. 
By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -15367,32 +19573,25 @@ type RouterInterface struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "IpRange") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "XpnResource") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *RouterInterface) MarshalJSON() ([]byte, error) { - type noMethod RouterInterface +func (s *ProjectsEnableXpnResourceRequest) MarshalJSON() ([]byte, error) { + type noMethod ProjectsEnableXpnResourceRequest raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RouterList: Contains a list of Router resources. -type RouterList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of Router resources. - Items []*Router `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#router for - // routers. +type ProjectsGetXpnResources struct { + // Kind: [Output Only] Type of resource. Always + // compute#projectsGetXpnResources for lists of service resources (a.k.a + // service projects) Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -15403,14 +19602,15 @@ type RouterList struct { // the results. NextPageToken string `json:"nextPageToken,omitempty"` - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` + // Resources: Service resources (a.k.a service projects) attached to + // this project as their shared VPC host. + Resources []*XpnResourceId `json:"resources,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "Kind") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -15418,7 +19618,7 @@ type RouterList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API + // NullFields is a list of field names (e.g. "Kind") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -15427,25 +19627,51 @@ type RouterList struct { NullFields []string `json:"-"` } -func (s *RouterList) MarshalJSON() ([]byte, error) { - type noMethod RouterList +func (s *ProjectsGetXpnResources) MarshalJSON() ([]byte, error) { + type noMethod ProjectsGetXpnResources raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouterStatus struct { - // BestRoutes: Best routes for this router's network. - BestRoutes []*Route `json:"bestRoutes,omitempty"` +type ProjectsListXpnHostsRequest struct { + // Organization: Optional organization ID managed by Cloud Resource + // Manager, for which to list shared VPC host projects. If not + // specified, the organization will be inferred from the project. + Organization string `json:"organization,omitempty"` - // BestRoutesForRouter: Best routes learned by this router. - BestRoutesForRouter []*Route `json:"bestRoutesForRouter,omitempty"` + // ForceSendFields is a list of field names (e.g. "Organization") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - BgpPeerStatus []*RouterStatusBgpPeerStatus `json:"bgpPeerStatus,omitempty"` + // NullFields is a list of field names (e.g. "Organization") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} - // Network: URI of the network to which this router belongs. - Network string `json:"network,omitempty"` +func (s *ProjectsListXpnHostsRequest) MarshalJSON() ([]byte, error) { + type noMethod ProjectsListXpnHostsRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} - // ForceSendFields is a list of field names (e.g. "BestRoutes") to +type ProjectsSetDefaultNetworkTierRequest struct { + // NetworkTier: Default network tier to be set. + // + // Possible values: + // "PREMIUM" + // "SELECT" + // "STANDARD" + NetworkTier string `json:"networkTier,omitempty"` + + // ForceSendFields is a list of field names (e.g. "NetworkTier") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -15453,59 +19679,106 @@ type RouterStatus struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "BestRoutes") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "NetworkTier") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. 
However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *RouterStatus) MarshalJSON() ([]byte, error) { - type noMethod RouterStatus +func (s *ProjectsSetDefaultNetworkTierRequest) MarshalJSON() ([]byte, error) { + type noMethod ProjectsSetDefaultNetworkTierRequest raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouterStatusBgpPeerStatus struct { - // AdvertisedRoutes: Routes that were advertised to the remote BGP peer - AdvertisedRoutes []*Route `json:"advertisedRoutes,omitempty"` - - // IpAddress: IP address of the local BGP interface. - IpAddress string `json:"ipAddress,omitempty"` - - // LinkedVpnTunnel: URL of the VPN tunnel that this BGP peer controls. - LinkedVpnTunnel string `json:"linkedVpnTunnel,omitempty"` +type ProjectsSetDefaultServiceAccountRequest struct { + // Email: Email address of the service account. + Email string `json:"email,omitempty"` - // Name: Name of this BGP peer. Unique within the Routers resource. - Name string `json:"name,omitempty"` + // ForceSendFields is a list of field names (e.g. "Email") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // NumLearnedRoutes: Number of routes learned from the remote BGP Peer. - NumLearnedRoutes int64 `json:"numLearnedRoutes,omitempty"` + // NullFields is a list of field names (e.g. "Email") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} - // PeerIpAddress: IP address of the remote BGP interface. - PeerIpAddress string `json:"peerIpAddress,omitempty"` +func (s *ProjectsSetDefaultServiceAccountRequest) MarshalJSON() ([]byte, error) { + type noMethod ProjectsSetDefaultServiceAccountRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} - // State: BGP state as specified in RFC1771. - State string `json:"state,omitempty"` +// Quota: A quotas entry. +type Quota struct { + // Limit: [Output Only] Quota limit for this metric. + Limit float64 `json:"limit,omitempty"` - // Status: Status of the BGP peer: {UP, DOWN} + // Metric: [Output Only] Name of the quota metric. // // Possible values: - // "DOWN" - // "UNKNOWN" - // "UP" - Status string `json:"status,omitempty"` - - // Uptime: Time this session has been up. 
Format: 14 years, 51 weeks, 6 - // days, 23 hours, 59 minutes, 59 seconds - Uptime string `json:"uptime,omitempty"` + // "AMD_S9300_GPUS" + // "AUTOSCALERS" + // "BACKEND_BUCKETS" + // "BACKEND_SERVICES" + // "COMMITMENTS" + // "CPUS" + // "CPUS_ALL_REGIONS" + // "DISKS_TOTAL_GB" + // "FIREWALLS" + // "FORWARDING_RULES" + // "HEALTH_CHECKS" + // "IMAGES" + // "INSTANCES" + // "INSTANCE_GROUPS" + // "INSTANCE_GROUP_MANAGERS" + // "INSTANCE_TEMPLATES" + // "INTERCONNECTS" + // "IN_USE_ADDRESSES" + // "LOCAL_SSD_TOTAL_GB" + // "NETWORKS" + // "NVIDIA_K80_GPUS" + // "NVIDIA_P100_GPUS" + // "PREEMPTIBLE_CPUS" + // "PREEMPTIBLE_LOCAL_SSD_GB" + // "REGIONAL_AUTOSCALERS" + // "REGIONAL_INSTANCE_GROUP_MANAGERS" + // "ROUTERS" + // "ROUTES" + // "SECURITY_POLICIES" + // "SECURITY_POLICY_RULES" + // "SNAPSHOTS" + // "SSD_TOTAL_GB" + // "SSL_CERTIFICATES" + // "STATIC_ADDRESSES" + // "SUBNETWORKS" + // "TARGET_HTTPS_PROXIES" + // "TARGET_HTTP_PROXIES" + // "TARGET_INSTANCES" + // "TARGET_POOLS" + // "TARGET_SSL_PROXIES" + // "TARGET_TCP_PROXIES" + // "TARGET_VPN_GATEWAYS" + // "URL_MAPS" + // "VPN_TUNNELS" + Metric string `json:"metric,omitempty"` - // UptimeSeconds: Time this session has been up, in seconds. Format: 145 - UptimeSeconds string `json:"uptimeSeconds,omitempty"` + // Usage: [Output Only] Current usage of this metric. + Usage float64 `json:"usage,omitempty"` - // ForceSendFields is a list of field names (e.g. "AdvertisedRoutes") to + // ForceSendFields is a list of field names (e.g. "Limit") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -15513,31 +19786,53 @@ type RouterStatusBgpPeerStatus struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AdvertisedRoutes") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Limit") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *RouterStatusBgpPeerStatus) MarshalJSON() ([]byte, error) { - type noMethod RouterStatusBgpPeerStatus +func (s *Quota) MarshalJSON() ([]byte, error) { + type noMethod Quota raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouterStatusResponse struct { - // Kind: Type of resource. +func (s *Quota) UnmarshalJSON(data []byte) error { + type noMethod Quota + var s1 struct { + Limit gensupport.JSONFloat64 `json:"limit"` + Usage gensupport.JSONFloat64 `json:"usage"` + *noMethod + } + s1.noMethod = (*noMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Limit = float64(s1.Limit) + s.Usage = float64(s1.Usage) + return nil +} + +// Reference: Represents a reference to a resource. 
+type Reference struct { + // Kind: [Output Only] Type of the resource. Always compute#reference + // for references. Kind string `json:"kind,omitempty"` - Result *RouterStatus `json:"result,omitempty"` + // ReferenceType: A description of the reference type with no implied + // semantics. Possible values include: + // - MEMBER_OF + ReferenceType string `json:"referenceType,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Referrer: URL of the resource which refers to the target. + Referrer string `json:"referrer,omitempty"` + + // Target: URL of the resource to which this reference points. + Target string `json:"target,omitempty"` // ForceSendFields is a list of field names (e.g. "Kind") to // unconditionally include in API requests. By default, fields with @@ -15556,52 +19851,112 @@ type RouterStatusResponse struct { NullFields []string `json:"-"` } -func (s *RouterStatusResponse) MarshalJSON() ([]byte, error) { - type noMethod RouterStatusResponse +func (s *Reference) MarshalJSON() ([]byte, error) { + type noMethod Reference raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RoutersPreviewResponse struct { - // Resource: Preview of given router. - Resource *Router `json:"resource,omitempty"` +// Region: Region resource. +type Region struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Deprecated: [Output Only] The deprecation status associated with this + // region. + Deprecated *DeprecationStatus `json:"deprecated,omitempty"` + + // Description: [Output Only] Textual description of the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always compute#region for + // regions. + Kind string `json:"kind,omitempty"` + + // Name: [Output Only] Name of the resource. + Name string `json:"name,omitempty"` + + // Quotas: [Output Only] Quotas assigned to this region. + Quotas []*Quota `json:"quotas,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Status: [Output Only] Status of the region, either UP or DOWN. + // + // Possible values: + // "DOWN" + // "UP" + Status string `json:"status,omitempty"` + + // Zones: [Output Only] A list of zones available in this region, in the + // form of resource URLs. + Zones []string `json:"zones,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Resource") to - // unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Resource") to include in - // API requests with the JSON null value. 
By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *RoutersPreviewResponse) MarshalJSON() ([]byte, error) { - type noMethod RoutersPreviewResponse +func (s *Region) MarshalJSON() ([]byte, error) { + type noMethod Region raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RoutersScopedList struct { - // Routers: List of routers contained in this scope. - Routers []*Router `json:"routers,omitempty"` +// RegionAutoscalerList: Contains a list of autoscalers. +type RegionAutoscalerList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` - // Warning: Informational warning which replaces the list of routers - // when the list is empty. - Warning *RoutersScopedListWarning `json:"warning,omitempty"` + // Items: A list of Autoscaler resources. + Items []*Autoscaler `json:"items,omitempty"` - // ForceSendFields is a list of field names (e.g. "Routers") to + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *RegionAutoscalerListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -15609,8 +19964,8 @@ type RoutersScopedList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Routers") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. 
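Illustrative aside (not part of the vendored patch): the generated comments above repeatedly describe the ForceSendFields / NullFields mechanism that gensupport.MarshalJSON applies in the MarshalJSON methods added in this diff. A minimal sketch of how a caller might exercise it with the PathRule type defined above; the import path google.golang.org/api/compute/v1 and the exact JSON output are assumptions for illustration, not taken from this diff.

package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1" // assumed import path for the vendored package
)

func main() {
	// An empty Service value would normally be omitted from the request body;
	// listing it in ForceSendFields makes MarshalJSON emit it anyway, and
	// listing Paths in NullFields emits an explicit JSON null for that field.
	rule := &compute.PathRule{
		Service:         "",
		ForceSendFields: []string{"Service"},
		NullFields:      []string{"Paths"},
	}
	body, err := rule.MarshalJSON()
	if err != nil {
		fmt.Println("marshal error:", err)
		return
	}
	fmt.Println(string(body)) // expected to include "service":"" and "paths":null
}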
@@ -15618,15 +19973,15 @@ type RoutersScopedList struct { NullFields []string `json:"-"` } -func (s *RoutersScopedList) MarshalJSON() ([]byte, error) { - type noMethod RoutersScopedList +func (s *RegionAutoscalerList) MarshalJSON() ([]byte, error) { + type noMethod RegionAutoscalerList raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RoutersScopedListWarning: Informational warning which replaces the -// list of routers when the list is empty. -type RoutersScopedListWarning struct { +// RegionAutoscalerListWarning: [Output Only] Informational warning +// message. +type RegionAutoscalerListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -15654,7 +20009,7 @@ type RoutersScopedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*RoutersScopedListWarningData `json:"data,omitempty"` + Data []*RegionAutoscalerListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -15677,13 +20032,13 @@ type RoutersScopedListWarning struct { NullFields []string `json:"-"` } -func (s *RoutersScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod RoutersScopedListWarning +func (s *RegionAutoscalerListWarning) MarshalJSON() ([]byte, error) { + type noMethod RegionAutoscalerListWarning raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RoutersScopedListWarningData struct { +type RegionAutoscalerListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -15714,50 +20069,43 @@ type RoutersScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *RoutersScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod RoutersScopedListWarningData +func (s *RegionAutoscalerListWarningData) MarshalJSON() ([]byte, error) { + type noMethod RegionAutoscalerListWarningData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Rule: A rule to be applied in a Policy. -type Rule struct { - // Action: Required - // - // Possible values: - // "ALLOW" - // "ALLOW_WITH_LOG" - // "DENY" - // "DENY_WITH_LOG" - // "LOG" - // "NO_ACTION" - Action string `json:"action,omitempty"` +type RegionDiskTypeList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` - // Conditions: Additional restrictions that must be met - Conditions []*Condition `json:"conditions,omitempty"` + // Items: A list of DiskType resources. + Items []*DiskType `json:"items,omitempty"` - // Description: Human-readable description of the rule. - Description string `json:"description,omitempty"` + // Kind: [Output Only] Type of resource. Always + // compute#regionDiskTypeList for region disk types. + Kind string `json:"kind,omitempty"` - // Ins: If one or more 'in' clauses are specified, the rule matches if - // the PRINCIPAL/AUTHORITY_SELECTOR is in at least one of these entries. - Ins []string `json:"ins,omitempty"` + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. 
If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` - // LogConfigs: The config returned to callers of - // tech.iam.IAM.CheckPolicy for any entries that match the LOG action. - LogConfigs []*LogConfig `json:"logConfigs,omitempty"` + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` - // NotIns: If one or more 'not_in' clauses are specified, the rule - // matches if the PRINCIPAL/AUTHORITY_SELECTOR is in none of the - // entries. - NotIns []string `json:"notIns,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *RegionDiskTypeListWarning `json:"warning,omitempty"` - // Permissions: A permission is a string of form '..' (e.g., - // 'storage.buckets.list'). A value of '*' matches all permissions, and - // a verb part of '*' (e.g., 'storage.buckets.*') matches all verbs. - Permissions []string `json:"permissions,omitempty"` + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Action") to + // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -15765,7 +20113,7 @@ type Rule struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Action") to include in API + // NullFields is a list of field names (e.g. "Id") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -15774,42 +20122,49 @@ type Rule struct { NullFields []string `json:"-"` } -func (s *Rule) MarshalJSON() ([]byte, error) { - type noMethod Rule +func (s *RegionDiskTypeList) MarshalJSON() ([]byte, error) { + type noMethod RegionDiskTypeList raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SSLHealthCheck struct { - // Port: The TCP port number for the health check request. The default - // value is 443. Valid values are 1 through 65535. - Port int64 `json:"port,omitempty"` - - // PortName: Port name as defined in InstanceGroup#NamedPort#name. If - // both port and port_name are defined, port takes precedence. - PortName string `json:"portName,omitempty"` - - // ProxyHeader: Specifies the type of proxy header to append before - // sending data to the backend, either NONE or PROXY_V1. The default is - // NONE. +// RegionDiskTypeListWarning: [Output Only] Informational warning +// message. +type RegionDiskTypeListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
// // Possible values: - // "NONE" - // "PROXY_V1" - ProxyHeader string `json:"proxyHeader,omitempty"` + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` - // Request: The application data to send once the SSL connection has - // been established (default value is empty). If both request and - // response are empty, the connection establishment alone will indicate - // health. The request data can only be ASCII. - Request string `json:"request,omitempty"` + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*RegionDiskTypeListWarningData `json:"data,omitempty"` - // Response: The bytes to match against the beginning of the response - // data. If left empty (the default value), any response will indicate - // health. The response data can only be ASCII. - Response string `json:"response,omitempty"` + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` - // ForceSendFields is a list of field names (e.g. "Port") to + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -15817,7 +20172,7 @@ type SSLHealthCheck struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Port") to include in API + // NullFields is a list of field names (e.g. "Code") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -15826,41 +20181,27 @@ type SSLHealthCheck struct { NullFields []string `json:"-"` } -func (s *SSLHealthCheck) MarshalJSON() ([]byte, error) { - type noMethod SSLHealthCheck +func (s *RegionDiskTypeListWarning) MarshalJSON() ([]byte, error) { + type noMethod RegionDiskTypeListWarning raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Scheduling: Sets the scheduling options for an Instance. -type Scheduling struct { - // AutomaticRestart: Specifies whether the instance should be - // automatically restarted if it is terminated by Compute Engine (not - // terminated by a user). You can only set the automatic restart option - // for standard instances. Preemptible instances cannot be automatically - // restarted. - // - // By default, this is set to true so an instance is automatically - // restarted if it is terminated by Compute Engine. - AutomaticRestart *bool `json:"automaticRestart,omitempty"` - - // OnHostMaintenance: Defines the maintenance behavior for this - // instance. For standard instances, the default behavior is MIGRATE. 
- // For preemptible instances, the default and only possible behavior is - // TERMINATE. For more information, see Setting Instance Scheduling - // Options. - // - // Possible values: - // "MIGRATE" - // "TERMINATE" - OnHostMaintenance string `json:"onHostMaintenance,omitempty"` +type RegionDiskTypeListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` - // Preemptible: Defines whether the instance is preemptible. This can - // only be set during instance creation, it cannot be set or changed - // after the instance has been created. - Preemptible bool `json:"preemptible,omitempty"` + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` - // ForceSendFields is a list of field names (e.g. "AutomaticRestart") to + // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -15868,47 +20209,27 @@ type Scheduling struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AutomaticRestart") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *Scheduling) MarshalJSON() ([]byte, error) { - type noMethod Scheduling +func (s *RegionDiskTypeListWarningData) MarshalJSON() ([]byte, error) { + type noMethod RegionDiskTypeListWarningData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SecurityPoliciesList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of SecurityPolicy resources. - Items []*SecurityPolicy `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always - // compute#securityPoliciesList for listsof securityPolicies - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. 
If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` +type RegionDisksResizeRequest struct { + // SizeGb: The new size of the regional persistent disk, which is + // specified in GB. + SizeGb int64 `json:"sizeGb,omitempty,string"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "SizeGb") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -15916,7 +20237,7 @@ type SecurityPoliciesList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API + // NullFields is a list of field names (e.g. "SizeGb") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -15925,92 +20246,102 @@ type SecurityPoliciesList struct { NullFields []string `json:"-"` } -func (s *SecurityPoliciesList) MarshalJSON() ([]byte, error) { - type noMethod SecurityPoliciesList +func (s *RegionDisksResizeRequest) MarshalJSON() ([]byte, error) { + type noMethod RegionDisksResizeRequest raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SecurityPolicy: A security policy is comprised of one or more rules. -// It can also be associated with one or more 'targets'. -type SecurityPolicy struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // Fingerprint: Specifies a fingerprint for this resource, which is - // essentially a hash of the metadata's contents and used for optimistic - // locking. The fingerprint is initially generated by Compute Engine and - // changes after every request to modify or update metadata. You must - // always provide an up-to-date fingerprint hash in order to update or - // change metadata. - // - // To see the latest fingerprint, make get() request to the security - // policy. - Fingerprint string `json:"fingerprint,omitempty"` +// RegionInstanceGroupList: Contains a list of InstanceGroup resources. +type RegionInstanceGroupList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` + // Items: A list of InstanceGroup resources. + Items []*InstanceGroup `json:"items,omitempty"` - // Kind: [Output only] Type of the resource. Always - // compute#securityPolicyfor security policies + // Kind: The resource type. Kind string `json:"kind,omitempty"` - // Name: Name of the resource. 
Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // Rules: List of rules that belong to this policy. There must always be - // a default rule (rule with priority 2147483647 and match "*"). If no - // rules are provided when creating a security policy, a default rule - // with action "allow" will be added. - Rules []*SecurityPolicyRule `json:"rules,omitempty"` + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` - // SelfLink: [Output Only] Server-defined URL for the resource. + // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *RegionInstanceGroupListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
NullFields []string `json:"-"` } -func (s *SecurityPolicy) MarshalJSON() ([]byte, error) { - type noMethod SecurityPolicy +func (s *RegionInstanceGroupList) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupList raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SecurityPolicyReference struct { - SecurityPolicy string `json:"securityPolicy,omitempty"` +// RegionInstanceGroupListWarning: [Output Only] Informational warning +// message. +type RegionInstanceGroupListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` - // ForceSendFields is a list of field names (e.g. "SecurityPolicy") to + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*RegionInstanceGroupListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -16018,52 +20349,36 @@ type SecurityPolicyReference struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "SecurityPolicy") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
NullFields []string `json:"-"` } -func (s *SecurityPolicyReference) MarshalJSON() ([]byte, error) { - type noMethod SecurityPolicyReference +func (s *RegionInstanceGroupListWarning) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupListWarning raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SecurityPolicyRule: Represents a rule that describes one or more -// match conditions along with the action to be taken when traffic -// matches this condition (allow or deny). -type SecurityPolicyRule struct { - // Action: The Action to preform when the client connection triggers the - // rule. Can currently be either "allow" or "deny()" where valid values - // for status are 403, 404, and 502. - Action string `json:"action,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // Kind: [Output only] Type of the resource. Always compute#rulefor - // security policies - Kind string `json:"kind,omitempty"` - - // Match: A match condition that incoming traffic is evaluated against. - // If it evaluates to true, the corresponding ?action? is enforced. - Match *SecurityPolicyRuleMatcher `json:"match,omitempty"` - - // Preview: If set to true, the specified action is not enforced. - Preview bool `json:"preview,omitempty"` +type RegionInstanceGroupListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` - // Priority: An integer indicating the priority of a rule in the list. - // The priority must be a positive value between 0 and 2147483647. Rules - // are evaluated in the increasing order of priority. - Priority int64 `json:"priority,omitempty"` + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` - // ForceSendFields is a list of field names (e.g. "Action") to + // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -16071,7 +20386,7 @@ type SecurityPolicyRule struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Action") to include in API + // NullFields is a list of field names (e.g. "Key") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -16080,22 +20395,20 @@ type SecurityPolicyRule struct { NullFields []string `json:"-"` } -func (s *SecurityPolicyRule) MarshalJSON() ([]byte, error) { - type noMethod SecurityPolicyRule +func (s *RegionInstanceGroupListWarningData) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupListWarningData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SecurityPolicyRuleMatcher: Represents a match condition that incoming -// traffic is evaluated against. Exactly one field must be specified. -type SecurityPolicyRuleMatcher struct { - // SrcIpRanges: CIDR IP address range. Only IPv4 is supported. - SrcIpRanges []string `json:"srcIpRanges,omitempty"` - - // SrcRegionCodes: Match by country or region code. - SrcRegionCodes []string `json:"srcRegionCodes,omitempty"` +// RegionInstanceGroupManagerDeleteInstanceConfigReq: +// RegionInstanceGroupManagers.deletePerInstanceConfigs +type RegionInstanceGroupManagerDeleteInstanceConfigReq struct { + // Instances: The list of instances for which we want to delete + // per-instance configs on this managed instance group. + Instances []string `json:"instances,omitempty"` - // ForceSendFields is a list of field names (e.g. "SrcIpRanges") to + // ForceSendFields is a list of field names (e.g. "Instances") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -16103,50 +20416,55 @@ type SecurityPolicyRuleMatcher struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "SrcIpRanges") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Instances") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *SecurityPolicyRuleMatcher) MarshalJSON() ([]byte, error) { - type noMethod SecurityPolicyRuleMatcher +func (s *RegionInstanceGroupManagerDeleteInstanceConfigReq) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupManagerDeleteInstanceConfigReq raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SerialPortOutput: An instance's serial console output. -type SerialPortOutput struct { - // Contents: [Output Only] The contents of the console output. - Contents string `json:"contents,omitempty"` +// RegionInstanceGroupManagerList: Contains a list of managed instance +// groups. +type RegionInstanceGroupManagerList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` - // Kind: [Output Only] Type of the resource. Always - // compute#serialPortOutput for serial port output. + // Items: A list of InstanceGroupManager resources. 
+ Items []*InstanceGroupManager `json:"items,omitempty"`
+
+ // Kind: [Output Only] The resource type, which is always
+ // compute#instanceGroupManagerList for a list of managed instance
+ // groups that exist in the regional scope.
Kind string `json:"kind,omitempty"`
- // Next: [Output Only] The position of the next byte of content from the
- // serial console output. Use this value in the next request as the
- // start parameter.
- Next int64 `json:"next,omitempty,string"`
+ // NextPageToken: [Output Only] This token allows you to get the next
+ // page of results for list requests. If the number of results is larger
+ // than maxResults, use the nextPageToken as a value for the query
+ // parameter pageToken in the next list request. Subsequent list
+ // requests will have their own nextPageToken to continue paging through
+ // the results.
+ NextPageToken string `json:"nextPageToken,omitempty"`
// SelfLink: [Output Only] Server-defined URL for this resource.
SelfLink string `json:"selfLink,omitempty"`
- // Start: The starting byte position of the output that was returned.
- // This should match the start parameter sent with the request. If the
- // serial console output exceeds the size of the buffer, older output
- // will be overwritten by newer content and the start values will be
- // mismatched.
- Start int64 `json:"start,omitempty,string"`
+ // Warning: [Output Only] Informational warning message.
+ Warning *RegionInstanceGroupManagerListWarning `json:"warning,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
- // ForceSendFields is a list of field names (e.g. "Contents") to
+ // ForceSendFields is a list of field names (e.g. "Id") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
@@ -16154,8 +20472,8 @@ type SerialPortOutput struct {
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "Contents") to include in
- // API requests with the JSON null value. By default, fields with empty
+ // NullFields is a list of field names (e.g. "Id") to include in API
+ // requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
@@ -16163,22 +20481,49 @@ type SerialPortOutput struct {
NullFields []string `json:"-"`
}
-func (s *SerialPortOutput) MarshalJSON() ([]byte, error) {
- type noMethod SerialPortOutput
+func (s *RegionInstanceGroupManagerList) MarshalJSON() ([]byte, error) {
+ type noMethod RegionInstanceGroupManagerList
raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
-// ServiceAccount: A service account.
-type ServiceAccount struct {
- // Email: Email address of the service account.
- Email string `json:"email,omitempty"`
+// RegionInstanceGroupManagerListWarning: [Output Only] Informational
+// warning message.
+type RegionInstanceGroupManagerListWarning struct {
+ // Code: [Output Only] A warning code, if applicable. For example,
+ // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in
+ // the response.
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` - // Scopes: The list of scopes to be made available for this service - // account. - Scopes []string `json:"scopes,omitempty"` + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*RegionInstanceGroupManagerListWarningData `json:"data,omitempty"` - // ForceSendFields is a list of field names (e.g. "Email") to + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -16186,7 +20531,7 @@ type ServiceAccount struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Email") to include in API + // NullFields is a list of field names (e.g. "Code") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -16195,28 +20540,27 @@ type ServiceAccount struct { NullFields []string `json:"-"` } -func (s *ServiceAccount) MarshalJSON() ([]byte, error) { - type noMethod ServiceAccount +func (s *RegionInstanceGroupManagerListWarning) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupManagerListWarning raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SignedUrlKey: Represents a customer-supplied Signing Key used by -// Cloud CDN Signed URLs -type SignedUrlKey struct { - // KeyName: Name of the key. The name must be 1-63 characters long, and - // comply with RFC1035. Specifically, the name must be 1-63 characters - // long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? - // which means the first character must be a lowercase letter, and all - // following characters must be a dash, lowercase letter, or digit, - // except the last character, which cannot be a dash. - KeyName string `json:"keyName,omitempty"` +type RegionInstanceGroupManagerListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). 
+ Key string `json:"key,omitempty"` - // KeyValue: 128-bit key value used for signing the URL. The key value - // must be a valid RFC 4648 Section 5 base64url encoded string. - KeyValue string `json:"keyValue,omitempty"` + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` - // ForceSendFields is a list of field names (e.g. "KeyName") to + // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -16224,8 +20568,8 @@ type SignedUrlKey struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "KeyName") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -16233,132 +20577,20 @@ type SignedUrlKey struct { NullFields []string `json:"-"` } -func (s *SignedUrlKey) MarshalJSON() ([]byte, error) { - type noMethod SignedUrlKey +func (s *RegionInstanceGroupManagerListWarningData) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupManagerListWarningData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Snapshot: A persistent disk snapshot resource. -type Snapshot struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // DiskSizeGb: [Output Only] Size of the snapshot, specified in GB. - DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of the resource. Always compute#snapshot for - // Snapshot resources. - Kind string `json:"kind,omitempty"` - - // LabelFingerprint: A fingerprint for the labels being applied to this - // snapshot, which is essentially a hash of the labels set used for - // optimistic locking. The fingerprint is initially generated by Compute - // Engine and changes after every request to modify or update labels. - // You must always provide an up-to-date fingerprint hash in order to - // update or change labels. - // - // To see the latest fingerprint, make a get() request to retrieve a - // snapshot. - LabelFingerprint string `json:"labelFingerprint,omitempty"` - - // Labels: Labels to apply to this snapshot. These can be later modified - // by the setLabels method. Label values may be empty. - Labels map[string]string `json:"labels,omitempty"` - - // LicenseCodes: Integer license codes indicating which licenses are - // attached to this snapshot. - LicenseCodes googleapi.Int64s `json:"licenseCodes,omitempty"` - - // Licenses: [Output Only] A list of public visible licenses that apply - // to this snapshot. 
This can be because the original image had licenses - // attached (such as a Windows image). - Licenses []string `json:"licenses,omitempty"` - - // Name: Name of the resource; provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // SnapshotEncryptionKey: Encrypts the snapshot using a - // customer-supplied encryption key. - // - // After you encrypt a snapshot using a customer-supplied key, you must - // provide the same key if you use the image later For example, you must - // provide the encryption key when you create a disk from the encrypted - // snapshot in a future request. - // - // Customer-supplied encryption keys do not protect access to metadata - // of the disk. - // - // If you do not provide an encryption key when creating the snapshot, - // then the snapshot will be encrypted using an automatically generated - // key and you do not need to provide a key to use the snapshot later. - SnapshotEncryptionKey *CustomerEncryptionKey `json:"snapshotEncryptionKey,omitempty"` - - // SourceDisk: [Output Only] The source disk used to create this - // snapshot. - SourceDisk string `json:"sourceDisk,omitempty"` - - // SourceDiskEncryptionKey: The customer-supplied encryption key of the - // source disk. Required if the source disk is protected by a - // customer-supplied encryption key. - SourceDiskEncryptionKey *CustomerEncryptionKey `json:"sourceDiskEncryptionKey,omitempty"` - - // SourceDiskId: [Output Only] The ID value of the disk used to create - // this snapshot. This value may be used to determine whether the - // snapshot was taken from the current or a previous instance of a given - // disk name. - SourceDiskId string `json:"sourceDiskId,omitempty"` - - // Status: [Output Only] The status of the snapshot. This can be - // CREATING, DELETING, FAILED, READY, or UPLOADING. - // - // Possible values: - // "CREATING" - // "DELETING" - // "FAILED" - // "READY" - // "UPLOADING" - Status string `json:"status,omitempty"` - - // StorageBytes: [Output Only] A size of the the storage used by the - // snapshot. As snapshots share storage, this number is expected to - // change with snapshot creation/deletion. - StorageBytes int64 `json:"storageBytes,omitempty,string"` - - // StorageBytesStatus: [Output Only] An indicator whether storageBytes - // is in a stable state or it is being adjusted as a result of shared - // storage reallocation. This status can either be UPDATING, meaning the - // size of the snapshot is being updated, or UP_TO_DATE, meaning the - // size of the snapshot is up-to-date. - // - // Possible values: - // "UPDATING" - // "UP_TO_DATE" - StorageBytesStatus string `json:"storageBytesStatus,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. 
- googleapi.ServerResponse `json:"-"` +// RegionInstanceGroupManagerUpdateInstanceConfigReq: +// RegionInstanceGroupManagers.updatePerInstanceConfigs +type RegionInstanceGroupManagerUpdateInstanceConfigReq struct { + // PerInstanceConfigs: The list of per-instance configs to insert or + // patch on this managed instance group. + PerInstanceConfigs []*PerInstanceConfig `json:"perInstanceConfigs,omitempty"` - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // ForceSendFields is a list of field names (e.g. "PerInstanceConfigs") // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -16366,7 +20598,7 @@ type Snapshot struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to + // NullFields is a list of field names (e.g. "PerInstanceConfigs") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -16376,40 +20608,19 @@ type Snapshot struct { NullFields []string `json:"-"` } -func (s *Snapshot) MarshalJSON() ([]byte, error) { - type noMethod Snapshot +func (s *RegionInstanceGroupManagerUpdateInstanceConfigReq) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupManagerUpdateInstanceConfigReq raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SnapshotList: Contains a list of Snapshot resources. -type SnapshotList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of Snapshot resources. - Items []*Snapshot `json:"items,omitempty"` - - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` +type RegionInstanceGroupManagersAbandonInstancesRequest struct { + // Instances: The URLs of one or more instances to abandon. This can be + // a full URL or a partial URL, such as + // zones/[ZONE]/instances/[INSTANCE_NAME]. + Instances []string `json:"instances,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "Instances") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -16417,8 +20628,8 @@ type SnapshotList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. 
"Id") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Instances") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -16426,58 +20637,69 @@ type SnapshotList struct { NullFields []string `json:"-"` } -func (s *SnapshotList) MarshalJSON() ([]byte, error) { - type noMethod SnapshotList +func (s *RegionInstanceGroupManagersAbandonInstancesRequest) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupManagersAbandonInstancesRequest raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SslCertificate: An SslCertificate resource. This resource provides a -// mechanism to upload an SSL key and certificate to the load balancer -// to serve secure connections from the user. -type SslCertificate struct { - // Certificate: A local certificate file. The certificate must be in PEM - // format. The certificate chain must be no greater than 5 certs long. - // The chain must include at least one intermediate cert. - Certificate string `json:"certificate,omitempty"` - - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` +// RegionInstanceGroupManagersApplyUpdatesRequest: +// InstanceGroupManagers.applyUpdatesToInstances +type RegionInstanceGroupManagersApplyUpdatesRequest struct { + // Instances: The list of instances for which we want to apply changes + // on this managed instance group. + Instances []string `json:"instances,omitempty"` - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` + // MaximalAction: The maximal action that should be perfomed on the + // instances. By default REPLACE. + // + // Possible values: + // "NONE" + // "REFRESH" + // "REPLACE" + // "RESTART" + MaximalAction string `json:"maximalAction,omitempty"` - // Kind: [Output Only] Type of the resource. Always - // compute#sslCertificate for SSL certificates. - Kind string `json:"kind,omitempty"` + // MinimalAction: The minimal action that should be perfomed on the + // instances. By default NONE. + // + // Possible values: + // "NONE" + // "REFRESH" + // "REPLACE" + // "RESTART" + MinimalAction string `json:"minimalAction,omitempty"` - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` + // ForceSendFields is a list of field names (e.g. "Instances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // PrivateKey: A write-only private key in PEM format. Only insert - // requests will include this field. - PrivateKey string `json:"privateKey,omitempty"` + // NullFields is a list of field names (e.g. "Instances") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} - // SelfLink: [Output only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` +func (s *RegionInstanceGroupManagersApplyUpdatesRequest) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupManagersApplyUpdatesRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` +type RegionInstanceGroupManagersDeleteInstancesRequest struct { + // Instances: The URLs of one or more instances to delete. This can be a + // full URL or a partial URL, such as + // zones/[ZONE]/instances/[INSTANCE_NAME]. + Instances []string `json:"instances,omitempty"` - // ForceSendFields is a list of field names (e.g. "Certificate") to + // ForceSendFields is a list of field names (e.g. "Instances") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -16485,32 +20707,24 @@ type SslCertificate struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Certificate") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Instances") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *SslCertificate) MarshalJSON() ([]byte, error) { - type noMethod SslCertificate +func (s *RegionInstanceGroupManagersDeleteInstancesRequest) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupManagersDeleteInstancesRequest raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SslCertificateList: Contains a list of SslCertificate resources. -type SslCertificateList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of SslCertificate resources. 
- Items []*SslCertificate `json:"items,omitempty"` - - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` +type RegionInstanceGroupManagersListInstanceConfigsResp struct { + // Items: [Output Only] The list of PerInstanceConfig. + Items []*PerInstanceConfig `json:"items,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next // page of results for list requests. If the number of results is larger @@ -16520,14 +20734,14 @@ type SslCertificateList struct { // the results. NextPageToken string `json:"nextPageToken,omitempty"` - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *RegionInstanceGroupManagersListInstanceConfigsRespWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "Items") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -16535,7 +20749,7 @@ type SslCertificateList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API + // NullFields is a list of field names (e.g. "Items") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -16544,157 +20758,86 @@ type SslCertificateList struct { NullFields []string `json:"-"` } -func (s *SslCertificateList) MarshalJSON() ([]byte, error) { - type noMethod SslCertificateList +func (s *RegionInstanceGroupManagersListInstanceConfigsResp) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupManagersListInstanceConfigsResp raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Subnetwork: A Subnetwork resource. -type Subnetwork struct { - // AllowSubnetCidrRoutesOverlap: Whether this subnetwork can conflict - // with static routes. Setting this to true allows this subnetwork's - // primary and secondary ranges to conflict with routes that have - // already been configured on the corresponding network. Static routes - // will take precedence over the subnetwork route if the route prefix - // length is at least as large as the subnetwork prefix length. - // - // Also, packets destined to IPs within subnetwork may contain - // private/sensitive data and are prevented from leaving the virtual - // network. Setting this field to true will disable this feature. - // - // The default value is false and applies to all existing subnetworks - // and automatically created subnetworks. +// RegionInstanceGroupManagersListInstanceConfigsRespWarning: [Output +// Only] Informational warning message. +type RegionInstanceGroupManagersListInstanceConfigsRespWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. // - // This field cannot be set to true at resource creation time. 
- AllowSubnetCidrRoutesOverlap bool `json:"allowSubnetCidrRoutesOverlap,omitempty"` - - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. This field can be set only at - // resource creation time. - Description string `json:"description,omitempty"` - - // Fingerprint: Fingerprint of this resource. A hash of the contents - // stored in this object. This field is used in optimistic locking. This - // field will be ignored when inserting a Subnetwork. An up-to-date - // fingerprint must be provided in order to update the Subnetwork. - Fingerprint string `json:"fingerprint,omitempty"` - - // GatewayAddress: [Output Only] The gateway address for default routes - // to reach destination addresses outside this subnetwork. This field - // can be set only at resource creation time. - GatewayAddress string `json:"gatewayAddress,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // IpCidrRange: The range of internal addresses that are owned by this - // subnetwork. Provide this property when you create the subnetwork. For - // example, 10.0.0.0/8 or 192.168.0.0/16. Ranges must be unique and - // non-overlapping within a network. Only IPv4 is supported. This field - // can be set only at resource creation time. - IpCidrRange string `json:"ipCidrRange,omitempty"` - - // Kind: [Output Only] Type of the resource. Always compute#subnetwork - // for Subnetwork resources. - Kind string `json:"kind,omitempty"` - - // Name: The name of the resource, provided by the client when initially - // creating the resource. The name must be 1-63 characters long, and - // comply with RFC1035. Specifically, the name must be 1-63 characters - // long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? - // which means the first character must be a lowercase letter, and all - // following characters must be a dash, lowercase letter, or digit, - // except the last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // Network: The URL of the network to which this subnetwork belongs, - // provided by the client when initially creating the subnetwork. Only - // networks that are in the distributed mode can have subnetworks. This - // field can be set only at resource creation time. - Network string `json:"network,omitempty"` - - // PrivateIpGoogleAccess: Whether the VMs in this subnet can access - // Google services without assigned external IP addresses. This field - // can be both set at resource creation time and updated using - // setPrivateIpGoogleAccess. - PrivateIpGoogleAccess bool `json:"privateIpGoogleAccess,omitempty"` - - // Region: URL of the region where the Subnetwork resides. This field - // can be set only at resource creation time. - Region string `json:"region,omitempty"` - - // SecondaryIpRanges: An array of configurations for secondary IP ranges - // for VM instances contained in this subnetwork. The primary IP of such - // VM must belong to the primary ipCidrRange of the subnetwork. The - // alias IPs may belong to either primary or secondary ranges. 
- SecondaryIpRanges []*SubnetworkSecondaryRange `json:"secondaryIpRanges,omitempty"` + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*RegionInstanceGroupManagersListInstanceConfigsRespWarningData `json:"data,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` - // ForceSendFields is a list of field names (e.g. - // "AllowSubnetCidrRoutesOverlap") to unconditionally include in API - // requests. By default, fields with empty values are omitted from API - // requests. However, any non-pointer, non-interface field appearing in - // ForceSendFields will be sent to the server regardless of whether the - // field is empty or not. This may be used to include empty fields in - // Patch requests. + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. - // "AllowSubnetCidrRoutesOverlap") to include in API requests with the - // JSON null value. By default, fields with empty values are omitted - // from API requests. However, any field with an empty value appearing - // in NullFields will be sent to the server as null. It is an error if a - // field in this list has a non-empty value. This may be used to include - // null fields in Patch requests. + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
NullFields []string `json:"-"` } -func (s *Subnetwork) MarshalJSON() ([]byte, error) { - type noMethod Subnetwork +func (s *RegionInstanceGroupManagersListInstanceConfigsRespWarning) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupManagersListInstanceConfigsRespWarning raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SubnetworkAggregatedList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of SubnetworksScopedList resources. - Items map[string]SubnetworksScopedList `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always - // compute#subnetworkAggregatedList for aggregated lists of subnetworks. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` +type RegionInstanceGroupManagersListInstanceConfigsRespWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -16702,7 +20845,7 @@ type SubnetworkAggregatedList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API + // NullFields is a list of field names (e.g. "Key") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -16711,24 +20854,15 @@ type SubnetworkAggregatedList struct { NullFields []string `json:"-"` } -func (s *SubnetworkAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod SubnetworkAggregatedList +func (s *RegionInstanceGroupManagersListInstanceConfigsRespWarningData) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupManagersListInstanceConfigsRespWarningData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SubnetworkList: Contains a list of Subnetwork resources. -type SubnetworkList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of Subnetwork resources. - Items []*Subnetwork `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#subnetworkList - // for lists of subnetworks. - Kind string `json:"kind,omitempty"` +type RegionInstanceGroupManagersListInstancesResponse struct { + // ManagedInstances: List of managed instances. + ManagedInstances []*ManagedInstance `json:"managedInstances,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next // page of results for list requests. If the number of results is larger @@ -16738,14 +20872,11 @@ type SubnetworkList struct { // the results. NextPageToken string `json:"nextPageToken,omitempty"` - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "ManagedInstances") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -16753,8 +20884,38 @@ type SubnetworkList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "ManagedInstances") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *RegionInstanceGroupManagersListInstancesResponse) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupManagersListInstancesResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type RegionInstanceGroupManagersRecreateRequest struct { + // Instances: The URLs of one or more instances to recreate. This can be + // a full URL or a partial URL, such as + // zones/[ZONE]/instances/[INSTANCE_NAME]. + Instances []string `json:"instances,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Instances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Instances") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -16762,29 +20923,51 @@ type SubnetworkList struct { NullFields []string `json:"-"` } -func (s *SubnetworkList) MarshalJSON() ([]byte, error) { - type noMethod SubnetworkList +func (s *RegionInstanceGroupManagersRecreateRequest) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupManagersRecreateRequest raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SubnetworkSecondaryRange: Represents a secondary IP range of a -// subnetwork. -type SubnetworkSecondaryRange struct { - // IpCidrRange: The range of IP addresses belonging to this subnetwork - // secondary range. Provide this property when you create the - // subnetwork. Ranges must be unique and non-overlapping with all - // primary and secondary IP ranges within a network. Only IPv4 is - // supported. - IpCidrRange string `json:"ipCidrRange,omitempty"` +type RegionInstanceGroupManagersSetAutoHealingRequest struct { + AutoHealingPolicies []*InstanceGroupManagerAutoHealingPolicy `json:"autoHealingPolicies,omitempty"` - // RangeName: The name associated with this subnetwork secondary range, - // used when adding an alias IP range to a VM instance. The name must be - // 1-63 characters long, and comply with RFC1035. The name must be - // unique within the subnetwork. - RangeName string `json:"rangeName,omitempty"` + // ForceSendFields is a list of field names (e.g. "AutoHealingPolicies") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // ForceSendFields is a list of field names (e.g. "IpCidrRange") to + // NullFields is a list of field names (e.g. "AutoHealingPolicies") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *RegionInstanceGroupManagersSetAutoHealingRequest) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupManagersSetAutoHealingRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type RegionInstanceGroupManagersSetTargetPoolsRequest struct { + // Fingerprint: Fingerprint of the target pools information, which is a + // hash of the contents. This field is used for optimistic locking when + // you update the target pool entries. This field is optional. 
+ Fingerprint string `json:"fingerprint,omitempty"` + + // TargetPools: The URL of all TargetPool resources to which instances + // in the instanceGroup field are added. The target pools automatically + // apply to all of the instances in the managed instance group. + TargetPools []string `json:"targetPools,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Fingerprint") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -16792,7 +20975,7 @@ type SubnetworkSecondaryRange struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "IpCidrRange") to include + // NullFields is a list of field names (e.g. "Fingerprint") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -16801,21 +20984,18 @@ type SubnetworkSecondaryRange struct { NullFields []string `json:"-"` } -func (s *SubnetworkSecondaryRange) MarshalJSON() ([]byte, error) { - type noMethod SubnetworkSecondaryRange +func (s *RegionInstanceGroupManagersSetTargetPoolsRequest) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupManagersSetTargetPoolsRequest raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SubnetworksExpandIpCidrRangeRequest struct { - // IpCidrRange: The IP (in CIDR format or netmask) of internal addresses - // that are legal on this Subnetwork. This range should be disjoint from - // other subnetworks within this network. This range can only be larger - // than (i.e. a superset of) the range previously defined before the - // update. - IpCidrRange string `json:"ipCidrRange,omitempty"` +type RegionInstanceGroupManagersSetTemplateRequest struct { + // InstanceTemplate: URL of the InstanceTemplate resource from which all + // new instances will be created. + InstanceTemplate string `json:"instanceTemplate,omitempty"` - // ForceSendFields is a list of field names (e.g. "IpCidrRange") to + // ForceSendFields is a list of field names (e.g. "InstanceTemplate") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -16823,30 +21003,52 @@ type SubnetworksExpandIpCidrRangeRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "IpCidrRange") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "InstanceTemplate") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. 
This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *SubnetworksExpandIpCidrRangeRequest) MarshalJSON() ([]byte, error) { - type noMethod SubnetworksExpandIpCidrRangeRequest +func (s *RegionInstanceGroupManagersSetTemplateRequest) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupManagersSetTemplateRequest raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SubnetworksScopedList struct { - // Subnetworks: List of subnetworks contained in this scope. - Subnetworks []*Subnetwork `json:"subnetworks,omitempty"` +type RegionInstanceGroupsListInstances struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` - // Warning: An informational warning that appears when the list of - // addresses is empty. - Warning *SubnetworksScopedListWarning `json:"warning,omitempty"` + // Items: A list of InstanceWithNamedPorts resources. + Items []*InstanceWithNamedPorts `json:"items,omitempty"` - // ForceSendFields is a list of field names (e.g. "Subnetworks") to + // Kind: The resource type. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *RegionInstanceGroupsListInstancesWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -16854,24 +21056,24 @@ type SubnetworksScopedList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Subnetworks") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. 
NullFields []string `json:"-"` } -func (s *SubnetworksScopedList) MarshalJSON() ([]byte, error) { - type noMethod SubnetworksScopedList +func (s *RegionInstanceGroupsListInstances) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupsListInstances raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SubnetworksScopedListWarning: An informational warning that appears -// when the list of addresses is empty. -type SubnetworksScopedListWarning struct { +// RegionInstanceGroupsListInstancesWarning: [Output Only] Informational +// warning message. +type RegionInstanceGroupsListInstancesWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -16899,7 +21101,7 @@ type SubnetworksScopedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*SubnetworksScopedListWarningData `json:"data,omitempty"` + Data []*RegionInstanceGroupsListInstancesWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -16922,13 +21124,13 @@ type SubnetworksScopedListWarning struct { NullFields []string `json:"-"` } -func (s *SubnetworksScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod SubnetworksScopedListWarning +func (s *RegionInstanceGroupsListInstancesWarning) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupsListInstancesWarning raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SubnetworksScopedListWarningData struct { +type RegionInstanceGroupsListInstancesWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -16959,70 +21161,28 @@ type SubnetworksScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *SubnetworksScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod SubnetworksScopedListWarningData - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type SubnetworksSetPrivateIpGoogleAccessRequest struct { - PrivateIpGoogleAccess bool `json:"privateIpGoogleAccess,omitempty"` - - // ForceSendFields is a list of field names (e.g. - // "PrivateIpGoogleAccess") to unconditionally include in API requests. - // By default, fields with empty values are omitted from API requests. - // However, any non-pointer, non-interface field appearing in - // ForceSendFields will be sent to the server regardless of whether the - // field is empty or not. This may be used to include empty fields in - // Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "PrivateIpGoogleAccess") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. 
- NullFields []string `json:"-"` -} - -func (s *SubnetworksSetPrivateIpGoogleAccessRequest) MarshalJSON() ([]byte, error) { - type noMethod SubnetworksSetPrivateIpGoogleAccessRequest +func (s *RegionInstanceGroupsListInstancesWarningData) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupsListInstancesWarningData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TCPHealthCheck struct { - // Port: The TCP port number for the health check request. The default - // value is 80. Valid values are 1 through 65535. - Port int64 `json:"port,omitempty"` - - // PortName: Port name as defined in InstanceGroup#NamedPort#name. If - // both port and port_name are defined, port takes precedence. - PortName string `json:"portName,omitempty"` - - // ProxyHeader: Specifies the type of proxy header to append before - // sending data to the backend, either NONE or PROXY_V1. The default is - // NONE. +type RegionInstanceGroupsListInstancesRequest struct { + // InstanceState: Instances in which state should be returned. Valid + // options are: 'ALL', 'RUNNING'. By default, it lists all instances. // // Possible values: - // "NONE" - // "PROXY_V1" - ProxyHeader string `json:"proxyHeader,omitempty"` - - // Request: The application data to send once the TCP connection has - // been established (default value is empty). If both request and - // response are empty, the connection establishment alone will indicate - // health. The request data can only be ASCII. - Request string `json:"request,omitempty"` + // "ALL" + // "RUNNING" + InstanceState string `json:"instanceState,omitempty"` - // Response: The bytes to match against the beginning of the response - // data. If left empty (the default value), any response will indicate - // health. The response data can only be ASCII. - Response string `json:"response,omitempty"` + // PortName: Name of port user is interested in. It is optional. If it + // is set, only information about this ports will be returned. If it is + // not set, all the named ports will be returned. Always lists all + // instances. + PortName string `json:"portName,omitempty"` - // ForceSendFields is a list of field names (e.g. "Port") to + // ForceSendFields is a list of field names (e.g. "InstanceState") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -17030,36 +21190,32 @@ type TCPHealthCheck struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Port") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "InstanceState") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. 
NullFields []string `json:"-"` } -func (s *TCPHealthCheck) MarshalJSON() ([]byte, error) { - type noMethod TCPHealthCheck +func (s *RegionInstanceGroupsListInstancesRequest) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupsListInstancesRequest raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Tags: A set of instance tags. -type Tags struct { - // Fingerprint: Specifies a fingerprint for this request, which is - // essentially a hash of the metadata's contents and used for optimistic - // locking. The fingerprint is initially generated by Compute Engine and - // changes after every request to modify or update metadata. You must - // always provide an up-to-date fingerprint hash in order to update or - // change metadata. - // - // To see the latest fingerprint, make get() request to the instance. +type RegionInstanceGroupsSetNamedPortsRequest struct { + // Fingerprint: The fingerprint of the named ports information for this + // instance group. Use this optional property to prevent conflicts when + // multiple users change the named ports settings concurrently. Obtain + // the fingerprint with the instanceGroups.get method. Then, include the + // fingerprint in your request to ensure that you do not overwrite + // changes that were applied from another concurrent request. Fingerprint string `json:"fingerprint,omitempty"` - // Items: An array of tags. Each tag must be 1-63 characters long, and - // comply with RFC1035. - Items []string `json:"items,omitempty"` + // NamedPorts: The list of named ports to set for this instance group. + NamedPorts []*NamedPort `json:"namedPorts,omitempty"` // ForceSendFields is a list of field names (e.g. "Fingerprint") to // unconditionally include in API requests. By default, fields with @@ -17078,104 +21234,102 @@ type Tags struct { NullFields []string `json:"-"` } -func (s *Tags) MarshalJSON() ([]byte, error) { - type noMethod Tags +func (s *RegionInstanceGroupsSetNamedPortsRequest) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupsSetNamedPortsRequest raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetHttpProxy: A TargetHttpProxy resource. This resource defines an -// HTTP proxy. -type TargetHttpProxy struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` +// RegionList: Contains a list of region resources. +type RegionList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` + // Items: A list of Region resources. + Items []*Region `json:"items,omitempty"` - // Kind: [Output Only] Type of resource. Always compute#targetHttpProxy - // for target HTTP proxies. + // Kind: [Output Only] Type of resource. Always compute#regionList for + // lists of regions. Kind string `json:"kind,omitempty"` - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. 
Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` - // SelfLink: [Output Only] Server-defined URL for the resource. + // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` - // UrlMap: URL to the UrlMap resource that defines the mapping from URL - // to the BackendService. - UrlMap string `json:"urlMap,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *RegionListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *TargetHttpProxy) MarshalJSON() ([]byte, error) { - type noMethod TargetHttpProxy +func (s *RegionList) MarshalJSON() ([]byte, error) { + type noMethod RegionList raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetHttpProxyList: A list of TargetHttpProxy resources. -type TargetHttpProxyList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of TargetHttpProxy resources. - Items []*TargetHttpProxy `json:"items,omitempty"` - - // Kind: Type of resource. Always compute#targetHttpProxyList for lists - // of target HTTP proxies. 
- Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` +// RegionListWarning: [Output Only] Informational warning message. +type RegionListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*RegionListWarningData `json:"data,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -17183,7 +21337,7 @@ type TargetHttpProxyList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API + // NullFields is a list of field names (e.g. "Code") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -17192,22 +21346,27 @@ type TargetHttpProxyList struct { NullFields []string `json:"-"` } -func (s *TargetHttpProxyList) MarshalJSON() ([]byte, error) { - type noMethod TargetHttpProxyList +func (s *RegionListWarning) MarshalJSON() ([]byte, error) { + type noMethod RegionListWarning raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetHttpsProxiesSetQuicOverrideRequest struct { - // QuicOverride: QUIC policy for the TargetHttpsProxy resource. - // - // Possible values: - // "DISABLE" - // "ENABLE" - // "NONE" - QuicOverride string `json:"quicOverride,omitempty"` +type RegionListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. 
For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` - // ForceSendFields is a list of field names (e.g. "QuicOverride") to + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -17215,28 +21374,34 @@ type TargetHttpsProxiesSetQuicOverrideRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "QuicOverride") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *TargetHttpsProxiesSetQuicOverrideRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetHttpsProxiesSetQuicOverrideRequest +func (s *RegionListWarningData) MarshalJSON() ([]byte, error) { + type noMethod RegionListWarningData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetHttpsProxiesSetSslCertificatesRequest struct { - // SslCertificates: New set of SslCertificate resources to associate - // with this TargetHttpsProxy resource. Currently exactly one - // SslCertificate resource must be specified. - SslCertificates []string `json:"sslCertificates,omitempty"` +type RegionSetLabelsRequest struct { + // LabelFingerprint: The fingerprint of the previous set of labels for + // this resource, used to detect conflicts. The fingerprint is initially + // generated by Compute Engine and changes after every request to modify + // or update labels. You must always provide an up-to-date fingerprint + // hash in order to update or change labels. Make a get() request to the + // resource to get the latest fingerprint. + LabelFingerprint string `json:"labelFingerprint,omitempty"` - // ForceSendFields is a list of field names (e.g. "SslCertificates") to + // Labels: The labels to set for this resource. + Labels map[string]string `json:"labels,omitempty"` + + // ForceSendFields is a list of field names (e.g. "LabelFingerprint") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -17244,7 +21409,7 @@ type TargetHttpsProxiesSetSslCertificatesRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "SslCertificates") to + // NullFields is a list of field names (e.g. "LabelFingerprint") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -17254,133 +21419,60 @@ type TargetHttpsProxiesSetSslCertificatesRequest struct { NullFields []string `json:"-"` } -func (s *TargetHttpsProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetHttpsProxiesSetSslCertificatesRequest +func (s *RegionSetLabelsRequest) MarshalJSON() ([]byte, error) { + type noMethod RegionSetLabelsRequest raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetHttpsProxy: A TargetHttpsProxy resource. This resource defines -// an HTTPS proxy. -type TargetHttpsProxy struct { - // ClientSslPolicy: URL to ClientSslPolicy resource which controls the - // set of allowed SSL versions and ciphers. - ClientSslPolicy string `json:"clientSslPolicy,omitempty"` +// ResourceCommitment: Commitment for a particular resource (a +// Commitment is composed of one or more of these). +type ResourceCommitment struct { + // Amount: The amount of the resource purchased (in a type-dependent + // unit, such as bytes). For vCPUs, this can just be an integer. For + // memory, this must be provided in MB. Memory must be a multiple of 256 + // MB, with up to 6.5GB of memory per every vCPU. + Amount int64 `json:"amount,omitempty,string"` - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` + // Type: Type of resource for which this commitment applies. Possible + // values are VCPU and MEMORY + // + // Possible values: + // "LOCAL_SSD" + // "MEMORY" + // "UNSPECIFIED" + // "VCPU" + Type string `json:"type,omitempty"` - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` + // ForceSendFields is a list of field names (e.g. "Amount") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` + // NullFields is a list of field names (e.g. "Amount") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} - // Kind: [Output Only] Type of resource. 
Always compute#targetHttpsProxy - // for target HTTPS proxies. - Kind string `json:"kind,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // QuicOverride: Specifies the QUIC override policy for this - // TargetHttpsProxy resource. This determines whether the load balancer - // will attempt to negotiate QUIC with clients or not. Can specify one - // of NONE, ENABLE, or DISABLE. Specify ENABLE to always enable QUIC, - // Enables QUIC when set to ENABLE, and disables QUIC when set to - // DISABLE. If NONE is specified, uses the QUIC policy with no user - // overrides, which is equivalent to DISABLE. Not specifying this field - // is equivalent to specifying NONE. - // - // Possible values: - // "DISABLE" - // "ENABLE" - // "NONE" - QuicOverride string `json:"quicOverride,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // SslCertificates: URLs to SslCertificate resources that are used to - // authenticate connections between users and the load balancer. - // Currently, exactly one SSL certificate must be specified. - SslCertificates []string `json:"sslCertificates,omitempty"` - - // UrlMap: A fully-qualified or valid partial URL to the UrlMap resource - // that defines the mapping from URL to the BackendService. For example, - // the following are all valid URLs for specifying a URL map: - // - - // https://www.googleapis.compute/v1/projects/project/global/urlMaps/url-map - // - projects/project/global/urlMaps/url-map - // - global/urlMaps/url-map - UrlMap string `json:"urlMap,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "ClientSslPolicy") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "ClientSslPolicy") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *TargetHttpsProxy) MarshalJSON() ([]byte, error) { - type noMethod TargetHttpsProxy +func (s *ResourceCommitment) MarshalJSON() ([]byte, error) { + type noMethod ResourceCommitment raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetHttpsProxyList: Contains a list of TargetHttpsProxy resources. 
-type TargetHttpsProxyList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of TargetHttpsProxy resources. - Items []*TargetHttpsProxy `json:"items,omitempty"` - - // Kind: Type of resource. Always compute#targetHttpsProxyList for lists - // of target HTTPS proxies. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` +type ResourceGroupReference struct { + // Group: A URI referencing one of the instance groups listed in the + // backend service. + Group string `json:"group,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "Group") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -17388,7 +21480,7 @@ type TargetHttpsProxyList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API + // NullFields is a list of field names (e.g. "Group") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -17397,15 +21489,31 @@ type TargetHttpsProxyList struct { NullFields []string `json:"-"` } -func (s *TargetHttpsProxyList) MarshalJSON() ([]byte, error) { - type noMethod TargetHttpsProxyList +func (s *ResourceGroupReference) MarshalJSON() ([]byte, error) { + type noMethod ResourceGroupReference raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetInstance: A TargetInstance resource. This resource defines an -// endpoint instance that terminates traffic of certain protocols. -type TargetInstance struct { +// Route: Represents a Route resource. A route specifies how certain +// packets should be handled by the network. Routes are associated with +// instances by tags and the set of routes for a particular instance is +// called its routing table. +// +// For each packet leaving an instance, the system searches that +// instance's routing table for a single best matching route. Routes +// match packets by destination IP address, preferring smaller or more +// specific ranges over larger ones. If there is a tie, the system +// selects the route with the smallest priority value. If there is still +// a tie, it uses the layer three and four packet headers to select just +// one of the remaining matching routes. 
The packet is then forwarded as +// specified by the nextHop field of the winning route - either to +// another instance destination, an instance gateway, or a Google +// Compute Engine-operated gateway. +// +// Packets that do not match any route in the sending instance's routing +// table are dropped. +type Route struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -17414,22 +21522,16 @@ type TargetInstance struct { // property when you create the resource. Description string `json:"description,omitempty"` + // DestRange: The destination range of outgoing packets that this route + // applies to. Only IPv4 is supported. + DestRange string `json:"destRange,omitempty"` + // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` - // Instance: A URL to the virtual machine instance that handles traffic - // for this target instance. When creating a target instance, you can - // provide the fully-qualified URL or a valid partial URL to the desired - // virtual machine. For example, the following are all valid URLs: - // - - // https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/instance - // - projects/project/zones/zone/instances/instance - // - zones/zone/instances/instance - Instance string `json:"instance,omitempty"` - - // Kind: [Output Only] The type of the resource. Always - // compute#targetInstance for target instances. + // Kind: [Output Only] Type of this resource. Always compute#routes for + // Route resources. Kind string `json:"kind,omitempty"` // Name: Name of the resource. Provided by the client when the resource @@ -17441,19 +21543,56 @@ type TargetInstance struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` - // NatPolicy: NAT option controlling how IPs are NAT'ed to the instance. - // Currently only NO_NAT (default value) is supported. - // - // Possible values: - // "NO_NAT" - NatPolicy string `json:"natPolicy,omitempty"` + // Network: Fully-qualified URL of the network that this route applies + // to. + Network string `json:"network,omitempty"` - // SelfLink: [Output Only] Server-defined URL for the resource. + // NextHopGateway: The URL to a gateway that should handle matching + // packets. You can only specify the internet gateway using a full or + // partial valid URL: + // projects//global/gateways/default-internet-gateway + NextHopGateway string `json:"nextHopGateway,omitempty"` + + // NextHopInstance: The URL to an instance that should handle matching + // packets. You can specify this as a full or partial URL. For + // example: + // https://www.googleapis.com/compute/v1/projects/project/zones/ + // zone/instances/ + NextHopInstance string `json:"nextHopInstance,omitempty"` + + // NextHopIp: The network IP address of an instance that should handle + // matching packets. Only IPv4 is supported. + NextHopIp string `json:"nextHopIp,omitempty"` + + // NextHopNetwork: The URL of the local network if it should handle + // matching packets. + NextHopNetwork string `json:"nextHopNetwork,omitempty"` + + // NextHopPeering: [Output Only] The network peering name that should + // handle matching packets, which should conform to RFC1035. + NextHopPeering string `json:"nextHopPeering,omitempty"` + + // NextHopVpnTunnel: The URL to a VpnTunnel that should handle matching + // packets. 
+ NextHopVpnTunnel string `json:"nextHopVpnTunnel,omitempty"` + + // Priority: The priority of this route. Priority is used to break ties + // in cases where there is more than one matching route of equal prefix + // length. In the case of two routes with equal prefix length, the one + // with the lowest-numbered priority value wins. Default value is 1000. + // Valid range is 0 through 65535. + Priority int64 `json:"priority,omitempty"` + + // SelfLink: [Output Only] Server-defined fully-qualified URL for this + // resource. SelfLink string `json:"selfLink,omitempty"` - // Zone: [Output Only] URL of the zone where the target instance - // resides. - Zone string `json:"zone,omitempty"` + // Tags: A list of instance tags to which this route applies. + Tags []string `json:"tags,omitempty"` + + // Warnings: [Output Only] If potential misconfigurations are detected + // for this route, this field will be populated with warning messages. + Warnings []*RouteWarnings `json:"warnings,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -17477,39 +21616,84 @@ type TargetInstance struct { NullFields []string `json:"-"` } -func (s *TargetInstance) MarshalJSON() ([]byte, error) { - type noMethod TargetInstance +func (s *Route) MarshalJSON() ([]byte, error) { + type noMethod Route raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetInstanceAggregatedList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` +type RouteWarnings struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` - // Items: A list of TargetInstance resources. - Items map[string]TargetInstancesScopedList `json:"items,omitempty"` + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*RouteWarningsData `json:"data,omitempty"` - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` +func (s *RouteWarnings) MarshalJSON() ([]byte, error) { + type noMethod RouteWarnings + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} - // ForceSendFields is a list of field names (e.g. "Id") to +type RouteWarningsData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -17517,7 +21701,7 @@ type TargetInstanceAggregatedList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API + // NullFields is a list of field names (e.g. "Key") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -17526,20 +21710,20 @@ type TargetInstanceAggregatedList struct { NullFields []string `json:"-"` } -func (s *TargetInstanceAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod TargetInstanceAggregatedList +func (s *RouteWarningsData) MarshalJSON() ([]byte, error) { + type noMethod RouteWarningsData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetInstanceList: Contains a list of TargetInstance resources. -type TargetInstanceList struct { +// RouteList: Contains a list of Route resources. +type RouteList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of TargetInstance resources. - Items []*TargetInstance `json:"items,omitempty"` + // Items: A list of Route resources. 
+ Items []*Route `json:"items,omitempty"` // Kind: Type of resource. Kind string `json:"kind,omitempty"` @@ -17555,6 +21739,9 @@ type TargetInstanceList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *RouteListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -17576,47 +21763,14 @@ type TargetInstanceList struct { NullFields []string `json:"-"` } -func (s *TargetInstanceList) MarshalJSON() ([]byte, error) { - type noMethod TargetInstanceList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TargetInstancesScopedList struct { - // TargetInstances: List of target instances contained in this scope. - TargetInstances []*TargetInstance `json:"targetInstances,omitempty"` - - // Warning: Informational warning which replaces the list of addresses - // when the list is empty. - Warning *TargetInstancesScopedListWarning `json:"warning,omitempty"` - - // ForceSendFields is a list of field names (e.g. "TargetInstances") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "TargetInstances") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *TargetInstancesScopedList) MarshalJSON() ([]byte, error) { - type noMethod TargetInstancesScopedList +func (s *RouteList) MarshalJSON() ([]byte, error) { + type noMethod RouteList raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetInstancesScopedListWarning: Informational warning which -// replaces the list of addresses when the list is empty. -type TargetInstancesScopedListWarning struct { +// RouteListWarning: [Output Only] Informational warning message. +type RouteListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -17644,7 +21798,7 @@ type TargetInstancesScopedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*TargetInstancesScopedListWarningData `json:"data,omitempty"` + Data []*RouteListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. 
@@ -17667,13 +21821,13 @@ type TargetInstancesScopedListWarning struct { NullFields []string `json:"-"` } -func (s *TargetInstancesScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod TargetInstancesScopedListWarning +func (s *RouteListWarning) MarshalJSON() ([]byte, error) { + type noMethod RouteListWarning raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetInstancesScopedListWarningData struct { +type RouteListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -17704,32 +21858,21 @@ type TargetInstancesScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetInstancesScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod TargetInstancesScopedListWarningData +func (s *RouteListWarningData) MarshalJSON() ([]byte, error) { + type noMethod RouteListWarningData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetPool: A TargetPool resource. This resource defines a pool of -// instances, an associated HttpHealthCheck resource, and the fallback -// target pool. -type TargetPool struct { - // BackupPool: This field is applicable only when the containing target - // pool is serving a forwarding rule as the primary pool, and its - // failoverRatio field is properly set to a value between [0, - // 1]. - // - // backupPool and failoverRatio together define the fallback behavior of - // the primary target pool: if the ratio of the healthy instances in the - // primary pool is at or below failoverRatio, traffic arriving at the - // load-balanced IP will be directed to the backup pool. - // - // In case where failoverRatio and backupPool are not set, or all the - // instances in the backup pool are unhealthy, the traffic will be - // directed back to the primary pool in the "force" mode, where traffic - // will be spread to the healthy instances with the best effort, or to - // all instances when no instance is healthy. - BackupPool string `json:"backupPool,omitempty"` +// Router: Router resource. +type Router struct { + // Bgp: BGP information specific to this router. + Bgp *RouterBgp `json:"bgp,omitempty"` + + // BgpPeers: BGP information that needs to be configured into the + // routing stack to establish the BGP peering. It must specify peer ASN + // and either interface name, IP, or peer IP. Please refer to RFC4273. + BgpPeers []*RouterBgpPeer `json:"bgpPeers,omitempty"` // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -17739,42 +21882,17 @@ type TargetPool struct { // property when you create the resource. Description string `json:"description,omitempty"` - // FailoverRatio: This field is applicable only when the containing - // target pool is serving a forwarding rule as the primary pool (i.e., - // not as a backup pool to some other target pool). The value of the - // field must be in [0, 1]. - // - // If set, backupPool must also be set. They together define the - // fallback behavior of the primary target pool: if the ratio of the - // healthy instances in the primary pool is at or below this number, - // traffic arriving at the load-balanced IP will be directed to the - // backup pool. 
- // - // In case where failoverRatio is not set or all the instances in the - // backup pool are unhealthy, the traffic will be directed back to the - // primary pool in the "force" mode, where traffic will be spread to the - // healthy instances with the best effort, or to all instances when no - // instance is healthy. - FailoverRatio float64 `json:"failoverRatio,omitempty"` - - // HealthChecks: The URL of the HttpHealthCheck resource. A member - // instance in this pool is considered healthy if and only if the health - // checks pass. An empty list means all member instances will be - // considered healthy at all times. Only HttpHealthChecks are supported. - // Only one health check may be specified. - HealthChecks []string `json:"healthChecks,omitempty"` - // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` - // Instances: A list of resource URLs to the virtual machine instances - // serving this pool. They must live in zones contained in the same - // region as this pool. - Instances []string `json:"instances,omitempty"` + // Interfaces: Router interfaces. Each interface requires either one + // linked resource (e.g. linkedVpnTunnel), or IP address and IP address + // range (e.g. ipRange), or both. + Interfaces []*RouterInterface `json:"interfaces,omitempty"` - // Kind: [Output Only] Type of the resource. Always compute#targetPool - // for target pools. + // Kind: [Output Only] Type of resource. Always compute#router for + // routers. Kind string `json:"kind,omitempty"` // Name: Name of the resource. Provided by the client when the resource @@ -17786,37 +21904,24 @@ type TargetPool struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` - // Region: [Output Only] URL of the region where the target pool - // resides. + // Nats: List of Nat services created in this router. The maximum number + // of Nat services within a Router is 3 for Alpha. + Nats []*RouterNat `json:"nats,omitempty"` + + // Network: URI of the network to which this router belongs. + Network string `json:"network,omitempty"` + + // Region: [Output Only] URI of the region where the router resides. Region string `json:"region,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // SessionAffinity: Sesssion affinity option, must be one of the - // following values: - // NONE: Connections from the same client IP may go to any instance in - // the pool. - // CLIENT_IP: Connections from the same client IP will go to the same - // instance in the pool while that instance remains - // healthy. - // CLIENT_IP_PROTO: Connections from the same client IP with the same IP - // protocol will go to the same instance in the pool while that instance - // remains healthy. - // - // Possible values: - // "CLIENT_IP" - // "CLIENT_IP_PORT_PROTO" - // "CLIENT_IP_PROTO" - // "GENERATED_COOKIE" - // "NONE" - SessionAffinity string `json:"sessionAffinity,omitempty"` - // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "BackupPool") to + // ForceSendFields is a list of field names (e.g. "Bgp") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -17824,8 +21929,8 @@ type TargetPool struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "BackupPool") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Bgp") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -17833,37 +21938,55 @@ type TargetPool struct { NullFields []string `json:"-"` } -func (s *TargetPool) MarshalJSON() ([]byte, error) { - type noMethod TargetPool +func (s *Router) MarshalJSON() ([]byte, error) { + type noMethod Router raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -func (s *TargetPool) UnmarshalJSON(data []byte) error { - type noMethod TargetPool - var s1 struct { - FailoverRatio gensupport.JSONFloat64 `json:"failoverRatio"` - *noMethod - } - s1.noMethod = (*noMethod)(s) - if err := json.Unmarshal(data, &s1); err != nil { - return err - } - s.FailoverRatio = float64(s1.FailoverRatio) - return nil +// RouterAdvertisedPrefix: Description-tagged prefixes for the router to +// advertise. +type RouterAdvertisedPrefix struct { + // Description: User-specified description for the prefix. + Description string `json:"description,omitempty"` + + // Prefix: The prefix to advertise. The value must be a CIDR-formatted + // string. + Prefix string `json:"prefix,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Description") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Description") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } -type TargetPoolAggregatedList struct { +func (s *RouterAdvertisedPrefix) MarshalJSON() ([]byte, error) { + type noMethod RouterAdvertisedPrefix + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// RouterAggregatedList: Contains a list of routers. +type RouterAggregatedList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of TargetPool resources. - Items map[string]TargetPoolsScopedList `json:"items,omitempty"` + // Items: A list of Router resources. + Items map[string]RoutersScopedList `json:"items,omitempty"` - // Kind: [Output Only] Type of resource. Always - // compute#targetPoolAggregatedList for aggregated lists of target - // pools. + // Kind: Type of resource. 
Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -17877,6 +22000,9 @@ type TargetPoolAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *RouterAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -17898,25 +22024,49 @@ type TargetPoolAggregatedList struct { NullFields []string `json:"-"` } -func (s *TargetPoolAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolAggregatedList +func (s *RouterAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod RouterAggregatedList raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetPoolInstanceHealth struct { - HealthStatus []*HealthStatus `json:"healthStatus,omitempty"` +// RouterAggregatedListWarning: [Output Only] Informational warning +// message. +type RouterAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` - // Kind: [Output Only] Type of resource. Always - // compute#targetPoolInstanceHealth when checking the health of an - // instance. - Kind string `json:"kind,omitempty"` + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*RouterAggregatedListWarningData `json:"data,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` - // ForceSendFields is a list of field names (e.g. "HealthStatus") to + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -17924,50 +22074,36 @@ type TargetPoolInstanceHealth struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "HealthStatus") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *TargetPoolInstanceHealth) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolInstanceHealth +func (s *RouterAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod RouterAggregatedListWarning raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetPoolList: Contains a list of TargetPool resources. -type TargetPoolList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of TargetPool resources. - Items []*TargetPool `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#targetPoolList - // for lists of target pools. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` +type RouterAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -17975,7 +22111,7 @@ type TargetPoolList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API + // NullFields is a list of field names (e.g. "Key") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -17984,17 +22120,46 @@ type TargetPoolList struct { NullFields []string `json:"-"` } -func (s *TargetPoolList) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolList +func (s *RouterAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod RouterAggregatedListWarningData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetPoolsAddHealthCheckRequest struct { - // HealthChecks: The HttpHealthCheck to add to the target pool. - HealthChecks []*HealthCheckReference `json:"healthChecks,omitempty"` +type RouterBgp struct { + // AdvertiseMode: User-specified flag to indicate which mode to use for + // advertisement. + // + // Possible values: + // "CUSTOM" + // "DEFAULT" + AdvertiseMode string `json:"advertiseMode,omitempty"` - // ForceSendFields is a list of field names (e.g. "HealthChecks") to + // AdvertisedGroups: User-specified list of prefix groups to advertise + // in custom mode. This field can only be populated if advertise_mode is + // CUSTOM and is advertised to all peers of the router. These groups + // will be advertised in addition to any specified prefixes. Leave this + // field blank to advertise no custom groups. + // + // Possible values: + // "ALL_SUBNETS" + AdvertisedGroups []string `json:"advertisedGroups,omitempty"` + + // AdvertisedPrefixs: User-specified list of individual prefixes to + // advertise in custom mode. This field can only be populated if + // advertise_mode is CUSTOM and is advertised to all peers of the + // router. These prefixes will be advertised in addition to any + // specified groups. Leave this field blank to advertise no custom + // prefixes. + AdvertisedPrefixs []*RouterAdvertisedPrefix `json:"advertisedPrefixs,omitempty"` + + // Asn: Local BGP Autonomous System Number (ASN). Must be an RFC6996 + // private ASN, either 16-bit or 32-bit. The value will be fixed for + // this router resource. All VPN tunnels that link to this router will + // have the same local ASN. + Asn int64 `json:"asn,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AdvertiseMode") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -18002,7 +22167,7 @@ type TargetPoolsAddHealthCheckRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "HealthChecks") to include + // NullFields is a list of field names (e.g. "AdvertiseMode") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -18011,55 +22176,65 @@ type TargetPoolsAddHealthCheckRequest struct { NullFields []string `json:"-"` } -func (s *TargetPoolsAddHealthCheckRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolsAddHealthCheckRequest +func (s *RouterBgp) MarshalJSON() ([]byte, error) { + type noMethod RouterBgp raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetPoolsAddInstanceRequest struct { - // Instances: A full or partial URL to an instance to add to this target - // pool. This can be a full or partial URL. 
For example, the following - // are valid URLs: - // - - // https://www.googleapis.com/compute/v1/projects/project-id/zones/zone/instances/instance-name - // - projects/project-id/zones/zone/instances/instance-name - // - zones/zone/instances/instance-name - Instances []*InstanceReference `json:"instances,omitempty"` +type RouterBgpPeer struct { + // AdvertiseMode: User-specified flag to indicate which mode to use for + // advertisement. + // + // Possible values: + // "CUSTOM" + // "DEFAULT" + AdvertiseMode string `json:"advertiseMode,omitempty"` - // ForceSendFields is a list of field names (e.g. "Instances") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` + // AdvertisedGroups: User-specified list of prefix groups to advertise + // in custom mode. This field can only be populated if advertise_mode is + // CUSTOM and overrides the list defined for the router (in Bgp + // message). These groups will be advertised in addition to any + // specified prefixes. Leave this field blank to advertise no custom + // groups. + // + // Possible values: + // "ALL_SUBNETS" + AdvertisedGroups []string `json:"advertisedGroups,omitempty"` - // NullFields is a list of field names (e.g. "Instances") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} + // AdvertisedPrefixs: User-specified list of individual prefixes to + // advertise in custom mode. This field can only be populated if + // advertise_mode is CUSTOM and overrides the list defined for the + // router (in Bgp message). These prefixes will be advertised in + // addition to any specified groups. Leave this field blank to advertise + // no custom prefixes. + AdvertisedPrefixs []*RouterAdvertisedPrefix `json:"advertisedPrefixs,omitempty"` -func (s *TargetPoolsAddInstanceRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolsAddInstanceRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // AdvertisedRoutePriority: The priority of routes advertised to this + // BGP peer. In the case where there is more than one matching route of + // maximum length, the routes with lowest priority value win. + AdvertisedRoutePriority int64 `json:"advertisedRoutePriority,omitempty"` -type TargetPoolsRemoveHealthCheckRequest struct { - // HealthChecks: Health check URL to be removed. This can be a full or - // valid partial URL. For example, the following are valid URLs: - // - - // https://www.googleapis.com/compute/beta/projects/project/global/httpHealthChecks/health-check - // - projects/project/global/httpHealthChecks/health-check - // - global/httpHealthChecks/health-check - HealthChecks []*HealthCheckReference `json:"healthChecks,omitempty"` + // InterfaceName: Name of the interface the BGP peer is associated with. + InterfaceName string `json:"interfaceName,omitempty"` - // ForceSendFields is a list of field names (e.g. 
"HealthChecks") to + // IpAddress: IP address of the interface inside Google Cloud Platform. + // Only IPv4 is supported. + IpAddress string `json:"ipAddress,omitempty"` + + // Name: Name of this BGP peer. The name must be 1-63 characters long + // and comply with RFC1035. + Name string `json:"name,omitempty"` + + // PeerAsn: Peer BGP Autonomous System Number (ASN). For VPN use case, + // this value can be different for every tunnel. + PeerAsn int64 `json:"peerAsn,omitempty"` + + // PeerIpAddress: IP address of the BGP interface outside Google cloud. + // Only IPv4 is supported. + PeerIpAddress string `json:"peerIpAddress,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AdvertiseMode") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -18067,7 +22242,7 @@ type TargetPoolsRemoveHealthCheckRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "HealthChecks") to include + // NullFields is a list of field names (e.g. "AdvertiseMode") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -18076,17 +22251,37 @@ type TargetPoolsRemoveHealthCheckRequest struct { NullFields []string `json:"-"` } -func (s *TargetPoolsRemoveHealthCheckRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolsRemoveHealthCheckRequest +func (s *RouterBgpPeer) MarshalJSON() ([]byte, error) { + type noMethod RouterBgpPeer raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetPoolsRemoveInstanceRequest struct { - // Instances: URLs of the instances to be removed from target pool. - Instances []*InstanceReference `json:"instances,omitempty"` +type RouterInterface struct { + // IpRange: IP address and range of the interface. The IP range must be + // in the RFC3927 link-local IP space. The value must be a + // CIDR-formatted string, for example: 169.254.0.1/30. NOTE: Do not + // truncate the address as it represents the IP address of the + // interface. + IpRange string `json:"ipRange,omitempty"` - // ForceSendFields is a list of field names (e.g. "Instances") to + // LinkedInterconnectAttachment: URI of the linked interconnect + // attachment. It must be in the same region as the router. Each + // interface can have at most one linked resource and it could either be + // a VPN Tunnel or an interconnect attachment. + LinkedInterconnectAttachment string `json:"linkedInterconnectAttachment,omitempty"` + + // LinkedVpnTunnel: URI of the linked VPN tunnel. It must be in the same + // region as the router. Each interface can have at most one linked + // resource and it could either be a VPN Tunnel or an interconnect + // attachment. + LinkedVpnTunnel string `json:"linkedVpnTunnel,omitempty"` + + // Name: Name of this interface entry. The name must be 1-63 characters + // long and comply with RFC1035. + Name string `json:"name,omitempty"` + + // ForceSendFields is a list of field names (e.g. "IpRange") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -18094,7 +22289,7 @@ type TargetPoolsRemoveInstanceRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Instances") to include in + // NullFields is a list of field names (e.g. "IpRange") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -18103,21 +22298,44 @@ type TargetPoolsRemoveInstanceRequest struct { NullFields []string `json:"-"` } -func (s *TargetPoolsRemoveInstanceRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolsRemoveInstanceRequest +func (s *RouterInterface) MarshalJSON() ([]byte, error) { + type noMethod RouterInterface raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetPoolsScopedList struct { - // TargetPools: List of target pools contained in this scope. - TargetPools []*TargetPool `json:"targetPools,omitempty"` +// RouterList: Contains a list of Router resources. +type RouterList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` - // Warning: Informational warning which replaces the list of addresses - // when the list is empty. - Warning *TargetPoolsScopedListWarning `json:"warning,omitempty"` + // Items: A list of Router resources. + Items []*Router `json:"items,omitempty"` - // ForceSendFields is a list of field names (e.g. "TargetPools") to + // Kind: [Output Only] Type of resource. Always compute#router for + // routers. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *RouterListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -18125,24 +22343,23 @@ type TargetPoolsScopedList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "TargetPools") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *TargetPoolsScopedList) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolsScopedList +func (s *RouterList) MarshalJSON() ([]byte, error) { + type noMethod RouterList raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetPoolsScopedListWarning: Informational warning which replaces -// the list of addresses when the list is empty. -type TargetPoolsScopedListWarning struct { +// RouterListWarning: [Output Only] Informational warning message. +type RouterListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -18170,7 +22387,7 @@ type TargetPoolsScopedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*TargetPoolsScopedListWarningData `json:"data,omitempty"` + Data []*RouterListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -18193,13 +22410,13 @@ type TargetPoolsScopedListWarning struct { NullFields []string `json:"-"` } -func (s *TargetPoolsScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolsScopedListWarning +func (s *RouterListWarning) MarshalJSON() ([]byte, error) { + type noMethod RouterListWarning raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetPoolsScopedListWarningData struct { +type RouterListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -18230,44 +22447,109 @@ type TargetPoolsScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetPoolsScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolsScopedListWarningData +func (s *RouterListWarningData) MarshalJSON() ([]byte, error) { + type noMethod RouterListWarningData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetReference struct { - Target string `json:"target,omitempty"` +// RouterNat: Represents a Nat resource. It enables the VMs within the +// specified subnetworks to access Internet without external IP +// addresses. It specifies a list of subnetworks (and the ranges within) +// that want to use NAT. Customers can also provide the external IPs +// that would be used for NAT. GCP would auto-allocate ephemeral IPs if +// no external IPs are provided. +type RouterNat struct { + // AutoAllocatedNatIps: [Output Only] List of IPs allocated + // automatically by GCP for this Nat service. They will be raw IP + // strings like "179.12.26.133". They are ephemeral IPs allocated from + // the IP blocks managed by the NAT manager. This list can grow and + // shrink based on the number of VMs configured to use NAT. + AutoAllocatedNatIps []string `json:"autoAllocatedNatIps,omitempty"` + + // Name: Unique name of this Nat service. The name must be 1-63 + // characters long and comply with RFC1035. 
+ Name string `json:"name,omitempty"` - // ForceSendFields is a list of field names (e.g. "Target") to - // unconditionally include in API requests. By default, fields with + // NatIpAllocateOption: Specify the NatIpAllocateOption. If it is + // AUTO_ONLY, then nat_ip should be empty. + // + // Possible values: + // "AUTO_ONLY" + // "MANUAL_ONLY" + NatIpAllocateOption string `json:"natIpAllocateOption,omitempty"` + + // NatIps: List of URLs of the IP resources used for this Nat service. + // These IPs must be valid static external IP addresses assigned to the + // project. max_length is subject to change post alpha. + NatIps []string `json:"natIps,omitempty"` + + // SourceSubnetworkIpRangesToNat: Specify the Nat option. If this field + // contains ALL_SUBNETWORKS_ALL_IP_RANGES or + // ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not be any + // other Router.Nat section in any Router for this network in this + // region. + // + // Possible values: + // "ALL_SUBNETWORKS_ALL_IP_RANGES" + // "ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES" + // "LIST_OF_SUBNETWORKS" + SourceSubnetworkIpRangesToNat string `json:"sourceSubnetworkIpRangesToNat,omitempty"` + + // Subnetworks: List of Subnetwork resources whose traffic should be + // translated by NAT Gateway. It is used only when LIST_OF_SUBNETWORKS + // is selected for the SubnetworkIpRangeToNatOption above. + Subnetworks []*RouterNatSubnetworkToNat `json:"subnetworks,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AutoAllocatedNatIps") + // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Target") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "AutoAllocatedNatIps") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *TargetReference) MarshalJSON() ([]byte, error) { - type noMethod TargetReference +func (s *RouterNat) MarshalJSON() ([]byte, error) { + type noMethod RouterNat raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetSslProxiesSetBackendServiceRequest struct { - // Service: The URL of the new BackendService resource for the - // targetSslProxy. - Service string `json:"service,omitempty"` +// RouterNatSubnetworkToNat: Defines the IP ranges that want to use NAT +// for a subnetwork. +type RouterNatSubnetworkToNat struct { + // Name: URL for the subnetwork resource to use NAT. + Name string `json:"name,omitempty"` - // ForceSendFields is a list of field names (e.g. 
"Service") to + // SecondaryIpRangeNames: List of the secondary ranges of the Subnetwork + // that are allowed to use NAT. This can be populated only if + // "LIST_OF_SECONDARY_IP_RANGES" is one of the values in + // source_ip_ranges_to_nat. + SecondaryIpRangeNames []string `json:"secondaryIpRangeNames,omitempty"` + + // SourceIpRangesToNats: Specify the options for NAT ranges in the + // Subnetwork. All usages of single value are valid except + // NAT_IP_RANGE_OPTION_UNSPECIFIED. The only valid option with multiple + // values is: ["PRIMARY_IP_RANGE", "LIST_OF_SECONDARY_IP_RANGES"] + // Default: [ALL_IP_RANGES] + // + // Possible values: + // "ALL_IP_RANGES" + // "LIST_OF_SECONDARY_IP_RANGES" + // "PRIMARY_IP_RANGE" + SourceIpRangesToNats []string `json:"sourceIpRangesToNats,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Name") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -18275,8 +22557,8 @@ type TargetSslProxiesSetBackendServiceRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Service") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -18284,22 +22566,27 @@ type TargetSslProxiesSetBackendServiceRequest struct { NullFields []string `json:"-"` } -func (s *TargetSslProxiesSetBackendServiceRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetSslProxiesSetBackendServiceRequest +func (s *RouterNatSubnetworkToNat) MarshalJSON() ([]byte, error) { + type noMethod RouterNatSubnetworkToNat raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetSslProxiesSetProxyHeaderRequest struct { - // ProxyHeader: The new type of proxy header to append before sending - // data to the backend. NONE or PROXY_V1 are allowed. - // - // Possible values: - // "NONE" - // "PROXY_V1" - ProxyHeader string `json:"proxyHeader,omitempty"` +type RouterStatus struct { + // BestRoutes: Best routes for this router's network. + BestRoutes []*Route `json:"bestRoutes,omitempty"` - // ForceSendFields is a list of field names (e.g. "ProxyHeader") to + // BestRoutesForRouter: Best routes learned by this router. + BestRoutesForRouter []*Route `json:"bestRoutesForRouter,omitempty"` + + BgpPeerStatus []*RouterStatusBgpPeerStatus `json:"bgpPeerStatus,omitempty"` + + NatStatus []*RouterStatusNatStatus `json:"natStatus,omitempty"` + + // Network: URI of the network to which this router belongs. + Network string `json:"network,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BestRoutes") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -18307,28 +22594,59 @@ type TargetSslProxiesSetProxyHeaderRequest struct { // used to include empty fields in Patch requests. 
ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ProxyHeader") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "BestRoutes") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *TargetSslProxiesSetProxyHeaderRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetSslProxiesSetProxyHeaderRequest +func (s *RouterStatus) MarshalJSON() ([]byte, error) { + type noMethod RouterStatus raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetSslProxiesSetSslCertificatesRequest struct { - // SslCertificates: New set of URLs to SslCertificate resources to - // associate with this TargetSslProxy. Currently exactly one ssl - // certificate must be specified. - SslCertificates []string `json:"sslCertificates,omitempty"` +type RouterStatusBgpPeerStatus struct { + // AdvertisedRoutes: Routes that were advertised to the remote BGP peer + AdvertisedRoutes []*Route `json:"advertisedRoutes,omitempty"` - // ForceSendFields is a list of field names (e.g. "SslCertificates") to + // IpAddress: IP address of the local BGP interface. + IpAddress string `json:"ipAddress,omitempty"` + + // LinkedVpnTunnel: URL of the VPN tunnel that this BGP peer controls. + LinkedVpnTunnel string `json:"linkedVpnTunnel,omitempty"` + + // Name: Name of this BGP peer. Unique within the Routers resource. + Name string `json:"name,omitempty"` + + // NumLearnedRoutes: Number of routes learned from the remote BGP Peer. + NumLearnedRoutes int64 `json:"numLearnedRoutes,omitempty"` + + // PeerIpAddress: IP address of the remote BGP interface. + PeerIpAddress string `json:"peerIpAddress,omitempty"` + + // State: BGP state as specified in RFC1771. + State string `json:"state,omitempty"` + + // Status: Status of the BGP peer: {UP, DOWN} + // + // Possible values: + // "DOWN" + // "UNKNOWN" + // "UP" + Status string `json:"status,omitempty"` + + // Uptime: Time this session has been up. Format: 14 years, 51 weeks, 6 + // days, 23 hours, 59 minutes, 59 seconds + Uptime string `json:"uptime,omitempty"` + + // UptimeSeconds: Time this session has been up, in seconds. Format: 145 + UptimeSeconds string `json:"uptimeSeconds,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AdvertisedRoutes") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -18336,7 +22654,7 @@ type TargetSslProxiesSetSslCertificatesRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "SslCertificates") to + // NullFields is a list of field names (e.g. "AdvertisedRoutes") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. 
However, any field // with an empty value appearing in NullFields will be sent to the @@ -18346,77 +22664,48 @@ type TargetSslProxiesSetSslCertificatesRequest struct { NullFields []string `json:"-"` } -func (s *TargetSslProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetSslProxiesSetSslCertificatesRequest +func (s *RouterStatusBgpPeerStatus) MarshalJSON() ([]byte, error) { + type noMethod RouterStatusBgpPeerStatus raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetSslProxy: A TargetSslProxy resource. This resource defines an -// SSL proxy. -type TargetSslProxy struct { - // ClientSslPolicy: URL to ClientSslPolicy resource which controls the - // set of allowed SSL versions and ciphers. - ClientSslPolicy string `json:"clientSslPolicy,omitempty"` - - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` +// RouterStatusNatStatus: Status of a NAT contained in this router. +type RouterStatusNatStatus struct { + // AutoAllocatedNatIps: List of IPs auto-allocated for NAT. Example: + // ["1.1.1.1", "129.2.16.89"] + AutoAllocatedNatIps []string `json:"autoAllocatedNatIps,omitempty"` - // Kind: [Output Only] Type of the resource. Always - // compute#targetSslProxy for target SSL proxies. - Kind string `json:"kind,omitempty"` + // MinExtraNatIpsNeeded: The number of extra IPs to allocate. This will + // be greater than 0 only if user-specified IPs are NOT enough to allow + // all configured VMs to use NAT. This value is meaningful only when + // auto-allocation of NAT IPs is *not* used. + MinExtraNatIpsNeeded int64 `json:"minExtraNatIpsNeeded,omitempty"` - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. + // Name: Unique name of this NAT. Name string `json:"name,omitempty"` - // ProxyHeader: Specifies the type of proxy header to append before - // sending data to the backend, either NONE or PROXY_V1. The default is - // NONE. - // - // Possible values: - // "NONE" - // "PROXY_V1" - ProxyHeader string `json:"proxyHeader,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // Service: URL to the BackendService resource. - Service string `json:"service,omitempty"` + // NumVmEndpointsWithNatMappings: Number of VM endpoints (i.e., Nics) + // that can use NAT. + NumVmEndpointsWithNatMappings int64 `json:"numVmEndpointsWithNatMappings,omitempty"` - // SslCertificates: URLs to SslCertificate resources that are used to - // authenticate connections to Backends. Currently exactly one SSL - // certificate must be specified. 
- SslCertificates []string `json:"sslCertificates,omitempty"` + // UserAllocatedNatIpResources: List of fully qualified URLs of reserved + // IP address resources. + UserAllocatedNatIpResources []string `json:"userAllocatedNatIpResources,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // UserAllocatedNatIps: List of IPs user-allocated for NAT. They will be + // raw IP strings like "179.12.26.133". + UserAllocatedNatIps []string `json:"userAllocatedNatIps,omitempty"` - // ForceSendFields is a list of field names (e.g. "ClientSslPolicy") to - // unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "AutoAllocatedNatIps") + // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ClientSslPolicy") to + // NullFields is a list of field names (e.g. "AutoAllocatedNatIps") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -18426,40 +22715,54 @@ type TargetSslProxy struct { NullFields []string `json:"-"` } -func (s *TargetSslProxy) MarshalJSON() ([]byte, error) { - type noMethod TargetSslProxy +func (s *RouterStatusNatStatus) MarshalJSON() ([]byte, error) { + type noMethod RouterStatusNatStatus raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetSslProxyList: Contains a list of TargetSslProxy resources. -type TargetSslProxyList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` +type RouterStatusResponse struct { + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` - // Items: A list of TargetSslProxy resources. - Items []*TargetSslProxy `json:"items,omitempty"` + Result *RouterStatus `json:"result,omitempty"` - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // SelfLink: [Output Only] Server-defined URL for this resource. 
- SelfLink string `json:"selfLink,omitempty"` + // NullFields is a list of field names (e.g. "Kind") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RouterStatusResponse) MarshalJSON() ([]byte, error) { + type noMethod RouterStatusResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type RoutersPreviewResponse struct { + // Resource: Preview of given router. + Resource *Router `json:"resource,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "Resource") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -18467,8 +22770,8 @@ type TargetSslProxyList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Resource") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -18476,18 +22779,21 @@ type TargetSslProxyList struct { NullFields []string `json:"-"` } -func (s *TargetSslProxyList) MarshalJSON() ([]byte, error) { - type noMethod TargetSslProxyList +func (s *RoutersPreviewResponse) MarshalJSON() ([]byte, error) { + type noMethod RoutersPreviewResponse raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetTcpProxiesSetBackendServiceRequest struct { - // Service: The URL of the new BackendService resource for the - // targetTcpProxy. - Service string `json:"service,omitempty"` +type RoutersScopedList struct { + // Routers: List of routers contained in this scope. + Routers []*Router `json:"routers,omitempty"` - // ForceSendFields is a list of field names (e.g. "Service") to + // Warning: Informational warning which replaces the list of routers + // when the list is empty. + Warning *RoutersScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Routers") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -18495,7 +22801,7 @@ type TargetTcpProxiesSetBackendServiceRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Service") to include in + // NullFields is a list of field names (e.g. "Routers") to include in // API requests with the JSON null value. 
By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -18504,22 +22810,49 @@ type TargetTcpProxiesSetBackendServiceRequest struct { NullFields []string `json:"-"` } -func (s *TargetTcpProxiesSetBackendServiceRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetTcpProxiesSetBackendServiceRequest +func (s *RoutersScopedList) MarshalJSON() ([]byte, error) { + type noMethod RoutersScopedList raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetTcpProxiesSetProxyHeaderRequest struct { - // ProxyHeader: The new type of proxy header to append before sending - // data to the backend. NONE or PROXY_V1 are allowed. +// RoutersScopedListWarning: Informational warning which replaces the +// list of routers when the list is empty. +type RoutersScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. // // Possible values: - // "NONE" - // "PROXY_V1" - ProxyHeader string `json:"proxyHeader,omitempty"` + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` - // ForceSendFields is a list of field names (e.g. "ProxyHeader") to + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*RoutersScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -18527,120 +22860,175 @@ type TargetTcpProxiesSetProxyHeaderRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ProxyHeader") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. 
NullFields []string `json:"-"` } -func (s *TargetTcpProxiesSetProxyHeaderRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetTcpProxiesSetProxyHeaderRequest +func (s *RoutersScopedListWarning) MarshalJSON() ([]byte, error) { + type noMethod RoutersScopedListWarning raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetTcpProxy: A TargetTcpProxy resource. This resource defines a -// TCP proxy. -type TargetTcpProxy struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` +type RoutersScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // Kind: [Output Only] Type of the resource. Always - // compute#targetTcpProxy for target TCP proxies. - Kind string `json:"kind,omitempty"` + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. 
- Name string `json:"name,omitempty"` +func (s *RoutersScopedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod RoutersScopedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} - // ProxyHeader: Specifies the type of proxy header to append before - // sending data to the backend, either NONE or PROXY_V1. The default is - // NONE. +// Rule: A rule to be applied in a Policy. +type Rule struct { + // Action: Required // // Possible values: - // "NONE" - // "PROXY_V1" - ProxyHeader string `json:"proxyHeader,omitempty"` + // "ALLOW" + // "ALLOW_WITH_LOG" + // "DENY" + // "DENY_WITH_LOG" + // "LOG" + // "NO_ACTION" + Action string `json:"action,omitempty"` - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` + // Conditions: Additional restrictions that must be met + Conditions []*Condition `json:"conditions,omitempty"` - // Service: URL to the BackendService resource. - Service string `json:"service,omitempty"` + // Description: Human-readable description of the rule. + Description string `json:"description,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Ins: If one or more 'in' clauses are specified, the rule matches if + // the PRINCIPAL/AUTHORITY_SELECTOR is in at least one of these entries. + Ins []string `json:"ins,omitempty"` - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with + // LogConfigs: The config returned to callers of + // tech.iam.IAM.CheckPolicy for any entries that match the LOG action. + LogConfigs []*LogConfig `json:"logConfigs,omitempty"` + + // NotIns: If one or more 'not_in' clauses are specified, the rule + // matches if the PRINCIPAL/AUTHORITY_SELECTOR is in none of the + // entries. + NotIns []string `json:"notIns,omitempty"` + + // Permissions: A permission is a string of form '..' (e.g., + // 'storage.buckets.list'). A value of '*' matches all permissions, and + // a verb part of '*' (e.g., 'storage.buckets.*') matches all verbs. + Permissions []string `json:"permissions,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Action") to + // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Action") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *TargetTcpProxy) MarshalJSON() ([]byte, error) { - type noMethod TargetTcpProxy +func (s *Rule) MarshalJSON() ([]byte, error) { + type noMethod Rule raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetTcpProxyList: Contains a list of TargetTcpProxy resources. -type TargetTcpProxyList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` +type SSLHealthCheck struct { + // Port: The TCP port number for the health check request. The default + // value is 443. Valid values are 1 through 65535. + Port int64 `json:"port,omitempty"` - // Items: A list of TargetTcpProxy resources. - Items []*TargetTcpProxy `json:"items,omitempty"` + // PortName: Port name as defined in InstanceGroup#NamedPort#name. If + // both port and port_name are defined, port takes precedence. + PortName string `json:"portName,omitempty"` - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` + // PortSpecification: Specifies how port is selected for health + // checking, can be one of following values: + // USE_FIXED_PORT: The port number in + // port + // is used for health checking. + // USE_NAMED_PORT: The + // portName + // is used for health checking. + // USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for + // each network endpoint is used for health checking. For other + // backends, the port or named port specified in the Backend Service is + // used for health checking. + // + // + // If not specified, SSL health check follows behavior specified + // in + // port + // and + // portName + // fields. + // + // Possible values: + // "USE_FIXED_PORT" + // "USE_NAMED_PORT" + // "USE_SERVING_PORT" + PortSpecification string `json:"portSpecification,omitempty"` - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` + // ProxyHeader: Specifies the type of proxy header to append before + // sending data to the backend, either NONE or PROXY_V1. The default is + // NONE. + // + // Possible values: + // "NONE" + // "PROXY_V1" + ProxyHeader string `json:"proxyHeader,omitempty"` - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` + // Request: The application data to send once the SSL connection has + // been established (default value is empty). If both request and + // response are empty, the connection establishment alone will indicate + // health. The request data can only be ASCII. + Request string `json:"request,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Response: The bytes to match against the beginning of the response + // data. If left empty (the default value), any response will indicate + // health. The response data can only be ASCII. + Response string `json:"response,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. 
"Port") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -18648,7 +23036,7 @@ type TargetTcpProxyList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API + // NullFields is a list of field names (e.g. "Port") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -18657,14 +23045,67 @@ type TargetTcpProxyList struct { NullFields []string `json:"-"` } -func (s *TargetTcpProxyList) MarshalJSON() ([]byte, error) { - type noMethod TargetTcpProxyList +func (s *SSLHealthCheck) MarshalJSON() ([]byte, error) { + type noMethod SSLHealthCheck raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetVpnGateway: Represents a Target VPN gateway resource. -type TargetVpnGateway struct { +// Scheduling: Sets the scheduling options for an Instance. +type Scheduling struct { + // AutomaticRestart: Specifies whether the instance should be + // automatically restarted if it is terminated by Compute Engine (not + // terminated by a user). You can only set the automatic restart option + // for standard instances. Preemptible instances cannot be automatically + // restarted. + // + // By default, this is set to true so an instance is automatically + // restarted if it is terminated by Compute Engine. + AutomaticRestart *bool `json:"automaticRestart,omitempty"` + + // OnHostMaintenance: Defines the maintenance behavior for this + // instance. For standard instances, the default behavior is MIGRATE. + // For preemptible instances, the default and only possible behavior is + // TERMINATE. For more information, see Setting Instance Scheduling + // Options. + // + // Possible values: + // "MIGRATE" + // "TERMINATE" + OnHostMaintenance string `json:"onHostMaintenance,omitempty"` + + // Preemptible: Defines whether the instance is preemptible. This can + // only be set during instance creation, it cannot be set or changed + // after the instance has been created. + Preemptible bool `json:"preemptible,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AutomaticRestart") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AutomaticRestart") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *Scheduling) MarshalJSON() ([]byte, error) { + type noMethod Scheduling + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SecurityPolicy: A security policy is comprised of one or more rules. +// It can also be associated with one or more 'targets'. +type SecurityPolicy struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -18673,17 +23114,23 @@ type TargetVpnGateway struct { // property when you create the resource. Description string `json:"description,omitempty"` - // ForwardingRules: [Output Only] A list of URLs to the ForwardingRule - // resources. ForwardingRules are created using - // compute.forwardingRules.insert and associated to a VPN gateway. - ForwardingRules []string `json:"forwardingRules,omitempty"` + // Fingerprint: Specifies a fingerprint for this resource, which is + // essentially a hash of the metadata's contents and used for optimistic + // locking. The fingerprint is initially generated by Compute Engine and + // changes after every request to modify or update metadata. You must + // always provide an up-to-date fingerprint hash in order to update or + // change metadata. + // + // To see the latest fingerprint, make get() request to the security + // policy. + Fingerprint string `json:"fingerprint,omitempty"` // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` - // Kind: [Output Only] Type of resource. Always compute#targetVpnGateway - // for target VPN gateways. + // Kind: [Output only] Type of the resource. Always + // compute#securityPolicyfor security policies Kind string `json:"kind,omitempty"` // Name: Name of the resource. Provided by the client when the resource @@ -18695,31 +23142,15 @@ type TargetVpnGateway struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` - // Network: URL of the network to which this VPN gateway is attached. - // Provided by the client when the VPN gateway is created. - Network string `json:"network,omitempty"` - - // Region: [Output Only] URL of the region where the target VPN gateway - // resides. - Region string `json:"region,omitempty"` + // Rules: List of rules that belong to this policy. There must always be + // a default rule (rule with priority 2147483647 and match "*"). If no + // rules are provided when creating a security policy, a default rule + // with action "allow" will be added. + Rules []*SecurityPolicyRule `json:"rules,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // Status: [Output Only] The status of the VPN gateway. - // - // Possible values: - // "CREATING" - // "DELETING" - // "FAILED" - // "READY" - Status string `json:"status,omitempty"` - - // Tunnels: [Output Only] A list of URLs to VpnTunnel resources. - // VpnTunnels are created using compute.vpntunnels.insert method and - // associated to a VPN gateway. - Tunnels []string `json:"tunnels,omitempty"` - // ServerResponse contains the HTTP response code and headers from the // server. 
googleapi.ServerResponse `json:"-"` @@ -18742,73 +23173,22 @@ type TargetVpnGateway struct { NullFields []string `json:"-"` } -func (s *TargetVpnGateway) MarshalJSON() ([]byte, error) { - type noMethod TargetVpnGateway - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TargetVpnGatewayAggregatedList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of TargetVpnGateway resources. - Items map[string]TargetVpnGatewaysScopedList `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#targetVpnGateway - // for target VPN gateways. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetVpnGatewayAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod TargetVpnGatewayAggregatedList +func (s *SecurityPolicy) MarshalJSON() ([]byte, error) { + type noMethod SecurityPolicy raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetVpnGatewayList: Contains a list of TargetVpnGateway resources. -type TargetVpnGatewayList struct { +type SecurityPolicyList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of TargetVpnGateway resources. - Items []*TargetVpnGateway `json:"items,omitempty"` + // Items: A list of SecurityPolicy resources. + Items []*SecurityPolicy `json:"items,omitempty"` - // Kind: [Output Only] Type of resource. Always compute#targetVpnGateway - // for target VPN gateways. + // Kind: [Output Only] Type of resource. Always + // compute#securityPolicyList for listsof securityPolicies Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -18819,8 +23199,8 @@ type TargetVpnGatewayList struct { // the results. 
NextPageToken string `json:"nextPageToken,omitempty"` - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *SecurityPolicyListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -18843,48 +23223,15 @@ type TargetVpnGatewayList struct { NullFields []string `json:"-"` } -func (s *TargetVpnGatewayList) MarshalJSON() ([]byte, error) { - type noMethod TargetVpnGatewayList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TargetVpnGatewaysScopedList struct { - // TargetVpnGateways: [Output Only] List of target vpn gateways - // contained in this scope. - TargetVpnGateways []*TargetVpnGateway `json:"targetVpnGateways,omitempty"` - - // Warning: [Output Only] Informational warning which replaces the list - // of addresses when the list is empty. - Warning *TargetVpnGatewaysScopedListWarning `json:"warning,omitempty"` - - // ForceSendFields is a list of field names (e.g. "TargetVpnGateways") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "TargetVpnGateways") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *TargetVpnGatewaysScopedList) MarshalJSON() ([]byte, error) { - type noMethod TargetVpnGatewaysScopedList +func (s *SecurityPolicyList) MarshalJSON() ([]byte, error) { + type noMethod SecurityPolicyList raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetVpnGatewaysScopedListWarning: [Output Only] Informational -// warning which replaces the list of addresses when the list is empty. -type TargetVpnGatewaysScopedListWarning struct { +// SecurityPolicyListWarning: [Output Only] Informational warning +// message. +type SecurityPolicyListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -18912,7 +23259,7 @@ type TargetVpnGatewaysScopedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*TargetVpnGatewaysScopedListWarningData `json:"data,omitempty"` + Data []*SecurityPolicyListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. 
@@ -18935,13 +23282,13 @@ type TargetVpnGatewaysScopedListWarning struct { NullFields []string `json:"-"` } -func (s *TargetVpnGatewaysScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod TargetVpnGatewaysScopedListWarning +func (s *SecurityPolicyListWarning) MarshalJSON() ([]byte, error) { + type noMethod SecurityPolicyListWarning raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetVpnGatewaysScopedListWarningData struct { +type SecurityPolicyListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -18972,22 +23319,69 @@ type TargetVpnGatewaysScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetVpnGatewaysScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod TargetVpnGatewaysScopedListWarningData +func (s *SecurityPolicyListWarningData) MarshalJSON() ([]byte, error) { + type noMethod SecurityPolicyListWarningData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TestFailure struct { - ActualService string `json:"actualService,omitempty"` +type SecurityPolicyReference struct { + SecurityPolicy string `json:"securityPolicy,omitempty"` - ExpectedService string `json:"expectedService,omitempty"` + // ForceSendFields is a list of field names (e.g. "SecurityPolicy") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - Host string `json:"host,omitempty"` + // NullFields is a list of field names (e.g. "SecurityPolicy") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} - Path string `json:"path,omitempty"` +func (s *SecurityPolicyReference) MarshalJSON() ([]byte, error) { + type noMethod SecurityPolicyReference + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} - // ForceSendFields is a list of field names (e.g. "ActualService") to +// SecurityPolicyRule: Represents a rule that describes one or more +// match conditions along with the action to be taken when traffic +// matches this condition (allow or deny). +type SecurityPolicyRule struct { + // Action: The Action to preform when the client connection triggers the + // rule. Can currently be either "allow" or "deny()" where valid values + // for status are 403, 404, and 502. + Action string `json:"action,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Kind: [Output only] Type of the resource. 
Always + // compute#securityPolicyRule for security policy rules + Kind string `json:"kind,omitempty"` + + // Match: A match condition that incoming traffic is evaluated against. + // If it evaluates to true, the corresponding ?action? is enforced. + Match *SecurityPolicyRuleMatcher `json:"match,omitempty"` + + // Preview: If set to true, the specified action is not enforced. + Preview bool `json:"preview,omitempty"` + + // Priority: An integer indicating the priority of a rule in the list. + // The priority must be a positive value between 0 and 2147483647. Rules + // are evaluated in the increasing order of priority. + Priority int64 `json:"priority,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Action") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -18995,28 +23389,46 @@ type TestFailure struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ActualService") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Action") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *TestFailure) MarshalJSON() ([]byte, error) { - type noMethod TestFailure +func (s *SecurityPolicyRule) MarshalJSON() ([]byte, error) { + type noMethod SecurityPolicyRule raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TestPermissionsRequest struct { - // Permissions: The set of permissions to check for the 'resource'. - // Permissions with wildcards (such as '*' or 'storage.*') are not - // allowed. - Permissions []string `json:"permissions,omitempty"` +// SecurityPolicyRuleMatcher: Represents a match condition that incoming +// traffic is evaluated against. Exactly one field must be specified. +type SecurityPolicyRuleMatcher struct { + // Config: The configuration options available when specifying + // versioned_expr. This field must be specified if versioned_expr is + // specified and cannot be specified if versioned_expr is not specified. + Config *SecurityPolicyRuleMatcherConfig `json:"config,omitempty"` - // ForceSendFields is a list of field names (e.g. "Permissions") to + // SrcIpRanges: CIDR IP address range. Only IPv4 is supported. + SrcIpRanges []string `json:"srcIpRanges,omitempty"` + + // SrcRegionCodes: Match by country or region code. + SrcRegionCodes []string `json:"srcRegionCodes,omitempty"` + + // VersionedExpr: Preconfigured versioned expression. If this field is + // specified, config must also be specified. Available preconfigured + // expressions along with their requirements are: SRC_IPS_V1 - must + // specify the corresponding src_ip_range field in config. 
+ // + // Possible values: + // "SRC_IPS_V1" + // "VERSIONED_EXPR_UNSPECIFIED" + VersionedExpr string `json:"versionedExpr,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Config") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -19024,7 +23436,34 @@ type TestPermissionsRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Permissions") to include + // NullFields is a list of field names (e.g. "Config") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SecurityPolicyRuleMatcher) MarshalJSON() ([]byte, error) { + type noMethod SecurityPolicyRuleMatcher + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SecurityPolicyRuleMatcherConfig struct { + // SrcIpRanges: CIDR IP address range. Only IPv4 is supported. + SrcIpRanges []string `json:"srcIpRanges,omitempty"` + + // ForceSendFields is a list of field names (e.g. "SrcIpRanges") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "SrcIpRanges") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -19033,22 +23472,41 @@ type TestPermissionsRequest struct { NullFields []string `json:"-"` } -func (s *TestPermissionsRequest) MarshalJSON() ([]byte, error) { - type noMethod TestPermissionsRequest +func (s *SecurityPolicyRuleMatcherConfig) MarshalJSON() ([]byte, error) { + type noMethod SecurityPolicyRuleMatcherConfig raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TestPermissionsResponse struct { - // Permissions: A subset of `TestPermissionsRequest.permissions` that - // the caller is allowed. - Permissions []string `json:"permissions,omitempty"` +// SerialPortOutput: An instance's serial console output. +type SerialPortOutput struct { + // Contents: [Output Only] The contents of the console output. + Contents string `json:"contents,omitempty"` + + // Kind: [Output Only] Type of the resource. Always + // compute#serialPortOutput for serial port output. + Kind string `json:"kind,omitempty"` + + // Next: [Output Only] The position of the next byte of content from the + // serial console output. Use this value in the next request as the + // start parameter. + Next int64 `json:"next,omitempty,string"` + + // SelfLink: [Output Only] Server-defined URL for this resource. 
+ SelfLink string `json:"selfLink,omitempty"` + + // Start: The starting byte position of the output that was returned. + // This should match the start parameter sent with the request. If the + // serial console output exceeds the size of the buffer, older output + // will be overwritten by newer content and the start values will be + // mismatched. + Start int64 `json:"start,omitempty,string"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Permissions") to + // ForceSendFields is a list of field names (e.g. "Contents") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -19056,40 +23514,69 @@ type TestPermissionsResponse struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Permissions") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Contents") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *TestPermissionsResponse) MarshalJSON() ([]byte, error) { - type noMethod TestPermissionsResponse +func (s *SerialPortOutput) MarshalJSON() ([]byte, error) { + type noMethod SerialPortOutput raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type UDPHealthCheck struct { - // Port: The UDP port number for the health check request. Valid values - // are 1 through 65535. - Port int64 `json:"port,omitempty"` +// ServiceAccount: A service account. +type ServiceAccount struct { + // Email: Email address of the service account. + Email string `json:"email,omitempty"` - // PortName: Port name as defined in InstanceGroup#NamedPort#name. If - // both port and port_name are defined, port takes precedence. - PortName string `json:"portName,omitempty"` + // Scopes: The list of scopes to be made available for this service + // account. + Scopes []string `json:"scopes,omitempty"` - // Request: Raw data of request to send in payload of UDP packet. It is - // an error if this is empty. The request data can only be ASCII. - Request string `json:"request,omitempty"` + // ForceSendFields is a list of field names (e.g. "Email") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // Response: The bytes to match against the beginning of the response - // data. It is an error if this is empty. The response data can only be - // ASCII. 
- Response string `json:"response,omitempty"` + // NullFields is a list of field names (e.g. "Email") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} - // ForceSendFields is a list of field names (e.g. "Port") to +func (s *ServiceAccount) MarshalJSON() ([]byte, error) { + type noMethod ServiceAccount + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SignedUrlKey: Represents a customer-supplied Signing Key used by +// Cloud CDN Signed URLs +type SignedUrlKey struct { + // KeyName: Name of the key. The name must be 1-63 characters long, and + // comply with RFC1035. Specifically, the name must be 1-63 characters + // long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? + // which means the first character must be a lowercase letter, and all + // following characters must be a dash, lowercase letter, or digit, + // except the last character, which cannot be a dash. + KeyName string `json:"keyName,omitempty"` + + // KeyValue: 128-bit key value used for signing the URL. The key value + // must be a valid RFC 4648 Section 5 base64url encoded string. + KeyValue string `json:"keyValue,omitempty"` + + // ForceSendFields is a list of field names (e.g. "KeyName") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -19097,8 +23584,8 @@ type UDPHealthCheck struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Port") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "KeyName") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -19106,46 +23593,58 @@ type UDPHealthCheck struct { NullFields []string `json:"-"` } -func (s *UDPHealthCheck) MarshalJSON() ([]byte, error) { - type noMethod UDPHealthCheck +func (s *SignedUrlKey) MarshalJSON() ([]byte, error) { + type noMethod SignedUrlKey raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// UrlMap: A UrlMap resource. This resource defines the mapping from URL -// to the BackendService resource, based on the "longest-match" of the -// URL's host and path. -type UrlMap struct { +// Snapshot: A persistent disk snapshot resource. +type Snapshot struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` - // DefaultService: The URL of the BackendService resource if none of the - // hostRules match. - DefaultService string `json:"defaultService,omitempty"` - // Description: An optional description of this resource. Provide this // property when you create the resource. 
Description string `json:"description,omitempty"` - // Fingerprint: Fingerprint of this resource. A hash of the contents - // stored in this object. This field is used in optimistic locking. This - // field will be ignored when inserting a UrlMap. An up-to-date - // fingerprint must be provided in order to update the UrlMap. - Fingerprint string `json:"fingerprint,omitempty"` - - // HostRules: The list of HostRules to use against the URL. - HostRules []*HostRule `json:"hostRules,omitempty"` + // DiskSizeGb: [Output Only] Size of the snapshot, specified in GB. + DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"` // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` - // Kind: [Output Only] Type of the resource. Always compute#urlMaps for - // url maps. + // Kind: [Output Only] Type of the resource. Always compute#snapshot for + // Snapshot resources. Kind string `json:"kind,omitempty"` - // Name: Name of the resource. Provided by the client when the resource + // LabelFingerprint: A fingerprint for the labels being applied to this + // snapshot, which is essentially a hash of the labels set used for + // optimistic locking. The fingerprint is initially generated by Compute + // Engine and changes after every request to modify or update labels. + // You must always provide an up-to-date fingerprint hash in order to + // update or change labels. + // + // To see the latest fingerprint, make a get() request to retrieve a + // snapshot. + LabelFingerprint string `json:"labelFingerprint,omitempty"` + + // Labels: Labels to apply to this snapshot. These can be later modified + // by the setLabels method. Label values may be empty. + Labels map[string]string `json:"labels,omitempty"` + + // LicenseCodes: Integer license codes indicating which licenses are + // attached to this snapshot. + LicenseCodes googleapi.Int64s `json:"licenseCodes,omitempty"` + + // Licenses: [Output Only] A list of public visible licenses that apply + // to this snapshot. This can be because the original image had licenses + // attached (such as a Windows image). + Licenses []string `json:"licenses,omitempty"` + + // Name: Name of the resource; provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with // RFC1035. Specifically, the name must be 1-63 characters long and // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means @@ -19154,15 +23653,66 @@ type UrlMap struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` - // PathMatchers: The list of named PathMatchers to use against the URL. - PathMatchers []*PathMatcher `json:"pathMatchers,omitempty"` - // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // Tests: The list of expected URL mappings. Request to update this - // UrlMap will succeed only if all of the test cases pass. - Tests []*UrlMapTest `json:"tests,omitempty"` + // SnapshotEncryptionKey: Encrypts the snapshot using a + // customer-supplied encryption key. + // + // After you encrypt a snapshot using a customer-supplied key, you must + // provide the same key if you use the image later For example, you must + // provide the encryption key when you create a disk from the encrypted + // snapshot in a future request. + // + // Customer-supplied encryption keys do not protect access to metadata + // of the disk. 
+ // + // If you do not provide an encryption key when creating the snapshot, + // then the snapshot will be encrypted using an automatically generated + // key and you do not need to provide a key to use the snapshot later. + SnapshotEncryptionKey *CustomerEncryptionKey `json:"snapshotEncryptionKey,omitempty"` + + // SourceDisk: [Output Only] The source disk used to create this + // snapshot. + SourceDisk string `json:"sourceDisk,omitempty"` + + // SourceDiskEncryptionKey: The customer-supplied encryption key of the + // source disk. Required if the source disk is protected by a + // customer-supplied encryption key. + SourceDiskEncryptionKey *CustomerEncryptionKey `json:"sourceDiskEncryptionKey,omitempty"` + + // SourceDiskId: [Output Only] The ID value of the disk used to create + // this snapshot. This value may be used to determine whether the + // snapshot was taken from the current or a previous instance of a given + // disk name. + SourceDiskId string `json:"sourceDiskId,omitempty"` + + // Status: [Output Only] The status of the snapshot. This can be + // CREATING, DELETING, FAILED, READY, or UPLOADING. + // + // Possible values: + // "CREATING" + // "DELETING" + // "FAILED" + // "READY" + // "UPLOADING" + Status string `json:"status,omitempty"` + + // StorageBytes: [Output Only] A size of the the storage used by the + // snapshot. As snapshots share storage, this number is expected to + // change with snapshot creation/deletion. + StorageBytes int64 `json:"storageBytes,omitempty,string"` + + // StorageBytesStatus: [Output Only] An indicator whether storageBytes + // is in a stable state or it is being adjusted as a result of shared + // storage reallocation. This status can either be UPDATING, meaning the + // size of the snapshot is being updated, or UP_TO_DATE, meaning the + // size of the snapshot is up-to-date. + // + // Possible values: + // "UPDATING" + // "UP_TO_DATE" + StorageBytesStatus string `json:"storageBytesStatus,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -19186,20 +23736,20 @@ type UrlMap struct { NullFields []string `json:"-"` } -func (s *UrlMap) MarshalJSON() ([]byte, error) { - type noMethod UrlMap +func (s *Snapshot) MarshalJSON() ([]byte, error) { + type noMethod Snapshot raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// UrlMapList: Contains a list of UrlMap resources. -type UrlMapList struct { +// SnapshotList: Contains a list of Snapshot resources. +type SnapshotList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of UrlMap resources. - Items []*UrlMap `json:"items,omitempty"` + // Items: A list of Snapshot resources. + Items []*Snapshot `json:"items,omitempty"` // Kind: Type of resource. Kind string `json:"kind,omitempty"` @@ -19215,6 +23765,9 @@ type UrlMapList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *SnapshotListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. 
googleapi.ServerResponse `json:"-"` @@ -19236,16 +23789,48 @@ type UrlMapList struct { NullFields []string `json:"-"` } -func (s *UrlMapList) MarshalJSON() ([]byte, error) { - type noMethod UrlMapList +func (s *SnapshotList) MarshalJSON() ([]byte, error) { + type noMethod SnapshotList raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type UrlMapReference struct { - UrlMap string `json:"urlMap,omitempty"` +// SnapshotListWarning: [Output Only] Informational warning message. +type SnapshotListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` - // ForceSendFields is a list of field names (e.g. "UrlMap") to + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*SnapshotListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -19253,7 +23838,7 @@ type UrlMapReference struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "UrlMap") to include in API + // NullFields is a list of field names (e.g. "Code") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -19262,28 +23847,27 @@ type UrlMapReference struct { NullFields []string `json:"-"` } -func (s *UrlMapReference) MarshalJSON() ([]byte, error) { - type noMethod UrlMapReference +func (s *SnapshotListWarning) MarshalJSON() ([]byte, error) { + type noMethod SnapshotListWarning raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// UrlMapTest: Message for the expected URL mappings. -type UrlMapTest struct { - // Description: Description of this test case. - Description string `json:"description,omitempty"` - - // Host: Host portion of the URL. - Host string `json:"host,omitempty"` - - // Path: Path portion of the URL. - Path string `json:"path,omitempty"` +type SnapshotListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. 
Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` - // Service: Expected BackendService resource the given URL should be - // mapped to. - Service string `json:"service,omitempty"` + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` - // ForceSendFields is a list of field names (e.g. "Description") to + // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -19291,38 +23875,67 @@ type UrlMapTest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Description") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *UrlMapTest) MarshalJSON() ([]byte, error) { - type noMethod UrlMapTest +func (s *SnapshotListWarningData) MarshalJSON() ([]byte, error) { + type noMethod SnapshotListWarningData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// UrlMapValidationResult: Message representing the validation result -// for a UrlMap. -type UrlMapValidationResult struct { - LoadErrors []string `json:"loadErrors,omitempty"` +// SslCertificate: An SslCertificate resource. This resource provides a +// mechanism to upload an SSL key and certificate to the load balancer +// to serve secure connections from the user. +type SslCertificate struct { + // Certificate: A local certificate file. The certificate must be in PEM + // format. The certificate chain must be no greater than 5 certs long. + // The chain must include at least one intermediate cert. + Certificate string `json:"certificate,omitempty"` - // LoadSucceeded: Whether the given UrlMap can be successfully loaded. - // If false, 'loadErrors' indicates the reasons. - LoadSucceeded bool `json:"loadSucceeded,omitempty"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` - TestFailures []*TestFailure `json:"testFailures,omitempty"` + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` - // TestPassed: If successfully loaded, this field indicates whether the - // test passed. If false, 'testFailures's indicate the reason of - // failure. - TestPassed bool `json:"testPassed,omitempty"` + // Id: [Output Only] The unique identifier for the resource. 
This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` - // ForceSendFields is a list of field names (e.g. "LoadErrors") to + // Kind: [Output Only] Type of the resource. Always + // compute#sslCertificate for SSL certificates. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // PrivateKey: A write-only private key in PEM format. Only insert + // requests will include this field. + PrivateKey string `json:"privateKey,omitempty"` + + // SelfLink: [Output only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Certificate") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -19330,56 +23943,52 @@ type UrlMapValidationResult struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "LoadErrors") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Certificate") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *UrlMapValidationResult) MarshalJSON() ([]byte, error) { - type noMethod UrlMapValidationResult +func (s *SslCertificate) MarshalJSON() ([]byte, error) { + type noMethod SslCertificate raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type UrlMapsValidateRequest struct { - // Resource: Content of the UrlMap to be validated. - Resource *UrlMap `json:"resource,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Resource") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` +// SslCertificateList: Contains a list of SslCertificate resources. +type SslCertificateList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` - // NullFields is a list of field names (e.g. 
"Resource") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} + // Items: A list of SslCertificate resources. + Items []*SslCertificate `json:"items,omitempty"` -func (s *UrlMapsValidateRequest) MarshalJSON() ([]byte, error) { - type noMethod UrlMapsValidateRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` -type UrlMapsValidateResponse struct { - Result *UrlMapValidationResult `json:"result,omitempty"` + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *SslCertificateListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Result") to + // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -19387,7 +23996,7 @@ type UrlMapsValidateResponse struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Result") to include in API + // NullFields is a list of field names (e.g. "Id") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -19396,33 +24005,49 @@ type UrlMapsValidateResponse struct { NullFields []string `json:"-"` } -func (s *UrlMapsValidateResponse) MarshalJSON() ([]byte, error) { - type noMethod UrlMapsValidateResponse +func (s *SslCertificateList) MarshalJSON() ([]byte, error) { + type noMethod SslCertificateList raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// UsageExportLocation: The location in Cloud Storage and naming method -// of the daily usage report. Contains bucket_name and report_name -// prefix. -type UsageExportLocation struct { - // BucketName: The name of an existing bucket in Cloud Storage where the - // usage report object is stored. The Google Service Account is granted - // write access to this bucket. This can either be the bucket name by - // itself, such as example-bucket, or the bucket name with gs:// or - // https://storage.googleapis.com/ in front of it, such as - // gs://example-bucket. 
- BucketName string `json:"bucketName,omitempty"` +// SslCertificateListWarning: [Output Only] Informational warning +// message. +type SslCertificateListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` - // ReportNamePrefix: An optional prefix for the name of the usage report - // object stored in bucketName. If not supplied, defaults to usage. The - // report is stored as a CSV file named - // report_name_prefix_gce_YYYYMMDD.csv where YYYYMMDD is the day of the - // usage according to Pacific Time. If you supply a prefix, it should - // conform to Cloud Storage object naming conventions. - ReportNamePrefix string `json:"reportNamePrefix,omitempty"` + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*SslCertificateListWarningData `json:"data,omitempty"` - // ForceSendFields is a list of field names (e.g. "BucketName") to + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -19430,8 +24055,8 @@ type UsageExportLocation struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "BucketName") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -19439,171 +24064,27 @@ type UsageExportLocation struct { NullFields []string `json:"-"` } -func (s *UsageExportLocation) MarshalJSON() ([]byte, error) { - type noMethod UsageExportLocation - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type VpnTunnel struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // DetailedStatus: [Output Only] Detailed status message for the VPN - // tunnel. 
- DetailedStatus string `json:"detailedStatus,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // IkeVersion: IKE protocol version to use when establishing the VPN - // tunnel with peer VPN gateway. Acceptable IKE versions are 1 or 2. - // Default version is 2. - IkeVersion int64 `json:"ikeVersion,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#vpnTunnel for - // VPN tunnels. - Kind string `json:"kind,omitempty"` - - // LabelFingerprint: A fingerprint for the labels being applied to this - // VpnTunnel, which is essentially a hash of the labels set used for - // optimistic locking. The fingerprint is initially generated by Compute - // Engine and changes after every request to modify or update labels. - // You must always provide an up-to-date fingerprint hash in order to - // update or change labels. - // - // To see the latest fingerprint, make a get() request to retrieve a - // VpnTunnel. - LabelFingerprint string `json:"labelFingerprint,omitempty"` - - // Labels: Labels to apply to this VpnTunnel. These can be later - // modified by the setLabels method. Each label key/value pair must - // comply with RFC1035. Label values may be empty. - Labels map[string]string `json:"labels,omitempty"` - - // LocalTrafficSelector: Local traffic selector to use when establishing - // the VPN tunnel with peer VPN gateway. The value should be a CIDR - // formatted string, for example: 192.168.0.0/16. The ranges should be - // disjoint. Only IPv4 is supported. - LocalTrafficSelector []string `json:"localTrafficSelector,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // PeerIp: IP address of the peer VPN gateway. Only IPv4 is supported. - PeerIp string `json:"peerIp,omitempty"` - - // Region: [Output Only] URL of the region where the VPN tunnel resides. - Region string `json:"region,omitempty"` - - // RemoteTrafficSelector: Remote traffic selectors to use when - // establishing the VPN tunnel with peer VPN gateway. The value should - // be a CIDR formatted string, for example: 192.168.0.0/16. The ranges - // should be disjoint. Only IPv4 is supported. - RemoteTrafficSelector []string `json:"remoteTrafficSelector,omitempty"` - - // Router: URL of router resource to be used for dynamic routing. - Router string `json:"router,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // SharedSecret: Shared secret used to set the secure session between - // the Cloud VPN gateway and the peer VPN gateway. - SharedSecret string `json:"sharedSecret,omitempty"` - - // SharedSecretHash: Hash of the shared secret. - SharedSecretHash string `json:"sharedSecretHash,omitempty"` - - // Status: [Output Only] The status of the VPN tunnel. 
- // - // Possible values: - // "ALLOCATING_RESOURCES" - // "AUTHORIZATION_ERROR" - // "DEPROVISIONING" - // "ESTABLISHED" - // "FAILED" - // "FIRST_HANDSHAKE" - // "NEGOTIATION_FAILURE" - // "NETWORK_ERROR" - // "NO_INCOMING_PACKETS" - // "PROVISIONING" - // "REJECTED" - // "WAITING_FOR_FULL_CONFIG" - Status string `json:"status,omitempty"` - - // TargetVpnGateway: URL of the VPN gateway with which this VPN tunnel - // is associated. Provided by the client when the VPN tunnel is created. - TargetVpnGateway string `json:"targetVpnGateway,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *VpnTunnel) MarshalJSON() ([]byte, error) { - type noMethod VpnTunnel +func (s *SslCertificateListWarning) MarshalJSON() ([]byte, error) { + type noMethod SslCertificateListWarning raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type VpnTunnelAggregatedList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of VpnTunnelsScopedList resources. - Items map[string]VpnTunnelsScopedList `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#vpnTunnel for - // VPN tunnels. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` +type SslCertificateListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. 
- googleapi.ServerResponse `json:"-"` + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -19611,7 +24092,7 @@ type VpnTunnelAggregatedList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API + // NullFields is a list of field names (e.g. "Key") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -19620,23 +24101,22 @@ type VpnTunnelAggregatedList struct { NullFields []string `json:"-"` } -func (s *VpnTunnelAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod VpnTunnelAggregatedList +func (s *SslCertificateListWarningData) MarshalJSON() ([]byte, error) { + type noMethod SslCertificateListWarningData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// VpnTunnelList: Contains a list of VpnTunnel resources. -type VpnTunnelList struct { +type SslPoliciesList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of VpnTunnel resources. - Items []*VpnTunnel `json:"items,omitempty"` + // Items: A list of SslPolicy resources. + Items []*SslPolicy `json:"items,omitempty"` - // Kind: [Output Only] Type of resource. Always compute#vpnTunnel for - // VPN tunnels. + // Kind: [Output Only] Type of the resource. Always + // compute#sslPoliciesList for lists of sslPolicies. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -19650,6 +24130,9 @@ type VpnTunnelList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *SslPoliciesListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -19671,46 +24154,14 @@ type VpnTunnelList struct { NullFields []string `json:"-"` } -func (s *VpnTunnelList) MarshalJSON() ([]byte, error) { - type noMethod VpnTunnelList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type VpnTunnelsScopedList struct { - // VpnTunnels: List of vpn tunnels contained in this scope. - VpnTunnels []*VpnTunnel `json:"vpnTunnels,omitempty"` - - // Warning: Informational warning which replaces the list of addresses - // when the list is empty. - Warning *VpnTunnelsScopedListWarning `json:"warning,omitempty"` - - // ForceSendFields is a list of field names (e.g. "VpnTunnels") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. 
- ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "VpnTunnels") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *VpnTunnelsScopedList) MarshalJSON() ([]byte, error) { - type noMethod VpnTunnelsScopedList +func (s *SslPoliciesList) MarshalJSON() ([]byte, error) { + type noMethod SslPoliciesList raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// VpnTunnelsScopedListWarning: Informational warning which replaces the -// list of addresses when the list is empty. -type VpnTunnelsScopedListWarning struct { +// SslPoliciesListWarning: [Output Only] Informational warning message. +type SslPoliciesListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -19738,7 +24189,7 @@ type VpnTunnelsScopedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*VpnTunnelsScopedListWarningData `json:"data,omitempty"` + Data []*SslPoliciesListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -19761,13 +24212,13 @@ type VpnTunnelsScopedListWarning struct { NullFields []string `json:"-"` } -func (s *VpnTunnelsScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod VpnTunnelsScopedListWarning +func (s *SslPoliciesListWarning) MarshalJSON() ([]byte, error) { + type noMethod SslPoliciesListWarning raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type VpnTunnelsScopedListWarningData struct { +type SslPoliciesListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -19798,77 +24249,20 @@ type VpnTunnelsScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *VpnTunnelsScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod VpnTunnelsScopedListWarningData +func (s *SslPoliciesListWarningData) MarshalJSON() ([]byte, error) { + type noMethod SslPoliciesListWarningData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type XpnHostList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: [Output Only] A list of shared VPC host project URLs. - Items []*Project `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#xpnHostList for - // lists of shared VPC hosts. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. 
Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` +type SslPoliciesListAvailableFeaturesResponse struct { + Features []string `json:"features,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *XpnHostList) MarshalJSON() ([]byte, error) { - type noMethod XpnHostList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// XpnResourceId: Service resource (a.k.a service project) ID. -type XpnResourceId struct { - // Id: The ID of the service resource. In the case of projects, this - // field matches the project ID (e.g., my-project), not the project - // number (e.g., 12345678). - Id string `json:"id,omitempty"` - - // Type: The type of the service resource. - // - // Possible values: - // "PROJECT" - // "XPN_RESOURCE_TYPE_UNSPECIFIED" - Type string `json:"type,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "Features") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -19876,8 +24270,8 @@ type XpnResourceId struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Features") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -19885,68 +24279,104 @@ type XpnResourceId struct { NullFields []string `json:"-"` } -func (s *XpnResourceId) MarshalJSON() ([]byte, error) { - type noMethod XpnResourceId +func (s *SslPoliciesListAvailableFeaturesResponse) MarshalJSON() ([]byte, error) { + type noMethod SslPoliciesListAvailableFeaturesResponse raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Zone: A Zone resource. 
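The generated structs above (SslCertificate, SslCertificateList, SslPoliciesList, and their warning types) all share the ForceSendFields/NullFields convention described in their field comments. The standalone sketch below is editorial illustration only, not part of the vendored patch: the import path google.golang.org/api/compute/v1 and the surrounding program are assumptions; only the SslCertificate field names come from the struct definition above.

package main

import (
	"encoding/json"
	"fmt"

	"google.golang.org/api/compute/v1" // assumed import path for the generated package
)

func main() {
	// omitempty normally drops empty fields from the request body. Listing
	// "Certificate" in ForceSendFields makes the empty certificate field
	// appear anyway, which is how empty values are included in Patch calls.
	// NullFields would instead emit the listed fields as explicit JSON null.
	cert := &compute.SslCertificate{
		Name:            "example-cert",
		ForceSendFields: []string{"Certificate"},
	}

	// json.Marshal invokes the generated MarshalJSON, which delegates to
	// gensupport.MarshalJSON with ForceSendFields and NullFields.
	b, err := json.Marshal(cert)
	if err != nil {
		panic(err)
	}
	// Prints both "name" and an explicitly empty "certificate" field.
	fmt.Println(string(b))
}

The same pattern applies to every generated type in this file; only the example field names differ per struct.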
-type Zone struct { - // AvailableCpuPlatforms: [Output Only] Available cpu/platform - // selections for the zone. - AvailableCpuPlatforms []string `json:"availableCpuPlatforms,omitempty"` - +// SslPolicy: A SSL policy specifies the server-side support for SSL +// features. This can be attached to a TargetHttpsProxy or a +// TargetSslProxy. This affects connections between clients and the +// HTTPS or SSL proxy load balancer. They do not affect the connection +// between the load balancers and the backends. +type SslPolicy struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` - // Deprecated: [Output Only] The deprecation status associated with this - // zone. - Deprecated *DeprecationStatus `json:"deprecated,omitempty"` + // CustomFeatures: List of features enabled when the selected profile is + // CUSTOM. The + // - method returns the set of features that can be specified in this + // list. This field must be empty if the profile is not CUSTOM. + CustomFeatures []string `json:"customFeatures,omitempty"` - // Description: [Output Only] Textual description of the resource. + // Description: An optional description of this resource. Provide this + // property when you create the resource. Description string `json:"description,omitempty"` + // EnabledFeatures: [Output Only] The list of features enabled in the + // SSL policy. + EnabledFeatures []string `json:"enabledFeatures,omitempty"` + + // Fingerprint: Fingerprint of this resource. A hash of the contents + // stored in this object. This field is used in optimistic locking. This + // field will be ignored when inserting a SslPolicy. An up-to-date + // fingerprint must be provided in order to update the SslPolicy. + Fingerprint string `json:"fingerprint,omitempty"` + // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` - // Kind: [Output Only] Type of the resource. Always compute#zone for - // zones. + // Kind: [Output only] Type of the resource. Always compute#sslPolicyfor + // SSL policies. Kind string `json:"kind,omitempty"` - // Name: [Output Only] Name of the resource. + // MinTlsVersion: The minimum version of SSL protocol that can be used + // by the clients to establish a connection with the load balancer. This + // can be one of TLS_1_0, TLS_1_1, TLS_1_2, TLS_1_3. + // + // Possible values: + // "TLS_1_0" + // "TLS_1_1" + // "TLS_1_2" + // "TLS_1_3" + MinTlsVersion string `json:"minTlsVersion,omitempty"` + + // Name: Name of the resource. The name must be 1-63 characters long, + // and comply with RFC1035. Specifically, the name must be 1-63 + // characters long and match the regular expression + // [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a + // lowercase letter, and all following characters must be a dash, + // lowercase letter, or digit, except the last character, which cannot + // be a dash. Name string `json:"name,omitempty"` - // Region: [Output Only] Full URL reference to the region which hosts - // the zone. - Region string `json:"region,omitempty"` + // Profile: Profile specifies the set of SSL features that can be used + // by the load balancer when negotiating SSL with clients. This can be + // one of COMPATIBLE, MODERN, RESTRICTED, or CUSTOM. If using CUSTOM, + // the set of SSL features to enable must be specified in the + // customFeatures field. 
+ // + // Possible values: + // "COMPATIBLE" + // "CUSTOM" + // "MODERN" + // "RESTRICTED" + Profile string `json:"profile,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // Status: [Output Only] Status of the zone, either UP or DOWN. - // - // Possible values: - // "DOWN" - // "UP" - Status string `json:"status,omitempty"` + // Warnings: [Output Only] If potential misconfigurations are detected + // for this SSL policy, this field will be populated with warning + // messages. + Warnings []*SslPolicyWarnings `json:"warnings,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. - // "AvailableCpuPlatforms") to unconditionally include in API requests. - // By default, fields with empty values are omitted from API requests. - // However, any non-pointer, non-interface field appearing in - // ForceSendFields will be sent to the server regardless of whether the - // field is empty or not. This may be used to include empty fields in - // Patch requests. + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AvailableCpuPlatforms") to + // NullFields is a list of field names (e.g. "CreationTimestamp") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -19956,40 +24386,47 @@ type Zone struct { NullFields []string `json:"-"` } -func (s *Zone) MarshalJSON() ([]byte, error) { - type noMethod Zone +func (s *SslPolicy) MarshalJSON() ([]byte, error) { + type noMethod SslPolicy raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ZoneList: Contains a list of zone resources. -type ZoneList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of Zone resources. - Items []*Zone `json:"items,omitempty"` - - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` +type SslPolicyWarnings struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*SslPolicyWarningsData `json:"data,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -19997,7 +24434,7 @@ type ZoneList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API + // NullFields is a list of field names (e.g. "Code") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -20006,137 +24443,9595 @@ type ZoneList struct { NullFields []string `json:"-"` } -func (s *ZoneList) MarshalJSON() ([]byte, error) { - type noMethod ZoneList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +func (s *SslPolicyWarnings) MarshalJSON() ([]byte, error) { + type noMethod SslPolicyWarnings + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SslPolicyWarningsData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SslPolicyWarningsData) MarshalJSON() ([]byte, error) { + type noMethod SslPolicyWarningsData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SslPolicyReference struct { + // SslPolicy: URL of the SSL policy resource. Set this to empty string + // to clear any existing SSL policy associated with the target proxy + // resource. + SslPolicy string `json:"sslPolicy,omitempty"` + + // ForceSendFields is a list of field names (e.g. "SslPolicy") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "SslPolicy") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SslPolicyReference) MarshalJSON() ([]byte, error) { + type noMethod SslPolicyReference + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Subnetwork: A Subnetwork resource. +type Subnetwork struct { + // AllowSubnetCidrRoutesOverlap: Whether this subnetwork can conflict + // with static routes. Setting this to true allows this subnetwork's + // primary and secondary ranges to conflict with routes that have + // already been configured on the corresponding network. Static routes + // will take precedence over the subnetwork route if the route prefix + // length is at least as large as the subnetwork prefix length. + // + // Also, packets destined to IPs within subnetwork may contain + // private/sensitive data and are prevented from leaving the virtual + // network. Setting this field to true will disable this feature. + // + // The default value is false and applies to all existing subnetworks + // and automatically created subnetworks. + // + // This field cannot be set to true at resource creation time. + AllowSubnetCidrRoutesOverlap bool `json:"allowSubnetCidrRoutesOverlap,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. This field can be set only at + // resource creation time. + Description string `json:"description,omitempty"` + + // Fingerprint: Fingerprint of this resource. 
A hash of the contents + // stored in this object. This field is used in optimistic locking. This + // field will be ignored when inserting a Subnetwork. An up-to-date + // fingerprint must be provided in order to update the Subnetwork. + Fingerprint string `json:"fingerprint,omitempty"` + + // GatewayAddress: [Output Only] The gateway address for default routes + // to reach destination addresses outside this subnetwork. This field + // can be set only at resource creation time. + GatewayAddress string `json:"gatewayAddress,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // IpCidrRange: The range of internal addresses that are owned by this + // subnetwork. Provide this property when you create the subnetwork. For + // example, 10.0.0.0/8 or 192.168.0.0/16. Ranges must be unique and + // non-overlapping within a network. Only IPv4 is supported. This field + // can be set only at resource creation time. + IpCidrRange string `json:"ipCidrRange,omitempty"` + + // Kind: [Output Only] Type of the resource. Always compute#subnetwork + // for Subnetwork resources. + Kind string `json:"kind,omitempty"` + + // Name: The name of the resource, provided by the client when initially + // creating the resource. The name must be 1-63 characters long, and + // comply with RFC1035. Specifically, the name must be 1-63 characters + // long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? + // which means the first character must be a lowercase letter, and all + // following characters must be a dash, lowercase letter, or digit, + // except the last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // Network: The URL of the network to which this subnetwork belongs, + // provided by the client when initially creating the subnetwork. Only + // networks that are in the distributed mode can have subnetworks. This + // field can be set only at resource creation time. + Network string `json:"network,omitempty"` + + // PrivateIpGoogleAccess: Whether the VMs in this subnet can access + // Google services without assigned external IP addresses. This field + // can be both set at resource creation time and updated using + // setPrivateIpGoogleAccess. + PrivateIpGoogleAccess bool `json:"privateIpGoogleAccess,omitempty"` + + // Region: URL of the region where the Subnetwork resides. This field + // can be set only at resource creation time. + Region string `json:"region,omitempty"` + + // SecondaryIpRanges: An array of configurations for secondary IP ranges + // for VM instances contained in this subnetwork. The primary IP of such + // VM must belong to the primary ipCidrRange of the subnetwork. The + // alias IPs may belong to either primary or secondary ranges. + SecondaryIpRanges []*SubnetworkSecondaryRange `json:"secondaryIpRanges,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. + // "AllowSubnetCidrRoutesOverlap") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. 
This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. + // "AllowSubnetCidrRoutesOverlap") to include in API requests with the + // JSON null value. By default, fields with empty values are omitted + // from API requests. However, any field with an empty value appearing + // in NullFields will be sent to the server as null. It is an error if a + // field in this list has a non-empty value. This may be used to include + // null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Subnetwork) MarshalJSON() ([]byte, error) { + type noMethod Subnetwork + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SubnetworkAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of SubnetworksScopedList resources. + Items map[string]SubnetworksScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#subnetworkAggregatedList for aggregated lists of subnetworks. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *SubnetworkAggregatedListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SubnetworkAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod SubnetworkAggregatedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SubnetworkAggregatedListWarning: [Output Only] Informational warning +// message. +type SubnetworkAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*SubnetworkAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SubnetworkAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod SubnetworkAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SubnetworkAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SubnetworkAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod SubnetworkAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SubnetworkList: Contains a list of Subnetwork resources. +type SubnetworkList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of Subnetwork resources. + Items []*Subnetwork `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#subnetworkList + // for lists of subnetworks. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *SubnetworkListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SubnetworkList) MarshalJSON() ([]byte, error) { + type noMethod SubnetworkList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SubnetworkListWarning: [Output Only] Informational warning message. +type SubnetworkListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*SubnetworkListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SubnetworkListWarning) MarshalJSON() ([]byte, error) { + type noMethod SubnetworkListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SubnetworkListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SubnetworkListWarningData) MarshalJSON() ([]byte, error) { + type noMethod SubnetworkListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SubnetworkSecondaryRange: Represents a secondary IP range of a +// subnetwork. +type SubnetworkSecondaryRange struct { + // IpCidrRange: The range of IP addresses belonging to this subnetwork + // secondary range. Provide this property when you create the + // subnetwork. Ranges must be unique and non-overlapping with all + // primary and secondary IP ranges within a network. Only IPv4 is + // supported. + IpCidrRange string `json:"ipCidrRange,omitempty"` + + // RangeName: The name associated with this subnetwork secondary range, + // used when adding an alias IP range to a VM instance. The name must be + // 1-63 characters long, and comply with RFC1035. The name must be + // unique within the subnetwork. + RangeName string `json:"rangeName,omitempty"` + + // ForceSendFields is a list of field names (e.g. "IpCidrRange") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "IpCidrRange") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SubnetworkSecondaryRange) MarshalJSON() ([]byte, error) { + type noMethod SubnetworkSecondaryRange + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SubnetworksExpandIpCidrRangeRequest struct { + // IpCidrRange: The IP (in CIDR format or netmask) of internal addresses + // that are legal on this Subnetwork. This range should be disjoint from + // other subnetworks within this network. This range can only be larger + // than (i.e. a superset of) the range previously defined before the + // update. + IpCidrRange string `json:"ipCidrRange,omitempty"` + + // ForceSendFields is a list of field names (e.g. "IpCidrRange") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "IpCidrRange") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SubnetworksExpandIpCidrRangeRequest) MarshalJSON() ([]byte, error) { + type noMethod SubnetworksExpandIpCidrRangeRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SubnetworksScopedList struct { + // Subnetworks: List of subnetworks contained in this scope. + Subnetworks []*Subnetwork `json:"subnetworks,omitempty"` + + // Warning: An informational warning that appears when the list of + // addresses is empty. + Warning *SubnetworksScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Subnetworks") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Subnetworks") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SubnetworksScopedList) MarshalJSON() ([]byte, error) { + type noMethod SubnetworksScopedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SubnetworksScopedListWarning: An informational warning that appears +// when the list of addresses is empty. +type SubnetworksScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*SubnetworksScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SubnetworksScopedListWarning) MarshalJSON() ([]byte, error) { + type noMethod SubnetworksScopedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SubnetworksScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SubnetworksScopedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod SubnetworksScopedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SubnetworksSetPrivateIpGoogleAccessRequest struct { + PrivateIpGoogleAccess bool `json:"privateIpGoogleAccess,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "PrivateIpGoogleAccess") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "PrivateIpGoogleAccess") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. 
This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *SubnetworksSetPrivateIpGoogleAccessRequest) MarshalJSON() ([]byte, error) { + type noMethod SubnetworksSetPrivateIpGoogleAccessRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TCPHealthCheck struct { + // Port: The TCP port number for the health check request. The default + // value is 80. Valid values are 1 through 65535. + Port int64 `json:"port,omitempty"` + + // PortName: Port name as defined in InstanceGroup#NamedPort#name. If + // both port and port_name are defined, port takes precedence. + PortName string `json:"portName,omitempty"` + + // PortSpecification: Specifies how port is selected for health + // checking, can be one of following values: + // USE_FIXED_PORT: The port number in + // port + // is used for health checking. + // USE_NAMED_PORT: The + // portName + // is used for health checking. + // USE_SERVING_PORT: For NetworkEndpointGroup, the port specified for + // each network endpoint is used for health checking. For other + // backends, the port or named port specified in the Backend Service is + // used for health checking. + // + // + // If not specified, TCP health check follows behavior specified + // in + // port + // and + // portName + // fields. + // + // Possible values: + // "USE_FIXED_PORT" + // "USE_NAMED_PORT" + // "USE_SERVING_PORT" + PortSpecification string `json:"portSpecification,omitempty"` + + // ProxyHeader: Specifies the type of proxy header to append before + // sending data to the backend, either NONE or PROXY_V1. The default is + // NONE. + // + // Possible values: + // "NONE" + // "PROXY_V1" + ProxyHeader string `json:"proxyHeader,omitempty"` + + // Request: The application data to send once the TCP connection has + // been established (default value is empty). If both request and + // response are empty, the connection establishment alone will indicate + // health. The request data can only be ASCII. + Request string `json:"request,omitempty"` + + // Response: The bytes to match against the beginning of the response + // data. If left empty (the default value), any response will indicate + // health. The response data can only be ASCII. + Response string `json:"response,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Port") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Port") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TCPHealthCheck) MarshalJSON() ([]byte, error) { + type noMethod TCPHealthCheck + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Tags: A set of instance tags. 
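A hedged sketch of filling in the TCPHealthCheck just defined; the port, request and response values are placeholders, and in practice the struct is referenced from a larger HealthCheck resource rather than sent on its own:

package sketch

import compute "google.golang.org/api/compute/v1"

// redisTCPCheck builds a TCP health check that opens a connection, sends an
// ASCII request and requires the reply to start with the given response.
func redisTCPCheck() *compute.TCPHealthCheck {
	return &compute.TCPHealthCheck{
		Port:        6379,       // if PortName were also set, Port would take precedence
		ProxyHeader: "NONE",     // or "PROXY_V1"
		Request:     "PING\r\n", // sent once the TCP connection is established
		Response:    "+PONG",    // bytes the reply must start with to count as healthy
	}
}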
+type Tags struct { + // Fingerprint: Specifies a fingerprint for this request, which is + // essentially a hash of the metadata's contents and used for optimistic + // locking. The fingerprint is initially generated by Compute Engine and + // changes after every request to modify or update metadata. You must + // always provide an up-to-date fingerprint hash in order to update or + // change metadata. + // + // To see the latest fingerprint, make get() request to the instance. + Fingerprint string `json:"fingerprint,omitempty"` + + // Items: An array of tags. Each tag must be 1-63 characters long, and + // comply with RFC1035. + Items []string `json:"items,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Fingerprint") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Fingerprint") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Tags) MarshalJSON() ([]byte, error) { + type noMethod Tags + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetHttpProxy: A TargetHttpProxy resource. This resource defines an +// HTTP proxy. +type TargetHttpProxy struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of resource. Always compute#targetHttpProxy + // for target HTTP proxies. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // UrlMap: URL to the UrlMap resource that defines the mapping from URL + // to the BackendService. + UrlMap string `json:"urlMap,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. 
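The Fingerprint comment on Tags above describes optimistic locking: reuse the fingerprint from the object you last read when writing new tags. A sketch under the assumption that the package's Instance type (defined elsewhere in this file) carries a *Tags field; the tag values are placeholders:

package sketch

import compute "google.golang.org/api/compute/v1"

// updatedTags builds a new Tags value for an instance that was just fetched,
// carrying the current fingerprint forward so the server can detect
// concurrent modifications.
func updatedTags(current *compute.Instance) *compute.Tags {
	fingerprint := ""
	if current.Tags != nil {
		fingerprint = current.Tags.Fingerprint
	}
	return &compute.Tags{
		Fingerprint: fingerprint, // must be the latest value returned by the server
		Items:       []string{"http-server", "https-server"},
	}
}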
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *TargetHttpProxy) MarshalJSON() ([]byte, error) { + type noMethod TargetHttpProxy + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetHttpProxyList: A list of TargetHttpProxy resources. +type TargetHttpProxyList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of TargetHttpProxy resources. + Items []*TargetHttpProxy `json:"items,omitempty"` + + // Kind: Type of resource. Always compute#targetHttpProxyList for lists + // of target HTTP proxies. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *TargetHttpProxyListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetHttpProxyList) MarshalJSON() ([]byte, error) { + type noMethod TargetHttpProxyList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetHttpProxyListWarning: [Output Only] Informational warning +// message. +type TargetHttpProxyListWarning struct { + // Code: [Output Only] A warning code, if applicable. 
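A sketch of the client-supplied portion of the TargetHttpProxy defined above; all [Output Only] fields are set by the server, and the names used here are placeholders:

package sketch

import compute "google.golang.org/api/compute/v1"

// newTargetHttpProxy fills in the client-supplied fields; Id, SelfLink,
// CreationTimestamp and Kind are all set by the server.
func newTargetHttpProxy() *compute.TargetHttpProxy {
	return &compute.TargetHttpProxy{
		Name:   "web-proxy",              // must satisfy the RFC1035 rules quoted above
		UrlMap: "global/urlMaps/web-map", // URL of the UrlMap that routes requests to backend services
	}
}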
For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetHttpProxyListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetHttpProxyListWarning) MarshalJSON() ([]byte, error) { + type noMethod TargetHttpProxyListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetHttpProxyListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetHttpProxyListWarningData) MarshalJSON() ([]byte, error) { + type noMethod TargetHttpProxyListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetHttpsProxiesSetQuicOverrideRequest struct { + // QuicOverride: QUIC policy for the TargetHttpsProxy resource. + // + // Possible values: + // "DISABLE" + // "ENABLE" + // "NONE" + QuicOverride string `json:"quicOverride,omitempty"` + + // ForceSendFields is a list of field names (e.g. "QuicOverride") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "QuicOverride") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetHttpsProxiesSetQuicOverrideRequest) MarshalJSON() ([]byte, error) { + type noMethod TargetHttpsProxiesSetQuicOverrideRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetHttpsProxiesSetSslCertificatesRequest struct { + // SslCertificates: New set of SslCertificate resources to associate + // with this TargetHttpsProxy resource. Currently exactly one + // SslCertificate resource must be specified. + SslCertificates []string `json:"sslCertificates,omitempty"` + + // ForceSendFields is a list of field names (e.g. "SslCertificates") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "SslCertificates") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *TargetHttpsProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error) { + type noMethod TargetHttpsProxiesSetSslCertificatesRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetHttpsProxy: A TargetHttpsProxy resource. This resource defines +// an HTTPS proxy. 
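The Warning/WarningData shape above repeats across the list types in this file; a small sketch of surfacing it from a list response, using only the fields shown:

package sketch

import (
	"log"

	compute "google.golang.org/api/compute/v1"
)

// logListWarning prints the informational warning attached to a list
// response, if any, including its key/value metadata.
func logListWarning(list *compute.TargetHttpProxyList) {
	if list.Warning == nil {
		return
	}
	log.Printf("warning %s: %s", list.Warning.Code, list.Warning.Message)
	for _, d := range list.Warning.Data {
		log.Printf("  %s = %s", d.Key, d.Value)
	}
}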
+type TargetHttpsProxy struct { + // ClientSslPolicy: URL to ClientSslPolicy resource which controls the + // set of allowed SSL versions and ciphers. + ClientSslPolicy string `json:"clientSslPolicy,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of resource. Always compute#targetHttpsProxy + // for target HTTPS proxies. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // QuicOverride: Specifies the QUIC override policy for this + // TargetHttpsProxy resource. This determines whether the load balancer + // will attempt to negotiate QUIC with clients or not. Can specify one + // of NONE, ENABLE, or DISABLE. Specify ENABLE to always enable QUIC, + // Enables QUIC when set to ENABLE, and disables QUIC when set to + // DISABLE. If NONE is specified, uses the QUIC policy with no user + // overrides, which is equivalent to DISABLE. Not specifying this field + // is equivalent to specifying NONE. + // + // Possible values: + // "DISABLE" + // "ENABLE" + // "NONE" + QuicOverride string `json:"quicOverride,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // SslCertificates: URLs to SslCertificate resources that are used to + // authenticate connections between users and the load balancer. + // Currently, exactly one SSL certificate must be specified. + SslCertificates []string `json:"sslCertificates,omitempty"` + + // SslPolicy: URL of SslPolicy resource that will be associated with the + // TargetHttpsProxy resource. If not set, the TargetHttpsProxy resource + // will not have any SSL policy configured. + SslPolicy string `json:"sslPolicy,omitempty"` + + // UrlMap: A fully-qualified or valid partial URL to the UrlMap resource + // that defines the mapping from URL to the BackendService. For example, + // the following are all valid URLs for specifying a URL map: + // - + // https://www.googleapis.compute/v1/projects/project/global/urlMaps/url-map + // - projects/project/global/urlMaps/url-map + // - global/urlMaps/url-map + UrlMap string `json:"urlMap,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "ClientSslPolicy") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ClientSslPolicy") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *TargetHttpsProxy) MarshalJSON() ([]byte, error) { + type noMethod TargetHttpsProxy + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetHttpsProxyList: Contains a list of TargetHttpsProxy resources. +type TargetHttpsProxyList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of TargetHttpsProxy resources. + Items []*TargetHttpsProxy `json:"items,omitempty"` + + // Kind: Type of resource. Always compute#targetHttpsProxyList for lists + // of target HTTPS proxies. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *TargetHttpsProxyListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetHttpsProxyList) MarshalJSON() ([]byte, error) { + type noMethod TargetHttpsProxyList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetHttpsProxyListWarning: [Output Only] Informational warning +// message. +type TargetHttpsProxyListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
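A sketch of the client-supplied fields of the TargetHttpsProxy defined above, tying together UrlMap, SslCertificates and QuicOverride; the certificate and URL-map names are placeholders:

package sketch

import compute "google.golang.org/api/compute/v1"

// newTargetHttpsProxy wires a URL map and a single SSL certificate into an
// HTTPS proxy and leaves the QUIC decision to the platform default.
func newTargetHttpsProxy() *compute.TargetHttpsProxy {
	return &compute.TargetHttpsProxy{
		Name:            "secure-proxy",
		UrlMap:          "global/urlMaps/web-map",
		SslCertificates: []string{"global/sslCertificates/web-cert"}, // exactly one, per the comment above
		QuicOverride:    "NONE",                                      // no user override; currently equivalent to DISABLE
	}
}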
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetHttpsProxyListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetHttpsProxyListWarning) MarshalJSON() ([]byte, error) { + type noMethod TargetHttpsProxyListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetHttpsProxyListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetHttpsProxyListWarningData) MarshalJSON() ([]byte, error) { + type noMethod TargetHttpsProxyListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetInstance: A TargetInstance resource. This resource defines an +// endpoint instance that terminates traffic of certain protocols. +type TargetInstance struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Instance: A URL to the virtual machine instance that handles traffic + // for this target instance. When creating a target instance, you can + // provide the fully-qualified URL or a valid partial URL to the desired + // virtual machine. For example, the following are all valid URLs: + // - + // https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/instance + // - projects/project/zones/zone/instances/instance + // - zones/zone/instances/instance + Instance string `json:"instance,omitempty"` + + // Kind: [Output Only] The type of the resource. Always + // compute#targetInstance for target instances. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // NatPolicy: NAT option controlling how IPs are NAT'ed to the instance. + // Currently only NO_NAT (default value) is supported. + // + // Possible values: + // "NO_NAT" + NatPolicy string `json:"natPolicy,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Zone: [Output Only] URL of the zone where the target instance + // resides. + Zone string `json:"zone,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. 
However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *TargetInstance) MarshalJSON() ([]byte, error) { + type noMethod TargetInstance + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetInstanceAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of TargetInstance resources. + Items map[string]TargetInstancesScopedList `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *TargetInstanceAggregatedListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetInstanceAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod TargetInstanceAggregatedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetInstanceAggregatedListWarning: [Output Only] Informational +// warning message. +type TargetInstanceAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
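A sketch of the TargetInstance defined above, using one of the accepted partial-URL forms for Instance; project, zone and instance names are placeholders:

package sketch

import compute "google.golang.org/api/compute/v1"

// newTargetInstance points a target instance at a VM using a partial URL;
// the fully-qualified form shown in the comment above works as well.
func newTargetInstance() *compute.TargetInstance {
	return &compute.TargetInstance{
		Name:      "vpn-endpoint",
		Instance:  "zones/us-central1-a/instances/vpn-vm",
		NatPolicy: "NO_NAT", // the only value currently supported
	}
}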
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetInstanceAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetInstanceAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod TargetInstanceAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetInstanceAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetInstanceAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod TargetInstanceAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetInstanceList: Contains a list of TargetInstance resources. +type TargetInstanceList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of TargetInstance resources. + Items []*TargetInstance `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *TargetInstanceListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetInstanceList) MarshalJSON() ([]byte, error) { + type noMethod TargetInstanceList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetInstanceListWarning: [Output Only] Informational warning +// message. +type TargetInstanceListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
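The NextPageToken comments above describe the usual paging loop. A sketch that relies only on the TargetInstanceList fields shown here, with the actual List call (generated later in this file) abstracted behind a callback:

package sketch

import compute "google.golang.org/api/compute/v1"

// allTargetInstances drains every page of a target-instance listing. fetch is
// expected to issue one List request with the given pageToken and return the
// decoded page.
func allTargetInstances(fetch func(pageToken string) (*compute.TargetInstanceList, error)) ([]*compute.TargetInstance, error) {
	var all []*compute.TargetInstance
	token := ""
	for {
		page, err := fetch(token)
		if err != nil {
			return nil, err
		}
		all = append(all, page.Items...)
		if page.NextPageToken == "" {
			return all, nil // last page reached
		}
		token = page.NextPageToken // becomes the pageToken query parameter of the next request
	}
}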
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetInstanceListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetInstanceListWarning) MarshalJSON() ([]byte, error) { + type noMethod TargetInstanceListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetInstanceListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetInstanceListWarningData) MarshalJSON() ([]byte, error) { + type noMethod TargetInstanceListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetInstancesScopedList struct { + // TargetInstances: List of target instances contained in this scope. + TargetInstances []*TargetInstance `json:"targetInstances,omitempty"` + + // Warning: Informational warning which replaces the list of addresses + // when the list is empty. + Warning *TargetInstancesScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "TargetInstances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "TargetInstances") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *TargetInstancesScopedList) MarshalJSON() ([]byte, error) { + type noMethod TargetInstancesScopedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetInstancesScopedListWarning: Informational warning which +// replaces the list of addresses when the list is empty. +type TargetInstancesScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetInstancesScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetInstancesScopedListWarning) MarshalJSON() ([]byte, error) { + type noMethod TargetInstancesScopedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetInstancesScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetInstancesScopedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod TargetInstancesScopedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetPool: A TargetPool resource. This resource defines a pool of +// instances, an associated HttpHealthCheck resource, and the fallback +// target pool. +type TargetPool struct { + // BackupPool: This field is applicable only when the containing target + // pool is serving a forwarding rule as the primary pool, and its + // failoverRatio field is properly set to a value between [0, + // 1]. + // + // backupPool and failoverRatio together define the fallback behavior of + // the primary target pool: if the ratio of the healthy instances in the + // primary pool is at or below failoverRatio, traffic arriving at the + // load-balanced IP will be directed to the backup pool. 
+ // + // In case where failoverRatio and backupPool are not set, or all the + // instances in the backup pool are unhealthy, the traffic will be + // directed back to the primary pool in the "force" mode, where traffic + // will be spread to the healthy instances with the best effort, or to + // all instances when no instance is healthy. + BackupPool string `json:"backupPool,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // FailoverRatio: This field is applicable only when the containing + // target pool is serving a forwarding rule as the primary pool (i.e., + // not as a backup pool to some other target pool). The value of the + // field must be in [0, 1]. + // + // If set, backupPool must also be set. They together define the + // fallback behavior of the primary target pool: if the ratio of the + // healthy instances in the primary pool is at or below this number, + // traffic arriving at the load-balanced IP will be directed to the + // backup pool. + // + // In case where failoverRatio is not set or all the instances in the + // backup pool are unhealthy, the traffic will be directed back to the + // primary pool in the "force" mode, where traffic will be spread to the + // healthy instances with the best effort, or to all instances when no + // instance is healthy. + FailoverRatio float64 `json:"failoverRatio,omitempty"` + + // HealthChecks: The URL of the HttpHealthCheck resource. A member + // instance in this pool is considered healthy if and only if the health + // checks pass. An empty list means all member instances will be + // considered healthy at all times. Only HttpHealthChecks are supported. + // Only one health check may be specified. + HealthChecks []string `json:"healthChecks,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Instances: A list of resource URLs to the virtual machine instances + // serving this pool. They must live in zones contained in the same + // region as this pool. + Instances []string `json:"instances,omitempty"` + + // Kind: [Output Only] Type of the resource. Always compute#targetPool + // for target pools. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // Region: [Output Only] URL of the region where the target pool + // resides. + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // SessionAffinity: Sesssion affinity option, must be one of the + // following values: + // NONE: Connections from the same client IP may go to any instance in + // the pool. 
+ // CLIENT_IP: Connections from the same client IP will go to the same + // instance in the pool while that instance remains + // healthy. + // CLIENT_IP_PROTO: Connections from the same client IP with the same IP + // protocol will go to the same instance in the pool while that instance + // remains healthy. + // + // Possible values: + // "CLIENT_IP" + // "CLIENT_IP_PORT_PROTO" + // "CLIENT_IP_PROTO" + // "GENERATED_COOKIE" + // "NONE" + SessionAffinity string `json:"sessionAffinity,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "BackupPool") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BackupPool") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetPool) MarshalJSON() ([]byte, error) { + type noMethod TargetPool + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *TargetPool) UnmarshalJSON(data []byte) error { + type noMethod TargetPool + var s1 struct { + FailoverRatio gensupport.JSONFloat64 `json:"failoverRatio"` + *noMethod + } + s1.noMethod = (*noMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.FailoverRatio = float64(s1.FailoverRatio) + return nil +} + +type TargetPoolAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of TargetPool resources. + Items map[string]TargetPoolsScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#targetPoolAggregatedList for aggregated lists of target + // pools. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *TargetPoolAggregatedListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
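A minimal sketch (not part of the vendored generated file above) of what the custom TargetPool.UnmarshalJSON buys callers: routing failoverRatio through gensupport.JSONFloat64 means the field should decode whether the API returns a JSON number or a quoted numeric string. The import path in the snippet is an assumption; use whichever compute API version this generated file belongs to.

package main

import (
	"encoding/json"
	"fmt"

	compute "google.golang.org/api/compute/v1" // import path is an assumption
)

func main() {
	// Both payloads should yield FailoverRatio == 0.25, because
	// TargetPool.UnmarshalJSON (defined above) decodes the field via
	// gensupport.JSONFloat64 before converting it to float64.
	for _, payload := range []string{
		`{"failoverRatio": 0.25}`,
		`{"failoverRatio": "0.25"}`,
	} {
		var tp compute.TargetPool
		if err := json.Unmarshal([]byte(payload), &tp); err != nil {
			fmt.Println("unmarshal error:", err)
			continue
		}
		fmt.Println(tp.FailoverRatio)
	}
}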
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetPoolAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolAggregatedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetPoolAggregatedListWarning: [Output Only] Informational warning +// message. +type TargetPoolAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetPoolAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetPoolAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetPoolAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. 
For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetPoolAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetPoolInstanceHealth struct { + HealthStatus []*HealthStatus `json:"healthStatus,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#targetPoolInstanceHealth when checking the health of an + // instance. + Kind string `json:"kind,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "HealthStatus") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "HealthStatus") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetPoolInstanceHealth) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolInstanceHealth + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetPoolList: Contains a list of TargetPool resources. +type TargetPoolList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of TargetPool resources. 
+ Items []*TargetPool `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#targetPoolList + // for lists of target pools. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *TargetPoolListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetPoolList) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetPoolListWarning: [Output Only] Informational warning message. +type TargetPoolListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetPoolListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetPoolListWarning) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetPoolListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetPoolListWarningData) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetPoolsAddHealthCheckRequest struct { + // HealthChecks: The HttpHealthCheck to add to the target pool. + HealthChecks []*HealthCheckReference `json:"healthChecks,omitempty"` + + // ForceSendFields is a list of field names (e.g. "HealthChecks") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "HealthChecks") to include + // in API requests with the JSON null value. 
By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetPoolsAddHealthCheckRequest) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolsAddHealthCheckRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetPoolsAddInstanceRequest struct { + // Instances: A full or partial URL to an instance to add to this target + // pool. This can be a full or partial URL. For example, the following + // are valid URLs: + // - + // https://www.googleapis.com/compute/v1/projects/project-id/zones/zone/instances/instance-name + // - projects/project-id/zones/zone/instances/instance-name + // - zones/zone/instances/instance-name + Instances []*InstanceReference `json:"instances,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Instances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Instances") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetPoolsAddInstanceRequest) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolsAddInstanceRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetPoolsRemoveHealthCheckRequest struct { + // HealthChecks: Health check URL to be removed. This can be a full or + // valid partial URL. For example, the following are valid URLs: + // - + // https://www.googleapis.com/compute/beta/projects/project/global/httpHealthChecks/health-check + // - projects/project/global/httpHealthChecks/health-check + // - global/httpHealthChecks/health-check + HealthChecks []*HealthCheckReference `json:"healthChecks,omitempty"` + + // ForceSendFields is a list of field names (e.g. "HealthChecks") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "HealthChecks") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *TargetPoolsRemoveHealthCheckRequest) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolsRemoveHealthCheckRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetPoolsRemoveInstanceRequest struct { + // Instances: URLs of the instances to be removed from target pool. + Instances []*InstanceReference `json:"instances,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Instances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Instances") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetPoolsRemoveInstanceRequest) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolsRemoveInstanceRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetPoolsScopedList struct { + // TargetPools: List of target pools contained in this scope. + TargetPools []*TargetPool `json:"targetPools,omitempty"` + + // Warning: Informational warning which replaces the list of addresses + // when the list is empty. + Warning *TargetPoolsScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "TargetPools") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "TargetPools") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetPoolsScopedList) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolsScopedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetPoolsScopedListWarning: Informational warning which replaces +// the list of addresses when the list is empty. +type TargetPoolsScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
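A rough sketch (again outside the vendored file, same assumed import path as above) of the ForceSendFields/NullFields contract these generated MarshalJSON methods implement via gensupport.MarshalJSON: an empty field is normally omitted, naming it in ForceSendFields sends its zero value, and naming it in NullFields (while leaving the struct field empty) sends an explicit JSON null, e.g. so a Patch can clear it server-side.

package main

import (
	"encoding/json"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1" // import path is an assumption
)

func main() {
	tp := &compute.TargetPool{
		Name:        "example-pool",
		Description: "", // zero value: dropped unless forced
		// Force the empty Description into the request body.
		ForceSendFields: []string{"Description"},
		// Emit backupPool as an explicit JSON null; the struct field
		// itself must stay empty, per the NullFields comment above.
		NullFields: []string{"BackupPool"},
	}
	b, err := json.Marshal(tp) // dispatches to TargetPool.MarshalJSON
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b))
	// Expected shape (roughly, keys alphabetical):
	// {"backupPool":null,"description":"","name":"example-pool"}
}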
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetPoolsScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetPoolsScopedListWarning) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolsScopedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetPoolsScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetPoolsScopedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolsScopedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetReference struct { + Target string `json:"target,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Target") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Target") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetReference) MarshalJSON() ([]byte, error) { + type noMethod TargetReference + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetSslProxiesSetBackendServiceRequest struct { + // Service: The URL of the new BackendService resource for the + // targetSslProxy. + Service string `json:"service,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Service") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Service") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetSslProxiesSetBackendServiceRequest) MarshalJSON() ([]byte, error) { + type noMethod TargetSslProxiesSetBackendServiceRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetSslProxiesSetProxyHeaderRequest struct { + // ProxyHeader: The new type of proxy header to append before sending + // data to the backend. NONE or PROXY_V1 are allowed. + // + // Possible values: + // "NONE" + // "PROXY_V1" + ProxyHeader string `json:"proxyHeader,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ProxyHeader") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ProxyHeader") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetSslProxiesSetProxyHeaderRequest) MarshalJSON() ([]byte, error) { + type noMethod TargetSslProxiesSetProxyHeaderRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetSslProxiesSetSslCertificatesRequest struct { + // SslCertificates: New set of URLs to SslCertificate resources to + // associate with this TargetSslProxy. Currently exactly one ssl + // certificate must be specified. + SslCertificates []string `json:"sslCertificates,omitempty"` + + // ForceSendFields is a list of field names (e.g. "SslCertificates") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "SslCertificates") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *TargetSslProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error) { + type noMethod TargetSslProxiesSetSslCertificatesRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetSslProxy: A TargetSslProxy resource. This resource defines an +// SSL proxy. +type TargetSslProxy struct { + // ClientSslPolicy: URL to ClientSslPolicy resource which controls the + // set of allowed SSL versions and ciphers. + ClientSslPolicy string `json:"clientSslPolicy,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always + // compute#targetSslProxy for target SSL proxies. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? 
which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // ProxyHeader: Specifies the type of proxy header to append before + // sending data to the backend, either NONE or PROXY_V1. The default is + // NONE. + // + // Possible values: + // "NONE" + // "PROXY_V1" + ProxyHeader string `json:"proxyHeader,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Service: URL to the BackendService resource. + Service string `json:"service,omitempty"` + + // SslCertificates: URLs to SslCertificate resources that are used to + // authenticate connections to Backends. Currently exactly one SSL + // certificate must be specified. + SslCertificates []string `json:"sslCertificates,omitempty"` + + // SslPolicy: URL of SslPolicy resource that will be associated with the + // TargetSslProxy resource. If not set, the TargetSslProxy resource will + // not have any SSL policy configured. + SslPolicy string `json:"sslPolicy,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "ClientSslPolicy") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ClientSslPolicy") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *TargetSslProxy) MarshalJSON() ([]byte, error) { + type noMethod TargetSslProxy + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetSslProxyList: Contains a list of TargetSslProxy resources. +type TargetSslProxyList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of TargetSslProxy resources. + Items []*TargetSslProxy `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. 
+ Warning *TargetSslProxyListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetSslProxyList) MarshalJSON() ([]byte, error) { + type noMethod TargetSslProxyList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetSslProxyListWarning: [Output Only] Informational warning +// message. +type TargetSslProxyListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetSslProxyListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *TargetSslProxyListWarning) MarshalJSON() ([]byte, error) { + type noMethod TargetSslProxyListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetSslProxyListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetSslProxyListWarningData) MarshalJSON() ([]byte, error) { + type noMethod TargetSslProxyListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetTcpProxiesSetBackendServiceRequest struct { + // Service: The URL of the new BackendService resource for the + // targetTcpProxy. + Service string `json:"service,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Service") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Service") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *TargetTcpProxiesSetBackendServiceRequest) MarshalJSON() ([]byte, error) { + type noMethod TargetTcpProxiesSetBackendServiceRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetTcpProxiesSetProxyHeaderRequest struct { + // ProxyHeader: The new type of proxy header to append before sending + // data to the backend. NONE or PROXY_V1 are allowed. + // + // Possible values: + // "NONE" + // "PROXY_V1" + ProxyHeader string `json:"proxyHeader,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ProxyHeader") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ProxyHeader") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetTcpProxiesSetProxyHeaderRequest) MarshalJSON() ([]byte, error) { + type noMethod TargetTcpProxiesSetProxyHeaderRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetTcpProxy: A TargetTcpProxy resource. This resource defines a +// TCP proxy. +type TargetTcpProxy struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always + // compute#targetTcpProxy for target TCP proxies. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // ProxyHeader: Specifies the type of proxy header to append before + // sending data to the backend, either NONE or PROXY_V1. The default is + // NONE. + // + // Possible values: + // "NONE" + // "PROXY_V1" + ProxyHeader string `json:"proxyHeader,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Service: URL to the BackendService resource. + Service string `json:"service,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. 
+ googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *TargetTcpProxy) MarshalJSON() ([]byte, error) { + type noMethod TargetTcpProxy + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetTcpProxyList: Contains a list of TargetTcpProxy resources. +type TargetTcpProxyList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of TargetTcpProxy resources. + Items []*TargetTcpProxy `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *TargetTcpProxyListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetTcpProxyList) MarshalJSON() ([]byte, error) { + type noMethod TargetTcpProxyList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetTcpProxyListWarning: [Output Only] Informational warning +// message. 
+type TargetTcpProxyListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetTcpProxyListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetTcpProxyListWarning) MarshalJSON() ([]byte, error) { + type noMethod TargetTcpProxyListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetTcpProxyListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetTcpProxyListWarningData) MarshalJSON() ([]byte, error) { + type noMethod TargetTcpProxyListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetVpnGateway: Represents a Target VPN gateway resource. +type TargetVpnGateway struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // ForwardingRules: [Output Only] A list of URLs to the ForwardingRule + // resources. ForwardingRules are created using + // compute.forwardingRules.insert and associated to a VPN gateway. + ForwardingRules []string `json:"forwardingRules,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of resource. Always compute#targetVpnGateway + // for target VPN gateways. + Kind string `json:"kind,omitempty"` + + // LabelFingerprint: A fingerprint for the labels being applied to this + // TargetVpnGateway, which is essentially a hash of the labels set used + // for optimistic locking. The fingerprint is initially generated by + // Compute Engine and changes after every request to modify or update + // labels. You must always provide an up-to-date fingerprint hash in + // order to update or change labels. + // + // To see the latest fingerprint, make a get() request to retrieve an + // TargetVpnGateway. + LabelFingerprint string `json:"labelFingerprint,omitempty"` + + // Labels: Labels to apply to this TargetVpnGateway resource. These can + // be later modified by the setLabels method. Each label key/value must + // comply with RFC1035. Label values may be empty. + Labels map[string]string `json:"labels,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // Network: URL of the network to which this VPN gateway is attached. + // Provided by the client when the VPN gateway is created. + Network string `json:"network,omitempty"` + + // Region: [Output Only] URL of the region where the target VPN gateway + // resides. + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Status: [Output Only] The status of the VPN gateway. 
+ // + // Possible values: + // "CREATING" + // "DELETING" + // "FAILED" + // "READY" + Status string `json:"status,omitempty"` + + // Tunnels: [Output Only] A list of URLs to VpnTunnel resources. + // VpnTunnels are created using compute.vpntunnels.insert method and + // associated to a VPN gateway. + Tunnels []string `json:"tunnels,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *TargetVpnGateway) MarshalJSON() ([]byte, error) { + type noMethod TargetVpnGateway + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetVpnGatewayAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of TargetVpnGateway resources. + Items map[string]TargetVpnGatewaysScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#targetVpnGateway + // for target VPN gateways. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *TargetVpnGatewayAggregatedListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetVpnGatewayAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod TargetVpnGatewayAggregatedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetVpnGatewayAggregatedListWarning: [Output Only] Informational +// warning message. +type TargetVpnGatewayAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetVpnGatewayAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetVpnGatewayAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod TargetVpnGatewayAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetVpnGatewayAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). 
+ Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetVpnGatewayAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod TargetVpnGatewayAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetVpnGatewayList: Contains a list of TargetVpnGateway resources. +type TargetVpnGatewayList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of TargetVpnGateway resources. + Items []*TargetVpnGateway `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#targetVpnGateway + // for target VPN gateways. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *TargetVpnGatewayListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *TargetVpnGatewayList) MarshalJSON() ([]byte, error) { + type noMethod TargetVpnGatewayList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetVpnGatewayListWarning: [Output Only] Informational warning +// message. +type TargetVpnGatewayListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetVpnGatewayListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetVpnGatewayListWarning) MarshalJSON() ([]byte, error) { + type noMethod TargetVpnGatewayListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetVpnGatewayListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetVpnGatewayListWarningData) MarshalJSON() ([]byte, error) { + type noMethod TargetVpnGatewayListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetVpnGatewaysScopedList struct { + // TargetVpnGateways: [Output Only] List of target vpn gateways + // contained in this scope. + TargetVpnGateways []*TargetVpnGateway `json:"targetVpnGateways,omitempty"` + + // Warning: [Output Only] Informational warning which replaces the list + // of addresses when the list is empty. + Warning *TargetVpnGatewaysScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "TargetVpnGateways") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "TargetVpnGateways") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *TargetVpnGatewaysScopedList) MarshalJSON() ([]byte, error) { + type noMethod TargetVpnGatewaysScopedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetVpnGatewaysScopedListWarning: [Output Only] Informational +// warning which replaces the list of addresses when the list is empty. +type TargetVpnGatewaysScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. 
+ // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetVpnGatewaysScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetVpnGatewaysScopedListWarning) MarshalJSON() ([]byte, error) { + type noMethod TargetVpnGatewaysScopedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetVpnGatewaysScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *TargetVpnGatewaysScopedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod TargetVpnGatewaysScopedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TestFailure struct { + ActualService string `json:"actualService,omitempty"` + + ExpectedService string `json:"expectedService,omitempty"` + + Host string `json:"host,omitempty"` + + Path string `json:"path,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ActualService") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ActualService") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TestFailure) MarshalJSON() ([]byte, error) { + type noMethod TestFailure + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TestPermissionsRequest struct { + // Permissions: The set of permissions to check for the 'resource'. + // Permissions with wildcards (such as '*' or 'storage.*') are not + // allowed. + Permissions []string `json:"permissions,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Permissions") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Permissions") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TestPermissionsRequest) MarshalJSON() ([]byte, error) { + type noMethod TestPermissionsRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TestPermissionsResponse struct { + // Permissions: A subset of `TestPermissionsRequest.permissions` that + // the caller is allowed. + Permissions []string `json:"permissions,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Permissions") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Permissions") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TestPermissionsResponse) MarshalJSON() ([]byte, error) { + type noMethod TestPermissionsResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type UDPHealthCheck struct { + // Port: The UDP port number for the health check request. Valid values + // are 1 through 65535. + Port int64 `json:"port,omitempty"` + + // PortName: Port name as defined in InstanceGroup#NamedPort#name. If + // both port and port_name are defined, port takes precedence. + PortName string `json:"portName,omitempty"` + + // Request: Raw data of request to send in payload of UDP packet. It is + // an error if this is empty. The request data can only be ASCII. + Request string `json:"request,omitempty"` + + // Response: The bytes to match against the beginning of the response + // data. It is an error if this is empty. The response data can only be + // ASCII. + Response string `json:"response,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Port") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Port") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UDPHealthCheck) MarshalJSON() ([]byte, error) { + type noMethod UDPHealthCheck + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UrlMap: A UrlMap resource. This resource defines the mapping from URL +// to the BackendService resource, based on the "longest-match" of the +// URL's host and path. +type UrlMap struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // DefaultService: The URL of the BackendService resource if none of the + // hostRules match. + DefaultService string `json:"defaultService,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Fingerprint: Fingerprint of this resource. A hash of the contents + // stored in this object. 
This field is used in optimistic locking. This + // field will be ignored when inserting a UrlMap. An up-to-date + // fingerprint must be provided in order to update the UrlMap. + Fingerprint string `json:"fingerprint,omitempty"` + + // HostRules: The list of HostRules to use against the URL. + HostRules []*HostRule `json:"hostRules,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always compute#urlMaps for + // url maps. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // PathMatchers: The list of named PathMatchers to use against the URL. + PathMatchers []*PathMatcher `json:"pathMatchers,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Tests: The list of expected URL mappings. Request to update this + // UrlMap will succeed only if all of the test cases pass. + Tests []*UrlMapTest `json:"tests,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *UrlMap) MarshalJSON() ([]byte, error) { + type noMethod UrlMap + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UrlMapList: Contains a list of UrlMap resources. +type UrlMapList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of UrlMap resources. + Items []*UrlMap `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. 
+ NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *UrlMapListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlMapList) MarshalJSON() ([]byte, error) { + type noMethod UrlMapList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UrlMapListWarning: [Output Only] Informational warning message. +type UrlMapListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*UrlMapListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlMapListWarning) MarshalJSON() ([]byte, error) { + type noMethod UrlMapListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type UrlMapListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlMapListWarningData) MarshalJSON() ([]byte, error) { + type noMethod UrlMapListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type UrlMapReference struct { + UrlMap string `json:"urlMap,omitempty"` + + // ForceSendFields is a list of field names (e.g. "UrlMap") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "UrlMap") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlMapReference) MarshalJSON() ([]byte, error) { + type noMethod UrlMapReference + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UrlMapTest: Message for the expected URL mappings. +type UrlMapTest struct { + // Description: Description of this test case. + Description string `json:"description,omitempty"` + + // Host: Host portion of the URL. 
+ Host string `json:"host,omitempty"` + + // Path: Path portion of the URL. + Path string `json:"path,omitempty"` + + // Service: Expected BackendService resource the given URL should be + // mapped to. + Service string `json:"service,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Description") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Description") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlMapTest) MarshalJSON() ([]byte, error) { + type noMethod UrlMapTest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UrlMapValidationResult: Message representing the validation result +// for a UrlMap. +type UrlMapValidationResult struct { + LoadErrors []string `json:"loadErrors,omitempty"` + + // LoadSucceeded: Whether the given UrlMap can be successfully loaded. + // If false, 'loadErrors' indicates the reasons. + LoadSucceeded bool `json:"loadSucceeded,omitempty"` + + TestFailures []*TestFailure `json:"testFailures,omitempty"` + + // TestPassed: If successfully loaded, this field indicates whether the + // test passed. If false, 'testFailures's indicate the reason of + // failure. + TestPassed bool `json:"testPassed,omitempty"` + + // ForceSendFields is a list of field names (e.g. "LoadErrors") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "LoadErrors") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlMapValidationResult) MarshalJSON() ([]byte, error) { + type noMethod UrlMapValidationResult + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type UrlMapsValidateRequest struct { + // Resource: Content of the UrlMap to be validated. + Resource *UrlMap `json:"resource,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Resource") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Resource") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlMapsValidateRequest) MarshalJSON() ([]byte, error) { + type noMethod UrlMapsValidateRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type UrlMapsValidateResponse struct { + Result *UrlMapValidationResult `json:"result,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Result") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Result") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlMapsValidateResponse) MarshalJSON() ([]byte, error) { + type noMethod UrlMapsValidateResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UsableSubnetwork: Subnetwork which the current user has +// compute.subnetworks.use permission on. +type UsableSubnetwork struct { + // Subnetwork: Subnetwork URL. + Subnetwork string `json:"subnetwork,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Subnetwork") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Subnetwork") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *UsableSubnetwork) MarshalJSON() ([]byte, error) { + type noMethod UsableSubnetwork + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type UsableSubnetworksAggregatedList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: [Output] A list of usable subnetwork URLs. + Items []*UsableSubnetwork `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#usableSubnetworksAggregatedList for aggregated lists of + // usable subnetworks. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *UsableSubnetworksAggregatedListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UsableSubnetworksAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod UsableSubnetworksAggregatedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UsableSubnetworksAggregatedListWarning: [Output Only] Informational +// warning message. +type UsableSubnetworksAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*UsableSubnetworksAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UsableSubnetworksAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod UsableSubnetworksAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type UsableSubnetworksAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UsableSubnetworksAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod UsableSubnetworksAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UsageExportLocation: The location in Cloud Storage and naming method +// of the daily usage report. Contains bucket_name and report_name +// prefix. +type UsageExportLocation struct { + // BucketName: The name of an existing bucket in Cloud Storage where the + // usage report object is stored. The Google Service Account is granted + // write access to this bucket. This can either be the bucket name by + // itself, such as example-bucket, or the bucket name with gs:// or + // https://storage.googleapis.com/ in front of it, such as + // gs://example-bucket. + BucketName string `json:"bucketName,omitempty"` + + // ReportNamePrefix: An optional prefix for the name of the usage report + // object stored in bucketName. If not supplied, defaults to usage. The + // report is stored as a CSV file named + // report_name_prefix_gce_YYYYMMDD.csv where YYYYMMDD is the day of the + // usage according to Pacific Time. If you supply a prefix, it should + // conform to Cloud Storage object naming conventions. + ReportNamePrefix string `json:"reportNamePrefix,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BucketName") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BucketName") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UsageExportLocation) MarshalJSON() ([]byte, error) { + type noMethod UsageExportLocation + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnTunnel struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // DetailedStatus: [Output Only] Detailed status message for the VPN + // tunnel. + DetailedStatus string `json:"detailedStatus,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // IkeVersion: IKE protocol version to use when establishing the VPN + // tunnel with peer VPN gateway. Acceptable IKE versions are 1 or 2. + // Default version is 2. 
+ IkeVersion int64 `json:"ikeVersion,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#vpnTunnel for + // VPN tunnels. + Kind string `json:"kind,omitempty"` + + // LabelFingerprint: A fingerprint for the labels being applied to this + // VpnTunnel, which is essentially a hash of the labels set used for + // optimistic locking. The fingerprint is initially generated by Compute + // Engine and changes after every request to modify or update labels. + // You must always provide an up-to-date fingerprint hash in order to + // update or change labels. + // + // To see the latest fingerprint, make a get() request to retrieve a + // VpnTunnel. + LabelFingerprint string `json:"labelFingerprint,omitempty"` + + // Labels: Labels to apply to this VpnTunnel. These can be later + // modified by the setLabels method. Each label key/value pair must + // comply with RFC1035. Label values may be empty. + Labels map[string]string `json:"labels,omitempty"` + + // LocalTrafficSelector: Local traffic selector to use when establishing + // the VPN tunnel with peer VPN gateway. The value should be a CIDR + // formatted string, for example: 192.168.0.0/16. The ranges should be + // disjoint. Only IPv4 is supported. + LocalTrafficSelector []string `json:"localTrafficSelector,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // PeerIp: IP address of the peer VPN gateway. Only IPv4 is supported. + PeerIp string `json:"peerIp,omitempty"` + + // Region: [Output Only] URL of the region where the VPN tunnel resides. + Region string `json:"region,omitempty"` + + // RemoteTrafficSelector: Remote traffic selectors to use when + // establishing the VPN tunnel with peer VPN gateway. The value should + // be a CIDR formatted string, for example: 192.168.0.0/16. The ranges + // should be disjoint. Only IPv4 is supported. + RemoteTrafficSelector []string `json:"remoteTrafficSelector,omitempty"` + + // Router: URL of router resource to be used for dynamic routing. + Router string `json:"router,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // SharedSecret: Shared secret used to set the secure session between + // the Cloud VPN gateway and the peer VPN gateway. + SharedSecret string `json:"sharedSecret,omitempty"` + + // SharedSecretHash: Hash of the shared secret. + SharedSecretHash string `json:"sharedSecretHash,omitempty"` + + // Status: [Output Only] The status of the VPN tunnel. + // + // Possible values: + // "ALLOCATING_RESOURCES" + // "AUTHORIZATION_ERROR" + // "DEPROVISIONING" + // "ESTABLISHED" + // "FAILED" + // "FIRST_HANDSHAKE" + // "NEGOTIATION_FAILURE" + // "NETWORK_ERROR" + // "NO_INCOMING_PACKETS" + // "PROVISIONING" + // "REJECTED" + // "WAITING_FOR_FULL_CONFIG" + Status string `json:"status,omitempty"` + + // TargetVpnGateway: URL of the VPN gateway with which this VPN tunnel + // is associated. Provided by the client when the VPN tunnel is created. 
+ TargetVpnGateway string `json:"targetVpnGateway,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnel) MarshalJSON() ([]byte, error) { + type noMethod VpnTunnel + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnTunnelAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of VpnTunnelsScopedList resources. + Items map[string]VpnTunnelsScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#vpnTunnel for + // VPN tunnels. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *VpnTunnelAggregatedListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *VpnTunnelAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod VpnTunnelAggregatedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnTunnelAggregatedListWarning: [Output Only] Informational warning +// message. +type VpnTunnelAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*VpnTunnelAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod VpnTunnelAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnTunnelAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod VpnTunnelAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnTunnelList: Contains a list of VpnTunnel resources. +type VpnTunnelList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of VpnTunnel resources. + Items []*VpnTunnel `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#vpnTunnel for + // VPN tunnels. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *VpnTunnelListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelList) MarshalJSON() ([]byte, error) { + type noMethod VpnTunnelList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnTunnelListWarning: [Output Only] Informational warning message. +type VpnTunnelListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*VpnTunnelListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelListWarning) MarshalJSON() ([]byte, error) { + type noMethod VpnTunnelListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnTunnelListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelListWarningData) MarshalJSON() ([]byte, error) { + type noMethod VpnTunnelListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnTunnelsScopedList struct { + // VpnTunnels: List of vpn tunnels contained in this scope. + VpnTunnels []*VpnTunnel `json:"vpnTunnels,omitempty"` + + // Warning: Informational warning which replaces the list of addresses + // when the list is empty. + Warning *VpnTunnelsScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "VpnTunnels") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "VpnTunnels") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelsScopedList) MarshalJSON() ([]byte, error) { + type noMethod VpnTunnelsScopedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnTunnelsScopedListWarning: Informational warning which replaces the +// list of addresses when the list is empty. +type VpnTunnelsScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*VpnTunnelsScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelsScopedListWarning) MarshalJSON() ([]byte, error) { + type noMethod VpnTunnelsScopedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnTunnelsScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelsScopedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod VpnTunnelsScopedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type XpnHostList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A list of shared VPC host project URLs. + Items []*Project `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#xpnHostList for + // lists of shared VPC hosts. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. 
+ SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *XpnHostListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *XpnHostList) MarshalJSON() ([]byte, error) { + type noMethod XpnHostList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// XpnHostListWarning: [Output Only] Informational warning message. +type XpnHostListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*XpnHostListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *XpnHostListWarning) MarshalJSON() ([]byte, error) { + type noMethod XpnHostListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type XpnHostListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *XpnHostListWarningData) MarshalJSON() ([]byte, error) { + type noMethod XpnHostListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// XpnResourceId: Service resource (a.k.a service project) ID. +type XpnResourceId struct { + // Id: The ID of the service resource. In the case of projects, this + // field matches the project ID (e.g., my-project), not the project + // number (e.g., 12345678). + Id string `json:"id,omitempty"` + + // Type: The type of the service resource. + // + // Possible values: + // "PROJECT" + // "XPN_RESOURCE_TYPE_UNSPECIFIED" + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *XpnResourceId) MarshalJSON() ([]byte, error) { + type noMethod XpnResourceId + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Zone: A Zone resource. +type Zone struct { + // AvailableCpuPlatforms: [Output Only] Available cpu/platform + // selections for the zone. + AvailableCpuPlatforms []string `json:"availableCpuPlatforms,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Deprecated: [Output Only] The deprecation status associated with this + // zone. + Deprecated *DeprecationStatus `json:"deprecated,omitempty"` + + // Description: [Output Only] Textual description of the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always compute#zone for + // zones. + Kind string `json:"kind,omitempty"` + + // Name: [Output Only] Name of the resource. + Name string `json:"name,omitempty"` + + // Region: [Output Only] Full URL reference to the region which hosts + // the zone. + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Status: [Output Only] Status of the zone, either UP or DOWN. + // + // Possible values: + // "DOWN" + // "UP" + Status string `json:"status,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. + // "AvailableCpuPlatforms") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AvailableCpuPlatforms") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *Zone) MarshalJSON() ([]byte, error) { + type noMethod Zone + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ZoneList: Contains a list of zone resources. +type ZoneList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of Zone resources. + Items []*Zone `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. 
Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *ZoneListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ZoneList) MarshalJSON() ([]byte, error) { + type noMethod ZoneList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ZoneListWarning: [Output Only] Informational warning message. +type ZoneListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*ZoneListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ZoneListWarning) MarshalJSON() ([]byte, error) { + type noMethod ZoneListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ZoneListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ZoneListWarningData) MarshalJSON() ([]byte, error) { + type noMethod ZoneListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ZoneSetLabelsRequest struct { + // LabelFingerprint: The fingerprint of the previous set of labels for + // this resource, used to detect conflicts. The fingerprint is initially + // generated by Compute Engine and changes after every request to modify + // or update labels. You must always provide an up-to-date fingerprint + // hash in order to update or change labels. Make a get() request to the + // resource to get the latest fingerprint. + LabelFingerprint string `json:"labelFingerprint,omitempty"` + + // Labels: The labels to set for this resource. + Labels map[string]string `json:"labels,omitempty"` + + // ForceSendFields is a list of field names (e.g. "LabelFingerprint") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "LabelFingerprint") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. 
However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *ZoneSetLabelsRequest) MarshalJSON() ([]byte, error) { + type noMethod ZoneSetLabelsRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// method id "compute.acceleratorTypes.aggregatedList": + +type AcceleratorTypesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves an aggregated list of accelerator types. +func (r *AcceleratorTypesService) AggregatedList(project string) *AcceleratorTypesAggregatedListCall { + c := &AcceleratorTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *AcceleratorTypesAggregatedListCall) Filter(filter string) *AcceleratorTypesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *AcceleratorTypesAggregatedListCall) MaxResults(maxResults int64) *AcceleratorTypesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". 
This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *AcceleratorTypesAggregatedListCall) OrderBy(orderBy string) *AcceleratorTypesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *AcceleratorTypesAggregatedListCall) PageToken(pageToken string) *AcceleratorTypesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AcceleratorTypesAggregatedListCall) Fields(s ...googleapi.Field) *AcceleratorTypesAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AcceleratorTypesAggregatedListCall) IfNoneMatch(entityTag string) *AcceleratorTypesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AcceleratorTypesAggregatedListCall) Context(ctx context.Context) *AcceleratorTypesAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AcceleratorTypesAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AcceleratorTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/acceleratorTypes") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.acceleratorTypes.aggregatedList" call. +// Exactly one of *AcceleratorTypeAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *AcceleratorTypeAggregatedList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. 
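+//
+// Illustrative sketch only (not part of the generated surface): one plausible
+// way to combine IfNoneMatch, Context and Do with googleapi.IsNotModified.
+// The svc value (an assumed *compute.Service), ctx, "my-project" and
+// previousEtag are placeholders for this example.
+//
+//	call := svc.AcceleratorTypes.AggregatedList("my-project").IfNoneMatch(previousEtag)
+//	list, err := call.Context(ctx).Do()
+//	if googleapi.IsNotModified(err) {
+//		// The cached copy is still current; reuse it.
+//	} else if err != nil {
+//		return err
+//	}
+//	_ = list
+//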
+func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*AcceleratorTypeAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &AcceleratorTypeAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an aggregated list of accelerator types.", + // "httpMethod": "GET", + // "id": "compute.acceleratorTypes.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/acceleratorTypes", + // "response": { + // "$ref": "AcceleratorTypeAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *AcceleratorTypesAggregatedListCall) Pages(ctx context.Context, f func(*AcceleratorTypeAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.acceleratorTypes.get": + +type AcceleratorTypesGetCall struct { + s *Service + project string + zone string + acceleratorType string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified accelerator type. Get a list of available +// accelerator types by making a list() request. +func (r *AcceleratorTypesService) Get(project string, zone string, acceleratorType string) *AcceleratorTypesGetCall { + c := &AcceleratorTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.acceleratorType = acceleratorType + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AcceleratorTypesGetCall) Fields(s ...googleapi.Field) *AcceleratorTypesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AcceleratorTypesGetCall) IfNoneMatch(entityTag string) *AcceleratorTypesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AcceleratorTypesGetCall) Context(ctx context.Context) *AcceleratorTypesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
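+//
+// Illustrative sketch only (not part of the generated surface): adding a
+// custom request header via Header() and asking for a partial response via
+// Fields() on a Get call. The svc value, ctx, the project/zone/type names and
+// the header used are placeholders for this example.
+//
+//	call := svc.AcceleratorTypes.Get("my-project", "us-central1-a", "nvidia-tesla-k80")
+//	call.Header().Set("X-Goog-User-Project", "my-project")
+//	at, err := call.Fields("name", "selfLink").Context(ctx).Do()
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(at.Name)
+//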
+func (c *AcceleratorTypesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AcceleratorTypesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/acceleratorTypes/{acceleratorType}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "acceleratorType": c.acceleratorType, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.acceleratorTypes.get" call. +// Exactly one of *AcceleratorType or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *AcceleratorType.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AcceleratorTypesGetCall) Do(opts ...googleapi.CallOption) (*AcceleratorType, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &AcceleratorType{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified accelerator type. 
Get a list of available accelerator types by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.acceleratorTypes.get", + // "parameterOrder": [ + // "project", + // "zone", + // "acceleratorType" + // ], + // "parameters": { + // "acceleratorType": { + // "description": "Name of the accelerator type to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/acceleratorTypes/{acceleratorType}", + // "response": { + // "$ref": "AcceleratorType" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.acceleratorTypes.list": + +type AcceleratorTypesListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of accelerator types available to the +// specified project. +func (r *AcceleratorTypesService) List(project string, zone string) *AcceleratorTypesListCall { + c := &AcceleratorTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. 
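+//
+// Illustrative sketch only (not part of the generated surface): a zonal list
+// filtered by name using the eq/ne comparison syntax described above. The svc
+// value and the project/zone/name literals are placeholders for this example.
+//
+//	types, err := svc.AcceleratorTypes.List("my-project", "us-central1-a").
+//		Filter("name ne example-accelerator").
+//		Do()
+//	if err != nil {
+//		return err
+//	}
+//	_ = types
+//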
+func (c *AcceleratorTypesListCall) Filter(filter string) *AcceleratorTypesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *AcceleratorTypesListCall) MaxResults(maxResults int64) *AcceleratorTypesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *AcceleratorTypesListCall) OrderBy(orderBy string) *AcceleratorTypesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *AcceleratorTypesListCall) PageToken(pageToken string) *AcceleratorTypesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AcceleratorTypesListCall) Fields(s ...googleapi.Field) *AcceleratorTypesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AcceleratorTypesListCall) IfNoneMatch(entityTag string) *AcceleratorTypesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AcceleratorTypesListCall) Context(ctx context.Context) *AcceleratorTypesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
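+//
+// Illustrative sketch, not part of the generated code: attaching an extra
+// header to a single call before executing it. The header name and value
+// are placeholders, and svc is assumed to be an initialized *Service.
+//
+//   call := svc.AcceleratorTypes.List("my-project", "us-central1-f")
+//   call.Header().Set("X-Example-Debug", "trace-this-request")
+//   types, err := call.Do()
+//   // use types / err here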
+func (c *AcceleratorTypesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AcceleratorTypesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/acceleratorTypes") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.acceleratorTypes.list" call. +// Exactly one of *AcceleratorTypeList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *AcceleratorTypeList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AcceleratorTypesListCall) Do(opts ...googleapi.CallOption) (*AcceleratorTypeList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &AcceleratorTypeList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of accelerator types available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.acceleratorTypes.list", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/acceleratorTypes", + // "response": { + // "$ref": "AcceleratorTypeList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *AcceleratorTypesListCall) Pages(ctx context.Context, f func(*AcceleratorTypeList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.addresses.aggregatedList": + +type AddressesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves an aggregated list of addresses. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/aggregatedList +func (r *AddressesService) AggregatedList(project string) *AddressesAggregatedListCall { + c := &AddressesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *AddressesAggregatedListCall) Filter(filter string) *AddressesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *AddressesAggregatedListCall) MaxResults(maxResults int64) *AddressesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *AddressesAggregatedListCall) OrderBy(orderBy string) *AddressesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. 
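+//
+// Illustrative sketch, not part of the generated code: paging through the
+// aggregated results by hand with MaxResults and PageToken. svc and
+// "my-project" are placeholders; the Pages helper defined later in this
+// file wraps the same loop.
+//
+//   call := svc.Addresses.AggregatedList("my-project").MaxResults(250)
+//   for {
+//       page, err := call.Do()
+//       if err != nil {
+//           // handle error
+//           break
+//       }
+//       // ... consume page ...
+//       if page.NextPageToken == "" {
+//           break
+//       }
+//       call.PageToken(page.NextPageToken)
+//   }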
+func (c *AddressesAggregatedListCall) PageToken(pageToken string) *AddressesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AddressesAggregatedListCall) Fields(s ...googleapi.Field) *AddressesAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AddressesAggregatedListCall) IfNoneMatch(entityTag string) *AddressesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AddressesAggregatedListCall) Context(ctx context.Context) *AddressesAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AddressesAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AddressesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/addresses") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.addresses.aggregatedList" call. +// Exactly one of *AddressAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *AddressAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*AddressAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &AddressAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an aggregated list of addresses.", + // "httpMethod": "GET", + // "id": "compute.addresses.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/addresses", + // "response": { + // "$ref": "AddressAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *AddressesAggregatedListCall) Pages(ctx context.Context, f func(*AddressAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.addresses.delete": + +type AddressesDeleteCall struct { + s *Service + project string + region string + address string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified address resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/delete +func (r *AddressesService) Delete(project string, region string, address string) *AddressesDeleteCall { + c := &AddressesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.address = address + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *AddressesDeleteCall) RequestId(requestId string) *AddressesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AddressesDeleteCall) Fields(s ...googleapi.Field) *AddressesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
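+//
+// Illustrative sketch, not part of the generated code: deleting an address
+// with a caller-chosen request ID so the call can be retried safely, and
+// with a cancellable context. The project, region, address name and UUID
+// are placeholders; the UUID must be valid and non-zero.
+//
+//   op, err := svc.Addresses.
+//       Delete("my-project", "us-central1", "my-static-ip").
+//       RequestId("9d6f7e2a-8f3b-4a6e-9c1d-123456789abc").
+//       Context(ctx).
+//       Do()
+//   // op is an *Operation that can be polled until the delete completes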
+func (c *AddressesDeleteCall) Context(ctx context.Context) *AddressesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AddressesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AddressesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{address}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "address": c.address, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.addresses.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified address resource.", + // "httpMethod": "DELETE", + // "id": "compute.addresses.delete", + // "parameterOrder": [ + // "project", + // "region", + // "address" + // ], + // "parameters": { + // "address": { + // "description": "Name of the address resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/addresses/{address}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.addresses.get": + +type AddressesGetCall struct { + s *Service + project string + region string + address string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified address resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/get +func (r *AddressesService) Get(project string, region string, address string) *AddressesGetCall { + c := &AddressesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.address = address + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AddressesGetCall) Fields(s ...googleapi.Field) *AddressesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AddressesGetCall) IfNoneMatch(entityTag string) *AddressesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AddressesGetCall) Context(ctx context.Context) *AddressesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AddressesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AddressesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{address}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "address": c.address, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.addresses.get" call. +// Exactly one of *Address or error will be non-nil. Any non-2xx status +// code is an error. 
Response headers are in either +// *Address.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *AddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Address{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified address resource.", + // "httpMethod": "GET", + // "id": "compute.addresses.get", + // "parameterOrder": [ + // "project", + // "region", + // "address" + // ], + // "parameters": { + // "address": { + // "description": "Name of the address resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/addresses/{address}", + // "response": { + // "$ref": "Address" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.addresses.insert": + +type AddressesInsertCall struct { + s *Service + project string + region string + address *Address + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates an address resource in the specified project using +// the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/insert +func (r *AddressesService) Insert(project string, region string, address *Address) *AddressesInsertCall { + c := &AddressesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.address = address + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. 
This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *AddressesInsertCall) RequestId(requestId string) *AddressesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AddressesInsertCall) Fields(s ...googleapi.Field) *AddressesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AddressesInsertCall) Context(ctx context.Context) *AddressesInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AddressesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AddressesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.address) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.addresses.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates an address resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.addresses.insert", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/addresses", + // "request": { + // "$ref": "Address" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.addresses.list": + +type AddressesListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of addresses contained within the specified +// region. +// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/list +func (r *AddressesService) List(project string, region string) *AddressesListCall { + c := &AddressesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). 
The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *AddressesListCall) Filter(filter string) *AddressesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *AddressesListCall) MaxResults(maxResults int64) *AddressesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *AddressesListCall) OrderBy(orderBy string) *AddressesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *AddressesListCall) PageToken(pageToken string) *AddressesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AddressesListCall) Fields(s ...googleapi.Field) *AddressesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
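+//
+// Illustrative sketch, not part of the generated code: re-issuing a list
+// only if it changed since an earlier response, using the ETag from that
+// response's headers (assuming the server returned one) together with
+// googleapi.IsNotModified.
+//
+//   etag := prev.ServerResponse.Header.Get("Etag") // prev: an earlier *AddressList
+//   cur, err := svc.Addresses.List("my-project", "us-central1").
+//       IfNoneMatch(etag).
+//       Do()
+//   if googleapi.IsNotModified(err) {
+//       cur = prev // nothing changed; keep the cached copy
+//   } else if err != nil {
+//       // handle any other error
+//   }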
+func (c *AddressesListCall) IfNoneMatch(entityTag string) *AddressesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AddressesListCall) Context(ctx context.Context) *AddressesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AddressesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AddressesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.addresses.list" call. +// Exactly one of *AddressList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *AddressList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &AddressList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of addresses contained within the specified region.", + // "httpMethod": "GET", + // "id": "compute.addresses.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/addresses", + // "response": { + // "$ref": "AddressList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
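+//
+// Illustrative sketch, not part of the generated code: letting Pages drive
+// the pagination instead of handling NextPageToken by hand. svc, ctx and
+// "my-project" are placeholders, and the Items, Name and Address field
+// names are assumed from the usual shape of the generated list and
+// resource types.
+//
+//   err := svc.Addresses.List("my-project", "us-central1").
+//       Pages(ctx, func(page *AddressList) error {
+//           for _, addr := range page.Items {
+//               fmt.Println(addr.Name, addr.Address)
+//           }
+//           return nil
+//       })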
+func (c *AddressesListCall) Pages(ctx context.Context, f func(*AddressList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.addresses.setLabels": + +type AddressesSetLabelsCall struct { + s *Service + project string + region string + resource string + regionsetlabelsrequest *RegionSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetLabels: Sets the labels on an Address. To learn more about labels, +// read the Labeling Resources documentation. +func (r *AddressesService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *AddressesSetLabelsCall { + c := &AddressesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.regionsetlabelsrequest = regionsetlabelsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *AddressesSetLabelsCall) RequestId(requestId string) *AddressesSetLabelsCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AddressesSetLabelsCall) Fields(s ...googleapi.Field) *AddressesSetLabelsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AddressesSetLabelsCall) Context(ctx context.Context) *AddressesSetLabelsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AddressesSetLabelsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AddressesSetLabelsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{resource}/setLabels") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.addresses.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AddressesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the labels on an Address. To learn more about labels, read the Labeling Resources documentation.", + // "httpMethod": "POST", + // "id": "compute.addresses.setLabels", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/addresses/{resource}/setLabels", + // "request": { + // "$ref": "RegionSetLabelsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.addresses.testIamPermissions": + +type AddressesTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *AddressesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *AddressesTestIamPermissionsCall { + c := &AddressesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AddressesTestIamPermissionsCall) Fields(s ...googleapi.Field) *AddressesTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AddressesTestIamPermissionsCall) Context(ctx context.Context) *AddressesTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AddressesTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AddressesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{resource}/testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.addresses.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AddressesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TestPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.addresses.testIamPermissions", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/addresses/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, + // "response": { + // "$ref": "TestPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.autoscalers.aggregatedList": + +type AutoscalersAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves an aggregated list of autoscalers. +func (r *AutoscalersService) AggregatedList(project string) *AutoscalersAggregatedListCall { + c := &AutoscalersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. 
The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *AutoscalersAggregatedListCall) Filter(filter string) *AutoscalersAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *AutoscalersAggregatedListCall) MaxResults(maxResults int64) *AutoscalersAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *AutoscalersAggregatedListCall) OrderBy(orderBy string) *AutoscalersAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *AutoscalersAggregatedListCall) PageToken(pageToken string) *AutoscalersAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersAggregatedListCall) Fields(s ...googleapi.Field) *AutoscalersAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
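Taken together, the Filter, MaxResults and OrderBy setters documented above compose onto a single aggregated-list call. A minimal usage sketch follows; it assumes this vendored file is the compute/v0.beta client (adjust the import path to whichever version is vendored) and that Application Default Credentials are available, with "my-project" as a placeholder project ID.

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v0.beta"
)

func main() {
	ctx := context.Background()
	// DefaultClient picks up Application Default Credentials for the compute scope.
	client, err := google.DefaultClient(ctx, compute.ComputeScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}
	// Chain the optional parameters described above: a filter expression in the
	// "field_name comparison_string literal_string" form, a page size, and a sort order.
	list, err := svc.Autoscalers.AggregatedList("my-project").
		Filter("name ne example-autoscaler").
		MaxResults(100).
		OrderBy("creationTimestamp desc").
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	// The aggregated list is keyed by scope (e.g. "zones/us-central1-f").
	for scope, scoped := range list.Items {
		fmt.Println(scope, len(scoped.Autoscalers))
	}
}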
+func (c *AutoscalersAggregatedListCall) IfNoneMatch(entityTag string) *AutoscalersAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersAggregatedListCall) Context(ctx context.Context) *AutoscalersAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AutoscalersAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AutoscalersAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/autoscalers") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.autoscalers.aggregatedList" call. +// Exactly one of *AutoscalerAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *AutoscalerAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*AutoscalerAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &AutoscalerAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an aggregated list of autoscalers.", + // "httpMethod": "GET", + // "id": "compute.autoscalers.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/autoscalers", + // "response": { + // "$ref": "AutoscalerAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
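The Pages helper defined next handles the nextPageToken loop that the pageToken documentation above describes. A sketch of draining every page, assuming a *compute.Service constructed as in the previous sketch:

package example

import (
	"golang.org/x/net/context"
	compute "google.golang.org/api/compute/v0.beta"
)

// listAllAutoscalers walks every page of the aggregated list via the generated
// Pages helper, which follows nextPageToken until it is empty.
func listAllAutoscalers(ctx context.Context, svc *compute.Service, project string) ([]*compute.Autoscaler, error) {
	var all []*compute.Autoscaler
	err := svc.Autoscalers.AggregatedList(project).Pages(ctx, func(page *compute.AutoscalerAggregatedList) error {
		for _, scoped := range page.Items {
			all = append(all, scoped.Autoscalers...)
		}
		return nil // returning a non-nil error here halts the iteration
	})
	return all, err
}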
+func (c *AutoscalersAggregatedListCall) Pages(ctx context.Context, f func(*AutoscalerAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.autoscalers.delete": + +type AutoscalersDeleteCall struct { + s *Service + project string + zone string + autoscaler string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified autoscaler. +func (r *AutoscalersService) Delete(project string, zone string, autoscaler string) *AutoscalersDeleteCall { + c := &AutoscalersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.autoscaler = autoscaler + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *AutoscalersDeleteCall) RequestId(requestId string) *AutoscalersDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersDeleteCall) Fields(s ...googleapi.Field) *AutoscalersDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersDeleteCall) Context(ctx context.Context) *AutoscalersDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AutoscalersDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers/{autoscaler}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "autoscaler": c.autoscaler, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.autoscalers.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified autoscaler.", + // "httpMethod": "DELETE", + // "id": "compute.autoscalers.delete", + // "parameterOrder": [ + // "project", + // "zone", + // "autoscaler" + // ], + // "parameters": { + // "autoscaler": { + // "description": "Name of the autoscaler to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/autoscalers/{autoscaler}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.autoscalers.get": + +type AutoscalersGetCall struct { + s *Service + project string + zone string + autoscaler string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified autoscaler resource. Get a list of +// available autoscalers by making a list() request. 
+func (r *AutoscalersService) Get(project string, zone string, autoscaler string) *AutoscalersGetCall { + c := &AutoscalersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.autoscaler = autoscaler + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersGetCall) Fields(s ...googleapi.Field) *AutoscalersGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AutoscalersGetCall) IfNoneMatch(entityTag string) *AutoscalersGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersGetCall) Context(ctx context.Context) *AutoscalersGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AutoscalersGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AutoscalersGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers/{autoscaler}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "autoscaler": c.autoscaler, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.autoscalers.get" call. +// Exactly one of *Autoscaler or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Autoscaler.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Autoscaler{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified autoscaler resource. 
Get a list of available autoscalers by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.autoscalers.get", + // "parameterOrder": [ + // "project", + // "zone", + // "autoscaler" + // ], + // "parameters": { + // "autoscaler": { + // "description": "Name of the autoscaler to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/autoscalers/{autoscaler}", + // "response": { + // "$ref": "Autoscaler" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.autoscalers.insert": + +type AutoscalersInsertCall struct { + s *Service + project string + zone string + autoscaler *Autoscaler + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates an autoscaler in the specified project using the data +// included in the request. +func (r *AutoscalersService) Insert(project string, zone string, autoscaler *Autoscaler) *AutoscalersInsertCall { + c := &AutoscalersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.autoscaler = autoscaler + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *AutoscalersInsertCall) RequestId(requestId string) *AutoscalersInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersInsertCall) Fields(s ...googleapi.Field) *AutoscalersInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersInsertCall) Context(ctx context.Context) *AutoscalersInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
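The RequestId parameter described above makes inserts safe to retry after a timeout: the server ignores a second insert carrying the same UUID. A sketch of creating an autoscaler with one; the UUID literal and the Autoscaler/AutoscalingPolicy field values are illustrative placeholders.

package example

import (
	"golang.org/x/net/context"
	compute "google.golang.org/api/compute/v0.beta"
)

// createAutoscaler issues compute.autoscalers.insert with a client-chosen request ID.
func createAutoscaler(ctx context.Context, svc *compute.Service, project, zone, targetMIG string) (*compute.Operation, error) {
	as := &compute.Autoscaler{
		Name:   "web-autoscaler",
		Target: targetMIG, // URL of the managed instance group to scale
		AutoscalingPolicy: &compute.AutoscalingPolicy{
			MinNumReplicas: 1,
			MaxNumReplicas: 5,
			CpuUtilization: &compute.AutoscalingPolicyCpuUtilization{UtilizationTarget: 0.6},
		},
	}
	// The request ID must be a valid, unique UUID (the zero UUID is rejected);
	// a hard-coded placeholder is used here for illustration only.
	return svc.Autoscalers.Insert(project, zone, as).
		RequestId("3c1a1bd6-6bb7-4b2a-9b8f-0d6f2a6f1e42").
		Context(ctx).
		Do()
}

The returned *compute.Operation is asynchronous; a caller would typically poll the zone operations service (not shown) until its Status is DONE.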
+func (c *AutoscalersInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AutoscalersInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.autoscalers.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates an autoscaler in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.autoscalers.insert", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/autoscalers", + // "request": { + // "$ref": "Autoscaler" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.autoscalers.list": + +type AutoscalersListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of autoscalers contained within the specified +// zone. +func (r *AutoscalersService) List(project string, zone string) *AutoscalersListCall { + c := &AutoscalersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *AutoscalersListCall) Filter(filter string) *AutoscalersListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *AutoscalersListCall) MaxResults(maxResults int64) *AutoscalersListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. 
By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *AutoscalersListCall) OrderBy(orderBy string) *AutoscalersListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *AutoscalersListCall) PageToken(pageToken string) *AutoscalersListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersListCall) Fields(s ...googleapi.Field) *AutoscalersListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AutoscalersListCall) IfNoneMatch(entityTag string) *AutoscalersListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersListCall) Context(ctx context.Context) *AutoscalersListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AutoscalersListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AutoscalersListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.autoscalers.list" call. +// Exactly one of *AutoscalerList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *AutoscalerList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &AutoscalerList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of autoscalers contained within the specified zone.", + // "httpMethod": "GET", + // "id": "compute.autoscalers.list", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/autoscalers", + // "response": { + // "$ref": "AutoscalerList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *AutoscalersListCall) Pages(ctx context.Context, f func(*AutoscalerList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.autoscalers.patch": + +type AutoscalersPatchCall struct { + s *Service + project string + zone string + autoscaler *Autoscaler + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates an autoscaler in the specified project using the data +// included in the request. This method supports PATCH semantics and +// uses the JSON merge patch format and processing rules. +func (r *AutoscalersService) Patch(project string, zone string, autoscaler *Autoscaler) *AutoscalersPatchCall { + c := &AutoscalersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.autoscaler = autoscaler + return c +} + +// Autoscaler sets the optional parameter "autoscaler": Name of the +// autoscaler to patch. +func (c *AutoscalersPatchCall) Autoscaler(autoscaler string) *AutoscalersPatchCall { + c.urlParams_.Set("autoscaler", autoscaler) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *AutoscalersPatchCall) RequestId(requestId string) *AutoscalersPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersPatchCall) Fields(s ...googleapi.Field) *AutoscalersPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersPatchCall) Context(ctx context.Context) *AutoscalersPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AutoscalersPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AutoscalersPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.autoscalers.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an autoscaler in the specified project using the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.autoscalers.patch", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "autoscaler": { + // "description": "Name of the autoscaler to patch.", + // "location": "query", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/autoscalers", + // "request": { + // "$ref": "Autoscaler" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.autoscalers.testIamPermissions": + +type AutoscalersTestIamPermissionsCall struct { + s *Service + project string + zone string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *AutoscalersService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *AutoscalersTestIamPermissionsCall { + c := &AutoscalersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersTestIamPermissionsCall) Fields(s ...googleapi.Field) *AutoscalersTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersTestIamPermissionsCall) Context(ctx context.Context) *AutoscalersTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
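A sketch of the testIamPermissions call documented above; the permission names passed in are illustrative, and the response echoes back only the subset the caller actually holds.

package example

import (
	"golang.org/x/net/context"
	compute "google.golang.org/api/compute/v0.beta"
)

// canManageAutoscaler reports whether the caller holds all of the requested
// permissions on a specific autoscaler resource.
func canManageAutoscaler(ctx context.Context, svc *compute.Service, project, zone, autoscaler string) (bool, error) {
	req := &compute.TestPermissionsRequest{
		Permissions: []string{
			"compute.autoscalers.get",
			"compute.autoscalers.update",
		},
	}
	resp, err := svc.Autoscalers.TestIamPermissions(project, zone, autoscaler, req).Context(ctx).Do()
	if err != nil {
		return false, err
	}
	return len(resp.Permissions) == len(req.Permissions), nil
}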
+func (c *AutoscalersTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AutoscalersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers/{resource}/testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.autoscalers.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AutoscalersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TestPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.autoscalers.testIamPermissions", + // "parameterOrder": [ + // "project", + // "zone", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/autoscalers/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, + // "response": { + // "$ref": "TestPermissionsResponse" + // }, + // "scopes": [ + // 
"https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.autoscalers.update": + +type AutoscalersUpdateCall struct { + s *Service + project string + zone string + autoscaler *Autoscaler + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates an autoscaler in the specified project using the data +// included in the request. +func (r *AutoscalersService) Update(project string, zone string, autoscaler *Autoscaler) *AutoscalersUpdateCall { + c := &AutoscalersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.autoscaler = autoscaler + return c +} + +// Autoscaler sets the optional parameter "autoscaler": Name of the +// autoscaler to update. +func (c *AutoscalersUpdateCall) Autoscaler(autoscaler string) *AutoscalersUpdateCall { + c.urlParams_.Set("autoscaler", autoscaler) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *AutoscalersUpdateCall) RequestId(requestId string) *AutoscalersUpdateCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersUpdateCall) Fields(s ...googleapi.Field) *AutoscalersUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersUpdateCall) Context(ctx context.Context) *AutoscalersUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AutoscalersUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.autoscalers.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an autoscaler in the specified project using the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.autoscalers.update", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "autoscaler": { + // "description": "Name of the autoscaler to update.", + // "location": "query", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/autoscalers", + // "request": { + // "$ref": "Autoscaler" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendBuckets.addSignedUrlKey": + +type BackendBucketsAddSignedUrlKeyCall struct { + s *Service + project string + backendBucket string + signedurlkey *SignedUrlKey + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// AddSignedUrlKey: Adds the given Signed URL Key to the backend bucket. +func (r *BackendBucketsService) AddSignedUrlKey(project string, backendBucket string, signedurlkey *SignedUrlKey) *BackendBucketsAddSignedUrlKeyCall { + c := &BackendBucketsAddSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendBucket = backendBucket + c.signedurlkey = signedurlkey + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendBucketsAddSignedUrlKeyCall) RequestId(requestId string) *BackendBucketsAddSignedUrlKeyCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendBucketsAddSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendBucketsAddSignedUrlKeyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendBucketsAddSignedUrlKeyCall) Context(ctx context.Context) *BackendBucketsAddSignedUrlKeyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *BackendBucketsAddSignedUrlKeyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendBucketsAddSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.signedurlkey) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}/addSignedUrlKey") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendBucket": c.backendBucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendBuckets.addSignedUrlKey" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendBucketsAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Adds the given Signed URL Key to the backend bucket.", + // "httpMethod": "POST", + // "id": "compute.backendBuckets.addSignedUrlKey", + // "parameterOrder": [ + // "project", + // "backendBucket" + // ], + // "parameters": { + // "backendBucket": { + // "description": "Name of the BackendBucket resource to which the Signed URL Key should be added. The name should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/backendBuckets/{backendBucket}/addSignedUrlKey", + // "request": { + // "$ref": "SignedUrlKey" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendBuckets.delete": + +type BackendBucketsDeleteCall struct { + s *Service + project string + backendBucket string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified BackendBucket resource. +func (r *BackendBucketsService) Delete(project string, backendBucket string) *BackendBucketsDeleteCall { + c := &BackendBucketsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendBucket = backendBucket + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendBucketsDeleteCall) RequestId(requestId string) *BackendBucketsDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendBucketsDeleteCall) Fields(s ...googleapi.Field) *BackendBucketsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c } -type ZoneSetLabelsRequest struct { - // LabelFingerprint: The fingerprint of the previous set of labels for - // this resource, used to detect conflicts. The fingerprint is initially - // generated by Compute Engine and changes after every request to modify - // or update labels. You must always provide an up-to-date fingerprint - // hash in order to update or change labels. Make a get() request to the - // resource to get the latest fingerprint. - LabelFingerprint string `json:"labelFingerprint,omitempty"` - - // Labels: The labels to set for this resource. - Labels map[string]string `json:"labels,omitempty"` +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendBucketsDeleteCall) Context(ctx context.Context) *BackendBucketsDeleteCall { + c.ctx_ = ctx + return c +} - // ForceSendFields is a list of field names (e.g. "LabelFingerprint") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. 
However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendBucketsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} - // NullFields is a list of field names (e.g. "LabelFingerprint") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` +func (c *BackendBucketsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendBucket": c.backendBucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } -func (s *ZoneSetLabelsRequest) MarshalJSON() ([]byte, error) { - type noMethod ZoneSetLabelsRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +// Do executes the "compute.backendBuckets.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendBucketsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified BackendBucket resource.", + // "httpMethod": "DELETE", + // "id": "compute.backendBuckets.delete", + // "parameterOrder": [ + // "project", + // "backendBucket" + // ], + // "parameters": { + // "backendBucket": { + // "description": "Name of the BackendBucket resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/backendBuckets/{backendBucket}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + } -// method id "compute.acceleratorTypes.aggregatedList": +// method id "compute.backendBuckets.deleteSignedUrlKey": -type AcceleratorTypesAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type BackendBucketsDeleteSignedUrlKeyCall struct { + s *Service + project string + backendBucket string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AggregatedList: Retrieves an aggregated list of accelerator types. -func (r *AcceleratorTypesService) AggregatedList(project string) *AcceleratorTypesAggregatedListCall { - c := &AcceleratorTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// DeleteSignedUrlKey: Deletes the given Signed URL Key from the backend +// bucket. 
+func (r *BackendBucketsService) DeleteSignedUrlKey(project string, backendBucket string, keyName string) *BackendBucketsDeleteSignedUrlKeyCall { + c := &BackendBucketsDeleteSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.backendBucket = backendBucket + c.urlParams_.Set("keyName", keyName) return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *AcceleratorTypesAggregatedListCall) Filter(filter string) *AcceleratorTypesAggregatedListCall { - c.urlParams_.Set("filter", filter) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendBucketsDeleteSignedUrlKeyCall) RequestId(requestId string) *BackendBucketsDeleteSignedUrlKeyCall { + c.urlParams_.Set("requestId", requestId) return c } -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *AcceleratorTypesAggregatedListCall) MaxResults(maxResults int64) *AcceleratorTypesAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendBucketsDeleteSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendBucketsDeleteSignedUrlKeyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *AcceleratorTypesAggregatedListCall) OrderBy(orderBy string) *AcceleratorTypesAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendBucketsDeleteSignedUrlKeyCall) Context(ctx context.Context) *BackendBucketsDeleteSignedUrlKeyCall { + c.ctx_ = ctx return c } -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *AcceleratorTypesAggregatedListCall) PageToken(pageToken string) *AcceleratorTypesAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendBucketsDeleteSignedUrlKeyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendBucketsDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}/deleteSignedUrlKey") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendBucket": c.backendBucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendBuckets.deleteSignedUrlKey" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendBucketsDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the given Signed URL Key from the backend bucket.", + // "httpMethod": "POST", + // "id": "compute.backendBuckets.deleteSignedUrlKey", + // "parameterOrder": [ + // "project", + // "backendBucket", + // "keyName" + // ], + // "parameters": { + // "backendBucket": { + // "description": "Name of the BackendBucket resource to which the Signed URL Key should be added. The name should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "keyName": { + // "description": "The name of the Signed URL Key to delete.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/backendBuckets/{backendBucket}/deleteSignedUrlKey", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendBuckets.get": + +type BackendBucketsGetCall struct { + s *Service + project string + backendBucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified BackendBucket resource. Get a list of +// available backend buckets by making a list() request. +func (r *BackendBucketsService) Get(project string, backendBucket string) *BackendBucketsGetCall { + c := &BackendBucketsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendBucket = backendBucket return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *AcceleratorTypesAggregatedListCall) Fields(s ...googleapi.Field) *AcceleratorTypesAggregatedListCall { +func (c *BackendBucketsGetCall) Fields(s ...googleapi.Field) *BackendBucketsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -20146,7 +34041,7 @@ func (c *AcceleratorTypesAggregatedListCall) Fields(s ...googleapi.Field) *Accel // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *AcceleratorTypesAggregatedListCall) IfNoneMatch(entityTag string) *AcceleratorTypesAggregatedListCall { +func (c *BackendBucketsGetCall) IfNoneMatch(entityTag string) *BackendBucketsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -20154,21 +34049,21 @@ func (c *AcceleratorTypesAggregatedListCall) IfNoneMatch(entityTag string) *Acce // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AcceleratorTypesAggregatedListCall) Context(ctx context.Context) *AcceleratorTypesAggregatedListCall { +func (c *BackendBucketsGetCall) Context(ctx context.Context) *BackendBucketsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AcceleratorTypesAggregatedListCall) Header() http.Header { +func (c *BackendBucketsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AcceleratorTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendBucketsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -20179,24 +34074,25 @@ func (c *AcceleratorTypesAggregatedListCall) doRequest(alt string) (*http.Respon } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/acceleratorTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "backendBucket": c.backendBucket, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.acceleratorTypes.aggregatedList" call. -// Exactly one of *AcceleratorTypeAggregatedList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *AcceleratorTypeAggregatedList.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.backendBuckets.get" call. +// Exactly one of *BackendBucket or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *BackendBucket.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*AcceleratorTypeAggregatedList, error) { +func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -20215,7 +34111,7 @@ func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (* if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &AcceleratorTypeAggregatedList{ + ret := &BackendBucket{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -20227,34 +34123,19 @@ func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Retrieves an aggregated list of accelerator types.", + // "description": "Returns the specified BackendBucket resource. Get a list of available backend buckets by making a list() request.", // "httpMethod": "GET", - // "id": "compute.acceleratorTypes.aggregatedList", + // "id": "compute.backendBuckets.get", // "parameterOrder": [ - // "project" + // "project", + // "backendBucket" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "backendBucket": { + // "description": "Name of the BackendBucket resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, // "project": { @@ -20265,9 +34146,9 @@ func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (* // "type": "string" // } // }, - // "path": "{project}/aggregated/acceleratorTypes", + // "path": "{project}/global/backendBuckets/{backendBucket}", // "response": { - // "$ref": "AcceleratorTypeAggregatedList" + // "$ref": "BackendBucket" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -20278,54 +34159,31 @@ func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (* } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *AcceleratorTypesAggregatedListCall) Pages(ctx context.Context, f func(*AcceleratorTypeAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.acceleratorTypes.get": +// method id "compute.backendBuckets.getIamPolicy": -type AcceleratorTypesGetCall struct { - s *Service - project string - zone string - acceleratorType string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type BackendBucketsGetIamPolicyCall struct { + s *Service + project string + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified accelerator type. Get a list of available -// accelerator types by making a list() request. -func (r *AcceleratorTypesService) Get(project string, zone string, acceleratorType string) *AcceleratorTypesGetCall { - c := &AcceleratorTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. +func (r *BackendBucketsService) GetIamPolicy(project string, resource string) *BackendBucketsGetIamPolicyCall { + c := &BackendBucketsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.acceleratorType = acceleratorType + c.resource = resource return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *AcceleratorTypesGetCall) Fields(s ...googleapi.Field) *AcceleratorTypesGetCall { +func (c *BackendBucketsGetIamPolicyCall) Fields(s ...googleapi.Field) *BackendBucketsGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -20335,7 +34193,7 @@ func (c *AcceleratorTypesGetCall) Fields(s ...googleapi.Field) *AcceleratorTypes // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *AcceleratorTypesGetCall) IfNoneMatch(entityTag string) *AcceleratorTypesGetCall { +func (c *BackendBucketsGetIamPolicyCall) IfNoneMatch(entityTag string) *BackendBucketsGetIamPolicyCall { c.ifNoneMatch_ = entityTag return c } @@ -20343,21 +34201,21 @@ func (c *AcceleratorTypesGetCall) IfNoneMatch(entityTag string) *AcceleratorType // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AcceleratorTypesGetCall) Context(ctx context.Context) *AcceleratorTypesGetCall { +func (c *BackendBucketsGetIamPolicyCall) Context(ctx context.Context) *BackendBucketsGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AcceleratorTypesGetCall) Header() http.Header { +func (c *BackendBucketsGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AcceleratorTypesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendBucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -20368,26 +34226,25 @@ func (c *AcceleratorTypesGetCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/acceleratorTypes/{acceleratorType}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "acceleratorType": c.acceleratorType, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.acceleratorTypes.get" call. -// Exactly one of *AcceleratorType or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *AcceleratorType.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *AcceleratorTypesGetCall) Do(opts ...googleapi.CallOption) (*AcceleratorType, error) { +// Do executes the "compute.backendBuckets.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. 
+func (c *BackendBucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -20406,7 +34263,7 @@ func (c *AcceleratorTypesGetCall) Do(opts ...googleapi.CallOption) (*Accelerator if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &AcceleratorType{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -20418,22 +34275,14 @@ func (c *AcceleratorTypesGetCall) Do(opts ...googleapi.CallOption) (*Accelerator } return ret, nil // { - // "description": "Returns the specified accelerator type. Get a list of available accelerator types by making a list() request.", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", // "httpMethod": "GET", - // "id": "compute.acceleratorTypes.get", + // "id": "compute.backendBuckets.getIamPolicy", // "parameterOrder": [ // "project", - // "zone", - // "acceleratorType" + // "resource" // ], // "parameters": { - // "acceleratorType": { - // "description": "Name of the accelerator type to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -20441,17 +34290,17 @@ func (c *AcceleratorTypesGetCall) Do(opts ...googleapi.CallOption) (*Accelerator // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "resource": { + // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z0-9](?:[-a-z0-9_]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/acceleratorTypes/{acceleratorType}", + // "path": "{project}/global/backendBuckets/{resource}/getIamPolicy", // "response": { - // "$ref": "AcceleratorType" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -20462,159 +34311,101 @@ func (c *AcceleratorTypesGetCall) Do(opts ...googleapi.CallOption) (*Accelerator } -// method id "compute.acceleratorTypes.list": +// method id "compute.backendBuckets.insert": -type AcceleratorTypesListCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type BackendBucketsInsertCall struct { + s *Service + project string + backendbucket *BackendBucket + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of accelerator types available to the -// specified project. -func (r *AcceleratorTypesService) List(project string, zone string) *AcceleratorTypesListCall { - c := &AcceleratorTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a BackendBucket resource in the specified project +// using the data included in the request. 
+func (r *BackendBucketsService) Insert(project string, backendbucket *BackendBucket) *BackendBucketsInsertCall { + c := &BackendBucketsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - return c -} - -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *AcceleratorTypesListCall) Filter(filter string) *AcceleratorTypesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *AcceleratorTypesListCall) MaxResults(maxResults int64) *AcceleratorTypesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + c.backendbucket = backendbucket return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. 
This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *AcceleratorTypesListCall) OrderBy(orderBy string) *AcceleratorTypesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *AcceleratorTypesListCall) PageToken(pageToken string) *AcceleratorTypesListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendBucketsInsertCall) RequestId(requestId string) *BackendBucketsInsertCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AcceleratorTypesListCall) Fields(s ...googleapi.Field) *AcceleratorTypesListCall { +func (c *BackendBucketsInsertCall) Fields(s ...googleapi.Field) *BackendBucketsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *AcceleratorTypesListCall) IfNoneMatch(entityTag string) *AcceleratorTypesListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AcceleratorTypesListCall) Context(ctx context.Context) *AcceleratorTypesListCall { +func (c *BackendBucketsInsertCall) Context(ctx context.Context) *BackendBucketsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AcceleratorTypesListCall) Header() http.Header { +func (c *BackendBucketsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AcceleratorTypesListCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendBucketsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendbucket) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/acceleratorTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.acceleratorTypes.list" call. -// Exactly one of *AcceleratorTypeList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *AcceleratorTypeList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *AcceleratorTypesListCall) Do(opts ...googleapi.CallOption) (*AcceleratorTypeList, error) { +// Do executes the "compute.backendBuckets.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendBucketsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -20633,7 +34424,7 @@ func (c *AcceleratorTypesListCall) Do(opts ...googleapi.CallOption) (*Accelerato if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &AcceleratorTypeList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -20645,37 +34436,13 @@ func (c *AcceleratorTypesListCall) Do(opts ...googleapi.CallOption) (*Accelerato } return ret, nil // { - // "description": "Retrieves a list of accelerator types available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.acceleratorTypes.list", + // "description": "Creates a BackendBucket resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.backendBuckets.insert", // "parameterOrder": [ - // "project", - // "zone" + // "project" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -20683,51 +34450,30 @@ func (c *AcceleratorTypesListCall) Do(opts ...googleapi.CallOption) (*Accelerato // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/acceleratorTypes", + // "path": "{project}/global/backendBuckets", + // "request": { + // "$ref": "BackendBucket" + // }, // "response": { - // "$ref": "AcceleratorTypeList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *AcceleratorTypesListCall) Pages(ctx context.Context, f func(*AcceleratorTypeList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.addresses.aggregatedList": +// method id "compute.backendBuckets.list": -type AddressesAggregatedListCall struct { +type BackendBucketsListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -20736,10 +34482,10 @@ type AddressesAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of addresses. -// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/aggregatedList -func (r *AddressesService) AggregatedList(project string) *AddressesAggregatedListCall { - c := &AddressesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of BackendBucket resources available to the +// specified project. +func (r *BackendBucketsService) List(project string) *BackendBucketsListCall { + c := &BackendBucketsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -20770,7 +34516,7 @@ func (r *AddressesService) AggregatedList(project string) *AddressesAggregatedLi // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *AddressesAggregatedListCall) Filter(filter string) *AddressesAggregatedListCall { +func (c *BackendBucketsListCall) Filter(filter string) *BackendBucketsListCall { c.urlParams_.Set("filter", filter) return c } @@ -20781,7 +34527,7 @@ func (c *AddressesAggregatedListCall) Filter(filter string) *AddressesAggregated // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *AddressesAggregatedListCall) MaxResults(maxResults int64) *AddressesAggregatedListCall { +func (c *BackendBucketsListCall) MaxResults(maxResults int64) *BackendBucketsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -20798,7 +34544,7 @@ func (c *AddressesAggregatedListCall) MaxResults(maxResults int64) *AddressesAgg // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *AddressesAggregatedListCall) OrderBy(orderBy string) *AddressesAggregatedListCall { +func (c *BackendBucketsListCall) OrderBy(orderBy string) *BackendBucketsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -20806,7 +34552,7 @@ func (c *AddressesAggregatedListCall) OrderBy(orderBy string) *AddressesAggregat // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *AddressesAggregatedListCall) PageToken(pageToken string) *AddressesAggregatedListCall { +func (c *BackendBucketsListCall) PageToken(pageToken string) *BackendBucketsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -20814,7 +34560,7 @@ func (c *AddressesAggregatedListCall) PageToken(pageToken string) *AddressesAggr // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AddressesAggregatedListCall) Fields(s ...googleapi.Field) *AddressesAggregatedListCall { +func (c *BackendBucketsListCall) Fields(s ...googleapi.Field) *BackendBucketsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -20824,7 +34570,7 @@ func (c *AddressesAggregatedListCall) Fields(s ...googleapi.Field) *AddressesAgg // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *AddressesAggregatedListCall) IfNoneMatch(entityTag string) *AddressesAggregatedListCall { +func (c *BackendBucketsListCall) IfNoneMatch(entityTag string) *BackendBucketsListCall { c.ifNoneMatch_ = entityTag return c } @@ -20832,21 +34578,21 @@ func (c *AddressesAggregatedListCall) IfNoneMatch(entityTag string) *AddressesAg // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AddressesAggregatedListCall) Context(ctx context.Context) *AddressesAggregatedListCall { +func (c *BackendBucketsListCall) Context(ctx context.Context) *BackendBucketsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AddressesAggregatedListCall) Header() http.Header { +func (c *BackendBucketsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AddressesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendBucketsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -20857,7 +34603,7 @@ func (c *AddressesAggregatedListCall) doRequest(alt string) (*http.Response, err } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/addresses") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders @@ -20867,14 +34613,14 @@ func (c *AddressesAggregatedListCall) doRequest(alt string) (*http.Response, err return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.addresses.aggregatedList" call. -// Exactly one of *AddressAggregatedList or error will be non-nil. Any +// Do executes the "compute.backendBuckets.list" call. +// Exactly one of *BackendBucketList or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *AddressAggregatedList.ServerResponse.Header or (if a response was +// *BackendBucketList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*AddressAggregatedList, error) { +func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucketList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -20893,7 +34639,7 @@ func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Address if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &AddressAggregatedList{ + ret := &BackendBucketList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -20905,9 +34651,9 @@ func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Address } return ret, nil // { - // "description": "Retrieves an aggregated list of addresses.", + // "description": "Retrieves the list of BackendBucket resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.addresses.aggregatedList", + // "id": "compute.backendBuckets.list", // "parameterOrder": [ // "project" // ], @@ -20943,9 +34689,9 @@ func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Address // "type": "string" // } // }, - // "path": "{project}/aggregated/addresses", + // "path": "{project}/global/backendBuckets", // "response": { - // "$ref": "AddressAggregatedList" + // "$ref": "BackendBucketList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -20959,7 +34705,7 @@ func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Address // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *AddressesAggregatedListCall) Pages(ctx context.Context, f func(*AddressAggregatedList) error) error { +func (c *BackendBucketsListCall) Pages(ctx context.Context, f func(*BackendBucketList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -20977,25 +34723,26 @@ func (c *AddressesAggregatedListCall) Pages(ctx context.Context, f func(*Address } } -// method id "compute.addresses.delete": +// method id "compute.backendBuckets.patch": -type AddressesDeleteCall struct { - s *Service - project string - region string - address string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BackendBucketsPatchCall struct { + s *Service + project string + backendBucket string + backendbucket *BackendBucket + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified address resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/delete -func (r *AddressesService) Delete(project string, region string, address string) *AddressesDeleteCall { - c := &AddressesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates the specified BackendBucket resource with the data +// included in the request. This method supports PATCH semantics and +// uses the JSON merge patch format and processing rules. 
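[illustrative sketch, not part of the patch] The BackendBucketsListCall defined earlier in this hunk keeps the usual list options (Filter, MaxResults, OrderBy, PageToken) plus the Pages helper that follows nextPageToken for you. Before the patch call that starts next, here is how a caller might drive it, under the same import assumptions as the previous sketch and with BackendBucketList.Items and BackendBucket.Name assumed from the public schema:

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

// listBackendBuckets walks every page of backend buckets in a project,
// letting the generated Pages helper chase nextPageToken internally.
func listBackendBuckets(ctx context.Context, svc *compute.Service, project string) error {
	call := svc.BackendBuckets.List(project).
		Filter("name ne test-bucket").    // filter syntax as documented above
		MaxResults(100).                  // up to 500 results per page
		OrderBy("creationTimestamp desc") // newest first, per the OrderBy docs

	return call.Pages(ctx, func(page *compute.BackendBucketList) error {
		for _, b := range page.Items { // Items assumed; only NextPageToken is visible in this hunk
			fmt.Println(b.Name)
		}
		return nil // returning a non-nil error would stop the iteration
	})
}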
+func (r *BackendBucketsService) Patch(project string, backendBucket string, backendbucket *BackendBucket) *BackendBucketsPatchCall { + c := &BackendBucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.address = address + c.backendBucket = backendBucket + c.backendbucket = backendbucket return c } @@ -21009,235 +34756,72 @@ func (r *AddressesService) Delete(project string, region string, address string) // same request ID, the server can check if original operation with the // same request ID was received, and if so, will ignore the second // request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *AddressesDeleteCall) RequestId(requestId string) *AddressesDeleteCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *AddressesDeleteCall) Fields(s ...googleapi.Field) *AddressesDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *AddressesDeleteCall) Context(ctx context.Context) *AddressesDeleteCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *AddressesDeleteCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *AddressesDeleteCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{address}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "address": c.address, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.addresses.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *AddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified address resource.", - // "httpMethod": "DELETE", - // "id": "compute.addresses.delete", - // "parameterOrder": [ - // "project", - // "region", - // "address" - // ], - // "parameters": { - // "address": { - // "description": "Name of the address resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/addresses/{address}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.addresses.get": - -type AddressesGetCall struct { - s *Service - project string - region string - address string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified address resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/get -func (r *AddressesService) Get(project string, region string, address string) *AddressesGetCall { - c := &AddressesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.address = address +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
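[illustrative sketch, not part of the patch] The requestId parameter documented here exists so that a retried mutation is deduplicated server-side: reuse one UUID across attempts and the server ignores the duplicates. A sketch against the new patch call, where github.com/google/uuid and the BackendBucket.EnableCdn field are assumptions outside this hunk:

package example

import (
	"context"
	"log"

	"github.com/google/uuid"                        // assumed helper for generating the UUID
	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

// patchWithRetry flips a single field on an existing backend bucket and keeps
// retrying with the same request ID, relying on the server-side deduplication
// described in the requestId documentation above.
func patchWithRetry(ctx context.Context, svc *compute.Service, project, bucket string) error {
	// JSON merge-patch semantics: only the fields set here are changed.
	patch := &compute.BackendBucket{EnableCdn: true}

	reqID := uuid.New().String() // must be a valid, non-zero UUID

	var lastErr error
	for attempt := 1; attempt <= 3; attempt++ {
		op, err := svc.BackendBuckets.Patch(project, bucket, patch).
			RequestId(reqID). // identical on every attempt
			Context(ctx).
			Do()
		if err == nil {
			log.Printf("patch accepted, operation %s", op.Name) // Operation.Name assumed
			return nil
		}
		lastErr = err
		log.Printf("attempt %d failed: %v", attempt, err)
	}
	return lastErr
}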
+func (c *BackendBucketsPatchCall) RequestId(requestId string) *BackendBucketsPatchCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AddressesGetCall) Fields(s ...googleapi.Field) *AddressesGetCall { +func (c *BackendBucketsPatchCall) Fields(s ...googleapi.Field) *BackendBucketsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *AddressesGetCall) IfNoneMatch(entityTag string) *AddressesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AddressesGetCall) Context(ctx context.Context) *AddressesGetCall { +func (c *BackendBucketsPatchCall) Context(ctx context.Context) *BackendBucketsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AddressesGetCall) Header() http.Header { +func (c *BackendBucketsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AddressesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendBucketsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendbucket) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{address}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("PATCH", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "address": c.address, + "project": c.project, + "backendBucket": c.backendBucket, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.addresses.get" call. -// Exactly one of *Address or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Address.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *AddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { +// Do executes the "compute.backendBuckets.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendBucketsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -21256,7 +34840,7 @@ func (c *AddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Address{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -21268,17 +34852,16 @@ func (c *AddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { } return ret, nil // { - // "description": "Returns the specified address resource.", - // "httpMethod": "GET", - // "id": "compute.addresses.get", + // "description": "Updates the specified BackendBucket resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.backendBuckets.patch", // "parameterOrder": [ // "project", - // "region", - // "address" + // "backendBucket" // ], // "parameters": { - // "address": { - // "description": "Name of the address resource to return.", + // "backendBucket": { + // "description": "Name of the BackendBucket resource to patch.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -21291,73 +34874,53 @@ func (c *AddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/regions/{region}/addresses/{address}", + // "path": "{project}/global/backendBuckets/{backendBucket}", + // "request": { + // "$ref": "BackendBucket" + // }, // "response": { - // "$ref": "Address" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.addresses.insert": +// method id "compute.backendBuckets.setIamPolicy": -type AddressesInsertCall struct { +type BackendBucketsSetIamPolicyCall struct { s *Service project string - region string - address *Address + resource string + policy *Policy urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Insert: Creates an address resource in the specified project using -// the data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/insert -func (r *AddressesService) Insert(project string, region string, address *Address) *AddressesInsertCall { - c := &AddressesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +func (r *BackendBucketsService) SetIamPolicy(project string, resource string, policy *Policy) *BackendBucketsSetIamPolicyCall { + c := &BackendBucketsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.address = address - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *AddressesInsertCall) RequestId(requestId string) *AddressesInsertCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.policy = policy return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AddressesInsertCall) Fields(s ...googleapi.Field) *AddressesInsertCall { +func (c *BackendBucketsSetIamPolicyCall) Fields(s ...googleapi.Field) *BackendBucketsSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -21365,52 +34928,52 @@ func (c *AddressesInsertCall) Fields(s ...googleapi.Field) *AddressesInsertCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
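[illustrative sketch, not part of the patch] Every call builder exposes the Context setter described here, and a cancelled or expired context aborts the pending HTTP request. For example, putting a deadline on the testIamPermissions call that appears further down in this hunk (the Permissions field names and the permission string are assumptions):

package example

import (
	"context"
	"fmt"
	"time"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

// canUpdateBucket asks, within a 10-second deadline, whether the caller holds
// an update permission on the given backend bucket. If the deadline passes,
// the context cancellation aborts the in-flight request.
func canUpdateBucket(svc *compute.Service, project, bucket string) (bool, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	req := &compute.TestPermissionsRequest{
		Permissions: []string{"compute.backendBuckets.update"}, // permission name assumed
	}
	resp, err := svc.BackendBuckets.TestIamPermissions(project, bucket, req).
		Context(ctx).
		Do()
	if err != nil {
		return false, fmt.Errorf("testIamPermissions: %v", err)
	}
	return len(resp.Permissions) == 1, nil // Permissions on the response is assumed as well
}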
-func (c *AddressesInsertCall) Context(ctx context.Context) *AddressesInsertCall { +func (c *BackendBucketsSetIamPolicyCall) Context(ctx context.Context) *BackendBucketsSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AddressesInsertCall) Header() http.Header { +func (c *BackendBucketsSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AddressesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendBucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.address) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.addresses.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *AddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.backendBuckets.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BackendBucketsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -21429,7 +34992,7 @@ func (c *AddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -21441,12 +35004,12 @@ func (c *AddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Creates an address resource in the specified project using the data included in the request.", + // "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", // "httpMethod": "POST", - // "id": "compute.addresses.insert", + // "id": "compute.backendBuckets.setIamPolicy", // "parameterOrder": [ // "project", - // "region" + // "resource" // ], // "parameters": { // "project": { @@ -21456,25 +35019,20 @@ func (c *AddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", + // "resource": { + // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z0-9](?:[-a-z0-9_]{0,61}[a-z0-9])?", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/addresses", + // "path": "{project}/global/backendBuckets/{resource}/setIamPolicy", // "request": { - // "$ref": "Address" + // "$ref": "Policy" // }, // "response": { - // "$ref": "Operation" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -21484,160 +35042,85 @@ func (c *AddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.addresses.list": +// method id "compute.backendBuckets.testIamPermissions": -type AddressesListCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type BackendBucketsTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of addresses contained within the specified -// region. -// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/list -func (r *AddressesService) List(project string, region string) *AddressesListCall { - c := &AddressesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *BackendBucketsService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *BackendBucketsTestIamPermissionsCall { + c := &BackendBucketsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - return c -} - -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. 
Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *AddressesListCall) Filter(filter string) *AddressesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *AddressesListCall) MaxResults(maxResults int64) *AddressesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *AddressesListCall) OrderBy(orderBy string) *AddressesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *AddressesListCall) PageToken(pageToken string) *AddressesListCall { - c.urlParams_.Set("pageToken", pageToken) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AddressesListCall) Fields(s ...googleapi.Field) *AddressesListCall { +func (c *BackendBucketsTestIamPermissionsCall) Fields(s ...googleapi.Field) *BackendBucketsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. 
This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *AddressesListCall) IfNoneMatch(entityTag string) *AddressesListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AddressesListCall) Context(ctx context.Context) *AddressesListCall { +func (c *BackendBucketsTestIamPermissionsCall) Context(ctx context.Context) *BackendBucketsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AddressesListCall) Header() http.Header { +func (c *BackendBucketsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AddressesListCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendBucketsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.addresses.list" call. -// Exactly one of *AddressList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *AddressList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, error) { +// Do executes the "compute.backendBuckets.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BackendBucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -21656,7 +35139,7 @@ func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, erro if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &AddressList{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -21668,37 +35151,14 @@ func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, erro } return ret, nil // { - // "description": "Retrieves a list of addresses contained within the specified region.", - // "httpMethod": "GET", - // "id": "compute.addresses.list", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.backendBuckets.testIamPermissions", // "parameterOrder": [ // "project", - // "region" + // "resource" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -21706,17 +35166,20 @@ func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, erro // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", + // "resource": { + // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/addresses", + // "path": "{project}/global/backendBuckets/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "AddressList" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -21727,48 +35190,25 @@ func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, erro } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *AddressesListCall) Pages(ctx context.Context, f func(*AddressList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.addresses.setLabels": +// method id "compute.backendBuckets.update": -type AddressesSetLabelsCall struct { - s *Service - project string - region string - resource string - regionsetlabelsrequest *RegionSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BackendBucketsUpdateCall struct { + s *Service + project string + backendBucket string + backendbucket *BackendBucket + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetLabels: Sets the labels on an Address. To learn more about labels, -// read the Labeling Resources documentation. -func (r *AddressesService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *AddressesSetLabelsCall { - c := &AddressesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates the specified BackendBucket resource with the data +// included in the request. +func (r *BackendBucketsService) Update(project string, backendBucket string, backendbucket *BackendBucket) *BackendBucketsUpdateCall { + c := &BackendBucketsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.resource = resource - c.regionsetlabelsrequest = regionsetlabelsrequest + c.backendBucket = backendBucket + c.backendbucket = backendbucket return c } @@ -21786,7 +35226,7 @@ func (r *AddressesService) SetLabels(project string, region string, resource str // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *AddressesSetLabelsCall) RequestId(requestId string) *AddressesSetLabelsCall { +func (c *BackendBucketsUpdateCall) RequestId(requestId string) *BackendBucketsUpdateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -21794,7 +35234,7 @@ func (c *AddressesSetLabelsCall) RequestId(requestId string) *AddressesSetLabels // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AddressesSetLabelsCall) Fields(s ...googleapi.Field) *AddressesSetLabelsCall { +func (c *BackendBucketsUpdateCall) Fields(s ...googleapi.Field) *BackendBucketsUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -21802,53 +35242,52 @@ func (c *AddressesSetLabelsCall) Fields(s ...googleapi.Field) *AddressesSetLabel // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AddressesSetLabelsCall) Context(ctx context.Context) *AddressesSetLabelsCall { +func (c *BackendBucketsUpdateCall) Context(ctx context.Context) *BackendBucketsUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AddressesSetLabelsCall) Header() http.Header { +func (c *BackendBucketsUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AddressesSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendBucketsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendbucket) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{resource}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("PUT", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, + "backendBucket": c.backendBucket, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.addresses.setLabels" call. +// Do executes the "compute.backendBuckets.update" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *AddressesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *BackendBucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -21879,26 +35318,25 @@ func (c *AddressesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Sets the labels on an Address. To learn more about labels, read the Labeling Resources documentation.", - // "httpMethod": "POST", - // "id": "compute.addresses.setLabels", + // "description": "Updates the specified BackendBucket resource with the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.backendBuckets.update", // "parameterOrder": [ // "project", - // "region", - // "resource" + // "backendBucket" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "backendBucket": { + // "description": "Name of the BackendBucket resource to update.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "region": { - // "description": "The region for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, @@ -21906,18 +35344,11 @@ func (c *AddressesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/addresses/{resource}/setLabels", + // "path": "{project}/global/backendBuckets/{backendBucket}", // "request": { - // "$ref": "RegionSetLabelsRequest" + // "$ref": "BackendBucket" // }, // "response": { // "$ref": "Operation" @@ -21930,34 +35361,51 @@ func (c *AddressesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e } -// method id "compute.addresses.testIamPermissions": +// method id "compute.backendServices.addSignedUrlKey": -type AddressesTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BackendServicesAddSignedUrlKeyCall struct { + s *Service + project string + backendService string + signedurlkey *SignedUrlKey + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// AddSignedUrlKey: Adds the given Signed URL Key to the specified +// backend service. +func (r *BackendServicesService) AddSignedUrlKey(project string, backendService string, signedurlkey *SignedUrlKey) *BackendServicesAddSignedUrlKeyCall { + c := &BackendServicesAddSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendService = backendService + c.signedurlkey = signedurlkey + return c } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *AddressesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *AddressesTestIamPermissionsCall { - c := &AddressesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendServicesAddSignedUrlKeyCall) RequestId(requestId string) *BackendServicesAddSignedUrlKeyCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
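[illustrative sketch, not part of the patch] Fields, as documented here, applies a partial-response mask so the server returns only the listed paths, which keeps large list responses small. When combined with the Pages helper the mask must keep nextPageToken, otherwise paging stops after the first page. The field paths below follow the linked partial-response syntax but are assumptions about the schema:

package example

import (
	"context"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

// bucketNames collects only the bucket names, asking the server to strip
// everything else from each page of results.
func bucketNames(ctx context.Context, svc *compute.Service, project string) ([]string, error) {
	var names []string
	err := svc.BackendBuckets.List(project).
		Fields("items/name", "nextPageToken"). // nextPageToken kept so Pages can continue
		Pages(ctx, func(page *compute.BackendBucketList) error {
			for _, b := range page.Items { // Items / Name assumed from the public schema
				names = append(names, b.Name)
			}
			return nil
		})
	return names, err
}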
-func (c *AddressesTestIamPermissionsCall) Fields(s ...googleapi.Field) *AddressesTestIamPermissionsCall { +func (c *BackendServicesAddSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendServicesAddSignedUrlKeyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -21965,53 +35413,52 @@ func (c *AddressesTestIamPermissionsCall) Fields(s ...googleapi.Field) *Addresse // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AddressesTestIamPermissionsCall) Context(ctx context.Context) *AddressesTestIamPermissionsCall { +func (c *BackendServicesAddSignedUrlKeyCall) Context(ctx context.Context) *BackendServicesAddSignedUrlKeyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AddressesTestIamPermissionsCall) Header() http.Header { +func (c *BackendServicesAddSignedUrlKeyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AddressesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesAddSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.signedurlkey) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/addSignedUrlKey") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.addresses.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *AddressesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.backendServices.addSignedUrlKey" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendServicesAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -22030,7 +35477,7 @@ func (c *AddressesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -22042,56 +35489,51 @@ func (c *AddressesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Adds the given Signed URL Key to the specified backend service.", // "httpMethod": "POST", - // "id": "compute.addresses.testIamPermissions", + // "id": "compute.backendServices.addSignedUrlKey", // "parameterOrder": [ // "project", - // "region", - // "resource" + // "backendService" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "backendService": { + // "description": "Name of the BackendService resource to which the Signed URL Key should be added. The name should conform to RFC1035.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "region": { - // "description": "The name of the region for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/regions/{region}/addresses/{resource}/testIamPermissions", + // "path": "{project}/global/backendServices/{backendService}/addSignedUrlKey", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "SignedUrlKey" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.autoscalers.aggregatedList": +// method id "compute.backendServices.aggregatedList": -type AutoscalersAggregatedListCall struct { +type BackendServicesAggregatedListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -22100,9 +35542,10 @@ type AutoscalersAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of autoscalers. -func (r *AutoscalersService) AggregatedList(project string) *AutoscalersAggregatedListCall { - c := &AutoscalersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves the list of all BackendService resources, +// regional and global, available to the specified project. +func (r *BackendServicesService) AggregatedList(project string) *BackendServicesAggregatedListCall { + c := &BackendServicesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -22133,7 +35576,7 @@ func (r *AutoscalersService) AggregatedList(project string) *AutoscalersAggregat // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *AutoscalersAggregatedListCall) Filter(filter string) *AutoscalersAggregatedListCall { +func (c *BackendServicesAggregatedListCall) Filter(filter string) *BackendServicesAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -22144,7 +35587,7 @@ func (c *AutoscalersAggregatedListCall) Filter(filter string) *AutoscalersAggreg // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *AutoscalersAggregatedListCall) MaxResults(maxResults int64) *AutoscalersAggregatedListCall { +func (c *BackendServicesAggregatedListCall) MaxResults(maxResults int64) *BackendServicesAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -22161,7 +35604,7 @@ func (c *AutoscalersAggregatedListCall) MaxResults(maxResults int64) *Autoscaler // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *AutoscalersAggregatedListCall) OrderBy(orderBy string) *AutoscalersAggregatedListCall { +func (c *BackendServicesAggregatedListCall) OrderBy(orderBy string) *BackendServicesAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -22169,7 +35612,7 @@ func (c *AutoscalersAggregatedListCall) OrderBy(orderBy string) *AutoscalersAggr // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. 
Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *AutoscalersAggregatedListCall) PageToken(pageToken string) *AutoscalersAggregatedListCall { +func (c *BackendServicesAggregatedListCall) PageToken(pageToken string) *BackendServicesAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -22177,7 +35620,7 @@ func (c *AutoscalersAggregatedListCall) PageToken(pageToken string) *Autoscalers // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AutoscalersAggregatedListCall) Fields(s ...googleapi.Field) *AutoscalersAggregatedListCall { +func (c *BackendServicesAggregatedListCall) Fields(s ...googleapi.Field) *BackendServicesAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -22187,7 +35630,7 @@ func (c *AutoscalersAggregatedListCall) Fields(s ...googleapi.Field) *Autoscaler // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *AutoscalersAggregatedListCall) IfNoneMatch(entityTag string) *AutoscalersAggregatedListCall { +func (c *BackendServicesAggregatedListCall) IfNoneMatch(entityTag string) *BackendServicesAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -22195,21 +35638,21 @@ func (c *AutoscalersAggregatedListCall) IfNoneMatch(entityTag string) *Autoscale // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersAggregatedListCall) Context(ctx context.Context) *AutoscalersAggregatedListCall { +func (c *BackendServicesAggregatedListCall) Context(ctx context.Context) *BackendServicesAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AutoscalersAggregatedListCall) Header() http.Header { +func (c *BackendServicesAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -22220,7 +35663,7 @@ func (c *AutoscalersAggregatedListCall) doRequest(alt string) (*http.Response, e } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/backendServices") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders @@ -22230,14 +35673,14 @@ func (c *AutoscalersAggregatedListCall) doRequest(alt string) (*http.Response, e return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.aggregatedList" call. -// Exactly one of *AutoscalerAggregatedList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *AutoscalerAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.backendServices.aggregatedList" call. 
+// Exactly one of *BackendServiceAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *BackendServiceAggregatedList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*AutoscalerAggregatedList, error) { +func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*BackendServiceAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -22256,7 +35699,7 @@ func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*Autos if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &AutoscalerAggregatedList{ + ret := &BackendServiceAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -22268,9 +35711,9 @@ func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*Autos } return ret, nil // { - // "description": "Retrieves an aggregated list of autoscalers.", + // "description": "Retrieves the list of all BackendService resources, regional and global, available to the specified project.", // "httpMethod": "GET", - // "id": "compute.autoscalers.aggregatedList", + // "id": "compute.backendServices.aggregatedList", // "parameterOrder": [ // "project" // ], @@ -22299,16 +35742,16 @@ func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*Autos // "type": "string" // }, // "project": { - // "description": "Project ID for this request.", + // "description": "Name of the project scoping this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/aggregated/autoscalers", + // "path": "{project}/aggregated/backendServices", // "response": { - // "$ref": "AutoscalerAggregatedList" + // "$ref": "BackendServiceAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -22322,7 +35765,7 @@ func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*Autos // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
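// Illustrative sketch (not generated code): iterating every page of the
// aggregated backend-service list with the Pages helper defined just below.
// AggregatedList, MaxResults, and the *BackendServiceAggregatedList callback
// type come from this diff; the project name and the page handling are
// placeholders. Requires "context" and this vendored compute package.
func iterateBackendServicePagesExample(ctx context.Context, svc *compute.Service) error {
	pages := 0
	err := svc.BackendServices.
		AggregatedList("example-project").
		MaxResults(100). // acceptable values are 0 to 500; 500 is the default
		Pages(ctx, func(page *compute.BackendServiceAggregatedList) error {
			pages++ // inspect page contents here; returning an error halts iteration
			return nil
		})
	if err != nil {
		return err
	}
	_ = pages
	return nil
}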
-func (c *AutoscalersAggregatedListCall) Pages(ctx context.Context, f func(*AutoscalerAggregatedList) error) error { +func (c *BackendServicesAggregatedListCall) Pages(ctx context.Context, f func(*BackendServiceAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -22340,24 +35783,23 @@ func (c *AutoscalersAggregatedListCall) Pages(ctx context.Context, f func(*Autos } } -// method id "compute.autoscalers.delete": +// method id "compute.backendServices.delete": -type AutoscalersDeleteCall struct { - s *Service - project string - zone string - autoscaler string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BackendServicesDeleteCall struct { + s *Service + project string + backendService string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified autoscaler. -func (r *AutoscalersService) Delete(project string, zone string, autoscaler string) *AutoscalersDeleteCall { - c := &AutoscalersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified BackendService resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/delete +func (r *BackendServicesService) Delete(project string, backendService string) *BackendServicesDeleteCall { + c := &BackendServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.autoscaler = autoscaler + c.backendService = backendService return c } @@ -22375,7 +35817,7 @@ func (r *AutoscalersService) Delete(project string, zone string, autoscaler stri // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *AutoscalersDeleteCall) RequestId(requestId string) *AutoscalersDeleteCall { +func (c *BackendServicesDeleteCall) RequestId(requestId string) *BackendServicesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -22383,7 +35825,7 @@ func (c *AutoscalersDeleteCall) RequestId(requestId string) *AutoscalersDeleteCa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AutoscalersDeleteCall) Fields(s ...googleapi.Field) *AutoscalersDeleteCall { +func (c *BackendServicesDeleteCall) Fields(s ...googleapi.Field) *BackendServicesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -22391,48 +35833,372 @@ func (c *AutoscalersDeleteCall) Fields(s ...googleapi.Field) *AutoscalersDeleteC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersDeleteCall) Context(ctx context.Context) *AutoscalersDeleteCall { +func (c *BackendServicesDeleteCall) Context(ctx context.Context) *BackendServicesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *AutoscalersDeleteCall) Header() http.Header { +func (c *BackendServicesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendServicesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendService": c.backendService, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendServices.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified BackendService resource.", + // "httpMethod": "DELETE", + // "id": "compute.backendServices.delete", + // "parameterOrder": [ + // "project", + // "backendService" + // ], + // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/backendServices/{backendService}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendServices.deleteSignedUrlKey": + +type BackendServicesDeleteSignedUrlKeyCall struct { + s *Service + project string + backendService string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// DeleteSignedUrlKey: Deletes the given Signed URL Key from the +// specified backend service. +func (r *BackendServicesService) DeleteSignedUrlKey(project string, backendService string, keyName string) *BackendServicesDeleteSignedUrlKeyCall { + c := &BackendServicesDeleteSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendService = backendService + c.urlParams_.Set("keyName", keyName) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendServicesDeleteSignedUrlKeyCall) RequestId(requestId string) *BackendServicesDeleteSignedUrlKeyCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesDeleteSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendServicesDeleteSignedUrlKeyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesDeleteSignedUrlKeyCall) Context(ctx context.Context) *BackendServicesDeleteSignedUrlKeyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendServicesDeleteSignedUrlKeyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendServicesDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/deleteSignedUrlKey") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendService": c.backendService, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendServices.deleteSignedUrlKey" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendServicesDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the given Signed URL Key from the specified backend service.", + // "httpMethod": "POST", + // "id": "compute.backendServices.deleteSignedUrlKey", + // "parameterOrder": [ + // "project", + // "backendService", + // "keyName" + // ], + // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to which the Signed URL Key should be added. The name should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "keyName": { + // "description": "The name of the Signed URL Key to delete.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/backendServices/{backendService}/deleteSignedUrlKey", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendServices.get": + +type BackendServicesGetCall struct { + s *Service + project string + backendService string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified BackendService resource. Get a list of +// available backend services by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/get +func (r *BackendServicesService) Get(project string, backendService string) *BackendServicesGetCall { + c := &BackendServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendService = backendService + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesGetCall) Fields(s ...googleapi.Field) *BackendServicesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *BackendServicesGetCall) IfNoneMatch(entityTag string) *BackendServicesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesGetCall) Context(ctx context.Context) *BackendServicesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendServicesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers/{autoscaler}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "autoscaler": c.autoscaler, + "project": c.project, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.backendServices.get" call. +// Exactly one of *BackendService or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *AutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// *BackendService.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendService, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -22451,7 +36217,7 @@ func (c *AutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &BackendService{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -22463,17 +36229,16 @@ func (c *AutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Deletes the specified autoscaler.", - // "httpMethod": "DELETE", - // "id": "compute.autoscalers.delete", + // "description": "Returns the specified BackendService resource. Get a list of available backend services by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.backendServices.get", // "parameterOrder": [ // "project", - // "zone", - // "autoscaler" + // "backendService" // ], // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to delete.", + // "backendService": { + // "description": "Name of the BackendService resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -22485,121 +36250,101 @@ func (c *AutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "Name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/autoscalers/{autoscaler}", + // "path": "{project}/global/backendServices/{backendService}", // "response": { - // "$ref": "Operation" + // "$ref": "BackendService" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.autoscalers.get": +// method id "compute.backendServices.getHealth": -type AutoscalersGetCall struct { - s *Service - project string - zone string - autoscaler string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type BackendServicesGetHealthCall struct { + s *Service + project string + backendService string + resourcegroupreference *ResourceGroupReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified autoscaler resource. Get a list of -// available autoscalers by making a list() request. -func (r *AutoscalersService) Get(project string, zone string, autoscaler string) *AutoscalersGetCall { - c := &AutoscalersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetHealth: Gets the most recent health check results for this +// BackendService. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/getHealth +func (r *BackendServicesService) GetHealth(project string, backendService string, resourcegroupreference *ResourceGroupReference) *BackendServicesGetHealthCall { + c := &BackendServicesGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.autoscaler = autoscaler + c.backendService = backendService + c.resourcegroupreference = resourcegroupreference return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AutoscalersGetCall) Fields(s ...googleapi.Field) *AutoscalersGetCall { +func (c *BackendServicesGetHealthCall) Fields(s ...googleapi.Field) *BackendServicesGetHealthCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *AutoscalersGetCall) IfNoneMatch(entityTag string) *AutoscalersGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
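// Illustrative sketch (not generated code): requesting health results for a
// backend service via the getHealth method defined in this section. The
// GetHealth signature and the *BackendServiceGroupHealth return type are taken
// from this diff; the ResourceGroupReference.Group field, the instance-group
// URL, and the resource names are assumptions for illustration only.
func getHealthExample(ctx context.Context, svc *compute.Service) (*compute.BackendServiceGroupHealth, error) {
	group := &compute.ResourceGroupReference{
		// Hypothetical: fully qualified URL of the instance group backing the service.
		Group: "https://www.googleapis.com/compute/v1/projects/example-project/zones/us-central1-a/instanceGroups/example-ig",
	}
	return svc.BackendServices.
		GetHealth("example-project", "example-backend", group).
		Context(ctx).
		Do()
}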
-func (c *AutoscalersGetCall) Context(ctx context.Context) *AutoscalersGetCall { +func (c *BackendServicesGetHealthCall) Context(ctx context.Context) *BackendServicesGetHealthCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AutoscalersGetCall) Header() http.Header { +func (c *BackendServicesGetHealthCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersGetCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesGetHealthCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.resourcegroupreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers/{autoscaler}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/getHealth") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "autoscaler": c.autoscaler, + "project": c.project, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.get" call. -// Exactly one of *Autoscaler or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Autoscaler.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, error) { +// Do executes the "compute.backendServices.getHealth" call. +// Exactly one of *BackendServiceGroupHealth or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *BackendServiceGroupHealth.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*BackendServiceGroupHealth, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -22618,7 +36363,7 @@ func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, erro if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Autoscaler{ + ret := &BackendServiceGroupHealth{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -22630,40 +36375,34 @@ func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, erro } return ret, nil // { - // "description": "Returns the specified autoscaler resource. 
Get a list of available autoscalers by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.autoscalers.get", + // "description": "Gets the most recent health check results for this BackendService.", + // "httpMethod": "POST", + // "id": "compute.backendServices.getHealth", // "parameterOrder": [ // "project", - // "zone", - // "autoscaler" + // "backendService" // ], // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to return.", + // "backendService": { + // "description": "Name of the BackendService resource to which the queried instance belongs.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, // "project": { - // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "zone": { - // "description": "Name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/autoscalers/{autoscaler}", + // "path": "{project}/global/backendServices/{backendService}/getHealth", + // "request": { + // "$ref": "ResourceGroupReference" + // }, // "response": { - // "$ref": "Autoscaler" + // "$ref": "BackendServiceGroupHealth" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -22674,25 +36413,26 @@ func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, erro } -// method id "compute.autoscalers.insert": +// method id "compute.backendServices.insert": -type AutoscalersInsertCall struct { - s *Service - project string - zone string - autoscaler *Autoscaler - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BackendServicesInsertCall struct { + s *Service + project string + backendservice *BackendService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates an autoscaler in the specified project using the data -// included in the request. -func (r *AutoscalersService) Insert(project string, zone string, autoscaler *Autoscaler) *AutoscalersInsertCall { - c := &AutoscalersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a BackendService resource in the specified project +// using the data included in the request. There are several +// restrictions and guidelines to keep in mind when creating a backend +// service. Read Restrictions and Guidelines for more information. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/insert +func (r *BackendServicesService) Insert(project string, backendservice *BackendService) *BackendServicesInsertCall { + c := &BackendServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.autoscaler = autoscaler + c.backendservice = backendservice return c } @@ -22710,7 +36450,7 @@ func (r *AutoscalersService) Insert(project string, zone string, autoscaler *Aut // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
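// Illustrative sketch (not generated code): the retry pattern described by the
// requestId documentation above, applied to the insert method in this section.
// Insert and RequestId match the signatures in this diff; the BackendService
// Name field, the UUID literal, and the retry loop are assumptions for
// illustration only.
func insertWithRequestIdExample(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	// Reuse the same request ID on every attempt so a retried request that the
	// server already completed is ignored rather than duplicated.
	const requestID = "123e4567-e89b-12d3-a456-426614174000" // hypothetical, non-zero UUID
	backend := &compute.BackendService{Name: "example-backend"}
	var op *compute.Operation
	var err error
	for attempt := 0; attempt < 3; attempt++ {
		op, err = svc.BackendServices.
			Insert("example-project", backend).
			RequestId(requestID).
			Context(ctx).
			Do()
		if err == nil {
			return op, nil
		}
	}
	return nil, err
}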
-func (c *AutoscalersInsertCall) RequestId(requestId string) *AutoscalersInsertCall { +func (c *BackendServicesInsertCall) RequestId(requestId string) *BackendServicesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -22718,7 +36458,7 @@ func (c *AutoscalersInsertCall) RequestId(requestId string) *AutoscalersInsertCa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AutoscalersInsertCall) Fields(s ...googleapi.Field) *AutoscalersInsertCall { +func (c *BackendServicesInsertCall) Fields(s ...googleapi.Field) *BackendServicesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -22726,52 +36466,51 @@ func (c *AutoscalersInsertCall) Fields(s ...googleapi.Field) *AutoscalersInsertC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersInsertCall) Context(ctx context.Context) *AutoscalersInsertCall { +func (c *BackendServicesInsertCall) Context(ctx context.Context) *BackendServicesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AutoscalersInsertCall) Header() http.Header { +func (c *BackendServicesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.insert" call. +// Do executes the "compute.backendServices.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *AutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -22802,12 +36541,11 @@ func (c *AutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Creates an autoscaler in the specified project using the data included in the request.", + // "description": "Creates a BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a backend service. Read Restrictions and Guidelines for more information.", // "httpMethod": "POST", - // "id": "compute.autoscalers.insert", + // "id": "compute.backendServices.insert", // "parameterOrder": [ - // "project", - // "zone" + // "project" // ], // "parameters": { // "project": { @@ -22821,18 +36559,11 @@ func (c *AutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "Name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/autoscalers", + // "path": "{project}/global/backendServices", // "request": { - // "$ref": "Autoscaler" + // "$ref": "BackendService" // }, // "response": { // "$ref": "Operation" @@ -22845,24 +36576,23 @@ func (c *AutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er } -// method id "compute.autoscalers.list": +// method id "compute.backendServices.list": -type AutoscalersListCall struct { +type BackendServicesListCall struct { s *Service project string - zone string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of autoscalers contained within the specified -// zone. -func (r *AutoscalersService) List(project string, zone string) *AutoscalersListCall { - c := &AutoscalersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of BackendService resources available to the +// specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/list +func (r *BackendServicesService) List(project string) *BackendServicesListCall { + c := &BackendServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone return c } @@ -22892,7 +36622,7 @@ func (r *AutoscalersService) List(project string, zone string) *AutoscalersListC // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. 
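// Illustrative sketch (not generated code): a filtered, ordered list request
// using the Filter and OrderBy parameters documented above. List, Filter,
// OrderBy, MaxResults, and the *BackendServiceList return type come from this
// diff; the filter expression, project name, and result handling are
// placeholders.
func listFilteredBackendServicesExample(ctx context.Context, svc *compute.Service) (*compute.BackendServiceList, error) {
	return svc.BackendServices.
		List("example-project").
		Filter("name eq example-backend").  // FIELD_NAME eq LITERAL_STRING, per the comment above
		OrderBy("creationTimestamp desc"). // newest first
		MaxResults(50).
		Context(ctx).
		Do()
}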
-func (c *AutoscalersListCall) Filter(filter string) *AutoscalersListCall { +func (c *BackendServicesListCall) Filter(filter string) *BackendServicesListCall { c.urlParams_.Set("filter", filter) return c } @@ -22903,7 +36633,7 @@ func (c *AutoscalersListCall) Filter(filter string) *AutoscalersListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *AutoscalersListCall) MaxResults(maxResults int64) *AutoscalersListCall { +func (c *BackendServicesListCall) MaxResults(maxResults int64) *BackendServicesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -22920,7 +36650,7 @@ func (c *AutoscalersListCall) MaxResults(maxResults int64) *AutoscalersListCall // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *AutoscalersListCall) OrderBy(orderBy string) *AutoscalersListCall { +func (c *BackendServicesListCall) OrderBy(orderBy string) *BackendServicesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -22928,7 +36658,7 @@ func (c *AutoscalersListCall) OrderBy(orderBy string) *AutoscalersListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *AutoscalersListCall) PageToken(pageToken string) *AutoscalersListCall { +func (c *BackendServicesListCall) PageToken(pageToken string) *BackendServicesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -22936,7 +36666,7 @@ func (c *AutoscalersListCall) PageToken(pageToken string) *AutoscalersListCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AutoscalersListCall) Fields(s ...googleapi.Field) *AutoscalersListCall { +func (c *BackendServicesListCall) Fields(s ...googleapi.Field) *BackendServicesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -22946,7 +36676,7 @@ func (c *AutoscalersListCall) Fields(s ...googleapi.Field) *AutoscalersListCall // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *AutoscalersListCall) IfNoneMatch(entityTag string) *AutoscalersListCall { +func (c *BackendServicesListCall) IfNoneMatch(entityTag string) *BackendServicesListCall { c.ifNoneMatch_ = entityTag return c } @@ -22954,21 +36684,21 @@ func (c *AutoscalersListCall) IfNoneMatch(entityTag string) *AutoscalersListCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersListCall) Context(ctx context.Context) *AutoscalersListCall { +func (c *BackendServicesListCall) Context(ctx context.Context) *BackendServicesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *AutoscalersListCall) Header() http.Header { +func (c *BackendServicesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersListCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -22979,25 +36709,24 @@ func (c *AutoscalersListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.list" call. -// Exactly one of *AutoscalerList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *AutoscalerList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.backendServices.list" call. +// Exactly one of *BackendServiceList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BackendServiceList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, error) { +func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServiceList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -23016,7 +36745,7 @@ func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &AutoscalerList{ + ret := &BackendServiceList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -23028,12 +36757,11 @@ func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, } return ret, nil // { - // "description": "Retrieves a list of autoscalers contained within the specified zone.", + // "description": "Retrieves the list of BackendService resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.autoscalers.list", + // "id": "compute.backendServices.list", // "parameterOrder": [ - // "project", - // "zone" + // "project" // ], // "parameters": { // "filter": { @@ -23059,231 +36787,17 @@ func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "Name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/autoscalers", - // "response": { - // "$ref": "AutoscalerList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *AutoscalersListCall) Pages(ctx context.Context, f func(*AutoscalerList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.autoscalers.patch": - -type AutoscalersPatchCall struct { - s *Service - project string - zone string - autoscaler *Autoscaler - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Patch: Updates an autoscaler in the specified project using the data -// included in the request. This method supports PATCH semantics and -// uses the JSON merge patch format and processing rules. -func (r *AutoscalersService) Patch(project string, zone string, autoscaler *Autoscaler) *AutoscalersPatchCall { - c := &AutoscalersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.autoscaler = autoscaler - return c -} - -// Autoscaler sets the optional parameter "autoscaler": Name of the -// autoscaler to patch. -func (c *AutoscalersPatchCall) Autoscaler(autoscaler string) *AutoscalersPatchCall { - c.urlParams_.Set("autoscaler", autoscaler) - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. 
Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *AutoscalersPatchCall) RequestId(requestId string) *AutoscalersPatchCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *AutoscalersPatchCall) Fields(s ...googleapi.Field) *AutoscalersPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *AutoscalersPatchCall) Context(ctx context.Context) *AutoscalersPatchCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *AutoscalersPatchCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *AutoscalersPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.autoscalers.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *AutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates an autoscaler in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.autoscalers.patch", - // "parameterOrder": [ - // "project", - // "zone" - // ], - // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to patch.", - // "location": "query", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "Name of the zone for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/autoscalers", - // "request": { - // "$ref": "Autoscaler" - // }, + // "path": "{project}/global/backendServices", // "response": { - // "$ref": "Operation" + // "$ref": "BackendServiceList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -23294,34 +36808,77 @@ func (c *AutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err } -// method id "compute.autoscalers.testIamPermissions": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *BackendServicesListCall) Pages(ctx context.Context, f func(*BackendServiceList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type AutoscalersTestIamPermissionsCall struct { - s *Service - project string - zone string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.backendServices.patch": + +type BackendServicesPatchCall struct { + s *Service + project string + backendService string + backendservice *BackendService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *AutoscalersService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *AutoscalersTestIamPermissionsCall { - c := &AutoscalersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Patches the specified BackendService resource with the data +// included in the request. There are several restrictions and +// guidelines to keep in mind when updating a backend service. Read +// Restrictions and Guidelines for more information. This method +// supports PATCH semantics and uses the JSON merge patch format and +// processing rules. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/patch +func (r *BackendServicesService) Patch(project string, backendService string, backendservice *BackendService) *BackendServicesPatchCall { + c := &BackendServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.backendService = backendService + c.backendservice = backendservice + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendServicesPatchCall) RequestId(requestId string) *BackendServicesPatchCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
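// Usage sketch (illustrative, not part of the generated client): paging
// through backend services with the List call and the Pages helper defined
// above. The svc.BackendServices field, the List(project) constructor, and
// the Items/Name fields on BackendServiceList are assumptions based on the
// usual shape of this generated package; Pages(ctx, f) is taken from the
// code above, and the surrounding file's imports are assumed.
func exampleListBackendServices(ctx context.Context, svc *Service, project string) ([]string, error) {
	var names []string
	err := svc.BackendServices.List(project).Pages(ctx, func(page *BackendServiceList) error {
		for _, bs := range page.Items { // Items/Name fields assumed
			names = append(names, bs.Name)
		}
		return nil // returning a non-nil error here halts the iteration
	})
	return names, err
}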
-func (c *AutoscalersTestIamPermissionsCall) Fields(s ...googleapi.Field) *AutoscalersTestIamPermissionsCall { +func (c *BackendServicesPatchCall) Fields(s ...googleapi.Field) *BackendServicesPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -23329,53 +36886,52 @@ func (c *AutoscalersTestIamPermissionsCall) Fields(s ...googleapi.Field) *Autosc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersTestIamPermissionsCall) Context(ctx context.Context) *AutoscalersTestIamPermissionsCall { +func (c *BackendServicesPatchCall) Context(ctx context.Context) *BackendServicesPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AutoscalersTestIamPermissionsCall) Header() http.Header { +func (c *BackendServicesPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("PATCH", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, + "project": c.project, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *AutoscalersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.backendServices.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -23394,7 +36950,7 @@ func (c *AutoscalersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -23406,79 +36962,68 @@ func (c *AutoscalersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.autoscalers.testIamPermissions", + // "description": "Patches the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.backendServices.patch", // "parameterOrder": [ // "project", - // "zone", - // "resource" + // "backendService" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "backendService": { + // "description": "Name of the BackendService resource to patch.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name of the resource for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/autoscalers/{resource}/testIamPermissions", + // "path": "{project}/global/backendServices/{backendService}", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "BackendService" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.autoscalers.update": +// method id "compute.backendServices.setSecurityPolicy": -type AutoscalersUpdateCall struct { - s *Service - project string - zone string - autoscaler *Autoscaler - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BackendServicesSetSecurityPolicyCall struct { + s *Service + project string + backendService string + securitypolicyreference *SecurityPolicyReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates an autoscaler in the specified project using the data -// included in the request. -func (r *AutoscalersService) Update(project string, zone string, autoscaler *Autoscaler) *AutoscalersUpdateCall { - c := &AutoscalersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetSecurityPolicy: Sets the security policy for the specified backend +// service. +func (r *BackendServicesService) SetSecurityPolicy(project string, backendService string, securitypolicyreference *SecurityPolicyReference) *BackendServicesSetSecurityPolicyCall { + c := &BackendServicesSetSecurityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.autoscaler = autoscaler - return c -} - -// Autoscaler sets the optional parameter "autoscaler": Name of the -// autoscaler to update. -func (c *AutoscalersUpdateCall) Autoscaler(autoscaler string) *AutoscalersUpdateCall { - c.urlParams_.Set("autoscaler", autoscaler) + c.backendService = backendService + c.securitypolicyreference = securitypolicyreference return c } @@ -23496,7 +37041,7 @@ func (c *AutoscalersUpdateCall) Autoscaler(autoscaler string) *AutoscalersUpdate // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *AutoscalersUpdateCall) RequestId(requestId string) *AutoscalersUpdateCall { +func (c *BackendServicesSetSecurityPolicyCall) RequestId(requestId string) *BackendServicesSetSecurityPolicyCall { c.urlParams_.Set("requestId", requestId) return c } @@ -23504,7 +37049,7 @@ func (c *AutoscalersUpdateCall) RequestId(requestId string) *AutoscalersUpdateCa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
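// Usage sketch (illustrative): an idempotent PATCH of a backend service
// using the requestId parameter documented above, so a retried request is
// not applied twice. The svc.BackendServices field and the Description
// field on BackendService are assumptions; Patch, RequestId, Context and
// Do match the methods defined in this file.
func examplePatchBackendService(ctx context.Context, svc *Service, project, name string) (*Operation, error) {
	patch := &BackendService{Description: "updated by example"} // send only the fields being changed; field name assumed
	return svc.BackendServices.Patch(project, name, patch).
		RequestId("3d7f2a4e-0f6b-4c3b-9a2e-5b6d7e8f9a0b"). // any valid, non-zero UUID
		Context(ctx).
		Do()
}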
-func (c *AutoscalersUpdateCall) Fields(s ...googleapi.Field) *AutoscalersUpdateCall { +func (c *BackendServicesSetSecurityPolicyCall) Fields(s ...googleapi.Field) *BackendServicesSetSecurityPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -23512,52 +37057,52 @@ func (c *AutoscalersUpdateCall) Fields(s ...googleapi.Field) *AutoscalersUpdateC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersUpdateCall) Context(ctx context.Context) *AutoscalersUpdateCall { +func (c *BackendServicesSetSecurityPolicyCall) Context(ctx context.Context) *BackendServicesSetSecurityPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AutoscalersUpdateCall) Header() http.Header { +func (c *BackendServicesSetSecurityPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesSetSecurityPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyreference) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/setSecurityPolicy") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.update" call. +// Do executes the "compute.backendServices.setSecurityPolicy" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *AutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *BackendServicesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -23588,18 +37133,18 @@ func (c *AutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Updates an autoscaler in the specified project using the data included in the request.", - // "httpMethod": "PUT", - // "id": "compute.autoscalers.update", + // "description": "Sets the security policy for the specified backend service.", + // "httpMethod": "POST", + // "id": "compute.backendServices.setSecurityPolicy", // "parameterOrder": [ // "project", - // "zone" + // "backendService" // ], // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to update.", - // "location": "query", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "backendService": { + // "description": "Name of the BackendService resource to which the security policy should be set. The name should conform to RFC1035.", + // "location": "path", + // "required": true, // "type": "string" // }, // "project": { @@ -23613,18 +37158,11 @@ func (c *AutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, er // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "Name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/autoscalers", + // "path": "{project}/global/backendServices/{backendService}/setSecurityPolicy", // "request": { - // "$ref": "Autoscaler" + // "$ref": "SecurityPolicyReference" // }, // "response": { // "$ref": "Operation" @@ -23637,50 +37175,32 @@ func (c *AutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, er } -// method id "compute.backendBuckets.addSignedUrlKey": +// method id "compute.backendServices.testIamPermissions": -type BackendBucketsAddSignedUrlKeyCall struct { - s *Service - project string - backendBucket string - signedurlkey *SignedUrlKey - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BackendServicesTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AddSignedUrlKey: Adds the given Signed URL Key to the backend bucket. -func (r *BackendBucketsService) AddSignedUrlKey(project string, backendBucket string, signedurlkey *SignedUrlKey) *BackendBucketsAddSignedUrlKeyCall { - c := &BackendBucketsAddSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. 
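// Usage sketch (illustrative): attaching a security policy to a backend
// service with the setSecurityPolicy call above. The SecurityPolicy field
// on SecurityPolicyReference and the svc.BackendServices field are
// assumptions; the call chain itself matches the methods defined in this
// file.
func exampleSetSecurityPolicy(ctx context.Context, svc *Service, project, backendService, policyLink string) (*Operation, error) {
	ref := &SecurityPolicyReference{SecurityPolicy: policyLink} // URL of the policy to attach; field name assumed
	return svc.BackendServices.SetSecurityPolicy(project, backendService, ref).Context(ctx).Do()
}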
+func (r *BackendServicesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *BackendServicesTestIamPermissionsCall { + c := &BackendServicesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendBucket = backendBucket - c.signedurlkey = signedurlkey - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendBucketsAddSignedUrlKeyCall) RequestId(requestId string) *BackendBucketsAddSignedUrlKeyCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsAddSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendBucketsAddSignedUrlKeyCall { +func (c *BackendServicesTestIamPermissionsCall) Fields(s ...googleapi.Field) *BackendServicesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -23688,52 +37208,52 @@ func (c *BackendBucketsAddSignedUrlKeyCall) Fields(s ...googleapi.Field) *Backen // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsAddSignedUrlKeyCall) Context(ctx context.Context) *BackendBucketsAddSignedUrlKeyCall { +func (c *BackendServicesTestIamPermissionsCall) Context(ctx context.Context) *BackendServicesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendBucketsAddSignedUrlKeyCall) Header() http.Header { +func (c *BackendServicesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsAddSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.signedurlkey) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}/addSignedUrlKey") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{resource}/testIamPermissions") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendBucket": c.backendBucket, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.addSignedUrlKey" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *BackendBucketsAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.backendServices.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BackendServicesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -23752,7 +37272,7 @@ func (c *BackendBucketsAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*O if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -23764,20 +37284,14 @@ func (c *BackendBucketsAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*O } return ret, nil // { - // "description": "Adds the given Signed URL Key to the backend bucket.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.backendBuckets.addSignedUrlKey", + // "id": "compute.backendServices.testIamPermissions", // "parameterOrder": [ // "project", - // "backendBucket" + // "resource" // ], // "parameters": { - // "backendBucket": { - // "description": "Name of the BackendBucket resource to which the Signed URL Key should be added. The name should conform to RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -23785,43 +37299,52 @@ func (c *BackendBucketsAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*O // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets/{backendBucket}/addSignedUrlKey", + // "path": "{project}/global/backendServices/{resource}/testIamPermissions", // "request": { - // "$ref": "SignedUrlKey" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.backendBuckets.delete": +// method id "compute.backendServices.update": -type BackendBucketsDeleteCall struct { - s *Service - project string - backendBucket string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BackendServicesUpdateCall struct { + s *Service + project string + backendService string + backendservice *BackendService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified BackendBucket resource. -func (r *BackendBucketsService) Delete(project string, backendBucket string) *BackendBucketsDeleteCall { - c := &BackendBucketsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates the specified BackendService resource with the data +// included in the request. There are several restrictions and +// guidelines to keep in mind when updating a backend service. Read +// Restrictions and Guidelines for more information. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/update +func (r *BackendServicesService) Update(project string, backendService string, backendservice *BackendService) *BackendServicesUpdateCall { + c := &BackendServicesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendBucket = backendBucket + c.backendService = backendService + c.backendservice = backendservice return c } @@ -23839,7 +37362,7 @@ func (r *BackendBucketsService) Delete(project string, backendBucket string) *Ba // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendBucketsDeleteCall) RequestId(requestId string) *BackendBucketsDeleteCall { +func (c *BackendServicesUpdateCall) RequestId(requestId string) *BackendServicesUpdateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -23847,7 +37370,7 @@ func (c *BackendBucketsDeleteCall) RequestId(requestId string) *BackendBucketsDe // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
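// Usage sketch (illustrative): checking which compute.backendServices.*
// permissions the caller holds on a given backend service via the
// testIamPermissions call above. The Permissions fields on
// TestPermissionsRequest/TestPermissionsResponse and the
// svc.BackendServices field are assumptions; the call itself matches the
// methods defined in this file.
func exampleTestBackendServicePermissions(ctx context.Context, svc *Service, project, resource string) ([]string, error) {
	req := &TestPermissionsRequest{
		Permissions: []string{"compute.backendServices.get", "compute.backendServices.update"}, // field name assumed
	}
	resp, err := svc.BackendServices.TestIamPermissions(project, resource, req).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	return resp.Permissions, nil // the granted subset of the requested permissions; field name assumed
}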
-func (c *BackendBucketsDeleteCall) Fields(s ...googleapi.Field) *BackendBucketsDeleteCall { +func (c *BackendServicesUpdateCall) Fields(s ...googleapi.Field) *BackendServicesUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -23855,47 +37378,52 @@ func (c *BackendBucketsDeleteCall) Fields(s ...googleapi.Field) *BackendBucketsD // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsDeleteCall) Context(ctx context.Context) *BackendBucketsDeleteCall { +func (c *BackendServicesUpdateCall) Context(ctx context.Context) *BackendServicesUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendBucketsDeleteCall) Header() http.Header { +func (c *BackendServicesUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("PUT", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendBucket": c.backendBucket, + "project": c.project, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.delete" call. +// Do executes the "compute.backendServices.update" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendBucketsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -23926,16 +37454,16 @@ func (c *BackendBucketsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Deletes the specified BackendBucket resource.", - // "httpMethod": "DELETE", - // "id": "compute.backendBuckets.delete", + // "description": "Updates the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. 
Read Restrictions and Guidelines for more information.", + // "httpMethod": "PUT", + // "id": "compute.backendServices.update", // "parameterOrder": [ // "project", - // "backendBucket" + // "backendService" // ], // "parameters": { - // "backendBucket": { - // "description": "Name of the BackendBucket resource to delete.", + // "backendService": { + // "description": "Name of the BackendService resource to update.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -23954,7 +37482,10 @@ func (c *BackendBucketsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets/{backendBucket}", + // "path": "{project}/global/backendServices/{backendService}", + // "request": { + // "$ref": "BackendService" + // }, // "response": { // "$ref": "Operation" // }, @@ -23966,50 +37497,32 @@ func (c *BackendBucketsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.backendBuckets.deleteSignedUrlKey": +// method id "compute.clientSslPolicies.testIamPermissions": -type BackendBucketsDeleteSignedUrlKeyCall struct { - s *Service - project string - backendBucket string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ClientSslPoliciesTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// DeleteSignedUrlKey: Deletes the given Signed URL Key from the backend -// bucket. -func (r *BackendBucketsService) DeleteSignedUrlKey(project string, backendBucket string, keyName string) *BackendBucketsDeleteSignedUrlKeyCall { - c := &BackendBucketsDeleteSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *ClientSslPoliciesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *ClientSslPoliciesTestIamPermissionsCall { + c := &ClientSslPoliciesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendBucket = backendBucket - c.urlParams_.Set("keyName", keyName) - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendBucketsDeleteSignedUrlKeyCall) RequestId(requestId string) *BackendBucketsDeleteSignedUrlKeyCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *BackendBucketsDeleteSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendBucketsDeleteSignedUrlKeyCall { +func (c *ClientSslPoliciesTestIamPermissionsCall) Fields(s ...googleapi.Field) *ClientSslPoliciesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -24017,47 +37530,52 @@ func (c *BackendBucketsDeleteSignedUrlKeyCall) Fields(s ...googleapi.Field) *Bac // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsDeleteSignedUrlKeyCall) Context(ctx context.Context) *BackendBucketsDeleteSignedUrlKeyCall { +func (c *ClientSslPoliciesTestIamPermissionsCall) Context(ctx context.Context) *ClientSslPoliciesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendBucketsDeleteSignedUrlKeyCall) Header() http.Header { +func (c *ClientSslPoliciesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { +func (c *ClientSslPoliciesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}/deleteSignedUrlKey") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/clientSslPolicies/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendBucket": c.backendBucket, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.deleteSignedUrlKey" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *BackendBucketsDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.clientSslPolicies.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ClientSslPoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -24076,7 +37594,7 @@ func (c *BackendBucketsDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -24088,27 +37606,14 @@ func (c *BackendBucketsDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Deletes the given Signed URL Key from the backend bucket.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.backendBuckets.deleteSignedUrlKey", + // "id": "compute.clientSslPolicies.testIamPermissions", // "parameterOrder": [ // "project", - // "backendBucket", - // "keyName" + // "resource" // ], // "parameters": { - // "backendBucket": { - // "description": "Name of the BackendBucket resource to which the Signed URL Key should be added. The name should conform to RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "keyName": { - // "description": "The name of the Signed URL Key to delete.", - // "location": "query", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -24116,49 +37621,120 @@ func (c *BackendBucketsDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets/{backendBucket}/deleteSignedUrlKey", + // "path": "{project}/global/clientSslPolicies/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.backendBuckets.get": +// method id "compute.diskTypes.aggregatedList": -type BackendBucketsGetCall struct { - s *Service - project string - backendBucket string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type DiskTypesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified BackendBucket resource. Get a list of -// available backend buckets by making a list() request. -func (r *BackendBucketsService) Get(project string, backendBucket string) *BackendBucketsGetCall { - c := &BackendBucketsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of disk types. +// For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/aggregatedList +func (r *DiskTypesService) AggregatedList(project string) *DiskTypesAggregatedListCall { + c := &DiskTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendBucket = backendBucket + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). 
Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *DiskTypesAggregatedListCall) Filter(filter string) *DiskTypesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *DiskTypesAggregatedListCall) MaxResults(maxResults int64) *DiskTypesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *DiskTypesAggregatedListCall) OrderBy(orderBy string) *DiskTypesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *DiskTypesAggregatedListCall) PageToken(pageToken string) *DiskTypesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsGetCall) Fields(s ...googleapi.Field) *BackendBucketsGetCall { +func (c *DiskTypesAggregatedListCall) Fields(s ...googleapi.Field) *DiskTypesAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -24168,7 +37744,7 @@ func (c *BackendBucketsGetCall) Fields(s ...googleapi.Field) *BackendBucketsGetC // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *BackendBucketsGetCall) IfNoneMatch(entityTag string) *BackendBucketsGetCall { +func (c *DiskTypesAggregatedListCall) IfNoneMatch(entityTag string) *DiskTypesAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -24176,21 +37752,21 @@ func (c *BackendBucketsGetCall) IfNoneMatch(entityTag string) *BackendBucketsGet // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsGetCall) Context(ctx context.Context) *BackendBucketsGetCall { +func (c *DiskTypesAggregatedListCall) Context(ctx context.Context) *DiskTypesAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *BackendBucketsGetCall) Header() http.Header { +func (c *DiskTypesAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *DiskTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -24201,25 +37777,24 @@ func (c *BackendBucketsGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/diskTypes") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendBucket": c.backendBucket, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.get" call. -// Exactly one of *BackendBucket or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *BackendBucket.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.diskTypes.aggregatedList" call. +// Exactly one of *DiskTypeAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *DiskTypeAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket, error) { +func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTypeAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -24238,7 +37813,7 @@ func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &BackendBucket{ + ret := &DiskTypeAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -24250,19 +37825,34 @@ func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket } return ret, nil // { - // "description": "Returns the specified BackendBucket resource. Get a list of available backend buckets by making a list() request.", + // "description": "Retrieves an aggregated list of disk types.", // "httpMethod": "GET", - // "id": "compute.backendBuckets.get", + // "id": "compute.diskTypes.aggregatedList", // "parameterOrder": [ - // "project", - // "backendBucket" + // "project" // ], // "parameters": { - // "backendBucket": { - // "description": "Name of the BackendBucket resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. 
Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -24273,9 +37863,9 @@ func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets/{backendBucket}", + // "path": "{project}/aggregated/diskTypes", // "response": { - // "$ref": "BackendBucket" + // "$ref": "DiskTypeAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -24286,31 +37876,55 @@ func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket } -// method id "compute.backendBuckets.getIamPolicy": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
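To make this generated hunk easier to follow, here is a minimal caller-side sketch of the aggregated disk-type listing documented above. It assumes a *compute.Service (svc) built from an authenticated *http.Client via compute.New, plus the generated svc.DiskTypes accessor and AggregatedList constructor defined elsewhere in this file; the project ID and filter value are placeholders.

package example // arbitrary example package name

import (
	"context"
	"fmt"
	"net/http"

	compute "google.golang.org/api/compute/v1"
)

// listSSDDiskTypes walks every page of the aggregated diskTypes list.
func listSSDDiskTypes(ctx context.Context, client *http.Client) error {
	svc, err := compute.New(client) // client must already carry Compute Engine credentials
	if err != nil {
		return err
	}
	call := svc.DiskTypes.AggregatedList("my-project"). // placeholder project ID
		Filter("name eq pd-ssd"). // eq/ne expression; RE2 regexp for string fields
		MaxResults(100)           // 0-500 per page, default 500
	// Pages follows nextPageToken automatically and calls f once per page.
	return call.Pages(ctx, func(page *compute.DiskTypeAggregatedList) error {
		fmt.Printf("received a page (nextPageToken=%q)\n", page.NextPageToken)
		return nil
	})
}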
+func (c *DiskTypesAggregatedListCall) Pages(ctx context.Context, f func(*DiskTypeAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type BackendBucketsGetIamPolicyCall struct { +// method id "compute.diskTypes.get": + +type DiskTypesGetCall struct { s *Service project string - resource string + zone string + diskType string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. -func (r *BackendBucketsService) GetIamPolicy(project string, resource string) *BackendBucketsGetIamPolicyCall { - c := &BackendBucketsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified disk type. Get a list of available disk +// types by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/get +func (r *DiskTypesService) Get(project string, zone string, diskType string) *DiskTypesGetCall { + c := &DiskTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource + c.zone = zone + c.diskType = diskType return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsGetIamPolicyCall) Fields(s ...googleapi.Field) *BackendBucketsGetIamPolicyCall { +func (c *DiskTypesGetCall) Fields(s ...googleapi.Field) *DiskTypesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -24320,7 +37934,7 @@ func (c *BackendBucketsGetIamPolicyCall) Fields(s ...googleapi.Field) *BackendBu // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *BackendBucketsGetIamPolicyCall) IfNoneMatch(entityTag string) *BackendBucketsGetIamPolicyCall { +func (c *DiskTypesGetCall) IfNoneMatch(entityTag string) *DiskTypesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -24328,21 +37942,21 @@ func (c *BackendBucketsGetIamPolicyCall) IfNoneMatch(entityTag string) *BackendB // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsGetIamPolicyCall) Context(ctx context.Context) *BackendBucketsGetIamPolicyCall { +func (c *DiskTypesGetCall) Context(ctx context.Context) *DiskTypesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *BackendBucketsGetIamPolicyCall) Header() http.Header { +func (c *DiskTypesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *DiskTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -24353,25 +37967,26 @@ func (c *BackendBucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/diskTypes/{diskType}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "resource": c.resource, + "zone": c.zone, + "diskType": c.diskType, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// Do executes the "compute.diskTypes.get" call. +// Exactly one of *DiskType or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *BackendBucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// *DiskType.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -24390,7 +38005,7 @@ func (c *BackendBucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Poli if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &DiskType{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -24402,14 +38017,22 @@ func (c *BackendBucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Poli } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + // "description": "Returns the specified disk type. 
Get a list of available disk types by making a list() request.", // "httpMethod": "GET", - // "id": "compute.backendBuckets.getIamPolicy", + // "id": "compute.diskTypes.get", // "parameterOrder": [ // "project", - // "resource" + // "zone", + // "diskType" // ], // "parameters": { + // "diskType": { + // "description": "Name of the disk type to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -24417,17 +38040,17 @@ func (c *BackendBucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Poli // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name of the resource for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets/{resource}/getIamPolicy", + // "path": "{project}/zones/{zone}/diskTypes/{diskType}", // "response": { - // "$ref": "Policy" + // "$ref": "DiskType" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -24438,101 +38061,160 @@ func (c *BackendBucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Poli } -// method id "compute.backendBuckets.insert": +// method id "compute.diskTypes.list": -type BackendBucketsInsertCall struct { - s *Service - project string - backendbucket *BackendBucket - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type DiskTypesListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Insert: Creates a BackendBucket resource in the specified project -// using the data included in the request. -func (r *BackendBucketsService) Insert(project string, backendbucket *BackendBucket) *BackendBucketsInsertCall { - c := &BackendBucketsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of disk types available to the specified +// project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/list +func (r *DiskTypesService) List(project string, zone string) *DiskTypesListCall { + c := &DiskTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendbucket = backendbucket + c.zone = zone return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// The field_name is the name of the field you want to compare. 
Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendBucketsInsertCall) RequestId(requestId string) *BackendBucketsInsertCall { - c.urlParams_.Set("requestId", requestId) +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *DiskTypesListCall) Filter(filter string) *DiskTypesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *DiskTypesListCall) MaxResults(maxResults int64) *DiskTypesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *DiskTypesListCall) OrderBy(orderBy string) *DiskTypesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *DiskTypesListCall) PageToken(pageToken string) *DiskTypesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
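The pageToken/nextPageToken plumbing described above can also be driven by hand when the Pages helper is not convenient. A sketch under the same imports and assumptions as the earlier example (svc is a *compute.Service; project and zone are placeholders):

// listZoneDiskTypes pages through diskTypes.list manually via PageToken.
func listZoneDiskTypes(ctx context.Context, svc *compute.Service) error {
	token := ""
	for {
		call := svc.DiskTypes.List("my-project", "us-central1-f"). // placeholders
			OrderBy("creationTimestamp desc"). // newest first
			Context(ctx)
		if token != "" {
			call = call.PageToken(token)
		}
		page, err := call.Do()
		if err != nil {
			return err
		}
		// ... consume page here ...
		if page.NextPageToken == "" {
			return nil // no further pages
		}
		token = page.NextPageToken
	}
}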
-func (c *BackendBucketsInsertCall) Fields(s ...googleapi.Field) *BackendBucketsInsertCall { +func (c *DiskTypesListCall) Fields(s ...googleapi.Field) *DiskTypesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *DiskTypesListCall) IfNoneMatch(entityTag string) *DiskTypesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsInsertCall) Context(ctx context.Context) *BackendBucketsInsertCall { +func (c *DiskTypesListCall) Context(ctx context.Context) *DiskTypesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendBucketsInsertCall) Header() http.Header { +func (c *DiskTypesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *DiskTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendbucket) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/diskTypes") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.diskTypes.list" call. +// Exactly one of *DiskTypeList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// *DiskTypeList.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendBucketsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -24551,7 +38233,7 @@ func (c *BackendBucketsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &DiskTypeList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -24563,13 +38245,37 @@ func (c *BackendBucketsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Creates a BackendBucket resource in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.backendBuckets.insert", + // "description": "Retrieves a list of disk types available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.diskTypes.list", // "parameterOrder": [ - // "project" + // "project", + // "zone" // ], // "parameters": { + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -24577,30 +38283,51 @@ func (c *BackendBucketsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets", - // "request": { - // "$ref": "BackendBucket" - // }, + // "path": "{project}/zones/{zone}/diskTypes", // "response": { - // "$ref": "Operation" + // "$ref": "DiskTypeList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.backendBuckets.list": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *DiskTypesListCall) Pages(ctx context.Context, f func(*DiskTypeList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.disks.aggregatedList": -type BackendBucketsListCall struct { +type DisksAggregatedListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -24609,10 +38336,10 @@ type BackendBucketsListCall struct { header_ http.Header } -// List: Retrieves the list of BackendBucket resources available to the -// specified project. -func (r *BackendBucketsService) List(project string) *BackendBucketsListCall { - c := &BackendBucketsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of persistent disks. +// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/aggregatedList +func (r *DisksService) AggregatedList(project string) *DisksAggregatedListCall { + c := &DisksAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -24643,7 +38370,7 @@ func (r *BackendBucketsService) List(project string) *BackendBucketsListCall { // true) (zone eq us-central1-f). 
Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *BackendBucketsListCall) Filter(filter string) *BackendBucketsListCall { +func (c *DisksAggregatedListCall) Filter(filter string) *DisksAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -24654,7 +38381,7 @@ func (c *BackendBucketsListCall) Filter(filter string) *BackendBucketsListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *BackendBucketsListCall) MaxResults(maxResults int64) *BackendBucketsListCall { +func (c *DisksAggregatedListCall) MaxResults(maxResults int64) *DisksAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -24671,7 +38398,7 @@ func (c *BackendBucketsListCall) MaxResults(maxResults int64) *BackendBucketsLis // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *BackendBucketsListCall) OrderBy(orderBy string) *BackendBucketsListCall { +func (c *DisksAggregatedListCall) OrderBy(orderBy string) *DisksAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -24679,7 +38406,7 @@ func (c *BackendBucketsListCall) OrderBy(orderBy string) *BackendBucketsListCall // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *BackendBucketsListCall) PageToken(pageToken string) *BackendBucketsListCall { +func (c *DisksAggregatedListCall) PageToken(pageToken string) *DisksAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -24687,7 +38414,7 @@ func (c *BackendBucketsListCall) PageToken(pageToken string) *BackendBucketsList // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsListCall) Fields(s ...googleapi.Field) *BackendBucketsListCall { +func (c *DisksAggregatedListCall) Fields(s ...googleapi.Field) *DisksAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -24697,7 +38424,7 @@ func (c *BackendBucketsListCall) Fields(s ...googleapi.Field) *BackendBucketsLis // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *BackendBucketsListCall) IfNoneMatch(entityTag string) *BackendBucketsListCall { +func (c *DisksAggregatedListCall) IfNoneMatch(entityTag string) *DisksAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -24705,21 +38432,21 @@ func (c *BackendBucketsListCall) IfNoneMatch(entityTag string) *BackendBucketsLi // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsListCall) Context(ctx context.Context) *BackendBucketsListCall { +func (c *DisksAggregatedListCall) Context(ctx context.Context) *DisksAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *BackendBucketsListCall) Header() http.Header { +func (c *DisksAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsListCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -24730,7 +38457,7 @@ func (c *BackendBucketsListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/disks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders @@ -24740,14 +38467,14 @@ func (c *BackendBucketsListCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.list" call. -// Exactly one of *BackendBucketList or error will be non-nil. Any +// Do executes the "compute.disks.aggregatedList" call. +// Exactly one of *DiskAggregatedList or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *BackendBucketList.ServerResponse.Header or (if a response was +// *DiskAggregatedList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucketList, error) { +func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -24766,7 +38493,7 @@ func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucke if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &BackendBucketList{ + ret := &DiskAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -24778,9 +38505,9 @@ func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucke } return ret, nil // { - // "description": "Retrieves the list of BackendBucket resources available to the specified project.", + // "description": "Retrieves an aggregated list of persistent disks.", // "httpMethod": "GET", - // "id": "compute.backendBuckets.list", + // "id": "compute.disks.aggregatedList", // "parameterOrder": [ // "project" // ], @@ -24816,9 +38543,9 @@ func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucke // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets", + // "path": "{project}/aggregated/disks", // "response": { - // "$ref": "BackendBucketList" + // "$ref": "DiskAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -24832,7 +38559,7 @@ func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucke // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
-func (c *BackendBucketsListCall) Pages(ctx context.Context, f func(*BackendBucketList) error) error { +func (c *DisksAggregatedListCall) Pages(ctx context.Context, f func(*DiskAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -24850,26 +38577,33 @@ func (c *BackendBucketsListCall) Pages(ctx context.Context, f func(*BackendBucke } } -// method id "compute.backendBuckets.patch": +// method id "compute.disks.createSnapshot": -type BackendBucketsPatchCall struct { - s *Service - project string - backendBucket string - backendbucket *BackendBucket - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type DisksCreateSnapshotCall struct { + s *Service + project string + zone string + disk string + snapshot *Snapshot + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates the specified BackendBucket resource with the data -// included in the request. This method supports PATCH semantics and -// uses the JSON merge patch format and processing rules. -func (r *BackendBucketsService) Patch(project string, backendBucket string, backendbucket *BackendBucket) *BackendBucketsPatchCall { - c := &BackendBucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// CreateSnapshot: Creates a snapshot of a specified persistent disk. +// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/createSnapshot +func (r *DisksService) CreateSnapshot(project string, zone string, disk string, snapshot *Snapshot) *DisksCreateSnapshotCall { + c := &DisksCreateSnapshotCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendBucket = backendBucket - c.backendbucket = backendbucket + c.zone = zone + c.disk = disk + c.snapshot = snapshot + return c +} + +// GuestFlush sets the optional parameter "guestFlush": +func (c *DisksCreateSnapshotCall) GuestFlush(guestFlush bool) *DisksCreateSnapshotCall { + c.urlParams_.Set("guestFlush", fmt.Sprint(guestFlush)) return c } @@ -24887,7 +38621,7 @@ func (r *BackendBucketsService) Patch(project string, backendBucket string, back // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendBucketsPatchCall) RequestId(requestId string) *BackendBucketsPatchCall { +func (c *DisksCreateSnapshotCall) RequestId(requestId string) *DisksCreateSnapshotCall { c.urlParams_.Set("requestId", requestId) return c } @@ -24895,7 +38629,7 @@ func (c *BackendBucketsPatchCall) RequestId(requestId string) *BackendBucketsPat // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsPatchCall) Fields(s ...googleapi.Field) *BackendBucketsPatchCall { +func (c *DisksCreateSnapshotCall) Fields(s ...googleapi.Field) *DisksCreateSnapshotCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -24903,52 +38637,53 @@ func (c *BackendBucketsPatchCall) Fields(s ...googleapi.Field) *BackendBucketsPa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
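A sketch of the snapshot call introduced above, under the same imports and assumptions as the earlier examples and using the generated svc.Disks accessor (defined outside this hunk). The Snapshot struct's Name field is assumed from the rest of this generated file; project, zone, disk, snapshot name, and request ID are placeholders.

// snapshotDisk creates a snapshot of an existing persistent disk.
func snapshotDisk(ctx context.Context, svc *compute.Service) error {
	snap := &compute.Snapshot{Name: "my-disk-snap-1"} // Name is an assumed field; placeholder value
	op, err := svc.Disks.CreateSnapshot("my-project", "us-central1-f", "my-disk", snap).
		GuestFlush(true).
		RequestId("11111111-2222-3333-4444-555555555555"). // placeholder UUID; makes retries idempotent
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	_ = op // asynchronous *Operation; poll or wait on it before relying on the snapshot
	return nil
}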
-func (c *BackendBucketsPatchCall) Context(ctx context.Context) *BackendBucketsPatchCall { +func (c *DisksCreateSnapshotCall) Context(ctx context.Context) *DisksCreateSnapshotCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendBucketsPatchCall) Header() http.Header { +func (c *DisksCreateSnapshotCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendbucket) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.snapshot) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/createSnapshot") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendBucket": c.backendBucket, + "project": c.project, + "zone": c.zone, + "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.patch" call. +// Do executes the "compute.disks.createSnapshot" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendBucketsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -24979,21 +38714,26 @@ func (c *BackendBucketsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Updates the specified BackendBucket resource with the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.backendBuckets.patch", + // "description": "Creates a snapshot of a specified persistent disk.", + // "httpMethod": "POST", + // "id": "compute.disks.createSnapshot", // "parameterOrder": [ // "project", - // "backendBucket" + // "zone", + // "disk" // ], // "parameters": { - // "backendBucket": { - // "description": "Name of the BackendBucket resource to patch.", + // "disk": { + // "description": "Name of the persistent disk to snapshot.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, + // "guestFlush": { + // "location": "query", + // "type": "boolean" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -25005,103 +38745,457 @@ func (c *BackendBucketsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets/{backendBucket}", + // "path": "{project}/zones/{zone}/disks/{disk}/createSnapshot", // "request": { - // "$ref": "BackendBucket" + // "$ref": "Snapshot" // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.backendBuckets.setIamPolicy": +// method id "compute.disks.delete": -type BackendBucketsSetIamPolicyCall struct { +type DisksDeleteCall struct { s *Service project string - resource string - policy *Policy + zone string + disk string urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. -func (r *BackendBucketsService) SetIamPolicy(project string, resource string, policy *Policy) *BackendBucketsSetIamPolicyCall { - c := &BackendBucketsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified persistent disk. Deleting a disk +// removes its data permanently and is irreversible. However, deleting a +// disk does not delete any snapshots previously made from the disk. You +// must separately delete snapshots. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/delete +func (r *DisksService) Delete(project string, zone string, disk string) *DisksDeleteCall { + c := &DisksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.disk = disk + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *DisksDeleteCall) RequestId(requestId string) *DisksDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DisksDeleteCall) Fields(s ...googleapi.Field) *DisksDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DisksDeleteCall) Context(ctx context.Context) *DisksDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DisksDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DisksDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "disk": c.disk, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.disks.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified persistent disk. Deleting a disk removes its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. You must separately delete snapshots.", + // "httpMethod": "DELETE", + // "id": "compute.disks.delete", + // "parameterOrder": [ + // "project", + // "zone", + // "disk" + // ], + // "parameters": { + // "disk": { + // "description": "Name of the persistent disk to delete.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/disks/{disk}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.disks.get": + +type DisksGetCall struct { + s *Service + project string + zone string + disk string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns a specified persistent disk. Get a list of available +// persistent disks by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/get +func (r *DisksService) Get(project string, zone string, disk string) *DisksGetCall { + c := &DisksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.disk = disk + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *DisksGetCall) Fields(s ...googleapi.Field) *DisksGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *DisksGetCall) IfNoneMatch(entityTag string) *DisksGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DisksGetCall) Context(ctx context.Context) *DisksGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DisksGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DisksGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "disk": c.disk, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.disks.get" call. +// Exactly one of *Disk or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Disk.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Disk{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns a specified persistent disk. 
Get a list of available persistent disks by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.disks.get", + // "parameterOrder": [ + // "project", + // "zone", + // "disk" + // ], + // "parameters": { + // "disk": { + // "description": "Name of the persistent disk to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/disks/{disk}", + // "response": { + // "$ref": "Disk" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.disks.getIamPolicy": + +type DisksGetIamPolicyCall struct { + s *Service + project string + zone string + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. +func (r *DisksService) GetIamPolicy(project string, zone string, resource string) *DisksGetIamPolicyCall { + c := &DisksGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone c.resource = resource - c.policy = policy return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsSetIamPolicyCall) Fields(s ...googleapi.Field) *BackendBucketsSetIamPolicyCall { +func (c *DisksGetIamPolicyCall) Fields(s ...googleapi.Field) *DisksGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *DisksGetIamPolicyCall) IfNoneMatch(entityTag string) *DisksGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsSetIamPolicyCall) Context(ctx context.Context) *BackendBucketsSetIamPolicyCall { +func (c *DisksGetIamPolicyCall) Context(ctx context.Context) *DisksGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
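The IfNoneMatch/ETag pattern these doc comments keep referring to looks like this from the caller's side (same assumptions as the earlier sketches; googleapi is google.golang.org/api/googleapi; names are placeholders):

// getDiskIfChanged refetches a disk only if it has changed since lastETag.
func getDiskIfChanged(ctx context.Context, svc *compute.Service, lastETag string) (*compute.Disk, error) {
	d, err := svc.Disks.Get("my-project", "us-central1-f", "my-disk"). // placeholders
		IfNoneMatch(lastETag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		return nil, nil // unchanged since lastETag; keep using the cached copy
	}
	return d, err
}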
-func (c *BackendBucketsSetIamPolicyCall) Header() http.Header { +func (c *DisksGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "zone": c.zone, "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.setIamPolicy" call. +// Do executes the "compute.disks.getIamPolicy" call. // Exactly one of *Policy or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Policy.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *BackendBucketsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +func (c *DisksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -25132,11 +39226,12 @@ func (c *BackendBucketsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Poli } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", - // "httpMethod": "POST", - // "id": "compute.backendBuckets.setIamPolicy", + // "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", + // "httpMethod": "GET", + // "id": "compute.disks.getIamPolicy", // "parameterOrder": [ // "project", + // "zone", // "resource" // ], // "parameters": { @@ -25150,52 +39245,87 @@ func (c *BackendBucketsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Poli // "resource": { // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets/{resource}/setIamPolicy", - // "request": { - // "$ref": "Policy" - // }, + // "path": "{project}/zones/{zone}/disks/{resource}/getIamPolicy", // "response": { // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.backendBuckets.testIamPermissions": +// method id "compute.disks.insert": -type BackendBucketsTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type DisksInsertCall struct { + s *Service + project string + zone string + disk *Disk + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *BackendBucketsService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *BackendBucketsTestIamPermissionsCall { - c := &BackendBucketsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a persistent disk in the specified project using the +// data in the request. You can create a disk with a sourceImage, a +// sourceSnapshot, or create an empty 500 GB data disk by omitting all +// properties. You can also create a disk that is larger than the +// default size by specifying the sizeGb property. +// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/insert +func (r *DisksService) Insert(project string, zone string, disk *Disk) *DisksInsertCall { + c := &DisksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.zone = zone + c.disk = disk + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. 
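For orientation, a minimal, self-contained sketch of the builder pattern every generated call in this file follows (build the call, optionally chain Fields/IfNoneMatch/Context, then Do), here exercising compute.disks.getIamPolicy. The import path google.golang.org/api/compute/v0.beta, the project/zone/disk names, and the use of Application Default Credentials are all illustrative assumptions, not part of the vendored package:

package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v0.beta" // assumed import path for this generated surface
)

func main() {
	ctx := context.Background()

	// Any *http.Client carrying the compute scopes works; Application
	// Default Credentials are simply the shortest way to obtain one.
	client, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}

	// Builder pattern used by the generated calls: construct, chain
	// optional setters, then Do.
	policy, err := svc.Disks.GetIamPolicy("my-project", "us-central1-a", "my-disk").
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("disk IAM policy has %d bindings", len(policy.Bindings))
}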
+// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *DisksInsertCall) RequestId(requestId string) *DisksInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// SourceImage sets the optional parameter "sourceImage": Source image +// to restore onto a disk. +func (c *DisksInsertCall) SourceImage(sourceImage string) *DisksInsertCall { + c.urlParams_.Set("sourceImage", sourceImage) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsTestIamPermissionsCall) Fields(s ...googleapi.Field) *BackendBucketsTestIamPermissionsCall { +func (c *DisksInsertCall) Fields(s ...googleapi.Field) *DisksInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -25203,52 +39333,52 @@ func (c *BackendBucketsTestIamPermissionsCall) Fields(s ...googleapi.Field) *Bac // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsTestIamPermissionsCall) Context(ctx context.Context) *BackendBucketsTestIamPermissionsCall { +func (c *DisksInsertCall) Context(ctx context.Context) *DisksInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendBucketsTestIamPermissionsCall) Header() http.Header { +func (c *DisksInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.disk) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BackendBucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.disks.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -25267,7 +39397,7 @@ func (c *BackendBucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -25279,12 +39409,12 @@ func (c *BackendBucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Creates a persistent disk in the specified project using the data in the request. You can create a disk with a sourceImage, a sourceSnapshot, or create an empty 500 GB data disk by omitting all properties. You can also create a disk that is larger than the default size by specifying the sizeGb property.", // "httpMethod": "POST", - // "id": "compute.backendBuckets.testIamPermissions", + // "id": "compute.disks.insert", // "parameterOrder": [ // "project", - // "resource" + // "zone" // ], // "parameters": { // "project": { @@ -25294,128 +39424,193 @@ func (c *BackendBucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name of the resource for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "sourceImage": { + // "description": "Optional. 
Source image to restore onto a disk.", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets/{resource}/testIamPermissions", + // "path": "{project}/zones/{zone}/disks", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "Disk" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.backendBuckets.update": +// method id "compute.disks.list": -type BackendBucketsUpdateCall struct { - s *Service - project string - backendBucket string - backendbucket *BackendBucket - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type DisksListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Update: Updates the specified BackendBucket resource with the data -// included in the request. -func (r *BackendBucketsService) Update(project string, backendBucket string, backendbucket *BackendBucket) *BackendBucketsUpdateCall { - c := &BackendBucketsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of persistent disks contained within the +// specified zone. +// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/list +func (r *DisksService) List(project string, zone string) *DisksListCall { + c := &DisksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendBucket = backendBucket - c.backendbucket = backendbucket + c.zone = zone return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. 
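Continuing from that setup (svc, ctx), a sketch of compute.disks.insert as wired above; the disk name, size, source image, and request ID are illustrative:

op, err := svc.Disks.Insert("my-project", "us-central1-a", &compute.Disk{
	Name:   "example-disk", // hypothetical name
	SizeGb: 200,            // omit SizeGb/SourceImage to get the default empty 500 GB disk
}).
	SourceImage("projects/debian-cloud/global/images/family/debian-9"). // optional sourceImage query parameter
	RequestId("6f1c2b3a-4d5e-4f60-8a9b-0c1d2e3f4a5b").                  // optional; any non-zero UUID makes retries idempotent
	Context(ctx).
	Do()
if err != nil {
	log.Fatal(err)
}
log.Printf("insert started: operation %s, status %s", op.Name, op.Status)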
// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendBucketsUpdateCall) RequestId(requestId string) *BackendBucketsUpdateCall { - c.urlParams_.Set("requestId", requestId) +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *DisksListCall) Filter(filter string) *DisksListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *DisksListCall) MaxResults(maxResults int64) *DisksListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *DisksListCall) OrderBy(orderBy string) *DisksListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *DisksListCall) PageToken(pageToken string) *DisksListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsUpdateCall) Fields(s ...googleapi.Field) *BackendBucketsUpdateCall { +func (c *DisksListCall) Fields(s ...googleapi.Field) *DisksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
+func (c *DisksListCall) IfNoneMatch(entityTag string) *DisksListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsUpdateCall) Context(ctx context.Context) *BackendBucketsUpdateCall { +func (c *DisksListCall) Context(ctx context.Context) *DisksListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendBucketsUpdateCall) Header() http.Header { +func (c *DisksListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendbucket) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendBucket": c.backendBucket, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.disks.list" call. +// Exactly one of *DiskList or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *DiskList.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendBucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -25434,7 +39629,7 @@ func (c *BackendBucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &DiskList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -25446,19 +39641,35 @@ func (c *BackendBucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Updates the specified BackendBucket resource with the data included in the request.", - // "httpMethod": "PUT", - // "id": "compute.backendBuckets.update", + // "description": "Retrieves a list of persistent disks contained within the specified zone.", + // "httpMethod": "GET", + // "id": "compute.disks.list", // "parameterOrder": [ // "project", - // "backendBucket" + // "zone" // ], // "parameters": { - // "backendBucket": { - // "description": "Name of the BackendBucket resource to update.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -25468,46 +39679,68 @@ func (c *BackendBucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets/{backendBucket}", - // "request": { - // "$ref": "BackendBucket" - // }, + // "path": "{project}/zones/{zone}/disks", // "response": { - // "$ref": "Operation" + // "$ref": "DiskList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.backendServices.addSignedUrlKey": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *DisksListCall) Pages(ctx context.Context, f func(*DiskList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type BackendServicesAddSignedUrlKeyCall struct { - s *Service - project string - backendService string - signedurlkey *SignedUrlKey - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.disks.resize": + +type DisksResizeCall struct { + s *Service + project string + zone string + disk string + disksresizerequest *DisksResizeRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AddSignedUrlKey: Adds the given Signed URL Key to the specified -// backend service. -func (r *BackendServicesService) AddSignedUrlKey(project string, backendService string, signedurlkey *SignedUrlKey) *BackendServicesAddSignedUrlKeyCall { - c := &BackendServicesAddSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Resize: Resizes the specified persistent disk. 
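A sketch of compute.disks.list driven through the Pages helper defined above, again reusing svc and ctx; the filter expression and page size are illustrative:

err := svc.Disks.List("my-project", "us-central1-a").
	Filter("status eq READY"). // eq/ne expressions, per the filter parameter docs
	MaxResults(100).
	Pages(ctx, func(page *compute.DiskList) error {
		for _, d := range page.Items {
			log.Printf("disk %s: %d GB, status %s", d.Name, d.SizeGb, d.Status)
		}
		return nil // a non-nil error here halts the iteration
	})
if err != nil {
	log.Fatal(err)
}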
+func (r *DisksService) Resize(project string, zone string, disk string, disksresizerequest *DisksResizeRequest) *DisksResizeCall { + c := &DisksResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendService = backendService - c.signedurlkey = signedurlkey + c.zone = zone + c.disk = disk + c.disksresizerequest = disksresizerequest return c } @@ -25525,7 +39758,7 @@ func (r *BackendServicesService) AddSignedUrlKey(project string, backendService // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendServicesAddSignedUrlKeyCall) RequestId(requestId string) *BackendServicesAddSignedUrlKeyCall { +func (c *DisksResizeCall) RequestId(requestId string) *DisksResizeCall { c.urlParams_.Set("requestId", requestId) return c } @@ -25533,7 +39766,7 @@ func (c *BackendServicesAddSignedUrlKeyCall) RequestId(requestId string) *Backen // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesAddSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendServicesAddSignedUrlKeyCall { +func (c *DisksResizeCall) Fields(s ...googleapi.Field) *DisksResizeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -25541,52 +39774,53 @@ func (c *BackendServicesAddSignedUrlKeyCall) Fields(s ...googleapi.Field) *Backe // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesAddSignedUrlKeyCall) Context(ctx context.Context) *BackendServicesAddSignedUrlKeyCall { +func (c *DisksResizeCall) Context(ctx context.Context) *DisksResizeCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesAddSignedUrlKeyCall) Header() http.Header { +func (c *DisksResizeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesAddSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.signedurlkey) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.disksresizerequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/addSignedUrlKey") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/resize") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, + "project": c.project, + "zone": c.zone, + "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.addSignedUrlKey" call. +// Do executes the "compute.disks.resize" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendServicesAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *DisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -25617,17 +39851,19 @@ func (c *BackendServicesAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Adds the given Signed URL Key to the specified backend service.", + // "description": "Resizes the specified persistent disk.", // "httpMethod": "POST", - // "id": "compute.backendServices.addSignedUrlKey", + // "id": "compute.disks.resize", // "parameterOrder": [ // "project", - // "backendService" + // "zone", + // "disk" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to which the Signed URL Key should be added. The name should conform to RFC1035.", + // "disk": { + // "description": "The name of the persistent disk.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -25642,173 +39878,112 @@ func (c *BackendServicesAddSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (* // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // } - // }, - // "path": "{project}/global/backendServices/{backendService}/addSignedUrlKey", - // "request": { - // "$ref": "SignedUrlKey" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.backendServices.aggregatedList": - -type BackendServicesAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// AggregatedList: Retrieves the list of all BackendService resources, -// regional and global, available to the specified project. 
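A sketch of compute.disks.resize from the same setup; GCE disks can only grow, and the call returns a zone Operation to poll:

op, err := svc.Disks.Resize("my-project", "us-central1-a", "example-disk",
	&compute.DisksResizeRequest{SizeGb: 500}). // new size in GB; must exceed the current size
	Context(ctx).
	Do()
if err != nil {
	log.Fatal(err)
}
// The resize is asynchronous: poll the returned zone operation until Status is "DONE".
log.Printf("resize operation %s: %s", op.Name, op.Status)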
-func (r *BackendServicesService) AggregatedList(project string) *BackendServicesAggregatedListCall { - c := &BackendServicesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/disks/{disk}/resize", + // "request": { + // "$ref": "DisksResizeRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *BackendServicesAggregatedListCall) Filter(filter string) *BackendServicesAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c } -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *BackendServicesAggregatedListCall) MaxResults(maxResults int64) *BackendServicesAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} +// method id "compute.disks.setIamPolicy": -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. 
-// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *BackendServicesAggregatedListCall) OrderBy(orderBy string) *BackendServicesAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c +type DisksSetIamPolicyCall struct { + s *Service + project string + zone string + resource string + policy *Policy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *BackendServicesAggregatedListCall) PageToken(pageToken string) *BackendServicesAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +func (r *DisksService) SetIamPolicy(project string, zone string, resource string, policy *Policy) *DisksSetIamPolicyCall { + c := &DisksSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.resource = resource + c.policy = policy return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesAggregatedListCall) Fields(s ...googleapi.Field) *BackendServicesAggregatedListCall { +func (c *DisksSetIamPolicyCall) Fields(s ...googleapi.Field) *DisksSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *BackendServicesAggregatedListCall) IfNoneMatch(entityTag string) *BackendServicesAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesAggregatedListCall) Context(ctx context.Context) *BackendServicesAggregatedListCall { +func (c *DisksSetIamPolicyCall) Context(ctx context.Context) *DisksSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *BackendServicesAggregatedListCall) Header() http.Header { +func (c *DisksSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/backendServices") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.aggregatedList" call. -// Exactly one of *BackendServiceAggregatedList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *BackendServiceAggregatedList.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*BackendServiceAggregatedList, error) { +// Do executes the "compute.disks.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *DisksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -25827,7 +40002,7 @@ func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*B if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &BackendServiceAggregatedList{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -25839,95 +40014,73 @@ func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*B } return ret, nil // { - // "description": "Retrieves the list of all BackendService resources, regional and global, available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.backendServices.aggregatedList", + // "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", + // "httpMethod": "POST", + // "id": "compute.disks.setIamPolicy", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "resource" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, - // "project": { - // "description": "Name of the project scoping this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/aggregated/backendServices", + // "path": "{project}/zones/{zone}/disks/{resource}/setIamPolicy", + // "request": { + // "$ref": "Policy" + // }, // "response": { - // "$ref": "BackendServiceAggregatedList" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *BackendServicesAggregatedListCall) Pages(ctx context.Context, f func(*BackendServiceAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.backendServices.delete": +// method id "compute.disks.setLabels": -type BackendServicesDeleteCall struct { - s *Service - project string - backendService string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type DisksSetLabelsCall struct { + s *Service + project string + zone string + resource string + zonesetlabelsrequest *ZoneSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified BackendService resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/delete -func (r *BackendServicesService) Delete(project string, backendService string) *BackendServicesDeleteCall { - c := &BackendServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetLabels: Sets the labels on a disk. To learn more about labels, +// read the Labeling Resources documentation. +func (r *DisksService) SetLabels(project string, zone string, resource string, zonesetlabelsrequest *ZoneSetLabelsRequest) *DisksSetLabelsCall { + c := &DisksSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendService = backendService + c.zone = zone + c.resource = resource + c.zonesetlabelsrequest = zonesetlabelsrequest return c } @@ -25945,7 +40098,7 @@ func (r *BackendServicesService) Delete(project string, backendService string) * // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
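The getIamPolicy/setIamPolicy pair above supports a read-modify-write flow; the role and member below are illustrative, and Binding/Policy are the IAM types generated elsewhere in this same package:

policy, err := svc.Disks.GetIamPolicy("my-project", "us-central1-a", "example-disk").Do()
if err != nil {
	log.Fatal(err)
}
policy.Bindings = append(policy.Bindings, &compute.Binding{
	Role:    "roles/compute.viewer",             // example role
	Members: []string{"user:alice@example.com"}, // example member
})
// In this revision SetIamPolicy takes the Policy directly, as shown above.
if _, err := svc.Disks.SetIamPolicy("my-project", "us-central1-a", "example-disk", policy).Do(); err != nil {
	log.Fatal(err)
}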
-func (c *BackendServicesDeleteCall) RequestId(requestId string) *BackendServicesDeleteCall { +func (c *DisksSetLabelsCall) RequestId(requestId string) *DisksSetLabelsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -25953,7 +40106,7 @@ func (c *BackendServicesDeleteCall) RequestId(requestId string) *BackendServices // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesDeleteCall) Fields(s ...googleapi.Field) *BackendServicesDeleteCall { +func (c *DisksSetLabelsCall) Fields(s ...googleapi.Field) *DisksSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -25961,47 +40114,53 @@ func (c *BackendServicesDeleteCall) Fields(s ...googleapi.Field) *BackendService // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesDeleteCall) Context(ctx context.Context) *BackendServicesDeleteCall { +func (c *DisksSetLabelsCall) Context(ctx context.Context) *DisksSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesDeleteCall) Header() http.Header { +func (c *DisksSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.delete" call. +// Do executes the "compute.disks.setLabels" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *DisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -26032,21 +40191,15 @@ func (c *BackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Deletes the specified BackendService resource.", - // "httpMethod": "DELETE", - // "id": "compute.backendServices.delete", + // "description": "Sets the labels on a disk. To learn more about labels, read the Labeling Resources documentation.", + // "httpMethod": "POST", + // "id": "compute.disks.setLabels", // "parameterOrder": [ // "project", - // "backendService" + // "zone", + // "resource" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -26058,9 +40211,26 @@ func (c *BackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/backendServices/{backendService}", + // "path": "{project}/zones/{zone}/disks/{resource}/setLabels", + // "request": { + // "$ref": "ZoneSetLabelsRequest" + // }, // "response": { // "$ref": "Operation" // }, @@ -26072,50 +40242,34 @@ func (c *BackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.backendServices.deleteSignedUrlKey": +// method id "compute.disks.testIamPermissions": -type BackendServicesDeleteSignedUrlKeyCall struct { - s *Service - project string - backendService string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type DisksTestIamPermissionsCall struct { + s *Service + project string + zone string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// DeleteSignedUrlKey: Deletes the given Signed URL Key from the -// specified backend service. 
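compute.disks.setLabels, shown above, expects the disk's current label fingerprint so that concurrent label updates do not clobber each other; it is usually read back from a prior Disks.Get. The labels below are illustrative:

d, err := svc.Disks.Get("my-project", "us-central1-a", "example-disk").Do()
if err != nil {
	log.Fatal(err)
}
op, err := svc.Disks.SetLabels("my-project", "us-central1-a", "example-disk",
	&compute.ZoneSetLabelsRequest{
		LabelFingerprint: d.LabelFingerprint, // optimistic-concurrency guard
		Labels:           map[string]string{"env": "test", "owner": "minikube"},
	}).Do()
if err != nil {
	log.Fatal(err)
}
log.Printf("setLabels operation: %s", op.Name)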
-func (r *BackendServicesService) DeleteSignedUrlKey(project string, backendService string, keyName string) *BackendServicesDeleteSignedUrlKeyCall { - c := &BackendServicesDeleteSignedUrlKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *DisksService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *DisksTestIamPermissionsCall { + c := &DisksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendService = backendService - c.urlParams_.Set("keyName", keyName) - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendServicesDeleteSignedUrlKeyCall) RequestId(requestId string) *BackendServicesDeleteSignedUrlKeyCall { - c.urlParams_.Set("requestId", requestId) + c.zone = zone + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesDeleteSignedUrlKeyCall) Fields(s ...googleapi.Field) *BackendServicesDeleteSignedUrlKeyCall { +func (c *DisksTestIamPermissionsCall) Fields(s ...googleapi.Field) *DisksTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -26123,47 +40277,53 @@ func (c *BackendServicesDeleteSignedUrlKeyCall) Fields(s ...googleapi.Field) *Ba // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesDeleteSignedUrlKeyCall) Context(ctx context.Context) *BackendServicesDeleteSignedUrlKeyCall { +func (c *DisksTestIamPermissionsCall) Context(ctx context.Context) *DisksTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *BackendServicesDeleteSignedUrlKeyCall) Header() http.Header { +func (c *DisksTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesDeleteSignedUrlKeyCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/deleteSignedUrlKey") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.deleteSignedUrlKey" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *BackendServicesDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.disks.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -26182,7 +40342,7 @@ func (c *BackendServicesDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -26194,139 +40354,144 @@ func (c *BackendServicesDeleteSignedUrlKeyCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Deletes the given Signed URL Key from the specified backend service.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.backendServices.deleteSignedUrlKey", + // "id": "compute.disks.testIamPermissions", // "parameterOrder": [ // "project", - // "backendService", - // "keyName" + // "zone", + // "resource" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to which the Signed URL Key should be added. The name should conform to RFC1035.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "keyName": { - // "description": "The name of the Signed URL Key to delete.", - // "location": "query", + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/global/backendServices/{backendService}/deleteSignedUrlKey", + // "path": "{project}/zones/{zone}/disks/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.backendServices.get": +// method id "compute.firewalls.delete": -type BackendServicesGetCall struct { - s *Service - project string - backendService string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type FirewallsDeleteCall struct { + s *Service + project string + firewall string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified BackendService resource. Get a list of -// available backend services by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/get -func (r *BackendServicesService) Get(project string, backendService string) *BackendServicesGetCall { - c := &BackendServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified firewall. +// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/delete +func (r *FirewallsService) Delete(project string, firewall string) *FirewallsDeleteCall { + c := &FirewallsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendService = backendService + c.firewall = firewall + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *FirewallsDeleteCall) RequestId(requestId string) *FirewallsDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesGetCall) Fields(s ...googleapi.Field) *BackendServicesGetCall { +func (c *FirewallsDeleteCall) Fields(s ...googleapi.Field) *FirewallsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. 
This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *BackendServicesGetCall) IfNoneMatch(entityTag string) *BackendServicesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesGetCall) Context(ctx context.Context) *BackendServicesGetCall { +func (c *FirewallsDeleteCall) Context(ctx context.Context) *FirewallsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesGetCall) Header() http.Header { +func (c *FirewallsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + reqHeaders[k] = v } + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, + "project": c.project, + "firewall": c.firewall, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.get" call. -// Exactly one of *BackendService or error will be non-nil. Any non-2xx +// Do executes the "compute.firewalls.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *BackendService.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendService, error) { +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *FirewallsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -26345,7 +40510,7 @@ func (c *BackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendServi if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &BackendService{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -26357,16 +40522,16 @@ func (c *BackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendServi } return ret, nil // { - // "description": "Returns the specified BackendService resource. Get a list of available backend services by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.backendServices.get", + // "description": "Deletes the specified firewall.", + // "httpMethod": "DELETE", + // "id": "compute.firewalls.delete", // "parameterOrder": [ // "project", - // "backendService" + // "firewall" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to return.", + // "firewall": { + // "description": "Name of the firewall rule to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -26378,101 +40543,111 @@ func (c *BackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendServi // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/global/backendServices/{backendService}", + // "path": "{project}/global/firewalls/{firewall}", // "response": { - // "$ref": "BackendService" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.backendServices.getHealth": +// method id "compute.firewalls.get": -type BackendServicesGetHealthCall struct { - s *Service - project string - backendService string - resourcegroupreference *ResourceGroupReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type FirewallsGetCall struct { + s *Service + project string + firewall string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// GetHealth: Gets the most recent health check results for this -// BackendService. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/getHealth -func (r *BackendServicesService) GetHealth(project string, backendService string, resourcegroupreference *ResourceGroupReference) *BackendServicesGetHealthCall { - c := &BackendServicesGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified firewall. +// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/get +func (r *FirewallsService) Get(project string, firewall string) *FirewallsGetCall { + c := &FirewallsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendService = backendService - c.resourcegroupreference = resourcegroupreference + c.firewall = firewall return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesGetHealthCall) Fields(s ...googleapi.Field) *BackendServicesGetHealthCall { +func (c *FirewallsGetCall) Fields(s ...googleapi.Field) *FirewallsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *FirewallsGetCall) IfNoneMatch(entityTag string) *FirewallsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesGetHealthCall) Context(ctx context.Context) *BackendServicesGetHealthCall { +func (c *FirewallsGetCall) Context(ctx context.Context) *FirewallsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesGetHealthCall) Header() http.Header { +func (c *FirewallsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesGetHealthCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.resourcegroupreference) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/getHealth") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, + "project": c.project, + "firewall": c.firewall, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.getHealth" call. 
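// ---------------------------------------------------------------------------
// Editorial usage sketch (not part of the vendored patch): FirewallsGetCall
// above carries the IfNoneMatch helper, so conditional GETs can be detected
// with googleapi.IsNotModified, as the generated comments describe. A hedged
// sketch; the import paths, service value, and the cached ETag are assumptions.
// ---------------------------------------------------------------------------
package example

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
	"google.golang.org/api/googleapi"
)

func getFirewallIfChanged(ctx context.Context, svc *compute.Service, cachedETag string) *compute.Firewall {
	fw, err := svc.Firewalls.Get("my-project", "allow-ssh").
		IfNoneMatch(cachedETag). // server replies 304 if the resource is unchanged
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		log.Print("firewall unchanged since last fetch; reuse the cached copy")
		return nil
	}
	if err != nil {
		log.Fatalf("firewalls.get: %v", err)
	}
	return fw
}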
-// Exactly one of *BackendServiceGroupHealth or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *BackendServiceGroupHealth.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*BackendServiceGroupHealth, error) { +// Do executes the "compute.firewalls.get" call. +// Exactly one of *Firewall or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Firewall.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *FirewallsGetCall) Do(opts ...googleapi.CallOption) (*Firewall, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -26491,7 +40666,7 @@ func (c *BackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*Backen if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &BackendServiceGroupHealth{ + ret := &Firewall{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -26503,34 +40678,32 @@ func (c *BackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*Backen } return ret, nil // { - // "description": "Gets the most recent health check results for this BackendService.", - // "httpMethod": "POST", - // "id": "compute.backendServices.getHealth", + // "description": "Returns the specified firewall.", + // "httpMethod": "GET", + // "id": "compute.firewalls.get", // "parameterOrder": [ // "project", - // "backendService" + // "firewall" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to which the queried instance belongs.", + // "firewall": { + // "description": "Name of the firewall rule to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, // "project": { + // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/backendServices/{backendService}/getHealth", - // "request": { - // "$ref": "ResourceGroupReference" - // }, + // "path": "{project}/global/firewalls/{firewall}", // "response": { - // "$ref": "BackendServiceGroupHealth" + // "$ref": "Firewall" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -26541,26 +40714,24 @@ func (c *BackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*Backen } -// method id "compute.backendServices.insert": +// method id "compute.firewalls.insert": -type BackendServicesInsertCall struct { - s *Service - project string - backendservice *BackendService - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type FirewallsInsertCall struct { + s *Service + project string + firewall *Firewall + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a BackendService resource in the specified project 
-// using the data included in the request. There are several -// restrictions and guidelines to keep in mind when creating a backend -// service. Read Restrictions and Guidelines for more information. -// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/insert -func (r *BackendServicesService) Insert(project string, backendservice *BackendService) *BackendServicesInsertCall { - c := &BackendServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a firewall rule in the specified project using the +// data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/insert +func (r *FirewallsService) Insert(project string, firewall *Firewall) *FirewallsInsertCall { + c := &FirewallsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendservice = backendservice + c.firewall = firewall return c } @@ -26578,7 +40749,7 @@ func (r *BackendServicesService) Insert(project string, backendservice *BackendS // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendServicesInsertCall) RequestId(requestId string) *BackendServicesInsertCall { +func (c *FirewallsInsertCall) RequestId(requestId string) *FirewallsInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -26586,7 +40757,7 @@ func (c *BackendServicesInsertCall) RequestId(requestId string) *BackendServices // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesInsertCall) Fields(s ...googleapi.Field) *BackendServicesInsertCall { +func (c *FirewallsInsertCall) Fields(s ...googleapi.Field) *FirewallsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -26594,34 +40765,34 @@ func (c *BackendServicesInsertCall) Fields(s ...googleapi.Field) *BackendService // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesInsertCall) Context(ctx context.Context) *BackendServicesInsertCall { +func (c *FirewallsInsertCall) Context(ctx context.Context) *FirewallsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesInsertCall) Header() http.Header { +func (c *FirewallsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -26631,14 +40802,14 @@ func (c *BackendServicesInsertCall) doRequest(alt string) (*http.Response, error return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.insert" call. +// Do executes the "compute.firewalls.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *FirewallsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -26669,9 +40840,9 @@ func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Creates a BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a backend service. Read Restrictions and Guidelines for more information.", + // "description": "Creates a firewall rule in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.backendServices.insert", + // "id": "compute.firewalls.insert", // "parameterOrder": [ // "project" // ], @@ -26689,9 +40860,9 @@ func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation // "type": "string" // } // }, - // "path": "{project}/global/backendServices", + // "path": "{project}/global/firewalls", // "request": { - // "$ref": "BackendService" + // "$ref": "Firewall" // }, // "response": { // "$ref": "Operation" @@ -26704,9 +40875,9 @@ func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.backendServices.list": +// method id "compute.firewalls.list": -type BackendServicesListCall struct { +type FirewallsListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -26715,11 +40886,11 @@ type BackendServicesListCall struct { header_ http.Header } -// List: Retrieves the list of BackendService resources available to the -// specified project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/list -func (r *BackendServicesService) List(project string) *BackendServicesListCall { - c := &BackendServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of firewall rules available to the specified +// project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/list +func (r *FirewallsService) List(project string) *FirewallsListCall { + c := &FirewallsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -26750,7 +40921,7 @@ func (r *BackendServicesService) List(project string) *BackendServicesListCall { // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. 
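// ---------------------------------------------------------------------------
// Editorial usage sketch (not part of the vendored patch): firewalls.insert
// above takes a *Firewall body and returns an *Operation. A hedged sketch of
// building that body; the Firewall/FirewallAllowed field names follow the usual
// generated layout for this API and, like the project and rule names, are
// assumptions here.
// ---------------------------------------------------------------------------
package example

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

func insertFirewall(ctx context.Context, svc *compute.Service) {
	rule := &compute.Firewall{
		Name:         "allow-ssh",
		SourceRanges: []string{"0.0.0.0/0"},
		Allowed: []*compute.FirewallAllowed{
			{IPProtocol: "tcp", Ports: []string{"22"}},
		},
	}
	op, err := svc.Firewalls.Insert("my-project", rule).Context(ctx).Do()
	if err != nil {
		log.Fatalf("firewalls.insert: %v", err)
	}
	log.Printf("insert started: operation %s", op.Name)
}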
-func (c *BackendServicesListCall) Filter(filter string) *BackendServicesListCall { +func (c *FirewallsListCall) Filter(filter string) *FirewallsListCall { c.urlParams_.Set("filter", filter) return c } @@ -26761,7 +40932,7 @@ func (c *BackendServicesListCall) Filter(filter string) *BackendServicesListCall // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *BackendServicesListCall) MaxResults(maxResults int64) *BackendServicesListCall { +func (c *FirewallsListCall) MaxResults(maxResults int64) *FirewallsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -26778,7 +40949,7 @@ func (c *BackendServicesListCall) MaxResults(maxResults int64) *BackendServicesL // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *BackendServicesListCall) OrderBy(orderBy string) *BackendServicesListCall { +func (c *FirewallsListCall) OrderBy(orderBy string) *FirewallsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -26786,7 +40957,7 @@ func (c *BackendServicesListCall) OrderBy(orderBy string) *BackendServicesListCa // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *BackendServicesListCall) PageToken(pageToken string) *BackendServicesListCall { +func (c *FirewallsListCall) PageToken(pageToken string) *FirewallsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -26794,7 +40965,7 @@ func (c *BackendServicesListCall) PageToken(pageToken string) *BackendServicesLi // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesListCall) Fields(s ...googleapi.Field) *BackendServicesListCall { +func (c *FirewallsListCall) Fields(s ...googleapi.Field) *FirewallsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -26804,7 +40975,7 @@ func (c *BackendServicesListCall) Fields(s ...googleapi.Field) *BackendServicesL // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *BackendServicesListCall) IfNoneMatch(entityTag string) *BackendServicesListCall { +func (c *FirewallsListCall) IfNoneMatch(entityTag string) *FirewallsListCall { c.ifNoneMatch_ = entityTag return c } @@ -26812,21 +40983,21 @@ func (c *BackendServicesListCall) IfNoneMatch(entityTag string) *BackendServices // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesListCall) Context(ctx context.Context) *BackendServicesListCall { +func (c *FirewallsListCall) Context(ctx context.Context) *FirewallsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *BackendServicesListCall) Header() http.Header { +func (c *FirewallsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesListCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -26837,7 +41008,7 @@ func (c *BackendServicesListCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders @@ -26847,14 +41018,14 @@ func (c *BackendServicesListCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.list" call. -// Exactly one of *BackendServiceList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *BackendServiceList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServiceList, error) { +// Do executes the "compute.firewalls.list" call. +// Exactly one of *FirewallList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *FirewallList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -26873,7 +41044,7 @@ func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServ if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &BackendServiceList{ + ret := &FirewallList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -26885,9 +41056,9 @@ func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServ } return ret, nil // { - // "description": "Retrieves the list of BackendService resources available to the specified project.", + // "description": "Retrieves the list of firewall rules available to the specified project.", // "httpMethod": "GET", - // "id": "compute.backendServices.list", + // "id": "compute.firewalls.list", // "parameterOrder": [ // "project" // ], @@ -26923,9 +41094,9 @@ func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServ // "type": "string" // } // }, - // "path": "{project}/global/backendServices", + // "path": "{project}/global/firewalls", // "response": { - // "$ref": "BackendServiceList" + // "$ref": "FirewallList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -26939,7 +41110,7 @@ func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServ // Pages invokes f for each page of results. 
// A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *BackendServicesListCall) Pages(ctx context.Context, f func(*BackendServiceList) error) error { +func (c *FirewallsListCall) Pages(ctx context.Context, f func(*FirewallList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -26957,30 +41128,27 @@ func (c *BackendServicesListCall) Pages(ctx context.Context, f func(*BackendServ } } -// method id "compute.backendServices.patch": +// method id "compute.firewalls.patch": -type BackendServicesPatchCall struct { - s *Service - project string - backendService string - backendservice *BackendService - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type FirewallsPatchCall struct { + s *Service + project string + firewall string + firewall2 *Firewall + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Patches the specified BackendService resource with the data -// included in the request. There are several restrictions and -// guidelines to keep in mind when updating a backend service. Read -// Restrictions and Guidelines for more information. This method -// supports PATCH semantics and uses the JSON merge patch format and -// processing rules. -// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/patch -func (r *BackendServicesService) Patch(project string, backendService string, backendservice *BackendService) *BackendServicesPatchCall { - c := &BackendServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates the specified firewall rule with the data included in +// the request. This method supports PATCH semantics and uses the JSON +// merge patch format and processing rules. +// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/patch +func (r *FirewallsService) Patch(project string, firewall string, firewall2 *Firewall) *FirewallsPatchCall { + c := &FirewallsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendService = backendService - c.backendservice = backendservice + c.firewall = firewall + c.firewall2 = firewall2 return c } @@ -26998,7 +41166,7 @@ func (r *BackendServicesService) Patch(project string, backendService string, ba // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendServicesPatchCall) RequestId(requestId string) *BackendServicesPatchCall { +func (c *FirewallsPatchCall) RequestId(requestId string) *FirewallsPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -27006,7 +41174,7 @@ func (c *BackendServicesPatchCall) RequestId(requestId string) *BackendServicesP // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesPatchCall) Fields(s ...googleapi.Field) *BackendServicesPatchCall { +func (c *FirewallsPatchCall) Fields(s ...googleapi.Field) *FirewallsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -27014,52 +41182,52 @@ func (c *BackendServicesPatchCall) Fields(s ...googleapi.Field) *BackendServices // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
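// ---------------------------------------------------------------------------
// Editorial usage sketch (not part of the vendored patch): FirewallsListCall
// above supports Filter/MaxResults and the Pages helper, which walks
// nextPageToken for the caller. A hedged sketch; the filter expression, page
// size, and the FirewallList/Firewall field names (Items, Name) are assumptions
// in the usual generated shape.
// ---------------------------------------------------------------------------
package example

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

func listSSHRules(ctx context.Context, svc *compute.Service) error {
	call := svc.Firewalls.List("my-project").
		Filter("name eq allow-.*"). // field_name eq literal_string, per the Filter docs above
		MaxResults(100)
	return call.Pages(ctx, func(page *compute.FirewallList) error {
		for _, fw := range page.Items {
			log.Printf("firewall rule: %s", fw.Name)
		}
		return nil // returning a non-nil error halts the iteration
	})
}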
-func (c *BackendServicesPatchCall) Context(ctx context.Context) *BackendServicesPatchCall { +func (c *FirewallsPatchCall) Context(ctx context.Context) *FirewallsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesPatchCall) Header() http.Header { +func (c *FirewallsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall2) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, + "project": c.project, + "firewall": c.firewall, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.patch" call. +// Do executes the "compute.firewalls.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *FirewallsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -27090,16 +41258,16 @@ func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Patches the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "description": "Updates the specified firewall rule with the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", // "httpMethod": "PATCH", - // "id": "compute.backendServices.patch", + // "id": "compute.firewalls.patch", // "parameterOrder": [ // "project", - // "backendService" + // "firewall" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to patch.", + // "firewall": { + // "description": "Name of the firewall rule to patch.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -27118,180 +41286,9 @@ func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // } // }, - // "path": "{project}/global/backendServices/{backendService}", - // "request": { - // "$ref": "BackendService" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.backendServices.setSecurityPolicy": - -type BackendServicesSetSecurityPolicyCall struct { - s *Service - project string - backendService string - securitypolicyreference *SecurityPolicyReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetSecurityPolicy: Sets the security policy for the specified backend -// service. -func (r *BackendServicesService) SetSecurityPolicy(project string, backendService string, securitypolicyreference *SecurityPolicyReference) *BackendServicesSetSecurityPolicyCall { - c := &BackendServicesSetSecurityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.backendService = backendService - c.securitypolicyreference = securitypolicyreference - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendServicesSetSecurityPolicyCall) RequestId(requestId string) *BackendServicesSetSecurityPolicyCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BackendServicesSetSecurityPolicyCall) Fields(s ...googleapi.Field) *BackendServicesSetSecurityPolicyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
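// ---------------------------------------------------------------------------
// Editorial usage sketch (not part of the vendored patch): firewalls.patch
// above takes the rule name plus a sparse *Firewall body and applies JSON
// merge-patch semantics, so only the populated fields change (the full-replace
// variant is the PUT-based Update method later in this hunk). A hedged sketch;
// names and field layout are assumptions.
// ---------------------------------------------------------------------------
package example

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

func restrictSSHSource(ctx context.Context, svc *compute.Service) {
	// Only SourceRanges is set, so only that field is patched on the existing rule.
	patch := &compute.Firewall{SourceRanges: []string{"10.0.0.0/8"}}
	op, err := svc.Firewalls.Patch("my-project", "allow-ssh", patch).Context(ctx).Do()
	if err != nil {
		log.Fatalf("firewalls.patch: %v", err)
	}
	log.Printf("patch started: operation %s", op.Name)
}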
-func (c *BackendServicesSetSecurityPolicyCall) Context(ctx context.Context) *BackendServicesSetSecurityPolicyCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BackendServicesSetSecurityPolicyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BackendServicesSetSecurityPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyreference) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/setSecurityPolicy") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.backendServices.setSecurityPolicy" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *BackendServicesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Sets the security policy for the specified backend service.", - // "httpMethod": "POST", - // "id": "compute.backendServices.setSecurityPolicy", - // "parameterOrder": [ - // "project", - // "backendService" - // ], - // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to which the security policy should be set. The name should conform to RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "{project}/global/backendServices/{backendService}/setSecurityPolicy", + // "path": "{project}/global/firewalls/{firewall}", // "request": { - // "$ref": "SecurityPolicyReference" + // "$ref": "Firewall" // }, // "response": { // "$ref": "Operation" @@ -27304,9 +41301,9 @@ func (c *BackendServicesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) } -// method id "compute.backendServices.testIamPermissions": +// method id "compute.firewalls.testIamPermissions": -type BackendServicesTestIamPermissionsCall struct { +type FirewallsTestIamPermissionsCall struct { s *Service project string resource string @@ -27318,8 +41315,8 @@ type BackendServicesTestIamPermissionsCall struct { // TestIamPermissions: Returns permissions that a caller has on the // specified resource. -func (r *BackendServicesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *BackendServicesTestIamPermissionsCall { - c := &BackendServicesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *FirewallsService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *FirewallsTestIamPermissionsCall { + c := &FirewallsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.resource = resource c.testpermissionsrequest = testpermissionsrequest @@ -27329,7 +41326,7 @@ func (r *BackendServicesService) TestIamPermissions(project string, resource str // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesTestIamPermissionsCall) Fields(s ...googleapi.Field) *BackendServicesTestIamPermissionsCall { +func (c *FirewallsTestIamPermissionsCall) Fields(s ...googleapi.Field) *FirewallsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -27337,21 +41334,21 @@ func (c *BackendServicesTestIamPermissionsCall) Fields(s ...googleapi.Field) *Ba // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesTestIamPermissionsCall) Context(ctx context.Context) *BackendServicesTestIamPermissionsCall { +func (c *FirewallsTestIamPermissionsCall) Context(ctx context.Context) *FirewallsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *BackendServicesTestIamPermissionsCall) Header() http.Header { +func (c *FirewallsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -27364,7 +41361,7 @@ func (c *BackendServicesTestIamPermissionsCall) doRequest(alt string) (*http.Res } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -27375,14 +41372,14 @@ func (c *BackendServicesTestIamPermissionsCall) doRequest(alt string) (*http.Res return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.testIamPermissions" call. +// Do executes the "compute.firewalls.testIamPermissions" call. // Exactly one of *TestPermissionsResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *TestPermissionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *BackendServicesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *FirewallsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -27415,7 +41412,7 @@ func (c *BackendServicesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) // { // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.backendServices.testIamPermissions", + // "id": "compute.firewalls.testIamPermissions", // "parameterOrder": [ // "project", // "resource" @@ -27431,12 +41428,12 @@ func (c *BackendServicesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) // "resource": { // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + // "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/backendServices/{resource}/testIamPermissions", + // "path": "{project}/global/firewalls/{resource}/testIamPermissions", // "request": { // "$ref": "TestPermissionsRequest" // }, @@ -27452,28 +41449,28 @@ func (c *BackendServicesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) } -// method id "compute.backendServices.update": +// method id "compute.firewalls.update": -type BackendServicesUpdateCall struct { - s *Service - project string - backendService string - backendservice *BackendService - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type FirewallsUpdateCall struct { + s *Service + project string + firewall string + firewall2 *Firewall + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates the specified BackendService resource with the data -// included in the request. There are several restrictions and -// guidelines to keep in mind when updating a backend service. Read -// Restrictions and Guidelines for more information. -// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/update -func (r *BackendServicesService) Update(project string, backendService string, backendservice *BackendService) *BackendServicesUpdateCall { - c := &BackendServicesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates the specified firewall rule with the data included in +// the request. Using PUT method, can only update following fields of +// firewall rule: allowed, description, sourceRanges, sourceTags, +// targetTags. +// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/update +func (r *FirewallsService) Update(project string, firewall string, firewall2 *Firewall) *FirewallsUpdateCall { + c := &FirewallsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendService = backendService - c.backendservice = backendservice + c.firewall = firewall + c.firewall2 = firewall2 return c } @@ -27491,7 +41488,7 @@ func (r *BackendServicesService) Update(project string, backendService string, b // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendServicesUpdateCall) RequestId(requestId string) *BackendServicesUpdateCall { +func (c *FirewallsUpdateCall) RequestId(requestId string) *FirewallsUpdateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -27499,7 +41496,7 @@ func (c *BackendServicesUpdateCall) RequestId(requestId string) *BackendServices // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesUpdateCall) Fields(s ...googleapi.Field) *BackendServicesUpdateCall { +func (c *FirewallsUpdateCall) Fields(s ...googleapi.Field) *FirewallsUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -27507,52 +41504,52 @@ func (c *BackendServicesUpdateCall) Fields(s ...googleapi.Field) *BackendService // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesUpdateCall) Context(ctx context.Context) *BackendServicesUpdateCall { +func (c *FirewallsUpdateCall) Context(ctx context.Context) *FirewallsUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesUpdateCall) Header() http.Header { +func (c *FirewallsUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall2) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, + "project": c.project, + "firewall": c.firewall, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.update" call. +// Do executes the "compute.firewalls.update" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *FirewallsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -27583,16 +41580,16 @@ func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Updates the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information.", + // "description": "Updates the specified firewall rule with the data included in the request. 
Using PUT method, can only update following fields of firewall rule: allowed, description, sourceRanges, sourceTags, targetTags.", // "httpMethod": "PUT", - // "id": "compute.backendServices.update", + // "id": "compute.firewalls.update", // "parameterOrder": [ // "project", - // "backendService" + // "firewall" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to update.", + // "firewall": { + // "description": "Name of the firewall rule to update.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -27611,9 +41608,9 @@ func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation // "type": "string" // } // }, - // "path": "{project}/global/backendServices/{backendService}", + // "path": "{project}/global/firewalls/{firewall}", // "request": { - // "$ref": "BackendService" + // "$ref": "Firewall" // }, // "response": { // "$ref": "Operation" @@ -27626,157 +41623,9 @@ func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.clientSslPolicies.testIamPermissions": - -type ClientSslPoliciesTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *ClientSslPoliciesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *ClientSslPoliciesTestIamPermissionsCall { - c := &ClientSslPoliciesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ClientSslPoliciesTestIamPermissionsCall) Fields(s ...googleapi.Field) *ClientSslPoliciesTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ClientSslPoliciesTestIamPermissionsCall) Context(ctx context.Context) *ClientSslPoliciesTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ClientSslPoliciesTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ClientSslPoliciesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/clientSslPolicies/{resource}/testIamPermissions") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.clientSslPolicies.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ClientSslPoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.clientSslPolicies.testIamPermissions", - // "parameterOrder": [ - // "project", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/clientSslPolicies/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.diskTypes.aggregatedList": +// method id "compute.forwardingRules.aggregatedList": -type DiskTypesAggregatedListCall struct { +type ForwardingRulesAggregatedListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -27785,10 +41634,10 @@ type DiskTypesAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of disk types. -// For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/aggregatedList -func (r *DiskTypesService) AggregatedList(project string) *DiskTypesAggregatedListCall { - c := &DiskTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of forwarding rules. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/aggregatedList +func (r *ForwardingRulesService) AggregatedList(project string) *ForwardingRulesAggregatedListCall { + c := &ForwardingRulesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -27819,7 +41668,7 @@ func (r *DiskTypesService) AggregatedList(project string) *DiskTypesAggregatedLi // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *DiskTypesAggregatedListCall) Filter(filter string) *DiskTypesAggregatedListCall { +func (c *ForwardingRulesAggregatedListCall) Filter(filter string) *ForwardingRulesAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -27830,7 +41679,7 @@ func (c *DiskTypesAggregatedListCall) Filter(filter string) *DiskTypesAggregated // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *DiskTypesAggregatedListCall) MaxResults(maxResults int64) *DiskTypesAggregatedListCall { +func (c *ForwardingRulesAggregatedListCall) MaxResults(maxResults int64) *ForwardingRulesAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -27847,7 +41696,7 @@ func (c *DiskTypesAggregatedListCall) MaxResults(maxResults int64) *DiskTypesAgg // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *DiskTypesAggregatedListCall) OrderBy(orderBy string) *DiskTypesAggregatedListCall { +func (c *ForwardingRulesAggregatedListCall) OrderBy(orderBy string) *ForwardingRulesAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -27855,7 +41704,7 @@ func (c *DiskTypesAggregatedListCall) OrderBy(orderBy string) *DiskTypesAggregat // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *DiskTypesAggregatedListCall) PageToken(pageToken string) *DiskTypesAggregatedListCall { +func (c *ForwardingRulesAggregatedListCall) PageToken(pageToken string) *ForwardingRulesAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -27863,7 +41712,7 @@ func (c *DiskTypesAggregatedListCall) PageToken(pageToken string) *DiskTypesAggr // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DiskTypesAggregatedListCall) Fields(s ...googleapi.Field) *DiskTypesAggregatedListCall { +func (c *ForwardingRulesAggregatedListCall) Fields(s ...googleapi.Field) *ForwardingRulesAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -27873,7 +41722,7 @@ func (c *DiskTypesAggregatedListCall) Fields(s ...googleapi.Field) *DiskTypesAgg // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
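// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the vendored diff): listing forwarding
// rules across all regions with the aggregated-list call generated above. The
// Filter and MaxResults setters are the ones shown in this hunk; Pages is the
// paging helper generated for this call further down in the same hunk. The
// package name, import path and helper signature are assumptions for the
// sketch only.
package computeusage

import (
	"context"

	compute "google.golang.org/api/compute/v1" // adjust to the compute version vendored here
)

// visitForwardingRulePages walks every page of the aggregated forwarding-rule
// listing for a project, handing each page to the caller-supplied callback.
func visitForwardingRulePages(ctx context.Context, svc *compute.Service, project string,
	visit func(*compute.ForwardingRuleAggregatedList) error) error {
	call := svc.ForwardingRules.AggregatedList(project).
		Filter("name ne test-rule"). // filter syntax as documented in the comments above
		MaxResults(100)
	// Pages re-issues the request with the returned nextPageToken until the
	// listing is exhausted, stopping early if visit returns a non-nil error.
	return call.Pages(ctx, visit)
}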
-func (c *DiskTypesAggregatedListCall) IfNoneMatch(entityTag string) *DiskTypesAggregatedListCall { +func (c *ForwardingRulesAggregatedListCall) IfNoneMatch(entityTag string) *ForwardingRulesAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -27881,21 +41730,21 @@ func (c *DiskTypesAggregatedListCall) IfNoneMatch(entityTag string) *DiskTypesAg // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DiskTypesAggregatedListCall) Context(ctx context.Context) *DiskTypesAggregatedListCall { +func (c *ForwardingRulesAggregatedListCall) Context(ctx context.Context) *ForwardingRulesAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DiskTypesAggregatedListCall) Header() http.Header { +func (c *ForwardingRulesAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DiskTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *ForwardingRulesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -27906,7 +41755,7 @@ func (c *DiskTypesAggregatedListCall) doRequest(alt string) (*http.Response, err } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/diskTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/forwardingRules") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders @@ -27916,14 +41765,14 @@ func (c *DiskTypesAggregatedListCall) doRequest(alt string) (*http.Response, err return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.diskTypes.aggregatedList" call. -// Exactly one of *DiskTypeAggregatedList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *DiskTypeAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.forwardingRules.aggregatedList" call. +// Exactly one of *ForwardingRuleAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *ForwardingRuleAggregatedList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTypeAggregatedList, error) { +func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*ForwardingRuleAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -27942,7 +41791,7 @@ func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTyp if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &DiskTypeAggregatedList{ + ret := &ForwardingRuleAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -27954,9 +41803,9 @@ func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTyp } return ret, nil // { - // "description": "Retrieves an aggregated list of disk types.", + // "description": "Retrieves an aggregated list of forwarding rules.", // "httpMethod": "GET", - // "id": "compute.diskTypes.aggregatedList", + // "id": "compute.forwardingRules.aggregatedList", // "parameterOrder": [ // "project" // ], @@ -27992,9 +41841,9 @@ func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTyp // "type": "string" // } // }, - // "path": "{project}/aggregated/diskTypes", + // "path": "{project}/aggregated/forwardingRules", // "response": { - // "$ref": "DiskTypeAggregatedList" + // "$ref": "ForwardingRuleAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -28008,7 +41857,7 @@ func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTyp // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *DiskTypesAggregatedListCall) Pages(ctx context.Context, f func(*DiskTypeAggregatedList) error) error { +func (c *ForwardingRulesAggregatedListCall) Pages(ctx context.Context, f func(*ForwardingRuleAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -28026,34 +41875,205 @@ func (c *DiskTypesAggregatedListCall) Pages(ctx context.Context, f func(*DiskTyp } } -// method id "compute.diskTypes.get": +// method id "compute.forwardingRules.delete": -type DiskTypesGetCall struct { - s *Service - project string - zone string - diskType string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ForwardingRulesDeleteCall struct { + s *Service + project string + region string + forwardingRule string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified disk type. Get a list of available disk -// types by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/get -func (r *DiskTypesService) Get(project string, zone string, diskType string) *DiskTypesGetCall { - c := &DiskTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified ForwardingRule resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/delete +func (r *ForwardingRulesService) Delete(project string, region string, forwardingRule string) *ForwardingRulesDeleteCall { + c := &ForwardingRulesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.diskType = diskType + c.region = region + c.forwardingRule = forwardingRule + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. 
Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ForwardingRulesDeleteCall) RequestId(requestId string) *ForwardingRulesDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DiskTypesGetCall) Fields(s ...googleapi.Field) *DiskTypesGetCall { +func (c *ForwardingRulesDeleteCall) Fields(s ...googleapi.Field) *ForwardingRulesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ForwardingRulesDeleteCall) Context(ctx context.Context) *ForwardingRulesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ForwardingRulesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "forwardingRule": c.forwardingRule, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.forwardingRules.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified ForwardingRule resource.", + // "httpMethod": "DELETE", + // "id": "compute.forwardingRules.delete", + // "parameterOrder": [ + // "project", + // "region", + // "forwardingRule" + // ], + // "parameters": { + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.forwardingRules.get": + +type ForwardingRulesGetCall struct { + s *Service + project string + region string + forwardingRule string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified ForwardingRule resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/get +func (r *ForwardingRulesService) Get(project string, region string, forwardingRule string) *ForwardingRulesGetCall { + c := &ForwardingRulesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.forwardingRule = forwardingRule + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
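// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the vendored diff): deleting a regional
// forwarding rule with the Delete call generated above, passing a client-chosen
// request ID so a timed-out-and-retried call is treated as the same operation,
// as described in the RequestId comment. Names and the import path are
// assumptions for the sketch only.
package computeusage

import (
	"context"

	compute "google.golang.org/api/compute/v1" // adjust to the compute version vendored here
)

// deleteForwardingRule issues the delete and returns the resulting Operation.
// requestID, if set, must be a valid non-zero UUID per the RequestId contract.
func deleteForwardingRule(ctx context.Context, svc *compute.Service,
	project, region, name, requestID string) (*compute.Operation, error) {
	call := svc.ForwardingRules.Delete(project, region, name).Context(ctx)
	if requestID != "" {
		call = call.RequestId(requestID)
	}
	// Do returns a long-running Operation; callers typically poll it elsewhere.
	return call.Do()
}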
+func (c *ForwardingRulesGetCall) Fields(s ...googleapi.Field) *ForwardingRulesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -28063,7 +42083,7 @@ func (c *DiskTypesGetCall) Fields(s ...googleapi.Field) *DiskTypesGetCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *DiskTypesGetCall) IfNoneMatch(entityTag string) *DiskTypesGetCall { +func (c *ForwardingRulesGetCall) IfNoneMatch(entityTag string) *ForwardingRulesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -28071,21 +42091,21 @@ func (c *DiskTypesGetCall) IfNoneMatch(entityTag string) *DiskTypesGetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DiskTypesGetCall) Context(ctx context.Context) *DiskTypesGetCall { +func (c *ForwardingRulesGetCall) Context(ctx context.Context) *ForwardingRulesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DiskTypesGetCall) Header() http.Header { +func (c *ForwardingRulesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DiskTypesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -28096,26 +42116,26 @@ func (c *DiskTypesGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/diskTypes/{diskType}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "diskType": c.diskType, + "project": c.project, + "region": c.region, + "forwardingRule": c.forwardingRule, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.diskTypes.get" call. -// Exactly one of *DiskType or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *DiskType.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { +// Do executes the "compute.forwardingRules.get" call. +// Exactly one of *ForwardingRule or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ForwardingRule.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRule, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -28134,7 +42154,7 @@ func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &DiskType{ + ret := &ForwardingRule{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -28146,17 +42166,17 @@ func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { } return ret, nil // { - // "description": "Returns the specified disk type. Get a list of available disk types by making a list() request.", + // "description": "Returns the specified ForwardingRule resource.", // "httpMethod": "GET", - // "id": "compute.diskTypes.get", + // "id": "compute.forwardingRules.get", // "parameterOrder": [ // "project", - // "zone", - // "diskType" + // "region", + // "forwardingRule" // ], // "parameters": { - // "diskType": { - // "description": "Name of the disk type to return.", + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -28169,17 +42189,17 @@ func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/diskTypes/{diskType}", + // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", // "response": { - // "$ref": "DiskType" + // "$ref": "ForwardingRule" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -28190,160 +42210,105 @@ func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { } -// method id "compute.diskTypes.list": +// method id "compute.forwardingRules.insert": -type DiskTypesListCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ForwardingRulesInsertCall struct { + s *Service + project string + region string + forwardingrule *ForwardingRule + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of disk types available to the specified -// project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/list -func (r *DiskTypesService) List(project string, zone string) *DiskTypesListCall { - c := &DiskTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a ForwardingRule resource in the specified project +// and region using the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/insert +func (r *ForwardingRulesService) Insert(project string, region string, forwardingrule *ForwardingRule) *ForwardingRulesInsertCall { + c := &ForwardingRulesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - return c -} - -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. 
-// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *DiskTypesListCall) Filter(filter string) *DiskTypesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *DiskTypesListCall) MaxResults(maxResults int64) *DiskTypesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + c.region = region + c.forwardingrule = forwardingrule return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *DiskTypesListCall) OrderBy(orderBy string) *DiskTypesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. 
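// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the vendored diff): fetching a single
// forwarding rule with the Get call generated above, using IfNoneMatch plus
// googleapi.IsNotModified to avoid re-downloading an unchanged resource. Names
// and import paths are assumptions for the sketch only.
package computeusage

import (
	"context"

	compute "google.golang.org/api/compute/v1" // adjust to the compute version vendored here
	"google.golang.org/api/googleapi"
)

// getForwardingRuleIfChanged returns (nil, nil) when the cached ETag is still
// current, i.e. the server answered 304 Not Modified.
func getForwardingRuleIfChanged(ctx context.Context, svc *compute.Service,
	project, region, name, cachedETag string) (*compute.ForwardingRule, error) {
	call := svc.ForwardingRules.Get(project, region, name).Context(ctx)
	if cachedETag != "" {
		call = call.IfNoneMatch(cachedETag)
	}
	rule, err := call.Do()
	if googleapi.IsNotModified(err) {
		return nil, nil // cached copy is up to date
	}
	if err != nil {
		return nil, err
	}
	return rule, nil
}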
-func (c *DiskTypesListCall) PageToken(pageToken string) *DiskTypesListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ForwardingRulesInsertCall) RequestId(requestId string) *ForwardingRulesInsertCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DiskTypesListCall) Fields(s ...googleapi.Field) *DiskTypesListCall { +func (c *ForwardingRulesInsertCall) Fields(s ...googleapi.Field) *ForwardingRulesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *DiskTypesListCall) IfNoneMatch(entityTag string) *DiskTypesListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DiskTypesListCall) Context(ctx context.Context) *DiskTypesListCall { +func (c *ForwardingRulesInsertCall) Context(ctx context.Context) *ForwardingRulesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DiskTypesListCall) Header() http.Header { +func (c *ForwardingRulesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DiskTypesListCall) doRequest(alt string) (*http.Response, error) { +func (c *ForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/diskTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.diskTypes.list" call. -// Exactly one of *DiskTypeList or error will be non-nil. Any non-2xx +// Do executes the "compute.forwardingRules.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *DiskTypeList.ServerResponse.Header or (if a response was returned at +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, error) { +func (c *ForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -28362,7 +42327,7 @@ func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &DiskTypeList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -28374,37 +42339,14 @@ func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, err } return ret, nil // { - // "description": "Retrieves a list of disk types available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.diskTypes.list", + // "description": "Creates a ForwardingRule resource in the specified project and region using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.forwardingRules.insert", // "parameterOrder": [ // "project", - // "zone" + // "region" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". 
This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -28412,64 +42354,53 @@ func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, err // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/diskTypes", + // "path": "{project}/regions/{region}/forwardingRules", + // "request": { + // "$ref": "ForwardingRule" + // }, // "response": { - // "$ref": "DiskTypeList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *DiskTypesListCall) Pages(ctx context.Context, f func(*DiskTypeList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.disks.aggregatedList": +// method id "compute.forwardingRules.list": -type DisksAggregatedListCall struct { +type ForwardingRulesListCall struct { s *Service project string + region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// AggregatedList: Retrieves an aggregated list of persistent disks. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/aggregatedList -func (r *DisksService) AggregatedList(project string) *DisksAggregatedListCall { - c := &DisksAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of ForwardingRule resources available to the +// specified project and region. +// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/list +func (r *ForwardingRulesService) List(project string, region string) *ForwardingRulesListCall { + c := &ForwardingRulesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.region = region return c } @@ -28499,7 +42430,7 @@ func (r *DisksService) AggregatedList(project string) *DisksAggregatedListCall { // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *DisksAggregatedListCall) Filter(filter string) *DisksAggregatedListCall { +func (c *ForwardingRulesListCall) Filter(filter string) *ForwardingRulesListCall { c.urlParams_.Set("filter", filter) return c } @@ -28510,7 +42441,7 @@ func (c *DisksAggregatedListCall) Filter(filter string) *DisksAggregatedListCall // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *DisksAggregatedListCall) MaxResults(maxResults int64) *DisksAggregatedListCall { +func (c *ForwardingRulesListCall) MaxResults(maxResults int64) *ForwardingRulesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -28527,7 +42458,7 @@ func (c *DisksAggregatedListCall) MaxResults(maxResults int64) *DisksAggregatedL // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *DisksAggregatedListCall) OrderBy(orderBy string) *DisksAggregatedListCall { +func (c *ForwardingRulesListCall) OrderBy(orderBy string) *ForwardingRulesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -28535,7 +42466,7 @@ func (c *DisksAggregatedListCall) OrderBy(orderBy string) *DisksAggregatedListCa // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *DisksAggregatedListCall) PageToken(pageToken string) *DisksAggregatedListCall { +func (c *ForwardingRulesListCall) PageToken(pageToken string) *ForwardingRulesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -28543,7 +42474,7 @@ func (c *DisksAggregatedListCall) PageToken(pageToken string) *DisksAggregatedLi // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksAggregatedListCall) Fields(s ...googleapi.Field) *DisksAggregatedListCall { +func (c *ForwardingRulesListCall) Fields(s ...googleapi.Field) *ForwardingRulesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -28553,7 +42484,7 @@ func (c *DisksAggregatedListCall) Fields(s ...googleapi.Field) *DisksAggregatedL // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
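// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the vendored diff): creating a regional
// forwarding rule with the Insert call generated above. The ForwardingRule body
// is built by the caller; the optional request ID makes a retried insert
// idempotent, as described in the RequestId comment. Names and the import path
// are assumptions for the sketch only.
package computeusage

import (
	"context"

	compute "google.golang.org/api/compute/v1" // adjust to the compute version vendored here
)

// insertForwardingRule creates rule in the given project and region and returns
// the long-running Operation describing the insert.
func insertForwardingRule(ctx context.Context, svc *compute.Service,
	project, region string, rule *compute.ForwardingRule, requestID string) (*compute.Operation, error) {
	call := svc.ForwardingRules.Insert(project, region, rule).Context(ctx)
	if requestID != "" {
		call = call.RequestId(requestID)
	}
	return call.Do()
}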
-func (c *DisksAggregatedListCall) IfNoneMatch(entityTag string) *DisksAggregatedListCall { +func (c *ForwardingRulesListCall) IfNoneMatch(entityTag string) *ForwardingRulesListCall { c.ifNoneMatch_ = entityTag return c } @@ -28561,21 +42492,21 @@ func (c *DisksAggregatedListCall) IfNoneMatch(entityTag string) *DisksAggregated // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksAggregatedListCall) Context(ctx context.Context) *DisksAggregatedListCall { +func (c *ForwardingRulesListCall) Context(ctx context.Context) *ForwardingRulesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksAggregatedListCall) Header() http.Header { +func (c *ForwardingRulesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *ForwardingRulesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -28586,24 +42517,25 @@ func (c *DisksAggregatedListCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/disks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.aggregatedList" call. -// Exactly one of *DiskAggregatedList or error will be non-nil. Any +// Do executes the "compute.forwardingRules.list" call. +// Exactly one of *ForwardingRuleList or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *DiskAggregatedList.ServerResponse.Header or (if a response was +// *ForwardingRuleList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggregatedList, error) { +func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingRuleList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -28622,7 +42554,7 @@ func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggrega if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &DiskAggregatedList{ + ret := &ForwardingRuleList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -28634,11 +42566,12 @@ func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggrega } return ret, nil // { - // "description": "Retrieves an aggregated list of persistent disks.", + // "description": "Retrieves a list of ForwardingRule resources available to the specified project and region.", // "httpMethod": "GET", - // "id": "compute.disks.aggregatedList", + // "id": "compute.forwardingRules.list", // "parameterOrder": [ - // "project" + // "project", + // "region" // ], // "parameters": { // "filter": { @@ -28670,11 +42603,18 @@ func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggrega // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/aggregated/disks", + // "path": "{project}/regions/{region}/forwardingRules", // "response": { - // "$ref": "DiskAggregatedList" + // "$ref": "ForwardingRuleList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -28688,7 +42628,7 @@ func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggrega // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *DisksAggregatedListCall) Pages(ctx context.Context, f func(*DiskAggregatedList) error) error { +func (c *ForwardingRulesListCall) Pages(ctx context.Context, f func(*ForwardingRuleList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -28706,33 +42646,29 @@ func (c *DisksAggregatedListCall) Pages(ctx context.Context, f func(*DiskAggrega } } -// method id "compute.disks.createSnapshot": +// method id "compute.forwardingRules.patch": -type DisksCreateSnapshotCall struct { - s *Service - project string - zone string - disk string - snapshot *Snapshot - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ForwardingRulesPatchCall struct { + s *Service + project string + region string + forwardingRule string + forwardingrule *ForwardingRule + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// CreateSnapshot: Creates a snapshot of a specified persistent disk. -// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/createSnapshot -func (r *DisksService) CreateSnapshot(project string, zone string, disk string, snapshot *Snapshot) *DisksCreateSnapshotCall { - c := &DisksCreateSnapshotCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates the specified forwarding rule with the data included +// in the request. This method supports PATCH semantics and uses the +// JSON merge patch format and processing rules. 
Currently, you can only +// patch the network_tier field. +func (r *ForwardingRulesService) Patch(project string, region string, forwardingRule string, forwardingrule *ForwardingRule) *ForwardingRulesPatchCall { + c := &ForwardingRulesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.disk = disk - c.snapshot = snapshot - return c -} - -// GuestFlush sets the optional parameter "guestFlush": -func (c *DisksCreateSnapshotCall) GuestFlush(guestFlush bool) *DisksCreateSnapshotCall { - c.urlParams_.Set("guestFlush", fmt.Sprint(guestFlush)) + c.region = region + c.forwardingRule = forwardingRule + c.forwardingrule = forwardingrule return c } @@ -28750,7 +42686,7 @@ func (c *DisksCreateSnapshotCall) GuestFlush(guestFlush bool) *DisksCreateSnapsh // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *DisksCreateSnapshotCall) RequestId(requestId string) *DisksCreateSnapshotCall { +func (c *ForwardingRulesPatchCall) RequestId(requestId string) *ForwardingRulesPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -28758,7 +42694,7 @@ func (c *DisksCreateSnapshotCall) RequestId(requestId string) *DisksCreateSnapsh // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksCreateSnapshotCall) Fields(s ...googleapi.Field) *DisksCreateSnapshotCall { +func (c *ForwardingRulesPatchCall) Fields(s ...googleapi.Field) *ForwardingRulesPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -28766,53 +42702,53 @@ func (c *DisksCreateSnapshotCall) Fields(s ...googleapi.Field) *DisksCreateSnaps // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksCreateSnapshotCall) Context(ctx context.Context) *DisksCreateSnapshotCall { +func (c *ForwardingRulesPatchCall) Context(ctx context.Context) *ForwardingRulesPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksCreateSnapshotCall) Header() http.Header { +func (c *ForwardingRulesPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) { +func (c *ForwardingRulesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.snapshot) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/createSnapshot") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("PATCH", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "disk": c.disk, + "project": c.project, + "region": c.region, + "forwardingRule": c.forwardingRule, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.createSnapshot" call. +// Do executes the "compute.forwardingRules.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ForwardingRulesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -28843,26 +42779,22 @@ func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Creates a snapshot of a specified persistent disk.", - // "httpMethod": "POST", - // "id": "compute.disks.createSnapshot", + // "description": "Updates the specified forwarding rule with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. Currently, you can only patch the network_tier field.", + // "httpMethod": "PATCH", + // "id": "compute.forwardingRules.patch", // "parameterOrder": [ // "project", - // "zone", - // "disk" + // "region", + // "forwardingRule" // ], // "parameters": { - // "disk": { - // "description": "Name of the persistent disk to snapshot.", + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource to patch.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "guestFlush": { - // "location": "query", - // "type": "boolean" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -28870,22 +42802,22 @@ func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{disk}/createSnapshot", + // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", // "request": { - // "$ref": "Snapshot" + // "$ref": "ForwardingRule" // }, // "response": { // "$ref": "Operation" @@ -28898,28 +42830,27 @@ func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.disks.delete": +// method id "compute.forwardingRules.setLabels": -type DisksDeleteCall struct { - s *Service - project string - zone string - disk string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ForwardingRulesSetLabelsCall struct { + s *Service + project string + region string + resource string + regionsetlabelsrequest *RegionSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified persistent disk. Deleting a disk -// removes its data permanently and is irreversible. However, deleting a -// disk does not delete any snapshots previously made from the disk. You -// must separately delete snapshots. -// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/delete -func (r *DisksService) Delete(project string, zone string, disk string) *DisksDeleteCall { - c := &DisksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetLabels: Sets the labels on the specified resource. To learn more +// about labels, read the Labeling Resources documentation. +func (r *ForwardingRulesService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *ForwardingRulesSetLabelsCall { + c := &ForwardingRulesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.disk = disk + c.region = region + c.resource = resource + c.regionsetlabelsrequest = regionsetlabelsrequest return c } @@ -28937,7 +42868,7 @@ func (r *DisksService) Delete(project string, zone string, disk string) *DisksDe // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *DisksDeleteCall) RequestId(requestId string) *DisksDeleteCall { +func (c *ForwardingRulesSetLabelsCall) RequestId(requestId string) *ForwardingRulesSetLabelsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -28945,7 +42876,7 @@ func (c *DisksDeleteCall) RequestId(requestId string) *DisksDeleteCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksDeleteCall) Fields(s ...googleapi.Field) *DisksDeleteCall { +func (c *ForwardingRulesSetLabelsCall) Fields(s ...googleapi.Field) *ForwardingRulesSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -28953,48 +42884,53 @@ func (c *DisksDeleteCall) Fields(s ...googleapi.Field) *DisksDeleteCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksDeleteCall) Context(ctx context.Context) *DisksDeleteCall { +func (c *ForwardingRulesSetLabelsCall) Context(ctx context.Context) *ForwardingRulesSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksDeleteCall) Header() http.Header { +func (c *ForwardingRulesSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *ForwardingRulesSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "disk": c.disk, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.delete" call. +// Do executes the "compute.forwardingRules.setLabels" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *DisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -29025,25 +42961,26 @@ func (c *DisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { } return ret, nil // { - // "description": "Deletes the specified persistent disk. 
Deleting a disk removes its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. You must separately delete snapshots.", - // "httpMethod": "DELETE", - // "id": "compute.disks.delete", + // "description": "Sets the labels on the specified resource. To learn more about labels, read the Labeling Resources documentation.", + // "httpMethod": "POST", + // "id": "compute.forwardingRules.setLabels", // "parameterOrder": [ // "project", - // "zone", - // "disk" + // "region", + // "resource" // ], // "parameters": { - // "disk": { - // "description": "Name of the persistent disk to delete.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "region": { + // "description": "The region for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -29052,15 +42989,18 @@ func (c *DisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { // "location": "query", // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "resource": { + // "description": "Name of the resource for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{disk}", + // "path": "{project}/regions/{region}/forwardingRules/{resource}/setLabels", + // "request": { + // "$ref": "RegionSetLabelsRequest" + // }, // "response": { // "$ref": "Operation" // }, @@ -29072,96 +43012,108 @@ func (c *DisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { } -// method id "compute.disks.get": +// method id "compute.forwardingRules.setTarget": -type DisksGetCall struct { - s *Service - project string - zone string - disk string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ForwardingRulesSetTargetCall struct { + s *Service + project string + region string + forwardingRule string + targetreference *TargetReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns a specified persistent disk. Get a list of available -// persistent disks by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/get -func (r *DisksService) Get(project string, zone string, disk string) *DisksGetCall { - c := &DisksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetTarget: Changes target URL for forwarding rule. The new target +// should be of the same type as the old target. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/setTarget +func (r *ForwardingRulesService) SetTarget(project string, region string, forwardingRule string, targetreference *TargetReference) *ForwardingRulesSetTargetCall { + c := &ForwardingRulesSetTargetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.disk = disk + c.region = region + c.forwardingRule = forwardingRule + c.targetreference = targetreference + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ForwardingRulesSetTargetCall) RequestId(requestId string) *ForwardingRulesSetTargetCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksGetCall) Fields(s ...googleapi.Field) *DisksGetCall { +func (c *ForwardingRulesSetTargetCall) Fields(s ...googleapi.Field) *ForwardingRulesSetTargetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *DisksGetCall) IfNoneMatch(entityTag string) *DisksGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksGetCall) Context(ctx context.Context) *DisksGetCall { +func (c *ForwardingRulesSetTargetCall) Context(ctx context.Context) *ForwardingRulesSetTargetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *DisksGetCall) Header() http.Header { +func (c *ForwardingRulesSetTargetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "disk": c.disk, + "project": c.project, + "region": c.region, + "forwardingRule": c.forwardingRule, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.get" call. -// Exactly one of *Disk or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Disk.ServerResponse.Header or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { +// Do executes the "compute.forwardingRules.setTarget" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -29180,7 +43132,7 @@ func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Disk{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -29192,17 +43144,17 @@ func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { } return ret, nil // { - // "description": "Returns a specified persistent disk. Get a list of available persistent disks by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.disks.get", + // "description": "Changes target URL for forwarding rule. 
The new target should be of the same type as the old target.", + // "httpMethod": "POST", + // "id": "compute.forwardingRules.setTarget", // "parameterOrder": [ // "project", - // "zone", - // "disk" + // "region", + // "forwardingRule" // ], // "parameters": { - // "disk": { - // "description": "Name of the persistent disk to return.", + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource in which target is to be set.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -29215,116 +43167,116 @@ func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{disk}", + // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget", + // "request": { + // "$ref": "TargetReference" + // }, // "response": { - // "$ref": "Disk" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.disks.getIamPolicy": +// method id "compute.forwardingRules.testIamPermissions": -type DisksGetIamPolicyCall struct { - s *Service - project string - zone string - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ForwardingRulesTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. -func (r *DisksService) GetIamPolicy(project string, zone string, resource string) *DisksGetIamPolicyCall { - c := &DisksGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. 
+func (r *ForwardingRulesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *ForwardingRulesTestIamPermissionsCall { + c := &ForwardingRulesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone + c.region = region c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksGetIamPolicyCall) Fields(s ...googleapi.Field) *DisksGetIamPolicyCall { +func (c *ForwardingRulesTestIamPermissionsCall) Fields(s ...googleapi.Field) *ForwardingRulesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *DisksGetIamPolicyCall) IfNoneMatch(entityTag string) *DisksGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksGetIamPolicyCall) Context(ctx context.Context) *DisksGetIamPolicyCall { +func (c *ForwardingRulesTestIamPermissionsCall) Context(ctx context.Context) *ForwardingRulesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksGetIamPolicyCall) Header() http.Header { +func (c *ForwardingRulesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *ForwardingRulesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, + "region": c.region, "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *DisksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.forwardingRules.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ForwardingRulesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -29343,7 +43295,7 @@ func (c *DisksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -29355,12 +43307,12 @@ func (c *DisksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", - // "httpMethod": "GET", - // "id": "compute.disks.getIamPolicy", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.forwardingRules.testIamPermissions", // "parameterOrder": [ // "project", - // "zone", + // "region", // "resource" // ], // "parameters": { @@ -29371,24 +43323,27 @@ func (c *DisksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name of the resource for this request.", + // "region": { + // "description": "The name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "resource": { + // "description": "Name of the resource for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{resource}/getIamPolicy", + // "path": "{project}/regions/{region}/forwardingRules/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "Policy" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -29399,29 +43354,23 @@ func (c *DisksGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error } -// method id "compute.disks.insert": +// method id "compute.globalAddresses.delete": -type DisksInsertCall struct { +type GlobalAddressesDeleteCall struct { s *Service project string - zone string - disk *Disk + address string urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Insert: Creates a persistent disk in the specified project using the -// data in the request. 
You can create a disk with a sourceImage, a -// sourceSnapshot, or create an empty 500 GB data disk by omitting all -// properties. You can also create a disk that is larger than the -// default size by specifying the sizeGb property. -// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/insert -func (r *DisksService) Insert(project string, zone string, disk *Disk) *DisksInsertCall { - c := &DisksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified address resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/delete +func (r *GlobalAddressesService) Delete(project string, address string) *GlobalAddressesDeleteCall { + c := &GlobalAddressesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.disk = disk + c.address = address return c } @@ -29439,22 +43388,15 @@ func (r *DisksService) Insert(project string, zone string, disk *Disk) *DisksIns // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *DisksInsertCall) RequestId(requestId string) *DisksInsertCall { +func (c *GlobalAddressesDeleteCall) RequestId(requestId string) *GlobalAddressesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } -// SourceImage sets the optional parameter "sourceImage": Source image -// to restore onto a disk. -func (c *DisksInsertCall) SourceImage(sourceImage string) *DisksInsertCall { - c.urlParams_.Set("sourceImage", sourceImage) - return c -} - // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksInsertCall) Fields(s ...googleapi.Field) *DisksInsertCall { +func (c *GlobalAddressesDeleteCall) Fields(s ...googleapi.Field) *GlobalAddressesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -29462,52 +43404,47 @@ func (c *DisksInsertCall) Fields(s ...googleapi.Field) *DisksInsertCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksInsertCall) Context(ctx context.Context) *DisksInsertCall { +func (c *GlobalAddressesDeleteCall) Context(ctx context.Context) *GlobalAddressesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksInsertCall) Header() http.Header { +func (c *GlobalAddressesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalAddressesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.disk) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{address}") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, + "address": c.address, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.insert" call. +// Do executes the "compute.globalAddresses.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalAddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -29538,14 +43475,21 @@ func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { } return ret, nil // { - // "description": "Creates a persistent disk in the specified project using the data in the request. You can create a disk with a sourceImage, a sourceSnapshot, or create an empty 500 GB data disk by omitting all properties. You can also create a disk that is larger than the default size by specifying the sizeGb property.", - // "httpMethod": "POST", - // "id": "compute.disks.insert", + // "description": "Deletes the specified address resource.", + // "httpMethod": "DELETE", + // "id": "compute.globalAddresses.delete", // "parameterOrder": [ // "project", - // "zone" + // "address" // ], // "parameters": { + // "address": { + // "description": "Name of the address resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -29557,24 +43501,9 @@ func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "sourceImage": { - // "description": "Optional. 
Source image to restore onto a disk.", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks", - // "request": { - // "$ref": "Disk" - // }, + // "path": "{project}/global/addresses/{address}", // "response": { // "$ref": "Operation" // }, @@ -29586,99 +43515,32 @@ func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { } -// method id "compute.disks.list": +// method id "compute.globalAddresses.get": -type DisksListCall struct { +type GlobalAddressesGetCall struct { s *Service project string - zone string + address string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of persistent disks contained within the -// specified zone. -// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/list -func (r *DisksService) List(project string, zone string) *DisksListCall { - c := &DisksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified address resource. Get a list of available +// addresses by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/get +func (r *GlobalAddressesService) Get(project string, address string) *GlobalAddressesGetCall { + c := &GlobalAddressesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - return c -} - -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *DisksListCall) Filter(filter string) *DisksListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. 
Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *DisksListCall) MaxResults(maxResults int64) *DisksListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *DisksListCall) OrderBy(orderBy string) *DisksListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *DisksListCall) PageToken(pageToken string) *DisksListCall { - c.urlParams_.Set("pageToken", pageToken) + c.address = address return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksListCall) Fields(s ...googleapi.Field) *DisksListCall { +func (c *GlobalAddressesGetCall) Fields(s ...googleapi.Field) *GlobalAddressesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -29688,7 +43550,7 @@ func (c *DisksListCall) Fields(s ...googleapi.Field) *DisksListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *DisksListCall) IfNoneMatch(entityTag string) *DisksListCall { +func (c *GlobalAddressesGetCall) IfNoneMatch(entityTag string) *GlobalAddressesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -29696,21 +43558,21 @@ func (c *DisksListCall) IfNoneMatch(entityTag string) *DisksListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksListCall) Context(ctx context.Context) *DisksListCall { +func (c *GlobalAddressesGetCall) Context(ctx context.Context) *GlobalAddressesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksListCall) Header() http.Header { +func (c *GlobalAddressesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksListCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalAddressesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -29721,25 +43583,25 @@ func (c *DisksListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{address}") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, + "address": c.address, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.list" call. -// Exactly one of *DiskList or error will be non-nil. Any non-2xx status +// Do executes the "compute.globalAddresses.get" call. +// Exactly one of *Address or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either -// *DiskList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { +// *Address.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -29758,7 +43620,7 @@ func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &DiskList{ + ret := &Address{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -29770,55 +43632,32 @@ func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { } return ret, nil // { - // "description": "Retrieves a list of persistent disks contained within the specified zone.", + // "description": "Returns the specified address resource. Get a list of available addresses by making a list() request.", // "httpMethod": "GET", - // "id": "compute.disks.list", + // "id": "compute.globalAddresses.get", // "parameterOrder": [ // "project", - // "zone" + // "address" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", + // "address": { + // "description": "Name of the address resource to return.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks", + // "path": "{project}/global/addresses/{address}", // "response": { - // "$ref": "DiskList" + // "$ref": "Address" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -29829,47 +43668,24 @@ func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *DisksListCall) Pages(ctx context.Context, f func(*DiskList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.disks.resize": +// method id "compute.globalAddresses.insert": -type DisksResizeCall struct { - s *Service - project string - zone string - disk string - disksresizerequest *DisksResizeRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalAddressesInsertCall struct { + s *Service + project string + address *Address + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Resize: Resizes the specified persistent disk. -func (r *DisksService) Resize(project string, zone string, disk string, disksresizerequest *DisksResizeRequest) *DisksResizeCall { - c := &DisksResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates an address resource in the specified project using +// the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/insert +func (r *GlobalAddressesService) Insert(project string, address *Address) *GlobalAddressesInsertCall { + c := &GlobalAddressesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.disk = disk - c.disksresizerequest = disksresizerequest + c.address = address return c } @@ -29887,7 +43703,7 @@ func (r *DisksService) Resize(project string, zone string, disk string, disksres // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *DisksResizeCall) RequestId(requestId string) *DisksResizeCall { +func (c *GlobalAddressesInsertCall) RequestId(requestId string) *GlobalAddressesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -29895,7 +43711,7 @@ func (c *DisksResizeCall) RequestId(requestId string) *DisksResizeCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksResizeCall) Fields(s ...googleapi.Field) *DisksResizeCall { +func (c *GlobalAddressesInsertCall) Fields(s ...googleapi.Field) *GlobalAddressesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -29903,53 +43719,51 @@ func (c *DisksResizeCall) Fields(s ...googleapi.Field) *DisksResizeCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksResizeCall) Context(ctx context.Context) *DisksResizeCall { +func (c *GlobalAddressesInsertCall) Context(ctx context.Context) *GlobalAddressesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *DisksResizeCall) Header() http.Header { +func (c *GlobalAddressesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksResizeCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalAddressesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.disksresizerequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.address) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/resize") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, - "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.resize" call. +// Do executes the "compute.globalAddresses.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *DisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalAddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -29980,22 +43794,13 @@ func (c *DisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { } return ret, nil // { - // "description": "Resizes the specified persistent disk.", + // "description": "Creates an address resource in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.disks.resize", + // "id": "compute.globalAddresses.insert", // "parameterOrder": [ - // "project", - // "zone", - // "disk" + // "project" // ], // "parameters": { - // "disk": { - // "description": "The name of the persistent disk.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -30007,18 +43812,11 @@ func (c *DisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{disk}/resize", + // "path": "{project}/global/addresses", // "request": { - // "$ref": "DisksResizeRequest" + // "$ref": "Address" // }, // "response": { // "$ref": "Operation" @@ -30031,88 +43829,156 @@ func (c *DisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { } -// method id "compute.disks.setIamPolicy": +// method id "compute.globalAddresses.list": -type DisksSetIamPolicyCall struct { - s *Service - project string - zone string - resource string - policy *Policy - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalAddressesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. -func (r *DisksService) SetIamPolicy(project string, zone string, resource string, policy *Policy) *DisksSetIamPolicyCall { - c := &DisksSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of global addresses. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/list +func (r *GlobalAddressesService) List(project string) *GlobalAddressesListCall { + c := &GlobalAddressesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.resource = resource - c.policy = policy + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. 
+func (c *GlobalAddressesListCall) Filter(filter string) *GlobalAddressesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *GlobalAddressesListCall) MaxResults(maxResults int64) *GlobalAddressesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *GlobalAddressesListCall) OrderBy(orderBy string) *GlobalAddressesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *GlobalAddressesListCall) PageToken(pageToken string) *GlobalAddressesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksSetIamPolicyCall) Fields(s ...googleapi.Field) *DisksSetIamPolicyCall { +func (c *GlobalAddressesListCall) Fields(s ...googleapi.Field) *GlobalAddressesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *GlobalAddressesListCall) IfNoneMatch(entityTag string) *GlobalAddressesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksSetIamPolicyCall) Context(ctx context.Context) *DisksSetIamPolicyCall { +func (c *GlobalAddressesListCall) Context(ctx context.Context) *GlobalAddressesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *DisksSetIamPolicyCall) Header() http.Header { +func (c *GlobalAddressesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalAddressesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *DisksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.globalAddresses.list" call. +// Exactly one of *AddressList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *AddressList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -30131,7 +43997,7 @@ func (c *DisksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &AddressList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -30143,99 +44009,104 @@ func (c *DisksSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", - // "httpMethod": "POST", - // "id": "compute.disks.setIamPolicy", + // "description": "Retrieves a list of global addresses.", + // "httpMethod": "GET", + // "id": "compute.globalAddresses.list", // "parameterOrder": [ - // "project", - // "zone", - // "resource" + // "project" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", // "type": "string" // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{resource}/setIamPolicy", - // "request": { - // "$ref": "Policy" - // }, + // "path": "{project}/global/addresses", // "response": { - // "$ref": "Policy" + // "$ref": "AddressList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.disks.setLabels": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *GlobalAddressesListCall) Pages(ctx context.Context, f func(*AddressList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type DisksSetLabelsCall struct { - s *Service - project string - zone string - resource string - zonesetlabelsrequest *ZoneSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.globalAddresses.setLabels": + +type GlobalAddressesSetLabelsCall struct { + s *Service + project string + resource string + globalsetlabelsrequest *GlobalSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetLabels: Sets the labels on a disk. To learn more about labels, -// read the Labeling Resources documentation. -func (r *DisksService) SetLabels(project string, zone string, resource string, zonesetlabelsrequest *ZoneSetLabelsRequest) *DisksSetLabelsCall { - c := &DisksSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetLabels: Sets the labels on a GlobalAddress. To learn more about +// labels, read the Labeling Resources documentation. +func (r *GlobalAddressesService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *GlobalAddressesSetLabelsCall { + c := &GlobalAddressesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone c.resource = resource - c.zonesetlabelsrequest = zonesetlabelsrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. 
-// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *DisksSetLabelsCall) RequestId(requestId string) *DisksSetLabelsCall { - c.urlParams_.Set("requestId", requestId) + c.globalsetlabelsrequest = globalsetlabelsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksSetLabelsCall) Fields(s ...googleapi.Field) *DisksSetLabelsCall { +func (c *GlobalAddressesSetLabelsCall) Fields(s ...googleapi.Field) *GlobalAddressesSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -30243,53 +44114,52 @@ func (c *DisksSetLabelsCall) Fields(s ...googleapi.Field) *DisksSetLabelsCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksSetLabelsCall) Context(ctx context.Context) *DisksSetLabelsCall { +func (c *GlobalAddressesSetLabelsCall) Context(ctx context.Context) *GlobalAddressesSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksSetLabelsCall) Header() http.Header { +func (c *GlobalAddressesSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalAddressesSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetlabelsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.setLabels" call. +// Do executes the "compute.globalAddresses.setLabels" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *DisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalAddressesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -30320,12 +44190,11 @@ func (c *DisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Sets the labels on a disk. 
To learn more about labels, read the Labeling Resources documentation.", + // "description": "Sets the labels on a GlobalAddress. To learn more about labels, read the Labeling Resources documentation.", // "httpMethod": "POST", - // "id": "compute.disks.setLabels", + // "id": "compute.globalAddresses.setLabels", // "parameterOrder": [ // "project", - // "zone", // "resource" // ], // "parameters": { @@ -30336,29 +44205,17 @@ func (c *DisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, // "resource": { // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{resource}/setLabels", + // "path": "{project}/global/addresses/{resource}/setLabels", // "request": { - // "$ref": "ZoneSetLabelsRequest" + // "$ref": "GlobalSetLabelsRequest" // }, // "response": { // "$ref": "Operation" @@ -30371,12 +44228,11 @@ func (c *DisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error } -// method id "compute.disks.testIamPermissions": +// method id "compute.globalAddresses.testIamPermissions": -type DisksTestIamPermissionsCall struct { +type GlobalAddressesTestIamPermissionsCall struct { s *Service project string - zone string resource string testpermissionsrequest *TestPermissionsRequest urlParams_ gensupport.URLParams @@ -30386,10 +44242,9 @@ type DisksTestIamPermissionsCall struct { // TestIamPermissions: Returns permissions that a caller has on the // specified resource. -func (r *DisksService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *DisksTestIamPermissionsCall { - c := &DisksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *GlobalAddressesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *GlobalAddressesTestIamPermissionsCall { + c := &GlobalAddressesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone c.resource = resource c.testpermissionsrequest = testpermissionsrequest return c @@ -30398,7 +44253,7 @@ func (r *DisksService) TestIamPermissions(project string, zone string, resource // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksTestIamPermissionsCall) Fields(s ...googleapi.Field) *DisksTestIamPermissionsCall { +func (c *GlobalAddressesTestIamPermissionsCall) Fields(s ...googleapi.Field) *GlobalAddressesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -30406,21 +44261,21 @@ func (c *DisksTestIamPermissionsCall) Fields(s ...googleapi.Field) *DisksTestIam // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksTestIamPermissionsCall) Context(ctx context.Context) *DisksTestIamPermissionsCall { +func (c *GlobalAddressesTestIamPermissionsCall) Context(ctx context.Context) *GlobalAddressesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksTestIamPermissionsCall) Header() http.Header { +func (c *GlobalAddressesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalAddressesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -30433,26 +44288,25 @@ func (c *DisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.testIamPermissions" call. +// Do executes the "compute.globalAddresses.testIamPermissions" call. // Exactly one of *TestPermissionsResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *TestPermissionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *GlobalAddressesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -30485,10 +44339,9 @@ func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPer // { // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.disks.testIamPermissions", + // "id": "compute.globalAddresses.testIamPermissions", // "parameterOrder": [ // "project", - // "zone", // "resource" // ], // "parameters": { @@ -30502,19 +44355,12 @@ func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPer // "resource": { // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{resource}/testIamPermissions", + // "path": "{project}/global/addresses/{resource}/testIamPermissions", // "request": { // "$ref": "TestPermissionsRequest" // }, @@ -30530,23 +44376,23 @@ func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPer } -// method id "compute.firewalls.delete": +// method id "compute.globalForwardingRules.delete": -type FirewallsDeleteCall struct { - s *Service - project string - firewall string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalForwardingRulesDeleteCall struct { + s *Service + project string + forwardingRule string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified firewall. -// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/delete -func (r *FirewallsService) Delete(project string, firewall string) *FirewallsDeleteCall { - c := &FirewallsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified GlobalForwardingRule resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/delete +func (r *GlobalForwardingRulesService) Delete(project string, forwardingRule string) *GlobalForwardingRulesDeleteCall { + c := &GlobalForwardingRulesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.firewall = firewall + c.forwardingRule = forwardingRule return c } @@ -30564,7 +44410,7 @@ func (r *FirewallsService) Delete(project string, firewall string) *FirewallsDel // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *FirewallsDeleteCall) RequestId(requestId string) *FirewallsDeleteCall { +func (c *GlobalForwardingRulesDeleteCall) RequestId(requestId string) *GlobalForwardingRulesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -30572,7 +44418,7 @@ func (c *FirewallsDeleteCall) RequestId(requestId string) *FirewallsDeleteCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *FirewallsDeleteCall) Fields(s ...googleapi.Field) *FirewallsDeleteCall { +func (c *GlobalForwardingRulesDeleteCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -30580,21 +44426,21 @@ func (c *FirewallsDeleteCall) Fields(s ...googleapi.Field) *FirewallsDeleteCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallsDeleteCall) Context(ctx context.Context) *FirewallsDeleteCall { +func (c *GlobalForwardingRulesDeleteCall) Context(ctx context.Context) *GlobalForwardingRulesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FirewallsDeleteCall) Header() http.Header { +func (c *GlobalForwardingRulesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -30602,25 +44448,25 @@ func (c *FirewallsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "firewall": c.firewall, + "project": c.project, + "forwardingRule": c.forwardingRule, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewalls.delete" call. +// Do executes the "compute.globalForwardingRules.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *FirewallsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -30651,16 +44497,16 @@ func (c *FirewallsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Deletes the specified firewall.", + // "description": "Deletes the specified GlobalForwardingRule resource.", // "httpMethod": "DELETE", - // "id": "compute.firewalls.delete", + // "id": "compute.globalForwardingRules.delete", // "parameterOrder": [ // "project", - // "firewall" + // "forwardingRule" // ], // "parameters": { - // "firewall": { - // "description": "Name of the firewall rule to delete.", + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -30679,7 +44525,7 @@ func (c *FirewallsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "type": "string" // } // }, - // "path": "{project}/global/firewalls/{firewall}", + // "path": "{project}/global/forwardingRules/{forwardingRule}", // "response": { // "$ref": "Operation" // }, @@ -30691,31 +44537,32 @@ func (c *FirewallsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.firewalls.get": +// method id "compute.globalForwardingRules.get": -type FirewallsGetCall struct { - s *Service - project string - firewall string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type GlobalForwardingRulesGetCall struct { + s *Service + project string + forwardingRule string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified firewall. -// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/get -func (r *FirewallsService) Get(project string, firewall string) *FirewallsGetCall { - c := &FirewallsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified GlobalForwardingRule resource. Get a list +// of available forwarding rules by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/get +func (r *GlobalForwardingRulesService) Get(project string, forwardingRule string) *GlobalForwardingRulesGetCall { + c := &GlobalForwardingRulesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.firewall = firewall + c.forwardingRule = forwardingRule return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FirewallsGetCall) Fields(s ...googleapi.Field) *FirewallsGetCall { +func (c *GlobalForwardingRulesGetCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -30725,7 +44572,7 @@ func (c *FirewallsGetCall) Fields(s ...googleapi.Field) *FirewallsGetCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
-func (c *FirewallsGetCall) IfNoneMatch(entityTag string) *FirewallsGetCall { +func (c *GlobalForwardingRulesGetCall) IfNoneMatch(entityTag string) *GlobalForwardingRulesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -30733,21 +44580,21 @@ func (c *FirewallsGetCall) IfNoneMatch(entityTag string) *FirewallsGetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallsGetCall) Context(ctx context.Context) *FirewallsGetCall { +func (c *GlobalForwardingRulesGetCall) Context(ctx context.Context) *GlobalForwardingRulesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FirewallsGetCall) Header() http.Header { +func (c *GlobalForwardingRulesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -30758,25 +44605,25 @@ func (c *FirewallsGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "firewall": c.firewall, + "project": c.project, + "forwardingRule": c.forwardingRule, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewalls.get" call. -// Exactly one of *Firewall or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Firewall.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *FirewallsGetCall) Do(opts ...googleapi.CallOption) (*Firewall, error) { +// Do executes the "compute.globalForwardingRules.get" call. +// Exactly one of *ForwardingRule or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ForwardingRule.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *GlobalForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRule, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -30795,7 +44642,7 @@ func (c *FirewallsGetCall) Do(opts ...googleapi.CallOption) (*Firewall, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Firewall{ + ret := &ForwardingRule{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -30807,16 +44654,16 @@ func (c *FirewallsGetCall) Do(opts ...googleapi.CallOption) (*Firewall, error) { } return ret, nil // { - // "description": "Returns the specified firewall.", + // "description": "Returns the specified GlobalForwardingRule resource. Get a list of available forwarding rules by making a list() request.", // "httpMethod": "GET", - // "id": "compute.firewalls.get", + // "id": "compute.globalForwardingRules.get", // "parameterOrder": [ // "project", - // "firewall" + // "forwardingRule" // ], // "parameters": { - // "firewall": { - // "description": "Name of the firewall rule to return.", + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -30830,9 +44677,9 @@ func (c *FirewallsGetCall) Do(opts ...googleapi.CallOption) (*Firewall, error) { // "type": "string" // } // }, - // "path": "{project}/global/firewalls/{firewall}", + // "path": "{project}/global/forwardingRules/{forwardingRule}", // "response": { - // "$ref": "Firewall" + // "$ref": "ForwardingRule" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -30843,24 +44690,24 @@ func (c *FirewallsGetCall) Do(opts ...googleapi.CallOption) (*Firewall, error) { } -// method id "compute.firewalls.insert": +// method id "compute.globalForwardingRules.insert": -type FirewallsInsertCall struct { - s *Service - project string - firewall *Firewall - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalForwardingRulesInsertCall struct { + s *Service + project string + forwardingrule *ForwardingRule + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a firewall rule in the specified project using the -// data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/insert -func (r *FirewallsService) Insert(project string, firewall *Firewall) *FirewallsInsertCall { - c := &FirewallsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a GlobalForwardingRule resource in the specified +// project using the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/insert +func (r *GlobalForwardingRulesService) Insert(project string, forwardingrule *ForwardingRule) *GlobalForwardingRulesInsertCall { + c := &GlobalForwardingRulesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.firewall = firewall + c.forwardingrule = forwardingrule return c } @@ -30878,7 +44725,7 @@ func (r *FirewallsService) Insert(project string, firewall *Firewall) *Firewalls // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *FirewallsInsertCall) RequestId(requestId string) *FirewallsInsertCall { +func (c *GlobalForwardingRulesInsertCall) RequestId(requestId string) *GlobalForwardingRulesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -30886,7 +44733,7 @@ func (c *FirewallsInsertCall) RequestId(requestId string) *FirewallsInsertCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FirewallsInsertCall) Fields(s ...googleapi.Field) *FirewallsInsertCall { +func (c *GlobalForwardingRulesInsertCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -30894,34 +44741,34 @@ func (c *FirewallsInsertCall) Fields(s ...googleapi.Field) *FirewallsInsertCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallsInsertCall) Context(ctx context.Context) *FirewallsInsertCall { +func (c *GlobalForwardingRulesInsertCall) Context(ctx context.Context) *GlobalForwardingRulesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FirewallsInsertCall) Header() http.Header { +func (c *GlobalForwardingRulesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -30931,14 +44778,14 @@ func (c *FirewallsInsertCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewalls.insert" call. +// Do executes the "compute.globalForwardingRules.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *FirewallsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -30969,9 +44816,9 @@ func (c *FirewallsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Creates a firewall rule in the specified project using the data included in the request.", + // "description": "Creates a GlobalForwardingRule resource in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.firewalls.insert", + // "id": "compute.globalForwardingRules.insert", // "parameterOrder": [ // "project" // ], @@ -30989,9 +44836,9 @@ func (c *FirewallsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "type": "string" // } // }, - // "path": "{project}/global/firewalls", + // "path": "{project}/global/forwardingRules", // "request": { - // "$ref": "Firewall" + // "$ref": "ForwardingRule" // }, // "response": { // "$ref": "Operation" @@ -31004,9 +44851,9 @@ func (c *FirewallsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.firewalls.list": +// method id "compute.globalForwardingRules.list": -type FirewallsListCall struct { +type GlobalForwardingRulesListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -31015,11 +44862,11 @@ type FirewallsListCall struct { header_ http.Header } -// List: Retrieves the list of firewall rules available to the specified -// project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/list -func (r *FirewallsService) List(project string) *FirewallsListCall { - c := &FirewallsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of GlobalForwardingRule resources available to +// the specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/list +func (r *GlobalForwardingRulesService) List(project string) *GlobalForwardingRulesListCall { + c := &GlobalForwardingRulesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -31050,7 +44897,7 @@ func (r *FirewallsService) List(project string) *FirewallsListCall { // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *FirewallsListCall) Filter(filter string) *FirewallsListCall { +func (c *GlobalForwardingRulesListCall) Filter(filter string) *GlobalForwardingRulesListCall { c.urlParams_.Set("filter", filter) return c } @@ -31061,7 +44908,7 @@ func (c *FirewallsListCall) Filter(filter string) *FirewallsListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *FirewallsListCall) MaxResults(maxResults int64) *FirewallsListCall { +func (c *GlobalForwardingRulesListCall) MaxResults(maxResults int64) *GlobalForwardingRulesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -31078,7 +44925,7 @@ func (c *FirewallsListCall) MaxResults(maxResults int64) *FirewallsListCall { // // Currently, only sorting by name or creationTimestamp desc is // supported. 
-func (c *FirewallsListCall) OrderBy(orderBy string) *FirewallsListCall { +func (c *GlobalForwardingRulesListCall) OrderBy(orderBy string) *GlobalForwardingRulesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -31086,7 +44933,7 @@ func (c *FirewallsListCall) OrderBy(orderBy string) *FirewallsListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *FirewallsListCall) PageToken(pageToken string) *FirewallsListCall { +func (c *GlobalForwardingRulesListCall) PageToken(pageToken string) *GlobalForwardingRulesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -31094,7 +44941,7 @@ func (c *FirewallsListCall) PageToken(pageToken string) *FirewallsListCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FirewallsListCall) Fields(s ...googleapi.Field) *FirewallsListCall { +func (c *GlobalForwardingRulesListCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -31104,7 +44951,7 @@ func (c *FirewallsListCall) Fields(s ...googleapi.Field) *FirewallsListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *FirewallsListCall) IfNoneMatch(entityTag string) *FirewallsListCall { +func (c *GlobalForwardingRulesListCall) IfNoneMatch(entityTag string) *GlobalForwardingRulesListCall { c.ifNoneMatch_ = entityTag return c } @@ -31112,21 +44959,21 @@ func (c *FirewallsListCall) IfNoneMatch(entityTag string) *FirewallsListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallsListCall) Context(ctx context.Context) *FirewallsListCall { +func (c *GlobalForwardingRulesListCall) Context(ctx context.Context) *GlobalForwardingRulesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FirewallsListCall) Header() http.Header { +func (c *GlobalForwardingRulesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallsListCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalForwardingRulesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -31137,7 +44984,7 @@ func (c *FirewallsListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders @@ -31147,14 +44994,14 @@ func (c *FirewallsListCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewalls.list" call. -// Exactly one of *FirewallList or error will be non-nil. Any non-2xx -// status code is an error. 
Response headers are in either -// *FirewallList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, error) { +// Do executes the "compute.globalForwardingRules.list" call. +// Exactly one of *ForwardingRuleList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ForwardingRuleList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingRuleList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -31173,7 +45020,7 @@ func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &FirewallList{ + ret := &ForwardingRuleList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -31185,9 +45032,9 @@ func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, err } return ret, nil // { - // "description": "Retrieves the list of firewall rules available to the specified project.", + // "description": "Retrieves a list of GlobalForwardingRule resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.firewalls.list", + // "id": "compute.globalForwardingRules.list", // "parameterOrder": [ // "project" // ], @@ -31223,9 +45070,9 @@ func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, err // "type": "string" // } // }, - // "path": "{project}/global/firewalls", + // "path": "{project}/global/forwardingRules", // "response": { - // "$ref": "FirewallList" + // "$ref": "ForwardingRuleList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -31239,7 +45086,7 @@ func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, err // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *FirewallsListCall) Pages(ctx context.Context, f func(*FirewallList) error) error { +func (c *GlobalForwardingRulesListCall) Pages(ctx context.Context, f func(*ForwardingRuleList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -31257,27 +45104,27 @@ func (c *FirewallsListCall) Pages(ctx context.Context, f func(*FirewallList) err } } -// method id "compute.firewalls.patch": +// method id "compute.globalForwardingRules.patch": -type FirewallsPatchCall struct { - s *Service - project string - firewall string - firewall2 *Firewall - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalForwardingRulesPatchCall struct { + s *Service + project string + forwardingRule string + forwardingrule *ForwardingRule + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates the specified firewall rule with the data included in -// the request. 
This method supports PATCH semantics and uses the JSON -// merge patch format and processing rules. -// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/patch -func (r *FirewallsService) Patch(project string, firewall string, firewall2 *Firewall) *FirewallsPatchCall { - c := &FirewallsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates the specified forwarding rule with the data included +// in the request. This method supports PATCH semantics and uses the +// JSON merge patch format and processing rules. Currently, you can only +// patch the network_tier field. +func (r *GlobalForwardingRulesService) Patch(project string, forwardingRule string, forwardingrule *ForwardingRule) *GlobalForwardingRulesPatchCall { + c := &GlobalForwardingRulesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.firewall = firewall - c.firewall2 = firewall2 + c.forwardingRule = forwardingRule + c.forwardingrule = forwardingrule return c } @@ -31295,7 +45142,7 @@ func (r *FirewallsService) Patch(project string, firewall string, firewall2 *Fir // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *FirewallsPatchCall) RequestId(requestId string) *FirewallsPatchCall { +func (c *GlobalForwardingRulesPatchCall) RequestId(requestId string) *GlobalForwardingRulesPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -31303,7 +45150,7 @@ func (c *FirewallsPatchCall) RequestId(requestId string) *FirewallsPatchCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FirewallsPatchCall) Fields(s ...googleapi.Field) *FirewallsPatchCall { +func (c *GlobalForwardingRulesPatchCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -31311,52 +45158,52 @@ func (c *FirewallsPatchCall) Fields(s ...googleapi.Field) *FirewallsPatchCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallsPatchCall) Context(ctx context.Context) *FirewallsPatchCall { +func (c *GlobalForwardingRulesPatchCall) Context(ctx context.Context) *GlobalForwardingRulesPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FirewallsPatchCall) Header() http.Header { +func (c *GlobalForwardingRulesPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalForwardingRulesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall2) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "firewall": c.firewall, + "project": c.project, + "forwardingRule": c.forwardingRule, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewalls.patch" call. +// Do executes the "compute.globalForwardingRules.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *FirewallsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalForwardingRulesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -31387,16 +45234,16 @@ func (c *FirewallsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Updates the specified firewall rule with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "description": "Updates the specified forwarding rule with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. Currently, you can only patch the network_tier field.", // "httpMethod": "PATCH", - // "id": "compute.firewalls.patch", + // "id": "compute.globalForwardingRules.patch", // "parameterOrder": [ // "project", - // "firewall" + // "forwardingRule" // ], // "parameters": { - // "firewall": { - // "description": "Name of the firewall rule to patch.", + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource to patch.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -31415,48 +45262,47 @@ func (c *FirewallsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error // "type": "string" // } // }, - // "path": "{project}/global/firewalls/{firewall}", + // "path": "{project}/global/forwardingRules/{forwardingRule}", // "request": { - // "$ref": "Firewall" + // "$ref": "ForwardingRule" // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.firewalls.testIamPermissions": +// method id "compute.globalForwardingRules.setLabels": -type FirewallsTestIamPermissionsCall struct { +type GlobalForwardingRulesSetLabelsCall struct { s *Service project string resource string - testpermissionsrequest *TestPermissionsRequest + globalsetlabelsrequest *GlobalSetLabelsRequest urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. 
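For orientation, a minimal sketch of how a caller might drive the globalForwardingRules.patch method shown above. The project and rule names are placeholders, the NetworkTier field is assumed to exist on ForwardingRule (the method description says only network_tier is patchable), and the import path assumes the usual google.golang.org/api/compute/v1 layout rather than this vendored copy.

// Sketch only: placeholder names, assumed import path, assumed NetworkTier field.
package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	// Uses Application Default Credentials for the HTTP client.
	client, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}
	// "my-project" and "my-rule" are placeholders; the request ID makes a
	// retried request idempotent, as described in the RequestId docs above.
	op, err := svc.GlobalForwardingRules.
		Patch("my-project", "my-rule", &compute.ForwardingRule{NetworkTier: "PREMIUM"}).
		RequestId("00000000-0000-0000-0000-000000000001").
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("patch operation %s: %s", op.Name, op.Status)
}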
-func (r *FirewallsService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *FirewallsTestIamPermissionsCall { - c := &FirewallsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetLabels: Sets the labels on the specified resource. To learn more +// about labels, read the Labeling Resources documentation. +func (r *GlobalForwardingRulesService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *GlobalForwardingRulesSetLabelsCall { + c := &GlobalForwardingRulesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.globalsetlabelsrequest = globalsetlabelsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FirewallsTestIamPermissionsCall) Fields(s ...googleapi.Field) *FirewallsTestIamPermissionsCall { +func (c *GlobalForwardingRulesSetLabelsCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -31464,34 +45310,34 @@ func (c *FirewallsTestIamPermissionsCall) Fields(s ...googleapi.Field) *Firewall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallsTestIamPermissionsCall) Context(ctx context.Context) *FirewallsTestIamPermissionsCall { +func (c *GlobalForwardingRulesSetLabelsCall) Context(ctx context.Context) *GlobalForwardingRulesSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FirewallsTestIamPermissionsCall) Header() http.Header { +func (c *GlobalForwardingRulesSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalForwardingRulesSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -31502,14 +45348,14 @@ func (c *FirewallsTestIamPermissionsCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewalls.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. 
Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *FirewallsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.globalForwardingRules.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *GlobalForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -31528,7 +45374,7 @@ func (c *FirewallsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -31540,9 +45386,9 @@ func (c *FirewallsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Sets the labels on the specified resource. To learn more about labels, read the Labeling Resources documentation.", // "httpMethod": "POST", - // "id": "compute.firewalls.testIamPermissions", + // "id": "compute.globalForwardingRules.setLabels", // "parameterOrder": [ // "project", // "resource" @@ -31563,70 +45409,219 @@ func (c *FirewallsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes // "type": "string" // } // }, - // "path": "{project}/global/firewalls/{resource}/testIamPermissions", + // "path": "{project}/global/forwardingRules/{resource}/setLabels", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "GlobalSetLabelsRequest" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.firewalls.update": +// method id "compute.globalForwardingRules.setTarget": -type FirewallsUpdateCall struct { - s *Service - project string - firewall string - firewall2 *Firewall - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalForwardingRulesSetTargetCall struct { + s *Service + project string + forwardingRule string + targetreference *TargetReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates the specified firewall rule with the data included in -// the request. Using PUT method, can only update following fields of -// firewall rule: allowed, description, sourceRanges, sourceTags, -// targetTags. -// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/update -func (r *FirewallsService) Update(project string, firewall string, firewall2 *Firewall) *FirewallsUpdateCall { - c := &FirewallsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetTarget: Changes target URL for the GlobalForwardingRule resource. 
+// The new target should be of the same type as the old target. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/setTarget +func (r *GlobalForwardingRulesService) SetTarget(project string, forwardingRule string, targetreference *TargetReference) *GlobalForwardingRulesSetTargetCall { + c := &GlobalForwardingRulesSetTargetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.firewall = firewall - c.firewall2 = firewall2 + c.forwardingRule = forwardingRule + c.targetreference = targetreference + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *GlobalForwardingRulesSetTargetCall) RequestId(requestId string) *GlobalForwardingRulesSetTargetCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *GlobalForwardingRulesSetTargetCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesSetTargetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *GlobalForwardingRulesSetTargetCall) Context(ctx context.Context) *GlobalForwardingRulesSetTargetCall { + c.ctx_ = ctx return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *FirewallsUpdateCall) RequestId(requestId string) *FirewallsUpdateCall { - c.urlParams_.Set("requestId", requestId) +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *GlobalForwardingRulesSetTargetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *GlobalForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}/setTarget") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "forwardingRule": c.forwardingRule, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.globalForwardingRules.setTarget" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *GlobalForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Changes target URL for the GlobalForwardingRule resource. The new target should be of the same type as the old target.", + // "httpMethod": "POST", + // "id": "compute.globalForwardingRules.setTarget", + // "parameterOrder": [ + // "project", + // "forwardingRule" + // ], + // "parameters": { + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource in which target is to be set.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/forwardingRules/{forwardingRule}/setTarget", + // "request": { + // "$ref": "TargetReference" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.globalForwardingRules.testIamPermissions": + +type GlobalForwardingRulesTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *GlobalForwardingRulesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *GlobalForwardingRulesTestIamPermissionsCall { + c := &GlobalForwardingRulesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FirewallsUpdateCall) Fields(s ...googleapi.Field) *FirewallsUpdateCall { +func (c *GlobalForwardingRulesTestIamPermissionsCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -31634,52 +45629,52 @@ func (c *FirewallsUpdateCall) Fields(s ...googleapi.Field) *FirewallsUpdateCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallsUpdateCall) Context(ctx context.Context) *FirewallsUpdateCall { +func (c *GlobalForwardingRulesTestIamPermissionsCall) Context(ctx context.Context) *GlobalForwardingRulesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
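A similar sketch for the setTarget call defined above; svc is assumed to be a *compute.Service constructed as in the earlier sketch, and the project, rule, and target URL are placeholders.

// Sketch only: assumed import path and placeholder arguments.
package example

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

// setGlobalForwardingTarget points an existing global forwarding rule at a
// new target URL. The new target must be of the same type as the old one,
// per the setTarget description above.
func setGlobalForwardingTarget(ctx context.Context, svc *compute.Service, project, rule, targetURL string) error {
	op, err := svc.GlobalForwardingRules.
		SetTarget(project, rule, &compute.TargetReference{Target: targetURL}).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("setTarget operation %s accepted (status %s)", op.Name, op.Status)
	return nil
}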
-func (c *FirewallsUpdateCall) Header() http.Header { +func (c *GlobalForwardingRulesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallsUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalForwardingRulesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall2) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "firewall": c.firewall, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewalls.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *FirewallsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.globalForwardingRules.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *GlobalForwardingRulesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -31698,7 +45693,7 @@ func (c *FirewallsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, erro if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -31710,21 +45705,14 @@ func (c *FirewallsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Updates the specified firewall rule with the data included in the request. 
Using PUT method, can only update following fields of firewall rule: allowed, description, sourceRanges, sourceTags, targetTags.", - // "httpMethod": "PUT", - // "id": "compute.firewalls.update", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.globalForwardingRules.testIamPermissions", // "parameterOrder": [ // "project", - // "firewall" + // "resource" // ], // "parameters": { - // "firewall": { - // "description": "Name of the firewall rule to update.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -31732,30 +45720,33 @@ func (c *FirewallsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/firewalls/{firewall}", + // "path": "{project}/global/forwardingRules/{resource}/testIamPermissions", // "request": { - // "$ref": "Firewall" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.forwardingRules.aggregatedList": +// method id "compute.globalOperations.aggregatedList": -type ForwardingRulesAggregatedListCall struct { +type GlobalOperationsAggregatedListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -31764,10 +45755,10 @@ type ForwardingRulesAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of forwarding rules. -// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/aggregatedList -func (r *ForwardingRulesService) AggregatedList(project string) *ForwardingRulesAggregatedListCall { - c := &ForwardingRulesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of all operations. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/aggregatedList +func (r *GlobalOperationsService) AggregatedList(project string) *GlobalOperationsAggregatedListCall { + c := &GlobalOperationsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -31798,7 +45789,7 @@ func (r *ForwardingRulesService) AggregatedList(project string) *ForwardingRules // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *ForwardingRulesAggregatedListCall) Filter(filter string) *ForwardingRulesAggregatedListCall { +func (c *GlobalOperationsAggregatedListCall) Filter(filter string) *GlobalOperationsAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -31809,7 +45800,7 @@ func (c *ForwardingRulesAggregatedListCall) Filter(filter string) *ForwardingRul // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *ForwardingRulesAggregatedListCall) MaxResults(maxResults int64) *ForwardingRulesAggregatedListCall { +func (c *GlobalOperationsAggregatedListCall) MaxResults(maxResults int64) *GlobalOperationsAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -31826,7 +45817,7 @@ func (c *ForwardingRulesAggregatedListCall) MaxResults(maxResults int64) *Forwar // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *ForwardingRulesAggregatedListCall) OrderBy(orderBy string) *ForwardingRulesAggregatedListCall { +func (c *GlobalOperationsAggregatedListCall) OrderBy(orderBy string) *GlobalOperationsAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -31834,7 +45825,7 @@ func (c *ForwardingRulesAggregatedListCall) OrderBy(orderBy string) *ForwardingR // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *ForwardingRulesAggregatedListCall) PageToken(pageToken string) *ForwardingRulesAggregatedListCall { +func (c *GlobalOperationsAggregatedListCall) PageToken(pageToken string) *GlobalOperationsAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -31842,7 +45833,7 @@ func (c *ForwardingRulesAggregatedListCall) PageToken(pageToken string) *Forward // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ForwardingRulesAggregatedListCall) Fields(s ...googleapi.Field) *ForwardingRulesAggregatedListCall { +func (c *GlobalOperationsAggregatedListCall) Fields(s ...googleapi.Field) *GlobalOperationsAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -31852,7 +45843,7 @@ func (c *ForwardingRulesAggregatedListCall) Fields(s ...googleapi.Field) *Forwar // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
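To show the filter and paging options together, a sketch that walks every page of the aggregated operations list; the filter string and project name are illustrative, and the import path is an assumption rather than the vendored path in this diff.

// Sketch only: placeholder project, assumed import path.
package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// listRunningOperations prints every RUNNING operation across all scopes,
// using the filter/maxResults options documented above and Pages for paging.
func listRunningOperations(ctx context.Context, svc *compute.Service, project string) error {
	call := svc.GlobalOperations.AggregatedList(project).
		Filter("status eq RUNNING"). // field_name comparison_string literal_string
		MaxResults(250)              // acceptable values are 0 to 500
	return call.Pages(ctx, func(page *compute.OperationAggregatedList) error {
		for scope, scoped := range page.Items {
			for _, op := range scoped.Operations {
				fmt.Printf("%s: %s (%s)\n", scope, op.Name, op.OperationType)
			}
		}
		return nil
	})
}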
-func (c *ForwardingRulesAggregatedListCall) IfNoneMatch(entityTag string) *ForwardingRulesAggregatedListCall { +func (c *GlobalOperationsAggregatedListCall) IfNoneMatch(entityTag string) *GlobalOperationsAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -31860,21 +45851,21 @@ func (c *ForwardingRulesAggregatedListCall) IfNoneMatch(entityTag string) *Forwa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ForwardingRulesAggregatedListCall) Context(ctx context.Context) *ForwardingRulesAggregatedListCall { +func (c *GlobalOperationsAggregatedListCall) Context(ctx context.Context) *GlobalOperationsAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ForwardingRulesAggregatedListCall) Header() http.Header { +func (c *GlobalOperationsAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ForwardingRulesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalOperationsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -31885,7 +45876,7 @@ func (c *ForwardingRulesAggregatedListCall) doRequest(alt string) (*http.Respons } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/forwardingRules") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/operations") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders @@ -31895,14 +45886,14 @@ func (c *ForwardingRulesAggregatedListCall) doRequest(alt string) (*http.Respons return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.forwardingRules.aggregatedList" call. -// Exactly one of *ForwardingRuleAggregatedList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *ForwardingRuleAggregatedList.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.globalOperations.aggregatedList" call. +// Exactly one of *OperationAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *OperationAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*ForwardingRuleAggregatedList, error) { +func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (*OperationAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -31921,7 +45912,7 @@ func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*F if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ForwardingRuleAggregatedList{ + ret := &OperationAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -31933,9 +45924,9 @@ func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*F } return ret, nil // { - // "description": "Retrieves an aggregated list of forwarding rules.", + // "description": "Retrieves an aggregated list of all operations.", // "httpMethod": "GET", - // "id": "compute.forwardingRules.aggregatedList", + // "id": "compute.globalOperations.aggregatedList", // "parameterOrder": [ // "project" // ], @@ -31971,9 +45962,9 @@ func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*F // "type": "string" // } // }, - // "path": "{project}/aggregated/forwardingRules", + // "path": "{project}/aggregated/operations", // "response": { - // "$ref": "ForwardingRuleAggregatedList" + // "$ref": "OperationAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -31987,7 +45978,7 @@ func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*F // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *ForwardingRulesAggregatedListCall) Pages(ctx context.Context, f func(*ForwardingRuleAggregatedList) error) error { +func (c *GlobalOperationsAggregatedListCall) Pages(ctx context.Context, f func(*OperationAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -32005,100 +45996,202 @@ func (c *ForwardingRulesAggregatedListCall) Pages(ctx context.Context, f func(*F } } -// method id "compute.forwardingRules.delete": +// method id "compute.globalOperations.delete": -type ForwardingRulesDeleteCall struct { - s *Service - project string - region string - forwardingRule string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalOperationsDeleteCall struct { + s *Service + project string + operation string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified ForwardingRule resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/delete -func (r *ForwardingRulesService) Delete(project string, region string, forwardingRule string) *ForwardingRulesDeleteCall { - c := &ForwardingRulesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified Operations resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/delete +func (r *GlobalOperationsService) Delete(project string, operation string) *GlobalOperationsDeleteCall { + c := &GlobalOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.forwardingRule = forwardingRule + c.operation = operation return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. 
Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *ForwardingRulesDeleteCall) RequestId(requestId string) *ForwardingRulesDeleteCall { - c.urlParams_.Set("requestId", requestId) +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *GlobalOperationsDeleteCall) Fields(s ...googleapi.Field) *GlobalOperationsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *GlobalOperationsDeleteCall) Context(ctx context.Context) *GlobalOperationsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *GlobalOperationsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *GlobalOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "operation": c.operation, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.globalOperations.delete" call. +func (c *GlobalOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Deletes the specified Operations resource.", + // "httpMethod": "DELETE", + // "id": "compute.globalOperations.delete", + // "parameterOrder": [ + // "project", + // "operation" + // ], + // "parameters": { + // "operation": { + // "description": "Name of the Operations resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/operations/{operation}", + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.globalOperations.get": + +type GlobalOperationsGetCall struct { + s *Service + project string + operation string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Retrieves the specified Operations resource. Get a list of +// operations by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/get +func (r *GlobalOperationsService) Get(project string, operation string) *GlobalOperationsGetCall { + c := &GlobalOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.operation = operation return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ForwardingRulesDeleteCall) Fields(s ...googleapi.Field) *ForwardingRulesDeleteCall { +func (c *GlobalOperationsGetCall) Fields(s ...googleapi.Field) *GlobalOperationsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *GlobalOperationsGetCall) IfNoneMatch(entityTag string) *GlobalOperationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ForwardingRulesDeleteCall) Context(ctx context.Context) *ForwardingRulesDeleteCall { +func (c *GlobalOperationsGetCall) Context(ctx context.Context) *GlobalOperationsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
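Because most mutating calls above return an Operation, a common pattern is to poll globalOperations.get until the operation reports DONE. The sketch below assumes the same placeholder service and import path, and uses an arbitrary two-second poll interval.

// Sketch only: placeholder names, assumed import path, arbitrary interval.
package example

import (
	"context"
	"fmt"
	"time"

	compute "google.golang.org/api/compute/v1"
)

// waitForGlobalOperation polls the named global operation until it is DONE,
// returning an error if the operation itself reports errors.
func waitForGlobalOperation(ctx context.Context, svc *compute.Service, project, name string) error {
	for {
		op, err := svc.GlobalOperations.Get(project, name).Context(ctx).Do()
		if err != nil {
			return err
		}
		if op.Status == "DONE" {
			if op.Error != nil {
				return fmt.Errorf("operation %s failed: %+v", name, op.Error.Errors)
			}
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(2 * time.Second):
		}
	}
}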
-func (c *ForwardingRulesDeleteCall) Header() http.Header { +func (c *GlobalOperationsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "forwardingRule": c.forwardingRule, + "project": c.project, + "operation": c.operation, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.forwardingRules.delete" call. +// Do executes the "compute.globalOperations.get" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -32129,17 +46222,16 @@ func (c *ForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Deletes the specified ForwardingRule resource.", - // "httpMethod": "DELETE", - // "id": "compute.forwardingRules.delete", + // "description": "Retrieves the specified Operations resource. Get a list of operations by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.globalOperations.get", // "parameterOrder": [ // "project", - // "region", - // "forwardingRule" + // "operation" // ], // "parameters": { - // "forwardingRule": { - // "description": "Name of the ForwardingRule resource to delete.", + // "operation": { + // "description": "Name of the Operations resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -32151,59 +46243,112 @@ func (c *ForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", + // "path": "{project}/global/operations/{operation}", // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.forwardingRules.get": +// method id "compute.globalOperations.list": -type ForwardingRulesGetCall struct { - s *Service - project string - region string - forwardingRule string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type GlobalOperationsListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified ForwardingRule resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/get -func (r *ForwardingRulesService) Get(project string, region string, forwardingRule string) *ForwardingRulesGetCall { - c := &ForwardingRulesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of Operation resources contained within the +// specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/list +func (r *GlobalOperationsService) List(project string) *GlobalOperationsListCall { + c := &GlobalOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.forwardingRule = forwardingRule + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. 
+// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *GlobalOperationsListCall) Filter(filter string) *GlobalOperationsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *GlobalOperationsListCall) MaxResults(maxResults int64) *GlobalOperationsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *GlobalOperationsListCall) OrderBy(orderBy string) *GlobalOperationsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *GlobalOperationsListCall) PageToken(pageToken string) *GlobalOperationsListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ForwardingRulesGetCall) Fields(s ...googleapi.Field) *ForwardingRulesGetCall { +func (c *GlobalOperationsListCall) Fields(s ...googleapi.Field) *GlobalOperationsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -32213,7 +46358,7 @@ func (c *ForwardingRulesGetCall) Fields(s ...googleapi.Field) *ForwardingRulesGe // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ForwardingRulesGetCall) IfNoneMatch(entityTag string) *ForwardingRulesGetCall { +func (c *GlobalOperationsListCall) IfNoneMatch(entityTag string) *GlobalOperationsListCall { c.ifNoneMatch_ = entityTag return c } @@ -32221,21 +46366,21 @@ func (c *ForwardingRulesGetCall) IfNoneMatch(entityTag string) *ForwardingRulesG // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
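And a short sketch of the plain globalOperations.list call with orderBy="creationTimestamp desc", as described above; again the project name and import path are assumptions.

// Sketch only: placeholder project, assumed import path.
package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// printRecentOperations lists global operations newest-first and prints one
// line per operation on every page.
func printRecentOperations(ctx context.Context, svc *compute.Service, project string) error {
	return svc.GlobalOperations.List(project).
		OrderBy("creationTimestamp desc"). // newest result first, per the option docs
		MaxResults(100).
		Pages(ctx, func(page *compute.OperationList) error {
			for _, op := range page.Items {
				fmt.Printf("%s  %s  %s\n", op.InsertTime, op.OperationType, op.Name)
			}
			return nil
		})
}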
-func (c *ForwardingRulesGetCall) Context(ctx context.Context) *ForwardingRulesGetCall { +func (c *GlobalOperationsListCall) Context(ctx context.Context) *GlobalOperationsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ForwardingRulesGetCall) Header() http.Header { +func (c *GlobalOperationsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -32246,26 +46391,24 @@ func (c *ForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "forwardingRule": c.forwardingRule, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.forwardingRules.get" call. -// Exactly one of *ForwardingRule or error will be non-nil. Any non-2xx +// Do executes the "compute.globalOperations.list" call. +// Exactly one of *OperationList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *ForwardingRule.ServerResponse.Header or (if a response was returned +// *OperationList.ServerResponse.Header or (if a response was returned // at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRule, error) { +func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -32284,7 +46427,7 @@ func (c *ForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRu if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ForwardingRule{ + ret := &OperationList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -32296,20 +46439,34 @@ func (c *ForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRu } return ret, nil // { - // "description": "Returns the specified ForwardingRule resource.", + // "description": "Retrieves a list of Operation resources contained within the specified project.", // "httpMethod": "GET", - // "id": "compute.forwardingRules.get", + // "id": "compute.globalOperations.list", // "parameterOrder": [ - // "project", - // "region", - // "forwardingRule" + // "project" // ], // "parameters": { - // "forwardingRule": { - // "description": "Name of the ForwardingRule resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -32318,18 +46475,11 @@ func (c *ForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRu // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", + // "path": "{project}/global/operations", // "response": { - // "$ref": "ForwardingRule" + // "$ref": "OperationList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -32340,26 +46490,43 @@ func (c *ForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRu } -// method id "compute.forwardingRules.insert": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *GlobalOperationsListCall) Pages(ctx context.Context, f func(*OperationList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type ForwardingRulesInsertCall struct { - s *Service - project string - region string - forwardingrule *ForwardingRule - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.healthChecks.delete": + +type HealthChecksDeleteCall struct { + s *Service + project string + healthCheck string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a ForwardingRule resource in the specified project -// and region using the data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/insert -func (r *ForwardingRulesService) Insert(project string, region string, forwardingrule *ForwardingRule) *ForwardingRulesInsertCall { - c := &ForwardingRulesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified HealthCheck resource. +func (r *HealthChecksService) Delete(project string, healthCheck string) *HealthChecksDeleteCall { + c := &HealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.forwardingrule = forwardingrule + c.healthCheck = healthCheck return c } @@ -32377,7 +46544,7 @@ func (r *ForwardingRulesService) Insert(project string, region string, forwardin // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ForwardingRulesInsertCall) RequestId(requestId string) *ForwardingRulesInsertCall { +func (c *HealthChecksDeleteCall) RequestId(requestId string) *HealthChecksDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -32385,7 +46552,7 @@ func (c *ForwardingRulesInsertCall) RequestId(requestId string) *ForwardingRules // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ForwardingRulesInsertCall) Fields(s ...googleapi.Field) *ForwardingRulesInsertCall { +func (c *HealthChecksDeleteCall) Fields(s ...googleapi.Field) *HealthChecksDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -32393,52 +46560,47 @@ func (c *ForwardingRulesInsertCall) Fields(s ...googleapi.Field) *ForwardingRule // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ForwardingRulesInsertCall) Context(ctx context.Context) *ForwardingRulesInsertCall { +func (c *HealthChecksDeleteCall) Context(ctx context.Context) *HealthChecksDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ForwardingRulesInsertCall) Header() http.Header { +func (c *HealthChecksDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "healthCheck": c.healthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.forwardingRules.insert" call. +// Do executes the "compute.healthChecks.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -32469,25 +46631,25 @@ func (c *ForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Creates a ForwardingRule resource in the specified project and region using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.forwardingRules.insert", + // "description": "Deletes the specified HealthCheck resource.", + // "httpMethod": "DELETE", + // "id": "compute.healthChecks.delete", // "parameterOrder": [ // "project", - // "region" + // "healthCheck" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "healthCheck": { + // "description": "Name of the HealthCheck resource to delete.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, @@ -32497,10 +46659,7 @@ func (c *ForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation // "type": "string" // } // }, - // "path": "{project}/regions/{region}/forwardingRules", - // "request": { - // "$ref": "ForwardingRule" - // }, + // "path": "{project}/global/healthChecks/{healthCheck}", // "response": { // "$ref": "Operation" // }, @@ -32512,99 +46671,31 @@ func (c *ForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.forwardingRules.list": +// method id "compute.healthChecks.get": -type ForwardingRulesListCall struct { +type HealthChecksGetCall struct { s *Service project string - region string + healthCheck string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of ForwardingRule resources available to the -// specified project and region. -// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/list -func (r *ForwardingRulesService) List(project string, region string) *ForwardingRulesListCall { - c := &ForwardingRulesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified HealthCheck resource. Get a list of +// available health checks by making a list() request. +func (r *HealthChecksService) Get(project string, healthCheck string) *HealthChecksGetCall { + c := &HealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - return c -} - -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. 
The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *ForwardingRulesListCall) Filter(filter string) *ForwardingRulesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *ForwardingRulesListCall) MaxResults(maxResults int64) *ForwardingRulesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *ForwardingRulesListCall) OrderBy(orderBy string) *ForwardingRulesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *ForwardingRulesListCall) PageToken(pageToken string) *ForwardingRulesListCall { - c.urlParams_.Set("pageToken", pageToken) + c.healthCheck = healthCheck return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ForwardingRulesListCall) Fields(s ...googleapi.Field) *ForwardingRulesListCall { +func (c *HealthChecksGetCall) Fields(s ...googleapi.Field) *HealthChecksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -32614,7 +46705,7 @@ func (c *ForwardingRulesListCall) Fields(s ...googleapi.Field) *ForwardingRulesL // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
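The IfNoneMatch and Fields setters documented above enable conditional, partial reads against the generated Get calls. A minimal sketch of the conditional-read pattern, assuming the vendored package is the beta compute client (adjust the import path to whichever generated version is actually vendored); the project and health-check names are placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v0.beta" // assumption: match the vendored API version
	"google.golang.org/api/googleapi"
)

func main() {
	ctx := context.Background()

	// Application Default Credentials; any authenticated *http.Client works here.
	httpClient, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(httpClient)
	if err != nil {
		log.Fatal(err)
	}

	// First fetch: keep the ETag the server returned alongside the resource.
	hc, err := svc.HealthChecks.Get("my-project", "example-check").Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	etag := hc.ServerResponse.Header.Get("Etag")

	// Later fetch: send If-None-Match so the server can answer 304 Not Modified.
	_, err = svc.HealthChecks.Get("my-project", "example-check").IfNoneMatch(etag).Context(ctx).Do()
	switch {
	case googleapi.IsNotModified(err):
		fmt.Println("health check unchanged since the last fetch")
	case err != nil:
		log.Fatal(err)
	}
}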
-func (c *ForwardingRulesListCall) IfNoneMatch(entityTag string) *ForwardingRulesListCall { +func (c *HealthChecksGetCall) IfNoneMatch(entityTag string) *HealthChecksGetCall { c.ifNoneMatch_ = entityTag return c } @@ -32622,21 +46713,21 @@ func (c *ForwardingRulesListCall) IfNoneMatch(entityTag string) *ForwardingRules // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ForwardingRulesListCall) Context(ctx context.Context) *ForwardingRulesListCall { +func (c *HealthChecksGetCall) Context(ctx context.Context) *HealthChecksGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ForwardingRulesListCall) Header() http.Header { +func (c *HealthChecksGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ForwardingRulesListCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -32647,25 +46738,25 @@ func (c *ForwardingRulesListCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "healthCheck": c.healthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.forwardingRules.list" call. -// Exactly one of *ForwardingRuleList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ForwardingRuleList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingRuleList, error) { +// Do executes the "compute.healthChecks.get" call. +// Exactly one of *HealthCheck or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *HealthCheck.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -32684,7 +46775,7 @@ func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingR if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ForwardingRuleList{ + ret := &HealthCheck{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -32696,35 +46787,19 @@ func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingR } return ret, nil // { - // "description": "Retrieves a list of ForwardingRule resources available to the specified project and region.", + // "description": "Returns the specified HealthCheck resource. Get a list of available health checks by making a list() request.", // "httpMethod": "GET", - // "id": "compute.forwardingRules.list", + // "id": "compute.healthChecks.get", // "parameterOrder": [ // "project", - // "region" + // "healthCheck" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "healthCheck": { + // "description": "Name of the HealthCheck resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, // "project": { @@ -32733,18 +46808,11 @@ func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingR // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/forwardingRules", + // "path": "{project}/global/healthChecks/{healthCheck}", // "response": { - // "$ref": "ForwardingRuleList" + // "$ref": "HealthCheck" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -32755,50 +46823,23 @@ func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingR } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *ForwardingRulesListCall) Pages(ctx context.Context, f func(*ForwardingRuleList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.forwardingRules.patch": +// method id "compute.healthChecks.insert": -type ForwardingRulesPatchCall struct { - s *Service - project string - region string - forwardingRule string - forwardingrule *ForwardingRule - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HealthChecksInsertCall struct { + s *Service + project string + healthcheck *HealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates the specified forwarding rule with the data included -// in the request. This method supports PATCH semantics and uses the -// JSON merge patch format and processing rules. Currently, you can only -// patch the network_tier field. -func (r *ForwardingRulesService) Patch(project string, region string, forwardingRule string, forwardingrule *ForwardingRule) *ForwardingRulesPatchCall { - c := &ForwardingRulesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a HealthCheck resource in the specified project using +// the data included in the request. 
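As a rough illustration of the Insert call added here (service construction and imports as in the previous sketch; the HealthCheck field and sub-message names below follow the generated schema and should be treated as assumptions):

// createExampleHealthCheck starts creation of a TCP health check and returns the
// long-running Operation, which a caller would poll for completion, for example
// through the globalOperations methods shown earlier in this file.
func createExampleHealthCheck(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	hc := &compute.HealthCheck{
		Name: "example-check", // placeholder name
		Type: "TCP",           // one of TCP, SSL, HTTP, HTTPS
		// assumption: protocol-specific settings live in the matching sub-message
		TcpHealthCheck: &compute.TCPHealthCheck{Port: 8080},
	}
	return svc.HealthChecks.Insert("my-project", hc).Context(ctx).Do()
}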
+func (r *HealthChecksService) Insert(project string, healthcheck *HealthCheck) *HealthChecksInsertCall { + c := &HealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.forwardingRule = forwardingRule - c.forwardingrule = forwardingrule + c.healthcheck = healthcheck return c } @@ -32816,7 +46857,7 @@ func (r *ForwardingRulesService) Patch(project string, region string, forwarding // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ForwardingRulesPatchCall) RequestId(requestId string) *ForwardingRulesPatchCall { +func (c *HealthChecksInsertCall) RequestId(requestId string) *HealthChecksInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -32824,7 +46865,7 @@ func (c *ForwardingRulesPatchCall) RequestId(requestId string) *ForwardingRulesP // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ForwardingRulesPatchCall) Fields(s ...googleapi.Field) *ForwardingRulesPatchCall { +func (c *HealthChecksInsertCall) Fields(s ...googleapi.Field) *HealthChecksInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -32832,53 +46873,51 @@ func (c *ForwardingRulesPatchCall) Fields(s ...googleapi.Field) *ForwardingRules // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ForwardingRulesPatchCall) Context(ctx context.Context) *ForwardingRulesPatchCall { +func (c *HealthChecksInsertCall) Context(ctx context.Context) *HealthChecksInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ForwardingRulesPatchCall) Header() http.Header { +func (c *HealthChecksInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ForwardingRulesPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "forwardingRule": c.forwardingRule, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.forwardingRules.patch" call. +// Do executes the "compute.healthChecks.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ForwardingRulesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -32909,22 +46948,13 @@ func (c *ForwardingRulesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Updates the specified forwarding rule with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. Currently, you can only patch the network_tier field.", - // "httpMethod": "PATCH", - // "id": "compute.forwardingRules.patch", + // "description": "Creates a HealthCheck resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.healthChecks.insert", // "parameterOrder": [ - // "project", - // "region", - // "forwardingRule" + // "project" // ], // "parameters": { - // "forwardingRule": { - // "description": "Name of the ForwardingRule resource to patch.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -32932,136 +46962,177 @@ func (c *ForwardingRulesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", + // "path": "{project}/global/healthChecks", // "request": { - // "$ref": "ForwardingRule" + // "$ref": "HealthCheck" // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.forwardingRules.setLabels": +// method id "compute.healthChecks.list": -type ForwardingRulesSetLabelsCall struct { - s *Service - project string - region string - resource string - regionsetlabelsrequest *RegionSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HealthChecksListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetLabels: Sets the labels on the specified resource. To learn more -// about labels, read the Labeling Resources documentation. -func (r *ForwardingRulesService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *ForwardingRulesSetLabelsCall { - c := &ForwardingRulesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of HealthCheck resources available to the +// specified project. +func (r *HealthChecksService) List(project string) *HealthChecksListCall { + c := &HealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.resource = resource - c.regionsetlabelsrequest = regionsetlabelsrequest return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). 
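The requestId parameter described above makes retries of mutating calls idempotent: reusing the same UUID lets the server recognize and drop duplicates. A hedged sketch of that retry pattern (setup as in the first sketch; the UUID is a placeholder that a real caller would generate fresh for each logical request):

// deleteHealthCheckIdempotently retries a delete while reusing one requestId, so a
// retry after a timeout cannot trigger a second, duplicate operation server-side.
func deleteHealthCheckIdempotently(ctx context.Context, svc *compute.Service) error {
	const reqID = "3e1f0e0a-8c76-4a9b-9f0d-5b1c2d3e4f50" // placeholder UUID

	var lastErr error
	for attempt := 0; attempt < 3; attempt++ {
		_, lastErr = svc.HealthChecks.Delete("my-project", "example-check").
			RequestId(reqID). // same ID on every retry of this one logical delete
			Context(ctx).
			Do()
		if lastErr == nil {
			return nil
		}
		// On a timeout or transient error, retrying with the same requestId is safe:
		// if the original request did reach the server, the duplicate is ignored.
	}
	return lastErr
}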
-func (c *ForwardingRulesSetLabelsCall) RequestId(requestId string) *ForwardingRulesSetLabelsCall { - c.urlParams_.Set("requestId", requestId) +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *HealthChecksListCall) Filter(filter string) *HealthChecksListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *HealthChecksListCall) MaxResults(maxResults int64) *HealthChecksListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *HealthChecksListCall) OrderBy(orderBy string) *HealthChecksListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *HealthChecksListCall) PageToken(pageToken string) *HealthChecksListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ForwardingRulesSetLabelsCall) Fields(s ...googleapi.Field) *ForwardingRulesSetLabelsCall { +func (c *HealthChecksListCall) Fields(s ...googleapi.Field) *HealthChecksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *HealthChecksListCall) IfNoneMatch(entityTag string) *HealthChecksListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ForwardingRulesSetLabelsCall) Context(ctx context.Context) *ForwardingRulesSetLabelsCall { +func (c *HealthChecksListCall) Context(ctx context.Context) *HealthChecksListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ForwardingRulesSetLabelsCall) Header() http.Header { +func (c *HealthChecksListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ForwardingRulesSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{resource}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.forwardingRules.setLabels" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.healthChecks.list" call. +// Exactly one of *HealthCheckList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// *HealthCheckList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -33080,7 +47151,7 @@ func (c *ForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operat if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &HealthCheckList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -33092,79 +47163,98 @@ func (c *ForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Sets the labels on the specified resource. 
To learn more about labels, read the Labeling Resources documentation.", - // "httpMethod": "POST", - // "id": "compute.forwardingRules.setLabels", + // "description": "Retrieves the list of HealthCheck resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.healthChecks.list", // "parameterOrder": [ - // "project", - // "region", - // "resource" + // "project" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", // "type": "string" // }, - // "region": { - // "description": "The region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, - // "resource": { - // "description": "Name of the resource for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/forwardingRules/{resource}/setLabels", - // "request": { - // "$ref": "RegionSetLabelsRequest" - // }, + // "path": "{project}/global/healthChecks", // "response": { - // "$ref": "Operation" + // "$ref": "HealthCheckList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.forwardingRules.setTarget": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *HealthChecksListCall) Pages(ctx context.Context, f func(*HealthCheckList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type ForwardingRulesSetTargetCall struct { - s *Service - project string - region string - forwardingRule string - targetreference *TargetReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.healthChecks.patch": + +type HealthChecksPatchCall struct { + s *Service + project string + healthCheck string + healthcheck *HealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetTarget: Changes target URL for forwarding rule. The new target -// should be of the same type as the old target. -// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/setTarget -func (r *ForwardingRulesService) SetTarget(project string, region string, forwardingRule string, targetreference *TargetReference) *ForwardingRulesSetTargetCall { - c := &ForwardingRulesSetTargetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates a HealthCheck resource in the specified project using +// the data included in the request. 
This method supports PATCH +// semantics and uses the JSON merge patch format and processing rules. +func (r *HealthChecksService) Patch(project string, healthCheck string, healthcheck *HealthCheck) *HealthChecksPatchCall { + c := &HealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.forwardingRule = forwardingRule - c.targetreference = targetreference + c.healthCheck = healthCheck + c.healthcheck = healthcheck return c } @@ -33182,7 +47272,7 @@ func (r *ForwardingRulesService) SetTarget(project string, region string, forwar // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ForwardingRulesSetTargetCall) RequestId(requestId string) *ForwardingRulesSetTargetCall { +func (c *HealthChecksPatchCall) RequestId(requestId string) *HealthChecksPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -33190,7 +47280,7 @@ func (c *ForwardingRulesSetTargetCall) RequestId(requestId string) *ForwardingRu // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ForwardingRulesSetTargetCall) Fields(s ...googleapi.Field) *ForwardingRulesSetTargetCall { +func (c *HealthChecksPatchCall) Fields(s ...googleapi.Field) *HealthChecksPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -33198,53 +47288,52 @@ func (c *ForwardingRulesSetTargetCall) Fields(s ...googleapi.Field) *ForwardingR // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ForwardingRulesSetTargetCall) Context(ctx context.Context) *ForwardingRulesSetTargetCall { +func (c *HealthChecksPatchCall) Context(ctx context.Context) *HealthChecksPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ForwardingRulesSetTargetCall) Header() http.Header { +func (c *HealthChecksPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("PATCH", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "forwardingRule": c.forwardingRule, + "project": c.project, + "healthCheck": c.healthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.forwardingRules.setTarget" call. 
+// Do executes the "compute.healthChecks.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -33275,17 +47364,16 @@ func (c *ForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Changes target URL for forwarding rule. The new target should be of the same type as the old target.", - // "httpMethod": "POST", - // "id": "compute.forwardingRules.setTarget", + // "description": "Updates a HealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.healthChecks.patch", // "parameterOrder": [ // "project", - // "region", - // "forwardingRule" + // "healthCheck" // ], // "parameters": { - // "forwardingRule": { - // "description": "Name of the ForwardingRule resource in which target is to be set.", + // "healthCheck": { + // "description": "Name of the HealthCheck resource to patch.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -33298,22 +47386,15 @@ func (c *ForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operat // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget", + // "path": "{project}/global/healthChecks/{healthCheck}", // "request": { - // "$ref": "TargetReference" + // "$ref": "HealthCheck" // }, // "response": { // "$ref": "Operation" @@ -33326,12 +47407,11 @@ func (c *ForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operat } -// method id "compute.forwardingRules.testIamPermissions": +// method id "compute.healthChecks.testIamPermissions": -type ForwardingRulesTestIamPermissionsCall struct { +type HealthChecksTestIamPermissionsCall struct { s *Service project string - region string resource string testpermissionsrequest *TestPermissionsRequest urlParams_ gensupport.URLParams @@ -33341,10 +47421,9 @@ type ForwardingRulesTestIamPermissionsCall struct { // TestIamPermissions: Returns permissions that a caller has on the // specified resource. -func (r *ForwardingRulesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *ForwardingRulesTestIamPermissionsCall { - c := &ForwardingRulesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *HealthChecksService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *HealthChecksTestIamPermissionsCall { + c := &HealthChecksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region c.resource = resource c.testpermissionsrequest = testpermissionsrequest return c @@ -33353,7 +47432,7 @@ func (r *ForwardingRulesService) TestIamPermissions(project string, region strin // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ForwardingRulesTestIamPermissionsCall) Fields(s ...googleapi.Field) *ForwardingRulesTestIamPermissionsCall { +func (c *HealthChecksTestIamPermissionsCall) Fields(s ...googleapi.Field) *HealthChecksTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -33361,21 +47440,21 @@ func (c *ForwardingRulesTestIamPermissionsCall) Fields(s ...googleapi.Field) *Fo // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ForwardingRulesTestIamPermissionsCall) Context(ctx context.Context) *ForwardingRulesTestIamPermissionsCall { +func (c *HealthChecksTestIamPermissionsCall) Context(ctx context.Context) *HealthChecksTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
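The HealthChecks list call and Pages helper added above support server-side filtering, ordering, and transparent pagination. A hedged usage sketch (setup as in the first sketch; the filter expression and project are placeholders):

// listExampleHealthChecks prints the names of health checks whose name matches a
// pattern, newest first, letting Pages follow nextPageToken across result pages.
func listExampleHealthChecks(ctx context.Context, svc *compute.Service) error {
	call := svc.HealthChecks.List("my-project").
		Filter("name eq example-.*").     // eq/ne; string literals are RE2 regexps matching the whole field
		MaxResults(100).                  // page size; the API caps this at 500
		OrderBy("creationTimestamp desc") // newest first

	return call.Pages(ctx, func(page *compute.HealthCheckList) error {
		for _, hc := range page.Items {
			fmt.Println(hc.Name)
		}
		return nil
	})
}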
-func (c *ForwardingRulesTestIamPermissionsCall) Header() http.Header { +func (c *HealthChecksTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ForwardingRulesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -33388,26 +47467,25 @@ func (c *ForwardingRulesTestIamPermissionsCall) doRequest(alt string) (*http.Res } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.forwardingRules.testIamPermissions" call. +// Do executes the "compute.healthChecks.testIamPermissions" call. // Exactly one of *TestPermissionsResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *TestPermissionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ForwardingRulesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *HealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -33440,10 +47518,9 @@ func (c *ForwardingRulesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) // { // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.forwardingRules.testIamPermissions", + // "id": "compute.healthChecks.testIamPermissions", // "parameterOrder": [ // "project", - // "region", // "resource" // ], // "parameters": { @@ -33454,22 +47531,15 @@ func (c *ForwardingRulesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, - // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "resource": { // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/forwardingRules/{resource}/testIamPermissions", + // "path": "{project}/global/healthChecks/{resource}/testIamPermissions", // "request": { // "$ref": "TestPermissionsRequest" // }, @@ -33485,23 +47555,25 @@ func (c *ForwardingRulesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) } -// method id "compute.globalAddresses.delete": +// method id "compute.healthChecks.update": -type GlobalAddressesDeleteCall struct { - s *Service - project string - address string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HealthChecksUpdateCall struct { + s *Service + project string + healthCheck string + healthcheck *HealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified address resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/delete -func (r *GlobalAddressesService) Delete(project string, address string) *GlobalAddressesDeleteCall { - c := &GlobalAddressesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates a HealthCheck resource in the specified project using +// the data included in the request. +func (r *HealthChecksService) Update(project string, healthCheck string, healthcheck *HealthCheck) *HealthChecksUpdateCall { + c := &HealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.address = address + c.healthCheck = healthCheck + c.healthcheck = healthcheck return c } @@ -33519,7 +47591,7 @@ func (r *GlobalAddressesService) Delete(project string, address string) *GlobalA // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *GlobalAddressesDeleteCall) RequestId(requestId string) *GlobalAddressesDeleteCall { +func (c *HealthChecksUpdateCall) RequestId(requestId string) *HealthChecksUpdateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -33527,7 +47599,7 @@ func (c *GlobalAddressesDeleteCall) RequestId(requestId string) *GlobalAddresses // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
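// Illustrative usage sketch (not part of the generated file): the hunks above introduce
// compute.healthChecks.testIamPermissions and compute.healthChecks.update. The sketch below
// shows how a caller might drive those two calls through this generated client. The import
// path, the Service field names (HealthChecks etc.), the TestPermissionsRequest/Response
// field names, the HealthChecks.Get call and the permission string are assumptions based on
// the usual shape of this generated package; only TestIamPermissions, Update, RequestId,
// Context and Do appear in this diff.
package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v0.beta" // assumed import path for this vendored surface
)

func main() {
	ctx := context.Background()

	// Build an authenticated HTTP client and the generated Service.
	httpClient, err := google.DefaultClient(ctx, "https://www.googleapis.com/auth/compute")
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(httpClient)
	if err != nil {
		log.Fatal(err)
	}

	project, name := "my-project", "my-health-check" // hypothetical identifiers

	// Ask which of the listed permissions the caller actually holds (field names assumed).
	perms, err := svc.HealthChecks.TestIamPermissions(project, name, &compute.TestPermissionsRequest{
		Permissions: []string{"compute.healthChecks.update"},
	}).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("granted: %v", perms.Permissions)

	// Read-modify-write through Update; a fixed requestId keeps a retried PUT idempotent.
	hc, err := svc.HealthChecks.Get(project, name).Context(ctx).Do() // Get assumed from the wider generated surface
	if err != nil {
		log.Fatal(err)
	}
	requestID := "00112233-4455-6677-8899-aabbccddeeff" // any valid, non-zero UUID, per the requestId doc above
	op, err := svc.HealthChecks.Update(project, name, hc).RequestId(requestID).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("update operation: %s", op.Name)
}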
-func (c *GlobalAddressesDeleteCall) Fields(s ...googleapi.Field) *GlobalAddressesDeleteCall { +func (c *HealthChecksUpdateCall) Fields(s ...googleapi.Field) *HealthChecksUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -33535,47 +47607,52 @@ func (c *GlobalAddressesDeleteCall) Fields(s ...googleapi.Field) *GlobalAddresse // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalAddressesDeleteCall) Context(ctx context.Context) *GlobalAddressesDeleteCall { +func (c *HealthChecksUpdateCall) Context(ctx context.Context) *HealthChecksUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalAddressesDeleteCall) Header() http.Header { +func (c *HealthChecksUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalAddressesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{address}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("PUT", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "address": c.address, + "project": c.project, + "healthCheck": c.healthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalAddresses.delete" call. +// Do executes the "compute.healthChecks.update" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *GlobalAddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -33606,16 +47683,16 @@ func (c *GlobalAddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Deletes the specified address resource.", - // "httpMethod": "DELETE", - // "id": "compute.globalAddresses.delete", + // "description": "Updates a HealthCheck resource in the specified project using the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.healthChecks.update", // "parameterOrder": [ // "project", - // "address" + // "healthCheck" // ], // "parameters": { - // "address": { - // "description": "Name of the address resource to delete.", + // "healthCheck": { + // "description": "Name of the HealthCheck resource to update.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -33634,7 +47711,10 @@ func (c *GlobalAddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation // "type": "string" // } // }, - // "path": "{project}/global/addresses/{address}", + // "path": "{project}/global/healthChecks/{healthCheck}", + // "request": { + // "$ref": "HealthCheck" + // }, // "response": { // "$ref": "Operation" // }, @@ -33646,32 +47726,95 @@ func (c *GlobalAddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.globalAddresses.get": +// method id "compute.hostTypes.aggregatedList": -type GlobalAddressesGetCall struct { +type HostTypesAggregatedListCall struct { s *Service project string - address string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the specified address resource. Get a list of available -// addresses by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/get -func (r *GlobalAddressesService) Get(project string, address string) *GlobalAddressesGetCall { - c := &GlobalAddressesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of host types. +func (r *HostTypesService) AggregatedList(project string) *HostTypesAggregatedListCall { + c := &HostTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.address = address + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. 
+// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *HostTypesAggregatedListCall) Filter(filter string) *HostTypesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *HostTypesAggregatedListCall) MaxResults(maxResults int64) *HostTypesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *HostTypesAggregatedListCall) OrderBy(orderBy string) *HostTypesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *HostTypesAggregatedListCall) PageToken(pageToken string) *HostTypesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalAddressesGetCall) Fields(s ...googleapi.Field) *GlobalAddressesGetCall { +func (c *HostTypesAggregatedListCall) Fields(s ...googleapi.Field) *HostTypesAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -33681,7 +47824,7 @@ func (c *GlobalAddressesGetCall) Fields(s ...googleapi.Field) *GlobalAddressesGe // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *GlobalAddressesGetCall) IfNoneMatch(entityTag string) *GlobalAddressesGetCall { +func (c *HostTypesAggregatedListCall) IfNoneMatch(entityTag string) *HostTypesAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -33689,21 +47832,21 @@ func (c *GlobalAddressesGetCall) IfNoneMatch(entityTag string) *GlobalAddressesG // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
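// Illustrative usage sketch (not part of the generated file): the hunk above adds
// HostTypes.AggregatedList together with its Filter, MaxResults, OrderBy and PageToken
// options, and a Pages helper for this call appears further down in the same diff. The
// project name and filter expression below are hypothetical (the expression just mirrors the
// option's own doc comment); it assumes the same imports and Service value as the earlier
// sketch, and it deliberately touches only NextPageToken, the one list field this diff shows.
func listHostTypePages(ctx context.Context, svc *compute.Service, project string) error {
	call := svc.HostTypes.AggregatedList(project).
		Filter("zone eq us-central1-f"). // filter syntax taken from the Filter doc comment above
		MaxResults(100)

	// Pages drives the PageToken/NextPageToken loop for us; real code would also walk the
	// per-zone entries of each page, which this sketch leaves out.
	pages := 0
	return call.Pages(ctx, func(page *compute.HostTypeAggregatedList) error {
		pages++
		log.Printf("page %d, nextPageToken=%q", pages, page.NextPageToken)
		return nil
	})
}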
-func (c *GlobalAddressesGetCall) Context(ctx context.Context) *GlobalAddressesGetCall { +func (c *HostTypesAggregatedListCall) Context(ctx context.Context) *HostTypesAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalAddressesGetCall) Header() http.Header { +func (c *HostTypesAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalAddressesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *HostTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -33714,25 +47857,24 @@ func (c *GlobalAddressesGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{address}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/hostTypes") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "address": c.address, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalAddresses.get" call. -// Exactly one of *Address or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Address.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { +// Do executes the "compute.hostTypes.aggregatedList" call. +// Exactly one of *HostTypeAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *HostTypeAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *HostTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*HostTypeAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -33751,7 +47893,7 @@ func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Address{ + ret := &HostTypeAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -33763,19 +47905,34 @@ func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, err } return ret, nil // { - // "description": "Returns the specified address resource. 
Get a list of available addresses by making a list() request.", + // "description": "Retrieves an aggregated list of host types.", // "httpMethod": "GET", - // "id": "compute.globalAddresses.get", + // "id": "compute.hostTypes.aggregatedList", // "parameterOrder": [ - // "project", - // "address" + // "project" // ], // "parameters": { - // "address": { - // "description": "Name of the address resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -33786,9 +47943,9 @@ func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, err // "type": "string" // } // }, - // "path": "{project}/global/addresses/{address}", + // "path": "{project}/aggregated/hostTypes", // "response": { - // "$ref": "Address" + // "$ref": "HostTypeAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -33799,102 +47956,116 @@ func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, err } -// method id "compute.globalAddresses.insert": - -type GlobalAddressesInsertCall struct { - s *Service - project string - address *Address - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *HostTypesAggregatedListCall) Pages(ctx context.Context, f func(*HostTypeAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } } -// Insert: Creates an address resource in the specified project using -// the data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/insert -func (r *GlobalAddressesService) Insert(project string, address *Address) *GlobalAddressesInsertCall { - c := &GlobalAddressesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.address = address - return c -} +// method id "compute.hostTypes.get": -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *GlobalAddressesInsertCall) RequestId(requestId string) *GlobalAddressesInsertCall { - c.urlParams_.Set("requestId", requestId) +type HostTypesGetCall struct { + s *Service + project string + zone string + hostType string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified host type. Get a list of available host +// types by making a list() request. +func (r *HostTypesService) Get(project string, zone string, hostType string) *HostTypesGetCall { + c := &HostTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.hostType = hostType return c } // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalAddressesInsertCall) Fields(s ...googleapi.Field) *GlobalAddressesInsertCall { +func (c *HostTypesGetCall) Fields(s ...googleapi.Field) *HostTypesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *HostTypesGetCall) IfNoneMatch(entityTag string) *HostTypesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalAddressesInsertCall) Context(ctx context.Context) *GlobalAddressesInsertCall { +func (c *HostTypesGetCall) Context(ctx context.Context) *HostTypesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalAddressesInsertCall) Header() http.Header { +func (c *HostTypesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalAddressesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *HostTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.address) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/hostTypes/{hostType}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "hostType": c.hostType, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalAddresses.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.hostTypes.get" call. +// Exactly one of *HostType or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *HostType.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *GlobalAddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HostTypesGetCall) Do(opts ...googleapi.CallOption) (*HostType, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -33913,7 +48084,7 @@ func (c *GlobalAddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &HostType{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -33925,13 +48096,22 @@ func (c *GlobalAddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Creates an address resource in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.globalAddresses.insert", + // "description": "Returns the specified host type. Get a list of available host types by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.hostTypes.get", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "hostType" // ], // "parameters": { + // "hostType": { + // "description": "Name of the host type to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -33939,43 +48119,45 @@ func (c *GlobalAddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/addresses", - // "request": { - // "$ref": "Address" - // }, + // "path": "{project}/zones/{zone}/hostTypes/{hostType}", // "response": { - // "$ref": "Operation" + // "$ref": "HostType" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.globalAddresses.list": +// method id "compute.hostTypes.list": -type GlobalAddressesListCall struct { +type HostTypesListCall struct { s *Service project string + zone string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of global addresses. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/list -func (r *GlobalAddressesService) List(project string) *GlobalAddressesListCall { - c := &GlobalAddressesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of host types available to the specified +// project. 
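// Illustrative usage sketch (not part of the generated file): the hunk above adds
// compute.hostTypes.get, whose call type supports IfNoneMatch alongside Do. The sketch shows
// the conditional-GET pattern its doc comments describe: reuse the ETag from an earlier
// response and treat http.StatusNotModified as "nothing changed". Identifiers are
// hypothetical; it assumes the same imports and Service value as the earlier sketch, plus
// "google.golang.org/api/googleapi" for IsNotModified.
func getHostTypeIfChanged(ctx context.Context, svc *compute.Service, project, zone, hostType string) (*compute.HostType, error) {
	// First fetch: remember the ETag the server returned with the resource.
	ht, err := svc.HostTypes.Get(project, zone, hostType).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	etag := ht.ServerResponse.Header.Get("Etag")

	// Later fetch: only transfer the body if the resource changed since that ETag.
	updated, err := svc.HostTypes.Get(project, zone, hostType).IfNoneMatch(etag).Context(ctx).Do()
	if googleapi.IsNotModified(err) {
		return ht, nil // unchanged; keep the cached copy
	}
	if err != nil {
		return nil, err
	}
	return updated, nil
}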
+func (r *HostTypesService) List(project string, zone string) *HostTypesListCall { + c := &HostTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone return c } @@ -34005,7 +48187,7 @@ func (r *GlobalAddressesService) List(project string) *GlobalAddressesListCall { // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *GlobalAddressesListCall) Filter(filter string) *GlobalAddressesListCall { +func (c *HostTypesListCall) Filter(filter string) *HostTypesListCall { c.urlParams_.Set("filter", filter) return c } @@ -34016,7 +48198,7 @@ func (c *GlobalAddressesListCall) Filter(filter string) *GlobalAddressesListCall // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *GlobalAddressesListCall) MaxResults(maxResults int64) *GlobalAddressesListCall { +func (c *HostTypesListCall) MaxResults(maxResults int64) *HostTypesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -34033,7 +48215,7 @@ func (c *GlobalAddressesListCall) MaxResults(maxResults int64) *GlobalAddressesL // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *GlobalAddressesListCall) OrderBy(orderBy string) *GlobalAddressesListCall { +func (c *HostTypesListCall) OrderBy(orderBy string) *HostTypesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -34041,7 +48223,7 @@ func (c *GlobalAddressesListCall) OrderBy(orderBy string) *GlobalAddressesListCa // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *GlobalAddressesListCall) PageToken(pageToken string) *GlobalAddressesListCall { +func (c *HostTypesListCall) PageToken(pageToken string) *HostTypesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -34049,7 +48231,7 @@ func (c *GlobalAddressesListCall) PageToken(pageToken string) *GlobalAddressesLi // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalAddressesListCall) Fields(s ...googleapi.Field) *GlobalAddressesListCall { +func (c *HostTypesListCall) Fields(s ...googleapi.Field) *HostTypesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -34059,7 +48241,7 @@ func (c *GlobalAddressesListCall) Fields(s ...googleapi.Field) *GlobalAddressesL // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *GlobalAddressesListCall) IfNoneMatch(entityTag string) *GlobalAddressesListCall { +func (c *HostTypesListCall) IfNoneMatch(entityTag string) *HostTypesListCall { c.ifNoneMatch_ = entityTag return c } @@ -34067,21 +48249,21 @@ func (c *GlobalAddressesListCall) IfNoneMatch(entityTag string) *GlobalAddresses // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *GlobalAddressesListCall) Context(ctx context.Context) *GlobalAddressesListCall { +func (c *HostTypesListCall) Context(ctx context.Context) *HostTypesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalAddressesListCall) Header() http.Header { +func (c *HostTypesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalAddressesListCall) doRequest(alt string) (*http.Response, error) { +func (c *HostTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -34092,24 +48274,25 @@ func (c *GlobalAddressesListCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/hostTypes") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalAddresses.list" call. -// Exactly one of *AddressList or error will be non-nil. Any non-2xx +// Do executes the "compute.hostTypes.list" call. +// Exactly one of *HostTypeList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *AddressList.ServerResponse.Header or (if a response was returned at +// *HostTypeList.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, error) { +func (c *HostTypesListCall) Do(opts ...googleapi.CallOption) (*HostTypeList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -34128,7 +48311,7 @@ func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &AddressList{ + ret := &HostTypeList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -34140,11 +48323,12 @@ func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList } return ret, nil // { - // "description": "Retrieves a list of global addresses.", + // "description": "Retrieves a list of host types available to the specified project.", // "httpMethod": "GET", - // "id": "compute.globalAddresses.list", + // "id": "compute.hostTypes.list", // "parameterOrder": [ - // "project" + // "project", + // "zone" // ], // "parameters": { // "filter": { @@ -34176,11 +48360,18 @@ func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/addresses", + // "path": "{project}/zones/{zone}/hostTypes", // "response": { - // "$ref": "AddressList" + // "$ref": "HostTypeList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -34194,7 +48385,7 @@ func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *GlobalAddressesListCall) Pages(ctx context.Context, f func(*AddressList) error) error { +func (c *HostTypesListCall) Pages(ctx context.Context, f func(*HostTypeList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -34212,85 +48403,155 @@ func (c *GlobalAddressesListCall) Pages(ctx context.Context, f func(*AddressList } } -// method id "compute.globalAddresses.setLabels": +// method id "compute.hosts.aggregatedList": -type GlobalAddressesSetLabelsCall struct { - s *Service - project string - resource string - globalsetlabelsrequest *GlobalSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HostsAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetLabels: Sets the labels on a GlobalAddress. To learn more about -// labels, read the Labeling Resources documentation. -func (r *GlobalAddressesService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *GlobalAddressesSetLabelsCall { - c := &GlobalAddressesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of hosts. 
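// Illustrative usage sketch (not part of the generated file): the hunks above add the zonal
// compute.hostTypes.list call and its Pages helper. Where Pages is not convenient, the same
// loop can be written by hand with MaxResults, PageToken and NextPageToken, as below. Project
// and zone are hypothetical; the same imports and Service value as the earlier sketches are
// assumed, and only NextPageToken is read from the returned list.
func listHostTypesByHand(ctx context.Context, svc *compute.Service, project, zone string) error {
	pageToken := ""
	for {
		call := svc.HostTypes.List(project, zone).
			OrderBy("creationTimestamp desc"). // ordering taken from the OrderBy doc comment above
			MaxResults(250)
		if pageToken != "" {
			call = call.PageToken(pageToken)
		}
		page, err := call.Context(ctx).Do()
		if err != nil {
			return err
		}
		log.Printf("got a page of host types, nextPageToken=%q", page.NextPageToken)
		if page.NextPageToken == "" {
			return nil
		}
		pageToken = page.NextPageToken
	}
}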
+func (r *HostsService) AggregatedList(project string) *HostsAggregatedListCall { + c := &HostsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.globalsetlabelsrequest = globalsetlabelsrequest + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *HostsAggregatedListCall) Filter(filter string) *HostsAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *HostsAggregatedListCall) MaxResults(maxResults int64) *HostsAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *HostsAggregatedListCall) OrderBy(orderBy string) *HostsAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *HostsAggregatedListCall) PageToken(pageToken string) *HostsAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalAddressesSetLabelsCall) Fields(s ...googleapi.Field) *GlobalAddressesSetLabelsCall { +func (c *HostsAggregatedListCall) Fields(s ...googleapi.Field) *HostsAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *HostsAggregatedListCall) IfNoneMatch(entityTag string) *HostsAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalAddressesSetLabelsCall) Context(ctx context.Context) *GlobalAddressesSetLabelsCall { +func (c *HostsAggregatedListCall) Context(ctx context.Context) *HostsAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalAddressesSetLabelsCall) Header() http.Header { +func (c *HostsAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalAddressesSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *HostsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{resource}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/hosts") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalAddresses.setLabels" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *GlobalAddressesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.hosts.aggregatedList" call. +// Exactly one of *HostAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *HostAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. 
Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *HostsAggregatedListCall) Do(opts ...googleapi.CallOption) (*HostAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -34309,7 +48570,7 @@ func (c *GlobalAddressesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operat if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &HostAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -34321,70 +48582,122 @@ func (c *GlobalAddressesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Sets the labels on a GlobalAddress. To learn more about labels, read the Labeling Resources documentation.", - // "httpMethod": "POST", - // "id": "compute.globalAddresses.setLabels", + // "description": "Retrieves an aggregated list of hosts.", + // "httpMethod": "GET", + // "id": "compute.hosts.aggregatedList", // "parameterOrder": [ - // "project", - // "resource" + // "project" // ], // "parameters": { + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/global/addresses/{resource}/setLabels", - // "request": { - // "$ref": "GlobalSetLabelsRequest" - // }, + // "path": "{project}/aggregated/hosts", // "response": { - // "$ref": "Operation" + // "$ref": "HostAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.globalAddresses.testIamPermissions": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *HostsAggregatedListCall) Pages(ctx context.Context, f func(*HostAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type GlobalAddressesTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.hosts.delete": + +type HostsDeleteCall struct { + s *Service + project string + zone string + host string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *GlobalAddressesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *GlobalAddressesTestIamPermissionsCall { - c := &GlobalAddressesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified Host resource. +func (r *HostsService) Delete(project string, zone string, host string) *HostsDeleteCall { + c := &HostsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.zone = zone + c.host = host + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. 
+// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *HostsDeleteCall) RequestId(requestId string) *HostsDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalAddressesTestIamPermissionsCall) Fields(s ...googleapi.Field) *GlobalAddressesTestIamPermissionsCall { +func (c *HostsDeleteCall) Fields(s ...googleapi.Field) *HostsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -34392,52 +48705,48 @@ func (c *GlobalAddressesTestIamPermissionsCall) Fields(s ...googleapi.Field) *Gl // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalAddressesTestIamPermissionsCall) Context(ctx context.Context) *GlobalAddressesTestIamPermissionsCall { +func (c *HostsDeleteCall) Context(ctx context.Context) *HostsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalAddressesTestIamPermissionsCall) Header() http.Header { +func (c *HostsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalAddressesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *HostsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/hosts/{host}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "zone": c.zone, + "host": c.host, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalAddresses.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. 
-func (c *GlobalAddressesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.hosts.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *HostsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -34456,7 +48765,7 @@ func (c *GlobalAddressesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -34468,14 +48777,22 @@ func (c *GlobalAddressesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.globalAddresses.testIamPermissions", + // "description": "Deletes the specified Host resource.", + // "httpMethod": "DELETE", + // "id": "compute.hosts.delete", // "parameterOrder": [ // "project", - // "resource" + // "zone", + // "host" // ], // "parameters": { + // "host": { + // "description": "Name of the Host resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -34483,121 +48800,120 @@ func (c *GlobalAddressesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name of the resource for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/addresses/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/zones/{zone}/hosts/{host}", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.globalForwardingRules.delete": +// method id "compute.hosts.get": -type GlobalForwardingRulesDeleteCall struct { - s *Service - project string - forwardingRule string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HostsGetCall struct { + s *Service + project string + zone string + host string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified GlobalForwardingRule resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/delete -func (r *GlobalForwardingRulesService) Delete(project string, forwardingRule string) *GlobalForwardingRulesDeleteCall { - c := &GlobalForwardingRulesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified host. Get a list of available hosts by +// making a list() request. +func (r *HostsService) Get(project string, zone string, host string) *HostsGetCall { + c := &HostsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.forwardingRule = forwardingRule - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *GlobalForwardingRulesDeleteCall) RequestId(requestId string) *GlobalForwardingRulesDeleteCall { - c.urlParams_.Set("requestId", requestId) + c.zone = zone + c.host = host return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
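The requestId plumbing introduced above makes host deletion safe to retry. Below is a minimal usage sketch, not part of the vendored file: it assumes an already-constructed *compute.Service (here svc), a Hosts field on Service wired to HostsService, and an illustrative import path for the vendored package. Only the RequestId/Context/Do chain is taken from this hunk; the three-argument Delete signature is inferred from the project/zone/host parameter order shown in the method metadata.

package example

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v0.beta" // assumed path for the vendored package
)

// deleteHost issues an idempotent delete: requestID must be a valid, non-zero
// UUID supplied by the caller, so a retried call is deduplicated server-side.
func deleteHost(ctx context.Context, svc *compute.Service, project, zone, host, requestID string) error {
	op, err := svc.Hosts.Delete(project, zone, host). // Delete signature inferred from parameterOrder
		RequestId(requestID).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	_ = op // *Operation describing the pending delete; poll it if completion matters
	log.Printf("delete of host %q in %s/%s accepted", host, project, zone)
	return nil
}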
-func (c *GlobalForwardingRulesDeleteCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesDeleteCall { +func (c *HostsGetCall) Fields(s ...googleapi.Field) *HostsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *HostsGetCall) IfNoneMatch(entityTag string) *HostsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalForwardingRulesDeleteCall) Context(ctx context.Context) *GlobalForwardingRulesDeleteCall { +func (c *HostsGetCall) Context(ctx context.Context) *HostsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalForwardingRulesDeleteCall) Header() http.Header { +func (c *HostsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *HostsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/hosts/{host}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "forwardingRule": c.forwardingRule, + "project": c.project, + "zone": c.zone, + "host": c.host, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalForwardingRules.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *GlobalForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.hosts.get" call. +// Exactly one of *Host or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Host.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *HostsGetCall) Do(opts ...googleapi.CallOption) (*Host, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -34616,7 +48932,7 @@ func (c *GlobalForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Host{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -34628,16 +48944,17 @@ func (c *GlobalForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Deletes the specified GlobalForwardingRule resource.", - // "httpMethod": "DELETE", - // "id": "compute.globalForwardingRules.delete", + // "description": "Returns the specified host. Get a list of available hosts by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.hosts.get", // "parameterOrder": [ // "project", - // "forwardingRule" + // "zone", + // "host" // ], // "parameters": { - // "forwardingRule": { - // "description": "Name of the ForwardingRule resource to delete.", + // "host": { + // "description": "Name of the host to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -34650,50 +48967,54 @@ func (c *GlobalForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/forwardingRules/{forwardingRule}", + // "path": "{project}/zones/{zone}/hosts/{host}", // "response": { - // "$ref": "Operation" + // "$ref": "Host" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.globalForwardingRules.get": +// method id "compute.hosts.getIamPolicy": -type GlobalForwardingRulesGetCall struct { - s *Service - project string - forwardingRule string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type HostsGetIamPolicyCall struct { + s *Service + project string + zone string + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified GlobalForwardingRule resource. Get a list -// of available forwarding rules by making a list() request. 
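The HostsGetCall above supports conditional reads via IfNoneMatch and partial responses via Fields. The following sketch (again assuming an existing *compute.Service and an illustrative import path) shows how those options compose with googleapi.IsNotModified; the field names passed to Fields are illustrative, everything else mirrors the generated surface in this hunk.

package example

import (
	"context"

	compute "google.golang.org/api/compute/v0.beta" // assumed vendored path
	"google.golang.org/api/googleapi"
)

// getHostIfChanged returns (nil, nil) when the stored etag still matches,
// i.e. the server answered 304 Not Modified.
func getHostIfChanged(ctx context.Context, svc *compute.Service, project, zone, host, etag string) (*compute.Host, error) {
	h, err := svc.Hosts.Get(project, zone, host).
		IfNoneMatch(etag).
		Fields("name", "status"). // partial response; field names are illustrative
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	return h, nil
}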
-// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/get -func (r *GlobalForwardingRulesService) Get(project string, forwardingRule string) *GlobalForwardingRulesGetCall { - c := &GlobalForwardingRulesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. +func (r *HostsService) GetIamPolicy(project string, zone string, resource string) *HostsGetIamPolicyCall { + c := &HostsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.forwardingRule = forwardingRule + c.zone = zone + c.resource = resource return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalForwardingRulesGetCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesGetCall { +func (c *HostsGetIamPolicyCall) Fields(s ...googleapi.Field) *HostsGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -34703,7 +49024,7 @@ func (c *GlobalForwardingRulesGetCall) Fields(s ...googleapi.Field) *GlobalForwa // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *GlobalForwardingRulesGetCall) IfNoneMatch(entityTag string) *GlobalForwardingRulesGetCall { +func (c *HostsGetIamPolicyCall) IfNoneMatch(entityTag string) *HostsGetIamPolicyCall { c.ifNoneMatch_ = entityTag return c } @@ -34711,21 +49032,21 @@ func (c *GlobalForwardingRulesGetCall) IfNoneMatch(entityTag string) *GlobalForw // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalForwardingRulesGetCall) Context(ctx context.Context) *GlobalForwardingRulesGetCall { +func (c *HostsGetIamPolicyCall) Context(ctx context.Context) *HostsGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalForwardingRulesGetCall) Header() http.Header { +func (c *HostsGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *HostsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -34736,25 +49057,26 @@ func (c *GlobalForwardingRulesGetCall) doRequest(alt string) (*http.Response, er } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/hosts/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "forwardingRule": c.forwardingRule, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalForwardingRules.get" call. -// Exactly one of *ForwardingRule or error will be non-nil. Any non-2xx -// status code is an error. 
Response headers are in either -// *ForwardingRule.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *GlobalForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRule, error) { +// Do executes the "compute.hosts.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *HostsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -34773,7 +49095,7 @@ func (c *GlobalForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*Forwar if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ForwardingRule{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -34785,32 +49107,40 @@ func (c *GlobalForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*Forwar } return ret, nil // { - // "description": "Returns the specified GlobalForwardingRule resource. Get a list of available forwarding rules by making a list() request.", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", // "httpMethod": "GET", - // "id": "compute.globalForwardingRules.get", + // "id": "compute.hosts.getIamPolicy", // "parameterOrder": [ // "project", - // "forwardingRule" + // "zone", + // "resource" // ], // "parameters": { - // "forwardingRule": { - // "description": "Name of the ForwardingRule resource to return.", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name of the resource for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/forwardingRules/{forwardingRule}", + // "path": "{project}/zones/{zone}/hosts/{resource}/getIamPolicy", // "response": { - // "$ref": "ForwardingRule" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -34821,24 +49151,25 @@ func (c *GlobalForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*Forwar } -// method id "compute.globalForwardingRules.insert": +// method id "compute.hosts.insert": -type GlobalForwardingRulesInsertCall struct { - s *Service - project string - forwardingrule *ForwardingRule - urlParams_ 
gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HostsInsertCall struct { + s *Service + project string + zone string + host *Host + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a GlobalForwardingRule resource in the specified -// project using the data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/insert -func (r *GlobalForwardingRulesService) Insert(project string, forwardingrule *ForwardingRule) *GlobalForwardingRulesInsertCall { - c := &GlobalForwardingRulesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a host resource in the specified project using the +// data included in the request. +func (r *HostsService) Insert(project string, zone string, host *Host) *HostsInsertCall { + c := &HostsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.forwardingrule = forwardingrule + c.zone = zone + c.host = host return c } @@ -34856,7 +49187,7 @@ func (r *GlobalForwardingRulesService) Insert(project string, forwardingrule *Fo // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *GlobalForwardingRulesInsertCall) RequestId(requestId string) *GlobalForwardingRulesInsertCall { +func (c *HostsInsertCall) RequestId(requestId string) *HostsInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -34864,7 +49195,7 @@ func (c *GlobalForwardingRulesInsertCall) RequestId(requestId string) *GlobalFor // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalForwardingRulesInsertCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesInsertCall { +func (c *HostsInsertCall) Fields(s ...googleapi.Field) *HostsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -34872,51 +49203,52 @@ func (c *GlobalForwardingRulesInsertCall) Fields(s ...googleapi.Field) *GlobalFo // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalForwardingRulesInsertCall) Context(ctx context.Context) *GlobalForwardingRulesInsertCall { +func (c *HostsInsertCall) Context(ctx context.Context) *HostsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalForwardingRulesInsertCall) Header() http.Header { +func (c *HostsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *HostsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.host) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/hosts") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalForwardingRules.insert" call. +// Do executes the "compute.hosts.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *GlobalForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HostsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -34947,11 +49279,12 @@ func (c *GlobalForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Creates a GlobalForwardingRule resource in the specified project using the data included in the request.", + // "description": "Creates a host resource in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.globalForwardingRules.insert", + // "id": "compute.hosts.insert", // "parameterOrder": [ - // "project" + // "project", + // "zone" // ], // "parameters": { // "project": { @@ -34965,11 +49298,18 @@ func (c *GlobalForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Ope // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/forwardingRules", + // "path": "{project}/zones/{zone}/hosts", // "request": { - // "$ref": "ForwardingRule" + // "$ref": "Host" // }, // "response": { // "$ref": "Operation" @@ -34982,23 +49322,23 @@ func (c *GlobalForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.globalForwardingRules.list": +// method id "compute.hosts.list": -type GlobalForwardingRulesListCall struct { +type HostsListCall struct { s *Service project string + zone string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of GlobalForwardingRule resources available to -// the specified project. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/list -func (r *GlobalForwardingRulesService) List(project string) *GlobalForwardingRulesListCall { - c := &GlobalForwardingRulesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of hosts available to the specified project. +func (r *HostsService) List(project string, zone string) *HostsListCall { + c := &HostsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone return c } @@ -35028,7 +49368,7 @@ func (r *GlobalForwardingRulesService) List(project string) *GlobalForwardingRul // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *GlobalForwardingRulesListCall) Filter(filter string) *GlobalForwardingRulesListCall { +func (c *HostsListCall) Filter(filter string) *HostsListCall { c.urlParams_.Set("filter", filter) return c } @@ -35039,7 +49379,7 @@ func (c *GlobalForwardingRulesListCall) Filter(filter string) *GlobalForwardingR // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *GlobalForwardingRulesListCall) MaxResults(maxResults int64) *GlobalForwardingRulesListCall { +func (c *HostsListCall) MaxResults(maxResults int64) *HostsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -35056,7 +49396,7 @@ func (c *GlobalForwardingRulesListCall) MaxResults(maxResults int64) *GlobalForw // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *GlobalForwardingRulesListCall) OrderBy(orderBy string) *GlobalForwardingRulesListCall { +func (c *HostsListCall) OrderBy(orderBy string) *HostsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -35064,7 +49404,7 @@ func (c *GlobalForwardingRulesListCall) OrderBy(orderBy string) *GlobalForwardin // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *GlobalForwardingRulesListCall) PageToken(pageToken string) *GlobalForwardingRulesListCall { +func (c *HostsListCall) PageToken(pageToken string) *HostsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -35072,7 +49412,7 @@ func (c *GlobalForwardingRulesListCall) PageToken(pageToken string) *GlobalForwa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalForwardingRulesListCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesListCall { +func (c *HostsListCall) Fields(s ...googleapi.Field) *HostsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -35082,7 +49422,7 @@ func (c *GlobalForwardingRulesListCall) Fields(s ...googleapi.Field) *GlobalForw // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
-func (c *GlobalForwardingRulesListCall) IfNoneMatch(entityTag string) *GlobalForwardingRulesListCall { +func (c *HostsListCall) IfNoneMatch(entityTag string) *HostsListCall { c.ifNoneMatch_ = entityTag return c } @@ -35090,21 +49430,21 @@ func (c *GlobalForwardingRulesListCall) IfNoneMatch(entityTag string) *GlobalFor // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalForwardingRulesListCall) Context(ctx context.Context) *GlobalForwardingRulesListCall { +func (c *HostsListCall) Context(ctx context.Context) *HostsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalForwardingRulesListCall) Header() http.Header { +func (c *HostsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalForwardingRulesListCall) doRequest(alt string) (*http.Response, error) { +func (c *HostsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -35115,24 +49455,25 @@ func (c *GlobalForwardingRulesListCall) doRequest(alt string) (*http.Response, e } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/hosts") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalForwardingRules.list" call. -// Exactly one of *ForwardingRuleList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ForwardingRuleList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingRuleList, error) { +// Do executes the "compute.hosts.list" call. +// Exactly one of *HostList or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *HostList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *HostsListCall) Do(opts ...googleapi.CallOption) (*HostList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -35151,7 +49492,7 @@ func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*Forwa if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ForwardingRuleList{ + ret := &HostList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -35163,11 +49504,12 @@ func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*Forwa } return ret, nil // { - // "description": "Retrieves a list of GlobalForwardingRule resources available to the specified project.", + // "description": "Retrieves a list of hosts available to the specified project.", // "httpMethod": "GET", - // "id": "compute.globalForwardingRules.list", + // "id": "compute.hosts.list", // "parameterOrder": [ - // "project" + // "project", + // "zone" // ], // "parameters": { // "filter": { @@ -35199,11 +49541,18 @@ func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*Forwa // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/forwardingRules", + // "path": "{project}/zones/{zone}/hosts", // "response": { - // "$ref": "ForwardingRuleList" + // "$ref": "HostList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -35217,7 +49566,7 @@ func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*Forwa // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *GlobalForwardingRulesListCall) Pages(ctx context.Context, f func(*ForwardingRuleList) error) error { +func (c *HostsListCall) Pages(ctx context.Context, f func(*HostList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -35235,53 +49584,192 @@ func (c *GlobalForwardingRulesListCall) Pages(ctx context.Context, f func(*Forwa } } -// method id "compute.globalForwardingRules.patch": +// method id "compute.hosts.setIamPolicy": -type GlobalForwardingRulesPatchCall struct { - s *Service - project string - forwardingRule string - forwardingrule *ForwardingRule - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HostsSetIamPolicyCall struct { + s *Service + project string + zone string + resource string + policy *Policy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +func (r *HostsService) SetIamPolicy(project string, zone string, resource string, policy *Policy) *HostsSetIamPolicyCall { + c := &HostsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.resource = resource + c.policy = policy + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
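The list call above exposes Filter, MaxResults, PageToken and a Pages helper that walks nextPageToken for the caller. A sketch of paging through compute.hosts.list follows; the HostList.Items and Host.Name field names and the import path are assumptions, while the call chain and the Pages signature come from this hunk.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed vendored path
)

func printHostNames(ctx context.Context, svc *compute.Service, project, zone string) error {
	call := svc.Hosts.List(project, zone).
		Filter("name ne example-host"). // filter syntax from the Filter doc comment
		MaxResults(100)                 // page size; the documented server default is 500

	// Pages invokes the callback once per page until there is no nextPageToken
	// or the callback returns a non-nil error.
	return call.Pages(ctx, func(page *compute.HostList) error {
		for _, h := range page.Items { // Items assumed to hold the per-page []*Host
			fmt.Println(h.Name) // Name assumed on *compute.Host
		}
		return nil
	})
}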
+func (c *HostsSetIamPolicyCall) Fields(s ...googleapi.Field) *HostsSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *HostsSetIamPolicyCall) Context(ctx context.Context) *HostsSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *HostsSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *HostsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/hosts/{resource}/setIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.hosts.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *HostsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", + // "httpMethod": "POST", + // "id": "compute.hosts.setIamPolicy", + // "parameterOrder": [ + // "project", + // "zone", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/hosts/{resource}/setIamPolicy", + // "request": { + // "$ref": "Policy" + // }, + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.hosts.testIamPermissions": + +type HostsTestIamPermissionsCall struct { + s *Service + project string + zone string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates the specified forwarding rule with the data included -// in the request. This method supports PATCH semantics and uses the -// JSON merge patch format and processing rules. Currently, you can only -// patch the network_tier field. -func (r *GlobalForwardingRulesService) Patch(project string, forwardingRule string, forwardingrule *ForwardingRule) *GlobalForwardingRulesPatchCall { - c := &GlobalForwardingRulesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *HostsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *HostsTestIamPermissionsCall { + c := &HostsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.forwardingRule = forwardingRule - c.forwardingrule = forwardingrule - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *GlobalForwardingRulesPatchCall) RequestId(requestId string) *GlobalForwardingRulesPatchCall { - c.urlParams_.Set("requestId", requestId) + c.zone = zone + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalForwardingRulesPatchCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesPatchCall { +func (c *HostsTestIamPermissionsCall) Fields(s ...googleapi.Field) *HostsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -35289,52 +49777,53 @@ func (c *GlobalForwardingRulesPatchCall) Fields(s ...googleapi.Field) *GlobalFor // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalForwardingRulesPatchCall) Context(ctx context.Context) *GlobalForwardingRulesPatchCall { +func (c *HostsTestIamPermissionsCall) Context(ctx context.Context) *HostsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalForwardingRulesPatchCall) Header() http.Header { +func (c *HostsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalForwardingRulesPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *HostsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/hosts/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "forwardingRule": c.forwardingRule, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalForwardingRules.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *GlobalForwardingRulesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.hosts.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *HostsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -35353,7 +49842,7 @@ func (c *GlobalForwardingRulesPatchCall) Do(opts ...googleapi.CallOption) (*Oper if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -35365,40 +49854,43 @@ func (c *GlobalForwardingRulesPatchCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Updates the specified forwarding rule with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules. Currently, you can only patch the network_tier field.", - // "httpMethod": "PATCH", - // "id": "compute.globalForwardingRules.patch", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.hosts.testIamPermissions", // "parameterOrder": [ // "project", - // "forwardingRule" + // "zone", + // "resource" // ], // "parameters": { - // "forwardingRule": { - // "description": "Name of the ForwardingRule resource to patch.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "resource": { + // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/forwardingRules/{forwardingRule}", + // "path": "{project}/zones/{zone}/hosts/{resource}/testIamPermissions", // "request": { - // "$ref": "ForwardingRule" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -35409,32 +49901,49 @@ func (c *GlobalForwardingRulesPatchCall) Do(opts ...googleapi.CallOption) (*Oper } -// method id "compute.globalForwardingRules.setLabels": +// method id "compute.httpHealthChecks.delete": -type GlobalForwardingRulesSetLabelsCall struct { - s *Service - project string - resource string - globalsetlabelsrequest *GlobalSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HttpHealthChecksDeleteCall struct { + s *Service + project string + httpHealthCheck string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetLabels: Sets the labels on the specified resource. To learn more -// about labels, read the Labeling Resources documentation. -func (r *GlobalForwardingRulesService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *GlobalForwardingRulesSetLabelsCall { - c := &GlobalForwardingRulesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified HttpHealthCheck resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/delete +func (r *HttpHealthChecksService) Delete(project string, httpHealthCheck string) *HttpHealthChecksDeleteCall { + c := &HttpHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.globalsetlabelsrequest = globalsetlabelsrequest + c.httpHealthCheck = httpHealthCheck + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *HttpHealthChecksDeleteCall) RequestId(requestId string) *HttpHealthChecksDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
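The new Hosts IAM surface (getIamPolicy, setIamPolicy, testIamPermissions) follows the usual project/zone/resource shape. A sketch of compute.hosts.testIamPermissions is below; the Permissions field names on the request and response types and the import path are assumptions, while the TestIamPermissions signature matches the one added in this diff.

package example

import (
	"context"

	compute "google.golang.org/api/compute/v0.beta" // assumed vendored path
)

// hostPermissions reports which of the given permissions the caller holds on a
// host resource, using the testIamPermissions method added in this diff.
func hostPermissions(ctx context.Context, svc *compute.Service, project, zone, host string, perms []string) ([]string, error) {
	req := &compute.TestPermissionsRequest{Permissions: perms} // Permissions field assumed
	resp, err := svc.Hosts.TestIamPermissions(project, zone, host, req).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	return resp.Permissions, nil // Permissions field assumed on the response as well
}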
-func (c *GlobalForwardingRulesSetLabelsCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesSetLabelsCall { +func (c *HttpHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpHealthChecksDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -35442,52 +49951,47 @@ func (c *GlobalForwardingRulesSetLabelsCall) Fields(s ...googleapi.Field) *Globa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalForwardingRulesSetLabelsCall) Context(ctx context.Context) *GlobalForwardingRulesSetLabelsCall { +func (c *HttpHealthChecksDeleteCall) Context(ctx context.Context) *HttpHealthChecksDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalForwardingRulesSetLabelsCall) Header() http.Header { +func (c *HttpHealthChecksDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalForwardingRulesSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{resource}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "httpHealthCheck": c.httpHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalForwardingRules.setLabels" call. +// Do executes the "compute.httpHealthChecks.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *GlobalForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HttpHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -35518,14 +50022,21 @@ func (c *GlobalForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Sets the labels on the specified resource. 
To learn more about labels, read the Labeling Resources documentation.", - // "httpMethod": "POST", - // "id": "compute.globalForwardingRules.setLabels", + // "description": "Deletes the specified HttpHealthCheck resource.", + // "httpMethod": "DELETE", + // "id": "compute.httpHealthChecks.delete", // "parameterOrder": [ // "project", - // "resource" + // "httpHealthCheck" // ], // "parameters": { + // "httpHealthCheck": { + // "description": "Name of the HttpHealthCheck resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -35533,18 +50044,13 @@ func (c *GlobalForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (* // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/global/forwardingRules/{resource}/setLabels", - // "request": { - // "$ref": "GlobalSetLabelsRequest" - // }, + // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", // "response": { // "$ref": "Operation" // }, @@ -35556,105 +50062,93 @@ func (c *GlobalForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (* } -// method id "compute.globalForwardingRules.setTarget": +// method id "compute.httpHealthChecks.get": -type GlobalForwardingRulesSetTargetCall struct { +type HttpHealthChecksGetCall struct { s *Service project string - forwardingRule string - targetreference *TargetReference + httpHealthCheck string urlParams_ gensupport.URLParams + ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// SetTarget: Changes target URL for the GlobalForwardingRule resource. -// The new target should be of the same type as the old target. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/setTarget -func (r *GlobalForwardingRulesService) SetTarget(project string, forwardingRule string, targetreference *TargetReference) *GlobalForwardingRulesSetTargetCall { - c := &GlobalForwardingRulesSetTargetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified HttpHealthCheck resource. Get a list of +// available HTTP health checks by making a list() request. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/get +func (r *HttpHealthChecksService) Get(project string, httpHealthCheck string) *HttpHealthChecksGetCall { + c := &HttpHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.forwardingRule = forwardingRule - c.targetreference = targetreference - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *GlobalForwardingRulesSetTargetCall) RequestId(requestId string) *GlobalForwardingRulesSetTargetCall { - c.urlParams_.Set("requestId", requestId) + c.httpHealthCheck = httpHealthCheck return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalForwardingRulesSetTargetCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesSetTargetCall { +func (c *HttpHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpHealthChecksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *HttpHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpHealthChecksGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalForwardingRulesSetTargetCall) Context(ctx context.Context) *GlobalForwardingRulesSetTargetCall { +func (c *HttpHealthChecksGetCall) Context(ctx context.Context) *HttpHealthChecksGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *GlobalForwardingRulesSetTargetCall) Header() http.Header { +func (c *HttpHealthChecksGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}/setTarget") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "forwardingRule": c.forwardingRule, + "project": c.project, + "httpHealthCheck": c.httpHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalForwardingRules.setTarget" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.httpHealthChecks.get" call. +// Exactly one of *HttpHealthCheck or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *GlobalForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// *HttpHealthCheck.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthCheck, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -35673,7 +50167,7 @@ func (c *GlobalForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (* if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &HttpHealthCheck{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -35685,16 +50179,16 @@ func (c *GlobalForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Changes target URL for the GlobalForwardingRule resource. The new target should be of the same type as the old target.", - // "httpMethod": "POST", - // "id": "compute.globalForwardingRules.setTarget", + // "description": "Returns the specified HttpHealthCheck resource. 
Get a list of available HTTP health checks by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.httpHealthChecks.get", // "parameterOrder": [ // "project", - // "forwardingRule" + // "httpHealthCheck" // ], // "parameters": { - // "forwardingRule": { - // "description": "Name of the ForwardingRule resource in which target is to be set.", + // "httpHealthCheck": { + // "description": "Name of the HttpHealthCheck resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -35706,54 +50200,65 @@ func (c *GlobalForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (* // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/global/forwardingRules/{forwardingRule}/setTarget", - // "request": { - // "$ref": "TargetReference" - // }, + // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", // "response": { - // "$ref": "Operation" + // "$ref": "HttpHealthCheck" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.globalForwardingRules.testIamPermissions": +// method id "compute.httpHealthChecks.insert": -type GlobalForwardingRulesTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HttpHealthChecksInsertCall struct { + s *Service + project string + httphealthcheck *HttpHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *GlobalForwardingRulesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *GlobalForwardingRulesTestIamPermissionsCall { - c := &GlobalForwardingRulesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a HttpHealthCheck resource in the specified project +// using the data included in the request. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/insert +func (r *HttpHealthChecksService) Insert(project string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksInsertCall { + c := &HttpHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.httphealthcheck = httphealthcheck + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *HttpHealthChecksInsertCall) RequestId(requestId string) *HttpHealthChecksInsertCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalForwardingRulesTestIamPermissionsCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesTestIamPermissionsCall { +func (c *HttpHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpHealthChecksInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -35761,52 +50266,51 @@ func (c *GlobalForwardingRulesTestIamPermissionsCall) Fields(s ...googleapi.Fiel // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalForwardingRulesTestIamPermissionsCall) Context(ctx context.Context) *GlobalForwardingRulesTestIamPermissionsCall { +func (c *HttpHealthChecksInsertCall) Context(ctx context.Context) *HttpHealthChecksInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalForwardingRulesTestIamPermissionsCall) Header() http.Header { +func (c *HttpHealthChecksInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalForwardingRulesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalForwardingRules.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *GlobalForwardingRulesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.httpHealthChecks.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *HttpHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -35825,7 +50329,7 @@ func (c *GlobalForwardingRulesTestIamPermissionsCall) Do(opts ...googleapi.CallO if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -35837,12 +50341,11 @@ func (c *GlobalForwardingRulesTestIamPermissionsCall) Do(opts ...googleapi.CallO } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Creates a HttpHealthCheck resource in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.globalForwardingRules.testIamPermissions", + // "id": "compute.httpHealthChecks.insert", // "parameterOrder": [ - // "project", - // "resource" + // "project" // ], // "parameters": { // "project": { @@ -35852,33 +50355,30 @@ func (c *GlobalForwardingRulesTestIamPermissionsCall) Do(opts ...googleapi.CallO // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/global/forwardingRules/{resource}/testIamPermissions", + // "path": "{project}/global/httpHealthChecks", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "HttpHealthCheck" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.globalOperations.aggregatedList": +// method id "compute.httpHealthChecks.list": -type GlobalOperationsAggregatedListCall struct { +type HttpHealthChecksListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -35887,10 +50387,11 @@ type GlobalOperationsAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of all operations. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/aggregatedList -func (r *GlobalOperationsService) AggregatedList(project string) *GlobalOperationsAggregatedListCall { - c := &GlobalOperationsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of HttpHealthCheck resources available to +// the specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/list +func (r *HttpHealthChecksService) List(project string) *HttpHealthChecksListCall { + c := &HttpHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -35921,7 +50422,7 @@ func (r *GlobalOperationsService) AggregatedList(project string) *GlobalOperatio // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *GlobalOperationsAggregatedListCall) Filter(filter string) *GlobalOperationsAggregatedListCall { +func (c *HttpHealthChecksListCall) Filter(filter string) *HttpHealthChecksListCall { c.urlParams_.Set("filter", filter) return c } @@ -35932,7 +50433,7 @@ func (c *GlobalOperationsAggregatedListCall) Filter(filter string) *GlobalOperat // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *GlobalOperationsAggregatedListCall) MaxResults(maxResults int64) *GlobalOperationsAggregatedListCall { +func (c *HttpHealthChecksListCall) MaxResults(maxResults int64) *HttpHealthChecksListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -35949,7 +50450,7 @@ func (c *GlobalOperationsAggregatedListCall) MaxResults(maxResults int64) *Globa // // Currently, only sorting by name or creationTimestamp desc is // supported. 
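// A minimal sketch of a single-page List call using the Filter, OrderBy and
// MaxResults parameters documented above. It assumes the imports and the
// *compute.Service value `svc` from the earlier sketch; the filter string is
// only an example of the "field_name comparison_string literal_string" form.
func exampleListFilteredHealthChecks(ctx context.Context, svc *compute.Service, project string) error {
	page, err := svc.HttpHealthChecks.List(project).
		Filter("name eq web-.*").          // RE2 regex; must match the entire field
		OrderBy("creationTimestamp desc"). // newest resources first
		MaxResults(50).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	for _, hc := range page.Items {
		log.Printf("%s (port %d)", hc.Name, hc.Port)
	}
	// A non-empty page.NextPageToken means more results remain; pass it to
	// PageToken on a follow-up call, or use the Pages helper shown further down.
	return nil
}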
-func (c *GlobalOperationsAggregatedListCall) OrderBy(orderBy string) *GlobalOperationsAggregatedListCall { +func (c *HttpHealthChecksListCall) OrderBy(orderBy string) *HttpHealthChecksListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -35957,7 +50458,7 @@ func (c *GlobalOperationsAggregatedListCall) OrderBy(orderBy string) *GlobalOper // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *GlobalOperationsAggregatedListCall) PageToken(pageToken string) *GlobalOperationsAggregatedListCall { +func (c *HttpHealthChecksListCall) PageToken(pageToken string) *HttpHealthChecksListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -35965,7 +50466,7 @@ func (c *GlobalOperationsAggregatedListCall) PageToken(pageToken string) *Global // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalOperationsAggregatedListCall) Fields(s ...googleapi.Field) *GlobalOperationsAggregatedListCall { +func (c *HttpHealthChecksListCall) Fields(s ...googleapi.Field) *HttpHealthChecksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -35975,7 +50476,7 @@ func (c *GlobalOperationsAggregatedListCall) Fields(s ...googleapi.Field) *Globa // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *GlobalOperationsAggregatedListCall) IfNoneMatch(entityTag string) *GlobalOperationsAggregatedListCall { +func (c *HttpHealthChecksListCall) IfNoneMatch(entityTag string) *HttpHealthChecksListCall { c.ifNoneMatch_ = entityTag return c } @@ -35983,21 +50484,21 @@ func (c *GlobalOperationsAggregatedListCall) IfNoneMatch(entityTag string) *Glob // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalOperationsAggregatedListCall) Context(ctx context.Context) *GlobalOperationsAggregatedListCall { +func (c *HttpHealthChecksListCall) Context(ctx context.Context) *HttpHealthChecksListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalOperationsAggregatedListCall) Header() http.Header { +func (c *HttpHealthChecksListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalOperationsAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpHealthChecksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -36008,7 +50509,7 @@ func (c *GlobalOperationsAggregatedListCall) doRequest(alt string) (*http.Respon } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/operations") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders @@ -36018,14 +50519,14 @@ func (c *GlobalOperationsAggregatedListCall) doRequest(alt string) (*http.Respon return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalOperations.aggregatedList" call. -// Exactly one of *OperationAggregatedList or error will be non-nil. Any +// Do executes the "compute.httpHealthChecks.list" call. +// Exactly one of *HttpHealthCheckList or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *OperationAggregatedList.ServerResponse.Header or (if a response was +// *HttpHealthCheckList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (*OperationAggregatedList, error) { +func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealthCheckList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -36044,7 +50545,7 @@ func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (* if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &OperationAggregatedList{ + ret := &HttpHealthCheckList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -36056,9 +50557,9 @@ func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Retrieves an aggregated list of all operations.", + // "description": "Retrieves the list of HttpHealthCheck resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.globalOperations.aggregatedList", + // "id": "compute.httpHealthChecks.list", // "parameterOrder": [ // "project" // ], @@ -36094,9 +50595,9 @@ func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (* // "type": "string" // } // }, - // "path": "{project}/aggregated/operations", + // "path": "{project}/global/httpHealthChecks", // "response": { - // "$ref": "OperationAggregatedList" + // "$ref": "HttpHealthCheckList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -36110,7 +50611,7 @@ func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (* // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
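// A minimal sketch of the Pages helper documented above, which follows
// nextPageToken automatically; assumes `svc` and imports as in the earlier
// sketches.
func exampleListAllHealthChecks(ctx context.Context, svc *compute.Service, project string) error {
	return svc.HttpHealthChecks.List(project).Pages(ctx, func(page *compute.HttpHealthCheckList) error {
		for _, hc := range page.Items {
			log.Printf("found health check %s", hc.Name)
		}
		return nil // a non-nil error here halts the iteration
	})
}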
-func (c *GlobalOperationsAggregatedListCall) Pages(ctx context.Context, f func(*OperationAggregatedList) error) error { +func (c *HttpHealthChecksListCall) Pages(ctx context.Context, f func(*HttpHealthCheckList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -36128,30 +50629,53 @@ func (c *GlobalOperationsAggregatedListCall) Pages(ctx context.Context, f func(* } } -// method id "compute.globalOperations.delete": +// method id "compute.httpHealthChecks.patch": -type GlobalOperationsDeleteCall struct { - s *Service - project string - operation string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HttpHealthChecksPatchCall struct { + s *Service + project string + httpHealthCheck string + httphealthcheck *HttpHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates a HttpHealthCheck resource in the specified project +// using the data included in the request. This method supports PATCH +// semantics and uses the JSON merge patch format and processing rules. +// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/patch +func (r *HttpHealthChecksService) Patch(project string, httpHealthCheck string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksPatchCall { + c := &HttpHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.httpHealthCheck = httpHealthCheck + c.httphealthcheck = httphealthcheck + return c } -// Delete: Deletes the specified Operations resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/delete -func (r *GlobalOperationsService) Delete(project string, operation string) *GlobalOperationsDeleteCall { - c := &GlobalOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.operation = operation +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *HttpHealthChecksPatchCall) RequestId(requestId string) *HttpHealthChecksPatchCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalOperationsDeleteCall) Fields(s ...googleapi.Field) *GlobalOperationsDeleteCall { +func (c *HttpHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpHealthChecksPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -36159,62 +50683,92 @@ func (c *GlobalOperationsDeleteCall) Fields(s ...googleapi.Field) *GlobalOperati // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
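// A minimal sketch of a Patch call bounded by a context timeout, per the
// Context semantics documented above; since Patch uses JSON merge-patch, only
// the fields set on the request body change. Assumes `svc` and imports as in
// the earlier sketches, plus "time"; the 30-second timeout and the /healthz
// path are arbitrary.
func examplePatchRequestPath(svc *compute.Service, project, name string) error {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel() // cancelling aborts any in-flight HTTP request

	op, err := svc.HttpHealthChecks.Patch(project, name, &compute.HttpHealthCheck{
		RequestPath: "/healthz", // the only field to change
	}).Context(ctx).Do()
	if err != nil {
		return err
	}
	log.Printf("patch operation %s is %s", op.Name, op.Status)
	return nil
}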
-func (c *GlobalOperationsDeleteCall) Context(ctx context.Context) *GlobalOperationsDeleteCall { +func (c *HttpHealthChecksPatchCall) Context(ctx context.Context) *HttpHealthChecksPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalOperationsDeleteCall) Header() http.Header { +func (c *HttpHealthChecksPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("PATCH", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "operation": c.operation, + "project": c.project, + "httpHealthCheck": c.httpHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalOperations.delete" call. -func (c *GlobalOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { +// Do executes the "compute.httpHealthChecks.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } if err != nil { - return err + return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return nil, err } - return nil + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil // { - // "description": "Deletes the specified Operations resource.", - // "httpMethod": "DELETE", - // "id": "compute.globalOperations.delete", + // "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.httpHealthChecks.patch", // "parameterOrder": [ // "project", - // "operation" + // "httpHealthCheck" // ], // "parameters": { - // "operation": { - // "description": "Name of the Operations resource to delete.", + // "httpHealthCheck": { + // "description": "Name of the HttpHealthCheck resource to patch.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -36226,9 +50780,20 @@ func (c *GlobalOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/global/operations/{operation}", + // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + // "request": { + // "$ref": "HttpHealthCheck" + // }, + // "response": { + // "$ref": "Operation" + // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/compute" @@ -36237,93 +50802,85 @@ func (c *GlobalOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { } -// method id "compute.globalOperations.get": +// method id "compute.httpHealthChecks.testIamPermissions": -type GlobalOperationsGetCall struct { - s *Service - project string - operation string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type HttpHealthChecksTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Retrieves the specified Operations resource. Get a list of -// operations by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/get -func (r *GlobalOperationsService) Get(project string, operation string) *GlobalOperationsGetCall { - c := &GlobalOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *HttpHealthChecksService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *HttpHealthChecksTestIamPermissionsCall { + c := &HttpHealthChecksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.operation = operation + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalOperationsGetCall) Fields(s ...googleapi.Field) *GlobalOperationsGetCall { +func (c *HttpHealthChecksTestIamPermissionsCall) Fields(s ...googleapi.Field) *HttpHealthChecksTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *GlobalOperationsGetCall) IfNoneMatch(entityTag string) *GlobalOperationsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalOperationsGetCall) Context(ctx context.Context) *GlobalOperationsGetCall { +func (c *HttpHealthChecksTestIamPermissionsCall) Context(ctx context.Context) *HttpHealthChecksTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalOperationsGetCall) Header() http.Header { +func (c *HttpHealthChecksTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalOperationsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpHealthChecksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "operation": c.operation, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalOperations.get" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *GlobalOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.httpHealthChecks.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *HttpHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -36342,7 +50899,7 @@ func (c *GlobalOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -36354,32 +50911,35 @@ func (c *GlobalOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Retrieves the specified Operations resource. Get a list of operations by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.globalOperations.get", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.httpHealthChecks.testIamPermissions", // "parameterOrder": [ // "project", - // "operation" + // "resource" // ], // "parameters": { - // "operation": { - // "description": "Name of the Operations resource to return.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "resource": { + // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/operations/{operation}", + // "path": "{project}/global/httpHealthChecks/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -36390,157 +50950,105 @@ func (c *GlobalOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.globalOperations.list": +// method id "compute.httpHealthChecks.update": -type GlobalOperationsListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type HttpHealthChecksUpdateCall struct { + s *Service + project string + httpHealthCheck string + httphealthcheck *HttpHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of Operation resources contained within the -// specified project. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/list -func (r *GlobalOperationsService) List(project string) *GlobalOperationsListCall { - c := &GlobalOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates a HttpHealthCheck resource in the specified project +// using the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/update +func (r *HttpHealthChecksService) Update(project string, httpHealthCheck string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksUpdateCall { + c := &HttpHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.httpHealthCheck = httpHealthCheck + c.httphealthcheck = httphealthcheck return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *GlobalOperationsListCall) Filter(filter string) *GlobalOperationsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *GlobalOperationsListCall) MaxResults(maxResults int64) *GlobalOperationsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". 
This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *GlobalOperationsListCall) OrderBy(orderBy string) *GlobalOperationsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *GlobalOperationsListCall) PageToken(pageToken string) *GlobalOperationsListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *HttpHealthChecksUpdateCall) RequestId(requestId string) *HttpHealthChecksUpdateCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalOperationsListCall) Fields(s ...googleapi.Field) *GlobalOperationsListCall { +func (c *HttpHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpHealthChecksUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *GlobalOperationsListCall) IfNoneMatch(entityTag string) *GlobalOperationsListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalOperationsListCall) Context(ctx context.Context) *GlobalOperationsListCall { +func (c *HttpHealthChecksUpdateCall) Context(ctx context.Context) *HttpHealthChecksUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
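// A minimal sketch of Update as a full replacement (HTTP PUT): unlike Patch,
// the usual pattern is read-modify-write with the complete resource. Assumes
// `svc` and imports as in the earlier sketches.
func exampleUpdateHealthCheckPort(ctx context.Context, svc *compute.Service, project, name string) error {
	hc, err := svc.HttpHealthChecks.Get(project, name).Context(ctx).Do()
	if err != nil {
		return err
	}
	hc.Port = 8080 // change one field, then send the whole object back
	_, err = svc.HttpHealthChecks.Update(project, name, hc).Context(ctx).Do()
	return err
}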
-func (c *GlobalOperationsListCall) Header() http.Header { +func (c *HttpHealthChecksUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalOperationsListCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("PUT", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "httpHealthCheck": c.httpHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalOperations.list" call. -// Exactly one of *OperationList or error will be non-nil. Any non-2xx +// Do executes the "compute.httpHealthChecks.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *OperationList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationList, error) { +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *HttpHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -36559,7 +51067,7 @@ func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &OperationList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -36571,34 +51079,19 @@ func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL } return ret, nil // { - // "description": "Retrieves a list of Operation resources contained within the specified project.", - // "httpMethod": "GET", - // "id": "compute.globalOperations.list", + // "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.httpHealthChecks.update", // "parameterOrder": [ - // "project" + // "project", + // "httpHealthCheck" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "httpHealthCheck": { + // "description": "Name of the HttpHealthCheck resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, // "project": { @@ -36607,58 +51100,44 @@ func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/global/operations", + // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + // "request": { + // "$ref": "HttpHealthCheck" + // }, // "response": { - // "$ref": "OperationList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *GlobalOperationsListCall) Pages(ctx context.Context, f func(*OperationList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.healthChecks.delete": +// method id "compute.httpsHealthChecks.delete": -type HealthChecksDeleteCall struct { - s *Service - project string - healthCheck string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HttpsHealthChecksDeleteCall struct { + s *Service + project string + httpsHealthCheck string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified HealthCheck resource. -func (r *HealthChecksService) Delete(project string, healthCheck string) *HealthChecksDeleteCall { - c := &HealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified HttpsHealthCheck resource. +func (r *HttpsHealthChecksService) Delete(project string, httpsHealthCheck string) *HttpsHealthChecksDeleteCall { + c := &HttpsHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.healthCheck = healthCheck + c.httpsHealthCheck = httpsHealthCheck return c } @@ -36676,7 +51155,7 @@ func (r *HealthChecksService) Delete(project string, healthCheck string) *Health // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *HealthChecksDeleteCall) RequestId(requestId string) *HealthChecksDeleteCall { +func (c *HttpsHealthChecksDeleteCall) RequestId(requestId string) *HttpsHealthChecksDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -36684,7 +51163,7 @@ func (c *HealthChecksDeleteCall) RequestId(requestId string) *HealthChecksDelete // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
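// A minimal sketch of an idempotent delete retry using the requestId parameter
// documented above: requestID is a UUID the caller generates once and reuses on
// every attempt so the server can recognise and ignore duplicates. Assumes
// `svc` and imports as in the earlier sketches; the bare retry loop (no
// backoff) is only illustrative.
func exampleDeleteHTTPSHealthCheck(ctx context.Context, svc *compute.Service, project, name, requestID string) error {
	var lastErr error
	for attempt := 0; attempt < 3; attempt++ {
		_, lastErr = svc.HttpsHealthChecks.Delete(project, name).
			RequestId(requestID). // same ID on every attempt
			Context(ctx).
			Do()
		if lastErr == nil {
			return nil
		}
	}
	return lastErr
}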
-func (c *HealthChecksDeleteCall) Fields(s ...googleapi.Field) *HealthChecksDeleteCall { +func (c *HttpsHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpsHealthChecksDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -36692,21 +51171,21 @@ func (c *HealthChecksDeleteCall) Fields(s ...googleapi.Field) *HealthChecksDelet // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HealthChecksDeleteCall) Context(ctx context.Context) *HealthChecksDeleteCall { +func (c *HttpsHealthChecksDeleteCall) Context(ctx context.Context) *HttpsHealthChecksDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HealthChecksDeleteCall) Header() http.Header { +func (c *HttpsHealthChecksDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpsHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -36714,25 +51193,25 @@ func (c *HealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "healthCheck": c.healthCheck, + "project": c.project, + "httpsHealthCheck": c.httpsHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.delete" call. +// Do executes the "compute.httpsHealthChecks.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -36763,16 +51242,16 @@ func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Deletes the specified HealthCheck resource.", + // "description": "Deletes the specified HttpsHealthCheck resource.", // "httpMethod": "DELETE", - // "id": "compute.healthChecks.delete", + // "id": "compute.httpsHealthChecks.delete", // "parameterOrder": [ // "project", - // "healthCheck" + // "httpsHealthCheck" // ], // "parameters": { - // "healthCheck": { - // "description": "Name of the HealthCheck resource to delete.", + // "httpsHealthCheck": { + // "description": "Name of the HttpsHealthCheck resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -36791,43 +51270,43 @@ func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, e // "type": "string" // } // }, - // "path": "{project}/global/healthChecks/{healthCheck}", + // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.healthChecks.get": - -type HealthChecksGetCall struct { - s *Service - project string - healthCheck string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header + // ] + // } + } -// Get: Returns the specified HealthCheck resource. Get a list of -// available health checks by making a list() request. -func (r *HealthChecksService) Get(project string, healthCheck string) *HealthChecksGetCall { - c := &HealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// method id "compute.httpsHealthChecks.get": + +type HttpsHealthChecksGetCall struct { + s *Service + project string + httpsHealthCheck string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified HttpsHealthCheck resource. Get a list of +// available HTTPS health checks by making a list() request. +func (r *HttpsHealthChecksService) Get(project string, httpsHealthCheck string) *HttpsHealthChecksGetCall { + c := &HttpsHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.healthCheck = healthCheck + c.httpsHealthCheck = httpsHealthCheck return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HealthChecksGetCall) Fields(s ...googleapi.Field) *HealthChecksGetCall { +func (c *HttpsHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpsHealthChecksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -36837,7 +51316,7 @@ func (c *HealthChecksGetCall) Fields(s ...googleapi.Field) *HealthChecksGetCall // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
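A minimal, self-contained usage sketch of the HttpsHealthChecks.Delete call generated above, under stated assumptions: the vendored package is importable as google.golang.org/api/compute/v0.beta (the import path and API version are assumptions, not taken from this change), Application Default Credentials are available, and "my-project" / "my-check" are placeholder names.

package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v0.beta" // assumed import path for this generated file
)

func main() {
	ctx := context.Background()
	// ComputeScope matches the scopes listed in the method metadata above.
	client, err := google.DefaultClient(ctx, compute.ComputeScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}
	// Delete is asynchronous: it returns an *Operation describing the deletion.
	op, err := svc.HttpsHealthChecks.Delete("my-project", "my-check").Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("delete operation %s: %s", op.Name, op.Status)
}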
-func (c *HealthChecksGetCall) IfNoneMatch(entityTag string) *HealthChecksGetCall { +func (c *HttpsHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpsHealthChecksGetCall { c.ifNoneMatch_ = entityTag return c } @@ -36845,21 +51324,21 @@ func (c *HealthChecksGetCall) IfNoneMatch(entityTag string) *HealthChecksGetCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HealthChecksGetCall) Context(ctx context.Context) *HealthChecksGetCall { +func (c *HttpsHealthChecksGetCall) Context(ctx context.Context) *HttpsHealthChecksGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HealthChecksGetCall) Header() http.Header { +func (c *HttpsHealthChecksGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksGetCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpsHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -36870,25 +51349,25 @@ func (c *HealthChecksGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "healthCheck": c.healthCheck, + "project": c.project, + "httpsHealthCheck": c.httpsHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.get" call. -// Exactly one of *HealthCheck or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *HealthCheck.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, error) { +// Do executes the "compute.httpsHealthChecks.get" call. +// Exactly one of *HttpsHealthCheck or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *HttpsHealthCheck.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealthCheck, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -36907,7 +51386,7 @@ func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, er if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HealthCheck{ + ret := &HttpsHealthCheck{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -36919,16 +51398,16 @@ func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, er } return ret, nil // { - // "description": "Returns the specified HealthCheck resource. Get a list of available health checks by making a list() request.", + // "description": "Returns the specified HttpsHealthCheck resource. Get a list of available HTTPS health checks by making a list() request.", // "httpMethod": "GET", - // "id": "compute.healthChecks.get", + // "id": "compute.httpsHealthChecks.get", // "parameterOrder": [ // "project", - // "healthCheck" + // "httpsHealthCheck" // ], // "parameters": { - // "healthCheck": { - // "description": "Name of the HealthCheck resource to return.", + // "httpsHealthCheck": { + // "description": "Name of the HttpsHealthCheck resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -36942,9 +51421,9 @@ func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, er // "type": "string" // } // }, - // "path": "{project}/global/healthChecks/{healthCheck}", + // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", // "response": { - // "$ref": "HealthCheck" + // "$ref": "HttpsHealthCheck" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -36955,23 +51434,23 @@ func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, er } -// method id "compute.healthChecks.insert": +// method id "compute.httpsHealthChecks.insert": -type HealthChecksInsertCall struct { - s *Service - project string - healthcheck *HealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HttpsHealthChecksInsertCall struct { + s *Service + project string + httpshealthcheck *HttpsHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a HealthCheck resource in the specified project using -// the data included in the request. -func (r *HealthChecksService) Insert(project string, healthcheck *HealthCheck) *HealthChecksInsertCall { - c := &HealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a HttpsHealthCheck resource in the specified project +// using the data included in the request. +func (r *HttpsHealthChecksService) Insert(project string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksInsertCall { + c := &HttpsHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.healthcheck = healthcheck + c.httpshealthcheck = httpshealthcheck return c } @@ -36989,7 +51468,7 @@ func (r *HealthChecksService) Insert(project string, healthcheck *HealthCheck) * // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
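Continuing the same sketch (svc and ctx as above, plus an import of google.golang.org/api/googleapi), a conditional Get using IfNoneMatch and googleapi.IsNotModified as described in the comments above; previousETag stands in for an ETag string saved from an earlier response.

// Conditional GET: if the resource still matches previousETag, the server
// answers 304 and the returned error satisfies googleapi.IsNotModified.
hc, err := svc.HttpsHealthChecks.Get("my-project", "my-check").
	IfNoneMatch(previousETag).
	Context(ctx).
	Do()
switch {
case googleapi.IsNotModified(err):
	log.Print("health check unchanged since last fetch")
case err != nil:
	log.Fatal(err)
default:
	log.Printf("%s checks %s:%d%s every %ds",
		hc.Name, hc.Host, hc.Port, hc.RequestPath, hc.CheckIntervalSec)
}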
-func (c *HealthChecksInsertCall) RequestId(requestId string) *HealthChecksInsertCall { +func (c *HttpsHealthChecksInsertCall) RequestId(requestId string) *HttpsHealthChecksInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -36997,7 +51476,7 @@ func (c *HealthChecksInsertCall) RequestId(requestId string) *HealthChecksInsert // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HealthChecksInsertCall) Fields(s ...googleapi.Field) *HealthChecksInsertCall { +func (c *HttpsHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpsHealthChecksInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -37005,34 +51484,34 @@ func (c *HealthChecksInsertCall) Fields(s ...googleapi.Field) *HealthChecksInser // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HealthChecksInsertCall) Context(ctx context.Context) *HealthChecksInsertCall { +func (c *HttpsHealthChecksInsertCall) Context(ctx context.Context) *HttpsHealthChecksInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HealthChecksInsertCall) Header() http.Header { +func (c *HttpsHealthChecksInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpsHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -37042,14 +51521,14 @@ func (c *HealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.insert" call. +// Do executes the "compute.httpsHealthChecks.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -37080,9 +51559,9 @@ func (c *HealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Creates a HealthCheck resource in the specified project using the data included in the request.", + // "description": "Creates a HttpsHealthCheck resource in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.healthChecks.insert", + // "id": "compute.httpsHealthChecks.insert", // "parameterOrder": [ // "project" // ], @@ -37100,9 +51579,9 @@ func (c *HealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, e // "type": "string" // } // }, - // "path": "{project}/global/healthChecks", + // "path": "{project}/global/httpsHealthChecks", // "request": { - // "$ref": "HealthCheck" + // "$ref": "HttpsHealthCheck" // }, // "response": { // "$ref": "Operation" @@ -37115,9 +51594,9 @@ func (c *HealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, e } -// method id "compute.healthChecks.list": +// method id "compute.httpsHealthChecks.list": -type HealthChecksListCall struct { +type HttpsHealthChecksListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -37126,10 +51605,10 @@ type HealthChecksListCall struct { header_ http.Header } -// List: Retrieves the list of HealthCheck resources available to the -// specified project. -func (r *HealthChecksService) List(project string) *HealthChecksListCall { - c := &HealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of HttpsHealthCheck resources available to +// the specified project. +func (r *HttpsHealthChecksService) List(project string) *HttpsHealthChecksListCall { + c := &HttpsHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -37160,7 +51639,7 @@ func (r *HealthChecksService) List(project string) *HealthChecksListCall { // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *HealthChecksListCall) Filter(filter string) *HealthChecksListCall { +func (c *HttpsHealthChecksListCall) Filter(filter string) *HttpsHealthChecksListCall { c.urlParams_.Set("filter", filter) return c } @@ -37171,7 +51650,7 @@ func (c *HealthChecksListCall) Filter(filter string) *HealthChecksListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *HealthChecksListCall) MaxResults(maxResults int64) *HealthChecksListCall { +func (c *HttpsHealthChecksListCall) MaxResults(maxResults int64) *HttpsHealthChecksListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -37188,7 +51667,7 @@ func (c *HealthChecksListCall) MaxResults(maxResults int64) *HealthChecksListCal // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *HealthChecksListCall) OrderBy(orderBy string) *HealthChecksListCall { +func (c *HttpsHealthChecksListCall) OrderBy(orderBy string) *HttpsHealthChecksListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -37196,7 +51675,7 @@ func (c *HealthChecksListCall) OrderBy(orderBy string) *HealthChecksListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. 
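A hedged sketch of the Insert call, reusing svc and ctx from the earlier sketch; all field values below are placeholders, and only fields that are set end up in the request body.

// Create a new HTTPS health check. Insert, like Delete, returns an
// *Operation that can be polled until the resource is ready.
op, err := svc.HttpsHealthChecks.Insert("my-project", &compute.HttpsHealthCheck{
	Name:             "my-check",
	Host:             "example.com",
	Port:             443,
	RequestPath:      "/healthz",
	CheckIntervalSec: 10,
	TimeoutSec:       5,
}).Context(ctx).Do()
if err != nil {
	log.Fatal(err)
}
log.Printf("insert operation %s: %s", op.Name, op.Status)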
Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *HealthChecksListCall) PageToken(pageToken string) *HealthChecksListCall { +func (c *HttpsHealthChecksListCall) PageToken(pageToken string) *HttpsHealthChecksListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -37204,7 +51683,7 @@ func (c *HealthChecksListCall) PageToken(pageToken string) *HealthChecksListCall // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HealthChecksListCall) Fields(s ...googleapi.Field) *HealthChecksListCall { +func (c *HttpsHealthChecksListCall) Fields(s ...googleapi.Field) *HttpsHealthChecksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -37214,7 +51693,7 @@ func (c *HealthChecksListCall) Fields(s ...googleapi.Field) *HealthChecksListCal // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *HealthChecksListCall) IfNoneMatch(entityTag string) *HealthChecksListCall { +func (c *HttpsHealthChecksListCall) IfNoneMatch(entityTag string) *HttpsHealthChecksListCall { c.ifNoneMatch_ = entityTag return c } @@ -37222,21 +51701,21 @@ func (c *HealthChecksListCall) IfNoneMatch(entityTag string) *HealthChecksListCa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HealthChecksListCall) Context(ctx context.Context) *HealthChecksListCall { +func (c *HttpsHealthChecksListCall) Context(ctx context.Context) *HttpsHealthChecksListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HealthChecksListCall) Header() http.Header { +func (c *HttpsHealthChecksListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksListCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpsHealthChecksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -37247,7 +51726,7 @@ func (c *HealthChecksListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders @@ -37257,14 +51736,14 @@ func (c *HealthChecksListCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.list" call. -// Exactly one of *HealthCheckList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *HealthCheckList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.httpsHealthChecks.list" call. +// Exactly one of *HttpsHealthCheckList or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *HttpsHealthCheckList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckList, error) { +func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHealthCheckList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -37283,7 +51762,7 @@ func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckLis if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HealthCheckList{ + ret := &HttpsHealthCheckList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -37295,9 +51774,9 @@ func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckLis } return ret, nil // { - // "description": "Retrieves the list of HealthCheck resources available to the specified project.", + // "description": "Retrieves the list of HttpsHealthCheck resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.healthChecks.list", + // "id": "compute.httpsHealthChecks.list", // "parameterOrder": [ // "project" // ], @@ -37333,9 +51812,9 @@ func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckLis // "type": "string" // } // }, - // "path": "{project}/global/healthChecks", + // "path": "{project}/global/httpsHealthChecks", // "response": { - // "$ref": "HealthCheckList" + // "$ref": "HttpsHealthCheckList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -37349,7 +51828,7 @@ func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckLis // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *HealthChecksListCall) Pages(ctx context.Context, f func(*HealthCheckList) error) error { +func (c *HttpsHealthChecksListCall) Pages(ctx context.Context, f func(*HttpsHealthCheckList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -37367,346 +51846,26 @@ func (c *HealthChecksListCall) Pages(ctx context.Context, f func(*HealthCheckLis } } -// method id "compute.healthChecks.patch": - -type HealthChecksPatchCall struct { - s *Service - project string - healthCheck string - healthcheck *HealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Patch: Updates a HealthCheck resource in the specified project using -// the data included in the request. This method supports PATCH -// semantics and uses the JSON merge patch format and processing rules. -func (r *HealthChecksService) Patch(project string, healthCheck string, healthcheck *HealthCheck) *HealthChecksPatchCall { - c := &HealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.healthCheck = healthCheck - c.healthcheck = healthcheck - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. 
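The Pages helper generated above drives the pageToken/nextPageToken loop for the List call; a sketch reusing svc and ctx from the earlier setup.

// Page through all HTTPS health checks in the project. Pages follows
// NextPageToken internally and stops early if the callback returns an error.
err := svc.HttpsHealthChecks.List("my-project").
	MaxResults(100).
	Pages(ctx, func(page *compute.HttpsHealthCheckList) error {
		for _, hc := range page.Items {
			log.Printf("%s -> %s:%d", hc.Name, hc.Host, hc.Port)
		}
		return nil
	})
if err != nil {
	log.Fatal(err)
}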
-// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *HealthChecksPatchCall) RequestId(requestId string) *HealthChecksPatchCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *HealthChecksPatchCall) Fields(s ...googleapi.Field) *HealthChecksPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *HealthChecksPatchCall) Context(ctx context.Context) *HealthChecksPatchCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *HealthChecksPatchCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *HealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "healthCheck": c.healthCheck, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.healthChecks.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *HealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates a HealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.healthChecks.patch", - // "parameterOrder": [ - // "project", - // "healthCheck" - // ], - // "parameters": { - // "healthCheck": { - // "description": "Name of the HealthCheck resource to patch.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "{project}/global/healthChecks/{healthCheck}", - // "request": { - // "$ref": "HealthCheck" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.healthChecks.testIamPermissions": - -type HealthChecksTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *HealthChecksService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *HealthChecksTestIamPermissionsCall { - c := &HealthChecksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. 
See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *HealthChecksTestIamPermissionsCall) Fields(s ...googleapi.Field) *HealthChecksTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *HealthChecksTestIamPermissionsCall) Context(ctx context.Context) *HealthChecksTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *HealthChecksTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *HealthChecksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{resource}/testIamPermissions") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.healthChecks.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *HealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.healthChecks.testIamPermissions", - // "parameterOrder": [ - // "project", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/healthChecks/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.healthChecks.update": +// method id "compute.httpsHealthChecks.patch": -type HealthChecksUpdateCall struct { - s *Service - project string - healthCheck string - healthcheck *HealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HttpsHealthChecksPatchCall struct { + s *Service + project string + httpsHealthCheck string + httpshealthcheck *HttpsHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates a HealthCheck resource in the specified project using -// the data included in the request. -func (r *HealthChecksService) Update(project string, healthCheck string, healthcheck *HealthCheck) *HealthChecksUpdateCall { - c := &HealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates a HttpsHealthCheck resource in the specified project +// using the data included in the request. This method supports PATCH +// semantics and uses the JSON merge patch format and processing rules. +func (r *HttpsHealthChecksService) Patch(project string, httpsHealthCheck string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksPatchCall { + c := &HttpsHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.healthCheck = healthCheck - c.healthcheck = healthcheck + c.httpsHealthCheck = httpsHealthCheck + c.httpshealthcheck = httpshealthcheck return c } @@ -37724,7 +51883,7 @@ func (r *HealthChecksService) Update(project string, healthCheck string, healthc // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
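A sketch of the Patch call with the requestId parameter described above, again reusing svc and ctx; the UUID literal is a placeholder and would normally be freshly generated per logical request.

// Patch sends only the populated fields (JSON merge patch semantics).
// RequestId makes a retried call a no-op if the original already completed.
op, err := svc.HttpsHealthChecks.Patch("my-project", "my-check",
	&compute.HttpsHealthCheck{CheckIntervalSec: 30},
).RequestId("1f2e3d4c-5b6a-4789-9abc-def012345678").Context(ctx).Do()
if err != nil {
	log.Fatal(err)
}
log.Printf("patch operation %s: %s", op.Name, op.Status)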
-func (c *HealthChecksUpdateCall) RequestId(requestId string) *HealthChecksUpdateCall { +func (c *HttpsHealthChecksPatchCall) RequestId(requestId string) *HttpsHealthChecksPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -37732,7 +51891,7 @@ func (c *HealthChecksUpdateCall) RequestId(requestId string) *HealthChecksUpdate // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HealthChecksUpdateCall) Fields(s ...googleapi.Field) *HealthChecksUpdateCall { +func (c *HttpsHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpsHealthChecksPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -37740,52 +51899,52 @@ func (c *HealthChecksUpdateCall) Fields(s ...googleapi.Field) *HealthChecksUpdat // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HealthChecksUpdateCall) Context(ctx context.Context) *HealthChecksUpdateCall { +func (c *HttpsHealthChecksPatchCall) Context(ctx context.Context) *HttpsHealthChecksPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HealthChecksUpdateCall) Header() http.Header { +func (c *HttpsHealthChecksPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpsHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) + req, _ := http.NewRequest("PATCH", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "healthCheck": c.healthCheck, + "project": c.project, + "httpsHealthCheck": c.httpsHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.update" call. +// Do executes the "compute.httpsHealthChecks.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -37816,16 +51975,16 @@ func (c *HealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Updates a HealthCheck resource in the specified project using the data included in the request.", - // "httpMethod": "PUT", - // "id": "compute.healthChecks.update", + // "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.httpsHealthChecks.patch", // "parameterOrder": [ // "project", - // "healthCheck" + // "httpsHealthCheck" // ], // "parameters": { - // "healthCheck": { - // "description": "Name of the HealthCheck resource to update.", + // "httpsHealthCheck": { + // "description": "Name of the HttpsHealthCheck resource to patch.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -37844,9 +52003,9 @@ func (c *HealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, e // "type": "string" // } // }, - // "path": "{project}/global/healthChecks/{healthCheck}", + // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", // "request": { - // "$ref": "HealthCheck" + // "$ref": "HttpsHealthCheck" // }, // "response": { // "$ref": "Operation" @@ -37859,155 +52018,85 @@ func (c *HealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, e } -// method id "compute.hostTypes.aggregatedList": +// method id "compute.httpsHealthChecks.testIamPermissions": -type HostTypesAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type HttpsHealthChecksTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AggregatedList: Retrieves an aggregated list of host types. -func (r *HostTypesService) AggregatedList(project string) *HostTypesAggregatedListCall { - c := &HostTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *HttpsHealthChecksService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *HttpsHealthChecksTestIamPermissionsCall { + c := &HttpsHealthChecksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. 
-// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *HostTypesAggregatedListCall) Filter(filter string) *HostTypesAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *HostTypesAggregatedListCall) MaxResults(maxResults int64) *HostTypesAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *HostTypesAggregatedListCall) OrderBy(orderBy string) *HostTypesAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *HostTypesAggregatedListCall) PageToken(pageToken string) *HostTypesAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HostTypesAggregatedListCall) Fields(s ...googleapi.Field) *HostTypesAggregatedListCall { +func (c *HttpsHealthChecksTestIamPermissionsCall) Fields(s ...googleapi.Field) *HttpsHealthChecksTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
-func (c *HostTypesAggregatedListCall) IfNoneMatch(entityTag string) *HostTypesAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HostTypesAggregatedListCall) Context(ctx context.Context) *HostTypesAggregatedListCall { +func (c *HttpsHealthChecksTestIamPermissionsCall) Context(ctx context.Context) *HttpsHealthChecksTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HostTypesAggregatedListCall) Header() http.Header { +func (c *HttpsHealthChecksTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HostTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpsHealthChecksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/hostTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.hostTypes.aggregatedList" call. -// Exactly one of *HostTypeAggregatedList or error will be non-nil. Any +// Do executes the "compute.httpsHealthChecks.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *HostTypeAggregatedList.ServerResponse.Header or (if a response was +// *TestPermissionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *HostTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*HostTypeAggregatedList, error) { +func (c *HttpsHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -38026,7 +52115,7 @@ func (c *HostTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*HostTyp if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HostTypeAggregatedList{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -38038,47 +52127,35 @@ func (c *HostTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*HostTyp } return ret, nil // { - // "description": "Retrieves an aggregated list of host types.", - // "httpMethod": "GET", - // "id": "compute.hostTypes.aggregatedList", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.httpsHealthChecks.testIamPermissions", // "parameterOrder": [ - // "project" + // "project", + // "resource" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/aggregated/hostTypes", + // "path": "{project}/global/httpsHealthChecks/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "HostTypeAggregatedList" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -38089,116 +52166,104 @@ func (c *HostTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*HostTyp } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *HostTypesAggregatedListCall) Pages(ctx context.Context, f func(*HostTypeAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.hostTypes.get": +// method id "compute.httpsHealthChecks.update": -type HostTypesGetCall struct { - s *Service - project string - zone string - hostType string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type HttpsHealthChecksUpdateCall struct { + s *Service + project string + httpsHealthCheck string + httpshealthcheck *HttpsHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified host type. Get a list of available host -// types by making a list() request. -func (r *HostTypesService) Get(project string, zone string, hostType string) *HostTypesGetCall { - c := &HostTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates a HttpsHealthCheck resource in the specified project +// using the data included in the request. +func (r *HttpsHealthChecksService) Update(project string, httpsHealthCheck string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksUpdateCall { + c := &HttpsHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.hostType = hostType + c.httpsHealthCheck = httpsHealthCheck + c.httpshealthcheck = httpshealthcheck + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
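A sketch of the TestIamPermissions call generated above, reusing svc and ctx; the permission strings are illustrative candidates, not an authoritative list.

// Ask which of a candidate set of permissions the caller actually holds
// on the named health check; the response echoes back the granted subset.
resp, err := svc.HttpsHealthChecks.TestIamPermissions("my-project", "my-check",
	&compute.TestPermissionsRequest{Permissions: []string{
		"compute.httpsHealthChecks.get",
		"compute.httpsHealthChecks.update",
	}},
).Context(ctx).Do()
if err != nil {
	log.Fatal(err)
}
log.Printf("granted: %v", resp.Permissions)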
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *HttpsHealthChecksUpdateCall) RequestId(requestId string) *HttpsHealthChecksUpdateCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HostTypesGetCall) Fields(s ...googleapi.Field) *HostTypesGetCall { +func (c *HttpsHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpsHealthChecksUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *HostTypesGetCall) IfNoneMatch(entityTag string) *HostTypesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HostTypesGetCall) Context(ctx context.Context) *HostTypesGetCall { +func (c *HttpsHealthChecksUpdateCall) Context(ctx context.Context) *HttpsHealthChecksUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HostTypesGetCall) Header() http.Header { +func (c *HttpsHealthChecksUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HostTypesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpsHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/hostTypes/{hostType}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("PUT", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "hostType": c.hostType, + "project": c.project, + "httpsHealthCheck": c.httpsHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.hostTypes.get" call. -// Exactly one of *HostType or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *HostType.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.httpsHealthChecks.update" call. 
+// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HostTypesGetCall) Do(opts ...googleapi.CallOption) (*HostType, error) { +func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -38217,7 +52282,7 @@ func (c *HostTypesGetCall) Do(opts ...googleapi.CallOption) (*HostType, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HostType{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -38229,17 +52294,16 @@ func (c *HostTypesGetCall) Do(opts ...googleapi.CallOption) (*HostType, error) { } return ret, nil // { - // "description": "Returns the specified host type. Get a list of available host types by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.hostTypes.get", + // "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.httpsHealthChecks.update", // "parameterOrder": [ // "project", - // "zone", - // "hostType" + // "httpsHealthCheck" // ], // "parameters": { - // "hostType": { - // "description": "Name of the host type to return.", + // "httpsHealthCheck": { + // "description": "Name of the HttpsHealthCheck resource to update.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -38252,180 +52316,118 @@ func (c *HostTypesGetCall) Do(opts ...googleapi.CallOption) (*HostType, error) { // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/hostTypes/{hostType}", + // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + // "request": { + // "$ref": "HttpsHealthCheck" + // }, // "response": { - // "$ref": "HostType" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.hostTypes.list": +// method id "compute.images.delete": -type HostTypesListCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ImagesDeleteCall struct { + s *Service + project string + image string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of host types available to the specified -// project. -func (r *HostTypesService) List(project string, zone string) *HostTypesListCall { - c := &HostTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified image. +// For details, see https://cloud.google.com/compute/docs/reference/latest/images/delete +func (r *ImagesService) Delete(project string, image string) *ImagesDeleteCall { + c := &ImagesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - return c -} - -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *HostTypesListCall) Filter(filter string) *HostTypesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. 
If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *HostTypesListCall) MaxResults(maxResults int64) *HostTypesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + c.image = image return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *HostTypesListCall) OrderBy(orderBy string) *HostTypesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *HostTypesListCall) PageToken(pageToken string) *HostTypesListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ImagesDeleteCall) RequestId(requestId string) *ImagesDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HostTypesListCall) Fields(s ...googleapi.Field) *HostTypesListCall { +func (c *ImagesDeleteCall) Fields(s ...googleapi.Field) *ImagesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *HostTypesListCall) IfNoneMatch(entityTag string) *HostTypesListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *HostTypesListCall) Context(ctx context.Context) *HostTypesListCall { +func (c *ImagesDeleteCall) Context(ctx context.Context) *ImagesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HostTypesListCall) Header() http.Header { +func (c *ImagesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HostTypesListCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/hostTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, + "image": c.image, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.hostTypes.list" call. -// Exactly one of *HostTypeList or error will be non-nil. Any non-2xx +// Do executes the "compute.images.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *HostTypeList.ServerResponse.Header or (if a response was returned at +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HostTypesListCall) Do(opts ...googleapi.CallOption) (*HostTypeList, error) { +func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -38444,7 +52446,7 @@ func (c *HostTypesListCall) Do(opts ...googleapi.CallOption) (*HostTypeList, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HostTypeList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -38456,35 +52458,19 @@ func (c *HostTypesListCall) Do(opts ...googleapi.CallOption) (*HostTypeList, err } return ret, nil // { - // "description": "Retrieves a list of host types available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.hostTypes.list", + // "description": "Deletes the specified image.", + // "httpMethod": "DELETE", + // "id": "compute.images.delete", // "parameterOrder": [ // "project", - // "zone" + // "image" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. 
The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "image": { + // "description": "Name of the image resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, // "project": { @@ -38494,197 +52480,125 @@ func (c *HostTypesListCall) Do(opts ...googleapi.CallOption) (*HostTypeList, err // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/hostTypes", + // "path": "{project}/global/images/{image}", // "response": { - // "$ref": "HostTypeList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *HostTypesListCall) Pages(ctx context.Context, f func(*HostTypeList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.hosts.aggregatedList": - -type HostsAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} +// method id "compute.images.deprecate": -// AggregatedList: Retrieves an aggregated list of hosts. -func (r *HostsService) AggregatedList(project string) *HostsAggregatedListCall { - c := &HostsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c +type ImagesDeprecateCall struct { + s *Service + project string + image string + deprecationstatus *DeprecationStatus + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// Deprecate: Sets the deprecation status of an image. // -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. 
-func (c *HostsAggregatedListCall) Filter(filter string) *HostsAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *HostsAggregatedListCall) MaxResults(maxResults int64) *HostsAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) +// If an empty request body is given, clears the deprecation status +// instead. +// For details, see https://cloud.google.com/compute/docs/reference/latest/images/deprecate +func (r *ImagesService) Deprecate(project string, image string, deprecationstatus *DeprecationStatus) *ImagesDeprecateCall { + c := &ImagesDeprecateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.image = image + c.deprecationstatus = deprecationstatus return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *HostsAggregatedListCall) OrderBy(orderBy string) *HostsAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *HostsAggregatedListCall) PageToken(pageToken string) *HostsAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ImagesDeprecateCall) RequestId(requestId string) *ImagesDeprecateCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *HostsAggregatedListCall) Fields(s ...googleapi.Field) *HostsAggregatedListCall { +func (c *ImagesDeprecateCall) Fields(s ...googleapi.Field) *ImagesDeprecateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *HostsAggregatedListCall) IfNoneMatch(entityTag string) *HostsAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HostsAggregatedListCall) Context(ctx context.Context) *HostsAggregatedListCall { +func (c *ImagesDeprecateCall) Context(ctx context.Context) *ImagesDeprecateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HostsAggregatedListCall) Header() http.Header { +func (c *ImagesDeprecateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HostsAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesDeprecateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.deprecationstatus) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/hosts") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}/deprecate") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "image": c.image, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.hosts.aggregatedList" call. -// Exactly one of *HostAggregatedList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *HostAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *HostsAggregatedListCall) Do(opts ...googleapi.CallOption) (*HostAggregatedList, error) { +// Do executes the "compute.images.deprecate" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -38703,7 +52617,7 @@ func (c *HostsAggregatedListCall) Do(opts ...googleapi.CallOption) (*HostAggrega if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HostAggregatedList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -38715,34 +52629,19 @@ func (c *HostsAggregatedListCall) Do(opts ...googleapi.CallOption) (*HostAggrega } return ret, nil // { - // "description": "Retrieves an aggregated list of hosts.", - // "httpMethod": "GET", - // "id": "compute.hosts.aggregatedList", + // "description": "Sets the deprecation status of an image.\n\nIf an empty request body is given, clears the deprecation status instead.", + // "httpMethod": "POST", + // "id": "compute.images.deprecate", // "parameterOrder": [ - // "project" + // "project", + // "image" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "image": { + // "description": "Image name.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, // "project": { @@ -38751,135 +52650,115 @@ func (c *HostsAggregatedListCall) Do(opts ...googleapi.CallOption) (*HostAggrega // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/aggregated/hosts", + // "path": "{project}/global/images/{image}/deprecate", + // "request": { + // "$ref": "DeprecationStatus" + // }, // "response": { - // "$ref": "HostAggregatedList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *HostsAggregatedListCall) Pages(ctx context.Context, f func(*HostAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.hosts.delete": +// method id "compute.images.get": -type HostsDeleteCall struct { - s *Service - project string - zone string - host string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ImagesGetCall struct { + s *Service + project string + image string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified Host resource. -func (r *HostsService) Delete(project string, zone string, host string) *HostsDeleteCall { - c := &HostsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified image. Get a list of available images by +// making a list() request. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/images/get +func (r *ImagesService) Get(project string, image string) *ImagesGetCall { + c := &ImagesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.host = host - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *HostsDeleteCall) RequestId(requestId string) *HostsDeleteCall { - c.urlParams_.Set("requestId", requestId) + c.image = image return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HostsDeleteCall) Fields(s ...googleapi.Field) *HostsDeleteCall { +func (c *ImagesGetCall) Fields(s ...googleapi.Field) *ImagesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ImagesGetCall) IfNoneMatch(entityTag string) *ImagesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HostsDeleteCall) Context(ctx context.Context) *HostsDeleteCall { +func (c *ImagesGetCall) Context(ctx context.Context) *ImagesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HostsDeleteCall) Header() http.Header { +func (c *ImagesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HostsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/hosts/{host}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, - "host": c.host, + "image": c.image, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.hosts.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *HostsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.images.get" call. +// Exactly one of *Image or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Image.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -38898,7 +52777,7 @@ func (c *HostsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Image{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -38910,17 +52789,16 @@ func (c *HostsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { } return ret, nil // { - // "description": "Deletes the specified Host resource.", - // "httpMethod": "DELETE", - // "id": "compute.hosts.delete", + // "description": "Returns the specified image. Get a list of available images by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.images.get", // "parameterOrder": [ // "project", - // "zone", - // "host" + // "image" // ], // "parameters": { - // "host": { - // "description": "Name of the Host resource to delete.", + // "image": { + // "description": "Name of the image resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -38932,59 +52810,46 @@ func (c *HostsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/hosts/{host}", + // "path": "{project}/global/images/{image}", // "response": { - // "$ref": "Operation" + // "$ref": "Image" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.hosts.get": +// method id "compute.images.getFromFamily": -type HostsGetCall struct { +type ImagesGetFromFamilyCall struct { s *Service project string - zone string - host string + family string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the specified host. Get a list of available hosts by -// making a list() request. -func (r *HostsService) Get(project string, zone string, host string) *HostsGetCall { - c := &HostsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetFromFamily: Returns the latest image that is part of an image +// family and is not deprecated. +func (r *ImagesService) GetFromFamily(project string, family string) *ImagesGetFromFamilyCall { + c := &ImagesGetFromFamilyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.host = host + c.family = family return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HostsGetCall) Fields(s ...googleapi.Field) *HostsGetCall { +func (c *ImagesGetFromFamilyCall) Fields(s ...googleapi.Field) *ImagesGetFromFamilyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -38994,7 +52859,7 @@ func (c *HostsGetCall) Fields(s ...googleapi.Field) *HostsGetCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *HostsGetCall) IfNoneMatch(entityTag string) *HostsGetCall { +func (c *ImagesGetFromFamilyCall) IfNoneMatch(entityTag string) *ImagesGetFromFamilyCall { c.ifNoneMatch_ = entityTag return c } @@ -39002,21 +52867,21 @@ func (c *HostsGetCall) IfNoneMatch(entityTag string) *HostsGetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HostsGetCall) Context(ctx context.Context) *HostsGetCall { +func (c *ImagesGetFromFamilyCall) Context(ctx context.Context) *ImagesGetFromFamilyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
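// Editor's note: the function below is an illustrative sketch, not part of the
// generated file. It shows one plausible way to combine the IfNoneMatch option
// documented above with googleapi.IsNotModified for a conditional read,
// assuming a constructed *Service whose images collection is exposed as
// svc.Images (as elsewhere in this client), a cached ETag from an earlier
// response, and placeholder project/family names.
func exampleConditionalGetFromFamily(ctx context.Context, svc *Service, etag string) (*Image, bool, error) {
	img, err := svc.Images.GetFromFamily("my-project", "my-family").
		IfNoneMatch(etag). // fail the read if the object's ETag still matches
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		return nil, false, nil // cached copy is still current
	}
	if err != nil {
		return nil, false, err
	}
	return img, true, nil
}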
-func (c *HostsGetCall) Header() http.Header { +func (c *ImagesGetFromFamilyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HostsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesGetFromFamilyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -39027,26 +52892,25 @@ func (c *HostsGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/hosts/{host}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/family/{family}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, - "host": c.host, + "family": c.family, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.hosts.get" call. -// Exactly one of *Host or error will be non-nil. Any non-2xx status +// Do executes the "compute.images.getFromFamily" call. +// Exactly one of *Image or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either -// *Host.ServerResponse.Header or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *HostsGetCall) Do(opts ...googleapi.CallOption) (*Host, error) { +// *Image.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -39065,7 +52929,7 @@ func (c *HostsGetCall) Do(opts ...googleapi.CallOption) (*Host, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Host{ + ret := &Image{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -39077,17 +52941,16 @@ func (c *HostsGetCall) Do(opts ...googleapi.CallOption) (*Host, error) { } return ret, nil // { - // "description": "Returns the specified host. 
Get a list of available hosts by making a list() request.", + // "description": "Returns the latest image that is part of an image family and is not deprecated.", // "httpMethod": "GET", - // "id": "compute.hosts.get", + // "id": "compute.images.getFromFamily", // "parameterOrder": [ // "project", - // "zone", - // "host" + // "family" // ], // "parameters": { - // "host": { - // "description": "Name of the host to return.", + // "family": { + // "description": "Name of the image family to search for.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -39099,18 +52962,11 @@ func (c *HostsGetCall) Do(opts ...googleapi.CallOption) (*Host, error) { // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/hosts/{host}", + // "path": "{project}/global/images/family/{family}", // "response": { - // "$ref": "Host" + // "$ref": "Image" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -39121,12 +52977,11 @@ func (c *HostsGetCall) Do(opts ...googleapi.CallOption) (*Host, error) { } -// method id "compute.hosts.getIamPolicy": +// method id "compute.images.getIamPolicy": -type HostsGetIamPolicyCall struct { +type ImagesGetIamPolicyCall struct { s *Service project string - zone string resource string urlParams_ gensupport.URLParams ifNoneMatch_ string @@ -39136,10 +52991,9 @@ type HostsGetIamPolicyCall struct { // GetIamPolicy: Gets the access control policy for a resource. May be // empty if no such policy or resource exists. -func (r *HostsService) GetIamPolicy(project string, zone string, resource string) *HostsGetIamPolicyCall { - c := &HostsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *ImagesService) GetIamPolicy(project string, resource string) *ImagesGetIamPolicyCall { + c := &ImagesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone c.resource = resource return c } @@ -39147,7 +53001,7 @@ func (r *HostsService) GetIamPolicy(project string, zone string, resource string // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HostsGetIamPolicyCall) Fields(s ...googleapi.Field) *HostsGetIamPolicyCall { +func (c *ImagesGetIamPolicyCall) Fields(s ...googleapi.Field) *ImagesGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -39157,7 +53011,7 @@ func (c *HostsGetIamPolicyCall) Fields(s ...googleapi.Field) *HostsGetIamPolicyC // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *HostsGetIamPolicyCall) IfNoneMatch(entityTag string) *HostsGetIamPolicyCall { +func (c *ImagesGetIamPolicyCall) IfNoneMatch(entityTag string) *ImagesGetIamPolicyCall { c.ifNoneMatch_ = entityTag return c } @@ -39165,21 +53019,21 @@ func (c *HostsGetIamPolicyCall) IfNoneMatch(entityTag string) *HostsGetIamPolicy // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HostsGetIamPolicyCall) Context(ctx context.Context) *HostsGetIamPolicyCall { +func (c *ImagesGetIamPolicyCall) Context(ctx context.Context) *ImagesGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HostsGetIamPolicyCall) Header() http.Header { +func (c *ImagesGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HostsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -39190,26 +53044,25 @@ func (c *HostsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/hosts/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.hosts.getIamPolicy" call. +// Do executes the "compute.images.getIamPolicy" call. // Exactly one of *Policy or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Policy.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *HostsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +func (c *ImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -39242,10 +53095,9 @@ func (c *HostsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error // { // "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", // "httpMethod": "GET", - // "id": "compute.hosts.getIamPolicy", + // "id": "compute.images.getIamPolicy", // "parameterOrder": [ // "project", - // "zone", // "resource" // ], // "parameters": { @@ -39259,19 +53111,12 @@ func (c *HostsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error // "resource": { // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z0-9](?:[-a-z0-9_]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/hosts/{resource}/getIamPolicy", + // "path": "{project}/global/images/{resource}/getIamPolicy", // "response": { // "$ref": "Policy" // }, @@ -39284,25 +53129,31 @@ func (c *HostsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error } -// method id "compute.hosts.insert": +// method id "compute.images.insert": -type HostsInsertCall struct { +type ImagesInsertCall struct { s *Service project string - zone string - host *Host + image *Image urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Insert: Creates a host resource in the specified project using the -// data included in the request. -func (r *HostsService) Insert(project string, zone string, host *Host) *HostsInsertCall { - c := &HostsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates an image in the specified project using the data +// included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/images/insert +func (r *ImagesService) Insert(project string, image *Image) *ImagesInsertCall { + c := &ImagesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.host = host + c.image = image + return c +} + +// ForceCreate sets the optional parameter "forceCreate": Force image +// creation if true. +func (c *ImagesInsertCall) ForceCreate(forceCreate bool) *ImagesInsertCall { + c.urlParams_.Set("forceCreate", fmt.Sprint(forceCreate)) return c } @@ -39320,7 +53171,7 @@ func (r *HostsService) Insert(project string, zone string, host *Host) *HostsIns // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *HostsInsertCall) RequestId(requestId string) *HostsInsertCall { +func (c *ImagesInsertCall) RequestId(requestId string) *ImagesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -39328,7 +53179,7 @@ func (c *HostsInsertCall) RequestId(requestId string) *HostsInsertCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HostsInsertCall) Fields(s ...googleapi.Field) *HostsInsertCall { +func (c *ImagesInsertCall) Fields(s ...googleapi.Field) *ImagesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -39336,52 +53187,51 @@ func (c *HostsInsertCall) Fields(s ...googleapi.Field) *HostsInsertCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
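// Editor's note: an illustrative sketch, not part of the generated file. It
// shows how the optional requestId parameter documented above can make an
// image insert safe to retry, assuming svc is a constructed *Service exposing
// svc.Images, requestID is a caller-generated non-zero UUID, and the project
// name is a placeholder.
func exampleIdempotentImageInsert(ctx context.Context, svc *Service, img *Image, requestID string) (*Operation, error) {
	// Re-sending the same requestID on a retry lets the server recognize the
	// duplicate and ignore it instead of creating a second image.
	return svc.Images.Insert("my-project", img).
		RequestId(requestID).
		Context(ctx).
		Do()
}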
-func (c *HostsInsertCall) Context(ctx context.Context) *HostsInsertCall { +func (c *ImagesInsertCall) Context(ctx context.Context) *ImagesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HostsInsertCall) Header() http.Header { +func (c *ImagesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HostsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.host) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.image) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/hosts") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.hosts.insert" call. +// Do executes the "compute.images.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HostsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -39412,14 +53262,18 @@ func (c *HostsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { } return ret, nil // { - // "description": "Creates a host resource in the specified project using the data included in the request.", + // "description": "Creates an image in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.hosts.insert", + // "id": "compute.images.insert", // "parameterOrder": [ - // "project", - // "zone" + // "project" // ], // "parameters": { + // "forceCreate": { + // "description": "Force image creation if true.", + // "location": "query", + // "type": "boolean" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -39431,47 +53285,48 @@ func (c *HostsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/hosts", + // "path": "{project}/global/images", // "request": { - // "$ref": "Host" + // "$ref": "Image" // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "compute.hosts.list": +// method id "compute.images.list": -type HostsListCall struct { +type ImagesListCall struct { s *Service project string - zone string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of hosts available to the specified project. -func (r *HostsService) List(project string, zone string) *HostsListCall { - c := &HostsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of private images available to the specified +// project. Private images are images you create that belong to your +// project. This method does not get any images that belong to other +// projects, including publicly-available images, like Debian 8. If you +// want to get a list of publicly-available images, use this method to +// make a request to the respective image project, such as debian-cloud +// or windows-cloud. +// For details, see https://cloud.google.com/compute/docs/reference/latest/images/list +func (r *ImagesService) List(project string) *ImagesListCall { + c := &ImagesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone return c } @@ -39501,7 +53356,7 @@ func (r *HostsService) List(project string, zone string) *HostsListCall { // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *HostsListCall) Filter(filter string) *HostsListCall { +func (c *ImagesListCall) Filter(filter string) *ImagesListCall { c.urlParams_.Set("filter", filter) return c } @@ -39512,7 +53367,7 @@ func (c *HostsListCall) Filter(filter string) *HostsListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *HostsListCall) MaxResults(maxResults int64) *HostsListCall { +func (c *ImagesListCall) MaxResults(maxResults int64) *ImagesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -39529,7 +53384,7 @@ func (c *HostsListCall) MaxResults(maxResults int64) *HostsListCall { // // Currently, only sorting by name or creationTimestamp desc is // supported. 
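Like most mutating calls in this file, the Do method just above returns an *Operation rather than the finished image. A minimal polling sketch, assuming the GlobalOperations service and the "DONE" status string defined elsewhere in this generated package (neither appears in this particular hunk):

```go
package main

import (
	"context"
	"fmt"
	"time"

	compute "google.golang.org/api/compute/v0.beta" // assumed vendored version
)

// waitGlobal polls a global Operation until it reports DONE, then surfaces any
// operation-level errors. Poll interval is arbitrary for the sketch.
func waitGlobal(ctx context.Context, svc *compute.Service, project, opName string) error {
	for {
		op, err := svc.GlobalOperations.Get(project, opName).Context(ctx).Do()
		if err != nil {
			return err
		}
		if op.Status == "DONE" {
			if op.Error != nil {
				return fmt.Errorf("operation %s failed: %+v", opName, op.Error.Errors)
			}
			return nil
		}
		time.Sleep(2 * time.Second)
	}
}
```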
-func (c *HostsListCall) OrderBy(orderBy string) *HostsListCall { +func (c *ImagesListCall) OrderBy(orderBy string) *ImagesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -39537,7 +53392,7 @@ func (c *HostsListCall) OrderBy(orderBy string) *HostsListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *HostsListCall) PageToken(pageToken string) *HostsListCall { +func (c *ImagesListCall) PageToken(pageToken string) *ImagesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -39545,7 +53400,7 @@ func (c *HostsListCall) PageToken(pageToken string) *HostsListCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HostsListCall) Fields(s ...googleapi.Field) *HostsListCall { +func (c *ImagesListCall) Fields(s ...googleapi.Field) *ImagesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -39555,7 +53410,7 @@ func (c *HostsListCall) Fields(s ...googleapi.Field) *HostsListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *HostsListCall) IfNoneMatch(entityTag string) *HostsListCall { +func (c *ImagesListCall) IfNoneMatch(entityTag string) *ImagesListCall { c.ifNoneMatch_ = entityTag return c } @@ -39563,21 +53418,21 @@ func (c *HostsListCall) IfNoneMatch(entityTag string) *HostsListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HostsListCall) Context(ctx context.Context) *HostsListCall { +func (c *ImagesListCall) Context(ctx context.Context) *ImagesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HostsListCall) Header() http.Header { +func (c *ImagesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HostsListCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -39588,25 +53443,24 @@ func (c *HostsListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/hosts") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.hosts.list" call. -// Exactly one of *HostList or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *HostList.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.images.list" call. +// Exactly one of *ImageList or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *ImageList.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HostsListCall) Do(opts ...googleapi.CallOption) (*HostList, error) { +func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -39625,7 +53479,7 @@ func (c *HostsListCall) Do(opts ...googleapi.CallOption) (*HostList, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HostList{ + ret := &ImageList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -39637,12 +53491,11 @@ func (c *HostsListCall) Do(opts ...googleapi.CallOption) (*HostList, error) { } return ret, nil // { - // "description": "Retrieves a list of hosts available to the specified project.", + // "description": "Retrieves the list of private images available to the specified project. Private images are images you create that belong to your project. This method does not get any images that belong to other projects, including publicly-available images, like Debian 8. If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", // "httpMethod": "GET", - // "id": "compute.hosts.list", + // "id": "compute.images.list", // "parameterOrder": [ - // "project", - // "zone" + // "project" // ], // "parameters": { // "filter": { @@ -39674,18 +53527,11 @@ func (c *HostsListCall) Do(opts ...googleapi.CallOption) (*HostList, error) { // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/hosts", + // "path": "{project}/global/images", // "response": { - // "$ref": "HostList" + // "$ref": "ImageList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -39699,7 +53545,7 @@ func (c *HostsListCall) Do(opts ...googleapi.CallOption) (*HostList, error) { // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *HostsListCall) Pages(ctx context.Context, f func(*HostList) error) error { +func (c *ImagesListCall) Pages(ctx context.Context, f func(*ImageList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -39717,12 +53563,11 @@ func (c *HostsListCall) Pages(ctx context.Context, f func(*HostList) error) erro } } -// method id "compute.hosts.setIamPolicy": +// method id "compute.images.setIamPolicy": -type HostsSetIamPolicyCall struct { +type ImagesSetIamPolicyCall struct { s *Service project string - zone string resource string policy *Policy urlParams_ gensupport.URLParams @@ -39732,10 +53577,9 @@ type HostsSetIamPolicyCall struct { // SetIamPolicy: Sets the access control policy on the specified // resource. 
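Images.List and its Pages helper shown above follow the same builder pattern as the rest of the file. A hedged sketch of walking a project's private images page by page — the filter expression and the ImageList.Items/Image.Name fields are assumptions consistent with the schema, and the project ID is illustrative:

```go
package main

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed vendored version
)

// listImages walks every page of compute.images.list for one project.
func listImages(ctx context.Context, svc *compute.Service, project string) error {
	call := svc.Images.List(project).
		Filter("name eq my-image-.*"). // filter syntax as documented on the Filter builder above
		MaxResults(100).
		OrderBy("creationTimestamp desc")
	return call.Pages(ctx, func(page *compute.ImageList) error {
		for _, img := range page.Items { // Items field assumed from the ImageList schema
			fmt.Println(img.Name)
		}
		return nil
	})
}
```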
Replaces any existing policy. -func (r *HostsService) SetIamPolicy(project string, zone string, resource string, policy *Policy) *HostsSetIamPolicyCall { - c := &HostsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *ImagesService) SetIamPolicy(project string, resource string, policy *Policy) *ImagesSetIamPolicyCall { + c := &ImagesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone c.resource = resource c.policy = policy return c @@ -39744,7 +53588,7 @@ func (r *HostsService) SetIamPolicy(project string, zone string, resource string // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HostsSetIamPolicyCall) Fields(s ...googleapi.Field) *HostsSetIamPolicyCall { +func (c *ImagesSetIamPolicyCall) Fields(s ...googleapi.Field) *ImagesSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -39752,21 +53596,21 @@ func (c *HostsSetIamPolicyCall) Fields(s ...googleapi.Field) *HostsSetIamPolicyC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HostsSetIamPolicyCall) Context(ctx context.Context) *HostsSetIamPolicyCall { +func (c *ImagesSetIamPolicyCall) Context(ctx context.Context) *ImagesSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HostsSetIamPolicyCall) Header() http.Header { +func (c *ImagesSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HostsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -39779,26 +53623,25 @@ func (c *HostsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/hosts/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.hosts.setIamPolicy" call. +// Do executes the "compute.images.setIamPolicy" call. // Exactly one of *Policy or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Policy.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *HostsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +func (c *ImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -39831,10 +53674,9 @@ func (c *HostsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error // { // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", // "httpMethod": "POST", - // "id": "compute.hosts.setIamPolicy", + // "id": "compute.images.setIamPolicy", // "parameterOrder": [ // "project", - // "zone", // "resource" // ], // "parameters": { @@ -39848,19 +53690,12 @@ func (c *HostsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error // "resource": { // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z0-9](?:[-a-z0-9_]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/hosts/{resource}/setIamPolicy", + // "path": "{project}/global/images/{resource}/setIamPolicy", // "request": { // "$ref": "Policy" // }, @@ -39875,34 +53710,32 @@ func (c *HostsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error } -// method id "compute.hosts.testIamPermissions": +// method id "compute.images.setLabels": -type HostsTestIamPermissionsCall struct { +type ImagesSetLabelsCall struct { s *Service project string - zone string resource string - testpermissionsrequest *TestPermissionsRequest + globalsetlabelsrequest *GlobalSetLabelsRequest urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *HostsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *HostsTestIamPermissionsCall { - c := &HostsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetLabels: Sets the labels on an image. To learn more about labels, +// read the Labeling Resources documentation. +func (r *ImagesService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *ImagesSetLabelsCall { + c := &ImagesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.globalsetlabelsrequest = globalsetlabelsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HostsTestIamPermissionsCall) Fields(s ...googleapi.Field) *HostsTestIamPermissionsCall { +func (c *ImagesSetLabelsCall) Fields(s ...googleapi.Field) *ImagesSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -39910,53 +53743,52 @@ func (c *HostsTestIamPermissionsCall) Fields(s ...googleapi.Field) *HostsTestIam // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
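compute.images.setIamPolicy, introduced just above, takes the resource name plus a full Policy body, so callers typically do a read-modify-write. A hedged sketch; the GetIamPolicy builder and the Policy/Binding fields are assumed from the same generated surface and are not shown verbatim in this hunk:

```go
package main

import (
	"context"

	compute "google.golang.org/api/compute/v0.beta" // assumed vendored version
)

// grantImageUser adds a hypothetical member to an image's IAM policy.
func grantImageUser(ctx context.Context, svc *compute.Service, project, image, member string) (*compute.Policy, error) {
	// Read the current policy first so the write carries its etag forward.
	policy, err := svc.Images.GetIamPolicy(project, image).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	policy.Bindings = append(policy.Bindings, &compute.Binding{ // Binding fields assumed
		Role:    "roles/compute.imageUser",
		Members: []string{member},
	})
	return svc.Images.SetIamPolicy(project, image, policy).Context(ctx).Do()
}
```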
-func (c *HostsTestIamPermissionsCall) Context(ctx context.Context) *HostsTestIamPermissionsCall { +func (c *ImagesSetLabelsCall) Context(ctx context.Context) *ImagesSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HostsTestIamPermissionsCall) Header() http.Header { +func (c *ImagesSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HostsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/hosts/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.hosts.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *HostsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.images.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ImagesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -39975,7 +53807,7 @@ func (c *HostsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPer if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -39987,12 +53819,11 @@ func (c *HostsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPer } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Sets the labels on an image. 
To learn more about labels, read the Labeling Resources documentation.", // "httpMethod": "POST", - // "id": "compute.hosts.testIamPermissions", + // "id": "compute.images.setLabels", // "parameterOrder": [ // "project", - // "zone", // "resource" // ], // "parameters": { @@ -40006,77 +53837,52 @@ func (c *HostsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPer // "resource": { // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/hosts/{resource}/testIamPermissions", + // "path": "{project}/global/images/{resource}/setLabels", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "GlobalSetLabelsRequest" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.httpHealthChecks.delete": +// method id "compute.images.testIamPermissions": -type HttpHealthChecksDeleteCall struct { - s *Service - project string - httpHealthCheck string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ImagesTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified HttpHealthCheck resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/delete -func (r *HttpHealthChecksService) Delete(project string, httpHealthCheck string) *HttpHealthChecksDeleteCall { - c := &HttpHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *ImagesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *ImagesTestIamPermissionsCall { + c := &ImagesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpHealthCheck = httpHealthCheck - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). 
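compute.images.setLabels wraps a GlobalSetLabelsRequest body rather than bare query parameters. A hedged sketch; the Labels and LabelFingerprint field names are assumptions about the request schema, and the Images.Get call used to fetch the fingerprint is defined elsewhere in this generated file:

```go
package main

import (
	"context"

	compute "google.golang.org/api/compute/v0.beta" // assumed vendored version
)

// labelImage sets a single label on an image, preserving optimistic locking via
// the label fingerprint returned by a prior images.get call.
func labelImage(ctx context.Context, svc *compute.Service, project, image string) (*compute.Operation, error) {
	img, err := svc.Images.Get(project, image).Context(ctx).Do() // Get not shown in this hunk
	if err != nil {
		return nil, err
	}
	req := &compute.GlobalSetLabelsRequest{ // field names assumed
		Labels:           map[string]string{"env": "test"},
		LabelFingerprint: img.LabelFingerprint,
	}
	return svc.Images.SetLabels(project, image, req).Context(ctx).Do()
}
```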
-func (c *HttpHealthChecksDeleteCall) RequestId(requestId string) *HttpHealthChecksDeleteCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpHealthChecksDeleteCall { +func (c *ImagesTestIamPermissionsCall) Fields(s ...googleapi.Field) *ImagesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -40084,204 +53890,52 @@ func (c *HttpHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpHealthChe // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpHealthChecksDeleteCall) Context(ctx context.Context) *HttpHealthChecksDeleteCall { +func (c *ImagesTestIamPermissionsCall) Context(ctx context.Context) *ImagesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpHealthChecksDeleteCall) Header() http.Header { +func (c *ImagesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpHealthCheck": c.httpHealthCheck, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.httpHealthChecks.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *HttpHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified HttpHealthCheck resource.", - // "httpMethod": "DELETE", - // "id": "compute.httpHealthChecks.delete", - // "parameterOrder": [ - // "project", - // "httpHealthCheck" - // ], - // "parameters": { - // "httpHealthCheck": { - // "description": "Name of the HttpHealthCheck resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.httpHealthChecks.get": - -type HttpHealthChecksGetCall struct { - s *Service - project string - httpHealthCheck string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified HttpHealthCheck resource. Get a list of -// available HTTP health checks by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/get -func (r *HttpHealthChecksService) Get(project string, httpHealthCheck string) *HttpHealthChecksGetCall { - c := &HttpHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.httpHealthCheck = httpHealthCheck - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *HttpHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpHealthChecksGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *HttpHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpHealthChecksGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *HttpHealthChecksGetCall) Context(ctx context.Context) *HttpHealthChecksGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *HttpHealthChecksGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *HttpHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpHealthCheck": c.httpHealthCheck, + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpHealthChecks.get" call. -// Exactly one of *HttpHealthCheck or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *HttpHealthCheck.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.images.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthCheck, error) { +func (c *ImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -40300,7 +53954,7 @@ func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthC if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HttpHealthCheck{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -40312,32 +53966,35 @@ func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthC } return ret, nil // { - // "description": "Returns the specified HttpHealthCheck resource. Get a list of available HTTP health checks by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.httpHealthChecks.get", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.images.testIamPermissions", // "parameterOrder": [ // "project", - // "httpHealthCheck" + // "resource" // ], // "parameters": { - // "httpHealthCheck": { - // "description": "Name of the HttpHealthCheck resource to return.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "resource": { + // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + // "path": "{project}/global/images/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "HttpHealthCheck" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -40348,24 +54005,42 @@ func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthC } -// method id "compute.httpHealthChecks.insert": +// method id "compute.instanceGroupManagers.abandonInstances": -type HttpHealthChecksInsertCall struct { - s *Service - project string - httphealthcheck *HttpHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersAbandonInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersabandoninstancesrequest *InstanceGroupManagersAbandonInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a HttpHealthCheck resource in the specified project -// using the data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/insert -func (r *HttpHealthChecksService) Insert(project string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksInsertCall { - c := &HttpHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AbandonInstances: Schedules a group action to remove the specified +// instances from the managed instance group. 
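compute.images.testIamPermissions echoes back the subset of the listed permissions the caller actually holds. A hedged sketch; the Permissions fields on the request and response types, and the permission strings themselves, are assumptions for illustration:

```go
package main

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed vendored version
)

// checkImagePermissions reports which of the listed permissions the caller has
// on the given image.
func checkImagePermissions(ctx context.Context, svc *compute.Service, project, image string) error {
	req := &compute.TestPermissionsRequest{ // Permissions field assumed
		Permissions: []string{"compute.images.get", "compute.images.setLabels"},
	}
	resp, err := svc.Images.TestIamPermissions(project, image, req).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Println("granted:", resp.Permissions) // response field assumed
	return nil
}
```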
Abandoning an instance +// does not delete the instance, but it does remove the instance from +// any target pools that are applied by the managed instance group. This +// method reduces the targetSize of the managed instance group by the +// number of instances that you abandon. This operation is marked as +// DONE when the action is scheduled even if the instances have not yet +// been removed from the group. You must separately verify the status of +// the abandoning action with the listmanagedinstances method. +// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or +// deleted. +// +// You can specify a maximum of 1000 instances with this method per +// request. +func (r *InstanceGroupManagersService) AbandonInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersabandoninstancesrequest *InstanceGroupManagersAbandonInstancesRequest) *InstanceGroupManagersAbandonInstancesCall { + c := &InstanceGroupManagersAbandonInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httphealthcheck = httphealthcheck + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagersabandoninstancesrequest = instancegroupmanagersabandoninstancesrequest return c } @@ -40383,7 +54058,7 @@ func (r *HttpHealthChecksService) Insert(project string, httphealthcheck *HttpHe // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *HttpHealthChecksInsertCall) RequestId(requestId string) *HttpHealthChecksInsertCall { +func (c *InstanceGroupManagersAbandonInstancesCall) RequestId(requestId string) *InstanceGroupManagersAbandonInstancesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -40391,7 +54066,7 @@ func (c *HttpHealthChecksInsertCall) RequestId(requestId string) *HttpHealthChec // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpHealthChecksInsertCall { +func (c *InstanceGroupManagersAbandonInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersAbandonInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -40399,51 +54074,53 @@ func (c *HttpHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpHealthChe // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpHealthChecksInsertCall) Context(ctx context.Context) *HttpHealthChecksInsertCall { +func (c *InstanceGroupManagersAbandonInstancesCall) Context(ctx context.Context) *InstanceGroupManagersAbandonInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *HttpHealthChecksInsertCall) Header() http.Header { +func (c *InstanceGroupManagersAbandonInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersabandoninstancesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpHealthChecks.insert" call. +// Do executes the "compute.instanceGroupManagers.abandonInstances" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HttpHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -40474,13 +54151,21 @@ func (c *HttpHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Creates a HttpHealthCheck resource in the specified project using the data included in the request.", + // "description": "Schedules a group action to remove the specified instances from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. 
You must separately verify the status of the abandoning action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", // "httpMethod": "POST", - // "id": "compute.httpHealthChecks.insert", + // "id": "compute.instanceGroupManagers.abandonInstances", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instanceGroupManager" // ], // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -40492,11 +54177,17 @@ func (c *HttpHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operatio // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/httpHealthChecks", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", // "request": { - // "$ref": "HttpHealthCheck" + // "$ref": "InstanceGroupManagersAbandonInstancesRequest" // }, // "response": { // "$ref": "Operation" @@ -40509,9 +54200,9 @@ func (c *HttpHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operatio } -// method id "compute.httpHealthChecks.list": +// method id "compute.instanceGroupManagers.aggregatedList": -type HttpHealthChecksListCall struct { +type InstanceGroupManagersAggregatedListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -40520,11 +54211,10 @@ type HttpHealthChecksListCall struct { header_ http.Header } -// List: Retrieves the list of HttpHealthCheck resources available to -// the specified project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/list -func (r *HttpHealthChecksService) List(project string) *HttpHealthChecksListCall { - c := &HttpHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves the list of managed instance groups and +// groups them by zone. +func (r *InstanceGroupManagersService) AggregatedList(project string) *InstanceGroupManagersAggregatedListCall { + c := &InstanceGroupManagersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -40555,7 +54245,7 @@ func (r *HttpHealthChecksService) List(project string) *HttpHealthChecksListCall // true) (zone eq us-central1-f). 
Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *HttpHealthChecksListCall) Filter(filter string) *HttpHealthChecksListCall { +func (c *InstanceGroupManagersAggregatedListCall) Filter(filter string) *InstanceGroupManagersAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -40566,7 +54256,7 @@ func (c *HttpHealthChecksListCall) Filter(filter string) *HttpHealthChecksListCa // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *HttpHealthChecksListCall) MaxResults(maxResults int64) *HttpHealthChecksListCall { +func (c *InstanceGroupManagersAggregatedListCall) MaxResults(maxResults int64) *InstanceGroupManagersAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -40583,7 +54273,7 @@ func (c *HttpHealthChecksListCall) MaxResults(maxResults int64) *HttpHealthCheck // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *HttpHealthChecksListCall) OrderBy(orderBy string) *HttpHealthChecksListCall { +func (c *InstanceGroupManagersAggregatedListCall) OrderBy(orderBy string) *InstanceGroupManagersAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -40591,7 +54281,7 @@ func (c *HttpHealthChecksListCall) OrderBy(orderBy string) *HttpHealthChecksList // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *HttpHealthChecksListCall) PageToken(pageToken string) *HttpHealthChecksListCall { +func (c *InstanceGroupManagersAggregatedListCall) PageToken(pageToken string) *InstanceGroupManagersAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -40599,7 +54289,7 @@ func (c *HttpHealthChecksListCall) PageToken(pageToken string) *HttpHealthChecks // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpHealthChecksListCall) Fields(s ...googleapi.Field) *HttpHealthChecksListCall { +func (c *InstanceGroupManagersAggregatedListCall) Fields(s ...googleapi.Field) *InstanceGroupManagersAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -40609,7 +54299,7 @@ func (c *HttpHealthChecksListCall) Fields(s ...googleapi.Field) *HttpHealthCheck // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *HttpHealthChecksListCall) IfNoneMatch(entityTag string) *HttpHealthChecksListCall { +func (c *InstanceGroupManagersAggregatedListCall) IfNoneMatch(entityTag string) *InstanceGroupManagersAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -40617,21 +54307,21 @@ func (c *HttpHealthChecksListCall) IfNoneMatch(entityTag string) *HttpHealthChec // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
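The instanceGroupManagers.abandonInstances call documented a little above takes project, zone, and group name plus a request listing the instances to drop (as full instance URLs). A hedged sketch; the Instances field name is assumed from the request schema, and as the method doc notes, the returned Operation is DONE when the action is scheduled, not when the instances are actually gone:

```go
package main

import (
	"context"

	compute "google.golang.org/api/compute/v0.beta" // assumed vendored version
)

// abandonOne removes a single instance from a managed instance group without
// deleting the VM itself, mirroring the semantics described above.
func abandonOne(ctx context.Context, svc *compute.Service, project, zone, group, instanceURL string) (*compute.Operation, error) {
	req := &compute.InstanceGroupManagersAbandonInstancesRequest{ // Instances field assumed
		Instances: []string{instanceURL},
	}
	return svc.InstanceGroupManagers.AbandonInstances(project, zone, group, req).
		RequestId("22222222-3333-4444-5555-666666666666"). // optional idempotency token
		Context(ctx).
		Do()
}
```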
-func (c *HttpHealthChecksListCall) Context(ctx context.Context) *HttpHealthChecksListCall { +func (c *InstanceGroupManagersAggregatedListCall) Context(ctx context.Context) *InstanceGroupManagersAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpHealthChecksListCall) Header() http.Header { +func (c *InstanceGroupManagersAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpHealthChecksListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -40642,7 +54332,7 @@ func (c *HttpHealthChecksListCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instanceGroupManagers") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders @@ -40652,14 +54342,15 @@ func (c *HttpHealthChecksListCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpHealthChecks.list" call. -// Exactly one of *HttpHealthCheckList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *HttpHealthCheckList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealthCheckList, error) { +// Do executes the "compute.instanceGroupManagers.aggregatedList" call. +// Exactly one of *InstanceGroupManagerAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *InstanceGroupManagerAggregatedList.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagerAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -40678,7 +54369,7 @@ func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealth if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HttpHealthCheckList{ + ret := &InstanceGroupManagerAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -40690,9 +54381,9 @@ func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealth } return ret, nil // { - // "description": "Retrieves the list of HttpHealthCheck resources available to the specified project.", + // "description": "Retrieves the list of managed instance groups and groups them by zone.", // "httpMethod": "GET", - // "id": "compute.httpHealthChecks.list", + // "id": "compute.instanceGroupManagers.aggregatedList", // "parameterOrder": [ // "project" // ], @@ -40728,9 +54419,9 @@ func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealth // "type": "string" // } // }, - // "path": "{project}/global/httpHealthChecks", + // "path": "{project}/aggregated/instanceGroupManagers", // "response": { - // "$ref": "HttpHealthCheckList" + // "$ref": "InstanceGroupManagerAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -40744,7 +54435,7 @@ func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealth // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *HttpHealthChecksListCall) Pages(ctx context.Context, f func(*HttpHealthCheckList) error) error { +func (c *InstanceGroupManagersAggregatedListCall) Pages(ctx context.Context, f func(*InstanceGroupManagerAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -40762,53 +54453,35 @@ func (c *HttpHealthChecksListCall) Pages(ctx context.Context, f func(*HttpHealth } } -// method id "compute.httpHealthChecks.patch": +// method id "compute.instanceGroupManagers.applyUpdatesToInstances": -type HttpHealthChecksPatchCall struct { - s *Service - project string - httpHealthCheck string - httphealthcheck *HttpHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersApplyUpdatesToInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersapplyupdatesrequest *InstanceGroupManagersApplyUpdatesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates a HttpHealthCheck resource in the specified project -// using the data included in the request. This method supports PATCH -// semantics and uses the JSON merge patch format and processing rules. -// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/patch -func (r *HttpHealthChecksService) Patch(project string, httpHealthCheck string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksPatchCall { - c := &HttpHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ApplyUpdatesToInstances: Apply changes to selected instances on the +// managed instance group. This method can be used to apply new +// overrides and/or new versions. 
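compute.instanceGroupManagers.aggregatedList returns results keyed by scope ("zones/<zone>"), so consumers iterate a map rather than a flat slice. A hedged sketch using the Pages helper shown above; the Items map, the InstanceGroupManagersScopedList element type, and the manager fields printed here are assumptions about the aggregated-list schema:

```go
package main

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed vendored version
)

// listManagersByZone prints every managed instance group in the project,
// grouped by the zone scope returned in the aggregated list.
func listManagersByZone(ctx context.Context, svc *compute.Service, project string) error {
	call := svc.InstanceGroupManagers.AggregatedList(project).MaxResults(250)
	return call.Pages(ctx, func(page *compute.InstanceGroupManagerAggregatedList) error {
		for scope, scoped := range page.Items { // map keyed by "zones/<zone>"; field names assumed
			for _, mgr := range scoped.InstanceGroupManagers {
				fmt.Printf("%s: %s (target size %d)\n", scope, mgr.Name, mgr.TargetSize)
			}
		}
		return nil
	})
}
```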
+func (r *InstanceGroupManagersService) ApplyUpdatesToInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersapplyupdatesrequest *InstanceGroupManagersApplyUpdatesRequest) *InstanceGroupManagersApplyUpdatesToInstancesCall { + c := &InstanceGroupManagersApplyUpdatesToInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpHealthCheck = httpHealthCheck - c.httphealthcheck = httphealthcheck - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *HttpHealthChecksPatchCall) RequestId(requestId string) *HttpHealthChecksPatchCall { - c.urlParams_.Set("requestId", requestId) + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagersapplyupdatesrequest = instancegroupmanagersapplyupdatesrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpHealthChecksPatchCall { +func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersApplyUpdatesToInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -40816,52 +54489,53 @@ func (c *HttpHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpHealthChec // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpHealthChecksPatchCall) Context(ctx context.Context) *HttpHealthChecksPatchCall { +func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Context(ctx context.Context) *InstanceGroupManagersApplyUpdatesToInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *HttpHealthChecksPatchCall) Header() http.Header { +func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersapplyupdatesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpHealthCheck": c.httpHealthCheck, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpHealthChecks.patch" call. +// Do executes the "compute.instanceGroupManagers.applyUpdatesToInstances" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -40892,18 +54566,18 @@ func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.httpHealthChecks.patch", + // "description": "Apply changes to selected instances on the managed instance group. 
This method can be used to apply new overrides and/or new versions.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.applyUpdatesToInstances", // "parameterOrder": [ // "project", - // "httpHealthCheck" + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "httpHealthCheck": { - // "description": "Name of the HttpHealthCheck resource to patch.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group, should conform to RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -40914,54 +54588,75 @@ func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "zone": { + // "description": "The name of the zone where the managed instance group is located. Should conform to RFC1035.", + // "location": "path", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances", // "request": { - // "$ref": "HttpHealthCheck" + // "$ref": "InstanceGroupManagersApplyUpdatesRequest" // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.httpHealthChecks.testIamPermissions": +// method id "compute.instanceGroupManagers.delete": -type HttpHealthChecksTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersDeleteCall struct { + s *Service + project string + zone string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *HttpHealthChecksService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *HttpHealthChecksTestIamPermissionsCall { - c := &HttpHealthChecksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified managed instance group and all of the +// instances in that group. Note that the instance group must not belong +// to a backend service. Read Deleting an instance group for more +// information. 
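Editor's note: for the delete call just described (the group must not be referenced by a backend service), a hedged sketch of issuing it with an idempotency requestId, reusing a *compute.Service built as in the earlier sketch; the UUID and resource names are placeholders:

package example

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path; match the version vendored here
)

// deleteMIG deletes a managed instance group and all of its instances.
// The requestId makes a retried call safe: the server ignores a duplicate
// request carrying the same ID, as described in the hunks above.
func deleteMIG(ctx context.Context, svc *compute.Service) {
	project, zone, mig := "my-project", "us-central1-f", "my-mig" // hypothetical
	requestID := "f5a9c1d2-7b34-4e1a-9c0d-2b6e8a4f1c37"           // placeholder; use a fresh, valid UUID

	op, err := svc.InstanceGroupManagers.Delete(project, zone, mig).
		RequestId(requestID).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("delete scheduled, operation %s: %s", op.Name, op.Status)
}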
+func (r *InstanceGroupManagersService) Delete(project string, zone string, instanceGroupManager string) *InstanceGroupManagersDeleteCall { + c := &InstanceGroupManagersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.zone = zone + c.instanceGroupManager = instanceGroupManager + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupManagersDeleteCall) RequestId(requestId string) *InstanceGroupManagersDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpHealthChecksTestIamPermissionsCall) Fields(s ...googleapi.Field) *HttpHealthChecksTestIamPermissionsCall { +func (c *InstanceGroupManagersDeleteCall) Fields(s ...googleapi.Field) *InstanceGroupManagersDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -40969,52 +54664,48 @@ func (c *HttpHealthChecksTestIamPermissionsCall) Fields(s ...googleapi.Field) *H // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpHealthChecksTestIamPermissionsCall) Context(ctx context.Context) *HttpHealthChecksTestIamPermissionsCall { +func (c *InstanceGroupManagersDeleteCall) Context(ctx context.Context) *InstanceGroupManagersDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpHealthChecksTestIamPermissionsCall) Header() http.Header { +func (c *InstanceGroupManagersDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpHealthChecksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpHealthChecks.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *HttpHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.instanceGroupManagers.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -41033,7 +54724,7 @@ func (c *HttpHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -41045,14 +54736,21 @@ func (c *HttpHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.httpHealthChecks.testIamPermissions", + // "description": "Deletes the specified managed instance group and all of the instances in that group. Note that the instance group must not belong to a backend service. Read Deleting an instance group for more information.", + // "httpMethod": "DELETE", + // "id": "compute.instanceGroupManagers.delete", // "parameterOrder": [ // "project", - // "resource" + // "zone", + // "instanceGroupManager" // ], // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group to delete.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -41060,50 +54758,65 @@ func (c *HttpHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name of the resource for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/httpHealthChecks/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.httpHealthChecks.update": +// method id "compute.instanceGroupManagers.deleteInstances": -type HttpHealthChecksUpdateCall struct { - s *Service - project string - httpHealthCheck string - httphealthcheck *HttpHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersDeleteInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersdeleteinstancesrequest *InstanceGroupManagersDeleteInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates a HttpHealthCheck resource in the specified project -// using the data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/update -func (r *HttpHealthChecksService) Update(project string, httpHealthCheck string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksUpdateCall { - c := &HttpHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// DeleteInstances: Schedules a group action to delete the specified +// instances in the managed instance group. The instances are also +// removed from any target pools of which they were a member. This +// method reduces the targetSize of the managed instance group by the +// number of instances that you delete. This operation is marked as DONE +// when the action is scheduled even if the instances are still being +// deleted. You must separately verify the status of the deleting action +// with the listmanagedinstances method. +// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or +// deleted. +// +// You can specify a maximum of 1000 instances with this method per +// request. 
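Editor's note: the deleteInstances call described above schedules the deletion and returns an Operation that is DONE once the action is scheduled, not once the instances are gone. A hedged sketch, with the instance URL format and the Instances field assumed from the vendored request type:

package example

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path; match the version vendored here
)

// deleteTwoInstances schedules deletion of specific instances in a managed
// instance group. Actual instance state must be verified separately, for
// example with the listManagedInstances call, as the description above notes.
func deleteTwoInstances(ctx context.Context, svc *compute.Service) {
	project, zone, mig := "my-project", "us-central1-f", "my-mig" // hypothetical

	req := &compute.InstanceGroupManagersDeleteInstancesRequest{
		// Instance URLs are assumed to be the selector here; at most 1000
		// instances per call, per the method description.
		Instances: []string{
			"https://www.googleapis.com/compute/beta/projects/my-project/zones/us-central1-f/instances/my-mig-abcd",
			"https://www.googleapis.com/compute/beta/projects/my-project/zones/us-central1-f/instances/my-mig-efgh",
		},
	}
	op, err := svc.InstanceGroupManagers.DeleteInstances(project, zone, mig, req).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("deleteInstances scheduled, operation %s: %s", op.Name, op.Status)
}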
+func (r *InstanceGroupManagersService) DeleteInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersdeleteinstancesrequest *InstanceGroupManagersDeleteInstancesRequest) *InstanceGroupManagersDeleteInstancesCall { + c := &InstanceGroupManagersDeleteInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpHealthCheck = httpHealthCheck - c.httphealthcheck = httphealthcheck + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagersdeleteinstancesrequest = instancegroupmanagersdeleteinstancesrequest return c } @@ -41121,7 +54834,7 @@ func (r *HttpHealthChecksService) Update(project string, httpHealthCheck string, // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *HttpHealthChecksUpdateCall) RequestId(requestId string) *HttpHealthChecksUpdateCall { +func (c *InstanceGroupManagersDeleteInstancesCall) RequestId(requestId string) *InstanceGroupManagersDeleteInstancesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -41129,7 +54842,7 @@ func (c *HttpHealthChecksUpdateCall) RequestId(requestId string) *HttpHealthChec // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpHealthChecksUpdateCall { +func (c *InstanceGroupManagersDeleteInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersDeleteInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -41137,52 +54850,53 @@ func (c *HttpHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpHealthChe // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpHealthChecksUpdateCall) Context(ctx context.Context) *HttpHealthChecksUpdateCall { +func (c *InstanceGroupManagersDeleteInstancesCall) Context(ctx context.Context) *InstanceGroupManagersDeleteInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpHealthChecksUpdateCall) Header() http.Header { +func (c *InstanceGroupManagersDeleteInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersdeleteinstancesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpHealthCheck": c.httpHealthCheck, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpHealthChecks.update" call. +// Do executes the "compute.instanceGroupManagers.deleteInstances" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HttpHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -41213,18 +54927,18 @@ func (c *HttpHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request.", - // "httpMethod": "PUT", - // "id": "compute.httpHealthChecks.update", + // "description": "Schedules a group action to delete the specified instances in the managed instance group. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. This operation is marked as DONE when the action is scheduled even if the instances are still being deleted. You must separately verify the status of the deleting action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.deleteInstances", // "parameterOrder": [ // "project", - // "httpHealthCheck" + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "httpHealthCheck": { - // "description": "Name of the HttpHealthCheck resource to update.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -41239,11 +54953,17 @@ func (c *HttpHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operatio // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", // "request": { - // "$ref": "HttpHealthCheck" + // "$ref": "InstanceGroupManagersDeleteInstancesRequest" // }, // "response": { // "$ref": "Operation" @@ -41256,48 +54976,34 @@ func (c *HttpHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operatio } -// method id "compute.httpsHealthChecks.delete": +// method id "compute.instanceGroupManagers.deletePerInstanceConfigs": -type HttpsHealthChecksDeleteCall struct { - s *Service - project string - httpsHealthCheck string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersDeletePerInstanceConfigsCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersdeleteperinstanceconfigsreq *InstanceGroupManagersDeletePerInstanceConfigsReq + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified HttpsHealthCheck resource. -func (r *HttpsHealthChecksService) Delete(project string, httpsHealthCheck string) *HttpsHealthChecksDeleteCall { - c := &HttpsHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// DeletePerInstanceConfigs: Delete selected per-instance configs for +// the managed instance group. +func (r *InstanceGroupManagersService) DeletePerInstanceConfigs(project string, zone string, instanceGroupManager string, instancegroupmanagersdeleteperinstanceconfigsreq *InstanceGroupManagersDeletePerInstanceConfigsReq) *InstanceGroupManagersDeletePerInstanceConfigsCall { + c := &InstanceGroupManagersDeletePerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpsHealthCheck = httpsHealthCheck - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *HttpsHealthChecksDeleteCall) RequestId(requestId string) *HttpsHealthChecksDeleteCall { - c.urlParams_.Set("requestId", requestId) + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagersdeleteperinstanceconfigsreq = instancegroupmanagersdeleteperinstanceconfigsreq return c } // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpsHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpsHealthChecksDeleteCall { +func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersDeletePerInstanceConfigsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -41305,47 +55011,53 @@ func (c *HttpsHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpsHealthC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpsHealthChecksDeleteCall) Context(ctx context.Context) *HttpsHealthChecksDeleteCall { +func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) Context(ctx context.Context) *InstanceGroupManagersDeletePerInstanceConfigsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpsHealthChecksDeleteCall) Header() http.Header { +func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpsHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersdeleteperinstanceconfigsreq) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deletePerInstanceConfigs") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpsHealthCheck": c.httpsHealthCheck, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.delete" call. +// Do executes the "compute.instanceGroupManagers.deletePerInstanceConfigs" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -41376,18 +55088,18 @@ func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Deletes the specified HttpsHealthCheck resource.", - // "httpMethod": "DELETE", - // "id": "compute.httpsHealthChecks.delete", + // "description": "Delete selected per-instance configs for the managed instance group.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.deletePerInstanceConfigs", // "parameterOrder": [ // "project", - // "httpsHealthCheck" + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "httpsHealthCheck": { - // "description": "Name of the HttpsHealthCheck resource to delete.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group. It should conform to RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -41398,13 +55110,17 @@ func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operati // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "zone": { + // "description": "The name of the zone where the managed instance group is located. It should conform to RFC1035.", + // "location": "path", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deletePerInstanceConfigs", + // "request": { + // "$ref": "InstanceGroupManagersDeletePerInstanceConfigsReq" + // }, // "response": { // "$ref": "Operation" // }, @@ -41416,31 +55132,34 @@ func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.httpsHealthChecks.get": +// method id "compute.instanceGroupManagers.get": -type HttpsHealthChecksGetCall struct { - s *Service - project string - httpsHealthCheck string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersGetCall struct { + s *Service + project string + zone string + instanceGroupManager string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified HttpsHealthCheck resource. Get a list of -// available HTTPS health checks by making a list() request. -func (r *HttpsHealthChecksService) Get(project string, httpsHealthCheck string) *HttpsHealthChecksGetCall { - c := &HttpsHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns all of the details about the specified managed instance +// group. 
Get a list of available managed instance groups by making a +// list() request. +func (r *InstanceGroupManagersService) Get(project string, zone string, instanceGroupManager string) *InstanceGroupManagersGetCall { + c := &InstanceGroupManagersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpsHealthCheck = httpsHealthCheck + c.zone = zone + c.instanceGroupManager = instanceGroupManager return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpsHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpsHealthChecksGetCall { +func (c *InstanceGroupManagersGetCall) Fields(s ...googleapi.Field) *InstanceGroupManagersGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -41450,7 +55169,7 @@ func (c *HttpsHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpsHealthChec // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *HttpsHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpsHealthChecksGetCall { +func (c *InstanceGroupManagersGetCall) IfNoneMatch(entityTag string) *InstanceGroupManagersGetCall { c.ifNoneMatch_ = entityTag return c } @@ -41458,21 +55177,21 @@ func (c *HttpsHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpsHealthChe // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpsHealthChecksGetCall) Context(ctx context.Context) *HttpsHealthChecksGetCall { +func (c *InstanceGroupManagersGetCall) Context(ctx context.Context) *InstanceGroupManagersGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpsHealthChecksGetCall) Header() http.Header { +func (c *InstanceGroupManagersGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpsHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -41483,25 +55202,26 @@ func (c *HttpsHealthChecksGetCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpsHealthCheck": c.httpsHealthCheck, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.get" call. -// Exactly one of *HttpsHealthCheck or error will be non-nil. Any +// Do executes the "compute.instanceGroupManagers.get" call. +// Exactly one of *InstanceGroupManager or error will be non-nil. Any // non-2xx status code is an error. 
Response headers are in either -// *HttpsHealthCheck.ServerResponse.Header or (if a response was +// *InstanceGroupManager.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealthCheck, error) { +func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManager, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -41520,7 +55240,7 @@ func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealt if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HttpsHealthCheck{ + ret := &InstanceGroupManager{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -41532,18 +55252,18 @@ func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealt } return ret, nil // { - // "description": "Returns the specified HttpsHealthCheck resource. Get a list of available HTTPS health checks by making a list() request.", + // "description": "Returns all of the details about the specified managed instance group. Get a list of available managed instance groups by making a list() request.", // "httpMethod": "GET", - // "id": "compute.httpsHealthChecks.get", + // "id": "compute.instanceGroupManagers.get", // "parameterOrder": [ // "project", - // "httpsHealthCheck" + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "httpsHealthCheck": { - // "description": "Name of the HttpsHealthCheck resource to return.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -41553,11 +55273,17 @@ func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealt // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", // "response": { - // "$ref": "HttpsHealthCheck" + // "$ref": "InstanceGroupManager" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -41568,23 +55294,33 @@ func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealt } -// method id "compute.httpsHealthChecks.insert": +// method id "compute.instanceGroupManagers.insert": -type HttpsHealthChecksInsertCall struct { - s *Service - project string - httpshealthcheck *HttpsHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersInsertCall struct { + s *Service + project string + zone string + instancegroupmanager *InstanceGroupManager + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a HttpsHealthCheck resource in the specified project -// using the data included in the request. 
-func (r *HttpsHealthChecksService) Insert(project string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksInsertCall { - c := &HttpsHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a managed instance group using the information that +// you specify in the request. After the group is created, it schedules +// an action to create instances in the group using the specified +// instance template. This operation is marked as DONE when the group is +// created even if the instances in the group have not yet been created. +// You must separately verify the status of the individual instances +// with the listmanagedinstances method. +// +// A managed instance group can have up to 1000 VM instances per group. +// Please contact Cloud Support if you need an increase in this limit. +func (r *InstanceGroupManagersService) Insert(project string, zone string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersInsertCall { + c := &InstanceGroupManagersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpshealthcheck = httpshealthcheck + c.zone = zone + c.instancegroupmanager = instancegroupmanager return c } @@ -41602,7 +55338,7 @@ func (r *HttpsHealthChecksService) Insert(project string, httpshealthcheck *Http // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *HttpsHealthChecksInsertCall) RequestId(requestId string) *HttpsHealthChecksInsertCall { +func (c *InstanceGroupManagersInsertCall) RequestId(requestId string) *InstanceGroupManagersInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -41610,7 +55346,7 @@ func (c *HttpsHealthChecksInsertCall) RequestId(requestId string) *HttpsHealthCh // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpsHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpsHealthChecksInsertCall { +func (c *InstanceGroupManagersInsertCall) Fields(s ...googleapi.Field) *InstanceGroupManagersInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -41618,51 +55354,52 @@ func (c *HttpsHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpsHealthC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpsHealthChecksInsertCall) Context(ctx context.Context) *HttpsHealthChecksInsertCall { +func (c *InstanceGroupManagersInsertCall) Context(ctx context.Context) *InstanceGroupManagersInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *HttpsHealthChecksInsertCall) Header() http.Header { +func (c *InstanceGroupManagersInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpsHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.insert" call. +// Do executes the "compute.instanceGroupManagers.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -41693,11 +55430,12 @@ func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Creates a HttpsHealthCheck resource in the specified project using the data included in the request.", + // "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, it schedules an action to create instances in the group using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method.\n\nA managed instance group can have up to 1000 VM instances per group. Please contact Cloud Support if you need an increase in this limit.", // "httpMethod": "POST", - // "id": "compute.httpsHealthChecks.insert", + // "id": "compute.instanceGroupManagers.insert", // "parameterOrder": [ - // "project" + // "project", + // "zone" // ], // "parameters": { // "project": { @@ -41711,11 +55449,17 @@ func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operati // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where you want to create the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/httpsHealthChecks", + // "path": "{project}/zones/{zone}/instanceGroupManagers", // "request": { - // "$ref": "HttpsHealthCheck" + // "$ref": "InstanceGroupManager" // }, // "response": { // "$ref": "Operation" @@ -41728,22 +55472,24 @@ func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.httpsHealthChecks.list": +// method id "compute.instanceGroupManagers.list": -type HttpsHealthChecksListCall struct { +type InstanceGroupManagersListCall struct { s *Service project string + zone string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves the list of HttpsHealthCheck resources available to -// the specified project. -func (r *HttpsHealthChecksService) List(project string) *HttpsHealthChecksListCall { - c := &HttpsHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of managed instance groups that are contained +// within the specified project and zone. +func (r *InstanceGroupManagersService) List(project string, zone string) *InstanceGroupManagersListCall { + c := &InstanceGroupManagersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone return c } @@ -41773,7 +55519,7 @@ func (r *HttpsHealthChecksService) List(project string) *HttpsHealthChecksListCa // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *HttpsHealthChecksListCall) Filter(filter string) *HttpsHealthChecksListCall { +func (c *InstanceGroupManagersListCall) Filter(filter string) *InstanceGroupManagersListCall { c.urlParams_.Set("filter", filter) return c } @@ -41784,7 +55530,7 @@ func (c *HttpsHealthChecksListCall) Filter(filter string) *HttpsHealthChecksList // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *HttpsHealthChecksListCall) MaxResults(maxResults int64) *HttpsHealthChecksListCall { +func (c *InstanceGroupManagersListCall) MaxResults(maxResults int64) *InstanceGroupManagersListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -41801,7 +55547,7 @@ func (c *HttpsHealthChecksListCall) MaxResults(maxResults int64) *HttpsHealthChe // // Currently, only sorting by name or creationTimestamp desc is // supported. 
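Editor's note: the list call renamed in these hunks keeps the usual filter, maxResults, orderBy, and pageToken options. A hedged sketch of combining them with the Pages helper the generated client provides for list calls; Items, Name, and TargetSize are assumed field names on the vendored types, and setup is as in the earlier sketch:

package example

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path; match the version vendored here
)

// listMIGs walks every page of managed instance groups in one zone,
// newest first, using the filter and orderBy syntax quoted in the hunks above.
func listMIGs(ctx context.Context, svc *compute.Service) {
	project, zone := "my-project", "us-central1-f" // hypothetical

	call := svc.InstanceGroupManagers.List(project, zone).
		Filter("name ne example-instance"). // field_name comparison_string literal_string
		OrderBy("creationTimestamp desc").  // only name or creationTimestamp desc is supported
		MaxResults(100)                     // 0 to 500, default 500

	// Pages halts on the first error from the callback and resets the page
	// token to its original value when it returns.
	err := call.Pages(ctx, func(page *compute.InstanceGroupManagerList) error {
		for _, mig := range page.Items {
			log.Printf("%s (target size %d)", mig.Name, mig.TargetSize)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}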
-func (c *HttpsHealthChecksListCall) OrderBy(orderBy string) *HttpsHealthChecksListCall { +func (c *InstanceGroupManagersListCall) OrderBy(orderBy string) *InstanceGroupManagersListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -41809,7 +55555,7 @@ func (c *HttpsHealthChecksListCall) OrderBy(orderBy string) *HttpsHealthChecksLi // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *HttpsHealthChecksListCall) PageToken(pageToken string) *HttpsHealthChecksListCall { +func (c *InstanceGroupManagersListCall) PageToken(pageToken string) *InstanceGroupManagersListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -41817,7 +55563,7 @@ func (c *HttpsHealthChecksListCall) PageToken(pageToken string) *HttpsHealthChec // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpsHealthChecksListCall) Fields(s ...googleapi.Field) *HttpsHealthChecksListCall { +func (c *InstanceGroupManagersListCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -41827,7 +55573,7 @@ func (c *HttpsHealthChecksListCall) Fields(s ...googleapi.Field) *HttpsHealthChe // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *HttpsHealthChecksListCall) IfNoneMatch(entityTag string) *HttpsHealthChecksListCall { +func (c *InstanceGroupManagersListCall) IfNoneMatch(entityTag string) *InstanceGroupManagersListCall { c.ifNoneMatch_ = entityTag return c } @@ -41835,21 +55581,21 @@ func (c *HttpsHealthChecksListCall) IfNoneMatch(entityTag string) *HttpsHealthCh // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpsHealthChecksListCall) Context(ctx context.Context) *HttpsHealthChecksListCall { +func (c *InstanceGroupManagersListCall) Context(ctx context.Context) *InstanceGroupManagersListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpsHealthChecksListCall) Header() http.Header { +func (c *InstanceGroupManagersListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpsHealthChecksListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -41860,24 +55606,25 @@ func (c *HttpsHealthChecksListCall) doRequest(alt string) (*http.Response, error } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.list" call. 
-// Exactly one of *HttpsHealthCheckList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *HttpsHealthCheckList.ServerResponse.Header or (if a response was +// Do executes the "compute.instanceGroupManagers.list" call. +// Exactly one of *InstanceGroupManagerList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *InstanceGroupManagerList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHealthCheckList, error) { +func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagerList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -41896,7 +55643,7 @@ func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHeal if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HttpsHealthCheckList{ + ret := &InstanceGroupManagerList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -41908,11 +55655,12 @@ func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHeal } return ret, nil // { - // "description": "Retrieves the list of HttpsHealthCheck resources available to the specified project.", + // "description": "Retrieves a list of managed instance groups that are contained within the specified project and zone.", // "httpMethod": "GET", - // "id": "compute.httpsHealthChecks.list", + // "id": "compute.instanceGroupManagers.list", // "parameterOrder": [ - // "project" + // "project", + // "zone" // ], // "parameters": { // "filter": { @@ -41944,205 +55692,17 @@ func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHeal // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // } - // }, - // "path": "{project}/global/httpsHealthChecks", - // "response": { - // "$ref": "HttpsHealthCheckList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *HttpsHealthChecksListCall) Pages(ctx context.Context, f func(*HttpsHealthCheckList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.httpsHealthChecks.patch": - -type HttpsHealthChecksPatchCall struct { - s *Service - project string - httpsHealthCheck string - httpshealthcheck *HttpsHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Patch: Updates a HttpsHealthCheck resource in the specified project -// using the data included in the request. 
This method supports PATCH -// semantics and uses the JSON merge patch format and processing rules. -func (r *HttpsHealthChecksService) Patch(project string, httpsHealthCheck string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksPatchCall { - c := &HttpsHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.httpsHealthCheck = httpsHealthCheck - c.httpshealthcheck = httpshealthcheck - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *HttpsHealthChecksPatchCall) RequestId(requestId string) *HttpsHealthChecksPatchCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *HttpsHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpsHealthChecksPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *HttpsHealthChecksPatchCall) Context(ctx context.Context) *HttpsHealthChecksPatchCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *HttpsHealthChecksPatchCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *HttpsHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpsHealthCheck": c.httpsHealthCheck, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.httpsHealthChecks.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. 
-func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.httpsHealthChecks.patch", - // "parameterOrder": [ - // "project", - // "httpsHealthCheck" - // ], - // "parameters": { - // "httpsHealthCheck": { - // "description": "Name of the HttpsHealthCheck resource to patch.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", - // "request": { - // "$ref": "HttpsHealthCheck" - // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers", // "response": { - // "$ref": "Operation" + // "$ref": "InstanceGroupManagerList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -42153,32 +55713,81 @@ func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operatio } -// method id "compute.httpsHealthChecks.testIamPermissions": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *InstanceGroupManagersListCall) Pages(ctx context.Context, f func(*InstanceGroupManagerList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type HttpsHealthChecksTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.instanceGroupManagers.listManagedInstances": + +type InstanceGroupManagersListManagedInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *HttpsHealthChecksService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *HttpsHealthChecksTestIamPermissionsCall { - c := &HttpsHealthChecksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListManagedInstances: Lists all of the instances in the managed +// instance group. Each instance in the list has a currentAction, which +// indicates the action that the managed instance group is performing on +// the instance. For example, if the group is still creating an +// instance, the currentAction is CREATING. If a previous action failed, +// the list displays the errors for that failed action. +func (r *InstanceGroupManagersService) ListManagedInstances(project string, zone string, instanceGroupManager string) *InstanceGroupManagersListManagedInstancesCall { + c := &InstanceGroupManagersListManagedInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.zone = zone + c.instanceGroupManager = instanceGroupManager + return c +} + +// Filter sets the optional parameter "filter": +func (c *InstanceGroupManagersListManagedInstancesCall) Filter(filter string) *InstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": +func (c *InstanceGroupManagersListManagedInstancesCall) MaxResults(maxResults int64) *InstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "order_by": +func (c *InstanceGroupManagersListManagedInstancesCall) OrderBy(orderBy string) *InstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("order_by", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": +func (c *InstanceGroupManagersListManagedInstancesCall) PageToken(pageToken string) *InstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *HttpsHealthChecksTestIamPermissionsCall) Fields(s ...googleapi.Field) *HttpsHealthChecksTestIamPermissionsCall { +func (c *InstanceGroupManagersListManagedInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListManagedInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -42186,52 +55795,50 @@ func (c *HttpsHealthChecksTestIamPermissionsCall) Fields(s ...googleapi.Field) * // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpsHealthChecksTestIamPermissionsCall) Context(ctx context.Context) *HttpsHealthChecksTestIamPermissionsCall { +func (c *InstanceGroupManagersListManagedInstancesCall) Context(ctx context.Context) *InstanceGroupManagersListManagedInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpsHealthChecksTestIamPermissionsCall) Header() http.Header { +func (c *InstanceGroupManagersListManagedInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpsHealthChecksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *HttpsHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.instanceGroupManagers.listManagedInstances" call. +// Exactly one of *InstanceGroupManagersListManagedInstancesResponse or +// error will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *InstanceGroupManagersListManagedInstancesResponse.ServerResponse.Head +// er or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. 
+func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagersListManagedInstancesResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -42250,7 +55857,7 @@ func (c *HttpsHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOptio if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &InstanceGroupManagersListManagedInstancesResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -42262,14 +55869,40 @@ func (c *HttpsHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action.", // "httpMethod": "POST", - // "id": "compute.httpsHealthChecks.testIamPermissions", + // "id": "compute.instanceGroupManagers.listManagedInstances", // "parameterOrder": [ // "project", - // "resource" + // "zone", + // "instanceGroupManager" // ], // "parameters": { + // "filter": { + // "location": "query", + // "type": "string" + // }, + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "order_by": { + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -42277,20 +55910,16 @@ func (c *HttpsHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOptio // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name of the resource for this request.", + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/httpsHealthChecks/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "InstanceGroupManagersListManagedInstancesResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -42301,51 +55930,120 @@ func (c *HttpsHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOptio } -// method id "compute.httpsHealthChecks.update": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *InstanceGroupManagersListManagedInstancesCall) Pages(ctx context.Context, f func(*InstanceGroupManagersListManagedInstancesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type HttpsHealthChecksUpdateCall struct { - s *Service - project string - httpsHealthCheck string - httpshealthcheck *HttpsHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.instanceGroupManagers.listPerInstanceConfigs": + +type InstanceGroupManagersListPerInstanceConfigsCall struct { + s *Service + project string + zone string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates a HttpsHealthCheck resource in the specified project -// using the data included in the request. -func (r *HttpsHealthChecksService) Update(project string, httpsHealthCheck string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksUpdateCall { - c := &HttpsHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListPerInstanceConfigs: Lists all of the per-instance configs defined +// for the managed instance group. +func (r *InstanceGroupManagersService) ListPerInstanceConfigs(project string, zone string, instanceGroupManager string) *InstanceGroupManagersListPerInstanceConfigsCall { + c := &InstanceGroupManagersListPerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpsHealthCheck = httpsHealthCheck - c.httpshealthcheck = httpshealthcheck + c.zone = zone + c.instanceGroupManager = instanceGroupManager return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *HttpsHealthChecksUpdateCall) RequestId(requestId string) *HttpsHealthChecksUpdateCall { - c.urlParams_.Set("requestId", requestId) +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *InstanceGroupManagersListPerInstanceConfigsCall) Filter(filter string) *InstanceGroupManagersListPerInstanceConfigsCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *InstanceGroupManagersListPerInstanceConfigsCall) MaxResults(maxResults int64) *InstanceGroupManagersListPerInstanceConfigsCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InstanceGroupManagersListPerInstanceConfigsCall) OrderBy(orderBy string) *InstanceGroupManagersListPerInstanceConfigsCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InstanceGroupManagersListPerInstanceConfigsCall) PageToken(pageToken string) *InstanceGroupManagersListPerInstanceConfigsCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpsHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpsHealthChecksUpdateCall { +func (c *InstanceGroupManagersListPerInstanceConfigsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListPerInstanceConfigsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -42353,52 +56051,50 @@ func (c *HttpsHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpsHealthC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *HttpsHealthChecksUpdateCall) Context(ctx context.Context) *HttpsHealthChecksUpdateCall { +func (c *InstanceGroupManagersListPerInstanceConfigsCall) Context(ctx context.Context) *InstanceGroupManagersListPerInstanceConfigsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpsHealthChecksUpdateCall) Header() http.Header { +func (c *InstanceGroupManagersListPerInstanceConfigsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpsHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersListPerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpsHealthCheck": c.httpsHealthCheck, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instanceGroupManagers.listPerInstanceConfigs" call. +// Exactly one of *InstanceGroupManagersListPerInstanceConfigsResp or +// error will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *InstanceGroupManagersListPerInstanceConfigsResp.ServerResponse.Header +// or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *InstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagersListPerInstanceConfigsResp, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -42417,7 +56113,7 @@ func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operati if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InstanceGroupManagersListPerInstanceConfigsResp{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -42429,21 +56125,44 @@ func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request.", - // "httpMethod": "PUT", - // "id": "compute.httpsHealthChecks.update", + // "description": "Lists all of the per-instance configs defined for the managed instance group.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.listPerInstanceConfigs", // "parameterOrder": [ // "project", - // "httpsHealthCheck" + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "httpsHealthCheck": { - // "description": "Name of the HttpsHealthCheck resource to update.", + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "instanceGroupManager": { + // "description": "The name of the managed instance group. It should conform to RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -42451,44 +56170,73 @@ func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operati // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "zone": { + // "description": "The name of the zone where the managed instance group is located. It should conform to RFC1035.", + // "location": "path", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", - // "request": { - // "$ref": "HttpsHealthCheck" - // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs", // "response": { - // "$ref": "Operation" + // "$ref": "InstanceGroupManagersListPerInstanceConfigsResp" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.images.delete": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *InstanceGroupManagersListPerInstanceConfigsCall) Pages(ctx context.Context, f func(*InstanceGroupManagersListPerInstanceConfigsResp) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type ImagesDeleteCall struct { - s *Service - project string - image string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.instanceGroupManagers.patch": + +type InstanceGroupManagersPatchCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanager *InstanceGroupManager + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified image. -// For details, see https://cloud.google.com/compute/docs/reference/latest/images/delete -func (r *ImagesService) Delete(project string, image string) *ImagesDeleteCall { - c := &ImagesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates a managed instance group using the information that +// you specify in the request. This operation is marked as DONE when the +// group is patched even if the instances in the group are still in the +// process of being patched. You must separately verify the status of +// the individual instances with the listManagedInstances method. This +// method supports PATCH semantics and uses the JSON merge patch format +// and processing rules. +func (r *InstanceGroupManagersService) Patch(project string, zone string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersPatchCall { + c := &InstanceGroupManagersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.image = image + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanager = instancegroupmanager return c } @@ -42506,7 +56254,7 @@ func (r *ImagesService) Delete(project string, image string) *ImagesDeleteCall { // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ImagesDeleteCall) RequestId(requestId string) *ImagesDeleteCall { +func (c *InstanceGroupManagersPatchCall) RequestId(requestId string) *InstanceGroupManagersPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -42514,7 +56262,7 @@ func (c *ImagesDeleteCall) RequestId(requestId string) *ImagesDeleteCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesDeleteCall) Fields(s ...googleapi.Field) *ImagesDeleteCall { +func (c *InstanceGroupManagersPatchCall) Fields(s ...googleapi.Field) *InstanceGroupManagersPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -42522,47 +56270,53 @@ func (c *ImagesDeleteCall) Fields(s ...googleapi.Field) *ImagesDeleteCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *ImagesDeleteCall) Context(ctx context.Context) *ImagesDeleteCall { +func (c *InstanceGroupManagersPatchCall) Context(ctx context.Context) *InstanceGroupManagersPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ImagesDeleteCall) Header() http.Header { +func (c *InstanceGroupManagersPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("PATCH", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "image": c.image, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.delete" call. +// Do executes the "compute.instanceGroupManagers.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -42593,18 +56347,18 @@ func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Deletes the specified image.", - // "httpMethod": "DELETE", - // "id": "compute.images.delete", + // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listManagedInstances method. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.instanceGroupManagers.patch", // "parameterOrder": [ // "project", - // "image" + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "image": { - // "description": "Name of the image resource to delete.", + // "instanceGroupManager": { + // "description": "The name of the instance group manager.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -42619,9 +56373,18 @@ func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where you want to create the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/images/{image}", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", + // "request": { + // "$ref": "InstanceGroupManager" + // }, // "response": { // "$ref": "Operation" // }, @@ -42633,28 +56396,40 @@ func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) } -// method id "compute.images.deprecate": +// method id "compute.instanceGroupManagers.recreateInstances": -type ImagesDeprecateCall struct { - s *Service - project string - image string - deprecationstatus *DeprecationStatus - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersRecreateInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersrecreateinstancesrequest *InstanceGroupManagersRecreateInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Deprecate: Sets the deprecation status of an image. +// RecreateInstances: Schedules a group action to recreate the specified +// instances in the managed instance group. The instances are deleted +// and recreated using the current instance template for the managed +// instance group. This operation is marked as DONE when the action is +// scheduled even if the instances have not yet been recreated. You must +// separately verify the status of the recreating action with the +// listmanagedinstances method. // -// If an empty request body is given, clears the deprecation status -// instead. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/images/deprecate -func (r *ImagesService) Deprecate(project string, image string, deprecationstatus *DeprecationStatus) *ImagesDeprecateCall { - c := &ImagesDeprecateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or +// deleted. +// +// You can specify a maximum of 1000 instances with this method per +// request. +func (r *InstanceGroupManagersService) RecreateInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersrecreateinstancesrequest *InstanceGroupManagersRecreateInstancesRequest) *InstanceGroupManagersRecreateInstancesCall { + c := &InstanceGroupManagersRecreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.image = image - c.deprecationstatus = deprecationstatus + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagersrecreateinstancesrequest = instancegroupmanagersrecreateinstancesrequest return c } @@ -42664,12 +56439,15 @@ func (r *ImagesService) Deprecate(project string, image string, deprecationstatu // request if it has already been completed. // // For example, consider a situation where you make an initial request -// and then the request times out. If you make the request again with -// the same request ID, the server can check if original operation with -// the same request ID was received, and if so, will ignore the second +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second // request. This prevents clients from accidentally creating duplicate // commitments. -func (c *ImagesDeprecateCall) RequestId(requestId string) *ImagesDeprecateCall { +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupManagersRecreateInstancesCall) RequestId(requestId string) *InstanceGroupManagersRecreateInstancesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -42677,7 +56455,7 @@ func (c *ImagesDeprecateCall) RequestId(requestId string) *ImagesDeprecateCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesDeprecateCall) Fields(s ...googleapi.Field) *ImagesDeprecateCall { +func (c *InstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersRecreateInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -42685,52 +56463,53 @@ func (c *ImagesDeprecateCall) Fields(s ...googleapi.Field) *ImagesDeprecateCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesDeprecateCall) Context(ctx context.Context) *ImagesDeprecateCall { +func (c *InstanceGroupManagersRecreateInstancesCall) Context(ctx context.Context) *InstanceGroupManagersRecreateInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ImagesDeprecateCall) Header() http.Header { +func (c *InstanceGroupManagersRecreateInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesDeprecateCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.deprecationstatus) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersrecreateinstancesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}/deprecate") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "image": c.image, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.deprecate" call. +// Do executes the "compute.instanceGroupManagers.recreateInstances" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -42761,18 +56540,18 @@ func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Sets the deprecation status of an image.\n\nIf an empty request body is given, clears the deprecation status instead.", + // "description": "Schedules a group action to recreate the specified instances in the managed instance group. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the action is scheduled even if the instances have not yet been recreated. 
You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", // "httpMethod": "POST", - // "id": "compute.images.deprecate", + // "id": "compute.instanceGroupManagers.recreateInstances", // "parameterOrder": [ // "project", - // "image" + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "image": { - // "description": "Image name.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -42784,14 +56563,20 @@ func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "type": "string" // }, // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and then the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.", + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/images/{image}/deprecate", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", // "request": { - // "$ref": "DeprecationStatus" + // "$ref": "InstanceGroupManagersRecreateInstancesRequest" // }, // "response": { // "$ref": "Operation" @@ -42804,93 +56589,110 @@ func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.images.get": +// method id "compute.instanceGroupManagers.resize": -type ImagesGetCall struct { - s *Service - project string - image string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersResizeCall struct { + s *Service + project string + zone string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified image. Get a list of available images by -// making a list() request. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/images/get -func (r *ImagesService) Get(project string, image string) *ImagesGetCall { - c := &ImagesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Resize: Resizes the managed instance group. If you increase the size, +// the group creates new instances using the current instance template. +// If you decrease the size, the group deletes instances. The resize +// operation is marked DONE when the resize actions are scheduled even +// if the group has not yet added or deleted any instances. You must +// separately verify the status of the creating or deleting actions with +// the listmanagedinstances method. +// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or deleted. +func (r *InstanceGroupManagersService) Resize(project string, zone string, instanceGroupManager string, size int64) *InstanceGroupManagersResizeCall { + c := &InstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.image = image + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.urlParams_.Set("size", fmt.Sprint(size)) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupManagersResizeCall) RequestId(requestId string) *InstanceGroupManagersResizeCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesGetCall) Fields(s ...googleapi.Field) *ImagesGetCall { +func (c *InstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *InstanceGroupManagersResizeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ImagesGetCall) IfNoneMatch(entityTag string) *ImagesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *ImagesGetCall) Context(ctx context.Context) *ImagesGetCall { +func (c *InstanceGroupManagersResizeCall) Context(ctx context.Context) *InstanceGroupManagersResizeCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ImagesGetCall) Header() http.Header { +func (c *InstanceGroupManagersResizeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "image": c.image, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.get" call. -// Exactly one of *Image or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Image.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) { +// Do executes the "compute.instanceGroupManagers.resize" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -42909,7 +56711,7 @@ func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Image{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -42921,18 +56723,19 @@ func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) { } return ret, nil // { - // "description": "Returns the specified image. Get a list of available images by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.images.get", + // "description": "Resizes the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. 
The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.resize", // "parameterOrder": [ // "project", - // "image" + // "zone", + // "instanceGroupManager", + // "size" // ], // "parameters": { - // "image": { - // "description": "Name of the image resource to return.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -42942,107 +56745,152 @@ func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) { // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "size": { + // "description": "The number of running instances that the managed instance group should maintain at any given time. 
The group automatically adds or removes instances to maintain the number of instances specified by this parameter.", + // "format": "int32", + // "location": "query", + // "required": true, + // "type": "integer" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/images/{image}", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize", // "response": { - // "$ref": "Image" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.images.getFromFamily": +// method id "compute.instanceGroupManagers.resizeAdvanced": -type ImagesGetFromFamilyCall struct { - s *Service - project string - family string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersResizeAdvancedCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersresizeadvancedrequest *InstanceGroupManagersResizeAdvancedRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetFromFamily: Returns the latest image that is part of an image -// family and is not deprecated. -func (r *ImagesService) GetFromFamily(project string, family string) *ImagesGetFromFamilyCall { - c := &ImagesGetFromFamilyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ResizeAdvanced: Resizes the managed instance group with advanced +// configuration options like disabling creation retries. This is an +// extended version of the resize method. +// +// If you increase the size of the instance group, the group creates new +// instances using the current instance template. If you decrease the +// size, the group deletes instances. The resize operation is marked +// DONE when the resize actions are scheduled even if the group has not +// yet added or deleted any instances. You must separately verify the +// status of the creating, creatingWithoutRetries, or deleting actions +// with the get or listmanagedinstances method. +// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or deleted. +func (r *InstanceGroupManagersService) ResizeAdvanced(project string, zone string, instanceGroupManager string, instancegroupmanagersresizeadvancedrequest *InstanceGroupManagersResizeAdvancedRequest) *InstanceGroupManagersResizeAdvancedCall { + c := &InstanceGroupManagersResizeAdvancedCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.family = family + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagersresizeadvancedrequest = instancegroupmanagersresizeadvancedrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupManagersResizeAdvancedCall) RequestId(requestId string) *InstanceGroupManagersResizeAdvancedCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesGetFromFamilyCall) Fields(s ...googleapi.Field) *ImagesGetFromFamilyCall { +func (c *InstanceGroupManagersResizeAdvancedCall) Fields(s ...googleapi.Field) *InstanceGroupManagersResizeAdvancedCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ImagesGetFromFamilyCall) IfNoneMatch(entityTag string) *ImagesGetFromFamilyCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesGetFromFamilyCall) Context(ctx context.Context) *ImagesGetFromFamilyCall { +func (c *InstanceGroupManagersResizeAdvancedCall) Context(ctx context.Context) *InstanceGroupManagersResizeAdvancedCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ImagesGetFromFamilyCall) Header() http.Header { +func (c *InstanceGroupManagersResizeAdvancedCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesGetFromFamilyCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersResizeAdvancedCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersresizeadvancedrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/family/{family}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resizeAdvanced") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "family": c.family, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.getFromFamily" call. 
-// Exactly one of *Image or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Image.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, error) { +// Do executes the "compute.instanceGroupManagers.resizeAdvanced" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersResizeAdvancedCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -43061,7 +56909,7 @@ func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, erro if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Image{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -43073,18 +56921,18 @@ func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, erro } return ret, nil // { - // "description": "Returns the latest image that is part of an image family and is not deprecated.", - // "httpMethod": "GET", - // "id": "compute.images.getFromFamily", + // "description": "Resizes the managed instance group with advanced configuration options like disabling creation retries. This is an extended version of the resize method.\n\nIf you increase the size of the instance group, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating, creatingWithoutRetries, or deleting actions with the get or listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.resizeAdvanced", // "parameterOrder": [ // "project", - // "family" + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "family": { - // "description": "Name of the image family to search for.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -43094,107 +56942,134 @@ func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, erro // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/images/family/{family}", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resizeAdvanced", + // "request": { + // "$ref": "InstanceGroupManagersResizeAdvancedRequest" + // }, // "response": { - // "$ref": "Image" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.images.getIamPolicy": +// method id "compute.instanceGroupManagers.setAutoHealingPolicies": -type ImagesGetIamPolicyCall struct { - s *Service - project string - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersSetAutoHealingPoliciesCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagerssetautohealingrequest *InstanceGroupManagersSetAutoHealingRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. -func (r *ImagesService) GetIamPolicy(project string, resource string) *ImagesGetIamPolicyCall { - c := &ImagesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetAutoHealingPolicies: Modifies the autohealing policies. +func (r *InstanceGroupManagersService) SetAutoHealingPolicies(project string, zone string, instanceGroupManager string, instancegroupmanagerssetautohealingrequest *InstanceGroupManagersSetAutoHealingRequest) *InstanceGroupManagersSetAutoHealingPoliciesCall { + c := &InstanceGroupManagersSetAutoHealingPoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagerssetautohealingrequest = instancegroupmanagerssetautohealingrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. 
This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) RequestId(requestId string) *InstanceGroupManagersSetAutoHealingPoliciesCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesGetIamPolicyCall) Fields(s ...googleapi.Field) *ImagesGetIamPolicyCall { +func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetAutoHealingPoliciesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ImagesGetIamPolicyCall) IfNoneMatch(entityTag string) *ImagesGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesGetIamPolicyCall) Context(ctx context.Context) *ImagesGetIamPolicyCall { +func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Context(ctx context.Context) *InstanceGroupManagersSetAutoHealingPoliciesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ImagesGetIamPolicyCall) Header() http.Header { +func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssetautohealingrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. 
Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.instanceGroupManagers.setAutoHealingPolicies" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -43213,7 +57088,7 @@ func (c *ImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, erro if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -43225,14 +57100,21 @@ func (c *ImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, erro } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", - // "httpMethod": "GET", - // "id": "compute.images.getIamPolicy", + // "description": "Modifies the autohealing policies.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.setAutoHealingPolicies", // "parameterOrder": [ // "project", - // "resource" + // "zone", + // "instanceGroupManager" // ], // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the instance group manager.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -43240,52 +57122,55 @@ func (c *ImagesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, erro // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name of the resource for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/images/{resource}/getIamPolicy", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies", + // "request": { + // "$ref": "InstanceGroupManagersSetAutoHealingRequest" + // }, // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.images.insert": +// method id "compute.instanceGroupManagers.setInstanceTemplate": -type ImagesInsertCall struct { - s *Service - project string - image *Image - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersSetInstanceTemplateCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagerssetinstancetemplaterequest *InstanceGroupManagersSetInstanceTemplateRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates an image in the specified project using the data -// included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/images/insert -func (r *ImagesService) Insert(project string, image *Image) *ImagesInsertCall { - c := &ImagesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetInstanceTemplate: Specifies the instance template to use when +// creating new instances in this group. The templates for existing +// instances in the group do not change unless you recreate them. +func (r *InstanceGroupManagersService) SetInstanceTemplate(project string, zone string, instanceGroupManager string, instancegroupmanagerssetinstancetemplaterequest *InstanceGroupManagersSetInstanceTemplateRequest) *InstanceGroupManagersSetInstanceTemplateCall { + c := &InstanceGroupManagersSetInstanceTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.image = image - return c -} - -// ForceCreate sets the optional parameter "forceCreate": Force image -// creation if true. -func (c *ImagesInsertCall) ForceCreate(forceCreate bool) *ImagesInsertCall { - c.urlParams_.Set("forceCreate", fmt.Sprint(forceCreate)) + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagerssetinstancetemplaterequest = instancegroupmanagerssetinstancetemplaterequest return c } @@ -43303,7 +57188,7 @@ func (c *ImagesInsertCall) ForceCreate(forceCreate bool) *ImagesInsertCall { // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *ImagesInsertCall) RequestId(requestId string) *ImagesInsertCall { +func (c *InstanceGroupManagersSetInstanceTemplateCall) RequestId(requestId string) *InstanceGroupManagersSetInstanceTemplateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -43311,7 +57196,7 @@ func (c *ImagesInsertCall) RequestId(requestId string) *ImagesInsertCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesInsertCall) Fields(s ...googleapi.Field) *ImagesInsertCall { +func (c *InstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetInstanceTemplateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -43319,51 +57204,53 @@ func (c *ImagesInsertCall) Fields(s ...googleapi.Field) *ImagesInsertCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesInsertCall) Context(ctx context.Context) *ImagesInsertCall { +func (c *InstanceGroupManagersSetInstanceTemplateCall) Context(ctx context.Context) *InstanceGroupManagersSetInstanceTemplateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ImagesInsertCall) Header() http.Header { +func (c *InstanceGroupManagersSetInstanceTemplateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.image) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssetinstancetemplaterequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.insert" call. +// Do executes the "compute.instanceGroupManagers.setInstanceTemplate" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -43394,17 +57281,20 @@ func (c *ImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Creates an image in the specified project using the data included in the request.", + // "description": "Specifies the instance template to use when creating new instances in this group. The templates for existing instances in the group do not change unless you recreate them.", // "httpMethod": "POST", - // "id": "compute.images.insert", + // "id": "compute.instanceGroupManagers.setInstanceTemplate", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "forceCreate": { - // "description": "Force image creation if true.", - // "location": "query", - // "type": "boolean" + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" // }, // "project": { // "description": "Project ID for this request.", @@ -43417,182 +57307,135 @@ func (c *ImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/images", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", // "request": { - // "$ref": "Image" + // "$ref": "InstanceGroupManagersSetInstanceTemplateRequest" // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.images.list": +// method id "compute.instanceGroupManagers.setTargetPools": -type ImagesListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersSetTargetPoolsCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagerssettargetpoolsrequest *InstanceGroupManagersSetTargetPoolsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of private images available to the specified -// project. Private images are images you create that belong to your -// project. 
This method does not get any images that belong to other -// projects, including publicly-available images, like Debian 8. If you -// want to get a list of publicly-available images, use this method to -// make a request to the respective image project, such as debian-cloud -// or windows-cloud. -// For details, see https://cloud.google.com/compute/docs/reference/latest/images/list -func (r *ImagesService) List(project string) *ImagesListCall { - c := &ImagesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetTargetPools: Modifies the target pools to which all instances in +// this managed instance group are assigned. The target pools +// automatically apply to all of the instances in the managed instance +// group. This operation is marked DONE when you make the request even +// if the instances have not yet been added to their target pools. The +// change might take some time to apply to all of the instances in the +// group depending on the size of the group. +func (r *InstanceGroupManagersService) SetTargetPools(project string, zone string, instanceGroupManager string, instancegroupmanagerssettargetpoolsrequest *InstanceGroupManagersSetTargetPoolsRequest) *InstanceGroupManagersSetTargetPoolsCall { + c := &InstanceGroupManagersSetTargetPoolsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagerssettargetpoolsrequest = instancegroupmanagerssettargetpoolsrequest return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *ImagesListCall) Filter(filter string) *ImagesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. 
-// (Default: 500) -func (c *ImagesListCall) MaxResults(maxResults int64) *ImagesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *ImagesListCall) OrderBy(orderBy string) *ImagesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *ImagesListCall) PageToken(pageToken string) *ImagesListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupManagersSetTargetPoolsCall) RequestId(requestId string) *InstanceGroupManagersSetTargetPoolsCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesListCall) Fields(s ...googleapi.Field) *ImagesListCall { +func (c *InstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetTargetPoolsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ImagesListCall) IfNoneMatch(entityTag string) *ImagesListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesListCall) Context(ctx context.Context) *ImagesListCall { +func (c *InstanceGroupManagersSetTargetPoolsCall) Context(ctx context.Context) *InstanceGroupManagersSetTargetPoolsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ImagesListCall) Header() http.Header { +func (c *InstanceGroupManagersSetTargetPoolsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssettargetpoolsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.list" call. -// Exactly one of *ImageList or error will be non-nil. Any non-2xx +// Do executes the "compute.instanceGroupManagers.setTargetPools" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *ImageList.ServerResponse.Header or (if a response was returned at +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { +func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -43611,7 +57454,7 @@ func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ImageList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -43623,104 +57466,83 @@ func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { } return ret, nil // { - // "description": "Retrieves the list of private images available to the specified project. Private images are images you create that belong to your project. This method does not get any images that belong to other projects, including publicly-available images, like Debian 8. If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", - // "httpMethod": "GET", - // "id": "compute.images.list", + // "description": "Modifies the target pools to which all instances in this managed instance group are assigned. The target pools automatically apply to all of the instances in the managed instance group. 
This operation is marked DONE when you make the request even if the instances have not yet been added to their target pools. The change might take some time to apply to all of the instances in the group depending on the size of the group.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.setTargetPools", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, // "type": "string" // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/images", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", + // "request": { + // "$ref": "InstanceGroupManagersSetTargetPoolsRequest" + // }, // "response": { - // "$ref": "ImageList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *ImagesListCall) Pages(ctx context.Context, f func(*ImageList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.images.setIamPolicy": +// method id "compute.instanceGroupManagers.testIamPermissions": -type ImagesSetIamPolicyCall struct { - s *Service - project string - resource string - policy *Policy - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersTestIamPermissionsCall struct { + s *Service + project string + zone string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. -func (r *ImagesService) SetIamPolicy(project string, resource string, policy *Policy) *ImagesSetIamPolicyCall { - c := &ImagesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. 
+func (r *InstanceGroupManagersService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstanceGroupManagersTestIamPermissionsCall { + c := &InstanceGroupManagersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone c.resource = resource - c.policy = policy + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesSetIamPolicyCall) Fields(s ...googleapi.Field) *ImagesSetIamPolicyCall { +func (c *InstanceGroupManagersTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -43728,52 +57550,53 @@ func (c *ImagesSetIamPolicyCall) Fields(s ...googleapi.Field) *ImagesSetIamPolic // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesSetIamPolicyCall) Context(ctx context.Context) *ImagesSetIamPolicyCall { +func (c *InstanceGroupManagersTestIamPermissionsCall) Context(ctx context.Context) *InstanceGroupManagersTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ImagesSetIamPolicyCall) Header() http.Header { +func (c *InstanceGroupManagersTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "zone": c.zone, "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.instanceGroupManagers.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -43792,7 +57615,7 @@ func (c *ImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, erro if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -43804,11 +57627,12 @@ func (c *ImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, erro } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.images.setIamPolicy", + // "id": "compute.instanceGroupManagers.testIamPermissions", // "parameterOrder": [ // "project", + // "zone", // "resource" // ], // "parameters": { @@ -43822,52 +57646,84 @@ func (c *ImagesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, erro // "resource": { // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/images/{resource}/setIamPolicy", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{resource}/testIamPermissions", // "request": { - // "$ref": "Policy" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Policy" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.images.setLabels": +// method id "compute.instanceGroupManagers.update": -type ImagesSetLabelsCall struct { - s *Service - project string - resource string - globalsetlabelsrequest *GlobalSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersUpdateCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanager *InstanceGroupManager + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetLabels: Sets the labels on an image. To learn more about labels, -// read the Labeling Resources documentation. -func (r *ImagesService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *ImagesSetLabelsCall { - c := &ImagesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates a managed instance group using the information that +// you specify in the request. 
This operation is marked as DONE when the +// group is updated even if the instances in the group have not yet been +// updated. You must separately verify the status of the individual +// instances with the listManagedInstances method. +func (r *InstanceGroupManagersService) Update(project string, zone string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersUpdateCall { + c := &InstanceGroupManagersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.globalsetlabelsrequest = globalsetlabelsrequest + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanager = instancegroupmanager + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupManagersUpdateCall) RequestId(requestId string) *InstanceGroupManagersUpdateCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesSetLabelsCall) Fields(s ...googleapi.Field) *ImagesSetLabelsCall { +func (c *InstanceGroupManagersUpdateCall) Fields(s ...googleapi.Field) *InstanceGroupManagersUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -43875,52 +57731,53 @@ func (c *ImagesSetLabelsCall) Fields(s ...googleapi.Field) *ImagesSetLabelsCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesSetLabelsCall) Context(ctx context.Context) *ImagesSetLabelsCall { +func (c *InstanceGroupManagersUpdateCall) Context(ctx context.Context) *InstanceGroupManagersUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ImagesSetLabelsCall) Header() http.Header { +func (c *InstanceGroupManagersUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("PUT", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.setLabels" call. +// Do executes the "compute.instanceGroupManagers.update" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ImagesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -43951,14 +57808,21 @@ func (c *ImagesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Sets the labels on an image. To learn more about labels, read the Labeling Resources documentation.", - // "httpMethod": "POST", - // "id": "compute.images.setLabels", + // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is updated even if the instances in the group have not yet been updated. 
You must separately verify the status of the individual instances with the listManagedInstances method.", + // "httpMethod": "PUT", + // "id": "compute.instanceGroupManagers.update", // "parameterOrder": [ // "project", - // "resource" + // "zone", + // "instanceGroupManager" // ], // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the instance group manager.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -43966,17 +57830,21 @@ func (c *ImagesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name of the resource for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where you want to create the managed instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/images/{resource}/setLabels", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", // "request": { - // "$ref": "GlobalSetLabelsRequest" + // "$ref": "InstanceGroupManager" // }, // "response": { // "$ref": "Operation" @@ -43989,32 +57857,55 @@ func (c *ImagesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.images.testIamPermissions": +// method id "compute.instanceGroupManagers.updatePerInstanceConfigs": -type ImagesTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersUpdatePerInstanceConfigsCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersupdateperinstanceconfigsreq *InstanceGroupManagersUpdatePerInstanceConfigsReq + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *ImagesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *ImagesTestIamPermissionsCall { - c := &ImagesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// UpdatePerInstanceConfigs: Insert or patch (for the ones that already +// exist) per-instance configs for the managed instance group. +// perInstanceConfig.instance serves as a key used to distinguish +// whether to perform insert or patch. 
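The update call wired in above is a plain PUT through the generated surface. Before the per-instance-config plumbing continues below, here is a minimal sketch of how a caller would drive it, assuming the vendored package is imported as google.golang.org/api/compute/v0.beta (the import path is not visible in this hunk) and using InstanceGroupManagers.Get, which is defined earlier in this file; project, zone, and group names are placeholders.

package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

func main() {
	ctx := context.Background()

	// Application Default Credentials with the cloud-platform scope listed in
	// the method's "scopes" metadata.
	httpClient, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(httpClient)
	if err != nil {
		log.Fatal(err)
	}

	// Hypothetical identifiers.
	const project, zone, name = "my-project", "us-central1-f", "my-igm"

	// Read-modify-write: fetch the current group, change it, then PUT it back
	// with Update.
	igm, err := svc.InstanceGroupManagers.Get(project, zone, name).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	igm.TargetSize = 5

	// The optional RequestId lets the server ignore an accidental duplicate
	// retry of the same update; any non-zero, caller-generated UUID works.
	op, err := svc.InstanceGroupManagers.Update(project, zone, name, igm).
		RequestId("3d7a1f2e-9c4b-4e8a-b6d1-0f2a5c7e9b31").
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("update operation %s is %s", op.Name, op.Status)
}

Because the method is a full PUT of the resource, the read-modify-write pattern above avoids silently clearing fields that are not set on the request body.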
+func (r *InstanceGroupManagersService) UpdatePerInstanceConfigs(project string, zone string, instanceGroupManager string, instancegroupmanagersupdateperinstanceconfigsreq *InstanceGroupManagersUpdatePerInstanceConfigsReq) *InstanceGroupManagersUpdatePerInstanceConfigsCall { + c := &InstanceGroupManagersUpdatePerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagersupdateperinstanceconfigsreq = instancegroupmanagersupdateperinstanceconfigsreq + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) RequestId(requestId string) *InstanceGroupManagersUpdatePerInstanceConfigsCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesTestIamPermissionsCall) Fields(s ...googleapi.Field) *ImagesTestIamPermissionsCall { +func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersUpdatePerInstanceConfigsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -44022,52 +57913,53 @@ func (c *ImagesTestIamPermissionsCall) Fields(s ...googleapi.Field) *ImagesTestI // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesTestIamPermissionsCall) Context(ctx context.Context) *ImagesTestIamPermissionsCall { +func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) Context(ctx context.Context) *InstanceGroupManagersUpdatePerInstanceConfigsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ImagesTestIamPermissionsCall) Header() http.Header { +func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersupdateperinstanceconfigsreq) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.instanceGroupManagers.updatePerInstanceConfigs" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -44086,7 +57978,7 @@ func (c *ImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPe if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -44098,14 +57990,21 @@ func (c *ImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPe } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Insert or patch (for the ones that already exist) per-instance configs for the managed instance group. 
perInstanceConfig.instance serves as a key used to distinguish whether to perform insert or patch.", // "httpMethod": "POST", - // "id": "compute.images.testIamPermissions", + // "id": "compute.instanceGroupManagers.updatePerInstanceConfigs", // "parameterOrder": [ // "project", - // "resource" + // "zone", + // "instanceGroupManager" // ], // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the managed instance group. It should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -44113,66 +58012,55 @@ func (c *ImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPe // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name of the resource for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located. It should conform to RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/images/{resource}/testIamPermissions", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "InstanceGroupManagersUpdatePerInstanceConfigsReq" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instanceGroupManagers.abandonInstances": +// method id "compute.instanceGroups.addInstances": -type InstanceGroupManagersAbandonInstancesCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagersabandoninstancesrequest *InstanceGroupManagersAbandonInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupsAddInstancesCall struct { + s *Service + project string + zone string + instanceGroup string + instancegroupsaddinstancesrequest *InstanceGroupsAddInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AbandonInstances: Schedules a group action to remove the specified -// instances from the managed instance group. Abandoning an instance -// does not delete the instance, but it does remove the instance from -// any target pools that are applied by the managed instance group. 
This -// method reduces the targetSize of the managed instance group by the -// number of instances that you abandon. This operation is marked as -// DONE when the action is scheduled even if the instances have not yet -// been removed from the group. You must separately verify the status of -// the abandoning action with the listmanagedinstances method. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or -// deleted. -// -// You can specify a maximum of 1000 instances with this method per -// request. -func (r *InstanceGroupManagersService) AbandonInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersabandoninstancesrequest *InstanceGroupManagersAbandonInstancesRequest) *InstanceGroupManagersAbandonInstancesCall { - c := &InstanceGroupManagersAbandonInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AddInstances: Adds a list of instances to the specified instance +// group. All of the instances in the instance group must be in the same +// network/subnetwork. Read Adding instances for more information. +func (r *InstanceGroupsService) AddInstances(project string, zone string, instanceGroup string, instancegroupsaddinstancesrequest *InstanceGroupsAddInstancesRequest) *InstanceGroupsAddInstancesCall { + c := &InstanceGroupsAddInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagersabandoninstancesrequest = instancegroupmanagersabandoninstancesrequest + c.instanceGroup = instanceGroup + c.instancegroupsaddinstancesrequest = instancegroupsaddinstancesrequest return c } @@ -44190,7 +58078,7 @@ func (r *InstanceGroupManagersService) AbandonInstances(project string, zone str // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersAbandonInstancesCall) RequestId(requestId string) *InstanceGroupManagersAbandonInstancesCall { +func (c *InstanceGroupsAddInstancesCall) RequestId(requestId string) *InstanceGroupsAddInstancesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -44198,7 +58086,7 @@ func (c *InstanceGroupManagersAbandonInstancesCall) RequestId(requestId string) // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersAbandonInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersAbandonInstancesCall { +func (c *InstanceGroupsAddInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsAddInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -44206,53 +58094,53 @@ func (c *InstanceGroupManagersAbandonInstancesCall) Fields(s ...googleapi.Field) // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersAbandonInstancesCall) Context(ctx context.Context) *InstanceGroupManagersAbandonInstancesCall { +func (c *InstanceGroupsAddInstancesCall) Context(ctx context.Context) *InstanceGroupsAddInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupManagersAbandonInstancesCall) Header() http.Header { +func (c *InstanceGroupsAddInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsAddInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersabandoninstancesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupsaddinstancesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.abandonInstances" call. +// Do executes the "compute.instanceGroups.addInstances" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -44283,17 +58171,17 @@ func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOpt } return ret, nil // { - // "description": "Schedules a group action to remove the specified instances from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. You must separately verify the status of the abandoning action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "description": "Adds a list of instances to the specified instance group. All of the instances in the instance group must be in the same network/subnetwork. 
Read Adding instances for more information.", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.abandonInstances", + // "id": "compute.instanceGroups.addInstances", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instanceGroup" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "instanceGroup": { + // "description": "The name of the instance group where you are adding instances.", // "location": "path", // "required": true, // "type": "string" @@ -44311,15 +58199,15 @@ func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOpt // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone where the instance group is located.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances", // "request": { - // "$ref": "InstanceGroupManagersAbandonInstancesRequest" + // "$ref": "InstanceGroupsAddInstancesRequest" // }, // "response": { // "$ref": "Operation" @@ -44332,9 +58220,9 @@ func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOpt } -// method id "compute.instanceGroupManagers.aggregatedList": +// method id "compute.instanceGroups.aggregatedList": -type InstanceGroupManagersAggregatedListCall struct { +type InstanceGroupsAggregatedListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -44343,10 +58231,10 @@ type InstanceGroupManagersAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves the list of managed instance groups and -// groups them by zone. -func (r *InstanceGroupManagersService) AggregatedList(project string) *InstanceGroupManagersAggregatedListCall { - c := &InstanceGroupManagersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves the list of instance groups and sorts them +// by zone. +func (r *InstanceGroupsService) AggregatedList(project string) *InstanceGroupsAggregatedListCall { + c := &InstanceGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -44377,7 +58265,7 @@ func (r *InstanceGroupManagersService) AggregatedList(project string) *InstanceG // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *InstanceGroupManagersAggregatedListCall) Filter(filter string) *InstanceGroupManagersAggregatedListCall { +func (c *InstanceGroupsAggregatedListCall) Filter(filter string) *InstanceGroupsAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -44388,7 +58276,7 @@ func (c *InstanceGroupManagersAggregatedListCall) Filter(filter string) *Instanc // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. 
// (Default: 500) -func (c *InstanceGroupManagersAggregatedListCall) MaxResults(maxResults int64) *InstanceGroupManagersAggregatedListCall { +func (c *InstanceGroupsAggregatedListCall) MaxResults(maxResults int64) *InstanceGroupsAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -44405,7 +58293,7 @@ func (c *InstanceGroupManagersAggregatedListCall) MaxResults(maxResults int64) * // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *InstanceGroupManagersAggregatedListCall) OrderBy(orderBy string) *InstanceGroupManagersAggregatedListCall { +func (c *InstanceGroupsAggregatedListCall) OrderBy(orderBy string) *InstanceGroupsAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -44413,7 +58301,7 @@ func (c *InstanceGroupManagersAggregatedListCall) OrderBy(orderBy string) *Insta // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *InstanceGroupManagersAggregatedListCall) PageToken(pageToken string) *InstanceGroupManagersAggregatedListCall { +func (c *InstanceGroupsAggregatedListCall) PageToken(pageToken string) *InstanceGroupsAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -44421,7 +58309,7 @@ func (c *InstanceGroupManagersAggregatedListCall) PageToken(pageToken string) *I // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersAggregatedListCall) Fields(s ...googleapi.Field) *InstanceGroupManagersAggregatedListCall { +func (c *InstanceGroupsAggregatedListCall) Fields(s ...googleapi.Field) *InstanceGroupsAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -44431,7 +58319,7 @@ func (c *InstanceGroupManagersAggregatedListCall) Fields(s ...googleapi.Field) * // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InstanceGroupManagersAggregatedListCall) IfNoneMatch(entityTag string) *InstanceGroupManagersAggregatedListCall { +func (c *InstanceGroupsAggregatedListCall) IfNoneMatch(entityTag string) *InstanceGroupsAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -44439,21 +58327,21 @@ func (c *InstanceGroupManagersAggregatedListCall) IfNoneMatch(entityTag string) // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersAggregatedListCall) Context(ctx context.Context) *InstanceGroupManagersAggregatedListCall { +func (c *InstanceGroupsAggregatedListCall) Context(ctx context.Context) *InstanceGroupsAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupManagersAggregatedListCall) Header() http.Header { +func (c *InstanceGroupsAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -44464,7 +58352,7 @@ func (c *InstanceGroupManagersAggregatedListCall) doRequest(alt string) (*http.R } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instanceGroupManagers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instanceGroups") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders @@ -44474,15 +58362,14 @@ func (c *InstanceGroupManagersAggregatedListCall) doRequest(alt string) (*http.R return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.aggregatedList" call. -// Exactly one of *InstanceGroupManagerAggregatedList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *InstanceGroupManagerAggregatedList.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagerAggregatedList, error) { +// Do executes the "compute.instanceGroups.aggregatedList" call. +// Exactly one of *InstanceGroupAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *InstanceGroupAggregatedList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -44501,7 +58388,7 @@ func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOptio if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupManagerAggregatedList{ + ret := &InstanceGroupAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -44513,9 +58400,9 @@ func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Retrieves the list of managed instance groups and groups them by zone.", + // "description": "Retrieves the list of instance groups and sorts them by zone.", // "httpMethod": "GET", - // "id": "compute.instanceGroupManagers.aggregatedList", + // "id": "compute.instanceGroups.aggregatedList", // "parameterOrder": [ // "project" // ], @@ -44551,9 +58438,9 @@ func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOptio // "type": "string" // } // }, - // "path": "{project}/aggregated/instanceGroupManagers", + // "path": "{project}/aggregated/instanceGroups", // "response": { - // "$ref": "InstanceGroupManagerAggregatedList" + // "$ref": "InstanceGroupAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -44567,7 +58454,7 @@ func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOptio // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *InstanceGroupManagersAggregatedListCall) Pages(ctx context.Context, f func(*InstanceGroupManagerAggregatedList) error) error { +func (c *InstanceGroupsAggregatedListCall) Pages(ctx context.Context, f func(*InstanceGroupAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -44585,35 +58472,53 @@ func (c *InstanceGroupManagersAggregatedListCall) Pages(ctx context.Context, f f } } -// method id "compute.instanceGroupManagers.applyUpdatesToInstances": +// method id "compute.instanceGroups.delete": -type InstanceGroupManagersApplyUpdatesToInstancesCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagersapplyupdatesrequest *InstanceGroupManagersApplyUpdatesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupsDeleteCall struct { + s *Service + project string + zone string + instanceGroup string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ApplyUpdatesToInstances: Apply changes to selected instances on the -// managed instance group. This method can be used to apply new -// overrides and/or new versions. -func (r *InstanceGroupManagersService) ApplyUpdatesToInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersapplyupdatesrequest *InstanceGroupManagersApplyUpdatesRequest) *InstanceGroupManagersApplyUpdatesToInstancesCall { - c := &InstanceGroupManagersApplyUpdatesToInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified instance group. The instances in the +// group are not deleted. Note that instance group must not belong to a +// backend service. Read Deleting an instance group for more +// information. 
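Before the delete plumbing below, note that the renamed aggregated-list call above keeps the Pages helper, so callers never touch pageToken directly. A minimal sketch, assuming a *compute.Service built as in the earlier sketch; the import path and the field layout of InstanceGroupAggregatedList live outside this hunk, so the page is dumped as JSON instead of being walked field by field.

package main

import (
	"context"
	"encoding/json"
	"os"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

// listAllInstanceGroups pages through compute.instanceGroups.aggregatedList,
// printing each page as JSON so the sketch does not depend on field names
// defined elsewhere in this file. svc is assumed to be built as in the
// earlier sketch.
func listAllInstanceGroups(ctx context.Context, svc *compute.Service, project string) error {
	// Filter syntax follows the Filter doc comment: field_name eq|ne literal.
	call := svc.InstanceGroups.AggregatedList(project).
		Filter("name ne example-instance-group").
		MaxResults(100)

	enc := json.NewEncoder(os.Stdout)
	enc.SetIndent("", "  ")

	// Pages follows nextPageToken until the listing is exhausted, handing one
	// InstanceGroupAggregatedList to the callback per page.
	return call.Pages(ctx, func(page *compute.InstanceGroupAggregatedList) error {
		return enc.Encode(page)
	})
}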
+func (r *InstanceGroupsService) Delete(project string, zone string, instanceGroup string) *InstanceGroupsDeleteCall { + c := &InstanceGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagersapplyupdatesrequest = instancegroupmanagersapplyupdatesrequest + c.instanceGroup = instanceGroup + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupsDeleteCall) RequestId(requestId string) *InstanceGroupsDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersApplyUpdatesToInstancesCall { +func (c *InstanceGroupsDeleteCall) Fields(s ...googleapi.Field) *InstanceGroupsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -44621,53 +58526,213 @@ func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Fields(s ...googleapi // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Context(ctx context.Context) *InstanceGroupManagersApplyUpdatesToInstancesCall { +func (c *InstanceGroupsDeleteCall) Context(ctx context.Context) *InstanceGroupsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Header() http.Header { +func (c *InstanceGroupsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersapplyupdatesrequest) + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instanceGroups.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } if err != nil { return nil, err } - reqHeaders.Set("Content-Type", "application/json") + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified instance group. The instances in the group are not deleted. Note that instance group must not belong to a backend service. Read Deleting an instance group for more information.", + // "httpMethod": "DELETE", + // "id": "compute.instanceGroups.delete", + // "parameterOrder": [ + // "project", + // "zone", + // "instanceGroup" + // ], + // "parameters": { + // "instanceGroup": { + // "description": "The name of the instance group to delete.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instanceGroups.get": + +type InstanceGroupsGetCall struct { + s *Service + project string + zone string + instanceGroup string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified instance group. Get a list of available +// instance groups by making a list() request. +func (r *InstanceGroupsService) Get(project string, zone string, instanceGroup string) *InstanceGroupsGetCall { + c := &InstanceGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instanceGroup = instanceGroup + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceGroupsGetCall) Fields(s ...googleapi.Field) *InstanceGroupsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstanceGroupsGetCall) IfNoneMatch(entityTag string) *InstanceGroupsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceGroupsGetCall) Context(ctx context.Context) *InstanceGroupsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstanceGroupsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.applyUpdatesToInstances" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.instanceGroups.get" call. +// Exactly one of *InstanceGroup or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// *InstanceGroup.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -44686,7 +58751,7 @@ func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Do(opts ...googleapi. if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InstanceGroup{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -44698,17 +58763,17 @@ func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Do(opts ...googleapi. } return ret, nil // { - // "description": "Apply changes to selected instances on the managed instance group. This method can be used to apply new overrides and/or new versions.", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.applyUpdatesToInstances", + // "description": "Returns the specified instance group. Get a list of available instance groups by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.instanceGroups.get", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instanceGroup" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group, should conform to RFC1035.", + // "instanceGroup": { + // "description": "The name of the instance group.", // "location": "path", // "required": true, // "type": "string" @@ -44721,48 +58786,44 @@ func (c *InstanceGroupManagersApplyUpdatesToInstancesCall) Do(opts ...googleapi. // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located. 
Should conform to RFC1035.", + // "description": "The name of the zone where the instance group is located.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances", - // "request": { - // "$ref": "InstanceGroupManagersApplyUpdatesRequest" - // }, + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}", // "response": { - // "$ref": "Operation" + // "$ref": "InstanceGroup" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instanceGroupManagers.delete": +// method id "compute.instanceGroups.insert": -type InstanceGroupManagersDeleteCall struct { - s *Service - project string - zone string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupsInsertCall struct { + s *Service + project string + zone string + instancegroup *InstanceGroup + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified managed instance group and all of the -// instances in that group. Note that the instance group must not belong -// to a backend service. Read Deleting an instance group for more -// information. -func (r *InstanceGroupManagersService) Delete(project string, zone string, instanceGroupManager string) *InstanceGroupManagersDeleteCall { - c := &InstanceGroupManagersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates an instance group in the specified project using the +// parameters that are included in the request. +func (r *InstanceGroupsService) Insert(project string, zone string, instancegroup *InstanceGroup) *InstanceGroupsInsertCall { + c := &InstanceGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager + c.instancegroup = instancegroup return c } @@ -44780,7 +58841,7 @@ func (r *InstanceGroupManagersService) Delete(project string, zone string, insta // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersDeleteCall) RequestId(requestId string) *InstanceGroupManagersDeleteCall { +func (c *InstanceGroupsInsertCall) RequestId(requestId string) *InstanceGroupsInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -44788,7 +58849,7 @@ func (c *InstanceGroupManagersDeleteCall) RequestId(requestId string) *InstanceG // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersDeleteCall) Fields(s ...googleapi.Field) *InstanceGroupManagersDeleteCall { +func (c *InstanceGroupsInsertCall) Fields(s ...googleapi.Field) *InstanceGroupsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -44796,48 +58857,52 @@ func (c *InstanceGroupManagersDeleteCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *InstanceGroupManagersDeleteCall) Context(ctx context.Context) *InstanceGroupManagersDeleteCall { +func (c *InstanceGroupsInsertCall) Context(ctx context.Context) *InstanceGroupsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersDeleteCall) Header() http.Header { +func (c *InstanceGroupsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroup) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.delete" call. +// Do executes the "compute.instanceGroups.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -44868,21 +58933,14 @@ func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Deletes the specified managed instance group and all of the instances in that group. Note that the instance group must not belong to a backend service. 
Read Deleting an instance group for more information.", - // "httpMethod": "DELETE", - // "id": "compute.instanceGroupManagers.delete", + // "description": "Creates an instance group in the specified project using the parameters that are included in the request.", + // "httpMethod": "POST", + // "id": "compute.instanceGroups.insert", // "parameterOrder": [ // "project", - // "zone", - // "instanceGroupManager" + // "zone" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group to delete.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -44896,13 +58954,16 @@ func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone where you want to create the instance group.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", + // "path": "{project}/zones/{zone}/instanceGroups", + // "request": { + // "$ref": "InstanceGroup" + // }, // "response": { // "$ref": "Operation" // }, @@ -44914,121 +58975,159 @@ func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.instanceGroupManagers.deleteInstances": +// method id "compute.instanceGroups.list": -type InstanceGroupManagersDeleteInstancesCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagersdeleteinstancesrequest *InstanceGroupManagersDeleteInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupsListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// DeleteInstances: Schedules a group action to delete the specified -// instances in the managed instance group. The instances are also -// removed from any target pools of which they were a member. This -// method reduces the targetSize of the managed instance group by the -// number of instances that you delete. This operation is marked as DONE -// when the action is scheduled even if the instances are still being -// deleted. You must separately verify the status of the deleting action -// with the listmanagedinstances method. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or -// deleted. -// -// You can specify a maximum of 1000 instances with this method per -// request. -func (r *InstanceGroupManagersService) DeleteInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersdeleteinstancesrequest *InstanceGroupManagersDeleteInstancesRequest) *InstanceGroupManagersDeleteInstancesCall { - c := &InstanceGroupManagersDeleteInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of instance groups that are located in the +// specified project and zone. 
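Before the list plumbing below, a short create-then-delete sketch tying together the insert and delete calls added above, again assuming a *compute.Service built as in the first sketch; the InstanceGroup field used here (Name) is defined elsewhere in this file, and the UUIDs are placeholders a caller would generate.

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

// createAndDeleteGroup exercises compute.instanceGroups.insert and
// compute.instanceGroups.delete; svc is assumed to be built as in the first
// sketch.
func createAndDeleteGroup(ctx context.Context, svc *compute.Service, project, zone string) error {
	// Insert an empty unmanaged instance group. Retrying this call with the
	// same RequestId will not create a second group.
	insertOp, err := svc.InstanceGroups.Insert(project, zone, &compute.InstanceGroup{Name: "example-group"}).
		RequestId("6b1f9c3a-2d4e-4f70-8a5b-1c2d3e4f5a6b").
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("insert operation: %s", insertOp.Name)

	// In real code the caller would wait for insertOp to reach DONE before
	// deleting; that polling is omitted here.
	deleteOp, err := svc.InstanceGroups.Delete(project, zone, "example-group").
		RequestId("9e8d7c6b-5a4f-4e3d-9c2b-1a0f9e8d7c6b").
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("delete operation: %s", deleteOp.Name)
	return nil
}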
+func (r *InstanceGroupsService) List(project string, zone string) *InstanceGroupsListCall { + c := &InstanceGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagersdeleteinstancesrequest = instancegroupmanagersdeleteinstancesrequest return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersDeleteInstancesCall) RequestId(requestId string) *InstanceGroupManagersDeleteInstancesCall { - c.urlParams_.Set("requestId", requestId) +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *InstanceGroupsListCall) Filter(filter string) *InstanceGroupsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *InstanceGroupsListCall) MaxResults(maxResults int64) *InstanceGroupsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. 
By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InstanceGroupsListCall) OrderBy(orderBy string) *InstanceGroupsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InstanceGroupsListCall) PageToken(pageToken string) *InstanceGroupsListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersDeleteInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersDeleteInstancesCall { +func (c *InstanceGroupsListCall) Fields(s ...googleapi.Field) *InstanceGroupsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstanceGroupsListCall) IfNoneMatch(entityTag string) *InstanceGroupsListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersDeleteInstancesCall) Context(ctx context.Context) *InstanceGroupManagersDeleteInstancesCall { +func (c *InstanceGroupsListCall) Context(ctx context.Context) *InstanceGroupsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersDeleteInstancesCall) Header() http.Header { +func (c *InstanceGroupsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersdeleteinstancesrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.deleteInstances" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instanceGroups.list" call. +// Exactly one of *InstanceGroupList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceGroupList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -45047,7 +59146,7 @@ func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOpti if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InstanceGroupList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -45059,19 +59158,35 @@ func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOpti } return ret, nil // { - // "description": "Schedules a group action to delete the specified instances in the managed instance group. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. This operation is marked as DONE when the action is scheduled even if the instances are still being deleted. You must separately verify the status of the deleting action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.deleteInstances", + // "description": "Retrieves the list of instance groups that are located in the specified project and zone.", + // "httpMethod": "GET", + // "id": "compute.instanceGroups.list", // "parameterOrder": [ // "project", - // "zone", - // "instanceGroupManager" + // "zone" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", - // "location": "path", - // "required": true, + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -45081,61 +59196,141 @@ func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOpti // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone where the instance group is located.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", - // "request": { - // "$ref": "InstanceGroupManagersDeleteInstancesRequest" - // }, + // "path": "{project}/zones/{zone}/instanceGroups", // "response": { - // "$ref": "Operation" + // "$ref": "InstanceGroupList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instanceGroupManagers.deletePerInstanceConfigs": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InstanceGroupsListCall) Pages(ctx context.Context, f func(*InstanceGroupList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstanceGroupManagersDeletePerInstanceConfigsCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagersdeleteperinstanceconfigsreq *InstanceGroupManagersDeletePerInstanceConfigsReq - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.instanceGroups.listInstances": + +type InstanceGroupsListInstancesCall struct { + s *Service + project string + zone string + instanceGroup string + instancegroupslistinstancesrequest *InstanceGroupsListInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// DeletePerInstanceConfigs: Delete selected per-instance configs for -// the managed instance group. -func (r *InstanceGroupManagersService) DeletePerInstanceConfigs(project string, zone string, instanceGroupManager string, instancegroupmanagersdeleteperinstanceconfigsreq *InstanceGroupManagersDeletePerInstanceConfigsReq) *InstanceGroupManagersDeletePerInstanceConfigsCall { - c := &InstanceGroupManagersDeletePerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListInstances: Lists the instances in the specified instance group. 
+func (r *InstanceGroupsService) ListInstances(project string, zone string, instanceGroup string, instancegroupslistinstancesrequest *InstanceGroupsListInstancesRequest) *InstanceGroupsListInstancesCall { + c := &InstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagersdeleteperinstanceconfigsreq = instancegroupmanagersdeleteperinstanceconfigsreq + c.instanceGroup = instanceGroup + c.instancegroupslistinstancesrequest = instancegroupslistinstancesrequest + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *InstanceGroupsListInstancesCall) Filter(filter string) *InstanceGroupsListInstancesCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *InstanceGroupsListInstancesCall) MaxResults(maxResults int64) *InstanceGroupsListInstancesCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. 
+func (c *InstanceGroupsListInstancesCall) OrderBy(orderBy string) *InstanceGroupsListInstancesCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InstanceGroupsListInstancesCall) PageToken(pageToken string) *InstanceGroupsListInstancesCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersDeletePerInstanceConfigsCall { +func (c *InstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsListInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -45143,53 +59338,53 @@ func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) Fields(s ...googleap // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) Context(ctx context.Context) *InstanceGroupManagersDeletePerInstanceConfigsCall { +func (c *InstanceGroupsListInstancesCall) Context(ctx context.Context) *InstanceGroupsListInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) Header() http.Header { +func (c *InstanceGroupsListInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersdeleteperinstanceconfigsreq) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupslistinstancesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deletePerInstanceConfigs") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.deletePerInstanceConfigs" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instanceGroups.listInstances" call. +// Exactly one of *InstanceGroupsListInstances or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *InstanceGroupsListInstances.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*InstanceGroupsListInstances, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -45208,7 +59403,7 @@ func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) Do(opts ...googleapi if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InstanceGroupsListInstances{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -45220,21 +59415,44 @@ func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) Do(opts ...googleapi } return ret, nil // { - // "description": "Delete selected per-instance configs for the managed instance group.", + // "description": "Lists the instances in the specified instance group.", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.deletePerInstanceConfigs", + // "id": "compute.instanceGroups.listInstances", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instanceGroup" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group. It should conform to RFC1035.", + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "instanceGroup": { + // "description": "The name of the instance group from which you want to generate a list of included instances.", // "location": "path", // "required": true, // "type": "string" // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -45243,117 +59461,154 @@ func (c *InstanceGroupManagersDeletePerInstanceConfigsCall) Do(opts ...googleapi // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located. It should conform to RFC1035.", + // "description": "The name of the zone where the instance group is located.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deletePerInstanceConfigs", + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances", // "request": { - // "$ref": "InstanceGroupManagersDeletePerInstanceConfigsReq" + // "$ref": "InstanceGroupsListInstancesRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "InstanceGroupsListInstances" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instanceGroupManagers.get": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *InstanceGroupsListInstancesCall) Pages(ctx context.Context, f func(*InstanceGroupsListInstances) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstanceGroupManagersGetCall struct { - s *Service - project string - zone string - instanceGroupManager string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// method id "compute.instanceGroups.removeInstances": + +type InstanceGroupsRemoveInstancesCall struct { + s *Service + project string + zone string + instanceGroup string + instancegroupsremoveinstancesrequest *InstanceGroupsRemoveInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns all of the details about the specified managed instance -// group. Get a list of available managed instance groups by making a -// list() request. -func (r *InstanceGroupManagersService) Get(project string, zone string, instanceGroupManager string) *InstanceGroupManagersGetCall { - c := &InstanceGroupManagersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// RemoveInstances: Removes one or more instances from the specified +// instance group, but does not delete those instances. +// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration before the VM instance is removed or deleted. +func (r *InstanceGroupsService) RemoveInstances(project string, zone string, instanceGroup string, instancegroupsremoveinstancesrequest *InstanceGroupsRemoveInstancesRequest) *InstanceGroupsRemoveInstancesCall { + c := &InstanceGroupsRemoveInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager + c.instanceGroup = instanceGroup + c.instancegroupsremoveinstancesrequest = instancegroupsremoveinstancesrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupsRemoveInstancesCall) RequestId(requestId string) *InstanceGroupsRemoveInstancesCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InstanceGroupManagersGetCall) Fields(s ...googleapi.Field) *InstanceGroupManagersGetCall { +func (c *InstanceGroupsRemoveInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsRemoveInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstanceGroupManagersGetCall) IfNoneMatch(entityTag string) *InstanceGroupManagersGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersGetCall) Context(ctx context.Context) *InstanceGroupManagersGetCall { +func (c *InstanceGroupsRemoveInstancesCall) Context(ctx context.Context) *InstanceGroupsRemoveInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersGetCall) Header() http.Header { +func (c *InstanceGroupsRemoveInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsRemoveInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupsremoveinstancesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.get" call. -// Exactly one of *InstanceGroupManager or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceGroupManager.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManager, error) { +// Do executes the "compute.instanceGroups.removeInstances" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupsRemoveInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -45372,7 +59627,7 @@ func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*Instan if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupManager{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -45384,17 +59639,17 @@ func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*Instan } return ret, nil // { - // "description": "Returns all of the details about the specified managed instance group. Get a list of available managed instance groups by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.instanceGroupManagers.get", + // "description": "Removes one or more instances from the specified instance group, but does not delete those instances.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration before the VM instance is removed or deleted.", + // "httpMethod": "POST", + // "id": "compute.instanceGroups.removeInstances", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instanceGroup" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "instanceGroup": { + // "description": "The name of the instance group where the specified instances will be removed.", // "location": "path", // "required": true, // "type": "string" @@ -45406,53 +59661,53 @@ func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*Instan // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone where the instance group is located.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances", + // "request": { + // "$ref": "InstanceGroupsRemoveInstancesRequest" + // }, // "response": { - // "$ref": "InstanceGroupManager" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instanceGroupManagers.insert": +// method id "compute.instanceGroups.setNamedPorts": -type InstanceGroupManagersInsertCall struct { - s *Service - project string - zone string - instancegroupmanager *InstanceGroupManager - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupsSetNamedPortsCall struct { + s *Service + project string + zone string + instanceGroup string + instancegroupssetnamedportsrequest *InstanceGroupsSetNamedPortsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a managed instance group using the information that -// you specify in the request. After the group is created, it schedules -// an action to create instances in the group using the specified -// instance template. This operation is marked as DONE when the group is -// created even if the instances in the group have not yet been created. -// You must separately verify the status of the individual instances -// with the listmanagedinstances method. -// -// A managed instance group can have up to 1000 VM instances per group. -// Please contact Cloud Support if you need an increase in this limit. -func (r *InstanceGroupManagersService) Insert(project string, zone string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersInsertCall { - c := &InstanceGroupManagersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetNamedPorts: Sets the named ports for the specified instance group. +func (r *InstanceGroupsService) SetNamedPorts(project string, zone string, instanceGroup string, instancegroupssetnamedportsrequest *InstanceGroupsSetNamedPortsRequest) *InstanceGroupsSetNamedPortsCall { + c := &InstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instancegroupmanager = instancegroupmanager + c.instanceGroup = instanceGroup + c.instancegroupssetnamedportsrequest = instancegroupssetnamedportsrequest return c } @@ -45470,7 +59725,7 @@ func (r *InstanceGroupManagersService) Insert(project string, zone string, insta // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *InstanceGroupManagersInsertCall) RequestId(requestId string) *InstanceGroupManagersInsertCall { +func (c *InstanceGroupsSetNamedPortsCall) RequestId(requestId string) *InstanceGroupsSetNamedPortsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -45478,7 +59733,7 @@ func (c *InstanceGroupManagersInsertCall) RequestId(requestId string) *InstanceG // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersInsertCall) Fields(s ...googleapi.Field) *InstanceGroupManagersInsertCall { +func (c *InstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *InstanceGroupsSetNamedPortsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -45486,52 +59741,53 @@ func (c *InstanceGroupManagersInsertCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersInsertCall) Context(ctx context.Context) *InstanceGroupManagersInsertCall { +func (c *InstanceGroupsSetNamedPortsCall) Context(ctx context.Context) *InstanceGroupsSetNamedPortsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersInsertCall) Header() http.Header { +func (c *InstanceGroupsSetNamedPortsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupssetnamedportsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.insert" call. +// Do executes the "compute.instanceGroups.setNamedPorts" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -45562,14 +59818,21 @@ func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, it schedules an action to create instances in the group using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method.\n\nA managed instance group can have up to 1000 VM instances per group. Please contact Cloud Support if you need an increase in this limit.", + // "description": "Sets the named ports for the specified instance group.", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.insert", + // "id": "compute.instanceGroups.setNamedPorts", // "parameterOrder": [ // "project", - // "zone" + // "zone", + // "instanceGroup" // ], // "parameters": { + // "instanceGroup": { + // "description": "The name of the instance group where the named ports are updated.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -45583,15 +59846,15 @@ func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // }, // "zone": { - // "description": "The name of the zone where you want to create the managed instance group.", + // "description": "The name of the zone where the instance group is located.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers", + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts", // "request": { - // "$ref": "InstanceGroupManager" + // "$ref": "InstanceGroupsSetNamedPortsRequest" // }, // "response": { // "$ref": "Operation" @@ -45604,159 +59867,88 @@ func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.instanceGroupManagers.list": +// method id "compute.instanceGroups.testIamPermissions": -type InstanceGroupManagersListCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstanceGroupsTestIamPermissionsCall struct { + s *Service + project string + zone string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of managed instance groups that are contained -// within the specified project and zone. -func (r *InstanceGroupManagersService) List(project string, zone string) *InstanceGroupManagersListCall { - c := &InstanceGroupManagersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. 
+func (r *InstanceGroupsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstanceGroupsTestIamPermissionsCall { + c := &InstanceGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - return c -} - -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *InstanceGroupManagersListCall) Filter(filter string) *InstanceGroupManagersListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InstanceGroupManagersListCall) MaxResults(maxResults int64) *InstanceGroupManagersListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InstanceGroupManagersListCall) OrderBy(orderBy string) *InstanceGroupManagersListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. 
-func (c *InstanceGroupManagersListCall) PageToken(pageToken string) *InstanceGroupManagersListCall { - c.urlParams_.Set("pageToken", pageToken) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersListCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListCall { +func (c *InstanceGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstanceGroupsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstanceGroupManagersListCall) IfNoneMatch(entityTag string) *InstanceGroupManagersListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersListCall) Context(ctx context.Context) *InstanceGroupManagersListCall { +func (c *InstanceGroupsTestIamPermissionsCall) Context(ctx context.Context) *InstanceGroupsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersListCall) Header() http.Header { +func (c *InstanceGroupsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.list" call. -// Exactly one of *InstanceGroupManagerList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *InstanceGroupManagerList.ServerResponse.Header or (if a response was +// Do executes the "compute.instanceGroups.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagerList, error) { +func (c *InstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -45775,7 +59967,7 @@ func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*Insta if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupManagerList{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -45787,37 +59979,15 @@ func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*Insta } return ret, nil // { - // "description": "Retrieves a list of managed instance groups that are contained within the specified project and zone.", - // "httpMethod": "GET", - // "id": "compute.instanceGroupManagers.list", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.instanceGroups.testIamPermissions", // "parameterOrder": [ // "project", - // "zone" + // "zone", + // "resource" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -45825,101 +59995,84 @@ func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*Insta // "required": true, // "type": "string" // }, + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers", + // "path": "{project}/zones/{zone}/instanceGroups/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "InstanceGroupManagerList" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/compute", // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InstanceGroupManagersListCall) Pages(ctx context.Context, f func(*InstanceGroupManagerList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instanceGroupManagers.listManagedInstances": - -type InstanceGroupManagersListManagedInstancesCall struct { - s *Service - project string - zone string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// ListManagedInstances: Lists all of the instances in the managed -// instance group. Each instance in the list has a currentAction, which -// indicates the action that the managed instance group is performing on -// the instance. For example, if the group is still creating an -// instance, the currentAction is CREATING. If a previous action failed, -// the list displays the errors for that failed action. 
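As a reader's aid, here is a minimal usage sketch of the regenerated compute.instanceGroups.testIamPermissions call above. The import path (google.golang.org/api/compute/v0.beta), the Application Default Credentials setup, and the project/zone/group names are illustrative assumptions, not part of the vendored change.

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v0.beta" // assumed path; match the vendored package
)

func main() {
	ctx := context.Background()

	// Application Default Credentials (GOOGLE_APPLICATION_CREDENTIALS or GCE metadata).
	httpClient, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(httpClient)
	if err != nil {
		log.Fatal(err)
	}

	// Ask which of these permissions the caller actually holds on the instance group.
	req := &compute.TestPermissionsRequest{
		Permissions: []string{"compute.instanceGroups.get", "compute.instanceGroups.update"},
	}
	resp, err := svc.InstanceGroups.
		TestIamPermissions("my-project", "us-central1-a", "my-instance-group", req).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("granted permissions:", resp.Permissions)
}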
-func (r *InstanceGroupManagersService) ListManagedInstances(project string, zone string, instanceGroupManager string) *InstanceGroupManagersListManagedInstancesCall { - c := &InstanceGroupManagersListManagedInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instanceGroupManager = instanceGroupManager - return c -} + // ] + // } -// Filter sets the optional parameter "filter": -func (c *InstanceGroupManagersListManagedInstancesCall) Filter(filter string) *InstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("filter", filter) - return c } -// MaxResults sets the optional parameter "maxResults": -func (c *InstanceGroupManagersListManagedInstancesCall) MaxResults(maxResults int64) *InstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c +// method id "compute.instanceTemplates.delete": + +type InstanceTemplatesDeleteCall struct { + s *Service + project string + instanceTemplate string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// OrderBy sets the optional parameter "order_by": -func (c *InstanceGroupManagersListManagedInstancesCall) OrderBy(orderBy string) *InstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("order_by", orderBy) +// Delete: Deletes the specified instance template. If you delete an +// instance template that is being referenced from another instance +// group, the instance group will not be able to create or recreate +// virtual machine instances. Deleting an instance template is permanent +// and cannot be undone. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/delete +func (r *InstanceTemplatesService) Delete(project string, instanceTemplate string) *InstanceTemplatesDeleteCall { + c := &InstanceTemplatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instanceTemplate = instanceTemplate return c } -// PageToken sets the optional parameter "pageToken": -func (c *InstanceGroupManagersListManagedInstancesCall) PageToken(pageToken string) *InstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("pageToken", pageToken) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceTemplatesDeleteCall) RequestId(requestId string) *InstanceTemplatesDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InstanceGroupManagersListManagedInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListManagedInstancesCall { +func (c *InstanceTemplatesDeleteCall) Fields(s ...googleapi.Field) *InstanceTemplatesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -45927,21 +60080,21 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Fields(s ...googleapi.Fi // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersListManagedInstancesCall) Context(ctx context.Context) *InstanceGroupManagersListManagedInstancesCall { +func (c *InstanceTemplatesDeleteCall) Context(ctx context.Context) *InstanceTemplatesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersListManagedInstancesCall) Header() http.Header { +func (c *InstanceTemplatesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -45949,28 +60102,25 @@ func (c *InstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (* reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{instanceTemplate}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "instanceTemplate": c.instanceTemplate, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.listManagedInstances" call. -// Exactly one of *InstanceGroupManagersListManagedInstancesResponse or -// error will be non-nil. Any non-2xx status code is an error. Response -// headers are in either -// *InstanceGroupManagersListManagedInstancesResponse.ServerResponse.Head -// er or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagersListManagedInstancesResponse, error) { +// Do executes the "compute.instanceTemplates.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -45989,7 +60139,7 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.Cal if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupManagersListManagedInstancesResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -46001,40 +60151,21 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.Cal } return ret, nil // { - // "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action.", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.listManagedInstances", + // "description": "Deletes the specified instance template. If you delete an instance template that is being referenced from another instance group, the instance group will not be able to create or recreate virtual machine instances. Deleting an instance template is permanent and cannot be undone.", + // "httpMethod": "DELETE", + // "id": "compute.instanceTemplates.delete", // "parameterOrder": [ // "project", - // "zone", - // "instanceGroupManager" + // "instanceTemplate" // ], // "parameters": { - // "filter": { - // "location": "query", - // "type": "string" - // }, - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "instanceTemplate": { + // "description": "The name of the instance template to delete.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "maxResults": { - // "default": "500", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "order_by": { - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -46042,191 +60173,111 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.Cal // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone where the managed instance group is located.", - // "location": "path", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", + // "path": "{project}/global/instanceTemplates/{instanceTemplate}", // "response": { - // "$ref": "InstanceGroupManagersListManagedInstancesResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InstanceGroupManagersListManagedInstancesCall) Pages(ctx context.Context, f func(*InstanceGroupManagersListManagedInstancesResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instanceGroupManagers.listPerInstanceConfigs": +// method id "compute.instanceTemplates.get": -type InstanceGroupManagersListPerInstanceConfigsCall struct { - s *Service - project string - zone string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceTemplatesGetCall struct { + s *Service + project string + instanceTemplate string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// ListPerInstanceConfigs: Lists all of the per-instance configs defined -// for the managed instance group. -func (r *InstanceGroupManagersService) ListPerInstanceConfigs(project string, zone string, instanceGroupManager string) *InstanceGroupManagersListPerInstanceConfigsCall { - c := &InstanceGroupManagersListPerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified instance template. Get a list of available +// instance templates by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/get +func (r *InstanceTemplatesService) Get(project string, instanceTemplate string) *InstanceTemplatesGetCall { + c := &InstanceTemplatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instanceGroupManager = instanceGroupManager - return c -} - -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. 
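The requestId parameter documented above is what makes a retried delete safe to repeat. A small sketch, assuming the same imports and *compute.Service as the first sketch; the template name and the literal UUID are placeholders.

// deleteInstanceTemplate issues compute.instanceTemplates.delete with an
// explicit requestId so that a retry of the same logical operation is
// ignored by the server rather than re-executed.
func deleteInstanceTemplate(ctx context.Context, svc *compute.Service, project, name string) error {
	// In real code this would be a freshly generated UUID per logical delete;
	// the zero UUID is rejected by the API.
	const requestID = "3f2b6e1c-8d4a-4f0e-9c21-7a5d6b8e9f01"

	op, err := svc.InstanceTemplates.Delete(project, name).
		RequestId(requestID).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	// The returned Operation is asynchronous; callers typically poll it
	// until op.Status == "DONE" before relying on the deletion.
	fmt.Printf("delete of %s accepted: operation %s\n", name, op.Name)
	return nil
}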
The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *InstanceGroupManagersListPerInstanceConfigsCall) Filter(filter string) *InstanceGroupManagersListPerInstanceConfigsCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InstanceGroupManagersListPerInstanceConfigsCall) MaxResults(maxResults int64) *InstanceGroupManagersListPerInstanceConfigsCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InstanceGroupManagersListPerInstanceConfigsCall) OrderBy(orderBy string) *InstanceGroupManagersListPerInstanceConfigsCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InstanceGroupManagersListPerInstanceConfigsCall) PageToken(pageToken string) *InstanceGroupManagersListPerInstanceConfigsCall { - c.urlParams_.Set("pageToken", pageToken) + c.instanceTemplate = instanceTemplate return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersListPerInstanceConfigsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListPerInstanceConfigsCall { +func (c *InstanceTemplatesGetCall) Fields(s ...googleapi.Field) *InstanceTemplatesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstanceTemplatesGetCall) IfNoneMatch(entityTag string) *InstanceTemplatesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersListPerInstanceConfigsCall) Context(ctx context.Context) *InstanceGroupManagersListPerInstanceConfigsCall { +func (c *InstanceTemplatesGetCall) Context(ctx context.Context) *InstanceTemplatesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersListPerInstanceConfigsCall) Header() http.Header { +func (c *InstanceTemplatesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersListPerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceTemplatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{instanceTemplate}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "instanceTemplate": c.instanceTemplate, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.listPerInstanceConfigs" call. -// Exactly one of *InstanceGroupManagersListPerInstanceConfigsResp or -// error will be non-nil. Any non-2xx status code is an error. Response -// headers are in either -// *InstanceGroupManagersListPerInstanceConfigsResp.ServerResponse.Header -// or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *InstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagersListPerInstanceConfigsResp, error) { +// Do executes the "compute.instanceTemplates.get" call. +// Exactly one of *InstanceTemplate or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceTemplate.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTemplate, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -46245,7 +60296,7 @@ func (c *InstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googleapi.C if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupManagersListPerInstanceConfigsResp{ + ret := &InstanceTemplate{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -46257,61 +60308,32 @@ func (c *InstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googleapi.C } return ret, nil // { - // "description": "Lists all of the per-instance configs defined for the managed instance group.", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.listPerInstanceConfigs", + // "description": "Returns the specified instance template. Get a list of available instance templates by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.instanceTemplates.get", // "parameterOrder": [ // "project", - // "zone", - // "instanceGroupManager" + // "instanceTemplate" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "instanceGroupManager": { - // "description": "The name of the managed instance group. It should conform to RFC1035.", + // "instanceTemplate": { + // "description": "The name of the instance template.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the managed instance group is located. It should conform to RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs", + // "path": "{project}/global/instanceTemplates/{instanceTemplate}", // "response": { - // "$ref": "InstanceGroupManagersListPerInstanceConfigsResp" + // "$ref": "InstanceTemplate" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -46322,53 +60344,27 @@ func (c *InstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googleapi.C } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InstanceGroupManagersListPerInstanceConfigsCall) Pages(ctx context.Context, f func(*InstanceGroupManagersListPerInstanceConfigsResp) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instanceGroupManagers.patch": +// method id "compute.instanceTemplates.insert": -type InstanceGroupManagersPatchCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanager *InstanceGroupManager - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceTemplatesInsertCall struct { + s *Service + project string + instancetemplate *InstanceTemplate + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates a managed instance group using the information that -// you specify in the request. This operation is marked as DONE when the -// group is patched even if the instances in the group are still in the -// process of being patched. You must separately verify the status of -// the individual instances with the listManagedInstances method. This -// method supports PATCH semantics and uses the JSON merge patch format -// and processing rules. 
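A conditional read against the new compute.instanceTemplates.get surface, showing what the generated IfNoneMatch helper above is for. Same assumptions as the earlier sketches; googleapi is google.golang.org/api/googleapi.

// getInstanceTemplate fetches a template, optionally passing a previously
// observed ETag so an unchanged resource comes back as a 304 instead of a body.
func getInstanceTemplate(ctx context.Context, svc *compute.Service, project, name, etag string) (*compute.InstanceTemplate, error) {
	call := svc.InstanceTemplates.Get(project, name).Context(ctx)
	if etag != "" {
		call = call.IfNoneMatch(etag)
	}
	tmpl, err := call.Do()
	if googleapi.IsNotModified(err) {
		// The cached copy is still current; nothing new to return.
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	return tmpl, nil
}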
-func (r *InstanceGroupManagersService) Patch(project string, zone string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersPatchCall { - c := &InstanceGroupManagersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates an instance template in the specified project using +// the data that is included in the request. If you are creating a new +// template to update an existing instance group, your new instance +// template must use the same network or, if applicable, the same +// subnetwork as the original template. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/insert +func (r *InstanceTemplatesService) Insert(project string, instancetemplate *InstanceTemplate) *InstanceTemplatesInsertCall { + c := &InstanceTemplatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanager = instancegroupmanager + c.instancetemplate = instancetemplate return c } @@ -46386,7 +60382,7 @@ func (r *InstanceGroupManagersService) Patch(project string, zone string, instan // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersPatchCall) RequestId(requestId string) *InstanceGroupManagersPatchCall { +func (c *InstanceTemplatesInsertCall) RequestId(requestId string) *InstanceTemplatesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -46394,7 +60390,7 @@ func (c *InstanceGroupManagersPatchCall) RequestId(requestId string) *InstanceGr // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersPatchCall) Fields(s ...googleapi.Field) *InstanceGroupManagersPatchCall { +func (c *InstanceTemplatesInsertCall) Fields(s ...googleapi.Field) *InstanceTemplatesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -46402,53 +60398,51 @@ func (c *InstanceGroupManagersPatchCall) Fields(s ...googleapi.Field) *InstanceG // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersPatchCall) Context(ctx context.Context) *InstanceGroupManagersPatchCall { +func (c *InstanceTemplatesInsertCall) Context(ctx context.Context) *InstanceTemplatesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupManagersPatchCall) Header() http.Header { +func (c *InstanceTemplatesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceTemplatesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancetemplate) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.patch" call. +// Do executes the "compute.instanceTemplates.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -46479,21 +60473,13 @@ func (c *InstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listManagedInstances method. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.instanceGroupManagers.patch", + // "description": "Creates an instance template in the specified project using the data that is included in the request. 
If you are creating a new template to update an existing instance group, your new instance template must use the same network or, if applicable, the same subnetwork as the original template.", + // "httpMethod": "POST", + // "id": "compute.instanceTemplates.insert", // "parameterOrder": [ - // "project", - // "zone", - // "instanceGroupManager" + // "project" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the instance group manager.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -46505,144 +60491,174 @@ func (c *InstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Oper // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where you want to create the managed instance group.", - // "location": "path", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", + // "path": "{project}/global/instanceTemplates", // "request": { - // "$ref": "InstanceGroupManager" + // "$ref": "InstanceTemplate" // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instanceGroupManagers.recreateInstances": - -type InstanceGroupManagersRecreateInstancesCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagersrecreateinstancesrequest *InstanceGroupManagersRecreateInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.instanceTemplates.list": + +type InstanceTemplatesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of instance templates that are contained +// within the specified project and zone. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/list +func (r *InstanceTemplatesService) List(project string) *InstanceTemplatesListCall { + c := &InstanceTemplatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). 
The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *InstanceTemplatesListCall) Filter(filter string) *InstanceTemplatesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *InstanceTemplatesListCall) MaxResults(maxResults int64) *InstanceTemplatesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c } -// RecreateInstances: Schedules a group action to recreate the specified -// instances in the managed instance group. The instances are deleted -// and recreated using the current instance template for the managed -// instance group. This operation is marked as DONE when the action is -// scheduled even if the instances have not yet been recreated. You must -// separately verify the status of the recreating action with the -// listmanagedinstances method. +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. // -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or -// deleted. +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. // -// You can specify a maximum of 1000 instances with this method per -// request. 
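And the matching write path: a sketch of compute.instanceTemplates.insert with a minimal template body. Every literal below (name, machine type, image, network) is a placeholder, and the nested property types are assumed from the same generated package; same imports and client as the first sketch.

// createInstanceTemplate creates a small template; Insert returns a global
// Operation that completes asynchronously.
func createInstanceTemplate(ctx context.Context, svc *compute.Service, project string) error {
	tmpl := &compute.InstanceTemplate{
		Name: "example-template",
		Properties: &compute.InstanceProperties{
			MachineType: "n1-standard-1",
			Disks: []*compute.AttachedDisk{{
				Boot:       true,
				AutoDelete: true,
				InitializeParams: &compute.AttachedDiskInitializeParams{
					SourceImage: "projects/debian-cloud/global/images/family/debian-9",
				},
			}},
			NetworkInterfaces: []*compute.NetworkInterface{{
				Network: "global/networks/default",
			}},
		},
	}
	op, err := svc.InstanceTemplates.Insert(project, tmpl).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Println("insert accepted: operation", op.Name)
	return nil
}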
-func (r *InstanceGroupManagersService) RecreateInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersrecreateinstancesrequest *InstanceGroupManagersRecreateInstancesRequest) *InstanceGroupManagersRecreateInstancesCall { - c := &InstanceGroupManagersRecreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagersrecreateinstancesrequest = instancegroupmanagersrecreateinstancesrequest +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InstanceTemplatesListCall) OrderBy(orderBy string) *InstanceTemplatesListCall { + c.urlParams_.Set("orderBy", orderBy) return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersRecreateInstancesCall) RequestId(requestId string) *InstanceGroupManagersRecreateInstancesCall { - c.urlParams_.Set("requestId", requestId) +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InstanceTemplatesListCall) PageToken(pageToken string) *InstanceTemplatesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersRecreateInstancesCall { +func (c *InstanceTemplatesListCall) Fields(s ...googleapi.Field) *InstanceTemplatesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstanceTemplatesListCall) IfNoneMatch(entityTag string) *InstanceTemplatesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersRecreateInstancesCall) Context(ctx context.Context) *InstanceGroupManagersRecreateInstancesCall { +func (c *InstanceTemplatesListCall) Context(ctx context.Context) *InstanceTemplatesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupManagersRecreateInstancesCall) Header() http.Header { +func (c *InstanceTemplatesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceTemplatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersrecreateinstancesrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.recreateInstances" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instanceTemplates.list" call. +// Exactly one of *InstanceTemplateList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceTemplateList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceTemplateList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -46661,7 +60677,7 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOp if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InstanceTemplateList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -46673,110 +60689,104 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOp } return ret, nil // { - // "description": "Schedules a group action to recreate the specified instances in the managed instance group. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the action is scheduled even if the instances have not yet been recreated. 
You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.recreateInstances", + // "description": "Retrieves a list of instance templates that are contained within the specified project and zone.", + // "httpMethod": "GET", + // "id": "compute.instanceTemplates.list", // "parameterOrder": [ - // "project", - // "zone", - // "instanceGroupManager" + // "project" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", - // "location": "path", - // "required": true, + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, - // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", - // "request": { - // "$ref": "InstanceGroupManagersRecreateInstancesRequest" - // }, + // "path": "{project}/global/instanceTemplates", // "response": { - // "$ref": "Operation" + // "$ref": "InstanceTemplateList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instanceGroupManagers.resize": - -type InstanceGroupManagersResizeCall struct { - s *Service - project string - zone string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InstanceTemplatesListCall) Pages(ctx context.Context, f func(*InstanceTemplateList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } } -// Resize: Resizes the managed instance group. If you increase the size, -// the group creates new instances using the current instance template. -// If you decrease the size, the group deletes instances. The resize -// operation is marked DONE when the resize actions are scheduled even -// if the group has not yet added or deleted any instances. You must -// separately verify the status of the creating or deleting actions with -// the listmanagedinstances method. 
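Finally, the Pages helper generated for compute.instanceTemplates.list above handles nextPageToken for the caller; a short sketch, with the same assumptions as before.

// listInstanceTemplates walks every page of templates in the project and
// prints their names; returning a non-nil error from the callback stops paging.
func listInstanceTemplates(ctx context.Context, svc *compute.Service, project string) error {
	return svc.InstanceTemplates.List(project).
		MaxResults(50).
		Pages(ctx, func(page *compute.InstanceTemplateList) error {
			for _, t := range page.Items {
				fmt.Println(t.Name)
			}
			return nil
		})
}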
-// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or deleted. -func (r *InstanceGroupManagersService) Resize(project string, zone string, instanceGroupManager string, size int64) *InstanceGroupManagersResizeCall { - c := &InstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.urlParams_.Set("size", fmt.Sprint(size)) - return c +// method id "compute.instanceTemplates.testIamPermissions": + +type InstanceTemplatesTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersResizeCall) RequestId(requestId string) *InstanceGroupManagersResizeCall { - c.urlParams_.Set("requestId", requestId) +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *InstanceTemplatesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstanceTemplatesTestIamPermissionsCall { + c := &InstanceTemplatesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *InstanceGroupManagersResizeCall { +func (c *InstanceTemplatesTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstanceTemplatesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -46784,48 +60794,52 @@ func (c *InstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersResizeCall) Context(ctx context.Context) *InstanceGroupManagersResizeCall { +func (c *InstanceTemplatesTestIamPermissionsCall) Context(ctx context.Context) *InstanceTemplatesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupManagersResizeCall) Header() http.Header { +func (c *InstanceTemplatesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceTemplatesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.resize" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instanceTemplates.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -46844,7 +60858,7 @@ func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Ope if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -46856,22 +60870,14 @@ func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Resizes the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. 
You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.resize", + // "id": "compute.instanceTemplates.testIamPermissions", // "parameterOrder": [ // "project", - // "zone", - // "instanceGroupManager", - // "size" + // "resource" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -46879,71 +60885,53 @@ func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Ope // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "size": { - // "description": "The number of running instances that the managed instance group should maintain at any given time. 
The group automatically adds or removes instances to maintain the number of instances specified by this parameter.", - // "format": "int32", - // "location": "query", - // "required": true, - // "type": "integer" - // }, - // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "resource": { + // "description": "Name of the resource for this request.", // "location": "path", + // "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize", + // "path": "{project}/global/instanceTemplates/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instanceGroupManagers.resizeAdvanced": +// method id "compute.instances.addAccessConfig": -type InstanceGroupManagersResizeAdvancedCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagersresizeadvancedrequest *InstanceGroupManagersResizeAdvancedRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesAddAccessConfigCall struct { + s *Service + project string + zone string + instance string + accessconfig *AccessConfig + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ResizeAdvanced: Resizes the managed instance group with advanced -// configuration options like disabling creation retries. This is an -// extended version of the resize method. -// -// If you increase the size of the instance group, the group creates new -// instances using the current instance template. If you decrease the -// size, the group deletes instances. The resize operation is marked -// DONE when the resize actions are scheduled even if the group has not -// yet added or deleted any instances. You must separately verify the -// status of the creating, creatingWithoutRetries, or deleting actions -// with the get or listmanagedinstances method. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or deleted. -func (r *InstanceGroupManagersService) ResizeAdvanced(project string, zone string, instanceGroupManager string, instancegroupmanagersresizeadvancedrequest *InstanceGroupManagersResizeAdvancedRequest) *InstanceGroupManagersResizeAdvancedCall { - c := &InstanceGroupManagersResizeAdvancedCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AddAccessConfig: Adds an access config to an instance's network +// interface. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/addAccessConfig +func (r *InstancesService) AddAccessConfig(project string, zone string, instance string, networkInterface string, accessconfig *AccessConfig) *InstancesAddAccessConfigCall { + c := &InstancesAddAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagersresizeadvancedrequest = instancegroupmanagersresizeadvancedrequest + c.instance = instance + c.urlParams_.Set("networkInterface", networkInterface) + c.accessconfig = accessconfig return c } @@ -46961,7 +60949,7 @@ func (r *InstanceGroupManagersService) ResizeAdvanced(project string, zone strin // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersResizeAdvancedCall) RequestId(requestId string) *InstanceGroupManagersResizeAdvancedCall { +func (c *InstancesAddAccessConfigCall) RequestId(requestId string) *InstancesAddAccessConfigCall { c.urlParams_.Set("requestId", requestId) return c } @@ -46969,7 +60957,7 @@ func (c *InstanceGroupManagersResizeAdvancedCall) RequestId(requestId string) *I // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersResizeAdvancedCall) Fields(s ...googleapi.Field) *InstanceGroupManagersResizeAdvancedCall { +func (c *InstancesAddAccessConfigCall) Fields(s ...googleapi.Field) *InstancesAddAccessConfigCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -46977,53 +60965,53 @@ func (c *InstanceGroupManagersResizeAdvancedCall) Fields(s ...googleapi.Field) * // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersResizeAdvancedCall) Context(ctx context.Context) *InstanceGroupManagersResizeAdvancedCall { +func (c *InstancesAddAccessConfigCall) Context(ctx context.Context) *InstancesAddAccessConfigCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersResizeAdvancedCall) Header() http.Header { +func (c *InstancesAddAccessConfigCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersResizeAdvancedCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesAddAccessConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersresizeadvancedrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.accessconfig) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resizeAdvanced") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/addAccessConfig") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.resizeAdvanced" call. +// Do executes the "compute.instances.addAccessConfig" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersResizeAdvancedCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -47054,18 +61042,26 @@ func (c *InstanceGroupManagersResizeAdvancedCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Resizes the managed instance group with advanced configuration options like disabling creation retries. This is an extended version of the resize method.\n\nIf you increase the size of the instance group, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. 
You must separately verify the status of the creating, creatingWithoutRetries, or deleting actions with the get or listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", + // "description": "Adds an access config to an instance's network interface.", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.resizeAdvanced", + // "id": "compute.instances.addAccessConfig", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instance", + // "networkInterface" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "instance": { + // "description": "The instance name for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "networkInterface": { + // "description": "The name of the network interface to add to this instance.", + // "location": "query", // "required": true, // "type": "string" // }, @@ -47082,15 +61078,16 @@ func (c *InstanceGroupManagersResizeAdvancedCall) Do(opts ...googleapi.CallOptio // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resizeAdvanced", + // "path": "{project}/zones/{zone}/instances/{instance}/addAccessConfig", // "request": { - // "$ref": "InstanceGroupManagersResizeAdvancedRequest" + // "$ref": "AccessConfig" // }, // "response": { // "$ref": "Operation" @@ -47103,106 +61100,156 @@ func (c *InstanceGroupManagersResizeAdvancedCall) Do(opts ...googleapi.CallOptio } -// method id "compute.instanceGroupManagers.setAutoHealingPolicies": +// method id "compute.instances.aggregatedList": -type InstanceGroupManagersSetAutoHealingPoliciesCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagerssetautohealingrequest *InstanceGroupManagersSetAutoHealingRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetAutoHealingPolicies: Modifies the autohealing policies. -func (r *InstanceGroupManagersService) SetAutoHealingPolicies(project string, zone string, instanceGroupManager string, instancegroupmanagerssetautohealingrequest *InstanceGroupManagersSetAutoHealingRequest) *InstanceGroupManagersSetAutoHealingPoliciesCall { - c := &InstanceGroupManagersSetAutoHealingPoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves aggregated list of instances. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/aggregatedList +func (r *InstancesService) AggregatedList(project string) *InstancesAggregatedListCall { + c := &InstancesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagerssetautohealingrequest = instancegroupmanagerssetautohealingrequest return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) RequestId(requestId string) *InstanceGroupManagersSetAutoHealingPoliciesCall { - c.urlParams_.Set("requestId", requestId) +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *InstancesAggregatedListCall) Filter(filter string) *InstancesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. 
+// (Default: 500) +func (c *InstancesAggregatedListCall) MaxResults(maxResults int64) *InstancesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InstancesAggregatedListCall) OrderBy(orderBy string) *InstancesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InstancesAggregatedListCall) PageToken(pageToken string) *InstancesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetAutoHealingPoliciesCall { +func (c *InstancesAggregatedListCall) Fields(s ...googleapi.Field) *InstancesAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesAggregatedListCall) IfNoneMatch(entityTag string) *InstancesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Context(ctx context.Context) *InstanceGroupManagersSetAutoHealingPoliciesCall { +func (c *InstancesAggregatedListCall) Context(ctx context.Context) *InstancesAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
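// Illustrative sketch (not part of the vendored diff): a conditional aggregated
// list using the IfNoneMatch / googleapi.IsNotModified pattern documented above.
// The ETag value, project ID, and client setup are placeholders, and the compute
// import path is an assumption -- adjust it to the package actually vendored here.
package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v0.beta" // assumed import path
	"google.golang.org/api/googleapi"
)

func main() {
	ctx := context.Background()
	hc, err := google.DefaultClient(ctx, compute.ComputeReadonlyScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(hc)
	if err != nil {
		log.Fatal(err)
	}

	lastETag := `"abc123"` // ETag remembered from an earlier poll (placeholder)
	list, err := svc.Instances.AggregatedList("my-project").IfNoneMatch(lastETag).Do()
	switch {
	case googleapi.IsNotModified(err):
		fmt.Println("not modified since the last poll; reuse the cached result")
	case err != nil:
		log.Fatal(err)
	default:
		fmt.Printf("fresh result with %d zone entries, ETag %q\n",
			len(list.Items), list.ServerResponse.Header.Get("ETag"))
	}
}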
-func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Header() http.Header { +func (c *InstancesAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssetautohealingrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instances") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.setAutoHealingPolicies" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instances.aggregatedList" call. +// Exactly one of *InstanceAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -47221,7 +61268,7 @@ func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googleapi.C if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InstanceAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -47233,77 +61280,110 @@ func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googleapi.C } return ret, nil // { - // "description": "Modifies the autohealing policies.", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.setAutoHealingPolicies", + // "description": "Retrieves aggregated list of instances.", + // "httpMethod": "GET", + // "id": "compute.instances.aggregatedList", // "parameterOrder": [ - // "project", - // "zone", - // "instanceGroupManager" + // "project" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the instance group manager.", - // "location": "path", - // "required": true, + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". 
This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, - // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies", - // "request": { - // "$ref": "InstanceGroupManagersSetAutoHealingRequest" - // }, + // "path": "{project}/aggregated/instances", // "response": { - // "$ref": "Operation" + // "$ref": "InstanceAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instanceGroupManagers.setInstanceTemplate": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
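// Illustrative sketch (not part of the vendored diff): walking every page of an
// aggregated instance listing with the generated Pages helper, using the filter
// and maxResults parameters documented above. The client setup and project ID
// are placeholders, and the compute import path is an assumption -- adjust it to
// the package actually vendored here.
package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

func main() {
	ctx := context.Background()
	hc, err := google.DefaultClient(ctx, compute.ComputeReadonlyScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(hc)
	if err != nil {
		log.Fatal(err)
	}

	call := svc.Instances.AggregatedList("my-project"). // placeholder project ID
		Filter("status eq RUNNING").                // expression form documented above
		MaxResults(100)

	err = call.Pages(ctx, func(page *compute.InstanceAggregatedList) error {
		for zone, scoped := range page.Items {
			for _, inst := range scoped.Instances {
				fmt.Printf("%s\t%s\t%s\n", zone, inst.Name, inst.Status)
			}
		}
		return nil // a non-nil error here stops the iteration early
	})
	if err != nil {
		log.Fatal(err)
	}
}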
+func (c *InstancesAggregatedListCall) Pages(ctx context.Context, f func(*InstanceAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstanceGroupManagersSetInstanceTemplateCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagerssetinstancetemplaterequest *InstanceGroupManagersSetInstanceTemplateRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.instances.attachDisk": + +type InstancesAttachDiskCall struct { + s *Service + project string + zone string + instance string + attacheddisk *AttachedDisk + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetInstanceTemplate: Specifies the instance template to use when -// creating new instances in this group. The templates for existing -// instances in the group do not change unless you recreate them. -func (r *InstanceGroupManagersService) SetInstanceTemplate(project string, zone string, instanceGroupManager string, instancegroupmanagerssetinstancetemplaterequest *InstanceGroupManagersSetInstanceTemplateRequest) *InstanceGroupManagersSetInstanceTemplateCall { - c := &InstanceGroupManagersSetInstanceTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AttachDisk: Attaches an existing Disk resource to an instance. You +// must first create the disk before you can attach it. It is not +// possible to create and attach a disk at the same time. For more +// information, read Adding a persistent disk to your instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/attachDisk +func (r *InstancesService) AttachDisk(project string, zone string, instance string, attacheddisk *AttachedDisk) *InstancesAttachDiskCall { + c := &InstancesAttachDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagerssetinstancetemplaterequest = instancegroupmanagerssetinstancetemplaterequest + c.instance = instance + c.attacheddisk = attacheddisk + return c +} + +// ForceAttach sets the optional parameter "forceAttach": Whether to +// force attach the disk even if it's currently attached to another +// instance. This is only available for regional disks. +func (c *InstancesAttachDiskCall) ForceAttach(forceAttach bool) *InstancesAttachDiskCall { + c.urlParams_.Set("forceAttach", fmt.Sprint(forceAttach)) return c } @@ -47321,7 +61401,7 @@ func (r *InstanceGroupManagersService) SetInstanceTemplate(project string, zone // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersSetInstanceTemplateCall) RequestId(requestId string) *InstanceGroupManagersSetInstanceTemplateCall { +func (c *InstancesAttachDiskCall) RequestId(requestId string) *InstancesAttachDiskCall { c.urlParams_.Set("requestId", requestId) return c } @@ -47329,7 +61409,7 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) RequestId(requestId strin // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
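// Illustrative sketch (not part of the vendored diff): attaching an existing
// persistent disk with the AttachDisk call described above, passing an
// idempotency requestId. The project, zone, instance, disk URL, and UUID are
// placeholders, and the compute import path is an assumption -- adjust it to the
// package actually vendored here.
package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

func main() {
	ctx := context.Background()
	hc, err := google.DefaultClient(ctx, compute.ComputeScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(hc)
	if err != nil {
		log.Fatal(err)
	}

	disk := &compute.AttachedDisk{
		Source:     "projects/my-project/zones/us-central1-f/disks/my-data-disk",
		DeviceName: "my-data-disk",
	}
	op, err := svc.Instances.
		AttachDisk("my-project", "us-central1-f", "my-instance", disk).
		RequestId("3c1b4e36-5f0a-4f3e-9a9a-0a1b2c3d4e5f"). // must be a valid, non-zero UUID
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("started zone operation %s (status %s)\n", op.Name, op.Status)
}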
-func (c *InstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetInstanceTemplateCall { +func (c *InstancesAttachDiskCall) Fields(s ...googleapi.Field) *InstancesAttachDiskCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -47337,53 +61417,53 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Fie // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersSetInstanceTemplateCall) Context(ctx context.Context) *InstanceGroupManagersSetInstanceTemplateCall { +func (c *InstancesAttachDiskCall) Context(ctx context.Context) *InstancesAttachDiskCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersSetInstanceTemplateCall) Header() http.Header { +func (c *InstancesAttachDiskCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesAttachDiskCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssetinstancetemplaterequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.attacheddisk) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/attachDisk") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.setInstanceTemplate" call. +// Do executes the "compute.instances.attachDisk" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -47414,18 +61494,24 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.Call } return ret, nil // { - // "description": "Specifies the instance template to use when creating new instances in this group. 
The templates for existing instances in the group do not change unless you recreate them.", + // "description": "Attaches an existing Disk resource to an instance. You must first create the disk before you can attach it. It is not possible to create and attach a disk at the same time. For more information, read Adding a persistent disk to your instance.", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.setInstanceTemplate", + // "id": "compute.instances.attachDisk", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instance" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "forceAttach": { + // "description": "Whether to force attach the disk even if it's currently attached to another instance. This is only available for regional disks.", + // "location": "query", + // "type": "boolean" + // }, + // "instance": { + // "description": "The instance name for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -47442,15 +61528,16 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.Call // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", + // "path": "{project}/zones/{zone}/instances/{instance}/attachDisk", // "request": { - // "$ref": "InstanceGroupManagersSetInstanceTemplateRequest" + // "$ref": "AttachedDisk" // }, // "response": { // "$ref": "Operation" @@ -47463,32 +61550,26 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.Call } -// method id "compute.instanceGroupManagers.setTargetPools": +// method id "compute.instances.delete": -type InstanceGroupManagersSetTargetPoolsCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagerssettargetpoolsrequest *InstanceGroupManagersSetTargetPoolsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesDeleteCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetTargetPools: Modifies the target pools to which all instances in -// this managed instance group are assigned. The target pools -// automatically apply to all of the instances in the managed instance -// group. This operation is marked DONE when you make the request even -// if the instances have not yet been added to their target pools. The -// change might take some time to apply to all of the instances in the -// group depending on the size of the group. -func (r *InstanceGroupManagersService) SetTargetPools(project string, zone string, instanceGroupManager string, instancegroupmanagerssettargetpoolsrequest *InstanceGroupManagersSetTargetPoolsRequest) *InstanceGroupManagersSetTargetPoolsCall { - c := &InstanceGroupManagersSetTargetPoolsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified Instance resource. For more +// information, see Stopping or Deleting an Instance. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/delete +func (r *InstancesService) Delete(project string, zone string, instance string) *InstancesDeleteCall { + c := &InstancesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagerssettargetpoolsrequest = instancegroupmanagerssettargetpoolsrequest + c.instance = instance return c } @@ -47506,7 +61587,7 @@ func (r *InstanceGroupManagersService) SetTargetPools(project string, zone strin // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersSetTargetPoolsCall) RequestId(requestId string) *InstanceGroupManagersSetTargetPoolsCall { +func (c *InstancesDeleteCall) RequestId(requestId string) *InstancesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -47514,7 +61595,7 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) RequestId(requestId string) *I // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetTargetPoolsCall { +func (c *InstancesDeleteCall) Fields(s ...googleapi.Field) *InstancesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -47522,53 +61603,48 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Field) * // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersSetTargetPoolsCall) Context(ctx context.Context) *InstanceGroupManagersSetTargetPoolsCall { +func (c *InstancesDeleteCall) Context(ctx context.Context) *InstancesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersSetTargetPoolsCall) Header() http.Header { +func (c *InstancesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssettargetpoolsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.setTargetPools" call. +// Do executes the "compute.instances.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -47599,18 +61675,19 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Modifies the target pools to which all instances in this managed instance group are assigned. The target pools automatically apply to all of the instances in the managed instance group. This operation is marked DONE when you make the request even if the instances have not yet been added to their target pools. The change might take some time to apply to all of the instances in the group depending on the size of the group.", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.setTargetPools", + // "description": "Deletes the specified Instance resource. 
For more information, see Stopping or Deleting an Instance.", + // "httpMethod": "DELETE", + // "id": "compute.instances.delete", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instance" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "instance": { + // "description": "Name of the instance resource to delete.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -47627,16 +61704,14 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOptio // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", - // "request": { - // "$ref": "InstanceGroupManagersSetTargetPoolsRequest" - // }, + // "path": "{project}/zones/{zone}/instances/{instance}", // "response": { // "$ref": "Operation" // }, @@ -47648,34 +61723,54 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOptio } -// method id "compute.instanceGroupManagers.testIamPermissions": +// method id "compute.instances.deleteAccessConfig": -type InstanceGroupManagersTestIamPermissionsCall struct { - s *Service - project string - zone string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesDeleteAccessConfigCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *InstanceGroupManagersService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstanceGroupManagersTestIamPermissionsCall { - c := &InstanceGroupManagersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// DeleteAccessConfig: Deletes an access config from an instance's +// network interface. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/deleteAccessConfig +func (r *InstancesService) DeleteAccessConfig(project string, zone string, instance string, accessConfig string, networkInterface string) *InstancesDeleteAccessConfigCall { + c := &InstancesDeleteAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.instance = instance + c.urlParams_.Set("accessConfig", accessConfig) + c.urlParams_.Set("networkInterface", networkInterface) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesDeleteAccessConfigCall) RequestId(requestId string) *InstancesDeleteAccessConfigCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersTestIamPermissionsCall { +func (c *InstancesDeleteAccessConfigCall) Fields(s ...googleapi.Field) *InstancesDeleteAccessConfigCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -47683,53 +61778,48 @@ func (c *InstanceGroupManagersTestIamPermissionsCall) Fields(s ...googleapi.Fiel // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersTestIamPermissionsCall) Context(ctx context.Context) *InstanceGroupManagersTestIamPermissionsCall { +func (c *InstancesDeleteAccessConfigCall) Context(ctx context.Context) *InstancesDeleteAccessConfigCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersTestIamPermissionsCall) Header() http.Header { +func (c *InstancesDeleteAccessConfigCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesDeleteAccessConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, - "resource": c.resource, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. 
-func (c *InstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.instances.deleteAccessConfig" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -47748,7 +61838,7 @@ func (c *InstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi.CallO if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -47760,15 +61850,36 @@ func (c *InstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi.CallO } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Deletes an access config from an instance's network interface.", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.testIamPermissions", + // "id": "compute.instances.deleteAccessConfig", // "parameterOrder": [ // "project", // "zone", - // "resource" + // "instance", + // "accessConfig", + // "networkInterface" // ], // "parameters": { + // "accessConfig": { + // "description": "The name of the access config to delete.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "instance": { + // "description": "The instance name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "networkInterface": { + // "description": "The name of the network interface.", + // "location": "query", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -47776,11 +61887,9 @@ func (c *InstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi.CallO // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // }, // "zone": { @@ -47791,46 +61900,38 @@ func (c *InstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi.CallO // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instanceGroupManagers.update": +// method id "compute.instances.detachDisk": -type InstanceGroupManagersUpdateCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanager *InstanceGroupManager - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesDetachDiskCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates a managed instance group using the information that -// you specify in the request. This operation is marked as DONE when the -// group is updated even if the instances in the group have not yet been -// updated. You must separately verify the status of the individual -// instances with the listManagedInstances method. -func (r *InstanceGroupManagersService) Update(project string, zone string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersUpdateCall { - c := &InstanceGroupManagersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// DetachDisk: Detaches a disk from an instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/detachDisk +func (r *InstancesService) DetachDisk(project string, zone string, instance string, deviceName string) *InstancesDetachDiskCall { + c := &InstancesDetachDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanager = instancegroupmanager + c.instance = instance + c.urlParams_.Set("deviceName", deviceName) return c } @@ -47848,7 +61949,7 @@ func (r *InstanceGroupManagersService) Update(project string, zone string, insta // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersUpdateCall) RequestId(requestId string) *InstanceGroupManagersUpdateCall { +func (c *InstancesDetachDiskCall) RequestId(requestId string) *InstancesDetachDiskCall { c.urlParams_.Set("requestId", requestId) return c } @@ -47856,7 +61957,7 @@ func (c *InstanceGroupManagersUpdateCall) RequestId(requestId string) *InstanceG // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InstanceGroupManagersUpdateCall) Fields(s ...googleapi.Field) *InstanceGroupManagersUpdateCall { +func (c *InstancesDetachDiskCall) Fields(s ...googleapi.Field) *InstancesDetachDiskCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -47864,53 +61965,48 @@ func (c *InstanceGroupManagersUpdateCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersUpdateCall) Context(ctx context.Context) *InstanceGroupManagersUpdateCall { +func (c *InstancesDetachDiskCall) Context(ctx context.Context) *InstancesDetachDiskCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersUpdateCall) Header() http.Header { +func (c *InstancesDetachDiskCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesDetachDiskCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/detachDisk") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.update" call. +// Do executes the "compute.instances.detachDisk" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -47941,18 +62037,26 @@ func (c *InstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is updated even if the instances in the group have not yet been updated. 
You must separately verify the status of the individual instances with the listManagedInstances method.", - // "httpMethod": "PUT", - // "id": "compute.instanceGroupManagers.update", + // "description": "Detaches a disk from an instance.", + // "httpMethod": "POST", + // "id": "compute.instances.detachDisk", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instance", + // "deviceName" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the instance group manager.", + // "deviceName": { + // "description": "Disk device name to detach.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "instance": { + // "description": "Instance name.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -47969,16 +62073,14 @@ func (c *InstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // }, // "zone": { - // "description": "The name of the zone where you want to create the managed instance group.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", - // "request": { - // "$ref": "InstanceGroupManager" - // }, + // "path": "{project}/zones/{zone}/instances/{instance}/detachDisk", // "response": { // "$ref": "Operation" // }, @@ -47990,97 +62092,96 @@ func (c *InstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.instanceGroupManagers.updatePerInstanceConfigs": +// method id "compute.instances.get": -type InstanceGroupManagersUpdatePerInstanceConfigsCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagersupdateperinstanceconfigsreq *InstanceGroupManagersUpdatePerInstanceConfigsReq - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesGetCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// UpdatePerInstanceConfigs: Insert or patch (for the ones that already -// exist) per-instance configs for the managed instance group. -// perInstanceConfig.instance serves as a key used to distinguish -// whether to perform insert or patch. -func (r *InstanceGroupManagersService) UpdatePerInstanceConfigs(project string, zone string, instanceGroupManager string, instancegroupmanagersupdateperinstanceconfigsreq *InstanceGroupManagersUpdatePerInstanceConfigsReq) *InstanceGroupManagersUpdatePerInstanceConfigsCall { - c := &InstanceGroupManagersUpdatePerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified Instance resource. Get a list of available +// instances by making a list() request. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/get +func (r *InstancesService) Get(project string, zone string, instance string) *InstancesGetCall { + c := &InstancesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagersupdateperinstanceconfigsreq = instancegroupmanagersupdateperinstanceconfigsreq - return c -} - -// RequestId sets the optional parameter "requestId": begin_interface: -// MixerMutationRequestBuilder Request ID to support idempotency. -func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) RequestId(requestId string) *InstanceGroupManagersUpdatePerInstanceConfigsCall { - c.urlParams_.Set("requestId", requestId) + c.instance = instance return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersUpdatePerInstanceConfigsCall { +func (c *InstancesGetCall) Fields(s ...googleapi.Field) *InstancesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesGetCall) IfNoneMatch(entityTag string) *InstancesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) Context(ctx context.Context) *InstanceGroupManagersUpdatePerInstanceConfigsCall { +func (c *InstancesGetCall) Context(ctx context.Context) *InstancesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) Header() http.Header { +func (c *InstancesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersupdateperinstanceconfigsreq) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.updatePerInstanceConfigs" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.instances.get" call. +// Exactly one of *Instance or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Instance.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -48099,7 +62200,7 @@ func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...googleapi if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Instance{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -48111,18 +62212,19 @@ func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...googleapi } return ret, nil // { - // "description": "Insert or patch (for the ones that already exist) per-instance configs for the managed instance group. perInstanceConfig.instance serves as a key used to distinguish whether to perform insert or patch.", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.updatePerInstanceConfigs", + // "description": "Returns the specified Instance resource. Get a list of available instances by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.instances.get", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instance" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group. It should conform to RFC1035.", + // "instance": { + // "description": "Name of the instance resource to return.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -48133,135 +62235,122 @@ func (c *InstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...googleapi // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "begin_interface: MixerMutationRequestBuilder Request ID to support idempotency.", - // "location": "query", - // "type": "string" - // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located. 
It should conform to RFC1035.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs", - // "request": { - // "$ref": "InstanceGroupManagersUpdatePerInstanceConfigsReq" - // }, + // "path": "{project}/zones/{zone}/instances/{instance}", // "response": { - // "$ref": "Operation" + // "$ref": "Instance" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instanceGroups.addInstances": +// method id "compute.instances.getGuestAttributes": -type InstanceGroupsAddInstancesCall struct { - s *Service - project string - zone string - instanceGroup string - instancegroupsaddinstancesrequest *InstanceGroupsAddInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesGetGuestAttributesCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// AddInstances: Adds a list of instances to the specified instance -// group. All of the instances in the instance group must be in the same -// network/subnetwork. Read Adding instances for more information. -func (r *InstanceGroupsService) AddInstances(project string, zone string, instanceGroup string, instancegroupsaddinstancesrequest *InstanceGroupsAddInstancesRequest) *InstanceGroupsAddInstancesCall { - c := &InstanceGroupsAddInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetGuestAttributes: Returns the specified guest attributes entry. +func (r *InstancesService) GetGuestAttributes(project string, zone string, instance string) *InstancesGetGuestAttributesCall { + c := &InstancesGetGuestAttributesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroup = instanceGroup - c.instancegroupsaddinstancesrequest = instancegroupsaddinstancesrequest + c.instance = instance return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupsAddInstancesCall) RequestId(requestId string) *InstanceGroupsAddInstancesCall { - c.urlParams_.Set("requestId", requestId) +// VariableKey sets the optional parameter "variableKey": Specifies the +// key for the guest attributes entry. 
+func (c *InstancesGetGuestAttributesCall) VariableKey(variableKey string) *InstancesGetGuestAttributesCall { + c.urlParams_.Set("variableKey", variableKey) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsAddInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsAddInstancesCall { +func (c *InstancesGetGuestAttributesCall) Fields(s ...googleapi.Field) *InstancesGetGuestAttributesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesGetGuestAttributesCall) IfNoneMatch(entityTag string) *InstancesGetGuestAttributesCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsAddInstancesCall) Context(ctx context.Context) *InstanceGroupsAddInstancesCall { +func (c *InstancesGetGuestAttributesCall) Context(ctx context.Context) *InstancesGetGuestAttributesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupsAddInstancesCall) Header() http.Header { +func (c *InstancesGetGuestAttributesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsAddInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesGetGuestAttributesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupsaddinstancesrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/getGuestAttributes") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.addInstances" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.instances.getGuestAttributes" call. +// Exactly one of *GuestAttributes or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// *GuestAttributes.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstancesGetGuestAttributesCall) Do(opts ...googleapi.CallOption) (*GuestAttributes, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -48280,7 +62369,7 @@ func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Oper if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &GuestAttributes{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -48292,18 +62381,19 @@ func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Adds a list of instances to the specified instance group. All of the instances in the instance group must be in the same network/subnetwork. Read Adding instances for more information.", - // "httpMethod": "POST", - // "id": "compute.instanceGroups.addInstances", + // "description": "Returns the specified guest attributes entry.", + // "httpMethod": "GET", + // "id": "compute.instances.getGuestAttributes", // "parameterOrder": [ // "project", // "zone", - // "instanceGroup" + // "instance" // ], // "parameters": { - // "instanceGroup": { - // "description": "The name of the instance group where you are adding instances.", + // "instance": { + // "description": "Name of the instance scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -48314,123 +62404,59 @@ func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Oper // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "variableKey": { + // "description": "Specifies the key for the guest attributes entry.", // "location": "query", // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances", - // "request": { - // "$ref": "InstanceGroupsAddInstancesRequest" - // }, + // "path": "{project}/zones/{zone}/instances/{instance}/getGuestAttributes", // "response": { - // "$ref": "Operation" + // "$ref": "GuestAttributes" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instanceGroups.aggregatedList": +// method id "compute.instances.getIamPolicy": -type InstanceGroupsAggregatedListCall struct { +type InstancesGetIamPolicyCall struct { s *Service project string + zone string + resource string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// AggregatedList: Retrieves the list of instance groups and sorts them -// by zone. -func (r *InstanceGroupsService) AggregatedList(project string) *InstanceGroupsAggregatedListCall { - c := &InstanceGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. +func (r *InstancesService) GetIamPolicy(project string, zone string, resource string) *InstancesGetIamPolicyCall { + c := &InstancesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). 
Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *InstanceGroupsAggregatedListCall) Filter(filter string) *InstanceGroupsAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InstanceGroupsAggregatedListCall) MaxResults(maxResults int64) *InstanceGroupsAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InstanceGroupsAggregatedListCall) OrderBy(orderBy string) *InstanceGroupsAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InstanceGroupsAggregatedListCall) PageToken(pageToken string) *InstanceGroupsAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) + c.zone = zone + c.resource = resource return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsAggregatedListCall) Fields(s ...googleapi.Field) *InstanceGroupsAggregatedListCall { +func (c *InstancesGetIamPolicyCall) Fields(s ...googleapi.Field) *InstancesGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -48440,7 +62466,7 @@ func (c *InstanceGroupsAggregatedListCall) Fields(s ...googleapi.Field) *Instanc // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InstanceGroupsAggregatedListCall) IfNoneMatch(entityTag string) *InstanceGroupsAggregatedListCall { +func (c *InstancesGetIamPolicyCall) IfNoneMatch(entityTag string) *InstancesGetIamPolicyCall { c.ifNoneMatch_ = entityTag return c } @@ -48448,21 +62474,21 @@ func (c *InstanceGroupsAggregatedListCall) IfNoneMatch(entityTag string) *Instan // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *InstanceGroupsAggregatedListCall) Context(ctx context.Context) *InstanceGroupsAggregatedListCall { +func (c *InstancesGetIamPolicyCall) Context(ctx context.Context) *InstancesGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupsAggregatedListCall) Header() http.Header { +func (c *InstancesGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -48473,24 +62499,26 @@ func (c *InstanceGroupsAggregatedListCall) doRequest(alt string) (*http.Response } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instanceGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.aggregatedList" call. -// Exactly one of *InstanceGroupAggregatedList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *InstanceGroupAggregatedList.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupAggregatedList, error) { +// Do executes the "compute.instances.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *InstancesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -48509,7 +62537,7 @@ func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*In if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupAggregatedList{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -48521,47 +62549,40 @@ func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*In } return ret, nil // { - // "description": "Retrieves the list of instance groups and sorts them by zone.", + // "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", // "httpMethod": "GET", - // "id": "compute.instanceGroups.aggregatedList", + // "id": "compute.instances.getIamPolicy", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "resource" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/aggregated/instanceGroups", + // "path": "{project}/zones/{zone}/instances/{resource}/getIamPolicy", // "response": { - // "$ref": "InstanceGroupAggregatedList" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -48572,123 +62593,114 @@ func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*In } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InstanceGroupsAggregatedListCall) Pages(ctx context.Context, f func(*InstanceGroupAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instanceGroups.delete": +// method id "compute.instances.getSerialPortOutput": -type InstanceGroupsDeleteCall struct { - s *Service - project string - zone string - instanceGroup string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesGetSerialPortOutputCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified instance group. The instances in the -// group are not deleted. Note that instance group must not belong to a -// backend service. Read Deleting an instance group for more -// information. -func (r *InstanceGroupsService) Delete(project string, zone string, instanceGroup string) *InstanceGroupsDeleteCall { - c := &InstanceGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetSerialPortOutput: Returns the specified instance's serial port +// output. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/getSerialPortOutput +func (r *InstancesService) GetSerialPortOutput(project string, zone string, instance string) *InstancesGetSerialPortOutputCall { + c := &InstancesGetSerialPortOutputCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroup = instanceGroup + c.instance = instance + return c +} + +// Port sets the optional parameter "port": Specifies which COM or +// serial port to retrieve data from. 
+func (c *InstancesGetSerialPortOutputCall) Port(port int64) *InstancesGetSerialPortOutputCall { + c.urlParams_.Set("port", fmt.Sprint(port)) return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupsDeleteCall) RequestId(requestId string) *InstanceGroupsDeleteCall { - c.urlParams_.Set("requestId", requestId) +// Start sets the optional parameter "start": Returns output starting +// from a specific byte position. Use this to page through output when +// the output is too large to return in a single request. For the +// initial request, leave this field unspecified. For subsequent calls, +// this field should be set to the next value returned in the previous +// call. +func (c *InstancesGetSerialPortOutputCall) Start(start int64) *InstancesGetSerialPortOutputCall { + c.urlParams_.Set("start", fmt.Sprint(start)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsDeleteCall) Fields(s ...googleapi.Field) *InstanceGroupsDeleteCall { +func (c *InstancesGetSerialPortOutputCall) Fields(s ...googleapi.Field) *InstancesGetSerialPortOutputCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesGetSerialPortOutputCall) IfNoneMatch(entityTag string) *InstancesGetSerialPortOutputCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsDeleteCall) Context(ctx context.Context) *InstanceGroupsDeleteCall { +func (c *InstancesGetSerialPortOutputCall) Context(ctx context.Context) *InstancesGetSerialPortOutputCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupsDeleteCall) Header() http.Header { +func (c *InstancesGetSerialPortOutputCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesGetSerialPortOutputCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/serialPort") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instances.getSerialPortOutput" call. +// Exactly one of *SerialPortOutput or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *SerialPortOutput.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*SerialPortOutput, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -48707,7 +62719,7 @@ func (c *InstanceGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &SerialPortOutput{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -48719,21 +62731,31 @@ func (c *InstanceGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Deletes the specified instance group. The instances in the group are not deleted. Note that instance group must not belong to a backend service. 
Read Deleting an instance group for more information.", - // "httpMethod": "DELETE", - // "id": "compute.instanceGroups.delete", + // "description": "Returns the specified instance's serial port output.", + // "httpMethod": "GET", + // "id": "compute.instances.getSerialPortOutput", // "parameterOrder": [ // "project", // "zone", - // "instanceGroup" + // "instance" // ], // "parameters": { - // "instanceGroup": { - // "description": "The name of the instance group to delete.", + // "instance": { + // "description": "Name of the instance scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, + // "port": { + // "default": "1", + // "description": "Specifies which COM or serial port to retrieve data from.", + // "format": "int32", + // "location": "query", + // "maximum": "4", + // "minimum": "1", + // "type": "integer" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -48741,119 +62763,148 @@ func (c *InstanceGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "start": { + // "description": "Returns output starting from a specific byte position. Use this to page through output when the output is too large to return in a single request. For the initial request, leave this field unspecified. For subsequent calls, this field should be set to the next value returned in the previous call.", + // "format": "int64", // "location": "query", // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}", + // "path": "{project}/zones/{zone}/instances/{instance}/serialPort", // "response": { - // "$ref": "Operation" + // "$ref": "SerialPortOutput" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instanceGroups.get": +// method id "compute.instances.insert": -type InstanceGroupsGetCall struct { - s *Service - project string - zone string - instanceGroup string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesInsertCall struct { + s *Service + project string + zone string + instance *Instance + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified instance group. 
Get a list of available -// instance groups by making a list() request. -func (r *InstanceGroupsService) Get(project string, zone string, instanceGroup string) *InstanceGroupsGetCall { - c := &InstanceGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates an instance resource in the specified project using +// the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/insert +func (r *InstancesService) Insert(project string, zone string, instance *Instance) *InstancesInsertCall { + c := &InstancesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroup = instanceGroup + c.instance = instance + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesInsertCall) RequestId(requestId string) *InstancesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// SourceInstanceTemplate sets the optional parameter +// "sourceInstanceTemplate": Specifies instance template to create the +// instance. +// +// This field is optional. It can be a full or partial URL. For example, +// the following are all valid URLs to an instance template: +// - +// https://www.googleapis.com/compute/v1/projects/project/global/global/instanceTemplates/instanceTemplate +// - projects/project/global/global/instanceTemplates/instanceTemplate +// +// - global/instancesTemplates/instanceTemplate +func (c *InstancesInsertCall) SourceInstanceTemplate(sourceInstanceTemplate string) *InstancesInsertCall { + c.urlParams_.Set("sourceInstanceTemplate", sourceInstanceTemplate) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsGetCall) Fields(s ...googleapi.Field) *InstanceGroupsGetCall { +func (c *InstancesInsertCall) Fields(s ...googleapi.Field) *InstancesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstanceGroupsGetCall) IfNoneMatch(entityTag string) *InstanceGroupsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *InstanceGroupsGetCall) Context(ctx context.Context) *InstanceGroupsGetCall { +func (c *InstancesInsertCall) Context(ctx context.Context) *InstancesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupsGetCall) Header() http.Header { +func (c *InstancesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instance) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.get" call. -// Exactly one of *InstanceGroup or error will be non-nil. Any non-2xx +// Do executes the "compute.instances.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *InstanceGroup.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup, error) { +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -48872,7 +62923,7 @@ func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroup{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -48884,21 +62935,14 @@ func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup } return ret, nil // { - // "description": "Returns the specified instance group. 
Get a list of available instance groups by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.instanceGroups.get", + // "description": "Creates an instance resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.instances.insert", // "parameterOrder": [ // "project", - // "zone", - // "instanceGroup" + // "zone" // ], // "parameters": { - // "instanceGroup": { - // "description": "The name of the instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -48906,108 +62950,177 @@ func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "sourceInstanceTemplate": { + // "description": "Specifies instance template to create the instance.\n\nThis field is optional. It can be a full or partial URL. For example, the following are all valid URLs to an instance template: \n- https://www.googleapis.com/compute/v1/projects/project/global/global/instanceTemplates/instanceTemplate \n- projects/project/global/global/instanceTemplates/instanceTemplate \n- global/instancesTemplates/instanceTemplate", + // "location": "query", + // "type": "string" + // }, // "zone": { - // "description": "The name of the zone where the instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}", + // "path": "{project}/zones/{zone}/instances", + // "request": { + // "$ref": "Instance" + // }, // "response": { - // "$ref": "InstanceGroup" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instanceGroups.insert": +// method id "compute.instances.list": -type InstanceGroupsInsertCall struct { - s *Service - project string - zone string - instancegroup *InstanceGroup - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Insert: Creates an instance group in the specified project using the -// parameters that are included in the request. 
-func (r *InstanceGroupsService) Insert(project string, zone string, instancegroup *InstanceGroup) *InstanceGroupsInsertCall { - c := &InstanceGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of instances contained within the specified +// zone. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/list +func (r *InstancesService) List(project string, zone string) *InstancesListCall { + c := &InstancesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instancegroup = instancegroup return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupsInsertCall) RequestId(requestId string) *InstanceGroupsInsertCall { - c.urlParams_.Set("requestId", requestId) +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *InstancesListCall) Filter(filter string) *InstancesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. 
+// (Default: 500) +func (c *InstancesListCall) MaxResults(maxResults int64) *InstancesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InstancesListCall) OrderBy(orderBy string) *InstancesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InstancesListCall) PageToken(pageToken string) *InstancesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsInsertCall) Fields(s ...googleapi.Field) *InstanceGroupsInsertCall { +func (c *InstancesListCall) Fields(s ...googleapi.Field) *InstancesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesListCall) IfNoneMatch(entityTag string) *InstancesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsInsertCall) Context(ctx context.Context) *InstanceGroupsInsertCall { +func (c *InstancesListCall) Context(ctx context.Context) *InstancesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupsInsertCall) Header() http.Header { +func (c *InstancesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroup) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, @@ -49016,14 +63129,14 @@ func (c *InstanceGroupsInsertCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.instances.list" call. +// Exactly one of *InstanceList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// *InstanceList.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -49042,7 +63155,7 @@ func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InstanceList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -49054,14 +63167,37 @@ func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Creates an instance group in the specified project using the parameters that are included in the request.", - // "httpMethod": "POST", - // "id": "compute.instanceGroups.insert", + // "description": "Retrieves the list of instances contained within the specified zone.", + // "httpMethod": "GET", + // "id": "compute.instances.list", // "parameterOrder": [ // "project", // "zone" // ], // "parameters": { + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -49069,51 +63205,68 @@ func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, // "zone": { - // "description": "The name of the zone where you want to create the instance group.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups", - // "request": { - // "$ref": "InstanceGroup" - // }, + // "path": "{project}/zones/{zone}/instances", // "response": { - // "$ref": "Operation" + // "$ref": "InstanceList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instanceGroups.list": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
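The compute.instances.list metadata above documents the filter expression grammar, the 0-500 maxResults window, orderBy, and pageToken. A hedged sketch of how that generated surface is typically consumed, driving the Pages helper defined just after this hunk, might look as follows; svc is again an assumed authenticated *compute.Service, and the project, zone, and filter values are placeholders rather than anything taken from minikube.

// Sketch only: drives the generated Instances.List call and its Pages helper.
// svc is assumed to be an authenticated *compute.Service; literals are placeholders.
package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func listRunningInstances(ctx context.Context, svc *compute.Service) error {
	call := svc.Instances.List("my-project", "us-central1-f").
		Filter("status eq RUNNING").      // field_name eq|ne literal; RE2 for string fields
		MaxResults(100).                  // per-page size, 0-500 (default 500)
		OrderBy("creationTimestamp desc") // newest resources first
	// Pages follows nextPageToken and invokes the callback once per page;
	// returning a non-nil error from the callback stops the iteration.
	return call.Pages(ctx, func(page *compute.InstanceList) error {
		for _, inst := range page.Items {
			fmt.Println(inst.Name, inst.Status)
		}
		return nil
	})
}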
+func (c *InstancesListCall) Pages(ctx context.Context, f func(*InstanceList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstanceGroupsListCall struct { +// method id "compute.instances.listReferrers": + +type InstancesListReferrersCall struct { s *Service project string zone string + instance string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves the list of instance groups that are located in the -// specified project and zone. -func (r *InstanceGroupsService) List(project string, zone string) *InstanceGroupsListCall { - c := &InstanceGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListReferrers: Retrieves the list of referrers to instances contained +// within the specified zone. +func (r *InstancesService) ListReferrers(project string, zone string, instance string) *InstancesListReferrersCall { + c := &InstancesListReferrersCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone + c.instance = instance return c } @@ -49143,7 +63296,7 @@ func (r *InstanceGroupsService) List(project string, zone string) *InstanceGroup // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *InstanceGroupsListCall) Filter(filter string) *InstanceGroupsListCall { +func (c *InstancesListReferrersCall) Filter(filter string) *InstancesListReferrersCall { c.urlParams_.Set("filter", filter) return c } @@ -49154,7 +63307,7 @@ func (c *InstanceGroupsListCall) Filter(filter string) *InstanceGroupsListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *InstanceGroupsListCall) MaxResults(maxResults int64) *InstanceGroupsListCall { +func (c *InstancesListReferrersCall) MaxResults(maxResults int64) *InstancesListReferrersCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -49171,7 +63324,7 @@ func (c *InstanceGroupsListCall) MaxResults(maxResults int64) *InstanceGroupsLis // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *InstanceGroupsListCall) OrderBy(orderBy string) *InstanceGroupsListCall { +func (c *InstancesListReferrersCall) OrderBy(orderBy string) *InstancesListReferrersCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -49179,7 +63332,7 @@ func (c *InstanceGroupsListCall) OrderBy(orderBy string) *InstanceGroupsListCall // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *InstanceGroupsListCall) PageToken(pageToken string) *InstanceGroupsListCall { +func (c *InstancesListReferrersCall) PageToken(pageToken string) *InstancesListReferrersCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -49187,7 +63340,7 @@ func (c *InstanceGroupsListCall) PageToken(pageToken string) *InstanceGroupsList // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InstanceGroupsListCall) Fields(s ...googleapi.Field) *InstanceGroupsListCall { +func (c *InstancesListReferrersCall) Fields(s ...googleapi.Field) *InstancesListReferrersCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -49197,7 +63350,7 @@ func (c *InstanceGroupsListCall) Fields(s ...googleapi.Field) *InstanceGroupsLis // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InstanceGroupsListCall) IfNoneMatch(entityTag string) *InstanceGroupsListCall { +func (c *InstancesListReferrersCall) IfNoneMatch(entityTag string) *InstancesListReferrersCall { c.ifNoneMatch_ = entityTag return c } @@ -49205,21 +63358,21 @@ func (c *InstanceGroupsListCall) IfNoneMatch(entityTag string) *InstanceGroupsLi // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsListCall) Context(ctx context.Context) *InstanceGroupsListCall { +func (c *InstancesListReferrersCall) Context(ctx context.Context) *InstancesListReferrersCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupsListCall) Header() http.Header { +func (c *InstancesListReferrersCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesListReferrersCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -49230,25 +63383,26 @@ func (c *InstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/referrers") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.list" call. -// Exactly one of *InstanceGroupList or error will be non-nil. Any +// Do executes the "compute.instances.listReferrers" call. +// Exactly one of *InstanceListReferrers or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *InstanceGroupList.ServerResponse.Header or (if a response was +// *InstanceListReferrers.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupList, error) { +func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*InstanceListReferrers, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -49267,7 +63421,7 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupList{ + ret := &InstanceListReferrers{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -49279,12 +63433,13 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou } return ret, nil // { - // "description": "Retrieves the list of instance groups that are located in the specified project and zone.", + // "description": "Retrieves the list of referrers to instances contained within the specified zone.", // "httpMethod": "GET", - // "id": "compute.instanceGroups.list", + // "id": "compute.instances.listReferrers", // "parameterOrder": [ // "project", - // "zone" + // "zone", + // "instance" // ], // "parameters": { // "filter": { @@ -49292,6 +63447,13 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou // "location": "query", // "type": "string" // }, + // "instance": { + // "description": "Name of the target instance scoping this request, or '-' if the request should span over all instances in the container.", + // "location": "path", + // "pattern": "-|[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "maxResults": { // "default": "500", // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", @@ -49318,15 +63480,16 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups", + // "path": "{project}/zones/{zone}/instances/{instance}/referrers", // "response": { - // "$ref": "InstanceGroupList" + // "$ref": "InstanceListReferrers" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -49340,7 +63503,7 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
-func (c *InstanceGroupsListCall) Pages(ctx context.Context, f func(*InstanceGroupList) error) error { +func (c *InstancesListReferrersCall) Pages(ctx context.Context, f func(*InstanceListReferrers) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -49358,100 +63521,52 @@ func (c *InstanceGroupsListCall) Pages(ctx context.Context, f func(*InstanceGrou } } -// method id "compute.instanceGroups.listInstances": +// method id "compute.instances.reset": -type InstanceGroupsListInstancesCall struct { - s *Service - project string - zone string - instanceGroup string - instancegroupslistinstancesrequest *InstanceGroupsListInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesResetCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ListInstances: Lists the instances in the specified instance group. -func (r *InstanceGroupsService) ListInstances(project string, zone string, instanceGroup string, instancegroupslistinstancesrequest *InstanceGroupsListInstancesRequest) *InstanceGroupsListInstancesCall { - c := &InstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Reset: Performs a reset on the instance. For more information, see +// Resetting an instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/reset +func (r *InstancesService) Reset(project string, zone string, instance string) *InstancesResetCall { + c := &InstancesResetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroup = instanceGroup - c.instancegroupslistinstancesrequest = instancegroupslistinstancesrequest - return c -} - -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. 
-func (c *InstanceGroupsListInstancesCall) Filter(filter string) *InstanceGroupsListInstancesCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InstanceGroupsListInstancesCall) MaxResults(maxResults int64) *InstanceGroupsListInstancesCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + c.instance = instance return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InstanceGroupsListInstancesCall) OrderBy(orderBy string) *InstanceGroupsListInstancesCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InstanceGroupsListInstancesCall) PageToken(pageToken string) *InstanceGroupsListInstancesCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesResetCall) RequestId(requestId string) *InstancesResetCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsListInstancesCall { +func (c *InstancesResetCall) Fields(s ...googleapi.Field) *InstancesResetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -49459,53 +63574,48 @@ func (c *InstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *InstanceGroupsListInstancesCall) Context(ctx context.Context) *InstanceGroupsListInstancesCall { +func (c *InstancesResetCall) Context(ctx context.Context) *InstancesResetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupsListInstancesCall) Header() http.Header { +func (c *InstancesResetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesResetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupslistinstancesrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/reset") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.listInstances" call. -// Exactly one of *InstanceGroupsListInstances or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *InstanceGroupsListInstances.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*InstanceGroupsListInstances, error) { +// Do executes the "compute.instances.reset" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -49524,7 +63634,7 @@ func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*Ins if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupsListInstances{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -49536,44 +63646,22 @@ func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*Ins } return ret, nil // { - // "description": "Lists the instances in the specified instance group.", + // "description": "Performs a reset on the instance. 
For more information, see Resetting an instance.", // "httpMethod": "POST", - // "id": "compute.instanceGroups.listInstances", + // "id": "compute.instances.reset", // "parameterOrder": [ // "project", // "zone", - // "instanceGroup" + // "instance" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "instanceGroup": { - // "description": "The name of the instance group from which you want to generate a list of included instances.", + // "instance": { + // "description": "Name of the instance scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -49581,75 +63669,56 @@ func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*Ins // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { - // "description": "The name of the zone where the instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances", - // "request": { - // "$ref": "InstanceGroupsListInstancesRequest" - // }, + // "path": "{project}/zones/{zone}/instances/{instance}/reset", // "response": { - // "$ref": "InstanceGroupsListInstances" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InstanceGroupsListInstancesCall) Pages(ctx context.Context, f func(*InstanceGroupsListInstances) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instanceGroups.removeInstances": +// method id "compute.instances.setDeletionProtection": -type InstanceGroupsRemoveInstancesCall struct { - s *Service - project string - zone string - instanceGroup string - instancegroupsremoveinstancesrequest *InstanceGroupsRemoveInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetDeletionProtectionCall struct { + s *Service + project string + zone string + resource string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// RemoveInstances: Removes one or more instances from the specified -// instance group, but does not delete those instances. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration before the VM instance is removed or deleted. 
-func (r *InstanceGroupsService) RemoveInstances(project string, zone string, instanceGroup string, instancegroupsremoveinstancesrequest *InstanceGroupsRemoveInstancesRequest) *InstanceGroupsRemoveInstancesCall { - c := &InstanceGroupsRemoveInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetDeletionProtection: Sets deletion protection on the instance. +func (r *InstancesService) SetDeletionProtection(project string, zone string, resource string) *InstancesSetDeletionProtectionCall { + c := &InstancesSetDeletionProtectionCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroup = instanceGroup - c.instancegroupsremoveinstancesrequest = instancegroupsremoveinstancesrequest + c.resource = resource + return c +} + +// DeletionProtection sets the optional parameter "deletionProtection": +// Whether the resource should be protected against deletion. +func (c *InstancesSetDeletionProtectionCall) DeletionProtection(deletionProtection bool) *InstancesSetDeletionProtectionCall { + c.urlParams_.Set("deletionProtection", fmt.Sprint(deletionProtection)) return c } @@ -49667,7 +63736,7 @@ func (r *InstanceGroupsService) RemoveInstances(project string, zone string, ins // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupsRemoveInstancesCall) RequestId(requestId string) *InstanceGroupsRemoveInstancesCall { +func (c *InstancesSetDeletionProtectionCall) RequestId(requestId string) *InstancesSetDeletionProtectionCall { c.urlParams_.Set("requestId", requestId) return c } @@ -49675,7 +63744,7 @@ func (c *InstanceGroupsRemoveInstancesCall) RequestId(requestId string) *Instanc // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsRemoveInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsRemoveInstancesCall { +func (c *InstancesSetDeletionProtectionCall) Fields(s ...googleapi.Field) *InstancesSetDeletionProtectionCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -49683,53 +63752,48 @@ func (c *InstanceGroupsRemoveInstancesCall) Fields(s ...googleapi.Field) *Instan // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsRemoveInstancesCall) Context(ctx context.Context) *InstanceGroupsRemoveInstancesCall { +func (c *InstancesSetDeletionProtectionCall) Context(ctx context.Context) *InstancesSetDeletionProtectionCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
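The setDeletionProtection hunk above replaces the old removeInstances call and exposes deletionProtection as a query parameter that defaults to true. A small sketch of toggling it off follows; it is not part of the diff, svc is an assumed authenticated *compute.Service, and all names are placeholders.

// Sketch only: exercises the generated Instances.SetDeletionProtection call.
// svc is assumed to be an authenticated *compute.Service; names are placeholders.
package example

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func allowDeletion(ctx context.Context, svc *compute.Service) {
	// deletionProtection defaults to true on this call; pass false so the
	// instance can be deleted again.
	op, err := svc.Instances.
		SetDeletionProtection("my-project", "us-central1-f", "example-instance").
		DeletionProtection(false).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("operation %s is %s", op.Name, op.Status)
}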
-func (c *InstanceGroupsRemoveInstancesCall) Header() http.Header { +func (c *InstancesSetDeletionProtectionCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsRemoveInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetDeletionProtectionCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupsremoveinstancesrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/setDeletionProtection") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.removeInstances" call. +// Do executes the "compute.instances.setDeletionProtection" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupsRemoveInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetDeletionProtectionCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -49760,20 +63824,20 @@ func (c *InstanceGroupsRemoveInstancesCall) Do(opts ...googleapi.CallOption) (*O } return ret, nil // { - // "description": "Removes one or more instances from the specified instance group, but does not delete those instances.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration before the VM instance is removed or deleted.", + // "description": "Sets deletion protection on the instance.", // "httpMethod": "POST", - // "id": "compute.instanceGroups.removeInstances", + // "id": "compute.instances.setDeletionProtection", // "parameterOrder": [ // "project", // "zone", - // "instanceGroup" + // "resource" // ], // "parameters": { - // "instanceGroup": { - // "description": "The name of the instance group where the specified instances will be removed.", - // "location": "path", - // "required": true, - // "type": "string" + // "deletionProtection": { + // "default": "true", + // "description": "Whether the resource should be protected against deletion.", + // "location": "query", + // "type": "boolean" // }, // "project": { // "description": "Project ID for this request.", @@ -49787,17 +63851,22 @@ func (c *InstanceGroupsRemoveInstancesCall) Do(opts ...googleapi.CallOption) (*O // "location": "query", // "type": "string" // }, + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "zone": { - // "description": "The name of the zone where the instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances", - // "request": { - // "$ref": "InstanceGroupsRemoveInstancesRequest" - // }, + // "path": "{project}/zones/{zone}/instances/{resource}/setDeletionProtection", // "response": { // "$ref": "Operation" // }, @@ -49809,26 +63878,28 @@ func (c *InstanceGroupsRemoveInstancesCall) Do(opts ...googleapi.CallOption) (*O } -// method id "compute.instanceGroups.setNamedPorts": +// method id "compute.instances.setDiskAutoDelete": -type InstanceGroupsSetNamedPortsCall struct { - s *Service - project string - zone string - instanceGroup string - instancegroupssetnamedportsrequest *InstanceGroupsSetNamedPortsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetDiskAutoDeleteCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetNamedPorts: Sets the named ports for the specified instance group. -func (r *InstanceGroupsService) SetNamedPorts(project string, zone string, instanceGroup string, instancegroupssetnamedportsrequest *InstanceGroupsSetNamedPortsRequest) *InstanceGroupsSetNamedPortsCall { - c := &InstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetDiskAutoDelete: Sets the auto-delete flag for a disk attached to +// an instance. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setDiskAutoDelete +func (r *InstancesService) SetDiskAutoDelete(project string, zone string, instance string, autoDelete bool, deviceName string) *InstancesSetDiskAutoDeleteCall { + c := &InstancesSetDiskAutoDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroup = instanceGroup - c.instancegroupssetnamedportsrequest = instancegroupssetnamedportsrequest + c.instance = instance + c.urlParams_.Set("autoDelete", fmt.Sprint(autoDelete)) + c.urlParams_.Set("deviceName", deviceName) return c } @@ -49846,7 +63917,7 @@ func (r *InstanceGroupsService) SetNamedPorts(project string, zone string, insta // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupsSetNamedPortsCall) RequestId(requestId string) *InstanceGroupsSetNamedPortsCall { +func (c *InstancesSetDiskAutoDeleteCall) RequestId(requestId string) *InstancesSetDiskAutoDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -49854,7 +63925,7 @@ func (c *InstanceGroupsSetNamedPortsCall) RequestId(requestId string) *InstanceG // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *InstanceGroupsSetNamedPortsCall { +func (c *InstancesSetDiskAutoDeleteCall) Fields(s ...googleapi.Field) *InstancesSetDiskAutoDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -49862,53 +63933,48 @@ func (c *InstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsSetNamedPortsCall) Context(ctx context.Context) *InstanceGroupsSetNamedPortsCall { +func (c *InstancesSetDiskAutoDeleteCall) Context(ctx context.Context) *InstancesSetDiskAutoDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupsSetNamedPortsCall) Header() http.Header { +func (c *InstancesSetDiskAutoDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetDiskAutoDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupssetnamedportsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.setNamedPorts" call. +// Do executes the "compute.instances.setDiskAutoDelete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -49939,18 +64005,34 @@ func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Sets the named ports for the specified instance group.", + // "description": "Sets the auto-delete flag for a disk attached to an instance.", // "httpMethod": "POST", - // "id": "compute.instanceGroups.setNamedPorts", + // "id": "compute.instances.setDiskAutoDelete", // "parameterOrder": [ // "project", // "zone", - // "instanceGroup" + // "instance", + // "autoDelete", + // "deviceName" // ], // "parameters": { - // "instanceGroup": { - // "description": "The name of the instance group where the named ports are updated.", + // "autoDelete": { + // "description": "Whether to auto-delete the disk when the instance is deleted.", + // "location": "query", + // "required": true, + // "type": "boolean" + // }, + // "deviceName": { + // "description": "The device name of the disk to modify.", + // "location": "query", + // "pattern": "\\w[\\w.-]{0,254}", + // "required": true, + // "type": "string" + // }, + // "instance": { + // "description": "The instance name.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -49967,16 +64049,14 @@ func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts", - // "request": { - // "$ref": "InstanceGroupsSetNamedPortsRequest" - // }, + // "path": "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete", // "response": { // "$ref": "Operation" // }, @@ -49988,34 +64068,34 @@ func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.instanceGroups.testIamPermissions": +// method id "compute.instances.setIamPolicy": -type InstanceGroupsTestIamPermissionsCall struct { - s *Service - project string - zone string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - 
header_ http.Header +type InstancesSetIamPolicyCall struct { + s *Service + project string + zone string + resource string + policy *Policy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *InstanceGroupsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstanceGroupsTestIamPermissionsCall { - c := &InstanceGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +func (r *InstancesService) SetIamPolicy(project string, zone string, resource string, policy *Policy) *InstancesSetIamPolicyCall { + c := &InstancesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.policy = policy return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstanceGroupsTestIamPermissionsCall { +func (c *InstancesSetIamPolicyCall) Fields(s ...googleapi.Field) *InstancesSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -50023,34 +64103,34 @@ func (c *InstanceGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *Ins // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsTestIamPermissionsCall) Context(ctx context.Context) *InstanceGroupsTestIamPermissionsCall { +func (c *InstancesSetIamPolicyCall) Context(ctx context.Context) *InstancesSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupsTestIamPermissionsCall) Header() http.Header { +func (c *InstancesSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -50062,14 +64142,14 @@ func (c *InstanceGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Resp return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. 
Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.instances.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *InstancesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -50088,7 +64168,7 @@ func (c *InstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -50100,9 +64180,9 @@ func (c *InstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", // "httpMethod": "POST", - // "id": "compute.instanceGroups.testIamPermissions", + // "id": "compute.instances.setIamPolicy", // "parameterOrder": [ // "project", // "zone", @@ -50131,43 +64211,42 @@ func (c *InstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups/{resource}/testIamPermissions", + // "path": "{project}/zones/{zone}/instances/{resource}/setIamPolicy", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "Policy" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instanceTemplates.delete": +// method id "compute.instances.setLabels": -type InstanceTemplatesDeleteCall struct { - s *Service - project string - instanceTemplate string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetLabelsCall struct { + s *Service + project string + zone string + instance string + instancessetlabelsrequest *InstancesSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified instance template. If you delete an -// instance template that is being referenced from another instance -// group, the instance group will not be able to create or recreate -// virtual machine instances. Deleting an instance template is permanent -// and cannot be undone. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/delete -func (r *InstanceTemplatesService) Delete(project string, instanceTemplate string) *InstanceTemplatesDeleteCall { - c := &InstanceTemplatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetLabels: Sets labels on an instance. To learn more about labels, +// read the Labeling Resources documentation. +func (r *InstancesService) SetLabels(project string, zone string, instance string, instancessetlabelsrequest *InstancesSetLabelsRequest) *InstancesSetLabelsCall { + c := &InstancesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.instanceTemplate = instanceTemplate + c.zone = zone + c.instance = instance + c.instancessetlabelsrequest = instancessetlabelsrequest return c } @@ -50185,7 +64264,7 @@ func (r *InstanceTemplatesService) Delete(project string, instanceTemplate strin // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceTemplatesDeleteCall) RequestId(requestId string) *InstanceTemplatesDeleteCall { +func (c *InstancesSetLabelsCall) RequestId(requestId string) *InstancesSetLabelsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -50193,7 +64272,7 @@ func (c *InstanceTemplatesDeleteCall) RequestId(requestId string) *InstanceTempl // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceTemplatesDeleteCall) Fields(s ...googleapi.Field) *InstanceTemplatesDeleteCall { +func (c *InstancesSetLabelsCall) Fields(s ...googleapi.Field) *InstancesSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -50201,47 +64280,53 @@ func (c *InstanceTemplatesDeleteCall) Fields(s ...googleapi.Field) *InstanceTemp // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceTemplatesDeleteCall) Context(ctx context.Context) *InstanceTemplatesDeleteCall { +func (c *InstancesSetLabelsCall) Context(ctx context.Context) *InstancesSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceTemplatesDeleteCall) Header() http.Header { +func (c *InstancesSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{instanceTemplate}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setLabels") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "instanceTemplate": c.instanceTemplate, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.delete" call. +// Do executes the "compute.instances.setLabels" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -50272,16 +64357,17 @@ func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Deletes the specified instance template. If you delete an instance template that is being referenced from another instance group, the instance group will not be able to create or recreate virtual machine instances. Deleting an instance template is permanent and cannot be undone.", - // "httpMethod": "DELETE", - // "id": "compute.instanceTemplates.delete", + // "description": "Sets labels on an instance. To learn more about labels, read the Labeling Resources documentation.", + // "httpMethod": "POST", + // "id": "compute.instances.setLabels", // "parameterOrder": [ // "project", - // "instanceTemplate" + // "zone", + // "instance" // ], // "parameters": { - // "instanceTemplate": { - // "description": "The name of the instance template to delete.", + // "instance": { + // "description": "Name of the instance scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -50298,9 +64384,19 @@ func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/instanceTemplates/{instanceTemplate}", + // "path": "{project}/zones/{zone}/instances/{instance}/setLabels", + // "request": { + // "$ref": "InstancesSetLabelsRequest" + // }, // "response": { // "$ref": "Operation" // }, @@ -50312,93 +64408,107 @@ func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.instanceTemplates.get": +// method id "compute.instances.setMachineResources": -type InstanceTemplatesGetCall struct { - s *Service - project string - instanceTemplate string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesSetMachineResourcesCall struct { + s *Service + project string + zone string + instance string + instancessetmachineresourcesrequest *InstancesSetMachineResourcesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified instance template. Get a list of available -// instance templates by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/get -func (r *InstanceTemplatesService) Get(project string, instanceTemplate string) *InstanceTemplatesGetCall { - c := &InstanceTemplatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetMachineResources: Changes the number and/or type of accelerator +// for a stopped instance to the values specified in the request. +func (r *InstancesService) SetMachineResources(project string, zone string, instance string, instancessetmachineresourcesrequest *InstancesSetMachineResourcesRequest) *InstancesSetMachineResourcesCall { + c := &InstancesSetMachineResourcesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.instanceTemplate = instanceTemplate + c.zone = zone + c.instance = instance + c.instancessetmachineresourcesrequest = instancessetmachineresourcesrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesSetMachineResourcesCall) RequestId(requestId string) *InstancesSetMachineResourcesCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InstanceTemplatesGetCall) Fields(s ...googleapi.Field) *InstanceTemplatesGetCall { +func (c *InstancesSetMachineResourcesCall) Fields(s ...googleapi.Field) *InstancesSetMachineResourcesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstanceTemplatesGetCall) IfNoneMatch(entityTag string) *InstanceTemplatesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceTemplatesGetCall) Context(ctx context.Context) *InstanceTemplatesGetCall { +func (c *InstancesSetMachineResourcesCall) Context(ctx context.Context) *InstancesSetMachineResourcesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceTemplatesGetCall) Header() http.Header { +func (c *InstancesSetMachineResourcesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceTemplatesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetMachineResourcesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmachineresourcesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{instanceTemplate}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMachineResources") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "instanceTemplate": c.instanceTemplate, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.get" call. -// Exactly one of *InstanceTemplate or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceTemplate.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTemplate, error) { +// Do executes the "compute.instances.setMachineResources" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetMachineResourcesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -50417,7 +64527,7 @@ func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTe if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceTemplate{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -50429,16 +64539,17 @@ func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTe } return ret, nil // { - // "description": "Returns the specified instance template. Get a list of available instance templates by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.instanceTemplates.get", + // "description": "Changes the number and/or type of accelerator for a stopped instance to the values specified in the request.", + // "httpMethod": "POST", + // "id": "compute.instances.setMachineResources", // "parameterOrder": [ // "project", - // "instanceTemplate" + // "zone", + // "instance" // ], // "parameters": { - // "instanceTemplate": { - // "description": "The name of the instance template.", + // "instance": { + // "description": "Name of the instance scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -50450,42 +64561,56 @@ func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTe // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/instanceTemplates/{instanceTemplate}", + // "path": "{project}/zones/{zone}/instances/{instance}/setMachineResources", + // "request": { + // "$ref": "InstancesSetMachineResourcesRequest" + // }, // "response": { - // "$ref": "InstanceTemplate" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instanceTemplates.insert": +// method id "compute.instances.setMachineType": -type InstanceTemplatesInsertCall struct { - s *Service - project string - instancetemplate *InstanceTemplate - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetMachineTypeCall struct { + s *Service + project string + zone string + instance string + instancessetmachinetyperequest *InstancesSetMachineTypeRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates an instance template in the specified project using -// the data that is included in the request. If you are creating a new -// template to update an existing instance group, your new instance -// template must use the same network or, if applicable, the same -// subnetwork as the original template. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/insert -func (r *InstanceTemplatesService) Insert(project string, instancetemplate *InstanceTemplate) *InstanceTemplatesInsertCall { - c := &InstanceTemplatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetMachineType: Changes the machine type for a stopped instance to +// the machine type specified in the request. +func (r *InstancesService) SetMachineType(project string, zone string, instance string, instancessetmachinetyperequest *InstancesSetMachineTypeRequest) *InstancesSetMachineTypeCall { + c := &InstancesSetMachineTypeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.instancetemplate = instancetemplate + c.zone = zone + c.instance = instance + c.instancessetmachinetyperequest = instancessetmachinetyperequest return c } @@ -50503,7 +64628,7 @@ func (r *InstanceTemplatesService) Insert(project string, instancetemplate *Inst // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceTemplatesInsertCall) RequestId(requestId string) *InstanceTemplatesInsertCall { +func (c *InstancesSetMachineTypeCall) RequestId(requestId string) *InstancesSetMachineTypeCall { c.urlParams_.Set("requestId", requestId) return c } @@ -50511,7 +64636,7 @@ func (c *InstanceTemplatesInsertCall) RequestId(requestId string) *InstanceTempl // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InstanceTemplatesInsertCall) Fields(s ...googleapi.Field) *InstanceTemplatesInsertCall { +func (c *InstancesSetMachineTypeCall) Fields(s ...googleapi.Field) *InstancesSetMachineTypeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -50519,51 +64644,53 @@ func (c *InstanceTemplatesInsertCall) Fields(s ...googleapi.Field) *InstanceTemp // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceTemplatesInsertCall) Context(ctx context.Context) *InstanceTemplatesInsertCall { +func (c *InstancesSetMachineTypeCall) Context(ctx context.Context) *InstancesSetMachineTypeCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceTemplatesInsertCall) Header() http.Header { +func (c *InstancesSetMachineTypeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceTemplatesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetMachineTypeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancetemplate) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmachinetyperequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMachineType") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.insert" call. +// Do executes the "compute.instances.setMachineType" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -50594,13 +64721,22 @@ func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Creates an instance template in the specified project using the data that is included in the request. 
If you are creating a new template to update an existing instance group, your new instance template must use the same network or, if applicable, the same subnetwork as the original template.", + // "description": "Changes the machine type for a stopped instance to the machine type specified in the request.", // "httpMethod": "POST", - // "id": "compute.instanceTemplates.insert", + // "id": "compute.instances.setMachineType", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instance" // ], // "parameters": { + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -50612,11 +64748,18 @@ func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operati // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/instanceTemplates", + // "path": "{project}/zones/{zone}/instances/{instance}/setMachineType", // "request": { - // "$ref": "InstanceTemplate" + // "$ref": "InstancesSetMachineTypeRequest" // }, // "response": { // "$ref": "Operation" @@ -50629,157 +64772,108 @@ func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.instanceTemplates.list": +// method id "compute.instances.setMetadata": -type InstanceTemplatesListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesSetMetadataCall struct { + s *Service + project string + zone string + instance string + metadata *Metadata + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of instance templates that are contained -// within the specified project and zone. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/list -func (r *InstanceTemplatesService) List(project string) *InstanceTemplatesListCall { - c := &InstanceTemplatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetMetadata: Sets metadata for the specified instance to the data +// included in the request. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setMetadata +func (r *InstancesService) SetMetadata(project string, zone string, instance string, metadata *Metadata) *InstancesSetMetadataCall { + c := &InstancesSetMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone + c.instance = instance + c.metadata = metadata return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *InstanceTemplatesListCall) Filter(filter string) *InstanceTemplatesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InstanceTemplatesListCall) MaxResults(maxResults int64) *InstanceTemplatesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InstanceTemplatesListCall) OrderBy(orderBy string) *InstanceTemplatesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InstanceTemplatesListCall) PageToken(pageToken string) *InstanceTemplatesListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesSetMetadataCall) RequestId(requestId string) *InstancesSetMetadataCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceTemplatesListCall) Fields(s ...googleapi.Field) *InstanceTemplatesListCall { +func (c *InstancesSetMetadataCall) Fields(s ...googleapi.Field) *InstancesSetMetadataCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstanceTemplatesListCall) IfNoneMatch(entityTag string) *InstanceTemplatesListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceTemplatesListCall) Context(ctx context.Context) *InstanceTemplatesListCall { +func (c *InstancesSetMetadataCall) Context(ctx context.Context) *InstancesSetMetadataCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceTemplatesListCall) Header() http.Header { +func (c *InstancesSetMetadataCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceTemplatesListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetMetadataCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.metadata) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMetadata") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.list" call. -// Exactly one of *InstanceTemplateList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceTemplateList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceTemplateList, error) { +// Do executes the "compute.instances.setMetadata" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -50798,7 +64892,7 @@ func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceT if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceTemplateList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -50810,104 +64904,106 @@ func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceT } return ret, nil // { - // "description": "Retrieves a list of instance templates that are contained within the specified project and zone.", - // "httpMethod": "GET", - // "id": "compute.instanceTemplates.list", + // "description": "Sets metadata for the specified instance to the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.instances.setMetadata", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instance" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/instanceTemplates", + // "path": "{project}/zones/{zone}/instances/{instance}/setMetadata", + // "request": { + // "$ref": "Metadata" + // }, // "response": { - // "$ref": "InstanceTemplateList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] - // } - -} + // } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InstanceTemplatesListCall) Pages(ctx context.Context, f func(*InstanceTemplateList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } } -// method id "compute.instanceTemplates.testIamPermissions": +// method id "compute.instances.setMinCpuPlatform": -type InstanceTemplatesTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetMinCpuPlatformCall struct { + s *Service + project string + zone string + instance string + instancessetmincpuplatformrequest *InstancesSetMinCpuPlatformRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *InstanceTemplatesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstanceTemplatesTestIamPermissionsCall { - c := &InstanceTemplatesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetMinCpuPlatform: Changes the minimum CPU platform that this +// instance should use. This method can only be called on a stopped +// instance. For more information, read Specifying a Minimum CPU +// Platform. 
+func (r *InstancesService) SetMinCpuPlatform(project string, zone string, instance string, instancessetmincpuplatformrequest *InstancesSetMinCpuPlatformRequest) *InstancesSetMinCpuPlatformCall { + c := &InstancesSetMinCpuPlatformCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.zone = zone + c.instance = instance + c.instancessetmincpuplatformrequest = instancessetmincpuplatformrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesSetMinCpuPlatformCall) RequestId(requestId string) *InstancesSetMinCpuPlatformCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceTemplatesTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstanceTemplatesTestIamPermissionsCall { +func (c *InstancesSetMinCpuPlatformCall) Fields(s ...googleapi.Field) *InstancesSetMinCpuPlatformCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -50915,52 +65011,53 @@ func (c *InstanceTemplatesTestIamPermissionsCall) Fields(s ...googleapi.Field) * // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceTemplatesTestIamPermissionsCall) Context(ctx context.Context) *InstanceTemplatesTestIamPermissionsCall { +func (c *InstancesSetMinCpuPlatformCall) Context(ctx context.Context) *InstancesSetMinCpuPlatformCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceTemplatesTestIamPermissionsCall) Header() http.Header { +func (c *InstancesSetMinCpuPlatformCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceTemplatesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetMinCpuPlatformCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmincpuplatformrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMinCpuPlatform") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "resource": c.resource, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.instances.setMinCpuPlatform" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetMinCpuPlatformCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -50979,7 +65076,7 @@ func (c *InstanceTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOptio if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -50991,14 +65088,22 @@ func (c *InstanceTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Changes the minimum CPU platform that this instance should use. This method can only be called on a stopped instance. 
For more information, read Specifying a Minimum CPU Platform.", // "httpMethod": "POST", - // "id": "compute.instanceTemplates.testIamPermissions", + // "id": "compute.instances.setMinCpuPlatform", // "parameterOrder": [ // "project", - // "resource" + // "zone", + // "instance" // ], // "parameters": { + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -51006,53 +65111,55 @@ func (c *InstanceTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOptio // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name of the resource for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/instanceTemplates/{resource}/testIamPermissions", + // "path": "{project}/zones/{zone}/instances/{instance}/setMinCpuPlatform", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "InstancesSetMinCpuPlatformRequest" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instances.addAccessConfig": +// method id "compute.instances.setScheduling": -type InstancesAddAccessConfigCall struct { - s *Service - project string - zone string - instance string - accessconfig *AccessConfig - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetSchedulingCall struct { + s *Service + project string + zone string + instance string + scheduling *Scheduling + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AddAccessConfig: Adds an access config to an instance's network -// interface. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/addAccessConfig -func (r *InstancesService) AddAccessConfig(project string, zone string, instance string, networkInterface string, accessconfig *AccessConfig) *InstancesAddAccessConfigCall { - c := &InstancesAddAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetScheduling: Sets an instance's scheduling options. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setScheduling +func (r *InstancesService) SetScheduling(project string, zone string, instance string, scheduling *Scheduling) *InstancesSetSchedulingCall { + c := &InstancesSetSchedulingCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance - c.urlParams_.Set("networkInterface", networkInterface) - c.accessconfig = accessconfig + c.scheduling = scheduling return c } @@ -51070,7 +65177,7 @@ func (r *InstancesService) AddAccessConfig(project string, zone string, instance // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesAddAccessConfigCall) RequestId(requestId string) *InstancesAddAccessConfigCall { +func (c *InstancesSetSchedulingCall) RequestId(requestId string) *InstancesSetSchedulingCall { c.urlParams_.Set("requestId", requestId) return c } @@ -51078,7 +65185,7 @@ func (c *InstancesAddAccessConfigCall) RequestId(requestId string) *InstancesAdd // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesAddAccessConfigCall) Fields(s ...googleapi.Field) *InstancesAddAccessConfigCall { +func (c *InstancesSetSchedulingCall) Fields(s ...googleapi.Field) *InstancesSetSchedulingCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -51086,34 +65193,34 @@ func (c *InstancesAddAccessConfigCall) Fields(s ...googleapi.Field) *InstancesAd // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesAddAccessConfigCall) Context(ctx context.Context) *InstancesAddAccessConfigCall { +func (c *InstancesSetSchedulingCall) Context(ctx context.Context) *InstancesSetSchedulingCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesAddAccessConfigCall) Header() http.Header { +func (c *InstancesSetSchedulingCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesAddAccessConfigCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetSchedulingCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.accessconfig) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.scheduling) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/addAccessConfig") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setScheduling") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -51125,14 +65232,14 @@ func (c *InstancesAddAccessConfigCall) doRequest(alt string) (*http.Response, er return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.addAccessConfig" call. +// Do executes the "compute.instances.setScheduling" call. // Exactly one of *Operation or error will be non-nil. 
Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -51163,29 +65270,22 @@ func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Adds an access config to an instance's network interface.", + // "description": "Sets an instance's scheduling options.", // "httpMethod": "POST", - // "id": "compute.instances.addAccessConfig", + // "id": "compute.instances.setScheduling", // "parameterOrder": [ // "project", // "zone", - // "instance", - // "networkInterface" + // "instance" // ], // "parameters": { // "instance": { - // "description": "The instance name for this request.", + // "description": "Instance name.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "networkInterface": { - // "description": "The name of the network interface to add to this instance.", - // "location": "query", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -51206,9 +65306,9 @@ func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operat // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/addAccessConfig", + // "path": "{project}/zones/{zone}/instances/{instance}/setScheduling", // "request": { - // "$ref": "AccessConfig" + // "$ref": "Scheduling" // }, // "response": { // "$ref": "Operation" @@ -51221,156 +65321,108 @@ func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operat } -// method id "compute.instances.aggregatedList": +// method id "compute.instances.setServiceAccount": -type InstancesAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesSetServiceAccountCall struct { + s *Service + project string + zone string + instance string + instancessetserviceaccountrequest *InstancesSetServiceAccountRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AggregatedList: Retrieves aggregated list of instances. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/aggregatedList -func (r *InstancesService) AggregatedList(project string) *InstancesAggregatedListCall { - c := &InstancesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetServiceAccount: Sets the service account on the instance. For more +// information, read Changing the service account and access scopes for +// an instance. 
+func (r *InstancesService) SetServiceAccount(project string, zone string, instance string, instancessetserviceaccountrequest *InstancesSetServiceAccountRequest) *InstancesSetServiceAccountCall { + c := &InstancesSetServiceAccountCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone + c.instance = instance + c.instancessetserviceaccountrequest = instancessetserviceaccountrequest return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *InstancesAggregatedListCall) Filter(filter string) *InstancesAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InstancesAggregatedListCall) MaxResults(maxResults int64) *InstancesAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InstancesAggregatedListCall) OrderBy(orderBy string) *InstancesAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InstancesAggregatedListCall) PageToken(pageToken string) *InstancesAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesSetServiceAccountCall) RequestId(requestId string) *InstancesSetServiceAccountCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesAggregatedListCall) Fields(s ...googleapi.Field) *InstancesAggregatedListCall { +func (c *InstancesSetServiceAccountCall) Fields(s ...googleapi.Field) *InstancesSetServiceAccountCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstancesAggregatedListCall) IfNoneMatch(entityTag string) *InstancesAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesAggregatedListCall) Context(ctx context.Context) *InstancesAggregatedListCall { +func (c *InstancesSetServiceAccountCall) Context(ctx context.Context) *InstancesSetServiceAccountCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesAggregatedListCall) Header() http.Header { +func (c *InstancesSetServiceAccountCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetserviceaccountrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setServiceAccount") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.aggregatedList" call. -// Exactly one of *InstanceAggregatedList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceAggregatedList, error) { +// Do executes the "compute.instances.setServiceAccount" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -51389,7 +65441,7 @@ func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Instanc if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceAggregatedList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -51401,110 +65453,79 @@ func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Instanc } return ret, nil // { - // "description": "Retrieves aggregated list of instances.", - // "httpMethod": "GET", - // "id": "compute.instances.aggregatedList", + // "description": "Sets the service account on the instance. For more information, read Changing the service account and access scopes for an instance.", + // "httpMethod": "POST", + // "id": "compute.instances.setServiceAccount", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instance" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", + // "instance": { + // "description": "Name of the instance resource to start.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/aggregated/instances", + // "path": "{project}/zones/{zone}/instances/{instance}/setServiceAccount", + // "request": { + // "$ref": "InstancesSetServiceAccountRequest" + // }, // "response": { - // "$ref": "InstanceAggregatedList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InstancesAggregatedListCall) Pages(ctx context.Context, f func(*InstanceAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instances.attachDisk": +// method id "compute.instances.setTags": -type InstancesAttachDiskCall struct { - s *Service - project string - zone string - instance string - attacheddisk *AttachedDisk - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetTagsCall struct { + s *Service + project string + zone string + instance string + tags *Tags + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AttachDisk: Attaches an existing Disk resource to an instance. You -// must first create the disk before you can attach it. It is not -// possible to create and attach a disk at the same time. For more -// information, read Adding a persistent disk to your instance. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/attachDisk -func (r *InstancesService) AttachDisk(project string, zone string, instance string, attacheddisk *AttachedDisk) *InstancesAttachDiskCall { - c := &InstancesAttachDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetTags: Sets tags for the specified instance to the data included in +// the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setTags +func (r *InstancesService) SetTags(project string, zone string, instance string, tags *Tags) *InstancesSetTagsCall { + c := &InstancesSetTagsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance - c.attacheddisk = attacheddisk - return c -} - -// ForceAttach sets the optional parameter "forceAttach": Whether to -// force attach the disk even if it's currently attached to another -// instance. This is only available for regional disks. 
-func (c *InstancesAttachDiskCall) ForceAttach(forceAttach bool) *InstancesAttachDiskCall { - c.urlParams_.Set("forceAttach", fmt.Sprint(forceAttach)) + c.tags = tags return c } @@ -51522,7 +65543,7 @@ func (c *InstancesAttachDiskCall) ForceAttach(forceAttach bool) *InstancesAttach // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesAttachDiskCall) RequestId(requestId string) *InstancesAttachDiskCall { +func (c *InstancesSetTagsCall) RequestId(requestId string) *InstancesSetTagsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -51530,7 +65551,7 @@ func (c *InstancesAttachDiskCall) RequestId(requestId string) *InstancesAttachDi // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesAttachDiskCall) Fields(s ...googleapi.Field) *InstancesAttachDiskCall { +func (c *InstancesSetTagsCall) Fields(s ...googleapi.Field) *InstancesSetTagsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -51538,34 +65559,34 @@ func (c *InstancesAttachDiskCall) Fields(s ...googleapi.Field) *InstancesAttachD // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesAttachDiskCall) Context(ctx context.Context) *InstancesAttachDiskCall { +func (c *InstancesSetTagsCall) Context(ctx context.Context) *InstancesSetTagsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesAttachDiskCall) Header() http.Header { +func (c *InstancesSetTagsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesAttachDiskCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.attacheddisk) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.tags) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/attachDisk") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setTags") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -51577,14 +65598,14 @@ func (c *InstancesAttachDiskCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.attachDisk" call. +// Do executes the "compute.instances.setTags" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -51615,22 +65636,17 @@ func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Attaches an existing Disk resource to an instance. You must first create the disk before you can attach it. It is not possible to create and attach a disk at the same time. For more information, read Adding a persistent disk to your instance.", + // "description": "Sets tags for the specified instance to the data included in the request.", // "httpMethod": "POST", - // "id": "compute.instances.attachDisk", + // "id": "compute.instances.setTags", // "parameterOrder": [ // "project", // "zone", // "instance" // ], // "parameters": { - // "forceAttach": { - // "description": "Whether to force attach the disk even if it's currently attached to another instance. This is only available for regional disks.", - // "location": "query", - // "type": "boolean" - // }, // "instance": { - // "description": "The instance name for this request.", + // "description": "Name of the instance scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -51656,9 +65672,9 @@ func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/attachDisk", + // "path": "{project}/zones/{zone}/instances/{instance}/setTags", // "request": { - // "$ref": "AttachedDisk" + // "$ref": "Tags" // }, // "response": { // "$ref": "Operation" @@ -51671,9 +65687,9 @@ func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.instances.delete": +// method id "compute.instances.simulateMaintenanceEvent": -type InstancesDeleteCall struct { +type InstancesSimulateMaintenanceEventCall struct { s *Service project string zone string @@ -51683,40 +65699,20 @@ type InstancesDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified Instance resource. For more -// information, see Stopping or Deleting an Instance. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/delete -func (r *InstancesService) Delete(project string, zone string, instance string) *InstancesDeleteCall { - c := &InstancesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SimulateMaintenanceEvent: Simulates a maintenance event on the +// instance. +func (r *InstancesService) SimulateMaintenanceEvent(project string, zone string, instance string) *InstancesSimulateMaintenanceEventCall { + c := &InstancesSimulateMaintenanceEventCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. 
If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesDeleteCall) RequestId(requestId string) *InstancesDeleteCall { - c.urlParams_.Set("requestId", requestId) - return c -} - // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesDeleteCall) Fields(s ...googleapi.Field) *InstancesDeleteCall { +func (c *InstancesSimulateMaintenanceEventCall) Fields(s ...googleapi.Field) *InstancesSimulateMaintenanceEventCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -51724,21 +65720,21 @@ func (c *InstancesDeleteCall) Fields(s ...googleapi.Field) *InstancesDeleteCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesDeleteCall) Context(ctx context.Context) *InstancesDeleteCall { +func (c *InstancesSimulateMaintenanceEventCall) Context(ctx context.Context) *InstancesSimulateMaintenanceEventCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesDeleteCall) Header() http.Header { +func (c *InstancesSimulateMaintenanceEventCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -51746,9 +65742,9 @@ func (c *InstancesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, @@ -51758,14 +65754,14 @@ func (c *InstancesDeleteCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.delete" call. +// Do executes the "compute.instances.simulateMaintenanceEvent" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -51796,9 +65792,9 @@ func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Deletes the specified Instance resource. For more information, see Stopping or Deleting an Instance.", - // "httpMethod": "DELETE", - // "id": "compute.instances.delete", + // "description": "Simulates a maintenance event on the instance.", + // "httpMethod": "POST", + // "id": "compute.instances.simulateMaintenanceEvent", // "parameterOrder": [ // "project", // "zone", @@ -51806,7 +65802,7 @@ func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro // ], // "parameters": { // "instance": { - // "description": "Name of the instance resource to delete.", + // "description": "Name of the instance scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -51819,11 +65815,6 @@ func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -51832,7 +65823,7 @@ func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}", + // "path": "{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent", // "response": { // "$ref": "Operation" // }, @@ -51844,9 +65835,9 @@ func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.instances.deleteAccessConfig": +// method id "compute.instances.start": -type InstancesDeleteAccessConfigCall struct { +type InstancesStartCall struct { s *Service project string zone string @@ -51856,16 +65847,15 @@ type InstancesDeleteAccessConfigCall struct { header_ http.Header } -// DeleteAccessConfig: Deletes an access config from an instance's -// network interface. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/deleteAccessConfig -func (r *InstancesService) DeleteAccessConfig(project string, zone string, instance string, accessConfig string, networkInterface string) *InstancesDeleteAccessConfigCall { - c := &InstancesDeleteAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Start: Starts an instance that was stopped using the using the +// instances().stop method. For more information, see Restart an +// instance. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/start +func (r *InstancesService) Start(project string, zone string, instance string) *InstancesStartCall { + c := &InstancesStartCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance - c.urlParams_.Set("accessConfig", accessConfig) - c.urlParams_.Set("networkInterface", networkInterface) return c } @@ -51883,7 +65873,7 @@ func (r *InstancesService) DeleteAccessConfig(project string, zone string, insta // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesDeleteAccessConfigCall) RequestId(requestId string) *InstancesDeleteAccessConfigCall { +func (c *InstancesStartCall) RequestId(requestId string) *InstancesStartCall { c.urlParams_.Set("requestId", requestId) return c } @@ -51891,7 +65881,7 @@ func (c *InstancesDeleteAccessConfigCall) RequestId(requestId string) *Instances // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesDeleteAccessConfigCall) Fields(s ...googleapi.Field) *InstancesDeleteAccessConfigCall { +func (c *InstancesStartCall) Fields(s ...googleapi.Field) *InstancesStartCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -51899,21 +65889,21 @@ func (c *InstancesDeleteAccessConfigCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesDeleteAccessConfigCall) Context(ctx context.Context) *InstancesDeleteAccessConfigCall { +func (c *InstancesStartCall) Context(ctx context.Context) *InstancesStartCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesDeleteAccessConfigCall) Header() http.Header { +func (c *InstancesStartCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesDeleteAccessConfigCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -51921,7 +65911,7 @@ func (c *InstancesDeleteAccessConfigCall) doRequest(alt string) (*http.Response, reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/start") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -51933,14 +65923,14 @@ func (c *InstancesDeleteAccessConfigCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.deleteAccessConfig" call. +// Do executes the "compute.instances.start" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -51971,36 +65961,22 @@ func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Deletes an access config from an instance's network interface.", + // "description": "Starts an instance that was stopped using the using the instances().stop method. For more information, see Restart an instance.", // "httpMethod": "POST", - // "id": "compute.instances.deleteAccessConfig", + // "id": "compute.instances.start", // "parameterOrder": [ // "project", // "zone", - // "instance", - // "accessConfig", - // "networkInterface" + // "instance" // ], // "parameters": { - // "accessConfig": { - // "description": "The name of the access config to delete.", - // "location": "query", - // "required": true, - // "type": "string" - // }, // "instance": { - // "description": "The instance name for this request.", + // "description": "Name of the instance resource to start.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "networkInterface": { - // "description": "The name of the network interface.", - // "location": "query", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -52021,7 +65997,7 @@ func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig", + // "path": "{project}/zones/{zone}/instances/{instance}/start", // "response": { // "$ref": "Operation" // }, @@ -52033,26 +66009,28 @@ func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.instances.detachDisk": +// method id "compute.instances.startWithEncryptionKey": -type InstancesDetachDiskCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesStartWithEncryptionKeyCall struct { + s *Service + project string + zone string + instance string + instancesstartwithencryptionkeyrequest *InstancesStartWithEncryptionKeyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// DetachDisk: Detaches a disk from an instance. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/detachDisk -func (r *InstancesService) DetachDisk(project string, zone string, instance string, deviceName string) *InstancesDetachDiskCall { - c := &InstancesDetachDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// StartWithEncryptionKey: Starts an instance that was stopped using the +// using the instances().stop method. For more information, see Restart +// an instance. 
+func (r *InstancesService) StartWithEncryptionKey(project string, zone string, instance string, instancesstartwithencryptionkeyrequest *InstancesStartWithEncryptionKeyRequest) *InstancesStartWithEncryptionKeyCall { + c := &InstancesStartWithEncryptionKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance - c.urlParams_.Set("deviceName", deviceName) + c.instancesstartwithencryptionkeyrequest = instancesstartwithencryptionkeyrequest return c } @@ -52070,7 +66048,7 @@ func (r *InstancesService) DetachDisk(project string, zone string, instance stri // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesDetachDiskCall) RequestId(requestId string) *InstancesDetachDiskCall { +func (c *InstancesStartWithEncryptionKeyCall) RequestId(requestId string) *InstancesStartWithEncryptionKeyCall { c.urlParams_.Set("requestId", requestId) return c } @@ -52078,7 +66056,7 @@ func (c *InstancesDetachDiskCall) RequestId(requestId string) *InstancesDetachDi // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesDetachDiskCall) Fields(s ...googleapi.Field) *InstancesDetachDiskCall { +func (c *InstancesStartWithEncryptionKeyCall) Fields(s ...googleapi.Field) *InstancesStartWithEncryptionKeyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -52086,29 +66064,34 @@ func (c *InstancesDetachDiskCall) Fields(s ...googleapi.Field) *InstancesDetachD // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesDetachDiskCall) Context(ctx context.Context) *InstancesDetachDiskCall { +func (c *InstancesStartWithEncryptionKeyCall) Context(ctx context.Context) *InstancesStartWithEncryptionKeyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesDetachDiskCall) Header() http.Header { +func (c *InstancesStartWithEncryptionKeyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesDetachDiskCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesStartWithEncryptionKeyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesstartwithencryptionkeyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/detachDisk") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -52120,14 +66103,14 @@ func (c *InstancesDetachDiskCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.detachDisk" call. +// Do executes the "compute.instances.startWithEncryptionKey" call. // Exactly one of *Operation or error will be non-nil. 
Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -52158,24 +66141,17 @@ func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Detaches a disk from an instance.", + // "description": "Starts an instance that was stopped using the using the instances().stop method. For more information, see Restart an instance.", // "httpMethod": "POST", - // "id": "compute.instances.detachDisk", + // "id": "compute.instances.startWithEncryptionKey", // "parameterOrder": [ // "project", // "zone", - // "instance", - // "deviceName" + // "instance" // ], // "parameters": { - // "deviceName": { - // "description": "Disk device name to detach.", - // "location": "query", - // "required": true, - // "type": "string" - // }, // "instance": { - // "description": "Instance name.", + // "description": "Name of the instance resource to start.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -52201,7 +66177,10 @@ func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/detachDisk", + // "path": "{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey", + // "request": { + // "$ref": "InstancesStartWithEncryptionKeyRequest" + // }, // "response": { // "$ref": "Operation" // }, @@ -52213,79 +66192,97 @@ func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.instances.get": +// method id "compute.instances.stop": -type InstancesGetCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesStopCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified Instance resource. Get a list of available -// instances by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/get -func (r *InstancesService) Get(project string, zone string, instance string) *InstancesGetCall { - c := &InstancesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Stop: Stops a running instance, shutting it down cleanly, and allows +// you to restart the instance at a later time. Stopped instances do not +// incur per-minute, virtual machine usage charges while they are +// stopped, but any resources that the virtual machine is using, such as +// persistent disks and static IP addresses, will continue to be charged +// until they are deleted. For more information, see Stopping an +// instance. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/stop +func (r *InstancesService) Stop(project string, zone string, instance string) *InstancesStopCall { + c := &InstancesStopCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance return c } +// DiscardLocalSsd sets the optional parameter "discardLocalSsd": If +// true, discard the contents of any attached localSSD partitions. +// Default value is false (== preserve localSSD data). +func (c *InstancesStopCall) DiscardLocalSsd(discardLocalSsd bool) *InstancesStopCall { + c.urlParams_.Set("discardLocalSsd", fmt.Sprint(discardLocalSsd)) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesStopCall) RequestId(requestId string) *InstancesStopCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesGetCall) Fields(s ...googleapi.Field) *InstancesGetCall { +func (c *InstancesStopCall) Fields(s ...googleapi.Field) *InstancesStopCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstancesGetCall) IfNoneMatch(entityTag string) *InstancesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesGetCall) Context(ctx context.Context) *InstancesGetCall { +func (c *InstancesStopCall) Context(ctx context.Context) *InstancesStopCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesGetCall) Header() http.Header { +func (c *InstancesStopCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesStopCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/stop") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, @@ -52295,14 +66292,14 @@ func (c *InstancesGetCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.get" call. -// Exactly one of *Instance or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Instance.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.instances.stop" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { +func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -52321,7 +66318,7 @@ func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Instance{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -52333,17 +66330,22 @@ func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { } return ret, nil // { - // "description": "Returns the specified Instance resource. Get a list of available instances by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.instances.get", + // "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur per-minute, virtual machine usage charges while they are stopped, but any resources that the virtual machine is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. For more information, see Stopping an instance.", + // "httpMethod": "POST", + // "id": "compute.instances.stop", // "parameterOrder": [ // "project", // "zone", // "instance" // ], // "parameters": { + // "discardLocalSsd": { + // "description": "If true, discard the contents of any attached localSSD partitions. 
Default value is false (== preserve localSSD data).", + // "location": "query", + // "type": "boolean" + // }, // "instance": { - // "description": "Name of the instance resource to return.", + // "description": "Name of the instance resource to stop.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -52356,6 +66358,11 @@ func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -52364,97 +66371,107 @@ func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}", + // "path": "{project}/zones/{zone}/instances/{instance}/stop", // "response": { - // "$ref": "Instance" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instances.getGuestAttributes": +// method id "compute.instances.suspend": -type InstancesGetGuestAttributesCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesSuspendCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetGuestAttributes: Returns the specified guest attributes entry. -func (r *InstancesService) GetGuestAttributes(project string, zone string, instance string) *InstancesGetGuestAttributesCall { - c := &InstancesGetGuestAttributesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Suspend: This method suspends a running instance, saving its state to +// persistent storage, and allows you to resume the instance at a later +// time. Suspended instances incur reduced per-minute, virtual machine +// usage charges while they are suspended. Any resources the virtual +// machine is using, such as persistent disks and static IP addresses, +// will continue to be charged until they are deleted. +func (r *InstancesService) Suspend(project string, zone string, instance string) *InstancesSuspendCall { + c := &InstancesSuspendCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance return c } -// VariableKey sets the optional parameter "variableKey": Specifies the -// key for the guest attributes entry. 
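A minimal usage sketch for the compute.instances.stop call added above (not part of the vendored file). It assumes the vendored package is imported as compute — the import path shown is a placeholder, use whatever path this file is vendored under — and that svc is a *compute.Service built via compute.New from an OAuth2-authorized *http.Client; project, zone, instance and the UUID are placeholders.

package main // sketch only

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1" // placeholder path; substitute the vendored compute package this diff touches
)

func stopInstanceSketch(ctx context.Context, svc *compute.Service) error {
	// RequestId makes a retried Stop idempotent; it must be a valid, non-zero UUID.
	op, err := svc.Instances.Stop("my-project", "us-central1-f", "my-instance").
		RequestId("b6e0b1a2-3c4d-4e5f-8a9b-0c1d2e3f4a5b").
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	// The Operation is returned immediately; callers typically poll it
	// (e.g. via ZoneOperations.Get) until op.Status == "DONE".
	fmt.Printf("stop operation %s: %s\n", op.Name, op.Status)
	return nil
}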
-func (c *InstancesGetGuestAttributesCall) VariableKey(variableKey string) *InstancesGetGuestAttributesCall { - c.urlParams_.Set("variableKey", variableKey) +// DiscardLocalSsd sets the optional parameter "discardLocalSsd": If +// true, discard the contents of any attached localSSD partitions. +// Default value is false (== preserve localSSD data). +func (c *InstancesSuspendCall) DiscardLocalSsd(discardLocalSsd bool) *InstancesSuspendCall { + c.urlParams_.Set("discardLocalSsd", fmt.Sprint(discardLocalSsd)) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesSuspendCall) RequestId(requestId string) *InstancesSuspendCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesGetGuestAttributesCall) Fields(s ...googleapi.Field) *InstancesGetGuestAttributesCall { +func (c *InstancesSuspendCall) Fields(s ...googleapi.Field) *InstancesSuspendCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstancesGetGuestAttributesCall) IfNoneMatch(entityTag string) *InstancesGetGuestAttributesCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesGetGuestAttributesCall) Context(ctx context.Context) *InstancesGetGuestAttributesCall { +func (c *InstancesSuspendCall) Context(ctx context.Context) *InstancesSuspendCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesGetGuestAttributesCall) Header() http.Header { +func (c *InstancesSuspendCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesGetGuestAttributesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSuspendCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/getGuestAttributes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/suspend") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, @@ -52464,14 +66481,14 @@ func (c *InstancesGetGuestAttributesCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.getGuestAttributes" call. -// Exactly one of *GuestAttributes or error will be non-nil. Any non-2xx +// Do executes the "compute.instances.suspend" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *GuestAttributes.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstancesGetGuestAttributesCall) Do(opts ...googleapi.CallOption) (*GuestAttributes, error) { +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSuspendCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -52490,7 +66507,7 @@ func (c *InstancesGetGuestAttributesCall) Do(opts ...googleapi.CallOption) (*Gue if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &GuestAttributes{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -52502,17 +66519,22 @@ func (c *InstancesGetGuestAttributesCall) Do(opts ...googleapi.CallOption) (*Gue } return ret, nil // { - // "description": "Returns the specified guest attributes entry.", - // "httpMethod": "GET", - // "id": "compute.instances.getGuestAttributes", + // "description": "This method suspends a running instance, saving its state to persistent storage, and allows you to resume the instance at a later time. Suspended instances incur reduced per-minute, virtual machine usage charges while they are suspended. 
Any resources the virtual machine is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted.", + // "httpMethod": "POST", + // "id": "compute.instances.suspend", // "parameterOrder": [ // "project", // "zone", // "instance" // ], // "parameters": { + // "discardLocalSsd": { + // "description": "If true, discard the contents of any attached localSSD partitions. Default value is false (== preserve localSSD data).", + // "location": "query", + // "type": "boolean" + // }, // "instance": { - // "description": "Name of the instance scoping this request.", + // "description": "Name of the instance resource to suspend.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -52525,8 +66547,8 @@ func (c *InstancesGetGuestAttributesCall) Do(opts ...googleapi.CallOption) (*Gue // "required": true, // "type": "string" // }, - // "variableKey": { - // "description": "Specifies the key for the guest attributes entry.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, @@ -52538,91 +66560,83 @@ func (c *InstancesGetGuestAttributesCall) Do(opts ...googleapi.CallOption) (*Gue // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/getGuestAttributes", + // "path": "{project}/zones/{zone}/instances/{instance}/suspend", // "response": { - // "$ref": "GuestAttributes" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instances.getIamPolicy": +// method id "compute.instances.testIamPermissions": -type InstancesGetIamPolicyCall struct { - s *Service - project string - zone string - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesTestIamPermissionsCall struct { + s *Service + project string + zone string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. -func (r *InstancesService) GetIamPolicy(project string, zone string, resource string) *InstancesGetIamPolicyCall { - c := &InstancesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. 
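Like stop, suspend returns an Operation. A sketch under the same assumptions as the stop example above, showing the new DiscardLocalSsd parameter:

func suspendInstanceSketch(ctx context.Context, svc *compute.Service) error {
	op, err := svc.Instances.Suspend("my-project", "us-central1-f", "my-instance").
		DiscardLocalSsd(true). // default is false, i.e. local SSD contents are preserved
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Println("suspend operation:", op.Name)
	return nil
}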
+func (r *InstancesService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstancesTestIamPermissionsCall { + c := &InstancesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesGetIamPolicyCall) Fields(s ...googleapi.Field) *InstancesGetIamPolicyCall { +func (c *InstancesTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstancesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstancesGetIamPolicyCall) IfNoneMatch(entityTag string) *InstancesGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesGetIamPolicyCall) Context(ctx context.Context) *InstancesGetIamPolicyCall { +func (c *InstancesTestIamPermissionsCall) Context(ctx context.Context) *InstancesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesGetIamPolicyCall) Header() http.Header { +func (c *InstancesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, @@ -52632,14 +66646,14 @@ func (c *InstancesGetIamPolicyCall) doRequest(alt string) (*http.Response, error return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *InstancesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.instances.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -52658,7 +66672,7 @@ func (c *InstancesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -52670,9 +66684,9 @@ func (c *InstancesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", - // "httpMethod": "GET", - // "id": "compute.instances.getIamPolicy", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.instances.testIamPermissions", // "parameterOrder": [ // "project", // "zone", @@ -52701,9 +66715,12 @@ func (c *InstancesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{resource}/getIamPolicy", + // "path": "{project}/zones/{zone}/instances/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "Policy" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -52714,97 +66731,93 @@ func (c *InstancesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e } -// method id "compute.instances.getSerialPortOutput": +// method id "compute.instances.updateAccessConfig": -type InstancesGetSerialPortOutputCall struct { +type InstancesUpdateAccessConfigCall struct { s *Service project string zone string instance string + accessconfig *AccessConfig urlParams_ gensupport.URLParams - ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// GetSerialPortOutput: Returns the specified instance's serial port -// output. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/getSerialPortOutput -func (r *InstancesService) GetSerialPortOutput(project string, zone string, instance string) *InstancesGetSerialPortOutputCall { - c := &InstancesGetSerialPortOutputCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// UpdateAccessConfig: Updates the specified access config from an +// instance's network interface with the data included in the request. +// This method supports PATCH semantics and uses the JSON merge patch +// format and processing rules. 
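A sketch of compute.instances.testIamPermissions under the same assumptions as the stop example; the permission strings are illustrative, and the response carries the subset the caller actually holds.

func checkInstancePermissionsSketch(ctx context.Context, svc *compute.Service) ([]string, error) {
	req := &compute.TestPermissionsRequest{
		Permissions: []string{"compute.instances.stop", "compute.instances.delete"},
	}
	resp, err := svc.Instances.TestIamPermissions("my-project", "us-central1-f", "my-instance", req).
		Context(ctx).
		Do()
	if err != nil {
		return nil, err
	}
	return resp.Permissions, nil // granted subset of the requested permissions
}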
+func (r *InstancesService) UpdateAccessConfig(project string, zone string, instance string, networkInterface string, accessconfig *AccessConfig) *InstancesUpdateAccessConfigCall { + c := &InstancesUpdateAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance + c.urlParams_.Set("networkInterface", networkInterface) + c.accessconfig = accessconfig return c } -// Port sets the optional parameter "port": Specifies which COM or -// serial port to retrieve data from. -func (c *InstancesGetSerialPortOutputCall) Port(port int64) *InstancesGetSerialPortOutputCall { - c.urlParams_.Set("port", fmt.Sprint(port)) - return c -} - -// Start sets the optional parameter "start": Returns output starting -// from a specific byte position. Use this to page through output when -// the output is too large to return in a single request. For the -// initial request, leave this field unspecified. For subsequent calls, -// this field should be set to the next value returned in the previous -// call. -func (c *InstancesGetSerialPortOutputCall) Start(start int64) *InstancesGetSerialPortOutputCall { - c.urlParams_.Set("start", fmt.Sprint(start)) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesUpdateAccessConfigCall) RequestId(requestId string) *InstancesUpdateAccessConfigCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesGetSerialPortOutputCall) Fields(s ...googleapi.Field) *InstancesGetSerialPortOutputCall { +func (c *InstancesUpdateAccessConfigCall) Fields(s ...googleapi.Field) *InstancesUpdateAccessConfigCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstancesGetSerialPortOutputCall) IfNoneMatch(entityTag string) *InstancesGetSerialPortOutputCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesGetSerialPortOutputCall) Context(ctx context.Context) *InstancesGetSerialPortOutputCall { +func (c *InstancesUpdateAccessConfigCall) Context(ctx context.Context) *InstancesUpdateAccessConfigCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesGetSerialPortOutputCall) Header() http.Header { +func (c *InstancesUpdateAccessConfigCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesGetSerialPortOutputCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesUpdateAccessConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.accessconfig) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/serialPort") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateAccessConfig") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, @@ -52814,14 +66827,14 @@ func (c *InstancesGetSerialPortOutputCall) doRequest(alt string) (*http.Response return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.getSerialPortOutput" call. -// Exactly one of *SerialPortOutput or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *SerialPortOutput.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*SerialPortOutput, error) { +// Do executes the "compute.instances.updateAccessConfig" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesUpdateAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -52840,7 +66853,7 @@ func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*Se if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &SerialPortOutput{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -52852,30 +66865,28 @@ func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*Se } return ret, nil // { - // "description": "Returns the specified instance's serial port output.", - // "httpMethod": "GET", - // "id": "compute.instances.getSerialPortOutput", + // "description": "Updates the specified access config from an instance's network interface with the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "POST", + // "id": "compute.instances.updateAccessConfig", // "parameterOrder": [ // "project", // "zone", - // "instance" + // "instance", + // "networkInterface" // ], // "parameters": { // "instance": { - // "description": "Name of the instance scoping this request.", + // "description": "The instance name for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "port": { - // "default": "1", - // "description": "Specifies which COM or serial port to retrieve data from.", - // "format": "int32", + // "networkInterface": { + // "description": "The name of the network interface where the access config is attached.", // "location": "query", - // "maximum": "4", - // "minimum": "1", - // "type": "integer" + // "required": true, + // "type": "string" // }, // "project": { // "description": "Project ID for this request.", @@ -52884,9 +66895,8 @@ func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*Se // "required": true, // "type": "string" // }, - // "start": { - // "description": "Returns output starting from a specific byte position. Use this to page through output when the output is too large to return in a single request. For the initial request, leave this field unspecified. For subsequent calls, this field should be set to the next value returned in the previous call.", - // "format": "int64", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, @@ -52898,39 +66908,43 @@ func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*Se // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/serialPort", + // "path": "{project}/zones/{zone}/instances/{instance}/updateAccessConfig", + // "request": { + // "$ref": "AccessConfig" + // }, // "response": { - // "$ref": "SerialPortOutput" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instances.insert": +// method id "compute.instances.updateNetworkInterface": -type InstancesInsertCall struct { - s *Service - project string - zone string - instance *Instance - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesUpdateNetworkInterfaceCall struct { + s *Service + project string + zone string + instance string + networkinterface *NetworkInterface + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates an instance resource in the specified project using -// the data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/insert -func (r *InstancesService) Insert(project string, zone string, instance *Instance) *InstancesInsertCall { - c := &InstancesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// UpdateNetworkInterface: Updates an instance's network interface. This +// method follows PATCH semantics. +func (r *InstancesService) UpdateNetworkInterface(project string, zone string, instance string, networkInterface string, networkinterface *NetworkInterface) *InstancesUpdateNetworkInterfaceCall { + c := &InstancesUpdateNetworkInterfaceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance + c.urlParams_.Set("networkInterface", networkInterface) + c.networkinterface = networkinterface return c } @@ -52948,31 +66962,15 @@ func (r *InstancesService) Insert(project string, zone string, instance *Instanc // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesInsertCall) RequestId(requestId string) *InstancesInsertCall { +func (c *InstancesUpdateNetworkInterfaceCall) RequestId(requestId string) *InstancesUpdateNetworkInterfaceCall { c.urlParams_.Set("requestId", requestId) return c } -// SourceInstanceTemplate sets the optional parameter -// "sourceInstanceTemplate": Specifies instance template to create the -// instance. -// -// This field is optional. It can be a full or partial URL. 
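compute.instances.updateAccessConfig applies JSON-merge-patch semantics over an existing access config on the named interface. A sketch under the same assumptions as the stop example; "nic0" and the AccessConfig field values are placeholders.

func updateAccessConfigSketch(ctx context.Context, svc *compute.Service) error {
	ac := &compute.AccessConfig{
		Name:  "External NAT", // should match the name of the existing access config
		Type:  "ONE_TO_ONE_NAT",
		NatIP: "203.0.113.7", // placeholder static external address
	}
	op, err := svc.Instances.UpdateAccessConfig("my-project", "us-central1-f", "my-instance", "nic0", ac).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Println("updateAccessConfig operation:", op.Name)
	return nil
}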
For example, -// the following are all valid URLs to an instance template: -// - -// https://www.googleapis.com/compute/v1/projects/project/global/global/instanceTemplates/instanceTemplate -// - projects/project/global/global/instanceTemplates/instanceTemplate -// -// - global/instancesTemplates/instanceTemplate -func (c *InstancesInsertCall) SourceInstanceTemplate(sourceInstanceTemplate string) *InstancesInsertCall { - c.urlParams_.Set("sourceInstanceTemplate", sourceInstanceTemplate) - return c -} - // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesInsertCall) Fields(s ...googleapi.Field) *InstancesInsertCall { +func (c *InstancesUpdateNetworkInterfaceCall) Fields(s ...googleapi.Field) *InstancesUpdateNetworkInterfaceCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -52980,52 +66978,53 @@ func (c *InstancesInsertCall) Fields(s ...googleapi.Field) *InstancesInsertCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesInsertCall) Context(ctx context.Context) *InstancesInsertCall { +func (c *InstancesUpdateNetworkInterfaceCall) Context(ctx context.Context) *InstancesUpdateNetworkInterfaceCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesInsertCall) Header() http.Header { +func (c *InstancesUpdateNetworkInterfaceCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesUpdateNetworkInterfaceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instance) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkinterface) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateNetworkInterface") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("PATCH", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.insert" call. +// Do executes the "compute.instances.updateNetworkInterface" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesUpdateNetworkInterfaceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -53056,14 +67055,29 @@ func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Creates an instance resource in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.instances.insert", + // "description": "Updates an instance's network interface. This method follows PATCH semantics.", + // "httpMethod": "PATCH", + // "id": "compute.instances.updateNetworkInterface", // "parameterOrder": [ // "project", - // "zone" + // "zone", + // "instance", + // "networkInterface" // ], // "parameters": { + // "instance": { + // "description": "The instance name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "networkInterface": { + // "description": "The name of the network interface to update.", + // "location": "query", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -53076,11 +67090,6 @@ func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "location": "query", // "type": "string" // }, - // "sourceInstanceTemplate": { - // "description": "Specifies instance template to create the instance.\n\nThis field is optional. It can be a full or partial URL. For example, the following are all valid URLs to an instance template: \n- https://www.googleapis.com/compute/v1/projects/project/global/global/instanceTemplates/instanceTemplate \n- projects/project/global/global/instanceTemplates/instanceTemplate \n- global/instancesTemplates/instanceTemplate", - // "location": "query", - // "type": "string" - // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -53089,9 +67098,9 @@ func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances", + // "path": "{project}/zones/{zone}/instances/{instance}/updateNetworkInterface", // "request": { - // "$ref": "Instance" + // "$ref": "NetworkInterface" // }, // "response": { // "$ref": "Operation" @@ -53104,25 +67113,22 @@ func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.instances.list": +// method id "compute.interconnectAttachments.aggregatedList": -type InstancesListCall struct { +type InterconnectAttachmentsAggregatedListCall struct { s *Service project string - zone string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves the list of instances contained within the specified -// zone. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/list -func (r *InstancesService) List(project string, zone string) *InstancesListCall { - c := &InstancesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of interconnect +// attachments. 
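Because compute.instances.updateNetworkInterface follows PATCH semantics, a read-modify-write flow is the usual pattern: fetch the instance, adjust one interface, and patch it back. A sketch under the same assumptions as the stop example.

func patchNetworkInterfaceSketch(ctx context.Context, svc *compute.Service) error {
	inst, err := svc.Instances.Get("my-project", "us-central1-f", "my-instance").Context(ctx).Do()
	if err != nil {
		return err
	}
	ni := inst.NetworkInterfaces[0] // modify fields on ni as needed before patching it back
	op, err := svc.Instances.UpdateNetworkInterface("my-project", "us-central1-f", "my-instance", ni.Name, ni).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Println("updateNetworkInterface operation:", op.Name)
	return nil
}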
+func (r *InterconnectAttachmentsService) AggregatedList(project string) *InterconnectAttachmentsAggregatedListCall { + c := &InterconnectAttachmentsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone return c } @@ -53152,7 +67158,7 @@ func (r *InstancesService) List(project string, zone string) *InstancesListCall // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *InstancesListCall) Filter(filter string) *InstancesListCall { +func (c *InterconnectAttachmentsAggregatedListCall) Filter(filter string) *InterconnectAttachmentsAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -53163,7 +67169,7 @@ func (c *InstancesListCall) Filter(filter string) *InstancesListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *InstancesListCall) MaxResults(maxResults int64) *InstancesListCall { +func (c *InterconnectAttachmentsAggregatedListCall) MaxResults(maxResults int64) *InterconnectAttachmentsAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -53180,7 +67186,7 @@ func (c *InstancesListCall) MaxResults(maxResults int64) *InstancesListCall { // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *InstancesListCall) OrderBy(orderBy string) *InstancesListCall { +func (c *InterconnectAttachmentsAggregatedListCall) OrderBy(orderBy string) *InterconnectAttachmentsAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -53188,7 +67194,7 @@ func (c *InstancesListCall) OrderBy(orderBy string) *InstancesListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *InstancesListCall) PageToken(pageToken string) *InstancesListCall { +func (c *InterconnectAttachmentsAggregatedListCall) PageToken(pageToken string) *InterconnectAttachmentsAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -53196,7 +67202,7 @@ func (c *InstancesListCall) PageToken(pageToken string) *InstancesListCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesListCall) Fields(s ...googleapi.Field) *InstancesListCall { +func (c *InterconnectAttachmentsAggregatedListCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -53206,7 +67212,7 @@ func (c *InstancesListCall) Fields(s ...googleapi.Field) *InstancesListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InstancesListCall) IfNoneMatch(entityTag string) *InstancesListCall { +func (c *InterconnectAttachmentsAggregatedListCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -53214,21 +67220,21 @@ func (c *InstancesListCall) IfNoneMatch(entityTag string) *InstancesListCall { // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesListCall) Context(ctx context.Context) *InstancesListCall { +func (c *InterconnectAttachmentsAggregatedListCall) Context(ctx context.Context) *InterconnectAttachmentsAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesListCall) Header() http.Header { +func (c *InterconnectAttachmentsAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesListCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -53239,25 +67245,25 @@ func (c *InstancesListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/interconnectAttachments") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.list" call. -// Exactly one of *InstanceList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *InstanceList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, error) { +// Do executes the "compute.interconnectAttachments.aggregatedList" call. +// Exactly one of *InterconnectAttachmentAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *InterconnectAttachmentAggregatedList.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachmentAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -53276,7 +67282,7 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceList{ + ret := &InterconnectAttachmentAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -53288,12 +67294,11 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, err } return ret, nil // { - // "description": "Retrieves the list of instances contained within the specified zone.", + // "description": "Retrieves an aggregated list of interconnect attachments.", // "httpMethod": "GET", - // "id": "compute.instances.list", + // "id": "compute.interconnectAttachments.aggregatedList", // "parameterOrder": [ - // "project", - // "zone" + // "project" // ], // "parameters": { // "filter": { @@ -53325,18 +67330,11 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, err // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances", + // "path": "{project}/aggregated/interconnectAttachments", // "response": { - // "$ref": "InstanceList" + // "$ref": "InterconnectAttachmentAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -53350,7 +67348,7 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, err // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *InstancesListCall) Pages(ctx context.Context, f func(*InstanceList) error) error { +func (c *InterconnectAttachmentsAggregatedListCall) Pages(ctx context.Context, f func(*InterconnectAttachmentAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -53368,100 +67366,203 @@ func (c *InstancesListCall) Pages(ctx context.Context, f func(*InstanceList) err } } -// method id "compute.instances.listReferrers": +// method id "compute.interconnectAttachments.delete": -type InstancesListReferrersCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InterconnectAttachmentsDeleteCall struct { + s *Service + project string + region string + interconnectAttachment string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ListReferrers: Retrieves the list of referrers to instances contained -// within the specified zone. -func (r *InstancesService) ListReferrers(project string, zone string, instance string) *InstancesListReferrersCall { - c := &InstancesListReferrersCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified interconnect attachment. 
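The aggregated-list call pairs naturally with the generated Pages helper shown above, which follows nextPageToken automatically. A sketch under the same assumptions as the stop example; the Items and InterconnectAttachments field names follow the usual aggregated-list shape of this package and are assumptions here.

func listAllInterconnectAttachmentsSketch(ctx context.Context, svc *compute.Service) error {
	return svc.InterconnectAttachments.AggregatedList("my-project").Pages(ctx,
		func(page *compute.InterconnectAttachmentAggregatedList) error {
			for scope, scoped := range page.Items {
				for _, att := range scoped.InterconnectAttachments {
					fmt.Printf("%s: %s\n", scope, att.Name)
				}
			}
			return nil // a non-nil error here halts the iteration
		})
}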
+func (r *InterconnectAttachmentsService) Delete(project string, region string, interconnectAttachment string) *InterconnectAttachmentsDeleteCall { + c := &InterconnectAttachmentsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance + c.region = region + c.interconnectAttachment = interconnectAttachment return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *InstancesListReferrersCall) Filter(filter string) *InstancesListReferrersCall { - c.urlParams_.Set("filter", filter) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InterconnectAttachmentsDeleteCall) RequestId(requestId string) *InterconnectAttachmentsDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InterconnectAttachmentsDeleteCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *InterconnectAttachmentsDeleteCall) Context(ctx context.Context) *InterconnectAttachmentsDeleteCall { + c.ctx_ = ctx return c } -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InstancesListReferrersCall) MaxResults(maxResults int64) *InstancesListReferrersCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InterconnectAttachmentsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InterconnectAttachmentsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "interconnectAttachment": c.interconnectAttachment, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.interconnectAttachments.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InterconnectAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified interconnect attachment.", + // "httpMethod": "DELETE", + // "id": "compute.interconnectAttachments.delete", + // "parameterOrder": [ + // "project", + // "region", + // "interconnectAttachment" + // ], + // "parameters": { + // "interconnectAttachment": { + // "description": "Name of the interconnect attachment to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. 
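Deleting an attachment is again an Operation-returning call; supplying a requestId makes a retried delete a no-op if the first attempt already completed. A sketch under the same assumptions as the stop example, with placeholder region, attachment name and UUID.

func deleteInterconnectAttachmentSketch(ctx context.Context, svc *compute.Service) error {
	op, err := svc.InterconnectAttachments.Delete("my-project", "us-central1", "my-attachment").
		RequestId("3f2504e0-4f89-41d3-9a0c-0305e82c3301"). // any valid, non-zero UUID
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Println("delete operation:", op.Name)
	return nil
}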
-func (c *InstancesListReferrersCall) OrderBy(orderBy string) *InstancesListReferrersCall { - c.urlParams_.Set("orderBy", orderBy) - return c +// method id "compute.interconnectAttachments.get": + +type InterconnectAttachmentsGetCall struct { + s *Service + project string + region string + interconnectAttachment string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InstancesListReferrersCall) PageToken(pageToken string) *InstancesListReferrersCall { - c.urlParams_.Set("pageToken", pageToken) +// Get: Returns the specified interconnect attachment. +func (r *InterconnectAttachmentsService) Get(project string, region string, interconnectAttachment string) *InterconnectAttachmentsGetCall { + c := &InterconnectAttachmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.interconnectAttachment = interconnectAttachment return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesListReferrersCall) Fields(s ...googleapi.Field) *InstancesListReferrersCall { +func (c *InterconnectAttachmentsGetCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -53471,7 +67572,7 @@ func (c *InstancesListReferrersCall) Fields(s ...googleapi.Field) *InstancesList // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InstancesListReferrersCall) IfNoneMatch(entityTag string) *InstancesListReferrersCall { +func (c *InterconnectAttachmentsGetCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -53479,21 +67580,21 @@ func (c *InstancesListReferrersCall) IfNoneMatch(entityTag string) *InstancesLis // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesListReferrersCall) Context(ctx context.Context) *InstancesListReferrersCall { +func (c *InterconnectAttachmentsGetCall) Context(ctx context.Context) *InterconnectAttachmentsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesListReferrersCall) Header() http.Header { +func (c *InterconnectAttachmentsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesListReferrersCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -53504,26 +67605,26 @@ func (c *InstancesListReferrersCall) doRequest(alt string) (*http.Response, erro } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/referrers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "region": c.region, + "interconnectAttachment": c.interconnectAttachment, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.listReferrers" call. -// Exactly one of *InstanceListReferrers or error will be non-nil. Any +// Do executes the "compute.interconnectAttachments.get" call. +// Exactly one of *InterconnectAttachment or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *InstanceListReferrers.ServerResponse.Header or (if a response was +// *InterconnectAttachment.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*InstanceListReferrers, error) { +func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachment, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -53542,7 +67643,7 @@ func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*Instance if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceListReferrers{ + ret := &InterconnectAttachment{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -53554,45 +67655,22 @@ func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*Instance } return ret, nil // { - // "description": "Retrieves the list of referrers to instances contained within the specified zone.", + // "description": "Returns the specified interconnect attachment.", // "httpMethod": "GET", - // "id": "compute.instances.listReferrers", + // "id": "compute.interconnectAttachments.get", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "region", + // "interconnectAttachment" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). 
The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "instance": { - // "description": "Name of the target instance scoping this request, or '-' if the request should span over all instances in the container.", + // "interconnectAttachment": { + // "description": "Name of the interconnect attachment to return.", // "location": "path", - // "pattern": "-|[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -53600,17 +67678,17 @@ func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*Instance // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/referrers", + // "path": "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", // "response": { - // "$ref": "InstanceListReferrers" + // "$ref": "InterconnectAttachment" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -53621,122 +67699,95 @@ func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*Instance } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InstancesListReferrersCall) Pages(ctx context.Context, f func(*InstanceListReferrers) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instances.reset": +// method id "compute.interconnectAttachments.getIamPolicy": -type InstancesResetCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectAttachmentsGetIamPolicyCall struct { + s *Service + project string + region string + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Reset: Performs a reset on the instance. For more information, see -// Resetting an instance. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/reset -func (r *InstancesService) Reset(project string, zone string, instance string) *InstancesResetCall { - c := &InstancesResetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. +func (r *InterconnectAttachmentsService) GetIamPolicy(project string, region string, resource string) *InterconnectAttachmentsGetIamPolicyCall { + c := &InterconnectAttachmentsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. 
If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesResetCall) RequestId(requestId string) *InstancesResetCall { - c.urlParams_.Set("requestId", requestId) + c.region = region + c.resource = resource return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesResetCall) Fields(s ...googleapi.Field) *InstancesResetCall { +func (c *InterconnectAttachmentsGetIamPolicyCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InterconnectAttachmentsGetIamPolicyCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesResetCall) Context(ctx context.Context) *InstancesResetCall { +func (c *InterconnectAttachmentsGetIamPolicyCall) Context(ctx context.Context) *InterconnectAttachmentsGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesResetCall) Header() http.Header { +func (c *InterconnectAttachmentsGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesResetCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/reset") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, - "instance": c.instance, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.reset" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.interconnectAttachments.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *InterconnectAttachmentsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -53755,7 +67806,7 @@ func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -53767,22 +67818,15 @@ func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Performs a reset on the instance. For more information, see Resetting an instance.", - // "httpMethod": "POST", - // "id": "compute.instances.reset", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + // "httpMethod": "GET", + // "id": "compute.interconnectAttachments.getIamPolicy", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "region", + // "resource" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -53790,53 +67834,53 @@ func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "resource": { + // "description": "Name of the resource for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/reset", + // "path": "{project}/regions/{region}/interconnectAttachments/{resource}/getIamPolicy", // "response": { - // "$ref": "Operation" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.setDiskAutoDelete": +// method id "compute.interconnectAttachments.insert": -type InstancesSetDiskAutoDeleteCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectAttachmentsInsertCall struct { + s *Service + project string + region string + interconnectattachment *InterconnectAttachment + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetDiskAutoDelete: Sets the auto-delete flag for a disk attached to -// an instance. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setDiskAutoDelete -func (r *InstancesService) SetDiskAutoDelete(project string, zone string, instance string, autoDelete bool, deviceName string) *InstancesSetDiskAutoDeleteCall { - c := &InstancesSetDiskAutoDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates an InterconnectAttachment in the specified project +// using the data included in the request. +func (r *InterconnectAttachmentsService) Insert(project string, region string, interconnectattachment *InterconnectAttachment) *InterconnectAttachmentsInsertCall { + c := &InterconnectAttachmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.urlParams_.Set("autoDelete", fmt.Sprint(autoDelete)) - c.urlParams_.Set("deviceName", deviceName) + c.region = region + c.interconnectattachment = interconnectattachment return c } @@ -53854,7 +67898,7 @@ func (r *InstancesService) SetDiskAutoDelete(project string, zone string, instan // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetDiskAutoDeleteCall) RequestId(requestId string) *InstancesSetDiskAutoDeleteCall { +func (c *InterconnectAttachmentsInsertCall) RequestId(requestId string) *InterconnectAttachmentsInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -53862,7 +67906,7 @@ func (c *InstancesSetDiskAutoDeleteCall) RequestId(requestId string) *InstancesS // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
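+// A minimal usage sketch for compute.interconnectAttachments.insert, assuming
+// an initialized *Service value svc, a context ctx, and illustrative
+// project/region names; the svc.InterconnectAttachments accessor and the
+// attachment fields to populate are assumptions, not shown in this change.
+// RequestId makes the create safe to retry without duplicating the resource.
+//
+//	att := &InterconnectAttachment{
+//		// populate the desired attachment fields here
+//	}
+//	op, err := svc.InterconnectAttachments.
+//		Insert("my-project", "us-central1", att).
+//		RequestId("b1c2d3e4-0000-4f00-8000-000000000001"). // any valid non-zero UUID
+//		Context(ctx).
+//		Do()
+//	if err != nil {
+//		return err
+//	}
+//	_ = op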
-func (c *InstancesSetDiskAutoDeleteCall) Fields(s ...googleapi.Field) *InstancesSetDiskAutoDeleteCall { +func (c *InterconnectAttachmentsInsertCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -53870,48 +67914,52 @@ func (c *InstancesSetDiskAutoDeleteCall) Fields(s ...googleapi.Field) *Instances // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetDiskAutoDeleteCall) Context(ctx context.Context) *InstancesSetDiskAutoDeleteCall { +func (c *InterconnectAttachmentsInsertCall) Context(ctx context.Context) *InterconnectAttachmentsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetDiskAutoDeleteCall) Header() http.Header { +func (c *InterconnectAttachmentsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetDiskAutoDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnectattachment) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setDiskAutoDelete" call. +// Do executes the "compute.interconnectAttachments.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -53942,41 +67990,25 @@ func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Sets the auto-delete flag for a disk attached to an instance.", + // "description": "Creates an InterconnectAttachment in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.instances.setDiskAutoDelete", + // "id": "compute.interconnectAttachments.insert", // "parameterOrder": [ // "project", - // "zone", - // "instance", - // "autoDelete", - // "deviceName" + // "region" // ], // "parameters": { - // "autoDelete": { - // "description": "Whether to auto-delete the disk when the instance is deleted.", - // "location": "query", - // "required": true, - // "type": "boolean" - // }, - // "deviceName": { - // "description": "The device name of the disk to modify.", - // "location": "query", - // "pattern": "\\w[\\w.-]{0,254}", - // "required": true, - // "type": "string" - // }, - // "instance": { - // "description": "The instance name.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "region": { + // "description": "Name of the region for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -53984,16 +68016,12 @@ func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Oper // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete", + // "path": "{project}/regions/{region}/interconnectAttachments", + // "request": { + // "$ref": "InterconnectAttachment" + // }, // "response": { // "$ref": "Operation" // }, @@ -54005,88 +68033,159 @@ func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Oper } -// method id "compute.instances.setIamPolicy": +// method id "compute.interconnectAttachments.list": -type InstancesSetIamPolicyCall struct { - s *Service - project string - zone string - resource string - policy *Policy - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectAttachmentsListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. -func (r *InstancesService) SetIamPolicy(project string, zone string, resource string, policy *Policy) *InstancesSetIamPolicyCall { - c := &InstancesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of interconnect attachments contained within +// the specified region. +func (r *InterconnectAttachmentsService) List(project string, region string) *InterconnectAttachmentsListCall { + c := &InterconnectAttachmentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.resource = resource - c.policy = policy + c.region = region + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. 
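+// A minimal usage sketch for compute.interconnectAttachments.list, assuming an
+// initialized *Service value svc, a context ctx, and illustrative
+// project/region/filter values; the svc.InterconnectAttachments accessor is an
+// assumption. Pages (defined further below) handles pageToken continuation, so
+// the callback only has to process each InterconnectAttachmentList page.
+//
+//	err := svc.InterconnectAttachments.
+//		List("my-project", "us-central1").
+//		Filter("name ne example-attachment").
+//		MaxResults(100).
+//		Pages(ctx, func(page *InterconnectAttachmentList) error {
+//			// process the attachments in this page
+//			return nil
+//		})
+//	if err != nil {
+//		return err
+//	}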
+func (c *InterconnectAttachmentsListCall) Filter(filter string) *InterconnectAttachmentsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *InterconnectAttachmentsListCall) MaxResults(maxResults int64) *InterconnectAttachmentsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InterconnectAttachmentsListCall) OrderBy(orderBy string) *InterconnectAttachmentsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InterconnectAttachmentsListCall) PageToken(pageToken string) *InterconnectAttachmentsListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetIamPolicyCall) Fields(s ...googleapi.Field) *InstancesSetIamPolicyCall { +func (c *InterconnectAttachmentsListCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InterconnectAttachmentsListCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetIamPolicyCall) Context(ctx context.Context) *InstancesSetIamPolicyCall { +func (c *InterconnectAttachmentsListCall) Context(ctx context.Context) *InterconnectAttachmentsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesSetIamPolicyCall) Header() http.Header { +func (c *InterconnectAttachmentsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *InstancesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.interconnectAttachments.list" call. +// Exactly one of *InterconnectAttachmentList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *InterconnectAttachmentList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachmentList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -54105,7 +68204,7 @@ func (c *InstancesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &InterconnectAttachmentList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -54117,15 +68216,37 @@ func (c *InstancesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", - // "httpMethod": "POST", - // "id": "compute.instances.setIamPolicy", + // "description": "Retrieves the list of interconnect attachments contained within the specified region.", + // "httpMethod": "GET", + // "id": "compute.interconnectAttachments.list", // "parameterOrder": [ // "project", - // "zone", - // "resource" + // "region" // ], // "parameters": { + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -54133,83 +68254,76 @@ func (c *InstancesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{resource}/setIamPolicy", - // "request": { - // "$ref": "Policy" - // }, + // "path": "{project}/regions/{region}/interconnectAttachments", // "response": { - // "$ref": "Policy" + // "$ref": "InterconnectAttachmentList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.setLabels": - -type InstancesSetLabelsCall struct { - s *Service - project string - zone string - instance string - instancessetlabelsrequest *InstancesSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InterconnectAttachmentsListCall) Pages(ctx context.Context, f func(*InterconnectAttachmentList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } } -// SetLabels: Sets labels on an instance. To learn more about labels, -// read the Labeling Resources documentation. -func (r *InstancesService) SetLabels(project string, zone string, instance string, instancessetlabelsrequest *InstancesSetLabelsRequest) *InstancesSetLabelsCall { - c := &InstancesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instance = instance - c.instancessetlabelsrequest = instancessetlabelsrequest - return c +// method id "compute.interconnectAttachments.setIamPolicy": + +type InterconnectAttachmentsSetIamPolicyCall struct { + s *Service + project string + region string + resource string + policy *Policy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. 
If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetLabelsCall) RequestId(requestId string) *InstancesSetLabelsCall { - c.urlParams_.Set("requestId", requestId) +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +func (r *InterconnectAttachmentsService) SetIamPolicy(project string, region string, resource string, policy *Policy) *InterconnectAttachmentsSetIamPolicyCall { + c := &InterconnectAttachmentsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.policy = policy return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetLabelsCall) Fields(s ...googleapi.Field) *InstancesSetLabelsCall { +func (c *InterconnectAttachmentsSetIamPolicyCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -54217,53 +68331,53 @@ func (c *InstancesSetLabelsCall) Fields(s ...googleapi.Field) *InstancesSetLabel // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetLabelsCall) Context(ctx context.Context) *InstancesSetLabelsCall { +func (c *InterconnectAttachmentsSetIamPolicyCall) Context(ctx context.Context) *InterconnectAttachmentsSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetLabelsCall) Header() http.Header { +func (c *InterconnectAttachmentsSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetlabelsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, - "instance": c.instance, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setLabels" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. 
Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.interconnectAttachments.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *InterconnectAttachmentsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -54282,7 +68396,7 @@ func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -54294,22 +68408,15 @@ func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Sets labels on an instance. To learn more about labels, read the Labeling Resources documentation.", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", // "httpMethod": "POST", - // "id": "compute.instances.setLabels", + // "id": "compute.interconnectAttachments.setIamPolicy", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "region", + // "resource" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -54317,25 +68424,27 @@ func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "resource": { + // "description": "Name of the resource for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setLabels", + // "path": "{project}/regions/{region}/interconnectAttachments/{resource}/setIamPolicy", // "request": { - // "$ref": "InstancesSetLabelsRequest" + // "$ref": "Policy" // }, // "response": { - // "$ref": "Operation" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -54345,53 +68454,34 @@ func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e } -// method id "compute.instances.setMachineResources": +// method id "compute.interconnectAttachments.testIamPermissions": -type InstancesSetMachineResourcesCall struct { - s *Service - project string - zone string - instance string - instancessetmachineresourcesrequest *InstancesSetMachineResourcesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectAttachmentsTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetMachineResources: Changes the number and/or type of accelerator -// for a stopped instance to the values specified in the request. -func (r *InstancesService) SetMachineResources(project string, zone string, instance string, instancessetmachineresourcesrequest *InstancesSetMachineResourcesRequest) *InstancesSetMachineResourcesCall { - c := &InstancesSetMachineResourcesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *InterconnectAttachmentsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *InterconnectAttachmentsTestIamPermissionsCall { + c := &InterconnectAttachmentsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.instancessetmachineresourcesrequest = instancessetmachineresourcesrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. 
-// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetMachineResourcesCall) RequestId(requestId string) *InstancesSetMachineResourcesCall { - c.urlParams_.Set("requestId", requestId) + c.region = region + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetMachineResourcesCall) Fields(s ...googleapi.Field) *InstancesSetMachineResourcesCall { +func (c *InterconnectAttachmentsTestIamPermissionsCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -54399,53 +68489,53 @@ func (c *InstancesSetMachineResourcesCall) Fields(s ...googleapi.Field) *Instanc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetMachineResourcesCall) Context(ctx context.Context) *InstancesSetMachineResourcesCall { +func (c *InterconnectAttachmentsTestIamPermissionsCall) Context(ctx context.Context) *InterconnectAttachmentsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetMachineResourcesCall) Header() http.Header { +func (c *InterconnectAttachmentsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetMachineResourcesCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmachineresourcesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMachineResources") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, - "instance": c.instance, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setMachineResources" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesSetMachineResourcesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.interconnectAttachments.testIamPermissions" call. 
+// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InterconnectAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -54464,7 +68554,7 @@ func (c *InstancesSetMachineResourcesCall) Do(opts ...googleapi.CallOption) (*Op if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -54476,22 +68566,15 @@ func (c *InstancesSetMachineResourcesCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Changes the number and/or type of accelerator for a stopped instance to the values specified in the request.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.instances.setMachineResources", + // "id": "compute.interconnectAttachments.testIamPermissions", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "region", + // "resource" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -54499,135 +68582,124 @@ func (c *InstancesSetMachineResourcesCall) Do(opts ...googleapi.CallOption) (*Op // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "resource": { + // "description": "Name of the resource for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setMachineResources", + // "path": "{project}/regions/{region}/interconnectAttachments/{resource}/testIamPermissions", // "request": { - // "$ref": "InstancesSetMachineResourcesRequest" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.setMachineType": +// method id "compute.interconnectLocations.get": -type InstancesSetMachineTypeCall struct { - s *Service - project string - zone string - instance string - instancessetmachinetyperequest *InstancesSetMachineTypeRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectLocationsGetCall struct { + s *Service + project string + interconnectLocation string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetMachineType: Changes the machine type for a stopped instance to -// the machine type specified in the request. -func (r *InstancesService) SetMachineType(project string, zone string, instance string, instancessetmachinetyperequest *InstancesSetMachineTypeRequest) *InstancesSetMachineTypeCall { - c := &InstancesSetMachineTypeCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the details for the specified interconnect location. Get +// a list of available interconnect locations by making a list() +// request. +func (r *InterconnectLocationsService) Get(project string, interconnectLocation string) *InterconnectLocationsGetCall { + c := &InterconnectLocationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.instancessetmachinetyperequest = instancessetmachinetyperequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). 
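For orientation, here is a minimal usage sketch of the new compute.interconnectAttachments.testIamPermissions call introduced above. This is editorial illustration, not part of the vendored diff: the generated package's import path is not visible in this hunk (google.golang.org/api/compute/v0.alpha is assumed), the project, region, attachment and permission strings are placeholders, and the service field names (svc.InterconnectAttachments and friends) follow the generator's usual convention.

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v0.alpha" // assumed import path; use the package this file belongs to
)

func main() {
	ctx := context.Background()

	// Build an authenticated HTTP client from Application Default Credentials.
	client, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}

	// Ask which of these permissions the caller holds on one attachment.
	// Project, region, attachment name and permission strings are placeholders.
	req := &compute.TestPermissionsRequest{
		Permissions: []string{
			"compute.interconnectAttachments.get",
			"compute.interconnectAttachments.setIamPolicy",
		},
	}
	resp, err := svc.InterconnectAttachments.
		TestIamPermissions("my-project", "us-central1", "my-attachment", req).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("granted permissions:", resp.Permissions)
}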
-func (c *InstancesSetMachineTypeCall) RequestId(requestId string) *InstancesSetMachineTypeCall { - c.urlParams_.Set("requestId", requestId) + c.interconnectLocation = interconnectLocation return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetMachineTypeCall) Fields(s ...googleapi.Field) *InstancesSetMachineTypeCall { +func (c *InterconnectLocationsGetCall) Fields(s ...googleapi.Field) *InterconnectLocationsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InterconnectLocationsGetCall) IfNoneMatch(entityTag string) *InterconnectLocationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetMachineTypeCall) Context(ctx context.Context) *InstancesSetMachineTypeCall { +func (c *InterconnectLocationsGetCall) Context(ctx context.Context) *InterconnectLocationsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetMachineTypeCall) Header() http.Header { +func (c *InterconnectLocationsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetMachineTypeCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectLocationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmachinetyperequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMachineType") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnectLocations/{interconnectLocation}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "interconnectLocation": c.interconnectLocation, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setMachineType" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. 
-func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.interconnectLocations.get" call. +// Exactly one of *InterconnectLocation or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InterconnectLocation.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InterconnectLocationsGetCall) Do(opts ...googleapi.CallOption) (*InterconnectLocation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -54646,7 +68718,7 @@ func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operati if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InterconnectLocation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -54658,17 +68730,16 @@ func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Changes the machine type for a stopped instance to the machine type specified in the request.", - // "httpMethod": "POST", - // "id": "compute.instances.setMachineType", + // "description": "Returns the details for the specified interconnect location. Get a list of available interconnect locations by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.interconnectLocations.get", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "interconnectLocation" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", + // "interconnectLocation": { + // "description": "Name of the interconnect location to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -54680,137 +68751,171 @@ func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operati // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setMachineType", - // "request": { - // "$ref": "InstancesSetMachineTypeRequest" - // }, + // "path": "{project}/global/interconnectLocations/{interconnectLocation}", // "response": { - // "$ref": "Operation" + // "$ref": "InterconnectLocation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.setMetadata": +// method id "compute.interconnectLocations.list": -type InstancesSetMetadataCall struct { - s *Service - project string - zone string - instance string - metadata *Metadata - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectLocationsListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves the list of interconnect locations available to the +// specified project. +func (r *InterconnectLocationsService) List(project string) *InterconnectLocationsListCall { + c := &InterconnectLocationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *InterconnectLocationsListCall) Filter(filter string) *InterconnectLocationsListCall { + c.urlParams_.Set("filter", filter) + return c } -// SetMetadata: Sets metadata for the specified instance to the data -// included in the request. 
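A sketch of reading a single interconnect location with the Get call defined above, exercising the IfNoneMatch/ETag behaviour and the Fields partial-response option its doc comments describe. Editorial illustration only: svc is a *compute.Service built as in the earlier sketch, the import path is assumed, and the project, location name and cached ETag are placeholders.

package interconnectexamples // editorial sketch, not part of the vendored file

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.alpha" // assumed import path
	"google.golang.org/api/googleapi"
)

// describeLocation fetches one interconnect location, skipping the body when
// the caller's cached copy (identified by cachedETag) is still current.
func describeLocation(ctx context.Context, svc *compute.Service, cachedETag string) error {
	loc, err := svc.InterconnectLocations.
		Get("my-project", "iad-zone1-1"). // placeholder project and location
		IfNoneMatch(cachedETag).          // server replies 304 if the ETag still matches
		Fields("name", "description").    // partial response: only the fields we print
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		fmt.Println("cached InterconnectLocation is still current")
		return nil
	}
	if err != nil {
		return err
	}
	fmt.Println("interconnect location:", loc.Name, loc.Description)
	return nil
}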
-// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setMetadata -func (r *InstancesService) SetMetadata(project string, zone string, instance string, metadata *Metadata) *InstancesSetMetadataCall { - c := &InstancesSetMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instance = instance - c.metadata = metadata +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *InterconnectLocationsListCall) MaxResults(maxResults int64) *InterconnectLocationsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetMetadataCall) RequestId(requestId string) *InstancesSetMetadataCall { - c.urlParams_.Set("requestId", requestId) +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InterconnectLocationsListCall) OrderBy(orderBy string) *InterconnectLocationsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InterconnectLocationsListCall) PageToken(pageToken string) *InterconnectLocationsListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetMetadataCall) Fields(s ...googleapi.Field) *InstancesSetMetadataCall { +func (c *InterconnectLocationsListCall) Fields(s ...googleapi.Field) *InterconnectLocationsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InterconnectLocationsListCall) IfNoneMatch(entityTag string) *InterconnectLocationsListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetMetadataCall) Context(ctx context.Context) *InstancesSetMetadataCall { +func (c *InterconnectLocationsListCall) Context(ctx context.Context) *InterconnectLocationsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetMetadataCall) Header() http.Header { +func (c *InterconnectLocationsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetMetadataCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectLocationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.metadata) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMetadata") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnectLocations") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setMetadata" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.interconnectLocations.list" call. +// Exactly one of *InterconnectLocationList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *InterconnectLocationList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*InterconnectLocationList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -54829,7 +68934,7 @@ func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InterconnectLocationList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -54841,106 +68946,104 @@ func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Sets metadata for the specified instance to the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.instances.setMetadata", + // "description": "Retrieves the list of interconnect locations available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.interconnectLocations.list", // "parameterOrder": [ - // "project", - // "zone", - // "instance" + // "project" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setMetadata", - // "request": { - // "$ref": "Metadata" - // }, + // "path": "{project}/global/interconnectLocations", // "response": { - // "$ref": "Operation" + // "$ref": "InterconnectLocationList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.setMinCpuPlatform": - -type InstancesSetMinCpuPlatformCall struct { - s *Service - project string - zone string - instance string - instancessetmincpuplatformrequest *InstancesSetMinCpuPlatformRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InterconnectLocationsListCall) Pages(ctx context.Context, f func(*InterconnectLocationList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } } -// SetMinCpuPlatform: Changes the minimum CPU platform that this -// instance should use. This method can only be called on a stopped -// instance. For more information, read Specifying a Minimum CPU -// Platform. 
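The List call above documents the filter expression syntax, maxResults paging and the Pages helper; the following sketch ties them together. Editorial illustration only: the project name and filter value are placeholders (the filter mirrors the eq/ne form from the doc comment), and svc is built as in the first sketch.

package interconnectexamples // editorial sketch, not part of the vendored file

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.alpha" // assumed import path
)

// listLocations pages through every interconnect location in the project,
// using the filter / maxResults / Pages semantics documented above.
func listLocations(ctx context.Context, svc *compute.Service) error {
	call := svc.InterconnectLocations.List("my-project"). // placeholder project
		Filter("name ne example-location").               // field_name comparison_string literal_string
		MaxResults(50)                                     // at most 50 items per page

	// Pages keeps following nextPageToken until the last page is reached;
	// returning a non-nil error from the callback stops the iteration.
	return call.Pages(ctx, func(page *compute.InterconnectLocationList) error {
		for _, loc := range page.Items {
			fmt.Println(loc.Name)
		}
		return nil
	})
}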
-func (r *InstancesService) SetMinCpuPlatform(project string, zone string, instance string, instancessetmincpuplatformrequest *InstancesSetMinCpuPlatformRequest) *InstancesSetMinCpuPlatformCall { - c := &InstancesSetMinCpuPlatformCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instance = instance - c.instancessetmincpuplatformrequest = instancessetmincpuplatformrequest - return c +// method id "compute.interconnectLocations.testIamPermissions": + +type InterconnectLocationsTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetMinCpuPlatformCall) RequestId(requestId string) *InstancesSetMinCpuPlatformCall { - c.urlParams_.Set("requestId", requestId) +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *InterconnectLocationsService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *InterconnectLocationsTestIamPermissionsCall { + c := &InterconnectLocationsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetMinCpuPlatformCall) Fields(s ...googleapi.Field) *InstancesSetMinCpuPlatformCall { +func (c *InterconnectLocationsTestIamPermissionsCall) Fields(s ...googleapi.Field) *InterconnectLocationsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -54948,53 +69051,52 @@ func (c *InstancesSetMinCpuPlatformCall) Fields(s ...googleapi.Field) *Instances // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetMinCpuPlatformCall) Context(ctx context.Context) *InstancesSetMinCpuPlatformCall { +func (c *InterconnectLocationsTestIamPermissionsCall) Context(ctx context.Context) *InterconnectLocationsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesSetMinCpuPlatformCall) Header() http.Header { +func (c *InterconnectLocationsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetMinCpuPlatformCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectLocationsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmincpuplatformrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMinCpuPlatform") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnectLocations/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, - "instance": c.instance, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setMinCpuPlatform" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesSetMinCpuPlatformCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.interconnectLocations.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InterconnectLocationsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -55013,7 +69115,7 @@ func (c *InstancesSetMinCpuPlatformCall) Do(opts ...googleapi.CallOption) (*Oper if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -55025,22 +69127,14 @@ func (c *InstancesSetMinCpuPlatformCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Changes the minimum CPU platform that this instance should use. This method can only be called on a stopped instance. 
For more information, read Specifying a Minimum CPU Platform.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.instances.setMinCpuPlatform", + // "id": "compute.interconnectLocations.testIamPermissions", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "resource" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -55048,55 +69142,46 @@ func (c *InstancesSetMinCpuPlatformCall) Do(opts ...googleapi.CallOption) (*Oper // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "resource": { + // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setMinCpuPlatform", + // "path": "{project}/global/interconnectLocations/{resource}/testIamPermissions", // "request": { - // "$ref": "InstancesSetMinCpuPlatformRequest" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.setScheduling": +// method id "compute.interconnects.delete": -type InstancesSetSchedulingCall struct { - s *Service - project string - zone string - instance string - scheduling *Scheduling - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectsDeleteCall struct { + s *Service + project string + interconnect string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetScheduling: Sets an instance's scheduling options. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setScheduling -func (r *InstancesService) SetScheduling(project string, zone string, instance string, scheduling *Scheduling) *InstancesSetSchedulingCall { - c := &InstancesSetSchedulingCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified interconnect. 
+func (r *InterconnectsService) Delete(project string, interconnect string) *InterconnectsDeleteCall { + c := &InterconnectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.scheduling = scheduling + c.interconnect = interconnect return c } @@ -55114,7 +69199,7 @@ func (r *InstancesService) SetScheduling(project string, zone string, instance s // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetSchedulingCall) RequestId(requestId string) *InstancesSetSchedulingCall { +func (c *InterconnectsDeleteCall) RequestId(requestId string) *InterconnectsDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -55122,7 +69207,7 @@ func (c *InstancesSetSchedulingCall) RequestId(requestId string) *InstancesSetSc // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetSchedulingCall) Fields(s ...googleapi.Field) *InstancesSetSchedulingCall { +func (c *InterconnectsDeleteCall) Fields(s ...googleapi.Field) *InterconnectsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -55130,53 +69215,47 @@ func (c *InstancesSetSchedulingCall) Fields(s ...googleapi.Field) *InstancesSetS // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetSchedulingCall) Context(ctx context.Context) *InstancesSetSchedulingCall { +func (c *InterconnectsDeleteCall) Context(ctx context.Context) *InterconnectsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetSchedulingCall) Header() http.Header { +func (c *InterconnectsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetSchedulingCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.scheduling) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setScheduling") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "interconnect": c.interconnect, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setScheduling" call. +// Do executes the "compute.interconnects.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -55207,17 +69286,16 @@ func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Sets an instance's scheduling options.", - // "httpMethod": "POST", - // "id": "compute.instances.setScheduling", + // "description": "Deletes the specified interconnect.", + // "httpMethod": "DELETE", + // "id": "compute.interconnects.delete", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "interconnect" // ], // "parameters": { - // "instance": { - // "description": "Instance name.", + // "interconnect": { + // "description": "Name of the interconnect to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -55234,19 +69312,9 @@ func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operatio // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setScheduling", - // "request": { - // "$ref": "Scheduling" - // }, + // "path": "{project}/global/interconnects/{interconnect}", // "response": { // "$ref": "Operation" // }, @@ -55258,108 +69326,92 @@ func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operatio } -// method id "compute.instances.setServiceAccount": +// method id "compute.interconnects.get": -type InstancesSetServiceAccountCall struct { - s *Service - project string - zone string - instance string - instancessetserviceaccountrequest *InstancesSetServiceAccountRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectsGetCall struct { + s *Service + project string + interconnect string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetServiceAccount: Sets the service account on the instance. For more -// information, read Changing the service account and access scopes for -// an instance. 
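The Delete call above accepts the optional requestId parameter whose idempotency behaviour is spelled out in the surrounding comments; the sketch below reuses one request ID so that a retry after a timeout cannot create a duplicate operation. Editorial illustration only: the UUID, project and interconnect names are placeholders, and svc is built as in the first sketch.

package interconnectexamples // editorial sketch, not part of the vendored file

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.alpha" // assumed import path
)

// deleteInterconnect issues the delete with a caller-chosen request ID, so a
// retry with the same ID is ignored if the first attempt already succeeded.
func deleteInterconnect(ctx context.Context, svc *compute.Service) error {
	// In practice generate a fresh UUID per logical request; the zero UUID is rejected.
	const reqID = "3e8d1f44-9a7b-4c6e-8f21-5b0c9d2a7e10" // placeholder UUID

	op, err := svc.Interconnects.
		Delete("my-project", "my-interconnect"). // placeholder names
		RequestId(reqID).
		Context(ctx).
		Do()
	if err != nil {
		// Safe to retry with the same reqID; a delete the server already accepted is ignored.
		return err
	}
	fmt.Println("delete operation:", op.Name, op.Status)
	return nil
}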
-func (r *InstancesService) SetServiceAccount(project string, zone string, instance string, instancessetserviceaccountrequest *InstancesSetServiceAccountRequest) *InstancesSetServiceAccountCall { - c := &InstancesSetServiceAccountCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified interconnect. Get a list of available +// interconnects by making a list() request. +func (r *InterconnectsService) Get(project string, interconnect string) *InterconnectsGetCall { + c := &InterconnectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.instancessetserviceaccountrequest = instancessetserviceaccountrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetServiceAccountCall) RequestId(requestId string) *InstancesSetServiceAccountCall { - c.urlParams_.Set("requestId", requestId) + c.interconnect = interconnect return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetServiceAccountCall) Fields(s ...googleapi.Field) *InstancesSetServiceAccountCall { +func (c *InterconnectsGetCall) Fields(s ...googleapi.Field) *InterconnectsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InterconnectsGetCall) IfNoneMatch(entityTag string) *InterconnectsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetServiceAccountCall) Context(ctx context.Context) *InstancesSetServiceAccountCall { +func (c *InterconnectsGetCall) Context(ctx context.Context) *InterconnectsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesSetServiceAccountCall) Header() http.Header { +func (c *InterconnectsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetserviceaccountrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setServiceAccount") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "interconnect": c.interconnect, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setServiceAccount" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.interconnects.get" call. +// Exactly one of *Interconnect or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// *Interconnect.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -55378,7 +69430,7 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Interconnect{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -55390,17 +69442,16 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Sets the service account on the instance. For more information, read Changing the service account and access scopes for an instance.", - // "httpMethod": "POST", - // "id": "compute.instances.setServiceAccount", + // "description": "Returns the specified interconnect. 
Get a list of available interconnects by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.interconnects.get", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "interconnect" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance resource to start.", + // "interconnect": { + // "description": "Name of the interconnect to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -55412,137 +69463,107 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setServiceAccount", - // "request": { - // "$ref": "InstancesSetServiceAccountRequest" - // }, + // "path": "{project}/global/interconnects/{interconnect}", // "response": { - // "$ref": "Operation" + // "$ref": "Interconnect" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.setTags": +// method id "compute.interconnects.getIamPolicy": -type InstancesSetTagsCall struct { - s *Service - project string - zone string - instance string - tags *Tags - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectsGetIamPolicyCall struct { + s *Service + project string + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetTags: Sets tags for the specified instance to the data included in -// the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setTags -func (r *InstancesService) SetTags(project string, zone string, instance string, tags *Tags) *InstancesSetTagsCall { - c := &InstancesSetTagsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. 
+func (r *InterconnectsService) GetIamPolicy(project string, resource string) *InterconnectsGetIamPolicyCall { + c := &InterconnectsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.tags = tags - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetTagsCall) RequestId(requestId string) *InstancesSetTagsCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetTagsCall) Fields(s ...googleapi.Field) *InstancesSetTagsCall { +func (c *InterconnectsGetIamPolicyCall) Fields(s ...googleapi.Field) *InterconnectsGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InterconnectsGetIamPolicyCall) IfNoneMatch(entityTag string) *InterconnectsGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetTagsCall) Context(ctx context.Context) *InstancesSetTagsCall { +func (c *InterconnectsGetIamPolicyCall) Context(ctx context.Context) *InterconnectsGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesSetTagsCall) Header() http.Header { +func (c *InterconnectsGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.tags) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setTags") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, - "instance": c.instance, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setTags" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.interconnects.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *InterconnectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -55561,7 +69582,7 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -55573,22 +69594,14 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Sets tags for the specified instance to the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.instances.setTags", + // "description": "Gets the access control policy for a resource. 
May be empty if no such policy or resource exists.", + // "httpMethod": "GET", + // "id": "compute.interconnects.getIamPolicy", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "resource" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -55596,60 +69609,70 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "resource": { + // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z0-9](?:[-a-z0-9_]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setTags", - // "request": { - // "$ref": "Tags" - // }, + // "path": "{project}/global/interconnects/{resource}/getIamPolicy", // "response": { - // "$ref": "Operation" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.simulateMaintenanceEvent": +// method id "compute.interconnects.insert": -type InstancesSimulateMaintenanceEventCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectsInsertCall struct { + s *Service + project string + interconnect *Interconnect + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SimulateMaintenanceEvent: Simulates a maintenance event on the -// instance. -func (r *InstancesService) SimulateMaintenanceEvent(project string, zone string, instance string) *InstancesSimulateMaintenanceEventCall { - c := &InstancesSimulateMaintenanceEventCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a Interconnect in the specified project using the +// data included in the request. 
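A hedged usage sketch (not part of the vendored file): how downstream code might call the generated interconnects.getIamPolicy method wired up above, before the Insert constructor that follows. The import path, project name, and resource name are assumptions, not values from this diff.

```go
// Hedged sketch: calling the generated InterconnectsService.GetIamPolicy method.
package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1" // assumed vendored path; adjust to the actual surface
)

func main() {
	ctx := context.Background()

	// Application Default Credentials with the cloud-platform scope.
	client, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}

	// Fetch the IAM policy attached to a hypothetical interconnect resource.
	policy, err := svc.Interconnects.GetIamPolicy("my-project", "my-interconnect").Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range policy.Bindings {
		fmt.Println(b.Role, b.Members)
	}
}
```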
+func (r *InterconnectsService) Insert(project string, interconnect *Interconnect) *InterconnectsInsertCall { + c := &InterconnectsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance + c.interconnect = interconnect + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InterconnectsInsertCall) RequestId(requestId string) *InterconnectsInsertCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSimulateMaintenanceEventCall) Fields(s ...googleapi.Field) *InstancesSimulateMaintenanceEventCall { +func (c *InterconnectsInsertCall) Fields(s ...googleapi.Field) *InterconnectsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -55657,48 +69680,51 @@ func (c *InstancesSimulateMaintenanceEventCall) Fields(s ...googleapi.Field) *In // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSimulateMaintenanceEventCall) Context(ctx context.Context) *InstancesSimulateMaintenanceEventCall { +func (c *InterconnectsInsertCall) Context(ctx context.Context) *InterconnectsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSimulateMaintenanceEventCall) Header() http.Header { +func (c *InterconnectsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnect) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.simulateMaintenanceEvent" call. 
+// Do executes the "compute.interconnects.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -55729,22 +69755,13 @@ func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Simulates a maintenance event on the instance.", + // "description": "Creates a Interconnect in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.instances.simulateMaintenanceEvent", + // "id": "compute.interconnects.insert", // "parameterOrder": [ - // "project", - // "zone", - // "instance" + // "project" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -55752,15 +69769,16 @@ func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent", + // "path": "{project}/global/interconnects", + // "request": { + // "$ref": "Interconnect" + // }, // "response": { // "$ref": "Operation" // }, @@ -55772,102 +69790,156 @@ func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) } -// method id "compute.instances.start": +// method id "compute.interconnects.list": -type InstancesStartCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectsListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Start: Starts an instance that was stopped using the using the -// instances().stop method. For more information, see Restart an -// instance. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/start -func (r *InstancesService) Start(project string, zone string, instance string) *InstancesStartCall { - c := &InstancesStartCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of interconnect available to the specified +// project. +func (r *InterconnectsService) List(project string) *InterconnectsListCall { + c := &InterconnectsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesStartCall) RequestId(requestId string) *InstancesStartCall { - c.urlParams_.Set("requestId", requestId) +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. 
For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *InterconnectsListCall) Filter(filter string) *InterconnectsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *InterconnectsListCall) MaxResults(maxResults int64) *InterconnectsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InterconnectsListCall) OrderBy(orderBy string) *InterconnectsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InterconnectsListCall) PageToken(pageToken string) *InterconnectsListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesStartCall) Fields(s ...googleapi.Field) *InstancesStartCall { +func (c *InterconnectsListCall) Fields(s ...googleapi.Field) *InterconnectsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InterconnectsListCall) IfNoneMatch(entityTag string) *InterconnectsListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
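The IfNoneMatch setters introduced in this hunk follow the library's usual ETag convention: pass the last ETag you saw and treat an http.StatusNotModified result as "nothing changed". A minimal sketch, assuming the vendored import path and placeholder resource names:

```go
// Hedged sketch of the If-None-Match pattern behind the IfNoneMatch setters above.
package interconnectsketch

import (
	"context"

	compute "google.golang.org/api/compute/v1" // assumed vendored path
	"google.golang.org/api/googleapi"
)

// getIfChanged re-reads an interconnect only if it changed since lastETag was
// observed; a 304 response surfaces as an error that IsNotModified recognizes.
func getIfChanged(ctx context.Context, svc *compute.Service, project, resource, lastETag string) (*compute.Interconnect, error) {
	ic, err := svc.Interconnects.Get(project, resource).
		IfNoneMatch(lastETag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		return nil, nil // server returned 304; the cached copy is still current
	}
	return ic, err
}
```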
-func (c *InstancesStartCall) Context(ctx context.Context) *InstancesStartCall { +func (c *InterconnectsListCall) Context(ctx context.Context) *InterconnectsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesStartCall) Header() http.Header { +func (c *InterconnectsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/start") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.start" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.interconnects.list" call. +// Exactly one of *InterconnectList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InterconnectList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -55886,7 +69958,7 @@ func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InterconnectList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -55898,76 +69970,98 @@ func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Starts an instance that was stopped using the using the instances().stop method. 
For more information, see Restart an instance.", - // "httpMethod": "POST", - // "id": "compute.instances.start", + // "description": "Retrieves the list of interconnect available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.interconnects.list", // "parameterOrder": [ - // "project", - // "zone", - // "instance" + // "project" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance resource to start.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/start", + // "path": "{project}/global/interconnects", // "response": { - // "$ref": "Operation" + // "$ref": "InterconnectList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.startWithEncryptionKey": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InterconnectsListCall) Pages(ctx context.Context, f func(*InterconnectList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstancesStartWithEncryptionKeyCall struct { - s *Service - project string - zone string - instance string - instancesstartwithencryptionkeyrequest *InstancesStartWithEncryptionKeyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.interconnects.patch": + +type InterconnectsPatchCall struct { + s *Service + project string + interconnect string + interconnect2 *Interconnect + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// StartWithEncryptionKey: Starts an instance that was stopped using the -// using the instances().stop method. For more information, see Restart -// an instance. -func (r *InstancesService) StartWithEncryptionKey(project string, zone string, instance string, instancesstartwithencryptionkeyrequest *InstancesStartWithEncryptionKeyRequest) *InstancesStartWithEncryptionKeyCall { - c := &InstancesStartWithEncryptionKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates the specified interconnect with the data included in +// the request. This method supports PATCH semantics and uses the JSON +// merge patch format and processing rules. 
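The Pages helper added just above handles the nextPageToken bookkeeping for callers of the list method. A hedged sketch of iterating every interconnect in a project; the filter expression and field names are illustrative assumptions, and svc is a previously constructed *compute.Service (see the earlier sketch):

```go
// Hedged sketch: driving InterconnectsListCall.Pages from the hunk above.
package interconnectsketch

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1" // assumed vendored path
)

// listAll walks every page of interconnects in a project; Pages manages the
// pageToken between successive list requests.
func listAll(ctx context.Context, svc *compute.Service, project string) error {
	call := svc.Interconnects.List(project).
		Filter("operationalStatus eq OS_ACTIVE"). // assumed field/value, shown only to illustrate filter syntax
		MaxResults(50)
	return call.Pages(ctx, func(page *compute.InterconnectList) error {
		for _, ic := range page.Items {
			fmt.Println(ic.Name)
		}
		return nil // returning a non-nil error here stops the iteration early
	})
}
```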
+func (r *InterconnectsService) Patch(project string, interconnect string, interconnect2 *Interconnect) *InterconnectsPatchCall { + c := &InterconnectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.instancesstartwithencryptionkeyrequest = instancesstartwithencryptionkeyrequest + c.interconnect = interconnect + c.interconnect2 = interconnect2 return c } @@ -55985,7 +70079,7 @@ func (r *InstancesService) StartWithEncryptionKey(project string, zone string, i // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesStartWithEncryptionKeyCall) RequestId(requestId string) *InstancesStartWithEncryptionKeyCall { +func (c *InterconnectsPatchCall) RequestId(requestId string) *InterconnectsPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -55993,7 +70087,7 @@ func (c *InstancesStartWithEncryptionKeyCall) RequestId(requestId string) *Insta // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesStartWithEncryptionKeyCall) Fields(s ...googleapi.Field) *InstancesStartWithEncryptionKeyCall { +func (c *InterconnectsPatchCall) Fields(s ...googleapi.Field) *InterconnectsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -56001,53 +70095,52 @@ func (c *InstancesStartWithEncryptionKeyCall) Fields(s ...googleapi.Field) *Inst // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesStartWithEncryptionKeyCall) Context(ctx context.Context) *InstancesStartWithEncryptionKeyCall { +func (c *InterconnectsPatchCall) Context(ctx context.Context) *InterconnectsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesStartWithEncryptionKeyCall) Header() http.Header { +func (c *InterconnectsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesStartWithEncryptionKeyCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesstartwithencryptionkeyrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnect2) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("PATCH", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "interconnect": c.interconnect, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.startWithEncryptionKey" call. 
+// Do executes the "compute.interconnects.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -56078,17 +70171,16 @@ func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Starts an instance that was stopped using the using the instances().stop method. For more information, see Restart an instance.", - // "httpMethod": "POST", - // "id": "compute.instances.startWithEncryptionKey", + // "description": "Updates the specified interconnect with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.interconnects.patch", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "interconnect" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance resource to start.", + // "interconnect": { + // "description": "Name of the interconnect to update.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -56105,18 +70197,11 @@ func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) ( // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey", + // "path": "{project}/global/interconnects/{interconnect}", // "request": { - // "$ref": "InstancesStartWithEncryptionKeyRequest" + // "$ref": "Interconnect" // }, // "response": { // "$ref": "Operation" @@ -56129,65 +70214,32 @@ func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) ( } -// method id "compute.instances.stop": +// method id "compute.interconnects.setIamPolicy": -type InstancesStopCall struct { +type InterconnectsSetIamPolicyCall struct { s *Service project string - zone string - instance string + resource string + policy *Policy urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Stop: Stops a running instance, shutting it down cleanly, and allows -// you to restart the instance at a later time. Stopped instances do not -// incur per-minute, virtual machine usage charges while they are -// stopped, but any resources that the virtual machine is using, such as -// persistent disks and static IP addresses, will continue to be charged -// until they are deleted. For more information, see Stopping an -// instance. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/stop -func (r *InstancesService) Stop(project string, zone string, instance string) *InstancesStopCall { - c := &InstancesStopCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +func (r *InterconnectsService) SetIamPolicy(project string, resource string, policy *Policy) *InterconnectsSetIamPolicyCall { + c := &InterconnectsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - return c -} - -// DiscardLocalSsd sets the optional parameter "discardLocalSsd": If -// true, discard the contents of any attached localSSD partitions. -// Default value is false (== preserve localSSD data). -func (c *InstancesStopCall) DiscardLocalSsd(discardLocalSsd bool) *InstancesStopCall { - c.urlParams_.Set("discardLocalSsd", fmt.Sprint(discardLocalSsd)) - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). 
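The setIamPolicy call being added in this hunk replaces the whole policy, so callers typically pair it with getIamPolicy in a read-modify-write cycle. A hedged sketch under assumed names; the role string and member value are placeholders:

```go
// Hedged sketch: read-modify-write with the interconnects IAM calls above.
package interconnectsketch

import (
	"context"

	compute "google.golang.org/api/compute/v1" // assumed vendored path
)

// grantRole fetches the current policy, appends a binding, and writes the
// whole policy back, since SetIamPolicy replaces any existing policy.
func grantRole(ctx context.Context, svc *compute.Service, project, resource, role, member string) (*compute.Policy, error) {
	policy, err := svc.Interconnects.GetIamPolicy(project, resource).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	policy.Bindings = append(policy.Bindings, &compute.Binding{
		Role:    role, // e.g. a viewer role; exact role names are outside this diff
		Members: []string{member},
	})
	return svc.Interconnects.SetIamPolicy(project, resource, policy).Context(ctx).Do()
}
```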
-func (c *InstancesStopCall) RequestId(requestId string) *InstancesStopCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.policy = policy return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesStopCall) Fields(s ...googleapi.Field) *InstancesStopCall { +func (c *InterconnectsSetIamPolicyCall) Fields(s ...googleapi.Field) *InterconnectsSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -56195,48 +70247,52 @@ func (c *InstancesStopCall) Fields(s ...googleapi.Field) *InstancesStopCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesStopCall) Context(ctx context.Context) *InstancesStopCall { +func (c *InterconnectsSetIamPolicyCall) Context(ctx context.Context) *InterconnectsSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesStopCall) Header() http.Header { +func (c *InterconnectsSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesStopCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/stop") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, - "instance": c.instance, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.stop" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.interconnects.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *InterconnectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -56255,7 +70311,7 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -56267,27 +70323,14 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur per-minute, virtual machine usage charges while they are stopped, but any resources that the virtual machine is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. For more information, see Stopping an instance.", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", // "httpMethod": "POST", - // "id": "compute.instances.stop", + // "id": "compute.interconnects.setIamPolicy", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "resource" // ], // "parameters": { - // "discardLocalSsd": { - // "description": "If true, discard the contents of any attached localSSD partitions. Default value is false (== preserve localSSD data).", - // "location": "query", - // "type": "boolean" - // }, - // "instance": { - // "description": "Name of the instance resource to stop.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -56295,22 +70338,20 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "resource": { + // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z0-9](?:[-a-z0-9_]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/stop", + // "path": "{project}/global/interconnects/{resource}/setIamPolicy", + // "request": { + // "$ref": "Policy" + // }, // "response": { - // "$ref": "Operation" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -56320,63 +70361,32 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) } -// method id "compute.instances.suspend": +// method id "compute.interconnects.testIamPermissions": -type InstancesSuspendCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectsTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Suspend: This method suspends a running instance, saving its state to -// persistent storage, and allows you to resume the instance at a later -// time. Suspended instances incur reduced per-minute, virtual machine -// usage charges while they are suspended. Any resources the virtual -// machine is using, such as persistent disks and static IP addresses, -// will continue to be charged until they are deleted. -func (r *InstancesService) Suspend(project string, zone string, instance string) *InstancesSuspendCall { - c := &InstancesSuspendCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *InterconnectsService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *InterconnectsTestIamPermissionsCall { + c := &InterconnectsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - return c -} - -// DiscardLocalSsd sets the optional parameter "discardLocalSsd": If -// true, discard the contents of any attached localSSD partitions. -// Default value is false (== preserve localSSD data). -func (c *InstancesSuspendCall) DiscardLocalSsd(discardLocalSsd bool) *InstancesSuspendCall { - c.urlParams_.Set("discardLocalSsd", fmt.Sprint(discardLocalSsd)) - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. 
This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSuspendCall) RequestId(requestId string) *InstancesSuspendCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSuspendCall) Fields(s ...googleapi.Field) *InstancesSuspendCall { +func (c *InterconnectsTestIamPermissionsCall) Fields(s ...googleapi.Field) *InterconnectsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -56384,48 +70394,52 @@ func (c *InstancesSuspendCall) Fields(s ...googleapi.Field) *InstancesSuspendCal // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSuspendCall) Context(ctx context.Context) *InstancesSuspendCall { +func (c *InterconnectsTestIamPermissionsCall) Context(ctx context.Context) *InterconnectsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSuspendCall) Header() http.Header { +func (c *InterconnectsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSuspendCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/suspend") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, - "instance": c.instance, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.suspend" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesSuspendCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.interconnects.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. 
Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InterconnectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -56444,7 +70458,7 @@ func (c *InstancesSuspendCall) Do(opts ...googleapi.CallOption) (*Operation, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -56456,27 +70470,14 @@ func (c *InstancesSuspendCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "This method suspends a running instance, saving its state to persistent storage, and allows you to resume the instance at a later time. Suspended instances incur reduced per-minute, virtual machine usage charges while they are suspended. Any resources the virtual machine is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.instances.suspend", + // "id": "compute.interconnects.testIamPermissions", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "resource" // ], // "parameters": { - // "discardLocalSsd": { - // "description": "If true, discard the contents of any attached localSSD partitions. Default value is false (== preserve localSSD data).", - // "location": "query", - // "type": "boolean" - // }, - // "instance": { - // "description": "Name of the instance resource to suspend.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -56484,113 +70485,116 @@ func (c *InstancesSuspendCall) Do(opts ...googleapi.CallOption) (*Operation, err // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "resource": { + // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/suspend", + // "path": "{project}/global/interconnects/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.testIamPermissions": +// method id "compute.licenseCodes.get": -type InstancesTestIamPermissionsCall struct { - s *Service - project string - zone string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type LicenseCodesGetCall struct { + s *Service + project string + licenseCode string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *InstancesService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstancesTestIamPermissionsCall { - c := &InstancesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Return a specified license code. License codes are mirrored +// across all projects that have permissions to read the License Code. +func (r *LicenseCodesService) Get(project string, licenseCode string) *LicenseCodesGetCall { + c := &LicenseCodesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.licenseCode = licenseCode return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstancesTestIamPermissionsCall { +func (c *LicenseCodesGetCall) Fields(s ...googleapi.Field) *LicenseCodesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *LicenseCodesGetCall) IfNoneMatch(entityTag string) *LicenseCodesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
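A hedged sketch of the licenseCodes.get call defined here; the license code is the numeric identifier described in the call's metadata (the value used in the comment is a placeholder), and the import path is an assumption:

```go
// Hedged sketch: calling the generated LicenseCodesService.Get method above.
package licensesketch

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1" // assumed vendored path
	"google.golang.org/api/googleapi"
)

// describeLicenseCode fetches a license code (e.g. "1000201") and honors the
// If-None-Match/ETag convention supported by this read-only call.
func describeLicenseCode(ctx context.Context, svc *compute.Service, project, code, lastETag string) error {
	lc, err := svc.LicenseCodes.Get(project, code).IfNoneMatch(lastETag).Context(ctx).Do()
	if googleapi.IsNotModified(err) {
		return nil // 304: the previously fetched copy is still valid
	}
	if err != nil {
		return err
	}
	fmt.Println(lc.Name) // Name is the numeric code; other fields vary by API surface
	return nil
}
```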
-func (c *InstancesTestIamPermissionsCall) Context(ctx context.Context) *InstancesTestIamPermissionsCall { +func (c *LicenseCodesGetCall) Context(ctx context.Context) *LicenseCodesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesTestIamPermissionsCall) Header() http.Header { +func (c *LicenseCodesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *LicenseCodesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenseCodes/{licenseCode}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, + "project": c.project, + "licenseCode": c.licenseCode, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.licenseCodes.get" call. +// Exactly one of *LicenseCode or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *LicenseCode.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *LicenseCodesGetCall) Do(opts ...googleapi.CallOption) (*LicenseCode, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -56609,7 +70613,7 @@ func (c *InstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &LicenseCode{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -56621,43 +70625,32 @@ func (c *InstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.instances.testIamPermissions", + // "description": "Return a specified license code. License codes are mirrored across all projects that have permissions to read the License Code.", + // "httpMethod": "GET", + // "id": "compute.licenseCodes.get", // "parameterOrder": [ // "project", - // "zone", - // "resource" + // "licenseCode" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", + // "licenseCode": { + // "description": "Number corresponding to the License code resource to return.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[0-9]{0,61}?", // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/global/licenseCodes/{licenseCode}", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "LicenseCode" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -56668,30 +70661,22 @@ func (c *InstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes } -// method id "compute.instances.updateAccessConfig": +// method id "compute.licenses.delete": -type InstancesUpdateAccessConfigCall struct { - s *Service - project string - zone string - instance string - accessconfig *AccessConfig - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type LicensesDeleteCall struct { + s *Service + project string + license string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// UpdateAccessConfig: Updates the specified access config from an -// instance's network interface with the data included in the request. -// This method supports PATCH semantics and uses the JSON merge patch -// format and processing rules. 
-func (r *InstancesService) UpdateAccessConfig(project string, zone string, instance string, networkInterface string, accessconfig *AccessConfig) *InstancesUpdateAccessConfigCall { - c := &InstancesUpdateAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified license. +func (r *LicensesService) Delete(project string, license string) *LicensesDeleteCall { + c := &LicensesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.urlParams_.Set("networkInterface", networkInterface) - c.accessconfig = accessconfig + c.license = license return c } @@ -56709,7 +70694,7 @@ func (r *InstancesService) UpdateAccessConfig(project string, zone string, insta // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesUpdateAccessConfigCall) RequestId(requestId string) *InstancesUpdateAccessConfigCall { +func (c *LicensesDeleteCall) RequestId(requestId string) *LicensesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -56717,7 +70702,7 @@ func (c *InstancesUpdateAccessConfigCall) RequestId(requestId string) *Instances // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesUpdateAccessConfigCall) Fields(s ...googleapi.Field) *InstancesUpdateAccessConfigCall { +func (c *LicensesDeleteCall) Fields(s ...googleapi.Field) *LicensesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -56725,53 +70710,47 @@ func (c *InstancesUpdateAccessConfigCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesUpdateAccessConfigCall) Context(ctx context.Context) *InstancesUpdateAccessConfigCall { +func (c *LicensesDeleteCall) Context(ctx context.Context) *LicensesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesUpdateAccessConfigCall) Header() http.Header { +func (c *LicensesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesUpdateAccessConfigCall) doRequest(alt string) (*http.Response, error) { +func (c *LicensesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.accessconfig) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateAccessConfig") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{license}") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "license": c.license, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.updateAccessConfig" call. +// Do executes the "compute.licenses.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesUpdateAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *LicensesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -56802,29 +70781,21 @@ func (c *InstancesUpdateAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Updates the specified access config from an instance's network interface with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "POST", - // "id": "compute.instances.updateAccessConfig", + // "description": "Deletes the specified license.", + // "httpMethod": "DELETE", + // "id": "compute.licenses.delete", // "parameterOrder": [ // "project", - // "zone", - // "instance", - // "networkInterface" + // "license" // ], // "parameters": { - // "instance": { - // "description": "The instance name for this request.", + // "license": { + // "description": "Name of the license resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "networkInterface": { - // "description": "The name of the network interface where the access config is attached.", - // "location": "query", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -56836,19 +70807,9 @@ func (c *InstancesUpdateAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/updateAccessConfig", - // "request": { - // "$ref": "AccessConfig" - // }, + // "path": "{project}/global/licenses/{license}", // "response": { // "$ref": "Operation" // }, @@ -56860,108 +70821,92 @@ func (c *InstancesUpdateAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.instances.updateNetworkInterface": - -type InstancesUpdateNetworkInterfaceCall struct { - s *Service - project string - zone string - instance string - networkinterface *NetworkInterface - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// UpdateNetworkInterface: Updates an instance's network interface. This -// method follows PATCH semantics. -func (r *InstancesService) UpdateNetworkInterface(project string, zone string, instance string, networkInterface string, networkinterface *NetworkInterface) *InstancesUpdateNetworkInterfaceCall { - c := &InstancesUpdateNetworkInterfaceCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instance = instance - c.urlParams_.Set("networkInterface", networkInterface) - c.networkinterface = networkinterface - return c -} +// method id "compute.licenses.get": -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesUpdateNetworkInterfaceCall) RequestId(requestId string) *InstancesUpdateNetworkInterfaceCall { - c.urlParams_.Set("requestId", requestId) +type LicensesGetCall struct { + s *Service + project string + license string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified License resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/licenses/get +func (r *LicensesService) Get(project string, license string) *LicensesGetCall { + c := &LicensesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.license = license return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InstancesUpdateNetworkInterfaceCall) Fields(s ...googleapi.Field) *InstancesUpdateNetworkInterfaceCall { +func (c *LicensesGetCall) Fields(s ...googleapi.Field) *LicensesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *LicensesGetCall) IfNoneMatch(entityTag string) *LicensesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesUpdateNetworkInterfaceCall) Context(ctx context.Context) *InstancesUpdateNetworkInterfaceCall { +func (c *LicensesGetCall) Context(ctx context.Context) *LicensesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesUpdateNetworkInterfaceCall) Header() http.Header { +func (c *LicensesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesUpdateNetworkInterfaceCall) doRequest(alt string) (*http.Response, error) { +func (c *LicensesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkinterface) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/updateNetworkInterface") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{license}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "license": c.license, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.updateNetworkInterface" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesUpdateNetworkInterfaceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.licenses.get" call. +// Exactly one of *License or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *License.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. 
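Because the added LicensesGetCall supports If-None-Match, a conditional read pairs naturally with googleapi.IsNotModified, as the comments above describe. A minimal sketch, assuming a *compute.Service built as in the earlier snippet plus the google.golang.org/api/googleapi import; the project and license names are placeholders:

// getLicenseIfChanged re-reads a license only when it has changed since the
// ETag captured on a previous response.
func getLicenseIfChanged(ctx context.Context, svc *compute.Service, etag string) (*compute.License, error) {
	call := svc.Licenses.Get("my-project", "my-license").Context(ctx)
	if etag != "" {
		call = call.IfNoneMatch(etag) // sends If-None-Match on the GET
	}
	lic, err := call.Do()
	if googleapi.IsNotModified(err) {
		return nil, nil // 304: the cached copy is still current
	}
	if err != nil {
		return nil, err
	}
	// The embedded ServerResponse carries the headers, so the new ETag can be cached.
	fmt.Println("license:", lic.Name, "etag:", lic.Header.Get("Etag"))
	return lic, nil
}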
+func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -56980,7 +70925,7 @@ func (c *InstancesUpdateNetworkInterfaceCall) Do(opts ...googleapi.CallOption) ( if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &License{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -56992,154 +70937,67 @@ func (c *InstancesUpdateNetworkInterfaceCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Updates an instance's network interface. This method follows PATCH semantics.", - // "httpMethod": "PATCH", - // "id": "compute.instances.updateNetworkInterface", + // "description": "Returns the specified License resource.", + // "httpMethod": "GET", + // "id": "compute.licenses.get", // "parameterOrder": [ // "project", - // "zone", - // "instance", - // "networkInterface" + // "license" // ], // "parameters": { - // "instance": { - // "description": "The instance name for this request.", + // "license": { + // "description": "Name of the License resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "networkInterface": { - // "description": "The name of the network interface to update.", - // "location": "query", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/updateNetworkInterface", - // "request": { - // "$ref": "NetworkInterface" - // }, + // "path": "{project}/global/licenses/{license}", // "response": { - // "$ref": "Operation" + // "$ref": "License" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.interconnectAttachments.aggregatedList": +// method id "compute.licenses.getIamPolicy": -type InterconnectAttachmentsAggregatedListCall struct { +type LicensesGetIamPolicyCall struct { s *Service project string + resource string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// AggregatedList: Retrieves an aggregated list of interconnect -// attachments. -func (r *InterconnectAttachmentsService) AggregatedList(project string) *InterconnectAttachmentsAggregatedListCall { - c := &InterconnectAttachmentsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. +func (r *LicensesService) GetIamPolicy(project string, resource string) *LicensesGetIamPolicyCall { + c := &LicensesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. 
-func (c *InterconnectAttachmentsAggregatedListCall) Filter(filter string) *InterconnectAttachmentsAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InterconnectAttachmentsAggregatedListCall) MaxResults(maxResults int64) *InterconnectAttachmentsAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InterconnectAttachmentsAggregatedListCall) OrderBy(orderBy string) *InterconnectAttachmentsAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InterconnectAttachmentsAggregatedListCall) PageToken(pageToken string) *InterconnectAttachmentsAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) + c.resource = resource return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectAttachmentsAggregatedListCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsAggregatedListCall { +func (c *LicensesGetIamPolicyCall) Fields(s ...googleapi.Field) *LicensesGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -57149,7 +71007,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) Fields(s ...googleapi.Field) // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InterconnectAttachmentsAggregatedListCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsAggregatedListCall { +func (c *LicensesGetIamPolicyCall) IfNoneMatch(entityTag string) *LicensesGetIamPolicyCall { c.ifNoneMatch_ = entityTag return c } @@ -57157,21 +71015,21 @@ func (c *InterconnectAttachmentsAggregatedListCall) IfNoneMatch(entityTag string // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectAttachmentsAggregatedListCall) Context(ctx context.Context) *InterconnectAttachmentsAggregatedListCall { +func (c *LicensesGetIamPolicyCall) Context(ctx context.Context) *LicensesGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InterconnectAttachmentsAggregatedListCall) Header() http.Header { +func (c *LicensesGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectAttachmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *LicensesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -57182,25 +71040,25 @@ func (c *InterconnectAttachmentsAggregatedListCall) doRequest(alt string) (*http } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/interconnectAttachments") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectAttachments.aggregatedList" call. -// Exactly one of *InterconnectAttachmentAggregatedList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *InterconnectAttachmentAggregatedList.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachmentAggregatedList, error) { +// Do executes the "compute.licenses.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *LicensesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -57219,7 +71077,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOpt if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InterconnectAttachmentAggregatedList{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -57231,47 +71089,32 @@ func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOpt } return ret, nil // { - // "description": "Retrieves an aggregated list of interconnect attachments.", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", // "httpMethod": "GET", - // "id": "compute.interconnectAttachments.aggregatedList", + // "id": "compute.licenses.getIamPolicy", // "parameterOrder": [ - // "project" + // "project", + // "resource" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. 
Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "[a-z0-9](?:[-a-z0-9_]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/aggregated/interconnectAttachments", + // "path": "{project}/global/licenses/{resource}/getIamPolicy", // "response": { - // "$ref": "InterconnectAttachmentAggregatedList" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -57282,45 +71125,22 @@ func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOpt } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. 
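For the licenses.getIamPolicy call added above, a read looks like the sketch below (same assumed service setup; the project and license names are placeholders):

// printLicenseBindings dumps the role bindings currently attached to a license.
func printLicenseBindings(ctx context.Context, svc *compute.Service) error {
	// GET {project}/global/licenses/{resource}/getIamPolicy
	policy, err := svc.Licenses.GetIamPolicy("my-project", "my-license").Context(ctx).Do()
	if err != nil {
		return err
	}
	for _, b := range policy.Bindings {
		fmt.Printf("%s -> %v\n", b.Role, b.Members)
	}
	return nil
}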
-// The provided context supersedes any context provided to the Context method. -func (c *InterconnectAttachmentsAggregatedListCall) Pages(ctx context.Context, f func(*InterconnectAttachmentAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.interconnectAttachments.delete": +// method id "compute.licenses.insert": -type InterconnectAttachmentsDeleteCall struct { - s *Service - project string - region string - interconnectAttachment string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type LicensesInsertCall struct { + s *Service + project string + license *License + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified interconnect attachment. -func (r *InterconnectAttachmentsService) Delete(project string, region string, interconnectAttachment string) *InterconnectAttachmentsDeleteCall { - c := &InterconnectAttachmentsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Create a License resource in the specified project. +func (r *LicensesService) Insert(project string, license *License) *LicensesInsertCall { + c := &LicensesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.interconnectAttachment = interconnectAttachment + c.license = license return c } @@ -57338,7 +71158,7 @@ func (r *InterconnectAttachmentsService) Delete(project string, region string, i // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InterconnectAttachmentsDeleteCall) RequestId(requestId string) *InterconnectAttachmentsDeleteCall { +func (c *LicensesInsertCall) RequestId(requestId string) *LicensesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -57346,7 +71166,7 @@ func (c *InterconnectAttachmentsDeleteCall) RequestId(requestId string) *Interco // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectAttachmentsDeleteCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsDeleteCall { +func (c *LicensesInsertCall) Fields(s ...googleapi.Field) *LicensesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -57354,48 +71174,51 @@ func (c *InterconnectAttachmentsDeleteCall) Fields(s ...googleapi.Field) *Interc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectAttachmentsDeleteCall) Context(ctx context.Context) *InterconnectAttachmentsDeleteCall { +func (c *LicensesInsertCall) Context(ctx context.Context) *LicensesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
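The RequestId parameter documented above is what makes licenses.insert retry-safe: replaying the same UUID lets the server drop the duplicate request. A hedged sketch under the same assumed setup (the license name is made up, and the UUID is supplied by the caller rather than generated here):

// insertLicense creates a License and returns the resulting Operation.
// Passing the same requestID again on a retry is de-duplicated server-side.
func insertLicense(ctx context.Context, svc *compute.Service, requestID string) (*compute.Operation, error) {
	lic := &compute.License{Name: "my-custom-license"}
	op, err := svc.Licenses.Insert("my-project", lic).RequestId(requestID).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	fmt.Println("operation:", op.Name, op.Status)
	return op, nil
}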
-func (c *InterconnectAttachmentsDeleteCall) Header() http.Header { +func (c *LicensesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectAttachmentsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *LicensesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.license) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "interconnectAttachment": c.interconnectAttachment, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectAttachments.delete" call. +// Do executes the "compute.licenses.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InterconnectAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *LicensesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -57426,22 +71249,13 @@ func (c *InterconnectAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*O } return ret, nil // { - // "description": "Deletes the specified interconnect attachment.", - // "httpMethod": "DELETE", - // "id": "compute.interconnectAttachments.delete", + // "description": "Create a License resource in the specified project.", + // "httpMethod": "POST", + // "id": "compute.licenses.insert", // "parameterOrder": [ - // "project", - // "region", - // "interconnectAttachment" + // "project" // ], // "parameters": { - // "interconnectAttachment": { - // "description": "Name of the interconnect attachment to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -57449,57 +71263,124 @@ func (c *InterconnectAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*O // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "requestId": { // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + // "path": "{project}/global/licenses", + // "request": { + // "$ref": "License" + // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "compute.interconnectAttachments.get": +// method id "compute.licenses.list": -type InterconnectAttachmentsGetCall struct { - s *Service - project string - region string - interconnectAttachment string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type LicensesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified interconnect attachment. -func (r *InterconnectAttachmentsService) Get(project string, region string, interconnectAttachment string) *InterconnectAttachmentsGetCall { - c := &InterconnectAttachmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of licenses available in the specified +// project. This method does not get any licenses that belong to other +// projects, including licenses attached to publicly-available images, +// like Debian 8. If you want to get a list of publicly-available +// licenses, use this method to make a request to the respective image +// project, such as debian-cloud or windows-cloud. +func (r *LicensesService) List(project string) *LicensesListCall { + c := &LicensesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.interconnectAttachment = interconnectAttachment + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. 
+// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *LicensesListCall) Filter(filter string) *LicensesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *LicensesListCall) MaxResults(maxResults int64) *LicensesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *LicensesListCall) OrderBy(orderBy string) *LicensesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *LicensesListCall) PageToken(pageToken string) *LicensesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectAttachmentsGetCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsGetCall { +func (c *LicensesListCall) Fields(s ...googleapi.Field) *LicensesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -57509,7 +71390,7 @@ func (c *InterconnectAttachmentsGetCall) Fields(s ...googleapi.Field) *Interconn // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InterconnectAttachmentsGetCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsGetCall { +func (c *LicensesListCall) IfNoneMatch(entityTag string) *LicensesListCall { c.ifNoneMatch_ = entityTag return c } @@ -57517,21 +71398,21 @@ func (c *InterconnectAttachmentsGetCall) IfNoneMatch(entityTag string) *Intercon // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *InterconnectAttachmentsGetCall) Context(ctx context.Context) *InterconnectAttachmentsGetCall { +func (c *LicensesListCall) Context(ctx context.Context) *LicensesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectAttachmentsGetCall) Header() http.Header { +func (c *LicensesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectAttachmentsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *LicensesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -57542,26 +71423,24 @@ func (c *InterconnectAttachmentsGetCall) doRequest(alt string) (*http.Response, } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "interconnectAttachment": c.interconnectAttachment, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectAttachments.get" call. -// Exactly one of *InterconnectAttachment or error will be non-nil. Any +// Do executes the "compute.licenses.list" call. +// Exactly one of *LicensesListResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *InterconnectAttachment.ServerResponse.Header or (if a response was +// *LicensesListResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachment, error) { +func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -57580,7 +71459,7 @@ func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*Inte if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InterconnectAttachment{ + ret := &LicensesListResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -57592,20 +71471,34 @@ func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*Inte } return ret, nil // { - // "description": "Returns the specified interconnect attachment.", + // "description": "Retrieves the list of licenses available in the specified project. This method does not get any licenses that belong to other projects, including licenses attached to publicly-available images, like Debian 8. 
If you want to get a list of publicly-available licenses, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", // "httpMethod": "GET", - // "id": "compute.interconnectAttachments.get", + // "id": "compute.licenses.list", // "parameterOrder": [ - // "project", - // "region", - // "interconnectAttachment" + // "project" // ], // "parameters": { - // "interconnectAttachment": { - // "description": "Name of the interconnect attachment to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -57614,18 +71507,11 @@ func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*Inte // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + // "path": "{project}/global/licenses", // "response": { - // "$ref": "InterconnectAttachment" + // "$ref": "LicensesListResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -57636,95 +71522,106 @@ func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*Inte } -// method id "compute.interconnectAttachments.getIamPolicy": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *LicensesListCall) Pages(ctx context.Context, f func(*LicensesListResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InterconnectAttachmentsGetIamPolicyCall struct { - s *Service - project string - region string - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// method id "compute.licenses.setIamPolicy": + +type LicensesSetIamPolicyCall struct { + s *Service + project string + resource string + policy *Policy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. -func (r *InterconnectAttachmentsService) GetIamPolicy(project string, region string, resource string) *InterconnectAttachmentsGetIamPolicyCall { - c := &InterconnectAttachmentsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +func (r *LicensesService) SetIamPolicy(project string, resource string, policy *Policy) *LicensesSetIamPolicyCall { + c := &LicensesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region c.resource = resource + c.policy = policy return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectAttachmentsGetIamPolicyCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsGetIamPolicyCall { +func (c *LicensesSetIamPolicyCall) Fields(s ...googleapi.Field) *LicensesSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. 
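The LicensesListCall above also gains the usual Pages helper, which follows nextPageToken automatically. A sketch of listing with a filter, under the same assumed setup (the filter string and project are illustrative):

// listChargeableLicenses walks every page of compute.licenses.list.
func listChargeableLicenses(ctx context.Context, svc *compute.Service) error {
	call := svc.Licenses.List("my-project").MaxResults(100).Filter("chargesUseFee eq true")
	// Pages invokes the callback once per page; returning an error halts iteration.
	return call.Pages(ctx, func(page *compute.LicensesListResponse) error {
		for _, lic := range page.Items {
			fmt.Println(lic.Name)
		}
		return nil
	})
}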
This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InterconnectAttachmentsGetIamPolicyCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectAttachmentsGetIamPolicyCall) Context(ctx context.Context) *InterconnectAttachmentsGetIamPolicyCall { +func (c *LicensesSetIamPolicyCall) Context(ctx context.Context) *LicensesSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectAttachmentsGetIamPolicyCall) Header() http.Header { +func (c *LicensesSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectAttachmentsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *LicensesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectAttachments.getIamPolicy" call. +// Do executes the "compute.licenses.setIamPolicy" call. // Exactly one of *Policy or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Policy.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *InterconnectAttachmentsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +func (c *LicensesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -57755,12 +71652,11 @@ func (c *InterconnectAttachmentsGetIamPolicyCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", - // "httpMethod": "GET", - // "id": "compute.interconnectAttachments.getIamPolicy", + // "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", + // "httpMethod": "POST", + // "id": "compute.licenses.setIamPolicy", // "parameterOrder": [ // "project", - // "region", // "resource" // ], // "parameters": { @@ -57771,79 +71667,55 @@ func (c *InterconnectAttachmentsGetIamPolicyCall) Do(opts ...googleapi.CallOptio // "required": true, // "type": "string" // }, - // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "resource": { // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z0-9](?:[-a-z0-9_]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/interconnectAttachments/{resource}/getIamPolicy", + // "path": "{project}/global/licenses/{resource}/setIamPolicy", + // "request": { + // "$ref": "Policy" + // }, // "response": { // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.interconnectAttachments.insert": +// method id "compute.licenses.testIamPermissions": -type InterconnectAttachmentsInsertCall struct { +type LicensesTestIamPermissionsCall struct { s *Service project string - region string - interconnectattachment *InterconnectAttachment + resource string + testpermissionsrequest *TestPermissionsRequest urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Insert: Creates an InterconnectAttachment in the specified project -// using the data included in the request. -func (r *InterconnectAttachmentsService) Insert(project string, region string, interconnectattachment *InterconnectAttachment) *InterconnectAttachmentsInsertCall { - c := &InterconnectAttachmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *LicensesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *LicensesTestIamPermissionsCall { + c := &LicensesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.interconnectattachment = interconnectattachment - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *InterconnectAttachmentsInsertCall) RequestId(requestId string) *InterconnectAttachmentsInsertCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectAttachmentsInsertCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsInsertCall { +func (c *LicensesTestIamPermissionsCall) Fields(s ...googleapi.Field) *LicensesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -57851,52 +71723,52 @@ func (c *InterconnectAttachmentsInsertCall) Fields(s ...googleapi.Field) *Interc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectAttachmentsInsertCall) Context(ctx context.Context) *InterconnectAttachmentsInsertCall { +func (c *LicensesTestIamPermissionsCall) Context(ctx context.Context) *LicensesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectAttachmentsInsertCall) Header() http.Header { +func (c *LicensesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectAttachmentsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *LicensesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnectattachment) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectAttachments.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.licenses.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. 
Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *LicensesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -57915,7 +71787,7 @@ func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*O if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -57927,12 +71799,12 @@ func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*O } return ret, nil // { - // "description": "Creates an InterconnectAttachment in the specified project using the data included in the request.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.interconnectAttachments.insert", + // "id": "compute.licenses.testIamPermissions", // "parameterOrder": [ // "project", - // "region" + // "resource" // ], // "parameters": { // "project": { @@ -57942,52 +71814,46 @@ func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*O // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", + // "resource": { + // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/interconnectAttachments", + // "path": "{project}/global/licenses/{resource}/testIamPermissions", // "request": { - // "$ref": "InterconnectAttachment" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.interconnectAttachments.list": +// method id "compute.machineTypes.aggregatedList": -type InterconnectAttachmentsListCall struct { +type MachineTypesAggregatedListCall struct { s *Service project string - region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves the list of interconnect attachments contained within -// the specified region. 
-func (r *InterconnectAttachmentsService) List(project string, region string) *InterconnectAttachmentsListCall { - c := &InterconnectAttachmentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of machine types. +// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/aggregatedList +func (r *MachineTypesService) AggregatedList(project string) *MachineTypesAggregatedListCall { + c := &MachineTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region return c } @@ -58017,7 +71883,7 @@ func (r *InterconnectAttachmentsService) List(project string, region string) *In // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *InterconnectAttachmentsListCall) Filter(filter string) *InterconnectAttachmentsListCall { +func (c *MachineTypesAggregatedListCall) Filter(filter string) *MachineTypesAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -58028,7 +71894,7 @@ func (c *InterconnectAttachmentsListCall) Filter(filter string) *InterconnectAtt // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *InterconnectAttachmentsListCall) MaxResults(maxResults int64) *InterconnectAttachmentsListCall { +func (c *MachineTypesAggregatedListCall) MaxResults(maxResults int64) *MachineTypesAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -58045,7 +71911,7 @@ func (c *InterconnectAttachmentsListCall) MaxResults(maxResults int64) *Intercon // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *InterconnectAttachmentsListCall) OrderBy(orderBy string) *InterconnectAttachmentsListCall { +func (c *MachineTypesAggregatedListCall) OrderBy(orderBy string) *MachineTypesAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -58053,7 +71919,7 @@ func (c *InterconnectAttachmentsListCall) OrderBy(orderBy string) *InterconnectA // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *InterconnectAttachmentsListCall) PageToken(pageToken string) *InterconnectAttachmentsListCall { +func (c *MachineTypesAggregatedListCall) PageToken(pageToken string) *MachineTypesAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -58061,7 +71927,7 @@ func (c *InterconnectAttachmentsListCall) PageToken(pageToken string) *Interconn // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectAttachmentsListCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsListCall { +func (c *MachineTypesAggregatedListCall) Fields(s ...googleapi.Field) *MachineTypesAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -58071,7 +71937,7 @@ func (c *InterconnectAttachmentsListCall) Fields(s ...googleapi.Field) *Intercon // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
-func (c *InterconnectAttachmentsListCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsListCall { +func (c *MachineTypesAggregatedListCall) IfNoneMatch(entityTag string) *MachineTypesAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -58079,21 +71945,21 @@ func (c *InterconnectAttachmentsListCall) IfNoneMatch(entityTag string) *Interco // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectAttachmentsListCall) Context(ctx context.Context) *InterconnectAttachmentsListCall { +func (c *MachineTypesAggregatedListCall) Context(ctx context.Context) *MachineTypesAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectAttachmentsListCall) Header() http.Header { +func (c *MachineTypesAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectAttachmentsListCall) doRequest(alt string) (*http.Response, error) { +func (c *MachineTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -58104,25 +71970,24 @@ func (c *InterconnectAttachmentsListCall) doRequest(alt string) (*http.Response, } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/machineTypes") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectAttachments.list" call. -// Exactly one of *InterconnectAttachmentList or error will be non-nil. +// Do executes the "compute.machineTypes.aggregatedList" call. +// Exactly one of *MachineTypeAggregatedList or error will be non-nil. // Any non-2xx status code is an error. Response headers are in either -// *InterconnectAttachmentList.ServerResponse.Header or (if a response +// *MachineTypeAggregatedList.ServerResponse.Header or (if a response // was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachmentList, error) { +func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*MachineTypeAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -58141,7 +72006,7 @@ func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*Int if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InterconnectAttachmentList{ + ret := &MachineTypeAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -58153,12 +72018,11 @@ func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*Int } return ret, nil // { - // "description": "Retrieves the list of interconnect attachments contained within the specified region.", + // "description": "Retrieves an aggregated list of machine types.", // "httpMethod": "GET", - // "id": "compute.interconnectAttachments.list", + // "id": "compute.machineTypes.aggregatedList", // "parameterOrder": [ - // "project", - // "region" + // "project" // ], // "parameters": { // "filter": { @@ -58190,18 +72054,11 @@ func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*Int // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/interconnectAttachments", + // "path": "{project}/aggregated/machineTypes", // "response": { - // "$ref": "InterconnectAttachmentList" + // "$ref": "MachineTypeAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -58215,7 +72072,7 @@ func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*Int // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *InterconnectAttachmentsListCall) Pages(ctx context.Context, f func(*InterconnectAttachmentList) error) error { +func (c *MachineTypesAggregatedListCall) Pages(ctx context.Context, f func(*MachineTypeAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -58233,88 +72090,96 @@ func (c *InterconnectAttachmentsListCall) Pages(ctx context.Context, f func(*Int } } -// method id "compute.interconnectAttachments.testIamPermissions": +// method id "compute.machineTypes.get": -type InterconnectAttachmentsTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type MachineTypesGetCall struct { + s *Service + project string + zone string + machineType string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *InterconnectAttachmentsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *InterconnectAttachmentsTestIamPermissionsCall { - c := &InterconnectAttachmentsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified machine type. 
Get a list of available +// machine types by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/get +func (r *MachineTypesService) Get(project string, zone string, machineType string) *MachineTypesGetCall { + c := &MachineTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.zone = zone + c.machineType = machineType return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectAttachmentsTestIamPermissionsCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsTestIamPermissionsCall { +func (c *MachineTypesGetCall) Fields(s ...googleapi.Field) *MachineTypesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *MachineTypesGetCall) IfNoneMatch(entityTag string) *MachineTypesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectAttachmentsTestIamPermissionsCall) Context(ctx context.Context) *InterconnectAttachmentsTestIamPermissionsCall { +func (c *MachineTypesGetCall) Context(ctx context.Context) *MachineTypesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectAttachmentsTestIamPermissionsCall) Header() http.Header { +func (c *MachineTypesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectAttachmentsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *MachineTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/machineTypes/{machineType}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, + "zone": c.zone, + "machineType": c.machineType, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectAttachments.testIamPermissions" call. 
-// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InterconnectAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.machineTypes.get" call. +// Exactly one of *MachineType or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *MachineType.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -58333,7 +72198,7 @@ func (c *InterconnectAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.Cal if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &MachineType{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -58345,43 +72210,40 @@ func (c *InterconnectAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.Cal } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.interconnectAttachments.testIamPermissions", + // "description": "Returns the specified machine type. 
Get a list of available machine types by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.machineTypes.get", // "parameterOrder": [ // "project", - // "region", - // "resource" + // "zone", + // "machineType" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "machineType": { + // "description": "Name of the machine type to return.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "region": { - // "description": "The name of the region for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name of the resource for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/interconnectAttachments/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/zones/{zone}/machineTypes/{machineType}", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "MachineType" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -58392,32 +72254,99 @@ func (c *InterconnectAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.Cal } -// method id "compute.interconnectLocations.get": +// method id "compute.machineTypes.list": -type InterconnectLocationsGetCall struct { - s *Service - project string - interconnectLocation string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type MachineTypesListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the details for the specified interconnect location. Get -// a list of available interconnect locations by making a list() -// request. -func (r *InterconnectLocationsService) Get(project string, interconnectLocation string) *InterconnectLocationsGetCall { - c := &InterconnectLocationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of machine types available to the specified +// project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/list +func (r *MachineTypesService) List(project string, zone string) *MachineTypesListCall { + c := &MachineTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.interconnectLocation = interconnectLocation + c.zone = zone + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). 
The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *MachineTypesListCall) Filter(filter string) *MachineTypesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *MachineTypesListCall) MaxResults(maxResults int64) *MachineTypesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *MachineTypesListCall) OrderBy(orderBy string) *MachineTypesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *MachineTypesListCall) PageToken(pageToken string) *MachineTypesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectLocationsGetCall) Fields(s ...googleapi.Field) *InterconnectLocationsGetCall { +func (c *MachineTypesListCall) Fields(s ...googleapi.Field) *MachineTypesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -58427,7 +72356,7 @@ func (c *InterconnectLocationsGetCall) Fields(s ...googleapi.Field) *Interconnec // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
-func (c *InterconnectLocationsGetCall) IfNoneMatch(entityTag string) *InterconnectLocationsGetCall { +func (c *MachineTypesListCall) IfNoneMatch(entityTag string) *MachineTypesListCall { c.ifNoneMatch_ = entityTag return c } @@ -58435,21 +72364,21 @@ func (c *InterconnectLocationsGetCall) IfNoneMatch(entityTag string) *Interconne // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectLocationsGetCall) Context(ctx context.Context) *InterconnectLocationsGetCall { +func (c *MachineTypesListCall) Context(ctx context.Context) *MachineTypesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectLocationsGetCall) Header() http.Header { +func (c *MachineTypesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectLocationsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *MachineTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -58460,25 +72389,25 @@ func (c *InterconnectLocationsGetCall) doRequest(alt string) (*http.Response, er } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnectLocations/{interconnectLocation}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/machineTypes") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "interconnectLocation": c.interconnectLocation, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectLocations.get" call. -// Exactly one of *InterconnectLocation or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InterconnectLocation.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.machineTypes.list" call. +// Exactly one of *MachineTypeList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *MachineTypeList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InterconnectLocationsGetCall) Do(opts ...googleapi.CallOption) (*InterconnectLocation, error) { +func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -58497,7 +72426,7 @@ func (c *InterconnectLocationsGetCall) Do(opts ...googleapi.CallOption) (*Interc if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InterconnectLocation{ + ret := &MachineTypeList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -58509,19 +72438,35 @@ func (c *InterconnectLocationsGetCall) Do(opts ...googleapi.CallOption) (*Interc } return ret, nil // { - // "description": "Returns the details for the specified interconnect location. Get a list of available interconnect locations by making a list() request.", + // "description": "Retrieves a list of machine types available to the specified project.", // "httpMethod": "GET", - // "id": "compute.interconnectLocations.get", + // "id": "compute.machineTypes.list", // "parameterOrder": [ // "project", - // "interconnectLocation" + // "zone" // ], // "parameters": { - // "interconnectLocation": { - // "description": "Name of the interconnect location to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -58530,11 +72475,18 @@ func (c *InterconnectLocationsGetCall) Do(opts ...googleapi.CallOption) (*Interc // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/interconnectLocations/{interconnectLocation}", + // "path": "{project}/zones/{zone}/machineTypes", // "response": { - // "$ref": "InterconnectLocation" + // "$ref": "MachineTypeList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -58545,9 +72497,30 @@ func (c *InterconnectLocationsGetCall) Do(opts ...googleapi.CallOption) (*Interc } -// method id "compute.interconnectLocations.list": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *MachineTypesListCall) Pages(ctx context.Context, f func(*MachineTypeList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InterconnectLocationsListCall struct { +// method id "compute.networkEndpointGroups.aggregatedList": + +type NetworkEndpointGroupsAggregatedListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -58556,10 +72529,10 @@ type InterconnectLocationsListCall struct { header_ http.Header } -// List: Retrieves the list of interconnect locations available to the -// specified project. -func (r *InterconnectLocationsService) List(project string) *InterconnectLocationsListCall { - c := &InterconnectLocationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves the list of network endpoint groups and +// sorts them by zone. +func (r *NetworkEndpointGroupsService) AggregatedList(project string) *NetworkEndpointGroupsAggregatedListCall { + c := &NetworkEndpointGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -58590,7 +72563,7 @@ func (r *InterconnectLocationsService) List(project string) *InterconnectLocatio // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. 
-func (c *InterconnectLocationsListCall) Filter(filter string) *InterconnectLocationsListCall { +func (c *NetworkEndpointGroupsAggregatedListCall) Filter(filter string) *NetworkEndpointGroupsAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -58601,7 +72574,7 @@ func (c *InterconnectLocationsListCall) Filter(filter string) *InterconnectLocat // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *InterconnectLocationsListCall) MaxResults(maxResults int64) *InterconnectLocationsListCall { +func (c *NetworkEndpointGroupsAggregatedListCall) MaxResults(maxResults int64) *NetworkEndpointGroupsAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -58618,7 +72591,7 @@ func (c *InterconnectLocationsListCall) MaxResults(maxResults int64) *Interconne // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *InterconnectLocationsListCall) OrderBy(orderBy string) *InterconnectLocationsListCall { +func (c *NetworkEndpointGroupsAggregatedListCall) OrderBy(orderBy string) *NetworkEndpointGroupsAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -58626,7 +72599,7 @@ func (c *InterconnectLocationsListCall) OrderBy(orderBy string) *InterconnectLoc // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *InterconnectLocationsListCall) PageToken(pageToken string) *InterconnectLocationsListCall { +func (c *NetworkEndpointGroupsAggregatedListCall) PageToken(pageToken string) *NetworkEndpointGroupsAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -58634,7 +72607,7 @@ func (c *InterconnectLocationsListCall) PageToken(pageToken string) *Interconnec // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectLocationsListCall) Fields(s ...googleapi.Field) *InterconnectLocationsListCall { +func (c *NetworkEndpointGroupsAggregatedListCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -58644,7 +72617,7 @@ func (c *InterconnectLocationsListCall) Fields(s ...googleapi.Field) *Interconne // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InterconnectLocationsListCall) IfNoneMatch(entityTag string) *InterconnectLocationsListCall { +func (c *NetworkEndpointGroupsAggregatedListCall) IfNoneMatch(entityTag string) *NetworkEndpointGroupsAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -58652,21 +72625,21 @@ func (c *InterconnectLocationsListCall) IfNoneMatch(entityTag string) *Interconn // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *InterconnectLocationsListCall) Context(ctx context.Context) *InterconnectLocationsListCall { +func (c *NetworkEndpointGroupsAggregatedListCall) Context(ctx context.Context) *NetworkEndpointGroupsAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectLocationsListCall) Header() http.Header { +func (c *NetworkEndpointGroupsAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectLocationsListCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -58677,7 +72650,7 @@ func (c *InterconnectLocationsListCall) doRequest(alt string) (*http.Response, e } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnectLocations") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/networkEndpointGroups") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders @@ -58687,14 +72660,15 @@ func (c *InterconnectLocationsListCall) doRequest(alt string) (*http.Response, e return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectLocations.list" call. -// Exactly one of *InterconnectLocationList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *InterconnectLocationList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*InterconnectLocationList, error) { +// Do executes the "compute.networkEndpointGroups.aggregatedList" call. +// Exactly one of *NetworkEndpointGroupAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *NetworkEndpointGroupAggregatedList.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -58713,7 +72687,7 @@ func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*Inter if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InterconnectLocationList{ + ret := &NetworkEndpointGroupAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -58725,9 +72699,9 @@ func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*Inter } return ret, nil // { - // "description": "Retrieves the list of interconnect locations available to the specified project.", + // "description": "Retrieves the list of network endpoint groups and sorts them by zone.", // "httpMethod": "GET", - // "id": "compute.interconnectLocations.list", + // "id": "compute.networkEndpointGroups.aggregatedList", // "parameterOrder": [ // "project" // ], @@ -58763,9 +72737,9 @@ func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*Inter // "type": "string" // } // }, - // "path": "{project}/global/interconnectLocations", + // "path": "{project}/aggregated/networkEndpointGroups", // "response": { - // "$ref": "InterconnectLocationList" + // "$ref": "NetworkEndpointGroupAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -58779,7 +72753,7 @@ func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*Inter // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *InterconnectLocationsListCall) Pages(ctx context.Context, f func(*InterconnectLocationList) error) error { +func (c *NetworkEndpointGroupsAggregatedListCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -58797,32 +72771,53 @@ func (c *InterconnectLocationsListCall) Pages(ctx context.Context, f func(*Inter } } -// method id "compute.interconnectLocations.testIamPermissions": +// method id "compute.networkEndpointGroups.attachNetworkEndpoints": -type InterconnectLocationsTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworkEndpointGroupsAttachNetworkEndpointsCall struct { + s *Service + project string + zone string + networkEndpointGroup string + networkendpointgroupsattachendpointsrequest *NetworkEndpointGroupsAttachEndpointsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *InterconnectLocationsService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *InterconnectLocationsTestIamPermissionsCall { - c := &InterconnectLocationsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AttachNetworkEndpoints: Attach a list of network endpoints to the +// specified network endpoint group. 
+func (r *NetworkEndpointGroupsService) AttachNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupsattachendpointsrequest *NetworkEndpointGroupsAttachEndpointsRequest) *NetworkEndpointGroupsAttachNetworkEndpointsCall { + c := &NetworkEndpointGroupsAttachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.zone = zone + c.networkEndpointGroup = networkEndpointGroup + c.networkendpointgroupsattachendpointsrequest = networkendpointgroupsattachendpointsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) RequestId(requestId string) *NetworkEndpointGroupsAttachNetworkEndpointsCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectLocationsTestIamPermissionsCall) Fields(s ...googleapi.Field) *InterconnectLocationsTestIamPermissionsCall { +func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsAttachNetworkEndpointsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -58830,52 +72825,53 @@ func (c *InterconnectLocationsTestIamPermissionsCall) Fields(s ...googleapi.Fiel // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectLocationsTestIamPermissionsCall) Context(ctx context.Context) *InterconnectLocationsTestIamPermissionsCall { +func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Context(ctx context.Context) *NetworkEndpointGroupsAttachNetworkEndpointsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InterconnectLocationsTestIamPermissionsCall) Header() http.Header { +func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectLocationsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroupsattachendpointsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnectLocations/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "zone": c.zone, + "networkEndpointGroup": c.networkEndpointGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnectLocations.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InterconnectLocationsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.networkEndpointGroups.attachNetworkEndpoints" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworkEndpointGroupsAttachNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -58894,7 +72890,7 @@ func (c *InterconnectLocationsTestIamPermissionsCall) Do(opts ...googleapi.CallO if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -58906,14 +72902,21 @@ func (c *InterconnectLocationsTestIamPermissionsCall) Do(opts ...googleapi.CallO } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Attach a list of network endpoints to the specified network endpoint group.", // "httpMethod": "POST", - // "id": "compute.interconnectLocations.testIamPermissions", + // "id": "compute.networkEndpointGroups.attachNetworkEndpoints", // "parameterOrder": [ // "project", - // "resource" + // "zone", + // "networkEndpointGroup" // ], // "parameters": { + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group where you are attaching network endpoints to. It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -58921,46 +72924,54 @@ func (c *InterconnectLocationsTestIamPermissionsCall) Do(opts ...googleapi.CallO // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name of the resource for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the network endpoint group is located. 
It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/interconnectLocations/{resource}/testIamPermissions", + // "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "NetworkEndpointGroupsAttachEndpointsRequest" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.interconnects.delete": +// method id "compute.networkEndpointGroups.delete": -type InterconnectsDeleteCall struct { - s *Service - project string - interconnect string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworkEndpointGroupsDeleteCall struct { + s *Service + project string + zone string + networkEndpointGroup string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified interconnect. -func (r *InterconnectsService) Delete(project string, interconnect string) *InterconnectsDeleteCall { - c := &InterconnectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified network endpoint group. The network +// endpoints in the NEG and the VM instances they belong to are not +// terminated when the NEG is deleted. Note that the NEG cannot be +// deleted if there are backend services referencing it. +func (r *NetworkEndpointGroupsService) Delete(project string, zone string, networkEndpointGroup string) *NetworkEndpointGroupsDeleteCall { + c := &NetworkEndpointGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.interconnect = interconnect + c.zone = zone + c.networkEndpointGroup = networkEndpointGroup return c } @@ -58978,7 +72989,7 @@ func (r *InterconnectsService) Delete(project string, interconnect string) *Inte // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InterconnectsDeleteCall) RequestId(requestId string) *InterconnectsDeleteCall { +func (c *NetworkEndpointGroupsDeleteCall) RequestId(requestId string) *NetworkEndpointGroupsDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -58986,7 +72997,7 @@ func (c *InterconnectsDeleteCall) RequestId(requestId string) *InterconnectsDele // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectsDeleteCall) Fields(s ...googleapi.Field) *InterconnectsDeleteCall { +func (c *NetworkEndpointGroupsDeleteCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -58994,21 +73005,21 @@ func (c *InterconnectsDeleteCall) Fields(s ...googleapi.Field) *InterconnectsDel // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *InterconnectsDeleteCall) Context(ctx context.Context) *InterconnectsDeleteCall { +func (c *NetworkEndpointGroupsDeleteCall) Context(ctx context.Context) *NetworkEndpointGroupsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectsDeleteCall) Header() http.Header { +func (c *NetworkEndpointGroupsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -59016,25 +73027,26 @@ func (c *InterconnectsDeleteCall) doRequest(alt string) (*http.Response, error) reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "interconnect": c.interconnect, + "project": c.project, + "zone": c.zone, + "networkEndpointGroup": c.networkEndpointGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnects.delete" call. +// Do executes the "compute.networkEndpointGroups.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -59065,18 +73077,18 @@ func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Deletes the specified interconnect.", + // "description": "Deletes the specified network endpoint group. The network endpoints in the NEG and the VM instances they belong to are not terminated when the NEG is deleted. Note that the NEG cannot be deleted if there are backend services referencing it.", // "httpMethod": "DELETE", - // "id": "compute.interconnects.delete", + // "id": "compute.networkEndpointGroups.delete", // "parameterOrder": [ // "project", - // "interconnect" + // "zone", + // "networkEndpointGroup" // ], // "parameters": { - // "interconnect": { - // "description": "Name of the interconnect to delete.", + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group to delete. 
It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -59091,9 +73103,15 @@ func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/interconnects/{interconnect}", + // "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", // "response": { // "$ref": "Operation" // }, @@ -59105,31 +73123,213 @@ func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.interconnects.get": +// method id "compute.networkEndpointGroups.detachNetworkEndpoints": -type InterconnectsGetCall struct { - s *Service - project string - interconnect string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type NetworkEndpointGroupsDetachNetworkEndpointsCall struct { + s *Service + project string + zone string + networkEndpointGroup string + networkendpointgroupsdetachendpointsrequest *NetworkEndpointGroupsDetachEndpointsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified interconnect. Get a list of available -// interconnects by making a list() request. -func (r *InterconnectsService) Get(project string, interconnect string) *InterconnectsGetCall { - c := &InterconnectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// DetachNetworkEndpoints: Detach a list of network endpoints from the +// specified network endpoint group. +func (r *NetworkEndpointGroupsService) DetachNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupsdetachendpointsrequest *NetworkEndpointGroupsDetachEndpointsRequest) *NetworkEndpointGroupsDetachNetworkEndpointsCall { + c := &NetworkEndpointGroupsDetachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.interconnect = interconnect + c.zone = zone + c.networkEndpointGroup = networkEndpointGroup + c.networkendpointgroupsdetachendpointsrequest = networkendpointgroupsdetachendpointsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) RequestId(requestId string) *NetworkEndpointGroupsDetachNetworkEndpointsCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectsGetCall) Fields(s ...googleapi.Field) *InterconnectsGetCall { +func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsDetachNetworkEndpointsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Context(ctx context.Context) *NetworkEndpointGroupsDetachNetworkEndpointsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroupsdetachendpointsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "networkEndpointGroup": c.networkEndpointGroup, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.networkEndpointGroups.detachNetworkEndpoints" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworkEndpointGroupsDetachNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Detach a list of network endpoints from the specified network endpoint group.", + // "httpMethod": "POST", + // "id": "compute.networkEndpointGroups.detachNetworkEndpoints", + // "parameterOrder": [ + // "project", + // "zone", + // "networkEndpointGroup" + // ], + // "parameters": { + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group where you are removing network endpoints. It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints", + // "request": { + // "$ref": "NetworkEndpointGroupsDetachEndpointsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.networkEndpointGroups.get": + +type NetworkEndpointGroupsGetCall struct { + s *Service + project string + zone string + networkEndpointGroup string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified network endpoint group. Get a list of +// available network endpoint groups by making a list() request. 
+func (r *NetworkEndpointGroupsService) Get(project string, zone string, networkEndpointGroup string) *NetworkEndpointGroupsGetCall { + c := &NetworkEndpointGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.networkEndpointGroup = networkEndpointGroup + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NetworkEndpointGroupsGetCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -59139,7 +73339,7 @@ func (c *InterconnectsGetCall) Fields(s ...googleapi.Field) *InterconnectsGetCal // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InterconnectsGetCall) IfNoneMatch(entityTag string) *InterconnectsGetCall { +func (c *NetworkEndpointGroupsGetCall) IfNoneMatch(entityTag string) *NetworkEndpointGroupsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -59147,21 +73347,21 @@ func (c *InterconnectsGetCall) IfNoneMatch(entityTag string) *InterconnectsGetCa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectsGetCall) Context(ctx context.Context) *InterconnectsGetCall { +func (c *NetworkEndpointGroupsGetCall) Context(ctx context.Context) *NetworkEndpointGroupsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectsGetCall) Header() http.Header { +func (c *NetworkEndpointGroupsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -59172,25 +73372,26 @@ func (c *InterconnectsGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "interconnect": c.interconnect, + "project": c.project, + "zone": c.zone, + "networkEndpointGroup": c.networkEndpointGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnects.get" call. -// Exactly one of *Interconnect or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Interconnect.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, error) { +// Do executes the "compute.networkEndpointGroups.get" call. 
+// Exactly one of *NetworkEndpointGroup or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *NetworkEndpointGroup.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroup, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -59209,7 +73410,7 @@ func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Interconnect{ + ret := &NetworkEndpointGroup{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -59221,18 +73422,18 @@ func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, } return ret, nil // { - // "description": "Returns the specified interconnect. Get a list of available interconnects by making a list() request.", + // "description": "Returns the specified network endpoint group. Get a list of available network endpoint groups by making a list() request.", // "httpMethod": "GET", - // "id": "compute.interconnects.get", + // "id": "compute.networkEndpointGroups.get", // "parameterOrder": [ // "project", - // "interconnect" + // "zone", + // "networkEndpointGroup" // ], // "parameters": { - // "interconnect": { - // "description": "Name of the interconnect to return.", + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group. It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -59242,11 +73443,17 @@ func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/interconnects/{interconnect}", + // "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", // "response": { - // "$ref": "Interconnect" + // "$ref": "NetworkEndpointGroup" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -59257,23 +73464,25 @@ func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, } -// method id "compute.interconnects.insert": +// method id "compute.networkEndpointGroups.insert": -type InterconnectsInsertCall struct { - s *Service - project string - interconnect *Interconnect - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworkEndpointGroupsInsertCall struct { + s *Service + project string + zone string + networkendpointgroup *NetworkEndpointGroup + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a Interconnect in the specified project using the -// data included in the request. 
-func (r *InterconnectsService) Insert(project string, interconnect *Interconnect) *InterconnectsInsertCall { - c := &InterconnectsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a network endpoint group in the specified project +// using the parameters that are included in the request. +func (r *NetworkEndpointGroupsService) Insert(project string, zone string, networkendpointgroup *NetworkEndpointGroup) *NetworkEndpointGroupsInsertCall { + c := &NetworkEndpointGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.interconnect = interconnect + c.zone = zone + c.networkendpointgroup = networkendpointgroup return c } @@ -59291,7 +73500,7 @@ func (r *InterconnectsService) Insert(project string, interconnect *Interconnect // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InterconnectsInsertCall) RequestId(requestId string) *InterconnectsInsertCall { +func (c *NetworkEndpointGroupsInsertCall) RequestId(requestId string) *NetworkEndpointGroupsInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -59299,7 +73508,7 @@ func (c *InterconnectsInsertCall) RequestId(requestId string) *InterconnectsInse // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectsInsertCall) Fields(s ...googleapi.Field) *InterconnectsInsertCall { +func (c *NetworkEndpointGroupsInsertCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -59307,51 +73516,52 @@ func (c *InterconnectsInsertCall) Fields(s ...googleapi.Field) *InterconnectsIns // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectsInsertCall) Context(ctx context.Context) *InterconnectsInsertCall { +func (c *NetworkEndpointGroupsInsertCall) Context(ctx context.Context) *NetworkEndpointGroupsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectsInsertCall) Header() http.Header { +func (c *NetworkEndpointGroupsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnect) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroup) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnects.insert" call. +// Do executes the "compute.networkEndpointGroups.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworkEndpointGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -59382,11 +73592,12 @@ func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Creates a Interconnect in the specified project using the data included in the request.", + // "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request.", // "httpMethod": "POST", - // "id": "compute.interconnects.insert", + // "id": "compute.networkEndpointGroups.insert", // "parameterOrder": [ - // "project" + // "project", + // "zone" // ], // "parameters": { // "project": { @@ -59400,11 +73611,17 @@ func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where you want to create the network endpoint group. It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/interconnects", + // "path": "{project}/zones/{zone}/networkEndpointGroups", // "request": { - // "$ref": "Interconnect" + // "$ref": "NetworkEndpointGroup" // }, // "response": { // "$ref": "Operation" @@ -59417,22 +73634,24 @@ func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.interconnects.list": +// method id "compute.networkEndpointGroups.list": -type InterconnectsListCall struct { +type NetworkEndpointGroupsListCall struct { s *Service project string + zone string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves the list of interconnect available to the specified -// project. 
-func (r *InterconnectsService) List(project string) *InterconnectsListCall { - c := &InterconnectsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of network endpoint groups that are located +// in the specified project and zone. +func (r *NetworkEndpointGroupsService) List(project string, zone string) *NetworkEndpointGroupsListCall { + c := &NetworkEndpointGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone return c } @@ -59462,7 +73681,7 @@ func (r *InterconnectsService) List(project string) *InterconnectsListCall { // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *InterconnectsListCall) Filter(filter string) *InterconnectsListCall { +func (c *NetworkEndpointGroupsListCall) Filter(filter string) *NetworkEndpointGroupsListCall { c.urlParams_.Set("filter", filter) return c } @@ -59473,7 +73692,7 @@ func (c *InterconnectsListCall) Filter(filter string) *InterconnectsListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *InterconnectsListCall) MaxResults(maxResults int64) *InterconnectsListCall { +func (c *NetworkEndpointGroupsListCall) MaxResults(maxResults int64) *NetworkEndpointGroupsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -59490,7 +73709,7 @@ func (c *InterconnectsListCall) MaxResults(maxResults int64) *InterconnectsListC // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *InterconnectsListCall) OrderBy(orderBy string) *InterconnectsListCall { +func (c *NetworkEndpointGroupsListCall) OrderBy(orderBy string) *NetworkEndpointGroupsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -59498,7 +73717,7 @@ func (c *InterconnectsListCall) OrderBy(orderBy string) *InterconnectsListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *InterconnectsListCall) PageToken(pageToken string) *InterconnectsListCall { +func (c *NetworkEndpointGroupsListCall) PageToken(pageToken string) *NetworkEndpointGroupsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -59506,7 +73725,7 @@ func (c *InterconnectsListCall) PageToken(pageToken string) *InterconnectsListCa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectsListCall) Fields(s ...googleapi.Field) *InterconnectsListCall { +func (c *NetworkEndpointGroupsListCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -59516,7 +73735,7 @@ func (c *InterconnectsListCall) Fields(s ...googleapi.Field) *InterconnectsListC // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
-func (c *InterconnectsListCall) IfNoneMatch(entityTag string) *InterconnectsListCall { +func (c *NetworkEndpointGroupsListCall) IfNoneMatch(entityTag string) *NetworkEndpointGroupsListCall { c.ifNoneMatch_ = entityTag return c } @@ -59524,21 +73743,21 @@ func (c *InterconnectsListCall) IfNoneMatch(entityTag string) *InterconnectsList // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectsListCall) Context(ctx context.Context) *InterconnectsListCall { +func (c *NetworkEndpointGroupsListCall) Context(ctx context.Context) *NetworkEndpointGroupsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectsListCall) Header() http.Header { +func (c *NetworkEndpointGroupsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectsListCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -59549,24 +73768,25 @@ func (c *InterconnectsListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnects.list" call. -// Exactly one of *InterconnectList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InterconnectList.ServerResponse.Header or (if a response was +// Do executes the "compute.networkEndpointGroups.list" call. +// Exactly one of *NetworkEndpointGroupList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *NetworkEndpointGroupList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectList, error) { +func (c *NetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -59585,7 +73805,7 @@ func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectL if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InterconnectList{ + ret := &NetworkEndpointGroupList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -59597,11 +73817,12 @@ func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectL } return ret, nil // { - // "description": "Retrieves the list of interconnect available to the specified project.", + // "description": "Retrieves the list of network endpoint groups that are located in the specified project and zone.", // "httpMethod": "GET", - // "id": "compute.interconnects.list", + // "id": "compute.networkEndpointGroups.list", // "parameterOrder": [ - // "project" + // "project", + // "zone" // ], // "parameters": { // "filter": { @@ -59633,11 +73854,17 @@ func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectL // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/interconnects", + // "path": "{project}/zones/{zone}/networkEndpointGroups", // "response": { - // "$ref": "InterconnectList" + // "$ref": "NetworkEndpointGroupList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -59651,7 +73878,7 @@ func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectL // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *InterconnectsListCall) Pages(ctx context.Context, f func(*InterconnectList) error) error { +func (c *NetworkEndpointGroupsListCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -59669,52 +73896,101 @@ func (c *InterconnectsListCall) Pages(ctx context.Context, f func(*InterconnectL } } -// method id "compute.interconnects.patch": +// method id "compute.networkEndpointGroups.listNetworkEndpoints": -type InterconnectsPatchCall struct { - s *Service - project string - interconnect string - interconnect2 *Interconnect - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworkEndpointGroupsListNetworkEndpointsCall struct { + s *Service + project string + zone string + networkEndpointGroup string + networkendpointgroupslistendpointsrequest *NetworkEndpointGroupsListEndpointsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates the specified interconnect with the data included in -// the request. This method supports PATCH semantics and uses the JSON -// merge patch format and processing rules. 
-func (r *InterconnectsService) Patch(project string, interconnect string, interconnect2 *Interconnect) *InterconnectsPatchCall { - c := &InterconnectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListNetworkEndpoints: List the network endpoints in the specified +// network endpoint group. +func (r *NetworkEndpointGroupsService) ListNetworkEndpoints(project string, zone string, networkEndpointGroup string, networkendpointgroupslistendpointsrequest *NetworkEndpointGroupsListEndpointsRequest) *NetworkEndpointGroupsListNetworkEndpointsCall { + c := &NetworkEndpointGroupsListNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.interconnect = interconnect - c.interconnect2 = interconnect2 + c.zone = zone + c.networkEndpointGroup = networkEndpointGroup + c.networkendpointgroupslistendpointsrequest = networkendpointgroupslistendpointsrequest return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InterconnectsPatchCall) RequestId(requestId string) *InterconnectsPatchCall { - c.urlParams_.Set("requestId", requestId) +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. 
+func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Filter(filter string) *NetworkEndpointGroupsListNetworkEndpointsCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) MaxResults(maxResults int64) *NetworkEndpointGroupsListNetworkEndpointsCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) OrderBy(orderBy string) *NetworkEndpointGroupsListNetworkEndpointsCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) PageToken(pageToken string) *NetworkEndpointGroupsListNetworkEndpointsCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectsPatchCall) Fields(s ...googleapi.Field) *InterconnectsPatchCall { +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsListNetworkEndpointsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -59722,52 +73998,55 @@ func (c *InterconnectsPatchCall) Fields(s ...googleapi.Field) *InterconnectsPatc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectsPatchCall) Context(ctx context.Context) *InterconnectsPatchCall { +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Context(ctx context.Context) *NetworkEndpointGroupsListNetworkEndpointsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InterconnectsPatchCall) Header() http.Header { +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnect2) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkendpointgroupslistendpointsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "interconnect": c.interconnect, + "project": c.project, + "zone": c.zone, + "networkEndpointGroup": c.networkEndpointGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnects.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.networkEndpointGroups.listNetworkEndpoints" call. +// Exactly one of *NetworkEndpointGroupsListNetworkEndpoints or error +// will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *NetworkEndpointGroupsListNetworkEndpoints.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupsListNetworkEndpoints, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -59786,7 +74065,7 @@ func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, e if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &NetworkEndpointGroupsListNetworkEndpoints{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -59798,21 +74077,44 @@ func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Updates the specified interconnect with the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.interconnects.patch", + // "description": "List the network endpoints in the specified network endpoint group.", + // "httpMethod": "POST", + // "id": "compute.networkEndpointGroups.listNetworkEndpoints", // "parameterOrder": [ // "project", - // "interconnect" + // "zone", + // "networkEndpointGroup" // ], // "parameters": { - // "interconnect": { - // "description": "Name of the interconnect to update.", + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group from which you want to generate a list of included network endpoints. It should comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -59820,18 +74122,19 @@ func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, e // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "zone": { + // "description": "The name of the zone where the network endpoint group is located. It should comply with RFC1035.", + // "location": "path", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/interconnects/{interconnect}", + // "path": "{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints", // "request": { - // "$ref": "Interconnect" + // "$ref": "NetworkEndpointGroupsListEndpointsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "NetworkEndpointGroupsListNetworkEndpoints" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -59842,11 +74145,33 @@ func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, e } -// method id "compute.interconnects.testIamPermissions": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupsListNetworkEndpoints) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InterconnectsTestIamPermissionsCall struct { +// method id "compute.networkEndpointGroups.testIamPermissions": + +type NetworkEndpointGroupsTestIamPermissionsCall struct { s *Service project string + zone string resource string testpermissionsrequest *TestPermissionsRequest urlParams_ gensupport.URLParams @@ -59856,9 +74181,10 @@ type InterconnectsTestIamPermissionsCall struct { // TestIamPermissions: Returns permissions that a caller has on the // specified resource. 
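The networkEndpointGroups.listNetworkEndpoints method added above is page-oriented, and its generated Pages helper feeds each nextPageToken back in as pageToken until the callback returns an error or the token is empty. A minimal usage sketch, assuming the import path shown below (the vendored tree may pin a different API version), that the ListNetworkEndpoints constructor follows the generator's usual ordering of path parameters followed by the request body, and that the response carries the generator's standard Items slice; the project, zone, and NEG names are purely illustrative.

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v0.beta" // assumed import path; use the version vendored here
)

func main() {
	ctx := context.Background()

	// Application Default Credentials; the scope constant is defined by the generated package.
	client, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}

	// Illustrative project, zone, and network endpoint group names.
	req := &compute.NetworkEndpointGroupsListEndpointsRequest{}
	call := svc.NetworkEndpointGroups.ListNetworkEndpoints("my-project", "us-central1-a", "my-neg", req)

	// Pages repeatedly calls Do, feeding nextPageToken back in as pageToken,
	// and stops when the callback returns an error or the token is empty.
	err = call.Pages(ctx, func(page *compute.NetworkEndpointGroupsListNetworkEndpoints) error {
		// Items is assumed to follow the generator's standard list-response shape.
		fmt.Printf("got %d network endpoints in this page\n", len(page.Items))
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}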
-func (r *InterconnectsService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *InterconnectsTestIamPermissionsCall { - c := &InterconnectsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *NetworkEndpointGroupsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *NetworkEndpointGroupsTestIamPermissionsCall { + c := &NetworkEndpointGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone c.resource = resource c.testpermissionsrequest = testpermissionsrequest return c @@ -59867,7 +74193,7 @@ func (r *InterconnectsService) TestIamPermissions(project string, resource strin // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InterconnectsTestIamPermissionsCall) Fields(s ...googleapi.Field) *InterconnectsTestIamPermissionsCall { +func (c *NetworkEndpointGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *NetworkEndpointGroupsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -59875,21 +74201,21 @@ func (c *InterconnectsTestIamPermissionsCall) Fields(s ...googleapi.Field) *Inte // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InterconnectsTestIamPermissionsCall) Context(ctx context.Context) *InterconnectsTestIamPermissionsCall { +func (c *NetworkEndpointGroupsTestIamPermissionsCall) Context(ctx context.Context) *NetworkEndpointGroupsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InterconnectsTestIamPermissionsCall) Header() http.Header { +func (c *NetworkEndpointGroupsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InterconnectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworkEndpointGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -59902,25 +74228,26 @@ func (c *InterconnectsTestIamPermissionsCall) doRequest(alt string) (*http.Respo } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/networkEndpointGroups/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "zone": c.zone, "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.interconnects.testIamPermissions" call. +// Do executes the "compute.networkEndpointGroups.testIamPermissions" call. // Exactly one of *TestPermissionsResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *TestPermissionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. 
Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InterconnectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *NetworkEndpointGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -59953,9 +74280,10 @@ func (c *InterconnectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( // { // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.interconnects.testIamPermissions", + // "id": "compute.networkEndpointGroups.testIamPermissions", // "parameterOrder": [ // "project", + // "zone", // "resource" // ], // "parameters": { @@ -59969,12 +74297,19 @@ func (c *InterconnectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( // "resource": { // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/interconnects/{resource}/testIamPermissions", + // "path": "{project}/zones/{zone}/networkEndpointGroups/{resource}/testIamPermissions", // "request": { // "$ref": "TestPermissionsRequest" // }, @@ -59990,29 +74325,50 @@ func (c *InterconnectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) ( } -// method id "compute.licenses.delete": +// method id "compute.networks.addPeering": -type LicensesDeleteCall struct { - s *Service - project string - license string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworksAddPeeringCall struct { + s *Service + project string + network string + networksaddpeeringrequest *NetworksAddPeeringRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified license. -func (r *LicensesService) Delete(project string, license string) *LicensesDeleteCall { - c := &LicensesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AddPeering: Adds a peering to the specified network. +func (r *NetworksService) AddPeering(project string, network string, networksaddpeeringrequest *NetworksAddPeeringRequest) *NetworksAddPeeringCall { + c := &NetworksAddPeeringCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.license = license + c.network = network + c.networksaddpeeringrequest = networksaddpeeringrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. 
This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *NetworksAddPeeringCall) RequestId(requestId string) *NetworksAddPeeringCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LicensesDeleteCall) Fields(s ...googleapi.Field) *LicensesDeleteCall { +func (c *NetworksAddPeeringCall) Fields(s ...googleapi.Field) *NetworksAddPeeringCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -60020,47 +74376,52 @@ func (c *LicensesDeleteCall) Fields(s ...googleapi.Field) *LicensesDeleteCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicensesDeleteCall) Context(ctx context.Context) *LicensesDeleteCall { +func (c *NetworksAddPeeringCall) Context(ctx context.Context) *NetworksAddPeeringCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *LicensesDeleteCall) Header() http.Header { +func (c *NetworksAddPeeringCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicensesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksAddPeeringCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networksaddpeeringrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{license}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/addPeering") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "license": c.license, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenses.delete" call. +// Do executes the "compute.networks.addPeering" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *LicensesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworksAddPeeringCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -60091,16 +74452,16 @@ func (c *LicensesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Deletes the specified license.", - // "httpMethod": "DELETE", - // "id": "compute.licenses.delete", + // "description": "Adds a peering to the specified network.", + // "httpMethod": "POST", + // "id": "compute.networks.addPeering", // "parameterOrder": [ // "project", - // "license" + // "network" // ], // "parameters": { - // "license": { - // "description": "Name of the license resource to delete.", + // "network": { + // "description": "Name of the network resource to add peering to.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -60112,9 +74473,17 @@ func (c *LicensesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/global/licenses/{license}", + // "path": "{project}/global/networks/{network}/addPeering", + // "request": { + // "$ref": "NetworksAddPeeringRequest" + // }, // "response": { // "$ref": "Operation" // }, @@ -60126,92 +74495,97 @@ func (c *LicensesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error } -// method id "compute.licenses.get": +// method id "compute.networks.delete": -type LicensesGetCall struct { - s *Service - project string - license string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type NetworksDeleteCall struct { + s *Service + project string + network string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified License resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/licenses/get -func (r *LicensesService) Get(project string, license string) *LicensesGetCall { - c := &LicensesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified network. +// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/delete +func (r *NetworksService) Delete(project string, network string) *NetworksDeleteCall { + c := &NetworksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.license = license + c.network = network + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. 
Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *NetworksDeleteCall) RequestId(requestId string) *NetworksDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LicensesGetCall) Fields(s ...googleapi.Field) *LicensesGetCall { +func (c *NetworksDeleteCall) Fields(s ...googleapi.Field) *NetworksDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *LicensesGetCall) IfNoneMatch(entityTag string) *LicensesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicensesGetCall) Context(ctx context.Context) *LicensesGetCall { +func (c *NetworksDeleteCall) Context(ctx context.Context) *NetworksDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *LicensesGetCall) Header() http.Header { +func (c *NetworksDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicensesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{license}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "license": c.license, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenses.get" call. -// Exactly one of *License or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *License.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { +// Do executes the "compute.networks.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -60230,7 +74604,7 @@ func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &License{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -60242,16 +74616,16 @@ func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { } return ret, nil // { - // "description": "Returns the specified License resource.", - // "httpMethod": "GET", - // "id": "compute.licenses.get", + // "description": "Deletes the specified network.", + // "httpMethod": "DELETE", + // "id": "compute.networks.delete", // "parameterOrder": [ // "project", - // "license" + // "network" // ], // "parameters": { - // "license": { - // "description": "Name of the License resource to return.", + // "network": { + // "description": "Name of the network to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -60263,46 +74637,51 @@ func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/global/licenses/{license}", + // "path": "{project}/global/networks/{network}", // "response": { - // "$ref": "License" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.licenses.getIamPolicy": +// method id "compute.networks.get": -type LicensesGetIamPolicyCall struct { +type NetworksGetCall struct { s *Service project string - resource string + network string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. -func (r *LicensesService) GetIamPolicy(project string, resource string) *LicensesGetIamPolicyCall { - c := &LicensesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified network. Get a list of available networks +// by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/get +func (r *NetworksService) Get(project string, network string) *NetworksGetCall { + c := &NetworksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource + c.network = network return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LicensesGetIamPolicyCall) Fields(s ...googleapi.Field) *LicensesGetIamPolicyCall { +func (c *NetworksGetCall) Fields(s ...googleapi.Field) *NetworksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -60312,7 +74691,7 @@ func (c *LicensesGetIamPolicyCall) Fields(s ...googleapi.Field) *LicensesGetIamP // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *LicensesGetIamPolicyCall) IfNoneMatch(entityTag string) *LicensesGetIamPolicyCall { +func (c *NetworksGetCall) IfNoneMatch(entityTag string) *NetworksGetCall { c.ifNoneMatch_ = entityTag return c } @@ -60320,21 +74699,21 @@ func (c *LicensesGetIamPolicyCall) IfNoneMatch(entityTag string) *LicensesGetIam // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicensesGetIamPolicyCall) Context(ctx context.Context) *LicensesGetIamPolicyCall { +func (c *NetworksGetCall) Context(ctx context.Context) *NetworksGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
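The networks.addPeering and networks.delete methods introduced above both accept the optional requestId parameter, so a client that retries after a timeout can reuse the same ID and let the server drop the duplicate. A minimal sketch of addPeering, assuming an existing *compute.Service, an assumed import path, and NetworksAddPeeringRequest field names taken from the corresponding API schema; all resource names are illustrative.

package networksample

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

// addPeering issues an idempotent addPeering request. Supplying the same
// requestID on a retry lets the server recognize and ignore the duplicate.
func addPeering(ctx context.Context, svc *compute.Service, requestID string) error {
	req := &compute.NetworksAddPeeringRequest{
		// Field names assumed from the API schema for this method.
		Name:             "example-peering",
		PeerNetwork:      "projects/other-project/global/networks/shared-net",
		AutoCreateRoutes: true,
	}
	op, err := svc.Networks.AddPeering("my-project", "my-network", req).
		RequestId(requestID).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Println("started operation:", op.Name)
	return nil
}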
-func (c *LicensesGetIamPolicyCall) Header() http.Header { +func (c *NetworksGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicensesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -60345,25 +74724,25 @@ func (c *LicensesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenses.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// Do executes the "compute.networks.get" call. +// Exactly one of *Network or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) +// *Network.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *LicensesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +func (c *NetworksGetCall) Do(opts ...googleapi.CallOption) (*Network, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -60382,7 +74761,7 @@ func (c *LicensesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Network{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -60394,32 +74773,32 @@ func (c *LicensesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + // "description": "Returns the specified network. 
Get a list of available networks by making a list() request.", // "httpMethod": "GET", - // "id": "compute.licenses.getIamPolicy", + // "id": "compute.networks.get", // "parameterOrder": [ // "project", - // "resource" + // "network" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "network": { + // "description": "Name of the network to return.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name of the resource for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/licenses/{resource}/getIamPolicy", + // "path": "{project}/global/networks/{network}", // "response": { - // "$ref": "Policy" + // "$ref": "Network" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -60430,22 +74809,24 @@ func (c *LicensesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er } -// method id "compute.licenses.insert": +// method id "compute.networks.insert": -type LicensesInsertCall struct { +type NetworksInsertCall struct { s *Service project string - license *License + network *Network urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Insert: Create a License resource in the specified project. -func (r *LicensesService) Insert(project string, license *License) *LicensesInsertCall { - c := &LicensesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a network in the specified project using the data +// included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/insert +func (r *NetworksService) Insert(project string, network *Network) *NetworksInsertCall { + c := &NetworksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.license = license + c.network = network return c } @@ -60463,7 +74844,7 @@ func (r *LicensesService) Insert(project string, license *License) *LicensesInse // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *LicensesInsertCall) RequestId(requestId string) *LicensesInsertCall { +func (c *NetworksInsertCall) RequestId(requestId string) *NetworksInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -60471,7 +74852,7 @@ func (c *LicensesInsertCall) RequestId(requestId string) *LicensesInsertCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LicensesInsertCall) Fields(s ...googleapi.Field) *LicensesInsertCall { +func (c *NetworksInsertCall) Fields(s ...googleapi.Field) *NetworksInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -60479,34 +74860,34 @@ func (c *LicensesInsertCall) Fields(s ...googleapi.Field) *LicensesInsertCall { // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicensesInsertCall) Context(ctx context.Context) *LicensesInsertCall { +func (c *NetworksInsertCall) Context(ctx context.Context) *NetworksInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *LicensesInsertCall) Header() http.Header { +func (c *NetworksInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicensesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.license) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.network) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -60516,14 +74897,14 @@ func (c *LicensesInsertCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenses.insert" call. +// Do executes the "compute.networks.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *LicensesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -60554,9 +74935,9 @@ func (c *LicensesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Create a License resource in the specified project.", + // "description": "Creates a network in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.licenses.insert", + // "id": "compute.networks.insert", // "parameterOrder": [ // "project" // ], @@ -60574,27 +74955,24 @@ func (c *LicensesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error // "type": "string" // } // }, - // "path": "{project}/global/licenses", + // "path": "{project}/global/networks", // "request": { - // "$ref": "License" + // "$ref": "Network" // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.licenses.list": +// method id "compute.networks.list": -type LicensesListCall struct { +type NetworksListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -60603,14 +74981,11 @@ type LicensesListCall struct { header_ http.Header } -// List: Retrieves the list of licenses available in the specified -// project. This method does not get any licenses that belong to other -// projects, including licenses attached to publicly-available images, -// like Debian 8. If you want to get a list of publicly-available -// licenses, use this method to make a request to the respective image -// project, such as debian-cloud or windows-cloud. -func (r *LicensesService) List(project string) *LicensesListCall { - c := &LicensesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of networks available to the specified +// project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/list +func (r *NetworksService) List(project string) *NetworksListCall { + c := &NetworksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -60641,7 +75016,7 @@ func (r *LicensesService) List(project string) *LicensesListCall { // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *LicensesListCall) Filter(filter string) *LicensesListCall { +func (c *NetworksListCall) Filter(filter string) *NetworksListCall { c.urlParams_.Set("filter", filter) return c } @@ -60652,7 +75027,7 @@ func (c *LicensesListCall) Filter(filter string) *LicensesListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *LicensesListCall) MaxResults(maxResults int64) *LicensesListCall { +func (c *NetworksListCall) MaxResults(maxResults int64) *NetworksListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -60669,7 +75044,7 @@ func (c *LicensesListCall) MaxResults(maxResults int64) *LicensesListCall { // // Currently, only sorting by name or creationTimestamp desc is // supported. 
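networks.insert carries its configuration in the Network request body rather than in URL parameters, and like the other mutating calls it returns an Operation that completes asynchronously. A minimal sketch, assuming an existing *compute.Service and an assumed import path; the Network field names used here come from the API schema, and the project and network names are illustrative.

package networksample

import (
	"context"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

// createAutoModeNetwork inserts a network and lets Compute Engine create one
// subnetwork per region (auto mode). The returned Operation must still be
// polled or waited on before the network is usable.
func createAutoModeNetwork(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	net := &compute.Network{
		Name:                  "example-net", // illustrative name
		Description:           "created via the generated client",
		AutoCreateSubnetworks: true, // field name assumed from the API schema
	}
	return svc.Networks.Insert("my-project", net).Context(ctx).Do()
}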
-func (c *LicensesListCall) OrderBy(orderBy string) *LicensesListCall { +func (c *NetworksListCall) OrderBy(orderBy string) *NetworksListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -60677,7 +75052,7 @@ func (c *LicensesListCall) OrderBy(orderBy string) *LicensesListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *LicensesListCall) PageToken(pageToken string) *LicensesListCall { +func (c *NetworksListCall) PageToken(pageToken string) *NetworksListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -60685,7 +75060,7 @@ func (c *LicensesListCall) PageToken(pageToken string) *LicensesListCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LicensesListCall) Fields(s ...googleapi.Field) *LicensesListCall { +func (c *NetworksListCall) Fields(s ...googleapi.Field) *NetworksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -60695,7 +75070,7 @@ func (c *LicensesListCall) Fields(s ...googleapi.Field) *LicensesListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *LicensesListCall) IfNoneMatch(entityTag string) *LicensesListCall { +func (c *NetworksListCall) IfNoneMatch(entityTag string) *NetworksListCall { c.ifNoneMatch_ = entityTag return c } @@ -60703,21 +75078,21 @@ func (c *LicensesListCall) IfNoneMatch(entityTag string) *LicensesListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *LicensesListCall) Context(ctx context.Context) *LicensesListCall { +func (c *NetworksListCall) Context(ctx context.Context) *NetworksListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *LicensesListCall) Header() http.Header { +func (c *NetworksListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicensesListCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -60728,7 +75103,7 @@ func (c *LicensesListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders @@ -60738,14 +75113,14 @@ func (c *LicensesListCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenses.list" call. -// Exactly one of *LicensesListResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *LicensesListResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. 
Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListResponse, error) { +// Do executes the "compute.networks.list" call. +// Exactly one of *NetworkList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *NetworkList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -60764,7 +75139,7 @@ func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListRespon if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &LicensesListResponse{ + ret := &NetworkList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -60776,9 +75151,9 @@ func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListRespon } return ret, nil // { - // "description": "Retrieves the list of licenses available in the specified project. This method does not get any licenses that belong to other projects, including licenses attached to publicly-available images, like Debian 8. If you want to get a list of publicly-available licenses, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", + // "description": "Retrieves the list of networks available to the specified project.", // "httpMethod": "GET", - // "id": "compute.licenses.list", + // "id": "compute.networks.list", // "parameterOrder": [ // "project" // ], @@ -60796,343 +75171,27 @@ func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListRespon // "minimum": "0", // "type": "integer" // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/licenses", - // "response": { - // "$ref": "LicensesListResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *LicensesListCall) Pages(ctx context.Context, f func(*LicensesListResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.licenses.setIamPolicy": - -type LicensesSetIamPolicyCall struct { - s *Service - project string - resource string - policy *Policy - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. -func (r *LicensesService) SetIamPolicy(project string, resource string, policy *Policy) *LicensesSetIamPolicyCall { - c := &LicensesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.resource = resource - c.policy = policy - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *LicensesSetIamPolicyCall) Fields(s ...googleapi.Field) *LicensesSetIamPolicyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *LicensesSetIamPolicyCall) Context(ctx context.Context) *LicensesSetIamPolicyCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *LicensesSetIamPolicyCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *LicensesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{resource}/setIamPolicy") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.licenses.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *LicensesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Policy{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", - // "httpMethod": "POST", - // "id": "compute.licenses.setIamPolicy", - // "parameterOrder": [ - // "project", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/licenses/{resource}/setIamPolicy", - // "request": { - // "$ref": "Policy" - // }, - // "response": { - // "$ref": "Policy" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.licenses.testIamPermissions": - -type LicensesTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *LicensesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *LicensesTestIamPermissionsCall { - c := &LicensesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *LicensesTestIamPermissionsCall) Fields(s ...googleapi.Field) *LicensesTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *LicensesTestIamPermissionsCall) Context(ctx context.Context) *LicensesTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *LicensesTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *LicensesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{resource}/testIamPermissions") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.licenses.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *LicensesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.licenses.testIamPermissions", - // "parameterOrder": [ - // "project", - // "resource" - // ], - // "parameters": { + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/global/licenses/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/global/networks", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "NetworkList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -61143,22 +75202,44 @@ func (c *LicensesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test } -// method id "compute.machineTypes.aggregatedList": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *NetworksListCall) Pages(ctx context.Context, f func(*NetworkList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type MachineTypesAggregatedListCall struct { +// method id "compute.networks.listIpOwners": + +type NetworksListIpOwnersCall struct { s *Service project string + network string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// AggregatedList: Retrieves an aggregated list of machine types. -// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/aggregatedList -func (r *MachineTypesService) AggregatedList(project string) *MachineTypesAggregatedListCall { - c := &MachineTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListIpOwners: List the internal IP owners in the specified network. +func (r *NetworksService) ListIpOwners(project string, network string) *NetworksListIpOwnersCall { + c := &NetworksListIpOwnersCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.network = network return c } @@ -61188,18 +75269,25 @@ func (r *MachineTypesService) AggregatedList(project string) *MachineTypesAggreg // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. 
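networks.list accepts the filter expression syntax documented above, and its Pages helper performs the pageToken bookkeeping shown in the generated code. A minimal sketch, assuming an existing *compute.Service and an assumed import path; the project name and filter value are illustrative, and Items is assumed to follow the generator's standard list-response shape.

package networksample

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

// printNonDefaultNetworks lists every network whose name is not "default",
// walking all pages of results.
func printNonDefaultNetworks(ctx context.Context, svc *compute.Service) error {
	return svc.Networks.List("my-project").
		Filter("name ne default"). // field_name comparison_string literal_string
		Pages(ctx, func(page *compute.NetworkList) error {
			for _, n := range page.Items { // Items assumed per the standard list shape
				fmt.Println(n.Name)
			}
			return nil
		})
}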
-func (c *MachineTypesAggregatedListCall) Filter(filter string) *MachineTypesAggregatedListCall { +func (c *NetworksListIpOwnersCall) Filter(filter string) *NetworksListIpOwnersCall { c.urlParams_.Set("filter", filter) return c } +// IpCidrRange sets the optional parameter "ipCidrRange": (Optional) IP +// CIDR range filter, example: "10.128.10.0/30". +func (c *NetworksListIpOwnersCall) IpCidrRange(ipCidrRange string) *NetworksListIpOwnersCall { + c.urlParams_.Set("ipCidrRange", ipCidrRange) + return c +} + // MaxResults sets the optional parameter "maxResults": The maximum // number of results per page that should be returned. If the number of // available results is larger than maxResults, Compute Engine returns a // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *MachineTypesAggregatedListCall) MaxResults(maxResults int64) *MachineTypesAggregatedListCall { +func (c *NetworksListIpOwnersCall) MaxResults(maxResults int64) *NetworksListIpOwnersCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -61216,23 +75304,51 @@ func (c *MachineTypesAggregatedListCall) MaxResults(maxResults int64) *MachineTy // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *MachineTypesAggregatedListCall) OrderBy(orderBy string) *MachineTypesAggregatedListCall { +func (c *NetworksListIpOwnersCall) OrderBy(orderBy string) *NetworksListIpOwnersCall { c.urlParams_.Set("orderBy", orderBy) return c } +// OwnerProjects sets the optional parameter "ownerProjects": (Optional) +// Project IDs filter, example: "project-1,project-2". +func (c *NetworksListIpOwnersCall) OwnerProjects(ownerProjects string) *NetworksListIpOwnersCall { + c.urlParams_.Set("ownerProjects", ownerProjects) + return c +} + +// OwnerTypes sets the optional parameter "ownerTypes": (Optional) Owner +// types filter, example: "instance,forwardingRule". +func (c *NetworksListIpOwnersCall) OwnerTypes(ownerTypes string) *NetworksListIpOwnersCall { + c.urlParams_.Set("ownerTypes", ownerTypes) + return c +} + // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *MachineTypesAggregatedListCall) PageToken(pageToken string) *MachineTypesAggregatedListCall { +func (c *NetworksListIpOwnersCall) PageToken(pageToken string) *NetworksListIpOwnersCall { c.urlParams_.Set("pageToken", pageToken) return c } +// SubnetName sets the optional parameter "subnetName": (Optional) +// Subnetwork name filter. +func (c *NetworksListIpOwnersCall) SubnetName(subnetName string) *NetworksListIpOwnersCall { + c.urlParams_.Set("subnetName", subnetName) + return c +} + +// SubnetRegion sets the optional parameter "subnetRegion": (Optional) +// Subnetwork region filter. +func (c *NetworksListIpOwnersCall) SubnetRegion(subnetRegion string) *NetworksListIpOwnersCall { + c.urlParams_.Set("subnetRegion", subnetRegion) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *MachineTypesAggregatedListCall) Fields(s ...googleapi.Field) *MachineTypesAggregatedListCall { +func (c *NetworksListIpOwnersCall) Fields(s ...googleapi.Field) *NetworksListIpOwnersCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -61242,7 +75358,7 @@ func (c *MachineTypesAggregatedListCall) Fields(s ...googleapi.Field) *MachineTy // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *MachineTypesAggregatedListCall) IfNoneMatch(entityTag string) *MachineTypesAggregatedListCall { +func (c *NetworksListIpOwnersCall) IfNoneMatch(entityTag string) *NetworksListIpOwnersCall { c.ifNoneMatch_ = entityTag return c } @@ -61250,21 +75366,21 @@ func (c *MachineTypesAggregatedListCall) IfNoneMatch(entityTag string) *MachineT // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *MachineTypesAggregatedListCall) Context(ctx context.Context) *MachineTypesAggregatedListCall { +func (c *NetworksListIpOwnersCall) Context(ctx context.Context) *NetworksListIpOwnersCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *MachineTypesAggregatedListCall) Header() http.Header { +func (c *NetworksListIpOwnersCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *MachineTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksListIpOwnersCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -61275,24 +75391,25 @@ func (c *MachineTypesAggregatedListCall) doRequest(alt string) (*http.Response, } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/machineTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/listIpOwners") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.machineTypes.aggregatedList" call. -// Exactly one of *MachineTypeAggregatedList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *MachineTypeAggregatedList.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*MachineTypeAggregatedList, error) { +// Do executes the "compute.networks.listIpOwners" call. +// Exactly one of *IpOwnerList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *IpOwnerList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
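// Usage sketch for compute.networks.listIpOwners as generated above. Reuses
// the svc (*compute.Service) and ctx setup from the previous sketch; only
// setters that appear in this file are chained, and the response is checked
// via the embedded ServerResponse rather than payload fields defined elsewhere.
func listIPOwners(ctx context.Context, svc *compute.Service, project, network string) error {
	owners, err := svc.Networks.ListIpOwners(project, network).
		IpCidrRange("10.128.10.0/30").         // optional CIDR filter
		OwnerTypes("instance,forwardingRule"). // optional owner-type filter
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Println("listIpOwners HTTP status:", owners.HTTPStatusCode)
	return nil
}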
+func (c *NetworksListIpOwnersCall) Do(opts ...googleapi.CallOption) (*IpOwnerList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -61311,7 +75428,7 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &MachineTypeAggregatedList{ + ret := &IpOwnerList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -61323,11 +75440,12 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach } return ret, nil // { - // "description": "Retrieves an aggregated list of machine types.", + // "description": "List the internal IP owners in the specified network.", // "httpMethod": "GET", - // "id": "compute.machineTypes.aggregatedList", + // "id": "compute.networks.listIpOwners", // "parameterOrder": [ - // "project" + // "project", + // "network" // ], // "parameters": { // "filter": { @@ -61335,6 +75453,11 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach // "location": "query", // "type": "string" // }, + // "ipCidrRange": { + // "description": "(Optional) IP CIDR range filter, example: \"10.128.10.0/30\".", + // "location": "query", + // "type": "string" + // }, // "maxResults": { // "default": "500", // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", @@ -61343,11 +75466,28 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach // "minimum": "0", // "type": "integer" // }, + // "network": { + // "description": "Name of the network to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "orderBy": { // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", // "location": "query", // "type": "string" // }, + // "ownerProjects": { + // "description": "(Optional) Project IDs filter, example: \"project-1,project-2\".", + // "location": "query", + // "type": "string" + // }, + // "ownerTypes": { + // "description": "(Optional) Owner types filter, example: \"instance,forwardingRule\".", + // "location": "query", + // "type": "string" + // }, // "pageToken": { // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", @@ -61359,11 +75499,23 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "subnetName": { + // "description": "(Optional) Subnetwork name filter.", + // "location": "query", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "type": "string" + // }, + // "subnetRegion": { + // "description": "(Optional) Subnetwork region filter.", + // "location": "query", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "type": "string" // } // }, - // "path": "{project}/aggregated/machineTypes", + // "path": "{project}/global/networks/{network}/listIpOwners", // "response": { - // "$ref": "MachineTypeAggregatedList" + // "$ref": "IpOwnerList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -61377,7 +75529,7 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *MachineTypesAggregatedListCall) Pages(ctx context.Context, f func(*MachineTypeAggregatedList) error) error { +func (c *NetworksListIpOwnersCall) Pages(ctx context.Context, f func(*IpOwnerList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -61395,96 +75547,104 @@ func (c *MachineTypesAggregatedListCall) Pages(ctx context.Context, f func(*Mach } } -// method id "compute.machineTypes.get": +// method id "compute.networks.patch": -type MachineTypesGetCall struct { - s *Service - project string - zone string - machineType string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type NetworksPatchCall struct { + s *Service + project string + network string + network2 *Network + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified machine type. Get a list of available -// machine types by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/get -func (r *MachineTypesService) Get(project string, zone string, machineType string) *MachineTypesGetCall { - c := &MachineTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Patches the specified network with the data included in the +// request. +func (r *NetworksService) Patch(project string, network string, network2 *Network) *NetworksPatchCall { + c := &NetworksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.machineType = machineType + c.network = network + c.network2 = network2 + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *NetworksPatchCall) RequestId(requestId string) *NetworksPatchCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *MachineTypesGetCall) Fields(s ...googleapi.Field) *MachineTypesGetCall { +func (c *NetworksPatchCall) Fields(s ...googleapi.Field) *NetworksPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *MachineTypesGetCall) IfNoneMatch(entityTag string) *MachineTypesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *MachineTypesGetCall) Context(ctx context.Context) *MachineTypesGetCall { +func (c *NetworksPatchCall) Context(ctx context.Context) *NetworksPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *MachineTypesGetCall) Header() http.Header { +func (c *NetworksPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *MachineTypesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.network2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/machineTypes/{machineType}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("PATCH", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "machineType": c.machineType, + "project": c.project, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.machineTypes.get" call. -// Exactly one of *MachineType or error will be non-nil. Any non-2xx +// Do executes the "compute.networks.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either -// *MachineType.ServerResponse.Header or (if a response was returned at +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, error) { +func (c *NetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -61503,7 +75663,7 @@ func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, er if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &MachineType{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -61515,17 +75675,16 @@ func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, er } return ret, nil // { - // "description": "Returns the specified machine type. Get a list of available machine types by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.machineTypes.get", + // "description": "Patches the specified network with the data included in the request.", + // "httpMethod": "PATCH", + // "id": "compute.networks.patch", // "parameterOrder": [ // "project", - // "zone", - // "machineType" + // "network" // ], // "parameters": { - // "machineType": { - // "description": "Name of the machine type to return.", + // "network": { + // "description": "Name of the network to update.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -61538,181 +75697,288 @@ func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, er // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/machineTypes/{machineType}", + // "path": "{project}/global/networks/{network}", + // "request": { + // "$ref": "Network" + // }, // "response": { - // "$ref": "MachineType" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.machineTypes.list": +// method id "compute.networks.removePeering": -type MachineTypesListCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type NetworksRemovePeeringCall struct { + s *Service + project string + network string + networksremovepeeringrequest *NetworksRemovePeeringRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of machine types available to the specified -// project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/list -func (r *MachineTypesService) List(project string, zone string) *MachineTypesListCall { - c := &MachineTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// RemovePeering: Removes a peering from the specified network. +func (r *NetworksService) RemovePeering(project string, network string, networksremovepeeringrequest *NetworksRemovePeeringRequest) *NetworksRemovePeeringCall { + c := &NetworksRemovePeeringCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone + c.network = network + c.networksremovepeeringrequest = networksremovepeeringrequest return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *MachineTypesListCall) Filter(filter string) *MachineTypesListCall { - c.urlParams_.Set("filter", filter) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *NetworksRemovePeeringCall) RequestId(requestId string) *NetworksRemovePeeringCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NetworksRemovePeeringCall) Fields(s ...googleapi.Field) *NetworksRemovePeeringCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NetworksRemovePeeringCall) Context(ctx context.Context) *NetworksRemovePeeringCall { + c.ctx_ = ctx return c } -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *MachineTypesListCall) MaxResults(maxResults int64) *MachineTypesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NetworksRemovePeeringCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NetworksRemovePeeringCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networksremovepeeringrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/removePeering") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "network": c.network, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.networks.removePeering" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
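// Usage sketch for compute.networks.removePeering with the RequestId
// idempotency token described above. Same svc/ctx assumptions as the first
// sketch; the Name field on NetworksRemovePeeringRequest and the Name field
// on Operation are assumptions about types defined elsewhere in this package,
// and the UUID literal stands in for a freshly generated one.
func removePeering(ctx context.Context, svc *compute.Service, project, network, peering string) error {
	req := &compute.NetworksRemovePeeringRequest{Name: peering}
	op, err := svc.Networks.RemovePeering(project, network, req).
		RequestId("0f1e2d3c-4b5a-6978-8796-a5b4c3d2e1f0"). // reuse the same ID on retries
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Println("removePeering operation:", op.Name)
	return nil
}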
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworksRemovePeeringCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Removes a peering from the specified network.", + // "httpMethod": "POST", + // "id": "compute.networks.removePeering", + // "parameterOrder": [ + // "project", + // "network" + // ], + // "parameters": { + // "network": { + // "description": "Name of the network resource to remove peering from.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/networks/{network}/removePeering", + // "request": { + // "$ref": "NetworksRemovePeeringRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.networks.switchToCustomMode": + +type NetworksSwitchToCustomModeCall struct { + s *Service + project string + network string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SwitchToCustomMode: Switches the network mode from auto subnet mode +// to custom subnet mode. +func (r *NetworksService) SwitchToCustomMode(project string, network string) *NetworksSwitchToCustomModeCall { + c := &NetworksSwitchToCustomModeCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.network = network return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. 
+// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *MachineTypesListCall) OrderBy(orderBy string) *MachineTypesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *MachineTypesListCall) PageToken(pageToken string) *MachineTypesListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *NetworksSwitchToCustomModeCall) RequestId(requestId string) *NetworksSwitchToCustomModeCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *MachineTypesListCall) Fields(s ...googleapi.Field) *MachineTypesListCall { +func (c *NetworksSwitchToCustomModeCall) Fields(s ...googleapi.Field) *NetworksSwitchToCustomModeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *MachineTypesListCall) IfNoneMatch(entityTag string) *MachineTypesListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *MachineTypesListCall) Context(ctx context.Context) *MachineTypesListCall { +func (c *NetworksSwitchToCustomModeCall) Context(ctx context.Context) *NetworksSwitchToCustomModeCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
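// Usage sketch for compute.networks.switchToCustomMode, whose builder methods
// are generated above. Same svc/ctx assumptions as the first sketch;
// Operation.Name and Operation.Status are assumptions about fields defined
// elsewhere in this package.
func switchToCustomMode(ctx context.Context, svc *compute.Service, project, network string) error {
	op, err := svc.Networks.SwitchToCustomMode(project, network).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Printf("switchToCustomMode operation %s: %s\n", op.Name, op.Status)
	return nil
}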
-func (c *MachineTypesListCall) Header() http.Header { +func (c *NetworksSwitchToCustomModeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *MachineTypesListCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksSwitchToCustomModeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/machineTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/switchToCustomMode") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.machineTypes.list" call. -// Exactly one of *MachineTypeList or error will be non-nil. Any non-2xx +// Do executes the "compute.networks.switchToCustomMode" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *MachineTypeList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeList, error) { +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworksSwitchToCustomModeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -61731,7 +75997,7 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &MachineTypeList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -61743,35 +76009,19 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis } return ret, nil // { - // "description": "Retrieves a list of machine types available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.machineTypes.list", + // "description": "Switches the network mode from auto subnet mode to custom subnet mode.", + // "httpMethod": "POST", + // "id": "compute.networks.switchToCustomMode", // "parameterOrder": [ // "project", - // "zone" + // "network" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). 
The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "network": { + // "description": "Name of the network to be updated.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, // "project": { @@ -61781,92 +76031,50 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/machineTypes", + // "path": "{project}/global/networks/{network}/switchToCustomMode", // "response": { - // "$ref": "MachineTypeList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *MachineTypesListCall) Pages(ctx context.Context, f func(*MachineTypeList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.networks.addPeering": +// method id "compute.networks.testIamPermissions": -type NetworksAddPeeringCall struct { - s *Service - project string - network string - networksaddpeeringrequest *NetworksAddPeeringRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworksTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AddPeering: Adds a peering to the specified network. -func (r *NetworksService) AddPeering(project string, network string, networksaddpeeringrequest *NetworksAddPeeringRequest) *NetworksAddPeeringCall { - c := &NetworksAddPeeringCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *NetworksService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *NetworksTestIamPermissionsCall { + c := &NetworksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network - c.networksaddpeeringrequest = networksaddpeeringrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *NetworksAddPeeringCall) RequestId(requestId string) *NetworksAddPeeringCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksAddPeeringCall) Fields(s ...googleapi.Field) *NetworksAddPeeringCall { +func (c *NetworksTestIamPermissionsCall) Fields(s ...googleapi.Field) *NetworksTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -61874,52 +76082,52 @@ func (c *NetworksAddPeeringCall) Fields(s ...googleapi.Field) *NetworksAddPeerin // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksAddPeeringCall) Context(ctx context.Context) *NetworksAddPeeringCall { +func (c *NetworksTestIamPermissionsCall) Context(ctx context.Context) *NetworksTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksAddPeeringCall) Header() http.Header { +func (c *NetworksTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksAddPeeringCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.networksaddpeeringrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/addPeering") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "network": c.network, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.addPeering" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NetworksAddPeeringCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.networks.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. 
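// Usage sketch for compute.networks.testIamPermissions as generated above.
// Same svc/ctx assumptions as the first sketch; the Permissions fields on
// TestPermissionsRequest and TestPermissionsResponse are assumptions about
// types defined elsewhere in this package, and the permission names are
// illustrative.
func checkNetworkPermissions(ctx context.Context, svc *compute.Service, project, network string) ([]string, error) {
	req := &compute.TestPermissionsRequest{
		Permissions: []string{"compute.networks.get", "compute.networks.update"},
	}
	resp, err := svc.Networks.TestIamPermissions(project, network, req).
		Context(ctx).
		Do()
	if err != nil {
		return nil, err
	}
	return resp.Permissions, nil
}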
+func (c *NetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -61938,7 +76146,7 @@ func (c *NetworksAddPeeringCall) Do(opts ...googleapi.CallOption) (*Operation, e if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -61950,21 +76158,14 @@ func (c *NetworksAddPeeringCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Adds a peering to the specified network.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.networks.addPeering", + // "id": "compute.networks.testIamPermissions", // "parameterOrder": [ // "project", - // "network" + // "resource" // ], // "parameters": { - // "network": { - // "description": "Name of the network resource to add peering to.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -61972,44 +76173,44 @@ func (c *NetworksAddPeeringCall) Do(opts ...googleapi.CallOption) (*Operation, e // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/networks/{network}/addPeering", + // "path": "{project}/global/networks/{resource}/testIamPermissions", // "request": { - // "$ref": "NetworksAddPeeringRequest" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.networks.delete": +// method id "compute.projects.disableXpnHost": -type NetworksDeleteCall struct { +type ProjectsDisableXpnHostCall struct { s *Service project string - network string urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Delete: Deletes the specified network. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/delete -func (r *NetworksService) Delete(project string, network string) *NetworksDeleteCall { - c := &NetworksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// DisableXpnHost: Disable this project as a shared VPC host project. +func (r *ProjectsService) DisableXpnHost(project string) *ProjectsDisableXpnHostCall { + c := &ProjectsDisableXpnHostCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network return c } @@ -62027,7 +76228,7 @@ func (r *NetworksService) Delete(project string, network string) *NetworksDelete // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *NetworksDeleteCall) RequestId(requestId string) *NetworksDeleteCall { +func (c *ProjectsDisableXpnHostCall) RequestId(requestId string) *ProjectsDisableXpnHostCall { c.urlParams_.Set("requestId", requestId) return c } @@ -62035,7 +76236,7 @@ func (c *NetworksDeleteCall) RequestId(requestId string) *NetworksDeleteCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksDeleteCall) Fields(s ...googleapi.Field) *NetworksDeleteCall { +func (c *ProjectsDisableXpnHostCall) Fields(s ...googleapi.Field) *ProjectsDisableXpnHostCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -62043,21 +76244,21 @@ func (c *NetworksDeleteCall) Fields(s ...googleapi.Field) *NetworksDeleteCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksDeleteCall) Context(ctx context.Context) *NetworksDeleteCall { +func (c *ProjectsDisableXpnHostCall) Context(ctx context.Context) *ProjectsDisableXpnHostCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksDeleteCall) Header() http.Header { +func (c *ProjectsDisableXpnHostCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsDisableXpnHostCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -62065,25 +76266,24 @@ func (c *NetworksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/disableXpnHost") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.delete" call. +// Do executes the "compute.projects.disableXpnHost" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ProjectsDisableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -62114,21 +76314,13 @@ func (c *NetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Deletes the specified network.", - // "httpMethod": "DELETE", - // "id": "compute.networks.delete", + // "description": "Disable this project as a shared VPC host project.", + // "httpMethod": "POST", + // "id": "compute.projects.disableXpnHost", // "parameterOrder": [ - // "project", - // "network" + // "project" // ], // "parameters": { - // "network": { - // "description": "Name of the network to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -62142,7 +76334,7 @@ func (c *NetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error // "type": "string" // } // }, - // "path": "{project}/global/networks/{network}", + // "path": "{project}/disableXpnHost", // "response": { // "$ref": "Operation" // }, @@ -62154,93 +76346,101 @@ func (c *NetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error } -// method id "compute.networks.get": +// method id "compute.projects.disableXpnResource": -type NetworksGetCall struct { - s *Service - project string - network string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ProjectsDisableXpnResourceCall struct { + s *Service + project string + projectsdisablexpnresourcerequest *ProjectsDisableXpnResourceRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified network. Get a list of available networks -// by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/get -func (r *NetworksService) Get(project string, network string) *NetworksGetCall { - c := &NetworksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// DisableXpnResource: Disable a serivce resource (a.k.a service +// project) associated with this host project. +func (r *ProjectsService) DisableXpnResource(project string, projectsdisablexpnresourcerequest *ProjectsDisableXpnResourceRequest) *ProjectsDisableXpnResourceCall { + c := &ProjectsDisableXpnResourceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network + c.projectsdisablexpnresourcerequest = projectsdisablexpnresourcerequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. 
This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ProjectsDisableXpnResourceCall) RequestId(requestId string) *ProjectsDisableXpnResourceCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksGetCall) Fields(s ...googleapi.Field) *NetworksGetCall { +func (c *ProjectsDisableXpnResourceCall) Fields(s ...googleapi.Field) *ProjectsDisableXpnResourceCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *NetworksGetCall) IfNoneMatch(entityTag string) *NetworksGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksGetCall) Context(ctx context.Context) *NetworksGetCall { +func (c *ProjectsDisableXpnResourceCall) Context(ctx context.Context) *ProjectsDisableXpnResourceCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksGetCall) Header() http.Header { +func (c *ProjectsDisableXpnResourceCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsDisableXpnResourceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectsdisablexpnresourcerequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/disableXpnResource") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.get" call. -// Exactly one of *Network or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Network.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *NetworksGetCall) Do(opts ...googleapi.CallOption) (*Network, error) { +// Do executes the "compute.projects.disableXpnResource" call. 
+// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsDisableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -62259,7 +76459,7 @@ func (c *NetworksGetCall) Do(opts ...googleapi.CallOption) (*Network, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Network{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -62271,60 +76471,55 @@ func (c *NetworksGetCall) Do(opts ...googleapi.CallOption) (*Network, error) { } return ret, nil // { - // "description": "Returns the specified network. Get a list of available networks by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.networks.get", + // "description": "Disable a serivce resource (a.k.a service project) associated with this host project.", + // "httpMethod": "POST", + // "id": "compute.projects.disableXpnResource", // "parameterOrder": [ - // "project", - // "network" + // "project" // ], // "parameters": { - // "network": { - // "description": "Name of the network to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/global/networks/{network}", + // "path": "{project}/disableXpnResource", + // "request": { + // "$ref": "ProjectsDisableXpnResourceRequest" + // }, // "response": { - // "$ref": "Network" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.networks.insert": +// method id "compute.projects.enableXpnHost": -type NetworksInsertCall struct { +type ProjectsEnableXpnHostCall struct { s *Service project string - network *Network urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Insert: Creates a network in the specified project using the data -// included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/insert -func (r *NetworksService) Insert(project string, network *Network) *NetworksInsertCall { - c := &NetworksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// EnableXpnHost: Enable this project as a shared VPC host project. +func (r *ProjectsService) EnableXpnHost(project string) *ProjectsEnableXpnHostCall { + c := &ProjectsEnableXpnHostCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network return c } @@ -62342,7 +76537,7 @@ func (r *NetworksService) Insert(project string, network *Network) *NetworksInse // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *NetworksInsertCall) RequestId(requestId string) *NetworksInsertCall { +func (c *ProjectsEnableXpnHostCall) RequestId(requestId string) *ProjectsEnableXpnHostCall { c.urlParams_.Set("requestId", requestId) return c } @@ -62350,7 +76545,7 @@ func (c *NetworksInsertCall) RequestId(requestId string) *NetworksInsertCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksInsertCall) Fields(s ...googleapi.Field) *NetworksInsertCall { +func (c *ProjectsEnableXpnHostCall) Fields(s ...googleapi.Field) *ProjectsEnableXpnHostCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -62358,34 +76553,29 @@ func (c *NetworksInsertCall) Fields(s ...googleapi.Field) *NetworksInsertCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksInsertCall) Context(ctx context.Context) *NetworksInsertCall { +func (c *ProjectsEnableXpnHostCall) Context(ctx context.Context) *ProjectsEnableXpnHostCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *NetworksInsertCall) Header() http.Header { +func (c *ProjectsEnableXpnHostCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsEnableXpnHostCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.network) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/enableXpnHost") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -62395,14 +76585,14 @@ func (c *NetworksInsertCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.insert" call. +// Do executes the "compute.projects.enableXpnHost" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ProjectsEnableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -62433,9 +76623,9 @@ func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Creates a network in the specified project using the data included in the request.", + // "description": "Enable this project as a shared VPC host project.", // "httpMethod": "POST", - // "id": "compute.networks.insert", + // "id": "compute.projects.enableXpnHost", // "parameterOrder": [ // "project" // ], @@ -62453,157 +76643,99 @@ func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error // "type": "string" // } // }, - // "path": "{project}/global/networks", - // "request": { - // "$ref": "Network" - // }, + // "path": "{project}/enableXpnHost", // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.networks.list": - -type NetworksListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} + // ] + // } -// List: Retrieves the list of networks available to the specified -// project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/list -func (r *NetworksService) List(project string) *NetworksListCall { - c := &NetworksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. 
Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *NetworksListCall) Filter(filter string) *NetworksListCall { - c.urlParams_.Set("filter", filter) - return c +// method id "compute.projects.enableXpnResource": + +type ProjectsEnableXpnResourceCall struct { + s *Service + project string + projectsenablexpnresourcerequest *ProjectsEnableXpnResourceRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *NetworksListCall) MaxResults(maxResults int64) *NetworksListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) +// EnableXpnResource: Enable service resource (a.k.a service project) +// for a host project, so that subnets in the host project can be used +// by instances in the service project. +func (r *ProjectsService) EnableXpnResource(project string, projectsenablexpnresourcerequest *ProjectsEnableXpnResourceRequest) *ProjectsEnableXpnResourceCall { + c := &ProjectsEnableXpnResourceCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.projectsenablexpnresourcerequest = projectsenablexpnresourcerequest return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). 
Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *NetworksListCall) OrderBy(orderBy string) *NetworksListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *NetworksListCall) PageToken(pageToken string) *NetworksListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ProjectsEnableXpnResourceCall) RequestId(requestId string) *ProjectsEnableXpnResourceCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksListCall) Fields(s ...googleapi.Field) *NetworksListCall { +func (c *ProjectsEnableXpnResourceCall) Fields(s ...googleapi.Field) *ProjectsEnableXpnResourceCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *NetworksListCall) IfNoneMatch(entityTag string) *NetworksListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksListCall) Context(ctx context.Context) *NetworksListCall { +func (c *ProjectsEnableXpnResourceCall) Context(ctx context.Context) *ProjectsEnableXpnResourceCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *NetworksListCall) Header() http.Header { +func (c *ProjectsEnableXpnResourceCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksListCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsEnableXpnResourceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectsenablexpnresourcerequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/enableXpnResource") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, @@ -62611,14 +76743,14 @@ func (c *NetworksListCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.list" call. -// Exactly one of *NetworkList or error will be non-nil. Any non-2xx +// Do executes the "compute.projects.enableXpnResource" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *NetworkList.ServerResponse.Header or (if a response was returned at +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error) { +func (c *ProjectsEnableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -62637,7 +76769,7 @@ func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NetworkList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -62649,204 +76781,64 @@ func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error } return ret, nil // { - // "description": "Retrieves the list of networks available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.networks.list", + // "description": "Enable service resource (a.k.a service project) for a host project, so that subnets in the host project can be used by instances in the service project.", + // "httpMethod": "POST", + // "id": "compute.projects.enableXpnResource", // "parameterOrder": [ // "project" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/global/networks", + // "path": "{project}/enableXpnResource", + // "request": { + // "$ref": "ProjectsEnableXpnResourceRequest" + // }, // "response": { - // "$ref": "NetworkList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *NetworksListCall) Pages(ctx context.Context, f func(*NetworkList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.networks.listIpOwners": +// method id "compute.projects.get": -type NetworksListIpOwnersCall struct { +type ProjectsGetCall struct { s *Service project string - network string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// ListIpOwners: List the internal IP owners in the specified network. -func (r *NetworksService) ListIpOwners(project string, network string) *NetworksListIpOwnersCall { - c := &NetworksListIpOwnersCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified Project resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/projects/get +func (r *ProjectsService) Get(project string) *ProjectsGetCall { + c := &ProjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network - return c -} - -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). 
Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *NetworksListIpOwnersCall) Filter(filter string) *NetworksListIpOwnersCall { - c.urlParams_.Set("filter", filter) - return c -} - -// IpCidrRange sets the optional parameter "ipCidrRange": (Optional) IP -// CIDR range filter, example: "10.128.10.0/30". -func (c *NetworksListIpOwnersCall) IpCidrRange(ipCidrRange string) *NetworksListIpOwnersCall { - c.urlParams_.Set("ipCidrRange", ipCidrRange) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *NetworksListIpOwnersCall) MaxResults(maxResults int64) *NetworksListIpOwnersCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *NetworksListIpOwnersCall) OrderBy(orderBy string) *NetworksListIpOwnersCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// OwnerProjects sets the optional parameter "ownerProjects": (Optional) -// Project IDs filter, example: "project-1,project-2". -func (c *NetworksListIpOwnersCall) OwnerProjects(ownerProjects string) *NetworksListIpOwnersCall { - c.urlParams_.Set("ownerProjects", ownerProjects) - return c -} - -// OwnerTypes sets the optional parameter "ownerTypes": (Optional) Owner -// types filter, example: "instance,forwardingRule". -func (c *NetworksListIpOwnersCall) OwnerTypes(ownerTypes string) *NetworksListIpOwnersCall { - c.urlParams_.Set("ownerTypes", ownerTypes) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *NetworksListIpOwnersCall) PageToken(pageToken string) *NetworksListIpOwnersCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// SubnetName sets the optional parameter "subnetName": (Optional) -// Subnetwork name filter. -func (c *NetworksListIpOwnersCall) SubnetName(subnetName string) *NetworksListIpOwnersCall { - c.urlParams_.Set("subnetName", subnetName) - return c -} - -// SubnetRegion sets the optional parameter "subnetRegion": (Optional) -// Subnetwork region filter. -func (c *NetworksListIpOwnersCall) SubnetRegion(subnetRegion string) *NetworksListIpOwnersCall { - c.urlParams_.Set("subnetRegion", subnetRegion) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *NetworksListIpOwnersCall) Fields(s ...googleapi.Field) *NetworksListIpOwnersCall { +func (c *ProjectsGetCall) Fields(s ...googleapi.Field) *ProjectsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -62856,7 +76848,7 @@ func (c *NetworksListIpOwnersCall) Fields(s ...googleapi.Field) *NetworksListIpO // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *NetworksListIpOwnersCall) IfNoneMatch(entityTag string) *NetworksListIpOwnersCall { +func (c *ProjectsGetCall) IfNoneMatch(entityTag string) *ProjectsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -62864,21 +76856,21 @@ func (c *NetworksListIpOwnersCall) IfNoneMatch(entityTag string) *NetworksListIp // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksListIpOwnersCall) Context(ctx context.Context) *NetworksListIpOwnersCall { +func (c *ProjectsGetCall) Context(ctx context.Context) *ProjectsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksListIpOwnersCall) Header() http.Header { +func (c *ProjectsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksListIpOwnersCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -62889,25 +76881,24 @@ func (c *NetworksListIpOwnersCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/listIpOwners") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.listIpOwners" call. -// Exactly one of *IpOwnerList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *IpOwnerList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NetworksListIpOwnersCall) Do(opts ...googleapi.CallOption) (*IpOwnerList, error) { +// Do executes the "compute.projects.get" call. +// Exactly one of *Project or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Project.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -62926,7 +76917,7 @@ func (c *NetworksListIpOwnersCall) Do(opts ...googleapi.CallOption) (*IpOwnerLis if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &IpOwnerList{ + ret := &Project{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -62938,82 +76929,24 @@ func (c *NetworksListIpOwnersCall) Do(opts ...googleapi.CallOption) (*IpOwnerLis } return ret, nil // { - // "description": "List the internal IP owners in the specified network.", + // "description": "Returns the specified Project resource.", // "httpMethod": "GET", - // "id": "compute.networks.listIpOwners", + // "id": "compute.projects.get", // "parameterOrder": [ - // "project", - // "network" + // "project" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "ipCidrRange": { - // "description": "(Optional) IP CIDR range filter, example: \"10.128.10.0/30\".", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "network": { - // "description": "Name of the network to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "ownerProjects": { - // "description": "(Optional) Project IDs filter, example: \"project-1,project-2\".", - // "location": "query", - // "type": "string" - // }, - // "ownerTypes": { - // "description": "(Optional) Owner types filter, example: \"instance,forwardingRule\".", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "subnetName": { - // "description": "(Optional) Subnetwork name filter.", - // "location": "query", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "type": "string" - // }, - // "subnetRegion": { - // "description": "(Optional) Subnetwork region filter.", - // "location": "query", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "type": "string" // } // }, - // "path": "{project}/global/networks/{network}/listIpOwners", + // "path": "{project}", // "response": { - // "$ref": "IpOwnerList" + // "$ref": "Project" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -63024,125 +76957,89 @@ func (c *NetworksListIpOwnersCall) Do(opts ...googleapi.CallOption) (*IpOwnerLis } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *NetworksListIpOwnersCall) Pages(ctx context.Context, f func(*IpOwnerList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.networks.patch": +// method id "compute.projects.getXpnHost": -type NetworksPatchCall struct { - s *Service - project string - network string - network2 *Network - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsGetXpnHostCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Patch: Patches the specified network with the data included in the -// request. -func (r *NetworksService) Patch(project string, network string, network2 *Network) *NetworksPatchCall { - c := &NetworksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetXpnHost: Get the shared VPC host project that this project links +// to. May be empty if no link exists. +func (r *ProjectsService) GetXpnHost(project string) *ProjectsGetXpnHostCall { + c := &ProjectsGetXpnHostCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network - c.network2 = network2 - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. 
Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *NetworksPatchCall) RequestId(requestId string) *NetworksPatchCall { - c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksPatchCall) Fields(s ...googleapi.Field) *NetworksPatchCall { +func (c *ProjectsGetXpnHostCall) Fields(s ...googleapi.Field) *ProjectsGetXpnHostCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsGetXpnHostCall) IfNoneMatch(entityTag string) *ProjectsGetXpnHostCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksPatchCall) Context(ctx context.Context) *NetworksPatchCall { +func (c *ProjectsGetXpnHostCall) Context(ctx context.Context) *ProjectsGetXpnHostCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksPatchCall) Header() http.Header { +func (c *ProjectsGetXpnHostCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsGetXpnHostCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.network2) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/getXpnHost") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. 
Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.projects.getXpnHost" call. +// Exactly one of *Project or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Project.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsGetXpnHostCall) Do(opts ...googleapi.CallOption) (*Project, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -63161,7 +77058,7 @@ func (c *NetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Project{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -63173,147 +77070,140 @@ func (c *NetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Patches the specified network with the data included in the request.", - // "httpMethod": "PATCH", - // "id": "compute.networks.patch", + // "description": "Get the shared VPC host project that this project links to. May be empty if no link exists.", + // "httpMethod": "GET", + // "id": "compute.projects.getXpnHost", // "parameterOrder": [ - // "project", - // "network" + // "project" // ], // "parameters": { - // "network": { - // "description": "Name of the network to update.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/global/networks/{network}", - // "request": { - // "$ref": "Network" - // }, + // "path": "{project}/getXpnHost", // "response": { - // "$ref": "Operation" + // "$ref": "Project" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.networks.removePeering": +// method id "compute.projects.getXpnResources": -type NetworksRemovePeeringCall struct { - s *Service - project string - network string - networksremovepeeringrequest *NetworksRemovePeeringRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsGetXpnResourcesCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// RemovePeering: Removes a peering from the specified network. -func (r *NetworksService) RemovePeering(project string, network string, networksremovepeeringrequest *NetworksRemovePeeringRequest) *NetworksRemovePeeringCall { - c := &NetworksRemovePeeringCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetXpnResources: Get service resources (a.k.a service project) +// associated with this host project. +func (r *ProjectsService) GetXpnResources(project string) *ProjectsGetXpnResourcesCall { + c := &ProjectsGetXpnResourcesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network - c.networksremovepeeringrequest = networksremovepeeringrequest return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *NetworksRemovePeeringCall) RequestId(requestId string) *NetworksRemovePeeringCall { - c.urlParams_.Set("requestId", requestId) +// Filter sets the optional parameter "filter": +func (c *ProjectsGetXpnResourcesCall) Filter(filter string) *ProjectsGetXpnResourcesCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": +func (c *ProjectsGetXpnResourcesCall) MaxResults(maxResults int64) *ProjectsGetXpnResourcesCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "order_by": +func (c *ProjectsGetXpnResourcesCall) OrderBy(orderBy string) *ProjectsGetXpnResourcesCall { + c.urlParams_.Set("order_by", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": +func (c *ProjectsGetXpnResourcesCall) PageToken(pageToken string) *ProjectsGetXpnResourcesCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksRemovePeeringCall) Fields(s ...googleapi.Field) *NetworksRemovePeeringCall { +func (c *ProjectsGetXpnResourcesCall) Fields(s ...googleapi.Field) *ProjectsGetXpnResourcesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsGetXpnResourcesCall) IfNoneMatch(entityTag string) *ProjectsGetXpnResourcesCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksRemovePeeringCall) Context(ctx context.Context) *NetworksRemovePeeringCall { +func (c *ProjectsGetXpnResourcesCall) Context(ctx context.Context) *ProjectsGetXpnResourcesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksRemovePeeringCall) Header() http.Header { +func (c *ProjectsGetXpnResourcesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksRemovePeeringCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsGetXpnResourcesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.networksremovepeeringrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/removePeering") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/getXpnResources") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.removePeering" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NetworksRemovePeeringCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.projects.getXpnResources" call. +// Exactly one of *ProjectsGetXpnResources or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ProjectsGetXpnResources.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsGetXpnResourcesCall) Do(opts ...googleapi.CallOption) (*ProjectsGetXpnResources, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -63332,7 +77222,7 @@ func (c *NetworksRemovePeeringCall) Do(opts ...googleapi.CallOption) (*Operation if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &ProjectsGetXpnResources{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -63344,19 +77234,30 @@ func (c *NetworksRemovePeeringCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Removes a peering from the specified network.", - // "httpMethod": "POST", - // "id": "compute.networks.removePeering", + // "description": "Get service resources (a.k.a service project) associated with this host project.", + // "httpMethod": "GET", + // "id": "compute.projects.getXpnResources", // "parameterOrder": [ - // "project", - // "network" + // "project" // ], // "parameters": { - // "network": { - // "description": "Name of the network resource to remove peering from.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "order_by": { + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "location": "query", // "type": "string" // }, // "project": { @@ -63365,19 +77266,11 @@ func (c *NetworksRemovePeeringCall) Do(opts ...googleapi.CallOption) (*Operation // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/global/networks/{network}/removePeering", - // "request": { - // "$ref": "NetworksRemovePeeringRequest" - // }, + // "path": "{project}/getXpnResources", // "response": { - // "$ref": "Operation" + // "$ref": "ProjectsGetXpnResources" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -63387,49 +77280,75 @@ func (c *NetworksRemovePeeringCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.networks.switchToCustomMode": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsGetXpnResourcesCall) Pages(ctx context.Context, f func(*ProjectsGetXpnResources) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type NetworksSwitchToCustomModeCall struct { - s *Service - project string - network string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.projects.listXpnHosts": + +type ProjectsListXpnHostsCall struct { + s *Service + project string + projectslistxpnhostsrequest *ProjectsListXpnHostsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SwitchToCustomMode: Switches the network mode from auto subnet mode -// to custom subnet mode. -func (r *NetworksService) SwitchToCustomMode(project string, network string) *NetworksSwitchToCustomModeCall { - c := &NetworksSwitchToCustomModeCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListXpnHosts: List all shared VPC host projects visible to the user +// in an organization. +func (r *ProjectsService) ListXpnHosts(project string, projectslistxpnhostsrequest *ProjectsListXpnHostsRequest) *ProjectsListXpnHostsCall { + c := &ProjectsListXpnHostsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network + c.projectslistxpnhostsrequest = projectslistxpnhostsrequest return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. 
This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *NetworksSwitchToCustomModeCall) RequestId(requestId string) *NetworksSwitchToCustomModeCall { - c.urlParams_.Set("requestId", requestId) +// Filter sets the optional parameter "filter": +func (c *ProjectsListXpnHostsCall) Filter(filter string) *ProjectsListXpnHostsCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": +func (c *ProjectsListXpnHostsCall) MaxResults(maxResults int64) *ProjectsListXpnHostsCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "order_by": +func (c *ProjectsListXpnHostsCall) OrderBy(orderBy string) *ProjectsListXpnHostsCall { + c.urlParams_.Set("order_by", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": +func (c *ProjectsListXpnHostsCall) PageToken(pageToken string) *ProjectsListXpnHostsCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksSwitchToCustomModeCall) Fields(s ...googleapi.Field) *NetworksSwitchToCustomModeCall { +func (c *ProjectsListXpnHostsCall) Fields(s ...googleapi.Field) *ProjectsListXpnHostsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -63437,47 +77356,51 @@ func (c *NetworksSwitchToCustomModeCall) Fields(s ...googleapi.Field) *NetworksS // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksSwitchToCustomModeCall) Context(ctx context.Context) *NetworksSwitchToCustomModeCall { +func (c *ProjectsListXpnHostsCall) Context(ctx context.Context) *ProjectsListXpnHostsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksSwitchToCustomModeCall) Header() http.Header { +func (c *ProjectsListXpnHostsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksSwitchToCustomModeCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsListXpnHostsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectslistxpnhostsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/switchToCustomMode") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/listXpnHosts") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.switchToCustomMode" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.projects.listXpnHosts" call. 
+// Exactly one of *XpnHostList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// *XpnHostList.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NetworksSwitchToCustomModeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ProjectsListXpnHostsCall) Do(opts ...googleapi.CallOption) (*XpnHostList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -63496,7 +77419,7 @@ func (c *NetworksSwitchToCustomModeCall) Do(opts ...googleapi.CallOption) (*Oper if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &XpnHostList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -63508,19 +77431,30 @@ func (c *NetworksSwitchToCustomModeCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Switches the network mode from auto subnet mode to custom subnet mode.", + // "description": "List all shared VPC host projects visible to the user in an organization.", // "httpMethod": "POST", - // "id": "compute.networks.switchToCustomMode", + // "id": "compute.projects.listXpnHosts", // "parameterOrder": [ - // "project", - // "network" + // "project" // ], // "parameters": { - // "network": { - // "description": "Name of the network to be updated.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "order_by": { + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "location": "query", // "type": "string" // }, // "project": { @@ -63529,16 +77463,14 @@ func (c *NetworksSwitchToCustomModeCall) Do(opts ...googleapi.CallOption) (*Oper // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/global/networks/{network}/switchToCustomMode", + // "path": "{project}/listXpnHosts", + // "request": { + // "$ref": "ProjectsListXpnHostsRequest" + // }, // "response": { - // "$ref": "Operation" + // "$ref": "XpnHostList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -63548,32 +77480,69 @@ func (c *NetworksSwitchToCustomModeCall) Do(opts ...googleapi.CallOption) (*Oper } -// method id "compute.networks.testIamPermissions": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsListXpnHostsCall) Pages(ctx context.Context, f func(*XpnHostList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type NetworksTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.projects.moveDisk": + +type ProjectsMoveDiskCall struct { + s *Service + project string + diskmoverequest *DiskMoveRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *NetworksService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *NetworksTestIamPermissionsCall { - c := &NetworksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// MoveDisk: Moves a persistent disk from one zone to another. +func (r *ProjectsService) MoveDisk(project string, diskmoverequest *DiskMoveRequest) *ProjectsMoveDiskCall { + c := &ProjectsMoveDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.diskmoverequest = diskmoverequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ProjectsMoveDiskCall) RequestId(requestId string) *ProjectsMoveDiskCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksTestIamPermissionsCall) Fields(s ...googleapi.Field) *NetworksTestIamPermissionsCall { +func (c *ProjectsMoveDiskCall) Fields(s ...googleapi.Field) *ProjectsMoveDiskCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -63581,52 +77550,51 @@ func (c *NetworksTestIamPermissionsCall) Fields(s ...googleapi.Field) *NetworksT // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksTestIamPermissionsCall) Context(ctx context.Context) *NetworksTestIamPermissionsCall { +func (c *ProjectsMoveDiskCall) Context(ctx context.Context) *ProjectsMoveDiskCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksTestIamPermissionsCall) Header() http.Header { +func (c *ProjectsMoveDiskCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsMoveDiskCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.diskmoverequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/moveDisk") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *NetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.projects.moveDisk" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsMoveDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -63645,7 +77613,7 @@ func (c *NetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -63657,12 +77625,11 @@ func (c *NetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Moves a persistent disk from one zone to another.", // "httpMethod": "POST", - // "id": "compute.networks.testIamPermissions", + // "id": "compute.projects.moveDisk", // "parameterOrder": [ - // "project", - // "resource" + // "project" // ], // "parameters": { // "project": { @@ -63672,44 +77639,44 @@ func (c *NetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/global/networks/{resource}/testIamPermissions", + // "path": "{project}/moveDisk", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "DiskMoveRequest" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.projects.disableXpnHost": +// method id "compute.projects.moveInstance": -type ProjectsDisableXpnHostCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsMoveInstanceCall struct { + s *Service + project string + instancemoverequest *InstanceMoveRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// DisableXpnHost: Disable this project as a shared VPC host project. -func (r *ProjectsService) DisableXpnHost(project string) *ProjectsDisableXpnHostCall { - c := &ProjectsDisableXpnHostCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// MoveInstance: Moves an instance and its attached persistent disks +// from one zone to another. 
+func (r *ProjectsService) MoveInstance(project string, instancemoverequest *InstanceMoveRequest) *ProjectsMoveInstanceCall { + c := &ProjectsMoveInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.instancemoverequest = instancemoverequest return c } @@ -63727,7 +77694,7 @@ func (r *ProjectsService) DisableXpnHost(project string) *ProjectsDisableXpnHost // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ProjectsDisableXpnHostCall) RequestId(requestId string) *ProjectsDisableXpnHostCall { +func (c *ProjectsMoveInstanceCall) RequestId(requestId string) *ProjectsMoveInstanceCall { c.urlParams_.Set("requestId", requestId) return c } @@ -63735,7 +77702,7 @@ func (c *ProjectsDisableXpnHostCall) RequestId(requestId string) *ProjectsDisabl // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsDisableXpnHostCall) Fields(s ...googleapi.Field) *ProjectsDisableXpnHostCall { +func (c *ProjectsMoveInstanceCall) Fields(s ...googleapi.Field) *ProjectsMoveInstanceCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -63743,29 +77710,34 @@ func (c *ProjectsDisableXpnHostCall) Fields(s ...googleapi.Field) *ProjectsDisab // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsDisableXpnHostCall) Context(ctx context.Context) *ProjectsDisableXpnHostCall { +func (c *ProjectsMoveInstanceCall) Context(ctx context.Context) *ProjectsMoveInstanceCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsDisableXpnHostCall) Header() http.Header { +func (c *ProjectsMoveInstanceCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsDisableXpnHostCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsMoveInstanceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancemoverequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/disableXpnHost") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/moveInstance") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -63775,14 +77747,14 @@ func (c *ProjectsDisableXpnHostCall) doRequest(alt string) (*http.Response, erro return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.disableXpnHost" call. +// Do executes the "compute.projects.moveInstance" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
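// Editor's note (illustrative sketch, not part of the generated client):
// because Do returns an *Operation, callers typically poll until the
// operation reports DONE. A minimal sketch, assuming an authenticated
// *Service named svc, a context.Context named ctx, and that moveInstance
// produces a global operation retrievable via GlobalOperations.Get:
//
//	for op.Status != "DONE" {
//		time.Sleep(2 * time.Second)
//		op, err = svc.GlobalOperations.Get("my-project", op.Name).Context(ctx).Do()
//		if err != nil {
//			// handle the polling error
//		}
//	}
//	if op.Error != nil {
//		// the move failed; inspect op.Error.Errors for details
//	}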
-func (c *ProjectsDisableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -63813,9 +77785,9 @@ func (c *ProjectsDisableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Disable this project as a shared VPC host project.", + // "description": "Moves an instance and its attached persistent disks from one zone to another.", // "httpMethod": "POST", - // "id": "compute.projects.disableXpnHost", + // "id": "compute.projects.moveInstance", // "parameterOrder": [ // "project" // ], @@ -63833,7 +77805,10 @@ func (c *ProjectsDisableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operatio // "type": "string" // } // }, - // "path": "{project}/disableXpnHost", + // "path": "{project}/moveInstance", + // "request": { + // "$ref": "InstanceMoveRequest" + // }, // "response": { // "$ref": "Operation" // }, @@ -63845,23 +77820,24 @@ func (c *ProjectsDisableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operatio } -// method id "compute.projects.disableXpnResource": +// method id "compute.projects.setCommonInstanceMetadata": -type ProjectsDisableXpnResourceCall struct { - s *Service - project string - projectsdisablexpnresourcerequest *ProjectsDisableXpnResourceRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsSetCommonInstanceMetadataCall struct { + s *Service + project string + metadata *Metadata + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// DisableXpnResource: Disable a serivce resource (a.k.a service -// project) associated with this host project. -func (r *ProjectsService) DisableXpnResource(project string, projectsdisablexpnresourcerequest *ProjectsDisableXpnResourceRequest) *ProjectsDisableXpnResourceCall { - c := &ProjectsDisableXpnResourceCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetCommonInstanceMetadata: Sets metadata common to all instances +// within the specified project using the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/projects/setCommonInstanceMetadata +func (r *ProjectsService) SetCommonInstanceMetadata(project string, metadata *Metadata) *ProjectsSetCommonInstanceMetadataCall { + c := &ProjectsSetCommonInstanceMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.projectsdisablexpnresourcerequest = projectsdisablexpnresourcerequest + c.metadata = metadata return c } @@ -63879,7 +77855,7 @@ func (r *ProjectsService) DisableXpnResource(project string, projectsdisablexpnr // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ProjectsDisableXpnResourceCall) RequestId(requestId string) *ProjectsDisableXpnResourceCall { +func (c *ProjectsSetCommonInstanceMetadataCall) RequestId(requestId string) *ProjectsSetCommonInstanceMetadataCall { c.urlParams_.Set("requestId", requestId) return c } @@ -63887,7 +77863,7 @@ func (c *ProjectsDisableXpnResourceCall) RequestId(requestId string) *ProjectsDi // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
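// Editor's note (illustrative sketch, not part of the generated client):
// setCommonInstanceMetadata replaces the project-wide metadata wholesale, so
// callers usually read the current Metadata (including its fingerprint)
// first, modify the items, and send it back. A minimal sketch, assuming an
// authenticated *Service named svc and that the Metadata/MetadataItems field
// names below match this package:
//
//	proj, err := svc.Projects.Get("my-project").Context(ctx).Do()
//	if err != nil {
//		// handle the API error
//	}
//	md := proj.CommonInstanceMetadata
//	v := "bar"
//	md.Items = append(md.Items, &MetadataItems{Key: "foo", Value: &v})
//	op, err := svc.Projects.SetCommonInstanceMetadata("my-project", md).Context(ctx).Do()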
-func (c *ProjectsDisableXpnResourceCall) Fields(s ...googleapi.Field) *ProjectsDisableXpnResourceCall { +func (c *ProjectsSetCommonInstanceMetadataCall) Fields(s ...googleapi.Field) *ProjectsSetCommonInstanceMetadataCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -63895,34 +77871,34 @@ func (c *ProjectsDisableXpnResourceCall) Fields(s ...googleapi.Field) *ProjectsD // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsDisableXpnResourceCall) Context(ctx context.Context) *ProjectsDisableXpnResourceCall { +func (c *ProjectsSetCommonInstanceMetadataCall) Context(ctx context.Context) *ProjectsSetCommonInstanceMetadataCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsDisableXpnResourceCall) Header() http.Header { +func (c *ProjectsSetCommonInstanceMetadataCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsDisableXpnResourceCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsSetCommonInstanceMetadataCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectsdisablexpnresourcerequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.metadata) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/disableXpnResource") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setCommonInstanceMetadata") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -63932,14 +77908,14 @@ func (c *ProjectsDisableXpnResourceCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.disableXpnResource" call. +// Do executes the "compute.projects.setCommonInstanceMetadata" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsDisableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -63970,9 +77946,9 @@ func (c *ProjectsDisableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Disable a serivce resource (a.k.a service project) associated with this host project.", + // "description": "Sets metadata common to all instances within the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.projects.disableXpnResource", + // "id": "compute.projects.setCommonInstanceMetadata", // "parameterOrder": [ // "project" // ], @@ -63990,9 +77966,9 @@ func (c *ProjectsDisableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Oper // "type": "string" // } // }, - // "path": "{project}/disableXpnResource", + // "path": "{project}/setCommonInstanceMetadata", // "request": { - // "$ref": "ProjectsDisableXpnResourceRequest" + // "$ref": "Metadata" // }, // "response": { // "$ref": "Operation" @@ -64005,20 +77981,25 @@ func (c *ProjectsDisableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Oper } -// method id "compute.projects.enableXpnHost": +// method id "compute.projects.setDefaultNetworkTier": -type ProjectsEnableXpnHostCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsSetDefaultNetworkTierCall struct { + s *Service + project string + projectssetdefaultnetworktierrequest *ProjectsSetDefaultNetworkTierRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// EnableXpnHost: Enable this project as a shared VPC host project. -func (r *ProjectsService) EnableXpnHost(project string) *ProjectsEnableXpnHostCall { - c := &ProjectsEnableXpnHostCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetDefaultNetworkTier: Sets the default network tier of the project. +// The default network tier is used when an +// address/forwardingRule/instance is created without specifying the +// network tier field. +func (r *ProjectsService) SetDefaultNetworkTier(project string, projectssetdefaultnetworktierrequest *ProjectsSetDefaultNetworkTierRequest) *ProjectsSetDefaultNetworkTierCall { + c := &ProjectsSetDefaultNetworkTierCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.projectssetdefaultnetworktierrequest = projectssetdefaultnetworktierrequest return c } @@ -64036,7 +78017,7 @@ func (r *ProjectsService) EnableXpnHost(project string) *ProjectsEnableXpnHostCa // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ProjectsEnableXpnHostCall) RequestId(requestId string) *ProjectsEnableXpnHostCall { +func (c *ProjectsSetDefaultNetworkTierCall) RequestId(requestId string) *ProjectsSetDefaultNetworkTierCall { c.urlParams_.Set("requestId", requestId) return c } @@ -64044,7 +78025,7 @@ func (c *ProjectsEnableXpnHostCall) RequestId(requestId string) *ProjectsEnableX // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
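// Editor's note (illustrative sketch, not part of the generated client): a
// minimal example of setting the project-wide default network tier, assuming
// an authenticated *Service named svc and that "PREMIUM"/"STANDARD" are the
// tier values accepted by this API version:
//
//	op, err := svc.Projects.SetDefaultNetworkTier("my-project",
//		&ProjectsSetDefaultNetworkTierRequest{NetworkTier: "PREMIUM"},
//	).Context(ctx).Do()
//	if err != nil {
//		// handle the API error
//	}
//	_ = op // poll the returned *Operation until it completes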
-func (c *ProjectsEnableXpnHostCall) Fields(s ...googleapi.Field) *ProjectsEnableXpnHostCall { +func (c *ProjectsSetDefaultNetworkTierCall) Fields(s ...googleapi.Field) *ProjectsSetDefaultNetworkTierCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -64052,29 +78033,34 @@ func (c *ProjectsEnableXpnHostCall) Fields(s ...googleapi.Field) *ProjectsEnable // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsEnableXpnHostCall) Context(ctx context.Context) *ProjectsEnableXpnHostCall { +func (c *ProjectsSetDefaultNetworkTierCall) Context(ctx context.Context) *ProjectsSetDefaultNetworkTierCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsEnableXpnHostCall) Header() http.Header { +func (c *ProjectsSetDefaultNetworkTierCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsEnableXpnHostCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsSetDefaultNetworkTierCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectssetdefaultnetworktierrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/enableXpnHost") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setDefaultNetworkTier") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -64084,14 +78070,14 @@ func (c *ProjectsEnableXpnHostCall) doRequest(alt string) (*http.Response, error return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.enableXpnHost" call. +// Do executes the "compute.projects.setDefaultNetworkTier" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsEnableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ProjectsSetDefaultNetworkTierCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -64122,9 +78108,9 @@ func (c *ProjectsEnableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Enable this project as a shared VPC host project.", + // "description": "Sets the default network tier of the project. 
The default network tier is used when an address/forwardingRule/instance is created without specifying the network tier field.", // "httpMethod": "POST", - // "id": "compute.projects.enableXpnHost", + // "id": "compute.projects.setDefaultNetworkTier", // "parameterOrder": [ // "project" // ], @@ -64142,7 +78128,10 @@ func (c *ProjectsEnableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation // "type": "string" // } // }, - // "path": "{project}/enableXpnHost", + // "path": "{project}/setDefaultNetworkTier", + // "request": { + // "$ref": "ProjectsSetDefaultNetworkTierRequest" + // }, // "response": { // "$ref": "Operation" // }, @@ -64154,24 +78143,24 @@ func (c *ProjectsEnableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.projects.enableXpnResource": +// method id "compute.projects.setDefaultServiceAccount": -type ProjectsEnableXpnResourceCall struct { - s *Service - project string - projectsenablexpnresourcerequest *ProjectsEnableXpnResourceRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsSetDefaultServiceAccountCall struct { + s *Service + project string + projectssetdefaultserviceaccountrequest *ProjectsSetDefaultServiceAccountRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// EnableXpnResource: Enable service resource (a.k.a service project) -// for a host project, so that subnets in the host project can be used -// by instances in the service project. -func (r *ProjectsService) EnableXpnResource(project string, projectsenablexpnresourcerequest *ProjectsEnableXpnResourceRequest) *ProjectsEnableXpnResourceCall { - c := &ProjectsEnableXpnResourceCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetDefaultServiceAccount: Sets the default service account of the +// project. The default service account is used when a VM instance is +// created with the service account email address set to "default". +func (r *ProjectsService) SetDefaultServiceAccount(project string, projectssetdefaultserviceaccountrequest *ProjectsSetDefaultServiceAccountRequest) *ProjectsSetDefaultServiceAccountCall { + c := &ProjectsSetDefaultServiceAccountCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.projectsenablexpnresourcerequest = projectsenablexpnresourcerequest + c.projectssetdefaultserviceaccountrequest = projectssetdefaultserviceaccountrequest return c } @@ -64189,7 +78178,7 @@ func (r *ProjectsService) EnableXpnResource(project string, projectsenablexpnres // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ProjectsEnableXpnResourceCall) RequestId(requestId string) *ProjectsEnableXpnResourceCall { +func (c *ProjectsSetDefaultServiceAccountCall) RequestId(requestId string) *ProjectsSetDefaultServiceAccountCall { c.urlParams_.Set("requestId", requestId) return c } @@ -64197,7 +78186,7 @@ func (c *ProjectsEnableXpnResourceCall) RequestId(requestId string) *ProjectsEna // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
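// Editor's note (illustrative sketch, not part of the generated client): a
// minimal example of changing the service account that instances receive when
// created with the "default" email, assuming an authenticated *Service named
// svc and that ProjectsSetDefaultServiceAccountRequest carries an Email field
// in this API version:
//
//	op, err := svc.Projects.SetDefaultServiceAccount("my-project",
//		&ProjectsSetDefaultServiceAccountRequest{
//			Email: "sa@my-project.iam.gserviceaccount.com", // hypothetical
//		},
//	).Context(ctx).Do()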
-func (c *ProjectsEnableXpnResourceCall) Fields(s ...googleapi.Field) *ProjectsEnableXpnResourceCall { +func (c *ProjectsSetDefaultServiceAccountCall) Fields(s ...googleapi.Field) *ProjectsSetDefaultServiceAccountCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -64205,34 +78194,34 @@ func (c *ProjectsEnableXpnResourceCall) Fields(s ...googleapi.Field) *ProjectsEn // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsEnableXpnResourceCall) Context(ctx context.Context) *ProjectsEnableXpnResourceCall { +func (c *ProjectsSetDefaultServiceAccountCall) Context(ctx context.Context) *ProjectsSetDefaultServiceAccountCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsEnableXpnResourceCall) Header() http.Header { +func (c *ProjectsSetDefaultServiceAccountCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsEnableXpnResourceCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsSetDefaultServiceAccountCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectsenablexpnresourcerequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectssetdefaultserviceaccountrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/enableXpnResource") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setDefaultServiceAccount") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -64242,14 +78231,14 @@ func (c *ProjectsEnableXpnResourceCall) doRequest(alt string) (*http.Response, e return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.enableXpnResource" call. +// Do executes the "compute.projects.setDefaultServiceAccount" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsEnableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ProjectsSetDefaultServiceAccountCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -64280,9 +78269,9 @@ func (c *ProjectsEnableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Opera } return ret, nil // { - // "description": "Enable service resource (a.k.a service project) for a host project, so that subnets in the host project can be used by instances in the service project.", + // "description": "Sets the default service account of the project. 
The default service account is used when a VM instance is created with the service account email address set to \"default\".", // "httpMethod": "POST", - // "id": "compute.projects.enableXpnResource", + // "id": "compute.projects.setDefaultServiceAccount", // "parameterOrder": [ // "project" // ], @@ -64300,9 +78289,9 @@ func (c *ProjectsEnableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Opera // "type": "string" // } // }, - // "path": "{project}/enableXpnResource", + // "path": "{project}/setDefaultServiceAccount", // "request": { - // "$ref": "ProjectsEnableXpnResourceRequest" + // "$ref": "ProjectsSetDefaultServiceAccountRequest" // }, // "response": { // "$ref": "Operation" @@ -64315,74 +78304,89 @@ func (c *ProjectsEnableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Opera } -// method id "compute.projects.get": +// method id "compute.projects.setUsageExportBucket": -type ProjectsGetCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ProjectsSetUsageExportBucketCall struct { + s *Service + project string + usageexportlocation *UsageExportLocation + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified Project resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/projects/get -func (r *ProjectsService) Get(project string) *ProjectsGetCall { - c := &ProjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetUsageExportBucket: Enables the usage export feature and sets the +// usage export bucket where reports are stored. If you provide an empty +// request body using this method, the usage export feature will be +// disabled. +// For details, see https://cloud.google.com/compute/docs/reference/latest/projects/setUsageExportBucket +func (r *ProjectsService) SetUsageExportBucket(project string, usageexportlocation *UsageExportLocation) *ProjectsSetUsageExportBucketCall { + c := &ProjectsSetUsageExportBucketCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.usageexportlocation = usageexportlocation + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ProjectsSetUsageExportBucketCall) RequestId(requestId string) *ProjectsSetUsageExportBucketCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
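// Editor's note (illustrative sketch, not part of the generated client): a
// minimal example of enabling usage export, and of disabling it again by
// sending an empty location, assuming an authenticated *Service named svc, a
// pre-existing Cloud Storage bucket, and the UsageExportLocation field names
// below:
//
//	op, err := svc.Projects.SetUsageExportBucket("my-project",
//		&UsageExportLocation{
//			BucketName:       "my-usage-bucket", // hypothetical bucket
//			ReportNamePrefix: "usage",
//		},
//	).Context(ctx).Do()
//
//	// Sending an empty UsageExportLocation disables the feature again.
//	op, err = svc.Projects.SetUsageExportBucket("my-project", &UsageExportLocation{}).Context(ctx).Do()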
-func (c *ProjectsGetCall) Fields(s ...googleapi.Field) *ProjectsGetCall { +func (c *ProjectsSetUsageExportBucketCall) Fields(s ...googleapi.Field) *ProjectsSetUsageExportBucketCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsGetCall) IfNoneMatch(entityTag string) *ProjectsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsGetCall) Context(ctx context.Context) *ProjectsGetCall { +func (c *ProjectsSetUsageExportBucketCall) Context(ctx context.Context) *ProjectsSetUsageExportBucketCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsGetCall) Header() http.Header { +func (c *ProjectsSetUsageExportBucketCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsSetUsageExportBucketCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.usageexportlocation) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setUsageExportBucket") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, @@ -64390,14 +78394,14 @@ func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.get" call. -// Exactly one of *Project or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Project.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { +// Do executes the "compute.projects.setUsageExportBucket" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -64416,7 +78420,7 @@ func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Project{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -64428,9 +78432,9 @@ func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { } return ret, nil // { - // "description": "Returns the specified Project resource.", - // "httpMethod": "GET", - // "id": "compute.projects.get", + // "description": "Enables the usage export feature and sets the usage export bucket where reports are stored. If you provide an empty request body using this method, the usage export feature will be disabled.", + // "httpMethod": "POST", + // "id": "compute.projects.setUsageExportBucket", // "parameterOrder": [ // "project" // ], @@ -64441,104 +78445,124 @@ func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}", + // "path": "{project}/setUsageExportBucket", + // "request": { + // "$ref": "UsageExportLocation" + // }, // "response": { - // "$ref": "Project" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "compute.projects.getXpnHost": +// method id "compute.regionAutoscalers.delete": -type ProjectsGetXpnHostCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionAutoscalersDeleteCall struct { + s *Service + project string + region string + autoscaler string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetXpnHost: Get the shared VPC host project that this project links -// to. May be empty if no link exists. -func (r *ProjectsService) GetXpnHost(project string) *ProjectsGetXpnHostCall { - c := &ProjectsGetXpnHostCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified autoscaler. 
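// Editor's note (illustrative sketch, not part of the generated client): a
// minimal example of deleting a regional autoscaler with an idempotency
// token, assuming an authenticated *Service named svc, hypothetical resource
// names, and a caller-generated UUID string in requestID:
//
//	op, err := svc.RegionAutoscalers.Delete("my-project", "us-central1", "my-autoscaler").
//		RequestId(requestID).
//		Context(ctx).
//		Do()
//	if err != nil {
//		// the call can be retried with the same requestID; the server
//		// ignores a duplicate request it has already completed
//	}
//	_ = op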
+func (r *RegionAutoscalersService) Delete(project string, region string, autoscaler string) *RegionAutoscalersDeleteCall { + c := &RegionAutoscalersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.region = region + c.autoscaler = autoscaler + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionAutoscalersDeleteCall) RequestId(requestId string) *RegionAutoscalersDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsGetXpnHostCall) Fields(s ...googleapi.Field) *ProjectsGetXpnHostCall { +func (c *RegionAutoscalersDeleteCall) Fields(s ...googleapi.Field) *RegionAutoscalersDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsGetXpnHostCall) IfNoneMatch(entityTag string) *ProjectsGetXpnHostCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsGetXpnHostCall) Context(ctx context.Context) *ProjectsGetXpnHostCall { +func (c *RegionAutoscalersDeleteCall) Context(ctx context.Context) *RegionAutoscalersDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsGetXpnHostCall) Header() http.Header { +func (c *RegionAutoscalersDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsGetXpnHostCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionAutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/getXpnHost") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers/{autoscaler}") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "autoscaler": c.autoscaler, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.getXpnHost" call. -// Exactly one of *Project or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Project.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsGetXpnHostCall) Do(opts ...googleapi.CallOption) (*Project, error) { +// Do executes the "compute.regionAutoscalers.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionAutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -64557,7 +78581,7 @@ func (c *ProjectsGetXpnHostCall) Do(opts ...googleapi.CallOption) (*Project, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Project{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -64569,24 +78593,45 @@ func (c *ProjectsGetXpnHostCall) Do(opts ...googleapi.CallOption) (*Project, err } return ret, nil // { - // "description": "Get the shared VPC host project that this project links to. May be empty if no link exists.", - // "httpMethod": "GET", - // "id": "compute.projects.getXpnHost", + // "description": "Deletes the specified autoscaler.", + // "httpMethod": "DELETE", + // "id": "compute.regionAutoscalers.delete", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "autoscaler" // ], // "parameters": { + // "autoscaler": { + // "description": "Name of the autoscaler to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/getXpnHost", + // "path": "{project}/regions/{region}/autoscalers/{autoscaler}", // "response": { - // "$ref": "Project" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -64596,53 +78641,32 @@ func (c *ProjectsGetXpnHostCall) Do(opts ...googleapi.CallOption) (*Project, err } -// method id "compute.projects.getXpnResources": +// method id "compute.regionAutoscalers.get": -type ProjectsGetXpnResourcesCall struct { +type RegionAutoscalersGetCall struct { s *Service project string + region string + autoscaler string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// GetXpnResources: Get service resources (a.k.a service project) -// associated with this host project. -func (r *ProjectsService) GetXpnResources(project string) *ProjectsGetXpnResourcesCall { - c := &ProjectsGetXpnResourcesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified autoscaler. +func (r *RegionAutoscalersService) Get(project string, region string, autoscaler string) *RegionAutoscalersGetCall { + c := &RegionAutoscalersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - return c -} - -// Filter sets the optional parameter "filter": -func (c *ProjectsGetXpnResourcesCall) Filter(filter string) *ProjectsGetXpnResourcesCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": -func (c *ProjectsGetXpnResourcesCall) MaxResults(maxResults int64) *ProjectsGetXpnResourcesCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "order_by": -func (c *ProjectsGetXpnResourcesCall) OrderBy(orderBy string) *ProjectsGetXpnResourcesCall { - c.urlParams_.Set("order_by", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": -func (c *ProjectsGetXpnResourcesCall) PageToken(pageToken string) *ProjectsGetXpnResourcesCall { - c.urlParams_.Set("pageToken", pageToken) + c.region = region + c.autoscaler = autoscaler return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsGetXpnResourcesCall) Fields(s ...googleapi.Field) *ProjectsGetXpnResourcesCall { +func (c *RegionAutoscalersGetCall) Fields(s ...googleapi.Field) *RegionAutoscalersGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -64652,7 +78676,7 @@ func (c *ProjectsGetXpnResourcesCall) Fields(s ...googleapi.Field) *ProjectsGetX // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
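Because this hunk only shows generated plumbing, a short usage sketch may help reviewers see what the new regionAutoscalers surface looks like from calling code. The snippet below is illustrative only and not part of the vendored file; the import path (google.golang.org/api/compute/v0.beta), the deleteRegionAutoscaler helper name, and the use of github.com/google/uuid to produce the requestId are assumptions rather than anything this diff pins down.

package example

import (
	"context"
	"log"

	"github.com/google/uuid"
	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v0.beta" // assumed import path for this generated file
)

// deleteRegionAutoscaler is an illustrative wrapper around
// compute.regionAutoscalers.delete. The requestId makes a retried DELETE
// idempotent: per the generated comment above, the server ignores a second
// request carrying the same UUID once the first one has completed.
func deleteRegionAutoscaler(ctx context.Context, project, region, name string) error {
	httpClient, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		return err
	}
	svc, err := compute.New(httpClient) // constructor used by this generation of the client
	if err != nil {
		return err
	}
	op, err := svc.RegionAutoscalers.Delete(project, region, name).
		RequestId(uuid.New().String()). // any valid, non-zero UUID
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("compute.regionAutoscalers.delete started operation %s (status %s)", op.Name, op.Status)
	return nil
}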
-func (c *ProjectsGetXpnResourcesCall) IfNoneMatch(entityTag string) *ProjectsGetXpnResourcesCall { +func (c *RegionAutoscalersGetCall) IfNoneMatch(entityTag string) *RegionAutoscalersGetCall { c.ifNoneMatch_ = entityTag return c } @@ -64660,21 +78684,21 @@ func (c *ProjectsGetXpnResourcesCall) IfNoneMatch(entityTag string) *ProjectsGet // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsGetXpnResourcesCall) Context(ctx context.Context) *ProjectsGetXpnResourcesCall { +func (c *RegionAutoscalersGetCall) Context(ctx context.Context) *RegionAutoscalersGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsGetXpnResourcesCall) Header() http.Header { +func (c *RegionAutoscalersGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsGetXpnResourcesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionAutoscalersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -64685,24 +78709,26 @@ func (c *ProjectsGetXpnResourcesCall) doRequest(alt string) (*http.Response, err } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/getXpnResources") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers/{autoscaler}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "autoscaler": c.autoscaler, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.getXpnResources" call. -// Exactly one of *ProjectsGetXpnResources or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ProjectsGetXpnResources.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsGetXpnResourcesCall) Do(opts ...googleapi.CallOption) (*ProjectsGetXpnResources, error) { +// Do executes the "compute.regionAutoscalers.get" call. +// Exactly one of *Autoscaler or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Autoscaler.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionAutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -64721,7 +78747,7 @@ func (c *ProjectsGetXpnResourcesCall) Do(opts ...googleapi.CallOption) (*Project if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ProjectsGetXpnResources{ + ret := &Autoscaler{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -64733,121 +78759,95 @@ func (c *ProjectsGetXpnResourcesCall) Do(opts ...googleapi.CallOption) (*Project } return ret, nil // { - // "description": "Get service resources (a.k.a service project) associated with this host project.", + // "description": "Returns the specified autoscaler.", // "httpMethod": "GET", - // "id": "compute.projects.getXpnResources", + // "id": "compute.regionAutoscalers.get", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "autoscaler" // ], // "parameters": { - // "filter": { - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "order_by": { - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "location": "query", + // "autoscaler": { + // "description": "Name of the autoscaler to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, // "project": { // "description": "Project ID for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/getXpnResources", + // "path": "{project}/regions/{region}/autoscalers/{autoscaler}", // "response": { - // "$ref": "ProjectsGetXpnResources" + // "$ref": "Autoscaler" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *ProjectsGetXpnResourcesCall) Pages(ctx context.Context, f func(*ProjectsGetXpnResources) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.projects.listXpnHosts": +// method id "compute.regionAutoscalers.insert": -type ProjectsListXpnHostsCall struct { - s *Service - project string - projectslistxpnhostsrequest *ProjectsListXpnHostsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionAutoscalersInsertCall struct { + s *Service + project string + region string + autoscaler *Autoscaler + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ListXpnHosts: List all shared VPC host projects visible to the user -// in an organization. -func (r *ProjectsService) ListXpnHosts(project string, projectslistxpnhostsrequest *ProjectsListXpnHostsRequest) *ProjectsListXpnHostsCall { - c := &ProjectsListXpnHostsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates an autoscaler in the specified project using the data +// included in the request. +func (r *RegionAutoscalersService) Insert(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersInsertCall { + c := &RegionAutoscalersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.projectslistxpnhostsrequest = projectslistxpnhostsrequest - return c -} - -// Filter sets the optional parameter "filter": -func (c *ProjectsListXpnHostsCall) Filter(filter string) *ProjectsListXpnHostsCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": -func (c *ProjectsListXpnHostsCall) MaxResults(maxResults int64) *ProjectsListXpnHostsCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "order_by": -func (c *ProjectsListXpnHostsCall) OrderBy(orderBy string) *ProjectsListXpnHostsCall { - c.urlParams_.Set("order_by", orderBy) + c.region = region + c.autoscaler = autoscaler return c } -// PageToken sets the optional parameter "pageToken": -func (c *ProjectsListXpnHostsCall) PageToken(pageToken string) *ProjectsListXpnHostsCall { - c.urlParams_.Set("pageToken", pageToken) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionAutoscalersInsertCall) RequestId(requestId string) *RegionAutoscalersInsertCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsListXpnHostsCall) Fields(s ...googleapi.Field) *ProjectsListXpnHostsCall { +func (c *RegionAutoscalersInsertCall) Fields(s ...googleapi.Field) *RegionAutoscalersInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -64855,51 +78855,52 @@ func (c *ProjectsListXpnHostsCall) Fields(s ...googleapi.Field) *ProjectsListXpn // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsListXpnHostsCall) Context(ctx context.Context) *ProjectsListXpnHostsCall { +func (c *RegionAutoscalersInsertCall) Context(ctx context.Context) *RegionAutoscalersInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsListXpnHostsCall) Header() http.Header { +func (c *RegionAutoscalersInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsListXpnHostsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionAutoscalersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectslistxpnhostsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/listXpnHosts") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.listXpnHosts" call. -// Exactly one of *XpnHostList or error will be non-nil. Any non-2xx +// Do executes the "compute.regionAutoscalers.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *XpnHostList.ServerResponse.Header or (if a response was returned at +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsListXpnHostsCall) Do(opts ...googleapi.CallOption) (*XpnHostList, error) { +func (c *RegionAutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -64918,7 +78919,7 @@ func (c *ProjectsListXpnHostsCall) Do(opts ...googleapi.CallOption) (*XpnHostLis if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &XpnHostList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -64930,46 +78931,40 @@ func (c *ProjectsListXpnHostsCall) Do(opts ...googleapi.CallOption) (*XpnHostLis } return ret, nil // { - // "description": "List all shared VPC host projects visible to the user in an organization.", + // "description": "Creates an autoscaler in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.projects.listXpnHosts", + // "id": "compute.regionAutoscalers.insert", // "parameterOrder": [ - // "project" + // "project", + // "region" // ], // "parameters": { - // "filter": { - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "order_by": { - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/listXpnHosts", + // "path": "{project}/regions/{region}/autoscalers", // "request": { - // "$ref": "ProjectsListXpnHostsRequest" + // "$ref": "Autoscaler" // }, // "response": { - // "$ref": "XpnHostList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -64979,121 +78974,159 @@ func (c *ProjectsListXpnHostsCall) Do(opts ...googleapi.CallOption) (*XpnHostLis } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
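Continuing the illustration, here is a hedged sketch of the compute.regionAutoscalers.insert call generated above: the request body is an Autoscaler resource and the call returns a long-running Operation. The helper name, the particular Autoscaler/AutoscalingPolicy fields, and the assumption that a *compute.Service has already been constructed are illustrative, not part of this change.

package example

import (
	"context"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

// insertRegionAutoscaler sketches compute.regionAutoscalers.insert for a
// regional managed instance group identified by its URL.
func insertRegionAutoscaler(ctx context.Context, svc *compute.Service, project, region, migURL string) (*compute.Operation, error) {
	as := &compute.Autoscaler{
		Name:   "web-autoscaler", // hypothetical name
		Target: migURL,           // URL of the regional managed instance group to scale
		AutoscalingPolicy: &compute.AutoscalingPolicy{
			MinNumReplicas: 2,
			MaxNumReplicas: 10,
			CpuUtilization: &compute.AutoscalingPolicyCpuUtilization{
				UtilizationTarget: 0.6, // scale out above ~60% average CPU
			},
		},
	}
	return svc.RegionAutoscalers.Insert(project, region, as).Context(ctx).Do()
}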
-func (c *ProjectsListXpnHostsCall) Pages(ctx context.Context, f func(*XpnHostList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } +// method id "compute.regionAutoscalers.list": + +type RegionAutoscalersListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// method id "compute.projects.moveDisk": +// List: Retrieves a list of autoscalers contained within the specified +// region. +func (r *RegionAutoscalersService) List(project string, region string) *RegionAutoscalersListCall { + c := &RegionAutoscalersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} -type ProjectsMoveDiskCall struct { - s *Service - project string - diskmoverequest *DiskMoveRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *RegionAutoscalersListCall) Filter(filter string) *RegionAutoscalersListCall { + c.urlParams_.Set("filter", filter) + return c } -// MoveDisk: Moves a persistent disk from one zone to another. -func (r *ProjectsService) MoveDisk(project string, diskmoverequest *DiskMoveRequest) *ProjectsMoveDiskCall { - c := &ProjectsMoveDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.diskmoverequest = diskmoverequest +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. 
+// (Default: 500) +func (c *RegionAutoscalersListCall) MaxResults(maxResults int64) *RegionAutoscalersListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *ProjectsMoveDiskCall) RequestId(requestId string) *ProjectsMoveDiskCall { - c.urlParams_.Set("requestId", requestId) +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *RegionAutoscalersListCall) OrderBy(orderBy string) *RegionAutoscalersListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *RegionAutoscalersListCall) PageToken(pageToken string) *RegionAutoscalersListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsMoveDiskCall) Fields(s ...googleapi.Field) *ProjectsMoveDiskCall { +func (c *RegionAutoscalersListCall) Fields(s ...googleapi.Field) *RegionAutoscalersListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionAutoscalersListCall) IfNoneMatch(entityTag string) *RegionAutoscalersListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsMoveDiskCall) Context(ctx context.Context) *ProjectsMoveDiskCall { +func (c *RegionAutoscalersListCall) Context(ctx context.Context) *RegionAutoscalersListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ProjectsMoveDiskCall) Header() http.Header { +func (c *RegionAutoscalersListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsMoveDiskCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionAutoscalersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.diskmoverequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/moveDisk") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.moveDisk" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsMoveDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionAutoscalers.list" call. +// Exactly one of *RegionAutoscalerList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *RegionAutoscalerList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAutoscalerList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -65112,7 +79145,7 @@ func (c *ProjectsMoveDiskCall) Do(opts ...googleapi.CallOption) (*Operation, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &RegionAutoscalerList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -65124,13 +79157,37 @@ func (c *ProjectsMoveDiskCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Moves a persistent disk from one zone to another.", - // "httpMethod": "POST", - // "id": "compute.projects.moveDisk", + // "description": "Retrieves a list of autoscalers contained within the specified region.", + // "httpMethod": "GET", + // "id": "compute.regionAutoscalers.list", // "parameterOrder": [ - // "project" + // "project", + // "region" // ], // "parameters": { + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -65138,44 +79195,75 @@ func (c *ProjectsMoveDiskCall) Do(opts ...googleapi.CallOption) (*Operation, err // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/moveDisk", - // "request": { - // "$ref": "DiskMoveRequest" - // }, + // "path": "{project}/regions/{region}/autoscalers", // "response": { - // "$ref": "Operation" + // "$ref": "RegionAutoscalerList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.projects.moveInstance": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionAutoscalersListCall) Pages(ctx context.Context, f func(*RegionAutoscalerList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type ProjectsMoveInstanceCall struct { - s *Service - project string - instancemoverequest *InstanceMoveRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.regionAutoscalers.patch": + +type RegionAutoscalersPatchCall struct { + s *Service + project string + region string + autoscaler *Autoscaler + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// MoveInstance: Moves an instance and its attached persistent disks -// from one zone to another. -func (r *ProjectsService) MoveInstance(project string, instancemoverequest *InstanceMoveRequest) *ProjectsMoveInstanceCall { - c := &ProjectsMoveInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates an autoscaler in the specified project using the data +// included in the request. This method supports PATCH semantics and +// uses the JSON merge patch format and processing rules. +func (r *RegionAutoscalersService) Patch(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersPatchCall { + c := &RegionAutoscalersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.instancemoverequest = instancemoverequest + c.region = region + c.autoscaler = autoscaler + return c +} + +// Autoscaler sets the optional parameter "autoscaler": Name of the +// autoscaler to patch. +func (c *RegionAutoscalersPatchCall) Autoscaler(autoscaler string) *RegionAutoscalersPatchCall { + c.urlParams_.Set("autoscaler", autoscaler) return c } @@ -65193,7 +79281,7 @@ func (r *ProjectsService) MoveInstance(project string, instancemoverequest *Inst // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
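The list call above, together with the generated Pages helper, handles pagination by feeding each nextPageToken back into the next request. A minimal sketch, assuming the same import alias and that RegionAutoscalerList exposes the usual Items slice:

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

// listRegionAutoscalers sketches compute.regionAutoscalers.list with the
// filter syntax documented above and the Pages helper for pagination.
func listRegionAutoscalers(ctx context.Context, svc *compute.Service, project, region string) error {
	call := svc.RegionAutoscalers.List(project, region).
		Filter("name eq web-.*"). // field_name comparison_string literal_string
		MaxResults(100)
	return call.Pages(ctx, func(page *compute.RegionAutoscalerList) error {
		for _, as := range page.Items {
			fmt.Printf("%s -> %s\n", as.Name, as.Target)
		}
		return nil // returning a non-nil error here halts the iteration
	})
}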
-func (c *ProjectsMoveInstanceCall) RequestId(requestId string) *ProjectsMoveInstanceCall { +func (c *RegionAutoscalersPatchCall) RequestId(requestId string) *RegionAutoscalersPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -65201,7 +79289,7 @@ func (c *ProjectsMoveInstanceCall) RequestId(requestId string) *ProjectsMoveInst // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsMoveInstanceCall) Fields(s ...googleapi.Field) *ProjectsMoveInstanceCall { +func (c *RegionAutoscalersPatchCall) Fields(s ...googleapi.Field) *RegionAutoscalersPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -65209,51 +79297,52 @@ func (c *ProjectsMoveInstanceCall) Fields(s ...googleapi.Field) *ProjectsMoveIns // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsMoveInstanceCall) Context(ctx context.Context) *ProjectsMoveInstanceCall { +func (c *RegionAutoscalersPatchCall) Context(ctx context.Context) *RegionAutoscalersPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsMoveInstanceCall) Header() http.Header { +func (c *RegionAutoscalersPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsMoveInstanceCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionAutoscalersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancemoverequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/moveInstance") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("PATCH", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.moveInstance" call. +// Do executes the "compute.regionAutoscalers.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionAutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -65284,13 +79373,20 @@ func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Moves an instance and its attached persistent disks from one zone to another.", - // "httpMethod": "POST", - // "id": "compute.projects.moveInstance", + // "description": "Updates an autoscaler in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.regionAutoscalers.patch", // "parameterOrder": [ - // "project" + // "project", + // "region" // ], // "parameters": { + // "autoscaler": { + // "description": "Name of the autoscaler to patch.", + // "location": "query", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -65298,15 +79394,22 @@ func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/moveInstance", + // "path": "{project}/regions/{region}/autoscalers", // "request": { - // "$ref": "InstanceMoveRequest" + // "$ref": "Autoscaler" // }, // "response": { // "$ref": "Operation" @@ -65319,50 +79422,34 @@ func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.projects.setCommonInstanceMetadata": - -type ProjectsSetCommonInstanceMetadataCall struct { - s *Service - project string - metadata *Metadata - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} +// method id "compute.regionAutoscalers.testIamPermissions": -// SetCommonInstanceMetadata: Sets metadata common to all instances -// within the specified project using the data included in the request. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/projects/setCommonInstanceMetadata -func (r *ProjectsService) SetCommonInstanceMetadata(project string, metadata *Metadata) *ProjectsSetCommonInstanceMetadataCall { - c := &ProjectsSetCommonInstanceMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.metadata = metadata - return c +type RegionAutoscalersTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *ProjectsSetCommonInstanceMetadataCall) RequestId(requestId string) *ProjectsSetCommonInstanceMetadataCall { - c.urlParams_.Set("requestId", requestId) +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *RegionAutoscalersService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionAutoscalersTestIamPermissionsCall { + c := &RegionAutoscalersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsSetCommonInstanceMetadataCall) Fields(s ...googleapi.Field) *ProjectsSetCommonInstanceMetadataCall { +func (c *RegionAutoscalersTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionAutoscalersTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -65370,51 +79457,53 @@ func (c *ProjectsSetCommonInstanceMetadataCall) Fields(s ...googleapi.Field) *Pr // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsSetCommonInstanceMetadataCall) Context(ctx context.Context) *ProjectsSetCommonInstanceMetadataCall { +func (c *RegionAutoscalersTestIamPermissionsCall) Context(ctx context.Context) *RegionAutoscalersTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
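For the patch call shown above, note that the autoscaler name travels as the optional "autoscaler" query parameter rather than as a path segment, and the body only needs the fields being changed (JSON merge patch semantics). A hedged sketch; the helper name and the field chosen are illustrative:

package example

import (
	"context"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

// patchRegionAutoscaler sketches compute.regionAutoscalers.patch, raising
// only the maximum replica count of an existing autoscaler.
func patchRegionAutoscaler(ctx context.Context, svc *compute.Service, project, region, name string) (*compute.Operation, error) {
	patch := &compute.Autoscaler{
		AutoscalingPolicy: &compute.AutoscalingPolicy{
			MaxNumReplicas: 20, // only the field being changed
		},
	}
	return svc.RegionAutoscalers.Patch(project, region, patch).
		Autoscaler(name). // query parameter, set via the generated setter above
		Context(ctx).
		Do()
}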
-func (c *ProjectsSetCommonInstanceMetadataCall) Header() http.Header { +func (c *RegionAutoscalersTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsSetCommonInstanceMetadataCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionAutoscalersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.metadata) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setCommonInstanceMetadata") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.setCommonInstanceMetadata" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionAutoscalers.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionAutoscalersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -65433,7 +79522,7 @@ func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -65445,11 +79534,13 @@ func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Sets metadata common to all instances within the specified project using the data included in the request.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.projects.setCommonInstanceMetadata", + // "id": "compute.regionAutoscalers.testIamPermissions", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "resource" // ], // "parameters": { // "project": { @@ -65459,45 +79550,63 @@ func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/setCommonInstanceMetadata", + // "path": "{project}/regions/{region}/autoscalers/{resource}/testIamPermissions", // "request": { - // "$ref": "Metadata" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.projects.setDefaultServiceAccount": +// method id "compute.regionAutoscalers.update": -type ProjectsSetDefaultServiceAccountCall struct { - s *Service - project string - projectssetdefaultserviceaccountrequest *ProjectsSetDefaultServiceAccountRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionAutoscalersUpdateCall struct { + s *Service + project string + region string + autoscaler *Autoscaler + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetDefaultServiceAccount: Sets the default service account of the -// project. 
The default service account is used when a VM instance is -// created with the service account email address set to "default". -func (r *ProjectsService) SetDefaultServiceAccount(project string, projectssetdefaultserviceaccountrequest *ProjectsSetDefaultServiceAccountRequest) *ProjectsSetDefaultServiceAccountCall { - c := &ProjectsSetDefaultServiceAccountCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates an autoscaler in the specified project using the data +// included in the request. +func (r *RegionAutoscalersService) Update(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersUpdateCall { + c := &RegionAutoscalersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.projectssetdefaultserviceaccountrequest = projectssetdefaultserviceaccountrequest + c.region = region + c.autoscaler = autoscaler + return c +} + +// Autoscaler sets the optional parameter "autoscaler": Name of the +// autoscaler to update. +func (c *RegionAutoscalersUpdateCall) Autoscaler(autoscaler string) *RegionAutoscalersUpdateCall { + c.urlParams_.Set("autoscaler", autoscaler) return c } @@ -65515,7 +79624,7 @@ func (r *ProjectsService) SetDefaultServiceAccount(project string, projectssetde // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ProjectsSetDefaultServiceAccountCall) RequestId(requestId string) *ProjectsSetDefaultServiceAccountCall { +func (c *RegionAutoscalersUpdateCall) RequestId(requestId string) *RegionAutoscalersUpdateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -65523,7 +79632,7 @@ func (c *ProjectsSetDefaultServiceAccountCall) RequestId(requestId string) *Proj // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsSetDefaultServiceAccountCall) Fields(s ...googleapi.Field) *ProjectsSetDefaultServiceAccountCall { +func (c *RegionAutoscalersUpdateCall) Fields(s ...googleapi.Field) *RegionAutoscalersUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -65531,51 +79640,52 @@ func (c *ProjectsSetDefaultServiceAccountCall) Fields(s ...googleapi.Field) *Pro // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsSetDefaultServiceAccountCall) Context(ctx context.Context) *ProjectsSetDefaultServiceAccountCall { +func (c *RegionAutoscalersUpdateCall) Context(ctx context.Context) *RegionAutoscalersUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
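The new testIamPermissions call above can be exercised roughly as follows. The permission strings are examples of Compute Engine IAM permissions and may need adjusting; the helper name and the assumption that TestPermissionsResponse echoes back the granted subset are illustrative.

package example

import (
	"context"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

// checkAutoscalerPermissions sketches compute.regionAutoscalers.testIamPermissions:
// the response lists which of the requested permissions the caller holds on
// the named autoscaler.
func checkAutoscalerPermissions(ctx context.Context, svc *compute.Service, project, region, autoscaler string) ([]string, error) {
	req := &compute.TestPermissionsRequest{
		Permissions: []string{
			"compute.autoscalers.get",    // example permission names
			"compute.autoscalers.update",
		},
	}
	resp, err := svc.RegionAutoscalers.TestIamPermissions(project, region, autoscaler, req).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	return resp.Permissions, nil
}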
-func (c *ProjectsSetDefaultServiceAccountCall) Header() http.Header { +func (c *RegionAutoscalersUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsSetDefaultServiceAccountCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionAutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectssetdefaultserviceaccountrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setDefaultServiceAccount") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("PUT", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.setDefaultServiceAccount" call. +// Do executes the "compute.regionAutoscalers.update" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsSetDefaultServiceAccountCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionAutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -65606,13 +79716,20 @@ func (c *ProjectsSetDefaultServiceAccountCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Sets the default service account of the project. The default service account is used when a VM instance is created with the service account email address set to \"default\".", - // "httpMethod": "POST", - // "id": "compute.projects.setDefaultServiceAccount", + // "description": "Updates an autoscaler in the specified project using the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.regionAutoscalers.update", // "parameterOrder": [ - // "project" + // "project", + // "region" // ], // "parameters": { + // "autoscaler": { + // "description": "Name of the autoscaler to update.", + // "location": "query", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -65620,15 +79737,22 @@ func (c *ProjectsSetDefaultServiceAccountCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/setDefaultServiceAccount", + // "path": "{project}/regions/{region}/autoscalers", // "request": { - // "$ref": "ProjectsSetDefaultServiceAccountRequest" + // "$ref": "Autoscaler" // }, // "response": { // "$ref": "Operation" @@ -65641,26 +79765,24 @@ func (c *ProjectsSetDefaultServiceAccountCall) Do(opts ...googleapi.CallOption) } -// method id "compute.projects.setUsageExportBucket": +// method id "compute.regionBackendServices.delete": -type ProjectsSetUsageExportBucketCall struct { - s *Service - project string - usageexportlocation *UsageExportLocation - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionBackendServicesDeleteCall struct { + s *Service + project string + region string + backendService string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetUsageExportBucket: Enables the usage export feature and sets the -// usage export bucket where reports are stored. If you provide an empty -// request body using this method, the usage export feature will be -// disabled. -// For details, see https://cloud.google.com/compute/docs/reference/latest/projects/setUsageExportBucket -func (r *ProjectsService) SetUsageExportBucket(project string, usageexportlocation *UsageExportLocation) *ProjectsSetUsageExportBucketCall { - c := &ProjectsSetUsageExportBucketCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified regional BackendService resource. +func (r *RegionBackendServicesService) Delete(project string, region string, backendService string) *RegionBackendServicesDeleteCall { + c := &RegionBackendServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.usageexportlocation = usageexportlocation + c.region = region + c.backendService = backendService return c } @@ -65678,7 +79800,7 @@ func (r *ProjectsService) SetUsageExportBucket(project string, usageexportlocati // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ProjectsSetUsageExportBucketCall) RequestId(requestId string) *ProjectsSetUsageExportBucketCall { +func (c *RegionBackendServicesDeleteCall) RequestId(requestId string) *RegionBackendServicesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -65686,7 +79808,7 @@ func (c *ProjectsSetUsageExportBucketCall) RequestId(requestId string) *Projects // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *ProjectsSetUsageExportBucketCall) Fields(s ...googleapi.Field) *ProjectsSetUsageExportBucketCall { +func (c *RegionBackendServicesDeleteCall) Fields(s ...googleapi.Field) *RegionBackendServicesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -65694,51 +79816,48 @@ func (c *ProjectsSetUsageExportBucketCall) Fields(s ...googleapi.Field) *Project // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsSetUsageExportBucketCall) Context(ctx context.Context) *ProjectsSetUsageExportBucketCall { +func (c *RegionBackendServicesDeleteCall) Context(ctx context.Context) *RegionBackendServicesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsSetUsageExportBucketCall) Header() http.Header { +func (c *RegionBackendServicesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsSetUsageExportBucketCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionBackendServicesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.usageexportlocation) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setUsageExportBucket") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.setUsageExportBucket" call. +// Do executes the "compute.regionBackendServices.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionBackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -65769,13 +79888,22 @@ func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Enables the usage export feature and sets the usage export bucket where reports are stored. 
If you provide an empty request body using this method, the usage export feature will be disabled.", - // "httpMethod": "POST", - // "id": "compute.projects.setUsageExportBucket", + // "description": "Deletes the specified regional BackendService resource.", + // "httpMethod": "DELETE", + // "id": "compute.regionBackendServices.delete", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "backendService" // ], // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -65783,123 +79911,119 @@ func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Op // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/setUsageExportBucket", - // "request": { - // "$ref": "UsageExportLocation" - // }, + // "path": "{project}/regions/{region}/backendServices/{backendService}", // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.regionAutoscalers.delete": +// method id "compute.regionBackendServices.get": -type RegionAutoscalersDeleteCall struct { - s *Service - project string - region string - autoscaler string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionBackendServicesGetCall struct { + s *Service + project string + region string + backendService string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified autoscaler. -func (r *RegionAutoscalersService) Delete(project string, region string, autoscaler string) *RegionAutoscalersDeleteCall { - c := &RegionAutoscalersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified regional BackendService resource. 
+func (r *RegionBackendServicesService) Get(project string, region string, backendService string) *RegionBackendServicesGetCall { + c := &RegionBackendServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.autoscaler = autoscaler - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionAutoscalersDeleteCall) RequestId(requestId string) *RegionAutoscalersDeleteCall { - c.urlParams_.Set("requestId", requestId) + c.backendService = backendService return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionAutoscalersDeleteCall) Fields(s ...googleapi.Field) *RegionAutoscalersDeleteCall { +func (c *RegionBackendServicesGetCall) Fields(s ...googleapi.Field) *RegionBackendServicesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionBackendServicesGetCall) IfNoneMatch(entityTag string) *RegionBackendServicesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionAutoscalersDeleteCall) Context(ctx context.Context) *RegionAutoscalersDeleteCall { +func (c *RegionBackendServicesGetCall) Context(ctx context.Context) *RegionBackendServicesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionAutoscalersDeleteCall) Header() http.Header { +func (c *RegionBackendServicesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionAutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionBackendServicesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers/{autoscaler}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "autoscaler": c.autoscaler, + "project": c.project, + "region": c.region, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionAutoscalers.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.regionBackendServices.get" call. +// Exactly one of *BackendService or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionAutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// *BackendService.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionBackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendService, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -65918,7 +80042,7 @@ func (c *RegionAutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operati if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &BackendService{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -65930,17 +80054,17 @@ func (c *RegionAutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Deletes the specified autoscaler.", - // "httpMethod": "DELETE", - // "id": "compute.regionAutoscalers.delete", + // "description": "Returns the specified regional BackendService resource.", + // "httpMethod": "GET", + // "id": "compute.regionBackendServices.get", // "parameterOrder": [ // "project", // "region", - // "autoscaler" + // "backendService" // ], // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to delete.", + // "backendService": { + // "description": "Name of the BackendService resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -65959,113 +80083,103 @@ func (c *RegionAutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operati // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/autoscalers/{autoscaler}", + // "path": "{project}/regions/{region}/backendServices/{backendService}", // "response": { - // "$ref": "Operation" + // "$ref": "BackendService" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionAutoscalers.get": +// method id "compute.regionBackendServices.getHealth": -type RegionAutoscalersGetCall struct { - s *Service - project string - region string - autoscaler string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionBackendServicesGetHealthCall struct { + s *Service + project string + region string + backendService string + resourcegroupreference *ResourceGroupReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified autoscaler. -func (r *RegionAutoscalersService) Get(project string, region string, autoscaler string) *RegionAutoscalersGetCall { - c := &RegionAutoscalersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetHealth: Gets the most recent health check results for this +// regional BackendService. +func (r *RegionBackendServicesService) GetHealth(project string, region string, backendService string, resourcegroupreference *ResourceGroupReference) *RegionBackendServicesGetHealthCall { + c := &RegionBackendServicesGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.autoscaler = autoscaler + c.backendService = backendService + c.resourcegroupreference = resourcegroupreference return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionAutoscalersGetCall) Fields(s ...googleapi.Field) *RegionAutoscalersGetCall { +func (c *RegionBackendServicesGetHealthCall) Fields(s ...googleapi.Field) *RegionBackendServicesGetHealthCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionAutoscalersGetCall) IfNoneMatch(entityTag string) *RegionAutoscalersGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionAutoscalersGetCall) Context(ctx context.Context) *RegionAutoscalersGetCall { +func (c *RegionBackendServicesGetHealthCall) Context(ctx context.Context) *RegionBackendServicesGetHealthCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionAutoscalersGetCall) Header() http.Header { +func (c *RegionBackendServicesGetHealthCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionAutoscalersGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionBackendServicesGetHealthCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.resourcegroupreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers/{autoscaler}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}/getHealth") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "autoscaler": c.autoscaler, + "project": c.project, + "region": c.region, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionAutoscalers.get" call. -// Exactly one of *Autoscaler or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Autoscaler.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionAutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, error) { +// Do executes the "compute.regionBackendServices.getHealth" call. +// Exactly one of *BackendServiceGroupHealth or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *BackendServiceGroupHealth.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionBackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*BackendServiceGroupHealth, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -66084,7 +80198,7 @@ func (c *RegionAutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Autoscaler{ + ret := &BackendServiceGroupHealth{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -66096,24 +80210,23 @@ func (c *RegionAutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler } return ret, nil // { - // "description": "Returns the specified autoscaler.", - // "httpMethod": "GET", - // "id": "compute.regionAutoscalers.get", + // "description": "Gets the most recent health check results for this regional BackendService.", + // "httpMethod": "POST", + // "id": "compute.regionBackendServices.getHealth", // "parameterOrder": [ // "project", // "region", - // "autoscaler" + // "backendService" // ], // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to return.", + // "backendService": { + // "description": "Name of the BackendService resource to which the queried instance belongs.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, // "project": { - // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, @@ -66127,9 +80240,12 @@ func (c *RegionAutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler // "type": "string" // } // }, - // "path": "{project}/regions/{region}/autoscalers/{autoscaler}", + // "path": "{project}/regions/{region}/backendServices/{backendService}/getHealth", + // "request": { + // "$ref": "ResourceGroupReference" + // }, // "response": { - // "$ref": "Autoscaler" + // "$ref": "BackendServiceGroupHealth" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -66140,25 +80256,28 @@ func (c *RegionAutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler } -// method id "compute.regionAutoscalers.insert": +// method id "compute.regionBackendServices.insert": -type RegionAutoscalersInsertCall struct { - s *Service - project string - region string - autoscaler *Autoscaler - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionBackendServicesInsertCall struct { + s *Service + project string + region string + backendservice *BackendService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates an autoscaler in the specified project using the data -// included in the request. -func (r *RegionAutoscalersService) Insert(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersInsertCall { - c := &RegionAutoscalersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a regional BackendService resource in the specified +// project using the data included in the request. There are several +// restrictions and guidelines to keep in mind when creating a regional +// backend service. Read Restrictions and Guidelines for more +// information. 
+func (r *RegionBackendServicesService) Insert(project string, region string, backendservice *BackendService) *RegionBackendServicesInsertCall { + c := &RegionBackendServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.autoscaler = autoscaler + c.backendservice = backendservice return c } @@ -66176,7 +80295,7 @@ func (r *RegionAutoscalersService) Insert(project string, region string, autosca // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionAutoscalersInsertCall) RequestId(requestId string) *RegionAutoscalersInsertCall { +func (c *RegionBackendServicesInsertCall) RequestId(requestId string) *RegionBackendServicesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -66184,7 +80303,7 @@ func (c *RegionAutoscalersInsertCall) RequestId(requestId string) *RegionAutosca // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionAutoscalersInsertCall) Fields(s ...googleapi.Field) *RegionAutoscalersInsertCall { +func (c *RegionBackendServicesInsertCall) Fields(s ...googleapi.Field) *RegionBackendServicesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -66192,34 +80311,34 @@ func (c *RegionAutoscalersInsertCall) Fields(s ...googleapi.Field) *RegionAutosc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionAutoscalersInsertCall) Context(ctx context.Context) *RegionAutoscalersInsertCall { +func (c *RegionBackendServicesInsertCall) Context(ctx context.Context) *RegionBackendServicesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionAutoscalersInsertCall) Header() http.Header { +func (c *RegionBackendServicesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionAutoscalersInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionBackendServicesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -66230,14 +80349,14 @@ func (c *RegionAutoscalersInsertCall) doRequest(alt string) (*http.Response, err return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionAutoscalers.insert" call. +// Do executes the "compute.regionBackendServices.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionAutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionBackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -66268,9 +80387,9 @@ func (c *RegionAutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Creates an autoscaler in the specified project using the data included in the request.", + // "description": "Creates a regional BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a regional backend service. Read Restrictions and Guidelines for more information.", // "httpMethod": "POST", - // "id": "compute.regionAutoscalers.insert", + // "id": "compute.regionBackendServices.insert", // "parameterOrder": [ // "project", // "region" @@ -66296,9 +80415,9 @@ func (c *RegionAutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operati // "type": "string" // } // }, - // "path": "{project}/regions/{region}/autoscalers", + // "path": "{project}/regions/{region}/backendServices", // "request": { - // "$ref": "Autoscaler" + // "$ref": "BackendService" // }, // "response": { // "$ref": "Operation" @@ -66311,9 +80430,9 @@ func (c *RegionAutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.regionAutoscalers.list": +// method id "compute.regionBackendServices.list": -type RegionAutoscalersListCall struct { +type RegionBackendServicesListCall struct { s *Service project string region string @@ -66323,10 +80442,10 @@ type RegionAutoscalersListCall struct { header_ http.Header } -// List: Retrieves a list of autoscalers contained within the specified -// region. -func (r *RegionAutoscalersService) List(project string, region string) *RegionAutoscalersListCall { - c := &RegionAutoscalersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of regional BackendService resources +// available to the specified project in the given region. +func (r *RegionBackendServicesService) List(project string, region string) *RegionBackendServicesListCall { + c := &RegionBackendServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region return c @@ -66358,7 +80477,7 @@ func (r *RegionAutoscalersService) List(project string, region string) *RegionAu // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *RegionAutoscalersListCall) Filter(filter string) *RegionAutoscalersListCall { +func (c *RegionBackendServicesListCall) Filter(filter string) *RegionBackendServicesListCall { c.urlParams_.Set("filter", filter) return c } @@ -66369,7 +80488,7 @@ func (c *RegionAutoscalersListCall) Filter(filter string) *RegionAutoscalersList // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. 
// (Default: 500) -func (c *RegionAutoscalersListCall) MaxResults(maxResults int64) *RegionAutoscalersListCall { +func (c *RegionBackendServicesListCall) MaxResults(maxResults int64) *RegionBackendServicesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -66386,7 +80505,7 @@ func (c *RegionAutoscalersListCall) MaxResults(maxResults int64) *RegionAutoscal // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *RegionAutoscalersListCall) OrderBy(orderBy string) *RegionAutoscalersListCall { +func (c *RegionBackendServicesListCall) OrderBy(orderBy string) *RegionBackendServicesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -66394,7 +80513,7 @@ func (c *RegionAutoscalersListCall) OrderBy(orderBy string) *RegionAutoscalersLi // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *RegionAutoscalersListCall) PageToken(pageToken string) *RegionAutoscalersListCall { +func (c *RegionBackendServicesListCall) PageToken(pageToken string) *RegionBackendServicesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -66402,7 +80521,7 @@ func (c *RegionAutoscalersListCall) PageToken(pageToken string) *RegionAutoscale // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionAutoscalersListCall) Fields(s ...googleapi.Field) *RegionAutoscalersListCall { +func (c *RegionBackendServicesListCall) Fields(s ...googleapi.Field) *RegionBackendServicesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -66412,7 +80531,7 @@ func (c *RegionAutoscalersListCall) Fields(s ...googleapi.Field) *RegionAutoscal // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionAutoscalersListCall) IfNoneMatch(entityTag string) *RegionAutoscalersListCall { +func (c *RegionBackendServicesListCall) IfNoneMatch(entityTag string) *RegionBackendServicesListCall { c.ifNoneMatch_ = entityTag return c } @@ -66420,21 +80539,21 @@ func (c *RegionAutoscalersListCall) IfNoneMatch(entityTag string) *RegionAutosca // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionAutoscalersListCall) Context(ctx context.Context) *RegionAutoscalersListCall { +func (c *RegionBackendServicesListCall) Context(ctx context.Context) *RegionBackendServicesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionAutoscalersListCall) Header() http.Header { +func (c *RegionBackendServicesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionAutoscalersListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionBackendServicesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -66445,7 +80564,7 @@ func (c *RegionAutoscalersListCall) doRequest(alt string) (*http.Response, error } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders @@ -66456,14 +80575,14 @@ func (c *RegionAutoscalersListCall) doRequest(alt string) (*http.Response, error return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionAutoscalers.list" call. -// Exactly one of *RegionAutoscalerList or error will be non-nil. Any +// Do executes the "compute.regionBackendServices.list" call. +// Exactly one of *BackendServiceList or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *RegionAutoscalerList.ServerResponse.Header or (if a response was +// *BackendServiceList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAutoscalerList, error) { +func (c *RegionBackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServiceList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -66482,7 +80601,7 @@ func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAut if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RegionAutoscalerList{ + ret := &BackendServiceList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -66494,9 +80613,9 @@ func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAut } return ret, nil // { - // "description": "Retrieves a list of autoscalers contained within the specified region.", + // "description": "Retrieves the list of regional BackendService resources available to the specified project in the given region.", // "httpMethod": "GET", - // "id": "compute.regionAutoscalers.list", + // "id": "compute.regionBackendServices.list", // "parameterOrder": [ // "project", // "region" @@ -66540,9 +80659,9 @@ func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAut // "type": "string" // } // }, - // "path": "{project}/regions/{region}/autoscalers", + // "path": "{project}/regions/{region}/backendServices", // "response": { - // "$ref": "RegionAutoscalerList" + // "$ref": "BackendServiceList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -66556,7 +80675,7 @@ func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAut // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. 
// The provided context supersedes any context provided to the Context method. -func (c *RegionAutoscalersListCall) Pages(ctx context.Context, f func(*RegionAutoscalerList) error) error { +func (c *RegionBackendServicesListCall) Pages(ctx context.Context, f func(*BackendServiceList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -66574,33 +80693,31 @@ func (c *RegionAutoscalersListCall) Pages(ctx context.Context, f func(*RegionAut } } -// method id "compute.regionAutoscalers.patch": +// method id "compute.regionBackendServices.patch": -type RegionAutoscalersPatchCall struct { - s *Service - project string - region string - autoscaler *Autoscaler - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionBackendServicesPatchCall struct { + s *Service + project string + region string + backendService string + backendservice *BackendService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates an autoscaler in the specified project using the data -// included in the request. This method supports PATCH semantics and -// uses the JSON merge patch format and processing rules. -func (r *RegionAutoscalersService) Patch(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersPatchCall { - c := &RegionAutoscalersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates the specified regional BackendService resource with +// the data included in the request. There are several restrictions and +// guidelines to keep in mind when updating a backend service. Read +// Restrictions and Guidelines for more information. This method +// supports PATCH semantics and uses the JSON merge patch format and +// processing rules. +func (r *RegionBackendServicesService) Patch(project string, region string, backendService string, backendservice *BackendService) *RegionBackendServicesPatchCall { + c := &RegionBackendServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.autoscaler = autoscaler - return c -} - -// Autoscaler sets the optional parameter "autoscaler": Name of the -// autoscaler to patch. -func (c *RegionAutoscalersPatchCall) Autoscaler(autoscaler string) *RegionAutoscalersPatchCall { - c.urlParams_.Set("autoscaler", autoscaler) + c.backendService = backendService + c.backendservice = backendservice return c } @@ -66610,15 +80727,15 @@ func (c *RegionAutoscalersPatchCall) Autoscaler(autoscaler string) *RegionAutosc // request if it has already been completed. // // For example, consider a situation where you make an initial request -// and then the request times out. If you make the request again with -// the same request ID, the server can check if original operation with -// the same request ID was received, and if so, will ignore the second +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second // request. This prevents clients from accidentally creating duplicate // commitments. // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *RegionAutoscalersPatchCall) RequestId(requestId string) *RegionAutoscalersPatchCall { +func (c *RegionBackendServicesPatchCall) RequestId(requestId string) *RegionBackendServicesPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -66626,7 +80743,7 @@ func (c *RegionAutoscalersPatchCall) RequestId(requestId string) *RegionAutoscal // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionAutoscalersPatchCall) Fields(s ...googleapi.Field) *RegionAutoscalersPatchCall { +func (c *RegionBackendServicesPatchCall) Fields(s ...googleapi.Field) *RegionBackendServicesPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -66634,52 +80751,53 @@ func (c *RegionAutoscalersPatchCall) Fields(s ...googleapi.Field) *RegionAutosca // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionAutoscalersPatchCall) Context(ctx context.Context) *RegionAutoscalersPatchCall { +func (c *RegionBackendServicesPatchCall) Context(ctx context.Context) *RegionBackendServicesPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionAutoscalersPatchCall) Header() http.Header { +func (c *RegionBackendServicesPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionAutoscalersPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionBackendServicesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionAutoscalers.patch" call. +// Do executes the "compute.regionBackendServices.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionAutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionBackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -66710,18 +80828,20 @@ func (c *RegionAutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Updates an autoscaler in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "description": "Updates the specified regional BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", // "httpMethod": "PATCH", - // "id": "compute.regionAutoscalers.patch", + // "id": "compute.regionBackendServices.patch", // "parameterOrder": [ // "project", - // "region" + // "region", + // "backendService" // ], // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to patch.", - // "location": "query", + // "backendService": { + // "description": "Name of the BackendService resource to patch.", + // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, // "project": { @@ -66739,30 +80859,29 @@ func (c *RegionAutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operatio // "type": "string" // }, // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and then the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/regions/{region}/autoscalers", + // "path": "{project}/regions/{region}/backendServices/{backendService}", // "request": { - // "$ref": "Autoscaler" + // "$ref": "BackendService" // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.regionAutoscalers.testIamPermissions": +// method id "compute.regionBackendServices.testIamPermissions": -type RegionAutoscalersTestIamPermissionsCall struct { +type RegionBackendServicesTestIamPermissionsCall struct { s *Service project string region string @@ -66775,8 +80894,8 @@ type RegionAutoscalersTestIamPermissionsCall struct { // TestIamPermissions: Returns permissions that a caller has on the // specified resource. -func (r *RegionAutoscalersService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionAutoscalersTestIamPermissionsCall { - c := &RegionAutoscalersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *RegionBackendServicesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionBackendServicesTestIamPermissionsCall { + c := &RegionBackendServicesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.resource = resource @@ -66787,7 +80906,7 @@ func (r *RegionAutoscalersService) TestIamPermissions(project string, region str // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionAutoscalersTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionAutoscalersTestIamPermissionsCall { +func (c *RegionBackendServicesTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionBackendServicesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -66795,21 +80914,21 @@ func (c *RegionAutoscalersTestIamPermissionsCall) Fields(s ...googleapi.Field) * // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionAutoscalersTestIamPermissionsCall) Context(ctx context.Context) *RegionAutoscalersTestIamPermissionsCall { +func (c *RegionBackendServicesTestIamPermissionsCall) Context(ctx context.Context) *RegionBackendServicesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionAutoscalersTestIamPermissionsCall) Header() http.Header { +func (c *RegionBackendServicesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionAutoscalersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionBackendServicesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -66822,7 +80941,7 @@ func (c *RegionAutoscalersTestIamPermissionsCall) doRequest(alt string) (*http.R } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -66834,14 +80953,14 @@ func (c *RegionAutoscalersTestIamPermissionsCall) doRequest(alt string) (*http.R return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionAutoscalers.testIamPermissions" call. +// Do executes the "compute.regionBackendServices.testIamPermissions" call. // Exactly one of *TestPermissionsResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *TestPermissionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionAutoscalersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *RegionBackendServicesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -66874,7 +80993,7 @@ func (c *RegionAutoscalersTestIamPermissionsCall) Do(opts ...googleapi.CallOptio // { // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.regionAutoscalers.testIamPermissions", + // "id": "compute.regionBackendServices.testIamPermissions", // "parameterOrder": [ // "project", // "region", @@ -66903,7 +81022,7 @@ func (c *RegionAutoscalersTestIamPermissionsCall) Do(opts ...googleapi.CallOptio // "type": "string" // } // }, - // "path": "{project}/regions/{region}/autoscalers/{resource}/testIamPermissions", + // "path": "{project}/regions/{region}/backendServices/{resource}/testIamPermissions", // "request": { // "$ref": "TestPermissionsRequest" // }, @@ -66919,205 +81038,29 @@ func (c *RegionAutoscalersTestIamPermissionsCall) Do(opts ...googleapi.CallOptio } -// method id "compute.regionAutoscalers.update": - -type RegionAutoscalersUpdateCall struct { - s *Service - project string - region string - autoscaler *Autoscaler - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Update: Updates an autoscaler in the specified project using the data -// included in the request. 
-func (r *RegionAutoscalersService) Update(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersUpdateCall { - c := &RegionAutoscalersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.autoscaler = autoscaler - return c -} - -// Autoscaler sets the optional parameter "autoscaler": Name of the -// autoscaler to update. -func (c *RegionAutoscalersUpdateCall) Autoscaler(autoscaler string) *RegionAutoscalersUpdateCall { - c.urlParams_.Set("autoscaler", autoscaler) - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and then the request times out. If you make the request again with -// the same request ID, the server can check if original operation with -// the same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -func (c *RegionAutoscalersUpdateCall) RequestId(requestId string) *RegionAutoscalersUpdateCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionAutoscalersUpdateCall) Fields(s ...googleapi.Field) *RegionAutoscalersUpdateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionAutoscalersUpdateCall) Context(ctx context.Context) *RegionAutoscalersUpdateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionAutoscalersUpdateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionAutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionAutoscalers.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. 
-func (c *RegionAutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates an autoscaler in the specified project using the data included in the request.", - // "httpMethod": "PUT", - // "id": "compute.regionAutoscalers.update", - // "parameterOrder": [ - // "project", - // "region" - // ], - // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to update.", - // "location": "query", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and then the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/autoscalers", - // "request": { - // "$ref": "Autoscaler" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.regionBackendServices.delete": +// method id "compute.regionBackendServices.update": -type RegionBackendServicesDeleteCall struct { +type RegionBackendServicesUpdateCall struct { s *Service project string region string backendService string + backendservice *BackendService urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Delete: Deletes the specified regional BackendService resource. -func (r *RegionBackendServicesService) Delete(project string, region string, backendService string) *RegionBackendServicesDeleteCall { - c := &RegionBackendServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates the specified regional BackendService resource with +// the data included in the request. 
There are several restrictions and +// guidelines to keep in mind when updating a backend service. Read +// Restrictions and Guidelines for more information. +func (r *RegionBackendServicesService) Update(project string, region string, backendService string, backendservice *BackendService) *RegionBackendServicesUpdateCall { + c := &RegionBackendServicesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.backendService = backendService + c.backendservice = backendservice return c } @@ -67135,7 +81078,7 @@ func (r *RegionBackendServicesService) Delete(project string, region string, bac // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionBackendServicesDeleteCall) RequestId(requestId string) *RegionBackendServicesDeleteCall { +func (c *RegionBackendServicesUpdateCall) RequestId(requestId string) *RegionBackendServicesUpdateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -67143,7 +81086,7 @@ func (c *RegionBackendServicesDeleteCall) RequestId(requestId string) *RegionBac // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionBackendServicesDeleteCall) Fields(s ...googleapi.Field) *RegionBackendServicesDeleteCall { +func (c *RegionBackendServicesUpdateCall) Fields(s ...googleapi.Field) *RegionBackendServicesUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -67151,31 +81094,36 @@ func (c *RegionBackendServicesDeleteCall) Fields(s ...googleapi.Field) *RegionBa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionBackendServicesDeleteCall) Context(ctx context.Context) *RegionBackendServicesDeleteCall { +func (c *RegionBackendServicesUpdateCall) Context(ctx context.Context) *RegionBackendServicesUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionBackendServicesDeleteCall) Header() http.Header { +func (c *RegionBackendServicesUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionBackendServicesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionBackendServicesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("PUT", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, @@ -67185,14 +81133,14 @@ func (c *RegionBackendServicesDeleteCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionBackendServices.delete" call. +// Do executes the "compute.regionBackendServices.update" call. 
// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionBackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionBackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -67223,9 +81171,9 @@ func (c *RegionBackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Deletes the specified regional BackendService resource.", - // "httpMethod": "DELETE", - // "id": "compute.regionBackendServices.delete", + // "description": "Updates the specified regional BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information.", + // "httpMethod": "PUT", + // "id": "compute.regionBackendServices.update", // "parameterOrder": [ // "project", // "region", @@ -67233,7 +81181,7 @@ func (c *RegionBackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope // ], // "parameters": { // "backendService": { - // "description": "Name of the BackendService resource to delete.", + // "description": "Name of the BackendService resource to update.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -67260,6 +81208,9 @@ func (c *RegionBackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope // } // }, // "path": "{project}/regions/{region}/backendServices/{backendService}", + // "request": { + // "$ref": "BackendService" + // }, // "response": { // "$ref": "Operation" // }, @@ -67271,32 +81222,95 @@ func (c *RegionBackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.regionBackendServices.get": +// method id "compute.regionCommitments.aggregatedList": -type RegionBackendServicesGetCall struct { - s *Service - project string - region string - backendService string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionCommitmentsAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified regional BackendService resource. -func (r *RegionBackendServicesService) Get(project string, region string, backendService string) *RegionBackendServicesGetCall { - c := &RegionBackendServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of commitments. +func (r *RegionCommitmentsService) AggregatedList(project string) *RegionCommitmentsAggregatedListCall { + c := &RegionCommitmentsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.backendService = backendService + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. 
+// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *RegionCommitmentsAggregatedListCall) Filter(filter string) *RegionCommitmentsAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *RegionCommitmentsAggregatedListCall) MaxResults(maxResults int64) *RegionCommitmentsAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *RegionCommitmentsAggregatedListCall) OrderBy(orderBy string) *RegionCommitmentsAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *RegionCommitmentsAggregatedListCall) PageToken(pageToken string) *RegionCommitmentsAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *RegionBackendServicesGetCall) Fields(s ...googleapi.Field) *RegionBackendServicesGetCall { +func (c *RegionCommitmentsAggregatedListCall) Fields(s ...googleapi.Field) *RegionCommitmentsAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -67306,7 +81320,7 @@ func (c *RegionBackendServicesGetCall) Fields(s ...googleapi.Field) *RegionBacke // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionBackendServicesGetCall) IfNoneMatch(entityTag string) *RegionBackendServicesGetCall { +func (c *RegionCommitmentsAggregatedListCall) IfNoneMatch(entityTag string) *RegionCommitmentsAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -67314,21 +81328,21 @@ func (c *RegionBackendServicesGetCall) IfNoneMatch(entityTag string) *RegionBack // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionBackendServicesGetCall) Context(ctx context.Context) *RegionBackendServicesGetCall { +func (c *RegionCommitmentsAggregatedListCall) Context(ctx context.Context) *RegionCommitmentsAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionBackendServicesGetCall) Header() http.Header { +func (c *RegionCommitmentsAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionBackendServicesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionCommitmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -67339,26 +81353,24 @@ func (c *RegionBackendServicesGetCall) doRequest(alt string) (*http.Response, er } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/commitments") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "backendService": c.backendService, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionBackendServices.get" call. -// Exactly one of *BackendService or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *BackendService.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.regionCommitments.aggregatedList" call. +// Exactly one of *CommitmentAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *CommitmentAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. 
-func (c *RegionBackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendService, error) { +func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) (*CommitmentAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -67377,7 +81389,7 @@ func (c *RegionBackendServicesGetCall) Do(opts ...googleapi.CallOption) (*Backen if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &BackendService{ + ret := &CommitmentAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -67389,20 +81401,34 @@ func (c *RegionBackendServicesGetCall) Do(opts ...googleapi.CallOption) (*Backen } return ret, nil // { - // "description": "Returns the specified regional BackendService resource.", + // "description": "Retrieves an aggregated list of commitments.", // "httpMethod": "GET", - // "id": "compute.regionBackendServices.get", + // "id": "compute.regionCommitments.aggregatedList", // "parameterOrder": [ - // "project", - // "region", - // "backendService" + // "project" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -67411,18 +81437,11 @@ func (c *RegionBackendServicesGetCall) Do(opts ...googleapi.CallOption) (*Backen // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/backendServices/{backendService}", + // "path": "{project}/aggregated/commitments", // "response": { - // "$ref": "BackendService" + // "$ref": "CommitmentAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -67433,88 +81452,116 @@ func (c *RegionBackendServicesGetCall) Do(opts ...googleapi.CallOption) (*Backen } -// method id "compute.regionBackendServices.getHealth": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionCommitmentsAggregatedListCall) Pages(ctx context.Context, f func(*CommitmentAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type RegionBackendServicesGetHealthCall struct { - s *Service - project string - region string - backendService string - resourcegroupreference *ResourceGroupReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.regionCommitments.get": + +type RegionCommitmentsGetCall struct { + s *Service + project string + region string + commitment string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// GetHealth: Gets the most recent health check results for this -// regional BackendService. -func (r *RegionBackendServicesService) GetHealth(project string, region string, backendService string, resourcegroupreference *ResourceGroupReference) *RegionBackendServicesGetHealthCall { - c := &RegionBackendServicesGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified commitment resource. Get a list of +// available commitments by making a list() request. 
+func (r *RegionCommitmentsService) Get(project string, region string, commitment string) *RegionCommitmentsGetCall { + c := &RegionCommitmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.backendService = backendService - c.resourcegroupreference = resourcegroupreference + c.commitment = commitment return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionBackendServicesGetHealthCall) Fields(s ...googleapi.Field) *RegionBackendServicesGetHealthCall { +func (c *RegionCommitmentsGetCall) Fields(s ...googleapi.Field) *RegionCommitmentsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionCommitmentsGetCall) IfNoneMatch(entityTag string) *RegionCommitmentsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionBackendServicesGetHealthCall) Context(ctx context.Context) *RegionBackendServicesGetHealthCall { +func (c *RegionCommitmentsGetCall) Context(ctx context.Context) *RegionCommitmentsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionBackendServicesGetHealthCall) Header() http.Header { +func (c *RegionCommitmentsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionBackendServicesGetHealthCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionCommitmentsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.resourcegroupreference) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}/getHealth") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/commitments/{commitment}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "backendService": c.backendService, + "project": c.project, + "region": c.region, + "commitment": c.commitment, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionBackendServices.getHealth" call. -// Exactly one of *BackendServiceGroupHealth or error will be non-nil. -// Any non-2xx status code is an error. 
Response headers are in either -// *BackendServiceGroupHealth.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionBackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*BackendServiceGroupHealth, error) { +// Do executes the "compute.regionCommitments.get" call. +// Exactly one of *Commitment or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Commitment.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionCommitmentsGetCall) Do(opts ...googleapi.CallOption) (*Commitment, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -67533,7 +81580,7 @@ func (c *RegionBackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (* if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &BackendServiceGroupHealth{ + ret := &Commitment{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -67545,42 +81592,40 @@ func (c *RegionBackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Gets the most recent health check results for this regional BackendService.", - // "httpMethod": "POST", - // "id": "compute.regionBackendServices.getHealth", + // "description": "Returns the specified commitment resource. Get a list of available commitments by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.regionCommitments.get", // "parameterOrder": [ // "project", // "region", - // "backendService" + // "commitment" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to which the queried instance belongs.", + // "commitment": { + // "description": "Name of the commitment to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, // "project": { + // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/backendServices/{backendService}/getHealth", - // "request": { - // "$ref": "ResourceGroupReference" - // }, + // "path": "{project}/regions/{region}/commitments/{commitment}", // "response": { - // "$ref": "BackendServiceGroupHealth" + // "$ref": "Commitment" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -67591,28 +81636,25 @@ func (c *RegionBackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (* } -// method id "compute.regionBackendServices.insert": +// method id "compute.regionCommitments.insert": -type RegionBackendServicesInsertCall struct { - s *Service - project string - region 
string - backendservice *BackendService - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionCommitmentsInsertCall struct { + s *Service + project string + region string + commitment *Commitment + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a regional BackendService resource in the specified -// project using the data included in the request. There are several -// restrictions and guidelines to keep in mind when creating a regional -// backend service. Read Restrictions and Guidelines for more -// information. -func (r *RegionBackendServicesService) Insert(project string, region string, backendservice *BackendService) *RegionBackendServicesInsertCall { - c := &RegionBackendServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a commitment in the specified project using the data +// included in the request. +func (r *RegionCommitmentsService) Insert(project string, region string, commitment *Commitment) *RegionCommitmentsInsertCall { + c := &RegionCommitmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.backendservice = backendservice + c.commitment = commitment return c } @@ -67630,7 +81672,7 @@ func (r *RegionBackendServicesService) Insert(project string, region string, bac // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionBackendServicesInsertCall) RequestId(requestId string) *RegionBackendServicesInsertCall { +func (c *RegionCommitmentsInsertCall) RequestId(requestId string) *RegionCommitmentsInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -67638,7 +81680,7 @@ func (c *RegionBackendServicesInsertCall) RequestId(requestId string) *RegionBac // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionBackendServicesInsertCall) Fields(s ...googleapi.Field) *RegionBackendServicesInsertCall { +func (c *RegionCommitmentsInsertCall) Fields(s ...googleapi.Field) *RegionCommitmentsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -67646,34 +81688,34 @@ func (c *RegionBackendServicesInsertCall) Fields(s ...googleapi.Field) *RegionBa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionBackendServicesInsertCall) Context(ctx context.Context) *RegionBackendServicesInsertCall { +func (c *RegionCommitmentsInsertCall) Context(ctx context.Context) *RegionCommitmentsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionBackendServicesInsertCall) Header() http.Header { +func (c *RegionCommitmentsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionBackendServicesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionCommitmentsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.commitment) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/commitments") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -67684,14 +81726,14 @@ func (c *RegionBackendServicesInsertCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionBackendServices.insert" call. +// Do executes the "compute.regionCommitments.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionBackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionCommitmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -67722,9 +81764,9 @@ func (c *RegionBackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Creates a regional BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a regional backend service. 
Read Restrictions and Guidelines for more information.", + // "description": "Creates a commitment in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.regionBackendServices.insert", + // "id": "compute.regionCommitments.insert", // "parameterOrder": [ // "project", // "region" @@ -67738,7 +81780,7 @@ func (c *RegionBackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -67750,9 +81792,9 @@ func (c *RegionBackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // } // }, - // "path": "{project}/regions/{region}/backendServices", + // "path": "{project}/regions/{region}/commitments", // "request": { - // "$ref": "BackendService" + // "$ref": "Commitment" // }, // "response": { // "$ref": "Operation" @@ -67765,9 +81807,9 @@ func (c *RegionBackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.regionBackendServices.list": +// method id "compute.regionCommitments.list": -type RegionBackendServicesListCall struct { +type RegionCommitmentsListCall struct { s *Service project string region string @@ -67777,10 +81819,10 @@ type RegionBackendServicesListCall struct { header_ http.Header } -// List: Retrieves the list of regional BackendService resources -// available to the specified project in the given region. -func (r *RegionBackendServicesService) List(project string, region string) *RegionBackendServicesListCall { - c := &RegionBackendServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of commitments contained within the specified +// region. +func (r *RegionCommitmentsService) List(project string, region string) *RegionCommitmentsListCall { + c := &RegionCommitmentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region return c @@ -67812,7 +81854,7 @@ func (r *RegionBackendServicesService) List(project string, region string) *Regi // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *RegionBackendServicesListCall) Filter(filter string) *RegionBackendServicesListCall { +func (c *RegionCommitmentsListCall) Filter(filter string) *RegionCommitmentsListCall { c.urlParams_.Set("filter", filter) return c } @@ -67823,7 +81865,7 @@ func (c *RegionBackendServicesListCall) Filter(filter string) *RegionBackendServ // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *RegionBackendServicesListCall) MaxResults(maxResults int64) *RegionBackendServicesListCall { +func (c *RegionCommitmentsListCall) MaxResults(maxResults int64) *RegionCommitmentsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -67840,7 +81882,7 @@ func (c *RegionBackendServicesListCall) MaxResults(maxResults int64) *RegionBack // // Currently, only sorting by name or creationTimestamp desc is // supported. 
-func (c *RegionBackendServicesListCall) OrderBy(orderBy string) *RegionBackendServicesListCall { +func (c *RegionCommitmentsListCall) OrderBy(orderBy string) *RegionCommitmentsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -67848,7 +81890,7 @@ func (c *RegionBackendServicesListCall) OrderBy(orderBy string) *RegionBackendSe // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *RegionBackendServicesListCall) PageToken(pageToken string) *RegionBackendServicesListCall { +func (c *RegionCommitmentsListCall) PageToken(pageToken string) *RegionCommitmentsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -67856,7 +81898,7 @@ func (c *RegionBackendServicesListCall) PageToken(pageToken string) *RegionBacke // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionBackendServicesListCall) Fields(s ...googleapi.Field) *RegionBackendServicesListCall { +func (c *RegionCommitmentsListCall) Fields(s ...googleapi.Field) *RegionCommitmentsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -67866,7 +81908,7 @@ func (c *RegionBackendServicesListCall) Fields(s ...googleapi.Field) *RegionBack // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionBackendServicesListCall) IfNoneMatch(entityTag string) *RegionBackendServicesListCall { +func (c *RegionCommitmentsListCall) IfNoneMatch(entityTag string) *RegionCommitmentsListCall { c.ifNoneMatch_ = entityTag return c } @@ -67874,265 +81916,50 @@ func (c *RegionBackendServicesListCall) IfNoneMatch(entityTag string) *RegionBac // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionBackendServicesListCall) Context(ctx context.Context) *RegionBackendServicesListCall { +func (c *RegionCommitmentsListCall) Context(ctx context.Context) *RegionCommitmentsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionBackendServicesListCall) Header() http.Header { +func (c *RegionCommitmentsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionBackendServicesListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionCommitmentsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionBackendServices.list" call. -// Exactly one of *BackendServiceList or error will be non-nil. 
Any -// non-2xx status code is an error. Response headers are in either -// *BackendServiceList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionBackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServiceList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &BackendServiceList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves the list of regional BackendService resources available to the specified project in the given region.", - // "httpMethod": "GET", - // "id": "compute.regionBackendServices.list", - // "parameterOrder": [ - // "project", - // "region" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/backendServices", - // "response": { - // "$ref": "BackendServiceList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *RegionBackendServicesListCall) Pages(ctx context.Context, f func(*BackendServiceList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.regionBackendServices.patch": - -type RegionBackendServicesPatchCall struct { - s *Service - project string - region string - backendService string - backendservice *BackendService - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Patch: Updates the specified regional BackendService resource with -// the data included in the request. There are several restrictions and -// guidelines to keep in mind when updating a backend service. Read -// Restrictions and Guidelines for more information. This method -// supports PATCH semantics and uses the JSON merge patch format and -// processing rules. -func (r *RegionBackendServicesService) Patch(project string, region string, backendService string, backendservice *BackendService) *RegionBackendServicesPatchCall { - c := &RegionBackendServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.backendService = backendService - c.backendservice = backendservice - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. 
-// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionBackendServicesPatchCall) RequestId(requestId string) *RegionBackendServicesPatchCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionBackendServicesPatchCall) Fields(s ...googleapi.Field) *RegionBackendServicesPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionBackendServicesPatchCall) Context(ctx context.Context) *RegionBackendServicesPatchCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionBackendServicesPatchCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionBackendServicesPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) - if err != nil { - return nil, err + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/commitments") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "backendService": c.backendService, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionBackendServices.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.regionCommitments.list" call. +// Exactly one of *CommitmentList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionBackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// *CommitmentList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. 
Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionCommitmentsListCall) Do(opts ...googleapi.CallOption) (*CommitmentList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -68151,7 +81978,7 @@ func (c *RegionBackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Oper if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &CommitmentList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -68163,20 +81990,35 @@ func (c *RegionBackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Updates the specified regional BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.regionBackendServices.patch", + // "description": "Retrieves a list of commitments contained within the specified region.", + // "httpMethod": "GET", + // "id": "compute.regionCommitments.list", // "parameterOrder": [ // "project", - // "region", - // "backendService" + // "region" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to patch.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -68187,24 +82029,16 @@ func (c *RegionBackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Oper // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/backendServices/{backendService}", - // "request": { - // "$ref": "BackendService" - // }, + // "path": "{project}/regions/{region}/commitments", // "response": { - // "$ref": "Operation" + // "$ref": "CommitmentList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -68215,9 +82049,30 @@ func (c *RegionBackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Oper } -// method id "compute.regionBackendServices.testIamPermissions": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *RegionCommitmentsListCall) Pages(ctx context.Context, f func(*CommitmentList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type RegionBackendServicesTestIamPermissionsCall struct { +// method id "compute.regionCommitments.testIamPermissions": + +type RegionCommitmentsTestIamPermissionsCall struct { s *Service project string region string @@ -68230,8 +82085,8 @@ type RegionBackendServicesTestIamPermissionsCall struct { // TestIamPermissions: Returns permissions that a caller has on the // specified resource. -func (r *RegionBackendServicesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionBackendServicesTestIamPermissionsCall { - c := &RegionBackendServicesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *RegionCommitmentsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionCommitmentsTestIamPermissionsCall { + c := &RegionCommitmentsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.resource = resource @@ -68242,7 +82097,7 @@ func (r *RegionBackendServicesService) TestIamPermissions(project string, region // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionBackendServicesTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionBackendServicesTestIamPermissionsCall { +func (c *RegionCommitmentsTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionCommitmentsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -68250,21 +82105,21 @@ func (c *RegionBackendServicesTestIamPermissionsCall) Fields(s ...googleapi.Fiel // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionBackendServicesTestIamPermissionsCall) Context(ctx context.Context) *RegionBackendServicesTestIamPermissionsCall { +func (c *RegionCommitmentsTestIamPermissionsCall) Context(ctx context.Context) *RegionCommitmentsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
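Aside: the Pages helper added above wraps the usual nextPageToken loop, so callers of the new regionCommitments.list surface do not have to manage paging by hand. A minimal usage sketch under stated assumptions: the import path, project, and region are placeholders (pick whichever compute API version this vendored package corresponds to), and authentication uses Application Default Credentials.

package main

import (
    "context"
    "fmt"
    "log"

    "golang.org/x/oauth2/google"
    compute "google.golang.org/api/compute/v0.beta" // assumption: adjust to the vendored API version
)

func main() {
    ctx := context.Background()
    httpClient, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
    if err != nil {
        log.Fatal(err)
    }
    svc, err := compute.New(httpClient)
    if err != nil {
        log.Fatal(err)
    }
    // Pages calls the callback once per CommitmentList page and follows
    // NextPageToken until it is empty; a non-nil error from the callback
    // stops the iteration early.
    err = svc.RegionCommitments.List("my-project", "us-central1").Pages(ctx, func(page *compute.CommitmentList) error {
        for _, c := range page.Items {
            fmt.Println(c.Name)
        }
        return nil
    })
    if err != nil {
        log.Fatal(err)
    }
}

Note that the generated Pages method defers a PageToken reset, so the same list call can be reused after the iteration finishes.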
-func (c *RegionBackendServicesTestIamPermissionsCall) Header() http.Header { +func (c *RegionCommitmentsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionBackendServicesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionCommitmentsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -68277,7 +82132,7 @@ func (c *RegionBackendServicesTestIamPermissionsCall) doRequest(alt string) (*ht } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/commitments/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -68289,14 +82144,14 @@ func (c *RegionBackendServicesTestIamPermissionsCall) doRequest(alt string) (*ht return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionBackendServices.testIamPermissions" call. +// Do executes the "compute.regionCommitments.testIamPermissions" call. // Exactly one of *TestPermissionsResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *TestPermissionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionBackendServicesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *RegionCommitmentsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -68329,7 +82184,7 @@ func (c *RegionBackendServicesTestIamPermissionsCall) Do(opts ...googleapi.CallO // { // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.regionBackendServices.testIamPermissions", + // "id": "compute.regionCommitments.testIamPermissions", // "parameterOrder": [ // "project", // "region", @@ -68358,7 +82213,7 @@ func (c *RegionBackendServicesTestIamPermissionsCall) Do(opts ...googleapi.CallO // "type": "string" // } // }, - // "path": "{project}/regions/{region}/backendServices/{resource}/testIamPermissions", + // "path": "{project}/regions/{region}/commitments/{resource}/testIamPermissions", // "request": { // "$ref": "TestPermissionsRequest" // }, @@ -68374,109 +82229,95 @@ func (c *RegionBackendServicesTestIamPermissionsCall) Do(opts ...googleapi.CallO } -// method id "compute.regionBackendServices.update": +// method id "compute.regionDiskTypes.get": -type RegionBackendServicesUpdateCall struct { - s *Service - project string - region string - backendService string - backendservice *BackendService - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionDiskTypesGetCall struct { + s *Service + project string + region string + diskType string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Update: Updates the specified regional BackendService resource with -// the data included in the request. There are several restrictions and -// guidelines to keep in mind when updating a backend service. Read -// Restrictions and Guidelines for more information. -func (r *RegionBackendServicesService) Update(project string, region string, backendService string, backendservice *BackendService) *RegionBackendServicesUpdateCall { - c := &RegionBackendServicesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified regional disk type. Get a list of +// available disk types by making a list() request. +func (r *RegionDiskTypesService) Get(project string, region string, diskType string) *RegionDiskTypesGetCall { + c := &RegionDiskTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.backendService = backendService - c.backendservice = backendservice - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionBackendServicesUpdateCall) RequestId(requestId string) *RegionBackendServicesUpdateCall { - c.urlParams_.Set("requestId", requestId) + c.diskType = diskType return c } // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionBackendServicesUpdateCall) Fields(s ...googleapi.Field) *RegionBackendServicesUpdateCall { +func (c *RegionDiskTypesGetCall) Fields(s ...googleapi.Field) *RegionDiskTypesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionDiskTypesGetCall) IfNoneMatch(entityTag string) *RegionDiskTypesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionBackendServicesUpdateCall) Context(ctx context.Context) *RegionBackendServicesUpdateCall { +func (c *RegionDiskTypesGetCall) Context(ctx context.Context) *RegionDiskTypesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionBackendServicesUpdateCall) Header() http.Header { +func (c *RegionDiskTypesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionBackendServicesUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDiskTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/diskTypes/{diskType}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "backendService": c.backendService, + "project": c.project, + "region": c.region, + "diskType": c.diskType, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionBackendServices.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.regionDiskTypes.get" call. +// Exactly one of *DiskType or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *DiskType.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
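Aside: the IfNoneMatch option added for the get call above enables conditional reads. A sketch of the intended pattern, reusing svc and the imports from the earlier sketch plus "google.golang.org/api/googleapi"; the ETag and resource names are placeholders.

// cachedETag would come from an earlier response for the same resource.
cachedETag := "W/\"etag-from-previous-get\"" // placeholder
dt, err := svc.RegionDiskTypes.Get("my-project", "us-central1", "pd-ssd").
    IfNoneMatch(cachedETag).
    Do()
if googleapi.IsNotModified(err) {
    // 304: the resource has not changed since cachedETag; keep using the cached copy.
} else if err != nil {
    log.Fatal(err)
} else {
    fmt.Println(dt.Name, dt.Description)
}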
-func (c *RegionBackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionDiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -68495,7 +82336,7 @@ func (c *RegionBackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Ope if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &DiskType{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -68507,17 +82348,17 @@ func (c *RegionBackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Updates the specified regional BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information.", - // "httpMethod": "PUT", - // "id": "compute.regionBackendServices.update", + // "description": "Returns the specified regional disk type. Get a list of available disk types by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.regionDiskTypes.get", // "parameterOrder": [ // "project", // "region", - // "backendService" + // "diskType" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to update.", + // "diskType": { + // "description": "Name of the disk type to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -68531,48 +82372,44 @@ func (c *RegionBackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "The name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/backendServices/{backendService}", - // "request": { - // "$ref": "BackendService" - // }, + // "path": "{project}/regions/{region}/diskTypes/{diskType}", // "response": { - // "$ref": "Operation" + // "$ref": "DiskType" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionCommitments.aggregatedList": +// method id "compute.regionDiskTypes.list": -type RegionCommitmentsAggregatedListCall struct { +type RegionDiskTypesListCall struct { s *Service project string + region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// AggregatedList: Retrieves an aggregated list of commitments. -func (r *RegionCommitmentsService) AggregatedList(project string) *RegionCommitmentsAggregatedListCall { - c := &RegionCommitmentsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of regional disk types available to the +// specified project. +func (r *RegionDiskTypesService) List(project string, region string) *RegionDiskTypesListCall { + c := &RegionDiskTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.region = region return c } @@ -68602,7 +82439,7 @@ func (r *RegionCommitmentsService) AggregatedList(project string) *RegionCommitm // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *RegionCommitmentsAggregatedListCall) Filter(filter string) *RegionCommitmentsAggregatedListCall { +func (c *RegionDiskTypesListCall) Filter(filter string) *RegionDiskTypesListCall { c.urlParams_.Set("filter", filter) return c } @@ -68613,7 +82450,7 @@ func (c *RegionCommitmentsAggregatedListCall) Filter(filter string) *RegionCommi // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *RegionCommitmentsAggregatedListCall) MaxResults(maxResults int64) *RegionCommitmentsAggregatedListCall { +func (c *RegionDiskTypesListCall) MaxResults(maxResults int64) *RegionDiskTypesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -68630,7 +82467,7 @@ func (c *RegionCommitmentsAggregatedListCall) MaxResults(maxResults int64) *Regi // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *RegionCommitmentsAggregatedListCall) OrderBy(orderBy string) *RegionCommitmentsAggregatedListCall { +func (c *RegionDiskTypesListCall) OrderBy(orderBy string) *RegionDiskTypesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -68638,7 +82475,7 @@ func (c *RegionCommitmentsAggregatedListCall) OrderBy(orderBy string) *RegionCom // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. 
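Aside: the filter, maxResults, orderBy, and pageToken parameters described above apply to the new regionDiskTypes.list call exactly as documented. A small sketch, with the same placeholder project/region and svc as before.

list, err := svc.RegionDiskTypes.List("my-project", "us-central1").
    Filter("name ne pd-standard").     // field_name comparison_string literal_string
    MaxResults(100).                   // acceptable values are 0 to 500; default 500
    OrderBy("creationTimestamp desc"). // newest results first
    Do()
if err != nil {
    log.Fatal(err)
}
for _, dt := range list.Items {
    fmt.Println(dt.Name)
}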
-func (c *RegionCommitmentsAggregatedListCall) PageToken(pageToken string) *RegionCommitmentsAggregatedListCall { +func (c *RegionDiskTypesListCall) PageToken(pageToken string) *RegionDiskTypesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -68646,7 +82483,7 @@ func (c *RegionCommitmentsAggregatedListCall) PageToken(pageToken string) *Regio // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionCommitmentsAggregatedListCall) Fields(s ...googleapi.Field) *RegionCommitmentsAggregatedListCall { +func (c *RegionDiskTypesListCall) Fields(s ...googleapi.Field) *RegionDiskTypesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -68656,7 +82493,7 @@ func (c *RegionCommitmentsAggregatedListCall) Fields(s ...googleapi.Field) *Regi // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionCommitmentsAggregatedListCall) IfNoneMatch(entityTag string) *RegionCommitmentsAggregatedListCall { +func (c *RegionDiskTypesListCall) IfNoneMatch(entityTag string) *RegionDiskTypesListCall { c.ifNoneMatch_ = entityTag return c } @@ -68664,21 +82501,21 @@ func (c *RegionCommitmentsAggregatedListCall) IfNoneMatch(entityTag string) *Reg // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionCommitmentsAggregatedListCall) Context(ctx context.Context) *RegionCommitmentsAggregatedListCall { +func (c *RegionDiskTypesListCall) Context(ctx context.Context) *RegionDiskTypesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionCommitmentsAggregatedListCall) Header() http.Header { +func (c *RegionDiskTypesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionCommitmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDiskTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -68689,24 +82526,25 @@ func (c *RegionCommitmentsAggregatedListCall) doRequest(alt string) (*http.Respo } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/commitments") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/diskTypes") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionCommitments.aggregatedList" call. -// Exactly one of *CommitmentAggregatedList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *CommitmentAggregatedList.ServerResponse.Header or (if a response was +// Do executes the "compute.regionDiskTypes.list" call. +// Exactly one of *RegionDiskTypeList or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *RegionDiskTypeList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) (*CommitmentAggregatedList, error) { +func (c *RegionDiskTypesListCall) Do(opts ...googleapi.CallOption) (*RegionDiskTypeList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -68725,7 +82563,7 @@ func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) ( if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &CommitmentAggregatedList{ + ret := &RegionDiskTypeList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -68737,11 +82575,12 @@ func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Retrieves an aggregated list of commitments.", + // "description": "Retrieves a list of regional disk types available to the specified project.", // "httpMethod": "GET", - // "id": "compute.regionCommitments.aggregatedList", + // "id": "compute.regionDiskTypes.list", // "parameterOrder": [ - // "project" + // "project", + // "region" // ], // "parameters": { // "filter": { @@ -68773,11 +82612,18 @@ func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) ( // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/aggregated/commitments", + // "path": "{project}/regions/{region}/diskTypes", // "response": { - // "$ref": "CommitmentAggregatedList" + // "$ref": "RegionDiskTypeList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -68791,7 +82637,7 @@ func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) ( // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *RegionCommitmentsAggregatedListCall) Pages(ctx context.Context, f func(*CommitmentAggregatedList) error) error { +func (c *RegionDiskTypesListCall) Pages(ctx context.Context, f func(*RegionDiskTypeList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -68809,188 +82655,32 @@ func (c *RegionCommitmentsAggregatedListCall) Pages(ctx context.Context, f func( } } -// method id "compute.regionCommitments.get": - -type RegionCommitmentsGetCall struct { - s *Service - project string - region string - commitment string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified commitment resource. Get a list of -// available commitments by making a list() request. 
-func (r *RegionCommitmentsService) Get(project string, region string, commitment string) *RegionCommitmentsGetCall { - c := &RegionCommitmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.commitment = commitment - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionCommitmentsGetCall) Fields(s ...googleapi.Field) *RegionCommitmentsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionCommitmentsGetCall) IfNoneMatch(entityTag string) *RegionCommitmentsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionCommitmentsGetCall) Context(ctx context.Context) *RegionCommitmentsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionCommitmentsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionCommitmentsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/commitments/{commitment}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "commitment": c.commitment, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionCommitments.get" call. -// Exactly one of *Commitment or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Commitment.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionCommitmentsGetCall) Do(opts ...googleapi.CallOption) (*Commitment, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Commitment{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified commitment resource. Get a list of available commitments by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.regionCommitments.get", - // "parameterOrder": [ - // "project", - // "region", - // "commitment" - // ], - // "parameters": { - // "commitment": { - // "description": "Name of the commitment to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/commitments/{commitment}", - // "response": { - // "$ref": "Commitment" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.regionCommitments.insert": +// method id "compute.regionDisks.createSnapshot": -type RegionCommitmentsInsertCall struct { +type RegionDisksCreateSnapshotCall struct { s *Service project string region string - commitment *Commitment + disk string + snapshot *Snapshot urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Insert: Creates a commitment in the specified project using the data -// included in the request. -func (r *RegionCommitmentsService) Insert(project string, region string, commitment *Commitment) *RegionCommitmentsInsertCall { - c := &RegionCommitmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// CreateSnapshot: Creates a snapshot of this regional disk. +func (r *RegionDisksService) CreateSnapshot(project string, region string, disk string, snapshot *Snapshot) *RegionDisksCreateSnapshotCall { + c := &RegionDisksCreateSnapshotCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.commitment = commitment + c.disk = disk + c.snapshot = snapshot + return c +} + +// GuestFlush sets the optional parameter "guestFlush": +func (c *RegionDisksCreateSnapshotCall) GuestFlush(guestFlush bool) *RegionDisksCreateSnapshotCall { + c.urlParams_.Set("guestFlush", fmt.Sprint(guestFlush)) return c } @@ -69008,7 +82698,7 @@ func (r *RegionCommitmentsService) Insert(project string, region string, commitm // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
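Aside: the requestId parameter documented above gives createSnapshot (and the other mutating regionDisks calls) at-most-once semantics across retries. A sketch with placeholder names; in real code the UUID should be freshly generated per logical request, and the guestFlush comment is an assumption since the diff carries no description for it.

reqID := "d2f6f6e1-6d1b-4f40-9f3a-0c8f5b9d1a2e" // placeholder; generate a fresh UUID per logical request
op, err := svc.RegionDisks.CreateSnapshot("my-project", "us-central1", "my-regional-disk",
    &compute.Snapshot{Name: "my-regional-disk-snap"}).
    GuestFlush(true). // optional boolean added in this diff (no description in the discovery doc)
    RequestId(reqID). // safe to resend with the same ID if the first attempt times out
    Do()
if err != nil {
    log.Fatal(err)
}
fmt.Println("operation:", op.Name)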
-func (c *RegionCommitmentsInsertCall) RequestId(requestId string) *RegionCommitmentsInsertCall { +func (c *RegionDisksCreateSnapshotCall) RequestId(requestId string) *RegionDisksCreateSnapshotCall { c.urlParams_.Set("requestId", requestId) return c } @@ -69016,7 +82706,7 @@ func (c *RegionCommitmentsInsertCall) RequestId(requestId string) *RegionCommitm // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionCommitmentsInsertCall) Fields(s ...googleapi.Field) *RegionCommitmentsInsertCall { +func (c *RegionDisksCreateSnapshotCall) Fields(s ...googleapi.Field) *RegionDisksCreateSnapshotCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -69024,52 +82714,53 @@ func (c *RegionCommitmentsInsertCall) Fields(s ...googleapi.Field) *RegionCommit // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionCommitmentsInsertCall) Context(ctx context.Context) *RegionCommitmentsInsertCall { +func (c *RegionDisksCreateSnapshotCall) Context(ctx context.Context) *RegionDisksCreateSnapshotCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionCommitmentsInsertCall) Header() http.Header { +func (c *RegionDisksCreateSnapshotCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionCommitmentsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.commitment) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.snapshot) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/commitments") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}/createSnapshot") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, + "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionCommitments.insert" call. +// Do executes the "compute.regionDisks.createSnapshot" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionCommitmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionDisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -69100,14 +82791,26 @@ func (c *RegionCommitmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Creates a commitment in the specified project using the data included in the request.", + // "description": "Creates a snapshot of this regional disk.", // "httpMethod": "POST", - // "id": "compute.regionCommitments.insert", + // "id": "compute.regionDisks.createSnapshot", // "parameterOrder": [ // "project", - // "region" + // "region", + // "disk" // ], // "parameters": { + // "disk": { + // "description": "Name of the regional persistent disk to snapshot.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "guestFlush": { + // "location": "query", + // "type": "boolean" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -69128,9 +82831,9 @@ func (c *RegionCommitmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operati // "type": "string" // } // }, - // "path": "{project}/regions/{region}/commitments", + // "path": "{project}/regions/{region}/disks/{disk}/createSnapshot", // "request": { - // "$ref": "Commitment" + // "$ref": "Snapshot" // }, // "response": { // "$ref": "Operation" @@ -69143,159 +82846,102 @@ func (c *RegionCommitmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.regionCommitments.list": +// method id "compute.regionDisks.delete": -type RegionCommitmentsListCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionDisksDeleteCall struct { + s *Service + project string + region string + disk string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of commitments contained within the specified -// region. -func (r *RegionCommitmentsService) List(project string, region string) *RegionCommitmentsListCall { - c := &RegionCommitmentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified regional persistent disk. Deleting a +// regional disk removes all the replicas of its data permanently and is +// irreversible. However, deleting a disk does not delete any snapshots +// previously made from the disk. You must separately delete snapshots. +func (r *RegionDisksService) Delete(project string, region string, disk string) *RegionDisksDeleteCall { + c := &RegionDisksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region + c.disk = disk return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. 
-// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *RegionCommitmentsListCall) Filter(filter string) *RegionCommitmentsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *RegionCommitmentsListCall) MaxResults(maxResults int64) *RegionCommitmentsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *RegionCommitmentsListCall) OrderBy(orderBy string) *RegionCommitmentsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *RegionCommitmentsListCall) PageToken(pageToken string) *RegionCommitmentsListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionDisksDeleteCall) RequestId(requestId string) *RegionDisksDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
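Aside: regionDisks.delete follows the same pattern; as the new description above stresses, it removes every replica of the regional disk permanently but leaves previously created snapshots in place. Sketch, with placeholder names and svc/reqID as in the earlier sketches.

op, err := svc.RegionDisks.Delete("my-project", "us-central1", "my-regional-disk").
    RequestId(reqID). // reusing the ID only matters when retrying this same delete
    Do()
if err != nil {
    log.Fatal(err)
}
// op is a regional Operation; poll it (e.g. via RegionOperations.Get) until its status is DONE.
_ = op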
-func (c *RegionCommitmentsListCall) Fields(s ...googleapi.Field) *RegionCommitmentsListCall { +func (c *RegionDisksDeleteCall) Fields(s ...googleapi.Field) *RegionDisksDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionCommitmentsListCall) IfNoneMatch(entityTag string) *RegionCommitmentsListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionCommitmentsListCall) Context(ctx context.Context) *RegionCommitmentsListCall { +func (c *RegionDisksDeleteCall) Context(ctx context.Context) *RegionDisksDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionCommitmentsListCall) Header() http.Header { +func (c *RegionDisksDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionCommitmentsListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/commitments") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, + "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionCommitments.list" call. -// Exactly one of *CommitmentList or error will be non-nil. Any non-2xx +// Do executes the "compute.regionDisks.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *CommitmentList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionCommitmentsListCall) Do(opts ...googleapi.CallOption) (*CommitmentList, error) { +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionDisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -69314,7 +82960,7 @@ func (c *RegionCommitmentsListCall) Do(opts ...googleapi.CallOption) (*Commitmen if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &CommitmentList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -69326,35 +82972,19 @@ func (c *RegionCommitmentsListCall) Do(opts ...googleapi.CallOption) (*Commitmen } return ret, nil // { - // "description": "Retrieves a list of commitments contained within the specified region.", - // "httpMethod": "GET", - // "id": "compute.regionCommitments.list", + // "description": "Deletes the specified regional persistent disk. Deleting a regional disk removes all the replicas of its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. You must separately delete snapshots.", + // "httpMethod": "DELETE", + // "id": "compute.regionDisks.delete", // "parameterOrder": [ // "project", - // "region" + // "region", + // "disk" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "disk": { + // "description": "Name of the regional persistent disk to delete.", + // "location": "path", + // "required": true, // "type": "string" // }, // "project": { @@ -69370,124 +83000,113 @@ func (c *RegionCommitmentsListCall) Do(opts ...googleapi.CallOption) (*Commitmen // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/commitments", + // "path": "{project}/regions/{region}/disks/{disk}", // "response": { - // "$ref": "CommitmentList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *RegionCommitmentsListCall) Pages(ctx context.Context, f func(*CommitmentList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.regionCommitments.testIamPermissions": +// method id "compute.regionDisks.get": -type RegionCommitmentsTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionDisksGetCall struct { + s *Service + project string + region string + disk string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. 
-func (r *RegionCommitmentsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionCommitmentsTestIamPermissionsCall { - c := &RegionCommitmentsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns a specified regional persistent disk. +func (r *RegionDisksService) Get(project string, region string, disk string) *RegionDisksGetCall { + c := &RegionDisksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.disk = disk return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionCommitmentsTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionCommitmentsTestIamPermissionsCall { +func (c *RegionDisksGetCall) Fields(s ...googleapi.Field) *RegionDisksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionDisksGetCall) IfNoneMatch(entityTag string) *RegionDisksGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionCommitmentsTestIamPermissionsCall) Context(ctx context.Context) *RegionCommitmentsTestIamPermissionsCall { +func (c *RegionDisksGetCall) Context(ctx context.Context) *RegionDisksGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionCommitmentsTestIamPermissionsCall) Header() http.Header { +func (c *RegionDisksGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionCommitmentsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/commitments/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, + "region": c.region, + "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionCommitments.testIamPermissions" call. 
-// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionCommitmentsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.regionDisks.get" call. +// Exactly one of *Disk or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Disk.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *RegionDisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -69506,7 +83125,7 @@ func (c *RegionCommitmentsTestIamPermissionsCall) Do(opts ...googleapi.CallOptio if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Disk{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -69518,43 +83137,40 @@ func (c *RegionCommitmentsTestIamPermissionsCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.regionCommitments.testIamPermissions", + // "description": "Returns a specified regional persistent disk.", + // "httpMethod": "GET", + // "id": "compute.regionDisks.get", // "parameterOrder": [ // "project", // "region", - // "resource" + // "disk" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "disk": { + // "description": "Name of the regional persistent disk to return.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "region": { - // "description": "The name of the region for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name of the resource for this request.", + // "region": { + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/commitments/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/regions/{region}/disks/{disk}", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Disk" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -69565,95 +83181,111 @@ func (c *RegionCommitmentsTestIamPermissionsCall) 
Do(opts ...googleapi.CallOptio } -// method id "compute.regionDiskTypes.get": +// method id "compute.regionDisks.insert": -type RegionDiskTypesGetCall struct { - s *Service - project string - region string - diskType string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionDisksInsertCall struct { + s *Service + project string + region string + disk *Disk + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified regional disk type. Get a list of -// available disk types by making a list() request. -func (r *RegionDiskTypesService) Get(project string, region string, diskType string) *RegionDiskTypesGetCall { - c := &RegionDiskTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a persistent regional disk in the specified project +// using the data included in the request. +func (r *RegionDisksService) Insert(project string, region string, disk *Disk) *RegionDisksInsertCall { + c := &RegionDisksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.diskType = diskType + c.disk = disk + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionDisksInsertCall) RequestId(requestId string) *RegionDisksInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// SourceImage sets the optional parameter "sourceImage": Source image +// to restore onto a disk. +func (c *RegionDisksInsertCall) SourceImage(sourceImage string) *RegionDisksInsertCall { + c.urlParams_.Set("sourceImage", sourceImage) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDiskTypesGetCall) Fields(s ...googleapi.Field) *RegionDiskTypesGetCall { +func (c *RegionDisksInsertCall) Fields(s ...googleapi.Field) *RegionDisksInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionDiskTypesGetCall) IfNoneMatch(entityTag string) *RegionDiskTypesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *RegionDiskTypesGetCall) Context(ctx context.Context) *RegionDiskTypesGetCall { +func (c *RegionDisksInsertCall) Context(ctx context.Context) *RegionDisksInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionDiskTypesGetCall) Header() http.Header { +func (c *RegionDisksInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDiskTypesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.disk) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/diskTypes/{diskType}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "diskType": c.diskType, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDiskTypes.get" call. -// Exactly one of *DiskType or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *DiskType.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.regionDisks.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionDiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { +func (c *RegionDisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -69672,7 +83304,7 @@ func (c *RegionDiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, er if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &DiskType{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -69684,22 +83316,14 @@ func (c *RegionDiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, er } return ret, nil // { - // "description": "Returns the specified regional disk type. 
Get a list of available disk types by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.regionDiskTypes.get", + // "description": "Creates a persistent regional disk in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.regionDisks.insert", // "parameterOrder": [ // "project", - // "region", - // "diskType" + // "region" // ], // "parameters": { - // "diskType": { - // "description": "Name of the disk type to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -69708,29 +83332,41 @@ func (c *RegionDiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, er // "type": "string" // }, // "region": { - // "description": "The name of the region for this request.", + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "sourceImage": { + // "description": "Optional. Source image to restore onto a disk.", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/diskTypes/{diskType}", + // "path": "{project}/regions/{region}/disks", + // "request": { + // "$ref": "Disk" + // }, // "response": { - // "$ref": "DiskType" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.regionDiskTypes.list": +// method id "compute.regionDisks.list": -type RegionDiskTypesListCall struct { +type RegionDisksListCall struct { s *Service project string region string @@ -69740,10 +83376,10 @@ type RegionDiskTypesListCall struct { header_ http.Header } -// List: Retrieves a list of regional disk types available to the -// specified project. -func (r *RegionDiskTypesService) List(project string, region string) *RegionDiskTypesListCall { - c := &RegionDiskTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of persistent disks contained within the +// specified region. +func (r *RegionDisksService) List(project string, region string) *RegionDisksListCall { + c := &RegionDisksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region return c @@ -69775,7 +83411,7 @@ func (r *RegionDiskTypesService) List(project string, region string) *RegionDisk // true) (zone eq us-central1-f). 
Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *RegionDiskTypesListCall) Filter(filter string) *RegionDiskTypesListCall { +func (c *RegionDisksListCall) Filter(filter string) *RegionDisksListCall { c.urlParams_.Set("filter", filter) return c } @@ -69786,7 +83422,7 @@ func (c *RegionDiskTypesListCall) Filter(filter string) *RegionDiskTypesListCall // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *RegionDiskTypesListCall) MaxResults(maxResults int64) *RegionDiskTypesListCall { +func (c *RegionDisksListCall) MaxResults(maxResults int64) *RegionDisksListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -69803,7 +83439,7 @@ func (c *RegionDiskTypesListCall) MaxResults(maxResults int64) *RegionDiskTypesL // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *RegionDiskTypesListCall) OrderBy(orderBy string) *RegionDiskTypesListCall { +func (c *RegionDisksListCall) OrderBy(orderBy string) *RegionDisksListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -69811,7 +83447,7 @@ func (c *RegionDiskTypesListCall) OrderBy(orderBy string) *RegionDiskTypesListCa // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *RegionDiskTypesListCall) PageToken(pageToken string) *RegionDiskTypesListCall { +func (c *RegionDisksListCall) PageToken(pageToken string) *RegionDisksListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -69819,7 +83455,7 @@ func (c *RegionDiskTypesListCall) PageToken(pageToken string) *RegionDiskTypesLi // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDiskTypesListCall) Fields(s ...googleapi.Field) *RegionDiskTypesListCall { +func (c *RegionDisksListCall) Fields(s ...googleapi.Field) *RegionDisksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -69829,7 +83465,7 @@ func (c *RegionDiskTypesListCall) Fields(s ...googleapi.Field) *RegionDiskTypesL // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionDiskTypesListCall) IfNoneMatch(entityTag string) *RegionDiskTypesListCall { +func (c *RegionDisksListCall) IfNoneMatch(entityTag string) *RegionDisksListCall { c.ifNoneMatch_ = entityTag return c } @@ -69837,21 +83473,21 @@ func (c *RegionDiskTypesListCall) IfNoneMatch(entityTag string) *RegionDiskTypes // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionDiskTypesListCall) Context(ctx context.Context) *RegionDiskTypesListCall { +func (c *RegionDisksListCall) Context(ctx context.Context) *RegionDisksListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionDiskTypesListCall) Header() http.Header { +func (c *RegionDisksListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDiskTypesListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -69862,7 +83498,7 @@ func (c *RegionDiskTypesListCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/diskTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders @@ -69873,14 +83509,14 @@ func (c *RegionDiskTypesListCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDiskTypes.list" call. -// Exactly one of *RegionDiskTypeList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *RegionDiskTypeList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionDiskTypesListCall) Do(opts ...googleapi.CallOption) (*RegionDiskTypeList, error) { +// Do executes the "compute.regionDisks.list" call. +// Exactly one of *DiskList or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *DiskList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionDisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -69899,7 +83535,7 @@ func (c *RegionDiskTypesListCall) Do(opts ...googleapi.CallOption) (*RegionDiskT if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RegionDiskTypeList{ + ret := &DiskList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -69911,9 +83547,9 @@ func (c *RegionDiskTypesListCall) Do(opts ...googleapi.CallOption) (*RegionDiskT } return ret, nil // { - // "description": "Retrieves a list of regional disk types available to the specified project.", + // "description": "Retrieves the list of persistent disks contained within the specified region.", // "httpMethod": "GET", - // "id": "compute.regionDiskTypes.list", + // "id": "compute.regionDisks.list", // "parameterOrder": [ // "project", // "region" @@ -69950,16 +83586,16 @@ func (c *RegionDiskTypesListCall) Do(opts ...googleapi.CallOption) (*RegionDiskT // "type": "string" // }, // "region": { - // "description": "The name of the region for this request.", + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/diskTypes", + // "path": "{project}/regions/{region}/disks", // "response": { - // "$ref": "RegionDiskTypeList" + // "$ref": "DiskList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -69973,7 +83609,7 @@ func (c *RegionDiskTypesListCall) Do(opts ...googleapi.CallOption) (*RegionDiskT // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *RegionDiskTypesListCall) Pages(ctx context.Context, f func(*RegionDiskTypeList) error) error { +func (c *RegionDisksListCall) Pages(ctx context.Context, f func(*DiskList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -69991,32 +83627,26 @@ func (c *RegionDiskTypesListCall) Pages(ctx context.Context, f func(*RegionDiskT } } -// method id "compute.regionDisks.createSnapshot": +// method id "compute.regionDisks.resize": -type RegionDisksCreateSnapshotCall struct { - s *Service - project string - region string - disk string - snapshot *Snapshot - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionDisksResizeCall struct { + s *Service + project string + region string + disk string + regiondisksresizerequest *RegionDisksResizeRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// CreateSnapshot: Creates a snapshot of this regional disk. -func (r *RegionDisksService) CreateSnapshot(project string, region string, disk string, snapshot *Snapshot) *RegionDisksCreateSnapshotCall { - c := &RegionDisksCreateSnapshotCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Resize: Resizes the specified regional persistent disk. 
+func (r *RegionDisksService) Resize(project string, region string, disk string, regiondisksresizerequest *RegionDisksResizeRequest) *RegionDisksResizeCall { + c := &RegionDisksResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.disk = disk - c.snapshot = snapshot - return c -} - -// GuestFlush sets the optional parameter "guestFlush": -func (c *RegionDisksCreateSnapshotCall) GuestFlush(guestFlush bool) *RegionDisksCreateSnapshotCall { - c.urlParams_.Set("guestFlush", fmt.Sprint(guestFlush)) + c.regiondisksresizerequest = regiondisksresizerequest return c } @@ -70034,7 +83664,7 @@ func (c *RegionDisksCreateSnapshotCall) GuestFlush(guestFlush bool) *RegionDisks // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionDisksCreateSnapshotCall) RequestId(requestId string) *RegionDisksCreateSnapshotCall { +func (c *RegionDisksResizeCall) RequestId(requestId string) *RegionDisksResizeCall { c.urlParams_.Set("requestId", requestId) return c } @@ -70042,7 +83672,7 @@ func (c *RegionDisksCreateSnapshotCall) RequestId(requestId string) *RegionDisks // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDisksCreateSnapshotCall) Fields(s ...googleapi.Field) *RegionDisksCreateSnapshotCall { +func (c *RegionDisksResizeCall) Fields(s ...googleapi.Field) *RegionDisksResizeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -70050,34 +83680,34 @@ func (c *RegionDisksCreateSnapshotCall) Fields(s ...googleapi.Field) *RegionDisk // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionDisksCreateSnapshotCall) Context(ctx context.Context) *RegionDisksCreateSnapshotCall { +func (c *RegionDisksResizeCall) Context(ctx context.Context) *RegionDisksResizeCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionDisksCreateSnapshotCall) Header() http.Header { +func (c *RegionDisksResizeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.snapshot) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regiondisksresizerequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}/createSnapshot") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}/resize") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -70089,14 +83719,14 @@ func (c *RegionDisksCreateSnapshotCall) doRequest(alt string) (*http.Response, e return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDisks.createSnapshot" call. 
+// Do executes the "compute.regionDisks.resize" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionDisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionDisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -70127,9 +83757,9 @@ func (c *RegionDisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Opera } return ret, nil // { - // "description": "Creates a snapshot of this regional disk.", + // "description": "Resizes the specified regional persistent disk.", // "httpMethod": "POST", - // "id": "compute.regionDisks.createSnapshot", + // "id": "compute.regionDisks.resize", // "parameterOrder": [ // "project", // "region", @@ -70137,18 +83767,14 @@ func (c *RegionDisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Opera // ], // "parameters": { // "disk": { - // "description": "Name of the regional persistent disk to snapshot.", + // "description": "Name of the regional persistent disk.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "guestFlush": { - // "location": "query", - // "type": "boolean" - // }, // "project": { - // "description": "Project ID for this request.", + // "description": "The project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, @@ -70167,9 +83793,9 @@ func (c *RegionDisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Opera // "type": "string" // } // }, - // "path": "{project}/regions/{region}/disks/{disk}/createSnapshot", + // "path": "{project}/regions/{region}/disks/{disk}/resize", // "request": { - // "$ref": "Snapshot" + // "$ref": "RegionDisksResizeRequest" // }, // "response": { // "$ref": "Operation" @@ -70182,27 +83808,26 @@ func (c *RegionDisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Opera } -// method id "compute.regionDisks.delete": +// method id "compute.regionDisks.setLabels": -type RegionDisksDeleteCall struct { - s *Service - project string - region string - disk string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionDisksSetLabelsCall struct { + s *Service + project string + region string + resource string + regionsetlabelsrequest *RegionSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified regional persistent disk. Deleting a -// regional disk removes all the replicas of its data permanently and is -// irreversible. However, deleting a disk does not delete any snapshots -// previously made from the disk. You must separately delete snapshots. -func (r *RegionDisksService) Delete(project string, region string, disk string) *RegionDisksDeleteCall { - c := &RegionDisksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetLabels: Sets the labels on the target regional disk. 
+func (r *RegionDisksService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *RegionDisksSetLabelsCall { + c := &RegionDisksSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.disk = disk + c.resource = resource + c.regionsetlabelsrequest = regionsetlabelsrequest return c } @@ -70220,7 +83845,7 @@ func (r *RegionDisksService) Delete(project string, region string, disk string) // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionDisksDeleteCall) RequestId(requestId string) *RegionDisksDeleteCall { +func (c *RegionDisksSetLabelsCall) RequestId(requestId string) *RegionDisksSetLabelsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -70228,7 +83853,7 @@ func (c *RegionDisksDeleteCall) RequestId(requestId string) *RegionDisksDeleteCa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDisksDeleteCall) Fields(s ...googleapi.Field) *RegionDisksDeleteCall { +func (c *RegionDisksSetLabelsCall) Fields(s ...googleapi.Field) *RegionDisksSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -70236,48 +83861,53 @@ func (c *RegionDisksDeleteCall) Fields(s ...googleapi.Field) *RegionDisksDeleteC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionDisksDeleteCall) Context(ctx context.Context) *RegionDisksDeleteCall { +func (c *RegionDisksSetLabelsCall) Context(ctx context.Context) *RegionDisksSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionDisksDeleteCall) Header() http.Header { +func (c *RegionDisksSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDisksDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "disk": c.disk, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDisks.delete" call. +// Do executes the "compute.regionDisks.setLabels" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionDisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionDisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -70308,21 +83938,15 @@ func (c *RegionDisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Deletes the specified regional persistent disk. Deleting a regional disk removes all the replicas of its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. You must separately delete snapshots.", - // "httpMethod": "DELETE", - // "id": "compute.regionDisks.delete", + // "description": "Sets the labels on the target regional disk.", + // "httpMethod": "POST", + // "id": "compute.regionDisks.setLabels", // "parameterOrder": [ // "project", // "region", - // "disk" + // "resource" // ], // "parameters": { - // "disk": { - // "description": "Name of the regional persistent disk to delete.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -70331,7 +83955,7 @@ func (c *RegionDisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "The region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -70341,9 +83965,19 @@ func (c *RegionDisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/disks/{disk}", + // "path": "{project}/regions/{region}/disks/{resource}/setLabels", + // "request": { + // "$ref": "RegionSetLabelsRequest" + // }, // "response": { // "$ref": "Operation" // }, @@ -70355,94 +83989,88 @@ func (c *RegionDisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er } -// method id "compute.regionDisks.get": +// method id "compute.regionDisks.testIamPermissions": -type RegionDisksGetCall struct { - s *Service - project string - region string - disk string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionDisksTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns a specified regional persistent disk. -func (r *RegionDisksService) Get(project string, region string, disk string) *RegionDisksGetCall { - c := &RegionDisksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *RegionDisksService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionDisksTestIamPermissionsCall { + c := &RegionDisksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.disk = disk + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDisksGetCall) Fields(s ...googleapi.Field) *RegionDisksGetCall { +func (c *RegionDisksTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionDisksTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionDisksGetCall) IfNoneMatch(entityTag string) *RegionDisksGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionDisksGetCall) Context(ctx context.Context) *RegionDisksGetCall { +func (c *RegionDisksTestIamPermissionsCall) Context(ctx context.Context) *RegionDisksTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionDisksGetCall) Header() http.Header { +func (c *RegionDisksTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDisksGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "disk": c.disk, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDisks.get" call. -// Exactly one of *Disk or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Disk.ServerResponse.Header or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *RegionDisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { +// Do executes the "compute.regionDisks.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionDisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -70461,7 +84089,7 @@ func (c *RegionDisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Disk{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -70473,22 +84101,15 @@ func (c *RegionDisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { } return ret, nil // { - // "description": "Returns a specified regional persistent disk.", - // "httpMethod": "GET", - // "id": "compute.regionDisks.get", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.regionDisks.testIamPermissions", // "parameterOrder": [ // "project", // "region", - // "disk" + // "resource" // ], // "parameters": { - // "disk": { - // "description": "Name of the regional persistent disk to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -70497,16 +84118,26 @@ func (c *RegionDisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name of the resource for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/disks/{disk}", + // "path": "{project}/regions/{region}/disks/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "Disk" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -70517,25 +84148,42 @@ func (c *RegionDisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { } -// method id "compute.regionDisks.insert": +// method id "compute.regionInstanceGroupManagers.abandonInstances": -type RegionDisksInsertCall struct { - s *Service - project string - region string - disk *Disk - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersAbandonInstancesCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagersabandoninstancesrequest *RegionInstanceGroupManagersAbandonInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a persistent regional disk in the specified project -// using the data included in the request. -func (r *RegionDisksService) Insert(project string, region string, disk *Disk) *RegionDisksInsertCall { - c := &RegionDisksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AbandonInstances: Schedules a group action to remove the specified +// instances from the managed instance group. Abandoning an instance +// does not delete the instance, but it does remove the instance from +// any target pools that are applied by the managed instance group. 
This +// method reduces the targetSize of the managed instance group by the +// number of instances that you abandon. This operation is marked as +// DONE when the action is scheduled even if the instances have not yet +// been removed from the group. You must separately verify the status of +// the abandoning action with the listmanagedinstances method. +// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or +// deleted. +// +// You can specify a maximum of 1000 instances with this method per +// request. +func (r *RegionInstanceGroupManagersService) AbandonInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersabandoninstancesrequest *RegionInstanceGroupManagersAbandonInstancesRequest) *RegionInstanceGroupManagersAbandonInstancesCall { + c := &RegionInstanceGroupManagersAbandonInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.disk = disk + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagersabandoninstancesrequest = regioninstancegroupmanagersabandoninstancesrequest return c } @@ -70545,27 +84193,23 @@ func (r *RegionDisksService) Insert(project string, region string, disk *Disk) * // request if it has already been completed. // // For example, consider a situation where you make an initial request -// and then the request times out. If you make the request again with -// the same request ID, the server can check if original operation with -// the same request ID was received, and if so, will ignore the second +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second // request. This prevents clients from accidentally creating duplicate // commitments. -func (c *RegionDisksInsertCall) RequestId(requestId string) *RegionDisksInsertCall { +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionInstanceGroupManagersAbandonInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersAbandonInstancesCall { c.urlParams_.Set("requestId", requestId) return c } -// SourceImage sets the optional parameter "sourceImage": Source image -// to restore onto a disk. -func (c *RegionDisksInsertCall) SourceImage(sourceImage string) *RegionDisksInsertCall { - c.urlParams_.Set("sourceImage", sourceImage) - return c -} - // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDisksInsertCall) Fields(s ...googleapi.Field) *RegionDisksInsertCall { +func (c *RegionInstanceGroupManagersAbandonInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersAbandonInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -70573,52 +84217,53 @@ func (c *RegionDisksInsertCall) Fields(s ...googleapi.Field) *RegionDisksInsertC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *RegionDisksInsertCall) Context(ctx context.Context) *RegionDisksInsertCall { +func (c *RegionInstanceGroupManagersAbandonInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersAbandonInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionDisksInsertCall) Header() http.Header { +func (c *RegionInstanceGroupManagersAbandonInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDisksInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.disk) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersabandoninstancesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDisks.insert" call. +// Do executes the "compute.regionInstanceGroupManagers.abandonInstances" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionDisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -70649,14 +84294,21 @@ func (c *RegionDisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Creates a persistent regional disk in the specified project using the data included in the request.", + // "description": "Schedules a group action to remove the specified instances from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. 
You must separately verify the status of the abandoning action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", // "httpMethod": "POST", - // "id": "compute.regionDisks.insert", + // "id": "compute.regionInstanceGroupManagers.abandonInstances", // "parameterOrder": [ // "project", - // "region" + // "region", + // "instanceGroupManager" // ], // "parameters": { + // "instanceGroupManager": { + // "description": "Name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -70665,26 +84317,20 @@ func (c *RegionDisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and then the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.", - // "location": "query", - // "type": "string" - // }, - // "sourceImage": { - // "description": "Optional. Source image to restore onto a disk.", + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/regions/{region}/disks", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", // "request": { - // "$ref": "Disk" + // "$ref": "RegionInstanceGroupManagersAbandonInstancesRequest" // }, // "response": { // "$ref": "Operation" @@ -70697,159 +84343,88 @@ func (c *RegionDisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er } -// method id "compute.regionDisks.list": - -type RegionDisksListCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves the list of persistent disks contained within the -// specified region. 
-func (r *RegionDisksService) List(project string, region string) *RegionDisksListCall { - c := &RegionDisksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - return c -} - -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *RegionDisksListCall) Filter(filter string) *RegionDisksListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *RegionDisksListCall) MaxResults(maxResults int64) *RegionDisksListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} +// method id "compute.regionInstanceGroupManagers.applyUpdatesToInstances": -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. 
-func (c *RegionDisksListCall) OrderBy(orderBy string) *RegionDisksListCall { - c.urlParams_.Set("orderBy", orderBy) - return c +type RegionInstanceGroupManagersApplyUpdatesToInstancesCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagersapplyupdatesrequest *RegionInstanceGroupManagersApplyUpdatesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *RegionDisksListCall) PageToken(pageToken string) *RegionDisksListCall { - c.urlParams_.Set("pageToken", pageToken) +// ApplyUpdatesToInstances: Apply updates to selected instances the +// managed instance group. +func (r *RegionInstanceGroupManagersService) ApplyUpdatesToInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersapplyupdatesrequest *RegionInstanceGroupManagersApplyUpdatesRequest) *RegionInstanceGroupManagersApplyUpdatesToInstancesCall { + c := &RegionInstanceGroupManagersApplyUpdatesToInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagersapplyupdatesrequest = regioninstancegroupmanagersapplyupdatesrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDisksListCall) Fields(s ...googleapi.Field) *RegionDisksListCall { +func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersApplyUpdatesToInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionDisksListCall) IfNoneMatch(entityTag string) *RegionDisksListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionDisksListCall) Context(ctx context.Context) *RegionDisksListCall { +func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersApplyUpdatesToInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionDisksListCall) Header() http.Header { +func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDisksListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersapplyupdatesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDisks.list" call. -// Exactly one of *DiskList or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *DiskList.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.regionInstanceGroupManagers.applyUpdatesToInstances" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionDisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { +func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -70868,7 +84443,7 @@ func (c *RegionDisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &DiskList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -70880,35 +84455,19 @@ func (c *RegionDisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error } return ret, nil // { - // "description": "Retrieves the list of persistent disks contained within the specified region.", - // "httpMethod": "GET", - // "id": "compute.regionDisks.list", + // "description": "Apply updates to selected instances the managed instance group.", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroupManagers.applyUpdatesToInstances", // "parameterOrder": [ // "project", - // "region" + // "region", + // "instanceGroupManager" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "instanceGroupManager": { + // "description": "The name of the managed instance group, should conform to RFC1035.", + // "location": "path", + // "required": true, // "type": "string" // }, // "project": { @@ -70919,67 +84478,46 @@ func (c *RegionDisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "Name of the region scoping this request, should conform to RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/disks", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances", + // "request": { + // "$ref": "RegionInstanceGroupManagersApplyUpdatesRequest" + // }, // "response": { - // "$ref": "DiskList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *RegionDisksListCall) Pages(ctx context.Context, f func(*DiskList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.regionDisks.resize": +// method id "compute.regionInstanceGroupManagers.delete": -type RegionDisksResizeCall struct { - s *Service - project string - region string - disk string - regiondisksresizerequest *RegionDisksResizeRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersDeleteCall struct { + s *Service + project string + region string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Resize: Resizes the specified regional persistent disk. -func (r *RegionDisksService) Resize(project string, region string, disk string, regiondisksresizerequest *RegionDisksResizeRequest) *RegionDisksResizeCall { - c := &RegionDisksResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified managed instance group and all of the +// instances in that group. 
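A minimal usage sketch (not part of the vendored file) for the new compute.regionInstanceGroupManagers.applyUpdatesToInstances call completed above: it posts a RegionInstanceGroupManagersApplyUpdatesRequest and returns an Operation. The import path, the compute.New constructor, the CloudPlatformScope constant, the Service.RegionInstanceGroupManagers field and the Operation's Name/Status fields are assumptions about the rest of this generated package; the project, region and group names are placeholders.

package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v0.beta" // assumed version path for this vendored client
)

func main() {
	ctx := context.Background()

	// Assumed: Application Default Credentials with the cloud-platform scope
	// listed in the method metadata above.
	httpClient, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(httpClient)
	if err != nil {
		log.Fatal(err)
	}

	// The request body's fields are defined elsewhere in this file; an empty
	// request is enough to show the call shape.
	req := &compute.RegionInstanceGroupManagersApplyUpdatesRequest{}

	op, err := svc.RegionInstanceGroupManagers.
		ApplyUpdatesToInstances("my-project", "us-central1", "my-rigm", req).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatalf("applyUpdatesToInstances: %v", err)
	}
	// Name/Status are assumed from the Operation schema defined elsewhere in
	// this file; HTTPStatusCode is promoted from the embedded
	// googleapi.ServerResponse populated in Do above.
	log.Printf("started operation %s (status %s, HTTP %d)", op.Name, op.Status, op.HTTPStatusCode)
}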
+func (r *RegionInstanceGroupManagersService) Delete(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersDeleteCall { + c := &RegionInstanceGroupManagersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.disk = disk - c.regiondisksresizerequest = regiondisksresizerequest + c.instanceGroupManager = instanceGroupManager return c } @@ -70997,7 +84535,7 @@ func (r *RegionDisksService) Resize(project string, region string, disk string, // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionDisksResizeCall) RequestId(requestId string) *RegionDisksResizeCall { +func (c *RegionInstanceGroupManagersDeleteCall) RequestId(requestId string) *RegionInstanceGroupManagersDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -71005,7 +84543,7 @@ func (c *RegionDisksResizeCall) RequestId(requestId string) *RegionDisksResizeCa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDisksResizeCall) Fields(s ...googleapi.Field) *RegionDisksResizeCall { +func (c *RegionInstanceGroupManagersDeleteCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -71013,53 +84551,48 @@ func (c *RegionDisksResizeCall) Fields(s ...googleapi.Field) *RegionDisksResizeC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionDisksResizeCall) Context(ctx context.Context) *RegionDisksResizeCall { +func (c *RegionInstanceGroupManagersDeleteCall) Context(ctx context.Context) *RegionInstanceGroupManagersDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionDisksResizeCall) Header() http.Header { +func (c *RegionInstanceGroupManagersDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDisksResizeCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regiondisksresizerequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{disk}/resize") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "disk": c.disk, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDisks.resize" call. +// Do executes the "compute.regionInstanceGroupManagers.delete" call. 
// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionDisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -71090,33 +84623,31 @@ func (c *RegionDisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Resizes the specified regional persistent disk.", - // "httpMethod": "POST", - // "id": "compute.regionDisks.resize", + // "description": "Deletes the specified managed instance group and all of the instances in that group.", + // "httpMethod": "DELETE", + // "id": "compute.regionInstanceGroupManagers.delete", // "parameterOrder": [ // "project", // "region", - // "disk" + // "instanceGroupManager" // ], // "parameters": { - // "disk": { - // "description": "Name of the regional persistent disk.", + // "instanceGroupManager": { + // "description": "Name of the managed instance group to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, // "project": { - // "description": "The project ID for this request.", + // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -71126,10 +84657,7 @@ func (c *RegionDisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, er // "type": "string" // } // }, - // "path": "{project}/regions/{region}/disks/{disk}/resize", - // "request": { - // "$ref": "RegionDisksResizeRequest" - // }, + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", // "response": { // "$ref": "Operation" // }, @@ -71141,26 +84669,41 @@ func (c *RegionDisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, er } -// method id "compute.regionDisks.setLabels": +// method id "compute.regionInstanceGroupManagers.deleteInstances": -type RegionDisksSetLabelsCall struct { - s *Service - project string - region string - resource string - regionsetlabelsrequest *RegionSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersDeleteInstancesCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagersdeleteinstancesrequest *RegionInstanceGroupManagersDeleteInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetLabels: Sets the labels on the target regional disk. 
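A sketch of how the new compute.regionInstanceGroupManagers.delete call above might be driven, using the optional requestId parameter to make retries idempotent. Not part of the vendored file; svc is assumed to be a *compute.Service built as in the earlier sketch, and the import path is the same assumption as before.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed version path
)

// deleteRegionIGM deletes a regional managed instance group and all of its
// instances, returning the long-running Operation that tracks the deletion.
func deleteRegionIGM(ctx context.Context, svc *compute.Service, project, region, igm, requestID string) (*compute.Operation, error) {
	op, err := svc.RegionInstanceGroupManagers.
		Delete(project, region, igm).
		RequestId(requestID). // optional; safe to resend with the same UUID
		Context(ctx).
		Do()
	if err != nil {
		return nil, fmt.Errorf("deleting %s/%s/%s: %v", project, region, igm, err)
	}
	return op, nil
}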
-func (r *RegionDisksService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *RegionDisksSetLabelsCall { - c := &RegionDisksSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// DeleteInstances: Schedules a group action to delete the specified +// instances in the managed instance group. The instances are also +// removed from any target pools of which they were a member. This +// method reduces the targetSize of the managed instance group by the +// number of instances that you delete. This operation is marked as DONE +// when the action is scheduled even if the instances are still being +// deleted. You must separately verify the status of the deleting action +// with the listmanagedinstances method. +// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or +// deleted. +// +// You can specify a maximum of 1000 instances with this method per +// request. +func (r *RegionInstanceGroupManagersService) DeleteInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersdeleteinstancesrequest *RegionInstanceGroupManagersDeleteInstancesRequest) *RegionInstanceGroupManagersDeleteInstancesCall { + c := &RegionInstanceGroupManagersDeleteInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.resource = resource - c.regionsetlabelsrequest = regionsetlabelsrequest + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagersdeleteinstancesrequest = regioninstancegroupmanagersdeleteinstancesrequest return c } @@ -71178,7 +84721,7 @@ func (r *RegionDisksService) SetLabels(project string, region string, resource s // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionDisksSetLabelsCall) RequestId(requestId string) *RegionDisksSetLabelsCall { +func (c *RegionInstanceGroupManagersDeleteInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersDeleteInstancesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -71186,7 +84729,7 @@ func (c *RegionDisksSetLabelsCall) RequestId(requestId string) *RegionDisksSetLa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDisksSetLabelsCall) Fields(s ...googleapi.Field) *RegionDisksSetLabelsCall { +func (c *RegionInstanceGroupManagersDeleteInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersDeleteInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -71194,53 +84737,53 @@ func (c *RegionDisksSetLabelsCall) Fields(s ...googleapi.Field) *RegionDisksSetL // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionDisksSetLabelsCall) Context(ctx context.Context) *RegionDisksSetLabelsCall { +func (c *RegionInstanceGroupManagersDeleteInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersDeleteInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionDisksSetLabelsCall) Header() http.Header { +func (c *RegionInstanceGroupManagersDeleteInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDisksSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersdeleteinstancesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{resource}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deleteInstances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDisks.setLabels" call. +// Do executes the "compute.regionInstanceGroupManagers.deleteInstances" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionDisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -71271,15 +84814,21 @@ func (c *RegionDisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Sets the labels on the target regional disk.", + // "description": "Schedules a group action to delete the specified instances in the managed instance group. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. This operation is marked as DONE when the action is scheduled even if the instances are still being deleted. 
You must separately verify the status of the deleting action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", // "httpMethod": "POST", - // "id": "compute.regionDisks.setLabels", + // "id": "compute.regionInstanceGroupManagers.deleteInstances", // "parameterOrder": [ // "project", // "region", - // "resource" + // "instanceGroupManager" // ], // "parameters": { + // "instanceGroupManager": { + // "description": "Name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -71288,9 +84837,8 @@ func (c *RegionDisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // }, // "region": { - // "description": "The region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -71298,18 +84846,11 @@ func (c *RegionDisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/disks/{resource}/setLabels", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", // "request": { - // "$ref": "RegionSetLabelsRequest" + // "$ref": "RegionInstanceGroupManagersDeleteInstancesRequest" // }, // "response": { // "$ref": "Operation" @@ -71322,34 +84863,34 @@ func (c *RegionDisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.regionDisks.testIamPermissions": +// method id "compute.regionInstanceGroupManagers.deletePerInstanceConfigs": -type RegionDisksTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersDeletePerInstanceConfigsCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagerdeleteinstanceconfigreq *RegionInstanceGroupManagerDeleteInstanceConfigReq + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *RegionDisksService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionDisksTestIamPermissionsCall { - c := &RegionDisksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// DeletePerInstanceConfigs: Delete selected per-instance configs for +// the managed instance group. +func (r *RegionInstanceGroupManagersService) DeletePerInstanceConfigs(project string, region string, instanceGroupManager string, regioninstancegroupmanagerdeleteinstanceconfigreq *RegionInstanceGroupManagerDeleteInstanceConfigReq) *RegionInstanceGroupManagersDeletePerInstanceConfigsCall { + c := &RegionInstanceGroupManagersDeletePerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagerdeleteinstanceconfigreq = regioninstancegroupmanagerdeleteinstanceconfigreq return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionDisksTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionDisksTestIamPermissionsCall { +func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersDeletePerInstanceConfigsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -71357,53 +84898,53 @@ func (c *RegionDisksTestIamPermissionsCall) Fields(s ...googleapi.Field) *Region // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
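The deleteInstances and deletePerInstanceConfigs calls added above follow the same builder pattern: construct the call with a request body, optionally set requestId (deleteInstances only), and Do returns an Operation. A hedged sketch, assuming svc is built as in the first example; the request's field naming the instances to delete is defined elsewhere in this file, so it is only pointed at by a comment here.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed version path
)

// deleteIGMInstances schedules deletion of selected instances in a regional
// managed instance group (at most 1000 instance URLs per request).
func deleteIGMInstances(ctx context.Context, svc *compute.Service, project, region, igm string) (*compute.Operation, error) {
	// Populate the request with the instance URLs to delete; the field names
	// come from the RegionInstanceGroupManagersDeleteInstancesRequest struct
	// defined elsewhere in this file.
	req := &compute.RegionInstanceGroupManagersDeleteInstancesRequest{}

	op, err := svc.RegionInstanceGroupManagers.
		DeleteInstances(project, region, igm, req).
		Context(ctx).
		Do()
	if err != nil {
		return nil, fmt.Errorf("deleteInstances on %s: %v", igm, err)
	}
	// The operation is marked DONE once the action is scheduled; use the
	// listmanagedinstances method (not shown in this hunk) to confirm that the
	// instances were actually deleted.
	return op, nil
}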
-func (c *RegionDisksTestIamPermissionsCall) Context(ctx context.Context) *RegionDisksTestIamPermissionsCall { +func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) Context(ctx context.Context) *RegionInstanceGroupManagersDeletePerInstanceConfigsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionDisksTestIamPermissionsCall) Header() http.Header { +func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionDisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerdeleteinstanceconfigreq) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/disks/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deletePerInstanceConfigs") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionDisks.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionDisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.regionInstanceGroupManagers.deletePerInstanceConfigs" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -71422,7 +84963,7 @@ func (c *RegionDisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -71434,152 +84975,122 @@ func (c *RegionDisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Delete selected per-instance configs for the managed instance group.", // "httpMethod": "POST", - // "id": "compute.regionDisks.testIamPermissions", + // "id": "compute.regionInstanceGroupManagers.deletePerInstanceConfigs", // "parameterOrder": [ // "project", // "region", - // "resource" + // "instanceGroupManager" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group. It should conform to RFC1035.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "region": { - // "description": "The name of the region for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name of the resource for this request.", + // "region": { + // "description": "Name of the region scoping this request, should conform to RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/disks/{resource}/testIamPermissions", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deletePerInstanceConfigs", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "RegionInstanceGroupManagerDeleteInstanceConfigReq" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.regionInstanceGroupManagers.abandonInstances": +// method id "compute.regionInstanceGroupManagers.get": -type RegionInstanceGroupManagersAbandonInstancesCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagersabandoninstancesrequest *RegionInstanceGroupManagersAbandonInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersGetCall struct { + s *Service + project string + region string + instanceGroupManager string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// AbandonInstances: Schedules a group action to remove the specified -// instances from the 
managed instance group. Abandoning an instance -// does not delete the instance, but it does remove the instance from -// any target pools that are applied by the managed instance group. This -// method reduces the targetSize of the managed instance group by the -// number of instances that you abandon. This operation is marked as -// DONE when the action is scheduled even if the instances have not yet -// been removed from the group. You must separately verify the status of -// the abandoning action with the listmanagedinstances method. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or -// deleted. -// -// You can specify a maximum of 1000 instances with this method per -// request. -func (r *RegionInstanceGroupManagersService) AbandonInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersabandoninstancesrequest *RegionInstanceGroupManagersAbandonInstancesRequest) *RegionInstanceGroupManagersAbandonInstancesCall { - c := &RegionInstanceGroupManagersAbandonInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns all of the details about the specified managed instance +// group. +func (r *RegionInstanceGroupManagersService) Get(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersGetCall { + c := &RegionInstanceGroupManagersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagersabandoninstancesrequest = regioninstancegroupmanagersabandoninstancesrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersAbandonInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersAbandonInstancesCall { - c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersAbandonInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersAbandonInstancesCall { +func (c *RegionInstanceGroupManagersGetCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
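The new get call returns the InstanceGroupManager resource itself rather than an Operation and, like the list call later in this hunk, supports conditional requests via IfNoneMatch. A sketch under the same assumptions as the earlier examples; the ETag value would normally come from a previous response's headers.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed version path
	"google.golang.org/api/googleapi"
)

// getRegionIGM fetches a regional managed instance group, skipping the body
// when it has not changed since the ETag captured from an earlier response.
func getRegionIGM(ctx context.Context, svc *compute.Service, project, region, igm, etag string) (*compute.InstanceGroupManager, error) {
	call := svc.RegionInstanceGroupManagers.Get(project, region, igm).Context(ctx)
	if etag != "" {
		call = call.IfNoneMatch(etag)
	}
	mgr, err := call.Do()
	if googleapi.IsNotModified(err) {
		// Unchanged since the supplied ETag; the caller can reuse its cached copy.
		return nil, nil
	}
	if err != nil {
		return nil, fmt.Errorf("get %s: %v", igm, err)
	}
	return mgr, nil
}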
+func (c *RegionInstanceGroupManagersGetCall) IfNoneMatch(entityTag string) *RegionInstanceGroupManagersGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersAbandonInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersAbandonInstancesCall { +func (c *RegionInstanceGroupManagersGetCall) Context(ctx context.Context) *RegionInstanceGroupManagersGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersAbandonInstancesCall) Header() http.Header { +func (c *RegionInstanceGroupManagersGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersabandoninstancesrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, @@ -71587,16 +85098,16 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) doRequest(alt string) "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionInstanceGroupManagers.abandonInstances" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +} + +// Do executes the "compute.regionInstanceGroupManagers.get" call. +// Exactly one of *InstanceGroupManager or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceGroupManager.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManager, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -71615,7 +85126,7 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.C if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InstanceGroupManager{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -71627,9 +85138,9 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.C } return ret, nil // { - // "description": "Schedules a group action to remove the specified instances from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. You must separately verify the status of the abandoning action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.abandonInstances", + // "description": "Returns all of the details about the specified managed instance group.", + // "httpMethod": "GET", + // "id": "compute.regionInstanceGroupManagers.get", // "parameterOrder": [ // "project", // "region", @@ -71637,7 +85148,7 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.C // ], // "parameters": { // "instanceGroupManager": { - // "description": "Name of the managed instance group.", + // "description": "Name of the managed instance group to return.", // "location": "path", // "required": true, // "type": "string" @@ -71654,56 +85165,73 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.C // "location": "path", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", - // "request": { - // "$ref": "RegionInstanceGroupManagersAbandonInstancesRequest" - // }, + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", // "response": { - // "$ref": "Operation" + // "$ref": "InstanceGroupManager" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionInstanceGroupManagers.applyUpdatesToInstances": +// method id "compute.regionInstanceGroupManagers.insert": -type RegionInstanceGroupManagersApplyUpdatesToInstancesCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagersapplyupdatesrequest *RegionInstanceGroupManagersApplyUpdatesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersInsertCall struct { + s *Service + project string + region string + instancegroupmanager *InstanceGroupManager + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ApplyUpdatesToInstances: Apply updates to selected instances the -// managed instance group. -func (r *RegionInstanceGroupManagersService) ApplyUpdatesToInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersapplyupdatesrequest *RegionInstanceGroupManagersApplyUpdatesRequest) *RegionInstanceGroupManagersApplyUpdatesToInstancesCall { - c := &RegionInstanceGroupManagersApplyUpdatesToInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a managed instance group using the information that +// you specify in the request. After the group is created, it schedules +// an action to create instances in the group using the specified +// instance template. This operation is marked as DONE when the group is +// created even if the instances in the group have not yet been created. +// You must separately verify the status of the individual instances +// with the listmanagedinstances method. +// +// A regional managed instance group can contain up to 2000 instances. +func (r *RegionInstanceGroupManagersService) Insert(project string, region string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersInsertCall { + c := &RegionInstanceGroupManagersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagersapplyupdatesrequest = regioninstancegroupmanagersapplyupdatesrequest + c.instancegroupmanager = instancegroupmanager + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionInstanceGroupManagersInsertCall) RequestId(requestId string) *RegionInstanceGroupManagersInsertCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersApplyUpdatesToInstancesCall { +func (c *RegionInstanceGroupManagersInsertCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -71711,53 +85239,52 @@ func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Fields(s ...goo // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersApplyUpdatesToInstancesCall { +func (c *RegionInstanceGroupManagersInsertCall) Context(ctx context.Context) *RegionInstanceGroupManagersInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Header() http.Header { +func (c *RegionInstanceGroupManagersInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersapplyupdatesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.applyUpdatesToInstances" call. +// Do executes the "compute.regionInstanceGroupManagers.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -71788,21 +85315,14 @@ func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Do(opts ...goog } return ret, nil // { - // "description": "Apply updates to selected instances the managed instance group.", + // "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, it schedules an action to create instances in the group using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method.\n\nA regional managed instance group can contain up to 2000 instances.", // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.applyUpdatesToInstances", + // "id": "compute.regionInstanceGroupManagers.insert", // "parameterOrder": [ // "project", - // "region", - // "instanceGroupManager" + // "region" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group, should conform to RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -71811,15 +85331,20 @@ func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Do(opts ...goog // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request, should conform to RFC1035.", + // "description": "Name of the region scoping this request.", // "location": "path", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/applyUpdatesToInstances", + // "path": "{project}/regions/{region}/instanceGroupManagers", // "request": { - // "$ref": "RegionInstanceGroupManagersApplyUpdatesRequest" + // "$ref": "InstanceGroupManager" // }, // "response": { // "$ref": "Operation" @@ -71832,100 +85357,159 @@ func (c *RegionInstanceGroupManagersApplyUpdatesToInstancesCall) Do(opts ...goog } -// method id "compute.regionInstanceGroupManagers.delete": +// method id "compute.regionInstanceGroupManagers.list": -type RegionInstanceGroupManagersDeleteCall struct { - s *Service - project string - region string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified managed instance group and all of the -// instances in that group. -func (r *RegionInstanceGroupManagersService) Delete(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersDeleteCall { - c := &RegionInstanceGroupManagersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of managed instance groups that are +// contained within the specified region. +func (r *RegionInstanceGroupManagersService) List(project string, region string) *RegionInstanceGroupManagersListCall { + c := &RegionInstanceGroupManagersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *RegionInstanceGroupManagersDeleteCall) RequestId(requestId string) *RegionInstanceGroupManagersDeleteCall { - c.urlParams_.Set("requestId", requestId) +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *RegionInstanceGroupManagersListCall) Filter(filter string) *RegionInstanceGroupManagersListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *RegionInstanceGroupManagersListCall) MaxResults(maxResults int64) *RegionInstanceGroupManagersListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *RegionInstanceGroupManagersListCall) OrderBy(orderBy string) *RegionInstanceGroupManagersListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *RegionInstanceGroupManagersListCall) PageToken(pageToken string) *RegionInstanceGroupManagersListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersDeleteCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersDeleteCall { +func (c *RegionInstanceGroupManagersListCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
+func (c *RegionInstanceGroupManagersListCall) IfNoneMatch(entityTag string) *RegionInstanceGroupManagersListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersDeleteCall) Context(ctx context.Context) *RegionInstanceGroupManagersDeleteCall { +func (c *RegionInstanceGroupManagersListCall) Context(ctx context.Context) *RegionInstanceGroupManagersListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersDeleteCall) Header() http.Header { +func (c *RegionInstanceGroupManagersListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionInstanceGroupManagers.list" call. +// Exactly one of *RegionInstanceGroupManagerList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *RegionInstanceGroupManagerList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupManagerList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -71944,7 +85528,7 @@ func (c *RegionInstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &RegionInstanceGroupManagerList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -71956,19 +85540,35 @@ func (c *RegionInstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Deletes the specified managed instance group and all of the instances in that group.", - // "httpMethod": "DELETE", - // "id": "compute.regionInstanceGroupManagers.delete", + // "description": "Retrieves the list of managed instance groups that are contained within the specified region.", + // "httpMethod": "GET", + // "id": "compute.regionInstanceGroupManagers.list", // "parameterOrder": [ // "project", - // "region", - // "instanceGroupManager" + // "region" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "Name of the managed instance group to delete.", - // "location": "path", - // "required": true, + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -71983,86 +85583,94 @@ func (c *RegionInstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) // "location": "path", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", + // "path": "{project}/regions/{region}/instanceGroupManagers", // "response": { - // "$ref": "Operation" + // "$ref": "RegionInstanceGroupManagerList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionInstanceGroupManagers.deleteInstances": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionInstanceGroupManagersListCall) Pages(ctx context.Context, f func(*RegionInstanceGroupManagerList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type RegionInstanceGroupManagersDeleteInstancesCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagersdeleteinstancesrequest *RegionInstanceGroupManagersDeleteInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.regionInstanceGroupManagers.listManagedInstances": + +type RegionInstanceGroupManagersListManagedInstancesCall struct { + s *Service + project string + region string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// DeleteInstances: Schedules a group action to delete the specified -// instances in the managed instance group. The instances are also -// removed from any target pools of which they were a member. This -// method reduces the targetSize of the managed instance group by the -// number of instances that you delete. 
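The List call and its Pages helper added above can be exercised roughly as follows. A hedged sketch that reuses the imports and svc setup from the earlier Insert example; the Items field on RegionInstanceGroupManagerList is assumed from the usual generated list-response shape (only NextPageToken appears in this hunk).

// Assumes the same imports and *compute.Service setup as the Insert sketch above.
func listRegionalMIGs(ctx context.Context, svc *compute.Service) error {
	call := svc.RegionInstanceGroupManagers.
		List("my-project", "us-central1").
		Filter("name ne example-instance"). // eq/ne filter syntax as documented above
		MaxResults(100)                     // paging handled by Pages via nextPageToken
	return call.Pages(ctx, func(page *compute.RegionInstanceGroupManagerList) error {
		// Items is assumed; it is not defined in this hunk.
		for _, igm := range page.Items {
			fmt.Println(igm.Name)
		}
		return nil
	})
}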
This operation is marked as DONE -// when the action is scheduled even if the instances are still being -// deleted. You must separately verify the status of the deleting action -// with the listmanagedinstances method. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or -// deleted. -// -// You can specify a maximum of 1000 instances with this method per -// request. -func (r *RegionInstanceGroupManagersService) DeleteInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersdeleteinstancesrequest *RegionInstanceGroupManagersDeleteInstancesRequest) *RegionInstanceGroupManagersDeleteInstancesCall { - c := &RegionInstanceGroupManagersDeleteInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListManagedInstances: Lists the instances in the managed instance +// group and instances that are scheduled to be created. The list +// includes any current actions that the group has scheduled for its +// instances. +func (r *RegionInstanceGroupManagersService) ListManagedInstances(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersListManagedInstancesCall { + c := &RegionInstanceGroupManagersListManagedInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagersdeleteinstancesrequest = regioninstancegroupmanagersdeleteinstancesrequest return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *RegionInstanceGroupManagersDeleteInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersDeleteInstancesCall { - c.urlParams_.Set("requestId", requestId) +// Filter sets the optional parameter "filter": +func (c *RegionInstanceGroupManagersListManagedInstancesCall) Filter(filter string) *RegionInstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": +func (c *RegionInstanceGroupManagersListManagedInstancesCall) MaxResults(maxResults int64) *RegionInstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "order_by": +func (c *RegionInstanceGroupManagersListManagedInstancesCall) OrderBy(orderBy string) *RegionInstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("order_by", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": +func (c *RegionInstanceGroupManagersListManagedInstancesCall) PageToken(pageToken string) *RegionInstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersDeleteInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersDeleteInstancesCall { +func (c *RegionInstanceGroupManagersListManagedInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersListManagedInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -72070,34 +85678,29 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) Fields(s ...googleapi.F // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersDeleteInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersDeleteInstancesCall { +func (c *RegionInstanceGroupManagersListManagedInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersListManagedInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersDeleteInstancesCall) Header() http.Header { +func (c *RegionInstanceGroupManagersListManagedInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersdeleteinstancesrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deleteInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -72109,14 +85712,16 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) doRequest(alt string) ( return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.deleteInstances" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionInstanceGroupManagers.listManagedInstances" call. +// Exactly one of *RegionInstanceGroupManagersListInstancesResponse or +// error will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *RegionInstanceGroupManagersListInstancesResponse.ServerResponse.Heade +// r or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupManagersListInstancesResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -72135,7 +85740,7 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.Ca if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &RegionInstanceGroupManagersListInstancesResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -72147,21 +85752,40 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.Ca } return ret, nil // { - // "description": "Schedules a group action to delete the specified instances in the managed instance group. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. This operation is marked as DONE when the action is scheduled even if the instances are still being deleted. You must separately verify the status of the deleting action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. 
The list includes any current actions that the group has scheduled for its instances.", // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.deleteInstances", + // "id": "compute.regionInstanceGroupManagers.listManagedInstances", // "parameterOrder": [ // "project", // "region", // "instanceGroupManager" // ], // "parameters": { + // "filter": { + // "location": "query", + // "type": "string" + // }, // "instanceGroupManager": { - // "description": "Name of the managed instance group.", + // "description": "The name of the managed instance group.", // "location": "path", // "required": true, // "type": "string" // }, + // "maxResults": { + // "default": "500", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "order_by": { + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -72174,56 +85798,135 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.Ca // "location": "path", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", - // "request": { - // "$ref": "RegionInstanceGroupManagersDeleteInstancesRequest" - // }, + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", // "response": { - // "$ref": "Operation" + // "$ref": "RegionInstanceGroupManagersListInstancesResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionInstanceGroupManagers.deletePerInstanceConfigs": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *RegionInstanceGroupManagersListManagedInstancesCall) Pages(ctx context.Context, f func(*RegionInstanceGroupManagersListInstancesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.regionInstanceGroupManagers.listPerInstanceConfigs": -type RegionInstanceGroupManagersDeletePerInstanceConfigsCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagerdeleteinstanceconfigreq *RegionInstanceGroupManagerDeleteInstanceConfigReq - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersListPerInstanceConfigsCall struct { + s *Service + project string + region string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// DeletePerInstanceConfigs: Delete selected per-instance configs for -// the managed instance group. -func (r *RegionInstanceGroupManagersService) DeletePerInstanceConfigs(project string, region string, instanceGroupManager string, regioninstancegroupmanagerdeleteinstanceconfigreq *RegionInstanceGroupManagerDeleteInstanceConfigReq) *RegionInstanceGroupManagersDeletePerInstanceConfigsCall { - c := &RegionInstanceGroupManagersDeletePerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListPerInstanceConfigs: Lists all of the per-instance configs defined +// for the managed instance group. +func (r *RegionInstanceGroupManagersService) ListPerInstanceConfigs(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersListPerInstanceConfigsCall { + c := &RegionInstanceGroupManagersListPerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagerdeleteinstanceconfigreq = regioninstancegroupmanagerdeleteinstanceconfigreq + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). 
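Since several of the operations in this file tell callers to verify instance status separately with listmanagedinstances, here is a hedged sketch of that check using the ListManagedInstances call and Pages helper defined above. The ManagedInstances field and its members are assumptions; only NextPageToken is visible in this hunk, and the project/region literals are placeholders.

// Assumes the same imports and *compute.Service setup as the Insert sketch above.
func printManagedInstances(ctx context.Context, svc *compute.Service, igmName string) error {
	call := svc.RegionInstanceGroupManagers.
		ListManagedInstances("my-project", "us-central1", igmName).
		MaxResults(500)
	return call.Pages(ctx, func(page *compute.RegionInstanceGroupManagersListInstancesResponse) error {
		// ManagedInstances, Instance, and CurrentAction are assumed field names.
		for _, mi := range page.ManagedInstances {
			fmt.Println(mi.Instance, mi.CurrentAction)
		}
		return nil
	})
}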
Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Filter(filter string) *RegionInstanceGroupManagersListPerInstanceConfigsCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) MaxResults(maxResults int64) *RegionInstanceGroupManagersListPerInstanceConfigsCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) OrderBy(orderBy string) *RegionInstanceGroupManagersListPerInstanceConfigsCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) PageToken(pageToken string) *RegionInstanceGroupManagersListPerInstanceConfigsCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersDeletePerInstanceConfigsCall { +func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersListPerInstanceConfigsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -72231,34 +85934,29 @@ func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) Fields(s ...go // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) Context(ctx context.Context) *RegionInstanceGroupManagersDeletePerInstanceConfigsCall { +func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Context(ctx context.Context) *RegionInstanceGroupManagersListPerInstanceConfigsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) Header() http.Header { +func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerdeleteinstanceconfigreq) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deletePerInstanceConfigs") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -72270,14 +85968,16 @@ func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) doRequest(alt return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.deletePerInstanceConfigs" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionInstanceGroupManagers.listPerInstanceConfigs" call. +// Exactly one of *RegionInstanceGroupManagersListInstanceConfigsResp or +// error will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *RegionInstanceGroupManagersListInstanceConfigsResp.ServerResponse.Hea +// der or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupManagersListInstanceConfigsResp, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -72296,7 +85996,7 @@ func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) Do(opts ...goo if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &RegionInstanceGroupManagersListInstanceConfigsResp{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -72308,21 +86008,44 @@ func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) Do(opts ...goo } return ret, nil // { - // "description": "Delete selected per-instance configs for the managed instance group.", + // "description": "Lists all of the per-instance configs defined for the managed instance group.", // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.deletePerInstanceConfigs", + // "id": "compute.regionInstanceGroupManagers.listPerInstanceConfigs", // "parameterOrder": [ // "project", // "region", // "instanceGroupManager" // ], // "parameters": { + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, // "instanceGroupManager": { // "description": "The name of the managed instance group. It should conform to RFC1035.", // "location": "path", // "required": true, // "type": "string" // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -72337,93 +86060,129 @@ func (c *RegionInstanceGroupManagersDeletePerInstanceConfigsCall) Do(opts ...goo // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deletePerInstanceConfigs", - // "request": { - // "$ref": "RegionInstanceGroupManagerDeleteInstanceConfigReq" - // }, + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs", // "response": { - // "$ref": "Operation" + // "$ref": "RegionInstanceGroupManagersListInstanceConfigsResp" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionInstanceGroupManagers.get": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Pages(ctx context.Context, f func(*RegionInstanceGroupManagersListInstanceConfigsResp) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type RegionInstanceGroupManagersGetCall struct { +// method id "compute.regionInstanceGroupManagers.patch": + +type RegionInstanceGroupManagersPatchCall struct { s *Service project string region string instanceGroupManager string + instancegroupmanager *InstanceGroupManager urlParams_ gensupport.URLParams - ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns all of the details about the specified managed instance -// group. -func (r *RegionInstanceGroupManagersService) Get(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersGetCall { - c := &RegionInstanceGroupManagersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates a managed instance group using the information that +// you specify in the request. This operation is marked as DONE when the +// group is patched even if the instances in the group are still in the +// process of being patched. You must separately verify the status of +// the individual instances with the listmanagedinstances method. This +// method supports PATCH semantics and uses the JSON merge patch format +// and processing rules. 
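The ListPerInstanceConfigs call added above follows the same paging pattern. A minimal sketch under the same assumptions as the earlier examples; because the response's item field is not visible in this hunk, the callback only reports page progress via NextPageToken, which the Pages helper above does expose.

// Assumes the same imports and *compute.Service setup as the Insert sketch above.
func walkPerInstanceConfigs(ctx context.Context, svc *compute.Service, igmName string) error {
	return svc.RegionInstanceGroupManagers.
		ListPerInstanceConfigs("my-project", "us-central1", igmName).
		Pages(ctx, func(resp *compute.RegionInstanceGroupManagersListInstanceConfigsResp) error {
			// Inspect resp for the configs themselves; its item field is defined
			// outside this hunk.
			fmt.Println("fetched a page, next token:", resp.NextPageToken)
			return nil
		})
}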
+func (r *RegionInstanceGroupManagersService) Patch(project string, region string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersPatchCall { + c := &RegionInstanceGroupManagersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.instanceGroupManager = instanceGroupManager + c.instancegroupmanager = instancegroupmanager + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionInstanceGroupManagersPatchCall) RequestId(requestId string) *RegionInstanceGroupManagersPatchCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersGetCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersGetCall { +func (c *RegionInstanceGroupManagersPatchCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionInstanceGroupManagersGetCall) IfNoneMatch(entityTag string) *RegionInstanceGroupManagersGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersGetCall) Context(ctx context.Context) *RegionInstanceGroupManagersGetCall { +func (c *RegionInstanceGroupManagersPatchCall) Context(ctx context.Context) *RegionInstanceGroupManagersPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionInstanceGroupManagersGetCall) Header() http.Header { +func (c *RegionInstanceGroupManagersPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("PATCH", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, @@ -72433,14 +86192,14 @@ func (c *RegionInstanceGroupManagersGetCall) doRequest(alt string) (*http.Respon return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.get" call. -// Exactly one of *InstanceGroupManager or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceGroupManager.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManager, error) { +// Do executes the "compute.regionInstanceGroupManagers.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -72459,7 +86218,7 @@ func (c *RegionInstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (* if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupManager{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -72471,9 +86230,9 @@ func (c *RegionInstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Returns all of the details about the specified managed instance group.", - // "httpMethod": "GET", - // "id": "compute.regionInstanceGroupManagers.get", + // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. 
You must separately verify the status of the individual instances with the listmanagedinstances method. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.regionInstanceGroupManagers.patch", // "parameterOrder": [ // "project", // "region", @@ -72481,7 +86240,7 @@ func (c *RegionInstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (* // ], // "parameters": { // "instanceGroupManager": { - // "description": "Name of the managed instance group to return.", + // "description": "The name of the instance group manager.", // "location": "path", // "required": true, // "type": "string" @@ -72498,47 +86257,62 @@ func (c *RegionInstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (* // "location": "path", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", - // "response": { + // "request": { // "$ref": "InstanceGroupManager" // }, + // "response": { + // "$ref": "Operation" + // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.regionInstanceGroupManagers.insert": +// method id "compute.regionInstanceGroupManagers.recreateInstances": -type RegionInstanceGroupManagersInsertCall struct { - s *Service - project string - region string - instancegroupmanager *InstanceGroupManager - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersRecreateInstancesCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagersrecreaterequest *RegionInstanceGroupManagersRecreateRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a managed instance group using the information that -// you specify in the request. After the group is created, it schedules -// an action to create instances in the group using the specified -// instance template. This operation is marked as DONE when the group is -// created even if the instances in the group have not yet been created. -// You must separately verify the status of the individual instances -// with the listmanagedinstances method. +// RecreateInstances: Schedules a group action to recreate the specified +// instances in the managed instance group. The instances are deleted +// and recreated using the current instance template for the managed +// instance group. 
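A hedged sketch of the Patch call defined above, which per its description uses JSON merge-patch semantics and returns DONE before the instances finish patching. TargetSize is an assumed field name, the requestID is caller-supplied per the requestId documentation, and the project/region literals are placeholders.

// Assumes the same imports and *compute.Service setup as the Insert sketch above.
func patchTargetSize(ctx context.Context, svc *compute.Service, igmName string, newSize int64, requestID string) error {
	// Only the fields set here are intended to change under merge-patch semantics.
	patch := &compute.InstanceGroupManager{TargetSize: newSize}
	op, err := svc.RegionInstanceGroupManagers.
		Patch("my-project", "us-central1", igmName, patch).
		RequestId(requestID).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	// DONE means the patch is accepted; check individual instances with
	// listmanagedinstances as the description above instructs.
	fmt.Println("patch operation:", op.Name)
	return nil
}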
This operation is marked as DONE when the action is +// scheduled even if the instances have not yet been recreated. You must +// separately verify the status of the recreating action with the +// listmanagedinstances method. // -// A regional managed instance group can contain up to 2000 instances. -func (r *RegionInstanceGroupManagersService) Insert(project string, region string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersInsertCall { - c := &RegionInstanceGroupManagersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or +// deleted. +// +// You can specify a maximum of 1000 instances with this method per +// request. +func (r *RegionInstanceGroupManagersService) RecreateInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersrecreaterequest *RegionInstanceGroupManagersRecreateRequest) *RegionInstanceGroupManagersRecreateInstancesCall { + c := &RegionInstanceGroupManagersRecreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instancegroupmanager = instancegroupmanager + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagersrecreaterequest = regioninstancegroupmanagersrecreaterequest return c } @@ -72556,7 +86330,7 @@ func (r *RegionInstanceGroupManagersService) Insert(project string, region strin // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersInsertCall) RequestId(requestId string) *RegionInstanceGroupManagersInsertCall { +func (c *RegionInstanceGroupManagersRecreateInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersRecreateInstancesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -72564,7 +86338,7 @@ func (c *RegionInstanceGroupManagersInsertCall) RequestId(requestId string) *Reg // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersInsertCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersInsertCall { +func (c *RegionInstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersRecreateInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -72572,52 +86346,53 @@ func (c *RegionInstanceGroupManagersInsertCall) Fields(s ...googleapi.Field) *Re // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersInsertCall) Context(ctx context.Context) *RegionInstanceGroupManagersInsertCall { +func (c *RegionInstanceGroupManagersRecreateInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersRecreateInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionInstanceGroupManagersInsertCall) Header() http.Header { +func (c *RegionInstanceGroupManagersRecreateInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersrecreaterequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.insert" call. +// Do executes the "compute.regionInstanceGroupManagers.recreateInstances" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -72648,14 +86423,21 @@ func (c *RegionInstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, it schedules an action to create instances in the group using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method.\n\nA regional managed instance group can contain up to 2000 instances.", + // "description": "Schedules a group action to recreate the specified instances in the managed instance group. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the action is scheduled even if the instances have not yet been recreated. 
You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.insert", + // "id": "compute.regionInstanceGroupManagers.recreateInstances", // "parameterOrder": [ // "project", - // "region" + // "region", + // "instanceGroupManager" // ], // "parameters": { + // "instanceGroupManager": { + // "description": "Name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -72675,9 +86457,9 @@ func (c *RegionInstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", // "request": { - // "$ref": "InstanceGroupManager" + // "$ref": "RegionInstanceGroupManagersRecreateRequest" // }, // "response": { // "$ref": "Operation" @@ -72690,159 +86472,111 @@ func (c *RegionInstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) } -// method id "compute.regionInstanceGroupManagers.list": +// method id "compute.regionInstanceGroupManagers.resize": -type RegionInstanceGroupManagersListCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersResizeCall struct { + s *Service + project string + region string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of managed instance groups that are -// contained within the specified region. -func (r *RegionInstanceGroupManagersService) List(project string, region string) *RegionInstanceGroupManagersListCall { - c := &RegionInstanceGroupManagersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Resize: Changes the intended size for the managed instance group. If +// you increase the size, the group schedules actions to create new +// instances using the current instance template. If you decrease the +// size, the group schedules delete actions on one or more instances. +// The resize operation is marked DONE when the resize actions are +// scheduled even if the group has not yet added or deleted any +// instances. You must separately verify the status of the creating or +// deleting actions with the listmanagedinstances method. +// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or deleted. 
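For orientation while reviewing the new call type, here is a minimal usage sketch of the RecreateInstances builder introduced above, written as if it lived inside the generated package (context is already imported there). The helper and variable names are illustrative only, and the Instances field of RegionInstanceGroupManagersRecreateRequest is assumed from the Compute API schema rather than shown in this hunk.

// recreateOne is an illustrative helper: it schedules a recreate action for a
// single managed instance and returns the resulting Operation. Only builder
// methods that appear in this diff (RecreateInstances, RequestId, Context, Do)
// are chained.
func recreateOne(ctx context.Context, rigm *RegionInstanceGroupManagersService, project, region, igm, instanceURL, requestID string) (*Operation, error) {
	req := &RegionInstanceGroupManagersRecreateRequest{
		// Field name assumed from the Compute API schema; the method accepts
		// at most 1000 instance URLs per request.
		Instances: []string{instanceURL},
	}
	return rigm.RecreateInstances(project, region, igm, req).
		RequestId(requestID). // optional idempotency token; must be a non-zero UUID
		Context(ctx).
		Do()
}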
+func (r *RegionInstanceGroupManagersService) Resize(project string, region string, instanceGroupManager string, size int64) *RegionInstanceGroupManagersResizeCall { + c := &RegionInstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region + c.instanceGroupManager = instanceGroupManager + c.urlParams_.Set("size", fmt.Sprint(size)) return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *RegionInstanceGroupManagersListCall) Filter(filter string) *RegionInstanceGroupManagersListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *RegionInstanceGroupManagersListCall) MaxResults(maxResults int64) *RegionInstanceGroupManagersListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *RegionInstanceGroupManagersListCall) OrderBy(orderBy string) *RegionInstanceGroupManagersListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *RegionInstanceGroupManagersListCall) PageToken(pageToken string) *RegionInstanceGroupManagersListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionInstanceGroupManagersResizeCall) RequestId(requestId string) *RegionInstanceGroupManagersResizeCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersListCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersListCall { +func (c *RegionInstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersResizeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionInstanceGroupManagersListCall) IfNoneMatch(entityTag string) *RegionInstanceGroupManagersListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersListCall) Context(ctx context.Context) *RegionInstanceGroupManagersListCall { +func (c *RegionInstanceGroupManagersResizeCall) Context(ctx context.Context) *RegionInstanceGroupManagersResizeCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionInstanceGroupManagersListCall) Header() http.Header { +func (c *RegionInstanceGroupManagersResizeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.list" call. -// Exactly one of *RegionInstanceGroupManagerList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *RegionInstanceGroupManagerList.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupManagerList, error) { +// Do executes the "compute.regionInstanceGroupManagers.resize" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -72861,7 +86595,7 @@ func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) ( if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RegionInstanceGroupManagerList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -72873,35 +86607,20 @@ func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Retrieves the list of managed instance groups that are contained within the specified region.", - // "httpMethod": "GET", - // "id": "compute.regionInstanceGroupManagers.list", + // "description": "Changes the intended size for the managed instance group. If you increase the size, the group schedules actions to create new instances using the current instance template. If you decrease the size, the group schedules delete actions on one or more instances. 
The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroupManagers.resize", // "parameterOrder": [ // "project", - // "region" + // "region", + // "instanceGroupManager", + // "size" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "instanceGroupManager": { + // "description": "Name of the managed instance group.", + // "location": "path", + // "required": true, // "type": "string" // }, // "project": { @@ -72916,94 +86635,80 @@ func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) ( // "location": "path", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "size": { + // "description": "Number of instances that should exist in this instance group manager.", + // "format": "int32", + // "location": "query", + // "minimum": "0", + // "required": true, + // "type": "integer" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize", // "response": { - // "$ref": "RegionInstanceGroupManagerList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *RegionInstanceGroupManagersListCall) Pages(ctx context.Context, f func(*RegionInstanceGroupManagerList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.regionInstanceGroupManagers.listManagedInstances": +// method id "compute.regionInstanceGroupManagers.setAutoHealingPolicies": -type RegionInstanceGroupManagersListManagedInstancesCall struct { - s *Service - project string - region string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersSetAutoHealingPoliciesCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagerssetautohealingrequest *RegionInstanceGroupManagersSetAutoHealingRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ListManagedInstances: Lists the instances in the managed instance -// group and instances that are scheduled to be created. The list -// includes any current actions that the group has scheduled for its -// instances. 
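The Resize builder above needs only the target size plus an optional request ID. A minimal caller sketch (again written as if inside the generated package, with illustrative names) might look like this; only methods shown in this hunk are used.

// resizeGroup asks the managed instance group to converge on the given size
// and returns the scheduling Operation.
func resizeGroup(ctx context.Context, rigm *RegionInstanceGroupManagersService, project, region, igm string, size int64, requestID string) (*Operation, error) {
	// size is sent as the "size" query parameter; the returned Operation is
	// marked DONE as soon as the create/delete actions are scheduled, not
	// when the instances actually exist or are gone.
	call := rigm.Resize(project, region, igm, size).Context(ctx)
	if requestID != "" {
		call = call.RequestId(requestID) // retry-safe: a duplicate with the same ID is ignored
	}
	return call.Do()
}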
-func (r *RegionInstanceGroupManagersService) ListManagedInstances(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersListManagedInstancesCall { - c := &RegionInstanceGroupManagersListManagedInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetAutoHealingPolicies: Modifies the autohealing policy for the +// instances in this managed instance group. +func (r *RegionInstanceGroupManagersService) SetAutoHealingPolicies(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssetautohealingrequest *RegionInstanceGroupManagersSetAutoHealingRequest) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { + c := &RegionInstanceGroupManagersSetAutoHealingPoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagerssetautohealingrequest = regioninstancegroupmanagerssetautohealingrequest return c } -// Filter sets the optional parameter "filter": -func (c *RegionInstanceGroupManagersListManagedInstancesCall) Filter(filter string) *RegionInstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": -func (c *RegionInstanceGroupManagersListManagedInstancesCall) MaxResults(maxResults int64) *RegionInstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "order_by": -func (c *RegionInstanceGroupManagersListManagedInstancesCall) OrderBy(orderBy string) *RegionInstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("order_by", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": -func (c *RegionInstanceGroupManagersListManagedInstancesCall) PageToken(pageToken string) *RegionInstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("pageToken", pageToken) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) RequestId(requestId string) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *RegionInstanceGroupManagersListManagedInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersListManagedInstancesCall { +func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -73011,29 +86716,34 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Fields(s ...google // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersListManagedInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersListManagedInstancesCall { +func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersListManagedInstancesCall) Header() http.Header { +func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssetautohealingrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -73045,16 +86755,14 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) doRequest(alt stri return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.listManagedInstances" call. -// Exactly one of *RegionInstanceGroupManagersListInstancesResponse or -// error will be non-nil. Any non-2xx status code is an error. Response -// headers are in either -// *RegionInstanceGroupManagersListInstancesResponse.ServerResponse.Heade -// r or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupManagersListInstancesResponse, error) { +// Do executes the "compute.regionInstanceGroupManagers.setAutoHealingPolicies" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -73073,7 +86781,7 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RegionInstanceGroupManagersListInstancesResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -73085,40 +86793,21 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea } return ret, nil // { - // "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances.", + // "description": "Modifies the autohealing policy for the instances in this managed instance group.", // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.listManagedInstances", + // "id": "compute.regionInstanceGroupManagers.setAutoHealingPolicies", // "parameterOrder": [ // "project", // "region", // "instanceGroupManager" // ], // "parameters": { - // "filter": { - // "location": "query", - // "type": "string" - // }, // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "description": "Name of the managed instance group.", // "location": "path", // "required": true, // "type": "string" // }, - // "maxResults": { - // "default": "500", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "order_by": { - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -73131,135 +86820,76 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea // "location": "path", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies", + // "request": { + // "$ref": "RegionInstanceGroupManagersSetAutoHealingRequest" + // }, // "response": { - // "$ref": "RegionInstanceGroupManagersListInstancesResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *RegionInstanceGroupManagersListManagedInstancesCall) Pages(ctx context.Context, f func(*RegionInstanceGroupManagersListInstancesResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.regionInstanceGroupManagers.listPerInstanceConfigs": +// method id "compute.regionInstanceGroupManagers.setInstanceTemplate": -type RegionInstanceGroupManagersListPerInstanceConfigsCall struct { - s *Service - project string - region string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersSetInstanceTemplateCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagerssettemplaterequest *RegionInstanceGroupManagersSetTemplateRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ListPerInstanceConfigs: Lists all of the per-instance configs defined -// for the managed instance group. -func (r *RegionInstanceGroupManagersService) ListPerInstanceConfigs(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersListPerInstanceConfigsCall { - c := &RegionInstanceGroupManagersListPerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetInstanceTemplate: Sets the instance template to use when creating +// new instances or recreating instances in this group. Existing +// instances are not affected. +func (r *RegionInstanceGroupManagersService) SetInstanceTemplate(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssettemplaterequest *RegionInstanceGroupManagersSetTemplateRequest) *RegionInstanceGroupManagersSetInstanceTemplateCall { + c := &RegionInstanceGroupManagersSetInstanceTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagerssettemplaterequest = regioninstancegroupmanagerssettemplaterequest return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. 
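A corresponding sketch for the new SetAutoHealingPolicies call. Because the fields of RegionInstanceGroupManagersSetAutoHealingRequest are not shown in this hunk, the request body is passed in pre-built; the helper and variable names are illustrative only.

// setAutoHealing applies a caller-supplied autohealing request to the group.
func setAutoHealing(ctx context.Context, rigm *RegionInstanceGroupManagersService, project, region, igm string, req *RegionInstanceGroupManagersSetAutoHealingRequest) (*Operation, error) {
	// The policy change applies to the group as a whole; the returned
	// Operation only reports that the modification was accepted.
	return rigm.SetAutoHealingPolicies(project, region, igm, req).
		Context(ctx).
		Do()
}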
Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Filter(filter string) *RegionInstanceGroupManagersListPerInstanceConfigsCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) MaxResults(maxResults int64) *RegionInstanceGroupManagersListPerInstanceConfigsCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. 
-func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) OrderBy(orderBy string) *RegionInstanceGroupManagersListPerInstanceConfigsCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) PageToken(pageToken string) *RegionInstanceGroupManagersListPerInstanceConfigsCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) RequestId(requestId string) *RegionInstanceGroupManagersSetInstanceTemplateCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersListPerInstanceConfigsCall { +func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetInstanceTemplateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -73267,29 +86897,34 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Fields(s ...goog // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Context(ctx context.Context) *RegionInstanceGroupManagersListPerInstanceConfigsCall { +func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetInstanceTemplateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Header() http.Header { +func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssettemplaterequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -73301,16 +86936,14 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) doRequest(alt st return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.listPerInstanceConfigs" call. -// Exactly one of *RegionInstanceGroupManagersListInstanceConfigsResp or -// error will be non-nil. Any non-2xx status code is an error. Response -// headers are in either -// *RegionInstanceGroupManagersListInstanceConfigsResp.ServerResponse.Hea -// der or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupManagersListInstanceConfigsResp, error) { +// Do executes the "compute.regionInstanceGroupManagers.setInstanceTemplate" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -73329,7 +86962,7 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googl if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RegionInstanceGroupManagersListInstanceConfigsResp{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -73341,44 +86974,21 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googl } return ret, nil // { - // "description": "Lists all of the per-instance configs defined for the managed instance group.", + // "description": "Sets the instance template to use when creating new instances or recreating instances in this group. Existing instances are not affected.", // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.listPerInstanceConfigs", + // "id": "compute.regionInstanceGroupManagers.setInstanceTemplate", // "parameterOrder": [ // "project", // "region", // "instanceGroupManager" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, // "instanceGroupManager": { - // "description": "The name of the managed instance group. It should conform to RFC1035.", + // "description": "The name of the managed instance group.", // "location": "path", // "required": true, // "type": "string" // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -73387,72 +86997,54 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googl // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request, should conform to RFC1035.", + // "description": "Name of the region scoping this request.", // "location": "path", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listPerInstanceConfigs", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", + // "request": { + // "$ref": "RegionInstanceGroupManagersSetTemplateRequest" + // }, // "response": { - // "$ref": "RegionInstanceGroupManagersListInstanceConfigsResp" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Pages(ctx context.Context, f func(*RegionInstanceGroupManagersListInstanceConfigsResp) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.regionInstanceGroupManagers.patch": - -type RegionInstanceGroupManagersPatchCall struct { - s *Service - project string - region string - instanceGroupManager string - instancegroupmanager *InstanceGroupManager - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header + // "https://www.googleapis.com/auth/compute" + // ] + // } + } -// Patch: Updates a managed instance group using the information that -// you specify in the request. This operation is marked as DONE when the -// group is patched even if the instances in the group are still in the -// process of being patched. You must separately verify the status of -// the individual instances with the listmanagedinstances method. This -// method supports PATCH semantics and uses the JSON merge patch format -// and processing rules. -func (r *RegionInstanceGroupManagersService) Patch(project string, region string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersPatchCall { - c := &RegionInstanceGroupManagersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// method id "compute.regionInstanceGroupManagers.setTargetPools": + +type RegionInstanceGroupManagersSetTargetPoolsCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagerssettargetpoolsrequest *RegionInstanceGroupManagersSetTargetPoolsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetTargetPools: Modifies the target pools to which all new instances +// in this group are assigned. Existing instances in the group are not +// affected. 
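For SetInstanceTemplate, a minimal sketch under the assumption that the request type carries an InstanceTemplate URL field (the field name is taken from the Compute API schema, not from this hunk). As the description above notes, existing instances keep their old template until they are recreated.

// setTemplate points the group at a new instance template for future
// creations and recreations.
func setTemplate(ctx context.Context, rigm *RegionInstanceGroupManagersService, project, region, igm, templateURL string) (*Operation, error) {
	req := &RegionInstanceGroupManagersSetTemplateRequest{
		InstanceTemplate: templateURL, // assumed field name: self-link of the new instance template
	}
	// New and recreated instances will use templateURL; see the
	// RecreateInstances sketch earlier to roll existing instances onto it.
	return rigm.SetInstanceTemplate(project, region, igm, req).
		Context(ctx).
		Do()
}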
+func (r *RegionInstanceGroupManagersService) SetTargetPools(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssettargetpoolsrequest *RegionInstanceGroupManagersSetTargetPoolsRequest) *RegionInstanceGroupManagersSetTargetPoolsCall { + c := &RegionInstanceGroupManagersSetTargetPoolsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.instanceGroupManager = instanceGroupManager - c.instancegroupmanager = instancegroupmanager + c.regioninstancegroupmanagerssettargetpoolsrequest = regioninstancegroupmanagerssettargetpoolsrequest return c } @@ -73470,7 +87062,7 @@ func (r *RegionInstanceGroupManagersService) Patch(project string, region string // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersPatchCall) RequestId(requestId string) *RegionInstanceGroupManagersPatchCall { +func (c *RegionInstanceGroupManagersSetTargetPoolsCall) RequestId(requestId string) *RegionInstanceGroupManagersSetTargetPoolsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -73478,7 +87070,7 @@ func (c *RegionInstanceGroupManagersPatchCall) RequestId(requestId string) *Regi // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersPatchCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersPatchCall { +func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetTargetPoolsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -73486,36 +87078,36 @@ func (c *RegionInstanceGroupManagersPatchCall) Fields(s ...googleapi.Field) *Reg // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersPatchCall) Context(ctx context.Context) *RegionInstanceGroupManagersPatchCall { +func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetTargetPoolsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersPatchCall) Header() http.Header { +func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssettargetpoolsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, @@ -73525,14 +87117,14 @@ func (c *RegionInstanceGroupManagersPatchCall) doRequest(alt string) (*http.Resp return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.patch" call. +// Do executes the "compute.regionInstanceGroupManagers.setTargetPools" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -73563,9 +87155,9 @@ func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listmanagedinstances method. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.regionInstanceGroupManagers.patch", + // "description": "Modifies the target pools to which all new instances in this group are assigned. 
Existing instances in the group are not affected.", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroupManagers.setTargetPools", // "parameterOrder": [ // "project", // "region", @@ -73573,7 +87165,7 @@ func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "instanceGroupManager": { - // "description": "The name of the instance group manager.", + // "description": "Name of the managed instance group.", // "location": "path", // "required": true, // "type": "string" @@ -73597,82 +87189,49 @@ func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", // "request": { - // "$ref": "InstanceGroupManager" + // "$ref": "RegionInstanceGroupManagersSetTargetPoolsRequest" // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.regionInstanceGroupManagers.recreateInstances": +// method id "compute.regionInstanceGroupManagers.testIamPermissions": -type RegionInstanceGroupManagersRecreateInstancesCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagersrecreaterequest *RegionInstanceGroupManagersRecreateRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// RecreateInstances: Schedules a group action to recreate the specified -// instances in the managed instance group. The instances are deleted -// and recreated using the current instance template for the managed -// instance group. This operation is marked as DONE when the action is -// scheduled even if the instances have not yet been recreated. You must -// separately verify the status of the recreating action with the -// listmanagedinstances method. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or -// deleted. -// -// You can specify a maximum of 1000 instances with this method per -// request. -func (r *RegionInstanceGroupManagersService) RecreateInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersrecreaterequest *RegionInstanceGroupManagersRecreateRequest) *RegionInstanceGroupManagersRecreateInstancesCall { - c := &RegionInstanceGroupManagersRecreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. 
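Finally, a sketch for SetTargetPools under the assumption that the request type exposes a TargetPools list of pool self-links (again not visible in this hunk). Per the description above, only instances created after the call pick up the new pools.

// setTargetPools reassigns the target pools used for newly created instances
// in the group.
func setTargetPools(ctx context.Context, rigm *RegionInstanceGroupManagersService, project, region, igm string, poolURLs []string) (*Operation, error) {
	req := &RegionInstanceGroupManagersSetTargetPoolsRequest{
		TargetPools: poolURLs, // assumed field name; existing instances are left untouched
	}
	return rigm.SetTargetPools(project, region, igm, req).
		Context(ctx).
		Do()
}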
+func (r *RegionInstanceGroupManagersService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionInstanceGroupManagersTestIamPermissionsCall { + c := &RegionInstanceGroupManagersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagersrecreaterequest = regioninstancegroupmanagersrecreaterequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersRecreateInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersRecreateInstancesCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersRecreateInstancesCall { +func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -73680,53 +87239,53 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersRecreateInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersRecreateInstancesCall { +func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Context(ctx context.Context) *RegionInstanceGroupManagersTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionInstanceGroupManagersRecreateInstancesCall) Header() http.Header { +func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersrecreaterequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.recreateInstances" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionInstanceGroupManagers.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -73745,7 +87304,7 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi. if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -73757,21 +87316,15 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi. } return ret, nil // { - // "description": "Schedules a group action to recreate the specified instances in the managed instance group. The instances are deleted and recreated using the current instance template for the managed instance group. 
This operation is marked as DONE when the action is scheduled even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.recreateInstances", + // "id": "compute.regionInstanceGroupManagers.testIamPermissions", // "parameterOrder": [ // "project", // "region", - // "instanceGroupManager" + // "resource" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "Name of the managed instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -73780,62 +87333,60 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi. // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "The name of the region for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", + // "path": "{project}/regions/{region}/instanceGroupManagers/{resource}/testIamPermissions", // "request": { - // "$ref": "RegionInstanceGroupManagersRecreateRequest" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionInstanceGroupManagers.resize": +// method id "compute.regionInstanceGroupManagers.update": -type RegionInstanceGroupManagersResizeCall struct { +type RegionInstanceGroupManagersUpdateCall struct { s *Service project string region string instanceGroupManager string + instancegroupmanager *InstanceGroupManager urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Resize: Changes the intended size for the managed instance group. If -// you increase the size, the group schedules actions to create new -// instances using the current instance template. If you decrease the -// size, the group schedules delete actions on one or more instances. -// The resize operation is marked DONE when the resize actions are -// scheduled even if the group has not yet added or deleted any -// instances. You must separately verify the status of the creating or -// deleting actions with the listmanagedinstances method. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or deleted. -func (r *RegionInstanceGroupManagersService) Resize(project string, region string, instanceGroupManager string, size int64) *RegionInstanceGroupManagersResizeCall { - c := &RegionInstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates a managed instance group using the information that +// you specify in the request. This operation is marked as DONE when the +// group is updated even if the instances in the group have not yet been +// updated. You must separately verify the status of the individual +// instances with the listmanagedinstances method. 
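A companion sketch for the PUT-style update call whose doc comment ends here: a read-modify-write against a regional managed instance group. It assumes the same imports and a *compute.Service constructed as in the setTargetPools sketch above; the Get call and the Description field come from the same generated service, and the resource names are again hypothetical.

// updateDescription fetches a regional managed instance group, changes one
// field, and writes the full resource back with the PUT-based Update call.
func updateDescription(ctx context.Context, svc *compute.Service, project, region, name string) error {
	igm, err := svc.RegionInstanceGroupManagers.Get(project, region, name).Context(ctx).Do()
	if err != nil {
		return err
	}
	igm.Description = "updated via compute.regionInstanceGroupManagers.update"

	op, err := svc.RegionInstanceGroupManagers.Update(project, region, name, igm).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Printf("operation %s is %s\n", op.Name, op.Status)
	return nil
}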
+func (r *RegionInstanceGroupManagersService) Update(project string, region string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersUpdateCall { + c := &RegionInstanceGroupManagersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.instanceGroupManager = instanceGroupManager - c.urlParams_.Set("size", fmt.Sprint(size)) + c.instancegroupmanager = instancegroupmanager return c } @@ -73853,7 +87404,7 @@ func (r *RegionInstanceGroupManagersService) Resize(project string, region strin // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersResizeCall) RequestId(requestId string) *RegionInstanceGroupManagersResizeCall { +func (c *RegionInstanceGroupManagersUpdateCall) RequestId(requestId string) *RegionInstanceGroupManagersUpdateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -73861,7 +87412,7 @@ func (c *RegionInstanceGroupManagersResizeCall) RequestId(requestId string) *Reg // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersResizeCall { +func (c *RegionInstanceGroupManagersUpdateCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -73869,31 +87420,36 @@ func (c *RegionInstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *Re // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersResizeCall) Context(ctx context.Context) *RegionInstanceGroupManagersResizeCall { +func (c *RegionInstanceGroupManagersUpdateCall) Context(ctx context.Context) *RegionInstanceGroupManagersUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersResizeCall) Header() http.Header { +func (c *RegionInstanceGroupManagersUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("PUT", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, @@ -73903,14 +87459,14 @@ func (c *RegionInstanceGroupManagersResizeCall) doRequest(alt string) (*http.Res return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.resize" call. +// Do executes the "compute.regionInstanceGroupManagers.update" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -73941,18 +87497,17 @@ func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Changes the intended size for the managed instance group. If you increase the size, the group schedules actions to create new instances using the current instance template. If you decrease the size, the group schedules delete actions on one or more instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.resize", + // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is updated even if the instances in the group have not yet been updated. You must separately verify the status of the individual instances with the listmanagedinstances method.", + // "httpMethod": "PUT", + // "id": "compute.regionInstanceGroupManagers.update", // "parameterOrder": [ // "project", // "region", - // "instanceGroupManager", - // "size" + // "instanceGroupManager" // ], // "parameters": { // "instanceGroupManager": { - // "description": "Name of the managed instance group.", + // "description": "The name of the instance group manager.", // "location": "path", // "required": true, // "type": "string" @@ -73974,17 +87529,12 @@ func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "size": { - // "description": "Number of instances that should exist in this instance group manager.", - // "format": "int32", - // "location": "query", - // "minimum": "0", - // "required": true, - // "type": "integer" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", + // "request": { + // "$ref": "InstanceGroupManager" + // }, // "response": { // "$ref": "Operation" // }, @@ -73996,53 +87546,36 @@ func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) } -// method id "compute.regionInstanceGroupManagers.setAutoHealingPolicies": +// method id "compute.regionInstanceGroupManagers.updatePerInstanceConfigs": -type RegionInstanceGroupManagersSetAutoHealingPoliciesCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagerssetautohealingrequest *RegionInstanceGroupManagersSetAutoHealingRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersUpdatePerInstanceConfigsCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagerupdateinstanceconfigreq *RegionInstanceGroupManagerUpdateInstanceConfigReq + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetAutoHealingPolicies: Modifies the autohealing policy for the -// instances in this managed instance group. -func (r *RegionInstanceGroupManagersService) SetAutoHealingPolicies(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssetautohealingrequest *RegionInstanceGroupManagersSetAutoHealingRequest) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { - c := &RegionInstanceGroupManagersSetAutoHealingPoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// UpdatePerInstanceConfigs: Insert or patch (for the ones that already +// exist) per-instance configs for the managed instance group. +// perInstanceConfig.instance serves as a key used to distinguish +// whether to perform insert or patch. +func (r *RegionInstanceGroupManagersService) UpdatePerInstanceConfigs(project string, region string, instanceGroupManager string, regioninstancegroupmanagerupdateinstanceconfigreq *RegionInstanceGroupManagerUpdateInstanceConfigReq) *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall { + c := &RegionInstanceGroupManagersUpdatePerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagerssetautohealingrequest = regioninstancegroupmanagerssetautohealingrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. 
If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) RequestId(requestId string) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { - c.urlParams_.Set("requestId", requestId) + c.regioninstancegroupmanagerupdateinstanceconfigreq = regioninstancegroupmanagerupdateinstanceconfigreq return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { +func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -74050,34 +87583,34 @@ func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Fields(s ...goog // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { +func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Context(ctx context.Context) *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Header() http.Header { +func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssetautohealingrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerupdateinstanceconfigreq) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -74089,14 +87622,14 @@ func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) doRequest(alt st return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.setAutoHealingPolicies" call. 
+// Do executes the "compute.regionInstanceGroupManagers.updatePerInstanceConfigs" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -74127,9 +87660,9 @@ func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googl } return ret, nil // { - // "description": "Modifies the autohealing policy for the instances in this managed instance group.", + // "description": "Insert or patch (for the ones that already exist) per-instance configs for the managed instance group. perInstanceConfig.instance serves as a key used to distinguish whether to perform insert or patch.", // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.setAutoHealingPolicies", + // "id": "compute.regionInstanceGroupManagers.updatePerInstanceConfigs", // "parameterOrder": [ // "project", // "region", @@ -74137,7 +87670,7 @@ func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googl // ], // "parameters": { // "instanceGroupManager": { - // "description": "Name of the managed instance group.", + // "description": "The name of the managed instance group. It should conform to RFC1035.", // "location": "path", // "required": true, // "type": "string" @@ -74150,20 +87683,15 @@ func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googl // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region scoping this request, should conform to RFC1035.", // "location": "path", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs", // "request": { - // "$ref": "RegionInstanceGroupManagersSetAutoHealingRequest" + // "$ref": "RegionInstanceGroupManagerUpdateInstanceConfigReq" // }, // "response": { // "$ref": "Operation" @@ -74176,108 +87704,94 @@ func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googl } -// method id "compute.regionInstanceGroupManagers.setInstanceTemplate": +// method id "compute.regionInstanceGroups.get": -type RegionInstanceGroupManagersSetInstanceTemplateCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagerssettemplaterequest *RegionInstanceGroupManagersSetTemplateRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupsGetCall struct { + s *Service + project string + region string + instanceGroup string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetInstanceTemplate: Sets the instance template to use when creating -// new instances or recreating instances in this group. Existing -// instances are not affected. -func (r *RegionInstanceGroupManagersService) SetInstanceTemplate(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssettemplaterequest *RegionInstanceGroupManagersSetTemplateRequest) *RegionInstanceGroupManagersSetInstanceTemplateCall { - c := &RegionInstanceGroupManagersSetInstanceTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified instance group resource. +func (r *RegionInstanceGroupsService) Get(project string, region string, instanceGroup string) *RegionInstanceGroupsGetCall { + c := &RegionInstanceGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagerssettemplaterequest = regioninstancegroupmanagerssettemplaterequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) RequestId(requestId string) *RegionInstanceGroupManagersSetInstanceTemplateCall { - c.urlParams_.Set("requestId", requestId) + c.instanceGroup = instanceGroup return c } // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetInstanceTemplateCall { +func (c *RegionInstanceGroupsGetCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionInstanceGroupsGetCall) IfNoneMatch(entityTag string) *RegionInstanceGroupsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetInstanceTemplateCall { +func (c *RegionInstanceGroupsGetCall) Context(ctx context.Context) *RegionInstanceGroupsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Header() http.Header { +func (c *RegionInstanceGroupsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssettemplaterequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{instanceGroup}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.setInstanceTemplate" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.regionInstanceGroups.get" call. +// Exactly one of *InstanceGroup or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// *InstanceGroup.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -74296,7 +87810,7 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleap if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InstanceGroup{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -74308,17 +87822,17 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleap } return ret, nil // { - // "description": "Sets the instance template to use when creating new instances or recreating instances in this group. Existing instances are not affected.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.setInstanceTemplate", + // "description": "Returns the specified instance group resource.", + // "httpMethod": "GET", + // "id": "compute.regionInstanceGroups.get", // "parameterOrder": [ // "project", // "region", - // "instanceGroupManager" + // "instanceGroup" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "instanceGroup": { + // "description": "Name of the instance group resource to return.", // "location": "path", // "required": true, // "type": "string" @@ -74335,130 +87849,174 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleap // "location": "path", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", - // "request": { - // "$ref": "RegionInstanceGroupManagersSetTemplateRequest" - // }, + // "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}", // "response": { - // "$ref": "Operation" + // "$ref": "InstanceGroup" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionInstanceGroupManagers.setTargetPools": - -type RegionInstanceGroupManagersSetTargetPoolsCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagerssettargetpoolsrequest *RegionInstanceGroupManagersSetTargetPoolsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} +// method id "compute.regionInstanceGroups.list": -// SetTargetPools: Modifies the target pools to which all new instances -// in this group are assigned. Existing instances in the group are not -// affected. -func (r *RegionInstanceGroupManagersService) SetTargetPools(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssettargetpoolsrequest *RegionInstanceGroupManagersSetTargetPoolsRequest) *RegionInstanceGroupManagersSetTargetPoolsCall { - c := &RegionInstanceGroupManagersSetTargetPoolsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +type RegionInstanceGroupsListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves the list of instance group resources contained within +// the specified region. +func (r *RegionInstanceGroupsService) List(project string, region string) *RegionInstanceGroupsListCall { + c := &RegionInstanceGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagerssettargetpoolsrequest = regioninstancegroupmanagerssettargetpoolsrequest return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). 
The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersSetTargetPoolsCall) RequestId(requestId string) *RegionInstanceGroupManagersSetTargetPoolsCall { - c.urlParams_.Set("requestId", requestId) +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *RegionInstanceGroupsListCall) Filter(filter string) *RegionInstanceGroupsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *RegionInstanceGroupsListCall) MaxResults(maxResults int64) *RegionInstanceGroupsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *RegionInstanceGroupsListCall) OrderBy(orderBy string) *RegionInstanceGroupsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *RegionInstanceGroupsListCall) PageToken(pageToken string) *RegionInstanceGroupsListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetTargetPoolsCall { +func (c *RegionInstanceGroupsListCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionInstanceGroupsListCall) IfNoneMatch(entityTag string) *RegionInstanceGroupsListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetTargetPoolsCall { +func (c *RegionInstanceGroupsListCall) Context(ctx context.Context) *RegionInstanceGroupsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Header() http.Header { +func (c *RegionInstanceGroupsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssettargetpoolsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.setTargetPools" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionInstanceGroups.list" call. +// Exactly one of *RegionInstanceGroupList or error will be non-nil. 
Any +// non-2xx status code is an error. Response headers are in either +// *RegionInstanceGroupList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -74477,7 +88035,7 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.Cal if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &RegionInstanceGroupList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -74489,19 +88047,35 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.Cal } return ret, nil // { - // "description": "Modifies the target pools to which all new instances in this group are assigned. Existing instances in the group are not affected.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.setTargetPools", + // "description": "Retrieves the list of instance group resources contained within the specified region.", + // "httpMethod": "GET", + // "id": "compute.regionInstanceGroups.list", // "parameterOrder": [ // "project", - // "region", - // "instanceGroupManager" + // "region" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "Name of the managed instance group.", - // "location": "path", - // "required": true, + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -74516,56 +88090,139 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.Cal // "location": "path", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", - // "request": { - // "$ref": "RegionInstanceGroupManagersSetTargetPoolsRequest" - // }, + // "path": "{project}/regions/{region}/instanceGroups", // "response": { - // "$ref": "Operation" + // "$ref": "RegionInstanceGroupList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionInstanceGroupManagers.testIamPermissions": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
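Because the Pages helper documented just above follows nextPageToken internally, walking every regional instance group reduces to a single callback. A sketch under the same assumptions as the earlier snippets (client already constructed; project and region hypothetical); the eq/ne filter grammar is the one described in the filter parameter documentation.

// listRegionInstanceGroups walks every page of compute.regionInstanceGroups.list
// via the Pages helper, which keeps requesting pages until nextPageToken is empty.
func listRegionInstanceGroups(ctx context.Context, svc *compute.Service, project, region string) error {
	call := svc.RegionInstanceGroups.List(project, region).
		Filter("name ne example-group"). // eq/ne filter grammar from the parameter docs
		MaxResults(100)

	return call.Pages(ctx, func(page *compute.RegionInstanceGroupList) error {
		for _, ig := range page.Items {
			fmt.Println(ig.Name, ig.Size)
		}
		return nil // returning a non-nil error would halt the iteration
	})
}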
+func (c *RegionInstanceGroupsListCall) Pages(ctx context.Context, f func(*RegionInstanceGroupList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type RegionInstanceGroupManagersTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.regionInstanceGroups.listInstances": + +type RegionInstanceGroupsListInstancesCall struct { + s *Service + project string + region string + instanceGroup string + regioninstancegroupslistinstancesrequest *RegionInstanceGroupsListInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *RegionInstanceGroupManagersService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionInstanceGroupManagersTestIamPermissionsCall { - c := &RegionInstanceGroupManagersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListInstances: Lists the instances in the specified instance group +// and displays information about the named ports. Depending on the +// specified options, this method can list all instances or only the +// instances that are running. +func (r *RegionInstanceGroupsService) ListInstances(project string, region string, instanceGroup string, regioninstancegroupslistinstancesrequest *RegionInstanceGroupsListInstancesRequest) *RegionInstanceGroupsListInstancesCall { + c := &RegionInstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.instanceGroup = instanceGroup + c.regioninstancegroupslistinstancesrequest = regioninstancegroupslistinstancesrequest + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. 
For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *RegionInstanceGroupsListInstancesCall) Filter(filter string) *RegionInstanceGroupsListInstancesCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *RegionInstanceGroupsListInstancesCall) MaxResults(maxResults int64) *RegionInstanceGroupsListInstancesCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *RegionInstanceGroupsListInstancesCall) OrderBy(orderBy string) *RegionInstanceGroupsListInstancesCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *RegionInstanceGroupsListInstancesCall) PageToken(pageToken string) *RegionInstanceGroupsListInstancesCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersTestIamPermissionsCall { +func (c *RegionInstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsListInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -74573,53 +88230,54 @@ func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Fields(s ...googleap // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Context(ctx context.Context) *RegionInstanceGroupManagersTestIamPermissionsCall { +func (c *RegionInstanceGroupsListInstancesCall) Context(ctx context.Context) *RegionInstanceGroupsListInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
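For reference, a minimal sketch of how a caller might drive the RegionInstanceGroups.ListInstances call and the Filter/MaxResults setters added above, paging with the Pages helper that appears further down in this diff. The import path (v0.beta is assumed here), the compute.New constructor, the Service.RegionInstanceGroups field, the InstanceState request field and the Items/Instance response fields are not shown in this diff and are taken from the upstream Compute API schema; project, region and group names are placeholders.

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v0.beta" // assumed surface; adjust to the vendored path
)

func main() {
	ctx := context.Background()

	// Application Default Credentials; the scope constant comes from the generated package.
	client, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}

	// Placeholders: substitute a real project, region and regional instance group.
	project, region, group := "my-project", "us-central1", "my-rig"

	// Restrict to running instances; InstanceState is assumed from the upstream request schema.
	req := &compute.RegionInstanceGroupsListInstancesRequest{InstanceState: "RUNNING"}

	call := svc.RegionInstanceGroups.ListInstances(project, region, group, req).
		Filter("name ne example-instance"). // eq/ne filter syntax, as described above
		MaxResults(100)                     // page size; the server caps this at 500

	// Pages follows nextPageToken for us and stops as soon as the callback errors.
	err = call.Pages(ctx, func(page *compute.RegionInstanceGroupsListInstances) error {
		for _, inst := range page.Items { // Items/Instance assumed from the schema
			fmt.Println(inst.Instance)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}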
-func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Header() http.Header { +func (c *RegionInstanceGroupsListInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupslistinstancesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, + "region": c.region, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.regionInstanceGroups.listInstances" call. +// Exactly one of *RegionInstanceGroupsListInstances or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *RegionInstanceGroupsListInstances.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupsListInstances, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -74638,7 +88296,7 @@ func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &RegionInstanceGroupsListInstances{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -74650,43 +88308,64 @@ func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Lists the instances in the specified instance group and displays information about the named ports. 
Depending on the specified options, this method can list all instances or only the instances that are running.", // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.testIamPermissions", + // "id": "compute.regionInstanceGroups.listInstances", // "parameterOrder": [ // "project", // "region", - // "resource" + // "instanceGroup" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "instanceGroup": { + // "description": "Name of the regional instance group for which we want to list the instances.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "region": { - // "description": "The name of the region for this request.", + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name of the resource for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{resource}/testIamPermissions", + // "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "RegionInstanceGroupsListInstancesRequest" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "RegionInstanceGroupsListInstances" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -74697,30 +88376,48 @@ func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi } -// method id "compute.regionInstanceGroupManagers.update": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionInstanceGroupsListInstancesCall) Pages(ctx context.Context, f func(*RegionInstanceGroupsListInstances) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type RegionInstanceGroupManagersUpdateCall struct { - s *Service - project string - region string - instanceGroupManager string - instancegroupmanager *InstanceGroupManager - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.regionInstanceGroups.setNamedPorts": + +type RegionInstanceGroupsSetNamedPortsCall struct { + s *Service + project string + region string + instanceGroup string + regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates a managed instance group using the information that -// you specify in the request. This operation is marked as DONE when the -// group is updated even if the instances in the group have not yet been -// updated. You must separately verify the status of the individual -// instances with the listmanagedinstances method. -func (r *RegionInstanceGroupManagersService) Update(project string, region string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersUpdateCall { - c := &RegionInstanceGroupManagersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetNamedPorts: Sets the named ports for the specified regional +// instance group. 
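A possible usage sketch for the SetNamedPorts call whose generated body follows. The NamedPort type, its Name/Port fields, the Fingerprint request field and the Operation.Name field are assumed from the upstream schema rather than shown in this diff; svc is a *compute.Service built as in the earlier sketch, and the UUID is a placeholder.

package main

import (
	"log"

	compute "google.golang.org/api/compute/v0.beta" // assumed surface
)

// setHTTPPort maps the service name "http" to port 8080 on a regional
// instance group. fingerprint should come from a prior read of the group
// so the server can reject concurrent updates.
func setHTTPPort(svc *compute.Service, project, region, group, fingerprint string) {
	req := &compute.RegionInstanceGroupsSetNamedPortsRequest{
		Fingerprint: fingerprint, // assumed field, per the upstream request schema
		NamedPorts: []*compute.NamedPort{
			{Name: "http", Port: 8080},
		},
	}

	// RequestId makes the call safe to retry: a request repeated with the
	// same UUID is ignored by the server, as the comment below describes.
	op, err := svc.RegionInstanceGroups.SetNamedPorts(project, region, group, req).
		RequestId("9a2b7e9e-0000-4000-8000-000000000001"). // placeholder UUID
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("started operation %s", op.Name)
}

The returned Operation can then be polled with the regionOperations.get call added later in this diff.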
+func (r *RegionInstanceGroupsService) SetNamedPorts(project string, region string, instanceGroup string, regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest) *RegionInstanceGroupsSetNamedPortsCall { + c := &RegionInstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanager = instancegroupmanager + c.instanceGroup = instanceGroup + c.regioninstancegroupssetnamedportsrequest = regioninstancegroupssetnamedportsrequest return c } @@ -74738,7 +88435,7 @@ func (r *RegionInstanceGroupManagersService) Update(project string, region strin // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersUpdateCall) RequestId(requestId string) *RegionInstanceGroupManagersUpdateCall { +func (c *RegionInstanceGroupsSetNamedPortsCall) RequestId(requestId string) *RegionInstanceGroupsSetNamedPortsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -74746,7 +88443,7 @@ func (c *RegionInstanceGroupManagersUpdateCall) RequestId(requestId string) *Reg // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersUpdateCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersUpdateCall { +func (c *RegionInstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsSetNamedPortsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -74754,53 +88451,53 @@ func (c *RegionInstanceGroupManagersUpdateCall) Fields(s ...googleapi.Field) *Re // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersUpdateCall) Context(ctx context.Context) *RegionInstanceGroupManagersUpdateCall { +func (c *RegionInstanceGroupsSetNamedPortsCall) Context(ctx context.Context) *RegionInstanceGroupsSetNamedPortsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersUpdateCall) Header() http.Header { +func (c *RegionInstanceGroupsSetNamedPortsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupssetnamedportsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.update" call. +// Do executes the "compute.regionInstanceGroups.setNamedPorts" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -74831,17 +88528,17 @@ func (c *RegionInstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is updated even if the instances in the group have not yet been updated. You must separately verify the status of the individual instances with the listmanagedinstances method.", - // "httpMethod": "PUT", - // "id": "compute.regionInstanceGroupManagers.update", + // "description": "Sets the named ports for the specified regional instance group.", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroups.setNamedPorts", // "parameterOrder": [ // "project", // "region", - // "instanceGroupManager" + // "instanceGroup" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the instance group manager.", + // "instanceGroup": { + // "description": "The name of the regional instance group where the named ports are updated.", // "location": "path", // "required": true, // "type": "string" @@ -74865,9 +88562,9 @@ func (c *RegionInstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", + // "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts", // "request": { - // "$ref": "InstanceGroupManager" + // "$ref": "RegionInstanceGroupsSetNamedPortsRequest" // }, // "response": { // "$ref": "Operation" @@ -74880,36 +88577,34 @@ func (c *RegionInstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) } -// method id "compute.regionInstanceGroupManagers.updatePerInstanceConfigs": +// method id "compute.regionInstanceGroups.testIamPermissions": -type RegionInstanceGroupManagersUpdatePerInstanceConfigsCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagerupdateinstanceconfigreq *RegionInstanceGroupManagerUpdateInstanceConfigReq - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupsTestIamPermissionsCall struct { + s *Service + project string + region string + 
resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// UpdatePerInstanceConfigs: Insert or patch (for the ones that already -// exist) per-instance configs for the managed instance group. -// perInstanceConfig.instance serves as a key used to distinguish -// whether to perform insert or patch. -func (r *RegionInstanceGroupManagersService) UpdatePerInstanceConfigs(project string, region string, instanceGroupManager string, regioninstancegroupmanagerupdateinstanceconfigreq *RegionInstanceGroupManagerUpdateInstanceConfigReq) *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall { - c := &RegionInstanceGroupManagersUpdatePerInstanceConfigsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *RegionInstanceGroupsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionInstanceGroupsTestIamPermissionsCall { + c := &RegionInstanceGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagerupdateinstanceconfigreq = regioninstancegroupmanagerupdateinstanceconfigreq + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall { +func (c *RegionInstanceGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -74917,53 +88612,53 @@ func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Fields(s ...go // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Context(ctx context.Context) *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall { +func (c *RegionInstanceGroupsTestIamPermissionsCall) Context(ctx context.Context) *RegionInstanceGroupsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
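As an illustration of the new regionInstanceGroups.testIamPermissions surface declared above, a small sketch of checking a caller's effective permissions. The Permissions fields on TestPermissionsRequest/TestPermissionsResponse are assumed from the upstream schema, the permission strings are illustrative, and svc is a *compute.Service built as in the earlier sketch.

package main

import (
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v0.beta" // assumed surface
)

// whichPermissions reports which of the asked-for permissions the caller
// actually holds on a regional instance group.
func whichPermissions(svc *compute.Service, project, region, group string) {
	req := &compute.TestPermissionsRequest{
		Permissions: []string{
			"compute.instanceGroups.get",
			"compute.instanceGroups.update",
		},
	}
	resp, err := svc.RegionInstanceGroups.TestIamPermissions(project, region, group, req).Do()
	if err != nil {
		log.Fatal(err)
	}
	// The response echoes back only the permissions the caller has.
	fmt.Println(resp.Permissions)
}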
-func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Header() http.Header { +func (c *RegionInstanceGroupsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerupdateinstanceconfigreq) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.updatePerInstanceConfigs" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionInstanceGroups.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionInstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -74982,7 +88677,7 @@ func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...goo if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -74994,21 +88689,15 @@ func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...goo } return ret, nil // { - // "description": "Insert or patch (for the ones that already exist) per-instance configs for the managed instance group. 
perInstanceConfig.instance serves as a key used to distinguish whether to perform insert or patch.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.updatePerInstanceConfigs", + // "id": "compute.regionInstanceGroups.testIamPermissions", // "parameterOrder": [ // "project", // "region", - // "instanceGroupManager" + // "resource" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group. It should conform to RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -75017,157 +88706,129 @@ func (c *RegionInstanceGroupManagersUpdatePerInstanceConfigsCall) Do(opts ...goo // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request, should conform to RFC1035.", + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name of the resource for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/updatePerInstanceConfigs", + // "path": "{project}/regions/{region}/instanceGroups/{resource}/testIamPermissions", // "request": { - // "$ref": "RegionInstanceGroupManagerUpdateInstanceConfigReq" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionInstanceGroups.get": +// method id "compute.regionOperations.delete": -type RegionInstanceGroupsGetCall struct { - s *Service - project string - region string - instanceGroup string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionOperationsDeleteCall struct { + s *Service + project string + region string + operation string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified instance group resource. -func (r *RegionInstanceGroupsService) Get(project string, region string, instanceGroup string) *RegionInstanceGroupsGetCall { - c := &RegionInstanceGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified region-specific Operations resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/delete +func (r *RegionOperationsService) Delete(project string, region string, operation string) *RegionOperationsDeleteCall { + c := &RegionOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroup = instanceGroup + c.operation = operation return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
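For the regionOperations.delete call introduced above, whose Do returns only an error, a minimal sketch of pruning a finished operation. Deleting an operation is housekeeping only; it does not cancel anything. The Service.RegionOperations field and import path are assumptions carried over from the earlier sketch.

package main

import (
	"log"

	compute "google.golang.org/api/compute/v0.beta" // assumed surface
)

// pruneOperation removes a finished region-scoped operation from the
// project's operations list.
func pruneOperation(svc *compute.Service, project, region, opName string) {
	if err := svc.RegionOperations.Delete(project, region, opName).Do(); err != nil {
		log.Fatalf("delete operation %s: %v", opName, err)
	}
}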
-func (c *RegionInstanceGroupsGetCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsGetCall { +func (c *RegionOperationsDeleteCall) Fields(s ...googleapi.Field) *RegionOperationsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionInstanceGroupsGetCall) IfNoneMatch(entityTag string) *RegionInstanceGroupsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupsGetCall) Context(ctx context.Context) *RegionInstanceGroupsGetCall { +func (c *RegionOperationsDeleteCall) Context(ctx context.Context) *RegionOperationsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupsGetCall) Header() http.Header { +func (c *RegionOperationsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{instanceGroup}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations/{operation}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroup": c.instanceGroup, + "project": c.project, + "region": c.region, + "operation": c.operation, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroups.get" call. -// Exactly one of *InstanceGroup or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *InstanceGroup.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup, error) { +// Do executes the "compute.regionOperations.delete" call. +func (c *RegionOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } if err != nil { - return nil, err + return err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &InstanceGroup{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err + return err } - return ret, nil + return nil // { - // "description": "Returns the specified instance group resource.", - // "httpMethod": "GET", - // "id": "compute.regionInstanceGroups.get", + // "description": "Deletes the specified region-specific Operations resource.", + // "httpMethod": "DELETE", + // "id": "compute.regionOperations.delete", // "parameterOrder": [ // "project", // "region", - // "instanceGroup" + // "operation" // ], // "parameters": { - // "instanceGroup": { - // "description": "Name of the instance group resource to return.", + // "operation": { + // "description": "Name of the Operations resource to delete.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -75179,117 +88840,49 @@ func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*Instanc // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}", - // "response": { - // "$ref": "InstanceGroup" - // }, + // "path": "{project}/regions/{region}/operations/{operation}", // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.regionInstanceGroups.list": +// method id "compute.regionOperations.get": -type RegionInstanceGroupsListCall struct { +type RegionOperationsGetCall struct { s *Service project string region string + operation string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves the list of instance group resources contained within -// the specified region. -func (r *RegionInstanceGroupsService) List(project string, region string) *RegionInstanceGroupsListCall { - c := &RegionInstanceGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Retrieves the specified region-specific Operations resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/get +func (r *RegionOperationsService) Get(project string, region string, operation string) *RegionOperationsGetCall { + c := &RegionOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - return c -} - -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. 
Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *RegionInstanceGroupsListCall) Filter(filter string) *RegionInstanceGroupsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *RegionInstanceGroupsListCall) MaxResults(maxResults int64) *RegionInstanceGroupsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *RegionInstanceGroupsListCall) OrderBy(orderBy string) *RegionInstanceGroupsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *RegionInstanceGroupsListCall) PageToken(pageToken string) *RegionInstanceGroupsListCall { - c.urlParams_.Set("pageToken", pageToken) + c.operation = operation return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *RegionInstanceGroupsListCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsListCall { +func (c *RegionOperationsGetCall) Fields(s ...googleapi.Field) *RegionOperationsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -75299,7 +88892,7 @@ func (c *RegionInstanceGroupsListCall) Fields(s ...googleapi.Field) *RegionInsta // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionInstanceGroupsListCall) IfNoneMatch(entityTag string) *RegionInstanceGroupsListCall { +func (c *RegionOperationsGetCall) IfNoneMatch(entityTag string) *RegionOperationsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -75307,21 +88900,21 @@ func (c *RegionInstanceGroupsListCall) IfNoneMatch(entityTag string) *RegionInst // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupsListCall) Context(ctx context.Context) *RegionInstanceGroupsListCall { +func (c *RegionOperationsGetCall) Context(ctx context.Context) *RegionOperationsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupsListCall) Header() http.Header { +func (c *RegionOperationsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -75332,25 +88925,26 @@ func (c *RegionInstanceGroupsListCall) doRequest(alt string) (*http.Response, er } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations/{operation}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "operation": c.operation, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroups.list" call. -// Exactly one of *RegionInstanceGroupList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *RegionInstanceGroupList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupList, error) { +// Do executes the "compute.regionOperations.get" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
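A sketch of how the regionOperations.get call above is typically used: polling a region-scoped Operation until it reports DONE. The Status and Error fields (and the nested Errors slice) are assumed from the upstream Operation schema rather than shown in this diff; svc is a *compute.Service built as in the earlier sketch.

package main

import (
	"fmt"
	"log"
	"time"

	compute "google.golang.org/api/compute/v0.beta" // assumed surface
)

// waitRegionOp polls a region-scoped operation until it is DONE, and
// surfaces the first recorded error if the operation failed.
func waitRegionOp(svc *compute.Service, project, region, opName string) error {
	for {
		op, err := svc.RegionOperations.Get(project, region, opName).Do()
		if err != nil {
			return err
		}
		if op.Status == "DONE" { // assumed field, per the upstream Operation schema
			if op.Error != nil && len(op.Error.Errors) > 0 {
				return fmt.Errorf("operation %s failed: %s", opName, op.Error.Errors[0].Message)
			}
			return nil
		}
		log.Printf("operation %s is %s; retrying", opName, op.Status)
		time.Sleep(2 * time.Second) // simple fixed delay; a real caller would back off
	}
}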
+func (c *RegionOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -75369,7 +88963,7 @@ func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*Region if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RegionInstanceGroupList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -75381,35 +88975,20 @@ func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*Region } return ret, nil // { - // "description": "Retrieves the list of instance group resources contained within the specified region.", + // "description": "Retrieves the specified region-specific Operations resource.", // "httpMethod": "GET", - // "id": "compute.regionInstanceGroups.list", + // "id": "compute.regionOperations.get", // "parameterOrder": [ // "project", - // "region" + // "region", + // "operation" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "operation": { + // "description": "Name of the Operations resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, // "project": { @@ -75420,15 +88999,16 @@ func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*Region // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroups", + // "path": "{project}/regions/{region}/operations/{operation}", // "response": { - // "$ref": "RegionInstanceGroupList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -75439,50 +89019,25 @@ func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*Region } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *RegionInstanceGroupsListCall) Pages(ctx context.Context, f func(*RegionInstanceGroupList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.regionInstanceGroups.listInstances": +// method id "compute.regionOperations.list": -type RegionInstanceGroupsListInstancesCall struct { - s *Service - project string - region string - instanceGroup string - regioninstancegroupslistinstancesrequest *RegionInstanceGroupsListInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionOperationsListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// ListInstances: Lists the instances in the specified instance group -// and displays information about the named ports. Depending on the -// specified options, this method can list all instances or only the -// instances that are running. -func (r *RegionInstanceGroupsService) ListInstances(project string, region string, instanceGroup string, regioninstancegroupslistinstancesrequest *RegionInstanceGroupsListInstancesRequest) *RegionInstanceGroupsListInstancesCall { - c := &RegionInstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of Operation resources contained within the +// specified region. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/list +func (r *RegionOperationsService) List(project string, region string) *RegionOperationsListCall { + c := &RegionOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroup = instanceGroup - c.regioninstancegroupslistinstancesrequest = regioninstancegroupslistinstancesrequest return c } @@ -75512,7 +89067,7 @@ func (r *RegionInstanceGroupsService) ListInstances(project string, region strin // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *RegionInstanceGroupsListInstancesCall) Filter(filter string) *RegionInstanceGroupsListInstancesCall { +func (c *RegionOperationsListCall) Filter(filter string) *RegionOperationsListCall { c.urlParams_.Set("filter", filter) return c } @@ -75523,7 +89078,7 @@ func (c *RegionInstanceGroupsListInstancesCall) Filter(filter string) *RegionIns // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *RegionInstanceGroupsListInstancesCall) MaxResults(maxResults int64) *RegionInstanceGroupsListInstancesCall { +func (c *RegionOperationsListCall) MaxResults(maxResults int64) *RegionOperationsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -75540,7 +89095,7 @@ func (c *RegionInstanceGroupsListInstancesCall) MaxResults(maxResults int64) *Re // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *RegionInstanceGroupsListInstancesCall) OrderBy(orderBy string) *RegionInstanceGroupsListInstancesCall { +func (c *RegionOperationsListCall) OrderBy(orderBy string) *RegionOperationsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -75548,7 +89103,7 @@ func (c *RegionInstanceGroupsListInstancesCall) OrderBy(orderBy string) *RegionI // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *RegionInstanceGroupsListInstancesCall) PageToken(pageToken string) *RegionInstanceGroupsListInstancesCall { +func (c *RegionOperationsListCall) PageToken(pageToken string) *RegionOperationsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -75556,62 +89111,68 @@ func (c *RegionInstanceGroupsListInstancesCall) PageToken(pageToken string) *Reg // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsListInstancesCall { +func (c *RegionOperationsListCall) Fields(s ...googleapi.Field) *RegionOperationsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
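A sketch of driving the regionOperations.list call above with the documented filter and ordering parameters, walking pages by hand via PageToken/NextPageToken. The Items, NextPageToken, InsertTime, Name and HttpErrorStatusCode fields are assumed from the upstream OperationList/Operation schema; svc is built as in the earlier sketch.

package main

import (
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v0.beta" // assumed surface
)

// recentFailedOps prints the newest completed operations in a region that
// ended with an HTTP error status.
func recentFailedOps(svc *compute.Service, project, region string) {
	call := svc.RegionOperations.List(project, region).
		Filter("status eq DONE").          // eq/ne filter on an atomic field, per the description above
		OrderBy("creationTimestamp desc"). // newest first, per the orderBy description
		MaxResults(50)

	token := ""
	for {
		if token != "" {
			call.PageToken(token)
		}
		page, err := call.Do()
		if err != nil {
			log.Fatal(err)
		}
		for _, op := range page.Items { // Items assumed from the OperationList schema
			if op.HttpErrorStatusCode >= 400 {
				fmt.Printf("%s %s -> %d\n", op.InsertTime, op.Name, op.HttpErrorStatusCode)
			}
		}
		if page.NextPageToken == "" {
			return
		}
		token = page.NextPageToken
	}
}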
+func (c *RegionOperationsListCall) IfNoneMatch(entityTag string) *RegionOperationsListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupsListInstancesCall) Context(ctx context.Context) *RegionInstanceGroupsListInstancesCall { +func (c *RegionOperationsListCall) Context(ctx context.Context) *RegionOperationsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupsListInstancesCall) Header() http.Header { +func (c *RegionOperationsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupslistinstancesrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroup": c.instanceGroup, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroups.listInstances" call. -// Exactly one of *RegionInstanceGroupsListInstances or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *RegionInstanceGroupsListInstances.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupsListInstances, error) { +// Do executes the "compute.regionOperations.list" call. +// Exactly one of *OperationList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *OperationList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -75630,7 +89191,7 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RegionInstanceGroupsListInstances{ + ret := &OperationList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -75642,13 +89203,12 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Lists the instances in the specified instance group and displays information about the named ports. Depending on the specified options, this method can list all instances or only the instances that are running.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroups.listInstances", + // "description": "Retrieves a list of Operation resources contained within the specified region.", + // "httpMethod": "GET", + // "id": "compute.regionOperations.list", // "parameterOrder": [ // "project", - // "region", - // "instanceGroup" + // "region" // ], // "parameters": { // "filter": { @@ -75656,12 +89216,6 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) // "location": "query", // "type": "string" // }, - // "instanceGroup": { - // "description": "Name of the regional instance group for which we want to list the instances.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "maxResults": { // "default": "500", // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", @@ -75688,18 +89242,16 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", - // "request": { - // "$ref": "RegionInstanceGroupsListInstancesRequest" - // }, + // "path": "{project}/regions/{region}/operations", // "response": { - // "$ref": "RegionInstanceGroupsListInstances" + // "$ref": "OperationList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -75713,7 +89265,7 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
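The Pages helper whose doc comment ends just above (and whose body follows) handles the same token bookkeeping automatically, deferring a reset of the original pageToken once iteration finishes. A brief sketch under the same assumptions as the earlier examples (import path, Service.RegionOperations field, Items field on OperationList):

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v0.beta" // assumed surface
)

// countRegionOps tallies operations in a region by letting Pages feed each
// OperationList page to the callback.
func countRegionOps(ctx context.Context, svc *compute.Service, project, region string) {
	total := 0
	err := svc.RegionOperations.List(project, region).Pages(ctx, func(page *compute.OperationList) error {
		total += len(page.Items) // Items assumed from the OperationList schema
		return nil               // a non-nil error here would halt the iteration
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d operations in %s\n", total, region)
}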
-func (c *RegionInstanceGroupsListInstancesCall) Pages(ctx context.Context, f func(*RegionInstanceGroupsListInstances) error) error { +func (c *RegionOperationsListCall) Pages(ctx context.Context, f func(*OperationList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -75731,107 +89283,93 @@ func (c *RegionInstanceGroupsListInstancesCall) Pages(ctx context.Context, f fun } } -// method id "compute.regionInstanceGroups.setNamedPorts": +// method id "compute.regions.get": -type RegionInstanceGroupsSetNamedPortsCall struct { - s *Service - project string - region string - instanceGroup string - regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionsGetCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetNamedPorts: Sets the named ports for the specified regional -// instance group. -func (r *RegionInstanceGroupsService) SetNamedPorts(project string, region string, instanceGroup string, regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest) *RegionInstanceGroupsSetNamedPortsCall { - c := &RegionInstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified Region resource. Get a list of available +// regions by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/regions/get +func (r *RegionsService) Get(project string, region string) *RegionsGetCall { + c := &RegionsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroup = instanceGroup - c.regioninstancegroupssetnamedportsrequest = regioninstancegroupssetnamedportsrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupsSetNamedPortsCall) RequestId(requestId string) *RegionInstanceGroupsSetNamedPortsCall { - c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsSetNamedPortsCall { +func (c *RegionsGetCall) Fields(s ...googleapi.Field) *RegionsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionsGetCall) IfNoneMatch(entityTag string) *RegionsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupsSetNamedPortsCall) Context(ctx context.Context) *RegionInstanceGroupsSetNamedPortsCall { +func (c *RegionsGetCall) Context(ctx context.Context) *RegionsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupsSetNamedPortsCall) Header() http.Header { +func (c *RegionsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupssetnamedportsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroup": c.instanceGroup, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroups.setNamedPorts" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regions.get" call. +// Exactly one of *Region or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Region.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *RegionsGetCall) Do(opts ...googleapi.CallOption) (*Region, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -75850,7 +89388,7 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Region{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -75862,21 +89400,14 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Sets the named ports for the specified regional instance group.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroups.setNamedPorts", + // "description": "Returns the specified Region resource. Get a list of available regions by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.regions.get", // "parameterOrder": [ // "project", - // "region", - // "instanceGroup" + // "region" // ], // "parameters": { - // "instanceGroup": { - // "description": "The name of the regional instance group where the named ports are updated.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -75885,114 +89416,177 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region resource to return.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts", - // "request": { - // "$ref": "RegionInstanceGroupsSetNamedPortsRequest" - // }, + // "path": "{project}/regions/{region}", // "response": { - // "$ref": "Operation" + // "$ref": "Region" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionInstanceGroups.testIamPermissions": +// method id "compute.regions.list": -type RegionInstanceGroupsTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionsListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *RegionInstanceGroupsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionInstanceGroupsTestIamPermissionsCall { - c := &RegionInstanceGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of region resources available to the +// specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/regions/list +func (r *RegionsService) List(project string) *RegionsListCall { + c := &RegionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). 
Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *RegionsListCall) Filter(filter string) *RegionsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *RegionsListCall) MaxResults(maxResults int64) *RegionsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *RegionsListCall) OrderBy(orderBy string) *RegionsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *RegionsListCall) PageToken(pageToken string) *RegionsListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsTestIamPermissionsCall { +func (c *RegionsListCall) Fields(s ...googleapi.Field) *RegionsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionsListCall) IfNoneMatch(entityTag string) *RegionsListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupsTestIamPermissionsCall) Context(ctx context.Context) *RegionInstanceGroupsTestIamPermissionsCall { +func (c *RegionsListCall) Context(ctx context.Context) *RegionsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
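// Illustrative sketch (not part of the vendored compute-gen.go): combining
// the RegionsListCall option setters defined above. The filter expression,
// package name, and project value are assumptions for the example; Do only
// fetches a single page, so use Pages (shown further below) for a full listing.
package computeexample // hypothetical package for these sketches

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// listUpRegions prints regions whose status is UP, at most 50 per request,
// sorted by name. svc is a *compute.Service built as in the earlier sketch.
func listUpRegions(ctx context.Context, svc *compute.Service, project string) error {
	list, err := svc.Regions.List(project).
		Filter("status eq UP").
		MaxResults(50).
		OrderBy("name").
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	for _, r := range list.Items {
		fmt.Printf("%s\t%s\n", r.Name, r.Status)
	}
	return nil
}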
-func (c *RegionInstanceGroupsTestIamPermissionsCall) Header() http.Header { +func (c *RegionsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroups.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionInstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.regions.list" call. +// Exactly one of *RegionList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *RegionList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionsListCall) Do(opts ...googleapi.CallOption) (*RegionList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -76011,7 +89605,7 @@ func (c *RegionInstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOp if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &RegionList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -76023,43 +89617,47 @@ func (c *RegionInstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOp } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroups.testIamPermissions", + // "description": "Retrieves the list of region resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.regions.list", // "parameterOrder": [ - // "project", - // "region", - // "resource" + // "project" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", // "type": "string" // }, - // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", // "type": "string" // }, - // "resource": { - // "description": "Name of the resource for this request.", + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroups/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/regions", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "RegionList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -76070,100 +89668,234 @@ func (c *RegionInstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOp } -// method id "compute.regionOperations.delete": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionsListCall) Pages(ctx context.Context, f func(*RegionList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type RegionOperationsDeleteCall struct { - s *Service - project string - region string - operation string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.routers.aggregatedList": + +type RoutersAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified region-specific Operations resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/delete -func (r *RegionOperationsService) Delete(project string, region string, operation string) *RegionOperationsDeleteCall { - c := &RegionOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of routers. +func (r *RoutersService) AggregatedList(project string) *RoutersAggregatedListCall { + c := &RoutersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.operation = operation + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. 
Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *RoutersAggregatedListCall) Filter(filter string) *RoutersAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *RoutersAggregatedListCall) MaxResults(maxResults int64) *RoutersAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *RoutersAggregatedListCall) OrderBy(orderBy string) *RoutersAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *RoutersAggregatedListCall) PageToken(pageToken string) *RoutersAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
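// Illustrative sketch (not part of the vendored compute-gen.go): walking
// compute.routers.aggregatedList page by page. The aggregated response keys
// its Items map by scope (one entry per region); the field names below follow
// the generated RouterAggregatedList/RoutersScopedList types, and the project
// value is a placeholder.
package computeexample // hypothetical package for these sketches

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// printAllRouters lists every router in the project across all regions.
func printAllRouters(ctx context.Context, svc *compute.Service, project string) error {
	return svc.Routers.AggregatedList(project).
		Pages(ctx, func(page *compute.RouterAggregatedList) error {
			for scope, scoped := range page.Items {
				for _, router := range scoped.Routers {
					fmt.Printf("%s\t%s\n", scope, router.Name)
				}
			}
			return nil
		})
}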
-func (c *RegionOperationsDeleteCall) Fields(s ...googleapi.Field) *RegionOperationsDeleteCall { +func (c *RoutersAggregatedListCall) Fields(s ...googleapi.Field) *RoutersAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RoutersAggregatedListCall) IfNoneMatch(entityTag string) *RoutersAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionOperationsDeleteCall) Context(ctx context.Context) *RegionOperationsDeleteCall { +func (c *RoutersAggregatedListCall) Context(ctx context.Context) *RoutersAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionOperationsDeleteCall) Header() http.Header { +func (c *RoutersAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RoutersAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations/{operation}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/routers") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "operation": c.operation, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionOperations.delete" call. -func (c *RegionOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { +// Do executes the "compute.routers.aggregatedList" call. +// Exactly one of *RouterAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *RouterAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RoutersAggregatedListCall) Do(opts ...googleapi.CallOption) (*RouterAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } if err != nil { - return err + return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return nil, err } - return nil + ret := &RouterAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil // { - // "description": "Deletes the specified region-specific Operations resource.", - // "httpMethod": "DELETE", - // "id": "compute.regionOperations.delete", + // "description": "Retrieves an aggregated list of routers.", + // "httpMethod": "GET", + // "id": "compute.routers.aggregatedList", // "parameterOrder": [ - // "project", - // "region", - // "operation" + // "project" // ], // "parameters": { - // "operation": { - // "description": "Name of the Operations resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -76172,113 +89904,135 @@ func (c *RegionOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/operations/{operation}", + // "path": "{project}/aggregated/routers", + // "response": { + // "$ref": "RouterAggregatedList" + // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionOperations.get": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RoutersAggregatedListCall) Pages(ctx context.Context, f func(*RouterAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type RegionOperationsGetCall struct { - s *Service - project string - region string - operation string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// method id "compute.routers.delete": + +type RoutersDeleteCall struct { + s *Service + project string + region string + router string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Retrieves the specified region-specific Operations resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/get -func (r *RegionOperationsService) Get(project string, region string, operation string) *RegionOperationsGetCall { - c := &RegionOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified Router resource. +func (r *RoutersService) Delete(project string, region string, router string) *RoutersDeleteCall { + c := &RoutersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.operation = operation + c.router = router + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RoutersDeleteCall) RequestId(requestId string) *RoutersDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionOperationsGetCall) Fields(s ...googleapi.Field) *RegionOperationsGetCall { +func (c *RoutersDeleteCall) Fields(s ...googleapi.Field) *RoutersDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionOperationsGetCall) IfNoneMatch(entityTag string) *RegionOperationsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionOperationsGetCall) Context(ctx context.Context) *RegionOperationsGetCall { +func (c *RoutersDeleteCall) Context(ctx context.Context) *RoutersDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionOperationsGetCall) Header() http.Header { +func (c *RoutersDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionOperationsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RoutersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations/{operation}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "operation": c.operation, + "project": c.project, + "region": c.region, + "router": c.router, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionOperations.get" call. +// Do executes the "compute.routers.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
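// Illustrative sketch (not part of the vendored compute-gen.go): issuing
// compute.routers.delete (the Do method just below) with a caller-supplied
// request ID so that a retried request is ignored by the server rather than
// executed twice. The UUID literal is a placeholder; real callers should
// generate a fresh one per logical operation.
package computeexample // hypothetical package for these sketches

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func deleteRouter(ctx context.Context, svc *compute.Service, project, region, router string) error {
	op, err := svc.Routers.Delete(project, region, router).
		RequestId("11111111-2222-3333-4444-555555555555"). // placeholder UUID
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Printf("delete started: operation %s (%s)\n", op.Name, op.Status)
	return nil
}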
-func (c *RegionOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RoutersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -76309,22 +90063,15 @@ func (c *RegionOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Retrieves the specified region-specific Operations resource.", - // "httpMethod": "GET", - // "id": "compute.regionOperations.get", + // "description": "Deletes the specified Router resource.", + // "httpMethod": "DELETE", + // "id": "compute.routers.delete", // "parameterOrder": [ // "project", // "region", - // "operation" + // "router" // ], // "parameters": { - // "operation": { - // "description": "Name of the Operations resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -76338,114 +90085,59 @@ func (c *RegionOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "router": { + // "description": "Name of the Router resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/operations/{operation}", + // "path": "{project}/regions/{region}/routers/{router}", // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.regionOperations.list": +// method id "compute.routers.get": -type RegionOperationsListCall struct { +type RoutersGetCall struct { s *Service project string region string + router string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of Operation resources contained within the -// specified region. -// For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/list -func (r *RegionOperationsService) List(project string, region string) *RegionOperationsListCall { - c := &RegionOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified Router resource. Get a list of available +// routers by making a list() request. 
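// Illustrative sketch (not part of the vendored compute-gen.go): fetching a
// single router with the Get call declared just below. The field access on
// the returned *compute.Router (Name, Network) and the argument values are
// assumptions for the example.
package computeexample // hypothetical package for these sketches

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func describeRouter(ctx context.Context, svc *compute.Service, project, region, name string) error {
	r, err := svc.Routers.Get(project, region, name).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Printf("router %s is attached to network %s\n", r.Name, r.Network)
	return nil
}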
+func (r *RoutersService) Get(project string, region string, router string) *RoutersGetCall { + c := &RoutersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - return c -} - -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *RegionOperationsListCall) Filter(filter string) *RegionOperationsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *RegionOperationsListCall) MaxResults(maxResults int64) *RegionOperationsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *RegionOperationsListCall) OrderBy(orderBy string) *RegionOperationsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *RegionOperationsListCall) PageToken(pageToken string) *RegionOperationsListCall { - c.urlParams_.Set("pageToken", pageToken) + c.router = router return c } // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionOperationsListCall) Fields(s ...googleapi.Field) *RegionOperationsListCall { +func (c *RoutersGetCall) Fields(s ...googleapi.Field) *RoutersGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -76455,7 +90147,7 @@ func (c *RegionOperationsListCall) Fields(s ...googleapi.Field) *RegionOperation // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionOperationsListCall) IfNoneMatch(entityTag string) *RegionOperationsListCall { +func (c *RoutersGetCall) IfNoneMatch(entityTag string) *RoutersGetCall { c.ifNoneMatch_ = entityTag return c } @@ -76463,21 +90155,21 @@ func (c *RegionOperationsListCall) IfNoneMatch(entityTag string) *RegionOperatio // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionOperationsListCall) Context(ctx context.Context) *RegionOperationsListCall { +func (c *RoutersGetCall) Context(ctx context.Context) *RoutersGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionOperationsListCall) Header() http.Header { +func (c *RoutersGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionOperationsListCall) doRequest(alt string) (*http.Response, error) { +func (c *RoutersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -76488,25 +90180,26 @@ func (c *RegionOperationsListCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, + "router": c.router, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionOperations.list" call. -// Exactly one of *OperationList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *OperationList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationList, error) { +// Do executes the "compute.routers.get" call. +// Exactly one of *Router or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Router.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *RoutersGetCall) Do(opts ...googleapi.CallOption) (*Router, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -76525,7 +90218,7 @@ func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &OperationList{ + ret := &Router{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -76537,37 +90230,15 @@ func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL } return ret, nil // { - // "description": "Retrieves a list of Operation resources contained within the specified region.", + // "description": "Returns the specified Router resource. Get a list of available routers by making a list() request.", // "httpMethod": "GET", - // "id": "compute.regionOperations.list", + // "id": "compute.routers.get", // "parameterOrder": [ // "project", - // "region" + // "region", + // "router" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -76581,11 +90252,18 @@ func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "router": { + // "description": "Name of the Router resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/operations", + // "path": "{project}/regions/{region}/routers/{router}", // "response": { - // "$ref": "OperationList" + // "$ref": "Router" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -76596,53 +90274,33 @@ func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *RegionOperationsListCall) Pages(ctx context.Context, f func(*OperationList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.regions.get": +// method id "compute.routers.getRouterStatus": -type RegionsGetCall struct { +type RoutersGetRouterStatusCall struct { s *Service project string region string + router string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the specified Region resource. Get a list of available -// regions by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/regions/get -func (r *RegionsService) Get(project string, region string) *RegionsGetCall { - c := &RegionsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetRouterStatus: Retrieves runtime information of the specified +// router. +func (r *RoutersService) GetRouterStatus(project string, region string, router string) *RoutersGetRouterStatusCall { + c := &RoutersGetRouterStatusCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region + c.router = router return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionsGetCall) Fields(s ...googleapi.Field) *RegionsGetCall { +func (c *RoutersGetRouterStatusCall) Fields(s ...googleapi.Field) *RoutersGetRouterStatusCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -76652,7 +90310,7 @@ func (c *RegionsGetCall) Fields(s ...googleapi.Field) *RegionsGetCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
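// Illustrative sketch (not part of the vendored compute-gen.go): a conditional
// re-fetch using the IfNoneMatch setter just below. It assumes the previous
// response carried an ETag header; when the resource is unchanged the server
// answers 304 and Do returns an error that googleapi.IsNotModified reports.
package computeexample // hypothetical package for these sketches

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

func refreshRouterStatus(ctx context.Context, svc *compute.Service, project, region, router, cachedETag string) error {
	call := svc.Routers.GetRouterStatus(project, region, router).Context(ctx)
	if cachedETag != "" {
		call = call.IfNoneMatch(cachedETag)
	}
	resp, err := call.Do()
	if googleapi.IsNotModified(err) {
		fmt.Println("router status unchanged; keeping the cached copy")
		return nil
	}
	if err != nil {
		return err
	}
	fmt.Printf("fetched fresh status; new ETag %q\n", resp.ServerResponse.Header.Get("ETag"))
	return nil
}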
-func (c *RegionsGetCall) IfNoneMatch(entityTag string) *RegionsGetCall { +func (c *RoutersGetRouterStatusCall) IfNoneMatch(entityTag string) *RoutersGetRouterStatusCall { c.ifNoneMatch_ = entityTag return c } @@ -76660,21 +90318,21 @@ func (c *RegionsGetCall) IfNoneMatch(entityTag string) *RegionsGetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionsGetCall) Context(ctx context.Context) *RegionsGetCall { +func (c *RoutersGetRouterStatusCall) Context(ctx context.Context) *RoutersGetRouterStatusCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionsGetCall) Header() http.Header { +func (c *RoutersGetRouterStatusCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RoutersGetRouterStatusCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -76685,25 +90343,26 @@ func (c *RegionsGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}/getRouterStatus") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, + "router": c.router, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regions.get" call. -// Exactly one of *Region or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Region.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *RegionsGetCall) Do(opts ...googleapi.CallOption) (*Region, error) { +// Do executes the "compute.routers.getRouterStatus" call. +// Exactly one of *RouterStatusResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *RouterStatusResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RoutersGetRouterStatusCall) Do(opts ...googleapi.CallOption) (*RouterStatusResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -76722,7 +90381,7 @@ func (c *RegionsGetCall) Do(opts ...googleapi.CallOption) (*Region, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Region{ + ret := &RouterStatusResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -76734,12 +90393,13 @@ func (c *RegionsGetCall) Do(opts ...googleapi.CallOption) (*Region, error) { } return ret, nil // { - // "description": "Returns the specified Region resource. 
Get a list of available regions by making a list() request.", + // "description": "Retrieves runtime information of the specified router.", // "httpMethod": "GET", - // "id": "compute.regions.get", + // "id": "compute.routers.getRouterStatus", // "parameterOrder": [ // "project", - // "region" + // "region", + // "router" // ], // "parameters": { // "project": { @@ -76750,16 +90410,23 @@ func (c *RegionsGetCall) Do(opts ...googleapi.CallOption) (*Region, error) { // "type": "string" // }, // "region": { - // "description": "Name of the region resource to return.", + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "router": { + // "description": "Name of the Router resource to query.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}", + // "path": "{project}/regions/{region}/routers/{router}/getRouterStatus", // "response": { - // "$ref": "Region" + // "$ref": "RouterStatusResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -76770,157 +90437,104 @@ func (c *RegionsGetCall) Do(opts ...googleapi.CallOption) (*Region, error) { } -// method id "compute.regions.list": +// method id "compute.routers.insert": -type RegionsListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RoutersInsertCall struct { + s *Service + project string + region string + router *Router + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of region resources available to the -// specified project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/regions/list -func (r *RegionsService) List(project string) *RegionsListCall { - c := &RegionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a Router resource in the specified project and region +// using the data included in the request. +func (r *RoutersService) Insert(project string, region string, router *Router) *RoutersInsertCall { + c := &RoutersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.region = region + c.router = router return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. 
Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *RegionsListCall) Filter(filter string) *RegionsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *RegionsListCall) MaxResults(maxResults int64) *RegionsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *RegionsListCall) OrderBy(orderBy string) *RegionsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *RegionsListCall) PageToken(pageToken string) *RegionsListCall { - c.urlParams_.Set("pageToken", pageToken) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RoutersInsertCall) RequestId(requestId string) *RoutersInsertCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionsListCall) Fields(s ...googleapi.Field) *RegionsListCall { +func (c *RoutersInsertCall) Fields(s ...googleapi.Field) *RoutersInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. 
This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionsListCall) IfNoneMatch(entityTag string) *RegionsListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionsListCall) Context(ctx context.Context) *RegionsListCall { +func (c *RoutersInsertCall) Context(ctx context.Context) *RoutersInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionsListCall) Header() http.Header { +func (c *RoutersInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionsListCall) doRequest(alt string) (*http.Response, error) { +func (c *RoutersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.router) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regions.list" call. -// Exactly one of *RegionList or error will be non-nil. Any non-2xx +// Do executes the "compute.routers.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *RegionList.ServerResponse.Header or (if a response was returned at +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionsListCall) Do(opts ...googleapi.CallOption) (*RegionList, error) { +func (c *RoutersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -76939,7 +90553,7 @@ func (c *RegionsListCall) Do(opts ...googleapi.CallOption) (*RegionList, error) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RegionList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -76951,93 +90565,67 @@ func (c *RegionsListCall) Do(opts ...googleapi.CallOption) (*RegionList, error) } return ret, nil // { - // "description": "Retrieves the list of region resources available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.regions.list", + // "description": "Creates a Router resource in the specified project and region using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.routers.insert", // "parameterOrder": [ - // "project" + // "project", + // "region" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/regions", + // "path": "{project}/regions/{region}/routers", + // "request": { + // "$ref": "Router" + // }, // "response": { - // "$ref": "RegionList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *RegionsListCall) Pages(ctx context.Context, f func(*RegionList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.routers.aggregatedList": +// method id "compute.routers.list": -type RoutersAggregatedListCall struct { +type RoutersListCall struct { s *Service project string + region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// AggregatedList: Retrieves an aggregated list of routers. -func (r *RoutersService) AggregatedList(project string) *RoutersAggregatedListCall { - c := &RoutersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of Router resources available to the specified +// project. +func (r *RoutersService) List(project string, region string) *RoutersListCall { + c := &RoutersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.region = region return c } @@ -77067,7 +90655,7 @@ func (r *RoutersService) AggregatedList(project string) *RoutersAggregatedListCa // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. 
-func (c *RoutersAggregatedListCall) Filter(filter string) *RoutersAggregatedListCall { +func (c *RoutersListCall) Filter(filter string) *RoutersListCall { c.urlParams_.Set("filter", filter) return c } @@ -77078,7 +90666,7 @@ func (c *RoutersAggregatedListCall) Filter(filter string) *RoutersAggregatedList // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *RoutersAggregatedListCall) MaxResults(maxResults int64) *RoutersAggregatedListCall { +func (c *RoutersListCall) MaxResults(maxResults int64) *RoutersListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -77095,7 +90683,7 @@ func (c *RoutersAggregatedListCall) MaxResults(maxResults int64) *RoutersAggrega // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *RoutersAggregatedListCall) OrderBy(orderBy string) *RoutersAggregatedListCall { +func (c *RoutersListCall) OrderBy(orderBy string) *RoutersListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -77103,7 +90691,7 @@ func (c *RoutersAggregatedListCall) OrderBy(orderBy string) *RoutersAggregatedLi // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *RoutersAggregatedListCall) PageToken(pageToken string) *RoutersAggregatedListCall { +func (c *RoutersListCall) PageToken(pageToken string) *RoutersListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -77111,7 +90699,7 @@ func (c *RoutersAggregatedListCall) PageToken(pageToken string) *RoutersAggregat // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RoutersAggregatedListCall) Fields(s ...googleapi.Field) *RoutersAggregatedListCall { +func (c *RoutersListCall) Fields(s ...googleapi.Field) *RoutersListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -77121,7 +90709,7 @@ func (c *RoutersAggregatedListCall) Fields(s ...googleapi.Field) *RoutersAggrega // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RoutersAggregatedListCall) IfNoneMatch(entityTag string) *RoutersAggregatedListCall { +func (c *RoutersListCall) IfNoneMatch(entityTag string) *RoutersListCall { c.ifNoneMatch_ = entityTag return c } @@ -77129,21 +90717,21 @@ func (c *RoutersAggregatedListCall) IfNoneMatch(entityTag string) *RoutersAggreg // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutersAggregatedListCall) Context(ctx context.Context) *RoutersAggregatedListCall { +func (c *RoutersListCall) Context(ctx context.Context) *RoutersListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RoutersAggregatedListCall) Header() http.Header { +func (c *RoutersListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RoutersAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *RoutersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -77154,24 +90742,25 @@ func (c *RoutersAggregatedListCall) doRequest(alt string) (*http.Response, error } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/routers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routers.aggregatedList" call. -// Exactly one of *RouterAggregatedList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *RouterAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RoutersAggregatedListCall) Do(opts ...googleapi.CallOption) (*RouterAggregatedList, error) { +// Do executes the "compute.routers.list" call. +// Exactly one of *RouterList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *RouterList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RoutersListCall) Do(opts ...googleapi.CallOption) (*RouterList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -77190,7 +90779,7 @@ func (c *RoutersAggregatedListCall) Do(opts ...googleapi.CallOption) (*RouterAgg if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RouterAggregatedList{ + ret := &RouterList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -77202,11 +90791,12 @@ func (c *RoutersAggregatedListCall) Do(opts ...googleapi.CallOption) (*RouterAgg } return ret, nil // { - // "description": "Retrieves an aggregated list of routers.", + // "description": "Retrieves a list of Router resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.routers.aggregatedList", + // "id": "compute.routers.list", // "parameterOrder": [ - // "project" + // "project", + // "region" // ], // "parameters": { // "filter": { @@ -77238,11 +90828,18 @@ func (c *RoutersAggregatedListCall) Do(opts ...googleapi.CallOption) (*RouterAgg // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/aggregated/routers", + // "path": "{project}/regions/{region}/routers", // "response": { - // "$ref": "RouterAggregatedList" + // "$ref": "RouterList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -77256,7 +90853,7 @@ func (c *RoutersAggregatedListCall) Do(opts ...googleapi.CallOption) (*RouterAgg // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *RoutersAggregatedListCall) Pages(ctx context.Context, f func(*RouterAggregatedList) error) error { +func (c *RoutersListCall) Pages(ctx context.Context, f func(*RouterList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -77274,24 +90871,28 @@ func (c *RoutersAggregatedListCall) Pages(ctx context.Context, f func(*RouterAgg } } -// method id "compute.routers.delete": +// method id "compute.routers.patch": -type RoutersDeleteCall struct { +type RoutersPatchCall struct { s *Service project string region string router string + router2 *Router urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Delete: Deletes the specified Router resource. -func (r *RoutersService) Delete(project string, region string, router string) *RoutersDeleteCall { - c := &RoutersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Patches the specified Router resource with the data included +// in the request. This method supports PATCH semantics and uses JSON +// merge patch format and processing rules. 
+func (r *RoutersService) Patch(project string, region string, router string, router2 *Router) *RoutersPatchCall { + c := &RoutersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.router = router + c.router2 = router2 return c } @@ -77309,7 +90910,7 @@ func (r *RoutersService) Delete(project string, region string, router string) *R // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RoutersDeleteCall) RequestId(requestId string) *RoutersDeleteCall { +func (c *RoutersPatchCall) RequestId(requestId string) *RoutersPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -77317,7 +90918,7 @@ func (c *RoutersDeleteCall) RequestId(requestId string) *RoutersDeleteCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RoutersDeleteCall) Fields(s ...googleapi.Field) *RoutersDeleteCall { +func (c *RoutersPatchCall) Fields(s ...googleapi.Field) *RoutersPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -77325,31 +90926,36 @@ func (c *RoutersDeleteCall) Fields(s ...googleapi.Field) *RoutersDeleteCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutersDeleteCall) Context(ctx context.Context) *RoutersDeleteCall { +func (c *RoutersPatchCall) Context(ctx context.Context) *RoutersPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RoutersDeleteCall) Header() http.Header { +func (c *RoutersPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RoutersDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RoutersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.router2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("PATCH", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, @@ -77359,14 +90965,14 @@ func (c *RoutersDeleteCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routers.delete" call. +// Do executes the "compute.routers.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *RoutersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RoutersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -77397,9 +91003,9 @@ func (c *RoutersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Deletes the specified Router resource.", - // "httpMethod": "DELETE", - // "id": "compute.routers.delete", + // "description": "Patches the specified Router resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.routers.patch", // "parameterOrder": [ // "project", // "region", @@ -77426,7 +91032,7 @@ func (c *RoutersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "type": "string" // }, // "router": { - // "description": "Name of the Router resource to delete.", + // "description": "Name of the Router resource to patch.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -77434,6 +91040,9 @@ func (c *RoutersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) // } // }, // "path": "{project}/regions/{region}/routers/{router}", + // "request": { + // "$ref": "Router" + // }, // "response": { // "$ref": "Operation" // }, @@ -77445,78 +91054,72 @@ func (c *RoutersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) } -// method id "compute.routers.get": +// method id "compute.routers.preview": -type RoutersGetCall struct { - s *Service - project string - region string - router string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RoutersPreviewCall struct { + s *Service + project string + region string + router string + router2 *Router + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified Router resource. Get a list of available -// routers by making a list() request. -func (r *RoutersService) Get(project string, region string, router string) *RoutersGetCall { - c := &RoutersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Preview: Preview fields auto-generated during router create and +// update operations. Calling this method does NOT create or update the +// router. +func (r *RoutersService) Preview(project string, region string, router string, router2 *Router) *RoutersPreviewCall { + c := &RoutersPreviewCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.router = router + c.router2 = router2 return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RoutersGetCall) Fields(s ...googleapi.Field) *RoutersGetCall { +func (c *RoutersPreviewCall) Fields(s ...googleapi.Field) *RoutersPreviewCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
-func (c *RoutersGetCall) IfNoneMatch(entityTag string) *RoutersGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutersGetCall) Context(ctx context.Context) *RoutersGetCall { +func (c *RoutersPreviewCall) Context(ctx context.Context) *RoutersPreviewCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RoutersGetCall) Header() http.Header { +func (c *RoutersPreviewCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RoutersGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RoutersPreviewCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.router2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}/preview") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, @@ -77526,14 +91129,14 @@ func (c *RoutersGetCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routers.get" call. -// Exactly one of *Router or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Router.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *RoutersGetCall) Do(opts ...googleapi.CallOption) (*Router, error) { +// Do executes the "compute.routers.preview" call. +// Exactly one of *RoutersPreviewResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *RoutersPreviewResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RoutersPreviewCall) Do(opts ...googleapi.CallOption) (*RoutersPreviewResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -77552,7 +91155,7 @@ func (c *RoutersGetCall) Do(opts ...googleapi.CallOption) (*Router, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Router{ + ret := &RoutersPreviewResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -77564,9 +91167,9 @@ func (c *RoutersGetCall) Do(opts ...googleapi.CallOption) (*Router, error) { } return ret, nil // { - // "description": "Returns the specified Router resource. 
Get a list of available routers by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.routers.get", + // "description": "Preview fields auto-generated during router create and update operations. Calling this method does NOT create or update the router.", + // "httpMethod": "POST", + // "id": "compute.routers.preview", // "parameterOrder": [ // "project", // "region", @@ -77588,17 +91191,20 @@ func (c *RoutersGetCall) Do(opts ...googleapi.CallOption) (*Router, error) { // "type": "string" // }, // "router": { - // "description": "Name of the Router resource to return.", + // "description": "Name of the Router resource to query.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/routers/{router}", - // "response": { + // "path": "{project}/regions/{region}/routers/{router}/preview", + // "request": { // "$ref": "Router" // }, + // "response": { + // "$ref": "RoutersPreviewResponse" + // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/compute", @@ -77608,78 +91214,249 @@ func (c *RoutersGetCall) Do(opts ...googleapi.CallOption) (*Router, error) { } -// method id "compute.routers.getRouterStatus": +// method id "compute.routers.testIamPermissions": -type RoutersGetRouterStatusCall struct { - s *Service - project string - region string - router string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RoutersTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *RoutersService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RoutersTestIamPermissionsCall { + c := &RoutersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RoutersTestIamPermissionsCall) Fields(s ...googleapi.Field) *RoutersTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RoutersTestIamPermissionsCall) Context(ctx context.Context) *RoutersTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *RoutersTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RoutersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{resource}/testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.routers.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RoutersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TestPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.routers.testIamPermissions", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/routers/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, + // "response": { + // "$ref": "TestPermissionsResponse" + // }, + // "scopes": [ + // 
"https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.routers.update": + +type RoutersUpdateCall struct { + s *Service + project string + region string + router string + router2 *Router + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetRouterStatus: Retrieves runtime information of the specified -// router. -func (r *RoutersService) GetRouterStatus(project string, region string, router string) *RoutersGetRouterStatusCall { - c := &RoutersGetRouterStatusCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates the specified Router resource with the data included +// in the request. +func (r *RoutersService) Update(project string, region string, router string, router2 *Router) *RoutersUpdateCall { + c := &RoutersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.router = router + c.router2 = router2 + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RoutersUpdateCall) RequestId(requestId string) *RoutersUpdateCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RoutersGetRouterStatusCall) Fields(s ...googleapi.Field) *RoutersGetRouterStatusCall { +func (c *RoutersUpdateCall) Fields(s ...googleapi.Field) *RoutersUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RoutersGetRouterStatusCall) IfNoneMatch(entityTag string) *RoutersGetRouterStatusCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutersGetRouterStatusCall) Context(ctx context.Context) *RoutersGetRouterStatusCall { +func (c *RoutersUpdateCall) Context(ctx context.Context) *RoutersUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RoutersGetRouterStatusCall) Header() http.Header { +func (c *RoutersUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RoutersGetRouterStatusCall) doRequest(alt string) (*http.Response, error) { +func (c *RoutersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.router2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}/getRouterStatus") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("PUT", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, @@ -77689,14 +91466,14 @@ func (c *RoutersGetRouterStatusCall) doRequest(alt string) (*http.Response, erro return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routers.getRouterStatus" call. -// Exactly one of *RouterStatusResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *RouterStatusResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RoutersGetRouterStatusCall) Do(opts ...googleapi.CallOption) (*RouterStatusResponse, error) { +// Do executes the "compute.routers.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RoutersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -77715,7 +91492,7 @@ func (c *RoutersGetRouterStatusCall) Do(opts ...googleapi.CallOption) (*RouterSt if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RouterStatusResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -77727,9 +91504,9 @@ func (c *RoutersGetRouterStatusCall) Do(opts ...googleapi.CallOption) (*RouterSt } return ret, nil // { - // "description": "Retrieves runtime information of the specified router.", - // "httpMethod": "GET", - // "id": "compute.routers.getRouterStatus", + // "description": "Updates the specified Router resource with the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.routers.update", // "parameterOrder": [ // "project", // "region", @@ -77750,46 +91527,51 @@ func (c *RoutersGetRouterStatusCall) Do(opts ...googleapi.CallOption) (*RouterSt // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "router": { - // "description": "Name of the Router resource to query.", + // "description": "Name of the Router resource to update.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/routers/{router}/getRouterStatus", + // "path": "{project}/regions/{region}/routers/{router}", + // "request": { + // "$ref": "Router" + // }, // "response": { - // "$ref": "RouterStatusResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.routers.insert": +// method id "compute.routes.delete": -type RoutersInsertCall struct { +type RoutesDeleteCall struct { s *Service project string - region string - router *Router + route string urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Insert: Creates a Router resource in the specified project and region -// using the data included in the request. -func (r *RoutersService) Insert(project string, region string, router *Router) *RoutersInsertCall { - c := &RoutersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified Route resource. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/routes/delete +func (r *RoutesService) Delete(project string, route string) *RoutesDeleteCall { + c := &RoutesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.router = router + c.route = route return c } @@ -77807,7 +91589,7 @@ func (r *RoutersService) Insert(project string, region string, router *Router) * // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RoutersInsertCall) RequestId(requestId string) *RoutersInsertCall { +func (c *RoutesDeleteCall) RequestId(requestId string) *RoutesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -77815,7 +91597,7 @@ func (c *RoutersInsertCall) RequestId(requestId string) *RoutersInsertCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RoutersInsertCall) Fields(s ...googleapi.Field) *RoutersInsertCall { +func (c *RoutesDeleteCall) Fields(s ...googleapi.Field) *RoutesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -77823,52 +91605,47 @@ func (c *RoutersInsertCall) Fields(s ...googleapi.Field) *RoutersInsertCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutersInsertCall) Context(ctx context.Context) *RoutersInsertCall { +func (c *RoutesDeleteCall) Context(ctx context.Context) *RoutesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RoutersInsertCall) Header() http.Header { +func (c *RoutesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RoutersInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RoutesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.router) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes/{route}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, + "route": c.route, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routers.insert" call. +// Do executes the "compute.routes.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *RoutersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RoutesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -77899,12 +91676,12 @@ func (c *RoutersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Creates a Router resource in the specified project and region using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.routers.insert", + // "description": "Deletes the specified Route resource.", + // "httpMethod": "DELETE", + // "id": "compute.routes.delete", // "parameterOrder": [ // "project", - // "region" + // "route" // ], // "parameters": { // "project": { @@ -77914,23 +91691,20 @@ func (c *RoutersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "route": { + // "description": "Name of the Route resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/routers", - // "request": { - // "$ref": "Router" - // }, + // "path": "{project}/global/routes/{route}", // "response": { // "$ref": "Operation" // }, @@ -77942,98 +91716,32 @@ func (c *RoutersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) } -// method id "compute.routers.list": +// method id "compute.routes.get": -type RoutersListCall struct { +type RoutesGetCall struct { s *Service project string - region string + route string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of Router resources available to the specified -// project. -func (r *RoutersService) List(project string, region string) *RoutersListCall { - c := &RoutersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified Route resource. Get a list of available +// routes by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/routes/get +func (r *RoutesService) Get(project string, route string) *RoutesGetCall { + c := &RoutesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - return c -} - -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. 
Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *RoutersListCall) Filter(filter string) *RoutersListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *RoutersListCall) MaxResults(maxResults int64) *RoutersListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *RoutersListCall) OrderBy(orderBy string) *RoutersListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *RoutersListCall) PageToken(pageToken string) *RoutersListCall { - c.urlParams_.Set("pageToken", pageToken) + c.route = route return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *RoutersListCall) Fields(s ...googleapi.Field) *RoutersListCall { +func (c *RoutesGetCall) Fields(s ...googleapi.Field) *RoutesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -78043,7 +91751,7 @@ func (c *RoutersListCall) Fields(s ...googleapi.Field) *RoutersListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RoutersListCall) IfNoneMatch(entityTag string) *RoutersListCall { +func (c *RoutesGetCall) IfNoneMatch(entityTag string) *RoutesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -78051,21 +91759,21 @@ func (c *RoutersListCall) IfNoneMatch(entityTag string) *RoutersListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutersListCall) Context(ctx context.Context) *RoutersListCall { +func (c *RoutesGetCall) Context(ctx context.Context) *RoutesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RoutersListCall) Header() http.Header { +func (c *RoutesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RoutersListCall) doRequest(alt string) (*http.Response, error) { +func (c *RoutesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -78076,25 +91784,25 @@ func (c *RoutersListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes/{route}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, + "route": c.route, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routers.list" call. -// Exactly one of *RouterList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *RouterList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RoutersListCall) Do(opts ...googleapi.CallOption) (*RouterList, error) { +// Do executes the "compute.routes.get" call. +// Exactly one of *Route or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Route.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *RoutesGetCall) Do(opts ...googleapi.CallOption) (*Route, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -78113,7 +91821,7 @@ func (c *RoutersListCall) Do(opts ...googleapi.CallOption) (*RouterList, error) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RouterList{ + ret := &Route{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -78125,37 +91833,14 @@ func (c *RoutersListCall) Do(opts ...googleapi.CallOption) (*RouterList, error) } return ret, nil // { - // "description": "Retrieves a list of Router resources available to the specified project.", + // "description": "Returns the specified Route resource. Get a list of available routes by making a list() request.", // "httpMethod": "GET", - // "id": "compute.routers.list", + // "id": "compute.routes.get", // "parameterOrder": [ // "project", - // "region" + // "route" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -78163,17 +91848,17 @@ func (c *RoutersListCall) Do(opts ...googleapi.CallOption) (*RouterList, error) // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", + // "route": { + // "description": "Name of the Route resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/routers", + // "path": "{project}/global/routes/{route}", // "response": { - // "$ref": "RouterList" + // "$ref": "Route" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -78184,49 +91869,24 @@ func (c *RoutersListCall) Do(opts ...googleapi.CallOption) (*RouterList, error) } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *RoutersListCall) Pages(ctx context.Context, f func(*RouterList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.routers.patch": +// method id "compute.routes.insert": -type RoutersPatchCall struct { +type RoutesInsertCall struct { s *Service project string - region string - router string - router2 *Router + route *Route urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Patch: Patches the specified Router resource with the data included -// in the request. This method supports PATCH semantics and uses JSON -// merge patch format and processing rules. -func (r *RoutersService) Patch(project string, region string, router string, router2 *Router) *RoutersPatchCall { - c := &RoutersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a Route resource in the specified project using the +// data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/routes/insert +func (r *RoutesService) Insert(project string, route *Route) *RoutesInsertCall { + c := &RoutesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.router = router - c.router2 = router2 + c.route = route return c } @@ -78244,7 +91904,7 @@ func (r *RoutersService) Patch(project string, region string, router string, rou // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RoutersPatchCall) RequestId(requestId string) *RoutersPatchCall { +func (c *RoutesInsertCall) RequestId(requestId string) *RoutesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -78252,7 +91912,7 @@ func (c *RoutersPatchCall) RequestId(requestId string) *RoutersPatchCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *RoutersPatchCall) Fields(s ...googleapi.Field) *RoutersPatchCall { +func (c *RoutesInsertCall) Fields(s ...googleapi.Field) *RoutesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -78260,53 +91920,51 @@ func (c *RoutersPatchCall) Fields(s ...googleapi.Field) *RoutersPatchCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutersPatchCall) Context(ctx context.Context) *RoutersPatchCall { +func (c *RoutesInsertCall) Context(ctx context.Context) *RoutesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RoutersPatchCall) Header() http.Header { +func (c *RoutesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RoutersPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *RoutesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.router2) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.route) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, - "router": c.router, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routers.patch" call. +// Do executes the "compute.routes.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RoutersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RoutesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -78337,13 +91995,11 @@ func (c *RoutersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Patches the specified Router resource with the data included in the request. 
This method supports PATCH semantics and uses JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.routers.patch", + // "description": "Creates a Route resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.routes.insert", // "parameterOrder": [ - // "project", - // "region", - // "router" + // "project" // ], // "parameters": { // "project": { @@ -78353,125 +92009,178 @@ func (c *RoutersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "router": { - // "description": "Name of the Router resource to patch.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/routers/{router}", + // "path": "{project}/global/routes", // "request": { - // "$ref": "Router" + // "$ref": "Route" // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.routers.preview": +// method id "compute.routes.list": -type RoutersPreviewCall struct { - s *Service - project string - region string - router string - router2 *Router - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RoutesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves the list of Route resources available to the +// specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/routes/list +func (r *RoutesService) List(project string) *RoutesListCall { + c := &RoutesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. 
The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *RoutesListCall) Filter(filter string) *RoutesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *RoutesListCall) MaxResults(maxResults int64) *RoutesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *RoutesListCall) OrderBy(orderBy string) *RoutesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c } -// Preview: Preview fields auto-generated during router create and -// update operations. Calling this method does NOT create or update the -// router. -func (r *RoutersService) Preview(project string, region string, router string, router2 *Router) *RoutersPreviewCall { - c := &RoutersPreviewCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.router = router - c.router2 = router2 +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *RoutesListCall) PageToken(pageToken string) *RoutesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *RoutersPreviewCall) Fields(s ...googleapi.Field) *RoutersPreviewCall { +func (c *RoutesListCall) Fields(s ...googleapi.Field) *RoutesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RoutesListCall) IfNoneMatch(entityTag string) *RoutesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutersPreviewCall) Context(ctx context.Context) *RoutersPreviewCall { +func (c *RoutesListCall) Context(ctx context.Context) *RoutesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RoutersPreviewCall) Header() http.Header { +func (c *RoutesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RoutersPreviewCall) doRequest(alt string) (*http.Response, error) { +func (c *RoutesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.router2) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}/preview") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, - "router": c.router, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routers.preview" call. -// Exactly one of *RoutersPreviewResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *RoutersPreviewResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RoutersPreviewCall) Do(opts ...googleapi.CallOption) (*RoutersPreviewResponse, error) { +// Do executes the "compute.routes.list" call. +// Exactly one of *RouteList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *RouteList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RoutesListCall) Do(opts ...googleapi.CallOption) (*RouteList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -78490,7 +92199,7 @@ func (c *RoutersPreviewCall) Do(opts ...googleapi.CallOption) (*RoutersPreviewRe if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RoutersPreviewResponse{ + ret := &RouteList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -78502,43 +92211,47 @@ func (c *RoutersPreviewCall) Do(opts ...googleapi.CallOption) (*RoutersPreviewRe } return ret, nil // { - // "description": "Preview fields auto-generated during router create and update operations. Calling this method does NOT create or update the router.", - // "httpMethod": "POST", - // "id": "compute.routers.preview", + // "description": "Retrieves the list of Route resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.routes.list", // "parameterOrder": [ - // "project", - // "region", - // "router" + // "project" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", // "type": "string" // }, - // "router": { - // "description": "Name of the Router resource to query.", + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/routers/{router}/preview", - // "request": { - // "$ref": "Router" - // }, + // "path": "{project}/global/routes", // "response": { - // "$ref": "RoutersPreviewResponse" + // "$ref": "RouteList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -78549,12 +92262,32 @@ func (c *RoutersPreviewCall) Do(opts ...googleapi.CallOption) (*RoutersPreviewRe } -// method id "compute.routers.testIamPermissions": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RoutesListCall) Pages(ctx context.Context, f func(*RouteList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type RoutersTestIamPermissionsCall struct { +// method id "compute.routes.testIamPermissions": + +type RoutesTestIamPermissionsCall struct { s *Service project string - region string resource string testpermissionsrequest *TestPermissionsRequest urlParams_ gensupport.URLParams @@ -78564,10 +92297,9 @@ type RoutersTestIamPermissionsCall struct { // TestIamPermissions: Returns permissions that a caller has on the // specified resource. -func (r *RoutersService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RoutersTestIamPermissionsCall { - c := &RoutersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *RoutesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *RoutesTestIamPermissionsCall { + c := &RoutesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region c.resource = resource c.testpermissionsrequest = testpermissionsrequest return c @@ -78576,7 +92308,7 @@ func (r *RoutersService) TestIamPermissions(project string, region string, resou // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *RoutersTestIamPermissionsCall) Fields(s ...googleapi.Field) *RoutersTestIamPermissionsCall { +func (c *RoutesTestIamPermissionsCall) Fields(s ...googleapi.Field) *RoutesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -78584,21 +92316,21 @@ func (c *RoutersTestIamPermissionsCall) Fields(s ...googleapi.Field) *RoutersTes // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutersTestIamPermissionsCall) Context(ctx context.Context) *RoutersTestIamPermissionsCall { +func (c *RoutesTestIamPermissionsCall) Context(ctx context.Context) *RoutesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RoutersTestIamPermissionsCall) Header() http.Header { +func (c *RoutesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RoutersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *RoutesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -78611,26 +92343,25 @@ func (c *RoutersTestIamPermissionsCall) doRequest(alt string) (*http.Response, e } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routers.testIamPermissions" call. +// Do executes the "compute.routes.testIamPermissions" call. // Exactly one of *TestPermissionsResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *TestPermissionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RoutersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *RoutesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -78663,10 +92394,9 @@ func (c *RoutersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestP // { // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.routers.testIamPermissions", + // "id": "compute.routes.testIamPermissions", // "parameterOrder": [ // "project", - // "region", // "resource" // ], // "parameters": { @@ -78677,22 +92407,15 @@ func (c *RoutersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestP // "required": true, // "type": "string" // }, - // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "resource": { // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/routers/{resource}/testIamPermissions", + // "path": "{project}/global/routes/{resource}/testIamPermissions", // "request": { // "$ref": "TestPermissionsRequest" // }, @@ -78708,205 +92431,22 @@ func (c *RoutersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestP } -// method id "compute.routers.update": - -type RoutersUpdateCall struct { - s *Service - project string - region string - router string - router2 *Router - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Update: Updates the specified Router resource with the data included -// in the request. -func (r *RoutersService) Update(project string, region string, router string, router2 *Router) *RoutersUpdateCall { - c := &RoutersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.router = router - c.router2 = router2 - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RoutersUpdateCall) RequestId(requestId string) *RoutersUpdateCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RoutersUpdateCall) Fields(s ...googleapi.Field) *RoutersUpdateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *RoutersUpdateCall) Context(ctx context.Context) *RoutersUpdateCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RoutersUpdateCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RoutersUpdateCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.router2) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "router": c.router, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.routers.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RoutersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates the specified Router resource with the data included in the request.", - // "httpMethod": "PUT", - // "id": "compute.routers.update", - // "parameterOrder": [ - // "project", - // "region", - // "router" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "router": { - // "description": "Name of the Router resource to update.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/routers/{router}", - // "request": { - // "$ref": "Router" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.routes.delete": +// method id "compute.securityPolicies.delete": -type RoutesDeleteCall struct { - s *Service - project string - route string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type SecurityPoliciesDeleteCall struct { + s *Service + project string + securityPolicy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified Route resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/routes/delete -func (r *RoutesService) Delete(project string, route string) *RoutesDeleteCall { - c := &RoutesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified policy. +func (r *SecurityPoliciesService) Delete(project string, securityPolicy string) *SecurityPoliciesDeleteCall { + c := &SecurityPoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.route = route + c.securityPolicy = securityPolicy return c } @@ -78924,7 +92464,7 @@ func (r *RoutesService) Delete(project string, route string) *RoutesDeleteCall { // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RoutesDeleteCall) RequestId(requestId string) *RoutesDeleteCall { +func (c *SecurityPoliciesDeleteCall) RequestId(requestId string) *SecurityPoliciesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -78932,7 +92472,7 @@ func (c *RoutesDeleteCall) RequestId(requestId string) *RoutesDeleteCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RoutesDeleteCall) Fields(s ...googleapi.Field) *RoutesDeleteCall { +func (c *SecurityPoliciesDeleteCall) Fields(s ...googleapi.Field) *SecurityPoliciesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -78940,21 +92480,21 @@ func (c *RoutesDeleteCall) Fields(s ...googleapi.Field) *RoutesDeleteCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutesDeleteCall) Context(ctx context.Context) *RoutesDeleteCall { +func (c *SecurityPoliciesDeleteCall) Context(ctx context.Context) *SecurityPoliciesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RoutesDeleteCall) Header() http.Header { +func (c *SecurityPoliciesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RoutesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *SecurityPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -78962,25 +92502,25 @@ func (c *RoutesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes/{route}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "route": c.route, + "project": c.project, + "securityPolicy": c.securityPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routes.delete" call. +// Do executes the "compute.securityPolicies.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RoutesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *SecurityPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -79011,12 +92551,12 @@ func (c *RoutesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Deletes the specified Route resource.", + // "description": "Deletes the specified policy.", // "httpMethod": "DELETE", - // "id": "compute.routes.delete", + // "id": "compute.securityPolicies.delete", // "parameterOrder": [ // "project", - // "route" + // "securityPolicy" // ], // "parameters": { // "project": { @@ -79031,15 +92571,15 @@ func (c *RoutesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "location": "query", // "type": "string" // }, - // "route": { - // "description": "Name of the Route resource to delete.", + // "securityPolicy": { + // "description": "Name of the security policy to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/routes/{route}", + // "path": "{project}/global/securityPolicies/{securityPolicy}", // "response": { // "$ref": "Operation" // }, @@ -79051,32 +92591,31 @@ func (c *RoutesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) } -// method id "compute.routes.get": +// method id "compute.securityPolicies.get": -type RoutesGetCall struct { - s *Service - project string - route string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type SecurityPoliciesGetCall struct { + s *Service + project string + securityPolicy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified Route resource. Get a list of available -// routes by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/routes/get -func (r *RoutesService) Get(project string, route string) *RoutesGetCall { - c := &RoutesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: List all of the ordered rules present in a single specified +// policy. +func (r *SecurityPoliciesService) Get(project string, securityPolicy string) *SecurityPoliciesGetCall { + c := &SecurityPoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.route = route + c.securityPolicy = securityPolicy return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RoutesGetCall) Fields(s ...googleapi.Field) *RoutesGetCall { +func (c *SecurityPoliciesGetCall) Fields(s ...googleapi.Field) *SecurityPoliciesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -79086,7 +92625,7 @@ func (c *RoutesGetCall) Fields(s ...googleapi.Field) *RoutesGetCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RoutesGetCall) IfNoneMatch(entityTag string) *RoutesGetCall { +func (c *SecurityPoliciesGetCall) IfNoneMatch(entityTag string) *SecurityPoliciesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -79094,21 +92633,21 @@ func (c *RoutesGetCall) IfNoneMatch(entityTag string) *RoutesGetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *RoutesGetCall) Context(ctx context.Context) *RoutesGetCall { +func (c *SecurityPoliciesGetCall) Context(ctx context.Context) *SecurityPoliciesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RoutesGetCall) Header() http.Header { +func (c *SecurityPoliciesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RoutesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *SecurityPoliciesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -79119,25 +92658,25 @@ func (c *RoutesGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes/{route}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "route": c.route, + "project": c.project, + "securityPolicy": c.securityPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routes.get" call. -// Exactly one of *Route or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Route.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *RoutesGetCall) Do(opts ...googleapi.CallOption) (*Route, error) { +// Do executes the "compute.securityPolicies.get" call. +// Exactly one of *SecurityPolicy or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *SecurityPolicy.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *SecurityPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SecurityPolicy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -79156,7 +92695,7 @@ func (c *RoutesGetCall) Do(opts ...googleapi.CallOption) (*Route, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Route{ + ret := &SecurityPolicy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -79168,12 +92707,12 @@ func (c *RoutesGetCall) Do(opts ...googleapi.CallOption) (*Route, error) { } return ret, nil // { - // "description": "Returns the specified Route resource. 
Get a list of available routes by making a list() request.", + // "description": "List all of the ordered rules present in a single specified policy.", // "httpMethod": "GET", - // "id": "compute.routes.get", + // "id": "compute.securityPolicies.get", // "parameterOrder": [ // "project", - // "route" + // "securityPolicy" // ], // "parameters": { // "project": { @@ -79183,17 +92722,17 @@ func (c *RoutesGetCall) Do(opts ...googleapi.CallOption) (*Route, error) { // "required": true, // "type": "string" // }, - // "route": { - // "description": "Name of the Route resource to return.", + // "securityPolicy": { + // "description": "Name of the security policy to get.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/routes/{route}", + // "path": "{project}/global/securityPolicies/{securityPolicy}", // "response": { - // "$ref": "Route" + // "$ref": "SecurityPolicy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -79204,24 +92743,23 @@ func (c *RoutesGetCall) Do(opts ...googleapi.CallOption) (*Route, error) { } -// method id "compute.routes.insert": +// method id "compute.securityPolicies.insert": -type RoutesInsertCall struct { - s *Service - project string - route *Route - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type SecurityPoliciesInsertCall struct { + s *Service + project string + securitypolicy *SecurityPolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a Route resource in the specified project using the -// data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/routes/insert -func (r *RoutesService) Insert(project string, route *Route) *RoutesInsertCall { - c := &RoutesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a new policy in the specified project using the data +// included in the request. +func (r *SecurityPoliciesService) Insert(project string, securitypolicy *SecurityPolicy) *SecurityPoliciesInsertCall { + c := &SecurityPoliciesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.route = route + c.securitypolicy = securitypolicy return c } @@ -79239,7 +92777,7 @@ func (r *RoutesService) Insert(project string, route *Route) *RoutesInsertCall { // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RoutesInsertCall) RequestId(requestId string) *RoutesInsertCall { +func (c *SecurityPoliciesInsertCall) RequestId(requestId string) *SecurityPoliciesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -79247,7 +92785,7 @@ func (c *RoutesInsertCall) RequestId(requestId string) *RoutesInsertCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RoutesInsertCall) Fields(s ...googleapi.Field) *RoutesInsertCall { +func (c *SecurityPoliciesInsertCall) Fields(s ...googleapi.Field) *SecurityPoliciesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -79255,34 +92793,34 @@ func (c *RoutesInsertCall) Fields(s ...googleapi.Field) *RoutesInsertCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *RoutesInsertCall) Context(ctx context.Context) *RoutesInsertCall { +func (c *SecurityPoliciesInsertCall) Context(ctx context.Context) *SecurityPoliciesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RoutesInsertCall) Header() http.Header { +func (c *SecurityPoliciesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RoutesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *SecurityPoliciesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.route) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicy) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -79292,14 +92830,14 @@ func (c *RoutesInsertCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routes.insert" call. +// Do executes the "compute.securityPolicies.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RoutesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *SecurityPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -79330,9 +92868,9 @@ func (c *RoutesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Creates a Route resource in the specified project using the data included in the request.", + // "description": "Creates a new policy in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.routes.insert", + // "id": "compute.securityPolicies.insert", // "parameterOrder": [ // "project" // ], @@ -79350,9 +92888,9 @@ func (c *RoutesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "type": "string" // } // }, - // "path": "{project}/global/routes", + // "path": "{project}/global/securityPolicies", // "request": { - // "$ref": "Route" + // "$ref": "SecurityPolicy" // }, // "response": { // "$ref": "Operation" @@ -79365,9 +92903,9 @@ func (c *RoutesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) } -// method id "compute.routes.list": +// method id "compute.securityPolicies.list": -type RoutesListCall struct { +type SecurityPoliciesListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -79376,11 +92914,10 @@ type RoutesListCall struct { header_ http.Header } -// List: Retrieves the list of Route resources available to the +// List: List all the policies that have been configured for the // specified project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/routes/list -func (r *RoutesService) List(project string) *RoutesListCall { - c := &RoutesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *SecurityPoliciesService) List(project string) *SecurityPoliciesListCall { + c := &SecurityPoliciesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -79411,7 +92948,7 @@ func (r *RoutesService) List(project string) *RoutesListCall { // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *RoutesListCall) Filter(filter string) *RoutesListCall { +func (c *SecurityPoliciesListCall) Filter(filter string) *SecurityPoliciesListCall { c.urlParams_.Set("filter", filter) return c } @@ -79422,7 +92959,7 @@ func (c *RoutesListCall) Filter(filter string) *RoutesListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *RoutesListCall) MaxResults(maxResults int64) *RoutesListCall { +func (c *SecurityPoliciesListCall) MaxResults(maxResults int64) *SecurityPoliciesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -79439,7 +92976,7 @@ func (c *RoutesListCall) MaxResults(maxResults int64) *RoutesListCall { // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *RoutesListCall) OrderBy(orderBy string) *RoutesListCall { +func (c *SecurityPoliciesListCall) OrderBy(orderBy string) *SecurityPoliciesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -79447,7 +92984,7 @@ func (c *RoutesListCall) OrderBy(orderBy string) *RoutesListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. 
-func (c *RoutesListCall) PageToken(pageToken string) *RoutesListCall { +func (c *SecurityPoliciesListCall) PageToken(pageToken string) *SecurityPoliciesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -79455,7 +92992,7 @@ func (c *RoutesListCall) PageToken(pageToken string) *RoutesListCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RoutesListCall) Fields(s ...googleapi.Field) *RoutesListCall { +func (c *SecurityPoliciesListCall) Fields(s ...googleapi.Field) *SecurityPoliciesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -79465,7 +93002,7 @@ func (c *RoutesListCall) Fields(s ...googleapi.Field) *RoutesListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RoutesListCall) IfNoneMatch(entityTag string) *RoutesListCall { +func (c *SecurityPoliciesListCall) IfNoneMatch(entityTag string) *SecurityPoliciesListCall { c.ifNoneMatch_ = entityTag return c } @@ -79473,21 +93010,21 @@ func (c *RoutesListCall) IfNoneMatch(entityTag string) *RoutesListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutesListCall) Context(ctx context.Context) *RoutesListCall { +func (c *SecurityPoliciesListCall) Context(ctx context.Context) *SecurityPoliciesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RoutesListCall) Header() http.Header { +func (c *SecurityPoliciesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RoutesListCall) doRequest(alt string) (*http.Response, error) { +func (c *SecurityPoliciesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -79498,7 +93035,7 @@ func (c *RoutesListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders @@ -79508,14 +93045,14 @@ func (c *RoutesListCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routes.list" call. -// Exactly one of *RouteList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *RouteList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RoutesListCall) Do(opts ...googleapi.CallOption) (*RouteList, error) { +// Do executes the "compute.securityPolicies.list" call. +// Exactly one of *SecurityPolicyList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *SecurityPolicyList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. 
Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *SecurityPoliciesListCall) Do(opts ...googleapi.CallOption) (*SecurityPolicyList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -79534,7 +93071,7 @@ func (c *RoutesListCall) Do(opts ...googleapi.CallOption) (*RouteList, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RouteList{ + ret := &SecurityPolicyList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -79546,9 +93083,9 @@ func (c *RoutesListCall) Do(opts ...googleapi.CallOption) (*RouteList, error) { } return ret, nil // { - // "description": "Retrieves the list of Route resources available to the specified project.", + // "description": "List all the policies that have been configured for the specified project.", // "httpMethod": "GET", - // "id": "compute.routes.list", + // "id": "compute.securityPolicies.list", // "parameterOrder": [ // "project" // ], @@ -79584,9 +93121,9 @@ func (c *RoutesListCall) Do(opts ...googleapi.CallOption) (*RouteList, error) { // "type": "string" // } // }, - // "path": "{project}/global/routes", + // "path": "{project}/global/securityPolicies", // "response": { - // "$ref": "RouteList" + // "$ref": "SecurityPolicyList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -79600,7 +93137,7 @@ func (c *RoutesListCall) Do(opts ...googleapi.CallOption) (*RouteList, error) { // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *RoutesListCall) Pages(ctx context.Context, f func(*RouteList) error) error { +func (c *SecurityPoliciesListCall) Pages(ctx context.Context, f func(*SecurityPolicyList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -79618,9 +93155,180 @@ func (c *RoutesListCall) Pages(ctx context.Context, f func(*RouteList) error) er } } -// method id "compute.routes.testIamPermissions": +// method id "compute.securityPolicies.patch": -type RoutesTestIamPermissionsCall struct { +type SecurityPoliciesPatchCall struct { + s *Service + project string + securityPolicy string + securitypolicy *SecurityPolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches the specified policy with the data included in the +// request. +func (r *SecurityPoliciesService) Patch(project string, securityPolicy string, securitypolicy *SecurityPolicy) *SecurityPoliciesPatchCall { + c := &SecurityPoliciesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.securityPolicy = securityPolicy + c.securitypolicy = securitypolicy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. 
This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *SecurityPoliciesPatchCall) RequestId(requestId string) *SecurityPoliciesPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SecurityPoliciesPatchCall) Fields(s ...googleapi.Field) *SecurityPoliciesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SecurityPoliciesPatchCall) Context(ctx context.Context) *SecurityPoliciesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *SecurityPoliciesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SecurityPoliciesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "securityPolicy": c.securityPolicy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.securityPolicies.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SecurityPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches the specified policy with the data included in the request.", + // "httpMethod": "PATCH", + // "id": "compute.securityPolicies.patch", + // "parameterOrder": [ + // "project", + // "securityPolicy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "securityPolicy": { + // "description": "Name of the security policy to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/securityPolicies/{securityPolicy}", + // "request": { + // "$ref": "SecurityPolicy" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.securityPolicies.testIamPermissions": + +type SecurityPoliciesTestIamPermissionsCall struct { s *Service project string resource string @@ -79632,8 +93340,8 @@ type RoutesTestIamPermissionsCall struct { // TestIamPermissions: Returns permissions that a caller has on the // specified resource. 
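(Editorial aside, not part of the vendored patch.) The hunks above replace the generated Routes wrappers with SecurityPolicies equivalents (delete, get, insert, list, patch), all following the builder pattern Get(...).Context(...).Do(). A minimal usage sketch follows; the import path (compute/v0.beta rather than v1), the project and policy names, and the requestId value are assumptions for illustration only.

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v0.beta" // assumed vendored path
)

func main() {
	ctx := context.Background()

	// Authenticated HTTP client from Application Default Credentials.
	client, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}

	// Fetch one security policy; "my-project" and "my-policy" are placeholders.
	policy, err := svc.SecurityPolicies.Get("my-project", "my-policy").Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("policy:", policy.Name)

	// Patch it back, supplying the optional requestId idempotency token
	// described in the generated doc comments (placeholder UUID).
	policy.Description = "updated by example"
	op, err := svc.SecurityPolicies.Patch("my-project", "my-policy", policy).
		RequestId("00000000-0000-4000-8000-000000000001").
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("operation:", op.Name)
}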
-func (r *RoutesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *RoutesTestIamPermissionsCall { - c := &RoutesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *SecurityPoliciesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *SecurityPoliciesTestIamPermissionsCall { + c := &SecurityPoliciesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.resource = resource c.testpermissionsrequest = testpermissionsrequest @@ -79643,7 +93351,7 @@ func (r *RoutesService) TestIamPermissions(project string, resource string, test // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RoutesTestIamPermissionsCall) Fields(s ...googleapi.Field) *RoutesTestIamPermissionsCall { +func (c *SecurityPoliciesTestIamPermissionsCall) Fields(s ...googleapi.Field) *SecurityPoliciesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -79651,21 +93359,21 @@ func (c *RoutesTestIamPermissionsCall) Fields(s ...googleapi.Field) *RoutesTestI // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutesTestIamPermissionsCall) Context(ctx context.Context) *RoutesTestIamPermissionsCall { +func (c *SecurityPoliciesTestIamPermissionsCall) Context(ctx context.Context) *SecurityPoliciesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RoutesTestIamPermissionsCall) Header() http.Header { +func (c *SecurityPoliciesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RoutesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *SecurityPoliciesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -79678,7 +93386,7 @@ func (c *RoutesTestIamPermissionsCall) doRequest(alt string) (*http.Response, er } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -79689,14 +93397,14 @@ func (c *RoutesTestIamPermissionsCall) doRequest(alt string) (*http.Response, er return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routes.testIamPermissions" call. +// Do executes the "compute.securityPolicies.testIamPermissions" call. // Exactly one of *TestPermissionsResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *TestPermissionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. 
-func (c *RoutesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *SecurityPoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -79729,7 +93437,7 @@ func (c *RoutesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPe // { // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.routes.testIamPermissions", + // "id": "compute.securityPolicies.testIamPermissions", // "parameterOrder": [ // "project", // "resource" @@ -79745,12 +93453,12 @@ func (c *RoutesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPe // "resource": { // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + // "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/routes/{resource}/testIamPermissions", + // "path": "{project}/global/securityPolicies/{resource}/testIamPermissions", // "request": { // "$ref": "TestPermissionsRequest" // }, @@ -79766,22 +93474,29 @@ func (c *RoutesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPe } -// method id "compute.securityPolicies.delete": +// method id "compute.snapshots.delete": -type SecurityPoliciesDeleteCall struct { - s *Service - project string - securityPolicy string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type SnapshotsDeleteCall struct { + s *Service + project string + snapshot string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified policy. -func (r *SecurityPoliciesService) Delete(project string, securityPolicy string) *SecurityPoliciesDeleteCall { - c := &SecurityPoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified Snapshot resource. Keep in mind that +// deleting a single snapshot might not necessarily delete all the data +// on that snapshot. If any data on the snapshot that is marked for +// deletion is needed for subsequent snapshots, the data will be moved +// to the next corresponding snapshot. +// +// For more information, see Deleting snaphots. +// For details, see https://cloud.google.com/compute/docs/reference/latest/snapshots/delete +func (r *SnapshotsService) Delete(project string, snapshot string) *SnapshotsDeleteCall { + c := &SnapshotsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.securityPolicy = securityPolicy + c.snapshot = snapshot return c } @@ -79799,7 +93514,7 @@ func (r *SecurityPoliciesService) Delete(project string, securityPolicy string) // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *SecurityPoliciesDeleteCall) RequestId(requestId string) *SecurityPoliciesDeleteCall { +func (c *SnapshotsDeleteCall) RequestId(requestId string) *SnapshotsDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -79807,7 +93522,7 @@ func (c *SecurityPoliciesDeleteCall) RequestId(requestId string) *SecurityPolici // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *SecurityPoliciesDeleteCall) Fields(s ...googleapi.Field) *SecurityPoliciesDeleteCall { +func (c *SnapshotsDeleteCall) Fields(s ...googleapi.Field) *SnapshotsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -79815,21 +93530,21 @@ func (c *SecurityPoliciesDeleteCall) Fields(s ...googleapi.Field) *SecurityPolic // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SecurityPoliciesDeleteCall) Context(ctx context.Context) *SecurityPoliciesDeleteCall { +func (c *SnapshotsDeleteCall) Context(ctx context.Context) *SnapshotsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SecurityPoliciesDeleteCall) Header() http.Header { +func (c *SnapshotsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SecurityPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *SnapshotsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -79837,25 +93552,25 @@ func (c *SecurityPoliciesDeleteCall) doRequest(alt string) (*http.Response, erro reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{snapshot}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "securityPolicy": c.securityPolicy, + "project": c.project, + "snapshot": c.snapshot, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.securityPolicies.delete" call. +// Do executes the "compute.snapshots.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *SecurityPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *SnapshotsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -79886,12 +93601,12 @@ func (c *SecurityPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Deletes the specified policy.", + // "description": "Deletes the specified Snapshot resource. Keep in mind that deleting a single snapshot might not necessarily delete all the data on that snapshot. 
If any data on the snapshot that is marked for deletion is needed for subsequent snapshots, the data will be moved to the next corresponding snapshot.\n\nFor more information, see Deleting snaphots.", // "httpMethod": "DELETE", - // "id": "compute.securityPolicies.delete", + // "id": "compute.snapshots.delete", // "parameterOrder": [ // "project", - // "securityPolicy" + // "snapshot" // ], // "parameters": { // "project": { @@ -79906,15 +93621,15 @@ func (c *SecurityPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio // "location": "query", // "type": "string" // }, - // "securityPolicy": { - // "description": "Name of the security policy to delete.", + // "snapshot": { + // "description": "Name of the Snapshot resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/securityPolicies/{securityPolicy}", + // "path": "{project}/global/snapshots/{snapshot}", // "response": { // "$ref": "Operation" // }, @@ -79926,31 +93641,32 @@ func (c *SecurityPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio } -// method id "compute.securityPolicies.get": +// method id "compute.snapshots.get": -type SecurityPoliciesGetCall struct { - s *Service - project string - securityPolicy string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type SnapshotsGetCall struct { + s *Service + project string + snapshot string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: List all of the ordered rules present in a single specified -// policy. -func (r *SecurityPoliciesService) Get(project string, securityPolicy string) *SecurityPoliciesGetCall { - c := &SecurityPoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified Snapshot resource. Get a list of available +// snapshots by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/snapshots/get +func (r *SnapshotsService) Get(project string, snapshot string) *SnapshotsGetCall { + c := &SnapshotsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.securityPolicy = securityPolicy + c.snapshot = snapshot return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SecurityPoliciesGetCall) Fields(s ...googleapi.Field) *SecurityPoliciesGetCall { +func (c *SnapshotsGetCall) Fields(s ...googleapi.Field) *SnapshotsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -79960,7 +93676,7 @@ func (c *SecurityPoliciesGetCall) Fields(s ...googleapi.Field) *SecurityPolicies // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *SecurityPoliciesGetCall) IfNoneMatch(entityTag string) *SecurityPoliciesGetCall { +func (c *SnapshotsGetCall) IfNoneMatch(entityTag string) *SnapshotsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -79968,21 +93684,21 @@ func (c *SecurityPoliciesGetCall) IfNoneMatch(entityTag string) *SecurityPolicie // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *SecurityPoliciesGetCall) Context(ctx context.Context) *SecurityPoliciesGetCall { +func (c *SnapshotsGetCall) Context(ctx context.Context) *SnapshotsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SecurityPoliciesGetCall) Header() http.Header { +func (c *SnapshotsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SecurityPoliciesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *SnapshotsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -79993,25 +93709,25 @@ func (c *SecurityPoliciesGetCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{snapshot}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "securityPolicy": c.securityPolicy, + "project": c.project, + "snapshot": c.snapshot, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.securityPolicies.get" call. -// Exactly one of *SecurityPolicy or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *SecurityPolicy.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *SecurityPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SecurityPolicy, error) { +// Do executes the "compute.snapshots.get" call. +// Exactly one of *Snapshot or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Snapshot.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SnapshotsGetCall) Do(opts ...googleapi.CallOption) (*Snapshot, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -80030,7 +93746,7 @@ func (c *SecurityPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SecurityPol if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &SecurityPolicy{ + ret := &Snapshot{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -80042,12 +93758,12 @@ func (c *SecurityPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SecurityPol } return ret, nil // { - // "description": "List all of the ordered rules present in a single specified policy.", + // "description": "Returns the specified Snapshot resource. 
Get a list of available snapshots by making a list() request.", // "httpMethod": "GET", - // "id": "compute.securityPolicies.get", + // "id": "compute.snapshots.get", // "parameterOrder": [ // "project", - // "securityPolicy" + // "snapshot" // ], // "parameters": { // "project": { @@ -80057,17 +93773,17 @@ func (c *SecurityPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SecurityPol // "required": true, // "type": "string" // }, - // "securityPolicy": { - // "description": "Name of the security policy to update.", + // "snapshot": { + // "description": "Name of the Snapshot resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/securityPolicies/{securityPolicy}", + // "path": "{project}/global/snapshots/{snapshot}", // "response": { - // "$ref": "SecurityPolicy" + // "$ref": "Snapshot" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -80078,101 +93794,92 @@ func (c *SecurityPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SecurityPol } -// method id "compute.securityPolicies.insert": +// method id "compute.snapshots.getIamPolicy": -type SecurityPoliciesInsertCall struct { - s *Service - project string - securitypolicy *SecurityPolicy - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type SnapshotsGetIamPolicyCall struct { + s *Service + project string + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Insert: Creates a new policy in the specified project using the data -// included in the request. -func (r *SecurityPoliciesService) Insert(project string, securitypolicy *SecurityPolicy) *SecurityPoliciesInsertCall { - c := &SecurityPoliciesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. +func (r *SnapshotsService) GetIamPolicy(project string, resource string) *SnapshotsGetIamPolicyCall { + c := &SnapshotsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.securitypolicy = securitypolicy - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *SecurityPoliciesInsertCall) RequestId(requestId string) *SecurityPoliciesInsertCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *SecurityPoliciesInsertCall) Fields(s ...googleapi.Field) *SecurityPoliciesInsertCall { +func (c *SnapshotsGetIamPolicyCall) Fields(s ...googleapi.Field) *SnapshotsGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *SnapshotsGetIamPolicyCall) IfNoneMatch(entityTag string) *SnapshotsGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SecurityPoliciesInsertCall) Context(ctx context.Context) *SecurityPoliciesInsertCall { +func (c *SnapshotsGetIamPolicyCall) Context(ctx context.Context) *SnapshotsGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SecurityPoliciesInsertCall) Header() http.Header { +func (c *SnapshotsGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SecurityPoliciesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *SnapshotsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicy) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{resource}/getIamPolicy") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.securityPolicies.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *SecurityPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.snapshots.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. 
+func (c *SnapshotsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -80191,7 +93898,7 @@ func (c *SecurityPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -80203,11 +93910,12 @@ func (c *SecurityPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Creates a new policy in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.securityPolicies.insert", + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + // "httpMethod": "GET", + // "id": "compute.snapshots.getIamPolicy", // "parameterOrder": [ - // "project" + // "project", + // "resource" // ], // "parameters": { // "project": { @@ -80217,30 +93925,30 @@ func (c *SecurityPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "[a-z0-9](?:[-a-z0-9_]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/securityPolicies", - // "request": { - // "$ref": "SecurityPolicy" - // }, + // "path": "{project}/global/snapshots/{resource}/getIamPolicy", // "response": { - // "$ref": "Operation" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.securityPolicies.list": +// method id "compute.snapshots.list": -type SecurityPoliciesListCall struct { +type SnapshotsListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -80249,10 +93957,11 @@ type SecurityPoliciesListCall struct { header_ http.Header } -// List: List all the policies that have been configured for the +// List: Retrieves the list of Snapshot resources contained within the // specified project. 
-func (r *SecurityPoliciesService) List(project string) *SecurityPoliciesListCall { - c := &SecurityPoliciesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// For details, see https://cloud.google.com/compute/docs/reference/latest/snapshots/list +func (r *SnapshotsService) List(project string) *SnapshotsListCall { + c := &SnapshotsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -80283,7 +93992,7 @@ func (r *SecurityPoliciesService) List(project string) *SecurityPoliciesListCall // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *SecurityPoliciesListCall) Filter(filter string) *SecurityPoliciesListCall { +func (c *SnapshotsListCall) Filter(filter string) *SnapshotsListCall { c.urlParams_.Set("filter", filter) return c } @@ -80294,7 +94003,7 @@ func (c *SecurityPoliciesListCall) Filter(filter string) *SecurityPoliciesListCa // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *SecurityPoliciesListCall) MaxResults(maxResults int64) *SecurityPoliciesListCall { +func (c *SnapshotsListCall) MaxResults(maxResults int64) *SnapshotsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -80311,7 +94020,7 @@ func (c *SecurityPoliciesListCall) MaxResults(maxResults int64) *SecurityPolicie // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *SecurityPoliciesListCall) OrderBy(orderBy string) *SecurityPoliciesListCall { +func (c *SnapshotsListCall) OrderBy(orderBy string) *SnapshotsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -80319,7 +94028,7 @@ func (c *SecurityPoliciesListCall) OrderBy(orderBy string) *SecurityPoliciesList // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *SecurityPoliciesListCall) PageToken(pageToken string) *SecurityPoliciesListCall { +func (c *SnapshotsListCall) PageToken(pageToken string) *SnapshotsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -80327,7 +94036,7 @@ func (c *SecurityPoliciesListCall) PageToken(pageToken string) *SecurityPolicies // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SecurityPoliciesListCall) Fields(s ...googleapi.Field) *SecurityPoliciesListCall { +func (c *SnapshotsListCall) Fields(s ...googleapi.Field) *SnapshotsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -80337,7 +94046,7 @@ func (c *SecurityPoliciesListCall) Fields(s ...googleapi.Field) *SecurityPolicie // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *SecurityPoliciesListCall) IfNoneMatch(entityTag string) *SecurityPoliciesListCall { +func (c *SnapshotsListCall) IfNoneMatch(entityTag string) *SnapshotsListCall { c.ifNoneMatch_ = entityTag return c } @@ -80345,21 +94054,21 @@ func (c *SecurityPoliciesListCall) IfNoneMatch(entityTag string) *SecurityPolici // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SecurityPoliciesListCall) Context(ctx context.Context) *SecurityPoliciesListCall { +func (c *SnapshotsListCall) Context(ctx context.Context) *SnapshotsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SecurityPoliciesListCall) Header() http.Header { +func (c *SnapshotsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SecurityPoliciesListCall) doRequest(alt string) (*http.Response, error) { +func (c *SnapshotsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -80370,7 +94079,7 @@ func (c *SecurityPoliciesListCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders @@ -80380,14 +94089,14 @@ func (c *SecurityPoliciesListCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.securityPolicies.list" call. -// Exactly one of *SecurityPoliciesList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *SecurityPoliciesList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *SecurityPoliciesListCall) Do(opts ...googleapi.CallOption) (*SecurityPoliciesList, error) { +// Do executes the "compute.snapshots.list" call. +// Exactly one of *SnapshotList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *SnapshotList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SnapshotsListCall) Do(opts ...googleapi.CallOption) (*SnapshotList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -80406,7 +94115,7 @@ func (c *SecurityPoliciesListCall) Do(opts ...googleapi.CallOption) (*SecurityPo if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &SecurityPoliciesList{ + ret := &SnapshotList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -80418,9 +94127,9 @@ func (c *SecurityPoliciesListCall) Do(opts ...googleapi.CallOption) (*SecurityPo } return ret, nil // { - // "description": "List all the policies that have been configured for the specified project.", + // "description": "Retrieves the list of Snapshot resources contained within the specified project.", // "httpMethod": "GET", - // "id": "compute.securityPolicies.list", + // "id": "compute.snapshots.list", // "parameterOrder": [ // "project" // ], @@ -80456,9 +94165,9 @@ func (c *SecurityPoliciesListCall) Do(opts ...googleapi.CallOption) (*SecurityPo // "type": "string" // } // }, - // "path": "{project}/global/securityPolicies", + // "path": "{project}/global/snapshots", // "response": { - // "$ref": "SecurityPoliciesList" + // "$ref": "SnapshotList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -80472,7 +94181,7 @@ func (c *SecurityPoliciesListCall) Do(opts ...googleapi.CallOption) (*SecurityPo // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *SecurityPoliciesListCall) Pages(ctx context.Context, f func(*SecurityPoliciesList) error) error { +func (c *SnapshotsListCall) Pages(ctx context.Context, f func(*SnapshotList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -80490,51 +94199,32 @@ func (c *SecurityPoliciesListCall) Pages(ctx context.Context, f func(*SecurityPo } } -// method id "compute.securityPolicies.patch": +// method id "compute.snapshots.setIamPolicy": -type SecurityPoliciesPatchCall struct { - s *Service - project string - securityPolicy string - securitypolicy *SecurityPolicy - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type SnapshotsSetIamPolicyCall struct { + s *Service + project string + resource string + policy *Policy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Patches the specified policy with the data included in the -// request. -func (r *SecurityPoliciesService) Patch(project string, securityPolicy string, securitypolicy *SecurityPolicy) *SecurityPoliciesPatchCall { - c := &SecurityPoliciesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +func (r *SnapshotsService) SetIamPolicy(project string, resource string, policy *Policy) *SnapshotsSetIamPolicyCall { + c := &SnapshotsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.securityPolicy = securityPolicy - c.securitypolicy = securitypolicy - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. 
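(Editorial aside, not part of the vendored patch.) The renamed SnapshotsListCall keeps the usual paging surface: MaxResults/PageToken for manual paging, or the Pages helper shown above, which keeps re-issuing the request until nextPageToken is empty and halts on a non-nil error from the callback. A sketch under the same assumptions as the previous one (compute/v0.beta import path, placeholder project ID):

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v0.beta" // assumed vendored path
)

func main() {
	ctx := context.Background()
	client, err := google.DefaultClient(ctx, compute.ComputeReadonlyScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}

	// Walk every page of snapshots in the placeholder project "my-project".
	err = svc.Snapshots.List("my-project").
		MaxResults(100). // page size; the API default is 500
		Pages(ctx, func(page *compute.SnapshotList) error {
			for _, s := range page.Items {
				fmt.Println(s.Name, s.Status)
			}
			return nil // a non-nil error here would halt the iteration
		})
	if err != nil {
		log.Fatal(err)
	}
}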
-// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *SecurityPoliciesPatchCall) RequestId(requestId string) *SecurityPoliciesPatchCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.policy = policy return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SecurityPoliciesPatchCall) Fields(s ...googleapi.Field) *SecurityPoliciesPatchCall { +func (c *SnapshotsSetIamPolicyCall) Fields(s ...googleapi.Field) *SnapshotsSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -80542,52 +94232,52 @@ func (c *SecurityPoliciesPatchCall) Fields(s ...googleapi.Field) *SecurityPolici // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SecurityPoliciesPatchCall) Context(ctx context.Context) *SecurityPoliciesPatchCall { +func (c *SnapshotsSetIamPolicyCall) Context(ctx context.Context) *SnapshotsSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SecurityPoliciesPatchCall) Header() http.Header { +func (c *SnapshotsSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SecurityPoliciesPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *SnapshotsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicy) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{resource}/setIamPolicy") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "securityPolicy": c.securityPolicy, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.securityPolicies.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. 
-func (c *SecurityPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.snapshots.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *SnapshotsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -80606,7 +94296,7 @@ func (c *SecurityPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -80618,12 +94308,12 @@ func (c *SecurityPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Patches the specified policy with the data included in the request.", - // "httpMethod": "PATCH", - // "id": "compute.securityPolicies.patch", + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "httpMethod": "POST", + // "id": "compute.snapshots.setIamPolicy", // "parameterOrder": [ // "project", - // "securityPolicy" + // "resource" // ], // "parameters": { // "project": { @@ -80633,61 +94323,55 @@ func (c *SecurityPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "securityPolicy": { - // "description": "Name of the security policy to update.", + // "resource": { + // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z0-9](?:[-a-z0-9_]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/securityPolicies/{securityPolicy}", + // "path": "{project}/global/snapshots/{resource}/setIamPolicy", // "request": { - // "$ref": "SecurityPolicy" + // "$ref": "Policy" // }, // "response": { - // "$ref": "Operation" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.securityPolicies.testIamPermissions": +// method id "compute.snapshots.setLabels": -type SecurityPoliciesTestIamPermissionsCall struct { +type SnapshotsSetLabelsCall struct { s *Service project string resource string - testpermissionsrequest *TestPermissionsRequest + globalsetlabelsrequest *GlobalSetLabelsRequest urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *SecurityPoliciesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *SecurityPoliciesTestIamPermissionsCall { - c := &SecurityPoliciesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetLabels: Sets the labels on a snapshot. To learn more about labels, +// read the Labeling Resources documentation. +func (r *SnapshotsService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *SnapshotsSetLabelsCall { + c := &SnapshotsSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.globalsetlabelsrequest = globalsetlabelsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SecurityPoliciesTestIamPermissionsCall) Fields(s ...googleapi.Field) *SecurityPoliciesTestIamPermissionsCall { +func (c *SnapshotsSetLabelsCall) Fields(s ...googleapi.Field) *SnapshotsSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -80695,34 +94379,34 @@ func (c *SecurityPoliciesTestIamPermissionsCall) Fields(s ...googleapi.Field) *S // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SecurityPoliciesTestIamPermissionsCall) Context(ctx context.Context) *SecurityPoliciesTestIamPermissionsCall { +func (c *SnapshotsSetLabelsCall) Context(ctx context.Context) *SnapshotsSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *SecurityPoliciesTestIamPermissionsCall) Header() http.Header { +func (c *SnapshotsSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SecurityPoliciesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *SnapshotsSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -80733,14 +94417,14 @@ func (c *SecurityPoliciesTestIamPermissionsCall) doRequest(alt string) (*http.Re return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.securityPolicies.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *SecurityPoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.snapshots.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SnapshotsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -80759,7 +94443,7 @@ func (c *SecurityPoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOption if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -80771,9 +94455,9 @@ func (c *SecurityPoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOption } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Sets the labels on a snapshot. 
To learn more about labels, read the Labeling Resources documentation.", // "httpMethod": "POST", - // "id": "compute.securityPolicies.testIamPermissions", + // "id": "compute.snapshots.setLabels", // "parameterOrder": [ // "project", // "resource" @@ -80794,71 +94478,47 @@ func (c *SecurityPoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOption // "type": "string" // } // }, - // "path": "{project}/global/securityPolicies/{resource}/testIamPermissions", + // "path": "{project}/global/snapshots/{resource}/setLabels", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "GlobalSetLabelsRequest" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.snapshots.delete": +// method id "compute.snapshots.testIamPermissions": -type SnapshotsDeleteCall struct { - s *Service - project string - snapshot string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type SnapshotsTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified Snapshot resource. Keep in mind that -// deleting a single snapshot might not necessarily delete all the data -// on that snapshot. If any data on the snapshot that is marked for -// deletion is needed for subsequent snapshots, the data will be moved -// to the next corresponding snapshot. -// -// For more information, see Deleting snaphots. -// For details, see https://cloud.google.com/compute/docs/reference/latest/snapshots/delete -func (r *SnapshotsService) Delete(project string, snapshot string) *SnapshotsDeleteCall { - c := &SnapshotsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *SnapshotsService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *SnapshotsTestIamPermissionsCall { + c := &SnapshotsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.snapshot = snapshot - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *SnapshotsDeleteCall) RequestId(requestId string) *SnapshotsDeleteCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SnapshotsDeleteCall) Fields(s ...googleapi.Field) *SnapshotsDeleteCall { +func (c *SnapshotsTestIamPermissionsCall) Fields(s ...googleapi.Field) *SnapshotsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -80866,204 +94526,52 @@ func (c *SnapshotsDeleteCall) Fields(s ...googleapi.Field) *SnapshotsDeleteCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SnapshotsDeleteCall) Context(ctx context.Context) *SnapshotsDeleteCall { +func (c *SnapshotsTestIamPermissionsCall) Context(ctx context.Context) *SnapshotsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SnapshotsDeleteCall) Header() http.Header { +func (c *SnapshotsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SnapshotsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *SnapshotsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{snapshot}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "snapshot": c.snapshot, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.snapshots.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *SnapshotsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Deletes the specified Snapshot resource. Keep in mind that deleting a single snapshot might not necessarily delete all the data on that snapshot. 
If any data on the snapshot that is marked for deletion is needed for subsequent snapshots, the data will be moved to the next corresponding snapshot.\n\nFor more information, see Deleting snaphots.", - // "httpMethod": "DELETE", - // "id": "compute.snapshots.delete", - // "parameterOrder": [ - // "project", - // "snapshot" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "snapshot": { - // "description": "Name of the Snapshot resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/snapshots/{snapshot}", - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.snapshots.get": - -type SnapshotsGetCall struct { - s *Service - project string - snapshot string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified Snapshot resource. Get a list of available -// snapshots by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/snapshots/get -func (r *SnapshotsService) Get(project string, snapshot string) *SnapshotsGetCall { - c := &SnapshotsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.snapshot = snapshot - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *SnapshotsGetCall) Fields(s ...googleapi.Field) *SnapshotsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *SnapshotsGetCall) IfNoneMatch(entityTag string) *SnapshotsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *SnapshotsGetCall) Context(ctx context.Context) *SnapshotsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *SnapshotsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *SnapshotsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{snapshot}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "snapshot": c.snapshot, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.snapshots.get" call. -// Exactly one of *Snapshot or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Snapshot.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *SnapshotsGetCall) Do(opts ...googleapi.CallOption) (*Snapshot, error) { +// Do executes the "compute.snapshots.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *SnapshotsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -81082,7 +94590,7 @@ func (c *SnapshotsGetCall) Do(opts ...googleapi.CallOption) (*Snapshot, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Snapshot{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -81094,12 +94602,12 @@ func (c *SnapshotsGetCall) Do(opts ...googleapi.CallOption) (*Snapshot, error) { } return ret, nil // { - // "description": "Returns the specified Snapshot resource. 
Get a list of available snapshots by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.snapshots.get", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.snapshots.testIamPermissions", // "parameterOrder": [ // "project", - // "snapshot" + // "resource" // ], // "parameters": { // "project": { @@ -81109,17 +94617,20 @@ func (c *SnapshotsGetCall) Do(opts ...googleapi.CallOption) (*Snapshot, error) { // "required": true, // "type": "string" // }, - // "snapshot": { - // "description": "Name of the Snapshot resource to return.", + // "resource": { + // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/snapshots/{snapshot}", + // "path": "{project}/global/snapshots/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "Snapshot" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -81130,92 +94641,96 @@ func (c *SnapshotsGetCall) Do(opts ...googleapi.CallOption) (*Snapshot, error) { } -// method id "compute.snapshots.getIamPolicy": +// method id "compute.sslCertificates.delete": -type SnapshotsGetIamPolicyCall struct { - s *Service - project string - resource string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type SslCertificatesDeleteCall struct { + s *Service + project string + sslCertificate string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetIamPolicy: Gets the access control policy for a resource. May be -// empty if no such policy or resource exists. -func (r *SnapshotsService) GetIamPolicy(project string, resource string) *SnapshotsGetIamPolicyCall { - c := &SnapshotsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified SslCertificate resource. +func (r *SslCertificatesService) Delete(project string, sslCertificate string) *SslCertificatesDeleteCall { + c := &SslCertificatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource + c.sslCertificate = sslCertificate + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *SslCertificatesDeleteCall) RequestId(requestId string) *SslCertificatesDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *SnapshotsGetIamPolicyCall) Fields(s ...googleapi.Field) *SnapshotsGetIamPolicyCall { +func (c *SslCertificatesDeleteCall) Fields(s ...googleapi.Field) *SslCertificatesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *SnapshotsGetIamPolicyCall) IfNoneMatch(entityTag string) *SnapshotsGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SnapshotsGetIamPolicyCall) Context(ctx context.Context) *SnapshotsGetIamPolicyCall { +func (c *SslCertificatesDeleteCall) Context(ctx context.Context) *SslCertificatesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SnapshotsGetIamPolicyCall) Header() http.Header { +func (c *SslCertificatesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SnapshotsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *SslCertificatesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{resource}/getIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates/{sslCertificate}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "sslCertificate": c.sslCertificate, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.snapshots.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *SnapshotsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.sslCertificates.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -81234,7 +94749,7 @@ func (c *SnapshotsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -81246,12 +94761,12 @@ func (c *SnapshotsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e } return ret, nil // { - // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", - // "httpMethod": "GET", - // "id": "compute.snapshots.getIamPolicy", + // "description": "Deletes the specified SslCertificate resource.", + // "httpMethod": "DELETE", + // "id": "compute.sslCertificates.delete", // "parameterOrder": [ // "project", - // "resource" + // "sslCertificate" // ], // "parameters": { // "project": { @@ -81261,118 +94776,56 @@ func (c *SnapshotsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name of the resource for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "sslCertificate": { + // "description": "Name of the SslCertificate resource to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/snapshots/{resource}/getIamPolicy", + // "path": "{project}/global/sslCertificates/{sslCertificate}", // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.snapshots.list": +// method id "compute.sslCertificates.get": -type SnapshotsListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type SslCertificatesGetCall struct { + s *Service + project string + sslCertificate string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of Snapshot resources contained within the -// specified project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/snapshots/list -func (r *SnapshotsService) List(project string) *SnapshotsListCall { - c := &SnapshotsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified SslCertificate resource. 
Get a list of +// available SSL certificates by making a list() request. +func (r *SslCertificatesService) Get(project string, sslCertificate string) *SslCertificatesGetCall { + c := &SslCertificatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *SnapshotsListCall) Filter(filter string) *SnapshotsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *SnapshotsListCall) MaxResults(maxResults int64) *SnapshotsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *SnapshotsListCall) OrderBy(orderBy string) *SnapshotsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *SnapshotsListCall) PageToken(pageToken string) *SnapshotsListCall { - c.urlParams_.Set("pageToken", pageToken) + c.sslCertificate = sslCertificate return c } // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SnapshotsListCall) Fields(s ...googleapi.Field) *SnapshotsListCall { +func (c *SslCertificatesGetCall) Fields(s ...googleapi.Field) *SslCertificatesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -81382,7 +94835,7 @@ func (c *SnapshotsListCall) Fields(s ...googleapi.Field) *SnapshotsListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *SnapshotsListCall) IfNoneMatch(entityTag string) *SnapshotsListCall { +func (c *SslCertificatesGetCall) IfNoneMatch(entityTag string) *SslCertificatesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -81390,21 +94843,21 @@ func (c *SnapshotsListCall) IfNoneMatch(entityTag string) *SnapshotsListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SnapshotsListCall) Context(ctx context.Context) *SnapshotsListCall { +func (c *SslCertificatesGetCall) Context(ctx context.Context) *SslCertificatesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SnapshotsListCall) Header() http.Header { +func (c *SslCertificatesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SnapshotsListCall) doRequest(alt string) (*http.Response, error) { +func (c *SslCertificatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -81415,24 +94868,25 @@ func (c *SnapshotsListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates/{sslCertificate}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "sslCertificate": c.sslCertificate, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.snapshots.list" call. -// Exactly one of *SnapshotList or error will be non-nil. Any non-2xx +// Do executes the "compute.sslCertificates.get" call. +// Exactly one of *SslCertificate or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *SnapshotList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *SnapshotsListCall) Do(opts ...googleapi.CallOption) (*SnapshotList, error) { +// *SslCertificate.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *SslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCertificate, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -81451,7 +94905,7 @@ func (c *SnapshotsListCall) Do(opts ...googleapi.CallOption) (*SnapshotList, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &SnapshotList{ + ret := &SslCertificate{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -81463,47 +94917,32 @@ func (c *SnapshotsListCall) Do(opts ...googleapi.CallOption) (*SnapshotList, err } return ret, nil // { - // "description": "Retrieves the list of Snapshot resources contained within the specified project.", + // "description": "Returns the specified SslCertificate resource. Get a list of available SSL certificates by making a list() request.", // "httpMethod": "GET", - // "id": "compute.snapshots.list", + // "id": "compute.sslCertificates.get", // "parameterOrder": [ - // "project" + // "project", + // "sslCertificate" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "sslCertificate": { + // "description": "Name of the SslCertificate resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/snapshots", + // "path": "{project}/global/sslCertificates/{sslCertificate}", // "response": { - // "$ref": "SnapshotList" + // "$ref": "SslCertificate" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -81514,53 +94953,49 @@ func (c *SnapshotsListCall) Do(opts ...googleapi.CallOption) (*SnapshotList, err } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *SnapshotsListCall) Pages(ctx context.Context, f func(*SnapshotList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.snapshots.setIamPolicy": +// method id "compute.sslCertificates.insert": -type SnapshotsSetIamPolicyCall struct { - s *Service - project string - resource string - policy *Policy - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type SslCertificatesInsertCall struct { + s *Service + project string + sslcertificate *SslCertificate + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetIamPolicy: Sets the access control policy on the specified -// resource. Replaces any existing policy. -func (r *SnapshotsService) SetIamPolicy(project string, resource string, policy *Policy) *SnapshotsSetIamPolicyCall { - c := &SnapshotsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a SslCertificate resource in the specified project +// using the data included in the request. +func (r *SslCertificatesService) Insert(project string, sslcertificate *SslCertificate) *SslCertificatesInsertCall { + c := &SslCertificatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.policy = policy + c.sslcertificate = sslcertificate + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *SslCertificatesInsertCall) RequestId(requestId string) *SslCertificatesInsertCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SnapshotsSetIamPolicyCall) Fields(s ...googleapi.Field) *SnapshotsSetIamPolicyCall { +func (c *SslCertificatesInsertCall) Fields(s ...googleapi.Field) *SslCertificatesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -81568,52 +95003,51 @@ func (c *SnapshotsSetIamPolicyCall) Fields(s ...googleapi.Field) *SnapshotsSetIa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SnapshotsSetIamPolicyCall) Context(ctx context.Context) *SnapshotsSetIamPolicyCall { +func (c *SslCertificatesInsertCall) Context(ctx context.Context) *SslCertificatesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SnapshotsSetIamPolicyCall) Header() http.Header { +func (c *SslCertificatesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SnapshotsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *SslCertificatesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslcertificate) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{resource}/setIamPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.snapshots.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *SnapshotsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "compute.sslCertificates.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -81632,7 +95066,7 @@ func (c *SnapshotsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Policy{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -81644,12 +95078,11 @@ func (c *SnapshotsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e } return ret, nil // { - // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "description": "Creates a SslCertificate resource in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.snapshots.setIamPolicy", + // "id": "compute.sslCertificates.insert", // "parameterOrder": [ - // "project", - // "resource" + // "project" // ], // "parameters": { // "project": { @@ -81659,20 +95092,18 @@ func (c *SnapshotsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/global/snapshots/{resource}/setIamPolicy", + // "path": "{project}/global/sslCertificates", // "request": { - // "$ref": "Policy" + // "$ref": "SslCertificate" // }, // "response": { - // "$ref": "Policy" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -81682,85 +95113,156 @@ func (c *SnapshotsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, e } -// method id "compute.snapshots.setLabels": +// method id "compute.sslCertificates.list": -type SnapshotsSetLabelsCall struct { - s *Service - project string - resource string - globalsetlabelsrequest *GlobalSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type SslCertificatesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetLabels: Sets the labels on a snapshot. To learn more about labels, -// read the Labeling Resources documentation. 
-func (r *SnapshotsService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *SnapshotsSetLabelsCall { - c := &SnapshotsSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of SslCertificate resources available to the +// specified project. +func (r *SslCertificatesService) List(project string) *SslCertificatesListCall { + c := &SslCertificatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.globalsetlabelsrequest = globalsetlabelsrequest + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *SslCertificatesListCall) Filter(filter string) *SslCertificatesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *SslCertificatesListCall) MaxResults(maxResults int64) *SslCertificatesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. 
+func (c *SslCertificatesListCall) OrderBy(orderBy string) *SslCertificatesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *SslCertificatesListCall) PageToken(pageToken string) *SslCertificatesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SnapshotsSetLabelsCall) Fields(s ...googleapi.Field) *SnapshotsSetLabelsCall { +func (c *SslCertificatesListCall) Fields(s ...googleapi.Field) *SslCertificatesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *SslCertificatesListCall) IfNoneMatch(entityTag string) *SslCertificatesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SnapshotsSetLabelsCall) Context(ctx context.Context) *SnapshotsSetLabelsCall { +func (c *SslCertificatesListCall) Context(ctx context.Context) *SslCertificatesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SnapshotsSetLabelsCall) Header() http.Header { +func (c *SslCertificatesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SnapshotsSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *SslCertificatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{resource}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.snapshots.setLabels" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *SnapshotsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.sslCertificates.list" call. +// Exactly one of *SslCertificateList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *SslCertificateList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *SslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertificateList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -81779,7 +95281,7 @@ func (c *SnapshotsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &SslCertificateList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -81791,47 +95293,81 @@ func (c *SnapshotsSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Sets the labels on a snapshot. To learn more about labels, read the Labeling Resources documentation.", - // "httpMethod": "POST", - // "id": "compute.snapshots.setLabels", + // "description": "Retrieves the list of SslCertificate resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.sslCertificates.list", // "parameterOrder": [ - // "project", - // "resource" + // "project" // ], // "parameters": { + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. 
Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/global/snapshots/{resource}/setLabels", - // "request": { - // "$ref": "GlobalSetLabelsRequest" - // }, + // "path": "{project}/global/sslCertificates", // "response": { - // "$ref": "Operation" + // "$ref": "SslCertificateList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.snapshots.testIamPermissions": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *SslCertificatesListCall) Pages(ctx context.Context, f func(*SslCertificateList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type SnapshotsTestIamPermissionsCall struct { +// method id "compute.sslCertificates.testIamPermissions": + +type SslCertificatesTestIamPermissionsCall struct { s *Service project string resource string @@ -81843,8 +95379,8 @@ type SnapshotsTestIamPermissionsCall struct { // TestIamPermissions: Returns permissions that a caller has on the // specified resource. 
-func (r *SnapshotsService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *SnapshotsTestIamPermissionsCall { - c := &SnapshotsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *SslCertificatesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *SslCertificatesTestIamPermissionsCall { + c := &SslCertificatesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.resource = resource c.testpermissionsrequest = testpermissionsrequest @@ -81854,7 +95390,7 @@ func (r *SnapshotsService) TestIamPermissions(project string, resource string, t // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SnapshotsTestIamPermissionsCall) Fields(s ...googleapi.Field) *SnapshotsTestIamPermissionsCall { +func (c *SslCertificatesTestIamPermissionsCall) Fields(s ...googleapi.Field) *SslCertificatesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -81862,21 +95398,21 @@ func (c *SnapshotsTestIamPermissionsCall) Fields(s ...googleapi.Field) *Snapshot // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SnapshotsTestIamPermissionsCall) Context(ctx context.Context) *SnapshotsTestIamPermissionsCall { +func (c *SslCertificatesTestIamPermissionsCall) Context(ctx context.Context) *SslCertificatesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SnapshotsTestIamPermissionsCall) Header() http.Header { +func (c *SslCertificatesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SnapshotsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *SslCertificatesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -81889,7 +95425,7 @@ func (c *SnapshotsTestIamPermissionsCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -81900,14 +95436,14 @@ func (c *SnapshotsTestIamPermissionsCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.snapshots.testIamPermissions" call. +// Do executes the "compute.sslCertificates.testIamPermissions" call. // Exactly one of *TestPermissionsResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *TestPermissionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. 
-func (c *SnapshotsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *SslCertificatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -81940,7 +95476,7 @@ func (c *SnapshotsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes // { // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.snapshots.testIamPermissions", + // "id": "compute.sslCertificates.testIamPermissions", // "parameterOrder": [ // "project", // "resource" @@ -81956,12 +95492,12 @@ func (c *SnapshotsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes // "resource": { // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + // "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/snapshots/{resource}/testIamPermissions", + // "path": "{project}/global/sslCertificates/{resource}/testIamPermissions", // "request": { // "$ref": "TestPermissionsRequest" // }, @@ -81977,22 +95513,24 @@ func (c *SnapshotsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes } -// method id "compute.sslCertificates.delete": +// method id "compute.sslPolicies.delete": -type SslCertificatesDeleteCall struct { - s *Service - project string - sslCertificate string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type SslPoliciesDeleteCall struct { + s *Service + project string + sslPolicy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified SslCertificate resource. -func (r *SslCertificatesService) Delete(project string, sslCertificate string) *SslCertificatesDeleteCall { - c := &SslCertificatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified SSL policy. The SSL policy resource can +// be deleted only if it is not in use by any TargetHttpsProxy or +// TargetSslProxy resources. +func (r *SslPoliciesService) Delete(project string, sslPolicy string) *SslPoliciesDeleteCall { + c := &SslPoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.sslCertificate = sslCertificate + c.sslPolicy = sslPolicy return c } @@ -82010,7 +95548,7 @@ func (r *SslCertificatesService) Delete(project string, sslCertificate string) * // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *SslCertificatesDeleteCall) RequestId(requestId string) *SslCertificatesDeleteCall { +func (c *SslPoliciesDeleteCall) RequestId(requestId string) *SslPoliciesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -82018,7 +95556,7 @@ func (c *SslCertificatesDeleteCall) RequestId(requestId string) *SslCertificates // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *SslCertificatesDeleteCall) Fields(s ...googleapi.Field) *SslCertificatesDeleteCall { +func (c *SslPoliciesDeleteCall) Fields(s ...googleapi.Field) *SslPoliciesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -82026,21 +95564,21 @@ func (c *SslCertificatesDeleteCall) Fields(s ...googleapi.Field) *SslCertificate // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SslCertificatesDeleteCall) Context(ctx context.Context) *SslCertificatesDeleteCall { +func (c *SslPoliciesDeleteCall) Context(ctx context.Context) *SslPoliciesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SslCertificatesDeleteCall) Header() http.Header { +func (c *SslPoliciesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SslCertificatesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *SslPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -82048,25 +95586,25 @@ func (c *SslCertificatesDeleteCall) doRequest(alt string) (*http.Response, error reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates/{sslCertificate}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies/{sslPolicy}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "sslCertificate": c.sslCertificate, + "project": c.project, + "sslPolicy": c.sslPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.sslCertificates.delete" call. +// Do executes the "compute.sslPolicies.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *SslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *SslPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -82097,12 +95635,12 @@ func (c *SslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Deletes the specified SslCertificate resource.", + // "description": "Deletes the specified SSL policy. 
The SSL policy resource can be deleted only if it is not in use by any TargetHttpsProxy or TargetSslProxy resources.", // "httpMethod": "DELETE", - // "id": "compute.sslCertificates.delete", + // "id": "compute.sslPolicies.delete", // "parameterOrder": [ // "project", - // "sslCertificate" + // "sslPolicy" // ], // "parameters": { // "project": { @@ -82117,15 +95655,14 @@ func (c *SslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation // "location": "query", // "type": "string" // }, - // "sslCertificate": { - // "description": "Name of the SslCertificate resource to delete.", + // "sslPolicy": { + // "description": "Name of the SSL policy to delete. The name must be 1-63 characters long, and comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/sslCertificates/{sslCertificate}", + // "path": "{project}/global/sslPolicies/{sslPolicy}", // "response": { // "$ref": "Operation" // }, @@ -82137,31 +95674,31 @@ func (c *SslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.sslCertificates.get": +// method id "compute.sslPolicies.get": -type SslCertificatesGetCall struct { - s *Service - project string - sslCertificate string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type SslPoliciesGetCall struct { + s *Service + project string + sslPolicy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified SslCertificate resource. Get a list of -// available SSL certificates by making a list() request. -func (r *SslCertificatesService) Get(project string, sslCertificate string) *SslCertificatesGetCall { - c := &SslCertificatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: List all of the ordered rules present in a single specified +// policy. +func (r *SslPoliciesService) Get(project string, sslPolicy string) *SslPoliciesGetCall { + c := &SslPoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.sslCertificate = sslCertificate + c.sslPolicy = sslPolicy return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SslCertificatesGetCall) Fields(s ...googleapi.Field) *SslCertificatesGetCall { +func (c *SslPoliciesGetCall) Fields(s ...googleapi.Field) *SslPoliciesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -82171,7 +95708,7 @@ func (c *SslCertificatesGetCall) Fields(s ...googleapi.Field) *SslCertificatesGe // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *SslCertificatesGetCall) IfNoneMatch(entityTag string) *SslCertificatesGetCall { +func (c *SslPoliciesGetCall) IfNoneMatch(entityTag string) *SslPoliciesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -82179,21 +95716,21 @@ func (c *SslCertificatesGetCall) IfNoneMatch(entityTag string) *SslCertificatesG // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *SslCertificatesGetCall) Context(ctx context.Context) *SslCertificatesGetCall { +func (c *SslPoliciesGetCall) Context(ctx context.Context) *SslPoliciesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SslCertificatesGetCall) Header() http.Header { +func (c *SslPoliciesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SslCertificatesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *SslPoliciesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -82204,25 +95741,25 @@ func (c *SslCertificatesGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates/{sslCertificate}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies/{sslPolicy}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "sslCertificate": c.sslCertificate, + "project": c.project, + "sslPolicy": c.sslPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.sslCertificates.get" call. -// Exactly one of *SslCertificate or error will be non-nil. Any non-2xx +// Do executes the "compute.sslPolicies.get" call. +// Exactly one of *SslPolicy or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *SslCertificate.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *SslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCertificate, error) { +// *SslPolicy.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SslPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SslPolicy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -82241,7 +95778,7 @@ func (c *SslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCertifica if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &SslCertificate{ + ret := &SslPolicy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -82253,12 +95790,12 @@ func (c *SslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCertifica } return ret, nil // { - // "description": "Returns the specified SslCertificate resource. 
Get a list of available SSL certificates by making a list() request.", + // "description": "List all of the ordered rules present in a single specified policy.", // "httpMethod": "GET", - // "id": "compute.sslCertificates.get", + // "id": "compute.sslPolicies.get", // "parameterOrder": [ // "project", - // "sslCertificate" + // "sslPolicy" // ], // "parameters": { // "project": { @@ -82268,17 +95805,16 @@ func (c *SslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCertifica // "required": true, // "type": "string" // }, - // "sslCertificate": { - // "description": "Name of the SslCertificate resource to return.", + // "sslPolicy": { + // "description": "Name of the SSL policy to update. The name must be 1-63 characters long, and comply with RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/sslCertificates/{sslCertificate}", + // "path": "{project}/global/sslPolicies/{sslPolicy}", // "response": { - // "$ref": "SslCertificate" + // "$ref": "SslPolicy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -82289,23 +95825,23 @@ func (c *SslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCertifica } -// method id "compute.sslCertificates.insert": +// method id "compute.sslPolicies.insert": -type SslCertificatesInsertCall struct { - s *Service - project string - sslcertificate *SslCertificate - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type SslPoliciesInsertCall struct { + s *Service + project string + sslpolicy *SslPolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a SslCertificate resource in the specified project -// using the data included in the request. -func (r *SslCertificatesService) Insert(project string, sslcertificate *SslCertificate) *SslCertificatesInsertCall { - c := &SslCertificatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Returns the specified SSL policy resource. Get a list of +// available SSL policies by making a list() request. +func (r *SslPoliciesService) Insert(project string, sslpolicy *SslPolicy) *SslPoliciesInsertCall { + c := &SslPoliciesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.sslcertificate = sslcertificate + c.sslpolicy = sslpolicy return c } @@ -82323,7 +95859,7 @@ func (r *SslCertificatesService) Insert(project string, sslcertificate *SslCerti // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *SslCertificatesInsertCall) RequestId(requestId string) *SslCertificatesInsertCall { +func (c *SslPoliciesInsertCall) RequestId(requestId string) *SslPoliciesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -82331,7 +95867,7 @@ func (c *SslCertificatesInsertCall) RequestId(requestId string) *SslCertificates // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *SslCertificatesInsertCall) Fields(s ...googleapi.Field) *SslCertificatesInsertCall { +func (c *SslPoliciesInsertCall) Fields(s ...googleapi.Field) *SslPoliciesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -82339,34 +95875,34 @@ func (c *SslCertificatesInsertCall) Fields(s ...googleapi.Field) *SslCertificate // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SslCertificatesInsertCall) Context(ctx context.Context) *SslCertificatesInsertCall { +func (c *SslPoliciesInsertCall) Context(ctx context.Context) *SslPoliciesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SslCertificatesInsertCall) Header() http.Header { +func (c *SslPoliciesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SslCertificatesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *SslPoliciesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslcertificate) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslpolicy) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -82376,14 +95912,14 @@ func (c *SslCertificatesInsertCall) doRequest(alt string) (*http.Response, error return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.sslCertificates.insert" call. +// Do executes the "compute.sslPolicies.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *SslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *SslPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -82414,9 +95950,9 @@ func (c *SslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Creates a SslCertificate resource in the specified project using the data included in the request.", + // "description": "Returns the specified SSL policy resource. 
Get a list of available SSL policies by making a list() request.", // "httpMethod": "POST", - // "id": "compute.sslCertificates.insert", + // "id": "compute.sslPolicies.insert", // "parameterOrder": [ // "project" // ], @@ -82434,9 +95970,9 @@ func (c *SslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation // "type": "string" // } // }, - // "path": "{project}/global/sslCertificates", + // "path": "{project}/global/sslPolicies", // "request": { - // "$ref": "SslCertificate" + // "$ref": "SslPolicy" // }, // "response": { // "$ref": "Operation" @@ -82449,9 +95985,9 @@ func (c *SslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.sslCertificates.list": +// method id "compute.sslPolicies.list": -type SslCertificatesListCall struct { +type SslPoliciesListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -82460,10 +95996,10 @@ type SslCertificatesListCall struct { header_ http.Header } -// List: Retrieves the list of SslCertificate resources available to the +// List: List all the SSL policies that have been configured for the // specified project. -func (r *SslCertificatesService) List(project string) *SslCertificatesListCall { - c := &SslCertificatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *SslPoliciesService) List(project string) *SslPoliciesListCall { + c := &SslPoliciesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -82494,7 +96030,7 @@ func (r *SslCertificatesService) List(project string) *SslCertificatesListCall { // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *SslCertificatesListCall) Filter(filter string) *SslCertificatesListCall { +func (c *SslPoliciesListCall) Filter(filter string) *SslPoliciesListCall { c.urlParams_.Set("filter", filter) return c } @@ -82505,7 +96041,7 @@ func (c *SslCertificatesListCall) Filter(filter string) *SslCertificatesListCall // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *SslCertificatesListCall) MaxResults(maxResults int64) *SslCertificatesListCall { +func (c *SslPoliciesListCall) MaxResults(maxResults int64) *SslPoliciesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -82522,7 +96058,7 @@ func (c *SslCertificatesListCall) MaxResults(maxResults int64) *SslCertificatesL // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *SslCertificatesListCall) OrderBy(orderBy string) *SslCertificatesListCall { +func (c *SslPoliciesListCall) OrderBy(orderBy string) *SslPoliciesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -82530,7 +96066,7 @@ func (c *SslCertificatesListCall) OrderBy(orderBy string) *SslCertificatesListCa // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *SslCertificatesListCall) PageToken(pageToken string) *SslCertificatesListCall { +func (c *SslPoliciesListCall) PageToken(pageToken string) *SslPoliciesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -82538,7 +96074,7 @@ func (c *SslCertificatesListCall) PageToken(pageToken string) *SslCertificatesLi // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SslCertificatesListCall) Fields(s ...googleapi.Field) *SslCertificatesListCall { +func (c *SslPoliciesListCall) Fields(s ...googleapi.Field) *SslPoliciesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -82548,7 +96084,7 @@ func (c *SslCertificatesListCall) Fields(s ...googleapi.Field) *SslCertificatesL // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *SslCertificatesListCall) IfNoneMatch(entityTag string) *SslCertificatesListCall { +func (c *SslPoliciesListCall) IfNoneMatch(entityTag string) *SslPoliciesListCall { c.ifNoneMatch_ = entityTag return c } @@ -82556,21 +96092,21 @@ func (c *SslCertificatesListCall) IfNoneMatch(entityTag string) *SslCertificates // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SslCertificatesListCall) Context(ctx context.Context) *SslCertificatesListCall { +func (c *SslPoliciesListCall) Context(ctx context.Context) *SslPoliciesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SslCertificatesListCall) Header() http.Header { +func (c *SslPoliciesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SslCertificatesListCall) doRequest(alt string) (*http.Response, error) { +func (c *SslPoliciesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -82581,7 +96117,7 @@ func (c *SslCertificatesListCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders @@ -82591,14 +96127,14 @@ func (c *SslCertificatesListCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.sslCertificates.list" call. -// Exactly one of *SslCertificateList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *SslCertificateList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.sslPolicies.list" call. +// Exactly one of *SslPoliciesList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *SslPoliciesList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *SslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertificateList, error) { +func (c *SslPoliciesListCall) Do(opts ...googleapi.CallOption) (*SslPoliciesList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -82617,7 +96153,7 @@ func (c *SslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertific if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &SslCertificateList{ + ret := &SslPoliciesList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -82629,9 +96165,263 @@ func (c *SslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertific } return ret, nil // { - // "description": "Retrieves the list of SslCertificate resources available to the specified project.", + // "description": "List all the SSL policies that have been configured for the specified project.", // "httpMethod": "GET", - // "id": "compute.sslCertificates.list", + // "id": "compute.sslPolicies.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/sslPolicies", + // "response": { + // "$ref": "SslPoliciesList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *SslPoliciesListCall) Pages(ctx context.Context, f func(*SslPoliciesList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.sslPolicies.listAvailableFeatures": + +type SslPoliciesListAvailableFeaturesCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// ListAvailableFeatures: Lists all features that can be specified in +// the SSL policy when using custom profile. +func (r *SslPoliciesService) ListAvailableFeatures(project string) *SslPoliciesListAvailableFeaturesCall { + c := &SslPoliciesListAvailableFeaturesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. 
+func (c *SslPoliciesListAvailableFeaturesCall) Filter(filter string) *SslPoliciesListAvailableFeaturesCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *SslPoliciesListAvailableFeaturesCall) MaxResults(maxResults int64) *SslPoliciesListAvailableFeaturesCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *SslPoliciesListAvailableFeaturesCall) OrderBy(orderBy string) *SslPoliciesListAvailableFeaturesCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *SslPoliciesListAvailableFeaturesCall) PageToken(pageToken string) *SslPoliciesListAvailableFeaturesCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SslPoliciesListAvailableFeaturesCall) Fields(s ...googleapi.Field) *SslPoliciesListAvailableFeaturesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *SslPoliciesListAvailableFeaturesCall) IfNoneMatch(entityTag string) *SslPoliciesListAvailableFeaturesCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SslPoliciesListAvailableFeaturesCall) Context(ctx context.Context) *SslPoliciesListAvailableFeaturesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *SslPoliciesListAvailableFeaturesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SslPoliciesListAvailableFeaturesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies/listAvailableFeatures") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.sslPolicies.listAvailableFeatures" call. +// Exactly one of *SslPoliciesListAvailableFeaturesResponse or error +// will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *SslPoliciesListAvailableFeaturesResponse.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *SslPoliciesListAvailableFeaturesCall) Do(opts ...googleapi.CallOption) (*SslPoliciesListAvailableFeaturesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &SslPoliciesListAvailableFeaturesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists all features that can be specified in the SSL policy when using custom profile.", + // "httpMethod": "GET", + // "id": "compute.sslPolicies.listAvailableFeatures", // "parameterOrder": [ // "project" // ], @@ -82667,9 +96457,9 @@ func (c *SslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertific // "type": "string" // } // }, - // "path": "{project}/global/sslCertificates", + // "path": "{project}/global/sslPolicies/listAvailableFeatures", // "response": { - // "$ref": "SslCertificateList" + // "$ref": "SslPoliciesListAvailableFeaturesResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -82680,30 +96470,179 @@ func (c *SslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertific } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *SslCertificatesListCall) Pages(ctx context.Context, f func(*SslCertificateList) error) error { +// method id "compute.sslPolicies.patch": + +type SslPoliciesPatchCall struct { + s *Service + project string + sslPolicy string + sslpolicy *SslPolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches the specified SSL policy with the data included in the +// request. +func (r *SslPoliciesService) Patch(project string, sslPolicy string, sslpolicy *SslPolicy) *SslPoliciesPatchCall { + c := &SslPoliciesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.sslPolicy = sslPolicy + c.sslpolicy = sslpolicy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *SslPoliciesPatchCall) RequestId(requestId string) *SslPoliciesPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SslPoliciesPatchCall) Fields(s ...googleapi.Field) *SslPoliciesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SslPoliciesPatchCall) Context(ctx context.Context) *SslPoliciesPatchCall { c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *SslPoliciesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SslPoliciesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslpolicy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies/{sslPolicy}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "sslPolicy": c.sslPolicy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.sslPolicies.patch" call. 
+// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SslPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() } - if x.NextPageToken == "" { - return nil + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, } - c.PageToken(x.NextPageToken) } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches the specified SSL policy with the data included in the request.", + // "httpMethod": "PATCH", + // "id": "compute.sslPolicies.patch", + // "parameterOrder": [ + // "project", + // "sslPolicy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "sslPolicy": { + // "description": "Name of the SSL policy to update. The name must be 1-63 characters long, and comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/sslPolicies/{sslPolicy}", + // "request": { + // "$ref": "SslPolicy" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + } -// method id "compute.sslCertificates.testIamPermissions": +// method id "compute.sslPolicies.testIamPermissions": -type SslCertificatesTestIamPermissionsCall struct { +type SslPoliciesTestIamPermissionsCall struct { s *Service project string resource string @@ -82715,8 +96654,8 @@ type SslCertificatesTestIamPermissionsCall struct { // TestIamPermissions: Returns permissions that a caller has on the // specified resource. 
-func (r *SslCertificatesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *SslCertificatesTestIamPermissionsCall { - c := &SslCertificatesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *SslPoliciesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *SslPoliciesTestIamPermissionsCall { + c := &SslPoliciesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.resource = resource c.testpermissionsrequest = testpermissionsrequest @@ -82726,7 +96665,7 @@ func (r *SslCertificatesService) TestIamPermissions(project string, resource str // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SslCertificatesTestIamPermissionsCall) Fields(s ...googleapi.Field) *SslCertificatesTestIamPermissionsCall { +func (c *SslPoliciesTestIamPermissionsCall) Fields(s ...googleapi.Field) *SslPoliciesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -82734,21 +96673,21 @@ func (c *SslCertificatesTestIamPermissionsCall) Fields(s ...googleapi.Field) *Ss // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SslCertificatesTestIamPermissionsCall) Context(ctx context.Context) *SslCertificatesTestIamPermissionsCall { +func (c *SslPoliciesTestIamPermissionsCall) Context(ctx context.Context) *SslPoliciesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SslCertificatesTestIamPermissionsCall) Header() http.Header { +func (c *SslPoliciesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SslCertificatesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *SslPoliciesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -82761,7 +96700,7 @@ func (c *SslCertificatesTestIamPermissionsCall) doRequest(alt string) (*http.Res } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslPolicies/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -82772,14 +96711,14 @@ func (c *SslCertificatesTestIamPermissionsCall) doRequest(alt string) (*http.Res return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.sslCertificates.testIamPermissions" call. +// Do executes the "compute.sslPolicies.testIamPermissions" call. // Exactly one of *TestPermissionsResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *TestPermissionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. 
-func (c *SslCertificatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *SslPoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -82812,7 +96751,7 @@ func (c *SslCertificatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) // { // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.sslCertificates.testIamPermissions", + // "id": "compute.sslPolicies.testIamPermissions", // "parameterOrder": [ // "project", // "resource" @@ -82828,12 +96767,12 @@ func (c *SslCertificatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) // "resource": { // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + // "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/sslCertificates/{resource}/testIamPermissions", + // "path": "{project}/global/sslPolicies/{resource}/testIamPermissions", // "request": { // "$ref": "TestPermissionsRequest" // }, @@ -84213,6 +98152,257 @@ func (c *SubnetworksListCall) Pages(ctx context.Context, f func(*SubnetworkList) } } +// method id "compute.subnetworks.listUsable": + +type SubnetworksListUsableCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// ListUsable: Retrieves an aggregated list of usable subnetworks. +func (r *SubnetworksService) ListUsable(project string) *SubnetworksListUsableCall { + c := &SubnetworksListUsableCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. 
+func (c *SubnetworksListUsableCall) Filter(filter string) *SubnetworksListUsableCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *SubnetworksListUsableCall) MaxResults(maxResults int64) *SubnetworksListUsableCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *SubnetworksListUsableCall) OrderBy(orderBy string) *SubnetworksListUsableCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *SubnetworksListUsableCall) PageToken(pageToken string) *SubnetworksListUsableCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SubnetworksListUsableCall) Fields(s ...googleapi.Field) *SubnetworksListUsableCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *SubnetworksListUsableCall) IfNoneMatch(entityTag string) *SubnetworksListUsableCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SubnetworksListUsableCall) Context(ctx context.Context) *SubnetworksListUsableCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *SubnetworksListUsableCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SubnetworksListUsableCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/subnetworks/listUsable") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.subnetworks.listUsable" call. +// Exactly one of *UsableSubnetworksAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *UsableSubnetworksAggregatedList.ServerResponse.Header or (if +// a response was returned at all) in error.(*googleapi.Error).Header. +// Use googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *SubnetworksListUsableCall) Do(opts ...googleapi.CallOption) (*UsableSubnetworksAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &UsableSubnetworksAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an aggregated list of usable subnetworks.", + // "httpMethod": "GET", + // "id": "compute.subnetworks.listUsable", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/subnetworks/listUsable", + // "response": { + // "$ref": "UsableSubnetworksAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *SubnetworksListUsableCall) Pages(ctx context.Context, f func(*UsableSubnetworksAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "compute.subnetworks.patch": type SubnetworksPatchCall struct { @@ -84392,8 +98582,7 @@ func (c *SubnetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } @@ -85925,7 +100114,7 @@ func (c *TargetHttpProxiesTestIamPermissionsCall) Do(opts ...googleapi.CallOptio // "resource": { // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + // "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", // "required": true, // "type": "string" // } @@ -86691,6 +100880,25 @@ func (r *TargetHttpsProxiesService) SetQuicOverride(project string, targetHttpsP return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *TargetHttpsProxiesSetQuicOverrideCall) RequestId(requestId string) *TargetHttpsProxiesSetQuicOverrideCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -86793,6 +101001,11 @@ func (c *TargetHttpsProxiesSetQuicOverrideCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "targetHttpsProxy": { // "description": "Name of the TargetHttpsProxy resource to set the QUIC override policy for. 
The name should conform to RFC1035.", // "location": "path", @@ -86985,6 +101198,179 @@ func (c *TargetHttpsProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOpti } +// method id "compute.targetHttpsProxies.setSslPolicy": + +type TargetHttpsProxiesSetSslPolicyCall struct { + s *Service + project string + targetHttpsProxy string + sslpolicyreference *SslPolicyReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetSslPolicy: Sets the SSL policy for TargetHttpsProxy. The SSL +// policy specifies the server-side support for SSL features. This +// affects connections between clients and the HTTPS proxy load +// balancer. They do not affect the connection between the load balancer +// and the backends. +func (r *TargetHttpsProxiesService) SetSslPolicy(project string, targetHttpsProxy string, sslpolicyreference *SslPolicyReference) *TargetHttpsProxiesSetSslPolicyCall { + c := &TargetHttpsProxiesSetSslPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.targetHttpsProxy = targetHttpsProxy + c.sslpolicyreference = sslpolicyreference + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *TargetHttpsProxiesSetSslPolicyCall) RequestId(requestId string) *TargetHttpsProxiesSetSslPolicyCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetHttpsProxiesSetSslPolicyCall) Fields(s ...googleapi.Field) *TargetHttpsProxiesSetSslPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetHttpsProxiesSetSslPolicyCall) Context(ctx context.Context) *TargetHttpsProxiesSetSslPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *TargetHttpsProxiesSetSslPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetHttpsProxiesSetSslPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslpolicyreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies/{targetHttpsProxy}/setSslPolicy") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "targetHttpsProxy": c.targetHttpsProxy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.targetHttpsProxies.setSslPolicy" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetHttpsProxiesSetSslPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the SSL policy for TargetHttpsProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the HTTPS proxy load balancer. They do not affect the connection between the load balancer and the backends.", + // "httpMethod": "POST", + // "id": "compute.targetHttpsProxies.setSslPolicy", + // "parameterOrder": [ + // "project", + // "targetHttpsProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "targetHttpsProxy": { + // "description": "Name of the TargetHttpsProxy resource whose SSL policy is to be set. The name must be 1-63 characters long, and comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/targetHttpsProxies/{targetHttpsProxy}/setSslPolicy", + // "request": { + // "$ref": "SslPolicyReference" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.targetHttpsProxies.setUrlMap": type TargetHttpsProxiesSetUrlMapCall struct { @@ -87282,7 +101668,7 @@ func (c *TargetHttpsProxiesTestIamPermissionsCall) Do(opts ...googleapi.CallOpti // "resource": { // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + // "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", // "required": true, // "type": "string" // } @@ -91986,6 +106372,178 @@ func (c *TargetSslProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOption } +// method id "compute.targetSslProxies.setSslPolicy": + +type TargetSslProxiesSetSslPolicyCall struct { + s *Service + project string + targetSslProxy string + sslpolicyreference *SslPolicyReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetSslPolicy: Sets the SSL policy for TargetSslProxy. The SSL policy +// specifies the server-side support for SSL features. This affects +// connections between clients and the SSL proxy load balancer. They do +// not affect the connection between the load balancer and the backends. +func (r *TargetSslProxiesService) SetSslPolicy(project string, targetSslProxy string, sslpolicyreference *SslPolicyReference) *TargetSslProxiesSetSslPolicyCall { + c := &TargetSslProxiesSetSslPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.targetSslProxy = targetSslProxy + c.sslpolicyreference = sslpolicyreference + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *TargetSslProxiesSetSslPolicyCall) RequestId(requestId string) *TargetSslProxiesSetSslPolicyCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *TargetSslProxiesSetSslPolicyCall) Fields(s ...googleapi.Field) *TargetSslProxiesSetSslPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetSslProxiesSetSslPolicyCall) Context(ctx context.Context) *TargetSslProxiesSetSslPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetSslProxiesSetSslPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetSslProxiesSetSslPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslpolicyreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetSslProxies/{targetSslProxy}/setSslPolicy") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "targetSslProxy": c.targetSslProxy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.targetSslProxies.setSslPolicy" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetSslProxiesSetSslPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the SSL policy for TargetSslProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the SSL proxy load balancer. 
They do not affect the connection between the load balancer and the backends.", + // "httpMethod": "POST", + // "id": "compute.targetSslProxies.setSslPolicy", + // "parameterOrder": [ + // "project", + // "targetSslProxy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "targetSslProxy": { + // "description": "Name of the TargetSslProxy resource whose SSL policy is to be set. The name must be 1-63 characters long, and comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/targetSslProxies/{targetSslProxy}/setSslPolicy", + // "request": { + // "$ref": "SslPolicyReference" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.targetSslProxies.testIamPermissions": type TargetSslProxiesTestIamPermissionsCall struct { @@ -92113,7 +106671,7 @@ func (c *TargetSslProxiesTestIamPermissionsCall) Do(opts ...googleapi.CallOption // "resource": { // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + // "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", // "required": true, // "type": "string" // } @@ -93325,7 +107883,7 @@ func (c *TargetTcpProxiesTestIamPermissionsCall) Do(opts ...googleapi.CallOption // "resource": { // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + // "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", // "required": true, // "type": "string" // } @@ -95590,8 +110148,7 @@ func (c *UrlMapsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } @@ -95724,7 +110281,7 @@ func (c *UrlMapsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestP // "resource": { // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + // "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", // "required": true, // "type": "string" // } diff --git a/vendor/google.golang.org/api/compute/v0.beta/compute-api.json 
b/vendor/google.golang.org/api/compute/v0.beta/compute-api.json index 626b31832ebb..a7e3cc0a0d22 100644 --- a/vendor/google.golang.org/api/compute/v0.beta/compute-api.json +++ b/vendor/google.golang.org/api/compute/v0.beta/compute-api.json @@ -1,11 +1,11 @@ { "kind": "discovery#restDescription", - "etag": "\"YWOzh2SDasdU84ArJnpYek-OMdg/gfBEnRRtVgnVgbcTDcEDV6LmQ_A\"", + "etag": "\"YWOzh2SDasdU84ArJnpYek-OMdg/QIBUrC-C0KuVnsoUFunufep6MGE\"", "discoveryVersion": "v1", "id": "compute:beta", "name": "compute", "version": "beta", - "revision": "20170721", + "revision": "20170905", "title": "Compute Engine API", "description": "Creates and runs virtual machines on Google Cloud Platform.", "ownerDomain": "google.com", @@ -182,6 +182,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -213,6 +282,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -467,6 +605,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -498,23 +705,10 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "AddressesScopedList": { - "id": "AddressesScopedList", - "type": "object", - "properties": { - "addresses": { - "type": "array", - "description": "[Output Only] List of addresses contained in this scope.", - "items": { - "$ref": "Address" - } }, "warning": { "type": "object", - "description": "[Output Only] Informational warning which replaces the list of addresses when the list is empty.", + "description": "[Output Only] Informational warning message.", "properties": { "code": { "type": "string", @@ -583,45 +777,127 @@ } } }, - "AliasIpRange": { - "id": "AliasIpRange", - "type": "object", - "description": "An alias IP range attached to an instance's network interface.", - "properties": { - "ipCidrRange": { - "type": "string", - "description": "The IP CIDR range represented by this alias IP range. This IP CIDR range must belong to the specified subnetwork and cannot contain IP addresses reserved by system or used by other network interfaces. This range may be a single IP address (e.g. 10.2.3.4), a netmask (e.g. /24) or a CIDR format string (e.g. 10.1.2.0/24)." - }, - "subnetworkRangeName": { - "type": "string", - "description": "Optional subnetwork secondary range name specifying the secondary range from which to allocate the IP CIDR range for this alias IP range. If left unspecified, the primary range of the subnetwork will be used." - } - } - }, - "AttachedDisk": { - "id": "AttachedDisk", + "AddressesScopedList": { + "id": "AddressesScopedList", "type": "object", - "description": "An instance-attached disk resource.", "properties": { - "autoDelete": { - "type": "boolean", - "description": "Specifies whether the disk will be auto-deleted when the instance is deleted (but not when the disk is detached from the instance)." - }, - "boot": { - "type": "boolean", - "description": "Indicates that this is a boot disk. The virtual machine will use the first partition of the disk for its root filesystem." - }, - "deviceName": { - "type": "string", - "description": "Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* tree of a Linux operating system running within the instance. This name can be used to reference the device for mounting, resizing, and so on, from within the instance.\n\nIf not specified, the server chooses a default device name to apply to this disk, in the form persistent-disks-x, where x is a number assigned by Google Compute Engine. This field is only applicable for persistent disks." 
+ "addresses": { + "type": "array", + "description": "[Output Only] List of addresses contained in this scope.", + "items": { + "$ref": "Address" + } }, - "diskEncryptionKey": { + "warning": { + "type": "object", + "description": "[Output Only] Informational warning which replaces the list of addresses when the list is empty.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "AliasIpRange": { + "id": "AliasIpRange", + "type": "object", + "description": "An alias IP range attached to an instance's network interface.", + "properties": { + "ipCidrRange": { + "type": "string", + "description": "The IP CIDR range represented by this alias IP range. This IP CIDR range must belong to the specified subnetwork and cannot contain IP addresses reserved by system or used by other network interfaces. This range may be a single IP address (e.g. 10.2.3.4), a netmask (e.g. /24) or a CIDR format string (e.g. 10.1.2.0/24)." + }, + "subnetworkRangeName": { + "type": "string", + "description": "Optional subnetwork secondary range name specifying the secondary range from which to allocate the IP CIDR range for this alias IP range. If left unspecified, the primary range of the subnetwork will be used." + } + } + }, + "AttachedDisk": { + "id": "AttachedDisk", + "type": "object", + "description": "An instance-attached disk resource.", + "properties": { + "autoDelete": { + "type": "boolean", + "description": "Specifies whether the disk will be auto-deleted when the instance is deleted (but not when the disk is detached from the instance)." + }, + "boot": { + "type": "boolean", + "description": "Indicates that this is a boot disk. 
The virtual machine will use the first partition of the disk for its root filesystem." + }, + "deviceName": { + "type": "string", + "description": "Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* tree of a Linux operating system running within the instance. This name can be used to reference the device for mounting, resizing, and so on, from within the instance.\n\nIf not specified, the server chooses a default device name to apply to this disk, in the form persistent-disks-x, where x is a number assigned by Google Compute Engine. This field is only applicable for persistent disks." + }, + "diskEncryptionKey": { "$ref": "CustomerEncryptionKey", "description": "Encrypts or decrypts a disk using a customer-supplied encryption key.\n\nIf you are creating a new disk, this field encrypts the new disk using an encryption key that you provide. If you are attaching an existing disk that is already encrypted, this field decrypts the disk using the customer-supplied encryption key.\n\nIf you encrypt a disk using a customer-supplied key, you must provide the same key again when you attempt to use this resource at a later time. For example, you must provide the key when you create a snapshot or an image from the disk or when you attach the disk to a virtual machine instance.\n\nIf you do not provide an encryption key, then the disk will be encrypted using an automatically generated key and you do not need to provide a key to use the disk later.\n\nInstance templates do not store customer-supplied encryption keys, so you cannot use your own keys to encrypt disks in a managed instance group." }, "index": { "type": "integer", - "description": "Assigns a zero-based index to this disk, where 0 is reserved for the boot disk. For example, if you have many disks attached to an instance, each disk would have a unique index number. If not specified, the server will choose an appropriate value.", + "description": "[Output Only] A zero-based index to this disk, where 0 is reserved for the boot disk. If you have many disks attached to an instance, each disk would have a unique index number.", "format": "int32" }, "initializeParams": { @@ -908,6 +1184,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. 
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -939,6 +1284,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -1301,23 +1715,92 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "BackendService": { - "id": "BackendService", - "type": "object", - "description": "A BackendService resource. 
This resource defines a group of backend virtual machines and their serving capacity.", - "properties": { - "affinityCookieTtlSec": { - "type": "integer", - "description": "Lifetime of cookies in seconds if session_affinity is GENERATED_COOKIE. If set to 0, the cookie is non-persistent and lasts only until the end of the browser session (or equivalent). The maximum allowed value for TTL is one day.\n\nWhen the load balancing scheme is INTERNAL, this field is not used.", - "format": "int32" }, - "backends": { - "type": "array", - "description": "The list of backends that serve this BackendService.", - "items": { + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "BackendService": { + "id": "BackendService", + "type": "object", + "description": "A BackendService resource. This resource defines a group of backend virtual machines and their serving capacity.", + "properties": { + "affinityCookieTtlSec": { + "type": "integer", + "description": "Lifetime of cookies in seconds if session_affinity is GENERATED_COOKIE. If set to 0, the cookie is non-persistent and lasts only until the end of the browser session (or equivalent). 
The maximum allowed value for TTL is one day.\n\nWhen the load balancing scheme is INTERNAL, this field is not used.", + "format": "int32" + }, + "backends": { + "type": "array", + "description": "The list of backends that serve this BackendService.", + "items": { "$ref": "Backend" } }, @@ -1347,7 +1830,7 @@ }, "healthChecks": { "type": "array", - "description": "The list of URLs to the HttpHealthCheck or HttpsHealthCheck resource for health checking this BackendService. Currently at most one health check can be specified, and a health check is required for GCE backend services. A health check must not be specified for GAE app backend and Cloud Function backend.\n\nFor internal load balancing, a URL to a HealthCheck resource must be specified instead.", + "description": "The list of URLs to the HttpHealthCheck or HttpsHealthCheck resource for health checking this BackendService. Currently at most one health check can be specified, and a health check is required for Compute Engine backend services. A health check must not be specified for App Engine backend and Cloud Function backend.\n\nFor internal load balancing, a URL to a HealthCheck resource must be specified instead.", "items": { "type": "string" } @@ -1367,6 +1850,7 @@ }, "loadBalancingScheme": { "type": "string", + "description": "Indicates whether the backend service will be used with internal or external load balancing. A backend service created for one type of load balancing cannot be used with the other. Possible values are INTERNAL and EXTERNAL.", "enum": [ "EXTERNAL", "INTERNAL", @@ -1414,6 +1898,10 @@ "type": "string", "description": "[Output Only] URL of the region where the regional backend service resides. This field is not applicable to global backend services." }, + "securityPolicy": { + "type": "string", + "description": "[Output Only] The resource URL for the security policy associated with this backend service." + }, "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for the resource." @@ -1472,6 +1960,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -1551,6 +2108,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -1819,41 +2445,179 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "CommitmentList": { - "id": "CommitmentList", - "type": "object", - "description": "Contains a list of Commitment resources.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] Unique identifier for the resource; defined by the server." - }, - "items": { - "type": "array", - "description": "A list of Commitment resources.", - "items": { - "$ref": "Commitment" - } }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. 
Always compute#commitmentList for lists of commitments.", - "default": "compute#commitmentList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "CommitmentsScopedList": { + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "CommitmentList": { + "id": "CommitmentList", + "type": "object", + "description": "Contains a list of Commitment resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." + }, + "items": { + "type": "array", + "description": "A list of Commitment resources.", + "items": { + "$ref": "Commitment" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#commitmentList for lists of commitments.", + "default": "compute#commitmentList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. 
Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "CommitmentsScopedList": { "id": "CommitmentsScopedList", "type": "object", "properties": { @@ -2271,6 +3035,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -2302,25 +3135,94 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "DiskMoveRequest": { - "id": "DiskMoveRequest", - "type": "object", - "properties": { - "destinationZone": { - "type": "string", - "description": "The URL of the destination zone to move the disk. This can be a full or partial URL. For example, the following are all valid URLs to a zone: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone \n- projects/project/zones/zone \n- zones/zone" }, - "targetDisk": { - "type": "string", - "description": "The URL of the target disk to move. This can be a full or partial URL. For example, the following are all valid URLs to a disk: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/disks/disk \n- projects/project/zones/zone/disks/disk \n- zones/zone/disks/disk" - } - } - }, - "DiskType": { - "id": "DiskType", + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "DiskMoveRequest": { + "id": "DiskMoveRequest", + "type": "object", + "properties": { + "destinationZone": { + "type": "string", + "description": "The URL of the destination zone to move the disk. This can be a full or partial URL. For example, the following are all valid URLs to a zone: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone \n- projects/project/zones/zone \n- zones/zone" + }, + "targetDisk": { + "type": "string", + "description": "The URL of the target disk to move. This can be a full or partial URL. For example, the following are all valid URLs to a disk: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/disks/disk \n- projects/project/zones/zone/disks/disk \n- zones/zone/disks/disk" + } + } + }, + "DiskType": { + "id": "DiskType", "type": "object", "description": "A DiskType resource.", "properties": { @@ -2398,6 +3300,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -2429,6 +3400,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -2607,6 +3647,34 @@ } } }, + "DistributionPolicy": { + "id": "DistributionPolicy", + "type": "object", + "properties": { + "zones": { + "type": "array", + "items": { + "$ref": "DistributionPolicyZoneConfiguration" + } + } + } + }, + "DistributionPolicyZoneConfiguration": { + "id": "DistributionPolicyZoneConfiguration", + "type": "object", + "properties": { + "zone": { + "type": "string", + "description": "URL of the zone where managed instance group is spawning instances (for regional resources). Zone has to belong to the region where managed instance group is located.", + "annotations": { + "required": [ + "compute.regionInstanceGroupManagers.insert", + "compute.regionInstanceGroupManagers.update" + ] + } + } + } + }, "Expr": { "id": "Expr", "type": "object", @@ -2801,21 +3869,90 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "FixedOrPercent": { - "id": "FixedOrPercent", - "type": "object", - "description": "Encapsulates numeric value that can be either absolute or relative.", - "properties": { - "calculated": { - "type": "integer", - "description": "[Output Only] Absolute value calculated based on mode: mode = fixed -\u003e calculated = fixed = percent -\u003e calculated = ceiling(percent/100 * base_value)", - "format": "int32" }, - "fixed": { - "type": "integer", + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "FixedOrPercent": { + "id": "FixedOrPercent", + "type": "object", + "description": "Encapsulates numeric value that can be either absolute or relative.", + "properties": { + "calculated": { + "type": "integer", + "description": "[Output Only] Absolute value calculated based on mode: mode = fixed -\u003e calculated = fixed = percent -\u003e calculated = ceiling(percent/100 * base_value)", + "format": "int32" + }, + "fixed": { + "type": "integer", "description": "fixed must be non-negative.", "format": "int32" }, @@ -2992,6 +4129,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -3023,6 +4229,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -3336,45 +4611,114 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "HealthCheckReference": { - "id": "HealthCheckReference", - "type": "object", - "description": "A full or valid partial URL to a health check. For example, the following are valid URLs: \n- https://www.googleapis.com/compute/beta/projects/project-id/global/httpHealthChecks/health-check \n- projects/project-id/global/httpHealthChecks/health-check \n- global/httpHealthChecks/health-check", - "properties": { - "healthCheck": { - "type": "string" - } - } - }, - "HealthStatus": { - "id": "HealthStatus", - "type": "object", - "properties": { - "healthState": { - "type": "string", - "description": "Health state of the instance.", - "enum": [ - "HEALTHY", - "UNHEALTHY" - ], - "enumDescriptions": [ - "", - "" - ] - }, - "instance": { - "type": "string", - "description": "URL of the instance resource." - }, - "ipAddress": { - "type": "string", - "description": "The IP address represented by this resource." }, - "port": { - "type": "integer", + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "HealthCheckReference": { + "id": "HealthCheckReference", + "type": "object", + "description": "A full or valid partial URL to a health check. For example, the following are valid URLs: \n- https://www.googleapis.com/compute/beta/projects/project-id/global/httpHealthChecks/health-check \n- projects/project-id/global/httpHealthChecks/health-check \n- global/httpHealthChecks/health-check", + "properties": { + "healthCheck": { + "type": "string" + } + } + }, + "HealthStatus": { + "id": "HealthStatus", + "type": "object", + "properties": { + "healthState": { + "type": "string", + "description": "Health state of the instance.", + "enum": [ + "HEALTHY", + "UNHEALTHY" + ], + "enumDescriptions": [ + "", + "" + ] + }, + "instance": { + "type": "string", + "description": "URL of the instance resource." + }, + "ipAddress": { + "type": "string", + "description": "The IP address represented by this resource." + }, + "port": { + "type": "integer", "description": "The port on the instance.", "format": "int32" } @@ -3497,6 +4841,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -3595,6 +5008,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -3792,21 +5274,90 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "Instance": { - "id": "Instance", - "type": "object", - "description": "An Instance resource.", - "properties": { - "canIpForward": { - "type": "boolean", - "description": "Allows this instance to send and receive packets with non-matching destination or source IPs. This is required if you plan to use this instance to forward routes. For more information, see Enabling IP Forwarding." }, - "cpuPlatform": { - "type": "string", - "description": "[Output Only] The CPU platform used by this instance." + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "Instance": { + "id": "Instance", + "type": "object", + "description": "An Instance resource.", + "properties": { + "canIpForward": { + "type": "boolean", + "description": "Allows this instance to send and receive packets with non-matching destination or source IPs. This is required if you plan to use this instance to forward routes. 
For more information, see Enabling IP Forwarding." + }, + "cpuPlatform": { + "type": "string", + "description": "[Output Only] The CPU platform used by this instance." }, "creationTimestamp": { "type": "string", @@ -3881,7 +5432,7 @@ }, "networkInterfaces": { "type": "array", - "description": "An array of configurations for this interface. This specifies how this interface is configured to interact with other network services, such as connecting to the internet. Only one interface is supported per instance.", + "description": "An array of network configurations for this instance. These specify how interfaces are configured to interact with other network services, such as connecting to the internet. Multiple interfaces are supported per instance.", "items": { "$ref": "NetworkInterface" } @@ -3971,6 +5522,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -4073,6 +5693,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -4104,45 +5793,118 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "InstanceGroupManager": { - "id": "InstanceGroupManager", - "type": "object", - "description": "An Instance Group Manager resource.", - "properties": { - "autoHealingPolicies": { - "type": "array", - "description": "The autohealing policy for this managed instance group. You can specify only one value.", - "items": { - "$ref": "InstanceGroupManagerAutoHealingPolicy" - } - }, - "baseInstanceName": { - "type": "string", - "description": "The base instance name to use for instances in this group. The value must be 1-58 characters long. Instances are named by appending a hyphen and a random four-character string to the base instance name. The base instance name must comply with RFC1035.", - "pattern": "[a-z][-a-z0-9]{0,57}", - "annotations": { - "required": [ - "compute.instanceGroupManagers.insert" - ] - } - }, - "creationTimestamp": { - "type": "string", - "description": "[Output Only] The creation timestamp for this managed instance group in RFC3339 text format." - }, - "currentActions": { - "$ref": "InstanceGroupManagerActionsSummary", - "description": "[Output Only] The list of instance actions and the number of instances in this managed instance group that are scheduled for each of those actions." - }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." 
}, - "failoverAction": { - "type": "string", + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "InstanceGroupManager": { + "id": "InstanceGroupManager", + "type": "object", + "description": "An Instance Group Manager resource.", + "properties": { + "autoHealingPolicies": { + "type": "array", + "description": "The autohealing policy for this managed instance group. You can specify only one value.", + "items": { + "$ref": "InstanceGroupManagerAutoHealingPolicy" + } + }, + "baseInstanceName": { + "type": "string", + "description": "The base instance name to use for instances in this group. The value must be 1-58 characters long. Instances are named by appending a hyphen and a random four-character string to the base instance name. The base instance name must comply with RFC1035.", + "pattern": "[a-z][-a-z0-9]{0,57}", + "annotations": { + "required": [ + "compute.instanceGroupManagers.insert" + ] + } + }, + "creationTimestamp": { + "type": "string", + "description": "[Output Only] The creation timestamp for this managed instance group in RFC3339 text format." + }, + "currentActions": { + "$ref": "InstanceGroupManagerActionsSummary", + "description": "[Output Only] The list of instance actions and the number of instances in this managed instance group that are scheduled for each of those actions." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." 
+ }, + "distributionPolicy": { + "$ref": "DistributionPolicy", + "description": "Policy valid only for regional managed instance groups." + }, + "failoverAction": { + "type": "string", "description": "The action to perform in case of zone failure. Only one value is supported, NO_FAILOVER. The default is NO_FAILOVER.", "enum": [ "NO_FAILOVER", @@ -4208,7 +5970,7 @@ }, "serviceAccount": { "type": "string", - "description": "Service account will be used as credentials for all operations performed by managed instance group on instances. The service accounts needs all permissions required to create and delete instances. When not specified, the service account {projectNumber}@cloudservices.gserviceaccount.com will be used." + "description": "[Output Only] The service account to be used as credentials for all operations performed by the managed instance group on instances. The service accounts needs all permissions required to create and delete instances. By default, the service account {projectNumber}@cloudservices.gserviceaccount.com is used." }, "targetPools": { "type": "array", @@ -4324,6 +6086,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -4370,6 +6201,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." 
+ }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -4691,19 +6591,88 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "InstanceGroupsListInstancesRequest": { - "id": "InstanceGroupsListInstancesRequest", - "type": "object", - "properties": { - "instanceState": { - "type": "string", - "description": "A filter for the state of the instances in the instance group. Valid options are ALL or RUNNING. If you do not specify this parameter the list includes all instances regardless of their state.", - "enum": [ - "ALL", - "RUNNING" + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "InstanceGroupsListInstancesRequest": { + "id": "InstanceGroupsListInstancesRequest", + "type": "object", + "properties": { + "instanceState": { + "type": "string", + "description": "A filter for the state of the instances in the instance group. Valid options are ALL or RUNNING. If you do not specify this parameter the list includes all instances regardless of their state.", + "enum": [ + "ALL", + "RUNNING" ], "enumDescriptions": [ "", @@ -4853,6 +6822,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. 
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -4884,6 +6922,75 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -5057,46 +7164,115 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "InstanceWithNamedPorts": { - "id": "InstanceWithNamedPorts", - "type": "object", - "properties": { - "instance": { - "type": "string", - "description": "[Output Only] The URL of the instance." 
- }, - "namedPorts": { - "type": "array", - "description": "[Output Only] The named ports that belong to this instance group.", - "items": { - "$ref": "NamedPort" - } }, - "status": { - "type": "string", - "description": "[Output Only] The status of the instance.", - "enum": [ - "PROVISIONING", - "RUNNING", - "STAGING", - "STOPPED", - "STOPPING", - "SUSPENDED", - "SUSPENDING", - "TERMINATED" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "" + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "InstanceWithNamedPorts": { + "id": "InstanceWithNamedPorts", + "type": "object", + "properties": { + "instance": { + "type": "string", + "description": "[Output Only] The URL of the instance." + }, + "namedPorts": { + "type": "array", + "description": "[Output Only] The named ports that belong to this instance group.", + "items": { + "$ref": "NamedPort" + } + }, + "status": { + "type": "string", + "description": "[Output Only] The status of the instance.", + "enum": [ + "PROVISIONING", + "RUNNING", + "STAGING", + "STOPPED", + "STOPPING", + "SUSPENDED", + "SUSPENDING", + "TERMINATED" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "" ] } } @@ -5263,159 +7439,214 @@ } } }, - "License": { - "id": "License", + "Interconnect": { + "id": "Interconnect", "type": "object", - "description": "A license resource.", + "description": "Protocol definitions for Mixer API to support Interconnect. 
Next available tag: 23", "properties": { - "chargesUseFee": { + "adminEnabled": { "type": "boolean", - "description": "[Output Only] Deprecated. This field no longer reflects whether a license charges a usage fee." + "description": "Administrative status of the interconnect. When this is set to ?true?, the Interconnect is functional and may carry traffic (assuming there are functional InterconnectAttachments and other requirements are satisfied). When set to ?false?, no packets will be carried over this Interconnect and no BGP routes will be exchanged over it. By default, it is set to ?true?." + }, + "circuitInfos": { + "type": "array", + "description": "[Output Only] List of CircuitInfo objects, that describe the individual circuits in this LAG.", + "items": { + "$ref": "InterconnectCircuitInfo" + } + }, + "connectionAuthorization": { + "type": "string", + "description": "[Output Only] URL to retrieve the Letter Of Authority and Customer Facility Assignment (LOA-CFA) documentation relating to this Interconnect. This documentation authorizes the facility provider to connect to the specified crossconnect ports." + }, + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "customerName": { + "type": "string", + "description": "Customer name, to put in the Letter of Authorization as the party authorized to request a crossconnect." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "expectedOutages": { + "type": "array", + "description": "[Output Only] List of outages expected for this Interconnect.", + "items": { + "$ref": "InterconnectOutageNotification" + } + }, + "googleIpAddress": { + "type": "string", + "description": "[Output Only] IP address configured on the Google side of the Interconnect link. This can be used only for ping tests." + }, + "googleReferenceId": { + "type": "string", + "description": "[Output Only] Google reference ID; to be used when raising support tickets with Google or otherwise to debug backend connectivity issues." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "interconnectAttachments": { + "type": "array", + "description": "[Output Only] A list of the URLs of all InterconnectAttachments configured to use this Interconnect.", + "items": { + "type": "string" + } + }, + "interconnectType": { + "type": "string", + "enum": [ + "IT_PRIVATE" + ], + "enumDescriptions": [ + "" + ] }, "kind": { "type": "string", - "description": "[Output Only] Type of resource. Always compute#license for licenses.", - "default": "compute#license" + "description": "[Output Only] Type of the resource. Always compute#interconnect for interconnects.", + "default": "compute#interconnect" + }, + "linkType": { + "type": "string", + "enum": [ + "LINK_TYPE_ETHERNET_10G_LR" + ], + "enumDescriptions": [ + "" + ] + }, + "location": { + "type": "string", + "description": "URL of the InterconnectLocation object that represents where this connection is to be provisioned." }, "name": { "type": "string", - "description": "[Output Only] Name of the resource. The name is 1-63 characters long and complies with RFC1035.", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "annotations": { "required": [ - "compute.images.insert" + "compute.interconnects.insert" ] } }, - "selfLink": { + "nocContactEmail": { "type": "string", - "description": "[Output Only] Server-defined URL for the resource." - } - } - }, - "LogConfig": { - "id": "LogConfig", - "type": "object", - "description": "Specifies what kind of log the caller must write", - "properties": { - "cloudAudit": { - "$ref": "LogConfigCloudAuditOptions", - "description": "Cloud audit options." - }, - "counter": { - "$ref": "LogConfigCounterOptions", - "description": "Counter options." - } - } - }, - "LogConfigCloudAuditOptions": { - "id": "LogConfigCloudAuditOptions", - "type": "object", - "description": "Write a Cloud Audit log", - "properties": { - "authorizationLoggingOptions": { - "$ref": "AuthorizationLoggingOptions", - "description": "Information used by the Cloud Audit Logging pipeline." + "description": "Email address to contact the customer NOC for operations and maintenance notifications regarding this Interconnect. If specified, this will be used for notifications in addition to all other forms described, such as Stackdriver logs alerting and Cloud Notifications." }, - "logName": { + "operationalStatus": { "type": "string", - "description": "The log_name to populate in the Cloud Audit Record.", + "description": "[Output Only] The current status of whether or not this Interconnect is functional.", "enum": [ - "ADMIN_ACTIVITY", - "DATA_ACCESS", - "UNSPECIFIED_LOG_NAME" + "OS_ACTIVE", + "OS_UNPROVISIONED" ], "enumDescriptions": [ - "", "", "" ] - } - } - }, - "LogConfigCounterOptions": { - "id": "LogConfigCounterOptions", - "type": "object", - "description": "Options for counters", - "properties": { - "field": { + }, + "peerIpAddress": { "type": "string", - "description": "The field value to attribute." + "description": "[Output Only] IP address configured on the customer side of the Interconnect link. The customer should configure this IP address during turnup when prompted by Google NOC. This can be used only for ping tests." }, - "metric": { + "provisionedLinkCount": { + "type": "integer", + "description": "[Output Only] Number of links actually provisioned in this interconnect.", + "format": "int32" + }, + "requestedLinkCount": { + "type": "integer", + "description": "Target number of physical links in the link bundle, as requested by the customer.", + "format": "int32" + }, + "selfLink": { "type": "string", - "description": "The metric to update." + "description": "[Output Only] Server-defined URL for the resource." } } }, - "MachineType": { - "id": "MachineType", + "InterconnectAttachment": { + "id": "InterconnectAttachment", "type": "object", - "description": "A Machine Type resource.", + "description": "Protocol definitions for Mixer API to support InterconnectAttachment. Next available tag: 18", "properties": { + "cloudRouterIpAddress": { + "type": "string", + "description": "[Output Only] IPv4 address + prefix length to be configured on Cloud Router Interface for this interconnect attachment." + }, "creationTimestamp": { "type": "string", "description": "[Output Only] Creation timestamp in RFC3339 text format." 
}, - "deprecated": { - "$ref": "DeprecationStatus", - "description": "[Output Only] The deprecation status associated with this machine type." + "customerRouterIpAddress": { + "type": "string", + "description": "[Output Only] IPv4 address + prefix length to be configured on the customer router subinterface for this interconnect attachment." }, "description": { "type": "string", - "description": "[Output Only] An optional textual description of the resource." + "description": "An optional description of this resource. Provide this property when you create the resource." }, - "guestCpus": { - "type": "integer", - "description": "[Output Only] The number of virtual CPUs that are available to the instance.", - "format": "int32" + "googleReferenceId": { + "type": "string", + "description": "[Output Only] Google reference ID, to be used when raising support tickets with Google or otherwise to debug backend connectivity issues." }, "id": { "type": "string", "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", "format": "uint64" }, - "isSharedCpu": { - "type": "boolean", - "description": "[Output Only] Whether this machine type has a shared CPU. See Shared-core machine types for more information." + "interconnect": { + "type": "string", + "description": "URL of the underlying Interconnect object that this attachment's traffic will traverse through." }, "kind": { "type": "string", - "description": "[Output Only] The type of the resource. Always compute#machineType for machine types.", - "default": "compute#machineType" - }, - "maximumPersistentDisks": { - "type": "integer", - "description": "[Output Only] Maximum persistent disks allowed.", - "format": "int32" + "description": "[Output Only] Type of the resource. Always compute#interconnectAttachment for interconnect attachments.", + "default": "compute#interconnectAttachment" }, - "maximumPersistentDisksSizeGb": { + "name": { "type": "string", - "description": "[Output Only] Maximum total persistent disks size (GB) allowed.", - "format": "int64" + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" }, - "memoryMb": { - "type": "integer", - "description": "[Output Only] The amount of physical memory available to the instance, defined in MB.", - "format": "int32" + "operationalStatus": { + "type": "string", + "description": "[Output Only] The current status of whether or not this interconnect attachment is functional.", + "enum": [ + "OS_ACTIVE", + "OS_UNPROVISIONED" + ], + "enumDescriptions": [ + "", + "" + ] }, - "name": { + "privateInterconnectInfo": { + "$ref": "InterconnectAttachmentPrivateInfo", + "description": "[Output Only] Information specific to a Private InterconnectAttachment. Only populated if the interconnect that this is attached is of type IT_PRIVATE." + }, + "region": { "type": "string", - "description": "[Output Only] Name of the resource.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + "description": "[Output Only] URL of the region where the regional interconnect attachment resides." 
}, - "selfLink": { + "router": { "type": "string", - "description": "[Output Only] Server-defined URL for the resource." + "description": "URL of the cloud router to be used for dynamic routing. This router must be in the same region as this InterconnectAttachment. The InterconnectAttachment will automatically connect the Interconnect to the network & region within which the Cloud Router is configured." }, - "zone": { + "selfLink": { "type": "string", - "description": "[Output Only] The name of the zone where the machine type resides, such as us-central1-a." + "description": "[Output Only] Server-defined URL for the resource." } } }, - "MachineTypeAggregatedList": { - "id": "MachineTypeAggregatedList", + "InterconnectAttachmentAggregatedList": { + "id": "InterconnectAttachmentAggregatedList", "type": "object", "properties": { "id": { @@ -5424,16 +7655,16 @@ }, "items": { "type": "object", - "description": "A list of MachineTypesScopedList resources.", + "description": "A list of InterconnectAttachmentsScopedList resources.", "additionalProperties": { - "$ref": "MachineTypesScopedList", - "description": "[Output Only] Name of the scope containing this set of machine types." + "$ref": "InterconnectAttachmentsScopedList", + "description": "Name of the scope containing this set of interconnect attachments." } }, "kind": { "type": "string", - "description": "[Output Only] Type of resource. Always compute#machineTypeAggregatedList for aggregated lists of machine types.", - "default": "compute#machineTypeAggregatedList" + "description": "[Output Only] Type of resource. Always compute#interconnectAttachmentAggregatedList for aggregated lists of interconnect attachments.", + "default": "compute#interconnectAttachmentAggregatedList" }, "nextPageToken": { "type": "string", @@ -5442,13 +7673,82 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, - "MachineTypeList": { - "id": "MachineTypeList", + "InterconnectAttachmentList": { + "id": "InterconnectAttachmentList", "type": "object", - "description": "Contains a list of machine types.", + "description": "Response to the list request, and contains a list of interconnect attachments.", "properties": { "id": { "type": "string", @@ -5456,15 +7756,15 @@ }, "items": { "type": "array", - "description": "A list of MachineType resources.", + "description": "A list of InterconnectAttachment resources.", "items": { - "$ref": "MachineType" + "$ref": "InterconnectAttachment" } }, "kind": { "type": "string", - "description": "[Output Only] Type of resource. Always compute#machineTypeList for lists of machine types.", - "default": "compute#machineTypeList" + "description": "[Output Only] Type of resource. Always compute#interconnectAttachmentList for lists of interconnect attachments.", + "default": "compute#interconnectAttachmentList" }, "nextPageToken": { "type": "string", @@ -5473,23 +7773,104 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." 
+ } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, - "MachineTypesScopedList": { - "id": "MachineTypesScopedList", + "InterconnectAttachmentPrivateInfo": { + "id": "InterconnectAttachmentPrivateInfo", "type": "object", + "description": "Private information for an interconnect attachment when this belongs to an interconnect of type IT_PRIVATE.", "properties": { - "machineTypes": { + "tag8021q": { + "type": "integer", + "description": "[Output Only] 802.1q encapsulation tag to be used for traffic between Google and the customer, going to and from this network and region.", + "format": "uint32" + } + } + }, + "InterconnectAttachmentsScopedList": { + "id": "InterconnectAttachmentsScopedList", + "type": "object", + "properties": { + "interconnectAttachments": { "type": "array", - "description": "[Output Only] List of machine types contained in this scope.", + "description": "List of interconnect attachments contained in this scope.", "items": { - "$ref": "MachineType" + "$ref": "InterconnectAttachment" } }, "warning": { "type": "object", - "description": "[Output Only] An informational warning that appears when the machine types list is empty.", + "description": "Informational warning which replaces the list of addresses when the list is empty.", "properties": { "code": { "type": "string", @@ -5558,202 +7939,159 @@ } } }, - "ManagedInstance": { - "id": "ManagedInstance", + "InterconnectCircuitInfo": { + "id": "InterconnectCircuitInfo", "type": "object", + "description": "Describes a single physical circuit between the Customer and Google. CircuitInfo objects are created by Google, so all fields are output only. Next id: 4", "properties": { - "currentAction": { - "type": "string", - "description": "[Output Only] The current action that the managed instance group has scheduled for the instance. Possible values: \n- NONE The instance is running, and the managed instance group does not have any scheduled actions for this instance. \n- CREATING The managed instance group is creating this instance. If the group fails to create this instance, it will try again until it is successful. \n- CREATING_WITHOUT_RETRIES The managed instance group is attempting to create this instance only once. If the group fails to create this instance, it does not try again and the group's targetSize value is decreased instead. \n- RECREATING The managed instance group is recreating this instance. \n- DELETING The managed instance group is permanently deleting this instance. \n- ABANDONING The managed instance group is abandoning this instance. The instance will be removed from the instance group and from any target pools that are associated with this group. \n- RESTARTING The managed instance group is restarting the instance. \n- REFRESHING The managed instance group is applying configuration changes to the instance without stopping it. For example, the group can update the target pool list for an instance without stopping that instance.", - "enum": [ - "ABANDONING", - "CREATING", - "CREATING_WITHOUT_RETRIES", - "DELETING", - "NONE", - "RECREATING", - "REFRESHING", - "RESTARTING", - "VERIFYING" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "" - ] - }, - "id": { + "customerDemarcId": { "type": "string", - "description": "[Output only] The unique identifier for this resource. 
This field is empty when instance does not exist.", - "format": "uint64" + "description": "Customer-side demarc ID for this circuit. This will only be set if it was provided by the Customer to Google during circuit turn-up." }, - "instance": { + "googleCircuitId": { "type": "string", - "description": "[Output Only] The URL of the instance. The URL can exist even if the instance has not yet been created." + "description": "Google-assigned unique ID for this circuit. Assigned at circuit turn-up." }, - "instanceStatus": { + "googleDemarcId": { "type": "string", - "description": "[Output Only] The status of the instance. This field is empty when the instance does not exist.", - "enum": [ - "PROVISIONING", - "RUNNING", - "STAGING", - "STOPPED", - "STOPPING", - "SUSPENDED", - "SUSPENDING", - "TERMINATED" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "" - ] - }, - "lastAttempt": { - "$ref": "ManagedInstanceLastAttempt", - "description": "[Output Only] Information about the last attempt to create or delete the instance." - }, - "version": { - "$ref": "ManagedInstanceVersion", - "description": "[Output Only] Intended version of this instance." + "description": "Google-side demarc ID for this circuit. Assigned at circuit turn-up and provided by Google to the customer in the LOA." } } }, - "ManagedInstanceLastAttempt": { - "id": "ManagedInstanceLastAttempt", + "InterconnectList": { + "id": "InterconnectList", "type": "object", + "description": "Response to the list request, and contains a list of interconnects.", "properties": { - "errors": { - "type": "object", - "description": "[Output Only] Encountered errors during the last attempt to create or delete the instance.", - "properties": { - "errors": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." + }, + "items": { + "type": "array", + "description": "A list of Interconnect resources.", + "items": { + "$ref": "Interconnect" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#interconnectList for lists of interconnects.", + "default": "compute#interconnectList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { "type": "array", - "description": "[Output Only] The array of errors encountered while processing this operation.", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", "items": { "type": "object", "properties": { - "code": { - "type": "string", - "description": "[Output Only] The error type identifier for this error." - }, - "location": { + "key": { "type": "string", - "description": "[Output Only] Indicates the field in the request that caused the error. This property is optional." + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." }, - "message": { + "value": { "type": "string", - "description": "[Output Only] An optional, human-readable error message." + "description": "[Output Only] A warning data value corresponding to the key." } } } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." } } } } }, - "ManagedInstanceVersion": { - "id": "ManagedInstanceVersion", + "InterconnectLocation": { + "id": "InterconnectLocation", "type": "object", + "description": "Protocol definitions for Mixer API to support InterconnectLocation.", "properties": { - "instanceTemplate": { + "address": { "type": "string", - "description": "[Output Only] The intended template of the instance. This field is empty when current_action is one of { DELETING, ABANDONING }." + "description": "[Output Only] The postal address of the Point of Presence, each line in the address is separated by a newline character." }, - "name": { - "type": "string", - "description": "[Output Only] Name of the version." - } - } - }, - "Metadata": { - "id": "Metadata", - "type": "object", - "description": "A metadata key/value entry.", - "properties": { - "fingerprint": { + "availabilityZone": { "type": "string", - "description": "Specifies a fingerprint for this request, which is essentially a hash of the metadata's contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update metadata. 
You must always provide an up-to-date fingerprint hash in order to update or change metadata.", - "format": "byte" - }, - "items": { - "type": "array", - "description": "Array of key/value pairs. The total size of all keys and values must be less than 512 KB.", - "items": { - "type": "object", - "properties": { - "key": { - "type": "string", - "description": "Key for the metadata entry. Keys must conform to the following regexp: [a-zA-Z0-9-_]+, and be less than 128 bytes in length. This is reflected as part of a URL in the metadata server. Additionally, to avoid ambiguity, keys must not conflict with any other metadata keys for the project.", - "pattern": "[a-zA-Z0-9-_]{1,128}", - "annotations": { - "required": [ - "compute.instances.insert", - "compute.projects.setCommonInstanceMetadata" - ] - } - }, - "value": { - "type": "string", - "description": "Value for the metadata entry. These are free-form strings, and only have meaning as interpreted by the image running in the instance. The only restriction placed on values is that their size must be less than or equal to 262144 bytes (256 KiB).", - "annotations": { - "required": [ - "compute.instances.insert", - "compute.projects.setCommonInstanceMetadata" - ] - } - } - } - } + "description": "Availability zone for this location. Within a city, maintenance will not be simultaneously scheduled in more than one availability zone. Example: \"zone1\" or \"zone2\"." }, - "kind": { - "type": "string", - "description": "[Output Only] Type of the resource. Always compute#metadata for metadata.", - "default": "compute#metadata" - } - } - }, - "NamedPort": { - "id": "NamedPort", - "type": "object", - "description": "The named port. For example: .", - "properties": { - "name": { + "city": { "type": "string", - "description": "The name for this named port. The name must be 1-63 characters long, and comply with RFC1035." + "description": "City designator used by the Interconnect UI to locate this InterconnectLocation within the Continent. For example: \"Chicago, IL\", \"Amsterdam, Netherlands\"." }, - "port": { - "type": "integer", - "description": "The port number, which can be a value between 1 and 65535.", - "format": "int32" - } - } - }, - "Network": { - "id": "Network", - "type": "object", - "description": "Represents a Network resource. Read Networks and Firewalls for more information.", - "properties": { - "IPv4Range": { + "continent": { "type": "string", - "description": "The range of internal addresses that are legal on this network. This range is a CIDR specification, for example: 192.168.0.0/16. Provided by the client when the network is created.", - "pattern": "[0-9]{1,3}(?:\\.[0-9]{1,3}){3}/[0-9]{1,2}" - }, - "autoCreateSubnetworks": { - "type": "boolean", - "description": "When set to true, the network is created in \"auto subnet mode\". When set to false, the network is in \"custom subnet mode\".\n\nIn \"auto subnet mode\", a newly created network is assigned the default CIDR of 10.128.0.0/9 and it automatically creates one subnetwork per region." + "description": "Continent for this location. Used by the location picker in the Interconnect UI.", + "enum": [ + "C_AFRICA", + "C_ASIA_PAC", + "C_EUROPE", + "C_NORTH_AMERICA", + "C_SOUTH_AMERICA" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "" + ] }, "creationTimestamp": { "type": "string", @@ -5761,12 +8099,15 @@ }, "description": { "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." 
+ "description": "[Output Only] An optional description of the resource." }, - "gatewayIPv4": { + "facilityProvider": { "type": "string", - "description": "A gateway address for default routing to other networks. This value is read only and is selected by the Google Compute Engine, typically as the first usable address in the IPv4Range.", - "pattern": "[0-9]{1,3}(?:\\.[0-9]{1,3}){3}" + "description": "[Output Only] The name of the provider for this facility (e.g., EQUINIX)." + }, + "facilityProviderFacilityId": { + "type": "string", + "description": "[Output Only] A provider-assigned Identifier for this facility (e.g., Ashburn-DC1)." }, "id": { "type": "string", @@ -5775,85 +8116,34 @@ }, "kind": { "type": "string", - "description": "[Output Only] Type of the resource. Always compute#network for networks.", - "default": "compute#network" + "description": "[Output Only] Type of the resource. Always compute#interconnectLocation for interconnect locations.", + "default": "compute#interconnectLocation" }, "name": { "type": "string", - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "annotations": { - "required": [ - "compute.networks.insert" - ] - } - }, - "peerings": { - "type": "array", - "description": "[Output Only] List of network peerings for the resource.", - "items": { - "$ref": "NetworkPeering" - } + "description": "[Output Only] Name of the resource." }, - "selfLink": { + "peeringdbFacilityId": { "type": "string", - "description": "[Output Only] Server-defined URL for the resource." - }, - "subnetworks": { - "type": "array", - "description": "[Output Only] Server-defined fully-qualified URLs for all subnetworks in this network.", - "items": { - "type": "string" - } - } - } - }, - "NetworkInterface": { - "id": "NetworkInterface", - "type": "object", - "description": "A network interface resource attached to an instance.", - "properties": { - "accessConfigs": { - "type": "array", - "description": "An array of configurations for this interface. Currently, only one access config, ONE_TO_ONE_NAT, is supported. If there are no accessConfigs specified, then this instance will have no external internet access.", - "items": { - "$ref": "AccessConfig" - } + "description": "[Output Only] The peeringdb identifier for this facility (corresponding with a netfac type in peeringdb)." }, - "aliasIpRanges": { + "regionInfos": { "type": "array", - "description": "An array of alias IP ranges for this network interface. Can only be specified for network interfaces on subnet-mode networks.", + "description": "[Output Only] A list of InterconnectLocation.RegionInfo objects, that describe parameters pertaining to the relation between this InterconnectLocation and various Google Cloud regions.", "items": { - "$ref": "AliasIpRange" + "$ref": "InterconnectLocationRegionInfo" } }, - "kind": { - "type": "string", - "description": "[Output Only] Type of the resource. 
Always compute#networkInterface for network interfaces.", - "default": "compute#networkInterface" - }, - "name": { - "type": "string", - "description": "[Output Only] The name of the network interface, generated by the server. For network devices, these are eth0, eth1, etc." - }, - "network": { - "type": "string", - "description": "URL of the network resource for this instance. When creating an instance, if neither the network nor the subnetwork is specified, the default network global/networks/default is used; if the network is not specified but the subnetwork is specified, the network is inferred.\n\nThis field is optional when creating a firewall rule. If not specified when creating a firewall rule, the default network global/networks/default is used.\n\nIf you specify this property, you can specify the network as a full or partial URL. For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project/global/networks/network \n- projects/project/global/networks/network \n- global/networks/default" - }, - "networkIP": { - "type": "string", - "description": "An IPv4 internal network address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system." - }, - "subnetwork": { + "selfLink": { "type": "string", - "description": "The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not provide this property. If the network is in auto subnet mode, providing the subnetwork is optional. If the network is in custom subnet mode, then this field should be specified. If you specify this property, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project/regions/region/subnetworks/subnetwork \n- regions/region/subnetworks/subnetwork" + "description": "[Output Only] Server-defined URL for the resource." } } }, - "NetworkList": { - "id": "NetworkList", + "InterconnectLocationList": { + "id": "InterconnectLocationList", "type": "object", - "description": "Contains a list of networks.", + "description": "Response to the list request, and contains a list of interconnect locations.", "properties": { "id": { "type": "string", @@ -5861,15 +8151,15 @@ }, "items": { "type": "array", - "description": "A list of Network resources.", + "description": "A list of InterconnectLocation resources.", "items": { - "$ref": "Network" + "$ref": "InterconnectLocation" } }, "kind": { "type": "string", - "description": "[Output Only] Type of resource. Always compute#networkList for lists of networks.", - "default": "compute#networkList" + "description": "[Output Only] Type of resource. Always compute#interconnectLocationList for lists of interconnect locations.", + "default": "compute#interconnectLocationList" }, "nextPageToken": { "type": "string", @@ -5878,328 +8168,366 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "NetworkPeering": { - "id": "NetworkPeering", + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "InterconnectLocationRegionInfo": { + "id": "InterconnectLocationRegionInfo", "type": "object", - "description": "A network peering attached to a network resource. The message includes the peering name, peer network, peering state, and a flag indicating whether Google Compute Engine should automatically create routes for the peering.", + "description": "Information about any potential InterconnectAttachments between an Interconnect at a specific InterconnectLocation, and a specific Cloud Region.", "properties": { - "autoCreateRoutes": { - "type": "boolean", - "description": "Whether full mesh connectivity is created and managed automatically. When it is set to true, Google Compute Engine will automatically create and manage the routes between two networks when the state is ACTIVE. Otherwise, user needs to create routes manually to route packets to peer network." - }, - "name": { - "type": "string", - "description": "Name of this peering. Provided by the client when the peering is created. The name must comply with RFC1035. Specifically, the name must be 1-63 characters long and match regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all the following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash." - }, - "network": { + "expectedRttMs": { "type": "string", - "description": "The URL of the peer network. It can be either full URL or partial URL. The peer network may belong to a different project. If the partial URL does not contain project, it is assumed that the peer network is in the same project as the current network." 
+ "description": "Expected round-trip time in milliseconds, from this InterconnectLocation to a VM in this region.", + "format": "int64" }, - "state": { + "locationPresence": { "type": "string", - "description": "[Output Only] State for the peering.", + "description": "Identifies the network presence of this location.", "enum": [ - "ACTIVE", - "INACTIVE" + "LP_GLOBAL", + "LP_LOCAL_REGION" ], "enumDescriptions": [ "", "" ] }, - "stateDetails": { - "type": "string", - "description": "[Output Only] Details about the current state of the peering." - } - } - }, - "NetworksAddPeeringRequest": { - "id": "NetworksAddPeeringRequest", - "type": "object", - "properties": { - "autoCreateRoutes": { - "type": "boolean", - "description": "Whether Google Compute Engine manages the routes automatically." - }, - "name": { + "region": { "type": "string", - "description": "Name of the peering, which should conform to RFC1035." + "description": "URL for the region of this location." }, - "peerNetwork": { - "type": "string", - "description": "URL of the peer network. It can be either full URL or partial URL. The peer network may belong to a different project. If the partial URL does not contain project, it is assumed that the peer network is in the same project as the current network." - } - } - }, - "NetworksRemovePeeringRequest": { - "id": "NetworksRemovePeeringRequest", - "type": "object", - "properties": { - "name": { + "regionKey": { "type": "string", - "description": "Name of the peering, which should conform to RFC1035." + "description": "Scope key for the region of this location." } } }, - "Operation": { - "id": "Operation", + "InterconnectOutageNotification": { + "id": "InterconnectOutageNotification", "type": "object", - "description": "An Operation resource, used to manage asynchronous API requests.", + "description": "Description of a planned outage on this Interconnect. Next id: 9", "properties": { - "clientOperationId": { - "type": "string", - "description": "[Output Only] Reserved for future use." - }, - "creationTimestamp": { - "type": "string", - "description": "[Deprecated] This field is deprecated." + "affectedCircuits": { + "type": "array", + "description": "Iff issue_type is IT_PARTIAL_OUTAGE, a list of the Google-side circuit IDs that will be affected.", + "items": { + "type": "string" + } }, "description": { "type": "string", - "description": "[Output Only] A textual description of the operation, which is set when the operation is created." + "description": "Short user-visible description of the purpose of the outage." }, "endTime": { "type": "string", - "description": "[Output Only] The time that this operation was completed. This value is in RFC3339 text format." - }, - "error": { - "type": "object", - "description": "[Output Only] If errors are generated during processing of the operation, this field will be populated.", - "properties": { - "errors": { - "type": "array", - "description": "[Output Only] The array of errors encountered while processing this operation.", - "items": { - "type": "object", - "properties": { - "code": { - "type": "string", - "description": "[Output Only] The error type identifier for this error." - }, - "location": { - "type": "string", - "description": "[Output Only] Indicates the field in the request that caused the error. This property is optional." - }, - "message": { - "type": "string", - "description": "[Output Only] An optional, human-readable error message." 
- } - } - } - } - } - }, - "httpErrorMessage": { - "type": "string", - "description": "[Output Only] If the operation fails, this field contains the HTTP error message that was returned, such as NOT FOUND." - }, - "httpErrorStatusCode": { - "type": "integer", - "description": "[Output Only] If the operation fails, this field contains the HTTP error status code that was returned. For example, a 404 means the resource was not found.", - "format": "int32" - }, - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64" - }, - "insertTime": { - "type": "string", - "description": "[Output Only] The time that this operation was requested. This value is in RFC3339 text format." + "format": "int64" }, - "kind": { + "issueType": { "type": "string", - "description": "[Output Only] Type of the resource. Always compute#operation for Operation resources.", - "default": "compute#operation" + "enum": [ + "IT_OUTAGE", + "IT_PARTIAL_OUTAGE" + ], + "enumDescriptions": [ + "", + "" + ] }, "name": { "type": "string", - "description": "[Output Only] Name of the resource." - }, - "operationType": { - "type": "string", - "description": "[Output Only] The type of operation, such as insert, update, or delete, and so on." - }, - "progress": { - "type": "integer", - "description": "[Output Only] An optional progress indicator that ranges from 0 to 100. There is no requirement that this be linear or support any granularity of operations. This should not be used to guess when the operation will be complete. This number should monotonically increase as the operation progresses.", - "format": "int32" - }, - "region": { - "type": "string", - "description": "[Output Only] The URL of the region where the operation resides. Only available when performing regional operations." + "description": "Unique identifier for this outage notification." }, - "selfLink": { + "source": { "type": "string", - "description": "[Output Only] Server-defined URL for the resource." + "enum": [ + "NSRC_GOOGLE" + ], + "enumDescriptions": [ + "" + ] }, "startTime": { "type": "string", - "description": "[Output Only] The time that this operation was started by the server. This value is in RFC3339 text format." + "description": "Scheduled start and end times for the outage (milliseconds since Unix epoch).", + "format": "int64" }, - "status": { + "state": { "type": "string", - "description": "[Output Only] The status of the operation, which can be one of the following: PENDING, RUNNING, or DONE.", "enum": [ - "DONE", - "PENDING", - "RUNNING" + "NS_ACTIVE", + "NS_CANCELED" ], "enumDescriptions": [ - "", "", "" ] + } + } + }, + "License": { + "id": "License", + "type": "object", + "description": "A license resource.", + "properties": { + "chargesUseFee": { + "type": "boolean", + "description": "[Output Only] Deprecated. This field no longer reflects whether a license charges a usage fee." }, - "statusMessage": { - "type": "string", - "description": "[Output Only] An optional textual description of the current status of the operation." - }, - "targetId": { + "kind": { "type": "string", - "description": "[Output Only] The unique target ID, which identifies a specific incarnation of the target resource.", - "format": "uint64" + "description": "[Output Only] Type of resource. 
Always compute#license for licenses.", + "default": "compute#license" }, - "targetLink": { + "name": { "type": "string", - "description": "[Output Only] The URL of the resource that the operation modifies. For operations related to creating a snapshot, this points to the persistent disk that the snapshot was created from." + "description": "[Output Only] Name of the resource. The name is 1-63 characters long and complies with RFC1035.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "annotations": { + "required": [ + "compute.images.insert" + ] + } }, - "user": { + "selfLink": { "type": "string", - "description": "[Output Only] User who requested the operation, for example: user@example.com." - }, - "warnings": { - "type": "array", - "description": "[Output Only] If warning messages are generated during processing of the operation, this field will be populated.", - "items": { - "type": "object", - "properties": { - "code": { - "type": "string", - "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", - "enum": [ - "CLEANUP_FAILED", - "DEPRECATED_RESOURCE_USED", - "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", - "FIELD_VALUE_OVERRIDEN", - "INJECTED_KERNELS_DEPRECATED", - "NEXT_HOP_ADDRESS_NOT_ASSIGNED", - "NEXT_HOP_CANNOT_IP_FORWARD", - "NEXT_HOP_INSTANCE_NOT_FOUND", - "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", - "NEXT_HOP_NOT_RUNNING", - "NOT_CRITICAL_ERROR", - "NO_RESULTS_ON_PAGE", - "REQUIRED_TOS_AGREEMENT", - "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", - "RESOURCE_NOT_DELETED", - "SINGLE_INSTANCE_PROPERTY_TEMPLATE", - "UNREACHABLE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ] - }, - "data": { - "type": "array", - "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", - "items": { - "type": "object", - "properties": { - "key": { - "type": "string", - "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." - }, - "value": { - "type": "string", - "description": "[Output Only] A warning data value corresponding to the key." - } - } - } - }, - "message": { - "type": "string", - "description": "[Output Only] A human-readable description of the warning code." - } - } - } + "description": "[Output Only] Server-defined URL for the resource." + } + } + }, + "LogConfig": { + "id": "LogConfig", + "type": "object", + "description": "Specifies what kind of log the caller must write", + "properties": { + "cloudAudit": { + "$ref": "LogConfigCloudAuditOptions", + "description": "Cloud audit options." }, - "zone": { + "counter": { + "$ref": "LogConfigCounterOptions", + "description": "Counter options." + }, + "dataAccess": { + "$ref": "LogConfigDataAccessOptions", + "description": "Data access options." 
+ } + } + }, + "LogConfigCloudAuditOptions": { + "id": "LogConfigCloudAuditOptions", + "type": "object", + "description": "Write a Cloud Audit log", + "properties": { + "authorizationLoggingOptions": { + "$ref": "AuthorizationLoggingOptions", + "description": "Information used by the Cloud Audit Logging pipeline." + }, + "logName": { "type": "string", - "description": "[Output Only] The URL of the zone where the operation resides. Only available when performing per-zone operations." + "description": "The log_name to populate in the Cloud Audit Record.", + "enum": [ + "ADMIN_ACTIVITY", + "DATA_ACCESS", + "UNSPECIFIED_LOG_NAME" + ], + "enumDescriptions": [ + "", + "", + "" + ] } } }, - "OperationAggregatedList": { - "id": "OperationAggregatedList", + "LogConfigCounterOptions": { + "id": "LogConfigCounterOptions", + "type": "object", + "description": "Increment a streamz counter with the specified metric and field names.\n\nMetric names should start with a '/', generally be lowercase-only, and end in \"_count\". Field names should not contain an initial slash. The actual exported metric names will have \"/iam/policy\" prepended.\n\nField names correspond to IAM request parameters and field values are their respective values.\n\nAt present the only supported field names are - \"iam_principal\", corresponding to IAMContext.principal; - \"\" (empty string), resulting in one aggretated counter with no field.\n\nExamples: counter { metric: \"/debug_access_count\" field: \"iam_principal\" } ==\u003e increment counter /iam/policy/backend_debug_access_count {iam_principal=[value of IAMContext.principal]}\n\nAt this time we do not support: * multiple field names (though this may be supported in the future) * decrementing the counter * incrementing it by anything other than 1", + "properties": { + "field": { + "type": "string", + "description": "The field value to attribute." + }, + "metric": { + "type": "string", + "description": "The metric to update." + } + } + }, + "LogConfigDataAccessOptions": { + "id": "LogConfigDataAccessOptions", "type": "object", + "description": "Write a Data Access (Gin) log", "properties": { + "logMode": { + "type": "string", + "description": "Whether Gin logging should happen in a fail-closed manner at the caller. This is relevant only in the LocalIAM implementation, for now.", + "enum": [ + "LOG_FAIL_CLOSED", + "LOG_MODE_UNSPECIFIED" + ], + "enumDescriptions": [ + "", + "" + ] + } + } + }, + "MachineType": { + "id": "MachineType", + "type": "object", + "description": "A Machine Type resource.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "deprecated": { + "$ref": "DeprecationStatus", + "description": "[Output Only] The deprecation status associated with this machine type." + }, + "description": { + "type": "string", + "description": "[Output Only] An optional textual description of the resource." + }, + "guestCpus": { + "type": "integer", + "description": "[Output Only] The number of virtual CPUs that are available to the instance.", + "format": "int32" + }, "id": { "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + "description": "[Output Only] The unique identifier for the resource. 
This identifier is defined by the server.", + "format": "uint64" }, - "items": { - "type": "object", - "description": "[Output Only] A map of scoped operation lists.", - "additionalProperties": { - "$ref": "OperationsScopedList", - "description": "[Output Only] Name of the scope containing this set of operations." - } + "isSharedCpu": { + "type": "boolean", + "description": "[Output Only] Whether this machine type has a shared CPU. See Shared-core machine types for more information." }, "kind": { "type": "string", - "description": "[Output Only] Type of resource. Always compute#operationAggregatedList for aggregated lists of operations.", - "default": "compute#operationAggregatedList" + "description": "[Output Only] The type of the resource. Always compute#machineType for machine types.", + "default": "compute#machineType" }, - "nextPageToken": { + "maximumPersistentDisks": { + "type": "integer", + "description": "[Output Only] Maximum persistent disks allowed.", + "format": "int32" + }, + "maximumPersistentDisksSizeGb": { "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + "description": "[Output Only] Maximum total persistent disks size (GB) allowed.", + "format": "int64" + }, + "memoryMb": { + "type": "integer", + "description": "[Output Only] The amount of physical memory available to the instance, defined in MB.", + "format": "int32" + }, + "name": { + "type": "string", + "description": "[Output Only] Name of the resource.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" }, "selfLink": { "type": "string", - "description": "[Output Only] Server-defined URL for this resource." + "description": "[Output Only] Server-defined URL for the resource." + }, + "zone": { + "type": "string", + "description": "[Output Only] The name of the zone where the machine type resides, such as us-central1-a." } } }, - "OperationList": { - "id": "OperationList", + "MachineTypeAggregatedList": { + "id": "MachineTypeAggregatedList", "type": "object", - "description": "Contains a list of Operation resources.", "properties": { "id": { "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + "description": "[Output Only] Unique identifier for the resource; defined by the server." }, "items": { - "type": "array", - "description": "[Output Only] A list of Operation resources.", - "items": { - "$ref": "Operation" + "type": "object", + "description": "A list of MachineTypesScopedList resources.", + "additionalProperties": { + "$ref": "MachineTypesScopedList", + "description": "[Output Only] Name of the scope containing this set of machine types." } }, "kind": { "type": "string", - "description": "[Output Only] Type of resource. Always compute#operations for Operations resource.", - "default": "compute#operationList" + "description": "[Output Only] Type of resource. Always compute#machineTypeAggregatedList for aggregated lists of machine types.", + "default": "compute#machineTypeAggregatedList" }, "nextPageToken": { "type": "string", @@ -6208,23 +8536,10 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." 
- } - } - }, - "OperationsScopedList": { - "id": "OperationsScopedList", - "type": "object", - "properties": { - "operations": { - "type": "array", - "description": "[Output Only] List of operations contained in this scope.", - "items": { - "$ref": "Operation" - } }, "warning": { "type": "object", - "description": "[Output Only] Informational warning which replaces the list of operations when the list is empty.", + "description": "[Output Only] Informational warning message.", "properties": { "code": { "type": "string", @@ -6293,360 +8608,397 @@ } } }, - "PathMatcher": { - "id": "PathMatcher", + "MachineTypeList": { + "id": "MachineTypeList", "type": "object", - "description": "A matcher for the path portion of the URL. The BackendService from the longest-matched rule will serve the URL. If no rule was matched, the default service will be used.", + "description": "Contains a list of machine types.", "properties": { - "defaultService": { + "id": { "type": "string", - "description": "The full or partial URL to the BackendService resource. This will be used if none of the pathRules defined by this PathMatcher is matched by the URL's path portion. For example, the following are all valid URLs to a BackendService resource: \n- https://www.googleapis.com/compute/v1/projects/project/global/backendServices/backendService \n- compute/v1/projects/project/global/backendServices/backendService \n- global/backendServices/backendService" + "description": "[Output Only] Unique identifier for the resource; defined by the server." }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." + "items": { + "type": "array", + "description": "A list of MachineType resources.", + "items": { + "$ref": "MachineType" + } }, - "name": { + "kind": { "type": "string", - "description": "The name to which this PathMatcher is referred by the HostRule." + "description": "[Output Only] Type of resource. Always compute#machineTypeList for lists of machine types.", + "default": "compute#machineTypeList" }, - "pathRules": { - "type": "array", - "description": "The list of path rules.", - "items": { - "$ref": "PathRule" - } - } - } - }, - "PathRule": { - "id": "PathRule", - "type": "object", - "description": "A path-matching rule for a URL. If matched, will use the specified BackendService to handle the traffic arriving at this URL.", - "properties": { - "paths": { - "type": "array", - "description": "The list of path patterns to match. Each must start with / and the only place a * is allowed is at the end following a /. The string fed to the path matcher does not include any text after the first ? or #, and those chars are not allowed here.", - "items": { - "type": "string" - } + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." }, - "service": { + "selfLink": { "type": "string", - "description": "The URL of the BackendService resource if this rule is matched." + "description": "[Output Only] Server-defined URL for this resource." 
+ }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, - "Policy": { - "id": "Policy", + "MachineTypesScopedList": { + "id": "MachineTypesScopedList", "type": "object", - "description": "Defines an Identity and Access Management (IAM) policy. It is used to specify access control policies for Cloud Platform resources.\n\n\n\nA `Policy` consists of a list of `bindings`. A `Binding` binds a list of `members` to a `role`, where the members can be user accounts, Google groups, Google domains, and service accounts. A `role` is a named list of permissions defined by IAM.\n\n**Example**\n\n{ \"bindings\": [ { \"role\": \"roles/owner\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-other-app@appspot.gserviceaccount.com\", ] }, { \"role\": \"roles/viewer\", \"members\": [\"user:sean@example.com\"] } ] }\n\nFor a description of IAM and its features, see the [IAM developer's guide](https://cloud.google.com/iam).", "properties": { - "auditConfigs": { - "type": "array", - "description": "Specifies cloud audit logging configuration for this policy.", - "items": { - "$ref": "AuditConfig" - } - }, - "bindings": { + "machineTypes": { "type": "array", - "description": "Associates a list of `members` to a `role`. 
`bindings` with no members will result in an error.", + "description": "[Output Only] List of machine types contained in this scope.", "items": { - "$ref": "Binding" + "$ref": "MachineType" } }, - "etag": { - "type": "string", - "description": "`etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy.\n\nIf no `etag` is provided in the call to `setIamPolicy`, then the existing policy is overwritten blindly.", - "format": "byte" - }, - "iamOwned": { - "type": "boolean", - "description": "" - }, - "rules": { - "type": "array", - "description": "If more than one rule is specified, the rules are applied in the following manner: - All matching LOG rules are always applied. - If any DENY/DENY_WITH_LOG rule matches, permission is denied. Logging will be applied if one or more matching rule requires logging. - Otherwise, if any ALLOW/ALLOW_WITH_LOG rule matches, permission is granted. Logging will be applied if one or more matching rule requires logging. - Otherwise, if no rule applies, permission is denied.", - "items": { - "$ref": "Rule" + "warning": { + "type": "object", + "description": "[Output Only] An informational warning that appears when the machine types list is empty.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." 
+ } } - }, - "version": { - "type": "integer", - "description": "Version of the `Policy`. The default version is 0.", - "format": "int32" } } }, - "Project": { - "id": "Project", + "ManagedInstance": { + "id": "ManagedInstance", "type": "object", - "description": "A Project resource. Projects can only be created in the Google Cloud Platform Console. Unless marked otherwise, values can only be modified in the console.", "properties": { - "commonInstanceMetadata": { - "$ref": "Metadata", - "description": "Metadata key/value pairs available to all instances contained in this project. See Custom metadata for more information." - }, - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." - }, - "defaultServiceAccount": { - "type": "string", - "description": "[Output Only] Default service account used by VMs running in this project." - }, - "description": { + "currentAction": { "type": "string", - "description": "An optional textual description of the resource." - }, - "enabledFeatures": { - "type": "array", - "description": "Restricted features enabled for use on this project.", - "items": { - "type": "string" - } + "description": "[Output Only] The current action that the managed instance group has scheduled for the instance. Possible values: \n- NONE The instance is running, and the managed instance group does not have any scheduled actions for this instance. \n- CREATING The managed instance group is creating this instance. If the group fails to create this instance, it will try again until it is successful. \n- CREATING_WITHOUT_RETRIES The managed instance group is attempting to create this instance only once. If the group fails to create this instance, it does not try again and the group's targetSize value is decreased instead. \n- RECREATING The managed instance group is recreating this instance. \n- DELETING The managed instance group is permanently deleting this instance. \n- ABANDONING The managed instance group is abandoning this instance. The instance will be removed from the instance group and from any target pools that are associated with this group. \n- RESTARTING The managed instance group is restarting the instance. \n- REFRESHING The managed instance group is applying configuration changes to the instance without stopping it. For example, the group can update the target pool list for an instance without stopping that instance.", + "enum": [ + "ABANDONING", + "CREATING", + "CREATING_WITHOUT_RETRIES", + "DELETING", + "NONE", + "RECREATING", + "REFRESHING", + "RESTARTING", + "VERIFYING" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] }, "id": { "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server. This is not the project ID, and is just a unique ID used by Compute Engine to identify resources.", + "description": "[Output only] The unique identifier for this resource. This field is empty when instance does not exist.", "format": "uint64" }, - "kind": { - "type": "string", - "description": "[Output Only] Type of the resource. Always compute#project for projects.", - "default": "compute#project" - }, - "name": { - "type": "string", - "description": "The project ID. For example: my-example-project. Use the project ID to make requests to Compute Engine." 
- }, - "quotas": { - "type": "array", - "description": "[Output Only] Quotas assigned to this project.", - "items": { - "$ref": "Quota" - } - }, - "selfLink": { + "instance": { "type": "string", - "description": "[Output Only] Server-defined URL for the resource." - }, - "usageExportLocation": { - "$ref": "UsageExportLocation", - "description": "The naming prefix for daily usage reports and the Google Cloud Storage bucket where they are stored." + "description": "[Output Only] The URL of the instance. The URL can exist even if the instance has not yet been created." }, - "xpnProjectStatus": { + "instanceStatus": { "type": "string", - "description": "[Output Only] The role this project has in a shared VPC configuration. Currently only HOST projects are differentiated.", + "description": "[Output Only] The status of the instance. This field is empty when the instance does not exist.", "enum": [ - "HOST", - "UNSPECIFIED_XPN_PROJECT_STATUS" + "PROVISIONING", + "RUNNING", + "STAGING", + "STOPPED", + "STOPPING", + "SUSPENDED", + "SUSPENDING", + "TERMINATED" ], "enumDescriptions": [ + "", + "", + "", + "", + "", + "", "", "" ] + }, + "lastAttempt": { + "$ref": "ManagedInstanceLastAttempt", + "description": "[Output Only] Information about the last attempt to create or delete the instance." + }, + "version": { + "$ref": "ManagedInstanceVersion", + "description": "[Output Only] Intended version of this instance." } } }, - "ProjectsDisableXpnResourceRequest": { - "id": "ProjectsDisableXpnResourceRequest", + "ManagedInstanceLastAttempt": { + "id": "ManagedInstanceLastAttempt", "type": "object", "properties": { - "xpnResource": { - "$ref": "XpnResourceId", - "description": "Service resource (a.k.a service project) ID." + "errors": { + "type": "object", + "description": "[Output Only] Encountered errors during the last attempt to create or delete the instance.", + "properties": { + "errors": { + "type": "array", + "description": "[Output Only] The array of errors encountered while processing this operation.", + "items": { + "type": "object", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] The error type identifier for this error." + }, + "location": { + "type": "string", + "description": "[Output Only] Indicates the field in the request that caused the error. This property is optional." + }, + "message": { + "type": "string", + "description": "[Output Only] An optional, human-readable error message." + } + } + } + } + } } } }, - "ProjectsEnableXpnResourceRequest": { - "id": "ProjectsEnableXpnResourceRequest", + "ManagedInstanceVersion": { + "id": "ManagedInstanceVersion", "type": "object", "properties": { - "xpnResource": { - "$ref": "XpnResourceId", - "description": "Service resource (a.k.a service project) ID." + "instanceTemplate": { + "type": "string", + "description": "[Output Only] The intended template of the instance. This field is empty when current_action is one of { DELETING, ABANDONING }." + }, + "name": { + "type": "string", + "description": "[Output Only] Name of the version." } } }, - "ProjectsGetXpnResources": { - "id": "ProjectsGetXpnResources", + "Metadata": { + "id": "Metadata", "type": "object", + "description": "A metadata key/value entry.", "properties": { - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. 
Always compute#projectsGetXpnResources for lists of service resources (a.k.a service projects)", - "default": "compute#projectsGetXpnResources" - }, - "nextPageToken": { + "fingerprint": { "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + "description": "Specifies a fingerprint for this request, which is essentially a hash of the metadata's contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update metadata. You must always provide an up-to-date fingerprint hash in order to update or change metadata.", + "format": "byte" }, - "resources": { + "items": { "type": "array", - "description": "Serive resources (a.k.a service projects) attached to this project as their shared VPC host.", + "description": "Array of key/value pairs. The total size of all keys and values must be less than 512 KB.", "items": { - "$ref": "XpnResourceId" + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "Key for the metadata entry. Keys must conform to the following regexp: [a-zA-Z0-9-_]+, and be less than 128 bytes in length. This is reflected as part of a URL in the metadata server. Additionally, to avoid ambiguity, keys must not conflict with any other metadata keys for the project.", + "pattern": "[a-zA-Z0-9-_]{1,128}", + "annotations": { + "required": [ + "compute.instances.insert", + "compute.projects.setCommonInstanceMetadata" + ] + } + }, + "value": { + "type": "string", + "description": "Value for the metadata entry. These are free-form strings, and only have meaning as interpreted by the image running in the instance. The only restriction placed on values is that their size must be less than or equal to 262144 bytes (256 KiB).", + "annotations": { + "required": [ + "compute.instances.insert", + "compute.projects.setCommonInstanceMetadata" + ] + } + } + } } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#metadata for metadata.", + "default": "compute#metadata" } } }, - "ProjectsListXpnHostsRequest": { - "id": "ProjectsListXpnHostsRequest", + "NamedPort": { + "id": "NamedPort", "type": "object", + "description": "The named port. For example: .", "properties": { - "organization": { + "name": { "type": "string", - "description": "Optional organization ID managed by Cloud Resource Manager, for which to list shared VPC host projects. If not specified, the organization will be inferred from the project." + "description": "The name for this named port. The name must be 1-63 characters long, and comply with RFC1035." + }, + "port": { + "type": "integer", + "description": "The port number, which can be a value between 1 and 65535.", + "format": "int32" } } }, - "Quota": { - "id": "Quota", + "Network": { + "id": "Network", "type": "object", - "description": "A quotas entry.", + "description": "Represents a Network resource. Read Networks and Firewalls for more information.", "properties": { - "limit": { - "type": "number", - "description": "[Output Only] Quota limit for this metric.", - "format": "double" + "IPv4Range": { + "type": "string", + "description": "The range of internal addresses that are legal on this network. 
This range is a CIDR specification, for example: 192.168.0.0/16. Provided by the client when the network is created.", + "pattern": "[0-9]{1,3}(?:\\.[0-9]{1,3}){3}/[0-9]{1,2}" }, - "metric": { - "type": "string", - "description": "[Output Only] Name of the quota metric.", - "enum": [ - "AUTOSCALERS", - "BACKEND_BUCKETS", - "BACKEND_SERVICES", - "COMMITMENTS", - "CPUS", - "CPUS_ALL_REGIONS", - "DISKS_TOTAL_GB", - "FIREWALLS", - "FORWARDING_RULES", - "HEALTH_CHECKS", - "IMAGES", - "INSTANCES", - "INSTANCE_GROUPS", - "INSTANCE_GROUP_MANAGERS", - "INSTANCE_TEMPLATES", - "IN_USE_ADDRESSES", - "LOCAL_SSD_TOTAL_GB", - "NETWORKS", - "NVIDIA_K80_GPUS", - "PREEMPTIBLE_CPUS", - "PREEMPTIBLE_LOCAL_SSD_GB", - "REGIONAL_AUTOSCALERS", - "REGIONAL_INSTANCE_GROUP_MANAGERS", - "ROUTERS", - "ROUTES", - "SECURITY_POLICIES", - "SECURITY_POLICY_RULES", - "SNAPSHOTS", - "SSD_TOTAL_GB", - "SSL_CERTIFICATES", - "STATIC_ADDRESSES", - "SUBNETWORKS", - "TARGET_HTTPS_PROXIES", - "TARGET_HTTP_PROXIES", - "TARGET_INSTANCES", - "TARGET_POOLS", - "TARGET_SSL_PROXIES", - "TARGET_VPN_GATEWAYS", - "URL_MAPS", - "VPN_TUNNELS" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ] - }, - "usage": { - "type": "number", - "description": "[Output Only] Current usage of this metric.", - "format": "double" - } - } - }, - "Reference": { - "id": "Reference", - "type": "object", - "description": "Represents a reference to a resource.", - "properties": { - "kind": { - "type": "string", - "description": "[Output Only] Type of the resource. Always compute#reference for references.", - "default": "compute#reference" - }, - "referenceType": { - "type": "string", - "description": "A description of the reference type with no implied semantics. Possible values include: \n- MEMBER_OF" - }, - "referrer": { - "type": "string", - "description": "URL of the resource which refers to the target." - }, - "target": { - "type": "string", - "description": "URL of the resource to which this reference points." - } - } - }, - "Region": { - "id": "Region", - "type": "object", - "description": "Region resource.", - "properties": { - "creationTimestamp": { + "autoCreateSubnetworks": { + "type": "boolean", + "description": "When set to true, the network is created in \"auto subnet mode\". When set to false, the network is in \"custom subnet mode\".\n\nIn \"auto subnet mode\", a newly created network is assigned the default CIDR of 10.128.0.0/9 and it automatically creates one subnetwork per region." + }, + "creationTimestamp": { "type": "string", "description": "[Output Only] Creation timestamp in RFC3339 text format." }, - "deprecated": { - "$ref": "DeprecationStatus", - "description": "[Output Only] The deprecation status associated with this region." - }, "description": { "type": "string", - "description": "[Output Only] Textual description of the resource." + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "gatewayIPv4": { + "type": "string", + "description": "A gateway address for default routing to other networks. 
This value is read only and is selected by the Google Compute Engine, typically as the first usable address in the IPv4Range.", + "pattern": "[0-9]{1,3}(?:\\.[0-9]{1,3}){3}" }, "id": { "type": "string", @@ -6655,80 +9007,89 @@ }, "kind": { "type": "string", - "description": "[Output Only] Type of the resource. Always compute#region for regions.", - "default": "compute#region" + "description": "[Output Only] Type of the resource. Always compute#network for networks.", + "default": "compute#network" }, "name": { "type": "string", - "description": "[Output Only] Name of the resource." + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "annotations": { + "required": [ + "compute.networks.insert" + ] + } }, - "quotas": { + "peerings": { "type": "array", - "description": "[Output Only] Quotas assigned to this region.", + "description": "[Output Only] List of network peerings for the resource.", "items": { - "$ref": "Quota" + "$ref": "NetworkPeering" } }, + "routingConfig": { + "$ref": "NetworkRoutingConfig", + "description": "The network-level routing configuration for this network. Used by Cloud Router to determine what type of network-wide routing behavior to enforce." + }, "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for the resource." }, - "status": { - "type": "string", - "description": "[Output Only] Status of the region, either UP or DOWN.", - "enum": [ - "DOWN", - "UP" - ], - "enumDescriptions": [ - "", - "" - ] - }, - "zones": { + "subnetworks": { "type": "array", - "description": "[Output Only] A list of zones available in this region, in the form of resource URLs.", + "description": "[Output Only] Server-defined fully-qualified URLs for all subnetworks in this network.", "items": { "type": "string" } } } }, - "RegionAutoscalerList": { - "id": "RegionAutoscalerList", + "NetworkInterface": { + "id": "NetworkInterface", "type": "object", - "description": "Contains a list of autoscalers.", + "description": "A network interface resource attached to an instance.", "properties": { - "id": { - "type": "string", - "description": "[Output Only] Unique identifier for the resource; defined by the server." + "accessConfigs": { + "type": "array", + "description": "An array of configurations for this interface. Currently, only one access config, ONE_TO_ONE_NAT, is supported. If there are no accessConfigs specified, then this instance will have no external internet access.", + "items": { + "$ref": "AccessConfig" + } }, - "items": { + "aliasIpRanges": { "type": "array", - "description": "A list of Autoscaler resources.", + "description": "An array of alias IP ranges for this network interface. Can only be specified for network interfaces on subnet-mode networks.", "items": { - "$ref": "Autoscaler" + "$ref": "AliasIpRange" } }, "kind": { "type": "string", - "description": "Type of resource.", - "default": "compute#regionAutoscalerList" + "description": "[Output Only] Type of the resource. 
Always compute#networkInterface for network interfaces.", + "default": "compute#networkInterface" }, - "nextPageToken": { + "name": { "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + "description": "[Output Only] The name of the network interface, generated by the server. For network devices, these are eth0, eth1, etc." }, - "selfLink": { + "network": { "type": "string", - "description": "[Output Only] Server-defined URL for this resource." + "description": "URL of the network resource for this instance. When creating an instance, if neither the network nor the subnetwork is specified, the default network global/networks/default is used; if the network is not specified but the subnetwork is specified, the network is inferred.\n\nThis field is optional when creating a firewall rule. If not specified when creating a firewall rule, the default network global/networks/default is used.\n\nIf you specify this property, you can specify the network as a full or partial URL. For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project/global/networks/network \n- projects/project/global/networks/network \n- global/networks/default" + }, + "networkIP": { + "type": "string", + "description": "An IPv4 internal network address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system." + }, + "subnetwork": { + "type": "string", + "description": "The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not provide this property. If the network is in auto subnet mode, providing the subnetwork is optional. If the network is in custom subnet mode, then this field should be specified. If you specify this property, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project/regions/region/subnetworks/subnetwork \n- regions/region/subnetworks/subnetwork" } } }, - "RegionInstanceGroupList": { - "id": "RegionInstanceGroupList", + "NetworkList": { + "id": "NetworkList", "type": "object", - "description": "Contains a list of InstanceGroup resources.", + "description": "Contains a list of networks.", "properties": { "id": { "type": "string", @@ -6736,15 +9097,15 @@ }, "items": { "type": "array", - "description": "A list of InstanceGroup resources.", + "description": "A list of Network resources.", "items": { - "$ref": "InstanceGroup" + "$ref": "Network" } }, "kind": { "type": "string", - "description": "The resource type.", - "default": "compute#regionInstanceGroupList" + "description": "[Output Only] Type of resource. Always compute#networkList for lists of networks.", + "default": "compute#networkList" }, "nextPageToken": { "type": "string", @@ -6753,396 +9114,290 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, - "RegionInstanceGroupManagerList": { - "id": "RegionInstanceGroupManagerList", + "NetworkPeering": { + "id": "NetworkPeering", "type": "object", - "description": "Contains a list of managed instance groups.", + "description": "A network peering attached to a network resource. The message includes the peering name, peer network, peering state, and a flag indicating whether Google Compute Engine should automatically create routes for the peering.", "properties": { - "id": { - "type": "string", - "description": "[Output Only] Unique identifier for the resource; defined by the server." - }, - "items": { - "type": "array", - "description": "A list of InstanceGroupManager resources.", - "items": { - "$ref": "InstanceGroupManager" - } + "autoCreateRoutes": { + "type": "boolean", + "description": "Whether full mesh connectivity is created and managed automatically. When it is set to true, Google Compute Engine will automatically create and manage the routes between two networks when the state is ACTIVE. Otherwise, user needs to create routes manually to route packets to peer network." }, - "kind": { + "name": { "type": "string", - "description": "[Output Only] The resource type, which is always compute#instanceGroupManagerList for a list of managed instance groups that exist in th regional scope.", - "default": "compute#regionInstanceGroupManagerList" + "description": "Name of this peering. Provided by the client when the peering is created. The name must comply with RFC1035. Specifically, the name must be 1-63 characters long and match regular expression [a-z]([-a-z0-9]*[a-z0-9])? 
which means the first character must be a lowercase letter, and all the following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash." }, - "nextPageToken": { + "network": { "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + "description": "The URL of the peer network. It can be either full URL or partial URL. The peer network may belong to a different project. If the partial URL does not contain project, it is assumed that the peer network is in the same project as the current network." }, - "selfLink": { + "state": { "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "RegionInstanceGroupManagersAbandonInstancesRequest": { - "id": "RegionInstanceGroupManagersAbandonInstancesRequest", - "type": "object", - "properties": { - "instances": { - "type": "array", - "description": "The URLs of one or more instances to abandon. This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", - "items": { - "type": "string" - } + "description": "[Output Only] State for the peering.", + "enum": [ + "ACTIVE", + "INACTIVE" + ], + "enumDescriptions": [ + "", + "" + ] + }, + "stateDetails": { + "type": "string", + "description": "[Output Only] Details about the current state of the peering." } } }, - "RegionInstanceGroupManagersDeleteInstancesRequest": { - "id": "RegionInstanceGroupManagersDeleteInstancesRequest", + "NetworkRoutingConfig": { + "id": "NetworkRoutingConfig", "type": "object", + "description": "A routing configuration attached to a network resource. The message includes the list of routers associated with the network, and a flag indicating the type of routing behavior to enforce network-wide.", "properties": { - "instances": { - "type": "array", - "description": "The URLs of one or more instances to delete. This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", - "items": { - "type": "string" - } + "routingMode": { + "type": "string", + "description": "The network-wide routing mode to use. If set to REGIONAL, this network's cloud routers will only advertise routes with subnetworks of this network in the same region as the router. If set to GLOBAL, this network's cloud routers will advertise routes with all subnetworks of this network, across regions.", + "enum": [ + "GLOBAL", + "REGIONAL" + ], + "enumDescriptions": [ + "", + "" + ] } } }, - "RegionInstanceGroupManagersListInstancesResponse": { - "id": "RegionInstanceGroupManagersListInstancesResponse", + "NetworksAddPeeringRequest": { + "id": "NetworksAddPeeringRequest", "type": "object", "properties": { - "managedInstances": { - "type": "array", - "description": "List of managed instances.", - "items": { - "$ref": "ManagedInstance" - } + "autoCreateRoutes": { + "type": "boolean", + "description": "Whether Google Compute Engine manages the routes automatically." }, - "nextPageToken": { + "name": { "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. 
Subsequent list requests will have their own nextPageToken to continue paging through the results." - } - } - }, - "RegionInstanceGroupManagersRecreateRequest": { - "id": "RegionInstanceGroupManagersRecreateRequest", - "type": "object", - "properties": { - "instances": { - "type": "array", - "description": "The URLs of one or more instances to recreate. This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", - "items": { - "type": "string" - } + "description": "Name of the peering, which should conform to RFC1035." + }, + "peerNetwork": { + "type": "string", + "description": "URL of the peer network. It can be either full URL or partial URL. The peer network may belong to a different project. If the partial URL does not contain project, it is assumed that the peer network is in the same project as the current network." } } }, - "RegionInstanceGroupManagersSetAutoHealingRequest": { - "id": "RegionInstanceGroupManagersSetAutoHealingRequest", + "NetworksRemovePeeringRequest": { + "id": "NetworksRemovePeeringRequest", "type": "object", "properties": { - "autoHealingPolicies": { - "type": "array", - "items": { - "$ref": "InstanceGroupManagerAutoHealingPolicy" - } + "name": { + "type": "string", + "description": "Name of the peering, which should conform to RFC1035." } } }, - "RegionInstanceGroupManagersSetTargetPoolsRequest": { - "id": "RegionInstanceGroupManagersSetTargetPoolsRequest", + "Operation": { + "id": "Operation", "type": "object", + "description": "An Operation resource, used to manage asynchronous API requests.", "properties": { - "fingerprint": { + "clientOperationId": { "type": "string", - "description": "Fingerprint of the target pools information, which is a hash of the contents. This field is used for optimistic locking when you update the target pool entries. This field is optional.", - "format": "byte" + "description": "[Output Only] Reserved for future use." }, - "targetPools": { - "type": "array", - "description": "The URL of all TargetPool resources to which instances in the instanceGroup field are added. The target pools automatically apply to all of the instances in the managed instance group.", - "items": { - "type": "string" + "creationTimestamp": { + "type": "string", + "description": "[Deprecated] This field is deprecated." + }, + "description": { + "type": "string", + "description": "[Output Only] A textual description of the operation, which is set when the operation is created." + }, + "endTime": { + "type": "string", + "description": "[Output Only] The time that this operation was completed. This value is in RFC3339 text format." + }, + "error": { + "type": "object", + "description": "[Output Only] If errors are generated during processing of the operation, this field will be populated.", + "properties": { + "errors": { + "type": "array", + "description": "[Output Only] The array of errors encountered while processing this operation.", + "items": { + "type": "object", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] The error type identifier for this error." + }, + "location": { + "type": "string", + "description": "[Output Only] Indicates the field in the request that caused the error. This property is optional." + }, + "message": { + "type": "string", + "description": "[Output Only] An optional, human-readable error message." 
+ } + } + } + } } - } - } - }, - "RegionInstanceGroupManagersSetTemplateRequest": { - "id": "RegionInstanceGroupManagersSetTemplateRequest", - "type": "object", - "properties": { - "instanceTemplate": { + }, + "httpErrorMessage": { "type": "string", - "description": "URL of the InstanceTemplate resource from which all new instances will be created." - } - } - }, - "RegionInstanceGroupsListInstances": { - "id": "RegionInstanceGroupsListInstances", - "type": "object", - "properties": { + "description": "[Output Only] If the operation fails, this field contains the HTTP error message that was returned, such as NOT FOUND." + }, + "httpErrorStatusCode": { + "type": "integer", + "description": "[Output Only] If the operation fails, this field contains the HTTP error status code that was returned. For example, a 404 means the resource was not found.", + "format": "int32" + }, "id": { "type": "string", - "description": "[Output Only] Unique identifier for the resource; defined by the server." + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" }, - "items": { - "type": "array", - "description": "A list of InstanceWithNamedPorts resources.", - "items": { - "$ref": "InstanceWithNamedPorts" - } + "insertTime": { + "type": "string", + "description": "[Output Only] The time that this operation was requested. This value is in RFC3339 text format." }, "kind": { "type": "string", - "description": "The resource type.", - "default": "compute#regionInstanceGroupsListInstances" + "description": "[Output Only] Type of the resource. Always compute#operation for Operation resources.", + "default": "compute#operation" }, - "nextPageToken": { + "name": { "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + "description": "[Output Only] Name of the resource." + }, + "operationType": { + "type": "string", + "description": "[Output Only] The type of operation, such as insert, update, or delete, and so on." + }, + "progress": { + "type": "integer", + "description": "[Output Only] An optional progress indicator that ranges from 0 to 100. There is no requirement that this be linear or support any granularity of operations. This should not be used to guess when the operation will be complete. This number should monotonically increase as the operation progresses.", + "format": "int32" + }, + "region": { + "type": "string", + "description": "[Output Only] The URL of the region where the operation resides. Only available when performing regional operations." }, "selfLink": { "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "RegionInstanceGroupsListInstancesRequest": { - "id": "RegionInstanceGroupsListInstancesRequest", - "type": "object", - "properties": { - "instanceState": { + "description": "[Output Only] Server-defined URL for the resource." + }, + "startTime": { "type": "string", - "description": "Instances in which state should be returned. Valid options are: 'ALL', 'RUNNING'. By default, it lists all instances.", + "description": "[Output Only] The time that this operation was started by the server. This value is in RFC3339 text format." 
+ }, + "status": { + "type": "string", + "description": "[Output Only] The status of the operation, which can be one of the following: PENDING, RUNNING, or DONE.", "enum": [ - "ALL", + "DONE", + "PENDING", "RUNNING" ], "enumDescriptions": [ + "", "", "" ] }, - "portName": { + "statusMessage": { "type": "string", - "description": "Name of port user is interested in. It is optional. If it is set, only information about this ports will be returned. If it is not set, all the named ports will be returned. Always lists all instances.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" - } - } - }, - "RegionInstanceGroupsSetNamedPortsRequest": { - "id": "RegionInstanceGroupsSetNamedPortsRequest", - "type": "object", - "properties": { - "fingerprint": { + "description": "[Output Only] An optional textual description of the current status of the operation." + }, + "targetId": { "type": "string", - "description": "The fingerprint of the named ports information for this instance group. Use this optional property to prevent conflicts when multiple users change the named ports settings concurrently. Obtain the fingerprint with the instanceGroups.get method. Then, include the fingerprint in your request to ensure that you do not overwrite changes that were applied from another concurrent request.", - "format": "byte" + "description": "[Output Only] The unique target ID, which identifies a specific incarnation of the target resource.", + "format": "uint64" }, - "namedPorts": { - "type": "array", - "description": "The list of named ports to set for this instance group.", - "items": { - "$ref": "NamedPort" - } - } - } - }, - "RegionList": { - "id": "RegionList", - "type": "object", - "description": "Contains a list of region resources.", - "properties": { - "id": { + "targetLink": { "type": "string", - "description": "[Output Only] Unique identifier for the resource; defined by the server." + "description": "[Output Only] The URL of the resource that the operation modifies. For operations related to creating a snapshot, this points to the persistent disk that the snapshot was created from." }, - "items": { + "user": { + "type": "string", + "description": "[Output Only] User who requested the operation, for example: user@example.com." + }, + "warnings": { "type": "array", - "description": "A list of Region resources.", - "items": { - "$ref": "Region" - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#regionList for lists of regions.", - "default": "compute#regionList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "RegionSetLabelsRequest": { - "id": "RegionSetLabelsRequest", - "type": "object", - "properties": { - "labelFingerprint": { - "type": "string", - "description": "The fingerprint of the previous set of labels for this resource, used to detect conflicts. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels. 
Make a get() request to the resource to get the latest fingerprint.", - "format": "byte" - }, - "labels": { - "type": "object", - "description": "The labels to set for this resource.", - "additionalProperties": { - "type": "string" - } - } - } - }, - "ResourceCommitment": { - "id": "ResourceCommitment", - "type": "object", - "description": "Commitment for a particular resource (a Commitment is composed of one or more of these).", - "properties": { - "amount": { - "type": "string", - "description": "The amount of the resource purchased (in a type-dependent unit, such as bytes). For vCPUs, this can just be an integer. For memory, this must be provided in MB. Memory must be a multiple of 256 MB, with up to 6.5GB of memory per every vCPU.", - "format": "int64" - }, - "type": { - "type": "string", - "description": "Type of resource for which this commitment applies. Possible values are VCPU and MEMORY", - "enum": [ - "MEMORY", - "UNSPECIFIED", - "VCPU" - ], - "enumDescriptions": [ - "", - "", - "" - ] - } - } - }, - "ResourceGroupReference": { - "id": "ResourceGroupReference", - "type": "object", - "properties": { - "group": { - "type": "string", - "description": "A URI referencing one of the instance groups listed in the backend service." - } - } - }, - "Route": { - "id": "Route", - "type": "object", - "description": "Represents a Route resource. A route specifies how certain packets should be handled by the network. Routes are associated with instances by tags and the set of routes for a particular instance is called its routing table.\n\nFor each packet leaving an instance, the system searches that instance's routing table for a single best matching route. Routes match packets by destination IP address, preferring smaller or more specific ranges over larger ones. If there is a tie, the system selects the route with the smallest priority value. If there is still a tie, it uses the layer three and four packet headers to select just one of the remaining matching routes. The packet is then forwarded as specified by the nextHop field of the winning route - either to another instance destination, an instance gateway, or a Google Compute Engine-operated gateway.\n\nPackets that do not match any route in the sending instance's routing table are dropped.", - "properties": { - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." - }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." - }, - "destRange": { - "type": "string", - "description": "The destination range of outgoing packets that this route applies to. Only IPv4 is supported.", - "annotations": { - "required": [ - "compute.routes.insert" - ] - } - }, - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64" - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of this resource. Always compute#routes for Route resources.", - "default": "compute#route" - }, - "name": { - "type": "string", - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? 
which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "annotations": { - "required": [ - "compute.routes.insert" - ] - } - }, - "network": { - "type": "string", - "description": "Fully-qualified URL of the network that this route applies to.", - "annotations": { - "required": [ - "compute.routes.insert" - ] - } - }, - "nextHopGateway": { - "type": "string", - "description": "The URL to a gateway that should handle matching packets. You can only specify the internet gateway using a full or partial valid URL: projects/\u003cproject-id\u003e/global/gateways/default-internet-gateway" - }, - "nextHopInstance": { - "type": "string", - "description": "The URL to an instance that should handle matching packets. You can specify this as a full or partial URL. For example:\nhttps://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/" - }, - "nextHopIp": { - "type": "string", - "description": "The network IP address of an instance that should handle matching packets. Only IPv4 is supported." - }, - "nextHopNetwork": { - "type": "string", - "description": "The URL of the local network if it should handle matching packets." - }, - "nextHopPeering": { - "type": "string", - "description": "[Output Only] The network peering name that should handle matching packets, which should conform to RFC1035." - }, - "nextHopVpnTunnel": { - "type": "string", - "description": "The URL to a VpnTunnel that should handle matching packets." - }, - "priority": { - "type": "integer", - "description": "The priority of this route. Priority is used to break ties in cases where there is more than one matching route of equal prefix length. In the case of two routes with equal prefix length, the one with the lowest-numbered priority value wins. Default value is 1000. Valid range is 0 through 65535.", - "format": "uint32", - "annotations": { - "required": [ - "compute.routes.insert" - ] - } - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined fully-qualified URL for this resource." - }, - "tags": { - "type": "array", - "description": "A list of instance tags to which this route applies.", - "items": { - "type": "string" - }, - "annotations": { - "required": [ - "compute.routes.insert" - ] - } - }, - "warnings": { - "type": "array", - "description": "[Output Only] If potential misconfigurations are detected for this route, this field will be populated with warning messages.", + "description": "[Output Only] If warning messages are generated during processing of the operation, this field will be populated.", "items": { "type": "object", "properties": { @@ -7211,29 +9466,33 @@ } } } + }, + "zone": { + "type": "string", + "description": "[Output Only] The URL of the zone where the operation resides. Only available when performing per-zone operations." } } }, - "RouteList": { - "id": "RouteList", + "OperationAggregatedList": { + "id": "OperationAggregatedList", "type": "object", - "description": "Contains a list of Route resources.", "properties": { "id": { "type": "string", - "description": "[Output Only] Unique identifier for the resource; defined by the server." + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." 
}, "items": { - "type": "array", - "description": "A list of Route resources.", - "items": { - "$ref": "Route" + "type": "object", + "description": "[Output Only] A map of scoped operation lists.", + "additionalProperties": { + "$ref": "OperationsScopedList", + "description": "[Output Only] Name of the scope containing this set of operations." } }, "kind": { "type": "string", - "description": "Type of resource.", - "default": "compute#routeList" + "description": "[Output Only] Type of resource. Always compute#operationAggregatedList for aggregated lists of operations.", + "default": "compute#operationAggregatedList" }, "nextPageToken": { "type": "string", @@ -7242,100 +9501,98 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "Router": { - "id": "Router", - "type": "object", - "description": "Router resource.", - "properties": { - "bgp": { - "$ref": "RouterBgp", - "description": "BGP information specific to this router." - }, - "bgpPeers": { - "type": "array", - "description": "BGP information that needs to be configured into the routing stack to establish the BGP peering. It must specify peer ASN and either interface name, IP, or peer IP. Please refer to RFC4273.", - "items": { - "$ref": "RouterBgpPeer" - } - }, - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." - }, - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64" - }, - "interfaces": { - "type": "array", - "description": "Router interfaces. Each interface requires either one linked resource (e.g. linkedVpnTunnel), or IP address and IP address range (e.g. ipRange), or both.", - "items": { - "$ref": "RouterInterface" - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#router for routers.", - "default": "compute#router" - }, - "name": { - "type": "string", - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "annotations": { - "required": [ - "compute.routers.insert" - ] - } - }, - "network": { - "type": "string", - "description": "URI of the network to which this router belongs.", - "annotations": { - "required": [ - "compute.routers.insert" - ] + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } } - }, - "region": { - "type": "string", - "description": "[Output Only] URI of the region where the router resides." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for the resource." } } }, - "RouterAggregatedList": { - "id": "RouterAggregatedList", + "OperationList": { + "id": "OperationList", "type": "object", - "description": "Contains a list of routers.", + "description": "Contains a list of Operation resources.", "properties": { "id": { "type": "string", - "description": "[Output Only] Unique identifier for the resource; defined by the server." + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." }, "items": { - "type": "object", - "description": "A list of Router resources.", - "additionalProperties": { - "$ref": "RoutersScopedList", - "description": "Name of the scope containing this set of routers." + "type": "array", + "description": "[Output Only] A list of Operation resources.", + "items": { + "$ref": "Operation" } }, "kind": { "type": "string", - "description": "Type of resource.", - "default": "compute#routerAggregatedList" + "description": "[Output Only] Type of resource. Always compute#operations for Operations resource.", + "default": "compute#operationList" }, "nextPageToken": { "type": "string", @@ -7344,231 +9601,92 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "RouterBgp": { - "id": "RouterBgp", - "type": "object", - "properties": { - "asn": { - "type": "integer", - "description": "Local BGP Autonomous System Number (ASN). Must be an RFC6996 private ASN, either 16-bit or 32-bit. The value will be fixed for this router resource. 
All VPN tunnels that link to this router will have the same local ASN.", - "format": "uint32" - } - } - }, - "RouterBgpPeer": { - "id": "RouterBgpPeer", - "type": "object", - "properties": { - "advertisedRoutePriority": { - "type": "integer", - "description": "The priority of routes advertised to this BGP peer. In the case where there is more than one matching route of maximum length, the routes with lowest priority value win.", - "format": "uint32" - }, - "interfaceName": { - "type": "string", - "description": "Name of the interface the BGP peer is associated with." - }, - "ipAddress": { - "type": "string", - "description": "IP address of the interface inside Google Cloud Platform. Only IPv4 is supported." - }, - "name": { - "type": "string", - "description": "Name of this BGP peer. The name must be 1-63 characters long and comply with RFC1035.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" - }, - "peerAsn": { - "type": "integer", - "description": "Peer BGP Autonomous System Number (ASN). For VPN use case, this value can be different for every tunnel.", - "format": "uint32" - }, - "peerIpAddress": { - "type": "string", - "description": "IP address of the BGP interface outside Google cloud. Only IPv4 is supported." - } - } - }, - "RouterInterface": { - "id": "RouterInterface", - "type": "object", - "properties": { - "ipRange": { - "type": "string", - "description": "IP address and range of the interface. The IP range must be in the RFC3927 link-local IP space. The value must be a CIDR-formatted string, for example: 169.254.0.1/30. NOTE: Do not truncate the address as it represents the IP address of the interface." - }, - "linkedVpnTunnel": { - "type": "string", - "description": "URI of the linked VPN tunnel. It must be in the same region as the router. Each interface can have at most one linked resource and it could either be a VPN Tunnel or an interconnect attachment." }, - "name": { - "type": "string", - "description": "Name of this interface entry. The name must be 1-63 characters long and comply with RFC1035.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, - "RouterList": { - "id": "RouterList", + "OperationsScopedList": { + "id": "OperationsScopedList", "type": "object", - "description": "Contains a list of Router resources.", "properties": { - "id": { - "type": "string", - "description": "[Output Only] Unique identifier for the resource; defined by the server." - }, - "items": { + "operations": { "type": "array", - "description": "A list of Router resources.", + "description": "[Output Only] List of operations contained in this scope.", "items": { - "$ref": "Router" - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#router for routers.", - "default": "compute#routerList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "RouterStatus": { - "id": "RouterStatus", - "type": "object", - "properties": { - "bestRoutes": { - "type": "array", - "description": "Best routes for this router's network.", - "items": { - "$ref": "Route" - } - }, - "bestRoutesForRouter": { - "type": "array", - "description": "Best routes learned by this router.", - "items": { - "$ref": "Route" - } - }, - "bgpPeerStatus": { - "type": "array", - "items": { - "$ref": "RouterStatusBgpPeerStatus" - } - }, - "network": { - "type": "string", - "description": "URI of the network to which this router belongs." - } - } - }, - "RouterStatusBgpPeerStatus": { - "id": "RouterStatusBgpPeerStatus", - "type": "object", - "properties": { - "advertisedRoutes": { - "type": "array", - "description": "Routes that were advertised to the remote BGP peer", - "items": { - "$ref": "Route" - } - }, - "ipAddress": { - "type": "string", - "description": "IP address of the local BGP interface." - }, - "linkedVpnTunnel": { - "type": "string", - "description": "URL of the VPN tunnel that this BGP peer controls." - }, - "name": { - "type": "string", - "description": "Name of this BGP peer. Unique within the Routers resource." - }, - "numLearnedRoutes": { - "type": "integer", - "description": "Number of routes learned from the remote BGP Peer.", - "format": "uint32" - }, - "peerIpAddress": { - "type": "string", - "description": "IP address of the remote BGP interface." - }, - "state": { - "type": "string", - "description": "BGP state as specified in RFC1771." 
- }, - "status": { - "type": "string", - "description": "Status of the BGP peer: {UP, DOWN}", - "enum": [ - "DOWN", - "UNKNOWN", - "UP" - ], - "enumDescriptions": [ - "", - "", - "" - ] - }, - "uptime": { - "type": "string", - "description": "Time this session has been up. Format: 14 years, 51 weeks, 6 days, 23 hours, 59 minutes, 59 seconds" - }, - "uptimeSeconds": { - "type": "string", - "description": "Time this session has been up, in seconds. Format: 145" - } - } - }, - "RouterStatusResponse": { - "id": "RouterStatusResponse", - "type": "object", - "properties": { - "kind": { - "type": "string", - "description": "Type of resource.", - "default": "compute#routerStatusResponse" - }, - "result": { - "$ref": "RouterStatus" - } - } - }, - "RoutersPreviewResponse": { - "id": "RoutersPreviewResponse", - "type": "object", - "properties": { - "resource": { - "$ref": "Router", - "description": "Preview of given router." - } - } - }, - "RoutersScopedList": { - "id": "RoutersScopedList", - "type": "object", - "properties": { - "routers": { - "type": "array", - "description": "List of routers contained in this scope.", - "items": { - "$ref": "Router" + "$ref": "Operation" } }, "warning": { "type": "object", - "description": "Informational warning which replaces the list of routers when the list is empty.", + "description": "[Output Only] Informational warning which replaces the list of operations when the list is empty.", "properties": { "code": { "type": "string", @@ -7637,283 +9755,155 @@ } } }, - "Rule": { - "id": "Rule", + "PathMatcher": { + "id": "PathMatcher", "type": "object", - "description": "A rule to be applied in a Policy.", + "description": "A matcher for the path portion of the URL. The BackendService from the longest-matched rule will serve the URL. If no rule was matched, the default service will be used.", "properties": { - "action": { + "defaultService": { "type": "string", - "description": "Required", - "enum": [ - "ALLOW", - "ALLOW_WITH_LOG", - "DENY", - "DENY_WITH_LOG", - "LOG", - "NO_ACTION" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "" - ] - }, - "conditions": { - "type": "array", - "description": "Additional restrictions that must be met", - "items": { - "$ref": "Condition" - } + "description": "The full or partial URL to the BackendService resource. This will be used if none of the pathRules defined by this PathMatcher is matched by the URL's path portion. For example, the following are all valid URLs to a BackendService resource: \n- https://www.googleapis.com/compute/v1/projects/project/global/backendServices/backendService \n- compute/v1/projects/project/global/backendServices/backendService \n- global/backendServices/backendService" }, "description": { "type": "string", - "description": "Human-readable description of the rule." + "description": "An optional description of this resource. Provide this property when you create the resource." }, - "ins": { + "name": { + "type": "string", + "description": "The name to which this PathMatcher is referred by the HostRule." + }, + "pathRules": { "type": "array", - "description": "If one or more 'in' clauses are specified, the rule matches if the PRINCIPAL/AUTHORITY_SELECTOR is in at least one of these entries.", + "description": "The list of path rules.", + "items": { + "$ref": "PathRule" + } + } + } + }, + "PathRule": { + "id": "PathRule", + "type": "object", + "description": "A path-matching rule for a URL. 
If matched, will use the specified BackendService to handle the traffic arriving at this URL.", + "properties": { + "paths": { + "type": "array", + "description": "The list of path patterns to match. Each must start with / and the only place a * is allowed is at the end following a /. The string fed to the path matcher does not include any text after the first ? or #, and those chars are not allowed here.", "items": { "type": "string" } }, - "logConfigs": { + "service": { + "type": "string", + "description": "The URL of the BackendService resource if this rule is matched." + } + } + }, + "Policy": { + "id": "Policy", + "type": "object", + "description": "Defines an Identity and Access Management (IAM) policy. It is used to specify access control policies for Cloud Platform resources.\n\n\n\nA `Policy` consists of a list of `bindings`. A `Binding` binds a list of `members` to a `role`, where the members can be user accounts, Google groups, Google domains, and service accounts. A `role` is a named list of permissions defined by IAM.\n\n**Example**\n\n{ \"bindings\": [ { \"role\": \"roles/owner\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-other-app@appspot.gserviceaccount.com\", ] }, { \"role\": \"roles/viewer\", \"members\": [\"user:sean@example.com\"] } ] }\n\nFor a description of IAM and its features, see the [IAM developer's guide](https://cloud.google.com/iam).", + "properties": { + "auditConfigs": { "type": "array", - "description": "The config returned to callers of tech.iam.IAM.CheckPolicy for any entries that match the LOG action.", + "description": "Specifies cloud audit logging configuration for this policy.", "items": { - "$ref": "LogConfig" + "$ref": "AuditConfig" } }, - "notIns": { + "bindings": { "type": "array", - "description": "If one or more 'not_in' clauses are specified, the rule matches if the PRINCIPAL/AUTHORITY_SELECTOR is in none of the entries.", + "description": "Associates a list of `members` to a `role`. `bindings` with no members will result in an error.", "items": { - "type": "string" + "$ref": "Binding" } }, - "permissions": { + "etag": { + "type": "string", + "description": "`etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy.\n\nIf no `etag` is provided in the call to `setIamPolicy`, then the existing policy is overwritten blindly.", + "format": "byte" + }, + "iamOwned": { + "type": "boolean", + "description": "" + }, + "rules": { "type": "array", - "description": "A permission is a string of form '..' (e.g., 'storage.buckets.list'). A value of '*' matches all permissions, and a verb part of '*' (e.g., 'storage.buckets.*') matches all verbs.", + "description": "If more than one rule is specified, the rules are applied in the following manner: - All matching LOG rules are always applied. - If any DENY/DENY_WITH_LOG rule matches, permission is denied. Logging will be applied if one or more matching rule requires logging. - Otherwise, if any ALLOW/ALLOW_WITH_LOG rule matches, permission is granted. 
Logging will be applied if one or more matching rule requires logging. - Otherwise, if no rule applies, permission is denied.", "items": { - "type": "string" + "$ref": "Rule" } + }, + "version": { + "type": "integer", + "description": "Version of the `Policy`. The default version is 0.", + "format": "int32" } } }, - "SSLHealthCheck": { - "id": "SSLHealthCheck", + "Project": { + "id": "Project", "type": "object", + "description": "A Project resource. Projects can only be created in the Google Cloud Platform Console. Unless marked otherwise, values can only be modified in the console.", "properties": { - "port": { - "type": "integer", - "description": "The TCP port number for the health check request. The default value is 443. Valid values are 1 through 65535.", - "format": "int32" + "commonInstanceMetadata": { + "$ref": "Metadata", + "description": "Metadata key/value pairs available to all instances contained in this project. See Custom metadata for more information." }, - "portName": { + "creationTimestamp": { "type": "string", - "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence." + "description": "[Output Only] Creation timestamp in RFC3339 text format." }, - "proxyHeader": { + "defaultServiceAccount": { "type": "string", - "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.", - "enum": [ - "NONE", - "PROXY_V1" - ], - "enumDescriptions": [ - "", - "" - ] - }, - "request": { - "type": "string", - "description": "The application data to send once the SSL connection has been established (default value is empty). If both request and response are empty, the connection establishment alone will indicate health. The request data can only be ASCII." - }, - "response": { - "type": "string", - "description": "The bytes to match against the beginning of the response data. If left empty (the default value), any response will indicate health. The response data can only be ASCII." - } - } - }, - "Scheduling": { - "id": "Scheduling", - "type": "object", - "description": "Sets the scheduling options for an Instance.", - "properties": { - "automaticRestart": { - "type": "boolean", - "description": "Specifies whether the instance should be automatically restarted if it is terminated by Compute Engine (not terminated by a user). You can only set the automatic restart option for standard instances. Preemptible instances cannot be automatically restarted.\n\nBy default, this is set to true so an instance is automatically restarted if it is terminated by Compute Engine." - }, - "onHostMaintenance": { - "type": "string", - "description": "Defines the maintenance behavior for this instance. For standard instances, the default behavior is MIGRATE. For preemptible instances, the default and only possible behavior is TERMINATE. For more information, see Setting Instance Scheduling Options.", - "enum": [ - "MIGRATE", - "TERMINATE" - ], - "enumDescriptions": [ - "", - "" - ] - }, - "preemptible": { - "type": "boolean", - "description": "Defines whether the instance is preemptible. This can only be set during instance creation, it cannot be set or changed after the instance has been created." 
- } - } - }, - "SerialPortOutput": { - "id": "SerialPortOutput", - "type": "object", - "description": "An instance's serial console output.", - "properties": { - "contents": { - "type": "string", - "description": "[Output Only] The contents of the console output." - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of the resource. Always compute#serialPortOutput for serial port output.", - "default": "compute#serialPortOutput" - }, - "next": { - "type": "string", - "description": "[Output Only] The position of the next byte of content from the serial console output. Use this value in the next request as the start parameter.", - "format": "int64" - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." + "description": "[Output Only] Default service account used by VMs running in this project." }, - "start": { - "type": "string", - "description": "The starting byte position of the output that was returned. This should match the start parameter sent with the request. If the serial console output exceeds the size of the buffer, older output will be overwritten by newer content and the start values will be mismatched.", - "format": "int64" - } - } - }, - "ServiceAccount": { - "id": "ServiceAccount", - "type": "object", - "description": "A service account.", - "properties": { - "email": { + "description": { "type": "string", - "description": "Email address of the service account." + "description": "An optional textual description of the resource." }, - "scopes": { + "enabledFeatures": { "type": "array", - "description": "The list of scopes to be made available for this service account.", + "description": "Restricted features enabled for use on this project.", "items": { "type": "string" } - } - } - }, - "Snapshot": { - "id": "Snapshot", - "type": "object", - "description": "A persistent disk snapshot resource.", - "properties": { - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." - }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." - }, - "diskSizeGb": { - "type": "string", - "description": "[Output Only] Size of the snapshot, specified in GB.", - "format": "int64" }, "id": { "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server. This is not the project ID, and is just a unique ID used by Compute Engine to identify resources.", "format": "uint64" }, "kind": { "type": "string", - "description": "[Output Only] Type of the resource. Always compute#snapshot for Snapshot resources.", - "default": "compute#snapshot" + "description": "[Output Only] Type of the resource. Always compute#project for projects.", + "default": "compute#project" }, - "labelFingerprint": { + "name": { "type": "string", - "description": "A fingerprint for the labels being applied to this snapshot, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. 
You must always provide an up-to-date fingerprint hash in order to update or change labels.\n\nTo see the latest fingerprint, make a get() request to retrieve a snapshot.", - "format": "byte" - }, - "labels": { - "type": "object", - "description": "Labels to apply to this snapshot. These can be later modified by the setLabels method. Label values may be empty.", - "additionalProperties": { - "type": "string" - } + "description": "The project ID. For example: my-example-project. Use the project ID to make requests to Compute Engine." }, - "licenses": { + "quotas": { "type": "array", - "description": "[Output Only] A list of public visible licenses that apply to this snapshot. This can be because the original image had licenses attached (such as a Windows image).", + "description": "[Output Only] Quotas assigned to this project.", "items": { - "type": "string" + "$ref": "Quota" } }, - "name": { - "type": "string", - "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" - }, "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for the resource." }, - "snapshotEncryptionKey": { - "$ref": "CustomerEncryptionKey", - "description": "Encrypts the snapshot using a customer-supplied encryption key.\n\nAfter you encrypt a snapshot using a customer-supplied key, you must provide the same key if you use the image later For example, you must provide the encryption key when you create a disk from the encrypted snapshot in a future request.\n\nCustomer-supplied encryption keys do not protect access to metadata of the disk.\n\nIf you do not provide an encryption key when creating the snapshot, then the snapshot will be encrypted using an automatically generated key and you do not need to provide a key to use the snapshot later." - }, - "sourceDisk": { - "type": "string", - "description": "[Output Only] The source disk used to create this snapshot." - }, - "sourceDiskEncryptionKey": { - "$ref": "CustomerEncryptionKey", - "description": "The customer-supplied encryption key of the source disk. Required if the source disk is protected by a customer-supplied encryption key." - }, - "sourceDiskId": { - "type": "string", - "description": "[Output Only] The ID value of the disk used to create this snapshot. This value may be used to determine whether the snapshot was taken from the current or a previous instance of a given disk name." - }, - "status": { - "type": "string", - "description": "[Output Only] The status of the snapshot. This can be CREATING, DELETING, FAILED, READY, or UPLOADING.", - "enum": [ - "CREATING", - "DELETING", - "FAILED", - "READY", - "UPLOADING" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "" - ] - }, - "storageBytes": { - "type": "string", - "description": "[Output Only] A size of the the storage used by the snapshot. As snapshots share storage, this number is expected to change with snapshot creation/deletion.", - "format": "int64" + "usageExportLocation": { + "$ref": "UsageExportLocation", + "description": "The naming prefix for daily usage reports and the Google Cloud Storage bucket where they are stored." 
}, - "storageBytesStatus": { + "xpnProjectStatus": { "type": "string", - "description": "[Output Only] An indicator whether storageBytes is in a stable state or it is being adjusted as a result of shared storage reallocation. This status can either be UPDATING, meaning the size of the snapshot is being updated, or UP_TO_DATE, meaning the size of the snapshot is up-to-date.", + "description": "[Output Only] The role this project has in a shared VPC configuration. Currently only HOST projects are differentiated.", "enum": [ - "UPDATING", - "UP_TO_DATE" + "HOST", + "UNSPECIFIED_XPN_PROJECT_STATUS" ], "enumDescriptions": [ "", @@ -7922,206 +9912,260 @@ } } }, - "SnapshotList": { - "id": "SnapshotList", + "ProjectsDisableXpnResourceRequest": { + "id": "ProjectsDisableXpnResourceRequest", + "type": "object", + "properties": { + "xpnResource": { + "$ref": "XpnResourceId", + "description": "Service resource (a.k.a service project) ID." + } + } + }, + "ProjectsEnableXpnResourceRequest": { + "id": "ProjectsEnableXpnResourceRequest", + "type": "object", + "properties": { + "xpnResource": { + "$ref": "XpnResourceId", + "description": "Service resource (a.k.a service project) ID." + } + } + }, + "ProjectsGetXpnResources": { + "id": "ProjectsGetXpnResources", "type": "object", - "description": "Contains a list of Snapshot resources.", "properties": { - "id": { - "type": "string", - "description": "[Output Only] Unique identifier for the resource; defined by the server." - }, - "items": { - "type": "array", - "description": "A list of Snapshot resources.", - "items": { - "$ref": "Snapshot" - } - }, "kind": { "type": "string", - "description": "Type of resource.", - "default": "compute#snapshotList" + "description": "[Output Only] Type of resource. Always compute#projectsGetXpnResources for lists of service resources (a.k.a service projects)", + "default": "compute#projectsGetXpnResources" }, "nextPageToken": { "type": "string", "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." + "resources": { + "type": "array", + "description": "Service resources (a.k.a service projects) attached to this project as their shared VPC host.", + "items": { + "$ref": "XpnResourceId" + } } } }, - "SslCertificate": { - "id": "SslCertificate", + "ProjectsListXpnHostsRequest": { + "id": "ProjectsListXpnHostsRequest", "type": "object", - "description": "An SslCertificate resource. This resource provides a mechanism to upload an SSL key and certificate to the load balancer to serve secure connections from the user.", "properties": { - "certificate": { - "type": "string", - "description": "A local certificate file. The certificate must be in PEM format. The certificate chain must be no greater than 5 certs long. The chain must include at least one intermediate cert." - }, - "creationTimestamp": { + "organization": { "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." + "description": "Optional organization ID managed by Cloud Resource Manager, for which to list shared VPC host projects. If not specified, the organization will be inferred from the project." 
+ } + } + }, + "Quota": { + "id": "Quota", + "type": "object", + "description": "A quotas entry.", + "properties": { + "limit": { + "type": "number", + "description": "[Output Only] Quota limit for this metric.", + "format": "double" }, - "description": { + "metric": { "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." - }, - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64" - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of the resource. Always compute#sslCertificate for SSL certificates.", - "default": "compute#sslCertificate" - }, - "name": { - "type": "string", - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" - }, - "privateKey": { - "type": "string", - "description": "A write-only private key in PEM format. Only insert requests will include this field." + "description": "[Output Only] Name of the quota metric.", + "enum": [ + "AUTOSCALERS", + "BACKEND_BUCKETS", + "BACKEND_SERVICES", + "COMMITMENTS", + "CPUS", + "CPUS_ALL_REGIONS", + "DISKS_TOTAL_GB", + "FIREWALLS", + "FORWARDING_RULES", + "HEALTH_CHECKS", + "IMAGES", + "INSTANCES", + "INSTANCE_GROUPS", + "INSTANCE_GROUP_MANAGERS", + "INSTANCE_TEMPLATES", + "INTERCONNECTS", + "IN_USE_ADDRESSES", + "LOCAL_SSD_TOTAL_GB", + "NETWORKS", + "NVIDIA_K80_GPUS", + "NVIDIA_P100_GPUS", + "PREEMPTIBLE_CPUS", + "PREEMPTIBLE_LOCAL_SSD_GB", + "REGIONAL_AUTOSCALERS", + "REGIONAL_INSTANCE_GROUP_MANAGERS", + "ROUTERS", + "ROUTES", + "SECURITY_POLICIES", + "SECURITY_POLICY_RULES", + "SNAPSHOTS", + "SSD_TOTAL_GB", + "SSL_CERTIFICATES", + "STATIC_ADDRESSES", + "SUBNETWORKS", + "TARGET_HTTPS_PROXIES", + "TARGET_HTTP_PROXIES", + "TARGET_INSTANCES", + "TARGET_POOLS", + "TARGET_SSL_PROXIES", + "TARGET_TCP_PROXIES", + "TARGET_VPN_GATEWAYS", + "URL_MAPS", + "VPN_TUNNELS" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] }, - "selfLink": { - "type": "string", - "description": "[Output only] Server-defined URL for the resource." + "usage": { + "type": "number", + "description": "[Output Only] Current usage of this metric.", + "format": "double" } } }, - "SslCertificateList": { - "id": "SslCertificateList", + "Reference": { + "id": "Reference", "type": "object", - "description": "Contains a list of SslCertificate resources.", + "description": "Represents a reference to a resource.", "properties": { - "id": { + "kind": { "type": "string", - "description": "[Output Only] Unique identifier for the resource; defined by the server." - }, - "items": { - "type": "array", - "description": "A list of SslCertificate resources.", - "items": { - "$ref": "SslCertificate" - } + "description": "[Output Only] Type of the resource. 
Always compute#reference for references.", + "default": "compute#reference" }, - "kind": { + "referenceType": { "type": "string", - "description": "Type of resource.", - "default": "compute#sslCertificateList" + "description": "A description of the reference type with no implied semantics. Possible values include: \n- MEMBER_OF" }, - "nextPageToken": { + "referrer": { "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + "description": "URL of the resource which refers to the target." }, - "selfLink": { + "target": { "type": "string", - "description": "[Output Only] Server-defined URL for this resource." + "description": "URL of the resource to which this reference points." } } }, - "Subnetwork": { - "id": "Subnetwork", + "Region": { + "id": "Region", "type": "object", - "description": "A Subnetwork resource.", + "description": "Region resource.", "properties": { "creationTimestamp": { "type": "string", "description": "[Output Only] Creation timestamp in RFC3339 text format." }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource. This field can be set only at resource creation time." + "deprecated": { + "$ref": "DeprecationStatus", + "description": "[Output Only] The deprecation status associated with this region." }, - "gatewayAddress": { + "description": { "type": "string", - "description": "[Output Only] The gateway address for default routes to reach destination addresses outside this subnetwork. This field can be set only at resource creation time." + "description": "[Output Only] Textual description of the resource." }, "id": { "type": "string", "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", "format": "uint64" }, - "ipCidrRange": { - "type": "string", - "description": "The range of internal addresses that are owned by this subnetwork. Provide this property when you create the subnetwork. For example, 10.0.0.0/8 or 192.168.0.0/16. Ranges must be unique and non-overlapping within a network. Only IPv4 is supported. This field can be set only at resource creation time." - }, "kind": { "type": "string", - "description": "[Output Only] Type of the resource. Always compute#subnetwork for Subnetwork resources.", - "default": "compute#subnetwork" + "description": "[Output Only] Type of the resource. Always compute#region for regions.", + "default": "compute#region" }, "name": { "type": "string", - "description": "The name of the resource, provided by the client when initially creating the resource. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" - }, - "network": { - "type": "string", - "description": "The URL of the network to which this subnetwork belongs, provided by the client when initially creating the subnetwork. 
Only networks that are in the distributed mode can have subnetworks. This field can be set only at resource creation time." - }, - "privateIpGoogleAccess": { - "type": "boolean", - "description": "Whether the VMs in this subnet can access Google services without assigned external IP addresses. This field can be both set at resource creation time and updated using setPrivateIpGoogleAccess." - }, - "region": { - "type": "string", - "description": "URL of the region where the Subnetwork resides. This field can be set only at resource creation time." + "description": "[Output Only] Name of the resource." }, - "secondaryIpRanges": { + "quotas": { "type": "array", - "description": "An array of configurations for secondary IP ranges for VM instances contained in this subnetwork. The primary IP of such VM must belong to the primary ipCidrRange of the subnetwork. The alias IPs may belong to either primary or secondary ranges.", + "description": "[Output Only] Quotas assigned to this region.", "items": { - "$ref": "SubnetworkSecondaryRange" + "$ref": "Quota" } }, "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for the resource." - } - } - }, - "SubnetworkAggregatedList": { - "id": "SubnetworkAggregatedList", - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] Unique identifier for the resource; defined by the server." - }, - "items": { - "type": "object", - "description": "A list of SubnetworksScopedList resources.", - "additionalProperties": { - "$ref": "SubnetworksScopedList", - "description": "Name of the scope containing this set of Subnetworks." - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#subnetworkAggregatedList for aggregated lists of subnetworks.", - "default": "compute#subnetworkAggregatedList" }, - "nextPageToken": { + "status": { "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + "description": "[Output Only] Status of the region, either UP or DOWN.", + "enum": [ + "DOWN", + "UP" + ], + "enumDescriptions": [ + "", + "" + ] }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." + "zones": { + "type": "array", + "description": "[Output Only] A list of zones available in this region, in the form of resource URLs.", + "items": { + "type": "string" + } } } }, - "SubnetworkList": { - "id": "SubnetworkList", + "RegionAutoscalerList": { + "id": "RegionAutoscalerList", "type": "object", - "description": "Contains a list of Subnetwork resources.", + "description": "Contains a list of autoscalers.", "properties": { "id": { "type": "string", @@ -8129,15 +10173,15 @@ }, "items": { "type": "array", - "description": "A list of Subnetwork resources.", + "description": "A list of Autoscaler resources.", "items": { - "$ref": "Subnetwork" + "$ref": "Autoscaler" } }, "kind": { "type": "string", - "description": "[Output Only] Type of resource. 
Always compute#subnetworkList for lists of subnetworks.", - "default": "compute#subnetworkList" + "description": "Type of resource.", + "default": "compute#regionAutoscalerList" }, "nextPageToken": { "type": "string", @@ -8146,48 +10190,10 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "SubnetworkSecondaryRange": { - "id": "SubnetworkSecondaryRange", - "type": "object", - "description": "Represents a secondary IP range of a subnetwork.", - "properties": { - "ipCidrRange": { - "type": "string", - "description": "The range of IP addresses belonging to this subnetwork secondary range. Provide this property when you create the subnetwork. Ranges must be unique and non-overlapping with all primary and secondary IP ranges within a network. Only IPv4 is supported." - }, - "rangeName": { - "type": "string", - "description": "The name associated with this subnetwork secondary range, used when adding an alias IP range to a VM instance. The name must be 1-63 characters long, and comply with RFC1035. The name must be unique within the subnetwork." - } - } - }, - "SubnetworksExpandIpCidrRangeRequest": { - "id": "SubnetworksExpandIpCidrRangeRequest", - "type": "object", - "properties": { - "ipCidrRange": { - "type": "string", - "description": "The IP (in CIDR format or netmask) of internal addresses that are legal on this Subnetwork. This range should be disjoint from other subnetworks within this network. This range can only be larger than (i.e. a superset of) the range previously defined before the update." - } - } - }, - "SubnetworksScopedList": { - "id": "SubnetworksScopedList", - "type": "object", - "properties": { - "subnetworks": { - "type": "array", - "description": "List of subnetworks contained in this scope.", - "items": { - "$ref": "Subnetwork" - } }, "warning": { "type": "object", - "description": "An informational warning that appears when the list of addresses is empty.", + "description": "[Output Only] Informational warning message.", "properties": { "code": { "type": "string", @@ -8256,111 +10262,110 @@ } } }, - "SubnetworksSetPrivateIpGoogleAccessRequest": { - "id": "SubnetworksSetPrivateIpGoogleAccessRequest", - "type": "object", - "properties": { - "privateIpGoogleAccess": { - "type": "boolean" - } - } - }, - "TCPHealthCheck": { - "id": "TCPHealthCheck", - "type": "object", - "properties": { - "port": { - "type": "integer", - "description": "The TCP port number for the health check request. The default value is 80. Valid values are 1 through 65535.", - "format": "int32" - }, - "portName": { - "type": "string", - "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence." - }, - "proxyHeader": { - "type": "string", - "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.", - "enum": [ - "NONE", - "PROXY_V1" - ], - "enumDescriptions": [ - "", - "" - ] - }, - "request": { - "type": "string", - "description": "The application data to send once the TCP connection has been established (default value is empty). If both request and response are empty, the connection establishment alone will indicate health. The request data can only be ASCII." - }, - "response": { - "type": "string", - "description": "The bytes to match against the beginning of the response data. If left empty (the default value), any response will indicate health. 
The response data can only be ASCII." - } - } - }, - "Tags": { - "id": "Tags", + "RegionInstanceGroupList": { + "id": "RegionInstanceGroupList", "type": "object", - "description": "A set of instance tags.", + "description": "Contains a list of InstanceGroup resources.", "properties": { - "fingerprint": { + "id": { "type": "string", - "description": "Specifies a fingerprint for this request, which is essentially a hash of the metadata's contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update metadata. You must always provide an up-to-date fingerprint hash in order to update or change metadata.\n\nTo see the latest fingerprint, make get() request to the instance.", - "format": "byte" + "description": "[Output Only] Unique identifier for the resource; defined by the server." }, "items": { "type": "array", - "description": "An array of tags. Each tag must be 1-63 characters long, and comply with RFC1035.", + "description": "A list of InstanceGroup resources.", "items": { - "type": "string" + "$ref": "InstanceGroup" } - } - } - }, - "TargetHttpProxy": { - "id": "TargetHttpProxy", - "type": "object", - "description": "A TargetHttpProxy resource. This resource defines an HTTP proxy.", - "properties": { - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." - }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." - }, - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64" }, "kind": { "type": "string", - "description": "[Output Only] Type of resource. Always compute#targetHttpProxy for target HTTP proxies.", - "default": "compute#targetHttpProxy" + "description": "The resource type.", + "default": "compute#regionInstanceGroupList" }, - "name": { + "nextPageToken": { "type": "string", - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." }, "selfLink": { "type": "string", - "description": "[Output Only] Server-defined URL for the resource." + "description": "[Output Only] Server-defined URL for this resource." }, - "urlMap": { - "type": "string", - "description": "URL to the UrlMap resource that defines the mapping from URL to the BackendService." + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, - "TargetHttpProxyList": { - "id": "TargetHttpProxyList", + "RegionInstanceGroupManagerList": { + "id": "RegionInstanceGroupManagerList", "type": "object", - "description": "A list of TargetHttpProxy resources.", + "description": "Contains a list of managed instance groups.", "properties": { "id": { "type": "string", @@ -8368,15 +10373,15 @@ }, "items": { "type": "array", - "description": "A list of TargetHttpProxy resources.", + "description": "A list of InstanceGroupManager resources.", "items": { - "$ref": "TargetHttpProxy" + "$ref": "InstanceGroupManager" } }, "kind": { "type": "string", - "description": "Type of resource. Always compute#targetHttpProxyList for lists of target HTTP proxies.", - "default": "compute#targetHttpProxyList" + "description": "[Output Only] The resource type, which is always compute#instanceGroupManagerList for a list of managed instance groups that exist in th regional scope.", + "default": "compute#regionInstanceGroupManagerList" }, "nextPageToken": { "type": "string", @@ -8385,185 +10390,177 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "TargetHttpsProxiesSetSslCertificatesRequest": { - "id": "TargetHttpsProxiesSetSslCertificatesRequest", - "type": "object", - "properties": { - "sslCertificates": { - "type": "array", - "description": "New set of SslCertificate resources to associate with this TargetHttpsProxy resource. Currently exactly one SslCertificate resource must be specified.", - "items": { - "type": "string" - } - } - } - }, - "TargetHttpsProxy": { - "id": "TargetHttpsProxy", - "type": "object", - "description": "A TargetHttpsProxy resource. 
This resource defines an HTTPS proxy.", - "properties": { - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." - }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." - }, - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64" - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#targetHttpsProxy for target HTTPS proxies.", - "default": "compute#targetHttpsProxy" - }, - "name": { - "type": "string", - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for the resource." }, - "sslCertificates": { - "type": "array", - "description": "URLs to SslCertificate resources that are used to authenticate connections between users and the load balancer. Currently, exactly one SSL certificate must be specified.", + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." 
+ } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "RegionInstanceGroupManagersAbandonInstancesRequest": { + "id": "RegionInstanceGroupManagersAbandonInstancesRequest", + "type": "object", + "properties": { + "instances": { + "type": "array", + "description": "The URLs of one or more instances to abandon. This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", "items": { "type": "string" } - }, - "urlMap": { - "type": "string", - "description": "A fully-qualified or valid partial URL to the UrlMap resource that defines the mapping from URL to the BackendService. For example, the following are all valid URLs for specifying a URL map: \n- https://www.googleapis.compute/v1/projects/project/global/urlMaps/url-map \n- projects/project/global/urlMaps/url-map \n- global/urlMaps/url-map" } } }, - "TargetHttpsProxyList": { - "id": "TargetHttpsProxyList", + "RegionInstanceGroupManagersDeleteInstancesRequest": { + "id": "RegionInstanceGroupManagersDeleteInstancesRequest", "type": "object", - "description": "Contains a list of TargetHttpsProxy resources.", "properties": { - "id": { - "type": "string", - "description": "[Output Only] Unique identifier for the resource; defined by the server." - }, - "items": { + "instances": { "type": "array", - "description": "A list of TargetHttpsProxy resources.", + "description": "The URLs of one or more instances to delete. This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", "items": { - "$ref": "TargetHttpsProxy" + "type": "string" + } + } + } + }, + "RegionInstanceGroupManagersListInstancesResponse": { + "id": "RegionInstanceGroupManagersListInstancesResponse", + "type": "object", + "properties": { + "managedInstances": { + "type": "array", + "description": "List of managed instances.", + "items": { + "$ref": "ManagedInstance" } - }, - "kind": { - "type": "string", - "description": "Type of resource. Always compute#targetHttpsProxyList for lists of target HTTPS proxies.", - "default": "compute#targetHttpsProxyList" }, "nextPageToken": { "type": "string", "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." } } }, - "TargetInstance": { - "id": "TargetInstance", + "RegionInstanceGroupManagersRecreateRequest": { + "id": "RegionInstanceGroupManagersRecreateRequest", "type": "object", - "description": "A TargetInstance resource. This resource defines an endpoint instance that terminates traffic of certain protocols.", "properties": { - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." - }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." - }, - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. 
This identifier is defined by the server.", - "format": "uint64" - }, - "instance": { - "type": "string", - "description": "A URL to the virtual machine instance that handles traffic for this target instance. When creating a target instance, you can provide the fully-qualified URL or a valid partial URL to the desired virtual machine. For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/instance \n- projects/project/zones/zone/instances/instance \n- zones/zone/instances/instance" - }, - "kind": { - "type": "string", - "description": "[Output Only] The type of the resource. Always compute#targetInstance for target instances.", - "default": "compute#targetInstance" - }, - "name": { - "type": "string", - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" - }, - "natPolicy": { - "type": "string", - "description": "NAT option controlling how IPs are NAT'ed to the instance. Currently only NO_NAT (default value) is supported.", - "enum": [ - "NO_NAT" - ], - "enumDescriptions": [ - "" - ] - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for the resource." - }, - "zone": { - "type": "string", - "description": "[Output Only] URL of the zone where the target instance resides." + "instances": { + "type": "array", + "description": "The URLs of one or more instances to recreate. This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", + "items": { + "type": "string" + } } } }, - "TargetInstanceAggregatedList": { - "id": "TargetInstanceAggregatedList", + "RegionInstanceGroupManagersSetAutoHealingRequest": { + "id": "RegionInstanceGroupManagersSetAutoHealingRequest", "type": "object", "properties": { - "id": { - "type": "string", - "description": "[Output Only] Unique identifier for the resource; defined by the server." - }, - "items": { - "type": "object", - "description": "A list of TargetInstance resources.", - "additionalProperties": { - "$ref": "TargetInstancesScopedList", - "description": "Name of the scope containing this set of target instances." + "autoHealingPolicies": { + "type": "array", + "items": { + "$ref": "InstanceGroupManagerAutoHealingPolicy" } - }, - "kind": { - "type": "string", - "description": "Type of resource.", - "default": "compute#targetInstanceAggregatedList" - }, - "nextPageToken": { + } + } + }, + "RegionInstanceGroupManagersSetTargetPoolsRequest": { + "id": "RegionInstanceGroupManagersSetTargetPoolsRequest", + "type": "object", + "properties": { + "fingerprint": { "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + "description": "Fingerprint of the target pools information, which is a hash of the contents. 
This field is used for optimistic locking when you update the target pool entries. This field is optional.", + "format": "byte" }, - "selfLink": { + "targetPools": { + "type": "array", + "description": "The URL of all TargetPool resources to which instances in the instanceGroup field are added. The target pools automatically apply to all of the instances in the managed instance group.", + "items": { + "type": "string" + } + } + } + }, + "RegionInstanceGroupManagersSetTemplateRequest": { + "id": "RegionInstanceGroupManagersSetTemplateRequest", + "type": "object", + "properties": { + "instanceTemplate": { "type": "string", - "description": "[Output Only] Server-defined URL for this resource." + "description": "URL of the InstanceTemplate resource from which all new instances will be created." } } }, - "TargetInstanceList": { - "id": "TargetInstanceList", + "RegionInstanceGroupsListInstances": { + "id": "RegionInstanceGroupsListInstances", "type": "object", - "description": "Contains a list of TargetInstance resources.", "properties": { "id": { "type": "string", @@ -8571,15 +10568,15 @@ }, "items": { "type": "array", - "description": "A list of TargetInstance resources.", + "description": "A list of InstanceWithNamedPorts resources.", "items": { - "$ref": "TargetInstance" + "$ref": "InstanceWithNamedPorts" } }, "kind": { "type": "string", - "description": "Type of resource.", - "default": "compute#targetInstanceList" + "description": "The resource type.", + "default": "compute#regionInstanceGroupsListInstances" }, "nextPageToken": { "type": "string", @@ -8588,23 +10585,10 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "TargetInstancesScopedList": { - "id": "TargetInstancesScopedList", - "type": "object", - "properties": { - "targetInstances": { - "type": "array", - "description": "List of target instances contained in this scope.", - "items": { - "$ref": "TargetInstance" - } }, "warning": { "type": "object", - "description": "Informational warning which replaces the list of addresses when the list is empty.", + "description": "[Output Only] Informational warning message.", "properties": { "code": { "type": "string", @@ -8673,137 +10657,51 @@ } } }, - "TargetPool": { - "id": "TargetPool", - "type": "object", - "description": "A TargetPool resource. This resource defines a pool of instances, an associated HttpHealthCheck resource, and the fallback target pool.", + "RegionInstanceGroupsListInstancesRequest": { + "id": "RegionInstanceGroupsListInstancesRequest", + "type": "object", "properties": { - "backupPool": { - "type": "string", - "description": "This field is applicable only when the containing target pool is serving a forwarding rule as the primary pool, and its failoverRatio field is properly set to a value between [0, 1].\n\nbackupPool and failoverRatio together define the fallback behavior of the primary target pool: if the ratio of the healthy instances in the primary pool is at or below failoverRatio, traffic arriving at the load-balanced IP will be directed to the backup pool.\n\nIn case where failoverRatio and backupPool are not set, or all the instances in the backup pool are unhealthy, the traffic will be directed back to the primary pool in the \"force\" mode, where traffic will be spread to the healthy instances with the best effort, or to all instances when no instance is healthy." - }, - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." 
- }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." - }, - "failoverRatio": { - "type": "number", - "description": "This field is applicable only when the containing target pool is serving a forwarding rule as the primary pool (i.e., not as a backup pool to some other target pool). The value of the field must be in [0, 1].\n\nIf set, backupPool must also be set. They together define the fallback behavior of the primary target pool: if the ratio of the healthy instances in the primary pool is at or below this number, traffic arriving at the load-balanced IP will be directed to the backup pool.\n\nIn case where failoverRatio is not set or all the instances in the backup pool are unhealthy, the traffic will be directed back to the primary pool in the \"force\" mode, where traffic will be spread to the healthy instances with the best effort, or to all instances when no instance is healthy.", - "format": "float" - }, - "healthChecks": { - "type": "array", - "description": "The URL of the HttpHealthCheck resource. A member instance in this pool is considered healthy if and only if the health checks pass. An empty list means all member instances will be considered healthy at all times. Only HttpHealthChecks are supported. Only one health check may be specified.", - "items": { - "type": "string" - } - }, - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64" - }, - "instances": { - "type": "array", - "description": "A list of resource URLs to the virtual machine instances serving this pool. They must live in zones contained in the same region as this pool.", - "items": { - "type": "string" - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of the resource. Always compute#targetPool for target pools.", - "default": "compute#targetPool" - }, - "name": { - "type": "string", - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" - }, - "region": { - "type": "string", - "description": "[Output Only] URL of the region where the target pool resides." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for the resource." - }, - "sessionAffinity": { + "instanceState": { "type": "string", - "description": "Sesssion affinity option, must be one of the following values:\nNONE: Connections from the same client IP may go to any instance in the pool.\nCLIENT_IP: Connections from the same client IP will go to the same instance in the pool while that instance remains healthy.\nCLIENT_IP_PROTO: Connections from the same client IP with the same IP protocol will go to the same instance in the pool while that instance remains healthy.", + "description": "Instances in which state should be returned. Valid options are: 'ALL', 'RUNNING'. 
By default, it lists all instances.", "enum": [ - "CLIENT_IP", - "CLIENT_IP_PORT_PROTO", - "CLIENT_IP_PROTO", - "GENERATED_COOKIE", - "NONE" + "ALL", + "RUNNING" ], "enumDescriptions": [ - "", - "", - "", "", "" ] - } - } - }, - "TargetPoolAggregatedList": { - "id": "TargetPoolAggregatedList", - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] Unique identifier for the resource; defined by the server." - }, - "items": { - "type": "object", - "description": "A list of TargetPool resources.", - "additionalProperties": { - "$ref": "TargetPoolsScopedList", - "description": "Name of the scope containing this set of target pools." - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#targetPoolAggregatedList for aggregated lists of target pools.", - "default": "compute#targetPoolAggregatedList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." }, - "selfLink": { + "portName": { "type": "string", - "description": "[Output Only] Server-defined URL for this resource." + "description": "Name of port user is interested in. It is optional. If it is set, only information about this ports will be returned. If it is not set, all the named ports will be returned. Always lists all instances.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" } } }, - "TargetPoolInstanceHealth": { - "id": "TargetPoolInstanceHealth", + "RegionInstanceGroupsSetNamedPortsRequest": { + "id": "RegionInstanceGroupsSetNamedPortsRequest", "type": "object", "properties": { - "healthStatus": { + "fingerprint": { + "type": "string", + "description": "The fingerprint of the named ports information for this instance group. Use this optional property to prevent conflicts when multiple users change the named ports settings concurrently. Obtain the fingerprint with the instanceGroups.get method. Then, include the fingerprint in your request to ensure that you do not overwrite changes that were applied from another concurrent request.", + "format": "byte" + }, + "namedPorts": { "type": "array", + "description": "The list of named ports to set for this instance group.", "items": { - "$ref": "HealthStatus" + "$ref": "NamedPort" } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#targetPoolInstanceHealth when checking the health of an instance.", - "default": "compute#targetPoolInstanceHealth" } } }, - "TargetPoolList": { - "id": "TargetPoolList", + "RegionList": { + "id": "RegionList", "type": "object", - "description": "Contains a list of TargetPool resources.", + "description": "Contains a list of region resources.", "properties": { "id": { "type": "string", @@ -8811,15 +10709,15 @@ }, "items": { "type": "array", - "description": "A list of TargetPool resources.", + "description": "A list of Region resources.", "items": { - "$ref": "TargetPool" + "$ref": "Region" } }, "kind": { "type": "string", - "description": "[Output Only] Type of resource. Always compute#targetPoolList for lists of target pools.", - "default": "compute#targetPoolList" + "description": "[Output Only] Type of resource. 
Always compute#regionList for lists of regions.", + "default": "compute#regionList" }, "nextPageToken": { "type": "string", @@ -8828,75 +10726,10 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "TargetPoolsAddHealthCheckRequest": { - "id": "TargetPoolsAddHealthCheckRequest", - "type": "object", - "properties": { - "healthChecks": { - "type": "array", - "description": "The HttpHealthCheck to add to the target pool.", - "items": { - "$ref": "HealthCheckReference" - } - } - } - }, - "TargetPoolsAddInstanceRequest": { - "id": "TargetPoolsAddInstanceRequest", - "type": "object", - "properties": { - "instances": { - "type": "array", - "description": "A full or partial URL to an instance to add to this target pool. This can be a full or partial URL. For example, the following are valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project-id/zones/zone/instances/instance-name \n- projects/project-id/zones/zone/instances/instance-name \n- zones/zone/instances/instance-name", - "items": { - "$ref": "InstanceReference" - } - } - } - }, - "TargetPoolsRemoveHealthCheckRequest": { - "id": "TargetPoolsRemoveHealthCheckRequest", - "type": "object", - "properties": { - "healthChecks": { - "type": "array", - "description": "Health check URL to be removed. This can be a full or valid partial URL. For example, the following are valid URLs: \n- https://www.googleapis.com/compute/beta/projects/project/global/httpHealthChecks/health-check \n- projects/project/global/httpHealthChecks/health-check \n- global/httpHealthChecks/health-check", - "items": { - "$ref": "HealthCheckReference" - } - } - } - }, - "TargetPoolsRemoveInstanceRequest": { - "id": "TargetPoolsRemoveInstanceRequest", - "type": "object", - "properties": { - "instances": { - "type": "array", - "description": "URLs of the instances to be removed from target pool.", - "items": { - "$ref": "InstanceReference" - } - } - } - }, - "TargetPoolsScopedList": { - "id": "TargetPoolsScopedList", - "type": "object", - "properties": { - "targetPools": { - "type": "array", - "description": "List of target pools contained in this scope.", - "items": { - "$ref": "TargetPool" - } }, "warning": { "type": "object", - "description": "Informational warning which replaces the list of addresses when the list is empty.", + "description": "[Output Only] Informational warning message.", "properties": { "code": { "type": "string", @@ -8965,60 +10798,64 @@ } } }, - "TargetReference": { - "id": "TargetReference", - "type": "object", - "properties": { - "target": { - "type": "string" - } - } - }, - "TargetSslProxiesSetBackendServiceRequest": { - "id": "TargetSslProxiesSetBackendServiceRequest", + "RegionSetLabelsRequest": { + "id": "RegionSetLabelsRequest", "type": "object", "properties": { - "service": { + "labelFingerprint": { "type": "string", - "description": "The URL of the new BackendService resource for the targetSslProxy." + "description": "The fingerprint of the previous set of labels for this resource, used to detect conflicts. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels. 
Make a get() request to the resource to get the latest fingerprint.", + "format": "byte" + }, + "labels": { + "type": "object", + "description": "The labels to set for this resource.", + "additionalProperties": { + "type": "string" + } } } }, - "TargetSslProxiesSetProxyHeaderRequest": { - "id": "TargetSslProxiesSetProxyHeaderRequest", + "ResourceCommitment": { + "id": "ResourceCommitment", "type": "object", + "description": "Commitment for a particular resource (a Commitment is composed of one or more of these).", "properties": { - "proxyHeader": { + "amount": { "type": "string", - "description": "The new type of proxy header to append before sending data to the backend. NONE or PROXY_V1 are allowed.", + "description": "The amount of the resource purchased (in a type-dependent unit, such as bytes). For vCPUs, this can just be an integer. For memory, this must be provided in MB. Memory must be a multiple of 256 MB, with up to 6.5GB of memory per every vCPU.", + "format": "int64" + }, + "type": { + "type": "string", + "description": "Type of resource for which this commitment applies. Possible values are VCPU and MEMORY", "enum": [ - "NONE", - "PROXY_V1" + "MEMORY", + "UNSPECIFIED", + "VCPU" ], "enumDescriptions": [ + "", "", "" ] } } }, - "TargetSslProxiesSetSslCertificatesRequest": { - "id": "TargetSslProxiesSetSslCertificatesRequest", + "ResourceGroupReference": { + "id": "ResourceGroupReference", "type": "object", "properties": { - "sslCertificates": { - "type": "array", - "description": "New set of URLs to SslCertificate resources to associate with this TargetSslProxy. Currently exactly one ssl certificate must be specified.", - "items": { - "type": "string" - } + "group": { + "type": "string", + "description": "A URI referencing one of the instance groups listed in the backend service." } } }, - "TargetSslProxy": { - "id": "TargetSslProxy", + "Route": { + "id": "Route", "type": "object", - "description": "A TargetSslProxy resource. This resource defines an SSL proxy.", + "description": "Represents a Route resource. A route specifies how certain packets should be handled by the network. Routes are associated with instances by tags and the set of routes for a particular instance is called its routing table.\n\nFor each packet leaving an instance, the system searches that instance's routing table for a single best matching route. Routes match packets by destination IP address, preferring smaller or more specific ranges over larger ones. If there is a tie, the system selects the route with the smallest priority value. If there is still a tie, it uses the layer three and four packet headers to select just one of the remaining matching routes. The packet is then forwarded as specified by the nextHop field of the winning route - either to another instance destination, an instance gateway, or a Google Compute Engine-operated gateway.\n\nPackets that do not match any route in the sending instance's routing table are dropped.", "properties": { "creationTimestamp": { "type": "string", @@ -9028,6 +10865,15 @@ "type": "string", "description": "An optional description of this resource. Provide this property when you create the resource." }, + "destRange": { + "type": "string", + "description": "The destination range of outgoing packets that this route applies to. Only IPv4 is supported.", + "annotations": { + "required": [ + "compute.routes.insert" + ] + } + }, "id": { "type": "string", "description": "[Output Only] The unique identifier for the resource. 
This identifier is defined by the server.", @@ -9035,47 +10881,156 @@ }, "kind": { "type": "string", - "description": "[Output Only] Type of the resource. Always compute#targetSslProxy for target SSL proxies.", - "default": "compute#targetSslProxy" + "description": "[Output Only] Type of this resource. Always compute#routes for Route resources.", + "default": "compute#route" }, "name": { "type": "string", "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "annotations": { + "required": [ + "compute.routes.insert" + ] + } }, - "proxyHeader": { + "network": { "type": "string", - "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.", - "enum": [ - "NONE", - "PROXY_V1" - ], - "enumDescriptions": [ - "", - "" - ] + "description": "Fully-qualified URL of the network that this route applies to.", + "annotations": { + "required": [ + "compute.routes.insert" + ] + } }, - "selfLink": { + "nextHopGateway": { "type": "string", - "description": "[Output Only] Server-defined URL for the resource." + "description": "The URL to a gateway that should handle matching packets. You can only specify the internet gateway using a full or partial valid URL: projects/\u003cproject-id\u003e/global/gateways/default-internet-gateway" }, - "service": { + "nextHopInstance": { "type": "string", - "description": "URL to the BackendService resource." + "description": "The URL to an instance that should handle matching packets. You can specify this as a full or partial URL. For example:\nhttps://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/" }, - "sslCertificates": { + "nextHopIp": { + "type": "string", + "description": "The network IP address of an instance that should handle matching packets. Only IPv4 is supported." + }, + "nextHopNetwork": { + "type": "string", + "description": "The URL of the local network if it should handle matching packets." + }, + "nextHopPeering": { + "type": "string", + "description": "[Output Only] The network peering name that should handle matching packets, which should conform to RFC1035." + }, + "nextHopVpnTunnel": { + "type": "string", + "description": "The URL to a VpnTunnel that should handle matching packets." + }, + "priority": { + "type": "integer", + "description": "The priority of this route. Priority is used to break ties in cases where there is more than one matching route of equal prefix length. In the case of two routes with equal prefix length, the one with the lowest-numbered priority value wins. Default value is 1000. Valid range is 0 through 65535.", + "format": "uint32", + "annotations": { + "required": [ + "compute.routes.insert" + ] + } + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined fully-qualified URL for this resource." + }, + "tags": { "type": "array", - "description": "URLs to SslCertificate resources that are used to authenticate connections to Backends. 
Currently exactly one SSL certificate must be specified.", + "description": "A list of instance tags to which this route applies.", "items": { "type": "string" + }, + "annotations": { + "required": [ + "compute.routes.insert" + ] + } + }, + "warnings": { + "type": "array", + "description": "[Output Only] If potential misconfigurations are detected for this route, this field will be populated with warning messages.", + "items": { + "type": "object", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } } }, - "TargetSslProxyList": { - "id": "TargetSslProxyList", + "RouteList": { + "id": "RouteList", "type": "object", - "description": "Contains a list of TargetSslProxy resources.", + "description": "Contains a list of Route resources.", "properties": { "id": { "type": "string", @@ -9083,15 +11038,15 @@ }, "items": { "type": "array", - "description": "A list of TargetSslProxy resources.", + "description": "A list of Route resources.", "items": { - "$ref": "TargetSslProxy" + "$ref": "Route" } }, "kind": { "type": "string", "description": "Type of resource.", - "default": "compute#targetSslProxyList" + "default": "compute#routeList" }, "nextPageToken": { "type": "string", @@ -9100,123 +11055,94 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, - "TargetTcpProxiesSetBackendServiceRequest": { - "id": "TargetTcpProxiesSetBackendServiceRequest", + "Router": { + "id": "Router", "type": "object", + "description": "Router resource.", "properties": { - "service": { - "type": "string", - "description": "The URL of the new BackendService resource for the targetTcpProxy." - } - } - }, - "TargetTcpProxiesSetProxyHeaderRequest": { - "id": "TargetTcpProxiesSetProxyHeaderRequest", - "type": "object", - "properties": { - "proxyHeader": { - "type": "string", - "description": "The new type of proxy header to append before sending data to the backend. NONE or PROXY_V1 are allowed.", - "enum": [ - "NONE", - "PROXY_V1" - ], - "enumDescriptions": [ - "", - "" - ] - } - } - }, - "TargetTcpProxy": { - "id": "TargetTcpProxy", - "type": "object", - "description": "A TargetTcpProxy resource. This resource defines a TCP proxy.", - "properties": { - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." - }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." - }, - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64" - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of the resource. Always compute#targetTcpProxy for target TCP proxies.", - "default": "compute#targetTcpProxy" - }, - "name": { - "type": "string", - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" - }, - "proxyHeader": { - "type": "string", - "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.", - "enum": [ - "NONE", - "PROXY_V1" - ], - "enumDescriptions": [ - "", - "" - ] - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for the resource." - }, - "service": { - "type": "string", - "description": "URL to the BackendService resource." - } - } - }, - "TargetTcpProxyList": { - "id": "TargetTcpProxyList", - "type": "object", - "description": "Contains a list of TargetTcpProxy resources.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] Unique identifier for the resource; defined by the server." + "bgp": { + "$ref": "RouterBgp", + "description": "BGP information specific to this router." }, - "items": { + "bgpPeers": { "type": "array", - "description": "A list of TargetTcpProxy resources.", + "description": "BGP information that needs to be configured into the routing stack to establish the BGP peering. It must specify peer ASN and either interface name, IP, or peer IP. Please refer to RFC4273.", "items": { - "$ref": "TargetTcpProxy" + "$ref": "RouterBgpPeer" } }, - "kind": { - "type": "string", - "description": "Type of resource.", - "default": "compute#targetTcpProxyList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "TargetVpnGateway": { - "id": "TargetVpnGateway", - "type": "object", - "description": "Represents a Target VPN gateway resource.", - "properties": { "creationTimestamp": { "type": "string", "description": "[Output Only] Creation timestamp in RFC3339 text format." @@ -9225,22 +11151,22 @@ "type": "string", "description": "An optional description of this resource. Provide this property when you create the resource." }, - "forwardingRules": { - "type": "array", - "description": "[Output Only] A list of URLs to the ForwardingRule resources. ForwardingRules are created using compute.forwardingRules.insert and associated to a VPN gateway.", - "items": { - "type": "string" - } - }, "id": { "type": "string", "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", "format": "uint64" }, + "interfaces": { + "type": "array", + "description": "Router interfaces. Each interface requires either one linked resource (e.g. linkedVpnTunnel), or IP address and IP address range (e.g. ipRange), or both.", + "items": { + "$ref": "RouterInterface" + } + }, "kind": { "type": "string", - "description": "[Output Only] Type of resource. 
Always compute#targetVpnGateway for target VPN gateways.", - "default": "compute#targetVpnGateway" + "description": "[Output Only] Type of resource. Always compute#router for routers.", + "default": "compute#router" }, "name": { "type": "string", @@ -9248,55 +11174,33 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "annotations": { "required": [ - "compute.targetVpnGateways.insert" + "compute.routers.insert" ] } }, "network": { "type": "string", - "description": "URL of the network to which this VPN gateway is attached. Provided by the client when the VPN gateway is created.", + "description": "URI of the network to which this router belongs.", "annotations": { "required": [ - "compute.targetVpnGateways.insert" + "compute.routers.insert" ] } }, "region": { "type": "string", - "description": "[Output Only] URL of the region where the target VPN gateway resides." + "description": "[Output Only] URI of the region where the router resides." }, "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for the resource." - }, - "status": { - "type": "string", - "description": "[Output Only] The status of the VPN gateway.", - "enum": [ - "CREATING", - "DELETING", - "FAILED", - "READY" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ] - }, - "tunnels": { - "type": "array", - "description": "[Output Only] A list of URLs to VpnTunnel resources. VpnTunnels are created using compute.vpntunnels.insert method and associated to a VPN gateway.", - "items": { - "type": "string" - } } } }, - "TargetVpnGatewayAggregatedList": { - "id": "TargetVpnGatewayAggregatedList", + "RouterAggregatedList": { + "id": "RouterAggregatedList", "type": "object", + "description": "Contains a list of routers.", "properties": { "id": { "type": "string", @@ -9304,47 +11208,16 @@ }, "items": { "type": "object", - "description": "A list of TargetVpnGateway resources.", + "description": "A list of Router resources.", "additionalProperties": { - "$ref": "TargetVpnGatewaysScopedList", - "description": "[Output Only] Name of the scope containing this set of target VPN gateways." - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#targetVpnGateway for target VPN gateways.", - "default": "compute#targetVpnGatewayAggregatedList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "TargetVpnGatewayList": { - "id": "TargetVpnGatewayList", - "type": "object", - "description": "Contains a list of TargetVpnGateway resources.", - "properties": { - "id": { - "type": "string", - "description": "[Output Only] Unique identifier for the resource; defined by the server." - }, - "items": { - "type": "array", - "description": "A list of TargetVpnGateway resources.", - "items": { - "$ref": "TargetVpnGateway" + "$ref": "RoutersScopedList", + "description": "Name of the scope containing this set of routers." } }, "kind": { "type": "string", - "description": "[Output Only] Type of resource. 
Always compute#targetVpnGateway for target VPN gateways.", - "default": "compute#targetVpnGatewayList" + "description": "Type of resource.", + "default": "compute#routerAggregatedList" }, "nextPageToken": { "type": "string", @@ -9353,23 +11226,10 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "TargetVpnGatewaysScopedList": { - "id": "TargetVpnGatewaysScopedList", - "type": "object", - "properties": { - "targetVpnGateways": { - "type": "array", - "description": "[Output Only] List of target vpn gateways contained in this scope.", - "items": { - "$ref": "TargetVpnGateway" - } }, "warning": { "type": "object", - "description": "[Output Only] Informational warning which replaces the list of addresses when the list is empty.", + "description": "[Output Only] Informational warning message.", "properties": { "code": { "type": "string", @@ -9438,141 +11298,77 @@ } } }, - "TestFailure": { - "id": "TestFailure", - "type": "object", - "properties": { - "actualService": { - "type": "string" - }, - "expectedService": { - "type": "string" - }, - "host": { - "type": "string" - }, - "path": { - "type": "string" - } - } - }, - "TestPermissionsRequest": { - "id": "TestPermissionsRequest", + "RouterBgp": { + "id": "RouterBgp", "type": "object", "properties": { - "permissions": { - "type": "array", - "description": "The set of permissions to check for the 'resource'. Permissions with wildcards (such as '*' or 'storage.*') are not allowed.", - "items": { - "type": "string" - } + "asn": { + "type": "integer", + "description": "Local BGP Autonomous System Number (ASN). Must be an RFC6996 private ASN, either 16-bit or 32-bit. The value will be fixed for this router resource. All VPN tunnels that link to this router will have the same local ASN.", + "format": "uint32" } } }, - "TestPermissionsResponse": { - "id": "TestPermissionsResponse", + "RouterBgpPeer": { + "id": "RouterBgpPeer", "type": "object", "properties": { - "permissions": { - "type": "array", - "description": "A subset of `TestPermissionsRequest.permissions` that the caller is allowed.", - "items": { - "type": "string" - } - } - } - }, - "UDPHealthCheck": { - "id": "UDPHealthCheck", - "type": "object", - "properties": { - "port": { + "advertisedRoutePriority": { "type": "integer", - "description": "The UDP port number for the health check request. Valid values are 1 through 65535.", - "format": "int32" + "description": "The priority of routes advertised to this BGP peer. In the case where there is more than one matching route of maximum length, the routes with lowest priority value win.", + "format": "uint32" }, - "portName": { + "interfaceName": { "type": "string", - "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence." + "description": "Name of the interface the BGP peer is associated with." }, - "request": { + "ipAddress": { "type": "string", - "description": "Raw data of request to send in payload of UDP packet. It is an error if this is empty. The request data can only be ASCII." + "description": "IP address of the interface inside Google Cloud Platform. Only IPv4 is supported." }, - "response": { + "name": { "type": "string", - "description": "The bytes to match against the beginning of the response data. It is an error if this is empty. The response data can only be ASCII." + "description": "Name of this BGP peer. 
The name must be 1-63 characters long and comply with RFC1035.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "peerAsn": { + "type": "integer", + "description": "Peer BGP Autonomous System Number (ASN). For VPN use case, this value can be different for every tunnel.", + "format": "uint32" + }, + "peerIpAddress": { + "type": "string", + "description": "IP address of the BGP interface outside Google cloud. Only IPv4 is supported." } } }, - "UrlMap": { - "id": "UrlMap", + "RouterInterface": { + "id": "RouterInterface", "type": "object", - "description": "A UrlMap resource. This resource defines the mapping from URL to the BackendService resource, based on the \"longest-match\" of the URL's host and path.", "properties": { - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." - }, - "defaultService": { - "type": "string", - "description": "The URL of the BackendService resource if none of the hostRules match." - }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." - }, - "fingerprint": { + "ipRange": { "type": "string", - "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a UrlMap. An up-to-date fingerprint must be provided in order to update the UrlMap.", - "format": "byte" - }, - "hostRules": { - "type": "array", - "description": "The list of HostRules to use against the URL.", - "items": { - "$ref": "HostRule" - } + "description": "IP address and range of the interface. The IP range must be in the RFC3927 link-local IP space. The value must be a CIDR-formatted string, for example: 169.254.0.1/30. NOTE: Do not truncate the address as it represents the IP address of the interface." }, - "id": { + "linkedInterconnectAttachment": { "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64" + "description": "URI of the linked interconnect attachment. It must be in the same region as the router. Each interface can have at most one linked resource and it could either be a VPN Tunnel or an interconnect attachment." }, - "kind": { + "linkedVpnTunnel": { "type": "string", - "description": "[Output Only] Type of the resource. Always compute#urlMaps for url maps.", - "default": "compute#urlMap" + "description": "URI of the linked VPN tunnel. It must be in the same region as the router. Each interface can have at most one linked resource and it could either be a VPN Tunnel or an interconnect attachment." }, "name": { "type": "string", - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "description": "Name of this interface entry. The name must be 1-63 characters long and comply with RFC1035.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" 
- }, - "pathMatchers": { - "type": "array", - "description": "The list of named PathMatchers to use against the URL.", - "items": { - "$ref": "PathMatcher" - } - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for the resource." - }, - "tests": { - "type": "array", - "description": "The list of expected URL mappings. Request to update this UrlMap will succeed only if all of the test cases pass.", - "items": { - "$ref": "UrlMapTest" - } } } }, - "UrlMapList": { - "id": "UrlMapList", + "RouterList": { + "id": "RouterList", "type": "object", - "description": "Contains a list of UrlMap resources.", + "description": "Contains a list of Router resources.", "properties": { "id": { "type": "string", @@ -9580,15 +11376,15 @@ }, "items": { "type": "array", - "description": "A list of UrlMap resources.", + "description": "A list of Router resources.", "items": { - "$ref": "UrlMap" + "$ref": "Router" } }, "kind": { "type": "string", - "description": "Type of resource.", - "default": "compute#urlMapList" + "description": "[Output Only] Type of resource. Always compute#router for routers.", + "default": "compute#routerList" }, "nextPageToken": { "type": "string", @@ -9597,300 +11393,206 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "UrlMapReference": { - "id": "UrlMapReference", - "type": "object", - "properties": { - "urlMap": { - "type": "string" - } - } - }, - "UrlMapTest": { - "id": "UrlMapTest", - "type": "object", - "description": "Message for the expected URL mappings.", - "properties": { - "description": { - "type": "string", - "description": "Description of this test case." }, - "host": { - "type": "string", - "description": "Host portion of the URL." - }, - "path": { - "type": "string", - "description": "Path portion of the URL." - }, - "service": { - "type": "string", - "description": "Expected BackendService resource the given URL should be mapped to." + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, - "UrlMapValidationResult": { - "id": "UrlMapValidationResult", + "RouterStatus": { + "id": "RouterStatus", "type": "object", - "description": "Message representing the validation result for a UrlMap.", "properties": { - "loadErrors": { + "bestRoutes": { "type": "array", + "description": "Best routes for this router's network.", "items": { - "type": "string" + "$ref": "Route" } }, - "loadSucceeded": { - "type": "boolean", - "description": "Whether the given UrlMap can be successfully loaded. If false, 'loadErrors' indicates the reasons." + "bestRoutesForRouter": { + "type": "array", + "description": "Best routes learned by this router.", + "items": { + "$ref": "Route" + } }, - "testFailures": { + "bgpPeerStatus": { "type": "array", "items": { - "$ref": "TestFailure" + "$ref": "RouterStatusBgpPeerStatus" } }, - "testPassed": { - "type": "boolean", - "description": "If successfully loaded, this field indicates whether the test passed. If false, 'testFailures's indicate the reason of failure." - } - } - }, - "UrlMapsValidateRequest": { - "id": "UrlMapsValidateRequest", - "type": "object", - "properties": { - "resource": { - "$ref": "UrlMap", - "description": "Content of the UrlMap to be validated." + "network": { + "type": "string", + "description": "URI of the network to which this router belongs." } } }, - "UrlMapsValidateResponse": { - "id": "UrlMapsValidateResponse", + "RouterStatusBgpPeerStatus": { + "id": "RouterStatusBgpPeerStatus", "type": "object", "properties": { - "result": { - "$ref": "UrlMapValidationResult" - } - } - }, - "UsageExportLocation": { - "id": "UsageExportLocation", - "type": "object", - "description": "The location in Cloud Storage and naming method of the daily usage report. Contains bucket_name and report_name prefix.", - "properties": { - "bucketName": { - "type": "string", - "description": "The name of an existing bucket in Cloud Storage where the usage report object is stored. The Google Service Account is granted write access to this bucket. This can either be the bucket name by itself, such as example-bucket, or the bucket name with gs:// or https://storage.googleapis.com/ in front of it, such as gs://example-bucket." - }, - "reportNamePrefix": { - "type": "string", - "description": "An optional prefix for the name of the usage report object stored in bucketName. If not supplied, defaults to usage. The report is stored as a CSV file named report_name_prefix_gce_YYYYMMDD.csv where YYYYMMDD is the day of the usage according to Pacific Time. If you supply a prefix, it should conform to Cloud Storage object naming conventions." - } - } - }, - "VpnTunnel": { - "id": "VpnTunnel", - "type": "object", - "properties": { - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." - }, - "description": { - "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." 
- }, - "detailedStatus": { - "type": "string", - "description": "[Output Only] Detailed status message for the VPN tunnel." - }, - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64" - }, - "ikeVersion": { - "type": "integer", - "description": "IKE protocol version to use when establishing the VPN tunnel with peer VPN gateway. Acceptable IKE versions are 1 or 2. Default version is 2.", - "format": "int32" - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#vpnTunnel for VPN tunnels.", - "default": "compute#vpnTunnel" - }, - "localTrafficSelector": { + "advertisedRoutes": { "type": "array", - "description": "Local traffic selector to use when establishing the VPN tunnel with peer VPN gateway. The value should be a CIDR formatted string, for example: 192.168.0.0/16. The ranges should be disjoint. Only IPv4 is supported.", + "description": "Routes that were advertised to the remote BGP peer", "items": { - "type": "string" - } - }, - "name": { - "type": "string", - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "annotations": { - "required": [ - "compute.vpnTunnels.insert" - ] + "$ref": "Route" } }, - "peerIp": { + "ipAddress": { "type": "string", - "description": "IP address of the peer VPN gateway. Only IPv4 is supported." + "description": "IP address of the local BGP interface." }, - "region": { + "linkedVpnTunnel": { "type": "string", - "description": "[Output Only] URL of the region where the VPN tunnel resides." - }, - "remoteTrafficSelector": { - "type": "array", - "description": "Remote traffic selectors to use when establishing the VPN tunnel with peer VPN gateway. The value should be a CIDR formatted string, for example: 192.168.0.0/16. The ranges should be disjoint. Only IPv4 is supported.", - "items": { - "type": "string" - } + "description": "URL of the VPN tunnel that this BGP peer controls." }, - "router": { + "name": { "type": "string", - "description": "URL of router resource to be used for dynamic routing." + "description": "Name of this BGP peer. Unique within the Routers resource." }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for the resource." + "numLearnedRoutes": { + "type": "integer", + "description": "Number of routes learned from the remote BGP Peer.", + "format": "uint32" }, - "sharedSecret": { + "peerIpAddress": { "type": "string", - "description": "Shared secret used to set the secure session between the Cloud VPN gateway and the peer VPN gateway." + "description": "IP address of the remote BGP interface." }, - "sharedSecretHash": { + "state": { "type": "string", - "description": "Hash of the shared secret." + "description": "BGP state as specified in RFC1771." 
}, "status": { "type": "string", - "description": "[Output Only] The status of the VPN tunnel.", + "description": "Status of the BGP peer: {UP, DOWN}", "enum": [ - "ALLOCATING_RESOURCES", - "AUTHORIZATION_ERROR", - "DEPROVISIONING", - "ESTABLISHED", - "FAILED", - "FIRST_HANDSHAKE", - "NEGOTIATION_FAILURE", - "NETWORK_ERROR", - "NO_INCOMING_PACKETS", - "PROVISIONING", - "REJECTED", - "WAITING_FOR_FULL_CONFIG" + "DOWN", + "UNKNOWN", + "UP" ], "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", "", "", "" ] }, - "targetVpnGateway": { + "uptime": { "type": "string", - "description": "URL of the VPN gateway with which this VPN tunnel is associated. Provided by the client when the VPN tunnel is created.", - "annotations": { - "required": [ - "compute.vpnTunnels.insert" - ] - } + "description": "Time this session has been up. Format: 14 years, 51 weeks, 6 days, 23 hours, 59 minutes, 59 seconds" + }, + "uptimeSeconds": { + "type": "string", + "description": "Time this session has been up, in seconds. Format: 145" } } }, - "VpnTunnelAggregatedList": { - "id": "VpnTunnelAggregatedList", + "RouterStatusResponse": { + "id": "RouterStatusResponse", "type": "object", "properties": { - "id": { - "type": "string", - "description": "[Output Only] Unique identifier for the resource; defined by the server." - }, - "items": { - "type": "object", - "description": "A list of VpnTunnelsScopedList resources.", - "additionalProperties": { - "$ref": "VpnTunnelsScopedList", - "description": "Name of the scope containing this set of vpn tunnels." - } - }, "kind": { "type": "string", - "description": "[Output Only] Type of resource. Always compute#vpnTunnel for VPN tunnels.", - "default": "compute#vpnTunnelAggregatedList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + "description": "Type of resource.", + "default": "compute#routerStatusResponse" }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." + "result": { + "$ref": "RouterStatus" } } }, - "VpnTunnelList": { - "id": "VpnTunnelList", + "RoutersPreviewResponse": { + "id": "RoutersPreviewResponse", "type": "object", - "description": "Contains a list of VpnTunnel resources.", "properties": { - "id": { - "type": "string", - "description": "[Output Only] Unique identifier for the resource; defined by the server." - }, - "items": { - "type": "array", - "description": "A list of VpnTunnel resources.", - "items": { - "$ref": "VpnTunnel" - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. Always compute#vpnTunnel for VPN tunnels.", - "default": "compute#vpnTunnelList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." 
+ "resource": { + "$ref": "Router", + "description": "Preview of given router." } } }, - "VpnTunnelsScopedList": { - "id": "VpnTunnelsScopedList", + "RoutersScopedList": { + "id": "RoutersScopedList", "type": "object", "properties": { - "vpnTunnels": { + "routers": { "type": "array", - "description": "List of vpn tunnels contained in this scope.", + "description": "List of routers contained in this scope.", "items": { - "$ref": "VpnTunnel" + "$ref": "Router" } }, "warning": { "type": "object", - "description": "Informational warning which replaces the list of addresses when the list is empty.", + "description": "Informational warning which replaces the list of routers when the list is empty.", "properties": { "code": { "type": "string", @@ -9959,82 +11661,151 @@ } } }, - "XpnHostList": { - "id": "XpnHostList", + "Rule": { + "id": "Rule", "type": "object", + "description": "A rule to be applied in a Policy.", "properties": { - "id": { + "action": { "type": "string", - "description": "[Output Only] Unique identifier for the resource; defined by the server." + "description": "Required", + "enum": [ + "ALLOW", + "ALLOW_WITH_LOG", + "DENY", + "DENY_WITH_LOG", + "LOG", + "NO_ACTION" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "" + ] }, - "items": { + "conditions": { "type": "array", - "description": "[Output Only] A list of shared VPC host project URLs.", + "description": "Additional restrictions that must be met", "items": { - "$ref": "Project" + "$ref": "Condition" } }, - "kind": { + "description": { "type": "string", - "description": "[Output Only] Type of resource. Always compute#xpnHostList for lists of shared VPC hosts.", - "default": "compute#xpnHostList" + "description": "Human-readable description of the rule." }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + "ins": { + "type": "array", + "description": "If one or more 'in' clauses are specified, the rule matches if the PRINCIPAL/AUTHORITY_SELECTOR is in at least one of these entries.", + "items": { + "type": "string" + } }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." - } - } - }, - "XpnResourceId": { - "id": "XpnResourceId", + "logConfigs": { + "type": "array", + "description": "The config returned to callers of tech.iam.IAM.CheckPolicy for any entries that match the LOG action.", + "items": { + "$ref": "LogConfig" + } + }, + "notIns": { + "type": "array", + "description": "If one or more 'not_in' clauses are specified, the rule matches if the PRINCIPAL/AUTHORITY_SELECTOR is in none of the entries.", + "items": { + "type": "string" + } + }, + "permissions": { + "type": "array", + "description": "A permission is a string of form '..' (e.g., 'storage.buckets.list'). A value of '*' matches all permissions, and a verb part of '*' (e.g., 'storage.buckets.*') matches all verbs.", + "items": { + "type": "string" + } + } + } + }, + "SSLHealthCheck": { + "id": "SSLHealthCheck", "type": "object", - "description": "Service resource (a.k.a service project) ID.", "properties": { - "id": { + "port": { + "type": "integer", + "description": "The TCP port number for the health check request. The default value is 443. 
Valid values are 1 through 65535.", + "format": "int32" + }, + "portName": { "type": "string", - "description": "The ID of the service resource. In the case of projects, this field matches the project ID (e.g., my-project), not the project number (e.g., 12345678)." + "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence." }, - "type": { + "proxyHeader": { "type": "string", - "description": "The type of the service resource.", + "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.", "enum": [ - "PROJECT", - "XPN_RESOURCE_TYPE_UNSPECIFIED" + "NONE", + "PROXY_V1" ], "enumDescriptions": [ "", "" ] + }, + "request": { + "type": "string", + "description": "The application data to send once the SSL connection has been established (default value is empty). If both request and response are empty, the connection establishment alone will indicate health. The request data can only be ASCII." + }, + "response": { + "type": "string", + "description": "The bytes to match against the beginning of the response data. If left empty (the default value), any response will indicate health. The response data can only be ASCII." } } }, - "Zone": { - "id": "Zone", + "Scheduling": { + "id": "Scheduling", "type": "object", - "description": "A Zone resource.", + "description": "Sets the scheduling options for an Instance.", "properties": { - "availableCpuPlatforms": { - "type": "array", - "description": "[Output Only] Available cpu/platform selections for the zone.", - "items": { - "type": "string" - } + "automaticRestart": { + "type": "boolean", + "description": "Specifies whether the instance should be automatically restarted if it is terminated by Compute Engine (not terminated by a user). You can only set the automatic restart option for standard instances. Preemptible instances cannot be automatically restarted.\n\nBy default, this is set to true so an instance is automatically restarted if it is terminated by Compute Engine." + }, + "onHostMaintenance": { + "type": "string", + "description": "Defines the maintenance behavior for this instance. For standard instances, the default behavior is MIGRATE. For preemptible instances, the default and only possible behavior is TERMINATE. For more information, see Setting Instance Scheduling Options.", + "enum": [ + "MIGRATE", + "TERMINATE" + ], + "enumDescriptions": [ + "", + "" + ] }, + "preemptible": { + "type": "boolean", + "description": "Defines whether the instance is preemptible. This can only be set during instance creation, it cannot be set or changed after the instance has been created." + } + } + }, + "SecurityPolicy": { + "id": "SecurityPolicy", + "type": "object", + "description": "A security policy is comprised of one or more rules. It can also be associated with one or more 'targets'.", + "properties": { "creationTimestamp": { "type": "string", "description": "[Output Only] Creation timestamp in RFC3339 text format." }, - "deprecated": { - "$ref": "DeprecationStatus", - "description": "[Output Only] The deprecation status associated with this zone." - }, "description": { "type": "string", - "description": "[Output Only] Textual description of the resource." + "description": "An optional description of this resource. Provide this property when you create the resource." 
+ }, + "fingerprint": { + "type": "string", + "description": "Specifies a fingerprint for this resource, which is essentially a hash of the metadata's contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update metadata. You must always provide an up-to-date fingerprint hash in order to update or change metadata.\n\nTo see the latest fingerprint, make get() request to the security policy.", + "format": "byte" }, "id": { "type": "string", @@ -10043,39 +11814,30 @@ }, "kind": { "type": "string", - "description": "[Output Only] Type of the resource. Always compute#zone for zones.", - "default": "compute#zone" + "description": "[Output only] Type of the resource. Always compute#securityPolicyfor security policies", + "default": "compute#securityPolicy" }, "name": { "type": "string", - "description": "[Output Only] Name of the resource." + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" }, - "region": { - "type": "string", - "description": "[Output Only] Full URL reference to the region which hosts the zone." + "rules": { + "type": "array", + "description": "List of rules that belong to this policy. There must always be a default rule (rule with priority 2147483647 and match \"*\"). If no rules are provided when creating a security policy, a default rule with action \"allow\" will be added.", + "items": { + "$ref": "SecurityPolicyRule" + } }, "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for the resource." - }, - "status": { - "type": "string", - "description": "[Output Only] Status of the zone, either UP or DOWN.", - "enum": [ - "DOWN", - "UP" - ], - "enumDescriptions": [ - "", - "" - ] } } }, - "ZoneList": { - "id": "ZoneList", + "SecurityPolicyList": { + "id": "SecurityPolicyList", "type": "object", - "description": "Contains a list of zone resources.", "properties": { "id": { "type": "string", @@ -10083,53 +11845,4308 @@ }, "items": { "type": "array", - "description": "A list of Zone resources.", + "description": "A list of SecurityPolicy resources.", "items": { - "$ref": "Zone" + "$ref": "SecurityPolicy" } }, "kind": { "type": "string", - "description": "Type of resource.", - "default": "compute#zoneList" + "description": "[Output Only] Type of resource. Always compute#securityPolicyList for listsof securityPolicies", + "default": "compute#securityPolicyList" }, "nextPageToken": { "type": "string", "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "SecurityPolicyReference": { + "id": "SecurityPolicyReference", + "type": "object", + "properties": { + "securityPolicy": { + "type": "string" + } + } + }, + "SecurityPolicyRule": { + "id": "SecurityPolicyRule", + "type": "object", + "description": "Represents a rule that describes one or more match conditions along with the action to be taken when traffic matches this condition (allow or deny).", + "properties": { + "action": { + "type": "string", + "description": "The Action to preform when the client connection triggers the rule. Can currently be either \"allow\" or \"deny()\" where valid values for status are 403, 404, and 502." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "kind": { + "type": "string", + "description": "[Output only] Type of the resource. Always compute#securityPolicyRule for security policy rules", + "default": "compute#securityPolicyRule" + }, + "match": { + "$ref": "SecurityPolicyRuleMatcher", + "description": "A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding ?action? is enforced." + }, + "preview": { + "type": "boolean", + "description": "If set to true, the specified action is not enforced." + }, + "priority": { + "type": "integer", + "description": "An integer indicating the priority of a rule in the list. The priority must be a positive value between 0 and 2147483647. 
Rules are evaluated in the increasing order of priority.", + "format": "int32" + } + } + }, + "SecurityPolicyRuleMatcher": { + "id": "SecurityPolicyRuleMatcher", + "type": "object", + "description": "Represents a match condition that incoming traffic is evaluated against. Exactly one field must be specified.", + "properties": { + "srcIpRanges": { + "type": "array", + "description": "CIDR IP address range. Only IPv4 is supported.", + "items": { + "type": "string" + } + } + } + }, + "SerialPortOutput": { + "id": "SerialPortOutput", + "type": "object", + "description": "An instance's serial console output.", + "properties": { + "contents": { + "type": "string", + "description": "[Output Only] The contents of the console output." + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#serialPortOutput for serial port output.", + "default": "compute#serialPortOutput" + }, + "next": { + "type": "string", + "description": "[Output Only] The position of the next byte of content from the serial console output. Use this value in the next request as the start parameter.", + "format": "int64" + }, "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "start": { + "type": "string", + "description": "The starting byte position of the output that was returned. This should match the start parameter sent with the request. If the serial console output exceeds the size of the buffer, older output will be overwritten by newer content and the start values will be mismatched.", + "format": "int64" + } + } + }, + "ServiceAccount": { + "id": "ServiceAccount", + "type": "object", + "description": "A service account.", + "properties": { + "email": { + "type": "string", + "description": "Email address of the service account." + }, + "scopes": { + "type": "array", + "description": "The list of scopes to be made available for this service account.", + "items": { + "type": "string" + } + } + } + }, + "Snapshot": { + "id": "Snapshot", + "type": "object", + "description": "A persistent disk snapshot resource.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "diskSizeGb": { + "type": "string", + "description": "[Output Only] Size of the snapshot, specified in GB.", + "format": "int64" + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#snapshot for Snapshot resources.", + "default": "compute#snapshot" + }, + "labelFingerprint": { + "type": "string", + "description": "A fingerprint for the labels being applied to this snapshot, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels.\n\nTo see the latest fingerprint, make a get() request to retrieve a snapshot.", + "format": "byte" + }, + "labels": { + "type": "object", + "description": "Labels to apply to this snapshot. These can be later modified by the setLabels method. 
Label values may be empty.", + "additionalProperties": { + "type": "string" + } + }, + "licenses": { + "type": "array", + "description": "[Output Only] A list of public visible licenses that apply to this snapshot. This can be because the original image had licenses attached (such as a Windows image).", + "items": { + "type": "string" + } + }, + "name": { + "type": "string", + "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "snapshotEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "Encrypts the snapshot using a customer-supplied encryption key.\n\nAfter you encrypt a snapshot using a customer-supplied key, you must provide the same key if you use the image later For example, you must provide the encryption key when you create a disk from the encrypted snapshot in a future request.\n\nCustomer-supplied encryption keys do not protect access to metadata of the disk.\n\nIf you do not provide an encryption key when creating the snapshot, then the snapshot will be encrypted using an automatically generated key and you do not need to provide a key to use the snapshot later." + }, + "sourceDisk": { + "type": "string", + "description": "[Output Only] The source disk used to create this snapshot." + }, + "sourceDiskEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "The customer-supplied encryption key of the source disk. Required if the source disk is protected by a customer-supplied encryption key." + }, + "sourceDiskId": { + "type": "string", + "description": "[Output Only] The ID value of the disk used to create this snapshot. This value may be used to determine whether the snapshot was taken from the current or a previous instance of a given disk name." + }, + "status": { + "type": "string", + "description": "[Output Only] The status of the snapshot. This can be CREATING, DELETING, FAILED, READY, or UPLOADING.", + "enum": [ + "CREATING", + "DELETING", + "FAILED", + "READY", + "UPLOADING" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "" + ] + }, + "storageBytes": { + "type": "string", + "description": "[Output Only] A size of the the storage used by the snapshot. As snapshots share storage, this number is expected to change with snapshot creation/deletion.", + "format": "int64" + }, + "storageBytesStatus": { + "type": "string", + "description": "[Output Only] An indicator whether storageBytes is in a stable state or it is being adjusted as a result of shared storage reallocation. This status can either be UPDATING, meaning the size of the snapshot is being updated, or UP_TO_DATE, meaning the size of the snapshot is up-to-date.", + "enum": [ + "UPDATING", + "UP_TO_DATE" + ], + "enumDescriptions": [ + "", + "" + ] + } + } + }, + "SnapshotList": { + "id": "SnapshotList", + "type": "object", + "description": "Contains a list of Snapshot resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." 
+ }, + "items": { + "type": "array", + "description": "A list of Snapshot resources.", + "items": { + "$ref": "Snapshot" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#snapshotList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "SslCertificate": { + "id": "SslCertificate", + "type": "object", + "description": "An SslCertificate resource. This resource provides a mechanism to upload an SSL key and certificate to the load balancer to serve secure connections from the user.", + "properties": { + "certificate": { + "type": "string", + "description": "A local certificate file. The certificate must be in PEM format. The certificate chain must be no greater than 5 certs long. The chain must include at least one intermediate cert." + }, + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. 
Provide this property when you create the resource." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#sslCertificate for SSL certificates.", + "default": "compute#sslCertificate" + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "privateKey": { + "type": "string", + "description": "A write-only private key in PEM format. Only insert requests will include this field." + }, + "selfLink": { + "type": "string", + "description": "[Output only] Server-defined URL for the resource." + } + } + }, + "SslCertificateList": { + "id": "SslCertificateList", + "type": "object", + "description": "Contains a list of SslCertificate resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." + }, + "items": { + "type": "array", + "description": "A list of SslCertificate resources.", + "items": { + "$ref": "SslCertificate" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#sslCertificateList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. 
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "Subnetwork": { + "id": "Subnetwork", + "type": "object", + "description": "A Subnetwork resource.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource. This field can be set only at resource creation time." + }, + "gatewayAddress": { + "type": "string", + "description": "[Output Only] The gateway address for default routes to reach destination addresses outside this subnetwork. This field can be set only at resource creation time." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "ipCidrRange": { + "type": "string", + "description": "The range of internal addresses that are owned by this subnetwork. Provide this property when you create the subnetwork. For example, 10.0.0.0/8 or 192.168.0.0/16. Ranges must be unique and non-overlapping within a network. Only IPv4 is supported. This field can be set only at resource creation time." + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#subnetwork for Subnetwork resources.", + "default": "compute#subnetwork" + }, + "name": { + "type": "string", + "description": "The name of the resource, provided by the client when initially creating the resource. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "network": { + "type": "string", + "description": "The URL of the network to which this subnetwork belongs, provided by the client when initially creating the subnetwork. Only networks that are in the distributed mode can have subnetworks. This field can be set only at resource creation time." + }, + "privateIpGoogleAccess": { + "type": "boolean", + "description": "Whether the VMs in this subnet can access Google services without assigned external IP addresses. This field can be both set at resource creation time and updated using setPrivateIpGoogleAccess." 
+ }, + "region": { + "type": "string", + "description": "URL of the region where the Subnetwork resides. This field can be set only at resource creation time." + }, + "secondaryIpRanges": { + "type": "array", + "description": "An array of configurations for secondary IP ranges for VM instances contained in this subnetwork. The primary IP of such VM must belong to the primary ipCidrRange of the subnetwork. The alias IPs may belong to either primary or secondary ranges.", + "items": { + "$ref": "SubnetworkSecondaryRange" + } + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + } + } + }, + "SubnetworkAggregatedList": { + "id": "SubnetworkAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." + }, + "items": { + "type": "object", + "description": "A list of SubnetworksScopedList resources.", + "additionalProperties": { + "$ref": "SubnetworksScopedList", + "description": "Name of the scope containing this set of Subnetworks." + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#subnetworkAggregatedList for aggregated lists of subnetworks.", + "default": "compute#subnetworkAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "SubnetworkList": { + "id": "SubnetworkList", + "type": "object", + "description": "Contains a list of Subnetwork resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." + }, + "items": { + "type": "array", + "description": "A list of Subnetwork resources.", + "items": { + "$ref": "Subnetwork" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#subnetworkList for lists of subnetworks.", + "default": "compute#subnetworkList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." 
+ } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "SubnetworkSecondaryRange": { + "id": "SubnetworkSecondaryRange", + "type": "object", + "description": "Represents a secondary IP range of a subnetwork.", + "properties": { + "ipCidrRange": { + "type": "string", + "description": "The range of IP addresses belonging to this subnetwork secondary range. Provide this property when you create the subnetwork. Ranges must be unique and non-overlapping with all primary and secondary IP ranges within a network. Only IPv4 is supported." + }, + "rangeName": { + "type": "string", + "description": "The name associated with this subnetwork secondary range, used when adding an alias IP range to a VM instance. The name must be 1-63 characters long, and comply with RFC1035. The name must be unique within the subnetwork." + } + } + }, + "SubnetworksExpandIpCidrRangeRequest": { + "id": "SubnetworksExpandIpCidrRangeRequest", + "type": "object", + "properties": { + "ipCidrRange": { + "type": "string", + "description": "The IP (in CIDR format or netmask) of internal addresses that are legal on this Subnetwork. This range should be disjoint from other subnetworks within this network. This range can only be larger than (i.e. a superset of) the range previously defined before the update." + } + } + }, + "SubnetworksScopedList": { + "id": "SubnetworksScopedList", + "type": "object", + "properties": { + "subnetworks": { + "type": "array", + "description": "List of subnetworks contained in this scope.", + "items": { + "$ref": "Subnetwork" + } + }, + "warning": { + "type": "object", + "description": "An informational warning that appears when the list of addresses is empty.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." 
+ } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "SubnetworksSetPrivateIpGoogleAccessRequest": { + "id": "SubnetworksSetPrivateIpGoogleAccessRequest", + "type": "object", + "properties": { + "privateIpGoogleAccess": { + "type": "boolean" + } + } + }, + "TCPHealthCheck": { + "id": "TCPHealthCheck", + "type": "object", + "properties": { + "port": { + "type": "integer", + "description": "The TCP port number for the health check request. The default value is 80. Valid values are 1 through 65535.", + "format": "int32" + }, + "portName": { + "type": "string", + "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence." + }, + "proxyHeader": { + "type": "string", + "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.", + "enum": [ + "NONE", + "PROXY_V1" + ], + "enumDescriptions": [ + "", + "" + ] + }, + "request": { + "type": "string", + "description": "The application data to send once the TCP connection has been established (default value is empty). If both request and response are empty, the connection establishment alone will indicate health. The request data can only be ASCII." + }, + "response": { + "type": "string", + "description": "The bytes to match against the beginning of the response data. If left empty (the default value), any response will indicate health. The response data can only be ASCII." + } + } + }, + "Tags": { + "id": "Tags", + "type": "object", + "description": "A set of instance tags.", + "properties": { + "fingerprint": { + "type": "string", + "description": "Specifies a fingerprint for this request, which is essentially a hash of the metadata's contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update metadata. You must always provide an up-to-date fingerprint hash in order to update or change metadata.\n\nTo see the latest fingerprint, make get() request to the instance.", + "format": "byte" + }, + "items": { + "type": "array", + "description": "An array of tags. Each tag must be 1-63 characters long, and comply with RFC1035.", + "items": { + "type": "string" + } + } + } + }, + "TargetHttpProxy": { + "id": "TargetHttpProxy", + "type": "object", + "description": "A TargetHttpProxy resource. This resource defines an HTTP proxy.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#targetHttpProxy for target HTTP proxies.", + "default": "compute#targetHttpProxy" + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? 
which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "urlMap": { + "type": "string", + "description": "URL to the UrlMap resource that defines the mapping from URL to the BackendService." + } + } + }, + "TargetHttpProxyList": { + "id": "TargetHttpProxyList", + "type": "object", + "description": "A list of TargetHttpProxy resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." + }, + "items": { + "type": "array", + "description": "A list of TargetHttpProxy resources.", + "items": { + "$ref": "TargetHttpProxy" + } + }, + "kind": { + "type": "string", + "description": "Type of resource. Always compute#targetHttpProxyList for lists of target HTTP proxies.", + "default": "compute#targetHttpProxyList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." 
+ } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "TargetHttpsProxiesSetSslCertificatesRequest": { + "id": "TargetHttpsProxiesSetSslCertificatesRequest", + "type": "object", + "properties": { + "sslCertificates": { + "type": "array", + "description": "New set of SslCertificate resources to associate with this TargetHttpsProxy resource. Currently exactly one SslCertificate resource must be specified.", + "items": { + "type": "string" + } + } + } + }, + "TargetHttpsProxy": { + "id": "TargetHttpsProxy", + "type": "object", + "description": "A TargetHttpsProxy resource. This resource defines an HTTPS proxy.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#targetHttpsProxy for target HTTPS proxies.", + "default": "compute#targetHttpsProxy" + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "sslCertificates": { + "type": "array", + "description": "URLs to SslCertificate resources that are used to authenticate connections between users and the load balancer. Currently, exactly one SSL certificate must be specified.", + "items": { + "type": "string" + } + }, + "urlMap": { + "type": "string", + "description": "A fully-qualified or valid partial URL to the UrlMap resource that defines the mapping from URL to the BackendService. For example, the following are all valid URLs for specifying a URL map: \n- https://www.googleapis.com/compute/v1/projects/project/global/urlMaps/url-map \n- projects/project/global/urlMaps/url-map \n- global/urlMaps/url-map" + } + } + }, + "TargetHttpsProxyList": { + "id": "TargetHttpsProxyList", + "type": "object", + "description": "Contains a list of TargetHttpsProxy resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." + }, + "items": { + "type": "array", + "description": "A list of TargetHttpsProxy resources.", + "items": { + "$ref": "TargetHttpsProxy" + } + }, + "kind": { + "type": "string", + "description": "Type of resource. Always compute#targetHttpsProxyList for lists of target HTTPS proxies.", + "default": "compute#targetHttpsProxyList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. 
If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "TargetInstance": { + "id": "TargetInstance", + "type": "object", + "description": "A TargetInstance resource. This resource defines an endpoint instance that terminates traffic of certain protocols.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "instance": { + "type": "string", + "description": "A URL to the virtual machine instance that handles traffic for this target instance. When creating a target instance, you can provide the fully-qualified URL or a valid partial URL to the desired virtual machine. 
For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/instance \n- projects/project/zones/zone/instances/instance \n- zones/zone/instances/instance" + }, + "kind": { + "type": "string", + "description": "[Output Only] The type of the resource. Always compute#targetInstance for target instances.", + "default": "compute#targetInstance" + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "natPolicy": { + "type": "string", + "description": "NAT option controlling how IPs are NAT'ed to the instance. Currently only NO_NAT (default value) is supported.", + "enum": [ + "NO_NAT" + ], + "enumDescriptions": [ + "" + ] + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "zone": { + "type": "string", + "description": "[Output Only] URL of the zone where the target instance resides." + } + } + }, + "TargetInstanceAggregatedList": { + "id": "TargetInstanceAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." + }, + "items": { + "type": "object", + "description": "A list of TargetInstance resources.", + "additionalProperties": { + "$ref": "TargetInstancesScopedList", + "description": "Name of the scope containing this set of target instances." + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#targetInstanceAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "TargetInstanceList": { + "id": "TargetInstanceList", + "type": "object", + "description": "Contains a list of TargetInstance resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." + }, + "items": { + "type": "array", + "description": "A list of TargetInstance resources.", + "items": { + "$ref": "TargetInstance" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#targetInstanceList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "TargetInstancesScopedList": { + "id": "TargetInstancesScopedList", + "type": "object", + "properties": { + "targetInstances": { + "type": "array", + "description": "List of target instances contained in this scope.", + "items": { + "$ref": "TargetInstance" + } + }, + "warning": { + "type": "object", + "description": "Informational warning which replaces the list of addresses when the list is empty.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "TargetPool": { + "id": "TargetPool", + "type": "object", + "description": "A TargetPool resource. This resource defines a pool of instances, an associated HttpHealthCheck resource, and the fallback target pool.", + "properties": { + "backupPool": { + "type": "string", + "description": "This field is applicable only when the containing target pool is serving a forwarding rule as the primary pool, and its failoverRatio field is properly set to a value between [0, 1].\n\nbackupPool and failoverRatio together define the fallback behavior of the primary target pool: if the ratio of the healthy instances in the primary pool is at or below failoverRatio, traffic arriving at the load-balanced IP will be directed to the backup pool.\n\nIn case where failoverRatio and backupPool are not set, or all the instances in the backup pool are unhealthy, the traffic will be directed back to the primary pool in the \"force\" mode, where traffic will be spread to the healthy instances with the best effort, or to all instances when no instance is healthy." + }, + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "failoverRatio": { + "type": "number", + "description": "This field is applicable only when the containing target pool is serving a forwarding rule as the primary pool (i.e., not as a backup pool to some other target pool). The value of the field must be in [0, 1].\n\nIf set, backupPool must also be set. They together define the fallback behavior of the primary target pool: if the ratio of the healthy instances in the primary pool is at or below this number, traffic arriving at the load-balanced IP will be directed to the backup pool.\n\nIn case where failoverRatio is not set or all the instances in the backup pool are unhealthy, the traffic will be directed back to the primary pool in the \"force\" mode, where traffic will be spread to the healthy instances with the best effort, or to all instances when no instance is healthy.", + "format": "float" + }, + "healthChecks": { + "type": "array", + "description": "The URL of the HttpHealthCheck resource. A member instance in this pool is considered healthy if and only if the health checks pass. An empty list means all member instances will be considered healthy at all times. Only HttpHealthChecks are supported. Only one health check may be specified.", + "items": { + "type": "string" + } + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. 
This identifier is defined by the server.", + "format": "uint64" + }, + "instances": { + "type": "array", + "description": "A list of resource URLs to the virtual machine instances serving this pool. They must live in zones contained in the same region as this pool.", + "items": { + "type": "string" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#targetPool for target pools.", + "default": "compute#targetPool" + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "region": { + "type": "string", + "description": "[Output Only] URL of the region where the target pool resides." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "sessionAffinity": { + "type": "string", + "description": "Session affinity option, must be one of the following values:\nNONE: Connections from the same client IP may go to any instance in the pool.\nCLIENT_IP: Connections from the same client IP will go to the same instance in the pool while that instance remains healthy.\nCLIENT_IP_PROTO: Connections from the same client IP with the same IP protocol will go to the same instance in the pool while that instance remains healthy.", + "enum": [ + "CLIENT_IP", + "CLIENT_IP_PORT_PROTO", + "CLIENT_IP_PROTO", + "GENERATED_COOKIE", + "NONE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "" + ] + } + } + }, + "TargetPoolAggregatedList": { + "id": "TargetPoolAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." + }, + "items": { + "type": "object", + "description": "A list of TargetPool resources.", + "additionalProperties": { + "$ref": "TargetPoolsScopedList", + "description": "Name of the scope containing this set of target pools." + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#targetPoolAggregatedList for aggregated lists of target pools.", + "default": "compute#targetPoolAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "TargetPoolInstanceHealth": { + "id": "TargetPoolInstanceHealth", + "type": "object", + "properties": { + "healthStatus": { + "type": "array", + "items": { + "$ref": "HealthStatus" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#targetPoolInstanceHealth when checking the health of an instance.", + "default": "compute#targetPoolInstanceHealth" + } + } + }, + "TargetPoolList": { + "id": "TargetPoolList", + "type": "object", + "description": "Contains a list of TargetPool resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." + }, + "items": { + "type": "array", + "description": "A list of TargetPool resources.", + "items": { + "$ref": "TargetPool" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#targetPoolList for lists of target pools.", + "default": "compute#targetPoolList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "TargetPoolsAddHealthCheckRequest": { + "id": "TargetPoolsAddHealthCheckRequest", + "type": "object", + "properties": { + "healthChecks": { + "type": "array", + "description": "The HttpHealthCheck to add to the target pool.", + "items": { + "$ref": "HealthCheckReference" + } + } + } + }, + "TargetPoolsAddInstanceRequest": { + "id": "TargetPoolsAddInstanceRequest", + "type": "object", + "properties": { + "instances": { + "type": "array", + "description": "A full or partial URL to an instance to add to this target pool. This can be a full or partial URL. For example, the following are valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project-id/zones/zone/instances/instance-name \n- projects/project-id/zones/zone/instances/instance-name \n- zones/zone/instances/instance-name", + "items": { + "$ref": "InstanceReference" + } + } + } + }, + "TargetPoolsRemoveHealthCheckRequest": { + "id": "TargetPoolsRemoveHealthCheckRequest", + "type": "object", + "properties": { + "healthChecks": { + "type": "array", + "description": "Health check URL to be removed. This can be a full or valid partial URL. 
For example, the following are valid URLs: \n- https://www.googleapis.com/compute/beta/projects/project/global/httpHealthChecks/health-check \n- projects/project/global/httpHealthChecks/health-check \n- global/httpHealthChecks/health-check", + "items": { + "$ref": "HealthCheckReference" + } + } + } + }, + "TargetPoolsRemoveInstanceRequest": { + "id": "TargetPoolsRemoveInstanceRequest", + "type": "object", + "properties": { + "instances": { + "type": "array", + "description": "URLs of the instances to be removed from target pool.", + "items": { + "$ref": "InstanceReference" + } + } + } + }, + "TargetPoolsScopedList": { + "id": "TargetPoolsScopedList", + "type": "object", + "properties": { + "targetPools": { + "type": "array", + "description": "List of target pools contained in this scope.", + "items": { + "$ref": "TargetPool" + } + }, + "warning": { + "type": "object", + "description": "Informational warning which replaces the list of addresses when the list is empty.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "TargetReference": { + "id": "TargetReference", + "type": "object", + "properties": { + "target": { + "type": "string" + } + } + }, + "TargetSslProxiesSetBackendServiceRequest": { + "id": "TargetSslProxiesSetBackendServiceRequest", + "type": "object", + "properties": { + "service": { + "type": "string", + "description": "The URL of the new BackendService resource for the targetSslProxy." 
+ } + } + }, + "TargetSslProxiesSetProxyHeaderRequest": { + "id": "TargetSslProxiesSetProxyHeaderRequest", + "type": "object", + "properties": { + "proxyHeader": { + "type": "string", + "description": "The new type of proxy header to append before sending data to the backend. NONE or PROXY_V1 are allowed.", + "enum": [ + "NONE", + "PROXY_V1" + ], + "enumDescriptions": [ + "", + "" + ] + } + } + }, + "TargetSslProxiesSetSslCertificatesRequest": { + "id": "TargetSslProxiesSetSslCertificatesRequest", + "type": "object", + "properties": { + "sslCertificates": { + "type": "array", + "description": "New set of URLs to SslCertificate resources to associate with this TargetSslProxy. Currently exactly one ssl certificate must be specified.", + "items": { + "type": "string" + } + } + } + }, + "TargetSslProxy": { + "id": "TargetSslProxy", + "type": "object", + "description": "A TargetSslProxy resource. This resource defines an SSL proxy.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#targetSslProxy for target SSL proxies.", + "default": "compute#targetSslProxy" + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "proxyHeader": { + "type": "string", + "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.", + "enum": [ + "NONE", + "PROXY_V1" + ], + "enumDescriptions": [ + "", + "" + ] + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "service": { + "type": "string", + "description": "URL to the BackendService resource." + }, + "sslCertificates": { + "type": "array", + "description": "URLs to SslCertificate resources that are used to authenticate connections to Backends. Currently exactly one SSL certificate must be specified.", + "items": { + "type": "string" + } + } + } + }, + "TargetSslProxyList": { + "id": "TargetSslProxyList", + "type": "object", + "description": "Contains a list of TargetSslProxy resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." 
+ }, + "items": { + "type": "array", + "description": "A list of TargetSslProxy resources.", + "items": { + "$ref": "TargetSslProxy" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#targetSslProxyList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "TargetTcpProxiesSetBackendServiceRequest": { + "id": "TargetTcpProxiesSetBackendServiceRequest", + "type": "object", + "properties": { + "service": { + "type": "string", + "description": "The URL of the new BackendService resource for the targetTcpProxy." + } + } + }, + "TargetTcpProxiesSetProxyHeaderRequest": { + "id": "TargetTcpProxiesSetProxyHeaderRequest", + "type": "object", + "properties": { + "proxyHeader": { + "type": "string", + "description": "The new type of proxy header to append before sending data to the backend. 
NONE or PROXY_V1 are allowed.", + "enum": [ + "NONE", + "PROXY_V1" + ], + "enumDescriptions": [ + "", + "" + ] + } + } + }, + "TargetTcpProxy": { + "id": "TargetTcpProxy", + "type": "object", + "description": "A TargetTcpProxy resource. This resource defines a TCP proxy.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#targetTcpProxy for target TCP proxies.", + "default": "compute#targetTcpProxy" + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "proxyHeader": { + "type": "string", + "description": "Specifies the type of proxy header to append before sending data to the backend, either NONE or PROXY_V1. The default is NONE.", + "enum": [ + "NONE", + "PROXY_V1" + ], + "enumDescriptions": [ + "", + "" + ] + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "service": { + "type": "string", + "description": "URL to the BackendService resource." + } + } + }, + "TargetTcpProxyList": { + "id": "TargetTcpProxyList", + "type": "object", + "description": "Contains a list of TargetTcpProxy resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." + }, + "items": { + "type": "array", + "description": "A list of TargetTcpProxy resources.", + "items": { + "$ref": "TargetTcpProxy" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#targetTcpProxyList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "TargetVpnGateway": { + "id": "TargetVpnGateway", + "type": "object", + "description": "Represents a Target VPN gateway resource.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "forwardingRules": { + "type": "array", + "description": "[Output Only] A list of URLs to the ForwardingRule resources. ForwardingRules are created using compute.forwardingRules.insert and associated to a VPN gateway.", + "items": { + "type": "string" + } + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#targetVpnGateway for target VPN gateways.", + "default": "compute#targetVpnGateway" + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? 
which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "annotations": { + "required": [ + "compute.targetVpnGateways.insert" + ] + } + }, + "network": { + "type": "string", + "description": "URL of the network to which this VPN gateway is attached. Provided by the client when the VPN gateway is created.", + "annotations": { + "required": [ + "compute.targetVpnGateways.insert" + ] + } + }, + "region": { + "type": "string", + "description": "[Output Only] URL of the region where the target VPN gateway resides." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "status": { + "type": "string", + "description": "[Output Only] The status of the VPN gateway.", + "enum": [ + "CREATING", + "DELETING", + "FAILED", + "READY" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ] + }, + "tunnels": { + "type": "array", + "description": "[Output Only] A list of URLs to VpnTunnel resources. VpnTunnels are created using compute.vpntunnels.insert method and associated to a VPN gateway.", + "items": { + "type": "string" + } + } + } + }, + "TargetVpnGatewayAggregatedList": { + "id": "TargetVpnGatewayAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." + }, + "items": { + "type": "object", + "description": "A list of TargetVpnGateway resources.", + "additionalProperties": { + "$ref": "TargetVpnGatewaysScopedList", + "description": "[Output Only] Name of the scope containing this set of target VPN gateways." + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#targetVpnGateway for target VPN gateways.", + "default": "compute#targetVpnGatewayAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "TargetVpnGatewayList": { + "id": "TargetVpnGatewayList", + "type": "object", + "description": "Contains a list of TargetVpnGateway resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." + }, + "items": { + "type": "array", + "description": "A list of TargetVpnGateway resources.", + "items": { + "$ref": "TargetVpnGateway" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#targetVpnGateway for target VPN gateways.", + "default": "compute#targetVpnGatewayList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "TargetVpnGatewaysScopedList": { + "id": "TargetVpnGatewaysScopedList", + "type": "object", + "properties": { + "targetVpnGateways": { + "type": "array", + "description": "[Output Only] List of target vpn gateways contained in this scope.", + "items": { + "$ref": "TargetVpnGateway" + } + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning which replaces the list of addresses when the list is empty.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "TestFailure": { + "id": "TestFailure", + "type": "object", + "properties": { + "actualService": { + "type": "string" + }, + "expectedService": { + "type": "string" + }, + "host": { + "type": "string" + }, + "path": { + "type": "string" + } + } + }, + "TestPermissionsRequest": { + "id": "TestPermissionsRequest", + "type": "object", + "properties": { + "permissions": { + "type": "array", + "description": "The set of permissions to check for the 'resource'. Permissions with wildcards (such as '*' or 'storage.*') are not allowed.", + "items": { + "type": "string" + } + } + } + }, + "TestPermissionsResponse": { + "id": "TestPermissionsResponse", + "type": "object", + "properties": { + "permissions": { + "type": "array", + "description": "A subset of `TestPermissionsRequest.permissions` that the caller is allowed.", + "items": { + "type": "string" + } + } + } + }, + "UDPHealthCheck": { + "id": "UDPHealthCheck", + "type": "object", + "properties": { + "port": { + "type": "integer", + "description": "The UDP port number for the health check request. Valid values are 1 through 65535.", + "format": "int32" + }, + "portName": { + "type": "string", + "description": "Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name are defined, port takes precedence." + }, + "request": { + "type": "string", + "description": "Raw data of request to send in payload of UDP packet. It is an error if this is empty. The request data can only be ASCII." + }, + "response": { + "type": "string", + "description": "The bytes to match against the beginning of the response data. It is an error if this is empty. The response data can only be ASCII." + } + } + }, + "UrlMap": { + "id": "UrlMap", + "type": "object", + "description": "A UrlMap resource. This resource defines the mapping from URL to the BackendService resource, based on the \"longest-match\" of the URL's host and path.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "defaultService": { + "type": "string", + "description": "The URL of the BackendService resource if none of the hostRules match." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "fingerprint": { + "type": "string", + "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a UrlMap. 
An up-to-date fingerprint must be provided in order to update the UrlMap.", + "format": "byte" + }, + "hostRules": { + "type": "array", + "description": "The list of HostRules to use against the URL.", + "items": { + "$ref": "HostRule" + } + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#urlMaps for url maps.", + "default": "compute#urlMap" + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "pathMatchers": { + "type": "array", + "description": "The list of named PathMatchers to use against the URL.", + "items": { + "$ref": "PathMatcher" + } + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "tests": { + "type": "array", + "description": "The list of expected URL mappings. Request to update this UrlMap will succeed only if all of the test cases pass.", + "items": { + "$ref": "UrlMapTest" + } + } + } + }, + "UrlMapList": { + "id": "UrlMapList", + "type": "object", + "description": "Contains a list of UrlMap resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." + }, + "items": { + "type": "array", + "description": "A list of UrlMap resources.", + "items": { + "$ref": "UrlMap" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#urlMapList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "UrlMapReference": { + "id": "UrlMapReference", + "type": "object", + "properties": { + "urlMap": { + "type": "string" + } + } + }, + "UrlMapTest": { + "id": "UrlMapTest", + "type": "object", + "description": "Message for the expected URL mappings.", + "properties": { + "description": { + "type": "string", + "description": "Description of this test case." + }, + "host": { + "type": "string", + "description": "Host portion of the URL." + }, + "path": { + "type": "string", + "description": "Path portion of the URL." + }, + "service": { + "type": "string", + "description": "Expected BackendService resource the given URL should be mapped to." + } + } + }, + "UrlMapValidationResult": { + "id": "UrlMapValidationResult", + "type": "object", + "description": "Message representing the validation result for a UrlMap.", + "properties": { + "loadErrors": { + "type": "array", + "items": { + "type": "string" + } + }, + "loadSucceeded": { + "type": "boolean", + "description": "Whether the given UrlMap can be successfully loaded. If false, 'loadErrors' indicates the reasons." + }, + "testFailures": { + "type": "array", + "items": { + "$ref": "TestFailure" + } + }, + "testPassed": { + "type": "boolean", + "description": "If successfully loaded, this field indicates whether the test passed. If false, 'testFailures's indicate the reason of failure." + } + } + }, + "UrlMapsValidateRequest": { + "id": "UrlMapsValidateRequest", + "type": "object", + "properties": { + "resource": { + "$ref": "UrlMap", + "description": "Content of the UrlMap to be validated." 
+ } + } + }, + "UrlMapsValidateResponse": { + "id": "UrlMapsValidateResponse", + "type": "object", + "properties": { + "result": { + "$ref": "UrlMapValidationResult" + } + } + }, + "UsageExportLocation": { + "id": "UsageExportLocation", + "type": "object", + "description": "The location in Cloud Storage and naming method of the daily usage report. Contains bucket_name and report_name prefix.", + "properties": { + "bucketName": { + "type": "string", + "description": "The name of an existing bucket in Cloud Storage where the usage report object is stored. The Google Service Account is granted write access to this bucket. This can either be the bucket name by itself, such as example-bucket, or the bucket name with gs:// or https://storage.googleapis.com/ in front of it, such as gs://example-bucket." + }, + "reportNamePrefix": { + "type": "string", + "description": "An optional prefix for the name of the usage report object stored in bucketName. If not supplied, defaults to usage. The report is stored as a CSV file named report_name_prefix_gce_YYYYMMDD.csv where YYYYMMDD is the day of the usage according to Pacific Time. If you supply a prefix, it should conform to Cloud Storage object naming conventions." + } + } + }, + "VpnTunnel": { + "id": "VpnTunnel", + "type": "object", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "detailedStatus": { + "type": "string", + "description": "[Output Only] Detailed status message for the VPN tunnel." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "ikeVersion": { + "type": "integer", + "description": "IKE protocol version to use when establishing the VPN tunnel with peer VPN gateway. Acceptable IKE versions are 1 or 2. Default version is 2.", + "format": "int32" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#vpnTunnel for VPN tunnels.", + "default": "compute#vpnTunnel" + }, + "localTrafficSelector": { + "type": "array", + "description": "Local traffic selector to use when establishing the VPN tunnel with peer VPN gateway. The value should be a CIDR formatted string, for example: 192.168.0.0/16. The ranges should be disjoint. Only IPv4 is supported.", + "items": { + "type": "string" + } + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "annotations": { + "required": [ + "compute.vpnTunnels.insert" + ] + } + }, + "peerIp": { + "type": "string", + "description": "IP address of the peer VPN gateway. Only IPv4 is supported." + }, + "region": { + "type": "string", + "description": "[Output Only] URL of the region where the VPN tunnel resides." 
+ }, + "remoteTrafficSelector": { + "type": "array", + "description": "Remote traffic selectors to use when establishing the VPN tunnel with peer VPN gateway. The value should be a CIDR formatted string, for example: 192.168.0.0/16. The ranges should be disjoint. Only IPv4 is supported.", + "items": { + "type": "string" + } + }, + "router": { + "type": "string", + "description": "URL of router resource to be used for dynamic routing." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "sharedSecret": { + "type": "string", + "description": "Shared secret used to set the secure session between the Cloud VPN gateway and the peer VPN gateway." + }, + "sharedSecretHash": { + "type": "string", + "description": "Hash of the shared secret." + }, + "status": { + "type": "string", + "description": "[Output Only] The status of the VPN tunnel.", + "enum": [ + "ALLOCATING_RESOURCES", + "AUTHORIZATION_ERROR", + "DEPROVISIONING", + "ESTABLISHED", + "FAILED", + "FIRST_HANDSHAKE", + "NEGOTIATION_FAILURE", + "NETWORK_ERROR", + "NO_INCOMING_PACKETS", + "PROVISIONING", + "REJECTED", + "WAITING_FOR_FULL_CONFIG" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "targetVpnGateway": { + "type": "string", + "description": "URL of the VPN gateway with which this VPN tunnel is associated. Provided by the client when the VPN tunnel is created.", + "annotations": { + "required": [ + "compute.vpnTunnels.insert" + ] + } + } + } + }, + "VpnTunnelAggregatedList": { + "id": "VpnTunnelAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." + }, + "items": { + "type": "object", + "description": "A list of VpnTunnelsScopedList resources.", + "additionalProperties": { + "$ref": "VpnTunnelsScopedList", + "description": "Name of the scope containing this set of vpn tunnels." + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#vpnTunnel for VPN tunnels.", + "default": "compute#vpnTunnelAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "VpnTunnelList": { + "id": "VpnTunnelList", + "type": "object", + "description": "Contains a list of VpnTunnel resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." + }, + "items": { + "type": "array", + "description": "A list of VpnTunnel resources.", + "items": { + "$ref": "VpnTunnel" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#vpnTunnel for VPN tunnels.", + "default": "compute#vpnTunnelList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "VpnTunnelsScopedList": { + "id": "VpnTunnelsScopedList", + "type": "object", + "properties": { + "vpnTunnels": { + "type": "array", + "description": "List of vpn tunnels contained in this scope.", + "items": { + "$ref": "VpnTunnel" + } + }, + "warning": { + "type": "object", + "description": "Informational warning which replaces the list of addresses when the list is empty.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "XpnHostList": { + "id": "XpnHostList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." + }, + "items": { + "type": "array", + "description": "[Output Only] A list of shared VPC host project URLs.", + "items": { + "$ref": "Project" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#xpnHostList for lists of shared VPC hosts.", + "default": "compute#xpnHostList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." 
+ }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "XpnResourceId": { + "id": "XpnResourceId", + "type": "object", + "description": "Service resource (a.k.a service project) ID.", + "properties": { + "id": { + "type": "string", + "description": "The ID of the service resource. In the case of projects, this field matches the project ID (e.g., my-project), not the project number (e.g., 12345678)." + }, + "type": { + "type": "string", + "description": "The type of the service resource.", + "enum": [ + "PROJECT", + "XPN_RESOURCE_TYPE_UNSPECIFIED" + ], + "enumDescriptions": [ + "", + "" + ] + } + } + }, + "Zone": { + "id": "Zone", + "type": "object", + "description": "A Zone resource.", + "properties": { + "availableCpuPlatforms": { + "type": "array", + "description": "[Output Only] Available cpu/platform selections for the zone.", + "items": { + "type": "string" + } + }, + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "deprecated": { + "$ref": "DeprecationStatus", + "description": "[Output Only] The deprecation status associated with this zone." + }, + "description": { + "type": "string", + "description": "[Output Only] Textual description of the resource." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#zone for zones.", + "default": "compute#zone" + }, + "name": { + "type": "string", + "description": "[Output Only] Name of the resource." + }, + "region": { + "type": "string", + "description": "[Output Only] Full URL reference to the region which hosts the zone." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "status": { + "type": "string", + "description": "[Output Only] Status of the zone, either UP or DOWN.", + "enum": [ + "DOWN", + "UP" + ], + "enumDescriptions": [ + "", + "" + ] + } + } + }, + "ZoneList": { + "id": "ZoneList", + "type": "object", + "description": "Contains a list of zone resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." + }, + "items": { + "type": "array", + "description": "A list of Zone resources.", + "items": { + "$ref": "Zone" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#zoneList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "ZoneSetLabelsRequest": { + "id": "ZoneSetLabelsRequest", + "type": "object", + "properties": { + "labelFingerprint": { + "type": "string", + "description": "The fingerprint of the previous set of labels for this resource, used to detect conflicts. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels. Make a get() request to the resource to get the latest fingerprint.", + "format": "byte" + }, + "labels": { + "type": "object", + "description": "The labels to set for this resource.", + "additionalProperties": { + "type": "string" + } + } + } + } + }, + "resources": { + "acceleratorTypes": { + "methods": { + "aggregatedList": { + "id": "compute.acceleratorTypes.aggregatedList", + "path": "{project}/aggregated/acceleratorTypes", + "httpMethod": "GET", + "description": "Retrieves an aggregated list of accelerator types.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "default": "500", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "AcceleratorTypeAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "get": { + "id": "compute.acceleratorTypes.get", + "path": "{project}/zones/{zone}/acceleratorTypes/{acceleratorType}", + "httpMethod": "GET", + "description": "Returns the specified accelerator type. 
Get a list of available accelerator types by making a list() request.", + "parameters": { + "acceleratorType": { + "type": "string", + "description": "Name of the accelerator type to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "acceleratorType" + ], + "response": { + "$ref": "AcceleratorType" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "list": { + "id": "compute.acceleratorTypes.list", + "path": "{project}/zones/{zone}/acceleratorTypes", + "httpMethod": "GET", + "description": "Retrieves a list of accelerator types available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "default": "500", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "response": { + "$ref": "AcceleratorTypeList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "addresses": { + "methods": { + "aggregatedList": { + "id": "compute.addresses.aggregatedList", + "path": "{project}/aggregated/addresses", + "httpMethod": "GET", + "description": "Retrieves an aggregated list of addresses.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "default": "500", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". 
This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "AddressAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "id": "compute.addresses.delete", + "path": "{project}/regions/{region}/addresses/{address}", + "httpMethod": "DELETE", + "description": "Deletes the specified address resource.", + "parameters": { + "address": { + "type": "string", + "description": "Name of the address resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + } + }, + "parameterOrder": [ + "project", + "region", + "address" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.addresses.get", + "path": "{project}/regions/{region}/addresses/{address}", + "httpMethod": "GET", + "description": "Returns the specified address resource.", + "parameters": { + "address": { + "type": "string", + "description": "Name of the address resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "address" + ], + "response": { + "$ref": "Address" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.addresses.insert", + "path": "{project}/regions/{region}/addresses", + "httpMethod": "POST", + "description": "Creates an address resource in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "request": { + "$ref": "Address" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.addresses.list", + "path": "{project}/regions/{region}/addresses", + "httpMethod": "GET", + "description": "Retrieves a list of addresses contained within the specified region.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "default": "500", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "response": { + "$ref": "AddressList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "setLabels": { + "id": "compute.addresses.setLabels", + "path": "{project}/regions/{region}/addresses/{resource}/setLabels", + "httpMethod": "POST", + "description": "Sets the labels on an Address. To learn more about labels, read the Labeling Resources documentation.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "The region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, + "resource": { + "type": "string", + "description": "Name of the resource for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "resource" + ], + "request": { + "$ref": "RegionSetLabelsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "testIamPermissions": { + "id": "compute.addresses.testIamPermissions", + "path": "{project}/regions/{region}/addresses/{resource}/testIamPermissions", + "httpMethod": "POST", + "description": "Returns permissions that a caller has on the specified resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "The name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "resource": { + "type": "string", + "description": "Name of the resource for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "resource" + ], + "request": { + "$ref": "TestPermissionsRequest" + }, + "response": { + "$ref": "TestPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] } } }, - "ZoneSetLabelsRequest": { - "id": "ZoneSetLabelsRequest", - "type": "object", - "properties": { - "labelFingerprint": { - "type": "string", - "description": "The fingerprint of the previous set of labels for this resource, used to detect conflicts. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels. 
Make a get() request to the resource to get the latest fingerprint.", - "format": "byte" - }, - "labels": { - "type": "object", - "description": "The labels to set for this resource.", - "additionalProperties": { - "type": "string" - } - } - } - } - }, - "resources": { - "acceleratorTypes": { + "autoscalers": { "methods": { "aggregatedList": { - "id": "compute.acceleratorTypes.aggregatedList", - "path": "{project}/aggregated/acceleratorTypes", + "id": "compute.autoscalers.aggregatedList", + "path": "{project}/aggregated/autoscalers", "httpMethod": "GET", - "description": "Retrieves an aggregated list of accelerator types.", + "description": "Retrieves an aggregated list of autoscalers.", "parameters": { "filter": { "type": "string", @@ -10166,7 +16183,7 @@ "project" ], "response": { - "$ref": "AcceleratorTypeAggregatedList" + "$ref": "AutoscalerAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -10174,15 +16191,61 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "delete": { + "id": "compute.autoscalers.delete", + "path": "{project}/zones/{zone}/autoscalers/{autoscaler}", + "httpMethod": "DELETE", + "description": "Deletes the specified autoscaler.", + "parameters": { + "autoscaler": { + "type": "string", + "description": "Name of the autoscaler to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, + "zone": { + "type": "string", + "description": "Name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "autoscaler" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "get": { - "id": "compute.acceleratorTypes.get", - "path": "{project}/zones/{zone}/acceleratorTypes/{acceleratorType}", + "id": "compute.autoscalers.get", + "path": "{project}/zones/{zone}/autoscalers/{autoscaler}", "httpMethod": "GET", - "description": "Returns the specified accelerator type. Get a list of available accelerator types by making a list() request.", + "description": "Returns the specified autoscaler resource. 
Get a list of available autoscalers by making a list() request.", "parameters": { - "acceleratorType": { + "autoscaler": { "type": "string", - "description": "Name of the accelerator type to return.", + "description": "Name of the autoscaler to return.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -10196,7 +16259,7 @@ }, "zone": { "type": "string", - "description": "The name of the zone for this request.", + "description": "Name of the zone for this request.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -10205,10 +16268,10 @@ "parameterOrder": [ "project", "zone", - "acceleratorType" + "autoscaler" ], "response": { - "$ref": "AcceleratorType" + "$ref": "Autoscaler" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -10216,11 +16279,52 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "insert": { + "id": "compute.autoscalers.insert", + "path": "{project}/zones/{zone}/autoscalers", + "httpMethod": "POST", + "description": "Creates an autoscaler in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, + "zone": { + "type": "string", + "description": "Name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "request": { + "$ref": "Autoscaler" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "list": { - "id": "compute.acceleratorTypes.list", - "path": "{project}/zones/{zone}/acceleratorTypes", + "id": "compute.autoscalers.list", + "path": "{project}/zones/{zone}/autoscalers", "httpMethod": "GET", - "description": "Retrieves a list of accelerator types available to the specified project.", + "description": "Retrieves a list of autoscalers contained within the specified zone.", "parameters": { "filter": { "type": "string", @@ -10254,7 +16358,7 @@ }, "zone": { "type": "string", - "description": "The name of the zone for this request.", + "description": "Name of the zone for this request.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -10265,76 +16369,166 @@ "zone" ], "response": { - "$ref": "AcceleratorTypeList" + "$ref": "AutoscalerList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - } - } - }, - "addresses": { - "methods": { - "aggregatedList": { - "id": "compute.addresses.aggregatedList", - "path": "{project}/aggregated/addresses", - "httpMethod": "GET", - "description": "Retrieves an aggregated list of addresses.", + }, + "patch": { + "id": "compute.autoscalers.patch", + "path": "{project}/zones/{zone}/autoscalers", + "httpMethod": "PATCH", + "description": "Updates an autoscaler in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "parameters": { + "autoscaler": { + "type": "string", + "description": "Name of the autoscaler to patch.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, + "zone": { + "type": "string", + "description": "Name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "request": { + "$ref": "Autoscaler" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "testIamPermissions": { + "id": "compute.autoscalers.testIamPermissions", + "path": "{project}/zones/{zone}/autoscalers/{resource}/testIamPermissions", + "httpMethod": "POST", + "description": "Returns permissions that a caller has on the specified resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "resource": { + "type": "string", + "description": "Name of the resource for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "resource" + ], + "request": { + "$ref": "TestPermissionsRequest" + }, + "response": { + "$ref": "TestPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "update": { + "id": "compute.autoscalers.update", + "path": "{project}/zones/{zone}/autoscalers", + "httpMethod": "PUT", + "description": "Updates an autoscaler in the specified project using the data included in the request.", "parameters": { - "filter": { + "autoscaler": { "type": "string", - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", + "description": "Name of the autoscaler to update.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "query" }, - "orderBy": { + "project": { "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" }, - "pageToken": { + "requestId": { "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query" }, - "project": { + "zone": { "type": "string", - "description": "Project ID for this request.", + "description": "Name of the zone for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" } }, "parameterOrder": [ - "project" + "project", + "zone" ], + "request": { + "$ref": "Autoscaler" + }, "response": { - "$ref": "AddressAggregatedList" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] - }, + } + } + }, + "backendBuckets": { + "methods": { "delete": { - "id": "compute.addresses.delete", - "path": "{project}/regions/{region}/addresses/{address}", + "id": "compute.backendBuckets.delete", + "path": "{project}/global/backendBuckets/{backendBucket}", "httpMethod": "DELETE", - "description": "Deletes the specified address resource.", + "description": "Deletes the specified BackendBucket resource.", "parameters": { - "address": { + "backendBucket": { "type": "string", - "description": "Name of the address resource to delete.", + "description": "Name of the BackendBucket resource to delete.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -10346,13 +16540,6 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, - "region": { - "type": "string", - "description": "Name of the region for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, "requestId": { "type": "string", "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", @@ -10361,8 +16548,7 @@ }, "parameterOrder": [ "project", - "region", - "address" + "backendBucket" ], "response": { "$ref": "Operation" @@ -10373,14 +16559,14 @@ ] }, "get": { - "id": "compute.addresses.get", - "path": "{project}/regions/{region}/addresses/{address}", + "id": "compute.backendBuckets.get", + "path": "{project}/global/backendBuckets/{backendBucket}", "httpMethod": "GET", - "description": "Returns the specified address resource.", + "description": "Returns the specified BackendBucket resource. 
Get a list of available backend buckets by making a list() request.", "parameters": { - "address": { + "backendBucket": { "type": "string", - "description": "Name of the address resource to return.", + "description": "Name of the BackendBucket resource to return.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -10391,22 +16577,14 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" } }, "parameterOrder": [ "project", - "region", - "address" + "backendBucket" ], "response": { - "$ref": "Address" + "$ref": "BackendBucket" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -10415,10 +16593,10 @@ ] }, "insert": { - "id": "compute.addresses.insert", - "path": "{project}/regions/{region}/addresses", + "id": "compute.backendBuckets.insert", + "path": "{project}/global/backendBuckets", "httpMethod": "POST", - "description": "Creates an address resource in the specified project using the data included in the request.", + "description": "Creates a BackendBucket resource in the specified project using the data included in the request.", "parameters": { "project": { "type": "string", @@ -10427,13 +16605,6 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, - "region": { - "type": "string", - "description": "Name of the region for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, "requestId": { "type": "string", "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", @@ -10441,11 +16612,10 @@ } }, "parameterOrder": [ - "project", - "region" + "project" ], "request": { - "$ref": "Address" + "$ref": "BackendBucket" }, "response": { "$ref": "Operation" @@ -10456,10 +16626,10 @@ ] }, "list": { - "id": "compute.addresses.list", - "path": "{project}/regions/{region}/addresses", + "id": "compute.backendBuckets.list", + "path": "{project}/global/backendBuckets", "httpMethod": "GET", - "description": "Retrieves a list of addresses contained within the specified region.", + "description": "Retrieves the list of BackendBucket resources available to the specified project.", "parameters": { "filter": { "type": "string", @@ -10490,21 +16660,13 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" } }, "parameterOrder": [ - "project", - "region" + "project" ], "response": { - "$ref": "AddressList" + "$ref": "BackendBucketList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -10512,46 +16674,38 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "setLabels": { - "id": "compute.addresses.setLabels", - "path": "{project}/regions/{region}/addresses/{resource}/setLabels", - "httpMethod": "POST", - "description": "Sets the labels on an Address. To learn more about labels, read the Labeling Resources documentation.", + "patch": { + "id": "compute.backendBuckets.patch", + "path": "{project}/global/backendBuckets/{backendBucket}", + "httpMethod": "PATCH", + "description": "Updates the specified BackendBucket resource with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", "parameters": { - "project": { + "backendBucket": { "type": "string", - "description": "Project ID for this request.", + "description": "Name of the BackendBucket resource to patch.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, - "region": { + "project": { "type": "string", - "description": "The region for this request.", + "description": "Project ID for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "requestId": { "type": "string", "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query" - }, - "resource": { - "type": "string", - "description": "Name of the resource for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" } }, "parameterOrder": [ "project", - "region", - "resource" + "backendBucket" ], "request": { - "$ref": "RegionSetLabelsRequest" + "$ref": "BackendBucket" }, "response": { "$ref": "Operation" @@ -10561,60 +16715,56 @@ "https://www.googleapis.com/auth/compute" ] }, - "testIamPermissions": { - "id": "compute.addresses.testIamPermissions", - "path": "{project}/regions/{region}/addresses/{resource}/testIamPermissions", - "httpMethod": "POST", - "description": "Returns permissions that a caller has on the specified resource.", + "update": { + "id": "compute.backendBuckets.update", + "path": "{project}/global/backendBuckets/{backendBucket}", + "httpMethod": "PUT", + "description": "Updates the specified BackendBucket resource with the data included in the request.", "parameters": { - "project": { + "backendBucket": { "type": "string", - "description": "Project ID for this request.", + "description": "Name of the BackendBucket resource to update.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, - "region": { + "project": { "type": "string", - "description": "The name of the region for this request.", + "description": "Project ID for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, - "resource": { + "requestId": { "type": "string", - "description": "Name of the resource for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ "project", - "region", - "resource" + "backendBucket" ], "request": { - "$ref": "TestPermissionsRequest" + "$ref": "BackendBucket" }, "response": { - "$ref": "TestPermissionsResponse" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] } } }, - "autoscalers": { + "backendServices": { "methods": { "aggregatedList": { - "id": "compute.autoscalers.aggregatedList", - "path": "{project}/aggregated/autoscalers", + "id": "compute.backendServices.aggregatedList", + "path": "{project}/aggregated/backendServices", "httpMethod": "GET", - "description": "Retrieves an aggregated list of autoscalers.", + "description": "Retrieves the list of all BackendService resources, regional and global, available to the specified project.", "parameters": { "filter": { "type": "string", @@ -10641,7 +16791,7 @@ }, "project": { "type": "string", - "description": "Project ID for this request.", + "description": "Name of the project scoping this request.", "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" @@ -10651,7 +16801,7 @@ "project" ], "response": { - "$ref": "AutoscalerAggregatedList" + "$ref": "BackendServiceAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -10660,14 +16810,14 @@ ] }, "delete": { - "id": "compute.autoscalers.delete", - "path": "{project}/zones/{zone}/autoscalers/{autoscaler}", + "id": "compute.backendServices.delete", + "path": "{project}/global/backendServices/{backendService}", "httpMethod": "DELETE", - "description": "Deletes the specified autoscaler.", + "description": "Deletes the specified BackendService resource.", "parameters": { - "autoscaler": { + "backendService": { "type": "string", - "description": "Name of the autoscaler to delete.", + "description": "Name of the BackendService resource to delete.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -10683,63 +16833,83 @@ "type": "string", "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query" + } + }, + "parameterOrder": [ + "project", + "backendService" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.backendServices.get", + "path": "{project}/global/backendServices/{backendService}", + "httpMethod": "GET", + "description": "Returns the specified BackendService resource. Get a list of available backend services by making a list() request.", + "parameters": { + "backendService": { + "type": "string", + "description": "Name of the BackendService resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" }, - "zone": { + "project": { "type": "string", - "description": "Name of the zone for this request.", + "description": "Project ID for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, "parameterOrder": [ "project", - "zone", - "autoscaler" + "backendService" ], "response": { - "$ref": "Operation" + "$ref": "BackendService" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "get": { - "id": "compute.autoscalers.get", - "path": "{project}/zones/{zone}/autoscalers/{autoscaler}", - "httpMethod": "GET", - "description": "Returns the specified autoscaler resource. 
Get a list of available autoscalers by making a list() request.", + "getHealth": { + "id": "compute.backendServices.getHealth", + "path": "{project}/global/backendServices/{backendService}/getHealth", + "httpMethod": "POST", + "description": "Gets the most recent health check results for this BackendService.", "parameters": { - "autoscaler": { + "backendService": { "type": "string", - "description": "Name of the autoscaler to return.", + "description": "Name of the BackendService resource to which the queried instance belongs.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, "project": { "type": "string", - "description": "Project ID for this request.", "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" - }, - "zone": { - "type": "string", - "description": "Name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" } }, "parameterOrder": [ "project", - "zone", - "autoscaler" + "backendService" ], + "request": { + "$ref": "ResourceGroupReference" + }, "response": { - "$ref": "Autoscaler" + "$ref": "BackendServiceGroupHealth" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -10748,10 +16918,10 @@ ] }, "insert": { - "id": "compute.autoscalers.insert", - "path": "{project}/zones/{zone}/autoscalers", + "id": "compute.backendServices.insert", + "path": "{project}/global/backendServices", "httpMethod": "POST", - "description": "Creates an autoscaler in the specified project using the data included in the request.", + "description": "Creates a BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a backend service. Read Restrictions and Guidelines for more information.", "parameters": { "project": { "type": "string", @@ -10764,21 +16934,13 @@ "type": "string", "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query" - }, - "zone": { - "type": "string", - "description": "Name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" } }, "parameterOrder": [ - "project", - "zone" + "project" ], "request": { - "$ref": "Autoscaler" + "$ref": "BackendService" }, "response": { "$ref": "Operation" @@ -10789,10 +16951,10 @@ ] }, "list": { - "id": "compute.autoscalers.list", - "path": "{project}/zones/{zone}/autoscalers", + "id": "compute.backendServices.list", + "path": "{project}/global/backendServices", "httpMethod": "GET", - "description": "Retrieves a list of autoscalers contained within the specified zone.", + "description": "Retrieves the list of BackendService resources available to the specified project.", "parameters": { "filter": { "type": "string", @@ -10823,21 +16985,13 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" - }, - "zone": { - "type": "string", - "description": "Name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" } }, "parameterOrder": [ - "project", - "zone" + "project" ], "response": { - "$ref": "AutoscalerList" + "$ref": "BackendServiceList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -10846,16 +17000,17 @@ ] }, "patch": { - "id": "compute.autoscalers.patch", - "path": "{project}/zones/{zone}/autoscalers", + "id": "compute.backendServices.patch", + "path": "{project}/global/backendServices/{backendService}", "httpMethod": "PATCH", - "description": "Updates an autoscaler in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "description": "Patches the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", "parameters": { - "autoscaler": { + "backendService": { "type": "string", - "description": "Name of the autoscaler to patch.", + "description": "Name of the BackendService resource to patch.", + "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "query" + "location": "path" }, "project": { "type": "string", @@ -10868,34 +17023,66 @@ "type": "string", "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query" + } + }, + "parameterOrder": [ + "project", + "backendService" + ], + "request": { + "$ref": "BackendService" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setSecurityPolicy": { + "id": "compute.backendServices.setSecurityPolicy", + "path": "{project}/global/backendServices/{backendService}/setSecurityPolicy", + "httpMethod": "POST", + "description": "Sets the security policy for the specified backend service.", + "parameters": { + "backendService": { + "type": "string", + "description": "Name of the BackendService resource to which the security policy should be set. The name should conform to RFC1035.", + "required": true, + "location": "path" }, - "zone": { + "project": { "type": "string", - "description": "Name of the zone for this request.", + "description": "Project ID for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ "project", - "zone" + "backendService" ], "request": { - "$ref": "Autoscaler" + "$ref": "SecurityPolicyReference" }, "response": { "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "testIamPermissions": { - "id": "compute.autoscalers.testIamPermissions", - "path": "{project}/zones/{zone}/autoscalers/{resource}/testIamPermissions", + "id": "compute.backendServices.testIamPermissions", + "path": "{project}/global/backendServices/{resource}/testIamPermissions", "httpMethod": "POST", "description": "Returns permissions that a caller has on the specified resource.", "parameters": { @@ -10910,20 +17097,12 @@ "type": "string", "description": "Name of the resource for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", "location": "path" } }, "parameterOrder": [ "project", - "zone", "resource" ], "request": { @@ -10939,16 +17118,17 @@ ] }, "update": { - "id": "compute.autoscalers.update", - "path": "{project}/zones/{zone}/autoscalers", + "id": "compute.backendServices.update", + "path": "{project}/global/backendServices/{backendService}", "httpMethod": "PUT", - "description": "Updates an autoscaler in the specified project using the data included in the request.", + "description": "Updates the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information.", "parameters": { - "autoscaler": { + "backendService": { "type": "string", - "description": "Name of the autoscaler to update.", + "description": "Name of the BackendService resource to update.", + "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "query" + "location": "path" }, "project": { "type": "string", @@ -10961,21 +17141,14 @@ "type": "string", "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query" - }, - "zone": { - "type": "string", - "description": "Name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" } }, "parameterOrder": [ "project", - "zone" + "backendService" ], "request": { - "$ref": "Autoscaler" + "$ref": "BackendService" }, "response": { "$ref": "Operation" @@ -10987,20 +17160,36 @@ } } }, - "backendBuckets": { + "diskTypes": { "methods": { - "delete": { - "id": "compute.backendBuckets.delete", - "path": "{project}/global/backendBuckets/{backendBucket}", - "httpMethod": "DELETE", - "description": "Deletes the specified BackendBucket resource.", + "aggregatedList": { + "id": "compute.diskTypes.aggregatedList", + "path": "{project}/aggregated/diskTypes", + "httpMethod": "GET", + "description": "Retrieves an aggregated list of disk types.", "parameters": { - "backendBucket": { + "filter": { "type": "string", - "description": "Name of the BackendBucket resource to delete.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" + "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "default": "500", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" }, "project": { "type": "string", @@ -11008,34 +17197,29 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" - }, - "requestId": { - "type": "string", - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query" } }, "parameterOrder": [ - "project", - "backendBucket" + "project" ], "response": { - "$ref": "Operation" + "$ref": "DiskTypeAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, "get": { - "id": "compute.backendBuckets.get", - "path": "{project}/global/backendBuckets/{backendBucket}", + "id": "compute.diskTypes.get", + "path": "{project}/zones/{zone}/diskTypes/{diskType}", "httpMethod": "GET", - "description": "Returns the specified BackendBucket resource. Get a list of available backend buckets by making a list() request.", + "description": "Returns the specified disk type. 
Get a list of available disk types by making a list() request.", "parameters": { - "backendBucket": { + "diskType": { "type": "string", - "description": "Name of the BackendBucket resource to return.", + "description": "Name of the disk type to return.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -11046,14 +17230,22 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" } }, "parameterOrder": [ "project", - "backendBucket" + "zone", + "diskType" ], "response": { - "$ref": "BackendBucket" + "$ref": "DiskType" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -11061,12 +17253,35 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "insert": { - "id": "compute.backendBuckets.insert", - "path": "{project}/global/backendBuckets", - "httpMethod": "POST", - "description": "Creates a BackendBucket resource in the specified project using the data included in the request.", + "list": { + "id": "compute.diskTypes.list", + "path": "{project}/zones/{zone}/diskTypes", + "httpMethod": "GET", + "description": "Retrieves a list of disk types available to the specified project.", "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "default": "500", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, "project": { "type": "string", "description": "Project ID for this request.", @@ -11074,31 +17289,36 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, - "requestId": { + "zone": { "type": "string", - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query" + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" } }, "parameterOrder": [ - "project" + "project", + "zone" ], - "request": { - "$ref": "BackendBucket" - }, "response": { - "$ref": "Operation" + "$ref": "DiskTypeList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] - }, - "list": { - "id": "compute.backendBuckets.list", - "path": "{project}/global/backendBuckets", + } + } + }, + "disks": { + "methods": { + "aggregatedList": { + "id": "compute.disks.aggregatedList", + "path": "{project}/aggregated/disks", "httpMethod": "GET", - "description": "Retrieves the list of BackendBucket resources available to the specified project.", + "description": "Retrieves an aggregated list of persistent disks.", "parameters": { "filter": { "type": "string", @@ -11135,7 +17355,7 @@ "project" ], "response": { - "$ref": "BackendBucketList" + "$ref": "DiskAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -11143,19 +17363,23 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "patch": { - "id": "compute.backendBuckets.patch", - "path": "{project}/global/backendBuckets/{backendBucket}", - "httpMethod": "PATCH", - "description": "Updates the specified BackendBucket resource with the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "createSnapshot": { + "id": "compute.disks.createSnapshot", + "path": "{project}/zones/{zone}/disks/{disk}/createSnapshot", + "httpMethod": "POST", + "description": "Creates a snapshot of a specified persistent disk.", "parameters": { - "backendBucket": { + "disk": { "type": "string", - "description": "Name of the BackendBucket resource to patch.", + "description": "Name of the persistent disk to snapshot.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, + "guestFlush": { + "type": "boolean", + "location": "query" + }, "project": { "type": "string", "description": "Project ID for this request.", @@ -11167,35 +17391,41 @@ "type": "string", "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" } }, "parameterOrder": [ "project", - "backendBucket" + "zone", + "disk" ], "request": { - "$ref": "BackendBucket" + "$ref": "Snapshot" }, "response": { "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "update": { - "id": "compute.backendBuckets.update", - "path": "{project}/global/backendBuckets/{backendBucket}", - "httpMethod": "PUT", - "description": "Updates the specified BackendBucket resource with the data included in the request.", + "delete": { + "id": "compute.disks.delete", + "path": "{project}/zones/{zone}/disks/{disk}", + "httpMethod": "DELETE", + "description": "Deletes the specified persistent disk. Deleting a disk removes its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. You must separately delete snapshots.", "parameters": { - "backendBucket": { + "disk": { "type": "string", - "description": "Name of the BackendBucket resource to update.", + "description": "Name of the persistent disk to delete.", "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, "project": { @@ -11209,15 +17439,20 @@ "type": "string", "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" } }, "parameterOrder": [ "project", - "backendBucket" + "zone", + "disk" ], - "request": { - "$ref": "BackendBucket" - }, "response": { "$ref": "Operation" }, @@ -11225,53 +17460,42 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - } - } - }, - "backendServices": { - "methods": { - "aggregatedList": { - "id": "compute.backendServices.aggregatedList", - "path": "{project}/aggregated/backendServices", + }, + "get": { + "id": "compute.disks.get", + "path": "{project}/zones/{zone}/disks/{disk}", "httpMethod": "GET", - "description": "Retrieves the list of all BackendService resources, regional and global, available to the specified project.", + "description": "Returns a specified persistent disk. Get a list of available persistent disks by making a list() request.", "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { + "disk": { "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" + "description": "Name of the persistent disk to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" }, "project": { "type": "string", - "description": "Name of the project scoping this request.", + "description": "Project ID for this request.", "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" } }, "parameterOrder": [ - "project" + "project", + "zone", + "disk" ], "response": { - "$ref": "BackendServiceAggregatedList" + "$ref": "Disk" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -11279,19 +17503,12 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "delete": { - "id": "compute.backendServices.delete", - "path": "{project}/global/backendServices/{backendService}", - "httpMethod": "DELETE", - "description": "Deletes the specified BackendService resource.", + "insert": { + "id": "compute.disks.insert", + "path": "{project}/zones/{zone}/disks", + "httpMethod": "POST", + "description": "Creates a persistent disk in the specified project using the data in the request. You can create a disk with a sourceImage, a sourceSnapshot, or create an empty 500 GB data disk by omitting all properties. You can also create a disk that is larger than the default size by specifying the sizeGb property.", "parameters": { - "backendService": { - "type": "string", - "description": "Name of the BackendService resource to delete.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, "project": { "type": "string", "description": "Project ID for this request.", @@ -11303,12 +17520,27 @@ "type": "string", "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query" + }, + "sourceImage": { + "type": "string", + "description": "Optional. 
Source image to restore onto a disk.", + "location": "query" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" } }, "parameterOrder": [ "project", - "backendService" + "zone" ], + "request": { + "$ref": "Disk" + }, "response": { "$ref": "Operation" }, @@ -11317,18 +17549,34 @@ "https://www.googleapis.com/auth/compute" ] }, - "get": { - "id": "compute.backendServices.get", - "path": "{project}/global/backendServices/{backendService}", + "list": { + "id": "compute.disks.list", + "path": "{project}/zones/{zone}/disks", "httpMethod": "GET", - "description": "Returns the specified BackendService resource. Get a list of available backend services by making a list() request.", + "description": "Retrieves a list of persistent disks contained within the specified zone.", "parameters": { - "backendService": { + "filter": { "type": "string", - "description": "Name of the BackendService resource to return.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" + "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "default": "500", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" }, "project": { "type": "string", @@ -11336,14 +17584,21 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" } }, "parameterOrder": [ "project", - "backendService" + "zone" ], "response": { - "$ref": "BackendService" + "$ref": "DiskList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -11351,47 +17606,60 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "getHealth": { - "id": "compute.backendServices.getHealth", - "path": "{project}/global/backendServices/{backendService}/getHealth", + "resize": { + "id": "compute.disks.resize", + "path": "{project}/zones/{zone}/disks/{disk}/resize", "httpMethod": "POST", - "description": "Gets the most recent health check results for this BackendService.", + "description": "Resizes the specified persistent disk.", "parameters": { - "backendService": { + "disk": { "type": "string", - "description": "Name of the BackendService resource to which the queried instance belongs.", + "description": "The name of the persistent disk.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, "project": { "type": "string", + "description": "Project ID for this request.", "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" } }, "parameterOrder": [ "project", - "backendService" + "zone", + "disk" ], "request": { - "$ref": "ResourceGroupReference" + "$ref": "DisksResizeRequest" }, "response": { - "$ref": "BackendServiceGroupHealth" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "insert": { - "id": "compute.backendServices.insert", - "path": "{project}/global/backendServices", + "setLabels": { + "id": "compute.disks.setLabels", + "path": "{project}/zones/{zone}/disks/{resource}/setLabels", "httpMethod": "POST", - "description": "Creates a BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a backend service. Read Restrictions and Guidelines for more information.", + "description": "Sets the labels on a disk. To learn more about labels, read the Labeling Resources documentation.", "parameters": { "project": { "type": "string", @@ -11404,13 +17672,29 @@ "type": "string", "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query" + }, + "resource": { + "type": "string", + "description": "Name of the resource for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" } }, "parameterOrder": [ - "project" + "project", + "zone", + "resource" ], "request": { - "$ref": "BackendService" + "$ref": "ZoneSetLabelsRequest" }, "response": { "$ref": "Operation" @@ -11420,64 +17704,64 @@ "https://www.googleapis.com/auth/compute" ] }, - "list": { - "id": "compute.backendServices.list", - "path": "{project}/global/backendServices", - "httpMethod": "GET", - "description": "Retrieves the list of BackendService resources available to the specified project.", + "testIamPermissions": { + "id": "compute.disks.testIamPermissions", + "path": "{project}/zones/{zone}/disks/{resource}/testIamPermissions", + "httpMethod": "POST", + "description": "Returns permissions that a caller has on the specified resource.", "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter {expression} for filtering listed resources. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { + "project": { "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" }, - "pageToken": { + "resource": { "type": "string", - "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" + "description": "Name of the resource for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" }, - "project": { + "zone": { "type": "string", - "description": "Project ID for this request.", + "description": "The name of the zone for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" } }, "parameterOrder": [ - "project" + "project", + "zone", + "resource" ], + "request": { + "$ref": "TestPermissionsRequest" + }, "response": { - "$ref": "BackendServiceList" + "$ref": "TestPermissionsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - }, - "patch": { - "id": "compute.backendServices.patch", - "path": "{project}/global/backendServices/{backendService}", - "httpMethod": "PATCH", - "description": "Patches the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + } + } + }, + "firewalls": { + "methods": { + "delete": { + "id": "compute.firewalls.delete", + "path": "{project}/global/firewalls/{firewall}", + "httpMethod": "DELETE", + "description": "Deletes the specified firewall.", "parameters": { - "backendService": { + "firewall": { "type": "string", - "description": "Name of the BackendService resource to patch.", + "description": "Name of the firewall rule to delete.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -11497,50 +17781,43 @@ }, "parameterOrder": [ "project", - "backendService" + "firewall" ], - "request": { - "$ref": "BackendService" - }, "response": { "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "testIamPermissions": { - "id": "compute.backendServices.testIamPermissions", - "path": "{project}/global/backendServices/{resource}/testIamPermissions", - "httpMethod": "POST", - "description": "Returns permissions that a caller has on the specified resource.", + "get": { + "id": "compute.firewalls.get", + "path": "{project}/global/firewalls/{firewall}", + "httpMethod": "GET", + "description": "Returns the specified firewall.", "parameters": { - "project": { + "firewall": { "type": "string", - "description": "Project ID for this request.", + "description": "Name of the firewall rule to return.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, - "resource": { + "project": { "type": "string", - "description": "Name of the resource for this request.", + "description": "Project ID for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "pattern": 
"(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, "parameterOrder": [ "project", - "resource" + "firewall" ], - "request": { - "$ref": "TestPermissionsRequest" - }, "response": { - "$ref": "TestPermissionsResponse" + "$ref": "Firewall" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -11548,19 +17825,12 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "update": { - "id": "compute.backendServices.update", - "path": "{project}/global/backendServices/{backendService}", - "httpMethod": "PUT", - "description": "Updates the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information.", + "insert": { + "id": "compute.firewalls.insert", + "path": "{project}/global/firewalls", + "httpMethod": "POST", + "description": "Creates a firewall rule in the specified project using the data included in the request.", "parameters": { - "backendService": { - "type": "string", - "description": "Name of the BackendService resource to update.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, "project": { "type": "string", "description": "Project ID for this request.", @@ -11575,11 +17845,10 @@ } }, "parameterOrder": [ - "project", - "backendService" + "project" ], "request": { - "$ref": "BackendService" + "$ref": "Firewall" }, "response": { "$ref": "Operation" @@ -11588,16 +17857,12 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - } - } - }, - "diskTypes": { - "methods": { - "aggregatedList": { - "id": "compute.diskTypes.aggregatedList", - "path": "{project}/aggregated/diskTypes", + }, + "list": { + "id": "compute.firewalls.list", + "path": "{project}/global/firewalls", "httpMethod": "GET", - "description": "Retrieves an aggregated list of disk types.", + "description": "Retrieves the list of firewall rules available to the specified project.", "parameters": { "filter": { "type": "string", @@ -11634,7 +17899,7 @@ "project" ], "response": { - "$ref": "DiskTypeAggregatedList" + "$ref": "FirewallList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -11642,15 +17907,15 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "get": { - "id": "compute.diskTypes.get", - "path": "{project}/zones/{zone}/diskTypes/{diskType}", - "httpMethod": "GET", - "description": "Returns the specified disk type. Get a list of available disk types by making a list() request.", + "patch": { + "id": "compute.firewalls.patch", + "path": "{project}/global/firewalls/{firewall}", + "httpMethod": "PATCH", + "description": "Updates the specified firewall rule with the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", "parameters": { - "diskType": { + "firewall": { "type": "string", - "description": "Name of the disk type to return.", + "description": "Name of the firewall rule to patch.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -11662,21 +17927,57 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, - "zone": { + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + } + }, + "parameterOrder": [ + "project", + "firewall" + ], + "request": { + "$ref": "Firewall" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "testIamPermissions": { + "id": "compute.firewalls.testIamPermissions", + "path": "{project}/global/firewalls/{resource}/testIamPermissions", + "httpMethod": "POST", + "description": "Returns permissions that a caller has on the specified resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "resource": { "type": "string", - "description": "The name of the zone for this request.", + "description": "Name of the resource for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", "location": "path" } }, "parameterOrder": [ "project", - "zone", - "diskType" + "resource" ], + "request": { + "$ref": "TestPermissionsRequest" + }, "response": { - "$ref": "DiskType" + "$ref": "TestPermissionsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -11684,34 +17985,18 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "list": { - "id": "compute.diskTypes.list", - "path": "{project}/zones/{zone}/diskTypes", - "httpMethod": "GET", - "description": "Retrieves a list of disk types available to the specified project.", + "update": { + "id": "compute.firewalls.update", + "path": "{project}/global/firewalls/{firewall}", + "httpMethod": "PUT", + "description": "Updates the specified firewall rule with the data included in the request. Using PUT method, can only update following fields of firewall rule: allowed, description, sourceRanges, sourceTags, targetTags.", "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter {expression} for filtering listed resources. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { + "firewall": { "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" + "description": "Name of the firewall rule to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" }, "project": { "type": "string", @@ -11720,36 +18005,36 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, - "zone": { + "requestId": { "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ "project", - "zone" + "firewall" ], + "request": { + "$ref": "Firewall" + }, "response": { - "$ref": "DiskTypeList" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] } } }, - "disks": { + "forwardingRules": { "methods": { "aggregatedList": { - "id": "compute.disks.aggregatedList", - "path": "{project}/aggregated/disks", + "id": "compute.forwardingRules.aggregatedList", + "path": "{project}/aggregated/forwardingRules", "httpMethod": "GET", - "description": "Retrieves an aggregated list of persistent disks.", + "description": "Retrieves an aggregated list of forwarding rules.", "parameters": { "filter": { "type": "string", @@ -11786,7 +18071,7 @@ "project" ], "response": { - "$ref": "DiskAggregatedList" + "$ref": "ForwardingRuleAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -11794,23 +18079,19 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "createSnapshot": { - "id": "compute.disks.createSnapshot", - "path": "{project}/zones/{zone}/disks/{disk}/createSnapshot", - "httpMethod": "POST", - "description": "Creates a snapshot of a specified persistent disk.", + "delete": { + "id": "compute.forwardingRules.delete", + "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", + "httpMethod": "DELETE", + "description": "Deletes the specified ForwardingRule resource.", "parameters": { - "disk": { + "forwardingRule": { "type": "string", - "description": "Name of the persistent disk to snapshot.", + "description": "Name of the ForwardingRule resource to delete.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, - "guestFlush": { - "type": "boolean", - "location": "query" - }, "project": { "type": "string", "description": "Project ID for this request.", @@ -11818,71 +18099,23 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, - "requestId": { - "type": "string", - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query" - }, - "zone": { + "region": { "type": "string", - "description": "The name of the zone for this request.", + "description": "Name of the region scoping this request.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "disk" - ], - "request": { - "$ref": "Snapshot" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "delete": { - "id": "compute.disks.delete", - "path": "{project}/zones/{zone}/disks/{disk}", - "httpMethod": "DELETE", - "description": "Deletes the specified persistent disk. Deleting a disk removes its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. You must separately delete snapshots.", - "parameters": { - "disk": { - "type": "string", - "description": "Name of the persistent disk to delete.", - "required": true, - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" }, "requestId": { "type": "string", "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" } }, "parameterOrder": [ "project", - "zone", - "disk" + "region", + "forwardingRule" ], "response": { "$ref": "Operation" @@ -11893,14 +18126,14 @@ ] }, "get": { - "id": "compute.disks.get", - "path": "{project}/zones/{zone}/disks/{disk}", + "id": "compute.forwardingRules.get", + "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", "httpMethod": "GET", - "description": "Returns a specified persistent disk. 
Get a list of available persistent disks by making a list() request.", + "description": "Returns the specified ForwardingRule resource.", "parameters": { - "disk": { + "forwardingRule": { "type": "string", - "description": "Name of the persistent disk to return.", + "description": "Name of the ForwardingRule resource to return.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -11912,9 +18145,9 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, - "zone": { + "region": { "type": "string", - "description": "The name of the zone for this request.", + "description": "Name of the region scoping this request.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -11922,11 +18155,11 @@ }, "parameterOrder": [ "project", - "zone", - "disk" + "region", + "forwardingRule" ], "response": { - "$ref": "Disk" + "$ref": "ForwardingRule" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -11935,10 +18168,10 @@ ] }, "insert": { - "id": "compute.disks.insert", - "path": "{project}/zones/{zone}/disks", + "id": "compute.forwardingRules.insert", + "path": "{project}/regions/{region}/forwardingRules", "httpMethod": "POST", - "description": "Creates a persistent disk in the specified project using the data in the request. You can create a disk with a sourceImage, a sourceSnapshot, or create an empty 500 GB data disk by omitting all properties. You can also create a disk that is larger than the default size by specifying the sizeGb property.", + "description": "Creates a ForwardingRule resource in the specified project and region using the data included in the request.", "parameters": { "project": { "type": "string", @@ -11947,30 +18180,25 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, - "requestId": { - "type": "string", - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query" - }, - "sourceImage": { - "type": "string", - "description": "Optional. Source image to restore onto a disk.", - "location": "query" - }, - "zone": { + "region": { "type": "string", - "description": "The name of the zone for this request.", + "description": "Name of the region scoping this request.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ "project", - "zone" + "region" ], "request": { - "$ref": "Disk" + "$ref": "ForwardingRule" }, "response": { "$ref": "Operation" @@ -11981,10 +18209,10 @@ ] }, "list": { - "id": "compute.disks.list", - "path": "{project}/zones/{zone}/disks", + "id": "compute.forwardingRules.list", + "path": "{project}/regions/{region}/forwardingRules", "httpMethod": "GET", - "description": "Retrieves a list of persistent disks contained within the specified zone.", + "description": "Retrieves a list of ForwardingRule resources available to the specified project and region.", "parameters": { "filter": { "type": "string", @@ -12016,9 +18244,9 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, - "zone": { + "region": { "type": "string", - "description": "The name of the zone for this request.", + "description": "Name of the region scoping this request.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -12026,10 +18254,10 @@ }, "parameterOrder": [ "project", - "zone" + "region" ], "response": { - "$ref": "DiskList" + "$ref": "ForwardingRuleList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -12037,24 +18265,24 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "resize": { - "id": "compute.disks.resize", - "path": "{project}/zones/{zone}/disks/{disk}/resize", + "setLabels": { + "id": "compute.forwardingRules.setLabels", + "path": "{project}/regions/{region}/forwardingRules/{resource}/setLabels", "httpMethod": "POST", - "description": "Resizes the specified persistent disk.", + "description": "Sets the labels on the specified resource. To learn more about labels, read the Labeling Resources documentation.", "parameters": { - "disk": { + "project": { "type": "string", - "description": "The name of the persistent disk.", + "description": "Project ID for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, - "project": { + "region": { "type": "string", - "description": "Project ID for this request.", + "description": "The region for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, "requestId": { @@ -12062,9 +18290,9 @@ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query" }, - "zone": { + "resource": { "type": "string", - "description": "The name of the zone for this request.", + "description": "Name of the resource for this request.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -12072,11 +18300,11 @@ }, "parameterOrder": [ "project", - "zone", - "disk" + "region", + "resource" ], "request": { - "$ref": "DisksResizeRequest" + "$ref": "RegionSetLabelsRequest" }, "response": { "$ref": "Operation" @@ -12086,12 +18314,19 @@ "https://www.googleapis.com/auth/compute" ] }, - "setLabels": { - "id": "compute.disks.setLabels", - "path": "{project}/zones/{zone}/disks/{resource}/setLabels", + "setTarget": { + "id": "compute.forwardingRules.setTarget", + "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget", "httpMethod": "POST", - "description": "Sets the labels on a disk. To learn more about labels, read the Labeling Resources documentation.", + "description": "Changes target URL for forwarding rule. The new target should be of the same type as the old target.", "parameters": { + "forwardingRule": { + "type": "string", + "description": "Name of the ForwardingRule resource in which target is to be set.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, "project": { "type": "string", "description": "Project ID for this request.", @@ -12099,33 +18334,26 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, - "requestId": { - "type": "string", - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query" - }, - "resource": { + "region": { "type": "string", - "description": "Name of the resource for this request.", + "description": "Name of the region scoping this request.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, - "zone": { + "requestId": { "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ "project", - "zone", - "resource" + "region", + "forwardingRule" ], "request": { - "$ref": "ZoneSetLabelsRequest" + "$ref": "TargetReference" }, "response": { "$ref": "Operation" @@ -12136,8 +18364,8 @@ ] }, "testIamPermissions": { - "id": "compute.disks.testIamPermissions", - "path": "{project}/zones/{zone}/disks/{resource}/testIamPermissions", + "id": "compute.forwardingRules.testIamPermissions", + "path": "{project}/regions/{region}/forwardingRules/{resource}/testIamPermissions", "httpMethod": "POST", "description": "Returns permissions that a caller has on the specified resource.", "parameters": { @@ -12148,16 +18376,16 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, - "resource": { + "region": { "type": "string", - "description": "Name of the resource for this request.", + "description": "The name of the region for this request.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, - "zone": { + "resource": { "type": "string", - "description": "The name of the zone for this request.", + "description": "Name of the resource for this request.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -12165,7 +18393,7 @@ }, "parameterOrder": [ "project", - "zone", + "region", "resource" ], "request": { @@ -12182,17 +18410,17 @@ } } }, - "firewalls": { + "globalAddresses": { "methods": { "delete": { - "id": "compute.firewalls.delete", - "path": "{project}/global/firewalls/{firewall}", + "id": "compute.globalAddresses.delete", + "path": "{project}/global/addresses/{address}", "httpMethod": "DELETE", - "description": "Deletes the specified firewall.", + "description": "Deletes the specified address resource.", "parameters": { - "firewall": { + "address": { "type": "string", - "description": "Name of the firewall rule to delete.", + "description": "Name of the address resource to delete.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -12212,7 +18440,7 @@ }, "parameterOrder": [ "project", - "firewall" + "address" ], "response": { "$ref": "Operation" @@ -12223,14 +18451,14 @@ ] }, "get": { - "id": "compute.firewalls.get", - "path": "{project}/global/firewalls/{firewall}", + "id": "compute.globalAddresses.get", + "path": "{project}/global/addresses/{address}", "httpMethod": "GET", - "description": "Returns the specified firewall.", + "description": "Returns the specified address resource. 
Get a list of available addresses by making a list() request.", "parameters": { - "firewall": { + "address": { "type": "string", - "description": "Name of the firewall rule to return.", + "description": "Name of the address resource to return.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -12245,10 +18473,10 @@ }, "parameterOrder": [ "project", - "firewall" + "address" ], "response": { - "$ref": "Firewall" + "$ref": "Address" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -12257,10 +18485,10 @@ ] }, "insert": { - "id": "compute.firewalls.insert", - "path": "{project}/global/firewalls", + "id": "compute.globalAddresses.insert", + "path": "{project}/global/addresses", "httpMethod": "POST", - "description": "Creates a firewall rule in the specified project using the data included in the request.", + "description": "Creates an address resource in the specified project using the data included in the request.", "parameters": { "project": { "type": "string", @@ -12279,7 +18507,7 @@ "project" ], "request": { - "$ref": "Firewall" + "$ref": "Address" }, "response": { "$ref": "Operation" @@ -12290,10 +18518,10 @@ ] }, "list": { - "id": "compute.firewalls.list", - "path": "{project}/global/firewalls", + "id": "compute.globalAddresses.list", + "path": "{project}/global/addresses", "httpMethod": "GET", - "description": "Retrieves the list of firewall rules available to the specified project.", + "description": "Retrieves a list of global addresses.", "parameters": { "filter": { "type": "string", @@ -12330,7 +18558,7 @@ "project" ], "response": { - "$ref": "FirewallList" + "$ref": "AddressList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -12338,19 +18566,12 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "patch": { - "id": "compute.firewalls.patch", - "path": "{project}/global/firewalls/{firewall}", - "httpMethod": "PATCH", - "description": "Updates the specified firewall rule with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "setLabels": { + "id": "compute.globalAddresses.setLabels", + "path": "{project}/global/addresses/{resource}/setLabels", + "httpMethod": "POST", + "description": "Sets the labels on a GlobalAddress. To learn more about labels, read the Labeling Resources documentation.", "parameters": { - "firewall": { - "type": "string", - "description": "Name of the firewall rule to patch.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, "project": { "type": "string", "description": "Project ID for this request.", @@ -12358,31 +18579,32 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, - "requestId": { + "resource": { "type": "string", - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query" + "description": "Name of the resource for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "location": "path" } }, "parameterOrder": [ "project", - "firewall" + "resource" ], "request": { - "$ref": "Firewall" + "$ref": "GlobalSetLabelsRequest" }, "response": { "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "testIamPermissions": { - "id": "compute.firewalls.testIamPermissions", - "path": "{project}/global/firewalls/{resource}/testIamPermissions", + "id": "compute.globalAddresses.testIamPermissions", + "path": "{project}/global/addresses/{resource}/testIamPermissions", "httpMethod": "POST", "description": "Returns permissions that a caller has on the specified resource.", "parameters": { @@ -12397,7 +18619,7 @@ "type": "string", "description": "Name of the resource for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", "location": "path" } }, @@ -12416,106 +18638,16 @@ "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - }, - "update": { - "id": "compute.firewalls.update", - "path": "{project}/global/firewalls/{firewall}", - "httpMethod": "PUT", - "description": "Updates the specified firewall rule with the data included in the request. Using PUT method, can only update following fields of firewall rule: allowed, description, sourceRanges, sourceTags, targetTags.", - "parameters": { - "firewall": { - "type": "string", - "description": "Name of the firewall rule to update.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "requestId": { - "type": "string", - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query" - } - }, - "parameterOrder": [ - "project", - "firewall" - ], - "request": { - "$ref": "Firewall" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] } } }, - "forwardingRules": { + "globalForwardingRules": { "methods": { - "aggregatedList": { - "id": "compute.forwardingRules.aggregatedList", - "path": "{project}/aggregated/forwardingRules", - "httpMethod": "GET", - "description": "Retrieves an aggregated list of forwarding rules.", - "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "ForwardingRuleAggregatedList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, "delete": { - "id": "compute.forwardingRules.delete", - "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", + "id": "compute.globalForwardingRules.delete", + "path": "{project}/global/forwardingRules/{forwardingRule}", "httpMethod": "DELETE", - "description": "Deletes the specified ForwardingRule resource.", + "description": "Deletes the specified GlobalForwardingRule resource.", "parameters": { "forwardingRule": { "type": "string", @@ -12531,13 +18663,6 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, "requestId": { "type": "string", "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", @@ -12546,7 +18671,6 @@ }, "parameterOrder": [ "project", - "region", "forwardingRule" ], "response": { @@ -12558,10 +18682,10 @@ ] }, "get": { - "id": "compute.forwardingRules.get", - "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", + "id": "compute.globalForwardingRules.get", + "path": "{project}/global/forwardingRules/{forwardingRule}", "httpMethod": "GET", - "description": "Returns the specified ForwardingRule resource.", + "description": "Returns the specified GlobalForwardingRule resource. 
Get a list of available forwarding rules by making a list() request.", "parameters": { "forwardingRule": { "type": "string", @@ -12576,18 +18700,10 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" } }, "parameterOrder": [ "project", - "region", "forwardingRule" ], "response": { @@ -12600,10 +18716,10 @@ ] }, "insert": { - "id": "compute.forwardingRules.insert", - "path": "{project}/regions/{region}/forwardingRules", + "id": "compute.globalForwardingRules.insert", + "path": "{project}/global/forwardingRules", "httpMethod": "POST", - "description": "Creates a ForwardingRule resource in the specified project and region using the data included in the request.", + "description": "Creates a GlobalForwardingRule resource in the specified project using the data included in the request.", "parameters": { "project": { "type": "string", @@ -12612,13 +18728,6 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, "requestId": { "type": "string", "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", @@ -12626,8 +18735,7 @@ } }, "parameterOrder": [ - "project", - "region" + "project" ], "request": { "$ref": "ForwardingRule" @@ -12641,10 +18749,10 @@ ] }, "list": { - "id": "compute.forwardingRules.list", - "path": "{project}/regions/{region}/forwardingRules", + "id": "compute.globalForwardingRules.list", + "path": "{project}/global/forwardingRules", "httpMethod": "GET", - "description": "Retrieves a list of ForwardingRule resources available to the specified project and region.", + "description": "Retrieves a list of GlobalForwardingRule resources available to the specified project.", "parameters": { "filter": { "type": "string", @@ -12675,18 +18783,10 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" - }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" } }, "parameterOrder": [ - "project", - "region" + "project" ], "response": { "$ref": "ForwardingRuleList" @@ -12698,8 +18798,8 @@ ] }, "setLabels": { - "id": "compute.forwardingRules.setLabels", - "path": "{project}/regions/{region}/forwardingRules/{resource}/setLabels", + "id": "compute.globalForwardingRules.setLabels", + "path": "{project}/global/forwardingRules/{resource}/setLabels", "httpMethod": "POST", "description": "Sets the labels on the specified resource. To learn more about labels, read the Labeling Resources documentation.", "parameters": { @@ -12710,33 +18810,20 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, - "region": { - "type": "string", - "description": "The region for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "requestId": { - "type": "string", - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query" - }, "resource": { "type": "string", "description": "Name of the resource for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", "location": "path" } }, "parameterOrder": [ "project", - "region", "resource" ], "request": { - "$ref": "RegionSetLabelsRequest" + "$ref": "GlobalSetLabelsRequest" }, "response": { "$ref": "Operation" @@ -12747,10 +18834,10 @@ ] }, "setTarget": { - "id": "compute.forwardingRules.setTarget", - "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget", + "id": "compute.globalForwardingRules.setTarget", + "path": "{project}/global/forwardingRules/{forwardingRule}/setTarget", "httpMethod": "POST", - "description": "Changes target URL for forwarding rule. The new target should be of the same type as the old target.", + "description": "Changes target URL for the GlobalForwardingRule resource. The new target should be of the same type as the old target.", "parameters": { "forwardingRule": { "type": "string", @@ -12766,13 +18853,6 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, - "region": { - "type": "string", - "description": "Name of the region scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, "requestId": { "type": "string", "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", @@ -12781,7 +18861,6 @@ }, "parameterOrder": [ "project", - "region", "forwardingRule" ], "request": { @@ -12796,8 +18875,8 @@ ] }, "testIamPermissions": { - "id": "compute.forwardingRules.testIamPermissions", - "path": "{project}/regions/{region}/forwardingRules/{resource}/testIamPermissions", + "id": "compute.globalForwardingRules.testIamPermissions", + "path": "{project}/global/forwardingRules/{resource}/testIamPermissions", "httpMethod": "POST", "description": "Returns permissions that a caller has on the specified resource.", "parameters": { @@ -12808,24 +18887,16 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, - "region": { - "type": "string", - "description": "The name of the region for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, "resource": { "type": "string", "description": "Name of the resource for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", "location": "path" } }, "parameterOrder": [ "project", - "region", "resource" ], "request": { @@ -12842,20 +18913,36 @@ } } }, - "globalAddresses": { + "globalOperations": { "methods": { - "delete": { - "id": "compute.globalAddresses.delete", - "path": "{project}/global/addresses/{address}", - "httpMethod": "DELETE", - "description": "Deletes the specified address resource.", + "aggregatedList": { + "id": "compute.globalOperations.aggregatedList", + "path": "{project}/aggregated/operations", + "httpMethod": "GET", + "description": "Retrieves an aggregated list of all operations.", "parameters": { - "address": { + "filter": { "type": "string", - "description": "Name of the address resource to delete.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" + "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "default": "500", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" }, "project": { "type": "string", @@ -12863,34 +18950,29 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" - }, - "requestId": { - "type": "string", - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query" } }, "parameterOrder": [ - "project", - "address" + "project" ], "response": { - "$ref": "Operation" + "$ref": "OperationAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "get": { - "id": "compute.globalAddresses.get", - "path": "{project}/global/addresses/{address}", - "httpMethod": "GET", - "description": "Returns the specified address resource. 
Get a list of available addresses by making a list() request.", + "delete": { + "id": "compute.globalOperations.delete", + "path": "{project}/global/operations/{operation}", + "httpMethod": "DELETE", + "description": "Deletes the specified Operations resource.", "parameters": { - "address": { + "operation": { "type": "string", - "description": "Name of the address resource to return.", + "description": "Name of the Operations resource to delete.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -12905,55 +18987,52 @@ }, "parameterOrder": [ "project", - "address" + "operation" ], - "response": { - "$ref": "Address" - }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "insert": { - "id": "compute.globalAddresses.insert", - "path": "{project}/global/addresses", - "httpMethod": "POST", - "description": "Creates an address resource in the specified project using the data included in the request.", + "get": { + "id": "compute.globalOperations.get", + "path": "{project}/global/operations/{operation}", + "httpMethod": "GET", + "description": "Retrieves the specified Operations resource. Get a list of operations by making a list() request.", "parameters": { + "operation": { + "type": "string", + "description": "Name of the Operations resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, "project": { "type": "string", "description": "Project ID for this request.", "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" - }, - "requestId": { - "type": "string", - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query" } }, "parameterOrder": [ - "project" + "project", + "operation" ], - "request": { - "$ref": "Address" - }, "response": { "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, "list": { - "id": "compute.globalAddresses.list", - "path": "{project}/global/addresses", + "id": "compute.globalOperations.list", + "path": "{project}/global/operations", "httpMethod": "GET", - "description": "Retrieves a list of global addresses.", + "description": "Retrieves a list of Operation resources contained within the specified project.", "parameters": { "filter": { "type": "string", @@ -12990,80 +19069,7 @@ "project" ], "response": { - "$ref": "AddressList" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "setLabels": { - "id": "compute.globalAddresses.setLabels", - "path": "{project}/global/addresses/{resource}/setLabels", - "httpMethod": "POST", - "description": "Sets the labels on a GlobalAddress. To learn more about labels, read the Labeling Resources documentation.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "resource": { - "type": "string", - "description": "Name of the resource for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "resource" - ], - "request": { - "$ref": "GlobalSetLabelsRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "testIamPermissions": { - "id": "compute.globalAddresses.testIamPermissions", - "path": "{project}/global/addresses/{resource}/testIamPermissions", - "httpMethod": "POST", - "description": "Returns permissions that a caller has on the specified resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "resource": { - "type": "string", - "description": "Name of the resource for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "resource" - ], - "request": { - "$ref": "TestPermissionsRequest" - }, - "response": { - "$ref": "TestPermissionsResponse" + "$ref": "OperationList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -13073,17 +19079,17 @@ } } }, - "globalForwardingRules": { + "healthChecks": { "methods": { "delete": { - "id": "compute.globalForwardingRules.delete", - "path": "{project}/global/forwardingRules/{forwardingRule}", + "id": "compute.healthChecks.delete", + "path": 
"{project}/global/healthChecks/{healthCheck}", "httpMethod": "DELETE", - "description": "Deletes the specified GlobalForwardingRule resource.", + "description": "Deletes the specified HealthCheck resource.", "parameters": { - "forwardingRule": { + "healthCheck": { "type": "string", - "description": "Name of the ForwardingRule resource to delete.", + "description": "Name of the HealthCheck resource to delete.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -13103,7 +19109,7 @@ }, "parameterOrder": [ "project", - "forwardingRule" + "healthCheck" ], "response": { "$ref": "Operation" @@ -13114,14 +19120,14 @@ ] }, "get": { - "id": "compute.globalForwardingRules.get", - "path": "{project}/global/forwardingRules/{forwardingRule}", + "id": "compute.healthChecks.get", + "path": "{project}/global/healthChecks/{healthCheck}", "httpMethod": "GET", - "description": "Returns the specified GlobalForwardingRule resource. Get a list of available forwarding rules by making a list() request.", + "description": "Returns the specified HealthCheck resource. Get a list of available health checks by making a list() request.", "parameters": { - "forwardingRule": { + "healthCheck": { "type": "string", - "description": "Name of the ForwardingRule resource to return.", + "description": "Name of the HealthCheck resource to return.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -13136,10 +19142,10 @@ }, "parameterOrder": [ "project", - "forwardingRule" + "healthCheck" ], "response": { - "$ref": "ForwardingRule" + "$ref": "HealthCheck" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -13148,10 +19154,10 @@ ] }, "insert": { - "id": "compute.globalForwardingRules.insert", - "path": "{project}/global/forwardingRules", + "id": "compute.healthChecks.insert", + "path": "{project}/global/healthChecks", "httpMethod": "POST", - "description": "Creates a GlobalForwardingRule resource in the specified project using the data included in the request.", + "description": "Creates a HealthCheck resource in the specified project using the data included in the request.", "parameters": { "project": { "type": "string", @@ -13170,7 +19176,7 @@ "project" ], "request": { - "$ref": "ForwardingRule" + "$ref": "HealthCheck" }, "response": { "$ref": "Operation" @@ -13181,10 +19187,10 @@ ] }, "list": { - "id": "compute.globalForwardingRules.list", - "path": "{project}/global/forwardingRules", + "id": "compute.healthChecks.list", + "path": "{project}/global/healthChecks", "httpMethod": "GET", - "description": "Retrieves a list of GlobalForwardingRule resources available to the specified project.", + "description": "Retrieves the list of HealthCheck resources available to the specified project.", "parameters": { "filter": { "type": "string", @@ -13221,7 +19227,7 @@ "project" ], "response": { - "$ref": "ForwardingRuleList" + "$ref": "HealthCheckList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -13229,11 +19235,52 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "setLabels": { - "id": "compute.globalForwardingRules.setLabels", - "path": "{project}/global/forwardingRules/{resource}/setLabels", + "patch": { + "id": "compute.healthChecks.patch", + "path": "{project}/global/healthChecks/{healthCheck}", + "httpMethod": "PATCH", + "description": "Updates a HealthCheck resource in the specified project using the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "parameters": { + "healthCheck": { + "type": "string", + "description": "Name of the HealthCheck resource to patch.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + } + }, + "parameterOrder": [ + "project", + "healthCheck" + ], + "request": { + "$ref": "HealthCheck" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "testIamPermissions": { + "id": "compute.healthChecks.testIamPermissions", + "path": "{project}/global/healthChecks/{resource}/testIamPermissions", "httpMethod": "POST", - "description": "Sets the labels on the specified resource. To learn more about labels, read the Labeling Resources documentation.", + "description": "Returns permissions that a caller has on the specified resource.", "parameters": { "project": { "type": "string", @@ -13246,7 +19293,7 @@ "type": "string", "description": "Name of the resource for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", "location": "path" } }, @@ -13255,25 +19302,26 @@ "resource" ], "request": { - "$ref": "GlobalSetLabelsRequest" + "$ref": "TestPermissionsRequest" }, "response": { - "$ref": "Operation" + "$ref": "TestPermissionsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "setTarget": { - "id": "compute.globalForwardingRules.setTarget", - "path": "{project}/global/forwardingRules/{forwardingRule}/setTarget", - "httpMethod": "POST", - "description": "Changes target URL for the GlobalForwardingRule resource. 
The new target should be of the same type as the old target.", + "update": { + "id": "compute.healthChecks.update", + "path": "{project}/global/healthChecks/{healthCheck}", + "httpMethod": "PUT", + "description": "Updates a HealthCheck resource in the specified project using the data included in the request.", "parameters": { - "forwardingRule": { + "healthCheck": { "type": "string", - "description": "Name of the ForwardingRule resource in which target is to be set.", + "description": "Name of the HealthCheck resource to update.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -13293,10 +19341,10 @@ }, "parameterOrder": [ "project", - "forwardingRule" + "healthCheck" ], "request": { - "$ref": "TargetReference" + "$ref": "HealthCheck" }, "response": { "$ref": "Operation" @@ -13305,13 +19353,24 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - }, - "testIamPermissions": { - "id": "compute.globalForwardingRules.testIamPermissions", - "path": "{project}/global/forwardingRules/{resource}/testIamPermissions", - "httpMethod": "POST", - "description": "Returns permissions that a caller has on the specified resource.", + } + } + }, + "httpHealthChecks": { + "methods": { + "delete": { + "id": "compute.httpHealthChecks.delete", + "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + "httpMethod": "DELETE", + "description": "Deletes the specified HttpHealthCheck resource.", "parameters": { + "httpHealthCheck": { + "type": "string", + "description": "Name of the HttpHealthCheck resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, "project": { "type": "string", "description": "Project ID for this request.", @@ -13319,39 +19378,96 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, - "resource": { + "requestId": { "type": "string", - "description": "Name of the resource for this request.", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + } + }, + "parameterOrder": [ + "project", + "httpHealthCheck" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.httpHealthChecks.get", + "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + "httpMethod": "GET", + "description": "Returns the specified HttpHealthCheck resource. 
Get a list of available HTTP health checks by making a list() request.", + "parameters": { + "httpHealthCheck": { + "type": "string", + "description": "Name of the HttpHealthCheck resource to return.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, "parameterOrder": [ "project", - "resource" + "httpHealthCheck" ], - "request": { - "$ref": "TestPermissionsRequest" - }, "response": { - "$ref": "TestPermissionsResponse" + "$ref": "HttpHealthCheck" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - } - } - }, - "globalOperations": { - "methods": { - "aggregatedList": { - "id": "compute.globalOperations.aggregatedList", - "path": "{project}/aggregated/operations", + }, + "insert": { + "id": "compute.httpHealthChecks.insert", + "path": "{project}/global/httpHealthChecks", + "httpMethod": "POST", + "description": "Creates a HttpHealthCheck resource in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "HttpHealthCheck" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.httpHealthChecks.list", + "path": "{project}/global/httpHealthChecks", "httpMethod": "GET", - "description": "Retrieves an aggregated list of all operations.", + "description": "Retrieves the list of HttpHealthCheck resources available to the specified project.", "parameters": { "filter": { "type": "string", @@ -13388,7 +19504,7 @@ "project" ], "response": { - "$ref": "OperationAggregatedList" + "$ref": "HttpHealthCheckList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -13396,15 +19512,15 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "delete": { - "id": "compute.globalOperations.delete", - "path": "{project}/global/operations/{operation}", - "httpMethod": "DELETE", - "description": "Deletes the specified Operations resource.", + "patch": { + "id": "compute.httpHealthChecks.patch", + "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + "httpMethod": "PATCH", + "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", "parameters": { - "operation": { + "httpHealthCheck": { "type": "string", - "description": "Name of the Operations resource to delete.", + "description": "Name of the HttpHealthCheck resource to patch.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -13415,44 +19531,58 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ "project", - "operation" + "httpHealthCheck" ], + "request": { + "$ref": "HttpHealthCheck" + }, + "response": { + "$ref": "Operation" + }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] }, - "get": { - "id": "compute.globalOperations.get", - "path": "{project}/global/operations/{operation}", - "httpMethod": "GET", - "description": "Retrieves the specified Operations resource. 
Get a list of operations by making a list() request.", + "testIamPermissions": { + "id": "compute.httpHealthChecks.testIamPermissions", + "path": "{project}/global/httpHealthChecks/{resource}/testIamPermissions", + "httpMethod": "POST", + "description": "Returns permissions that a caller has on the specified resource.", "parameters": { - "operation": { + "project": { "type": "string", - "description": "Name of the Operations resource to return.", + "description": "Project ID for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, - "project": { + "resource": { "type": "string", - "description": "Project ID for this request.", + "description": "Name of the resource for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", "location": "path" } }, "parameterOrder": [ "project", - "operation" + "resource" ], + "request": { + "$ref": "TestPermissionsRequest" + }, "response": { - "$ref": "Operation" + "$ref": "TestPermissionsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -13460,34 +19590,18 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "list": { - "id": "compute.globalOperations.list", - "path": "{project}/global/operations", - "httpMethod": "GET", - "description": "Retrieves a list of Operation resources contained within the specified project.", + "update": { + "id": "compute.httpHealthChecks.update", + "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + "httpMethod": "PUT", + "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request.", "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { + "httpHealthCheck": { "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" + "description": "Name of the HttpHealthCheck resource to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" }, "project": { "type": "string", @@ -13495,33 +19609,41 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ - "project" + "project", + "httpHealthCheck" ], + "request": { + "$ref": "HttpHealthCheck" + }, "response": { - "$ref": "OperationList" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] } } }, - "healthChecks": { + "httpsHealthChecks": { "methods": { "delete": { - "id": "compute.healthChecks.delete", - "path": "{project}/global/healthChecks/{healthCheck}", + "id": "compute.httpsHealthChecks.delete", + "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", "httpMethod": "DELETE", - "description": "Deletes the specified HealthCheck resource.", + "description": "Deletes the specified HttpsHealthCheck resource.", "parameters": { - "healthCheck": { + "httpsHealthCheck": { "type": "string", - "description": "Name of the HealthCheck resource to delete.", + "description": "Name of the HttpsHealthCheck resource to delete.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -13541,7 +19663,7 @@ }, "parameterOrder": [ "project", - "healthCheck" + "httpsHealthCheck" ], "response": { "$ref": "Operation" @@ -13552,14 +19674,14 @@ ] }, "get": { - "id": "compute.healthChecks.get", - "path": "{project}/global/healthChecks/{healthCheck}", + "id": "compute.httpsHealthChecks.get", + "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", "httpMethod": "GET", - "description": "Returns the specified HealthCheck resource. Get a list of available health checks by making a list() request.", + "description": "Returns the specified HttpsHealthCheck resource. 
Get a list of available HTTPS health checks by making a list() request.", "parameters": { - "healthCheck": { + "httpsHealthCheck": { "type": "string", - "description": "Name of the HealthCheck resource to return.", + "description": "Name of the HttpsHealthCheck resource to return.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -13574,10 +19696,10 @@ }, "parameterOrder": [ "project", - "healthCheck" + "httpsHealthCheck" ], "response": { - "$ref": "HealthCheck" + "$ref": "HttpsHealthCheck" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -13586,10 +19708,10 @@ ] }, "insert": { - "id": "compute.healthChecks.insert", - "path": "{project}/global/healthChecks", + "id": "compute.httpsHealthChecks.insert", + "path": "{project}/global/httpsHealthChecks", "httpMethod": "POST", - "description": "Creates a HealthCheck resource in the specified project using the data included in the request.", + "description": "Creates a HttpsHealthCheck resource in the specified project using the data included in the request.", "parameters": { "project": { "type": "string", @@ -13608,7 +19730,7 @@ "project" ], "request": { - "$ref": "HealthCheck" + "$ref": "HttpsHealthCheck" }, "response": { "$ref": "Operation" @@ -13619,10 +19741,10 @@ ] }, "list": { - "id": "compute.healthChecks.list", - "path": "{project}/global/healthChecks", + "id": "compute.httpsHealthChecks.list", + "path": "{project}/global/httpsHealthChecks", "httpMethod": "GET", - "description": "Retrieves the list of HealthCheck resources available to the specified project.", + "description": "Retrieves the list of HttpsHealthCheck resources available to the specified project.", "parameters": { "filter": { "type": "string", @@ -13659,7 +19781,7 @@ "project" ], "response": { - "$ref": "HealthCheckList" + "$ref": "HttpsHealthCheckList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -13668,14 +19790,14 @@ ] }, "patch": { - "id": "compute.healthChecks.patch", - "path": "{project}/global/healthChecks/{healthCheck}", + "id": "compute.httpsHealthChecks.patch", + "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", "httpMethod": "PATCH", - "description": "Updates a HealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", "parameters": { - "healthCheck": { + "httpsHealthCheck": { "type": "string", - "description": "Name of the HealthCheck resource to patch.", + "description": "Name of the HttpsHealthCheck resource to patch.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -13695,23 +19817,22 @@ }, "parameterOrder": [ "project", - "healthCheck" + "httpsHealthCheck" ], "request": { - "$ref": "HealthCheck" + "$ref": "HttpsHealthCheck" }, "response": { "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "testIamPermissions": { - "id": "compute.healthChecks.testIamPermissions", - "path": "{project}/global/healthChecks/{resource}/testIamPermissions", + "id": "compute.httpsHealthChecks.testIamPermissions", + "path": "{project}/global/httpsHealthChecks/{resource}/testIamPermissions", "httpMethod": "POST", "description": "Returns permissions that a caller has on the specified resource.", "parameters": { @@ -13726,7 +19847,7 @@ "type": "string", "description": "Name of the resource for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", "location": "path" } }, @@ -13747,14 +19868,14 @@ ] }, "update": { - "id": "compute.healthChecks.update", - "path": "{project}/global/healthChecks/{healthCheck}", + "id": "compute.httpsHealthChecks.update", + "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", "httpMethod": "PUT", - "description": "Updates a HealthCheck resource in the specified project using the data included in the request.", + "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request.", "parameters": { - "healthCheck": { + "httpsHealthCheck": { "type": "string", - "description": "Name of the HealthCheck resource to update.", + "description": "Name of the HttpsHealthCheck resource to update.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -13774,10 +19895,10 @@ }, "parameterOrder": [ "project", - "healthCheck" + "httpsHealthCheck" ], "request": { - "$ref": "HealthCheck" + "$ref": "HttpsHealthCheck" }, "response": { "$ref": "Operation" @@ -13789,17 +19910,17 @@ } } }, - "httpHealthChecks": { + "images": { "methods": { "delete": { - "id": "compute.httpHealthChecks.delete", - "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + "id": "compute.images.delete", + "path": "{project}/global/images/{image}", "httpMethod": "DELETE", - "description": "Deletes the specified HttpHealthCheck resource.", + "description": "Deletes the specified image.", "parameters": { - "httpHealthCheck": { + "image": { "type": "string", - "description": "Name of the HttpHealthCheck resource to delete.", + "description": "Name of the image resource to delete.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -13819,8 +19940,49 @@ }, "parameterOrder": [ "project", - "httpHealthCheck" + "image" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "deprecate": { + "id": "compute.images.deprecate", + "path": "{project}/global/images/{image}/deprecate", + "httpMethod": "POST", + 
"description": "Sets the deprecation status of an image.\n\nIf an empty request body is given, clears the deprecation status instead.", + "parameters": { + "image": { + "type": "string", + "description": "Image name.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + } + }, + "parameterOrder": [ + "project", + "image" ], + "request": { + "$ref": "DeprecationStatus" + }, "response": { "$ref": "Operation" }, @@ -13830,14 +19992,48 @@ ] }, "get": { - "id": "compute.httpHealthChecks.get", - "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + "id": "compute.images.get", + "path": "{project}/global/images/{image}", "httpMethod": "GET", - "description": "Returns the specified HttpHealthCheck resource. Get a list of available HTTP health checks by making a list() request.", + "description": "Returns the specified image. 
Get a list of available images by making a list() request.", "parameters": { - "httpHealthCheck": { + "image": { + "type": "string", + "description": "Name of the image resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "image" + ], + "response": { + "$ref": "Image" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "getFromFamily": { + "id": "compute.images.getFromFamily", + "path": "{project}/global/images/family/{family}", + "httpMethod": "GET", + "description": "Returns the latest image that is part of an image family and is not deprecated.", + "parameters": { + "family": { "type": "string", - "description": "Name of the HttpHealthCheck resource to return.", + "description": "Name of the image family to search for.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -13852,10 +20048,10 @@ }, "parameterOrder": [ "project", - "httpHealthCheck" + "family" ], "response": { - "$ref": "HttpHealthCheck" + "$ref": "Image" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -13864,11 +20060,16 @@ ] }, "insert": { - "id": "compute.httpHealthChecks.insert", - "path": "{project}/global/httpHealthChecks", + "id": "compute.images.insert", + "path": "{project}/global/images", "httpMethod": "POST", - "description": "Creates a HttpHealthCheck resource in the specified project using the data included in the request.", + "description": "Creates an image in the specified project using the data included in the request.", "parameters": { + "forceCreate": { + "type": "boolean", + "description": "Force image creation if true.", + "location": "query" + }, "project": { "type": "string", "description": "Project ID for this request.", @@ -13886,21 +20087,24 @@ "project" ], "request": { - "$ref": "HttpHealthCheck" + "$ref": "Image" }, "response": { "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" ] }, "list": { - "id": "compute.httpHealthChecks.list", - "path": "{project}/global/httpHealthChecks", + "id": "compute.images.list", + "path": "{project}/global/images", "httpMethod": "GET", - "description": "Retrieves the list of HttpHealthCheck resources available to the specified project.", + "description": "Retrieves the list of private images available to the specified project. Private images are images you create that belong to your project. This method does not get any images that belong to other projects, including publicly-available images, like Debian 8. 
If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", "parameters": { "filter": { "type": "string", @@ -13937,7 +20141,7 @@ "project" ], "response": { - "$ref": "HttpHealthCheckList" + "$ref": "ImageList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -13945,19 +20149,12 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "patch": { - "id": "compute.httpHealthChecks.patch", - "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", - "httpMethod": "PATCH", - "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "setLabels": { + "id": "compute.images.setLabels", + "path": "{project}/global/images/{resource}/setLabels", + "httpMethod": "POST", + "description": "Sets the labels on an image. To learn more about labels, read the Labeling Resources documentation.", "parameters": { - "httpHealthCheck": { - "type": "string", - "description": "Name of the HttpHealthCheck resource to patch.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, "project": { "type": "string", "description": "Project ID for this request.", @@ -13965,31 +20162,32 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, - "requestId": { + "resource": { "type": "string", - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query" + "description": "Name of the resource for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "location": "path" } }, "parameterOrder": [ "project", - "httpHealthCheck" + "resource" ], "request": { - "$ref": "HttpHealthCheck" + "$ref": "GlobalSetLabelsRequest" }, "response": { "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "testIamPermissions": { - "id": "compute.httpHealthChecks.testIamPermissions", - "path": "{project}/global/httpHealthChecks/{resource}/testIamPermissions", + "id": "compute.images.testIamPermissions", + "path": "{project}/global/images/{resource}/testIamPermissions", "httpMethod": "POST", "description": "Returns permissions that a caller has on the specified resource.", "parameters": { @@ -14004,7 +20202,7 @@ "type": "string", "description": "Name of the resource for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", "location": "path" } }, @@ -14023,18 +20221,21 @@ "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - }, - "update": { - "id": "compute.httpHealthChecks.update", - "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", - "httpMethod": "PUT", - "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request.", + } + } + }, + "instanceGroupManagers": { + "methods": { + "abandonInstances": { + "id": "compute.instanceGroupManagers.abandonInstances", + "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", + "httpMethod": "POST", + "description": "Schedules a group action to remove the specified instances from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. You must separately verify the status of the abandoning action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", "parameters": { - "httpHealthCheck": { + "instanceGroupManager": { "type": "string", - "description": "Name of the HttpHealthCheck resource to update.", + "description": "The name of the managed instance group.", "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, "project": { @@ -14048,14 +20249,21 @@ "type": "string", "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query" + }, + "zone": { + "type": "string", + "description": "The name of the zone where the managed instance group is located.", + "required": true, + "location": "path" } }, "parameterOrder": [ "project", - "httpHealthCheck" + "zone", + "instanceGroupManager" ], "request": { - "$ref": "HttpHealthCheck" + "$ref": "InstanceGroupManagersAbandonInstancesRequest" }, "response": { "$ref": "Operation" @@ -14064,22 +20272,66 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - } - } - }, - "httpsHealthChecks": { - "methods": { + }, + "aggregatedList": { + "id": "compute.instanceGroupManagers.aggregatedList", + "path": "{project}/aggregated/instanceGroupManagers", + "httpMethod": "GET", + "description": "Retrieves the list of managed instance groups and groups them by zone.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "default": "500", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "InstanceGroupManagerAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "delete": { - "id": "compute.httpsHealthChecks.delete", - "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + "id": "compute.instanceGroupManagers.delete", + "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", "httpMethod": "DELETE", - "description": "Deletes the specified HttpsHealthCheck resource.", + "description": "Deletes the specified managed instance group and all of the instances in that group. Note that the instance group must not belong to a backend service. Read Deleting an instance group for more information.", "parameters": { - "httpsHealthCheck": { + "instanceGroupManager": { "type": "string", - "description": "Name of the HttpsHealthCheck resource to delete.", + "description": "The name of the managed instance group to delete.", "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, "project": { @@ -14093,12 +20345,66 @@ "type": "string", "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query" + }, + "zone": { + "type": "string", + "description": "The name of the zone where the managed instance group is located.", + "required": true, + "location": "path" } }, "parameterOrder": [ "project", - "httpsHealthCheck" + "zone", + "instanceGroupManager" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "deleteInstances": { + "id": "compute.instanceGroupManagers.deleteInstances", + "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", + "httpMethod": "POST", + "description": "Schedules a group action to delete the specified instances in the managed instance group. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. This operation is marked as DONE when the action is scheduled even if the instances are still being deleted. You must separately verify the status of the deleting action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + "parameters": { + "instanceGroupManager": { + "type": "string", + "description": "The name of the managed instance group.", + "required": true, + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, + "zone": { + "type": "string", + "description": "The name of the zone where the managed instance group is located.", + "required": true, + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instanceGroupManager" ], + "request": { + "$ref": "InstanceGroupManagersDeleteInstancesRequest" + }, "response": { "$ref": "Operation" }, @@ -14108,16 +20414,15 @@ ] }, "get": { - "id": "compute.httpsHealthChecks.get", - "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + "id": "compute.instanceGroupManagers.get", + "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", "httpMethod": "GET", - "description": "Returns the specified HttpsHealthCheck resource. 
Get a list of available HTTPS health checks by making a list() request.", + "description": "Returns all of the details about the specified managed instance group. Get a list of available managed instance groups by making a list() request.", "parameters": { - "httpsHealthCheck": { + "instanceGroupManager": { "type": "string", - "description": "Name of the HttpsHealthCheck resource to return.", + "description": "The name of the managed instance group.", "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, "project": { @@ -14126,14 +20431,21 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone where the managed instance group is located.", + "required": true, + "location": "path" } }, "parameterOrder": [ "project", - "httpsHealthCheck" + "zone", + "instanceGroupManager" ], "response": { - "$ref": "HttpsHealthCheck" + "$ref": "InstanceGroupManager" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -14142,10 +20454,10 @@ ] }, "insert": { - "id": "compute.httpsHealthChecks.insert", - "path": "{project}/global/httpsHealthChecks", + "id": "compute.instanceGroupManagers.insert", + "path": "{project}/zones/{zone}/instanceGroupManagers", "httpMethod": "POST", - "description": "Creates a HttpsHealthCheck resource in the specified project using the data included in the request.", + "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, it schedules an action to create instances in the group using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method.\n\nA managed instance group can have up to 1000 VM instances per group. Please contact Cloud Support if you need an increase in this limit.", "parameters": { "project": { "type": "string", @@ -14158,13 +20470,20 @@ "type": "string", "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query" + }, + "zone": { + "type": "string", + "description": "The name of the zone where you want to create the managed instance group.", + "required": true, + "location": "path" } }, "parameterOrder": [ - "project" + "project", + "zone" ], "request": { - "$ref": "HttpsHealthCheck" + "$ref": "InstanceGroupManager" }, "response": { "$ref": "Operation" @@ -14175,10 +20494,10 @@ ] }, "list": { - "id": "compute.httpsHealthChecks.list", - "path": "{project}/global/httpsHealthChecks", + "id": "compute.instanceGroupManagers.list", + "path": "{project}/zones/{zone}/instanceGroupManagers", "httpMethod": "GET", - "description": "Retrieves the list of HttpsHealthCheck resources available to the specified project.", + "description": "Retrieves a list of managed instance groups that are contained within the specified project and zone.", "parameters": { "filter": { "type": "string", @@ -14209,13 +20528,20 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone where the managed instance group is located.", + "required": true, + "location": "path" } }, "parameterOrder": [ - "project" + "project", + "zone" ], "response": { - "$ref": "HttpsHealthCheckList" + "$ref": "InstanceGroupManagerList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -14223,19 +20549,37 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "patch": { - "id": "compute.httpsHealthChecks.patch", - "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", - "httpMethod": "PATCH", - "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "listManagedInstances": { + "id": "compute.instanceGroupManagers.listManagedInstances", + "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", + "httpMethod": "POST", + "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. 
If a previous action failed, the list displays the errors for that failed action.", "parameters": { - "httpsHealthCheck": { + "filter": { "type": "string", - "description": "Name of the HttpsHealthCheck resource to patch.", + "location": "query" + }, + "instanceGroupManager": { + "type": "string", + "description": "The name of the managed instance group.", "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, + "maxResults": { + "type": "integer", + "default": "500", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "order_by": { + "type": "string", + "location": "query" + }, + "pageToken": { + "type": "string", + "location": "query" + }, "project": { "type": "string", "description": "Project ID for this request.", @@ -14243,21 +20587,20 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, - "requestId": { + "zone": { "type": "string", - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query" + "description": "The name of the zone where the managed instance group is located.", + "required": true, + "location": "path" } }, "parameterOrder": [ "project", - "httpsHealthCheck" + "zone", + "instanceGroupManager" ], - "request": { - "$ref": "HttpsHealthCheck" - }, "response": { - "$ref": "Operation" + "$ref": "InstanceGroupManagersListManagedInstancesResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -14265,12 +20608,18 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "testIamPermissions": { - "id": "compute.httpsHealthChecks.testIamPermissions", - "path": "{project}/global/httpsHealthChecks/{resource}/testIamPermissions", - "httpMethod": "POST", - "description": "Returns permissions that a caller has on the specified resource.", + "patch": { + "id": "compute.instanceGroupManagers.patch", + "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", + "httpMethod": "PATCH", + "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listManagedInstances method. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", "parameters": { + "instanceGroupManager": { + "type": "string", + "description": "The name of the instance group manager.", + "required": true, + "location": "path" + }, "project": { "type": "string", "description": "Project ID for this request.", @@ -14278,41 +20627,44 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, - "resource": { + "requestId": { "type": "string", - "description": "Name of the resource for this request.", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, + "zone": { + "type": "string", + "description": "The name of the zone where you want to create the managed instance group.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", "location": "path" } }, "parameterOrder": [ "project", - "resource" + "zone", + "instanceGroupManager" ], "request": { - "$ref": "TestPermissionsRequest" + "$ref": "InstanceGroupManager" }, "response": { - "$ref": "TestPermissionsResponse" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "update": { - "id": "compute.httpsHealthChecks.update", - "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", - "httpMethod": "PUT", - "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request.", + "recreateInstances": { + "id": "compute.instanceGroupManagers.recreateInstances", + "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", + "httpMethod": "POST", + "description": "Schedules a group action to recreate the specified instances in the managed instance group. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the action is scheduled even if the instances have not yet been recreated. 
You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", "parameters": { - "httpsHealthCheck": { + "instanceGroupManager": { "type": "string", - "description": "Name of the HttpsHealthCheck resource to update.", + "description": "The name of the managed instance group.", "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, "project": { @@ -14326,14 +20678,21 @@ "type": "string", "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query" + }, + "zone": { + "type": "string", + "description": "The name of the zone where the managed instance group is located.", + "required": true, + "location": "path" } }, "parameterOrder": [ "project", - "httpsHealthCheck" + "zone", + "instanceGroupManager" ], "request": { - "$ref": "HttpsHealthCheck" + "$ref": "InstanceGroupManagersRecreateInstancesRequest" }, "response": { "$ref": "Operation" @@ -14342,22 +20701,17 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] - } - } - }, - "images": { - "methods": { - "delete": { - "id": "compute.images.delete", - "path": "{project}/global/images/{image}", - "httpMethod": "DELETE", - "description": "Deletes the specified image.", + }, + "resize": { + "id": "compute.instanceGroupManagers.resize", + "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize", + "httpMethod": "POST", + "description": "Resizes the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", "parameters": { - "image": { + "instanceGroupManager": { "type": "string", - "description": "Name of the image resource to delete.", + "description": "The name of the managed instance group.", "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, "project": { @@ -14371,11 +20725,26 @@ "type": "string", "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query" + }, + "size": { + "type": "integer", + "description": "The number of running instances that the managed instance group should maintain at any given time. The group automatically adds or removes instances to maintain the number of instances specified by this parameter.", + "required": true, + "format": "int32", + "location": "query" + }, + "zone": { + "type": "string", + "description": "The name of the zone where the managed instance group is located.", + "required": true, + "location": "path" } }, "parameterOrder": [ "project", - "image" + "zone", + "instanceGroupManager", + "size" ], "response": { "$ref": "Operation" @@ -14385,17 +20754,16 @@ "https://www.googleapis.com/auth/compute" ] }, - "deprecate": { - "id": "compute.images.deprecate", - "path": "{project}/global/images/{image}/deprecate", + "resizeAdvanced": { + "id": "compute.instanceGroupManagers.resizeAdvanced", + "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resizeAdvanced", "httpMethod": "POST", - "description": "Sets the deprecation status of an image.\n\nIf an empty request body is given, clears the deprecation status instead.", + "description": "Resizes the managed instance group with advanced configuration options like disabling creation retries. This is an extended version of the resize method.\n\nIf you increase the size of the instance group, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating, creatingWithoutRetries, or deleting actions with the get or listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", "parameters": { - "image": { + "instanceGroupManager": { "type": "string", - "description": "Image name.", + "description": "The name of the managed instance group.", "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, "project": { @@ -14407,16 +20775,23 @@ }, "requestId": { "type": "string", - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and then the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query" + }, + "zone": { + "type": "string", + "description": "The name of the zone where the managed instance group is located.", + "required": true, + "location": "path" } }, "parameterOrder": [ "project", - "image" + "zone", + "instanceGroupManager" ], "request": { - "$ref": "DeprecationStatus" + "$ref": "InstanceGroupManagersResizeAdvancedRequest" }, "response": { "$ref": "Operation" @@ -14426,17 +20801,16 @@ "https://www.googleapis.com/auth/compute" ] }, - "get": { - "id": "compute.images.get", - "path": "{project}/global/images/{image}", - "httpMethod": "GET", - "description": "Returns the specified image. Get a list of available images by making a list() request.", + "setAutoHealingPolicies": { + "id": "compute.instanceGroupManagers.setAutoHealingPolicies", + "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies", + "httpMethod": "POST", + "description": "Modifies the autohealing policies.", "parameters": { - "image": { + "instanceGroupManager": { "type": "string", - "description": "Name of the image resource to return.", + "description": "The name of the instance group manager.", "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, "project": { @@ -14445,65 +20819,46 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" - } - }, - "parameterOrder": [ - "project", - "image" - ], - "response": { - "$ref": "Image" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "getFromFamily": { - "id": "compute.images.getFromFamily", - "path": "{project}/global/images/family/{family}", - "httpMethod": "GET", - "description": "Returns the latest image that is part of an image family and is not deprecated.", - "parameters": { - "family": { + }, + "requestId": { "type": "string", - "description": "Name of the image family to search for.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" }, - "project": { + "zone": { "type": "string", - "description": "Project ID for this request.", + "description": "The name of the zone where the managed instance group is located.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, "parameterOrder": [ "project", - "family" + "zone", + "instanceGroupManager" ], + "request": { + "$ref": "InstanceGroupManagersSetAutoHealingRequest" + }, "response": { - "$ref": "Image" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "insert": { - "id": "compute.images.insert", - "path": "{project}/global/images", + "setInstanceTemplate": { + "id": "compute.instanceGroupManagers.setInstanceTemplate", + "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", "httpMethod": "POST", - "description": "Creates an image in the specified project using the data included in the request.", + "description": "Specifies the instance template to use when creating new instances in this group. The templates for existing instances in the group do not change unless you recreate them.", "parameters": { - "forceCreate": { - "type": "boolean", - "description": "Force image creation if true.", - "location": "query" + "instanceGroupManager": { + "type": "string", + "description": "The name of the managed instance group.", + "required": true, + "location": "path" }, "project": { "type": "string", @@ -14516,79 +20871,82 @@ "type": "string", "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query" + }, + "zone": { + "type": "string", + "description": "The name of the zone where the managed instance group is located.", + "required": true, + "location": "path" } }, "parameterOrder": [ - "project" + "project", + "zone", + "instanceGroupManager" ], "request": { - "$ref": "Image" + "$ref": "InstanceGroupManagersSetInstanceTemplateRequest" }, "response": { "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/devstorage.read_write" + "https://www.googleapis.com/auth/compute" ] }, - "list": { - "id": "compute.images.list", - "path": "{project}/global/images", - "httpMethod": "GET", - "description": "Retrieves the list of private images available to the specified project. Private images are images you create that belong to your project. This method does not get any images that belong to other projects, including publicly-available images, like Debian 8. If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", + "setTargetPools": { + "id": "compute.instanceGroupManagers.setTargetPools", + "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", + "httpMethod": "POST", + "description": "Modifies the target pools to which all instances in this managed instance group are assigned. The target pools automatically apply to all of the instances in the managed instance group. This operation is marked DONE when you make the request even if the instances have not yet been added to their target pools. The change might take some time to apply to all of the instances in the group depending on the size of the group.", "parameters": { - "filter": { + "instanceGroupManager": { "type": "string", - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" + "description": "The name of the managed instance group.", + "required": true, + "location": "path" }, - "orderBy": { + "project": { "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" }, - "pageToken": { + "requestId": { "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query" }, - "project": { + "zone": { "type": "string", - "description": "Project ID for this request.", + "description": "The name of the zone where the managed instance group is located.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, "parameterOrder": [ - "project" + "project", + "zone", + "instanceGroupManager" ], + "request": { + "$ref": "InstanceGroupManagersSetTargetPoolsRequest" + }, "response": { - "$ref": "ImageList" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "setLabels": { - "id": "compute.images.setLabels", - "path": "{project}/global/images/{resource}/setLabels", + "testIamPermissions": { + "id": "compute.instanceGroupManagers.testIamPermissions", + "path": "{project}/zones/{zone}/instanceGroupManagers/{resource}/testIamPermissions", "httpMethod": "POST", - "description": "Sets the labels on an image. To learn more about labels, read the Labeling Resources documentation.", + "description": "Returns permissions that a caller has on the specified resource.", "parameters": { "project": { "type": "string", @@ -14601,31 +20959,46 @@ "type": "string", "description": "Name of the resource for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" } }, "parameterOrder": [ "project", + "zone", "resource" ], "request": { - "$ref": "GlobalSetLabelsRequest" + "$ref": "TestPermissionsRequest" }, "response": { - "$ref": "Operation" + "$ref": "TestPermissionsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "testIamPermissions": { - "id": "compute.images.testIamPermissions", - "path": "{project}/global/images/{resource}/testIamPermissions", - "httpMethod": "POST", - "description": "Returns permissions that a caller has on the specified resource.", + "update": { + "id": "compute.instanceGroupManagers.update", + "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", + "httpMethod": "PUT", + "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is updated even if the instances in the group have not yet been updated. 
You must separately verify the status of the individual instances with the listManagedInstances method.", "parameters": { + "instanceGroupManager": { + "type": "string", + "description": "The name of the instance group manager.", + "required": true, + "location": "path" + }, "project": { "type": "string", "description": "Project ID for this request.", @@ -14633,43 +21006,47 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, - "resource": { + "requestId": { "type": "string", - "description": "Name of the resource for this request.", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, + "zone": { + "type": "string", + "description": "The name of the zone where you want to create the managed instance group.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", "location": "path" } }, "parameterOrder": [ "project", - "resource" + "zone", + "instanceGroupManager" ], "request": { - "$ref": "TestPermissionsRequest" + "$ref": "InstanceGroupManager" }, "response": { - "$ref": "TestPermissionsResponse" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] } } }, - "instanceGroupManagers": { + "instanceGroups": { "methods": { - "abandonInstances": { - "id": "compute.instanceGroupManagers.abandonInstances", - "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", + "addInstances": { + "id": "compute.instanceGroups.addInstances", + "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances", "httpMethod": "POST", - "description": "Schedules a group action to remove the specified instances from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. You must separately verify the status of the abandoning action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + "description": "Adds a list of instances to the specified instance group. All of the instances in the instance group must be in the same network/subnetwork. 
Read Adding instances for more information.", "parameters": { - "instanceGroupManager": { + "instanceGroup": { "type": "string", - "description": "The name of the managed instance group.", + "description": "The name of the instance group where you are adding instances.", "required": true, "location": "path" }, @@ -14687,7 +21064,7 @@ }, "zone": { "type": "string", - "description": "The name of the zone where the managed instance group is located.", + "description": "The name of the zone where the instance group is located.", "required": true, "location": "path" } @@ -14695,10 +21072,10 @@ "parameterOrder": [ "project", "zone", - "instanceGroupManager" + "instanceGroup" ], "request": { - "$ref": "InstanceGroupManagersAbandonInstancesRequest" + "$ref": "InstanceGroupsAddInstancesRequest" }, "response": { "$ref": "Operation" @@ -14709,10 +21086,10 @@ ] }, "aggregatedList": { - "id": "compute.instanceGroupManagers.aggregatedList", - "path": "{project}/aggregated/instanceGroupManagers", + "id": "compute.instanceGroups.aggregatedList", + "path": "{project}/aggregated/instanceGroups", "httpMethod": "GET", - "description": "Retrieves the list of managed instance groups and groups them by zone.", + "description": "Retrieves the list of instance groups and sorts them by zone.", "parameters": { "filter": { "type": "string", @@ -14749,7 +21126,7 @@ "project" ], "response": { - "$ref": "InstanceGroupManagerAggregatedList" + "$ref": "InstanceGroupAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -14758,58 +21135,14 @@ ] }, "delete": { - "id": "compute.instanceGroupManagers.delete", - "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", + "id": "compute.instanceGroups.delete", + "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}", "httpMethod": "DELETE", - "description": "Deletes the specified managed instance group and all of the instances in that group. Note that the instance group must not belong to a backend service. Read Deleting an instance group for more information.", - "parameters": { - "instanceGroupManager": { - "type": "string", - "description": "The name of the managed instance group to delete.", - "required": true, - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "requestId": { - "type": "string", - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query" - }, - "zone": { - "type": "string", - "description": "The name of the zone where the managed instance group is located.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "instanceGroupManager" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "deleteInstances": { - "id": "compute.instanceGroupManagers.deleteInstances", - "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", - "httpMethod": "POST", - "description": "Schedules a group action to delete the specified instances in the managed instance group. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. This operation is marked as DONE when the action is scheduled even if the instances are still being deleted. You must separately verify the status of the deleting action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + "description": "Deletes the specified instance group. The instances in the group are not deleted. Note that instance group must not belong to a backend service. Read Deleting an instance group for more information.", "parameters": { - "instanceGroupManager": { + "instanceGroup": { "type": "string", - "description": "The name of the managed instance group.", + "description": "The name of the instance group to delete.", "required": true, "location": "path" }, @@ -14827,7 +21160,7 @@ }, "zone": { "type": "string", - "description": "The name of the zone where the managed instance group is located.", + "description": "The name of the zone where the instance group is located.", "required": true, "location": "path" } @@ -14835,11 +21168,8 @@ "parameterOrder": [ "project", "zone", - "instanceGroupManager" + "instanceGroup" ], - "request": { - "$ref": "InstanceGroupManagersDeleteInstancesRequest" - }, "response": { "$ref": "Operation" }, @@ -14849,14 +21179,14 @@ ] }, "get": { - "id": "compute.instanceGroupManagers.get", - "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", + "id": "compute.instanceGroups.get", + "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}", "httpMethod": "GET", - "description": "Returns all of the details about the specified managed instance group. Get a list of available managed instance groups by making a list() request.", + "description": "Returns the specified instance group. 
Get a list of available instance groups by making a list() request.", "parameters": { - "instanceGroupManager": { + "instanceGroup": { "type": "string", - "description": "The name of the managed instance group.", + "description": "The name of the instance group.", "required": true, "location": "path" }, @@ -14869,7 +21199,7 @@ }, "zone": { "type": "string", - "description": "The name of the zone where the managed instance group is located.", + "description": "The name of the zone where the instance group is located.", "required": true, "location": "path" } @@ -14877,10 +21207,10 @@ "parameterOrder": [ "project", "zone", - "instanceGroupManager" + "instanceGroup" ], "response": { - "$ref": "InstanceGroupManager" + "$ref": "InstanceGroup" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -14889,10 +21219,10 @@ ] }, "insert": { - "id": "compute.instanceGroupManagers.insert", - "path": "{project}/zones/{zone}/instanceGroupManagers", + "id": "compute.instanceGroups.insert", + "path": "{project}/zones/{zone}/instanceGroups", "httpMethod": "POST", - "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, it schedules an action to create instances in the group using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method.\n\nA managed instance group can have up to 1000 VM instances per group. Please contact Cloud Support if you need an increase in this limit.", + "description": "Creates an instance group in the specified project using the parameters that are included in the request.", "parameters": { "project": { "type": "string", @@ -14908,7 +21238,7 @@ }, "zone": { "type": "string", - "description": "The name of the zone where you want to create the managed instance group.", + "description": "The name of the zone where you want to create the instance group.", "required": true, "location": "path" } @@ -14918,7 +21248,7 @@ "zone" ], "request": { - "$ref": "InstanceGroupManager" + "$ref": "InstanceGroup" }, "response": { "$ref": "Operation" @@ -14929,10 +21259,10 @@ ] }, "list": { - "id": "compute.instanceGroupManagers.list", - "path": "{project}/zones/{zone}/instanceGroupManagers", + "id": "compute.instanceGroups.list", + "path": "{project}/zones/{zone}/instanceGroups", "httpMethod": "GET", - "description": "Retrieves a list of managed instance groups that are contained within the specified project and zone.", + "description": "Retrieves the list of instance groups that are located in the specified project and zone.", "parameters": { "filter": { "type": "string", @@ -14966,7 +21296,7 @@ }, "zone": { "type": "string", - "description": "The name of the zone where the managed instance group is located.", + "description": "The name of the zone where the instance group is located.", "required": true, "location": "path" } @@ -14976,7 +21306,7 @@ "zone" ], "response": { - "$ref": "InstanceGroupManagerList" + "$ref": "InstanceGroupList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -14984,35 +21314,39 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "listManagedInstances": { - "id": "compute.instanceGroupManagers.listManagedInstances", - "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", + "listInstances": { + "id": 
"compute.instanceGroups.listInstances", + "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances", "httpMethod": "POST", - "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action.", + "description": "Lists the instances in the specified instance group.", "parameters": { "filter": { "type": "string", + "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, - "instanceGroupManager": { + "instanceGroup": { "type": "string", - "description": "The name of the managed instance group.", + "description": "The name of the instance group from which you want to generate a list of included instances.", "required": true, "location": "path" }, "maxResults": { "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", "default": "500", "format": "uint32", "minimum": "0", "location": "query" }, - "order_by": { + "orderBy": { "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", "location": "query" }, "pageToken": { "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", "location": "query" }, "project": { @@ -15024,52 +21358,7 @@ }, "zone": { "type": "string", - "description": "The name of the zone where the managed instance group is located.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "instanceGroupManager" - ], - "response": { - "$ref": "InstanceGroupManagersListManagedInstancesResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "patch": { - "id": "compute.instanceGroupManagers.patch", - "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", - "httpMethod": "PATCH", - "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listManagedInstances method. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - "parameters": { - "instanceGroupManager": { - "type": "string", - "description": "The name of the instance group manager.", - "required": true, - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "requestId": { - "type": "string", - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query" - }, - "zone": { - "type": "string", - "description": "The name of the zone where you want to create the managed instance group.", + "description": "The name of the zone where the instance group is located.", "required": true, "location": "path" } @@ -15077,13 +21366,13 @@ "parameterOrder": [ "project", "zone", - "instanceGroupManager" + "instanceGroup" ], "request": { - "$ref": "InstanceGroupManager" + "$ref": "InstanceGroupsListInstancesRequest" }, "response": { - "$ref": "Operation" + "$ref": "InstanceGroupsListInstances" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -15091,15 +21380,15 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "recreateInstances": { - "id": "compute.instanceGroupManagers.recreateInstances", - "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", + "removeInstances": { + "id": "compute.instanceGroups.removeInstances", + "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances", "httpMethod": "POST", - "description": "Schedules a group action to recreate the specified instances in the managed instance group. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the action is scheduled even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + "description": "Removes one or more instances from the specified instance group, but does not delete those instances.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration before the VM instance is removed or deleted.", "parameters": { - "instanceGroupManager": { + "instanceGroup": { "type": "string", - "description": "The name of the managed instance group.", + "description": "The name of the instance group where the specified instances will be removed.", "required": true, "location": "path" }, @@ -15117,7 +21406,7 @@ }, "zone": { "type": "string", - "description": "The name of the zone where the managed instance group is located.", + "description": "The name of the zone where the instance group is located.", "required": true, "location": "path" } @@ -15125,10 +21414,10 @@ "parameterOrder": [ "project", "zone", - "instanceGroupManager" + "instanceGroup" ], "request": { - "$ref": "InstanceGroupManagersRecreateInstancesRequest" + "$ref": "InstanceGroupsRemoveInstancesRequest" }, "response": { "$ref": "Operation" @@ -15138,15 +21427,15 @@ "https://www.googleapis.com/auth/compute" ] }, - "resize": { - "id": "compute.instanceGroupManagers.resize", - "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize", + "setNamedPorts": { + "id": "compute.instanceGroups.setNamedPorts", + "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts", "httpMethod": "POST", - "description": "Resizes the managed instance 
group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", + "description": "Sets the named ports for the specified instance group.", "parameters": { - "instanceGroupManager": { + "instanceGroup": { "type": "string", - "description": "The name of the managed instance group.", + "description": "The name of the instance group where the named ports are updated.", "required": true, "location": "path" }, @@ -15162,16 +21451,9 @@ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query" }, - "size": { - "type": "integer", - "description": "The number of running instances that the managed instance group should maintain at any given time. The group automatically adds or removes instances to maintain the number of instances specified by this parameter.", - "required": true, - "format": "int32", - "location": "query" - }, "zone": { "type": "string", - "description": "The name of the zone where the managed instance group is located.", + "description": "The name of the zone where the instance group is located.", "required": true, "location": "path" } @@ -15179,9 +21461,11 @@ "parameterOrder": [ "project", "zone", - "instanceGroupManager", - "size" + "instanceGroup" ], + "request": { + "$ref": "InstanceGroupsSetNamedPortsRequest" + }, "response": { "$ref": "Operation" }, @@ -15190,18 +21474,12 @@ "https://www.googleapis.com/auth/compute" ] }, - "resizeAdvanced": { - "id": "compute.instanceGroupManagers.resizeAdvanced", - "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resizeAdvanced", + "testIamPermissions": { + "id": "compute.instanceGroups.testIamPermissions", + "path": "{project}/zones/{zone}/instanceGroups/{resource}/testIamPermissions", "httpMethod": "POST", - "description": "Resizes the managed instance group with advanced configuration options like disabling creation retries. This is an extended version of the resize method.\n\nIf you increase the size of the instance group, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. 
You must separately verify the status of the creating, creatingWithoutRetries, or deleting actions with the get or listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", + "description": "Returns permissions that a caller has on the specified resource.", "parameters": { - "instanceGroupManager": { - "type": "string", - "description": "The name of the managed instance group.", - "required": true, - "location": "path" - }, "project": { "type": "string", "description": "Project ID for this request.", @@ -15209,44 +21487,53 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, - "requestId": { + "resource": { "type": "string", - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query" + "description": "Name of the resource for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" }, "zone": { "type": "string", - "description": "The name of the zone where the managed instance group is located.", + "description": "The name of the zone for this request.", "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" } }, "parameterOrder": [ "project", "zone", - "instanceGroupManager" + "resource" ], "request": { - "$ref": "InstanceGroupManagersResizeAdvancedRequest" + "$ref": "TestPermissionsRequest" }, "response": { - "$ref": "Operation" + "$ref": "TestPermissionsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] - }, - "setAutoHealingPolicies": { - "id": "compute.instanceGroupManagers.setAutoHealingPolicies", - "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies", - "httpMethod": "POST", - "description": "Modifies the autohealing policies.", + } + } + }, + "instanceTemplates": { + "methods": { + "delete": { + "id": "compute.instanceTemplates.delete", + "path": "{project}/global/instanceTemplates/{instanceTemplate}", + "httpMethod": "DELETE", + "description": "Deletes the specified instance template. If you delete an instance template that is being referenced from another instance group, the instance group will not be able to create or recreate virtual machine instances. 
Deleting an instance template is permanent and cannot be undone.", "parameters": { - "instanceGroupManager": { + "instanceTemplate": { "type": "string", - "description": "The name of the instance group manager.", + "description": "The name of the instance template to delete.", "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, "project": { @@ -15260,22 +21547,12 @@ "type": "string", "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query" - }, - "zone": { - "type": "string", - "description": "The name of the zone where the managed instance group is located.", - "required": true, - "location": "path" } }, "parameterOrder": [ "project", - "zone", - "instanceGroupManager" + "instanceTemplate" ], - "request": { - "$ref": "InstanceGroupManagersSetAutoHealingRequest" - }, "response": { "$ref": "Operation" }, @@ -15284,16 +21561,17 @@ "https://www.googleapis.com/auth/compute" ] }, - "setInstanceTemplate": { - "id": "compute.instanceGroupManagers.setInstanceTemplate", - "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", - "httpMethod": "POST", - "description": "Specifies the instance template to use when creating new instances in this group. The templates for existing instances in the group do not change unless you recreate them.", + "get": { + "id": "compute.instanceTemplates.get", + "path": "{project}/global/instanceTemplates/{instanceTemplate}", + "httpMethod": "GET", + "description": "Returns the specified instance template. Get a list of available instance templates by making a list() request.", "parameters": { - "instanceGroupManager": { + "instanceTemplate": { "type": "string", - "description": "The name of the managed instance group.", + "description": "The name of the instance template.", "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, "project": { @@ -15302,47 +21580,27 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" - }, - "requestId": { - "type": "string", - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query" - }, - "zone": { - "type": "string", - "description": "The name of the zone where the managed instance group is located.", - "required": true, - "location": "path" } }, "parameterOrder": [ "project", - "zone", - "instanceGroupManager" + "instanceTemplate" ], - "request": { - "$ref": "InstanceGroupManagersSetInstanceTemplateRequest" - }, "response": { - "$ref": "Operation" + "$ref": "InstanceTemplate" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "setTargetPools": { - "id": "compute.instanceGroupManagers.setTargetPools", - "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", + "insert": { + "id": "compute.instanceTemplates.insert", + "path": "{project}/global/instanceTemplates", "httpMethod": "POST", - "description": "Modifies the target pools to which all instances in this managed instance group are assigned. The target pools automatically apply to all of the instances in the managed instance group. This operation is marked DONE when you make the request even if the instances have not yet been added to their target pools. The change might take some time to apply to all of the instances in the group depending on the size of the group.", - "parameters": { - "instanceGroupManager": { - "type": "string", - "description": "The name of the managed instance group.", - "required": true, - "location": "path" - }, + "description": "Creates an instance template in the specified project using the data that is included in the request. If you are creating a new template to update an existing instance group, your new instance template must use the same network or, if applicable, the same subnetwork as the original template.", + "parameters": { "project": { "type": "string", "description": "Project ID for this request.", @@ -15354,21 +21612,13 @@ "type": "string", "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query" - }, - "zone": { - "type": "string", - "description": "The name of the zone where the managed instance group is located.", - "required": true, - "location": "path" } }, "parameterOrder": [ - "project", - "zone", - "instanceGroupManager" + "project" ], "request": { - "$ref": "InstanceGroupManagersSetTargetPoolsRequest" + "$ref": "InstanceTemplate" }, "response": { "$ref": "Operation" @@ -15378,44 +21628,48 @@ "https://www.googleapis.com/auth/compute" ] }, - "testIamPermissions": { - "id": "compute.instanceGroupManagers.testIamPermissions", - "path": "{project}/zones/{zone}/instanceGroupManagers/{resource}/testIamPermissions", - "httpMethod": "POST", - "description": "Returns permissions that a caller has on the specified resource.", + "list": { + "id": "compute.instanceTemplates.list", + "path": "{project}/global/instanceTemplates", + "httpMethod": "GET", + "description": "Retrieves a list of instance templates that are contained within the specified project and zone.", "parameters": { - "project": { + "filter": { "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" + "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" }, - "resource": { + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", + "default": "500", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "orderBy": { "type": "string", - "description": "Name of the resource for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" }, - "zone": { + "pageToken": { "type": "string", - "description": "The name of the zone for this request.", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, "parameterOrder": [ - "project", - "zone", - "resource" + "project" ], - "request": { - "$ref": "TestPermissionsRequest" - }, "response": { - "$ref": "TestPermissionsResponse" + "$ref": "InstanceTemplateList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -15423,18 +21677,12 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "update": { - "id": "compute.instanceGroupManagers.update", - "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", - "httpMethod": "PUT", - "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is updated even if the instances in the group have not yet been updated. You must separately verify the status of the individual instances with the listManagedInstances method.", + "testIamPermissions": { + "id": "compute.instanceTemplates.testIamPermissions", + "path": "{project}/global/instanceTemplates/{resource}/testIamPermissions", + "httpMethod": "POST", + "description": "Returns permissions that a caller has on the specified resource.", "parameters": { - "instanceGroupManager": { - "type": "string", - "description": "The name of the instance group manager.", - "required": true, - "location": "path" - }, "project": { "type": "string", "description": "Project ID for this request.", @@ -15442,50 +21690,53 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, - "requestId": { - "type": "string", - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query" - }, - "zone": { + "resource": { "type": "string", - "description": "The name of the zone where you want to create the managed instance group.", + "description": "Name of the resource for this request.", "required": true, + "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", "location": "path" } }, "parameterOrder": [ "project", - "zone", - "instanceGroupManager" + "resource" ], "request": { - "$ref": "InstanceGroupManager" + "$ref": "TestPermissionsRequest" }, "response": { - "$ref": "Operation" + "$ref": "TestPermissionsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] } } }, - "instanceGroups": { + "instances": { "methods": { - "addInstances": { - "id": "compute.instanceGroups.addInstances", - "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances", + "addAccessConfig": { + "id": "compute.instances.addAccessConfig", + "path": "{project}/zones/{zone}/instances/{instance}/addAccessConfig", "httpMethod": "POST", - "description": "Adds a list of instances to the specified instance group. All of the instances in the instance group must be in the same network/subnetwork. Read Adding instances for more information.", + "description": "Adds an access config to an instance's network interface.", "parameters": { - "instanceGroup": { + "instance": { "type": "string", - "description": "The name of the instance group where you are adding instances.", + "description": "The instance name for this request.", "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, + "networkInterface": { + "type": "string", + "description": "The name of the network interface to add to this instance.", + "required": true, + "location": "query" + }, "project": { "type": "string", "description": "Project ID for this request.", @@ -15500,18 +21751,20 @@ }, "zone": { "type": "string", - "description": "The name of the zone where the instance group is located.", + "description": "The name of the zone for this request.", "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" } }, "parameterOrder": [ "project", "zone", - "instanceGroup" + "instance", + "networkInterface" ], "request": { - "$ref": "InstanceGroupsAddInstancesRequest" + "$ref": "AccessConfig" }, "response": { "$ref": "Operation" @@ -15522,10 +21775,10 @@ ] }, "aggregatedList": { - "id": "compute.instanceGroups.aggregatedList", - "path": "{project}/aggregated/instanceGroups", + "id": "compute.instances.aggregatedList", + "path": "{project}/aggregated/instances", "httpMethod": "GET", - "description": "Retrieves the list of instance groups and sorts them by zone.", + "description": "Retrieves aggregated list of instances.", "parameters": { "filter": { "type": "string", @@ -15562,7 +21815,7 @@ "project" ], "response": { - "$ref": "InstanceGroupAggregatedList" + "$ref": "InstanceAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -15570,16 +21823,17 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "delete": { - "id": "compute.instanceGroups.delete", - "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}", - "httpMethod": "DELETE", - "description": 
"Deletes the specified instance group. The instances in the group are not deleted. Note that instance group must not belong to a backend service. Read Deleting an instance group for more information.", + "attachDisk": { + "id": "compute.instances.attachDisk", + "path": "{project}/zones/{zone}/instances/{instance}/attachDisk", + "httpMethod": "POST", + "description": "Attaches an existing Disk resource to an instance. You must first create the disk before you can attach it. It is not possible to create and attach a disk at the same time. For more information, read Adding a persistent disk to your instance.", "parameters": { - "instanceGroup": { + "instance": { "type": "string", - "description": "The name of the instance group to delete.", + "description": "The instance name for this request.", "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, "project": { @@ -15596,16 +21850,20 @@ }, "zone": { "type": "string", - "description": "The name of the zone where the instance group is located.", + "description": "The name of the zone for this request.", "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" } }, "parameterOrder": [ "project", "zone", - "instanceGroup" + "instance" ], + "request": { + "$ref": "AttachedDisk" + }, "response": { "$ref": "Operation" }, @@ -15614,52 +21872,19 @@ "https://www.googleapis.com/auth/compute" ] }, - "get": { - "id": "compute.instanceGroups.get", - "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}", - "httpMethod": "GET", - "description": "Returns the specified instance group. Get a list of available instance groups by making a list() request.", + "delete": { + "id": "compute.instances.delete", + "path": "{project}/zones/{zone}/instances/{instance}", + "httpMethod": "DELETE", + "description": "Deletes the specified Instance resource. 
For more information, see Stopping or Deleting an Instance.", "parameters": { - "instanceGroup": { - "type": "string", - "description": "The name of the instance group.", - "required": true, - "location": "path" - }, - "project": { + "instance": { "type": "string", - "description": "Project ID for this request.", + "description": "Name of the instance resource to delete.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, - "zone": { - "type": "string", - "description": "The name of the zone where the instance group is located.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "project", - "zone", - "instanceGroup" - ], - "response": { - "$ref": "InstanceGroup" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "insert": { - "id": "compute.instanceGroups.insert", - "path": "{project}/zones/{zone}/instanceGroups", - "httpMethod": "POST", - "description": "Creates an instance group in the specified project using the parameters that are included in the request.", - "parameters": { "project": { "type": "string", "description": "Project ID for this request.", @@ -15674,18 +21899,17 @@ }, "zone": { "type": "string", - "description": "The name of the zone where you want to create the instance group.", + "description": "The name of the zone for this request.", "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" } }, "parameterOrder": [ "project", - "zone" + "zone", + "instance" ], - "request": { - "$ref": "InstanceGroup" - }, "response": { "$ref": "Operation" }, @@ -15694,33 +21918,29 @@ "https://www.googleapis.com/auth/compute" ] }, - "list": { - "id": "compute.instanceGroups.list", - "path": "{project}/zones/{zone}/instanceGroups", - "httpMethod": "GET", - "description": "Retrieves the list of instance groups that are located in the specified project and zone.", + "deleteAccessConfig": { + "id": "compute.instances.deleteAccessConfig", + "path": "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig", + "httpMethod": "POST", + "description": "Deletes an access config from an instance's network interface.", "parameters": { - "filter": { + "accessConfig": { "type": "string", - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", + "description": "The name of the access config to delete.", + "required": true, "location": "query" }, - "orderBy": { + "instance": { "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" + "description": "The instance name for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" }, - "pageToken": { + "networkInterface": { "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "The name of the network interface.", + "required": true, "location": "query" }, "project": { @@ -15730,61 +21950,53 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "zone": { "type": "string", - "description": "The name of the zone where the instance group is located.", + "description": "The name of the zone for this request.", "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" } }, "parameterOrder": [ "project", - "zone" + "zone", + "instance", + "accessConfig", + "networkInterface" ], "response": { - "$ref": "InstanceGroupList" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "listInstances": { - "id": "compute.instanceGroups.listInstances", - "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances", + "detachDisk": { + "id": "compute.instances.detachDisk", + "path": "{project}/zones/{zone}/instances/{instance}/detachDisk", "httpMethod": "POST", - "description": "Lists the instances in the specified instance group.", + "description": "Detaches a disk from an instance.", "parameters": { - "filter": { + "deviceName": { "type": "string", - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Disk device name to detach.", + "required": true, "location": "query" }, - "instanceGroup": { + "instance": { "type": "string", - "description": "The name of the instance group from which you want to generate a list of included instances.", + "description": "Instance name.", "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, "project": { "type": "string", "description": "Project ID for this request.", @@ -15792,40 +22004,44 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "zone": { "type": "string", - "description": "The name of the zone where the instance group is located.", + "description": "The name of the zone for this request.", "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" } }, "parameterOrder": [ "project", "zone", - "instanceGroup" + "instance", + "deviceName" ], - "request": { - "$ref": "InstanceGroupsListInstancesRequest" - }, "response": { - "$ref": "InstanceGroupsListInstances" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "removeInstances": { - "id": "compute.instanceGroups.removeInstances", - "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances", - "httpMethod": "POST", - "description": "Removes one or more instances from the specified instance group, but does not delete those instances.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration before the VM instance is removed or deleted.", + "get": { + "id": "compute.instances.get", + "path": "{project}/zones/{zone}/instances/{instance}", + "httpMethod": "GET", + "description": "Returns the specified Instance resource. 
Get a list of available instances by making a list() request.", "parameters": { - "instanceGroup": { + "instance": { "type": "string", - "description": "The name of the instance group where the specified instances will be removed.", + "description": "Name of the instance resource to return.", "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, "project": { @@ -15835,46 +22051,50 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, - "requestId": { - "type": "string", - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query" - }, "zone": { "type": "string", - "description": "The name of the zone where the instance group is located.", + "description": "The name of the zone for this request.", "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" } }, "parameterOrder": [ "project", "zone", - "instanceGroup" + "instance" ], - "request": { - "$ref": "InstanceGroupsRemoveInstancesRequest" - }, "response": { - "$ref": "Operation" + "$ref": "Instance" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "setNamedPorts": { - "id": "compute.instanceGroups.setNamedPorts", - "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts", - "httpMethod": "POST", - "description": "Sets the named ports for the specified instance group.", + "getSerialPortOutput": { + "id": "compute.instances.getSerialPortOutput", + "path": "{project}/zones/{zone}/instances/{instance}/serialPort", + "httpMethod": "GET", + "description": "Returns the specified instance's serial port output.", "parameters": { - "instanceGroup": { + "instance": { "type": "string", - "description": "The name of the instance group where the named ports are updated.", + "description": "Name of the instance scoping this request.", "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, + "port": { + "type": "integer", + "description": "Specifies which COM or serial port to retrieve data from.", + "default": "1", + "format": "int32", + "minimum": "1", + "maximum": "4", + "location": "query" + }, "project": { "type": "string", "description": "Project ID for this request.", @@ -15882,39 +22102,39 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, - "requestId": { + "start": { "type": "string", - "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "description": "Returns output starting from a specific byte position. Use this to page through output when the output is too large to return in a single request. For the initial request, leave this field unspecified. For subsequent calls, this field should be set to the next value returned in the previous call.", + "format": "int64", "location": "query" }, "zone": { "type": "string", - "description": "The name of the zone where the instance group is located.", + "description": "The name of the zone for this request.", "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" } }, "parameterOrder": [ "project", "zone", - "instanceGroup" + "instance" ], - "request": { - "$ref": "InstanceGroupsSetNamedPortsRequest" - }, "response": { - "$ref": "Operation" + "$ref": "SerialPortOutput" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "testIamPermissions": { - "id": "compute.instanceGroups.testIamPermissions", - "path": "{project}/zones/{zone}/instanceGroups/{resource}/testIamPermissions", + "insert": { + "id": "compute.instances.insert", + "path": "{project}/zones/{zone}/instances", "httpMethod": "POST", - "description": "Returns permissions that a caller has on the specified resource.", + "description": "Creates an instance resource in the specified project using the data included in the request.", "parameters": { "project": { "type": "string", @@ -15923,12 +22143,10 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, - "resource": { + "requestId": { "type": "string", - "description": "Name of the resource for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" }, "zone": { "type": "string", @@ -15940,37 +22158,47 @@ }, "parameterOrder": [ "project", - "zone", - "resource" + "zone" ], "request": { - "$ref": "TestPermissionsRequest" + "$ref": "Instance" }, "response": { - "$ref": "TestPermissionsResponse" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] - } - } - }, - "instanceTemplates": { - "methods": { - "delete": { - "id": "compute.instanceTemplates.delete", - "path": "{project}/global/instanceTemplates/{instanceTemplate}", - "httpMethod": "DELETE", - "description": "Deletes the specified instance template. If you delete an instance template that is being referenced from another instance group, the instance group will not be able to create or recreate virtual machine instances. Deleting an instance template is permanent and cannot be undone.", + }, + "list": { + "id": "compute.instances.list", + "path": "{project}/zones/{zone}/instances", + "httpMethod": "GET", + "description": "Retrieves the list of instances contained within the specified zone.", "parameters": { - "instanceTemplate": { + "filter": { "type": "string", - "description": "The name of the instance template to delete.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" + "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "default": "500", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" }, "project": { "type": "string", @@ -15979,36 +22207,62 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, - "requestId": { + "zone": { "type": "string", - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query" + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" } }, "parameterOrder": [ "project", - "instanceTemplate" + "zone" ], "response": { - "$ref": "Operation" + "$ref": "InstanceList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "get": { - "id": "compute.instanceTemplates.get", - "path": "{project}/global/instanceTemplates/{instanceTemplate}", + "listReferrers": { + "id": "compute.instances.listReferrers", + "path": "{project}/zones/{zone}/instances/{instance}/referrers", "httpMethod": "GET", - "description": "Returns the specified instance template. Get a list of available instance templates by making a list() request.", + "description": "Retrieves the list of referrers to instances contained within the specified zone.", "parameters": { - "instanceTemplate": { + "filter": { + "type": "string", + "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "instance": { + "type": "string", + "description": "Name of the target instance scoping this request, or '-' if the request should span over all instances in the container.", + "required": true, + "pattern": "-|[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "default": "500", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "orderBy": { "type": "string", - "description": "The name of the instance template.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" }, "project": { "type": "string", @@ -16016,14 +22270,22 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" } }, "parameterOrder": [ "project", - "instanceTemplate" + "zone", + "instance" ], "response": { - "$ref": "InstanceTemplate" + "$ref": "InstanceListReferrers" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -16031,12 +22293,19 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "insert": { - "id": "compute.instanceTemplates.insert", - "path": "{project}/global/instanceTemplates", + "reset": { + "id": "compute.instances.reset", + "path": "{project}/zones/{zone}/instances/{instance}/reset", "httpMethod": "POST", - "description": "Creates an instance template in the specified project using the data that is included in the request. 
If you are creating a new template to update an existing instance group, your new instance template must use the same network or, if applicable, the same subnetwork as the original template.", + "description": "Performs a reset on the instance. For more information, see Resetting an instance.", "parameters": { + "instance": { + "type": "string", + "description": "Name of the instance scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, "project": { "type": "string", "description": "Project ID for this request.", @@ -16048,14 +22317,20 @@ "type": "string", "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" } }, "parameterOrder": [ - "project" + "project", + "zone", + "instance" ], - "request": { - "$ref": "InstanceTemplate" - }, "response": { "$ref": "Operation" }, @@ -16064,34 +22339,31 @@ "https://www.googleapis.com/auth/compute" ] }, - "list": { - "id": "compute.instanceTemplates.list", - "path": "{project}/global/instanceTemplates", - "httpMethod": "GET", - "description": "Retrieves a list of instance templates that are contained within the specified project and zone.", + "setDiskAutoDelete": { + "id": "compute.instances.setDiskAutoDelete", + "path": "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete", + "httpMethod": "POST", + "description": "Sets the auto-delete flag for a disk attached to an instance.", "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", + "autoDelete": { + "type": "boolean", + "description": "Whether to auto-delete the disk when the instance is deleted.", + "required": true, "location": "query" }, - "orderBy": { + "deviceName": { "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "description": "The device name of the disk to modify.", + "required": true, + "pattern": "\\w[\\w.-]{0,254}", "location": "query" }, - "pageToken": { + "instance": { "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" + "description": "The instance name.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" }, "project": { "type": "string", @@ -16099,26 +22371,48 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" } }, "parameterOrder": [ - "project" + "project", + "zone", + "instance", + "autoDelete", + "deviceName" ], "response": { - "$ref": "InstanceTemplateList" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "testIamPermissions": { - "id": "compute.instanceTemplates.testIamPermissions", - "path": "{project}/global/instanceTemplates/{resource}/testIamPermissions", + "setLabels": { + "id": "compute.instances.setLabels", + "path": "{project}/zones/{zone}/instances/{instance}/setLabels", "httpMethod": "POST", - "description": "Returns permissions that a caller has on the specified resource.", + "description": "Sets labels on an instance. To learn more about labels, read the Labeling Resources documentation.", "parameters": { + "instance": { + "type": "string", + "description": "Name of the instance scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, "project": { "type": "string", "description": "Project ID for this request.", @@ -16126,53 +22420,48 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, - "resource": { + "requestId": { "type": "string", - "description": "Name of the resource for this request.", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" } }, "parameterOrder": [ "project", - "resource" + "zone", + "instance" ], "request": { - "$ref": "TestPermissionsRequest" + "$ref": "InstancesSetLabelsRequest" }, "response": { - "$ref": "TestPermissionsResponse" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] - } - } - }, - "instances": { - "methods": { - "addAccessConfig": { - "id": "compute.instances.addAccessConfig", - "path": "{project}/zones/{zone}/instances/{instance}/addAccessConfig", + }, + "setMachineResources": { + "id": "compute.instances.setMachineResources", + "path": "{project}/zones/{zone}/instances/{instance}/setMachineResources", "httpMethod": "POST", - "description": "Adds an access config to an instance's network interface.", + "description": "Changes the number and/or type of accelerator for a stopped instance to the values specified in the request.", "parameters": { "instance": { "type": "string", - "description": "The instance name for this request.", + "description": "Name of the instance scoping this request.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, - "networkInterface": { - "type": "string", - "description": "The name of the network interface to add to this instance.", - "required": true, - "location": "query" - }, "project": { "type": "string", "description": "Project ID for this request.", @@ -16196,11 +22485,10 @@ "parameterOrder": [ "project", "zone", - "instance", - "networkInterface" + "instance" ], "request": { - "$ref": "AccessConfig" + "$ref": "InstancesSetMachineResourcesRequest" }, "response": { "$ref": "Operation" @@ -16210,64 +22498,64 @@ "https://www.googleapis.com/auth/compute" ] }, - "aggregatedList": { - "id": "compute.instances.aggregatedList", - "path": "{project}/aggregated/instances", - "httpMethod": "GET", - "description": "Retrieves aggregated list of instances.", + "setMachineType": { + "id": "compute.instances.setMachineType", + "path": "{project}/zones/{zone}/instances/{instance}/setMachineType", + "httpMethod": "POST", + "description": "Changes the machine type for a stopped instance to the machine type specified in the request.", "parameters": { - "filter": { + "instance": { "type": "string", - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" + "description": "Name of the instance scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" }, - "orderBy": { + "project": { "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" }, - "pageToken": { + "requestId": { "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query" }, - "project": { + "zone": { "type": "string", - "description": "Project ID for this request.", + "description": "The name of the zone for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" } }, "parameterOrder": [ - "project" + "project", + "zone", + "instance" ], + "request": { + "$ref": "InstancesSetMachineTypeRequest" + }, "response": { - "$ref": "InstanceAggregatedList" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "attachDisk": { - "id": "compute.instances.attachDisk", - "path": "{project}/zones/{zone}/instances/{instance}/attachDisk", + "setMetadata": { + "id": "compute.instances.setMetadata", + "path": "{project}/zones/{zone}/instances/{instance}/setMetadata", "httpMethod": "POST", - "description": "Attaches an existing Disk resource to an instance. You must first create the disk before you can attach it. It is not possible to create and attach a disk at the same time. For more information, read Adding a persistent disk to your instance.", + "description": "Sets metadata for the specified instance to the data included in the request.", "parameters": { "instance": { "type": "string", - "description": "The instance name for this request.", + "description": "Name of the instance scoping this request.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -16298,7 +22586,7 @@ "instance" ], "request": { - "$ref": "AttachedDisk" + "$ref": "Metadata" }, "response": { "$ref": "Operation" @@ -16308,15 +22596,15 @@ "https://www.googleapis.com/auth/compute" ] }, - "delete": { - "id": "compute.instances.delete", - "path": "{project}/zones/{zone}/instances/{instance}", - "httpMethod": "DELETE", - "description": "Deletes the specified Instance resource. For more information, see Stopping or Deleting an Instance.", + "setMinCpuPlatform": { + "id": "compute.instances.setMinCpuPlatform", + "path": "{project}/zones/{zone}/instances/{instance}/setMinCpuPlatform", + "httpMethod": "POST", + "description": "Changes the minimum CPU platform that this instance should use. This method can only be called on a stopped instance. 
For more information, read Specifying a Minimum CPU Platform.", "parameters": { "instance": { "type": "string", - "description": "Name of the instance resource to delete.", + "description": "Name of the instance scoping this request.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -16346,6 +22634,9 @@ "zone", "instance" ], + "request": { + "$ref": "InstancesSetMinCpuPlatformRequest" + }, "response": { "$ref": "Operation" }, @@ -16354,31 +22645,19 @@ "https://www.googleapis.com/auth/compute" ] }, - "deleteAccessConfig": { - "id": "compute.instances.deleteAccessConfig", - "path": "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig", + "setScheduling": { + "id": "compute.instances.setScheduling", + "path": "{project}/zones/{zone}/instances/{instance}/setScheduling", "httpMethod": "POST", - "description": "Deletes an access config from an instance's network interface.", + "description": "Sets an instance's scheduling options.", "parameters": { - "accessConfig": { - "type": "string", - "description": "The name of the access config to delete.", - "required": true, - "location": "query" - }, "instance": { "type": "string", - "description": "The instance name for this request.", + "description": "Instance name.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, - "networkInterface": { - "type": "string", - "description": "The name of the network interface.", - "required": true, - "location": "query" - }, "project": { "type": "string", "description": "Project ID for this request.", @@ -16402,10 +22681,11 @@ "parameterOrder": [ "project", "zone", - "instance", - "accessConfig", - "networkInterface" + "instance" ], + "request": { + "$ref": "Scheduling" + }, "response": { "$ref": "Operation" }, @@ -16414,21 +22694,15 @@ "https://www.googleapis.com/auth/compute" ] }, - "detachDisk": { - "id": "compute.instances.detachDisk", - "path": "{project}/zones/{zone}/instances/{instance}/detachDisk", + "setServiceAccount": { + "id": "compute.instances.setServiceAccount", + "path": "{project}/zones/{zone}/instances/{instance}/setServiceAccount", "httpMethod": "POST", - "description": "Detaches a disk from an instance.", + "description": "Sets the service account on the instance. For more information, read Changing the service account and access scopes for an instance.", "parameters": { - "deviceName": { - "type": "string", - "description": "Disk device name to detach.", - "required": true, - "location": "query" - }, "instance": { "type": "string", - "description": "Instance name.", + "description": "Name of the instance resource to start.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -16456,9 +22730,11 @@ "parameterOrder": [ "project", "zone", - "instance", - "deviceName" + "instance" ], + "request": { + "$ref": "InstancesSetServiceAccountRequest" + }, "response": { "$ref": "Operation" }, @@ -16467,15 +22743,15 @@ "https://www.googleapis.com/auth/compute" ] }, - "get": { - "id": "compute.instances.get", - "path": "{project}/zones/{zone}/instances/{instance}", - "httpMethod": "GET", - "description": "Returns the specified Instance resource. 
Get a list of available instances by making a list() request.", + "setTags": { + "id": "compute.instances.setTags", + "path": "{project}/zones/{zone}/instances/{instance}/setTags", + "httpMethod": "POST", + "description": "Sets tags for the specified instance to the data included in the request.", "parameters": { "instance": { "type": "string", - "description": "Name of the instance resource to return.", + "description": "Name of the instance scoping this request.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -16487,6 +22763,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "zone": { "type": "string", "description": "The name of the zone for this request.", @@ -16500,37 +22781,30 @@ "zone", "instance" ], + "request": { + "$ref": "Tags" + }, "response": { - "$ref": "Instance" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "getSerialPortOutput": { - "id": "compute.instances.getSerialPortOutput", - "path": "{project}/zones/{zone}/instances/{instance}/serialPort", - "httpMethod": "GET", - "description": "Returns the specified instance's serial port output.", + "start": { + "id": "compute.instances.start", + "path": "{project}/zones/{zone}/instances/{instance}/start", + "httpMethod": "POST", + "description": "Starts an instance that was stopped using the using the instances().stop method. For more information, see Restart an instance.", "parameters": { "instance": { "type": "string", - "description": "Name of the instance scoping this request.", + "description": "Name of the instance resource to start.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, - "port": { - "type": "integer", - "description": "Specifies which COM or serial port to retrieve data from.", - "default": "1", - "format": "int32", - "minimum": "1", - "maximum": "4", - "location": "query" - }, "project": { "type": "string", "description": "Project ID for this request.", @@ -16538,10 +22812,9 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, - "start": { + "requestId": { "type": "string", - "description": "Returns output starting from a specific byte position. Use this to page through output when the output is too large to return in a single request. For the initial request, leave this field unspecified. 
For subsequent calls, this field should be set to the next value returned in the previous call.", - "format": "int64", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query" }, "zone": { @@ -16558,20 +22831,26 @@ "instance" ], "response": { - "$ref": "SerialPortOutput" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "insert": { - "id": "compute.instances.insert", - "path": "{project}/zones/{zone}/instances", + "startWithEncryptionKey": { + "id": "compute.instances.startWithEncryptionKey", + "path": "{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey", "httpMethod": "POST", - "description": "Creates an instance resource in the specified project using the data included in the request.", + "description": "Starts an instance that was stopped using the using the instances().stop method. For more information, see Restart an instance.", "parameters": { + "instance": { + "type": "string", + "description": "Name of the instance resource to start.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, "project": { "type": "string", "description": "Project ID for this request.", @@ -16594,10 +22873,11 @@ }, "parameterOrder": [ "project", - "zone" + "zone", + "instance" ], "request": { - "$ref": "Instance" + "$ref": "InstancesStartWithEncryptionKeyRequest" }, "response": { "$ref": "Operation" @@ -16607,34 +22887,18 @@ "https://www.googleapis.com/auth/compute" ] }, - "list": { - "id": "compute.instances.list", - "path": "{project}/zones/{zone}/instances", - "httpMethod": "GET", - "description": "Retrieves the list of instances contained within the specified zone.", + "stop": { + "id": "compute.instances.stop", + "path": "{project}/zones/{zone}/instances/{instance}/stop", + "httpMethod": "POST", + "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur per-minute, virtual machine usage charges while they are stopped, but any resources that the virtual machine is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. For more information, see Stopping an instance.", "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. 
The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { + "instance": { "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" + "description": "Name of the instance resource to stop.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" }, "project": { "type": "string", @@ -16643,6 +22907,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "zone": { "type": "string", "description": "The name of the zone for this request.", @@ -16653,58 +22922,35 @@ }, "parameterOrder": [ "project", - "zone" + "zone", + "instance" ], "response": { - "$ref": "InstanceList" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "listReferrers": { - "id": "compute.instances.listReferrers", - "path": "{project}/zones/{zone}/instances/{instance}/referrers", - "httpMethod": "GET", - "description": "Retrieves the list of referrers to instances contained within the specified zone.", + "testIamPermissions": { + "id": "compute.instances.testIamPermissions", + "path": "{project}/zones/{zone}/instances/{resource}/testIamPermissions", + "httpMethod": "POST", + "description": "Returns permissions that a caller has on the specified resource.", "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "instance": { + "project": { "type": "string", - "description": "Name of the target instance scoping this request, or '-' if the request should span over all instances in the container.", + "description": "Project ID for this request.", "required": true, - "pattern": "-|[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", - "default": "500", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "orderBy": { - "type": "string", - "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" - }, - "project": { + "resource": { "type": "string", - "description": "Project ID for this request.", + "description": "Name of the resource for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, "zone": { @@ -16718,85 +22964,82 @@ "parameterOrder": [ "project", "zone", - "instance" + "resource" ], + "request": { + "$ref": "TestPermissionsRequest" + }, "response": { - "$ref": "InstanceListReferrers" + "$ref": "TestPermissionsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - }, - "reset": { - "id": "compute.instances.reset", - "path": "{project}/zones/{zone}/instances/{instance}/reset", - "httpMethod": "POST", - "description": "Performs a reset on the instance. For more information, see Resetting an instance.", - "parameters": { - "instance": { - "type": "string", - "description": "Name of the instance scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" + } + } + }, + "interconnectAttachments": { + "methods": { + "aggregatedList": { + "id": "compute.interconnectAttachments.aggregatedList", + "path": "{project}/aggregated/interconnectAttachments", + "httpMethod": "GET", + "description": "Retrieves an aggregated list of interconnect attachments.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" }, - "project": { + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "default": "500", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "orderBy": { "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" }, - "requestId": { + "pageToken": { "type": "string", - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", "location": "query" }, - "zone": { + "project": { "type": "string", - "description": "The name of the zone for this request.", + "description": "Project ID for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, "parameterOrder": [ - "project", - "zone", - "instance" + "project" ], "response": { - "$ref": "Operation" + "$ref": "InterconnectAttachmentAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "setDiskAutoDelete": { - "id": "compute.instances.setDiskAutoDelete", - "path": "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete", - "httpMethod": "POST", - "description": "Sets the auto-delete flag for a disk attached to an instance.", + "delete": { + "id": "compute.interconnectAttachments.delete", + "path": "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + "httpMethod": "DELETE", + "description": "Deletes the specified interconnect attachment.", "parameters": { - "autoDelete": { - "type": "boolean", - "description": "Whether to auto-delete the disk when the instance is deleted.", - "required": true, - "location": "query" - }, - "deviceName": { - "type": "string", - "description": "The device name of the disk to modify.", - "required": true, - "pattern": "\\w[\\w.-]{0,254}", - "location": "query" - }, - "instance": { + "interconnectAttachment": { "type": "string", - "description": "The instance name.", + "description": "Name of the interconnect attachment to delete.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -16808,25 +23051,23 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, - "requestId": { - "type": "string", - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query" - }, - "zone": { + "region": { "type": "string", - "description": "The name of the zone for this request.", + "description": "Name of the region for this request.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ "project", - "zone", - "instance", - "autoDelete", - "deviceName" + "region", + "interconnectAttachment" ], "response": { "$ref": "Operation" @@ -16836,15 +23077,15 @@ "https://www.googleapis.com/auth/compute" ] }, - "setLabels": { - "id": "compute.instances.setLabels", - "path": "{project}/zones/{zone}/instances/{instance}/setLabels", - "httpMethod": "POST", - "description": "Sets labels on an instance. To learn more about labels, read the Labeling Resources documentation.", + "get": { + "id": "compute.interconnectAttachments.get", + "path": "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + "httpMethod": "GET", + "description": "Returns the specified interconnect attachment.", "parameters": { - "instance": { + "interconnectAttachment": { "type": "string", - "description": "Name of the instance scoping this request.", + "description": "Name of the interconnect attachment to return.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -16856,14 +23097,9 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, - "requestId": { - "type": "string", - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query" - }, - "zone": { + "region": { "type": "string", - "description": "The name of the zone for this request.", + "description": "Name of the region for this request.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -16871,60 +23107,50 @@ }, "parameterOrder": [ "project", - "zone", - "instance" + "region", + "interconnectAttachment" ], - "request": { - "$ref": "InstancesSetLabelsRequest" - }, "response": { - "$ref": "Operation" + "$ref": "InterconnectAttachment" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "setMachineResources": { - "id": "compute.instances.setMachineResources", - "path": "{project}/zones/{zone}/instances/{instance}/setMachineResources", + "insert": { + "id": "compute.interconnectAttachments.insert", + "path": "{project}/regions/{region}/interconnectAttachments", "httpMethod": "POST", - "description": "Changes the number and/or type of accelerator for a stopped instance to the values specified in the request.", + "description": "Creates an InterconnectAttachment in the specified project using the data included in the request.", "parameters": { - "instance": { + "project": { "type": "string", - "description": "Name of the instance scoping this request.", + "description": "Project ID for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, - "project": { + "region": { "type": "string", - "description": "Project ID for this request.", + "description": "Name of the region for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, "requestId": { "type": "string", "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" } }, "parameterOrder": [ "project", - "zone", - "instance" + "region" ], "request": { - "$ref": "InstancesSetMachineResourcesRequest" + "$ref": "InterconnectAttachment" }, "response": { "$ref": "Operation" @@ -16934,18 +23160,34 @@ "https://www.googleapis.com/auth/compute" ] }, - "setMachineType": { - "id": "compute.instances.setMachineType", - "path": "{project}/zones/{zone}/instances/{instance}/setMachineType", - "httpMethod": "POST", - "description": "Changes the machine type for a stopped instance to the machine type specified in the request.", + "list": { + "id": "compute.interconnectAttachments.list", + "path": "{project}/regions/{region}/interconnectAttachments", + "httpMethod": "GET", + "description": "Retrieves the list of interconnect attachments contained within the specified region.", "parameters": { - "instance": { + "filter": { "type": "string", - "description": "Name of the instance scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" + "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "default": "500", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". 
This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" }, "project": { "type": "string", @@ -16954,14 +23196,9 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, - "requestId": { - "type": "string", - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query" - }, - "zone": { + "region": { "type": "string", - "description": "The name of the zone for this request.", + "description": "Name of the region for this request.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -16969,33 +23206,23 @@ }, "parameterOrder": [ "project", - "zone", - "instance" + "region" ], - "request": { - "$ref": "InstancesSetMachineTypeRequest" - }, "response": { - "$ref": "Operation" + "$ref": "InterconnectAttachmentList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "setMetadata": { - "id": "compute.instances.setMetadata", - "path": "{project}/zones/{zone}/instances/{instance}/setMetadata", + "testIamPermissions": { + "id": "compute.interconnectAttachments.testIamPermissions", + "path": "{project}/regions/{region}/interconnectAttachments/{resource}/testIamPermissions", "httpMethod": "POST", - "description": "Sets metadata for the specified instance to the data included in the request.", + "description": "Returns permissions that a caller has on the specified resource.", "parameters": { - "instance": { - "type": "string", - "description": "Name of the instance scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, "project": { "type": "string", "description": "Project ID for this request.", @@ -17003,14 +23230,16 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, - "requestId": { + "region": { "type": "string", - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query" + "description": "The name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" }, - "zone": { + "resource": { "type": "string", - "description": "The name of the zone for this request.", + "description": "Name of the resource for this request.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -17018,127 +23247,121 @@ }, "parameterOrder": [ "project", - "zone", - "instance" + "region", + "resource" ], "request": { - "$ref": "Metadata" + "$ref": "TestPermissionsRequest" }, "response": { - "$ref": "Operation" + "$ref": "TestPermissionsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] - }, - "setMinCpuPlatform": { - "id": "compute.instances.setMinCpuPlatform", - "path": "{project}/zones/{zone}/instances/{instance}/setMinCpuPlatform", - "httpMethod": "POST", - "description": "Changes the minimum CPU platform that this instance should use. This method can only be called on a stopped instance. For more information, read Specifying a Minimum CPU Platform.", + } + } + }, + "interconnectLocations": { + "methods": { + "get": { + "id": "compute.interconnectLocations.get", + "path": "{project}/global/interconnectLocations/{interconnectLocation}", + "httpMethod": "GET", + "description": "Returns the details for the specified interconnect location. Get a list of available interconnect locations by making a list() request.", "parameters": { - "instance": { - "type": "string", - "description": "Name of the instance scoping this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - }, - "requestId": { - "type": "string", - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query" - }, - "zone": { + "interconnectLocation": { "type": "string", - "description": "The name of the zone for this request.", + "description": "Name of the interconnect location to return.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" } }, "parameterOrder": [ "project", - "zone", - "instance" + "interconnectLocation" ], - "request": { - "$ref": "InstancesSetMinCpuPlatformRequest" - }, "response": { - "$ref": "Operation" + "$ref": "InterconnectLocation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "setScheduling": { - "id": "compute.instances.setScheduling", - "path": "{project}/zones/{zone}/instances/{instance}/setScheduling", - "httpMethod": "POST", - "description": "Sets an instance's scheduling options.", + "list": { + "id": "compute.interconnectLocations.list", + "path": "{project}/global/interconnectLocations", + "httpMethod": "GET", + "description": "Retrieves the list of interconnect locations available to the specified project.", "parameters": { - "instance": { + "filter": { "type": "string", - "description": "Instance name.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" + "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" }, - "project": { + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. 
Acceptable values are 0 to 500, inclusive. (Default: 500)", + "default": "500", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "orderBy": { "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" }, - "requestId": { + "pageToken": { "type": "string", - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", "location": "query" }, - "zone": { + "project": { "type": "string", - "description": "The name of the zone for this request.", + "description": "Project ID for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, "parameterOrder": [ - "project", - "zone", - "instance" + "project" ], - "request": { - "$ref": "Scheduling" - }, "response": { - "$ref": "Operation" + "$ref": "InterconnectLocationList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] - }, - "setServiceAccount": { - "id": "compute.instances.setServiceAccount", - "path": "{project}/zones/{zone}/instances/{instance}/setServiceAccount", - "httpMethod": "POST", - "description": "Sets the service account on the instance. 
For more information, read Changing the service account and access scopes for an instance.", + } + } + }, + "interconnects": { + "methods": { + "delete": { + "id": "compute.interconnects.delete", + "path": "{project}/global/interconnects/{interconnect}", + "httpMethod": "DELETE", + "description": "Deletes the specified interconnect.", "parameters": { - "instance": { + "interconnect": { "type": "string", - "description": "Name of the instance resource to start.", + "description": "Name of the interconnect to delete.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -17154,23 +23377,12 @@ "type": "string", "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" } }, "parameterOrder": [ "project", - "zone", - "instance" + "interconnect" ], - "request": { - "$ref": "InstancesSetServiceAccountRequest" - }, "response": { "$ref": "Operation" }, @@ -17179,15 +23391,15 @@ "https://www.googleapis.com/auth/compute" ] }, - "setTags": { - "id": "compute.instances.setTags", - "path": "{project}/zones/{zone}/instances/{instance}/setTags", - "httpMethod": "POST", - "description": "Sets tags for the specified instance to the data included in the request.", + "get": { + "id": "compute.interconnects.get", + "path": "{project}/global/interconnects/{interconnect}", + "httpMethod": "GET", + "description": "Returns the specified interconnect. Get a list of available interconnects by making a list() request.", "parameters": { - "instance": { + "interconnect": { "type": "string", - "description": "Name of the instance scoping this request.", + "description": "Name of the interconnect to return.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -17198,49 +23410,27 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" - }, - "requestId": { - "type": "string", - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - "location": "query" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" } }, "parameterOrder": [ "project", - "zone", - "instance" + "interconnect" ], - "request": { - "$ref": "Tags" - }, "response": { - "$ref": "Operation" + "$ref": "Interconnect" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "start": { - "id": "compute.instances.start", - "path": "{project}/zones/{zone}/instances/{instance}/start", + "insert": { + "id": "compute.interconnects.insert", + "path": "{project}/global/interconnects", "httpMethod": "POST", - "description": "Starts an instance that was stopped using the using the instances().stop method. For more information, see Restart an instance.", + "description": "Creates a Interconnect in the specified project using the data included in the request.", "parameters": { - "instance": { - "type": "string", - "description": "Name of the instance resource to start.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, "project": { "type": "string", "description": "Project ID for this request.", @@ -17252,20 +23442,14 @@ "type": "string", "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" } }, "parameterOrder": [ - "project", - "zone", - "instance" + "project" ], + "request": { + "$ref": "Interconnect" + }, "response": { "$ref": "Operation" }, @@ -17274,64 +23458,64 @@ "https://www.googleapis.com/auth/compute" ] }, - "startWithEncryptionKey": { - "id": "compute.instances.startWithEncryptionKey", - "path": "{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey", - "httpMethod": "POST", - "description": "Starts an instance that was stopped using the using the instances().stop method. 
For more information, see Restart an instance.", + "list": { + "id": "compute.interconnects.list", + "path": "{project}/global/interconnects", + "httpMethod": "GET", + "description": "Retrieves the list of interconnect available to the specified project.", "parameters": { - "instance": { + "filter": { "type": "string", - "description": "Name of the instance resource to start.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" + "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" }, - "project": { + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "default": "500", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "orderBy": { "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" }, - "requestId": { + "pageToken": { "type": "string", - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", "location": "query" }, - "zone": { + "project": { "type": "string", - "description": "The name of the zone for this request.", + "description": "Project ID for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, "parameterOrder": [ - "project", - "zone", - "instance" + "project" ], - "request": { - "$ref": "InstancesStartWithEncryptionKeyRequest" - }, "response": { - "$ref": "Operation" + "$ref": "InterconnectList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "stop": { - "id": "compute.instances.stop", - "path": "{project}/zones/{zone}/instances/{instance}/stop", - "httpMethod": "POST", - "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur per-minute, virtual machine usage charges while they are stopped, but any resources that the virtual machine is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. For more information, see Stopping an instance.", + "patch": { + "id": "compute.interconnects.patch", + "path": "{project}/global/interconnects/{interconnect}", + "httpMethod": "PATCH", + "description": "Updates the specified interconnect with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", "parameters": { - "instance": { + "interconnect": { "type": "string", - "description": "Name of the instance resource to stop.", + "description": "Name of the interconnect to update.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -17347,20 +23531,15 @@ "type": "string", "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" } }, "parameterOrder": [ "project", - "zone", - "instance" + "interconnect" ], + "request": { + "$ref": "Interconnect" + }, "response": { "$ref": "Operation" }, @@ -17370,8 +23549,8 @@ ] }, "testIamPermissions": { - "id": "compute.instances.testIamPermissions", - "path": "{project}/zones/{zone}/instances/{resource}/testIamPermissions", + "id": "compute.interconnects.testIamPermissions", + "path": "{project}/global/interconnects/{resource}/testIamPermissions", "httpMethod": "POST", "description": "Returns permissions that a caller has on the specified resource.", "parameters": { @@ -17386,20 +23565,12 @@ "type": "string", "description": "Name of the resource for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "zone": { - "type": "string", - "description": "The name of the zone for this request.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", "location": "path" } }, "parameterOrder": [ "project", - "zone", "resource" ], "request": { @@ -17780,8 +23951,41 @@ }, "pageToken": { "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "NetworkList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "id": "compute.networks.patch", + "path": "{project}/global/networks/{network}", + "httpMethod": "PATCH", + "description": "Patches the specified network with the data included in the request.", + "parameters": { + "network": { + "type": "string", + "description": "Name of the network to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" }, "project": { "type": "string", @@ -17789,18 +23993,26 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ - "project" + "project", + "network" ], + "request": { + "$ref": "Network" + }, "response": { - "$ref": "NetworkList" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "removePeering": { @@ -17899,7 +24111,7 @@ "type": "string", "description": "Name of the resource for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", "location": "path" } }, @@ -18544,7 +24756,7 @@ }, "requestId": { "type": "string", - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and then the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query" } }, @@ -18560,8 +24772,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "testIamPermissions": { @@ -18637,7 +24848,7 @@ }, "requestId": { "type": "string", - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and then the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.", + "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query" } }, @@ -18936,8 +25147,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "testIamPermissions": { @@ -19608,8 +25818,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "recreateInstances": { @@ -20758,8 +26967,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "preview": { @@ -21076,7 +27284,243 @@ "type": "string", "description": "Name of the resource for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "resource" + ], + "request": { + "$ref": "TestPermissionsRequest" + }, + "response": { + "$ref": "TestPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "securityPolicies": { + "methods": { + "delete": { + "id": "compute.securityPolicies.delete", + "path": "{project}/global/securityPolicies/{securityPolicy}", + "httpMethod": "DELETE", + "description": "Deletes the specified policy.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, + "securityPolicy": { + "type": "string", + "description": "Name of the security policy to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "securityPolicy" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.securityPolicies.get", + "path": "{project}/global/securityPolicies/{securityPolicy}", + "httpMethod": "GET", + "description": "List all of the ordered rules present in a single specified policy.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "securityPolicy": { + "type": "string", + "description": "Name of the security policy to get.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "securityPolicy" + ], + "response": { + "$ref": "SecurityPolicy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.securityPolicies.insert", + "path": "{project}/global/securityPolicies", + "httpMethod": "POST", + "description": "Creates a new policy in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "SecurityPolicy" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.securityPolicies.list", + "path": "{project}/global/securityPolicies", + "httpMethod": "GET", + "description": "List all the policies that have been configured for the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter {expression} for filtering listed resources. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "default": "500", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "SecurityPolicyList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "id": "compute.securityPolicies.patch", + "path": "{project}/global/securityPolicies/{securityPolicy}", + "httpMethod": "PATCH", + "description": "Patches the specified policy with the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, + "securityPolicy": { + "type": "string", + "description": "Name of the security policy to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "securityPolicy" + ], + "request": { + "$ref": "SecurityPolicy" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "testIamPermissions": { + "id": "compute.securityPolicies.testIamPermissions", + "path": "{project}/global/securityPolicies/{resource}/testIamPermissions", + "httpMethod": "POST", + "description": "Returns permissions that a caller has on the specified resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "resource": { + "type": "string", + "description": "Name of the resource for this request.", + "required": true, + "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", "location": "path" } }, @@ -21274,7 +27718,7 @@ "type": "string", "description": "Name of the resource for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", "location": "path" } }, @@ -21469,7 +27913,7 @@ "type": "string", "description": "Name of the resource for this request.", "required": true, - "pattern": 
"[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", "location": "path" } }, @@ -22173,7 +28617,7 @@ "type": "string", "description": "Name of the resource for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", "location": "path" } }, @@ -22450,7 +28894,7 @@ "type": "string", "description": "Name of the resource for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", "location": "path" } }, @@ -23632,7 +30076,7 @@ "type": "string", "description": "Name of the resource for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", "location": "path" } }, @@ -24413,8 +30857,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "testIamPermissions": { @@ -24434,7 +30877,7 @@ "type": "string", "description": "Name of the resource for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", "location": "path" } }, diff --git a/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go b/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go index 2e9cc07ed49d..f9e1af4b0bb9 100644 --- a/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go +++ b/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go @@ -91,6 +91,9 @@ func New(client *http.Client) (*Service, error) { s.InstanceGroups = NewInstanceGroupsService(s) s.InstanceTemplates = NewInstanceTemplatesService(s) s.Instances = NewInstancesService(s) + s.InterconnectAttachments = NewInterconnectAttachmentsService(s) + s.InterconnectLocations = NewInterconnectLocationsService(s) + s.Interconnects = NewInterconnectsService(s) s.Licenses = NewLicensesService(s) s.MachineTypes = NewMachineTypesService(s) s.Networks = NewNetworksService(s) @@ -104,6 +107,7 @@ func New(client *http.Client) (*Service, error) { s.Regions = NewRegionsService(s) s.Routers = NewRoutersService(s) s.Routes = NewRoutesService(s) + s.SecurityPolicies = NewSecurityPoliciesService(s) s.Snapshots = NewSnapshotsService(s) s.SslCertificates = NewSslCertificatesService(s) s.Subnetworks = NewSubnetworksService(s) @@ -166,6 +170,12 @@ type Service struct { Instances *InstancesService + InterconnectAttachments *InterconnectAttachmentsService + + InterconnectLocations *InterconnectLocationsService + + Interconnects *InterconnectsService + Licenses *LicensesService MachineTypes *MachineTypesService @@ -192,6 +202,8 @@ type Service struct { Routes *RoutesService + SecurityPolicies *SecurityPoliciesService + Snapshots *SnapshotsService SslCertificates *SslCertificatesService @@ -408,6 +420,33 @@ type InstancesService struct { s *Service } +func NewInterconnectAttachmentsService(s *Service) *InterconnectAttachmentsService { + rs := &InterconnectAttachmentsService{s: s} + return rs +} + +type InterconnectAttachmentsService struct { + s *Service +} + +func NewInterconnectLocationsService(s *Service) *InterconnectLocationsService { + rs := &InterconnectLocationsService{s: s} + return rs +} + +type InterconnectLocationsService struct { + s *Service +} + +func NewInterconnectsService(s *Service) *InterconnectsService { + rs := &InterconnectsService{s: s} + return rs +} + +type 
InterconnectsService struct { + s *Service +} + func NewLicensesService(s *Service) *LicensesService { rs := &LicensesService{s: s} return rs @@ -525,6 +564,15 @@ type RoutesService struct { s *Service } +func NewSecurityPoliciesService(s *Service) *SecurityPoliciesService { + rs := &SecurityPoliciesService{s: s} + return rs +} + +type SecurityPoliciesService struct { + s *Service +} + func NewSnapshotsService(s *Service) *SnapshotsService { rs := &SnapshotsService{s: s} return rs @@ -775,6 +823,9 @@ type AcceleratorTypeAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *AcceleratorTypeAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -802,6 +853,102 @@ func (s *AcceleratorTypeAggregatedList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// AcceleratorTypeAggregatedListWarning: [Output Only] Informational +// warning message. +type AcceleratorTypeAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*AcceleratorTypeAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *AcceleratorTypeAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod AcceleratorTypeAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type AcceleratorTypeAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AcceleratorTypeAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod AcceleratorTypeAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // AcceleratorTypeList: Contains a list of accelerator types. type AcceleratorTypeList struct { // Id: [Output Only] Unique identifier for the resource; defined by the @@ -826,6 +973,9 @@ type AcceleratorTypeList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *AcceleratorTypeListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -853,6 +1003,102 @@ func (s *AcceleratorTypeList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// AcceleratorTypeListWarning: [Output Only] Informational warning +// message. +type AcceleratorTypeListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*AcceleratorTypeListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AcceleratorTypeListWarning) MarshalJSON() ([]byte, error) { + type noMethod AcceleratorTypeListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type AcceleratorTypeListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AcceleratorTypeListWarningData) MarshalJSON() ([]byte, error) { + type noMethod AcceleratorTypeListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type AcceleratorTypesScopedList struct { // AcceleratorTypes: [Output Only] List of accelerator types contained // in this scope. @@ -1174,6 +1420,9 @@ type AddressAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *AddressAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -1201,91 +1450,285 @@ func (s *AddressAggregatedList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// AddressList: Contains a list of addresses. -type AddressList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of Address resources. - Items []*Address `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#addressList for - // lists of addresses. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *AddressList) MarshalJSON() ([]byte, error) { - type noMethod AddressList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type AddressesScopedList struct { - // Addresses: [Output Only] List of addresses contained in this scope. - Addresses []*Address `json:"addresses,omitempty"` - - // Warning: [Output Only] Informational warning which replaces the list - // of addresses when the list is empty. 
- Warning *AddressesScopedListWarning `json:"warning,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Addresses") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Addresses") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *AddressesScopedList) MarshalJSON() ([]byte, error) { - type noMethod AddressesScopedList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// AddressesScopedListWarning: [Output Only] Informational warning which -// replaces the list of addresses when the list is empty. -type AddressesScopedListWarning struct { +// AddressAggregatedListWarning: [Output Only] Informational warning +// message. +type AddressAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*AddressAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *AddressAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod AddressAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type AddressAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AddressAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod AddressAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// AddressList: Contains a list of addresses. +type AddressList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of Address resources. + Items []*Address `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#addressList for + // lists of addresses. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *AddressListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AddressList) MarshalJSON() ([]byte, error) { + type noMethod AddressList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// AddressListWarning: [Output Only] Informational warning message. +type AddressListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*AddressListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AddressListWarning) MarshalJSON() ([]byte, error) { + type noMethod AddressListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type AddressListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. 
Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AddressListWarningData) MarshalJSON() ([]byte, error) { + type noMethod AddressListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type AddressesScopedList struct { + // Addresses: [Output Only] List of addresses contained in this scope. + Addresses []*Address `json:"addresses,omitempty"` + + // Warning: [Output Only] Informational warning which replaces the list + // of addresses when the list is empty. + Warning *AddressesScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Addresses") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Addresses") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AddressesScopedList) MarshalJSON() ([]byte, error) { + type noMethod AddressesScopedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// AddressesScopedListWarning: [Output Only] Informational warning which +// replaces the list of addresses when the list is empty. +type AddressesScopedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -1464,10 +1907,9 @@ type AttachedDisk struct { // group. DiskEncryptionKey *CustomerEncryptionKey `json:"diskEncryptionKey,omitempty"` - // Index: Assigns a zero-based index to this disk, where 0 is reserved - // for the boot disk. 
For example, if you have many disks attached to an - // instance, each disk would have a unique index number. If not - // specified, the server will choose an appropriate value. + // Index: [Output Only] A zero-based index to this disk, where 0 is + // reserved for the boot disk. If you have many disks attached to an + // instance, each disk would have a unique index number. Index int64 `json:"index,omitempty"` // InitializeParams: [Input Only] Specifies the parameters for a new @@ -1916,6 +2358,9 @@ type AutoscalerAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *AutoscalerAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -1943,6 +2388,102 @@ func (s *AutoscalerAggregatedList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// AutoscalerAggregatedListWarning: [Output Only] Informational warning +// message. +type AutoscalerAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*AutoscalerAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *AutoscalerAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod AutoscalerAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type AutoscalerAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AutoscalerAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod AutoscalerAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // AutoscalerList: Contains a list of Autoscaler resources. type AutoscalerList struct { // Id: [Output Only] Unique identifier for the resource; defined by the @@ -1967,6 +2508,9 @@ type AutoscalerList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *AutoscalerListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -1994,6 +2538,101 @@ func (s *AutoscalerList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// AutoscalerListWarning: [Output Only] Informational warning message. +type AutoscalerListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*AutoscalerListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AutoscalerListWarning) MarshalJSON() ([]byte, error) { + type noMethod AutoscalerListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type AutoscalerListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AutoscalerListWarningData) MarshalJSON() ([]byte, error) { + type noMethod AutoscalerListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type AutoscalerStatusDetails struct { // Message: The status message. Message string `json:"message,omitempty"` @@ -2668,6 +3307,9 @@ type BackendBucketList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *BackendBucketListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -2695,6 +3337,102 @@ func (s *BackendBucketList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// BackendBucketListWarning: [Output Only] Informational warning +// message. +type BackendBucketListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*BackendBucketListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendBucketListWarning) MarshalJSON() ([]byte, error) { + type noMethod BackendBucketListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type BackendBucketListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. 
For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendBucketListWarningData) MarshalJSON() ([]byte, error) { + type noMethod BackendBucketListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // BackendService: A BackendService resource. This resource defines a // group of backend virtual machines and their serving capacity. type BackendService struct { @@ -2736,8 +3474,9 @@ type BackendService struct { // HealthChecks: The list of URLs to the HttpHealthCheck or // HttpsHealthCheck resource for health checking this BackendService. // Currently at most one health check can be specified, and a health - // check is required for GCE backend services. A health check must not - // be specified for GAE app backend and Cloud Function backend. + // check is required for Compute Engine backend services. A health check + // must not be specified for App Engine backend and Cloud Function + // backend. // // For internal load balancing, a URL to a HealthCheck resource must be // specified instead. @@ -2753,6 +3492,11 @@ type BackendService struct { // for backend services. Kind string `json:"kind,omitempty"` + // LoadBalancingScheme: Indicates whether the backend service will be + // used with internal or external load balancing. A backend service + // created for one type of load balancing cannot be used with the other. + // Possible values are INTERNAL and EXTERNAL. + // // Possible values: // "EXTERNAL" // "INTERNAL" @@ -2803,6 +3547,10 @@ type BackendService struct { // services. Region string `json:"region,omitempty"` + // SecurityPolicy: [Output Only] The resource URL for the security + // policy associated with this backend service. + SecurityPolicy string `json:"securityPolicy,omitempty"` + // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` @@ -2882,6 +3630,9 @@ type BackendServiceAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. 
SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *BackendServiceAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -2909,6 +3660,102 @@ func (s *BackendServiceAggregatedList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// BackendServiceAggregatedListWarning: [Output Only] Informational +// warning message. +type BackendServiceAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*BackendServiceAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendServiceAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod BackendServiceAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type BackendServiceAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). 
+ Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendServiceAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod BackendServiceAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // BackendServiceCdnPolicy: Message containing Cloud CDN configuration // for a backend service. type BackendServiceCdnPolicy struct { @@ -3032,6 +3879,9 @@ type BackendServiceList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *BackendServiceListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -3059,6 +3909,102 @@ func (s *BackendServiceList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// BackendServiceListWarning: [Output Only] Informational warning +// message. +type BackendServiceListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*BackendServiceListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendServiceListWarning) MarshalJSON() ([]byte, error) { + type noMethod BackendServiceListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type BackendServiceListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendServiceListWarningData) MarshalJSON() ([]byte, error) { + type noMethod BackendServiceListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type BackendServicesScopedList struct { // BackendServices: List of BackendServices contained in this scope. BackendServices []*BackendService `json:"backendServices,omitempty"` @@ -3465,6 +4411,9 @@ type CommitmentAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *CommitmentAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. 
googleapi.ServerResponse `json:"-"` @@ -3492,92 +4441,286 @@ func (s *CommitmentAggregatedList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// CommitmentList: Contains a list of Commitment resources. -type CommitmentList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of Commitment resources. - Items []*Commitment `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#commitmentList - // for lists of commitments. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *CommitmentList) MarshalJSON() ([]byte, error) { - type noMethod CommitmentList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type CommitmentsScopedList struct { - // Commitments: [Output Only] List of commitments contained in this - // scope. - Commitments []*Commitment `json:"commitments,omitempty"` - - // Warning: [Output Only] Informational warning which replaces the list - // of commitments when the list is empty. - Warning *CommitmentsScopedListWarning `json:"warning,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Commitments") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Commitments") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. 
It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *CommitmentsScopedList) MarshalJSON() ([]byte, error) { - type noMethod CommitmentsScopedList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// CommitmentsScopedListWarning: [Output Only] Informational warning -// which replaces the list of commitments when the list is empty. -type CommitmentsScopedListWarning struct { +// CommitmentAggregatedListWarning: [Output Only] Informational warning +// message. +type CommitmentAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*CommitmentAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *CommitmentAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod CommitmentAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type CommitmentAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). 
+ Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *CommitmentAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod CommitmentAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// CommitmentList: Contains a list of Commitment resources. +type CommitmentList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of Commitment resources. + Items []*Commitment `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#commitmentList + // for lists of commitments. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *CommitmentListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *CommitmentList) MarshalJSON() ([]byte, error) { + type noMethod CommitmentList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// CommitmentListWarning: [Output Only] Informational warning message. +type CommitmentListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*CommitmentListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *CommitmentListWarning) MarshalJSON() ([]byte, error) { + type noMethod CommitmentListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type CommitmentListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *CommitmentListWarningData) MarshalJSON() ([]byte, error) { + type noMethod CommitmentListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type CommitmentsScopedList struct { + // Commitments: [Output Only] List of commitments contained in this + // scope. + Commitments []*Commitment `json:"commitments,omitempty"` + + // Warning: [Output Only] Informational warning which replaces the list + // of commitments when the list is empty. + Warning *CommitmentsScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Commitments") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Commitments") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *CommitmentsScopedList) MarshalJSON() ([]byte, error) { + type noMethod CommitmentsScopedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// CommitmentsScopedListWarning: [Output Only] Informational warning +// which replaces the list of commitments when the list is empty. +type CommitmentsScopedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -4141,6 +5284,9 @@ type DiskAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *DiskAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -4168,6 +5314,102 @@ func (s *DiskAggregatedList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// DiskAggregatedListWarning: [Output Only] Informational warning +// message. +type DiskAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. 
For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*DiskAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DiskAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod DiskAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type DiskAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DiskAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod DiskAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // DiskList: A list of Disk resources. type DiskList struct { // Id: [Output Only] Unique identifier for the resource; defined by the @@ -4192,6 +5434,9 @@ type DiskList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *DiskListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -4219,6 +5464,101 @@ func (s *DiskList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// DiskListWarning: [Output Only] Informational warning message. +type DiskListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*DiskListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *DiskListWarning) MarshalJSON() ([]byte, error) { + type noMethod DiskListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type DiskListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DiskListWarningData) MarshalJSON() ([]byte, error) { + type noMethod DiskListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type DiskMoveRequest struct { // DestinationZone: The URL of the destination zone to move the disk. // This can be a full or partial URL. For example, the following are all @@ -4351,6 +5691,9 @@ type DiskTypeAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *DiskTypeAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -4378,6 +5721,102 @@ func (s *DiskTypeAggregatedList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// DiskTypeAggregatedListWarning: [Output Only] Informational warning +// message. +type DiskTypeAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*DiskTypeAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DiskTypeAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod DiskTypeAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type DiskTypeAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DiskTypeAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod DiskTypeAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // DiskTypeList: Contains a list of disk types. type DiskTypeList struct { // Id: [Output Only] Unique identifier for the resource; defined by the @@ -4402,6 +5841,9 @@ type DiskTypeList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *DiskTypeListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -4429,6 +5871,101 @@ func (s *DiskTypeList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// DiskTypeListWarning: [Output Only] Informational warning message. +type DiskTypeListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*DiskTypeListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *DiskTypeListWarning) MarshalJSON() ([]byte, error) { + type noMethod DiskTypeListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type DiskTypeListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DiskTypeListWarningData) MarshalJSON() ([]byte, error) { + type noMethod DiskTypeListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type DiskTypesScopedList struct { // DiskTypes: [Output Only] List of disk types contained in this scope. DiskTypes []*DiskType `json:"diskTypes,omitempty"` @@ -4711,6 +6248,61 @@ func (s *DisksScopedListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type DistributionPolicy struct { + Zones []*DistributionPolicyZoneConfiguration `json:"zones,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Zones") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Zones") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *DistributionPolicy) MarshalJSON() ([]byte, error) { + type noMethod DistributionPolicy + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type DistributionPolicyZoneConfiguration struct { + // Zone: URL of the zone where managed instance group is spawning + // instances (for regional resources). Zone has to belong to the region + // where managed instance group is located. + Zone string `json:"zone,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Zone") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Zone") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DistributionPolicyZoneConfiguration) MarshalJSON() ([]byte, error) { + type noMethod DistributionPolicyZoneConfiguration + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Expr: Represents an expression text. Example: // // title: "User account presence" description: "Determines whether the @@ -5017,6 +6609,9 @@ type FirewallList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *FirewallListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -5044,6 +6639,101 @@ func (s *FirewallList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// FirewallListWarning: [Output Only] Informational warning message. +type FirewallListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*FirewallListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. 
+ Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *FirewallListWarning) MarshalJSON() ([]byte, error) { + type noMethod FirewallListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type FirewallListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *FirewallListWarningData) MarshalJSON() ([]byte, error) { + type noMethod FirewallListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // FixedOrPercent: Encapsulates numeric value that can be either // absolute or relative. type FixedOrPercent struct { @@ -5326,6 +7016,9 @@ type ForwardingRuleAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *ForwardingRuleAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. 
googleapi.ServerResponse `json:"-"` @@ -5353,6 +7046,102 @@ func (s *ForwardingRuleAggregatedList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ForwardingRuleAggregatedListWarning: [Output Only] Informational +// warning message. +type ForwardingRuleAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*ForwardingRuleAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ForwardingRuleAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod ForwardingRuleAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ForwardingRuleAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ForwardingRuleAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod ForwardingRuleAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ForwardingRuleList: Contains a list of ForwardingRule resources. type ForwardingRuleList struct { // Id: [Output Only] Unique identifier for the resource; defined by the @@ -5376,6 +7165,9 @@ type ForwardingRuleList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *ForwardingRuleListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -5403,6 +7195,102 @@ func (s *ForwardingRuleList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ForwardingRuleListWarning: [Output Only] Informational warning +// message. +type ForwardingRuleListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*ForwardingRuleListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ForwardingRuleListWarning) MarshalJSON() ([]byte, error) { + type noMethod ForwardingRuleListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ForwardingRuleListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ForwardingRuleListWarningData) MarshalJSON() ([]byte, error) { + type noMethod ForwardingRuleListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type ForwardingRulesScopedList struct { // ForwardingRules: List of forwarding rules contained in this scope. ForwardingRules []*ForwardingRule `json:"forwardingRules,omitempty"` @@ -5836,6 +7724,9 @@ type HealthCheckList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *HealthCheckListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -5863,6 +7754,101 @@ func (s *HealthCheckList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// HealthCheckListWarning: [Output Only] Informational warning message. +type HealthCheckListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*HealthCheckListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HealthCheckListWarning) MarshalJSON() ([]byte, error) { + type noMethod HealthCheckListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type HealthCheckListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HealthCheckListWarningData) MarshalJSON() ([]byte, error) { + type noMethod HealthCheckListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // HealthCheckReference: A full or valid partial URL to a health check. // For example, the following are valid URLs: // - @@ -6089,6 +8075,9 @@ type HttpHealthCheckList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *HttpHealthCheckListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -6116,6 +8105,102 @@ func (s *HttpHealthCheckList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// HttpHealthCheckListWarning: [Output Only] Informational warning +// message. +type HttpHealthCheckListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*HttpHealthCheckListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *HttpHealthCheckListWarning) MarshalJSON() ([]byte, error) { + type noMethod HttpHealthCheckListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type HttpHealthCheckListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HttpHealthCheckListWarningData) MarshalJSON() ([]byte, error) { + type noMethod HttpHealthCheckListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // HttpsHealthCheck: An HttpsHealthCheck resource. This resource defines // a template for how individual instances should be checked for health, // via HTTPS. @@ -6229,6 +8314,9 @@ type HttpsHealthCheckList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *HttpsHealthCheckListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -6256,6 +8344,102 @@ func (s *HttpsHealthCheckList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// HttpsHealthCheckListWarning: [Output Only] Informational warning +// message. +type HttpsHealthCheckListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*HttpsHealthCheckListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HttpsHealthCheckListWarning) MarshalJSON() ([]byte, error) { + type noMethod HttpsHealthCheckListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type HttpsHealthCheckListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HttpsHealthCheckListWarningData) MarshalJSON() ([]byte, error) { + type noMethod HttpsHealthCheckListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Image: An Image resource. type Image struct { // ArchiveSizeBytes: Size of the image tar.gz archive stored in Google @@ -6504,6 +8688,9 @@ type ImageList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *ImageListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -6531,6 +8718,101 @@ func (s *ImageList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ImageListWarning: [Output Only] Informational warning message. +type ImageListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*ImageListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ImageListWarning) MarshalJSON() ([]byte, error) { + type noMethod ImageListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ImageListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. 
For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ImageListWarningData) MarshalJSON() ([]byte, error) { + type noMethod ImageListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Instance: An Instance resource. type Instance struct { // CanIpForward: Allows this instance to send and receive packets with @@ -6624,10 +8906,10 @@ type Instance struct { // be a dash. Name string `json:"name,omitempty"` - // NetworkInterfaces: An array of configurations for this interface. - // This specifies how this interface is configured to interact with - // other network services, such as connecting to the internet. Only one - // interface is supported per instance. + // NetworkInterfaces: An array of network configurations for this + // instance. These specify how interfaces are configured to interact + // with other network services, such as connecting to the internet. + // Multiple interfaces are supported per instance. NetworkInterfaces []*NetworkInterface `json:"networkInterfaces,omitempty"` // Scheduling: Sets the scheduling options for this instance. @@ -6729,6 +9011,9 @@ type InstanceAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *InstanceAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -6756,6 +9041,102 @@ func (s *InstanceAggregatedList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InstanceAggregatedListWarning: [Output Only] Informational warning +// message. +type InstanceAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InstanceAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod InstanceAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod InstanceAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type InstanceGroup struct { // CreationTimestamp: [Output Only] The creation timestamp for this // instance group in RFC3339 text format. @@ -6869,6 +9250,9 @@ type InstanceGroupAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *InstanceGroupAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -6896,6 +9280,102 @@ func (s *InstanceGroupAggregatedList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InstanceGroupAggregatedListWarning: [Output Only] Informational +// warning message. +type InstanceGroupAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InstanceGroupAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *InstanceGroupAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceGroupAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // InstanceGroupList: A list of InstanceGroup resources. type InstanceGroupList struct { // Id: [Output Only] Unique identifier for the resource; defined by the @@ -6920,6 +9400,9 @@ type InstanceGroupList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *InstanceGroupListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -6947,6 +9430,102 @@ func (s *InstanceGroupList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InstanceGroupListWarning: [Output Only] Informational warning +// message. +type InstanceGroupListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InstanceGroupListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupListWarning) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceGroupListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupListWarningData) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // InstanceGroupManager: An Instance Group Manager resource. type InstanceGroupManager struct { // AutoHealingPolicies: The autohealing policy for this managed instance @@ -6972,6 +9551,10 @@ type InstanceGroupManager struct { // property when you create the resource. Description string `json:"description,omitempty"` + // DistributionPolicy: Policy valid only for regional managed instance + // groups. + DistributionPolicy *DistributionPolicy `json:"distributionPolicy,omitempty"` + // FailoverAction: The action to perform in case of zone failure. Only // one value is supported, NO_FAILOVER. The default is NO_FAILOVER. // @@ -7022,11 +9605,11 @@ type InstanceGroupManager struct { // server defines this URL. SelfLink string `json:"selfLink,omitempty"` - // ServiceAccount: Service account will be used as credentials for all - // operations performed by managed instance group on instances. The - // service accounts needs all permissions required to create and delete - // instances. When not specified, the service account - // {projectNumber}@cloudservices.gserviceaccount.com will be used. + // ServiceAccount: [Output Only] The service account to be used as + // credentials for all operations performed by the managed instance + // group on instances. The service accounts needs all permissions + // required to create and delete instances. By default, the service + // account {projectNumber}@cloudservices.gserviceaccount.com is used. ServiceAccount string `json:"serviceAccount,omitempty"` // TargetPools: The URLs for all TargetPool resources to which instances @@ -7189,6 +9772,9 @@ type InstanceGroupManagerAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *InstanceGroupManagerAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -7216,6 +9802,102 @@ func (s *InstanceGroupManagerAggregatedList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InstanceGroupManagerAggregatedListWarning: [Output Only] +// Informational warning message. +type InstanceGroupManagerAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InstanceGroupManagerAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupManagerAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupManagerAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceGroupManagerAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupManagerAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupManagerAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type InstanceGroupManagerAutoHealingPolicy struct { // HealthCheck: The URL for the health check that signals autohealing. HealthCheck string `json:"healthCheck,omitempty"` @@ -7278,6 +9960,9 @@ type InstanceGroupManagerList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *InstanceGroupManagerListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -7305,24 +9990,43 @@ func (s *InstanceGroupManagerList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type InstanceGroupManagerPendingActionsSummary struct { - // Creating: [Output Only] The number of instances in the managed - // instance group that are pending to be created. - Creating int64 `json:"creating,omitempty"` - - // Deleting: [Output Only] The number of instances in the managed - // instance group that are pending to be deleted. - Deleting int64 `json:"deleting,omitempty"` +// InstanceGroupManagerListWarning: [Output Only] Informational warning +// message. +type InstanceGroupManagerListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` - // Recreating: [Output Only] The number of instances in the managed - // instance group that are pending to be recreated. - Recreating int64 `json:"recreating,omitempty"` + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InstanceGroupManagerListWarningData `json:"data,omitempty"` - // Restarting: [Output Only] The number of instances in the managed - // instance group that are pending to be restarted. - Restarting int64 `json:"restarting,omitempty"` + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` - // ForceSendFields is a list of field names (e.g. "Creating") to + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -7330,8 +10034,8 @@ type InstanceGroupManagerPendingActionsSummary struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Creating") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -7339,22 +10043,99 @@ type InstanceGroupManagerPendingActionsSummary struct { NullFields []string `json:"-"` } -func (s *InstanceGroupManagerPendingActionsSummary) MarshalJSON() ([]byte, error) { - type noMethod InstanceGroupManagerPendingActionsSummary +func (s *InstanceGroupManagerListWarning) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupManagerListWarning raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type InstanceGroupManagerUpdatePolicy struct { - // MaxSurge: Maximum number of instances that can be created above the - // InstanceGroupManager.targetSize during the update process. By - // default, a fixed value of 1 is used. Using maxSurge > 0 will cause - // instance names to change during the update process. At least one of { - // maxSurge, maxUnavailable } must be greater than 0. - MaxSurge *FixedOrPercent `json:"maxSurge,omitempty"` +type InstanceGroupManagerListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` - // MaxUnavailable: Maximum number of instances that can be unavailable - // during the update process. The instance is considered available if + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *InstanceGroupManagerListWarningData) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupManagerListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceGroupManagerPendingActionsSummary struct { + // Creating: [Output Only] The number of instances in the managed + // instance group that are pending to be created. + Creating int64 `json:"creating,omitempty"` + + // Deleting: [Output Only] The number of instances in the managed + // instance group that are pending to be deleted. + Deleting int64 `json:"deleting,omitempty"` + + // Recreating: [Output Only] The number of instances in the managed + // instance group that are pending to be recreated. + Recreating int64 `json:"recreating,omitempty"` + + // Restarting: [Output Only] The number of instances in the managed + // instance group that are pending to be restarted. + Restarting int64 `json:"restarting,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Creating") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Creating") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupManagerPendingActionsSummary) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupManagerPendingActionsSummary + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceGroupManagerUpdatePolicy struct { + // MaxSurge: Maximum number of instances that can be created above the + // InstanceGroupManager.targetSize during the update process. By + // default, a fixed value of 1 is used. Using maxSurge > 0 will cause + // instance names to change during the update process. At least one of { + // maxSurge, maxUnavailable } must be greater than 0. + MaxSurge *FixedOrPercent `json:"maxSurge,omitempty"` + + // MaxUnavailable: Maximum number of instances that can be unavailable + // during the update process. The instance is considered available if // all of the following conditions are satisfied: 1. Instance's status // is RUNNING. 2. Instance's liveness health check result was observed // to be HEALTHY at least once. By default, a fixed value of 1 is used. @@ -7902,6 +10683,9 @@ type InstanceGroupsListInstances struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *InstanceGroupsListInstancesWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. 
googleapi.ServerResponse `json:"-"` @@ -7929,6 +10713,102 @@ func (s *InstanceGroupsListInstances) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InstanceGroupsListInstancesWarning: [Output Only] Informational +// warning message. +type InstanceGroupsListInstancesWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InstanceGroupsListInstancesWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupsListInstancesWarning) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupsListInstancesWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceGroupsListInstancesWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupsListInstancesWarningData) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupsListInstancesWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type InstanceGroupsListInstancesRequest struct { // InstanceState: A filter for the state of the instances in the // instance group. Valid options are ALL or RUNNING. If you do not @@ -8179,6 +11059,9 @@ type InstanceList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *InstanceListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -8206,6 +11089,101 @@ func (s *InstanceList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InstanceListWarning: [Output Only] Informational warning message. +type InstanceListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InstanceListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceListWarning) MarshalJSON() ([]byte, error) { + type noMethod InstanceListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceListWarningData) MarshalJSON() ([]byte, error) { + type noMethod InstanceListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // InstanceListReferrers: Contains a list of instance referrers. type InstanceListReferrers struct { // Id: [Output Only] Unique identifier for the resource; defined by the @@ -8230,6 +11208,9 @@ type InstanceListReferrers struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *InstanceListReferrersWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -8257,6 +11238,102 @@ func (s *InstanceListReferrers) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InstanceListReferrersWarning: [Output Only] Informational warning +// message. +type InstanceListReferrersWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InstanceListReferrersWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceListReferrersWarning) MarshalJSON() ([]byte, error) { + type noMethod InstanceListReferrersWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceListReferrersWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceListReferrersWarningData) MarshalJSON() ([]byte, error) { + type noMethod InstanceListReferrersWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type InstanceMoveRequest struct { // DestinationZone: The URL of the destination zone to move the // instance. This can be a full or partial URL. For example, the @@ -8500,6 +11577,9 @@ type InstanceTemplateList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *InstanceTemplateListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -8527,6 +11607,102 @@ func (s *InstanceTemplateList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InstanceTemplateListWarning: [Output Only] Informational warning +// message. +type InstanceTemplateListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InstanceTemplateListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *InstanceTemplateListWarning) MarshalJSON() ([]byte, error) { + type noMethod InstanceTemplateListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceTemplateListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceTemplateListWarningData) MarshalJSON() ([]byte, error) { + type noMethod InstanceTemplateListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type InstanceWithNamedPorts struct { // Instance: [Output Only] The URL of the instance. Instance string `json:"instance,omitempty"` @@ -8882,129 +12058,122 @@ func (s *InstancesStartWithEncryptionKeyRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// License: A license resource. -type License struct { - // ChargesUseFee: [Output Only] Deprecated. This field no longer - // reflects whether a license charges a usage fee. - ChargesUseFee bool `json:"chargesUseFee,omitempty"` +// Interconnect: Protocol definitions for Mixer API to support +// Interconnect. Next available tag: 23 +type Interconnect struct { + // AdminEnabled: Administrative status of the interconnect. When this is + // set to ?true?, the Interconnect is functional and may carry traffic + // (assuming there are functional InterconnectAttachments and other + // requirements are satisfied). When set to ?false?, no packets will be + // carried over this Interconnect and no BGP routes will be exchanged + // over it. By default, it is set to ?true?. + AdminEnabled bool `json:"adminEnabled,omitempty"` - // Kind: [Output Only] Type of resource. Always compute#license for - // licenses. - Kind string `json:"kind,omitempty"` + // CircuitInfos: [Output Only] List of CircuitInfo objects, that + // describe the individual circuits in this LAG. 
+ CircuitInfos []*InterconnectCircuitInfo `json:"circuitInfos,omitempty"` - // Name: [Output Only] Name of the resource. The name is 1-63 characters - // long and complies with RFC1035. - Name string `json:"name,omitempty"` + // ConnectionAuthorization: [Output Only] URL to retrieve the Letter Of + // Authority and Customer Facility Assignment (LOA-CFA) documentation + // relating to this Interconnect. This documentation authorizes the + // facility provider to connect to the specified crossconnect ports. + ConnectionAuthorization string `json:"connectionAuthorization,omitempty"` - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // CustomerName: Customer name, to put in the Letter of Authorization as + // the party authorized to request a crossconnect. + CustomerName string `json:"customerName,omitempty"` - // ForceSendFields is a list of field names (e.g. "ChargesUseFee") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` - // NullFields is a list of field names (e.g. "ChargesUseFee") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} + // ExpectedOutages: [Output Only] List of outages expected for this + // Interconnect. + ExpectedOutages []*InterconnectOutageNotification `json:"expectedOutages,omitempty"` -func (s *License) MarshalJSON() ([]byte, error) { - type noMethod License - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // GoogleIpAddress: [Output Only] IP address configured on the Google + // side of the Interconnect link. This can be used only for ping tests. + GoogleIpAddress string `json:"googleIpAddress,omitempty"` -// LogConfig: Specifies what kind of log the caller must write -type LogConfig struct { - // CloudAudit: Cloud audit options. - CloudAudit *LogConfigCloudAuditOptions `json:"cloudAudit,omitempty"` + // GoogleReferenceId: [Output Only] Google reference ID; to be used when + // raising support tickets with Google or otherwise to debug backend + // connectivity issues. + GoogleReferenceId string `json:"googleReferenceId,omitempty"` - // Counter: Counter options. - Counter *LogConfigCounterOptions `json:"counter,omitempty"` + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` - // ForceSendFields is a list of field names (e.g. 
"CloudAudit") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` + // InterconnectAttachments: [Output Only] A list of the URLs of all + // InterconnectAttachments configured to use this Interconnect. + InterconnectAttachments []string `json:"interconnectAttachments,omitempty"` - // NullFields is a list of field names (e.g. "CloudAudit") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} + // Possible values: + // "IT_PRIVATE" + InterconnectType string `json:"interconnectType,omitempty"` -func (s *LogConfig) MarshalJSON() ([]byte, error) { - type noMethod LogConfig - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // Kind: [Output Only] Type of the resource. Always compute#interconnect + // for interconnects. + Kind string `json:"kind,omitempty"` -// LogConfigCloudAuditOptions: Write a Cloud Audit log -type LogConfigCloudAuditOptions struct { - // AuthorizationLoggingOptions: Information used by the Cloud Audit - // Logging pipeline. - AuthorizationLoggingOptions *AuthorizationLoggingOptions `json:"authorizationLoggingOptions,omitempty"` + // Possible values: + // "LINK_TYPE_ETHERNET_10G_LR" + LinkType string `json:"linkType,omitempty"` - // LogName: The log_name to populate in the Cloud Audit Record. + // Location: URL of the InterconnectLocation object that represents + // where this connection is to be provisioned. + Location string `json:"location,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // NocContactEmail: Email address to contact the customer NOC for + // operations and maintenance notifications regarding this Interconnect. + // If specified, this will be used for notifications in addition to all + // other forms described, such as Stackdriver logs alerting and Cloud + // Notifications. + NocContactEmail string `json:"nocContactEmail,omitempty"` + + // OperationalStatus: [Output Only] The current status of whether or not + // this Interconnect is functional. // // Possible values: - // "ADMIN_ACTIVITY" - // "DATA_ACCESS" - // "UNSPECIFIED_LOG_NAME" - LogName string `json:"logName,omitempty"` - - // ForceSendFields is a list of field names (e.g. - // "AuthorizationLoggingOptions") to unconditionally include in API - // requests. By default, fields with empty values are omitted from API - // requests. 
However, any non-pointer, non-interface field appearing in - // ForceSendFields will be sent to the server regardless of whether the - // field is empty or not. This may be used to include empty fields in - // Patch requests. - ForceSendFields []string `json:"-"` + // "OS_ACTIVE" + // "OS_UNPROVISIONED" + OperationalStatus string `json:"operationalStatus,omitempty"` + + // PeerIpAddress: [Output Only] IP address configured on the customer + // side of the Interconnect link. The customer should configure this IP + // address during turnup when prompted by Google NOC. This can be used + // only for ping tests. + PeerIpAddress string `json:"peerIpAddress,omitempty"` - // NullFields is a list of field names (e.g. - // "AuthorizationLoggingOptions") to include in API requests with the - // JSON null value. By default, fields with empty values are omitted - // from API requests. However, any field with an empty value appearing - // in NullFields will be sent to the server as null. It is an error if a - // field in this list has a non-empty value. This may be used to include - // null fields in Patch requests. - NullFields []string `json:"-"` -} + // ProvisionedLinkCount: [Output Only] Number of links actually + // provisioned in this interconnect. + ProvisionedLinkCount int64 `json:"provisionedLinkCount,omitempty"` -func (s *LogConfigCloudAuditOptions) MarshalJSON() ([]byte, error) { - type noMethod LogConfigCloudAuditOptions - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // RequestedLinkCount: Target number of physical links in the link + // bundle, as requested by the customer. + RequestedLinkCount int64 `json:"requestedLinkCount,omitempty"` -// LogConfigCounterOptions: Options for counters -type LogConfigCounterOptions struct { - // Field: The field value to attribute. - Field string `json:"field,omitempty"` + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` - // Metric: The metric to update. - Metric string `json:"metric,omitempty"` + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Field") to + // ForceSendFields is a list of field names (e.g. "AdminEnabled") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -9012,86 +12181,108 @@ type LogConfigCounterOptions struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Field") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "AdminEnabled") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. 
NullFields []string `json:"-"` } -func (s *LogConfigCounterOptions) MarshalJSON() ([]byte, error) { - type noMethod LogConfigCounterOptions +func (s *Interconnect) MarshalJSON() ([]byte, error) { + type noMethod Interconnect raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// MachineType: A Machine Type resource. -type MachineType struct { +// InterconnectAttachment: Protocol definitions for Mixer API to support +// InterconnectAttachment. Next available tag: 18 +type InterconnectAttachment struct { + // CloudRouterIpAddress: [Output Only] IPv4 address + prefix length to + // be configured on Cloud Router Interface for this interconnect + // attachment. + CloudRouterIpAddress string `json:"cloudRouterIpAddress,omitempty"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` - // Deprecated: [Output Only] The deprecation status associated with this - // machine type. - Deprecated *DeprecationStatus `json:"deprecated,omitempty"` + // CustomerRouterIpAddress: [Output Only] IPv4 address + prefix length + // to be configured on the customer router subinterface for this + // interconnect attachment. + CustomerRouterIpAddress string `json:"customerRouterIpAddress,omitempty"` - // Description: [Output Only] An optional textual description of the - // resource. + // Description: An optional description of this resource. Provide this + // property when you create the resource. Description string `json:"description,omitempty"` - // GuestCpus: [Output Only] The number of virtual CPUs that are - // available to the instance. - GuestCpus int64 `json:"guestCpus,omitempty"` + // GoogleReferenceId: [Output Only] Google reference ID, to be used when + // raising support tickets with Google or otherwise to debug backend + // connectivity issues. + GoogleReferenceId string `json:"googleReferenceId,omitempty"` // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` - // IsSharedCpu: [Output Only] Whether this machine type has a shared - // CPU. See Shared-core machine types for more information. - IsSharedCpu bool `json:"isSharedCpu,omitempty"` + // Interconnect: URL of the underlying Interconnect object that this + // attachment's traffic will traverse through. + Interconnect string `json:"interconnect,omitempty"` - // Kind: [Output Only] The type of the resource. Always - // compute#machineType for machine types. + // Kind: [Output Only] Type of the resource. Always + // compute#interconnectAttachment for interconnect attachments. Kind string `json:"kind,omitempty"` - // MaximumPersistentDisks: [Output Only] Maximum persistent disks - // allowed. - MaximumPersistentDisks int64 `json:"maximumPersistentDisks,omitempty"` + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` - // MaximumPersistentDisksSizeGb: [Output Only] Maximum total persistent - // disks size (GB) allowed. 
- MaximumPersistentDisksSizeGb int64 `json:"maximumPersistentDisksSizeGb,omitempty,string"` + // OperationalStatus: [Output Only] The current status of whether or not + // this interconnect attachment is functional. + // + // Possible values: + // "OS_ACTIVE" + // "OS_UNPROVISIONED" + OperationalStatus string `json:"operationalStatus,omitempty"` - // MemoryMb: [Output Only] The amount of physical memory available to - // the instance, defined in MB. - MemoryMb int64 `json:"memoryMb,omitempty"` + // PrivateInterconnectInfo: [Output Only] Information specific to a + // Private InterconnectAttachment. Only populated if the interconnect + // that this is attached is of type IT_PRIVATE. + PrivateInterconnectInfo *InterconnectAttachmentPrivateInfo `json:"privateInterconnectInfo,omitempty"` - // Name: [Output Only] Name of the resource. - Name string `json:"name,omitempty"` + // Region: [Output Only] URL of the region where the regional + // interconnect attachment resides. + Region string `json:"region,omitempty"` + + // Router: URL of the cloud router to be used for dynamic routing. This + // router must be in the same region as this InterconnectAttachment. The + // InterconnectAttachment will automatically connect the Interconnect to + // the network & region within which the Cloud Router is configured. + Router string `json:"router,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // Zone: [Output Only] The name of the zone where the machine type - // resides, such as us-central1-a. - Zone string `json:"zone,omitempty"` - // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. + // "CloudRouterIpAddress") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to + // NullFields is a list of field names (e.g. "CloudRouterIpAddress") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -9101,23 +12292,23 @@ type MachineType struct { NullFields []string `json:"-"` } -func (s *MachineType) MarshalJSON() ([]byte, error) { - type noMethod MachineType +func (s *InterconnectAttachment) MarshalJSON() ([]byte, error) { + type noMethod InterconnectAttachment raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type MachineTypeAggregatedList struct { +type InterconnectAttachmentAggregatedList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. 
Id string `json:"id,omitempty"` - // Items: A list of MachineTypesScopedList resources. - Items map[string]MachineTypesScopedList `json:"items,omitempty"` + // Items: A list of InterconnectAttachmentsScopedList resources. + Items map[string]InterconnectAttachmentsScopedList `json:"items,omitempty"` // Kind: [Output Only] Type of resource. Always - // compute#machineTypeAggregatedList for aggregated lists of machine - // types. + // compute#interconnectAttachmentAggregatedList for aggregated lists of + // interconnect attachments. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -9131,6 +12322,9 @@ type MachineTypeAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *InterconnectAttachmentAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -9152,41 +12346,86 @@ type MachineTypeAggregatedList struct { NullFields []string `json:"-"` } -func (s *MachineTypeAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod MachineTypeAggregatedList +func (s *InterconnectAttachmentAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod InterconnectAttachmentAggregatedList raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// MachineTypeList: Contains a list of machine types. -type MachineTypeList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` +// InterconnectAttachmentAggregatedListWarning: [Output Only] +// Informational warning message. +type InterconnectAttachmentAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` - // Items: A list of MachineType resources. - Items []*MachineType `json:"items,omitempty"` + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InterconnectAttachmentAggregatedListWarningData `json:"data,omitempty"` - // Kind: [Output Only] Type of resource. Always compute#machineTypeList - // for lists of machine types. - Kind string `json:"kind,omitempty"` + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. 
Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` +func (s *InterconnectAttachmentAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod InterconnectAttachmentAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} - // ForceSendFields is a list of field names (e.g. "Id") to +type InterconnectAttachmentAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -9194,7 +12433,7 @@ type MachineTypeList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API + // NullFields is a list of field names (e.g. "Key") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -9203,22 +12442,46 @@ type MachineTypeList struct { NullFields []string `json:"-"` } -func (s *MachineTypeList) MarshalJSON() ([]byte, error) { - type noMethod MachineTypeList +func (s *InterconnectAttachmentAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod InterconnectAttachmentAggregatedListWarningData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type MachineTypesScopedList struct { - // MachineTypes: [Output Only] List of machine types contained in this - // scope. - MachineTypes []*MachineType `json:"machineTypes,omitempty"` +// InterconnectAttachmentList: Response to the list request, and +// contains a list of interconnect attachments. +type InterconnectAttachmentList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` - // Warning: [Output Only] An informational warning that appears when the - // machine types list is empty. - Warning *MachineTypesScopedListWarning `json:"warning,omitempty"` + // Items: A list of InterconnectAttachment resources. + Items []*InterconnectAttachment `json:"items,omitempty"` - // ForceSendFields is a list of field names (e.g. "MachineTypes") to + // Kind: [Output Only] Type of resource. Always + // compute#interconnectAttachmentList for lists of interconnect + // attachments. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *InterconnectAttachmentListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -9226,24 +12489,24 @@ type MachineTypesScopedList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "MachineTypes") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. 
NullFields []string `json:"-"` } -func (s *MachineTypesScopedList) MarshalJSON() ([]byte, error) { - type noMethod MachineTypesScopedList +func (s *InterconnectAttachmentList) MarshalJSON() ([]byte, error) { + type noMethod InterconnectAttachmentList raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// MachineTypesScopedListWarning: [Output Only] An informational warning -// that appears when the machine types list is empty. -type MachineTypesScopedListWarning struct { +// InterconnectAttachmentListWarning: [Output Only] Informational +// warning message. +type InterconnectAttachmentListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -9271,7 +12534,7 @@ type MachineTypesScopedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*MachineTypesScopedListWarningData `json:"data,omitempty"` + Data []*InterconnectAttachmentListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -9294,13 +12557,13 @@ type MachineTypesScopedListWarning struct { NullFields []string `json:"-"` } -func (s *MachineTypesScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod MachineTypesScopedListWarning +func (s *InterconnectAttachmentListWarning) MarshalJSON() ([]byte, error) { + type noMethod InterconnectAttachmentListWarning raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type MachineTypesScopedListWarningData struct { +type InterconnectAttachmentListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -9331,80 +12594,22 @@ type MachineTypesScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *MachineTypesScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod MachineTypesScopedListWarningData +func (s *InterconnectAttachmentListWarningData) MarshalJSON() ([]byte, error) { + type noMethod InterconnectAttachmentListWarningData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ManagedInstance struct { - // CurrentAction: [Output Only] The current action that the managed - // instance group has scheduled for the instance. Possible values: - // - NONE The instance is running, and the managed instance group does - // not have any scheduled actions for this instance. - // - CREATING The managed instance group is creating this instance. If - // the group fails to create this instance, it will try again until it - // is successful. - // - CREATING_WITHOUT_RETRIES The managed instance group is attempting - // to create this instance only once. If the group fails to create this - // instance, it does not try again and the group's targetSize value is - // decreased instead. - // - RECREATING The managed instance group is recreating this instance. - // - // - DELETING The managed instance group is permanently deleting this - // instance. - // - ABANDONING The managed instance group is abandoning this instance. - // The instance will be removed from the instance group and from any - // target pools that are associated with this group. 
- // - RESTARTING The managed instance group is restarting the instance. - // - // - REFRESHING The managed instance group is applying configuration - // changes to the instance without stopping it. For example, the group - // can update the target pool list for an instance without stopping that - // instance. - // - // Possible values: - // "ABANDONING" - // "CREATING" - // "CREATING_WITHOUT_RETRIES" - // "DELETING" - // "NONE" - // "RECREATING" - // "REFRESHING" - // "RESTARTING" - // "VERIFYING" - CurrentAction string `json:"currentAction,omitempty"` - - // Id: [Output only] The unique identifier for this resource. This field - // is empty when instance does not exist. - Id uint64 `json:"id,omitempty,string"` - - // Instance: [Output Only] The URL of the instance. The URL can exist - // even if the instance has not yet been created. - Instance string `json:"instance,omitempty"` - - // InstanceStatus: [Output Only] The status of the instance. This field - // is empty when the instance does not exist. - // - // Possible values: - // "PROVISIONING" - // "RUNNING" - // "STAGING" - // "STOPPED" - // "STOPPING" - // "SUSPENDED" - // "SUSPENDING" - // "TERMINATED" - InstanceStatus string `json:"instanceStatus,omitempty"` - - // LastAttempt: [Output Only] Information about the last attempt to - // create or delete the instance. - LastAttempt *ManagedInstanceLastAttempt `json:"lastAttempt,omitempty"` - - // Version: [Output Only] Intended version of this instance. - Version *ManagedInstanceVersion `json:"version,omitempty"` +// InterconnectAttachmentPrivateInfo: Private information for an +// interconnect attachment when this belongs to an interconnect of type +// IT_PRIVATE. +type InterconnectAttachmentPrivateInfo struct { + // Tag8021q: [Output Only] 802.1q encapsulation tag to be used for + // traffic between Google and the customer, going to and from this + // network and region. + Tag8021q int64 `json:"tag8021q,omitempty"` - // ForceSendFields is a list of field names (e.g. "CurrentAction") to + // ForceSendFields is a list of field names (e.g. "Tag8021q") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -9412,57 +12617,92 @@ type ManagedInstance struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CurrentAction") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Tag8021q") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. 
NullFields []string `json:"-"` } -func (s *ManagedInstance) MarshalJSON() ([]byte, error) { - type noMethod ManagedInstance +func (s *InterconnectAttachmentPrivateInfo) MarshalJSON() ([]byte, error) { + type noMethod InterconnectAttachmentPrivateInfo raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ManagedInstanceLastAttempt struct { - // Errors: [Output Only] Encountered errors during the last attempt to - // create or delete the instance. - Errors *ManagedInstanceLastAttemptErrors `json:"errors,omitempty"` +type InterconnectAttachmentsScopedList struct { + // InterconnectAttachments: List of interconnect attachments contained + // in this scope. + InterconnectAttachments []*InterconnectAttachment `json:"interconnectAttachments,omitempty"` - // ForceSendFields is a list of field names (e.g. "Errors") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // Warning: Informational warning which replaces the list of addresses + // when the list is empty. + Warning *InterconnectAttachmentsScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "InterconnectAttachments") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Errors") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "InterconnectAttachments") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *ManagedInstanceLastAttempt) MarshalJSON() ([]byte, error) { - type noMethod ManagedInstanceLastAttempt +func (s *InterconnectAttachmentsScopedList) MarshalJSON() ([]byte, error) { + type noMethod InterconnectAttachmentsScopedList raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ManagedInstanceLastAttemptErrors: [Output Only] Encountered errors -// during the last attempt to create or delete the instance. -type ManagedInstanceLastAttemptErrors struct { - // Errors: [Output Only] The array of errors encountered while - // processing this operation. 
- Errors []*ManagedInstanceLastAttemptErrorsErrors `json:"errors,omitempty"` +// InterconnectAttachmentsScopedListWarning: Informational warning which +// replaces the list of addresses when the list is empty. +type InterconnectAttachmentsScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` - // ForceSendFields is a list of field names (e.g. "Errors") to + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InterconnectAttachmentsScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -9470,7 +12710,7 @@ type ManagedInstanceLastAttemptErrors struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Errors") to include in API + // NullFields is a list of field names (e.g. "Code") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -9479,24 +12719,27 @@ type ManagedInstanceLastAttemptErrors struct { NullFields []string `json:"-"` } -func (s *ManagedInstanceLastAttemptErrors) MarshalJSON() ([]byte, error) { - type noMethod ManagedInstanceLastAttemptErrors +func (s *InterconnectAttachmentsScopedListWarning) MarshalJSON() ([]byte, error) { + type noMethod InterconnectAttachmentsScopedListWarning raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ManagedInstanceLastAttemptErrorsErrors struct { - // Code: [Output Only] The error type identifier for this error. - Code string `json:"code,omitempty"` - - // Location: [Output Only] Indicates the field in the request that - // caused the error. This property is optional. - Location string `json:"location,omitempty"` +type InterconnectAttachmentsScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. 
Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` - // Message: [Output Only] An optional, human-readable error message. - Message string `json:"message,omitempty"` + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` - // ForceSendFields is a list of field names (e.g. "Code") to + // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -9504,7 +12747,7 @@ type ManagedInstanceLastAttemptErrorsErrors struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Code") to include in API + // NullFields is a list of field names (e.g. "Key") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -9513,22 +12756,30 @@ type ManagedInstanceLastAttemptErrorsErrors struct { NullFields []string `json:"-"` } -func (s *ManagedInstanceLastAttemptErrorsErrors) MarshalJSON() ([]byte, error) { - type noMethod ManagedInstanceLastAttemptErrorsErrors +func (s *InterconnectAttachmentsScopedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod InterconnectAttachmentsScopedListWarningData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ManagedInstanceVersion struct { - // InstanceTemplate: [Output Only] The intended template of the - // instance. This field is empty when current_action is one of { - // DELETING, ABANDONING }. - InstanceTemplate string `json:"instanceTemplate,omitempty"` +// InterconnectCircuitInfo: Describes a single physical circuit between +// the Customer and Google. CircuitInfo objects are created by Google, +// so all fields are output only. Next id: 4 +type InterconnectCircuitInfo struct { + // CustomerDemarcId: Customer-side demarc ID for this circuit. This will + // only be set if it was provided by the Customer to Google during + // circuit turn-up. + CustomerDemarcId string `json:"customerDemarcId,omitempty"` - // Name: [Output Only] Name of the version. - Name string `json:"name,omitempty"` + // GoogleCircuitId: Google-assigned unique ID for this circuit. Assigned + // at circuit turn-up. + GoogleCircuitId string `json:"googleCircuitId,omitempty"` - // ForceSendFields is a list of field names (e.g. "InstanceTemplate") to + // GoogleDemarcId: Google-side demarc ID for this circuit. Assigned at + // circuit turn-up and provided by Google to the customer in the LOA. + GoogleDemarcId string `json:"googleDemarcId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CustomerDemarcId") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -9536,7 +12787,7 @@ type ManagedInstanceVersion struct { // used to include empty fields in Patch requests. 
ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "InstanceTemplate") to + // NullFields is a list of field names (e.g. "CustomerDemarcId") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -9546,31 +12797,45 @@ type ManagedInstanceVersion struct { NullFields []string `json:"-"` } -func (s *ManagedInstanceVersion) MarshalJSON() ([]byte, error) { - type noMethod ManagedInstanceVersion +func (s *InterconnectCircuitInfo) MarshalJSON() ([]byte, error) { + type noMethod InterconnectCircuitInfo raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Metadata: A metadata key/value entry. -type Metadata struct { - // Fingerprint: Specifies a fingerprint for this request, which is - // essentially a hash of the metadata's contents and used for optimistic - // locking. The fingerprint is initially generated by Compute Engine and - // changes after every request to modify or update metadata. You must - // always provide an up-to-date fingerprint hash in order to update or - // change metadata. - Fingerprint string `json:"fingerprint,omitempty"` +// InterconnectList: Response to the list request, and contains a list +// of interconnects. +type InterconnectList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` - // Items: Array of key/value pairs. The total size of all keys and - // values must be less than 512 KB. - Items []*MetadataItems `json:"items,omitempty"` + // Items: A list of Interconnect resources. + Items []*Interconnect `json:"items,omitempty"` - // Kind: [Output Only] Type of the resource. Always compute#metadata for - // metadata. + // Kind: [Output Only] Type of resource. Always compute#interconnectList + // for lists of interconnects. Kind string `json:"kind,omitempty"` - // ForceSendFields is a list of field names (e.g. "Fingerprint") to + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *InterconnectListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -9578,36 +12843,57 @@ type Metadata struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Fingerprint") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. 
However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *Metadata) MarshalJSON() ([]byte, error) { - type noMethod Metadata +func (s *InterconnectList) MarshalJSON() ([]byte, error) { + type noMethod InterconnectList raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type MetadataItems struct { - // Key: Key for the metadata entry. Keys must conform to the following - // regexp: [a-zA-Z0-9-_]+, and be less than 128 bytes in length. This is - // reflected as part of a URL in the metadata server. Additionally, to - // avoid ambiguity, keys must not conflict with any other metadata keys - // for the project. - Key string `json:"key,omitempty"` +// InterconnectListWarning: [Output Only] Informational warning message. +type InterconnectListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` - // Value: Value for the metadata entry. These are free-form strings, and - // only have meaning as interpreted by the image running in the - // instance. The only restriction placed on values is that their size - // must be less than or equal to 262144 bytes (256 KiB). - Value *string `json:"value,omitempty"` + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InterconnectListWarningData `json:"data,omitempty"` - // ForceSendFields is a list of field names (e.g. "Key") to + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -9615,7 +12901,7 @@ type MetadataItems struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Key") to include in API + // NullFields is a list of field names (e.g. "Code") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -9624,22 +12910,27 @@ type MetadataItems struct { NullFields []string `json:"-"` } -func (s *MetadataItems) MarshalJSON() ([]byte, error) { - type noMethod MetadataItems +func (s *InterconnectListWarning) MarshalJSON() ([]byte, error) { + type noMethod InterconnectListWarning raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NamedPort: The named port. For example: . -type NamedPort struct { - // Name: The name for this named port. The name must be 1-63 characters - // long, and comply with RFC1035. - Name string `json:"name,omitempty"` +type InterconnectListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` - // Port: The port number, which can be a value between 1 and 65535. - Port int64 `json:"port,omitempty"` + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` - // ForceSendFields is a list of field names (e.g. "Name") to + // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -9647,7 +12938,7 @@ type NamedPort struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Name") to include in API + // NullFields is a list of field names (e.g. "Key") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -9656,74 +12947,83 @@ type NamedPort struct { NullFields []string `json:"-"` } -func (s *NamedPort) MarshalJSON() ([]byte, error) { - type noMethod NamedPort +func (s *InterconnectListWarningData) MarshalJSON() ([]byte, error) { + type noMethod InterconnectListWarningData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Network: Represents a Network resource. Read Networks and Firewalls -// for more information. -type Network struct { - // IPv4Range: The range of internal addresses that are legal on this - // network. This range is a CIDR specification, for example: - // 192.168.0.0/16. Provided by the client when the network is created. - IPv4Range string `json:"IPv4Range,omitempty"` +// InterconnectLocation: Protocol definitions for Mixer API to support +// InterconnectLocation. +type InterconnectLocation struct { + // Address: [Output Only] The postal address of the Point of Presence, + // each line in the address is separated by a newline character. + Address string `json:"address,omitempty"` - // AutoCreateSubnetworks: When set to true, the network is created in - // "auto subnet mode". When set to false, the network is in "custom - // subnet mode". 
+ // AvailabilityZone: Availability zone for this location. Within a city, + // maintenance will not be simultaneously scheduled in more than one + // availability zone. Example: "zone1" or "zone2". + AvailabilityZone string `json:"availabilityZone,omitempty"` + + // City: City designator used by the Interconnect UI to locate this + // InterconnectLocation within the Continent. For example: "Chicago, + // IL", "Amsterdam, Netherlands". + City string `json:"city,omitempty"` + + // Continent: Continent for this location. Used by the location picker + // in the Interconnect UI. // - // In "auto subnet mode", a newly created network is assigned the - // default CIDR of 10.128.0.0/9 and it automatically creates one - // subnetwork per region. - AutoCreateSubnetworks bool `json:"autoCreateSubnetworks,omitempty"` + // Possible values: + // "C_AFRICA" + // "C_ASIA_PAC" + // "C_EUROPE" + // "C_NORTH_AMERICA" + // "C_SOUTH_AMERICA" + Continent string `json:"continent,omitempty"` // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` - // Description: An optional description of this resource. Provide this - // property when you create the resource. + // Description: [Output Only] An optional description of the resource. Description string `json:"description,omitempty"` - // GatewayIPv4: A gateway address for default routing to other networks. - // This value is read only and is selected by the Google Compute Engine, - // typically as the first usable address in the IPv4Range. - GatewayIPv4 string `json:"gatewayIPv4,omitempty"` + // FacilityProvider: [Output Only] The name of the provider for this + // facility (e.g., EQUINIX). + FacilityProvider string `json:"facilityProvider,omitempty"` + + // FacilityProviderFacilityId: [Output Only] A provider-assigned + // Identifier for this facility (e.g., Ashburn-DC1). + FacilityProviderFacilityId string `json:"facilityProviderFacilityId,omitempty"` // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` - // Kind: [Output Only] Type of the resource. Always compute#network for - // networks. + // Kind: [Output Only] Type of the resource. Always + // compute#interconnectLocation for interconnect locations. Kind string `json:"kind,omitempty"` - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. + // Name: [Output Only] Name of the resource. Name string `json:"name,omitempty"` - // Peerings: [Output Only] List of network peerings for the resource. - Peerings []*NetworkPeering `json:"peerings,omitempty"` + // PeeringdbFacilityId: [Output Only] The peeringdb identifier for this + // facility (corresponding with a netfac type in peeringdb). + PeeringdbFacilityId string `json:"peeringdbFacilityId,omitempty"` + + // RegionInfos: [Output Only] A list of InterconnectLocation.RegionInfo + // objects, that describe parameters pertaining to the relation between + // this InterconnectLocation and various Google Cloud regions. 
+ RegionInfos []*InterconnectLocationRegionInfo `json:"regionInfos,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // Subnetworks: [Output Only] Server-defined fully-qualified URLs for - // all subnetworks in this network. - Subnetworks []string `json:"subnetworks,omitempty"` - // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "IPv4Range") to + // ForceSendFields is a list of field names (e.g. "Address") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -9731,7 +13031,7 @@ type Network struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "IPv4Range") to include in + // NullFields is a list of field names (e.g. "Address") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -9740,103 +13040,24 @@ type Network struct { NullFields []string `json:"-"` } -func (s *Network) MarshalJSON() ([]byte, error) { - type noMethod Network +func (s *InterconnectLocation) MarshalJSON() ([]byte, error) { + type noMethod InterconnectLocation raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NetworkInterface: A network interface resource attached to an -// instance. -type NetworkInterface struct { - // AccessConfigs: An array of configurations for this interface. - // Currently, only one access config, ONE_TO_ONE_NAT, is supported. If - // there are no accessConfigs specified, then this instance will have no - // external internet access. - AccessConfigs []*AccessConfig `json:"accessConfigs,omitempty"` +// InterconnectLocationList: Response to the list request, and contains +// a list of interconnect locations. +type InterconnectLocationList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` - // AliasIpRanges: An array of alias IP ranges for this network - // interface. Can only be specified for network interfaces on - // subnet-mode networks. - AliasIpRanges []*AliasIpRange `json:"aliasIpRanges,omitempty"` - - // Kind: [Output Only] Type of the resource. Always - // compute#networkInterface for network interfaces. - Kind string `json:"kind,omitempty"` - - // Name: [Output Only] The name of the network interface, generated by - // the server. For network devices, these are eth0, eth1, etc. - Name string `json:"name,omitempty"` - - // Network: URL of the network resource for this instance. When creating - // an instance, if neither the network nor the subnetwork is specified, - // the default network global/networks/default is used; if the network - // is not specified but the subnetwork is specified, the network is - // inferred. - // - // This field is optional when creating a firewall rule. If not - // specified when creating a firewall rule, the default network - // global/networks/default is used. - // - // If you specify this property, you can specify the network as a full - // or partial URL. 
For example, the following are all valid URLs: - // - - // https://www.googleapis.com/compute/v1/projects/project/global/networks/network - // - projects/project/global/networks/network - // - global/networks/default - Network string `json:"network,omitempty"` - - // NetworkIP: An IPv4 internal network address to assign to the instance - // for this network interface. If not specified by the user, an unused - // internal IP is assigned by the system. - NetworkIP string `json:"networkIP,omitempty"` - - // Subnetwork: The URL of the Subnetwork resource for this instance. If - // the network resource is in legacy mode, do not provide this property. - // If the network is in auto subnet mode, providing the subnetwork is - // optional. If the network is in custom subnet mode, then this field - // should be specified. If you specify this property, you can specify - // the subnetwork as a full or partial URL. For example, the following - // are all valid URLs: - // - - // https://www.googleapis.com/compute/v1/projects/project/regions/region/subnetworks/subnetwork - // - regions/region/subnetworks/subnetwork - Subnetwork string `json:"subnetwork,omitempty"` - - // ForceSendFields is a list of field names (e.g. "AccessConfigs") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "AccessConfigs") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *NetworkInterface) MarshalJSON() ([]byte, error) { - type noMethod NetworkInterface - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// NetworkList: Contains a list of networks. -type NetworkList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of Network resources. - Items []*Network `json:"items,omitempty"` + // Items: A list of InterconnectLocation resources. + Items []*InterconnectLocation `json:"items,omitempty"` - // Kind: [Output Only] Type of resource. Always compute#networkList for - // lists of networks. + // Kind: [Output Only] Type of resource. Always + // compute#interconnectLocationList for lists of interconnect locations. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -9850,6 +13071,9 @@ type NetworkList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *InterconnectLocationListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. 
googleapi.ServerResponse `json:"-"` @@ -9871,51 +13095,49 @@ type NetworkList struct { NullFields []string `json:"-"` } -func (s *NetworkList) MarshalJSON() ([]byte, error) { - type noMethod NetworkList +func (s *InterconnectLocationList) MarshalJSON() ([]byte, error) { + type noMethod InterconnectLocationList raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// NetworkPeering: A network peering attached to a network resource. The -// message includes the peering name, peer network, peering state, and a -// flag indicating whether Google Compute Engine should automatically -// create routes for the peering. -type NetworkPeering struct { - // AutoCreateRoutes: Whether full mesh connectivity is created and - // managed automatically. When it is set to true, Google Compute Engine - // will automatically create and manage the routes between two networks - // when the state is ACTIVE. Otherwise, user needs to create routes - // manually to route packets to peer network. - AutoCreateRoutes bool `json:"autoCreateRoutes,omitempty"` - - // Name: Name of this peering. Provided by the client when the peering - // is created. The name must comply with RFC1035. Specifically, the name - // must be 1-63 characters long and match regular expression - // [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a - // lowercase letter, and all the following characters must be a dash, - // lowercase letter, or digit, except the last character, which cannot - // be a dash. - Name string `json:"name,omitempty"` - - // Network: The URL of the peer network. It can be either full URL or - // partial URL. The peer network may belong to a different project. If - // the partial URL does not contain project, it is assumed that the peer - // network is in the same project as the current network. - Network string `json:"network,omitempty"` - - // State: [Output Only] State for the peering. +// InterconnectLocationListWarning: [Output Only] Informational warning +// message. +type InterconnectLocationListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. // // Possible values: - // "ACTIVE" - // "INACTIVE" - State string `json:"state,omitempty"` + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` - // StateDetails: [Output Only] Details about the current state of the - // peering. - StateDetails string `json:"stateDetails,omitempty"` + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InterconnectLocationListWarningData `json:"data,omitempty"` - // ForceSendFields is a list of field names (e.g. "AutoCreateRoutes") to + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"Code") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -9923,37 +13145,36 @@ type NetworkPeering struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AutoCreateRoutes") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *NetworkPeering) MarshalJSON() ([]byte, error) { - type noMethod NetworkPeering +func (s *InterconnectLocationListWarning) MarshalJSON() ([]byte, error) { + type noMethod InterconnectLocationListWarning raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NetworksAddPeeringRequest struct { - // AutoCreateRoutes: Whether Google Compute Engine manages the routes - // automatically. - AutoCreateRoutes bool `json:"autoCreateRoutes,omitempty"` - - // Name: Name of the peering, which should conform to RFC1035. - Name string `json:"name,omitempty"` +type InterconnectLocationListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` - // PeerNetwork: URL of the peer network. It can be either full URL or - // partial URL. The peer network may belong to a different project. If - // the partial URL does not contain project, it is assumed that the peer - // network is in the same project as the current network. - PeerNetwork string `json:"peerNetwork,omitempty"` + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` - // ForceSendFields is a list of field names (e.g. "AutoCreateRoutes") to + // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -9961,27 +13182,43 @@ type NetworksAddPeeringRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AutoCreateRoutes") to - // include in API requests with the JSON null value. 
By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *NetworksAddPeeringRequest) MarshalJSON() ([]byte, error) { - type noMethod NetworksAddPeeringRequest +func (s *InterconnectLocationListWarningData) MarshalJSON() ([]byte, error) { + type noMethod InterconnectLocationListWarningData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type NetworksRemovePeeringRequest struct { - // Name: Name of the peering, which should conform to RFC1035. - Name string `json:"name,omitempty"` +// InterconnectLocationRegionInfo: Information about any potential +// InterconnectAttachments between an Interconnect at a specific +// InterconnectLocation, and a specific Cloud Region. +type InterconnectLocationRegionInfo struct { + // ExpectedRttMs: Expected round-trip time in milliseconds, from this + // InterconnectLocation to a VM in this region. + ExpectedRttMs int64 `json:"expectedRttMs,omitempty,string"` - // ForceSendFields is a list of field names (e.g. "Name") to + // LocationPresence: Identifies the network presence of this location. + // + // Possible values: + // "LP_GLOBAL" + // "LP_LOCAL_REGION" + LocationPresence string `json:"locationPresence,omitempty"` + + // Region: URL for the region of this location. + Region string `json:"region,omitempty"` + + // RegionKey: Scope key for the region of this location. + RegionKey string `json:"regionKey,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ExpectedRttMs") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -9989,135 +13226,64 @@ type NetworksRemovePeeringRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Name") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "ExpectedRttMs") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. 
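Every struct added here follows the ForceSendFields / NullFields contract these comments describe: empty values are dropped from the request body unless explicitly listed. A minimal sketch of forcing an empty field onto the wire, assuming the vendored compute package is importable (the import path and surrounding function are assumptions) and using the InterconnectLocationRegionInfo type added above:

	info := &compute.InterconnectLocationRegionInfo{
		ExpectedRttMs: 0, // zero value would normally be omitted
		// Listing the field name makes the generated MarshalJSON (via
		// gensupport.MarshalJSON) emit it anyway.
		ForceSendFields: []string{"ExpectedRttMs"},
	}
	body, err := json.Marshal(info) // dispatches to the generated MarshalJSON
	if err != nil {
		return err
	}
	fmt.Println(string(body)) // includes "expectedRttMs":"0" despite the zero value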
NullFields []string `json:"-"` } -func (s *NetworksRemovePeeringRequest) MarshalJSON() ([]byte, error) { - type noMethod NetworksRemovePeeringRequest +func (s *InterconnectLocationRegionInfo) MarshalJSON() ([]byte, error) { + type noMethod InterconnectLocationRegionInfo raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Operation: An Operation resource, used to manage asynchronous API -// requests. -type Operation struct { - // ClientOperationId: [Output Only] Reserved for future use. - ClientOperationId string `json:"clientOperationId,omitempty"` - - // CreationTimestamp: [Deprecated] This field is deprecated. - CreationTimestamp string `json:"creationTimestamp,omitempty"` +// InterconnectOutageNotification: Description of a planned outage on +// this Interconnect. Next id: 9 +type InterconnectOutageNotification struct { + // AffectedCircuits: Iff issue_type is IT_PARTIAL_OUTAGE, a list of the + // Google-side circuit IDs that will be affected. + AffectedCircuits []string `json:"affectedCircuits,omitempty"` - // Description: [Output Only] A textual description of the operation, - // which is set when the operation is created. + // Description: Short user-visible description of the purpose of the + // outage. Description string `json:"description,omitempty"` - // EndTime: [Output Only] The time that this operation was completed. - // This value is in RFC3339 text format. - EndTime string `json:"endTime,omitempty"` - - // Error: [Output Only] If errors are generated during processing of the - // operation, this field will be populated. - Error *OperationError `json:"error,omitempty"` - - // HttpErrorMessage: [Output Only] If the operation fails, this field - // contains the HTTP error message that was returned, such as NOT FOUND. - HttpErrorMessage string `json:"httpErrorMessage,omitempty"` - - // HttpErrorStatusCode: [Output Only] If the operation fails, this field - // contains the HTTP error status code that was returned. For example, a - // 404 means the resource was not found. - HttpErrorStatusCode int64 `json:"httpErrorStatusCode,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // InsertTime: [Output Only] The time that this operation was requested. - // This value is in RFC3339 text format. - InsertTime string `json:"insertTime,omitempty"` + EndTime int64 `json:"endTime,omitempty,string"` - // Kind: [Output Only] Type of the resource. Always compute#operation - // for Operation resources. - Kind string `json:"kind,omitempty"` + // Possible values: + // "IT_OUTAGE" + // "IT_PARTIAL_OUTAGE" + IssueType string `json:"issueType,omitempty"` - // Name: [Output Only] Name of the resource. + // Name: Unique identifier for this outage notification. Name string `json:"name,omitempty"` - // OperationType: [Output Only] The type of operation, such as insert, - // update, or delete, and so on. - OperationType string `json:"operationType,omitempty"` - - // Progress: [Output Only] An optional progress indicator that ranges - // from 0 to 100. There is no requirement that this be linear or support - // any granularity of operations. This should not be used to guess when - // the operation will be complete. This number should monotonically - // increase as the operation progresses. - Progress int64 `json:"progress,omitempty"` - - // Region: [Output Only] The URL of the region where the operation - // resides. 
Only available when performing regional operations. - Region string `json:"region,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // StartTime: [Output Only] The time that this operation was started by - // the server. This value is in RFC3339 text format. - StartTime string `json:"startTime,omitempty"` - - // Status: [Output Only] The status of the operation, which can be one - // of the following: PENDING, RUNNING, or DONE. - // // Possible values: - // "DONE" - // "PENDING" - // "RUNNING" - Status string `json:"status,omitempty"` - - // StatusMessage: [Output Only] An optional textual description of the - // current status of the operation. - StatusMessage string `json:"statusMessage,omitempty"` - - // TargetId: [Output Only] The unique target ID, which identifies a - // specific incarnation of the target resource. - TargetId uint64 `json:"targetId,omitempty,string"` - - // TargetLink: [Output Only] The URL of the resource that the operation - // modifies. For operations related to creating a snapshot, this points - // to the persistent disk that the snapshot was created from. - TargetLink string `json:"targetLink,omitempty"` - - // User: [Output Only] User who requested the operation, for example: - // user@example.com. - User string `json:"user,omitempty"` - - // Warnings: [Output Only] If warning messages are generated during - // processing of the operation, this field will be populated. - Warnings []*OperationWarnings `json:"warnings,omitempty"` + // "NSRC_GOOGLE" + Source string `json:"source,omitempty"` - // Zone: [Output Only] The URL of the zone where the operation resides. - // Only available when performing per-zone operations. - Zone string `json:"zone,omitempty"` + // StartTime: Scheduled start and end times for the outage (milliseconds + // since Unix epoch). + StartTime int64 `json:"startTime,omitempty,string"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Possible values: + // "NS_ACTIVE" + // "NS_CANCELED" + State string `json:"state,omitempty"` - // ForceSendFields is a list of field names (e.g. "ClientOperationId") - // to unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "AffectedCircuits") to + // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ClientOperationId") to + // NullFields is a list of field names (e.g. "AffectedCircuits") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. 
However, any field // with an empty value appearing in NullFields will be sent to the @@ -10127,20 +13293,34 @@ type Operation struct { NullFields []string `json:"-"` } -func (s *Operation) MarshalJSON() ([]byte, error) { - type noMethod Operation +func (s *InterconnectOutageNotification) MarshalJSON() ([]byte, error) { + type noMethod InterconnectOutageNotification raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// OperationError: [Output Only] If errors are generated during -// processing of the operation, this field will be populated. -type OperationError struct { - // Errors: [Output Only] The array of errors encountered while - // processing this operation. - Errors []*OperationErrorErrors `json:"errors,omitempty"` +// License: A license resource. +type License struct { + // ChargesUseFee: [Output Only] Deprecated. This field no longer + // reflects whether a license charges a usage fee. + ChargesUseFee bool `json:"chargesUseFee,omitempty"` - // ForceSendFields is a list of field names (e.g. "Errors") to + // Kind: [Output Only] Type of resource. Always compute#license for + // licenses. + Kind string `json:"kind,omitempty"` + + // Name: [Output Only] Name of the resource. The name is 1-63 characters + // long and complies with RFC1035. + Name string `json:"name,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "ChargesUseFee") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -10148,33 +13328,33 @@ type OperationError struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Errors") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "ChargesUseFee") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *OperationError) MarshalJSON() ([]byte, error) { - type noMethod OperationError +func (s *License) MarshalJSON() ([]byte, error) { + type noMethod License raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type OperationErrorErrors struct { - // Code: [Output Only] The error type identifier for this error. - Code string `json:"code,omitempty"` +// LogConfig: Specifies what kind of log the caller must write +type LogConfig struct { + // CloudAudit: Cloud audit options. + CloudAudit *LogConfigCloudAuditOptions `json:"cloudAudit,omitempty"` - // Location: [Output Only] Indicates the field in the request that - // caused the error. This property is optional. - Location string `json:"location,omitempty"` + // Counter: Counter options. 
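The InterconnectOutageNotification added above carries StartTime and EndTime as milliseconds since the Unix epoch, serialized as JSON strings via the int64 ",string" tag. A small conversion sketch, assuming n is such a notification value and targeting the go1.9-era standard library (no time.UnixMilli):

	ms := n.StartTime // milliseconds since the Unix epoch
	start := time.Unix(ms/1000, (ms%1000)*int64(time.Millisecond))
	fmt.Println("outage begins:", start.UTC())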
+ Counter *LogConfigCounterOptions `json:"counter,omitempty"` - // Message: [Output Only] An optional, human-readable error message. - Message string `json:"message,omitempty"` + // DataAccess: Data access options. + DataAccess *LogConfigDataAccessOptions `json:"dataAccess,omitempty"` - // ForceSendFields is a list of field names (e.g. "Code") to + // ForceSendFields is a list of field names (e.g. "CloudAudit") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -10182,8 +13362,8 @@ type OperationErrorErrors struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "CloudAudit") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -10191,47 +13371,82 @@ type OperationErrorErrors struct { NullFields []string `json:"-"` } -func (s *OperationErrorErrors) MarshalJSON() ([]byte, error) { - type noMethod OperationErrorErrors +func (s *LogConfig) MarshalJSON() ([]byte, error) { + type noMethod LogConfig raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type OperationWarnings struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. +// LogConfigCloudAuditOptions: Write a Cloud Audit log +type LogConfigCloudAuditOptions struct { + // AuthorizationLoggingOptions: Information used by the Cloud Audit + // Logging pipeline. + AuthorizationLoggingOptions *AuthorizationLoggingOptions `json:"authorizationLoggingOptions,omitempty"` + + // LogName: The log_name to populate in the Cloud Audit Record. // // Possible values: - // "CLEANUP_FAILED" - // "DEPRECATED_RESOURCE_USED" - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - // "FIELD_VALUE_OVERRIDEN" - // "INJECTED_KERNELS_DEPRECATED" - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - // "NEXT_HOP_CANNOT_IP_FORWARD" - // "NEXT_HOP_INSTANCE_NOT_FOUND" - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - // "NEXT_HOP_NOT_RUNNING" - // "NOT_CRITICAL_ERROR" - // "NO_RESULTS_ON_PAGE" - // "REQUIRED_TOS_AGREEMENT" - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - // "RESOURCE_NOT_DELETED" - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - // "UNREACHABLE" - Code string `json:"code,omitempty"` + // "ADMIN_ACTIVITY" + // "DATA_ACCESS" + // "UNSPECIFIED_LOG_NAME" + LogName string `json:"logName,omitempty"` - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: - // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*OperationWarningsData `json:"data,omitempty"` + // ForceSendFields is a list of field names (e.g. + // "AuthorizationLoggingOptions") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. 
This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` + // NullFields is a list of field names (e.g. + // "AuthorizationLoggingOptions") to include in API requests with the + // JSON null value. By default, fields with empty values are omitted + // from API requests. However, any field with an empty value appearing + // in NullFields will be sent to the server as null. It is an error if a + // field in this list has a non-empty value. This may be used to include + // null fields in Patch requests. + NullFields []string `json:"-"` +} - // ForceSendFields is a list of field names (e.g. "Code") to +func (s *LogConfigCloudAuditOptions) MarshalJSON() ([]byte, error) { + type noMethod LogConfigCloudAuditOptions + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// LogConfigCounterOptions: Increment a streamz counter with the +// specified metric and field names. +// +// Metric names should start with a '/', generally be lowercase-only, +// and end in "_count". Field names should not contain an initial slash. +// The actual exported metric names will have "/iam/policy" +// prepended. +// +// Field names correspond to IAM request parameters and field values are +// their respective values. +// +// At present the only supported field names are - "iam_principal", +// corresponding to IAMContext.principal; - "" (empty string), resulting +// in one aggretated counter with no field. +// +// Examples: counter { metric: "/debug_access_count" field: +// "iam_principal" } ==> increment counter +// /iam/policy/backend_debug_access_count {iam_principal=[value of +// IAMContext.principal]} +// +// At this time we do not support: * multiple field names (though this +// may be supported in the future) * decrementing the counter * +// incrementing it by anything other than 1 +type LogConfigCounterOptions struct { + // Field: The field value to attribute. + Field string `json:"field,omitempty"` + + // Metric: The metric to update. + Metric string `json:"metric,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Field") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -10239,7 +13454,7 @@ type OperationWarnings struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Code") to include in API + // NullFields is a list of field names (e.g. "Field") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -10248,27 +13463,24 @@ type OperationWarnings struct { NullFields []string `json:"-"` } -func (s *OperationWarnings) MarshalJSON() ([]byte, error) { - type noMethod OperationWarnings +func (s *LogConfigCounterOptions) MarshalJSON() ([]byte, error) { + type noMethod LogConfigCounterOptions raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type OperationWarningsData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. 
For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` - - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` +// LogConfigDataAccessOptions: Write a Data Access (Gin) log +type LogConfigDataAccessOptions struct { + // LogMode: Whether Gin logging should happen in a fail-closed manner at + // the caller. This is relevant only in the LocalIAM implementation, for + // now. + // + // Possible values: + // "LOG_FAIL_CLOSED" + // "LOG_MODE_UNSPECIFIED" + LogMode string `json:"logMode,omitempty"` - // ForceSendFields is a list of field names (e.g. "Key") to + // ForceSendFields is a list of field names (e.g. "LogMode") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -10276,8 +13488,8 @@ type OperationWarningsData struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "LogMode") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -10285,73 +13497,103 @@ type OperationWarningsData struct { NullFields []string `json:"-"` } -func (s *OperationWarningsData) MarshalJSON() ([]byte, error) { - type noMethod OperationWarningsData +func (s *LogConfigDataAccessOptions) MarshalJSON() ([]byte, error) { + type noMethod LogConfigDataAccessOptions raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type OperationAggregatedList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id string `json:"id,omitempty"` +// MachineType: A Machine Type resource. +type MachineType struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` - // Items: [Output Only] A map of scoped operation lists. - Items map[string]OperationsScopedList `json:"items,omitempty"` + // Deprecated: [Output Only] The deprecation status associated with this + // machine type. + Deprecated *DeprecationStatus `json:"deprecated,omitempty"` - // Kind: [Output Only] Type of resource. Always - // compute#operationAggregatedList for aggregated lists of operations. - Kind string `json:"kind,omitempty"` + // Description: [Output Only] An optional textual description of the + // resource. + Description string `json:"description,omitempty"` - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. 
If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` + // GuestCpus: [Output Only] The number of virtual CPUs that are + // available to the instance. + GuestCpus int64 `json:"guestCpus,omitempty"` - // SelfLink: [Output Only] Server-defined URL for this resource. + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // IsSharedCpu: [Output Only] Whether this machine type has a shared + // CPU. See Shared-core machine types for more information. + IsSharedCpu bool `json:"isSharedCpu,omitempty"` + + // Kind: [Output Only] The type of the resource. Always + // compute#machineType for machine types. + Kind string `json:"kind,omitempty"` + + // MaximumPersistentDisks: [Output Only] Maximum persistent disks + // allowed. + MaximumPersistentDisks int64 `json:"maximumPersistentDisks,omitempty"` + + // MaximumPersistentDisksSizeGb: [Output Only] Maximum total persistent + // disks size (GB) allowed. + MaximumPersistentDisksSizeGb int64 `json:"maximumPersistentDisksSizeGb,omitempty,string"` + + // MemoryMb: [Output Only] The amount of physical memory available to + // the instance, defined in MB. + MemoryMb int64 `json:"memoryMb,omitempty"` + + // Name: [Output Only] Name of the resource. + Name string `json:"name,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` + // Zone: [Output Only] The name of the zone where the machine type + // resides, such as us-central1-a. + Zone string `json:"zone,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
NullFields []string `json:"-"` } -func (s *OperationAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod OperationAggregatedList +func (s *MachineType) MarshalJSON() ([]byte, error) { + type noMethod MachineType raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// OperationList: Contains a list of Operation resources. -type OperationList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. +type MachineTypeAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. Id string `json:"id,omitempty"` - // Items: [Output Only] A list of Operation resources. - Items []*Operation `json:"items,omitempty"` + // Items: A list of MachineTypesScopedList resources. + Items map[string]MachineTypesScopedList `json:"items,omitempty"` - // Kind: [Output Only] Type of resource. Always compute#operations for - // Operations resource. + // Kind: [Output Only] Type of resource. Always + // compute#machineTypeAggregatedList for aggregated lists of machine + // types. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -10365,6 +13607,9 @@ type OperationList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *MachineTypeAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -10386,46 +13631,15 @@ type OperationList struct { NullFields []string `json:"-"` } -func (s *OperationList) MarshalJSON() ([]byte, error) { - type noMethod OperationList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type OperationsScopedList struct { - // Operations: [Output Only] List of operations contained in this scope. - Operations []*Operation `json:"operations,omitempty"` - - // Warning: [Output Only] Informational warning which replaces the list - // of operations when the list is empty. - Warning *OperationsScopedListWarning `json:"warning,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Operations") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Operations") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
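MachineTypeAggregatedList above and MachineTypeList below both page their results through NextPageToken exactly as the comments describe. A minimal paging sketch, assuming an initialized *compute.Service named svc inside a function returning error, with placeholder project and zone values:

	token := ""
	for {
		call := svc.MachineTypes.List("my-project", "us-central1-a")
		if token != "" {
			call = call.PageToken(token)
		}
		page, err := call.Do()
		if err != nil {
			return err
		}
		for _, mt := range page.Items {
			fmt.Println(mt.Name, mt.GuestCpus, mt.MemoryMb)
		}
		if page.NextPageToken == "" {
			break // no further pages
		}
		token = page.NextPageToken
	}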
- NullFields []string `json:"-"` -} - -func (s *OperationsScopedList) MarshalJSON() ([]byte, error) { - type noMethod OperationsScopedList +func (s *MachineTypeAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod MachineTypeAggregatedList raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// OperationsScopedListWarning: [Output Only] Informational warning -// which replaces the list of operations when the list is empty. -type OperationsScopedListWarning struct { +// MachineTypeAggregatedListWarning: [Output Only] Informational warning +// message. +type MachineTypeAggregatedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -10453,7 +13667,7 @@ type OperationsScopedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*OperationsScopedListWarningData `json:"data,omitempty"` + Data []*MachineTypeAggregatedListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -10476,13 +13690,13 @@ type OperationsScopedListWarning struct { NullFields []string `json:"-"` } -func (s *OperationsScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod OperationsScopedListWarning +func (s *MachineTypeAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod MachineTypeAggregatedListWarning raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type OperationsScopedListWarningData struct { +type MachineTypeAggregatedListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -10513,38 +13727,44 @@ type OperationsScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *OperationsScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod OperationsScopedListWarningData +func (s *MachineTypeAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod MachineTypeAggregatedListWarningData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// PathMatcher: A matcher for the path portion of the URL. The -// BackendService from the longest-matched rule will serve the URL. If -// no rule was matched, the default service will be used. -type PathMatcher struct { - // DefaultService: The full or partial URL to the BackendService - // resource. This will be used if none of the pathRules defined by this - // PathMatcher is matched by the URL's path portion. For example, the - // following are all valid URLs to a BackendService resource: - // - - // https://www.googleapis.com/compute/v1/projects/project/global/backendServices/backendService - // - compute/v1/projects/project/global/backendServices/backendService - // - // - global/backendServices/backendService - DefaultService string `json:"defaultService,omitempty"` +// MachineTypeList: Contains a list of machine types. +type MachineTypeList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` - // Description: An optional description of this resource. Provide this - // property when you create the resource. 
- Description string `json:"description,omitempty"` + // Items: A list of MachineType resources. + Items []*MachineType `json:"items,omitempty"` - // Name: The name to which this PathMatcher is referred by the HostRule. - Name string `json:"name,omitempty"` + // Kind: [Output Only] Type of resource. Always compute#machineTypeList + // for lists of machine types. + Kind string `json:"kind,omitempty"` - // PathRules: The list of path rules. - PathRules []*PathRule `json:"pathRules,omitempty"` + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` - // ForceSendFields is a list of field names (e.g. "DefaultService") to + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *MachineTypeListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -10552,36 +13772,57 @@ type PathMatcher struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "DefaultService") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *PathMatcher) MarshalJSON() ([]byte, error) { - type noMethod PathMatcher +func (s *MachineTypeList) MarshalJSON() ([]byte, error) { + type noMethod MachineTypeList raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// PathRule: A path-matching rule for a URL. If matched, will use the -// specified BackendService to handle the traffic arriving at this URL. -type PathRule struct { - // Paths: The list of path patterns to match. Each must start with / and - // the only place a * is allowed is at the end following a /. The string - // fed to the path matcher does not include any text after the first ? - // or #, and those chars are not allowed here. - Paths []string `json:"paths,omitempty"` +// MachineTypeListWarning: [Output Only] Informational warning message. 
+type MachineTypeListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` - // Service: The URL of the BackendService resource if this rule is - // matched. - Service string `json:"service,omitempty"` + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*MachineTypeListWarningData `json:"data,omitempty"` - // ForceSendFields is a list of field names (e.g. "Paths") to + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -10589,7 +13830,7 @@ type PathRule struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Paths") to include in API + // NullFields is a list of field names (e.g. "Code") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -10598,76 +13839,27 @@ type PathRule struct { NullFields []string `json:"-"` } -func (s *PathRule) MarshalJSON() ([]byte, error) { - type noMethod PathRule +func (s *MachineTypeListWarning) MarshalJSON() ([]byte, error) { + type noMethod MachineTypeListWarning raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Policy: Defines an Identity and Access Management (IAM) policy. It is -// used to specify access control policies for Cloud Platform -// resources. -// -// -// -// A `Policy` consists of a list of `bindings`. A `Binding` binds a list -// of `members` to a `role`, where the members can be user accounts, -// Google groups, Google domains, and service accounts. A `role` is a -// named list of permissions defined by IAM. -// -// **Example** -// -// { "bindings": [ { "role": "roles/owner", "members": [ -// "user:mike@example.com", "group:admins@example.com", -// "domain:google.com", -// "serviceAccount:my-other-app@appspot.gserviceaccount.com", ] }, { -// "role": "roles/viewer", "members": ["user:sean@example.com"] } ] -// } -// -// For a description of IAM and its features, see the [IAM developer's -// guide](https://cloud.google.com/iam). -type Policy struct { - // AuditConfigs: Specifies cloud audit logging configuration for this - // policy. - AuditConfigs []*AuditConfig `json:"auditConfigs,omitempty"` - - // Bindings: Associates a list of `members` to a `role`. 
`bindings` with - // no members will result in an error. - Bindings []*Binding `json:"bindings,omitempty"` - - // Etag: `etag` is used for optimistic concurrency control as a way to - // help prevent simultaneous updates of a policy from overwriting each - // other. It is strongly suggested that systems make use of the `etag` - // in the read-modify-write cycle to perform policy updates in order to - // avoid race conditions: An `etag` is returned in the response to - // `getIamPolicy`, and systems are expected to put that etag in the - // request to `setIamPolicy` to ensure that their change will be applied - // to the same version of the policy. - // - // If no `etag` is provided in the call to `setIamPolicy`, then the - // existing policy is overwritten blindly. - Etag string `json:"etag,omitempty"` - - IamOwned bool `json:"iamOwned,omitempty"` - - // Rules: If more than one rule is specified, the rules are applied in - // the following manner: - All matching LOG rules are always applied. - - // If any DENY/DENY_WITH_LOG rule matches, permission is denied. Logging - // will be applied if one or more matching rule requires logging. - - // Otherwise, if any ALLOW/ALLOW_WITH_LOG rule matches, permission is - // granted. Logging will be applied if one or more matching rule - // requires logging. - Otherwise, if no rule applies, permission is - // denied. - Rules []*Rule `json:"rules,omitempty"` - - // Version: Version of the `Policy`. The default version is 0. - Version int64 `json:"version,omitempty"` +type MachineTypeListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` - // ForceSendFields is a list of field names (e.g. "AuditConfigs") to + // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -10675,109 +13867,31 @@ type Policy struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AuditConfigs") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. 
NullFields []string `json:"-"` } -func (s *Policy) MarshalJSON() ([]byte, error) { - type noMethod Policy +func (s *MachineTypeListWarningData) MarshalJSON() ([]byte, error) { + type noMethod MachineTypeListWarningData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Project: A Project resource. Projects can only be created in the -// Google Cloud Platform Console. Unless marked otherwise, values can -// only be modified in the console. -type Project struct { - // CommonInstanceMetadata: Metadata key/value pairs available to all - // instances contained in this project. See Custom metadata for more - // information. - CommonInstanceMetadata *Metadata `json:"commonInstanceMetadata,omitempty"` - - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // DefaultServiceAccount: [Output Only] Default service account used by - // VMs running in this project. - DefaultServiceAccount string `json:"defaultServiceAccount,omitempty"` - - // Description: An optional textual description of the resource. - Description string `json:"description,omitempty"` - - // EnabledFeatures: Restricted features enabled for use on this project. - EnabledFeatures []string `json:"enabledFeatures,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. This is not the project ID, and - // is just a unique ID used by Compute Engine to identify resources. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of the resource. Always compute#project for - // projects. - Kind string `json:"kind,omitempty"` - - // Name: The project ID. For example: my-example-project. Use the - // project ID to make requests to Compute Engine. - Name string `json:"name,omitempty"` - - // Quotas: [Output Only] Quotas assigned to this project. - Quotas []*Quota `json:"quotas,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // UsageExportLocation: The naming prefix for daily usage reports and - // the Google Cloud Storage bucket where they are stored. - UsageExportLocation *UsageExportLocation `json:"usageExportLocation,omitempty"` - - // XpnProjectStatus: [Output Only] The role this project has in a shared - // VPC configuration. Currently only HOST projects are differentiated. - // - // Possible values: - // "HOST" - // "UNSPECIFIED_XPN_PROJECT_STATUS" - XpnProjectStatus string `json:"xpnProjectStatus,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. - // "CommonInstanceMetadata") to unconditionally include in API requests. - // By default, fields with empty values are omitted from API requests. - // However, any non-pointer, non-interface field appearing in - // ForceSendFields will be sent to the server regardless of whether the - // field is empty or not. This may be used to include empty fields in - // Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CommonInstanceMetadata") - // to include in API requests with the JSON null value. By default, - // fields with empty values are omitted from API requests. However, any - // field with an empty value appearing in NullFields will be sent to the - // server as null. 
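Every MarshalJSON in this file uses the noMethod alias trick seen just above: the value is converted to a locally declared type with identical fields but no methods, so marshalling it cannot recurse back into MarshalJSON, and the result is handed to gensupport.MarshalJSON so ForceSendFields and NullFields are honored. A standalone illustration of the recursion-avoidance part only, using plain encoding/json and a hypothetical Example type:

	package main

	import (
		"encoding/json"
		"fmt"
	)

	type Example struct {
		Name string `json:"name,omitempty"`
	}

	func (s *Example) MarshalJSON() ([]byte, error) {
		type noMethod Example    // same fields, no MarshalJSON method
		raw := noMethod(*s)      // converting drops the method set
		return json.Marshal(raw) // the generated code calls gensupport.MarshalJSON here
	}

	func main() {
		b, _ := json.Marshal(&Example{Name: "demo"})
		fmt.Println(string(b)) // {"name":"demo"}
	}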
It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *Project) MarshalJSON() ([]byte, error) { - type noMethod Project - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} +type MachineTypesScopedList struct { + // MachineTypes: [Output Only] List of machine types contained in this + // scope. + MachineTypes []*MachineType `json:"machineTypes,omitempty"` -type ProjectsDisableXpnResourceRequest struct { - // XpnResource: Service resource (a.k.a service project) ID. - XpnResource *XpnResourceId `json:"xpnResource,omitempty"` + // Warning: [Output Only] An informational warning that appears when the + // machine types list is empty. + Warning *MachineTypesScopedListWarning `json:"warning,omitempty"` - // ForceSendFields is a list of field names (e.g. "XpnResource") to + // ForceSendFields is a list of field names (e.g. "MachineTypes") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -10785,7 +13899,7 @@ type ProjectsDisableXpnResourceRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "XpnResource") to include + // NullFields is a list of field names (e.g. "MachineTypes") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -10794,17 +13908,49 @@ type ProjectsDisableXpnResourceRequest struct { NullFields []string `json:"-"` } -func (s *ProjectsDisableXpnResourceRequest) MarshalJSON() ([]byte, error) { - type noMethod ProjectsDisableXpnResourceRequest +func (s *MachineTypesScopedList) MarshalJSON() ([]byte, error) { + type noMethod MachineTypesScopedList raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ProjectsEnableXpnResourceRequest struct { - // XpnResource: Service resource (a.k.a service project) ID. - XpnResource *XpnResourceId `json:"xpnResource,omitempty"` +// MachineTypesScopedListWarning: [Output Only] An informational warning +// that appears when the machine types list is empty. +type MachineTypesScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` - // ForceSendFields is a list of field names (e.g. "XpnResource") to + // Data: [Output Only] Metadata about this warning in key: value format. 
+ // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*MachineTypesScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -10812,44 +13958,36 @@ type ProjectsEnableXpnResourceRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "XpnResource") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *ProjectsEnableXpnResourceRequest) MarshalJSON() ([]byte, error) { - type noMethod ProjectsEnableXpnResourceRequest +func (s *MachineTypesScopedListWarning) MarshalJSON() ([]byte, error) { + type noMethod MachineTypesScopedListWarning raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ProjectsGetXpnResources struct { - // Kind: [Output Only] Type of resource. Always - // compute#projectsGetXpnResources for lists of service resources (a.k.a - // service projects) - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // Resources: Serive resources (a.k.a service projects) attached to this - // project as their shared VPC host. - Resources []*XpnResourceId `json:"resources,omitempty"` +type MachineTypesScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` - // ForceSendFields is a list of field names (e.g. 
"Kind") to + // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -10857,7 +13995,7 @@ type ProjectsGetXpnResources struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Kind") to include in API + // NullFields is a list of field names (e.g. "Key") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -10866,19 +14004,80 @@ type ProjectsGetXpnResources struct { NullFields []string `json:"-"` } -func (s *ProjectsGetXpnResources) MarshalJSON() ([]byte, error) { - type noMethod ProjectsGetXpnResources +func (s *MachineTypesScopedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod MachineTypesScopedListWarningData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ProjectsListXpnHostsRequest struct { - // Organization: Optional organization ID managed by Cloud Resource - // Manager, for which to list shared VPC host projects. If not - // specified, the organization will be inferred from the project. - Organization string `json:"organization,omitempty"` +type ManagedInstance struct { + // CurrentAction: [Output Only] The current action that the managed + // instance group has scheduled for the instance. Possible values: + // - NONE The instance is running, and the managed instance group does + // not have any scheduled actions for this instance. + // - CREATING The managed instance group is creating this instance. If + // the group fails to create this instance, it will try again until it + // is successful. + // - CREATING_WITHOUT_RETRIES The managed instance group is attempting + // to create this instance only once. If the group fails to create this + // instance, it does not try again and the group's targetSize value is + // decreased instead. + // - RECREATING The managed instance group is recreating this instance. + // + // - DELETING The managed instance group is permanently deleting this + // instance. + // - ABANDONING The managed instance group is abandoning this instance. + // The instance will be removed from the instance group and from any + // target pools that are associated with this group. + // - RESTARTING The managed instance group is restarting the instance. + // + // - REFRESHING The managed instance group is applying configuration + // changes to the instance without stopping it. For example, the group + // can update the target pool list for an instance without stopping that + // instance. + // + // Possible values: + // "ABANDONING" + // "CREATING" + // "CREATING_WITHOUT_RETRIES" + // "DELETING" + // "NONE" + // "RECREATING" + // "REFRESHING" + // "RESTARTING" + // "VERIFYING" + CurrentAction string `json:"currentAction,omitempty"` - // ForceSendFields is a list of field names (e.g. "Organization") to + // Id: [Output only] The unique identifier for this resource. This field + // is empty when instance does not exist. + Id uint64 `json:"id,omitempty,string"` + + // Instance: [Output Only] The URL of the instance. The URL can exist + // even if the instance has not yet been created. 
+ Instance string `json:"instance,omitempty"` + + // InstanceStatus: [Output Only] The status of the instance. This field + // is empty when the instance does not exist. + // + // Possible values: + // "PROVISIONING" + // "RUNNING" + // "STAGING" + // "STOPPED" + // "STOPPING" + // "SUSPENDED" + // "SUSPENDING" + // "TERMINATED" + InstanceStatus string `json:"instanceStatus,omitempty"` + + // LastAttempt: [Output Only] Information about the last attempt to + // create or delete the instance. + LastAttempt *ManagedInstanceLastAttempt `json:"lastAttempt,omitempty"` + + // Version: [Output Only] Intended version of this instance. + Version *ManagedInstanceVersion `json:"version,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CurrentAction") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -10886,7 +14085,7 @@ type ProjectsListXpnHostsRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Organization") to include + // NullFields is a list of field names (e.g. "CurrentAction") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -10895,66 +14094,18 @@ type ProjectsListXpnHostsRequest struct { NullFields []string `json:"-"` } -func (s *ProjectsListXpnHostsRequest) MarshalJSON() ([]byte, error) { - type noMethod ProjectsListXpnHostsRequest +func (s *ManagedInstance) MarshalJSON() ([]byte, error) { + type noMethod ManagedInstance raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Quota: A quotas entry. -type Quota struct { - // Limit: [Output Only] Quota limit for this metric. - Limit float64 `json:"limit,omitempty"` - - // Metric: [Output Only] Name of the quota metric. - // - // Possible values: - // "AUTOSCALERS" - // "BACKEND_BUCKETS" - // "BACKEND_SERVICES" - // "COMMITMENTS" - // "CPUS" - // "CPUS_ALL_REGIONS" - // "DISKS_TOTAL_GB" - // "FIREWALLS" - // "FORWARDING_RULES" - // "HEALTH_CHECKS" - // "IMAGES" - // "INSTANCES" - // "INSTANCE_GROUPS" - // "INSTANCE_GROUP_MANAGERS" - // "INSTANCE_TEMPLATES" - // "IN_USE_ADDRESSES" - // "LOCAL_SSD_TOTAL_GB" - // "NETWORKS" - // "NVIDIA_K80_GPUS" - // "PREEMPTIBLE_CPUS" - // "PREEMPTIBLE_LOCAL_SSD_GB" - // "REGIONAL_AUTOSCALERS" - // "REGIONAL_INSTANCE_GROUP_MANAGERS" - // "ROUTERS" - // "ROUTES" - // "SECURITY_POLICIES" - // "SECURITY_POLICY_RULES" - // "SNAPSHOTS" - // "SSD_TOTAL_GB" - // "SSL_CERTIFICATES" - // "STATIC_ADDRESSES" - // "SUBNETWORKS" - // "TARGET_HTTPS_PROXIES" - // "TARGET_HTTP_PROXIES" - // "TARGET_INSTANCES" - // "TARGET_POOLS" - // "TARGET_SSL_PROXIES" - // "TARGET_VPN_GATEWAYS" - // "URL_MAPS" - // "VPN_TUNNELS" - Metric string `json:"metric,omitempty"` - - // Usage: [Output Only] Current usage of this metric. - Usage float64 `json:"usage,omitempty"` +type ManagedInstanceLastAttempt struct { + // Errors: [Output Only] Encountered errors during the last attempt to + // create or delete the instance. + Errors *ManagedInstanceLastAttemptErrors `json:"errors,omitempty"` - // ForceSendFields is a list of field names (e.g. "Limit") to + // ForceSendFields is a list of field names (e.g. 
"Errors") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -10962,7 +14113,7 @@ type Quota struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Limit") to include in API + // NullFields is a list of field names (e.g. "Errors") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -10971,46 +14122,20 @@ type Quota struct { NullFields []string `json:"-"` } -func (s *Quota) MarshalJSON() ([]byte, error) { - type noMethod Quota +func (s *ManagedInstanceLastAttempt) MarshalJSON() ([]byte, error) { + type noMethod ManagedInstanceLastAttempt raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -func (s *Quota) UnmarshalJSON(data []byte) error { - type noMethod Quota - var s1 struct { - Limit gensupport.JSONFloat64 `json:"limit"` - Usage gensupport.JSONFloat64 `json:"usage"` - *noMethod - } - s1.noMethod = (*noMethod)(s) - if err := json.Unmarshal(data, &s1); err != nil { - return err - } - s.Limit = float64(s1.Limit) - s.Usage = float64(s1.Usage) - return nil -} - -// Reference: Represents a reference to a resource. -type Reference struct { - // Kind: [Output Only] Type of the resource. Always compute#reference - // for references. - Kind string `json:"kind,omitempty"` - - // ReferenceType: A description of the reference type with no implied - // semantics. Possible values include: - // - MEMBER_OF - ReferenceType string `json:"referenceType,omitempty"` - - // Referrer: URL of the resource which refers to the target. - Referrer string `json:"referrer,omitempty"` - - // Target: URL of the resource to which this reference points. - Target string `json:"target,omitempty"` +// ManagedInstanceLastAttemptErrors: [Output Only] Encountered errors +// during the last attempt to create or delete the instance. +type ManagedInstanceLastAttemptErrors struct { + // Errors: [Output Only] The array of errors encountered while + // processing this operation. + Errors []*ManagedInstanceLastAttemptErrorsErrors `json:"errors,omitempty"` - // ForceSendFields is a list of field names (e.g. "Kind") to + // ForceSendFields is a list of field names (e.g. "Errors") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -11018,7 +14143,7 @@ type Reference struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Kind") to include in API + // NullFields is a list of field names (e.g. "Errors") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -11027,66 +14152,64 @@ type Reference struct { NullFields []string `json:"-"` } -func (s *Reference) MarshalJSON() ([]byte, error) { - type noMethod Reference +func (s *ManagedInstanceLastAttemptErrors) MarshalJSON() ([]byte, error) { + type noMethod ManagedInstanceLastAttemptErrors raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Region: Region resource. -type Region struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Deprecated: [Output Only] The deprecation status associated with this - // region. - Deprecated *DeprecationStatus `json:"deprecated,omitempty"` - - // Description: [Output Only] Textual description of the resource. - Description string `json:"description,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` +type ManagedInstanceLastAttemptErrorsErrors struct { + // Code: [Output Only] The error type identifier for this error. + Code string `json:"code,omitempty"` - // Kind: [Output Only] Type of the resource. Always compute#region for - // regions. - Kind string `json:"kind,omitempty"` + // Location: [Output Only] Indicates the field in the request that + // caused the error. This property is optional. + Location string `json:"location,omitempty"` - // Name: [Output Only] Name of the resource. - Name string `json:"name,omitempty"` + // Message: [Output Only] An optional, human-readable error message. + Message string `json:"message,omitempty"` - // Quotas: [Output Only] Quotas assigned to this region. - Quotas []*Quota `json:"quotas,omitempty"` + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} - // Status: [Output Only] Status of the region, either UP or DOWN. - // - // Possible values: - // "DOWN" - // "UP" - Status string `json:"status,omitempty"` +func (s *ManagedInstanceLastAttemptErrorsErrors) MarshalJSON() ([]byte, error) { + type noMethod ManagedInstanceLastAttemptErrorsErrors + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} - // Zones: [Output Only] A list of zones available in this region, in the - // form of resource URLs. - Zones []string `json:"zones,omitempty"` +type ManagedInstanceVersion struct { + // InstanceTemplate: [Output Only] The intended template of the + // instance. 
This field is empty when current_action is one of { + // DELETING, ABANDONING }. + InstanceTemplate string `json:"instanceTemplate,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Name: [Output Only] Name of the version. + Name string `json:"name,omitempty"` - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "InstanceTemplate") to + // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to + // NullFields is a list of field names (e.g. "InstanceTemplate") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -11096,40 +14219,31 @@ type Region struct { NullFields []string `json:"-"` } -func (s *Region) MarshalJSON() ([]byte, error) { - type noMethod Region +func (s *ManagedInstanceVersion) MarshalJSON() ([]byte, error) { + type noMethod ManagedInstanceVersion raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RegionAutoscalerList: Contains a list of autoscalers. -type RegionAutoscalerList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` +// Metadata: A metadata key/value entry. +type Metadata struct { + // Fingerprint: Specifies a fingerprint for this request, which is + // essentially a hash of the metadata's contents and used for optimistic + // locking. The fingerprint is initially generated by Compute Engine and + // changes after every request to modify or update metadata. You must + // always provide an up-to-date fingerprint hash in order to update or + // change metadata. + Fingerprint string `json:"fingerprint,omitempty"` - // Items: A list of Autoscaler resources. - Items []*Autoscaler `json:"items,omitempty"` + // Items: Array of key/value pairs. The total size of all keys and + // values must be less than 512 KB. + Items []*MetadataItems `json:"items,omitempty"` - // Kind: Type of resource. + // Kind: [Output Only] Type of the resource. Always compute#metadata for + // metadata. Kind string `json:"kind,omitempty"` - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. 
"Fingerprint") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -11137,49 +14251,36 @@ type RegionAutoscalerList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Fingerprint") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *RegionAutoscalerList) MarshalJSON() ([]byte, error) { - type noMethod RegionAutoscalerList +func (s *Metadata) MarshalJSON() ([]byte, error) { + type noMethod Metadata raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RegionInstanceGroupList: Contains a list of InstanceGroup resources. -type RegionInstanceGroupList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of InstanceGroup resources. - Items []*InstanceGroup `json:"items,omitempty"` - - // Kind: The resource type. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` +type MetadataItems struct { + // Key: Key for the metadata entry. Keys must conform to the following + // regexp: [a-zA-Z0-9-_]+, and be less than 128 bytes in length. This is + // reflected as part of a URL in the metadata server. Additionally, to + // avoid ambiguity, keys must not conflict with any other metadata keys + // for the project. + Key string `json:"key,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Value: Value for the metadata entry. These are free-form strings, and + // only have meaning as interpreted by the image running in the + // instance. The only restriction placed on values is that their size + // must be less than or equal to 262144 bytes (256 KiB). + Value *string `json:"value,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -11187,7 +14288,7 @@ type RegionInstanceGroupList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API + // NullFields is a list of field names (e.g. "Key") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -11196,43 +14297,22 @@ type RegionInstanceGroupList struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupList) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupList +func (s *MetadataItems) MarshalJSON() ([]byte, error) { + type noMethod MetadataItems raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RegionInstanceGroupManagerList: Contains a list of managed instance -// groups. -type RegionInstanceGroupManagerList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of InstanceGroupManager resources. - Items []*InstanceGroupManager `json:"items,omitempty"` - - // Kind: [Output Only] The resource type, which is always - // compute#instanceGroupManagerList for a list of managed instance - // groups that exist in th regional scope. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` +// NamedPort: The named port. For example: . +type NamedPort struct { + // Name: The name for this named port. The name must be 1-63 characters + // long, and comply with RFC1035. + Name string `json:"name,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Port: The port number, which can be a value between 1 and 65535. + Port int64 `json:"port,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "Name") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -11240,7 +14320,7 @@ type RegionInstanceGroupManagerList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API + // NullFields is a list of field names (e.g. "Name") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -11249,48 +14329,79 @@ type RegionInstanceGroupManagerList struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupManagerList) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagerList +func (s *NamedPort) MarshalJSON() ([]byte, error) { + type noMethod NamedPort raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionInstanceGroupManagersAbandonInstancesRequest struct { - // Instances: The URLs of one or more instances to abandon. This can be - // a full URL or a partial URL, such as - // zones/[ZONE]/instances/[INSTANCE_NAME]. - Instances []string `json:"instances,omitempty"` +// Network: Represents a Network resource. Read Networks and Firewalls +// for more information. +type Network struct { + // IPv4Range: The range of internal addresses that are legal on this + // network. This range is a CIDR specification, for example: + // 192.168.0.0/16. Provided by the client when the network is created. + IPv4Range string `json:"IPv4Range,omitempty"` - // ForceSendFields is a list of field names (e.g. "Instances") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` + // AutoCreateSubnetworks: When set to true, the network is created in + // "auto subnet mode". When set to false, the network is in "custom + // subnet mode". + // + // In "auto subnet mode", a newly created network is assigned the + // default CIDR of 10.128.0.0/9 and it automatically creates one + // subnetwork per region. + AutoCreateSubnetworks bool `json:"autoCreateSubnetworks,omitempty"` - // NullFields is a list of field names (e.g. "Instances") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` -func (s *RegionInstanceGroupManagersAbandonInstancesRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagersAbandonInstancesRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` -type RegionInstanceGroupManagersDeleteInstancesRequest struct { - // Instances: The URLs of one or more instances to delete. This can be a - // full URL or a partial URL, such as - // zones/[ZONE]/instances/[INSTANCE_NAME]. - Instances []string `json:"instances,omitempty"` + // GatewayIPv4: A gateway address for default routing to other networks. + // This value is read only and is selected by the Google Compute Engine, + // typically as the first usable address in the IPv4Range. 
+ GatewayIPv4 string `json:"gatewayIPv4,omitempty"` - // ForceSendFields is a list of field names (e.g. "Instances") to + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always compute#network for + // networks. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // Peerings: [Output Only] List of network peerings for the resource. + Peerings []*NetworkPeering `json:"peerings,omitempty"` + + // RoutingConfig: The network-level routing configuration for this + // network. Used by Cloud Router to determine what type of network-wide + // routing behavior to enforce. + RoutingConfig *NetworkRoutingConfig `json:"routingConfig,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Subnetworks: [Output Only] Server-defined fully-qualified URLs for + // all subnetworks in this network. + Subnetworks []string `json:"subnetworks,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "IPv4Range") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -11298,7 +14409,7 @@ type RegionInstanceGroupManagersDeleteInstancesRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Instances") to include in + // NullFields is a list of field names (e.g. "IPv4Range") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -11307,120 +14418,70 @@ type RegionInstanceGroupManagersDeleteInstancesRequest struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupManagersDeleteInstancesRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagersDeleteInstancesRequest +func (s *Network) MarshalJSON() ([]byte, error) { + type noMethod Network raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionInstanceGroupManagersListInstancesResponse struct { - // ManagedInstances: List of managed instances. - ManagedInstances []*ManagedInstance `json:"managedInstances,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. 
- NextPageToken string `json:"nextPageToken,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "ManagedInstances") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "ManagedInstances") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *RegionInstanceGroupManagersListInstancesResponse) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagersListInstancesResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type RegionInstanceGroupManagersRecreateRequest struct { - // Instances: The URLs of one or more instances to recreate. This can be - // a full URL or a partial URL, such as - // zones/[ZONE]/instances/[INSTANCE_NAME]. - Instances []string `json:"instances,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Instances") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Instances") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *RegionInstanceGroupManagersRecreateRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagersRecreateRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} +// NetworkInterface: A network interface resource attached to an +// instance. +type NetworkInterface struct { + // AccessConfigs: An array of configurations for this interface. + // Currently, only one access config, ONE_TO_ONE_NAT, is supported. If + // there are no accessConfigs specified, then this instance will have no + // external internet access. + AccessConfigs []*AccessConfig `json:"accessConfigs,omitempty"` -type RegionInstanceGroupManagersSetAutoHealingRequest struct { - AutoHealingPolicies []*InstanceGroupManagerAutoHealingPolicy `json:"autoHealingPolicies,omitempty"` + // AliasIpRanges: An array of alias IP ranges for this network + // interface. Can only be specified for network interfaces on + // subnet-mode networks. 
+ AliasIpRanges []*AliasIpRange `json:"aliasIpRanges,omitempty"` - // ForceSendFields is a list of field names (e.g. "AutoHealingPolicies") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` + // Kind: [Output Only] Type of the resource. Always + // compute#networkInterface for network interfaces. + Kind string `json:"kind,omitempty"` - // NullFields is a list of field names (e.g. "AutoHealingPolicies") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} + // Name: [Output Only] The name of the network interface, generated by + // the server. For network devices, these are eth0, eth1, etc. + Name string `json:"name,omitempty"` -func (s *RegionInstanceGroupManagersSetAutoHealingRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagersSetAutoHealingRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // Network: URL of the network resource for this instance. When creating + // an instance, if neither the network nor the subnetwork is specified, + // the default network global/networks/default is used; if the network + // is not specified but the subnetwork is specified, the network is + // inferred. + // + // This field is optional when creating a firewall rule. If not + // specified when creating a firewall rule, the default network + // global/networks/default is used. + // + // If you specify this property, you can specify the network as a full + // or partial URL. For example, the following are all valid URLs: + // - + // https://www.googleapis.com/compute/v1/projects/project/global/networks/network + // - projects/project/global/networks/network + // - global/networks/default + Network string `json:"network,omitempty"` -type RegionInstanceGroupManagersSetTargetPoolsRequest struct { - // Fingerprint: Fingerprint of the target pools information, which is a - // hash of the contents. This field is used for optimistic locking when - // you update the target pool entries. This field is optional. - Fingerprint string `json:"fingerprint,omitempty"` + // NetworkIP: An IPv4 internal network address to assign to the instance + // for this network interface. If not specified by the user, an unused + // internal IP is assigned by the system. + NetworkIP string `json:"networkIP,omitempty"` - // TargetPools: The URL of all TargetPool resources to which instances - // in the instanceGroup field are added. The target pools automatically - // apply to all of the instances in the managed instance group. - TargetPools []string `json:"targetPools,omitempty"` + // Subnetwork: The URL of the Subnetwork resource for this instance. If + // the network resource is in legacy mode, do not provide this property. + // If the network is in auto subnet mode, providing the subnetwork is + // optional. 
If the network is in custom subnet mode, then this field + // should be specified. If you specify this property, you can specify + // the subnetwork as a full or partial URL. For example, the following + // are all valid URLs: + // - + // https://www.googleapis.com/compute/v1/projects/project/regions/region/subnetworks/subnetwork + // - regions/region/subnetworks/subnetwork + Subnetwork string `json:"subnetwork,omitempty"` - // ForceSendFields is a list of field names (e.g. "Fingerprint") to + // ForceSendFields is a list of field names (e.g. "AccessConfigs") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -11428,7 +14489,7 @@ type RegionInstanceGroupManagersSetTargetPoolsRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Fingerprint") to include + // NullFields is a list of field names (e.g. "AccessConfigs") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as @@ -11437,50 +14498,23 @@ type RegionInstanceGroupManagersSetTargetPoolsRequest struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupManagersSetTargetPoolsRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagersSetTargetPoolsRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type RegionInstanceGroupManagersSetTemplateRequest struct { - // InstanceTemplate: URL of the InstanceTemplate resource from which all - // new instances will be created. - InstanceTemplate string `json:"instanceTemplate,omitempty"` - - // ForceSendFields is a list of field names (e.g. "InstanceTemplate") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "InstanceTemplate") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *RegionInstanceGroupManagersSetTemplateRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupManagersSetTemplateRequest +func (s *NetworkInterface) MarshalJSON() ([]byte, error) { + type noMethod NetworkInterface raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionInstanceGroupsListInstances struct { +// NetworkList: Contains a list of networks. +type NetworkList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of InstanceWithNamedPorts resources. 
- Items []*InstanceWithNamedPorts `json:"items,omitempty"` + // Items: A list of Network resources. + Items []*Network `json:"items,omitempty"` - // Kind: The resource type. + // Kind: [Output Only] Type of resource. Always compute#networkList for + // lists of networks. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -11494,6 +14528,9 @@ type RegionInstanceGroupsListInstances struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *NetworkListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -11515,28 +14552,48 @@ type RegionInstanceGroupsListInstances struct { NullFields []string `json:"-"` } -func (s *RegionInstanceGroupsListInstances) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupsListInstances +func (s *NetworkList) MarshalJSON() ([]byte, error) { + type noMethod NetworkList raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionInstanceGroupsListInstancesRequest struct { - // InstanceState: Instances in which state should be returned. Valid - // options are: 'ALL', 'RUNNING'. By default, it lists all instances. +// NetworkListWarning: [Output Only] Informational warning message. +type NetworkListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. // // Possible values: - // "ALL" - // "RUNNING" - InstanceState string `json:"instanceState,omitempty"` + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` - // PortName: Name of port user is interested in. It is optional. If it - // is set, only information about this ports will be returned. If it is - // not set, all the named ports will be returned. Always lists all - // instances. - PortName string `json:"portName,omitempty"` + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*NetworkListWarningData `json:"data,omitempty"` - // ForceSendFields is a list of field names (e.g. "InstanceState") to + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -11544,34 +14601,36 @@ type RegionInstanceGroupsListInstancesRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. 
"InstanceState") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *RegionInstanceGroupsListInstancesRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupsListInstancesRequest +func (s *NetworkListWarning) MarshalJSON() ([]byte, error) { + type noMethod NetworkListWarning raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionInstanceGroupsSetNamedPortsRequest struct { - // Fingerprint: The fingerprint of the named ports information for this - // instance group. Use this optional property to prevent conflicts when - // multiple users change the named ports settings concurrently. Obtain - // the fingerprint with the instanceGroups.get method. Then, include the - // fingerprint in your request to ensure that you do not overwrite - // changes that were applied from another concurrent request. - Fingerprint string `json:"fingerprint,omitempty"` +type NetworkListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` - // NamedPorts: The list of named ports to set for this instance group. - NamedPorts []*NamedPort `json:"namedPorts,omitempty"` + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` - // ForceSendFields is a list of field names (e.g. "Fingerprint") to + // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -11579,50 +14638,60 @@ type RegionInstanceGroupsSetNamedPortsRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Fingerprint") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. 
// This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *RegionInstanceGroupsSetNamedPortsRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionInstanceGroupsSetNamedPortsRequest +func (s *NetworkListWarningData) MarshalJSON() ([]byte, error) { + type noMethod NetworkListWarningData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RegionList: Contains a list of region resources. -type RegionList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of Region resources. - Items []*Region `json:"items,omitempty"` +// NetworkPeering: A network peering attached to a network resource. The +// message includes the peering name, peer network, peering state, and a +// flag indicating whether Google Compute Engine should automatically +// create routes for the peering. +type NetworkPeering struct { + // AutoCreateRoutes: Whether full mesh connectivity is created and + // managed automatically. When it is set to true, Google Compute Engine + // will automatically create and manage the routes between two networks + // when the state is ACTIVE. Otherwise, user needs to create routes + // manually to route packets to peer network. + AutoCreateRoutes bool `json:"autoCreateRoutes,omitempty"` - // Kind: [Output Only] Type of resource. Always compute#regionList for - // lists of regions. - Kind string `json:"kind,omitempty"` + // Name: Name of this peering. Provided by the client when the peering + // is created. The name must comply with RFC1035. Specifically, the name + // must be 1-63 characters long and match regular expression + // [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a + // lowercase letter, and all the following characters must be a dash, + // lowercase letter, or digit, except the last character, which cannot + // be a dash. + Name string `json:"name,omitempty"` - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` + // Network: The URL of the peer network. It can be either full URL or + // partial URL. The peer network may belong to a different project. If + // the partial URL does not contain project, it is assumed that the peer + // network is in the same project as the current network. + Network string `json:"network,omitempty"` - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` + // State: [Output Only] State for the peering. + // + // Possible values: + // "ACTIVE" + // "INACTIVE" + State string `json:"state,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // StateDetails: [Output Only] Details about the current state of the + // peering. + StateDetails string `json:"stateDetails,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "AutoCreateRoutes") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -11630,34 +14699,39 @@ type RegionList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "AutoCreateRoutes") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *RegionList) MarshalJSON() ([]byte, error) { - type noMethod RegionList +func (s *NetworkPeering) MarshalJSON() ([]byte, error) { + type noMethod NetworkPeering raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RegionSetLabelsRequest struct { - // LabelFingerprint: The fingerprint of the previous set of labels for - // this resource, used to detect conflicts. The fingerprint is initially - // generated by Compute Engine and changes after every request to modify - // or update labels. You must always provide an up-to-date fingerprint - // hash in order to update or change labels. Make a get() request to the - // resource to get the latest fingerprint. - LabelFingerprint string `json:"labelFingerprint,omitempty"` - - // Labels: The labels to set for this resource. - Labels map[string]string `json:"labels,omitempty"` +// NetworkRoutingConfig: A routing configuration attached to a network +// resource. The message includes the list of routers associated with +// the network, and a flag indicating the type of routing behavior to +// enforce network-wide. +type NetworkRoutingConfig struct { + // RoutingMode: The network-wide routing mode to use. If set to + // REGIONAL, this network's cloud routers will only advertise routes + // with subnetworks of this network in the same region as the router. If + // set to GLOBAL, this network's cloud routers will advertise routes + // with all subnetworks of this network, across regions. + // + // Possible values: + // "GLOBAL" + // "REGIONAL" + RoutingMode string `json:"routingMode,omitempty"` - // ForceSendFields is a list of field names (e.g. "LabelFingerprint") to + // ForceSendFields is a list of field names (e.g. "RoutingMode") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -11665,41 +14739,36 @@ type RegionSetLabelsRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "LabelFingerprint") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. 
It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "RoutingMode") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *RegionSetLabelsRequest) MarshalJSON() ([]byte, error) { - type noMethod RegionSetLabelsRequest +func (s *NetworkRoutingConfig) MarshalJSON() ([]byte, error) { + type noMethod NetworkRoutingConfig raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ResourceCommitment: Commitment for a particular resource (a -// Commitment is composed of one or more of these). -type ResourceCommitment struct { - // Amount: The amount of the resource purchased (in a type-dependent - // unit, such as bytes). For vCPUs, this can just be an integer. For - // memory, this must be provided in MB. Memory must be a multiple of 256 - // MB, with up to 6.5GB of memory per every vCPU. - Amount int64 `json:"amount,omitempty,string"` +type NetworksAddPeeringRequest struct { + // AutoCreateRoutes: Whether Google Compute Engine manages the routes + // automatically. + AutoCreateRoutes bool `json:"autoCreateRoutes,omitempty"` - // Type: Type of resource for which this commitment applies. Possible - // values are VCPU and MEMORY - // - // Possible values: - // "MEMORY" - // "UNSPECIFIED" - // "VCPU" - Type string `json:"type,omitempty"` + // Name: Name of the peering, which should conform to RFC1035. + Name string `json:"name,omitempty"` - // ForceSendFields is a list of field names (e.g. "Amount") to + // PeerNetwork: URL of the peer network. It can be either full URL or + // partial URL. The peer network may belong to a different project. If + // the partial URL does not contain project, it is assumed that the peer + // network is in the same project as the current network. + PeerNetwork string `json:"peerNetwork,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AutoCreateRoutes") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -11707,27 +14776,27 @@ type ResourceCommitment struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Amount") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "AutoCreateRoutes") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. 
This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *ResourceCommitment) MarshalJSON() ([]byte, error) { - type noMethod ResourceCommitment +func (s *NetworksAddPeeringRequest) MarshalJSON() ([]byte, error) { + type noMethod NetworksAddPeeringRequest raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ResourceGroupReference struct { - // Group: A URI referencing one of the instance groups listed in the - // backend service. - Group string `json:"group,omitempty"` +type NetworksRemovePeeringRequest struct { + // Name: Name of the peering, which should conform to RFC1035. + Name string `json:"name,omitempty"` - // ForceSendFields is a list of field names (e.g. "Group") to + // ForceSendFields is a list of field names (e.g. "Name") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -11735,7 +14804,7 @@ type ResourceGroupReference struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Group") to include in API + // NullFields is a list of field names (e.g. "Name") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -11744,116 +14813,118 @@ type ResourceGroupReference struct { NullFields []string `json:"-"` } -func (s *ResourceGroupReference) MarshalJSON() ([]byte, error) { - type noMethod ResourceGroupReference +func (s *NetworksRemovePeeringRequest) MarshalJSON() ([]byte, error) { + type noMethod NetworksRemovePeeringRequest raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Route: Represents a Route resource. A route specifies how certain -// packets should be handled by the network. Routes are associated with -// instances by tags and the set of routes for a particular instance is -// called its routing table. -// -// For each packet leaving an instance, the system searches that -// instance's routing table for a single best matching route. Routes -// match packets by destination IP address, preferring smaller or more -// specific ranges over larger ones. If there is a tie, the system -// selects the route with the smallest priority value. If there is still -// a tie, it uses the layer three and four packet headers to select just -// one of the remaining matching routes. The packet is then forwarded as -// specified by the nextHop field of the winning route - either to -// another instance destination, an instance gateway, or a Google -// Compute Engine-operated gateway. -// -// Packets that do not match any route in the sending instance's routing -// table are dropped. -type Route struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. +// Operation: An Operation resource, used to manage asynchronous API +// requests. +type Operation struct { + // ClientOperationId: [Output Only] Reserved for future use. + ClientOperationId string `json:"clientOperationId,omitempty"` + + // CreationTimestamp: [Deprecated] This field is deprecated. CreationTimestamp string `json:"creationTimestamp,omitempty"` - // Description: An optional description of this resource. 
Provide this - // property when you create the resource. + // Description: [Output Only] A textual description of the operation, + // which is set when the operation is created. Description string `json:"description,omitempty"` - // DestRange: The destination range of outgoing packets that this route - // applies to. Only IPv4 is supported. - DestRange string `json:"destRange,omitempty"` + // EndTime: [Output Only] The time that this operation was completed. + // This value is in RFC3339 text format. + EndTime string `json:"endTime,omitempty"` + + // Error: [Output Only] If errors are generated during processing of the + // operation, this field will be populated. + Error *OperationError `json:"error,omitempty"` + + // HttpErrorMessage: [Output Only] If the operation fails, this field + // contains the HTTP error message that was returned, such as NOT FOUND. + HttpErrorMessage string `json:"httpErrorMessage,omitempty"` + + // HttpErrorStatusCode: [Output Only] If the operation fails, this field + // contains the HTTP error status code that was returned. For example, a + // 404 means the resource was not found. + HttpErrorStatusCode int64 `json:"httpErrorStatusCode,omitempty"` // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` - // Kind: [Output Only] Type of this resource. Always compute#routes for - // Route resources. + // InsertTime: [Output Only] The time that this operation was requested. + // This value is in RFC3339 text format. + InsertTime string `json:"insertTime,omitempty"` + + // Kind: [Output Only] Type of the resource. Always compute#operation + // for Operation resources. Kind string `json:"kind,omitempty"` - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. + // Name: [Output Only] Name of the resource. Name string `json:"name,omitempty"` - // Network: Fully-qualified URL of the network that this route applies - // to. - Network string `json:"network,omitempty"` + // OperationType: [Output Only] The type of operation, such as insert, + // update, or delete, and so on. + OperationType string `json:"operationType,omitempty"` - // NextHopGateway: The URL to a gateway that should handle matching - // packets. You can only specify the internet gateway using a full or - // partial valid URL: - // projects//global/gateways/default-internet-gateway - NextHopGateway string `json:"nextHopGateway,omitempty"` + // Progress: [Output Only] An optional progress indicator that ranges + // from 0 to 100. There is no requirement that this be linear or support + // any granularity of operations. This should not be used to guess when + // the operation will be complete. This number should monotonically + // increase as the operation progresses. + Progress int64 `json:"progress,omitempty"` - // NextHopInstance: The URL to an instance that should handle matching - // packets. You can specify this as a full or partial URL. 
For - // example: - // https://www.googleapis.com/compute/v1/projects/project/zones/ - // zone/instances/ - NextHopInstance string `json:"nextHopInstance,omitempty"` + // Region: [Output Only] The URL of the region where the operation + // resides. Only available when performing regional operations. + Region string `json:"region,omitempty"` - // NextHopIp: The network IP address of an instance that should handle - // matching packets. Only IPv4 is supported. - NextHopIp string `json:"nextHopIp,omitempty"` + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` - // NextHopNetwork: The URL of the local network if it should handle - // matching packets. - NextHopNetwork string `json:"nextHopNetwork,omitempty"` + // StartTime: [Output Only] The time that this operation was started by + // the server. This value is in RFC3339 text format. + StartTime string `json:"startTime,omitempty"` - // NextHopPeering: [Output Only] The network peering name that should - // handle matching packets, which should conform to RFC1035. - NextHopPeering string `json:"nextHopPeering,omitempty"` + // Status: [Output Only] The status of the operation, which can be one + // of the following: PENDING, RUNNING, or DONE. + // + // Possible values: + // "DONE" + // "PENDING" + // "RUNNING" + Status string `json:"status,omitempty"` - // NextHopVpnTunnel: The URL to a VpnTunnel that should handle matching - // packets. - NextHopVpnTunnel string `json:"nextHopVpnTunnel,omitempty"` + // StatusMessage: [Output Only] An optional textual description of the + // current status of the operation. + StatusMessage string `json:"statusMessage,omitempty"` - // Priority: The priority of this route. Priority is used to break ties - // in cases where there is more than one matching route of equal prefix - // length. In the case of two routes with equal prefix length, the one - // with the lowest-numbered priority value wins. Default value is 1000. - // Valid range is 0 through 65535. - Priority int64 `json:"priority,omitempty"` + // TargetId: [Output Only] The unique target ID, which identifies a + // specific incarnation of the target resource. + TargetId uint64 `json:"targetId,omitempty,string"` - // SelfLink: [Output Only] Server-defined fully-qualified URL for this - // resource. - SelfLink string `json:"selfLink,omitempty"` + // TargetLink: [Output Only] The URL of the resource that the operation + // modifies. For operations related to creating a snapshot, this points + // to the persistent disk that the snapshot was created from. + TargetLink string `json:"targetLink,omitempty"` - // Tags: A list of instance tags to which this route applies. - Tags []string `json:"tags,omitempty"` + // User: [Output Only] User who requested the operation, for example: + // user@example.com. + User string `json:"user,omitempty"` - // Warnings: [Output Only] If potential misconfigurations are detected - // for this route, this field will be populated with warning messages. - Warnings []*RouteWarnings `json:"warnings,omitempty"` + // Warnings: [Output Only] If warning messages are generated during + // processing of the operation, this field will be populated. + Warnings []*OperationWarnings `json:"warnings,omitempty"` + + // Zone: [Output Only] The URL of the zone where the operation resides. + // Only available when performing per-zone operations. + Zone string `json:"zone,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. 
googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // ForceSendFields is a list of field names (e.g. "ClientOperationId") // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -11861,7 +14932,7 @@ type Route struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to + // NullFields is a list of field names (e.g. "ClientOperationId") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -11871,13 +14942,77 @@ type Route struct { NullFields []string `json:"-"` } -func (s *Route) MarshalJSON() ([]byte, error) { - type noMethod Route +func (s *Operation) MarshalJSON() ([]byte, error) { + type noMethod Operation raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouteWarnings struct { +// OperationError: [Output Only] If errors are generated during +// processing of the operation, this field will be populated. +type OperationError struct { + // Errors: [Output Only] The array of errors encountered while + // processing this operation. + Errors []*OperationErrorErrors `json:"errors,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Errors") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Errors") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *OperationError) MarshalJSON() ([]byte, error) { + type noMethod OperationError + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type OperationErrorErrors struct { + // Code: [Output Only] The error type identifier for this error. + Code string `json:"code,omitempty"` + + // Location: [Output Only] Indicates the field in the request that + // caused the error. This property is optional. + Location string `json:"location,omitempty"` + + // Message: [Output Only] An optional, human-readable error message. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *OperationErrorErrors) MarshalJSON() ([]byte, error) { + type noMethod OperationErrorErrors + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type OperationWarnings struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -11905,7 +15040,7 @@ type RouteWarnings struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*RouteWarningsData `json:"data,omitempty"` + Data []*OperationWarningsData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -11928,13 +15063,13 @@ type RouteWarnings struct { NullFields []string `json:"-"` } -func (s *RouteWarnings) MarshalJSON() ([]byte, error) { - type noMethod RouteWarnings +func (s *OperationWarnings) MarshalJSON() ([]byte, error) { + type noMethod OperationWarnings raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouteWarningsData struct { +type OperationWarningsData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -11965,22 +15100,22 @@ type RouteWarningsData struct { NullFields []string `json:"-"` } -func (s *RouteWarningsData) MarshalJSON() ([]byte, error) { - type noMethod RouteWarningsData +func (s *OperationWarningsData) MarshalJSON() ([]byte, error) { + type noMethod OperationWarningsData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RouteList: Contains a list of Route resources. -type RouteList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. +type OperationAggregatedList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. Id string `json:"id,omitempty"` - // Items: A list of Route resources. - Items []*Route `json:"items,omitempty"` + // Items: [Output Only] A map of scoped operation lists. + Items map[string]OperationsScopedList `json:"items,omitempty"` - // Kind: Type of resource. + // Kind: [Output Only] Type of resource. Always + // compute#operationAggregatedList for aggregated lists of operations. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -11994,6 +15129,9 @@ type RouteList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *OperationAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. 
googleapi.ServerResponse `json:"-"` @@ -12015,66 +15153,86 @@ type RouteList struct { NullFields []string `json:"-"` } -func (s *RouteList) MarshalJSON() ([]byte, error) { - type noMethod RouteList +func (s *OperationAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod OperationAggregatedList raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Router: Router resource. -type Router struct { - // Bgp: BGP information specific to this router. - Bgp *RouterBgp `json:"bgp,omitempty"` - - // BgpPeers: BGP information that needs to be configured into the - // routing stack to establish the BGP peering. It must specify peer ASN - // and either interface name, IP, or peer IP. Please refer to RFC4273. - BgpPeers []*RouterBgpPeer `json:"bgpPeers,omitempty"` - - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` +// OperationAggregatedListWarning: [Output Only] Informational warning +// message. +type OperationAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` - // Interfaces: Router interfaces. Each interface requires either one - // linked resource (e.g. linkedVpnTunnel), or IP address and IP address - // range (e.g. ipRange), or both. - Interfaces []*RouterInterface `json:"interfaces,omitempty"` + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*OperationAggregatedListWarningData `json:"data,omitempty"` - // Kind: [Output Only] Type of resource. Always compute#router for - // routers. - Kind string `json:"kind,omitempty"` + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // Network: URI of the network to which this router belongs. - Network string `json:"network,omitempty"` + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} - // Region: [Output Only] URI of the region where the router resides. - Region string `json:"region,omitempty"` +func (s *OperationAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod OperationAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` +type OperationAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` - // ForceSendFields is a list of field names (e.g. "Bgp") to + // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -12082,7 +15240,7 @@ type Router struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Bgp") to include in API + // NullFields is a list of field names (e.g. "Key") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -12091,22 +15249,23 @@ type Router struct { NullFields []string `json:"-"` } -func (s *Router) MarshalJSON() ([]byte, error) { - type noMethod Router +func (s *OperationAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod OperationAggregatedListWarningData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RouterAggregatedList: Contains a list of routers. -type RouterAggregatedList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. 
+// OperationList: Contains a list of Operation resources. +type OperationList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. Id string `json:"id,omitempty"` - // Items: A list of Router resources. - Items map[string]RoutersScopedList `json:"items,omitempty"` + // Items: [Output Only] A list of Operation resources. + Items []*Operation `json:"items,omitempty"` - // Kind: Type of resource. + // Kind: [Output Only] Type of resource. Always compute#operations for + // Operations resource. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -12120,6 +15279,9 @@ type RouterAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *OperationListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -12141,20 +15303,48 @@ type RouterAggregatedList struct { NullFields []string `json:"-"` } -func (s *RouterAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod RouterAggregatedList +func (s *OperationList) MarshalJSON() ([]byte, error) { + type noMethod OperationList raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouterBgp struct { - // Asn: Local BGP Autonomous System Number (ASN). Must be an RFC6996 - // private ASN, either 16-bit or 32-bit. The value will be fixed for - // this router resource. All VPN tunnels that link to this router will - // have the same local ASN. - Asn int64 `json:"asn,omitempty"` +// OperationListWarning: [Output Only] Informational warning message. +type OperationListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` - // ForceSendFields is a list of field names (e.g. "Asn") to + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*OperationListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -12162,7 +15352,7 @@ type RouterBgp struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. 
"Asn") to include in API + // NullFields is a list of field names (e.g. "Code") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -12171,81 +15361,58 @@ type RouterBgp struct { NullFields []string `json:"-"` } -func (s *RouterBgp) MarshalJSON() ([]byte, error) { - type noMethod RouterBgp +func (s *OperationListWarning) MarshalJSON() ([]byte, error) { + type noMethod OperationListWarning raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouterBgpPeer struct { - // AdvertisedRoutePriority: The priority of routes advertised to this - // BGP peer. In the case where there is more than one matching route of - // maximum length, the routes with lowest priority value win. - AdvertisedRoutePriority int64 `json:"advertisedRoutePriority,omitempty"` - - // InterfaceName: Name of the interface the BGP peer is associated with. - InterfaceName string `json:"interfaceName,omitempty"` - - // IpAddress: IP address of the interface inside Google Cloud Platform. - // Only IPv4 is supported. - IpAddress string `json:"ipAddress,omitempty"` - - // Name: Name of this BGP peer. The name must be 1-63 characters long - // and comply with RFC1035. - Name string `json:"name,omitempty"` - - // PeerAsn: Peer BGP Autonomous System Number (ASN). For VPN use case, - // this value can be different for every tunnel. - PeerAsn int64 `json:"peerAsn,omitempty"` +type OperationListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` - // PeerIpAddress: IP address of the BGP interface outside Google cloud. - // Only IPv4 is supported. - PeerIpAddress string `json:"peerIpAddress,omitempty"` + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` - // ForceSendFields is a list of field names (e.g. - // "AdvertisedRoutePriority") to unconditionally include in API - // requests. By default, fields with empty values are omitted from API - // requests. However, any non-pointer, non-interface field appearing in - // ForceSendFields will be sent to the server regardless of whether the - // field is empty or not. This may be used to include empty fields in - // Patch requests. + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AdvertisedRoutePriority") - // to include in API requests with the JSON null value. By default, - // fields with empty values are omitted from API requests. 
However, any - // field with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *RouterBgpPeer) MarshalJSON() ([]byte, error) { - type noMethod RouterBgpPeer +func (s *OperationListWarningData) MarshalJSON() ([]byte, error) { + type noMethod OperationListWarningData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouterInterface struct { - // IpRange: IP address and range of the interface. The IP range must be - // in the RFC3927 link-local IP space. The value must be a - // CIDR-formatted string, for example: 169.254.0.1/30. NOTE: Do not - // truncate the address as it represents the IP address of the - // interface. - IpRange string `json:"ipRange,omitempty"` - - // LinkedVpnTunnel: URI of the linked VPN tunnel. It must be in the same - // region as the router. Each interface can have at most one linked - // resource and it could either be a VPN Tunnel or an interconnect - // attachment. - LinkedVpnTunnel string `json:"linkedVpnTunnel,omitempty"` +type OperationsScopedList struct { + // Operations: [Output Only] List of operations contained in this scope. + Operations []*Operation `json:"operations,omitempty"` - // Name: Name of this interface entry. The name must be 1-63 characters - // long and comply with RFC1035. - Name string `json:"name,omitempty"` + // Warning: [Output Only] Informational warning which replaces the list + // of operations when the list is empty. + Warning *OperationsScopedListWarning `json:"warning,omitempty"` - // ForceSendFields is a list of field names (e.g. "IpRange") to + // ForceSendFields is a list of field names (e.g. "Operations") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -12253,7 +15420,7 @@ type RouterInterface struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "IpRange") to include in + // NullFields is a list of field names (e.g. "Operations") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -12262,41 +15429,49 @@ type RouterInterface struct { NullFields []string `json:"-"` } -func (s *RouterInterface) MarshalJSON() ([]byte, error) { - type noMethod RouterInterface +func (s *OperationsScopedList) MarshalJSON() ([]byte, error) { + type noMethod OperationsScopedList raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RouterList: Contains a list of Router resources. -type RouterList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. 
- Id string `json:"id,omitempty"` - - // Items: A list of Router resources. - Items []*Router `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#router for - // routers. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` +// OperationsScopedListWarning: [Output Only] Informational warning +// which replaces the list of operations when the list is empty. +type OperationsScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*OperationsScopedListWarningData `json:"data,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -12304,7 +15479,7 @@ type RouterList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API + // NullFields is a list of field names (e.g. "Code") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -12313,25 +15488,27 @@ type RouterList struct { NullFields []string `json:"-"` } -func (s *RouterList) MarshalJSON() ([]byte, error) { - type noMethod RouterList +func (s *OperationsScopedListWarning) MarshalJSON() ([]byte, error) { + type noMethod OperationsScopedListWarning raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouterStatus struct { - // BestRoutes: Best routes for this router's network. 
- BestRoutes []*Route `json:"bestRoutes,omitempty"` - - // BestRoutesForRouter: Best routes learned by this router. - BestRoutesForRouter []*Route `json:"bestRoutesForRouter,omitempty"` - - BgpPeerStatus []*RouterStatusBgpPeerStatus `json:"bgpPeerStatus,omitempty"` +type OperationsScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` - // Network: URI of the network to which this router belongs. - Network string `json:"network,omitempty"` + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` - // ForceSendFields is a list of field names (e.g. "BestRoutes") to + // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -12339,8 +15516,8 @@ type RouterStatus struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "BestRoutes") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -12348,50 +15525,38 @@ type RouterStatus struct { NullFields []string `json:"-"` } -func (s *RouterStatus) MarshalJSON() ([]byte, error) { - type noMethod RouterStatus +func (s *OperationsScopedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod OperationsScopedListWarningData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouterStatusBgpPeerStatus struct { - // AdvertisedRoutes: Routes that were advertised to the remote BGP peer - AdvertisedRoutes []*Route `json:"advertisedRoutes,omitempty"` - - // IpAddress: IP address of the local BGP interface. - IpAddress string `json:"ipAddress,omitempty"` +// PathMatcher: A matcher for the path portion of the URL. The +// BackendService from the longest-matched rule will serve the URL. If +// no rule was matched, the default service will be used. +type PathMatcher struct { + // DefaultService: The full or partial URL to the BackendService + // resource. This will be used if none of the pathRules defined by this + // PathMatcher is matched by the URL's path portion. 
For example, the + // following are all valid URLs to a BackendService resource: + // - + // https://www.googleapis.com/compute/v1/projects/project/global/backendServices/backendService + // - compute/v1/projects/project/global/backendServices/backendService + // + // - global/backendServices/backendService + DefaultService string `json:"defaultService,omitempty"` - // LinkedVpnTunnel: URL of the VPN tunnel that this BGP peer controls. - LinkedVpnTunnel string `json:"linkedVpnTunnel,omitempty"` + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` - // Name: Name of this BGP peer. Unique within the Routers resource. + // Name: The name to which this PathMatcher is referred by the HostRule. Name string `json:"name,omitempty"` - // NumLearnedRoutes: Number of routes learned from the remote BGP Peer. - NumLearnedRoutes int64 `json:"numLearnedRoutes,omitempty"` - - // PeerIpAddress: IP address of the remote BGP interface. - PeerIpAddress string `json:"peerIpAddress,omitempty"` - - // State: BGP state as specified in RFC1771. - State string `json:"state,omitempty"` - - // Status: Status of the BGP peer: {UP, DOWN} - // - // Possible values: - // "DOWN" - // "UNKNOWN" - // "UP" - Status string `json:"status,omitempty"` - - // Uptime: Time this session has been up. Format: 14 years, 51 weeks, 6 - // days, 23 hours, 59 minutes, 59 seconds - Uptime string `json:"uptime,omitempty"` - - // UptimeSeconds: Time this session has been up, in seconds. Format: 145 - UptimeSeconds string `json:"uptimeSeconds,omitempty"` + // PathRules: The list of path rules. + PathRules []*PathRule `json:"pathRules,omitempty"` - // ForceSendFields is a list of field names (e.g. "AdvertisedRoutes") to + // ForceSendFields is a list of field names (e.g. "DefaultService") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -12399,7 +15564,7 @@ type RouterStatusBgpPeerStatus struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AdvertisedRoutes") to + // NullFields is a list of field names (e.g. "DefaultService") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -12409,23 +15574,26 @@ type RouterStatusBgpPeerStatus struct { NullFields []string `json:"-"` } -func (s *RouterStatusBgpPeerStatus) MarshalJSON() ([]byte, error) { - type noMethod RouterStatusBgpPeerStatus +func (s *PathMatcher) MarshalJSON() ([]byte, error) { + type noMethod PathMatcher raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RouterStatusResponse struct { - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` - - Result *RouterStatus `json:"result,omitempty"` +// PathRule: A path-matching rule for a URL. If matched, will use the +// specified BackendService to handle the traffic arriving at this URL. +type PathRule struct { + // Paths: The list of path patterns to match. Each must start with / and + // the only place a * is allowed is at the end following a /. The string + // fed to the path matcher does not include any text after the first ? 
+ // or #, and those chars are not allowed here. + Paths []string `json:"paths,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Service: The URL of the BackendService resource if this rule is + // matched. + Service string `json:"service,omitempty"` - // ForceSendFields is a list of field names (e.g. "Kind") to + // ForceSendFields is a list of field names (e.g. "Paths") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -12433,7 +15601,7 @@ type RouterStatusResponse struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Kind") to include in API + // NullFields is a list of field names (e.g. "Paths") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -12442,21 +15610,76 @@ type RouterStatusResponse struct { NullFields []string `json:"-"` } -func (s *RouterStatusResponse) MarshalJSON() ([]byte, error) { - type noMethod RouterStatusResponse +func (s *PathRule) MarshalJSON() ([]byte, error) { + type noMethod PathRule raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RoutersPreviewResponse struct { - // Resource: Preview of given router. - Resource *Router `json:"resource,omitempty"` +// Policy: Defines an Identity and Access Management (IAM) policy. It is +// used to specify access control policies for Cloud Platform +// resources. +// +// +// +// A `Policy` consists of a list of `bindings`. A `Binding` binds a list +// of `members` to a `role`, where the members can be user accounts, +// Google groups, Google domains, and service accounts. A `role` is a +// named list of permissions defined by IAM. +// +// **Example** +// +// { "bindings": [ { "role": "roles/owner", "members": [ +// "user:mike@example.com", "group:admins@example.com", +// "domain:google.com", +// "serviceAccount:my-other-app@appspot.gserviceaccount.com", ] }, { +// "role": "roles/viewer", "members": ["user:sean@example.com"] } ] +// } +// +// For a description of IAM and its features, see the [IAM developer's +// guide](https://cloud.google.com/iam). +type Policy struct { + // AuditConfigs: Specifies cloud audit logging configuration for this + // policy. + AuditConfigs []*AuditConfig `json:"auditConfigs,omitempty"` + + // Bindings: Associates a list of `members` to a `role`. `bindings` with + // no members will result in an error. + Bindings []*Binding `json:"bindings,omitempty"` + + // Etag: `etag` is used for optimistic concurrency control as a way to + // help prevent simultaneous updates of a policy from overwriting each + // other. It is strongly suggested that systems make use of the `etag` + // in the read-modify-write cycle to perform policy updates in order to + // avoid race conditions: An `etag` is returned in the response to + // `getIamPolicy`, and systems are expected to put that etag in the + // request to `setIamPolicy` to ensure that their change will be applied + // to the same version of the policy. + // + // If no `etag` is provided in the call to `setIamPolicy`, then the + // existing policy is overwritten blindly. 
+ Etag string `json:"etag,omitempty"` + + IamOwned bool `json:"iamOwned,omitempty"` + + // Rules: If more than one rule is specified, the rules are applied in + // the following manner: - All matching LOG rules are always applied. - + // If any DENY/DENY_WITH_LOG rule matches, permission is denied. Logging + // will be applied if one or more matching rule requires logging. - + // Otherwise, if any ALLOW/ALLOW_WITH_LOG rule matches, permission is + // granted. Logging will be applied if one or more matching rule + // requires logging. - Otherwise, if no rule applies, permission is + // denied. + Rules []*Rule `json:"rules,omitempty"` + + // Version: Version of the `Policy`. The default version is 0. + Version int64 `json:"version,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Resource") to + // ForceSendFields is a list of field names (e.g. "AuditConfigs") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -12464,89 +15687,109 @@ type RoutersPreviewResponse struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Resource") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "AuditConfigs") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *RoutersPreviewResponse) MarshalJSON() ([]byte, error) { - type noMethod RoutersPreviewResponse +func (s *Policy) MarshalJSON() ([]byte, error) { + type noMethod Policy raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RoutersScopedList struct { - // Routers: List of routers contained in this scope. - Routers []*Router `json:"routers,omitempty"` +// Project: A Project resource. Projects can only be created in the +// Google Cloud Platform Console. Unless marked otherwise, values can +// only be modified in the console. +type Project struct { + // CommonInstanceMetadata: Metadata key/value pairs available to all + // instances contained in this project. See Custom metadata for more + // information. + CommonInstanceMetadata *Metadata `json:"commonInstanceMetadata,omitempty"` - // Warning: Informational warning which replaces the list of routers - // when the list is empty. - Warning *RoutersScopedListWarning `json:"warning,omitempty"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` - // ForceSendFields is a list of field names (e.g. "Routers") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. 
However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // DefaultServiceAccount: [Output Only] Default service account used by + // VMs running in this project. + DefaultServiceAccount string `json:"defaultServiceAccount,omitempty"` + + // Description: An optional textual description of the resource. + Description string `json:"description,omitempty"` + + // EnabledFeatures: Restricted features enabled for use on this project. + EnabledFeatures []string `json:"enabledFeatures,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. This is not the project ID, and + // is just a unique ID used by Compute Engine to identify resources. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always compute#project for + // projects. + Kind string `json:"kind,omitempty"` + + // Name: The project ID. For example: my-example-project. Use the + // project ID to make requests to Compute Engine. + Name string `json:"name,omitempty"` + + // Quotas: [Output Only] Quotas assigned to this project. + Quotas []*Quota `json:"quotas,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // UsageExportLocation: The naming prefix for daily usage reports and + // the Google Cloud Storage bucket where they are stored. + UsageExportLocation *UsageExportLocation `json:"usageExportLocation,omitempty"` + + // XpnProjectStatus: [Output Only] The role this project has in a shared + // VPC configuration. Currently only HOST projects are differentiated. + // + // Possible values: + // "HOST" + // "UNSPECIFIED_XPN_PROJECT_STATUS" + XpnProjectStatus string `json:"xpnProjectStatus,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. + // "CommonInstanceMetadata") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Routers") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "CommonInstanceMetadata") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
NullFields []string `json:"-"` } -func (s *RoutersScopedList) MarshalJSON() ([]byte, error) { - type noMethod RoutersScopedList +func (s *Project) MarshalJSON() ([]byte, error) { + type noMethod Project raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// RoutersScopedListWarning: Informational warning which replaces the -// list of routers when the list is empty. -type RoutersScopedListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. - // - // Possible values: - // "CLEANUP_FAILED" - // "DEPRECATED_RESOURCE_USED" - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - // "FIELD_VALUE_OVERRIDEN" - // "INJECTED_KERNELS_DEPRECATED" - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - // "NEXT_HOP_CANNOT_IP_FORWARD" - // "NEXT_HOP_INSTANCE_NOT_FOUND" - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - // "NEXT_HOP_NOT_RUNNING" - // "NOT_CRITICAL_ERROR" - // "NO_RESULTS_ON_PAGE" - // "REQUIRED_TOS_AGREEMENT" - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - // "RESOURCE_NOT_DELETED" - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - // "UNREACHABLE" - Code string `json:"code,omitempty"` - - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: - // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*RoutersScopedListWarningData `json:"data,omitempty"` - - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` +type ProjectsDisableXpnResourceRequest struct { + // XpnResource: Service resource (a.k.a service project) ID. + XpnResource *XpnResourceId `json:"xpnResource,omitempty"` - // ForceSendFields is a list of field names (e.g. "Code") to + // ForceSendFields is a list of field names (e.g. "XpnResource") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -12554,36 +15797,26 @@ type RoutersScopedListWarning struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Code") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "XpnResource") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *RoutersScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod RoutersScopedListWarning +func (s *ProjectsDisableXpnResourceRequest) MarshalJSON() ([]byte, error) { + type noMethod ProjectsDisableXpnResourceRequest raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type RoutersScopedListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. 
For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` - - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` +type ProjectsEnableXpnResourceRequest struct { + // XpnResource: Service resource (a.k.a service project) ID. + XpnResource *XpnResourceId `json:"xpnResource,omitempty"` - // ForceSendFields is a list of field names (e.g. "Key") to + // ForceSendFields is a list of field names (e.g. "XpnResource") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -12591,59 +15824,44 @@ type RoutersScopedListWarningData struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "XpnResource") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *RoutersScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod RoutersScopedListWarningData +func (s *ProjectsEnableXpnResourceRequest) MarshalJSON() ([]byte, error) { + type noMethod ProjectsEnableXpnResourceRequest raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Rule: A rule to be applied in a Policy. -type Rule struct { - // Action: Required - // - // Possible values: - // "ALLOW" - // "ALLOW_WITH_LOG" - // "DENY" - // "DENY_WITH_LOG" - // "LOG" - // "NO_ACTION" - Action string `json:"action,omitempty"` - - // Conditions: Additional restrictions that must be met - Conditions []*Condition `json:"conditions,omitempty"` - - // Description: Human-readable description of the rule. - Description string `json:"description,omitempty"` - - // Ins: If one or more 'in' clauses are specified, the rule matches if - // the PRINCIPAL/AUTHORITY_SELECTOR is in at least one of these entries. - Ins []string `json:"ins,omitempty"` +type ProjectsGetXpnResources struct { + // Kind: [Output Only] Type of resource. Always + // compute#projectsGetXpnResources for lists of service resources (a.k.a + // service projects) + Kind string `json:"kind,omitempty"` - // LogConfigs: The config returned to callers of - // tech.iam.IAM.CheckPolicy for any entries that match the LOG action. - LogConfigs []*LogConfig `json:"logConfigs,omitempty"` + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. 
If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` - // NotIns: If one or more 'not_in' clauses are specified, the rule - // matches if the PRINCIPAL/AUTHORITY_SELECTOR is in none of the - // entries. - NotIns []string `json:"notIns,omitempty"` + // Resources: Service resources (a.k.a service projects) attached to + // this project as their shared VPC host. + Resources []*XpnResourceId `json:"resources,omitempty"` - // Permissions: A permission is a string of form '..' (e.g., - // 'storage.buckets.list'). A value of '*' matches all permissions, and - // a verb part of '*' (e.g., 'storage.buckets.*') matches all verbs. - Permissions []string `json:"permissions,omitempty"` + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Action") to + // ForceSendFields is a list of field names (e.g. "Kind") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -12651,7 +15869,7 @@ type Rule struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Action") to include in API + // NullFields is a list of field names (e.g. "Kind") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -12660,42 +15878,19 @@ type Rule struct { NullFields []string `json:"-"` } -func (s *Rule) MarshalJSON() ([]byte, error) { - type noMethod Rule +func (s *ProjectsGetXpnResources) MarshalJSON() ([]byte, error) { + type noMethod ProjectsGetXpnResources raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SSLHealthCheck struct { - // Port: The TCP port number for the health check request. The default - // value is 443. Valid values are 1 through 65535. - Port int64 `json:"port,omitempty"` - - // PortName: Port name as defined in InstanceGroup#NamedPort#name. If - // both port and port_name are defined, port takes precedence. - PortName string `json:"portName,omitempty"` - - // ProxyHeader: Specifies the type of proxy header to append before - // sending data to the backend, either NONE or PROXY_V1. The default is - // NONE. - // - // Possible values: - // "NONE" - // "PROXY_V1" - ProxyHeader string `json:"proxyHeader,omitempty"` - - // Request: The application data to send once the SSL connection has - // been established (default value is empty). If both request and - // response are empty, the connection establishment alone will indicate - // health. The request data can only be ASCII. - Request string `json:"request,omitempty"` - - // Response: The bytes to match against the beginning of the response - // data. If left empty (the default value), any response will indicate - // health. The response data can only be ASCII. 
- Response string `json:"response,omitempty"` +type ProjectsListXpnHostsRequest struct { + // Organization: Optional organization ID managed by Cloud Resource + // Manager, for which to list shared VPC host projects. If not + // specified, the organization will be inferred from the project. + Organization string `json:"organization,omitempty"` - // ForceSendFields is a list of field names (e.g. "Port") to + // ForceSendFields is a list of field names (e.g. "Organization") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -12703,102 +15898,78 @@ type SSLHealthCheck struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Port") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Organization") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *SSLHealthCheck) MarshalJSON() ([]byte, error) { - type noMethod SSLHealthCheck +func (s *ProjectsListXpnHostsRequest) MarshalJSON() ([]byte, error) { + type noMethod ProjectsListXpnHostsRequest raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Scheduling: Sets the scheduling options for an Instance. -type Scheduling struct { - // AutomaticRestart: Specifies whether the instance should be - // automatically restarted if it is terminated by Compute Engine (not - // terminated by a user). You can only set the automatic restart option - // for standard instances. Preemptible instances cannot be automatically - // restarted. - // - // By default, this is set to true so an instance is automatically - // restarted if it is terminated by Compute Engine. - AutomaticRestart *bool `json:"automaticRestart,omitempty"` +// Quota: A quotas entry. +type Quota struct { + // Limit: [Output Only] Quota limit for this metric. + Limit float64 `json:"limit,omitempty"` - // OnHostMaintenance: Defines the maintenance behavior for this - // instance. For standard instances, the default behavior is MIGRATE. - // For preemptible instances, the default and only possible behavior is - // TERMINATE. For more information, see Setting Instance Scheduling - // Options. + // Metric: [Output Only] Name of the quota metric. // // Possible values: - // "MIGRATE" - // "TERMINATE" - OnHostMaintenance string `json:"onHostMaintenance,omitempty"` - - // Preemptible: Defines whether the instance is preemptible. This can - // only be set during instance creation, it cannot be set or changed - // after the instance has been created. - Preemptible bool `json:"preemptible,omitempty"` - - // ForceSendFields is a list of field names (e.g. "AutomaticRestart") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. 
However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "AutomaticRestart") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *Scheduling) MarshalJSON() ([]byte, error) { - type noMethod Scheduling - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// SerialPortOutput: An instance's serial console output. -type SerialPortOutput struct { - // Contents: [Output Only] The contents of the console output. - Contents string `json:"contents,omitempty"` - - // Kind: [Output Only] Type of the resource. Always - // compute#serialPortOutput for serial port output. - Kind string `json:"kind,omitempty"` - - // Next: [Output Only] The position of the next byte of content from the - // serial console output. Use this value in the next request as the - // start parameter. - Next int64 `json:"next,omitempty,string"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // Start: The starting byte position of the output that was returned. - // This should match the start parameter sent with the request. If the - // serial console output exceeds the size of the buffer, older output - // will be overwritten by newer content and the start values will be - // mismatched. - Start int64 `json:"start,omitempty,string"` + // "AUTOSCALERS" + // "BACKEND_BUCKETS" + // "BACKEND_SERVICES" + // "COMMITMENTS" + // "CPUS" + // "CPUS_ALL_REGIONS" + // "DISKS_TOTAL_GB" + // "FIREWALLS" + // "FORWARDING_RULES" + // "HEALTH_CHECKS" + // "IMAGES" + // "INSTANCES" + // "INSTANCE_GROUPS" + // "INSTANCE_GROUP_MANAGERS" + // "INSTANCE_TEMPLATES" + // "INTERCONNECTS" + // "IN_USE_ADDRESSES" + // "LOCAL_SSD_TOTAL_GB" + // "NETWORKS" + // "NVIDIA_K80_GPUS" + // "NVIDIA_P100_GPUS" + // "PREEMPTIBLE_CPUS" + // "PREEMPTIBLE_LOCAL_SSD_GB" + // "REGIONAL_AUTOSCALERS" + // "REGIONAL_INSTANCE_GROUP_MANAGERS" + // "ROUTERS" + // "ROUTES" + // "SECURITY_POLICIES" + // "SECURITY_POLICY_RULES" + // "SNAPSHOTS" + // "SSD_TOTAL_GB" + // "SSL_CERTIFICATES" + // "STATIC_ADDRESSES" + // "SUBNETWORKS" + // "TARGET_HTTPS_PROXIES" + // "TARGET_HTTP_PROXIES" + // "TARGET_INSTANCES" + // "TARGET_POOLS" + // "TARGET_SSL_PROXIES" + // "TARGET_TCP_PROXIES" + // "TARGET_VPN_GATEWAYS" + // "URL_MAPS" + // "VPN_TUNNELS" + Metric string `json:"metric,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Usage: [Output Only] Current usage of this metric. + Usage float64 `json:"usage,omitempty"` - // ForceSendFields is a list of field names (e.g. "Contents") to + // ForceSendFields is a list of field names (e.g. "Limit") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -12806,8 +15977,8 @@ type SerialPortOutput struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Contents") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Limit") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -12815,22 +15986,46 @@ type SerialPortOutput struct { NullFields []string `json:"-"` } -func (s *SerialPortOutput) MarshalJSON() ([]byte, error) { - type noMethod SerialPortOutput +func (s *Quota) MarshalJSON() ([]byte, error) { + type noMethod Quota raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ServiceAccount: A service account. -type ServiceAccount struct { - // Email: Email address of the service account. - Email string `json:"email,omitempty"` +func (s *Quota) UnmarshalJSON(data []byte) error { + type noMethod Quota + var s1 struct { + Limit gensupport.JSONFloat64 `json:"limit"` + Usage gensupport.JSONFloat64 `json:"usage"` + *noMethod + } + s1.noMethod = (*noMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Limit = float64(s1.Limit) + s.Usage = float64(s1.Usage) + return nil +} - // Scopes: The list of scopes to be made available for this service - // account. - Scopes []string `json:"scopes,omitempty"` +// Reference: Represents a reference to a resource. +type Reference struct { + // Kind: [Output Only] Type of the resource. Always compute#reference + // for references. + Kind string `json:"kind,omitempty"` - // ForceSendFields is a list of field names (e.g. "Email") to + // ReferenceType: A description of the reference type with no implied + // semantics. Possible values include: + // - MEMBER_OF + ReferenceType string `json:"referenceType,omitempty"` + + // Referrer: URL of the resource which refers to the target. + Referrer string `json:"referrer,omitempty"` + + // Target: URL of the resource to which this reference points. + Target string `json:"target,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Kind") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -12838,7 +16033,7 @@ type ServiceAccount struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Email") to include in API + // NullFields is a list of field names (e.g. "Kind") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -12847,122 +16042,52 @@ type ServiceAccount struct { NullFields []string `json:"-"` } -func (s *ServiceAccount) MarshalJSON() ([]byte, error) { - type noMethod ServiceAccount +func (s *Reference) MarshalJSON() ([]byte, error) { + type noMethod Reference raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Snapshot: A persistent disk snapshot resource. -type Snapshot struct { +// Region: Region resource. +type Region struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` + // Deprecated: [Output Only] The deprecation status associated with this + // region. + Deprecated *DeprecationStatus `json:"deprecated,omitempty"` - // DiskSizeGb: [Output Only] Size of the snapshot, specified in GB. - DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"` + // Description: [Output Only] Textual description of the resource. + Description string `json:"description,omitempty"` // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` - // Kind: [Output Only] Type of the resource. Always compute#snapshot for - // Snapshot resources. + // Kind: [Output Only] Type of the resource. Always compute#region for + // regions. Kind string `json:"kind,omitempty"` - // LabelFingerprint: A fingerprint for the labels being applied to this - // snapshot, which is essentially a hash of the labels set used for - // optimistic locking. The fingerprint is initially generated by Compute - // Engine and changes after every request to modify or update labels. - // You must always provide an up-to-date fingerprint hash in order to - // update or change labels. - // - // To see the latest fingerprint, make a get() request to retrieve a - // snapshot. - LabelFingerprint string `json:"labelFingerprint,omitempty"` - - // Labels: Labels to apply to this snapshot. These can be later modified - // by the setLabels method. Label values may be empty. - Labels map[string]string `json:"labels,omitempty"` - - // Licenses: [Output Only] A list of public visible licenses that apply - // to this snapshot. This can be because the original image had licenses - // attached (such as a Windows image). - Licenses []string `json:"licenses,omitempty"` - - // Name: Name of the resource; provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. + // Name: [Output Only] Name of the resource. Name string `json:"name,omitempty"` + // Quotas: [Output Only] Quotas assigned to this region. + Quotas []*Quota `json:"quotas,omitempty"` + // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // SnapshotEncryptionKey: Encrypts the snapshot using a - // customer-supplied encryption key. 
- // - // After you encrypt a snapshot using a customer-supplied key, you must - // provide the same key if you use the image later For example, you must - // provide the encryption key when you create a disk from the encrypted - // snapshot in a future request. - // - // Customer-supplied encryption keys do not protect access to metadata - // of the disk. - // - // If you do not provide an encryption key when creating the snapshot, - // then the snapshot will be encrypted using an automatically generated - // key and you do not need to provide a key to use the snapshot later. - SnapshotEncryptionKey *CustomerEncryptionKey `json:"snapshotEncryptionKey,omitempty"` - - // SourceDisk: [Output Only] The source disk used to create this - // snapshot. - SourceDisk string `json:"sourceDisk,omitempty"` - - // SourceDiskEncryptionKey: The customer-supplied encryption key of the - // source disk. Required if the source disk is protected by a - // customer-supplied encryption key. - SourceDiskEncryptionKey *CustomerEncryptionKey `json:"sourceDiskEncryptionKey,omitempty"` - - // SourceDiskId: [Output Only] The ID value of the disk used to create - // this snapshot. This value may be used to determine whether the - // snapshot was taken from the current or a previous instance of a given - // disk name. - SourceDiskId string `json:"sourceDiskId,omitempty"` - - // Status: [Output Only] The status of the snapshot. This can be - // CREATING, DELETING, FAILED, READY, or UPLOADING. + // Status: [Output Only] Status of the region, either UP or DOWN. // // Possible values: - // "CREATING" - // "DELETING" - // "FAILED" - // "READY" - // "UPLOADING" + // "DOWN" + // "UP" Status string `json:"status,omitempty"` - // StorageBytes: [Output Only] A size of the the storage used by the - // snapshot. As snapshots share storage, this number is expected to - // change with snapshot creation/deletion. - StorageBytes int64 `json:"storageBytes,omitempty,string"` - - // StorageBytesStatus: [Output Only] An indicator whether storageBytes - // is in a stable state or it is being adjusted as a result of shared - // storage reallocation. This status can either be UPDATING, meaning the - // size of the snapshot is being updated, or UP_TO_DATE, meaning the - // size of the snapshot is up-to-date. - // - // Possible values: - // "UPDATING" - // "UP_TO_DATE" - StorageBytesStatus string `json:"storageBytesStatus,omitempty"` + // Zones: [Output Only] A list of zones available in this region, in the + // form of resource URLs. + Zones []string `json:"zones,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -12986,20 +16111,20 @@ type Snapshot struct { NullFields []string `json:"-"` } -func (s *Snapshot) MarshalJSON() ([]byte, error) { - type noMethod Snapshot +func (s *Region) MarshalJSON() ([]byte, error) { + type noMethod Region raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SnapshotList: Contains a list of Snapshot resources. -type SnapshotList struct { +// RegionAutoscalerList: Contains a list of autoscalers. +type RegionAutoscalerList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of Snapshot resources. - Items []*Snapshot `json:"items,omitempty"` + // Items: A list of Autoscaler resources. + Items []*Autoscaler `json:"items,omitempty"` // Kind: Type of resource. 
Kind string `json:"kind,omitempty"` @@ -13015,6 +16140,9 @@ type SnapshotList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *RegionAutoscalerListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -13036,58 +16164,49 @@ type SnapshotList struct { NullFields []string `json:"-"` } -func (s *SnapshotList) MarshalJSON() ([]byte, error) { - type noMethod SnapshotList +func (s *RegionAutoscalerList) MarshalJSON() ([]byte, error) { + type noMethod RegionAutoscalerList raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SslCertificate: An SslCertificate resource. This resource provides a -// mechanism to upload an SSL key and certificate to the load balancer -// to serve secure connections from the user. -type SslCertificate struct { - // Certificate: A local certificate file. The certificate must be in PEM - // format. The certificate chain must be no greater than 5 certs long. - // The chain must include at least one intermediate cert. - Certificate string `json:"certificate,omitempty"` - - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of the resource. Always - // compute#sslCertificate for SSL certificates. - Kind string `json:"kind,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // PrivateKey: A write-only private key in PEM format. Only insert - // requests will include this field. - PrivateKey string `json:"privateKey,omitempty"` +// RegionAutoscalerListWarning: [Output Only] Informational warning +// message. +type RegionAutoscalerListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` - // SelfLink: [Output only] Server-defined URL for the resource. 
- SelfLink string `json:"selfLink,omitempty"` + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*RegionAutoscalerListWarningData `json:"data,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` - // ForceSendFields is a list of field names (e.g. "Certificate") to + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -13095,49 +16214,36 @@ type SslCertificate struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Certificate") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *SslCertificate) MarshalJSON() ([]byte, error) { - type noMethod SslCertificate +func (s *RegionAutoscalerListWarning) MarshalJSON() ([]byte, error) { + type noMethod RegionAutoscalerListWarning raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SslCertificateList: Contains a list of SslCertificate resources. -type SslCertificateList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of SslCertificate resources. - Items []*SslCertificate `json:"items,omitempty"` - - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` +type RegionAutoscalerListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). 
+ Key string `json:"key,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -13145,7 +16251,7 @@ type SslCertificateList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API + // NullFields is a list of field names (e.g. "Key") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -13154,115 +16260,22 @@ type SslCertificateList struct { NullFields []string `json:"-"` } -func (s *SslCertificateList) MarshalJSON() ([]byte, error) { - type noMethod SslCertificateList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// Subnetwork: A Subnetwork resource. -type Subnetwork struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. This field can be set only at - // resource creation time. - Description string `json:"description,omitempty"` - - // GatewayAddress: [Output Only] The gateway address for default routes - // to reach destination addresses outside this subnetwork. This field - // can be set only at resource creation time. - GatewayAddress string `json:"gatewayAddress,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // IpCidrRange: The range of internal addresses that are owned by this - // subnetwork. Provide this property when you create the subnetwork. For - // example, 10.0.0.0/8 or 192.168.0.0/16. Ranges must be unique and - // non-overlapping within a network. Only IPv4 is supported. This field - // can be set only at resource creation time. - IpCidrRange string `json:"ipCidrRange,omitempty"` - - // Kind: [Output Only] Type of the resource. Always compute#subnetwork - // for Subnetwork resources. - Kind string `json:"kind,omitempty"` - - // Name: The name of the resource, provided by the client when initially - // creating the resource. The name must be 1-63 characters long, and - // comply with RFC1035. Specifically, the name must be 1-63 characters - // long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? - // which means the first character must be a lowercase letter, and all - // following characters must be a dash, lowercase letter, or digit, - // except the last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // Network: The URL of the network to which this subnetwork belongs, - // provided by the client when initially creating the subnetwork. Only - // networks that are in the distributed mode can have subnetworks. 
This - // field can be set only at resource creation time. - Network string `json:"network,omitempty"` - - // PrivateIpGoogleAccess: Whether the VMs in this subnet can access - // Google services without assigned external IP addresses. This field - // can be both set at resource creation time and updated using - // setPrivateIpGoogleAccess. - PrivateIpGoogleAccess bool `json:"privateIpGoogleAccess,omitempty"` - - // Region: URL of the region where the Subnetwork resides. This field - // can be set only at resource creation time. - Region string `json:"region,omitempty"` - - // SecondaryIpRanges: An array of configurations for secondary IP ranges - // for VM instances contained in this subnetwork. The primary IP of such - // VM must belong to the primary ipCidrRange of the subnetwork. The - // alias IPs may belong to either primary or secondary ranges. - SecondaryIpRanges []*SubnetworkSecondaryRange `json:"secondaryIpRanges,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *Subnetwork) MarshalJSON() ([]byte, error) { - type noMethod Subnetwork +func (s *RegionAutoscalerListWarningData) MarshalJSON() ([]byte, error) { + type noMethod RegionAutoscalerListWarningData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SubnetworkAggregatedList struct { +// RegionInstanceGroupList: Contains a list of InstanceGroup resources. +type RegionInstanceGroupList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of SubnetworksScopedList resources. - Items map[string]SubnetworksScopedList `json:"items,omitempty"` + // Items: A list of InstanceGroup resources. + Items []*InstanceGroup `json:"items,omitempty"` - // Kind: [Output Only] Type of resource. Always - // compute#subnetworkAggregatedList for aggregated lists of subnetworks. + // Kind: The resource type. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -13276,6 +16289,9 @@ type SubnetworkAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *RegionInstanceGroupListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. 
googleapi.ServerResponse `json:"-"` @@ -13297,41 +16313,49 @@ type SubnetworkAggregatedList struct { NullFields []string `json:"-"` } -func (s *SubnetworkAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod SubnetworkAggregatedList +func (s *RegionInstanceGroupList) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupList raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SubnetworkList: Contains a list of Subnetwork resources. -type SubnetworkList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of Subnetwork resources. - Items []*Subnetwork `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#subnetworkList - // for lists of subnetworks. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` +// RegionInstanceGroupListWarning: [Output Only] Informational warning +// message. +type RegionInstanceGroupListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*RegionInstanceGroupListWarningData `json:"data,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -13339,7 +16363,7 @@ type SubnetworkList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API + // NullFields is a list of field names (e.g. "Code") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -13348,29 +16372,27 @@ type SubnetworkList struct { NullFields []string `json:"-"` } -func (s *SubnetworkList) MarshalJSON() ([]byte, error) { - type noMethod SubnetworkList +func (s *RegionInstanceGroupListWarning) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupListWarning raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SubnetworkSecondaryRange: Represents a secondary IP range of a -// subnetwork. -type SubnetworkSecondaryRange struct { - // IpCidrRange: The range of IP addresses belonging to this subnetwork - // secondary range. Provide this property when you create the - // subnetwork. Ranges must be unique and non-overlapping with all - // primary and secondary IP ranges within a network. Only IPv4 is - // supported. - IpCidrRange string `json:"ipCidrRange,omitempty"` +type RegionInstanceGroupListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` - // RangeName: The name associated with this subnetwork secondary range, - // used when adding an alias IP range to a VM instance. The name must be - // 1-63 characters long, and comply with RFC1035. The name must be - // unique within the subnetwork. - RangeName string `json:"rangeName,omitempty"` + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` - // ForceSendFields is a list of field names (e.g. "IpCidrRange") to + // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -13378,61 +16400,55 @@ type SubnetworkSecondaryRange struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "IpCidrRange") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. 
NullFields []string `json:"-"` } -func (s *SubnetworkSecondaryRange) MarshalJSON() ([]byte, error) { - type noMethod SubnetworkSecondaryRange +func (s *RegionInstanceGroupListWarningData) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupListWarningData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SubnetworksExpandIpCidrRangeRequest struct { - // IpCidrRange: The IP (in CIDR format or netmask) of internal addresses - // that are legal on this Subnetwork. This range should be disjoint from - // other subnetworks within this network. This range can only be larger - // than (i.e. a superset of) the range previously defined before the - // update. - IpCidrRange string `json:"ipCidrRange,omitempty"` +// RegionInstanceGroupManagerList: Contains a list of managed instance +// groups. +type RegionInstanceGroupManagerList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` - // ForceSendFields is a list of field names (e.g. "IpCidrRange") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` + // Items: A list of InstanceGroupManager resources. + Items []*InstanceGroupManager `json:"items,omitempty"` - // NullFields is a list of field names (e.g. "IpCidrRange") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} + // Kind: [Output Only] The resource type, which is always + // compute#instanceGroupManagerList for a list of managed instance + // groups that exist in th regional scope. + Kind string `json:"kind,omitempty"` -func (s *SubnetworksExpandIpCidrRangeRequest) MarshalJSON() ([]byte, error) { - type noMethod SubnetworksExpandIpCidrRangeRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` -type SubnetworksScopedList struct { - // Subnetworks: List of subnetworks contained in this scope. - Subnetworks []*Subnetwork `json:"subnetworks,omitempty"` + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` - // Warning: An informational warning that appears when the list of - // addresses is empty. - Warning *SubnetworksScopedListWarning `json:"warning,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *RegionInstanceGroupManagerListWarning `json:"warning,omitempty"` - // ForceSendFields is a list of field names (e.g. 
"Subnetworks") to + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -13440,24 +16456,24 @@ type SubnetworksScopedList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Subnetworks") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *SubnetworksScopedList) MarshalJSON() ([]byte, error) { - type noMethod SubnetworksScopedList +func (s *RegionInstanceGroupManagerList) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupManagerList raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SubnetworksScopedListWarning: An informational warning that appears -// when the list of addresses is empty. -type SubnetworksScopedListWarning struct { +// RegionInstanceGroupManagerListWarning: [Output Only] Informational +// warning message. +type RegionInstanceGroupManagerListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -13485,7 +16501,7 @@ type SubnetworksScopedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*SubnetworksScopedListWarningData `json:"data,omitempty"` + Data []*RegionInstanceGroupManagerListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -13508,13 +16524,13 @@ type SubnetworksScopedListWarning struct { NullFields []string `json:"-"` } -func (s *SubnetworksScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod SubnetworksScopedListWarning +func (s *RegionInstanceGroupManagerListWarning) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupManagerListWarning raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type SubnetworksScopedListWarningData struct { +type RegionInstanceGroupManagerListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. 
For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -13545,70 +16561,19 @@ type SubnetworksScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *SubnetworksScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod SubnetworksScopedListWarningData - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type SubnetworksSetPrivateIpGoogleAccessRequest struct { - PrivateIpGoogleAccess bool `json:"privateIpGoogleAccess,omitempty"` - - // ForceSendFields is a list of field names (e.g. - // "PrivateIpGoogleAccess") to unconditionally include in API requests. - // By default, fields with empty values are omitted from API requests. - // However, any non-pointer, non-interface field appearing in - // ForceSendFields will be sent to the server regardless of whether the - // field is empty or not. This may be used to include empty fields in - // Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "PrivateIpGoogleAccess") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *SubnetworksSetPrivateIpGoogleAccessRequest) MarshalJSON() ([]byte, error) { - type noMethod SubnetworksSetPrivateIpGoogleAccessRequest +func (s *RegionInstanceGroupManagerListWarningData) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupManagerListWarningData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TCPHealthCheck struct { - // Port: The TCP port number for the health check request. The default - // value is 80. Valid values are 1 through 65535. - Port int64 `json:"port,omitempty"` - - // PortName: Port name as defined in InstanceGroup#NamedPort#name. If - // both port and port_name are defined, port takes precedence. - PortName string `json:"portName,omitempty"` - - // ProxyHeader: Specifies the type of proxy header to append before - // sending data to the backend, either NONE or PROXY_V1. The default is - // NONE. - // - // Possible values: - // "NONE" - // "PROXY_V1" - ProxyHeader string `json:"proxyHeader,omitempty"` - - // Request: The application data to send once the TCP connection has - // been established (default value is empty). If both request and - // response are empty, the connection establishment alone will indicate - // health. The request data can only be ASCII. - Request string `json:"request,omitempty"` - - // Response: The bytes to match against the beginning of the response - // data. If left empty (the default value), any response will indicate - // health. The response data can only be ASCII. - Response string `json:"response,omitempty"` +type RegionInstanceGroupManagersAbandonInstancesRequest struct { + // Instances: The URLs of one or more instances to abandon. This can be + // a full URL or a partial URL, such as + // zones/[ZONE]/instances/[INSTANCE_NAME]. + Instances []string `json:"instances,omitempty"` - // ForceSendFields is a list of field names (e.g. "Port") to + // ForceSendFields is a list of field names (e.g. 
"Instances") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -13616,8 +16581,8 @@ type TCPHealthCheck struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Port") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Instances") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -13625,29 +16590,19 @@ type TCPHealthCheck struct { NullFields []string `json:"-"` } -func (s *TCPHealthCheck) MarshalJSON() ([]byte, error) { - type noMethod TCPHealthCheck +func (s *RegionInstanceGroupManagersAbandonInstancesRequest) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupManagersAbandonInstancesRequest raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Tags: A set of instance tags. -type Tags struct { - // Fingerprint: Specifies a fingerprint for this request, which is - // essentially a hash of the metadata's contents and used for optimistic - // locking. The fingerprint is initially generated by Compute Engine and - // changes after every request to modify or update metadata. You must - // always provide an up-to-date fingerprint hash in order to update or - // change metadata. - // - // To see the latest fingerprint, make get() request to the instance. - Fingerprint string `json:"fingerprint,omitempty"` - - // Items: An array of tags. Each tag must be 1-63 characters long, and - // comply with RFC1035. - Items []string `json:"items,omitempty"` +type RegionInstanceGroupManagersDeleteInstancesRequest struct { + // Instances: The URLs of one or more instances to delete. This can be a + // full URL or a partial URL, such as + // zones/[ZONE]/instances/[INSTANCE_NAME]. + Instances []string `json:"instances,omitempty"` - // ForceSendFields is a list of field names (e.g. "Fingerprint") to + // ForceSendFields is a list of field names (e.g. "Instances") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -13655,69 +16610,46 @@ type Tags struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Fingerprint") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Instances") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. 
NullFields []string `json:"-"` } -func (s *Tags) MarshalJSON() ([]byte, error) { - type noMethod Tags +func (s *RegionInstanceGroupManagersDeleteInstancesRequest) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupManagersDeleteInstancesRequest raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetHttpProxy: A TargetHttpProxy resource. This resource defines an -// HTTP proxy. -type TargetHttpProxy struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of resource. Always compute#targetHttpProxy - // for target HTTP proxies. - Kind string `json:"kind,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` +type RegionInstanceGroupManagersListInstancesResponse struct { + // ManagedInstances: List of managed instances. + ManagedInstances []*ManagedInstance `json:"managedInstances,omitempty"` - // UrlMap: URL to the UrlMap resource that defines the mapping from URL - // to the BackendService. - UrlMap string `json:"urlMap,omitempty"` + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "ManagedInstances") to + // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to + // NullFields is a list of field names (e.g. "ManagedInstances") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. 
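// [Editor's note — illustrative sketch, not part of the vendored diff; written
// as if inside this package, with placeholder project/zone/instance names] The
// abandon and delete request bodies above only carry instance URLs, which the
// field comments allow in either full or partial form:

func exampleAbandonRequest() ([]byte, error) {
	req := &RegionInstanceGroupManagersAbandonInstancesRequest{
		Instances: []string{
			// Partial URL form...
			"zones/us-central1-a/instances/vm-1",
			// ...or the fully qualified form of the same kind of reference.
			"https://www.googleapis.com/compute/v1/projects/my-proj/zones/us-central1-a/instances/vm-2",
		},
	}
	// MarshalJSON applies the ForceSendFields/NullFields handling documented above.
	return req.MarshalJSON()
}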
However, any field // with an empty value appearing in NullFields will be sent to the @@ -13727,41 +16659,19 @@ type TargetHttpProxy struct { NullFields []string `json:"-"` } -func (s *TargetHttpProxy) MarshalJSON() ([]byte, error) { - type noMethod TargetHttpProxy +func (s *RegionInstanceGroupManagersListInstancesResponse) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupManagersListInstancesResponse raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetHttpProxyList: A list of TargetHttpProxy resources. -type TargetHttpProxyList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of TargetHttpProxy resources. - Items []*TargetHttpProxy `json:"items,omitempty"` - - // Kind: Type of resource. Always compute#targetHttpProxyList for lists - // of target HTTP proxies. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` +type RegionInstanceGroupManagersRecreateRequest struct { + // Instances: The URLs of one or more instances to recreate. This can be + // a full URL or a partial URL, such as + // zones/[ZONE]/instances/[INSTANCE_NAME]. + Instances []string `json:"instances,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "Instances") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -13769,8 +16679,8 @@ type TargetHttpProxyList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Instances") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -13778,27 +16688,24 @@ type TargetHttpProxyList struct { NullFields []string `json:"-"` } -func (s *TargetHttpProxyList) MarshalJSON() ([]byte, error) { - type noMethod TargetHttpProxyList +func (s *RegionInstanceGroupManagersRecreateRequest) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupManagersRecreateRequest raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetHttpsProxiesSetSslCertificatesRequest struct { - // SslCertificates: New set of SslCertificate resources to associate - // with this TargetHttpsProxy resource. 
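// [Editor's note — illustrative sketch, not part of the vendored diff] The
// NextPageToken field documented above follows the usual list-pagination
// contract: feed the returned token back as the pageToken query parameter
// until it comes back empty. A minimal sketch, assuming a caller-supplied
// fetch function (listPage is hypothetical and stands in for the actual
// list call):

func collectManagedInstances(
	listPage func(pageToken string) (*RegionInstanceGroupManagersListInstancesResponse, error),
) ([]*ManagedInstance, error) {
	var all []*ManagedInstance
	token := ""
	for {
		resp, err := listPage(token)
		if err != nil {
			return nil, err
		}
		all = append(all, resp.ManagedInstances...)
		if resp.NextPageToken == "" {
			return all, nil // last page reached
		}
		token = resp.NextPageToken // request the next page
	}
}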
Currently exactly one - // SslCertificate resource must be specified. - SslCertificates []string `json:"sslCertificates,omitempty"` +type RegionInstanceGroupManagersSetAutoHealingRequest struct { + AutoHealingPolicies []*InstanceGroupManagerAutoHealingPolicy `json:"autoHealingPolicies,omitempty"` - // ForceSendFields is a list of field names (e.g. "SslCertificates") to - // unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "AutoHealingPolicies") + // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "SslCertificates") to + // NullFields is a list of field names (e.g. "AutoHealingPolicies") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -13808,70 +16715,60 @@ type TargetHttpsProxiesSetSslCertificatesRequest struct { NullFields []string `json:"-"` } -func (s *TargetHttpsProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetHttpsProxiesSetSslCertificatesRequest +func (s *RegionInstanceGroupManagersSetAutoHealingRequest) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupManagersSetAutoHealingRequest raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetHttpsProxy: A TargetHttpsProxy resource. This resource defines -// an HTTPS proxy. -type TargetHttpsProxy struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of resource. Always compute#targetHttpsProxy - // for target HTTPS proxies. - Kind string `json:"kind,omitempty"` +type RegionInstanceGroupManagersSetTargetPoolsRequest struct { + // Fingerprint: Fingerprint of the target pools information, which is a + // hash of the contents. This field is used for optimistic locking when + // you update the target pool entries. This field is optional. + Fingerprint string `json:"fingerprint,omitempty"` - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` + // TargetPools: The URL of all TargetPool resources to which instances + // in the instanceGroup field are added. 
The target pools automatically + // apply to all of the instances in the managed instance group. + TargetPools []string `json:"targetPools,omitempty"` - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` + // ForceSendFields is a list of field names (e.g. "Fingerprint") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // SslCertificates: URLs to SslCertificate resources that are used to - // authenticate connections between users and the load balancer. - // Currently, exactly one SSL certificate must be specified. - SslCertificates []string `json:"sslCertificates,omitempty"` + // NullFields is a list of field names (e.g. "Fingerprint") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} - // UrlMap: A fully-qualified or valid partial URL to the UrlMap resource - // that defines the mapping from URL to the BackendService. For example, - // the following are all valid URLs for specifying a URL map: - // - - // https://www.googleapis.compute/v1/projects/project/global/urlMaps/url-map - // - projects/project/global/urlMaps/url-map - // - global/urlMaps/url-map - UrlMap string `json:"urlMap,omitempty"` +func (s *RegionInstanceGroupManagersSetTargetPoolsRequest) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupManagersSetTargetPoolsRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` +type RegionInstanceGroupManagersSetTemplateRequest struct { + // InstanceTemplate: URL of the InstanceTemplate resource from which all + // new instances will be created. + InstanceTemplate string `json:"instanceTemplate,omitempty"` - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "InstanceTemplate") to + // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to + // NullFields is a list of field names (e.g. "InstanceTemplate") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. 
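// [Editor's note — illustrative sketch, not part of the vendored diff] The
// Fingerprint comment above describes an optimistic-locking handshake: read
// the manager's current fingerprint, then echo it back alongside the new
// target pools so the server can reject a conflicting concurrent update.
// currentFingerprint stands in for a value obtained from a prior get() call;
// the pool URL is a placeholder.

func newSetTargetPoolsRequest(currentFingerprint string) *RegionInstanceGroupManagersSetTargetPoolsRequest {
	return &RegionInstanceGroupManagersSetTargetPoolsRequest{
		Fingerprint: currentFingerprint,
		TargetPools: []string{
			"regions/us-central1/targetPools/example-pool", // partial URL
		},
	}
}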
However, any field // with an empty value appearing in NullFields will be sent to the @@ -13881,23 +16778,21 @@ type TargetHttpsProxy struct { NullFields []string `json:"-"` } -func (s *TargetHttpsProxy) MarshalJSON() ([]byte, error) { - type noMethod TargetHttpsProxy +func (s *RegionInstanceGroupManagersSetTemplateRequest) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupManagersSetTemplateRequest raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetHttpsProxyList: Contains a list of TargetHttpsProxy resources. -type TargetHttpsProxyList struct { +type RegionInstanceGroupsListInstances struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of TargetHttpsProxy resources. - Items []*TargetHttpsProxy `json:"items,omitempty"` + // Items: A list of InstanceWithNamedPorts resources. + Items []*InstanceWithNamedPorts `json:"items,omitempty"` - // Kind: Type of resource. Always compute#targetHttpsProxyList for lists - // of target HTTPS proxies. + // Kind: The resource type. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -13911,6 +16806,9 @@ type TargetHttpsProxyList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *RegionInstanceGroupsListInstancesWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -13932,119 +16830,159 @@ type TargetHttpsProxyList struct { NullFields []string `json:"-"` } -func (s *TargetHttpsProxyList) MarshalJSON() ([]byte, error) { - type noMethod TargetHttpsProxyList +func (s *RegionInstanceGroupsListInstances) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupsListInstances raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetInstance: A TargetInstance resource. This resource defines an -// endpoint instance that terminates traffic of certain protocols. -type TargetInstance struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Instance: A URL to the virtual machine instance that handles traffic - // for this target instance. When creating a target instance, you can - // provide the fully-qualified URL or a valid partial URL to the desired - // virtual machine. For example, the following are all valid URLs: - // - - // https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/instance - // - projects/project/zones/zone/instances/instance - // - zones/zone/instances/instance - Instance string `json:"instance,omitempty"` - - // Kind: [Output Only] The type of the resource. Always - // compute#targetInstance for target instances. - Kind string `json:"kind,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. 
The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // NatPolicy: NAT option controlling how IPs are NAT'ed to the instance. - // Currently only NO_NAT (default value) is supported. +// RegionInstanceGroupsListInstancesWarning: [Output Only] Informational +// warning message. +type RegionInstanceGroupsListInstancesWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. // // Possible values: - // "NO_NAT" - NatPolicy string `json:"natPolicy,omitempty"` + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*RegionInstanceGroupsListInstancesWarningData `json:"data,omitempty"` - // Zone: [Output Only] URL of the zone where the target instance - // resides. - Zone string `json:"zone,omitempty"` + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
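// [Editor's note — illustrative sketch, not part of the vendored diff] The
// Warning field added to RegionInstanceGroupsListInstances above is purely
// informational; a caller might surface it like this (the helper name is
// hypothetical):

func pageWarning(resp *RegionInstanceGroupsListInstances) (code, message string, ok bool) {
	if resp == nil || resp.Warning == nil {
		return "", "", false // no informational warning on this page
	}
	// NO_RESULTS_ON_PAGE, for example, only means this page of results is empty.
	return resp.Warning.Code, resp.Warning.Message, true
}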
+ NullFields []string `json:"-"` +} + +func (s *RegionInstanceGroupsListInstancesWarning) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupsListInstancesWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type RegionInstanceGroupsListInstancesWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *TargetInstance) MarshalJSON() ([]byte, error) { - type noMethod TargetInstance +func (s *RegionInstanceGroupsListInstancesWarningData) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupsListInstancesWarningData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetInstanceAggregatedList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` +type RegionInstanceGroupsListInstancesRequest struct { + // InstanceState: Instances in which state should be returned. Valid + // options are: 'ALL', 'RUNNING'. By default, it lists all instances. + // + // Possible values: + // "ALL" + // "RUNNING" + InstanceState string `json:"instanceState,omitempty"` - // Items: A list of TargetInstance resources. - Items map[string]TargetInstancesScopedList `json:"items,omitempty"` + // PortName: Name of port user is interested in. It is optional. If it + // is set, only information about this ports will be returned. If it is + // not set, all the named ports will be returned. Always lists all + // instances. 
+ PortName string `json:"portName,omitempty"` - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` + // ForceSendFields is a list of field names (e.g. "InstanceState") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` + // NullFields is a list of field names (e.g. "InstanceState") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` +func (s *RegionInstanceGroupsListInstancesRequest) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupsListInstancesRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` +type RegionInstanceGroupsSetNamedPortsRequest struct { + // Fingerprint: The fingerprint of the named ports information for this + // instance group. Use this optional property to prevent conflicts when + // multiple users change the named ports settings concurrently. Obtain + // the fingerprint with the instanceGroups.get method. Then, include the + // fingerprint in your request to ensure that you do not overwrite + // changes that were applied from another concurrent request. + Fingerprint string `json:"fingerprint,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // NamedPorts: The list of named ports to set for this instance group. + NamedPorts []*NamedPort `json:"namedPorts,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Fingerprint") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -14052,31 +16990,32 @@ type TargetInstanceAggregatedList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Fingerprint") to include + // in API requests with the JSON null value. 
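// [Editor's note — illustrative sketch, not part of the vendored diff] A
// construction example for the list-instances request body defined above:
// restrict the listing to running instances and to one named port (the port
// name "http" is a placeholder; both fields are optional).

func newListInstancesFilter() *RegionInstanceGroupsListInstancesRequest {
	return &RegionInstanceGroupsListInstancesRequest{
		InstanceState: "RUNNING", // or "ALL", which is the default behaviour
		PortName:      "http",    // omit to get information for every named port
	}
}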
By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *TargetInstanceAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod TargetInstanceAggregatedList +func (s *RegionInstanceGroupsSetNamedPortsRequest) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupsSetNamedPortsRequest raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetInstanceList: Contains a list of TargetInstance resources. -type TargetInstanceList struct { +// RegionList: Contains a list of region resources. +type RegionList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of TargetInstance resources. - Items []*TargetInstance `json:"items,omitempty"` + // Items: A list of Region resources. + Items []*Region `json:"items,omitempty"` - // Kind: Type of resource. + // Kind: [Output Only] Type of resource. Always compute#regionList for + // lists of regions. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -14090,6 +17029,9 @@ type TargetInstanceList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *RegionListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -14111,47 +17053,14 @@ type TargetInstanceList struct { NullFields []string `json:"-"` } -func (s *TargetInstanceList) MarshalJSON() ([]byte, error) { - type noMethod TargetInstanceList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TargetInstancesScopedList struct { - // TargetInstances: List of target instances contained in this scope. - TargetInstances []*TargetInstance `json:"targetInstances,omitempty"` - - // Warning: Informational warning which replaces the list of addresses - // when the list is empty. - Warning *TargetInstancesScopedListWarning `json:"warning,omitempty"` - - // ForceSendFields is a list of field names (e.g. "TargetInstances") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "TargetInstances") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. 
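// [Editor's note — illustrative sketch, not part of the vendored diff] The
// same fingerprint handshake applies to named ports: obtain the current
// fingerprint with instanceGroups.get, as the comment above says, and send it
// back with the new port list. currentFingerprint stands in for that value.

func newSetNamedPortsRequest(currentFingerprint string) *RegionInstanceGroupsSetNamedPortsRequest {
	return &RegionInstanceGroupsSetNamedPortsRequest{
		Fingerprint: currentFingerprint,
		NamedPorts: []*NamedPort{
			// Assumes NamedPort's Name/Port fields; the values are placeholders.
			{Name: "http", Port: 8080},
		},
	}
}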
- NullFields []string `json:"-"` -} - -func (s *TargetInstancesScopedList) MarshalJSON() ([]byte, error) { - type noMethod TargetInstancesScopedList +func (s *RegionList) MarshalJSON() ([]byte, error) { + type noMethod RegionList raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetInstancesScopedListWarning: Informational warning which -// replaces the list of addresses when the list is empty. -type TargetInstancesScopedListWarning struct { +// RegionListWarning: [Output Only] Informational warning message. +type RegionListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -14179,7 +17088,7 @@ type TargetInstancesScopedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*TargetInstancesScopedListWarningData `json:"data,omitempty"` + Data []*RegionListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -14202,13 +17111,13 @@ type TargetInstancesScopedListWarning struct { NullFields []string `json:"-"` } -func (s *TargetInstancesScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod TargetInstancesScopedListWarning +func (s *RegionListWarning) MarshalJSON() ([]byte, error) { + type noMethod RegionListWarning raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetInstancesScopedListWarningData struct { +type RegionListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -14239,119 +17148,25 @@ type TargetInstancesScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetInstancesScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod TargetInstancesScopedListWarningData +func (s *RegionListWarningData) MarshalJSON() ([]byte, error) { + type noMethod RegionListWarningData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetPool: A TargetPool resource. This resource defines a pool of -// instances, an associated HttpHealthCheck resource, and the fallback -// target pool. -type TargetPool struct { - // BackupPool: This field is applicable only when the containing target - // pool is serving a forwarding rule as the primary pool, and its - // failoverRatio field is properly set to a value between [0, - // 1]. - // - // backupPool and failoverRatio together define the fallback behavior of - // the primary target pool: if the ratio of the healthy instances in the - // primary pool is at or below failoverRatio, traffic arriving at the - // load-balanced IP will be directed to the backup pool. - // - // In case where failoverRatio and backupPool are not set, or all the - // instances in the backup pool are unhealthy, the traffic will be - // directed back to the primary pool in the "force" mode, where traffic - // will be spread to the healthy instances with the best effort, or to - // all instances when no instance is healthy. - BackupPool string `json:"backupPool,omitempty"` - - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. 
- CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // FailoverRatio: This field is applicable only when the containing - // target pool is serving a forwarding rule as the primary pool (i.e., - // not as a backup pool to some other target pool). The value of the - // field must be in [0, 1]. - // - // If set, backupPool must also be set. They together define the - // fallback behavior of the primary target pool: if the ratio of the - // healthy instances in the primary pool is at or below this number, - // traffic arriving at the load-balanced IP will be directed to the - // backup pool. - // - // In case where failoverRatio is not set or all the instances in the - // backup pool are unhealthy, the traffic will be directed back to the - // primary pool in the "force" mode, where traffic will be spread to the - // healthy instances with the best effort, or to all instances when no - // instance is healthy. - FailoverRatio float64 `json:"failoverRatio,omitempty"` - - // HealthChecks: The URL of the HttpHealthCheck resource. A member - // instance in this pool is considered healthy if and only if the health - // checks pass. An empty list means all member instances will be - // considered healthy at all times. Only HttpHealthChecks are supported. - // Only one health check may be specified. - HealthChecks []string `json:"healthChecks,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Instances: A list of resource URLs to the virtual machine instances - // serving this pool. They must live in zones contained in the same - // region as this pool. - Instances []string `json:"instances,omitempty"` - - // Kind: [Output Only] Type of the resource. Always compute#targetPool - // for target pools. - Kind string `json:"kind,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // Region: [Output Only] URL of the region where the target pool - // resides. - Region string `json:"region,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // SessionAffinity: Sesssion affinity option, must be one of the - // following values: - // NONE: Connections from the same client IP may go to any instance in - // the pool. - // CLIENT_IP: Connections from the same client IP will go to the same - // instance in the pool while that instance remains - // healthy. - // CLIENT_IP_PROTO: Connections from the same client IP with the same IP - // protocol will go to the same instance in the pool while that instance - // remains healthy. 
- // - // Possible values: - // "CLIENT_IP" - // "CLIENT_IP_PORT_PROTO" - // "CLIENT_IP_PROTO" - // "GENERATED_COOKIE" - // "NONE" - SessionAffinity string `json:"sessionAffinity,omitempty"` +type RegionSetLabelsRequest struct { + // LabelFingerprint: The fingerprint of the previous set of labels for + // this resource, used to detect conflicts. The fingerprint is initially + // generated by Compute Engine and changes after every request to modify + // or update labels. You must always provide an up-to-date fingerprint + // hash in order to update or change labels. Make a get() request to the + // resource to get the latest fingerprint. + LabelFingerprint string `json:"labelFingerprint,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Labels: The labels to set for this resource. + Labels map[string]string `json:"labels,omitempty"` - // ForceSendFields is a list of field names (e.g. "BackupPool") to + // ForceSendFields is a list of field names (e.g. "LabelFingerprint") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -14359,64 +17174,41 @@ type TargetPool struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "BackupPool") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "LabelFingerprint") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *TargetPool) MarshalJSON() ([]byte, error) { - type noMethod TargetPool +func (s *RegionSetLabelsRequest) MarshalJSON() ([]byte, error) { + type noMethod RegionSetLabelsRequest raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -func (s *TargetPool) UnmarshalJSON(data []byte) error { - type noMethod TargetPool - var s1 struct { - FailoverRatio gensupport.JSONFloat64 `json:"failoverRatio"` - *noMethod - } - s1.noMethod = (*noMethod)(s) - if err := json.Unmarshal(data, &s1); err != nil { - return err - } - s.FailoverRatio = float64(s1.FailoverRatio) - return nil -} - -type TargetPoolAggregatedList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of TargetPool resources. - Items map[string]TargetPoolsScopedList `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always - // compute#targetPoolAggregatedList for aggregated lists of target - // pools. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. 
If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` +// ResourceCommitment: Commitment for a particular resource (a +// Commitment is composed of one or more of these). +type ResourceCommitment struct { + // Amount: The amount of the resource purchased (in a type-dependent + // unit, such as bytes). For vCPUs, this can just be an integer. For + // memory, this must be provided in MB. Memory must be a multiple of 256 + // MB, with up to 6.5GB of memory per every vCPU. + Amount int64 `json:"amount,omitempty,string"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Type: Type of resource for which this commitment applies. Possible + // values are VCPU and MEMORY + // + // Possible values: + // "MEMORY" + // "UNSPECIFIED" + // "VCPU" + Type string `json:"type,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "Amount") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -14424,7 +17216,7 @@ type TargetPoolAggregatedList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API + // NullFields is a list of field names (e.g. "Amount") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -14433,25 +17225,18 @@ type TargetPoolAggregatedList struct { NullFields []string `json:"-"` } -func (s *TargetPoolAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolAggregatedList +func (s *ResourceCommitment) MarshalJSON() ([]byte, error) { + type noMethod ResourceCommitment raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetPoolInstanceHealth struct { - HealthStatus []*HealthStatus `json:"healthStatus,omitempty"` - - // Kind: [Output Only] Type of resource. Always - // compute#targetPoolInstanceHealth when checking the health of an - // instance. - Kind string `json:"kind,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` +type ResourceGroupReference struct { + // Group: A URI referencing one of the instance groups listed in the + // backend service. + Group string `json:"group,omitempty"` - // ForceSendFields is a list of field names (e.g. "HealthStatus") to + // ForceSendFields is a list of field names (e.g. "Group") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -14459,225 +17244,149 @@ type TargetPoolInstanceHealth struct { // used to include empty fields in Patch requests. 
ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "HealthStatus") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Group") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *TargetPoolInstanceHealth) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolInstanceHealth +func (s *ResourceGroupReference) MarshalJSON() ([]byte, error) { + type noMethod ResourceGroupReference raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetPoolList: Contains a list of TargetPool resources. -type TargetPoolList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` +// Route: Represents a Route resource. A route specifies how certain +// packets should be handled by the network. Routes are associated with +// instances by tags and the set of routes for a particular instance is +// called its routing table. +// +// For each packet leaving an instance, the system searches that +// instance's routing table for a single best matching route. Routes +// match packets by destination IP address, preferring smaller or more +// specific ranges over larger ones. If there is a tie, the system +// selects the route with the smallest priority value. If there is still +// a tie, it uses the layer three and four packet headers to select just +// one of the remaining matching routes. The packet is then forwarded as +// specified by the nextHop field of the winning route - either to +// another instance destination, an instance gateway, or a Google +// Compute Engine-operated gateway. +// +// Packets that do not match any route in the sending instance's routing +// table are dropped. +type Route struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` - // Items: A list of TargetPool resources. - Items []*TargetPool `json:"items,omitempty"` + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` - // Kind: [Output Only] Type of resource. Always compute#targetPoolList - // for lists of target pools. + // DestRange: The destination range of outgoing packets that this route + // applies to. Only IPv4 is supported. + DestRange string `json:"destRange,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of this resource. Always compute#routes for + // Route resources. Kind string `json:"kind,omitempty"` - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. 
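// [Editor's note — illustrative sketch, not part of the vendored diff] Per the
// ResourceCommitment comments above, vCPU amounts are plain integers while
// memory amounts are expressed in MB and must be a multiple of 256 MB. A
// commitment combining both might be assembled like this (the amounts are
// placeholders):

func exampleCommittedResources() []*ResourceCommitment {
	return []*ResourceCommitment{
		{Type: "VCPU", Amount: 4},
		{Type: "MEMORY", Amount: 16384}, // 16 GB, i.e. 64 * 256 MB
	}
}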
If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetPoolList) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TargetPoolsAddHealthCheckRequest struct { - // HealthChecks: The HttpHealthCheck to add to the target pool. - HealthChecks []*HealthCheckReference `json:"healthChecks,omitempty"` - - // ForceSendFields is a list of field names (e.g. "HealthChecks") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "HealthChecks") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *TargetPoolsAddHealthCheckRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolsAddHealthCheckRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TargetPoolsAddInstanceRequest struct { - // Instances: A full or partial URL to an instance to add to this target - // pool. This can be a full or partial URL. For example, the following - // are valid URLs: - // - - // https://www.googleapis.com/compute/v1/projects/project-id/zones/zone/instances/instance-name - // - projects/project-id/zones/zone/instances/instance-name - // - zones/zone/instances/instance-name - Instances []*InstanceReference `json:"instances,omitempty"` - - // ForceSendFields is a list of field names (e.g. 
"Instances") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` - // NullFields is a list of field names (e.g. "Instances") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} + // Network: Fully-qualified URL of the network that this route applies + // to. + Network string `json:"network,omitempty"` -func (s *TargetPoolsAddInstanceRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolsAddInstanceRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // NextHopGateway: The URL to a gateway that should handle matching + // packets. You can only specify the internet gateway using a full or + // partial valid URL: + // projects//global/gateways/default-internet-gateway + NextHopGateway string `json:"nextHopGateway,omitempty"` -type TargetPoolsRemoveHealthCheckRequest struct { - // HealthChecks: Health check URL to be removed. This can be a full or - // valid partial URL. For example, the following are valid URLs: - // - - // https://www.googleapis.com/compute/beta/projects/project/global/httpHealthChecks/health-check - // - projects/project/global/httpHealthChecks/health-check - // - global/httpHealthChecks/health-check - HealthChecks []*HealthCheckReference `json:"healthChecks,omitempty"` + // NextHopInstance: The URL to an instance that should handle matching + // packets. You can specify this as a full or partial URL. For + // example: + // https://www.googleapis.com/compute/v1/projects/project/zones/ + // zone/instances/ + NextHopInstance string `json:"nextHopInstance,omitempty"` - // ForceSendFields is a list of field names (e.g. "HealthChecks") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` + // NextHopIp: The network IP address of an instance that should handle + // matching packets. Only IPv4 is supported. + NextHopIp string `json:"nextHopIp,omitempty"` - // NullFields is a list of field names (e.g. "HealthChecks") to include - // in API requests with the JSON null value. 
By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} + // NextHopNetwork: The URL of the local network if it should handle + // matching packets. + NextHopNetwork string `json:"nextHopNetwork,omitempty"` -func (s *TargetPoolsRemoveHealthCheckRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolsRemoveHealthCheckRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // NextHopPeering: [Output Only] The network peering name that should + // handle matching packets, which should conform to RFC1035. + NextHopPeering string `json:"nextHopPeering,omitempty"` -type TargetPoolsRemoveInstanceRequest struct { - // Instances: URLs of the instances to be removed from target pool. - Instances []*InstanceReference `json:"instances,omitempty"` + // NextHopVpnTunnel: The URL to a VpnTunnel that should handle matching + // packets. + NextHopVpnTunnel string `json:"nextHopVpnTunnel,omitempty"` - // ForceSendFields is a list of field names (e.g. "Instances") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` + // Priority: The priority of this route. Priority is used to break ties + // in cases where there is more than one matching route of equal prefix + // length. In the case of two routes with equal prefix length, the one + // with the lowest-numbered priority value wins. Default value is 1000. + // Valid range is 0 through 65535. + Priority int64 `json:"priority,omitempty"` - // NullFields is a list of field names (e.g. "Instances") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} + // SelfLink: [Output Only] Server-defined fully-qualified URL for this + // resource. + SelfLink string `json:"selfLink,omitempty"` -func (s *TargetPoolsRemoveInstanceRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolsRemoveInstanceRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // Tags: A list of instance tags to which this route applies. + Tags []string `json:"tags,omitempty"` -type TargetPoolsScopedList struct { - // TargetPools: List of target pools contained in this scope. - TargetPools []*TargetPool `json:"targetPools,omitempty"` + // Warnings: [Output Only] If potential misconfigurations are detected + // for this route, this field will be populated with warning messages. + Warnings []*RouteWarnings `json:"warnings,omitempty"` - // Warning: Informational warning which replaces the list of addresses - // when the list is empty. 
- Warning *TargetPoolsScopedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "TargetPools") to - // unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "TargetPools") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *TargetPoolsScopedList) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolsScopedList +func (s *Route) MarshalJSON() ([]byte, error) { + type noMethod Route raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetPoolsScopedListWarning: Informational warning which replaces -// the list of addresses when the list is empty. -type TargetPoolsScopedListWarning struct { +type RouteWarnings struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -14705,7 +17414,7 @@ type TargetPoolsScopedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*TargetPoolsScopedListWarningData `json:"data,omitempty"` + Data []*RouteWarningsData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -14728,13 +17437,13 @@ type TargetPoolsScopedListWarning struct { NullFields []string `json:"-"` } -func (s *TargetPoolsScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolsScopedListWarning +func (s *RouteWarnings) MarshalJSON() ([]byte, error) { + type noMethod RouteWarnings raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetPoolsScopedListWarningData struct { +type RouteWarningsData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. 
For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -14765,16 +17474,43 @@ type TargetPoolsScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetPoolsScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod TargetPoolsScopedListWarningData +func (s *RouteWarningsData) MarshalJSON() ([]byte, error) { + type noMethod RouteWarningsData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetReference struct { - Target string `json:"target,omitempty"` +// RouteList: Contains a list of Route resources. +type RouteList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` - // ForceSendFields is a list of field names (e.g. "Target") to + // Items: A list of Route resources. + Items []*Route `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *RouteListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -14782,7 +17518,7 @@ type TargetReference struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Target") to include in API + // NullFields is a list of field names (e.g. "Id") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -14791,18 +17527,48 @@ type TargetReference struct { NullFields []string `json:"-"` } -func (s *TargetReference) MarshalJSON() ([]byte, error) { - type noMethod TargetReference +func (s *RouteList) MarshalJSON() ([]byte, error) { + type noMethod RouteList raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetSslProxiesSetBackendServiceRequest struct { - // Service: The URL of the new BackendService resource for the - // targetSslProxy. - Service string `json:"service,omitempty"` +// RouteListWarning: [Output Only] Informational warning message. +type RouteListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` - // ForceSendFields is a list of field names (e.g. "Service") to + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*RouteListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -14810,8 +17576,8 @@ type TargetSslProxiesSetBackendServiceRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Service") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -14819,22 +17585,27 @@ type TargetSslProxiesSetBackendServiceRequest struct { NullFields []string `json:"-"` } -func (s *TargetSslProxiesSetBackendServiceRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetSslProxiesSetBackendServiceRequest +func (s *RouteListWarning) MarshalJSON() ([]byte, error) { + type noMethod RouteListWarning raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetSslProxiesSetProxyHeaderRequest struct { - // ProxyHeader: The new type of proxy header to append before sending - // data to the backend. NONE or PROXY_V1 are allowed. - // - // Possible values: - // "NONE" - // "PROXY_V1" - ProxyHeader string `json:"proxyHeader,omitempty"` +type RouteListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` - // ForceSendFields is a list of field names (e.g. "ProxyHeader") to + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"Key") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -14842,54 +17613,31 @@ type TargetSslProxiesSetProxyHeaderRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ProxyHeader") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *TargetSslProxiesSetProxyHeaderRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetSslProxiesSetProxyHeaderRequest +func (s *RouteListWarningData) MarshalJSON() ([]byte, error) { + type noMethod RouteListWarningData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetSslProxiesSetSslCertificatesRequest struct { - // SslCertificates: New set of URLs to SslCertificate resources to - // associate with this TargetSslProxy. Currently exactly one ssl - // certificate must be specified. - SslCertificates []string `json:"sslCertificates,omitempty"` - - // ForceSendFields is a list of field names (e.g. "SslCertificates") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "SslCertificates") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} +// Router: Router resource. +type Router struct { + // Bgp: BGP information specific to this router. + Bgp *RouterBgp `json:"bgp,omitempty"` -func (s *TargetSslProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetSslProxiesSetSslCertificatesRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // BgpPeers: BGP information that needs to be configured into the + // routing stack to establish the BGP peering. It must specify peer ASN + // and either interface name, IP, or peer IP. Please refer to RFC4273. + BgpPeers []*RouterBgpPeer `json:"bgpPeers,omitempty"` -// TargetSslProxy: A TargetSslProxy resource. This resource defines an -// SSL proxy. -type TargetSslProxy struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. 
CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -14902,8 +17650,13 @@ type TargetSslProxy struct { // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` - // Kind: [Output Only] Type of the resource. Always - // compute#targetSslProxy for target SSL proxies. + // Interfaces: Router interfaces. Each interface requires either one + // linked resource (e.g. linkedVpnTunnel), or IP address and IP address + // range (e.g. ipRange), or both. + Interfaces []*RouterInterface `json:"interfaces,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#router for + // routers. Kind string `json:"kind,omitempty"` // Name: Name of the resource. Provided by the client when the resource @@ -14915,62 +17668,50 @@ type TargetSslProxy struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` - // ProxyHeader: Specifies the type of proxy header to append before - // sending data to the backend, either NONE or PROXY_V1. The default is - // NONE. - // - // Possible values: - // "NONE" - // "PROXY_V1" - ProxyHeader string `json:"proxyHeader,omitempty"` + // Network: URI of the network to which this router belongs. + Network string `json:"network,omitempty"` + + // Region: [Output Only] URI of the region where the router resides. + Region string `json:"region,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // Service: URL to the BackendService resource. - Service string `json:"service,omitempty"` - - // SslCertificates: URLs to SslCertificate resources that are used to - // authenticate connections to Backends. Currently exactly one SSL - // certificate must be specified. - SslCertificates []string `json:"sslCertificates,omitempty"` - // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "Bgp") to + // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Bgp") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
NullFields []string `json:"-"` } -func (s *TargetSslProxy) MarshalJSON() ([]byte, error) { - type noMethod TargetSslProxy +func (s *Router) MarshalJSON() ([]byte, error) { + type noMethod Router raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetSslProxyList: Contains a list of TargetSslProxy resources. -type TargetSslProxyList struct { +// RouterAggregatedList: Contains a list of routers. +type RouterAggregatedList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of TargetSslProxy resources. - Items []*TargetSslProxy `json:"items,omitempty"` + // Items: A list of Router resources. + Items map[string]RoutersScopedList `json:"items,omitempty"` // Kind: Type of resource. Kind string `json:"kind,omitempty"` @@ -14986,6 +17727,9 @@ type TargetSslProxyList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *RouterAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -15007,18 +17751,49 @@ type TargetSslProxyList struct { NullFields []string `json:"-"` } -func (s *TargetSslProxyList) MarshalJSON() ([]byte, error) { - type noMethod TargetSslProxyList +func (s *RouterAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod RouterAggregatedList raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetTcpProxiesSetBackendServiceRequest struct { - // Service: The URL of the new BackendService resource for the - // targetTcpProxy. - Service string `json:"service,omitempty"` +// RouterAggregatedListWarning: [Output Only] Informational warning +// message. +type RouterAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` - // ForceSendFields is a list of field names (e.g. "Service") to + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*RouterAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -15026,8 +17801,8 @@ type TargetTcpProxiesSetBackendServiceRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Service") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -15035,22 +17810,27 @@ type TargetTcpProxiesSetBackendServiceRequest struct { NullFields []string `json:"-"` } -func (s *TargetTcpProxiesSetBackendServiceRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetTcpProxiesSetBackendServiceRequest +func (s *RouterAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod RouterAggregatedListWarning raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetTcpProxiesSetProxyHeaderRequest struct { - // ProxyHeader: The new type of proxy header to append before sending - // data to the backend. NONE or PROXY_V1 are allowed. - // - // Possible values: - // "NONE" - // "PROXY_V1" - ProxyHeader string `json:"proxyHeader,omitempty"` +type RouterAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` - // ForceSendFields is a list of field names (e.g. "ProxyHeader") to + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -15058,120 +17838,29 @@ type TargetTcpProxiesSetProxyHeaderRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ProxyHeader") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. 
NullFields []string `json:"-"` } -func (s *TargetTcpProxiesSetProxyHeaderRequest) MarshalJSON() ([]byte, error) { - type noMethod TargetTcpProxiesSetProxyHeaderRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// TargetTcpProxy: A TargetTcpProxy resource. This resource defines a -// TCP proxy. -type TargetTcpProxy struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of the resource. Always - // compute#targetTcpProxy for target TCP proxies. - Kind string `json:"kind,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // ProxyHeader: Specifies the type of proxy header to append before - // sending data to the backend, either NONE or PROXY_V1. The default is - // NONE. - // - // Possible values: - // "NONE" - // "PROXY_V1" - ProxyHeader string `json:"proxyHeader,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // Service: URL to the BackendService resource. - Service string `json:"service,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *TargetTcpProxy) MarshalJSON() ([]byte, error) { - type noMethod TargetTcpProxy +func (s *RouterAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod RouterAggregatedListWarningData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetTcpProxyList: Contains a list of TargetTcpProxy resources. -type TargetTcpProxyList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. 
- Id string `json:"id,omitempty"` - - // Items: A list of TargetTcpProxy resources. - Items []*TargetTcpProxy `json:"items,omitempty"` - - // Kind: Type of resource. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` +type RouterBgp struct { + // Asn: Local BGP Autonomous System Number (ASN). Must be an RFC6996 + // private ASN, either 16-bit or 32-bit. The value will be fixed for + // this router resource. All VPN tunnels that link to this router will + // have the same local ASN. + Asn int64 `json:"asn,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "Asn") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -15179,7 +17868,7 @@ type TargetTcpProxyList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API + // NullFields is a list of field names (e.g. "Asn") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -15188,125 +17877,87 @@ type TargetTcpProxyList struct { NullFields []string `json:"-"` } -func (s *TargetTcpProxyList) MarshalJSON() ([]byte, error) { - type noMethod TargetTcpProxyList +func (s *RouterBgp) MarshalJSON() ([]byte, error) { + type noMethod RouterBgp raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetVpnGateway: Represents a Target VPN gateway resource. -type TargetVpnGateway struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` - - // ForwardingRules: [Output Only] A list of URLs to the ForwardingRule - // resources. ForwardingRules are created using - // compute.forwardingRules.insert and associated to a VPN gateway. - ForwardingRules []string `json:"forwardingRules,omitempty"` +type RouterBgpPeer struct { + // AdvertisedRoutePriority: The priority of routes advertised to this + // BGP peer. In the case where there is more than one matching route of + // maximum length, the routes with lowest priority value win. + AdvertisedRoutePriority int64 `json:"advertisedRoutePriority,omitempty"` - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. 
- Id uint64 `json:"id,omitempty,string"` + // InterfaceName: Name of the interface the BGP peer is associated with. + InterfaceName string `json:"interfaceName,omitempty"` - // Kind: [Output Only] Type of resource. Always compute#targetVpnGateway - // for target VPN gateways. - Kind string `json:"kind,omitempty"` + // IpAddress: IP address of the interface inside Google Cloud Platform. + // Only IPv4 is supported. + IpAddress string `json:"ipAddress,omitempty"` - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. + // Name: Name of this BGP peer. The name must be 1-63 characters long + // and comply with RFC1035. Name string `json:"name,omitempty"` - // Network: URL of the network to which this VPN gateway is attached. - // Provided by the client when the VPN gateway is created. - Network string `json:"network,omitempty"` - - // Region: [Output Only] URL of the region where the target VPN gateway - // resides. - Region string `json:"region,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // Status: [Output Only] The status of the VPN gateway. - // - // Possible values: - // "CREATING" - // "DELETING" - // "FAILED" - // "READY" - Status string `json:"status,omitempty"` - - // Tunnels: [Output Only] A list of URLs to VpnTunnel resources. - // VpnTunnels are created using compute.vpntunnels.insert method and - // associated to a VPN gateway. - Tunnels []string `json:"tunnels,omitempty"` + // PeerAsn: Peer BGP Autonomous System Number (ASN). For VPN use case, + // this value can be different for every tunnel. + PeerAsn int64 `json:"peerAsn,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // PeerIpAddress: IP address of the BGP interface outside Google cloud. + // Only IPv4 is supported. + PeerIpAddress string `json:"peerIpAddress,omitempty"` - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. + // "AdvertisedRoutePriority") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. 
However, any field - // with an empty value appearing in NullFields will be sent to the + // NullFields is a list of field names (e.g. "AdvertisedRoutePriority") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the // server as null. It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch // requests. NullFields []string `json:"-"` } -func (s *TargetVpnGateway) MarshalJSON() ([]byte, error) { - type noMethod TargetVpnGateway +func (s *RouterBgpPeer) MarshalJSON() ([]byte, error) { + type noMethod RouterBgpPeer raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetVpnGatewayAggregatedList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of TargetVpnGateway resources. - Items map[string]TargetVpnGatewaysScopedList `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#targetVpnGateway - // for target VPN gateways. - Kind string `json:"kind,omitempty"` +type RouterInterface struct { + // IpRange: IP address and range of the interface. The IP range must be + // in the RFC3927 link-local IP space. The value must be a + // CIDR-formatted string, for example: 169.254.0.1/30. NOTE: Do not + // truncate the address as it represents the IP address of the + // interface. + IpRange string `json:"ipRange,omitempty"` - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` + // LinkedInterconnectAttachment: URI of the linked interconnect + // attachment. It must be in the same region as the router. Each + // interface can have at most one linked resource and it could either be + // a VPN Tunnel or an interconnect attachment. + LinkedInterconnectAttachment string `json:"linkedInterconnectAttachment,omitempty"` - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` + // LinkedVpnTunnel: URI of the linked VPN tunnel. It must be in the same + // region as the router. Each interface can have at most one linked + // resource and it could either be a VPN Tunnel or an interconnect + // attachment. + LinkedVpnTunnel string `json:"linkedVpnTunnel,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Name: Name of this interface entry. The name must be 1-63 characters + // long and comply with RFC1035. + Name string `json:"name,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "IpRange") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -15314,8 +17965,8 @@ type TargetVpnGatewayAggregatedList struct { // used to include empty fields in Patch requests. 
ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "IpRange") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -15323,23 +17974,23 @@ type TargetVpnGatewayAggregatedList struct { NullFields []string `json:"-"` } -func (s *TargetVpnGatewayAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod TargetVpnGatewayAggregatedList +func (s *RouterInterface) MarshalJSON() ([]byte, error) { + type noMethod RouterInterface raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetVpnGatewayList: Contains a list of TargetVpnGateway resources. -type TargetVpnGatewayList struct { +// RouterList: Contains a list of Router resources. +type RouterList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of TargetVpnGateway resources. - Items []*TargetVpnGateway `json:"items,omitempty"` + // Items: A list of Router resources. + Items []*Router `json:"items,omitempty"` - // Kind: [Output Only] Type of resource. Always compute#targetVpnGateway - // for target VPN gateways. + // Kind: [Output Only] Type of resource. Always compute#router for + // routers. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -15353,6 +18004,9 @@ type TargetVpnGatewayList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *RouterListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -15374,48 +18028,14 @@ type TargetVpnGatewayList struct { NullFields []string `json:"-"` } -func (s *TargetVpnGatewayList) MarshalJSON() ([]byte, error) { - type noMethod TargetVpnGatewayList - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type TargetVpnGatewaysScopedList struct { - // TargetVpnGateways: [Output Only] List of target vpn gateways - // contained in this scope. - TargetVpnGateways []*TargetVpnGateway `json:"targetVpnGateways,omitempty"` - - // Warning: [Output Only] Informational warning which replaces the list - // of addresses when the list is empty. - Warning *TargetVpnGatewaysScopedListWarning `json:"warning,omitempty"` - - // ForceSendFields is a list of field names (e.g. "TargetVpnGateways") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "TargetVpnGateways") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. 
However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *TargetVpnGatewaysScopedList) MarshalJSON() ([]byte, error) { - type noMethod TargetVpnGatewaysScopedList +func (s *RouterList) MarshalJSON() ([]byte, error) { + type noMethod RouterList raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// TargetVpnGatewaysScopedListWarning: [Output Only] Informational -// warning which replaces the list of addresses when the list is empty. -type TargetVpnGatewaysScopedListWarning struct { +// RouterListWarning: [Output Only] Informational warning message. +type RouterListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -15443,7 +18063,7 @@ type TargetVpnGatewaysScopedListWarning struct { // Data: [Output Only] Metadata about this warning in key: value format. // For example: // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*TargetVpnGatewaysScopedListWarningData `json:"data,omitempty"` + Data []*RouterListWarningData `json:"data,omitempty"` // Message: [Output Only] A human-readable description of the warning // code. @@ -15466,13 +18086,13 @@ type TargetVpnGatewaysScopedListWarning struct { NullFields []string `json:"-"` } -func (s *TargetVpnGatewaysScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod TargetVpnGatewaysScopedListWarning +func (s *RouterListWarning) MarshalJSON() ([]byte, error) { + type noMethod RouterListWarning raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TargetVpnGatewaysScopedListWarningData struct { +type RouterListWarningData struct { // Key: [Output Only] A key that provides more detail on the warning // being returned. For example, for warnings where there are no results // in a list request for a particular zone, this key might be scope and @@ -15503,22 +18123,25 @@ type TargetVpnGatewaysScopedListWarningData struct { NullFields []string `json:"-"` } -func (s *TargetVpnGatewaysScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod TargetVpnGatewaysScopedListWarningData +func (s *RouterListWarningData) MarshalJSON() ([]byte, error) { + type noMethod RouterListWarningData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TestFailure struct { - ActualService string `json:"actualService,omitempty"` +type RouterStatus struct { + // BestRoutes: Best routes for this router's network. + BestRoutes []*Route `json:"bestRoutes,omitempty"` - ExpectedService string `json:"expectedService,omitempty"` + // BestRoutesForRouter: Best routes learned by this router. + BestRoutesForRouter []*Route `json:"bestRoutesForRouter,omitempty"` - Host string `json:"host,omitempty"` + BgpPeerStatus []*RouterStatusBgpPeerStatus `json:"bgpPeerStatus,omitempty"` - Path string `json:"path,omitempty"` + // Network: URI of the network to which this router belongs. + Network string `json:"network,omitempty"` - // ForceSendFields is a list of field names (e.g. "ActualService") to + // ForceSendFields is a list of field names (e.g. "BestRoutes") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -15526,28 +18149,59 @@ type TestFailure struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ActualService") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "BestRoutes") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *TestFailure) MarshalJSON() ([]byte, error) { - type noMethod TestFailure +func (s *RouterStatus) MarshalJSON() ([]byte, error) { + type noMethod RouterStatus raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TestPermissionsRequest struct { - // Permissions: The set of permissions to check for the 'resource'. - // Permissions with wildcards (such as '*' or 'storage.*') are not - // allowed. - Permissions []string `json:"permissions,omitempty"` +type RouterStatusBgpPeerStatus struct { + // AdvertisedRoutes: Routes that were advertised to the remote BGP peer + AdvertisedRoutes []*Route `json:"advertisedRoutes,omitempty"` - // ForceSendFields is a list of field names (e.g. "Permissions") to + // IpAddress: IP address of the local BGP interface. + IpAddress string `json:"ipAddress,omitempty"` + + // LinkedVpnTunnel: URL of the VPN tunnel that this BGP peer controls. + LinkedVpnTunnel string `json:"linkedVpnTunnel,omitempty"` + + // Name: Name of this BGP peer. Unique within the Routers resource. + Name string `json:"name,omitempty"` + + // NumLearnedRoutes: Number of routes learned from the remote BGP Peer. + NumLearnedRoutes int64 `json:"numLearnedRoutes,omitempty"` + + // PeerIpAddress: IP address of the remote BGP interface. + PeerIpAddress string `json:"peerIpAddress,omitempty"` + + // State: BGP state as specified in RFC1771. + State string `json:"state,omitempty"` + + // Status: Status of the BGP peer: {UP, DOWN} + // + // Possible values: + // "DOWN" + // "UNKNOWN" + // "UP" + Status string `json:"status,omitempty"` + + // Uptime: Time this session has been up. Format: 14 years, 51 weeks, 6 + // days, 23 hours, 59 minutes, 59 seconds + Uptime string `json:"uptime,omitempty"` + + // UptimeSeconds: Time this session has been up, in seconds. Format: 145 + UptimeSeconds string `json:"uptimeSeconds,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AdvertisedRoutes") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -15555,31 +18209,33 @@ type TestPermissionsRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Permissions") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. 
However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "AdvertisedRoutes") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *TestPermissionsRequest) MarshalJSON() ([]byte, error) { - type noMethod TestPermissionsRequest +func (s *RouterStatusBgpPeerStatus) MarshalJSON() ([]byte, error) { + type noMethod RouterStatusBgpPeerStatus raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type TestPermissionsResponse struct { - // Permissions: A subset of `TestPermissionsRequest.permissions` that - // the caller is allowed. - Permissions []string `json:"permissions,omitempty"` +type RouterStatusResponse struct { + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + Result *RouterStatus `json:"result,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Permissions") to + // ForceSendFields is a list of field names (e.g. "Kind") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -15587,40 +18243,30 @@ type TestPermissionsResponse struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Permissions") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Kind") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *TestPermissionsResponse) MarshalJSON() ([]byte, error) { - type noMethod TestPermissionsResponse +func (s *RouterStatusResponse) MarshalJSON() ([]byte, error) { + type noMethod RouterStatusResponse raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type UDPHealthCheck struct { - // Port: The UDP port number for the health check request. Valid values - // are 1 through 65535. - Port int64 `json:"port,omitempty"` - - // PortName: Port name as defined in InstanceGroup#NamedPort#name. If - // both port and port_name are defined, port takes precedence. - PortName string `json:"portName,omitempty"` - - // Request: Raw data of request to send in payload of UDP packet. It is - // an error if this is empty. The request data can only be ASCII. 
- Request string `json:"request,omitempty"` +type RoutersPreviewResponse struct { + // Resource: Preview of given router. + Resource *Router `json:"resource,omitempty"` - // Response: The bytes to match against the beginning of the response - // data. It is an error if this is empty. The response data can only be - // ASCII. - Response string `json:"response,omitempty"` + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Port") to + // ForceSendFields is a list of field names (e.g. "Resource") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -15628,8 +18274,8 @@ type UDPHealthCheck struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Port") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Resource") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -15637,120 +18283,80 @@ type UDPHealthCheck struct { NullFields []string `json:"-"` } -func (s *UDPHealthCheck) MarshalJSON() ([]byte, error) { - type noMethod UDPHealthCheck +func (s *RoutersPreviewResponse) MarshalJSON() ([]byte, error) { + type noMethod RoutersPreviewResponse raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// UrlMap: A UrlMap resource. This resource defines the mapping from URL -// to the BackendService resource, based on the "longest-match" of the -// URL's host and path. -type UrlMap struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` +type RoutersScopedList struct { + // Routers: List of routers contained in this scope. + Routers []*Router `json:"routers,omitempty"` - // DefaultService: The URL of the BackendService resource if none of the - // hostRules match. - DefaultService string `json:"defaultService,omitempty"` + // Warning: Informational warning which replaces the list of routers + // when the list is empty. + Warning *RoutersScopedListWarning `json:"warning,omitempty"` - // Description: An optional description of this resource. Provide this - // property when you create the resource. - Description string `json:"description,omitempty"` + // ForceSendFields is a list of field names (e.g. "Routers") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // Fingerprint: Fingerprint of this resource. A hash of the contents - // stored in this object. This field is used in optimistic locking. This - // field will be ignored when inserting a UrlMap. An up-to-date - // fingerprint must be provided in order to update the UrlMap. 
- Fingerprint string `json:"fingerprint,omitempty"` + // NullFields is a list of field names (e.g. "Routers") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} - // HostRules: The list of HostRules to use against the URL. - HostRules []*HostRule `json:"hostRules,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of the resource. Always compute#urlMaps for - // url maps. - Kind string `json:"kind,omitempty"` - - // Name: Name of the resource. Provided by the client when the resource - // is created. The name must be 1-63 characters long, and comply with - // RFC1035. Specifically, the name must be 1-63 characters long and - // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means - // the first character must be a lowercase letter, and all following - // characters must be a dash, lowercase letter, or digit, except the - // last character, which cannot be a dash. - Name string `json:"name,omitempty"` - - // PathMatchers: The list of named PathMatchers to use against the URL. - PathMatchers []*PathMatcher `json:"pathMatchers,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // Tests: The list of expected URL mappings. Request to update this - // UrlMap will succeed only if all of the test cases pass. - Tests []*UrlMapTest `json:"tests,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *UrlMap) MarshalJSON() ([]byte, error) { - type noMethod UrlMap +func (s *RoutersScopedList) MarshalJSON() ([]byte, error) { + type noMethod RoutersScopedList raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// UrlMapList: Contains a list of UrlMap resources. -type UrlMapList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of UrlMap resources. - Items []*UrlMap `json:"items,omitempty"` - - // Kind: Type of resource. 
- Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` +// RoutersScopedListWarning: Informational warning which replaces the +// list of routers when the list is empty. +type RoutersScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*RoutersScopedListWarningData `json:"data,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -15758,7 +18364,7 @@ type UrlMapList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API + // NullFields is a list of field names (e.g. "Code") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -15767,16 +18373,27 @@ type UrlMapList struct { NullFields []string `json:"-"` } -func (s *UrlMapList) MarshalJSON() ([]byte, error) { - type noMethod UrlMapList +func (s *RoutersScopedListWarning) MarshalJSON() ([]byte, error) { + type noMethod RoutersScopedListWarning raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type UrlMapReference struct { - UrlMap string `json:"urlMap,omitempty"` +type RoutersScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. 
Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` - // ForceSendFields is a list of field names (e.g. "UrlMap") to + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -15784,7 +18401,7 @@ type UrlMapReference struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "UrlMap") to include in API + // NullFields is a list of field names (e.g. "Key") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -15793,67 +18410,50 @@ type UrlMapReference struct { NullFields []string `json:"-"` } -func (s *UrlMapReference) MarshalJSON() ([]byte, error) { - type noMethod UrlMapReference +func (s *RoutersScopedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod RoutersScopedListWarningData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// UrlMapTest: Message for the expected URL mappings. -type UrlMapTest struct { - // Description: Description of this test case. - Description string `json:"description,omitempty"` - - // Host: Host portion of the URL. - Host string `json:"host,omitempty"` - - // Path: Path portion of the URL. - Path string `json:"path,omitempty"` - - // Service: Expected BackendService resource the given URL should be - // mapped to. - Service string `json:"service,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Description") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` +// Rule: A rule to be applied in a Policy. +type Rule struct { + // Action: Required + // + // Possible values: + // "ALLOW" + // "ALLOW_WITH_LOG" + // "DENY" + // "DENY_WITH_LOG" + // "LOG" + // "NO_ACTION" + Action string `json:"action,omitempty"` - // NullFields is a list of field names (e.g. "Description") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
- NullFields []string `json:"-"` -} + // Conditions: Additional restrictions that must be met + Conditions []*Condition `json:"conditions,omitempty"` -func (s *UrlMapTest) MarshalJSON() ([]byte, error) { - type noMethod UrlMapTest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // Description: Human-readable description of the rule. + Description string `json:"description,omitempty"` -// UrlMapValidationResult: Message representing the validation result -// for a UrlMap. -type UrlMapValidationResult struct { - LoadErrors []string `json:"loadErrors,omitempty"` + // Ins: If one or more 'in' clauses are specified, the rule matches if + // the PRINCIPAL/AUTHORITY_SELECTOR is in at least one of these entries. + Ins []string `json:"ins,omitempty"` - // LoadSucceeded: Whether the given UrlMap can be successfully loaded. - // If false, 'loadErrors' indicates the reasons. - LoadSucceeded bool `json:"loadSucceeded,omitempty"` + // LogConfigs: The config returned to callers of + // tech.iam.IAM.CheckPolicy for any entries that match the LOG action. + LogConfigs []*LogConfig `json:"logConfigs,omitempty"` - TestFailures []*TestFailure `json:"testFailures,omitempty"` + // NotIns: If one or more 'not_in' clauses are specified, the rule + // matches if the PRINCIPAL/AUTHORITY_SELECTOR is in none of the + // entries. + NotIns []string `json:"notIns,omitempty"` - // TestPassed: If successfully loaded, this field indicates whether the - // test passed. If false, 'testFailures's indicate the reason of - // failure. - TestPassed bool `json:"testPassed,omitempty"` + // Permissions: A permission is a string of form '..' (e.g., + // 'storage.buckets.list'). A value of '*' matches all permissions, and + // a verb part of '*' (e.g., 'storage.buckets.*') matches all verbs. + Permissions []string `json:"permissions,omitempty"` - // ForceSendFields is a list of field names (e.g. "LoadErrors") to + // ForceSendFields is a list of field names (e.g. "Action") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -15861,8 +18461,8 @@ type UrlMapValidationResult struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "LoadErrors") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Action") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -15870,47 +18470,42 @@ type UrlMapValidationResult struct { NullFields []string `json:"-"` } -func (s *UrlMapValidationResult) MarshalJSON() ([]byte, error) { - type noMethod UrlMapValidationResult +func (s *Rule) MarshalJSON() ([]byte, error) { + type noMethod Rule raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type UrlMapsValidateRequest struct { - // Resource: Content of the UrlMap to be validated. - Resource *UrlMap `json:"resource,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Resource") to - // unconditionally include in API requests. 
By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` +type SSLHealthCheck struct { + // Port: The TCP port number for the health check request. The default + // value is 443. Valid values are 1 through 65535. + Port int64 `json:"port,omitempty"` - // NullFields is a list of field names (e.g. "Resource") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} + // PortName: Port name as defined in InstanceGroup#NamedPort#name. If + // both port and port_name are defined, port takes precedence. + PortName string `json:"portName,omitempty"` -func (s *UrlMapsValidateRequest) MarshalJSON() ([]byte, error) { - type noMethod UrlMapsValidateRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // ProxyHeader: Specifies the type of proxy header to append before + // sending data to the backend, either NONE or PROXY_V1. The default is + // NONE. + // + // Possible values: + // "NONE" + // "PROXY_V1" + ProxyHeader string `json:"proxyHeader,omitempty"` -type UrlMapsValidateResponse struct { - Result *UrlMapValidationResult `json:"result,omitempty"` + // Request: The application data to send once the SSL connection has + // been established (default value is empty). If both request and + // response are empty, the connection establishment alone will indicate + // health. The request data can only be ASCII. + Request string `json:"request,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Response: The bytes to match against the beginning of the response + // data. If left empty (the default value), any response will indicate + // health. The response data can only be ASCII. + Response string `json:"response,omitempty"` - // ForceSendFields is a list of field names (e.g. "Result") to + // ForceSendFields is a list of field names (e.g. "Port") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -15918,7 +18513,7 @@ type UrlMapsValidateResponse struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Result") to include in API + // NullFields is a list of field names (e.g. "Port") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -15927,33 +18522,41 @@ type UrlMapsValidateResponse struct { NullFields []string `json:"-"` } -func (s *UrlMapsValidateResponse) MarshalJSON() ([]byte, error) { - type noMethod UrlMapsValidateResponse +func (s *SSLHealthCheck) MarshalJSON() ([]byte, error) { + type noMethod SSLHealthCheck raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// UsageExportLocation: The location in Cloud Storage and naming method -// of the daily usage report. Contains bucket_name and report_name -// prefix. -type UsageExportLocation struct { - // BucketName: The name of an existing bucket in Cloud Storage where the - // usage report object is stored. The Google Service Account is granted - // write access to this bucket. This can either be the bucket name by - // itself, such as example-bucket, or the bucket name with gs:// or - // https://storage.googleapis.com/ in front of it, such as - // gs://example-bucket. - BucketName string `json:"bucketName,omitempty"` +// Scheduling: Sets the scheduling options for an Instance. +type Scheduling struct { + // AutomaticRestart: Specifies whether the instance should be + // automatically restarted if it is terminated by Compute Engine (not + // terminated by a user). You can only set the automatic restart option + // for standard instances. Preemptible instances cannot be automatically + // restarted. + // + // By default, this is set to true so an instance is automatically + // restarted if it is terminated by Compute Engine. + AutomaticRestart *bool `json:"automaticRestart,omitempty"` - // ReportNamePrefix: An optional prefix for the name of the usage report - // object stored in bucketName. If not supplied, defaults to usage. The - // report is stored as a CSV file named - // report_name_prefix_gce_YYYYMMDD.csv where YYYYMMDD is the day of the - // usage according to Pacific Time. If you supply a prefix, it should - // conform to Cloud Storage object naming conventions. - ReportNamePrefix string `json:"reportNamePrefix,omitempty"` + // OnHostMaintenance: Defines the maintenance behavior for this + // instance. For standard instances, the default behavior is MIGRATE. + // For preemptible instances, the default and only possible behavior is + // TERMINATE. For more information, see Setting Instance Scheduling + // Options. + // + // Possible values: + // "MIGRATE" + // "TERMINATE" + OnHostMaintenance string `json:"onHostMaintenance,omitempty"` - // ForceSendFields is a list of field names (e.g. "BucketName") to + // Preemptible: Defines whether the instance is preemptible. This can + // only be set during instance creation, it cannot be set or changed + // after the instance has been created. + Preemptible bool `json:"preemptible,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AutomaticRestart") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -15961,22 +18564,25 @@ type UsageExportLocation struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "BucketName") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. 
However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "AutomaticRestart") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } -func (s *UsageExportLocation) MarshalJSON() ([]byte, error) { - type noMethod UsageExportLocation +func (s *Scheduling) MarshalJSON() ([]byte, error) { + type noMethod Scheduling raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type VpnTunnel struct { +// SecurityPolicy: A security policy is comprised of one or more rules. +// It can also be associated with one or more 'targets'. +type SecurityPolicy struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -15985,29 +18591,25 @@ type VpnTunnel struct { // property when you create the resource. Description string `json:"description,omitempty"` - // DetailedStatus: [Output Only] Detailed status message for the VPN - // tunnel. - DetailedStatus string `json:"detailedStatus,omitempty"` + // Fingerprint: Specifies a fingerprint for this resource, which is + // essentially a hash of the metadata's contents and used for optimistic + // locking. The fingerprint is initially generated by Compute Engine and + // changes after every request to modify or update metadata. You must + // always provide an up-to-date fingerprint hash in order to update or + // change metadata. + // + // To see the latest fingerprint, make get() request to the security + // policy. + Fingerprint string `json:"fingerprint,omitempty"` // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` - // IkeVersion: IKE protocol version to use when establishing the VPN - // tunnel with peer VPN gateway. Acceptable IKE versions are 1 or 2. - // Default version is 2. - IkeVersion int64 `json:"ikeVersion,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#vpnTunnel for - // VPN tunnels. + // Kind: [Output only] Type of the resource. Always + // compute#securityPolicyfor security policies Kind string `json:"kind,omitempty"` - // LocalTrafficSelector: Local traffic selector to use when establishing - // the VPN tunnel with peer VPN gateway. The value should be a CIDR - // formatted string, for example: 192.168.0.0/16. The ranges should be - // disjoint. Only IPv4 is supported. - LocalTrafficSelector []string `json:"localTrafficSelector,omitempty"` - // Name: Name of the resource. Provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with // RFC1035. Specifically, the name must be 1-63 characters long and @@ -16017,52 +18619,15 @@ type VpnTunnel struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` - // PeerIp: IP address of the peer VPN gateway. Only IPv4 is supported. 
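(Illustrative aside, not part of the vendored diff.) The ForceSendFields/NullFields/MarshalJSON pattern above repeats for every generated type: ForceSendFields forces a zero-valued field into the request body, NullFields emits an empty field as an explicit JSON null, and both are applied by gensupport.MarshalJSON through the per-type MarshalJSON method. A minimal sketch using the Scheduling type just added; the import path is an assumption about where this generated package is vendored:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1" // assumed import path
)

func main() {
	s := &compute.Scheduling{
		OnHostMaintenance: "TERMINATE",
		// Preemptible is false (its zero value) and would normally be omitted;
		// listing it in ForceSendFields keeps it in the request body anyway.
		Preemptible:     false,
		ForceSendFields: []string{"Preemptible"},
		// AutomaticRestart is nil (empty), so it may legally appear in
		// NullFields and is serialized as an explicit JSON null.
		NullFields: []string{"AutomaticRestart"},
	}
	b, err := json.Marshal(s) // dispatches to the generated Scheduling.MarshalJSON
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b))
	// {"automaticRestart":null,"onHostMaintenance":"TERMINATE","preemptible":false}
}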
- PeerIp string `json:"peerIp,omitempty"` - - // Region: [Output Only] URL of the region where the VPN tunnel resides. - Region string `json:"region,omitempty"` - - // RemoteTrafficSelector: Remote traffic selectors to use when - // establishing the VPN tunnel with peer VPN gateway. The value should - // be a CIDR formatted string, for example: 192.168.0.0/16. The ranges - // should be disjoint. Only IPv4 is supported. - RemoteTrafficSelector []string `json:"remoteTrafficSelector,omitempty"` - - // Router: URL of router resource to be used for dynamic routing. - Router string `json:"router,omitempty"` + // Rules: List of rules that belong to this policy. There must always be + // a default rule (rule with priority 2147483647 and match "*"). If no + // rules are provided when creating a security policy, a default rule + // with action "allow" will be added. + Rules []*SecurityPolicyRule `json:"rules,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // SharedSecret: Shared secret used to set the secure session between - // the Cloud VPN gateway and the peer VPN gateway. - SharedSecret string `json:"sharedSecret,omitempty"` - - // SharedSecretHash: Hash of the shared secret. - SharedSecretHash string `json:"sharedSecretHash,omitempty"` - - // Status: [Output Only] The status of the VPN tunnel. - // - // Possible values: - // "ALLOCATING_RESOURCES" - // "AUTHORIZATION_ERROR" - // "DEPROVISIONING" - // "ESTABLISHED" - // "FAILED" - // "FIRST_HANDSHAKE" - // "NEGOTIATION_FAILURE" - // "NETWORK_ERROR" - // "NO_INCOMING_PACKETS" - // "PROVISIONING" - // "REJECTED" - // "WAITING_FOR_FULL_CONFIG" - Status string `json:"status,omitempty"` - - // TargetVpnGateway: URL of the VPN gateway with which this VPN tunnel - // is associated. Provided by the client when the VPN tunnel is created. - TargetVpnGateway string `json:"targetVpnGateway,omitempty"` - // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -16085,22 +18650,22 @@ type VpnTunnel struct { NullFields []string `json:"-"` } -func (s *VpnTunnel) MarshalJSON() ([]byte, error) { - type noMethod VpnTunnel +func (s *SecurityPolicy) MarshalJSON() ([]byte, error) { + type noMethod SecurityPolicy raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type VpnTunnelAggregatedList struct { +type SecurityPolicyList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of VpnTunnelsScopedList resources. - Items map[string]VpnTunnelsScopedList `json:"items,omitempty"` + // Items: A list of SecurityPolicy resources. + Items []*SecurityPolicy `json:"items,omitempty"` - // Kind: [Output Only] Type of resource. Always compute#vpnTunnel for - // VPN tunnels. + // Kind: [Output Only] Type of resource. Always + // compute#securityPolicyList for listsof securityPolicies Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -16111,8 +18676,8 @@ type VpnTunnelAggregatedList struct { // the results. NextPageToken string `json:"nextPageToken,omitempty"` - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. 
+ Warning *SecurityPolicyListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -16135,41 +18700,49 @@ type VpnTunnelAggregatedList struct { NullFields []string `json:"-"` } -func (s *VpnTunnelAggregatedList) MarshalJSON() ([]byte, error) { - type noMethod VpnTunnelAggregatedList +func (s *SecurityPolicyList) MarshalJSON() ([]byte, error) { + type noMethod SecurityPolicyList raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// VpnTunnelList: Contains a list of VpnTunnel resources. -type VpnTunnelList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of VpnTunnel resources. - Items []*VpnTunnel `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always compute#vpnTunnel for - // VPN tunnels. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` +// SecurityPolicyListWarning: [Output Only] Informational warning +// message. +type SecurityPolicyListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*SecurityPolicyListWarningData `json:"data,omitempty"` - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -16177,7 +18750,7 @@ type VpnTunnelList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API + // NullFields is a list of field names (e.g. "Code") to include in API // requests with the JSON null value. 
By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -16186,21 +18759,27 @@ type VpnTunnelList struct { NullFields []string `json:"-"` } -func (s *VpnTunnelList) MarshalJSON() ([]byte, error) { - type noMethod VpnTunnelList +func (s *SecurityPolicyListWarning) MarshalJSON() ([]byte, error) { + type noMethod SecurityPolicyListWarning raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type VpnTunnelsScopedList struct { - // VpnTunnels: List of vpn tunnels contained in this scope. - VpnTunnels []*VpnTunnel `json:"vpnTunnels,omitempty"` +type SecurityPolicyListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` - // Warning: Informational warning which replaces the list of addresses - // when the list is empty. - Warning *VpnTunnelsScopedListWarning `json:"warning,omitempty"` + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` - // ForceSendFields is a list of field names (e.g. "VpnTunnels") to + // ForceSendFields is a list of field names (e.g. "Key") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -16208,8 +18787,8 @@ type VpnTunnelsScopedList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "VpnTunnels") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -16217,49 +18796,69 @@ type VpnTunnelsScopedList struct { NullFields []string `json:"-"` } -func (s *VpnTunnelsScopedList) MarshalJSON() ([]byte, error) { - type noMethod VpnTunnelsScopedList +func (s *SecurityPolicyListWarningData) MarshalJSON() ([]byte, error) { + type noMethod SecurityPolicyListWarningData raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// VpnTunnelsScopedListWarning: Informational warning which replaces the -// list of addresses when the list is empty. -type VpnTunnelsScopedListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. 
- // - // Possible values: - // "CLEANUP_FAILED" - // "DEPRECATED_RESOURCE_USED" - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - // "FIELD_VALUE_OVERRIDEN" - // "INJECTED_KERNELS_DEPRECATED" - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - // "NEXT_HOP_CANNOT_IP_FORWARD" - // "NEXT_HOP_INSTANCE_NOT_FOUND" - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - // "NEXT_HOP_NOT_RUNNING" - // "NOT_CRITICAL_ERROR" - // "NO_RESULTS_ON_PAGE" - // "REQUIRED_TOS_AGREEMENT" - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - // "RESOURCE_NOT_DELETED" - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - // "UNREACHABLE" - Code string `json:"code,omitempty"` +type SecurityPolicyReference struct { + SecurityPolicy string `json:"securityPolicy,omitempty"` - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: - // "data": [ { "key": "scope", "value": "zones/us-east1-d" } - Data []*VpnTunnelsScopedListWarningData `json:"data,omitempty"` + // ForceSendFields is a list of field names (e.g. "SecurityPolicy") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` + // NullFields is a list of field names (e.g. "SecurityPolicy") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} - // ForceSendFields is a list of field names (e.g. "Code") to +func (s *SecurityPolicyReference) MarshalJSON() ([]byte, error) { + type noMethod SecurityPolicyReference + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SecurityPolicyRule: Represents a rule that describes one or more +// match conditions along with the action to be taken when traffic +// matches this condition (allow or deny). +type SecurityPolicyRule struct { + // Action: The Action to preform when the client connection triggers the + // rule. Can currently be either "allow" or "deny()" where valid values + // for status are 403, 404, and 502. + Action string `json:"action,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Kind: [Output only] Type of the resource. Always + // compute#securityPolicyRule for security policy rules + Kind string `json:"kind,omitempty"` + + // Match: A match condition that incoming traffic is evaluated against. + // If it evaluates to true, the corresponding ?action? is enforced. + Match *SecurityPolicyRuleMatcher `json:"match,omitempty"` + + // Preview: If set to true, the specified action is not enforced. + Preview bool `json:"preview,omitempty"` + + // Priority: An integer indicating the priority of a rule in the list. + // The priority must be a positive value between 0 and 2147483647. Rules + // are evaluated in the increasing order of priority. 
+ Priority int64 `json:"priority,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Action") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -16267,7 +18866,7 @@ type VpnTunnelsScopedListWarning struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Code") to include in API + // NullFields is a list of field names (e.g. "Action") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -16276,27 +18875,19 @@ type VpnTunnelsScopedListWarning struct { NullFields []string `json:"-"` } -func (s *VpnTunnelsScopedListWarning) MarshalJSON() ([]byte, error) { - type noMethod VpnTunnelsScopedListWarning +func (s *SecurityPolicyRule) MarshalJSON() ([]byte, error) { + type noMethod SecurityPolicyRule raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type VpnTunnelsScopedListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` - - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` +// SecurityPolicyRuleMatcher: Represents a match condition that incoming +// traffic is evaluated against. Exactly one field must be specified. +type SecurityPolicyRuleMatcher struct { + // SrcIpRanges: CIDR IP address range. Only IPv4 is supported. + SrcIpRanges []string `json:"srcIpRanges,omitempty"` - // ForceSendFields is a list of field names (e.g. "Key") to + // ForceSendFields is a list of field names (e.g. "SrcIpRanges") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -16304,49 +18895,50 @@ type VpnTunnelsScopedListWarningData struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Key") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "SrcIpRanges") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. 
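(Illustrative aside, not part of the vendored diff.) The new SecurityPolicyRule and SecurityPolicyRuleMatcher types compose roughly as sketched below; the import path is a guess at the beta compute surface these types belong to, and actually attaching the rule to a policy would go through the securityPolicies service calls, which are not reproduced here:

package main

import (
	"encoding/json"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

func main() {
	rule := &compute.SecurityPolicyRule{
		// Per the field docs: "allow" or "deny(<status>)" with status 403, 404, or 502.
		Action:      "deny(403)",
		Description: "block a test range",
		// Rules are evaluated in increasing priority order; 2147483647 is the default rule.
		Priority: 1000,
		Match: &compute.SecurityPolicyRuleMatcher{
			// Exactly one matcher field must be set; only IPv4 CIDRs are supported.
			SrcIpRanges: []string{"203.0.113.0/24"},
		},
		// Preview records matches without enforcing the action.
		Preview: true,
	}
	b, _ := json.Marshal(rule)
	fmt.Println(string(b))
}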
NullFields []string `json:"-"` } -func (s *VpnTunnelsScopedListWarningData) MarshalJSON() ([]byte, error) { - type noMethod VpnTunnelsScopedListWarningData +func (s *SecurityPolicyRuleMatcher) MarshalJSON() ([]byte, error) { + type noMethod SecurityPolicyRuleMatcher raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type XpnHostList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: [Output Only] A list of shared VPC host project URLs. - Items []*Project `json:"items,omitempty"` +// SerialPortOutput: An instance's serial console output. +type SerialPortOutput struct { + // Contents: [Output Only] The contents of the console output. + Contents string `json:"contents,omitempty"` - // Kind: [Output Only] Type of resource. Always compute#xpnHostList for - // lists of shared VPC hosts. + // Kind: [Output Only] Type of the resource. Always + // compute#serialPortOutput for serial port output. Kind string `json:"kind,omitempty"` - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` + // Next: [Output Only] The position of the next byte of content from the + // serial console output. Use this value in the next request as the + // start parameter. + Next int64 `json:"next,omitempty,string"` // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Start: The starting byte position of the output that was returned. + // This should match the start parameter sent with the request. If the + // serial console output exceeds the size of the buffer, older output + // will be overwritten by newer content and the start values will be + // mismatched. + Start int64 `json:"start,omitempty,string"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "Contents") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -16354,8 +18946,8 @@ type XpnHostList struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Contents") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. 
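(Illustrative aside, not part of the vendored diff.) SerialPortOutput carries its own paging contract: Start echoes the byte offset that was requested and Next is the offset to pass as the start parameter of the following request. A small sketch of draining console output against that contract; fetchFrom is a hypothetical stand-in for whatever call returns a *SerialPortOutput for a given offset (for example the generated getSerialPortOutput call), and the import path is assumed:

package main

import (
	"fmt"
	"strings"

	compute "google.golang.org/api/compute/v1" // assumed import path
)

// drainSerial keeps requesting output until no new bytes arrive, feeding
// Next from each response back in as the start offset of the next request.
func drainSerial(fetchFrom func(start int64) (*compute.SerialPortOutput, error)) (string, error) {
	var buf strings.Builder
	var start int64
	for {
		out, err := fetchFrom(start)
		if err != nil {
			return "", err
		}
		buf.WriteString(out.Contents)
		if out.Next <= start { // nothing new came back; we are caught up
			return buf.String(), nil
		}
		start = out.Next
	}
}

func main() {
	// Toy fetcher serving a fixed string in 4-byte chunks, standing in for a real API call.
	data := "hello, serial console\n"
	fake := func(start int64) (*compute.SerialPortOutput, error) {
		end := start + 4
		if end > int64(len(data)) {
			end = int64(len(data))
		}
		return &compute.SerialPortOutput{Contents: data[start:end], Start: start, Next: end}, nil
	}
	out, err := drainSerial(fake)
	if err != nil {
		panic(err)
	}
	fmt.Print(out)
}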
@@ -16363,27 +18955,22 @@ type XpnHostList struct { NullFields []string `json:"-"` } -func (s *XpnHostList) MarshalJSON() ([]byte, error) { - type noMethod XpnHostList +func (s *SerialPortOutput) MarshalJSON() ([]byte, error) { + type noMethod SerialPortOutput raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// XpnResourceId: Service resource (a.k.a service project) ID. -type XpnResourceId struct { - // Id: The ID of the service resource. In the case of projects, this - // field matches the project ID (e.g., my-project), not the project - // number (e.g., 12345678). - Id string `json:"id,omitempty"` +// ServiceAccount: A service account. +type ServiceAccount struct { + // Email: Email address of the service account. + Email string `json:"email,omitempty"` - // Type: The type of the service resource. - // - // Possible values: - // "PROJECT" - // "XPN_RESOURCE_TYPE_UNSPECIFIED" - Type string `json:"type,omitempty"` + // Scopes: The list of scopes to be made available for this service + // account. + Scopes []string `json:"scopes,omitempty"` - // ForceSendFields is a list of field names (e.g. "Id") to + // ForceSendFields is a list of field names (e.g. "Email") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -16391,7 +18978,7 @@ type XpnResourceId struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Id") to include in API + // NullFields is a list of field names (e.g. "Email") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -16400,68 +18987,136 @@ type XpnResourceId struct { NullFields []string `json:"-"` } -func (s *XpnResourceId) MarshalJSON() ([]byte, error) { - type noMethod XpnResourceId +func (s *ServiceAccount) MarshalJSON() ([]byte, error) { + type noMethod ServiceAccount raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Zone: A Zone resource. -type Zone struct { - // AvailableCpuPlatforms: [Output Only] Available cpu/platform - // selections for the zone. - AvailableCpuPlatforms []string `json:"availableCpuPlatforms,omitempty"` - +// Snapshot: A persistent disk snapshot resource. +type Snapshot struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` - // Deprecated: [Output Only] The deprecation status associated with this - // zone. - Deprecated *DeprecationStatus `json:"deprecated,omitempty"` - - // Description: [Output Only] Textual description of the resource. + // Description: An optional description of this resource. Provide this + // property when you create the resource. Description string `json:"description,omitempty"` + // DiskSizeGb: [Output Only] Size of the snapshot, specified in GB. + DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"` + // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` - // Kind: [Output Only] Type of the resource. Always compute#zone for - // zones. + // Kind: [Output Only] Type of the resource. 
Always compute#snapshot for + // Snapshot resources. Kind string `json:"kind,omitempty"` - // Name: [Output Only] Name of the resource. - Name string `json:"name,omitempty"` + // LabelFingerprint: A fingerprint for the labels being applied to this + // snapshot, which is essentially a hash of the labels set used for + // optimistic locking. The fingerprint is initially generated by Compute + // Engine and changes after every request to modify or update labels. + // You must always provide an up-to-date fingerprint hash in order to + // update or change labels. + // + // To see the latest fingerprint, make a get() request to retrieve a + // snapshot. + LabelFingerprint string `json:"labelFingerprint,omitempty"` - // Region: [Output Only] Full URL reference to the region which hosts - // the zone. - Region string `json:"region,omitempty"` + // Labels: Labels to apply to this snapshot. These can be later modified + // by the setLabels method. Label values may be empty. + Labels map[string]string `json:"labels,omitempty"` + + // Licenses: [Output Only] A list of public visible licenses that apply + // to this snapshot. This can be because the original image had licenses + // attached (such as a Windows image). + Licenses []string `json:"licenses,omitempty"` + + // Name: Name of the resource; provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // Status: [Output Only] Status of the zone, either UP or DOWN. + // SnapshotEncryptionKey: Encrypts the snapshot using a + // customer-supplied encryption key. + // + // After you encrypt a snapshot using a customer-supplied key, you must + // provide the same key if you use the image later For example, you must + // provide the encryption key when you create a disk from the encrypted + // snapshot in a future request. + // + // Customer-supplied encryption keys do not protect access to metadata + // of the disk. + // + // If you do not provide an encryption key when creating the snapshot, + // then the snapshot will be encrypted using an automatically generated + // key and you do not need to provide a key to use the snapshot later. + SnapshotEncryptionKey *CustomerEncryptionKey `json:"snapshotEncryptionKey,omitempty"` + + // SourceDisk: [Output Only] The source disk used to create this + // snapshot. + SourceDisk string `json:"sourceDisk,omitempty"` + + // SourceDiskEncryptionKey: The customer-supplied encryption key of the + // source disk. Required if the source disk is protected by a + // customer-supplied encryption key. + SourceDiskEncryptionKey *CustomerEncryptionKey `json:"sourceDiskEncryptionKey,omitempty"` + + // SourceDiskId: [Output Only] The ID value of the disk used to create + // this snapshot. This value may be used to determine whether the + // snapshot was taken from the current or a previous instance of a given + // disk name. + SourceDiskId string `json:"sourceDiskId,omitempty"` + + // Status: [Output Only] The status of the snapshot. This can be + // CREATING, DELETING, FAILED, READY, or UPLOADING. 
// // Possible values: - // "DOWN" - // "UP" + // "CREATING" + // "DELETING" + // "FAILED" + // "READY" + // "UPLOADING" Status string `json:"status,omitempty"` + // StorageBytes: [Output Only] A size of the the storage used by the + // snapshot. As snapshots share storage, this number is expected to + // change with snapshot creation/deletion. + StorageBytes int64 `json:"storageBytes,omitempty,string"` + + // StorageBytesStatus: [Output Only] An indicator whether storageBytes + // is in a stable state or it is being adjusted as a result of shared + // storage reallocation. This status can either be UPDATING, meaning the + // size of the snapshot is being updated, or UP_TO_DATE, meaning the + // size of the snapshot is up-to-date. + // + // Possible values: + // "UPDATING" + // "UP_TO_DATE" + StorageBytesStatus string `json:"storageBytesStatus,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. - // "AvailableCpuPlatforms") to unconditionally include in API requests. - // By default, fields with empty values are omitted from API requests. - // However, any non-pointer, non-interface field appearing in - // ForceSendFields will be sent to the server regardless of whether the - // field is empty or not. This may be used to include empty fields in - // Patch requests. + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "AvailableCpuPlatforms") to + // NullFields is a list of field names (e.g. "CreationTimestamp") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -16471,20 +19126,20 @@ type Zone struct { NullFields []string `json:"-"` } -func (s *Zone) MarshalJSON() ([]byte, error) { - type noMethod Zone +func (s *Snapshot) MarshalJSON() ([]byte, error) { + type noMethod Snapshot raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// ZoneList: Contains a list of zone resources. -type ZoneList struct { +// SnapshotList: Contains a list of Snapshot resources. +type SnapshotList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of Zone resources. - Items []*Zone `json:"items,omitempty"` + // Items: A list of Snapshot resources. + Items []*Snapshot `json:"items,omitempty"` // Kind: Type of resource. Kind string `json:"kind,omitempty"` @@ -16500,6 +19155,9 @@ type ZoneList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *SnapshotListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. 
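(Illustrative aside, not part of the vendored diff.) SnapshotList and the other list types follow the paging contract spelled out in their NextPageToken comments: keep passing the returned token back as pageToken until it comes back empty. A minimal sketch of that loop with the generated client; the project name is a placeholder, and the import paths and default-credentials setup are assumptions rather than part of this change:

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1" // assumed import path
)

func main() {
	ctx := context.Background()
	// Application Default Credentials; any authenticated *http.Client works here.
	client, err := google.DefaultClient(ctx, compute.ComputeReadonlyScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}

	call := svc.Snapshots.List("my-project").MaxResults(100) // placeholder project
	for {
		page, err := call.Do()
		if err != nil {
			log.Fatal(err)
		}
		for _, s := range page.Items {
			fmt.Println(s.Name, s.Status)
		}
		if page.NextPageToken == "" {
			break // no more pages
		}
		call.PageToken(page.NextPageToken)
	}
}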
googleapi.ServerResponse `json:"-"` @@ -16521,25 +19179,48 @@ type ZoneList struct { NullFields []string `json:"-"` } -func (s *ZoneList) MarshalJSON() ([]byte, error) { - type noMethod ZoneList +func (s *SnapshotList) MarshalJSON() ([]byte, error) { + type noMethod SnapshotList raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ZoneSetLabelsRequest struct { - // LabelFingerprint: The fingerprint of the previous set of labels for - // this resource, used to detect conflicts. The fingerprint is initially - // generated by Compute Engine and changes after every request to modify - // or update labels. You must always provide an up-to-date fingerprint - // hash in order to update or change labels. Make a get() request to the - // resource to get the latest fingerprint. - LabelFingerprint string `json:"labelFingerprint,omitempty"` +// SnapshotListWarning: [Output Only] Informational warning message. +type SnapshotListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` - // Labels: The labels to set for this resource. - Labels map[string]string `json:"labels,omitempty"` + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*SnapshotListWarningData `json:"data,omitempty"` - // ForceSendFields is a list of field names (e.g. "LabelFingerprint") to + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -16547,143 +19228,10132 @@ type ZoneSetLabelsRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "LabelFingerprint") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } -func (s *ZoneSetLabelsRequest) MarshalJSON() ([]byte, error) { - type noMethod ZoneSetLabelsRequest +func (s *SnapshotListWarning) MarshalJSON() ([]byte, error) { + type noMethod SnapshotListWarning raw := noMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// method id "compute.acceleratorTypes.aggregatedList": - -type AcceleratorTypesAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} +type SnapshotListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` -// AggregatedList: Retrieves an aggregated list of accelerator types. -func (r *AcceleratorTypesService) AggregatedList(project string) *AcceleratorTypesAggregatedListCall { - c := &AcceleratorTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *AcceleratorTypesAggregatedListCall) Filter(filter string) *AcceleratorTypesAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *AcceleratorTypesAggregatedListCall) MaxResults(maxResults int64) *AcceleratorTypesAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *AcceleratorTypesAggregatedListCall) OrderBy(orderBy string) *AcceleratorTypesAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c +func (s *SnapshotListWarningData) MarshalJSON() ([]byte, error) { + type noMethod SnapshotListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *AcceleratorTypesAggregatedListCall) PageToken(pageToken string) *AcceleratorTypesAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} +// SslCertificate: An SslCertificate resource. This resource provides a +// mechanism to upload an SSL key and certificate to the load balancer +// to serve secure connections from the user. +type SslCertificate struct { + // Certificate: A local certificate file. The certificate must be in PEM + // format. The certificate chain must be no greater than 5 certs long. + // The chain must include at least one intermediate cert. + Certificate string `json:"certificate,omitempty"` -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *AcceleratorTypesAggregatedListCall) Fields(s ...googleapi.Field) *AcceleratorTypesAggregatedListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. 
+ CreationTimestamp string `json:"creationTimestamp,omitempty"` -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *AcceleratorTypesAggregatedListCall) IfNoneMatch(entityTag string) *AcceleratorTypesAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *AcceleratorTypesAggregatedListCall) Context(ctx context.Context) *AcceleratorTypesAggregatedListCall { - c.ctx_ = ctx - return c -} + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always + // compute#sslCertificate for SSL certificates. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // PrivateKey: A write-only private key in PEM format. Only insert + // requests will include this field. + PrivateKey string `json:"privateKey,omitempty"` + + // SelfLink: [Output only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Certificate") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Certificate") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SslCertificate) MarshalJSON() ([]byte, error) { + type noMethod SslCertificate + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SslCertificateList: Contains a list of SslCertificate resources. +type SslCertificateList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. 
+ Id string `json:"id,omitempty"` + + // Items: A list of SslCertificate resources. + Items []*SslCertificate `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *SslCertificateListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SslCertificateList) MarshalJSON() ([]byte, error) { + type noMethod SslCertificateList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SslCertificateListWarning: [Output Only] Informational warning +// message. +type SslCertificateListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*SslCertificateListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SslCertificateListWarning) MarshalJSON() ([]byte, error) { + type noMethod SslCertificateListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SslCertificateListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SslCertificateListWarningData) MarshalJSON() ([]byte, error) { + type noMethod SslCertificateListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Subnetwork: A Subnetwork resource. +type Subnetwork struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. This field can be set only at + // resource creation time. + Description string `json:"description,omitempty"` + + // GatewayAddress: [Output Only] The gateway address for default routes + // to reach destination addresses outside this subnetwork. This field + // can be set only at resource creation time. + GatewayAddress string `json:"gatewayAddress,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. 
This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // IpCidrRange: The range of internal addresses that are owned by this + // subnetwork. Provide this property when you create the subnetwork. For + // example, 10.0.0.0/8 or 192.168.0.0/16. Ranges must be unique and + // non-overlapping within a network. Only IPv4 is supported. This field + // can be set only at resource creation time. + IpCidrRange string `json:"ipCidrRange,omitempty"` + + // Kind: [Output Only] Type of the resource. Always compute#subnetwork + // for Subnetwork resources. + Kind string `json:"kind,omitempty"` + + // Name: The name of the resource, provided by the client when initially + // creating the resource. The name must be 1-63 characters long, and + // comply with RFC1035. Specifically, the name must be 1-63 characters + // long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? + // which means the first character must be a lowercase letter, and all + // following characters must be a dash, lowercase letter, or digit, + // except the last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // Network: The URL of the network to which this subnetwork belongs, + // provided by the client when initially creating the subnetwork. Only + // networks that are in the distributed mode can have subnetworks. This + // field can be set only at resource creation time. + Network string `json:"network,omitempty"` + + // PrivateIpGoogleAccess: Whether the VMs in this subnet can access + // Google services without assigned external IP addresses. This field + // can be both set at resource creation time and updated using + // setPrivateIpGoogleAccess. + PrivateIpGoogleAccess bool `json:"privateIpGoogleAccess,omitempty"` + + // Region: URL of the region where the Subnetwork resides. This field + // can be set only at resource creation time. + Region string `json:"region,omitempty"` + + // SecondaryIpRanges: An array of configurations for secondary IP ranges + // for VM instances contained in this subnetwork. The primary IP of such + // VM must belong to the primary ipCidrRange of the subnetwork. The + // alias IPs may belong to either primary or secondary ranges. + SecondaryIpRanges []*SubnetworkSecondaryRange `json:"secondaryIpRanges,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *Subnetwork) MarshalJSON() ([]byte, error) { + type noMethod Subnetwork + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SubnetworkAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of SubnetworksScopedList resources. + Items map[string]SubnetworksScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#subnetworkAggregatedList for aggregated lists of subnetworks. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *SubnetworkAggregatedListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SubnetworkAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod SubnetworkAggregatedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SubnetworkAggregatedListWarning: [Output Only] Informational warning +// message. +type SubnetworkAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*SubnetworkAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SubnetworkAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod SubnetworkAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SubnetworkAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SubnetworkAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod SubnetworkAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SubnetworkList: Contains a list of Subnetwork resources. +type SubnetworkList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of Subnetwork resources. + Items []*Subnetwork `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#subnetworkList + // for lists of subnetworks. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *SubnetworkListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SubnetworkList) MarshalJSON() ([]byte, error) { + type noMethod SubnetworkList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SubnetworkListWarning: [Output Only] Informational warning message. +type SubnetworkListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*SubnetworkListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SubnetworkListWarning) MarshalJSON() ([]byte, error) { + type noMethod SubnetworkListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SubnetworkListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SubnetworkListWarningData) MarshalJSON() ([]byte, error) { + type noMethod SubnetworkListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SubnetworkSecondaryRange: Represents a secondary IP range of a +// subnetwork. +type SubnetworkSecondaryRange struct { + // IpCidrRange: The range of IP addresses belonging to this subnetwork + // secondary range. Provide this property when you create the + // subnetwork. Ranges must be unique and non-overlapping with all + // primary and secondary IP ranges within a network. Only IPv4 is + // supported. + IpCidrRange string `json:"ipCidrRange,omitempty"` + + // RangeName: The name associated with this subnetwork secondary range, + // used when adding an alias IP range to a VM instance. The name must be + // 1-63 characters long, and comply with RFC1035. The name must be + // unique within the subnetwork. + RangeName string `json:"rangeName,omitempty"` + + // ForceSendFields is a list of field names (e.g. "IpCidrRange") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "IpCidrRange") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SubnetworkSecondaryRange) MarshalJSON() ([]byte, error) { + type noMethod SubnetworkSecondaryRange + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SubnetworksExpandIpCidrRangeRequest struct { + // IpCidrRange: The IP (in CIDR format or netmask) of internal addresses + // that are legal on this Subnetwork. This range should be disjoint from + // other subnetworks within this network. This range can only be larger + // than (i.e. a superset of) the range previously defined before the + // update. + IpCidrRange string `json:"ipCidrRange,omitempty"` + + // ForceSendFields is a list of field names (e.g. "IpCidrRange") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "IpCidrRange") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SubnetworksExpandIpCidrRangeRequest) MarshalJSON() ([]byte, error) { + type noMethod SubnetworksExpandIpCidrRangeRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SubnetworksScopedList struct { + // Subnetworks: List of subnetworks contained in this scope. + Subnetworks []*Subnetwork `json:"subnetworks,omitempty"` + + // Warning: An informational warning that appears when the list of + // addresses is empty. + Warning *SubnetworksScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Subnetworks") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Subnetworks") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SubnetworksScopedList) MarshalJSON() ([]byte, error) { + type noMethod SubnetworksScopedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SubnetworksScopedListWarning: An informational warning that appears +// when the list of addresses is empty. +type SubnetworksScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*SubnetworksScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SubnetworksScopedListWarning) MarshalJSON() ([]byte, error) { + type noMethod SubnetworksScopedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SubnetworksScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SubnetworksScopedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod SubnetworksScopedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SubnetworksSetPrivateIpGoogleAccessRequest struct { + PrivateIpGoogleAccess bool `json:"privateIpGoogleAccess,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "PrivateIpGoogleAccess") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "PrivateIpGoogleAccess") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. 
This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *SubnetworksSetPrivateIpGoogleAccessRequest) MarshalJSON() ([]byte, error) { + type noMethod SubnetworksSetPrivateIpGoogleAccessRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TCPHealthCheck struct { + // Port: The TCP port number for the health check request. The default + // value is 80. Valid values are 1 through 65535. + Port int64 `json:"port,omitempty"` + + // PortName: Port name as defined in InstanceGroup#NamedPort#name. If + // both port and port_name are defined, port takes precedence. + PortName string `json:"portName,omitempty"` + + // ProxyHeader: Specifies the type of proxy header to append before + // sending data to the backend, either NONE or PROXY_V1. The default is + // NONE. + // + // Possible values: + // "NONE" + // "PROXY_V1" + ProxyHeader string `json:"proxyHeader,omitempty"` + + // Request: The application data to send once the TCP connection has + // been established (default value is empty). If both request and + // response are empty, the connection establishment alone will indicate + // health. The request data can only be ASCII. + Request string `json:"request,omitempty"` + + // Response: The bytes to match against the beginning of the response + // data. If left empty (the default value), any response will indicate + // health. The response data can only be ASCII. + Response string `json:"response,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Port") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Port") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TCPHealthCheck) MarshalJSON() ([]byte, error) { + type noMethod TCPHealthCheck + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Tags: A set of instance tags. +type Tags struct { + // Fingerprint: Specifies a fingerprint for this request, which is + // essentially a hash of the metadata's contents and used for optimistic + // locking. The fingerprint is initially generated by Compute Engine and + // changes after every request to modify or update metadata. You must + // always provide an up-to-date fingerprint hash in order to update or + // change metadata. + // + // To see the latest fingerprint, make get() request to the instance. + Fingerprint string `json:"fingerprint,omitempty"` + + // Items: An array of tags. Each tag must be 1-63 characters long, and + // comply with RFC1035. + Items []string `json:"items,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Fingerprint") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Fingerprint") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Tags) MarshalJSON() ([]byte, error) { + type noMethod Tags + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetHttpProxy: A TargetHttpProxy resource. This resource defines an +// HTTP proxy. +type TargetHttpProxy struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of resource. Always compute#targetHttpProxy + // for target HTTP proxies. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // UrlMap: URL to the UrlMap resource that defines the mapping from URL + // to the BackendService. + UrlMap string `json:"urlMap,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *TargetHttpProxy) MarshalJSON() ([]byte, error) { + type noMethod TargetHttpProxy + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetHttpProxyList: A list of TargetHttpProxy resources. +type TargetHttpProxyList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of TargetHttpProxy resources. + Items []*TargetHttpProxy `json:"items,omitempty"` + + // Kind: Type of resource. Always compute#targetHttpProxyList for lists + // of target HTTP proxies. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *TargetHttpProxyListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetHttpProxyList) MarshalJSON() ([]byte, error) { + type noMethod TargetHttpProxyList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetHttpProxyListWarning: [Output Only] Informational warning +// message. +type TargetHttpProxyListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetHttpProxyListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetHttpProxyListWarning) MarshalJSON() ([]byte, error) { + type noMethod TargetHttpProxyListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetHttpProxyListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetHttpProxyListWarningData) MarshalJSON() ([]byte, error) { + type noMethod TargetHttpProxyListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetHttpsProxiesSetSslCertificatesRequest struct { + // SslCertificates: New set of SslCertificate resources to associate + // with this TargetHttpsProxy resource. Currently exactly one + // SslCertificate resource must be specified. + SslCertificates []string `json:"sslCertificates,omitempty"` + + // ForceSendFields is a list of field names (e.g. "SslCertificates") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "SslCertificates") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *TargetHttpsProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error) { + type noMethod TargetHttpsProxiesSetSslCertificatesRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetHttpsProxy: A TargetHttpsProxy resource. This resource defines +// an HTTPS proxy. +type TargetHttpsProxy struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of resource. Always compute#targetHttpsProxy + // for target HTTPS proxies. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // SslCertificates: URLs to SslCertificate resources that are used to + // authenticate connections between users and the load balancer. + // Currently, exactly one SSL certificate must be specified. 
+ SslCertificates []string `json:"sslCertificates,omitempty"` + + // UrlMap: A fully-qualified or valid partial URL to the UrlMap resource + // that defines the mapping from URL to the BackendService. For example, + // the following are all valid URLs for specifying a URL map: + // - + // https://www.googleapis.com/compute/v1/projects/project/global/urlMaps/url-map + // - projects/project/global/urlMaps/url-map + // - global/urlMaps/url-map + UrlMap string `json:"urlMap,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *TargetHttpsProxy) MarshalJSON() ([]byte, error) { + type noMethod TargetHttpsProxy + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetHttpsProxyList: Contains a list of TargetHttpsProxy resources. +type TargetHttpsProxyList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of TargetHttpsProxy resources. + Items []*TargetHttpsProxy `json:"items,omitempty"` + + // Kind: Type of resource. Always compute#targetHttpsProxyList for lists + // of target HTTPS proxies. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *TargetHttpsProxyListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetHttpsProxyList) MarshalJSON() ([]byte, error) { + type noMethod TargetHttpsProxyList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetHttpsProxyListWarning: [Output Only] Informational warning +// message. +type TargetHttpsProxyListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetHttpsProxyListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetHttpsProxyListWarning) MarshalJSON() ([]byte, error) { + type noMethod TargetHttpsProxyListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetHttpsProxyListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). 
+ Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetHttpsProxyListWarningData) MarshalJSON() ([]byte, error) { + type noMethod TargetHttpsProxyListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetInstance: A TargetInstance resource. This resource defines an +// endpoint instance that terminates traffic of certain protocols. +type TargetInstance struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Instance: A URL to the virtual machine instance that handles traffic + // for this target instance. When creating a target instance, you can + // provide the fully-qualified URL or a valid partial URL to the desired + // virtual machine. For example, the following are all valid URLs: + // - + // https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/instance + // - projects/project/zones/zone/instances/instance + // - zones/zone/instances/instance + Instance string `json:"instance,omitempty"` + + // Kind: [Output Only] The type of the resource. Always + // compute#targetInstance for target instances. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // NatPolicy: NAT option controlling how IPs are NAT'ed to the instance. + // Currently only NO_NAT (default value) is supported. + // + // Possible values: + // "NO_NAT" + NatPolicy string `json:"natPolicy,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Zone: [Output Only] URL of the zone where the target instance + // resides. 
+ Zone string `json:"zone,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *TargetInstance) MarshalJSON() ([]byte, error) { + type noMethod TargetInstance + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetInstanceAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of TargetInstance resources. + Items map[string]TargetInstancesScopedList `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *TargetInstanceAggregatedListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *TargetInstanceAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod TargetInstanceAggregatedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetInstanceAggregatedListWarning: [Output Only] Informational +// warning message. +type TargetInstanceAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetInstanceAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetInstanceAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod TargetInstanceAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetInstanceAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetInstanceAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod TargetInstanceAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetInstanceList: Contains a list of TargetInstance resources. +type TargetInstanceList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of TargetInstance resources. + Items []*TargetInstance `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *TargetInstanceListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetInstanceList) MarshalJSON() ([]byte, error) { + type noMethod TargetInstanceList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetInstanceListWarning: [Output Only] Informational warning +// message. +type TargetInstanceListWarning struct { + // Code: [Output Only] A warning code, if applicable. 
For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetInstanceListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetInstanceListWarning) MarshalJSON() ([]byte, error) { + type noMethod TargetInstanceListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetInstanceListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetInstanceListWarningData) MarshalJSON() ([]byte, error) { + type noMethod TargetInstanceListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetInstancesScopedList struct { + // TargetInstances: List of target instances contained in this scope. + TargetInstances []*TargetInstance `json:"targetInstances,omitempty"` + + // Warning: Informational warning which replaces the list of addresses + // when the list is empty. + Warning *TargetInstancesScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "TargetInstances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "TargetInstances") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *TargetInstancesScopedList) MarshalJSON() ([]byte, error) { + type noMethod TargetInstancesScopedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetInstancesScopedListWarning: Informational warning which +// replaces the list of addresses when the list is empty. +type TargetInstancesScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetInstancesScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetInstancesScopedListWarning) MarshalJSON() ([]byte, error) { + type noMethod TargetInstancesScopedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetInstancesScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetInstancesScopedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod TargetInstancesScopedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetPool: A TargetPool resource. This resource defines a pool of +// instances, an associated HttpHealthCheck resource, and the fallback +// target pool. +type TargetPool struct { + // BackupPool: This field is applicable only when the containing target + // pool is serving a forwarding rule as the primary pool, and its + // failoverRatio field is properly set to a value between [0, + // 1]. + // + // backupPool and failoverRatio together define the fallback behavior of + // the primary target pool: if the ratio of the healthy instances in the + // primary pool is at or below failoverRatio, traffic arriving at the + // load-balanced IP will be directed to the backup pool. 
+ // + // In the case where failoverRatio and backupPool are not set, or all the + // instances in the backup pool are unhealthy, the traffic will be + // directed back to the primary pool in the "force" mode, where traffic + // will be spread to the healthy instances with the best effort, or to + // all instances when no instance is healthy. + BackupPool string `json:"backupPool,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // FailoverRatio: This field is applicable only when the containing + // target pool is serving a forwarding rule as the primary pool (i.e., + // not as a backup pool to some other target pool). The value of the + // field must be in [0, 1]. + // + // If set, backupPool must also be set. They together define the + // fallback behavior of the primary target pool: if the ratio of the + // healthy instances in the primary pool is at or below this number, + // traffic arriving at the load-balanced IP will be directed to the + // backup pool. + // + // In the case where failoverRatio is not set or all the instances in the + // backup pool are unhealthy, the traffic will be directed back to the + // primary pool in the "force" mode, where traffic will be spread to the + // healthy instances with the best effort, or to all instances when no + // instance is healthy. + FailoverRatio float64 `json:"failoverRatio,omitempty"` + + // HealthChecks: The URL of the HttpHealthCheck resource. A member + // instance in this pool is considered healthy if and only if the health + // checks pass. An empty list means all member instances will be + // considered healthy at all times. Only HttpHealthChecks are supported. + // Only one health check may be specified. + HealthChecks []string `json:"healthChecks,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Instances: A list of resource URLs to the virtual machine instances + // serving this pool. They must live in zones contained in the same + // region as this pool. + Instances []string `json:"instances,omitempty"` + + // Kind: [Output Only] Type of the resource. Always compute#targetPool + // for target pools. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // Region: [Output Only] URL of the region where the target pool + // resides. + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // SessionAffinity: Session affinity option, must be one of the + // following values: + // NONE: Connections from the same client IP may go to any instance in + // the pool. 
+ // CLIENT_IP: Connections from the same client IP will go to the same + // instance in the pool while that instance remains + // healthy. + // CLIENT_IP_PROTO: Connections from the same client IP with the same IP + // protocol will go to the same instance in the pool while that instance + // remains healthy. + // + // Possible values: + // "CLIENT_IP" + // "CLIENT_IP_PORT_PROTO" + // "CLIENT_IP_PROTO" + // "GENERATED_COOKIE" + // "NONE" + SessionAffinity string `json:"sessionAffinity,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "BackupPool") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BackupPool") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetPool) MarshalJSON() ([]byte, error) { + type noMethod TargetPool + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *TargetPool) UnmarshalJSON(data []byte) error { + type noMethod TargetPool + var s1 struct { + FailoverRatio gensupport.JSONFloat64 `json:"failoverRatio"` + *noMethod + } + s1.noMethod = (*noMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.FailoverRatio = float64(s1.FailoverRatio) + return nil +} + +type TargetPoolAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of TargetPool resources. + Items map[string]TargetPoolsScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#targetPoolAggregatedList for aggregated lists of target + // pools. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *TargetPoolAggregatedListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetPoolAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolAggregatedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetPoolAggregatedListWarning: [Output Only] Informational warning +// message. +type TargetPoolAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetPoolAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetPoolAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetPoolAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. 
For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetPoolAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetPoolInstanceHealth struct { + HealthStatus []*HealthStatus `json:"healthStatus,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#targetPoolInstanceHealth when checking the health of an + // instance. + Kind string `json:"kind,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "HealthStatus") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "HealthStatus") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetPoolInstanceHealth) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolInstanceHealth + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetPoolList: Contains a list of TargetPool resources. +type TargetPoolList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of TargetPool resources. 
+ Items []*TargetPool `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#targetPoolList + // for lists of target pools. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *TargetPoolListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetPoolList) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetPoolListWarning: [Output Only] Informational warning message. +type TargetPoolListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetPoolListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetPoolListWarning) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetPoolListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetPoolListWarningData) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetPoolsAddHealthCheckRequest struct { + // HealthChecks: The HttpHealthCheck to add to the target pool. + HealthChecks []*HealthCheckReference `json:"healthChecks,omitempty"` + + // ForceSendFields is a list of field names (e.g. "HealthChecks") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "HealthChecks") to include + // in API requests with the JSON null value. 
By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetPoolsAddHealthCheckRequest) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolsAddHealthCheckRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetPoolsAddInstanceRequest struct { + // Instances: A full or partial URL to an instance to add to this target + // pool. This can be a full or partial URL. For example, the following + // are valid URLs: + // - + // https://www.googleapis.com/compute/v1/projects/project-id/zones/zone/instances/instance-name + // - projects/project-id/zones/zone/instances/instance-name + // - zones/zone/instances/instance-name + Instances []*InstanceReference `json:"instances,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Instances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Instances") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetPoolsAddInstanceRequest) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolsAddInstanceRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetPoolsRemoveHealthCheckRequest struct { + // HealthChecks: Health check URL to be removed. This can be a full or + // valid partial URL. For example, the following are valid URLs: + // - + // https://www.googleapis.com/compute/beta/projects/project/global/httpHealthChecks/health-check + // - projects/project/global/httpHealthChecks/health-check + // - global/httpHealthChecks/health-check + HealthChecks []*HealthCheckReference `json:"healthChecks,omitempty"` + + // ForceSendFields is a list of field names (e.g. "HealthChecks") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "HealthChecks") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *TargetPoolsRemoveHealthCheckRequest) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolsRemoveHealthCheckRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetPoolsRemoveInstanceRequest struct { + // Instances: URLs of the instances to be removed from target pool. + Instances []*InstanceReference `json:"instances,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Instances") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Instances") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetPoolsRemoveInstanceRequest) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolsRemoveInstanceRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetPoolsScopedList struct { + // TargetPools: List of target pools contained in this scope. + TargetPools []*TargetPool `json:"targetPools,omitempty"` + + // Warning: Informational warning which replaces the list of addresses + // when the list is empty. + Warning *TargetPoolsScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "TargetPools") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "TargetPools") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetPoolsScopedList) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolsScopedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetPoolsScopedListWarning: Informational warning which replaces +// the list of addresses when the list is empty. +type TargetPoolsScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetPoolsScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetPoolsScopedListWarning) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolsScopedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetPoolsScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetPoolsScopedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolsScopedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetReference struct { + Target string `json:"target,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Target") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Target") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetReference) MarshalJSON() ([]byte, error) { + type noMethod TargetReference + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetSslProxiesSetBackendServiceRequest struct { + // Service: The URL of the new BackendService resource for the + // targetSslProxy. + Service string `json:"service,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Service") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Service") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetSslProxiesSetBackendServiceRequest) MarshalJSON() ([]byte, error) { + type noMethod TargetSslProxiesSetBackendServiceRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetSslProxiesSetProxyHeaderRequest struct { + // ProxyHeader: The new type of proxy header to append before sending + // data to the backend. NONE or PROXY_V1 are allowed. + // + // Possible values: + // "NONE" + // "PROXY_V1" + ProxyHeader string `json:"proxyHeader,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ProxyHeader") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ProxyHeader") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetSslProxiesSetProxyHeaderRequest) MarshalJSON() ([]byte, error) { + type noMethod TargetSslProxiesSetProxyHeaderRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetSslProxiesSetSslCertificatesRequest struct { + // SslCertificates: New set of URLs to SslCertificate resources to + // associate with this TargetSslProxy. Currently exactly one ssl + // certificate must be specified. + SslCertificates []string `json:"sslCertificates,omitempty"` + + // ForceSendFields is a list of field names (e.g. "SslCertificates") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "SslCertificates") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *TargetSslProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error) { + type noMethod TargetSslProxiesSetSslCertificatesRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetSslProxy: A TargetSslProxy resource. This resource defines an +// SSL proxy. +type TargetSslProxy struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always + // compute#targetSslProxy for target SSL proxies. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. 
+ Name string `json:"name,omitempty"` + + // ProxyHeader: Specifies the type of proxy header to append before + // sending data to the backend, either NONE or PROXY_V1. The default is + // NONE. + // + // Possible values: + // "NONE" + // "PROXY_V1" + ProxyHeader string `json:"proxyHeader,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Service: URL to the BackendService resource. + Service string `json:"service,omitempty"` + + // SslCertificates: URLs to SslCertificate resources that are used to + // authenticate connections to Backends. Currently exactly one SSL + // certificate must be specified. + SslCertificates []string `json:"sslCertificates,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *TargetSslProxy) MarshalJSON() ([]byte, error) { + type noMethod TargetSslProxy + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetSslProxyList: Contains a list of TargetSslProxy resources. +type TargetSslProxyList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of TargetSslProxy resources. + Items []*TargetSslProxy `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *TargetSslProxyListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetSslProxyList) MarshalJSON() ([]byte, error) { + type noMethod TargetSslProxyList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetSslProxyListWarning: [Output Only] Informational warning +// message. +type TargetSslProxyListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetSslProxyListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetSslProxyListWarning) MarshalJSON() ([]byte, error) { + type noMethod TargetSslProxyListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetSslProxyListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. 
Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetSslProxyListWarningData) MarshalJSON() ([]byte, error) { + type noMethod TargetSslProxyListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetTcpProxiesSetBackendServiceRequest struct { + // Service: The URL of the new BackendService resource for the + // targetTcpProxy. + Service string `json:"service,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Service") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Service") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetTcpProxiesSetBackendServiceRequest) MarshalJSON() ([]byte, error) { + type noMethod TargetTcpProxiesSetBackendServiceRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetTcpProxiesSetProxyHeaderRequest struct { + // ProxyHeader: The new type of proxy header to append before sending + // data to the backend. NONE or PROXY_V1 are allowed. + // + // Possible values: + // "NONE" + // "PROXY_V1" + ProxyHeader string `json:"proxyHeader,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ProxyHeader") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ProxyHeader") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetTcpProxiesSetProxyHeaderRequest) MarshalJSON() ([]byte, error) { + type noMethod TargetTcpProxiesSetProxyHeaderRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetTcpProxy: A TargetTcpProxy resource. This resource defines a +// TCP proxy. +type TargetTcpProxy struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always + // compute#targetTcpProxy for target TCP proxies. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // ProxyHeader: Specifies the type of proxy header to append before + // sending data to the backend, either NONE or PROXY_V1. The default is + // NONE. + // + // Possible values: + // "NONE" + // "PROXY_V1" + ProxyHeader string `json:"proxyHeader,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Service: URL to the BackendService resource. + Service string `json:"service,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. 
This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *TargetTcpProxy) MarshalJSON() ([]byte, error) { + type noMethod TargetTcpProxy + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetTcpProxyList: Contains a list of TargetTcpProxy resources. +type TargetTcpProxyList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of TargetTcpProxy resources. + Items []*TargetTcpProxy `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *TargetTcpProxyListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetTcpProxyList) MarshalJSON() ([]byte, error) { + type noMethod TargetTcpProxyList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetTcpProxyListWarning: [Output Only] Informational warning +// message. +type TargetTcpProxyListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetTcpProxyListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetTcpProxyListWarning) MarshalJSON() ([]byte, error) { + type noMethod TargetTcpProxyListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetTcpProxyListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetTcpProxyListWarningData) MarshalJSON() ([]byte, error) { + type noMethod TargetTcpProxyListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetVpnGateway: Represents a Target VPN gateway resource. +type TargetVpnGateway struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // ForwardingRules: [Output Only] A list of URLs to the ForwardingRule + // resources. ForwardingRules are created using + // compute.forwardingRules.insert and associated to a VPN gateway. + ForwardingRules []string `json:"forwardingRules,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of resource. Always compute#targetVpnGateway + // for target VPN gateways. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // Network: URL of the network to which this VPN gateway is attached. + // Provided by the client when the VPN gateway is created. + Network string `json:"network,omitempty"` + + // Region: [Output Only] URL of the region where the target VPN gateway + // resides. + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Status: [Output Only] The status of the VPN gateway. + // + // Possible values: + // "CREATING" + // "DELETING" + // "FAILED" + // "READY" + Status string `json:"status,omitempty"` + + // Tunnels: [Output Only] A list of URLs to VpnTunnel resources. + // VpnTunnels are created using compute.vpntunnels.insert method and + // associated to a VPN gateway. + Tunnels []string `json:"tunnels,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. 
However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *TargetVpnGateway) MarshalJSON() ([]byte, error) { + type noMethod TargetVpnGateway + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetVpnGatewayAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of TargetVpnGateway resources. + Items map[string]TargetVpnGatewaysScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#targetVpnGateway + // for target VPN gateways. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *TargetVpnGatewayAggregatedListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetVpnGatewayAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod TargetVpnGatewayAggregatedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetVpnGatewayAggregatedListWarning: [Output Only] Informational +// warning message. +type TargetVpnGatewayAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetVpnGatewayAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetVpnGatewayAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod TargetVpnGatewayAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetVpnGatewayAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetVpnGatewayAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod TargetVpnGatewayAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetVpnGatewayList: Contains a list of TargetVpnGateway resources. +type TargetVpnGatewayList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of TargetVpnGateway resources. + Items []*TargetVpnGateway `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#targetVpnGateway + // for target VPN gateways. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *TargetVpnGatewayListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetVpnGatewayList) MarshalJSON() ([]byte, error) { + type noMethod TargetVpnGatewayList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetVpnGatewayListWarning: [Output Only] Informational warning +// message. +type TargetVpnGatewayListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetVpnGatewayListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetVpnGatewayListWarning) MarshalJSON() ([]byte, error) { + type noMethod TargetVpnGatewayListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetVpnGatewayListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetVpnGatewayListWarningData) MarshalJSON() ([]byte, error) { + type noMethod TargetVpnGatewayListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetVpnGatewaysScopedList struct { + // TargetVpnGateways: [Output Only] List of target vpn gateways + // contained in this scope. + TargetVpnGateways []*TargetVpnGateway `json:"targetVpnGateways,omitempty"` + + // Warning: [Output Only] Informational warning which replaces the list + // of addresses when the list is empty. + Warning *TargetVpnGatewaysScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "TargetVpnGateways") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "TargetVpnGateways") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *TargetVpnGatewaysScopedList) MarshalJSON() ([]byte, error) { + type noMethod TargetVpnGatewaysScopedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TargetVpnGatewaysScopedListWarning: [Output Only] Informational +// warning which replaces the list of addresses when the list is empty. +type TargetVpnGatewaysScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetVpnGatewaysScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetVpnGatewaysScopedListWarning) MarshalJSON() ([]byte, error) { + type noMethod TargetVpnGatewaysScopedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetVpnGatewaysScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetVpnGatewaysScopedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod TargetVpnGatewaysScopedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TestFailure struct { + ActualService string `json:"actualService,omitempty"` + + ExpectedService string `json:"expectedService,omitempty"` + + Host string `json:"host,omitempty"` + + Path string `json:"path,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ActualService") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"ActualService") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TestFailure) MarshalJSON() ([]byte, error) { + type noMethod TestFailure + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TestPermissionsRequest struct { + // Permissions: The set of permissions to check for the 'resource'. + // Permissions with wildcards (such as '*' or 'storage.*') are not + // allowed. + Permissions []string `json:"permissions,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Permissions") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Permissions") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TestPermissionsRequest) MarshalJSON() ([]byte, error) { + type noMethod TestPermissionsRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TestPermissionsResponse struct { + // Permissions: A subset of `TestPermissionsRequest.permissions` that + // the caller is allowed. + Permissions []string `json:"permissions,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Permissions") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Permissions") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TestPermissionsResponse) MarshalJSON() ([]byte, error) { + type noMethod TestPermissionsResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type UDPHealthCheck struct { + // Port: The UDP port number for the health check request. Valid values + // are 1 through 65535. 
+ Port int64 `json:"port,omitempty"` + + // PortName: Port name as defined in InstanceGroup#NamedPort#name. If + // both port and port_name are defined, port takes precedence. + PortName string `json:"portName,omitempty"` + + // Request: Raw data of request to send in payload of UDP packet. It is + // an error if this is empty. The request data can only be ASCII. + Request string `json:"request,omitempty"` + + // Response: The bytes to match against the beginning of the response + // data. It is an error if this is empty. The response data can only be + // ASCII. + Response string `json:"response,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Port") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Port") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UDPHealthCheck) MarshalJSON() ([]byte, error) { + type noMethod UDPHealthCheck + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UrlMap: A UrlMap resource. This resource defines the mapping from URL +// to the BackendService resource, based on the "longest-match" of the +// URL's host and path. +type UrlMap struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // DefaultService: The URL of the BackendService resource if none of the + // hostRules match. + DefaultService string `json:"defaultService,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Fingerprint: Fingerprint of this resource. A hash of the contents + // stored in this object. This field is used in optimistic locking. This + // field will be ignored when inserting a UrlMap. An up-to-date + // fingerprint must be provided in order to update the UrlMap. + Fingerprint string `json:"fingerprint,omitempty"` + + // HostRules: The list of HostRules to use against the URL. + HostRules []*HostRule `json:"hostRules,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always compute#urlMaps for + // url maps. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? 
which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // PathMatchers: The list of named PathMatchers to use against the URL. + PathMatchers []*PathMatcher `json:"pathMatchers,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Tests: The list of expected URL mappings. Request to update this + // UrlMap will succeed only if all of the test cases pass. + Tests []*UrlMapTest `json:"tests,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *UrlMap) MarshalJSON() ([]byte, error) { + type noMethod UrlMap + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UrlMapList: Contains a list of UrlMap resources. +type UrlMapList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of UrlMap resources. + Items []*UrlMap `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *UrlMapListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlMapList) MarshalJSON() ([]byte, error) { + type noMethod UrlMapList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UrlMapListWarning: [Output Only] Informational warning message. +type UrlMapListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*UrlMapListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlMapListWarning) MarshalJSON() ([]byte, error) { + type noMethod UrlMapListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type UrlMapListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). 
+ Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlMapListWarningData) MarshalJSON() ([]byte, error) { + type noMethod UrlMapListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type UrlMapReference struct { + UrlMap string `json:"urlMap,omitempty"` + + // ForceSendFields is a list of field names (e.g. "UrlMap") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "UrlMap") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlMapReference) MarshalJSON() ([]byte, error) { + type noMethod UrlMapReference + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UrlMapTest: Message for the expected URL mappings. +type UrlMapTest struct { + // Description: Description of this test case. + Description string `json:"description,omitempty"` + + // Host: Host portion of the URL. + Host string `json:"host,omitempty"` + + // Path: Path portion of the URL. + Path string `json:"path,omitempty"` + + // Service: Expected BackendService resource the given URL should be + // mapped to. + Service string `json:"service,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Description") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Description") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. 
However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlMapTest) MarshalJSON() ([]byte, error) { + type noMethod UrlMapTest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UrlMapValidationResult: Message representing the validation result +// for a UrlMap. +type UrlMapValidationResult struct { + LoadErrors []string `json:"loadErrors,omitempty"` + + // LoadSucceeded: Whether the given UrlMap can be successfully loaded. + // If false, 'loadErrors' indicates the reasons. + LoadSucceeded bool `json:"loadSucceeded,omitempty"` + + TestFailures []*TestFailure `json:"testFailures,omitempty"` + + // TestPassed: If successfully loaded, this field indicates whether the + // test passed. If false, 'testFailures's indicate the reason of + // failure. + TestPassed bool `json:"testPassed,omitempty"` + + // ForceSendFields is a list of field names (e.g. "LoadErrors") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "LoadErrors") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlMapValidationResult) MarshalJSON() ([]byte, error) { + type noMethod UrlMapValidationResult + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type UrlMapsValidateRequest struct { + // Resource: Content of the UrlMap to be validated. + Resource *UrlMap `json:"resource,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Resource") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Resource") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
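+	//
+	// A minimal sketch of how the validation types above fit together. The
+	// project ID, hostnames, service URLs, fmt usage, and the svc variable
+	// (an assumed *compute.Service) are illustrative, and the Validate call
+	// is assumed to follow this package's usual project/resource/body
+	// pattern:
+	//
+	//	um := &compute.UrlMap{
+	//		Name:           "example-map",
+	//		DefaultService: defaultSvcURL, // hypothetical BackendService URL
+	//		Tests: []*compute.UrlMapTest{
+	//			{Host: "example.com", Path: "/static/app.js", Service: staticSvcURL},
+	//		},
+	//	}
+	//	resp, err := svc.UrlMaps.Validate("my-project", um.Name,
+	//		&compute.UrlMapsValidateRequest{Resource: um}).Do()
+	//	if err == nil && resp.Result != nil && !resp.Result.TestPassed {
+	//		for _, f := range resp.Result.TestFailures {
+	//			fmt.Printf("%s%s: expected %s, got %s\n",
+	//				f.Host, f.Path, f.ExpectedService, f.ActualService)
+	//		}
+	//	}
+	//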
+ NullFields []string `json:"-"` +} + +func (s *UrlMapsValidateRequest) MarshalJSON() ([]byte, error) { + type noMethod UrlMapsValidateRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type UrlMapsValidateResponse struct { + Result *UrlMapValidationResult `json:"result,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Result") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Result") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlMapsValidateResponse) MarshalJSON() ([]byte, error) { + type noMethod UrlMapsValidateResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UsageExportLocation: The location in Cloud Storage and naming method +// of the daily usage report. Contains bucket_name and report_name +// prefix. +type UsageExportLocation struct { + // BucketName: The name of an existing bucket in Cloud Storage where the + // usage report object is stored. The Google Service Account is granted + // write access to this bucket. This can either be the bucket name by + // itself, such as example-bucket, or the bucket name with gs:// or + // https://storage.googleapis.com/ in front of it, such as + // gs://example-bucket. + BucketName string `json:"bucketName,omitempty"` + + // ReportNamePrefix: An optional prefix for the name of the usage report + // object stored in bucketName. If not supplied, defaults to usage. The + // report is stored as a CSV file named + // report_name_prefix_gce_YYYYMMDD.csv where YYYYMMDD is the day of the + // usage according to Pacific Time. If you supply a prefix, it should + // conform to Cloud Storage object naming conventions. + ReportNamePrefix string `json:"reportNamePrefix,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BucketName") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BucketName") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
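+	//
+	// A small sketch of what NullFields does during marshaling, using only
+	// encoding/json and the MarshalJSON method defined below (the bucket
+	// name is hypothetical; the output is shown approximately):
+	//
+	//	loc := &compute.UsageExportLocation{
+	//		BucketName: "example-bucket",
+	//		// Include reportNamePrefix as an explicit JSON null in the body.
+	//		NullFields: []string{"ReportNamePrefix"},
+	//	}
+	//	b, _ := json.Marshal(loc)
+	//	// b is roughly {"bucketName":"example-bucket","reportNamePrefix":null}
+	//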
+ NullFields []string `json:"-"` +} + +func (s *UsageExportLocation) MarshalJSON() ([]byte, error) { + type noMethod UsageExportLocation + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnTunnel struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // DetailedStatus: [Output Only] Detailed status message for the VPN + // tunnel. + DetailedStatus string `json:"detailedStatus,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // IkeVersion: IKE protocol version to use when establishing the VPN + // tunnel with peer VPN gateway. Acceptable IKE versions are 1 or 2. + // Default version is 2. + IkeVersion int64 `json:"ikeVersion,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#vpnTunnel for + // VPN tunnels. + Kind string `json:"kind,omitempty"` + + // LocalTrafficSelector: Local traffic selector to use when establishing + // the VPN tunnel with peer VPN gateway. The value should be a CIDR + // formatted string, for example: 192.168.0.0/16. The ranges should be + // disjoint. Only IPv4 is supported. + LocalTrafficSelector []string `json:"localTrafficSelector,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // PeerIp: IP address of the peer VPN gateway. Only IPv4 is supported. + PeerIp string `json:"peerIp,omitempty"` + + // Region: [Output Only] URL of the region where the VPN tunnel resides. + Region string `json:"region,omitempty"` + + // RemoteTrafficSelector: Remote traffic selectors to use when + // establishing the VPN tunnel with peer VPN gateway. The value should + // be a CIDR formatted string, for example: 192.168.0.0/16. The ranges + // should be disjoint. Only IPv4 is supported. + RemoteTrafficSelector []string `json:"remoteTrafficSelector,omitempty"` + + // Router: URL of router resource to be used for dynamic routing. + Router string `json:"router,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // SharedSecret: Shared secret used to set the secure session between + // the Cloud VPN gateway and the peer VPN gateway. + SharedSecret string `json:"sharedSecret,omitempty"` + + // SharedSecretHash: Hash of the shared secret. + SharedSecretHash string `json:"sharedSecretHash,omitempty"` + + // Status: [Output Only] The status of the VPN tunnel. 
+ // + // Possible values: + // "ALLOCATING_RESOURCES" + // "AUTHORIZATION_ERROR" + // "DEPROVISIONING" + // "ESTABLISHED" + // "FAILED" + // "FIRST_HANDSHAKE" + // "NEGOTIATION_FAILURE" + // "NETWORK_ERROR" + // "NO_INCOMING_PACKETS" + // "PROVISIONING" + // "REJECTED" + // "WAITING_FOR_FULL_CONFIG" + Status string `json:"status,omitempty"` + + // TargetVpnGateway: URL of the VPN gateway with which this VPN tunnel + // is associated. Provided by the client when the VPN tunnel is created. + TargetVpnGateway string `json:"targetVpnGateway,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnel) MarshalJSON() ([]byte, error) { + type noMethod VpnTunnel + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnTunnelAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of VpnTunnelsScopedList resources. + Items map[string]VpnTunnelsScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#vpnTunnel for + // VPN tunnels. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *VpnTunnelAggregatedListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod VpnTunnelAggregatedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnTunnelAggregatedListWarning: [Output Only] Informational warning +// message. +type VpnTunnelAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*VpnTunnelAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod VpnTunnelAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnTunnelAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. 
Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod VpnTunnelAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnTunnelList: Contains a list of VpnTunnel resources. +type VpnTunnelList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of VpnTunnel resources. + Items []*VpnTunnel `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#vpnTunnel for + // VPN tunnels. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *VpnTunnelListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelList) MarshalJSON() ([]byte, error) { + type noMethod VpnTunnelList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnTunnelListWarning: [Output Only] Informational warning message. +type VpnTunnelListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*VpnTunnelListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelListWarning) MarshalJSON() ([]byte, error) { + type noMethod VpnTunnelListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnTunnelListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelListWarningData) MarshalJSON() ([]byte, error) { + type noMethod VpnTunnelListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnTunnelsScopedList struct { + // VpnTunnels: List of vpn tunnels contained in this scope. + VpnTunnels []*VpnTunnel `json:"vpnTunnels,omitempty"` + + // Warning: Informational warning which replaces the list of addresses + // when the list is empty. + Warning *VpnTunnelsScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "VpnTunnels") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "VpnTunnels") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelsScopedList) MarshalJSON() ([]byte, error) { + type noMethod VpnTunnelsScopedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// VpnTunnelsScopedListWarning: Informational warning which replaces the +// list of addresses when the list is empty. +type VpnTunnelsScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. 
+ // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*VpnTunnelsScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelsScopedListWarning) MarshalJSON() ([]byte, error) { + type noMethod VpnTunnelsScopedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnTunnelsScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelsScopedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod VpnTunnelsScopedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type XpnHostList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: [Output Only] A list of shared VPC host project URLs. + Items []*Project `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. 
Always compute#xpnHostList for + // lists of shared VPC hosts. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *XpnHostListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *XpnHostList) MarshalJSON() ([]byte, error) { + type noMethod XpnHostList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// XpnHostListWarning: [Output Only] Informational warning message. +type XpnHostListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*XpnHostListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *XpnHostListWarning) MarshalJSON() ([]byte, error) { + type noMethod XpnHostListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type XpnHostListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *XpnHostListWarningData) MarshalJSON() ([]byte, error) { + type noMethod XpnHostListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// XpnResourceId: Service resource (a.k.a service project) ID. +type XpnResourceId struct { + // Id: The ID of the service resource. In the case of projects, this + // field matches the project ID (e.g., my-project), not the project + // number (e.g., 12345678). + Id string `json:"id,omitempty"` + + // Type: The type of the service resource. + // + // Possible values: + // "PROJECT" + // "XPN_RESOURCE_TYPE_UNSPECIFIED" + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *XpnResourceId) MarshalJSON() ([]byte, error) { + type noMethod XpnResourceId + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Zone: A Zone resource. +type Zone struct { + // AvailableCpuPlatforms: [Output Only] Available cpu/platform + // selections for the zone. + AvailableCpuPlatforms []string `json:"availableCpuPlatforms,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Deprecated: [Output Only] The deprecation status associated with this + // zone. + Deprecated *DeprecationStatus `json:"deprecated,omitempty"` + + // Description: [Output Only] Textual description of the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always compute#zone for + // zones. + Kind string `json:"kind,omitempty"` + + // Name: [Output Only] Name of the resource. + Name string `json:"name,omitempty"` + + // Region: [Output Only] Full URL reference to the region which hosts + // the zone. + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Status: [Output Only] Status of the zone, either UP or DOWN. + // + // Possible values: + // "DOWN" + // "UP" + Status string `json:"status,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. + // "AvailableCpuPlatforms") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AvailableCpuPlatforms") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *Zone) MarshalJSON() ([]byte, error) { + type noMethod Zone + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ZoneList: Contains a list of zone resources. +type ZoneList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of Zone resources. + Items []*Zone `json:"items,omitempty"` + + // Kind: Type of resource. 
+ Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *ZoneListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ZoneList) MarshalJSON() ([]byte, error) { + type noMethod ZoneList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ZoneListWarning: [Output Only] Informational warning message. +type ZoneListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*ZoneListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ZoneListWarning) MarshalJSON() ([]byte, error) { + type noMethod ZoneListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ZoneListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ZoneListWarningData) MarshalJSON() ([]byte, error) { + type noMethod ZoneListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ZoneSetLabelsRequest struct { + // LabelFingerprint: The fingerprint of the previous set of labels for + // this resource, used to detect conflicts. The fingerprint is initially + // generated by Compute Engine and changes after every request to modify + // or update labels. You must always provide an up-to-date fingerprint + // hash in order to update or change labels. Make a get() request to the + // resource to get the latest fingerprint. + LabelFingerprint string `json:"labelFingerprint,omitempty"` + + // Labels: The labels to set for this resource. + Labels map[string]string `json:"labels,omitempty"` + + // ForceSendFields is a list of field names (e.g. "LabelFingerprint") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "LabelFingerprint") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *ZoneSetLabelsRequest) MarshalJSON() ([]byte, error) { + type noMethod ZoneSetLabelsRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// method id "compute.acceleratorTypes.aggregatedList": + +type AcceleratorTypesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves an aggregated list of accelerator types. +func (r *AcceleratorTypesService) AggregatedList(project string) *AcceleratorTypesAggregatedListCall { + c := &AcceleratorTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *AcceleratorTypesAggregatedListCall) Filter(filter string) *AcceleratorTypesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *AcceleratorTypesAggregatedListCall) MaxResults(maxResults int64) *AcceleratorTypesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. 
By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *AcceleratorTypesAggregatedListCall) OrderBy(orderBy string) *AcceleratorTypesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *AcceleratorTypesAggregatedListCall) PageToken(pageToken string) *AcceleratorTypesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AcceleratorTypesAggregatedListCall) Fields(s ...googleapi.Field) *AcceleratorTypesAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AcceleratorTypesAggregatedListCall) IfNoneMatch(entityTag string) *AcceleratorTypesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AcceleratorTypesAggregatedListCall) Context(ctx context.Context) *AcceleratorTypesAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AcceleratorTypesAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AcceleratorTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/acceleratorTypes") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.acceleratorTypes.aggregatedList" call. +// Exactly one of *AcceleratorTypeAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *AcceleratorTypeAggregatedList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. 
Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*AcceleratorTypeAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &AcceleratorTypeAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an aggregated list of accelerator types.", + // "httpMethod": "GET", + // "id": "compute.acceleratorTypes.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/acceleratorTypes", + // "response": { + // "$ref": "AcceleratorTypeAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *AcceleratorTypesAggregatedListCall) Pages(ctx context.Context, f func(*AcceleratorTypeAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.acceleratorTypes.get": + +type AcceleratorTypesGetCall struct { + s *Service + project string + zone string + acceleratorType string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified accelerator type. Get a list of available +// accelerator types by making a list() request. +func (r *AcceleratorTypesService) Get(project string, zone string, acceleratorType string) *AcceleratorTypesGetCall { + c := &AcceleratorTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.acceleratorType = acceleratorType + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AcceleratorTypesGetCall) Fields(s ...googleapi.Field) *AcceleratorTypesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AcceleratorTypesGetCall) IfNoneMatch(entityTag string) *AcceleratorTypesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AcceleratorTypesGetCall) Context(ctx context.Context) *AcceleratorTypesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *AcceleratorTypesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AcceleratorTypesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/acceleratorTypes/{acceleratorType}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "acceleratorType": c.acceleratorType, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.acceleratorTypes.get" call. +// Exactly one of *AcceleratorType or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *AcceleratorType.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AcceleratorTypesGetCall) Do(opts ...googleapi.CallOption) (*AcceleratorType, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &AcceleratorType{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified accelerator type. 
Get a list of available accelerator types by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.acceleratorTypes.get", + // "parameterOrder": [ + // "project", + // "zone", + // "acceleratorType" + // ], + // "parameters": { + // "acceleratorType": { + // "description": "Name of the accelerator type to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/acceleratorTypes/{acceleratorType}", + // "response": { + // "$ref": "AcceleratorType" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.acceleratorTypes.list": + +type AcceleratorTypesListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of accelerator types available to the +// specified project. +func (r *AcceleratorTypesService) List(project string, zone string) *AcceleratorTypesListCall { + c := &AcceleratorTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. 
+func (c *AcceleratorTypesListCall) Filter(filter string) *AcceleratorTypesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *AcceleratorTypesListCall) MaxResults(maxResults int64) *AcceleratorTypesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *AcceleratorTypesListCall) OrderBy(orderBy string) *AcceleratorTypesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *AcceleratorTypesListCall) PageToken(pageToken string) *AcceleratorTypesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AcceleratorTypesListCall) Fields(s ...googleapi.Field) *AcceleratorTypesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AcceleratorTypesListCall) IfNoneMatch(entityTag string) *AcceleratorTypesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AcceleratorTypesListCall) Context(ctx context.Context) *AcceleratorTypesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *AcceleratorTypesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AcceleratorTypesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/acceleratorTypes") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.acceleratorTypes.list" call. +// Exactly one of *AcceleratorTypeList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *AcceleratorTypeList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AcceleratorTypesListCall) Do(opts ...googleapi.CallOption) (*AcceleratorTypeList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &AcceleratorTypeList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of accelerator types available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.acceleratorTypes.list", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/acceleratorTypes", + // "response": { + // "$ref": "AcceleratorTypeList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *AcceleratorTypesListCall) Pages(ctx context.Context, f func(*AcceleratorTypeList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.addresses.aggregatedList": + +type AddressesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves an aggregated list of addresses. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/aggregatedList +func (r *AddressesService) AggregatedList(project string) *AddressesAggregatedListCall { + c := &AddressesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *AddressesAggregatedListCall) Filter(filter string) *AddressesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *AddressesAggregatedListCall) MaxResults(maxResults int64) *AddressesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *AddressesAggregatedListCall) OrderBy(orderBy string) *AddressesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. 
+func (c *AddressesAggregatedListCall) PageToken(pageToken string) *AddressesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AddressesAggregatedListCall) Fields(s ...googleapi.Field) *AddressesAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AddressesAggregatedListCall) IfNoneMatch(entityTag string) *AddressesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AddressesAggregatedListCall) Context(ctx context.Context) *AddressesAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AddressesAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AddressesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/addresses") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.addresses.aggregatedList" call. +// Exactly one of *AddressAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *AddressAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*AddressAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
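+	// When IfNoneMatch was set and the server answers 304 Not Modified,
+	// doRequest still returns that response; the check below converts it into
+	// a *googleapi.Error carrying the status code and headers so callers can
+	// detect the case with googleapi.IsNotModified.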
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &AddressAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an aggregated list of addresses.", + // "httpMethod": "GET", + // "id": "compute.addresses.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/addresses", + // "response": { + // "$ref": "AddressAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *AddressesAggregatedListCall) Pages(ctx context.Context, f func(*AddressAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.addresses.delete": + +type AddressesDeleteCall struct { + s *Service + project string + region string + address string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified address resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/delete +func (r *AddressesService) Delete(project string, region string, address string) *AddressesDeleteCall { + c := &AddressesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.address = address + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *AddressesDeleteCall) RequestId(requestId string) *AddressesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AddressesDeleteCall) Fields(s ...googleapi.Field) *AddressesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *AddressesDeleteCall) Context(ctx context.Context) *AddressesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AddressesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AddressesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{address}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "address": c.address, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.addresses.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified address resource.", + // "httpMethod": "DELETE", + // "id": "compute.addresses.delete", + // "parameterOrder": [ + // "project", + // "region", + // "address" + // ], + // "parameters": { + // "address": { + // "description": "Name of the address resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/addresses/{address}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.addresses.get": + +type AddressesGetCall struct { + s *Service + project string + region string + address string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified address resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/get +func (r *AddressesService) Get(project string, region string, address string) *AddressesGetCall { + c := &AddressesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.address = address + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AddressesGetCall) Fields(s ...googleapi.Field) *AddressesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AddressesGetCall) IfNoneMatch(entityTag string) *AddressesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AddressesGetCall) Context(ctx context.Context) *AddressesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AddressesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AddressesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{address}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "address": c.address, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.addresses.get" call. +// Exactly one of *Address or error will be non-nil. Any non-2xx status +// code is an error. 
Response headers are in either +// *Address.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *AddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Address{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified address resource.", + // "httpMethod": "GET", + // "id": "compute.addresses.get", + // "parameterOrder": [ + // "project", + // "region", + // "address" + // ], + // "parameters": { + // "address": { + // "description": "Name of the address resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/addresses/{address}", + // "response": { + // "$ref": "Address" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.addresses.insert": + +type AddressesInsertCall struct { + s *Service + project string + region string + address *Address + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates an address resource in the specified project using +// the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/insert +func (r *AddressesService) Insert(project string, region string, address *Address) *AddressesInsertCall { + c := &AddressesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.address = address + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. 
This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *AddressesInsertCall) RequestId(requestId string) *AddressesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AddressesInsertCall) Fields(s ...googleapi.Field) *AddressesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AddressesInsertCall) Context(ctx context.Context) *AddressesInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AddressesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AddressesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.address) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.addresses.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates an address resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.addresses.insert", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/addresses", + // "request": { + // "$ref": "Address" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.addresses.list": + +type AddressesListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of addresses contained within the specified +// region. +// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/list +func (r *AddressesService) List(project string, region string) *AddressesListCall { + c := &AddressesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). 
The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *AddressesListCall) Filter(filter string) *AddressesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *AddressesListCall) MaxResults(maxResults int64) *AddressesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *AddressesListCall) OrderBy(orderBy string) *AddressesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *AddressesListCall) PageToken(pageToken string) *AddressesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AddressesListCall) Fields(s ...googleapi.Field) *AddressesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
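For orientation while reading the generated option setters above, here is a minimal sketch of combining Filter, OrderBy and MaxResults on the addresses list call. The import path, project and region values are assumptions for illustration (not taken from this diff), and Items/Name/Address are the usual generated field names.

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v0.beta" // import path assumed for the vendored package
)

func main() {
	ctx := context.Background()

	// Authenticated client via Application Default Credentials (assumed environment).
	hc, err := google.DefaultClient(ctx, compute.ComputeScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(hc)
	if err != nil {
		log.Fatal(err)
	}

	// Filter syntax follows the doc comment above: field_name comparison_string literal_string.
	// "my-project" and "us-central1" are placeholder values.
	list, err := svc.Addresses.List("my-project", "us-central1").
		Filter("name ne example-address").
		OrderBy("creationTimestamp desc").
		MaxResults(50).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, a := range list.Items { // Items/Name/Address are the usual generated fields
		fmt.Println(a.Name, a.Address)
	}
}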
+func (c *AddressesListCall) IfNoneMatch(entityTag string) *AddressesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AddressesListCall) Context(ctx context.Context) *AddressesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AddressesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AddressesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.addresses.list" call. +// Exactly one of *AddressList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *AddressList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &AddressList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of addresses contained within the specified region.", + // "httpMethod": "GET", + // "id": "compute.addresses.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/addresses", + // "response": { + // "$ref": "AddressList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
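The Pages helper defined next drives the pageToken/nextPageToken loop for the caller. A sketch, reusing a svc built as in the earlier example and placeholder project/region values:

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // import path assumed for the vendored package
)

// listAllAddresses walks every page of the region's addresses, letting Pages
// handle pageToken/nextPageToken. Returning a non-nil error from the callback
// stops the iteration, as the doc comment above notes.
func listAllAddresses(ctx context.Context, svc *compute.Service) error {
	call := svc.Addresses.List("my-project", "us-central1").MaxResults(100)
	return call.Pages(ctx, func(page *compute.AddressList) error {
		for _, a := range page.Items { // Items is the usual generated field
			fmt.Printf("%s\t%s\n", a.Name, a.Address)
		}
		return nil
	})
}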
+func (c *AddressesListCall) Pages(ctx context.Context, f func(*AddressList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.addresses.setLabels": + +type AddressesSetLabelsCall struct { + s *Service + project string + region string + resource string + regionsetlabelsrequest *RegionSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetLabels: Sets the labels on an Address. To learn more about labels, +// read the Labeling Resources documentation. +func (r *AddressesService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *AddressesSetLabelsCall { + c := &AddressesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.regionsetlabelsrequest = regionsetlabelsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *AddressesSetLabelsCall) RequestId(requestId string) *AddressesSetLabelsCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AddressesSetLabelsCall) Fields(s ...googleapi.Field) *AddressesSetLabelsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AddressesSetLabelsCall) Context(ctx context.Context) *AddressesSetLabelsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AddressesSetLabelsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AddressesSetLabelsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{resource}/setLabels") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.addresses.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AddressesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the labels on an Address. To learn more about labels, read the Labeling Resources documentation.", + // "httpMethod": "POST", + // "id": "compute.addresses.setLabels", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/addresses/{resource}/setLabels", + // "request": { + // "$ref": "RegionSetLabelsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.addresses.testIamPermissions": + +type AddressesTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *AddressesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *AddressesTestIamPermissionsCall { + c := &AddressesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AddressesTestIamPermissionsCall) Fields(s ...googleapi.Field) *AddressesTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AddressesTestIamPermissionsCall) Context(ctx context.Context) *AddressesTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AddressesTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AddressesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{resource}/testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.addresses.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AddressesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TestPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.addresses.testIamPermissions", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/addresses/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, + // "response": { + // "$ref": "TestPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.autoscalers.aggregatedList": + +type AutoscalersAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves an aggregated list of autoscalers. +func (r *AutoscalersService) AggregatedList(project string) *AutoscalersAggregatedListCall { + c := &AutoscalersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. 
The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *AutoscalersAggregatedListCall) Filter(filter string) *AutoscalersAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *AutoscalersAggregatedListCall) MaxResults(maxResults int64) *AutoscalersAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *AutoscalersAggregatedListCall) OrderBy(orderBy string) *AutoscalersAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *AutoscalersAggregatedListCall) PageToken(pageToken string) *AutoscalersAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersAggregatedListCall) Fields(s ...googleapi.Field) *AutoscalersAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
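The IfNoneMatch setter documented above enables conditional requests, and googleapi.IsNotModified distinguishes a 304 from a real failure. A hedged sketch, assuming the server supplied an ETag header on the earlier response and reusing a svc built as in the first example:

package example

import (
	"context"

	compute "google.golang.org/api/compute/v0.beta" // import path assumed for the vendored package
	"google.golang.org/api/googleapi"
)

// refreshAggregatedList re-fetches the aggregated autoscaler list only if it
// changed since prev was retrieved. prev's ETag comes from the promoted
// googleapi.ServerResponse header; on a 304 the cached copy is kept.
func refreshAggregatedList(ctx context.Context, svc *compute.Service, prev *compute.AutoscalerAggregatedList) (*compute.AutoscalerAggregatedList, error) {
	call := svc.Autoscalers.AggregatedList("my-project").Context(ctx) // placeholder project
	if prev != nil {
		call.IfNoneMatch(prev.Header.Get("Etag"))
	}
	cur, err := call.Do()
	if googleapi.IsNotModified(err) {
		return prev, nil
	}
	if err != nil {
		return nil, err
	}
	return cur, nil
}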
+func (c *AutoscalersAggregatedListCall) IfNoneMatch(entityTag string) *AutoscalersAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersAggregatedListCall) Context(ctx context.Context) *AutoscalersAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AutoscalersAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AutoscalersAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/autoscalers") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.autoscalers.aggregatedList" call. +// Exactly one of *AutoscalerAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *AutoscalerAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*AutoscalerAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &AutoscalerAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an aggregated list of autoscalers.", + // "httpMethod": "GET", + // "id": "compute.autoscalers.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/autoscalers", + // "response": { + // "$ref": "AutoscalerAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
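Aggregated lists group results per scope rather than returning one flat slice. Under the usual generated shape (an Items map keyed by zone scope, each entry holding an Autoscalers slice — assumed here, since those struct definitions fall outside this hunk), a paging walk might look like:

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // import path assumed for the vendored package
)

// printAutoscalersByScope pages through the aggregated list and prints the
// autoscalers found in each scope. Items and Autoscalers are the usual
// generated field names and are assumptions, not shown in this hunk.
func printAutoscalersByScope(ctx context.Context, svc *compute.Service) error {
	return svc.Autoscalers.AggregatedList("my-project").Pages(ctx, // placeholder project
		func(page *compute.AutoscalerAggregatedList) error {
			for scope, scoped := range page.Items {
				for _, as := range scoped.Autoscalers {
					fmt.Printf("%s: %s\n", scope, as.Name)
				}
			}
			return nil
		})
}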
+func (c *AutoscalersAggregatedListCall) Pages(ctx context.Context, f func(*AutoscalerAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.autoscalers.delete": + +type AutoscalersDeleteCall struct { + s *Service + project string + zone string + autoscaler string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified autoscaler. +func (r *AutoscalersService) Delete(project string, zone string, autoscaler string) *AutoscalersDeleteCall { + c := &AutoscalersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.autoscaler = autoscaler + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *AutoscalersDeleteCall) RequestId(requestId string) *AutoscalersDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersDeleteCall) Fields(s ...googleapi.Field) *AutoscalersDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersDeleteCall) Context(ctx context.Context) *AutoscalersDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AutoscalersDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers/{autoscaler}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "autoscaler": c.autoscaler, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.autoscalers.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified autoscaler.", + // "httpMethod": "DELETE", + // "id": "compute.autoscalers.delete", + // "parameterOrder": [ + // "project", + // "zone", + // "autoscaler" + // ], + // "parameters": { + // "autoscaler": { + // "description": "Name of the autoscaler to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/autoscalers/{autoscaler}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.autoscalers.get": + +type AutoscalersGetCall struct { + s *Service + project string + zone string + autoscaler string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified autoscaler resource. Get a list of +// available autoscalers by making a list() request. 
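Because Do wraps any non-2xx status in *googleapi.Error (as the error construction in these methods shows), callers can tell a missing resource apart from other failures. A minimal sketch with placeholder names:

package example

import (
	"context"
	"net/http"

	compute "google.golang.org/api/compute/v0.beta" // import path assumed for the vendored package
	"google.golang.org/api/googleapi"
)

// autoscalerExists reports whether the named autoscaler is present, treating
// an HTTP 404 from Do as "does not exist" rather than as a hard error.
func autoscalerExists(ctx context.Context, svc *compute.Service, project, zone, name string) (bool, error) {
	_, err := svc.Autoscalers.Get(project, zone, name).Context(ctx).Do()
	if err == nil {
		return true, nil
	}
	if apiErr, ok := err.(*googleapi.Error); ok && apiErr.Code == http.StatusNotFound {
		return false, nil
	}
	return false, err
}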
+func (r *AutoscalersService) Get(project string, zone string, autoscaler string) *AutoscalersGetCall { + c := &AutoscalersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.autoscaler = autoscaler + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersGetCall) Fields(s ...googleapi.Field) *AutoscalersGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AutoscalersGetCall) IfNoneMatch(entityTag string) *AutoscalersGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersGetCall) Context(ctx context.Context) *AutoscalersGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AutoscalersGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AutoscalersGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers/{autoscaler}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "autoscaler": c.autoscaler, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.autoscalers.get" call. +// Exactly one of *Autoscaler or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Autoscaler.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Autoscaler{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified autoscaler resource. 
Get a list of available autoscalers by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.autoscalers.get", + // "parameterOrder": [ + // "project", + // "zone", + // "autoscaler" + // ], + // "parameters": { + // "autoscaler": { + // "description": "Name of the autoscaler to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/autoscalers/{autoscaler}", + // "response": { + // "$ref": "Autoscaler" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.autoscalers.insert": + +type AutoscalersInsertCall struct { + s *Service + project string + zone string + autoscaler *Autoscaler + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates an autoscaler in the specified project using the data +// included in the request. +func (r *AutoscalersService) Insert(project string, zone string, autoscaler *Autoscaler) *AutoscalersInsertCall { + c := &AutoscalersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.autoscaler = autoscaler + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *AutoscalersInsertCall) RequestId(requestId string) *AutoscalersInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersInsertCall) Fields(s ...googleapi.Field) *AutoscalersInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersInsertCall) Context(ctx context.Context) *AutoscalersInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
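The requestId parameter documented above makes retries of the same logical insert safe: re-sending with the same UUID lets the server ignore the duplicate once the original has been applied. A hedged sketch; the UUID literal, autoscaler fields and project/zone values are placeholders, and a real caller would generate a fresh UUID per logical request:

package example

import (
	"context"
	"log"
	"time"

	compute "google.golang.org/api/compute/v0.beta" // import path assumed for the vendored package
)

// insertAutoscaler creates an autoscaler and retries failed attempts while
// reusing one request ID, so a retry of an already-applied insert is ignored
// by the server. Name/Target values are illustrative only.
func insertAutoscaler(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	as := &compute.Autoscaler{
		Name:   "example-autoscaler",
		Target: "https://www.googleapis.com/compute/beta/projects/my-project/zones/us-central1-f/instanceGroupManagers/example-igm",
	}
	const reqID = "3c5e9f1a-8a27-4d4e-9b1e-0123456789ab" // unique, non-zero UUID per logical request

	var lastErr error
	for attempt := 1; attempt <= 3; attempt++ {
		callCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
		op, err := svc.Autoscalers.Insert("my-project", "us-central1-f", as).
			RequestId(reqID).
			Context(callCtx).
			Do()
		cancel()
		if err == nil {
			return op, nil
		}
		lastErr = err // a real caller would inspect err and stop on permanent failures
		log.Printf("insert attempt %d failed: %v", attempt, err)
	}
	return nil, lastErr
}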
+func (c *AutoscalersInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AutoscalersInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.autoscalers.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates an autoscaler in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.autoscalers.insert", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/autoscalers", + // "request": { + // "$ref": "Autoscaler" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.autoscalers.list": + +type AutoscalersListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of autoscalers contained within the specified +// zone. +func (r *AutoscalersService) List(project string, zone string) *AutoscalersListCall { + c := &AutoscalersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *AutoscalersListCall) Filter(filter string) *AutoscalersListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *AutoscalersListCall) MaxResults(maxResults int64) *AutoscalersListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. 
By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *AutoscalersListCall) OrderBy(orderBy string) *AutoscalersListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *AutoscalersListCall) PageToken(pageToken string) *AutoscalersListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersListCall) Fields(s ...googleapi.Field) *AutoscalersListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *AutoscalersListCall) IfNoneMatch(entityTag string) *AutoscalersListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersListCall) Context(ctx context.Context) *AutoscalersListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AutoscalersListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AutoscalersListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.autoscalers.list" call. +// Exactly one of *AutoscalerList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *AutoscalerList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &AutoscalerList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of autoscalers contained within the specified zone.", + // "httpMethod": "GET", + // "id": "compute.autoscalers.list", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/autoscalers", + // "response": { + // "$ref": "AutoscalerList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *AutoscalersListCall) Pages(ctx context.Context, f func(*AutoscalerList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.autoscalers.patch": + +type AutoscalersPatchCall struct { + s *Service + project string + zone string + autoscaler *Autoscaler + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates an autoscaler in the specified project using the data +// included in the request. This method supports PATCH semantics and +// uses the JSON merge patch format and processing rules. +func (r *AutoscalersService) Patch(project string, zone string, autoscaler *Autoscaler) *AutoscalersPatchCall { + c := &AutoscalersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.autoscaler = autoscaler + return c +} + +// Autoscaler sets the optional parameter "autoscaler": Name of the +// autoscaler to patch. +func (c *AutoscalersPatchCall) Autoscaler(autoscaler string) *AutoscalersPatchCall { + c.urlParams_.Set("autoscaler", autoscaler) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *AutoscalersPatchCall) RequestId(requestId string) *AutoscalersPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. 
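// A minimal usage sketch of the generated AutoscalersService.List builder and
// its Pages helper shown above. The import path is assumed to be the beta
// compute client ("google.golang.org/api/compute/v0.beta") -- adjust it to the
// version actually vendored here -- and the project, zone, and filter values
// are placeholders. The filter string follows the
// "field_name comparison_string literal_string" format documented in the
// Filter comment above.
package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v0.beta"
)

func main() {
	ctx := context.Background()

	// Application Default Credentials with the compute scope listed in the
	// generated "scopes" metadata above.
	client, err := google.DefaultClient(ctx, compute.ComputeScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}

	// List autoscalers in one zone, 100 per page, skipping one by name, and
	// let Pages drive the pageToken/nextPageToken loop.
	call := svc.Autoscalers.List("my-project", "us-central1-f").
		Filter("name ne example-autoscaler").
		MaxResults(100)
	err = call.Pages(ctx, func(page *compute.AutoscalerList) error {
		for _, a := range page.Items {
			fmt.Println(a.Name)
		}
		return nil // a non-nil error returned here halts the iteration
	})
	if err != nil {
		log.Fatal(err)
	}
}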
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersPatchCall) Fields(s ...googleapi.Field) *AutoscalersPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersPatchCall) Context(ctx context.Context) *AutoscalersPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AutoscalersPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AutoscalersPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.autoscalers.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an autoscaler in the specified project using the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.autoscalers.patch", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "autoscaler": { + // "description": "Name of the autoscaler to patch.", + // "location": "query", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/autoscalers", + // "request": { + // "$ref": "Autoscaler" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.autoscalers.testIamPermissions": + +type AutoscalersTestIamPermissionsCall struct { + s *Service + project string + zone string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *AutoscalersService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *AutoscalersTestIamPermissionsCall { + c := &AutoscalersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersTestIamPermissionsCall) Fields(s ...googleapi.Field) *AutoscalersTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersTestIamPermissionsCall) Context(ctx context.Context) *AutoscalersTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
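// A sketch of the AutoscalersService.Patch builder above: PATCH carries the
// autoscaler name as the optional "autoscaler" query parameter rather than a
// path segment, and an optional requestId makes a retried call idempotent.
// Reuses the imports and svc client from the earlier sketch; the resource
// names, the UUID literal, and the patched field are illustrative.
func patchAutoscaler(ctx context.Context, svc *compute.Service) error {
	patch := &compute.Autoscaler{
		// Only the fields being changed need to be populated for a JSON
		// merge patch; MaxNumReplicas is just an example field.
		AutoscalingPolicy: &compute.AutoscalingPolicy{MaxNumReplicas: 10},
	}
	op, err := svc.Autoscalers.Patch("my-project", "us-central1-f", patch).
		Autoscaler("example-autoscaler").                  // name of the autoscaler to patch
		RequestId("3c6f4a0e-9d7b-4c1e-8f2a-1b2c3d4e5f60"). // safe to resend on timeout
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	// Do returns a zonal Operation; callers typically poll it until Status
	// reports "DONE".
	fmt.Printf("patch operation %s: %s\n", op.Name, op.Status)
	return nil
}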
+func (c *AutoscalersTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AutoscalersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers/{resource}/testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.autoscalers.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AutoscalersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TestPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.autoscalers.testIamPermissions", + // "parameterOrder": [ + // "project", + // "zone", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/autoscalers/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, + // "response": { + // "$ref": "TestPermissionsResponse" + // }, + // "scopes": [ + // 
"https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.autoscalers.update": + +type AutoscalersUpdateCall struct { + s *Service + project string + zone string + autoscaler *Autoscaler + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates an autoscaler in the specified project using the data +// included in the request. +func (r *AutoscalersService) Update(project string, zone string, autoscaler *Autoscaler) *AutoscalersUpdateCall { + c := &AutoscalersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.autoscaler = autoscaler + return c +} + +// Autoscaler sets the optional parameter "autoscaler": Name of the +// autoscaler to update. +func (c *AutoscalersUpdateCall) Autoscaler(autoscaler string) *AutoscalersUpdateCall { + c.urlParams_.Set("autoscaler", autoscaler) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *AutoscalersUpdateCall) RequestId(requestId string) *AutoscalersUpdateCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AutoscalersUpdateCall) Fields(s ...googleapi.Field) *AutoscalersUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AutoscalersUpdateCall) Context(ctx context.Context) *AutoscalersUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AutoscalersUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.autoscalers.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *AutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an autoscaler in the specified project using the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.autoscalers.update", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "autoscaler": { + // "description": "Name of the autoscaler to update.", + // "location": "query", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
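// A sketch of the AutoscalersService.TestIamPermissions call above: the
// caller supplies candidate permission strings and receives the subset it
// actually holds on the resource. Reuses the svc client from the first
// sketch; the permission string and resource names are placeholders.
func checkAutoscalerPermissions(ctx context.Context, svc *compute.Service) error {
	req := &compute.TestPermissionsRequest{
		Permissions: []string{"compute.autoscalers.update"},
	}
	resp, err := svc.Autoscalers.TestIamPermissions(
		"my-project", "us-central1-f", "example-autoscaler", req).
		Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Println("granted:", resp.Permissions) // subset of the requested permissions
	return nil
}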
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/autoscalers", + // "request": { + // "$ref": "Autoscaler" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendBuckets.delete": + +type BackendBucketsDeleteCall struct { + s *Service + project string + backendBucket string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified BackendBucket resource. +func (r *BackendBucketsService) Delete(project string, backendBucket string) *BackendBucketsDeleteCall { + c := &BackendBucketsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendBucket = backendBucket + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendBucketsDeleteCall) RequestId(requestId string) *BackendBucketsDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendBucketsDeleteCall) Fields(s ...googleapi.Field) *BackendBucketsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendBucketsDeleteCall) Context(ctx context.Context) *BackendBucketsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendBucketsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendBucketsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendBucket": c.backendBucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendBuckets.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendBucketsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified BackendBucket resource.", + // "httpMethod": "DELETE", + // "id": "compute.backendBuckets.delete", + // "parameterOrder": [ + // "project", + // "backendBucket" + // ], + // "parameters": { + // "backendBucket": { + // "description": "Name of the BackendBucket resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/backendBuckets/{backendBucket}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendBuckets.get": + +type BackendBucketsGetCall struct { + s *Service + project string + backendBucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified BackendBucket resource. 
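// A sketch of the BackendBucketsService.Delete call above, including the
// *googleapi.Error shape that CheckResponse produces for non-2xx responses.
// Reuses the svc client from the first sketch and additionally assumes the
// "net/http" and "google.golang.org/api/googleapi" imports; the bucket name
// is a placeholder.
func deleteBackendBucket(ctx context.Context, svc *compute.Service) error {
	op, err := svc.BackendBuckets.Delete("my-project", "example-bucket").
		Context(ctx).Do()
	if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == http.StatusNotFound {
		return nil // already gone; treat as success
	}
	if err != nil {
		return err
	}
	fmt.Printf("delete operation %s: %s\n", op.Name, op.Status)
	return nil
}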
Get a list of +// available backend buckets by making a list() request. +func (r *BackendBucketsService) Get(project string, backendBucket string) *BackendBucketsGetCall { + c := &BackendBucketsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendBucket = backendBucket + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendBucketsGetCall) Fields(s ...googleapi.Field) *BackendBucketsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *BackendBucketsGetCall) IfNoneMatch(entityTag string) *BackendBucketsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendBucketsGetCall) Context(ctx context.Context) *BackendBucketsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendBucketsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendBucketsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendBucket": c.backendBucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendBuckets.get" call. +// Exactly one of *BackendBucket or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *BackendBucket.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &BackendBucket{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified BackendBucket resource. Get a list of available backend buckets by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.backendBuckets.get", + // "parameterOrder": [ + // "project", + // "backendBucket" + // ], + // "parameters": { + // "backendBucket": { + // "description": "Name of the BackendBucket resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/backendBuckets/{backendBucket}", + // "response": { + // "$ref": "BackendBucket" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.backendBuckets.insert": + +type BackendBucketsInsertCall struct { + s *Service + project string + backendbucket *BackendBucket + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a BackendBucket resource in the specified project +// using the data included in the request. +func (r *BackendBucketsService) Insert(project string, backendbucket *BackendBucket) *BackendBucketsInsertCall { + c := &BackendBucketsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendbucket = backendbucket + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendBucketsInsertCall) RequestId(requestId string) *BackendBucketsInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
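// A sketch of the conditional read pattern documented on BackendBucketsGetCall
// above: send the ETag from a previous response via IfNoneMatch (an
// If-None-Match header) and use googleapi.IsNotModified to recognize the 304
// case. Reuses the svc client from the first sketch plus the
// "google.golang.org/api/googleapi" import; the cached ETag value is supplied
// by the caller.
func getBackendBucketIfChanged(ctx context.Context, svc *compute.Service, cachedETag string) (*compute.BackendBucket, error) {
	bucket, err := svc.BackendBuckets.Get("my-project", "example-bucket").
		IfNoneMatch(cachedETag).
		Context(ctx).Do()
	if googleapi.IsNotModified(err) {
		return nil, nil // unchanged since cachedETag; keep using the cached copy
	}
	if err != nil {
		return nil, err
	}
	fmt.Println("fetched", bucket.Name, "status", bucket.HTTPStatusCode)
	return bucket, nil
}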
+func (c *BackendBucketsInsertCall) Fields(s ...googleapi.Field) *BackendBucketsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendBucketsInsertCall) Context(ctx context.Context) *BackendBucketsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendBucketsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendBucketsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendbucket) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendBuckets.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendBucketsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a BackendBucket resource in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.backendBuckets.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/backendBuckets", + // "request": { + // "$ref": "BackendBucket" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendBuckets.list": + +type BackendBucketsListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves the list of BackendBucket resources available to the +// specified project. +func (r *BackendBucketsService) List(project string) *BackendBucketsListCall { + c := &BackendBucketsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *BackendBucketsListCall) Filter(filter string) *BackendBucketsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *BackendBucketsListCall) MaxResults(maxResults int64) *BackendBucketsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. 
By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *BackendBucketsListCall) OrderBy(orderBy string) *BackendBucketsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *BackendBucketsListCall) PageToken(pageToken string) *BackendBucketsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendBucketsListCall) Fields(s ...googleapi.Field) *BackendBucketsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *BackendBucketsListCall) IfNoneMatch(entityTag string) *BackendBucketsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendBucketsListCall) Context(ctx context.Context) *BackendBucketsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendBucketsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendBucketsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendBuckets.list" call. +// Exactly one of *BackendBucketList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BackendBucketList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. 
+func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucketList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &BackendBucketList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of BackendBucket resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.backendBuckets.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/backendBuckets", + // "response": { + // "$ref": "BackendBucketList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *BackendBucketsListCall) Pages(ctx context.Context, f func(*BackendBucketList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.backendBuckets.patch": + +type BackendBucketsPatchCall struct { + s *Service + project string + backendBucket string + backendbucket *BackendBucket + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates the specified BackendBucket resource with the data +// included in the request. This method supports PATCH semantics and +// uses the JSON merge patch format and processing rules. +func (r *BackendBucketsService) Patch(project string, backendBucket string, backendbucket *BackendBucket) *BackendBucketsPatchCall { + c := &BackendBucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendBucket = backendBucket + c.backendbucket = backendbucket + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendBucketsPatchCall) RequestId(requestId string) *BackendBucketsPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
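// A sketch of driving the BackendBucketsService.List pagination by hand with
// PageToken/NextPageToken, instead of the Pages helper above, combined with
// the documented orderBy value "creationTimestamp desc". Reuses the svc
// client from the first sketch; the project name is a placeholder.
func listBackendBucketsNewestFirst(ctx context.Context, svc *compute.Service) error {
	pageToken := ""
	for {
		call := svc.BackendBuckets.List("my-project").
			OrderBy("creationTimestamp desc").
			MaxResults(50).
			Context(ctx)
		if pageToken != "" {
			call = call.PageToken(pageToken)
		}
		page, err := call.Do()
		if err != nil {
			return err
		}
		for _, b := range page.Items {
			fmt.Println(b.Name, "->", b.BucketName)
		}
		if page.NextPageToken == "" {
			return nil // no more pages
		}
		pageToken = page.NextPageToken
	}
}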
+func (c *BackendBucketsPatchCall) Fields(s ...googleapi.Field) *BackendBucketsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendBucketsPatchCall) Context(ctx context.Context) *BackendBucketsPatchCall { + c.ctx_ = ctx + return c +} // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AcceleratorTypesAggregatedListCall) Header() http.Header { +func (c *BackendBucketsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AcceleratorTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendBucketsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendbucket) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendBucket": c.backendBucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendBuckets.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendBucketsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the specified BackendBucket resource with the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.backendBuckets.patch", + // "parameterOrder": [ + // "project", + // "backendBucket" + // ], + // "parameters": { + // "backendBucket": { + // "description": "Name of the BackendBucket resource to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/backendBuckets/{backendBucket}", + // "request": { + // "$ref": "BackendBucket" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendBuckets.update": + +type BackendBucketsUpdateCall struct { + s *Service + project string + backendBucket string + backendbucket *BackendBucket + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates the specified BackendBucket resource with the data +// included in the request. +func (r *BackendBucketsService) Update(project string, backendBucket string, backendbucket *BackendBucket) *BackendBucketsUpdateCall { + c := &BackendBucketsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendBucket = backendBucket + c.backendbucket = backendbucket + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendBucketsUpdateCall) RequestId(requestId string) *BackendBucketsUpdateCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendBucketsUpdateCall) Fields(s ...googleapi.Field) *BackendBucketsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendBucketsUpdateCall) Context(ctx context.Context) *BackendBucketsUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendBucketsUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendBucketsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendbucket) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendBucket": c.backendBucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendBuckets.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendBucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the specified BackendBucket resource with the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.backendBuckets.update", + // "parameterOrder": [ + // "project", + // "backendBucket" + // ], + // "parameters": { + // "backendBucket": { + // "description": "Name of the BackendBucket resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/backendBuckets/{backendBucket}", + // "request": { + // "$ref": "BackendBucket" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendServices.aggregatedList": + +type BackendServicesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves the list of all BackendService resources, +// regional and global, available to the specified project. +func (r *BackendServicesService) AggregatedList(project string) *BackendServicesAggregatedListCall { + c := &BackendServicesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). 
The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *BackendServicesAggregatedListCall) Filter(filter string) *BackendServicesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *BackendServicesAggregatedListCall) MaxResults(maxResults int64) *BackendServicesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *BackendServicesAggregatedListCall) OrderBy(orderBy string) *BackendServicesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *BackendServicesAggregatedListCall) PageToken(pageToken string) *BackendServicesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesAggregatedListCall) Fields(s ...googleapi.Field) *BackendServicesAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *BackendServicesAggregatedListCall) IfNoneMatch(entityTag string) *BackendServicesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesAggregatedListCall) Context(ctx context.Context) *BackendServicesAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendServicesAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendServicesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -16694,7 +29364,7 @@ func (c *AcceleratorTypesAggregatedListCall) doRequest(alt string) (*http.Respon } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/acceleratorTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/backendServices") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders @@ -16703,15 +29373,15 @@ func (c *AcceleratorTypesAggregatedListCall) doRequest(alt string) (*http.Respon }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } - -// Do executes the "compute.acceleratorTypes.aggregatedList" call. -// Exactly one of *AcceleratorTypeAggregatedList or error will be + +// Do executes the "compute.backendServices.aggregatedList" call. +// Exactly one of *BackendServiceAggregatedList or error will be // non-nil. Any non-2xx status code is an error. Response headers are in -// either *AcceleratorTypeAggregatedList.ServerResponse.Header or (if a +// either *BackendServiceAggregatedList.ServerResponse.Header or (if a // response was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*AcceleratorTypeAggregatedList, error) { +func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*BackendServiceAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -16730,7 +29400,7 @@ func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (* if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &AcceleratorTypeAggregatedList{ + ret := &BackendServiceAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -16742,9 +29412,9 @@ func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Retrieves an aggregated list of accelerator types.", + // "description": "Retrieves the list of all BackendService resources, regional and global, available to the specified project.", // "httpMethod": "GET", - // "id": "compute.acceleratorTypes.aggregatedList", + // "id": "compute.backendServices.aggregatedList", // "parameterOrder": [ // "project" // ], @@ -16773,16 +29443,16 @@ func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (* // "type": "string" // }, // "project": { - // "description": "Project ID for this request.", + // "description": "Name of the project scoping this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/aggregated/acceleratorTypes", + // "path": "{project}/aggregated/backendServices", // "response": { - // "$ref": "AcceleratorTypeAggregatedList" + // "$ref": "BackendServiceAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -16796,7 +29466,7 @@ func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (* // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *AcceleratorTypesAggregatedListCall) Pages(ctx context.Context, f func(*AcceleratorTypeAggregatedList) error) error { +func (c *BackendServicesAggregatedListCall) Pages(ctx context.Context, f func(*BackendServiceAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -16814,95 +29484,566 @@ func (c *AcceleratorTypesAggregatedListCall) Pages(ctx context.Context, f func(* } } -// method id "compute.acceleratorTypes.get": +// method id "compute.backendServices.delete": -type AcceleratorTypesGetCall struct { - s *Service - project string - zone string - acceleratorType string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type BackendServicesDeleteCall struct { + s *Service + project string + backendService string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified BackendService resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/delete +func (r *BackendServicesService) Delete(project string, backendService string) *BackendServicesDeleteCall { + c := &BackendServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendService = backendService + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. 
Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendServicesDeleteCall) RequestId(requestId string) *BackendServicesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesDeleteCall) Fields(s ...googleapi.Field) *BackendServicesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesDeleteCall) Context(ctx context.Context) *BackendServicesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendServicesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendServicesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendService": c.backendService, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendServices.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified BackendService resource.", + // "httpMethod": "DELETE", + // "id": "compute.backendServices.delete", + // "parameterOrder": [ + // "project", + // "backendService" + // ], + // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/backendServices/{backendService}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendServices.get": + +type BackendServicesGetCall struct { + s *Service + project string + backendService string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified BackendService resource. Get a list of +// available backend services by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/get +func (r *BackendServicesService) Get(project string, backendService string) *BackendServicesGetCall { + c := &BackendServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendService = backendService + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
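// --- Illustrative usage sketch: not part of the generated client ---
// The RequestId parameter documented on the Delete call above makes retries safe:
// resending the same UUID lets the server recognise and drop the duplicate request.
// A hedged sketch, assuming the compute/v1 import path and a caller-supplied UUID:

package example

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// deleteBackendService issues the asynchronous delete and returns the resulting
// global Operation. requestID must be a valid, client-generated UUID; reuse the
// same value if the call has to be retried after a timeout.
func deleteBackendService(ctx context.Context, svc *compute.Service, project, name, requestID string) (*compute.Operation, error) {
	return svc.BackendServices.Delete(project, name).
		RequestId(requestID).
		Context(ctx).
		Do()
}
// --- end of sketch ---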
+func (c *BackendServicesGetCall) Fields(s ...googleapi.Field) *BackendServicesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *BackendServicesGetCall) IfNoneMatch(entityTag string) *BackendServicesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesGetCall) Context(ctx context.Context) *BackendServicesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendServicesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendServicesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendService": c.backendService, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendServices.get" call. +// Exactly one of *BackendService or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *BackendService.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendService, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &BackendService{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified BackendService resource. 
Get a list of available backend services by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.backendServices.get", + // "parameterOrder": [ + // "project", + // "backendService" + // ], + // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/backendServices/{backendService}", + // "response": { + // "$ref": "BackendService" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.backendServices.getHealth": + +type BackendServicesGetHealthCall struct { + s *Service + project string + backendService string + resourcegroupreference *ResourceGroupReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// GetHealth: Gets the most recent health check results for this +// BackendService. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/getHealth +func (r *BackendServicesService) GetHealth(project string, backendService string, resourcegroupreference *ResourceGroupReference) *BackendServicesGetHealthCall { + c := &BackendServicesGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendService = backendService + c.resourcegroupreference = resourcegroupreference + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesGetHealthCall) Fields(s ...googleapi.Field) *BackendServicesGetHealthCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesGetHealthCall) Context(ctx context.Context) *BackendServicesGetHealthCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendServicesGetHealthCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendServicesGetHealthCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.resourcegroupreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/getHealth") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendService": c.backendService, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendServices.getHealth" call. +// Exactly one of *BackendServiceGroupHealth or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *BackendServiceGroupHealth.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*BackendServiceGroupHealth, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &BackendServiceGroupHealth{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the most recent health check results for this BackendService.", + // "httpMethod": "POST", + // "id": "compute.backendServices.getHealth", + // "parameterOrder": [ + // "project", + // "backendService" + // ], + // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to which the queried instance belongs.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/backendServices/{backendService}/getHealth", + // "request": { + // "$ref": "ResourceGroupReference" + // }, + // "response": { + // "$ref": "BackendServiceGroupHealth" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.backendServices.insert": + +type BackendServicesInsertCall struct { + s *Service + project string + backendservice *BackendService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a BackendService resource in the specified project +// using the data included in the request. There are several +// restrictions and guidelines to keep in mind when creating a backend +// service. Read Restrictions and Guidelines for more information. 
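// --- Illustrative usage sketch: not part of the generated client ---
// GetHealth, shown above, POSTs a ResourceGroupReference naming one backend
// instance group and returns per-instance health. A hedged sketch, assuming the
// compute/v1 import path; the instance-group URL is a placeholder:

package example

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// printBackendHealth lists the most recent health state of each instance behind
// the named backend service for one instance group.
func printBackendHealth(svc *compute.Service, project, backendService, instanceGroupURL string) error {
	ref := &compute.ResourceGroupReference{Group: instanceGroupURL}
	health, err := svc.BackendServices.GetHealth(project, backendService, ref).Do()
	if err != nil {
		return err
	}
	for _, hs := range health.HealthStatus {
		fmt.Printf("%s\t%s\n", hs.Instance, hs.HealthState)
	}
	return nil
}
// --- end of sketch ---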
+// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/insert +func (r *BackendServicesService) Insert(project string, backendservice *BackendService) *BackendServicesInsertCall { + c := &BackendServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendservice = backendservice + return c } -// Get: Returns the specified accelerator type. Get a list of available -// accelerator types by making a list() request. -func (r *AcceleratorTypesService) Get(project string, zone string, acceleratorType string) *AcceleratorTypesGetCall { - c := &AcceleratorTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.acceleratorType = acceleratorType +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendServicesInsertCall) RequestId(requestId string) *BackendServicesInsertCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AcceleratorTypesGetCall) Fields(s ...googleapi.Field) *AcceleratorTypesGetCall { +func (c *BackendServicesInsertCall) Fields(s ...googleapi.Field) *BackendServicesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *AcceleratorTypesGetCall) IfNoneMatch(entityTag string) *AcceleratorTypesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AcceleratorTypesGetCall) Context(ctx context.Context) *AcceleratorTypesGetCall { +func (c *BackendServicesInsertCall) Context(ctx context.Context) *BackendServicesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
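// --- Illustrative usage sketch: not part of the generated client ---
// Insert, like the other mutating calls here, returns a global Operation rather
// than the created resource, so callers normally poll until the Operation reports
// DONE. A hedged sketch with a fixed poll interval and no timeout; production
// code would also inspect op.Error and bound the wait:

package example

import (
	"context"
	"time"

	compute "google.golang.org/api/compute/v1"
)

// createBackendService creates bs in the given project and blocks until the
// returned Operation finishes.
func createBackendService(ctx context.Context, svc *compute.Service, project string, bs *compute.BackendService) error {
	op, err := svc.BackendServices.Insert(project, bs).Context(ctx).Do()
	if err != nil {
		return err
	}
	for op.Status != "DONE" {
		time.Sleep(2 * time.Second)
		if op, err = svc.GlobalOperations.Get(project, op.Name).Context(ctx).Do(); err != nil {
			return err
		}
	}
	return nil
}
// --- end of sketch ---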
-func (c *AcceleratorTypesGetCall) Header() http.Header { +func (c *BackendServicesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AcceleratorTypesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/acceleratorTypes/{acceleratorType}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "acceleratorType": c.acceleratorType, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.acceleratorTypes.get" call. -// Exactly one of *AcceleratorType or error will be non-nil. Any non-2xx +// Do executes the "compute.backendServices.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *AcceleratorType.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *AcceleratorTypesGetCall) Do(opts ...googleapi.CallOption) (*AcceleratorType, error) { +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -16921,7 +30062,7 @@ func (c *AcceleratorTypesGetCall) Do(opts ...googleapi.CallOption) (*Accelerator if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &AcceleratorType{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -16933,22 +30074,13 @@ func (c *AcceleratorTypesGetCall) Do(opts ...googleapi.CallOption) (*Accelerator } return ret, nil // { - // "description": "Returns the specified accelerator type. Get a list of available accelerator types by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.acceleratorTypes.get", + // "description": "Creates a BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a backend service. 
Read Restrictions and Guidelines for more information.", + // "httpMethod": "POST", + // "id": "compute.backendServices.insert", // "parameterOrder": [ - // "project", - // "zone", - // "acceleratorType" + // "project" // ], // "parameters": { - // "acceleratorType": { - // "description": "Name of the accelerator type to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -16956,45 +30088,44 @@ func (c *AcceleratorTypesGetCall) Do(opts ...googleapi.CallOption) (*Accelerator // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/acceleratorTypes/{acceleratorType}", + // "path": "{project}/global/backendServices", + // "request": { + // "$ref": "BackendService" + // }, // "response": { - // "$ref": "AcceleratorType" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.acceleratorTypes.list": +// method id "compute.backendServices.list": -type AcceleratorTypesListCall struct { +type BackendServicesListCall struct { s *Service project string - zone string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of accelerator types available to the +// List: Retrieves the list of BackendService resources available to the // specified project. -func (r *AcceleratorTypesService) List(project string, zone string) *AcceleratorTypesListCall { - c := &AcceleratorTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/list +func (r *BackendServicesService) List(project string) *BackendServicesListCall { + c := &BackendServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone return c } @@ -17024,7 +30155,7 @@ func (r *AcceleratorTypesService) List(project string, zone string) *Accelerator // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. 
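// --- Illustrative usage sketch: not part of the generated client ---
// The Filter, OrderBy and MaxResults setters documented above compose on the List
// call. A hedged sketch that fetches only the first page of matches; use Pages
// (as in the earlier BackendBuckets sketch) to walk every page. The filter
// literal is an RE2 regexp that must match the whole name:

package example

import (
	compute "google.golang.org/api/compute/v1"
)

// newestWebServices returns up to 50 backend services whose names start with
// "web-", newest first.
func newestWebServices(svc *compute.Service, project string) ([]*compute.BackendService, error) {
	list, err := svc.BackendServices.List(project).
		Filter("name eq web-.*").
		OrderBy("creationTimestamp desc").
		MaxResults(50).
		Do()
	if err != nil {
		return nil, err
	}
	return list.Items, nil
}
// --- end of sketch ---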
-func (c *AcceleratorTypesListCall) Filter(filter string) *AcceleratorTypesListCall { +func (c *BackendServicesListCall) Filter(filter string) *BackendServicesListCall { c.urlParams_.Set("filter", filter) return c } @@ -17035,7 +30166,7 @@ func (c *AcceleratorTypesListCall) Filter(filter string) *AcceleratorTypesListCa // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *AcceleratorTypesListCall) MaxResults(maxResults int64) *AcceleratorTypesListCall { +func (c *BackendServicesListCall) MaxResults(maxResults int64) *BackendServicesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -17052,7 +30183,7 @@ func (c *AcceleratorTypesListCall) MaxResults(maxResults int64) *AcceleratorType // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *AcceleratorTypesListCall) OrderBy(orderBy string) *AcceleratorTypesListCall { +func (c *BackendServicesListCall) OrderBy(orderBy string) *BackendServicesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -17060,7 +30191,7 @@ func (c *AcceleratorTypesListCall) OrderBy(orderBy string) *AcceleratorTypesList // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *AcceleratorTypesListCall) PageToken(pageToken string) *AcceleratorTypesListCall { +func (c *BackendServicesListCall) PageToken(pageToken string) *BackendServicesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -17068,7 +30199,7 @@ func (c *AcceleratorTypesListCall) PageToken(pageToken string) *AcceleratorTypes // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AcceleratorTypesListCall) Fields(s ...googleapi.Field) *AcceleratorTypesListCall { +func (c *BackendServicesListCall) Fields(s ...googleapi.Field) *BackendServicesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -17078,7 +30209,7 @@ func (c *AcceleratorTypesListCall) Fields(s ...googleapi.Field) *AcceleratorType // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *AcceleratorTypesListCall) IfNoneMatch(entityTag string) *AcceleratorTypesListCall { +func (c *BackendServicesListCall) IfNoneMatch(entityTag string) *BackendServicesListCall { c.ifNoneMatch_ = entityTag return c } @@ -17086,21 +30217,21 @@ func (c *AcceleratorTypesListCall) IfNoneMatch(entityTag string) *AcceleratorTyp // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AcceleratorTypesListCall) Context(ctx context.Context) *AcceleratorTypesListCall { +func (c *BackendServicesListCall) Context(ctx context.Context) *BackendServicesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
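// --- Illustrative usage sketch: not part of the generated client ---
// IfNoneMatch plus googleapi.IsNotModified, documented above, give a simple
// client-side cache: pass the ETag from the previous response and a 304 comes
// back as an error that IsNotModified recognises. A hedged sketch; reading the
// ETag from the embedded ServerResponse headers is an assumption of the example:

package example

import (
	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// listIfChanged returns (nil, lastETag, nil) when the collection is unchanged,
// otherwise the fresh list and its new ETag.
func listIfChanged(svc *compute.Service, project, lastETag string) (*compute.BackendServiceList, string, error) {
	list, err := svc.BackendServices.List(project).IfNoneMatch(lastETag).Do()
	if googleapi.IsNotModified(err) {
		return nil, lastETag, nil
	}
	if err != nil {
		return nil, "", err
	}
	return list, list.Header.Get("ETag"), nil
}
// --- end of sketch ---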
-func (c *AcceleratorTypesListCall) Header() http.Header { +func (c *BackendServicesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AcceleratorTypesListCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -17111,25 +30242,24 @@ func (c *AcceleratorTypesListCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/acceleratorTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.acceleratorTypes.list" call. -// Exactly one of *AcceleratorTypeList or error will be non-nil. Any +// Do executes the "compute.backendServices.list" call. +// Exactly one of *BackendServiceList or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *AcceleratorTypeList.ServerResponse.Header or (if a response was +// *BackendServiceList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *AcceleratorTypesListCall) Do(opts ...googleapi.CallOption) (*AcceleratorTypeList, error) { +func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServiceList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -17148,7 +30278,7 @@ func (c *AcceleratorTypesListCall) Do(opts ...googleapi.CallOption) (*Accelerato if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &AcceleratorTypeList{ + ret := &BackendServiceList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -17160,12 +30290,11 @@ func (c *AcceleratorTypesListCall) Do(opts ...googleapi.CallOption) (*Accelerato } return ret, nil // { - // "description": "Retrieves a list of accelerator types available to the specified project.", + // "description": "Retrieves the list of BackendService resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.acceleratorTypes.list", + // "id": "compute.backendServices.list", // "parameterOrder": [ - // "project", - // "zone" + // "project" // ], // "parameters": { // "filter": { @@ -17197,18 +30326,11 @@ func (c *AcceleratorTypesListCall) Do(opts ...googleapi.CallOption) (*Accelerato // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/acceleratorTypes", + // "path": "{project}/global/backendServices", // "response": { - // "$ref": "AcceleratorTypeList" + // "$ref": "BackendServiceList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -17222,7 +30344,7 @@ func (c *AcceleratorTypesListCall) Do(opts ...googleapi.CallOption) (*Accelerato // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *AcceleratorTypesListCall) Pages(ctx context.Context, f func(*AcceleratorTypeList) error) error { +func (c *BackendServicesListCall) Pages(ctx context.Context, f func(*BackendServiceList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -17240,156 +30362,109 @@ func (c *AcceleratorTypesListCall) Pages(ctx context.Context, f func(*Accelerato } } -// method id "compute.addresses.aggregatedList": +// method id "compute.backendServices.patch": -type AddressesAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type BackendServicesPatchCall struct { + s *Service + project string + backendService string + backendservice *BackendService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AggregatedList: Retrieves an aggregated list of addresses. -// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/aggregatedList -func (r *AddressesService) AggregatedList(project string) *AddressesAggregatedListCall { - c := &AddressesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Patches the specified BackendService resource with the data +// included in the request. There are several restrictions and +// guidelines to keep in mind when updating a backend service. Read +// Restrictions and Guidelines for more information. 
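// --- Illustrative usage sketch: not part of the generated client ---
// Patch uses JSON merge-patch semantics, so only the fields serialised in the
// request body change on the server. A hedged sketch, assuming the compute/v1
// import path; note that zero-valued fields are omitted by the generated JSON
// marshalling unless listed in ForceSendFields:

package example

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// patchTimeout changes only the backend service's request timeout, leaving every
// other field untouched, and returns the asynchronous Operation.
func patchTimeout(ctx context.Context, svc *compute.Service, project, name string, seconds int64) (*compute.Operation, error) {
	patch := &compute.BackendService{TimeoutSec: seconds}
	return svc.BackendServices.Patch(project, name, patch).Context(ctx).Do()
}
// --- end of sketch ---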
This method +// supports PATCH semantics and uses the JSON merge patch format and +// processing rules. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/patch +func (r *BackendServicesService) Patch(project string, backendService string, backendservice *BackendService) *BackendServicesPatchCall { + c := &BackendServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.backendService = backendService + c.backendservice = backendservice return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *AddressesAggregatedListCall) Filter(filter string) *AddressesAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *AddressesAggregatedListCall) MaxResults(maxResults int64) *AddressesAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *AddressesAggregatedListCall) OrderBy(orderBy string) *AddressesAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *AddressesAggregatedListCall) PageToken(pageToken string) *AddressesAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendServicesPatchCall) RequestId(requestId string) *BackendServicesPatchCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AddressesAggregatedListCall) Fields(s ...googleapi.Field) *AddressesAggregatedListCall { +func (c *BackendServicesPatchCall) Fields(s ...googleapi.Field) *BackendServicesPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *AddressesAggregatedListCall) IfNoneMatch(entityTag string) *AddressesAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AddressesAggregatedListCall) Context(ctx context.Context) *AddressesAggregatedListCall { +func (c *BackendServicesPatchCall) Context(ctx context.Context) *BackendServicesPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AddressesAggregatedListCall) Header() http.Header { +func (c *BackendServicesPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AddressesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/addresses") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("PATCH", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.addresses.aggregatedList" call. -// Exactly one of *AddressAggregatedList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *AddressAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*AddressAggregatedList, error) { +// Do executes the "compute.backendServices.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -17408,7 +30483,7 @@ func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Address if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &AddressAggregatedList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -17420,34 +30495,19 @@ func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Address } return ret, nil // { - // "description": "Retrieves an aggregated list of addresses.", - // "httpMethod": "GET", - // "id": "compute.addresses.aggregatedList", + // "description": "Patches the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.backendServices.patch", // "parameterOrder": [ - // "project" + // "project", + // "backendService" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "backendService": { + // "description": "Name of the BackendService resource to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, // "project": { @@ -17456,61 +30516,47 @@ func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Address // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/aggregated/addresses", + // "path": "{project}/global/backendServices/{backendService}", + // "request": { + // "$ref": "BackendService" + // }, // "response": { - // "$ref": "AddressAggregatedList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *AddressesAggregatedListCall) Pages(ctx context.Context, f func(*AddressAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.addresses.delete": +// method id "compute.backendServices.setSecurityPolicy": -type AddressesDeleteCall struct { - s *Service - project string - region string - address string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BackendServicesSetSecurityPolicyCall struct { + s *Service + project string + backendService string + securitypolicyreference *SecurityPolicyReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified address resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/delete -func (r *AddressesService) Delete(project string, region string, address string) *AddressesDeleteCall { - c := &AddressesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetSecurityPolicy: Sets the security policy for the specified backend +// service. +func (r *BackendServicesService) SetSecurityPolicy(project string, backendService string, securitypolicyreference *SecurityPolicyReference) *BackendServicesSetSecurityPolicyCall { + c := &BackendServicesSetSecurityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.address = address + c.backendService = backendService + c.securitypolicyreference = securitypolicyreference return c } @@ -17528,7 +30574,7 @@ func (r *AddressesService) Delete(project string, region string, address string) // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *AddressesDeleteCall) RequestId(requestId string) *AddressesDeleteCall { +func (c *BackendServicesSetSecurityPolicyCall) RequestId(requestId string) *BackendServicesSetSecurityPolicyCall { c.urlParams_.Set("requestId", requestId) return c } @@ -17536,7 +30582,7 @@ func (c *AddressesDeleteCall) RequestId(requestId string) *AddressesDeleteCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *AddressesDeleteCall) Fields(s ...googleapi.Field) *AddressesDeleteCall { +func (c *BackendServicesSetSecurityPolicyCall) Fields(s ...googleapi.Field) *BackendServicesSetSecurityPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -17544,48 +30590,52 @@ func (c *AddressesDeleteCall) Fields(s ...googleapi.Field) *AddressesDeleteCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AddressesDeleteCall) Context(ctx context.Context) *AddressesDeleteCall { +func (c *BackendServicesSetSecurityPolicyCall) Context(ctx context.Context) *BackendServicesSetSecurityPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AddressesDeleteCall) Header() http.Header { +func (c *BackendServicesSetSecurityPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AddressesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesSetSecurityPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{address}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/setSecurityPolicy") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "address": c.address, + "project": c.project, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.addresses.delete" call. +// Do executes the "compute.backendServices.setSecurityPolicy" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *AddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *BackendServicesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -17616,19 +30666,17 @@ func (c *AddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Deletes the specified address resource.", - // "httpMethod": "DELETE", - // "id": "compute.addresses.delete", + // "description": "Sets the security policy for the specified backend service.", + // "httpMethod": "POST", + // "id": "compute.backendServices.setSecurityPolicy", // "parameterOrder": [ // "project", - // "region", - // "address" + // "backendService" // ], // "parameters": { - // "address": { - // "description": "Name of the address resource to delete.", + // "backendService": { + // "description": "Name of the BackendService resource to which the security policy should be set. The name should conform to RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -17639,20 +30687,16 @@ func (c *AddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/regions/{region}/addresses/{address}", + // "path": "{project}/global/backendServices/{backendService}/setSecurityPolicy", + // "request": { + // "$ref": "SecurityPolicyReference" + // }, // "response": { // "$ref": "Operation" // }, @@ -17664,95 +30708,85 @@ func (c *AddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.addresses.get": +// method id "compute.backendServices.testIamPermissions": -type AddressesGetCall struct { - s *Service - project string - region string - address string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type BackendServicesTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified address resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/get -func (r *AddressesService) Get(project string, region string, address string) *AddressesGetCall { - c := &AddressesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. 
+func (r *BackendServicesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *BackendServicesTestIamPermissionsCall { + c := &BackendServicesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.address = address + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AddressesGetCall) Fields(s ...googleapi.Field) *AddressesGetCall { +func (c *BackendServicesTestIamPermissionsCall) Fields(s ...googleapi.Field) *BackendServicesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *AddressesGetCall) IfNoneMatch(entityTag string) *AddressesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AddressesGetCall) Context(ctx context.Context) *AddressesGetCall { +func (c *BackendServicesTestIamPermissionsCall) Context(ctx context.Context) *BackendServicesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AddressesGetCall) Header() http.Header { +func (c *BackendServicesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AddressesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err } - var body io.Reader = nil + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{address}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "address": c.address, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.addresses.get" call. -// Exactly one of *Address or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Address.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *AddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { +// Do executes the "compute.backendServices.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BackendServicesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -17771,7 +30805,7 @@ func (c *AddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Address{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -17783,22 +30817,14 @@ func (c *AddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { } return ret, nil // { - // "description": "Returns the specified address resource.", - // "httpMethod": "GET", - // "id": "compute.addresses.get", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.backendServices.testIamPermissions", // "parameterOrder": [ // "project", - // "region", - // "address" + // "resource" // ], // "parameters": { - // "address": { - // "description": "Name of the address resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -17806,17 +30832,20 @@ func (c *AddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", + // "resource": { + // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/addresses/{address}", + // "path": "{project}/global/backendServices/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "Address" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -17827,26 +30856,28 @@ func (c *AddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { } -// method id "compute.addresses.insert": +// method id "compute.backendServices.update": -type AddressesInsertCall struct { - s *Service - project string - region string - address *Address - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BackendServicesUpdateCall struct { + s *Service + project string + backendService string + backendservice *BackendService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates an address resource in 
the specified project using -// the data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/insert -func (r *AddressesService) Insert(project string, region string, address *Address) *AddressesInsertCall { - c := &AddressesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates the specified BackendService resource with the data +// included in the request. There are several restrictions and +// guidelines to keep in mind when updating a backend service. Read +// Restrictions and Guidelines for more information. +// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/update +func (r *BackendServicesService) Update(project string, backendService string, backendservice *BackendService) *BackendServicesUpdateCall { + c := &BackendServicesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.address = address + c.backendService = backendService + c.backendservice = backendservice return c } @@ -17864,7 +30895,7 @@ func (r *AddressesService) Insert(project string, region string, address *Addres // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *AddressesInsertCall) RequestId(requestId string) *AddressesInsertCall { +func (c *BackendServicesUpdateCall) RequestId(requestId string) *BackendServicesUpdateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -17872,7 +30903,7 @@ func (c *AddressesInsertCall) RequestId(requestId string) *AddressesInsertCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AddressesInsertCall) Fields(s ...googleapi.Field) *AddressesInsertCall { +func (c *BackendServicesUpdateCall) Fields(s ...googleapi.Field) *BackendServicesUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -17880,52 +30911,52 @@ func (c *AddressesInsertCall) Fields(s ...googleapi.Field) *AddressesInsertCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AddressesInsertCall) Context(ctx context.Context) *AddressesInsertCall { +func (c *BackendServicesUpdateCall) Context(ctx context.Context) *BackendServicesUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *AddressesInsertCall) Header() http.Header { +func (c *BackendServicesUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AddressesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.address) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("PUT", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.addresses.insert" call. +// Do executes the "compute.backendServices.update" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *AddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -17956,25 +30987,25 @@ func (c *AddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Creates an address resource in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.addresses.insert", + // "description": "Updates the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. 
Read Restrictions and Guidelines for more information.", + // "httpMethod": "PUT", + // "id": "compute.backendServices.update", // "parameterOrder": [ // "project", - // "region" + // "backendService" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "backendService": { + // "description": "Name of the BackendService resource to update.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, @@ -17984,9 +31015,9 @@ func (c *AddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "type": "string" // } // }, - // "path": "{project}/regions/{region}/addresses", + // "path": "{project}/global/backendServices/{backendService}", // "request": { - // "$ref": "Address" + // "$ref": "BackendService" // }, // "response": { // "$ref": "Operation" @@ -17999,25 +31030,22 @@ func (c *AddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.addresses.list": +// method id "compute.diskTypes.aggregatedList": -type AddressesListCall struct { +type DiskTypesAggregatedListCall struct { s *Service project string - region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of addresses contained within the specified -// region. -// For details, see https://cloud.google.com/compute/docs/reference/latest/addresses/list -func (r *AddressesService) List(project string, region string) *AddressesListCall { - c := &AddressesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of disk types. +// For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/aggregatedList +func (r *DiskTypesService) AggregatedList(project string) *DiskTypesAggregatedListCall { + c := &DiskTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region return c } @@ -18047,7 +31075,7 @@ func (r *AddressesService) List(project string, region string) *AddressesListCal // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *AddressesListCall) Filter(filter string) *AddressesListCall { +func (c *DiskTypesAggregatedListCall) Filter(filter string) *DiskTypesAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -18058,7 +31086,7 @@ func (c *AddressesListCall) Filter(filter string) *AddressesListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. 
// (Default: 500) -func (c *AddressesListCall) MaxResults(maxResults int64) *AddressesListCall { +func (c *DiskTypesAggregatedListCall) MaxResults(maxResults int64) *DiskTypesAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -18075,7 +31103,7 @@ func (c *AddressesListCall) MaxResults(maxResults int64) *AddressesListCall { // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *AddressesListCall) OrderBy(orderBy string) *AddressesListCall { +func (c *DiskTypesAggregatedListCall) OrderBy(orderBy string) *DiskTypesAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -18083,7 +31111,7 @@ func (c *AddressesListCall) OrderBy(orderBy string) *AddressesListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *AddressesListCall) PageToken(pageToken string) *AddressesListCall { +func (c *DiskTypesAggregatedListCall) PageToken(pageToken string) *DiskTypesAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -18091,7 +31119,7 @@ func (c *AddressesListCall) PageToken(pageToken string) *AddressesListCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AddressesListCall) Fields(s ...googleapi.Field) *AddressesListCall { +func (c *DiskTypesAggregatedListCall) Fields(s ...googleapi.Field) *DiskTypesAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -18101,7 +31129,7 @@ func (c *AddressesListCall) Fields(s ...googleapi.Field) *AddressesListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *AddressesListCall) IfNoneMatch(entityTag string) *AddressesListCall { +func (c *DiskTypesAggregatedListCall) IfNoneMatch(entityTag string) *DiskTypesAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -18109,21 +31137,21 @@ func (c *AddressesListCall) IfNoneMatch(entityTag string) *AddressesListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AddressesListCall) Context(ctx context.Context) *AddressesListCall { +func (c *DiskTypesAggregatedListCall) Context(ctx context.Context) *DiskTypesAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AddressesListCall) Header() http.Header { +func (c *DiskTypesAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AddressesListCall) doRequest(alt string) (*http.Response, error) { +func (c *DiskTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -18134,25 +31162,24 @@ func (c *AddressesListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/diskTypes") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.addresses.list" call. -// Exactly one of *AddressList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *AddressList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, error) { +// Do executes the "compute.diskTypes.aggregatedList" call. +// Exactly one of *DiskTypeAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *DiskTypeAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTypeAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -18171,7 +31198,7 @@ func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, erro if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &AddressList{ + ret := &DiskTypeAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -18183,12 +31210,11 @@ func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, erro } return ret, nil // { - // "description": "Retrieves a list of addresses contained within the specified region.", + // "description": "Retrieves an aggregated list of disk types.", // "httpMethod": "GET", - // "id": "compute.addresses.list", + // "id": "compute.diskTypes.aggregatedList", // "parameterOrder": [ - // "project", - // "region" + // "project" // ], // "parameters": { // "filter": { @@ -18220,18 +31246,11 @@ func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, erro // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/addresses", + // "path": "{project}/aggregated/diskTypes", // "response": { - // "$ref": "AddressList" + // "$ref": "DiskTypeAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -18245,7 +31264,7 @@ func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, erro // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
-func (c *AddressesListCall) Pages(ctx context.Context, f func(*AddressList) error) error { +func (c *DiskTypesAggregatedListCall) Pages(ctx context.Context, f func(*DiskTypeAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -18263,107 +31282,96 @@ func (c *AddressesListCall) Pages(ctx context.Context, f func(*AddressList) erro } } -// method id "compute.addresses.setLabels": +// method id "compute.diskTypes.get": -type AddressesSetLabelsCall struct { - s *Service - project string - region string - resource string - regionsetlabelsrequest *RegionSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type DiskTypesGetCall struct { + s *Service + project string + zone string + diskType string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetLabels: Sets the labels on an Address. To learn more about labels, -// read the Labeling Resources documentation. -func (r *AddressesService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *AddressesSetLabelsCall { - c := &AddressesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified disk type. Get a list of available disk +// types by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/get +func (r *DiskTypesService) Get(project string, zone string, diskType string) *DiskTypesGetCall { + c := &DiskTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.resource = resource - c.regionsetlabelsrequest = regionsetlabelsrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *AddressesSetLabelsCall) RequestId(requestId string) *AddressesSetLabelsCall { - c.urlParams_.Set("requestId", requestId) + c.zone = zone + c.diskType = diskType return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AddressesSetLabelsCall) Fields(s ...googleapi.Field) *AddressesSetLabelsCall { +func (c *DiskTypesGetCall) Fields(s ...googleapi.Field) *DiskTypesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
+func (c *DiskTypesGetCall) IfNoneMatch(entityTag string) *DiskTypesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AddressesSetLabelsCall) Context(ctx context.Context) *AddressesSetLabelsCall { +func (c *DiskTypesGetCall) Context(ctx context.Context) *DiskTypesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AddressesSetLabelsCall) Header() http.Header { +func (c *DiskTypesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AddressesSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *DiskTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{resource}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/diskTypes/{diskType}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, - "resource": c.resource, + "zone": c.zone, + "diskType": c.diskType, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.addresses.setLabels" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.diskTypes.get" call. +// Exactly one of *DiskType or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *DiskType.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *AddressesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -18382,7 +31390,7 @@ func (c *AddressesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &DiskType{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -18394,139 +31402,204 @@ func (c *AddressesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Sets the labels on an Address. 
To learn more about labels, read the Labeling Resources documentation.", - // "httpMethod": "POST", - // "id": "compute.addresses.setLabels", + // "description": "Returns the specified disk type. Get a list of available disk types by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.diskTypes.get", // "parameterOrder": [ // "project", - // "region", - // "resource" + // "zone", + // "diskType" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "diskType": { + // "description": "Name of the disk type to return.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "region": { - // "description": "The region for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/addresses/{resource}/setLabels", - // "request": { - // "$ref": "RegionSetLabelsRequest" - // }, + // "path": "{project}/zones/{zone}/diskTypes/{diskType}", // "response": { - // "$ref": "Operation" + // "$ref": "DiskType" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.addresses.testIamPermissions": +// method id "compute.diskTypes.list": -type AddressesTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type DiskTypesListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. 
-func (r *AddressesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *AddressesTestIamPermissionsCall { - c := &AddressesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of disk types available to the specified +// project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/list +func (r *DiskTypesService) List(project string, zone string) *DiskTypesListCall { + c := &DiskTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.zone = zone + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *DiskTypesListCall) Filter(filter string) *DiskTypesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *DiskTypesListCall) MaxResults(maxResults int64) *DiskTypesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. 
+func (c *DiskTypesListCall) OrderBy(orderBy string) *DiskTypesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *DiskTypesListCall) PageToken(pageToken string) *DiskTypesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AddressesTestIamPermissionsCall) Fields(s ...googleapi.Field) *AddressesTestIamPermissionsCall { +func (c *DiskTypesListCall) Fields(s ...googleapi.Field) *DiskTypesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *DiskTypesListCall) IfNoneMatch(entityTag string) *DiskTypesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AddressesTestIamPermissionsCall) Context(ctx context.Context) *AddressesTestIamPermissionsCall { +func (c *DiskTypesListCall) Context(ctx context.Context) *DiskTypesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AddressesTestIamPermissionsCall) Header() http.Header { +func (c *DiskTypesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AddressesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *DiskTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/diskTypes") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.addresses.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. 
Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *AddressesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.diskTypes.list" call. +// Exactly one of *DiskTypeList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *DiskTypeList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -18545,7 +31618,7 @@ func (c *AddressesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &DiskTypeList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -18555,17 +31628,39 @@ func (c *AddressesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.addresses.testIamPermissions", + return ret, nil + // { + // "description": "Retrieves a list of disk types available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.diskTypes.list", // "parameterOrder": [ // "project", - // "region", - // "resource" + // "zone" // ], // "parameters": { + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -18573,27 +31668,17 @@ func (c *AddressesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes // "required": true, // "type": "string" // }, - // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/addresses/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/zones/{zone}/diskTypes", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "DiskTypeList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -18604,9 +31689,30 @@ func (c *AddressesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes } -// method id "compute.autoscalers.aggregatedList": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *DiskTypesListCall) Pages(ctx context.Context, f func(*DiskTypeList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type AutoscalersAggregatedListCall struct { +// method id "compute.disks.aggregatedList": + +type DisksAggregatedListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -18615,9 +31721,10 @@ type AutoscalersAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of autoscalers. 
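// ----------------------------------------------------------------------------
// Usage sketch (illustrative only, not part of the generated client): paging
// through compute.diskTypes.list with the DiskTypesListCall builder defined
// above. The import path, project, zone, filter string, and page size are
// example assumptions; use whichever compute package version is vendored here.
package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

// listDiskTypes prints every disk type in a zone, letting Pages follow
// nextPageToken until the last page has been returned.
func listDiskTypes(ctx context.Context, svc *compute.Service, project, zone string) error {
	call := svc.DiskTypes.List(project, zone).
		Filter("name ne local-ssd"). // RE2-style filter expression, as documented above
		MaxResults(100)              // per-page limit; the server default is 500
	return call.Pages(ctx, func(page *compute.DiskTypeList) error {
		for _, dt := range page.Items {
			fmt.Println(dt.Name)
		}
		return nil
	})
}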
-func (r *AutoscalersService) AggregatedList(project string) *AutoscalersAggregatedListCall { - c := &AutoscalersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of persistent disks. +// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/aggregatedList +func (r *DisksService) AggregatedList(project string) *DisksAggregatedListCall { + c := &DisksAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -18648,7 +31755,7 @@ func (r *AutoscalersService) AggregatedList(project string) *AutoscalersAggregat // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *AutoscalersAggregatedListCall) Filter(filter string) *AutoscalersAggregatedListCall { +func (c *DisksAggregatedListCall) Filter(filter string) *DisksAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -18659,7 +31766,7 @@ func (c *AutoscalersAggregatedListCall) Filter(filter string) *AutoscalersAggreg // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *AutoscalersAggregatedListCall) MaxResults(maxResults int64) *AutoscalersAggregatedListCall { +func (c *DisksAggregatedListCall) MaxResults(maxResults int64) *DisksAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -18676,7 +31783,7 @@ func (c *AutoscalersAggregatedListCall) MaxResults(maxResults int64) *Autoscaler // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *AutoscalersAggregatedListCall) OrderBy(orderBy string) *AutoscalersAggregatedListCall { +func (c *DisksAggregatedListCall) OrderBy(orderBy string) *DisksAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -18684,7 +31791,7 @@ func (c *AutoscalersAggregatedListCall) OrderBy(orderBy string) *AutoscalersAggr // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *AutoscalersAggregatedListCall) PageToken(pageToken string) *AutoscalersAggregatedListCall { +func (c *DisksAggregatedListCall) PageToken(pageToken string) *DisksAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -18692,7 +31799,7 @@ func (c *AutoscalersAggregatedListCall) PageToken(pageToken string) *Autoscalers // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AutoscalersAggregatedListCall) Fields(s ...googleapi.Field) *AutoscalersAggregatedListCall { +func (c *DisksAggregatedListCall) Fields(s ...googleapi.Field) *DisksAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -18702,7 +31809,7 @@ func (c *AutoscalersAggregatedListCall) Fields(s ...googleapi.Field) *Autoscaler // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
-func (c *AutoscalersAggregatedListCall) IfNoneMatch(entityTag string) *AutoscalersAggregatedListCall { +func (c *DisksAggregatedListCall) IfNoneMatch(entityTag string) *DisksAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -18710,21 +31817,21 @@ func (c *AutoscalersAggregatedListCall) IfNoneMatch(entityTag string) *Autoscale // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersAggregatedListCall) Context(ctx context.Context) *AutoscalersAggregatedListCall { +func (c *DisksAggregatedListCall) Context(ctx context.Context) *DisksAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AutoscalersAggregatedListCall) Header() http.Header { +func (c *DisksAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -18735,7 +31842,7 @@ func (c *AutoscalersAggregatedListCall) doRequest(alt string) (*http.Response, e } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/disks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders @@ -18745,14 +31852,14 @@ func (c *AutoscalersAggregatedListCall) doRequest(alt string) (*http.Response, e return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.aggregatedList" call. -// Exactly one of *AutoscalerAggregatedList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *AutoscalerAggregatedList.ServerResponse.Header or (if a response was +// Do executes the "compute.disks.aggregatedList" call. +// Exactly one of *DiskAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *DiskAggregatedList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*AutoscalerAggregatedList, error) { +func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -18771,7 +31878,7 @@ func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*Autos if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &AutoscalerAggregatedList{ + ret := &DiskAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -18783,9 +31890,9 @@ func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*Autos } return ret, nil // { - // "description": "Retrieves an aggregated list of autoscalers.", + // "description": "Retrieves an aggregated list of persistent disks.", // "httpMethod": "GET", - // "id": "compute.autoscalers.aggregatedList", + // "id": "compute.disks.aggregatedList", // "parameterOrder": [ // "project" // ], @@ -18821,9 +31928,9 @@ func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*Autos // "type": "string" // } // }, - // "path": "{project}/aggregated/autoscalers", + // "path": "{project}/aggregated/disks", // "response": { - // "$ref": "AutoscalerAggregatedList" + // "$ref": "DiskAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -18837,7 +31944,7 @@ func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*Autos // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *AutoscalersAggregatedListCall) Pages(ctx context.Context, f func(*AutoscalerAggregatedList) error) error { +func (c *DisksAggregatedListCall) Pages(ctx context.Context, f func(*DiskAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -18855,24 +31962,33 @@ func (c *AutoscalersAggregatedListCall) Pages(ctx context.Context, f func(*Autos } } -// method id "compute.autoscalers.delete": +// method id "compute.disks.createSnapshot": -type AutoscalersDeleteCall struct { +type DisksCreateSnapshotCall struct { s *Service project string zone string - autoscaler string + disk string + snapshot *Snapshot urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Delete: Deletes the specified autoscaler. -func (r *AutoscalersService) Delete(project string, zone string, autoscaler string) *AutoscalersDeleteCall { - c := &AutoscalersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// CreateSnapshot: Creates a snapshot of a specified persistent disk. +// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/createSnapshot +func (r *DisksService) CreateSnapshot(project string, zone string, disk string, snapshot *Snapshot) *DisksCreateSnapshotCall { + c := &DisksCreateSnapshotCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.autoscaler = autoscaler + c.disk = disk + c.snapshot = snapshot + return c +} + +// GuestFlush sets the optional parameter "guestFlush": +func (c *DisksCreateSnapshotCall) GuestFlush(guestFlush bool) *DisksCreateSnapshotCall { + c.urlParams_.Set("guestFlush", fmt.Sprint(guestFlush)) return c } @@ -18890,7 +32006,7 @@ func (r *AutoscalersService) Delete(project string, zone string, autoscaler stri // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
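// ----------------------------------------------------------------------------
// Usage sketch (illustrative only): walking compute.disks.aggregatedList, which
// groups persistent disks by scope (zone). The Items and Disks field names are
// assumed from the generated aggregated-list types; the import path and project
// value are placeholders.
package example

import (
	"context"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

// countDisksPerZone tallies persistent disks across all zones of a project.
func countDisksPerZone(ctx context.Context, svc *compute.Service, project string) (map[string]int, error) {
	counts := make(map[string]int)
	err := svc.Disks.AggregatedList(project).Pages(ctx, func(page *compute.DiskAggregatedList) error {
		for scope, scoped := range page.Items {
			counts[scope] += len(scoped.Disks)
		}
		return nil
	})
	return counts, err
}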
-func (c *AutoscalersDeleteCall) RequestId(requestId string) *AutoscalersDeleteCall { +func (c *DisksCreateSnapshotCall) RequestId(requestId string) *DisksCreateSnapshotCall { c.urlParams_.Set("requestId", requestId) return c } @@ -18898,7 +32014,7 @@ func (c *AutoscalersDeleteCall) RequestId(requestId string) *AutoscalersDeleteCa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AutoscalersDeleteCall) Fields(s ...googleapi.Field) *AutoscalersDeleteCall { +func (c *DisksCreateSnapshotCall) Fields(s ...googleapi.Field) *DisksCreateSnapshotCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -18906,48 +32022,235 @@ func (c *AutoscalersDeleteCall) Fields(s ...googleapi.Field) *AutoscalersDeleteC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersDeleteCall) Context(ctx context.Context) *AutoscalersDeleteCall { +func (c *DisksCreateSnapshotCall) Context(ctx context.Context) *DisksCreateSnapshotCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AutoscalersDeleteCall) Header() http.Header { +func (c *DisksCreateSnapshotCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.snapshot) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers/{autoscaler}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/createSnapshot") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "disk": c.disk, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.disks.createSnapshot" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a snapshot of a specified persistent disk.", + // "httpMethod": "POST", + // "id": "compute.disks.createSnapshot", + // "parameterOrder": [ + // "project", + // "zone", + // "disk" + // ], + // "parameters": { + // "disk": { + // "description": "Name of the persistent disk to snapshot.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "guestFlush": { + // "location": "query", + // "type": "boolean" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/disks/{disk}/createSnapshot", + // "request": { + // "$ref": "Snapshot" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.disks.delete": + +type DisksDeleteCall struct { + s *Service + project string + zone string + disk string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified persistent disk. Deleting a disk +// removes its data permanently and is irreversible. However, deleting a +// disk does not delete any snapshots previously made from the disk. You +// must separately delete snapshots. 
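// ----------------------------------------------------------------------------
// Usage sketch (illustrative only): compute.disks.createSnapshot as defined
// above. The snapshot name and import path are example assumptions; the call
// returns an *Operation that the caller still has to poll or wait on (not
// shown), and RequestId can be chained in the same way for idempotent retries.
package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

func snapshotDisk(ctx context.Context, svc *compute.Service, project, zone, disk string) error {
	snap := &compute.Snapshot{Name: disk + "-snap"} // target snapshot name (example)
	op, err := svc.Disks.CreateSnapshot(project, zone, disk, snap).
		GuestFlush(true). // optional: attempt to quiesce the guest before snapshotting
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Printf("createSnapshot operation started: %s\n", op.Name)
	return nil
}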
+// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/delete +func (r *DisksService) Delete(project string, zone string, disk string) *DisksDeleteCall { + c := &DisksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.disk = disk + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *DisksDeleteCall) RequestId(requestId string) *DisksDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DisksDeleteCall) Fields(s ...googleapi.Field) *DisksDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DisksDeleteCall) Context(ctx context.Context) *DisksDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DisksDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DisksDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "autoscaler": c.autoscaler, + "project": c.project, + "zone": c.zone, + "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.delete" call. +// Do executes the "compute.disks.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *AutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *DisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -18978,19 +32281,18 @@ func (c *AutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Deletes the specified autoscaler.", + // "description": "Deletes the specified persistent disk. Deleting a disk removes its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. You must separately delete snapshots.", // "httpMethod": "DELETE", - // "id": "compute.autoscalers.delete", + // "id": "compute.disks.delete", // "parameterOrder": [ // "project", // "zone", - // "autoscaler" + // "disk" // ], // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to delete.", + // "disk": { + // "description": "Name of the persistent disk to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -19007,14 +32309,14 @@ func (c *AutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er // "type": "string" // }, // "zone": { - // "description": "Name of the zone for this request.", + // "description": "The name of the zone for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/autoscalers/{autoscaler}", + // "path": "{project}/zones/{zone}/disks/{disk}", // "response": { // "$ref": "Operation" // }, @@ -19026,33 +32328,34 @@ func (c *AutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er } -// method id "compute.autoscalers.get": +// method id "compute.disks.get": -type AutoscalersGetCall struct { +type DisksGetCall struct { s *Service project string zone string - autoscaler string + disk string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the specified autoscaler resource. Get a list of -// available autoscalers by making a list() request. -func (r *AutoscalersService) Get(project string, zone string, autoscaler string) *AutoscalersGetCall { - c := &AutoscalersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns a specified persistent disk. Get a list of available +// persistent disks by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/get +func (r *DisksService) Get(project string, zone string, disk string) *DisksGetCall { + c := &DisksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.autoscaler = autoscaler + c.disk = disk return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AutoscalersGetCall) Fields(s ...googleapi.Field) *AutoscalersGetCall { +func (c *DisksGetCall) Fields(s ...googleapi.Field) *DisksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -19062,7 +32365,7 @@ func (c *AutoscalersGetCall) Fields(s ...googleapi.Field) *AutoscalersGetCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
-func (c *AutoscalersGetCall) IfNoneMatch(entityTag string) *AutoscalersGetCall { +func (c *DisksGetCall) IfNoneMatch(entityTag string) *DisksGetCall { c.ifNoneMatch_ = entityTag return c } @@ -19070,21 +32373,21 @@ func (c *AutoscalersGetCall) IfNoneMatch(entityTag string) *AutoscalersGetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersGetCall) Context(ctx context.Context) *AutoscalersGetCall { +func (c *DisksGetCall) Context(ctx context.Context) *DisksGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AutoscalersGetCall) Header() http.Header { +func (c *DisksGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersGetCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -19095,26 +32398,26 @@ func (c *AutoscalersGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers/{autoscaler}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "autoscaler": c.autoscaler, + "project": c.project, + "zone": c.zone, + "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.get" call. -// Exactly one of *Autoscaler or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Autoscaler.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, error) { +// Do executes the "compute.disks.get" call. +// Exactly one of *Disk or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Disk.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -19133,7 +32436,7 @@ func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, erro if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Autoscaler{ + ret := &Disk{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -19145,17 +32448,17 @@ func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, erro } return ret, nil // { - // "description": "Returns the specified autoscaler resource. 
Get a list of available autoscalers by making a list() request.", + // "description": "Returns a specified persistent disk. Get a list of available persistent disks by making a list() request.", // "httpMethod": "GET", - // "id": "compute.autoscalers.get", + // "id": "compute.disks.get", // "parameterOrder": [ // "project", // "zone", - // "autoscaler" + // "disk" // ], // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to return.", + // "disk": { + // "description": "Name of the persistent disk to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -19169,16 +32472,16 @@ func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, erro // "type": "string" // }, // "zone": { - // "description": "Name of the zone for this request.", + // "description": "The name of the zone for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/autoscalers/{autoscaler}", + // "path": "{project}/zones/{zone}/disks/{disk}", // "response": { - // "$ref": "Autoscaler" + // "$ref": "Disk" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -19189,25 +32492,29 @@ func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, erro } -// method id "compute.autoscalers.insert": +// method id "compute.disks.insert": -type AutoscalersInsertCall struct { +type DisksInsertCall struct { s *Service project string zone string - autoscaler *Autoscaler + disk *Disk urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Insert: Creates an autoscaler in the specified project using the data -// included in the request. -func (r *AutoscalersService) Insert(project string, zone string, autoscaler *Autoscaler) *AutoscalersInsertCall { - c := &AutoscalersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a persistent disk in the specified project using the +// data in the request. You can create a disk with a sourceImage, a +// sourceSnapshot, or create an empty 500 GB data disk by omitting all +// properties. You can also create a disk that is larger than the +// default size by specifying the sizeGb property. +// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/insert +func (r *DisksService) Insert(project string, zone string, disk *Disk) *DisksInsertCall { + c := &DisksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.autoscaler = autoscaler + c.disk = disk return c } @@ -19225,15 +32532,22 @@ func (r *AutoscalersService) Insert(project string, zone string, autoscaler *Aut // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *AutoscalersInsertCall) RequestId(requestId string) *AutoscalersInsertCall { +func (c *DisksInsertCall) RequestId(requestId string) *DisksInsertCall { c.urlParams_.Set("requestId", requestId) return c } +// SourceImage sets the optional parameter "sourceImage": Source image +// to restore onto a disk. +func (c *DisksInsertCall) SourceImage(sourceImage string) *DisksInsertCall { + c.urlParams_.Set("sourceImage", sourceImage) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
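// ----------------------------------------------------------------------------
// Usage sketch (illustrative only): compute.disks.get followed by
// compute.disks.delete, using the DisksGetCall and DisksDeleteCall methods
// above. As the delete description notes, snapshots of the disk are not
// removed. The import path and the Users field on Disk are example assumptions.
package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

func deleteDiskIfDetached(ctx context.Context, svc *compute.Service, project, zone, disk string) error {
	d, err := svc.Disks.Get(project, zone, disk).Context(ctx).Do()
	if err != nil {
		return err
	}
	if len(d.Users) > 0 { // assumed field: instances still attached to the disk
		return fmt.Errorf("disk %s is in use by %v", d.Name, d.Users)
	}
	op, err := svc.Disks.Delete(project, zone, disk).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Printf("delete operation started: %s\n", op.Name)
	return nil
}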
-func (c *AutoscalersInsertCall) Fields(s ...googleapi.Field) *AutoscalersInsertCall { +func (c *DisksInsertCall) Fields(s ...googleapi.Field) *DisksInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -19241,34 +32555,34 @@ func (c *AutoscalersInsertCall) Fields(s ...googleapi.Field) *AutoscalersInsertC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersInsertCall) Context(ctx context.Context) *AutoscalersInsertCall { +func (c *DisksInsertCall) Context(ctx context.Context) *DisksInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AutoscalersInsertCall) Header() http.Header { +func (c *DisksInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.disk) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -19279,14 +32593,14 @@ func (c *AutoscalersInsertCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.insert" call. +// Do executes the "compute.disks.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *AutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -19317,9 +32631,9 @@ func (c *AutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Creates an autoscaler in the specified project using the data included in the request.", + // "description": "Creates a persistent disk in the specified project using the data in the request. You can create a disk with a sourceImage, a sourceSnapshot, or create an empty 500 GB data disk by omitting all properties. 
You can also create a disk that is larger than the default size by specifying the sizeGb property.", // "httpMethod": "POST", - // "id": "compute.autoscalers.insert", + // "id": "compute.disks.insert", // "parameterOrder": [ // "project", // "zone" @@ -19337,17 +32651,22 @@ func (c *AutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er // "location": "query", // "type": "string" // }, + // "sourceImage": { + // "description": "Optional. Source image to restore onto a disk.", + // "location": "query", + // "type": "string" + // }, // "zone": { - // "description": "Name of the zone for this request.", + // "description": "The name of the zone for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/autoscalers", + // "path": "{project}/zones/{zone}/disks", // "request": { - // "$ref": "Autoscaler" + // "$ref": "Disk" // }, // "response": { // "$ref": "Operation" @@ -19360,9 +32679,9 @@ func (c *AutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er } -// method id "compute.autoscalers.list": +// method id "compute.disks.list": -type AutoscalersListCall struct { +type DisksListCall struct { s *Service project string zone string @@ -19372,10 +32691,11 @@ type AutoscalersListCall struct { header_ http.Header } -// List: Retrieves a list of autoscalers contained within the specified -// zone. -func (r *AutoscalersService) List(project string, zone string) *AutoscalersListCall { - c := &AutoscalersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of persistent disks contained within the +// specified zone. +// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/list +func (r *DisksService) List(project string, zone string) *DisksListCall { + c := &DisksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone return c @@ -19407,7 +32727,7 @@ func (r *AutoscalersService) List(project string, zone string) *AutoscalersListC // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *AutoscalersListCall) Filter(filter string) *AutoscalersListCall { +func (c *DisksListCall) Filter(filter string) *DisksListCall { c.urlParams_.Set("filter", filter) return c } @@ -19418,7 +32738,7 @@ func (c *AutoscalersListCall) Filter(filter string) *AutoscalersListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *AutoscalersListCall) MaxResults(maxResults int64) *AutoscalersListCall { +func (c *DisksListCall) MaxResults(maxResults int64) *DisksListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -19435,7 +32755,7 @@ func (c *AutoscalersListCall) MaxResults(maxResults int64) *AutoscalersListCall // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *AutoscalersListCall) OrderBy(orderBy string) *AutoscalersListCall { +func (c *DisksListCall) OrderBy(orderBy string) *DisksListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -19443,7 +32763,7 @@ func (c *AutoscalersListCall) OrderBy(orderBy string) *AutoscalersListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. 
Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *AutoscalersListCall) PageToken(pageToken string) *AutoscalersListCall { +func (c *DisksListCall) PageToken(pageToken string) *DisksListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -19451,7 +32771,7 @@ func (c *AutoscalersListCall) PageToken(pageToken string) *AutoscalersListCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AutoscalersListCall) Fields(s ...googleapi.Field) *AutoscalersListCall { +func (c *DisksListCall) Fields(s ...googleapi.Field) *DisksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -19461,7 +32781,7 @@ func (c *AutoscalersListCall) Fields(s ...googleapi.Field) *AutoscalersListCall // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *AutoscalersListCall) IfNoneMatch(entityTag string) *AutoscalersListCall { +func (c *DisksListCall) IfNoneMatch(entityTag string) *DisksListCall { c.ifNoneMatch_ = entityTag return c } @@ -19469,21 +32789,21 @@ func (c *AutoscalersListCall) IfNoneMatch(entityTag string) *AutoscalersListCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersListCall) Context(ctx context.Context) *AutoscalersListCall { +func (c *DisksListCall) Context(ctx context.Context) *DisksListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AutoscalersListCall) Header() http.Header { +func (c *DisksListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersListCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -19494,7 +32814,7 @@ func (c *AutoscalersListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders @@ -19505,14 +32825,14 @@ func (c *AutoscalersListCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.list" call. -// Exactly one of *AutoscalerList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *AutoscalerList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, error) { +// Do executes the "compute.disks.list" call. +// Exactly one of *DiskList or error will be non-nil. Any non-2xx status +// code is an error. 
Response headers are in either +// *DiskList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -19531,7 +32851,7 @@ func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &AutoscalerList{ + ret := &DiskList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -19543,9 +32863,9 @@ func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, } return ret, nil // { - // "description": "Retrieves a list of autoscalers contained within the specified zone.", + // "description": "Retrieves a list of persistent disks contained within the specified zone.", // "httpMethod": "GET", - // "id": "compute.autoscalers.list", + // "id": "compute.disks.list", // "parameterOrder": [ // "project", // "zone" @@ -19582,16 +32902,16 @@ func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, // "type": "string" // }, // "zone": { - // "description": "Name of the zone for this request.", + // "description": "The name of the zone for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/autoscalers", + // "path": "{project}/zones/{zone}/disks", // "response": { - // "$ref": "AutoscalerList" + // "$ref": "DiskList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -19605,7 +32925,7 @@ func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *AutoscalersListCall) Pages(ctx context.Context, f func(*AutoscalerList) error) error { +func (c *DisksListCall) Pages(ctx context.Context, f func(*DiskList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -19623,33 +32943,26 @@ func (c *AutoscalersListCall) Pages(ctx context.Context, f func(*AutoscalerList) } } -// method id "compute.autoscalers.patch": +// method id "compute.disks.resize": -type AutoscalersPatchCall struct { - s *Service - project string - zone string - autoscaler *Autoscaler - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type DisksResizeCall struct { + s *Service + project string + zone string + disk string + disksresizerequest *DisksResizeRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates an autoscaler in the specified project using the data -// included in the request. This method supports PATCH semantics and -// uses the JSON merge patch format and processing rules. -func (r *AutoscalersService) Patch(project string, zone string, autoscaler *Autoscaler) *AutoscalersPatchCall { - c := &AutoscalersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Resize: Resizes the specified persistent disk. 
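// ----------------------------------------------------------------------------
// Usage sketch (illustrative only): creating a disk with compute.disks.insert,
// restoring an image via the SourceImage parameter, then listing the zone's
// disks with compute.disks.list and Pages. The disk name, size, image URL, and
// import path are example assumptions.
package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v0.beta" // assumed import path
)

func createAndListDisks(ctx context.Context, svc *compute.Service, project, zone string) error {
	disk := &compute.Disk{
		Name:   "example-data-disk",
		SizeGb: 100, // override the default size by setting sizeGb explicitly
	}
	op, err := svc.Disks.Insert(project, zone, disk).
		SourceImage("projects/debian-cloud/global/images/family/debian-9"). // example image URL
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Printf("insert operation started: %s\n", op.Name)

	// List disks in the zone; in practice you would wait for the insert
	// operation to finish first (polling not shown).
	return svc.Disks.List(project, zone).Pages(ctx, func(page *compute.DiskList) error {
		for _, d := range page.Items {
			fmt.Printf("%s\t%d GB\n", d.Name, d.SizeGb)
		}
		return nil
	})
}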
+func (r *DisksService) Resize(project string, zone string, disk string, disksresizerequest *DisksResizeRequest) *DisksResizeCall { + c := &DisksResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.autoscaler = autoscaler - return c -} - -// Autoscaler sets the optional parameter "autoscaler": Name of the -// autoscaler to patch. -func (c *AutoscalersPatchCall) Autoscaler(autoscaler string) *AutoscalersPatchCall { - c.urlParams_.Set("autoscaler", autoscaler) + c.disk = disk + c.disksresizerequest = disksresizerequest return c } @@ -19667,7 +32980,7 @@ func (c *AutoscalersPatchCall) Autoscaler(autoscaler string) *AutoscalersPatchCa // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *AutoscalersPatchCall) RequestId(requestId string) *AutoscalersPatchCall { +func (c *DisksResizeCall) RequestId(requestId string) *DisksResizeCall { c.urlParams_.Set("requestId", requestId) return c } @@ -19675,7 +32988,7 @@ func (c *AutoscalersPatchCall) RequestId(requestId string) *AutoscalersPatchCall // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AutoscalersPatchCall) Fields(s ...googleapi.Field) *AutoscalersPatchCall { +func (c *DisksResizeCall) Fields(s ...googleapi.Field) *DisksResizeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -19683,52 +32996,53 @@ func (c *AutoscalersPatchCall) Fields(s ...googleapi.Field) *AutoscalersPatchCal // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersPatchCall) Context(ctx context.Context) *AutoscalersPatchCall { +func (c *DisksResizeCall) Context(ctx context.Context) *DisksResizeCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AutoscalersPatchCall) Header() http.Header { +func (c *DisksResizeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.disksresizerequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/resize") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, + "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.patch" call. +// Do executes the "compute.disks.resize" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *AutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *DisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -19759,18 +33073,20 @@ func (c *AutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Updates an autoscaler in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.autoscalers.patch", + // "description": "Resizes the specified persistent disk.", + // "httpMethod": "POST", + // "id": "compute.disks.resize", // "parameterOrder": [ // "project", - // "zone" + // "zone", + // "disk" // ], // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to patch.", - // "location": "query", + // "disk": { + // "description": "The name of the persistent disk.", + // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, // "project": { @@ -19786,57 +33102,75 @@ func (c *AutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err // "type": "string" // }, // "zone": { - // "description": "Name of the zone for this request.", + // "description": "The name of the zone for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/autoscalers", + // "path": "{project}/zones/{zone}/disks/{disk}/resize", // "request": { - // "$ref": "Autoscaler" + // "$ref": "DisksResizeRequest" // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.autoscalers.testIamPermissions": +// method id "compute.disks.setLabels": -type AutoscalersTestIamPermissionsCall struct { - s *Service - project string - zone string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type DisksSetLabelsCall struct { + s *Service + project string + zone string + resource string + zonesetlabelsrequest *ZoneSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *AutoscalersService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *AutoscalersTestIamPermissionsCall { - c := &AutoscalersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetLabels: Sets the labels on a disk. To learn more about labels, +// read the Labeling Resources documentation. 
+func (r *DisksService) SetLabels(project string, zone string, resource string, zonesetlabelsrequest *ZoneSetLabelsRequest) *DisksSetLabelsCall { + c := &DisksSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.zonesetlabelsrequest = zonesetlabelsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *DisksSetLabelsCall) RequestId(requestId string) *DisksSetLabelsCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AutoscalersTestIamPermissionsCall) Fields(s ...googleapi.Field) *AutoscalersTestIamPermissionsCall { +func (c *DisksSetLabelsCall) Fields(s ...googleapi.Field) *DisksSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -19844,34 +33178,34 @@ func (c *AutoscalersTestIamPermissionsCall) Fields(s ...googleapi.Field) *Autosc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersTestIamPermissionsCall) Context(ctx context.Context) *AutoscalersTestIamPermissionsCall { +func (c *DisksSetLabelsCall) Context(ctx context.Context) *DisksSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AutoscalersTestIamPermissionsCall) Header() http.Header { +func (c *DisksSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetlabelsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/setLabels") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -19883,14 +33217,14 @@ func (c *AutoscalersTestIamPermissionsCall) doRequest(alt string) (*http.Respons return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *AutoscalersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.disks.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -19909,7 +33243,7 @@ func (c *AutoscalersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -19921,9 +33255,9 @@ func (c *AutoscalersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Sets the labels on a disk. To learn more about labels, read the Labeling Resources documentation.", // "httpMethod": "POST", - // "id": "compute.autoscalers.testIamPermissions", + // "id": "compute.disks.setLabels", // "parameterOrder": [ // "project", // "zone", @@ -19937,6 +33271,11 @@ func (c *AutoscalersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "resource": { // "description": "Name of the resource for this request.", // "location": "path", @@ -19952,74 +33291,49 @@ func (c *AutoscalersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/autoscalers/{resource}/testIamPermissions", + // "path": "{project}/zones/{zone}/disks/{resource}/setLabels", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "ZoneSetLabelsRequest" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.autoscalers.update": - -type AutoscalersUpdateCall struct { - s *Service - project string - zone string - autoscaler *Autoscaler - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Update: Updates an autoscaler in the specified project using the data -// included in the request. -func (r *AutoscalersService) Update(project string, zone string, autoscaler *Autoscaler) *AutoscalersUpdateCall { - c := &AutoscalersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.autoscaler = autoscaler - return c -} - -// Autoscaler sets the optional parameter "autoscaler": Name of the -// autoscaler to update. -func (c *AutoscalersUpdateCall) Autoscaler(autoscaler string) *AutoscalersUpdateCall { - c.urlParams_.Set("autoscaler", autoscaler) - return c -} +// method id "compute.disks.testIamPermissions": -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *AutoscalersUpdateCall) RequestId(requestId string) *AutoscalersUpdateCall { - c.urlParams_.Set("requestId", requestId) +type DisksTestIamPermissionsCall struct { + s *Service + project string + zone string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. 
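// Sketch of driving the DisksSetLabelsCall above (illustrative, with assumptions):
// ZoneSetLabelsRequest is assumed to carry Labels and LabelFingerprint fields, and
// the fingerprint would normally be read back from a prior Disks.Get call.
func setDiskLabelsExample(ctx context.Context, svc *compute.Service, project, zone, disk, fingerprint string) error {
	req := &compute.ZoneSetLabelsRequest{
		LabelFingerprint: fingerprint, // assumed field; guards against concurrent label edits
		Labels:           map[string]string{"env": "test"},
	}
	op, err := svc.Disks.SetLabels(project, zone, disk, req).Context(ctx).Do()
	if err != nil {
		return err
	}
	_ = op // *compute.Operation describing the label change
	return nil
}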
+func (r *DisksService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *DisksTestIamPermissionsCall { + c := &DisksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *AutoscalersUpdateCall) Fields(s ...googleapi.Field) *AutoscalersUpdateCall { +func (c *DisksTestIamPermissionsCall) Fields(s ...googleapi.Field) *DisksTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -20027,52 +33341,53 @@ func (c *AutoscalersUpdateCall) Fields(s ...googleapi.Field) *AutoscalersUpdateC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *AutoscalersUpdateCall) Context(ctx context.Context) *AutoscalersUpdateCall { +func (c *DisksTestIamPermissionsCall) Context(ctx context.Context) *DisksTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *AutoscalersUpdateCall) Header() http.Header { +func (c *DisksTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *AutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *DisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.autoscalers.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *AutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.disks.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. 
Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -20091,7 +33406,7 @@ func (c *AutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, er if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -20103,20 +33418,15 @@ func (c *AutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Updates an autoscaler in the specified project using the data included in the request.", - // "httpMethod": "PUT", - // "id": "compute.autoscalers.update", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.disks.testIamPermissions", // "parameterOrder": [ // "project", - // "zone" + // "zone", + // "resource" // ], // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to update.", - // "location": "query", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -20124,50 +33434,54 @@ func (c *AutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, er // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, // "zone": { - // "description": "Name of the zone for this request.", + // "description": "The name of the zone for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/autoscalers", + // "path": "{project}/zones/{zone}/disks/{resource}/testIamPermissions", // "request": { - // "$ref": "Autoscaler" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.backendBuckets.delete": +// method id "compute.firewalls.delete": -type BackendBucketsDeleteCall struct { - s *Service - project string - backendBucket string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type FirewallsDeleteCall struct { + s *Service + project string + firewall string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified BackendBucket resource. -func (r *BackendBucketsService) Delete(project string, backendBucket string) *BackendBucketsDeleteCall { - c := &BackendBucketsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified firewall. +// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/delete +func (r *FirewallsService) Delete(project string, firewall string) *FirewallsDeleteCall { + c := &FirewallsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendBucket = backendBucket + c.firewall = firewall return c } @@ -20185,7 +33499,7 @@ func (r *BackendBucketsService) Delete(project string, backendBucket string) *Ba // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendBucketsDeleteCall) RequestId(requestId string) *BackendBucketsDeleteCall { +func (c *FirewallsDeleteCall) RequestId(requestId string) *FirewallsDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -20193,7 +33507,7 @@ func (c *BackendBucketsDeleteCall) RequestId(requestId string) *BackendBucketsDe // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsDeleteCall) Fields(s ...googleapi.Field) *BackendBucketsDeleteCall { +func (c *FirewallsDeleteCall) Fields(s ...googleapi.Field) *FirewallsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -20201,21 +33515,21 @@ func (c *BackendBucketsDeleteCall) Fields(s ...googleapi.Field) *BackendBucketsD // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
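// Illustrative sketch for the DisksTestIamPermissionsCall above. The Permissions
// []string fields on TestPermissionsRequest/TestPermissionsResponse and the
// permission strings are assumptions, since those definitions sit outside this hunk.
func checkDiskPermissionsExample(ctx context.Context, svc *compute.Service, project, zone, disk string) ([]string, error) {
	req := &compute.TestPermissionsRequest{
		Permissions: []string{"compute.disks.get", "compute.disks.resize"}, // assumed permission names
	}
	resp, err := svc.Disks.TestIamPermissions(project, zone, disk, req).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	return resp.Permissions, nil // the subset of requested permissions the caller actually holds
}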
-func (c *BackendBucketsDeleteCall) Context(ctx context.Context) *BackendBucketsDeleteCall { +func (c *FirewallsDeleteCall) Context(ctx context.Context) *FirewallsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendBucketsDeleteCall) Header() http.Header { +func (c *FirewallsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -20223,25 +33537,25 @@ func (c *BackendBucketsDeleteCall) doRequest(alt string) (*http.Response, error) reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendBucket": c.backendBucket, + "project": c.project, + "firewall": c.firewall, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.delete" call. +// Do executes the "compute.firewalls.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendBucketsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *FirewallsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -20272,16 +33586,16 @@ func (c *BackendBucketsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Deletes the specified BackendBucket resource.", + // "description": "Deletes the specified firewall.", // "httpMethod": "DELETE", - // "id": "compute.backendBuckets.delete", + // "id": "compute.firewalls.delete", // "parameterOrder": [ // "project", - // "backendBucket" + // "firewall" // ], // "parameters": { - // "backendBucket": { - // "description": "Name of the BackendBucket resource to delete.", + // "firewall": { + // "description": "Name of the firewall rule to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -20300,7 +33614,7 @@ func (c *BackendBucketsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets/{backendBucket}", + // "path": "{project}/global/firewalls/{firewall}", // "response": { // "$ref": "Operation" // }, @@ -20312,31 +33626,31 @@ func (c *BackendBucketsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.backendBuckets.get": +// method id "compute.firewalls.get": -type BackendBucketsGetCall struct { - s *Service - project string - backendBucket string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type FirewallsGetCall struct { + s *Service + project string + firewall string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified BackendBucket resource. Get a list of -// available backend buckets by making a list() request. -func (r *BackendBucketsService) Get(project string, backendBucket string) *BackendBucketsGetCall { - c := &BackendBucketsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified firewall. +// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/get +func (r *FirewallsService) Get(project string, firewall string) *FirewallsGetCall { + c := &FirewallsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendBucket = backendBucket + c.firewall = firewall return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsGetCall) Fields(s ...googleapi.Field) *BackendBucketsGetCall { +func (c *FirewallsGetCall) Fields(s ...googleapi.Field) *FirewallsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -20346,7 +33660,7 @@ func (c *BackendBucketsGetCall) Fields(s ...googleapi.Field) *BackendBucketsGetC // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *BackendBucketsGetCall) IfNoneMatch(entityTag string) *BackendBucketsGetCall { +func (c *FirewallsGetCall) IfNoneMatch(entityTag string) *FirewallsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -20354,21 +33668,21 @@ func (c *BackendBucketsGetCall) IfNoneMatch(entityTag string) *BackendBucketsGet // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
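// Minimal sketch for FirewallsDeleteCall (illustration only): DELETE calls carry no
// request body, so the builder needs only the identifying parameters plus options.
func deleteFirewallExample(ctx context.Context, svc *compute.Service, project, rule string) error {
	op, err := svc.Firewalls.Delete(project, rule).Context(ctx).Do()
	if err != nil {
		return err
	}
	_ = op // *compute.Operation for the asynchronous delete
	return nil
}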
-func (c *BackendBucketsGetCall) Context(ctx context.Context) *BackendBucketsGetCall { +func (c *FirewallsGetCall) Context(ctx context.Context) *FirewallsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendBucketsGetCall) Header() http.Header { +func (c *FirewallsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -20379,25 +33693,25 @@ func (c *BackendBucketsGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendBucket": c.backendBucket, + "project": c.project, + "firewall": c.firewall, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.get" call. -// Exactly one of *BackendBucket or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *BackendBucket.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket, error) { +// Do executes the "compute.firewalls.get" call. +// Exactly one of *Firewall or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Firewall.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *FirewallsGetCall) Do(opts ...googleapi.CallOption) (*Firewall, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -20416,7 +33730,7 @@ func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &BackendBucket{ + ret := &Firewall{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -20428,16 +33742,16 @@ func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket } return ret, nil // { - // "description": "Returns the specified BackendBucket resource. 
Get a list of available backend buckets by making a list() request.", + // "description": "Returns the specified firewall.", // "httpMethod": "GET", - // "id": "compute.backendBuckets.get", + // "id": "compute.firewalls.get", // "parameterOrder": [ // "project", - // "backendBucket" + // "firewall" // ], // "parameters": { - // "backendBucket": { - // "description": "Name of the BackendBucket resource to return.", + // "firewall": { + // "description": "Name of the firewall rule to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -20451,9 +33765,9 @@ func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets/{backendBucket}", + // "path": "{project}/global/firewalls/{firewall}", // "response": { - // "$ref": "BackendBucket" + // "$ref": "Firewall" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -20464,23 +33778,24 @@ func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket } -// method id "compute.backendBuckets.insert": +// method id "compute.firewalls.insert": -type BackendBucketsInsertCall struct { - s *Service - project string - backendbucket *BackendBucket - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type FirewallsInsertCall struct { + s *Service + project string + firewall *Firewall + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a BackendBucket resource in the specified project -// using the data included in the request. -func (r *BackendBucketsService) Insert(project string, backendbucket *BackendBucket) *BackendBucketsInsertCall { - c := &BackendBucketsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a firewall rule in the specified project using the +// data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/insert +func (r *FirewallsService) Insert(project string, firewall *Firewall) *FirewallsInsertCall { + c := &FirewallsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendbucket = backendbucket + c.firewall = firewall return c } @@ -20498,7 +33813,7 @@ func (r *BackendBucketsService) Insert(project string, backendbucket *BackendBuc // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendBucketsInsertCall) RequestId(requestId string) *BackendBucketsInsertCall { +func (c *FirewallsInsertCall) RequestId(requestId string) *FirewallsInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -20506,7 +33821,7 @@ func (c *BackendBucketsInsertCall) RequestId(requestId string) *BackendBucketsIn // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsInsertCall) Fields(s ...googleapi.Field) *BackendBucketsInsertCall { +func (c *FirewallsInsertCall) Fields(s ...googleapi.Field) *FirewallsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -20514,34 +33829,34 @@ func (c *BackendBucketsInsertCall) Fields(s ...googleapi.Field) *BackendBucketsI // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
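// Sketch of a conditional read with FirewallsGetCall: IfNoneMatch plus
// googleapi.IsNotModified implement the ETag flow described in the comments above.
// Assumes googleapi is imported from "google.golang.org/api/googleapi".
func getFirewallIfChangedExample(ctx context.Context, svc *compute.Service, project, rule, etag string) (*compute.Firewall, error) {
	fw, err := svc.Firewalls.Get(project, rule).IfNoneMatch(etag).Context(ctx).Do()
	if googleapi.IsNotModified(err) {
		return nil, nil // unchanged since etag; the caller keeps its cached copy
	}
	if err != nil {
		return nil, err
	}
	return fw, nil
}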
-func (c *BackendBucketsInsertCall) Context(ctx context.Context) *BackendBucketsInsertCall { +func (c *FirewallsInsertCall) Context(ctx context.Context) *FirewallsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendBucketsInsertCall) Header() http.Header { +func (c *FirewallsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendbucket) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -20551,14 +33866,14 @@ func (c *BackendBucketsInsertCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.insert" call. +// Do executes the "compute.firewalls.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendBucketsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *FirewallsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -20589,9 +33904,9 @@ func (c *BackendBucketsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Creates a BackendBucket resource in the specified project using the data included in the request.", + // "description": "Creates a firewall rule in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.backendBuckets.insert", + // "id": "compute.firewalls.insert", // "parameterOrder": [ // "project" // ], @@ -20609,9 +33924,9 @@ func (c *BackendBucketsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets", + // "path": "{project}/global/firewalls", // "request": { - // "$ref": "BackendBucket" + // "$ref": "Firewall" // }, // "response": { // "$ref": "Operation" @@ -20624,9 +33939,9 @@ func (c *BackendBucketsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.backendBuckets.list": +// method id "compute.firewalls.list": -type BackendBucketsListCall struct { +type FirewallsListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -20635,10 +33950,11 @@ type BackendBucketsListCall struct { header_ http.Header } -// List: Retrieves the list of BackendBucket resources available to the -// specified project. -func (r *BackendBucketsService) List(project string) *BackendBucketsListCall { - c := &BackendBucketsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of firewall rules available to the specified +// project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/list +func (r *FirewallsService) List(project string) *FirewallsListCall { + c := &FirewallsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -20669,7 +33985,7 @@ func (r *BackendBucketsService) List(project string) *BackendBucketsListCall { // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *BackendBucketsListCall) Filter(filter string) *BackendBucketsListCall { +func (c *FirewallsListCall) Filter(filter string) *FirewallsListCall { c.urlParams_.Set("filter", filter) return c } @@ -20680,7 +33996,7 @@ func (c *BackendBucketsListCall) Filter(filter string) *BackendBucketsListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *BackendBucketsListCall) MaxResults(maxResults int64) *BackendBucketsListCall { +func (c *FirewallsListCall) MaxResults(maxResults int64) *FirewallsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -20697,7 +34013,7 @@ func (c *BackendBucketsListCall) MaxResults(maxResults int64) *BackendBucketsLis // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *BackendBucketsListCall) OrderBy(orderBy string) *BackendBucketsListCall { +func (c *FirewallsListCall) OrderBy(orderBy string) *FirewallsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -20705,7 +34021,7 @@ func (c *BackendBucketsListCall) OrderBy(orderBy string) *BackendBucketsListCall // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. 
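// Sketch for FirewallsInsertCall (illustrative). The Firewall field names used here
// — Name, Network, SourceRanges, Allowed with FirewallAllowed — are assumptions, as
// the struct definitions are outside this hunk.
func insertFirewallExample(ctx context.Context, svc *compute.Service, project string) error {
	fw := &compute.Firewall{
		Name:         "allow-ssh-example",
		Network:      "global/networks/default",
		SourceRanges: []string{"0.0.0.0/0"},
		Allowed: []*compute.FirewallAllowed{
			{IPProtocol: "tcp", Ports: []string{"22"}},
		},
	}
	op, err := svc.Firewalls.Insert(project, fw).Context(ctx).Do()
	if err != nil {
		return err
	}
	_ = op
	return nil
}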
Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *BackendBucketsListCall) PageToken(pageToken string) *BackendBucketsListCall { +func (c *FirewallsListCall) PageToken(pageToken string) *FirewallsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -20713,7 +34029,7 @@ func (c *BackendBucketsListCall) PageToken(pageToken string) *BackendBucketsList // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsListCall) Fields(s ...googleapi.Field) *BackendBucketsListCall { +func (c *FirewallsListCall) Fields(s ...googleapi.Field) *FirewallsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -20723,7 +34039,7 @@ func (c *BackendBucketsListCall) Fields(s ...googleapi.Field) *BackendBucketsLis // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *BackendBucketsListCall) IfNoneMatch(entityTag string) *BackendBucketsListCall { +func (c *FirewallsListCall) IfNoneMatch(entityTag string) *FirewallsListCall { c.ifNoneMatch_ = entityTag return c } @@ -20731,21 +34047,21 @@ func (c *BackendBucketsListCall) IfNoneMatch(entityTag string) *BackendBucketsLi // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsListCall) Context(ctx context.Context) *BackendBucketsListCall { +func (c *FirewallsListCall) Context(ctx context.Context) *FirewallsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendBucketsListCall) Header() http.Header { +func (c *FirewallsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsListCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -20756,7 +34072,7 @@ func (c *BackendBucketsListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders @@ -20766,14 +34082,14 @@ func (c *BackendBucketsListCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.list" call. -// Exactly one of *BackendBucketList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *BackendBucketList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucketList, error) { +// Do executes the "compute.firewalls.list" call. +// Exactly one of *FirewallList or error will be non-nil. 
Any non-2xx +// status code is an error. Response headers are in either +// *FirewallList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -20792,7 +34108,7 @@ func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucke if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &BackendBucketList{ + ret := &FirewallList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -20804,9 +34120,9 @@ func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucke } return ret, nil // { - // "description": "Retrieves the list of BackendBucket resources available to the specified project.", + // "description": "Retrieves the list of firewall rules available to the specified project.", // "httpMethod": "GET", - // "id": "compute.backendBuckets.list", + // "id": "compute.firewalls.list", // "parameterOrder": [ // "project" // ], @@ -20842,86 +34158,239 @@ func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucke // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets", + // "path": "{project}/global/firewalls", + // "response": { + // "$ref": "FirewallList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *FirewallsListCall) Pages(ctx context.Context, f func(*FirewallList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.firewalls.patch": + +type FirewallsPatchCall struct { + s *Service + project string + firewall string + firewall2 *Firewall + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates the specified firewall rule with the data included in +// the request. This method supports PATCH semantics and uses the JSON +// merge patch format and processing rules. +// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/patch +func (r *FirewallsService) Patch(project string, firewall string, firewall2 *Firewall) *FirewallsPatchCall { + c := &FirewallsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.firewall = firewall + c.firewall2 = firewall2 + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
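// Sketch of paginated listing with FirewallsListCall.Pages, which loops over
// nextPageToken on the caller's behalf exactly as the Pages method above shows.
// The Filter expression follows the syntax described in the Filter doc comment;
// FirewallList is assumed to expose Items []*Firewall.
func listFirewallNamesExample(ctx context.Context, svc *compute.Service, project string) ([]string, error) {
	var names []string
	err := svc.Firewalls.List(project).
		Filter("name eq allow-.*").
		MaxResults(100).
		Pages(ctx, func(page *compute.FirewallList) error {
			for _, fw := range page.Items {
				names = append(names, fw.Name)
			}
			return nil // returning an error here would halt the iteration
		})
	return names, err
}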
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *FirewallsPatchCall) RequestId(requestId string) *FirewallsPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *FirewallsPatchCall) Fields(s ...googleapi.Field) *FirewallsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *FirewallsPatchCall) Context(ctx context.Context) *FirewallsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *FirewallsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *FirewallsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "firewall": c.firewall, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.firewalls.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *FirewallsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the specified firewall rule with the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.firewalls.patch", + // "parameterOrder": [ + // "project", + // "firewall" + // ], + // "parameters": { + // "firewall": { + // "description": "Name of the firewall rule to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/firewalls/{firewall}", + // "request": { + // "$ref": "Firewall" + // }, // "response": { - // "$ref": "BackendBucketList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *BackendBucketsListCall) Pages(ctx context.Context, f func(*BackendBucketList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.backendBuckets.patch": +// method id "compute.firewalls.testIamPermissions": -type BackendBucketsPatchCall struct { - s *Service - project string - backendBucket string - backendbucket *BackendBucket - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type FirewallsTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates the specified BackendBucket resource with the data -// included in the request. This method supports PATCH semantics and -// uses the JSON merge patch format and processing rules. 
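// Sketch for FirewallsPatchCall: because zero-valued struct fields are omitted from
// the encoded body, a sparse Firewall value expresses a partial update under the
// JSON merge-patch rules quoted above (Description and SourceRanges are assumed
// field names, as in the insert sketch).
func patchFirewallExample(ctx context.Context, svc *compute.Service, project, rule string) error {
	patch := &compute.Firewall{
		Description:  "tightened by security review",
		SourceRanges: []string{"10.0.0.0/8"},
	}
	op, err := svc.Firewalls.Patch(project, rule, patch).Context(ctx).Do()
	if err != nil {
		return err
	}
	_ = op
	return nil
}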
-func (r *BackendBucketsService) Patch(project string, backendBucket string, backendbucket *BackendBucket) *BackendBucketsPatchCall { - c := &BackendBucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *FirewallsService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *FirewallsTestIamPermissionsCall { + c := &FirewallsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendBucket = backendBucket - c.backendbucket = backendbucket - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendBucketsPatchCall) RequestId(requestId string) *BackendBucketsPatchCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsPatchCall) Fields(s ...googleapi.Field) *BackendBucketsPatchCall { +func (c *FirewallsTestIamPermissionsCall) Fields(s ...googleapi.Field) *FirewallsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -20929,52 +34398,52 @@ func (c *BackendBucketsPatchCall) Fields(s ...googleapi.Field) *BackendBucketsPa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsPatchCall) Context(ctx context.Context) *BackendBucketsPatchCall { +func (c *FirewallsTestIamPermissionsCall) Context(ctx context.Context) *FirewallsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *BackendBucketsPatchCall) Header() http.Header { +func (c *FirewallsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendbucket) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendBucket": c.backendBucket, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *BackendBucketsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.firewalls.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *FirewallsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -20993,7 +34462,7 @@ func (c *BackendBucketsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -21005,21 +34474,14 @@ func (c *BackendBucketsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Updates the specified BackendBucket resource with the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.backendBuckets.patch", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.firewalls.testIamPermissions", // "parameterOrder": [ // "project", - // "backendBucket" + // "resource" // ], // "parameters": { - // "backendBucket": { - // "description": "Name of the BackendBucket resource to patch.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -21027,18 +34489,20 @@ func (c *BackendBucketsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets/{backendBucket}", + // "path": "{project}/global/firewalls/{resource}/testIamPermissions", // "request": { - // "$ref": "BackendBucket" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -21049,25 +34513,28 @@ func (c *BackendBucketsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.backendBuckets.update": +// method id "compute.firewalls.update": -type BackendBucketsUpdateCall struct { - s *Service - project string - backendBucket string - backendbucket *BackendBucket - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type FirewallsUpdateCall struct { + s *Service + project string + firewall string + firewall2 *Firewall + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates the specified BackendBucket resource with the data -// included in the request. -func (r *BackendBucketsService) Update(project string, backendBucket string, backendbucket *BackendBucket) *BackendBucketsUpdateCall { - c := &BackendBucketsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates the specified firewall rule with the data included in +// the request. Using PUT method, can only update following fields of +// firewall rule: allowed, description, sourceRanges, sourceTags, +// targetTags. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/update +func (r *FirewallsService) Update(project string, firewall string, firewall2 *Firewall) *FirewallsUpdateCall { + c := &FirewallsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendBucket = backendBucket - c.backendbucket = backendbucket + c.firewall = firewall + c.firewall2 = firewall2 return c } @@ -21085,7 +34552,7 @@ func (r *BackendBucketsService) Update(project string, backendBucket string, bac // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendBucketsUpdateCall) RequestId(requestId string) *BackendBucketsUpdateCall { +func (c *FirewallsUpdateCall) RequestId(requestId string) *FirewallsUpdateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -21093,7 +34560,7 @@ func (c *BackendBucketsUpdateCall) RequestId(requestId string) *BackendBucketsUp // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendBucketsUpdateCall) Fields(s ...googleapi.Field) *BackendBucketsUpdateCall { +func (c *FirewallsUpdateCall) Fields(s ...googleapi.Field) *FirewallsUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -21101,52 +34568,52 @@ func (c *BackendBucketsUpdateCall) Fields(s ...googleapi.Field) *BackendBucketsU // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendBucketsUpdateCall) Context(ctx context.Context) *BackendBucketsUpdateCall { +func (c *FirewallsUpdateCall) Context(ctx context.Context) *FirewallsUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendBucketsUpdateCall) Header() http.Header { +func (c *FirewallsUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendBucketsUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *FirewallsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendbucket) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall2) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendBuckets/{backendBucket}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendBucket": c.backendBucket, + "project": c.project, + "firewall": c.firewall, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendBuckets.update" call. +// Do executes the "compute.firewalls.update" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendBucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *FirewallsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -21177,16 +34644,16 @@ func (c *BackendBucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Updates the specified BackendBucket resource with the data included in the request.", + // "description": "Updates the specified firewall rule with the data included in the request. Using PUT method, can only update following fields of firewall rule: allowed, description, sourceRanges, sourceTags, targetTags.", // "httpMethod": "PUT", - // "id": "compute.backendBuckets.update", + // "id": "compute.firewalls.update", // "parameterOrder": [ // "project", - // "backendBucket" + // "firewall" // ], // "parameters": { - // "backendBucket": { - // "description": "Name of the BackendBucket resource to update.", + // "firewall": { + // "description": "Name of the firewall rule to update.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -21205,9 +34672,9 @@ func (c *BackendBucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // } // }, - // "path": "{project}/global/backendBuckets/{backendBucket}", + // "path": "{project}/global/firewalls/{firewall}", // "request": { - // "$ref": "BackendBucket" + // "$ref": "Firewall" // }, // "response": { // "$ref": "Operation" @@ -21220,9 +34687,9 @@ func (c *BackendBucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.backendServices.aggregatedList": +// method id "compute.forwardingRules.aggregatedList": -type BackendServicesAggregatedListCall struct { +type ForwardingRulesAggregatedListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -21231,10 +34698,10 @@ type BackendServicesAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves the list of all BackendService resources, -// regional and global, available to the specified project. -func (r *BackendServicesService) AggregatedList(project string) *BackendServicesAggregatedListCall { - c := &BackendServicesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of forwarding rules. +// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/aggregatedList +func (r *ForwardingRulesService) AggregatedList(project string) *ForwardingRulesAggregatedListCall { + c := &ForwardingRulesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -21265,7 +34732,7 @@ func (r *BackendServicesService) AggregatedList(project string) *BackendServices // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. 
-func (c *BackendServicesAggregatedListCall) Filter(filter string) *BackendServicesAggregatedListCall { +func (c *ForwardingRulesAggregatedListCall) Filter(filter string) *ForwardingRulesAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -21276,7 +34743,7 @@ func (c *BackendServicesAggregatedListCall) Filter(filter string) *BackendServic // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *BackendServicesAggregatedListCall) MaxResults(maxResults int64) *BackendServicesAggregatedListCall { +func (c *ForwardingRulesAggregatedListCall) MaxResults(maxResults int64) *ForwardingRulesAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -21293,7 +34760,7 @@ func (c *BackendServicesAggregatedListCall) MaxResults(maxResults int64) *Backen // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *BackendServicesAggregatedListCall) OrderBy(orderBy string) *BackendServicesAggregatedListCall { +func (c *ForwardingRulesAggregatedListCall) OrderBy(orderBy string) *ForwardingRulesAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -21301,7 +34768,7 @@ func (c *BackendServicesAggregatedListCall) OrderBy(orderBy string) *BackendServ // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *BackendServicesAggregatedListCall) PageToken(pageToken string) *BackendServicesAggregatedListCall { +func (c *ForwardingRulesAggregatedListCall) PageToken(pageToken string) *ForwardingRulesAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -21309,7 +34776,7 @@ func (c *BackendServicesAggregatedListCall) PageToken(pageToken string) *Backend // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesAggregatedListCall) Fields(s ...googleapi.Field) *BackendServicesAggregatedListCall { +func (c *ForwardingRulesAggregatedListCall) Fields(s ...googleapi.Field) *ForwardingRulesAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -21319,7 +34786,7 @@ func (c *BackendServicesAggregatedListCall) Fields(s ...googleapi.Field) *Backen // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *BackendServicesAggregatedListCall) IfNoneMatch(entityTag string) *BackendServicesAggregatedListCall { +func (c *ForwardingRulesAggregatedListCall) IfNoneMatch(entityTag string) *ForwardingRulesAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -21327,21 +34794,21 @@ func (c *BackendServicesAggregatedListCall) IfNoneMatch(entityTag string) *Backe // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesAggregatedListCall) Context(ctx context.Context) *BackendServicesAggregatedListCall { +func (c *ForwardingRulesAggregatedListCall) Context(ctx context.Context) *ForwardingRulesAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *BackendServicesAggregatedListCall) Header() http.Header { +func (c *ForwardingRulesAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *ForwardingRulesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -21352,7 +34819,7 @@ func (c *BackendServicesAggregatedListCall) doRequest(alt string) (*http.Respons } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/backendServices") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/forwardingRules") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders @@ -21362,14 +34829,14 @@ func (c *BackendServicesAggregatedListCall) doRequest(alt string) (*http.Respons return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.aggregatedList" call. -// Exactly one of *BackendServiceAggregatedList or error will be +// Do executes the "compute.forwardingRules.aggregatedList" call. +// Exactly one of *ForwardingRuleAggregatedList or error will be // non-nil. Any non-2xx status code is an error. Response headers are in -// either *BackendServiceAggregatedList.ServerResponse.Header or (if a +// either *ForwardingRuleAggregatedList.ServerResponse.Header or (if a // response was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*BackendServiceAggregatedList, error) { +func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*ForwardingRuleAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -21388,7 +34855,7 @@ func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*B if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &BackendServiceAggregatedList{ + ret := &ForwardingRuleAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -21400,9 +34867,9 @@ func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*B } return ret, nil // { - // "description": "Retrieves the list of all BackendService resources, regional and global, available to the specified project.", + // "description": "Retrieves an aggregated list of forwarding rules.", // "httpMethod": "GET", - // "id": "compute.backendServices.aggregatedList", + // "id": "compute.forwardingRules.aggregatedList", // "parameterOrder": [ // "project" // ], @@ -21431,16 +34898,16 @@ func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*B // "type": "string" // }, // "project": { - // "description": "Name of the project scoping this request.", + // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/aggregated/backendServices", + // "path": "{project}/aggregated/forwardingRules", // "response": { - // "$ref": "BackendServiceAggregatedList" + // "$ref": "ForwardingRuleAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -21454,7 +34921,7 @@ func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*B // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *BackendServicesAggregatedListCall) Pages(ctx context.Context, f func(*BackendServiceAggregatedList) error) error { +func (c *ForwardingRulesAggregatedListCall) Pages(ctx context.Context, f func(*ForwardingRuleAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -21472,23 +34939,25 @@ func (c *BackendServicesAggregatedListCall) Pages(ctx context.Context, f func(*B } } -// method id "compute.backendServices.delete": +// method id "compute.forwardingRules.delete": -type BackendServicesDeleteCall struct { +type ForwardingRulesDeleteCall struct { s *Service project string - backendService string + region string + forwardingRule string urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Delete: Deletes the specified BackendService resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/delete -func (r *BackendServicesService) Delete(project string, backendService string) *BackendServicesDeleteCall { - c := &BackendServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified ForwardingRule resource. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/delete +func (r *ForwardingRulesService) Delete(project string, region string, forwardingRule string) *ForwardingRulesDeleteCall { + c := &ForwardingRulesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendService = backendService + c.region = region + c.forwardingRule = forwardingRule return c } @@ -21506,7 +34975,7 @@ func (r *BackendServicesService) Delete(project string, backendService string) * // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendServicesDeleteCall) RequestId(requestId string) *BackendServicesDeleteCall { +func (c *ForwardingRulesDeleteCall) RequestId(requestId string) *ForwardingRulesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -21514,7 +34983,7 @@ func (c *BackendServicesDeleteCall) RequestId(requestId string) *BackendServices // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesDeleteCall) Fields(s ...googleapi.Field) *BackendServicesDeleteCall { +func (c *ForwardingRulesDeleteCall) Fields(s ...googleapi.Field) *ForwardingRulesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -21522,21 +34991,21 @@ func (c *BackendServicesDeleteCall) Fields(s ...googleapi.Field) *BackendService // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesDeleteCall) Context(ctx context.Context) *BackendServicesDeleteCall { +func (c *ForwardingRulesDeleteCall) Context(ctx context.Context) *ForwardingRulesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesDeleteCall) Header() http.Header { +func (c *ForwardingRulesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *ForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -21544,25 +35013,26 @@ func (c *BackendServicesDeleteCall) doRequest(alt string) (*http.Response, error reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "backendService": c.backendService, + "region": c.region, + "forwardingRule": c.forwardingRule, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.delete" call. +// Do executes the "compute.forwardingRules.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -21593,16 +35063,17 @@ func (c *BackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Deletes the specified BackendService resource.", + // "description": "Deletes the specified ForwardingRule resource.", // "httpMethod": "DELETE", - // "id": "compute.backendServices.delete", + // "id": "compute.forwardingRules.delete", // "parameterOrder": [ // "project", - // "backendService" + // "region", + // "forwardingRule" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to delete.", + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -21615,13 +35086,20 @@ func (c *BackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/global/backendServices/{backendService}", + // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", // "response": { // "$ref": "Operation" // }, @@ -21633,32 +35111,33 @@ func (c *BackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.backendServices.get": +// method id "compute.forwardingRules.get": -type BackendServicesGetCall struct { +type ForwardingRulesGetCall struct { s *Service project string - backendService string + region string + forwardingRule string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the specified BackendService resource. Get a list of -// available backend services by making a list() request. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/get -func (r *BackendServicesService) Get(project string, backendService string) *BackendServicesGetCall { - c := &BackendServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified ForwardingRule resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/get +func (r *ForwardingRulesService) Get(project string, region string, forwardingRule string) *ForwardingRulesGetCall { + c := &ForwardingRulesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendService = backendService + c.region = region + c.forwardingRule = forwardingRule return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesGetCall) Fields(s ...googleapi.Field) *BackendServicesGetCall { +func (c *ForwardingRulesGetCall) Fields(s ...googleapi.Field) *ForwardingRulesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -21668,7 +35147,7 @@ func (c *BackendServicesGetCall) Fields(s ...googleapi.Field) *BackendServicesGe // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *BackendServicesGetCall) IfNoneMatch(entityTag string) *BackendServicesGetCall { +func (c *ForwardingRulesGetCall) IfNoneMatch(entityTag string) *ForwardingRulesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -21676,196 +35155,51 @@ func (c *BackendServicesGetCall) IfNoneMatch(entityTag string) *BackendServicesG // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesGetCall) Context(ctx context.Context) *BackendServicesGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BackendServicesGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BackendServicesGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.backendServices.get" call. -// Exactly one of *BackendService or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *BackendService.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. 
-func (c *BackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendService, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &BackendService{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified BackendService resource. Get a list of available backend services by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.backendServices.get", - // "parameterOrder": [ - // "project", - // "backendService" - // ], - // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/backendServices/{backendService}", - // "response": { - // "$ref": "BackendService" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.backendServices.getHealth": - -type BackendServicesGetHealthCall struct { - s *Service - project string - backendService string - resourcegroupreference *ResourceGroupReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// GetHealth: Gets the most recent health check results for this -// BackendService. -// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/getHealth -func (r *BackendServicesService) GetHealth(project string, backendService string, resourcegroupreference *ResourceGroupReference) *BackendServicesGetHealthCall { - c := &BackendServicesGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.backendService = backendService - c.resourcegroupreference = resourcegroupreference - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BackendServicesGetHealthCall) Fields(s ...googleapi.Field) *BackendServicesGetHealthCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *BackendServicesGetHealthCall) Context(ctx context.Context) *BackendServicesGetHealthCall { +func (c *ForwardingRulesGetCall) Context(ctx context.Context) *ForwardingRulesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesGetHealthCall) Header() http.Header { +func (c *ForwardingRulesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesGetHealthCall) doRequest(alt string) (*http.Response, error) { +func (c *ForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.resourcegroupreference) - if err != nil { - return nil, err + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/getHealth") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "backendService": c.backendService, + "region": c.region, + "forwardingRule": c.forwardingRule, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.getHealth" call. -// Exactly one of *BackendServiceGroupHealth or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *BackendServiceGroupHealth.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.forwardingRules.get" call. +// Exactly one of *ForwardingRule or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ForwardingRule.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *BackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*BackendServiceGroupHealth, error) { +func (c *ForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRule, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -21884,7 +35218,7 @@ func (c *BackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*Backen if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &BackendServiceGroupHealth{ + ret := &ForwardingRule{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -21896,34 +35230,40 @@ func (c *BackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*Backen } return ret, nil // { - // "description": "Gets the most recent health check results for this BackendService.", - // "httpMethod": "POST", - // "id": "compute.backendServices.getHealth", + // "description": "Returns the specified ForwardingRule resource.", + // "httpMethod": "GET", + // "id": "compute.forwardingRules.get", // "parameterOrder": [ // "project", - // "backendService" + // "region", + // "forwardingRule" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to which the queried instance belongs.", + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, // "project": { + // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/backendServices/{backendService}/getHealth", - // "request": { - // "$ref": "ResourceGroupReference" - // }, + // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", // "response": { - // "$ref": "BackendServiceGroupHealth" + // "$ref": "ForwardingRule" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -21934,26 +35274,26 @@ func (c *BackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*Backen } -// method id "compute.backendServices.insert": +// method id "compute.forwardingRules.insert": -type BackendServicesInsertCall struct { +type ForwardingRulesInsertCall struct { s *Service project string - backendservice *BackendService + region string + forwardingrule *ForwardingRule urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Insert: Creates a BackendService resource in the specified project -// using the data included in the request. There are several -// restrictions and guidelines to keep in mind when creating a backend -// service. Read Restrictions and Guidelines for more information. -// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/insert -func (r *BackendServicesService) Insert(project string, backendservice *BackendService) *BackendServicesInsertCall { - c := &BackendServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a ForwardingRule resource in the specified project +// and region using the data included in the request. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/insert +func (r *ForwardingRulesService) Insert(project string, region string, forwardingrule *ForwardingRule) *ForwardingRulesInsertCall { + c := &ForwardingRulesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendservice = backendservice + c.region = region + c.forwardingrule = forwardingrule return c } @@ -21971,7 +35311,7 @@ func (r *BackendServicesService) Insert(project string, backendservice *BackendS // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendServicesInsertCall) RequestId(requestId string) *BackendServicesInsertCall { +func (c *ForwardingRulesInsertCall) RequestId(requestId string) *ForwardingRulesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -21979,7 +35319,7 @@ func (c *BackendServicesInsertCall) RequestId(requestId string) *BackendServices // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesInsertCall) Fields(s ...googleapi.Field) *BackendServicesInsertCall { +func (c *ForwardingRulesInsertCall) Fields(s ...googleapi.Field) *ForwardingRulesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -21987,51 +35327,52 @@ func (c *BackendServicesInsertCall) Fields(s ...googleapi.Field) *BackendService // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesInsertCall) Context(ctx context.Context) *BackendServicesInsertCall { +func (c *ForwardingRulesInsertCall) Context(ctx context.Context) *ForwardingRulesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesInsertCall) Header() http.Header { +func (c *ForwardingRulesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *ForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.insert" call. +// Do executes the "compute.forwardingRules.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -22062,11 +35403,12 @@ func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Creates a BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a backend service. Read Restrictions and Guidelines for more information.", + // "description": "Creates a ForwardingRule resource in the specified project and region using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.backendServices.insert", + // "id": "compute.forwardingRules.insert", // "parameterOrder": [ - // "project" + // "project", + // "region" // ], // "parameters": { // "project": { @@ -22076,15 +35418,22 @@ func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/global/backendServices", + // "path": "{project}/regions/{region}/forwardingRules", // "request": { - // "$ref": "BackendService" + // "$ref": "ForwardingRule" // }, // "response": { // "$ref": "Operation" @@ -22097,23 +35446,25 @@ func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.backendServices.list": +// method id "compute.forwardingRules.list": -type BackendServicesListCall struct { +type ForwardingRulesListCall struct { s *Service project string + region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves the list of BackendService resources available to the -// specified project. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/list -func (r *BackendServicesService) List(project string) *BackendServicesListCall { - c := &BackendServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of ForwardingRule resources available to the +// specified project and region. +// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/list +func (r *ForwardingRulesService) List(project string, region string) *ForwardingRulesListCall { + c := &ForwardingRulesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.region = region return c } @@ -22143,7 +35494,7 @@ func (r *BackendServicesService) List(project string) *BackendServicesListCall { // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *BackendServicesListCall) Filter(filter string) *BackendServicesListCall { +func (c *ForwardingRulesListCall) Filter(filter string) *ForwardingRulesListCall { c.urlParams_.Set("filter", filter) return c } @@ -22154,7 +35505,7 @@ func (c *BackendServicesListCall) Filter(filter string) *BackendServicesListCall // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *BackendServicesListCall) MaxResults(maxResults int64) *BackendServicesListCall { +func (c *ForwardingRulesListCall) MaxResults(maxResults int64) *ForwardingRulesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -22171,7 +35522,7 @@ func (c *BackendServicesListCall) MaxResults(maxResults int64) *BackendServicesL // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *BackendServicesListCall) OrderBy(orderBy string) *BackendServicesListCall { +func (c *ForwardingRulesListCall) OrderBy(orderBy string) *ForwardingRulesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -22179,7 +35530,7 @@ func (c *BackendServicesListCall) OrderBy(orderBy string) *BackendServicesListCa // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *BackendServicesListCall) PageToken(pageToken string) *BackendServicesListCall { +func (c *ForwardingRulesListCall) PageToken(pageToken string) *ForwardingRulesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -22187,7 +35538,7 @@ func (c *BackendServicesListCall) PageToken(pageToken string) *BackendServicesLi // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesListCall) Fields(s ...googleapi.Field) *BackendServicesListCall { +func (c *ForwardingRulesListCall) Fields(s ...googleapi.Field) *ForwardingRulesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -22197,7 +35548,7 @@ func (c *BackendServicesListCall) Fields(s ...googleapi.Field) *BackendServicesL // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
-func (c *BackendServicesListCall) IfNoneMatch(entityTag string) *BackendServicesListCall { +func (c *ForwardingRulesListCall) IfNoneMatch(entityTag string) *ForwardingRulesListCall { c.ifNoneMatch_ = entityTag return c } @@ -22205,21 +35556,21 @@ func (c *BackendServicesListCall) IfNoneMatch(entityTag string) *BackendServices // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesListCall) Context(ctx context.Context) *BackendServicesListCall { +func (c *ForwardingRulesListCall) Context(ctx context.Context) *ForwardingRulesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesListCall) Header() http.Header { +func (c *ForwardingRulesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesListCall) doRequest(alt string) (*http.Response, error) { +func (c *ForwardingRulesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -22230,24 +35581,25 @@ func (c *BackendServicesListCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.list" call. -// Exactly one of *BackendServiceList or error will be non-nil. Any +// Do executes the "compute.forwardingRules.list" call. +// Exactly one of *ForwardingRuleList or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *BackendServiceList.ServerResponse.Header or (if a response was +// *ForwardingRuleList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServiceList, error) { +func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingRuleList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -22266,7 +35618,7 @@ func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServ if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &BackendServiceList{ + ret := &ForwardingRuleList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -22278,11 +35630,12 @@ func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServ } return ret, nil // { - // "description": "Retrieves the list of BackendService resources available to the specified project.", + // "description": "Retrieves a list of ForwardingRule resources available to the specified project and region.", // "httpMethod": "GET", - // "id": "compute.backendServices.list", + // "id": "compute.forwardingRules.list", // "parameterOrder": [ - // "project" + // "project", + // "region" // ], // "parameters": { // "filter": { @@ -22314,11 +35667,18 @@ func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/backendServices", + // "path": "{project}/regions/{region}/forwardingRules", // "response": { - // "$ref": "BackendServiceList" + // "$ref": "ForwardingRuleList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -22332,7 +35692,7 @@ func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServ // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *BackendServicesListCall) Pages(ctx context.Context, f func(*BackendServiceList) error) error { +func (c *ForwardingRulesListCall) Pages(ctx context.Context, f func(*ForwardingRuleList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -22350,353 +35710,27 @@ func (c *BackendServicesListCall) Pages(ctx context.Context, f func(*BackendServ } } -// method id "compute.backendServices.patch": - -type BackendServicesPatchCall struct { - s *Service - project string - backendService string - backendservice *BackendService - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Patch: Patches the specified BackendService resource with the data -// included in the request. There are several restrictions and -// guidelines to keep in mind when updating a backend service. Read -// Restrictions and Guidelines for more information. This method -// supports PATCH semantics and uses the JSON merge patch format and -// processing rules. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/patch -func (r *BackendServicesService) Patch(project string, backendService string, backendservice *BackendService) *BackendServicesPatchCall { - c := &BackendServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.backendService = backendService - c.backendservice = backendservice - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *BackendServicesPatchCall) RequestId(requestId string) *BackendServicesPatchCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BackendServicesPatchCall) Fields(s ...googleapi.Field) *BackendServicesPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BackendServicesPatchCall) Context(ctx context.Context) *BackendServicesPatchCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BackendServicesPatchCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BackendServicesPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.backendServices.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. 
-func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Patches the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.backendServices.patch", - // "parameterOrder": [ - // "project", - // "backendService" - // ], - // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to patch.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "{project}/global/backendServices/{backendService}", - // "request": { - // "$ref": "BackendService" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.backendServices.testIamPermissions": +// method id "compute.forwardingRules.setLabels": -type BackendServicesTestIamPermissionsCall struct { +type ForwardingRulesSetLabelsCall struct { s *Service project string + region string resource string - testpermissionsrequest *TestPermissionsRequest + regionsetlabelsrequest *RegionSetLabelsRequest urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. 
-func (r *BackendServicesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *BackendServicesTestIamPermissionsCall { - c := &BackendServicesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BackendServicesTestIamPermissionsCall) Fields(s ...googleapi.Field) *BackendServicesTestIamPermissionsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BackendServicesTestIamPermissionsCall) Context(ctx context.Context) *BackendServicesTestIamPermissionsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *BackendServicesTestIamPermissionsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *BackendServicesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{resource}/testIamPermissions") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.backendServices.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BackendServicesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.backendServices.testIamPermissions", - // "parameterOrder": [ - // "project", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/backendServices/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.backendServices.update": - -type BackendServicesUpdateCall struct { - s *Service - project string - backendService string - backendservice *BackendService - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Update: Updates the specified BackendService resource with the data -// included in the request. There are several restrictions and -// guidelines to keep in mind when updating a backend service. Read -// Restrictions and Guidelines for more information. -// For details, see https://cloud.google.com/compute/docs/reference/latest/backendServices/update -func (r *BackendServicesService) Update(project string, backendService string, backendservice *BackendService) *BackendServicesUpdateCall { - c := &BackendServicesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetLabels: Sets the labels on the specified resource. To learn more +// about labels, read the Labeling Resources documentation. +func (r *ForwardingRulesService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *ForwardingRulesSetLabelsCall { + c := &ForwardingRulesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendService = backendService - c.backendservice = backendservice + c.region = region + c.resource = resource + c.regionsetlabelsrequest = regionsetlabelsrequest return c } @@ -22714,7 +35748,7 @@ func (r *BackendServicesService) Update(project string, backendService string, b // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
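// A minimal usage sketch for the newly vendored forwardingRules.setLabels
// builder and the requestId semantics documented above: build the call,
// attach an idempotency request ID, and execute it with Do. Assumptions not
// taken from this hunk: the generated package's import path (written here as
// the beta compute surface), the Labels/LabelFingerprint fields on
// RegionSetLabelsRequest, and the placeholder project/region/rule names,
// label values, fingerprint, and UUID.
package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v0.beta" // assumed import path for this generated file
)

func main() {
	ctx := context.Background()

	// Application Default Credentials supply the authenticated HTTP client.
	hc, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(hc)
	if err != nil {
		log.Fatal(err)
	}

	req := &compute.RegionSetLabelsRequest{
		Labels:           map[string]string{"env": "test"},
		LabelFingerprint: "42WmSpB8rSM=", // placeholder; normally read from a prior Get of the rule
	}

	// Re-sending with the same request ID lets the server ignore the
	// duplicate, per the RequestId doc comment above.
	op, err := svc.ForwardingRules.
		SetLabels("my-project", "us-central1", "my-rule", req).
		RequestId("3e0c2a34-5f4b-4c17-9f2a-0d9a1c1f7e11"). // placeholder UUID
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("operation %s is %s", op.Name, op.Status)
}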
-func (c *BackendServicesUpdateCall) RequestId(requestId string) *BackendServicesUpdateCall { +func (c *ForwardingRulesSetLabelsCall) RequestId(requestId string) *ForwardingRulesSetLabelsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -22722,7 +35756,7 @@ func (c *BackendServicesUpdateCall) RequestId(requestId string) *BackendServices // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesUpdateCall) Fields(s ...googleapi.Field) *BackendServicesUpdateCall { +func (c *ForwardingRulesSetLabelsCall) Fields(s ...googleapi.Field) *ForwardingRulesSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -22730,52 +35764,53 @@ func (c *BackendServicesUpdateCall) Fields(s ...googleapi.Field) *BackendService // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesUpdateCall) Context(ctx context.Context) *BackendServicesUpdateCall { +func (c *ForwardingRulesSetLabelsCall) Context(ctx context.Context) *ForwardingRulesSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesUpdateCall) Header() http.Header { +func (c *ForwardingRulesSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *ForwardingRulesSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.update" call. +// Do executes the "compute.forwardingRules.setLabels" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -22806,25 +35841,26 @@ func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Updates the specified BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information.", - // "httpMethod": "PUT", - // "id": "compute.backendServices.update", + // "description": "Sets the labels on the specified resource. To learn more about labels, read the Labeling Resources documentation.", + // "httpMethod": "POST", + // "id": "compute.forwardingRules.setLabels", // "parameterOrder": [ // "project", - // "backendService" + // "region", + // "resource" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to update.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "region": { + // "description": "The region for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -22832,11 +35868,18 @@ func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/backendServices/{backendService}", + // "path": "{project}/regions/{region}/forwardingRules/{resource}/setLabels", // "request": { - // "$ref": "BackendService" + // "$ref": "RegionSetLabelsRequest" // }, // "response": { // "$ref": "Operation" @@ -22849,156 +35892,108 @@ func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.diskTypes.aggregatedList": +// method id "compute.forwardingRules.setTarget": -type DiskTypesAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ForwardingRulesSetTargetCall struct { + s *Service + project string + region string + forwardingRule string + targetreference *TargetReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AggregatedList: Retrieves an aggregated list of disk types. -// For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/aggregatedList -func (r *DiskTypesService) AggregatedList(project string) *DiskTypesAggregatedListCall { - c := &DiskTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetTarget: Changes target URL for forwarding rule. The new target +// should be of the same type as the old target. +// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/setTarget +func (r *ForwardingRulesService) SetTarget(project string, region string, forwardingRule string, targetreference *TargetReference) *ForwardingRulesSetTargetCall { + c := &ForwardingRulesSetTargetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.region = region + c.forwardingRule = forwardingRule + c.targetreference = targetreference return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. 
For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *DiskTypesAggregatedListCall) Filter(filter string) *DiskTypesAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *DiskTypesAggregatedListCall) MaxResults(maxResults int64) *DiskTypesAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *DiskTypesAggregatedListCall) OrderBy(orderBy string) *DiskTypesAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *DiskTypesAggregatedListCall) PageToken(pageToken string) *DiskTypesAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ForwardingRulesSetTargetCall) RequestId(requestId string) *ForwardingRulesSetTargetCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DiskTypesAggregatedListCall) Fields(s ...googleapi.Field) *DiskTypesAggregatedListCall { +func (c *ForwardingRulesSetTargetCall) Fields(s ...googleapi.Field) *ForwardingRulesSetTargetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. 
This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *DiskTypesAggregatedListCall) IfNoneMatch(entityTag string) *DiskTypesAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DiskTypesAggregatedListCall) Context(ctx context.Context) *DiskTypesAggregatedListCall { +func (c *ForwardingRulesSetTargetCall) Context(ctx context.Context) *ForwardingRulesSetTargetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DiskTypesAggregatedListCall) Header() http.Header { +func (c *ForwardingRulesSetTargetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DiskTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *ForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/diskTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "forwardingRule": c.forwardingRule, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.diskTypes.aggregatedList" call. -// Exactly one of *DiskTypeAggregatedList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *DiskTypeAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTypeAggregatedList, error) { +// Do executes the "compute.forwardingRules.setTarget" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -23017,7 +36012,7 @@ func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTyp if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &DiskTypeAggregatedList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -23029,34 +36024,20 @@ func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTyp } return ret, nil // { - // "description": "Retrieves an aggregated list of disk types.", - // "httpMethod": "GET", - // "id": "compute.diskTypes.aggregatedList", + // "description": "Changes target URL for forwarding rule. The new target should be of the same type as the old target.", + // "httpMethod": "POST", + // "id": "compute.forwardingRules.setTarget", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "forwardingRule" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource in which target is to be set.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, // "project": { @@ -23065,132 +36046,117 @@ func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTyp // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/aggregated/diskTypes", + // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget", + // "request": { + // "$ref": "TargetReference" + // }, // "response": { - // "$ref": "DiskTypeAggregatedList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
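// A short sketch of the Pages helper documented just above: it invokes the
// callback once per page, follows NextPageToken internally, and stops when a
// callback returns a non-nil error. It is shown here against the
// DiskTypes.List builder that appears on the old side of this hunk; svc is a
// *compute.Service built as in the earlier sketch, the project/zone names are
// placeholders, and the Items/Name fields are assumed from the generated
// DiskTypeList/DiskType types, which this hunk does not show.
func printDiskTypes(ctx context.Context, svc *compute.Service) error {
	return svc.DiskTypes.List("my-project", "us-central1-f").
		Pages(ctx, func(page *compute.DiskTypeList) error {
			for _, dt := range page.Items { // Items assumed from the generated list type
				log.Println(dt.Name)
			}
			return nil // returning an error here would halt the iteration
		})
}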
-func (c *DiskTypesAggregatedListCall) Pages(ctx context.Context, f func(*DiskTypeAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.diskTypes.get": +// method id "compute.forwardingRules.testIamPermissions": -type DiskTypesGetCall struct { - s *Service - project string - zone string - diskType string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ForwardingRulesTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified disk type. Get a list of available disk -// types by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/get -func (r *DiskTypesService) Get(project string, zone string, diskType string) *DiskTypesGetCall { - c := &DiskTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *ForwardingRulesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *ForwardingRulesTestIamPermissionsCall { + c := &ForwardingRulesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.diskType = diskType + c.region = region + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DiskTypesGetCall) Fields(s ...googleapi.Field) *DiskTypesGetCall { +func (c *ForwardingRulesTestIamPermissionsCall) Fields(s ...googleapi.Field) *ForwardingRulesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *DiskTypesGetCall) IfNoneMatch(entityTag string) *DiskTypesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DiskTypesGetCall) Context(ctx context.Context) *DiskTypesGetCall { +func (c *ForwardingRulesTestIamPermissionsCall) Context(ctx context.Context) *ForwardingRulesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *DiskTypesGetCall) Header() http.Header { +func (c *ForwardingRulesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DiskTypesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ForwardingRulesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/diskTypes/{diskType}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, - "diskType": c.diskType, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.diskTypes.get" call. -// Exactly one of *DiskType or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *DiskType.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { +// Do executes the "compute.forwardingRules.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ForwardingRulesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -23209,7 +36175,7 @@ func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &DiskType{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -23221,40 +36187,43 @@ func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { } return ret, nil // { - // "description": "Returns the specified disk type. 
Get a list of available disk types by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.diskTypes.get", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.forwardingRules.testIamPermissions", // "parameterOrder": [ // "project", - // "zone", - // "diskType" + // "region", + // "resource" // ], // "parameters": { - // "diskType": { - // "description": "Name of the disk type to return.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "region": { + // "description": "The name of the region for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "resource": { + // "description": "Name of the resource for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/diskTypes/{diskType}", + // "path": "{project}/regions/{region}/forwardingRules/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "DiskType" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -23265,160 +36234,97 @@ func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { } -// method id "compute.diskTypes.list": - -type DiskTypesListCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves a list of disk types available to the specified -// project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/diskTypes/list -func (r *DiskTypesService) List(project string, zone string) *DiskTypesListCall { - c := &DiskTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - return c -} - -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. 
For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *DiskTypesListCall) Filter(filter string) *DiskTypesListCall { - c.urlParams_.Set("filter", filter) - return c -} +// method id "compute.globalAddresses.delete": -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *DiskTypesListCall) MaxResults(maxResults int64) *DiskTypesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c +type GlobalAddressesDeleteCall struct { + s *Service + project string + address string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *DiskTypesListCall) OrderBy(orderBy string) *DiskTypesListCall { - c.urlParams_.Set("orderBy", orderBy) +// Delete: Deletes the specified address resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/delete +func (r *GlobalAddressesService) Delete(project string, address string) *GlobalAddressesDeleteCall { + c := &GlobalAddressesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.address = address return c } -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *DiskTypesListCall) PageToken(pageToken string) *DiskTypesListCall { - c.urlParams_.Set("pageToken", pageToken) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. 
+// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *GlobalAddressesDeleteCall) RequestId(requestId string) *GlobalAddressesDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DiskTypesListCall) Fields(s ...googleapi.Field) *DiskTypesListCall { +func (c *GlobalAddressesDeleteCall) Fields(s ...googleapi.Field) *GlobalAddressesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *DiskTypesListCall) IfNoneMatch(entityTag string) *DiskTypesListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DiskTypesListCall) Context(ctx context.Context) *DiskTypesListCall { +func (c *GlobalAddressesDeleteCall) Context(ctx context.Context) *GlobalAddressesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DiskTypesListCall) Header() http.Header { +func (c *GlobalAddressesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DiskTypesListCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalAddressesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/diskTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{address}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, + "address": c.address, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.diskTypes.list" call. -// Exactly one of *DiskTypeList or error will be non-nil. Any non-2xx +// Do executes the "compute.globalAddresses.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *DiskTypeList.ServerResponse.Header or (if a response was returned at +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, error) { +func (c *GlobalAddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -23437,7 +36343,7 @@ func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &DiskTypeList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -23449,35 +36355,19 @@ func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, err } return ret, nil // { - // "description": "Retrieves a list of disk types available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.diskTypes.list", + // "description": "Deletes the specified address resource.", + // "httpMethod": "DELETE", + // "id": "compute.globalAddresses.delete", // "parameterOrder": [ // "project", - // "zone" + // "address" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "address": { + // "description": "Name of the address resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, // "project": { @@ -23487,138 +36377,50 @@ func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, err // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/diskTypes", + // "path": "{project}/global/addresses/{address}", // "response": { - // "$ref": "DiskTypeList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *DiskTypesListCall) Pages(ctx context.Context, f func(*DiskTypeList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.disks.aggregatedList": +// method id "compute.globalAddresses.get": -type DisksAggregatedListCall struct { +type GlobalAddressesGetCall struct { s *Service project string + address string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// AggregatedList: Retrieves an aggregated list of persistent disks. -// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/aggregatedList -func (r *DisksService) AggregatedList(project string) *DisksAggregatedListCall { - c := &DisksAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified address resource. Get a list of available +// addresses by making a list() request. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/get +func (r *GlobalAddressesService) Get(project string, address string) *GlobalAddressesGetCall { + c := &GlobalAddressesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *DisksAggregatedListCall) Filter(filter string) *DisksAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *DisksAggregatedListCall) MaxResults(maxResults int64) *DisksAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *DisksAggregatedListCall) OrderBy(orderBy string) *DisksAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. 
-func (c *DisksAggregatedListCall) PageToken(pageToken string) *DisksAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) + c.address = address return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksAggregatedListCall) Fields(s ...googleapi.Field) *DisksAggregatedListCall { +func (c *GlobalAddressesGetCall) Fields(s ...googleapi.Field) *GlobalAddressesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -23628,7 +36430,7 @@ func (c *DisksAggregatedListCall) Fields(s ...googleapi.Field) *DisksAggregatedL // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *DisksAggregatedListCall) IfNoneMatch(entityTag string) *DisksAggregatedListCall { +func (c *GlobalAddressesGetCall) IfNoneMatch(entityTag string) *GlobalAddressesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -23636,21 +36438,21 @@ func (c *DisksAggregatedListCall) IfNoneMatch(entityTag string) *DisksAggregated // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksAggregatedListCall) Context(ctx context.Context) *DisksAggregatedListCall { +func (c *GlobalAddressesGetCall) Context(ctx context.Context) *GlobalAddressesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksAggregatedListCall) Header() http.Header { +func (c *GlobalAddressesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalAddressesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -23661,24 +36463,25 @@ func (c *DisksAggregatedListCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/disks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{address}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "address": c.address, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.aggregatedList" call. -// Exactly one of *DiskAggregatedList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *DiskAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggregatedList, error) { +// Do executes the "compute.globalAddresses.get" call. +// Exactly one of *Address or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Address.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -23697,7 +36500,7 @@ func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggrega if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &DiskAggregatedList{ + ret := &Address{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -23709,34 +36512,19 @@ func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggrega } return ret, nil // { - // "description": "Retrieves an aggregated list of persistent disks.", + // "description": "Returns the specified address resource. Get a list of available addresses by making a list() request.", // "httpMethod": "GET", - // "id": "compute.disks.aggregatedList", + // "id": "compute.globalAddresses.get", // "parameterOrder": [ - // "project" + // "project", + // "address" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
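The IfNoneMatch/IsNotModified pairing documented above supports conditional re-fetches. A sketch, assuming svc is a *compute.Service built as in the earlier snippet and previousETag was captured from an earlier response.

import (
	"context"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// refreshAddress re-fetches a global address only when its ETag has changed.
func refreshAddress(ctx context.Context, svc *compute.Service, project, name, previousETag string, cached *compute.Address) (*compute.Address, error) {
	addr, err := svc.GlobalAddresses.Get(project, name).
		IfNoneMatch(previousETag). // sent as the If-None-Match header by doRequest
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		return cached, nil // HTTP 304: the cached copy is still current
	}
	if err != nil {
		return nil, err
	}
	return addr, nil
}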
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "address": { + // "description": "Name of the address resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, // "project": { @@ -23747,9 +36535,9 @@ func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggrega // "type": "string" // } // }, - // "path": "{project}/aggregated/disks", + // "path": "{project}/global/addresses/{address}", // "response": { - // "$ref": "DiskAggregatedList" + // "$ref": "Address" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -23760,54 +36548,24 @@ func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggrega } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *DisksAggregatedListCall) Pages(ctx context.Context, f func(*DiskAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.disks.createSnapshot": +// method id "compute.globalAddresses.insert": -type DisksCreateSnapshotCall struct { +type GlobalAddressesInsertCall struct { s *Service project string - zone string - disk string - snapshot *Snapshot + address *Address urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// CreateSnapshot: Creates a snapshot of a specified persistent disk. -// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/createSnapshot -func (r *DisksService) CreateSnapshot(project string, zone string, disk string, snapshot *Snapshot) *DisksCreateSnapshotCall { - c := &DisksCreateSnapshotCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates an address resource in the specified project using +// the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/insert +func (r *GlobalAddressesService) Insert(project string, address *Address) *GlobalAddressesInsertCall { + c := &GlobalAddressesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.disk = disk - c.snapshot = snapshot - return c -} - -// GuestFlush sets the optional parameter "guestFlush": -func (c *DisksCreateSnapshotCall) GuestFlush(guestFlush bool) *DisksCreateSnapshotCall { - c.urlParams_.Set("guestFlush", fmt.Sprint(guestFlush)) + c.address = address return c } @@ -23825,7 +36583,7 @@ func (c *DisksCreateSnapshotCall) GuestFlush(guestFlush bool) *DisksCreateSnapsh // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
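The requestId contract described above makes mutating calls safe to retry. A sketch around the Insert call introduced here, assuming svc from the earlier setup and a caller-supplied UUID string.

// createGlobalAddress reserves a named global address. Passing the same
// non-zero UUID as requestID on a retry lets the server drop the duplicate.
func createGlobalAddress(ctx context.Context, svc *compute.Service, project, name, requestID string) (*compute.Operation, error) {
	return svc.GlobalAddresses.Insert(project, &compute.Address{Name: name}).
		RequestId(requestID).
		Context(ctx).
		Do()
}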
-func (c *DisksCreateSnapshotCall) RequestId(requestId string) *DisksCreateSnapshotCall { +func (c *GlobalAddressesInsertCall) RequestId(requestId string) *GlobalAddressesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -23833,7 +36591,7 @@ func (c *DisksCreateSnapshotCall) RequestId(requestId string) *DisksCreateSnapsh // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksCreateSnapshotCall) Fields(s ...googleapi.Field) *DisksCreateSnapshotCall { +func (c *GlobalAddressesInsertCall) Fields(s ...googleapi.Field) *GlobalAddressesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -23841,53 +36599,51 @@ func (c *DisksCreateSnapshotCall) Fields(s ...googleapi.Field) *DisksCreateSnaps // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksCreateSnapshotCall) Context(ctx context.Context) *DisksCreateSnapshotCall { +func (c *GlobalAddressesInsertCall) Context(ctx context.Context) *GlobalAddressesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksCreateSnapshotCall) Header() http.Header { +func (c *GlobalAddressesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalAddressesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.snapshot) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.address) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/createSnapshot") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, - "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.createSnapshot" call. +// Do executes the "compute.globalAddresses.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalAddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -23918,26 +36674,13 @@ func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Creates a snapshot of a specified persistent disk.", + // "description": "Creates an address resource in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.disks.createSnapshot", + // "id": "compute.globalAddresses.insert", // "parameterOrder": [ - // "project", - // "zone", - // "disk" + // "project" // ], // "parameters": { - // "disk": { - // "description": "Name of the persistent disk to snapshot.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "guestFlush": { - // "location": "query", - // "type": "boolean" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -23949,18 +36692,11 @@ func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{disk}/createSnapshot", + // "path": "{project}/global/addresses", // "request": { - // "$ref": "Snapshot" + // "$ref": "Address" // }, // "response": { // "$ref": "Operation" @@ -23973,103 +36709,156 @@ func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.disks.delete": +// method id "compute.globalAddresses.list": -type DisksDeleteCall struct { - s *Service - project string - zone string - disk string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalAddressesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified persistent disk. Deleting a disk -// removes its data permanently and is irreversible. However, deleting a -// disk does not delete any snapshots previously made from the disk. You -// must separately delete snapshots. -// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/delete -func (r *DisksService) Delete(project string, zone string, disk string) *DisksDeleteCall { - c := &DisksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of global addresses. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/list +func (r *GlobalAddressesService) List(project string) *GlobalAddressesListCall { + c := &GlobalAddressesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.disk = disk return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *DisksDeleteCall) RequestId(requestId string) *DisksDeleteCall { - c.urlParams_.Set("requestId", requestId) +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *GlobalAddressesListCall) Filter(filter string) *GlobalAddressesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *GlobalAddressesListCall) MaxResults(maxResults int64) *GlobalAddressesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. 
+// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *GlobalAddressesListCall) OrderBy(orderBy string) *GlobalAddressesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *GlobalAddressesListCall) PageToken(pageToken string) *GlobalAddressesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksDeleteCall) Fields(s ...googleapi.Field) *DisksDeleteCall { +func (c *GlobalAddressesListCall) Fields(s ...googleapi.Field) *GlobalAddressesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *GlobalAddressesListCall) IfNoneMatch(entityTag string) *GlobalAddressesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksDeleteCall) Context(ctx context.Context) *DisksDeleteCall { +func (c *GlobalAddressesListCall) Context(ctx context.Context) *GlobalAddressesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksDeleteCall) Header() http.Header { +func (c *GlobalAddressesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalAddressesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, - "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.globalAddresses.list" call. +// Exactly one of *AddressList or error will be non-nil. 
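A sketch of driving the list call with the setters above and walking pages by hand. The filter expression and page size are illustrative only, svc is assumed from the earlier setup, and the Items field on AddressList is assumed from the schema.

// listMatchingAddresses pages through global addresses manually via
// pageToken/nextPageToken, applying a server-side filter and ordering.
func listMatchingAddresses(ctx context.Context, svc *compute.Service, project string) ([]*compute.Address, error) {
	var out []*compute.Address
	call := svc.GlobalAddresses.List(project).
		Filter("name ne example-address"). // eq/ne expression syntax as documented above
		MaxResults(100).
		OrderBy("creationTimestamp desc").
		Context(ctx)
	pageToken := ""
	for {
		if pageToken != "" {
			call.PageToken(pageToken)
		}
		page, err := call.Do()
		if err != nil {
			return nil, err
		}
		out = append(out, page.Items...)
		if page.NextPageToken == "" {
			return out, nil
		}
		pageToken = page.NextPageToken
	}
}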
Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// *AddressList.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *DisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -24088,7 +36877,7 @@ func (c *DisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &AddressList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -24100,143 +36889,157 @@ func (c *DisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { } return ret, nil // { - // "description": "Deletes the specified persistent disk. Deleting a disk removes its data permanently and is irreversible. However, deleting a disk does not delete any snapshots previously made from the disk. You must separately delete snapshots.", - // "httpMethod": "DELETE", - // "id": "compute.disks.delete", + // "description": "Retrieves a list of global addresses.", + // "httpMethod": "GET", + // "id": "compute.globalAddresses.list", // "parameterOrder": [ - // "project", - // "zone", - // "disk" + // "project" // ], // "parameters": { - // "disk": { - // "description": "Name of the persistent disk to delete.", - // "location": "path", - // "required": true, + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{disk}", + // "path": "{project}/global/addresses", // "response": { - // "$ref": "Operation" + // "$ref": "AddressList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.disks.get": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *GlobalAddressesListCall) Pages(ctx context.Context, f func(*AddressList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type DisksGetCall struct { - s *Service - project string - zone string - disk string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// method id "compute.globalAddresses.setLabels": + +type GlobalAddressesSetLabelsCall struct { + s *Service + project string + resource string + globalsetlabelsrequest *GlobalSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns a specified persistent disk. Get a list of available -// persistent disks by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/get -func (r *DisksService) Get(project string, zone string, disk string) *DisksGetCall { - c := &DisksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetLabels: Sets the labels on a GlobalAddress. To learn more about +// labels, read the Labeling Resources documentation. +func (r *GlobalAddressesService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *GlobalAddressesSetLabelsCall { + c := &GlobalAddressesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.disk = disk + c.resource = resource + c.globalsetlabelsrequest = globalsetlabelsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksGetCall) Fields(s ...googleapi.Field) *DisksGetCall { +func (c *GlobalAddressesSetLabelsCall) Fields(s ...googleapi.Field) *GlobalAddressesSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *DisksGetCall) IfNoneMatch(entityTag string) *DisksGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksGetCall) Context(ctx context.Context) *DisksGetCall { +func (c *GlobalAddressesSetLabelsCall) Context(ctx context.Context) *GlobalAddressesSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
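For comparison, the generated Pages helper shown above does the nextPageToken bookkeeping itself. A minimal sketch collecting every global address in a project, with svc assumed as before.

// allGlobalAddresses lets Pages iterate until NextPageToken is exhausted.
func allGlobalAddresses(ctx context.Context, svc *compute.Service, project string) ([]*compute.Address, error) {
	var out []*compute.Address
	err := svc.GlobalAddresses.List(project).Pages(ctx, func(page *compute.AddressList) error {
		out = append(out, page.Items...)
		return nil // returning a non-nil error here halts the iteration
	})
	return out, err
}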
-func (c *DisksGetCall) Header() http.Header { +func (c *GlobalAddressesSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksGetCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalAddressesSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "disk": c.disk, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.get" call. -// Exactly one of *Disk or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Disk.ServerResponse.Header or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { +// Do executes the "compute.globalAddresses.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *GlobalAddressesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -24255,7 +37058,7 @@ func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Disk{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -24267,22 +37070,14 @@ func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { } return ret, nil // { - // "description": "Returns a specified persistent disk. Get a list of available persistent disks by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.disks.get", + // "description": "Sets the labels on a GlobalAddress. 
To learn more about labels, read the Labeling Resources documentation.", + // "httpMethod": "POST", + // "id": "compute.globalAddresses.setLabels", // "parameterOrder": [ // "project", - // "zone", - // "disk" + // "resource" // ], // "parameters": { - // "disk": { - // "description": "Name of the persistent disk to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -24290,83 +37085,55 @@ func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "resource": { + // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{disk}", + // "path": "{project}/global/addresses/{resource}/setLabels", + // "request": { + // "$ref": "GlobalSetLabelsRequest" + // }, // "response": { - // "$ref": "Disk" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.disks.insert": +// method id "compute.globalAddresses.testIamPermissions": -type DisksInsertCall struct { - s *Service - project string - zone string - disk *Disk - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalAddressesTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a persistent disk in the specified project using the -// data in the request. You can create a disk with a sourceImage, a -// sourceSnapshot, or create an empty 500 GB data disk by omitting all -// properties. You can also create a disk that is larger than the -// default size by specifying the sizeGb property. -// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/insert -func (r *DisksService) Insert(project string, zone string, disk *Disk) *DisksInsertCall { - c := &DisksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *GlobalAddressesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *GlobalAddressesTestIamPermissionsCall { + c := &GlobalAddressesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.disk = disk - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. 
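A sketch of calling the setLabels method above. The Labels and LabelFingerprint fields are assumed from the GlobalSetLabelsRequest schema (not shown in this hunk); the fingerprint would normally be read from a prior Get of the resource.

// setAddressLabels replaces the labels on a global address and returns the
// resulting Operation.
func setAddressLabels(ctx context.Context, svc *compute.Service, project, resource, fingerprint string, labels map[string]string) (*compute.Operation, error) {
	req := &compute.GlobalSetLabelsRequest{
		Labels:           labels,      // assumed field
		LabelFingerprint: fingerprint, // assumed field; guards against concurrent label updates
	}
	return svc.GlobalAddresses.SetLabels(project, resource, req).Context(ctx).Do()
}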
If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *DisksInsertCall) RequestId(requestId string) *DisksInsertCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// SourceImage sets the optional parameter "sourceImage": Source image -// to restore onto a disk. -func (c *DisksInsertCall) SourceImage(sourceImage string) *DisksInsertCall { - c.urlParams_.Set("sourceImage", sourceImage) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksInsertCall) Fields(s ...googleapi.Field) *DisksInsertCall { +func (c *GlobalAddressesTestIamPermissionsCall) Fields(s ...googleapi.Field) *GlobalAddressesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -24374,52 +37141,52 @@ func (c *DisksInsertCall) Fields(s ...googleapi.Field) *DisksInsertCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksInsertCall) Context(ctx context.Context) *DisksInsertCall { +func (c *GlobalAddressesTestIamPermissionsCall) Context(ctx context.Context) *GlobalAddressesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksInsertCall) Header() http.Header { +func (c *GlobalAddressesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalAddressesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.disk) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. 
-func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.globalAddresses.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *GlobalAddressesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -24438,7 +37205,7 @@ func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -24450,12 +37217,12 @@ func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { } return ret, nil // { - // "description": "Creates a persistent disk in the specified project using the data in the request. You can create a disk with a sourceImage, a sourceSnapshot, or create an empty 500 GB data disk by omitting all properties. You can also create a disk that is larger than the default size by specifying the sizeGb property.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.disks.insert", + // "id": "compute.globalAddresses.testIamPermissions", // "parameterOrder": [ // "project", - // "zone" + // "resource" // ], // "parameters": { // "project": { @@ -24465,193 +37232,121 @@ func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "sourceImage": { - // "description": "Optional. 
Source image to restore onto a disk.", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "resource": { + // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks", + // "path": "{project}/global/addresses/{resource}/testIamPermissions", // "request": { - // "$ref": "Disk" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.disks.list": +// method id "compute.globalForwardingRules.delete": -type DisksListCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type GlobalForwardingRulesDeleteCall struct { + s *Service + project string + forwardingRule string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of persistent disks contained within the -// specified zone. -// For details, see https://cloud.google.com/compute/docs/reference/latest/disks/list -func (r *DisksService) List(project string, zone string) *DisksListCall { - c := &DisksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified GlobalForwardingRule resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/delete +func (r *GlobalForwardingRulesService) Delete(project string, forwardingRule string) *GlobalForwardingRulesDeleteCall { + c := &GlobalForwardingRulesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - return c -} - -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). 
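A sketch of the testIamPermissions call above. The Permissions fields on TestPermissionsRequest and TestPermissionsResponse are assumed from the schema, and the permission names a caller would pass in are illustrative.

// grantedAddressPermissions returns the subset of the requested permissions
// that the caller actually holds on the given global address.
func grantedAddressPermissions(ctx context.Context, svc *compute.Service, project, resource string, wanted []string) ([]string, error) {
	req := &compute.TestPermissionsRequest{Permissions: wanted} // assumed field
	resp, err := svc.GlobalAddresses.TestIamPermissions(project, resource, req).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	return resp.Permissions, nil // assumed field: the permissions that were granted
}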
Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *DisksListCall) Filter(filter string) *DisksListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *DisksListCall) MaxResults(maxResults int64) *DisksListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + c.forwardingRule = forwardingRule return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *DisksListCall) OrderBy(orderBy string) *DisksListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *DisksListCall) PageToken(pageToken string) *DisksListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *GlobalForwardingRulesDeleteCall) RequestId(requestId string) *GlobalForwardingRulesDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksListCall) Fields(s ...googleapi.Field) *DisksListCall { +func (c *GlobalForwardingRulesDeleteCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
-func (c *DisksListCall) IfNoneMatch(entityTag string) *DisksListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksListCall) Context(ctx context.Context) *DisksListCall { +func (c *GlobalForwardingRulesDeleteCall) Context(ctx context.Context) *GlobalForwardingRulesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksListCall) Header() http.Header { +func (c *GlobalForwardingRulesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksListCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "forwardingRule": c.forwardingRule, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.list" call. -// Exactly one of *DiskList or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *DiskList.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.globalForwardingRules.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { +func (c *GlobalForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -24670,7 +37365,7 @@ func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &DiskList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -24682,186 +37377,133 @@ func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { } return ret, nil // { - // "description": "Retrieves a list of persistent disks contained within the specified zone.", - // "httpMethod": "GET", - // "id": "compute.disks.list", + // "description": "Deletes the specified GlobalForwardingRule resource.", + // "httpMethod": "DELETE", + // "id": "compute.globalForwardingRules.delete", // "parameterOrder": [ // "project", - // "zone" + // "forwardingRule" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource to delete.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks", + // "path": "{project}/global/forwardingRules/{forwardingRule}", // "response": { - // "$ref": "DiskList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *DisksListCall) Pages(ctx context.Context, f func(*DiskList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.disks.resize": +// method id "compute.globalForwardingRules.get": -type DisksResizeCall struct { - s *Service - project string - zone string - disk string - disksresizerequest *DisksResizeRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalForwardingRulesGetCall struct { + s *Service + project string + forwardingRule string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Resize: Resizes the specified persistent disk. 
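The globalForwardingRules.delete call above returns an Operation like the other mutating methods and accepts the same idempotency requestId. A sketch, with svc and the UUID assumed from the caller.

// deleteGlobalForwardingRule removes a global forwarding rule; retries may
// reuse the same requestID so the server can ignore duplicates.
func deleteGlobalForwardingRule(ctx context.Context, svc *compute.Service, project, rule, requestID string) (*compute.Operation, error) {
	return svc.GlobalForwardingRules.Delete(project, rule).
		RequestId(requestID).
		Context(ctx).
		Do()
}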
-func (r *DisksService) Resize(project string, zone string, disk string, disksresizerequest *DisksResizeRequest) *DisksResizeCall { - c := &DisksResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified GlobalForwardingRule resource. Get a list +// of available forwarding rules by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/get +func (r *GlobalForwardingRulesService) Get(project string, forwardingRule string) *GlobalForwardingRulesGetCall { + c := &GlobalForwardingRulesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.disk = disk - c.disksresizerequest = disksresizerequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *DisksResizeCall) RequestId(requestId string) *DisksResizeCall { - c.urlParams_.Set("requestId", requestId) + c.forwardingRule = forwardingRule return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksResizeCall) Fields(s ...googleapi.Field) *DisksResizeCall { +func (c *GlobalForwardingRulesGetCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *GlobalForwardingRulesGetCall) IfNoneMatch(entityTag string) *GlobalForwardingRulesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksResizeCall) Context(ctx context.Context) *DisksResizeCall { +func (c *GlobalForwardingRulesGetCall) Context(ctx context.Context) *GlobalForwardingRulesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *DisksResizeCall) Header() http.Header { +func (c *GlobalForwardingRulesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksResizeCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.disksresizerequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/resize") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "disk": c.disk, + "project": c.project, + "forwardingRule": c.forwardingRule, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.resize" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.globalForwardingRules.get" call. +// Exactly one of *ForwardingRule or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *DisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// *ForwardingRule.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *GlobalForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRule, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -24880,7 +37522,7 @@ func (c *DisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &ForwardingRule{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -24892,17 +37534,16 @@ func (c *DisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { } return ret, nil // { - // "description": "Resizes the specified persistent disk.", - // "httpMethod": "POST", - // "id": "compute.disks.resize", + // "description": "Returns the specified GlobalForwardingRule resource. 
Get a list of available forwarding rules by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.globalForwardingRules.get", // "parameterOrder": [ // "project", - // "zone", - // "disk" + // "forwardingRule" // ], // "parameters": { - // "disk": { - // "description": "The name of the persistent disk.", + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -24914,56 +37555,39 @@ func (c *DisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{disk}/resize", - // "request": { - // "$ref": "DisksResizeRequest" - // }, + // "path": "{project}/global/forwardingRules/{forwardingRule}", // "response": { - // "$ref": "Operation" + // "$ref": "ForwardingRule" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.disks.setLabels": +// method id "compute.globalForwardingRules.insert": -type DisksSetLabelsCall struct { - s *Service - project string - zone string - resource string - zonesetlabelsrequest *ZoneSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalForwardingRulesInsertCall struct { + s *Service + project string + forwardingrule *ForwardingRule + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetLabels: Sets the labels on a disk. To learn more about labels, -// read the Labeling Resources documentation. -func (r *DisksService) SetLabels(project string, zone string, resource string, zonesetlabelsrequest *ZoneSetLabelsRequest) *DisksSetLabelsCall { - c := &DisksSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a GlobalForwardingRule resource in the specified +// project using the data included in the request. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/insert +func (r *GlobalForwardingRulesService) Insert(project string, forwardingrule *ForwardingRule) *GlobalForwardingRulesInsertCall { + c := &GlobalForwardingRulesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.resource = resource - c.zonesetlabelsrequest = zonesetlabelsrequest + c.forwardingrule = forwardingrule return c } @@ -24981,7 +37605,7 @@ func (r *DisksService) SetLabels(project string, zone string, resource string, z // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *DisksSetLabelsCall) RequestId(requestId string) *DisksSetLabelsCall { +func (c *GlobalForwardingRulesInsertCall) RequestId(requestId string) *GlobalForwardingRulesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -24989,7 +37613,7 @@ func (c *DisksSetLabelsCall) RequestId(requestId string) *DisksSetLabelsCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksSetLabelsCall) Fields(s ...googleapi.Field) *DisksSetLabelsCall { +func (c *GlobalForwardingRulesInsertCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -24997,53 +37621,51 @@ func (c *DisksSetLabelsCall) Fields(s ...googleapi.Field) *DisksSetLabelsCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksSetLabelsCall) Context(ctx context.Context) *DisksSetLabelsCall { +func (c *GlobalForwardingRulesInsertCall) Context(ctx context.Context) *GlobalForwardingRulesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksSetLabelsCall) Header() http.Header { +func (c *GlobalForwardingRulesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.zonesetlabelsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.setLabels" call. +// Do executes the "compute.globalForwardingRules.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *DisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -25074,13 +37696,11 @@ func (c *DisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Sets the labels on a disk. To learn more about labels, read the Labeling Resources documentation.", + // "description": "Creates a GlobalForwardingRule resource in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.disks.setLabels", + // "id": "compute.globalForwardingRules.insert", // "parameterOrder": [ - // "project", - // "zone", - // "resource" + // "project" // ], // "parameters": { // "project": { @@ -25094,25 +37714,11 @@ func (c *DisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{resource}/setLabels", + // "path": "{project}/global/forwardingRules", // "request": { - // "$ref": "ZoneSetLabelsRequest" + // "$ref": "ForwardingRule" // }, // "response": { // "$ref": "Operation" @@ -25125,88 +37731,157 @@ func (c *DisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error } -// method id "compute.disks.testIamPermissions": +// method id "compute.globalForwardingRules.list": -type DisksTestIamPermissionsCall struct { - s *Service - project string - zone string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalForwardingRulesListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. 
-func (r *DisksService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *DisksTestIamPermissionsCall { - c := &DisksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of GlobalForwardingRule resources available to +// the specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/list +func (r *GlobalForwardingRulesService) List(project string) *GlobalForwardingRulesListCall { + c := &GlobalForwardingRulesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *GlobalForwardingRulesListCall) Filter(filter string) *GlobalForwardingRulesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *GlobalForwardingRulesListCall) MaxResults(maxResults int64) *GlobalForwardingRulesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. 
+func (c *GlobalForwardingRulesListCall) OrderBy(orderBy string) *GlobalForwardingRulesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *GlobalForwardingRulesListCall) PageToken(pageToken string) *GlobalForwardingRulesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DisksTestIamPermissionsCall) Fields(s ...googleapi.Field) *DisksTestIamPermissionsCall { +func (c *GlobalForwardingRulesListCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *GlobalForwardingRulesListCall) IfNoneMatch(entityTag string) *GlobalForwardingRulesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DisksTestIamPermissionsCall) Context(ctx context.Context) *DisksTestIamPermissionsCall { +func (c *GlobalForwardingRulesListCall) Context(ctx context.Context) *GlobalForwardingRulesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DisksTestIamPermissionsCall) Header() http.Header { +func (c *GlobalForwardingRulesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DisksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalForwardingRulesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.disks.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// Do executes the "compute.globalForwardingRules.list" call. +// Exactly one of *ForwardingRuleList or error will be non-nil. 
Any // non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// *ForwardingRuleList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingRuleList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -25225,7 +37900,7 @@ func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPer if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &ForwardingRuleList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -25237,43 +37912,47 @@ func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPer } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.disks.testIamPermissions", + // "description": "Retrieves a list of GlobalForwardingRule resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.globalForwardingRules.list", // "parameterOrder": [ - // "project", - // "zone", - // "resource" + // "project" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", // "type": "string" // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/disks/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/global/forwardingRules", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "ForwardingRuleList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -25284,49 +37963,53 @@ func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPer } -// method id "compute.firewalls.delete": - -type FirewallsDeleteCall struct { - s *Service - project string - firewall string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *GlobalForwardingRulesListCall) Pages(ctx context.Context, f func(*ForwardingRuleList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } } -// Delete: Deletes the specified firewall. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/delete -func (r *FirewallsService) Delete(project string, firewall string) *FirewallsDeleteCall { - c := &FirewallsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.firewall = firewall - return c +// method id "compute.globalForwardingRules.setLabels": + +type GlobalForwardingRulesSetLabelsCall struct { + s *Service + project string + resource string + globalsetlabelsrequest *GlobalSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *FirewallsDeleteCall) RequestId(requestId string) *FirewallsDeleteCall { - c.urlParams_.Set("requestId", requestId) +// SetLabels: Sets the labels on the specified resource. To learn more +// about labels, read the Labeling Resources documentation. +func (r *GlobalForwardingRulesService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *GlobalForwardingRulesSetLabelsCall { + c := &GlobalForwardingRulesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + c.globalsetlabelsrequest = globalsetlabelsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FirewallsDeleteCall) Fields(s ...googleapi.Field) *FirewallsDeleteCall { +func (c *GlobalForwardingRulesSetLabelsCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -25334,47 +38017,52 @@ func (c *FirewallsDeleteCall) Fields(s ...googleapi.Field) *FirewallsDeleteCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallsDeleteCall) Context(ctx context.Context) *FirewallsDeleteCall { +func (c *GlobalForwardingRulesSetLabelsCall) Context(ctx context.Context) *GlobalForwardingRulesSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *FirewallsDeleteCall) Header() http.Header { +func (c *GlobalForwardingRulesSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalForwardingRulesSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "firewall": c.firewall, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewalls.delete" call. +// Do executes the "compute.globalForwardingRules.setLabels" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *FirewallsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -25405,21 +38093,14 @@ func (c *FirewallsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Deletes the specified firewall.", - // "httpMethod": "DELETE", - // "id": "compute.firewalls.delete", + // "description": "Sets the labels on the specified resource. To learn more about labels, read the Labeling Resources documentation.", + // "httpMethod": "POST", + // "id": "compute.globalForwardingRules.setLabels", // "parameterOrder": [ // "project", - // "firewall" + // "resource" // ], // "parameters": { - // "firewall": { - // "description": "Name of the firewall rule to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -25427,13 +38108,18 @@ func (c *FirewallsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/firewalls/{firewall}", + // "path": "{project}/global/forwardingRules/{resource}/setLabels", + // "request": { + // "$ref": "GlobalSetLabelsRequest" + // }, // "response": { // "$ref": "Operation" // }, @@ -25445,92 +38131,105 @@ func (c *FirewallsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.firewalls.get": +// method id "compute.globalForwardingRules.setTarget": -type FirewallsGetCall struct { - s *Service - project string - firewall string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type GlobalForwardingRulesSetTargetCall struct { + s *Service + project string + forwardingRule string + targetreference *TargetReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified firewall. -// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/get -func (r *FirewallsService) Get(project string, firewall string) *FirewallsGetCall { - c := &FirewallsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetTarget: Changes target URL for the GlobalForwardingRule resource. +// The new target should be of the same type as the old target. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/setTarget +func (r *GlobalForwardingRulesService) SetTarget(project string, forwardingRule string, targetreference *TargetReference) *GlobalForwardingRulesSetTargetCall { + c := &GlobalForwardingRulesSetTargetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.firewall = firewall + c.forwardingRule = forwardingRule + c.targetreference = targetreference + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *GlobalForwardingRulesSetTargetCall) RequestId(requestId string) *GlobalForwardingRulesSetTargetCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *FirewallsGetCall) Fields(s ...googleapi.Field) *FirewallsGetCall { +func (c *GlobalForwardingRulesSetTargetCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesSetTargetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *FirewallsGetCall) IfNoneMatch(entityTag string) *FirewallsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallsGetCall) Context(ctx context.Context) *FirewallsGetCall { +func (c *GlobalForwardingRulesSetTargetCall) Context(ctx context.Context) *GlobalForwardingRulesSetTargetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FirewallsGetCall) Header() http.Header { +func (c *GlobalForwardingRulesSetTargetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}/setTarget") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "firewall": c.firewall, + "project": c.project, + "forwardingRule": c.forwardingRule, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewalls.get" call. -// Exactly one of *Firewall or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Firewall.ServerResponse.Header or (if a response was returned at +// Do executes the "compute.globalForwardingRules.setTarget" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *FirewallsGetCall) Do(opts ...googleapi.CallOption) (*Firewall, error) { +func (c *GlobalForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -25549,7 +38248,7 @@ func (c *FirewallsGetCall) Do(opts ...googleapi.CallOption) (*Firewall, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Firewall{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -25561,16 +38260,16 @@ func (c *FirewallsGetCall) Do(opts ...googleapi.CallOption) (*Firewall, error) { } return ret, nil // { - // "description": "Returns the specified firewall.", - // "httpMethod": "GET", - // "id": "compute.firewalls.get", + // "description": "Changes target URL for the GlobalForwardingRule resource. The new target should be of the same type as the old target.", + // "httpMethod": "POST", + // "id": "compute.globalForwardingRules.setTarget", // "parameterOrder": [ // "project", - // "firewall" + // "forwardingRule" // ], // "parameters": { - // "firewall": { - // "description": "Name of the firewall rule to return.", + // "forwardingRule": { + // "description": "Name of the ForwardingRule resource in which target is to be set.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -25582,65 +38281,54 @@ func (c *FirewallsGetCall) Do(opts ...googleapi.CallOption) (*Firewall, error) { // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/global/firewalls/{firewall}", + // "path": "{project}/global/forwardingRules/{forwardingRule}/setTarget", + // "request": { + // "$ref": "TargetReference" + // }, // "response": { - // "$ref": "Firewall" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.firewalls.insert": +// method id "compute.globalForwardingRules.testIamPermissions": -type FirewallsInsertCall struct { - s *Service - project string - firewall *Firewall - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalForwardingRulesTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a firewall rule in the specified project using the -// data included in the request. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/insert -func (r *FirewallsService) Insert(project string, firewall *Firewall) *FirewallsInsertCall { - c := &FirewallsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *GlobalForwardingRulesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *GlobalForwardingRulesTestIamPermissionsCall { + c := &GlobalForwardingRulesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.firewall = firewall - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *FirewallsInsertCall) RequestId(requestId string) *FirewallsInsertCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FirewallsInsertCall) Fields(s ...googleapi.Field) *FirewallsInsertCall { +func (c *GlobalForwardingRulesTestIamPermissionsCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -25648,51 +38336,52 @@ func (c *FirewallsInsertCall) Fields(s ...googleapi.Field) *FirewallsInsertCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallsInsertCall) Context(ctx context.Context) *FirewallsInsertCall { +func (c *GlobalForwardingRulesTestIamPermissionsCall) Context(ctx context.Context) *GlobalForwardingRulesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *FirewallsInsertCall) Header() http.Header { +func (c *GlobalForwardingRulesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalForwardingRulesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewalls.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *FirewallsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.globalForwardingRules.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *GlobalForwardingRulesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -25711,7 +38400,7 @@ func (c *FirewallsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -25723,11 +38412,12 @@ func (c *FirewallsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Creates a firewall rule in the specified project using the data included in the request.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.firewalls.insert", + // "id": "compute.globalForwardingRules.testIamPermissions", // "parameterOrder": [ - // "project" + // "project", + // "resource" // ], // "parameters": { // "project": { @@ -25737,30 +38427,33 @@ func (c *FirewallsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/firewalls", + // "path": "{project}/global/forwardingRules/{resource}/testIamPermissions", // "request": { - // "$ref": "Firewall" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.firewalls.list": +// method id "compute.globalOperations.aggregatedList": -type FirewallsListCall struct { +type GlobalOperationsAggregatedListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -25769,11 +38462,10 @@ type FirewallsListCall struct { header_ http.Header } -// List: Retrieves the list of firewall rules available to the specified -// project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/list -func (r *FirewallsService) List(project string) *FirewallsListCall { - c := &FirewallsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of all operations. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/aggregatedList +func (r *GlobalOperationsService) AggregatedList(project string) *GlobalOperationsAggregatedListCall { + c := &GlobalOperationsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -25804,7 +38496,7 @@ func (r *FirewallsService) List(project string) *FirewallsListCall { // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *FirewallsListCall) Filter(filter string) *FirewallsListCall { +func (c *GlobalOperationsAggregatedListCall) Filter(filter string) *GlobalOperationsAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -25815,7 +38507,7 @@ func (c *FirewallsListCall) Filter(filter string) *FirewallsListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *FirewallsListCall) MaxResults(maxResults int64) *FirewallsListCall { +func (c *GlobalOperationsAggregatedListCall) MaxResults(maxResults int64) *GlobalOperationsAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -25826,291 +38518,89 @@ func (c *FirewallsListCall) MaxResults(maxResults int64) *FirewallsListCall { // // You can also sort results in descending order based on the creation // timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *FirewallsListCall) OrderBy(orderBy string) *FirewallsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *FirewallsListCall) PageToken(pageToken string) *FirewallsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *FirewallsListCall) Fields(s ...googleapi.Field) *FirewallsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *FirewallsListCall) IfNoneMatch(entityTag string) *FirewallsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *FirewallsListCall) Context(ctx context.Context) *FirewallsListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *FirewallsListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *FirewallsListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.firewalls.list" call. -// Exactly one of *FirewallList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *FirewallList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &FirewallList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves the list of firewall rules available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.firewalls.list", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/firewalls", - // "response": { - // "$ref": "FirewallList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *FirewallsListCall) Pages(ctx context.Context, f func(*FirewallList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.firewalls.patch": - -type FirewallsPatchCall struct { - s *Service - project string - firewall string - firewall2 *Firewall - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Patch: Updates the specified firewall rule with the data included in -// the request. This method supports PATCH semantics and uses the JSON -// merge patch format and processing rules. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/patch -func (r *FirewallsService) Patch(project string, firewall string, firewall2 *Firewall) *FirewallsPatchCall { - c := &FirewallsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.firewall = firewall - c.firewall2 = firewall2 - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *FirewallsPatchCall) RequestId(requestId string) *FirewallsPatchCall { - c.urlParams_.Set("requestId", requestId) +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *GlobalOperationsAggregatedListCall) OrderBy(orderBy string) *GlobalOperationsAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *GlobalOperationsAggregatedListCall) PageToken(pageToken string) *GlobalOperationsAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FirewallsPatchCall) Fields(s ...googleapi.Field) *FirewallsPatchCall { +func (c *GlobalOperationsAggregatedListCall) Fields(s ...googleapi.Field) *GlobalOperationsAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *GlobalOperationsAggregatedListCall) IfNoneMatch(entityTag string) *GlobalOperationsAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallsPatchCall) Context(ctx context.Context) *FirewallsPatchCall { +func (c *GlobalOperationsAggregatedListCall) Context(ctx context.Context) *GlobalOperationsAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *FirewallsPatchCall) Header() http.Header { +func (c *GlobalOperationsAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalOperationsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall2) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/operations") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "firewall": c.firewall, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewalls.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *FirewallsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.globalOperations.aggregatedList" call. +// Exactly one of *OperationAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *OperationAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (*OperationAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -26129,7 +38619,7 @@ func (c *FirewallsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &OperationAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -26141,19 +38631,34 @@ func (c *FirewallsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Updates the specified firewall rule with the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.firewalls.patch", + // "description": "Retrieves an aggregated list of all operations.", + // "httpMethod": "GET", + // "id": "compute.globalOperations.aggregatedList", // "parameterOrder": [ - // "project", - // "firewall" + // "project" // ], // "parameters": { - // "firewall": { - // "description": "Name of the firewall rule to patch.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -26162,19 +38667,11 @@ func (c *FirewallsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/global/firewalls/{firewall}", - // "request": { - // "$ref": "Firewall" - // }, + // "path": "{project}/aggregated/operations", // "response": { - // "$ref": "Operation" + // "$ref": "OperationAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -26185,32 +38682,51 @@ func (c *FirewallsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error } -// method id "compute.firewalls.testIamPermissions": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *GlobalOperationsAggregatedListCall) Pages(ctx context.Context, f func(*OperationAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type FirewallsTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.globalOperations.delete": + +type GlobalOperationsDeleteCall struct { + s *Service + project string + operation string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *FirewallsService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *FirewallsTestIamPermissionsCall { - c := &FirewallsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified Operations resource. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/delete +func (r *GlobalOperationsService) Delete(project string, operation string) *GlobalOperationsDeleteCall { + c := &GlobalOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.operation = operation return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FirewallsTestIamPermissionsCall) Fields(s ...googleapi.Field) *FirewallsTestIamPermissionsCall { +func (c *GlobalOperationsDeleteCall) Fields(s ...googleapi.Field) *GlobalOperationsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -26218,222 +38734,171 @@ func (c *FirewallsTestIamPermissionsCall) Fields(s ...googleapi.Field) *Firewall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallsTestIamPermissionsCall) Context(ctx context.Context) *FirewallsTestIamPermissionsCall { +func (c *GlobalOperationsDeleteCall) Context(ctx context.Context) *GlobalOperationsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *FirewallsTestIamPermissionsCall) Header() http.Header { +func (c *GlobalOperationsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "operation": c.operation, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewalls.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *FirewallsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.globalOperations.delete" call. +func (c *GlobalOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } if err != nil { - return nil, err + return err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &TestPermissionsResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err + return err } - return ret, nil + return nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.firewalls.testIamPermissions", + // "description": "Deletes the specified Operations resource.", + // "httpMethod": "DELETE", + // "id": "compute.globalOperations.delete", // "parameterOrder": [ // "project", - // "resource" + // "operation" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "operation": { + // "description": "Name of the Operations resource to delete.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name of the resource for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/firewalls/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, - // "response": { - // "$ref": "TestPermissionsResponse" - // }, + // "path": "{project}/global/operations/{operation}", // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.firewalls.update": +// method id "compute.globalOperations.get": -type FirewallsUpdateCall struct { - s *Service - project string - firewall string - firewall2 *Firewall - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalOperationsGetCall struct { + s *Service + project string + operation string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Update: Updates the specified firewall rule with the data included in -// the request. Using PUT method, can only update following fields of -// firewall rule: allowed, description, sourceRanges, sourceTags, -// targetTags. -// For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/update -func (r *FirewallsService) Update(project string, firewall string, firewall2 *Firewall) *FirewallsUpdateCall { - c := &FirewallsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Retrieves the specified Operations resource. Get a list of +// operations by making a list() request. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/get +func (r *GlobalOperationsService) Get(project string, operation string) *GlobalOperationsGetCall { + c := &GlobalOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.firewall = firewall - c.firewall2 = firewall2 - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *FirewallsUpdateCall) RequestId(requestId string) *FirewallsUpdateCall { - c.urlParams_.Set("requestId", requestId) + c.operation = operation return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *FirewallsUpdateCall) Fields(s ...googleapi.Field) *FirewallsUpdateCall { +func (c *GlobalOperationsGetCall) Fields(s ...googleapi.Field) *GlobalOperationsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *GlobalOperationsGetCall) IfNoneMatch(entityTag string) *GlobalOperationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *FirewallsUpdateCall) Context(ctx context.Context) *FirewallsUpdateCall { +func (c *GlobalOperationsGetCall) Context(ctx context.Context) *GlobalOperationsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *FirewallsUpdateCall) Header() http.Header { +func (c *GlobalOperationsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *FirewallsUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall2) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "firewall": c.firewall, + "project": c.project, + "operation": c.operation, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.firewalls.update" call. +// Do executes the "compute.globalOperations.get" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *FirewallsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -26464,16 +38929,16 @@ func (c *FirewallsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Updates the specified firewall rule with the data included in the request. Using PUT method, can only update following fields of firewall rule: allowed, description, sourceRanges, sourceTags, targetTags.", - // "httpMethod": "PUT", - // "id": "compute.firewalls.update", + // "description": "Retrieves the specified Operations resource. Get a list of operations by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.globalOperations.get", // "parameterOrder": [ // "project", - // "firewall" + // "operation" // ], // "parameters": { - // "firewall": { - // "description": "Name of the firewall rule to update.", + // "operation": { + // "description": "Name of the Operations resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -26485,31 +38950,24 @@ func (c *FirewallsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/global/firewalls/{firewall}", - // "request": { - // "$ref": "Firewall" - // }, + // "path": "{project}/global/operations/{operation}", // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.forwardingRules.aggregatedList": +// method id "compute.globalOperations.list": -type ForwardingRulesAggregatedListCall struct { +type GlobalOperationsListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -26518,10 +38976,11 @@ type ForwardingRulesAggregatedListCall struct { header_ http.Header } -// AggregatedList: Retrieves an aggregated list of forwarding rules. -// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/aggregatedList -func (r *ForwardingRulesService) AggregatedList(project string) *ForwardingRulesAggregatedListCall { - c := &ForwardingRulesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of Operation resources contained within the +// specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/list +func (r *GlobalOperationsService) List(project string) *GlobalOperationsListCall { + c := &GlobalOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -26552,7 +39011,7 @@ func (r *ForwardingRulesService) AggregatedList(project string) *ForwardingRules // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *ForwardingRulesAggregatedListCall) Filter(filter string) *ForwardingRulesAggregatedListCall { +func (c *GlobalOperationsListCall) Filter(filter string) *GlobalOperationsListCall { c.urlParams_.Set("filter", filter) return c } @@ -26563,7 +39022,7 @@ func (c *ForwardingRulesAggregatedListCall) Filter(filter string) *ForwardingRul // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *ForwardingRulesAggregatedListCall) MaxResults(maxResults int64) *ForwardingRulesAggregatedListCall { +func (c *GlobalOperationsListCall) MaxResults(maxResults int64) *GlobalOperationsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -26580,7 +39039,7 @@ func (c *ForwardingRulesAggregatedListCall) MaxResults(maxResults int64) *Forwar // // Currently, only sorting by name or creationTimestamp desc is // supported. 
-func (c *ForwardingRulesAggregatedListCall) OrderBy(orderBy string) *ForwardingRulesAggregatedListCall { +func (c *GlobalOperationsListCall) OrderBy(orderBy string) *GlobalOperationsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -26588,7 +39047,7 @@ func (c *ForwardingRulesAggregatedListCall) OrderBy(orderBy string) *ForwardingR // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *ForwardingRulesAggregatedListCall) PageToken(pageToken string) *ForwardingRulesAggregatedListCall { +func (c *GlobalOperationsListCall) PageToken(pageToken string) *GlobalOperationsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -26596,7 +39055,7 @@ func (c *ForwardingRulesAggregatedListCall) PageToken(pageToken string) *Forward // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ForwardingRulesAggregatedListCall) Fields(s ...googleapi.Field) *ForwardingRulesAggregatedListCall { +func (c *GlobalOperationsListCall) Fields(s ...googleapi.Field) *GlobalOperationsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -26606,7 +39065,7 @@ func (c *ForwardingRulesAggregatedListCall) Fields(s ...googleapi.Field) *Forwar // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ForwardingRulesAggregatedListCall) IfNoneMatch(entityTag string) *ForwardingRulesAggregatedListCall { +func (c *GlobalOperationsListCall) IfNoneMatch(entityTag string) *GlobalOperationsListCall { c.ifNoneMatch_ = entityTag return c } @@ -26614,21 +39073,21 @@ func (c *ForwardingRulesAggregatedListCall) IfNoneMatch(entityTag string) *Forwa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ForwardingRulesAggregatedListCall) Context(ctx context.Context) *ForwardingRulesAggregatedListCall { +func (c *GlobalOperationsListCall) Context(ctx context.Context) *GlobalOperationsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ForwardingRulesAggregatedListCall) Header() http.Header { +func (c *GlobalOperationsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ForwardingRulesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -26639,7 +39098,7 @@ func (c *ForwardingRulesAggregatedListCall) doRequest(alt string) (*http.Respons } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/forwardingRules") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders @@ -26649,14 +39108,14 @@ func (c *ForwardingRulesAggregatedListCall) doRequest(alt string) (*http.Respons return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.forwardingRules.aggregatedList" call. -// Exactly one of *ForwardingRuleAggregatedList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *ForwardingRuleAggregatedList.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.globalOperations.list" call. +// Exactly one of *OperationList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *OperationList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*ForwardingRuleAggregatedList, error) { +func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -26675,7 +39134,7 @@ func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*F if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ForwardingRuleAggregatedList{ + ret := &OperationList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -26687,9 +39146,9 @@ func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*F } return ret, nil // { - // "description": "Retrieves an aggregated list of forwarding rules.", + // "description": "Retrieves a list of Operation resources contained within the specified project.", // "httpMethod": "GET", - // "id": "compute.forwardingRules.aggregatedList", + // "id": "compute.globalOperations.list", // "parameterOrder": [ // "project" // ], @@ -26725,9 +39184,9 @@ func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*F // "type": "string" // } // }, - // "path": "{project}/aggregated/forwardingRules", + // "path": "{project}/global/operations", // "response": { - // "$ref": "ForwardingRuleAggregatedList" + // "$ref": "OperationList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -26741,7 +39200,7 @@ func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*F // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
-func (c *ForwardingRulesAggregatedListCall) Pages(ctx context.Context, f func(*ForwardingRuleAggregatedList) error) error { +func (c *GlobalOperationsListCall) Pages(ctx context.Context, f func(*OperationList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -26759,25 +39218,22 @@ func (c *ForwardingRulesAggregatedListCall) Pages(ctx context.Context, f func(*F } } -// method id "compute.forwardingRules.delete": +// method id "compute.healthChecks.delete": -type ForwardingRulesDeleteCall struct { - s *Service - project string - region string - forwardingRule string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HealthChecksDeleteCall struct { + s *Service + project string + healthCheck string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified ForwardingRule resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/delete -func (r *ForwardingRulesService) Delete(project string, region string, forwardingRule string) *ForwardingRulesDeleteCall { - c := &ForwardingRulesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified HealthCheck resource. +func (r *HealthChecksService) Delete(project string, healthCheck string) *HealthChecksDeleteCall { + c := &HealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.forwardingRule = forwardingRule + c.healthCheck = healthCheck return c } @@ -26795,7 +39251,7 @@ func (r *ForwardingRulesService) Delete(project string, region string, forwardin // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ForwardingRulesDeleteCall) RequestId(requestId string) *ForwardingRulesDeleteCall { +func (c *HealthChecksDeleteCall) RequestId(requestId string) *HealthChecksDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -26803,7 +39259,7 @@ func (c *ForwardingRulesDeleteCall) RequestId(requestId string) *ForwardingRules // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ForwardingRulesDeleteCall) Fields(s ...googleapi.Field) *ForwardingRulesDeleteCall { +func (c *HealthChecksDeleteCall) Fields(s ...googleapi.Field) *HealthChecksDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -26811,21 +39267,21 @@ func (c *ForwardingRulesDeleteCall) Fields(s ...googleapi.Field) *ForwardingRule // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ForwardingRulesDeleteCall) Context(ctx context.Context) *ForwardingRulesDeleteCall { +func (c *HealthChecksDeleteCall) Context(ctx context.Context) *HealthChecksDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ForwardingRulesDeleteCall) Header() http.Header { +func (c *HealthChecksDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -26833,26 +39289,25 @@ func (c *ForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "forwardingRule": c.forwardingRule, + "project": c.project, + "healthCheck": c.healthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.forwardingRules.delete" call. +// Do executes the "compute.healthChecks.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -26883,17 +39338,16 @@ func (c *ForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Deletes the specified ForwardingRule resource.", + // "description": "Deletes the specified HealthCheck resource.", // "httpMethod": "DELETE", - // "id": "compute.forwardingRules.delete", + // "id": "compute.healthChecks.delete", // "parameterOrder": [ // "project", - // "region", - // "forwardingRule" + // "healthCheck" // ], // "parameters": { - // "forwardingRule": { - // "description": "Name of the ForwardingRule resource to delete.", + // "healthCheck": { + // "description": "Name of the HealthCheck resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -26906,20 +39360,13 @@ func (c *ForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", + // "path": "{project}/global/healthChecks/{healthCheck}", // "response": { // "$ref": "Operation" // }, @@ -26931,33 +39378,31 @@ func (c *ForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.forwardingRules.get": +// method id "compute.healthChecks.get": -type ForwardingRulesGetCall struct { - s *Service - project string - region string - forwardingRule string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type HealthChecksGetCall struct { + s *Service + project string + healthCheck string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified ForwardingRule resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/get -func (r *ForwardingRulesService) Get(project string, region string, forwardingRule string) *ForwardingRulesGetCall { - c := &ForwardingRulesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified HealthCheck resource. Get a list of +// available health checks by making a list() request. +func (r *HealthChecksService) Get(project string, healthCheck string) *HealthChecksGetCall { + c := &HealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.forwardingRule = forwardingRule + c.healthCheck = healthCheck return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ForwardingRulesGetCall) Fields(s ...googleapi.Field) *ForwardingRulesGetCall { +func (c *HealthChecksGetCall) Fields(s ...googleapi.Field) *HealthChecksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -26967,7 +39412,7 @@ func (c *ForwardingRulesGetCall) Fields(s ...googleapi.Field) *ForwardingRulesGe // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ForwardingRulesGetCall) IfNoneMatch(entityTag string) *ForwardingRulesGetCall { +func (c *HealthChecksGetCall) IfNoneMatch(entityTag string) *HealthChecksGetCall { c.ifNoneMatch_ = entityTag return c } @@ -26975,21 +39420,21 @@ func (c *ForwardingRulesGetCall) IfNoneMatch(entityTag string) *ForwardingRulesG // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ForwardingRulesGetCall) Context(ctx context.Context) *ForwardingRulesGetCall { +func (c *HealthChecksGetCall) Context(ctx context.Context) *HealthChecksGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ForwardingRulesGetCall) Header() http.Header { +func (c *HealthChecksGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -27000,26 +39445,25 @@ func (c *ForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "forwardingRule": c.forwardingRule, + "project": c.project, + "healthCheck": c.healthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.forwardingRules.get" call. -// Exactly one of *ForwardingRule or error will be non-nil. Any non-2xx +// Do executes the "compute.healthChecks.get" call. +// Exactly one of *HealthCheck or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *ForwardingRule.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRule, error) { +// *HealthCheck.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -27038,7 +39482,7 @@ func (c *ForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRu if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ForwardingRule{ + ret := &HealthCheck{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -27050,17 +39494,16 @@ func (c *ForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRu } return ret, nil // { - // "description": "Returns the specified ForwardingRule resource.", + // "description": "Returns the specified HealthCheck resource. 
Get a list of available health checks by making a list() request.", // "httpMethod": "GET", - // "id": "compute.forwardingRules.get", + // "id": "compute.healthChecks.get", // "parameterOrder": [ // "project", - // "region", - // "forwardingRule" + // "healthCheck" // ], // "parameters": { - // "forwardingRule": { - // "description": "Name of the ForwardingRule resource to return.", + // "healthCheck": { + // "description": "Name of the HealthCheck resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -27072,18 +39515,11 @@ func (c *ForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRu // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", + // "path": "{project}/global/healthChecks/{healthCheck}", // "response": { - // "$ref": "ForwardingRule" + // "$ref": "HealthCheck" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -27094,26 +39530,23 @@ func (c *ForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRu } -// method id "compute.forwardingRules.insert": +// method id "compute.healthChecks.insert": -type ForwardingRulesInsertCall struct { - s *Service - project string - region string - forwardingrule *ForwardingRule - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HealthChecksInsertCall struct { + s *Service + project string + healthcheck *HealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a ForwardingRule resource in the specified project -// and region using the data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/insert -func (r *ForwardingRulesService) Insert(project string, region string, forwardingrule *ForwardingRule) *ForwardingRulesInsertCall { - c := &ForwardingRulesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a HealthCheck resource in the specified project using +// the data included in the request. +func (r *HealthChecksService) Insert(project string, healthcheck *HealthCheck) *HealthChecksInsertCall { + c := &HealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.forwardingrule = forwardingrule + c.healthcheck = healthcheck return c } @@ -27131,7 +39564,7 @@ func (r *ForwardingRulesService) Insert(project string, region string, forwardin // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ForwardingRulesInsertCall) RequestId(requestId string) *ForwardingRulesInsertCall { +func (c *HealthChecksInsertCall) RequestId(requestId string) *HealthChecksInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -27139,7 +39572,7 @@ func (c *ForwardingRulesInsertCall) RequestId(requestId string) *ForwardingRules // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *ForwardingRulesInsertCall) Fields(s ...googleapi.Field) *ForwardingRulesInsertCall { +func (c *HealthChecksInsertCall) Fields(s ...googleapi.Field) *HealthChecksInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -27147,52 +39580,51 @@ func (c *ForwardingRulesInsertCall) Fields(s ...googleapi.Field) *ForwardingRule // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ForwardingRulesInsertCall) Context(ctx context.Context) *ForwardingRulesInsertCall { +func (c *HealthChecksInsertCall) Context(ctx context.Context) *HealthChecksInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ForwardingRulesInsertCall) Header() http.Header { +func (c *HealthChecksInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.forwardingRules.insert" call. +// Do executes the "compute.healthChecks.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -27223,12 +39655,11 @@ func (c *ForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Creates a ForwardingRule resource in the specified project and region using the data included in the request.", + // "description": "Creates a HealthCheck resource in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.forwardingRules.insert", + // "id": "compute.healthChecks.insert", // "parameterOrder": [ - // "project", - // "region" + // "project" // ], // "parameters": { // "project": { @@ -27238,22 +39669,15 @@ func (c *ForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/regions/{region}/forwardingRules", + // "path": "{project}/global/healthChecks", // "request": { - // "$ref": "ForwardingRule" + // "$ref": "HealthCheck" // }, // "response": { // "$ref": "Operation" @@ -27266,25 +39690,22 @@ func (c *ForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.forwardingRules.list": +// method id "compute.healthChecks.list": -type ForwardingRulesListCall struct { +type HealthChecksListCall struct { s *Service project string - region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of ForwardingRule resources available to the -// specified project and region. -// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/list -func (r *ForwardingRulesService) List(project string, region string) *ForwardingRulesListCall { - c := &ForwardingRulesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of HealthCheck resources available to the +// specified project. +func (r *HealthChecksService) List(project string) *HealthChecksListCall { + c := &HealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region return c } @@ -27314,7 +39735,7 @@ func (r *ForwardingRulesService) List(project string, region string) *Forwarding // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. 
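// Illustrative usage, a sketch assuming the *compute.Service from the earlier
// example and the standard log package: combining Filter, MaxResults, OrderBy
// and Pages on the generated list call. The filter expression and project
// name are placeholders; as the doc comment above notes, multiple filter
// expressions are ANDed together.
func listMatchingHealthChecks(ctx context.Context, svc *compute.Service) error {
	call := svc.HealthChecks.List("my-project").
		Filter("name eq my-hc-.*"). // placeholder RE2 match on the name field
		MaxResults(100).
		OrderBy("name")
	// Pages walks every page of results, following nextPageToken for us.
	return call.Pages(ctx, func(page *compute.HealthCheckList) error {
		for _, hc := range page.Items {
			log.Printf("health check: %s", hc.Name)
		}
		return nil
	})
}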
-func (c *ForwardingRulesListCall) Filter(filter string) *ForwardingRulesListCall { +func (c *HealthChecksListCall) Filter(filter string) *HealthChecksListCall { c.urlParams_.Set("filter", filter) return c } @@ -27325,7 +39746,7 @@ func (c *ForwardingRulesListCall) Filter(filter string) *ForwardingRulesListCall // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *ForwardingRulesListCall) MaxResults(maxResults int64) *ForwardingRulesListCall { +func (c *HealthChecksListCall) MaxResults(maxResults int64) *HealthChecksListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -27342,7 +39763,7 @@ func (c *ForwardingRulesListCall) MaxResults(maxResults int64) *ForwardingRulesL // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *ForwardingRulesListCall) OrderBy(orderBy string) *ForwardingRulesListCall { +func (c *HealthChecksListCall) OrderBy(orderBy string) *HealthChecksListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -27350,7 +39771,7 @@ func (c *ForwardingRulesListCall) OrderBy(orderBy string) *ForwardingRulesListCa // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *ForwardingRulesListCall) PageToken(pageToken string) *ForwardingRulesListCall { +func (c *HealthChecksListCall) PageToken(pageToken string) *HealthChecksListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -27358,7 +39779,7 @@ func (c *ForwardingRulesListCall) PageToken(pageToken string) *ForwardingRulesLi // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ForwardingRulesListCall) Fields(s ...googleapi.Field) *ForwardingRulesListCall { +func (c *HealthChecksListCall) Fields(s ...googleapi.Field) *HealthChecksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -27368,7 +39789,7 @@ func (c *ForwardingRulesListCall) Fields(s ...googleapi.Field) *ForwardingRulesL // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ForwardingRulesListCall) IfNoneMatch(entityTag string) *ForwardingRulesListCall { +func (c *HealthChecksListCall) IfNoneMatch(entityTag string) *HealthChecksListCall { c.ifNoneMatch_ = entityTag return c } @@ -27376,21 +39797,21 @@ func (c *ForwardingRulesListCall) IfNoneMatch(entityTag string) *ForwardingRules // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ForwardingRulesListCall) Context(ctx context.Context) *ForwardingRulesListCall { +func (c *HealthChecksListCall) Context(ctx context.Context) *HealthChecksListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ForwardingRulesListCall) Header() http.Header { +func (c *HealthChecksListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ForwardingRulesListCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -27401,25 +39822,24 @@ func (c *ForwardingRulesListCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.forwardingRules.list" call. -// Exactly one of *ForwardingRuleList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ForwardingRuleList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.healthChecks.list" call. +// Exactly one of *HealthCheckList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *HealthCheckList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingRuleList, error) { +func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -27438,7 +39858,7 @@ func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingR if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ForwardingRuleList{ + ret := &HealthCheckList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -27450,12 +39870,11 @@ func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingR } return ret, nil // { - // "description": "Retrieves a list of ForwardingRule resources available to the specified project and region.", + // "description": "Retrieves the list of HealthCheck resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.forwardingRules.list", + // "id": "compute.healthChecks.list", // "parameterOrder": [ - // "project", - // "region" + // "project" // ], // "parameters": { // "filter": { @@ -27487,18 +39906,11 @@ func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingR // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/forwardingRules", + // "path": "{project}/global/healthChecks", // "response": { - // "$ref": "ForwardingRuleList" + // "$ref": "HealthCheckList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -27512,7 +39924,7 @@ func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingR // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *ForwardingRulesListCall) Pages(ctx context.Context, f func(*ForwardingRuleList) error) error { +func (c *HealthChecksListCall) Pages(ctx context.Context, f func(*HealthCheckList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -27530,27 +39942,26 @@ func (c *ForwardingRulesListCall) Pages(ctx context.Context, f func(*ForwardingR } } -// method id "compute.forwardingRules.setLabels": +// method id "compute.healthChecks.patch": -type ForwardingRulesSetLabelsCall struct { - s *Service - project string - region string - resource string - regionsetlabelsrequest *RegionSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HealthChecksPatchCall struct { + s *Service + project string + healthCheck string + healthcheck *HealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetLabels: Sets the labels on the specified resource. To learn more -// about labels, read the Labeling Resources documentation. -func (r *ForwardingRulesService) SetLabels(project string, region string, resource string, regionsetlabelsrequest *RegionSetLabelsRequest) *ForwardingRulesSetLabelsCall { - c := &ForwardingRulesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates a HealthCheck resource in the specified project using +// the data included in the request. 
This method supports PATCH +// semantics and uses the JSON merge patch format and processing rules. +func (r *HealthChecksService) Patch(project string, healthCheck string, healthcheck *HealthCheck) *HealthChecksPatchCall { + c := &HealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.resource = resource - c.regionsetlabelsrequest = regionsetlabelsrequest + c.healthCheck = healthCheck + c.healthcheck = healthcheck return c } @@ -27568,7 +39979,7 @@ func (r *ForwardingRulesService) SetLabels(project string, region string, resour // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ForwardingRulesSetLabelsCall) RequestId(requestId string) *ForwardingRulesSetLabelsCall { +func (c *HealthChecksPatchCall) RequestId(requestId string) *HealthChecksPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -27576,7 +39987,7 @@ func (c *ForwardingRulesSetLabelsCall) RequestId(requestId string) *ForwardingRu // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ForwardingRulesSetLabelsCall) Fields(s ...googleapi.Field) *ForwardingRulesSetLabelsCall { +func (c *HealthChecksPatchCall) Fields(s ...googleapi.Field) *HealthChecksPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -27584,53 +39995,52 @@ func (c *ForwardingRulesSetLabelsCall) Fields(s ...googleapi.Field) *ForwardingR // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ForwardingRulesSetLabelsCall) Context(ctx context.Context) *ForwardingRulesSetLabelsCall { +func (c *HealthChecksPatchCall) Context(ctx context.Context) *HealthChecksPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ForwardingRulesSetLabelsCall) Header() http.Header { +func (c *HealthChecksPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ForwardingRulesSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionsetlabelsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{resource}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("PATCH", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, + "healthCheck": c.healthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.forwardingRules.setLabels" call. +// Do executes the "compute.healthChecks.patch" call. 
// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -27661,26 +40071,25 @@ func (c *ForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Sets the labels on the specified resource. To learn more about labels, read the Labeling Resources documentation.", - // "httpMethod": "POST", - // "id": "compute.forwardingRules.setLabels", + // "description": "Updates a HealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.healthChecks.patch", // "parameterOrder": [ // "project", - // "region", - // "resource" + // "healthCheck" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "healthCheck": { + // "description": "Name of the HealthCheck resource to patch.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "region": { - // "description": "The region for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, @@ -27688,18 +40097,11 @@ func (c *ForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operat // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/forwardingRules/{resource}/setLabels", + // "path": "{project}/global/healthChecks/{healthCheck}", // "request": { - // "$ref": "RegionSetLabelsRequest" + // "$ref": "HealthCheck" // }, // "response": { // "$ref": "Operation" @@ -27712,54 +40114,32 @@ func (c *ForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operat } -// method id "compute.forwardingRules.setTarget": +// method id "compute.healthChecks.testIamPermissions": -type ForwardingRulesSetTargetCall struct { - s *Service - project string - region string - forwardingRule string - targetreference *TargetReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HealthChecksTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetTarget: Changes target URL for forwarding rule. The new target -// should be of the same type as the old target. -// For details, see https://cloud.google.com/compute/docs/reference/latest/forwardingRules/setTarget -func (r *ForwardingRulesService) SetTarget(project string, region string, forwardingRule string, targetreference *TargetReference) *ForwardingRulesSetTargetCall { - c := &ForwardingRulesSetTargetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *HealthChecksService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *HealthChecksTestIamPermissionsCall { + c := &HealthChecksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.forwardingRule = forwardingRule - c.targetreference = targetreference - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *ForwardingRulesSetTargetCall) RequestId(requestId string) *ForwardingRulesSetTargetCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *ForwardingRulesSetTargetCall) Fields(s ...googleapi.Field) *ForwardingRulesSetTargetCall { +func (c *HealthChecksTestIamPermissionsCall) Fields(s ...googleapi.Field) *HealthChecksTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -27767,53 +40147,52 @@ func (c *ForwardingRulesSetTargetCall) Fields(s ...googleapi.Field) *ForwardingR // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ForwardingRulesSetTargetCall) Context(ctx context.Context) *ForwardingRulesSetTargetCall { +func (c *HealthChecksTestIamPermissionsCall) Context(ctx context.Context) *HealthChecksTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ForwardingRulesSetTargetCall) Header() http.Header { +func (c *HealthChecksTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "forwardingRule": c.forwardingRule, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.forwardingRules.setTarget" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.healthChecks.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *HealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -27832,7 +40211,7 @@ func (c *ForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operat if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -27844,22 +40223,14 @@ func (c *ForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Changes target URL for forwarding rule. The new target should be of the same type as the old target.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.forwardingRules.setTarget", + // "id": "compute.healthChecks.testIamPermissions", // "parameterOrder": [ // "project", - // "region", - // "forwardingRule" + // "resource" // ], // "parameters": { - // "forwardingRule": { - // "description": "Name of the ForwardingRule resource in which target is to be set.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -27867,62 +40238,75 @@ func (c *ForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operat // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region scoping this request.", + // "resource": { + // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget", + // "path": "{project}/global/healthChecks/{resource}/testIamPermissions", // "request": { - // "$ref": "TargetReference" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.forwardingRules.testIamPermissions": +// method id "compute.healthChecks.update": -type ForwardingRulesTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HealthChecksUpdateCall struct { + s *Service + project string + healthCheck string + healthcheck *HealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *ForwardingRulesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *ForwardingRulesTestIamPermissionsCall { - c := &ForwardingRulesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates a HealthCheck resource in the specified project using +// the data included in the request. +func (r *HealthChecksService) Update(project string, healthCheck string, healthcheck *HealthCheck) *HealthChecksUpdateCall { + c := &HealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.healthCheck = healthCheck + c.healthcheck = healthcheck + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *HealthChecksUpdateCall) RequestId(requestId string) *HealthChecksUpdateCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *ForwardingRulesTestIamPermissionsCall) Fields(s ...googleapi.Field) *ForwardingRulesTestIamPermissionsCall { +func (c *HealthChecksUpdateCall) Fields(s ...googleapi.Field) *HealthChecksUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -27930,53 +40314,52 @@ func (c *ForwardingRulesTestIamPermissionsCall) Fields(s ...googleapi.Field) *Fo // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ForwardingRulesTestIamPermissionsCall) Context(ctx context.Context) *ForwardingRulesTestIamPermissionsCall { +func (c *HealthChecksUpdateCall) Context(ctx context.Context) *HealthChecksUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ForwardingRulesTestIamPermissionsCall) Header() http.Header { +func (c *HealthChecksUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ForwardingRulesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *HealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("PUT", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, + "healthCheck": c.healthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.forwardingRules.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ForwardingRulesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.healthChecks.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *HealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -27995,7 +40378,7 @@ func (c *ForwardingRulesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -28007,70 +40390,66 @@ func (c *ForwardingRulesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.forwardingRules.testIamPermissions", + // "description": "Updates a HealthCheck resource in the specified project using the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.healthChecks.update", // "parameterOrder": [ // "project", - // "region", - // "resource" + // "healthCheck" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "healthCheck": { + // "description": "Name of the HealthCheck resource to update.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "region": { - // "description": "The name of the region for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/regions/{region}/forwardingRules/{resource}/testIamPermissions", + // "path": "{project}/global/healthChecks/{healthCheck}", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "HealthCheck" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.globalAddresses.delete": +// method id "compute.httpHealthChecks.delete": -type GlobalAddressesDeleteCall struct { - s *Service - project string - address string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HttpHealthChecksDeleteCall struct { + s *Service + project string + httpHealthCheck string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified address resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/delete -func (r *GlobalAddressesService) Delete(project string, address string) *GlobalAddressesDeleteCall { - c := &GlobalAddressesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified HttpHealthCheck resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/delete +func (r *HttpHealthChecksService) Delete(project string, httpHealthCheck string) *HttpHealthChecksDeleteCall { + c := &HttpHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.address = address + c.httpHealthCheck = httpHealthCheck return c } @@ -28088,7 +40467,7 @@ func (r *GlobalAddressesService) Delete(project string, address string) *GlobalA // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *GlobalAddressesDeleteCall) RequestId(requestId string) *GlobalAddressesDeleteCall { +func (c *HttpHealthChecksDeleteCall) RequestId(requestId string) *HttpHealthChecksDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -28096,7 +40475,7 @@ func (c *GlobalAddressesDeleteCall) RequestId(requestId string) *GlobalAddresses // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalAddressesDeleteCall) Fields(s ...googleapi.Field) *GlobalAddressesDeleteCall { +func (c *HttpHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpHealthChecksDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -28104,21 +40483,21 @@ func (c *GlobalAddressesDeleteCall) Fields(s ...googleapi.Field) *GlobalAddresse // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
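// Illustrative usage, a sketch assuming the *compute.Service from the earlier
// example plus the standard time and log packages: because a pending request
// is aborted when the call's context is canceled, a deadline bounds how long
// Do can block. Resource names are placeholders.
func deleteHTTPHealthCheckWithTimeout(ctx context.Context, svc *compute.Service) error {
	ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()
	op, err := svc.HttpHealthChecks.Delete("my-project", "my-http-hc").
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("delete started: operation %s is %s", op.Name, op.Status)
	return nil
}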
-func (c *GlobalAddressesDeleteCall) Context(ctx context.Context) *GlobalAddressesDeleteCall { +func (c *HttpHealthChecksDeleteCall) Context(ctx context.Context) *HttpHealthChecksDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalAddressesDeleteCall) Header() http.Header { +func (c *HttpHealthChecksDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalAddressesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -28126,25 +40505,25 @@ func (c *GlobalAddressesDeleteCall) doRequest(alt string) (*http.Response, error reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{address}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "address": c.address, + "project": c.project, + "httpHealthCheck": c.httpHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalAddresses.delete" call. +// Do executes the "compute.httpHealthChecks.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *GlobalAddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HttpHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -28175,16 +40554,16 @@ func (c *GlobalAddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Deletes the specified address resource.", + // "description": "Deletes the specified HttpHealthCheck resource.", // "httpMethod": "DELETE", - // "id": "compute.globalAddresses.delete", + // "id": "compute.httpHealthChecks.delete", // "parameterOrder": [ // "project", - // "address" + // "httpHealthCheck" // ], // "parameters": { - // "address": { - // "description": "Name of the address resource to delete.", + // "httpHealthCheck": { + // "description": "Name of the HttpHealthCheck resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -28203,7 +40582,7 @@ func (c *GlobalAddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation // "type": "string" // } // }, - // "path": "{project}/global/addresses/{address}", + // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", // "response": { // "$ref": "Operation" // }, @@ -28215,32 +40594,32 @@ func (c *GlobalAddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.globalAddresses.get": +// method id "compute.httpHealthChecks.get": -type GlobalAddressesGetCall struct { - s *Service - project string - address string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type HttpHealthChecksGetCall struct { + s *Service + project string + httpHealthCheck string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified address resource. Get a list of available -// addresses by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/get -func (r *GlobalAddressesService) Get(project string, address string) *GlobalAddressesGetCall { - c := &GlobalAddressesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified HttpHealthCheck resource. Get a list of +// available HTTP health checks by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/get +func (r *HttpHealthChecksService) Get(project string, httpHealthCheck string) *HttpHealthChecksGetCall { + c := &HttpHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.address = address + c.httpHealthCheck = httpHealthCheck return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalAddressesGetCall) Fields(s ...googleapi.Field) *GlobalAddressesGetCall { +func (c *HttpHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpHealthChecksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -28250,7 +40629,7 @@ func (c *GlobalAddressesGetCall) Fields(s ...googleapi.Field) *GlobalAddressesGe // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
-func (c *GlobalAddressesGetCall) IfNoneMatch(entityTag string) *GlobalAddressesGetCall { +func (c *HttpHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpHealthChecksGetCall { c.ifNoneMatch_ = entityTag return c } @@ -28258,21 +40637,21 @@ func (c *GlobalAddressesGetCall) IfNoneMatch(entityTag string) *GlobalAddressesG // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalAddressesGetCall) Context(ctx context.Context) *GlobalAddressesGetCall { +func (c *HttpHealthChecksGetCall) Context(ctx context.Context) *HttpHealthChecksGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalAddressesGetCall) Header() http.Header { +func (c *HttpHealthChecksGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalAddressesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -28283,25 +40662,25 @@ func (c *GlobalAddressesGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{address}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "address": c.address, + "project": c.project, + "httpHealthCheck": c.httpHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalAddresses.get" call. -// Exactly one of *Address or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Address.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { +// Do executes the "compute.httpHealthChecks.get" call. +// Exactly one of *HttpHealthCheck or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *HttpHealthCheck.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthCheck, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -28320,7 +40699,7 @@ func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Address{ + ret := &HttpHealthCheck{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -28332,16 +40711,16 @@ func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, err } return ret, nil // { - // "description": "Returns the specified address resource. Get a list of available addresses by making a list() request.", + // "description": "Returns the specified HttpHealthCheck resource. Get a list of available HTTP health checks by making a list() request.", // "httpMethod": "GET", - // "id": "compute.globalAddresses.get", + // "id": "compute.httpHealthChecks.get", // "parameterOrder": [ // "project", - // "address" + // "httpHealthCheck" // ], // "parameters": { - // "address": { - // "description": "Name of the address resource to return.", + // "httpHealthCheck": { + // "description": "Name of the HttpHealthCheck resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -28355,9 +40734,9 @@ func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, err // "type": "string" // } // }, - // "path": "{project}/global/addresses/{address}", + // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", // "response": { - // "$ref": "Address" + // "$ref": "HttpHealthCheck" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -28368,24 +40747,24 @@ func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, err } -// method id "compute.globalAddresses.insert": +// method id "compute.httpHealthChecks.insert": -type GlobalAddressesInsertCall struct { - s *Service - project string - address *Address - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HttpHealthChecksInsertCall struct { + s *Service + project string + httphealthcheck *HttpHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates an address resource in the specified project using -// the data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/insert -func (r *GlobalAddressesService) Insert(project string, address *Address) *GlobalAddressesInsertCall { - c := &GlobalAddressesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a HttpHealthCheck resource in the specified project +// using the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/insert +func (r *HttpHealthChecksService) Insert(project string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksInsertCall { + c := &HttpHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.address = address + c.httphealthcheck = httphealthcheck return c } @@ -28403,7 +40782,7 @@ func (r *GlobalAddressesService) Insert(project string, address *Address) *Globa // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
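// Illustrative usage, a sketch assuming the *compute.Service from the earlier
// example: retrying an insert with the same request ID, which (per the doc
// comment above) lets the server ignore the duplicate once the original
// request has completed. The UUID and resource values are placeholders, and
// RequestId on the insert call is taken from the surface shown in this diff.
func insertHTTPHealthCheckIdempotently(ctx context.Context, svc *compute.Service) error {
	hc := &compute.HttpHealthCheck{
		Name:        "my-http-hc",
		Port:        8080,
		RequestPath: "/healthz",
	}
	_, err := svc.HttpHealthChecks.Insert("my-project", hc).
		RequestId("00112233-4455-6677-8899-aabbccddeeff"). // any non-zero UUID
		Context(ctx).
		Do()
	return err
}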
-func (c *GlobalAddressesInsertCall) RequestId(requestId string) *GlobalAddressesInsertCall { +func (c *HttpHealthChecksInsertCall) RequestId(requestId string) *HttpHealthChecksInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -28411,7 +40790,7 @@ func (c *GlobalAddressesInsertCall) RequestId(requestId string) *GlobalAddresses // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalAddressesInsertCall) Fields(s ...googleapi.Field) *GlobalAddressesInsertCall { +func (c *HttpHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpHealthChecksInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -28419,34 +40798,34 @@ func (c *GlobalAddressesInsertCall) Fields(s ...googleapi.Field) *GlobalAddresse // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalAddressesInsertCall) Context(ctx context.Context) *GlobalAddressesInsertCall { +func (c *HttpHealthChecksInsertCall) Context(ctx context.Context) *HttpHealthChecksInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalAddressesInsertCall) Header() http.Header { +func (c *HttpHealthChecksInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalAddressesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.address) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -28456,14 +40835,14 @@ func (c *GlobalAddressesInsertCall) doRequest(alt string) (*http.Response, error return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalAddresses.insert" call. +// Do executes the "compute.httpHealthChecks.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *GlobalAddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HttpHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -28494,9 +40873,9 @@ func (c *GlobalAddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Creates an address resource in the specified project using the data included in the request.", + // "description": "Creates a HttpHealthCheck resource in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.globalAddresses.insert", + // "id": "compute.httpHealthChecks.insert", // "parameterOrder": [ // "project" // ], @@ -28514,9 +40893,9 @@ func (c *GlobalAddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation // "type": "string" // } // }, - // "path": "{project}/global/addresses", + // "path": "{project}/global/httpHealthChecks", // "request": { - // "$ref": "Address" + // "$ref": "HttpHealthCheck" // }, // "response": { // "$ref": "Operation" @@ -28529,9 +40908,9 @@ func (c *GlobalAddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.globalAddresses.list": +// method id "compute.httpHealthChecks.list": -type GlobalAddressesListCall struct { +type HttpHealthChecksListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -28540,10 +40919,11 @@ type GlobalAddressesListCall struct { header_ http.Header } -// List: Retrieves a list of global addresses. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalAddresses/list -func (r *GlobalAddressesService) List(project string) *GlobalAddressesListCall { - c := &GlobalAddressesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of HttpHealthCheck resources available to +// the specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/list +func (r *HttpHealthChecksService) List(project string) *HttpHealthChecksListCall { + c := &HttpHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -28574,7 +40954,7 @@ func (r *GlobalAddressesService) List(project string) *GlobalAddressesListCall { // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *GlobalAddressesListCall) Filter(filter string) *GlobalAddressesListCall { +func (c *HttpHealthChecksListCall) Filter(filter string) *HttpHealthChecksListCall { c.urlParams_.Set("filter", filter) return c } @@ -28585,7 +40965,7 @@ func (c *GlobalAddressesListCall) Filter(filter string) *GlobalAddressesListCall // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *GlobalAddressesListCall) MaxResults(maxResults int64) *GlobalAddressesListCall { +func (c *HttpHealthChecksListCall) MaxResults(maxResults int64) *HttpHealthChecksListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -28602,7 +40982,7 @@ func (c *GlobalAddressesListCall) MaxResults(maxResults int64) *GlobalAddressesL // // Currently, only sorting by name or creationTimestamp desc is // supported. 
-func (c *GlobalAddressesListCall) OrderBy(orderBy string) *GlobalAddressesListCall { +func (c *HttpHealthChecksListCall) OrderBy(orderBy string) *HttpHealthChecksListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -28610,7 +40990,7 @@ func (c *GlobalAddressesListCall) OrderBy(orderBy string) *GlobalAddressesListCa // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *GlobalAddressesListCall) PageToken(pageToken string) *GlobalAddressesListCall { +func (c *HttpHealthChecksListCall) PageToken(pageToken string) *HttpHealthChecksListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -28618,7 +40998,7 @@ func (c *GlobalAddressesListCall) PageToken(pageToken string) *GlobalAddressesLi // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalAddressesListCall) Fields(s ...googleapi.Field) *GlobalAddressesListCall { +func (c *HttpHealthChecksListCall) Fields(s ...googleapi.Field) *HttpHealthChecksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -28628,7 +41008,7 @@ func (c *GlobalAddressesListCall) Fields(s ...googleapi.Field) *GlobalAddressesL // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *GlobalAddressesListCall) IfNoneMatch(entityTag string) *GlobalAddressesListCall { +func (c *HttpHealthChecksListCall) IfNoneMatch(entityTag string) *HttpHealthChecksListCall { c.ifNoneMatch_ = entityTag return c } @@ -28636,21 +41016,21 @@ func (c *GlobalAddressesListCall) IfNoneMatch(entityTag string) *GlobalAddresses // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalAddressesListCall) Context(ctx context.Context) *GlobalAddressesListCall { +func (c *HttpHealthChecksListCall) Context(ctx context.Context) *HttpHealthChecksListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalAddressesListCall) Header() http.Header { +func (c *HttpHealthChecksListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalAddressesListCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpHealthChecksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -28661,7 +41041,7 @@ func (c *GlobalAddressesListCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders @@ -28671,14 +41051,14 @@ func (c *GlobalAddressesListCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalAddresses.list" call. -// Exactly one of *AddressList or error will be non-nil. Any non-2xx -// status code is an error. 
Response headers are in either -// *AddressList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, error) { +// Do executes the "compute.httpHealthChecks.list" call. +// Exactly one of *HttpHealthCheckList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *HttpHealthCheckList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealthCheckList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -28697,7 +41077,7 @@ func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &AddressList{ + ret := &HttpHealthCheckList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -28709,9 +41089,9 @@ func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList } return ret, nil // { - // "description": "Retrieves a list of global addresses.", + // "description": "Retrieves the list of HttpHealthCheck resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.globalAddresses.list", + // "id": "compute.httpHealthChecks.list", // "parameterOrder": [ // "project" // ], @@ -28747,9 +41127,9 @@ func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList // "type": "string" // } // }, - // "path": "{project}/global/addresses", + // "path": "{project}/global/httpHealthChecks", // "response": { - // "$ref": "AddressList" + // "$ref": "HttpHealthCheckList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -28763,7 +41143,7 @@ func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *GlobalAddressesListCall) Pages(ctx context.Context, f func(*AddressList) error) error { +func (c *HttpHealthChecksListCall) Pages(ctx context.Context, f func(*HttpHealthCheckList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -28781,32 +41161,205 @@ func (c *GlobalAddressesListCall) Pages(ctx context.Context, f func(*AddressList } } -// method id "compute.globalAddresses.setLabels": +// method id "compute.httpHealthChecks.patch": -type GlobalAddressesSetLabelsCall struct { +type HttpHealthChecksPatchCall struct { + s *Service + project string + httpHealthCheck string + httphealthcheck *HttpHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates a HttpHealthCheck resource in the specified project +// using the data included in the request. This method supports PATCH +// semantics and uses the JSON merge patch format and processing rules. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/patch +func (r *HttpHealthChecksService) Patch(project string, httpHealthCheck string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksPatchCall { + c := &HttpHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.httpHealthCheck = httpHealthCheck + c.httphealthcheck = httphealthcheck + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *HttpHealthChecksPatchCall) RequestId(requestId string) *HttpHealthChecksPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *HttpHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpHealthChecksPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *HttpHealthChecksPatchCall) Context(ctx context.Context) *HttpHealthChecksPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *HttpHealthChecksPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *HttpHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "httpHealthCheck": c.httpHealthCheck, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.httpHealthChecks.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
+func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.httpHealthChecks.patch", + // "parameterOrder": [ + // "project", + // "httpHealthCheck" + // ], + // "parameters": { + // "httpHealthCheck": { + // "description": "Name of the HttpHealthCheck resource to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + // "request": { + // "$ref": "HttpHealthCheck" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.httpHealthChecks.testIamPermissions": + +type HttpHealthChecksTestIamPermissionsCall struct { s *Service project string resource string - globalsetlabelsrequest *GlobalSetLabelsRequest + testpermissionsrequest *TestPermissionsRequest urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// SetLabels: Sets the labels on a GlobalAddress. To learn more about -// labels, read the Labeling Resources documentation. 
-func (r *GlobalAddressesService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *GlobalAddressesSetLabelsCall { - c := &GlobalAddressesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *HttpHealthChecksService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *HttpHealthChecksTestIamPermissionsCall { + c := &HttpHealthChecksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.resource = resource - c.globalsetlabelsrequest = globalsetlabelsrequest + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalAddressesSetLabelsCall) Fields(s ...googleapi.Field) *GlobalAddressesSetLabelsCall { +func (c *HttpHealthChecksTestIamPermissionsCall) Fields(s ...googleapi.Field) *HttpHealthChecksTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -28814,34 +41367,34 @@ func (c *GlobalAddressesSetLabelsCall) Fields(s ...googleapi.Field) *GlobalAddre // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalAddressesSetLabelsCall) Context(ctx context.Context) *GlobalAddressesSetLabelsCall { +func (c *HttpHealthChecksTestIamPermissionsCall) Context(ctx context.Context) *HttpHealthChecksTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalAddressesSetLabelsCall) Header() http.Header { +func (c *HttpHealthChecksTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalAddressesSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpHealthChecksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{resource}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -28852,14 +41405,14 @@ func (c *GlobalAddressesSetLabelsCall) doRequest(alt string) (*http.Response, er return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalAddresses.setLabels" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. 
-func (c *GlobalAddressesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.httpHealthChecks.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *HttpHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -28878,7 +41431,7 @@ func (c *GlobalAddressesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operat if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -28890,9 +41443,9 @@ func (c *GlobalAddressesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Sets the labels on a GlobalAddress. To learn more about labels, read the Labeling Resources documentation.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.globalAddresses.setLabels", + // "id": "compute.httpHealthChecks.testIamPermissions", // "parameterOrder": [ // "project", // "resource" @@ -28908,52 +41461,73 @@ func (c *GlobalAddressesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operat // "resource": { // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + // "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/addresses/{resource}/setLabels", + // "path": "{project}/global/httpHealthChecks/{resource}/testIamPermissions", // "request": { - // "$ref": "GlobalSetLabelsRequest" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.globalAddresses.testIamPermissions": +// method id "compute.httpHealthChecks.update": -type GlobalAddressesTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HttpHealthChecksUpdateCall struct { + s *Service + project string + httpHealthCheck string + httphealthcheck *HttpHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. 
-func (r *GlobalAddressesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *GlobalAddressesTestIamPermissionsCall { - c := &GlobalAddressesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates a HttpHealthCheck resource in the specified project +// using the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/update +func (r *HttpHealthChecksService) Update(project string, httpHealthCheck string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksUpdateCall { + c := &HttpHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.httpHealthCheck = httpHealthCheck + c.httphealthcheck = httphealthcheck + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *HttpHealthChecksUpdateCall) RequestId(requestId string) *HttpHealthChecksUpdateCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalAddressesTestIamPermissionsCall) Fields(s ...googleapi.Field) *GlobalAddressesTestIamPermissionsCall { +func (c *HttpHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpHealthChecksUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -28961,52 +41535,52 @@ func (c *GlobalAddressesTestIamPermissionsCall) Fields(s ...googleapi.Field) *Gl // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalAddressesTestIamPermissionsCall) Context(ctx context.Context) *GlobalAddressesTestIamPermissionsCall { +func (c *HttpHealthChecksUpdateCall) Context(ctx context.Context) *HttpHealthChecksUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *GlobalAddressesTestIamPermissionsCall) Header() http.Header { +func (c *HttpHealthChecksUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalAddressesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("PUT", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "httpHealthCheck": c.httpHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalAddresses.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *GlobalAddressesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.httpHealthChecks.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *HttpHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -29025,7 +41599,7 @@ func (c *GlobalAddressesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -29037,14 +41611,21 @@ func (c *GlobalAddressesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.globalAddresses.testIamPermissions", + // "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.httpHealthChecks.update", // "parameterOrder": [ // "project", - // "resource" + // "httpHealthCheck" // ], // "parameters": { + // "httpHealthCheck": { + // "description": "Name of the HttpHealthCheck resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -29052,47 +41633,43 @@ func (c *GlobalAddressesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/global/addresses/{resource}/testIamPermissions", + // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "HttpHealthCheck" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.globalForwardingRules.delete": +// method id "compute.httpsHealthChecks.delete": -type GlobalForwardingRulesDeleteCall struct { - s *Service - project string - forwardingRule string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HttpsHealthChecksDeleteCall struct { + s *Service + project string + httpsHealthCheck string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified GlobalForwardingRule resource. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/delete -func (r *GlobalForwardingRulesService) Delete(project string, forwardingRule string) *GlobalForwardingRulesDeleteCall { - c := &GlobalForwardingRulesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified HttpsHealthCheck resource. +func (r *HttpsHealthChecksService) Delete(project string, httpsHealthCheck string) *HttpsHealthChecksDeleteCall { + c := &HttpsHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.forwardingRule = forwardingRule + c.httpsHealthCheck = httpsHealthCheck return c } @@ -29110,7 +41687,7 @@ func (r *GlobalForwardingRulesService) Delete(project string, forwardingRule str // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *GlobalForwardingRulesDeleteCall) RequestId(requestId string) *GlobalForwardingRulesDeleteCall { +func (c *HttpsHealthChecksDeleteCall) RequestId(requestId string) *HttpsHealthChecksDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -29118,7 +41695,7 @@ func (c *GlobalForwardingRulesDeleteCall) RequestId(requestId string) *GlobalFor // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalForwardingRulesDeleteCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesDeleteCall { +func (c *HttpsHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpsHealthChecksDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -29126,21 +41703,21 @@ func (c *GlobalForwardingRulesDeleteCall) Fields(s ...googleapi.Field) *GlobalFo // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalForwardingRulesDeleteCall) Context(ctx context.Context) *GlobalForwardingRulesDeleteCall { +func (c *HttpsHealthChecksDeleteCall) Context(ctx context.Context) *HttpsHealthChecksDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalForwardingRulesDeleteCall) Header() http.Header { +func (c *HttpsHealthChecksDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpsHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -29148,25 +41725,25 @@ func (c *GlobalForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "forwardingRule": c.forwardingRule, + "project": c.project, + "httpsHealthCheck": c.httpsHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalForwardingRules.delete" call. +// Do executes the "compute.httpsHealthChecks.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *GlobalForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -29197,16 +41774,16 @@ func (c *GlobalForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Deletes the specified GlobalForwardingRule resource.", + // "description": "Deletes the specified HttpsHealthCheck resource.", // "httpMethod": "DELETE", - // "id": "compute.globalForwardingRules.delete", + // "id": "compute.httpsHealthChecks.delete", // "parameterOrder": [ // "project", - // "forwardingRule" + // "httpsHealthCheck" // ], // "parameters": { - // "forwardingRule": { - // "description": "Name of the ForwardingRule resource to delete.", + // "httpsHealthCheck": { + // "description": "Name of the HttpsHealthCheck resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -29225,7 +41802,7 @@ func (c *GlobalForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // } // }, - // "path": "{project}/global/forwardingRules/{forwardingRule}", + // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", // "response": { // "$ref": "Operation" // }, @@ -29237,32 +41814,31 @@ func (c *GlobalForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.globalForwardingRules.get": +// method id "compute.httpsHealthChecks.get": -type GlobalForwardingRulesGetCall struct { - s *Service - project string - forwardingRule string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type HttpsHealthChecksGetCall struct { + s *Service + project string + httpsHealthCheck string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified GlobalForwardingRule resource. Get a list -// of available forwarding rules by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/get -func (r *GlobalForwardingRulesService) Get(project string, forwardingRule string) *GlobalForwardingRulesGetCall { - c := &GlobalForwardingRulesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified HttpsHealthCheck resource. Get a list of +// available HTTPS health checks by making a list() request. 
+func (r *HttpsHealthChecksService) Get(project string, httpsHealthCheck string) *HttpsHealthChecksGetCall { + c := &HttpsHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.forwardingRule = forwardingRule + c.httpsHealthCheck = httpsHealthCheck return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalForwardingRulesGetCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesGetCall { +func (c *HttpsHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpsHealthChecksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -29272,7 +41848,7 @@ func (c *GlobalForwardingRulesGetCall) Fields(s ...googleapi.Field) *GlobalForwa // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *GlobalForwardingRulesGetCall) IfNoneMatch(entityTag string) *GlobalForwardingRulesGetCall { +func (c *HttpsHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpsHealthChecksGetCall { c.ifNoneMatch_ = entityTag return c } @@ -29280,21 +41856,21 @@ func (c *GlobalForwardingRulesGetCall) IfNoneMatch(entityTag string) *GlobalForw // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalForwardingRulesGetCall) Context(ctx context.Context) *GlobalForwardingRulesGetCall { +func (c *HttpsHealthChecksGetCall) Context(ctx context.Context) *HttpsHealthChecksGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalForwardingRulesGetCall) Header() http.Header { +func (c *HttpsHealthChecksGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpsHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -29305,25 +41881,25 @@ func (c *GlobalForwardingRulesGetCall) doRequest(alt string) (*http.Response, er } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "forwardingRule": c.forwardingRule, + "project": c.project, + "httpsHealthCheck": c.httpsHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalForwardingRules.get" call. -// Exactly one of *ForwardingRule or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *ForwardingRule.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.httpsHealthChecks.get" call. +// Exactly one of *HttpsHealthCheck or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *HttpsHealthCheck.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *GlobalForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRule, error) { +func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealthCheck, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -29342,7 +41918,7 @@ func (c *GlobalForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*Forwar if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ForwardingRule{ + ret := &HttpsHealthCheck{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -29354,16 +41930,16 @@ func (c *GlobalForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*Forwar } return ret, nil // { - // "description": "Returns the specified GlobalForwardingRule resource. Get a list of available forwarding rules by making a list() request.", + // "description": "Returns the specified HttpsHealthCheck resource. Get a list of available HTTPS health checks by making a list() request.", // "httpMethod": "GET", - // "id": "compute.globalForwardingRules.get", + // "id": "compute.httpsHealthChecks.get", // "parameterOrder": [ // "project", - // "forwardingRule" + // "httpsHealthCheck" // ], // "parameters": { - // "forwardingRule": { - // "description": "Name of the ForwardingRule resource to return.", + // "httpsHealthCheck": { + // "description": "Name of the HttpsHealthCheck resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -29377,9 +41953,9 @@ func (c *GlobalForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*Forwar // "type": "string" // } // }, - // "path": "{project}/global/forwardingRules/{forwardingRule}", + // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", // "response": { - // "$ref": "ForwardingRule" + // "$ref": "HttpsHealthCheck" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -29390,24 +41966,23 @@ func (c *GlobalForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*Forwar } -// method id "compute.globalForwardingRules.insert": +// method id "compute.httpsHealthChecks.insert": -type GlobalForwardingRulesInsertCall struct { - s *Service - project string - forwardingrule *ForwardingRule - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type HttpsHealthChecksInsertCall struct { + s *Service + project string + httpshealthcheck *HttpsHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a GlobalForwardingRule resource in the specified -// project using the data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/insert -func (r *GlobalForwardingRulesService) Insert(project string, forwardingrule *ForwardingRule) *GlobalForwardingRulesInsertCall { - c := &GlobalForwardingRulesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a HttpsHealthCheck resource in the specified project +// using the data included in the request. 
+func (r *HttpsHealthChecksService) Insert(project string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksInsertCall { + c := &HttpsHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.forwardingrule = forwardingrule + c.httpshealthcheck = httpshealthcheck return c } @@ -29425,7 +42000,7 @@ func (r *GlobalForwardingRulesService) Insert(project string, forwardingrule *Fo // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *GlobalForwardingRulesInsertCall) RequestId(requestId string) *GlobalForwardingRulesInsertCall { +func (c *HttpsHealthChecksInsertCall) RequestId(requestId string) *HttpsHealthChecksInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -29433,7 +42008,7 @@ func (c *GlobalForwardingRulesInsertCall) RequestId(requestId string) *GlobalFor // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalForwardingRulesInsertCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesInsertCall { +func (c *HttpsHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpsHealthChecksInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -29441,34 +42016,34 @@ func (c *GlobalForwardingRulesInsertCall) Fields(s ...googleapi.Field) *GlobalFo // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalForwardingRulesInsertCall) Context(ctx context.Context) *GlobalForwardingRulesInsertCall { +func (c *HttpsHealthChecksInsertCall) Context(ctx context.Context) *HttpsHealthChecksInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalForwardingRulesInsertCall) Header() http.Header { +func (c *HttpsHealthChecksInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpsHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -29478,14 +42053,14 @@ func (c *GlobalForwardingRulesInsertCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalForwardingRules.insert" call. +// Do executes the "compute.httpsHealthChecks.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *GlobalForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -29516,9 +42091,9 @@ func (c *GlobalForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Creates a GlobalForwardingRule resource in the specified project using the data included in the request.", + // "description": "Creates a HttpsHealthCheck resource in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.globalForwardingRules.insert", + // "id": "compute.httpsHealthChecks.insert", // "parameterOrder": [ // "project" // ], @@ -29536,9 +42111,9 @@ func (c *GlobalForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // } // }, - // "path": "{project}/global/forwardingRules", + // "path": "{project}/global/httpsHealthChecks", // "request": { - // "$ref": "ForwardingRule" + // "$ref": "HttpsHealthCheck" // }, // "response": { // "$ref": "Operation" @@ -29551,9 +42126,9 @@ func (c *GlobalForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.globalForwardingRules.list": +// method id "compute.httpsHealthChecks.list": -type GlobalForwardingRulesListCall struct { +type HttpsHealthChecksListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -29562,11 +42137,10 @@ type GlobalForwardingRulesListCall struct { header_ http.Header } -// List: Retrieves a list of GlobalForwardingRule resources available to +// List: Retrieves the list of HttpsHealthCheck resources available to // the specified project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/list -func (r *GlobalForwardingRulesService) List(project string) *GlobalForwardingRulesListCall { - c := &GlobalForwardingRulesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *HttpsHealthChecksService) List(project string) *HttpsHealthChecksListCall { + c := &HttpsHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -29597,7 +42171,7 @@ func (r *GlobalForwardingRulesService) List(project string) *GlobalForwardingRul // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *GlobalForwardingRulesListCall) Filter(filter string) *GlobalForwardingRulesListCall { +func (c *HttpsHealthChecksListCall) Filter(filter string) *HttpsHealthChecksListCall { c.urlParams_.Set("filter", filter) return c } @@ -29608,7 +42182,7 @@ func (c *GlobalForwardingRulesListCall) Filter(filter string) *GlobalForwardingR // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. 
// (Default: 500) -func (c *GlobalForwardingRulesListCall) MaxResults(maxResults int64) *GlobalForwardingRulesListCall { +func (c *HttpsHealthChecksListCall) MaxResults(maxResults int64) *HttpsHealthChecksListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -29625,264 +42199,83 @@ func (c *GlobalForwardingRulesListCall) MaxResults(maxResults int64) *GlobalForw // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *GlobalForwardingRulesListCall) OrderBy(orderBy string) *GlobalForwardingRulesListCall { +func (c *HttpsHealthChecksListCall) OrderBy(orderBy string) *HttpsHealthChecksListCall { c.urlParams_.Set("orderBy", orderBy) return c } -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *GlobalForwardingRulesListCall) PageToken(pageToken string) *GlobalForwardingRulesListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *GlobalForwardingRulesListCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *GlobalForwardingRulesListCall) IfNoneMatch(entityTag string) *GlobalForwardingRulesListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *GlobalForwardingRulesListCall) Context(ctx context.Context) *GlobalForwardingRulesListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *GlobalForwardingRulesListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *GlobalForwardingRulesListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.globalForwardingRules.list" call. -// Exactly one of *ForwardingRuleList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ForwardingRuleList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. 
Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingRuleList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ForwardingRuleList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves a list of GlobalForwardingRule resources available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.globalForwardingRules.list", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/forwardingRules", - // "response": { - // "$ref": "ForwardingRuleList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *GlobalForwardingRulesListCall) Pages(ctx context.Context, f func(*ForwardingRuleList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.globalForwardingRules.setLabels": - -type GlobalForwardingRulesSetLabelsCall struct { - s *Service - project string - resource string - globalsetlabelsrequest *GlobalSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetLabels: Sets the labels on the specified resource. To learn more -// about labels, read the Labeling Resources documentation. -func (r *GlobalForwardingRulesService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *GlobalForwardingRulesSetLabelsCall { - c := &GlobalForwardingRulesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.resource = resource - c.globalsetlabelsrequest = globalsetlabelsrequest - return c -} - +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *HttpsHealthChecksListCall) PageToken(pageToken string) *HttpsHealthChecksListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalForwardingRulesSetLabelsCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesSetLabelsCall { +func (c *HttpsHealthChecksListCall) Fields(s ...googleapi.Field) *HttpsHealthChecksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *HttpsHealthChecksListCall) IfNoneMatch(entityTag string) *HttpsHealthChecksListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalForwardingRulesSetLabelsCall) Context(ctx context.Context) *GlobalForwardingRulesSetLabelsCall { +func (c *HttpsHealthChecksListCall) Context(ctx context.Context) *HttpsHealthChecksListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalForwardingRulesSetLabelsCall) Header() http.Header { +func (c *HttpsHealthChecksListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalForwardingRulesSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpsHealthChecksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{resource}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalForwardingRules.setLabels" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *GlobalForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.httpsHealthChecks.list" call. +// Exactly one of *HttpsHealthCheckList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *HttpsHealthCheckList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHealthCheckList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -29901,7 +42294,7 @@ func (c *GlobalForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (* if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &HttpsHealthCheckList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -29913,64 +42306,98 @@ func (c *GlobalForwardingRulesSetLabelsCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Sets the labels on the specified resource. To learn more about labels, read the Labeling Resources documentation.", - // "httpMethod": "POST", - // "id": "compute.globalForwardingRules.setLabels", + // "description": "Retrieves the list of HttpsHealthCheck resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.httpsHealthChecks.list", // "parameterOrder": [ - // "project", - // "resource" + // "project" // ], // "parameters": { + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/global/forwardingRules/{resource}/setLabels", - // "request": { - // "$ref": "GlobalSetLabelsRequest" - // }, + // "path": "{project}/global/httpsHealthChecks", // "response": { - // "$ref": "Operation" + // "$ref": "HttpsHealthCheckList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.globalForwardingRules.setTarget": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *HttpsHealthChecksListCall) Pages(ctx context.Context, f func(*HttpsHealthCheckList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type GlobalForwardingRulesSetTargetCall struct { - s *Service - project string - forwardingRule string - targetreference *TargetReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.httpsHealthChecks.patch": + +type HttpsHealthChecksPatchCall struct { + s *Service + project string + httpsHealthCheck string + httpshealthcheck *HttpsHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetTarget: Changes target URL for the GlobalForwardingRule resource. -// The new target should be of the same type as the old target. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules/setTarget -func (r *GlobalForwardingRulesService) SetTarget(project string, forwardingRule string, targetreference *TargetReference) *GlobalForwardingRulesSetTargetCall { - c := &GlobalForwardingRulesSetTargetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates a HttpsHealthCheck resource in the specified project +// using the data included in the request. This method supports PATCH +// semantics and uses the JSON merge patch format and processing rules. 
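The Pages helper added above drives the pageToken/nextPageToken loop on the caller's behalf. The sketch below is illustrative only and not part of the vendored diff: it assumes the vendored package path is google.golang.org/api/compute/v0.beta (use whichever compute package this file actually belongs to), that a *compute.Service was built elsewhere (for example with compute.New and an authorized *http.Client), that HttpsHealthChecksService.List is the usual generated constructor defined earlier in this file, and that HttpsHealthCheckList carries the standard Items slice.

// Assumed imports: "context" (or golang.org/x/net/context for older vendored
// copies) and compute "google.golang.org/api/compute/v0.beta".
//
// listAllHTTPSHealthChecks walks every page of results. Pages re-issues the
// list request with each nextPageToken until the server returns an empty
// token; a non-nil error from the callback stops the iteration early.
func listAllHTTPSHealthChecks(ctx context.Context, svc *compute.Service, project string) ([]*compute.HttpsHealthCheck, error) {
	var all []*compute.HttpsHealthCheck
	call := svc.HttpsHealthChecks.List(project).MaxResults(100) // acceptable values are 0-500; the server default is 500
	err := call.Pages(ctx, func(page *compute.HttpsHealthCheckList) error {
		all = append(all, page.Items...) // Items assumed from the usual generated list schema
		return nil
	})
	return all, err
}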
+func (r *HttpsHealthChecksService) Patch(project string, httpsHealthCheck string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksPatchCall { + c := &HttpsHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.forwardingRule = forwardingRule - c.targetreference = targetreference + c.httpsHealthCheck = httpsHealthCheck + c.httpshealthcheck = httpshealthcheck return c } @@ -29988,7 +42415,7 @@ func (r *GlobalForwardingRulesService) SetTarget(project string, forwardingRule // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *GlobalForwardingRulesSetTargetCall) RequestId(requestId string) *GlobalForwardingRulesSetTargetCall { +func (c *HttpsHealthChecksPatchCall) RequestId(requestId string) *HttpsHealthChecksPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -29996,7 +42423,7 @@ func (c *GlobalForwardingRulesSetTargetCall) RequestId(requestId string) *Global // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalForwardingRulesSetTargetCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesSetTargetCall { +func (c *HttpsHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpsHealthChecksPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -30004,52 +42431,52 @@ func (c *GlobalForwardingRulesSetTargetCall) Fields(s ...googleapi.Field) *Globa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalForwardingRulesSetTargetCall) Context(ctx context.Context) *GlobalForwardingRulesSetTargetCall { +func (c *HttpsHealthChecksPatchCall) Context(ctx context.Context) *HttpsHealthChecksPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalForwardingRulesSetTargetCall) Header() http.Header { +func (c *HttpsHealthChecksPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpsHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}/setTarget") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("PATCH", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "forwardingRule": c.forwardingRule, + "project": c.project, + "httpsHealthCheck": c.httpsHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalForwardingRules.setTarget" call. 
+// Do executes the "compute.httpsHealthChecks.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *GlobalForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -30080,16 +42507,16 @@ func (c *GlobalForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Changes target URL for the GlobalForwardingRule resource. The new target should be of the same type as the old target.", - // "httpMethod": "POST", - // "id": "compute.globalForwardingRules.setTarget", + // "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.httpsHealthChecks.patch", // "parameterOrder": [ // "project", - // "forwardingRule" + // "httpsHealthCheck" // ], // "parameters": { - // "forwardingRule": { - // "description": "Name of the ForwardingRule resource in which target is to be set.", + // "httpsHealthCheck": { + // "description": "Name of the HttpsHealthCheck resource to patch.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -30108,9 +42535,9 @@ func (c *GlobalForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (* // "type": "string" // } // }, - // "path": "{project}/global/forwardingRules/{forwardingRule}/setTarget", + // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", // "request": { - // "$ref": "TargetReference" + // "$ref": "HttpsHealthCheck" // }, // "response": { // "$ref": "Operation" @@ -30123,9 +42550,9 @@ func (c *GlobalForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (* } -// method id "compute.globalForwardingRules.testIamPermissions": +// method id "compute.httpsHealthChecks.testIamPermissions": -type GlobalForwardingRulesTestIamPermissionsCall struct { +type HttpsHealthChecksTestIamPermissionsCall struct { s *Service project string resource string @@ -30137,8 +42564,8 @@ type GlobalForwardingRulesTestIamPermissionsCall struct { // TestIamPermissions: Returns permissions that a caller has on the // specified resource. 
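Patch above sends only the fields that are set on the request body (JSON merge patch), and the optional requestId lets the server deduplicate retried requests. A hedged caller-side sketch under the same assumptions as the listing example; the Port field comes from the public HttpsHealthCheck schema rather than from this hunk, and requestID is a caller-supplied, valid, non-zero UUID.

// patchHealthCheckPort changes only the port of an existing HTTPS health
// check; fields left unset keep their current values because this is a merge
// patch rather than a full replacement.
func patchHealthCheckPort(ctx context.Context, svc *compute.Service, project, name, requestID string) (*compute.Operation, error) {
	body := &compute.HttpsHealthCheck{Port: 8443} // Port assumed from the HttpsHealthCheck schema
	return svc.HttpsHealthChecks.Patch(project, name, body).
		RequestId(requestID). // retries carrying the same UUID are ignored instead of re-applied
		Context(ctx).
		Do()
}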
-func (r *GlobalForwardingRulesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *GlobalForwardingRulesTestIamPermissionsCall { - c := &GlobalForwardingRulesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *HttpsHealthChecksService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *HttpsHealthChecksTestIamPermissionsCall { + c := &HttpsHealthChecksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.resource = resource c.testpermissionsrequest = testpermissionsrequest @@ -30148,7 +42575,7 @@ func (r *GlobalForwardingRulesService) TestIamPermissions(project string, resour // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalForwardingRulesTestIamPermissionsCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesTestIamPermissionsCall { +func (c *HttpsHealthChecksTestIamPermissionsCall) Fields(s ...googleapi.Field) *HttpsHealthChecksTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -30156,21 +42583,21 @@ func (c *GlobalForwardingRulesTestIamPermissionsCall) Fields(s ...googleapi.Fiel // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalForwardingRulesTestIamPermissionsCall) Context(ctx context.Context) *GlobalForwardingRulesTestIamPermissionsCall { +func (c *HttpsHealthChecksTestIamPermissionsCall) Context(ctx context.Context) *HttpsHealthChecksTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalForwardingRulesTestIamPermissionsCall) Header() http.Header { +func (c *HttpsHealthChecksTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalForwardingRulesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpsHealthChecksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -30183,7 +42610,7 @@ func (c *GlobalForwardingRulesTestIamPermissionsCall) doRequest(alt string) (*ht } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -30194,14 +42621,14 @@ func (c *GlobalForwardingRulesTestIamPermissionsCall) doRequest(alt string) (*ht return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalForwardingRules.testIamPermissions" call. +// Do executes the "compute.httpsHealthChecks.testIamPermissions" call. // Exactly one of *TestPermissionsResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *TestPermissionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. 
Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *GlobalForwardingRulesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *HttpsHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -30234,7 +42661,7 @@ func (c *GlobalForwardingRulesTestIamPermissionsCall) Do(opts ...googleapi.CallO // { // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.globalForwardingRules.testIamPermissions", + // "id": "compute.httpsHealthChecks.testIamPermissions", // "parameterOrder": [ // "project", // "resource" @@ -30250,12 +42677,12 @@ func (c *GlobalForwardingRulesTestIamPermissionsCall) Do(opts ...googleapi.CallO // "resource": { // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + // "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/forwardingRules/{resource}/testIamPermissions", + // "path": "{project}/global/httpsHealthChecks/{resource}/testIamPermissions", // "request": { // "$ref": "TestPermissionsRequest" // }, @@ -30271,282 +42698,51 @@ func (c *GlobalForwardingRulesTestIamPermissionsCall) Do(opts ...googleapi.CallO } -// method id "compute.globalOperations.aggregatedList": +// method id "compute.httpsHealthChecks.update": -type GlobalOperationsAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type HttpsHealthChecksUpdateCall struct { + s *Service + project string + httpsHealthCheck string + httpshealthcheck *HttpsHealthCheck + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AggregatedList: Retrieves an aggregated list of all operations. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/aggregatedList -func (r *GlobalOperationsService) AggregatedList(project string) *GlobalOperationsAggregatedListCall { - c := &GlobalOperationsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates a HttpsHealthCheck resource in the specified project +// using the data included in the request. +func (r *HttpsHealthChecksService) Update(project string, httpsHealthCheck string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksUpdateCall { + c := &HttpsHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.httpsHealthCheck = httpsHealthCheck + c.httpshealthcheck = httpshealthcheck return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *GlobalOperationsAggregatedListCall) Filter(filter string) *GlobalOperationsAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *GlobalOperationsAggregatedListCall) MaxResults(maxResults int64) *GlobalOperationsAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *GlobalOperationsAggregatedListCall) OrderBy(orderBy string) *GlobalOperationsAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *GlobalOperationsAggregatedListCall) PageToken(pageToken string) *GlobalOperationsAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *GlobalOperationsAggregatedListCall) Fields(s ...googleapi.Field) *GlobalOperationsAggregatedListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. 
This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *GlobalOperationsAggregatedListCall) IfNoneMatch(entityTag string) *GlobalOperationsAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *GlobalOperationsAggregatedListCall) Context(ctx context.Context) *GlobalOperationsAggregatedListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *GlobalOperationsAggregatedListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *GlobalOperationsAggregatedListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/operations") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.globalOperations.aggregatedList" call. -// Exactly one of *OperationAggregatedList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *OperationAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (*OperationAggregatedList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &OperationAggregatedList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves an aggregated list of all operations.", - // "httpMethod": "GET", - // "id": "compute.globalOperations.aggregatedList", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). 
The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/aggregated/operations", - // "response": { - // "$ref": "OperationAggregatedList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
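The filter grammar documented on these list calls is the same everywhere: field_name, then eq or ne, then a literal, with string literals matched as full RE2 regular expressions and multiple parenthesized expressions combined with AND semantics. A small illustrative sketch; the resource names are made up, and the Filter setter is the usual generated option defined alongside the other setters earlier in this file.

// Single expression: every resource whose name is not "example-check".
const filterNotExample = "name ne example-check"

// String fields are matched as RE2 regular expressions against the whole field.
const filterByPrefix = "name eq prod-.*"

// Parenthesized expressions are ANDed together.
const filterCombined = "(port eq 443) (name eq prod-.*)"

// Hypothetical usage on the list call shown above:
//   svc.HttpsHealthChecks.List(project).Filter(filterCombined)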
-func (c *GlobalOperationsAggregatedListCall) Pages(ctx context.Context, f func(*OperationAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.globalOperations.delete": - -type GlobalOperationsDeleteCall struct { - s *Service - project string - operation string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes the specified Operations resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/delete -func (r *GlobalOperationsService) Delete(project string, operation string) *GlobalOperationsDeleteCall { - c := &GlobalOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.operation = operation +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *HttpsHealthChecksUpdateCall) RequestId(requestId string) *HttpsHealthChecksUpdateCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalOperationsDeleteCall) Fields(s ...googleapi.Field) *GlobalOperationsDeleteCall { +func (c *HttpsHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpsHealthChecksUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -30554,62 +42750,92 @@ func (c *GlobalOperationsDeleteCall) Fields(s ...googleapi.Field) *GlobalOperati // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalOperationsDeleteCall) Context(ctx context.Context) *GlobalOperationsDeleteCall { +func (c *HttpsHealthChecksUpdateCall) Context(ctx context.Context) *HttpsHealthChecksUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *GlobalOperationsDeleteCall) Header() http.Header { +func (c *HttpsHealthChecksUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *HttpsHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("PUT", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "operation": c.operation, + "project": c.project, + "httpsHealthCheck": c.httpsHealthCheck, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalOperations.delete" call. -func (c *GlobalOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { +// Do executes the "compute.httpsHealthChecks.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } if err != nil { - return err + return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return nil, err } - return nil + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil // { - // "description": "Deletes the specified Operations resource.", - // "httpMethod": "DELETE", - // "id": "compute.globalOperations.delete", + // "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.httpsHealthChecks.update", // "parameterOrder": [ // "project", - // "operation" + // "httpsHealthCheck" // ], // "parameters": { - // "operation": { - // "description": "Name of the Operations resource to delete.", + // "httpsHealthCheck": { + // "description": "Name of the HttpsHealthCheck resource to update.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -30621,9 +42847,20 @@ func (c *GlobalOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/global/operations/{operation}", + // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + // "request": { + // "$ref": "HttpsHealthCheck" + // }, + // "response": { + // "$ref": "Operation" + // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/compute" @@ -30632,93 +42869,97 @@ func (c *GlobalOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { } -// method id "compute.globalOperations.get": +// method id "compute.images.delete": -type GlobalOperationsGetCall struct { - s *Service - project string - operation string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ImagesDeleteCall struct { + s *Service + project string + image string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Retrieves the specified Operations resource. Get a list of -// operations by making a list() request. 
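Update, unlike Patch, is a PUT: the body replaces the whole resource, so fields left unset are cleared rather than preserved. A hedged sketch under the same assumptions as the earlier examples; a realistic caller would first fetch the existing check with the generated Get call (defined elsewhere in this file), modify it, and send the complete object back.

// replaceHealthCheck PUTs a fully populated HttpsHealthCheck, overwriting the
// existing resource. `updated` should normally come from a prior Get.
func replaceHealthCheck(ctx context.Context, svc *compute.Service, project, name, requestID string, updated *compute.HttpsHealthCheck) (*compute.Operation, error) {
	return svc.HttpsHealthChecks.Update(project, name, updated).
		RequestId(requestID).
		Context(ctx).
		Do()
}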
-// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/get -func (r *GlobalOperationsService) Get(project string, operation string) *GlobalOperationsGetCall { - c := &GlobalOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified image. +// For details, see https://cloud.google.com/compute/docs/reference/latest/images/delete +func (r *ImagesService) Delete(project string, image string) *ImagesDeleteCall { + c := &ImagesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.operation = operation + c.image = image + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ImagesDeleteCall) RequestId(requestId string) *ImagesDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalOperationsGetCall) Fields(s ...googleapi.Field) *GlobalOperationsGetCall { +func (c *ImagesDeleteCall) Fields(s ...googleapi.Field) *ImagesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *GlobalOperationsGetCall) IfNoneMatch(entityTag string) *GlobalOperationsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalOperationsGetCall) Context(ctx context.Context) *GlobalOperationsGetCall { +func (c *ImagesDeleteCall) Context(ctx context.Context) *ImagesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
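As the Do doc comments above note, any non-2xx response surfaces as a *googleapi.Error, which lets callers branch on the HTTP status code. An illustrative sketch around the ImagesService.Delete call added here, treating a 404 as "already deleted"; it additionally assumes the "net/http" and "google.golang.org/api/googleapi" imports.

// deleteImageIfPresent deletes an image and ignores the case where it is
// already gone.
func deleteImageIfPresent(ctx context.Context, svc *compute.Service, project, image, requestID string) error {
	_, err := svc.Images.Delete(project, image).
		RequestId(requestID).
		Context(ctx).
		Do()
	if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == http.StatusNotFound {
		return nil // the image does not exist; nothing to delete
	}
	return err
}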
-func (c *GlobalOperationsGetCall) Header() http.Header { +func (c *ImagesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalOperationsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "operation": c.operation, + "project": c.project, + "image": c.image, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalOperations.get" call. +// Do executes the "compute.images.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *GlobalOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -30749,16 +42990,16 @@ func (c *GlobalOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Retrieves the specified Operations resource. Get a list of operations by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.globalOperations.get", + // "description": "Deletes the specified image.", + // "httpMethod": "DELETE", + // "id": "compute.images.delete", // "parameterOrder": [ // "project", - // "operation" + // "image" // ], // "parameters": { - // "operation": { - // "description": "Name of the Operations resource to return.", + // "image": { + // "description": "Name of the image resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -30770,172 +43011,126 @@ func (c *GlobalOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/global/operations/{operation}", + // "path": "{project}/global/images/{image}", // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.globalOperations.list": - -type GlobalOperationsListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} +// method id "compute.images.deprecate": -// List: Retrieves a list of Operation resources contained within the -// specified project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/globalOperations/list -func (r *GlobalOperationsService) List(project string) *GlobalOperationsListCall { - c := &GlobalOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c +type ImagesDeprecateCall struct { + s *Service + project string + image string + deprecationstatus *DeprecationStatus + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// Deprecate: Sets the deprecation status of an image. // -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *GlobalOperationsListCall) Filter(filter string) *GlobalOperationsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. 
-// (Default: 500) -func (c *GlobalOperationsListCall) MaxResults(maxResults int64) *GlobalOperationsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) +// If an empty request body is given, clears the deprecation status +// instead. +// For details, see https://cloud.google.com/compute/docs/reference/latest/images/deprecate +func (r *ImagesService) Deprecate(project string, image string, deprecationstatus *DeprecationStatus) *ImagesDeprecateCall { + c := &ImagesDeprecateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.image = image + c.deprecationstatus = deprecationstatus return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *GlobalOperationsListCall) OrderBy(orderBy string) *GlobalOperationsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *GlobalOperationsListCall) PageToken(pageToken string) *GlobalOperationsListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ImagesDeprecateCall) RequestId(requestId string) *ImagesDeprecateCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalOperationsListCall) Fields(s ...googleapi.Field) *GlobalOperationsListCall { +func (c *ImagesDeprecateCall) Fields(s ...googleapi.Field) *ImagesDeprecateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *GlobalOperationsListCall) IfNoneMatch(entityTag string) *GlobalOperationsListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalOperationsListCall) Context(ctx context.Context) *GlobalOperationsListCall { +func (c *ImagesDeprecateCall) Context(ctx context.Context) *ImagesDeprecateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalOperationsListCall) Header() http.Header { +func (c *ImagesDeprecateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalOperationsListCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesDeprecateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.deprecationstatus) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}/deprecate") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "image": c.image, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalOperations.list" call. -// Exactly one of *OperationList or error will be non-nil. Any non-2xx +// Do executes the "compute.images.deprecate" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *OperationList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationList, error) { +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -30954,7 +43149,7 @@ func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &OperationList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -30966,34 +43161,19 @@ func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL } return ret, nil // { - // "description": "Retrieves a list of Operation resources contained within the specified project.", - // "httpMethod": "GET", - // "id": "compute.globalOperations.list", + // "description": "Sets the deprecation status of an image.\n\nIf an empty request body is given, clears the deprecation status instead.", + // "httpMethod": "POST", + // "id": "compute.images.deprecate", // "parameterOrder": [ - // "project" + // "project", + // "image" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "image": { + // "description": "Image name.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, // "project": { @@ -31002,132 +43182,115 @@ func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/global/operations", + // "path": "{project}/global/images/{image}/deprecate", + // "request": { + // "$ref": "DeprecationStatus" + // }, // "response": { - // "$ref": "OperationList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *GlobalOperationsListCall) Pages(ctx context.Context, f func(*OperationList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.healthChecks.delete": +// method id "compute.images.get": -type HealthChecksDeleteCall struct { - s *Service - project string - healthCheck string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ImagesGetCall struct { + s *Service + project string + image string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified HealthCheck resource. -func (r *HealthChecksService) Delete(project string, healthCheck string) *HealthChecksDeleteCall { - c := &HealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified image. Get a list of available images by +// making a list() request. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/images/get +func (r *ImagesService) Get(project string, image string) *ImagesGetCall { + c := &ImagesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.healthCheck = healthCheck - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *HealthChecksDeleteCall) RequestId(requestId string) *HealthChecksDeleteCall { - c.urlParams_.Set("requestId", requestId) + c.image = image return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HealthChecksDeleteCall) Fields(s ...googleapi.Field) *HealthChecksDeleteCall { +func (c *ImagesGetCall) Fields(s ...googleapi.Field) *ImagesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ImagesGetCall) IfNoneMatch(entityTag string) *ImagesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HealthChecksDeleteCall) Context(ctx context.Context) *HealthChecksDeleteCall { +func (c *ImagesGetCall) Context(ctx context.Context) *ImagesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HealthChecksDeleteCall) Header() http.Header { +func (c *ImagesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "healthCheck": c.healthCheck, + "project": c.project, + "image": c.image, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.images.get" call. +// Exactly one of *Image or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Image.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -31146,7 +43309,7 @@ func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, e if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Image{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -31157,17 +43320,17 @@ func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, e return nil, err } return ret, nil - // { - // "description": "Deletes the specified HealthCheck resource.", - // "httpMethod": "DELETE", - // "id": "compute.healthChecks.delete", + // { + // "description": "Returns the specified image. Get a list of available images by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.images.get", // "parameterOrder": [ // "project", - // "healthCheck" + // "image" // ], // "parameters": { - // "healthCheck": { - // "description": "Name of the HealthCheck resource to delete.", + // "image": { + // "description": "Name of the image resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -31179,50 +43342,46 @@ func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, e // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/global/healthChecks/{healthCheck}", + // "path": "{project}/global/images/{image}", // "response": { - // "$ref": "Operation" + // "$ref": "Image" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.healthChecks.get": +// method id "compute.images.getFromFamily": -type HealthChecksGetCall struct { +type ImagesGetFromFamilyCall struct { s *Service project string - healthCheck string + family string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the specified HealthCheck resource. Get a list of -// available health checks by making a list() request. -func (r *HealthChecksService) Get(project string, healthCheck string) *HealthChecksGetCall { - c := &HealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetFromFamily: Returns the latest image that is part of an image +// family and is not deprecated. +func (r *ImagesService) GetFromFamily(project string, family string) *ImagesGetFromFamilyCall { + c := &ImagesGetFromFamilyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.healthCheck = healthCheck + c.family = family return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HealthChecksGetCall) Fields(s ...googleapi.Field) *HealthChecksGetCall { +func (c *ImagesGetFromFamilyCall) Fields(s ...googleapi.Field) *ImagesGetFromFamilyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -31232,7 +43391,7 @@ func (c *HealthChecksGetCall) Fields(s ...googleapi.Field) *HealthChecksGetCall // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *HealthChecksGetCall) IfNoneMatch(entityTag string) *HealthChecksGetCall { +func (c *ImagesGetFromFamilyCall) IfNoneMatch(entityTag string) *ImagesGetFromFamilyCall { c.ifNoneMatch_ = entityTag return c } @@ -31240,21 +43399,21 @@ func (c *HealthChecksGetCall) IfNoneMatch(entityTag string) *HealthChecksGetCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HealthChecksGetCall) Context(ctx context.Context) *HealthChecksGetCall { +func (c *ImagesGetFromFamilyCall) Context(ctx context.Context) *ImagesGetFromFamilyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
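A short sketch of the compute.images.get call and its If-None-Match handling described above; svc is assumed to be an authenticated *compute.Service built as in the earlier sketch, and the project and image names are placeholders.

package computeexamples // hypothetical package for these sketches

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1" // import path assumed
	"google.golang.org/api/googleapi"
)

// getImage fetches image metadata, optionally passing a previously seen ETag
// so an unchanged resource comes back as http.StatusNotModified.
func getImage(ctx context.Context, svc *compute.Service, etag string) (*compute.Image, error) {
	call := svc.Images.Get("my-project", "my-image-v1").Context(ctx)
	if etag != "" {
		call = call.IfNoneMatch(etag)
	}
	img, err := call.Do()
	if googleapi.IsNotModified(err) {
		log.Print("image unchanged since the last request")
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	return img, nil
}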
-func (c *HealthChecksGetCall) Header() http.Header { +func (c *ImagesGetFromFamilyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesGetFromFamilyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -31265,25 +43424,25 @@ func (c *HealthChecksGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/family/{family}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "healthCheck": c.healthCheck, + "project": c.project, + "family": c.family, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.get" call. -// Exactly one of *HealthCheck or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *HealthCheck.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, error) { +// Do executes the "compute.images.getFromFamily" call. +// Exactly one of *Image or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Image.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -31302,7 +43461,7 @@ func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, er if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HealthCheck{ + ret := &Image{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -31314,16 +43473,16 @@ func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, er } return ret, nil // { - // "description": "Returns the specified HealthCheck resource. 
Get a list of available health checks by making a list() request.", + // "description": "Returns the latest image that is part of an image family and is not deprecated.", // "httpMethod": "GET", - // "id": "compute.healthChecks.get", + // "id": "compute.images.getFromFamily", // "parameterOrder": [ // "project", - // "healthCheck" + // "family" // ], // "parameters": { - // "healthCheck": { - // "description": "Name of the HealthCheck resource to return.", + // "family": { + // "description": "Name of the image family to search for.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -31337,9 +43496,9 @@ func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, er // "type": "string" // } // }, - // "path": "{project}/global/healthChecks/{healthCheck}", + // "path": "{project}/global/images/family/{family}", // "response": { - // "$ref": "HealthCheck" + // "$ref": "Image" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -31350,23 +43509,31 @@ func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, er } -// method id "compute.healthChecks.insert": +// method id "compute.images.insert": -type HealthChecksInsertCall struct { - s *Service - project string - healthcheck *HealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ImagesInsertCall struct { + s *Service + project string + image *Image + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a HealthCheck resource in the specified project using -// the data included in the request. -func (r *HealthChecksService) Insert(project string, healthcheck *HealthCheck) *HealthChecksInsertCall { - c := &HealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates an image in the specified project using the data +// included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/images/insert +func (r *ImagesService) Insert(project string, image *Image) *ImagesInsertCall { + c := &ImagesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.healthcheck = healthcheck + c.image = image + return c +} + +// ForceCreate sets the optional parameter "forceCreate": Force image +// creation if true. +func (c *ImagesInsertCall) ForceCreate(forceCreate bool) *ImagesInsertCall { + c.urlParams_.Set("forceCreate", fmt.Sprint(forceCreate)) return c } @@ -31384,7 +43551,7 @@ func (r *HealthChecksService) Insert(project string, healthcheck *HealthCheck) * // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *HealthChecksInsertCall) RequestId(requestId string) *HealthChecksInsertCall { +func (c *ImagesInsertCall) RequestId(requestId string) *ImagesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -31392,7 +43559,7 @@ func (c *HealthChecksInsertCall) RequestId(requestId string) *HealthChecksInsert // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
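To illustrate the compute.images.getFromFamily and compute.images.insert calls above, a sketch that resolves the newest non-deprecated image in a family and creates a copy of it in the caller's project; the Image.SourceImage field and all names are assumptions, and svc is an authenticated *compute.Service.

package computeexamples // hypothetical package for these sketches

import (
	"context"

	compute "google.golang.org/api/compute/v1" // import path assumed
)

// cloneLatestFromFamily resolves the latest image in a public family and
// creates a project-local copy, forcing creation and passing a request ID.
func cloneLatestFromFamily(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	src, err := svc.Images.GetFromFamily("debian-cloud", "debian-8").Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	img := &compute.Image{
		Name:        "my-debian-copy",
		SourceImage: src.SelfLink, // SourceImage is assumed from the compute API schema
	}
	return svc.Images.Insert("my-project", img).
		ForceCreate(true).
		RequestId("1b9d6bcd-bbfd-4b2d-9b5d-ab8dfbbd4bed"). // placeholder UUID
		Context(ctx).
		Do()
}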
-func (c *HealthChecksInsertCall) Fields(s ...googleapi.Field) *HealthChecksInsertCall { +func (c *ImagesInsertCall) Fields(s ...googleapi.Field) *ImagesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -31400,34 +43567,34 @@ func (c *HealthChecksInsertCall) Fields(s ...googleapi.Field) *HealthChecksInser // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HealthChecksInsertCall) Context(ctx context.Context) *HealthChecksInsertCall { +func (c *ImagesInsertCall) Context(ctx context.Context) *ImagesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HealthChecksInsertCall) Header() http.Header { +func (c *ImagesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.image) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -31437,14 +43604,14 @@ func (c *HealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.insert" call. +// Do executes the "compute.images.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -31475,13 +43642,18 @@ func (c *HealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Creates a HealthCheck resource in the specified project using the data included in the request.", + // "description": "Creates an image in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.healthChecks.insert", + // "id": "compute.images.insert", // "parameterOrder": [ // "project" // ], // "parameters": { + // "forceCreate": { + // "description": "Force image creation if true.", + // "location": "query", + // "type": "boolean" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -31495,24 +43667,27 @@ func (c *HealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, e // "type": "string" // } // }, - // "path": "{project}/global/healthChecks", + // "path": "{project}/global/images", // "request": { - // "$ref": "HealthCheck" + // "$ref": "Image" // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "compute.healthChecks.list": +// method id "compute.images.list": -type HealthChecksListCall struct { +type ImagesListCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -31521,10 +43696,16 @@ type HealthChecksListCall struct { header_ http.Header } -// List: Retrieves the list of HealthCheck resources available to the -// specified project. -func (r *HealthChecksService) List(project string) *HealthChecksListCall { - c := &HealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of private images available to the specified +// project. Private images are images you create that belong to your +// project. This method does not get any images that belong to other +// projects, including publicly-available images, like Debian 8. If you +// want to get a list of publicly-available images, use this method to +// make a request to the respective image project, such as debian-cloud +// or windows-cloud. +// For details, see https://cloud.google.com/compute/docs/reference/latest/images/list +func (r *ImagesService) List(project string) *ImagesListCall { + c := &ImagesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } @@ -31555,7 +43736,7 @@ func (r *HealthChecksService) List(project string) *HealthChecksListCall { // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *HealthChecksListCall) Filter(filter string) *HealthChecksListCall { +func (c *ImagesListCall) Filter(filter string) *ImagesListCall { c.urlParams_.Set("filter", filter) return c } @@ -31566,7 +43747,7 @@ func (c *HealthChecksListCall) Filter(filter string) *HealthChecksListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. 
// (Default: 500) -func (c *HealthChecksListCall) MaxResults(maxResults int64) *HealthChecksListCall { +func (c *ImagesListCall) MaxResults(maxResults int64) *ImagesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -31583,7 +43764,7 @@ func (c *HealthChecksListCall) MaxResults(maxResults int64) *HealthChecksListCal // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *HealthChecksListCall) OrderBy(orderBy string) *HealthChecksListCall { +func (c *ImagesListCall) OrderBy(orderBy string) *ImagesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -31591,7 +43772,7 @@ func (c *HealthChecksListCall) OrderBy(orderBy string) *HealthChecksListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *HealthChecksListCall) PageToken(pageToken string) *HealthChecksListCall { +func (c *ImagesListCall) PageToken(pageToken string) *ImagesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -31599,7 +43780,7 @@ func (c *HealthChecksListCall) PageToken(pageToken string) *HealthChecksListCall // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HealthChecksListCall) Fields(s ...googleapi.Field) *HealthChecksListCall { +func (c *ImagesListCall) Fields(s ...googleapi.Field) *ImagesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -31609,7 +43790,7 @@ func (c *HealthChecksListCall) Fields(s ...googleapi.Field) *HealthChecksListCal // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *HealthChecksListCall) IfNoneMatch(entityTag string) *HealthChecksListCall { +func (c *ImagesListCall) IfNoneMatch(entityTag string) *ImagesListCall { c.ifNoneMatch_ = entityTag return c } @@ -31617,21 +43798,21 @@ func (c *HealthChecksListCall) IfNoneMatch(entityTag string) *HealthChecksListCa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HealthChecksListCall) Context(ctx context.Context) *HealthChecksListCall { +func (c *ImagesListCall) Context(ctx context.Context) *ImagesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HealthChecksListCall) Header() http.Header { +func (c *ImagesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksListCall) doRequest(alt string) (*http.Response, error) { +func (c *ImagesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -31642,7 +43823,7 @@ func (c *HealthChecksListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders @@ -31652,14 +43833,14 @@ func (c *HealthChecksListCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.list" call. -// Exactly one of *HealthCheckList or error will be non-nil. Any non-2xx +// Do executes the "compute.images.list" call. +// Exactly one of *ImageList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *HealthCheckList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckList, error) { +// *ImageList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -31678,7 +43859,7 @@ func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckLis if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HealthCheckList{ + ret := &ImageList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -31690,9 +43871,9 @@ func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckLis } return ret, nil // { - // "description": "Retrieves the list of HealthCheck resources available to the specified project.", + // "description": "Retrieves the list of private images available to the specified project. Private images are images you create that belong to your project. This method does not get any images that belong to other projects, including publicly-available images, like Debian 8. If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", // "httpMethod": "GET", - // "id": "compute.healthChecks.list", + // "id": "compute.images.list", // "parameterOrder": [ // "project" // ], @@ -31728,9 +43909,325 @@ func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckLis // "type": "string" // } // }, - // "path": "{project}/global/healthChecks", + // "path": "{project}/global/images", + // "response": { + // "$ref": "ImageList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
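A sketch of the compute.images.list call with the filter, ordering, and paging parameters documented above; the filter expression and names are placeholders, svc is an authenticated *compute.Service, and the Pages helper added in this hunk drives the nextPageToken loop.

package computeexamples // hypothetical package for these sketches

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1" // import path assumed
)

// listStagingImages pages through the project's private images whose names
// match an RE2 filter, newest first, printing one line per image.
func listStagingImages(ctx context.Context, svc *compute.Service) error {
	call := svc.Images.List("my-project").
		Filter("name eq staging-.*").      // RE2 expression, per the filter docs above
		OrderBy("creationTimestamp desc"). // newest result first
		MaxResults(100)
	return call.Pages(ctx, func(page *compute.ImageList) error {
		for _, img := range page.Items {
			fmt.Printf("%s (family %q)\n", img.Name, img.Family)
		}
		return nil // a non-nil error here halts the iteration
	})
}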
+func (c *ImagesListCall) Pages(ctx context.Context, f func(*ImageList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.images.setLabels": + +type ImagesSetLabelsCall struct { + s *Service + project string + resource string + globalsetlabelsrequest *GlobalSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetLabels: Sets the labels on an image. To learn more about labels, +// read the Labeling Resources documentation. +func (r *ImagesService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *ImagesSetLabelsCall { + c := &ImagesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + c.globalsetlabelsrequest = globalsetlabelsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ImagesSetLabelsCall) Fields(s ...googleapi.Field) *ImagesSetLabelsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ImagesSetLabelsCall) Context(ctx context.Context) *ImagesSetLabelsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ImagesSetLabelsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ImagesSetLabelsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/setLabels") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.images.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ImagesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the labels on an image. To learn more about labels, read the Labeling Resources documentation.", + // "httpMethod": "POST", + // "id": "compute.images.setLabels", + // "parameterOrder": [ + // "project", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/images/{resource}/setLabels", + // "request": { + // "$ref": "GlobalSetLabelsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.images.testIamPermissions": + +type ImagesTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *ImagesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *ImagesTestIamPermissionsCall { + c := &ImagesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ImagesTestIamPermissionsCall) Fields(s ...googleapi.Field) *ImagesTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ImagesTestIamPermissionsCall) Context(ctx context.Context) *ImagesTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ImagesTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ImagesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.images.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TestPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.images.testIamPermissions", + // "parameterOrder": [ + // "project", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/images/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "HealthCheckList" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -31741,47 +44238,42 @@ func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckLis } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. 
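A combined sketch of the new compute.images.setLabels and compute.images.testIamPermissions calls: check the permission first, then apply labels. The GlobalSetLabelsRequest and TestPermissionsRequest field names are assumptions from the compute API schema, the permission string and labels are placeholders, and svc is an authenticated *compute.Service.

package computeexamples // hypothetical package for these sketches

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1" // import path assumed
)

// labelImageIfAllowed verifies that the caller may label the image and, if so,
// replaces its label set using the current label fingerprint.
func labelImageIfAllowed(ctx context.Context, svc *compute.Service, fingerprint string) error {
	perms, err := svc.Images.TestIamPermissions("my-project", "my-image-v1",
		&compute.TestPermissionsRequest{Permissions: []string{"compute.images.setLabels"}}).
		Context(ctx).Do()
	if err != nil {
		return err
	}
	if len(perms.Permissions) == 0 {
		return fmt.Errorf("caller lacks compute.images.setLabels on this image")
	}
	req := &compute.GlobalSetLabelsRequest{
		LabelFingerprint: fingerprint, // e.g. taken from a prior images.get response
		Labels:           map[string]string{"env": "staging", "team": "infra"},
	}
	_, err = svc.Images.SetLabels("my-project", "my-image-v1", req).Context(ctx).Do()
	return err
}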
-// The provided context supersedes any context provided to the Context method. -func (c *HealthChecksListCall) Pages(ctx context.Context, f func(*HealthCheckList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.healthChecks.patch": +// method id "compute.instanceGroupManagers.abandonInstances": -type HealthChecksPatchCall struct { - s *Service - project string - healthCheck string - healthcheck *HealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersAbandonInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersabandoninstancesrequest *InstanceGroupManagersAbandonInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates a HealthCheck resource in the specified project using -// the data included in the request. This method supports PATCH -// semantics and uses the JSON merge patch format and processing rules. -func (r *HealthChecksService) Patch(project string, healthCheck string, healthcheck *HealthCheck) *HealthChecksPatchCall { - c := &HealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AbandonInstances: Schedules a group action to remove the specified +// instances from the managed instance group. Abandoning an instance +// does not delete the instance, but it does remove the instance from +// any target pools that are applied by the managed instance group. This +// method reduces the targetSize of the managed instance group by the +// number of instances that you abandon. This operation is marked as +// DONE when the action is scheduled even if the instances have not yet +// been removed from the group. You must separately verify the status of +// the abandoning action with the listmanagedinstances method. +// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or +// deleted. +// +// You can specify a maximum of 1000 instances with this method per +// request. +func (r *InstanceGroupManagersService) AbandonInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersabandoninstancesrequest *InstanceGroupManagersAbandonInstancesRequest) *InstanceGroupManagersAbandonInstancesCall { + c := &InstanceGroupManagersAbandonInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.healthCheck = healthCheck - c.healthcheck = healthcheck + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagersabandoninstancesrequest = instancegroupmanagersabandoninstancesrequest return c } @@ -31799,7 +44291,7 @@ func (r *HealthChecksService) Patch(project string, healthCheck string, healthch // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *HealthChecksPatchCall) RequestId(requestId string) *HealthChecksPatchCall { +func (c *InstanceGroupManagersAbandonInstancesCall) RequestId(requestId string) *InstanceGroupManagersAbandonInstancesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -31807,7 +44299,7 @@ func (c *HealthChecksPatchCall) RequestId(requestId string) *HealthChecksPatchCa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HealthChecksPatchCall) Fields(s ...googleapi.Field) *HealthChecksPatchCall { +func (c *InstanceGroupManagersAbandonInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersAbandonInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -31815,52 +44307,53 @@ func (c *HealthChecksPatchCall) Fields(s ...googleapi.Field) *HealthChecksPatchC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HealthChecksPatchCall) Context(ctx context.Context) *HealthChecksPatchCall { +func (c *InstanceGroupManagersAbandonInstancesCall) Context(ctx context.Context) *InstanceGroupManagersAbandonInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HealthChecksPatchCall) Header() http.Header { +func (c *InstanceGroupManagersAbandonInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersabandoninstancesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "healthCheck": c.healthCheck, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.patch" call. +// Do executes the "compute.instanceGroupManagers.abandonInstances" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
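A sketch of compute.instanceGroupManagers.abandonInstances as documented above: remove an instance from its managed group without deleting it, with a request ID for safe retries. The Instances field of the request body is an assumption from the compute API schema; project, zone, group, and UUID values are placeholders, and svc is an authenticated *compute.Service.

package computeexamples // hypothetical package for these sketches

import (
	"context"

	compute "google.golang.org/api/compute/v1" // import path assumed
)

// abandonInstance schedules removal of one VM (given by its full instance URL)
// from a managed instance group. The returned operation is DONE once the
// action is scheduled; callers should confirm the result separately, for
// example via listManagedInstances.
func abandonInstance(ctx context.Context, svc *compute.Service, instanceURL string) (*compute.Operation, error) {
	req := &compute.InstanceGroupManagersAbandonInstancesRequest{
		Instances: []string{instanceURL}, // Instances field assumed from the API schema
	}
	return svc.InstanceGroupManagers.
		AbandonInstances("my-project", "us-central1-f", "my-igm", req).
		RequestId("3d1f2c4e-9a7b-4f0e-8c21-5a6d7e8f9a0b"). // placeholder UUID
		Context(ctx).
		Do()
}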
-func (c *HealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -31891,18 +44384,18 @@ func (c *HealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Updates a HealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.healthChecks.patch", + // "description": "Schedules a group action to remove the specified instances from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. You must separately verify the status of the abandoning action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.abandonInstances", // "parameterOrder": [ // "project", - // "healthCheck" + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "healthCheck": { - // "description": "Name of the HealthCheck resource to patch.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -31917,103 +44410,180 @@ func (c *HealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, er // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/healthChecks/{healthCheck}", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", // "request": { - // "$ref": "HealthCheck" + // "$ref": "InstanceGroupManagersAbandonInstancesRequest" // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.healthChecks.testIamPermissions": +// method id "compute.instanceGroupManagers.aggregatedList": -type HealthChecksTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *HealthChecksService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *HealthChecksTestIamPermissionsCall { - c := &HealthChecksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves the list of managed instance groups and +// groups them by zone. +func (r *InstanceGroupManagersService) AggregatedList(project string) *InstanceGroupManagersAggregatedListCall { + c := &InstanceGroupManagersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. 
For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *InstanceGroupManagersAggregatedListCall) Filter(filter string) *InstanceGroupManagersAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *InstanceGroupManagersAggregatedListCall) MaxResults(maxResults int64) *InstanceGroupManagersAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InstanceGroupManagersAggregatedListCall) OrderBy(orderBy string) *InstanceGroupManagersAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InstanceGroupManagersAggregatedListCall) PageToken(pageToken string) *InstanceGroupManagersAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HealthChecksTestIamPermissionsCall) Fields(s ...googleapi.Field) *HealthChecksTestIamPermissionsCall { +func (c *InstanceGroupManagersAggregatedListCall) Fields(s ...googleapi.Field) *InstanceGroupManagersAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstanceGroupManagersAggregatedListCall) IfNoneMatch(entityTag string) *InstanceGroupManagersAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
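To illustrate the filter and maxResults parameters documented above, here is a small sketch of a single aggregatedList page fetch. It assumes svc is an authenticated *compute.Service built as in the earlier sketch; the filter expression and project name are placeholders.

package migexample

import (
	"fmt"

	"golang.org/x/net/context"
	compute "google.golang.org/api/compute/v1"
)

// listOneAggregatedPage fetches one page of managed instance groups across all
// zones, filtered by name with an RE2 expression that must match the whole field.
func listOneAggregatedPage(ctx context.Context, svc *compute.Service, project string) error {
	agg, err := svc.InstanceGroupManagers.AggregatedList(project).
		Filter("name eq example-.*").
		MaxResults(100).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	for scope, scoped := range agg.Items {
		for _, mgr := range scoped.InstanceGroupManagers {
			fmt.Println(scope, mgr.Name)
		}
	}
	// agg.NextPageToken, if non-empty, can be passed to PageToken for the next page.
	return nil
}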
-func (c *HealthChecksTestIamPermissionsCall) Context(ctx context.Context) *HealthChecksTestIamPermissionsCall { +func (c *InstanceGroupManagersAggregatedListCall) Context(ctx context.Context) *InstanceGroupManagersAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HealthChecksTestIamPermissionsCall) Header() http.Header { +func (c *InstanceGroupManagersAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instanceGroupManagers") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *HealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.instanceGroupManagers.aggregatedList" call. +// Exactly one of *InstanceGroupManagerAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *InstanceGroupManagerAggregatedList.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagerAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -32032,7 +44602,7 @@ func (c *HealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (* if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &InstanceGroupManagerAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -32044,35 +44614,47 @@ func (c *HealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.healthChecks.testIamPermissions", + // "description": "Retrieves the list of managed instance groups and groups them by zone.", + // "httpMethod": "GET", + // "id": "compute.instanceGroupManagers.aggregatedList", // "parameterOrder": [ - // "project", - // "resource" + // "project" // ], // "parameters": { + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/global/healthChecks/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/aggregated/instanceGroupManagers", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "InstanceGroupManagerAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -32083,25 +44665,48 @@ func (c *HealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (* } -// method id "compute.healthChecks.update": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InstanceGroupManagersAggregatedListCall) Pages(ctx context.Context, f func(*InstanceGroupManagerAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type HealthChecksUpdateCall struct { - s *Service - project string - healthCheck string - healthcheck *HealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.instanceGroupManagers.delete": + +type InstanceGroupManagersDeleteCall struct { + s *Service + project string + zone string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates a HealthCheck resource in the specified project using -// the data included in the request. -func (r *HealthChecksService) Update(project string, healthCheck string, healthcheck *HealthCheck) *HealthChecksUpdateCall { - c := &HealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified managed instance group and all of the +// instances in that group. Note that the instance group must not belong +// to a backend service. Read Deleting an instance group for more +// information. 
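The Pages helper added for the aggregated list call drives the nextPageToken loop shown in its implementation further down; a short usage sketch, again assuming svc is an authenticated *compute.Service and the project name is a placeholder:

package migexample

import (
	"fmt"

	"golang.org/x/net/context"
	compute "google.golang.org/api/compute/v1"
)

// walkAllManagedGroups visits every managed instance group in every zone,
// letting Pages handle the pageToken bookkeeping.
func walkAllManagedGroups(ctx context.Context, svc *compute.Service, project string) error {
	call := svc.InstanceGroupManagers.AggregatedList(project).MaxResults(250)
	return call.Pages(ctx, func(page *compute.InstanceGroupManagerAggregatedList) error {
		for _, scoped := range page.Items {
			for _, mgr := range scoped.InstanceGroupManagers {
				fmt.Printf("%s\ttargetSize=%d\n", mgr.Name, mgr.TargetSize)
			}
		}
		return nil // returning a non-nil error here halts the iteration
	})
}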
+func (r *InstanceGroupManagersService) Delete(project string, zone string, instanceGroupManager string) *InstanceGroupManagersDeleteCall { + c := &InstanceGroupManagersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.healthCheck = healthCheck - c.healthcheck = healthcheck + c.zone = zone + c.instanceGroupManager = instanceGroupManager return c } @@ -32119,7 +44724,7 @@ func (r *HealthChecksService) Update(project string, healthCheck string, healthc // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *HealthChecksUpdateCall) RequestId(requestId string) *HealthChecksUpdateCall { +func (c *InstanceGroupManagersDeleteCall) RequestId(requestId string) *InstanceGroupManagersDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -32127,7 +44732,7 @@ func (c *HealthChecksUpdateCall) RequestId(requestId string) *HealthChecksUpdate // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HealthChecksUpdateCall) Fields(s ...googleapi.Field) *HealthChecksUpdateCall { +func (c *InstanceGroupManagersDeleteCall) Fields(s ...googleapi.Field) *InstanceGroupManagersDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -32135,52 +44740,48 @@ func (c *HealthChecksUpdateCall) Fields(s ...googleapi.Field) *HealthChecksUpdat // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HealthChecksUpdateCall) Context(ctx context.Context) *HealthChecksUpdateCall { +func (c *InstanceGroupManagersDeleteCall) Context(ctx context.Context) *InstanceGroupManagersDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HealthChecksUpdateCall) Header() http.Header { +func (c *InstanceGroupManagersDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheck) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/healthChecks/{healthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) + req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "healthCheck": c.healthCheck, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.healthChecks.update" call. +// Do executes the "compute.instanceGroupManagers.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -32211,18 +44812,18 @@ func (c *HealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Updates a HealthCheck resource in the specified project using the data included in the request.", - // "httpMethod": "PUT", - // "id": "compute.healthChecks.update", + // "description": "Deletes the specified managed instance group and all of the instances in that group. Note that the instance group must not belong to a backend service. Read Deleting an instance group for more information.", + // "httpMethod": "DELETE", + // "id": "compute.instanceGroupManagers.delete", // "parameterOrder": [ // "project", - // "healthCheck" + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "healthCheck": { - // "description": "Name of the HealthCheck resource to update.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -32237,12 +44838,15 @@ func (c *HealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, e // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/healthChecks/{healthCheck}", - // "request": { - // "$ref": "HealthCheck" - // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", // "response": { // "$ref": "Operation" // }, @@ -32254,23 +44858,41 @@ func (c *HealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, e } -// method id "compute.httpHealthChecks.delete": +// method id "compute.instanceGroupManagers.deleteInstances": -type HttpHealthChecksDeleteCall struct { - s *Service - project string - httpHealthCheck string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersDeleteInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersdeleteinstancesrequest *InstanceGroupManagersDeleteInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified HttpHealthCheck resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/delete -func (r *HttpHealthChecksService) Delete(project string, httpHealthCheck string) *HttpHealthChecksDeleteCall { - c := &HttpHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// DeleteInstances: Schedules a group action to delete the specified +// instances in the managed instance group. The instances are also +// removed from any target pools of which they were a member. This +// method reduces the targetSize of the managed instance group by the +// number of instances that you delete. This operation is marked as DONE +// when the action is scheduled even if the instances are still being +// deleted. You must separately verify the status of the deleting action +// with the listmanagedinstances method. +// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or +// deleted. +// +// You can specify a maximum of 1000 instances with this method per +// request. +func (r *InstanceGroupManagersService) DeleteInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersdeleteinstancesrequest *InstanceGroupManagersDeleteInstancesRequest) *InstanceGroupManagersDeleteInstancesCall { + c := &InstanceGroupManagersDeleteInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpHealthCheck = httpHealthCheck + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagersdeleteinstancesrequest = instancegroupmanagersdeleteinstancesrequest return c } @@ -32288,7 +44910,7 @@ func (r *HttpHealthChecksService) Delete(project string, httpHealthCheck string) // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *HttpHealthChecksDeleteCall) RequestId(requestId string) *HttpHealthChecksDeleteCall { +func (c *InstanceGroupManagersDeleteInstancesCall) RequestId(requestId string) *InstanceGroupManagersDeleteInstancesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -32296,7 +44918,7 @@ func (c *HttpHealthChecksDeleteCall) RequestId(requestId string) *HttpHealthChec // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpHealthChecksDeleteCall { +func (c *InstanceGroupManagersDeleteInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersDeleteInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -32304,47 +44926,53 @@ func (c *HttpHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpHealthChe // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpHealthChecksDeleteCall) Context(ctx context.Context) *HttpHealthChecksDeleteCall { +func (c *InstanceGroupManagersDeleteInstancesCall) Context(ctx context.Context) *InstanceGroupManagersDeleteInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpHealthChecksDeleteCall) Header() http.Header { +func (c *InstanceGroupManagersDeleteInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersdeleteinstancesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpHealthCheck": c.httpHealthCheck, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpHealthChecks.delete" call. +// Do executes the "compute.instanceGroupManagers.deleteInstances" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
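A hedged sketch of the deleteInstances call introduced above: it removes one instance and shrinks the group's targetSize by one. svc is assumed to be an authenticated *compute.Service; the instance URL is a placeholder.

package migexample

import (
	"golang.org/x/net/context"
	compute "google.golang.org/api/compute/v1"
)

// deleteOneInstance schedules deletion of a single instance from the managed
// instance group; the returned Operation completes asynchronously.
func deleteOneInstance(ctx context.Context, svc *compute.Service, project, zone, group string) (*compute.Operation, error) {
	req := &compute.InstanceGroupManagersDeleteInstancesRequest{
		Instances: []string{"zones/" + zone + "/instances/example-instance"},
	}
	return svc.InstanceGroupManagers.
		DeleteInstances(project, zone, group, req).
		Context(ctx).
		Do()
}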
-func (c *HttpHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -32375,18 +45003,18 @@ func (c *HttpHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Deletes the specified HttpHealthCheck resource.", - // "httpMethod": "DELETE", - // "id": "compute.httpHealthChecks.delete", + // "description": "Schedules a group action to delete the specified instances in the managed instance group. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. This operation is marked as DONE when the action is scheduled even if the instances are still being deleted. You must separately verify the status of the deleting action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.deleteInstances", // "parameterOrder": [ // "project", - // "httpHealthCheck" + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "httpHealthCheck": { - // "description": "Name of the HttpHealthCheck resource to delete.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -32401,9 +45029,18 @@ func (c *HttpHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", + // "request": { + // "$ref": "InstanceGroupManagersDeleteInstancesRequest" + // }, // "response": { // "$ref": "Operation" // }, @@ -32415,32 +45052,34 @@ func (c *HttpHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio } -// method id "compute.httpHealthChecks.get": +// method id "compute.instanceGroupManagers.get": -type HttpHealthChecksGetCall struct { - s *Service - project string - httpHealthCheck string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersGetCall struct { + s *Service + project string + zone string + instanceGroupManager string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified HttpHealthCheck resource. Get a list of -// available HTTP health checks by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/get -func (r *HttpHealthChecksService) Get(project string, httpHealthCheck string) *HttpHealthChecksGetCall { - c := &HttpHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns all of the details about the specified managed instance +// group. Get a list of available managed instance groups by making a +// list() request. +func (r *InstanceGroupManagersService) Get(project string, zone string, instanceGroupManager string) *InstanceGroupManagersGetCall { + c := &InstanceGroupManagersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpHealthCheck = httpHealthCheck + c.zone = zone + c.instanceGroupManager = instanceGroupManager return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpHealthChecksGetCall { +func (c *InstanceGroupManagersGetCall) Fields(s ...googleapi.Field) *InstanceGroupManagersGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -32450,7 +45089,7 @@ func (c *HttpHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpHealthChecks // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *HttpHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpHealthChecksGetCall { +func (c *InstanceGroupManagersGetCall) IfNoneMatch(entityTag string) *InstanceGroupManagersGetCall { c.ifNoneMatch_ = entityTag return c } @@ -32458,21 +45097,21 @@ func (c *HttpHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpHealthCheck // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *HttpHealthChecksGetCall) Context(ctx context.Context) *HttpHealthChecksGetCall { +func (c *InstanceGroupManagersGetCall) Context(ctx context.Context) *InstanceGroupManagersGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpHealthChecksGetCall) Header() http.Header { +func (c *InstanceGroupManagersGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -32483,25 +45122,26 @@ func (c *HttpHealthChecksGetCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpHealthCheck": c.httpHealthCheck, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpHealthChecks.get" call. -// Exactly one of *HttpHealthCheck or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *HttpHealthCheck.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.instanceGroupManagers.get" call. +// Exactly one of *InstanceGroupManager or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceGroupManager.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthCheck, error) { +func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManager, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -32520,7 +45160,7 @@ func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthC if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HttpHealthCheck{ + ret := &InstanceGroupManager{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -32532,18 +45172,18 @@ func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthC } return ret, nil // { - // "description": "Returns the specified HttpHealthCheck resource. Get a list of available HTTP health checks by making a list() request.", + // "description": "Returns all of the details about the specified managed instance group. 
Get a list of available managed instance groups by making a list() request.", // "httpMethod": "GET", - // "id": "compute.httpHealthChecks.get", + // "id": "compute.instanceGroupManagers.get", // "parameterOrder": [ // "project", - // "httpHealthCheck" + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "httpHealthCheck": { - // "description": "Name of the HttpHealthCheck resource to return.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -32553,11 +45193,17 @@ func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthC // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", // "response": { - // "$ref": "HttpHealthCheck" + // "$ref": "InstanceGroupManager" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -32568,24 +45214,33 @@ func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthC } -// method id "compute.httpHealthChecks.insert": +// method id "compute.instanceGroupManagers.insert": -type HttpHealthChecksInsertCall struct { - s *Service - project string - httphealthcheck *HttpHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersInsertCall struct { + s *Service + project string + zone string + instancegroupmanager *InstanceGroupManager + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a HttpHealthCheck resource in the specified project -// using the data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/insert -func (r *HttpHealthChecksService) Insert(project string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksInsertCall { - c := &HttpHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a managed instance group using the information that +// you specify in the request. After the group is created, it schedules +// an action to create instances in the group using the specified +// instance template. This operation is marked as DONE when the group is +// created even if the instances in the group have not yet been created. +// You must separately verify the status of the individual instances +// with the listmanagedinstances method. +// +// A managed instance group can have up to 1000 VM instances per group. +// Please contact Cloud Support if you need an increase in this limit. 
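A sketch of the insert call whose behavior is described above: the group is created from an existing instance template, and the operation is DONE once the group exists even though its instances may still be creating. All names and the template URL are placeholders; svc is assumed to be an authenticated *compute.Service.

package migexample

import (
	"golang.org/x/net/context"
	compute "google.golang.org/api/compute/v1"
)

// createGroup creates a managed instance group of three instances from a template.
// Individual instance status must still be checked with listManagedInstances.
func createGroup(ctx context.Context, svc *compute.Service, project, zone string) (*compute.Operation, error) {
	mig := &compute.InstanceGroupManager{
		Name:             "example-mig",
		BaseInstanceName: "example",
		InstanceTemplate: "global/instanceTemplates/example-template",
		TargetSize:       3,
	}
	return svc.InstanceGroupManagers.Insert(project, zone, mig).Context(ctx).Do()
}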
+func (r *InstanceGroupManagersService) Insert(project string, zone string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersInsertCall { + c := &InstanceGroupManagersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httphealthcheck = httphealthcheck + c.zone = zone + c.instancegroupmanager = instancegroupmanager return c } @@ -32603,7 +45258,7 @@ func (r *HttpHealthChecksService) Insert(project string, httphealthcheck *HttpHe // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *HttpHealthChecksInsertCall) RequestId(requestId string) *HttpHealthChecksInsertCall { +func (c *InstanceGroupManagersInsertCall) RequestId(requestId string) *InstanceGroupManagersInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -32611,7 +45266,7 @@ func (c *HttpHealthChecksInsertCall) RequestId(requestId string) *HttpHealthChec // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpHealthChecksInsertCall { +func (c *InstanceGroupManagersInsertCall) Fields(s ...googleapi.Field) *InstanceGroupManagersInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -32619,51 +45274,52 @@ func (c *HttpHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpHealthChe // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpHealthChecksInsertCall) Context(ctx context.Context) *HttpHealthChecksInsertCall { +func (c *InstanceGroupManagersInsertCall) Context(ctx context.Context) *InstanceGroupManagersInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpHealthChecksInsertCall) Header() http.Header { +func (c *InstanceGroupManagersInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpHealthChecks.insert" call. +// Do executes the "compute.instanceGroupManagers.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HttpHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -32694,11 +45350,12 @@ func (c *HttpHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Creates a HttpHealthCheck resource in the specified project using the data included in the request.", + // "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, it schedules an action to create instances in the group using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method.\n\nA managed instance group can have up to 1000 VM instances per group. Please contact Cloud Support if you need an increase in this limit.", // "httpMethod": "POST", - // "id": "compute.httpHealthChecks.insert", + // "id": "compute.instanceGroupManagers.insert", // "parameterOrder": [ - // "project" + // "project", + // "zone" // ], // "parameters": { // "project": { @@ -32712,11 +45369,17 @@ func (c *HttpHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operatio // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where you want to create the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/httpHealthChecks", + // "path": "{project}/zones/{zone}/instanceGroupManagers", // "request": { - // "$ref": "HttpHealthCheck" + // "$ref": "InstanceGroupManager" // }, // "response": { // "$ref": "Operation" @@ -32729,23 +45392,24 @@ func (c *HttpHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operatio } -// method id "compute.httpHealthChecks.list": +// method id "compute.instanceGroupManagers.list": -type HttpHealthChecksListCall struct { +type InstanceGroupManagersListCall struct { s *Service project string + zone string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves the list of HttpHealthCheck resources available to -// the specified project. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/list -func (r *HttpHealthChecksService) List(project string) *HttpHealthChecksListCall { - c := &HttpHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of managed instance groups that are contained +// within the specified project and zone. +func (r *InstanceGroupManagersService) List(project string, zone string) *InstanceGroupManagersListCall { + c := &InstanceGroupManagersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone return c } @@ -32775,7 +45439,7 @@ func (r *HttpHealthChecksService) List(project string) *HttpHealthChecksListCall // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *HttpHealthChecksListCall) Filter(filter string) *HttpHealthChecksListCall { +func (c *InstanceGroupManagersListCall) Filter(filter string) *InstanceGroupManagersListCall { c.urlParams_.Set("filter", filter) return c } @@ -32786,7 +45450,7 @@ func (c *HttpHealthChecksListCall) Filter(filter string) *HttpHealthChecksListCa // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *HttpHealthChecksListCall) MaxResults(maxResults int64) *HttpHealthChecksListCall { +func (c *InstanceGroupManagersListCall) MaxResults(maxResults int64) *InstanceGroupManagersListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -32803,7 +45467,7 @@ func (c *HttpHealthChecksListCall) MaxResults(maxResults int64) *HttpHealthCheck // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *HttpHealthChecksListCall) OrderBy(orderBy string) *HttpHealthChecksListCall { +func (c *InstanceGroupManagersListCall) OrderBy(orderBy string) *InstanceGroupManagersListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -32811,7 +45475,7 @@ func (c *HttpHealthChecksListCall) OrderBy(orderBy string) *HttpHealthChecksList // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *HttpHealthChecksListCall) PageToken(pageToken string) *HttpHealthChecksListCall { +func (c *InstanceGroupManagersListCall) PageToken(pageToken string) *InstanceGroupManagersListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -32819,7 +45483,7 @@ func (c *HttpHealthChecksListCall) PageToken(pageToken string) *HttpHealthChecks // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpHealthChecksListCall) Fields(s ...googleapi.Field) *HttpHealthChecksListCall { +func (c *InstanceGroupManagersListCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -32829,7 +45493,7 @@ func (c *HttpHealthChecksListCall) Fields(s ...googleapi.Field) *HttpHealthCheck // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
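The IfNoneMatch option documented above supports conditional re-fetching of the zonal list. A sketch, assuming entityTag is an ETag value recorded from an earlier response and svc is an authenticated *compute.Service:

package migexample

import (
	"golang.org/x/net/context"
	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// listIfChanged re-fetches the zonal list only if it changed since entityTag was recorded.
func listIfChanged(ctx context.Context, svc *compute.Service, project, zone, entityTag string) (*compute.InstanceGroupManagerList, bool, error) {
	list, err := svc.InstanceGroupManagers.List(project, zone).
		IfNoneMatch(entityTag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		return nil, false, nil // server answered 304; the cached copy is still current
	}
	if err != nil {
		return nil, false, err
	}
	return list, true, nil
}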
-func (c *HttpHealthChecksListCall) IfNoneMatch(entityTag string) *HttpHealthChecksListCall { +func (c *InstanceGroupManagersListCall) IfNoneMatch(entityTag string) *InstanceGroupManagersListCall { c.ifNoneMatch_ = entityTag return c } @@ -32837,21 +45501,21 @@ func (c *HttpHealthChecksListCall) IfNoneMatch(entityTag string) *HttpHealthChec // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpHealthChecksListCall) Context(ctx context.Context) *HttpHealthChecksListCall { +func (c *InstanceGroupManagersListCall) Context(ctx context.Context) *InstanceGroupManagersListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpHealthChecksListCall) Header() http.Header { +func (c *InstanceGroupManagersListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpHealthChecksListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -32862,24 +45526,25 @@ func (c *HttpHealthChecksListCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpHealthChecks.list" call. -// Exactly one of *HttpHealthCheckList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *HttpHealthCheckList.ServerResponse.Header or (if a response was +// Do executes the "compute.instanceGroupManagers.list" call. +// Exactly one of *InstanceGroupManagerList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *InstanceGroupManagerList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealthCheckList, error) { +func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagerList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -32898,7 +45563,7 @@ func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealth if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HttpHealthCheckList{ + ret := &InstanceGroupManagerList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -32910,11 +45575,12 @@ func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealth } return ret, nil // { - // "description": "Retrieves the list of HttpHealthCheck resources available to the specified project.", + // "description": "Retrieves a list of managed instance groups that are contained within the specified project and zone.", // "httpMethod": "GET", - // "id": "compute.httpHealthChecks.list", + // "id": "compute.instanceGroupManagers.list", // "parameterOrder": [ - // "project" + // "project", + // "zone" // ], // "parameters": { // "filter": { @@ -32946,11 +45612,17 @@ func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealth // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/httpHealthChecks", + // "path": "{project}/zones/{zone}/instanceGroupManagers", // "response": { - // "$ref": "HttpHealthCheckList" + // "$ref": "InstanceGroupManagerList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -32964,7 +45636,7 @@ func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealth // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *HttpHealthChecksListCall) Pages(ctx context.Context, f func(*HttpHealthCheckList) error) error { +func (c *InstanceGroupManagersListCall) Pages(ctx context.Context, f func(*InstanceGroupManagerList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -32982,53 +45654,60 @@ func (c *HttpHealthChecksListCall) Pages(ctx context.Context, f func(*HttpHealth } } -// method id "compute.httpHealthChecks.patch": +// method id "compute.instanceGroupManagers.listManagedInstances": -type HttpHealthChecksPatchCall struct { - s *Service - project string - httpHealthCheck string - httphealthcheck *HttpHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersListManagedInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates a HttpHealthCheck resource in the specified project -// using the data included in the request. This method supports PATCH -// semantics and uses the JSON merge patch format and processing rules. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/patch -func (r *HttpHealthChecksService) Patch(project string, httpHealthCheck string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksPatchCall { - c := &HttpHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListManagedInstances: Lists all of the instances in the managed +// instance group. Each instance in the list has a currentAction, which +// indicates the action that the managed instance group is performing on +// the instance. For example, if the group is still creating an +// instance, the currentAction is CREATING. If a previous action failed, +// the list displays the errors for that failed action. +func (r *InstanceGroupManagersService) ListManagedInstances(project string, zone string, instanceGroupManager string) *InstanceGroupManagersListManagedInstancesCall { + c := &InstanceGroupManagersListManagedInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpHealthCheck = httpHealthCheck - c.httphealthcheck = httphealthcheck + c.zone = zone + c.instanceGroupManager = instanceGroupManager return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *HttpHealthChecksPatchCall) RequestId(requestId string) *HttpHealthChecksPatchCall { - c.urlParams_.Set("requestId", requestId) +// Filter sets the optional parameter "filter": +func (c *InstanceGroupManagersListManagedInstancesCall) Filter(filter string) *InstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": +func (c *InstanceGroupManagersListManagedInstancesCall) MaxResults(maxResults int64) *InstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "order_by": +func (c *InstanceGroupManagersListManagedInstancesCall) OrderBy(orderBy string) *InstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("order_by", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": +func (c *InstanceGroupManagersListManagedInstancesCall) PageToken(pageToken string) *InstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *HttpHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpHealthChecksPatchCall { +func (c *InstanceGroupManagersListManagedInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListManagedInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -33036,52 +45715,50 @@ func (c *HttpHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpHealthChec // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpHealthChecksPatchCall) Context(ctx context.Context) *HttpHealthChecksPatchCall { +func (c *InstanceGroupManagersListManagedInstancesCall) Context(ctx context.Context) *InstanceGroupManagersListManagedInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpHealthChecksPatchCall) Header() http.Header { +func (c *InstanceGroupManagersListManagedInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpHealthCheck": c.httpHealthCheck, + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpHealthChecks.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instanceGroupManagers.listManagedInstances" call. +// Exactly one of *InstanceGroupManagersListManagedInstancesResponse or +// error will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *InstanceGroupManagersListManagedInstancesResponse.ServerResponse.Head +// er or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. 
+func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagersListManagedInstancesResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -33100,7 +45777,7 @@ func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InstanceGroupManagersListManagedInstancesResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -33112,21 +45789,40 @@ func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.httpHealthChecks.patch", + // "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.listManagedInstances", // "parameterOrder": [ // "project", - // "httpHealthCheck" + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "httpHealthCheck": { - // "description": "Name of the HttpHealthCheck resource to patch.", + // "filter": { + // "location": "query", + // "type": "string" + // }, + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, + // "maxResults": { + // "default": "500", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "order_by": { + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -33134,18 +45830,16 @@ func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", - // "request": { - // "$ref": "HttpHealthCheck" - // }, + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", // "response": { - // "$ref": "Operation" + // "$ref": "InstanceGroupManagersListManagedInstancesResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -33156,32 +45850,79 @@ func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.httpHealthChecks.testIamPermissions": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InstanceGroupManagersListManagedInstancesCall) Pages(ctx context.Context, f func(*InstanceGroupManagersListManagedInstancesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type HttpHealthChecksTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.instanceGroupManagers.patch": + +type InstanceGroupManagersPatchCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanager *InstanceGroupManager + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *HttpHealthChecksService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *HttpHealthChecksTestIamPermissionsCall { - c := &HttpHealthChecksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates a managed instance group using the information that +// you specify in the request. This operation is marked as DONE when the +// group is patched even if the instances in the group are still in the +// process of being patched. You must separately verify the status of +// the individual instances with the listManagedInstances method. This +// method supports PATCH semantics and uses the JSON merge patch format +// and processing rules. 
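(Usage sketch, not part of the vendored diff: the generated Patch method documented above is a plain builder-style call. The import path — the beta package is assumed here because ResizeAdvanced and SetAutoHealingPolicies below are beta-only — and the project/zone/group names and the TargetSize field are assumptions, not taken from this change.)

package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v0.beta"
)

func main() {
	ctx := context.Background()
	// Application Default Credentials with the compute scope listed in the
	// method metadata in these hunks.
	client, err := google.DefaultClient(ctx, compute.ComputeScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}
	// Merge patch: only the fields that are set are sent. TargetSize is an
	// assumed field name from the public Compute API, not shown in this diff.
	op, err := svc.InstanceGroupManagers.Patch("my-project", "us-central1-a", "my-igm",
		&compute.InstanceGroupManager{TargetSize: 5}).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("patch scheduled: operation %s, status %s", op.Name, op.Status)
}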
+func (r *InstanceGroupManagersService) Patch(project string, zone string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersPatchCall { + c := &InstanceGroupManagersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanager = instancegroupmanager + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupManagersPatchCall) RequestId(requestId string) *InstanceGroupManagersPatchCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpHealthChecksTestIamPermissionsCall) Fields(s ...googleapi.Field) *HttpHealthChecksTestIamPermissionsCall { +func (c *InstanceGroupManagersPatchCall) Fields(s ...googleapi.Field) *InstanceGroupManagersPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -33189,52 +45930,53 @@ func (c *HttpHealthChecksTestIamPermissionsCall) Fields(s ...googleapi.Field) *H // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpHealthChecksTestIamPermissionsCall) Context(ctx context.Context) *HttpHealthChecksTestIamPermissionsCall { +func (c *InstanceGroupManagersPatchCall) Context(ctx context.Context) *InstanceGroupManagersPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *HttpHealthChecksTestIamPermissionsCall) Header() http.Header { +func (c *InstanceGroupManagersPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpHealthChecksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("PATCH", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpHealthChecks.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *HttpHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.instanceGroupManagers.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -33253,7 +45995,7 @@ func (c *HttpHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -33265,14 +46007,21 @@ func (c *HttpHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.httpHealthChecks.testIamPermissions", + // "description": "Updates a managed instance group using the information that you specify in the request. 
This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listManagedInstances method. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.instanceGroupManagers.patch", // "parameterOrder": [ // "project", - // "resource" + // "zone", + // "instanceGroupManager" // ], // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the instance group manager.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -33280,50 +46029,67 @@ func (c *HttpHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name of the resource for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where you want to create the managed instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/httpHealthChecks/{resource}/testIamPermissions", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "InstanceGroupManager" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.httpHealthChecks.update": +// method id "compute.instanceGroupManagers.recreateInstances": -type HttpHealthChecksUpdateCall struct { - s *Service - project string - httpHealthCheck string - httphealthcheck *HttpHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersRecreateInstancesCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersrecreateinstancesrequest *InstanceGroupManagersRecreateInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates a HttpHealthCheck resource in the specified project -// using the data included in the request. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks/update -func (r *HttpHealthChecksService) Update(project string, httpHealthCheck string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksUpdateCall { - c := &HttpHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// RecreateInstances: Schedules a group action to recreate the specified +// instances in the managed instance group. The instances are deleted +// and recreated using the current instance template for the managed +// instance group. This operation is marked as DONE when the action is +// scheduled even if the instances have not yet been recreated. You must +// separately verify the status of the recreating action with the +// listmanagedinstances method. +// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or +// deleted. +// +// You can specify a maximum of 1000 instances with this method per +// request. +func (r *InstanceGroupManagersService) RecreateInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersrecreateinstancesrequest *InstanceGroupManagersRecreateInstancesRequest) *InstanceGroupManagersRecreateInstancesCall { + c := &InstanceGroupManagersRecreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpHealthCheck = httpHealthCheck - c.httphealthcheck = httphealthcheck + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagersrecreateinstancesrequest = instancegroupmanagersrecreateinstancesrequest return c } @@ -33341,7 +46107,7 @@ func (r *HttpHealthChecksService) Update(project string, httpHealthCheck string, // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *HttpHealthChecksUpdateCall) RequestId(requestId string) *HttpHealthChecksUpdateCall { +func (c *InstanceGroupManagersRecreateInstancesCall) RequestId(requestId string) *InstanceGroupManagersRecreateInstancesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -33349,7 +46115,7 @@ func (c *HttpHealthChecksUpdateCall) RequestId(requestId string) *HttpHealthChec // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpHealthChecksUpdateCall { +func (c *InstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersRecreateInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -33357,52 +46123,53 @@ func (c *HttpHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpHealthChe // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpHealthChecksUpdateCall) Context(ctx context.Context) *HttpHealthChecksUpdateCall { +func (c *InstanceGroupManagersRecreateInstancesCall) Context(ctx context.Context) *InstanceGroupManagersRecreateInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *HttpHealthChecksUpdateCall) Header() http.Header { +func (c *InstanceGroupManagersRecreateInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersrecreateinstancesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpHealthCheck": c.httpHealthCheck, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpHealthChecks.update" call. +// Do executes the "compute.instanceGroupManagers.recreateInstances" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HttpHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -33433,18 +46200,18 @@ func (c *HttpHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request.", - // "httpMethod": "PUT", - // "id": "compute.httpHealthChecks.update", + // "description": "Schedules a group action to recreate the specified instances in the managed instance group. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the action is scheduled even if the instances have not yet been recreated. 
You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.recreateInstances", // "parameterOrder": [ // "project", - // "httpHealthCheck" + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "httpHealthCheck": { - // "description": "Name of the HttpHealthCheck resource to update.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -33459,11 +46226,17 @@ func (c *HttpHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operatio // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", // "request": { - // "$ref": "HttpHealthCheck" + // "$ref": "InstanceGroupManagersRecreateInstancesRequest" // }, // "response": { // "$ref": "Operation" @@ -33476,22 +46249,35 @@ func (c *HttpHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operatio } -// method id "compute.httpsHealthChecks.delete": +// method id "compute.instanceGroupManagers.resize": -type HttpsHealthChecksDeleteCall struct { - s *Service - project string - httpsHealthCheck string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersResizeCall struct { + s *Service + project string + zone string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified HttpsHealthCheck resource. -func (r *HttpsHealthChecksService) Delete(project string, httpsHealthCheck string) *HttpsHealthChecksDeleteCall { - c := &HttpsHealthChecksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Resize: Resizes the managed instance group. If you increase the size, +// the group creates new instances using the current instance template. +// If you decrease the size, the group deletes instances. The resize +// operation is marked DONE when the resize actions are scheduled even +// if the group has not yet added or deleted any instances. 
You must +// separately verify the status of the creating or deleting actions with +// the listmanagedinstances method. +// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or deleted. +func (r *InstanceGroupManagersService) Resize(project string, zone string, instanceGroupManager string, size int64) *InstanceGroupManagersResizeCall { + c := &InstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpsHealthCheck = httpsHealthCheck + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.urlParams_.Set("size", fmt.Sprint(size)) return c } @@ -33509,7 +46295,7 @@ func (r *HttpsHealthChecksService) Delete(project string, httpsHealthCheck strin // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *HttpsHealthChecksDeleteCall) RequestId(requestId string) *HttpsHealthChecksDeleteCall { +func (c *InstanceGroupManagersResizeCall) RequestId(requestId string) *InstanceGroupManagersResizeCall { c.urlParams_.Set("requestId", requestId) return c } @@ -33517,7 +46303,7 @@ func (c *HttpsHealthChecksDeleteCall) RequestId(requestId string) *HttpsHealthCh // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpsHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpsHealthChecksDeleteCall { +func (c *InstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *InstanceGroupManagersResizeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -33525,21 +46311,21 @@ func (c *HttpsHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpsHealthC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpsHealthChecksDeleteCall) Context(ctx context.Context) *HttpsHealthChecksDeleteCall { +func (c *InstanceGroupManagersResizeCall) Context(ctx context.Context) *InstanceGroupManagersResizeCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpsHealthChecksDeleteCall) Header() http.Header { +func (c *InstanceGroupManagersResizeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpsHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -33547,25 +46333,26 @@ func (c *HttpsHealthChecksDeleteCall) doRequest(alt string) (*http.Response, err reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpsHealthCheck": c.httpsHealthCheck, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.delete" call. +// Do executes the "compute.instanceGroupManagers.resize" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -33596,18 +46383,19 @@ func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Deletes the specified HttpsHealthCheck resource.", - // "httpMethod": "DELETE", - // "id": "compute.httpsHealthChecks.delete", + // "description": "Resizes the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.resize", // "parameterOrder": [ // "project", - // "httpsHealthCheck" + // "zone", + // "instanceGroupManager", + // "size" // ], // "parameters": { - // "httpsHealthCheck": { - // "description": "Name of the HttpsHealthCheck resource to delete.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -33622,9 +46410,22 @@ func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operati // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "size": { + // "description": "The number of running instances that the managed instance group should maintain at any given time. The group automatically adds or removes instances to maintain the number of instances specified by this parameter.", + // "format": "int32", + // "location": "query", + // "required": true, + // "type": "integer" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize", // "response": { // "$ref": "Operation" // }, @@ -33636,92 +46437,120 @@ func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.httpsHealthChecks.get": +// method id "compute.instanceGroupManagers.resizeAdvanced": -type HttpsHealthChecksGetCall struct { - s *Service - project string - httpsHealthCheck string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersResizeAdvancedCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagersresizeadvancedrequest *InstanceGroupManagersResizeAdvancedRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified HttpsHealthCheck resource. Get a list of -// available HTTPS health checks by making a list() request. -func (r *HttpsHealthChecksService) Get(project string, httpsHealthCheck string) *HttpsHealthChecksGetCall { - c := &HttpsHealthChecksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ResizeAdvanced: Resizes the managed instance group with advanced +// configuration options like disabling creation retries. This is an +// extended version of the resize method. +// +// If you increase the size of the instance group, the group creates new +// instances using the current instance template. If you decrease the +// size, the group deletes instances. The resize operation is marked +// DONE when the resize actions are scheduled even if the group has not +// yet added or deleted any instances. You must separately verify the +// status of the creating, creatingWithoutRetries, or deleting actions +// with the get or listmanagedinstances method. +// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or deleted. 
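(Sketch continued, same assumed file as above: of the two resize variants described in these hunks, the plain Resize call takes only the new size as an int64 query parameter, exactly as in the generated signature; ResizeAdvanced instead takes an InstanceGroupManagersResizeAdvancedRequest body for options such as disabling creation retries.)

func resizeGroup(ctx context.Context, svc *compute.Service, project, zone, name string, size int64) error {
	// The Operation is DONE as soon as the resize is scheduled; instance
	// creation or deletion has to be verified separately, e.g. with
	// ListManagedInstances.
	op, err := svc.InstanceGroupManagers.Resize(project, zone, name, size).Context(ctx).Do()
	if err != nil {
		return err
	}
	log.Printf("resize of %s scheduled: operation %s", name, op.Name)
	return nil
}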
+func (r *InstanceGroupManagersService) ResizeAdvanced(project string, zone string, instanceGroupManager string, instancegroupmanagersresizeadvancedrequest *InstanceGroupManagersResizeAdvancedRequest) *InstanceGroupManagersResizeAdvancedCall { + c := &InstanceGroupManagersResizeAdvancedCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpsHealthCheck = httpsHealthCheck + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagersresizeadvancedrequest = instancegroupmanagersresizeadvancedrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupManagersResizeAdvancedCall) RequestId(requestId string) *InstanceGroupManagersResizeAdvancedCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpsHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpsHealthChecksGetCall { +func (c *InstanceGroupManagersResizeAdvancedCall) Fields(s ...googleapi.Field) *InstanceGroupManagersResizeAdvancedCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *HttpsHealthChecksGetCall) IfNoneMatch(entityTag string) *HttpsHealthChecksGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpsHealthChecksGetCall) Context(ctx context.Context) *HttpsHealthChecksGetCall { +func (c *InstanceGroupManagersResizeAdvancedCall) Context(ctx context.Context) *InstanceGroupManagersResizeAdvancedCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *HttpsHealthChecksGetCall) Header() http.Header { +func (c *InstanceGroupManagersResizeAdvancedCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpsHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersResizeAdvancedCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersresizeadvancedrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resizeAdvanced") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpsHealthCheck": c.httpsHealthCheck, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.get" call. -// Exactly one of *HttpsHealthCheck or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *HttpsHealthCheck.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealthCheck, error) { +// Do executes the "compute.instanceGroupManagers.resizeAdvanced" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersResizeAdvancedCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -33740,7 +46569,7 @@ func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealt if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HttpsHealthCheck{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -33752,18 +46581,18 @@ func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealt } return ret, nil // { - // "description": "Returns the specified HttpsHealthCheck resource. Get a list of available HTTPS health checks by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.httpsHealthChecks.get", + // "description": "Resizes the managed instance group with advanced configuration options like disabling creation retries. 
This is an extended version of the resize method.\n\nIf you increase the size of the instance group, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating, creatingWithoutRetries, or deleting actions with the get or listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.resizeAdvanced", // "parameterOrder": [ // "project", - // "httpsHealthCheck" + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "httpsHealthCheck": { - // "description": "Name of the HttpsHealthCheck resource to return.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -33773,38 +46602,54 @@ func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealt // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resizeAdvanced", + // "request": { + // "$ref": "InstanceGroupManagersResizeAdvancedRequest" + // }, // "response": { - // "$ref": "HttpsHealthCheck" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.httpsHealthChecks.insert": +// method id "compute.instanceGroupManagers.setAutoHealingPolicies": -type HttpsHealthChecksInsertCall struct { - s *Service - project string - httpshealthcheck *HttpsHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersSetAutoHealingPoliciesCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagerssetautohealingrequest *InstanceGroupManagersSetAutoHealingRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a HttpsHealthCheck resource in the specified project -// using the data included in the request. -func (r *HttpsHealthChecksService) Insert(project string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksInsertCall { - c := &HttpsHealthChecksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetAutoHealingPolicies: Modifies the autohealing policies. +func (r *InstanceGroupManagersService) SetAutoHealingPolicies(project string, zone string, instanceGroupManager string, instancegroupmanagerssetautohealingrequest *InstanceGroupManagersSetAutoHealingRequest) *InstanceGroupManagersSetAutoHealingPoliciesCall { + c := &InstanceGroupManagersSetAutoHealingPoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpshealthcheck = httpshealthcheck + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagerssetautohealingrequest = instancegroupmanagerssetautohealingrequest return c } @@ -33822,7 +46667,7 @@ func (r *HttpsHealthChecksService) Insert(project string, httpshealthcheck *Http // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *HttpsHealthChecksInsertCall) RequestId(requestId string) *HttpsHealthChecksInsertCall { +func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) RequestId(requestId string) *InstanceGroupManagersSetAutoHealingPoliciesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -33830,7 +46675,7 @@ func (c *HttpsHealthChecksInsertCall) RequestId(requestId string) *HttpsHealthCh // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *HttpsHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpsHealthChecksInsertCall { +func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetAutoHealingPoliciesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -33838,51 +46683,53 @@ func (c *HttpsHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpsHealthC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpsHealthChecksInsertCall) Context(ctx context.Context) *HttpsHealthChecksInsertCall { +func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Context(ctx context.Context) *InstanceGroupManagersSetAutoHealingPoliciesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpsHealthChecksInsertCall) Header() http.Header { +func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpsHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssetautohealingrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.insert" call. +// Do executes the "compute.instanceGroupManagers.setAutoHealingPolicies" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -33913,13 +46760,21 @@ func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Creates a HttpsHealthCheck resource in the specified project using the data included in the request.", + // "description": "Modifies the autohealing policies.", // "httpMethod": "POST", - // "id": "compute.httpsHealthChecks.insert", + // "id": "compute.instanceGroupManagers.setAutoHealingPolicies", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instanceGroupManager" // ], // "parameters": { + // "instanceGroupManager": { + // "description": "The name of the instance group manager.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -33931,11 +46786,17 @@ func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operati // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/httpsHealthChecks", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies", // "request": { - // "$ref": "HttpsHealthCheck" + // "$ref": "InstanceGroupManagersSetAutoHealingRequest" // }, // "response": { // "$ref": "Operation" @@ -33948,156 +46809,108 @@ func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.httpsHealthChecks.list": +// method id "compute.instanceGroupManagers.setInstanceTemplate": -type HttpsHealthChecksListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersSetInstanceTemplateCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagerssetinstancetemplaterequest *InstanceGroupManagersSetInstanceTemplateRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of HttpsHealthCheck resources available to -// the specified project. -func (r *HttpsHealthChecksService) List(project string) *HttpsHealthChecksListCall { - c := &HttpsHealthChecksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetInstanceTemplate: Specifies the instance template to use when +// creating new instances in this group. The templates for existing +// instances in the group do not change unless you recreate them. 
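(Sketch continued: switching a group to a new template with the SetInstanceTemplate method documented above. The InstanceTemplate field name on the request struct is an assumption from the public Compute API — it takes the template's self-link URL; per the description, existing instances keep their current template until recreated.)

func setTemplate(ctx context.Context, svc *compute.Service, project, zone, name, templateURL string) error {
	req := &compute.InstanceGroupManagersSetInstanceTemplateRequest{
		InstanceTemplate: templateURL, // assumed field name, not shown in this diff
	}
	_, err := svc.InstanceGroupManagers.SetInstanceTemplate(project, zone, name, req).Context(ctx).Do()
	return err
}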
+func (r *InstanceGroupManagersService) SetInstanceTemplate(project string, zone string, instanceGroupManager string, instancegroupmanagerssetinstancetemplaterequest *InstanceGroupManagersSetInstanceTemplateRequest) *InstanceGroupManagersSetInstanceTemplateCall { + c := &InstanceGroupManagersSetInstanceTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagerssetinstancetemplaterequest = instancegroupmanagerssetinstancetemplaterequest return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *HttpsHealthChecksListCall) Filter(filter string) *HttpsHealthChecksListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *HttpsHealthChecksListCall) MaxResults(maxResults int64) *HttpsHealthChecksListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. 
+// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *HttpsHealthChecksListCall) OrderBy(orderBy string) *HttpsHealthChecksListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *HttpsHealthChecksListCall) PageToken(pageToken string) *HttpsHealthChecksListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupManagersSetInstanceTemplateCall) RequestId(requestId string) *InstanceGroupManagersSetInstanceTemplateCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpsHealthChecksListCall) Fields(s ...googleapi.Field) *HttpsHealthChecksListCall { +func (c *InstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetInstanceTemplateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *HttpsHealthChecksListCall) IfNoneMatch(entityTag string) *HttpsHealthChecksListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpsHealthChecksListCall) Context(ctx context.Context) *HttpsHealthChecksListCall { +func (c *InstanceGroupManagersSetInstanceTemplateCall) Context(ctx context.Context) *InstanceGroupManagersSetInstanceTemplateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *HttpsHealthChecksListCall) Header() http.Header { +func (c *InstanceGroupManagersSetInstanceTemplateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpsHealthChecksListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssetinstancetemplaterequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.list" call. -// Exactly one of *HttpsHealthCheckList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *HttpsHealthCheckList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHealthCheckList, error) { +// Do executes the "compute.instanceGroupManagers.setInstanceTemplate" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -34116,7 +46929,7 @@ func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHeal if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &HttpsHealthCheckList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -34128,98 +46941,81 @@ func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHeal } return ret, nil // { - // "description": "Retrieves the list of HttpsHealthCheck resources available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.httpsHealthChecks.list", + // "description": "Specifies the instance template to use when creating new instances in this group. 
The templates for existing instances in the group do not change unless you recreate them.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.setInstanceTemplate", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, // "type": "string" // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/httpsHealthChecks", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", + // "request": { + // "$ref": "InstanceGroupManagersSetInstanceTemplateRequest" + // }, // "response": { - // "$ref": "HttpsHealthCheckList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *HttpsHealthChecksListCall) Pages(ctx context.Context, f func(*HttpsHealthCheckList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.httpsHealthChecks.patch": +// method id "compute.instanceGroupManagers.setTargetPools": -type HttpsHealthChecksPatchCall struct { - s *Service - project string - httpsHealthCheck string - httpshealthcheck *HttpsHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersSetTargetPoolsCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanagerssettargetpoolsrequest *InstanceGroupManagersSetTargetPoolsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates a HttpsHealthCheck resource in the specified project -// using the data included in the request. This method supports PATCH -// semantics and uses the JSON merge patch format and processing rules. -func (r *HttpsHealthChecksService) Patch(project string, httpsHealthCheck string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksPatchCall { - c := &HttpsHealthChecksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetTargetPools: Modifies the target pools to which all instances in +// this managed instance group are assigned. The target pools +// automatically apply to all of the instances in the managed instance +// group. 
This operation is marked DONE when you make the request even +// if the instances have not yet been added to their target pools. The +// change might take some time to apply to all of the instances in the +// group depending on the size of the group. +func (r *InstanceGroupManagersService) SetTargetPools(project string, zone string, instanceGroupManager string, instancegroupmanagerssettargetpoolsrequest *InstanceGroupManagersSetTargetPoolsRequest) *InstanceGroupManagersSetTargetPoolsCall { + c := &InstanceGroupManagersSetTargetPoolsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpsHealthCheck = httpsHealthCheck - c.httpshealthcheck = httpshealthcheck + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanagerssettargetpoolsrequest = instancegroupmanagerssettargetpoolsrequest return c } @@ -34237,7 +47033,7 @@ func (r *HttpsHealthChecksService) Patch(project string, httpsHealthCheck string // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *HttpsHealthChecksPatchCall) RequestId(requestId string) *HttpsHealthChecksPatchCall { +func (c *InstanceGroupManagersSetTargetPoolsCall) RequestId(requestId string) *InstanceGroupManagersSetTargetPoolsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -34245,7 +47041,7 @@ func (c *HttpsHealthChecksPatchCall) RequestId(requestId string) *HttpsHealthChe // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpsHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpsHealthChecksPatchCall { +func (c *InstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetTargetPoolsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -34253,52 +47049,53 @@ func (c *HttpsHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpsHealthCh // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpsHealthChecksPatchCall) Context(ctx context.Context) *HttpsHealthChecksPatchCall { +func (c *InstanceGroupManagersSetTargetPoolsCall) Context(ctx context.Context) *InstanceGroupManagersSetTargetPoolsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *HttpsHealthChecksPatchCall) Header() http.Header { +func (c *InstanceGroupManagersSetTargetPoolsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpsHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssettargetpoolsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpsHealthCheck": c.httpsHealthCheck, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.patch" call. +// Do executes the "compute.instanceGroupManagers.setTargetPools" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -34329,18 +47126,18 @@ func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.httpsHealthChecks.patch", + // "description": "Modifies the target pools to which all instances in this managed instance group are assigned. The target pools automatically apply to all of the instances in the managed instance group. This operation is marked DONE when you make the request even if the instances have not yet been added to their target pools. 
The change might take some time to apply to all of the instances in the group depending on the size of the group.", + // "httpMethod": "POST", + // "id": "compute.instanceGroupManagers.setTargetPools", // "parameterOrder": [ // "project", - // "httpsHealthCheck" + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "httpsHealthCheck": { - // "description": "Name of the HttpsHealthCheck resource to patch.", + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -34355,29 +47152,35 @@ func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operatio // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the managed instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", // "request": { - // "$ref": "HttpsHealthCheck" + // "$ref": "InstanceGroupManagersSetTargetPoolsRequest" // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.httpsHealthChecks.testIamPermissions": +// method id "compute.instanceGroupManagers.testIamPermissions": -type HttpsHealthChecksTestIamPermissionsCall struct { +type InstanceGroupManagersTestIamPermissionsCall struct { s *Service project string + zone string resource string testpermissionsrequest *TestPermissionsRequest urlParams_ gensupport.URLParams @@ -34387,9 +47190,10 @@ type HttpsHealthChecksTestIamPermissionsCall struct { // TestIamPermissions: Returns permissions that a caller has on the // specified resource. 
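// Illustrative sketch, not part of the vendored diff: one plausible way to call the
// compute.instanceGroupManagers.setTargetPools method shown above through this generated
// client. svc is assumed to be an authenticated *Service built from the package version
// vendored in this change; the project, zone, group, and target-pool names are placeholders,
// and the TargetPools field layout is assumed from the released API surface.
func exampleSetTargetPools(ctx context.Context, svc *Service) (*Operation, error) {
	req := &InstanceGroupManagersSetTargetPoolsRequest{
		// Target pools are referenced by URL; per the method description above, the
		// change is applied to existing instances asynchronously after DONE.
		TargetPools: []string{"regions/us-central1/targetPools/example-pool"},
	}
	return svc.InstanceGroupManagers.
		SetTargetPools("example-project", "us-central1-f", "example-igm", req).
		Context(ctx).
		Do()
}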
-func (r *HttpsHealthChecksService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *HttpsHealthChecksTestIamPermissionsCall { - c := &HttpsHealthChecksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *InstanceGroupManagersService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstanceGroupManagersTestIamPermissionsCall { + c := &InstanceGroupManagersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone c.resource = resource c.testpermissionsrequest = testpermissionsrequest return c @@ -34398,7 +47202,7 @@ func (r *HttpsHealthChecksService) TestIamPermissions(project string, resource s // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpsHealthChecksTestIamPermissionsCall) Fields(s ...googleapi.Field) *HttpsHealthChecksTestIamPermissionsCall { +func (c *InstanceGroupManagersTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -34406,21 +47210,21 @@ func (c *HttpsHealthChecksTestIamPermissionsCall) Fields(s ...googleapi.Field) * // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpsHealthChecksTestIamPermissionsCall) Context(ctx context.Context) *HttpsHealthChecksTestIamPermissionsCall { +func (c *InstanceGroupManagersTestIamPermissionsCall) Context(ctx context.Context) *InstanceGroupManagersTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpsHealthChecksTestIamPermissionsCall) Header() http.Header { +func (c *InstanceGroupManagersTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpsHealthChecksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -34433,25 +47237,26 @@ func (c *HttpsHealthChecksTestIamPermissionsCall) doRequest(alt string) (*http.R } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "zone": c.zone, "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.testIamPermissions" call. +// Do executes the "compute.instanceGroupManagers.testIamPermissions" call. // Exactly one of *TestPermissionsResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *TestPermissionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. 
Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *HttpsHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *InstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -34484,9 +47289,10 @@ func (c *HttpsHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOptio // { // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.httpsHealthChecks.testIamPermissions", + // "id": "compute.instanceGroupManagers.testIamPermissions", // "parameterOrder": [ // "project", + // "zone", // "resource" // ], // "parameters": { @@ -34500,12 +47306,19 @@ func (c *HttpsHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOptio // "resource": { // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/httpsHealthChecks/{resource}/testIamPermissions", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{resource}/testIamPermissions", // "request": { // "$ref": "TestPermissionsRequest" // }, @@ -34521,25 +47334,30 @@ func (c *HttpsHealthChecksTestIamPermissionsCall) Do(opts ...googleapi.CallOptio } -// method id "compute.httpsHealthChecks.update": +// method id "compute.instanceGroupManagers.update": -type HttpsHealthChecksUpdateCall struct { - s *Service - project string - httpsHealthCheck string - httpshealthcheck *HttpsHealthCheck - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupManagersUpdateCall struct { + s *Service + project string + zone string + instanceGroupManager string + instancegroupmanager *InstanceGroupManager + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates a HttpsHealthCheck resource in the specified project -// using the data included in the request. -func (r *HttpsHealthChecksService) Update(project string, httpsHealthCheck string, httpshealthcheck *HttpsHealthCheck) *HttpsHealthChecksUpdateCall { - c := &HttpsHealthChecksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates a managed instance group using the information that +// you specify in the request. This operation is marked as DONE when the +// group is updated even if the instances in the group have not yet been +// updated. You must separately verify the status of the individual +// instances with the listManagedInstances method. 
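// Illustrative sketch, not part of the vendored diff: a read-modify-write call to the
// compute.instanceGroupManagers.update method described above. svc is assumed to be an
// authenticated *Service from the vendored package; Get is assumed to be defined elsewhere
// in this generated service (it is not part of this hunk), and all names are placeholders.
func exampleUpdateInstanceGroupManager(ctx context.Context, svc *Service) (*Operation, error) {
	// PUT replaces the whole resource, so start from the current state.
	igm, err := svc.InstanceGroupManagers.
		Get("example-project", "us-central1-f", "example-igm").
		Context(ctx).
		Do()
	if err != nil {
		return nil, err
	}
	igm.TargetSize = 5 // hypothetical change
	// The returned Operation is DONE once the group resource is updated; per the
	// description above, individual instances may still be pending and should be
	// verified separately (e.g. with listManagedInstances).
	return svc.InstanceGroupManagers.
		Update("example-project", "us-central1-f", "example-igm", igm).
		Context(ctx).
		Do()
}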
+func (r *InstanceGroupManagersService) Update(project string, zone string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersUpdateCall { + c := &InstanceGroupManagersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.httpsHealthCheck = httpsHealthCheck - c.httpshealthcheck = httpshealthcheck + c.zone = zone + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanager = instancegroupmanager return c } @@ -34557,7 +47375,7 @@ func (r *HttpsHealthChecksService) Update(project string, httpsHealthCheck strin // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *HttpsHealthChecksUpdateCall) RequestId(requestId string) *HttpsHealthChecksUpdateCall { +func (c *InstanceGroupManagersUpdateCall) RequestId(requestId string) *InstanceGroupManagersUpdateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -34565,7 +47383,7 @@ func (c *HttpsHealthChecksUpdateCall) RequestId(requestId string) *HttpsHealthCh // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *HttpsHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpsHealthChecksUpdateCall { +func (c *InstanceGroupManagersUpdateCall) Fields(s ...googleapi.Field) *InstanceGroupManagersUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -34573,52 +47391,53 @@ func (c *HttpsHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpsHealthC // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *HttpsHealthChecksUpdateCall) Context(ctx context.Context) *HttpsHealthChecksUpdateCall { +func (c *InstanceGroupManagersUpdateCall) Context(ctx context.Context) *InstanceGroupManagersUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *HttpsHealthChecksUpdateCall) Header() http.Header { +func (c *InstanceGroupManagersUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *HttpsHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupManagersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "httpsHealthCheck": c.httpsHealthCheck, + "project": c.project, + "zone": c.zone, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.httpsHealthChecks.update" call. +// Do executes the "compute.instanceGroupManagers.update" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -34649,18 +47468,18 @@ func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Updates a HttpsHealthCheck resource in the specified project using the data included in the request.", + // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is updated even if the instances in the group have not yet been updated. You must separately verify the status of the individual instances with the listManagedInstances method.", // "httpMethod": "PUT", - // "id": "compute.httpsHealthChecks.update", + // "id": "compute.instanceGroupManagers.update", // "parameterOrder": [ // "project", - // "httpsHealthCheck" + // "zone", + // "instanceGroupManager" // ], // "parameters": { - // "httpsHealthCheck": { - // "description": "Name of the HttpsHealthCheck resource to update.", + // "instanceGroupManager": { + // "description": "The name of the instance group manager.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -34675,11 +47494,17 @@ func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operati // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where you want to create the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", + // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", // "request": { - // "$ref": "HttpsHealthCheck" + // "$ref": "InstanceGroupManager" // }, // "response": { // "$ref": "Operation" @@ -34692,23 +47517,28 @@ func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.images.delete": +// method id "compute.instanceGroups.addInstances": -type ImagesDeleteCall struct { - s *Service - project string - image string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupsAddInstancesCall struct { + s *Service + project string + zone string + instanceGroup string + instancegroupsaddinstancesrequest *InstanceGroupsAddInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified image. -// For details, see https://cloud.google.com/compute/docs/reference/latest/images/delete -func (r *ImagesService) Delete(project string, image string) *ImagesDeleteCall { - c := &ImagesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AddInstances: Adds a list of instances to the specified instance +// group. All of the instances in the instance group must be in the same +// network/subnetwork. Read Adding instances for more information. +func (r *InstanceGroupsService) AddInstances(project string, zone string, instanceGroup string, instancegroupsaddinstancesrequest *InstanceGroupsAddInstancesRequest) *InstanceGroupsAddInstancesCall { + c := &InstanceGroupsAddInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.image = image + c.zone = zone + c.instanceGroup = instanceGroup + c.instancegroupsaddinstancesrequest = instancegroupsaddinstancesrequest return c } @@ -34726,7 +47556,7 @@ func (r *ImagesService) Delete(project string, image string) *ImagesDeleteCall { // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ImagesDeleteCall) RequestId(requestId string) *ImagesDeleteCall { +func (c *InstanceGroupsAddInstancesCall) RequestId(requestId string) *InstanceGroupsAddInstancesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -34734,7 +47564,7 @@ func (c *ImagesDeleteCall) RequestId(requestId string) *ImagesDeleteCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesDeleteCall) Fields(s ...googleapi.Field) *ImagesDeleteCall { +func (c *InstanceGroupsAddInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsAddInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -34742,47 +47572,53 @@ func (c *ImagesDeleteCall) Fields(s ...googleapi.Field) *ImagesDeleteCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
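// Illustrative sketch, not part of the vendored diff: calling the
// compute.instanceGroups.addInstances method introduced above. svc is assumed to be an
// authenticated *Service from the vendored package; the instance-reference field layout
// is assumed from the released API surface, and all names and the request ID are placeholders.
func exampleAddInstances(ctx context.Context, svc *Service) (*Operation, error) {
	req := &InstanceGroupsAddInstancesRequest{
		// Instances are referenced by URL; all of them must be in the same
		// network/subnetwork as the group, per the method description above.
		Instances: []*InstanceReference{
			{Instance: "zones/us-central1-f/instances/example-instance"},
		},
	}
	return svc.InstanceGroups.
		AddInstances("example-project", "us-central1-f", "example-group", req).
		// requestId must be a unique, non-zero UUID if retries should be deduplicated.
		RequestId("00112233-4455-6677-8899-aabbccddeeff").
		Context(ctx).
		Do()
}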
-func (c *ImagesDeleteCall) Context(ctx context.Context) *ImagesDeleteCall { +func (c *InstanceGroupsAddInstancesCall) Context(ctx context.Context) *InstanceGroupsAddInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ImagesDeleteCall) Header() http.Header { +func (c *InstanceGroupsAddInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsAddInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupsaddinstancesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "image": c.image, + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.delete" call. +// Do executes the "compute.instanceGroups.addInstances" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -34813,18 +47649,18 @@ func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Deletes the specified image.", - // "httpMethod": "DELETE", - // "id": "compute.images.delete", + // "description": "Adds a list of instances to the specified instance group. All of the instances in the instance group must be in the same network/subnetwork. Read Adding instances for more information.", + // "httpMethod": "POST", + // "id": "compute.instanceGroups.addInstances", // "parameterOrder": [ // "project", - // "image" + // "zone", + // "instanceGroup" // ], // "parameters": { - // "image": { - // "description": "Name of the image resource to delete.", + // "instanceGroup": { + // "description": "The name of the instance group where you are adding instances.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -34839,9 +47675,18 @@ func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/images/{image}", + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances", + // "request": { + // "$ref": "InstanceGroupsAddInstancesRequest" + // }, // "response": { // "$ref": "Operation" // }, @@ -34853,104 +47698,156 @@ func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) } -// method id "compute.images.deprecate": +// method id "compute.instanceGroups.aggregatedList": -type ImagesDeprecateCall struct { - s *Service - project string - image string - deprecationstatus *DeprecationStatus - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupsAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Deprecate: Sets the deprecation status of an image. -// -// If an empty request body is given, clears the deprecation status -// instead. -// For details, see https://cloud.google.com/compute/docs/reference/latest/images/deprecate -func (r *ImagesService) Deprecate(project string, image string, deprecationstatus *DeprecationStatus) *ImagesDeprecateCall { - c := &ImagesDeprecateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves the list of instance groups and sorts them +// by zone. +func (r *InstanceGroupsService) AggregatedList(project string) *InstanceGroupsAggregatedListCall { + c := &InstanceGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.image = image - c.deprecationstatus = deprecationstatus return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. // -// For example, consider a situation where you make an initial request -// and then the request times out. If you make the request again with -// the same request ID, the server can check if original operation with -// the same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -func (c *ImagesDeprecateCall) RequestId(requestId string) *ImagesDeprecateCall { - c.urlParams_.Set("requestId", requestId) +// The field_name is the name of the field you want to compare. 
Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *InstanceGroupsAggregatedListCall) Filter(filter string) *InstanceGroupsAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *InstanceGroupsAggregatedListCall) MaxResults(maxResults int64) *InstanceGroupsAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InstanceGroupsAggregatedListCall) OrderBy(orderBy string) *InstanceGroupsAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InstanceGroupsAggregatedListCall) PageToken(pageToken string) *InstanceGroupsAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *ImagesDeprecateCall) Fields(s ...googleapi.Field) *ImagesDeprecateCall { +func (c *InstanceGroupsAggregatedListCall) Fields(s ...googleapi.Field) *InstanceGroupsAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstanceGroupsAggregatedListCall) IfNoneMatch(entityTag string) *InstanceGroupsAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesDeprecateCall) Context(ctx context.Context) *ImagesDeprecateCall { +func (c *InstanceGroupsAggregatedListCall) Context(ctx context.Context) *InstanceGroupsAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ImagesDeprecateCall) Header() http.Header { +func (c *InstanceGroupsAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesDeprecateCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.deprecationstatus) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}/deprecate") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instanceGroups") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "image": c.image, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.deprecate" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instanceGroups.aggregatedList" call. +// Exactly one of *InstanceGroupAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *InstanceGroupAggregatedList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. 
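// Illustrative sketch, not part of the vendored diff: iterating the
// compute.instanceGroups.aggregatedList results with the Pages helper defined later for
// this call, so nextPageToken handling is implicit. svc is assumed to be an authenticated
// *Service from the vendored package; the scoped-list layout of InstanceGroupAggregatedList
// is assumed from the released API surface, and the project name is a placeholder.
func exampleListAllInstanceGroups(ctx context.Context, svc *Service) error {
	return svc.InstanceGroups.
		AggregatedList("example-project").
		MaxResults(100).
		Pages(ctx, func(page *InstanceGroupAggregatedList) error {
			for zone, scoped := range page.Items {
				for _, ig := range scoped.InstanceGroups {
					fmt.Printf("%s: %s (%d instances)\n", zone, ig.Name, ig.Size)
				}
			}
			return nil
		})
}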
+func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -34969,7 +47866,7 @@ func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, erro if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InstanceGroupAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -34981,19 +47878,34 @@ func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Sets the deprecation status of an image.\n\nIf an empty request body is given, clears the deprecation status instead.", - // "httpMethod": "POST", - // "id": "compute.images.deprecate", + // "description": "Retrieves the list of instance groups and sorts them by zone.", + // "httpMethod": "GET", + // "id": "compute.instanceGroups.aggregatedList", // "parameterOrder": [ - // "project", - // "image" + // "project" // ], // "parameters": { - // "image": { - // "description": "Image name.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". 
This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -35002,115 +47914,138 @@ func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and then the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/global/images/{image}/deprecate", - // "request": { - // "$ref": "DeprecationStatus" - // }, + // "path": "{project}/aggregated/instanceGroups", // "response": { - // "$ref": "Operation" + // "$ref": "InstanceGroupAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.images.get": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InstanceGroupsAggregatedListCall) Pages(ctx context.Context, f func(*InstanceGroupAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type ImagesGetCall struct { - s *Service - project string - image string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// method id "compute.instanceGroups.delete": + +type InstanceGroupsDeleteCall struct { + s *Service + project string + zone string + instanceGroup string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified image. Get a list of available images by -// making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/images/get -func (r *ImagesService) Get(project string, image string) *ImagesGetCall { - c := &ImagesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified instance group. The instances in the +// group are not deleted. 
Note that instance group must not belong to a +// backend service. Read Deleting an instance group for more +// information. +func (r *InstanceGroupsService) Delete(project string, zone string, instanceGroup string) *InstanceGroupsDeleteCall { + c := &InstanceGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.image = image + c.zone = zone + c.instanceGroup = instanceGroup + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupsDeleteCall) RequestId(requestId string) *InstanceGroupsDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesGetCall) Fields(s ...googleapi.Field) *ImagesGetCall { +func (c *InstanceGroupsDeleteCall) Fields(s ...googleapi.Field) *InstanceGroupsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ImagesGetCall) IfNoneMatch(entityTag string) *ImagesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesGetCall) Context(ctx context.Context) *ImagesGetCall { +func (c *InstanceGroupsDeleteCall) Context(ctx context.Context) *InstanceGroupsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ImagesGetCall) Header() http.Header { +func (c *InstanceGroupsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "image": c.image, + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.get" call. -// Exactly one of *Image or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Image.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) { +// Do executes the "compute.instanceGroups.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -35129,7 +48064,7 @@ func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Image{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -35141,18 +48076,18 @@ func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) { } return ret, nil // { - // "description": "Returns the specified image. Get a list of available images by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.images.get", + // "description": "Deletes the specified instance group. The instances in the group are not deleted. Note that instance group must not belong to a backend service. Read Deleting an instance group for more information.", + // "httpMethod": "DELETE", + // "id": "compute.instanceGroups.delete", // "parameterOrder": [ // "project", - // "image" + // "zone", + // "instanceGroup" // ], // "parameters": { - // "image": { - // "description": "Name of the image resource to return.", + // "instanceGroup": { + // "description": "The name of the instance group to delete.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -35162,46 +48097,58 @@ func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) { // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/images/{image}", + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}", // "response": { - // "$ref": "Image" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.images.getFromFamily": +// method id "compute.instanceGroups.get": -type ImagesGetFromFamilyCall struct { - s *Service - project string - family string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstanceGroupsGetCall struct { + s *Service + project string + zone string + instanceGroup string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// GetFromFamily: Returns the latest image that is part of an image -// family and is not deprecated. -func (r *ImagesService) GetFromFamily(project string, family string) *ImagesGetFromFamilyCall { - c := &ImagesGetFromFamilyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified instance group. Get a list of available +// instance groups by making a list() request. +func (r *InstanceGroupsService) Get(project string, zone string, instanceGroup string) *InstanceGroupsGetCall { + c := &InstanceGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.family = family + c.zone = zone + c.instanceGroup = instanceGroup return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesGetFromFamilyCall) Fields(s ...googleapi.Field) *ImagesGetFromFamilyCall { +func (c *InstanceGroupsGetCall) Fields(s ...googleapi.Field) *InstanceGroupsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -35211,7 +48158,7 @@ func (c *ImagesGetFromFamilyCall) Fields(s ...googleapi.Field) *ImagesGetFromFam // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ImagesGetFromFamilyCall) IfNoneMatch(entityTag string) *ImagesGetFromFamilyCall { +func (c *InstanceGroupsGetCall) IfNoneMatch(entityTag string) *InstanceGroupsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -35219,21 +48166,21 @@ func (c *ImagesGetFromFamilyCall) IfNoneMatch(entityTag string) *ImagesGetFromFa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *ImagesGetFromFamilyCall) Context(ctx context.Context) *ImagesGetFromFamilyCall { +func (c *InstanceGroupsGetCall) Context(ctx context.Context) *InstanceGroupsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ImagesGetFromFamilyCall) Header() http.Header { +func (c *InstanceGroupsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesGetFromFamilyCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -35244,25 +48191,26 @@ func (c *ImagesGetFromFamilyCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/family/{family}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "family": c.family, + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.getFromFamily" call. -// Exactly one of *Image or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Image.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, error) { +// Do executes the "compute.instanceGroups.get" call. +// Exactly one of *InstanceGroup or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *InstanceGroup.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -35281,7 +48229,7 @@ func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, erro if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Image{ + ret := &InstanceGroup{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -35293,18 +48241,18 @@ func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, erro } return ret, nil // { - // "description": "Returns the latest image that is part of an image family and is not deprecated.", + // "description": "Returns the specified instance group. 
Get a list of available instance groups by making a list() request.", // "httpMethod": "GET", - // "id": "compute.images.getFromFamily", + // "id": "compute.instanceGroups.get", // "parameterOrder": [ // "project", - // "family" + // "zone", + // "instanceGroup" // ], // "parameters": { - // "family": { - // "description": "Name of the image family to search for.", + // "instanceGroup": { + // "description": "The name of the instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -35314,11 +48262,17 @@ func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, erro // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/images/family/{family}", + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}", // "response": { - // "$ref": "Image" + // "$ref": "InstanceGroup" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -35329,31 +48283,25 @@ func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, erro } -// method id "compute.images.insert": +// method id "compute.instanceGroups.insert": -type ImagesInsertCall struct { - s *Service - project string - image *Image - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupsInsertCall struct { + s *Service + project string + zone string + instancegroup *InstanceGroup + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates an image in the specified project using the data -// included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/images/insert -func (r *ImagesService) Insert(project string, image *Image) *ImagesInsertCall { - c := &ImagesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates an instance group in the specified project using the +// parameters that are included in the request. +func (r *InstanceGroupsService) Insert(project string, zone string, instancegroup *InstanceGroup) *InstanceGroupsInsertCall { + c := &InstanceGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.image = image - return c -} - -// ForceCreate sets the optional parameter "forceCreate": Force image -// creation if true. -func (c *ImagesInsertCall) ForceCreate(forceCreate bool) *ImagesInsertCall { - c.urlParams_.Set("forceCreate", fmt.Sprint(forceCreate)) + c.zone = zone + c.instancegroup = instancegroup return c } @@ -35371,7 +48319,7 @@ func (c *ImagesInsertCall) ForceCreate(forceCreate bool) *ImagesInsertCall { // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ImagesInsertCall) RequestId(requestId string) *ImagesInsertCall { +func (c *InstanceGroupsInsertCall) RequestId(requestId string) *InstanceGroupsInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -35379,7 +48327,7 @@ func (c *ImagesInsertCall) RequestId(requestId string) *ImagesInsertCall { // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesInsertCall) Fields(s ...googleapi.Field) *ImagesInsertCall { +func (c *InstanceGroupsInsertCall) Fields(s ...googleapi.Field) *InstanceGroupsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -35387,51 +48335,52 @@ func (c *ImagesInsertCall) Fields(s ...googleapi.Field) *ImagesInsertCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesInsertCall) Context(ctx context.Context) *ImagesInsertCall { +func (c *InstanceGroupsInsertCall) Context(ctx context.Context) *InstanceGroupsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ImagesInsertCall) Header() http.Header { +func (c *InstanceGroupsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.image) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroup) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.insert" call. +// Do executes the "compute.instanceGroups.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -35462,18 +48411,14 @@ func (c *ImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Creates an image in the specified project using the data included in the request.", + // "description": "Creates an instance group in the specified project using the parameters that are included in the request.", // "httpMethod": "POST", - // "id": "compute.images.insert", + // "id": "compute.instanceGroups.insert", // "parameterOrder": [ - // "project" + // "project", + // "zone" // ], // "parameters": { - // "forceCreate": { - // "description": "Force image creation if true.", - // "location": "query", - // "type": "boolean" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -35485,48 +48430,47 @@ func (c *ImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where you want to create the instance group.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/images", + // "path": "{project}/zones/{zone}/instanceGroups", // "request": { - // "$ref": "Image" + // "$ref": "InstanceGroup" // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.images.list": +// method id "compute.instanceGroups.list": -type ImagesListCall struct { +type InstanceGroupsListCall struct { s *Service project string + zone string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves the list of private images available to the specified -// project. Private images are images you create that belong to your -// project. This method does not get any images that belong to other -// projects, including publicly-available images, like Debian 8. If you -// want to get a list of publicly-available images, use this method to -// make a request to the respective image project, such as debian-cloud -// or windows-cloud. -// For details, see https://cloud.google.com/compute/docs/reference/latest/images/list -func (r *ImagesService) List(project string) *ImagesListCall { - c := &ImagesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of instance groups that are located in the +// specified project and zone. 
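// ---- Editor's illustrative sketch; not part of the vendored diff. ----
// A hedged example of the zonal list call documented just above, assuming the
// generated package context (unqualified types); the helper name, arguments,
// and the placeholder filter expression are hypothetical.
func exampleListZonalInstanceGroups(ctx context.Context, svc *Service, project, zone string) error {
	call := svc.InstanceGroups.List(project, zone).Filter("name ne unused-group")
	return call.Pages(ctx, func(page *InstanceGroupList) error {
		for _, ig := range page.Items {
			fmt.Println(ig.Name, ig.SelfLink)
		}
		return nil
	})
}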
+func (r *InstanceGroupsService) List(project string, zone string) *InstanceGroupsListCall { + c := &InstanceGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone return c } @@ -35556,7 +48500,7 @@ func (r *ImagesService) List(project string) *ImagesListCall { // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *ImagesListCall) Filter(filter string) *ImagesListCall { +func (c *InstanceGroupsListCall) Filter(filter string) *InstanceGroupsListCall { c.urlParams_.Set("filter", filter) return c } @@ -35567,7 +48511,7 @@ func (c *ImagesListCall) Filter(filter string) *ImagesListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *ImagesListCall) MaxResults(maxResults int64) *ImagesListCall { +func (c *InstanceGroupsListCall) MaxResults(maxResults int64) *InstanceGroupsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -35584,7 +48528,7 @@ func (c *ImagesListCall) MaxResults(maxResults int64) *ImagesListCall { // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *ImagesListCall) OrderBy(orderBy string) *ImagesListCall { +func (c *InstanceGroupsListCall) OrderBy(orderBy string) *InstanceGroupsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -35592,7 +48536,7 @@ func (c *ImagesListCall) OrderBy(orderBy string) *ImagesListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *ImagesListCall) PageToken(pageToken string) *ImagesListCall { +func (c *InstanceGroupsListCall) PageToken(pageToken string) *InstanceGroupsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -35600,7 +48544,7 @@ func (c *ImagesListCall) PageToken(pageToken string) *ImagesListCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesListCall) Fields(s ...googleapi.Field) *ImagesListCall { +func (c *InstanceGroupsListCall) Fields(s ...googleapi.Field) *InstanceGroupsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -35610,7 +48554,7 @@ func (c *ImagesListCall) Fields(s ...googleapi.Field) *ImagesListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ImagesListCall) IfNoneMatch(entityTag string) *ImagesListCall { +func (c *InstanceGroupsListCall) IfNoneMatch(entityTag string) *InstanceGroupsListCall { c.ifNoneMatch_ = entityTag return c } @@ -35618,21 +48562,21 @@ func (c *ImagesListCall) IfNoneMatch(entityTag string) *ImagesListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesListCall) Context(ctx context.Context) *ImagesListCall { +func (c *InstanceGroupsListCall) Context(ctx context.Context) *InstanceGroupsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ImagesListCall) Header() http.Header { +func (c *InstanceGroupsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -35643,24 +48587,25 @@ func (c *ImagesListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.list" call. -// Exactly one of *ImageList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *ImageList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { +// Do executes the "compute.instanceGroups.list" call. +// Exactly one of *InstanceGroupList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceGroupList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -35679,7 +48624,7 @@ func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ImageList{ + ret := &InstanceGroupList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -35691,11 +48636,12 @@ func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { } return ret, nil // { - // "description": "Retrieves the list of private images available to the specified project. Private images are images you create that belong to your project. This method does not get any images that belong to other projects, including publicly-available images, like Debian 8. 
If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", + // "description": "Retrieves the list of instance groups that are located in the specified project and zone.", // "httpMethod": "GET", - // "id": "compute.images.list", + // "id": "compute.instanceGroups.list", // "parameterOrder": [ - // "project" + // "project", + // "zone" // ], // "parameters": { // "filter": { @@ -35727,11 +48673,17 @@ func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone where the instance group is located.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/images", + // "path": "{project}/zones/{zone}/instanceGroups", // "response": { - // "$ref": "ImageList" + // "$ref": "InstanceGroupList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -35745,7 +48697,7 @@ func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *ImagesListCall) Pages(ctx context.Context, f func(*ImageList) error) error { +func (c *InstanceGroupsListCall) Pages(ctx context.Context, f func(*InstanceGroupList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -35763,179 +48715,100 @@ func (c *ImagesListCall) Pages(ctx context.Context, f func(*ImageList) error) er } } -// method id "compute.images.setLabels": +// method id "compute.instanceGroups.listInstances": -type ImagesSetLabelsCall struct { - s *Service - project string - resource string - globalsetlabelsrequest *GlobalSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupsListInstancesCall struct { + s *Service + project string + zone string + instanceGroup string + instancegroupslistinstancesrequest *InstanceGroupsListInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetLabels: Sets the labels on an image. To learn more about labels, -// read the Labeling Resources documentation. -func (r *ImagesService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *ImagesSetLabelsCall { - c := &ImagesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListInstances: Lists the instances in the specified instance group. +func (r *InstanceGroupsService) ListInstances(project string, zone string, instanceGroup string, instancegroupslistinstancesrequest *InstanceGroupsListInstancesRequest) *InstanceGroupsListInstancesCall { + c := &InstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.globalsetlabelsrequest = globalsetlabelsrequest + c.zone = zone + c.instanceGroup = instanceGroup + c.instancegroupslistinstancesrequest = instancegroupslistinstancesrequest return c } -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *ImagesSetLabelsCall) Fields(s ...googleapi.Field) *ImagesSetLabelsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *InstanceGroupsListInstancesCall) Filter(filter string) *InstanceGroupsListInstancesCall { + c.urlParams_.Set("filter", filter) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ImagesSetLabelsCall) Context(ctx context.Context) *ImagesSetLabelsCall { - c.ctx_ = ctx +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *InstanceGroupsListInstancesCall) MaxResults(maxResults int64) *InstanceGroupsListInstancesCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ImagesSetLabelsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ImagesSetLabelsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/setLabels") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.images.setLabels" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ImagesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Sets the labels on an image. To learn more about labels, read the Labeling Resources documentation.", - // "httpMethod": "POST", - // "id": "compute.images.setLabels", - // "parameterOrder": [ - // "project", - // "resource" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/global/images/{resource}/setLabels", - // "request": { - // "$ref": "GlobalSetLabelsRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.images.testIamPermissions": - -type ImagesTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. 
+func (c *InstanceGroupsListInstancesCall) OrderBy(orderBy string) *InstanceGroupsListInstancesCall { + c.urlParams_.Set("orderBy", orderBy) + return c } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *ImagesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *ImagesTestIamPermissionsCall { - c := &ImagesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InstanceGroupsListInstancesCall) PageToken(pageToken string) *InstanceGroupsListInstancesCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ImagesTestIamPermissionsCall) Fields(s ...googleapi.Field) *ImagesTestIamPermissionsCall { +func (c *InstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsListInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -35943,52 +48816,53 @@ func (c *ImagesTestIamPermissionsCall) Fields(s ...googleapi.Field) *ImagesTestI // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ImagesTestIamPermissionsCall) Context(ctx context.Context) *ImagesTestIamPermissionsCall { +func (c *InstanceGroupsListInstancesCall) Context(ctx context.Context) *InstanceGroupsListInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ImagesTestIamPermissionsCall) Header() http.Header { +func (c *InstanceGroupsListInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ImagesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupslistinstancesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.images.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. 
Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.instanceGroups.listInstances" call. +// Exactly one of *InstanceGroupsListInstances or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *InstanceGroupsListInstances.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*InstanceGroupsListInstances, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -36007,7 +48881,7 @@ func (c *ImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPe if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &InstanceGroupsListInstances{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -36019,14 +48893,44 @@ func (c *ImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPe } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Lists the instances in the specified instance group.", // "httpMethod": "POST", - // "id": "compute.images.testIamPermissions", + // "id": "compute.instanceGroups.listInstances", // "parameterOrder": [ // "project", - // "resource" + // "zone", + // "instanceGroup" // ], // "parameters": { + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "instanceGroup": { + // "description": "The name of the instance group from which you want to generate a list of included instances.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -36034,20 +48938,19 @@ func (c *ImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPe // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name of the resource for this request.", + // "zone": { + // "description": "The name of the zone where the instance group is located.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/images/{resource}/testIamPermissions", + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "InstanceGroupsListInstancesRequest" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "InstanceGroupsListInstances" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -36058,42 +48961,52 @@ func (c *ImagesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPe } -// method id "compute.instanceGroupManagers.abandonInstances": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
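// ---- Editor's illustrative sketch; not part of the vendored diff. ----
// Paging through compute.instanceGroups.listInstances with the Pages helper
// documented above. Assumes the generated package context; the helper name,
// the "ALL" instance-state value, and the Items/Instance/Status field names on
// the response are the editor's recollection of this API, not taken from the
// diff itself.
func exampleListGroupMembers(ctx context.Context, svc *Service, project, zone, group string) error {
	req := &InstanceGroupsListInstancesRequest{InstanceState: "ALL"}
	return svc.InstanceGroups.ListInstances(project, zone, group, req).Pages(ctx, func(page *InstanceGroupsListInstances) error {
		for _, member := range page.Items {
			fmt.Println(member.Instance, member.Status)
		}
		return nil
	})
}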
+func (c *InstanceGroupsListInstancesCall) Pages(ctx context.Context, f func(*InstanceGroupsListInstances) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstanceGroupManagersAbandonInstancesCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagersabandoninstancesrequest *InstanceGroupManagersAbandonInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.instanceGroups.removeInstances": + +type InstanceGroupsRemoveInstancesCall struct { + s *Service + project string + zone string + instanceGroup string + instancegroupsremoveinstancesrequest *InstanceGroupsRemoveInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AbandonInstances: Schedules a group action to remove the specified -// instances from the managed instance group. Abandoning an instance -// does not delete the instance, but it does remove the instance from -// any target pools that are applied by the managed instance group. This -// method reduces the targetSize of the managed instance group by the -// number of instances that you abandon. This operation is marked as -// DONE when the action is scheduled even if the instances have not yet -// been removed from the group. You must separately verify the status of -// the abandoning action with the listmanagedinstances method. +// RemoveInstances: Removes one or more instances from the specified +// instance group, but does not delete those instances. // // If the group is part of a backend service that has enabled connection // draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or -// deleted. -// -// You can specify a maximum of 1000 instances with this method per -// request. -func (r *InstanceGroupManagersService) AbandonInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersabandoninstancesrequest *InstanceGroupManagersAbandonInstancesRequest) *InstanceGroupManagersAbandonInstancesCall { - c := &InstanceGroupManagersAbandonInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// duration before the VM instance is removed or deleted. +func (r *InstanceGroupsService) RemoveInstances(project string, zone string, instanceGroup string, instancegroupsremoveinstancesrequest *InstanceGroupsRemoveInstancesRequest) *InstanceGroupsRemoveInstancesCall { + c := &InstanceGroupsRemoveInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagersabandoninstancesrequest = instancegroupmanagersabandoninstancesrequest + c.instanceGroup = instanceGroup + c.instancegroupsremoveinstancesrequest = instancegroupsremoveinstancesrequest return c } @@ -36111,7 +49024,7 @@ func (r *InstanceGroupManagersService) AbandonInstances(project string, zone str // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *InstanceGroupManagersAbandonInstancesCall) RequestId(requestId string) *InstanceGroupManagersAbandonInstancesCall { +func (c *InstanceGroupsRemoveInstancesCall) RequestId(requestId string) *InstanceGroupsRemoveInstancesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -36119,7 +49032,7 @@ func (c *InstanceGroupManagersAbandonInstancesCall) RequestId(requestId string) // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersAbandonInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersAbandonInstancesCall { +func (c *InstanceGroupsRemoveInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsRemoveInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -36127,53 +49040,53 @@ func (c *InstanceGroupManagersAbandonInstancesCall) Fields(s ...googleapi.Field) // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersAbandonInstancesCall) Context(ctx context.Context) *InstanceGroupManagersAbandonInstancesCall { +func (c *InstanceGroupsRemoveInstancesCall) Context(ctx context.Context) *InstanceGroupsRemoveInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersAbandonInstancesCall) Header() http.Header { +func (c *InstanceGroupsRemoveInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsRemoveInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersabandoninstancesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupsremoveinstancesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.abandonInstances" call. +// Do executes the "compute.instanceGroups.removeInstances" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
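// ---- Editor's illustrative sketch; not part of the vendored diff. ----
// Removing a single instance (by URL) from an unmanaged instance group via the
// RemoveInstances call documented above. Assumes the generated package context;
// the helper name and the Instances/InstanceReference request shape are the
// editor's recollection of this API. The returned Operation is asynchronous,
// so callers typically poll it (for example via the zone operations service)
// until it reports completion.
func exampleRemoveInstance(ctx context.Context, svc *Service, project, zone, group, instanceURL string) (*Operation, error) {
	req := &InstanceGroupsRemoveInstancesRequest{
		Instances: []*InstanceReference{{Instance: instanceURL}},
	}
	return svc.InstanceGroups.RemoveInstances(project, zone, group, req).Context(ctx).Do()
}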
-func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceGroupsRemoveInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -36204,17 +49117,17 @@ func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOpt } return ret, nil // { - // "description": "Schedules a group action to remove the specified instances from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. You must separately verify the status of the abandoning action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "description": "Removes one or more instances from the specified instance group, but does not delete those instances.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration before the VM instance is removed or deleted.", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.abandonInstances", + // "id": "compute.instanceGroups.removeInstances", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instanceGroup" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "instanceGroup": { + // "description": "The name of the instance group where the specified instances will be removed.", // "location": "path", // "required": true, // "type": "string" @@ -36232,15 +49145,15 @@ func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOpt // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone where the instance group is located.", // "location": "path", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances", // "request": { - // "$ref": "InstanceGroupManagersAbandonInstancesRequest" + // "$ref": "InstanceGroupsRemoveInstancesRequest" // }, // "response": { // "$ref": "Operation" @@ -36253,157 +49166,106 @@ func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOpt } -// method id "compute.instanceGroupManagers.aggregatedList": +// method id "compute.instanceGroups.setNamedPorts": -type InstanceGroupManagersAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstanceGroupsSetNamedPortsCall struct { + s *Service + project string + zone string + instanceGroup string + instancegroupssetnamedportsrequest 
*InstanceGroupsSetNamedPortsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AggregatedList: Retrieves the list of managed instance groups and -// groups them by zone. -func (r *InstanceGroupManagersService) AggregatedList(project string) *InstanceGroupManagersAggregatedListCall { - c := &InstanceGroupManagersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetNamedPorts: Sets the named ports for the specified instance group. +func (r *InstanceGroupsService) SetNamedPorts(project string, zone string, instanceGroup string, instancegroupssetnamedportsrequest *InstanceGroupsSetNamedPortsRequest) *InstanceGroupsSetNamedPortsCall { + c := &InstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone + c.instanceGroup = instanceGroup + c.instancegroupssetnamedportsrequest = instancegroupssetnamedportsrequest return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *InstanceGroupManagersAggregatedListCall) Filter(filter string) *InstanceGroupManagersAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InstanceGroupManagersAggregatedListCall) MaxResults(maxResults int64) *InstanceGroupManagersAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. 
Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InstanceGroupManagersAggregatedListCall) OrderBy(orderBy string) *InstanceGroupManagersAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InstanceGroupManagersAggregatedListCall) PageToken(pageToken string) *InstanceGroupManagersAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupsSetNamedPortsCall) RequestId(requestId string) *InstanceGroupsSetNamedPortsCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersAggregatedListCall) Fields(s ...googleapi.Field) *InstanceGroupManagersAggregatedListCall { +func (c *InstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *InstanceGroupsSetNamedPortsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstanceGroupManagersAggregatedListCall) IfNoneMatch(entityTag string) *InstanceGroupManagersAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersAggregatedListCall) Context(ctx context.Context) *InstanceGroupManagersAggregatedListCall { +func (c *InstanceGroupsSetNamedPortsCall) Context(ctx context.Context) *InstanceGroupsSetNamedPortsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupManagersAggregatedListCall) Header() http.Header { +func (c *InstanceGroupsSetNamedPortsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupssetnamedportsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instanceGroupManagers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.aggregatedList" call. -// Exactly one of *InstanceGroupManagerAggregatedList or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *InstanceGroupManagerAggregatedList.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagerAggregatedList, error) { +// Do executes the "compute.instanceGroups.setNamedPorts" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -36422,7 +49284,7 @@ func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOptio if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupManagerAggregatedList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -36434,125 +49296,83 @@ func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Retrieves the list of managed instance groups and groups them by zone.", - // "httpMethod": "GET", - // "id": "compute.instanceGroupManagers.aggregatedList", + // "description": "Sets the named ports for the specified instance group.", + // "httpMethod": "POST", + // "id": "compute.instanceGroups.setNamedPorts", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instanceGroup" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", + // "instanceGroup": { + // "description": "The name of the instance group where the named ports are updated.", + // "location": "path", + // "required": true, // "type": "string" // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "zone": { + // "description": "The name of the zone where the instance group is located.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/aggregated/instanceGroupManagers", + // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts", + // "request": { + // "$ref": "InstanceGroupsSetNamedPortsRequest" + // }, // "response": { - // "$ref": "InstanceGroupManagerAggregatedList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
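// Illustrative usage sketch (not part of the vendored patch): probing the caller's permissions
// on an instance group with the testIamPermissions call whose plumbing follows in the hunks
// below. Assumes "context" and "google.golang.org/api/compute/v1" are imported; the permission
// string and resource names are placeholders.
func canReadGroup(ctx context.Context, svc *compute.Service) (bool, error) {
	req := &compute.TestPermissionsRequest{Permissions: []string{"compute.instanceGroups.get"}}
	resp, err := svc.InstanceGroups.TestIamPermissions("my-project", "us-central1-a", "my-group", req).
		Context(ctx).
		Do()
	if err != nil {
		return false, err
	}
	// The response echoes back only the permissions the caller actually holds.
	for _, p := range resp.Permissions {
		if p == "compute.instanceGroups.get" {
			return true, nil
		}
	}
	return false, nil
}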
-func (c *InstanceGroupManagersAggregatedListCall) Pages(ctx context.Context, f func(*InstanceGroupManagerAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instanceGroupManagers.delete": +// method id "compute.instanceGroups.testIamPermissions": -type InstanceGroupManagersDeleteCall struct { - s *Service - project string - zone string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceGroupsTestIamPermissionsCall struct { + s *Service + project string + zone string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified managed instance group and all of the -// instances in that group. Note that the instance group must not belong -// to a backend service. Read Deleting an instance group for more -// information. -func (r *InstanceGroupManagersService) Delete(project string, zone string, instanceGroupManager string) *InstanceGroupManagersDeleteCall { - c := &InstanceGroupManagersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *InstanceGroupsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstanceGroupsTestIamPermissionsCall { + c := &InstanceGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersDeleteCall) RequestId(requestId string) *InstanceGroupManagersDeleteCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersDeleteCall) Fields(s ...googleapi.Field) *InstanceGroupManagersDeleteCall { +func (c *InstanceGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstanceGroupsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -36560,48 +49380,53 @@ func (c *InstanceGroupManagersDeleteCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersDeleteCall) Context(ctx context.Context) *InstanceGroupManagersDeleteCall { +func (c *InstanceGroupsTestIamPermissionsCall) Context(ctx context.Context) *InstanceGroupsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersDeleteCall) Header() http.Header { +func (c *InstanceGroupsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instanceGroups.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -36620,7 +49445,7 @@ func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Ope if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -36632,21 +49457,15 @@ func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Deletes the specified managed instance group and all of the instances in that group. Note that the instance group must not belong to a backend service. Read Deleting an instance group for more information.", - // "httpMethod": "DELETE", - // "id": "compute.instanceGroupManagers.delete", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.instanceGroups.testIamPermissions", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "resource" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group to delete.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -36654,65 +49473,58 @@ func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Ope // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", + // "path": "{project}/zones/{zone}/instanceGroups/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instanceGroupManagers.deleteInstances": +// method id "compute.instanceTemplates.delete": -type InstanceGroupManagersDeleteInstancesCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagersdeleteinstancesrequest *InstanceGroupManagersDeleteInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceTemplatesDeleteCall struct { + s *Service + project string + instanceTemplate string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// DeleteInstances: Schedules a group action to delete the specified -// instances in the managed instance group. The instances are also -// removed from any target pools of which they were a member. This -// method reduces the targetSize of the managed instance group by the -// number of instances that you delete. This operation is marked as DONE -// when the action is scheduled even if the instances are still being -// deleted. You must separately verify the status of the deleting action -// with the listmanagedinstances method. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or -// deleted. -// -// You can specify a maximum of 1000 instances with this method per -// request. -func (r *InstanceGroupManagersService) DeleteInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersdeleteinstancesrequest *InstanceGroupManagersDeleteInstancesRequest) *InstanceGroupManagersDeleteInstancesCall { - c := &InstanceGroupManagersDeleteInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified instance template. If you delete an +// instance template that is being referenced from another instance +// group, the instance group will not be able to create or recreate +// virtual machine instances. Deleting an instance template is permanent +// and cannot be undone. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/delete +func (r *InstanceTemplatesService) Delete(project string, instanceTemplate string) *InstanceTemplatesDeleteCall { + c := &InstanceTemplatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagersdeleteinstancesrequest = instancegroupmanagersdeleteinstancesrequest + c.instanceTemplate = instanceTemplate return c } @@ -36730,7 +49542,7 @@ func (r *InstanceGroupManagersService) DeleteInstances(project string, zone stri // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersDeleteInstancesCall) RequestId(requestId string) *InstanceGroupManagersDeleteInstancesCall { +func (c *InstanceTemplatesDeleteCall) RequestId(requestId string) *InstanceTemplatesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -36738,7 +49550,7 @@ func (c *InstanceGroupManagersDeleteInstancesCall) RequestId(requestId string) * // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersDeleteInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersDeleteInstancesCall { +func (c *InstanceTemplatesDeleteCall) Fields(s ...googleapi.Field) *InstanceTemplatesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -36746,53 +49558,47 @@ func (c *InstanceGroupManagersDeleteInstancesCall) Fields(s ...googleapi.Field) // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersDeleteInstancesCall) Context(ctx context.Context) *InstanceGroupManagersDeleteInstancesCall { +func (c *InstanceTemplatesDeleteCall) Context(ctx context.Context) *InstanceTemplatesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersDeleteInstancesCall) Header() http.Header { +func (c *InstanceTemplatesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersdeleteinstancesrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{instanceTemplate}") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "instanceTemplate": c.instanceTemplate, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.deleteInstances" call. +// Do executes the "compute.instanceTemplates.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -36823,18 +49629,18 @@ func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOpti } return ret, nil // { - // "description": "Schedules a group action to delete the specified instances in the managed instance group. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. This operation is marked as DONE when the action is scheduled even if the instances are still being deleted. You must separately verify the status of the deleting action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.deleteInstances", + // "description": "Deletes the specified instance template. If you delete an instance template that is being referenced from another instance group, the instance group will not be able to create or recreate virtual machine instances. Deleting an instance template is permanent and cannot be undone.", + // "httpMethod": "DELETE", + // "id": "compute.instanceTemplates.delete", // "parameterOrder": [ // "project", - // "zone", - // "instanceGroupManager" + // "instanceTemplate" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "instanceTemplate": { + // "description": "The name of the instance template to delete.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -36849,18 +49655,9 @@ func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOpti // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the managed instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", - // "request": { - // "$ref": "InstanceGroupManagersDeleteInstancesRequest" - // }, + // "path": "{project}/global/instanceTemplates/{instanceTemplate}", // "response": { // "$ref": "Operation" // }, @@ -36872,34 +49669,32 @@ func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOpti } -// method id "compute.instanceGroupManagers.get": +// method id "compute.instanceTemplates.get": -type InstanceGroupManagersGetCall struct { - s *Service - project string - zone string - instanceGroupManager string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstanceTemplatesGetCall struct { + s *Service + project string + instanceTemplate string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns all of the details about the specified managed instance -// group. Get a list of available managed instance groups by making a -// list() request. -func (r *InstanceGroupManagersService) Get(project string, zone string, instanceGroupManager string) *InstanceGroupManagersGetCall { - c := &InstanceGroupManagersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified instance template. Get a list of available +// instance templates by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/get +func (r *InstanceTemplatesService) Get(project string, instanceTemplate string) *InstanceTemplatesGetCall { + c := &InstanceTemplatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instanceGroupManager = instanceGroupManager + c.instanceTemplate = instanceTemplate return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersGetCall) Fields(s ...googleapi.Field) *InstanceGroupManagersGetCall { +func (c *InstanceTemplatesGetCall) Fields(s ...googleapi.Field) *InstanceTemplatesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -36909,7 +49704,7 @@ func (c *InstanceGroupManagersGetCall) Fields(s ...googleapi.Field) *InstanceGro // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
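// Illustrative usage sketch (not part of the vendored patch): a conditional read of an
// instance template using IfNoneMatch, as described above. The ETag is taken from a previously
// fetched template's response headers, and googleapi.IsNotModified distinguishes "unchanged"
// from a real error. Assumes "context", "google.golang.org/api/compute/v1" and
// "google.golang.org/api/googleapi" are imported; the names are placeholders.
func refreshTemplate(ctx context.Context, svc *compute.Service, cached *compute.InstanceTemplate) (*compute.InstanceTemplate, error) {
	etag := cached.ServerResponse.Header.Get("Etag")
	tmpl, err := svc.InstanceTemplates.Get("my-project", "my-template").
		IfNoneMatch(etag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		return cached, nil // server copy unchanged; keep using the cached template
	}
	if err != nil {
		return nil, err
	}
	return tmpl, nil
}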
-func (c *InstanceGroupManagersGetCall) IfNoneMatch(entityTag string) *InstanceGroupManagersGetCall { +func (c *InstanceTemplatesGetCall) IfNoneMatch(entityTag string) *InstanceTemplatesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -36917,21 +49712,21 @@ func (c *InstanceGroupManagersGetCall) IfNoneMatch(entityTag string) *InstanceGr // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersGetCall) Context(ctx context.Context) *InstanceGroupManagersGetCall { +func (c *InstanceTemplatesGetCall) Context(ctx context.Context) *InstanceTemplatesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersGetCall) Header() http.Header { +func (c *InstanceTemplatesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceTemplatesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -36942,26 +49737,25 @@ func (c *InstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, er } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{instanceTemplate}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "instanceTemplate": c.instanceTemplate, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.get" call. -// Exactly one of *InstanceGroupManager or error will be non-nil. Any +// Do executes the "compute.instanceTemplates.get" call. +// Exactly one of *InstanceTemplate or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *InstanceGroupManager.ServerResponse.Header or (if a response was +// *InstanceTemplate.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManager, error) { +func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTemplate, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -36980,7 +49774,7 @@ func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*Instan if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupManager{ + ret := &InstanceTemplate{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -36992,18 +49786,18 @@ func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*Instan } return ret, nil // { - // "description": "Returns all of the details about the specified managed instance group. 
Get a list of available managed instance groups by making a list() request.", + // "description": "Returns the specified instance template. Get a list of available instance templates by making a list() request.", // "httpMethod": "GET", - // "id": "compute.instanceGroupManagers.get", + // "id": "compute.instanceTemplates.get", // "parameterOrder": [ // "project", - // "zone", - // "instanceGroupManager" + // "instanceTemplate" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "instanceTemplate": { + // "description": "The name of the instance template.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -37013,17 +49807,11 @@ func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*Instan // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the managed instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", + // "path": "{project}/global/instanceTemplates/{instanceTemplate}", // "response": { - // "$ref": "InstanceGroupManager" + // "$ref": "InstanceTemplate" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -37034,33 +49822,27 @@ func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*Instan } -// method id "compute.instanceGroupManagers.insert": +// method id "compute.instanceTemplates.insert": -type InstanceGroupManagersInsertCall struct { - s *Service - project string - zone string - instancegroupmanager *InstanceGroupManager - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceTemplatesInsertCall struct { + s *Service + project string + instancetemplate *InstanceTemplate + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a managed instance group using the information that -// you specify in the request. After the group is created, it schedules -// an action to create instances in the group using the specified -// instance template. This operation is marked as DONE when the group is -// created even if the instances in the group have not yet been created. -// You must separately verify the status of the individual instances -// with the listmanagedinstances method. -// -// A managed instance group can have up to 1000 VM instances per group. -// Please contact Cloud Support if you need an increase in this limit. -func (r *InstanceGroupManagersService) Insert(project string, zone string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersInsertCall { - c := &InstanceGroupManagersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates an instance template in the specified project using +// the data that is included in the request. If you are creating a new +// template to update an existing instance group, your new instance +// template must use the same network or, if applicable, the same +// subnetwork as the original template. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/insert +func (r *InstanceTemplatesService) Insert(project string, instancetemplate *InstanceTemplate) *InstanceTemplatesInsertCall { + c := &InstanceTemplatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instancegroupmanager = instancegroupmanager + c.instancetemplate = instancetemplate return c } @@ -37078,7 +49860,7 @@ func (r *InstanceGroupManagersService) Insert(project string, zone string, insta // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersInsertCall) RequestId(requestId string) *InstanceGroupManagersInsertCall { +func (c *InstanceTemplatesInsertCall) RequestId(requestId string) *InstanceTemplatesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -37086,7 +49868,7 @@ func (c *InstanceGroupManagersInsertCall) RequestId(requestId string) *InstanceG // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersInsertCall) Fields(s ...googleapi.Field) *InstanceGroupManagersInsertCall { +func (c *InstanceTemplatesInsertCall) Fields(s ...googleapi.Field) *InstanceTemplatesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -37094,52 +49876,51 @@ func (c *InstanceGroupManagersInsertCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersInsertCall) Context(ctx context.Context) *InstanceGroupManagersInsertCall { +func (c *InstanceTemplatesInsertCall) Context(ctx context.Context) *InstanceTemplatesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersInsertCall) Header() http.Header { +func (c *InstanceTemplatesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceTemplatesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancetemplate) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.insert" call. +// Do executes the "compute.instanceTemplates.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -37170,12 +49951,11 @@ func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, it schedules an action to create instances in the group using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method.\n\nA managed instance group can have up to 1000 VM instances per group. Please contact Cloud Support if you need an increase in this limit.", + // "description": "Creates an instance template in the specified project using the data that is included in the request. If you are creating a new template to update an existing instance group, your new instance template must use the same network or, if applicable, the same subnetwork as the original template.", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.insert", + // "id": "compute.instanceTemplates.insert", // "parameterOrder": [ - // "project", - // "zone" + // "project" // ], // "parameters": { // "project": { @@ -37189,17 +49969,11 @@ func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Ope // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where you want to create the managed instance group.", - // "location": "path", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers", + // "path": "{project}/global/instanceTemplates", // "request": { - // "$ref": "InstanceGroupManager" + // "$ref": "InstanceTemplate" // }, // "response": { // "$ref": "Operation" @@ -37212,24 +49986,23 @@ func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.instanceGroupManagers.list": +// method id "compute.instanceTemplates.list": -type InstanceGroupManagersListCall struct { +type InstanceTemplatesListCall struct { s *Service project string - zone string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of managed instance groups that are contained +// List: Retrieves a list of instance templates that are contained // within the specified project and zone. -func (r *InstanceGroupManagersService) List(project string, zone string) *InstanceGroupManagersListCall { - c := &InstanceGroupManagersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/list +func (r *InstanceTemplatesService) List(project string) *InstanceTemplatesListCall { + c := &InstanceTemplatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone return c } @@ -37259,7 +50032,7 @@ func (r *InstanceGroupManagersService) List(project string, zone string) *Instan // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *InstanceGroupManagersListCall) Filter(filter string) *InstanceGroupManagersListCall { +func (c *InstanceTemplatesListCall) Filter(filter string) *InstanceTemplatesListCall { c.urlParams_.Set("filter", filter) return c } @@ -37270,7 +50043,7 @@ func (c *InstanceGroupManagersListCall) Filter(filter string) *InstanceGroupMana // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *InstanceGroupManagersListCall) MaxResults(maxResults int64) *InstanceGroupManagersListCall { +func (c *InstanceTemplatesListCall) MaxResults(maxResults int64) *InstanceTemplatesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -37287,7 +50060,7 @@ func (c *InstanceGroupManagersListCall) MaxResults(maxResults int64) *InstanceGr // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *InstanceGroupManagersListCall) OrderBy(orderBy string) *InstanceGroupManagersListCall { +func (c *InstanceTemplatesListCall) OrderBy(orderBy string) *InstanceTemplatesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -37295,7 +50068,7 @@ func (c *InstanceGroupManagersListCall) OrderBy(orderBy string) *InstanceGroupMa // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. 
Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *InstanceGroupManagersListCall) PageToken(pageToken string) *InstanceGroupManagersListCall { +func (c *InstanceTemplatesListCall) PageToken(pageToken string) *InstanceTemplatesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -37303,7 +50076,7 @@ func (c *InstanceGroupManagersListCall) PageToken(pageToken string) *InstanceGro // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersListCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListCall { +func (c *InstanceTemplatesListCall) Fields(s ...googleapi.Field) *InstanceTemplatesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -37313,7 +50086,7 @@ func (c *InstanceGroupManagersListCall) Fields(s ...googleapi.Field) *InstanceGr // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InstanceGroupManagersListCall) IfNoneMatch(entityTag string) *InstanceGroupManagersListCall { +func (c *InstanceTemplatesListCall) IfNoneMatch(entityTag string) *InstanceTemplatesListCall { c.ifNoneMatch_ = entityTag return c } @@ -37321,21 +50094,21 @@ func (c *InstanceGroupManagersListCall) IfNoneMatch(entityTag string) *InstanceG // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersListCall) Context(ctx context.Context) *InstanceGroupManagersListCall { +func (c *InstanceTemplatesListCall) Context(ctx context.Context) *InstanceTemplatesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersListCall) Header() http.Header { +func (c *InstanceTemplatesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceTemplatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -37346,25 +50119,24 @@ func (c *InstanceGroupManagersListCall) doRequest(alt string) (*http.Response, e } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.list" call. -// Exactly one of *InstanceGroupManagerList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *InstanceGroupManagerList.ServerResponse.Header or (if a response was +// Do executes the "compute.instanceTemplates.list" call. +// Exactly one of *InstanceTemplateList or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *InstanceTemplateList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagerList, error) { +func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceTemplateList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -37383,7 +50155,7 @@ func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*Insta if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupManagerList{ + ret := &InstanceTemplateList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -37395,12 +50167,11 @@ func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*Insta } return ret, nil // { - // "description": "Retrieves a list of managed instance groups that are contained within the specified project and zone.", + // "description": "Retrieves a list of instance templates that are contained within the specified project and zone.", // "httpMethod": "GET", - // "id": "compute.instanceGroupManagers.list", + // "id": "compute.instanceTemplates.list", // "parameterOrder": [ - // "project", - // "zone" + // "project" // ], // "parameters": { // "filter": { @@ -37432,234 +50203,11 @@ func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*Insta // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the managed instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers", - // "response": { - // "$ref": "InstanceGroupManagerList" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InstanceGroupManagersListCall) Pages(ctx context.Context, f func(*InstanceGroupManagerList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instanceGroupManagers.listManagedInstances": - -type InstanceGroupManagersListManagedInstancesCall struct { - s *Service - project string - zone string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// ListManagedInstances: Lists all of the instances in the managed -// instance group. Each instance in the list has a currentAction, which -// indicates the action that the managed instance group is performing on -// the instance. 
For example, if the group is still creating an -// instance, the currentAction is CREATING. If a previous action failed, -// the list displays the errors for that failed action. -func (r *InstanceGroupManagersService) ListManagedInstances(project string, zone string, instanceGroupManager string) *InstanceGroupManagersListManagedInstancesCall { - c := &InstanceGroupManagersListManagedInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instanceGroupManager = instanceGroupManager - return c -} - -// Filter sets the optional parameter "filter": -func (c *InstanceGroupManagersListManagedInstancesCall) Filter(filter string) *InstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": -func (c *InstanceGroupManagersListManagedInstancesCall) MaxResults(maxResults int64) *InstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "order_by": -func (c *InstanceGroupManagersListManagedInstancesCall) OrderBy(orderBy string) *InstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("order_by", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": -func (c *InstanceGroupManagersListManagedInstancesCall) PageToken(pageToken string) *InstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *InstanceGroupManagersListManagedInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *InstanceGroupManagersListManagedInstancesCall) Context(ctx context.Context) *InstanceGroupManagersListManagedInstancesCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *InstanceGroupManagersListManagedInstancesCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *InstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instanceGroupManagers.listManagedInstances" call. -// Exactly one of *InstanceGroupManagersListManagedInstancesResponse or -// error will be non-nil. Any non-2xx status code is an error. 
Response -// headers are in either -// *InstanceGroupManagersListManagedInstancesResponse.ServerResponse.Head -// er or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManagersListManagedInstancesResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &InstanceGroupManagersListManagedInstancesResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action.", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.listManagedInstances", - // "parameterOrder": [ - // "project", - // "zone", - // "instanceGroupManager" - // ], - // "parameters": { - // "filter": { - // "location": "query", - // "type": "string" - // }, - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "order_by": { - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where the managed instance group is located.", - // "location": "path", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", + // "path": "{project}/global/instanceTemplates", // "response": { - // "$ref": "InstanceGroupManagersListManagedInstancesResponse" + // "$ref": "InstanceTemplateList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -37673,7 +50221,7 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.Cal // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
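
Reviewer note on the generated Pages helper documented just above: it drives the pageToken/nextPageToken loop so callers do not have to. A minimal sketch of using it with the InstanceTemplates list call follows. The List, MaxResults and Pages signatures come from the generated code in this diff; the import path, the DefaultClient/compute.New setup and the "my-project" ID are illustrative assumptions.

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1" // assumed import path; use the version vendored here
)

func main() {
	ctx := context.Background()

	// Assumption: Application Default Credentials are available in the environment.
	client, err := google.DefaultClient(ctx, compute.ComputeReadonlyScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}

	// Pages calls Do repeatedly and feeds each InstanceTemplateList page to the callback,
	// following NextPageToken until it is empty (see the generated Pages method above).
	err = svc.InstanceTemplates.List("my-project").
		MaxResults(100).
		Pages(ctx, func(page *compute.InstanceTemplateList) error {
			for _, tmpl := range page.Items {
				fmt.Println(tmpl.Name)
			}
			return nil // a non-nil error here halts the iteration
		})
	if err != nil {
		log.Fatal(err)
	}
}
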
-func (c *InstanceGroupManagersListManagedInstancesCall) Pages(ctx context.Context, f func(*InstanceGroupManagersListManagedInstancesResponse) error) error { +func (c *InstanceTemplatesListCall) Pages(ctx context.Context, f func(*InstanceTemplateList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -37691,58 +50239,32 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Pages(ctx context.Contex } } -// method id "compute.instanceGroupManagers.patch": +// method id "compute.instanceTemplates.testIamPermissions": -type InstanceGroupManagersPatchCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanager *InstanceGroupManager - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstanceTemplatesTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates a managed instance group using the information that -// you specify in the request. This operation is marked as DONE when the -// group is patched even if the instances in the group are still in the -// process of being patched. You must separately verify the status of -// the individual instances with the listManagedInstances method. This -// method supports PATCH semantics and uses the JSON merge patch format -// and processing rules. -func (r *InstanceGroupManagersService) Patch(project string, zone string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersPatchCall { - c := &InstanceGroupManagersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *InstanceTemplatesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstanceTemplatesTestIamPermissionsCall { + c := &InstanceTemplatesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanager = instancegroupmanager - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersPatchCall) RequestId(requestId string) *InstanceGroupManagersPatchCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
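
For context on the testIamPermissions surface introduced in this hunk: the caller sends a TestPermissionsRequest naming permissions to probe and receives back only the subset it actually holds. A hedged sketch follows; svc is a *compute.Service built as in the previous sketch, and the project, template name and permission strings are placeholders rather than values taken from this diff.

package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1" // assumed import path
)

// checkTemplatePermissions reports which of the probed permissions the caller holds on an
// instance template, using the TestIamPermissions call added in this update.
func checkTemplatePermissions(svc *compute.Service, project, template string) error {
	req := &compute.TestPermissionsRequest{
		Permissions: []string{
			"compute.instanceTemplates.get",    // assumed permission names, for illustration only
			"compute.instanceTemplates.delete",
		},
	}
	resp, err := svc.InstanceTemplates.TestIamPermissions(project, template, req).Do()
	if err != nil {
		return err
	}
	fmt.Println("granted:", resp.Permissions) // only the permissions the caller actually has
	return nil
}
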
-func (c *InstanceGroupManagersPatchCall) Fields(s ...googleapi.Field) *InstanceGroupManagersPatchCall { +func (c *InstanceTemplatesTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstanceTemplatesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -37750,53 +50272,52 @@ func (c *InstanceGroupManagersPatchCall) Fields(s ...googleapi.Field) *InstanceG // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersPatchCall) Context(ctx context.Context) *InstanceGroupManagersPatchCall { +func (c *InstanceTemplatesTestIamPermissionsCall) Context(ctx context.Context) *InstanceTemplatesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersPatchCall) Header() http.Header { +func (c *InstanceTemplatesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *InstanceTemplatesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instanceTemplates.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -37815,7 +50336,7 @@ func (c *InstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Oper if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -37827,21 +50348,14 @@ func (c *InstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listManagedInstances method. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.instanceGroupManagers.patch", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.instanceTemplates.testIamPermissions", // "parameterOrder": [ // "project", - // "zone", - // "instanceGroupManager" + // "resource" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the instance group manager.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -37849,24 +50363,20 @@ func (c *InstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Oper // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone where you want to create the managed instance group.", + // "resource": { + // "description": "Name of the resource for this request.", // "location": "path", + // "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", + // "path": "{project}/global/instanceTemplates/{resource}/testIamPermissions", // "request": { - // "$ref": "InstanceGroupManager" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -37877,40 +50387,29 @@ func (c *InstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Oper } -// method id "compute.instanceGroupManagers.recreateInstances": +// method id "compute.instances.addAccessConfig": -type InstanceGroupManagersRecreateInstancesCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagersrecreateinstancesrequest *InstanceGroupManagersRecreateInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesAddAccessConfigCall struct { + s *Service + project string + zone string + instance string + accessconfig *AccessConfig + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// RecreateInstances: Schedules a group action to recreate the specified -// instances in the managed instance group. The instances are deleted -// and recreated using the current instance template for the managed -// instance group. This operation is marked as DONE when the action is -// scheduled even if the instances have not yet been recreated. You must -// separately verify the status of the recreating action with the -// listmanagedinstances method. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or -// deleted. -// -// You can specify a maximum of 1000 instances with this method per -// request. -func (r *InstanceGroupManagersService) RecreateInstances(project string, zone string, instanceGroupManager string, instancegroupmanagersrecreateinstancesrequest *InstanceGroupManagersRecreateInstancesRequest) *InstanceGroupManagersRecreateInstancesCall { - c := &InstanceGroupManagersRecreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AddAccessConfig: Adds an access config to an instance's network +// interface. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/addAccessConfig +func (r *InstancesService) AddAccessConfig(project string, zone string, instance string, networkInterface string, accessconfig *AccessConfig) *InstancesAddAccessConfigCall { + c := &InstancesAddAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagersrecreateinstancesrequest = instancegroupmanagersrecreateinstancesrequest + c.instance = instance + c.urlParams_.Set("networkInterface", networkInterface) + c.accessconfig = accessconfig return c } @@ -37928,7 +50427,7 @@ func (r *InstanceGroupManagersService) RecreateInstances(project string, zone st // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersRecreateInstancesCall) RequestId(requestId string) *InstanceGroupManagersRecreateInstancesCall { +func (c *InstancesAddAccessConfigCall) RequestId(requestId string) *InstancesAddAccessConfigCall { c.urlParams_.Set("requestId", requestId) return c } @@ -37936,7 +50435,7 @@ func (c *InstanceGroupManagersRecreateInstancesCall) RequestId(requestId string) // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersRecreateInstancesCall { +func (c *InstancesAddAccessConfigCall) Fields(s ...googleapi.Field) *InstancesAddAccessConfigCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -37944,53 +50443,53 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi.Field // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersRecreateInstancesCall) Context(ctx context.Context) *InstanceGroupManagersRecreateInstancesCall { +func (c *InstancesAddAccessConfigCall) Context(ctx context.Context) *InstancesAddAccessConfigCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersRecreateInstancesCall) Header() http.Header { +func (c *InstancesAddAccessConfigCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesAddAccessConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersrecreateinstancesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.accessconfig) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/addAccessConfig") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.recreateInstances" call. +// Do executes the "compute.instances.addAccessConfig" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -38021,18 +50520,26 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOp } return ret, nil // { - // "description": "Schedules a group action to recreate the specified instances in the managed instance group. The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the action is scheduled even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "description": "Adds an access config to an instance's network interface.", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.recreateInstances", + // "id": "compute.instances.addAccessConfig", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instance", + // "networkInterface" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "instance": { + // "description": "The instance name for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "networkInterface": { + // "description": "The name of the network interface to add to this instance.", + // "location": "query", // "required": true, // "type": "string" // }, @@ -38049,15 +50556,16 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOp // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", + // "path": "{project}/zones/{zone}/instances/{instance}/addAccessConfig", // "request": { - // "$ref": 
"InstanceGroupManagersRecreateInstancesRequest" + // "$ref": "AccessConfig" // }, // "response": { // "$ref": "Operation" @@ -38070,110 +50578,156 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOp } -// method id "compute.instanceGroupManagers.resize": +// method id "compute.instances.aggregatedList": -type InstanceGroupManagersResizeCall struct { - s *Service - project string - zone string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Resize: Resizes the managed instance group. If you increase the size, -// the group creates new instances using the current instance template. -// If you decrease the size, the group deletes instances. The resize -// operation is marked DONE when the resize actions are scheduled even -// if the group has not yet added or deleted any instances. You must -// separately verify the status of the creating or deleting actions with -// the listmanagedinstances method. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or deleted. -func (r *InstanceGroupManagersService) Resize(project string, zone string, instanceGroupManager string, size int64) *InstanceGroupManagersResizeCall { - c := &InstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves aggregated list of instances. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/aggregatedList +func (r *InstancesService) AggregatedList(project string) *InstancesAggregatedListCall { + c := &InstancesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.urlParams_.Set("size", fmt.Sprint(size)) return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. 
// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersResizeCall) RequestId(requestId string) *InstanceGroupManagersResizeCall { - c.urlParams_.Set("requestId", requestId) +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *InstancesAggregatedListCall) Filter(filter string) *InstancesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *InstancesAggregatedListCall) MaxResults(maxResults int64) *InstancesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InstancesAggregatedListCall) OrderBy(orderBy string) *InstancesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InstancesAggregatedListCall) PageToken(pageToken string) *InstancesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *InstanceGroupManagersResizeCall { +func (c *InstancesAggregatedListCall) Fields(s ...googleapi.Field) *InstancesAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesAggregatedListCall) IfNoneMatch(entityTag string) *InstancesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersResizeCall) Context(ctx context.Context) *InstanceGroupManagersResizeCall { +func (c *InstancesAggregatedListCall) Context(ctx context.Context) *InstancesAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersResizeCall) Header() http.Header { +func (c *InstancesAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instances") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.resize" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instances.aggregatedList" call. +// Exactly one of *InstanceAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -38192,7 +50746,7 @@ func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Ope if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InstanceAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -38204,94 +50758,102 @@ func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Resizes the managed instance group. If you increase the size, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.resize", + // "description": "Retrieves aggregated list of instances.", + // "httpMethod": "GET", + // "id": "compute.instances.aggregatedList", // "parameterOrder": [ - // "project", - // "zone", - // "instanceGroupManager", - // "size" + // "project" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", - // "location": "path", - // "required": true, + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", // "location": "query", // "type": "string" // }, - // "size": { - // "description": "The number of running instances that the managed instance group should maintain at any given time. The group automatically adds or removes instances to maintain the number of instances specified by this parameter.", - // "format": "int32", + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", - // "required": true, - // "type": "integer" + // "type": "string" // }, - // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize", + // "path": "{project}/aggregated/instances", // "response": { - // "$ref": "Operation" + // "$ref": "InstanceAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instanceGroupManagers.resizeAdvanced": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
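
The aggregatedList method documented above returns instances grouped by scope (zone) and accepts the filter syntax described in the parameter docs (field_name comparison_string literal_string). A hedged usage sketch, again assuming the import path, credentials setup and "my-project" ID; the call chain itself mirrors the generated methods in this hunk.

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1" // assumed import path
)

func main() {
	ctx := context.Background()
	client, err := google.DefaultClient(ctx, compute.ComputeReadonlyScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}

	// "my-project" is a placeholder; the filter uses the documented eq/ne expression form.
	call := svc.Instances.AggregatedList("my-project").
		Filter("status eq RUNNING").
		MaxResults(250)

	err = call.Pages(ctx, func(page *compute.InstanceAggregatedList) error {
		for zone, scoped := range page.Items { // Items is keyed by scope, e.g. "zones/us-central1-f"
			for _, inst := range scoped.Instances {
				fmt.Printf("%s\t%s\t%s\n", zone, inst.Name, inst.Status)
			}
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
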
+func (c *InstancesAggregatedListCall) Pages(ctx context.Context, f func(*InstanceAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstanceGroupManagersResizeAdvancedCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagersresizeadvancedrequest *InstanceGroupManagersResizeAdvancedRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.instances.attachDisk": + +type InstancesAttachDiskCall struct { + s *Service + project string + zone string + instance string + attacheddisk *AttachedDisk + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ResizeAdvanced: Resizes the managed instance group with advanced -// configuration options like disabling creation retries. This is an -// extended version of the resize method. -// -// If you increase the size of the instance group, the group creates new -// instances using the current instance template. If you decrease the -// size, the group deletes instances. The resize operation is marked -// DONE when the resize actions are scheduled even if the group has not -// yet added or deleted any instances. You must separately verify the -// status of the creating, creatingWithoutRetries, or deleting actions -// with the get or listmanagedinstances method. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or deleted. -func (r *InstanceGroupManagersService) ResizeAdvanced(project string, zone string, instanceGroupManager string, instancegroupmanagersresizeadvancedrequest *InstanceGroupManagersResizeAdvancedRequest) *InstanceGroupManagersResizeAdvancedCall { - c := &InstanceGroupManagersResizeAdvancedCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AttachDisk: Attaches an existing Disk resource to an instance. You +// must first create the disk before you can attach it. It is not +// possible to create and attach a disk at the same time. For more +// information, read Adding a persistent disk to your instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/attachDisk +func (r *InstancesService) AttachDisk(project string, zone string, instance string, attacheddisk *AttachedDisk) *InstancesAttachDiskCall { + c := &InstancesAttachDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagersresizeadvancedrequest = instancegroupmanagersresizeadvancedrequest + c.instance = instance + c.attacheddisk = attacheddisk return c } @@ -38309,7 +50871,7 @@ func (r *InstanceGroupManagersService) ResizeAdvanced(project string, zone strin // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
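
The attachDisk call added here pairs naturally with the requestId parameter documented just above, which makes retries of the mutating request idempotent. A sketch under stated assumptions: svc is a *compute.Service built as earlier, the disk URL, device name and UUID literal are placeholders, and in real code a fresh UUID would be generated per logical request.

package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1" // assumed import path
)

// attachDataDisk attaches an already-created persistent disk to a running instance,
// as described by the AttachDisk documentation in this hunk.
func attachDataDisk(svc *compute.Service, project, zone, instance, diskURL string) (*compute.Operation, error) {
	disk := &compute.AttachedDisk{
		Source:     diskURL,     // e.g. a zonal disk URL; illustrative, supplied by the caller
		DeviceName: "data-disk", // placeholder device name
		AutoDelete: false,
	}

	// RequestId must be a valid, non-zero UUID; the literal below only stands in for a
	// value generated per request.
	op, err := svc.Instances.AttachDisk(project, zone, instance, disk).
		RequestId("3f1d0e5c-8a7b-4c2d-9e6f-0123456789ab").
		Do()
	if err != nil {
		return nil, err
	}
	fmt.Println("operation:", op.Name, "status:", op.Status)
	return op, nil
}
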
-func (c *InstanceGroupManagersResizeAdvancedCall) RequestId(requestId string) *InstanceGroupManagersResizeAdvancedCall { +func (c *InstancesAttachDiskCall) RequestId(requestId string) *InstancesAttachDiskCall { c.urlParams_.Set("requestId", requestId) return c } @@ -38317,7 +50879,7 @@ func (c *InstanceGroupManagersResizeAdvancedCall) RequestId(requestId string) *I // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersResizeAdvancedCall) Fields(s ...googleapi.Field) *InstanceGroupManagersResizeAdvancedCall { +func (c *InstancesAttachDiskCall) Fields(s ...googleapi.Field) *InstancesAttachDiskCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -38325,53 +50887,53 @@ func (c *InstanceGroupManagersResizeAdvancedCall) Fields(s ...googleapi.Field) * // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersResizeAdvancedCall) Context(ctx context.Context) *InstanceGroupManagersResizeAdvancedCall { +func (c *InstancesAttachDiskCall) Context(ctx context.Context) *InstancesAttachDiskCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersResizeAdvancedCall) Header() http.Header { +func (c *InstancesAttachDiskCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersResizeAdvancedCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesAttachDiskCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersresizeadvancedrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.attacheddisk) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resizeAdvanced") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/attachDisk") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.resizeAdvanced" call. +// Do executes the "compute.instances.attachDisk" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
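
The Do documentation above describes the error contract shared by these generated calls: non-2xx responses surface as *googleapi.Error, and a 304 from a conditional (IfNoneMatch) request is detected with googleapi.IsNotModified. A small sketch of that pattern against the instanceTemplates list call; svc, project and the ETag value are placeholders.

package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1" // assumed import path
	"google.golang.org/api/googleapi"
)

// listTemplatesIfChanged re-fetches the template list only if it changed since lastETag.
func listTemplatesIfChanged(svc *compute.Service, project, lastETag string) (*compute.InstanceTemplateList, error) {
	list, err := svc.InstanceTemplates.List(project).IfNoneMatch(lastETag).Do()
	if googleapi.IsNotModified(err) {
		// The server answered 304: the copy identified by lastETag is still current.
		return nil, nil
	}
	if gerr, ok := err.(*googleapi.Error); ok {
		// Any other non-2xx status carries the HTTP code and message on *googleapi.Error.
		fmt.Printf("list failed: HTTP %d: %s\n", gerr.Code, gerr.Message)
		return nil, gerr
	}
	if err != nil {
		return nil, err
	}
	return list, nil
}
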
-func (c *InstanceGroupManagersResizeAdvancedCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -38402,18 +50964,19 @@ func (c *InstanceGroupManagersResizeAdvancedCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Resizes the managed instance group with advanced configuration options like disabling creation retries. This is an extended version of the resize method.\n\nIf you increase the size of the instance group, the group creates new instances using the current instance template. If you decrease the size, the group deletes instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating, creatingWithoutRetries, or deleting actions with the get or listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", + // "description": "Attaches an existing Disk resource to an instance. You must first create the disk before you can attach it. It is not possible to create and attach a disk at the same time. For more information, read Adding a persistent disk to your instance.", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.resizeAdvanced", + // "id": "compute.instances.attachDisk", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instance" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "instance": { + // "description": "The instance name for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -38430,15 +50993,16 @@ func (c *InstanceGroupManagersResizeAdvancedCall) Do(opts ...googleapi.CallOptio // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resizeAdvanced", + // "path": "{project}/zones/{zone}/instances/{instance}/attachDisk", // "request": { - // "$ref": "InstanceGroupManagersResizeAdvancedRequest" + // "$ref": "AttachedDisk" // }, // "response": { // "$ref": "Operation" @@ -38451,26 +51015,26 @@ func (c *InstanceGroupManagersResizeAdvancedCall) Do(opts ...googleapi.CallOptio } -// method id "compute.instanceGroupManagers.setAutoHealingPolicies": +// method id "compute.instances.delete": -type InstanceGroupManagersSetAutoHealingPoliciesCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagerssetautohealingrequest *InstanceGroupManagersSetAutoHealingRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesDeleteCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header 
} -// SetAutoHealingPolicies: Modifies the autohealing policies. -func (r *InstanceGroupManagersService) SetAutoHealingPolicies(project string, zone string, instanceGroupManager string, instancegroupmanagerssetautohealingrequest *InstanceGroupManagersSetAutoHealingRequest) *InstanceGroupManagersSetAutoHealingPoliciesCall { - c := &InstanceGroupManagersSetAutoHealingPoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified Instance resource. For more +// information, see Stopping or Deleting an Instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/delete +func (r *InstancesService) Delete(project string, zone string, instance string) *InstancesDeleteCall { + c := &InstancesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagerssetautohealingrequest = instancegroupmanagerssetautohealingrequest + c.instance = instance return c } @@ -38488,7 +51052,7 @@ func (r *InstanceGroupManagersService) SetAutoHealingPolicies(project string, zo // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) RequestId(requestId string) *InstanceGroupManagersSetAutoHealingPoliciesCall { +func (c *InstancesDeleteCall) RequestId(requestId string) *InstancesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -38496,7 +51060,7 @@ func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) RequestId(requestId st // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetAutoHealingPoliciesCall { +func (c *InstancesDeleteCall) Fields(s ...googleapi.Field) *InstancesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -38504,53 +51068,48 @@ func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Fields(s ...googleapi. // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Context(ctx context.Context) *InstanceGroupManagersSetAutoHealingPoliciesCall { +func (c *InstancesDeleteCall) Context(ctx context.Context) *InstancesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
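// ----------------------------------------------------------------------------
// Editor's sketch, not part of the vendored diff: the surrounding hunk replaces
// the instanceGroupManagers.setAutoHealingPolicies plumbing with
// compute.instances.delete, including the optional RequestId parameter for
// retry-safe calls. A minimal caller might look like the function below. The
// import path "google.golang.org/api/compute/v1", the svc.Instances accessor,
// and the Operation.Name field are assumptions based on the generated client's
// usual shape; they are not shown in this hunk.
// ----------------------------------------------------------------------------
package computeexamples

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// deleteInstance starts deletion of a VM and returns the zonal operation name.
// requestID should be a unique, non-zero UUID so that a retried request is
// recognized by the server instead of triggering a second delete.
func deleteInstance(ctx context.Context, svc *compute.Service, project, zone, name, requestID string) (string, error) {
	op, err := svc.Instances.Delete(project, zone, name).
		RequestId(requestID).
		Context(ctx).
		Do()
	if err != nil {
		return "", fmt.Errorf("deleting instance %s: %v", name, err)
	}
	return op.Name, nil // the Operation is asynchronous; poll it separately if needed
}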
-func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Header() http.Header { +func (c *InstancesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssetautohealingrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.setAutoHealingPolicies" call. +// Do executes the "compute.instances.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -38581,18 +51140,19 @@ func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googleapi.C } return ret, nil // { - // "description": "Modifies the autohealing policies.", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.setAutoHealingPolicies", + // "description": "Deletes the specified Instance resource. 
For more information, see Stopping or Deleting an Instance.", + // "httpMethod": "DELETE", + // "id": "compute.instances.delete", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instance" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the instance group manager.", + // "instance": { + // "description": "Name of the instance resource to delete.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -38609,16 +51169,14 @@ func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googleapi.C // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies", - // "request": { - // "$ref": "InstanceGroupManagersSetAutoHealingRequest" - // }, + // "path": "{project}/zones/{zone}/instances/{instance}", // "response": { // "$ref": "Operation" // }, @@ -38630,28 +51188,28 @@ func (c *InstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googleapi.C } -// method id "compute.instanceGroupManagers.setInstanceTemplate": +// method id "compute.instances.deleteAccessConfig": -type InstanceGroupManagersSetInstanceTemplateCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagerssetinstancetemplaterequest *InstanceGroupManagersSetInstanceTemplateRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesDeleteAccessConfigCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetInstanceTemplate: Specifies the instance template to use when -// creating new instances in this group. The templates for existing -// instances in the group do not change unless you recreate them. -func (r *InstanceGroupManagersService) SetInstanceTemplate(project string, zone string, instanceGroupManager string, instancegroupmanagerssetinstancetemplaterequest *InstanceGroupManagersSetInstanceTemplateRequest) *InstanceGroupManagersSetInstanceTemplateCall { - c := &InstanceGroupManagersSetInstanceTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// DeleteAccessConfig: Deletes an access config from an instance's +// network interface. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/deleteAccessConfig +func (r *InstancesService) DeleteAccessConfig(project string, zone string, instance string, accessConfig string, networkInterface string) *InstancesDeleteAccessConfigCall { + c := &InstancesDeleteAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagerssetinstancetemplaterequest = instancegroupmanagerssetinstancetemplaterequest + c.instance = instance + c.urlParams_.Set("accessConfig", accessConfig) + c.urlParams_.Set("networkInterface", networkInterface) return c } @@ -38669,7 +51227,7 @@ func (r *InstanceGroupManagersService) SetInstanceTemplate(project string, zone // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersSetInstanceTemplateCall) RequestId(requestId string) *InstanceGroupManagersSetInstanceTemplateCall { +func (c *InstancesDeleteAccessConfigCall) RequestId(requestId string) *InstancesDeleteAccessConfigCall { c.urlParams_.Set("requestId", requestId) return c } @@ -38677,7 +51235,7 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) RequestId(requestId strin // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetInstanceTemplateCall { +func (c *InstancesDeleteAccessConfigCall) Fields(s ...googleapi.Field) *InstancesDeleteAccessConfigCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -38685,53 +51243,48 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Fie // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersSetInstanceTemplateCall) Context(ctx context.Context) *InstanceGroupManagersSetInstanceTemplateCall { +func (c *InstancesDeleteAccessConfigCall) Context(ctx context.Context) *InstancesDeleteAccessConfigCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersSetInstanceTemplateCall) Header() http.Header { +func (c *InstancesDeleteAccessConfigCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesDeleteAccessConfigCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssetinstancetemplaterequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.setInstanceTemplate" call. +// Do executes the "compute.instances.deleteAccessConfig" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -38762,18 +51315,33 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.Call } return ret, nil // { - // "description": "Specifies the instance template to use when creating new instances in this group. The templates for existing instances in the group do not change unless you recreate them.", + // "description": "Deletes an access config from an instance's network interface.", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.setInstanceTemplate", + // "id": "compute.instances.deleteAccessConfig", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instance", + // "accessConfig", + // "networkInterface" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "accessConfig": { + // "description": "The name of the access config to delete.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "instance": { + // "description": "The instance name for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "networkInterface": { + // "description": "The name of the network interface.", + // "location": "query", // "required": true, // "type": "string" // }, @@ -38790,16 +51358,14 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.Call // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", - // "request": { - // "$ref": "InstanceGroupManagersSetInstanceTemplateRequest" - // }, + // "path": "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig", // "response": { // "$ref": "Operation" // }, @@ -38811,32 +51377,26 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.Call } -// method id "compute.instanceGroupManagers.setTargetPools": +// method id "compute.instances.detachDisk": -type InstanceGroupManagersSetTargetPoolsCall 
struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanagerssettargetpoolsrequest *InstanceGroupManagersSetTargetPoolsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesDetachDiskCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetTargetPools: Modifies the target pools to which all instances in -// this managed instance group are assigned. The target pools -// automatically apply to all of the instances in the managed instance -// group. This operation is marked DONE when you make the request even -// if the instances have not yet been added to their target pools. The -// change might take some time to apply to all of the instances in the -// group depending on the size of the group. -func (r *InstanceGroupManagersService) SetTargetPools(project string, zone string, instanceGroupManager string, instancegroupmanagerssettargetpoolsrequest *InstanceGroupManagersSetTargetPoolsRequest) *InstanceGroupManagersSetTargetPoolsCall { - c := &InstanceGroupManagersSetTargetPoolsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// DetachDisk: Detaches a disk from an instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/detachDisk +func (r *InstancesService) DetachDisk(project string, zone string, instance string, deviceName string) *InstancesDetachDiskCall { + c := &InstancesDetachDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanagerssettargetpoolsrequest = instancegroupmanagerssettargetpoolsrequest + c.instance = instance + c.urlParams_.Set("deviceName", deviceName) return c } @@ -38854,7 +51414,7 @@ func (r *InstanceGroupManagersService) SetTargetPools(project string, zone strin // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersSetTargetPoolsCall) RequestId(requestId string) *InstanceGroupManagersSetTargetPoolsCall { +func (c *InstancesDetachDiskCall) RequestId(requestId string) *InstancesDetachDiskCall { c.urlParams_.Set("requestId", requestId) return c } @@ -38862,7 +51422,7 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) RequestId(requestId string) *I // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersSetTargetPoolsCall { +func (c *InstancesDetachDiskCall) Fields(s ...googleapi.Field) *InstancesDetachDiskCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -38870,53 +51430,48 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Field) * // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersSetTargetPoolsCall) Context(ctx context.Context) *InstanceGroupManagersSetTargetPoolsCall { +func (c *InstancesDetachDiskCall) Context(ctx context.Context) *InstancesDetachDiskCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
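// ----------------------------------------------------------------------------
// Editor's sketch, not part of the vendored diff: the hunk around this point
// introduces compute.instances.detachDisk, which is keyed off the instance name
// plus a required deviceName query parameter. A hedged usage sketch under the
// same assumptions as the earlier example (vendored import path, a pre-built
// *compute.Service, and Operation.Name/Operation.Status as fields of the
// generated Operation struct, none of which are shown in this hunk):
// ----------------------------------------------------------------------------
package computeexamples

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// detachDisk detaches the disk attached under deviceName (the device name given
// at attach time, not the disk resource name) and reports the operation status.
func detachDisk(ctx context.Context, svc *compute.Service, project, zone, instance, deviceName string) error {
	op, err := svc.Instances.DetachDisk(project, zone, instance, deviceName).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	fmt.Printf("detachDisk operation %s is %s\n", op.Name, op.Status)
	return nil
}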
-func (c *InstanceGroupManagersSetTargetPoolsCall) Header() http.Header { +func (c *InstancesDetachDiskCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesDetachDiskCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssettargetpoolsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/detachDisk") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.setTargetPools" call. +// Do executes the "compute.instances.detachDisk" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -38947,18 +51502,26 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Modifies the target pools to which all instances in this managed instance group are assigned. The target pools automatically apply to all of the instances in the managed instance group. This operation is marked DONE when you make the request even if the instances have not yet been added to their target pools. 
The change might take some time to apply to all of the instances in the group depending on the size of the group.", + // "description": "Detaches a disk from an instance.", // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.setTargetPools", + // "id": "compute.instances.detachDisk", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instance", + // "deviceName" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "deviceName": { + // "description": "Disk device name to detach.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "instance": { + // "description": "Instance name.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -38975,16 +51538,14 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOptio // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the managed instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", - // "request": { - // "$ref": "InstanceGroupManagersSetTargetPoolsRequest" - // }, + // "path": "{project}/zones/{zone}/instances/{instance}/detachDisk", // "response": { // "$ref": "Operation" // }, @@ -38992,92 +51553,100 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOptio // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/compute" // ] - // } - -} - -// method id "compute.instanceGroupManagers.testIamPermissions": - -type InstanceGroupManagersTestIamPermissionsCall struct { - s *Service - project string - zone string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header + // } + } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *InstanceGroupManagersService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstanceGroupManagersTestIamPermissionsCall { - c := &InstanceGroupManagersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// method id "compute.instances.get": + +type InstancesGetCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified Instance resource. Get a list of available +// instances by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/get +func (r *InstancesService) Get(project string, zone string, instance string) *InstancesGetCall { + c := &InstancesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.instance = instance return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
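// ----------------------------------------------------------------------------
// Editor's sketch, not part of the vendored diff: compute.instances.get, added
// in the surrounding hunk, is a plain GET that also supports conditional
// requests through the IfNoneMatch option introduced a few lines further down.
// googleapi.IsNotModified is the helper the generated comments themselves point
// to for detecting a 304. The import path and the pre-built *compute.Service
// are the same assumptions as in the earlier sketches.
// ----------------------------------------------------------------------------
package computeexamples

import (
	"context"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// getInstanceIfChanged re-fetches an instance only when its ETag no longer
// matches lastETag. It returns (nil, nil) when the cached copy is still valid.
func getInstanceIfChanged(ctx context.Context, svc *compute.Service, project, zone, name, lastETag string) (*compute.Instance, error) {
	call := svc.Instances.Get(project, zone, name).Context(ctx)
	if lastETag != "" {
		call = call.IfNoneMatch(lastETag)
	}
	inst, err := call.Do()
	if googleapi.IsNotModified(err) {
		return nil, nil // server answered 304 Not Modified; keep the cached Instance
	}
	if err != nil {
		return nil, err
	}
	return inst, nil
}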
-func (c *InstanceGroupManagersTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstanceGroupManagersTestIamPermissionsCall { +func (c *InstancesGetCall) Fields(s ...googleapi.Field) *InstancesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesGetCall) IfNoneMatch(entityTag string) *InstancesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersTestIamPermissionsCall) Context(ctx context.Context) *InstanceGroupManagersTestIamPermissionsCall { +func (c *InstancesGetCall) Context(ctx context.Context) *InstancesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersTestIamPermissionsCall) Header() http.Header { +func (c *InstancesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, - "resource": c.resource, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.instances.get" call. +// Exactly one of *Instance or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Instance.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -39096,7 +51665,7 @@ func (c *InstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi.CallO if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Instance{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -39108,26 +51677,26 @@ func (c *InstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi.CallO } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.instanceGroupManagers.testIamPermissions", + // "description": "Returns the specified Instance resource. Get a list of available instances by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.instances.get", // "parameterOrder": [ // "project", // "zone", - // "resource" + // "instance" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "instance": { + // "description": "Name of the instance resource to return.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name of the resource for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, @@ -39139,12 +51708,9 @@ func (c *InstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi.CallO // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/zones/{zone}/instances/{instance}", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Instance" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -39155,110 +51721,114 @@ func (c *InstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi.CallO } -// method id "compute.instanceGroupManagers.update": +// method id "compute.instances.getSerialPortOutput": -type InstanceGroupManagersUpdateCall struct { - s *Service - project string - zone string - instanceGroupManager string - instancegroupmanager *InstanceGroupManager - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesGetSerialPortOutputCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Update: Updates a managed instance group using the information that -// you specify in the request. This operation is marked as DONE when the -// group is updated even if the instances in the group have not yet been -// updated. 
You must separately verify the status of the individual -// instances with the listManagedInstances method. -func (r *InstanceGroupManagersService) Update(project string, zone string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *InstanceGroupManagersUpdateCall { - c := &InstanceGroupManagersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetSerialPortOutput: Returns the specified instance's serial port +// output. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/getSerialPortOutput +func (r *InstancesService) GetSerialPortOutput(project string, zone string, instance string) *InstancesGetSerialPortOutputCall { + c := &InstancesGetSerialPortOutputCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanager = instancegroupmanager + c.instance = instance return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupManagersUpdateCall) RequestId(requestId string) *InstanceGroupManagersUpdateCall { - c.urlParams_.Set("requestId", requestId) +// Port sets the optional parameter "port": Specifies which COM or +// serial port to retrieve data from. +func (c *InstancesGetSerialPortOutputCall) Port(port int64) *InstancesGetSerialPortOutputCall { + c.urlParams_.Set("port", fmt.Sprint(port)) + return c +} + +// Start sets the optional parameter "start": Returns output starting +// from a specific byte position. Use this to page through output when +// the output is too large to return in a single request. For the +// initial request, leave this field unspecified. For subsequent calls, +// this field should be set to the next value returned in the previous +// call. +func (c *InstancesGetSerialPortOutputCall) Start(start int64) *InstancesGetSerialPortOutputCall { + c.urlParams_.Set("start", fmt.Sprint(start)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupManagersUpdateCall) Fields(s ...googleapi.Field) *InstanceGroupManagersUpdateCall { +func (c *InstancesGetSerialPortOutputCall) Fields(s ...googleapi.Field) *InstancesGetSerialPortOutputCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
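// ----------------------------------------------------------------------------
// Editor's sketch, not part of the vendored diff: getSerialPortOutput, shown in
// the surrounding hunk, takes optional Port and Start parameters, and the Start
// documentation says each follow-up request should continue from the offset
// returned by the previous call. The sketch assumes that offset is exposed as
// SerialPortOutput.Next and the text as SerialPortOutput.Contents (field names
// not visible in this hunk), plus the usual vendored import path and a
// pre-built *compute.Service.
// ----------------------------------------------------------------------------
package computeexamples

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// dumpSerialConsole pages through the serial console output of an instance,
// printing each chunk until no new bytes are returned.
func dumpSerialConsole(ctx context.Context, svc *compute.Service, project, zone, instance string) error {
	var start int64
	for {
		out, err := svc.Instances.GetSerialPortOutput(project, zone, instance).
			Port(1).      // COM1; the method metadata above allows ports 1 through 4
			Start(start). // resume from the offset reported by the previous call
			Context(ctx).
			Do()
		if err != nil {
			return err
		}
		fmt.Print(out.Contents)
		if out.Next == start { // assumed field: no new output since the last call
			return nil
		}
		start = out.Next
	}
}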
+func (c *InstancesGetSerialPortOutputCall) IfNoneMatch(entityTag string) *InstancesGetSerialPortOutputCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupManagersUpdateCall) Context(ctx context.Context) *InstanceGroupManagersUpdateCall { +func (c *InstancesGetSerialPortOutputCall) Context(ctx context.Context) *InstancesGetSerialPortOutputCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupManagersUpdateCall) Header() http.Header { +func (c *InstancesGetSerialPortOutputCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupManagersUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesGetSerialPortOutputCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/serialPort") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroupManagers.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instances.getSerialPortOutput" call. +// Exactly one of *SerialPortOutput or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *SerialPortOutput.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*SerialPortOutput, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -39277,7 +51847,7 @@ func (c *InstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) (*Ope if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &SerialPortOutput{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -39289,21 +51859,31 @@ func (c *InstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is updated even if the instances in the group have not yet been updated. You must separately verify the status of the individual instances with the listManagedInstances method.", - // "httpMethod": "PUT", - // "id": "compute.instanceGroupManagers.update", + // "description": "Returns the specified instance's serial port output.", + // "httpMethod": "GET", + // "id": "compute.instances.getSerialPortOutput", // "parameterOrder": [ // "project", // "zone", - // "instanceGroupManager" + // "instance" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the instance group manager.", + // "instance": { + // "description": "Name of the instance scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, + // "port": { + // "default": "1", + // "description": "Specifies which COM or serial port to retrieve data from.", + // "format": "int32", + // "location": "query", + // "maximum": "4", + // "minimum": "1", + // "type": "integer" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -39311,55 +51891,53 @@ func (c *InstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) (*Ope // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "start": { + // "description": "Returns output starting from a specific byte position. Use this to page through output when the output is too large to return in a single request. For the initial request, leave this field unspecified. 
For subsequent calls, this field should be set to the next value returned in the previous call.", + // "format": "int64", // "location": "query", // "type": "string" // }, // "zone": { - // "description": "The name of the zone where you want to create the managed instance group.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", - // "request": { - // "$ref": "InstanceGroupManager" - // }, + // "path": "{project}/zones/{zone}/instances/{instance}/serialPort", // "response": { - // "$ref": "Operation" + // "$ref": "SerialPortOutput" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instanceGroups.addInstances": +// method id "compute.instances.insert": -type InstanceGroupsAddInstancesCall struct { - s *Service - project string - zone string - instanceGroup string - instancegroupsaddinstancesrequest *InstanceGroupsAddInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesInsertCall struct { + s *Service + project string + zone string + instance *Instance + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AddInstances: Adds a list of instances to the specified instance -// group. All of the instances in the instance group must be in the same -// network/subnetwork. Read Adding instances for more information. -func (r *InstanceGroupsService) AddInstances(project string, zone string, instanceGroup string, instancegroupsaddinstancesrequest *InstanceGroupsAddInstancesRequest) *InstanceGroupsAddInstancesCall { - c := &InstanceGroupsAddInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates an instance resource in the specified project using +// the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/insert +func (r *InstancesService) Insert(project string, zone string, instance *Instance) *InstancesInsertCall { + c := &InstancesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroup = instanceGroup - c.instancegroupsaddinstancesrequest = instancegroupsaddinstancesrequest + c.instance = instance return c } @@ -39377,7 +51955,7 @@ func (r *InstanceGroupsService) AddInstances(project string, zone string, instan // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupsAddInstancesCall) RequestId(requestId string) *InstanceGroupsAddInstancesCall { +func (c *InstancesInsertCall) RequestId(requestId string) *InstancesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -39385,7 +51963,7 @@ func (c *InstanceGroupsAddInstancesCall) RequestId(requestId string) *InstanceGr // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InstanceGroupsAddInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsAddInstancesCall { +func (c *InstancesInsertCall) Fields(s ...googleapi.Field) *InstancesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -39393,53 +51971,52 @@ func (c *InstanceGroupsAddInstancesCall) Fields(s ...googleapi.Field) *InstanceG // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsAddInstancesCall) Context(ctx context.Context) *InstanceGroupsAddInstancesCall { +func (c *InstancesInsertCall) Context(ctx context.Context) *InstancesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupsAddInstancesCall) Header() http.Header { +func (c *InstancesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsAddInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupsaddinstancesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instance) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.addInstances" call. +// Do executes the "compute.instances.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -39470,21 +52047,14 @@ func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Adds a list of instances to the specified instance group. All of the instances in the instance group must be in the same network/subnetwork. 
Read Adding instances for more information.", + // "description": "Creates an instance resource in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.instanceGroups.addInstances", + // "id": "compute.instances.insert", // "parameterOrder": [ // "project", - // "zone", - // "instanceGroup" + // "zone" // ], // "parameters": { - // "instanceGroup": { - // "description": "The name of the instance group where you are adding instances.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -39498,15 +52068,16 @@ func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Oper // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances", + // "path": "{project}/zones/{zone}/instances", // "request": { - // "$ref": "InstanceGroupsAddInstancesRequest" + // "$ref": "Instance" // }, // "response": { // "$ref": "Operation" @@ -39519,22 +52090,25 @@ func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Oper } -// method id "compute.instanceGroups.aggregatedList": +// method id "compute.instances.list": -type InstanceGroupsAggregatedListCall struct { +type InstancesListCall struct { s *Service project string + zone string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// AggregatedList: Retrieves the list of instance groups and sorts them -// by zone. -func (r *InstanceGroupsService) AggregatedList(project string) *InstanceGroupsAggregatedListCall { - c := &InstanceGroupsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of instances contained within the specified +// zone. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/list +func (r *InstancesService) List(project string, zone string) *InstancesListCall { + c := &InstancesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone return c } @@ -39564,7 +52138,7 @@ func (r *InstanceGroupsService) AggregatedList(project string) *InstanceGroupsAg // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *InstanceGroupsAggregatedListCall) Filter(filter string) *InstanceGroupsAggregatedListCall { +func (c *InstancesListCall) Filter(filter string) *InstancesListCall { c.urlParams_.Set("filter", filter) return c } @@ -39575,7 +52149,7 @@ func (c *InstanceGroupsAggregatedListCall) Filter(filter string) *InstanceGroups // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. 
// (Default: 500) -func (c *InstanceGroupsAggregatedListCall) MaxResults(maxResults int64) *InstanceGroupsAggregatedListCall { +func (c *InstancesListCall) MaxResults(maxResults int64) *InstancesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -39592,7 +52166,7 @@ func (c *InstanceGroupsAggregatedListCall) MaxResults(maxResults int64) *Instanc // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *InstanceGroupsAggregatedListCall) OrderBy(orderBy string) *InstanceGroupsAggregatedListCall { +func (c *InstancesListCall) OrderBy(orderBy string) *InstancesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -39600,7 +52174,7 @@ func (c *InstanceGroupsAggregatedListCall) OrderBy(orderBy string) *InstanceGrou // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *InstanceGroupsAggregatedListCall) PageToken(pageToken string) *InstanceGroupsAggregatedListCall { +func (c *InstancesListCall) PageToken(pageToken string) *InstancesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -39608,7 +52182,7 @@ func (c *InstanceGroupsAggregatedListCall) PageToken(pageToken string) *Instance // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsAggregatedListCall) Fields(s ...googleapi.Field) *InstanceGroupsAggregatedListCall { +func (c *InstancesListCall) Fields(s ...googleapi.Field) *InstancesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -39618,7 +52192,7 @@ func (c *InstanceGroupsAggregatedListCall) Fields(s ...googleapi.Field) *Instanc // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InstanceGroupsAggregatedListCall) IfNoneMatch(entityTag string) *InstanceGroupsAggregatedListCall { +func (c *InstancesListCall) IfNoneMatch(entityTag string) *InstancesListCall { c.ifNoneMatch_ = entityTag return c } @@ -39626,21 +52200,21 @@ func (c *InstanceGroupsAggregatedListCall) IfNoneMatch(entityTag string) *Instan // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsAggregatedListCall) Context(ctx context.Context) *InstanceGroupsAggregatedListCall { +func (c *InstancesListCall) Context(ctx context.Context) *InstancesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupsAggregatedListCall) Header() http.Header { +func (c *InstancesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -39651,24 +52225,25 @@ func (c *InstanceGroupsAggregatedListCall) doRequest(alt string) (*http.Response } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instanceGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.aggregatedList" call. -// Exactly one of *InstanceGroupAggregatedList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *InstanceGroupAggregatedList.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupAggregatedList, error) { +// Do executes the "compute.instances.list" call. +// Exactly one of *InstanceList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *InstanceList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -39687,7 +52262,7 @@ func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*In if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupAggregatedList{ + ret := &InstanceList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -39699,11 +52274,12 @@ func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*In } return ret, nil // { - // "description": "Retrieves the list of instance groups and sorts them by zone.", + // "description": "Retrieves the list of instances contained within the specified zone.", // "httpMethod": "GET", - // "id": "compute.instanceGroups.aggregatedList", + // "id": "compute.instances.list", // "parameterOrder": [ - // "project" + // "project", + // "zone" // ], // "parameters": { // "filter": { @@ -39735,11 +52311,18 @@ func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*In // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/aggregated/instanceGroups", + // "path": "{project}/zones/{zone}/instances", // "response": { - // "$ref": "InstanceGroupAggregatedList" + // "$ref": "InstanceList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -39753,7 +52336,7 @@ func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*In // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *InstanceGroupsAggregatedListCall) Pages(ctx context.Context, f func(*InstanceGroupAggregatedList) error) error { +func (c *InstancesListCall) Pages(ctx context.Context, f func(*InstanceList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -39771,102 +52354,162 @@ func (c *InstanceGroupsAggregatedListCall) Pages(ctx context.Context, f func(*In } } -// method id "compute.instanceGroups.delete": +// method id "compute.instances.listReferrers": -type InstanceGroupsDeleteCall struct { - s *Service - project string - zone string - instanceGroup string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesListReferrersCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified instance group. The instances in the -// group are not deleted. Note that instance group must not belong to a -// backend service. Read Deleting an instance group for more -// information. -func (r *InstanceGroupsService) Delete(project string, zone string, instanceGroup string) *InstanceGroupsDeleteCall { - c := &InstanceGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListReferrers: Retrieves the list of referrers to instances contained +// within the specified zone. 
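// ----------------------------------------------------------------------------
// Editor's sketch, not part of the vendored diff: compute.instances.list in the
// hunk above is now scoped to a zone and keeps the Filter/MaxResults/OrderBy/
// PageToken options, while the generated Pages helper hides the pageToken loop
// entirely. InstanceList.Items and the Instance.Name/Status fields are
// assumptions (not shown in this hunk); the import path and the pre-built
// *compute.Service are assumed as in the earlier sketches.
// ----------------------------------------------------------------------------
package computeexamples

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// listRunningInstances walks every page of instances in a zone whose status is
// RUNNING, using the server-side filter syntax described in the Filter doc
// comment (field_name comparison_string literal_string).
func listRunningInstances(ctx context.Context, svc *compute.Service, project, zone string) error {
	return svc.Instances.List(project, zone).
		Filter("status eq RUNNING").
		MaxResults(100).
		Pages(ctx, func(page *compute.InstanceList) error {
			for _, inst := range page.Items {
				fmt.Printf("%s\t%s\n", inst.Name, inst.Status)
			}
			return nil // returning a non-nil error here would stop the iteration
		})
}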
+func (r *InstancesService) ListReferrers(project string, zone string, instance string) *InstancesListReferrersCall { + c := &InstancesListReferrersCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroup = instanceGroup + c.instance = instance return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupsDeleteCall) RequestId(requestId string) *InstanceGroupsDeleteCall { - c.urlParams_.Set("requestId", requestId) +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *InstancesListReferrersCall) Filter(filter string) *InstancesListReferrersCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *InstancesListReferrersCall) MaxResults(maxResults int64) *InstancesListReferrersCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. 
+// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InstancesListReferrersCall) OrderBy(orderBy string) *InstancesListReferrersCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InstancesListReferrersCall) PageToken(pageToken string) *InstancesListReferrersCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsDeleteCall) Fields(s ...googleapi.Field) *InstanceGroupsDeleteCall { +func (c *InstancesListReferrersCall) Fields(s ...googleapi.Field) *InstancesListReferrersCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstancesListReferrersCall) IfNoneMatch(entityTag string) *InstancesListReferrersCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsDeleteCall) Context(ctx context.Context) *InstanceGroupsDeleteCall { +func (c *InstancesListReferrersCall) Context(ctx context.Context) *InstancesListReferrersCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupsDeleteCall) Header() http.Header { +func (c *InstancesListReferrersCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesListReferrersCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/referrers") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstanceGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.instances.listReferrers" call. +// Exactly one of *InstanceListReferrers or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceListReferrers.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*InstanceListReferrers, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -39885,7 +52528,7 @@ func (c *InstanceGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InstanceListReferrers{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -39897,21 +52540,45 @@ func (c *InstanceGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Deletes the specified instance group. The instances in the group are not deleted. Note that instance group must not belong to a backend service. Read Deleting an instance group for more information.", - // "httpMethod": "DELETE", - // "id": "compute.instanceGroups.delete", + // "description": "Retrieves the list of referrers to instances contained within the specified zone.", + // "httpMethod": "GET", + // "id": "compute.instances.listReferrers", // "parameterOrder": [ // "project", // "zone", - // "instanceGroup" + // "instance" // ], // "parameters": { - // "instanceGroup": { - // "description": "The name of the instance group to delete.", + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "instance": { + // "description": "Name of the target instance scoping this request, or '-' if the request should span over all instances in the container.", // "location": "path", + // "pattern": "-|[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -39919,119 +52586,143 @@ func (c *InstanceGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, // "zone": { - // "description": "The name of the zone where the instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}", + // "path": "{project}/zones/{zone}/instances/{instance}/referrers", // "response": { - // "$ref": "Operation" + // "$ref": "InstanceListReferrers" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instanceGroups.get": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InstancesListReferrersCall) Pages(ctx context.Context, f func(*InstanceListReferrers) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstanceGroupsGetCall struct { - s *Service - project string - zone string - instanceGroup string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// method id "compute.instances.reset": + +type InstancesResetCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified instance group. Get a list of available -// instance groups by making a list() request. -func (r *InstanceGroupsService) Get(project string, zone string, instanceGroup string) *InstanceGroupsGetCall { - c := &InstanceGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Reset: Performs a reset on the instance. For more information, see +// Resetting an instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/reset +func (r *InstancesService) Reset(project string, zone string, instance string) *InstancesResetCall { + c := &InstancesResetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroup = instanceGroup + c.instance = instance + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. 
+// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesResetCall) RequestId(requestId string) *InstancesResetCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsGetCall) Fields(s ...googleapi.Field) *InstanceGroupsGetCall { +func (c *InstancesResetCall) Fields(s ...googleapi.Field) *InstancesResetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstanceGroupsGetCall) IfNoneMatch(entityTag string) *InstanceGroupsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsGetCall) Context(ctx context.Context) *InstanceGroupsGetCall { +func (c *InstancesResetCall) Context(ctx context.Context) *InstancesResetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupsGetCall) Header() http.Header { +func (c *InstancesResetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesResetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/reset") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.get" call. -// Exactly one of *InstanceGroup or error will be non-nil. Any non-2xx +// Do executes the "compute.instances.reset" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *InstanceGroup.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup, error) { +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -40050,7 +52741,7 @@ func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroup{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -40062,18 +52753,19 @@ func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup } return ret, nil // { - // "description": "Returns the specified instance group. Get a list of available instance groups by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.instanceGroups.get", + // "description": "Performs a reset on the instance. For more information, see Resetting an instance.", + // "httpMethod": "POST", + // "id": "compute.instances.reset", // "parameterOrder": [ // "project", // "zone", - // "instanceGroup" + // "instance" // ], // "parameters": { - // "instanceGroup": { - // "description": "The name of the instance group.", + // "instance": { + // "description": "Name of the instance scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -40084,45 +52776,53 @@ func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { - // "description": "The name of the zone where the instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}", + // "path": "{project}/zones/{zone}/instances/{instance}/reset", // "response": { - // "$ref": "InstanceGroup" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instanceGroups.insert": +// method id "compute.instances.setDiskAutoDelete": -type InstanceGroupsInsertCall struct { - s *Service - project string - zone string - instancegroup *InstanceGroup - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetDiskAutoDeleteCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates an instance group in the specified project using the -// parameters that are included in the request. -func (r *InstanceGroupsService) Insert(project string, zone string, instancegroup *InstanceGroup) *InstanceGroupsInsertCall { - c := &InstanceGroupsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetDiskAutoDelete: Sets the auto-delete flag for a disk attached to +// an instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setDiskAutoDelete +func (r *InstancesService) SetDiskAutoDelete(project string, zone string, instance string, autoDelete bool, deviceName string) *InstancesSetDiskAutoDeleteCall { + c := &InstancesSetDiskAutoDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instancegroup = instancegroup + c.instance = instance + c.urlParams_.Set("autoDelete", fmt.Sprint(autoDelete)) + c.urlParams_.Set("deviceName", deviceName) return c } @@ -40140,7 +52840,7 @@ func (r *InstanceGroupsService) Insert(project string, zone string, instancegrou // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupsInsertCall) RequestId(requestId string) *InstanceGroupsInsertCall { +func (c *InstancesSetDiskAutoDeleteCall) RequestId(requestId string) *InstancesSetDiskAutoDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -40148,7 +52848,7 @@ func (c *InstanceGroupsInsertCall) RequestId(requestId string) *InstanceGroupsIn // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InstanceGroupsInsertCall) Fields(s ...googleapi.Field) *InstanceGroupsInsertCall { +func (c *InstancesSetDiskAutoDeleteCall) Fields(s ...googleapi.Field) *InstancesSetDiskAutoDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -40156,52 +52856,48 @@ func (c *InstanceGroupsInsertCall) Fields(s ...googleapi.Field) *InstanceGroupsI // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsInsertCall) Context(ctx context.Context) *InstanceGroupsInsertCall { +func (c *InstancesSetDiskAutoDeleteCall) Context(ctx context.Context) *InstancesSetDiskAutoDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupsInsertCall) Header() http.Header { +func (c *InstancesSetDiskAutoDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetDiskAutoDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroup) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.insert" call. +// Do executes the "compute.instances.setDiskAutoDelete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -40232,14 +52928,37 @@ func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Creates an instance group in the specified project using the parameters that are included in the request.", + // "description": "Sets the auto-delete flag for a disk attached to an instance.", // "httpMethod": "POST", - // "id": "compute.instanceGroups.insert", + // "id": "compute.instances.setDiskAutoDelete", // "parameterOrder": [ // "project", - // "zone" + // "zone", + // "instance", + // "autoDelete", + // "deviceName" // ], // "parameters": { + // "autoDelete": { + // "description": "Whether to auto-delete the disk when the instance is deleted.", + // "location": "query", + // "required": true, + // "type": "boolean" + // }, + // "deviceName": { + // "description": "The device name of the disk to modify.", + // "location": "query", + // "pattern": "\\w[\\w.-]{0,254}", + // "required": true, + // "type": "string" + // }, + // "instance": { + // "description": "The instance name.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -40253,16 +52972,14 @@ func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // }, // "zone": { - // "description": "The name of the zone where you want to create the instance group.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups", - // "request": { - // "$ref": "InstanceGroup" - // }, + // "path": "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete", // "response": { // "$ref": "Operation" // }, @@ -40274,159 +52991,107 @@ func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.instanceGroups.list": +// method id "compute.instances.setLabels": -type InstanceGroupsListCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesSetLabelsCall struct { + s *Service + project string + zone string + instance string + instancessetlabelsrequest *InstancesSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of instance groups that are located in the -// specified project and zone. -func (r *InstanceGroupsService) List(project string, zone string) *InstanceGroupsListCall { - c := &InstanceGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetLabels: Sets labels on an instance. To learn more about labels, +// read the Labeling Resources documentation. +func (r *InstancesService) SetLabels(project string, zone string, instance string, instancessetlabelsrequest *InstancesSetLabelsRequest) *InstancesSetLabelsCall { + c := &InstancesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone + c.instance = instance + c.instancessetlabelsrequest = instancessetlabelsrequest return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. 
Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *InstanceGroupsListCall) Filter(filter string) *InstanceGroupsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InstanceGroupsListCall) MaxResults(maxResults int64) *InstanceGroupsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InstanceGroupsListCall) OrderBy(orderBy string) *InstanceGroupsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. 
Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InstanceGroupsListCall) PageToken(pageToken string) *InstanceGroupsListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesSetLabelsCall) RequestId(requestId string) *InstancesSetLabelsCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsListCall) Fields(s ...googleapi.Field) *InstanceGroupsListCall { +func (c *InstancesSetLabelsCall) Fields(s ...googleapi.Field) *InstancesSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstanceGroupsListCall) IfNoneMatch(entityTag string) *InstanceGroupsListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsListCall) Context(ctx context.Context) *InstanceGroupsListCall { +func (c *InstancesSetLabelsCall) Context(ctx context.Context) *InstancesSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupsListCall) Header() http.Header { +func (c *InstancesSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setLabels") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.list" call. -// Exactly one of *InstanceGroupList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceGroupList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. 
Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGroupList, error) { +// Do executes the "compute.instances.setLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -40445,7 +53110,7 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -40457,35 +53122,20 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou } return ret, nil // { - // "description": "Retrieves the list of instance groups that are located in the specified project and zone.", - // "httpMethod": "GET", - // "id": "compute.instanceGroups.list", + // "description": "Sets labels on an instance. To learn more about labels, read the Labeling Resources documentation.", + // "httpMethod": "POST", + // "id": "compute.instances.setLabels", // "parameterOrder": [ // "project", - // "zone" + // "zone", + // "instance" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, // "project": { @@ -40495,141 +53145,81 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { - // "description": "The name of the zone where the instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups", + // "path": "{project}/zones/{zone}/instances/{instance}/setLabels", + // "request": { + // "$ref": "InstancesSetLabelsRequest" + // }, // "response": { - // "$ref": "InstanceGroupList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *InstanceGroupsListCall) Pages(ctx context.Context, f func(*InstanceGroupList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instanceGroups.listInstances": +// method id "compute.instances.setMachineResources": -type InstanceGroupsListInstancesCall struct { - s *Service - project string - zone string - instanceGroup string - instancegroupslistinstancesrequest *InstanceGroupsListInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetMachineResourcesCall struct { + s *Service + project string + zone string + instance string + instancessetmachineresourcesrequest *InstancesSetMachineResourcesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// ListInstances: Lists the instances in the specified instance group. -func (r *InstanceGroupsService) ListInstances(project string, zone string, instanceGroup string, instancegroupslistinstancesrequest *InstanceGroupsListInstancesRequest) *InstanceGroupsListInstancesCall { - c := &InstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetMachineResources: Changes the number and/or type of accelerator +// for a stopped instance to the values specified in the request. +func (r *InstancesService) SetMachineResources(project string, zone string, instance string, instancessetmachineresourcesrequest *InstancesSetMachineResourcesRequest) *InstancesSetMachineResourcesCall { + c := &InstancesSetMachineResourcesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroup = instanceGroup - c.instancegroupslistinstancesrequest = instancegroupslistinstancesrequest - return c -} - -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. 
-func (c *InstanceGroupsListInstancesCall) Filter(filter string) *InstanceGroupsListInstancesCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InstanceGroupsListInstancesCall) MaxResults(maxResults int64) *InstanceGroupsListInstancesCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + c.instance = instance + c.instancessetmachineresourcesrequest = instancessetmachineresourcesrequest return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InstanceGroupsListInstancesCall) OrderBy(orderBy string) *InstanceGroupsListInstancesCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InstanceGroupsListInstancesCall) PageToken(pageToken string) *InstanceGroupsListInstancesCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesSetMachineResourcesCall) RequestId(requestId string) *InstancesSetMachineResourcesCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsListInstancesCall { +func (c *InstancesSetMachineResourcesCall) Fields(s ...googleapi.Field) *InstancesSetMachineResourcesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -40637,53 +53227,53 @@ func (c *InstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *InstanceGroupsListInstancesCall) Context(ctx context.Context) *InstanceGroupsListInstancesCall { +func (c *InstancesSetMachineResourcesCall) Context(ctx context.Context) *InstancesSetMachineResourcesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupsListInstancesCall) Header() http.Header { +func (c *InstancesSetMachineResourcesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetMachineResourcesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupslistinstancesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmachineresourcesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMachineResources") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.listInstances" call. -// Exactly one of *InstanceGroupsListInstances or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *InstanceGroupsListInstances.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*InstanceGroupsListInstances, error) { +// Do executes the "compute.instances.setMachineResources" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetMachineResourcesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -40702,7 +53292,7 @@ func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*Ins if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupsListInstances{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -40714,44 +53304,22 @@ func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*Ins } return ret, nil // { - // "description": "Lists the instances in the specified instance group.", + // "description": "Changes the number and/or type of accelerator for a stopped instance to the values specified in the request.", // "httpMethod": "POST", - // "id": "compute.instanceGroups.listInstances", + // "id": "compute.instances.setMachineResources", // "parameterOrder": [ // "project", // "zone", - // "instanceGroup" + // "instance" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "instanceGroup": { - // "description": "The name of the instance group from which you want to generate a list of included instances.", + // "instance": { + // "description": "Name of the instance scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". 
This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -40759,75 +53327,55 @@ func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*Ins // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { - // "description": "The name of the zone where the instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances", + // "path": "{project}/zones/{zone}/instances/{instance}/setMachineResources", // "request": { - // "$ref": "InstanceGroupsListInstancesRequest" + // "$ref": "InstancesSetMachineResourcesRequest" // }, // "response": { - // "$ref": "InstanceGroupsListInstances" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *InstanceGroupsListInstancesCall) Pages(ctx context.Context, f func(*InstanceGroupsListInstances) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instanceGroups.removeInstances": +// method id "compute.instances.setMachineType": -type InstanceGroupsRemoveInstancesCall struct { - s *Service - project string - zone string - instanceGroup string - instancegroupsremoveinstancesrequest *InstanceGroupsRemoveInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetMachineTypeCall struct { + s *Service + project string + zone string + instance string + instancessetmachinetyperequest *InstancesSetMachineTypeRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// RemoveInstances: Removes one or more instances from the specified -// instance group, but does not delete those instances. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration before the VM instance is removed or deleted. -func (r *InstanceGroupsService) RemoveInstances(project string, zone string, instanceGroup string, instancegroupsremoveinstancesrequest *InstanceGroupsRemoveInstancesRequest) *InstanceGroupsRemoveInstancesCall { - c := &InstanceGroupsRemoveInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetMachineType: Changes the machine type for a stopped instance to +// the machine type specified in the request. +func (r *InstancesService) SetMachineType(project string, zone string, instance string, instancessetmachinetyperequest *InstancesSetMachineTypeRequest) *InstancesSetMachineTypeCall { + c := &InstancesSetMachineTypeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroup = instanceGroup - c.instancegroupsremoveinstancesrequest = instancegroupsremoveinstancesrequest + c.instance = instance + c.instancessetmachinetyperequest = instancessetmachinetyperequest return c } @@ -40845,7 +53393,7 @@ func (r *InstanceGroupsService) RemoveInstances(project string, zone string, ins // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupsRemoveInstancesCall) RequestId(requestId string) *InstanceGroupsRemoveInstancesCall { +func (c *InstancesSetMachineTypeCall) RequestId(requestId string) *InstancesSetMachineTypeCall { c.urlParams_.Set("requestId", requestId) return c } @@ -40853,7 +53401,7 @@ func (c *InstanceGroupsRemoveInstancesCall) RequestId(requestId string) *Instanc // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsRemoveInstancesCall) Fields(s ...googleapi.Field) *InstanceGroupsRemoveInstancesCall { +func (c *InstancesSetMachineTypeCall) Fields(s ...googleapi.Field) *InstancesSetMachineTypeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -40861,53 +53409,53 @@ func (c *InstanceGroupsRemoveInstancesCall) Fields(s ...googleapi.Field) *Instan // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsRemoveInstancesCall) Context(ctx context.Context) *InstanceGroupsRemoveInstancesCall { +func (c *InstancesSetMachineTypeCall) Context(ctx context.Context) *InstancesSetMachineTypeCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupsRemoveInstancesCall) Header() http.Header { +func (c *InstancesSetMachineTypeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsRemoveInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetMachineTypeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupsremoveinstancesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmachinetyperequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMachineType") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.removeInstances" call. +// Do executes the "compute.instances.setMachineType" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupsRemoveInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -40938,18 +53486,19 @@ func (c *InstanceGroupsRemoveInstancesCall) Do(opts ...googleapi.CallOption) (*O } return ret, nil // { - // "description": "Removes one or more instances from the specified instance group, but does not delete those instances.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration before the VM instance is removed or deleted.", + // "description": "Changes the machine type for a stopped instance to the machine type specified in the request.", // "httpMethod": "POST", - // "id": "compute.instanceGroups.removeInstances", + // "id": "compute.instances.setMachineType", // "parameterOrder": [ // "project", // "zone", - // "instanceGroup" + // "instance" // ], // "parameters": { - // "instanceGroup": { - // "description": "The name of the instance group where the specified instances will be removed.", + // "instance": { + // "description": "Name of the instance scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -40966,15 +53515,16 @@ func (c *InstanceGroupsRemoveInstancesCall) Do(opts ...googleapi.CallOption) (*O // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances", + // "path": "{project}/zones/{zone}/instances/{instance}/setMachineType", // "request": { - // "$ref": "InstanceGroupsRemoveInstancesRequest" + // "$ref": "InstancesSetMachineTypeRequest" // }, // "response": { // "$ref": "Operation" @@ -40987,26 +53537,28 @@ func (c *InstanceGroupsRemoveInstancesCall) Do(opts ...googleapi.CallOption) (*O } -// method id "compute.instanceGroups.setNamedPorts": +// method id "compute.instances.setMetadata": -type InstanceGroupsSetNamedPortsCall struct { - s *Service - project string - zone string - instanceGroup string - instancegroupssetnamedportsrequest *InstanceGroupsSetNamedPortsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetMetadataCall struct { + s *Service + project string + zone string + instance string + metadata *Metadata + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetNamedPorts: Sets the named ports for the specified instance group. -func (r *InstanceGroupsService) SetNamedPorts(project string, zone string, instanceGroup string, instancegroupssetnamedportsrequest *InstanceGroupsSetNamedPortsRequest) *InstanceGroupsSetNamedPortsCall { - c := &InstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetMetadata: Sets metadata for the specified instance to the data +// included in the request. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setMetadata +func (r *InstancesService) SetMetadata(project string, zone string, instance string, metadata *Metadata) *InstancesSetMetadataCall { + c := &InstancesSetMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instanceGroup = instanceGroup - c.instancegroupssetnamedportsrequest = instancegroupssetnamedportsrequest + c.instance = instance + c.metadata = metadata return c } @@ -41024,7 +53576,7 @@ func (r *InstanceGroupsService) SetNamedPorts(project string, zone string, insta // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceGroupsSetNamedPortsCall) RequestId(requestId string) *InstanceGroupsSetNamedPortsCall { +func (c *InstancesSetMetadataCall) RequestId(requestId string) *InstancesSetMetadataCall { c.urlParams_.Set("requestId", requestId) return c } @@ -41032,7 +53584,7 @@ func (c *InstanceGroupsSetNamedPortsCall) RequestId(requestId string) *InstanceG // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *InstanceGroupsSetNamedPortsCall { +func (c *InstancesSetMetadataCall) Fields(s ...googleapi.Field) *InstancesSetMetadataCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -41040,53 +53592,53 @@ func (c *InstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *Instance // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsSetNamedPortsCall) Context(ctx context.Context) *InstanceGroupsSetNamedPortsCall { +func (c *InstancesSetMetadataCall) Context(ctx context.Context) *InstancesSetMetadataCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceGroupsSetNamedPortsCall) Header() http.Header { +func (c *InstancesSetMetadataCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetMetadataCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupssetnamedportsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.metadata) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMetadata") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instanceGroup": c.instanceGroup, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceGroups.setNamedPorts" call. +// Do executes the "compute.instances.setMetadata" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -41117,18 +53669,19 @@ func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Sets the named ports for the specified instance group.", + // "description": "Sets metadata for the specified instance to the data included in the request.", // "httpMethod": "POST", - // "id": "compute.instanceGroups.setNamedPorts", + // "id": "compute.instances.setMetadata", // "parameterOrder": [ // "project", // "zone", - // "instanceGroup" + // "instance" // ], // "parameters": { - // "instanceGroup": { - // "description": "The name of the instance group where the named ports are updated.", + // "instance": { + // "description": "Name of the instance scoping this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -41145,15 +53698,16 @@ func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // }, // "zone": { - // "description": "The name of the zone where the instance group is located.", + // "description": "The name of the zone for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts", + // "path": "{project}/zones/{zone}/instances/{instance}/setMetadata", // "request": { - // "$ref": "InstanceGroupsSetNamedPortsRequest" + // "$ref": "Metadata" // }, // "response": { // "$ref": "Operation" @@ -41166,34 +53720,55 @@ func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.instanceGroups.testIamPermissions": +// method id "compute.instances.setMinCpuPlatform": -type InstanceGroupsTestIamPermissionsCall struct { - s *Service - project string - zone string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetMinCpuPlatformCall struct { + s *Service + project string + zone string + instance string + instancessetmincpuplatformrequest *InstancesSetMinCpuPlatformRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. 
-func (r *InstanceGroupsService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstanceGroupsTestIamPermissionsCall { - c := &InstanceGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetMinCpuPlatform: Changes the minimum CPU platform that this +// instance should use. This method can only be called on a stopped +// instance. For more information, read Specifying a Minimum CPU +// Platform. +func (r *InstancesService) SetMinCpuPlatform(project string, zone string, instance string, instancessetmincpuplatformrequest *InstancesSetMinCpuPlatformRequest) *InstancesSetMinCpuPlatformCall { + c := &InstancesSetMinCpuPlatformCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.instance = instance + c.instancessetmincpuplatformrequest = instancessetmincpuplatformrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesSetMinCpuPlatformCall) RequestId(requestId string) *InstancesSetMinCpuPlatformCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstanceGroupsTestIamPermissionsCall { +func (c *InstancesSetMinCpuPlatformCall) Fields(s ...googleapi.Field) *InstancesSetMinCpuPlatformCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -41201,53 +53776,53 @@ func (c *InstanceGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *Ins // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceGroupsTestIamPermissionsCall) Context(ctx context.Context) *InstanceGroupsTestIamPermissionsCall { +func (c *InstancesSetMinCpuPlatformCall) Context(ctx context.Context) *InstancesSetMinCpuPlatformCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstanceGroupsTestIamPermissionsCall) Header() http.Header { +func (c *InstancesSetMinCpuPlatformCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetMinCpuPlatformCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmincpuplatformrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMinCpuPlatform") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, - "resource": c.resource, + "instance": c.instance, }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instanceGroups.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instances.setMinCpuPlatform" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetMinCpuPlatformCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -41266,7 +53841,7 @@ func (c *InstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -41278,15 +53853,22 @@ func (c *InstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Changes the minimum CPU platform that this instance should use. This method can only be called on a stopped instance. 
For more information, read Specifying a Minimum CPU Platform.", // "httpMethod": "POST", - // "id": "compute.instanceGroups.testIamPermissions", + // "id": "compute.instances.setMinCpuPlatform", // "parameterOrder": [ // "project", // "zone", - // "resource" + // "instance" // ], // "parameters": { + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -41294,11 +53876,9 @@ func (c *InstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // }, // "zone": { @@ -41309,43 +53889,42 @@ func (c *InstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instanceGroups/{resource}/testIamPermissions", + // "path": "{project}/zones/{zone}/instances/{instance}/setMinCpuPlatform", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "InstancesSetMinCpuPlatformRequest" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instanceTemplates.delete": +// method id "compute.instances.setScheduling": -type InstanceTemplatesDeleteCall struct { - s *Service - project string - instanceTemplate string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetSchedulingCall struct { + s *Service + project string + zone string + instance string + scheduling *Scheduling + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified instance template. If you delete an -// instance template that is being referenced from another instance -// group, the instance group will not be able to create or recreate -// virtual machine instances. Deleting an instance template is permanent -// and cannot be undone. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/delete -func (r *InstanceTemplatesService) Delete(project string, instanceTemplate string) *InstanceTemplatesDeleteCall { - c := &InstanceTemplatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetScheduling: Sets an instance's scheduling options. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setScheduling +func (r *InstancesService) SetScheduling(project string, zone string, instance string, scheduling *Scheduling) *InstancesSetSchedulingCall { + c := &InstancesSetSchedulingCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.instanceTemplate = instanceTemplate + c.zone = zone + c.instance = instance + c.scheduling = scheduling return c } @@ -41363,7 +53942,7 @@ func (r *InstanceTemplatesService) Delete(project string, instanceTemplate strin // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceTemplatesDeleteCall) RequestId(requestId string) *InstanceTemplatesDeleteCall { +func (c *InstancesSetSchedulingCall) RequestId(requestId string) *InstancesSetSchedulingCall { c.urlParams_.Set("requestId", requestId) return c } @@ -41371,7 +53950,7 @@ func (c *InstanceTemplatesDeleteCall) RequestId(requestId string) *InstanceTempl // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceTemplatesDeleteCall) Fields(s ...googleapi.Field) *InstanceTemplatesDeleteCall { +func (c *InstancesSetSchedulingCall) Fields(s ...googleapi.Field) *InstancesSetSchedulingCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -41379,47 +53958,53 @@ func (c *InstanceTemplatesDeleteCall) Fields(s ...googleapi.Field) *InstanceTemp // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceTemplatesDeleteCall) Context(ctx context.Context) *InstanceTemplatesDeleteCall { +func (c *InstancesSetSchedulingCall) Context(ctx context.Context) *InstancesSetSchedulingCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceTemplatesDeleteCall) Header() http.Header { +func (c *InstancesSetSchedulingCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetSchedulingCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.scheduling) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{instanceTemplate}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setScheduling") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "instanceTemplate": c.instanceTemplate, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.delete" call. +// Do executes the "compute.instances.setScheduling" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -41450,16 +54035,17 @@ func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Deletes the specified instance template. If you delete an instance template that is being referenced from another instance group, the instance group will not be able to create or recreate virtual machine instances. Deleting an instance template is permanent and cannot be undone.", - // "httpMethod": "DELETE", - // "id": "compute.instanceTemplates.delete", + // "description": "Sets an instance's scheduling options.", + // "httpMethod": "POST", + // "id": "compute.instances.setScheduling", // "parameterOrder": [ // "project", - // "instanceTemplate" + // "zone", + // "instance" // ], // "parameters": { - // "instanceTemplate": { - // "description": "The name of the instance template to delete.", + // "instance": { + // "description": "Instance name.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -41476,9 +54062,19 @@ func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/instanceTemplates/{instanceTemplate}", + // "path": "{project}/zones/{zone}/instances/{instance}/setScheduling", + // "request": { + // "$ref": "Scheduling" + // }, // "response": { // "$ref": "Operation" // }, @@ -41490,93 +54086,108 @@ func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.instanceTemplates.get": +// method id "compute.instances.setServiceAccount": -type InstanceTemplatesGetCall struct { - s *Service - project string - instanceTemplate string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesSetServiceAccountCall struct { + s *Service + project string + zone string + instance string + instancessetserviceaccountrequest *InstancesSetServiceAccountRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified instance template. Get a list of available -// instance templates by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/get -func (r *InstanceTemplatesService) Get(project string, instanceTemplate string) *InstanceTemplatesGetCall { - c := &InstanceTemplatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetServiceAccount: Sets the service account on the instance. For more +// information, read Changing the service account and access scopes for +// an instance. +func (r *InstancesService) SetServiceAccount(project string, zone string, instance string, instancessetserviceaccountrequest *InstancesSetServiceAccountRequest) *InstancesSetServiceAccountCall { + c := &InstancesSetServiceAccountCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.instanceTemplate = instanceTemplate + c.zone = zone + c.instance = instance + c.instancessetserviceaccountrequest = instancessetserviceaccountrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesSetServiceAccountCall) RequestId(requestId string) *InstancesSetServiceAccountCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InstanceTemplatesGetCall) Fields(s ...googleapi.Field) *InstanceTemplatesGetCall { +func (c *InstancesSetServiceAccountCall) Fields(s ...googleapi.Field) *InstancesSetServiceAccountCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstanceTemplatesGetCall) IfNoneMatch(entityTag string) *InstanceTemplatesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceTemplatesGetCall) Context(ctx context.Context) *InstanceTemplatesGetCall { +func (c *InstancesSetServiceAccountCall) Context(ctx context.Context) *InstancesSetServiceAccountCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceTemplatesGetCall) Header() http.Header { +func (c *InstancesSetServiceAccountCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceTemplatesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetserviceaccountrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{instanceTemplate}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setServiceAccount") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "instanceTemplate": c.instanceTemplate, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.get" call. -// Exactly one of *InstanceTemplate or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceTemplate.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTemplate, error) { +// Do executes the "compute.instances.setServiceAccount" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -41595,7 +54206,7 @@ func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTe if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceTemplate{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -41607,16 +54218,17 @@ func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTe } return ret, nil // { - // "description": "Returns the specified instance template. Get a list of available instance templates by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.instanceTemplates.get", + // "description": "Sets the service account on the instance. For more information, read Changing the service account and access scopes for an instance.", + // "httpMethod": "POST", + // "id": "compute.instances.setServiceAccount", // "parameterOrder": [ // "project", - // "instanceTemplate" + // "zone", + // "instance" // ], // "parameters": { - // "instanceTemplate": { - // "description": "The name of the instance template.", + // "instance": { + // "description": "Name of the instance resource to start.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -41628,42 +54240,57 @@ func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTe // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/instanceTemplates/{instanceTemplate}", + // "path": "{project}/zones/{zone}/instances/{instance}/setServiceAccount", + // "request": { + // "$ref": "InstancesSetServiceAccountRequest" + // }, // "response": { - // "$ref": "InstanceTemplate" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instanceTemplates.insert": +// method id "compute.instances.setTags": -type InstanceTemplatesInsertCall struct { - s *Service - project string - instancetemplate *InstanceTemplate - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetTagsCall struct { + s *Service + project string + zone string + instance string + tags *Tags + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates an instance template in the specified project using -// the data that is included in the request. If you are creating a new -// template to update an existing instance group, your new instance -// template must use the same network or, if applicable, the same -// subnetwork as the original template. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/insert -func (r *InstanceTemplatesService) Insert(project string, instancetemplate *InstanceTemplate) *InstanceTemplatesInsertCall { - c := &InstanceTemplatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetTags: Sets tags for the specified instance to the data included in +// the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setTags +func (r *InstancesService) SetTags(project string, zone string, instance string, tags *Tags) *InstancesSetTagsCall { + c := &InstancesSetTagsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.instancetemplate = instancetemplate + c.zone = zone + c.instance = instance + c.tags = tags return c } @@ -41681,7 +54308,7 @@ func (r *InstanceTemplatesService) Insert(project string, instancetemplate *Inst // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstanceTemplatesInsertCall) RequestId(requestId string) *InstanceTemplatesInsertCall { +func (c *InstancesSetTagsCall) RequestId(requestId string) *InstancesSetTagsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -41689,7 +54316,7 @@ func (c *InstanceTemplatesInsertCall) RequestId(requestId string) *InstanceTempl // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InstanceTemplatesInsertCall) Fields(s ...googleapi.Field) *InstanceTemplatesInsertCall { +func (c *InstancesSetTagsCall) Fields(s ...googleapi.Field) *InstancesSetTagsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -41697,51 +54324,53 @@ func (c *InstanceTemplatesInsertCall) Fields(s ...googleapi.Field) *InstanceTemp // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceTemplatesInsertCall) Context(ctx context.Context) *InstanceTemplatesInsertCall { +func (c *InstancesSetTagsCall) Context(ctx context.Context) *InstancesSetTagsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceTemplatesInsertCall) Header() http.Header { +func (c *InstancesSetTagsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceTemplatesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancetemplate) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.tags) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setTags") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.insert" call. +// Do executes the "compute.instances.setTags" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -41772,13 +54401,22 @@ func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Creates an instance template in the specified project using the data that is included in the request. 
If you are creating a new template to update an existing instance group, your new instance template must use the same network or, if applicable, the same subnetwork as the original template.", + // "description": "Sets tags for the specified instance to the data included in the request.", // "httpMethod": "POST", - // "id": "compute.instanceTemplates.insert", + // "id": "compute.instances.setTags", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instance" // ], // "parameters": { + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -41790,11 +54428,18 @@ func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operati // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/global/instanceTemplates", + // "path": "{project}/zones/{zone}/instances/{instance}/setTags", // "request": { - // "$ref": "InstanceTemplate" + // "$ref": "Tags" // }, // "response": { // "$ref": "Operation" @@ -41807,157 +54452,102 @@ func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.instanceTemplates.list": +// method id "compute.instances.start": -type InstanceTemplatesListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesStartCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of instance templates that are contained -// within the specified project and zone. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instanceTemplates/list -func (r *InstanceTemplatesService) List(project string) *InstanceTemplatesListCall { - c := &InstanceTemplatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Start: Starts an instance that was stopped using the using the +// instances().stop method. For more information, see Restart an +// instance. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/start +func (r *InstancesService) Start(project string, zone string, instance string) *InstancesStartCall { + c := &InstancesStartCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.zone = zone + c.instance = instance return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *InstanceTemplatesListCall) Filter(filter string) *InstanceTemplatesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InstanceTemplatesListCall) MaxResults(maxResults int64) *InstanceTemplatesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. 
This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InstanceTemplatesListCall) OrderBy(orderBy string) *InstanceTemplatesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InstanceTemplatesListCall) PageToken(pageToken string) *InstanceTemplatesListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesStartCall) RequestId(requestId string) *InstancesStartCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstanceTemplatesListCall) Fields(s ...googleapi.Field) *InstanceTemplatesListCall { +func (c *InstancesStartCall) Fields(s ...googleapi.Field) *InstancesStartCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstanceTemplatesListCall) IfNoneMatch(entityTag string) *InstanceTemplatesListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceTemplatesListCall) Context(ctx context.Context) *InstanceTemplatesListCall { +func (c *InstancesStartCall) Context(ctx context.Context) *InstancesStartCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceTemplatesListCall) Header() http.Header { +func (c *InstancesStartCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceTemplatesListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/start") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.list" call. -// Exactly one of *InstanceTemplateList or error will be non-nil. 
Any -// non-2xx status code is an error. Response headers are in either -// *InstanceTemplateList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceTemplateList, error) { +// Do executes the "compute.instances.start" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -41976,7 +54566,7 @@ func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceT if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceTemplateList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -41988,104 +54578,102 @@ func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceT } return ret, nil // { - // "description": "Retrieves a list of instance templates that are contained within the specified project and zone.", - // "httpMethod": "GET", - // "id": "compute.instanceTemplates.list", + // "description": "Starts an instance that was stopped using the using the instances().stop method. For more information, see Restart an instance.", + // "httpMethod": "POST", + // "id": "compute.instances.start", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "instance" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", + // "instance": { + // "description": "Name of the instance resource to start.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/instanceTemplates", + // "path": "{project}/zones/{zone}/instances/{instance}/start", // "response": { - // "$ref": "InstanceTemplateList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. 
-// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InstanceTemplatesListCall) Pages(ctx context.Context, f func(*InstanceTemplateList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instanceTemplates.testIamPermissions": +// method id "compute.instances.startWithEncryptionKey": -type InstanceTemplatesTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesStartWithEncryptionKeyCall struct { + s *Service + project string + zone string + instance string + instancesstartwithencryptionkeyrequest *InstancesStartWithEncryptionKeyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *InstanceTemplatesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstanceTemplatesTestIamPermissionsCall { - c := &InstanceTemplatesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// StartWithEncryptionKey: Starts an instance that was stopped using the +// using the instances().stop method. For more information, see Restart +// an instance. +func (r *InstancesService) StartWithEncryptionKey(project string, zone string, instance string, instancesstartwithencryptionkeyrequest *InstancesStartWithEncryptionKeyRequest) *InstancesStartWithEncryptionKeyCall { + c := &InstancesStartWithEncryptionKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.zone = zone + c.instance = instance + c.instancesstartwithencryptionkeyrequest = instancesstartwithencryptionkeyrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesStartWithEncryptionKeyCall) RequestId(requestId string) *InstancesStartWithEncryptionKeyCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InstanceTemplatesTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstanceTemplatesTestIamPermissionsCall { +func (c *InstancesStartWithEncryptionKeyCall) Fields(s ...googleapi.Field) *InstancesStartWithEncryptionKeyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -42093,52 +54681,53 @@ func (c *InstanceTemplatesTestIamPermissionsCall) Fields(s ...googleapi.Field) * // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstanceTemplatesTestIamPermissionsCall) Context(ctx context.Context) *InstanceTemplatesTestIamPermissionsCall { +func (c *InstancesStartWithEncryptionKeyCall) Context(ctx context.Context) *InstancesStartWithEncryptionKeyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstanceTemplatesTestIamPermissionsCall) Header() http.Header { +func (c *InstancesStartWithEncryptionKeyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstanceTemplatesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesStartWithEncryptionKeyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesstartwithencryptionkeyrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "resource": c.resource, + "zone": c.zone, + "instance": c.instance, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instanceTemplates.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstanceTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.instances.startWithEncryptionKey" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -42157,7 +54746,7 @@ func (c *InstanceTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOptio if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -42169,14 +54758,22 @@ func (c *InstanceTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "Starts an instance that was stopped using the using the instances().stop method. For more information, see Restart an instance.", // "httpMethod": "POST", - // "id": "compute.instanceTemplates.testIamPermissions", + // "id": "compute.instances.startWithEncryptionKey", // "parameterOrder": [ // "project", - // "resource" + // "zone", + // "instance" // ], // "parameters": { + // "instance": { + // "description": "Name of the instance resource to start.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -42184,53 +54781,59 @@ func (c *InstanceTemplatesTestIamPermissionsCall) Do(opts ...googleapi.CallOptio // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name of the resource for this request.", + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/instanceTemplates/{resource}/testIamPermissions", + // "path": "{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "InstancesStartWithEncryptionKeyRequest" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.instances.addAccessConfig": +// method id "compute.instances.stop": -type InstancesAddAccessConfigCall struct { - s *Service - project string - zone string - instance string - accessconfig *AccessConfig - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesStopCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AddAccessConfig: Adds an access config to an instance's network -// interface. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/addAccessConfig -func (r *InstancesService) AddAccessConfig(project string, zone string, instance string, networkInterface string, accessconfig *AccessConfig) *InstancesAddAccessConfigCall { - c := &InstancesAddAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Stop: Stops a running instance, shutting it down cleanly, and allows +// you to restart the instance at a later time. Stopped instances do not +// incur per-minute, virtual machine usage charges while they are +// stopped, but any resources that the virtual machine is using, such as +// persistent disks and static IP addresses, will continue to be charged +// until they are deleted. For more information, see Stopping an +// instance. +// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/stop +func (r *InstancesService) Stop(project string, zone string, instance string) *InstancesStopCall { + c := &InstancesStopCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance - c.urlParams_.Set("networkInterface", networkInterface) - c.accessconfig = accessconfig return c } @@ -42248,7 +54851,7 @@ func (r *InstancesService) AddAccessConfig(project string, zone string, instance // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesAddAccessConfigCall) RequestId(requestId string) *InstancesAddAccessConfigCall { +func (c *InstancesStopCall) RequestId(requestId string) *InstancesStopCall { c.urlParams_.Set("requestId", requestId) return c } @@ -42256,7 +54859,7 @@ func (c *InstancesAddAccessConfigCall) RequestId(requestId string) *InstancesAdd // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesAddAccessConfigCall) Fields(s ...googleapi.Field) *InstancesAddAccessConfigCall { +func (c *InstancesStopCall) Fields(s ...googleapi.Field) *InstancesStopCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -42264,34 +54867,29 @@ func (c *InstancesAddAccessConfigCall) Fields(s ...googleapi.Field) *InstancesAd // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesAddAccessConfigCall) Context(ctx context.Context) *InstancesAddAccessConfigCall { +func (c *InstancesStopCall) Context(ctx context.Context) *InstancesStopCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesAddAccessConfigCall) Header() http.Header { +func (c *InstancesStopCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesAddAccessConfigCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesStopCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.accessconfig) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/addAccessConfig") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/stop") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -42303,14 +54901,14 @@ func (c *InstancesAddAccessConfigCall) doRequest(alt string) (*http.Response, er return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.addAccessConfig" call. +// Do executes the "compute.instances.stop" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -42341,29 +54939,22 @@ func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Adds an access config to an instance's network interface.", + // "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur per-minute, virtual machine usage charges while they are stopped, but any resources that the virtual machine is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. 
For more information, see Stopping an instance.", // "httpMethod": "POST", - // "id": "compute.instances.addAccessConfig", + // "id": "compute.instances.stop", // "parameterOrder": [ // "project", // "zone", - // "instance", - // "networkInterface" + // "instance" // ], // "parameters": { // "instance": { - // "description": "The instance name for this request.", + // "description": "Name of the instance resource to stop.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "networkInterface": { - // "description": "The name of the network interface to add to this instance.", - // "location": "query", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -42384,10 +54975,7 @@ func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operat // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/addAccessConfig", - // "request": { - // "$ref": "AccessConfig" - // }, + // "path": "{project}/zones/{zone}/instances/{instance}/stop", // "response": { // "$ref": "Operation" // }, @@ -42399,156 +54987,88 @@ func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operat } -// method id "compute.instances.aggregatedList": +// method id "compute.instances.testIamPermissions": -type InstancesAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type InstancesTestIamPermissionsCall struct { + s *Service + project string + zone string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AggregatedList: Retrieves aggregated list of instances. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/aggregatedList -func (r *InstancesService) AggregatedList(project string) *InstancesAggregatedListCall { - c := &InstancesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *InstancesService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstancesTestIamPermissionsCall { + c := &InstancesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. 
For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *InstancesAggregatedListCall) Filter(filter string) *InstancesAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *InstancesAggregatedListCall) MaxResults(maxResults int64) *InstancesAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InstancesAggregatedListCall) OrderBy(orderBy string) *InstancesAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InstancesAggregatedListCall) PageToken(pageToken string) *InstancesAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) + c.zone = zone + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesAggregatedListCall) Fields(s ...googleapi.Field) *InstancesAggregatedListCall { +func (c *InstancesTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstancesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstancesAggregatedListCall) IfNoneMatch(entityTag string) *InstancesAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *InstancesAggregatedListCall) Context(ctx context.Context) *InstancesAggregatedListCall { +func (c *InstancesTestIamPermissionsCall) Context(ctx context.Context) *InstancesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesAggregatedListCall) Header() http.Header { +func (c *InstancesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "zone": c.zone, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.aggregatedList" call. -// Exactly one of *InstanceAggregatedList or error will be non-nil. Any +// Do executes the "compute.instances.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *InstanceAggregatedList.ServerResponse.Header or (if a response was +// *TestPermissionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceAggregatedList, error) { +func (c *InstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -42567,7 +55087,7 @@ func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Instanc if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceAggregatedList{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -42579,47 +55099,43 @@ func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Instanc } return ret, nil // { - // "description": "Retrieves aggregated list of instances.", - // "httpMethod": "GET", - // "id": "compute.instances.aggregatedList", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.instances.testIamPermissions", // "parameterOrder": [ - // "project" + // "project", + // "zone", + // "resource" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "zone": { + // "description": "The name of the zone for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/aggregated/instances", + // "path": "{project}/zones/{zone}/instances/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "InstanceAggregatedList" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -42630,131 +55146,157 @@ func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Instanc } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InstancesAggregatedListCall) Pages(ctx context.Context, f func(*InstanceAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instances.attachDisk": +// method id "compute.interconnectAttachments.aggregatedList": -type InstancesAttachDiskCall struct { +type InterconnectAttachmentsAggregatedListCall struct { s *Service project string - zone string - instance string - attacheddisk *AttachedDisk urlParams_ gensupport.URLParams + ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// AttachDisk: Attaches an existing Disk resource to an instance. You -// must first create the disk before you can attach it. It is not -// possible to create and attach a disk at the same time. For more -// information, read Adding a persistent disk to your instance. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/attachDisk -func (r *InstancesService) AttachDisk(project string, zone string, instance string, attacheddisk *AttachedDisk) *InstancesAttachDiskCall { - c := &InstancesAttachDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instance = instance - c.attacheddisk = attacheddisk +// AggregatedList: Retrieves an aggregated list of interconnect +// attachments. 
+func (r *InterconnectAttachmentsService) AggregatedList(project string) *InterconnectAttachmentsAggregatedListCall { + c := &InterconnectAttachmentsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *InterconnectAttachmentsAggregatedListCall) Filter(filter string) *InterconnectAttachmentsAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *InterconnectAttachmentsAggregatedListCall) MaxResults(maxResults int64) *InterconnectAttachmentsAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). 
Use this to sort resources like operations so +// that the newest operation is returned first. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesAttachDiskCall) RequestId(requestId string) *InstancesAttachDiskCall { - c.urlParams_.Set("requestId", requestId) +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InterconnectAttachmentsAggregatedListCall) OrderBy(orderBy string) *InterconnectAttachmentsAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InterconnectAttachmentsAggregatedListCall) PageToken(pageToken string) *InterconnectAttachmentsAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesAttachDiskCall) Fields(s ...googleapi.Field) *InstancesAttachDiskCall { +func (c *InterconnectAttachmentsAggregatedListCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InterconnectAttachmentsAggregatedListCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesAttachDiskCall) Context(ctx context.Context) *InstancesAttachDiskCall { +func (c *InterconnectAttachmentsAggregatedListCall) Context(ctx context.Context) *InterconnectAttachmentsAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesAttachDiskCall) Header() http.Header { +func (c *InterconnectAttachmentsAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesAttachDiskCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.attacheddisk) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/attachDisk") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/interconnectAttachments") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.attachDisk" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.interconnectAttachments.aggregatedList" call. +// Exactly one of *InterconnectAttachmentAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *InterconnectAttachmentAggregatedList.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachmentAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -42773,7 +55315,7 @@ func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InterconnectAttachmentAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -42785,77 +55327,96 @@ func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Attaches an existing Disk resource to an instance. You must first create the disk before you can attach it. It is not possible to create and attach a disk at the same time. For more information, read Adding a persistent disk to your instance.", - // "httpMethod": "POST", - // "id": "compute.instances.attachDisk", + // "description": "Retrieves an aggregated list of interconnect attachments.", + // "httpMethod": "GET", + // "id": "compute.interconnectAttachments.aggregatedList", // "parameterOrder": [ - // "project", - // "zone", - // "instance" + // "project" // ], // "parameters": { - // "instance": { - // "description": "The instance name for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/attachDisk", - // "request": { - // "$ref": "AttachedDisk" - // }, + // "path": "{project}/aggregated/interconnectAttachments", // "response": { - // "$ref": "Operation" + // "$ref": "InterconnectAttachmentAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.delete": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InterconnectAttachmentsAggregatedListCall) Pages(ctx context.Context, f func(*InterconnectAttachmentAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstancesDeleteCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.interconnectAttachments.delete": + +type InterconnectAttachmentsDeleteCall struct { + s *Service + project string + region string + interconnectAttachment string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified Instance resource. For more -// information, see Stopping or Deleting an Instance. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/delete -func (r *InstancesService) Delete(project string, zone string, instance string) *InstancesDeleteCall { - c := &InstancesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified interconnect attachment. +func (r *InterconnectAttachmentsService) Delete(project string, region string, interconnectAttachment string) *InterconnectAttachmentsDeleteCall { + c := &InterconnectAttachmentsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance + c.region = region + c.interconnectAttachment = interconnectAttachment return c } @@ -42873,7 +55434,7 @@ func (r *InstancesService) Delete(project string, zone string, instance string) // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *InstancesDeleteCall) RequestId(requestId string) *InstancesDeleteCall { +func (c *InterconnectAttachmentsDeleteCall) RequestId(requestId string) *InterconnectAttachmentsDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -42881,7 +55442,7 @@ func (c *InstancesDeleteCall) RequestId(requestId string) *InstancesDeleteCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesDeleteCall) Fields(s ...googleapi.Field) *InstancesDeleteCall { +func (c *InterconnectAttachmentsDeleteCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -42889,21 +55450,21 @@ func (c *InstancesDeleteCall) Fields(s ...googleapi.Field) *InstancesDeleteCall // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesDeleteCall) Context(ctx context.Context) *InstancesDeleteCall { +func (c *InterconnectAttachmentsDeleteCall) Context(ctx context.Context) *InterconnectAttachmentsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesDeleteCall) Header() http.Header { +func (c *InterconnectAttachmentsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -42911,26 +55472,26 @@ func (c *InstancesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "region": c.region, + "interconnectAttachment": c.interconnectAttachment, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.delete" call. +// Do executes the "compute.interconnectAttachments.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InterconnectAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -42961,17 +55522,17 @@ func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Deletes the specified Instance resource. For more information, see Stopping or Deleting an Instance.", + // "description": "Deletes the specified interconnect attachment.", // "httpMethod": "DELETE", - // "id": "compute.instances.delete", + // "id": "compute.interconnectAttachments.delete", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "region", + // "interconnectAttachment" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance resource to delete.", + // "interconnectAttachment": { + // "description": "Name of the interconnect attachment to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -42984,20 +55545,20 @@ func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}", + // "path": "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", // "response": { // "$ref": "Operation" // }, @@ -43009,103 +55570,94 @@ func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro } -// method id "compute.instances.deleteAccessConfig": +// method id "compute.interconnectAttachments.get": -type InstancesDeleteAccessConfigCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectAttachmentsGetCall struct { + s *Service + project string + region string + interconnectAttachment string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// DeleteAccessConfig: Deletes an access config from an instance's -// network interface. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/deleteAccessConfig -func (r *InstancesService) DeleteAccessConfig(project string, zone string, instance string, accessConfig string, networkInterface string) *InstancesDeleteAccessConfigCall { - c := &InstancesDeleteAccessConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified interconnect attachment. +func (r *InterconnectAttachmentsService) Get(project string, region string, interconnectAttachment string) *InterconnectAttachmentsGetCall { + c := &InterconnectAttachmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.urlParams_.Set("accessConfig", accessConfig) - c.urlParams_.Set("networkInterface", networkInterface) - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesDeleteAccessConfigCall) RequestId(requestId string) *InstancesDeleteAccessConfigCall { - c.urlParams_.Set("requestId", requestId) + c.region = region + c.interconnectAttachment = interconnectAttachment return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InstancesDeleteAccessConfigCall) Fields(s ...googleapi.Field) *InstancesDeleteAccessConfigCall { +func (c *InterconnectAttachmentsGetCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InterconnectAttachmentsGetCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesDeleteAccessConfigCall) Context(ctx context.Context) *InstancesDeleteAccessConfigCall { +func (c *InterconnectAttachmentsGetCall) Context(ctx context.Context) *InterconnectAttachmentsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesDeleteAccessConfigCall) Header() http.Header { +func (c *InterconnectAttachmentsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesDeleteAccessConfigCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "region": c.region, + "interconnectAttachment": c.interconnectAttachment, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.deleteAccessConfig" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.interconnectAttachments.get" call. +// Exactly one of *InterconnectAttachment or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InterconnectAttachment.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. 
Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachment, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -43124,7 +55676,7 @@ func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InterconnectAttachment{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -43136,36 +55688,22 @@ func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Deletes an access config from an instance's network interface.", - // "httpMethod": "POST", - // "id": "compute.instances.deleteAccessConfig", + // "description": "Returns the specified interconnect attachment.", + // "httpMethod": "GET", + // "id": "compute.interconnectAttachments.get", // "parameterOrder": [ // "project", - // "zone", - // "instance", - // "accessConfig", - // "networkInterface" + // "region", + // "interconnectAttachment" // ], // "parameters": { - // "accessConfig": { - // "description": "The name of the access config to delete.", - // "location": "query", - // "required": true, - // "type": "string" - // }, - // "instance": { - // "description": "The instance name for this request.", + // "interconnectAttachment": { + // "description": "Name of the interconnect attachment to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "networkInterface": { - // "description": "The name of the network interface.", - // "location": "query", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -43173,51 +55711,46 @@ func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig", + // "path": "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", // "response": { - // "$ref": "Operation" + // "$ref": "InterconnectAttachment" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.detachDisk": +// method id "compute.interconnectAttachments.insert": -type InstancesDetachDiskCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectAttachmentsInsertCall struct { + s *Service + project string + region string + interconnectattachment *InterconnectAttachment + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// DetachDisk: Detaches a disk from an instance. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/detachDisk -func (r *InstancesService) DetachDisk(project string, zone string, instance string, deviceName string) *InstancesDetachDiskCall { - c := &InstancesDetachDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates an InterconnectAttachment in the specified project +// using the data included in the request. +func (r *InterconnectAttachmentsService) Insert(project string, region string, interconnectattachment *InterconnectAttachment) *InterconnectAttachmentsInsertCall { + c := &InterconnectAttachmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.urlParams_.Set("deviceName", deviceName) + c.region = region + c.interconnectattachment = interconnectattachment return c } @@ -43235,7 +55768,7 @@ func (r *InstancesService) DetachDisk(project string, zone string, instance stri // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesDetachDiskCall) RequestId(requestId string) *InstancesDetachDiskCall { +func (c *InterconnectAttachmentsInsertCall) RequestId(requestId string) *InterconnectAttachmentsInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -43243,7 +55776,7 @@ func (c *InstancesDetachDiskCall) RequestId(requestId string) *InstancesDetachDi // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InstancesDetachDiskCall) Fields(s ...googleapi.Field) *InstancesDetachDiskCall { +func (c *InterconnectAttachmentsInsertCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -43251,48 +55784,52 @@ func (c *InstancesDetachDiskCall) Fields(s ...googleapi.Field) *InstancesDetachD // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesDetachDiskCall) Context(ctx context.Context) *InstancesDetachDiskCall { +func (c *InterconnectAttachmentsInsertCall) Context(ctx context.Context) *InterconnectAttachmentsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesDetachDiskCall) Header() http.Header { +func (c *InterconnectAttachmentsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesDetachDiskCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnectattachment) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/detachDisk") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.detachDisk" call. +// Do executes the "compute.interconnectAttachments.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -43323,33 +55860,25 @@ func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Detaches a disk from an instance.", + // "description": "Creates an InterconnectAttachment in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.instances.detachDisk", + // "id": "compute.interconnectAttachments.insert", // "parameterOrder": [ // "project", - // "zone", - // "instance", - // "deviceName" + // "region" // ], // "parameters": { - // "deviceName": { - // "description": "Disk device name to detach.", - // "location": "query", - // "required": true, - // "type": "string" - // }, - // "instance": { - // "description": "Instance name.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "region": { + // "description": "Name of the region for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -43357,16 +55886,12 @@ func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/detachDisk", + // "path": "{project}/regions/{region}/interconnectAttachments", + // "request": { + // "$ref": "InterconnectAttachment" + // }, // "response": { // "$ref": "Operation" // }, @@ -43378,34 +55903,98 @@ func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, } -// method id "compute.instances.get": +// method id "compute.interconnectAttachments.list": -type InstancesGetCall struct { +type InterconnectAttachmentsListCall struct { s *Service project string - zone string - instance string + region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the specified Instance resource. Get a list of available -// instances by making a list() request. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/get -func (r *InstancesService) Get(project string, zone string, instance string) *InstancesGetCall { - c := &InstancesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of interconnect attachments contained within +// the specified region. +func (r *InterconnectAttachmentsService) List(project string, region string) *InterconnectAttachmentsListCall { + c := &InterconnectAttachmentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance + c.region = region + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *InterconnectAttachmentsListCall) Filter(filter string) *InterconnectAttachmentsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *InterconnectAttachmentsListCall) MaxResults(maxResults int64) *InterconnectAttachmentsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. 
+func (c *InterconnectAttachmentsListCall) OrderBy(orderBy string) *InterconnectAttachmentsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InterconnectAttachmentsListCall) PageToken(pageToken string) *InterconnectAttachmentsListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesGetCall) Fields(s ...googleapi.Field) *InstancesGetCall { +func (c *InterconnectAttachmentsListCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -43415,7 +56004,7 @@ func (c *InstancesGetCall) Fields(s ...googleapi.Field) *InstancesGetCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InstancesGetCall) IfNoneMatch(entityTag string) *InstancesGetCall { +func (c *InterconnectAttachmentsListCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsListCall { c.ifNoneMatch_ = entityTag return c } @@ -43423,21 +56012,21 @@ func (c *InstancesGetCall) IfNoneMatch(entityTag string) *InstancesGetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesGetCall) Context(ctx context.Context) *InstancesGetCall { +func (c *InterconnectAttachmentsListCall) Context(ctx context.Context) *InterconnectAttachmentsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesGetCall) Header() http.Header { +func (c *InterconnectAttachmentsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -43448,26 +56037,25 @@ func (c *InstancesGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.instances.get" call. -// Exactly one of *Instance or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Instance.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. 
-func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.interconnectAttachments.list" call. +// Exactly one of *InterconnectAttachmentList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *InterconnectAttachmentList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachmentList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -43486,7 +56074,7 @@ func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Instance{ + ret := &InterconnectAttachmentList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -43498,20 +56086,35 @@ func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { } return ret, nil // { - // "description": "Returns the specified Instance resource. Get a list of available instances by making a list() request.", + // "description": "Retrieves the list of interconnect attachments contained within the specified region.", // "httpMethod": "GET", - // "id": "compute.instances.get", + // "id": "compute.interconnectAttachments.list", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "region" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -43521,17 +56124,17 @@ func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "region": { + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}", + // "path": "{project}/regions/{region}/interconnectAttachments", // "response": { - // "$ref": "Instance" + // "$ref": "InterconnectAttachmentList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -43542,114 +56145,109 @@ func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { } -// method id "compute.instances.getSerialPortOutput": - -type InstancesGetSerialPortOutputCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InterconnectAttachmentsListCall) Pages(ctx context.Context, f func(*InterconnectAttachmentList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } } -// GetSerialPortOutput: Returns the specified instance's serial port -// output. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/getSerialPortOutput -func (r *InstancesService) GetSerialPortOutput(project string, zone string, instance string) *InstancesGetSerialPortOutputCall { - c := &InstancesGetSerialPortOutputCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instance = instance - return c -} +// method id "compute.interconnectAttachments.testIamPermissions": -// Port sets the optional parameter "port": Specifies which COM or -// serial port to retrieve data from. -func (c *InstancesGetSerialPortOutputCall) Port(port int64) *InstancesGetSerialPortOutputCall { - c.urlParams_.Set("port", fmt.Sprint(port)) - return c +type InterconnectAttachmentsTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Start sets the optional parameter "start": Returns output starting -// from a specific byte position. Use this to page through output when -// the output is too large to return in a single request. For the -// initial request, leave this field unspecified. For subsequent calls, -// this field should be set to the next value returned in the previous -// call. -func (c *InstancesGetSerialPortOutputCall) Start(start int64) *InstancesGetSerialPortOutputCall { - c.urlParams_.Set("start", fmt.Sprint(start)) +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *InterconnectAttachmentsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *InterconnectAttachmentsTestIamPermissionsCall { + c := &InterconnectAttachmentsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesGetSerialPortOutputCall) Fields(s ...googleapi.Field) *InstancesGetSerialPortOutputCall { +func (c *InterconnectAttachmentsTestIamPermissionsCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstancesGetSerialPortOutputCall) IfNoneMatch(entityTag string) *InstancesGetSerialPortOutputCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesGetSerialPortOutputCall) Context(ctx context.Context) *InstancesGetSerialPortOutputCall { +func (c *InterconnectAttachmentsTestIamPermissionsCall) Context(ctx context.Context) *InterconnectAttachmentsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesGetSerialPortOutputCall) Header() http.Header { +func (c *InterconnectAttachmentsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesGetSerialPortOutputCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectAttachmentsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/serialPort") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, - "instance": c.instance, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.getSerialPortOutput" call. -// Exactly one of *SerialPortOutput or error will be non-nil. Any +// Do executes the "compute.interconnectAttachments.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *SerialPortOutput.ServerResponse.Header or (if a response was +// *TestPermissionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*SerialPortOutput, error) { +func (c *InterconnectAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -43668,7 +56266,7 @@ func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*Se if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &SerialPortOutput{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -43680,31 +56278,15 @@ func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*Se } return ret, nil // { - // "description": "Returns the specified instance's serial port output.", - // "httpMethod": "GET", - // "id": "compute.instances.getSerialPortOutput", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.interconnectAttachments.testIamPermissions", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "region", + // "resource" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "port": { - // "default": "1", - // "description": "Specifies which COM or serial port to retrieve data from.", - // "format": "int32", - // "location": "query", - // "maximum": "4", - // "minimum": "1", - // "type": "integer" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -43712,23 +56294,27 @@ func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*Se // "required": true, // "type": "string" // }, - // "start": { - // "description": "Returns output starting from a specific byte position. Use this to page through output when the output is too large to return in a single request. For the initial request, leave this field unspecified. 
For subsequent calls, this field should be set to the next value returned in the previous call.", - // "format": "int64", - // "location": "query", + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "resource": { + // "description": "Name of the resource for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/serialPort", + // "path": "{project}/regions/{region}/interconnectAttachments/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "SerialPortOutput" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -43739,105 +56325,93 @@ func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*Se } -// method id "compute.instances.insert": +// method id "compute.interconnectLocations.get": -type InstancesInsertCall struct { - s *Service - project string - zone string - instance *Instance - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectLocationsGetCall struct { + s *Service + project string + interconnectLocation string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Insert: Creates an instance resource in the specified project using -// the data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/insert -func (r *InstancesService) Insert(project string, zone string, instance *Instance) *InstancesInsertCall { - c := &InstancesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the details for the specified interconnect location. Get +// a list of available interconnect locations by making a list() +// request. +func (r *InterconnectLocationsService) Get(project string, interconnectLocation string) *InterconnectLocationsGetCall { + c := &InterconnectLocationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesInsertCall) RequestId(requestId string) *InstancesInsertCall { - c.urlParams_.Set("requestId", requestId) + c.interconnectLocation = interconnectLocation return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *InstancesInsertCall) Fields(s ...googleapi.Field) *InstancesInsertCall { +func (c *InterconnectLocationsGetCall) Fields(s ...googleapi.Field) *InterconnectLocationsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InterconnectLocationsGetCall) IfNoneMatch(entityTag string) *InterconnectLocationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesInsertCall) Context(ctx context.Context) *InstancesInsertCall { +func (c *InterconnectLocationsGetCall) Context(ctx context.Context) *InterconnectLocationsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesInsertCall) Header() http.Header { +func (c *InterconnectLocationsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectLocationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instance) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnectLocations/{interconnectLocation}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "interconnectLocation": c.interconnectLocation, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.interconnectLocations.get" call. +// Exactly one of *InterconnectLocation or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InterconnectLocation.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. 
+func (c *InterconnectLocationsGetCall) Do(opts ...googleapi.CallOption) (*InterconnectLocation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -43856,7 +56430,7 @@ func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InterconnectLocation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -43868,68 +56442,58 @@ func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro } return ret, nil // { - // "description": "Creates an instance resource in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.instances.insert", + // "description": "Returns the details for the specified interconnect location. Get a list of available interconnect locations by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.interconnectLocations.get", // "parameterOrder": [ // "project", - // "zone" + // "interconnectLocation" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "interconnectLocation": { + // "description": "Name of the interconnect location to return.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances", - // "request": { - // "$ref": "Instance" - // }, + // "path": "{project}/global/interconnectLocations/{interconnectLocation}", // "response": { - // "$ref": "Operation" + // "$ref": "InterconnectLocation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.list": +// method id "compute.interconnectLocations.list": -type InstancesListCall struct { +type InterconnectLocationsListCall struct { s *Service project string - zone string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves the list of instances contained within the specified -// zone. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/list -func (r *InstancesService) List(project string, zone string) *InstancesListCall { - c := &InstancesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of interconnect locations available to the +// specified project. +func (r *InterconnectLocationsService) List(project string) *InterconnectLocationsListCall { + c := &InterconnectLocationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone return c } @@ -43959,7 +56523,7 @@ func (r *InstancesService) List(project string, zone string) *InstancesListCall // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *InstancesListCall) Filter(filter string) *InstancesListCall { +func (c *InterconnectLocationsListCall) Filter(filter string) *InterconnectLocationsListCall { c.urlParams_.Set("filter", filter) return c } @@ -43970,7 +56534,7 @@ func (c *InstancesListCall) Filter(filter string) *InstancesListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *InstancesListCall) MaxResults(maxResults int64) *InstancesListCall { +func (c *InterconnectLocationsListCall) MaxResults(maxResults int64) *InterconnectLocationsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -43987,7 +56551,7 @@ func (c *InstancesListCall) MaxResults(maxResults int64) *InstancesListCall { // // Currently, only sorting by name or creationTimestamp desc is // supported. 
-func (c *InstancesListCall) OrderBy(orderBy string) *InstancesListCall { +func (c *InterconnectLocationsListCall) OrderBy(orderBy string) *InterconnectLocationsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -43995,7 +56559,7 @@ func (c *InstancesListCall) OrderBy(orderBy string) *InstancesListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *InstancesListCall) PageToken(pageToken string) *InstancesListCall { +func (c *InterconnectLocationsListCall) PageToken(pageToken string) *InterconnectLocationsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -44003,7 +56567,7 @@ func (c *InstancesListCall) PageToken(pageToken string) *InstancesListCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesListCall) Fields(s ...googleapi.Field) *InstancesListCall { +func (c *InterconnectLocationsListCall) Fields(s ...googleapi.Field) *InterconnectLocationsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -44013,7 +56577,7 @@ func (c *InstancesListCall) Fields(s ...googleapi.Field) *InstancesListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *InstancesListCall) IfNoneMatch(entityTag string) *InstancesListCall { +func (c *InterconnectLocationsListCall) IfNoneMatch(entityTag string) *InterconnectLocationsListCall { c.ifNoneMatch_ = entityTag return c } @@ -44021,21 +56585,21 @@ func (c *InstancesListCall) IfNoneMatch(entityTag string) *InstancesListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesListCall) Context(ctx context.Context) *InstancesListCall { +func (c *InterconnectLocationsListCall) Context(ctx context.Context) *InterconnectLocationsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesListCall) Header() http.Header { +func (c *InterconnectLocationsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesListCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectLocationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -44046,25 +56610,24 @@ func (c *InstancesListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnectLocations") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.list" call. -// Exactly one of *InstanceList or error will be non-nil. Any non-2xx -// status code is an error. 
Response headers are in either -// *InstanceList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, error) { +// Do executes the "compute.interconnectLocations.list" call. +// Exactly one of *InterconnectLocationList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *InterconnectLocationList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*InterconnectLocationList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -44083,7 +56646,7 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceList{ + ret := &InterconnectLocationList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -44095,12 +56658,11 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, err } return ret, nil // { - // "description": "Retrieves the list of instances contained within the specified zone.", + // "description": "Retrieves the list of interconnect locations available to the specified project.", // "httpMethod": "GET", - // "id": "compute.instances.list", + // "id": "compute.interconnectLocations.list", // "parameterOrder": [ - // "project", - // "zone" + // "project" // ], // "parameters": { // "filter": { @@ -44132,18 +56694,11 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, err // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances", + // "path": "{project}/global/interconnectLocations", // "response": { - // "$ref": "InstanceList" + // "$ref": "InterconnectLocationList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -44157,7 +56712,7 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, err // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
-func (c *InstancesListCall) Pages(ctx context.Context, f func(*InstanceList) error) error { +func (c *InterconnectLocationsListCall) Pages(ctx context.Context, f func(*InterconnectLocationList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -44175,162 +56730,96 @@ func (c *InstancesListCall) Pages(ctx context.Context, f func(*InstanceList) err } } -// method id "compute.instances.listReferrers": +// method id "compute.interconnects.delete": -type InstancesListReferrersCall struct { +type InterconnectsDeleteCall struct { s *Service project string - zone string - instance string + interconnect string urlParams_ gensupport.URLParams - ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// ListReferrers: Retrieves the list of referrers to instances contained -// within the specified zone. -func (r *InstancesService) ListReferrers(project string, zone string, instance string) *InstancesListReferrersCall { - c := &InstancesListReferrersCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified interconnect. +func (r *InterconnectsService) Delete(project string, interconnect string) *InterconnectsDeleteCall { + c := &InterconnectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - return c -} - -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *InstancesListReferrersCall) Filter(filter string) *InstancesListReferrersCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. 
-// (Default: 500) -func (c *InstancesListReferrersCall) MaxResults(maxResults int64) *InstancesListReferrersCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + c.interconnect = interconnect return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *InstancesListReferrersCall) OrderBy(orderBy string) *InstancesListReferrersCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *InstancesListReferrersCall) PageToken(pageToken string) *InstancesListReferrersCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InterconnectsDeleteCall) RequestId(requestId string) *InterconnectsDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesListReferrersCall) Fields(s ...googleapi.Field) *InstancesListReferrersCall { +func (c *InterconnectsDeleteCall) Fields(s ...googleapi.Field) *InterconnectsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *InstancesListReferrersCall) IfNoneMatch(entityTag string) *InstancesListReferrersCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *InstancesListReferrersCall) Context(ctx context.Context) *InstancesListReferrersCall { +func (c *InterconnectsDeleteCall) Context(ctx context.Context) *InterconnectsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesListReferrersCall) Header() http.Header { +func (c *InterconnectsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesListReferrersCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/referrers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "interconnect": c.interconnect, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.listReferrers" call. -// Exactly one of *InstanceListReferrers or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceListReferrers.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*InstanceListReferrers, error) { +// Do executes the "compute.interconnects.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -44349,7 +56838,7 @@ func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*Instance if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceListReferrers{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -44361,43 +56850,19 @@ func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*Instance } return ret, nil // { - // "description": "Retrieves the list of referrers to instances contained within the specified zone.", - // "httpMethod": "GET", - // "id": "compute.instances.listReferrers", + // "description": "Deletes the specified interconnect.", + // "httpMethod": "DELETE", + // "id": "compute.interconnects.delete", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "interconnect" // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "instance": { - // "description": "Name of the target instance scoping this request, or '-' if the request should span over all instances in the container.", - // "location": "path", - // "pattern": "-|[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "parameters": { + // "interconnect": { + // "description": "Name of the interconnect to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, // "project": { @@ -44407,143 +56872,110 @@ func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*Instance // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/referrers", + // "path": "{project}/global/interconnects/{interconnect}", // "response": { - // "$ref": "InstanceListReferrers" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *InstancesListReferrersCall) Pages(ctx context.Context, f func(*InstanceListReferrers) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.instances.reset": +// method id "compute.interconnects.get": -type InstancesResetCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectsGetCall struct { + s *Service + project string + interconnect string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Reset: Performs a reset on the instance. For more information, see -// Resetting an instance. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/reset -func (r *InstancesService) Reset(project string, zone string, instance string) *InstancesResetCall { - c := &InstancesResetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified interconnect. Get a list of available +// interconnects by making a list() request. +func (r *InterconnectsService) Get(project string, interconnect string) *InterconnectsGetCall { + c := &InterconnectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesResetCall) RequestId(requestId string) *InstancesResetCall { - c.urlParams_.Set("requestId", requestId) + c.interconnect = interconnect return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesResetCall) Fields(s ...googleapi.Field) *InstancesResetCall { +func (c *InterconnectsGetCall) Fields(s ...googleapi.Field) *InterconnectsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InterconnectsGetCall) IfNoneMatch(entityTag string) *InterconnectsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesResetCall) Context(ctx context.Context) *InstancesResetCall { +func (c *InterconnectsGetCall) Context(ctx context.Context) *InterconnectsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesResetCall) Header() http.Header { +func (c *InterconnectsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesResetCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/reset") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "interconnect": c.interconnect, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.reset" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.interconnects.get" call. +// Exactly one of *Interconnect or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// *Interconnect.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -44562,7 +56994,7 @@ func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Interconnect{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -44574,17 +57006,16 @@ func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Performs a reset on the instance. For more information, see Resetting an instance.", - // "httpMethod": "POST", - // "id": "compute.instances.reset", + // "description": "Returns the specified interconnect. 
Get a list of available interconnects by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.interconnects.get", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "interconnect" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", + // "interconnect": { + // "description": "Name of the interconnect to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -44596,54 +57027,38 @@ func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/reset", + // "path": "{project}/global/interconnects/{interconnect}", // "response": { - // "$ref": "Operation" + // "$ref": "Interconnect" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.setDiskAutoDelete": +// method id "compute.interconnects.insert": -type InstancesSetDiskAutoDeleteCall struct { - s *Service - project string - zone string - instance string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectsInsertCall struct { + s *Service + project string + interconnect *Interconnect + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetDiskAutoDelete: Sets the auto-delete flag for a disk attached to -// an instance. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setDiskAutoDelete -func (r *InstancesService) SetDiskAutoDelete(project string, zone string, instance string, autoDelete bool, deviceName string) *InstancesSetDiskAutoDeleteCall { - c := &InstancesSetDiskAutoDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a Interconnect in the specified project using the +// data included in the request. 
+func (r *InterconnectsService) Insert(project string, interconnect *Interconnect) *InterconnectsInsertCall { + c := &InterconnectsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.urlParams_.Set("autoDelete", fmt.Sprint(autoDelete)) - c.urlParams_.Set("deviceName", deviceName) + c.interconnect = interconnect return c } @@ -44661,7 +57076,7 @@ func (r *InstancesService) SetDiskAutoDelete(project string, zone string, instan // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetDiskAutoDeleteCall) RequestId(requestId string) *InstancesSetDiskAutoDeleteCall { +func (c *InterconnectsInsertCall) RequestId(requestId string) *InterconnectsInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -44669,7 +57084,7 @@ func (c *InstancesSetDiskAutoDeleteCall) RequestId(requestId string) *InstancesS // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetDiskAutoDeleteCall) Fields(s ...googleapi.Field) *InstancesSetDiskAutoDeleteCall { +func (c *InterconnectsInsertCall) Fields(s ...googleapi.Field) *InterconnectsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -44677,48 +57092,51 @@ func (c *InstancesSetDiskAutoDeleteCall) Fields(s ...googleapi.Field) *Instances // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetDiskAutoDeleteCall) Context(ctx context.Context) *InstancesSetDiskAutoDeleteCall { +func (c *InterconnectsInsertCall) Context(ctx context.Context) *InterconnectsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetDiskAutoDeleteCall) Header() http.Header { +func (c *InterconnectsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetDiskAutoDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnect) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setDiskAutoDelete" call. +// Do executes the "compute.interconnects.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -44749,37 +57167,13 @@ func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Sets the auto-delete flag for a disk attached to an instance.", + // "description": "Creates a Interconnect in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.instances.setDiskAutoDelete", + // "id": "compute.interconnects.insert", // "parameterOrder": [ - // "project", - // "zone", - // "instance", - // "autoDelete", - // "deviceName" + // "project" // ], // "parameters": { - // "autoDelete": { - // "description": "Whether to auto-delete the disk when the instance is deleted.", - // "location": "query", - // "required": true, - // "type": "boolean" - // }, - // "deviceName": { - // "description": "The device name of the disk to modify.", - // "location": "query", - // "pattern": "\\w[\\w.-]{0,254}", - // "required": true, - // "type": "string" - // }, - // "instance": { - // "description": "The instance name.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -44791,16 +57185,12 @@ func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Oper // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete", + // "path": "{project}/global/interconnects", + // "request": { + // "$ref": "Interconnect" + // }, // "response": { // "$ref": "Operation" // }, @@ -44812,107 +57202,156 @@ func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Oper } -// method id "compute.instances.setLabels": +// method id "compute.interconnects.list": -type InstancesSetLabelsCall struct { - s *Service - project string - zone string - instance string - instancessetlabelsrequest *InstancesSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InterconnectsListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetLabels: Sets labels on an instance. To learn more about labels, -// read the Labeling Resources documentation. -func (r *InstancesService) SetLabels(project string, zone string, instance string, instancessetlabelsrequest *InstancesSetLabelsRequest) *InstancesSetLabelsCall { - c := &InstancesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of interconnect available to the specified +// project. +func (r *InterconnectsService) List(project string) *InterconnectsListCall { + c := &InterconnectsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.instancessetlabelsrequest = instancessetlabelsrequest return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *InstancesSetLabelsCall) RequestId(requestId string) *InstancesSetLabelsCall { - c.urlParams_.Set("requestId", requestId) +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *InterconnectsListCall) Filter(filter string) *InterconnectsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *InterconnectsListCall) MaxResults(maxResults int64) *InterconnectsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InterconnectsListCall) OrderBy(orderBy string) *InterconnectsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InterconnectsListCall) PageToken(pageToken string) *InterconnectsListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetLabelsCall) Fields(s ...googleapi.Field) *InstancesSetLabelsCall { +func (c *InterconnectsListCall) Fields(s ...googleapi.Field) *InterconnectsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InterconnectsListCall) IfNoneMatch(entityTag string) *InterconnectsListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetLabelsCall) Context(ctx context.Context) *InstancesSetLabelsCall { +func (c *InterconnectsListCall) Context(ctx context.Context) *InterconnectsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetLabelsCall) Header() http.Header { +func (c *InterconnectsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetlabelsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setLabels" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.interconnects.list" call. +// Exactly one of *InterconnectList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InterconnectList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -44931,7 +57370,7 @@ func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InterconnectList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -44943,78 +57382,98 @@ func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Sets labels on an instance. 
To learn more about labels, read the Labeling Resources documentation.", - // "httpMethod": "POST", - // "id": "compute.instances.setLabels", + // "description": "Retrieves the list of interconnect available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.interconnects.list", // "parameterOrder": [ - // "project", - // "zone", - // "instance" + // "project" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setLabels", - // "request": { - // "$ref": "InstancesSetLabelsRequest" - // }, + // "path": "{project}/global/interconnects", // "response": { - // "$ref": "Operation" + // "$ref": "InterconnectList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.setMachineResources": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InterconnectsListCall) Pages(ctx context.Context, f func(*InterconnectList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstancesSetMachineResourcesCall struct { - s *Service - project string - zone string - instance string - instancessetmachineresourcesrequest *InstancesSetMachineResourcesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.interconnects.patch": + +type InterconnectsPatchCall struct { + s *Service + project string + interconnect string + interconnect2 *Interconnect + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetMachineResources: Changes the number and/or type of accelerator -// for a stopped instance to the values specified in the request. -func (r *InstancesService) SetMachineResources(project string, zone string, instance string, instancessetmachineresourcesrequest *InstancesSetMachineResourcesRequest) *InstancesSetMachineResourcesCall { - c := &InstancesSetMachineResourcesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates the specified interconnect with the data included in +// the request. This method supports PATCH semantics and uses the JSON +// merge patch format and processing rules. 
+func (r *InterconnectsService) Patch(project string, interconnect string, interconnect2 *Interconnect) *InterconnectsPatchCall { + c := &InterconnectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.instancessetmachineresourcesrequest = instancessetmachineresourcesrequest + c.interconnect = interconnect + c.interconnect2 = interconnect2 return c } @@ -45032,7 +57491,7 @@ func (r *InstancesService) SetMachineResources(project string, zone string, inst // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetMachineResourcesCall) RequestId(requestId string) *InstancesSetMachineResourcesCall { +func (c *InterconnectsPatchCall) RequestId(requestId string) *InterconnectsPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -45040,7 +57499,7 @@ func (c *InstancesSetMachineResourcesCall) RequestId(requestId string) *Instance // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetMachineResourcesCall) Fields(s ...googleapi.Field) *InstancesSetMachineResourcesCall { +func (c *InterconnectsPatchCall) Fields(s ...googleapi.Field) *InterconnectsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -45048,53 +57507,52 @@ func (c *InstancesSetMachineResourcesCall) Fields(s ...googleapi.Field) *Instanc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetMachineResourcesCall) Context(ctx context.Context) *InstancesSetMachineResourcesCall { +func (c *InterconnectsPatchCall) Context(ctx context.Context) *InterconnectsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetMachineResourcesCall) Header() http.Header { +func (c *InterconnectsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetMachineResourcesCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmachineresourcesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnect2) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMachineResources") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("PATCH", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "interconnect": c.interconnect, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setMachineResources" call. +// Do executes the "compute.interconnects.patch" call. 
// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetMachineResourcesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -45125,17 +57583,16 @@ func (c *InstancesSetMachineResourcesCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Changes the number and/or type of accelerator for a stopped instance to the values specified in the request.", - // "httpMethod": "POST", - // "id": "compute.instances.setMachineResources", + // "description": "Updates the specified interconnect with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.interconnects.patch", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "interconnect" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", + // "interconnect": { + // "description": "Name of the interconnect to update.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -45152,18 +57609,11 @@ func (c *InstancesSetMachineResourcesCall) Do(opts ...googleapi.CallOption) (*Op // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setMachineResources", + // "path": "{project}/global/interconnects/{interconnect}", // "request": { - // "$ref": "InstancesSetMachineResourcesRequest" + // "$ref": "Interconnect" // }, // "response": { // "$ref": "Operation" @@ -45176,53 +57626,32 @@ func (c *InstancesSetMachineResourcesCall) Do(opts ...googleapi.CallOption) (*Op } -// method id "compute.instances.setMachineType": - -type InstancesSetMachineTypeCall struct { - s *Service - project string - zone string - instance string - instancessetmachinetyperequest *InstancesSetMachineTypeRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetMachineType: Changes the machine type for a stopped instance to -// the machine type specified in the request. 
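The Patch call above issues an HTTP PATCH against {project}/global/interconnects/{interconnect} with JSON merge-patch semantics, and like the other mutating calls it accepts an optional requestId so a retried request is not applied twice. A sketch reusing svc and the imports from the first example; the resource names and the UUID are placeholders:

func patchInterconnectDescription(ctx context.Context, svc *compute.Service) error {
	// Only the fields set here are sent; merge-patch leaves the rest untouched.
	patch := &compute.Interconnect{Description: "updated by example"}

	// Reusing the same request ID on a retry lets the server recognize and
	// skip a duplicate of an operation it has already completed.
	const requestID = "3c1a8f42-0000-4000-8000-000000000000" // placeholder UUID

	op, err := svc.Interconnects.Patch("my-project", "my-interconnect", patch).
		RequestId(requestID).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("operation %s is %s", op.Name, op.Status)
	return nil
}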
-func (r *InstancesService) SetMachineType(project string, zone string, instance string, instancessetmachinetyperequest *InstancesSetMachineTypeRequest) *InstancesSetMachineTypeCall { - c := &InstancesSetMachineTypeCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - c.instance = instance - c.instancessetmachinetyperequest = instancessetmachinetyperequest - return c -} +// method id "compute.interconnects.testIamPermissions": -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetMachineTypeCall) RequestId(requestId string) *InstancesSetMachineTypeCall { - c.urlParams_.Set("requestId", requestId) +type InterconnectsTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *InterconnectsService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *InterconnectsTestIamPermissionsCall { + c := &InterconnectsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetMachineTypeCall) Fields(s ...googleapi.Field) *InstancesSetMachineTypeCall { +func (c *InterconnectsTestIamPermissionsCall) Fields(s ...googleapi.Field) *InterconnectsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -45230,53 +57659,52 @@ func (c *InstancesSetMachineTypeCall) Fields(s ...googleapi.Field) *InstancesSet // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetMachineTypeCall) Context(ctx context.Context) *InstancesSetMachineTypeCall { +func (c *InterconnectsTestIamPermissionsCall) Context(ctx context.Context) *InterconnectsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesSetMachineTypeCall) Header() http.Header { +func (c *InterconnectsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetMachineTypeCall) doRequest(alt string) (*http.Response, error) { +func (c *InterconnectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmachinetyperequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMachineType") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "zone": c.zone, - "instance": c.instance, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setMachineType" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.interconnects.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InterconnectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -45295,7 +57723,7 @@ func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operati if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -45307,22 +57735,14 @@ func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Changes the machine type for a stopped instance to the machine type specified in the request.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.instances.setMachineType", + // "id": "compute.interconnects.testIamPermissions", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "resource" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -45330,136 +57750,116 @@ func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operati // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "resource": { + // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setMachineType", + // "path": "{project}/global/interconnects/{resource}/testIamPermissions", // "request": { - // "$ref": "InstancesSetMachineTypeRequest" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.setMetadata": +// method id "compute.licenses.get": -type InstancesSetMetadataCall struct { - s *Service - project string - zone string - instance string - metadata *Metadata - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type LicensesGetCall struct { + s *Service + project string + license string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetMetadata: Sets metadata for the specified instance to the data -// included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setMetadata -func (r *InstancesService) SetMetadata(project string, zone string, instance string, metadata *Metadata) *InstancesSetMetadataCall { - c := &InstancesSetMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified License resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/licenses/get +func (r *LicensesService) Get(project string, license string) *LicensesGetCall { + c := &LicensesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.metadata = metadata - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetMetadataCall) RequestId(requestId string) *InstancesSetMetadataCall { - c.urlParams_.Set("requestId", requestId) + c.license = license return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
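TestIamPermissions above POSTs a TestPermissionsRequest, and the response echoes back only the permissions the caller actually holds, which makes it a cheap preflight check. A sketch reusing svc and the imports from the first example; the permission string is illustrative:

func canReadInterconnect(ctx context.Context, svc *compute.Service) (bool, error) {
	req := &compute.TestPermissionsRequest{
		Permissions: []string{"compute.interconnects.get"},
	}
	resp, err := svc.Interconnects.TestIamPermissions("my-project", "my-interconnect", req).
		Context(ctx).
		Do()
	if err != nil {
		return false, err
	}
	for _, p := range resp.Permissions { // the granted subset of what we asked about
		if p == "compute.interconnects.get" {
			return true, nil
		}
	}
	return false, nil
}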
-func (c *InstancesSetMetadataCall) Fields(s ...googleapi.Field) *InstancesSetMetadataCall { +func (c *LicensesGetCall) Fields(s ...googleapi.Field) *LicensesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *LicensesGetCall) IfNoneMatch(entityTag string) *LicensesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetMetadataCall) Context(ctx context.Context) *InstancesSetMetadataCall { +func (c *LicensesGetCall) Context(ctx context.Context) *LicensesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetMetadataCall) Header() http.Header { +func (c *LicensesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetMetadataCall) doRequest(alt string) (*http.Response, error) { +func (c *LicensesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.metadata) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMetadata") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{license}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "license": c.license, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setMetadata" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.licenses.get" call. +// Exactly one of *License or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *License.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -45478,7 +57878,7 @@ func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &License{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -45490,17 +57890,16 @@ func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Sets metadata for the specified instance to the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.instances.setMetadata", + // "description": "Returns the specified License resource.", + // "httpMethod": "GET", + // "id": "compute.licenses.get", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "license" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", + // "license": { + // "description": "Name of the License resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -45512,138 +57911,171 @@ func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setMetadata", - // "request": { - // "$ref": "Metadata" - // }, + // "path": "{project}/global/licenses/{license}", // "response": { - // "$ref": "Operation" + // "$ref": "License" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.setMinCpuPlatform": +// method id "compute.machineTypes.aggregatedList": -type InstancesSetMinCpuPlatformCall struct { - s *Service - project string - zone string - instance string - instancessetmincpuplatformrequest *InstancesSetMinCpuPlatformRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type MachineTypesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetMinCpuPlatform: Changes the minimum CPU platform that this -// instance should use. This method can only be called on a stopped -// instance. For more information, read Specifying a Minimum CPU -// Platform. -func (r *InstancesService) SetMinCpuPlatform(project string, zone string, instance string, instancessetmincpuplatformrequest *InstancesSetMinCpuPlatformRequest) *InstancesSetMinCpuPlatformCall { - c := &InstancesSetMinCpuPlatformCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of machine types. +// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/aggregatedList +func (r *MachineTypesService) AggregatedList(project string) *MachineTypesAggregatedListCall { + c := &MachineTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.instancessetmincpuplatformrequest = instancessetmincpuplatformrequest return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). 
The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetMinCpuPlatformCall) RequestId(requestId string) *InstancesSetMinCpuPlatformCall { - c.urlParams_.Set("requestId", requestId) +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *MachineTypesAggregatedListCall) Filter(filter string) *MachineTypesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *MachineTypesAggregatedListCall) MaxResults(maxResults int64) *MachineTypesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *MachineTypesAggregatedListCall) OrderBy(orderBy string) *MachineTypesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *MachineTypesAggregatedListCall) PageToken(pageToken string) *MachineTypesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
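Filter, MaxResults, OrderBy and PageToken are ordinary query parameters layered onto the list call, with the filter grammar described above (field_name, eq or ne, then a literal that is treated as an RE2 pattern for string fields). A sketch reusing svc and the imports from the first example; it assumes guestCpus is a filterable numeric field and that the aggregated response keys its Items map by scope such as "zones/us-central1-f":

func listEightCPUMachineTypes(ctx context.Context, svc *compute.Service) error {
	call := svc.MachineTypes.AggregatedList("my-project").
		Filter("guestCpus eq 8"). // field_name comparison_string literal_string
		MaxResults(100)           // page size; Pages below still walks every page

	return call.Pages(ctx, func(page *compute.MachineTypeAggregatedList) error {
		for scope, scoped := range page.Items {
			for _, mt := range scoped.MachineTypes {
				log.Printf("%s: %s (%d vCPU, %d MB)", scope, mt.Name, mt.GuestCpus, mt.MemoryMb)
			}
		}
		return nil
	})
}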
-func (c *InstancesSetMinCpuPlatformCall) Fields(s ...googleapi.Field) *InstancesSetMinCpuPlatformCall { +func (c *MachineTypesAggregatedListCall) Fields(s ...googleapi.Field) *MachineTypesAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *MachineTypesAggregatedListCall) IfNoneMatch(entityTag string) *MachineTypesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetMinCpuPlatformCall) Context(ctx context.Context) *InstancesSetMinCpuPlatformCall { +func (c *MachineTypesAggregatedListCall) Context(ctx context.Context) *MachineTypesAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetMinCpuPlatformCall) Header() http.Header { +func (c *MachineTypesAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetMinCpuPlatformCall) doRequest(alt string) (*http.Response, error) { +func (c *MachineTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmincpuplatformrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMinCpuPlatform") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/machineTypes") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setMinCpuPlatform" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesSetMinCpuPlatformCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.machineTypes.aggregatedList" call. +// Exactly one of *MachineTypeAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *MachineTypeAggregatedList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. 
Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*MachineTypeAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -45662,7 +58094,7 @@ func (c *InstancesSetMinCpuPlatformCall) Do(opts ...googleapi.CallOption) (*Oper if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &MachineTypeAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -45674,158 +58106,168 @@ func (c *InstancesSetMinCpuPlatformCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Changes the minimum CPU platform that this instance should use. This method can only be called on a stopped instance. For more information, read Specifying a Minimum CPU Platform.", - // "httpMethod": "POST", - // "id": "compute.instances.setMinCpuPlatform", + // "description": "Retrieves an aggregated list of machine types.", + // "httpMethod": "GET", + // "id": "compute.machineTypes.aggregatedList", // "parameterOrder": [ - // "project", - // "zone", - // "instance" + // "project" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. 
Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setMinCpuPlatform", - // "request": { - // "$ref": "InstancesSetMinCpuPlatformRequest" - // }, + // "path": "{project}/aggregated/machineTypes", // "response": { - // "$ref": "Operation" + // "$ref": "MachineTypeAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.setScheduling": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *MachineTypesAggregatedListCall) Pages(ctx context.Context, f func(*MachineTypeAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstancesSetSchedulingCall struct { - s *Service - project string - zone string - instance string - scheduling *Scheduling - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.machineTypes.get": + +type MachineTypesGetCall struct { + s *Service + project string + zone string + machineType string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetScheduling: Sets an instance's scheduling options. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setScheduling -func (r *InstancesService) SetScheduling(project string, zone string, instance string, scheduling *Scheduling) *InstancesSetSchedulingCall { - c := &InstancesSetSchedulingCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified machine type. Get a list of available +// machine types by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/get +func (r *MachineTypesService) Get(project string, zone string, machineType string) *MachineTypesGetCall { + c := &MachineTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instance = instance - c.scheduling = scheduling - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetSchedulingCall) RequestId(requestId string) *InstancesSetSchedulingCall { - c.urlParams_.Set("requestId", requestId) + c.machineType = machineType return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetSchedulingCall) Fields(s ...googleapi.Field) *InstancesSetSchedulingCall { +func (c *MachineTypesGetCall) Fields(s ...googleapi.Field) *MachineTypesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
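IfNoneMatch above turns the GET into a conditional request: when the stored ETag still matches, the server answers 304 Not Modified and Do returns an error that googleapi.IsNotModified recognizes. A sketch reusing svc and the imports from the first example plus google.golang.org/api/googleapi; the zone and machine-type names are placeholders:

func refreshMachineType(ctx context.Context, svc *compute.Service, etag string) (*compute.MachineType, string, error) {
	call := svc.MachineTypes.Get("my-project", "us-central1-f", "n1-standard-1").Context(ctx)
	if etag != "" {
		call = call.IfNoneMatch(etag) // only transfer the body if the object changed
	}
	mt, err := call.Do()
	if googleapi.IsNotModified(err) {
		return nil, etag, nil // our cached copy is still current
	}
	if err != nil {
		return nil, "", err
	}
	// The embedded ServerResponse carries the response headers, including the ETag.
	return mt, mt.ServerResponse.Header.Get("Etag"), nil
}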
+func (c *MachineTypesGetCall) IfNoneMatch(entityTag string) *MachineTypesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetSchedulingCall) Context(ctx context.Context) *InstancesSetSchedulingCall { +func (c *MachineTypesGetCall) Context(ctx context.Context) *MachineTypesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetSchedulingCall) Header() http.Header { +func (c *MachineTypesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetSchedulingCall) doRequest(alt string) (*http.Response, error) { +func (c *MachineTypesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.scheduling) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setScheduling") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/machineTypes/{machineType}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "zone": c.zone, + "machineType": c.machineType, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setScheduling" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.machineTypes.get" call. +// Exactly one of *MachineType or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at +// *MachineType.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -45844,7 +58286,7 @@ func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operatio if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &MachineType{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -45856,17 +58298,17 @@ func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Sets an instance's scheduling options.", - // "httpMethod": "POST", - // "id": "compute.instances.setScheduling", + // "description": "Returns the specified machine type. Get a list of available machine types by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.machineTypes.get", // "parameterOrder": [ // "project", // "zone", - // "instance" + // "machineType" // ], // "parameters": { - // "instance": { - // "description": "Instance name.", + // "machineType": { + // "description": "Name of the machine type to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -45879,11 +58321,6 @@ func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operatio // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -45892,123 +58329,173 @@ func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operatio // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setScheduling", - // "request": { - // "$ref": "Scheduling" - // }, + // "path": "{project}/zones/{zone}/machineTypes/{machineType}", // "response": { - // "$ref": "Operation" + // "$ref": "MachineType" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.setServiceAccount": +// method id "compute.machineTypes.list": -type InstancesSetServiceAccountCall struct { - s *Service - project string - zone string - instance string - instancessetserviceaccountrequest *InstancesSetServiceAccountRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type MachineTypesListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SetServiceAccount: Sets the service account on the instance. 
For more -// information, read Changing the service account and access scopes for -// an instance. -func (r *InstancesService) SetServiceAccount(project string, zone string, instance string, instancessetserviceaccountrequest *InstancesSetServiceAccountRequest) *InstancesSetServiceAccountCall { - c := &InstancesSetServiceAccountCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of machine types available to the specified +// project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/list +func (r *MachineTypesService) List(project string, zone string) *MachineTypesListCall { + c := &MachineTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone - c.instance = instance - c.instancessetserviceaccountrequest = instancessetserviceaccountrequest return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetServiceAccountCall) RequestId(requestId string) *InstancesSetServiceAccountCall { - c.urlParams_.Set("requestId", requestId) +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *MachineTypesListCall) Filter(filter string) *MachineTypesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. 
If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *MachineTypesListCall) MaxResults(maxResults int64) *MachineTypesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *MachineTypesListCall) OrderBy(orderBy string) *MachineTypesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *MachineTypesListCall) PageToken(pageToken string) *MachineTypesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetServiceAccountCall) Fields(s ...googleapi.Field) *InstancesSetServiceAccountCall { +func (c *MachineTypesListCall) Fields(s ...googleapi.Field) *MachineTypesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *MachineTypesListCall) IfNoneMatch(entityTag string) *MachineTypesListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetServiceAccountCall) Context(ctx context.Context) *InstancesSetServiceAccountCall { +func (c *MachineTypesListCall) Context(ctx context.Context) *MachineTypesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
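Fields trims the response to the named JSON fields (the partial-response mechanism linked above) and Context bounds or cancels the underlying HTTP request. A sketch reusing svc and the imports from the first example plus the standard time package; keeping nextPageToken in the field mask matters because Pages relies on it:

func listMachineTypeNames(ctx context.Context, svc *compute.Service) ([]string, error) {
	// Give the whole paging loop a deadline; pending requests are aborted if it fires.
	ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()

	var names []string
	err := svc.MachineTypes.List("my-project", "us-central1-f").
		Fields("nextPageToken", "items/name"). // partial response: only what we read below
		Pages(ctx, func(page *compute.MachineTypeList) error {
			for _, mt := range page.Items {
				names = append(names, mt.Name)
			}
			return nil
		})
	return names, err
}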
-func (c *InstancesSetServiceAccountCall) Header() http.Header { +func (c *MachineTypesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, error) { +func (c *MachineTypesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetserviceaccountrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setServiceAccount") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/machineTypes") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "zone": c.zone, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setServiceAccount" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.machineTypes.list" call. +// Exactly one of *MachineTypeList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// *MachineTypeList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -46027,7 +58514,7 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &MachineTypeList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -46039,20 +58526,35 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Sets the service account on the instance. 
For more information, read Changing the service account and access scopes for an instance.", - // "httpMethod": "POST", - // "id": "compute.instances.setServiceAccount", + // "description": "Retrieves a list of machine types available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.machineTypes.list", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "zone" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance resource to start.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -46062,11 +58564,6 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -46075,43 +58572,58 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setServiceAccount", - // "request": { - // "$ref": "InstancesSetServiceAccountRequest" - // }, + // "path": "{project}/zones/{zone}/machineTypes", // "response": { - // "$ref": "Operation" + // "$ref": "MachineTypeList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.setTags": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *MachineTypesListCall) Pages(ctx context.Context, f func(*MachineTypeList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type InstancesSetTagsCall struct { - s *Service - project string - zone string - instance string - tags *Tags - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.networks.addPeering": + +type NetworksAddPeeringCall struct { + s *Service + project string + network string + networksaddpeeringrequest *NetworksAddPeeringRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetTags: Sets tags for the specified instance to the data included in -// the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/setTags -func (r *InstancesService) SetTags(project string, zone string, instance string, tags *Tags) *InstancesSetTagsCall { - c := &InstancesSetTagsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AddPeering: Adds a peering to the specified network. 
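The MachineTypesListCall vendored above exposes both a single-shot Do and a Pages helper that follows nextPageToken for the caller. A hedged usage sketch, outside the vendored file itself (project/zone names are placeholders, and the import path assumes the compute/v1 copy of the generated client; substitute v0.beta/v0.alpha if that is the file this hunk belongs to):

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	// Any authenticated *http.Client works; Application Default Credentials
	// is just the shortest path for a sketch.
	client, err := google.DefaultClient(ctx, compute.ComputeReadonlyScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}
	// Pages keeps calling Do and re-setting pageToken until NextPageToken
	// comes back empty, invoking the callback once per page.
	err = svc.MachineTypes.List("my-project", "us-central1-f").
		Pages(ctx, func(page *compute.MachineTypeList) error {
			for _, mt := range page.Items {
				fmt.Println(mt.Name, mt.GuestCpus, mt.MemoryMb)
			}
			return nil
		})
	if err != nil {
		log.Fatal(err)
	}
}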
+func (r *NetworksService) AddPeering(project string, network string, networksaddpeeringrequest *NetworksAddPeeringRequest) *NetworksAddPeeringCall { + c := &NetworksAddPeeringCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.tags = tags + c.network = network + c.networksaddpeeringrequest = networksaddpeeringrequest return c } @@ -46129,7 +58641,7 @@ func (r *InstancesService) SetTags(project string, zone string, instance string, // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesSetTagsCall) RequestId(requestId string) *InstancesSetTagsCall { +func (c *NetworksAddPeeringCall) RequestId(requestId string) *NetworksAddPeeringCall { c.urlParams_.Set("requestId", requestId) return c } @@ -46137,7 +58649,7 @@ func (c *InstancesSetTagsCall) RequestId(requestId string) *InstancesSetTagsCall // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetTagsCall) Fields(s ...googleapi.Field) *InstancesSetTagsCall { +func (c *NetworksAddPeeringCall) Fields(s ...googleapi.Field) *NetworksAddPeeringCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -46145,53 +58657,52 @@ func (c *InstancesSetTagsCall) Fields(s ...googleapi.Field) *InstancesSetTagsCal // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetTagsCall) Context(ctx context.Context) *InstancesSetTagsCall { +func (c *NetworksAddPeeringCall) Context(ctx context.Context) *NetworksAddPeeringCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetTagsCall) Header() http.Header { +func (c *NetworksAddPeeringCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksAddPeeringCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.tags) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networksaddpeeringrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setTags") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/addPeering") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setTags" call. +// Do executes the "compute.networks.addPeering" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworksAddPeeringCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -46222,17 +58733,16 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Sets tags for the specified instance to the data included in the request.", + // "description": "Adds a peering to the specified network.", // "httpMethod": "POST", - // "id": "compute.instances.setTags", + // "id": "compute.networks.addPeering", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "network" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance scoping this request.", + // "network": { + // "description": "Name of the network resource to add peering to.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -46249,18 +58759,11 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/setTags", + // "path": "{project}/global/networks/{network}/addPeering", // "request": { - // "$ref": "Tags" + // "$ref": "NetworksAddPeeringRequest" // }, // "response": { // "$ref": "Operation" @@ -46273,27 +58776,23 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err } -// method id "compute.instances.start": +// method id "compute.networks.delete": -type InstancesStartCall struct { +type NetworksDeleteCall struct { s *Service project string - zone string - instance string + network string urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Start: Starts an instance that was stopped using the using the -// instances().stop method. For more information, see Restart an -// instance. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/start -func (r *InstancesService) Start(project string, zone string, instance string) *InstancesStartCall { - c := &InstancesStartCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified network. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/delete +func (r *NetworksService) Delete(project string, network string) *NetworksDeleteCall { + c := &NetworksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance + c.network = network return c } @@ -46311,7 +58810,7 @@ func (r *InstancesService) Start(project string, zone string, instance string) * // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesStartCall) RequestId(requestId string) *InstancesStartCall { +func (c *NetworksDeleteCall) RequestId(requestId string) *NetworksDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -46319,7 +58818,7 @@ func (c *InstancesStartCall) RequestId(requestId string) *InstancesStartCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesStartCall) Fields(s ...googleapi.Field) *InstancesStartCall { +func (c *NetworksDeleteCall) Fields(s ...googleapi.Field) *NetworksDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -46327,21 +58826,21 @@ func (c *InstancesStartCall) Fields(s ...googleapi.Field) *InstancesStartCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesStartCall) Context(ctx context.Context) *InstancesStartCall { +func (c *NetworksDeleteCall) Context(ctx context.Context) *NetworksDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesStartCall) Header() http.Header { +func (c *NetworksDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -46349,26 +58848,25 @@ func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/start") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.start" call. +// Do executes the "compute.networks.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -46399,17 +58897,16 @@ func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Starts an instance that was stopped using the using the instances().stop method. For more information, see Restart an instance.", - // "httpMethod": "POST", - // "id": "compute.instances.start", + // "description": "Deletes the specified network.", + // "httpMethod": "DELETE", + // "id": "compute.networks.delete", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "network" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance resource to start.", + // "network": { + // "description": "Name of the network to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -46426,16 +58923,9 @@ func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/start", + // "path": "{project}/global/networks/{network}", // "response": { // "$ref": "Operation" // }, @@ -46447,108 +58937,93 @@ func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error } -// method id "compute.instances.startWithEncryptionKey": +// method id "compute.networks.get": -type InstancesStartWithEncryptionKeyCall struct { - s *Service - project string - zone string - instance string - instancesstartwithencryptionkeyrequest *InstancesStartWithEncryptionKeyRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworksGetCall struct { + s *Service + project string + network string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// StartWithEncryptionKey: Starts an instance that was stopped using the -// using the instances().stop method. For more information, see Restart -// an instance. -func (r *InstancesService) StartWithEncryptionKey(project string, zone string, instance string, instancesstartwithencryptionkeyrequest *InstancesStartWithEncryptionKeyRequest) *InstancesStartWithEncryptionKeyCall { - c := &InstancesStartWithEncryptionKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified network. 
Get a list of available networks +// by making a list() request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/get +func (r *NetworksService) Get(project string, network string) *NetworksGetCall { + c := &NetworksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance - c.instancesstartwithencryptionkeyrequest = instancesstartwithencryptionkeyrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesStartWithEncryptionKeyCall) RequestId(requestId string) *InstancesStartWithEncryptionKeyCall { - c.urlParams_.Set("requestId", requestId) + c.network = network return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesStartWithEncryptionKeyCall) Fields(s ...googleapi.Field) *InstancesStartWithEncryptionKeyCall { +func (c *NetworksGetCall) Fields(s ...googleapi.Field) *NetworksGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NetworksGetCall) IfNoneMatch(entityTag string) *NetworksGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesStartWithEncryptionKeyCall) Context(ctx context.Context) *InstancesStartWithEncryptionKeyCall { +func (c *NetworksGetCall) Context(ctx context.Context) *NetworksGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesStartWithEncryptionKeyCall) Header() http.Header { +func (c *NetworksGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesStartWithEncryptionKeyCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesstartwithencryptionkeyrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.startWithEncryptionKey" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.networks.get" call. +// Exactly one of *Network or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Network.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *NetworksGetCall) Do(opts ...googleapi.CallOption) (*Network, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -46567,7 +59042,7 @@ func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) ( if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Network{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -46579,17 +59054,16 @@ func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Starts an instance that was stopped using the using the instances().stop method. For more information, see Restart an instance.", - // "httpMethod": "POST", - // "id": "compute.instances.startWithEncryptionKey", + // "description": "Returns the specified network. 
Get a list of available networks by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.networks.get", // "parameterOrder": [ // "project", - // "zone", - // "instance" + // "network" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance resource to start.", + // "network": { + // "description": "Name of the network to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -46601,60 +59075,39 @@ func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) ( // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey", - // "request": { - // "$ref": "InstancesStartWithEncryptionKeyRequest" - // }, + // "path": "{project}/global/networks/{network}", // "response": { - // "$ref": "Operation" + // "$ref": "Network" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.instances.stop": +// method id "compute.networks.insert": -type InstancesStopCall struct { +type NetworksInsertCall struct { s *Service project string - zone string - instance string + network *Network urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Stop: Stops a running instance, shutting it down cleanly, and allows -// you to restart the instance at a later time. Stopped instances do not -// incur per-minute, virtual machine usage charges while they are -// stopped, but any resources that the virtual machine is using, such as -// persistent disks and static IP addresses, will continue to be charged -// until they are deleted. For more information, see Stopping an -// instance. -// For details, see https://cloud.google.com/compute/docs/reference/latest/instances/stop -func (r *InstancesService) Stop(project string, zone string, instance string) *InstancesStopCall { - c := &InstancesStopCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a network in the specified project using the data +// included in the request. 
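NetworksGetCall carries the usual read-call plumbing: IfNoneMatch sends an If-None-Match header and googleapi.IsNotModified tells a 304 apart from a real failure. A hedged sketch of conditional re-fetching, outside the vendored file (project/network names and the ETag handling are illustrative only):

package example

import (
	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// fetchNetworkIfChanged re-reads a network only when the server says our
// cached copy is stale; on a 304 it returns the cached value unchanged.
func fetchNetworkIfChanged(svc *compute.Service, cached *compute.Network) (*compute.Network, error) {
	call := svc.Networks.Get("my-project", "default")
	if cached != nil {
		// ServerResponse is embedded in every generated return type and
		// keeps the headers from the previous response.
		call = call.IfNoneMatch(cached.ServerResponse.Header.Get("Etag"))
	}
	n, err := call.Do()
	if googleapi.IsNotModified(err) {
		return cached, nil // 304: our copy is still current
	}
	if err != nil {
		return nil, err
	}
	return n, nil
}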
+// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/insert +func (r *NetworksService) Insert(project string, network *Network) *NetworksInsertCall { + c := &NetworksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.instance = instance + c.network = network return c } @@ -46672,7 +59125,7 @@ func (r *InstancesService) Stop(project string, zone string, instance string) *I // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *InstancesStopCall) RequestId(requestId string) *InstancesStopCall { +func (c *NetworksInsertCall) RequestId(requestId string) *NetworksInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -46680,7 +59133,7 @@ func (c *InstancesStopCall) RequestId(requestId string) *InstancesStopCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesStopCall) Fields(s ...googleapi.Field) *InstancesStopCall { +func (c *NetworksInsertCall) Fields(s ...googleapi.Field) *NetworksInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -46688,48 +59141,51 @@ func (c *InstancesStopCall) Fields(s ...googleapi.Field) *InstancesStopCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesStopCall) Context(ctx context.Context) *InstancesStopCall { +func (c *NetworksInsertCall) Context(ctx context.Context) *NetworksInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesStopCall) Header() http.Header { +func (c *NetworksInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesStopCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.network) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/stop") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "instance": c.instance, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.stop" call. +// Do executes the "compute.networks.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -46760,22 +59216,13 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur per-minute, virtual machine usage charges while they are stopped, but any resources that the virtual machine is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. For more information, see Stopping an instance.", + // "description": "Creates a network in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.instances.stop", + // "id": "compute.networks.insert", // "parameterOrder": [ - // "project", - // "zone", - // "instance" + // "project" // ], // "parameters": { - // "instance": { - // "description": "Name of the instance resource to stop.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -46787,16 +59234,12 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{instance}/stop", + // "path": "{project}/global/networks", + // "request": { + // "$ref": "Network" + // }, // "response": { // "$ref": "Operation" // }, @@ -46808,88 +59251,157 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) } -// method id "compute.instances.testIamPermissions": +// method id "compute.networks.list": -type InstancesTestIamPermissionsCall struct { - s *Service - project string - zone string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type NetworksListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. 
-func (r *InstancesService) TestIamPermissions(project string, zone string, resource string, testpermissionsrequest *TestPermissionsRequest) *InstancesTestIamPermissionsCall { - c := &InstancesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of networks available to the specified +// project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/list +func (r *NetworksService) List(project string) *NetworksListCall { + c := &NetworksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *NetworksListCall) Filter(filter string) *NetworksListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *NetworksListCall) MaxResults(maxResults int64) *NetworksListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. 
+func (c *NetworksListCall) OrderBy(orderBy string) *NetworksListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *NetworksListCall) PageToken(pageToken string) *NetworksListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesTestIamPermissionsCall) Fields(s ...googleapi.Field) *InstancesTestIamPermissionsCall { +func (c *NetworksListCall) Fields(s ...googleapi.Field) *NetworksListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NetworksListCall) IfNoneMatch(entityTag string) *NetworksListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesTestIamPermissionsCall) Context(ctx context.Context) *InstancesTestIamPermissionsCall { +func (c *NetworksListCall) Context(ctx context.Context) *NetworksListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesTestIamPermissionsCall) Header() http.Header { +func (c *NetworksListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "resource": c.resource, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. 
Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *InstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.networks.list" call. +// Exactly one of *NetworkList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *NetworkList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -46908,7 +59420,7 @@ func (c *InstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &NetworkList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -46920,43 +59432,47 @@ func (c *InstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.instances.testIamPermissions", + // "description": "Retrieves the list of networks available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.networks.list", // "parameterOrder": [ - // "project", - // "zone", - // "resource" + // "project" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", // "type": "string" // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/instances/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/global/networks", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "NetworkList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -46967,92 +59483,125 @@ func (c *InstancesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes } -// method id "compute.licenses.get": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *NetworksListCall) Pages(ctx context.Context, f func(*NetworkList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type LicensesGetCall struct { - s *Service - project string - license string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// method id "compute.networks.patch": + +type NetworksPatchCall struct { + s *Service + project string + network string + network2 *Network + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified License resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/licenses/get -func (r *LicensesService) Get(project string, license string) *LicensesGetCall { - c := &LicensesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Patches the specified network with the data included in the +// request. +func (r *NetworksService) Patch(project string, network string, network2 *Network) *NetworksPatchCall { + c := &NetworksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.license = license + c.network = network + c.network2 = network2 + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *NetworksPatchCall) RequestId(requestId string) *NetworksPatchCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *LicensesGetCall) Fields(s ...googleapi.Field) *LicensesGetCall { +func (c *NetworksPatchCall) Fields(s ...googleapi.Field) *NetworksPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *LicensesGetCall) IfNoneMatch(entityTag string) *LicensesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *LicensesGetCall) Context(ctx context.Context) *LicensesGetCall { +func (c *NetworksPatchCall) Context(ctx context.Context) *NetworksPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *LicensesGetCall) Header() http.Header { +func (c *NetworksPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *LicensesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.network2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{license}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("PATCH", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "license": c.license, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.licenses.get" call. -// Exactly one of *License or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *License.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { +// Do executes the "compute.networks.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -47071,7 +59620,7 @@ func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &License{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -47083,16 +59632,16 @@ func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { } return ret, nil // { - // "description": "Returns the specified License resource.", - // "httpMethod": "GET", - // "id": "compute.licenses.get", + // "description": "Patches the specified network with the data included in the request.", + // "httpMethod": "PATCH", + // "id": "compute.networks.patch", // "parameterOrder": [ // "project", - // "license" + // "network" // ], // "parameters": { - // "license": { - // "description": "Name of the License resource to return.", + // "network": { + // "description": "Name of the network to update.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -47104,171 +59653,125 @@ func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/global/licenses/{license}", + // "path": "{project}/global/networks/{network}", + // "request": { + // "$ref": "Network" + // }, // "response": { - // "$ref": "License" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.machineTypes.aggregatedList": - -type MachineTypesAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// AggregatedList: Retrieves an aggregated list of machine types. -// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/aggregatedList -func (r *MachineTypesService) AggregatedList(project string) *MachineTypesAggregatedListCall { - c := &MachineTypesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} +// method id "compute.networks.removePeering": -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. 
-// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *MachineTypesAggregatedListCall) Filter(filter string) *MachineTypesAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c +type NetworksRemovePeeringCall struct { + s *Service + project string + network string + networksremovepeeringrequest *NetworksRemovePeeringRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *MachineTypesAggregatedListCall) MaxResults(maxResults int64) *MachineTypesAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) +// RemovePeering: Removes a peering from the specified network. +func (r *NetworksService) RemovePeering(project string, network string, networksremovepeeringrequest *NetworksRemovePeeringRequest) *NetworksRemovePeeringCall { + c := &NetworksRemovePeeringCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.network = network + c.networksremovepeeringrequest = networksremovepeeringrequest return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *MachineTypesAggregatedListCall) OrderBy(orderBy string) *MachineTypesAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *MachineTypesAggregatedListCall) PageToken(pageToken string) *MachineTypesAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *NetworksRemovePeeringCall) RequestId(requestId string) *NetworksRemovePeeringCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *MachineTypesAggregatedListCall) Fields(s ...googleapi.Field) *MachineTypesAggregatedListCall { +func (c *NetworksRemovePeeringCall) Fields(s ...googleapi.Field) *NetworksRemovePeeringCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *MachineTypesAggregatedListCall) IfNoneMatch(entityTag string) *MachineTypesAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *MachineTypesAggregatedListCall) Context(ctx context.Context) *MachineTypesAggregatedListCall { +func (c *NetworksRemovePeeringCall) Context(ctx context.Context) *NetworksRemovePeeringCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *MachineTypesAggregatedListCall) Header() http.Header { +func (c *NetworksRemovePeeringCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *MachineTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksRemovePeeringCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networksremovepeeringrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/machineTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/removePeering") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.machineTypes.aggregatedList" call. -// Exactly one of *MachineTypeAggregatedList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *MachineTypeAggregatedList.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*MachineTypeAggregatedList, error) { +// Do executes the "compute.networks.removePeering" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworksRemovePeeringCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -47287,7 +59790,7 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &MachineTypeAggregatedList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -47299,34 +59802,19 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach } return ret, nil // { - // "description": "Retrieves an aggregated list of machine types.", - // "httpMethod": "GET", - // "id": "compute.machineTypes.aggregatedList", + // "description": "Removes a peering from the specified network.", + // "httpMethod": "POST", + // "id": "compute.networks.removePeering", // "parameterOrder": [ - // "project" + // "project", + // "network" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "network": { + // "description": "Name of the network resource to remove peering from.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, // "project": { @@ -47335,132 +59823,119 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/aggregated/machineTypes", + // "path": "{project}/global/networks/{network}/removePeering", + // "request": { + // "$ref": "NetworksRemovePeeringRequest" + // }, // "response": { - // "$ref": "MachineTypeAggregatedList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *MachineTypesAggregatedListCall) Pages(ctx context.Context, f func(*MachineTypeAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.machineTypes.get": +// method id "compute.networks.switchToCustomMode": -type MachineTypesGetCall struct { - s *Service - project string - zone string - machineType string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type NetworksSwitchToCustomModeCall struct { + s *Service + project string + network string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified machine type. Get a list of available -// machine types by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/get -func (r *MachineTypesService) Get(project string, zone string, machineType string) *MachineTypesGetCall { - c := &MachineTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SwitchToCustomMode: Switches the network mode from auto subnet mode +// to custom subnet mode. +func (r *NetworksService) SwitchToCustomMode(project string, network string) *NetworksSwitchToCustomModeCall { + c := &NetworksSwitchToCustomModeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - c.machineType = machineType + c.network = network + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *NetworksSwitchToCustomModeCall) RequestId(requestId string) *NetworksSwitchToCustomModeCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *MachineTypesGetCall) Fields(s ...googleapi.Field) *MachineTypesGetCall { +func (c *NetworksSwitchToCustomModeCall) Fields(s ...googleapi.Field) *NetworksSwitchToCustomModeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *MachineTypesGetCall) IfNoneMatch(entityTag string) *MachineTypesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *MachineTypesGetCall) Context(ctx context.Context) *MachineTypesGetCall { +func (c *NetworksSwitchToCustomModeCall) Context(ctx context.Context) *NetworksSwitchToCustomModeCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *MachineTypesGetCall) Header() http.Header { +func (c *NetworksSwitchToCustomModeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *MachineTypesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksSwitchToCustomModeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/machineTypes/{machineType}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/switchToCustomMode") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - "machineType": c.machineType, + "project": c.project, + "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.machineTypes.get" call. -// Exactly one of *MachineType or error will be non-nil. Any non-2xx +// Do executes the "compute.networks.switchToCustomMode" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *MachineType.ServerResponse.Header or (if a response was returned at +// *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, error) { +func (c *NetworksSwitchToCustomModeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -47479,7 +59954,7 @@ func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, er if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &MachineType{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -47491,17 +59966,16 @@ func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, er } return ret, nil // { - // "description": "Returns the specified machine type. 
Get a list of available machine types by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.machineTypes.get", + // "description": "Switches the network mode from auto subnet mode to custom subnet mode.", + // "httpMethod": "POST", + // "id": "compute.networks.switchToCustomMode", // "parameterOrder": [ // "project", - // "zone", - // "machineType" + // "network" // ], // "parameters": { - // "machineType": { - // "description": "Name of the machine type to return.", + // "network": { + // "description": "Name of the network to be updated.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -47514,181 +59988,103 @@ func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, er // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/machineTypes/{machineType}", + // "path": "{project}/global/networks/{network}/switchToCustomMode", // "response": { - // "$ref": "MachineType" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.machineTypes.list": +// method id "compute.networks.testIamPermissions": -type MachineTypesListCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type NetworksTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves a list of machine types available to the specified -// project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/machineTypes/list -func (r *MachineTypesService) List(project string, zone string) *MachineTypesListCall { - c := &MachineTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. 
+func (r *NetworksService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *NetworksTestIamPermissionsCall { + c := &NetworksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.zone = zone - return c -} - -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *MachineTypesListCall) Filter(filter string) *MachineTypesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *MachineTypesListCall) MaxResults(maxResults int64) *MachineTypesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *MachineTypesListCall) OrderBy(orderBy string) *MachineTypesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. 
-func (c *MachineTypesListCall) PageToken(pageToken string) *MachineTypesListCall { - c.urlParams_.Set("pageToken", pageToken) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *MachineTypesListCall) Fields(s ...googleapi.Field) *MachineTypesListCall { +func (c *NetworksTestIamPermissionsCall) Fields(s ...googleapi.Field) *NetworksTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *MachineTypesListCall) IfNoneMatch(entityTag string) *MachineTypesListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *MachineTypesListCall) Context(ctx context.Context) *MachineTypesListCall { +func (c *NetworksTestIamPermissionsCall) Context(ctx context.Context) *NetworksTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *MachineTypesListCall) Header() http.Header { +func (c *NetworksTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *MachineTypesListCall) doRequest(alt string) (*http.Response, error) { +func (c *NetworksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/machineTypes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.machineTypes.list" call. -// Exactly one of *MachineTypeList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *MachineTypeList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.networks.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeList, error) { +func (c *NetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -47707,7 +60103,7 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &MachineTypeList{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -47719,37 +60115,14 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis } return ret, nil // { - // "description": "Retrieves a list of machine types available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.machineTypes.list", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.networks.testIamPermissions", // "parameterOrder": [ // "project", - // "zone" + // "resource" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. 
By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -47757,17 +60130,20 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis // "required": true, // "type": "string" // }, - // "zone": { - // "description": "The name of the zone for this request.", + // "resource": { + // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/zones/{zone}/machineTypes", + // "path": "{project}/global/networks/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "MachineTypeList" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -47778,45 +60154,20 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *MachineTypesListCall) Pages(ctx context.Context, f func(*MachineTypeList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.networks.addPeering": +// method id "compute.projects.disableXpnHost": -type NetworksAddPeeringCall struct { - s *Service - project string - network string - networksaddpeeringrequest *NetworksAddPeeringRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsDisableXpnHostCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AddPeering: Adds a peering to the specified network. -func (r *NetworksService) AddPeering(project string, network string, networksaddpeeringrequest *NetworksAddPeeringRequest) *NetworksAddPeeringCall { - c := &NetworksAddPeeringCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// DisableXpnHost: Disable this project as a shared VPC host project. 
+func (r *ProjectsService) DisableXpnHost(project string) *ProjectsDisableXpnHostCall { + c := &ProjectsDisableXpnHostCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network - c.networksaddpeeringrequest = networksaddpeeringrequest return c } @@ -47834,7 +60185,7 @@ func (r *NetworksService) AddPeering(project string, network string, networksadd // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *NetworksAddPeeringCall) RequestId(requestId string) *NetworksAddPeeringCall { +func (c *ProjectsDisableXpnHostCall) RequestId(requestId string) *ProjectsDisableXpnHostCall { c.urlParams_.Set("requestId", requestId) return c } @@ -47842,7 +60193,7 @@ func (c *NetworksAddPeeringCall) RequestId(requestId string) *NetworksAddPeering // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksAddPeeringCall) Fields(s ...googleapi.Field) *NetworksAddPeeringCall { +func (c *ProjectsDisableXpnHostCall) Fields(s ...googleapi.Field) *ProjectsDisableXpnHostCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -47850,52 +60201,46 @@ func (c *NetworksAddPeeringCall) Fields(s ...googleapi.Field) *NetworksAddPeerin // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksAddPeeringCall) Context(ctx context.Context) *NetworksAddPeeringCall { +func (c *ProjectsDisableXpnHostCall) Context(ctx context.Context) *ProjectsDisableXpnHostCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksAddPeeringCall) Header() http.Header { +func (c *ProjectsDisableXpnHostCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksAddPeeringCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsDisableXpnHostCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.networksaddpeeringrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/addPeering") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/disableXpnHost") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.addPeering" call. +// Do executes the "compute.projects.disableXpnHost" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *NetworksAddPeeringCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ProjectsDisableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -47926,21 +60271,13 @@ func (c *NetworksAddPeeringCall) Do(opts ...googleapi.CallOption) (*Operation, e } return ret, nil // { - // "description": "Adds a peering to the specified network.", + // "description": "Disable this project as a shared VPC host project.", // "httpMethod": "POST", - // "id": "compute.networks.addPeering", + // "id": "compute.projects.disableXpnHost", // "parameterOrder": [ - // "project", - // "network" + // "project" // ], // "parameters": { - // "network": { - // "description": "Name of the network resource to add peering to.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -47954,10 +60291,7 @@ func (c *NetworksAddPeeringCall) Do(opts ...googleapi.CallOption) (*Operation, e // "type": "string" // } // }, - // "path": "{project}/global/networks/{network}/addPeering", - // "request": { - // "$ref": "NetworksAddPeeringRequest" - // }, + // "path": "{project}/disableXpnHost", // "response": { // "$ref": "Operation" // }, @@ -47969,23 +60303,23 @@ func (c *NetworksAddPeeringCall) Do(opts ...googleapi.CallOption) (*Operation, e } -// method id "compute.networks.delete": +// method id "compute.projects.disableXpnResource": -type NetworksDeleteCall struct { - s *Service - project string - network string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsDisableXpnResourceCall struct { + s *Service + project string + projectsdisablexpnresourcerequest *ProjectsDisableXpnResourceRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified network. -// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/delete -func (r *NetworksService) Delete(project string, network string) *NetworksDeleteCall { - c := &NetworksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// DisableXpnResource: Disable a serivce resource (a.k.a service +// project) associated with this host project. +func (r *ProjectsService) DisableXpnResource(project string, projectsdisablexpnresourcerequest *ProjectsDisableXpnResourceRequest) *ProjectsDisableXpnResourceCall { + c := &ProjectsDisableXpnResourceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network + c.projectsdisablexpnresourcerequest = projectsdisablexpnresourcerequest return c } @@ -48003,7 +60337,7 @@ func (r *NetworksService) Delete(project string, network string) *NetworksDelete // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *NetworksDeleteCall) RequestId(requestId string) *NetworksDeleteCall { +func (c *ProjectsDisableXpnResourceCall) RequestId(requestId string) *ProjectsDisableXpnResourceCall { c.urlParams_.Set("requestId", requestId) return c } @@ -48011,7 +60345,7 @@ func (c *NetworksDeleteCall) RequestId(requestId string) *NetworksDeleteCall { // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksDeleteCall) Fields(s ...googleapi.Field) *NetworksDeleteCall { +func (c *ProjectsDisableXpnResourceCall) Fields(s ...googleapi.Field) *ProjectsDisableXpnResourceCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -48019,47 +60353,51 @@ func (c *NetworksDeleteCall) Fields(s ...googleapi.Field) *NetworksDeleteCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksDeleteCall) Context(ctx context.Context) *NetworksDeleteCall { +func (c *ProjectsDisableXpnResourceCall) Context(ctx context.Context) *ProjectsDisableXpnResourceCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksDeleteCall) Header() http.Header { +func (c *ProjectsDisableXpnResourceCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsDisableXpnResourceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectsdisablexpnresourcerequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/disableXpnResource") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.delete" call. +// Do executes the "compute.projects.disableXpnResource" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ProjectsDisableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -48090,21 +60428,13 @@ func (c *NetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Deletes the specified network.", - // "httpMethod": "DELETE", - // "id": "compute.networks.delete", + // "description": "Disable a serivce resource (a.k.a service project) associated with this host project.", + // "httpMethod": "POST", + // "id": "compute.projects.disableXpnResource", // "parameterOrder": [ - // "project", - // "network" + // "project" // ], // "parameters": { - // "network": { - // "description": "Name of the network to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -48118,7 +60448,10 @@ func (c *NetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error // "type": "string" // } // }, - // "path": "{project}/global/networks/{network}", + // "path": "{project}/disableXpnResource", + // "request": { + // "$ref": "ProjectsDisableXpnResourceRequest" + // }, // "response": { // "$ref": "Operation" // }, @@ -48130,93 +60463,93 @@ func (c *NetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error } -// method id "compute.networks.get": +// method id "compute.projects.enableXpnHost": -type NetworksGetCall struct { - s *Service - project string - network string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ProjectsEnableXpnHostCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified network. Get a list of available networks -// by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/get -func (r *NetworksService) Get(project string, network string) *NetworksGetCall { - c := &NetworksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// EnableXpnHost: Enable this project as a shared VPC host project. +func (r *ProjectsService) EnableXpnHost(project string) *ProjectsEnableXpnHostCall { + c := &ProjectsEnableXpnHostCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ProjectsEnableXpnHostCall) RequestId(requestId string) *ProjectsEnableXpnHostCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *NetworksGetCall) Fields(s ...googleapi.Field) *NetworksGetCall { +func (c *ProjectsEnableXpnHostCall) Fields(s ...googleapi.Field) *ProjectsEnableXpnHostCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *NetworksGetCall) IfNoneMatch(entityTag string) *NetworksGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksGetCall) Context(ctx context.Context) *NetworksGetCall { +func (c *ProjectsEnableXpnHostCall) Context(ctx context.Context) *ProjectsEnableXpnHostCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksGetCall) Header() http.Header { +func (c *ProjectsEnableXpnHostCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsEnableXpnHostCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/enableXpnHost") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.get" call. -// Exactly one of *Network or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Network.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *NetworksGetCall) Do(opts ...googleapi.CallOption) (*Network, error) { +// Do executes the "compute.projects.enableXpnHost" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsEnableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -48235,7 +60568,7 @@ func (c *NetworksGetCall) Do(opts ...googleapi.CallOption) (*Network, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Network{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -48247,60 +60580,56 @@ func (c *NetworksGetCall) Do(opts ...googleapi.CallOption) (*Network, error) { } return ret, nil // { - // "description": "Returns the specified network. Get a list of available networks by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.networks.get", + // "description": "Enable this project as a shared VPC host project.", + // "httpMethod": "POST", + // "id": "compute.projects.enableXpnHost", // "parameterOrder": [ - // "project", - // "network" + // "project" // ], // "parameters": { - // "network": { - // "description": "Name of the network to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/global/networks/{network}", + // "path": "{project}/enableXpnHost", // "response": { - // "$ref": "Network" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.networks.insert": +// method id "compute.projects.enableXpnResource": -type NetworksInsertCall struct { - s *Service - project string - network *Network - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsEnableXpnResourceCall struct { + s *Service + project string + projectsenablexpnresourcerequest *ProjectsEnableXpnResourceRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a network in the specified project using the data -// included in the request. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/insert -func (r *NetworksService) Insert(project string, network *Network) *NetworksInsertCall { - c := &NetworksInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// EnableXpnResource: Enable service resource (a.k.a service project) +// for a host project, so that subnets in the host project can be used +// by instances in the service project. +func (r *ProjectsService) EnableXpnResource(project string, projectsenablexpnresourcerequest *ProjectsEnableXpnResourceRequest) *ProjectsEnableXpnResourceCall { + c := &ProjectsEnableXpnResourceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network + c.projectsenablexpnresourcerequest = projectsenablexpnresourcerequest return c } @@ -48318,7 +60647,7 @@ func (r *NetworksService) Insert(project string, network *Network) *NetworksInse // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *NetworksInsertCall) RequestId(requestId string) *NetworksInsertCall { +func (c *ProjectsEnableXpnResourceCall) RequestId(requestId string) *ProjectsEnableXpnResourceCall { c.urlParams_.Set("requestId", requestId) return c } @@ -48326,7 +60655,7 @@ func (c *NetworksInsertCall) RequestId(requestId string) *NetworksInsertCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksInsertCall) Fields(s ...googleapi.Field) *NetworksInsertCall { +func (c *ProjectsEnableXpnResourceCall) Fields(s ...googleapi.Field) *ProjectsEnableXpnResourceCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -48334,34 +60663,34 @@ func (c *NetworksInsertCall) Fields(s ...googleapi.Field) *NetworksInsertCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksInsertCall) Context(ctx context.Context) *NetworksInsertCall { +func (c *ProjectsEnableXpnResourceCall) Context(ctx context.Context) *ProjectsEnableXpnResourceCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksInsertCall) Header() http.Header { +func (c *ProjectsEnableXpnResourceCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsEnableXpnResourceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.network) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectsenablexpnresourcerequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/enableXpnResource") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -48371,14 +60700,14 @@ func (c *NetworksInsertCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.insert" call. +// Do executes the "compute.projects.enableXpnResource" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ProjectsEnableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -48409,9 +60738,9 @@ func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Creates a network in the specified project using the data included in the request.", + // "description": "Enable service resource (a.k.a service project) for a host project, so that subnets in the host project can be used by instances in the service project.", // "httpMethod": "POST", - // "id": "compute.networks.insert", + // "id": "compute.projects.enableXpnResource", // "parameterOrder": [ // "project" // ], @@ -48429,9 +60758,9 @@ func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error // "type": "string" // } // }, - // "path": "{project}/global/networks", + // "path": "{project}/enableXpnResource", // "request": { - // "$ref": "Network" + // "$ref": "ProjectsEnableXpnResourceRequest" // }, // "response": { // "$ref": "Operation" @@ -48444,9 +60773,9 @@ func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error } -// method id "compute.networks.list": +// method id "compute.projects.get": -type NetworksListCall struct { +type ProjectsGetCall struct { s *Service project string urlParams_ gensupport.URLParams @@ -48455,86 +60784,18 @@ type NetworksListCall struct { header_ http.Header } -// List: Retrieves the list of networks available to the specified -// project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/networks/list -func (r *NetworksService) List(project string) *NetworksListCall { - c := &NetworksListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified Project resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/projects/get +func (r *ProjectsService) Get(project string) *ProjectsGetCall { + c := &ProjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *NetworksListCall) Filter(filter string) *NetworksListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *NetworksListCall) MaxResults(maxResults int64) *NetworksListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *NetworksListCall) OrderBy(orderBy string) *NetworksListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *NetworksListCall) PageToken(pageToken string) *NetworksListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksListCall) Fields(s ...googleapi.Field) *NetworksListCall { +func (c *ProjectsGetCall) Fields(s ...googleapi.Field) *ProjectsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -48544,7 +60805,7 @@ func (c *NetworksListCall) Fields(s ...googleapi.Field) *NetworksListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
-func (c *NetworksListCall) IfNoneMatch(entityTag string) *NetworksListCall { +func (c *ProjectsGetCall) IfNoneMatch(entityTag string) *ProjectsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -48552,21 +60813,21 @@ func (c *NetworksListCall) IfNoneMatch(entityTag string) *NetworksListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksListCall) Context(ctx context.Context) *NetworksListCall { +func (c *ProjectsGetCall) Context(ctx context.Context) *ProjectsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksListCall) Header() http.Header { +func (c *ProjectsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksListCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -48577,7 +60838,7 @@ func (c *NetworksListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders @@ -48587,14 +60848,14 @@ func (c *NetworksListCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.list" call. -// Exactly one of *NetworkList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *NetworkList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error) { +// Do executes the "compute.projects.get" call. +// Exactly one of *Project or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Project.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -48613,7 +60874,7 @@ func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &NetworkList{ + ret := &Project{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -48625,36 +60886,13 @@ func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error } return ret, nil // { - // "description": "Retrieves the list of networks available to the specified project.", + // "description": "Returns the specified Project resource.", // "httpMethod": "GET", - // "id": "compute.networks.list", + // "id": "compute.projects.get", // "parameterOrder": [ // "project" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -48663,9 +60901,9 @@ func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error // "type": "string" // } // }, - // "path": "{project}/global/networks", + // "path": "{project}", // "response": { - // "$ref": "NetworkList" + // "$ref": "Project" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -48676,124 +60914,89 @@ func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *NetworksListCall) Pages(ctx context.Context, f func(*NetworkList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.networks.removePeering": +// method id "compute.projects.getXpnHost": -type NetworksRemovePeeringCall struct { - s *Service - project string - network string - networksremovepeeringrequest *NetworksRemovePeeringRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsGetXpnHostCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// RemovePeering: Removes a peering from the specified network. -func (r *NetworksService) RemovePeering(project string, network string, networksremovepeeringrequest *NetworksRemovePeeringRequest) *NetworksRemovePeeringCall { - c := &NetworksRemovePeeringCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetXpnHost: Get the shared VPC host project that this project links +// to. May be empty if no link exists. +func (r *ProjectsService) GetXpnHost(project string) *ProjectsGetXpnHostCall { + c := &ProjectsGetXpnHostCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network - c.networksremovepeeringrequest = networksremovepeeringrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *NetworksRemovePeeringCall) RequestId(requestId string) *NetworksRemovePeeringCall { - c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *NetworksRemovePeeringCall) Fields(s ...googleapi.Field) *NetworksRemovePeeringCall { +func (c *ProjectsGetXpnHostCall) Fields(s ...googleapi.Field) *ProjectsGetXpnHostCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsGetXpnHostCall) IfNoneMatch(entityTag string) *ProjectsGetXpnHostCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksRemovePeeringCall) Context(ctx context.Context) *NetworksRemovePeeringCall { +func (c *ProjectsGetXpnHostCall) Context(ctx context.Context) *ProjectsGetXpnHostCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NetworksRemovePeeringCall) Header() http.Header { +func (c *ProjectsGetXpnHostCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksRemovePeeringCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsGetXpnHostCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.networksremovepeeringrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/removePeering") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/getXpnHost") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.removePeering" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NetworksRemovePeeringCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.projects.getXpnHost" call. +// Exactly one of *Project or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Project.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsGetXpnHostCall) Do(opts ...googleapi.CallOption) (*Project, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -48812,7 +61015,7 @@ func (c *NetworksRemovePeeringCall) Do(opts ...googleapi.CallOption) (*Operation if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &Project{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -48824,40 +61027,24 @@ func (c *NetworksRemovePeeringCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Removes a peering from the specified network.", - // "httpMethod": "POST", - // "id": "compute.networks.removePeering", + // "description": "Get the shared VPC host project that this project links to. May be empty if no link exists.", + // "httpMethod": "GET", + // "id": "compute.projects.getXpnHost", // "parameterOrder": [ - // "project", - // "network" + // "project" // ], // "parameters": { - // "network": { - // "description": "Name of the network resource to remove peering from.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/global/networks/{network}/removePeering", - // "request": { - // "$ref": "NetworksRemovePeeringRequest" - // }, + // "path": "{project}/getXpnHost", // "response": { - // "$ref": "Operation" + // "$ref": "Project" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -48867,97 +61054,113 @@ func (c *NetworksRemovePeeringCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.networks.switchToCustomMode": +// method id "compute.projects.getXpnResources": -type NetworksSwitchToCustomModeCall struct { - s *Service - project string - network string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsGetXpnResourcesCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// SwitchToCustomMode: Switches the network mode from auto subnet mode -// to custom subnet mode. -func (r *NetworksService) SwitchToCustomMode(project string, network string) *NetworksSwitchToCustomModeCall { - c := &NetworksSwitchToCustomModeCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetXpnResources: Get service resources (a.k.a service project) +// associated with this host project. 
+func (r *ProjectsService) GetXpnResources(project string) *ProjectsGetXpnResourcesCall { + c := &ProjectsGetXpnResourcesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.network = network return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *NetworksSwitchToCustomModeCall) RequestId(requestId string) *NetworksSwitchToCustomModeCall { - c.urlParams_.Set("requestId", requestId) +// Filter sets the optional parameter "filter": +func (c *ProjectsGetXpnResourcesCall) Filter(filter string) *ProjectsGetXpnResourcesCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": +func (c *ProjectsGetXpnResourcesCall) MaxResults(maxResults int64) *ProjectsGetXpnResourcesCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "order_by": +func (c *ProjectsGetXpnResourcesCall) OrderBy(orderBy string) *ProjectsGetXpnResourcesCall { + c.urlParams_.Set("order_by", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": +func (c *ProjectsGetXpnResourcesCall) PageToken(pageToken string) *ProjectsGetXpnResourcesCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksSwitchToCustomModeCall) Fields(s ...googleapi.Field) *NetworksSwitchToCustomModeCall { +func (c *ProjectsGetXpnResourcesCall) Fields(s ...googleapi.Field) *ProjectsGetXpnResourcesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsGetXpnResourcesCall) IfNoneMatch(entityTag string) *ProjectsGetXpnResourcesCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksSwitchToCustomModeCall) Context(ctx context.Context) *NetworksSwitchToCustomModeCall { +func (c *ProjectsGetXpnResourcesCall) Context(ctx context.Context) *ProjectsGetXpnResourcesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *NetworksSwitchToCustomModeCall) Header() http.Header { +func (c *ProjectsGetXpnResourcesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksSwitchToCustomModeCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsGetXpnResourcesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}/switchToCustomMode") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/getXpnResources") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "network": c.network, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.switchToCustomMode" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NetworksSwitchToCustomModeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.projects.getXpnResources" call. +// Exactly one of *ProjectsGetXpnResources or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ProjectsGetXpnResources.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsGetXpnResourcesCall) Do(opts ...googleapi.CallOption) (*ProjectsGetXpnResources, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -48976,7 +61179,7 @@ func (c *NetworksSwitchToCustomModeCall) Do(opts ...googleapi.CallOption) (*Oper if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &ProjectsGetXpnResources{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -48988,19 +61191,30 @@ func (c *NetworksSwitchToCustomModeCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Switches the network mode from auto subnet mode to custom subnet mode.", - // "httpMethod": "POST", - // "id": "compute.networks.switchToCustomMode", + // "description": "Get service resources (a.k.a service project) associated with this host project.", + // "httpMethod": "GET", + // "id": "compute.projects.getXpnResources", // "parameterOrder": [ - // "project", - // "network" + // "project" // ], // "parameters": { - // "network": { - // "description": "Name of the network to be updated.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "order_by": { + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "location": "query", // "type": "string" // }, // "project": { @@ -49009,16 +61223,11 @@ func (c *NetworksSwitchToCustomModeCall) Do(opts ...googleapi.CallOption) (*Oper // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/global/networks/{network}/switchToCustomMode", + // "path": "{project}/getXpnResources", // "response": { - // "$ref": "Operation" + // "$ref": "ProjectsGetXpnResources" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -49028,32 +61237,75 @@ func (c *NetworksSwitchToCustomModeCall) Do(opts ...googleapi.CallOption) (*Oper } -// method id "compute.networks.testIamPermissions": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *ProjectsGetXpnResourcesCall) Pages(ctx context.Context, f func(*ProjectsGetXpnResources) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type NetworksTestIamPermissionsCall struct { - s *Service - project string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.projects.listXpnHosts": + +type ProjectsListXpnHostsCall struct { + s *Service + project string + projectslistxpnhostsrequest *ProjectsListXpnHostsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *NetworksService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *NetworksTestIamPermissionsCall { - c := &NetworksTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListXpnHosts: List all shared VPC host projects visible to the user +// in an organization. +func (r *ProjectsService) ListXpnHosts(project string, projectslistxpnhostsrequest *ProjectsListXpnHostsRequest) *ProjectsListXpnHostsCall { + c := &ProjectsListXpnHostsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.projectslistxpnhostsrequest = projectslistxpnhostsrequest + return c +} + +// Filter sets the optional parameter "filter": +func (c *ProjectsListXpnHostsCall) Filter(filter string) *ProjectsListXpnHostsCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": +func (c *ProjectsListXpnHostsCall) MaxResults(maxResults int64) *ProjectsListXpnHostsCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "order_by": +func (c *ProjectsListXpnHostsCall) OrderBy(orderBy string) *ProjectsListXpnHostsCall { + c.urlParams_.Set("order_by", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": +func (c *ProjectsListXpnHostsCall) PageToken(pageToken string) *ProjectsListXpnHostsCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NetworksTestIamPermissionsCall) Fields(s ...googleapi.Field) *NetworksTestIamPermissionsCall { +func (c *ProjectsListXpnHostsCall) Fields(s ...googleapi.Field) *ProjectsListXpnHostsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -49061,52 +61313,51 @@ func (c *NetworksTestIamPermissionsCall) Fields(s ...googleapi.Field) *NetworksT // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NetworksTestIamPermissionsCall) Context(ctx context.Context) *NetworksTestIamPermissionsCall { +func (c *ProjectsListXpnHostsCall) Context(ctx context.Context) *ProjectsListXpnHostsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *NetworksTestIamPermissionsCall) Header() http.Header { +func (c *ProjectsListXpnHostsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NetworksTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsListXpnHostsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectslistxpnhostsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/listXpnHosts") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.networks.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *NetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.projects.listXpnHosts" call. +// Exactly one of *XpnHostList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *XpnHostList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsListXpnHostsCall) Do(opts ...googleapi.CallOption) (*XpnHostList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -49125,7 +61376,7 @@ func (c *NetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &XpnHostList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -49137,59 +61388,92 @@ func (c *NetworksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Test } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", + // "description": "List all shared VPC host projects visible to the user in an organization.", // "httpMethod": "POST", - // "id": "compute.networks.testIamPermissions", + // "id": "compute.projects.listXpnHosts", // "parameterOrder": [ - // "project", - // "resource" + // "project" // ], // "parameters": { + // "filter": { + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "order_by": { + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/global/networks/{resource}/testIamPermissions", + // "path": "{project}/listXpnHosts", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "ProjectsListXpnHostsRequest" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "XpnHostList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.projects.disableXpnHost": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsListXpnHostsCall) Pages(ctx context.Context, f func(*XpnHostList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type ProjectsDisableXpnHostCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.projects.moveDisk": + +type ProjectsMoveDiskCall struct { + s *Service + project string + diskmoverequest *DiskMoveRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// DisableXpnHost: Disable this project as a shared VPC host project. 
-func (r *ProjectsService) DisableXpnHost(project string) *ProjectsDisableXpnHostCall { - c := &ProjectsDisableXpnHostCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// MoveDisk: Moves a persistent disk from one zone to another. +func (r *ProjectsService) MoveDisk(project string, diskmoverequest *DiskMoveRequest) *ProjectsMoveDiskCall { + c := &ProjectsMoveDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.diskmoverequest = diskmoverequest return c } @@ -49207,7 +61491,7 @@ func (r *ProjectsService) DisableXpnHost(project string) *ProjectsDisableXpnHost // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ProjectsDisableXpnHostCall) RequestId(requestId string) *ProjectsDisableXpnHostCall { +func (c *ProjectsMoveDiskCall) RequestId(requestId string) *ProjectsMoveDiskCall { c.urlParams_.Set("requestId", requestId) return c } @@ -49215,7 +61499,7 @@ func (c *ProjectsDisableXpnHostCall) RequestId(requestId string) *ProjectsDisabl // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsDisableXpnHostCall) Fields(s ...googleapi.Field) *ProjectsDisableXpnHostCall { +func (c *ProjectsMoveDiskCall) Fields(s ...googleapi.Field) *ProjectsMoveDiskCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -49223,29 +61507,34 @@ func (c *ProjectsDisableXpnHostCall) Fields(s ...googleapi.Field) *ProjectsDisab // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsDisableXpnHostCall) Context(ctx context.Context) *ProjectsDisableXpnHostCall { +func (c *ProjectsMoveDiskCall) Context(ctx context.Context) *ProjectsMoveDiskCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsDisableXpnHostCall) Header() http.Header { +func (c *ProjectsMoveDiskCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsDisableXpnHostCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsMoveDiskCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.diskmoverequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/disableXpnHost") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/moveDisk") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -49255,14 +61544,14 @@ func (c *ProjectsDisableXpnHostCall) doRequest(alt string) (*http.Response, erro return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.disableXpnHost" call. +// Do executes the "compute.projects.moveDisk" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsDisableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ProjectsMoveDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -49293,9 +61582,9 @@ func (c *ProjectsDisableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Disable this project as a shared VPC host project.", + // "description": "Moves a persistent disk from one zone to another.", // "httpMethod": "POST", - // "id": "compute.projects.disableXpnHost", + // "id": "compute.projects.moveDisk", // "parameterOrder": [ // "project" // ], @@ -49313,7 +61602,10 @@ func (c *ProjectsDisableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operatio // "type": "string" // } // }, - // "path": "{project}/disableXpnHost", + // "path": "{project}/moveDisk", + // "request": { + // "$ref": "DiskMoveRequest" + // }, // "response": { // "$ref": "Operation" // }, @@ -49325,23 +61617,23 @@ func (c *ProjectsDisableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operatio } -// method id "compute.projects.disableXpnResource": +// method id "compute.projects.moveInstance": -type ProjectsDisableXpnResourceCall struct { - s *Service - project string - projectsdisablexpnresourcerequest *ProjectsDisableXpnResourceRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsMoveInstanceCall struct { + s *Service + project string + instancemoverequest *InstanceMoveRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// DisableXpnResource: Disable a serivce resource (a.k.a service -// project) associated with this host project. -func (r *ProjectsService) DisableXpnResource(project string, projectsdisablexpnresourcerequest *ProjectsDisableXpnResourceRequest) *ProjectsDisableXpnResourceCall { - c := &ProjectsDisableXpnResourceCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// MoveInstance: Moves an instance and its attached persistent disks +// from one zone to another. +func (r *ProjectsService) MoveInstance(project string, instancemoverequest *InstanceMoveRequest) *ProjectsMoveInstanceCall { + c := &ProjectsMoveInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.projectsdisablexpnresourcerequest = projectsdisablexpnresourcerequest + c.instancemoverequest = instancemoverequest return c } @@ -49359,7 +61651,7 @@ func (r *ProjectsService) DisableXpnResource(project string, projectsdisablexpnr // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ProjectsDisableXpnResourceCall) RequestId(requestId string) *ProjectsDisableXpnResourceCall { +func (c *ProjectsMoveInstanceCall) RequestId(requestId string) *ProjectsMoveInstanceCall { c.urlParams_.Set("requestId", requestId) return c } @@ -49367,7 +61659,7 @@ func (c *ProjectsDisableXpnResourceCall) RequestId(requestId string) *ProjectsDi // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *ProjectsDisableXpnResourceCall) Fields(s ...googleapi.Field) *ProjectsDisableXpnResourceCall { +func (c *ProjectsMoveInstanceCall) Fields(s ...googleapi.Field) *ProjectsMoveInstanceCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -49375,34 +61667,34 @@ func (c *ProjectsDisableXpnResourceCall) Fields(s ...googleapi.Field) *ProjectsD // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsDisableXpnResourceCall) Context(ctx context.Context) *ProjectsDisableXpnResourceCall { +func (c *ProjectsMoveInstanceCall) Context(ctx context.Context) *ProjectsMoveInstanceCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsDisableXpnResourceCall) Header() http.Header { +func (c *ProjectsMoveInstanceCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsDisableXpnResourceCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsMoveInstanceCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectsdisablexpnresourcerequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancemoverequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/disableXpnResource") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/moveInstance") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -49412,14 +61704,14 @@ func (c *ProjectsDisableXpnResourceCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.disableXpnResource" call. +// Do executes the "compute.projects.moveInstance" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsDisableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -49450,9 +61742,9 @@ func (c *ProjectsDisableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Disable a serivce resource (a.k.a service project) associated with this host project.", + // "description": "Moves an instance and its attached persistent disks from one zone to another.", // "httpMethod": "POST", - // "id": "compute.projects.disableXpnResource", + // "id": "compute.projects.moveInstance", // "parameterOrder": [ // "project" // ], @@ -49470,9 +61762,9 @@ func (c *ProjectsDisableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Oper // "type": "string" // } // }, - // "path": "{project}/disableXpnResource", + // "path": "{project}/moveInstance", // "request": { - // "$ref": "ProjectsDisableXpnResourceRequest" + // "$ref": "InstanceMoveRequest" // }, // "response": { // "$ref": "Operation" @@ -49485,20 +61777,24 @@ func (c *ProjectsDisableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Oper } -// method id "compute.projects.enableXpnHost": +// method id "compute.projects.setCommonInstanceMetadata": -type ProjectsEnableXpnHostCall struct { +type ProjectsSetCommonInstanceMetadataCall struct { s *Service project string + metadata *Metadata urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// EnableXpnHost: Enable this project as a shared VPC host project. -func (r *ProjectsService) EnableXpnHost(project string) *ProjectsEnableXpnHostCall { - c := &ProjectsEnableXpnHostCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetCommonInstanceMetadata: Sets metadata common to all instances +// within the specified project using the data included in the request. +// For details, see https://cloud.google.com/compute/docs/reference/latest/projects/setCommonInstanceMetadata +func (r *ProjectsService) SetCommonInstanceMetadata(project string, metadata *Metadata) *ProjectsSetCommonInstanceMetadataCall { + c := &ProjectsSetCommonInstanceMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.metadata = metadata return c } @@ -49516,7 +61812,7 @@ func (r *ProjectsService) EnableXpnHost(project string) *ProjectsEnableXpnHostCa // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ProjectsEnableXpnHostCall) RequestId(requestId string) *ProjectsEnableXpnHostCall { +func (c *ProjectsSetCommonInstanceMetadataCall) RequestId(requestId string) *ProjectsSetCommonInstanceMetadataCall { c.urlParams_.Set("requestId", requestId) return c } @@ -49524,7 +61820,7 @@ func (c *ProjectsEnableXpnHostCall) RequestId(requestId string) *ProjectsEnableX // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsEnableXpnHostCall) Fields(s ...googleapi.Field) *ProjectsEnableXpnHostCall { +func (c *ProjectsSetCommonInstanceMetadataCall) Fields(s ...googleapi.Field) *ProjectsSetCommonInstanceMetadataCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -49532,29 +61828,34 @@ func (c *ProjectsEnableXpnHostCall) Fields(s ...googleapi.Field) *ProjectsEnable // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *ProjectsEnableXpnHostCall) Context(ctx context.Context) *ProjectsEnableXpnHostCall { +func (c *ProjectsSetCommonInstanceMetadataCall) Context(ctx context.Context) *ProjectsSetCommonInstanceMetadataCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsEnableXpnHostCall) Header() http.Header { +func (c *ProjectsSetCommonInstanceMetadataCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsEnableXpnHostCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsSetCommonInstanceMetadataCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.metadata) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/enableXpnHost") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setCommonInstanceMetadata") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -49564,14 +61865,14 @@ func (c *ProjectsEnableXpnHostCall) doRequest(alt string) (*http.Response, error return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.enableXpnHost" call. +// Do executes the "compute.projects.setCommonInstanceMetadata" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsEnableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -49602,9 +61903,9 @@ func (c *ProjectsEnableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Enable this project as a shared VPC host project.", + // "description": "Sets metadata common to all instances within the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.projects.enableXpnHost", + // "id": "compute.projects.setCommonInstanceMetadata", // "parameterOrder": [ // "project" // ], @@ -49622,7 +61923,10 @@ func (c *ProjectsEnableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation // "type": "string" // } // }, - // "path": "{project}/enableXpnHost", + // "path": "{project}/setCommonInstanceMetadata", + // "request": { + // "$ref": "Metadata" + // }, // "response": { // "$ref": "Operation" // }, @@ -49634,24 +61938,26 @@ func (c *ProjectsEnableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation } -// method id "compute.projects.enableXpnResource": +// method id "compute.projects.setUsageExportBucket": -type ProjectsEnableXpnResourceCall struct { - s *Service - project string - projectsenablexpnresourcerequest *ProjectsEnableXpnResourceRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ProjectsSetUsageExportBucketCall struct { + s *Service + project string + usageexportlocation *UsageExportLocation + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// EnableXpnResource: Enable service resource (a.k.a service project) -// for a host project, so that subnets in the host project can be used -// by instances in the service project. -func (r *ProjectsService) EnableXpnResource(project string, projectsenablexpnresourcerequest *ProjectsEnableXpnResourceRequest) *ProjectsEnableXpnResourceCall { - c := &ProjectsEnableXpnResourceCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetUsageExportBucket: Enables the usage export feature and sets the +// usage export bucket where reports are stored. If you provide an empty +// request body using this method, the usage export feature will be +// disabled. +// For details, see https://cloud.google.com/compute/docs/reference/latest/projects/setUsageExportBucket +func (r *ProjectsService) SetUsageExportBucket(project string, usageexportlocation *UsageExportLocation) *ProjectsSetUsageExportBucketCall { + c := &ProjectsSetUsageExportBucketCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.projectsenablexpnresourcerequest = projectsenablexpnresourcerequest + c.usageexportlocation = usageexportlocation return c } @@ -49669,7 +61975,7 @@ func (r *ProjectsService) EnableXpnResource(project string, projectsenablexpnres // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ProjectsEnableXpnResourceCall) RequestId(requestId string) *ProjectsEnableXpnResourceCall { +func (c *ProjectsSetUsageExportBucketCall) RequestId(requestId string) *ProjectsSetUsageExportBucketCall { c.urlParams_.Set("requestId", requestId) return c } @@ -49677,7 +61983,7 @@ func (c *ProjectsEnableXpnResourceCall) RequestId(requestId string) *ProjectsEna // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *ProjectsEnableXpnResourceCall) Fields(s ...googleapi.Field) *ProjectsEnableXpnResourceCall { +func (c *ProjectsSetUsageExportBucketCall) Fields(s ...googleapi.Field) *ProjectsSetUsageExportBucketCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -49685,34 +61991,34 @@ func (c *ProjectsEnableXpnResourceCall) Fields(s ...googleapi.Field) *ProjectsEn // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsEnableXpnResourceCall) Context(ctx context.Context) *ProjectsEnableXpnResourceCall { +func (c *ProjectsSetUsageExportBucketCall) Context(ctx context.Context) *ProjectsSetUsageExportBucketCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsEnableXpnResourceCall) Header() http.Header { +func (c *ProjectsSetUsageExportBucketCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsEnableXpnResourceCall) doRequest(alt string) (*http.Response, error) { +func (c *ProjectsSetUsageExportBucketCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectsenablexpnresourcerequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.usageexportlocation) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/enableXpnResource") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setUsageExportBucket") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -49722,14 +62028,14 @@ func (c *ProjectsEnableXpnResourceCall) doRequest(alt string) (*http.Response, e return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.enableXpnResource" call. +// Do executes the "compute.projects.setUsageExportBucket" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsEnableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -49760,9 +62066,9 @@ func (c *ProjectsEnableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Opera } return ret, nil // { - // "description": "Enable service resource (a.k.a service project) for a host project, so that subnets in the host project can be used by instances in the service project.", + // "description": "Enables the usage export feature and sets the usage export bucket where reports are stored. 
If you provide an empty request body using this method, the usage export feature will be disabled.", // "httpMethod": "POST", - // "id": "compute.projects.enableXpnResource", + // "id": "compute.projects.setUsageExportBucket", // "parameterOrder": [ // "project" // ], @@ -49780,104 +62086,117 @@ func (c *ProjectsEnableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Opera // "type": "string" // } // }, - // "path": "{project}/enableXpnResource", + // "path": "{project}/setUsageExportBucket", // "request": { - // "$ref": "ProjectsEnableXpnResourceRequest" + // "$ref": "UsageExportLocation" // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "compute.projects.get": +// method id "compute.regionAutoscalers.delete": -type ProjectsGetCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionAutoscalersDeleteCall struct { + s *Service + project string + region string + autoscaler string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified Project resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/projects/get -func (r *ProjectsService) Get(project string) *ProjectsGetCall { - c := &ProjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified autoscaler. +func (r *RegionAutoscalersService) Delete(project string, region string, autoscaler string) *RegionAutoscalersDeleteCall { + c := &RegionAutoscalersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.region = region + c.autoscaler = autoscaler + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionAutoscalersDeleteCall) RequestId(requestId string) *RegionAutoscalersDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsGetCall) Fields(s ...googleapi.Field) *ProjectsGetCall { +func (c *RegionAutoscalersDeleteCall) Fields(s ...googleapi.Field) *RegionAutoscalersDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. 
This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsGetCall) IfNoneMatch(entityTag string) *ProjectsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsGetCall) Context(ctx context.Context) *ProjectsGetCall { +func (c *RegionAutoscalersDeleteCall) Context(ctx context.Context) *RegionAutoscalersDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsGetCall) Header() http.Header { +func (c *RegionAutoscalersDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionAutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers/{autoscaler}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "autoscaler": c.autoscaler, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.get" call. -// Exactly one of *Project or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Project.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { +// Do executes the "compute.regionAutoscalers.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionAutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -49896,7 +62215,7 @@ func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Project{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -49908,57 +62227,80 @@ func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { } return ret, nil // { - // "description": "Returns the specified Project resource.", - // "httpMethod": "GET", - // "id": "compute.projects.get", + // "description": "Deletes the specified autoscaler.", + // "httpMethod": "DELETE", + // "id": "compute.regionAutoscalers.delete", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "autoscaler" // ], // "parameters": { + // "autoscaler": { + // "description": "Name of the autoscaler to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}", + // "path": "{project}/regions/{region}/autoscalers/{autoscaler}", // "response": { - // "$ref": "Project" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.projects.getXpnHost": +// method id "compute.regionAutoscalers.get": -type ProjectsGetXpnHostCall struct { +type RegionAutoscalersGetCall struct { s *Service project string + region string + autoscaler string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// GetXpnHost: Get the shared VPC host project that this project links -// to. May be empty if no link exists. -func (r *ProjectsService) GetXpnHost(project string) *ProjectsGetXpnHostCall { - c := &ProjectsGetXpnHostCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified autoscaler. 
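A small sketch of the retry-safe delete pattern the requestId parameter above describes; the project, region, autoscaler name and UUID are placeholders, and a *compute.Service built as in the earlier sketch is assumed.

package example

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

// deleteAutoscaler issues a delete that is safe to retry: resending the same
// requestId lets the server ignore a duplicate if the first attempt already
// completed.
func deleteAutoscaler(ctx context.Context, svc *compute.Service) {
	const (
		project    = "my-project"  // placeholder
		region     = "us-central1" // placeholder
		autoscaler = "web-as"      // placeholder
		// Placeholder UUID; each logical request needs its own non-zero UUID.
		requestID = "3f2504e0-4f89-41d3-9a0c-0305e82c3301"
	)
	op, err := svc.RegionAutoscalers.Delete(project, region, autoscaler).
		RequestId(requestID).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("delete started: operation %s, status %s", op.Name, op.Status)
}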
+func (r *RegionAutoscalersService) Get(project string, region string, autoscaler string) *RegionAutoscalersGetCall { + c := &RegionAutoscalersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.region = region + c.autoscaler = autoscaler return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsGetXpnHostCall) Fields(s ...googleapi.Field) *ProjectsGetXpnHostCall { +func (c *RegionAutoscalersGetCall) Fields(s ...googleapi.Field) *RegionAutoscalersGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -49968,7 +62310,7 @@ func (c *ProjectsGetXpnHostCall) Fields(s ...googleapi.Field) *ProjectsGetXpnHos // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ProjectsGetXpnHostCall) IfNoneMatch(entityTag string) *ProjectsGetXpnHostCall { +func (c *RegionAutoscalersGetCall) IfNoneMatch(entityTag string) *RegionAutoscalersGetCall { c.ifNoneMatch_ = entityTag return c } @@ -49976,21 +62318,21 @@ func (c *ProjectsGetXpnHostCall) IfNoneMatch(entityTag string) *ProjectsGetXpnHo // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsGetXpnHostCall) Context(ctx context.Context) *ProjectsGetXpnHostCall { +func (c *RegionAutoscalersGetCall) Context(ctx context.Context) *RegionAutoscalersGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsGetXpnHostCall) Header() http.Header { +func (c *RegionAutoscalersGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsGetXpnHostCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionAutoscalersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -50001,24 +62343,26 @@ func (c *ProjectsGetXpnHostCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/getXpnHost") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers/{autoscaler}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "autoscaler": c.autoscaler, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.getXpnHost" call. -// Exactly one of *Project or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Project.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ProjectsGetXpnHostCall) Do(opts ...googleapi.CallOption) (*Project, error) { +// Do executes the "compute.regionAutoscalers.get" call. +// Exactly one of *Autoscaler or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Autoscaler.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionAutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -50037,7 +62381,7 @@ func (c *ProjectsGetXpnHostCall) Do(opts ...googleapi.CallOption) (*Project, err if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Project{ + ret := &Autoscaler{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -50049,140 +62393,148 @@ func (c *ProjectsGetXpnHostCall) Do(opts ...googleapi.CallOption) (*Project, err } return ret, nil // { - // "description": "Get the shared VPC host project that this project links to. May be empty if no link exists.", + // "description": "Returns the specified autoscaler.", // "httpMethod": "GET", - // "id": "compute.projects.getXpnHost", + // "id": "compute.regionAutoscalers.get", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "autoscaler" // ], // "parameters": { + // "autoscaler": { + // "description": "Name of the autoscaler to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/getXpnHost", + // "path": "{project}/regions/{region}/autoscalers/{autoscaler}", // "response": { - // "$ref": "Project" + // "$ref": "Autoscaler" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.projects.getXpnResources": +// method id "compute.regionAutoscalers.insert": -type ProjectsGetXpnResourcesCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionAutoscalersInsertCall struct { + s *Service + project string + region string + autoscaler *Autoscaler + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// GetXpnResources: Get service resources (a.k.a service project) -// associated with this host project. -func (r *ProjectsService) GetXpnResources(project string) *ProjectsGetXpnResourcesCall { - c := &ProjectsGetXpnResourcesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates an autoscaler in the specified project using the data +// included in the request. 
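The IfNoneMatch/IsNotModified mechanics carried over to RegionAutoscalersGetCall could be exercised roughly as below, assuming the server actually returns an ETag for this resource; the project, region and autoscaler names are placeholders.

package example

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// getAutoscaler performs a conditional GET: the previously seen ETag goes into
// If-None-Match, and googleapi.IsNotModified detects the not-modified case.
func getAutoscaler(ctx context.Context, svc *compute.Service, lastETag string) (*compute.Autoscaler, error) {
	// "my-project", "us-central1" and "web-as" are placeholders.
	as, err := svc.RegionAutoscalers.Get("my-project", "us-central1", "web-as").
		IfNoneMatch(lastETag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		log.Println("autoscaler unchanged since last fetch")
		return nil, nil
	}
	return as, err
}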
+func (r *RegionAutoscalersService) Insert(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersInsertCall { + c := &RegionAutoscalersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.region = region + c.autoscaler = autoscaler return c } -// Filter sets the optional parameter "filter": -func (c *ProjectsGetXpnResourcesCall) Filter(filter string) *ProjectsGetXpnResourcesCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": -func (c *ProjectsGetXpnResourcesCall) MaxResults(maxResults int64) *ProjectsGetXpnResourcesCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "order_by": -func (c *ProjectsGetXpnResourcesCall) OrderBy(orderBy string) *ProjectsGetXpnResourcesCall { - c.urlParams_.Set("order_by", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": -func (c *ProjectsGetXpnResourcesCall) PageToken(pageToken string) *ProjectsGetXpnResourcesCall { - c.urlParams_.Set("pageToken", pageToken) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionAutoscalersInsertCall) RequestId(requestId string) *RegionAutoscalersInsertCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsGetXpnResourcesCall) Fields(s ...googleapi.Field) *ProjectsGetXpnResourcesCall { +func (c *RegionAutoscalersInsertCall) Fields(s ...googleapi.Field) *RegionAutoscalersInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ProjectsGetXpnResourcesCall) IfNoneMatch(entityTag string) *ProjectsGetXpnResourcesCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsGetXpnResourcesCall) Context(ctx context.Context) *ProjectsGetXpnResourcesCall { +func (c *RegionAutoscalersInsertCall) Context(ctx context.Context) *RegionAutoscalersInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ProjectsGetXpnResourcesCall) Header() http.Header { +func (c *RegionAutoscalersInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsGetXpnResourcesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionAutoscalersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/getXpnResources") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.getXpnResources" call. -// Exactly one of *ProjectsGetXpnResources or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ProjectsGetXpnResources.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ProjectsGetXpnResourcesCall) Do(opts ...googleapi.CallOption) (*ProjectsGetXpnResources, error) { +// Do executes the "compute.regionAutoscalers.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionAutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -50201,7 +62553,7 @@ func (c *ProjectsGetXpnResourcesCall) Do(opts ...googleapi.CallOption) (*Project if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &ProjectsGetXpnResources{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -50213,43 +62565,40 @@ func (c *ProjectsGetXpnResourcesCall) Do(opts ...googleapi.CallOption) (*Project } return ret, nil // { - // "description": "Get service resources (a.k.a service project) associated with this host project.", - // "httpMethod": "GET", - // "id": "compute.projects.getXpnResources", + // "description": "Creates an autoscaler in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.regionAutoscalers.insert", // "parameterOrder": [ - // "project" + // "project", + // "region" // ], // "parameters": { - // "filter": { - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "order_by": { - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/getXpnResources", + // "path": "{project}/regions/{region}/autoscalers", + // "request": { + // "$ref": "Autoscaler" + // }, // "response": { - // "$ref": "ProjectsGetXpnResources" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -50259,67 +62608,90 @@ func (c *ProjectsGetXpnResourcesCall) Do(opts ...googleapi.CallOption) (*Project } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
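A hedged sketch of creating a regional autoscaler through the new Insert call; the autoscaler name, target instance-group-manager URL and policy numbers are invented for illustration, and the same placeholder project and region are assumed.

package example

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

// insertAutoscaler creates a regional autoscaler targeting a (hypothetical)
// regional managed instance group and scaling on CPU utilization.
func insertAutoscaler(ctx context.Context, svc *compute.Service) {
	as := &compute.Autoscaler{
		Name:   "web-as", // placeholder
		Target: "https://www.googleapis.com/compute/v1/projects/my-project/regions/us-central1/instanceGroupManagers/web-mig",
		AutoscalingPolicy: &compute.AutoscalingPolicy{
			MinNumReplicas:    2,
			MaxNumReplicas:    10,
			CoolDownPeriodSec: 60,
			CpuUtilization:    &compute.AutoscalingPolicyCpuUtilization{UtilizationTarget: 0.6},
		},
	}
	op, err := svc.RegionAutoscalers.Insert("my-project", "us-central1", as).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("insert accepted: operation %s", op.Name)
}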
-func (c *ProjectsGetXpnResourcesCall) Pages(ctx context.Context, f func(*ProjectsGetXpnResources) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.projects.listXpnHosts": +// method id "compute.regionAutoscalers.list": -type ProjectsListXpnHostsCall struct { - s *Service - project string - projectslistxpnhostsrequest *ProjectsListXpnHostsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionAutoscalersListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// ListXpnHosts: List all shared VPC host projects visible to the user -// in an organization. -func (r *ProjectsService) ListXpnHosts(project string, projectslistxpnhostsrequest *ProjectsListXpnHostsRequest) *ProjectsListXpnHostsCall { - c := &ProjectsListXpnHostsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of autoscalers contained within the specified +// region. +func (r *RegionAutoscalersService) List(project string, region string) *RegionAutoscalersListCall { + c := &RegionAutoscalersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.projectslistxpnhostsrequest = projectslistxpnhostsrequest + c.region = region return c } -// Filter sets the optional parameter "filter": -func (c *ProjectsListXpnHostsCall) Filter(filter string) *ProjectsListXpnHostsCall { +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. 
+func (c *RegionAutoscalersListCall) Filter(filter string) *RegionAutoscalersListCall { c.urlParams_.Set("filter", filter) return c } -// MaxResults sets the optional parameter "maxResults": -func (c *ProjectsListXpnHostsCall) MaxResults(maxResults int64) *ProjectsListXpnHostsCall { +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *RegionAutoscalersListCall) MaxResults(maxResults int64) *RegionAutoscalersListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } -// OrderBy sets the optional parameter "order_by": -func (c *ProjectsListXpnHostsCall) OrderBy(orderBy string) *ProjectsListXpnHostsCall { - c.urlParams_.Set("order_by", orderBy) +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *RegionAutoscalersListCall) OrderBy(orderBy string) *RegionAutoscalersListCall { + c.urlParams_.Set("orderBy", orderBy) return c } -// PageToken sets the optional parameter "pageToken": -func (c *ProjectsListXpnHostsCall) PageToken(pageToken string) *ProjectsListXpnHostsCall { +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *RegionAutoscalersListCall) PageToken(pageToken string) *RegionAutoscalersListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -50327,59 +62699,68 @@ func (c *ProjectsListXpnHostsCall) PageToken(pageToken string) *ProjectsListXpnH // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsListXpnHostsCall) Fields(s ...googleapi.Field) *ProjectsListXpnHostsCall { +func (c *RegionAutoscalersListCall) Fields(s ...googleapi.Field) *RegionAutoscalersListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionAutoscalersListCall) IfNoneMatch(entityTag string) *RegionAutoscalersListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
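The filter, orderBy and maxResults parameters documented above combine roughly as follows for a single-page listing; the project, region and RE2 filter value are placeholders.

package example

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

// listAutoscalersOnce fetches one page, newest first, keeping only autoscalers
// whose name matches the RE2 expression "web-.*".
func listAutoscalersOnce(ctx context.Context, svc *compute.Service) {
	list, err := svc.RegionAutoscalers.List("my-project", "us-central1").
		Filter("name eq web-.*").
		OrderBy("creationTimestamp desc").
		MaxResults(50).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, as := range list.Items {
		log.Printf("%s -> %s", as.Name, as.Target)
	}
	if list.NextPageToken != "" {
		log.Println("more results: pass NextPageToken to PageToken for the next page")
	}
}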
-func (c *ProjectsListXpnHostsCall) Context(ctx context.Context) *ProjectsListXpnHostsCall { +func (c *RegionAutoscalersListCall) Context(ctx context.Context) *RegionAutoscalersListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsListXpnHostsCall) Header() http.Header { +func (c *RegionAutoscalersListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsListXpnHostsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionAutoscalersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectslistxpnhostsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/listXpnHosts") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.listXpnHosts" call. -// Exactly one of *XpnHostList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *XpnHostList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsListXpnHostsCall) Do(opts ...googleapi.CallOption) (*XpnHostList, error) { +// Do executes the "compute.regionAutoscalers.list" call. +// Exactly one of *RegionAutoscalerList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *RegionAutoscalerList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAutoscalerList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -50398,7 +62779,7 @@ func (c *ProjectsListXpnHostsCall) Do(opts ...googleapi.CallOption) (*XpnHostLis if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &XpnHostList{ + ret := &RegionAutoscalerList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -50410,29 +62791,34 @@ func (c *ProjectsListXpnHostsCall) Do(opts ...googleapi.CallOption) (*XpnHostLis } return ret, nil // { - // "description": "List all shared VPC host projects visible to the user in an organization.", - // "httpMethod": "POST", - // "id": "compute.projects.listXpnHosts", + // "description": "Retrieves a list of autoscalers contained within the specified region.", + // "httpMethod": "GET", + // "id": "compute.regionAutoscalers.list", // "parameterOrder": [ - // "project" + // "project", + // "region" // ], // "parameters": { // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", // "format": "uint32", // "location": "query", // "minimum": "0", // "type": "integer" // }, - // "order_by": { + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", // "location": "query", // "type": "string" // }, // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", // "location": "query", // "type": "string" // }, @@ -50442,18 +62828,23 @@ func (c *ProjectsListXpnHostsCall) Do(opts ...googleapi.CallOption) (*XpnHostLis // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/listXpnHosts", - // "request": { - // "$ref": "ProjectsListXpnHostsRequest" - // }, + // "path": "{project}/regions/{region}/autoscalers", // "response": { - // "$ref": "XpnHostList" + // "$ref": "RegionAutoscalerList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } @@ -50462,7 +62853,7 @@ func (c *ProjectsListXpnHostsCall) Do(opts ...googleapi.CallOption) (*XpnHostLis // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *ProjectsListXpnHostsCall) Pages(ctx context.Context, f func(*XpnHostList) error) error { +func (c *RegionAutoscalersListCall) Pages(ctx context.Context, f func(*RegionAutoscalerList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -50480,22 +62871,33 @@ func (c *ProjectsListXpnHostsCall) Pages(ctx context.Context, f func(*XpnHostLis } } -// method id "compute.projects.moveDisk": +// method id "compute.regionAutoscalers.patch": -type ProjectsMoveDiskCall struct { - s *Service - project string - diskmoverequest *DiskMoveRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionAutoscalersPatchCall struct { + s *Service + project string + region string + autoscaler *Autoscaler + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// MoveDisk: Moves a persistent disk from one zone to another. -func (r *ProjectsService) MoveDisk(project string, diskmoverequest *DiskMoveRequest) *ProjectsMoveDiskCall { - c := &ProjectsMoveDiskCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates an autoscaler in the specified project using the data +// included in the request. This method supports PATCH semantics and +// uses the JSON merge patch format and processing rules. +func (r *RegionAutoscalersService) Patch(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersPatchCall { + c := &RegionAutoscalersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.diskmoverequest = diskmoverequest + c.region = region + c.autoscaler = autoscaler + return c +} + +// Autoscaler sets the optional parameter "autoscaler": Name of the +// autoscaler to patch. 
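When more than one page may come back, the generated Pages helper (re-pointed at RegionAutoscalersListCall in this hunk) handles the nextPageToken loop; a short sketch, again with placeholder project and region.

package example

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// forEachAutoscaler walks every page of results; a non-nil error returned from
// the callback halts the iteration, as the Pages doc comment describes.
func forEachAutoscaler(ctx context.Context, svc *compute.Service, visit func(*compute.Autoscaler) error) error {
	// "my-project" and "us-central1" are placeholders.
	return svc.RegionAutoscalers.List("my-project", "us-central1").Pages(ctx,
		func(page *compute.RegionAutoscalerList) error {
			for _, as := range page.Items {
				if err := visit(as); err != nil {
					return fmt.Errorf("visiting %s: %v", as.Name, err)
				}
			}
			return nil
		})
}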
+func (c *RegionAutoscalersPatchCall) Autoscaler(autoscaler string) *RegionAutoscalersPatchCall { + c.urlParams_.Set("autoscaler", autoscaler) return c } @@ -50513,7 +62915,7 @@ func (r *ProjectsService) MoveDisk(project string, diskmoverequest *DiskMoveRequ // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ProjectsMoveDiskCall) RequestId(requestId string) *ProjectsMoveDiskCall { +func (c *RegionAutoscalersPatchCall) RequestId(requestId string) *RegionAutoscalersPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -50521,7 +62923,7 @@ func (c *ProjectsMoveDiskCall) RequestId(requestId string) *ProjectsMoveDiskCall // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsMoveDiskCall) Fields(s ...googleapi.Field) *ProjectsMoveDiskCall { +func (c *RegionAutoscalersPatchCall) Fields(s ...googleapi.Field) *RegionAutoscalersPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -50529,51 +62931,52 @@ func (c *ProjectsMoveDiskCall) Fields(s ...googleapi.Field) *ProjectsMoveDiskCal // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsMoveDiskCall) Context(ctx context.Context) *ProjectsMoveDiskCall { +func (c *RegionAutoscalersPatchCall) Context(ctx context.Context) *RegionAutoscalersPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsMoveDiskCall) Header() http.Header { +func (c *RegionAutoscalersPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsMoveDiskCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionAutoscalersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.diskmoverequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/moveDisk") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("PATCH", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.moveDisk" call. +// Do executes the "compute.regionAutoscalers.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *ProjectsMoveDiskCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionAutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -50604,13 +63007,20 @@ func (c *ProjectsMoveDiskCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Moves a persistent disk from one zone to another.", - // "httpMethod": "POST", - // "id": "compute.projects.moveDisk", + // "description": "Updates an autoscaler in the specified project using the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.regionAutoscalers.patch", // "parameterOrder": [ - // "project" + // "project", + // "region" // ], // "parameters": { + // "autoscaler": { + // "description": "Name of the autoscaler to patch.", + // "location": "query", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -50618,15 +63028,22 @@ func (c *ProjectsMoveDiskCall) Do(opts ...googleapi.CallOption) (*Operation, err // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/moveDisk", + // "path": "{project}/regions/{region}/autoscalers", // "request": { - // "$ref": "DiskMoveRequest" + // "$ref": "Autoscaler" // }, // "response": { // "$ref": "Operation" @@ -50639,49 +63056,34 @@ func (c *ProjectsMoveDiskCall) Do(opts ...googleapi.CallOption) (*Operation, err } -// method id "compute.projects.moveInstance": +// method id "compute.regionAutoscalers.testIamPermissions": -type ProjectsMoveInstanceCall struct { - s *Service - project string - instancemoverequest *InstanceMoveRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionAutoscalersTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// MoveInstance: Moves an instance and its attached persistent disks -// from one zone to another. 
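A sketch of the PATCH semantics described in the descriptor above: only the fields present in the sparse body are merged into the existing resource, and the optional autoscaler query parameter names the resource to patch. All names and numbers are placeholders.

package example

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

// patchAutoscaler raises only MaxNumReplicas; Patch merges this sparse body
// into the existing resource, whereas Update replaces the whole definition.
func patchAutoscaler(ctx context.Context, svc *compute.Service) {
	sparse := &compute.Autoscaler{
		AutoscalingPolicy: &compute.AutoscalingPolicy{MaxNumReplicas: 20},
	}
	// "web-as" is the placeholder name passed via the optional "autoscaler"
	// query parameter; project and region are placeholders too.
	op, err := svc.RegionAutoscalers.Patch("my-project", "us-central1", sparse).
		Autoscaler("web-as").
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("patch submitted: operation %s", op.Name)
}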
-func (r *ProjectsService) MoveInstance(project string, instancemoverequest *InstanceMoveRequest) *ProjectsMoveInstanceCall { - c := &ProjectsMoveInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *RegionAutoscalersService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionAutoscalersTestIamPermissionsCall { + c := &RegionAutoscalersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.instancemoverequest = instancemoverequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *ProjectsMoveInstanceCall) RequestId(requestId string) *ProjectsMoveInstanceCall { - c.urlParams_.Set("requestId", requestId) + c.region = region + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsMoveInstanceCall) Fields(s ...googleapi.Field) *ProjectsMoveInstanceCall { +func (c *RegionAutoscalersTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionAutoscalersTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -50689,51 +63091,53 @@ func (c *ProjectsMoveInstanceCall) Fields(s ...googleapi.Field) *ProjectsMoveIns // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsMoveInstanceCall) Context(ctx context.Context) *ProjectsMoveInstanceCall { +func (c *RegionAutoscalersTestIamPermissionsCall) Context(ctx context.Context) *RegionAutoscalersTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ProjectsMoveInstanceCall) Header() http.Header { +func (c *RegionAutoscalersTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsMoveInstanceCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionAutoscalersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancemoverequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/moveInstance") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.moveInstance" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionAutoscalers.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionAutoscalersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -50752,7 +63156,7 @@ func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -50764,11 +63168,13 @@ func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Moves an instance and its attached persistent disks from one zone to another.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.projects.moveInstance", + // "id": "compute.regionAutoscalers.testIamPermissions", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "resource" // ], // "parameters": { // "project": { @@ -50778,45 +63184,63 @@ func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/moveInstance", + // "path": "{project}/regions/{region}/autoscalers/{resource}/testIamPermissions", // "request": { - // "$ref": "InstanceMoveRequest" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.projects.setCommonInstanceMetadata": +// method id "compute.regionAutoscalers.update": -type ProjectsSetCommonInstanceMetadataCall struct { +type RegionAutoscalersUpdateCall struct { s *Service project string - metadata *Metadata + region string + autoscaler *Autoscaler urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// SetCommonInstanceMetadata: Sets metadata common to all instances -// within the specified project using the data included in the request. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/projects/setCommonInstanceMetadata -func (r *ProjectsService) SetCommonInstanceMetadata(project string, metadata *Metadata) *ProjectsSetCommonInstanceMetadataCall { - c := &ProjectsSetCommonInstanceMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates an autoscaler in the specified project using the data +// included in the request. +func (r *RegionAutoscalersService) Update(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersUpdateCall { + c := &RegionAutoscalersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.metadata = metadata + c.region = region + c.autoscaler = autoscaler + return c +} + +// Autoscaler sets the optional parameter "autoscaler": Name of the +// autoscaler to update. +func (c *RegionAutoscalersUpdateCall) Autoscaler(autoscaler string) *RegionAutoscalersUpdateCall { + c.urlParams_.Set("autoscaler", autoscaler) return c } @@ -50834,7 +63258,7 @@ func (r *ProjectsService) SetCommonInstanceMetadata(project string, metadata *Me // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ProjectsSetCommonInstanceMetadataCall) RequestId(requestId string) *ProjectsSetCommonInstanceMetadataCall { +func (c *RegionAutoscalersUpdateCall) RequestId(requestId string) *RegionAutoscalersUpdateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -50842,7 +63266,7 @@ func (c *ProjectsSetCommonInstanceMetadataCall) RequestId(requestId string) *Pro // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ProjectsSetCommonInstanceMetadataCall) Fields(s ...googleapi.Field) *ProjectsSetCommonInstanceMetadataCall { +func (c *RegionAutoscalersUpdateCall) Fields(s ...googleapi.Field) *RegionAutoscalersUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -50850,51 +63274,52 @@ func (c *ProjectsSetCommonInstanceMetadataCall) Fields(s ...googleapi.Field) *Pr // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsSetCommonInstanceMetadataCall) Context(ctx context.Context) *ProjectsSetCommonInstanceMetadataCall { +func (c *RegionAutoscalersUpdateCall) Context(ctx context.Context) *RegionAutoscalersUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ProjectsSetCommonInstanceMetadataCall) Header() http.Header { +func (c *RegionAutoscalersUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsSetCommonInstanceMetadataCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionAutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.metadata) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setCommonInstanceMetadata") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("PUT", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.setCommonInstanceMetadata" call. +// Do executes the "compute.regionAutoscalers.update" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionAutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -50925,13 +63350,20 @@ func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Sets metadata common to all instances within the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.projects.setCommonInstanceMetadata", + // "description": "Updates an autoscaler in the specified project using the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.regionAutoscalers.update", // "parameterOrder": [ - // "project" + // "project", + // "region" // ], // "parameters": { + // "autoscaler": { + // "description": "Name of the autoscaler to update.", + // "location": "query", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -50939,15 +63371,22 @@ func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/setCommonInstanceMetadata", + // "path": "{project}/regions/{region}/autoscalers", // "request": { - // "$ref": "Metadata" + // "$ref": "Autoscaler" // }, // "response": { // "$ref": "Operation" @@ -50960,26 +63399,24 @@ func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) } -// method id "compute.projects.setUsageExportBucket": +// method id "compute.regionBackendServices.delete": -type ProjectsSetUsageExportBucketCall struct { - s *Service - project string - usageexportlocation *UsageExportLocation - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionBackendServicesDeleteCall struct { + s *Service + project string + region string + backendService string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetUsageExportBucket: Enables the usage export feature and sets the -// usage export bucket where reports are stored. If you provide an empty -// request body using this method, the usage export feature will be -// disabled. -// For details, see https://cloud.google.com/compute/docs/reference/latest/projects/setUsageExportBucket -func (r *ProjectsService) SetUsageExportBucket(project string, usageexportlocation *UsageExportLocation) *ProjectsSetUsageExportBucketCall { - c := &ProjectsSetUsageExportBucketCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified regional BackendService resource. +func (r *RegionBackendServicesService) Delete(project string, region string, backendService string) *RegionBackendServicesDeleteCall { + c := &RegionBackendServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.usageexportlocation = usageexportlocation + c.region = region + c.backendService = backendService return c } @@ -50997,7 +63434,7 @@ func (r *ProjectsService) SetUsageExportBucket(project string, usageexportlocati // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *ProjectsSetUsageExportBucketCall) RequestId(requestId string) *ProjectsSetUsageExportBucketCall { +func (c *RegionBackendServicesDeleteCall) RequestId(requestId string) *RegionBackendServicesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -51005,7 +63442,7 @@ func (c *ProjectsSetUsageExportBucketCall) RequestId(requestId string) *Projects // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *ProjectsSetUsageExportBucketCall) Fields(s ...googleapi.Field) *ProjectsSetUsageExportBucketCall { +func (c *RegionBackendServicesDeleteCall) Fields(s ...googleapi.Field) *RegionBackendServicesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -51013,51 +63450,48 @@ func (c *ProjectsSetUsageExportBucketCall) Fields(s ...googleapi.Field) *Project // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ProjectsSetUsageExportBucketCall) Context(ctx context.Context) *ProjectsSetUsageExportBucketCall { +func (c *RegionBackendServicesDeleteCall) Context(ctx context.Context) *RegionBackendServicesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ProjectsSetUsageExportBucketCall) Header() http.Header { +func (c *RegionBackendServicesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ProjectsSetUsageExportBucketCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionBackendServicesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.usageexportlocation) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setUsageExportBucket") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.projects.setUsageExportBucket" call. +// Do executes the "compute.regionBackendServices.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionBackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -51088,13 +63522,22 @@ func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Enables the usage export feature and sets the usage export bucket where reports are stored. 
If you provide an empty request body using this method, the usage export feature will be disabled.", - // "httpMethod": "POST", - // "id": "compute.projects.setUsageExportBucket", + // "description": "Deletes the specified regional BackendService resource.", + // "httpMethod": "DELETE", + // "id": "compute.regionBackendServices.delete", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "backendService" // ], // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -51102,123 +63545,119 @@ func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Op // "required": true, // "type": "string" // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/setUsageExportBucket", - // "request": { - // "$ref": "UsageExportLocation" - // }, + // "path": "{project}/regions/{region}/backendServices/{backendService}", // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.regionAutoscalers.delete": +// method id "compute.regionBackendServices.get": -type RegionAutoscalersDeleteCall struct { - s *Service - project string - region string - autoscaler string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionBackendServicesGetCall struct { + s *Service + project string + region string + backendService string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified autoscaler. -func (r *RegionAutoscalersService) Delete(project string, region string, autoscaler string) *RegionAutoscalersDeleteCall { - c := &RegionAutoscalersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified regional BackendService resource. 
+func (r *RegionBackendServicesService) Get(project string, region string, backendService string) *RegionBackendServicesGetCall { + c := &RegionBackendServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.autoscaler = autoscaler - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionAutoscalersDeleteCall) RequestId(requestId string) *RegionAutoscalersDeleteCall { - c.urlParams_.Set("requestId", requestId) + c.backendService = backendService return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionAutoscalersDeleteCall) Fields(s ...googleapi.Field) *RegionAutoscalersDeleteCall { +func (c *RegionBackendServicesGetCall) Fields(s ...googleapi.Field) *RegionBackendServicesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionBackendServicesGetCall) IfNoneMatch(entityTag string) *RegionBackendServicesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionAutoscalersDeleteCall) Context(ctx context.Context) *RegionAutoscalersDeleteCall { +func (c *RegionBackendServicesGetCall) Context(ctx context.Context) *RegionBackendServicesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionAutoscalersDeleteCall) Header() http.Header { +func (c *RegionBackendServicesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionAutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionBackendServicesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers/{autoscaler}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "autoscaler": c.autoscaler, + "project": c.project, + "region": c.region, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionAutoscalers.delete" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.regionBackendServices.get" call. +// Exactly one of *BackendService or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionAutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// *BackendService.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionBackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendService, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -51237,7 +63676,7 @@ func (c *RegionAutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operati if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &BackendService{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -51249,17 +63688,17 @@ func (c *RegionAutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Deletes the specified autoscaler.", - // "httpMethod": "DELETE", - // "id": "compute.regionAutoscalers.delete", + // "description": "Returns the specified regional BackendService resource.", + // "httpMethod": "GET", + // "id": "compute.regionBackendServices.get", // "parameterOrder": [ // "project", // "region", - // "autoscaler" + // "backendService" // ], // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to delete.", + // "backendService": { + // "description": "Name of the BackendService resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -51278,113 +63717,103 @@ func (c *RegionAutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operati // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/autoscalers/{autoscaler}", + // "path": "{project}/regions/{region}/backendServices/{backendService}", // "response": { - // "$ref": "Operation" + // "$ref": "BackendService" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionAutoscalers.get": +// method id "compute.regionBackendServices.getHealth": -type RegionAutoscalersGetCall struct { - s *Service - project string - region string - autoscaler string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionBackendServicesGetHealthCall struct { + s *Service + project string + region string + backendService string + resourcegroupreference *ResourceGroupReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified autoscaler. -func (r *RegionAutoscalersService) Get(project string, region string, autoscaler string) *RegionAutoscalersGetCall { - c := &RegionAutoscalersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetHealth: Gets the most recent health check results for this +// regional BackendService. +func (r *RegionBackendServicesService) GetHealth(project string, region string, backendService string, resourcegroupreference *ResourceGroupReference) *RegionBackendServicesGetHealthCall { + c := &RegionBackendServicesGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.autoscaler = autoscaler + c.backendService = backendService + c.resourcegroupreference = resourcegroupreference return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionAutoscalersGetCall) Fields(s ...googleapi.Field) *RegionAutoscalersGetCall { +func (c *RegionBackendServicesGetHealthCall) Fields(s ...googleapi.Field) *RegionBackendServicesGetHealthCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionAutoscalersGetCall) IfNoneMatch(entityTag string) *RegionAutoscalersGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionAutoscalersGetCall) Context(ctx context.Context) *RegionAutoscalersGetCall { +func (c *RegionBackendServicesGetHealthCall) Context(ctx context.Context) *RegionBackendServicesGetHealthCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionAutoscalersGetCall) Header() http.Header { +func (c *RegionBackendServicesGetHealthCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionAutoscalersGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionBackendServicesGetHealthCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.resourcegroupreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers/{autoscaler}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}/getHealth") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "autoscaler": c.autoscaler, + "project": c.project, + "region": c.region, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionAutoscalers.get" call. -// Exactly one of *Autoscaler or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Autoscaler.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionAutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, error) { +// Do executes the "compute.regionBackendServices.getHealth" call. +// Exactly one of *BackendServiceGroupHealth or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *BackendServiceGroupHealth.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionBackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*BackendServiceGroupHealth, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -51403,7 +63832,7 @@ func (c *RegionAutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Autoscaler{ + ret := &BackendServiceGroupHealth{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -51415,24 +63844,23 @@ func (c *RegionAutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler } return ret, nil // { - // "description": "Returns the specified autoscaler.", - // "httpMethod": "GET", - // "id": "compute.regionAutoscalers.get", + // "description": "Gets the most recent health check results for this regional BackendService.", + // "httpMethod": "POST", + // "id": "compute.regionBackendServices.getHealth", // "parameterOrder": [ // "project", // "region", - // "autoscaler" + // "backendService" // ], // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to return.", + // "backendService": { + // "description": "Name of the BackendService resource to which the queried instance belongs.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, // "project": { - // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, @@ -51446,9 +63874,12 @@ func (c *RegionAutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler // "type": "string" // } // }, - // "path": "{project}/regions/{region}/autoscalers/{autoscaler}", + // "path": "{project}/regions/{region}/backendServices/{backendService}/getHealth", + // "request": { + // "$ref": "ResourceGroupReference" + // }, // "response": { - // "$ref": "Autoscaler" + // "$ref": "BackendServiceGroupHealth" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -51459,25 +63890,28 @@ func (c *RegionAutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler } -// method id "compute.regionAutoscalers.insert": +// method id "compute.regionBackendServices.insert": -type RegionAutoscalersInsertCall struct { - s *Service - project string - region string - autoscaler *Autoscaler - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionBackendServicesInsertCall struct { + s *Service + project string + region string + backendservice *BackendService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates an autoscaler in the specified project using the data -// included in the request. -func (r *RegionAutoscalersService) Insert(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersInsertCall { - c := &RegionAutoscalersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a regional BackendService resource in the specified +// project using the data included in the request. There are several +// restrictions and guidelines to keep in mind when creating a regional +// backend service. Read Restrictions and Guidelines for more +// information. 
+func (r *RegionBackendServicesService) Insert(project string, region string, backendservice *BackendService) *RegionBackendServicesInsertCall { + c := &RegionBackendServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.autoscaler = autoscaler + c.backendservice = backendservice return c } @@ -51495,7 +63929,7 @@ func (r *RegionAutoscalersService) Insert(project string, region string, autosca // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionAutoscalersInsertCall) RequestId(requestId string) *RegionAutoscalersInsertCall { +func (c *RegionBackendServicesInsertCall) RequestId(requestId string) *RegionBackendServicesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -51503,7 +63937,7 @@ func (c *RegionAutoscalersInsertCall) RequestId(requestId string) *RegionAutosca // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionAutoscalersInsertCall) Fields(s ...googleapi.Field) *RegionAutoscalersInsertCall { +func (c *RegionBackendServicesInsertCall) Fields(s ...googleapi.Field) *RegionBackendServicesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -51511,34 +63945,34 @@ func (c *RegionAutoscalersInsertCall) Fields(s ...googleapi.Field) *RegionAutosc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionAutoscalersInsertCall) Context(ctx context.Context) *RegionAutoscalersInsertCall { +func (c *RegionBackendServicesInsertCall) Context(ctx context.Context) *RegionBackendServicesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionAutoscalersInsertCall) Header() http.Header { +func (c *RegionBackendServicesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionAutoscalersInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionBackendServicesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -51549,14 +63983,14 @@ func (c *RegionAutoscalersInsertCall) doRequest(alt string) (*http.Response, err return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionAutoscalers.insert" call. +// Do executes the "compute.regionBackendServices.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionAutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionBackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -51587,9 +64021,9 @@ func (c *RegionAutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Creates an autoscaler in the specified project using the data included in the request.", + // "description": "Creates a regional BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a regional backend service. Read Restrictions and Guidelines for more information.", // "httpMethod": "POST", - // "id": "compute.regionAutoscalers.insert", + // "id": "compute.regionBackendServices.insert", // "parameterOrder": [ // "project", // "region" @@ -51615,9 +64049,9 @@ func (c *RegionAutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operati // "type": "string" // } // }, - // "path": "{project}/regions/{region}/autoscalers", + // "path": "{project}/regions/{region}/backendServices", // "request": { - // "$ref": "Autoscaler" + // "$ref": "BackendService" // }, // "response": { // "$ref": "Operation" @@ -51630,9 +64064,9 @@ func (c *RegionAutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operati } -// method id "compute.regionAutoscalers.list": +// method id "compute.regionBackendServices.list": -type RegionAutoscalersListCall struct { +type RegionBackendServicesListCall struct { s *Service project string region string @@ -51642,10 +64076,10 @@ type RegionAutoscalersListCall struct { header_ http.Header } -// List: Retrieves a list of autoscalers contained within the specified -// region. -func (r *RegionAutoscalersService) List(project string, region string) *RegionAutoscalersListCall { - c := &RegionAutoscalersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of regional BackendService resources +// available to the specified project in the given region. +func (r *RegionBackendServicesService) List(project string, region string) *RegionBackendServicesListCall { + c := &RegionBackendServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region return c @@ -51677,7 +64111,7 @@ func (r *RegionAutoscalersService) List(project string, region string) *RegionAu // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *RegionAutoscalersListCall) Filter(filter string) *RegionAutoscalersListCall { +func (c *RegionBackendServicesListCall) Filter(filter string) *RegionBackendServicesListCall { c.urlParams_.Set("filter", filter) return c } @@ -51688,7 +64122,7 @@ func (c *RegionAutoscalersListCall) Filter(filter string) *RegionAutoscalersList // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. 
// (Default: 500) -func (c *RegionAutoscalersListCall) MaxResults(maxResults int64) *RegionAutoscalersListCall { +func (c *RegionBackendServicesListCall) MaxResults(maxResults int64) *RegionBackendServicesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -51705,7 +64139,7 @@ func (c *RegionAutoscalersListCall) MaxResults(maxResults int64) *RegionAutoscal // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *RegionAutoscalersListCall) OrderBy(orderBy string) *RegionAutoscalersListCall { +func (c *RegionBackendServicesListCall) OrderBy(orderBy string) *RegionBackendServicesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -51713,7 +64147,7 @@ func (c *RegionAutoscalersListCall) OrderBy(orderBy string) *RegionAutoscalersLi // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *RegionAutoscalersListCall) PageToken(pageToken string) *RegionAutoscalersListCall { +func (c *RegionBackendServicesListCall) PageToken(pageToken string) *RegionBackendServicesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -51721,7 +64155,7 @@ func (c *RegionAutoscalersListCall) PageToken(pageToken string) *RegionAutoscale // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionAutoscalersListCall) Fields(s ...googleapi.Field) *RegionAutoscalersListCall { +func (c *RegionBackendServicesListCall) Fields(s ...googleapi.Field) *RegionBackendServicesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -51731,7 +64165,7 @@ func (c *RegionAutoscalersListCall) Fields(s ...googleapi.Field) *RegionAutoscal // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionAutoscalersListCall) IfNoneMatch(entityTag string) *RegionAutoscalersListCall { +func (c *RegionBackendServicesListCall) IfNoneMatch(entityTag string) *RegionBackendServicesListCall { c.ifNoneMatch_ = entityTag return c } @@ -51739,21 +64173,21 @@ func (c *RegionAutoscalersListCall) IfNoneMatch(entityTag string) *RegionAutosca // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionAutoscalersListCall) Context(ctx context.Context) *RegionAutoscalersListCall { +func (c *RegionBackendServicesListCall) Context(ctx context.Context) *RegionBackendServicesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionAutoscalersListCall) Header() http.Header { +func (c *RegionBackendServicesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionAutoscalersListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionBackendServicesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -51764,7 +64198,7 @@ func (c *RegionAutoscalersListCall) doRequest(alt string) (*http.Response, error } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders @@ -51775,14 +64209,14 @@ func (c *RegionAutoscalersListCall) doRequest(alt string) (*http.Response, error return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionAutoscalers.list" call. -// Exactly one of *RegionAutoscalerList or error will be non-nil. Any +// Do executes the "compute.regionBackendServices.list" call. +// Exactly one of *BackendServiceList or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *RegionAutoscalerList.ServerResponse.Header or (if a response was +// *BackendServiceList.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAutoscalerList, error) { +func (c *RegionBackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServiceList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -51801,7 +64235,7 @@ func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAut if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RegionAutoscalerList{ + ret := &BackendServiceList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -51813,9 +64247,9 @@ func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAut } return ret, nil // { - // "description": "Retrieves a list of autoscalers contained within the specified region.", + // "description": "Retrieves the list of regional BackendService resources available to the specified project in the given region.", // "httpMethod": "GET", - // "id": "compute.regionAutoscalers.list", + // "id": "compute.regionBackendServices.list", // "parameterOrder": [ // "project", // "region" @@ -51859,9 +64293,9 @@ func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAut // "type": "string" // } // }, - // "path": "{project}/regions/{region}/autoscalers", + // "path": "{project}/regions/{region}/backendServices", // "response": { - // "$ref": "RegionAutoscalerList" + // "$ref": "BackendServiceList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -51875,7 +64309,7 @@ func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAut // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. 
// The provided context supersedes any context provided to the Context method. -func (c *RegionAutoscalersListCall) Pages(ctx context.Context, f func(*RegionAutoscalerList) error) error { +func (c *RegionBackendServicesListCall) Pages(ctx context.Context, f func(*BackendServiceList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -51893,33 +64327,31 @@ func (c *RegionAutoscalersListCall) Pages(ctx context.Context, f func(*RegionAut } } -// method id "compute.regionAutoscalers.patch": +// method id "compute.regionBackendServices.patch": -type RegionAutoscalersPatchCall struct { - s *Service - project string - region string - autoscaler *Autoscaler - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionBackendServicesPatchCall struct { + s *Service + project string + region string + backendService string + backendservice *BackendService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Updates an autoscaler in the specified project using the data -// included in the request. This method supports PATCH semantics and -// uses the JSON merge patch format and processing rules. -func (r *RegionAutoscalersService) Patch(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersPatchCall { - c := &RegionAutoscalersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Updates the specified regional BackendService resource with +// the data included in the request. There are several restrictions and +// guidelines to keep in mind when updating a backend service. Read +// Restrictions and Guidelines for more information. This method +// supports PATCH semantics and uses the JSON merge patch format and +// processing rules. +func (r *RegionBackendServicesService) Patch(project string, region string, backendService string, backendservice *BackendService) *RegionBackendServicesPatchCall { + c := &RegionBackendServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.autoscaler = autoscaler - return c -} - -// Autoscaler sets the optional parameter "autoscaler": Name of the -// autoscaler to patch. -func (c *RegionAutoscalersPatchCall) Autoscaler(autoscaler string) *RegionAutoscalersPatchCall { - c.urlParams_.Set("autoscaler", autoscaler) + c.backendService = backendService + c.backendservice = backendservice return c } @@ -51929,15 +64361,15 @@ func (c *RegionAutoscalersPatchCall) Autoscaler(autoscaler string) *RegionAutosc // request if it has already been completed. // // For example, consider a situation where you make an initial request -// and then the request times out. If you make the request again with -// the same request ID, the server can check if original operation with -// the same request ID was received, and if so, will ignore the second +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second // request. This prevents clients from accidentally creating duplicate // commitments. // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *RegionAutoscalersPatchCall) RequestId(requestId string) *RegionAutoscalersPatchCall { +func (c *RegionBackendServicesPatchCall) RequestId(requestId string) *RegionBackendServicesPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -51945,168 +64377,7 @@ func (c *RegionAutoscalersPatchCall) RequestId(requestId string) *RegionAutoscal // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionAutoscalersPatchCall) Fields(s ...googleapi.Field) *RegionAutoscalersPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionAutoscalersPatchCall) Context(ctx context.Context) *RegionAutoscalersPatchCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionAutoscalersPatchCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionAutoscalersPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionAutoscalers.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionAutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates an autoscaler in the specified project using the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.regionAutoscalers.patch", - // "parameterOrder": [ - // "project", - // "region" - // ], - // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to patch.", - // "location": "query", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and then the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/autoscalers", - // "request": { - // "$ref": "Autoscaler" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.regionAutoscalers.testIamPermissions": - -type RegionAutoscalersTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *RegionAutoscalersService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionAutoscalersTestIamPermissionsCall { - c := &RegionAutoscalersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionAutoscalersTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionAutoscalersTestIamPermissionsCall { +func (c *RegionBackendServicesPatchCall) Fields(s ...googleapi.Field) *RegionBackendServicesPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -52114,53 +64385,53 @@ func (c *RegionAutoscalersTestIamPermissionsCall) Fields(s ...googleapi.Field) * // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionAutoscalersTestIamPermissionsCall) Context(ctx context.Context) *RegionAutoscalersTestIamPermissionsCall { +func (c *RegionBackendServicesPatchCall) Context(ctx context.Context) *RegionBackendServicesPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionAutoscalersTestIamPermissionsCall) Header() http.Header { +func (c *RegionBackendServicesPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionAutoscalersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionBackendServicesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("PATCH", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, + "region": c.region, + "backendService": c.backendService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionAutoscalers.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionAutoscalersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.regionBackendServices.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionBackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -52179,7 +64450,7 @@ func (c *RegionAutoscalersTestIamPermissionsCall) Do(opts ...googleapi.CallOptio if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -52191,15 +64462,22 @@ func (c *RegionAutoscalersTestIamPermissionsCall) Do(opts ...googleapi.CallOptio } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.regionAutoscalers.testIamPermissions", + // "description": "Updates the specified regional BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.regionBackendServices.patch", // "parameterOrder": [ // "project", // "region", - // "resource" + // "backendService" // ], // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -52208,85 +64486,61 @@ func (c *RegionAutoscalersTestIamPermissionsCall) Do(opts ...googleapi.CallOptio // "type": "string" // }, // "region": { - // "description": "The name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name of the resource for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // } // }, - // "path": "{project}/regions/{region}/autoscalers/{resource}/testIamPermissions", + // "path": "{project}/regions/{region}/backendServices/{backendService}", // "request": { - // "$ref": "TestPermissionsRequest" + // "$ref": "BackendService" // }, // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.regionAutoscalers.update": +// method id "compute.regionBackendServices.testIamPermissions": -type RegionAutoscalersUpdateCall struct { - s *Service - project string - region string - autoscaler *Autoscaler - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionBackendServicesTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates an autoscaler in the specified project using the data -// included in the request. -func (r *RegionAutoscalersService) Update(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersUpdateCall { - c := &RegionAutoscalersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *RegionBackendServicesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionBackendServicesTestIamPermissionsCall { + c := &RegionBackendServicesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.autoscaler = autoscaler - return c -} - -// Autoscaler sets the optional parameter "autoscaler": Name of the -// autoscaler to update. -func (c *RegionAutoscalersUpdateCall) Autoscaler(autoscaler string) *RegionAutoscalersUpdateCall { - c.urlParams_.Set("autoscaler", autoscaler) - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and then the request times out. If you make the request again with -// the same request ID, the server can check if original operation with -// the same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -func (c *RegionAutoscalersUpdateCall) RequestId(requestId string) *RegionAutoscalersUpdateCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *RegionAutoscalersUpdateCall) Fields(s ...googleapi.Field) *RegionAutoscalersUpdateCall { +func (c *RegionBackendServicesTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionBackendServicesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -52294,52 +64548,53 @@ func (c *RegionAutoscalersUpdateCall) Fields(s ...googleapi.Field) *RegionAutosc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionAutoscalersUpdateCall) Context(ctx context.Context) *RegionAutoscalersUpdateCall { +func (c *RegionBackendServicesTestIamPermissionsCall) Context(ctx context.Context) *RegionBackendServicesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionAutoscalersUpdateCall) Header() http.Header { +func (c *RegionBackendServicesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionAutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionBackendServicesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/autoscalers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionAutoscalers.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionAutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionBackendServices.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionBackendServicesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -52358,7 +64613,7 @@ func (c *RegionAutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operati if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -52370,20 +64625,15 @@ func (c *RegionAutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Updates an autoscaler in the specified project using the data included in the request.", - // "httpMethod": "PUT", - // "id": "compute.regionAutoscalers.update", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.regionBackendServices.testIamPermissions", // "parameterOrder": [ // "project", - // "region" + // "region", + // "resource" // ], // "parameters": { - // "autoscaler": { - // "description": "Name of the autoscaler to update.", - // "location": "query", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -52392,51 +64642,59 @@ func (c *RegionAutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operati // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "The name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and then the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.", - // "location": "query", + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/autoscalers", + // "path": "{project}/regions/{region}/backendServices/{resource}/testIamPermissions", // "request": { - // "$ref": "Autoscaler" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionBackendServices.delete": +// method id "compute.regionBackendServices.update": -type RegionBackendServicesDeleteCall struct { +type RegionBackendServicesUpdateCall struct { s *Service project string region string backendService string + backendservice *BackendService urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Delete: Deletes the specified regional BackendService resource. 
-func (r *RegionBackendServicesService) Delete(project string, region string, backendService string) *RegionBackendServicesDeleteCall { - c := &RegionBackendServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates the specified regional BackendService resource with +// the data included in the request. There are several restrictions and +// guidelines to keep in mind when updating a backend service. Read +// Restrictions and Guidelines for more information. +func (r *RegionBackendServicesService) Update(project string, region string, backendService string, backendservice *BackendService) *RegionBackendServicesUpdateCall { + c := &RegionBackendServicesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.backendService = backendService + c.backendservice = backendservice return c } @@ -52454,7 +64712,7 @@ func (r *RegionBackendServicesService) Delete(project string, region string, bac // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionBackendServicesDeleteCall) RequestId(requestId string) *RegionBackendServicesDeleteCall { +func (c *RegionBackendServicesUpdateCall) RequestId(requestId string) *RegionBackendServicesUpdateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -52462,7 +64720,7 @@ func (c *RegionBackendServicesDeleteCall) RequestId(requestId string) *RegionBac // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionBackendServicesDeleteCall) Fields(s ...googleapi.Field) *RegionBackendServicesDeleteCall { +func (c *RegionBackendServicesUpdateCall) Fields(s ...googleapi.Field) *RegionBackendServicesUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -52470,31 +64728,36 @@ func (c *RegionBackendServicesDeleteCall) Fields(s ...googleapi.Field) *RegionBa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionBackendServicesDeleteCall) Context(ctx context.Context) *RegionBackendServicesDeleteCall { +func (c *RegionBackendServicesUpdateCall) Context(ctx context.Context) *RegionBackendServicesUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionBackendServicesDeleteCall) Header() http.Header { +func (c *RegionBackendServicesUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionBackendServicesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionBackendServicesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("PUT", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, @@ -52504,14 +64767,14 @@ func (c *RegionBackendServicesDeleteCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionBackendServices.delete" call. +// Do executes the "compute.regionBackendServices.update" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionBackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionBackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -52542,9 +64805,9 @@ func (c *RegionBackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Deletes the specified regional BackendService resource.", - // "httpMethod": "DELETE", - // "id": "compute.regionBackendServices.delete", + // "description": "Updates the specified regional BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information.", + // "httpMethod": "PUT", + // "id": "compute.regionBackendServices.update", // "parameterOrder": [ // "project", // "region", @@ -52552,7 +64815,7 @@ func (c *RegionBackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope // ], // "parameters": { // "backendService": { - // "description": "Name of the BackendService resource to delete.", + // "description": "Name of the BackendService resource to update.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -52579,6 +64842,9 @@ func (c *RegionBackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope // } // }, // "path": "{project}/regions/{region}/backendServices/{backendService}", + // "request": { + // "$ref": "BackendService" + // }, // "response": { // "$ref": "Operation" // }, @@ -52590,32 +64856,95 @@ func (c *RegionBackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.regionBackendServices.get": +// method id "compute.regionCommitments.aggregatedList": -type RegionBackendServicesGetCall struct { - s *Service - project string - region string - backendService string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionCommitmentsAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified regional BackendService resource. -func (r *RegionBackendServicesService) Get(project string, region string, backendService string) *RegionBackendServicesGetCall { - c := &RegionBackendServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of commitments. 
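// --- Editorial usage sketch (not part of the vendored file) ------------------
// The hunk above adds Patch/Update calls for regional backend services. This is
// a minimal sketch of how the new Update call might be driven; the compute
// import path, the project/region/resource names, and the RequestId value are
// assumptions, and the BackendService body is elided rather than guessed.
package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v0.beta" // version path assumed
)

func main() {
	ctx := context.Background()
	hc, err := google.DefaultClient(ctx, compute.ComputeScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(hc)
	if err != nil {
		log.Fatal(err)
	}
	// Update uses PUT semantics, so the request carries the full resource.
	bs := &compute.BackendService{Name: "my-backend-service"} // remaining fields elided
	op, err := svc.RegionBackendServices.
		Update("my-project", "us-central1", "my-backend-service", bs).
		RequestId("00112233-4455-4677-8899-aabbccddeeff"). // idempotency token, example value
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("update started: operation %s", op.Name)
}
// ------------------------------------------------------------------------------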
+func (r *RegionCommitmentsService) AggregatedList(project string) *RegionCommitmentsAggregatedListCall { + c := &RegionCommitmentsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.backendService = backendService + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *RegionCommitmentsAggregatedListCall) Filter(filter string) *RegionCommitmentsAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *RegionCommitmentsAggregatedListCall) MaxResults(maxResults int64) *RegionCommitmentsAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *RegionCommitmentsAggregatedListCall) OrderBy(orderBy string) *RegionCommitmentsAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. 
+func (c *RegionCommitmentsAggregatedListCall) PageToken(pageToken string) *RegionCommitmentsAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionBackendServicesGetCall) Fields(s ...googleapi.Field) *RegionBackendServicesGetCall { +func (c *RegionCommitmentsAggregatedListCall) Fields(s ...googleapi.Field) *RegionCommitmentsAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -52625,7 +64954,7 @@ func (c *RegionBackendServicesGetCall) Fields(s ...googleapi.Field) *RegionBacke // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionBackendServicesGetCall) IfNoneMatch(entityTag string) *RegionBackendServicesGetCall { +func (c *RegionCommitmentsAggregatedListCall) IfNoneMatch(entityTag string) *RegionCommitmentsAggregatedListCall { c.ifNoneMatch_ = entityTag return c } @@ -52633,21 +64962,21 @@ func (c *RegionBackendServicesGetCall) IfNoneMatch(entityTag string) *RegionBack // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionBackendServicesGetCall) Context(ctx context.Context) *RegionBackendServicesGetCall { +func (c *RegionCommitmentsAggregatedListCall) Context(ctx context.Context) *RegionCommitmentsAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionBackendServicesGetCall) Header() http.Header { +func (c *RegionCommitmentsAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionBackendServicesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionCommitmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -52658,26 +64987,24 @@ func (c *RegionBackendServicesGetCall) doRequest(alt string) (*http.Response, er } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/commitments") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "backendService": c.backendService, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionBackendServices.get" call. -// Exactly one of *BackendService or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *BackendService.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.regionCommitments.aggregatedList" call. +// Exactly one of *CommitmentAggregatedList or error will be non-nil. +// Any non-2xx status code is an error. 
Response headers are in either +// *CommitmentAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionBackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendService, error) { +func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) (*CommitmentAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -52696,7 +65023,7 @@ func (c *RegionBackendServicesGetCall) Do(opts ...googleapi.CallOption) (*Backen if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &BackendService{ + ret := &CommitmentAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -52708,20 +65035,34 @@ func (c *RegionBackendServicesGetCall) Do(opts ...googleapi.CallOption) (*Backen } return ret, nil // { - // "description": "Returns the specified regional BackendService resource.", + // "description": "Retrieves an aggregated list of commitments.", // "httpMethod": "GET", - // "id": "compute.regionBackendServices.get", + // "id": "compute.regionCommitments.aggregatedList", // "parameterOrder": [ - // "project", - // "region", - // "backendService" + // "project" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. 
(Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -52730,18 +65071,11 @@ func (c *RegionBackendServicesGetCall) Do(opts ...googleapi.CallOption) (*Backen // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/backendServices/{backendService}", + // "path": "{project}/aggregated/commitments", // "response": { - // "$ref": "BackendService" + // "$ref": "CommitmentAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -52752,88 +65086,116 @@ func (c *RegionBackendServicesGetCall) Do(opts ...googleapi.CallOption) (*Backen } -// method id "compute.regionBackendServices.getHealth": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionCommitmentsAggregatedListCall) Pages(ctx context.Context, f func(*CommitmentAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type RegionBackendServicesGetHealthCall struct { - s *Service - project string - region string - backendService string - resourcegroupreference *ResourceGroupReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.regionCommitments.get": + +type RegionCommitmentsGetCall struct { + s *Service + project string + region string + commitment string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// GetHealth: Gets the most recent health check results for this -// regional BackendService. -func (r *RegionBackendServicesService) GetHealth(project string, region string, backendService string, resourcegroupreference *ResourceGroupReference) *RegionBackendServicesGetHealthCall { - c := &RegionBackendServicesGetHealthCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified commitment resource. 
Get a list of +// available commitments by making a list() request. +func (r *RegionCommitmentsService) Get(project string, region string, commitment string) *RegionCommitmentsGetCall { + c := &RegionCommitmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.backendService = backendService - c.resourcegroupreference = resourcegroupreference + c.commitment = commitment return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionBackendServicesGetHealthCall) Fields(s ...googleapi.Field) *RegionBackendServicesGetHealthCall { +func (c *RegionCommitmentsGetCall) Fields(s ...googleapi.Field) *RegionCommitmentsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionCommitmentsGetCall) IfNoneMatch(entityTag string) *RegionCommitmentsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionBackendServicesGetHealthCall) Context(ctx context.Context) *RegionBackendServicesGetHealthCall { +func (c *RegionCommitmentsGetCall) Context(ctx context.Context) *RegionCommitmentsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionBackendServicesGetHealthCall) Header() http.Header { +func (c *RegionCommitmentsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionBackendServicesGetHealthCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionCommitmentsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.resourcegroupreference) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}/getHealth") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/commitments/{commitment}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "backendService": c.backendService, + "project": c.project, + "region": c.region, + "commitment": c.commitment, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionBackendServices.getHealth" call. -// Exactly one of *BackendServiceGroupHealth or error will be non-nil. -// Any non-2xx status code is an error. 
Response headers are in either -// *BackendServiceGroupHealth.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionBackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*BackendServiceGroupHealth, error) { +// Do executes the "compute.regionCommitments.get" call. +// Exactly one of *Commitment or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Commitment.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionCommitmentsGetCall) Do(opts ...googleapi.CallOption) (*Commitment, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -52852,7 +65214,7 @@ func (c *RegionBackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (* if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &BackendServiceGroupHealth{ + ret := &Commitment{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -52864,42 +65226,40 @@ func (c *RegionBackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Gets the most recent health check results for this regional BackendService.", - // "httpMethod": "POST", - // "id": "compute.regionBackendServices.getHealth", + // "description": "Returns the specified commitment resource. Get a list of available commitments by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.regionCommitments.get", // "parameterOrder": [ // "project", // "region", - // "backendService" + // "commitment" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to which the queried instance belongs.", + // "commitment": { + // "description": "Name of the commitment to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, // "project": { + // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/backendServices/{backendService}/getHealth", - // "request": { - // "$ref": "ResourceGroupReference" - // }, + // "path": "{project}/regions/{region}/commitments/{commitment}", // "response": { - // "$ref": "BackendServiceGroupHealth" + // "$ref": "Commitment" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -52910,28 +65270,25 @@ func (c *RegionBackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (* } -// method id "compute.regionBackendServices.insert": +// method id "compute.regionCommitments.insert": -type RegionBackendServicesInsertCall struct { - s *Service - project string - region 
string - backendservice *BackendService - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionCommitmentsInsertCall struct { + s *Service + project string + region string + commitment *Commitment + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a regional BackendService resource in the specified -// project using the data included in the request. There are several -// restrictions and guidelines to keep in mind when creating a regional -// backend service. Read Restrictions and Guidelines for more -// information. -func (r *RegionBackendServicesService) Insert(project string, region string, backendservice *BackendService) *RegionBackendServicesInsertCall { - c := &RegionBackendServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a commitment in the specified project using the data +// included in the request. +func (r *RegionCommitmentsService) Insert(project string, region string, commitment *Commitment) *RegionCommitmentsInsertCall { + c := &RegionCommitmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.backendservice = backendservice + c.commitment = commitment return c } @@ -52949,7 +65306,7 @@ func (r *RegionBackendServicesService) Insert(project string, region string, bac // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionBackendServicesInsertCall) RequestId(requestId string) *RegionBackendServicesInsertCall { +func (c *RegionCommitmentsInsertCall) RequestId(requestId string) *RegionCommitmentsInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -52957,7 +65314,7 @@ func (c *RegionBackendServicesInsertCall) RequestId(requestId string) *RegionBac // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionBackendServicesInsertCall) Fields(s ...googleapi.Field) *RegionBackendServicesInsertCall { +func (c *RegionCommitmentsInsertCall) Fields(s ...googleapi.Field) *RegionCommitmentsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -52965,34 +65322,34 @@ func (c *RegionBackendServicesInsertCall) Fields(s ...googleapi.Field) *RegionBa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionBackendServicesInsertCall) Context(ctx context.Context) *RegionBackendServicesInsertCall { +func (c *RegionCommitmentsInsertCall) Context(ctx context.Context) *RegionCommitmentsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
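// --- Editorial usage sketch (not part of the vendored file) ------------------
// The hunk above introduces compute.regionCommitments.insert. A minimal sketch
// of creating a commitment; it assumes a *compute.Service built as in the first
// sketch. Only a Name is set: the plan/resource fields of Commitment are
// defined elsewhere in this file and are omitted here rather than guessed.
package example

import (
	"context"

	compute "google.golang.org/api/compute/v0.beta" // version path assumed
)

func createCommitment(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	c := &compute.Commitment{Name: "my-commitment"} // remaining fields elided
	return svc.RegionCommitments.
		Insert("my-project", "us-central1", c).
		Context(ctx).
		Do()
}
// ------------------------------------------------------------------------------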
-func (c *RegionBackendServicesInsertCall) Header() http.Header { +func (c *RegionCommitmentsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionBackendServicesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionCommitmentsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.commitment) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/commitments") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -53003,14 +65360,14 @@ func (c *RegionBackendServicesInsertCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionBackendServices.insert" call. +// Do executes the "compute.regionCommitments.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionBackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionCommitmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -53041,9 +65398,9 @@ func (c *RegionBackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Creates a regional BackendService resource in the specified project using the data included in the request. There are several restrictions and guidelines to keep in mind when creating a regional backend service. 
Read Restrictions and Guidelines for more information.", + // "description": "Creates a commitment in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.regionBackendServices.insert", + // "id": "compute.regionCommitments.insert", // "parameterOrder": [ // "project", // "region" @@ -53057,7 +65414,7 @@ func (c *RegionBackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -53069,9 +65426,9 @@ func (c *RegionBackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // } // }, - // "path": "{project}/regions/{region}/backendServices", + // "path": "{project}/regions/{region}/commitments", // "request": { - // "$ref": "BackendService" + // "$ref": "Commitment" // }, // "response": { // "$ref": "Operation" @@ -53084,9 +65441,9 @@ func (c *RegionBackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.regionBackendServices.list": +// method id "compute.regionCommitments.list": -type RegionBackendServicesListCall struct { +type RegionCommitmentsListCall struct { s *Service project string region string @@ -53096,10 +65453,10 @@ type RegionBackendServicesListCall struct { header_ http.Header } -// List: Retrieves the list of regional BackendService resources -// available to the specified project in the given region. -func (r *RegionBackendServicesService) List(project string, region string) *RegionBackendServicesListCall { - c := &RegionBackendServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of commitments contained within the specified +// region. +func (r *RegionCommitmentsService) List(project string, region string) *RegionCommitmentsListCall { + c := &RegionCommitmentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region return c @@ -53131,7 +65488,7 @@ func (r *RegionBackendServicesService) List(project string, region string) *Regi // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *RegionBackendServicesListCall) Filter(filter string) *RegionBackendServicesListCall { +func (c *RegionCommitmentsListCall) Filter(filter string) *RegionCommitmentsListCall { c.urlParams_.Set("filter", filter) return c } @@ -53142,7 +65499,7 @@ func (c *RegionBackendServicesListCall) Filter(filter string) *RegionBackendServ // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *RegionBackendServicesListCall) MaxResults(maxResults int64) *RegionBackendServicesListCall { +func (c *RegionCommitmentsListCall) MaxResults(maxResults int64) *RegionCommitmentsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -53159,7 +65516,7 @@ func (c *RegionBackendServicesListCall) MaxResults(maxResults int64) *RegionBack // // Currently, only sorting by name or creationTimestamp desc is // supported. 
-func (c *RegionBackendServicesListCall) OrderBy(orderBy string) *RegionBackendServicesListCall { +func (c *RegionCommitmentsListCall) OrderBy(orderBy string) *RegionCommitmentsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -53167,7 +65524,7 @@ func (c *RegionBackendServicesListCall) OrderBy(orderBy string) *RegionBackendSe // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *RegionBackendServicesListCall) PageToken(pageToken string) *RegionBackendServicesListCall { +func (c *RegionCommitmentsListCall) PageToken(pageToken string) *RegionCommitmentsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -53175,7 +65532,7 @@ func (c *RegionBackendServicesListCall) PageToken(pageToken string) *RegionBacke // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionBackendServicesListCall) Fields(s ...googleapi.Field) *RegionBackendServicesListCall { +func (c *RegionCommitmentsListCall) Fields(s ...googleapi.Field) *RegionCommitmentsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -53185,7 +65542,7 @@ func (c *RegionBackendServicesListCall) Fields(s ...googleapi.Field) *RegionBack // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionBackendServicesListCall) IfNoneMatch(entityTag string) *RegionBackendServicesListCall { +func (c *RegionCommitmentsListCall) IfNoneMatch(entityTag string) *RegionCommitmentsListCall { c.ifNoneMatch_ = entityTag return c } @@ -53193,21 +65550,21 @@ func (c *RegionBackendServicesListCall) IfNoneMatch(entityTag string) *RegionBac // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionBackendServicesListCall) Context(ctx context.Context) *RegionBackendServicesListCall { +func (c *RegionCommitmentsListCall) Context(ctx context.Context) *RegionCommitmentsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionBackendServicesListCall) Header() http.Header { +func (c *RegionCommitmentsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionBackendServicesListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionCommitmentsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -53218,7 +65575,7 @@ func (c *RegionBackendServicesListCall) doRequest(alt string) (*http.Response, e } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/commitments") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders @@ -53229,14 +65586,14 @@ func (c *RegionBackendServicesListCall) doRequest(alt string) (*http.Response, e return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionBackendServices.list" call. 
-// Exactly one of *BackendServiceList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *BackendServiceList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.regionCommitments.list" call. +// Exactly one of *CommitmentList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *CommitmentList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionBackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServiceList, error) { +func (c *RegionCommitmentsListCall) Do(opts ...googleapi.CallOption) (*CommitmentList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -53255,7 +65612,7 @@ func (c *RegionBackendServicesListCall) Do(opts ...googleapi.CallOption) (*Backe if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &BackendServiceList{ + ret := &CommitmentList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -53267,9 +65624,9 @@ func (c *RegionBackendServicesListCall) Do(opts ...googleapi.CallOption) (*Backe } return ret, nil // { - // "description": "Retrieves the list of regional BackendService resources available to the specified project in the given region.", + // "description": "Retrieves a list of commitments contained within the specified region.", // "httpMethod": "GET", - // "id": "compute.regionBackendServices.list", + // "id": "compute.regionCommitments.list", // "parameterOrder": [ // "project", // "region" @@ -53306,16 +65663,16 @@ func (c *RegionBackendServicesListCall) Do(opts ...googleapi.CallOption) (*Backe // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/backendServices", + // "path": "{project}/regions/{region}/commitments", // "response": { - // "$ref": "BackendServiceList" + // "$ref": "CommitmentList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -53329,7 +65686,7 @@ func (c *RegionBackendServicesListCall) Do(opts ...googleapi.CallOption) (*Backe // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
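// --- Editorial usage sketch (not part of the vendored file) ------------------
// The Do/Pages plumbing above now serves compute.regionCommitments.list. A
// minimal sketch of walking every commitment in one region; it assumes a
// *compute.Service built as in the first sketch, and the Items/Name fields on
// CommitmentList/Commitment follow the usual layout of generated list types
// (they are defined elsewhere in this file, not in this hunk).
package example

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v0.beta" // version path assumed
)

func logRegionCommitments(ctx context.Context, svc *compute.Service) error {
	return svc.RegionCommitments.
		List("my-project", "us-central1").
		OrderBy("creationTimestamp desc"). // newest first, per the OrderBy doc above
		Pages(ctx, func(page *compute.CommitmentList) error {
			for _, c := range page.Items { // Items field assumed, see note above
				log.Printf("commitment: %s", c.Name)
			}
			return nil
		})
}
// ------------------------------------------------------------------------------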
-func (c *RegionBackendServicesListCall) Pages(ctx context.Context, f func(*BackendServiceList) error) error { +func (c *RegionCommitmentsListCall) Pages(ctx context.Context, f func(*CommitmentList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -53347,31 +65704,406 @@ func (c *RegionBackendServicesListCall) Pages(ctx context.Context, f func(*Backe } } -// method id "compute.regionBackendServices.patch": +// method id "compute.regionInstanceGroupManagers.abandonInstances": + +type RegionInstanceGroupManagersAbandonInstancesCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagersabandoninstancesrequest *RegionInstanceGroupManagersAbandonInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// AbandonInstances: Schedules a group action to remove the specified +// instances from the managed instance group. Abandoning an instance +// does not delete the instance, but it does remove the instance from +// any target pools that are applied by the managed instance group. This +// method reduces the targetSize of the managed instance group by the +// number of instances that you abandon. This operation is marked as +// DONE when the action is scheduled even if the instances have not yet +// been removed from the group. You must separately verify the status of +// the abandoning action with the listmanagedinstances method. +// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or +// deleted. +// +// You can specify a maximum of 1000 instances with this method per +// request. +func (r *RegionInstanceGroupManagersService) AbandonInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersabandoninstancesrequest *RegionInstanceGroupManagersAbandonInstancesRequest) *RegionInstanceGroupManagersAbandonInstancesCall { + c := &RegionInstanceGroupManagersAbandonInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagersabandoninstancesrequest = regioninstancegroupmanagersabandoninstancesrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionInstanceGroupManagersAbandonInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersAbandonInstancesCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *RegionInstanceGroupManagersAbandonInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersAbandonInstancesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionInstanceGroupManagersAbandonInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersAbandonInstancesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionInstanceGroupManagersAbandonInstancesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersabandoninstancesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionInstanceGroupManagers.abandonInstances" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Schedules a group action to remove the specified instances from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. 
You must separately verify the status of the abandoning action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroupManagers.abandonInstances", + // "parameterOrder": [ + // "project", + // "region", + // "instanceGroupManager" + // ], + // "parameters": { + // "instanceGroupManager": { + // "description": "Name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", + // "request": { + // "$ref": "RegionInstanceGroupManagersAbandonInstancesRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionInstanceGroupManagers.delete": + +type RegionInstanceGroupManagersDeleteCall struct { + s *Service + project string + region string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified managed instance group and all of the +// instances in that group. +func (r *RegionInstanceGroupManagersService) Delete(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersDeleteCall { + c := &RegionInstanceGroupManagersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.instanceGroupManager = instanceGroupManager + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionInstanceGroupManagersDeleteCall) RequestId(requestId string) *RegionInstanceGroupManagersDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionInstanceGroupManagersDeleteCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionInstanceGroupManagersDeleteCall) Context(ctx context.Context) *RegionInstanceGroupManagersDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionInstanceGroupManagersDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionInstanceGroupManagers.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified managed instance group and all of the instances in that group.", + // "httpMethod": "DELETE", + // "id": "compute.regionInstanceGroupManagers.delete", + // "parameterOrder": [ + // "project", + // "region", + // "instanceGroupManager" + // ], + // "parameters": { + // "instanceGroupManager": { + // "description": "Name of the managed instance group to delete.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } -type RegionBackendServicesPatchCall struct { - s *Service - project string - region string - backendService string - backendservice *BackendService - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header } -// Patch: Updates the specified regional BackendService resource with -// the data included in the request. There are several restrictions and -// guidelines to keep in mind when updating a backend service. Read -// Restrictions and Guidelines for more information. This method -// supports PATCH semantics and uses the JSON merge patch format and -// processing rules. 
-func (r *RegionBackendServicesService) Patch(project string, region string, backendService string, backendservice *BackendService) *RegionBackendServicesPatchCall { - c := &RegionBackendServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// method id "compute.regionInstanceGroupManagers.deleteInstances": + +type RegionInstanceGroupManagersDeleteInstancesCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagersdeleteinstancesrequest *RegionInstanceGroupManagersDeleteInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// DeleteInstances: Schedules a group action to delete the specified +// instances in the managed instance group. The instances are also +// removed from any target pools of which they were a member. This +// method reduces the targetSize of the managed instance group by the +// number of instances that you delete. This operation is marked as DONE +// when the action is scheduled even if the instances are still being +// deleted. You must separately verify the status of the deleting action +// with the listmanagedinstances method. +// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or +// deleted. +// +// You can specify a maximum of 1000 instances with this method per +// request. +func (r *RegionInstanceGroupManagersService) DeleteInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersdeleteinstancesrequest *RegionInstanceGroupManagersDeleteInstancesRequest) *RegionInstanceGroupManagersDeleteInstancesCall { + c := &RegionInstanceGroupManagersDeleteInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.backendService = backendService - c.backendservice = backendservice + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagersdeleteinstancesrequest = regioninstancegroupmanagersdeleteinstancesrequest return c } @@ -53389,7 +66121,7 @@ func (r *RegionBackendServicesService) Patch(project string, region string, back // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionBackendServicesPatchCall) RequestId(requestId string) *RegionBackendServicesPatchCall { +func (c *RegionInstanceGroupManagersDeleteInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersDeleteInstancesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -53397,7 +66129,7 @@ func (c *RegionBackendServicesPatchCall) RequestId(requestId string) *RegionBack // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionBackendServicesPatchCall) Fields(s ...googleapi.Field) *RegionBackendServicesPatchCall { +func (c *RegionInstanceGroupManagersDeleteInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersDeleteInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -53405,53 +66137,53 @@ func (c *RegionBackendServicesPatchCall) Fields(s ...googleapi.Field) *RegionBac // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
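A rough sketch of the new regionInstanceGroupManagers.deleteInstances call together with the requestId parameter described above. svc is an authenticated *compute.Service built as in the earlier sketch (same imports); the Instances field on RegionInstanceGroupManagersDeleteInstancesRequest and all resource names are assumptions, not taken from this hunk.

// deleteTwoInstances schedules deletion of two instances from a regional
// managed instance group. requestID should be a unique, non-zero UUID so
// that a retried call is recognized and ignored by the server.
func deleteTwoInstances(ctx context.Context, svc *compute.Service, requestID string) error {
	req := &compute.RegionInstanceGroupManagersDeleteInstancesRequest{
		// Instances is assumed to take instance URLs, as in the zonal API.
		Instances: []string{
			"zones/us-central1-b/instances/web-abcd",
			"zones/us-central1-c/instances/web-efgh",
		},
	}
	op, err := svc.RegionInstanceGroupManagers.
		DeleteInstances("my-project", "us-central1", "web-mig", req).
		RequestId(requestID).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	// The operation is DONE as soon as the action is scheduled; the instances
	// may still be deleting, so verify with listManagedInstances separately.
	log.Printf("deleteInstances scheduled: operation %s (%s)", op.Name, op.Status)
	return nil
}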
-func (c *RegionBackendServicesPatchCall) Context(ctx context.Context) *RegionBackendServicesPatchCall { +func (c *RegionInstanceGroupManagersDeleteInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersDeleteInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionBackendServicesPatchCall) Header() http.Header { +func (c *RegionInstanceGroupManagersDeleteInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionBackendServicesPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersdeleteinstancesrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deleteInstances") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "backendService": c.backendService, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionBackendServices.patch" call. +// Do executes the "compute.regionInstanceGroupManagers.deleteInstances" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionBackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -53482,19 +66214,18 @@ func (c *RegionBackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Updates the specified regional BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.regionBackendServices.patch", + // "description": "Schedules a group action to delete the specified instances in the managed instance group. The instances are also removed from any target pools of which they were a member. 
This method reduces the targetSize of the managed instance group by the number of instances that you delete. This operation is marked as DONE when the action is scheduled even if the instances are still being deleted. You must separately verify the status of the deleting action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroupManagers.deleteInstances", // "parameterOrder": [ // "project", // "region", - // "backendService" + // "instanceGroupManager" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to patch.", + // "instanceGroupManager": { + // "description": "Name of the managed instance group.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -53508,7 +66239,6 @@ func (c *RegionBackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Oper // "region": { // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -53518,104 +66248,110 @@ func (c *RegionBackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Oper // "type": "string" // } // }, - // "path": "{project}/regions/{region}/backendServices/{backendService}", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", // "request": { - // "$ref": "BackendService" + // "$ref": "RegionInstanceGroupManagersDeleteInstancesRequest" // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.regionBackendServices.testIamPermissions": +// method id "compute.regionInstanceGroupManagers.get": -type RegionBackendServicesTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersGetCall struct { + s *Service + project string + region string + instanceGroupManager string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *RegionBackendServicesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionBackendServicesTestIamPermissionsCall { - c := &RegionBackendServicesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns all of the details about the specified managed instance +// group. 
+func (r *RegionInstanceGroupManagersService) Get(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersGetCall { + c := &RegionInstanceGroupManagersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.instanceGroupManager = instanceGroupManager return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionBackendServicesTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionBackendServicesTestIamPermissionsCall { +func (c *RegionInstanceGroupManagersGetCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionInstanceGroupManagersGetCall) IfNoneMatch(entityTag string) *RegionInstanceGroupManagersGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionBackendServicesTestIamPermissionsCall) Context(ctx context.Context) *RegionBackendServicesTestIamPermissionsCall { +func (c *RegionInstanceGroupManagersGetCall) Context(ctx context.Context) *RegionInstanceGroupManagersGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionBackendServicesTestIamPermissionsCall) Header() http.Header { +func (c *RegionInstanceGroupManagersGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionBackendServicesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionBackendServices.testIamPermissions" call. 
-// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// Do executes the "compute.regionInstanceGroupManagers.get" call. +// Exactly one of *InstanceGroupManager or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// *InstanceGroupManager.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionBackendServicesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *RegionInstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManager, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -53634,7 +66370,7 @@ func (c *RegionBackendServicesTestIamPermissionsCall) Do(opts ...googleapi.CallO if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &InstanceGroupManager{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -53646,43 +66382,38 @@ func (c *RegionBackendServicesTestIamPermissionsCall) Do(opts ...googleapi.CallO } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.regionBackendServices.testIamPermissions", + // "description": "Returns all of the details about the specified managed instance group.", + // "httpMethod": "GET", + // "id": "compute.regionInstanceGroupManagers.get", // "parameterOrder": [ // "project", // "region", - // "resource" + // "instanceGroupManager" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "instanceGroupManager": { + // "description": "Name of the managed instance group to return.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "region": { - // "description": "The name of the region for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name of the resource for this request.", + // "region": { + // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/backendServices/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "InstanceGroupManager" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -53693,29 +66424,32 @@ func (c *RegionBackendServicesTestIamPermissionsCall) Do(opts ...googleapi.CallO } -// method id 
"compute.regionBackendServices.update": +// method id "compute.regionInstanceGroupManagers.insert": -type RegionBackendServicesUpdateCall struct { - s *Service - project string - region string - backendService string - backendservice *BackendService - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersInsertCall struct { + s *Service + project string + region string + instancegroupmanager *InstanceGroupManager + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates the specified regional BackendService resource with -// the data included in the request. There are several restrictions and -// guidelines to keep in mind when updating a backend service. Read -// Restrictions and Guidelines for more information. -func (r *RegionBackendServicesService) Update(project string, region string, backendService string, backendservice *BackendService) *RegionBackendServicesUpdateCall { - c := &RegionBackendServicesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a managed instance group using the information that +// you specify in the request. After the group is created, it schedules +// an action to create instances in the group using the specified +// instance template. This operation is marked as DONE when the group is +// created even if the instances in the group have not yet been created. +// You must separately verify the status of the individual instances +// with the listmanagedinstances method. +// +// A regional managed instance group can contain up to 2000 instances. +func (r *RegionInstanceGroupManagersService) Insert(project string, region string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersInsertCall { + c := &RegionInstanceGroupManagersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.backendService = backendService - c.backendservice = backendservice + c.instancegroupmanager = instancegroupmanager return c } @@ -53733,7 +66467,7 @@ func (r *RegionBackendServicesService) Update(project string, region string, bac // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionBackendServicesUpdateCall) RequestId(requestId string) *RegionBackendServicesUpdateCall { +func (c *RegionInstanceGroupManagersInsertCall) RequestId(requestId string) *RegionInstanceGroupManagersInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -53741,7 +66475,7 @@ func (c *RegionBackendServicesUpdateCall) RequestId(requestId string) *RegionBac // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionBackendServicesUpdateCall) Fields(s ...googleapi.Field) *RegionBackendServicesUpdateCall { +func (c *RegionInstanceGroupManagersInsertCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -53749,53 +66483,52 @@ func (c *RegionBackendServicesUpdateCall) Fields(s ...googleapi.Field) *RegionBa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *RegionBackendServicesUpdateCall) Context(ctx context.Context) *RegionBackendServicesUpdateCall { +func (c *RegionInstanceGroupManagersInsertCall) Context(ctx context.Context) *RegionInstanceGroupManagersInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionBackendServicesUpdateCall) Header() http.Header { +func (c *RegionInstanceGroupManagersInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionBackendServicesUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/backendServices/{backendService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "backendService": c.backendService, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionBackendServices.update" call. +// Do executes the "compute.regionInstanceGroupManagers.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionBackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -53826,22 +66559,14 @@ func (c *RegionBackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Updates the specified regional BackendService resource with the data included in the request. There are several restrictions and guidelines to keep in mind when updating a backend service. Read Restrictions and Guidelines for more information.", - // "httpMethod": "PUT", - // "id": "compute.regionBackendServices.update", + // "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, it schedules an action to create instances in the group using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. 
You must separately verify the status of the individual instances with the listmanagedinstances method.\n\nA regional managed instance group can contain up to 2000 instances.", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroupManagers.insert", // "parameterOrder": [ // "project", - // "region", - // "backendService" + // "region" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to update.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -53852,7 +66577,6 @@ func (c *RegionBackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Ope // "region": { // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -53862,9 +66586,9 @@ func (c *RegionBackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Ope // "type": "string" // } // }, - // "path": "{project}/regions/{region}/backendServices/{backendService}", + // "path": "{project}/regions/{region}/instanceGroupManagers", // "request": { - // "$ref": "BackendService" + // "$ref": "InstanceGroupManager" // }, // "response": { // "$ref": "Operation" @@ -53877,21 +66601,24 @@ func (c *RegionBackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Ope } -// method id "compute.regionCommitments.aggregatedList": +// method id "compute.regionInstanceGroupManagers.list": -type RegionCommitmentsAggregatedListCall struct { +type RegionInstanceGroupManagersListCall struct { s *Service project string + region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// AggregatedList: Retrieves an aggregated list of commitments. -func (r *RegionCommitmentsService) AggregatedList(project string) *RegionCommitmentsAggregatedListCall { - c := &RegionCommitmentsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of managed instance groups that are +// contained within the specified region. +func (r *RegionInstanceGroupManagersService) List(project string, region string) *RegionInstanceGroupManagersListCall { + c := &RegionInstanceGroupManagersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project + c.region = region return c } @@ -53921,7 +66648,7 @@ func (r *RegionCommitmentsService) AggregatedList(project string) *RegionCommitm // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *RegionCommitmentsAggregatedListCall) Filter(filter string) *RegionCommitmentsAggregatedListCall { +func (c *RegionInstanceGroupManagersListCall) Filter(filter string) *RegionInstanceGroupManagersListCall { c.urlParams_.Set("filter", filter) return c } @@ -53932,7 +66659,7 @@ func (c *RegionCommitmentsAggregatedListCall) Filter(filter string) *RegionCommi // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. 
// (Default: 500) -func (c *RegionCommitmentsAggregatedListCall) MaxResults(maxResults int64) *RegionCommitmentsAggregatedListCall { +func (c *RegionInstanceGroupManagersListCall) MaxResults(maxResults int64) *RegionInstanceGroupManagersListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -53949,7 +66676,7 @@ func (c *RegionCommitmentsAggregatedListCall) MaxResults(maxResults int64) *Regi // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *RegionCommitmentsAggregatedListCall) OrderBy(orderBy string) *RegionCommitmentsAggregatedListCall { +func (c *RegionInstanceGroupManagersListCall) OrderBy(orderBy string) *RegionInstanceGroupManagersListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -53957,7 +66684,7 @@ func (c *RegionCommitmentsAggregatedListCall) OrderBy(orderBy string) *RegionCom // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *RegionCommitmentsAggregatedListCall) PageToken(pageToken string) *RegionCommitmentsAggregatedListCall { +func (c *RegionInstanceGroupManagersListCall) PageToken(pageToken string) *RegionInstanceGroupManagersListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -53965,7 +66692,7 @@ func (c *RegionCommitmentsAggregatedListCall) PageToken(pageToken string) *Regio // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionCommitmentsAggregatedListCall) Fields(s ...googleapi.Field) *RegionCommitmentsAggregatedListCall { +func (c *RegionInstanceGroupManagersListCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -53975,7 +66702,7 @@ func (c *RegionCommitmentsAggregatedListCall) Fields(s ...googleapi.Field) *Regi // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionCommitmentsAggregatedListCall) IfNoneMatch(entityTag string) *RegionCommitmentsAggregatedListCall { +func (c *RegionInstanceGroupManagersListCall) IfNoneMatch(entityTag string) *RegionInstanceGroupManagersListCall { c.ifNoneMatch_ = entityTag return c } @@ -53983,21 +66710,21 @@ func (c *RegionCommitmentsAggregatedListCall) IfNoneMatch(entityTag string) *Reg // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionCommitmentsAggregatedListCall) Context(ctx context.Context) *RegionCommitmentsAggregatedListCall { +func (c *RegionInstanceGroupManagersListCall) Context(ctx context.Context) *RegionInstanceGroupManagersListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
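To illustrate the regionInstanceGroupManagers.list call and its paging parameters, the sketch below filters the managed instance groups in one region and walks every page. The Items field on RegionInstanceGroupManagerList is assumed; everything else matches the builder methods shown in this hunk, with svc built as in the first sketch.

// listRegionalMIGs pages through the managed instance groups in a region,
// bounding each page with maxResults and restricting the results with a
// filter expression of the form documented above.
func listRegionalMIGs(ctx context.Context, svc *compute.Service) error {
	call := svc.RegionInstanceGroupManagers.
		List("my-project", "us-central1").
		Filter("name eq web-.*").
		MaxResults(100)
	return call.Pages(ctx, func(page *compute.RegionInstanceGroupManagerList) error {
		for _, igm := range page.Items {
			log.Printf("%s: target size %d", igm.Name, igm.TargetSize)
		}
		return nil
	})
}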
-func (c *RegionCommitmentsAggregatedListCall) Header() http.Header { +func (c *RegionInstanceGroupManagersListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionCommitmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -54008,24 +66735,25 @@ func (c *RegionCommitmentsAggregatedListCall) doRequest(alt string) (*http.Respo } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/commitments") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionCommitments.aggregatedList" call. -// Exactly one of *CommitmentAggregatedList or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *CommitmentAggregatedList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.regionInstanceGroupManagers.list" call. +// Exactly one of *RegionInstanceGroupManagerList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *RegionInstanceGroupManagerList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) (*CommitmentAggregatedList, error) { +func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupManagerList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -54044,7 +66772,7 @@ func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) ( if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &CommitmentAggregatedList{ + ret := &RegionInstanceGroupManagerList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -54056,11 +66784,12 @@ func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Retrieves an aggregated list of commitments.", + // "description": "Retrieves the list of managed instance groups that are contained within the specified region.", // "httpMethod": "GET", - // "id": "compute.regionCommitments.aggregatedList", + // "id": "compute.regionInstanceGroupManagers.list", // "parameterOrder": [ - // "project" + // "project", + // "region" // ], // "parameters": { // "filter": { @@ -54092,11 +66821,17 @@ func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) ( // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/aggregated/commitments", + // "path": "{project}/regions/{region}/instanceGroupManagers", // "response": { - // "$ref": "CommitmentAggregatedList" + // "$ref": "RegionInstanceGroupManagerList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -54110,7 +66845,7 @@ func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) ( // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *RegionCommitmentsAggregatedListCall) Pages(ctx context.Context, f func(*CommitmentAggregatedList) error) error { +func (c *RegionInstanceGroupManagersListCall) Pages(ctx context.Context, f func(*RegionInstanceGroupManagerList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -54128,214 +66863,58 @@ func (c *RegionCommitmentsAggregatedListCall) Pages(ctx context.Context, f func( } } -// method id "compute.regionCommitments.get": - -type RegionCommitmentsGetCall struct { - s *Service - project string - region string - commitment string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: Returns the specified commitment resource. Get a list of -// available commitments by making a list() request. -func (r *RegionCommitmentsService) Get(project string, region string, commitment string) *RegionCommitmentsGetCall { - c := &RegionCommitmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.commitment = commitment - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *RegionCommitmentsGetCall) Fields(s ...googleapi.Field) *RegionCommitmentsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionCommitmentsGetCall) IfNoneMatch(entityTag string) *RegionCommitmentsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionCommitmentsGetCall) Context(ctx context.Context) *RegionCommitmentsGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RegionCommitmentsGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionCommitmentsGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/commitments/{commitment}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "commitment": c.commitment, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} +// method id "compute.regionInstanceGroupManagers.listManagedInstances": -// Do executes the "compute.regionCommitments.get" call. -// Exactly one of *Commitment or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Commitment.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionCommitmentsGetCall) Do(opts ...googleapi.CallOption) (*Commitment, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Commitment{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified commitment resource. 
Get a list of available commitments by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.regionCommitments.get", - // "parameterOrder": [ - // "project", - // "region", - // "commitment" - // ], - // "parameters": { - // "commitment": { - // "description": "Name of the commitment to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/commitments/{commitment}", - // "response": { - // "$ref": "Commitment" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } +type RegionInstanceGroupManagersListManagedInstancesCall struct { + s *Service + project string + region string + instanceGroupManager string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} +// ListManagedInstances: Lists the instances in the managed instance +// group and instances that are scheduled to be created. The list +// includes any current actions that the group has scheduled for its +// instances. +func (r *RegionInstanceGroupManagersService) ListManagedInstances(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersListManagedInstancesCall { + c := &RegionInstanceGroupManagersListManagedInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.instanceGroupManager = instanceGroupManager + return c } -// method id "compute.regionCommitments.insert": +// Filter sets the optional parameter "filter": +func (c *RegionInstanceGroupManagersListManagedInstancesCall) Filter(filter string) *RegionInstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("filter", filter) + return c +} -type RegionCommitmentsInsertCall struct { - s *Service - project string - region string - commitment *Commitment - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// MaxResults sets the optional parameter "maxResults": +func (c *RegionInstanceGroupManagersListManagedInstancesCall) MaxResults(maxResults int64) *RegionInstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c } -// Insert: Creates a commitment in the specified project using the data -// included in the request. 
-func (r *RegionCommitmentsService) Insert(project string, region string, commitment *Commitment) *RegionCommitmentsInsertCall { - c := &RegionCommitmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.commitment = commitment +// OrderBy sets the optional parameter "order_by": +func (c *RegionInstanceGroupManagersListManagedInstancesCall) OrderBy(orderBy string) *RegionInstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("order_by", orderBy) return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionCommitmentsInsertCall) RequestId(requestId string) *RegionCommitmentsInsertCall { - c.urlParams_.Set("requestId", requestId) +// PageToken sets the optional parameter "pageToken": +func (c *RegionInstanceGroupManagersListManagedInstancesCall) PageToken(pageToken string) *RegionInstanceGroupManagersListManagedInstancesCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionCommitmentsInsertCall) Fields(s ...googleapi.Field) *RegionCommitmentsInsertCall { +func (c *RegionInstanceGroupManagersListManagedInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersListManagedInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -54343,52 +66922,50 @@ func (c *RegionCommitmentsInsertCall) Fields(s ...googleapi.Field) *RegionCommit // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionCommitmentsInsertCall) Context(ctx context.Context) *RegionCommitmentsInsertCall { +func (c *RegionInstanceGroupManagersListManagedInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersListManagedInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionCommitmentsInsertCall) Header() http.Header { +func (c *RegionInstanceGroupManagersListManagedInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionCommitmentsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.commitment) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/commitments") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionCommitments.insert" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionCommitmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionInstanceGroupManagers.listManagedInstances" call. +// Exactly one of *RegionInstanceGroupManagersListInstancesResponse or +// error will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *RegionInstanceGroupManagersListInstancesResponse.ServerResponse.Heade +// r or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupManagersListInstancesResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -54407,7 +66984,7 @@ func (c *RegionCommitmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operati if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &RegionInstanceGroupManagersListInstancesResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -54419,14 +66996,40 @@ func (c *RegionCommitmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operati } return ret, nil // { - // "description": "Creates a commitment in the specified project using the data included in the request.", + // "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. 
The list includes any current actions that the group has scheduled for its instances.", // "httpMethod": "POST", - // "id": "compute.regionCommitments.insert", + // "id": "compute.regionInstanceGroupManagers.listManagedInstances", // "parameterOrder": [ // "project", - // "region" + // "region", + // "instanceGroupManager" // ], // "parameters": { + // "filter": { + // "location": "query", + // "type": "string" + // }, + // "instanceGroupManager": { + // "description": "The name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "order_by": { + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -54435,186 +67038,152 @@ func (c *RegionCommitmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operati // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/commitments", - // "request": { - // "$ref": "Commitment" - // }, + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", // "response": { - // "$ref": "Operation" + // "$ref": "RegionInstanceGroupManagersListInstancesResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionCommitments.list": - -type RegionCommitmentsListCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *RegionInstanceGroupManagersListManagedInstancesCall) Pages(ctx context.Context, f func(*RegionInstanceGroupManagersListInstancesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } } -// List: Retrieves a list of commitments contained within the specified -// region. -func (r *RegionCommitmentsService) List(project string, region string) *RegionCommitmentsListCall { - c := &RegionCommitmentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - return c -} +// method id "compute.regionInstanceGroupManagers.patch": -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *RegionCommitmentsListCall) Filter(filter string) *RegionCommitmentsListCall { - c.urlParams_.Set("filter", filter) - return c +type RegionInstanceGroupManagersPatchCall struct { + s *Service + project string + region string + instanceGroupManager string + instancegroupmanager *InstanceGroupManager + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *RegionCommitmentsListCall) MaxResults(maxResults int64) *RegionCommitmentsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) +// Patch: Updates a managed instance group using the information that +// you specify in the request. This operation is marked as DONE when the +// group is patched even if the instances in the group are still in the +// process of being patched. 
You must separately verify the status of +// the individual instances with the listmanagedinstances method. This +// method supports PATCH semantics and uses the JSON merge patch format +// and processing rules. +func (r *RegionInstanceGroupManagersService) Patch(project string, region string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersPatchCall { + c := &RegionInstanceGroupManagersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.instanceGroupManager = instanceGroupManager + c.instancegroupmanager = instancegroupmanager return c } -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. // -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. // -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *RegionCommitmentsListCall) OrderBy(orderBy string) *RegionCommitmentsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *RegionCommitmentsListCall) PageToken(pageToken string) *RegionCommitmentsListCall { - c.urlParams_.Set("pageToken", pageToken) +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionInstanceGroupManagersPatchCall) RequestId(requestId string) *RegionInstanceGroupManagersPatchCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionCommitmentsListCall) Fields(s ...googleapi.Field) *RegionCommitmentsListCall { +func (c *RegionInstanceGroupManagersPatchCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
-func (c *RegionCommitmentsListCall) IfNoneMatch(entityTag string) *RegionCommitmentsListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionCommitmentsListCall) Context(ctx context.Context) *RegionCommitmentsListCall { +func (c *RegionInstanceGroupManagersPatchCall) Context(ctx context.Context) *RegionInstanceGroupManagersPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionCommitmentsListCall) Header() http.Header { +func (c *RegionInstanceGroupManagersPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionCommitmentsListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/commitments") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("PATCH", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionCommitments.list" call. -// Exactly one of *CommitmentList or error will be non-nil. Any non-2xx +// Do executes the "compute.regionInstanceGroupManagers.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *CommitmentList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionCommitmentsListCall) Do(opts ...googleapi.CallOption) (*CommitmentList, error) { +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -54633,7 +67202,7 @@ func (c *RegionCommitmentsListCall) Do(opts ...googleapi.CallOption) (*Commitmen if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &CommitmentList{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -54645,35 +67214,19 @@ func (c *RegionCommitmentsListCall) Do(opts ...googleapi.CallOption) (*Commitmen } return ret, nil // { - // "description": "Retrieves a list of commitments contained within the specified region.", - // "httpMethod": "GET", - // "id": "compute.regionCommitments.list", + // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listmanagedinstances method. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.regionInstanceGroupManagers.patch", // "parameterOrder": [ // "project", - // "region" + // "region", + // "instanceGroupManager" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". 
This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "instanceGroupManager": { + // "description": "The name of the instance group manager.", + // "location": "path", + // "required": true, // "type": "string" // }, // "project": { @@ -54684,69 +67237,52 @@ func (c *RegionCommitmentsListCall) Do(opts ...googleapi.CallOption) (*Commitmen // "type": "string" // }, // "region": { - // "description": "Name of the region for this request.", + // "description": "Name of the region scoping this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/commitments", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", + // "request": { + // "$ref": "InstanceGroupManager" + // }, // "response": { - // "$ref": "CommitmentList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *RegionCommitmentsListCall) Pages(ctx context.Context, f func(*CommitmentList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.regionInstanceGroupManagers.abandonInstances": +// method id "compute.regionInstanceGroupManagers.recreateInstances": -type RegionInstanceGroupManagersAbandonInstancesCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagersabandoninstancesrequest *RegionInstanceGroupManagersAbandonInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersRecreateInstancesCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagersrecreaterequest *RegionInstanceGroupManagersRecreateRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AbandonInstances: Schedules a group action to remove the specified -// instances from the managed instance group. Abandoning an instance -// does not delete the instance, but it does remove the instance from -// any target pools that are applied by the managed instance group. This -// method reduces the targetSize of the managed instance group by the -// number of instances that you abandon. This operation is marked as -// DONE when the action is scheduled even if the instances have not yet -// been removed from the group. You must separately verify the status of -// the abandoning action with the listmanagedinstances method. +// RecreateInstances: Schedules a group action to recreate the specified +// instances in the managed instance group. The instances are deleted +// and recreated using the current instance template for the managed +// instance group. This operation is marked as DONE when the action is +// scheduled even if the instances have not yet been recreated. You must +// separately verify the status of the recreating action with the +// listmanagedinstances method. // // If the group is part of a backend service that has enabled connection // draining, it can take up to 60 seconds after the connection draining @@ -54755,12 +67291,12 @@ type RegionInstanceGroupManagersAbandonInstancesCall struct { // // You can specify a maximum of 1000 instances with this method per // request. 
-func (r *RegionInstanceGroupManagersService) AbandonInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersabandoninstancesrequest *RegionInstanceGroupManagersAbandonInstancesRequest) *RegionInstanceGroupManagersAbandonInstancesCall { - c := &RegionInstanceGroupManagersAbandonInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *RegionInstanceGroupManagersService) RecreateInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersrecreaterequest *RegionInstanceGroupManagersRecreateRequest) *RegionInstanceGroupManagersRecreateInstancesCall { + c := &RegionInstanceGroupManagersRecreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagersabandoninstancesrequest = regioninstancegroupmanagersabandoninstancesrequest + c.regioninstancegroupmanagersrecreaterequest = regioninstancegroupmanagersrecreaterequest return c } @@ -54778,7 +67314,7 @@ func (r *RegionInstanceGroupManagersService) AbandonInstances(project string, re // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersAbandonInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersAbandonInstancesCall { +func (c *RegionInstanceGroupManagersRecreateInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersRecreateInstancesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -54786,7 +67322,7 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) RequestId(requestId st // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersAbandonInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersAbandonInstancesCall { +func (c *RegionInstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersRecreateInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -54794,34 +67330,34 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) Fields(s ...googleapi. // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersAbandonInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersAbandonInstancesCall { +func (c *RegionInstanceGroupManagersRecreateInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersRecreateInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionInstanceGroupManagersAbandonInstancesCall) Header() http.Header { +func (c *RegionInstanceGroupManagersRecreateInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersabandoninstancesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersrecreaterequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -54833,14 +67369,14 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) doRequest(alt string) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.abandonInstances" call. +// Do executes the "compute.regionInstanceGroupManagers.recreateInstances" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -54871,9 +67407,9 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.C } return ret, nil // { - // "description": "Schedules a group action to remove the specified instances from the managed instance group. Abandoning an instance does not delete the instance, but it does remove the instance from any target pools that are applied by the managed instance group. This method reduces the targetSize of the managed instance group by the number of instances that you abandon. This operation is marked as DONE when the action is scheduled even if the instances have not yet been removed from the group. You must separately verify the status of the abandoning action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "description": "Schedules a group action to recreate the specified instances in the managed instance group. 
The instances are deleted and recreated using the current instance template for the managed instance group. This operation is marked as DONE when the action is scheduled even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.abandonInstances", + // "id": "compute.regionInstanceGroupManagers.recreateInstances", // "parameterOrder": [ // "project", // "region", @@ -54905,9 +67441,9 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.C // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", // "request": { - // "$ref": "RegionInstanceGroupManagersAbandonInstancesRequest" + // "$ref": "RegionInstanceGroupManagersRecreateRequest" // }, // "response": { // "$ref": "Operation" @@ -54920,9 +67456,9 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.C } -// method id "compute.regionInstanceGroupManagers.delete": +// method id "compute.regionInstanceGroupManagers.resize": -type RegionInstanceGroupManagersDeleteCall struct { +type RegionInstanceGroupManagersResizeCall struct { s *Service project string region string @@ -54932,13 +67468,24 @@ type RegionInstanceGroupManagersDeleteCall struct { header_ http.Header } -// Delete: Deletes the specified managed instance group and all of the -// instances in that group. -func (r *RegionInstanceGroupManagersService) Delete(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersDeleteCall { - c := &RegionInstanceGroupManagersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Resize: Changes the intended size for the managed instance group. If +// you increase the size, the group schedules actions to create new +// instances using the current instance template. If you decrease the +// size, the group schedules delete actions on one or more instances. +// The resize operation is marked DONE when the resize actions are +// scheduled even if the group has not yet added or deleted any +// instances. You must separately verify the status of the creating or +// deleting actions with the listmanagedinstances method. +// +// If the group is part of a backend service that has enabled connection +// draining, it can take up to 60 seconds after the connection draining +// duration has elapsed before the VM instance is removed or deleted. 
+func (r *RegionInstanceGroupManagersService) Resize(project string, region string, instanceGroupManager string, size int64) *RegionInstanceGroupManagersResizeCall { + c := &RegionInstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.instanceGroupManager = instanceGroupManager + c.urlParams_.Set("size", fmt.Sprint(size)) return c } @@ -54956,7 +67503,7 @@ func (r *RegionInstanceGroupManagersService) Delete(project string, region strin // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersDeleteCall) RequestId(requestId string) *RegionInstanceGroupManagersDeleteCall { +func (c *RegionInstanceGroupManagersResizeCall) RequestId(requestId string) *RegionInstanceGroupManagersResizeCall { c.urlParams_.Set("requestId", requestId) return c } @@ -54964,7 +67511,7 @@ func (c *RegionInstanceGroupManagersDeleteCall) RequestId(requestId string) *Reg // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersDeleteCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersDeleteCall { +func (c *RegionInstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersResizeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -54972,21 +67519,21 @@ func (c *RegionInstanceGroupManagersDeleteCall) Fields(s ...googleapi.Field) *Re // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersDeleteCall) Context(ctx context.Context) *RegionInstanceGroupManagersDeleteCall { +func (c *RegionInstanceGroupManagersResizeCall) Context(ctx context.Context) *RegionInstanceGroupManagersResizeCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersDeleteCall) Header() http.Header { +func (c *RegionInstanceGroupManagersResizeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -54994,9 +67541,9 @@ func (c *RegionInstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Res reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, @@ -55006,14 +67553,14 @@ func (c *RegionInstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Res return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.delete" call. 
+// Do executes the "compute.regionInstanceGroupManagers.resize" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -55044,17 +67591,18 @@ func (c *RegionInstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Deletes the specified managed instance group and all of the instances in that group.", - // "httpMethod": "DELETE", - // "id": "compute.regionInstanceGroupManagers.delete", + // "description": "Changes the intended size for the managed instance group. If you increase the size, the group schedules actions to create new instances using the current instance template. If you decrease the size, the group schedules delete actions on one or more instances. The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroupManagers.resize", // "parameterOrder": [ // "project", // "region", - // "instanceGroupManager" + // "instanceGroupManager", + // "size" // ], // "parameters": { // "instanceGroupManager": { - // "description": "Name of the managed instance group to delete.", + // "description": "Name of the managed instance group.", // "location": "path", // "required": true, // "type": "string" @@ -55076,9 +67624,17 @@ func (c *RegionInstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "size": { + // "description": "Number of instances that should exist in this instance group manager.", + // "format": "int32", + // "location": "query", + // "minimum": "0", + // "required": true, + // "type": "integer" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize", // "response": { // "$ref": "Operation" // }, @@ -55090,41 +67646,27 @@ func (c *RegionInstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) } -// method id "compute.regionInstanceGroupManagers.deleteInstances": +// method id "compute.regionInstanceGroupManagers.setAutoHealingPolicies": -type RegionInstanceGroupManagersDeleteInstancesCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagersdeleteinstancesrequest *RegionInstanceGroupManagersDeleteInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersSetAutoHealingPoliciesCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagerssetautohealingrequest *RegionInstanceGroupManagersSetAutoHealingRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// DeleteInstances: Schedules a group action to delete the specified -// instances in the managed instance group. The instances are also -// removed from any target pools of which they were a member. This -// method reduces the targetSize of the managed instance group by the -// number of instances that you delete. This operation is marked as DONE -// when the action is scheduled even if the instances are still being -// deleted. You must separately verify the status of the deleting action -// with the listmanagedinstances method. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or -// deleted. -// -// You can specify a maximum of 1000 instances with this method per -// request. -func (r *RegionInstanceGroupManagersService) DeleteInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersdeleteinstancesrequest *RegionInstanceGroupManagersDeleteInstancesRequest) *RegionInstanceGroupManagersDeleteInstancesCall { - c := &RegionInstanceGroupManagersDeleteInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetAutoHealingPolicies: Modifies the autohealing policy for the +// instances in this managed instance group. 
+func (r *RegionInstanceGroupManagersService) SetAutoHealingPolicies(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssetautohealingrequest *RegionInstanceGroupManagersSetAutoHealingRequest) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { + c := &RegionInstanceGroupManagersSetAutoHealingPoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagersdeleteinstancesrequest = regioninstancegroupmanagersdeleteinstancesrequest + c.regioninstancegroupmanagerssetautohealingrequest = regioninstancegroupmanagerssetautohealingrequest return c } @@ -55142,7 +67684,7 @@ func (r *RegionInstanceGroupManagersService) DeleteInstances(project string, reg // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersDeleteInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersDeleteInstancesCall { +func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) RequestId(requestId string) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { c.urlParams_.Set("requestId", requestId) return c } @@ -55150,7 +67692,7 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) RequestId(requestId str // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersDeleteInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersDeleteInstancesCall { +func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -55158,34 +67700,34 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) Fields(s ...googleapi.F // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersDeleteInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersDeleteInstancesCall { +func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionInstanceGroupManagersDeleteInstancesCall) Header() http.Header { +func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersdeleteinstancesrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssetautohealingrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deleteInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -55197,14 +67739,14 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) doRequest(alt string) ( return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.deleteInstances" call. +// Do executes the "compute.regionInstanceGroupManagers.setAutoHealingPolicies" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -55235,9 +67777,9 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.Ca } return ret, nil // { - // "description": "Schedules a group action to delete the specified instances in the managed instance group. The instances are also removed from any target pools of which they were a member. This method reduces the targetSize of the managed instance group by the number of instances that you delete. This operation is marked as DONE when the action is scheduled even if the instances are still being deleted. 
You must separately verify the status of the deleting action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", + // "description": "Modifies the autohealing policy for the instances in this managed instance group.", // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.deleteInstances", + // "id": "compute.regionInstanceGroupManagers.setAutoHealingPolicies", // "parameterOrder": [ // "project", // "region", @@ -55269,9 +67811,9 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.Ca // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies", // "request": { - // "$ref": "RegionInstanceGroupManagersDeleteInstancesRequest" + // "$ref": "RegionInstanceGroupManagersSetAutoHealingRequest" // }, // "response": { // "$ref": "Operation" @@ -55284,78 +67826,91 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.Ca } -// method id "compute.regionInstanceGroupManagers.get": +// method id "compute.regionInstanceGroupManagers.setInstanceTemplate": -type RegionInstanceGroupManagersGetCall struct { - s *Service - project string - region string - instanceGroupManager string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersSetInstanceTemplateCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagerssettemplaterequest *RegionInstanceGroupManagersSetTemplateRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns all of the details about the specified managed instance -// group. -func (r *RegionInstanceGroupManagersService) Get(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersGetCall { - c := &RegionInstanceGroupManagersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetInstanceTemplate: Sets the instance template to use when creating +// new instances or recreating instances in this group. Existing +// instances are not affected. +func (r *RegionInstanceGroupManagersService) SetInstanceTemplate(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssettemplaterequest *RegionInstanceGroupManagersSetTemplateRequest) *RegionInstanceGroupManagersSetInstanceTemplateCall { + c := &RegionInstanceGroupManagersSetInstanceTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagerssettemplaterequest = regioninstancegroupmanagerssettemplaterequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) RequestId(requestId string) *RegionInstanceGroupManagersSetInstanceTemplateCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersGetCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersGetCall { +func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetInstanceTemplateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionInstanceGroupManagersGetCall) IfNoneMatch(entityTag string) *RegionInstanceGroupManagersGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersGetCall) Context(ctx context.Context) *RegionInstanceGroupManagersGetCall { +func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetInstanceTemplateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersGetCall) Header() http.Header { +func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssettemplaterequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, @@ -55365,14 +67920,14 @@ func (c *RegionInstanceGroupManagersGetCall) doRequest(alt string) (*http.Respon return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.get" call. -// Exactly one of *InstanceGroupManager or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *InstanceGroupManager.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroupManager, error) { +// Do executes the "compute.regionInstanceGroupManagers.setInstanceTemplate" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -55391,7 +67946,7 @@ func (c *RegionInstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (* if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroupManager{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -55403,9 +67958,9 @@ func (c *RegionInstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Returns all of the details about the specified managed instance group.", - // "httpMethod": "GET", - // "id": "compute.regionInstanceGroupManagers.get", + // "description": "Sets the instance template to use when creating new instances or recreating instances in this group. Existing instances are not affected.", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroupManagers.setInstanceTemplate", // "parameterOrder": [ // "project", // "region", @@ -55413,7 +67968,7 @@ func (c *RegionInstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (* // ], // "parameters": { // "instanceGroupManager": { - // "description": "Name of the managed instance group to return.", + // "description": "The name of the managed instance group.", // "location": "path", // "required": true, // "type": "string" @@ -55430,47 +67985,50 @@ func (c *RegionInstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (* // "location": "path", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", + // "request": { + // "$ref": "RegionInstanceGroupManagersSetTemplateRequest" + // }, // "response": { - // "$ref": "InstanceGroupManager" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.regionInstanceGroupManagers.insert": +// method id "compute.regionInstanceGroupManagers.setTargetPools": -type RegionInstanceGroupManagersInsertCall struct { - s *Service - project string - region string - instancegroupmanager *InstanceGroupManager - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersSetTargetPoolsCall struct { + s *Service + project string + region string + instanceGroupManager string + regioninstancegroupmanagerssettargetpoolsrequest *RegionInstanceGroupManagersSetTargetPoolsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a managed instance group using the information that -// you specify in the request. After the group is created, it schedules -// an action to create instances in the group using the specified -// instance template. This operation is marked as DONE when the group is -// created even if the instances in the group have not yet been created. -// You must separately verify the status of the individual instances -// with the listmanagedinstances method. -// -// A regional managed instance group can contain up to 2000 instances. -func (r *RegionInstanceGroupManagersService) Insert(project string, region string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersInsertCall { - c := &RegionInstanceGroupManagersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetTargetPools: Modifies the target pools to which all new instances +// in this group are assigned. Existing instances in the group are not +// affected. +func (r *RegionInstanceGroupManagersService) SetTargetPools(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssettargetpoolsrequest *RegionInstanceGroupManagersSetTargetPoolsRequest) *RegionInstanceGroupManagersSetTargetPoolsCall { + c := &RegionInstanceGroupManagersSetTargetPoolsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instancegroupmanager = instancegroupmanager + c.instanceGroupManager = instanceGroupManager + c.regioninstancegroupmanagerssettargetpoolsrequest = regioninstancegroupmanagerssettargetpoolsrequest return c } @@ -55488,7 +68046,7 @@ func (r *RegionInstanceGroupManagersService) Insert(project string, region strin // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
-func (c *RegionInstanceGroupManagersInsertCall) RequestId(requestId string) *RegionInstanceGroupManagersInsertCall { +func (c *RegionInstanceGroupManagersSetTargetPoolsCall) RequestId(requestId string) *RegionInstanceGroupManagersSetTargetPoolsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -55496,7 +68054,7 @@ func (c *RegionInstanceGroupManagersInsertCall) RequestId(requestId string) *Reg // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersInsertCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersInsertCall { +func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetTargetPoolsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -55504,52 +68062,53 @@ func (c *RegionInstanceGroupManagersInsertCall) Fields(s ...googleapi.Field) *Re // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersInsertCall) Context(ctx context.Context) *RegionInstanceGroupManagersInsertCall { +func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetTargetPoolsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersInsertCall) Header() http.Header { +func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssettargetpoolsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "instanceGroupManager": c.instanceGroupManager, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.insert" call. +// Do executes the "compute.regionInstanceGroupManagers.setTargetPools" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *RegionInstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -55580,14 +68139,21 @@ func (c *RegionInstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Creates a managed instance group using the information that you specify in the request. After the group is created, it schedules an action to create instances in the group using the specified instance template. This operation is marked as DONE when the group is created even if the instances in the group have not yet been created. You must separately verify the status of the individual instances with the listmanagedinstances method.\n\nA regional managed instance group can contain up to 2000 instances.", + // "description": "Modifies the target pools to which all new instances in this group are assigned. Existing instances in the group are not affected.", // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.insert", + // "id": "compute.regionInstanceGroupManagers.setTargetPools", // "parameterOrder": [ // "project", - // "region" + // "region", + // "instanceGroupManager" // ], // "parameters": { + // "instanceGroupManager": { + // "description": "Name of the managed instance group.", + // "location": "path", + // "required": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -55607,9 +68173,9 @@ func (c *RegionInstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", // "request": { - // "$ref": "InstanceGroupManager" + // "$ref": "RegionInstanceGroupManagersSetTargetPoolsRequest" // }, // "response": { // "$ref": "Operation" @@ -55622,159 +68188,88 @@ func (c *RegionInstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) } -// method id "compute.regionInstanceGroupManagers.list": +// method id "compute.regionInstanceGroupManagers.testIamPermissions": -type RegionInstanceGroupManagersListCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupManagersTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of managed instance groups that are -// contained within the specified region. -func (r *RegionInstanceGroupManagersService) List(project string, region string) *RegionInstanceGroupManagersListCall { - c := &RegionInstanceGroupManagersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. 
+func (r *RegionInstanceGroupManagersService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionInstanceGroupManagersTestIamPermissionsCall { + c := &RegionInstanceGroupManagersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - return c -} - -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *RegionInstanceGroupManagersListCall) Filter(filter string) *RegionInstanceGroupManagersListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *RegionInstanceGroupManagersListCall) MaxResults(maxResults int64) *RegionInstanceGroupManagersListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *RegionInstanceGroupManagersListCall) OrderBy(orderBy string) *RegionInstanceGroupManagersListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. 
-func (c *RegionInstanceGroupManagersListCall) PageToken(pageToken string) *RegionInstanceGroupManagersListCall { - c.urlParams_.Set("pageToken", pageToken) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersListCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersListCall { +func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionInstanceGroupManagersListCall) IfNoneMatch(entityTag string) *RegionInstanceGroupManagersListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersListCall) Context(ctx context.Context) *RegionInstanceGroupManagersListCall { +func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Context(ctx context.Context) *RegionInstanceGroupManagersTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersListCall) Header() http.Header { +func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.list" call. -// Exactly one of *RegionInstanceGroupManagerList or error will be -// non-nil. Any non-2xx status code is an error. 
Response headers are in -// either *RegionInstanceGroupManagerList.ServerResponse.Header or (if a -// response was returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "compute.regionInstanceGroupManagers.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupManagerList, error) { +func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -55793,7 +68288,7 @@ func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) ( if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RegionInstanceGroupManagerList{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -55805,37 +68300,15 @@ func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Retrieves the list of managed instance groups that are contained within the specified region.", - // "httpMethod": "GET", - // "id": "compute.regionInstanceGroupManagers.list", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroupManagers.testIamPermissions", // "parameterOrder": [ // "project", - // "region" + // "region", + // "resource" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -55844,15 +68317,26 @@ func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) ( // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name of the resource for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers", + // "path": "{project}/regions/{region}/instanceGroupManagers/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "RegionInstanceGroupManagerList" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -55863,79 +68347,56 @@ func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) ( } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *RegionInstanceGroupManagersListCall) Pages(ctx context.Context, f func(*RegionInstanceGroupManagerList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.regionInstanceGroupManagers.listManagedInstances": +// method id "compute.regionInstanceGroupManagers.update": -type RegionInstanceGroupManagersListManagedInstancesCall struct { +type RegionInstanceGroupManagersUpdateCall struct { s *Service project string region string instanceGroupManager string + instancegroupmanager *InstanceGroupManager urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// ListManagedInstances: Lists the instances in the managed instance -// group and instances that are scheduled to be created. The list -// includes any current actions that the group has scheduled for its -// instances. -func (r *RegionInstanceGroupManagersService) ListManagedInstances(project string, region string, instanceGroupManager string) *RegionInstanceGroupManagersListManagedInstancesCall { - c := &RegionInstanceGroupManagersListManagedInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates a managed instance group using the information that +// you specify in the request. This operation is marked as DONE when the +// group is updated even if the instances in the group have not yet been +// updated. You must separately verify the status of the individual +// instances with the listmanagedinstances method. +func (r *RegionInstanceGroupManagersService) Update(project string, region string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersUpdateCall { + c := &RegionInstanceGroupManagersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.instanceGroupManager = instanceGroupManager + c.instancegroupmanager = instancegroupmanager return c } -// Filter sets the optional parameter "filter": -func (c *RegionInstanceGroupManagersListManagedInstancesCall) Filter(filter string) *RegionInstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": -func (c *RegionInstanceGroupManagersListManagedInstancesCall) MaxResults(maxResults int64) *RegionInstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "order_by": -func (c *RegionInstanceGroupManagersListManagedInstancesCall) OrderBy(orderBy string) *RegionInstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("order_by", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": -func (c *RegionInstanceGroupManagersListManagedInstancesCall) PageToken(pageToken string) *RegionInstanceGroupManagersListManagedInstancesCall { - c.urlParams_.Set("pageToken", pageToken) +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionInstanceGroupManagersUpdateCall) RequestId(requestId string) *RegionInstanceGroupManagersUpdateCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersListManagedInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersListManagedInstancesCall { +func (c *RegionInstanceGroupManagersUpdateCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -55943,31 +68404,36 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Fields(s ...google // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersListManagedInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersListManagedInstancesCall { +func (c *RegionInstanceGroupManagersUpdateCall) Context(ctx context.Context) *RegionInstanceGroupManagersUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersListManagedInstancesCall) Header() http.Header { +func (c *RegionInstanceGroupManagersUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupManagersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("PUT", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, @@ -55977,16 +68443,14 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) doRequest(alt stri return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.listManagedInstances" call. -// Exactly one of *RegionInstanceGroupManagersListInstancesResponse or -// error will be non-nil. Any non-2xx status code is an error. 
Response -// headers are in either -// *RegionInstanceGroupManagersListInstancesResponse.ServerResponse.Heade -// r or (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupManagersListInstancesResponse, error) { +// Do executes the "compute.regionInstanceGroupManagers.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionInstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -56005,7 +68469,7 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RegionInstanceGroupManagersListInstancesResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -56017,40 +68481,21 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea } return ret, nil // { - // "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.listManagedInstances", + // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is updated even if the instances in the group have not yet been updated. You must separately verify the status of the individual instances with the listmanagedinstances method.", + // "httpMethod": "PUT", + // "id": "compute.regionInstanceGroupManagers.update", // "parameterOrder": [ // "project", // "region", // "instanceGroupManager" // ], // "parameters": { - // "filter": { - // "location": "query", - // "type": "string" - // }, // "instanceGroupManager": { - // "description": "The name of the managed instance group.", + // "description": "The name of the instance group manager.", // "location": "path", // "required": true, // "type": "string" // }, - // "maxResults": { - // "default": "500", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "order_by": { - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "location": "query", - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -56063,148 +68508,116 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea // "location": "path", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", + // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", + // "request": { + // "$ref": "InstanceGroupManager" + // }, // "response": { - // "$ref": "RegionInstanceGroupManagersListInstancesResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *RegionInstanceGroupManagersListManagedInstancesCall) Pages(ctx context.Context, f func(*RegionInstanceGroupManagersListInstancesResponse) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.regionInstanceGroupManagers.patch": +// method id "compute.regionInstanceGroups.get": -type RegionInstanceGroupManagersPatchCall struct { - s *Service - project string - region string - instanceGroupManager string - instancegroupmanager *InstanceGroupManager - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupsGetCall struct { + s *Service + project string + region string + instanceGroup string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Patch: Updates a managed instance group using the information that -// you specify in the request. This operation is marked as DONE when the -// group is patched even if the instances in the group are still in the -// process of being patched. You must separately verify the status of -// the individual instances with the listmanagedinstances method. This -// method supports PATCH semantics and uses the JSON merge patch format -// and processing rules. -func (r *RegionInstanceGroupManagersService) Patch(project string, region string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersPatchCall { - c := &RegionInstanceGroupManagersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified instance group resource. 
+func (r *RegionInstanceGroupsService) Get(project string, region string, instanceGroup string) *RegionInstanceGroupsGetCall { + c := &RegionInstanceGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanager = instancegroupmanager - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersPatchCall) RequestId(requestId string) *RegionInstanceGroupManagersPatchCall { - c.urlParams_.Set("requestId", requestId) + c.instanceGroup = instanceGroup return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersPatchCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersPatchCall { +func (c *RegionInstanceGroupsGetCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionInstanceGroupsGetCall) IfNoneMatch(entityTag string) *RegionInstanceGroupsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersPatchCall) Context(ctx context.Context) *RegionInstanceGroupManagersPatchCall { +func (c *RegionInstanceGroupsGetCall) Context(ctx context.Context) *RegionInstanceGroupsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionInstanceGroupManagersPatchCall) Header() http.Header { +func (c *RegionInstanceGroupsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{instanceGroup}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.regionInstanceGroups.get" call. +// Exactly one of *InstanceGroup or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// *InstanceGroup.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -56223,7 +68636,7 @@ func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &InstanceGroup{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -56235,17 +68648,17 @@ func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is patched even if the instances in the group are still in the process of being patched. You must separately verify the status of the individual instances with the listmanagedinstances method. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.regionInstanceGroupManagers.patch", + // "description": "Returns the specified instance group resource.", + // "httpMethod": "GET", + // "id": "compute.regionInstanceGroups.get", // "parameterOrder": [ // "project", // "region", - // "instanceGroupManager" + // "instanceGroup" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the instance group manager.", + // "instanceGroup": { + // "description": "Name of the instance group resource to return.", // "location": "path", // "required": true, // "type": "string" @@ -56262,19 +68675,11 @@ func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) // "location": "path", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", - // "request": { - // "$ref": "InstanceGroupManager" - // }, + // "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}", // "response": { - // "$ref": "Operation" + // "$ref": "InstanceGroup" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -56285,120 +68690,159 @@ func (c *RegionInstanceGroupManagersPatchCall) Do(opts ...googleapi.CallOption) } -// method id "compute.regionInstanceGroupManagers.recreateInstances": +// method id "compute.regionInstanceGroups.list": -type RegionInstanceGroupManagersRecreateInstancesCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagersrecreaterequest *RegionInstanceGroupManagersRecreateRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupsListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// RecreateInstances: Schedules a group action to recreate the specified -// instances in the managed instance group. The instances are deleted -// and recreated using the current instance template for the managed -// instance group. This operation is marked as DONE when the action is -// scheduled even if the instances have not yet been recreated. You must -// separately verify the status of the recreating action with the -// listmanagedinstances method. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or -// deleted. -// -// You can specify a maximum of 1000 instances with this method per -// request. 
-func (r *RegionInstanceGroupManagersService) RecreateInstances(project string, region string, instanceGroupManager string, regioninstancegroupmanagersrecreaterequest *RegionInstanceGroupManagersRecreateRequest) *RegionInstanceGroupManagersRecreateInstancesCall { - c := &RegionInstanceGroupManagersRecreateInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of instance group resources contained within +// the specified region. +func (r *RegionInstanceGroupsService) List(project string, region string) *RegionInstanceGroupsListCall { + c := &RegionInstanceGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagersrecreaterequest = regioninstancegroupmanagersrecreaterequest return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersRecreateInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersRecreateInstancesCall { - c.urlParams_.Set("requestId", requestId) +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *RegionInstanceGroupsListCall) Filter(filter string) *RegionInstanceGroupsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. 
If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *RegionInstanceGroupsListCall) MaxResults(maxResults int64) *RegionInstanceGroupsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *RegionInstanceGroupsListCall) OrderBy(orderBy string) *RegionInstanceGroupsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *RegionInstanceGroupsListCall) PageToken(pageToken string) *RegionInstanceGroupsListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersRecreateInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersRecreateInstancesCall { +func (c *RegionInstanceGroupsListCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionInstanceGroupsListCall) IfNoneMatch(entityTag string) *RegionInstanceGroupsListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersRecreateInstancesCall) Context(ctx context.Context) *RegionInstanceGroupManagersRecreateInstancesCall { +func (c *RegionInstanceGroupsListCall) Context(ctx context.Context) *RegionInstanceGroupsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionInstanceGroupManagersRecreateInstancesCall) Header() http.Header { +func (c *RegionInstanceGroupsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagersrecreaterequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.recreateInstances" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionInstanceGroups.list" call. +// Exactly one of *RegionInstanceGroupList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *RegionInstanceGroupList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -56417,7 +68861,7 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi. if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &RegionInstanceGroupList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -56429,19 +68873,35 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi. } return ret, nil // { - // "description": "Schedules a group action to recreate the specified instances in the managed instance group. The instances are deleted and recreated using the current instance template for the managed instance group. 
This operation is marked as DONE when the action is scheduled even if the instances have not yet been recreated. You must separately verify the status of the recreating action with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.\n\nYou can specify a maximum of 1000 instances with this method per request.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.recreateInstances", + // "description": "Retrieves the list of instance group resources contained within the specified region.", + // "httpMethod": "GET", + // "id": "compute.regionInstanceGroups.list", // "parameterOrder": [ // "project", - // "region", - // "instanceGroupManager" + // "region" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "Name of the managed instance group.", - // "location": "path", - // "required": true, + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -56456,84 +68916,139 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi. // "location": "path", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", - // "request": { - // "$ref": "RegionInstanceGroupManagersRecreateRequest" - // }, + // "path": "{project}/regions/{region}/instanceGroups", // "response": { - // "$ref": "Operation" + // "$ref": "RegionInstanceGroupList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionInstanceGroupManagers.resize": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionInstanceGroupsListCall) Pages(ctx context.Context, f func(*RegionInstanceGroupList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.regionInstanceGroups.listInstances": -type RegionInstanceGroupManagersResizeCall struct { - s *Service - project string - region string - instanceGroupManager string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupsListInstancesCall struct { + s *Service + project string + region string + instanceGroup string + regioninstancegroupslistinstancesrequest *RegionInstanceGroupsListInstancesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Resize: Changes the intended size for the managed instance group. If -// you increase the size, the group schedules actions to create new -// instances using the current instance template. If you decrease the -// size, the group schedules delete actions on one or more instances. 
-// The resize operation is marked DONE when the resize actions are -// scheduled even if the group has not yet added or deleted any -// instances. You must separately verify the status of the creating or -// deleting actions with the listmanagedinstances method. -// -// If the group is part of a backend service that has enabled connection -// draining, it can take up to 60 seconds after the connection draining -// duration has elapsed before the VM instance is removed or deleted. -func (r *RegionInstanceGroupManagersService) Resize(project string, region string, instanceGroupManager string, size int64) *RegionInstanceGroupManagersResizeCall { - c := &RegionInstanceGroupManagersResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// ListInstances: Lists the instances in the specified instance group +// and displays information about the named ports. Depending on the +// specified options, this method can list all instances or only the +// instances that are running. +func (r *RegionInstanceGroupsService) ListInstances(project string, region string, instanceGroup string, regioninstancegroupslistinstancesrequest *RegionInstanceGroupsListInstancesRequest) *RegionInstanceGroupsListInstancesCall { + c := &RegionInstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager - c.urlParams_.Set("size", fmt.Sprint(size)) + c.instanceGroup = instanceGroup + c.regioninstancegroupslistinstancesrequest = regioninstancegroupslistinstancesrequest return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersResizeCall) RequestId(requestId string) *RegionInstanceGroupManagersResizeCall { - c.urlParams_.Set("requestId", requestId) +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. 
Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *RegionInstanceGroupsListInstancesCall) Filter(filter string) *RegionInstanceGroupsListInstancesCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *RegionInstanceGroupsListInstancesCall) MaxResults(maxResults int64) *RegionInstanceGroupsListInstancesCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *RegionInstanceGroupsListInstancesCall) OrderBy(orderBy string) *RegionInstanceGroupsListInstancesCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *RegionInstanceGroupsListInstancesCall) PageToken(pageToken string) *RegionInstanceGroupsListInstancesCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersResizeCall { +func (c *RegionInstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsListInstancesCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -56541,48 +69056,54 @@ func (c *RegionInstanceGroupManagersResizeCall) Fields(s ...googleapi.Field) *Re // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersResizeCall) Context(ctx context.Context) *RegionInstanceGroupManagersResizeCall { +func (c *RegionInstanceGroupsListInstancesCall) Context(ctx context.Context) *RegionInstanceGroupsListInstancesCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionInstanceGroupManagersResizeCall) Header() http.Header { +func (c *RegionInstanceGroupsListInstancesCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupslistinstancesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.resize" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionInstanceGroups.listInstances" call. +// Exactly one of *RegionInstanceGroupsListInstances or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *RegionInstanceGroupsListInstances.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupsListInstances, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -56601,7 +69122,7 @@ func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &RegionInstanceGroupsListInstances{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -56613,22 +69134,44 @@ func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Changes the intended size for the managed instance group. If you increase the size, the group schedules actions to create new instances using the current instance template. If you decrease the size, the group schedules delete actions on one or more instances. 
The resize operation is marked DONE when the resize actions are scheduled even if the group has not yet added or deleted any instances. You must separately verify the status of the creating or deleting actions with the listmanagedinstances method.\n\nIf the group is part of a backend service that has enabled connection draining, it can take up to 60 seconds after the connection draining duration has elapsed before the VM instance is removed or deleted.", + // "description": "Lists the instances in the specified instance group and displays information about the named ports. Depending on the specified options, this method can list all instances or only the instances that are running.", // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.resize", + // "id": "compute.regionInstanceGroups.listInstances", // "parameterOrder": [ // "project", // "region", - // "instanceGroupManager", - // "size" + // "instanceGroup" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "Name of the managed instance group.", + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "instanceGroup": { + // "description": "Name of the regional instance group for which we want to list the instances.", // "location": "path", // "required": true, // "type": "string" // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -56641,54 +69184,66 @@ func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) // "location": "path", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "size": { - // "description": "Number of instances that should exist in this instance group manager.", - // "format": "int32", - // "location": "query", - // "minimum": "0", - // "required": true, - // "type": "integer" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/resize", + // "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", + // "request": { + // "$ref": "RegionInstanceGroupsListInstancesRequest" + // }, // "response": { - // "$ref": "Operation" + // "$ref": "RegionInstanceGroupsListInstances" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionInstanceGroupManagers.setAutoHealingPolicies": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *RegionInstanceGroupsListInstancesCall) Pages(ctx context.Context, f func(*RegionInstanceGroupsListInstances) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type RegionInstanceGroupManagersSetAutoHealingPoliciesCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagerssetautohealingrequest *RegionInstanceGroupManagersSetAutoHealingRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.regionInstanceGroups.setNamedPorts": + +type RegionInstanceGroupsSetNamedPortsCall struct { + s *Service + project string + region string + instanceGroup string + regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetAutoHealingPolicies: Modifies the autohealing policy for the -// instances in this managed instance group. -func (r *RegionInstanceGroupManagersService) SetAutoHealingPolicies(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssetautohealingrequest *RegionInstanceGroupManagersSetAutoHealingRequest) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { - c := &RegionInstanceGroupManagersSetAutoHealingPoliciesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// SetNamedPorts: Sets the named ports for the specified regional +// instance group. +func (r *RegionInstanceGroupsService) SetNamedPorts(project string, region string, instanceGroup string, regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest) *RegionInstanceGroupsSetNamedPortsCall { + c := &RegionInstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagerssetautohealingrequest = regioninstancegroupmanagerssetautohealingrequest + c.instanceGroup = instanceGroup + c.regioninstancegroupssetnamedportsrequest = regioninstancegroupssetnamedportsrequest return c } @@ -56706,7 +69261,7 @@ func (r *RegionInstanceGroupManagersService) SetAutoHealingPolicies(project stri // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) RequestId(requestId string) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { +func (c *RegionInstanceGroupsSetNamedPortsCall) RequestId(requestId string) *RegionInstanceGroupsSetNamedPortsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -56714,7 +69269,7 @@ func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) RequestId(reques // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
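// Illustrative sketch (not taken from this diff): one plausible way to drive the
// generated regionInstanceGroups list pager added above. The service construction
// via golang.org/x/oauth2/google.DefaultClient and compute.New follows the usual
// google-api-go-client pattern and is an assumption, as are the "my-project" /
// "us-central1" placeholders and the compute/v1 import path (the vendored surface
// here may be a beta variant with a different path).
package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	// Assumed: Application Default Credentials with the read-only compute scope.
	client, err := google.DefaultClient(ctx, compute.ComputeReadonlyScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}

	// Filter uses the eq/ne expression syntax described in the method doc;
	// MaxResults accepts 0-500 (default 500). Pages follows nextPageToken
	// automatically, invoking the callback once per page.
	err = svc.RegionInstanceGroups.List("my-project", "us-central1").
		Filter("name ne example-group").
		MaxResults(100).
		Pages(ctx, func(page *compute.RegionInstanceGroupList) error {
			for _, ig := range page.Items {
				fmt.Println(ig.Name)
			}
			return nil
		})
	if err != nil {
		log.Fatal(err)
	}
}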
-func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { +func (c *RegionInstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsSetNamedPortsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -56722,53 +69277,53 @@ func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Fields(s ...goog // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetAutoHealingPoliciesCall { +func (c *RegionInstanceGroupsSetNamedPortsCall) Context(ctx context.Context) *RegionInstanceGroupsSetNamedPortsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Header() http.Header { +func (c *RegionInstanceGroupsSetNamedPortsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssetautohealingrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupssetnamedportsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, + "instanceGroup": c.instanceGroup, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.setAutoHealingPolicies" call. +// Do executes the "compute.regionInstanceGroups.setNamedPorts" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -56799,17 +69354,17 @@ func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googl } return ret, nil // { - // "description": "Modifies the autohealing policy for the instances in this managed instance group.", + // "description": "Sets the named ports for the specified regional instance group.", // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.setAutoHealingPolicies", + // "id": "compute.regionInstanceGroups.setNamedPorts", // "parameterOrder": [ // "project", // "region", - // "instanceGroupManager" + // "instanceGroup" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "Name of the managed instance group.", + // "instanceGroup": { + // "description": "The name of the regional instance group where the named ports are updated.", // "location": "path", // "required": true, // "type": "string" @@ -56833,9 +69388,9 @@ func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googl // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setAutoHealingPolicies", + // "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts", // "request": { - // "$ref": "RegionInstanceGroupManagersSetAutoHealingRequest" + // "$ref": "RegionInstanceGroupsSetNamedPortsRequest" // }, // "response": { // "$ref": "Operation" @@ -56848,54 +69403,34 @@ func (c *RegionInstanceGroupManagersSetAutoHealingPoliciesCall) Do(opts ...googl } -// method id "compute.regionInstanceGroupManagers.setInstanceTemplate": +// method id "compute.regionInstanceGroups.testIamPermissions": -type RegionInstanceGroupManagersSetInstanceTemplateCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagerssettemplaterequest *RegionInstanceGroupManagersSetTemplateRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionInstanceGroupsTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetInstanceTemplate: Sets the instance template to use when creating -// new instances or recreating instances in this group. Existing -// instances are not affected. -func (r *RegionInstanceGroupManagersService) SetInstanceTemplate(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssettemplaterequest *RegionInstanceGroupManagersSetTemplateRequest) *RegionInstanceGroupManagersSetInstanceTemplateCall { - c := &RegionInstanceGroupManagersSetInstanceTemplateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *RegionInstanceGroupsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionInstanceGroupsTestIamPermissionsCall { + c := &RegionInstanceGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagerssettemplaterequest = regioninstancegroupmanagerssettemplaterequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. 
Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) RequestId(requestId string) *RegionInstanceGroupManagersSetInstanceTemplateCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetInstanceTemplateCall { +func (c *RegionInstanceGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -56903,53 +69438,53 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Fields(s ...googlea // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetInstanceTemplateCall { +func (c *RegionInstanceGroupsTestIamPermissionsCall) Context(ctx context.Context) *RegionInstanceGroupsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Header() http.Header { +func (c *RegionInstanceGroupsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssettemplaterequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{resource}/testIamPermissions") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.setInstanceTemplate" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionInstanceGroups.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionInstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -56968,7 +69503,7 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleap if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -56980,21 +69515,15 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleap } return ret, nil // { - // "description": "Sets the instance template to use when creating new instances or recreating instances in this group. Existing instances are not affected.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.setInstanceTemplate", + // "id": "compute.regionInstanceGroups.testIamPermissions", // "parameterOrder": [ // "project", // "region", - // "instanceGroupManager" + // "resource" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the managed instance group.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -57003,80 +69532,62 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleap // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "The name of the region for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", + // "path": "{project}/regions/{region}/instanceGroups/{resource}/testIamPermissions", // "request": { - // "$ref": "RegionInstanceGroupManagersSetTemplateRequest" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "Operation" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionInstanceGroupManagers.setTargetPools": +// method id "compute.regionOperations.delete": -type RegionInstanceGroupManagersSetTargetPoolsCall struct { - s *Service - project string - region string - instanceGroupManager string - regioninstancegroupmanagerssettargetpoolsrequest *RegionInstanceGroupManagersSetTargetPoolsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionOperationsDeleteCall struct { + s *Service + project string + region string + operation string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetTargetPools: Modifies the target pools to which all new instances -// in this group are assigned. Existing instances in the group are not -// affected. -func (r *RegionInstanceGroupManagersService) SetTargetPools(project string, region string, instanceGroupManager string, regioninstancegroupmanagerssettargetpoolsrequest *RegionInstanceGroupManagersSetTargetPoolsRequest) *RegionInstanceGroupManagersSetTargetPoolsCall { - c := &RegionInstanceGroupManagersSetTargetPoolsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified region-specific Operations resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/delete +func (r *RegionOperationsService) Delete(project string, region string, operation string) *RegionOperationsDeleteCall { + c := &RegionOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager - c.regioninstancegroupmanagerssettargetpoolsrequest = regioninstancegroupmanagerssettargetpoolsrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. 
-// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersSetTargetPoolsCall) RequestId(requestId string) *RegionInstanceGroupManagersSetTargetPoolsCall { - c.urlParams_.Set("requestId", requestId) + c.operation = operation return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersSetTargetPoolsCall { +func (c *RegionOperationsDeleteCall) Fields(s ...googleapi.Field) *RegionOperationsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -57084,95 +69595,66 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Fields(s ...googleapi.Fi // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Context(ctx context.Context) *RegionInstanceGroupManagersSetTargetPoolsCall { +func (c *RegionOperationsDeleteCall) Context(ctx context.Context) *RegionOperationsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Header() http.Header { +func (c *RegionOperationsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupmanagerssettargetpoolsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations/{operation}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, + "operation": c.operation, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.setTargetPools" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. 
Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.regionOperations.delete" call. +func (c *RegionOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } if err != nil { - return nil, err + return err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err + return err } - return ret, nil + return nil // { - // "description": "Modifies the target pools to which all new instances in this group are assigned. Existing instances in the group are not affected.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.setTargetPools", + // "description": "Deletes the specified region-specific Operations resource.", + // "httpMethod": "DELETE", + // "id": "compute.regionOperations.delete", // "parameterOrder": [ // "project", // "region", - // "instanceGroupManager" + // "operation" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "Name of the managed instance group.", + // "operation": { + // "description": "Name of the Operations resource to delete.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -57184,24 +69666,14 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.Cal // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", - // "request": { - // "$ref": "RegionInstanceGroupManagersSetTargetPoolsRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, + // "path": "{project}/regions/{region}/operations/{operation}", // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/compute" @@ -57210,88 +69682,95 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.Cal } -// method id "compute.regionInstanceGroupManagers.testIamPermissions": +// method id "compute.regionOperations.get": -type RegionInstanceGroupManagersTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionOperationsGetCall struct { + s *Service + project string + region string + operation string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *RegionInstanceGroupManagersService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionInstanceGroupManagersTestIamPermissionsCall { - c := &RegionInstanceGroupManagersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Retrieves the specified region-specific Operations resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/get +func (r *RegionOperationsService) Get(project string, region string, operation string) *RegionOperationsGetCall { + c := &RegionOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.operation = operation return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersTestIamPermissionsCall { +func (c *RegionOperationsGetCall) Fields(s ...googleapi.Field) *RegionOperationsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionOperationsGetCall) IfNoneMatch(entityTag string) *RegionOperationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Context(ctx context.Context) *RegionInstanceGroupManagersTestIamPermissionsCall { +func (c *RegionOperationsGetCall) Context(ctx context.Context) *RegionOperationsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Header() http.Header { +func (c *RegionOperationsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionOperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations/{operation}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, + "region": c.region, + "operation": c.operation, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.regionOperations.get" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -57310,7 +69789,7 @@ func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -57322,43 +69801,40 @@ func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroupManagers.testIamPermissions", + // "description": "Retrieves the specified region-specific Operations resource.", + // "httpMethod": "GET", + // "id": "compute.regionOperations.get", // "parameterOrder": [ // "project", // "region", - // "resource" + // "operation" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "operation": { + // "description": "Name of the Operations resource to return.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "region": { - // "description": "The name of the region for this request.", + // "project": { + // "description": "Project ID for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name of the resource for this request.", + // "region": { + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/regions/{region}/operations/{operation}", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -57369,110 +69845,160 @@ func (c *RegionInstanceGroupManagersTestIamPermissionsCall) Do(opts ...googleapi } -// method id "compute.regionInstanceGroupManagers.update": +// method id "compute.regionOperations.list": -type RegionInstanceGroupManagersUpdateCall struct { - s *Service - project string - region string - instanceGroupManager string - instancegroupmanager *InstanceGroupManager - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionOperationsListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Update: Updates a managed instance group using the information that -// you specify in the request. This operation is marked as DONE when the -// group is updated even if the instances in the group have not yet been -// updated. You must separately verify the status of the individual -// instances with the listmanagedinstances method. 
-func (r *RegionInstanceGroupManagersService) Update(project string, region string, instanceGroupManager string, instancegroupmanager *InstanceGroupManager) *RegionInstanceGroupManagersUpdateCall { - c := &RegionInstanceGroupManagersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of Operation resources contained within the +// specified region. +// For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/list +func (r *RegionOperationsService) List(project string, region string) *RegionOperationsListCall { + c := &RegionOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroupManager = instanceGroupManager - c.instancegroupmanager = instancegroupmanager return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. // -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. // -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupManagersUpdateCall) RequestId(requestId string) *RegionInstanceGroupManagersUpdateCall { - c.urlParams_.Set("requestId", requestId) +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *RegionOperationsListCall) Filter(filter string) *RegionOperationsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. 
If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *RegionOperationsListCall) MaxResults(maxResults int64) *RegionOperationsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *RegionOperationsListCall) OrderBy(orderBy string) *RegionOperationsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *RegionOperationsListCall) PageToken(pageToken string) *RegionOperationsListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupManagersUpdateCall) Fields(s ...googleapi.Field) *RegionInstanceGroupManagersUpdateCall { +func (c *RegionOperationsListCall) Fields(s ...googleapi.Field) *RegionOperationsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionOperationsListCall) IfNoneMatch(entityTag string) *RegionOperationsListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupManagersUpdateCall) Context(ctx context.Context) *RegionInstanceGroupManagersUpdateCall { +func (c *RegionOperationsListCall) Context(ctx context.Context) *RegionOperationsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionInstanceGroupManagersUpdateCall) Header() http.Header { +func (c *RegionOperationsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupManagersUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionOperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroupManager": c.instanceGroupManager, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroupManagers.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.regionOperations.list" call. +// Exactly one of *OperationList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// *OperationList.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -57491,7 +70017,7 @@ func (c *RegionInstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &OperationList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -57503,19 +70029,35 @@ func (c *RegionInstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Updates a managed instance group using the information that you specify in the request. This operation is marked as DONE when the group is updated even if the instances in the group have not yet been updated. 
You must separately verify the status of the individual instances with the listmanagedinstances method.", - // "httpMethod": "PUT", - // "id": "compute.regionInstanceGroupManagers.update", + // "description": "Retrieves a list of Operation resources contained within the specified region.", + // "httpMethod": "GET", + // "id": "compute.regionOperations.list", // "parameterOrder": [ // "project", - // "region", - // "instanceGroupManager" + // "region" // ], // "parameters": { - // "instanceGroupManager": { - // "description": "The name of the instance group manager.", - // "location": "path", - // "required": true, + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -57526,58 +70068,73 @@ func (c *RegionInstanceGroupManagersUpdateCall) Do(opts ...googleapi.CallOption) // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", - // "request": { - // "$ref": "InstanceGroupManager" - // }, + // "path": "{project}/regions/{region}/operations", // "response": { - // "$ref": "Operation" + // "$ref": "OperationList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionInstanceGroups.get": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionOperationsListCall) Pages(ctx context.Context, f func(*OperationList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type RegionInstanceGroupsGetCall struct { - s *Service - project string - region string - instanceGroup string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// method id "compute.regions.get": + +type RegionsGetCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified instance group resource. -func (r *RegionInstanceGroupsService) Get(project string, region string, instanceGroup string) *RegionInstanceGroupsGetCall { - c := &RegionInstanceGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified Region resource. Get a list of available +// regions by making a list() request. 
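The Pages helper added with RegionOperationsListCall above drives the pageToken/nextPageToken exchange for the caller. A sketch of listing operations that are not yet DONE, assuming svc is an authenticated *compute.Service built as in the earlier example; the "status ne DONE" filter string follows the eq/ne syntax described here but is itself an assumption, not taken from this diff.

package main

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// listPendingRegionOps walks every page of compute.regionOperations.list,
// printing operations whose status is not DONE.
func listPendingRegionOps(ctx context.Context, svc *compute.Service, project, region string) error {
	return svc.RegionOperations.List(project, region).
		Filter("status ne DONE"). // eq/ne filter syntax, per the description above
		MaxResults(100).
		Pages(ctx, func(page *compute.OperationList) error {
			for _, op := range page.Items {
				fmt.Printf("%s\t%s\n", op.Name, op.Status)
			}
			return nil // a non-nil error here would stop the iteration
		})
}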
+// For details, see https://cloud.google.com/compute/docs/reference/latest/regions/get +func (r *RegionsService) Get(project string, region string) *RegionsGetCall { + c := &RegionsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroup = instanceGroup return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupsGetCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsGetCall { +func (c *RegionsGetCall) Fields(s ...googleapi.Field) *RegionsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -57587,7 +70144,7 @@ func (c *RegionInstanceGroupsGetCall) Fields(s ...googleapi.Field) *RegionInstan // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionInstanceGroupsGetCall) IfNoneMatch(entityTag string) *RegionInstanceGroupsGetCall { +func (c *RegionsGetCall) IfNoneMatch(entityTag string) *RegionsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -57595,21 +70152,21 @@ func (c *RegionInstanceGroupsGetCall) IfNoneMatch(entityTag string) *RegionInsta // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupsGetCall) Context(ctx context.Context) *RegionInstanceGroupsGetCall { +func (c *RegionsGetCall) Context(ctx context.Context) *RegionsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupsGetCall) Header() http.Header { +func (c *RegionsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -57620,26 +70177,25 @@ func (c *RegionInstanceGroupsGetCall) doRequest(alt string) (*http.Response, err } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{instanceGroup}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroup": c.instanceGroup, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroups.get" call. -// Exactly one of *InstanceGroup or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *InstanceGroup.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup, error) { +// Do executes the "compute.regions.get" call. +// Exactly one of *Region or error will be non-nil. 
Any non-2xx status +// code is an error. Response headers are in either +// *Region.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *RegionsGetCall) Do(opts ...googleapi.CallOption) (*Region, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -57658,7 +70214,7 @@ func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*Instanc if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &InstanceGroup{ + ret := &Region{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -57670,21 +70226,14 @@ func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*Instanc } return ret, nil // { - // "description": "Returns the specified instance group resource.", + // "description": "Returns the specified Region resource. Get a list of available regions by making a list() request.", // "httpMethod": "GET", - // "id": "compute.regionInstanceGroups.get", + // "id": "compute.regions.get", // "parameterOrder": [ // "project", - // "region", - // "instanceGroup" + // "region" // ], // "parameters": { - // "instanceGroup": { - // "description": "Name of the instance group resource to return.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -57693,15 +70242,16 @@ func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*Instanc // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region resource to return.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}", + // "path": "{project}/regions/{region}", // "response": { - // "$ref": "InstanceGroup" + // "$ref": "Region" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -57712,24 +70262,23 @@ func (c *RegionInstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*Instanc } -// method id "compute.regionInstanceGroups.list": +// method id "compute.regions.list": -type RegionInstanceGroupsListCall struct { +type RegionsListCall struct { s *Service project string - region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves the list of instance group resources contained within -// the specified region. -func (r *RegionInstanceGroupsService) List(project string, region string) *RegionInstanceGroupsListCall { - c := &RegionInstanceGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of region resources available to the +// specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/regions/list +func (r *RegionsService) List(project string) *RegionsListCall { + c := &RegionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region return c } @@ -57759,7 +70308,7 @@ func (r *RegionInstanceGroupsService) List(project string, region string) *Regio // true) (zone eq us-central1-f). 
Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *RegionInstanceGroupsListCall) Filter(filter string) *RegionInstanceGroupsListCall { +func (c *RegionsListCall) Filter(filter string) *RegionsListCall { c.urlParams_.Set("filter", filter) return c } @@ -57770,7 +70319,7 @@ func (c *RegionInstanceGroupsListCall) Filter(filter string) *RegionInstanceGrou // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *RegionInstanceGroupsListCall) MaxResults(maxResults int64) *RegionInstanceGroupsListCall { +func (c *RegionsListCall) MaxResults(maxResults int64) *RegionsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -57787,7 +70336,7 @@ func (c *RegionInstanceGroupsListCall) MaxResults(maxResults int64) *RegionInsta // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *RegionInstanceGroupsListCall) OrderBy(orderBy string) *RegionInstanceGroupsListCall { +func (c *RegionsListCall) OrderBy(orderBy string) *RegionsListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -57795,7 +70344,7 @@ func (c *RegionInstanceGroupsListCall) OrderBy(orderBy string) *RegionInstanceGr // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *RegionInstanceGroupsListCall) PageToken(pageToken string) *RegionInstanceGroupsListCall { +func (c *RegionsListCall) PageToken(pageToken string) *RegionsListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -57803,7 +70352,7 @@ func (c *RegionInstanceGroupsListCall) PageToken(pageToken string) *RegionInstan // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupsListCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsListCall { +func (c *RegionsListCall) Fields(s ...googleapi.Field) *RegionsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -57813,7 +70362,7 @@ func (c *RegionInstanceGroupsListCall) Fields(s ...googleapi.Field) *RegionInsta // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionInstanceGroupsListCall) IfNoneMatch(entityTag string) *RegionInstanceGroupsListCall { +func (c *RegionsListCall) IfNoneMatch(entityTag string) *RegionsListCall { c.ifNoneMatch_ = entityTag return c } @@ -57821,21 +70370,21 @@ func (c *RegionInstanceGroupsListCall) IfNoneMatch(entityTag string) *RegionInst // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupsListCall) Context(ctx context.Context) *RegionInstanceGroupsListCall { +func (c *RegionsListCall) Context(ctx context.Context) *RegionsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
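The RegionsGetCall added earlier in this hunk also supports the Fields helper for partial responses. A sketch, assuming svc from the first example; the field mask entries and the printed fields are illustrative, not prescribed by this diff.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

// describeRegion fetches a single Region, requesting only the fields it
// prints via a partial-response field mask.
func describeRegion(ctx context.Context, svc *compute.Service, project, region string) {
	r, err := svc.Regions.Get(project, region).
		Fields("name", "status", "zones"). // partial response; fields are illustrative
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s is %s with %d zones\n", r.Name, r.Status, len(r.Zones))
}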
-func (c *RegionInstanceGroupsListCall) Header() http.Header { +func (c *RegionsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -57846,25 +70395,24 @@ func (c *RegionInstanceGroupsListCall) doRequest(alt string) (*http.Response, er } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroups.list" call. -// Exactly one of *RegionInstanceGroupList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *RegionInstanceGroupList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupList, error) { +// Do executes the "compute.regions.list" call. +// Exactly one of *RegionList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *RegionList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionsListCall) Do(opts ...googleapi.CallOption) (*RegionList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -57883,7 +70431,7 @@ func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*Region if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RegionInstanceGroupList{ + ret := &RegionList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -57895,12 +70443,11 @@ func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*Region } return ret, nil // { - // "description": "Retrieves the list of instance group resources contained within the specified region.", + // "description": "Retrieves the list of region resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.regionInstanceGroups.list", + // "id": "compute.regions.list", // "parameterOrder": [ - // "project", - // "region" + // "project" // ], // "parameters": { // "filter": { @@ -57932,17 +70479,11 @@ func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*Region // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroups", + // "path": "{project}/regions", // "response": { - // "$ref": "RegionInstanceGroupList" + // "$ref": "RegionList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -57956,7 +70497,7 @@ func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*Region // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *RegionInstanceGroupsListCall) Pages(ctx context.Context, f func(*RegionInstanceGroupList) error) error { +func (c *RegionsListCall) Pages(ctx context.Context, f func(*RegionList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -57974,29 +70515,21 @@ func (c *RegionInstanceGroupsListCall) Pages(ctx context.Context, f func(*Region } } -// method id "compute.regionInstanceGroups.listInstances": +// method id "compute.routers.aggregatedList": -type RegionInstanceGroupsListInstancesCall struct { - s *Service - project string - region string - instanceGroup string - regioninstancegroupslistinstancesrequest *RegionInstanceGroupsListInstancesRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RoutersAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// ListInstances: Lists the instances in the specified instance group -// and displays information about the named ports. Depending on the -// specified options, this method can list all instances or only the -// instances that are running. 
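With the RegionsListCall and its Pages helper above, enumerating a project's regions is one callback per page. A sketch, again assuming an authenticated svc; filtering on Status == "UP" is only an example of post-processing the results.

package main

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// printUpRegions walks every page of compute.regions.list and prints each
// region whose status is UP.
func printUpRegions(ctx context.Context, svc *compute.Service, project string) error {
	return svc.Regions.List(project).Pages(ctx, func(page *compute.RegionList) error {
		for _, r := range page.Items {
			if r.Status == "UP" {
				fmt.Println(r.Name)
			}
		}
		return nil
	})
}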
-func (r *RegionInstanceGroupsService) ListInstances(project string, region string, instanceGroup string, regioninstancegroupslistinstancesrequest *RegionInstanceGroupsListInstancesRequest) *RegionInstanceGroupsListInstancesCall { - c := &RegionInstanceGroupsListInstancesCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// AggregatedList: Retrieves an aggregated list of routers. +func (r *RoutersService) AggregatedList(project string) *RoutersAggregatedListCall { + c := &RoutersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.instanceGroup = instanceGroup - c.regioninstancegroupslistinstancesrequest = regioninstancegroupslistinstancesrequest return c } @@ -58026,7 +70559,7 @@ func (r *RegionInstanceGroupsService) ListInstances(project string, region strin // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *RegionInstanceGroupsListInstancesCall) Filter(filter string) *RegionInstanceGroupsListInstancesCall { +func (c *RoutersAggregatedListCall) Filter(filter string) *RoutersAggregatedListCall { c.urlParams_.Set("filter", filter) return c } @@ -58037,7 +70570,7 @@ func (c *RegionInstanceGroupsListInstancesCall) Filter(filter string) *RegionIns // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *RegionInstanceGroupsListInstancesCall) MaxResults(maxResults int64) *RegionInstanceGroupsListInstancesCall { +func (c *RoutersAggregatedListCall) MaxResults(maxResults int64) *RoutersAggregatedListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -58054,7 +70587,7 @@ func (c *RegionInstanceGroupsListInstancesCall) MaxResults(maxResults int64) *Re // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *RegionInstanceGroupsListInstancesCall) OrderBy(orderBy string) *RegionInstanceGroupsListInstancesCall { +func (c *RoutersAggregatedListCall) OrderBy(orderBy string) *RoutersAggregatedListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -58062,7 +70595,7 @@ func (c *RegionInstanceGroupsListInstancesCall) OrderBy(orderBy string) *RegionI // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *RegionInstanceGroupsListInstancesCall) PageToken(pageToken string) *RegionInstanceGroupsListInstancesCall { +func (c *RoutersAggregatedListCall) PageToken(pageToken string) *RoutersAggregatedListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -58070,62 +70603,67 @@ func (c *RegionInstanceGroupsListInstancesCall) PageToken(pageToken string) *Reg // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsListInstancesCall { +func (c *RoutersAggregatedListCall) Fields(s ...googleapi.Field) *RoutersAggregatedListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RoutersAggregatedListCall) IfNoneMatch(entityTag string) *RoutersAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupsListInstancesCall) Context(ctx context.Context) *RegionInstanceGroupsListInstancesCall { +func (c *RoutersAggregatedListCall) Context(ctx context.Context) *RoutersAggregatedListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupsListInstancesCall) Header() http.Header { +func (c *RoutersAggregatedListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *RoutersAggregatedListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupslistinstancesrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/routers") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroup": c.instanceGroup, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroups.listInstances" call. -// Exactly one of *RegionInstanceGroupsListInstances or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *RegionInstanceGroupsListInstances.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupsListInstances, error) { +// Do executes the "compute.routers.aggregatedList" call. +// Exactly one of *RouterAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *RouterAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RoutersAggregatedListCall) Do(opts ...googleapi.CallOption) (*RouterAggregatedList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -58144,7 +70682,7 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RegionInstanceGroupsListInstances{ + ret := &RouterAggregatedList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -58156,13 +70694,11 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Lists the instances in the specified instance group and displays information about the named ports. Depending on the specified options, this method can list all instances or only the instances that are running.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroups.listInstances", + // "description": "Retrieves an aggregated list of routers.", + // "httpMethod": "GET", + // "id": "compute.routers.aggregatedList", // "parameterOrder": [ - // "project", - // "region", - // "instanceGroup" + // "project" // ], // "parameters": { // "filter": { @@ -58170,12 +70706,6 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) // "location": "query", // "type": "string" // }, - // "instanceGroup": { - // "description": "Name of the regional instance group for which we want to list the instances.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "maxResults": { // "default": "500", // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", @@ -58200,20 +70730,11 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", - // "request": { - // "$ref": "RegionInstanceGroupsListInstancesRequest" - // }, + // "path": "{project}/aggregated/routers", // "response": { - // "$ref": "RegionInstanceGroupsListInstances" + // "$ref": "RouterAggregatedList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -58227,7 +70748,7 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
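The RoutersAggregatedListCall above (with its Pages helper, continued just below) returns results grouped by scope. The sketch assumes the RouterAggregatedList Items map is keyed by region scope such as "regions/us-central1", as with other compute aggregated lists; that keying is an assumption, not something shown in this hunk.

package main

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// listAllRouters walks the aggregated router list across all regions.
// Scopes with no routers simply contribute nothing to the output.
func listAllRouters(ctx context.Context, svc *compute.Service, project string) error {
	return svc.Routers.AggregatedList(project).Pages(ctx, func(page *compute.RouterAggregatedList) error {
		for scope, scoped := range page.Items {
			for _, router := range scoped.Routers {
				fmt.Printf("%s\t%s\n", scope, router.Name)
			}
		}
		return nil
	})
}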
-func (c *RegionInstanceGroupsListInstancesCall) Pages(ctx context.Context, f func(*RegionInstanceGroupsListInstances) error) error { +func (c *RoutersAggregatedListCall) Pages(ctx context.Context, f func(*RouterAggregatedList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -58245,27 +70766,24 @@ func (c *RegionInstanceGroupsListInstancesCall) Pages(ctx context.Context, f fun } } -// method id "compute.regionInstanceGroups.setNamedPorts": +// method id "compute.routers.delete": -type RegionInstanceGroupsSetNamedPortsCall struct { - s *Service - project string - region string - instanceGroup string - regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RoutersDeleteCall struct { + s *Service + project string + region string + router string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetNamedPorts: Sets the named ports for the specified regional -// instance group. -func (r *RegionInstanceGroupsService) SetNamedPorts(project string, region string, instanceGroup string, regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest) *RegionInstanceGroupsSetNamedPortsCall { - c := &RegionInstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified Router resource. +func (r *RoutersService) Delete(project string, region string, router string) *RoutersDeleteCall { + c := &RoutersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.instanceGroup = instanceGroup - c.regioninstancegroupssetnamedportsrequest = regioninstancegroupssetnamedportsrequest + c.router = router return c } @@ -58283,7 +70801,7 @@ func (r *RegionInstanceGroupsService) SetNamedPorts(project string, region strin // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupsSetNamedPortsCall) RequestId(requestId string) *RegionInstanceGroupsSetNamedPortsCall { +func (c *RoutersDeleteCall) RequestId(requestId string) *RoutersDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -58291,7 +70809,7 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) RequestId(requestId string) *Reg // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsSetNamedPortsCall { +func (c *RoutersDeleteCall) Fields(s ...googleapi.Field) *RoutersDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -58299,53 +70817,48 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *Re // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupsSetNamedPortsCall) Context(ctx context.Context) *RegionInstanceGroupsSetNamedPortsCall { +func (c *RoutersDeleteCall) Context(ctx context.Context) *RoutersDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionInstanceGroupsSetNamedPortsCall) Header() http.Header { +func (c *RoutersDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { +func (c *RoutersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupssetnamedportsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroup": c.instanceGroup, + "project": c.project, + "region": c.region, + "router": c.router, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroups.setNamedPorts" call. +// Do executes the "compute.routers.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RoutersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -58376,21 +70889,15 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Sets the named ports for the specified regional instance group.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroups.setNamedPorts", + // "description": "Deletes the specified Router resource.", + // "httpMethod": "DELETE", + // "id": "compute.routers.delete", // "parameterOrder": [ // "project", // "region", - // "instanceGroup" + // "router" // ], // "parameters": { - // "instanceGroup": { - // "description": "The name of the regional instance group where the named ports are updated.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -58399,8 +70906,9 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "Name of the region for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -58408,12 +70916,16 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "router": { + // "description": "Name of the Router resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts", - // "request": { - // "$ref": "RegionInstanceGroupsSetNamedPortsRequest" - // }, + // "path": "{project}/regions/{region}/routers/{router}", // "response": { // "$ref": "Operation" // }, @@ -58425,88 +70937,95 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) } -// method id "compute.regionInstanceGroups.testIamPermissions": +// method id "compute.routers.get": -type RegionInstanceGroupsTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RoutersGetCall struct { + s *Service + project string + region string + router string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. 
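The RoutersDeleteCall above returns an Operation, and the requestId parameter documented with it makes the delete safe to retry. A sketch; the requestID value is supplied by the caller and must be a valid non-zero UUID, which is the only constraint stated in the diff.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

// deleteRouter issues compute.routers.delete. Reusing the same requestID on
// a retry lets the server deduplicate the request, per the RequestId
// documentation above.
func deleteRouter(ctx context.Context, svc *compute.Service, project, region, router, requestID string) {
	op, err := svc.Routers.Delete(project, region, router).
		RequestId(requestID).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	// The delete is asynchronous; poll the returned regional operation
	// (see the RegionOperationsGetCall sketch earlier) until it is DONE.
	fmt.Println("delete started, operation:", op.Name)
}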
-func (r *RegionInstanceGroupsService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionInstanceGroupsTestIamPermissionsCall { - c := &RegionInstanceGroupsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified Router resource. Get a list of available +// routers by making a list() request. +func (r *RoutersService) Get(project string, region string, router string) *RoutersGetCall { + c := &RoutersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.router = router return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionInstanceGroupsTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsTestIamPermissionsCall { +func (c *RoutersGetCall) Fields(s ...googleapi.Field) *RoutersGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RoutersGetCall) IfNoneMatch(entityTag string) *RoutersGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionInstanceGroupsTestIamPermissionsCall) Context(ctx context.Context) *RegionInstanceGroupsTestIamPermissionsCall { +func (c *RoutersGetCall) Context(ctx context.Context) *RoutersGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionInstanceGroupsTestIamPermissionsCall) Header() http.Header { +func (c *RoutersGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *RoutersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/instanceGroups/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, + "region": c.region, + "router": c.router, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroups.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionInstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.routers.get" call. +// Exactly one of *Router or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Router.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *RoutersGetCall) Do(opts ...googleapi.CallOption) (*Router, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -58525,7 +71044,7 @@ func (c *RegionInstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOp if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Router{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -58537,13 +71056,13 @@ func (c *RegionInstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOp } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroups.testIamPermissions", + // "description": "Returns the specified Router resource. 
Get a list of available routers by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.routers.get", // "parameterOrder": [ // "project", // "region", - // "resource" + // "router" // ], // "parameters": { // "project": { @@ -58554,26 +71073,23 @@ func (c *RegionInstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOp // "type": "string" // }, // "region": { - // "description": "The name of the region for this request.", + // "description": "Name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name of the resource for this request.", + // "router": { + // "description": "Name of the Router resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/instanceGroups/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/regions/{region}/routers/{router}", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Router" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -58584,102 +71100,134 @@ func (c *RegionInstanceGroupsTestIamPermissionsCall) Do(opts ...googleapi.CallOp } -// method id "compute.regionOperations.delete": +// method id "compute.routers.getRouterStatus": -type RegionOperationsDeleteCall struct { - s *Service - project string - region string - operation string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RoutersGetRouterStatusCall struct { + s *Service + project string + region string + router string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified region-specific Operations resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/delete -func (r *RegionOperationsService) Delete(project string, region string, operation string) *RegionOperationsDeleteCall { - c := &RegionOperationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// GetRouterStatus: Retrieves runtime information of the specified +// router. +func (r *RoutersService) GetRouterStatus(project string, region string, router string) *RoutersGetRouterStatusCall { + c := &RoutersGetRouterStatusCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.operation = operation + c.router = router return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionOperationsDeleteCall) Fields(s ...googleapi.Field) *RegionOperationsDeleteCall { +func (c *RoutersGetRouterStatusCall) Fields(s ...googleapi.Field) *RoutersGetRouterStatusCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
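The IfNoneMatch option added for RoutersGetCall (and the other read calls in this hunk) turns a repeat fetch into a conditional request. The sketch below caches a Router by entity tag; whether this API actually returns an Etag header is not established by the diff, so the tag handling is an assumption and only the IsNotModified pattern is taken from the documentation above.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// refreshRouter re-fetches a Router only if it changed since lastETag and
// returns the (possibly cached) router plus the tag to use next time.
func refreshRouter(ctx context.Context, svc *compute.Service, project, region, name, lastETag string, cached *compute.Router) (*compute.Router, string) {
	call := svc.Routers.Get(project, region, name).Context(ctx)
	if lastETag != "" {
		call = call.IfNoneMatch(lastETag)
	}
	r, err := call.Do()
	if googleapi.IsNotModified(err) {
		return cached, lastETag // server answered 304; keep the cached copy
	}
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("router updated:", r.Name)
	// Assumes the response carries an Etag header; adjust if it does not.
	return r, r.ServerResponse.Header.Get("Etag")
}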
+func (c *RoutersGetRouterStatusCall) IfNoneMatch(entityTag string) *RoutersGetRouterStatusCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionOperationsDeleteCall) Context(ctx context.Context) *RegionOperationsDeleteCall { +func (c *RoutersGetRouterStatusCall) Context(ctx context.Context) *RoutersGetRouterStatusCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionOperationsDeleteCall) Header() http.Header { +func (c *RoutersGetRouterStatusCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RoutersGetRouterStatusCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations/{operation}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}/getRouterStatus") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "operation": c.operation, + "project": c.project, + "region": c.region, + "router": c.router, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionOperations.delete" call. -func (c *RegionOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { +// Do executes the "compute.routers.getRouterStatus" call. +// Exactly one of *RouterStatusResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *RouterStatusResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RoutersGetRouterStatusCall) Do(opts ...googleapi.CallOption) (*RouterStatusResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } if err != nil { - return err + return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return err + return nil, err } - return nil + ret := &RouterStatusResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil // { - // "description": "Deletes the specified region-specific Operations resource.", - // "httpMethod": "DELETE", - // "id": "compute.regionOperations.delete", + // "description": "Retrieves runtime information of the specified router.", + // "httpMethod": "GET", + // "id": "compute.routers.getRouterStatus", // "parameterOrder": [ // "project", // "region", - // "operation" + // "router" // ], // "parameters": { - // "operation": { - // "description": "Name of the Operations resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -58693,106 +71241,126 @@ func (c *RegionOperationsDeleteCall) Do(opts ...googleapi.CallOption) error { // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "router": { + // "description": "Name of the Router resource to query.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/operations/{operation}", + // "path": "{project}/regions/{region}/routers/{router}/getRouterStatus", + // "response": { + // "$ref": "RouterStatusResponse" + // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.regionOperations.get": +// method id "compute.routers.insert": -type RegionOperationsGetCall struct { - s *Service - project string - region string - operation string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RoutersInsertCall struct { + s *Service + project string + region string + router *Router + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Retrieves the specified region-specific Operations resource. -// For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/get -func (r *RegionOperationsService) Get(project string, region string, operation string) *RegionOperationsGetCall { - c := &RegionOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a Router resource in the specified project and region +// using the data included in the request. 
+func (r *RoutersService) Insert(project string, region string, router *Router) *RoutersInsertCall { + c := &RoutersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.operation = operation + c.router = router + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RoutersInsertCall) RequestId(requestId string) *RoutersInsertCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionOperationsGetCall) Fields(s ...googleapi.Field) *RegionOperationsGetCall { +func (c *RoutersInsertCall) Fields(s ...googleapi.Field) *RoutersInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionOperationsGetCall) IfNoneMatch(entityTag string) *RegionOperationsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionOperationsGetCall) Context(ctx context.Context) *RegionOperationsGetCall { +func (c *RoutersInsertCall) Context(ctx context.Context) *RoutersInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionOperationsGetCall) Header() http.Header { +func (c *RoutersInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionOperationsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RoutersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.router) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations/{operation}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "operation": c.operation, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionOperations.get" call. +// Do executes the "compute.routers.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RoutersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -58823,22 +71391,14 @@ func (c *RegionOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Retrieves the specified region-specific Operations resource.", - // "httpMethod": "GET", - // "id": "compute.regionOperations.get", + // "description": "Creates a Router resource in the specified project and region using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.routers.insert", // "parameterOrder": [ // "project", - // "region", - // "operation" + // "region" // ], // "parameters": { - // "operation": { - // "description": "Name of the Operations resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -58852,24 +71412,31 @@ func (c *RegionOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "{project}/regions/{region}/operations/{operation}", + // "path": "{project}/regions/{region}/routers", + // "request": { + // "$ref": "Router" + // }, // "response": { // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.regionOperations.list": +// method id "compute.routers.list": -type RegionOperationsListCall struct { +type RoutersListCall struct { s *Service project string region string @@ -58879,11 +71446,10 @@ type RegionOperationsListCall struct { header_ http.Header } -// List: Retrieves a list of Operation resources contained within the -// specified region. -// For details, see https://cloud.google.com/compute/docs/reference/latest/regionOperations/list -func (r *RegionOperationsService) List(project string, region string) *RegionOperationsListCall { - c := &RegionOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves a list of Router resources available to the specified +// project. +func (r *RoutersService) List(project string, region string) *RoutersListCall { + c := &RoutersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region return c @@ -58915,7 +71481,7 @@ func (r *RegionOperationsService) List(project string, region string) *RegionOpe // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. -func (c *RegionOperationsListCall) Filter(filter string) *RegionOperationsListCall { +func (c *RoutersListCall) Filter(filter string) *RoutersListCall { c.urlParams_.Set("filter", filter) return c } @@ -58926,7 +71492,7 @@ func (c *RegionOperationsListCall) Filter(filter string) *RegionOperationsListCa // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *RegionOperationsListCall) MaxResults(maxResults int64) *RegionOperationsListCall { +func (c *RoutersListCall) MaxResults(maxResults int64) *RoutersListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -58943,7 +71509,7 @@ func (c *RegionOperationsListCall) MaxResults(maxResults int64) *RegionOperation // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *RegionOperationsListCall) OrderBy(orderBy string) *RegionOperationsListCall { +func (c *RoutersListCall) OrderBy(orderBy string) *RoutersListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -58951,7 +71517,7 @@ func (c *RegionOperationsListCall) OrderBy(orderBy string) *RegionOperationsList // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. 
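// Illustrative usage sketch (editorial addition): collecting every Router in a
// region via the generated Pages helper, which follows nextPageToken
// automatically so the caller never touches PageToken directly. RouterList is
// assumed to carry its results in an Items slice, matching the other
// generated list types.
func exampleListRouters(ctx context.Context, svc *Service, project, region string) ([]*Router, error) {
	var all []*Router
	err := svc.Routers.List(project, region).
		MaxResults(500).
		Pages(ctx, func(page *RouterList) error {
			all = append(all, page.Items...)
			return nil
		})
	return all, err
}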
-func (c *RegionOperationsListCall) PageToken(pageToken string) *RegionOperationsListCall { +func (c *RoutersListCall) PageToken(pageToken string) *RoutersListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -58959,7 +71525,7 @@ func (c *RegionOperationsListCall) PageToken(pageToken string) *RegionOperations // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionOperationsListCall) Fields(s ...googleapi.Field) *RegionOperationsListCall { +func (c *RoutersListCall) Fields(s ...googleapi.Field) *RoutersListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -58969,7 +71535,7 @@ func (c *RegionOperationsListCall) Fields(s ...googleapi.Field) *RegionOperation // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RegionOperationsListCall) IfNoneMatch(entityTag string) *RegionOperationsListCall { +func (c *RoutersListCall) IfNoneMatch(entityTag string) *RoutersListCall { c.ifNoneMatch_ = entityTag return c } @@ -58977,21 +71543,21 @@ func (c *RegionOperationsListCall) IfNoneMatch(entityTag string) *RegionOperatio // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionOperationsListCall) Context(ctx context.Context) *RegionOperationsListCall { +func (c *RoutersListCall) Context(ctx context.Context) *RoutersListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionOperationsListCall) Header() http.Header { +func (c *RoutersListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionOperationsListCall) doRequest(alt string) (*http.Response, error) { +func (c *RoutersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -59002,7 +71568,7 @@ func (c *RegionOperationsListCall) doRequest(alt string) (*http.Response, error) } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders @@ -59013,14 +71579,14 @@ func (c *RegionOperationsListCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionOperations.list" call. -// Exactly one of *OperationList or error will be non-nil. Any non-2xx +// Do executes the "compute.routers.list" call. +// Exactly one of *RouterList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *OperationList.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationList, error) { +// *RouterList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RoutersListCall) Do(opts ...googleapi.CallOption) (*RouterList, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -59039,7 +71605,7 @@ func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &OperationList{ + ret := &RouterList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -59051,9 +71617,9 @@ func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL } return ret, nil // { - // "description": "Retrieves a list of Operation resources contained within the specified region.", + // "description": "Retrieves a list of Router resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.regionOperations.list", + // "id": "compute.routers.list", // "parameterOrder": [ // "project", // "region" @@ -59097,9 +71663,9 @@ func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL // "type": "string" // } // }, - // "path": "{project}/regions/{region}/operations", + // "path": "{project}/regions/{region}/routers", // "response": { - // "$ref": "OperationList" + // "$ref": "RouterList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -59113,7 +71679,7 @@ func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *RegionOperationsListCall) Pages(ctx context.Context, f func(*OperationList) error) error { +func (c *RoutersListCall) Pages(ctx context.Context, f func(*RouterList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -59131,93 +71697,108 @@ func (c *RegionOperationsListCall) Pages(ctx context.Context, f func(*OperationL } } -// method id "compute.regions.get": +// method id "compute.routers.patch": -type RegionsGetCall struct { - s *Service - project string - region string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RoutersPatchCall struct { + s *Service + project string + region string + router string + router2 *Router + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns the specified Region resource. Get a list of available -// regions by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/regions/get -func (r *RegionsService) Get(project string, region string) *RegionsGetCall { - c := &RegionsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Patches the specified Router resource with the data included +// in the request. This method supports PATCH semantics and uses JSON +// merge patch format and processing rules. 
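// Illustrative usage sketch (editorial addition): a merge patch that changes a
// single field of an existing Router and leaves the rest untouched, returning
// a regional Operation to poll. Description is assumed here as a
// representative mutable Router field; it is not shown in this hunk.
func examplePatchRouterDescription(ctx context.Context, svc *Service, project, region, name, desc string) (*Operation, error) {
	patch := &Router{Description: desc}
	return svc.Routers.Patch(project, region, name, patch).Context(ctx).Do()
}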
+func (r *RoutersService) Patch(project string, region string, router string, router2 *Router) *RoutersPatchCall { + c := &RoutersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region + c.router = router + c.router2 = router2 + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RoutersPatchCall) RequestId(requestId string) *RoutersPatchCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionsGetCall) Fields(s ...googleapi.Field) *RegionsGetCall { +func (c *RoutersPatchCall) Fields(s ...googleapi.Field) *RoutersPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionsGetCall) IfNoneMatch(entityTag string) *RegionsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionsGetCall) Context(ctx context.Context) *RegionsGetCall { +func (c *RoutersPatchCall) Context(ctx context.Context) *RoutersPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionsGetCall) Header() http.Header { +func (c *RoutersPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RoutersPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.router2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("PATCH", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, + "router": c.router, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regions.get" call. -// Exactly one of *Region or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Region.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *RegionsGetCall) Do(opts ...googleapi.CallOption) (*Region, error) { +// Do executes the "compute.routers.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RoutersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -59236,7 +71817,7 @@ func (c *RegionsGetCall) Do(opts ...googleapi.CallOption) (*Region, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Region{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -59248,12 +71829,13 @@ func (c *RegionsGetCall) Do(opts ...googleapi.CallOption) (*Region, error) { } return ret, nil // { - // "description": "Returns the specified Region resource. Get a list of available regions by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.regions.get", + // "description": "Patches the specified Router resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.routers.patch", // "parameterOrder": [ // "project", - // "region" + // "region", + // "router" // ], // "parameters": { // "project": { @@ -59264,177 +71846,123 @@ func (c *RegionsGetCall) Do(opts ...googleapi.CallOption) (*Region, error) { // "type": "string" // }, // "region": { - // "description": "Name of the region resource to return.", + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "router": { + // "description": "Name of the Router resource to patch.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}", + // "path": "{project}/regions/{region}/routers/{router}", + // "request": { + // "$ref": "Router" + // }, // "response": { - // "$ref": "Region" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.regions.list": +// method id "compute.routers.preview": -type RegionsListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RoutersPreviewCall struct { + s *Service + project string + region string + router string + router2 *Router + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves the list of region resources available to the -// specified project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/regions/list -func (r *RegionsService) List(project string) *RegionsListCall { - c := &RegionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Preview: Preview fields auto-generated during router create and +// update operations. Calling this method does NOT create or update the +// router. +func (r *RoutersService) Preview(project string, region string, router string, router2 *Router) *RoutersPreviewCall { + c := &RoutersPreviewCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. 
-func (c *RegionsListCall) Filter(filter string) *RegionsListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *RegionsListCall) MaxResults(maxResults int64) *RegionsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *RegionsListCall) OrderBy(orderBy string) *RegionsListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *RegionsListCall) PageToken(pageToken string) *RegionsListCall { - c.urlParams_.Set("pageToken", pageToken) + c.region = region + c.router = router + c.router2 = router2 return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionsListCall) Fields(s ...googleapi.Field) *RegionsListCall { +func (c *RoutersPreviewCall) Fields(s ...googleapi.Field) *RoutersPreviewCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionsListCall) IfNoneMatch(entityTag string) *RegionsListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionsListCall) Context(ctx context.Context) *RegionsListCall { +func (c *RoutersPreviewCall) Context(ctx context.Context) *RoutersPreviewCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RegionsListCall) Header() http.Header { +func (c *RoutersPreviewCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionsListCall) doRequest(alt string) (*http.Response, error) { +func (c *RoutersPreviewCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.router2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}/preview") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, + "region": c.region, + "router": c.router, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regions.list" call. -// Exactly one of *RegionList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *RegionList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionsListCall) Do(opts ...googleapi.CallOption) (*RegionList, error) { +// Do executes the "compute.routers.preview" call. +// Exactly one of *RoutersPreviewResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *RoutersPreviewResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RoutersPreviewCall) Do(opts ...googleapi.CallOption) (*RoutersPreviewResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -59453,7 +71981,7 @@ func (c *RegionsListCall) Do(opts ...googleapi.CallOption) (*RegionList, error) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RegionList{ + ret := &RoutersPreviewResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -59465,47 +71993,43 @@ func (c *RegionsListCall) Do(opts ...googleapi.CallOption) (*RegionList, error) } return ret, nil // { - // "description": "Retrieves the list of region resources available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.regions.list", + // "description": "Preview fields auto-generated during router create and update operations. Calling this method does NOT create or update the router.", + // "httpMethod": "POST", + // "id": "compute.routers.preview", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "router" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "router": { + // "description": "Name of the Router resource to query.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions", + // "path": "{project}/regions/{region}/routers/{router}/preview", + // "request": { + // "$ref": "Router" + // }, // "response": { - // "$ref": "RegionList" + // "$ref": "RoutersPreviewResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -59516,176 +72040,88 @@ func (c *RegionsListCall) Do(opts ...googleapi.CallOption) (*RegionList, error) } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *RegionsListCall) Pages(ctx context.Context, f func(*RegionList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.routers.aggregatedList": +// method id "compute.routers.testIamPermissions": -type RoutersAggregatedListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type RoutersTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// AggregatedList: Retrieves an aggregated list of routers. -func (r *RoutersService) AggregatedList(project string) *RoutersAggregatedListCall { - c := &RoutersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *RoutersService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RoutersTestIamPermissionsCall { + c := &RoutersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. 
The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. -// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *RoutersAggregatedListCall) Filter(filter string) *RoutersAggregatedListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *RoutersAggregatedListCall) MaxResults(maxResults int64) *RoutersAggregatedListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *RoutersAggregatedListCall) OrderBy(orderBy string) *RoutersAggregatedListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *RoutersAggregatedListCall) PageToken(pageToken string) *RoutersAggregatedListCall { - c.urlParams_.Set("pageToken", pageToken) + c.region = region + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RoutersAggregatedListCall) Fields(s ...googleapi.Field) *RoutersAggregatedListCall { +func (c *RoutersTestIamPermissionsCall) Fields(s ...googleapi.Field) *RoutersTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
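// Illustrative usage sketch (editorial addition): asking which router
// permissions the caller actually holds. The Permissions fields on
// TestPermissionsRequest and TestPermissionsResponse are assumed from the
// wider compute surface; only the request and response type names appear in
// this hunk.
func exampleRouterPermissions(ctx context.Context, svc *Service, project, region, router string) ([]string, error) {
	req := &TestPermissionsRequest{
		Permissions: []string{"compute.routers.get", "compute.routers.update"},
	}
	resp, err := svc.Routers.TestIamPermissions(project, region, router, req).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	return resp.Permissions, nil
}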
-func (c *RoutersAggregatedListCall) IfNoneMatch(entityTag string) *RoutersAggregatedListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutersAggregatedListCall) Context(ctx context.Context) *RoutersAggregatedListCall { +func (c *RoutersTestIamPermissionsCall) Context(ctx context.Context) *RoutersTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RoutersAggregatedListCall) Header() http.Header { +func (c *RoutersTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RoutersAggregatedListCall) doRequest(alt string) (*http.Response, error) { +func (c *RoutersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/routers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "region": c.region, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routers.aggregatedList" call. -// Exactly one of *RouterAggregatedList or error will be non-nil. Any +// Do executes the "compute.routers.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *RouterAggregatedList.ServerResponse.Header or (if a response was +// *TestPermissionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RoutersAggregatedListCall) Do(opts ...googleapi.CallOption) (*RouterAggregatedList, error) { +func (c *RoutersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -59704,7 +72140,7 @@ func (c *RoutersAggregatedListCall) Do(opts ...googleapi.CallOption) (*RouterAgg if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RouterAggregatedList{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -59716,47 +72152,43 @@ func (c *RoutersAggregatedListCall) Do(opts ...googleapi.CallOption) (*RouterAgg } return ret, nil // { - // "description": "Retrieves an aggregated list of routers.", - // "httpMethod": "GET", - // "id": "compute.routers.aggregatedList", + // "description": "Returns permissions that a caller has on the specified resource.", + // "httpMethod": "POST", + // "id": "compute.routers.testIamPermissions", // "parameterOrder": [ - // "project" + // "project", + // "region", + // "resource" // ], // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). 
Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, // "type": "string" // }, - // "project": { - // "description": "Project ID for this request.", + // "resource": { + // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/aggregated/routers", + // "path": "{project}/regions/{region}/routers/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, // "response": { - // "$ref": "RouterAggregatedList" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -59767,45 +72199,27 @@ func (c *RoutersAggregatedListCall) Do(opts ...googleapi.CallOption) (*RouterAgg } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *RoutersAggregatedListCall) Pages(ctx context.Context, f func(*RouterAggregatedList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.routers.delete": +// method id "compute.routers.update": -type RoutersDeleteCall struct { +type RoutersUpdateCall struct { s *Service project string region string router string + router2 *Router urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Delete: Deletes the specified Router resource. -func (r *RoutersService) Delete(project string, region string, router string) *RoutersDeleteCall { - c := &RoutersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Update: Updates the specified Router resource with the data included +// in the request. +func (r *RoutersService) Update(project string, region string, router string, router2 *Router) *RoutersUpdateCall { + c := &RoutersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.router = router + c.router2 = router2 return c } @@ -59823,7 +72237,7 @@ func (r *RoutersService) Delete(project string, region string, router string) *R // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). 
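// Illustrative usage sketch (editorial addition): a full update (PUT) that
// replaces the Router definition, in contrast to Patch, which merges fields.
// As with insert, the request ID lets the server drop an accidental duplicate
// of a retried request; generating the UUID is left to the caller.
func exampleUpdateRouter(ctx context.Context, svc *Service, project, region, name string, updated *Router, requestID string) (*Operation, error) {
	return svc.Routers.Update(project, region, name, updated).
		RequestId(requestID).
		Context(ctx).
		Do()
}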
-func (c *RoutersDeleteCall) RequestId(requestId string) *RoutersDeleteCall { +func (c *RoutersUpdateCall) RequestId(requestId string) *RoutersUpdateCall { c.urlParams_.Set("requestId", requestId) return c } @@ -59831,7 +72245,7 @@ func (c *RoutersDeleteCall) RequestId(requestId string) *RoutersDeleteCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RoutersDeleteCall) Fields(s ...googleapi.Field) *RoutersDeleteCall { +func (c *RoutersUpdateCall) Fields(s ...googleapi.Field) *RoutersUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -59839,31 +72253,36 @@ func (c *RoutersDeleteCall) Fields(s ...googleapi.Field) *RoutersDeleteCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutersDeleteCall) Context(ctx context.Context) *RoutersDeleteCall { +func (c *RoutersUpdateCall) Context(ctx context.Context) *RoutersUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RoutersDeleteCall) Header() http.Header { +func (c *RoutersUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RoutersDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RoutersUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.router2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("PUT", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, @@ -59873,14 +72292,14 @@ func (c *RoutersDeleteCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routers.delete" call. +// Do executes the "compute.routers.update" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RoutersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RoutersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -59911,9 +72330,9 @@ func (c *RoutersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Deletes the specified Router resource.", - // "httpMethod": "DELETE", - // "id": "compute.routers.delete", + // "description": "Updates the specified Router resource with the data included in the request.", + // "httpMethod": "PUT", + // "id": "compute.routers.update", // "parameterOrder": [ // "project", // "region", @@ -59940,7 +72359,7 @@ func (c *RoutersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "type": "string" // }, // "router": { - // "description": "Name of the Router resource to delete.", + // "description": "Name of the Router resource to update.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -59948,6 +72367,9 @@ func (c *RoutersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) // } // }, // "path": "{project}/regions/{region}/routers/{router}", + // "request": { + // "$ref": "Router" + // }, // "response": { // "$ref": "Operation" // }, @@ -59959,95 +72381,97 @@ func (c *RoutersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) } -// method id "compute.routers.get": - -type RoutersGetCall struct { - s *Service - project string - region string - router string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// method id "compute.routes.delete": + +type RoutesDeleteCall struct { + s *Service + project string + route string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified Route resource. +// For details, see https://cloud.google.com/compute/docs/reference/latest/routes/delete +func (r *RoutesService) Delete(project string, route string) *RoutesDeleteCall { + c := &RoutesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.route = route + return c } -// Get: Returns the specified Router resource. Get a list of available -// routers by making a list() request. -func (r *RoutersService) Get(project string, region string, router string) *RoutersGetCall { - c := &RoutersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.router = router +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RoutesDeleteCall) RequestId(requestId string) *RoutesDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *RoutersGetCall) Fields(s ...googleapi.Field) *RoutersGetCall { +func (c *RoutesDeleteCall) Fields(s ...googleapi.Field) *RoutesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RoutersGetCall) IfNoneMatch(entityTag string) *RoutersGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutersGetCall) Context(ctx context.Context) *RoutersGetCall { +func (c *RoutesDeleteCall) Context(ctx context.Context) *RoutesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RoutersGetCall) Header() http.Header { +func (c *RoutesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RoutersGetCall) doRequest(alt string) (*http.Response, error) { +func (c *RoutesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes/{route}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) + req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, - "router": c.router, + "route": c.route, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routers.get" call. -// Exactly one of *Router or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Router.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *RoutersGetCall) Do(opts ...googleapi.CallOption) (*Router, error) { +// Do executes the "compute.routes.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RoutesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -60066,7 +72490,7 @@ func (c *RoutersGetCall) Do(opts ...googleapi.CallOption) (*Router, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Router{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -60078,13 +72502,12 @@ func (c *RoutersGetCall) Do(opts ...googleapi.CallOption) (*Router, error) { } return ret, nil // { - // "description": "Returns the specified Router resource. Get a list of available routers by making a list() request.", - // "httpMethod": "GET", - // "id": "compute.routers.get", + // "description": "Deletes the specified Route resource.", + // "httpMethod": "DELETE", + // "id": "compute.routes.delete", // "parameterOrder": [ // "project", - // "region", - // "router" + // "route" // ], // "parameters": { // "project": { @@ -60094,61 +72517,57 @@ func (c *RoutersGetCall) Do(opts ...googleapi.CallOption) (*Router, error) { // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // }, - // "router": { - // "description": "Name of the Router resource to return.", + // "route": { + // "description": "Name of the Route resource to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/routers/{router}", + // "path": "{project}/global/routes/{route}", // "response": { - // "$ref": "Router" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.routers.getRouterStatus": +// method id "compute.routes.get": -type RoutersGetRouterStatusCall struct { +type RoutesGetCall struct { s *Service project string - region string - router string + route string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// GetRouterStatus: Retrieves runtime information of the specified -// router. -func (r *RoutersService) GetRouterStatus(project string, region string, router string) *RoutersGetRouterStatusCall { - c := &RoutersGetRouterStatusCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: Returns the specified Route resource. Get a list of available +// routes by making a list() request. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/routes/get +func (r *RoutesService) Get(project string, route string) *RoutesGetCall { + c := &RoutesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.router = router + c.route = route return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RoutersGetRouterStatusCall) Fields(s ...googleapi.Field) *RoutersGetRouterStatusCall { +func (c *RoutesGetCall) Fields(s ...googleapi.Field) *RoutesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -60158,7 +72577,7 @@ func (c *RoutersGetRouterStatusCall) Fields(s ...googleapi.Field) *RoutersGetRou // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RoutersGetRouterStatusCall) IfNoneMatch(entityTag string) *RoutersGetRouterStatusCall { +func (c *RoutesGetCall) IfNoneMatch(entityTag string) *RoutesGetCall { c.ifNoneMatch_ = entityTag return c } @@ -60166,21 +72585,21 @@ func (c *RoutersGetRouterStatusCall) IfNoneMatch(entityTag string) *RoutersGetRo // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutersGetRouterStatusCall) Context(ctx context.Context) *RoutersGetRouterStatusCall { +func (c *RoutesGetCall) Context(ctx context.Context) *RoutesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RoutersGetRouterStatusCall) Header() http.Header { +func (c *RoutesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RoutersGetRouterStatusCall) doRequest(alt string) (*http.Response, error) { +func (c *RoutesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -60191,26 +72610,25 @@ func (c *RoutersGetRouterStatusCall) doRequest(alt string) (*http.Response, erro } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}/getRouterStatus") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes/{route}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, - "router": c.router, + "route": c.route, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routers.getRouterStatus" call. -// Exactly one of *RouterStatusResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *RouterStatusResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RoutersGetRouterStatusCall) Do(opts ...googleapi.CallOption) (*RouterStatusResponse, error) { +// Do executes the "compute.routes.get" call. +// Exactly one of *Route or error will be non-nil. Any non-2xx status +// code is an error. 
Response headers are in either +// *Route.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *RoutesGetCall) Do(opts ...googleapi.CallOption) (*Route, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -60229,7 +72647,7 @@ func (c *RoutersGetRouterStatusCall) Do(opts ...googleapi.CallOption) (*RouterSt if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RouterStatusResponse{ + ret := &Route{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -60241,13 +72659,12 @@ func (c *RoutersGetRouterStatusCall) Do(opts ...googleapi.CallOption) (*RouterSt } return ret, nil // { - // "description": "Retrieves runtime information of the specified router.", + // "description": "Returns the specified Route resource. Get a list of available routes by making a list() request.", // "httpMethod": "GET", - // "id": "compute.routers.getRouterStatus", + // "id": "compute.routes.get", // "parameterOrder": [ // "project", - // "region", - // "router" + // "route" // ], // "parameters": { // "project": { @@ -60257,24 +72674,17 @@ func (c *RoutersGetRouterStatusCall) Do(opts ...googleapi.CallOption) (*RouterSt // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "router": { - // "description": "Name of the Router resource to query.", + // "route": { + // "description": "Name of the Route resource to return.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/routers/{router}/getRouterStatus", + // "path": "{project}/global/routes/{route}", // "response": { - // "$ref": "RouterStatusResponse" + // "$ref": "Route" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -60285,25 +72695,24 @@ func (c *RoutersGetRouterStatusCall) Do(opts ...googleapi.CallOption) (*RouterSt } -// method id "compute.routers.insert": +// method id "compute.routes.insert": -type RoutersInsertCall struct { +type RoutesInsertCall struct { s *Service project string - region string - router *Router + route *Route urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Insert: Creates a Router resource in the specified project and region -// using the data included in the request. -func (r *RoutersService) Insert(project string, region string, router *Router) *RoutersInsertCall { - c := &RoutersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a Route resource in the specified project using the +// data included in the request. 
+// For details, see https://cloud.google.com/compute/docs/reference/latest/routes/insert +func (r *RoutesService) Insert(project string, route *Route) *RoutesInsertCall { + c := &RoutesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.router = router + c.route = route return c } @@ -60321,7 +72730,7 @@ func (r *RoutersService) Insert(project string, region string, router *Router) * // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RoutersInsertCall) RequestId(requestId string) *RoutersInsertCall { +func (c *RoutesInsertCall) RequestId(requestId string) *RoutesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -60329,7 +72738,7 @@ func (c *RoutersInsertCall) RequestId(requestId string) *RoutersInsertCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RoutersInsertCall) Fields(s ...googleapi.Field) *RoutersInsertCall { +func (c *RoutesInsertCall) Fields(s ...googleapi.Field) *RoutesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -60337,52 +72746,51 @@ func (c *RoutersInsertCall) Fields(s ...googleapi.Field) *RoutersInsertCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutersInsertCall) Context(ctx context.Context) *RoutersInsertCall { +func (c *RoutesInsertCall) Context(ctx context.Context) *RoutesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RoutersInsertCall) Header() http.Header { +func (c *RoutesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RoutersInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RoutesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.router) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.route) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routers.insert" call. +// Do executes the "compute.routes.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
-func (c *RoutersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RoutesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -60413,12 +72821,11 @@ func (c *RoutersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Creates a Router resource in the specified project and region using the data included in the request.", + // "description": "Creates a Route resource in the specified project using the data included in the request.", // "httpMethod": "POST", - // "id": "compute.routers.insert", + // "id": "compute.routes.insert", // "parameterOrder": [ - // "project", - // "region" + // "project" // ], // "parameters": { // "project": { @@ -60428,22 +72835,15 @@ func (c *RoutersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, // "requestId": { // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" // } // }, - // "path": "{project}/regions/{region}/routers", + // "path": "{project}/global/routes", // "request": { - // "$ref": "Router" + // "$ref": "Route" // }, // "response": { // "$ref": "Operation" @@ -60456,24 +72856,23 @@ func (c *RoutersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) } -// method id "compute.routers.list": +// method id "compute.routes.list": -type RoutersListCall struct { +type RoutesListCall struct { s *Service project string - region string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of Router resources available to the specified -// project. -func (r *RoutersService) List(project string, region string) *RoutersListCall { - c := &RoutersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: Retrieves the list of Route resources available to the +// specified project. +// For details, see https://cloud.google.com/compute/docs/reference/latest/routes/list +func (r *RoutesService) List(project string) *RoutesListCall { + c := &RoutesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region return c } @@ -60503,7 +72902,7 @@ func (r *RoutersService) List(project string, region string) *RoutersListCall { // true) (zone eq us-central1-f). Multiple expressions are treated as // AND expressions, meaning that resources must match all expressions to // pass the filters. 
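// ---------------------------------------------------------------------------
// Editor's note, not part of the vendored diff: the Filter, MaxResults and
// PageToken options documented above pair with the generated Pages helper
// (defined further down for RoutesListCall) to walk every page of list
// results. A minimal sketch follows; the import path, project name, filter
// expression, and field names (RouteList.Items, Route.Name) are the usual
// generated ones and are assumptions for the example.
//
// package main
//
// import (
// 	"context"
// 	"fmt"
// 	"log"
//
// 	"golang.org/x/oauth2/google"
// 	compute "google.golang.org/api/compute/v0.beta" // assumed vendored path
// )
//
// func main() {
// 	ctx := context.Background()
// 	client, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
// 	if err != nil {
// 		log.Fatal(err)
// 	}
// 	svc, err := compute.New(client)
// 	if err != nil {
// 		log.Fatal(err)
// 	}
// 	// Pages re-issues the list call with each nextPageToken until the server
// 	// returns an empty token, invoking the callback once per page.
// 	err = svc.Routes.List("my-project").
// 		Filter("name ne default-route-.*"). // hypothetical filter expression
// 		MaxResults(100).
// 		Pages(ctx, func(page *compute.RouteList) error {
// 			for _, r := range page.Items {
// 				fmt.Println(r.Name)
// 			}
// 			return nil
// 		})
// 	if err != nil {
// 		log.Fatal(err)
// 	}
// }
// ---------------------------------------------------------------------------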
-func (c *RoutersListCall) Filter(filter string) *RoutersListCall { +func (c *RoutesListCall) Filter(filter string) *RoutesListCall { c.urlParams_.Set("filter", filter) return c } @@ -60514,7 +72913,7 @@ func (c *RoutersListCall) Filter(filter string) *RoutersListCall { // nextPageToken that can be used to get the next page of results in // subsequent list requests. Acceptable values are 0 to 500, inclusive. // (Default: 500) -func (c *RoutersListCall) MaxResults(maxResults int64) *RoutersListCall { +func (c *RoutesListCall) MaxResults(maxResults int64) *RoutesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -60531,7 +72930,7 @@ func (c *RoutersListCall) MaxResults(maxResults int64) *RoutersListCall { // // Currently, only sorting by name or creationTimestamp desc is // supported. -func (c *RoutersListCall) OrderBy(orderBy string) *RoutersListCall { +func (c *RoutesListCall) OrderBy(orderBy string) *RoutesListCall { c.urlParams_.Set("orderBy", orderBy) return c } @@ -60539,7 +72938,7 @@ func (c *RoutersListCall) OrderBy(orderBy string) *RoutersListCall { // PageToken sets the optional parameter "pageToken": Specifies a page // token to use. Set pageToken to the nextPageToken returned by a // previous list request to get the next page of results. -func (c *RoutersListCall) PageToken(pageToken string) *RoutersListCall { +func (c *RoutesListCall) PageToken(pageToken string) *RoutesListCall { c.urlParams_.Set("pageToken", pageToken) return c } @@ -60547,7 +72946,7 @@ func (c *RoutersListCall) PageToken(pageToken string) *RoutersListCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RoutersListCall) Fields(s ...googleapi.Field) *RoutersListCall { +func (c *RoutesListCall) Fields(s ...googleapi.Field) *RoutesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -60557,7 +72956,7 @@ func (c *RoutersListCall) Fields(s ...googleapi.Field) *RoutersListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RoutersListCall) IfNoneMatch(entityTag string) *RoutersListCall { +func (c *RoutesListCall) IfNoneMatch(entityTag string) *RoutesListCall { c.ifNoneMatch_ = entityTag return c } @@ -60565,21 +72964,21 @@ func (c *RoutersListCall) IfNoneMatch(entityTag string) *RoutersListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutersListCall) Context(ctx context.Context) *RoutersListCall { +func (c *RoutesListCall) Context(ctx context.Context) *RoutesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *RoutersListCall) Header() http.Header { +func (c *RoutesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RoutersListCall) doRequest(alt string) (*http.Response, error) { +func (c *RoutesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -60590,25 +72989,24 @@ func (c *RoutersListCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routers.list" call. -// Exactly one of *RouterList or error will be non-nil. Any non-2xx +// Do executes the "compute.routes.list" call. +// Exactly one of *RouteList or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *RouterList.ServerResponse.Header or (if a response was returned at +// *RouteList.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RoutersListCall) Do(opts ...googleapi.CallOption) (*RouterList, error) { +func (c *RoutesListCall) Do(opts ...googleapi.CallOption) (*RouteList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -60627,7 +73025,7 @@ func (c *RoutersListCall) Do(opts ...googleapi.CallOption) (*RouterList, error) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RouterList{ + ret := &RouteList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -60639,12 +73037,11 @@ func (c *RoutersListCall) Do(opts ...googleapi.CallOption) (*RouterList, error) } return ret, nil // { - // "description": "Retrieves a list of Router resources available to the specified project.", + // "description": "Retrieves the list of Route resources available to the specified project.", // "httpMethod": "GET", - // "id": "compute.routers.list", + // "id": "compute.routes.list", // "parameterOrder": [ - // "project", - // "region" + // "project" // ], // "parameters": { // "filter": { @@ -60676,18 +73073,11 @@ func (c *RoutersListCall) Do(opts ...googleapi.CallOption) (*RouterList, error) // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/regions/{region}/routers", + // "path": "{project}/global/routes", // "response": { - // "$ref": "RouterList" + // "$ref": "RouteList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -60701,7 +73091,7 @@ func (c *RoutersListCall) Do(opts ...googleapi.CallOption) (*RouterList, error) // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. -func (c *RoutersListCall) Pages(ctx context.Context, f func(*RouterList) error) error { +func (c *RoutesListCall) Pages(ctx context.Context, f func(*RouteList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -60719,219 +73109,32 @@ func (c *RoutersListCall) Pages(ctx context.Context, f func(*RouterList) error) } } -// method id "compute.routers.patch": - -type RoutersPatchCall struct { - s *Service - project string - region string - router string - router2 *Router - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Patch: Patches the specified Router resource with the data included -// in the request. This method supports PATCH semantics and uses JSON -// merge patch format and processing rules. -func (r *RoutersService) Patch(project string, region string, router string, router2 *Router) *RoutersPatchCall { - c := &RoutersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.router = router - c.router2 = router2 - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. 
If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RoutersPatchCall) RequestId(requestId string) *RoutersPatchCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RoutersPatchCall) Fields(s ...googleapi.Field) *RoutersPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RoutersPatchCall) Context(ctx context.Context) *RoutersPatchCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *RoutersPatchCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RoutersPatchCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.router2) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "router": c.router, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.routers.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RoutersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Patches the specified Router resource with the data included in the request. 
This method supports PATCH semantics and uses JSON merge patch format and processing rules.", - // "httpMethod": "PATCH", - // "id": "compute.routers.patch", - // "parameterOrder": [ - // "project", - // "region", - // "router" - // ], - // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "router": { - // "description": "Name of the Router resource to patch.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "{project}/regions/{region}/routers/{router}", - // "request": { - // "$ref": "Router" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.routers.preview": +// method id "compute.routes.testIamPermissions": -type RoutersPreviewCall struct { - s *Service - project string - region string - router string - router2 *Router - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RoutesTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Preview: Preview fields auto-generated during router create and -// update operations. Calling this method does NOT create or update the -// router. -func (r *RoutersService) Preview(project string, region string, router string, router2 *Router) *RoutersPreviewCall { - c := &RoutersPreviewCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +func (r *RoutesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *RoutesTestIamPermissionsCall { + c := &RoutesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.router = router - c.router2 = router2 + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest return c } // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RoutersPreviewCall) Fields(s ...googleapi.Field) *RoutersPreviewCall { +func (c *RoutesTestIamPermissionsCall) Fields(s ...googleapi.Field) *RoutesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -60939,53 +73142,52 @@ func (c *RoutersPreviewCall) Fields(s ...googleapi.Field) *RoutersPreviewCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutersPreviewCall) Context(ctx context.Context) *RoutersPreviewCall { +func (c *RoutesTestIamPermissionsCall) Context(ctx context.Context) *RoutesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RoutersPreviewCall) Header() http.Header { +func (c *RoutesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RoutersPreviewCall) doRequest(alt string) (*http.Response, error) { +func (c *RoutesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.router2) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}/preview") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "router": c.router, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routers.preview" call. -// Exactly one of *RoutersPreviewResponse or error will be non-nil. Any +// Do executes the "compute.routes.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *RoutersPreviewResponse.ServerResponse.Header or (if a response was +// *TestPermissionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *RoutersPreviewCall) Do(opts ...googleapi.CallOption) (*RoutersPreviewResponse, error) { +func (c *RoutesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -61004,7 +73206,7 @@ func (c *RoutersPreviewCall) Do(opts ...googleapi.CallOption) (*RoutersPreviewRe if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &RoutersPreviewResponse{ + ret := &TestPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -61016,13 +73218,12 @@ func (c *RoutersPreviewCall) Do(opts ...googleapi.CallOption) (*RoutersPreviewRe } return ret, nil // { - // "description": "Preview fields auto-generated during router create and update operations. Calling this method does NOT create or update the router.", + // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.routers.preview", + // "id": "compute.routes.testIamPermissions", // "parameterOrder": [ // "project", - // "region", - // "router" + // "resource" // ], // "parameters": { // "project": { @@ -61032,27 +73233,20 @@ func (c *RoutersPreviewCall) Do(opts ...googleapi.CallOption) (*RoutersPreviewRe // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "router": { - // "description": "Name of the Router resource to query.", + // "resource": { + // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/routers/{router}/preview", + // "path": "{project}/global/routes/{resource}/testIamPermissions", // "request": { - // "$ref": "Router" + // "$ref": "TestPermissionsRequest" // }, // "response": { - // "$ref": "RoutersPreviewResponse" + // "$ref": "TestPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -61063,34 +73257,48 @@ func (c *RoutersPreviewCall) Do(opts ...googleapi.CallOption) (*RoutersPreviewRe } -// method id "compute.routers.testIamPermissions": +// method id "compute.securityPolicies.delete": -type RoutersTestIamPermissionsCall struct { - s *Service - project string - region string - resource string - testpermissionsrequest *TestPermissionsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type SecurityPoliciesDeleteCall struct { + s *Service + project string + securityPolicy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// TestIamPermissions: Returns permissions that a caller has on the -// specified resource. -func (r *RoutersService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RoutersTestIamPermissionsCall { - c := &RoutersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Delete: Deletes the specified policy. 
+func (r *SecurityPoliciesService) Delete(project string, securityPolicy string) *SecurityPoliciesDeleteCall { + c := &SecurityPoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.resource = resource - c.testpermissionsrequest = testpermissionsrequest + c.securityPolicy = securityPolicy + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *SecurityPoliciesDeleteCall) RequestId(requestId string) *SecurityPoliciesDeleteCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RoutersTestIamPermissionsCall) Fields(s ...googleapi.Field) *RoutersTestIamPermissionsCall { +func (c *SecurityPoliciesDeleteCall) Fields(s ...googleapi.Field) *SecurityPoliciesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -61098,53 +73306,47 @@ func (c *RoutersTestIamPermissionsCall) Fields(s ...googleapi.Field) *RoutersTes // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutersTestIamPermissionsCall) Context(ctx context.Context) *RoutersTestIamPermissionsCall { +func (c *SecurityPoliciesDeleteCall) Context(ctx context.Context) *SecurityPoliciesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RoutersTestIamPermissionsCall) Header() http.Header { +func (c *SecurityPoliciesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RoutersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *SecurityPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}") urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("DELETE", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "resource": c.resource, + "project": c.project, + "securityPolicy": c.securityPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routers.testIamPermissions" call. -// Exactly one of *TestPermissionsResponse or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TestPermissionsResponse.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RoutersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +// Do executes the "compute.securityPolicies.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SecurityPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -61163,7 +73365,7 @@ func (c *RoutersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestP if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &TestPermissionsResponse{ + ret := &Operation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -61175,13 +73377,12 @@ func (c *RoutersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestP } return ret, nil // { - // "description": "Returns permissions that a caller has on the specified resource.", - // "httpMethod": "POST", - // "id": "compute.routers.testIamPermissions", + // "description": "Deletes the specified policy.", + // "httpMethod": "DELETE", + // "id": "compute.securityPolicies.delete", // "parameterOrder": [ // "project", - // "region", - // "resource" + // "securityPolicy" // ], // "parameters": { // "project": { @@ -61191,138 +73392,117 @@ func (c *RoutersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestP // "required": true, // "type": "string" // }, - // "region": { - // "description": "The name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", // "type": "string" // }, - // "resource": { - // "description": "Name of the resource for this request.", + // "securityPolicy": { + // "description": "Name of the security policy to delete.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/routers/{resource}/testIamPermissions", - // "request": { - // "$ref": "TestPermissionsRequest" - // }, + // "path": "{project}/global/securityPolicies/{securityPolicy}", // "response": { - // "$ref": "TestPermissionsResponse" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// method id "compute.routers.update": +// method id "compute.securityPolicies.get": -type RoutersUpdateCall struct { - s *Service - project string - region string - router string - router2 *Router - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type SecurityPoliciesGetCall struct { + s *Service + project string + securityPolicy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Update: Updates the specified Router resource with the data included -// in the request. -func (r *RoutersService) Update(project string, region string, router string, router2 *Router) *RoutersUpdateCall { - c := &RoutersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Get: List all of the ordered rules present in a single specified +// policy. +func (r *SecurityPoliciesService) Get(project string, securityPolicy string) *SecurityPoliciesGetCall { + c := &SecurityPoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.region = region - c.router = router - c.router2 = router2 - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. -// -// For example, consider a situation where you make an initial request -// and the request times out. If you make the request again with the -// same request ID, the server can check if original operation with the -// same request ID was received, and if so, will ignore the second -// request. This prevents clients from accidentally creating duplicate -// commitments. -// -// The request ID must be a valid UUID with the exception that zero UUID -// is not supported (00000000-0000-0000-0000-000000000000). -func (c *RoutersUpdateCall) RequestId(requestId string) *RoutersUpdateCall { - c.urlParams_.Set("requestId", requestId) + c.securityPolicy = securityPolicy return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *RoutersUpdateCall) Fields(s ...googleapi.Field) *RoutersUpdateCall { +func (c *SecurityPoliciesGetCall) Fields(s ...googleapi.Field) *SecurityPoliciesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *SecurityPoliciesGetCall) IfNoneMatch(entityTag string) *SecurityPoliciesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutersUpdateCall) Context(ctx context.Context) *RoutersUpdateCall { +func (c *SecurityPoliciesGetCall) Context(ctx context.Context) *SecurityPoliciesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RoutersUpdateCall) Header() http.Header { +func (c *SecurityPoliciesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RoutersUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *SecurityPoliciesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.router2) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) + req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "router": c.router, + "project": c.project, + "securityPolicy": c.securityPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routers.update" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx +// Do executes the "compute.securityPolicies.get" call. +// Exactly one of *SecurityPolicy or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RoutersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// *SecurityPolicy.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *SecurityPoliciesGetCall) Do(opts ...googleapi.CallOption) (*SecurityPolicy, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -61341,7 +73521,7 @@ func (c *RoutersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Operation{ + ret := &SecurityPolicy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -61353,13 +73533,12 @@ func (c *RoutersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Updates the specified Router resource with the data included in the request.", - // "httpMethod": "PUT", - // "id": "compute.routers.update", + // "description": "List all of the ordered rules present in a single specified policy.", + // "httpMethod": "GET", + // "id": "compute.securityPolicies.get", // "parameterOrder": [ // "project", - // "region", - // "router" + // "securityPolicy" // ], // "parameters": { // "project": { @@ -61369,58 +73548,44 @@ func (c *RoutersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "required": true, // "type": "string" // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // }, - // "router": { - // "description": "Name of the Router resource to update.", + // "securityPolicy": { + // "description": "Name of the security policy to get.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/regions/{region}/routers/{router}", - // "request": { - // "$ref": "Router" - // }, + // "path": "{project}/global/securityPolicies/{securityPolicy}", // "response": { - // "$ref": "Operation" + // "$ref": "SecurityPolicy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.routes.delete": +// method id "compute.securityPolicies.insert": -type RoutesDeleteCall struct { - s *Service - project string - route string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type SecurityPoliciesInsertCall struct { + s *Service + project string + securitypolicy *SecurityPolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified Route resource. 
-// For details, see https://cloud.google.com/compute/docs/reference/latest/routes/delete -func (r *RoutesService) Delete(project string, route string) *RoutesDeleteCall { - c := &RoutesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Insert: Creates a new policy in the specified project using the data +// included in the request. +func (r *SecurityPoliciesService) Insert(project string, securitypolicy *SecurityPolicy) *SecurityPoliciesInsertCall { + c := &SecurityPoliciesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.route = route + c.securitypolicy = securitypolicy return c } @@ -61438,7 +73603,7 @@ func (r *RoutesService) Delete(project string, route string) *RoutesDeleteCall { // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RoutesDeleteCall) RequestId(requestId string) *RoutesDeleteCall { +func (c *SecurityPoliciesInsertCall) RequestId(requestId string) *SecurityPoliciesInsertCall { c.urlParams_.Set("requestId", requestId) return c } @@ -61446,7 +73611,7 @@ func (c *RoutesDeleteCall) RequestId(requestId string) *RoutesDeleteCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RoutesDeleteCall) Fields(s ...googleapi.Field) *RoutesDeleteCall { +func (c *SecurityPoliciesInsertCall) Fields(s ...googleapi.Field) *SecurityPoliciesInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -61454,47 +73619,51 @@ func (c *RoutesDeleteCall) Fields(s ...googleapi.Field) *RoutesDeleteCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutesDeleteCall) Context(ctx context.Context) *RoutesDeleteCall { +func (c *SecurityPoliciesInsertCall) Context(ctx context.Context) *SecurityPoliciesInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RoutesDeleteCall) Header() http.Header { +func (c *SecurityPoliciesInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RoutesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *SecurityPoliciesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes/{route}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) + req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "route": c.route, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routes.delete" call. +// Do executes the "compute.securityPolicies.insert" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. 
Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RoutesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *SecurityPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -61525,12 +73694,11 @@ func (c *RoutesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Deletes the specified Route resource.", - // "httpMethod": "DELETE", - // "id": "compute.routes.delete", + // "description": "Creates a new policy in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.securityPolicies.insert", // "parameterOrder": [ - // "project", - // "route" + // "project" // ], // "parameters": { // "project": { @@ -61544,16 +73712,12 @@ func (c *RoutesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // }, - // "route": { - // "description": "Name of the Route resource to delete.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/global/routes/{route}", + // "path": "{project}/global/securityPolicies", + // "request": { + // "$ref": "SecurityPolicy" + // }, // "response": { // "$ref": "Operation" // }, @@ -61565,32 +73729,96 @@ func (c *RoutesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) } -// method id "compute.routes.get": +// method id "compute.securityPolicies.list": -type RoutesGetCall struct { +type SecurityPoliciesListCall struct { s *Service project string - route string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the specified Route resource. Get a list of available -// routes by making a list() request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/routes/get -func (r *RoutesService) Get(project string, route string) *RoutesGetCall { - c := &RoutesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// List: List all the policies that have been configured for the +// specified project. +func (r *SecurityPoliciesService) List(project string) *SecurityPoliciesListCall { + c := &SecurityPoliciesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.route = route + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. 
Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *SecurityPoliciesListCall) Filter(filter string) *SecurityPoliciesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *SecurityPoliciesListCall) MaxResults(maxResults int64) *SecurityPoliciesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *SecurityPoliciesListCall) OrderBy(orderBy string) *SecurityPoliciesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *SecurityPoliciesListCall) PageToken(pageToken string) *SecurityPoliciesListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
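A hedged sketch of the filter syntax documented on the Filter setter just added (field_name comparison_string literal_string, eq/ne, RE2 matching the whole field). It reuses the service value from the previous sketch; the Items field on SecurityPolicyList and the Name field on SecurityPolicy are not visible in this hunk and are assumed from the upstream API.

package main

import (
	"fmt"

	"golang.org/x/net/context"
	compute "google.golang.org/api/compute/v1"
)

// listNonTestPolicies fetches a single page of security policies, excluding
// any whose name matches the RE2 expression test-.* (the literal must match
// the entire field, as described above).
func listNonTestPolicies(ctx context.Context, svc *compute.Service, project string) error {
	list, err := svc.SecurityPolicies.List(project).
		Filter("name ne test-.*").         // field_name comparison_string literal_string
		OrderBy("creationTimestamp desc"). // newest first; the only other supported order is by name
		MaxResults(100).                   // acceptable values are 0 to 500
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	for _, p := range list.Items {
		fmt.Println(p.Name)
	}
	return nil
}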
-func (c *RoutesGetCall) Fields(s ...googleapi.Field) *RoutesGetCall { +func (c *SecurityPoliciesListCall) Fields(s ...googleapi.Field) *SecurityPoliciesListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -61600,7 +73828,7 @@ func (c *RoutesGetCall) Fields(s ...googleapi.Field) *RoutesGetCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *RoutesGetCall) IfNoneMatch(entityTag string) *RoutesGetCall { +func (c *SecurityPoliciesListCall) IfNoneMatch(entityTag string) *SecurityPoliciesListCall { c.ifNoneMatch_ = entityTag return c } @@ -61608,21 +73836,21 @@ func (c *RoutesGetCall) IfNoneMatch(entityTag string) *RoutesGetCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutesGetCall) Context(ctx context.Context) *RoutesGetCall { +func (c *SecurityPoliciesListCall) Context(ctx context.Context) *SecurityPoliciesListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RoutesGetCall) Header() http.Header { +func (c *SecurityPoliciesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RoutesGetCall) doRequest(alt string) (*http.Response, error) { +func (c *SecurityPoliciesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -61633,25 +73861,24 @@ func (c *RoutesGetCall) doRequest(alt string) (*http.Response, error) { } var body io.Reader = nil c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes/{route}") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, - "route": c.route, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routes.get" call. -// Exactly one of *Route or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Route.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *RoutesGetCall) Do(opts ...googleapi.CallOption) (*Route, error) { +// Do executes the "compute.securityPolicies.list" call. +// Exactly one of *SecurityPolicyList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *SecurityPolicyList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *SecurityPoliciesListCall) Do(opts ...googleapi.CallOption) (*SecurityPolicyList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -61670,7 +73897,7 @@ func (c *RoutesGetCall) Do(opts ...googleapi.CallOption) (*Route, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - ret := &Route{ + ret := &SecurityPolicyList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -61682,32 +73909,47 @@ func (c *RoutesGetCall) Do(opts ...googleapi.CallOption) (*Route, error) { } return ret, nil // { - // "description": "Returns the specified Route resource. Get a list of available routes by making a list() request.", + // "description": "List all the policies that have been configured for the specified project.", // "httpMethod": "GET", - // "id": "compute.routes.get", + // "id": "compute.securityPolicies.list", // "parameterOrder": [ - // "project", - // "route" + // "project" // ], // "parameters": { + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" - // }, - // "route": { - // "description": "Name of the Route resource to return.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" // } // }, - // "path": "{project}/global/routes/{route}", + // "path": "{project}/global/securityPolicies", // "response": { - // "$ref": "Route" + // "$ref": "SecurityPolicyList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -61718,24 +73960,46 @@ func (c *RoutesGetCall) Do(opts ...googleapi.CallOption) (*Route, error) { } -// method id "compute.routes.insert": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *SecurityPoliciesListCall) Pages(ctx context.Context, f func(*SecurityPolicyList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type RoutesInsertCall struct { - s *Service - project string - route *Route - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.securityPolicies.patch": + +type SecurityPoliciesPatchCall struct { + s *Service + project string + securityPolicy string + securitypolicy *SecurityPolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a Route resource in the specified project using the -// data included in the request. -// For details, see https://cloud.google.com/compute/docs/reference/latest/routes/insert -func (r *RoutesService) Insert(project string, route *Route) *RoutesInsertCall { - c := &RoutesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// Patch: Patches the specified policy with the data included in the +// request. +func (r *SecurityPoliciesService) Patch(project string, securityPolicy string, securitypolicy *SecurityPolicy) *SecurityPoliciesPatchCall { + c := &SecurityPoliciesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.route = route + c.securityPolicy = securityPolicy + c.securitypolicy = securitypolicy return c } @@ -61753,7 +74017,7 @@ func (r *RoutesService) Insert(project string, route *Route) *RoutesInsertCall { // // The request ID must be a valid UUID with the exception that zero UUID // is not supported (00000000-0000-0000-0000-000000000000). -func (c *RoutesInsertCall) RequestId(requestId string) *RoutesInsertCall { +func (c *SecurityPoliciesPatchCall) RequestId(requestId string) *SecurityPoliciesPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -61761,7 +74025,7 @@ func (c *RoutesInsertCall) RequestId(requestId string) *RoutesInsertCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
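A sketch of the Pages helper defined just above: it repeatedly calls Do, hands each *SecurityPolicyList to the callback, and advances pageToken until NextPageToken is empty. As before, svc comes from the first sketch, and the Items/Name/Description fields are assumptions about generated structs that sit outside this hunk.

package main

import (
	"fmt"

	"golang.org/x/net/context"
	compute "google.golang.org/api/compute/v1"
)

// printAllPolicies walks every page of list results for a project.
func printAllPolicies(ctx context.Context, svc *compute.Service, project string) error {
	return svc.SecurityPolicies.List(project).Pages(ctx, func(page *compute.SecurityPolicyList) error {
		for _, p := range page.Items {
			fmt.Printf("%s\t%s\n", p.Name, p.Description)
		}
		// Returning a non-nil error here halts the iteration, as documented
		// on the Pages method above.
		return nil
	})
}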
-func (c *RoutesInsertCall) Fields(s ...googleapi.Field) *RoutesInsertCall { +func (c *SecurityPoliciesPatchCall) Fields(s ...googleapi.Field) *SecurityPoliciesPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -61769,51 +74033,52 @@ func (c *RoutesInsertCall) Fields(s ...googleapi.Field) *RoutesInsertCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutesInsertCall) Context(ctx context.Context) *RoutesInsertCall { +func (c *SecurityPoliciesPatchCall) Context(ctx context.Context) *SecurityPoliciesPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RoutesInsertCall) Header() http.Header { +func (c *SecurityPoliciesPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RoutesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *SecurityPoliciesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.route) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicy) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{securityPolicy}") urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) + req, _ := http.NewRequest("PATCH", urls, body) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, + "project": c.project, + "securityPolicy": c.securityPolicy, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routes.insert" call. +// Do executes the "compute.securityPolicies.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RoutesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *SecurityPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -61844,11 +74109,12 @@ func (c *RoutesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Creates a Route resource in the specified project using the data included in the request.", - // "httpMethod": "POST", - // "id": "compute.routes.insert", + // "description": "Patches the specified policy with the data included in the request.", + // "httpMethod": "PATCH", + // "id": "compute.securityPolicies.patch", // "parameterOrder": [ - // "project" + // "project", + // "securityPolicy" // ], // "parameters": { // "project": { @@ -61862,279 +74128,33 @@ func (c *RoutesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" - // } - // }, - // "path": "{project}/global/routes", - // "request": { - // "$ref": "Route" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.routes.list": - -type RoutesListCall struct { - s *Service - project string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves the list of Route resources available to the -// specified project. -// For details, see https://cloud.google.com/compute/docs/reference/latest/routes/list -func (r *RoutesService) List(project string) *RoutesListCall { - c := &RoutesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - return c -} - -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. -// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. -// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. 
-// -// To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. -func (c *RoutesListCall) Filter(filter string) *RoutesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than maxResults, Compute Engine returns a -// nextPageToken that can be used to get the next page of results in -// subsequent list requests. Acceptable values are 0 to 500, inclusive. -// (Default: 500) -func (c *RoutesListCall) MaxResults(maxResults int64) *RoutesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. -// -// You can also sort results in descending order based on the creation -// timestamp using orderBy="creationTimestamp desc". This sorts results -// based on the creationTimestamp field in reverse chronological order -// (newest result first). Use this to sort resources like operations so -// that the newest operation is returned first. -// -// Currently, only sorting by name or creationTimestamp desc is -// supported. -func (c *RoutesListCall) OrderBy(orderBy string) *RoutesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set pageToken to the nextPageToken returned by a -// previous list request to get the next page of results. -func (c *RoutesListCall) PageToken(pageToken string) *RoutesListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RoutesListCall) Fields(s ...googleapi.Field) *RoutesListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RoutesListCall) IfNoneMatch(entityTag string) *RoutesListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RoutesListCall) Context(ctx context.Context) *RoutesListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *RoutesListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RoutesListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.routes.list" call. -// Exactly one of *RouteList or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *RouteList.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RoutesListCall) Do(opts ...googleapi.CallOption) (*RouteList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &RouteList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := json.NewDecoder(res.Body).Decode(target); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves the list of Route resources available to the specified project.", - // "httpMethod": "GET", - // "id": "compute.routes.list", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", - // "location": "query", - // "type": "string" // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", + // "securityPolicy": { + // "description": "Name of the security policy to update.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/routes", + // "path": "{project}/global/securityPolicies/{securityPolicy}", + // "request": { + // "$ref": "SecurityPolicy" + // }, // "response": { - // "$ref": "RouteList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *RoutesListCall) Pages(ctx context.Context, f func(*RouteList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "compute.routes.testIamPermissions": +// method id "compute.securityPolicies.testIamPermissions": -type RoutesTestIamPermissionsCall struct { +type SecurityPoliciesTestIamPermissionsCall struct { s *Service project string resource string @@ -62146,8 +74166,8 @@ type RoutesTestIamPermissionsCall struct { // TestIamPermissions: Returns permissions that a caller has on the // specified resource. 
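Hedged sketch of the new compute.securityPolicies.patch call described above, which applies the supplied data to an existing policy. The Description field on compute.SecurityPolicy and the UUID literal are assumptions/placeholders; the builder signatures follow this hunk.

package main

import (
	"golang.org/x/net/context"
	compute "google.golang.org/api/compute/v1"
)

// updatePolicyDescription patches a single field of an existing policy.
func updatePolicyDescription(ctx context.Context, svc *compute.Service, project, policy string) (*compute.Operation, error) {
	patch := &compute.SecurityPolicy{
		Description: "managed by the infra team", // only the data included in the request is applied
	}
	return svc.SecurityPolicies.Patch(project, policy, patch).
		RequestId("0f1d2c3b-4a5e-4789-abcd-ef0123456789"). // valid, non-zero UUID; safe to reuse on retry
		Context(ctx).
		Do()
}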
-func (r *RoutesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *RoutesTestIamPermissionsCall { - c := &RoutesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *SecurityPoliciesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *SecurityPoliciesTestIamPermissionsCall { + c := &SecurityPoliciesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.resource = resource c.testpermissionsrequest = testpermissionsrequest @@ -62157,7 +74177,7 @@ func (r *RoutesService) TestIamPermissions(project string, resource string, test // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RoutesTestIamPermissionsCall) Fields(s ...googleapi.Field) *RoutesTestIamPermissionsCall { +func (c *SecurityPoliciesTestIamPermissionsCall) Fields(s ...googleapi.Field) *SecurityPoliciesTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -62165,21 +74185,21 @@ func (c *RoutesTestIamPermissionsCall) Fields(s ...googleapi.Field) *RoutesTestI // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RoutesTestIamPermissionsCall) Context(ctx context.Context) *RoutesTestIamPermissionsCall { +func (c *SecurityPoliciesTestIamPermissionsCall) Context(ctx context.Context) *SecurityPoliciesTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RoutesTestIamPermissionsCall) Header() http.Header { +func (c *SecurityPoliciesTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RoutesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *SecurityPoliciesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v @@ -62192,7 +74212,7 @@ func (c *RoutesTestIamPermissionsCall) doRequest(alt string) (*http.Response, er } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes/{resource}/testIamPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/securityPolicies/{resource}/testIamPermissions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -62203,14 +74223,14 @@ func (c *RoutesTestIamPermissionsCall) doRequest(alt string) (*http.Response, er return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.routes.testIamPermissions" call. +// Do executes the "compute.securityPolicies.testIamPermissions" call. // Exactly one of *TestPermissionsResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *TestPermissionsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. 
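Finally, a sketch of the securityPolicies testIamPermissions call. The TestPermissionsRequest/TestPermissionsResponse types and their Permissions fields are the shared IAM types used by the other testIamPermissions calls in this package; the permission string is derived from the method id above and is an assumption.

package main

import (
	"golang.org/x/net/context"
	compute "google.golang.org/api/compute/v1"
)

// canDeletePolicy reports whether the caller holds delete permission on a
// security policy, by asking the API which of the requested permissions it
// actually has.
func canDeletePolicy(ctx context.Context, svc *compute.Service, project, policy string) (bool, error) {
	req := &compute.TestPermissionsRequest{
		Permissions: []string{"compute.securityPolicies.delete"}, // assumed permission name, matching the method id
	}
	resp, err := svc.SecurityPolicies.TestIamPermissions(project, policy, req).Context(ctx).Do()
	if err != nil {
		return false, err
	}
	for _, p := range resp.Permissions {
		if p == "compute.securityPolicies.delete" {
			return true, nil
		}
	}
	return false, nil
}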
-func (c *RoutesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { +func (c *SecurityPoliciesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -62243,7 +74263,7 @@ func (c *RoutesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPe // { // "description": "Returns permissions that a caller has on the specified resource.", // "httpMethod": "POST", - // "id": "compute.routes.testIamPermissions", + // "id": "compute.securityPolicies.testIamPermissions", // "parameterOrder": [ // "project", // "resource" @@ -62259,12 +74279,12 @@ func (c *RoutesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPe // "resource": { // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + // "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "{project}/global/routes/{resource}/testIamPermissions", + // "path": "{project}/global/securityPolicies/{resource}/testIamPermissions", // "request": { // "$ref": "TestPermissionsRequest" // }, @@ -63127,7 +75147,7 @@ func (c *SnapshotsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*Tes // "resource": { // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + // "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", // "required": true, // "type": "string" // } @@ -63999,7 +76019,7 @@ func (c *SslCertificatesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) // "resource": { // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + // "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", // "required": true, // "type": "string" // } @@ -66910,7 +78930,7 @@ func (c *TargetHttpProxiesTestIamPermissionsCall) Do(opts ...googleapi.CallOptio // "resource": { // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + // "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", // "required": true, // "type": "string" // } @@ -68122,7 +80142,7 @@ func (c *TargetHttpsProxiesTestIamPermissionsCall) Do(opts ...googleapi.CallOpti // "resource": { // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + // "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", // "required": true, // "type": "string" // } @@ -72953,7 +84973,7 @@ func (c *TargetSslProxiesTestIamPermissionsCall) Do(opts ...googleapi.CallOption // "resource": { // "description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + // "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", // "required": true, // "type": "string" // } @@ -76282,8 +88302,7 @@ func (c *UrlMapsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } @@ -76416,7 +88435,7 @@ func (c *UrlMapsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestP // "resource": { // 
"description": "Name of the resource for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?", + // "pattern": "(?:[-a-z0-9_]{0,62}[a-z0-9])?", // "required": true, // "type": "string" // } diff --git a/vendor/google.golang.org/api/compute/v1/compute-api.json b/vendor/google.golang.org/api/compute/v1/compute-api.json index 9b169bb56d52..95ca7af201a7 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-api.json +++ b/vendor/google.golang.org/api/compute/v1/compute-api.json @@ -1,11 +1,11 @@ { "kind": "discovery#restDescription", - "etag": "\"YWOzh2SDasdU84ArJnpYek-OMdg/dXJ6H81YhcA1IB55do9QrvoIsEc\"", + "etag": "\"YWOzh2SDasdU84ArJnpYek-OMdg/bvUbOBPnfuX4gDZ5aBr7PZU4ZJM\"", "discoveryVersion": "v1", "id": "compute:v1", "name": "compute", "version": "v1", - "revision": "20170721", + "revision": "20170905", "title": "Compute Engine API", "description": "Creates and runs virtual machines on Google Cloud Platform.", "ownerDomain": "google.com", @@ -591,7 +591,7 @@ }, "index": { "type": "integer", - "description": "Assigns a zero-based index to this disk, where 0 is reserved for the boot disk. For example, if you have many disks attached to an instance, each disk would have a unique index number. If not specified, the server will choose an appropriate value.", + "description": "[Output Only] A zero-based index to this disk, where 0 is reserved for the boot disk. If you have many disks attached to an instance, each disk would have a unique index number.", "format": "int32" }, "initializeParams": { @@ -1216,7 +1216,7 @@ }, "healthChecks": { "type": "array", - "description": "The list of URLs to the HttpHealthCheck or HttpsHealthCheck resource for health checking this BackendService. Currently at most one health check can be specified, and a health check is required for GCE backend services. A health check must not be specified for GAE app backend and Cloud Function backend.\n\nFor internal load balancing, a URL to a HealthCheck resource must be specified instead.", + "description": "The list of URLs to the HttpHealthCheck or HttpsHealthCheck resource for health checking this BackendService. Currently at most one health check can be specified, and a health check is required for Compute Engine backend services. A health check must not be specified for App Engine backend and Cloud Function backend.\n\nFor internal load balancing, a URL to a HealthCheck resource must be specified instead.", "items": { "type": "string" } @@ -1236,6 +1236,7 @@ }, "loadBalancingScheme": { "type": "string", + "description": "Indicates whether the backend service will be used with internal or external load balancing. A backend service created for one type of load balancing cannot be used with the other. Possible values are INTERNAL and EXTERNAL.", "enum": [ "EXTERNAL", "INTERNAL", @@ -2387,10 +2388,49 @@ "type": "string", "description": "[Output Only] Creation timestamp in RFC3339 text format." }, + "denied": { + "type": "array", + "description": "The list of DENY rules specified by this firewall. Each rule specifies a protocol and port-range tuple that describes a permitted connection.", + "items": { + "type": "object", + "properties": { + "IPProtocol": { + "type": "string", + "description": "The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), or the IP protocol number." 
+ }, + "ports": { + "type": "array", + "description": "An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port.\n\nExample inputs include: [\"22\"], [\"80\",\"443\"], and [\"12345-12349\"].", + "items": { + "type": "string" + } + } + } + } + }, "description": { "type": "string", "description": "An optional description of this resource. Provide this property when you create the resource." }, + "destinationRanges": { + "type": "array", + "description": "If destination ranges are specified, the firewall will apply only to traffic that has destination IP address in these ranges. These ranges must be expressed in CIDR format. Only IPv4 is supported.", + "items": { + "type": "string" + } + }, + "direction": { + "type": "string", + "description": "Direction of traffic to which this firewall applies; default is INGRESS. Note: For INGRESS traffic, it is NOT supported to specify destinationRanges; For EGRESS traffic, it is NOT supported to specify sourceRanges OR sourceTags.", + "enum": [ + "EGRESS", + "INGRESS" + ], + "enumDescriptions": [ + "", + "" + ] + }, "id": { "type": "string", "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", @@ -2416,6 +2456,11 @@ "type": "string", "description": "URL of the network resource for this firewall rule. If not specified when creating a firewall rule, the default network is used:\nglobal/networks/default\nIf you choose to specify this property, you can specify the network as a full or partial URL. For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/myproject/global/networks/my-network \n- projects/myproject/global/networks/my-network \n- global/networks/default" }, + "priority": { + "type": "integer", + "description": "Priority for this rule. This is an integer between 0 and 65535, both inclusive. When not specified, the value assumed is 1000. Relative priorities determine precedence of conflicting rules. Lower value of priority implies higher precedence (eg, a rule with priority 0 has higher precedence than a rule with priority 1). DENY rules take precedence over ALLOW rules having equal priority.", + "format": "int32" + }, "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for the resource." @@ -3485,7 +3530,7 @@ }, "networkInterfaces": { "type": "array", - "description": "An array of configurations for this interface. This specifies how this interface is configured to interact with other network services, such as connecting to the internet. Only one interface is supported per instance.", + "description": "An array of network configurations for this instance. These specify how interfaces are configured to interact with other network services, such as connecting to the internet. 
Multiple interfaces are supported per instance.", "items": { "$ref": "NetworkInterface" } @@ -5772,7 +5817,7 @@ }, "resources": { "type": "array", - "description": "Serive resources (a.k.a service projects) attached to this project as their shared VPC host.", + "description": "Service resources (a.k.a service projects) attached to this project as their shared VPC host.", "items": { "$ref": "XpnResourceId" } @@ -5822,6 +5867,7 @@ "LOCAL_SSD_TOTAL_GB", "NETWORKS", "NVIDIA_K80_GPUS", + "NVIDIA_P100_GPUS", "PREEMPTIBLE_CPUS", "PREEMPTIBLE_LOCAL_SSD_GB", "REGIONAL_AUTOSCALERS", @@ -5840,6 +5886,7 @@ "TARGET_INSTANCES", "TARGET_POOLS", "TARGET_SSL_PROXIES", + "TARGET_TCP_PROXIES", "TARGET_VPN_GATEWAYS", "URL_MAPS", "VPN_TUNNELS" @@ -5884,6 +5931,8 @@ "", "", "", + "", + "", "" ] }, @@ -9459,6 +9508,11 @@ "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -9535,6 +9589,11 @@ "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -9682,6 +9741,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "zone": { "type": "string", "description": "Name of the zone for this request.", @@ -9758,6 +9822,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "zone": { "type": "string", "description": "Name of the zone for this request.", @@ -9857,6 +9926,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "zone": { "type": "string", "description": "Name of the zone for this request.", @@ -9877,8 +9951,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "update": { @@ -9900,6 +9973,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "zone": { "type": "string", "description": "Name of the zone for this request.", @@ -9946,6 +10024,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -10006,6 +10089,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -10090,6 +10178,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -10104,8 +10197,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "update": { @@ -10127,6 +10219,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -10216,6 +10313,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -10312,6 +10414,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -10396,6 +10503,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -10410,8 +10522,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "update": { @@ -10433,6 +10544,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -10679,6 +10795,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "zone": { "type": "string", "description": "The name of the zone for this request.", @@ -10722,6 +10843,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "zone": { "type": "string", "description": "The name of the zone for this request.", @@ -10798,6 +10924,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "sourceImage": { "type": "string", "description": "Optional. Source image to restore onto a disk.", @@ -10903,6 +11034,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "zone": { "type": "string", "description": "The name of the zone for this request.", @@ -10940,6 +11076,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "resource": { "type": "string", "description": "Name of the resource for this request.", @@ -10994,6 +11135,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -11054,6 +11200,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -11123,11 +11274,11 @@ "id": "compute.firewalls.patch", "path": "{project}/global/firewalls/{firewall}", "httpMethod": "PATCH", - "description": "Updates the specified firewall rule with the data included in the request. 
Using PUT method, can only update following fields of firewall rule: allowed, description, sourceRanges, sourceTags, targetTags. This method supports patch semantics.", + "description": "Updates the specified firewall rule with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", "parameters": { "firewall": { "type": "string", - "description": "Name of the firewall rule to update.", + "description": "Name of the firewall rule to patch.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -11138,6 +11289,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -11174,6 +11330,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -11270,6 +11431,11 @@ "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -11346,6 +11512,11 @@ "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -11446,6 +11617,11 @@ "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -11487,6 +11663,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -11547,6 +11728,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -11635,6 +11821,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -11695,6 +11886,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -11779,6 +11975,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -11985,6 +12186,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -12045,6 +12251,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -12129,6 +12340,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -12143,8 +12359,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "update": { @@ -12166,6 +12381,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -12206,6 +12426,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -12266,6 +12491,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -12350,6 +12580,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -12364,8 +12599,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "update": { @@ -12387,6 +12621,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -12427,6 +12666,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -12487,6 +12731,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -12571,6 +12820,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -12585,8 +12839,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "update": { @@ -12608,6 +12861,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -12648,6 +12906,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -12681,6 +12944,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -12783,6 +13051,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -12910,6 +13183,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "zone": { "type": "string", "description": "The name of the zone where the managed instance group is located.", @@ -13001,6 +13279,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "zone": { "type": "string", "description": "The name of the zone where the managed instance group is located.", @@ -13040,6 +13323,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "zone": { "type": "string", "description": "The name of the zone where the managed instance group is located.", @@ -13116,6 +13404,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "zone": { "type": "string", "description": "The name of the zone where you want to create the managed instance group.", @@ -13272,6 +13565,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "zone": { "type": "string", "description": "The name of the zone where the managed instance group is located.", @@ -13314,6 +13612,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "size": { "type": "integer", "description": "The number of running instances that the managed instance group should maintain at any given time. The group automatically adds or removes instances to maintain the number of instances specified by this parameter.", @@ -13361,6 +13664,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "zone": { "type": "string", "description": "The name of the zone where the managed instance group is located.", @@ -13403,6 +13711,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "zone": { "type": "string", "description": "The name of the zone where the managed instance group is located.", @@ -13449,6 +13762,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "zone": { "type": "string", "description": "The name of the zone where the instance group is located.", @@ -13540,6 +13858,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "zone": { "type": "string", "description": "The name of the zone where the instance group is located.", @@ -13613,6 +13936,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "zone": { "type": "string", "description": "The name of the zone where you want to create the instance group.", @@ -13776,6 +14104,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "zone": { "type": "string", "description": "The name of the zone where the instance group is located.", @@ -13818,6 +14151,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "zone": { "type": "string", "description": "The name of the zone where the instance group is located.", @@ -13864,6 +14202,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -13924,6 +14267,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -14019,6 +14367,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "zone": { "type": "string", "description": "The name of the zone for this request.", @@ -14113,6 +14466,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "zone": { "type": "string", "description": "The name of the zone for this request.", @@ -14157,6 +14515,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "zone": { "type": "string", "description": "The name of the zone for this request.", @@ -14210,6 +14573,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "zone": { "type": "string", "description": "The name of the zone for this request.", @@ -14259,6 +14627,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "zone": { "type": "string", "description": "The name of the zone for this request.", @@ -14393,9 +14766,14 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, - "zone": { + "requestId": { "type": "string", - "description": "The name of the zone for this request.", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -14493,6 +14871,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "zone": { "type": "string", "description": "The name of the zone for this request.", @@ -14547,6 +14930,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "zone": { "type": "string", "description": "The name of the zone for this request.", @@ -14590,6 +14978,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "zone": { "type": "string", "description": "The name of the zone for this request.", @@ -14634,6 +15027,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "zone": { "type": "string", "description": "The name of the zone for this request.", @@ -14678,6 +15076,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "zone": { "type": "string", "description": "The name of the zone for this request.", @@ -14722,6 +15125,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "zone": { "type": "string", "description": "The name of the zone for this request.", @@ -14766,6 +15174,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "zone": { "type": "string", "description": "The name of the zone for this request.", @@ -14810,6 +15223,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "zone": { "type": "string", "description": "The name of the zone for this request.", @@ -14854,6 +15272,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "zone": { "type": "string", "description": "The name of the zone for this request.", @@ -14898,6 +15321,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "zone": { "type": "string", "description": "The name of the zone for this request.", @@ -14939,6 +15367,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "zone": { "type": "string", "description": "The name of the zone for this request.", @@ -14983,6 +15416,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "zone": { "type": "string", "description": "The name of the zone for this request.", @@ -15217,6 +15655,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -15253,6 +15696,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -15313,6 +15761,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -15397,6 +15850,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -15433,6 +15891,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -15463,6 +15926,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -15488,6 +15956,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -15516,6 +15989,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -15541,6 +16019,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -15711,6 +16194,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -15739,6 +16227,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -15767,6 +16260,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -15795,6 +16293,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -15844,6 +16347,11 @@ "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -15920,6 +16428,11 @@ "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -16019,6 +16532,11 @@ "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -16033,8 +16551,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "update": { @@ -16062,6 +16579,11 @@ "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -16109,6 +16631,11 @@ "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -16229,6 +16756,11 @@ "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -16329,6 +16861,11 @@ "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -16344,8 +16881,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "update": { @@ -16374,6 +16910,11 @@ "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -16506,6 +17047,11 @@ "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -16608,6 +17154,11 @@ "description": "Name of the region scoping this request.", "required": true, "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -16650,6 +17201,11 @@ "description": "Name of the region scoping this request.", "required": true, "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -16689,6 +17245,11 @@ "description": "Name of the region scoping this request.", "required": true, "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -16765,6 +17326,11 @@ "description": "Name of the region scoping this request.", "required": true, "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -16921,6 +17487,11 @@ "description": "Name of the region scoping this request.", "required": true, "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -16964,6 +17535,11 @@ "required": true, "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "size": { "type": "integer", "description": "Number of instances that should exist in this instance group manager.", @@ -17011,6 +17587,11 @@ "description": "Name of the region scoping this request.", "required": true, "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -17053,6 +17634,11 @@ "description": "Name of the region scoping this request.", "required": true, "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -17261,6 +17847,11 @@ "description": "Name of the region scoping this request.", "required": true, "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -17580,6 +18171,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "router": { "type": "string", "description": "Name of the Router resource to delete.", @@ -17704,6 +18300,11 @@ "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -17798,6 +18399,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "router": { "type": "string", "description": "Name of the Router resource to patch.", @@ -17819,8 +18425,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "preview": { @@ -17888,6 +18493,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "router": { "type": "string", "description": "Name of the Router resource to update.", @@ -17929,6 +18539,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "route": { "type": "string", "description": "Name of the Route resource to delete.", @@ -17995,6 +18610,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -18077,6 +18697,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "snapshot": { "type": "string", "description": "Name of the Snapshot resource to delete.", @@ -18233,6 +18858,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "sslCertificate": { "type": "string", "description": "Name of the SslCertificate resource to delete.", @@ -18299,6 +18929,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -18437,6 +19072,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "subnetwork": { "type": "string", "description": "Name of the Subnetwork resource to delete.", @@ -18478,6 +19118,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "subnetwork": { "type": "string", "description": "Name of the Subnetwork resource to update.", @@ -18563,6 +19208,11 @@ "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -18657,6 +19307,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "subnetwork": { "type": "string", "description": "Name of the Subnetwork resource.", @@ -18698,6 +19353,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "targetHttpProxy": { "type": "string", "description": "Name of the TargetHttpProxy resource to delete.", @@ -18764,6 +19424,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -18842,6 +19507,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "targetHttpProxy": { "type": "string", "description": "Name of the TargetHttpProxy to set a URL map for.", @@ -18882,6 +19552,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "targetHttpsProxy": { "type": "string", "description": "Name of the TargetHttpsProxy resource to delete.", @@ -18948,6 +19623,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -19026,6 +19706,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "targetHttpsProxy": { "type": "string", "description": "Name of the TargetHttpsProxy resource to set an SslCertificates resource for.", @@ -19062,6 +19747,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "targetHttpsProxy": { "type": "string", "description": "Name of the TargetHttpsProxy resource whose URL map is to be set.", @@ -19151,6 +19841,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "targetInstance": { "type": "string", "description": "Name of the TargetInstance resource to delete.", @@ -19234,6 +19929,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "zone": { "type": "string", "description": "Name of the zone scoping this request.", @@ -19338,6 +20038,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "targetPool": { "type": "string", "description": "Name of the target pool to add a health check to.", @@ -19382,6 +20087,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "targetPool": { "type": "string", "description": "Name of the TargetPool resource to add instances to.", @@ -19475,6 +20185,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "targetPool": { "type": "string", "description": "Name of the TargetPool resource to delete.", @@ -19602,6 +20317,11 @@ "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -19696,6 +20416,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "targetPool": { "type": "string", "description": "Name of the target pool to remove health checks from.", @@ -19740,6 +20465,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "targetPool": { "type": "string", "description": "Name of the TargetPool resource to remove instances from.", @@ -19790,6 +20520,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "targetPool": { "type": "string", "description": "Name of the TargetPool resource to set a backup pool for.", @@ -19831,6 +20566,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "targetSslProxy": { "type": "string", "description": "Name of the TargetSslProxy resource to delete.", @@ -19897,6 +20637,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -19975,6 +20720,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "targetSslProxy": { "type": "string", "description": "Name of the TargetSslProxy resource whose BackendService resource is to be set.", @@ -20011,6 +20761,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "targetSslProxy": { "type": "string", "description": "Name of the TargetSslProxy resource whose ProxyHeader is to be set.", @@ -20047,6 +20802,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "targetSslProxy": { "type": "string", "description": "Name of the TargetSslProxy resource whose SslCertificate resource is to be set.", @@ -20087,6 +20847,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "targetTcpProxy": { "type": "string", "description": "Name of the TargetTcpProxy resource to delete.", @@ -20153,6 +20918,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -20231,6 +21001,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "targetTcpProxy": { "type": "string", "description": "Name of the TargetTcpProxy resource whose BackendService resource is to be set.", @@ -20267,6 +21042,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "targetTcpProxy": { "type": "string", "description": "Name of the TargetTcpProxy resource whose ProxyHeader is to be set.", @@ -20363,6 +21143,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "targetVpnGateway": { "type": "string", "description": "Name of the target VPN gateway to delete.", @@ -20445,6 +21230,11 @@ "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -20536,6 +21326,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "urlMap": { "type": "string", "description": "Name of the UrlMap resource to delete.", @@ -20602,6 +21397,11 @@ "required": true, "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ @@ -20631,6 +21431,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "urlMap": { "type": "string", "description": "Name of the UrlMap scoping this request.", @@ -20716,6 +21521,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "urlMap": { "type": "string", "description": "Name of the UrlMap resource to patch.", @@ -20736,8 +21546,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "update": { @@ -20753,6 +21562,11 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "urlMap": { "type": "string", "description": "Name of the UrlMap resource to update.", @@ -20885,6 +21699,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, "vpnTunnel": { "type": "string", "description": "Name of the VpnTunnel resource to delete.", @@ -20967,6 +21786,11 @@ "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" } }, "parameterOrder": [ diff --git a/vendor/google.golang.org/api/compute/v1/compute-gen.go b/vendor/google.golang.org/api/compute/v1/compute-gen.go index 8a7df317a6cf..f6125c61c51a 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-gen.go +++ b/vendor/google.golang.org/api/compute/v1/compute-gen.go @@ -1432,10 +1432,9 @@ type AttachedDisk struct { // group. DiskEncryptionKey *CustomerEncryptionKey `json:"diskEncryptionKey,omitempty"` - // Index: Assigns a zero-based index to this disk, where 0 is reserved - // for the boot disk. For example, if you have many disks attached to an - // instance, each disk would have a unique index number. If not - // specified, the server will choose an appropriate value. + // Index: [Output Only] A zero-based index to this disk, where 0 is + // reserved for the boot disk. If you have many disks attached to an + // instance, each disk would have a unique index number. Index int64 `json:"index,omitempty"` // InitializeParams: [Input Only] Specifies the parameters for a new @@ -2500,8 +2499,9 @@ type BackendService struct { // HealthChecks: The list of URLs to the HttpHealthCheck or // HttpsHealthCheck resource for health checking this BackendService. // Currently at most one health check can be specified, and a health - // check is required for GCE backend services. A health check must not - // be specified for GAE app backend and Cloud Function backend. + // check is required for Compute Engine backend services. A health check + // must not be specified for App Engine backend and Cloud Function + // backend. // // For internal load balancing, a URL to a HealthCheck resource must be // specified instead. @@ -2517,6 +2517,11 @@ type BackendService struct { // for backend services. Kind string `json:"kind,omitempty"` + // LoadBalancingScheme: Indicates whether the backend service will be + // used with internal or external load balancing. A backend service + // created for one type of load balancing cannot be used with the other. + // Possible values are INTERNAL and EXTERNAL. + // // Possible values: // "EXTERNAL" // "INTERNAL" @@ -4332,10 +4337,31 @@ type Firewall struct { // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` + // Denied: The list of DENY rules specified by this firewall. Each rule + // specifies a protocol and port-range tuple that describes a permitted + // connection. + Denied []*FirewallDenied `json:"denied,omitempty"` + // Description: An optional description of this resource. Provide this // property when you create the resource. Description string `json:"description,omitempty"` + // DestinationRanges: If destination ranges are specified, the firewall + // will apply only to traffic that has destination IP address in these + // ranges. These ranges must be expressed in CIDR format. Only IPv4 is + // supported. + DestinationRanges []string `json:"destinationRanges,omitempty"` + + // Direction: Direction of traffic to which this firewall applies; + // default is INGRESS. Note: For INGRESS traffic, it is NOT supported to + // specify destinationRanges; For EGRESS traffic, it is NOT supported to + // specify sourceRanges OR sourceTags. 
+ // + // Possible values: + // "EGRESS" + // "INGRESS" + Direction string `json:"direction,omitempty"` + // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` @@ -4366,6 +4392,14 @@ type Firewall struct { // - global/networks/default Network string `json:"network,omitempty"` + // Priority: Priority for this rule. This is an integer between 0 and + // 65535, both inclusive. When not specified, the value assumed is 1000. + // Relative priorities determine precedence of conflicting rules. Lower + // value of priority implies higher precedence (eg, a rule with priority + // 0 has higher precedence than a rule with priority 1). DENY rules take + // precedence over ALLOW rules having equal priority. + Priority int64 `json:"priority,omitempty"` + // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` @@ -4464,6 +4498,44 @@ func (s *FirewallAllowed) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type FirewallDenied struct { + // IPProtocol: The IP protocol to which this rule applies. The protocol + // type is required when creating a firewall rule. This value can either + // be one of the following well known protocol strings (tcp, udp, icmp, + // esp, ah, ipip, sctp), or the IP protocol number. + IPProtocol string `json:"IPProtocol,omitempty"` + + // Ports: An optional list of ports to which this rule applies. This + // field is only applicable for UDP or TCP protocol. Each entry must be + // either an integer or a range. If not specified, this rule applies to + // connections through any port. + // + // Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. + Ports []string `json:"ports,omitempty"` + + // ForceSendFields is a list of field names (e.g. "IPProtocol") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "IPProtocol") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *FirewallDenied) MarshalJSON() ([]byte, error) { + type noMethod FirewallDenied + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // FirewallList: Contains a list of firewalls. type FirewallList struct { // Id: [Output Only] Unique identifier for the resource; defined by the @@ -5994,10 +6066,10 @@ type Instance struct { // be a dash. Name string `json:"name,omitempty"` - // NetworkInterfaces: An array of configurations for this interface. - // This specifies how this interface is configured to interact with - // other network services, such as connecting to the internet. Only one - // interface is supported per instance. + // NetworkInterfaces: An array of network configurations for this + // instance. 
These specify how interfaces are configured to interact + // with other network services, such as connecting to the internet. + // Multiple interfaces are supported per instance. NetworkInterfaces []*NetworkInterface `json:"networkInterfaces,omitempty"` // Scheduling: Sets the scheduling options for this instance. @@ -9628,8 +9700,8 @@ type ProjectsGetXpnResources struct { // the results. NextPageToken string `json:"nextPageToken,omitempty"` - // Resources: Serive resources (a.k.a service projects) attached to this - // project as their shared VPC host. + // Resources: Service resources (a.k.a service projects) attached to + // this project as their shared VPC host. Resources []*XpnResourceId `json:"resources,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -9715,6 +9787,7 @@ type Quota struct { // "LOCAL_SSD_TOTAL_GB" // "NETWORKS" // "NVIDIA_K80_GPUS" + // "NVIDIA_P100_GPUS" // "PREEMPTIBLE_CPUS" // "PREEMPTIBLE_LOCAL_SSD_GB" // "REGIONAL_AUTOSCALERS" @@ -9733,6 +9806,7 @@ type Quota struct { // "TARGET_INSTANCES" // "TARGET_POOLS" // "TARGET_SSL_PROXIES" + // "TARGET_TCP_PROXIES" // "TARGET_VPN_GATEWAYS" // "URL_MAPS" // "VPN_TUNNELS" @@ -16023,6 +16097,25 @@ func (r *AddressesService) Delete(project string, region string, address string) return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *AddressesDeleteCall) RequestId(requestId string) *AddressesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -16135,6 +16228,11 @@ func (c *AddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/regions/{region}/addresses/{address}", @@ -16335,6 +16433,25 @@ func (r *AddressesService) Insert(project string, region string, address *Addres return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *AddressesInsertCall) RequestId(requestId string) *AddressesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -16443,6 +16560,11 @@ func (c *AddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/regions/{region}/addresses", @@ -16996,6 +17118,25 @@ func (r *AutoscalersService) Delete(project string, zone string, autoscaler stri return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
+func (c *AutoscalersDeleteCall) RequestId(requestId string) *AutoscalersDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -17102,6 +17243,11 @@ func (c *AutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "Name of the zone for this request.", // "location": "path", @@ -17307,6 +17453,25 @@ func (r *AutoscalersService) Insert(project string, zone string, autoscaler *Aut return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *AutoscalersInsertCall) RequestId(requestId string) *AutoscalersInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -17409,6 +17574,11 @@ func (c *AutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "Name of the zone for this request.", // "location": "path", @@ -17725,6 +17895,25 @@ func (c *AutoscalersPatchCall) Autoscaler(autoscaler string) *AutoscalersPatchCa return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *AutoscalersPatchCall) RequestId(requestId string) *AutoscalersPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -17833,6 +18022,11 @@ func (c *AutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "Name of the zone for this request.", // "location": "path", @@ -17850,8 +18044,7 @@ func (c *AutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } @@ -17886,6 +18079,25 @@ func (c *AutoscalersUpdateCall) Autoscaler(autoscaler string) *AutoscalersUpdate return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *AutoscalersUpdateCall) RequestId(requestId string) *AutoscalersUpdateCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -17994,6 +18206,11 @@ func (c *AutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, er // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "Name of the zone for this request.", // "location": "path", @@ -18036,6 +18253,25 @@ func (r *BackendBucketsService) Delete(project string, backendBucket string) *Ba return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendBucketsDeleteCall) RequestId(requestId string) *BackendBucketsDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -18139,6 +18375,11 @@ func (c *BackendBucketsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/global/backendBuckets/{backendBucket}", @@ -18325,6 +18566,25 @@ func (r *BackendBucketsService) Insert(project string, backendbucket *BackendBuc return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendBucketsInsertCall) RequestId(requestId string) *BackendBucketsInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -18424,6 +18684,11 @@ func (c *BackendBucketsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/global/backendBuckets", @@ -18716,6 +18981,25 @@ func (r *BackendBucketsService) Patch(project string, backendBucket string, back return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. 
This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendBucketsPatchCall) RequestId(requestId string) *BackendBucketsPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -18824,6 +19108,11 @@ func (c *BackendBucketsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/global/backendBuckets/{backendBucket}", @@ -18835,8 +19124,7 @@ func (c *BackendBucketsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } @@ -18864,6 +19152,25 @@ func (r *BackendBucketsService) Update(project string, backendBucket string, bac return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendBucketsUpdateCall) RequestId(requestId string) *BackendBucketsUpdateCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
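Note on the new setters: every mutating call type in this regenerated client (AddressesInsertCall, AutoscalersDeleteCall, BackendBucketsUpdateCall, and so on) gains the same optional RequestId(string) method, which simply stores the value in urlParams_ so it is sent as the requestId query parameter. Below is a minimal sketch of how downstream code could use it for retry-safe inserts; it is not part of this change. It assumes the compute/v1 import path (the beta/alpha surfaces get the same setters), an already-authenticated *http.Client (credential wiring is omitted), and a hand-rolled UUID v4 helper purely to avoid pulling in a UUID library; the project, region, and address values are illustrative placeholders.

package main

import (
	"crypto/rand"
	"fmt"
	"log"
	"net/http"

	compute "google.golang.org/api/compute/v1"
)

// newRequestID returns a random RFC 4122 version 4 UUID string. Any UUID
// generator works, as long as the zero UUID is never used.
func newRequestID() (string, error) {
	var b [16]byte
	if _, err := rand.Read(b[:]); err != nil {
		return "", err
	}
	b[6] = (b[6] & 0x0f) | 0x40 // set version 4
	b[8] = (b[8] & 0x3f) | 0x80 // set RFC 4122 variant
	return fmt.Sprintf("%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:16]), nil
}

// insertAddress retries a regional address insert, reusing one request ID so
// the API server can recognize and drop duplicate attempts after a timeout.
func insertAddress(hc *http.Client, project, region string, addr *compute.Address) (*compute.Operation, error) {
	svc, err := compute.New(hc)
	if err != nil {
		return nil, err
	}
	reqID, err := newRequestID()
	if err != nil {
		return nil, err
	}
	var op *compute.Operation
	for attempt := 1; attempt <= 3; attempt++ {
		// Same reqID on every attempt: a retry of a request that actually
		// completed server-side is ignored instead of creating a duplicate.
		op, err = svc.Addresses.Insert(project, region, addr).RequestId(reqID).Do()
		if err == nil {
			return op, nil
		}
		log.Printf("insert attempt %d failed: %v", attempt, err)
	}
	return nil, err
}

func main() {
	// Real use needs an OAuth2-authenticated client (for example from
	// golang.org/x/oauth2/google); omitted in this sketch.
	_ = insertAddress
}

The same pattern applies unchanged to the other setters added in this file (deletes, patches, resizes, set-target calls, and so on), since each one differs only in the call type it hangs off.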
@@ -18972,6 +19279,11 @@ func (c *BackendBucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/global/backendBuckets/{backendBucket}", @@ -19261,6 +19573,25 @@ func (r *BackendServicesService) Delete(project string, backendService string) * return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendServicesDeleteCall) RequestId(requestId string) *BackendServicesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -19364,6 +19695,11 @@ func (c *BackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/global/backendServices/{backendService}", @@ -19702,6 +20038,25 @@ func (r *BackendServicesService) Insert(project string, backendservice *BackendS return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendServicesInsertCall) RequestId(requestId string) *BackendServicesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -19801,6 +20156,11 @@ func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/global/backendServices", @@ -20098,6 +20458,25 @@ func (r *BackendServicesService) Patch(project string, backendService string, ba return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
+func (c *BackendServicesPatchCall) RequestId(requestId string) *BackendServicesPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -20206,6 +20585,11 @@ func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/global/backendServices/{backendService}", @@ -20217,8 +20601,7 @@ func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } @@ -20249,6 +20632,25 @@ func (r *BackendServicesService) Update(project string, backendService string, b return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *BackendServicesUpdateCall) RequestId(requestId string) *BackendServicesUpdateCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -20357,6 +20759,11 @@ func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/global/backendServices/{backendService}", @@ -21336,6 +21743,25 @@ func (c *DisksCreateSnapshotCall) GuestFlush(guestFlush bool) *DisksCreateSnapsh return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *DisksCreateSnapshotCall) RequestId(requestId string) *DisksCreateSnapshotCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -21451,6 +21877,11 @@ func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -21499,6 +21930,25 @@ func (r *DisksService) Delete(project string, zone string, disk string) *DisksDe return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *DisksDeleteCall) RequestId(requestId string) *DisksDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -21604,6 +22054,11 @@ func (c *DisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -21814,6 +22269,25 @@ func (r *DisksService) Insert(project string, zone string, disk *Disk) *DisksIns return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *DisksInsertCall) RequestId(requestId string) *DisksInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // SourceImage sets the optional parameter "sourceImage": Source image // to restore onto a disk. func (c *DisksInsertCall) SourceImage(sourceImage string) *DisksInsertCall { @@ -21923,6 +22397,11 @@ func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "sourceImage": { // "description": "Optional. Source image to restore onto a disk.", // "location": "query", @@ -22238,6 +22717,25 @@ func (r *DisksService) Resize(project string, zone string, disk string, disksres return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *DisksResizeCall) RequestId(requestId string) *DisksResizeCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -22349,6 +22847,11 @@ func (c *DisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -22396,6 +22899,25 @@ func (r *DisksService) SetLabels(project string, zone string, resource string, z return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. 
+// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *DisksSetLabelsCall) RequestId(requestId string) *DisksSetLabelsCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -22500,6 +23022,11 @@ func (c *DisksSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "resource": { // "description": "Name of the resource for this request.", // "location": "path", @@ -22550,6 +23077,25 @@ func (r *FirewallsService) Delete(project string, firewall string) *FirewallsDel return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *FirewallsDeleteCall) RequestId(requestId string) *FirewallsDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -22653,6 +23199,11 @@ func (c *FirewallsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/global/firewalls/{firewall}", @@ -22840,6 +23391,25 @@ func (r *FirewallsService) Insert(project string, firewall *Firewall) *Firewalls return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *FirewallsInsertCall) RequestId(requestId string) *FirewallsInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -22939,6 +23509,11 @@ func (c *FirewallsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/global/firewalls", @@ -23222,9 +23797,8 @@ type FirewallsPatchCall struct { } // Patch: Updates the specified firewall rule with the data included in -// the request. Using PUT method, can only update following fields of -// firewall rule: allowed, description, sourceRanges, sourceTags, -// targetTags. This method supports patch semantics. +// the request. This method supports PATCH semantics and uses the JSON +// merge patch format and processing rules. // For details, see https://cloud.google.com/compute/docs/reference/latest/firewalls/patch func (r *FirewallsService) Patch(project string, firewall string, firewall2 *Firewall) *FirewallsPatchCall { c := &FirewallsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -23234,6 +23808,25 @@ func (r *FirewallsService) Patch(project string, firewall string, firewall2 *Fir return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. 
Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *FirewallsPatchCall) RequestId(requestId string) *FirewallsPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -23321,7 +23914,7 @@ func (c *FirewallsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error } return ret, nil // { - // "description": "Updates the specified firewall rule with the data included in the request. Using PUT method, can only update following fields of firewall rule: allowed, description, sourceRanges, sourceTags, targetTags. This method supports patch semantics.", + // "description": "Updates the specified firewall rule with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", // "httpMethod": "PATCH", // "id": "compute.firewalls.patch", // "parameterOrder": [ @@ -23330,7 +23923,7 @@ func (c *FirewallsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error // ], // "parameters": { // "firewall": { - // "description": "Name of the firewall rule to update.", + // "description": "Name of the firewall rule to patch.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -23342,6 +23935,11 @@ func (c *FirewallsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/global/firewalls/{firewall}", @@ -23384,6 +23982,25 @@ func (r *FirewallsService) Update(project string, firewall string, firewall2 *Fi return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. 
+// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *FirewallsUpdateCall) RequestId(requestId string) *FirewallsUpdateCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -23492,6 +24109,11 @@ func (c *FirewallsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/global/firewalls/{firewall}", @@ -23783,6 +24405,25 @@ func (r *ForwardingRulesService) Delete(project string, region string, forwardin return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ForwardingRulesDeleteCall) RequestId(requestId string) *ForwardingRulesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -23895,6 +24536,11 @@ func (c *ForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", @@ -24095,6 +24741,25 @@ func (r *ForwardingRulesService) Insert(project string, region string, forwardin return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ForwardingRulesInsertCall) RequestId(requestId string) *ForwardingRulesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -24203,6 +24868,11 @@ func (c *ForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/regions/{region}/forwardingRules", @@ -24509,6 +25179,25 @@ func (r *ForwardingRulesService) SetTarget(project string, region string, forwar return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ForwardingRulesSetTargetCall) RequestId(requestId string) *ForwardingRulesSetTargetCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -24626,6 +25315,11 @@ func (c *ForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operat // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget", @@ -24663,6 +25357,25 @@ func (r *GlobalAddressesService) Delete(project string, address string) *GlobalA return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *GlobalAddressesDeleteCall) RequestId(requestId string) *GlobalAddressesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -24766,6 +25479,11 @@ func (c *GlobalAddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/global/addresses/{address}", @@ -24954,6 +25672,25 @@ func (r *GlobalAddressesService) Insert(project string, address *Address) *Globa return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *GlobalAddressesInsertCall) RequestId(requestId string) *GlobalAddressesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -25053,6 +25790,11 @@ func (c *GlobalAddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/global/addresses", @@ -25342,6 +26084,25 @@ func (r *GlobalForwardingRulesService) Delete(project string, forwardingRule str return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *GlobalForwardingRulesDeleteCall) RequestId(requestId string) *GlobalForwardingRulesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -25445,6 +26206,11 @@ func (c *GlobalForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/global/forwardingRules/{forwardingRule}", @@ -25633,6 +26399,25 @@ func (r *GlobalForwardingRulesService) Insert(project string, forwardingrule *Fo return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *GlobalForwardingRulesInsertCall) RequestId(requestId string) *GlobalForwardingRulesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -25732,6 +26517,11 @@ func (c *GlobalForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Ope // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/global/forwardingRules", @@ -26025,6 +26815,25 @@ func (r *GlobalForwardingRulesService) SetTarget(project string, forwardingRule return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *GlobalForwardingRulesSetTargetCall) RequestId(requestId string) *GlobalForwardingRulesSetTargetCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -26133,6 +26942,11 @@ func (c *GlobalForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (* // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/global/forwardingRules/{forwardingRule}/setTarget", @@ -26936,6 +27750,25 @@ func (r *HealthChecksService) Delete(project string, healthCheck string) *Health return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. 
+// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *HealthChecksDeleteCall) RequestId(requestId string) *HealthChecksDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -27039,6 +27872,11 @@ func (c *HealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, e // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/global/healthChecks/{healthCheck}", @@ -27225,6 +28063,25 @@ func (r *HealthChecksService) Insert(project string, healthcheck *HealthCheck) * return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *HealthChecksInsertCall) RequestId(requestId string) *HealthChecksInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -27324,6 +28181,11 @@ func (c *HealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, e // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/global/healthChecks", @@ -27616,6 +28478,25 @@ func (r *HealthChecksService) Patch(project string, healthCheck string, healthch return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *HealthChecksPatchCall) RequestId(requestId string) *HealthChecksPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -27724,6 +28605,11 @@ func (c *HealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, er // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/global/healthChecks/{healthCheck}", @@ -27735,8 +28621,7 @@ func (c *HealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, er // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } @@ -27764,6 +28649,25 @@ func (r *HealthChecksService) Update(project string, healthCheck string, healthc return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *HealthChecksUpdateCall) RequestId(requestId string) *HealthChecksUpdateCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -27872,6 +28776,11 @@ func (c *HealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, e // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/global/healthChecks/{healthCheck}", @@ -27909,6 +28818,25 @@ func (r *HttpHealthChecksService) Delete(project string, httpHealthCheck string) return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *HttpHealthChecksDeleteCall) RequestId(requestId string) *HttpHealthChecksDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -28012,6 +28940,11 @@ func (c *HttpHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", @@ -28200,6 +29133,25 @@ func (r *HttpHealthChecksService) Insert(project string, httphealthcheck *HttpHe return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *HttpHealthChecksInsertCall) RequestId(requestId string) *HttpHealthChecksInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -28299,6 +29251,11 @@ func (c *HttpHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operatio // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/global/httpHealthChecks", @@ -28593,6 +29550,25 @@ func (r *HttpHealthChecksService) Patch(project string, httpHealthCheck string, return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *HttpHealthChecksPatchCall) RequestId(requestId string) *HttpHealthChecksPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -28701,6 +29677,11 @@ func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", @@ -28712,8 +29693,7 @@ func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } @@ -28742,6 +29722,25 @@ func (r *HttpHealthChecksService) Update(project string, httpHealthCheck string, return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *HttpHealthChecksUpdateCall) RequestId(requestId string) *HttpHealthChecksUpdateCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -28850,6 +29849,11 @@ func (c *HttpHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operatio // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", @@ -28886,6 +29890,25 @@ func (r *HttpsHealthChecksService) Delete(project string, httpsHealthCheck strin return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *HttpsHealthChecksDeleteCall) RequestId(requestId string) *HttpsHealthChecksDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -28989,6 +30012,11 @@ func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operati // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", @@ -29175,6 +30203,25 @@ func (r *HttpsHealthChecksService) Insert(project string, httpshealthcheck *Http return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *HttpsHealthChecksInsertCall) RequestId(requestId string) *HttpsHealthChecksInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -29274,6 +30321,11 @@ func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operati // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/global/httpsHealthChecks", @@ -29566,6 +30618,25 @@ func (r *HttpsHealthChecksService) Patch(project string, httpsHealthCheck string return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *HttpsHealthChecksPatchCall) RequestId(requestId string) *HttpsHealthChecksPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -29674,6 +30745,11 @@ func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operatio // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", @@ -29685,8 +30761,7 @@ func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operatio // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } @@ -29714,6 +30789,25 @@ func (r *HttpsHealthChecksService) Update(project string, httpsHealthCheck strin return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *HttpsHealthChecksUpdateCall) RequestId(requestId string) *HttpsHealthChecksUpdateCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -29822,6 +30916,11 @@ func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operati // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/global/httpsHealthChecks/{httpsHealthCheck}", @@ -29859,6 +30958,25 @@ func (r *ImagesService) Delete(project string, image string) *ImagesDeleteCall { return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ImagesDeleteCall) RequestId(requestId string) *ImagesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -29962,6 +31080,11 @@ func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/global/images/{image}", @@ -30001,6 +31124,25 @@ func (r *ImagesService) Deprecate(project string, image string, deprecationstatu return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ImagesDeprecateCall) RequestId(requestId string) *ImagesDeprecateCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -30109,6 +31251,11 @@ func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/global/images/{image}/deprecate", @@ -30459,6 +31606,25 @@ func (c *ImagesInsertCall) ForceCreate(forceCreate bool) *ImagesInsertCall { return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ImagesInsertCall) RequestId(requestId string) *ImagesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -30563,6 +31729,11 @@ func (c *ImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/global/images", @@ -31027,6 +32198,25 @@ func (r *InstanceGroupManagersService) AbandonInstances(project string, zone str return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupManagersAbandonInstancesCall) RequestId(requestId string) *InstanceGroupManagersAbandonInstancesCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -31137,6 +32327,11 @@ func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOpt // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone where the managed instance group is located.", // "location": "path", @@ -31436,6 +32631,25 @@ func (r *InstanceGroupManagersService) Delete(project string, zone string, insta return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupManagersDeleteCall) RequestId(requestId string) *InstanceGroupManagersDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -31541,6 +32755,11 @@ func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Ope // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone where the managed instance group is located.", // "location": "path", @@ -31598,6 +32817,25 @@ func (r *InstanceGroupManagersService) DeleteInstances(project string, zone stri return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupManagersDeleteInstancesCall) RequestId(requestId string) *InstanceGroupManagersDeleteInstancesCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -31708,6 +32946,11 @@ func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOpti // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone where the managed instance group is located.", // "location": "path", @@ -31922,6 +33165,25 @@ func (r *InstanceGroupManagersService) Insert(project string, zone string, insta return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. 
This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupManagersInsertCall) RequestId(requestId string) *InstanceGroupManagersInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -32024,6 +33286,11 @@ func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Ope // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone where you want to create the managed instance group.", // "location": "path", @@ -32541,6 +33808,25 @@ func (r *InstanceGroupManagersService) RecreateInstances(project string, zone st return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupManagersRecreateInstancesCall) RequestId(requestId string) *InstanceGroupManagersRecreateInstancesCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -32651,6 +33937,11 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOp // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone where the managed instance group is located.", // "location": "path", @@ -32705,6 +33996,25 @@ func (r *InstanceGroupManagersService) Resize(project string, zone string, insta return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupManagersResizeCall) RequestId(requestId string) *InstanceGroupManagersResizeCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -32811,6 +34121,11 @@ func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Ope // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "size": { // "description": "The number of running instances that the managed instance group should maintain at any given time. The group automatically adds or removes instances to maintain the number of instances specified by this parameter.", // "format": "int32", @@ -32862,6 +34177,25 @@ func (r *InstanceGroupManagersService) SetInstanceTemplate(project string, zone return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. 
+// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupManagersSetInstanceTemplateCall) RequestId(requestId string) *InstanceGroupManagersSetInstanceTemplateCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -32972,6 +34306,11 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.Call // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone where the managed instance group is located.", // "location": "path", @@ -33023,6 +34362,25 @@ func (r *InstanceGroupManagersService) SetTargetPools(project string, zone strin return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupManagersSetTargetPoolsCall) RequestId(requestId string) *InstanceGroupManagersSetTargetPoolsCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -33133,6 +34491,11 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOptio // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone where the managed instance group is located.", // "location": "path", @@ -33180,6 +34543,25 @@ func (r *InstanceGroupsService) AddInstances(project string, zone string, instan return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupsAddInstancesCall) RequestId(requestId string) *InstanceGroupsAddInstancesCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -33290,6 +34672,11 @@ func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Oper // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone where the instance group is located.", // "location": "path", @@ -33588,6 +34975,25 @@ func (r *InstanceGroupsService) Delete(project string, zone string, instanceGrou return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
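+//
+// Illustrative usage sketch (not part of the generated client). It assumes
+// an authorized *http.Client, the github.com/google/uuid package, and
+// placeholder project/zone/group names:
+//
+//     svc, err := compute.New(authorizedHTTPClient) // *compute.Service
+//     if err != nil {
+//         // handle error
+//     }
+//     reqID := uuid.New().String() // any non-zero UUID
+//     op, err := svc.InstanceGroups.Delete("my-project", "us-central1-a", "my-group").
+//         RequestId(reqID).
+//         Do()
+//     _ = op // a retry that reuses reqID will not delete the group twice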
+func (c *InstanceGroupsDeleteCall) RequestId(requestId string) *InstanceGroupsDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -33693,6 +35099,11 @@ func (c *InstanceGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone where the instance group is located.", // "location": "path", @@ -33895,6 +35306,25 @@ func (r *InstanceGroupsService) Insert(project string, zone string, instancegrou return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupsInsertCall) RequestId(requestId string) *InstanceGroupsInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -33997,6 +35427,11 @@ func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone where you want to create the instance group.", // "location": "path", @@ -34576,6 +36011,25 @@ func (r *InstanceGroupsService) RemoveInstances(project string, zone string, ins return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceGroupsRemoveInstancesCall) RequestId(requestId string) *InstanceGroupsRemoveInstancesCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -34686,6 +36140,11 @@ func (c *InstanceGroupsRemoveInstancesCall) Do(opts ...googleapi.CallOption) (*O // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone where the instance group is located.", // "location": "path", @@ -34731,6 +36190,25 @@ func (r *InstanceGroupsService) SetNamedPorts(project string, zone string, insta return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
+func (c *InstanceGroupsSetNamedPortsCall) RequestId(requestId string) *InstanceGroupsSetNamedPortsCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -34841,6 +36319,11 @@ func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Ope // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone where the instance group is located.", // "location": "path", @@ -34887,6 +36370,25 @@ func (r *InstanceTemplatesService) Delete(project string, instanceTemplate strin return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceTemplatesDeleteCall) RequestId(requestId string) *InstanceTemplatesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -34990,6 +36492,11 @@ func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/global/instanceTemplates/{instanceTemplate}", @@ -35181,6 +36688,25 @@ func (r *InstanceTemplatesService) Insert(project string, instancetemplate *Inst return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstanceTemplatesInsertCall) RequestId(requestId string) *InstanceTemplatesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -35280,6 +36806,11 @@ func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operati // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/global/instanceTemplates", @@ -35576,6 +37107,25 @@ func (r *InstancesService) AddAccessConfig(project string, zone string, instance return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
+func (c *InstancesAddAccessConfigCall) RequestId(requestId string) *InstancesAddAccessConfigCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -35694,6 +37244,11 @@ func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operat // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -35996,6 +37551,25 @@ func (r *InstancesService) AttachDisk(project string, zone string, instance stri return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesAttachDiskCall) RequestId(requestId string) *InstancesAttachDiskCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -36107,6 +37681,11 @@ func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -36153,6 +37732,25 @@ func (r *InstancesService) Delete(project string, zone string, instance string) return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesDeleteCall) RequestId(requestId string) *InstancesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -36259,6 +37857,11 @@ func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -36304,6 +37907,25 @@ func (r *InstancesService) DeleteAccessConfig(project string, zone string, insta return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
+func (c *InstancesDeleteAccessConfigCall) RequestId(requestId string) *InstancesDeleteAccessConfigCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -36424,6 +38046,11 @@ func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -36467,6 +38094,25 @@ func (r *InstancesService) DetachDisk(project string, zone string, instance stri return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesDetachDiskCall) RequestId(requestId string) *InstancesDetachDiskCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -36580,6 +38226,11 @@ func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -36984,6 +38635,25 @@ func (r *InstancesService) Insert(project string, zone string, instance *Instanc return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesInsertCall) RequestId(requestId string) *InstancesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -37086,6 +38756,11 @@ func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -37396,6 +39071,25 @@ func (r *InstancesService) Reset(project string, zone string, instance string) * return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
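+//
+// Sketch of the retry pattern this parameter enables (illustrative only;
+// svc is an already constructed *compute.Service and github.com/google/uuid
+// supplies the UUID — both assumptions):
+//
+//     reqID := uuid.New().String()
+//     for attempt := 0; attempt < 3; attempt++ {
+//         op, err := svc.Instances.Reset("my-project", "us-central1-a", "my-vm").
+//             RequestId(reqID). // same ID on every attempt
+//             Do()
+//         if err == nil {
+//             _ = op // the reset was accepted exactly once
+//             break
+//         }
+//     }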
+func (c *InstancesResetCall) RequestId(requestId string) *InstancesResetCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -37502,6 +39196,11 @@ func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -37547,6 +39246,25 @@ func (r *InstancesService) SetDiskAutoDelete(project string, zone string, instan return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesSetDiskAutoDeleteCall) RequestId(requestId string) *InstancesSetDiskAutoDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -37668,6 +39386,11 @@ func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Oper // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -37712,6 +39435,25 @@ func (r *InstancesService) SetLabels(project string, zone string, instance strin return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesSetLabelsCall) RequestId(requestId string) *InstancesSetLabelsCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -37823,6 +39565,11 @@ func (c *InstancesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, e // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -37870,6 +39617,25 @@ func (r *InstancesService) SetMachineResources(project string, zone string, inst return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
+func (c *InstancesSetMachineResourcesCall) RequestId(requestId string) *InstancesSetMachineResourcesCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -37981,6 +39747,11 @@ func (c *InstancesSetMachineResourcesCall) Do(opts ...googleapi.CallOption) (*Op // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -38028,6 +39799,25 @@ func (r *InstancesService) SetMachineType(project string, zone string, instance return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesSetMachineTypeCall) RequestId(requestId string) *InstancesSetMachineTypeCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -38139,6 +39929,11 @@ func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operati // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -38187,6 +39982,25 @@ func (r *InstancesService) SetMetadata(project string, zone string, instance str return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesSetMetadataCall) RequestId(requestId string) *InstancesSetMetadataCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -38298,6 +40112,11 @@ func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -38345,6 +40164,25 @@ func (r *InstancesService) SetScheduling(project string, zone string, instance s return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
+func (c *InstancesSetSchedulingCall) RequestId(requestId string) *InstancesSetSchedulingCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -38456,6 +40294,11 @@ func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operatio // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -38504,6 +40347,25 @@ func (r *InstancesService) SetServiceAccount(project string, zone string, instan return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesSetServiceAccountCall) RequestId(requestId string) *InstancesSetServiceAccountCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -38615,6 +40477,11 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -38663,6 +40530,25 @@ func (r *InstancesService) SetTags(project string, zone string, instance string, return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesSetTagsCall) RequestId(requestId string) *InstancesSetTagsCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -38774,6 +40660,11 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -38821,6 +40712,25 @@ func (r *InstancesService) Start(project string, zone string, instance string) * return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
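+//
+// These calls return a long-running *Operation; a common follow-up is to
+// poll the zone operation until it reports "DONE" (sketch only; svc, the
+// placeholder names, and the fixed poll interval are assumptions):
+//
+//     op, err := svc.Instances.Start("my-project", "us-central1-a", "my-vm").
+//         RequestId(uuid.New().String()).
+//         Do()
+//     for err == nil && op.Status != "DONE" {
+//         time.Sleep(2 * time.Second)
+//         op, err = svc.ZoneOperations.Get("my-project", "us-central1-a", op.Name).Do()
+//     }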
+func (c *InstancesStartCall) RequestId(requestId string) *InstancesStartCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -38927,6 +40837,11 @@ func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -38972,6 +40887,25 @@ func (r *InstancesService) StartWithEncryptionKey(project string, zone string, i return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesStartWithEncryptionKeyCall) RequestId(requestId string) *InstancesStartWithEncryptionKeyCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -39083,6 +41017,11 @@ func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) ( // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -39134,6 +41073,25 @@ func (r *InstancesService) Stop(project string, zone string, instance string) *I return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesStopCall) RequestId(requestId string) *InstancesStopCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -39240,6 +41198,11 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -40113,6 +42076,25 @@ func (r *NetworksService) AddPeering(project string, network string, networksadd return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
+func (c *NetworksAddPeeringCall) RequestId(requestId string) *NetworksAddPeeringCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -40221,6 +42203,11 @@ func (c *NetworksAddPeeringCall) Do(opts ...googleapi.CallOption) (*Operation, e // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/global/networks/{network}/addPeering", @@ -40258,6 +42245,25 @@ func (r *NetworksService) Delete(project string, network string) *NetworksDelete return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *NetworksDeleteCall) RequestId(requestId string) *NetworksDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -40361,6 +42367,11 @@ func (c *NetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/global/networks/{network}", @@ -40549,6 +42560,25 @@ func (r *NetworksService) Insert(project string, network *Network) *NetworksInse return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *NetworksInsertCall) RequestId(requestId string) *NetworksInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -40648,6 +42678,11 @@ func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/global/networks", @@ -40939,6 +42974,25 @@ func (r *NetworksService) RemovePeering(project string, network string, networks return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
+func (c *NetworksRemovePeeringCall) RequestId(requestId string) *NetworksRemovePeeringCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -41047,6 +43101,11 @@ func (c *NetworksRemovePeeringCall) Do(opts ...googleapi.CallOption) (*Operation // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/global/networks/{network}/removePeering", @@ -41084,6 +43143,25 @@ func (r *NetworksService) SwitchToCustomMode(project string, network string) *Ne return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *NetworksSwitchToCustomModeCall) RequestId(requestId string) *NetworksSwitchToCustomModeCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -41187,6 +43265,11 @@ func (c *NetworksSwitchToCustomModeCall) Do(opts ...googleapi.CallOption) (*Oper // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/global/networks/{network}/switchToCustomMode", @@ -41218,6 +43301,25 @@ func (r *ProjectsService) DisableXpnHost(project string) *ProjectsDisableXpnHost return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ProjectsDisableXpnHostCall) RequestId(requestId string) *ProjectsDisableXpnHostCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -41312,6 +43414,11 @@ func (c *ProjectsDisableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operatio // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/disableXpnHost", @@ -41346,6 +43453,25 @@ func (r *ProjectsService) DisableXpnResource(project string, projectsdisablexpnr return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
+func (c *ProjectsDisableXpnResourceCall) RequestId(requestId string) *ProjectsDisableXpnResourceCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -41445,6 +43571,11 @@ func (c *ProjectsDisableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Oper // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/disableXpnResource", @@ -41479,6 +43610,25 @@ func (r *ProjectsService) EnableXpnHost(project string) *ProjectsEnableXpnHostCa return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ProjectsEnableXpnHostCall) RequestId(requestId string) *ProjectsEnableXpnHostCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -41573,6 +43723,11 @@ func (c *ProjectsEnableXpnHostCall) Do(opts ...googleapi.CallOption) (*Operation // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/enableXpnHost", @@ -41608,6 +43763,25 @@ func (r *ProjectsService) EnableXpnResource(project string, projectsenablexpnres return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ProjectsEnableXpnResourceCall) RequestId(requestId string) *ProjectsEnableXpnResourceCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -41707,6 +43881,11 @@ func (c *ProjectsEnableXpnResourceCall) Do(opts ...googleapi.CallOption) (*Opera // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/enableXpnResource", @@ -42428,6 +44607,25 @@ func (r *ProjectsService) MoveDisk(project string, diskmoverequest *DiskMoveRequ return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
+func (c *ProjectsMoveDiskCall) RequestId(requestId string) *ProjectsMoveDiskCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -42527,6 +44725,11 @@ func (c *ProjectsMoveDiskCall) Do(opts ...googleapi.CallOption) (*Operation, err // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/moveDisk", @@ -42564,6 +44767,25 @@ func (r *ProjectsService) MoveInstance(project string, instancemoverequest *Inst return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ProjectsMoveInstanceCall) RequestId(requestId string) *ProjectsMoveInstanceCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -42663,6 +44885,11 @@ func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/moveInstance", @@ -42701,6 +44928,25 @@ func (r *ProjectsService) SetCommonInstanceMetadata(project string, metadata *Me return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *ProjectsSetCommonInstanceMetadataCall) RequestId(requestId string) *ProjectsSetCommonInstanceMetadataCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -42800,6 +45046,11 @@ func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/setCommonInstanceMetadata", @@ -42840,6 +45091,25 @@ func (r *ProjectsService) SetUsageExportBucket(project string, usageexportlocati return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
+func (c *ProjectsSetUsageExportBucketCall) RequestId(requestId string) *ProjectsSetUsageExportBucketCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -42939,6 +45209,11 @@ func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Op // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/setUsageExportBucket", @@ -42980,6 +45255,25 @@ func (r *RegionAutoscalersService) Delete(project string, region string, autosca return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionAutoscalersDeleteCall) RequestId(requestId string) *RegionAutoscalersDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -43092,6 +45386,11 @@ func (c *RegionAutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operati // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/regions/{region}/autoscalers/{autoscaler}", @@ -43290,6 +45589,25 @@ func (r *RegionAutoscalersService) Insert(project string, region string, autosca return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionAutoscalersInsertCall) RequestId(requestId string) *RegionAutoscalersInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -43398,6 +45716,11 @@ func (c *RegionAutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operati // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/regions/{region}/autoscalers", @@ -43708,6 +46031,25 @@ func (c *RegionAutoscalersPatchCall) Autoscaler(autoscaler string) *RegionAutosc return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
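The setters themselves are one-liners: they stash the value in the call's urlParams_ store, and the parameter is emitted as part of the request URL's query string (again per "location": "query" in the descriptions above). A standalone sketch of that shape using only the standard library; the type name, URL path and UUID below are illustrative, not the generated client's actual internals:

// Sketch of the record-then-encode pattern behind the generated RequestId setters.
package main

import (
	"fmt"
	"net/url"
)

type stopCall struct {
	baseURL   string
	urlParams url.Values
}

func newStopCall(project, zone, instance string) *stopCall {
	return &stopCall{
		baseURL: fmt.Sprintf(
			"https://www.googleapis.com/compute/v1/projects/%s/zones/%s/instances/%s/stop",
			project, zone, instance),
		urlParams: url.Values{},
	}
}

// RequestId mirrors the generated setter: it only records the value.
func (c *stopCall) RequestId(id string) *stopCall {
	c.urlParams.Set("requestId", id)
	return c
}

// requestURL shows where the recorded value ends up: the query string.
func (c *stopCall) requestURL() string {
	if len(c.urlParams) == 0 {
		return c.baseURL
	}
	return c.baseURL + "?" + c.urlParams.Encode()
}

func main() {
	c := newStopCall("my-project", "us-central1-a", "my-instance").
		RequestId("3f2b1d4e-9c6a-4a0e-8b7d-1a2b3c4d5e6f")
	fmt.Println(c.requestURL()) // ...instances/my-instance/stop?requestId=3f2b...
}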
+func (c *RegionAutoscalersPatchCall) RequestId(requestId string) *RegionAutoscalersPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -43822,6 +46164,11 @@ func (c *RegionAutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operatio // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/regions/{region}/autoscalers", @@ -43833,8 +46180,7 @@ func (c *RegionAutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operatio // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } @@ -43869,6 +46215,25 @@ func (c *RegionAutoscalersUpdateCall) Autoscaler(autoscaler string) *RegionAutos return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionAutoscalersUpdateCall) RequestId(requestId string) *RegionAutoscalersUpdateCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -43983,6 +46348,11 @@ func (c *RegionAutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operati // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/regions/{region}/autoscalers", @@ -44021,6 +46391,25 @@ func (r *RegionBackendServicesService) Delete(project string, region string, bac return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionBackendServicesDeleteCall) RequestId(requestId string) *RegionBackendServicesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -44133,6 +46522,11 @@ func (c *RegionBackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/regions/{region}/backendServices/{backendService}", @@ -44492,6 +46886,25 @@ func (r *RegionBackendServicesService) Insert(project string, region string, bac return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. 
+// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionBackendServicesInsertCall) RequestId(requestId string) *RegionBackendServicesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -44600,6 +47013,11 @@ func (c *RegionBackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Ope // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/regions/{region}/backendServices", @@ -44908,6 +47326,25 @@ func (r *RegionBackendServicesService) Patch(project string, region string, back return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionBackendServicesPatchCall) RequestId(requestId string) *RegionBackendServicesPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -45025,6 +47462,11 @@ func (c *RegionBackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Oper // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/regions/{region}/backendServices/{backendService}", @@ -45036,8 +47478,7 @@ func (c *RegionBackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Oper // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } @@ -45069,6 +47510,25 @@ func (r *RegionBackendServicesService) Update(project string, region string, bac return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionBackendServicesUpdateCall) RequestId(requestId string) *RegionBackendServicesUpdateCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -45186,6 +47646,11 @@ func (c *RegionBackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Ope // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/regions/{region}/backendServices/{backendService}", @@ -45639,6 +48104,25 @@ func (r *RegionCommitmentsService) Insert(project string, region string, commitm return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionCommitmentsInsertCall) RequestId(requestId string) *RegionCommitmentsInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -45747,6 +48231,11 @@ func (c *RegionCommitmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operati // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/regions/{region}/commitments", @@ -46066,6 +48555,25 @@ func (r *RegionInstanceGroupManagersService) AbandonInstances(project string, re return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionInstanceGroupManagersAbandonInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersAbandonInstancesCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -46181,6 +48689,11 @@ func (c *RegionInstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.C // "location": "path", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/abandonInstances", @@ -46220,6 +48733,25 @@ func (r *RegionInstanceGroupManagersService) Delete(project string, region strin return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionInstanceGroupManagersDeleteCall) RequestId(requestId string) *RegionInstanceGroupManagersDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -46330,6 +48862,11 @@ func (c *RegionInstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) // "location": "path", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}", @@ -46382,6 +48919,25 @@ func (r *RegionInstanceGroupManagersService) DeleteInstances(project string, reg return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. 
+// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionInstanceGroupManagersDeleteInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersDeleteInstancesCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -46497,6 +49053,11 @@ func (c *RegionInstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.Ca // "location": "path", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/deleteInstances", @@ -46704,6 +49265,25 @@ func (r *RegionInstanceGroupManagersService) Insert(project string, region strin return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionInstanceGroupManagersInsertCall) RequestId(requestId string) *RegionInstanceGroupManagersInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -46811,6 +49391,11 @@ func (c *RegionInstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) // "location": "path", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/regions/{region}/instanceGroupManagers", @@ -47321,6 +49906,25 @@ func (r *RegionInstanceGroupManagersService) RecreateInstances(project string, r return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionInstanceGroupManagersRecreateInstancesCall) RequestId(requestId string) *RegionInstanceGroupManagersRecreateInstancesCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -47436,6 +50040,11 @@ func (c *RegionInstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi. // "location": "path", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/recreateInstances", @@ -47486,6 +50095,25 @@ func (r *RegionInstanceGroupManagersService) Resize(project string, region strin return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
+func (c *RegionInstanceGroupManagersResizeCall) RequestId(requestId string) *RegionInstanceGroupManagersResizeCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -47598,6 +50226,11 @@ func (c *RegionInstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "size": { // "description": "Number of instances that should exist in this instance group manager.", // "format": "int32", @@ -47644,6 +50277,25 @@ func (r *RegionInstanceGroupManagersService) SetInstanceTemplate(project string, return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) RequestId(requestId string) *RegionInstanceGroupManagersSetInstanceTemplateCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -47759,6 +50411,11 @@ func (c *RegionInstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleap // "location": "path", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate", @@ -47801,6 +50458,25 @@ func (r *RegionInstanceGroupManagersService) SetTargetPools(project string, regi return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RegionInstanceGroupManagersSetTargetPoolsCall) RequestId(requestId string) *RegionInstanceGroupManagersSetTargetPoolsCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -47916,6 +50592,11 @@ func (c *RegionInstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.Cal // "location": "path", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/setTargetPools", @@ -48650,6 +51331,25 @@ func (r *RegionInstanceGroupsService) SetNamedPorts(project string, region strin return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
+func (c *RegionInstanceGroupsSetNamedPortsCall) RequestId(requestId string) *RegionInstanceGroupsSetNamedPortsCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -48765,6 +51465,11 @@ func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) // "location": "path", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts", @@ -50007,6 +52712,25 @@ func (r *RoutersService) Delete(project string, region string, router string) *R return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RoutersDeleteCall) RequestId(requestId string) *RoutersDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -50113,6 +52837,11 @@ func (c *RoutersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "router": { // "description": "Name of the Router resource to delete.", // "location": "path", @@ -50481,6 +53210,25 @@ func (r *RoutersService) Insert(project string, region string, router *Router) * return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RoutersInsertCall) RequestId(requestId string) *RoutersInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -50589,6 +53337,11 @@ func (c *RoutersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/regions/{region}/routers", @@ -50894,6 +53647,25 @@ func (r *RoutersService) Patch(project string, region string, router string, rou return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
+func (c *RoutersPatchCall) RequestId(requestId string) *RoutersPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -51005,6 +53777,11 @@ func (c *RoutersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "router": { // "description": "Name of the Router resource to patch.", // "location": "path", @@ -51022,8 +53799,7 @@ func (c *RoutersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } @@ -51213,6 +53989,25 @@ func (r *RoutersService) Update(project string, region string, router string, ro return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RoutersUpdateCall) RequestId(requestId string) *RoutersUpdateCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -51324,6 +54119,11 @@ func (c *RoutersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "router": { // "description": "Name of the Router resource to update.", // "location": "path", @@ -51367,6 +54167,25 @@ func (r *RoutesService) Delete(project string, route string) *RoutesDeleteCall { return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *RoutesDeleteCall) RequestId(requestId string) *RoutesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -51464,6 +54283,11 @@ func (c *RoutesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "route": { // "description": "Name of the Route resource to delete.", // "location": "path", @@ -51658,6 +54482,25 @@ func (r *RoutesService) Insert(project string, route *Route) *RoutesInsertCall { return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
+func (c *RoutesInsertCall) RequestId(requestId string) *RoutesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -51757,6 +54600,11 @@ func (c *RoutesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/global/routes", @@ -52053,6 +54901,25 @@ func (r *SnapshotsService) Delete(project string, snapshot string) *SnapshotsDel return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *SnapshotsDeleteCall) RequestId(requestId string) *SnapshotsDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -52150,6 +55017,11 @@ func (c *SnapshotsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "snapshot": { // "description": "Name of the Snapshot resource to delete.", // "location": "path", @@ -52742,6 +55614,25 @@ func (r *SslCertificatesService) Delete(project string, sslCertificate string) * return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *SslCertificatesDeleteCall) RequestId(requestId string) *SslCertificatesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -52839,6 +55730,11 @@ func (c *SslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "sslCertificate": { // "description": "Name of the SslCertificate resource to delete.", // "location": "path", @@ -53031,6 +55927,25 @@ func (r *SslCertificatesService) Insert(project string, sslcertificate *SslCerti return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
+func (c *SslCertificatesInsertCall) RequestId(requestId string) *SslCertificatesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -53130,6 +56045,11 @@ func (c *SslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/global/sslCertificates", @@ -53671,6 +56591,25 @@ func (r *SubnetworksService) Delete(project string, region string, subnetwork st return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *SubnetworksDeleteCall) RequestId(requestId string) *SubnetworksDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -53777,6 +56716,11 @@ func (c *SubnetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "subnetwork": { // "description": "Name of the Subnetwork resource to delete.", // "location": "path", @@ -53821,6 +56765,25 @@ func (r *SubnetworksService) ExpandIpCidrRange(project string, region string, su return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *SubnetworksExpandIpCidrRangeCall) RequestId(requestId string) *SubnetworksExpandIpCidrRangeCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -53932,6 +56895,11 @@ func (c *SubnetworksExpandIpCidrRangeCall) Do(opts ...googleapi.CallOption) (*Op // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "subnetwork": { // "description": "Name of the Subnetwork resource to update.", // "location": "path", @@ -54140,6 +57108,25 @@ func (r *SubnetworksService) Insert(project string, region string, subnetwork *S return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
+func (c *SubnetworksInsertCall) RequestId(requestId string) *SubnetworksInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -54248,6 +57235,11 @@ func (c *SubnetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/regions/{region}/subnetworks", @@ -54553,6 +57545,25 @@ func (r *SubnetworksService) SetPrivateIpGoogleAccess(project string, region str return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *SubnetworksSetPrivateIpGoogleAccessCall) RequestId(requestId string) *SubnetworksSetPrivateIpGoogleAccessCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -54664,6 +57675,11 @@ func (c *SubnetworksSetPrivateIpGoogleAccessCall) Do(opts ...googleapi.CallOptio // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "subnetwork": { // "description": "Name of the Subnetwork resource.", // "location": "path", @@ -54707,6 +57723,25 @@ func (r *TargetHttpProxiesService) Delete(project string, targetHttpProxy string return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *TargetHttpProxiesDeleteCall) RequestId(requestId string) *TargetHttpProxiesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -54804,6 +57839,11 @@ func (c *TargetHttpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "targetHttpProxy": { // "description": "Name of the TargetHttpProxy resource to delete.", // "location": "path", @@ -54998,6 +58038,25 @@ func (r *TargetHttpProxiesService) Insert(project string, targethttpproxy *Targe return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
+func (c *TargetHttpProxiesInsertCall) RequestId(requestId string) *TargetHttpProxiesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -55097,6 +58156,11 @@ func (c *TargetHttpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operati // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/global/targetHttpProxies", @@ -55389,6 +58453,25 @@ func (r *TargetHttpProxiesService) SetUrlMap(project string, targetHttpProxy str return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *TargetHttpProxiesSetUrlMapCall) RequestId(requestId string) *TargetHttpProxiesSetUrlMapCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -55491,6 +58574,11 @@ func (c *TargetHttpProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Oper // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "targetHttpProxy": { // "description": "Name of the TargetHttpProxy to set a URL map for.", // "location": "path", @@ -55533,6 +58621,25 @@ func (r *TargetHttpsProxiesService) Delete(project string, targetHttpsProxy stri return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *TargetHttpsProxiesDeleteCall) RequestId(requestId string) *TargetHttpsProxiesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -55630,6 +58737,11 @@ func (c *TargetHttpsProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operat // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "targetHttpsProxy": { // "description": "Name of the TargetHttpsProxy resource to delete.", // "location": "path", @@ -55822,6 +58934,25 @@ func (r *TargetHttpsProxiesService) Insert(project string, targethttpsproxy *Tar return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
+func (c *TargetHttpsProxiesInsertCall) RequestId(requestId string) *TargetHttpsProxiesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -55921,6 +59052,11 @@ func (c *TargetHttpsProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operat // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/global/targetHttpsProxies", @@ -56211,6 +59347,25 @@ func (r *TargetHttpsProxiesService) SetSslCertificates(project string, targetHtt return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *TargetHttpsProxiesSetSslCertificatesCall) RequestId(requestId string) *TargetHttpsProxiesSetSslCertificatesCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -56313,6 +59468,11 @@ func (c *TargetHttpsProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOpti // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "targetHttpsProxy": { // "description": "Name of the TargetHttpsProxy resource to set an SslCertificates resource for.", // "location": "path", @@ -56357,6 +59517,25 @@ func (r *TargetHttpsProxiesService) SetUrlMap(project string, targetHttpsProxy s return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *TargetHttpsProxiesSetUrlMapCall) RequestId(requestId string) *TargetHttpsProxiesSetUrlMapCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -56459,6 +59638,11 @@ func (c *TargetHttpsProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Ope // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "targetHttpsProxy": { // "description": "Name of the TargetHttpsProxy resource whose URL map is to be set.", // "location": "path", @@ -56756,6 +59940,25 @@ func (r *TargetInstancesService) Delete(project string, zone string, targetInsta return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. 
+// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *TargetInstancesDeleteCall) RequestId(requestId string) *TargetInstancesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -56855,6 +60058,11 @@ func (c *TargetInstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "targetInstance": { // "description": "Name of the TargetInstance resource to delete.", // "location": "path", @@ -57069,6 +60277,25 @@ func (r *TargetInstancesService) Insert(project string, zone string, targetinsta return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *TargetInstancesInsertCall) RequestId(requestId string) *TargetInstancesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -57171,6 +60398,11 @@ func (c *TargetInstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "Name of the zone scoping this request.", // "location": "path", @@ -57482,6 +60714,25 @@ func (r *TargetPoolsService) AddHealthCheck(project string, region string, targe return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *TargetPoolsAddHealthCheckCall) RequestId(requestId string) *TargetPoolsAddHealthCheckCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -57593,6 +60844,11 @@ func (c *TargetPoolsAddHealthCheckCall) Do(opts ...googleapi.CallOption) (*Opera // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "targetPool": { // "description": "Name of the target pool to add a health check to.", // "location": "path", @@ -57640,6 +60896,25 @@ func (r *TargetPoolsService) AddInstance(project string, region string, targetPo return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
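For reference, a minimal sketch of how a caller might use one of the RequestId setters introduced throughout this diff to make a mutating call retry-safe. This is illustrative only, not part of the change: it assumes the hunk above belongs to the generated Google Compute Engine Go client (shown here as compute/v1; substitute v0.beta or v0.alpha if that is the package being patched), and the project, region, target pool name and UUID literal are placeholder values.

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1" // or v0.beta/v0.alpha, matching the package this diff patches
)

func main() {
	ctx := context.Background()

	// Application-default credentials; any authenticated *http.Client works.
	client, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder values; substitute your own project, region and resource names.
	const (
		project    = "my-project"
		region     = "us-central1"
		targetPool = "my-target-pool"
		// In practice generate a fresh UUID per logical operation
		// (e.g. with github.com/google/uuid); the zero UUID is rejected.
		requestID = "3b241101-e2bb-4255-8caf-4136c566a962"
	)

	// Reusing the same requestID on a retry is safe: if the first attempt
	// already completed, the server ignores the duplicate instead of
	// performing the deletion twice.
	op, err := svc.TargetPools.Delete(project, region, targetPool).
		RequestId(requestID).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("operation:", op.Name, op.Status)
}

The same pattern applies to every call type gaining a RequestId method in this diff; only the service and call constructor change.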
+func (c *TargetPoolsAddInstanceCall) RequestId(requestId string) *TargetPoolsAddInstanceCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -57751,6 +61026,11 @@ func (c *TargetPoolsAddInstanceCall) Do(opts ...googleapi.CallOption) (*Operatio // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "targetPool": { // "description": "Name of the TargetPool resource to add instances to.", // "location": "path", @@ -58048,6 +61328,25 @@ func (r *TargetPoolsService) Delete(project string, region string, targetPool st return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *TargetPoolsDeleteCall) RequestId(requestId string) *TargetPoolsDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -58154,6 +61453,11 @@ func (c *TargetPoolsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "targetPool": { // "description": "Name of the TargetPool resource to delete.", // "location": "path", @@ -58521,6 +61825,25 @@ func (r *TargetPoolsService) Insert(project string, region string, targetpool *T return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *TargetPoolsInsertCall) RequestId(requestId string) *TargetPoolsInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -58629,6 +61952,11 @@ func (c *TargetPoolsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/regions/{region}/targetPools", @@ -58934,6 +62262,25 @@ func (r *TargetPoolsService) RemoveHealthCheck(project string, region string, ta return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
+func (c *TargetPoolsRemoveHealthCheckCall) RequestId(requestId string) *TargetPoolsRemoveHealthCheckCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -59045,6 +62392,11 @@ func (c *TargetPoolsRemoveHealthCheckCall) Do(opts ...googleapi.CallOption) (*Op // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "targetPool": { // "description": "Name of the target pool to remove health checks from.", // "location": "path", @@ -59092,6 +62444,25 @@ func (r *TargetPoolsService) RemoveInstance(project string, region string, targe return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *TargetPoolsRemoveInstanceCall) RequestId(requestId string) *TargetPoolsRemoveInstanceCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -59203,6 +62574,11 @@ func (c *TargetPoolsRemoveInstanceCall) Do(opts ...googleapi.CallOption) (*Opera // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "targetPool": { // "description": "Name of the TargetPool resource to remove instances from.", // "location": "path", @@ -59257,6 +62633,25 @@ func (c *TargetPoolsSetBackupCall) FailoverRatio(failoverRatio float64) *TargetP return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *TargetPoolsSetBackupCall) RequestId(requestId string) *TargetPoolsSetBackupCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -59374,6 +62769,11 @@ func (c *TargetPoolsSetBackupCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "targetPool": { // "description": "Name of the TargetPool resource to set a backup pool for.", // "location": "path", @@ -59416,6 +62816,25 @@ func (r *TargetSslProxiesService) Delete(project string, targetSslProxy string) return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
+func (c *TargetSslProxiesDeleteCall) RequestId(requestId string) *TargetSslProxiesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -59513,6 +62932,11 @@ func (c *TargetSslProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "targetSslProxy": { // "description": "Name of the TargetSslProxy resource to delete.", // "location": "path", @@ -59705,6 +63129,25 @@ func (r *TargetSslProxiesService) Insert(project string, targetsslproxy *TargetS return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *TargetSslProxiesInsertCall) RequestId(requestId string) *TargetSslProxiesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -59804,6 +63247,11 @@ func (c *TargetSslProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/global/targetSslProxies", @@ -60094,6 +63542,25 @@ func (r *TargetSslProxiesService) SetBackendService(project string, targetSslPro return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *TargetSslProxiesSetBackendServiceCall) RequestId(requestId string) *TargetSslProxiesSetBackendServiceCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -60196,6 +63663,11 @@ func (c *TargetSslProxiesSetBackendServiceCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "targetSslProxy": { // "description": "Name of the TargetSslProxy resource whose BackendService resource is to be set.", // "location": "path", @@ -60240,6 +63712,25 @@ func (r *TargetSslProxiesService) SetProxyHeader(project string, targetSslProxy return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
+func (c *TargetSslProxiesSetProxyHeaderCall) RequestId(requestId string) *TargetSslProxiesSetProxyHeaderCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -60342,6 +63833,11 @@ func (c *TargetSslProxiesSetProxyHeaderCall) Do(opts ...googleapi.CallOption) (* // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "targetSslProxy": { // "description": "Name of the TargetSslProxy resource whose ProxyHeader is to be set.", // "location": "path", @@ -60386,6 +63882,25 @@ func (r *TargetSslProxiesService) SetSslCertificates(project string, targetSslPr return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *TargetSslProxiesSetSslCertificatesCall) RequestId(requestId string) *TargetSslProxiesSetSslCertificatesCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -60488,6 +64003,11 @@ func (c *TargetSslProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOption // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "targetSslProxy": { // "description": "Name of the TargetSslProxy resource whose SslCertificate resource is to be set.", // "location": "path", @@ -60530,6 +64050,25 @@ func (r *TargetTcpProxiesService) Delete(project string, targetTcpProxy string) return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *TargetTcpProxiesDeleteCall) RequestId(requestId string) *TargetTcpProxiesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -60627,6 +64166,11 @@ func (c *TargetTcpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "targetTcpProxy": { // "description": "Name of the TargetTcpProxy resource to delete.", // "location": "path", @@ -60819,6 +64363,25 @@ func (r *TargetTcpProxiesService) Insert(project string, targettcpproxy *TargetT return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
+func (c *TargetTcpProxiesInsertCall) RequestId(requestId string) *TargetTcpProxiesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -60918,6 +64481,11 @@ func (c *TargetTcpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/global/targetTcpProxies", @@ -61208,6 +64776,25 @@ func (r *TargetTcpProxiesService) SetBackendService(project string, targetTcpPro return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *TargetTcpProxiesSetBackendServiceCall) RequestId(requestId string) *TargetTcpProxiesSetBackendServiceCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -61310,6 +64897,11 @@ func (c *TargetTcpProxiesSetBackendServiceCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "targetTcpProxy": { // "description": "Name of the TargetTcpProxy resource whose BackendService resource is to be set.", // "location": "path", @@ -61354,6 +64946,25 @@ func (r *TargetTcpProxiesService) SetProxyHeader(project string, targetTcpProxy return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *TargetTcpProxiesSetProxyHeaderCall) RequestId(requestId string) *TargetTcpProxiesSetProxyHeaderCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -61456,6 +65067,11 @@ func (c *TargetTcpProxiesSetProxyHeaderCall) Do(opts ...googleapi.CallOption) (* // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "targetTcpProxy": { // "description": "Name of the TargetTcpProxy resource whose ProxyHeader is to be set.", // "location": "path", @@ -61751,6 +65367,25 @@ func (r *TargetVpnGatewaysService) Delete(project string, region string, targetV return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. 
+// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *TargetVpnGatewaysDeleteCall) RequestId(requestId string) *TargetVpnGatewaysDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -61857,6 +65492,11 @@ func (c *TargetVpnGatewaysDeleteCall) Do(opts ...googleapi.CallOption) (*Operati // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "targetVpnGateway": { // "description": "Name of the target VPN gateway to delete.", // "location": "path", @@ -62062,6 +65702,25 @@ func (r *TargetVpnGatewaysService) Insert(project string, region string, targetv return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *TargetVpnGatewaysInsertCall) RequestId(requestId string) *TargetVpnGatewaysInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -62170,6 +65829,11 @@ func (c *TargetVpnGatewaysInsertCall) Do(opts ...googleapi.CallOption) (*Operati // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/regions/{region}/targetVpnGateways", @@ -62470,6 +66134,25 @@ func (r *UrlMapsService) Delete(project string, urlMap string) *UrlMapsDeleteCal return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *UrlMapsDeleteCall) RequestId(requestId string) *UrlMapsDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -62567,6 +66250,11 @@ func (c *UrlMapsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "urlMap": { // "description": "Name of the UrlMap resource to delete.", // "location": "path", @@ -62761,6 +66449,25 @@ func (r *UrlMapsService) Insert(project string, urlmap *UrlMap) *UrlMapsInsertCa return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
+func (c *UrlMapsInsertCall) RequestId(requestId string) *UrlMapsInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -62860,6 +66567,11 @@ func (c *UrlMapsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/global/urlMaps", @@ -62899,6 +66611,25 @@ func (r *UrlMapsService) InvalidateCache(project string, urlMap string, cacheinv return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *UrlMapsInvalidateCacheCall) RequestId(requestId string) *UrlMapsInvalidateCacheCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -63001,6 +66732,11 @@ func (c *UrlMapsInvalidateCacheCall) Do(opts ...googleapi.CallOption) (*Operatio // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "urlMap": { // "description": "Name of the UrlMap scoping this request.", // "location": "path", @@ -63301,6 +67037,25 @@ func (r *UrlMapsService) Patch(project string, urlMap string, urlmap *UrlMap) *U return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *UrlMapsPatchCall) RequestId(requestId string) *UrlMapsPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -63403,6 +67158,11 @@ func (c *UrlMapsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "urlMap": { // "description": "Name of the UrlMap resource to patch.", // "location": "path", @@ -63420,8 +67180,7 @@ func (c *UrlMapsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } @@ -63450,6 +67209,25 @@ func (r *UrlMapsService) Update(project string, urlMap string, urlmap *UrlMap) * return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. 
If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *UrlMapsUpdateCall) RequestId(requestId string) *UrlMapsUpdateCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -63552,6 +67330,11 @@ func (c *UrlMapsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "urlMap": { // "description": "Name of the UrlMap resource to update.", // "location": "path", @@ -63996,6 +67779,25 @@ func (r *VpnTunnelsService) Delete(project string, region string, vpnTunnel stri return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *VpnTunnelsDeleteCall) RequestId(requestId string) *VpnTunnelsDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -64102,6 +67904,11 @@ func (c *VpnTunnelsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, err // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "vpnTunnel": { // "description": "Name of the VpnTunnel resource to delete.", // "location": "path", @@ -64307,6 +68114,25 @@ func (r *VpnTunnelsService) Insert(project string, region string, vpntunnel *Vpn return c } +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *VpnTunnelsInsertCall) RequestId(requestId string) *VpnTunnelsInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -64415,6 +68241,11 @@ func (c *VpnTunnelsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, err // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
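The hunks above add an optional RequestId setter to several generated compute calls (UrlMapsPatchCall, UrlMapsUpdateCall, VpnTunnelsDeleteCall, VpnTunnelsInsertCall) so that a retried request carrying the same UUID can be de-duplicated server-side. A minimal sketch of how a caller might use the new setter, assuming the generated compute client in this vendor tree is importable as google.golang.org/api/compute/v1 (the import path is not visible in this hunk) and using placeholder project, region, tunnel, and UUID values:

package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	// Application Default Credentials with the cloud-platform scope.
	client, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		log.Fatalf("auth: %v", err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatalf("client: %v", err)
	}

	// Placeholder resource; a real insert also needs peer IP, shared secret, etc.
	tunnel := &compute.VpnTunnel{Name: "example-tunnel"}

	// Any valid, non-zero UUID (placeholder here); reusing it on a retry lets
	// the server ignore the second attempt if the first one actually completed.
	const requestID = "b1c9938f-5a5e-4a98-9d8b-1a2b3c4d5e6f"

	var op *compute.Operation
	for attempt := 1; attempt <= 2; attempt++ {
		op, err = svc.VpnTunnels.Insert("my-project", "us-central1", tunnel).
			RequestId(requestID).
			Context(ctx).
			Do()
		if err == nil {
			break
		}
		log.Printf("insert attempt %d failed: %v", attempt, err)
	}
	if err != nil {
		log.Fatalf("insert: %v", err)
	}
	log.Printf("started operation %s", op.Name)
}
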
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, // "path": "{project}/regions/{region}/vpnTunnels", diff --git a/vendor/google.golang.org/api/container/v1/container-api.json b/vendor/google.golang.org/api/container/v1/container-api.json index e9151bb696b3..06dd8d0cc97e 100644 --- a/vendor/google.golang.org/api/container/v1/container-api.json +++ b/vendor/google.golang.org/api/container/v1/container-api.json @@ -1,24 +1,10 @@ { - "basePath": "", - "ownerDomain": "google.com", - "name": "container", - "batchPath": "batch", - "revision": "20170720", - "documentationLink": "https://cloud.google.com/container-engine/", - "id": "container:v1", - "title": "Google Container Engine API", - "ownerName": "Google", - "discoveryVersion": "v1", "resources": { "projects": { "resources": { "zones": { "methods": { "getServerconfig": { - "flatPath": "v1/projects/{projectId}/zones/{zone}/serverconfig", - "path": "v1/projects/{projectId}/zones/{zone}/serverconfig", - "id": "container.projects.zones.getServerconfig", - "description": "Returns configuration info about the Container Engine service.", "httpMethod": "GET", "parameterOrder": [ "projectId", @@ -43,17 +29,24 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" - ] + ], + "flatPath": "v1/projects/{projectId}/zones/{zone}/serverconfig", + "path": "v1/projects/{projectId}/zones/{zone}/serverconfig", + "id": "container.projects.zones.getServerconfig", + "description": "Returns configuration info about the Container Engine service." } }, "resources": { "clusters": { "methods": { - "locations": { + "logging": { + "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/logging", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/logging", + "id": "container.projects.zones.clusters.logging", "request": { - "$ref": "SetLocationsRequest" + "$ref": "SetLoggingServiceRequest" }, - "description": "Sets the locations of a specific cluster.", + "description": "Sets the logging service of a specific cluster.", "httpMethod": "POST", "parameterOrder": [ "projectId", @@ -74,10 +67,10 @@ "location": "path" }, "zone": { - "location": "path", "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", "type": "string", - "required": true + "required": true, + "location": "path" }, "clusterId": { "description": "The name of the cluster to upgrade.", @@ -85,21 +78,18 @@ "required": true, "location": "path" } - }, - "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/locations", - "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/locations", - "id": "container.projects.zones.clusters.locations" + } }, - "update": { - "response": { - "$ref": "Operation" - }, + "list": { + "description": "Lists all clusters owned by a project in either the specified zone or all\nzones.", + "httpMethod": "GET", "parameterOrder": [ "projectId", - "zone", - "clusterId" + "zone" ], - "httpMethod": "PUT", + "response": { + "$ref": "ListClustersResponse" + }, "parameters": { "projectId": { "location": "path", @@ -108,46 +98,35 @@ "required": true }, "zone": { - "location": "path", - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", - "type": "string", - "required": 
true - }, - "clusterId": { - "location": "path", - "description": "The name of the cluster to upgrade.", + "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides, or \"-\" for all zones.", "type": "string", - "required": true + "required": true, + "location": "path" } }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], - "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}", - "id": "container.projects.zones.clusters.update", - "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}", - "description": "Updates the settings of a specific cluster.", - "request": { - "$ref": "UpdateClusterRequest" - } + "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters", + "path": "v1/projects/{projectId}/zones/{zone}/clusters", + "id": "container.projects.zones.clusters.list" }, - "monitoring": { - "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/monitoring", - "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/monitoring", - "id": "container.projects.zones.clusters.monitoring", - "description": "Sets the monitoring service of a specific cluster.", + "create": { "request": { - "$ref": "SetMonitoringServiceRequest" + "$ref": "CreateClusterRequest" }, - "httpMethod": "POST", - "parameterOrder": [ - "projectId", - "zone", - "clusterId" - ], + "description": "Creates a cluster, consisting of the specified number and type of Google\nCompute Engine instances.\n\nBy default, the cluster is created in the project's\n[default network](/compute/docs/networks-and-firewalls#networks).\n\nOne firewall is added for the cluster. After cluster creation,\nthe cluster creates routes for each node to allow the containers\non that node to communicate with all other instances in the\ncluster.\n\nFinally, an entry is added to the project's global metadata indicating\nwhich CIDR range is being used by the cluster.", "response": { "$ref": "Operation" }, + "parameterOrder": [ + "projectId", + "zone" + ], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], "parameters": { "projectId": { "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", @@ -156,23 +135,17 @@ "location": "path" }, "zone": { - "location": "path", "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", "type": "string", - "required": true - }, - "clusterId": { - "description": "The name of the cluster to upgrade.", - "type": "string", "required": true, "location": "path" } }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] + "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters", + "id": "container.projects.zones.clusters.create", + "path": "v1/projects/{projectId}/zones/{zone}/clusters" }, - "master": { + "resourceLabels": { "httpMethod": "POST", "parameterOrder": [ "projectId", @@ -184,43 +157,40 @@ }, "parameters": { "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "location": "path", + "description": "The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).", "type": "string", - "required": true, - "location": "path" + "required": true }, "zone": { - "location": "path", "description": "The name of the Google Compute 
Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", "type": "string", - "required": true + "required": true, + "location": "path" }, "clusterId": { - "location": "path", - "description": "The name of the cluster to upgrade.", + "description": "The name of the cluster.", "type": "string", - "required": true + "required": true, + "location": "path" } }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], - "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/master", - "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/master", - "id": "container.projects.zones.clusters.master", - "description": "Updates the master of a specific cluster.", + "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/resourceLabels", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/resourceLabels", + "id": "container.projects.zones.clusters.resourceLabels", + "description": "Sets labels on a cluster.", "request": { - "$ref": "UpdateMasterRequest" + "$ref": "SetLabelsRequest" } }, - "setMasterAuth": { - "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:setMasterAuth", - "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:setMasterAuth", - "id": "container.projects.zones.clusters.setMasterAuth", + "completeIpRotation": { "request": { - "$ref": "SetMasterAuthRequest" + "$ref": "CompleteIPRotationRequest" }, - "description": "Used to set master auth materials. Currently supports :-\nChanging the admin password of a specific cluster.\nThis can be either via password generation or explicitly set the password.", + "description": "Completes master IP rotation.", "httpMethod": "POST", "parameterOrder": [ "projectId", @@ -235,10 +205,10 @@ ], "parameters": { "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "location": "path", + "description": "The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).", "type": "string", - "required": true, - "location": "path" + "required": true }, "zone": { "location": "path", @@ -247,36 +217,36 @@ "required": true }, "clusterId": { - "location": "path", - "description": "The name of the cluster to upgrade.", + "description": "The name of the cluster.", "type": "string", - "required": true + "required": true, + "location": "path" } - } - }, - "logging": { - "request": { - "$ref": "SetLoggingServiceRequest" }, - "description": "Sets the logging service of a specific cluster.", + "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:completeIpRotation", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:completeIpRotation", + "id": "container.projects.zones.clusters.completeIpRotation" + }, + "get": { + "description": "Gets the details of a specific cluster.", "response": { - "$ref": "Operation" + "$ref": "Cluster" }, "parameterOrder": [ "projectId", "zone", "clusterId" ], - "httpMethod": "POST", + "httpMethod": "GET", "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], "parameters": { "projectId": { - "location": "path", "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", "type": "string", - "required": true + "required": true, + "location": "path" }, "zone": { "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in 
which the cluster\nresides.", @@ -285,25 +255,33 @@ "location": "path" }, "clusterId": { - "description": "The name of the cluster to upgrade.", + "location": "path", + "description": "The name of the cluster to retrieve.", "type": "string", - "required": true, - "location": "path" + "required": true } }, - "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/logging", - "id": "container.projects.zones.clusters.logging", - "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/logging" + "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}", + "id": "container.projects.zones.clusters.get", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}" }, - "list": { - "response": { - "$ref": "ListClustersResponse" + "legacyAbac": { + "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/legacyAbac", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/legacyAbac", + "id": "container.projects.zones.clusters.legacyAbac", + "request": { + "$ref": "SetLegacyAbacRequest" }, + "description": "Enables or disables the ABAC authorization mechanism on a cluster.", + "httpMethod": "POST", "parameterOrder": [ "projectId", - "zone" + "zone", + "clusterId" ], - "httpMethod": "GET", + "response": { + "$ref": "Operation" + }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], @@ -315,55 +293,64 @@ "required": true }, "zone": { - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides, or \"-\" for all zones.", + "location": "path", + "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "type": "string", + "required": true + }, + "clusterId": { + "description": "The name of the cluster to update.", "type": "string", "required": true, "location": "path" } - }, - "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters", - "id": "container.projects.zones.clusters.list", - "path": "v1/projects/{projectId}/zones/{zone}/clusters", - "description": "Lists all clusters owned by a project in either the specified zone or all\nzones." + } }, - "create": { - "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters", - "path": "v1/projects/{projectId}/zones/{zone}/clusters", - "id": "container.projects.zones.clusters.create", - "description": "Creates a cluster, consisting of the specified number and type of Google\nCompute Engine instances.\n\nBy default, the cluster is created in the project's\n[default network](/compute/docs/networks-and-firewalls#networks).\n\nOne firewall is added for the cluster. 
After cluster creation,\nthe cluster creates routes for each node to allow the containers\non that node to communicate with all other instances in the\ncluster.\n\nFinally, an entry is added to the project's global metadata indicating\nwhich CIDR range is being used by the cluster.", + "setNetworkPolicy": { + "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:setNetworkPolicy", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:setNetworkPolicy", + "id": "container.projects.zones.clusters.setNetworkPolicy", "request": { - "$ref": "CreateClusterRequest" + "$ref": "SetNetworkPolicyRequest" }, + "description": "Enables/Disables Network Policy for a cluster.", "httpMethod": "POST", "parameterOrder": [ "projectId", - "zone" + "zone", + "clusterId" ], "response": { "$ref": "Operation" }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], "parameters": { "projectId": { - "location": "path", - "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "description": "The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).", "type": "string", - "required": true + "required": true, + "location": "path" }, "zone": { "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", "type": "string", "required": true, "location": "path" + }, + "clusterId": { + "description": "The name of the cluster.", + "type": "string", + "required": true, + "location": "path" } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] + } }, - "resourceLabels": { - "description": "Sets labels on a cluster.", + "startIpRotation": { + "description": "Start master IP rotation.", "request": { - "$ref": "SetLabelsRequest" + "$ref": "StartIPRotationRequest" }, "httpMethod": "POST", "parameterOrder": [ @@ -382,30 +369,30 @@ "location": "path" }, "zone": { - "location": "path", "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", "type": "string", - "required": true + "required": true, + "location": "path" }, "clusterId": { + "location": "path", "description": "The name of the cluster.", "type": "string", - "required": true, - "location": "path" + "required": true } }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], - "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/resourceLabels", - "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/resourceLabels", - "id": "container.projects.zones.clusters.resourceLabels" + "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:startIpRotation", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:startIpRotation", + "id": "container.projects.zones.clusters.startIpRotation" }, - "completeIpRotation": { - "request": { - "$ref": "CompleteIPRotationRequest" + "addons": { + "description": "Sets the addons of a specific cluster.", + "request": { + "$ref": "SetAddonsConfigRequest" }, - "description": "Completes master IP rotation.", "response": { "$ref": "Operation" }, @@ -415,47 +402,44 @@ "clusterId" ], "httpMethod": "POST", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], "parameters": { "projectId": { - "location": "path", - "description": "The Google Developers Console [project ID or 
project\nnumber](https://developers.google.com/console/help/new/#projectnumber).", + "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", "type": "string", - "required": true + "required": true, + "location": "path" }, "zone": { + "location": "path", "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", "type": "string", - "required": true, - "location": "path" + "required": true }, "clusterId": { - "description": "The name of the cluster.", + "description": "The name of the cluster to upgrade.", "type": "string", "required": true, "location": "path" } }, - "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:completeIpRotation", - "id": "container.projects.zones.clusters.completeIpRotation", - "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:completeIpRotation" + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/addons", + "id": "container.projects.zones.clusters.addons", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/addons" }, - "get": { - "description": "Gets the details of a specific cluster.", + "delete": { + "description": "Deletes the cluster, including the Kubernetes endpoint and all worker\nnodes.\n\nFirewalls and routes that were configured during cluster creation\nare also deleted.\n\nOther Google Compute Engine resources that might be in use by the cluster\n(e.g. load balancer resources) will not be deleted if they weren't present\nat the initial create time.", "response": { - "$ref": "Cluster" + "$ref": "Operation" }, "parameterOrder": [ "projectId", "zone", "clusterId" ], - "httpMethod": "GET", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], + "httpMethod": "DELETE", "parameters": { "projectId": { "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", @@ -470,17 +454,27 @@ "required": true }, "clusterId": { - "location": "path", - "description": "The name of the cluster to retrieve.", + "description": "The name of the cluster to delete.", "type": "string", - "required": true + "required": true, + "location": "path" } }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}", - "id": "container.projects.zones.clusters.get", + "id": "container.projects.zones.clusters.delete", "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}" }, - "legacyAbac": { + "locations": { + "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/locations", + "id": "container.projects.zones.clusters.locations", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/locations", + "description": "Sets the locations of a specific cluster.", + "request": { + "$ref": "SetLocationsRequest" + }, "response": { "$ref": "Operation" }, @@ -504,31 +498,21 @@ "location": "path" }, "clusterId": { - "description": "The name of the cluster to update.", + "location": "path", + "description": "The name of the cluster to upgrade.", "type": "string", - "required": true, - "location": "path" + "required": true } }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" - ], - "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/legacyAbac", - "id": 
"container.projects.zones.clusters.legacyAbac", - "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/legacyAbac", - "description": "Enables or disables the ABAC authorization mechanism on a cluster.", - "request": { - "$ref": "SetLegacyAbacRequest" - } + ] }, - "setNetworkPolicy": { - "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:setNetworkPolicy", - "id": "container.projects.zones.clusters.setNetworkPolicy", - "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:setNetworkPolicy", + "update": { "request": { - "$ref": "SetNetworkPolicyRequest" + "$ref": "UpdateClusterRequest" }, - "description": "Enables/Disables Network Policy for a cluster.", + "description": "Updates the settings of a specific cluster.", "response": { "$ref": "Operation" }, @@ -537,36 +521,39 @@ "zone", "clusterId" ], - "httpMethod": "POST", + "httpMethod": "PUT", "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], "parameters": { "projectId": { "location": "path", - "description": "The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).", + "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", "type": "string", "required": true }, "zone": { + "location": "path", "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", "type": "string", - "required": true, - "location": "path" + "required": true }, "clusterId": { "location": "path", - "description": "The name of the cluster.", + "description": "The name of the cluster to upgrade.", "type": "string", "required": true } - } + }, + "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}", + "id": "container.projects.zones.clusters.update", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}" }, - "startIpRotation": { + "monitoring": { "request": { - "$ref": "StartIPRotationRequest" + "$ref": "SetMonitoringServiceRequest" }, - "description": "Start master IP rotation.", + "description": "Sets the monitoring service of a specific cluster.", "response": { "$ref": "Operation" }, @@ -581,38 +568,45 @@ ], "parameters": { "projectId": { - "location": "path", - "description": "The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).", + "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", "type": "string", - "required": true + "required": true, + "location": "path" }, "zone": { - "location": "path", "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", "type": "string", - "required": true + "required": true, + "location": "path" }, "clusterId": { "location": "path", - "description": "The name of the cluster.", + "description": "The name of the cluster to upgrade.", "type": "string", "required": true } }, - "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:startIpRotation", - "id": "container.projects.zones.clusters.startIpRotation", - "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:startIpRotation" + "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/monitoring", + "id": "container.projects.zones.clusters.monitoring", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/monitoring" }, 
- "addons": { - "response": { - "$ref": "Operation" + "master": { + "request": { + "$ref": "UpdateMasterRequest" }, + "description": "Updates the master of a specific cluster.", + "httpMethod": "POST", "parameterOrder": [ "projectId", "zone", "clusterId" ], - "httpMethod": "POST", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], "parameters": { "projectId": { "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", @@ -633,37 +627,30 @@ "location": "path" } }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/addons", - "id": "container.projects.zones.clusters.addons", - "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/addons", - "description": "Sets the addons of a specific cluster.", - "request": { - "$ref": "SetAddonsConfigRequest" - } + "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/master", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/master", + "id": "container.projects.zones.clusters.master" }, - "delete": { - "description": "Deletes the cluster, including the Kubernetes endpoint and all worker\nnodes.\n\nFirewalls and routes that were configured during cluster creation\nare also deleted.\n\nOther Google Compute Engine resources that might be in use by the cluster\n(e.g. load balancer resources) will not be deleted if they weren't present\nat the initial create time.", - "response": { - "$ref": "Operation" + "setMasterAuth": { + "description": "Used to set master auth materials. Currently supports :-\nChanging the admin password of a specific cluster.\nThis can be either via password generation or explicitly set the password.", + "request": { + "$ref": "SetMasterAuthRequest" }, + "httpMethod": "POST", "parameterOrder": [ "projectId", "zone", "clusterId" ], - "httpMethod": "DELETE", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], + "response": { + "$ref": "Operation" + }, "parameters": { "projectId": { - "location": "path", "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", "type": "string", - "required": true + "required": true, + "location": "path" }, "zone": { "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", @@ -672,35 +659,45 @@ "location": "path" }, "clusterId": { - "location": "path", - "description": "The name of the cluster to delete.", + "description": "The name of the cluster to upgrade.", "type": "string", - "required": true + "required": true, + "location": "path" } }, - "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}", - "id": "container.projects.zones.clusters.delete", - "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}" + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:setMasterAuth", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:setMasterAuth", + "id": "container.projects.zones.clusters.setMasterAuth" } }, "resources": { "nodePools": { "methods": { - "list": { - "description": "Lists the node pools for a cluster.", - "httpMethod": "GET", + "autoscaling": { + "flatPath": 
"v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}/autoscaling", + "id": "container.projects.zones.clusters.nodePools.autoscaling", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}/autoscaling", + "description": "Sets the autoscaling settings of a specific node pool.", + "request": { + "$ref": "SetNodePoolAutoscalingRequest" + }, + "response": { + "$ref": "Operation" + }, "parameterOrder": [ "projectId", "zone", - "clusterId" + "clusterId", + "nodePoolId" ], - "response": { - "$ref": "ListNodePoolsResponse" - }, + "httpMethod": "POST", "parameters": { "projectId": { "location": "path", - "description": "The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).", + "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", "type": "string", "required": true }, @@ -711,40 +708,46 @@ "required": true }, "clusterId": { - "location": "path", - "description": "The name of the cluster.", + "description": "The name of the cluster to upgrade.", "type": "string", - "required": true + "required": true, + "location": "path" + }, + "nodePoolId": { + "description": "The name of the node pool to upgrade.", + "type": "string", + "required": true, + "location": "path" } }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" - ], - "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools", - "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools", - "id": "container.projects.zones.clusters.nodePools.list" + ] }, - "rollback": { - "description": "Roll back the previously Aborted or Failed NodePool upgrade.\nThis will be an no-op if the last upgrade successfully completed.", - "request": { - "$ref": "RollbackNodePoolUpgradeRequest" + "get": { + "description": "Retrieves the node pool requested.", + "httpMethod": "GET", + "response": { + "$ref": "NodePool" }, - "httpMethod": "POST", "parameterOrder": [ "projectId", "zone", "clusterId", "nodePoolId" ], - "response": { - "$ref": "Operation" - }, "parameters": { + "nodePoolId": { + "location": "path", + "description": "The name of the node pool.", + "type": "string", + "required": true + }, "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "location": "path", + "description": "The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).", "type": "string", - "required": true, - "location": "path" + "required": true }, "zone": { "location": "path", @@ -753,13 +756,7 @@ "required": true }, "clusterId": { - "description": "The name of the cluster to rollback.", - "type": "string", - "required": true, - "location": "path" - }, - "nodePoolId": { - "description": "The name of the node pool to rollback.", + "description": "The name of the cluster.", "type": "string", "required": true, "location": "path" @@ -768,14 +765,14 @@ "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], - "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}:rollback", - "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}:rollback", - "id": "container.projects.zones.clusters.nodePools.rollback" + "flatPath": 
"v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}", + "id": "container.projects.zones.clusters.nodePools.get" }, - "create": { - "description": "Creates a node pool for a cluster.", + "update": { + "description": "Updates the version and/or image type of a specific node pool.", "request": { - "$ref": "CreateNodePoolRequest" + "$ref": "UpdateNodePoolRequest" }, "response": { "$ref": "Operation" @@ -783,41 +780,45 @@ "parameterOrder": [ "projectId", "zone", - "clusterId" + "clusterId", + "nodePoolId" ], "httpMethod": "POST", "parameters": { "projectId": { "location": "path", - "description": "The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).", + "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", "type": "string", "required": true }, "zone": { - "location": "path", "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", "type": "string", - "required": true + "required": true, + "location": "path" }, "clusterId": { "location": "path", - "description": "The name of the cluster.", + "description": "The name of the cluster to upgrade.", "type": "string", "required": true + }, + "nodePoolId": { + "description": "The name of the node pool to upgrade.", + "type": "string", + "required": true, + "location": "path" } }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], - "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools", - "id": "container.projects.zones.clusters.nodePools.create", - "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools" + "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}/update", + "id": "container.projects.zones.clusters.nodePools.update", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}/update" }, - "autoscaling": { - "request": { - "$ref": "SetNodePoolAutoscalingRequest" - }, - "description": "Sets the autoscaling settings of a specific node pool.", + "delete": { + "description": "Deletes a node pool from a cluster.", "response": { "$ref": "Operation" }, @@ -827,57 +828,67 @@ "clusterId", "nodePoolId" ], - "httpMethod": "POST", + "httpMethod": "DELETE", "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], "parameters": { - "nodePoolId": { - "description": "The name of the node pool to upgrade.", - "type": "string", - "required": true, - "location": "path" - }, "projectId": { "location": "path", - "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "description": "The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).", "type": "string", "required": true }, "zone": { - "location": "path", "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", "type": "string", - "required": true + "required": true, + "location": "path" }, "clusterId": { - "description": "The name of the cluster to upgrade.", + "location": "path", + "description": "The name of the cluster.", "type": "string", - "required": true, - "location": "path" + "required": true + }, + "nodePoolId": { 
+ "location": "path", + "description": "The name of the node pool to delete.", + "type": "string", + "required": true } }, - "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}/autoscaling", - "id": "container.projects.zones.clusters.nodePools.autoscaling", - "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}/autoscaling" + "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}", + "id": "container.projects.zones.clusters.nodePools.delete", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}" }, - "get": { - "response": { - "$ref": "NodePool" + "setSize": { + "request": { + "$ref": "SetNodePoolSizeRequest" }, + "description": "Sets the size of a specific node pool.", + "httpMethod": "POST", "parameterOrder": [ "projectId", "zone", "clusterId", "nodePoolId" ], - "httpMethod": "GET", + "response": { + "$ref": "Operation" + }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], "parameters": { + "nodePoolId": { + "location": "path", + "description": "The name of the node pool to update.", + "type": "string", + "required": true + }, "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).", + "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", "type": "string", "required": true, "location": "path" @@ -889,34 +900,34 @@ "location": "path" }, "clusterId": { - "description": "The name of the cluster.", + "description": "The name of the cluster to update.", "type": "string", "required": true, "location": "path" - }, - "nodePoolId": { - "location": "path", - "description": "The name of the node pool.", - "type": "string", - "required": true } }, - "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}", - "id": "container.projects.zones.clusters.nodePools.get", - "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}", - "description": "Retrieves the node pool requested." 
+ "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}/setSize", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}/setSize", + "id": "container.projects.zones.clusters.nodePools.setSize" }, - "update": { - "response": { - "$ref": "Operation" + "setManagement": { + "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}/setManagement", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}/setManagement", + "id": "container.projects.zones.clusters.nodePools.setManagement", + "request": { + "$ref": "SetNodePoolManagementRequest" }, + "description": "Sets the NodeManagement options for a node pool.", + "httpMethod": "POST", "parameterOrder": [ "projectId", "zone", "clusterId", "nodePoolId" ], - "httpMethod": "POST", + "response": { + "$ref": "Operation" + }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], @@ -928,64 +939,47 @@ "location": "path" }, "zone": { - "location": "path", "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", "type": "string", - "required": true + "required": true, + "location": "path" }, "clusterId": { - "location": "path", - "description": "The name of the cluster to upgrade.", + "description": "The name of the cluster to update.", "type": "string", - "required": true + "required": true, + "location": "path" }, "nodePoolId": { "location": "path", - "description": "The name of the node pool to upgrade.", + "description": "The name of the node pool to update.", "type": "string", "required": true } - }, - "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}/update", - "id": "container.projects.zones.clusters.nodePools.update", - "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}/update", - "request": { - "$ref": "UpdateNodePoolRequest" - }, - "description": "Updates the version and/or image type of a specific node pool." + } }, - "delete": { - "response": { - "$ref": "Operation" - }, + "list": { + "httpMethod": "GET", "parameterOrder": [ "projectId", "zone", - "clusterId", - "nodePoolId" - ], - "httpMethod": "DELETE", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" + "clusterId" ], + "response": { + "$ref": "ListNodePoolsResponse" + }, "parameters": { - "nodePoolId": { - "description": "The name of the node pool to delete.", - "type": "string", - "required": true, - "location": "path" - }, "projectId": { - "location": "path", "description": "The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).", "type": "string", - "required": true + "required": true, + "location": "path" }, "zone": { - "location": "path", "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", "type": "string", - "required": true + "required": true, + "location": "path" }, "clusterId": { "location": "path", @@ -994,90 +988,89 @@ "required": true } }, - "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}", - "id": "container.projects.zones.clusters.nodePools.delete", - "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}", - "description": "Deletes a node pool from a cluster." 
+ "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools", + "id": "container.projects.zones.clusters.nodePools.list", + "description": "Lists the node pools for a cluster." }, - "setManagement": { - "description": "Sets the NodeManagement options for a node pool.", + "rollback": { + "description": "Roll back the previously Aborted or Failed NodePool upgrade.\nThis will be an no-op if the last upgrade successfully completed.", "request": { - "$ref": "SetNodePoolManagementRequest" - }, - "response": { - "$ref": "Operation" + "$ref": "RollbackNodePoolUpgradeRequest" }, + "httpMethod": "POST", "parameterOrder": [ "projectId", "zone", "clusterId", "nodePoolId" ], - "httpMethod": "POST", + "response": { + "$ref": "Operation" + }, "parameters": { + "nodePoolId": { + "description": "The name of the node pool to rollback.", + "type": "string", + "required": true, + "location": "path" + }, "projectId": { - "location": "path", "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", "type": "string", - "required": true + "required": true, + "location": "path" }, "zone": { + "location": "path", "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", "type": "string", - "required": true, - "location": "path" + "required": true }, "clusterId": { - "description": "The name of the cluster to update.", - "type": "string", - "required": true, - "location": "path" - }, - "nodePoolId": { - "description": "The name of the node pool to update.", + "location": "path", + "description": "The name of the cluster to rollback.", "type": "string", - "required": true, - "location": "path" + "required": true } }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], - "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}/setManagement", - "id": "container.projects.zones.clusters.nodePools.setManagement", - "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}/setManagement" + "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}:rollback", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}:rollback", + "id": "container.projects.zones.clusters.nodePools.rollback" }, - "setSize": { + "create": { + "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools", + "id": "container.projects.zones.clusters.nodePools.create", + "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools", "request": { - "$ref": "SetNodePoolSizeRequest" + "$ref": "CreateNodePoolRequest" + }, + "description": "Creates a node pool for a cluster.", + "response": { + "$ref": "Operation" }, - "description": "Sets the size of a specific node pool.", - "httpMethod": "POST", "parameterOrder": [ "projectId", "zone", - "clusterId", - "nodePoolId" + "clusterId" ], - "response": { - "$ref": "Operation" - }, + "httpMethod": "POST", "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], "parameters": { - "nodePoolId": { + "projectId": { "location": "path", - "description": "The name of the node pool to update.", + "description": "The Google Developers Console [project ID or 
project\nnumber](https://developers.google.com/console/help/new/#projectnumber).", "type": "string", "required": true }, - "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", - "type": "string", - "required": true, - "location": "path" - }, "zone": { "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", "type": "string", @@ -1085,15 +1078,12 @@ "location": "path" }, "clusterId": { - "location": "path", - "description": "The name of the cluster to update.", + "description": "The name of the cluster.", "type": "string", - "required": true + "required": true, + "location": "path" } - }, - "flatPath": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}/setSize", - "path": "v1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}/setSize", - "id": "container.projects.zones.clusters.nodePools.setSize" + } } } } @@ -1101,20 +1091,16 @@ }, "operations": { "methods": { - "cancel": { - "description": "Cancels the specified operation.", - "request": { - "$ref": "CancelOperationRequest" - }, + "get": { "response": { - "$ref": "Empty" + "$ref": "Operation" }, "parameterOrder": [ "projectId", "zone", "operationId" ], - "httpMethod": "POST", + "httpMethod": "GET", "parameters": { "operationId": { "description": "The server-assigned `name` of the operation.", @@ -1129,37 +1115,30 @@ "required": true }, "zone": { - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the operation resides.", - "type": "string", - "required": true, - "location": "path" + "location": "path", + "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "type": "string", + "required": true } }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], - "flatPath": "v1/projects/{projectId}/zones/{zone}/operations/{operationId}:cancel", - "id": "container.projects.zones.operations.cancel", - "path": "v1/projects/{projectId}/zones/{zone}/operations/{operationId}:cancel" + "flatPath": "v1/projects/{projectId}/zones/{zone}/operations/{operationId}", + "id": "container.projects.zones.operations.get", + "path": "v1/projects/{projectId}/zones/{zone}/operations/{operationId}", + "description": "Gets the specified operation." 
}, - "get": { - "description": "Gets the specified operation.", - "response": { - "$ref": "Operation" - }, + "list": { + "httpMethod": "GET", "parameterOrder": [ "projectId", - "zone", - "operationId" + "zone" ], - "httpMethod": "GET", + "response": { + "$ref": "ListOperationsResponse" + }, "parameters": { - "operationId": { - "location": "path", - "description": "The server-assigned `name` of the operation.", - "type": "string", - "required": true - }, "projectId": { "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", "type": "string", @@ -1167,7 +1146,7 @@ "location": "path" }, "zone": { - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available)\nto return operations for, or `-` for all zones.", "type": "string", "required": true, "location": "path" @@ -1176,40 +1155,51 @@ "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], - "flatPath": "v1/projects/{projectId}/zones/{zone}/operations/{operationId}", - "id": "container.projects.zones.operations.get", - "path": "v1/projects/{projectId}/zones/{zone}/operations/{operationId}" + "flatPath": "v1/projects/{projectId}/zones/{zone}/operations", + "path": "v1/projects/{projectId}/zones/{zone}/operations", + "id": "container.projects.zones.operations.list", + "description": "Lists all operations in a project in a specific zone or all zones." }, - "list": { - "httpMethod": "GET", + "cancel": { + "request": { + "$ref": "CancelOperationRequest" + }, + "description": "Cancels the specified operation.", + "response": { + "$ref": "Empty" + }, "parameterOrder": [ "projectId", - "zone" + "zone", + "operationId" ], - "response": { - "$ref": "ListOperationsResponse" - }, + "httpMethod": "POST", "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], "parameters": { + "operationId": { + "location": "path", + "description": "The server-assigned `name` of the operation.", + "type": "string", + "required": true + }, "projectId": { + "location": "path", "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", "type": "string", - "required": true, - "location": "path" + "required": true }, "zone": { - "location": "path", - "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available)\nto return operations for, or `-` for all zones.", + "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the operation resides.", "type": "string", - "required": true + "required": true, + "location": "path" } }, - "flatPath": "v1/projects/{projectId}/zones/{zone}/operations", - "path": "v1/projects/{projectId}/zones/{zone}/operations", - "id": "container.projects.zones.operations.list", - "description": "Lists all operations in a project in a specific zone or all zones." 
+ "flatPath": "v1/projects/{projectId}/zones/{zone}/operations/{operationId}:cancel", + "id": "container.projects.zones.operations.cancel", + "path": "v1/projects/{projectId}/zones/{zone}/operations/{operationId}:cancel" } } } @@ -1225,16 +1215,16 @@ "type": "boolean", "location": "query" }, - "oauth_token": { - "location": "query", - "description": "OAuth 2.0 token for the current user.", - "type": "string" - }, "bearer_token": { "description": "OAuth bearer token.", "type": "string", "location": "query" }, + "oauth_token": { + "location": "query", + "description": "OAuth 2.0 token for the current user.", + "type": "string" + }, "upload_protocol": { "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", "type": "string", @@ -1246,20 +1236,15 @@ "type": "boolean", "location": "query" }, - "uploadType": { - "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", - "type": "string", - "location": "query" - }, "fields": { "description": "Selector specifying which fields to include in a partial response.", "type": "string", "location": "query" }, - "callback": { - "description": "JSONP", - "type": "string", - "location": "query" + "uploadType": { + "location": "query", + "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", + "type": "string" }, "$.xgafv": { "enumDescriptions": [ @@ -1274,7 +1259,18 @@ "description": "V1 error format.", "type": "string" }, + "callback": { + "description": "JSONP", + "type": "string", + "location": "query" + }, "alt": { + "enum": [ + "json", + "media", + "proto" + ], + "type": "string", "enumDescriptions": [ "Responses with Content-Type of application/json", "Media download with context-dependent Content-Type", @@ -1282,12 +1278,11 @@ ], "location": "query", "description": "Data format for response.", - "default": "json", - "enum": [ - "json", - "media", - "proto" - ], + "default": "json" + }, + "key": { + "location": "query", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", "type": "string" }, "access_token": { @@ -1295,11 +1290,6 @@ "description": "OAuth access token.", "type": "string" }, - "key": { - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", - "type": "string", - "location": "query" - }, "quotaUser": { "description": "Available to use for quota purposes for server-side applications. 
Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", "type": "string", @@ -1307,6 +1297,150 @@ } }, "schemas": { + "CancelOperationRequest": { + "description": "CancelOperationRequest cancels a single operation.", + "type": "object", + "properties": {}, + "id": "CancelOperationRequest" + }, + "KubernetesDashboard": { + "description": "Configuration for the Kubernetes Dashboard.", + "type": "object", + "properties": { + "disabled": { + "description": "Whether the Kubernetes Dashboard is enabled for this cluster.", + "type": "boolean" + } + }, + "id": "KubernetesDashboard" + }, + "SetLegacyAbacRequest": { + "description": "SetLegacyAbacRequest enables or disables the ABAC authorization mechanism for\na cluster.", + "type": "object", + "properties": { + "enabled": { + "description": "Whether ABAC authorization will be enabled in the cluster.", + "type": "boolean" + } + }, + "id": "SetLegacyAbacRequest" + }, + "Operation": { + "description": "This operation resource represents operations that may have happened or are\nhappening on the cluster. All fields are output only.", + "type": "object", + "properties": { + "selfLink": { + "description": "Server-defined URL for the resource.", + "type": "string" + }, + "endTime": { + "description": "[Output only] The time the operation completed, in\n[RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", + "type": "string" + }, + "targetLink": { + "description": "Server-defined URL for the target of the operation.", + "type": "string" + }, + "detail": { + "description": "Detailed operation progress, if available.", + "type": "string" + }, + "operationType": { + "description": "The operation type.", + "type": "string", + "enumDescriptions": [ + "Not set.", + "Cluster create.", + "Cluster delete.", + "A master upgrade.", + "A node upgrade.", + "Cluster repair.", + "Cluster update.", + "Node pool create.", + "Node pool delete.", + "Set node pool management.", + "Automatic node pool repair.", + "Automatic node upgrade.", + "Set labels.", + "Set/generate master auth materials", + "Set node pool size.", + "Updates network policy for a cluster." + ], + "enum": [ + "TYPE_UNSPECIFIED", + "CREATE_CLUSTER", + "DELETE_CLUSTER", + "UPGRADE_MASTER", + "UPGRADE_NODES", + "REPAIR_CLUSTER", + "UPDATE_CLUSTER", + "CREATE_NODE_POOL", + "DELETE_NODE_POOL", + "SET_NODE_POOL_MANAGEMENT", + "AUTO_REPAIR_NODES", + "AUTO_UPGRADE_NODES", + "SET_LABELS", + "SET_MASTER_AUTH", + "SET_NODE_POOL_SIZE", + "SET_NETWORK_POLICY" + ] + }, + "startTime": { + "description": "[Output only] The time the operation started, in\n[RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", + "type": "string" + }, + "zone": { + "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the operation\nis taking place.", + "type": "string" + }, + "status": { + "enumDescriptions": [ + "Not set.", + "The operation has been created.", + "The operation is currently running.", + "The operation is done, either cancelled or completed.", + "The operation is aborting." 
+ ], + "enum": [ + "STATUS_UNSPECIFIED", + "PENDING", + "RUNNING", + "DONE", + "ABORTING" + ], + "description": "The current status of the operation.", + "type": "string" + }, + "statusMessage": { + "description": "If an error has occurred, a textual description of the error.", + "type": "string" + }, + "name": { + "description": "The server-assigned ID for the operation.", + "type": "string" + } + }, + "id": "Operation" + }, + "AddonsConfig": { + "description": "Configuration for the addons that can be automatically spun up in the\ncluster, enabling additional functionality.", + "type": "object", + "properties": { + "kubernetesDashboard": { + "description": "Configuration for the Kubernetes Dashboard.", + "$ref": "KubernetesDashboard" + }, + "horizontalPodAutoscaling": { + "description": "Configuration for the horizontal pod autoscaling feature, which\nincreases or decreases the number of replica pods a replication controller\nhas based on the resource usage of the existing pods.", + "$ref": "HorizontalPodAutoscaling" + }, + "httpLoadBalancing": { + "$ref": "HttpLoadBalancing", + "description": "Configuration for the HTTP (L7) load balancing controller addon, which\nmakes it easy to set up HTTP load balancers for services in a cluster." + } + }, + "id": "AddonsConfig" + }, "SetLocationsRequest": { "description": "SetLocationsRequest sets the locations of the cluster.", "type": "object", @@ -1359,8 +1493,6 @@ "type": "boolean" }, "provider": { - "description": "The selected network policy provider.", - "type": "string", "enumDescriptions": [ "Not set", "Tigera (Calico Felix)." @@ -1368,7 +1500,9 @@ "enum": [ "PROVIDER_UNSPECIFIED", "CALICO" - ] + ], + "description": "The selected network policy provider.", + "type": "string" } }, "id": "NetworkPolicy" @@ -1380,14 +1514,64 @@ "masterVersion": { "description": "The Kubernetes version to change the master to. The only valid value is the\nlatest supported version. Use \"-\" to have the server automatically select\nthe latest version.", "type": "string" - } - }, - "id": "UpdateMasterRequest" - }, - "Cluster": { - "description": "A Google Container Engine cluster.", - "type": "object", - "properties": { + } + }, + "id": "UpdateMasterRequest" + }, + "Cluster": { + "description": "A Google Container Engine cluster.", + "type": "object", + "properties": { + "createTime": { + "description": "[Output only] The time the cluster was created, in\n[RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", + "type": "string" + }, + "clusterIpv4Cidr": { + "description": "The IP address range of the container pods in this cluster, in\n[CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)\nnotation (e.g. `10.96.0.0/14`). Leave blank to have\none automatically chosen or specify a `/14` block in `10.0.0.0/8`.", + "type": "string" + }, + "initialNodeCount": { + "format": "int32", + "description": "The number of nodes to create in this cluster. You must ensure that your\nCompute Engine \u003ca href=\"/compute/docs/resource-quotas\"\u003eresource quota\u003c/a\u003e\nis sufficient for this number of instances. You must also have available\nfirewall and routes quota.\nFor requests, this field should only be used in lieu of a\n\"node_pool\" object, since this configuration (along with the\n\"node_config\") will be used to create a \"NodePool\" object with an\nauto-generated name. 
Do not use this and a node_pool at the same time.", + "type": "integer" + }, + "selfLink": { + "description": "[Output only] Server-defined URL for the resource.", + "type": "string" + }, + "nodePools": { + "description": "The node pools associated with this cluster.\nThis field should not be set if \"node_config\" or \"initial_node_count\" are\nspecified.", + "items": { + "$ref": "NodePool" + }, + "type": "array" + }, + "locations": { + "description": "The list of Google Compute Engine\n[locations](/compute/docs/zones#available) in which the cluster's nodes\nshould be located.", + "items": { + "type": "string" + }, + "type": "array" + }, + "instanceGroupUrls": { + "description": "[Output only] The resource URLs of [instance\ngroups](/compute/docs/instance-groups/) associated with this\ncluster.", + "items": { + "type": "string" + }, + "type": "array" + }, + "networkPolicy": { + "description": "Configuration options for the NetworkPolicy feature.", + "$ref": "NetworkPolicy" + }, + "servicesIpv4Cidr": { + "description": "[Output only] The IP address range of the Kubernetes services in\nthis cluster, in\n[CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)\nnotation (e.g. `1.2.3.4/29`). Service addresses are\ntypically put in the last `/16` from the container CIDR.", + "type": "string" + }, + "enableKubernetesAlpha": { + "description": "Kubernetes alpha features are enabled on this cluster. This includes alpha\nAPI groups (e.g. v1alpha1) and features that may not be production ready in\nthe kubernetes version of the master and nodes.\nThe cluster has no SLA for uptime and master/node upgrades are disabled.\nAlpha enabled clusters are automatically deleted thirty days after\ncreation.", + "type": "boolean" + }, "description": { "description": "An optional description of this cluster.", "type": "string" @@ -1413,8 +1597,8 @@ "description": "[Output only] The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", "type": "string" }, - "loggingService": { - "description": "The logging service the cluster should use to write logs.\nCurrently available options:\n\n* `logging.googleapis.com` - the Google Cloud Logging service.\n* `none` - no logs will be exported from the cluster.\n* if left as an empty string,`logging.googleapis.com` will be used.", + "expireTime": { + "description": "[Output only] The time the cluster will be automatically\ndeleted in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", "type": "string" }, "nodeIpv4CidrSize": { @@ -1422,21 +1606,21 @@ "description": "[Output only] The size of the address space on each node for hosting\ncontainers. This is provisioned from within the `container_ipv4_cidr`\nrange.", "type": "integer" }, - "expireTime": { - "description": "[Output only] The time the cluster will be automatically\ndeleted in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", + "loggingService": { + "description": "The logging service the cluster should use to write logs.\nCurrently available options:\n\n* `logging.googleapis.com` - the Google Cloud Logging service.\n* `none` - no logs will be exported from the cluster.\n* if left as an empty string,`logging.googleapis.com` will be used.", "type": "string" }, "masterAuthorizedNetworksConfig": { - "$ref": "MasterAuthorizedNetworksConfig", - "description": "Master authorized networks is a Beta feature.\nThe configuration options for master authorized networks feature." 
+ "description": "Master authorized networks is a Beta feature.\nThe configuration options for master authorized networks feature.", + "$ref": "MasterAuthorizedNetworksConfig" }, "statusMessage": { "description": "[Output only] Additional information about the current status of this\ncluster, if available.", "type": "string" }, "masterAuth": { - "$ref": "MasterAuth", - "description": "The authentication information for accessing the master endpoint." + "description": "The authentication information for accessing the master endpoint.", + "$ref": "MasterAuth" }, "currentMasterVersion": { "description": "[Output only] The current software version of the master endpoint.", @@ -1478,6 +1662,10 @@ "description": "[Output only] The current version of the node software components.\nIf they are currently at multiple versions because they're in the process\nof being upgraded, this reflects the minimum version of all nodes.", "type": "string" }, + "name": { + "description": "The name of this cluster. The name must be unique within this project\nand zone, and can be up to 40 characters with the following restrictions:\n\n* Lowercase letters, numbers, and hyphens only.\n* Must start with a letter.\n* Must end with a number or a letter.", + "type": "string" + }, "resourceLabels": { "description": "The resource labels for the cluster to use to annotate any related\nGoogle Compute Engine resources.", "type": "object", @@ -1485,75 +1673,21 @@ "type": "string" } }, - "name": { - "description": "The name of this cluster. The name must be unique within this project\nand zone, and can be up to 40 characters with the following restrictions:\n\n* Lowercase letters, numbers, and hyphens only.\n* Must start with a letter.\n* Must end with a number or a letter.", - "type": "string" - }, "initialClusterVersion": { "description": "The initial Kubernetes version for this cluster. Valid versions are those\nfound in validMasterVersions returned by getServerConfig. The version can\nbe upgraded over time; such upgrades are reflected in\ncurrentMasterVersion and currentNodeVersion.", "type": "string" }, "ipAllocationPolicy": { - "$ref": "IPAllocationPolicy", - "description": "Configuration for cluster IP allocation." - }, - "endpoint": { - "description": "[Output only] The IP address of this cluster's master endpoint.\nThe endpoint can be accessed from the internet at\n`https://username:password@endpoint/`.\n\nSee the `masterAuth` property of this resource for username and\npassword information.", - "type": "string" + "description": "Configuration for cluster IP allocation.", + "$ref": "IPAllocationPolicy" }, "legacyAbac": { - "description": "Configuration for the legacy ABAC authorization mode.", - "$ref": "LegacyAbac" - }, - "createTime": { - "description": "[Output only] The time the cluster was created, in\n[RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", - "type": "string" - }, - "clusterIpv4Cidr": { - "description": "The IP address range of the container pods in this cluster, in\n[CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)\nnotation (e.g. `10.96.0.0/14`). Leave blank to have\none automatically chosen or specify a `/14` block in `10.0.0.0/8`.", - "type": "string" - }, - "initialNodeCount": { - "format": "int32", - "description": "The number of nodes to create in this cluster. You must ensure that your\nCompute Engine \u003ca href=\"/compute/docs/resource-quotas\"\u003eresource quota\u003c/a\u003e\nis sufficient for this number of instances. 
You must also have available\nfirewall and routes quota.\nFor requests, this field should only be used in lieu of a\n\"node_pool\" object, since this configuration (along with the\n\"node_config\") will be used to create a \"NodePool\" object with an\nauto-generated name. Do not use this and a node_pool at the same time.", - "type": "integer" - }, - "selfLink": { - "description": "[Output only] Server-defined URL for the resource.", - "type": "string" - }, - "locations": { - "description": "The list of Google Compute Engine\n[locations](/compute/docs/zones#available) in which the cluster's nodes\nshould be located.", - "items": { - "type": "string" - }, - "type": "array" - }, - "nodePools": { - "description": "The node pools associated with this cluster.\nThis field should not be set if \"node_config\" or \"initial_node_count\" are\nspecified.", - "items": { - "$ref": "NodePool" - }, - "type": "array" - }, - "instanceGroupUrls": { - "description": "[Output only] The resource URLs of [instance\ngroups](/compute/docs/instance-groups/) associated with this\ncluster.", - "items": { - "type": "string" - }, - "type": "array" + "$ref": "LegacyAbac", + "description": "Configuration for the legacy ABAC authorization mode." }, - "servicesIpv4Cidr": { - "description": "[Output only] The IP address range of the Kubernetes services in\nthis cluster, in\n[CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)\nnotation (e.g. `1.2.3.4/29`). Service addresses are\ntypically put in the last `/16` from the container CIDR.", + "endpoint": { + "description": "[Output only] The IP address of this cluster's master endpoint.\nThe endpoint can be accessed from the internet at\n`https://username:password@endpoint/`.\n\nSee the `masterAuth` property of this resource for username and\npassword information.", "type": "string" - }, - "networkPolicy": { - "$ref": "NetworkPolicy", - "description": "Configuration options for the NetworkPolicy feature." - }, - "enableKubernetesAlpha": { - "description": "Kubernetes alpha features are enabled on this cluster. This includes alpha\nAPI groups (e.g. v1alpha1) and features that may not be production ready in\nthe kubernetes version of the master and nodes.\nThe cluster has no SLA for uptime and master/node upgrades are disabled.\nAlpha enabled clusters are automatically deleted thirty days after\ncreation.", - "type": "boolean" } }, "id": "Cluster" @@ -1652,41 +1786,44 @@ }, "id": "ServerConfig" }, - "MasterAuth": { - "description": "The authentication information for accessing the master endpoint.\nAuthentication can be done using HTTP basic auth or using client\ncertificates.", + "NodeConfig": { + "description": "Parameters that describe the nodes in a cluster.", "type": "object", "properties": { - "clientKey": { - "description": "[Output only] Base64-encoded private key used by clients to authenticate\nto the cluster endpoint.", - "type": "string" + "metadata": { + "additionalProperties": { + "type": "string" + }, + "description": "The metadata key/value pairs assigned to instances in the cluster.\n\nKeys must conform to the regexp [a-zA-Z0-9-_]+ and be less than 128 bytes\nin length. 
These are reflected as part of a URL in the metadata server.\nAdditionally, to avoid ambiguity, keys must not conflict with any other\nmetadata keys for the project or be one of the four reserved keys:\n\"instance-template\", \"kube-env\", \"startup-script\", and \"user-data\"\n\nValues are free-form strings, and only have meaning as interpreted by\nthe image running in the instance. The only restriction placed on them is\nthat each value's size must be less than or equal to 32 KB.\n\nThe total size of all keys and values must be less than 512 KB.", + "type": "object" }, - "clusterCaCertificate": { - "description": "[Output only] Base64-encoded public certificate that is the root of\ntrust for the cluster.", - "type": "string" + "diskSizeGb": { + "format": "int32", + "description": "Size of the disk attached to each node, specified in GB.\nThe smallest allowed disk size is 10GB.\n\nIf unspecified, the default disk size is 100GB.", + "type": "integer" }, - "clientCertificate": { - "description": "[Output only] Base64-encoded public certificate used by clients to\nauthenticate to the cluster endpoint.", - "type": "string" + "tags": { + "description": "The list of instance tags applied to all nodes. Tags are used to identify\nvalid sources or targets for network firewalls and are specified by\nthe client during cluster or node pool creation. Each tag within the list\nmust comply with RFC1035.", + "items": { + "type": "string" + }, + "type": "array" }, - "username": { - "description": "The username to use for HTTP basic authentication to the master endpoint.\nFor clusters v1.6.0 and later, you can disable basic authentication by\nproviding an empty username.", + "serviceAccount": { + "description": "The Google Cloud Platform Service Account to be used by the node VMs. If\nno Service Account is specified, the \"default\" service account is used.", "type": "string" }, - "password": { - "description": "The password to use for HTTP basic authentication to the master endpoint.\nBecause the master endpoint is open to the Internet, you should create a\nstrong password. If a password is provided for cluster creation, username\nmust be non-empty.", + "accelerators": { + "description": "A list of hardware accelerators to be attached to each node.\nSee https://cloud.google.com/compute/docs/gpus for more information about\nsupport for GPUs.", + "items": { + "$ref": "AcceleratorConfig" + }, + "type": "array" + }, + "machineType": { + "description": "The name of a Google Compute Engine [machine\ntype](/compute/docs/machine-types) (e.g.\n`n1-standard-1`).\n\nIf unspecified, the default machine type is\n`n1-standard-1`.", "type": "string" }, - "clientCertificateConfig": { - "description": "Configuration for client certificate authentication on the cluster. If no\nconfiguration is specified, a client certificate is issued.", - "$ref": "ClientCertificateConfig" - } - }, - "id": "MasterAuth" - }, - "NodeConfig": { - "description": "Parameters that describe the nodes in a cluster.", - "type": "object", - "properties": { "imageType": { "description": "The image type to use for this node. 
Note that for a given image type,\nthe latest version of it will be used.", "type": "string" @@ -1703,7 +1840,7 @@ "type": "boolean" }, "labels": { - "description": "The map of Kubernetes labels (key/value pairs) to be applied to each node.\nThese will added in addition to any default label(s) that\nKubernetes may apply to the node.\nIn case of conflict in label keys, the applied set may differ depending on\nthe Kubernetes version -- it's best to assume the behavior is undefined\nand conflicts should be avoided.\nFor more information, including usage and the valid values, see:\nhttp://kubernetes.io/v1.1/docs/user-guide/labels.html", + "description": "The map of Kubernetes labels (key/value pairs) to be applied to each node.\nThese will added in addition to any default label(s) that\nKubernetes may apply to the node.\nIn case of conflict in label keys, the applied set may differ depending on\nthe Kubernetes version -- it's best to assume the behavior is undefined\nand conflicts should be avoided.\nFor more information, including usage and the valid values, see:\nhttps://kubernetes.io/docs/concepts/overview/working-with-objects/labels/", "type": "object", "additionalProperties": { "type": "string" @@ -1713,43 +1850,40 @@ "format": "int32", "description": "The number of local SSD disks to be attached to the node.\n\nThe limit for this value is dependant upon the maximum number of\ndisks available on a machine per zone. See:\nhttps://cloud.google.com/compute/docs/disks/local-ssd#local_ssd_limits\nfor more information.", "type": "integer" + } + }, + "id": "NodeConfig" + }, + "MasterAuth": { + "description": "The authentication information for accessing the master endpoint.\nAuthentication can be done using HTTP basic auth or using client\ncertificates.", + "type": "object", + "properties": { + "clientCertificateConfig": { + "$ref": "ClientCertificateConfig", + "description": "Configuration for client certificate authentication on the cluster. If no\nconfiguration is specified, a client certificate is issued." }, - "metadata": { - "description": "The metadata key/value pairs assigned to instances in the cluster.\n\nKeys must conform to the regexp [a-zA-Z0-9-_]+ and be less than 128 bytes\nin length. These are reflected as part of a URL in the metadata server.\nAdditionally, to avoid ambiguity, keys must not conflict with any other\nmetadata keys for the project or be one of the four reserved keys:\n\"instance-template\", \"kube-env\", \"startup-script\", and \"user-data\"\n\nValues are free-form strings, and only have meaning as interpreted by\nthe image running in the instance. The only restriction placed on them is\nthat each value's size must be less than or equal to 32 KB.\n\nThe total size of all keys and values must be less than 512 KB.", - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "diskSizeGb": { - "format": "int32", - "description": "Size of the disk attached to each node, specified in GB.\nThe smallest allowed disk size is 10GB.\n\nIf unspecified, the default disk size is 100GB.", - "type": "integer" + "password": { + "description": "The password to use for HTTP basic authentication to the master endpoint.\nBecause the master endpoint is open to the Internet, you should create a\nstrong password. If a password is provided for cluster creation, username\nmust be non-empty.", + "type": "string" }, - "tags": { - "description": "The list of instance tags applied to all nodes. 
Tags are used to identify\nvalid sources or targets for network firewalls and are specified by\nthe client during cluster or node pool creation. Each tag within the list\nmust comply with RFC1035.", - "items": { - "type": "string" - }, - "type": "array" + "clientKey": { + "description": "[Output only] Base64-encoded private key used by clients to authenticate\nto the cluster endpoint.", + "type": "string" }, - "serviceAccount": { - "description": "The Google Cloud Platform Service Account to be used by the node VMs. If\nno Service Account is specified, the \"default\" service account is used.", + "clusterCaCertificate": { + "description": "[Output only] Base64-encoded public certificate that is the root of\ntrust for the cluster.", "type": "string" }, - "accelerators": { - "description": "A list of hardware accelerators to be attached to each node.\nSee https://cloud.google.com/compute/docs/gpus for more information about\nsupport for GPUs.", - "items": { - "$ref": "AcceleratorConfig" - }, - "type": "array" + "clientCertificate": { + "description": "[Output only] Base64-encoded public certificate used by clients to\nauthenticate to the cluster endpoint.", + "type": "string" }, - "machineType": { - "description": "The name of a Google Compute Engine [machine\ntype](/compute/docs/machine-types) (e.g.\n`n1-standard-1`).\n\nIf unspecified, the default machine type is\n`n1-standard-1`.", + "username": { + "description": "The username to use for HTTP basic authentication to the master endpoint.\nFor clusters v1.6.0 and later, you can disable basic authentication by\nproviding an empty username.", "type": "string" } }, - "id": "NodeConfig" + "id": "MasterAuth" }, "AutoUpgradeOptions": { "description": "AutoUpgradeOptions defines the set of options for the user to control how\nthe Auto Upgrades will proceed.", @@ -1803,8 +1937,8 @@ "type": "object", "properties": { "networkPolicy": { - "description": "Configuration options for the NetworkPolicy feature.", - "$ref": "NetworkPolicy" + "$ref": "NetworkPolicy", + "description": "Configuration options for the NetworkPolicy feature." } }, "id": "SetNetworkPolicyRequest" @@ -1814,8 +1948,8 @@ "type": "object", "properties": { "update": { - "$ref": "MasterAuth", - "description": "A description of the update." 
+ "description": "A description of the update.", + "$ref": "MasterAuth" }, "action": { "description": "The exact form of action to be taken on the master auth", @@ -1834,17 +1968,6 @@ }, "id": "SetMasterAuthRequest" }, - "ClientCertificateConfig": { - "description": "Configuration for client certificates on the cluster.", - "type": "object", - "properties": { - "issueClientCertificate": { - "description": "Issue a client certificate.", - "type": "boolean" - } - }, - "id": "ClientCertificateConfig" - }, "NodePoolAutoscaling": { "description": "NodePoolAutoscaling contains information required by cluster autoscaler to\nadjust the size of the node pool to the current cluster usage.", "type": "object", @@ -1866,12 +1989,35 @@ }, "id": "NodePoolAutoscaling" }, + "ClientCertificateConfig": { + "description": "Configuration for client certificates on the cluster.", + "type": "object", + "properties": { + "issueClientCertificate": { + "description": "Issue a client certificate.", + "type": "boolean" + } + }, + "id": "ClientCertificateConfig" + }, "IPAllocationPolicy": { "description": "Configuration for controlling how IPs are allocated in the cluster.", "type": "object", "properties": { + "clusterIpv4CidrBlock": { + "description": "The IP address range for the cluster pod IPs. If this field is set, then\n`cluster.cluster_ipv4_cidr` must be left blank.\n\nThis field is only applicable when `use_ip_aliases` is true.\n\nSet to blank to have a range chosen with the default size.\n\nSet to /netmask (e.g. `/14`) to have a range chosen with a specific\nnetmask.\n\nSet to a\n[CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)\nnotation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g.\n`10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range\nto use.", + "type": "string" + }, + "clusterSecondaryRangeName": { + "description": "The name of the secondary range to be used for the cluster CIDR\nblock. The secondary range will be used for pod IP\naddresses. This must be an existing secondary range associated\nwith the cluster subnetwork.\n\nThis field is only applicable with use_ip_aliases is true and\ncreate_subnetwork is false.", + "type": "string" + }, + "nodeIpv4CidrBlock": { + "description": "The IP address range of the instance IPs in this cluster.\n\nThis is applicable only if `create_subnetwork` is true.\n\nSet to blank to have a range chosen with the default size.\n\nSet to /netmask (e.g. `/14`) to have a range chosen with a specific\nnetmask.\n\nSet to a\n[CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)\nnotation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g.\n`10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range\nto use.", + "type": "string" + }, "servicesIpv4Cidr": { - "description": "The IP address range of the services IPs in this cluster. If blank, a range\nwill be automatically chosen with the default size.\n\nThis field is only applicable when `use_ip_aliases` is true.\n\nSet to blank to have a range will be chosen with the default size.\n\nSet to /netmask (e.g. `/14`) to have a range be chosen with a specific\nnetmask.\n\nSet to a [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)\nnotation (e.g. 
`10.96.0.0/14`) from the RFC-1918 private networks (e.g.\n`10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range\nto use.", + "description": "This field is deprecated, use services_ipv4_cidr_block.", "type": "string" }, "createSubnetwork": { @@ -1882,16 +2028,24 @@ "description": "Whether alias IPs will be used for pod IPs in the cluster.", "type": "boolean" }, + "servicesSecondaryRangeName": { + "description": "The name of the secondary range to be used as for the services\nCIDR block. The secondary range will be used for service\nClusterIPs. This must be an existing secondary range associated\nwith the cluster subnetwork.\n\nThis field is only applicable with use_ip_aliases is true and\ncreate_subnetwork is false.", + "type": "string" + }, + "servicesIpv4CidrBlock": { + "description": "The IP address range of the services IPs in this cluster. If blank, a range\nwill be automatically chosen with the default size.\n\nThis field is only applicable when `use_ip_aliases` is true.\n\nSet to blank to have a range chosen with the default size.\n\nSet to /netmask (e.g. `/14`) to have a range chosen with a specific\nnetmask.\n\nSet to a\n[CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)\nnotation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g.\n`10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range\nto use.", + "type": "string" + }, "subnetworkName": { "description": "A custom subnetwork name to be used if `create_subnetwork` is true. If\nthis field is empty, then an automatic name will be chosen for the new\nsubnetwork.", "type": "string" }, "clusterIpv4Cidr": { - "description": "The IP address range for the cluster pod IPs. If this field is set, then\n`cluster.cluster_ipv4_cidr` must be left blank.\n\nThis field is only applicable when `use_ip_aliases` is true.\n\nSet to blank to have a range will be chosen with the default size.\n\nSet to /netmask (e.g. `/14`) to have a range be chosen with a specific\nnetmask.\n\nSet to a [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)\nnotation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g.\n`10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range\nto use.", + "description": "This field is deprecated, use cluster_ipv4_cidr_block.", "type": "string" }, "nodeIpv4Cidr": { - "description": "The IP address range of the instance IPs in this cluster.\n\nThis is applicable only if `create_subnetwork` is true.\n\nSet to blank to have a range will be chosen with the default size.\n\nSet to /netmask (e.g. `/14`) to have a range be chosen with a specific\nnetmask.\n\nSet to a [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)\nnotation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g.\n`10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range\nto use.", + "description": "This field is deprecated, use node_ipv4_cidr_block.", "type": "string" } }, @@ -1914,8 +2068,8 @@ "type": "string" }, "desiredMasterAuthorizedNetworksConfig": { - "description": "Master authorized networks is a Beta feature.\nThe desired configuration options for master authorized networks feature.", - "$ref": "MasterAuthorizedNetworksConfig" + "$ref": "MasterAuthorizedNetworksConfig", + "description": "Master authorized networks is a Beta feature.\nThe desired configuration options for master authorized networks feature." 
}, "desiredLocations": { "description": "The desired list of Google Compute Engine\n[locations](/compute/docs/zones#available) in which the cluster's nodes\nshould be located. Changing the locations a cluster is in will result\nin nodes being either created or removed from the cluster, depending on\nwhether locations are being added or removed.\n\nThis list must always include the cluster's primary zone.", @@ -1925,8 +2079,8 @@ "type": "array" }, "desiredNodePoolAutoscaling": { - "$ref": "NodePoolAutoscaling", - "description": "Autoscaler configuration for the node pool specified in\ndesired_node_pool_id. If there is only one pool in the\ncluster and desired_node_pool_id is not provided then\nthe change applies to that single node pool." + "description": "Autoscaler configuration for the node pool specified in\ndesired_node_pool_id. If there is only one pool in the\ncluster and desired_node_pool_id is not provided then\nthe change applies to that single node pool.", + "$ref": "NodePoolAutoscaling" }, "desiredMonitoringService": { "description": "The monitoring service the cluster should use to write metrics.\nCurrently available options:\n\n* \"monitoring.googleapis.com\" - the Google Cloud Monitoring service\n* \"none\" - no metrics will be exported from the cluster", @@ -1937,8 +2091,8 @@ "type": "string" }, "desiredAddonsConfig": { - "description": "Configurations for the various addons available to run in the cluster.", - "$ref": "AddonsConfig" + "$ref": "AddonsConfig", + "description": "Configurations for the various addons available to run in the cluster." } }, "id": "ClusterUpdate" @@ -1957,30 +2111,13 @@ "HorizontalPodAutoscaling": { "description": "Configuration options for the horizontal pod autoscaling feature, which\nincreases or decreases the number of replica pods a replication controller\nhas based on the resource usage of the existing pods.", "type": "object", - "properties": { - "disabled": { - "description": "Whether the Horizontal Pod Autoscaling feature is enabled in the cluster.\nWhen enabled, it ensures that a Heapster pod is running in the cluster,\nwhich is also used by the Cloud Monitoring service.", - "type": "boolean" - } - }, - "id": "HorizontalPodAutoscaling" - }, - "SetNodePoolManagementRequest": { - "description": "SetNodePoolManagementRequest sets the node management properties of a node\npool.", - "type": "object", - "properties": { - "management": { - "$ref": "NodeManagement", - "description": "NodeManagement configuration for the node pool." + "properties": { + "disabled": { + "description": "Whether the Horizontal Pod Autoscaling feature is enabled in the cluster.\nWhen enabled, it ensures that a Heapster pod is running in the cluster,\nwhich is also used by the Cloud Monitoring service.", + "type": "boolean" } }, - "id": "SetNodePoolManagementRequest" - }, - "Empty": { - "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", - "type": "object", - "properties": {}, - "id": "Empty" + "id": "HorizontalPodAutoscaling" }, "MasterAuthorizedNetworksConfig": { "description": "Master authorized networks is a Beta feature.\nConfiguration options for the master authorized networks feature. 
Enabled\nmaster authorized networks will disallow all external traffic to access\nKubernetes master through HTTPS except traffic from the given CIDR blocks,\nGoogle Compute Engine Public IPs and Google Prod IPs.", @@ -2000,6 +2137,23 @@ }, "id": "MasterAuthorizedNetworksConfig" }, + "Empty": { + "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", + "type": "object", + "properties": {}, + "id": "Empty" + }, + "SetNodePoolManagementRequest": { + "description": "SetNodePoolManagementRequest sets the node management properties of a node\npool.", + "type": "object", + "properties": { + "management": { + "$ref": "NodeManagement", + "description": "NodeManagement configuration for the node pool." + } + }, + "id": "SetNodePoolManagementRequest" + }, "SetNodePoolAutoscalingRequest": { "description": "SetNodePoolAutoscalingRequest sets the autoscaler settings of a node pool.", "type": "object", @@ -2048,6 +2202,17 @@ "properties": {}, "id": "StartIPRotationRequest" }, + "LegacyAbac": { + "description": "Configuration for the legacy Attribute Based Access Control authorization\nmode.", + "type": "object", + "properties": { + "enabled": { + "description": "Whether the ABAC authorizer is enabled for this cluster. When enabled,\nidentities in the system, including service accounts, nodes, and\ncontrollers, will have statically granted permissions beyond those\nprovided by the RBAC configuration or IAM.", + "type": "boolean" + } + }, + "id": "LegacyAbac" + }, "UpdateNodePoolRequest": { "description": "UpdateNodePoolRequests update a node pool's image and/or version.", "type": "object", @@ -2079,24 +2244,13 @@ }, "id": "AcceleratorConfig" }, - "LegacyAbac": { - "description": "Configuration for the legacy Attribute Based Access Control authorization\nmode.", - "type": "object", - "properties": { - "enabled": { - "description": "Whether the ABAC authorizer is enabled for this cluster. When enabled,\nidentities in the system, including service accounts, nodes, and\ncontrollers, will have statically granted permissions beyond those\nprovided by the RBAC configuration or IAM.", - "type": "boolean" - } - }, - "id": "LegacyAbac" - }, "SetAddonsConfigRequest": { "description": "SetAddonsConfigRequest sets the addons associated with the cluster.", "type": "object", "properties": { "addonsConfig": { - "$ref": "AddonsConfig", - "description": "The desired configurations for the various addons available to run in the\ncluster." + "description": "The desired configurations for the various addons available to run in the\ncluster.", + "$ref": "AddonsConfig" } }, "id": "SetAddonsConfigRequest" @@ -2106,11 +2260,11 @@ "type": "object", "properties": { "resourceLabels": { - "description": "The labels to set for that cluster.", - "type": "object", "additionalProperties": { "type": "string" - } + }, + "description": "The labels to set for that cluster.", + "type": "object" }, "labelFingerprint": { "description": "The fingerprint of the previous set of labels for this resource,\nused to detect conflicts. The fingerprint is initially generated by\nContainer Engine and changes after every request to modify or update\nlabels. 
You must always provide an up-to-date fingerprint hash when\nupdating or changing labels. Make a \u003ccode\u003eget()\u003c/code\u003e request to the\nresource to get the latest fingerprint.", @@ -2123,47 +2277,13 @@ "description": "NodePool contains the name and configuration for a cluster's node pool.\nNode pools are a set of nodes (i.e. VM's), with a common configuration and\nspecification, under the control of the cluster master. They may have a set\nof Kubernetes labels applied to them, which may be used to reference them\nduring pod scheduling. They may also be resized up or down, to accommodate\nthe workload.", "type": "object", "properties": { - "status": { - "enumDescriptions": [ - "Not set.", - "The PROVISIONING state indicates the node pool is being created.", - "The RUNNING state indicates the node pool has been created\nand is fully usable.", - "The RUNNING_WITH_ERROR state indicates the node pool has been created\nand is partially usable. Some error state has occurred and some\nfunctionality may be impaired. Customer may need to reissue a request\nor trigger a new update.", - "The RECONCILING state indicates that some work is actively being done on\nthe node pool, such as upgrading node software. Details can\nbe found in the `statusMessage` field.", - "The STOPPING state indicates the node pool is being deleted.", - "The ERROR state indicates the node pool may be unusable. Details\ncan be found in the `statusMessage` field." - ], - "enum": [ - "STATUS_UNSPECIFIED", - "PROVISIONING", - "RUNNING", - "RUNNING_WITH_ERROR", - "RECONCILING", - "STOPPING", - "ERROR" - ], - "description": "[Output only] The status of the nodes in this pool instance.", - "type": "string" - }, - "config": { - "$ref": "NodeConfig", - "description": "The node configuration of the pool." - }, - "statusMessage": { - "description": "[Output only] Additional information about the current status of this\nnode pool instance, if available.", - "type": "string" - }, - "name": { - "description": "The name of the node pool.", - "type": "string" - }, "autoscaling": { "description": "Autoscaler configuration for this NodePool. Autoscaler is enabled\nonly if a valid configuration is present.", "$ref": "NodePoolAutoscaling" }, "management": { - "description": "NodeManagement configuration for this NodePool.", - "$ref": "NodeManagement" + "$ref": "NodeManagement", + "description": "NodeManagement configuration for this NodePool." }, "initialNodeCount": { "format": "int32", @@ -2184,173 +2304,72 @@ "version": { "description": "[Output only] The version of the Kubernetes of this node.", "type": "string" - } - }, - "id": "NodePool" - }, - "NodeManagement": { - "description": "NodeManagement defines the set of node management services turned on for the\nnode pool.", - "type": "object", - "properties": { - "autoRepair": { - "description": "A flag that specifies whether the node auto-repair is enabled for the node\npool. If enabled, the nodes in this node pool will be monitored and, if\nthey fail health checks too many times, an automatic repair action will be\ntriggered.", - "type": "boolean" - }, - "autoUpgrade": { - "description": "A flag that specifies whether node auto-upgrade is enabled for the node\npool. 
If enabled, node auto-upgrade helps keep the nodes in your node pool\nup to date with the latest release version of Kubernetes.", - "type": "boolean" - }, - "upgradeOptions": { - "description": "Specifies the Auto Upgrade knobs for the node pool.", - "$ref": "AutoUpgradeOptions" - } - }, - "id": "NodeManagement" - }, - "CancelOperationRequest": { - "description": "CancelOperationRequest cancels a single operation.", - "type": "object", - "properties": {}, - "id": "CancelOperationRequest" - }, - "SetLegacyAbacRequest": { - "description": "SetLegacyAbacRequest enables or disables the ABAC authorization mechanism for\na cluster.", - "type": "object", - "properties": { - "enabled": { - "description": "Whether ABAC authorization will be enabled in the cluster.", - "type": "boolean" - } - }, - "id": "SetLegacyAbacRequest" - }, - "KubernetesDashboard": { - "description": "Configuration for the Kubernetes Dashboard.", - "type": "object", - "properties": { - "disabled": { - "description": "Whether the Kubernetes Dashboard is enabled for this cluster.", - "type": "boolean" - } - }, - "id": "KubernetesDashboard" - }, - "Operation": { - "description": "This operation resource represents operations that may have happened or are\nhappening on the cluster. All fields are output only.", - "type": "object", - "properties": { - "selfLink": { - "description": "Server-defined URL for the resource.", - "type": "string" - }, - "detail": { - "description": "Detailed operation progress, if available.", - "type": "string" - }, - "targetLink": { - "description": "Server-defined URL for the target of the operation.", - "type": "string" - }, - "operationType": { - "description": "The operation type.", - "type": "string", - "enumDescriptions": [ - "Not set.", - "Cluster create.", - "Cluster delete.", - "A master upgrade.", - "A node upgrade.", - "Cluster repair.", - "Cluster update.", - "Node pool create.", - "Node pool delete.", - "Set node pool management.", - "Automatic node pool repair.", - "Automatic node upgrade.", - "Set labels.", - "Set/generate master auth materials", - "Set node pool size.", - "Updates network policy for a cluster." - ], - "enum": [ - "TYPE_UNSPECIFIED", - "CREATE_CLUSTER", - "DELETE_CLUSTER", - "UPGRADE_MASTER", - "UPGRADE_NODES", - "REPAIR_CLUSTER", - "UPDATE_CLUSTER", - "CREATE_NODE_POOL", - "DELETE_NODE_POOL", - "SET_NODE_POOL_MANAGEMENT", - "AUTO_REPAIR_NODES", - "AUTO_UPGRADE_NODES", - "SET_LABELS", - "SET_MASTER_AUTH", - "SET_NODE_POOL_SIZE", - "SET_NETWORK_POLICY" - ] - }, - "zone": { - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the operation\nis taking place.", - "type": "string" }, "status": { "enumDescriptions": [ "Not set.", - "The operation has been created.", - "The operation is currently running.", - "The operation is done, either cancelled or completed.", - "The operation is aborting." + "The PROVISIONING state indicates the node pool is being created.", + "The RUNNING state indicates the node pool has been created\nand is fully usable.", + "The RUNNING_WITH_ERROR state indicates the node pool has been created\nand is partially usable. Some error state has occurred and some\nfunctionality may be impaired. Customer may need to reissue a request\nor trigger a new update.", + "The RECONCILING state indicates that some work is actively being done on\nthe node pool, such as upgrading node software. 
Details can\nbe found in the `statusMessage` field.", + "The STOPPING state indicates the node pool is being deleted.", + "The ERROR state indicates the node pool may be unusable. Details\ncan be found in the `statusMessage` field." ], "enum": [ "STATUS_UNSPECIFIED", - "PENDING", + "PROVISIONING", "RUNNING", - "DONE", - "ABORTING" + "RUNNING_WITH_ERROR", + "RECONCILING", + "STOPPING", + "ERROR" ], - "description": "The current status of the operation.", + "description": "[Output only] The status of the nodes in this pool instance.", "type": "string" }, + "config": { + "description": "The node configuration of the pool.", + "$ref": "NodeConfig" + }, "name": { - "description": "The server-assigned ID for the operation.", + "description": "The name of the node pool.", "type": "string" }, "statusMessage": { - "description": "If an error has occurred, a textual description of the error.", + "description": "[Output only] Additional information about the current status of this\nnode pool instance, if available.", "type": "string" } }, - "id": "Operation" + "id": "NodePool" }, - "AddonsConfig": { - "description": "Configuration for the addons that can be automatically spun up in the\ncluster, enabling additional functionality.", + "NodeManagement": { + "description": "NodeManagement defines the set of node management services turned on for the\nnode pool.", "type": "object", "properties": { - "kubernetesDashboard": { - "description": "Configuration for the Kubernetes Dashboard.", - "$ref": "KubernetesDashboard" + "autoRepair": { + "description": "A flag that specifies whether the node auto-repair is enabled for the node\npool. If enabled, the nodes in this node pool will be monitored and, if\nthey fail health checks too many times, an automatic repair action will be\ntriggered.", + "type": "boolean" }, - "horizontalPodAutoscaling": { - "$ref": "HorizontalPodAutoscaling", - "description": "Configuration for the horizontal pod autoscaling feature, which\nincreases or decreases the number of replica pods a replication controller\nhas based on the resource usage of the existing pods." + "autoUpgrade": { + "description": "A flag that specifies whether node auto-upgrade is enabled for the node\npool. If enabled, node auto-upgrade helps keep the nodes in your node pool\nup to date with the latest release version of Kubernetes.", + "type": "boolean" }, - "httpLoadBalancing": { - "$ref": "HttpLoadBalancing", - "description": "Configuration for the HTTP (L7) load balancing controller addon, which\nmakes it easy to set up HTTP load balancers for services in a cluster." + "upgradeOptions": { + "$ref": "AutoUpgradeOptions", + "description": "Specifies the Auto Upgrade knobs for the node pool." 
} }, - "id": "AddonsConfig" + "id": "NodeManagement" } }, "protocol": "rest", "icons": { - "x16": "http://www.google.com/images/icons/product/search-16.gif", - "x32": "http://www.google.com/images/icons/product/search-32.gif" + "x32": "http://www.google.com/images/icons/product/search-32.gif", + "x16": "http://www.google.com/images/icons/product/search-16.gif" }, "version": "v1", "baseUrl": "https://container.googleapis.com/", + "canonicalName": "Container", "auth": { "oauth2": { "scopes": { @@ -2363,5 +2382,15 @@ "servicePath": "", "description": "The Google Container Engine API is used for building and managing container based applications, powered by the open source Kubernetes technology.", "kind": "discovery#restDescription", - "rootUrl": "https://container.googleapis.com/" + "rootUrl": "https://container.googleapis.com/", + "basePath": "", + "ownerDomain": "google.com", + "name": "container", + "batchPath": "batch", + "id": "container:v1", + "documentationLink": "https://cloud.google.com/container-engine/", + "revision": "20170825", + "title": "Google Container Engine API", + "discoveryVersion": "v1", + "ownerName": "Google" } diff --git a/vendor/google.golang.org/api/container/v1/container-gen.go b/vendor/google.golang.org/api/container/v1/container-gen.go index 839d7b6eb7c2..89add684e4a2 100644 --- a/vendor/google.golang.org/api/container/v1/container-gen.go +++ b/vendor/google.golang.org/api/container/v1/container-gen.go @@ -850,28 +850,43 @@ func (s *HttpLoadBalancing) MarshalJSON() ([]byte, error) { // IPAllocationPolicy: Configuration for controlling how IPs are // allocated in the cluster. type IPAllocationPolicy struct { - // ClusterIpv4Cidr: The IP address range for the cluster pod IPs. If - // this field is set, then + // ClusterIpv4Cidr: This field is deprecated, use + // cluster_ipv4_cidr_block. + ClusterIpv4Cidr string `json:"clusterIpv4Cidr,omitempty"` + + // ClusterIpv4CidrBlock: The IP address range for the cluster pod IPs. + // If this field is set, then // `cluster.cluster_ipv4_cidr` must be left blank. // // This field is only applicable when `use_ip_aliases` is true. // - // Set to blank to have a range will be chosen with the default - // size. + // Set to blank to have a range chosen with the default size. // - // Set to /netmask (e.g. `/14`) to have a range be chosen with a + // Set to /netmask (e.g. `/14`) to have a range chosen with a // specific // netmask. // - // Set to a + // Set to + // a // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) - // no - // tation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks + // + // notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks // (e.g. // `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific // range // to use. - ClusterIpv4Cidr string `json:"clusterIpv4Cidr,omitempty"` + ClusterIpv4CidrBlock string `json:"clusterIpv4CidrBlock,omitempty"` + + // ClusterSecondaryRangeName: The name of the secondary range to be used + // for the cluster CIDR + // block. The secondary range will be used for pod IP + // addresses. This must be an existing secondary range associated + // with the cluster subnetwork. + // + // This field is only applicable with use_ip_aliases is true + // and + // create_subnetwork is false. + ClusterSecondaryRangeName string `json:"clusterSecondaryRangeName,omitempty"` // CreateSubnetwork: Whether a new subnetwork will be created // automatically for the cluster. 
@@ -879,50 +894,68 @@ type IPAllocationPolicy struct { // This field is only applicable when `use_ip_aliases` is true. CreateSubnetwork bool `json:"createSubnetwork,omitempty"` - // NodeIpv4Cidr: The IP address range of the instance IPs in this + // NodeIpv4Cidr: This field is deprecated, use node_ipv4_cidr_block. + NodeIpv4Cidr string `json:"nodeIpv4Cidr,omitempty"` + + // NodeIpv4CidrBlock: The IP address range of the instance IPs in this // cluster. // // This is applicable only if `create_subnetwork` is true. // - // Set to blank to have a range will be chosen with the default - // size. + // Set to blank to have a range chosen with the default size. // - // Set to /netmask (e.g. `/14`) to have a range be chosen with a + // Set to /netmask (e.g. `/14`) to have a range chosen with a // specific // netmask. // - // Set to a + // Set to + // a // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) - // no - // tation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks + // + // notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks // (e.g. // `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific // range // to use. - NodeIpv4Cidr string `json:"nodeIpv4Cidr,omitempty"` + NodeIpv4CidrBlock string `json:"nodeIpv4CidrBlock,omitempty"` + + // ServicesIpv4Cidr: This field is deprecated, use + // services_ipv4_cidr_block. + ServicesIpv4Cidr string `json:"servicesIpv4Cidr,omitempty"` - // ServicesIpv4Cidr: The IP address range of the services IPs in this - // cluster. If blank, a range + // ServicesIpv4CidrBlock: The IP address range of the services IPs in + // this cluster. If blank, a range // will be automatically chosen with the default size. // // This field is only applicable when `use_ip_aliases` is true. // - // Set to blank to have a range will be chosen with the default - // size. + // Set to blank to have a range chosen with the default size. // - // Set to /netmask (e.g. `/14`) to have a range be chosen with a + // Set to /netmask (e.g. `/14`) to have a range chosen with a // specific // netmask. // - // Set to a + // Set to + // a // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) - // no - // tation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks + // + // notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks // (e.g. // `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific // range // to use. - ServicesIpv4Cidr string `json:"servicesIpv4Cidr,omitempty"` + ServicesIpv4CidrBlock string `json:"servicesIpv4CidrBlock,omitempty"` + + // ServicesSecondaryRangeName: The name of the secondary range to be + // used as for the services + // CIDR block. The secondary range will be used for service + // ClusterIPs. This must be an existing secondary range associated + // with the cluster subnetwork. + // + // This field is only applicable with use_ip_aliases is true + // and + // create_subnetwork is false. + ServicesSecondaryRangeName string `json:"servicesSecondaryRangeName,omitempty"` // SubnetworkName: A custom subnetwork name to be used if // `create_subnetwork` is true. If @@ -1313,7 +1346,8 @@ type NodeConfig struct { // and conflicts should be avoided. 
// For more information, including usage and the valid values, // see: - // http://kubernetes.io/v1.1/docs/user-guide/labels.html + // https://kubernetes.io/docs/concepts/overview/working-with-objects + // /labels/ Labels map[string]string `json:"labels,omitempty"` // LocalSsdCount: The number of local SSD disks to be attached to the @@ -1627,6 +1661,11 @@ type Operation struct { // Detail: Detailed operation progress, if available. Detail string `json:"detail,omitempty"` + // EndTime: [Output only] The time the operation completed, + // in + // [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. + EndTime string `json:"endTime,omitempty"` + // Name: The server-assigned ID for the operation. Name string `json:"name,omitempty"` @@ -1654,6 +1693,11 @@ type Operation struct { // SelfLink: Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` + // StartTime: [Output only] The time the operation started, + // in + // [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. + StartTime string `json:"startTime,omitempty"` + // Status: The current status of the operation. // // Possible values: diff --git a/vendor/google.golang.org/api/gensupport/media.go b/vendor/google.golang.org/api/gensupport/media.go index c6410e89a97a..f3e77fc52979 100644 --- a/vendor/google.golang.org/api/gensupport/media.go +++ b/vendor/google.golang.org/api/gensupport/media.go @@ -174,26 +174,126 @@ func typeHeader(contentType string) textproto.MIMEHeader { // PrepareUpload determines whether the data in the supplied reader should be // uploaded in a single request, or in sequential chunks. // chunkSize is the size of the chunk that media should be split into. -// If chunkSize is non-zero and the contents of media do not fit in a single -// chunk (or there is an error reading media), then media will be returned as a -// MediaBuffer. Otherwise, media will be returned as a Reader. +// +// If chunkSize is zero, media is returned as the first value, and the other +// two return values are nil, true. +// +// Otherwise, a MediaBuffer is returned, along with a bool indicating whether the +// contents of media fit in a single chunk. // // After PrepareUpload has been called, media should no longer be used: the // media content should be accessed via one of the return values. -func PrepareUpload(media io.Reader, chunkSize int) (io.Reader, *MediaBuffer) { +func PrepareUpload(media io.Reader, chunkSize int) (r io.Reader, mb *MediaBuffer, singleChunk bool) { if chunkSize == 0 { // do not chunk - return media, nil + return media, nil, true + } + mb = NewMediaBuffer(media, chunkSize) + _, _, _, err := mb.Chunk() + // If err is io.EOF, we can upload this in a single request. Otherwise, err is + // either nil or a non-EOF error. If it is the latter, then the next call to + // mb.Chunk will return the same error. Returning a MediaBuffer ensures that this + // error will be handled at some point. + return nil, mb, err == io.EOF +} + +// MediaInfo holds information for media uploads. It is intended for use by generated +// code only. +type MediaInfo struct { + // At most one of Media and MediaBuffer will be set. + media io.Reader + buffer *MediaBuffer + singleChunk bool + mType string + size int64 // mediaSize, if known. Used only for calls to progressUpdater_. + progressUpdater googleapi.ProgressUpdater +} + +// NewInfoFromMedia should be invoked from the Media method of a call. It returns a +// MediaInfo populated with chunk size and content type, and a reader or MediaBuffer +// if needed. 
+func NewInfoFromMedia(r io.Reader, options []googleapi.MediaOption) *MediaInfo { + mi := &MediaInfo{} + opts := googleapi.ProcessMediaOptions(options) + if !opts.ForceEmptyContentType { + r, mi.mType = DetermineContentType(r, opts.ContentType) + } + mi.media, mi.buffer, mi.singleChunk = PrepareUpload(r, opts.ChunkSize) + return mi +} + +// NewInfoFromResumableMedia should be invoked from the ResumableMedia method of a +// call. It returns a MediaInfo using the given reader, size and media type. +func NewInfoFromResumableMedia(r io.ReaderAt, size int64, mediaType string) *MediaInfo { + rdr := ReaderAtToReader(r, size) + rdr, mType := DetermineContentType(rdr, mediaType) + return &MediaInfo{ + size: size, + mType: mType, + buffer: NewMediaBuffer(rdr, googleapi.DefaultUploadChunkSize), + media: nil, + singleChunk: false, + } +} + +func (mi *MediaInfo) SetProgressUpdater(pu googleapi.ProgressUpdater) { + if mi != nil { + mi.progressUpdater = pu } +} - mb := NewMediaBuffer(media, chunkSize) - rdr, _, _, err := mb.Chunk() +// UploadType determines the type of upload: a single request, or a resumable +// series of requests. +func (mi *MediaInfo) UploadType() string { + if mi.singleChunk { + return "multipart" + } + return "resumable" +} - if err == io.EOF { // we can upload this in a single request - return rdr, nil +// UploadRequest sets up an HTTP request for media upload. It adds headers +// as necessary, and returns a replacement for the body. +func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newBody io.Reader, cleanup func()) { + cleanup = func() {} + if mi == nil { + return body, cleanup + } + var media io.Reader + if mi.media != nil { + // This only happens when the caller has turned off chunking. In that + // case, we write all of media in a single non-retryable request. + media = mi.media + } else if mi.singleChunk { + // The data fits in a single chunk, which has now been read into the MediaBuffer. + // We obtain that chunk so we can write it in a single request. The request can + // be retried because the data is stored in the MediaBuffer. + media, _, _, _ = mi.buffer.Chunk() + } + if media != nil { + combined, ctype := CombineBodyMedia(body, "application/json", media, mi.mType) + cleanup = func() { combined.Close() } + reqHeaders.Set("Content-Type", ctype) + body = combined } - // err might be a non-EOF error. If it is, the next call to mb.Chunk will - // return the same error. Returning a MediaBuffer ensures that this error - // will be handled at some point. + if mi.buffer != nil && mi.mType != "" && !mi.singleChunk { + reqHeaders.Set("X-Upload-Content-Type", mi.mType) + } + return body, cleanup +} - return nil, mb +// ResumableUpload returns an appropriately configured ResumableUpload value if the +// upload is resumable, or nil otherwise. +func (mi *MediaInfo) ResumableUpload(locURI string) *ResumableUpload { + if mi == nil || mi.singleChunk { + return nil + } + return &ResumableUpload{ + URI: locURI, + Media: mi.buffer, + MediaType: mi.mType, + Callback: func(curr int64) { + if mi.progressUpdater != nil { + mi.progressUpdater(curr, mi.size) + } + }, + } } diff --git a/vendor/k8s.io/api/LICENSE b/vendor/k8s.io/api/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/k8s.io/api/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
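As a rough illustration of the reworked upload flow vendored in google.golang.org/api/gensupport above (the three-value PrepareUpload and the new MediaInfo helpers), a minimal sketch follows. It is not part of this patch: the chunk-size option, payload, and upload URI are placeholder assumptions, and the calls shown are normally emitted by generated client code rather than written by hand.

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"strings"

	"google.golang.org/api/gensupport"
	"google.golang.org/api/googleapi"
)

func main() {
	// Placeholder payload standing in for real media content.
	payload := strings.NewReader("hello, upload")

	// A generated Media() method builds a MediaInfo from the caller's options;
	// because the payload is smaller than one chunk, it is buffered as a single chunk.
	mi := gensupport.NewInfoFromMedia(payload, []googleapi.MediaOption{
		googleapi.ChunkSize(googleapi.MinUploadChunkSize),
	})

	// Reports "multipart" for single-chunk data, "resumable" otherwise.
	fmt.Println("uploadType:", mi.UploadType())

	// UploadRequest combines the JSON request body with the buffered chunk and
	// sets the resulting multipart Content-Type on reqHeaders.
	reqHeaders := make(http.Header)
	body, cleanup := mi.UploadRequest(reqHeaders, bytes.NewReader([]byte(`{"name":"example-object"}`)))
	defer cleanup()

	n, _ := io.Copy(ioutil.Discard, body)
	fmt.Println("content-type:", reqHeaders.Get("Content-Type"))
	fmt.Println("combined body bytes:", n)

	// For payloads that do not fit in one chunk, ResumableUpload would return a
	// configured helper; here it is nil because the data fit in a single chunk.
	fmt.Println("resumable:", mi.ResumableUpload("https://example.invalid/upload") != nil)
}
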
diff --git a/vendor/k8s.io/api/admission/v1alpha1/BUILD b/vendor/k8s.io/api/admission/v1alpha1/BUILD deleted file mode 100644 index fd8f53569533..000000000000 --- a/vendor/k8s.io/api/admission/v1alpha1/BUILD +++ /dev/null @@ -1,42 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated.pb.go", - "register.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.deepcopy.go", - ], - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/k8s.io/api/authentication/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) - -filegroup( - name = "go_default_library_protos", - srcs = ["generated.proto"], - visibility = ["//visibility:public"], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git a/vendor/k8s.io/api/admission/v1alpha1/doc.go b/vendor/k8s.io/api/admission/v1alpha1/doc.go deleted file mode 100644 index 942732201dde..000000000000 --- a/vendor/k8s.io/api/admission/v1alpha1/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package,register -// +k8s:openapi-gen=false - -// +groupName=admission.k8s.io -package v1alpha1 diff --git a/vendor/k8s.io/api/admission/v1alpha1/generated.pb.go b/vendor/k8s.io/api/admission/v1alpha1/generated.pb.go deleted file mode 100644 index 03fa1f6be8d1..000000000000 --- a/vendor/k8s.io/api/admission/v1alpha1/generated.pb.go +++ /dev/null @@ -1,1031 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/vendor/k8s.io/api/admission/v1alpha1/generated.proto -// DO NOT EDIT! - -/* - Package v1alpha1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/admission/v1alpha1/generated.proto - - It has these top-level messages: - AdmissionReview - AdmissionReviewSpec - AdmissionReviewStatus -*/ -package v1alpha1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -func (m *AdmissionReview) Reset() { *m = AdmissionReview{} } -func (*AdmissionReview) ProtoMessage() {} -func (*AdmissionReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *AdmissionReviewSpec) Reset() { *m = AdmissionReviewSpec{} } -func (*AdmissionReviewSpec) ProtoMessage() {} -func (*AdmissionReviewSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *AdmissionReviewStatus) Reset() { *m = AdmissionReviewStatus{} } -func (*AdmissionReviewStatus) ProtoMessage() {} -func (*AdmissionReviewStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func init() { - proto.RegisterType((*AdmissionReview)(nil), "k8s.io.api.admission.v1alpha1.AdmissionReview") - proto.RegisterType((*AdmissionReviewSpec)(nil), "k8s.io.api.admission.v1alpha1.AdmissionReviewSpec") - proto.RegisterType((*AdmissionReviewStatus)(nil), "k8s.io.api.admission.v1alpha1.AdmissionReviewStatus") -} -func (m *AdmissionReview) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AdmissionReview) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n1, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n2, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - return i, nil -} - -func (m *AdmissionReviewSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AdmissionReviewSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Kind.Size())) - n3, err := m.Kind.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n4, err := m.Object.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.OldObject.Size())) - n5, err := m.OldObject.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operation))) - i += 
copy(dAtA[i:], m.Operation) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) - n6, err := m.Resource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubResource))) - i += copy(dAtA[i:], m.SubResource) - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UserInfo.Size())) - n7, err := m.UserInfo.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - return i, nil -} - -func (m *AdmissionReviewStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AdmissionReviewStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - if m.Allowed { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.Result != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Result.Size())) - n8, err := m.Result.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - } - return i, nil -} - -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *AdmissionReview) Size() (n int) { - var l int - _ = l - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *AdmissionReviewSpec) Size() (n int) { - var l int - _ = l - l = m.Kind.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Object.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.OldObject.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Operation) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Resource.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.SubResource) - n += 1 + l + sovGenerated(uint64(l)) - l = m.UserInfo.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *AdmissionReviewStatus) Size() (n int) { - var l int - _ = l - n += 2 - if m.Result != nil { - l = m.Result.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *AdmissionReview) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AdmissionReview{`, - 
`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "AdmissionReviewSpec", "AdmissionReviewSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "AdmissionReviewStatus", "AdmissionReviewStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *AdmissionReviewSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AdmissionReviewSpec{`, - `Kind:` + strings.Replace(strings.Replace(this.Kind.String(), "GroupVersionKind", "k8s_io_apimachinery_pkg_apis_meta_v1.GroupVersionKind", 1), `&`, ``, 1) + `,`, - `Object:` + strings.Replace(strings.Replace(this.Object.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, - `OldObject:` + strings.Replace(strings.Replace(this.OldObject.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, - `Operation:` + fmt.Sprintf("%v", this.Operation) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Resource:` + strings.Replace(strings.Replace(this.Resource.String(), "GroupVersionResource", "k8s_io_apimachinery_pkg_apis_meta_v1.GroupVersionResource", 1), `&`, ``, 1) + `,`, - `SubResource:` + fmt.Sprintf("%v", this.SubResource) + `,`, - `UserInfo:` + strings.Replace(strings.Replace(this.UserInfo.String(), "UserInfo", "k8s_io_api_authentication_v1.UserInfo", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *AdmissionReviewStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AdmissionReviewStatus{`, - `Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`, - `Result:` + strings.Replace(fmt.Sprintf("%v", this.Result), "Status", "k8s_io_apimachinery_pkg_apis_meta_v1.Status", 1) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *AdmissionReview) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AdmissionReview: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AdmissionReview: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 
64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AdmissionReviewSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AdmissionReviewSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AdmissionReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Kind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OldObject", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.OldObject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Operation", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - 
} - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Operation = Operation(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SubResource", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SubResource = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UserInfo", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.UserInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - 
iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AdmissionReviewStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AdmissionReviewStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AdmissionReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Allowed", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Allowed = bool(v != 0) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Result == nil { - m.Result = &k8s_io_apimachinery_pkg_apis_meta_v1.Status{} - } - if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: 
- for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admission/v1alpha1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 645 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0xcf, 0x4f, 0x13, 0x41, - 0x14, 0xc7, 0xbb, 0x5a, 0x4a, 0x3b, 0x18, 0xd1, 0x21, 0x26, 0x1b, 0x12, 0x17, 0xc2, 0xc1, 0x60, - 0x02, 0xb3, 0x01, 0x91, 0x18, 0xe3, 0x85, 0x26, 0x6a, 0x8c, 0x09, 0x98, 0x01, 0x8c, 0x31, 0xc6, - 0x64, 0xba, 0x7d, 0xb4, 0x63, 0xbb, 0x33, 0x9b, 0x9d, 0xd9, 0xa2, 0x37, 0xff, 0x04, 0x0f, 0xfe, - 0x1d, 0xfe, 0x17, 0x26, 0x1c, 0x39, 0x72, 0x22, 0x52, 0xff, 0x0b, 0x4f, 0x66, 0x67, 0x67, 0x77, - 0x4b, 0xa1, 0x2a, 0x9e, 0xda, 0xf7, 0xe3, 0xfb, 0x99, 0xf7, 0xde, 0xbc, 0x59, 0xf4, 0xac, 0xf7, - 0x48, 0x11, 0x2e, 0xfd, 0x5e, 0xd2, 0x82, 0x58, 0x80, 0x06, 0xe5, 0x0f, 0x40, 0xb4, 0x65, 0xec, - 0xdb, 0x00, 0x8b, 0xb8, 0xcf, 0xda, 0x21, 0x57, 0x8a, 0x4b, 0xe1, 0x0f, 0xd6, 0x58, 0x3f, 0xea, - 0xb2, 0x35, 0xbf, 0x03, 0x02, 0x62, 0xa6, 0xa1, 0x4d, 0xa2, 0x58, 0x6a, 0x89, 0xef, 0x66, 0xe9, - 0x84, 0x45, 0x9c, 0x14, 0xe9, 0x24, 0x4f, 0x9f, 0x5f, 0xed, 0x70, 0xdd, 0x4d, 0x5a, 0x24, 0x90, - 0xa1, 0xdf, 0x91, 0x1d, 0xe9, 0x1b, 0x55, 0x2b, 0x39, 0x30, 0x96, 0x31, 0xcc, 0xbf, 0x8c, 0x36, - 0xbf, 0x32, 0x7a, 0x78, 0xa2, 0xbb, 0x20, 0x34, 0x0f, 0x98, 0xce, 0x2a, 0x18, 0x3f, 0x7b, 0x7e, - 0xa3, 0xcc, 0x0e, 0x59, 0xd0, 0xe5, 0x02, 0xe2, 0x4f, 0x7e, 0xd4, 0xeb, 0xa4, 0x0e, 0xe5, 0x87, - 0xa0, 0xd9, 0x65, 0x2a, 0x7f, 0x92, 0x2a, 0x4e, 0x84, 0xe6, 0x21, 0x5c, 0x10, 0x6c, 0xfe, 0x4d, - 0xa0, 0x82, 0x2e, 0x84, 0xec, 0x82, 0xee, 0xc1, 0x24, 0x5d, 0xa2, 0x79, 0xdf, 0xe7, 0x42, 0x2b, - 0x1d, 0x8f, 0x8b, 0x96, 0xbe, 0x3b, 0x68, 0x76, 0x2b, 0x9f, 0x23, 0x85, 0x01, 0x87, 0x43, 0xbc, - 0x87, 0xaa, 0x2a, 0x82, 0xc0, 0x75, 0x16, 0x9d, 0xe5, 0x99, 0xf5, 0x75, 0xf2, 0xc7, 0x91, 0x93, - 0x31, 0xf5, 0x6e, 0x04, 0x41, 0xf3, 0xc6, 0xd1, 0xe9, 0x42, 0x65, 0x78, 0xba, 0x50, 0x4d, 0x2d, - 0x6a, 0x68, 0xf8, 0x1d, 0xaa, 0x29, 0xcd, 0x74, 0xa2, 0xdc, 0x6b, 0x86, 0xbb, 0x71, 0x45, 0xae, - 0xd1, 0x36, 0x6f, 0x5a, 0x72, 0x2d, 0xb3, 0xa9, 0x65, 0x2e, 0x7d, 0x9b, 0x42, 0x73, 0x97, 0x54, - 0x82, 0xdf, 0xa0, 0x6a, 0x8f, 0x8b, 0xb6, 0xed, 0x65, 0x73, 0xe4, 0xcc, 0x62, 0x46, 0x24, 0xea, - 0x75, 0x52, 0x87, 0x22, 0xe9, 0x15, 0x92, 0xc1, 0x1a, 0x79, 0x1e, 0xcb, 0x24, 0x7a, 0x0d, 0x71, - 0xca, 0x7a, 0xc9, 0x45, 0xbb, 0xec, 0x27, 0xb5, 0xa8, 0x21, 0xe2, 0x7d, 0x54, 0x93, 0xad, 0x0f, - 0x10, 0x68, 0xdb, 0xcf, 0xea, 0x44, 0xb6, 0xbd, 0x37, 0x42, 0xd9, 0xe1, 0xd3, 0x8f, 0x1a, 0x44, - 0x8a, 0x2d, 0x1b, 0xd9, 0x31, 0x10, 0x6a, 0x61, 0xf8, 0x3d, 0x6a, 0xc8, 0x7e, 0x3b, 0x73, 0xba, - 
0xd7, 0xff, 0x87, 0x7c, 0xdb, 0x92, 0x1b, 0x3b, 0x39, 0x87, 0x96, 0x48, 0xfc, 0x04, 0x35, 0x64, - 0x94, 0xae, 0x00, 0x97, 0xc2, 0xad, 0x2e, 0x3a, 0xcb, 0x8d, 0xa6, 0x57, 0x08, 0xf2, 0xc0, 0xaf, - 0x51, 0x83, 0x96, 0x02, 0xbc, 0x88, 0xaa, 0x82, 0x85, 0xe0, 0x4e, 0x19, 0x61, 0x31, 0x96, 0x6d, - 0x16, 0x02, 0x35, 0x11, 0xec, 0xa3, 0x46, 0xfa, 0xab, 0x22, 0x16, 0x80, 0x5b, 0x33, 0x69, 0x45, - 0x41, 0xdb, 0x79, 0x80, 0x96, 0x39, 0xb8, 0x8b, 0xea, 0x31, 0x28, 0x99, 0xc4, 0x01, 0xb8, 0xd3, - 0xa6, 0xdf, 0xc7, 0x57, 0xbf, 0x25, 0x6a, 0x09, 0xcd, 0x5b, 0xf6, 0xac, 0x7a, 0xee, 0xa1, 0x05, - 0x1d, 0x3f, 0x44, 0x33, 0x2a, 0x69, 0xe5, 0x01, 0xb7, 0x6e, 0x8a, 0x9b, 0xb3, 0x82, 0x99, 0xdd, - 0x32, 0x44, 0x47, 0xf3, 0xf0, 0x1e, 0xaa, 0x27, 0x0a, 0xe2, 0x17, 0xe2, 0x40, 0xba, 0x0d, 0x53, - 0xe0, 0xbd, 0x73, 0xab, 0x7b, 0xee, 0xbb, 0x91, 0x16, 0xb6, 0x6f, 0xb3, 0xcb, 0x62, 0x72, 0x0f, - 0x2d, 0x48, 0x4b, 0x5f, 0x1d, 0x74, 0xe7, 0xd2, 0x15, 0xc7, 0xf7, 0xd1, 0x34, 0xeb, 0xf7, 0xe5, - 0x21, 0x64, 0x5b, 0x5b, 0x6f, 0xce, 0x5a, 0xcc, 0xf4, 0x56, 0xe6, 0xa6, 0x79, 0x1c, 0xbf, 0x1a, - 0x7b, 0x53, 0x2b, 0xff, 0x36, 0x39, 0xfb, 0x96, 0x50, 0xba, 0x7e, 0x14, 0x54, 0xd2, 0xd7, 0xf9, - 0x3b, 0x6a, 0x92, 0xa3, 0x33, 0xaf, 0x72, 0x7c, 0xe6, 0x55, 0x4e, 0xce, 0xbc, 0xca, 0xe7, 0xa1, - 0xe7, 0x1c, 0x0d, 0x3d, 0xe7, 0x78, 0xe8, 0x39, 0x27, 0x43, 0xcf, 0xf9, 0x31, 0xf4, 0x9c, 0x2f, - 0x3f, 0xbd, 0xca, 0xdb, 0x7a, 0xfe, 0x4a, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, 0xa0, 0x57, 0xa7, - 0x50, 0xd8, 0x05, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/admission/v1alpha1/generated.proto b/vendor/k8s.io/api/admission/v1alpha1/generated.proto deleted file mode 100644 index 8859a46fa9c6..000000000000 --- a/vendor/k8s.io/api/admission/v1alpha1/generated.proto +++ /dev/null @@ -1,95 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.api.admission.v1alpha1; - -import "k8s.io/api/authentication/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; -import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1alpha1"; - -// AdmissionReview describes an admission request. -message AdmissionReview { - // Spec describes the attributes for the admission request. - // Since this admission controller is non-mutating the webhook should avoid setting this in its response to avoid the - // cost of deserializing it. - // +optional - optional AdmissionReviewSpec spec = 1; - - // Status is filled in by the webhook and indicates whether the admission request should be permitted. - // +optional - optional AdmissionReviewStatus status = 2; -} - -// AdmissionReviewSpec describes the admission.Attributes for the admission request. 
-message AdmissionReviewSpec { - // Kind is the type of object being manipulated. For example: Pod - optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind kind = 1; - - // Object is the object from the incoming request prior to default values being applied - optional k8s.io.apimachinery.pkg.runtime.RawExtension object = 2; - - // OldObject is the existing object. Only populated for UPDATE requests. - // +optional - optional k8s.io.apimachinery.pkg.runtime.RawExtension oldObject = 3; - - // Operation is the operation being performed - optional string operation = 4; - - // Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and - // rely on the server to generate the name. If that is the case, this method will return the empty string. - // +optional - optional string name = 5; - - // Namespace is the namespace associated with the request (if any). - // +optional - optional string namespace = 6; - - // Resource is the name of the resource being requested. This is not the kind. For example: pods - optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource resource = 7; - - // SubResource is the name of the subresource being requested. This is a different resource, scoped to the parent - // resource, but it may have a different kind. For instance, /pods has the resource "pods" and the kind "Pod", while - // /pods/foo/status has the resource "pods", the sub resource "status", and the kind "Pod" (because status operates on - // pods). The binding resource for a pod though may be /pods/foo/binding, which has resource "pods", subresource - // "binding", and kind "Binding". - // +optional - optional string subResource = 8; - - // UserInfo is information about the requesting user - optional k8s.io.api.authentication.v1.UserInfo userInfo = 9; -} - -// AdmissionReviewStatus describes the status of the admission request. -message AdmissionReviewStatus { - // Allowed indicates whether or not the admission request was permitted. - optional bool allowed = 1; - - // Result contains extra details into why an admission request was denied. - // This field IS NOT consulted in any way if "Allowed" is "true". - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Status status = 2; -} - diff --git a/vendor/k8s.io/api/admission/v1alpha1/register.go b/vendor/k8s.io/api/admission/v1alpha1/register.go deleted file mode 100644 index ea6d5609af37..000000000000 --- a/vendor/k8s.io/api/admission/v1alpha1/register.go +++ /dev/null @@ -1,51 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name for this API. 
-const GroupName = "admission.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -// Adds the list of known types to api.Scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &AdmissionReview{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/k8s.io/api/admission/v1alpha1/types.go b/vendor/k8s.io/api/admission/v1alpha1/types.go deleted file mode 100644 index 5defadca6bdb..000000000000 --- a/vendor/k8s.io/api/admission/v1alpha1/types.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - authenticationv1 "k8s.io/api/authentication/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// AdmissionReview describes an admission request. -type AdmissionReview struct { - metav1.TypeMeta `json:",inline"` - // Spec describes the attributes for the admission request. - // Since this admission controller is non-mutating the webhook should avoid setting this in its response to avoid the - // cost of deserializing it. - // +optional - Spec AdmissionReviewSpec `json:"spec,omitempty" protobuf:"bytes,1,opt,name=spec"` - // Status is filled in by the webhook and indicates whether the admission request should be permitted. - // +optional - Status AdmissionReviewStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` -} - -// AdmissionReviewSpec describes the admission.Attributes for the admission request. -type AdmissionReviewSpec struct { - // Kind is the type of object being manipulated. For example: Pod - Kind metav1.GroupVersionKind `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"` - // Object is the object from the incoming request prior to default values being applied - Object runtime.RawExtension `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` - // OldObject is the existing object. Only populated for UPDATE requests. - // +optional - OldObject runtime.RawExtension `json:"oldObject,omitempty" protobuf:"bytes,3,opt,name=oldObject"` - // Operation is the operation being performed - Operation Operation `json:"operation,omitempty" protobuf:"bytes,4,opt,name=operation"` - // Name is the name of the object as presented in the request. 
On a CREATE operation, the client may omit name and - // rely on the server to generate the name. If that is the case, this method will return the empty string. - // +optional - Name string `json:"name,omitempty" protobuf:"bytes,5,opt,name=name"` - // Namespace is the namespace associated with the request (if any). - // +optional - Namespace string `json:"namespace,omitempty" protobuf:"bytes,6,opt,name=namespace"` - // Resource is the name of the resource being requested. This is not the kind. For example: pods - Resource metav1.GroupVersionResource `json:"resource,omitempty" protobuf:"bytes,7,opt,name=resource"` - // SubResource is the name of the subresource being requested. This is a different resource, scoped to the parent - // resource, but it may have a different kind. For instance, /pods has the resource "pods" and the kind "Pod", while - // /pods/foo/status has the resource "pods", the sub resource "status", and the kind "Pod" (because status operates on - // pods). The binding resource for a pod though may be /pods/foo/binding, which has resource "pods", subresource - // "binding", and kind "Binding". - // +optional - SubResource string `json:"subResource,omitempty" protobuf:"bytes,8,opt,name=subResource"` - // UserInfo is information about the requesting user - UserInfo authenticationv1.UserInfo `json:"userInfo,omitempty" protobuf:"bytes,9,opt,name=userInfo"` -} - -// AdmissionReviewStatus describes the status of the admission request. -type AdmissionReviewStatus struct { - // Allowed indicates whether or not the admission request was permitted. - Allowed bool `json:"allowed" protobuf:"varint,1,opt,name=allowed"` - // Result contains extra details into why an admission request was denied. - // This field IS NOT consulted in any way if "Allowed" is "true". - // +optional - Result *metav1.Status `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` -} - -// Operation is the type of resource operation being checked for admission control -type Operation string - -// Operation constants -const ( - Create Operation = "CREATE" - Update Operation = "UPDATE" - Delete Operation = "DELETE" - Connect Operation = "CONNECT" -) diff --git a/vendor/k8s.io/api/admission/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admission/v1alpha1/types_swagger_doc_generated.go deleted file mode 100644 index 07da434c98de..000000000000 --- a/vendor/k8s.io/api/admission/v1alpha1/types_swagger_doc_generated.go +++ /dev/null @@ -1,67 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. 
-// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE -var map_AdmissionReview = map[string]string{ - "": "AdmissionReview describes an admission request.", - "spec": "Spec describes the attributes for the admission request. Since this admission controller is non-mutating the webhook should avoid setting this in its response to avoid the cost of deserializing it.", - "status": "Status is filled in by the webhook and indicates whether the admission request should be permitted.", -} - -func (AdmissionReview) SwaggerDoc() map[string]string { - return map_AdmissionReview -} - -var map_AdmissionReviewSpec = map[string]string{ - "": "AdmissionReviewSpec describes the admission.Attributes for the admission request.", - "kind": "Kind is the type of object being manipulated. For example: Pod", - "object": "Object is the object from the incoming request prior to default values being applied", - "oldObject": "OldObject is the existing object. Only populated for UPDATE requests.", - "operation": "Operation is the operation being performed", - "name": "Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and rely on the server to generate the name. If that is the case, this method will return the empty string.", - "namespace": "Namespace is the namespace associated with the request (if any).", - "resource": "Resource is the name of the resource being requested. This is not the kind. For example: pods", - "subResource": "SubResource is the name of the subresource being requested. This is a different resource, scoped to the parent resource, but it may have a different kind. For instance, /pods has the resource \"pods\" and the kind \"Pod\", while /pods/foo/status has the resource \"pods\", the sub resource \"status\", and the kind \"Pod\" (because status operates on pods). The binding resource for a pod though may be /pods/foo/binding, which has resource \"pods\", subresource \"binding\", and kind \"Binding\".", - "userInfo": "UserInfo is information about the requesting user", -} - -func (AdmissionReviewSpec) SwaggerDoc() map[string]string { - return map_AdmissionReviewSpec -} - -var map_AdmissionReviewStatus = map[string]string{ - "": "AdmissionReviewStatus describes the status of the admission request.", - "allowed": "Allowed indicates whether or not the admission request was permitted.", - "status": "Result contains extra details into why an admission request was denied. This field IS NOT consulted in any way if \"Allowed\" is \"true\".", -} - -func (AdmissionReviewStatus) SwaggerDoc() map[string]string { - return map_AdmissionReviewStatus -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/admission/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admission/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index 667792aeb687..000000000000 --- a/vendor/k8s.io/api/admission/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,127 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package v1alpha1 - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" -) - -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AdmissionReview).DeepCopyInto(out.(*AdmissionReview)) - return nil - }, InType: reflect.TypeOf(&AdmissionReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AdmissionReviewSpec).DeepCopyInto(out.(*AdmissionReviewSpec)) - return nil - }, InType: reflect.TypeOf(&AdmissionReviewSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AdmissionReviewStatus).DeepCopyInto(out.(*AdmissionReviewStatus)) - return nil - }, InType: reflect.TypeOf(&AdmissionReviewStatus{})}, - ) -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AdmissionReview) DeepCopyInto(out *AdmissionReview) { - *out = *in - out.TypeMeta = in.TypeMeta - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionReview. -func (in *AdmissionReview) DeepCopy() *AdmissionReview { - if in == nil { - return nil - } - out := new(AdmissionReview) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *AdmissionReview) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AdmissionReviewSpec) DeepCopyInto(out *AdmissionReviewSpec) { - *out = *in - out.Kind = in.Kind - in.Object.DeepCopyInto(&out.Object) - in.OldObject.DeepCopyInto(&out.OldObject) - out.Resource = in.Resource - in.UserInfo.DeepCopyInto(&out.UserInfo) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionReviewSpec. -func (in *AdmissionReviewSpec) DeepCopy() *AdmissionReviewSpec { - if in == nil { - return nil - } - out := new(AdmissionReviewSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *AdmissionReviewStatus) DeepCopyInto(out *AdmissionReviewStatus) { - *out = *in - if in.Result != nil { - in, out := &in.Result, &out.Result - if *in == nil { - *out = nil - } else { - *out = new(v1.Status) - (*in).DeepCopyInto(*out) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionReviewStatus. -func (in *AdmissionReviewStatus) DeepCopy() *AdmissionReviewStatus { - if in == nil { - return nil - } - out := new(AdmissionReviewStatus) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/api/admission/v1beta1/BUILD b/vendor/k8s.io/api/admission/v1beta1/BUILD new file mode 100644 index 000000000000..fd69b9170360 --- /dev/null +++ b/vendor/k8s.io/api/admission/v1beta1/BUILD @@ -0,0 +1,43 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "generated.pb.go", + "register.go", + "types.go", + "types_swagger_doc_generated.go", + "zz_generated.deepcopy.go", + ], + importpath = "k8s.io/api/admission/v1beta1", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/gogo/protobuf/proto:go_default_library", + "//vendor/k8s.io/api/authentication/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + ], +) + +filegroup( + name = "go_default_library_protos", + srcs = ["generated.proto"], + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/api/admission/v1beta1/doc.go b/vendor/k8s.io/api/admission/v1beta1/doc.go new file mode 100644 index 000000000000..9a8b8e41f0dd --- /dev/null +++ b/vendor/k8s.io/api/admission/v1beta1/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=false + +// +groupName=admission.k8s.io +package v1beta1 diff --git a/vendor/k8s.io/api/admission/v1beta1/generated.pb.go b/vendor/k8s.io/api/admission/v1beta1/generated.pb.go new file mode 100644 index 000000000000..6f5edcb85da1 --- /dev/null +++ b/vendor/k8s.io/api/admission/v1beta1/generated.pb.go @@ -0,0 +1,1208 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/api/admission/v1beta1/generated.proto +// DO NOT EDIT! + +/* + Package v1beta1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/admission/v1beta1/generated.proto + + It has these top-level messages: + AdmissionRequest + AdmissionResponse + AdmissionReview +*/ +package v1beta1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +import k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *AdmissionRequest) Reset() { *m = AdmissionRequest{} } +func (*AdmissionRequest) ProtoMessage() {} +func (*AdmissionRequest) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *AdmissionResponse) Reset() { *m = AdmissionResponse{} } +func (*AdmissionResponse) ProtoMessage() {} +func (*AdmissionResponse) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *AdmissionReview) Reset() { *m = AdmissionReview{} } +func (*AdmissionReview) ProtoMessage() {} +func (*AdmissionReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func init() { + proto.RegisterType((*AdmissionRequest)(nil), "k8s.io.api.admission.v1beta1.AdmissionRequest") + proto.RegisterType((*AdmissionResponse)(nil), "k8s.io.api.admission.v1beta1.AdmissionResponse") + proto.RegisterType((*AdmissionReview)(nil), "k8s.io.api.admission.v1beta1.AdmissionReview") +} +func (m *AdmissionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AdmissionRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i += copy(dAtA[i:], m.UID) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Kind.Size())) + n1, err := m.Kind.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) + n2, err := m.Resource.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubResource))) + i += copy(dAtA[i:], m.SubResource) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += 
copy(dAtA[i:], m.Name) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i += copy(dAtA[i:], m.Namespace) + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operation))) + i += copy(dAtA[i:], m.Operation) + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UserInfo.Size())) + n3, err := m.UserInfo.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) + n4, err := m.Object.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.OldObject.Size())) + n5, err := m.OldObject.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + return i, nil +} + +func (m *AdmissionResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AdmissionResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i += copy(dAtA[i:], m.UID) + dAtA[i] = 0x10 + i++ + if m.Allowed { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.Result != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Result.Size())) + n6, err := m.Result.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + if m.Patch != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Patch))) + i += copy(dAtA[i:], m.Patch) + } + if m.PatchType != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PatchType))) + i += copy(dAtA[i:], *m.PatchType) + } + return i, nil +} + +func (m *AdmissionReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AdmissionReview) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Request != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Request.Size())) + n7, err := m.Request.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if m.Response != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Response.Size())) + n8, err := m.Response.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + return i, nil +} + +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *AdmissionRequest) Size() (n int) { + var l int + _ = l + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Kind.Size() + n += 1 + l + sovGenerated(uint64(l)) + 
l = m.Resource.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SubResource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operation) + n += 1 + l + sovGenerated(uint64(l)) + l = m.UserInfo.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Object.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.OldObject.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *AdmissionResponse) Size() (n int) { + var l int + _ = l + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.Result != nil { + l = m.Result.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Patch != nil { + l = len(m.Patch) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PatchType != nil { + l = len(*m.PatchType) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *AdmissionReview) Size() (n int) { + var l int + _ = l + if m.Request != nil { + l = m.Request.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Response != nil { + l = m.Response.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AdmissionRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AdmissionRequest{`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `Kind:` + strings.Replace(strings.Replace(this.Kind.String(), "GroupVersionKind", "k8s_io_apimachinery_pkg_apis_meta_v1.GroupVersionKind", 1), `&`, ``, 1) + `,`, + `Resource:` + strings.Replace(strings.Replace(this.Resource.String(), "GroupVersionResource", "k8s_io_apimachinery_pkg_apis_meta_v1.GroupVersionResource", 1), `&`, ``, 1) + `,`, + `SubResource:` + fmt.Sprintf("%v", this.SubResource) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Operation:` + fmt.Sprintf("%v", this.Operation) + `,`, + `UserInfo:` + strings.Replace(strings.Replace(this.UserInfo.String(), "UserInfo", "k8s_io_api_authentication_v1.UserInfo", 1), `&`, ``, 1) + `,`, + `Object:` + strings.Replace(strings.Replace(this.Object.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `OldObject:` + strings.Replace(strings.Replace(this.OldObject.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *AdmissionResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AdmissionResponse{`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`, + `Result:` + strings.Replace(fmt.Sprintf("%v", this.Result), "Status", "k8s_io_apimachinery_pkg_apis_meta_v1.Status", 1) + `,`, + `Patch:` + valueToStringGenerated(this.Patch) + `,`, + `PatchType:` + valueToStringGenerated(this.PatchType) + `,`, + `}`, + }, "") + return s +} +func (this *AdmissionReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AdmissionReview{`, + `Request:` + strings.Replace(fmt.Sprintf("%v", this.Request), "AdmissionRequest", "AdmissionRequest", 1) + `,`, + `Response:` + strings.Replace(fmt.Sprintf("%v", this.Response), "AdmissionResponse", "AdmissionResponse", 1) + `,`, + `}`, + }, "") + 
return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AdmissionRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AdmissionRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AdmissionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Kind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SubResource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SubResource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var 
stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operation", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operation = Operation(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UserInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OldObject", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + 
} + if err := m.OldObject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AdmissionResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AdmissionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Allowed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Allowed = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Result == nil { + m.Result = &k8s_io_apimachinery_pkg_apis_meta_v1.Status{} + } + if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Patch", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Patch = append(m.Patch[:0], dAtA[iNdEx:postIndex]...) 
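The Patch bytes decoded just above, together with the PatchType field handled in the next case, are how a mutating webhook returns changes: the response carries an RFC 6902 JSON patch that the apiserver applies to the admitted object. As a rough, hypothetical sketch (not part of the vendored file; the helper name, the "reviewed" label key, and the demo UID are made up), a webhook might assemble such a response like this:

package main

import (
	"encoding/json"
	"fmt"

	admissionv1beta1 "k8s.io/api/admission/v1beta1"
)

// buildPatchResponse is an illustrative helper: it allows the request and
// attaches an RFC 6902 JSON patch adding a "reviewed" label. The patch
// assumes the object's metadata.labels map already exists.
func buildPatchResponse(req *admissionv1beta1.AdmissionRequest) (*admissionv1beta1.AdmissionResponse, error) {
	patch := []map[string]interface{}{
		{"op": "add", "path": "/metadata/labels/reviewed", "value": "true"},
	}
	raw, err := json.Marshal(patch)
	if err != nil {
		return nil, err
	}
	pt := admissionv1beta1.PatchTypeJSONPatch
	return &admissionv1beta1.AdmissionResponse{
		UID:       req.UID, // echo the request UID so the apiserver can correlate the reply
		Allowed:   true,
		Patch:     raw,
		PatchType: &pt,
	}, nil
}

func main() {
	resp, err := buildPatchResponse(&admissionv1beta1.AdmissionRequest{UID: "example-uid"})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(resp.Patch))
}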
+ if m.Patch == nil { + m.Patch = []byte{} + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PatchType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := PatchType(dAtA[iNdEx:postIndex]) + m.PatchType = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AdmissionReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AdmissionReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AdmissionReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Request == nil { + m.Request = &AdmissionRequest{} + } + if err := m.Request.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Response", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Response == nil { + m.Response = &AdmissionResponse{} + } + if err := m.Response.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admission/v1beta1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 739 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xcd, 0x4e, 0xdb, 0x4a, + 0x14, 0x8e, 0x21, 0x7f, 0x9e, 0xa0, 0x0b, 0xcc, 0xdd, 0x58, 0xd1, 0x95, 0xc3, 0x65, 0x71, 0xc5, + 0x95, 0x60, 0x5c, 0x68, 0x8b, 0x50, 0xd5, 0x0d, 0x16, 0xa8, 0x42, 0x95, 0x00, 0x0d, 0xa4, 0x6a, + 0xbb, 0xa8, 0x34, 0x71, 0x86, 0x64, 0x9a, 0xd8, 0xe3, 0x7a, 0xc6, 0xa1, 0xec, 0xfa, 0x08, 0x7d, + 0x93, 0x3e, 0x44, 0x37, 0x2c, 0x59, 0xb2, 0x8a, 0x4a, 0xfa, 0x00, 0xdd, 0xb3, 0xaa, 0x3c, 0x1e, + 0xc7, 0x29, 0x34, 0x2d, 0xad, 0xba, 0xca, 0x9c, 0x73, 0xbe, 0xef, 0x3b, 0xf1, 0x77, 0xce, 0x0c, + 0xd8, 0xed, 0x6d, 0x09, 0xc4, 0xb8, 0xd3, 0x8b, 0x5b, 0x34, 0x0a, 0xa8, 0xa4, 0xc2, 0x19, 0xd0, + 0xa0, 0xcd, 0x23, 0x47, 0x17, 0x48, 0xc8, 0x1c, 0xd2, 0xf6, 0x99, 0x10, 0x8c, 0x07, 0xce, 0x60, + 0xbd, 0x45, 0x25, 0x59, 0x77, 0x3a, 0x34, 0xa0, 0x11, 0x91, 0xb4, 0x8d, 0xc2, 0x88, 0x4b, 0x0e, + 0xff, 0x49, 0xd1, 0x88, 0x84, 0x0c, 0x8d, 0xd1, 0x48, 0xa3, 0xeb, 0x6b, 0x1d, 0x26, 0xbb, 0x71, + 0x0b, 0x79, 0xdc, 0x77, 0x3a, 0xbc, 0xc3, 0x1d, 0x45, 0x6a, 0xc5, 0x27, 0x2a, 0x52, 0x81, 0x3a, + 0xa5, 0x62, 0xf5, 0xd5, 0xc9, 0xd6, 0xb1, 0xec, 0xd2, 0x40, 0x32, 0x8f, 0xc8, 0xb4, 0xff, 0xcd, + 0xd6, 0xf5, 0x07, 0x39, 0xda, 0x27, 0x5e, 0x97, 0x05, 0x34, 0x3a, 0x73, 0xc2, 0x5e, 0x27, 0x49, + 0x08, 0xc7, 0xa7, 0x92, 0x7c, 0x8f, 0xe5, 0x4c, 0x63, 0x45, 0x71, 0x20, 0x99, 0x4f, 0x6f, 0x11, + 0x36, 0x7f, 0x46, 0x10, 0x5e, 0x97, 0xfa, 0xe4, 0x16, 0xef, 0xfe, 0x34, 0x5e, 0x2c, 0x59, 0xdf, + 0x61, 0x81, 0x14, 0x32, 
0xba, 0x49, 0x5a, 0xfe, 0x52, 0x02, 0x0b, 0xdb, 0x99, 0x8d, 0x98, 0xbe, + 0x89, 0xa9, 0x90, 0xd0, 0x05, 0xb3, 0x31, 0x6b, 0x5b, 0xc6, 0x92, 0xb1, 0x62, 0xba, 0xf7, 0xce, + 0x87, 0x8d, 0xc2, 0x68, 0xd8, 0x98, 0x6d, 0xee, 0xed, 0x5c, 0x0f, 0x1b, 0xff, 0x4e, 0xeb, 0x22, + 0xcf, 0x42, 0x2a, 0x50, 0x73, 0x6f, 0x07, 0x27, 0x64, 0xf8, 0x1c, 0x14, 0x7b, 0x2c, 0x68, 0x5b, + 0x33, 0x4b, 0xc6, 0x4a, 0x6d, 0x63, 0x13, 0xe5, 0x63, 0x1b, 0xd3, 0x50, 0xd8, 0xeb, 0x24, 0x09, + 0x81, 0x12, 0xef, 0xd0, 0x60, 0x1d, 0x3d, 0x89, 0x78, 0x1c, 0x3e, 0xa3, 0x51, 0xf2, 0x67, 0x9e, + 0xb2, 0xa0, 0xed, 0xce, 0xe9, 0xe6, 0xc5, 0x24, 0xc2, 0x4a, 0x11, 0x76, 0x41, 0x35, 0xa2, 0x82, + 0xc7, 0x91, 0x47, 0xad, 0x59, 0xa5, 0xfe, 0xe8, 0xd7, 0xd5, 0xb1, 0x56, 0x70, 0x17, 0x74, 0x87, + 0x6a, 0x96, 0xc1, 0x63, 0x75, 0xf8, 0x10, 0xd4, 0x44, 0xdc, 0xca, 0x0a, 0x56, 0x51, 0xf9, 0xf1, + 0xb7, 0x26, 0xd4, 0x8e, 0xf2, 0x12, 0x9e, 0xc4, 0xc1, 0x25, 0x50, 0x0c, 0x88, 0x4f, 0xad, 0x92, + 0xc2, 0x8f, 0x3f, 0x61, 0x9f, 0xf8, 0x14, 0xab, 0x0a, 0x74, 0x80, 0x99, 0xfc, 0x8a, 0x90, 0x78, + 0xd4, 0x2a, 0x2b, 0xd8, 0xa2, 0x86, 0x99, 0xfb, 0x59, 0x01, 0xe7, 0x18, 0xf8, 0x18, 0x98, 0x3c, + 0x4c, 0x06, 0xc7, 0x78, 0x60, 0x55, 0x14, 0xc1, 0xce, 0x08, 0x07, 0x59, 0xe1, 0x7a, 0x32, 0xc0, + 0x39, 0x01, 0x1e, 0x83, 0x6a, 0x2c, 0x68, 0xb4, 0x17, 0x9c, 0x70, 0xab, 0xaa, 0x1c, 0xfb, 0x0f, + 0x4d, 0x5e, 0xa3, 0x6f, 0x36, 0x3f, 0x71, 0xaa, 0xa9, 0xd1, 0xb9, 0x3b, 0x59, 0x06, 0x8f, 0x95, + 0x60, 0x13, 0x94, 0x79, 0xeb, 0x35, 0xf5, 0xa4, 0x65, 0x2a, 0xcd, 0xb5, 0xa9, 0x53, 0xd0, 0x8b, + 0x8b, 0x30, 0x39, 0xdd, 0x7d, 0x2b, 0x69, 0x90, 0x0c, 0xc0, 0xfd, 0x4b, 0x4b, 0x97, 0x0f, 0x94, + 0x08, 0xd6, 0x62, 0xf0, 0x15, 0x30, 0x79, 0xbf, 0x9d, 0x26, 0x2d, 0xf0, 0x3b, 0xca, 0x63, 0x2b, + 0x0f, 0x32, 0x1d, 0x9c, 0x4b, 0x2e, 0x7f, 0x98, 0x01, 0x8b, 0x13, 0x1b, 0x2f, 0x42, 0x1e, 0x08, + 0xfa, 0x47, 0x56, 0xfe, 0x7f, 0x50, 0x21, 0xfd, 0x3e, 0x3f, 0xa5, 0xe9, 0xd6, 0x57, 0xdd, 0x79, + 0xad, 0x53, 0xd9, 0x4e, 0xd3, 0x38, 0xab, 0xc3, 0x43, 0x50, 0x16, 0x92, 0xc8, 0x58, 0xe8, 0x0d, + 0x5e, 0xbd, 0xdb, 0x06, 0x1f, 0x29, 0x8e, 0x0b, 0x12, 0xdb, 0x30, 0x15, 0x71, 0x5f, 0x62, 0xad, + 0x03, 0x1b, 0xa0, 0x14, 0x12, 0xe9, 0x75, 0xd5, 0x96, 0xce, 0xb9, 0xe6, 0x68, 0xd8, 0x28, 0x1d, + 0x26, 0x09, 0x9c, 0xe6, 0xe1, 0x16, 0x30, 0xd5, 0xe1, 0xf8, 0x2c, 0xcc, 0x56, 0xb3, 0x9e, 0x98, + 0x74, 0x98, 0x25, 0xaf, 0x27, 0x03, 0x9c, 0x83, 0x97, 0x3f, 0x1a, 0x60, 0x7e, 0xc2, 0xb1, 0x01, + 0xa3, 0xa7, 0xb0, 0x09, 0x2a, 0x51, 0xfa, 0x5a, 0x28, 0xcf, 0x6a, 0x1b, 0x08, 0xfd, 0xe8, 0x61, + 0x46, 0x37, 0xdf, 0x18, 0xb7, 0x96, 0xf8, 0xa2, 0x03, 0x9c, 0x69, 0xc1, 0x17, 0xea, 0x6e, 0xab, + 0x91, 0xe8, 0x97, 0xc3, 0xb9, 0xb3, 0x6e, 0x4a, 0x73, 0xe7, 0xf4, 0x65, 0x56, 0x11, 0x1e, 0xcb, + 0xb9, 0x6b, 0xe7, 0x57, 0x76, 0xe1, 0xe2, 0xca, 0x2e, 0x5c, 0x5e, 0xd9, 0x85, 0x77, 0x23, 0xdb, + 0x38, 0x1f, 0xd9, 0xc6, 0xc5, 0xc8, 0x36, 0x2e, 0x47, 0xb6, 0xf1, 0x69, 0x64, 0x1b, 0xef, 0x3f, + 0xdb, 0x85, 0x97, 0x15, 0x2d, 0xfc, 0x35, 0x00, 0x00, 0xff, 0xff, 0x76, 0x21, 0xd5, 0x35, 0xaf, + 0x06, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/admission/v1beta1/generated.proto b/vendor/k8s.io/api/admission/v1beta1/generated.proto new file mode 100644 index 000000000000..159325f37d13 --- /dev/null +++ b/vendor/k8s.io/api/admission/v1beta1/generated.proto @@ -0,0 +1,112 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.admission.v1beta1; + +import "k8s.io/api/authentication/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// AdmissionRequest describes the admission.Attributes for the admission request. +message AdmissionRequest { + // UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are + // otherwise identical (parallel requests, requests when earlier requests did not modify etc) + // The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. + // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging. + optional string uid = 1; + + // Kind is the type of object being manipulated. For example: Pod + optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind kind = 2; + + // Resource is the name of the resource being requested. This is not the kind. For example: pods + optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource resource = 3; + + // SubResource is the name of the subresource being requested. This is a different resource, scoped to the parent + // resource, but it may have a different kind. For instance, /pods has the resource "pods" and the kind "Pod", while + // /pods/foo/status has the resource "pods", the sub resource "status", and the kind "Pod" (because status operates on + // pods). The binding resource for a pod though may be /pods/foo/binding, which has resource "pods", subresource + // "binding", and kind "Binding". + // +optional + optional string subResource = 4; + + // Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and + // rely on the server to generate the name. If that is the case, this method will return the empty string. + // +optional + optional string name = 5; + + // Namespace is the namespace associated with the request (if any). + // +optional + optional string namespace = 6; + + // Operation is the operation being performed + optional string operation = 7; + + // UserInfo is information about the requesting user + optional k8s.io.api.authentication.v1.UserInfo userInfo = 8; + + // Object is the object from the incoming request prior to default values being applied + // +optional + optional k8s.io.apimachinery.pkg.runtime.RawExtension object = 9; + + // OldObject is the existing object. Only populated for UPDATE requests. + // +optional + optional k8s.io.apimachinery.pkg.runtime.RawExtension oldObject = 10; +} + +// AdmissionResponse describes an admission response. +message AdmissionResponse { + // UID is an identifier for the individual request/response. 
+ // This should be copied over from the corresponding AdmissionRequest. + optional string uid = 1; + + // Allowed indicates whether or not the admission request was permitted. + optional bool allowed = 2; + + // Result contains extra details into why an admission request was denied. + // This field IS NOT consulted in any way if "Allowed" is "true". + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Status status = 3; + + // The patch body. Currently we only support "JSONPatch" which implements RFC 6902. + // +optional + optional bytes patch = 4; + + // The type of Patch. Currently we only allow "JSONPatch". + // +optional + optional string patchType = 5; +} + +// AdmissionReview describes an admission review request/response. +message AdmissionReview { + // Request describes the attributes for the admission request. + // +optional + optional AdmissionRequest request = 1; + + // Response describes the attributes for the admission response. + // +optional + optional AdmissionResponse response = 2; +} + diff --git a/vendor/k8s.io/api/admission/v1beta1/register.go b/vendor/k8s.io/api/admission/v1beta1/register.go new file mode 100644 index 000000000000..78d21a0c8a7e --- /dev/null +++ b/vendor/k8s.io/api/admission/v1beta1/register.go @@ -0,0 +1,51 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name for this API. +const GroupName = "admission.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &AdmissionReview{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/admission/v1beta1/types.go b/vendor/k8s.io/api/admission/v1beta1/types.go new file mode 100644 index 000000000000..9ad939c396c0 --- /dev/null +++ b/vendor/k8s.io/api/admission/v1beta1/types.go @@ -0,0 +1,116 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
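Taken together, the AdmissionRequest, AdmissionResponse, and AdmissionReview messages above define the round trip between the apiserver and a webhook: the apiserver POSTs an AdmissionReview with the request field populated, and the webhook replies with an AdmissionReview whose response echoes the request uid and states whether the operation is allowed. A minimal, hypothetical handler sketch follows (the /validate path, the port, the handler name, and the plain-HTTP listener are illustrative only; a real webhook must serve TLS and be registered via a webhook configuration):

package main

import (
	"encoding/json"
	"log"
	"net/http"

	admissionv1beta1 "k8s.io/api/admission/v1beta1"
)

// serveReview decodes the AdmissionReview sent by the apiserver, always
// allows the request, and echoes the request UID back in the response.
func serveReview(w http.ResponseWriter, r *http.Request) {
	var review admissionv1beta1.AdmissionReview
	if err := json.NewDecoder(r.Body).Decode(&review); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	review.Response = &admissionv1beta1.AdmissionResponse{Allowed: true}
	if review.Request != nil {
		// The apiserver matches the reply to the request by UID.
		review.Response.UID = review.Request.UID
	}
	w.Header().Set("Content-Type", "application/json")
	if err := json.NewEncoder(w).Encode(&review); err != nil {
		log.Printf("writing admission response: %v", err)
	}
}

func main() {
	http.HandleFunc("/validate", serveReview)
	// Plain HTTP keeps the sketch short; production webhooks use ListenAndServeTLS.
	log.Fatal(http.ListenAndServe(":8443", nil))
}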
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + authenticationv1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AdmissionReview describes an admission review request/response. +type AdmissionReview struct { + metav1.TypeMeta `json:",inline"` + // Request describes the attributes for the admission request. + // +optional + Request *AdmissionRequest `json:"request,omitempty" protobuf:"bytes,1,opt,name=request"` + // Response describes the attributes for the admission response. + // +optional + Response *AdmissionResponse `json:"response,omitempty" protobuf:"bytes,2,opt,name=response"` +} + +// AdmissionRequest describes the admission.Attributes for the admission request. +type AdmissionRequest struct { + // UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are + // otherwise identical (parallel requests, requests when earlier requests did not modify etc) + // The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. + // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging. + UID types.UID `json:"uid" protobuf:"bytes,1,opt,name=uid"` + // Kind is the type of object being manipulated. For example: Pod + Kind metav1.GroupVersionKind `json:"kind" protobuf:"bytes,2,opt,name=kind"` + // Resource is the name of the resource being requested. This is not the kind. For example: pods + Resource metav1.GroupVersionResource `json:"resource" protobuf:"bytes,3,opt,name=resource"` + // SubResource is the name of the subresource being requested. This is a different resource, scoped to the parent + // resource, but it may have a different kind. For instance, /pods has the resource "pods" and the kind "Pod", while + // /pods/foo/status has the resource "pods", the sub resource "status", and the kind "Pod" (because status operates on + // pods). The binding resource for a pod though may be /pods/foo/binding, which has resource "pods", subresource + // "binding", and kind "Binding". + // +optional + SubResource string `json:"subResource,omitempty" protobuf:"bytes,4,opt,name=subResource"` + // Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and + // rely on the server to generate the name. If that is the case, this method will return the empty string. + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,5,opt,name=name"` + // Namespace is the namespace associated with the request (if any). 
+ // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,6,opt,name=namespace"` + // Operation is the operation being performed + Operation Operation `json:"operation" protobuf:"bytes,7,opt,name=operation"` + // UserInfo is information about the requesting user + UserInfo authenticationv1.UserInfo `json:"userInfo" protobuf:"bytes,8,opt,name=userInfo"` + // Object is the object from the incoming request prior to default values being applied + // +optional + Object runtime.RawExtension `json:"object,omitempty" protobuf:"bytes,9,opt,name=object"` + // OldObject is the existing object. Only populated for UPDATE requests. + // +optional + OldObject runtime.RawExtension `json:"oldObject,omitempty" protobuf:"bytes,10,opt,name=oldObject"` +} + +// AdmissionResponse describes an admission response. +type AdmissionResponse struct { + // UID is an identifier for the individual request/response. + // This should be copied over from the corresponding AdmissionRequest. + UID types.UID `json:"uid" protobuf:"bytes,1,opt,name=uid"` + + // Allowed indicates whether or not the admission request was permitted. + Allowed bool `json:"allowed" protobuf:"varint,2,opt,name=allowed"` + + // Result contains extra details into why an admission request was denied. + // This field IS NOT consulted in any way if "Allowed" is "true". + // +optional + Result *metav1.Status `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` + + // The patch body. Currently we only support "JSONPatch" which implements RFC 6902. + // +optional + Patch []byte `json:"patch,omitempty" protobuf:"bytes,4,opt,name=patch"` + + // The type of Patch. Currently we only allow "JSONPatch". + // +optional + PatchType *PatchType `json:"patchType,omitempty" protobuf:"bytes,5,opt,name=patchType"` +} + +// PatchType is the type of patch being used to represent the mutated object +type PatchType string + +// PatchType constants. +const ( + PatchTypeJSONPatch PatchType = "JSONPatch" +) + +// Operation is the type of resource operation being checked for admission control +type Operation string + +// Operation constants +const ( + Create Operation = "CREATE" + Update Operation = "UPDATE" + Delete Operation = "DELETE" + Connect Operation = "CONNECT" +) diff --git a/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 000000000000..1f53135179a3 --- /dev/null +++ b/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,71 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! 
For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_AdmissionRequest = map[string]string{ + "": "AdmissionRequest describes the admission.Attributes for the admission request.", + "uid": "UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are otherwise identical (parallel requests, requests when earlier requests did not modify etc) The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging.", + "kind": "Kind is the type of object being manipulated. For example: Pod", + "resource": "Resource is the name of the resource being requested. This is not the kind. For example: pods", + "subResource": "SubResource is the name of the subresource being requested. This is a different resource, scoped to the parent resource, but it may have a different kind. For instance, /pods has the resource \"pods\" and the kind \"Pod\", while /pods/foo/status has the resource \"pods\", the sub resource \"status\", and the kind \"Pod\" (because status operates on pods). The binding resource for a pod though may be /pods/foo/binding, which has resource \"pods\", subresource \"binding\", and kind \"Binding\".", + "name": "Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and rely on the server to generate the name. If that is the case, this method will return the empty string.", + "namespace": "Namespace is the namespace associated with the request (if any).", + "operation": "Operation is the operation being performed", + "userInfo": "UserInfo is information about the requesting user", + "object": "Object is the object from the incoming request prior to default values being applied", + "oldObject": "OldObject is the existing object. Only populated for UPDATE requests.", +} + +func (AdmissionRequest) SwaggerDoc() map[string]string { + return map_AdmissionRequest +} + +var map_AdmissionResponse = map[string]string{ + "": "AdmissionResponse describes an admission response.", + "uid": "UID is an identifier for the individual request/response. This should be copied over from the corresponding AdmissionRequest.", + "allowed": "Allowed indicates whether or not the admission request was permitted.", + "status": "Result contains extra details into why an admission request was denied. This field IS NOT consulted in any way if \"Allowed\" is \"true\".", + "patch": "The patch body. Currently we only support \"JSONPatch\" which implements RFC 6902.", + "patchType": "The type of Patch. 
Currently we only allow \"JSONPatch\".", +} + +func (AdmissionResponse) SwaggerDoc() map[string]string { + return map_AdmissionResponse +} + +var map_AdmissionReview = map[string]string{ + "": "AdmissionReview describes an admission review request/response.", + "request": "Request describes the attributes for the admission request.", + "response": "Response describes the attributes for the admission response.", +} + +func (AdmissionReview) SwaggerDoc() map[string]string { + return map_AdmissionReview +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000000..ad9cb851b8f1 --- /dev/null +++ b/vendor/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,130 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionRequest) DeepCopyInto(out *AdmissionRequest) { + *out = *in + out.Kind = in.Kind + out.Resource = in.Resource + in.UserInfo.DeepCopyInto(&out.UserInfo) + in.Object.DeepCopyInto(&out.Object) + in.OldObject.DeepCopyInto(&out.OldObject) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionRequest. +func (in *AdmissionRequest) DeepCopy() *AdmissionRequest { + if in == nil { + return nil + } + out := new(AdmissionRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionResponse) DeepCopyInto(out *AdmissionResponse) { + *out = *in + if in.Result != nil { + in, out := &in.Result, &out.Result + if *in == nil { + *out = nil + } else { + *out = new(v1.Status) + (*in).DeepCopyInto(*out) + } + } + if in.Patch != nil { + in, out := &in.Patch, &out.Patch + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.PatchType != nil { + in, out := &in.PatchType, &out.PatchType + if *in == nil { + *out = nil + } else { + *out = new(PatchType) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionResponse. +func (in *AdmissionResponse) DeepCopy() *AdmissionResponse { + if in == nil { + return nil + } + out := new(AdmissionResponse) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AdmissionReview) DeepCopyInto(out *AdmissionReview) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Request != nil { + in, out := &in.Request, &out.Request + if *in == nil { + *out = nil + } else { + *out = new(AdmissionRequest) + (*in).DeepCopyInto(*out) + } + } + if in.Response != nil { + in, out := &in.Response, &out.Response + if *in == nil { + *out = nil + } else { + *out = new(AdmissionResponse) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionReview. +func (in *AdmissionReview) DeepCopy() *AdmissionReview { + if in == nil { + return nil + } + out := new(AdmissionReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AdmissionReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/BUILD b/vendor/k8s.io/api/admissionregistration/v1alpha1/BUILD index 6a38064dd240..417eab39ff09 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/BUILD +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/BUILD @@ -15,10 +15,10 @@ go_library( "types_swagger_doc_generated.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/api/admissionregistration/v1alpha1", deps = [ "//vendor/github.com/gogo/protobuf/proto:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go index bd72b25cc364..4fa2d9c6627b 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go @@ -14,12 +14,12 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true // Package v1alpha1 is the v1alpha1 version of the API. // AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration -// InitializerConfiguration and ExternalAdmissionHookConfiguration is for the +// InitializerConfiguration and validatingWebhookConfiguration is for the // new dynamic admission controller configuration. // +groupName=admissionregistration.k8s.io package v1alpha1 diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go index 6a1977f8331e..bdc26637f68e 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go @@ -25,16 +25,10 @@ limitations under the License. k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto It has these top-level messages: - AdmissionHookClientConfig - ExternalAdmissionHook - ExternalAdmissionHookConfiguration - ExternalAdmissionHookConfigurationList Initializer InitializerConfiguration InitializerConfigurationList Rule - RuleWithOperations - ServiceReference */ package v1alpha1 @@ -58,226 +52,32 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *AdmissionHookClientConfig) Reset() { *m = AdmissionHookClientConfig{} } -func (*AdmissionHookClientConfig) ProtoMessage() {} -func (*AdmissionHookClientConfig) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{0} -} - -func (m *ExternalAdmissionHook) Reset() { *m = ExternalAdmissionHook{} } -func (*ExternalAdmissionHook) ProtoMessage() {} -func (*ExternalAdmissionHook) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *ExternalAdmissionHookConfiguration) Reset() { *m = ExternalAdmissionHookConfiguration{} } -func (*ExternalAdmissionHookConfiguration) ProtoMessage() {} -func (*ExternalAdmissionHookConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{2} -} - -func (m *ExternalAdmissionHookConfigurationList) Reset() { - *m = ExternalAdmissionHookConfigurationList{} -} -func (*ExternalAdmissionHookConfigurationList) ProtoMessage() {} -func (*ExternalAdmissionHookConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{3} -} - func (m *Initializer) Reset() { *m = Initializer{} } func (*Initializer) ProtoMessage() {} -func (*Initializer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (*Initializer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } func (m *InitializerConfiguration) Reset() { *m = InitializerConfiguration{} } func (*InitializerConfiguration) ProtoMessage() {} func (*InitializerConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{5} + return fileDescriptorGenerated, []int{1} } func (m *InitializerConfigurationList) Reset() { *m = InitializerConfigurationList{} } func (*InitializerConfigurationList) ProtoMessage() {} func (*InitializerConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{6} + return fileDescriptorGenerated, []int{2} } func (m *Rule) Reset() { *m = Rule{} } func (*Rule) ProtoMessage() {} -func (*Rule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } - -func (m *RuleWithOperations) Reset() { *m = RuleWithOperations{} } -func (*RuleWithOperations) ProtoMessage() {} -func (*RuleWithOperations) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } - -func (m *ServiceReference) Reset() { *m = ServiceReference{} } -func (*ServiceReference) ProtoMessage() {} -func (*ServiceReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (*Rule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } func init() { - proto.RegisterType((*AdmissionHookClientConfig)(nil), "k8s.io.api.admissionregistration.v1alpha1.AdmissionHookClientConfig") - proto.RegisterType((*ExternalAdmissionHook)(nil), "k8s.io.api.admissionregistration.v1alpha1.ExternalAdmissionHook") - proto.RegisterType((*ExternalAdmissionHookConfiguration)(nil), "k8s.io.api.admissionregistration.v1alpha1.ExternalAdmissionHookConfiguration") - proto.RegisterType((*ExternalAdmissionHookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1alpha1.ExternalAdmissionHookConfigurationList") proto.RegisterType((*Initializer)(nil), "k8s.io.api.admissionregistration.v1alpha1.Initializer") proto.RegisterType((*InitializerConfiguration)(nil), "k8s.io.api.admissionregistration.v1alpha1.InitializerConfiguration") proto.RegisterType((*InitializerConfigurationList)(nil), 
"k8s.io.api.admissionregistration.v1alpha1.InitializerConfigurationList") proto.RegisterType((*Rule)(nil), "k8s.io.api.admissionregistration.v1alpha1.Rule") - proto.RegisterType((*RuleWithOperations)(nil), "k8s.io.api.admissionregistration.v1alpha1.RuleWithOperations") - proto.RegisterType((*ServiceReference)(nil), "k8s.io.api.admissionregistration.v1alpha1.ServiceReference") -} -func (m *AdmissionHookClientConfig) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AdmissionHookClientConfig) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Service.Size())) - n1, err := m.Service.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - if m.CABundle != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) - i += copy(dAtA[i:], m.CABundle) - } - return i, nil -} - -func (m *ExternalAdmissionHook) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExternalAdmissionHook) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ClientConfig.Size())) - n2, err := m.ClientConfig.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.FailurePolicy != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) - i += copy(dAtA[i:], *m.FailurePolicy) - } - return i, nil -} - -func (m *ExternalAdmissionHookConfiguration) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExternalAdmissionHookConfiguration) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - if len(m.ExternalAdmissionHooks) > 0 { - for _, msg := range m.ExternalAdmissionHooks { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ExternalAdmissionHookConfigurationList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExternalAdmissionHookConfigurationList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n4, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil } - func (m *Initializer) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -330,11 +130,11 @@ func (m *InitializerConfiguration) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n5, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n5 + i += n1 if len(m.Initializers) > 0 { for _, msg := range m.Initializers { dAtA[i] = 0x12 @@ -368,11 +168,11 @@ func (m *InitializerConfigurationList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) + n2, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n6 + i += n2 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -451,73 +251,6 @@ func (m *Rule) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *RuleWithOperations) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RuleWithOperations) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Operations) > 0 { - for _, s := range m.Operations { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Rule.Size())) - n7, err := m.Rule.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - return i, nil -} - -func (m *ServiceReference) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil -} - func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) dAtA[offset+1] = uint8(v >> 8) @@ -545,66 +278,6 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return offset + 1 } -func (m *AdmissionHookClientConfig) Size() (n int) { - var l int - _ = l - l = m.Service.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.CABundle != nil { - l = len(m.CABundle) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ExternalAdmissionHook) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = m.ClientConfig.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Rules) > 0 { - for _, e := range m.Rules { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.FailurePolicy != nil { - l = len(*m.FailurePolicy) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ExternalAdmissionHookConfiguration) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if 
len(m.ExternalAdmissionHooks) > 0 { - for _, e := range m.ExternalAdmissionHooks { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ExternalAdmissionHookConfigurationList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - func (m *Initializer) Size() (n int) { var l int _ = l @@ -671,36 +344,12 @@ func (m *Rule) Size() (n int) { return n } -func (m *RuleWithOperations) Size() (n int) { - var l int - _ = l - if len(m.Operations) > 0 { - for _, s := range m.Operations { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = m.Rule.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ServiceReference) Size() (n int) { - var l int - _ = l - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break } } return n @@ -708,629 +357,58 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (this *AdmissionHookClientConfig) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AdmissionHookClientConfig{`, - `Service:` + strings.Replace(strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1), `&`, ``, 1) + `,`, - `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, - `}`, - }, "") - return s -} -func (this *ExternalAdmissionHook) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ExternalAdmissionHook{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "AdmissionHookClientConfig", "AdmissionHookClientConfig", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + `,`, - `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, - `}`, - }, "") - return s -} -func (this *ExternalAdmissionHookConfiguration) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ExternalAdmissionHookConfiguration{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `ExternalAdmissionHooks:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ExternalAdmissionHooks), "ExternalAdmissionHook", "ExternalAdmissionHook", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ExternalAdmissionHookConfigurationList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ExternalAdmissionHookConfigurationList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ExternalAdmissionHookConfiguration", "ExternalAdmissionHookConfiguration", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} func (this *Initializer) String() string { if this == nil { return "nil" } - s := 
strings.Join([]string{`&Initializer{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "Rule", "Rule", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *InitializerConfiguration) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&InitializerConfiguration{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Initializers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Initializers), "Initializer", "Initializer", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *InitializerConfigurationList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&InitializerConfigurationList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "InitializerConfiguration", "InitializerConfiguration", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *Rule) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Rule{`, - `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, - `APIVersions:` + fmt.Sprintf("%v", this.APIVersions) + `,`, - `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, - `}`, - }, "") - return s -} -func (this *RuleWithOperations) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RuleWithOperations{`, - `Operations:` + fmt.Sprintf("%v", this.Operations) + `,`, - `Rule:` + strings.Replace(strings.Replace(this.Rule.String(), "Rule", "Rule", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ServiceReference) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ServiceReference{`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *AdmissionHookClientConfig) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AdmissionHookClientConfig: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AdmissionHookClientConfig: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { 
- return io.ErrUnexpectedEOF - } - if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CABundle", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CABundle = append(m.CABundle[:0], dAtA[iNdEx:postIndex]...) - if m.CABundle == nil { - m.CABundle = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExternalAdmissionHook) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExternalAdmissionHook: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExternalAdmissionHook: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if 
postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rules = append(m.Rules, RuleWithOperations{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := FailurePolicyType(dAtA[iNdEx:postIndex]) - m.FailurePolicy = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExternalAdmissionHookConfiguration) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExternalAdmissionHookConfiguration: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExternalAdmissionHookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExternalAdmissionHooks", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExternalAdmissionHooks = append(m.ExternalAdmissionHooks, ExternalAdmissionHook{}) - if err := m.ExternalAdmissionHooks[len(m.ExternalAdmissionHooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - 
return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExternalAdmissionHookConfigurationList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExternalAdmissionHookConfigurationList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExternalAdmissionHookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ExternalAdmissionHookConfiguration{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF + s := strings.Join([]string{`&Initializer{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "Rule", "Rule", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *InitializerConfiguration) String() string { + if this == nil { + return "nil" } - return nil + s := strings.Join([]string{`&InitializerConfiguration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Initializers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Initializers), "Initializer", "Initializer", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *InitializerConfigurationList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&InitializerConfigurationList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), 
`&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "InitializerConfiguration", "InitializerConfiguration", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Rule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Rule{`, + `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, + `APIVersions:` + fmt.Sprintf("%v", this.APIVersions) + `,`, + `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) } func (m *Initializer) Unmarshal(dAtA []byte) error { l := len(dAtA) @@ -1801,223 +879,6 @@ func (m *Rule) Unmarshal(dAtA []byte) error { } return nil } -func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RuleWithOperations: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RuleWithOperations: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Operations", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Operations = append(m.Operations, OperationType(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Rule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServiceReference) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { 
- break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 @@ -2128,60 +989,40 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 871 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcd, 0x8b, 0x23, 0x45, - 0x14, 0x4f, 0x65, 0x32, 0x6c, 0x52, 0x49, 0xd8, 0xdd, 0x42, 0x97, 0x38, 0x48, 0x77, 0xe8, 0xc3, - 0x12, 0x11, 0xbb, 0x9d, 0x51, 0x16, 0x41, 0x44, 0xa7, 0xc7, 0xaf, 0x81, 0xfd, 0x18, 0xcb, 0x45, - 0x41, 0x3c, 0x58, 0xe9, 0xbc, 0x24, 0x65, 0xfa, 0x8b, 0xaa, 0xea, 0xe0, 0x78, 0x10, 0x2f, 0xde, - 0x05, 0x2f, 0x5e, 0xbd, 0x79, 0xf1, 0xff, 0x98, 0xe3, 0x1e, 0xf7, 0x14, 0x9c, 0x16, 0xbc, 0x08, - 0xfe, 0x01, 0x73, 0x92, 0xfe, 0x4a, 0x3a, 0x9b, 0x84, 0x4d, 0x5c, 0x98, 0x5b, 0xea, 0xf7, 0xea, - 0xf7, 0xde, 0xef, 0xfd, 0xf2, 0x5e, 0x35, 0xa6, 0x93, 0x77, 0xa4, 0xc9, 0x03, 0x6b, 0x12, 0xf5, - 0x41, 0xf8, 0xa0, 0x40, 0x5a, 0x53, 0xf0, 0x07, 0x81, 0xb0, 0xf2, 0x00, 0x0b, 0xb9, 0xc5, 0x06, - 0x1e, 0x97, 0x92, 0x07, 0xbe, 0x80, 0x11, 0x97, 0x4a, 0x30, 0xc5, 0x03, 0xdf, 0x9a, 0x1e, 0x32, - 0x37, 0x1c, 0xb3, 0x43, 0x6b, 0x04, 0x3e, 0x08, 0xa6, 0x60, 0x60, 0x86, 0x22, 0x50, 0x01, 0x79, - 0x2d, 0xa3, 0x9a, 0x2c, 0xe4, 0xe6, 0x5a, 0xaa, 0x59, 0x50, 0x0f, 0xde, 0x18, 0x71, 0x35, 0x8e, - 0xfa, 0xa6, 0x13, 0x78, 0xd6, 0x28, 0x18, 0x05, 0x56, 0x9a, 0xa1, 0x1f, 0x0d, 0xd3, 0x53, 0x7a, - 0x48, 0x7f, 0x65, 0x99, 0x0f, 0xde, 0x5e, 0x88, 0xf2, 0x98, 0x33, 0xe6, 0x3e, 0x88, 0x73, 0x2b, - 0x9c, 0x8c, 0x12, 0x40, 0x5a, 0x1e, 0x28, 0x66, 0x4d, 0x57, 0xf4, 0x1c, 0x58, 0x9b, 0x58, 0x22, - 0xf2, 0x15, 0xf7, 0x60, 0x85, 0x70, 0xef, 0x79, 0x04, 0xe9, 
0x8c, 0xc1, 0x63, 0x2b, 0xbc, 0xb7, - 0x36, 0xf1, 0x22, 0xc5, 0x5d, 0x8b, 0xfb, 0x4a, 0x2a, 0xf1, 0x2c, 0xc9, 0xf8, 0x03, 0xe1, 0x57, - 0x8e, 0x0b, 0x97, 0x3e, 0x0d, 0x82, 0xc9, 0x89, 0xcb, 0xc1, 0x57, 0x27, 0x81, 0x3f, 0xe4, 0x23, - 0x32, 0xc4, 0x37, 0x24, 0x88, 0x29, 0x77, 0xa0, 0x83, 0xba, 0xa8, 0xd7, 0x3c, 0x7a, 0xd7, 0xdc, - 0xda, 0x5d, 0xf3, 0xf3, 0x8c, 0x49, 0x61, 0x08, 0x02, 0x7c, 0x07, 0xec, 0x9b, 0x17, 0x33, 0xbd, - 0x12, 0xcf, 0xf4, 0x1b, 0x45, 0xa4, 0x48, 0x4e, 0x7a, 0xb8, 0xee, 0x30, 0x3b, 0xf2, 0x07, 0x2e, - 0x74, 0xaa, 0x5d, 0xd4, 0x6b, 0xd9, 0xad, 0x78, 0xa6, 0xd7, 0x4f, 0x8e, 0x33, 0x8c, 0xce, 0xa3, - 0xc6, 0x3f, 0x55, 0xfc, 0xf2, 0x47, 0xdf, 0x29, 0x10, 0x3e, 0x73, 0x97, 0x74, 0x93, 0x2e, 0xae, - 0xf9, 0xcc, 0xcb, 0x84, 0x36, 0xec, 0x56, 0x5e, 0xab, 0xf6, 0x90, 0x79, 0x40, 0xd3, 0x08, 0xf9, - 0x01, 0xb7, 0x9c, 0x52, 0x77, 0x69, 0xa5, 0xe6, 0xd1, 0x87, 0x3b, 0xb4, 0xb4, 0xd1, 0x29, 0xfb, - 0xa5, 0xbc, 0x5e, 0xab, 0x8c, 0xd2, 0xa5, 0x7a, 0xa4, 0x8f, 0xf7, 0x45, 0xe4, 0x82, 0xec, 0xec, - 0x75, 0xf7, 0x7a, 0xcd, 0xa3, 0xf7, 0x76, 0x28, 0x4c, 0x23, 0x17, 0xbe, 0xe4, 0x6a, 0xfc, 0x28, - 0x84, 0x2c, 0x24, 0xed, 0x76, 0x5e, 0x71, 0x3f, 0x89, 0x49, 0x9a, 0xa5, 0x26, 0xf7, 0x71, 0x7b, - 0xc8, 0xb8, 0x1b, 0x09, 0x38, 0x0b, 0x5c, 0xee, 0x9c, 0x77, 0x6a, 0xa9, 0x1d, 0x77, 0xe3, 0x99, - 0xde, 0xfe, 0xb8, 0x1c, 0xb8, 0x9a, 0xe9, 0xb7, 0x97, 0x80, 0xc7, 0xe7, 0x21, 0xd0, 0x65, 0xb2, - 0xf1, 0x5b, 0x15, 0x1b, 0x6b, 0xdd, 0xce, 0x3a, 0x8a, 0x32, 0x2d, 0xe4, 0x1b, 0x5c, 0x4f, 0xa6, - 0x7f, 0xc0, 0x14, 0xcb, 0xe7, 0xe4, 0xcd, 0x52, 0x6f, 0xf3, 0x61, 0x34, 0xc3, 0xc9, 0x28, 0x01, - 0xa4, 0x99, 0xdc, 0x36, 0xa7, 0x87, 0xe6, 0xa3, 0xfe, 0xb7, 0xe0, 0xa8, 0x07, 0xa0, 0x98, 0x4d, - 0xf2, 0x76, 0xf0, 0x02, 0xa3, 0xf3, 0xac, 0xe4, 0x57, 0x84, 0xef, 0xc0, 0x3a, 0x21, 0xb2, 0x53, - 0x4d, 0xcd, 0xfc, 0x60, 0x07, 0x33, 0xd7, 0x76, 0x64, 0x6b, 0xb9, 0x80, 0x3b, 0x6b, 0xc3, 0x92, - 0x6e, 0xa8, 0x6f, 0x5c, 0x21, 0x7c, 0xf7, 0xf9, 0x1e, 0xdd, 0xe7, 0x52, 0x91, 0xaf, 0x57, 0x7c, - 0x32, 0xb7, 0xf3, 0x29, 0x61, 0xa7, 0x2e, 0xdd, 0xca, 0x45, 0xd6, 0x0b, 0xa4, 0xe4, 0x91, 0xc0, - 0xfb, 0x5c, 0x81, 0x57, 0x38, 0xf2, 0xe0, 0x45, 0x1d, 0x59, 0xd2, 0xbf, 0x18, 0xb7, 0xd3, 0xa4, - 0x06, 0xcd, 0x4a, 0x19, 0x3f, 0x21, 0xdc, 0x3c, 0xf5, 0xb9, 0xe2, 0xcc, 0xe5, 0xdf, 0x83, 0xd8, - 0x62, 0x09, 0x1f, 0x17, 0x4b, 0x90, 0xa9, 0xb4, 0x76, 0x5c, 0x82, 0xf5, 0x63, 0x6f, 0xfc, 0x8b, - 0x70, 0xa7, 0xa4, 0xe3, 0xba, 0xc7, 0x33, 0xc4, 0x2d, 0xbe, 0xa8, 0x5e, 0xf4, 0x76, 0x6f, 0x87, - 0xde, 0x4a, 0xe2, 0x17, 0x6f, 0x49, 0x09, 0x94, 0x74, 0xa9, 0x82, 0xf1, 0x37, 0xc2, 0xaf, 0x6e, - 0x6a, 0xf8, 0x1a, 0x66, 0x6d, 0xbc, 0x3c, 0x6b, 0x27, 0xff, 0xaf, 0xd3, 0x6d, 0x26, 0xec, 0x17, - 0x84, 0x6b, 0xc9, 0x5f, 0x4d, 0x5e, 0xc7, 0x0d, 0x16, 0xf2, 0x4f, 0x44, 0x10, 0x85, 0xb2, 0x83, - 0xba, 0x7b, 0xbd, 0x86, 0xdd, 0x8e, 0x67, 0x7a, 0xe3, 0xf8, 0xec, 0x34, 0x03, 0xe9, 0x22, 0x4e, - 0x0e, 0x71, 0x93, 0x85, 0xfc, 0x0b, 0x10, 0x89, 0x8e, 0x4c, 0x65, 0xc3, 0xbe, 0x19, 0xcf, 0xf4, - 0xe6, 0xf1, 0xd9, 0x69, 0x01, 0xd3, 0xf2, 0x9d, 0x24, 0xbf, 0x00, 0x19, 0x44, 0xc2, 0xc9, 0x5f, - 0xe8, 0x3c, 0x3f, 0x2d, 0x40, 0xba, 0x88, 0x1b, 0xbf, 0x23, 0x4c, 0x56, 0xdf, 0x64, 0xf2, 0x3e, - 0xc6, 0xc1, 0xfc, 0x94, 0x8b, 0xd4, 0xd3, 0xa9, 0x99, 0xa3, 0x57, 0x33, 0xbd, 0x3d, 0x3f, 0xa5, - 0x6f, 0x6e, 0x89, 0x42, 0x3e, 0xc3, 0xb5, 0x64, 0xa0, 0xf3, 0x4f, 0xd3, 0xce, 0xcb, 0x31, 0x5f, - 0xb8, 0xe4, 0x44, 0xd3, 0x54, 0x06, 0xe0, 0x5b, 0xcf, 0x7e, 0x89, 0x89, 0x85, 0x1b, 0xc9, 0x32, - 0xca, 0x90, 0x39, 0xc5, 0xae, 0xde, 0xce, 0xa9, 0x8d, 0x87, 0x45, 0x80, 0x2e, 0xee, 
0xcc, 0xf7, - 0xba, 0xba, 0x69, 0xaf, 0x6d, 0xf3, 0xe2, 0x52, 0xab, 0x3c, 0xb9, 0xd4, 0x2a, 0x4f, 0x2f, 0xb5, - 0xca, 0x8f, 0xb1, 0x86, 0x2e, 0x62, 0x0d, 0x3d, 0x89, 0x35, 0xf4, 0x34, 0xd6, 0xd0, 0x9f, 0xb1, - 0x86, 0x7e, 0xfe, 0x4b, 0xab, 0x7c, 0x55, 0x2f, 0xf4, 0xfe, 0x17, 0x00, 0x00, 0xff, 0xff, 0xe7, - 0xb5, 0x5f, 0xd5, 0xfb, 0x09, 0x00, 0x00, + // 545 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x51, 0x4d, 0x8b, 0x13, 0x3f, + 0x18, 0x6f, 0xfe, 0xdb, 0x42, 0x9b, 0x76, 0xf9, 0xcb, 0xe0, 0xa1, 0x14, 0x99, 0x96, 0x9e, 0x2a, + 0x62, 0x62, 0x57, 0x59, 0xbc, 0xee, 0xec, 0x41, 0x0a, 0xbe, 0x2c, 0x41, 0x3c, 0x88, 0x07, 0xd3, + 0xf6, 0xd9, 0x69, 0x6c, 0x27, 0x19, 0x92, 0x4c, 0x41, 0x4f, 0x5e, 0xbc, 0x0b, 0x7e, 0xa9, 0x1e, + 0xf7, 0xb8, 0xa7, 0x62, 0x47, 0xf0, 0xe8, 0x67, 0x90, 0x99, 0xe9, 0xec, 0xcc, 0x5a, 0x8b, 0xab, + 0xb7, 0x3c, 0xbf, 0x27, 0xbf, 0xb7, 0x04, 0xb3, 0xf9, 0x63, 0x43, 0x84, 0xa2, 0xf3, 0x68, 0x0c, + 0x5a, 0x82, 0x05, 0x43, 0x97, 0x20, 0xa7, 0x4a, 0xd3, 0xed, 0x82, 0x87, 0x82, 0xf2, 0x69, 0x20, + 0x8c, 0x11, 0x4a, 0x6a, 0xf0, 0x85, 0xb1, 0x9a, 0x5b, 0xa1, 0x24, 0x5d, 0x0e, 0xf9, 0x22, 0x9c, + 0xf1, 0x21, 0xf5, 0x41, 0x82, 0xe6, 0x16, 0xa6, 0x24, 0xd4, 0xca, 0x2a, 0xe7, 0x6e, 0x46, 0x25, + 0x3c, 0x14, 0xe4, 0xb7, 0x54, 0x92, 0x53, 0x3b, 0xf7, 0x7d, 0x61, 0x67, 0xd1, 0x98, 0x4c, 0x54, + 0x40, 0x7d, 0xe5, 0x2b, 0x9a, 0x2a, 0x8c, 0xa3, 0xf3, 0x74, 0x4a, 0x87, 0xf4, 0x94, 0x29, 0x77, + 0x1e, 0x15, 0xa1, 0x02, 0x3e, 0x99, 0x09, 0x09, 0xfa, 0x3d, 0x0d, 0xe7, 0x7e, 0x02, 0x18, 0x1a, + 0x80, 0xe5, 0x74, 0xb9, 0x93, 0xa7, 0x43, 0xf7, 0xb1, 0x74, 0x24, 0xad, 0x08, 0x60, 0x87, 0x70, + 0xfc, 0x27, 0x82, 0x99, 0xcc, 0x20, 0xe0, 0x3b, 0xbc, 0x87, 0xfb, 0x78, 0x91, 0x15, 0x0b, 0x2a, + 0xa4, 0x35, 0x56, 0xff, 0x4a, 0xea, 0x7f, 0x42, 0xb8, 0x39, 0x92, 0xc2, 0x0a, 0xbe, 0x10, 0x1f, + 0x40, 0x3b, 0x3d, 0x5c, 0x95, 0x3c, 0x80, 0x36, 0xea, 0xa1, 0x41, 0xc3, 0x6b, 0xad, 0xd6, 0xdd, + 0x4a, 0xbc, 0xee, 0x56, 0x9f, 0xf3, 0x00, 0x58, 0xba, 0x71, 0x5e, 0xe2, 0x9a, 0x8e, 0x16, 0x60, + 0xda, 0xff, 0xf5, 0x0e, 0x06, 0xcd, 0x23, 0x4a, 0x6e, 0xfc, 0xde, 0x84, 0x45, 0x0b, 0xf0, 0x0e, + 0xb7, 0x9a, 0xb5, 0x64, 0x32, 0x2c, 0x13, 0xeb, 0xff, 0x40, 0xb8, 0x5d, 0xca, 0x71, 0xaa, 0xe4, + 0xb9, 0xf0, 0xa3, 0x4c, 0xc0, 0x79, 0x8b, 0xeb, 0xc9, 0xeb, 0x4e, 0xb9, 0xe5, 0x69, 0xb0, 0xe6, + 0xd1, 0x83, 0x92, 0xeb, 0x55, 0x59, 0x12, 0xce, 0xfd, 0x04, 0x30, 0x24, 0xb9, 0x4d, 0x96, 0x43, + 0xf2, 0x62, 0xfc, 0x0e, 0x26, 0xf6, 0x19, 0x58, 0xee, 0x39, 0x5b, 0x5b, 0x5c, 0x60, 0xec, 0x4a, + 0xd5, 0x09, 0x71, 0x4b, 0x14, 0xee, 0x79, 0xb7, 0xe3, 0xbf, 0xe8, 0x56, 0x0a, 0xef, 0xdd, 0xde, + 0x7a, 0xb5, 0x4a, 0xa0, 0x61, 0xd7, 0x1c, 0xfa, 0xdf, 0x11, 0xbe, 0xb3, 0xaf, 0xf0, 0x53, 0x61, + 0xac, 0xf3, 0x66, 0xa7, 0x34, 0xb9, 0x59, 0xe9, 0x84, 0x9d, 0x56, 0xbe, 0xb5, 0x8d, 0x51, 0xcf, + 0x91, 0x52, 0xe1, 0x19, 0xae, 0x09, 0x0b, 0x41, 0xde, 0xf4, 0xf4, 0xdf, 0x9a, 0x5e, 0x4b, 0x5d, + 0xfc, 0xec, 0x28, 0x51, 0x66, 0x99, 0x41, 0xff, 0x0b, 0xc2, 0xd5, 0xe4, 0xab, 0x9d, 0x7b, 0xb8, + 0xc1, 0x43, 0xf1, 0x44, 0xab, 0x28, 0x34, 0x6d, 0xd4, 0x3b, 0x18, 0x34, 0xbc, 0xc3, 0x78, 0xdd, + 0x6d, 0x9c, 0x9c, 0x8d, 0x32, 0x90, 0x15, 0x7b, 0x67, 0x88, 0x9b, 0x3c, 0x14, 0xaf, 0x40, 0x27, + 0x39, 0xb2, 0x94, 0x0d, 0xef, 0xff, 0x78, 0xdd, 0x6d, 0x9e, 0x9c, 0x8d, 0x72, 0x98, 0x95, 0xef, + 0x24, 0xfa, 0x1a, 0x8c, 0x8a, 0xf4, 0x04, 0x4c, 0xfb, 0xa0, 0xd0, 0x67, 0x39, 0xc8, 0x8a, 0xbd, + 0x47, 0x56, 0x1b, 0xb7, 0x72, 0xb1, 0x71, 0x2b, 0x97, 0x1b, 0xb7, 0xf2, 0x31, 0x76, 0xd1, 0x2a, + 0x76, 0xd1, 0x45, 
0xec, 0xa2, 0xcb, 0xd8, 0x45, 0x5f, 0x63, 0x17, 0x7d, 0xfe, 0xe6, 0x56, 0x5e, + 0xd7, 0xf3, 0xd2, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x1d, 0xfb, 0x23, 0x89, 0xaa, 0x04, 0x00, + 0x00, } diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto index 5b0d4f0065b8..82508ca60ef3 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto @@ -29,69 +29,6 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1alpha1"; -// AdmissionHookClientConfig contains the information to make a TLS -// connection with the webhook -message AdmissionHookClientConfig { - // Service is a reference to the service for this webhook. If there is only - // one port open for the service, that port will be used. If there are multiple - // ports open, port 443 will be used if it is open, otherwise it is an error. - // Required - optional ServiceReference service = 1; - - // CABundle is a PEM encoded CA bundle which will be used to validate webhook's server certificate. - // Required - optional bytes caBundle = 2; -} - -// ExternalAdmissionHook describes an external admission webhook and the -// resources and operations it applies to. -message ExternalAdmissionHook { - // The name of the external admission webhook. - // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where - // "imagepolicy" is the name of the webhook, and kubernetes.io is the name - // of the organization. - // Required. - optional string name = 1; - - // ClientConfig defines how to communicate with the hook. - // Required - optional AdmissionHookClientConfig clientConfig = 2; - - // Rules describes what operations on what resources/subresources the webhook cares about. - // The webhook cares about an operation if it matches _any_ Rule. - repeated RuleWithOperations rules = 3; - - // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - - // allowed values are Ignore or Fail. Defaults to Ignore. - // +optional - optional string failurePolicy = 4; -} - -// ExternalAdmissionHookConfiguration describes the configuration of initializers. -message ExternalAdmissionHookConfiguration { - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // ExternalAdmissionHooks is a list of external admission webhooks and the - // affected resources and operations. - // +optional - // +patchMergeKey=name - // +patchStrategy=merge - repeated ExternalAdmissionHook externalAdmissionHooks = 2; -} - -// ExternalAdmissionHookConfigurationList is a list of ExternalAdmissionHookConfiguration. -message ExternalAdmissionHookConfigurationList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // List of ExternalAdmissionHookConfiguration. - repeated ExternalAdmissionHookConfiguration items = 2; -} - // Initializer describes the name and the failure policy of an initializer, and // what resources it applies to. 
message Initializer { @@ -169,28 +106,3 @@ message Rule { repeated string resources = 3; } -// RuleWithOperations is a tuple of Operations and Resources. It is recommended to make -// sure that all the tuple expansions are valid. -message RuleWithOperations { - // Operations is the operations the admission hook cares about - CREATE, UPDATE, or * - // for all operations. - // If '*' is present, the length of the slice must be one. - // Required. - repeated string operations = 1; - - // Rule is embedded, it describes other criteria of the rule, like - // APIGroups, APIVersions, Resources, etc. - optional Rule rule = 2; -} - -// ServiceReference holds a reference to Service.legacy.k8s.io -message ServiceReference { - // Namespace is the namespace of the service - // Required - optional string namespace = 1; - - // Name is the name of the service - // Required - optional string name = 2; -} - diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go index e178cf743ef4..e9a4164c3757 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go @@ -45,8 +45,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &InitializerConfiguration{}, &InitializerConfigurationList{}, - &ExternalAdmissionHookConfiguration{}, - &ExternalAdmissionHookConfigurationList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go index d4827e59d33f..a245f1e858f4 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go @@ -104,116 +104,3 @@ type Rule struct { // Required. Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` } - -type FailurePolicyType string - -const ( - // Ignore means the initilizer is removed from the initializers list of an - // object if the initializer is timed out. - Ignore FailurePolicyType = "Ignore" - // For 1.7, only "Ignore" is allowed. "Fail" will be allowed when the - // extensible admission feature is beta. - Fail FailurePolicyType = "Fail" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ExternalAdmissionHookConfiguration describes the configuration of initializers. -type ExternalAdmissionHookConfiguration struct { - metav1.TypeMeta `json:",inline"` - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // ExternalAdmissionHooks is a list of external admission webhooks and the - // affected resources and operations. - // +optional - // +patchMergeKey=name - // +patchStrategy=merge - ExternalAdmissionHooks []ExternalAdmissionHook `json:"externalAdmissionHooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=externalAdmissionHooks"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ExternalAdmissionHookConfigurationList is a list of ExternalAdmissionHookConfiguration. -type ExternalAdmissionHookConfigurationList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // List of ExternalAdmissionHookConfiguration. - Items []ExternalAdmissionHookConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// ExternalAdmissionHook describes an external admission webhook and the -// resources and operations it applies to. -type ExternalAdmissionHook struct { - // The name of the external admission webhook. - // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where - // "imagepolicy" is the name of the webhook, and kubernetes.io is the name - // of the organization. - // Required. - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - - // ClientConfig defines how to communicate with the hook. - // Required - ClientConfig AdmissionHookClientConfig `json:"clientConfig" protobuf:"bytes,2,opt,name=clientConfig"` - - // Rules describes what operations on what resources/subresources the webhook cares about. - // The webhook cares about an operation if it matches _any_ Rule. - Rules []RuleWithOperations `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` - - // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - - // allowed values are Ignore or Fail. Defaults to Ignore. - // +optional - FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,4,opt,name=failurePolicy,casttype=FailurePolicyType"` -} - -// RuleWithOperations is a tuple of Operations and Resources. It is recommended to make -// sure that all the tuple expansions are valid. -type RuleWithOperations struct { - // Operations is the operations the admission hook cares about - CREATE, UPDATE, or * - // for all operations. - // If '*' is present, the length of the slice must be one. - // Required. - Operations []OperationType `json:"operations,omitempty" protobuf:"bytes,1,rep,name=operations,casttype=OperationType"` - // Rule is embedded, it describes other criteria of the rule, like - // APIGroups, APIVersions, Resources, etc. - Rule `json:",inline" protobuf:"bytes,2,opt,name=rule"` -} - -type OperationType string - -// The constants should be kept in sync with those defined in k8s.io/kubernetes/pkg/admission/interface.go. -const ( - OperationAll OperationType = "*" - Create OperationType = "CREATE" - Update OperationType = "UPDATE" - Delete OperationType = "DELETE" - Connect OperationType = "CONNECT" -) - -// AdmissionHookClientConfig contains the information to make a TLS -// connection with the webhook -type AdmissionHookClientConfig struct { - // Service is a reference to the service for this webhook. If there is only - // one port open for the service, that port will be used. If there are multiple - // ports open, port 443 will be used if it is open, otherwise it is an error. - // Required - Service ServiceReference `json:"service" protobuf:"bytes,1,opt,name=service"` - // CABundle is a PEM encoded CA bundle which will be used to validate webhook's server certificate. 
- // Required - CABundle []byte `json:"caBundle" protobuf:"bytes,2,opt,name=caBundle"` -} - -// ServiceReference holds a reference to Service.legacy.k8s.io -type ServiceReference struct { - // Namespace is the namespace of the service - // Required - Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` - // Name is the name of the service - // Required - Name string `json:"name" protobuf:"bytes,2,opt,name=name"` -} diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go index 0b30ecc802ed..e2494e5d7765 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go @@ -27,48 +27,6 @@ package v1alpha1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE -var map_AdmissionHookClientConfig = map[string]string{ - "": "AdmissionHookClientConfig contains the information to make a TLS connection with the webhook", - "service": "Service is a reference to the service for this webhook. If there is only one port open for the service, that port will be used. If there are multiple ports open, port 443 will be used if it is open, otherwise it is an error. Required", - "caBundle": "CABundle is a PEM encoded CA bundle which will be used to validate webhook's server certificate. Required", -} - -func (AdmissionHookClientConfig) SwaggerDoc() map[string]string { - return map_AdmissionHookClientConfig -} - -var map_ExternalAdmissionHook = map[string]string{ - "": "ExternalAdmissionHook describes an external admission webhook and the resources and operations it applies to.", - "name": "The name of the external admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", - "clientConfig": "ClientConfig defines how to communicate with the hook. Required", - "rules": "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule.", - "failurePolicy": "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Ignore.", -} - -func (ExternalAdmissionHook) SwaggerDoc() map[string]string { - return map_ExternalAdmissionHook -} - -var map_ExternalAdmissionHookConfiguration = map[string]string{ - "": "ExternalAdmissionHookConfiguration describes the configuration of initializers.", - "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", - "externalAdmissionHooks": "ExternalAdmissionHooks is a list of external admission webhooks and the affected resources and operations.", -} - -func (ExternalAdmissionHookConfiguration) SwaggerDoc() map[string]string { - return map_ExternalAdmissionHookConfiguration -} - -var map_ExternalAdmissionHookConfigurationList = map[string]string{ - "": "ExternalAdmissionHookConfigurationList is a list of ExternalAdmissionHookConfiguration.", - "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "items": "List of ExternalAdmissionHookConfiguration.", -} - -func (ExternalAdmissionHookConfigurationList) SwaggerDoc() map[string]string { - return map_ExternalAdmissionHookConfigurationList -} - var map_Initializer = map[string]string{ "": "Initializer describes the name and the failure policy of an initializer, and what resources it applies to.", "name": "Name is the identifier of the initializer. It will be added to the object that needs to be initialized. Name should be fully qualified, e.g., alwayspullimages.kubernetes.io, where \"alwayspullimages\" is the name of the webhook, and kubernetes.io is the name of the organization. Required", @@ -110,23 +68,4 @@ func (Rule) SwaggerDoc() map[string]string { return map_Rule } -var map_RuleWithOperations = map[string]string{ - "": "RuleWithOperations is a tuple of Operations and Resources. It is recommended to make sure that all the tuple expansions are valid.", - "operations": "Operations is the operations the admission hook cares about - CREATE, UPDATE, or * for all operations. If '*' is present, the length of the slice must be one. Required.", -} - -func (RuleWithOperations) SwaggerDoc() map[string]string { - return map_RuleWithOperations -} - -var map_ServiceReference = map[string]string{ - "": "ServiceReference holds a reference to Service.legacy.k8s.io", - "namespace": "Namespace is the namespace of the service Required", - "name": "Name is the name of the service Required", -} - -func (ServiceReference) SwaggerDoc() map[string]string { - return map_ServiceReference -} - // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go index 118fed750312..850de37c4871 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go @@ -21,187 +21,9 @@ limitations under the License. package v1alpha1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AdmissionHookClientConfig).DeepCopyInto(out.(*AdmissionHookClientConfig)) - return nil - }, InType: reflect.TypeOf(&AdmissionHookClientConfig{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ExternalAdmissionHook).DeepCopyInto(out.(*ExternalAdmissionHook)) - return nil - }, InType: reflect.TypeOf(&ExternalAdmissionHook{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ExternalAdmissionHookConfiguration).DeepCopyInto(out.(*ExternalAdmissionHookConfiguration)) - return nil - }, InType: reflect.TypeOf(&ExternalAdmissionHookConfiguration{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ExternalAdmissionHookConfigurationList).DeepCopyInto(out.(*ExternalAdmissionHookConfigurationList)) - return nil - }, InType: reflect.TypeOf(&ExternalAdmissionHookConfigurationList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Initializer).DeepCopyInto(out.(*Initializer)) - return nil - }, InType: reflect.TypeOf(&Initializer{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*InitializerConfiguration).DeepCopyInto(out.(*InitializerConfiguration)) - return nil - }, InType: reflect.TypeOf(&InitializerConfiguration{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*InitializerConfigurationList).DeepCopyInto(out.(*InitializerConfigurationList)) - return nil - }, InType: reflect.TypeOf(&InitializerConfigurationList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Rule).DeepCopyInto(out.(*Rule)) - return nil - }, InType: reflect.TypeOf(&Rule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RuleWithOperations).DeepCopyInto(out.(*RuleWithOperations)) - return nil - }, InType: reflect.TypeOf(&RuleWithOperations{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceReference).DeepCopyInto(out.(*ServiceReference)) - return nil - }, InType: reflect.TypeOf(&ServiceReference{})}, - ) -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AdmissionHookClientConfig) DeepCopyInto(out *AdmissionHookClientConfig) { - *out = *in - out.Service = in.Service - if in.CABundle != nil { - in, out := &in.CABundle, &out.CABundle - *out = make([]byte, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionHookClientConfig. -func (in *AdmissionHookClientConfig) DeepCopy() *AdmissionHookClientConfig { - if in == nil { - return nil - } - out := new(AdmissionHookClientConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ExternalAdmissionHook) DeepCopyInto(out *ExternalAdmissionHook) { - *out = *in - in.ClientConfig.DeepCopyInto(&out.ClientConfig) - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]RuleWithOperations, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.FailurePolicy != nil { - in, out := &in.FailurePolicy, &out.FailurePolicy - if *in == nil { - *out = nil - } else { - *out = new(FailurePolicyType) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalAdmissionHook. -func (in *ExternalAdmissionHook) DeepCopy() *ExternalAdmissionHook { - if in == nil { - return nil - } - out := new(ExternalAdmissionHook) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExternalAdmissionHookConfiguration) DeepCopyInto(out *ExternalAdmissionHookConfiguration) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.ExternalAdmissionHooks != nil { - in, out := &in.ExternalAdmissionHooks, &out.ExternalAdmissionHooks - *out = make([]ExternalAdmissionHook, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalAdmissionHookConfiguration. -func (in *ExternalAdmissionHookConfiguration) DeepCopy() *ExternalAdmissionHookConfiguration { - if in == nil { - return nil - } - out := new(ExternalAdmissionHookConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ExternalAdmissionHookConfiguration) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExternalAdmissionHookConfigurationList) DeepCopyInto(out *ExternalAdmissionHookConfigurationList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ExternalAdmissionHookConfiguration, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalAdmissionHookConfigurationList. -func (in *ExternalAdmissionHookConfigurationList) DeepCopy() *ExternalAdmissionHookConfigurationList { - if in == nil { - return nil - } - out := new(ExternalAdmissionHookConfigurationList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ExternalAdmissionHookConfigurationList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Initializer) DeepCopyInto(out *Initializer) { *out = *in @@ -323,41 +145,3 @@ func (in *Rule) DeepCopy() *Rule { in.DeepCopyInto(out) return out } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *RuleWithOperations) DeepCopyInto(out *RuleWithOperations) { - *out = *in - if in.Operations != nil { - in, out := &in.Operations, &out.Operations - *out = make([]OperationType, len(*in)) - copy(*out, *in) - } - in.Rule.DeepCopyInto(&out.Rule) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleWithOperations. -func (in *RuleWithOperations) DeepCopy() *RuleWithOperations { - if in == nil { - return nil - } - out := new(RuleWithOperations) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceReference. -func (in *ServiceReference) DeepCopy() *ServiceReference { - if in == nil { - return nil - } - out := new(ServiceReference) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/BUILD b/vendor/k8s.io/api/admissionregistration/v1beta1/BUILD new file mode 100644 index 000000000000..dbcfadd2bfb6 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/BUILD @@ -0,0 +1,44 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "generated.pb.go", + "register.go", + "types.go", + "types_swagger_doc_generated.go", + "zz_generated.deepcopy.go", + ], + importpath = "k8s.io/api/admissionregistration/v1beta1", + deps = [ + "//vendor/github.com/gogo/protobuf/proto:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) + +filegroup( + name = "go_default_library_protos", + srcs = ["generated.proto"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go b/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go new file mode 100644 index 000000000000..387af54a6d93 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go @@ -0,0 +1,25 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true + +// Package v1beta1 is the v1beta1 version of the API. +// AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration +// InitializerConfiguration and validatingWebhookConfiguration is for the +// new dynamic admission controller configuration. 
+// +groupName=admissionregistration.k8s.io +package v1beta1 diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go new file mode 100644 index 000000000000..392ebf99504d --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go @@ -0,0 +1,2150 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto +// DO NOT EDIT! + +/* + Package v1beta1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto + + It has these top-level messages: + MutatingWebhookConfiguration + MutatingWebhookConfigurationList + Rule + RuleWithOperations + ServiceReference + ValidatingWebhookConfiguration + ValidatingWebhookConfigurationList + Webhook + WebhookClientConfig +*/ +package v1beta1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *MutatingWebhookConfiguration) Reset() { *m = MutatingWebhookConfiguration{} } +func (*MutatingWebhookConfiguration) ProtoMessage() {} +func (*MutatingWebhookConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{0} +} + +func (m *MutatingWebhookConfigurationList) Reset() { *m = MutatingWebhookConfigurationList{} } +func (*MutatingWebhookConfigurationList) ProtoMessage() {} +func (*MutatingWebhookConfigurationList) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{1} +} + +func (m *Rule) Reset() { *m = Rule{} } +func (*Rule) ProtoMessage() {} +func (*Rule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *RuleWithOperations) Reset() { *m = RuleWithOperations{} } +func (*RuleWithOperations) ProtoMessage() {} +func (*RuleWithOperations) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *ServiceReference) Reset() { *m = ServiceReference{} } +func (*ServiceReference) ProtoMessage() {} +func (*ServiceReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *ValidatingWebhookConfiguration) Reset() { *m = ValidatingWebhookConfiguration{} } +func (*ValidatingWebhookConfiguration) ProtoMessage() {} +func (*ValidatingWebhookConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{5} +} + +func (m *ValidatingWebhookConfigurationList) Reset() { *m = ValidatingWebhookConfigurationList{} } +func (*ValidatingWebhookConfigurationList) ProtoMessage() {} +func (*ValidatingWebhookConfigurationList) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{6} +} + +func (m *Webhook) Reset() { *m = Webhook{} } +func (*Webhook) ProtoMessage() {} +func (*Webhook) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } +func (*WebhookClientConfig) ProtoMessage() {} +func (*WebhookClientConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } + +func init() { + proto.RegisterType((*MutatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfiguration") + proto.RegisterType((*MutatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfigurationList") + proto.RegisterType((*Rule)(nil), "k8s.io.api.admissionregistration.v1beta1.Rule") + proto.RegisterType((*RuleWithOperations)(nil), "k8s.io.api.admissionregistration.v1beta1.RuleWithOperations") + proto.RegisterType((*ServiceReference)(nil), "k8s.io.api.admissionregistration.v1beta1.ServiceReference") + proto.RegisterType((*ValidatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhookConfiguration") + proto.RegisterType((*ValidatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhookConfigurationList") + proto.RegisterType((*Webhook)(nil), "k8s.io.api.admissionregistration.v1beta1.Webhook") + proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.api.admissionregistration.v1beta1.WebhookClientConfig") +} +func (m *MutatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MutatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { + var i int + _ 
= i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + if len(m.Webhooks) > 0 { + for _, msg := range m.Webhooks { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *MutatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MutatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n2, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Rule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Rule) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.APIVersions) > 0 { + for _, s := range m.APIVersions { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *RuleWithOperations) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuleWithOperations) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Operations) > 0 { + for _, s := range m.Operations { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Rule.Size())) + n3, err := m.Rule.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *ServiceReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i += copy(dAtA[i:], m.Namespace) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], 
m.Name) + if m.Path != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) + i += copy(dAtA[i:], *m.Path) + } + return i, nil +} + +func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + if len(m.Webhooks) > 0 { + for _, msg := range m.Webhooks { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n5, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Webhook) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Webhook) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ClientConfig.Size())) + n6, err := m.ClientConfig.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + if len(m.Rules) > 0 { + for _, msg := range m.Rules { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.FailurePolicy != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) + i += copy(dAtA[i:], *m.FailurePolicy) + } + if m.NamespaceSelector != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NamespaceSelector.Size())) + n7, err := m.NamespaceSelector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + return i, nil +} + +func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Service != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Service.Size())) + n8, err := m.Service.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + if m.CABundle != nil { + dAtA[i] = 0x12 + i++ + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) + i += copy(dAtA[i:], m.CABundle) + } + if m.URL != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) + i += copy(dAtA[i:], *m.URL) + } + return i, nil +} + +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *MutatingWebhookConfiguration) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Webhooks) > 0 { + for _, e := range m.Webhooks { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *MutatingWebhookConfigurationList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Rule) Size() (n int) { + var l int + _ = l + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.APIVersions) > 0 { + for _, s := range m.APIVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RuleWithOperations) Size() (n int) { + var l int + _ = l + if len(m.Operations) > 0 { + for _, s := range m.Operations { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Rule.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ServiceReference) Size() (n int) { + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Path != nil { + l = len(*m.Path) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ValidatingWebhookConfiguration) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Webhooks) > 0 { + for _, e := range m.Webhooks { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingWebhookConfigurationList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Webhook) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ClientConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.FailurePolicy != nil { + l = len(*m.FailurePolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NamespaceSelector != 
nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *WebhookClientConfig) Size() (n int) { + var l int + _ = l + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CABundle != nil { + l = len(m.CABundle) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.URL != nil { + l = len(*m.URL) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *MutatingWebhookConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MutatingWebhookConfiguration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Webhooks:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Webhooks), "Webhook", "Webhook", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *MutatingWebhookConfigurationList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MutatingWebhookConfigurationList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "MutatingWebhookConfiguration", "MutatingWebhookConfiguration", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Rule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Rule{`, + `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, + `APIVersions:` + fmt.Sprintf("%v", this.APIVersions) + `,`, + `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, + `}`, + }, "") + return s +} +func (this *RuleWithOperations) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuleWithOperations{`, + `Operations:` + fmt.Sprintf("%v", this.Operations) + `,`, + `Rule:` + strings.Replace(strings.Replace(this.Rule.String(), "Rule", "Rule", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceReference{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Path:` + valueToStringGenerated(this.Path) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingWebhookConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ValidatingWebhookConfiguration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Webhooks:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Webhooks), "Webhook", "Webhook", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingWebhookConfigurationList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ValidatingWebhookConfigurationList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), 
"ValidatingWebhookConfiguration", "ValidatingWebhookConfiguration", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Webhook) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Webhook{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, + `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *WebhookClientConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WebhookClientConfig{`, + `Service:` + strings.Replace(fmt.Sprintf("%v", this.Service), "ServiceReference", "ServiceReference", 1) + `,`, + `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, + `URL:` + valueToStringGenerated(this.URL) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingWebhookConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Webhooks = append(m.Webhooks, Webhook{}) + if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingWebhookConfigurationList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, MutatingWebhookConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Rule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Rule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Rule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersions = append(m.APIVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuleWithOperations: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuleWithOperations: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operations", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operations = append(m.Operations, OperationType(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + 
} + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Rule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Path = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if 
skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingWebhookConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Webhooks = append(m.Webhooks, Webhook{}) + if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingWebhookConfigurationList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + 
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ValidatingWebhookConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Webhook) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Webhook: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Webhook: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, RuleWithOperations{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := FailurePolicyType(dAtA[iNdEx:postIndex]) + m.FailurePolicy = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NamespaceSelector == nil { + m.NamespaceSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WebhookClientConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WebhookClientConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Service == nil { + m.Service = &ServiceReference{} + } + if err := 
m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CABundle", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CABundle = append(m.CABundle[:0], dAtA[iNdEx:postIndex]...) + if m.CABundle == nil { + m.CABundle = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.URL = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: 
negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 916 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x54, 0xcf, 0x6f, 0x1b, 0x45, + 0x14, 0xf6, 0xd6, 0x8e, 0x6c, 0x8f, 0x6d, 0xd1, 0x0c, 0x20, 0x99, 0xa8, 0xda, 0xb5, 0x7c, 0x40, + 0x96, 0x50, 0x76, 0x71, 0x8a, 0x10, 0x42, 0x20, 0x94, 0x8d, 0x54, 0x88, 0x94, 0xb4, 0x66, 0x02, + 0xad, 0x84, 0x38, 0x30, 0x5e, 0xbf, 0xd8, 0x83, 0xf7, 0x97, 0x66, 0x66, 0xdd, 0xe6, 0x86, 0xc4, + 0x3f, 0x80, 0xc4, 0x1f, 0xc1, 0x5f, 0xc1, 0x3d, 0x37, 0x7a, 0x41, 0xf4, 0xb4, 0x22, 0xcb, 0x99, + 0x03, 0xd7, 0x9e, 0xd0, 0xce, 0xae, 0xbd, 0x76, 0x1c, 0xa7, 0xee, 0x85, 0x03, 0x37, 0xcf, 0xf7, + 0xde, 0xf7, 0xbd, 0xf7, 0x3d, 0xbf, 0xb7, 0xe8, 0xcb, 0xe9, 0x47, 0xc2, 0x64, 0x81, 0x35, 0x8d, + 0x86, 0xc0, 0x7d, 0x90, 0x20, 0xac, 0x19, 0xf8, 0xa3, 0x80, 0x5b, 0x79, 0x80, 0x86, 0xcc, 0xa2, + 0x23, 0x8f, 0x09, 0xc1, 0x02, 0x9f, 0xc3, 0x98, 0x09, 0xc9, 0xa9, 0x64, 0x81, 0x6f, 0xcd, 0xfa, + 0x43, 0x90, 0xb4, 0x6f, 0x8d, 0xc1, 0x07, 0x4e, 0x25, 0x8c, 0xcc, 0x90, 0x07, 0x32, 0xc0, 0xbd, + 0x8c, 0x69, 0xd2, 0x90, 0x99, 0x37, 0x32, 0xcd, 0x9c, 0xb9, 0xb7, 0x3f, 0x66, 0x72, 0x12, 0x0d, + 0x4d, 0x27, 0xf0, 0xac, 0x71, 0x30, 0x0e, 0x2c, 0x25, 0x30, 0x8c, 0xce, 0xd5, 0x4b, 0x3d, 0xd4, + 0xaf, 0x4c, 0x78, 0xaf, 0xbb, 0xd4, 0x92, 0x13, 0x70, 0xb0, 0x66, 0x6b, 0xc5, 0xf7, 0x4e, 0x8b, + 0x1c, 0x78, 0x26, 0xc1, 0x4f, 0x6b, 0x8b, 0x7d, 0x1a, 0x32, 0x01, 0x7c, 0x06, 0xdc, 0x0a, 0xa7, + 0xe3, 0x34, 0x26, 0x56, 0x13, 0x36, 0x79, 0xd9, 0xfb, 0xa0, 0x90, 0xf3, 0xa8, 0x33, 0x61, 0x3e, + 0xf0, 0x8b, 0x42, 0xc3, 0x03, 0x49, 0x6f, 0x6a, 0xc2, 0xda, 0xc4, 0xe2, 0x91, 0x2f, 0x99, 0x07, + 0x6b, 0x84, 0x0f, 0x5f, 0x45, 0x10, 0xce, 0x04, 0x3c, 0xba, 0xc6, 0xbb, 0xbf, 0x89, 0x17, 0x49, + 0xe6, 0x5a, 0xcc, 0x97, 0x42, 0xf2, 0xeb, 0xa4, 0xee, 0xef, 0x1a, 0xba, 0x77, 0x1a, 0x49, 0x2a, + 0x99, 0x3f, 0x7e, 0x02, 0xc3, 0x49, 0x10, 0x4c, 0x8f, 0x02, 0xff, 0x9c, 0x8d, 0xa3, 0xec, 0xef, + 0xc1, 0xdf, 0xa1, 0x5a, 0xea, 0x6c, 0x44, 0x25, 0x6d, 0x6b, 0x1d, 0xad, 0xd7, 0x38, 0x78, 0xdf, + 0x2c, 0xfe, 0xd3, 0x45, 0x21, 0x33, 0x9c, 0x8e, 0x53, 0x40, 0x98, 0x69, 0xb6, 0x39, 0xeb, 0x9b, + 0x8f, 0x86, 0xdf, 0x83, 0x23, 0x4f, 0x41, 0x52, 0x1b, 0x5f, 0xc6, 0x46, 0x29, 0x89, 0x0d, 0x54, + 0x60, 0x64, 0xa1, 0x8a, 0xcf, 0x50, 0x2d, 0xaf, 0x2c, 0xda, 0x77, 0x3a, 0xe5, 0x5e, 0xe3, 0xa0, + 0x6f, 0x6e, 0xbb, 0x35, 0x66, 0xce, 0xb4, 0x2b, 0x69, 0x09, 0x52, 0x7b, 0x9a, 0x0b, 0x75, 0xff, + 0xd6, 0x50, 0xe7, 0x36, 0x5f, 0x27, 0x4c, 0x48, 0xfc, 0xed, 0x9a, 0x37, 0x73, 0x3b, 0x6f, 0x29, + 0x5b, 0x39, 0xbb, 0x9b, 0x3b, 0xab, 0xcd, 0x91, 0x25, 0x5f, 0x53, 0xb4, 0xc3, 0x24, 0x78, 0x73, + 0x53, 0x0f, 0xb6, 0x37, 0x75, 0x5b, 0xe3, 0x76, 0x2b, 0x2f, 0xb9, 0x73, 0x9c, 0x8a, 0x93, 0xac, + 0x46, 0xf7, 0x67, 0x0d, 0x55, 0x48, 0xe4, 0x02, 0x7e, 0x0f, 0xd5, 0x69, 0xc8, 0x3e, 0xe7, 0x41, + 0x14, 0x8a, 0xb6, 0xd6, 0x29, 0xf7, 0xea, 0x76, 0x2b, 0x89, 0x8d, 0xfa, 0xe1, 0xe0, 0x38, 0x03, + 0x49, 0x11, 0xc7, 0x7d, 0xd4, 0xa0, 0x21, 0x7b, 0x0c, 0x5c, 0x2d, 0xbe, 0x6a, 0xb4, 0x6e, 0xbf, + 0x91, 0xc4, 0x46, 0xe3, 0x70, 0x70, 0x3c, 0x87, 0xc9, 0x72, 0x4e, 0xaa, 0xcf, 0x41, 0x04, 0x11, + 0x77, 0x40, 0xb4, 0xcb, 0x85, 0x3e, 0x99, 0x83, 0xa4, 0x88, 0x77, 0x7f, 0xd1, 0x10, 0x4e, 0xbb, + 0x7a, 0xc2, 0xe4, 0xe4, 0x51, 0x08, 0x99, 0x03, 0x81, 0x3f, 
0x43, 0x28, 0x58, 0xbc, 0xf2, 0x26, + 0x0d, 0xb5, 0x1f, 0x0b, 0xf4, 0x65, 0x6c, 0xb4, 0x16, 0xaf, 0xaf, 0x2e, 0x42, 0x20, 0x4b, 0x14, + 0x3c, 0x40, 0x15, 0x1e, 0xb9, 0xd0, 0xbe, 0xb3, 0xf6, 0xa7, 0xbd, 0x62, 0xb2, 0x69, 0x33, 0x76, + 0x33, 0x9f, 0xa0, 0x1a, 0x18, 0x51, 0x4a, 0xdd, 0x1f, 0x35, 0x74, 0xf7, 0x0c, 0xf8, 0x8c, 0x39, + 0x40, 0xe0, 0x1c, 0x38, 0xf8, 0x0e, 0x60, 0x0b, 0xd5, 0x7d, 0xea, 0x81, 0x08, 0xa9, 0x03, 0x6a, + 0x41, 0xea, 0xf6, 0x6e, 0xce, 0xad, 0x3f, 0x9c, 0x07, 0x48, 0x91, 0x83, 0x3b, 0xa8, 0x92, 0x3e, + 0x54, 0x5f, 0xf5, 0xa2, 0x4e, 0x9a, 0x4b, 0x54, 0x04, 0xdf, 0x43, 0x95, 0x90, 0xca, 0x49, 0xbb, + 0xac, 0x32, 0x6a, 0x69, 0x74, 0x40, 0xe5, 0x84, 0x28, 0xb4, 0xfb, 0x87, 0x86, 0xf4, 0xc7, 0xd4, + 0x65, 0xa3, 0xff, 0xdd, 0x3d, 0xfe, 0xa3, 0xa1, 0xee, 0xed, 0xce, 0xfe, 0x83, 0x8b, 0xf4, 0x56, + 0x2f, 0xf2, 0x8b, 0xed, 0x6d, 0xdd, 0xde, 0xfa, 0x86, 0x9b, 0xfc, 0xad, 0x8c, 0xaa, 0x79, 0xfa, + 0x62, 0x33, 0xb4, 0x8d, 0x9b, 0xf1, 0x14, 0x35, 0x1d, 0x97, 0x81, 0x2f, 0x33, 0xe9, 0x7c, 0xb7, + 0x3f, 0x7d, 0xed, 0xd1, 0x1f, 0x2d, 0x89, 0xd8, 0x6f, 0xe5, 0x85, 0x9a, 0xcb, 0x28, 0x59, 0x29, + 0x84, 0x29, 0xda, 0x49, 0x4f, 0x20, 0xbb, 0xe6, 0xc6, 0xc1, 0x27, 0xaf, 0x77, 0x4d, 0xab, 0xa7, + 0x5d, 0x4c, 0x22, 0x8d, 0x09, 0x92, 0x29, 0xe3, 0x13, 0xd4, 0x3a, 0xa7, 0xcc, 0x8d, 0x38, 0x0c, + 0x02, 0x97, 0x39, 0x17, 0xed, 0x8a, 0x1a, 0xc3, 0xbb, 0x49, 0x6c, 0xb4, 0x1e, 0x2c, 0x07, 0x5e, + 0xc6, 0xc6, 0xee, 0x0a, 0xa0, 0x4e, 0x7f, 0x95, 0x8c, 0x9f, 0xa1, 0xdd, 0xc5, 0xc9, 0x9d, 0x81, + 0x0b, 0x8e, 0x0c, 0x78, 0x7b, 0x47, 0x8d, 0xeb, 0xfe, 0x96, 0xdb, 0x42, 0x87, 0xe0, 0xce, 0xa9, + 0xf6, 0xdb, 0x49, 0x6c, 0xec, 0x3e, 0xbc, 0xae, 0x48, 0xd6, 0x8b, 0x74, 0x7f, 0xd5, 0xd0, 0x9b, + 0x37, 0x8c, 0x19, 0x53, 0x54, 0x15, 0xd9, 0xc7, 0x23, 0xdf, 0xda, 0x8f, 0xb7, 0x1f, 0xe2, 0xf5, + 0xaf, 0x8e, 0xdd, 0x48, 0x62, 0xa3, 0x3a, 0x47, 0xe7, 0xba, 0xb8, 0x87, 0x6a, 0x0e, 0xb5, 0x23, + 0x7f, 0x94, 0x7f, 0xf6, 0x9a, 0x76, 0x33, 0xdd, 0xf2, 0xa3, 0xc3, 0x0c, 0x23, 0x8b, 0x28, 0x7e, + 0x07, 0x95, 0x23, 0xee, 0xe6, 0x5f, 0x98, 0x6a, 0x12, 0x1b, 0xe5, 0xaf, 0xc9, 0x09, 0x49, 0x31, + 0x7b, 0xff, 0xf2, 0x4a, 0x2f, 0x3d, 0xbf, 0xd2, 0x4b, 0x2f, 0xae, 0xf4, 0xd2, 0x0f, 0x89, 0xae, + 0x5d, 0x26, 0xba, 0xf6, 0x3c, 0xd1, 0xb5, 0x17, 0x89, 0xae, 0xfd, 0x99, 0xe8, 0xda, 0x4f, 0x7f, + 0xe9, 0xa5, 0x6f, 0xaa, 0x79, 0x6b, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x78, 0x57, 0x76, 0x28, + 0x10, 0x0a, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto new file mode 100644 index 000000000000..513a3d167ee1 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto @@ -0,0 +1,261 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! 
+ +syntax = 'proto2'; + +package k8s.io.api.admissionregistration.v1beta1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. +message MutatingWebhookConfiguration { + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Webhooks is a list of webhooks and the affected resources and operations. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + repeated Webhook Webhooks = 2; +} + +// MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration. +message MutatingWebhookConfigurationList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of MutatingWebhookConfiguration. + repeated MutatingWebhookConfiguration items = 2; +} + +// Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended +// to make sure that all the tuple expansions are valid. +message Rule { + // APIGroups is the API groups the resources belong to. '*' is all groups. + // If '*' is present, the length of the slice must be one. + // Required. + repeated string apiGroups = 1; + + // APIVersions is the API versions the resources belong to. '*' is all versions. + // If '*' is present, the length of the slice must be one. + // Required. + repeated string apiVersions = 2; + + // Resources is a list of resources this rule applies to. + // + // For example: + // 'pods' means pods. + // 'pods/log' means the log subresource of pods. + // '*' means all resources, but not subresources. + // 'pods/*' means all subresources of pods. + // '*/scale' means all scale subresources. + // '*/*' means all resources and their subresources. + // + // If wildcard is present, the validation rule will ensure resources do not + // overlap with each other. + // + // Depending on the enclosing object, subresources might not be allowed. + // Required. + repeated string resources = 3; +} + +// RuleWithOperations is a tuple of Operations and Resources. It is recommended to make +// sure that all the tuple expansions are valid. +message RuleWithOperations { + // Operations is the operations the admission hook cares about - CREATE, UPDATE, or * + // for all operations. + // If '*' is present, the length of the slice must be one. + // Required. + repeated string operations = 1; + + // Rule is embedded, it describes other criteria of the rule, like + // APIGroups, APIVersions, Resources, etc. + optional Rule rule = 2; +} + +// ServiceReference holds a reference to Service.legacy.k8s.io +message ServiceReference { + // `namespace` is the namespace of the service. + // Required + optional string namespace = 1; + + // `name` is the name of the service. 
+ // Required + optional string name = 2; + + // `path` is an optional URL path which will be sent in any request to + // this service. + // +optional + optional string path = 3; +} + +// ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. +message ValidatingWebhookConfiguration { + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Webhooks is a list of webhooks and the affected resources and operations. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + repeated Webhook Webhooks = 2; +} + +// ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration. +message ValidatingWebhookConfigurationList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of ValidatingWebhookConfiguration. + repeated ValidatingWebhookConfiguration items = 2; +} + +// Webhook describes an admission webhook and the resources and operations it applies to. +message Webhook { + // The name of the admission webhook. + // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where + // "imagepolicy" is the name of the webhook, and kubernetes.io is the name + // of the organization. + // Required. + optional string name = 1; + + // ClientConfig defines how to communicate with the hook. + // Required + optional WebhookClientConfig clientConfig = 2; + + // Rules describes what operations on what resources/subresources the webhook cares about. + // The webhook cares about an operation if it matches _any_ Rule. + repeated RuleWithOperations rules = 3; + + // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - + // allowed values are Ignore or Fail. Defaults to Ignore. + // +optional + optional string failurePolicy = 4; + + // NamespaceSelector decides whether to run the webhook on an object based + // on whether the namespace for that object matches the selector. If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. If the object is other cluster scoped resource, + // it is not subjected to the webhook. + // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the webhook on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. 
+ // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; +} + +// WebhookClientConfig contains the information to make a TLS +// connection with the webhook +message WebhookClientConfig { + // `url` gives the location of the webhook, in standard URL form + // (`[scheme://]host:port/path`). Exactly one of `url` or `service` + // must be specified. + // + // The `host` should not refer to a service running in the cluster; use + // the `service` field instead. The host might be resolved via external + // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve + // in-cluster DNS as that would be a layering violation). `host` may + // also be an IP address. + // + // Please note that using `localhost` or `127.0.0.1` as a `host` is + // risky unless you take great care to run this webhook on all hosts + // which run an apiserver which might need to make calls to this + // webhook. Such installs are likely to be non-portable, i.e., not easy + // to turn up in a new cluster. + // + // The scheme must be "https"; the URL must begin with "https://". + // + // A path is optional, and if present may be any string permissible in + // a URL. You may use the path to pass an arbitrary string to the + // webhook, for example, a cluster identifier. + // + // Attempting to use a user or basic auth e.g. "user:password@" is not + // allowed. Fragments ("#...") and query parameters ("?...") are not + // allowed, either. + // + // +optional + optional string url = 3; + + // `service` is a reference to the service for this webhook. Either + // `service` or `url` must be specified. + // + // If the webhook is running within the cluster, then you should use `service`. + // + // If there is only one port open for the service, that port will be + // used. If there are multiple ports open, port 443 will be used if it + // is open, otherwise it is an error. + // + // +optional + optional ServiceReference service = 1; + + // `caBundle` is a PEM encoded CA bundle which will be used to validate + // the webhook's server certificate. + // Required. + optional bytes caBundle = 2; +} + diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/register.go b/vendor/k8s.io/api/admissionregistration/v1beta1/register.go new file mode 100644 index 000000000000..d126da9fb719 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const GroupName = "admissionregistration.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &ValidatingWebhookConfiguration{}, + &ValidatingWebhookConfigurationList{}, + &MutatingWebhookConfiguration{}, + &MutatingWebhookConfigurationList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go new file mode 100644 index 000000000000..30d2750ce335 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go @@ -0,0 +1,281 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended +// to make sure that all the tuple expansions are valid. +type Rule struct { + // APIGroups is the API groups the resources belong to. '*' is all groups. + // If '*' is present, the length of the slice must be one. + // Required. + APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,1,rep,name=apiGroups"` + + // APIVersions is the API versions the resources belong to. '*' is all versions. + // If '*' is present, the length of the slice must be one. + // Required. + APIVersions []string `json:"apiVersions,omitempty" protobuf:"bytes,2,rep,name=apiVersions"` + + // Resources is a list of resources this rule applies to. + // + // For example: + // 'pods' means pods. + // 'pods/log' means the log subresource of pods. + // '*' means all resources, but not subresources. + // 'pods/*' means all subresources of pods. + // '*/scale' means all scale subresources. + // '*/*' means all resources and their subresources. + // + // If wildcard is present, the validation rule will ensure resources do not + // overlap with each other. + // + // Depending on the enclosing object, subresources might not be allowed. + // Required. 
+ Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` +} + +type FailurePolicyType string + +const ( + // Ignore means the initializer is removed from the initializers list of an + // object if the initializer is timed out. + Ignore FailurePolicyType = "Ignore" + // For 1.7, only "Ignore" is allowed. "Fail" will be allowed when the + // extensible admission feature is beta. + Fail FailurePolicyType = "Fail" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. +type ValidatingWebhookConfiguration struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Webhooks is a list of webhooks and the affected resources and operations. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + Webhooks []Webhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration. +type ValidatingWebhookConfigurationList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // List of ValidatingWebhookConfiguration. + Items []ValidatingWebhookConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. +type MutatingWebhookConfiguration struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Webhooks is a list of webhooks and the affected resources and operations. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + Webhooks []Webhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration. +type MutatingWebhookConfigurationList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // List of MutatingWebhookConfiguration. + Items []MutatingWebhookConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// Webhook describes an admission webhook and the resources and operations it applies to. +type Webhook struct { + // The name of the admission webhook. 
+ // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where + // "imagepolicy" is the name of the webhook, and kubernetes.io is the name + // of the organization. + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // ClientConfig defines how to communicate with the hook. + // Required + ClientConfig WebhookClientConfig `json:"clientConfig" protobuf:"bytes,2,opt,name=clientConfig"` + + // Rules describes what operations on what resources/subresources the webhook cares about. + // The webhook cares about an operation if it matches _any_ Rule. + Rules []RuleWithOperations `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` + + // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - + // allowed values are Ignore or Fail. Defaults to Ignore. + // +optional + FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,4,opt,name=failurePolicy,casttype=FailurePolicyType"` + + // NamespaceSelector decides whether to run the webhook on an object based + // on whether the namespace for that object matches the selector. If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. If the object is other cluster scoped resource, + // it is not subjected to the webhook. + // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the webhook on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. + // +optional + NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,5,opt,name=namespaceSelector"` +} + +// RuleWithOperations is a tuple of Operations and Resources. It is recommended to make +// sure that all the tuple expansions are valid. +type RuleWithOperations struct { + // Operations is the operations the admission hook cares about - CREATE, UPDATE, or * + // for all operations. + // If '*' is present, the length of the slice must be one. + // Required. + Operations []OperationType `json:"operations,omitempty" protobuf:"bytes,1,rep,name=operations,casttype=OperationType"` + // Rule is embedded, it describes other criteria of the rule, like + // APIGroups, APIVersions, Resources, etc. + Rule `json:",inline" protobuf:"bytes,2,opt,name=rule"` +} + +type OperationType string + +// The constants should be kept in sync with those defined in k8s.io/kubernetes/pkg/admission/interface.go. 
+const ( + OperationAll OperationType = "*" + Create OperationType = "CREATE" + Update OperationType = "UPDATE" + Delete OperationType = "DELETE" + Connect OperationType = "CONNECT" +) + +// WebhookClientConfig contains the information to make a TLS +// connection with the webhook +type WebhookClientConfig struct { + // `url` gives the location of the webhook, in standard URL form + // (`[scheme://]host:port/path`). Exactly one of `url` or `service` + // must be specified. + // + // The `host` should not refer to a service running in the cluster; use + // the `service` field instead. The host might be resolved via external + // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve + // in-cluster DNS as that would be a layering violation). `host` may + // also be an IP address. + // + // Please note that using `localhost` or `127.0.0.1` as a `host` is + // risky unless you take great care to run this webhook on all hosts + // which run an apiserver which might need to make calls to this + // webhook. Such installs are likely to be non-portable, i.e., not easy + // to turn up in a new cluster. + // + // The scheme must be "https"; the URL must begin with "https://". + // + // A path is optional, and if present may be any string permissible in + // a URL. You may use the path to pass an arbitrary string to the + // webhook, for example, a cluster identifier. + // + // Attempting to use a user or basic auth e.g. "user:password@" is not + // allowed. Fragments ("#...") and query parameters ("?...") are not + // allowed, either. + // + // +optional + URL *string `json:"url,omitempty" protobuf:"bytes,3,opt,name=url"` + + // `service` is a reference to the service for this webhook. Either + // `service` or `url` must be specified. + // + // If the webhook is running within the cluster, then you should use `service`. + // + // If there is only one port open for the service, that port will be + // used. If there are multiple ports open, port 443 will be used if it + // is open, otherwise it is an error. + // + // +optional + Service *ServiceReference `json:"service" protobuf:"bytes,1,opt,name=service"` + + // `caBundle` is a PEM encoded CA bundle which will be used to validate + // the webhook's server certificate. + // Required. + CABundle []byte `json:"caBundle" protobuf:"bytes,2,opt,name=caBundle"` +} + +// ServiceReference holds a reference to Service.legacy.k8s.io +type ServiceReference struct { + // `namespace` is the namespace of the service. + // Required + Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` + // `name` is the name of the service. + // Required + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` + + // `path` is an optional URL path which will be sent in any request to + // this service. + // +optional + Path *string `json:"path,omitempty" protobuf:"bytes,3,opt,name=path"` +} diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 000000000000..ea8c1e37f257 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,125 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_MutatingWebhookConfiguration = map[string]string{ + "": "MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", + "webhooks": "Webhooks is a list of webhooks and the affected resources and operations.", +} + +func (MutatingWebhookConfiguration) SwaggerDoc() map[string]string { + return map_MutatingWebhookConfiguration +} + +var map_MutatingWebhookConfigurationList = map[string]string{ + "": "MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "items": "List of MutatingWebhookConfiguration.", +} + +func (MutatingWebhookConfigurationList) SwaggerDoc() map[string]string { + return map_MutatingWebhookConfigurationList +} + +var map_Rule = map[string]string{ + "": "Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended to make sure that all the tuple expansions are valid.", + "apiGroups": "APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.", + "apiVersions": "APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required.", + "resources": "Resources is a list of resources this rule applies to.\n\nFor example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources.\n\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\n\nDepending on the enclosing object, subresources might not be allowed. Required.", +} + +func (Rule) SwaggerDoc() map[string]string { + return map_Rule +} + +var map_RuleWithOperations = map[string]string{ + "": "RuleWithOperations is a tuple of Operations and Resources. It is recommended to make sure that all the tuple expansions are valid.", + "operations": "Operations is the operations the admission hook cares about - CREATE, UPDATE, or * for all operations. If '*' is present, the length of the slice must be one. 
Required.", +} + +func (RuleWithOperations) SwaggerDoc() map[string]string { + return map_RuleWithOperations +} + +var map_ServiceReference = map[string]string{ + "": "ServiceReference holds a reference to Service.legacy.k8s.io", + "namespace": "`namespace` is the namespace of the service. Required", + "name": "`name` is the name of the service. Required", + "path": "`path` is an optional URL path which will be sent in any request to this service.", +} + +func (ServiceReference) SwaggerDoc() map[string]string { + return map_ServiceReference +} + +var map_ValidatingWebhookConfiguration = map[string]string{ + "": "ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", + "webhooks": "Webhooks is a list of webhooks and the affected resources and operations.", +} + +func (ValidatingWebhookConfiguration) SwaggerDoc() map[string]string { + return map_ValidatingWebhookConfiguration +} + +var map_ValidatingWebhookConfigurationList = map[string]string{ + "": "ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "items": "List of ValidatingWebhookConfiguration.", +} + +func (ValidatingWebhookConfigurationList) SwaggerDoc() map[string]string { + return map_ValidatingWebhookConfigurationList +} + +var map_Webhook = map[string]string{ + "": "Webhook describes an admission webhook and the resources and operations it applies to.", + "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", + "clientConfig": "ClientConfig defines how to communicate with the hook. Required", + "rules": "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule.", + "failurePolicy": "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Ignore.", + "namespaceSelector": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. 
If the object is other cluster scoped resource, it is not subjected to the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", +} + +func (Webhook) SwaggerDoc() map[string]string { + return map_Webhook +} + +var map_WebhookClientConfig = map[string]string{ + "": "WebhookClientConfig contains the information to make a TLS connection with the webhook", + "url": "`url` gives the location of the webhook, in standard URL form (`[scheme://]host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.", + "service": "`service` is a reference to the service for this webhook. Either `service` or `url` must be specified.\n\nIf the webhook is running within the cluster, then you should use `service`.\n\nIf there is only one port open for the service, that port will be used. If there are multiple ports open, port 443 will be used if it is open, otherwise it is an error.", + "caBundle": "`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. Required.", +} + +func (WebhookClientConfig) SwaggerDoc() map[string]string { + return map_WebhookClientConfig +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000000..fbd9ad329012 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,321 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutatingWebhookConfiguration) DeepCopyInto(out *MutatingWebhookConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Webhooks != nil { + in, out := &in.Webhooks, &out.Webhooks + *out = make([]Webhook, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingWebhookConfiguration. +func (in *MutatingWebhookConfiguration) DeepCopy() *MutatingWebhookConfiguration { + if in == nil { + return nil + } + out := new(MutatingWebhookConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MutatingWebhookConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutatingWebhookConfigurationList) DeepCopyInto(out *MutatingWebhookConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MutatingWebhookConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingWebhookConfigurationList. +func (in *MutatingWebhookConfigurationList) DeepCopy() *MutatingWebhookConfigurationList { + if in == nil { + return nil + } + out := new(MutatingWebhookConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MutatingWebhookConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Rule) DeepCopyInto(out *Rule) { + *out = *in + if in.APIGroups != nil { + in, out := &in.APIGroups, &out.APIGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.APIVersions != nil { + in, out := &in.APIVersions, &out.APIVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rule. 
+func (in *Rule) DeepCopy() *Rule { + if in == nil { + return nil + } + out := new(Rule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleWithOperations) DeepCopyInto(out *RuleWithOperations) { + *out = *in + if in.Operations != nil { + in, out := &in.Operations, &out.Operations + *out = make([]OperationType, len(*in)) + copy(*out, *in) + } + in.Rule.DeepCopyInto(&out.Rule) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleWithOperations. +func (in *RuleWithOperations) DeepCopy() *RuleWithOperations { + if in == nil { + return nil + } + out := new(RuleWithOperations) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceReference. +func (in *ServiceReference) DeepCopy() *ServiceReference { + if in == nil { + return nil + } + out := new(ServiceReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingWebhookConfiguration) DeepCopyInto(out *ValidatingWebhookConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Webhooks != nil { + in, out := &in.Webhooks, &out.Webhooks + *out = make([]Webhook, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingWebhookConfiguration. +func (in *ValidatingWebhookConfiguration) DeepCopy() *ValidatingWebhookConfiguration { + if in == nil { + return nil + } + out := new(ValidatingWebhookConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ValidatingWebhookConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingWebhookConfigurationList) DeepCopyInto(out *ValidatingWebhookConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ValidatingWebhookConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingWebhookConfigurationList. +func (in *ValidatingWebhookConfigurationList) DeepCopy() *ValidatingWebhookConfigurationList { + if in == nil { + return nil + } + out := new(ValidatingWebhookConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ValidatingWebhookConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Webhook) DeepCopyInto(out *Webhook) { + *out = *in + in.ClientConfig.DeepCopyInto(&out.ClientConfig) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]RuleWithOperations, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FailurePolicy != nil { + in, out := &in.FailurePolicy, &out.FailurePolicy + if *in == nil { + *out = nil + } else { + *out = new(FailurePolicyType) + **out = **in + } + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + if *in == nil { + *out = nil + } else { + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Webhook. +func (in *Webhook) DeepCopy() *Webhook { + if in == nil { + return nil + } + out := new(Webhook) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookClientConfig) DeepCopyInto(out *WebhookClientConfig) { + *out = *in + if in.URL != nil { + in, out := &in.URL, &out.URL + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + if in.Service != nil { + in, out := &in.Service, &out.Service + if *in == nil { + *out = nil + } else { + *out = new(ServiceReference) + (*in).DeepCopyInto(*out) + } + } + if in.CABundle != nil { + in, out := &in.CABundle, &out.CABundle + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookClientConfig. 
+func (in *WebhookClientConfig) DeepCopy() *WebhookClientConfig { + if in == nil { + return nil + } + out := new(WebhookClientConfig) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/apps/v1/BUILD b/vendor/k8s.io/api/apps/v1/BUILD new file mode 100644 index 000000000000..7902387f59b3 --- /dev/null +++ b/vendor/k8s.io/api/apps/v1/BUILD @@ -0,0 +1,43 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "generated.pb.go", + "register.go", + "types.go", + "types_swagger_doc_generated.go", + "zz_generated.deepcopy.go", + ], + importpath = "k8s.io/api/apps/v1", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/gogo/protobuf/proto:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", + ], +) + +filegroup( + name = "go_default_library_protos", + srcs = ["generated.proto"], + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/api/apps/v1/doc.go b/vendor/k8s.io/api/apps/v1/doc.go new file mode 100644 index 000000000000..cd84007b54cb --- /dev/null +++ b/vendor/k8s.io/api/apps/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true + +package v1 diff --git a/vendor/k8s.io/api/apps/v1/generated.pb.go b/vendor/k8s.io/api/apps/v1/generated.pb.go new file mode 100644 index 000000000000..38e7415b726b --- /dev/null +++ b/vendor/k8s.io/api/apps/v1/generated.pb.go @@ -0,0 +1,6945 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto +// DO NOT EDIT! + +/* + Package v1 is a generated protocol buffer package. 
+ + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto + + It has these top-level messages: + ControllerRevision + ControllerRevisionList + DaemonSet + DaemonSetCondition + DaemonSetList + DaemonSetSpec + DaemonSetStatus + DaemonSetUpdateStrategy + Deployment + DeploymentCondition + DeploymentList + DeploymentSpec + DeploymentStatus + DeploymentStrategy + ReplicaSet + ReplicaSetCondition + ReplicaSetList + ReplicaSetSpec + ReplicaSetStatus + RollingUpdateDaemonSet + RollingUpdateDeployment + RollingUpdateStatefulSetStrategy + StatefulSet + StatefulSetCondition + StatefulSetList + StatefulSetSpec + StatefulSetStatus + StatefulSetUpdateStrategy +*/ +package v1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_api_core_v1 "k8s.io/api/core/v1" + +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } +func (*ControllerRevision) ProtoMessage() {} +func (*ControllerRevision) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } +func (*ControllerRevisionList) ProtoMessage() {} +func (*ControllerRevisionList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *DaemonSet) Reset() { *m = DaemonSet{} } +func (*DaemonSet) ProtoMessage() {} +func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } +func (*DaemonSetCondition) ProtoMessage() {} +func (*DaemonSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } +func (*DaemonSetList) ProtoMessage() {} +func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } +func (*DaemonSetSpec) ProtoMessage() {} +func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } +func (*DaemonSetStatus) ProtoMessage() {} +func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } + +func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } +func (*DaemonSetUpdateStrategy) ProtoMessage() {} +func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func (m *Deployment) Reset() { *m = Deployment{} } +func (*Deployment) ProtoMessage() {} +func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } + +func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } +func (*DeploymentCondition) ProtoMessage() {} 
+func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } + +func (m *DeploymentList) Reset() { *m = DeploymentList{} } +func (*DeploymentList) ProtoMessage() {} +func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } + +func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } +func (*DeploymentSpec) ProtoMessage() {} +func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } + +func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } +func (*DeploymentStatus) ProtoMessage() {} +func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } + +func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } +func (*DeploymentStrategy) ProtoMessage() {} +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } + +func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } +func (*ReplicaSet) ProtoMessage() {} +func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } + +func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } +func (*ReplicaSetCondition) ProtoMessage() {} +func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } + +func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } +func (*ReplicaSetList) ProtoMessage() {} +func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } + +func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } +func (*ReplicaSetSpec) ProtoMessage() {} +func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } + +func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } +func (*ReplicaSetStatus) ProtoMessage() {} +func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } + +func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } +func (*RollingUpdateDaemonSet) ProtoMessage() {} +func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } + +func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } +func (*RollingUpdateDeployment) ProtoMessage() {} +func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{20} +} + +func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} } +func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {} +func (*RollingUpdateStatefulSetStrategy) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{21} +} + +func (m *StatefulSet) Reset() { *m = StatefulSet{} } +func (*StatefulSet) ProtoMessage() {} +func (*StatefulSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } + +func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } +func (*StatefulSetCondition) ProtoMessage() {} +func (*StatefulSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } + +func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } +func (*StatefulSetList) ProtoMessage() {} +func (*StatefulSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } + +func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } +func (*StatefulSetSpec) ProtoMessage() {} +func (*StatefulSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } + 
+func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } +func (*StatefulSetStatus) ProtoMessage() {} +func (*StatefulSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } + +func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } +func (*StatefulSetUpdateStrategy) ProtoMessage() {} +func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{27} +} + +func init() { + proto.RegisterType((*ControllerRevision)(nil), "k8s.io.api.apps.v1.ControllerRevision") + proto.RegisterType((*ControllerRevisionList)(nil), "k8s.io.api.apps.v1.ControllerRevisionList") + proto.RegisterType((*DaemonSet)(nil), "k8s.io.api.apps.v1.DaemonSet") + proto.RegisterType((*DaemonSetCondition)(nil), "k8s.io.api.apps.v1.DaemonSetCondition") + proto.RegisterType((*DaemonSetList)(nil), "k8s.io.api.apps.v1.DaemonSetList") + proto.RegisterType((*DaemonSetSpec)(nil), "k8s.io.api.apps.v1.DaemonSetSpec") + proto.RegisterType((*DaemonSetStatus)(nil), "k8s.io.api.apps.v1.DaemonSetStatus") + proto.RegisterType((*DaemonSetUpdateStrategy)(nil), "k8s.io.api.apps.v1.DaemonSetUpdateStrategy") + proto.RegisterType((*Deployment)(nil), "k8s.io.api.apps.v1.Deployment") + proto.RegisterType((*DeploymentCondition)(nil), "k8s.io.api.apps.v1.DeploymentCondition") + proto.RegisterType((*DeploymentList)(nil), "k8s.io.api.apps.v1.DeploymentList") + proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.api.apps.v1.DeploymentSpec") + proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.api.apps.v1.DeploymentStatus") + proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.api.apps.v1.DeploymentStrategy") + proto.RegisterType((*ReplicaSet)(nil), "k8s.io.api.apps.v1.ReplicaSet") + proto.RegisterType((*ReplicaSetCondition)(nil), "k8s.io.api.apps.v1.ReplicaSetCondition") + proto.RegisterType((*ReplicaSetList)(nil), "k8s.io.api.apps.v1.ReplicaSetList") + proto.RegisterType((*ReplicaSetSpec)(nil), "k8s.io.api.apps.v1.ReplicaSetSpec") + proto.RegisterType((*ReplicaSetStatus)(nil), "k8s.io.api.apps.v1.ReplicaSetStatus") + proto.RegisterType((*RollingUpdateDaemonSet)(nil), "k8s.io.api.apps.v1.RollingUpdateDaemonSet") + proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.api.apps.v1.RollingUpdateDeployment") + proto.RegisterType((*RollingUpdateStatefulSetStrategy)(nil), "k8s.io.api.apps.v1.RollingUpdateStatefulSetStrategy") + proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1.StatefulSet") + proto.RegisterType((*StatefulSetCondition)(nil), "k8s.io.api.apps.v1.StatefulSetCondition") + proto.RegisterType((*StatefulSetList)(nil), "k8s.io.api.apps.v1.StatefulSetList") + proto.RegisterType((*StatefulSetSpec)(nil), "k8s.io.api.apps.v1.StatefulSetSpec") + proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1.StatefulSetStatus") + proto.RegisterType((*StatefulSetUpdateStrategy)(nil), "k8s.io.api.apps.v1.StatefulSetUpdateStrategy") +} +func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ControllerRevision) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Data.Size())) + n2, err := 
m.Data.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) + return i, nil +} + +func (m *ControllerRevisionList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ControllerRevisionList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n3, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *DaemonSet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonSet) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n5, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n6, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + return i, nil +} + +func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonSetCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n7, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil +} + +func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonSetList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n8, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *DaemonSetSpec) Marshal() (dAtA 
[]byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonSetSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Selector != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n9, err := m.Selector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n10, err := m.Template.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) + n11, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + if m.RevisionHistoryLimit != nil { + dAtA[i] = 0x30 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + } + return i, nil +} + +func (m *DaemonSetStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonSetStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentNumberScheduled)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberMisscheduled)) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredNumberScheduled)) + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberReady)) + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + dAtA[i] = 0x30 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedNumberScheduled)) + dAtA[i] = 0x38 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberAvailable)) + dAtA[i] = 0x40 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberUnavailable)) + if m.CollisionCount != nil { + dAtA[i] = 0x48 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + } + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *DaemonSetUpdateStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + if m.RollingUpdate != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) + n12, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + } + return i, nil +} + +func (m *Deployment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, 
uint64(m.ObjectMeta.Size())) + n13, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n14, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n15, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + return i, nil +} + +func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) + n16, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n17, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + return i, nil +} + +func (m *DeploymentList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n18, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Replicas != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + } + if m.Selector != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n19, err := m.Selector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n19 + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n20, err := m.Template.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) + n21, err := m.Strategy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n21 + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + if m.RevisionHistoryLimit != nil { + 
dAtA[i] = 0x30 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + } + dAtA[i] = 0x38 + i++ + if m.Paused { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.ProgressDeadlineSeconds != nil { + dAtA[i] = 0x48 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) + } + return i, nil +} + +func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x38 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + if m.CollisionCount != nil { + dAtA[i] = 0x40 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + } + return i, nil +} + +func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + if m.RollingUpdate != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) + n22, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n22 + } + return i, nil +} + +func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n23, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n24, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n25, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n25 + return i, nil +} + +func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i 
= encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n26, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n26 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil +} + +func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n27, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Replicas != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + } + if m.Selector != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n28, err := m.Selector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n28 + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n29, err := m.Template.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n29 + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + return i, nil +} + +func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSetStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if 
m.MaxUnavailable != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) + n30, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n30 + } + return i, nil +} + +func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.MaxUnavailable != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) + n31, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n31 + } + if m.MaxSurge != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSurge.Size())) + n32, err := m.MaxSurge.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n32 + } + return i, nil +} + +func (m *RollingUpdateStatefulSetStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RollingUpdateStatefulSetStrategy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Partition != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Partition)) + } + return i, nil +} + +func (m *StatefulSet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSet) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n33, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n33 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n34, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n34 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n35, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n35 + return i, nil +} + +func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n36, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n36 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil +} + +func (m *StatefulSetList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + 
} + return dAtA[:n], nil +} + +func (m *StatefulSetList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n37, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n37 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Replicas != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + } + if m.Selector != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n38, err := m.Selector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n38 + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n39, err := m.Template.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n39 + if len(m.VolumeClaimTemplates) > 0 { + for _, msg := range m.VolumeClaimTemplates { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) + i += copy(dAtA[i:], m.ServiceName) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodManagementPolicy))) + i += copy(dAtA[i:], m.PodManagementPolicy) + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) + n40, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n40 + if m.RevisionHistoryLimit != nil { + dAtA[i] = 0x40 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + } + return i, nil +} + +func (m *StatefulSetStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentRevision))) + i += copy(dAtA[i:], m.CurrentRevision) + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateRevision))) + i += copy(dAtA[i:], m.UpdateRevision) + if m.CollisionCount != nil { + dAtA[i] = 0x48 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + } + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := 
msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *StatefulSetUpdateStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + if m.RollingUpdate != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) + n41, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n41 + } + return i, nil +} + +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *ControllerRevision) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Data.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Revision)) + return n +} + +func (m *ControllerRevisionList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DaemonSet) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DaemonSetCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DaemonSetList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DaemonSetSpec) Size() (n int) { + var l int + _ = l + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.UpdateStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + } + return n +} + +func (m *DaemonSetStatus) Size() (n int) { + var l int + _ = l + n += 1 + 
sovGenerated(uint64(m.CurrentNumberScheduled)) + n += 1 + sovGenerated(uint64(m.NumberMisscheduled)) + n += 1 + sovGenerated(uint64(m.DesiredNumberScheduled)) + n += 1 + sovGenerated(uint64(m.NumberReady)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.UpdatedNumberScheduled)) + n += 1 + sovGenerated(uint64(m.NumberAvailable)) + n += 1 + sovGenerated(uint64(m.NumberUnavailable)) + if m.CollisionCount != nil { + n += 1 + sovGenerated(uint64(*m.CollisionCount)) + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DaemonSetUpdateStrategy) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Deployment) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastUpdateTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeploymentSpec) Size() (n int) { + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Strategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + } + n += 2 + if m.ProgressDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ProgressDeadlineSeconds)) + } + return n +} + +func (m *DeploymentStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) + n += 1 + sovGenerated(uint64(m.UnavailableReplicas)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + if m.CollisionCount != nil { + n += 1 + sovGenerated(uint64(*m.CollisionCount)) + } + return n +} + +func (m *DeploymentStrategy) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ReplicaSet) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() 
+ n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ReplicaSetCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ReplicaSetList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ReplicaSetSpec) Size() (n int) { + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + return n +} + +func (m *ReplicaSetStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RollingUpdateDaemonSet) Size() (n int) { + var l int + _ = l + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RollingUpdateDeployment) Size() (n int) { + var l int + _ = l + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MaxSurge != nil { + l = m.MaxSurge.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RollingUpdateStatefulSetStrategy) Size() (n int) { + var l int + _ = l + if m.Partition != nil { + n += 1 + sovGenerated(uint64(*m.Partition)) + } + return n +} + +func (m *StatefulSet) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StatefulSetCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StatefulSetList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *StatefulSetSpec) Size() (n int) { + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.VolumeClaimTemplates) > 0 { + for _, e := range m.VolumeClaimTemplates { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ServiceName) + 
n += 1 + l + sovGenerated(uint64(l)) + l = len(m.PodManagementPolicy) + n += 1 + l + sovGenerated(uint64(l)) + l = m.UpdateStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + } + return n +} + +func (m *StatefulSetStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + n += 1 + sovGenerated(uint64(m.CurrentReplicas)) + n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) + l = len(m.CurrentRevision) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UpdateRevision) + n += 1 + l + sovGenerated(uint64(l)) + if m.CollisionCount != nil { + n += 1 + sovGenerated(uint64(*m.CollisionCount)) + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *StatefulSetUpdateStrategy) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ControllerRevision) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ControllerRevision{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Data:` + strings.Replace(strings.Replace(this.Data.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, + `}`, + }, "") + return s +} +func (this *ControllerRevisionList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ControllerRevisionList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ControllerRevision", "ControllerRevision", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSet) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSet{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DaemonSetSpec", "DaemonSetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DaemonSetStatus", "DaemonSetStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSetCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, 
"") + return s +} +func (this *DaemonSetList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSetList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSetSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSetSpec{`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "DaemonSetUpdateStrategy", "DaemonSetUpdateStrategy", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSetStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSetStatus{`, + `CurrentNumberScheduled:` + fmt.Sprintf("%v", this.CurrentNumberScheduled) + `,`, + `NumberMisscheduled:` + fmt.Sprintf("%v", this.NumberMisscheduled) + `,`, + `DesiredNumberScheduled:` + fmt.Sprintf("%v", this.DesiredNumberScheduled) + `,`, + `NumberReady:` + fmt.Sprintf("%v", this.NumberReady) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `UpdatedNumberScheduled:` + fmt.Sprintf("%v", this.UpdatedNumberScheduled) + `,`, + `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`, + `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`, + `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSetUpdateStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSetUpdateStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Deployment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Deployment{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `LastUpdateTime:` + 
strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Deployment", "Deployment", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, + `ProgressDeadlineSeconds:` + valueToStringGenerated(this.ProgressDeadlineSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, + `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSet) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicaSet{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicaSetSpec", "ReplicaSetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicaSetStatus", "ReplicaSetStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetCondition) String() string { + if 
this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicaSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicaSetList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicaSetSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicaSetStatus{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RollingUpdateDaemonSet) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollingUpdateDaemonSet{`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RollingUpdateDeployment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollingUpdateDeployment{`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RollingUpdateStatefulSetStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollingUpdateStatefulSetStrategy{`, + `Partition:` + valueToStringGenerated(this.Partition) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSet) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSet{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), 
`&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "StatefulSetSpec", "StatefulSetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "StatefulSetStatus", "StatefulSetStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSetCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSetList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StatefulSet", "StatefulSet", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSetSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `VolumeClaimTemplates:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeClaimTemplates), "PersistentVolumeClaim", "k8s_io_api_core_v1.PersistentVolumeClaim", 1), `&`, ``, 1) + `,`, + `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, + `PodManagementPolicy:` + fmt.Sprintf("%v", this.PodManagementPolicy) + `,`, + `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "StatefulSetUpdateStrategy", "StatefulSetUpdateStrategy", 1), `&`, ``, 1) + `,`, + `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSetStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, + `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, + `CurrentRevision:` + fmt.Sprintf("%v", this.CurrentRevision) + `,`, + `UpdateRevision:` + fmt.Sprintf("%v", this.UpdateRevision) + `,`, + `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "StatefulSetCondition", "StatefulSetCondition", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSetUpdateStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetUpdateStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `RollingUpdate:` + 
strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateStatefulSetStrategy", "RollingUpdateStatefulSetStrategy", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ControllerRevision) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ControllerRevision: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ControllerRevision: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + m.Revision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Revision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ControllerRevisionList: wiretype end 
group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ControllerRevisionList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ControllerRevision{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + 
msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSetCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DaemonSetConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + 
} + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSetList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx 
+ msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, DaemonSet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdateStrategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UpdateStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds |= 
(int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RevisionHistoryLimit = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSetStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentNumberScheduled", wireType) + } + m.CurrentNumberScheduled = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentNumberScheduled |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberMisscheduled", wireType) + } + m.NumberMisscheduled = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumberMisscheduled |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredNumberScheduled", wireType) + } + m.DesiredNumberScheduled = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DesiredNumberScheduled |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberReady", wireType) + } + m.NumberReady = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumberReady |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + 
if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedNumberScheduled", wireType) + } + m.UpdatedNumberScheduled = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UpdatedNumberScheduled |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberAvailable", wireType) + } + m.NumberAvailable = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumberAvailable |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberUnavailable", wireType) + } + m.NumberUnavailable = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumberUnavailable |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.CollisionCount = &v + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, DaemonSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSetUpdateStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetUpdateStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DaemonSetUpdateStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RollingUpdate == nil { + m.RollingUpdate = &RollingUpdateDaemonSet{} + } + if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Deployment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Deployment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Deployment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DeploymentConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Deployment{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Strategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RevisionHistoryLimit = &v + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Paused = bool(v != 0) + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProgressDeadlineSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ProgressDeadlineSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } 
+ m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) + } + m.UpdatedReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + } + m.AvailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AvailableReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UnavailableReplicas", wireType) + } + m.UnavailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UnavailableReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, DeploymentCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReadyReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.CollisionCount = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated 
+ } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DeploymentStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RollingUpdate == nil { + m.RollingUpdate = &RollingUpdateDeployment{} + } + if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSetCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = ReplicaSetConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d 
for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for 
field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ReplicaSet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSetStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FullyLabeledReplicas", wireType) + } + m.FullyLabeledReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FullyLabeledReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReadyReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + } + m.AvailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AvailableReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 
0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, ReplicaSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollingUpdateDaemonSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollingUpdateDaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxUnavailable == nil { + m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollingUpdateDeployment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollingUpdateDeployment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 
0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxUnavailable == nil { + m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxSurge == nil { + m.MaxSurge = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + } + if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollingUpdateStatefulSetStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollingUpdateStatefulSetStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Partition = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch 
fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = StatefulSetConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + 
case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetList: illegal tag %d (wire type %d)", 
fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, StatefulSet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeClaimTemplates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, k8s_io_api_core_v1.PersistentVolumeClaim{}) + if err := m.VolumeClaimTemplates[len(m.VolumeClaimTemplates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodManagementPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodManagementPolicy = PodManagementPolicyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdateStrategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UpdateStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + 
} + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RevisionHistoryLimit = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReadyReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentReplicas", wireType) + } + m.CurrentReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) + } + m.UpdatedReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentRevision", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CurrentRevision = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdateRevision", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UpdateRevision = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.CollisionCount = &v + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, StatefulSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetUpdateStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetUpdateStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + 
intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = StatefulSetUpdateStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RollingUpdate == nil { + m.RollingUpdate = &RollingUpdateStatefulSetStrategy{} + } + if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 2051 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x24, 0x47, + 0x1d, 0x75, 0xcf, 0x87, 0x3d, 0x2e, 0xaf, 0xed, 0xdd, 0xb2, 0xb1, 0x27, 0xbb, 0x64, 
0x66, 0x19, + 0x60, 0xe3, 0xcd, 0x66, 0x7b, 0xf0, 0x66, 0x13, 0xa1, 0x2c, 0x02, 0x79, 0xc6, 0x21, 0x84, 0x78, + 0x6c, 0x53, 0x5e, 0xef, 0x61, 0x09, 0x12, 0x35, 0x33, 0xb5, 0xe3, 0x8e, 0xfb, 0x4b, 0xdd, 0xd5, + 0xc3, 0x8e, 0xb8, 0x20, 0x24, 0x38, 0x71, 0xe0, 0x3f, 0x41, 0x08, 0xc1, 0x0d, 0x45, 0x88, 0xcb, + 0x5e, 0x90, 0x22, 0x2e, 0xe4, 0x64, 0xb1, 0x93, 0x13, 0x42, 0x39, 0x72, 0xc9, 0x05, 0x54, 0xd5, + 0xd5, 0xdf, 0xd5, 0x9e, 0xb1, 0x37, 0xeb, 0xa0, 0x68, 0x6f, 0x9e, 0xaa, 0xf7, 0x7b, 0xfd, 0xab, + 0xaa, 0x5f, 0xd5, 0x7b, 0x5d, 0x6d, 0x70, 0xef, 0xf8, 0xdb, 0xae, 0xaa, 0x59, 0xcd, 0x63, 0xaf, + 0x4b, 0x1c, 0x93, 0x50, 0xe2, 0x36, 0x87, 0xc4, 0xec, 0x5b, 0x4e, 0x53, 0x74, 0x60, 0x5b, 0x6b, + 0x62, 0xdb, 0x76, 0x9b, 0xc3, 0xcd, 0xe6, 0x80, 0x98, 0xc4, 0xc1, 0x94, 0xf4, 0x55, 0xdb, 0xb1, + 0xa8, 0x05, 0xa1, 0x8f, 0x51, 0xb1, 0xad, 0xa9, 0x0c, 0xa3, 0x0e, 0x37, 0xaf, 0xde, 0x1e, 0x68, + 0xf4, 0xc8, 0xeb, 0xaa, 0x3d, 0xcb, 0x68, 0x0e, 0xac, 0x81, 0xd5, 0xe4, 0xd0, 0xae, 0xf7, 0x88, + 0xff, 0xe2, 0x3f, 0xf8, 0x5f, 0x3e, 0xc5, 0xd5, 0x46, 0xec, 0x31, 0x3d, 0xcb, 0x21, 0x92, 0xc7, + 0x5c, 0xbd, 0x19, 0xc3, 0xd8, 0x96, 0xae, 0xf5, 0x46, 0xcd, 0xe1, 0x66, 0x97, 0x50, 0x9c, 0x85, + 0xde, 0x8d, 0xa0, 0x06, 0xee, 0x1d, 0x69, 0x26, 0x71, 0x46, 0x4d, 0xfb, 0x78, 0xc0, 0x1a, 0xdc, + 0xa6, 0x41, 0x28, 0x96, 0x3d, 0xa0, 0x99, 0x17, 0xe5, 0x78, 0x26, 0xd5, 0x0c, 0x92, 0x09, 0x78, + 0x73, 0x52, 0x80, 0xdb, 0x3b, 0x22, 0x06, 0xce, 0xc4, 0xbd, 0x9e, 0x17, 0xe7, 0x51, 0x4d, 0x6f, + 0x6a, 0x26, 0x75, 0xa9, 0x93, 0x0e, 0x6a, 0xfc, 0x47, 0x01, 0xb0, 0x6d, 0x99, 0xd4, 0xb1, 0x74, + 0x9d, 0x38, 0x88, 0x0c, 0x35, 0x57, 0xb3, 0x4c, 0xf8, 0x53, 0x50, 0x61, 0xe3, 0xe9, 0x63, 0x8a, + 0xab, 0xca, 0x75, 0x65, 0x63, 0xe1, 0xce, 0xb7, 0xd4, 0x68, 0x3d, 0x42, 0x7a, 0xd5, 0x3e, 0x1e, + 0xb0, 0x06, 0x57, 0x65, 0x68, 0x75, 0xb8, 0xa9, 0xee, 0x75, 0x3f, 0x20, 0x3d, 0xda, 0x21, 0x14, + 0xb7, 0xe0, 0x93, 0x93, 0xfa, 0xcc, 0xf8, 0xa4, 0x0e, 0xa2, 0x36, 0x14, 0xb2, 0xc2, 0x3d, 0x50, + 0xe2, 0xec, 0x05, 0xce, 0x7e, 0x3b, 0x97, 0x5d, 0x0c, 0x5a, 0x45, 0xf8, 0x67, 0x6f, 0x3f, 0xa6, + 0xc4, 0x64, 0xe9, 0xb5, 0x2e, 0x09, 0xea, 0xd2, 0x36, 0xa6, 0x18, 0x71, 0x22, 0xf8, 0x1a, 0xa8, + 0x38, 0x22, 0xfd, 0x6a, 0xf1, 0xba, 0xb2, 0x51, 0x6c, 0x5d, 0x16, 0xa8, 0x4a, 0x30, 0x2c, 0x14, + 0x22, 0x1a, 0x7f, 0x55, 0xc0, 0x5a, 0x76, 0xdc, 0x3b, 0x9a, 0x4b, 0xe1, 0xfb, 0x99, 0xb1, 0xab, + 0xd3, 0x8d, 0x9d, 0x45, 0xf3, 0x91, 0x87, 0x0f, 0x0e, 0x5a, 0x62, 0xe3, 0x7e, 0x0f, 0x94, 0x35, + 0x4a, 0x0c, 0xb7, 0x5a, 0xb8, 0x5e, 0xdc, 0x58, 0xb8, 0x73, 0x43, 0xcd, 0x96, 0xb9, 0x9a, 0x4d, + 0xac, 0xb5, 0x28, 0x28, 0xcb, 0xef, 0xb2, 0x60, 0xe4, 0x73, 0x34, 0xfe, 0xab, 0x80, 0xf9, 0x6d, + 0x4c, 0x0c, 0xcb, 0x3c, 0x20, 0xf4, 0x02, 0x16, 0xad, 0x0d, 0x4a, 0xae, 0x4d, 0x7a, 0x62, 0xd1, + 0xbe, 0x26, 0xcb, 0x3d, 0x4c, 0xe7, 0xc0, 0x26, 0xbd, 0x68, 0xa1, 0xd8, 0x2f, 0xc4, 0x83, 0xe1, + 0x7b, 0x60, 0xd6, 0xa5, 0x98, 0x7a, 0x2e, 0x5f, 0xa6, 0x85, 0x3b, 0x5f, 0x3f, 0x9d, 0x86, 0x43, + 0x5b, 0x4b, 0x82, 0x68, 0xd6, 0xff, 0x8d, 0x04, 0x45, 0xe3, 0x5f, 0x05, 0x00, 0x43, 0x6c, 0xdb, + 0x32, 0xfb, 0x1a, 0x65, 0xf5, 0xfb, 0x16, 0x28, 0xd1, 0x91, 0x4d, 0xf8, 0x34, 0xcc, 0xb7, 0x6e, + 0x04, 0x59, 0xdc, 0x1f, 0xd9, 0xe4, 0xb3, 0x93, 0xfa, 0x5a, 0x36, 0x82, 0xf5, 0x20, 0x1e, 0x03, + 0x77, 0xc2, 0xfc, 0x0a, 0x3c, 0xfa, 0x6e, 0xf2, 0xd1, 0x9f, 0x9d, 0xd4, 0x25, 0xe7, 0x8a, 0x1a, + 0x32, 0x25, 0x13, 0x84, 0x43, 0x00, 0x75, 0xec, 0xd2, 0xfb, 0x0e, 0x36, 0x5d, 0xff, 0x49, 0x9a, + 0x41, 0xc4, 0xc8, 0x5f, 0x9d, 0x6e, 0x79, 0x58, 0x44, 0xeb, 0xaa, 0xc8, 0x02, 0xee, 0x64, 0xd8, + 0x90, 0xe4, 
0x09, 0xf0, 0x06, 0x98, 0x75, 0x08, 0x76, 0x2d, 0xb3, 0x5a, 0xe2, 0xa3, 0x08, 0x27, + 0x10, 0xf1, 0x56, 0x24, 0x7a, 0xe1, 0x4d, 0x30, 0x67, 0x10, 0xd7, 0xc5, 0x03, 0x52, 0x2d, 0x73, + 0xe0, 0xb2, 0x00, 0xce, 0x75, 0xfc, 0x66, 0x14, 0xf4, 0x37, 0x7e, 0xaf, 0x80, 0xc5, 0x70, 0xe6, + 0x2e, 0x60, 0xab, 0xb4, 0x92, 0x5b, 0xe5, 0xe5, 0x53, 0xeb, 0x24, 0x67, 0x87, 0x7c, 0x58, 0x8c, + 0xe5, 0xcc, 0x8a, 0x10, 0xfe, 0x04, 0x54, 0x5c, 0xa2, 0x93, 0x1e, 0xb5, 0x1c, 0x91, 0xf3, 0xeb, + 0x53, 0xe6, 0x8c, 0xbb, 0x44, 0x3f, 0x10, 0xa1, 0xad, 0x4b, 0x2c, 0xe9, 0xe0, 0x17, 0x0a, 0x29, + 0xe1, 0x8f, 0x40, 0x85, 0x12, 0xc3, 0xd6, 0x31, 0x25, 0x62, 0x9b, 0x24, 0xea, 0x9b, 0x95, 0x0b, + 0x23, 0xdb, 0xb7, 0xfa, 0xf7, 0x05, 0x8c, 0x6f, 0x94, 0x70, 0x1e, 0x82, 0x56, 0x14, 0xd2, 0xc0, + 0x63, 0xb0, 0xe4, 0xd9, 0x7d, 0x86, 0xa4, 0xec, 0xe8, 0x1e, 0x8c, 0x44, 0xf9, 0xdc, 0x3a, 0x75, + 0x42, 0x0e, 0x13, 0x21, 0xad, 0x35, 0xf1, 0x80, 0xa5, 0x64, 0x3b, 0x4a, 0x51, 0xc3, 0x2d, 0xb0, + 0x6c, 0x68, 0x26, 0x22, 0xb8, 0x3f, 0x3a, 0x20, 0x3d, 0xcb, 0xec, 0xbb, 0xbc, 0x80, 0xca, 0xad, + 0x75, 0x41, 0xb0, 0xdc, 0x49, 0x76, 0xa3, 0x34, 0x1e, 0xee, 0x80, 0xd5, 0xe0, 0x9c, 0xfd, 0x81, + 0xe6, 0x52, 0xcb, 0x19, 0xed, 0x68, 0x86, 0x46, 0xab, 0xb3, 0x9c, 0xa7, 0x3a, 0x3e, 0xa9, 0xaf, + 0x22, 0x49, 0x3f, 0x92, 0x46, 0x35, 0x7e, 0x33, 0x0b, 0x96, 0x53, 0xa7, 0x01, 0x7c, 0x00, 0xd6, + 0x7a, 0x9e, 0xe3, 0x10, 0x93, 0xee, 0x7a, 0x46, 0x97, 0x38, 0x07, 0xbd, 0x23, 0xd2, 0xf7, 0x74, + 0xd2, 0xe7, 0x2b, 0x5a, 0x6e, 0xd5, 0x44, 0xae, 0x6b, 0x6d, 0x29, 0x0a, 0xe5, 0x44, 0xc3, 0x1f, + 0x02, 0x68, 0xf2, 0xa6, 0x8e, 0xe6, 0xba, 0x21, 0x67, 0x81, 0x73, 0x86, 0x1b, 0x70, 0x37, 0x83, + 0x40, 0x92, 0x28, 0x96, 0x63, 0x9f, 0xb8, 0x9a, 0x43, 0xfa, 0xe9, 0x1c, 0x8b, 0xc9, 0x1c, 0xb7, + 0xa5, 0x28, 0x94, 0x13, 0x0d, 0xdf, 0x00, 0x0b, 0xfe, 0xd3, 0xf8, 0x9c, 0x8b, 0xc5, 0x59, 0x11, + 0x64, 0x0b, 0xbb, 0x51, 0x17, 0x8a, 0xe3, 0xd8, 0xd0, 0xac, 0xae, 0x4b, 0x9c, 0x21, 0xe9, 0xbf, + 0xe3, 0x7b, 0x00, 0x26, 0x94, 0x65, 0x2e, 0x94, 0xe1, 0xd0, 0xf6, 0x32, 0x08, 0x24, 0x89, 0x62, + 0x43, 0xf3, 0xab, 0x26, 0x33, 0xb4, 0xd9, 0xe4, 0xd0, 0x0e, 0xa5, 0x28, 0x94, 0x13, 0xcd, 0x6a, + 0xcf, 0x4f, 0x79, 0x6b, 0x88, 0x35, 0x1d, 0x77, 0x75, 0x52, 0x9d, 0x4b, 0xd6, 0xde, 0x6e, 0xb2, + 0x1b, 0xa5, 0xf1, 0xf0, 0x1d, 0x70, 0xc5, 0x6f, 0x3a, 0x34, 0x71, 0x48, 0x52, 0xe1, 0x24, 0x2f, + 0x09, 0x92, 0x2b, 0xbb, 0x69, 0x00, 0xca, 0xc6, 0xc0, 0xb7, 0xc0, 0x52, 0xcf, 0xd2, 0x75, 0x5e, + 0x8f, 0x6d, 0xcb, 0x33, 0x69, 0x75, 0x9e, 0xb3, 0x40, 0xb6, 0x87, 0xda, 0x89, 0x1e, 0x94, 0x42, + 0xc2, 0x87, 0x00, 0xf4, 0x02, 0x39, 0x70, 0xab, 0x20, 0x5f, 0xe8, 0xb3, 0x3a, 0x14, 0x09, 0x70, + 0xd8, 0xe4, 0xa2, 0x18, 0x5b, 0xe3, 0x43, 0x05, 0xac, 0xe7, 0xec, 0x71, 0xf8, 0xbd, 0x84, 0xea, + 0xdd, 0x4a, 0xa9, 0xde, 0xb5, 0x9c, 0xb0, 0x98, 0xf4, 0xf5, 0xc0, 0x22, 0xf3, 0x1d, 0x9a, 0x39, + 0xf0, 0x21, 0xe2, 0x04, 0x7b, 0x55, 0x96, 0x3b, 0x8a, 0x03, 0xa3, 0x63, 0xf8, 0xca, 0xf8, 0xa4, + 0xbe, 0x98, 0xe8, 0x43, 0x49, 0xce, 0xc6, 0x2f, 0x0b, 0x00, 0x6c, 0x13, 0x5b, 0xb7, 0x46, 0x06, + 0x31, 0x2f, 0xc2, 0xb5, 0x6c, 0x27, 0x5c, 0x4b, 0x43, 0xba, 0x10, 0x61, 0x3e, 0xb9, 0xb6, 0x65, + 0x27, 0x65, 0x5b, 0xbe, 0x31, 0x81, 0xe7, 0x74, 0xdf, 0xf2, 0x8f, 0x22, 0x58, 0x89, 0xc0, 0x91, + 0x71, 0xb9, 0x97, 0x58, 0xc2, 0x57, 0x52, 0x4b, 0xb8, 0x2e, 0x09, 0x79, 0x6e, 0xce, 0xe5, 0xf3, + 0x77, 0x10, 0xf0, 0x03, 0xb0, 0xc4, 0xac, 0x8a, 0x5f, 0x08, 0xdc, 0x08, 0xcd, 0x9e, 0xd9, 0x08, + 0x85, 0x42, 0xb6, 0x93, 0x60, 0x42, 0x29, 0xe6, 0x1c, 0xe3, 0x35, 0xf7, 0xbc, 0x8d, 0x57, 0xe3, + 0x0f, 0x0a, 0x58, 0x8a, 0x96, 0xe9, 
0x02, 0x6c, 0x52, 0x3b, 0x69, 0x93, 0x6a, 0xa7, 0xd7, 0x65, + 0x8e, 0x4f, 0xfa, 0x7b, 0x29, 0x9e, 0x35, 0x37, 0x4a, 0x1b, 0xec, 0x85, 0xca, 0xd6, 0xb5, 0x1e, + 0x76, 0x85, 0xac, 0x5e, 0xf2, 0x5f, 0xa6, 0xfc, 0x36, 0x14, 0xf6, 0x26, 0x2c, 0x55, 0xe1, 0xf9, + 0x5a, 0xaa, 0xe2, 0xe7, 0x63, 0xa9, 0xee, 0x83, 0x8a, 0x1b, 0x98, 0xa9, 0x12, 0xa7, 0xbc, 0x31, + 0x69, 0x3b, 0x0b, 0x1f, 0x15, 0xb2, 0x86, 0x0e, 0x2a, 0x64, 0x92, 0x79, 0xa7, 0xf2, 0x17, 0xe9, + 0x9d, 0xd8, 0x16, 0xb6, 0xb1, 0xe7, 0x92, 0x3e, 0xaf, 0xfb, 0x4a, 0xb4, 0x85, 0xf7, 0x79, 0x2b, + 0x12, 0xbd, 0xf0, 0x10, 0xac, 0xdb, 0x8e, 0x35, 0x70, 0x88, 0xeb, 0x6e, 0x13, 0xdc, 0xd7, 0x35, + 0x93, 0x04, 0x03, 0xf0, 0x55, 0xef, 0xda, 0xf8, 0xa4, 0xbe, 0xbe, 0x2f, 0x87, 0xa0, 0xbc, 0xd8, + 0xc6, 0x9f, 0x4b, 0xe0, 0x72, 0xfa, 0x44, 0xcc, 0x31, 0x22, 0xca, 0xb9, 0x8c, 0xc8, 0x6b, 0xb1, + 0x12, 0xf5, 0x5d, 0x5a, 0xec, 0x9d, 0x3f, 0x53, 0xa6, 0x5b, 0x60, 0x59, 0x18, 0x8f, 0xa0, 0x53, + 0x58, 0xb1, 0x70, 0x79, 0x0e, 0x93, 0xdd, 0x28, 0x8d, 0x67, 0xf6, 0x22, 0x72, 0x0d, 0x01, 0x49, + 0x29, 0x69, 0x2f, 0xb6, 0xd2, 0x00, 0x94, 0x8d, 0x81, 0x1d, 0xb0, 0xe2, 0x99, 0x59, 0x2a, 0xbf, + 0x5c, 0xae, 0x09, 0xaa, 0x95, 0xc3, 0x2c, 0x04, 0xc9, 0xe2, 0xe0, 0x8f, 0x13, 0x8e, 0x63, 0x96, + 0x1f, 0x04, 0xaf, 0x9c, 0x5e, 0xd1, 0x53, 0x5b, 0x0e, 0x78, 0x0f, 0x2c, 0x3a, 0xdc, 0x50, 0x06, + 0x59, 0xfa, 0xa6, 0xec, 0x2b, 0x22, 0x6c, 0x11, 0xc5, 0x3b, 0x51, 0x12, 0x2b, 0xf1, 0x51, 0x95, + 0x69, 0x7d, 0x54, 0xe3, 0x4f, 0x0a, 0x80, 0xd9, 0x2d, 0x38, 0xf1, 0xe5, 0x3e, 0x13, 0x11, 0x93, + 0xc8, 0xbe, 0xdc, 0xe1, 0xdc, 0x9a, 0xec, 0x70, 0xa2, 0x13, 0x74, 0x3a, 0x8b, 0x23, 0x66, 0xe0, + 0x62, 0x2e, 0x66, 0xa6, 0xb0, 0x38, 0x51, 0x3e, 0xcf, 0x66, 0x71, 0x62, 0x3c, 0xa7, 0x5b, 0x9c, + 0x7f, 0x17, 0xc0, 0x4a, 0x04, 0x9e, 0xda, 0xe2, 0x48, 0x42, 0x5e, 0x5c, 0xce, 0x4c, 0xbe, 0x9c, + 0x61, 0xb6, 0x23, 0x9a, 0xba, 0xff, 0x13, 0xdb, 0x11, 0x25, 0x94, 0x63, 0x3b, 0x7e, 0x57, 0x88, + 0x67, 0xfd, 0xa5, 0xb7, 0x1d, 0xcf, 0x7e, 0xb9, 0xd2, 0xf8, 0x4b, 0x11, 0x5c, 0x4e, 0x6f, 0xc1, + 0x84, 0x0e, 0x2a, 0x13, 0x75, 0x70, 0x1f, 0xac, 0x3e, 0xf2, 0x74, 0x7d, 0xc4, 0xa7, 0x21, 0x26, + 0x86, 0xbe, 0x82, 0x7e, 0x55, 0x44, 0xae, 0x7e, 0x5f, 0x82, 0x41, 0xd2, 0xc8, 0x1c, 0x4d, 0x2f, + 0x9e, 0x4b, 0xd3, 0x33, 0x6a, 0x53, 0x3a, 0x83, 0xda, 0x48, 0xf5, 0xb9, 0x7c, 0x0e, 0x7d, 0x9e, + 0x5a, 0x50, 0x25, 0xc7, 0xd5, 0xc4, 0x77, 0xf8, 0x5f, 0x2b, 0x60, 0x4d, 0xfe, 0xfa, 0x0c, 0x75, + 0xb0, 0x64, 0xe0, 0xc7, 0xf1, 0xcb, 0x8b, 0x49, 0x82, 0xe1, 0x51, 0x4d, 0x57, 0xfd, 0xaf, 0x3b, + 0xea, 0xbb, 0x26, 0xdd, 0x73, 0x0e, 0xa8, 0xa3, 0x99, 0x03, 0x5f, 0x60, 0x3b, 0x09, 0x2e, 0x94, + 0xe2, 0x6e, 0x7c, 0xa2, 0x80, 0xf5, 0x1c, 0x95, 0xbb, 0xd8, 0x4c, 0xe0, 0x43, 0x50, 0x31, 0xf0, + 0xe3, 0x03, 0xcf, 0x19, 0x04, 0x92, 0x7c, 0xf6, 0xe7, 0xf0, 0x8d, 0xdc, 0x11, 0x2c, 0x28, 0xe4, + 0x6b, 0xec, 0x81, 0xeb, 0x89, 0x41, 0xb2, 0x4d, 0x43, 0x1e, 0x79, 0x3a, 0xdf, 0x3f, 0xc2, 0x53, + 0xdc, 0x02, 0xf3, 0x36, 0x76, 0xa8, 0x16, 0x9a, 0xd1, 0x72, 0x6b, 0x71, 0x7c, 0x52, 0x9f, 0xdf, + 0x0f, 0x1a, 0x51, 0xd4, 0xdf, 0xf8, 0x55, 0x01, 0x2c, 0xc4, 0x48, 0x2e, 0x40, 0xdf, 0xdf, 0x4e, + 0xe8, 0xbb, 0xf4, 0x8b, 0x49, 0x7c, 0x54, 0x79, 0x02, 0xdf, 0x49, 0x09, 0xfc, 0x37, 0x27, 0x11, + 0x9d, 0xae, 0xf0, 0x9f, 0x16, 0xc0, 0x6a, 0x0c, 0x1d, 0x49, 0xfc, 0x77, 0x12, 0x12, 0xbf, 0x91, + 0x92, 0xf8, 0xaa, 0x2c, 0xe6, 0x85, 0xc6, 0x4f, 0xd6, 0xf8, 0x3f, 0x2a, 0x60, 0x39, 0x36, 0x77, + 0x17, 0x20, 0xf2, 0xdb, 0x49, 0x91, 0xaf, 0x4f, 0xa8, 0x97, 0x1c, 0x95, 0x7f, 0x52, 0x4e, 0xe4, + 0xfd, 0xa5, 0x97, 0xf9, 0x9f, 0x83, 0xd5, 0xa1, 0xa5, 0x7b, 
0x06, 0x69, 0xeb, 0x58, 0x33, 0x02, + 0x00, 0x53, 0x32, 0x36, 0x89, 0x37, 0xa5, 0xf4, 0xc4, 0x71, 0x35, 0x97, 0x12, 0x93, 0x3e, 0x88, + 0x22, 0x23, 0x2d, 0x7e, 0x20, 0xa1, 0x43, 0xd2, 0x87, 0xc0, 0x37, 0xc0, 0x02, 0xd3, 0x54, 0xad, + 0x47, 0x76, 0xb1, 0x11, 0xd4, 0x54, 0xf8, 0x7d, 0xe0, 0x20, 0xea, 0x42, 0x71, 0x1c, 0x3c, 0x02, + 0x2b, 0xb6, 0xd5, 0xef, 0x60, 0x13, 0x0f, 0x08, 0x3b, 0xff, 0xf7, 0xf9, 0xff, 0x42, 0xf0, 0x7b, + 0x87, 0xf9, 0xd6, 0x9b, 0xc1, 0x0b, 0xe9, 0x7e, 0x16, 0xc2, 0x3c, 0xbb, 0xa4, 0x99, 0xef, 0x67, + 0x19, 0x25, 0x34, 0x32, 0x9f, 0xb3, 0xe6, 0x32, 0xff, 0x03, 0x20, 0x2b, 0xae, 0x73, 0x7e, 0xd0, + 0xca, 0xbb, 0x51, 0xa9, 0x9c, 0xeb, 0x6b, 0xd4, 0xa7, 0x25, 0x70, 0x25, 0x73, 0x40, 0x7e, 0x81, + 0x77, 0x1a, 0x19, 0xb7, 0x54, 0x3c, 0x83, 0x5b, 0xda, 0x02, 0xcb, 0xe2, 0x43, 0x58, 0xca, 0x6c, + 0x85, 0x76, 0xb4, 0x9d, 0xec, 0x46, 0x69, 0xbc, 0xec, 0x4e, 0xa5, 0x7c, 0xc6, 0x3b, 0x95, 0x78, + 0x16, 0xe2, 0xff, 0x37, 0xfc, 0xaa, 0xcb, 0x66, 0x21, 0xfe, 0x8d, 0x23, 0x8d, 0x87, 0xdf, 0x0d, + 0x4a, 0x2a, 0x64, 0x98, 0xe3, 0x0c, 0xa9, 0x1a, 0x09, 0x09, 0x52, 0xe8, 0x67, 0xfa, 0xd8, 0xf3, + 0xbe, 0xe4, 0x63, 0xcf, 0xc6, 0x84, 0x52, 0x9e, 0xde, 0x2a, 0xfe, 0x4d, 0x01, 0x2f, 0xe5, 0xee, + 0x01, 0xb8, 0x95, 0xd0, 0xd9, 0xdb, 0x29, 0x9d, 0x7d, 0x39, 0x37, 0x30, 0x26, 0xb6, 0x86, 0xfc, + 0x42, 0xe4, 0xee, 0xc4, 0x0b, 0x11, 0x89, 0x8b, 0x9a, 0x7c, 0x33, 0xd2, 0xda, 0x78, 0xf2, 0xb4, + 0x36, 0xf3, 0xd1, 0xd3, 0xda, 0xcc, 0xc7, 0x4f, 0x6b, 0x33, 0xbf, 0x18, 0xd7, 0x94, 0x27, 0xe3, + 0x9a, 0xf2, 0xd1, 0xb8, 0xa6, 0x7c, 0x3c, 0xae, 0x29, 0xff, 0x1c, 0xd7, 0x94, 0xdf, 0x7e, 0x52, + 0x9b, 0x79, 0x58, 0x18, 0x6e, 0xfe, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x70, 0x5f, 0xbf, 0x58, 0x3d, + 0x26, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/apps/v1/generated.proto b/vendor/k8s.io/api/apps/v1/generated.proto new file mode 100644 index 000000000000..c0499d3258c4 --- /dev/null +++ b/vendor/k8s.io/api/apps/v1/generated.proto @@ -0,0 +1,701 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.apps.v1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/api/policy/v1beta1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// ControllerRevision implements an immutable snapshot of state data. Clients +// are responsible for serializing and deserializing the objects that contain +// their internal state. +// Once a ControllerRevision has been successfully created, it can not be updated. +// The API Server will fail validation of all requests that attempt to mutate +// the Data field. ControllerRevisions may, however, be deleted. 
Note that, due to its use by both +// the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, +// it may be subject to name and representation changes in future releases, and clients should not +// depend on its stability. It is primarily for internal use by controllers. +message ControllerRevision { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Data is the serialized representation of the state. + optional k8s.io.apimachinery.pkg.runtime.RawExtension data = 2; + + // Revision indicates the revision of the state represented by Data. + optional int64 revision = 3; +} + +// ControllerRevisionList is a resource containing a list of ControllerRevision objects. +message ControllerRevisionList { + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of ControllerRevisions + repeated ControllerRevision items = 2; +} + +// DaemonSet represents the configuration of a daemon set. +message DaemonSet { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // The desired behavior of this daemon set. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional DaemonSetSpec spec = 2; + + // The current status of this daemon set. This data may be + // out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional DaemonSetStatus status = 3; +} + +// DaemonSetCondition describes the state of a DaemonSet at a certain point. +message DaemonSetCondition { + // Type of DaemonSet condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // Last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. + // +optional + optional string reason = 4; + + // A human readable message indicating details about the transition. + // +optional + optional string message = 5; +} + +// DaemonSetList is a collection of daemon sets. +message DaemonSetList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // A list of daemon sets. + repeated DaemonSet items = 2; +} + +// DaemonSetSpec is the specification of a daemon set. +message DaemonSetSpec { + // A label query over pods that are managed by the daemon set. + // Must match in order to be controlled. + // It must match the pod template's labels. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; + + // An object that describes the pod that will be created. 
+ // The DaemonSet will create exactly one copy of this pod on every node + // that matches the template's node selector (or on every node if no node + // selector is specified). + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template + optional k8s.io.api.core.v1.PodTemplateSpec template = 2; + + // An update strategy to replace existing DaemonSet pods with new pods. + // +optional + optional DaemonSetUpdateStrategy updateStrategy = 3; + + // The minimum number of seconds for which a newly created DaemonSet pod should + // be ready without any of its container crashing, for it to be considered + // available. Defaults to 0 (pod will be considered available as soon as it + // is ready). + // +optional + optional int32 minReadySeconds = 4; + + // The number of old history to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 10. + // +optional + optional int32 revisionHistoryLimit = 6; +} + +// DaemonSetStatus represents the current status of a daemon set. +message DaemonSetStatus { + // The number of nodes that are running at least 1 + // daemon pod and are supposed to run the daemon pod. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ + optional int32 currentNumberScheduled = 1; + + // The number of nodes that are running the daemon pod, but are + // not supposed to run the daemon pod. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ + optional int32 numberMisscheduled = 2; + + // The total number of nodes that should be running the daemon + // pod (including nodes correctly running the daemon pod). + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ + optional int32 desiredNumberScheduled = 3; + + // The number of nodes that should be running the daemon pod and have one + // or more of the daemon pod running and ready. + optional int32 numberReady = 4; + + // The most recent generation observed by the daemon set controller. + // +optional + optional int64 observedGeneration = 5; + + // The total number of nodes that are running updated daemon pod + // +optional + optional int32 updatedNumberScheduled = 6; + + // The number of nodes that should be running the + // daemon pod and have one or more of the daemon pod running and + // available (ready for at least spec.minReadySeconds) + // +optional + optional int32 numberAvailable = 7; + + // The number of nodes that should be running the + // daemon pod and have none of the daemon pod running and available + // (ready for at least spec.minReadySeconds) + // +optional + optional int32 numberUnavailable = 8; + + // Count of hash collisions for the DaemonSet. The DaemonSet controller + // uses this field as a collision avoidance mechanism when it needs to + // create the name for the newest ControllerRevision. + // +optional + optional int32 collisionCount = 9; + + // Represents the latest available observations of a DaemonSet's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated DaemonSetCondition conditions = 10; +} + +// DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet. +message DaemonSetUpdateStrategy { + // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". Default is RollingUpdate. + // +optional + optional string type = 1; + + // Rolling update config params. Present only if type = "RollingUpdate". 
+ // --- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. Same as Deployment `strategy.rollingUpdate`. + // See https://github.com/kubernetes/kubernetes/issues/35345 + // +optional + optional RollingUpdateDaemonSet rollingUpdate = 2; +} + +// Deployment enables declarative updates for Pods and ReplicaSets. +message Deployment { + // Standard object metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the Deployment. + // +optional + optional DeploymentSpec spec = 2; + + // Most recently observed status of the Deployment. + // +optional + optional DeploymentStatus status = 3; +} + +// DeploymentCondition describes the state of a deployment at a certain point. +message DeploymentCondition { + // Type of deployment condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // The last time this condition was updated. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; + + // Last time the condition transitioned from one status to another. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; + + // The reason for the condition's last transition. + optional string reason = 4; + + // A human readable message indicating details about the transition. + optional string message = 5; +} + +// DeploymentList is a list of Deployments. +message DeploymentList { + // Standard list metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of Deployments. + repeated Deployment items = 2; +} + +// DeploymentSpec is the specification of the desired behavior of the Deployment. +message DeploymentSpec { + // Number of desired pods. This is a pointer to distinguish between explicit + // zero and not specified. Defaults to 1. + // +optional + optional int32 replicas = 1; + + // Label selector for pods. Existing ReplicaSets whose pods are + // selected by this will be the ones affected by this deployment. + // It must match the pod template's labels. + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + + // Template describes the pods that will be created. + optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + + // The deployment strategy to use to replace existing pods with new ones. + // +optional + optional DeploymentStrategy strategy = 4; + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + optional int32 minReadySeconds = 5; + + // The number of old ReplicaSets to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 10. + // +optional + optional int32 revisionHistoryLimit = 6; + + // Indicates that the deployment is paused. + // +optional + optional bool paused = 7; + + // The maximum time in seconds for a deployment to make progress before it + // is considered to be failed. The deployment controller will continue to + // process failed deployments and a condition with a ProgressDeadlineExceeded + // reason will be surfaced in the deployment status. Note that progress will + // not be estimated during the time a deployment is paused. Defaults to 600s. 
+ optional int32 progressDeadlineSeconds = 9; +} + +// DeploymentStatus is the most recently observed status of the Deployment. +message DeploymentStatus { + // The generation observed by the deployment controller. + // +optional + optional int64 observedGeneration = 1; + + // Total number of non-terminated pods targeted by this deployment (their labels match the selector). + // +optional + optional int32 replicas = 2; + + // Total number of non-terminated pods targeted by this deployment that have the desired template spec. + // +optional + optional int32 updatedReplicas = 3; + + // Total number of ready pods targeted by this deployment. + // +optional + optional int32 readyReplicas = 7; + + // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. + // +optional + optional int32 availableReplicas = 4; + + // Total number of unavailable pods targeted by this deployment. This is the total number of + // pods that are still required for the deployment to have 100% available capacity. They may + // either be pods that are running but not yet available or pods that still have not been created. + // +optional + optional int32 unavailableReplicas = 5; + + // Represents the latest available observations of a deployment's current state. + // +patchMergeKey=type + // +patchStrategy=merge + repeated DeploymentCondition conditions = 6; + + // Count of hash collisions for the Deployment. The Deployment controller uses this + // field as a collision avoidance mechanism when it needs to create the name for the + // newest ReplicaSet. + // +optional + optional int32 collisionCount = 8; +} + +// DeploymentStrategy describes how to replace existing pods with new ones. +message DeploymentStrategy { + // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. + // +optional + optional string type = 1; + + // Rolling update config params. Present only if DeploymentStrategyType = + // RollingUpdate. + // --- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. + // +optional + optional RollingUpdateDeployment rollingUpdate = 2; +} + +// ReplicaSet ensures that a specified number of pod replicas are running at any given time. +message ReplicaSet { + // If the Labels of a ReplicaSet are empty, they are defaulted to + // be the same as the Pod(s) that the ReplicaSet manages. + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the specification of the desired behavior of the ReplicaSet. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional ReplicaSetSpec spec = 2; + + // Status is the most recently observed status of the ReplicaSet. + // This data may be out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional ReplicaSetStatus status = 3; +} + +// ReplicaSetCondition describes the state of a replica set at a certain point. +message ReplicaSetCondition { + // Type of replica set condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // The last time the condition transitioned from one status to another. 
+ // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. + // +optional + optional string reason = 4; + + // A human readable message indicating details about the transition. + // +optional + optional string message = 5; +} + +// ReplicaSetList is a collection of ReplicaSets. +message ReplicaSetList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of ReplicaSets. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller + repeated ReplicaSet items = 2; +} + +// ReplicaSetSpec is the specification of a ReplicaSet. +message ReplicaSetSpec { + // Replicas is the number of desired replicas. + // This is a pointer to distinguish between explicit zero and unspecified. + // Defaults to 1. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller + // +optional + optional int32 replicas = 1; + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + optional int32 minReadySeconds = 4; + + // Selector is a label query over pods that should match the replica count. + // Label keys and values that must match in order to be controlled by this replica set. + // It must match the pod template's labels. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template + // +optional + optional k8s.io.api.core.v1.PodTemplateSpec template = 3; +} + +// ReplicaSetStatus represents the current status of a ReplicaSet. +message ReplicaSetStatus { + // Replicas is the most recently oberved number of replicas. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller + optional int32 replicas = 1; + + // The number of pods that have labels matching the labels of the pod template of the replicaset. + // +optional + optional int32 fullyLabeledReplicas = 2; + + // The number of ready replicas for this replica set. + // +optional + optional int32 readyReplicas = 4; + + // The number of available replicas (ready for at least minReadySeconds) for this replica set. + // +optional + optional int32 availableReplicas = 5; + + // ObservedGeneration reflects the generation of the most recently observed ReplicaSet. + // +optional + optional int64 observedGeneration = 3; + + // Represents the latest available observations of a replica set's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated ReplicaSetCondition conditions = 6; +} + +// Spec to control the desired behavior of daemon set rolling update. +message RollingUpdateDaemonSet { + // The maximum number of DaemonSet pods that can be unavailable during the + // update. 
Value can be an absolute number (ex: 5) or a percentage of total + // number of DaemonSet pods at the start of the update (ex: 10%). Absolute + // number is calculated from percentage by rounding up. + // This cannot be 0. + // Default value is 1. + // Example: when this is set to 30%, at most 30% of the total number of nodes + // that should be running the daemon pod (i.e. status.desiredNumberScheduled) + // can have their pods stopped for an update at any given + // time. The update starts by stopping at most 30% of those DaemonSet pods + // and then brings up new DaemonSet pods in their place. Once the new pods + // are available, it then proceeds onto other DaemonSet pods, thus ensuring + // that at least 70% of original number of DaemonSet pods are available at + // all times during the update. + // +optional + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; +} + +// Spec to control the desired behavior of rolling update. +message RollingUpdateDeployment { + // The maximum number of pods that can be unavailable during the update. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // Absolute number is calculated from percentage by rounding down. + // This can not be 0 if MaxSurge is 0. + // Defaults to 25%. + // Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods + // immediately when the rolling update starts. Once new pods are ready, old RC + // can be scaled down further, followed by scaling up the new RC, ensuring + // that the total number of pods available at all times during the update is at + // least 70% of desired pods. + // +optional + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; + + // The maximum number of pods that can be scheduled above the desired number of + // pods. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // This can not be 0 if MaxUnavailable is 0. + // Absolute number is calculated from percentage by rounding up. + // Defaults to 25%. + // Example: when this is set to 30%, the new RC can be scaled up immediately when + // the rolling update starts, such that the total number of old and new pods do not exceed + // 130% of desired pods. Once old pods have been killed, + // new RC can be scaled up further, ensuring that total number of pods running + // at any time during the update is atmost 130% of desired pods. + // +optional + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; +} + +// RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. +message RollingUpdateStatefulSetStrategy { + // Partition indicates the ordinal at which the StatefulSet should be + // partitioned. + // Default value is 0. + // +optional + optional int32 partition = 1; +} + +// StatefulSet represents a set of pods with consistent identities. +// Identities are defined as: +// - Network: A single stable DNS and hostname. +// - Storage: As many VolumeClaims as requested. +// The StatefulSet guarantees that a given network identity will always +// map to the same storage identity. +message StatefulSet { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the desired identities of pods in this set. + // +optional + optional StatefulSetSpec spec = 2; + + // Status is the current status of Pods in this StatefulSet. This data + // may be out of date by some window of time. 
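Both rolling-update messages above accept either an absolute count or a percentage: the Deployment's maxUnavailable is rounded down, while maxSurge and the DaemonSet's maxUnavailable are rounded up. A minimal Go sketch of that documented arithmetic, assuming 10 desired pods (the helper name fromPercent is invented for illustration and is not the controller's code):

package main

import (
	"fmt"
	"math"
)

// fromPercent resolves a rolling-update percentage against a pod count.
// Per the field comments above, the Deployment's maxUnavailable rounds down,
// while maxSurge and the DaemonSet's maxUnavailable round up.
// Illustrative helper only; not the controller's implementation.
func fromPercent(percent, total int, roundUp bool) int {
	v := float64(percent) / 100 * float64(total)
	if roundUp {
		return int(math.Ceil(v))
	}
	return int(math.Floor(v))
}

func main() {
	desired := 10
	fmt.Println(fromPercent(25, desired, false)) // maxUnavailable 25% of 10 -> 2
	fmt.Println(fromPercent(25, desired, true))  // maxSurge 25% of 10 -> 3
}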
+ // +optional + optional StatefulSetStatus status = 3; +} + +// StatefulSetCondition describes the state of a statefulset at a certain point. +message StatefulSetCondition { + // Type of statefulset condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // Last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. + // +optional + optional string reason = 4; + + // A human readable message indicating details about the transition. + // +optional + optional string message = 5; +} + +// StatefulSetList is a collection of StatefulSets. +message StatefulSetList { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + repeated StatefulSet items = 2; +} + +// A StatefulSetSpec is the specification of a StatefulSet. +message StatefulSetSpec { + // replicas is the desired number of replicas of the given Template. + // These are replicas in the sense that they are instantiations of the + // same Template, but individual replicas also have a consistent identity. + // If unspecified, defaults to 1. + // TODO: Consider a rename of this field. + // +optional + optional int32 replicas = 1; + + // selector is a label query over pods that should match the replica count. + // It must match the pod template's labels. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + + // template is the object that describes the pod that will be created if + // insufficient replicas are detected. Each pod stamped out by the StatefulSet + // will fulfill this Template, but have a unique identity from the rest + // of the StatefulSet. + optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + + // volumeClaimTemplates is a list of claims that pods are allowed to reference. + // The StatefulSet controller is responsible for mapping network identities to + // claims in a way that maintains the identity of a pod. Every claim in + // this list must have at least one matching (by name) volumeMount in one + // container in the template. A claim in this list takes precedence over + // any volumes in the template, with the same name. + // TODO: Define the behavior if a claim already exists with the same name. + // +optional + repeated k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4; + + // serviceName is the name of the service that governs this StatefulSet. + // This service must exist before the StatefulSet, and is responsible for + // the network identity of the set. Pods get DNS/hostnames that follow the + // pattern: pod-specific-string.serviceName.default.svc.cluster.local + // where "pod-specific-string" is managed by the StatefulSet controller. + optional string serviceName = 5; + + // podManagementPolicy controls how pods are created during initial scale up, + // when replacing pods on nodes, or when scaling down. The default policy is + // `OrderedReady`, where pods are created in increasing order (pod-0, then + // pod-1, etc) and the controller will wait until each pod is ready before + // continuing. When scaling down, the pods are removed in the opposite order. 
+ // The alternative policy is `Parallel` which will create pods in parallel + // to match the desired scale without waiting, and on scale down will delete + // all pods at once. + // +optional + optional string podManagementPolicy = 6; + + // updateStrategy indicates the StatefulSetUpdateStrategy that will be + // employed to update Pods in the StatefulSet when a revision is made to + // Template. + optional StatefulSetUpdateStrategy updateStrategy = 7; + + // revisionHistoryLimit is the maximum number of revisions that will + // be maintained in the StatefulSet's revision history. The revision history + // consists of all revisions not represented by a currently applied + // StatefulSetSpec version. The default value is 10. + optional int32 revisionHistoryLimit = 8; +} + +// StatefulSetStatus represents the current state of a StatefulSet. +message StatefulSetStatus { + // observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the + // StatefulSet's generation, which is updated on mutation by the API Server. + // +optional + optional int64 observedGeneration = 1; + + // replicas is the number of Pods created by the StatefulSet controller. + optional int32 replicas = 2; + + // readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition. + optional int32 readyReplicas = 3; + + // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version + // indicated by currentRevision. + optional int32 currentReplicas = 4; + + // updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version + // indicated by updateRevision. + optional int32 updatedReplicas = 5; + + // currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the + // sequence [0,currentReplicas). + optional string currentRevision = 6; + + // updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence + // [replicas-updatedReplicas,replicas) + optional string updateRevision = 7; + + // collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller + // uses this field as a collision avoidance mechanism when it needs to create the name for the + // newest ControllerRevision. + // +optional + optional int32 collisionCount = 9; + + // Represents the latest available observations of a statefulset's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated StatefulSetCondition conditions = 10; +} + +// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet +// controller will use to perform updates. It includes any additional parameters +// necessary to perform the update for the indicated strategy. +message StatefulSetUpdateStrategy { + // Type indicates the type of the StatefulSetUpdateStrategy. + // Default is RollingUpdate. + // +optional + optional string type = 1; + + // RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType. + // +optional + optional RollingUpdateStatefulSetStrategy rollingUpdate = 2; +} + diff --git a/vendor/k8s.io/api/apps/v1/register.go b/vendor/k8s.io/api/apps/v1/register.go new file mode 100644 index 000000000000..027101046744 --- /dev/null +++ b/vendor/k8s.io/api/apps/v1/register.go @@ -0,0 +1,60 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "apps" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Deployment{}, + &DeploymentList{}, + &StatefulSet{}, + &StatefulSetList{}, + &DaemonSet{}, + &DaemonSetList{}, + &ReplicaSet{}, + &ReplicaSetList{}, + &ControllerRevision{}, + &ControllerRevisionList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/apps/v1/types.go b/vendor/k8s.io/api/apps/v1/types.go new file mode 100644 index 000000000000..f32300fe99f6 --- /dev/null +++ b/vendor/k8s.io/api/apps/v1/types.go @@ -0,0 +1,819 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" +) + +const ( + ControllerRevisionHashLabelKey = "controller-revision-hash" + StatefulSetRevisionLabel = ControllerRevisionHashLabelKey + DeprecatedRollbackTo = "deprecated.deployment.rollback.to" + DeprecatedTemplateGeneration = "deprecated.daemonset.template.generation" + StatefulSetPodNameLabel = "statefulset.kubernetes.io/pod-name" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// StatefulSet represents a set of pods with consistent identities. +// Identities are defined as: +// - Network: A single stable DNS and hostname. +// - Storage: As many VolumeClaims as requested. 
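For context, the SchemeBuilder and AddToScheme wired up in register.go above are meant to be consumed by callers that build their own runtime.Scheme. A hedged sketch of that usage (the surrounding program is illustrative; only NewScheme, AddToScheme, and ObjectKinds are assumed from the libraries):

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// Register the kinds listed in addKnownTypes into a fresh scheme.
	scheme := runtime.NewScheme()
	if err := appsv1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	// The scheme can now resolve Go types to their GroupVersionKind.
	gvks, _, err := scheme.ObjectKinds(&appsv1.Deployment{})
	if err != nil {
		panic(err)
	}
	fmt.Println(gvks[0]) // apps/v1, Kind=Deployment
}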
+// The StatefulSet guarantees that a given network identity will always +// map to the same storage identity. +type StatefulSet struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the desired identities of pods in this set. + // +optional + Spec StatefulSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status is the current status of Pods in this StatefulSet. This data + // may be out of date by some window of time. + // +optional + Status StatefulSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// PodManagementPolicyType defines the policy for creating pods under a stateful set. +type PodManagementPolicyType string + +const ( + // OrderedReadyPodManagement will create pods in strictly increasing order on + // scale up and strictly decreasing order on scale down, progressing only when + // the previous pod is ready or terminated. At most one pod will be changed + // at any time. + OrderedReadyPodManagement PodManagementPolicyType = "OrderedReady" + // ParallelPodManagement will create and delete pods as soon as the stateful set + // replica count is changed, and will not wait for pods to be ready or complete + // termination. + ParallelPodManagement = "Parallel" +) + +// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet +// controller will use to perform updates. It includes any additional parameters +// necessary to perform the update for the indicated strategy. +type StatefulSetUpdateStrategy struct { + // Type indicates the type of the StatefulSetUpdateStrategy. + // Default is RollingUpdate. + // +optional + Type StatefulSetUpdateStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=StatefulSetStrategyType"` + // RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType. + // +optional + RollingUpdate *RollingUpdateStatefulSetStrategy `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` +} + +// StatefulSetUpdateStrategyType is a string enumeration type that enumerates +// all possible update strategies for the StatefulSet controller. +type StatefulSetUpdateStrategyType string + +const ( + // RollingUpdateStatefulSetStrategyType indicates that update will be + // applied to all Pods in the StatefulSet with respect to the StatefulSet + // ordering constraints. When a scale operation is performed with this + // strategy, new Pods will be created from the specification version indicated + // by the StatefulSet's updateRevision. + RollingUpdateStatefulSetStrategyType = "RollingUpdate" + // OnDeleteStatefulSetStrategyType triggers the legacy behavior. Version + // tracking and ordered rolling restarts are disabled. Pods are recreated + // from the StatefulSetSpec when they are manually deleted. When a scale + // operation is performed with this strategy, new Pods will be created from the + // specification version indicated by the StatefulSet's currentRevision. + OnDeleteStatefulSetStrategyType = "OnDelete" +) + +// RollingUpdateStatefulSetStrategy is used to communicate parameters for RollingUpdateStatefulSetStrategyType. +type RollingUpdateStatefulSetStrategy struct { + // Partition indicates the ordinal at which the StatefulSet should be + // partitioned. + // Default value is 0. + // +optional + Partition *int32 `json:"partition,omitempty" protobuf:"varint,1,opt,name=partition"` +} + +// A StatefulSetSpec is the specification of a StatefulSet.
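The Partition field above allows staged rollouts: ordinals at or above the partition move to the new revision while lower ordinals stay on the current one. A short illustrative construction (the partition value of 3 is arbitrary; the program around it is not part of this change):

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
)

func main() {
	// Update only pods with ordinal >= 3; pods 0-2 keep the current revision
	// until the partition is lowered.
	partition := int32(3)
	strategy := appsv1.StatefulSetUpdateStrategy{
		Type: appsv1.RollingUpdateStatefulSetStrategyType,
		RollingUpdate: &appsv1.RollingUpdateStatefulSetStrategy{
			Partition: &partition,
		},
	}
	fmt.Printf("%+v\n", strategy)
}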
+type StatefulSetSpec struct { + // replicas is the desired number of replicas of the given Template. + // These are replicas in the sense that they are instantiations of the + // same Template, but individual replicas also have a consistent identity. + // If unspecified, defaults to 1. + // TODO: Consider a rename of this field. + // +optional + Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` + + // selector is a label query over pods that should match the replica count. + // It must match the pod template's labels. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"` + + // template is the object that describes the pod that will be created if + // insufficient replicas are detected. Each pod stamped out by the StatefulSet + // will fulfill this Template, but have a unique identity from the rest + // of the StatefulSet. + Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` + + // volumeClaimTemplates is a list of claims that pods are allowed to reference. + // The StatefulSet controller is responsible for mapping network identities to + // claims in a way that maintains the identity of a pod. Every claim in + // this list must have at least one matching (by name) volumeMount in one + // container in the template. A claim in this list takes precedence over + // any volumes in the template, with the same name. + // TODO: Define the behavior if a claim already exists with the same name. + // +optional + VolumeClaimTemplates []v1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty" protobuf:"bytes,4,rep,name=volumeClaimTemplates"` + + // serviceName is the name of the service that governs this StatefulSet. + // This service must exist before the StatefulSet, and is responsible for + // the network identity of the set. Pods get DNS/hostnames that follow the + // pattern: pod-specific-string.serviceName.default.svc.cluster.local + // where "pod-specific-string" is managed by the StatefulSet controller. + ServiceName string `json:"serviceName" protobuf:"bytes,5,opt,name=serviceName"` + + // podManagementPolicy controls how pods are created during initial scale up, + // when replacing pods on nodes, or when scaling down. The default policy is + // `OrderedReady`, where pods are created in increasing order (pod-0, then + // pod-1, etc) and the controller will wait until each pod is ready before + // continuing. When scaling down, the pods are removed in the opposite order. + // The alternative policy is `Parallel` which will create pods in parallel + // to match the desired scale without waiting, and on scale down will delete + // all pods at once. + // +optional + PodManagementPolicy PodManagementPolicyType `json:"podManagementPolicy,omitempty" protobuf:"bytes,6,opt,name=podManagementPolicy,casttype=PodManagementPolicyType"` + + // updateStrategy indicates the StatefulSetUpdateStrategy that will be + // employed to update Pods in the StatefulSet when a revision is made to + // Template. + UpdateStrategy StatefulSetUpdateStrategy `json:"updateStrategy,omitempty" protobuf:"bytes,7,opt,name=updateStrategy"` + + // revisionHistoryLimit is the maximum number of revisions that will + // be maintained in the StatefulSet's revision history. The revision history + // consists of all revisions not represented by a currently applied + // StatefulSetSpec version. The default value is 10. 
+ RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,8,opt,name=revisionHistoryLimit"` +} + +// StatefulSetStatus represents the current state of a StatefulSet. +type StatefulSetStatus struct { + // observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the + // StatefulSet's generation, which is updated on mutation by the API Server. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + + // replicas is the number of Pods created by the StatefulSet controller. + Replicas int32 `json:"replicas" protobuf:"varint,2,opt,name=replicas"` + + // readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition. + ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,3,opt,name=readyReplicas"` + + // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version + // indicated by currentRevision. + CurrentReplicas int32 `json:"currentReplicas,omitempty" protobuf:"varint,4,opt,name=currentReplicas"` + + // updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version + // indicated by updateRevision. + UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,5,opt,name=updatedReplicas"` + + // currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the + // sequence [0,currentReplicas). + CurrentRevision string `json:"currentRevision,omitempty" protobuf:"bytes,6,opt,name=currentRevision"` + + // updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence + // [replicas-updatedReplicas,replicas) + UpdateRevision string `json:"updateRevision,omitempty" protobuf:"bytes,7,opt,name=updateRevision"` + + // collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller + // uses this field as a collision avoidance mechanism when it needs to create the name for the + // newest ControllerRevision. + // +optional + CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"` + + // Represents the latest available observations of a statefulset's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []StatefulSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` +} + +type StatefulSetConditionType string + +// StatefulSetCondition describes the state of a statefulset at a certain point. +type StatefulSetCondition struct { + // Type of statefulset condition. + Type StatefulSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=StatefulSetConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // Last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. 
+ // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// StatefulSetList is a collection of StatefulSets. +type StatefulSetList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + Items []StatefulSet `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Deployment enables declarative updates for Pods and ReplicaSets. +type Deployment struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior of the Deployment. + // +optional + Spec DeploymentSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Most recently observed status of the Deployment. + // +optional + Status DeploymentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// DeploymentSpec is the specification of the desired behavior of the Deployment. +type DeploymentSpec struct { + // Number of desired pods. This is a pointer to distinguish between explicit + // zero and not specified. Defaults to 1. + // +optional + Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` + + // Label selector for pods. Existing ReplicaSets whose pods are + // selected by this will be the ones affected by this deployment. + // It must match the pod template's labels. + Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"` + + // Template describes the pods that will be created. + Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` + + // The deployment strategy to use to replace existing pods with new ones. + // +optional + Strategy DeploymentStrategy `json:"strategy,omitempty" protobuf:"bytes,4,opt,name=strategy"` + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,5,opt,name=minReadySeconds"` + + // The number of old ReplicaSets to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 10. + // +optional + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"` + + // Indicates that the deployment is paused. + // +optional + Paused bool `json:"paused,omitempty" protobuf:"varint,7,opt,name=paused"` + + // The maximum time in seconds for a deployment to make progress before it + // is considered to be failed. The deployment controller will continue to + // process failed deployments and a condition with a ProgressDeadlineExceeded + // reason will be surfaced in the deployment status. Note that progress will + // not be estimated during the time a deployment is paused. Defaults to 600s. 
+ ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty" protobuf:"varint,9,opt,name=progressDeadlineSeconds"` +} + +const ( + // DefaultDeploymentUniqueLabelKey is the default key of the selector that is added + // to existing RCs (and label key that is added to its pods) to prevent the existing RCs + // to select new pods (and old pods being select by new RC). + DefaultDeploymentUniqueLabelKey string = "pod-template-hash" +) + +// DeploymentStrategy describes how to replace existing pods with new ones. +type DeploymentStrategy struct { + // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. + // +optional + Type DeploymentStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentStrategyType"` + + // Rolling update config params. Present only if DeploymentStrategyType = + // RollingUpdate. + //--- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. + // +optional + RollingUpdate *RollingUpdateDeployment `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` +} + +type DeploymentStrategyType string + +const ( + // Kill all existing pods before creating new ones. + RecreateDeploymentStrategyType DeploymentStrategyType = "Recreate" + + // Replace the old RCs by new one using rolling update i.e gradually scale down the old RCs and scale up the new one. + RollingUpdateDeploymentStrategyType DeploymentStrategyType = "RollingUpdate" +) + +// Spec to control the desired behavior of rolling update. +type RollingUpdateDeployment struct { + // The maximum number of pods that can be unavailable during the update. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // Absolute number is calculated from percentage by rounding down. + // This can not be 0 if MaxSurge is 0. + // Defaults to 25%. + // Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods + // immediately when the rolling update starts. Once new pods are ready, old RC + // can be scaled down further, followed by scaling up the new RC, ensuring + // that the total number of pods available at all times during the update is at + // least 70% of desired pods. + // +optional + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"` + + // The maximum number of pods that can be scheduled above the desired number of + // pods. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // This can not be 0 if MaxUnavailable is 0. + // Absolute number is calculated from percentage by rounding up. + // Defaults to 25%. + // Example: when this is set to 30%, the new RC can be scaled up immediately when + // the rolling update starts, such that the total number of old and new pods do not exceed + // 130% of desired pods. Once old pods have been killed, + // new RC can be scaled up further, ensuring that total number of pods running + // at any time during the update is atmost 130% of desired pods. + // +optional + MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"` +} + +// DeploymentStatus is the most recently observed status of the Deployment. +type DeploymentStatus struct { + // The generation observed by the deployment controller. 
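Tying the Deployment fields above together: Selector must match the template's labels, and the rolling-update bounds accept either absolute values or percentages. The sketch below is illustrative only; the name "demo", the label set, and the nginx image are invented and not taken from this change:

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	replicas := int32(3)
	maxUnavailable := intstr.FromString("25%") // rounded down against desired pods
	maxSurge := intstr.FromInt(1)              // absolute value, no rounding needed
	labels := map[string]string{"app": "demo"} // invented label set

	d := appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{Name: "demo"},
		Spec: appsv1.DeploymentSpec{
			Replicas: &replicas,
			// The selector must match the pod template's labels, as the
			// field comments require.
			Selector: &metav1.LabelSelector{MatchLabels: labels},
			Strategy: appsv1.DeploymentStrategy{
				Type: appsv1.RollingUpdateDeploymentStrategyType,
				RollingUpdate: &appsv1.RollingUpdateDeployment{
					MaxUnavailable: &maxUnavailable,
					MaxSurge:       &maxSurge,
				},
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: labels},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{
						{Name: "web", Image: "nginx:1.13"}, // invented image
					},
				},
			},
		},
	}
	fmt.Printf("%s: %d replicas\n", d.Name, *d.Spec.Replicas)
}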
+ // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + + // Total number of non-terminated pods targeted by this deployment (their labels match the selector). + // +optional + Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"` + + // Total number of non-terminated pods targeted by this deployment that have the desired template spec. + // +optional + UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"` + + // Total number of ready pods targeted by this deployment. + // +optional + ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"` + + // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. + // +optional + AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"` + + // Total number of unavailable pods targeted by this deployment. This is the total number of + // pods that are still required for the deployment to have 100% available capacity. They may + // either be pods that are running but not yet available or pods that still have not been created. + // +optional + UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"` + + // Represents the latest available observations of a deployment's current state. + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` + + // Count of hash collisions for the Deployment. The Deployment controller uses this + // field as a collision avoidance mechanism when it needs to create the name for the + // newest ReplicaSet. + // +optional + CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,8,opt,name=collisionCount"` +} + +type DeploymentConditionType string + +// These are valid conditions of a deployment. +const ( + // Available means the deployment is available, ie. at least the minimum available + // replicas required are up and running for at least minReadySeconds. + DeploymentAvailable DeploymentConditionType = "Available" + // Progressing means the deployment is progressing. Progress for a deployment is + // considered when a new replica set is created or adopted, and when new pods scale + // up or old pods scale down. Progress is not estimated for paused deployments or + // when progressDeadlineSeconds is not specified. + DeploymentProgressing DeploymentConditionType = "Progressing" + // ReplicaFailure is added in a deployment when one of its pods fails to be created + // or deleted. + DeploymentReplicaFailure DeploymentConditionType = "ReplicaFailure" +) + +// DeploymentCondition describes the state of a deployment at a certain point. +type DeploymentCondition struct { + // Type of deployment condition. + Type DeploymentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // The last time this condition was updated. + LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"` + // Last time the condition transitioned from one status to another. 
+ LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,7,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DeploymentList is a list of Deployments. +type DeploymentList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of Deployments. + Items []Deployment `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet. +type DaemonSetUpdateStrategy struct { + // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". Default is RollingUpdate. + // +optional + Type DaemonSetUpdateStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"` + + // Rolling update config params. Present only if type = "RollingUpdate". + //--- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. Same as Deployment `strategy.rollingUpdate`. + // See https://github.com/kubernetes/kubernetes/issues/35345 + // +optional + RollingUpdate *RollingUpdateDaemonSet `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` +} + +type DaemonSetUpdateStrategyType string + +const ( + // Replace the old daemons by new ones using rolling update i.e replace them on each node one after the other. + RollingUpdateDaemonSetStrategyType DaemonSetUpdateStrategyType = "RollingUpdate" + + // Replace the old daemons only when it's killed + OnDeleteDaemonSetStrategyType DaemonSetUpdateStrategyType = "OnDelete" +) + +// Spec to control the desired behavior of daemon set rolling update. +type RollingUpdateDaemonSet struct { + // The maximum number of DaemonSet pods that can be unavailable during the + // update. Value can be an absolute number (ex: 5) or a percentage of total + // number of DaemonSet pods at the start of the update (ex: 10%). Absolute + // number is calculated from percentage by rounding up. + // This cannot be 0. + // Default value is 1. + // Example: when this is set to 30%, at most 30% of the total number of nodes + // that should be running the daemon pod (i.e. status.desiredNumberScheduled) + // can have their pods stopped for an update at any given + // time. The update starts by stopping at most 30% of those DaemonSet pods + // and then brings up new DaemonSet pods in their place. Once the new pods + // are available, it then proceeds onto other DaemonSet pods, thus ensuring + // that at least 70% of original number of DaemonSet pods are available at + // all times during the update. + // +optional + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"` +} + +// DaemonSetSpec is the specification of a daemon set. +type DaemonSetSpec struct { + // A label query over pods that are managed by the daemon set. + // Must match in order to be controlled. + // It must match the pod template's labels. 
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,1,opt,name=selector"` + + // An object that describes the pod that will be created. + // The DaemonSet will create exactly one copy of this pod on every node + // that matches the template's node selector (or on every node if no node + // selector is specified). + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template + Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,2,opt,name=template"` + + // An update strategy to replace existing DaemonSet pods with new pods. + // +optional + UpdateStrategy DaemonSetUpdateStrategy `json:"updateStrategy,omitempty" protobuf:"bytes,3,opt,name=updateStrategy"` + + // The minimum number of seconds for which a newly created DaemonSet pod should + // be ready without any of its container crashing, for it to be considered + // available. Defaults to 0 (pod will be considered available as soon as it + // is ready). + // +optional + MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"` + + // The number of old history to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 10. + // +optional + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"` +} + +// DaemonSetStatus represents the current status of a daemon set. +type DaemonSetStatus struct { + // The number of nodes that are running at least 1 + // daemon pod and are supposed to run the daemon pod. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ + CurrentNumberScheduled int32 `json:"currentNumberScheduled" protobuf:"varint,1,opt,name=currentNumberScheduled"` + + // The number of nodes that are running the daemon pod, but are + // not supposed to run the daemon pod. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ + NumberMisscheduled int32 `json:"numberMisscheduled" protobuf:"varint,2,opt,name=numberMisscheduled"` + + // The total number of nodes that should be running the daemon + // pod (including nodes correctly running the daemon pod). + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ + DesiredNumberScheduled int32 `json:"desiredNumberScheduled" protobuf:"varint,3,opt,name=desiredNumberScheduled"` + + // The number of nodes that should be running the daemon pod and have one + // or more of the daemon pod running and ready. + NumberReady int32 `json:"numberReady" protobuf:"varint,4,opt,name=numberReady"` + + // The most recent generation observed by the daemon set controller. 
+ // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,5,opt,name=observedGeneration"` + + // The total number of nodes that are running updated daemon pod + // +optional + UpdatedNumberScheduled int32 `json:"updatedNumberScheduled,omitempty" protobuf:"varint,6,opt,name=updatedNumberScheduled"` + + // The number of nodes that should be running the + // daemon pod and have one or more of the daemon pod running and + // available (ready for at least spec.minReadySeconds) + // +optional + NumberAvailable int32 `json:"numberAvailable,omitempty" protobuf:"varint,7,opt,name=numberAvailable"` + + // The number of nodes that should be running the + // daemon pod and have none of the daemon pod running and available + // (ready for at least spec.minReadySeconds) + // +optional + NumberUnavailable int32 `json:"numberUnavailable,omitempty" protobuf:"varint,8,opt,name=numberUnavailable"` + + // Count of hash collisions for the DaemonSet. The DaemonSet controller + // uses this field as a collision avoidance mechanism when it needs to + // create the name for the newest ControllerRevision. + // +optional + CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"` + + // Represents the latest available observations of a DaemonSet's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []DaemonSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` +} + +type DaemonSetConditionType string + +// TODO: Add valid condition types of a DaemonSet. + +// DaemonSetCondition describes the state of a DaemonSet at a certain point. +type DaemonSetCondition struct { + // Type of DaemonSet condition. + Type DaemonSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DaemonSetConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // Last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DaemonSet represents the configuration of a daemon set. +type DaemonSet struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // The desired behavior of this daemon set. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Spec DaemonSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // The current status of this daemon set. This data may be + // out of date by some window of time. + // Populated by the system. + // Read-only. 
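The DaemonSetStatus counters above are what callers usually compare to decide whether an update has finished rolling out. The check below is one illustrative reading of those fields, not the controller's own logic (the helper name rolledOut is invented):

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
)

// rolledOut reads the status fields documented above: the controller has
// observed the latest spec, and every node that should run the daemon pod
// runs the updated revision and has it available. Illustrative only.
func rolledOut(ds *appsv1.DaemonSet) bool {
	s := ds.Status
	return s.ObservedGeneration >= ds.Generation &&
		s.UpdatedNumberScheduled == s.DesiredNumberScheduled &&
		s.NumberAvailable == s.DesiredNumberScheduled
}

func main() {
	fmt.Println(rolledOut(&appsv1.DaemonSet{})) // zero value: trivially true
}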
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Status DaemonSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +const ( + // DefaultDaemonSetUniqueLabelKey is the default label key that is added + // to existing DaemonSet pods to distinguish between old and new + // DaemonSet pods during DaemonSet template updates. + DefaultDaemonSetUniqueLabelKey = ControllerRevisionHashLabelKey +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DaemonSetList is a collection of daemon sets. +type DaemonSetList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // A list of daemon sets. + Items []DaemonSet `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ReplicaSet ensures that a specified number of pod replicas are running at any given time. +type ReplicaSet struct { + metav1.TypeMeta `json:",inline"` + + // If the Labels of a ReplicaSet are empty, they are defaulted to + // be the same as the Pod(s) that the ReplicaSet manages. + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the specification of the desired behavior of the ReplicaSet. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Spec ReplicaSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status is the most recently observed status of the ReplicaSet. + // This data may be out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Status ReplicaSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ReplicaSetList is a collection of ReplicaSets. +type ReplicaSetList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of ReplicaSets. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller + Items []ReplicaSet `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ReplicaSetSpec is the specification of a ReplicaSet. +type ReplicaSetSpec struct { + // Replicas is the number of desired replicas. + // This is a pointer to distinguish between explicit zero and unspecified. + // Defaults to 1. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller + // +optional + Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. 
+ // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"` + + // Selector is a label query over pods that should match the replica count. + // Label keys and values that must match in order to be controlled by this replica set. + // It must match the pod template's labels. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"` + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template + // +optional + Template v1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"` +} + +// ReplicaSetStatus represents the current status of a ReplicaSet. +type ReplicaSetStatus struct { + // Replicas is the most recently oberved number of replicas. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller + Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` + + // The number of pods that have labels matching the labels of the pod template of the replicaset. + // +optional + FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"` + + // The number of ready replicas for this replica set. + // +optional + ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"` + + // The number of available replicas (ready for at least minReadySeconds) for this replica set. + // +optional + AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,5,opt,name=availableReplicas"` + + // ObservedGeneration reflects the generation of the most recently observed ReplicaSet. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"` + + // Represents the latest available observations of a replica set's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []ReplicaSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` +} + +type ReplicaSetConditionType string + +// These are valid conditions of a replica set. +const ( + // ReplicaSetReplicaFailure is added in a replica set when one of its pods fails to be created + // due to insufficient quota, limit ranges, pod security policy, node selectors, etc. or deleted + // due to kubelet being down or finalizers are failing. + ReplicaSetReplicaFailure ReplicaSetConditionType = "ReplicaFailure" +) + +// ReplicaSetCondition describes the state of a replica set at a certain point. +type ReplicaSetCondition struct { + // Type of replica set condition. + Type ReplicaSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ReplicaSetConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // The last time the condition transitioned from one status to another. 
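The ReplicaFailure condition described above is where quota, policy, or deletion problems surface on a ReplicaSet. A small illustrative helper that scans for it (hasReplicaFailure is an invented name, not part of the API added here):

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
)

// hasReplicaFailure reports whether the ReplicaFailure condition is True.
// Illustrative helper only.
func hasReplicaFailure(rs *appsv1.ReplicaSet) bool {
	for _, c := range rs.Status.Conditions {
		if c.Type == appsv1.ReplicaSetReplicaFailure && c.Status == corev1.ConditionTrue {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(hasReplicaFailure(&appsv1.ReplicaSet{})) // false: no conditions set
}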
+ // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ControllerRevision implements an immutable snapshot of state data. Clients +// are responsible for serializing and deserializing the objects that contain +// their internal state. +// Once a ControllerRevision has been successfully created, it can not be updated. +// The API Server will fail validation of all requests that attempt to mutate +// the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both +// the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, +// it may be subject to name and representation changes in future releases, and clients should not +// depend on its stability. It is primarily for internal use by controllers. +type ControllerRevision struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Data is the serialized representation of the state. + Data runtime.RawExtension `json:"data,omitempty" protobuf:"bytes,2,opt,name=data"` + + // Revision indicates the revision of the state represented by Data. + Revision int64 `json:"revision" protobuf:"varint,3,opt,name=revision"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ControllerRevisionList is a resource containing a list of ControllerRevision objects. +type ControllerRevisionList struct { + metav1.TypeMeta `json:",inline"` + + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of ControllerRevisions + Items []ControllerRevision `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go new file mode 100644 index 000000000000..76305393e213 --- /dev/null +++ b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go @@ -0,0 +1,365 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. 
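ControllerRevision, defined just above, stores an opaque serialized snapshot in Data and orders snapshots by Revision. A hedged sketch of filling one in (real controllers serialize a patch of the workload template; the name "web-1" and the payload here are invented):

package main

import (
	"encoding/json"
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// Serialize a made-up StatefulSetSpec and store it as revision 1.
	raw, err := json.Marshal(appsv1.StatefulSetSpec{ServiceName: "web"})
	if err != nil {
		panic(err)
	}
	rev := appsv1.ControllerRevision{
		ObjectMeta: metav1.ObjectMeta{Name: "web-1"},
		Data:       runtime.RawExtension{Raw: raw},
		Revision:   1,
	}
	fmt.Println(rev.Revision, len(rev.Data.Raw) > 0)
}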
Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_ControllerRevision = map[string]string{ + "": "ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "data": "Data is the serialized representation of the state.", + "revision": "Revision indicates the revision of the state represented by Data.", +} + +func (ControllerRevision) SwaggerDoc() map[string]string { + return map_ControllerRevision +} + +var map_ControllerRevisionList = map[string]string{ + "": "ControllerRevisionList is a resource containing a list of ControllerRevision objects.", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "items": "Items is the list of ControllerRevisions", +} + +func (ControllerRevisionList) SwaggerDoc() map[string]string { + return map_ControllerRevisionList +} + +var map_DaemonSet = map[string]string{ + "": "DaemonSet represents the configuration of a daemon set.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", +} + +func (DaemonSet) SwaggerDoc() map[string]string { + return map_DaemonSet +} + +var map_DaemonSetCondition = map[string]string{ + "": "DaemonSetCondition describes the state of a DaemonSet at a certain point.", + "type": "Type of DaemonSet condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (DaemonSetCondition) SwaggerDoc() map[string]string { + return map_DaemonSetCondition +} + +var map_DaemonSetList = map[string]string{ + "": "DaemonSetList is a collection of daemon sets.", + "metadata": "Standard list metadata. 
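These generated maps are exposed through the SwaggerDoc() methods defined alongside them; a small sketch of looking one up at runtime (the import alias and the chosen type are assumptions, not taken from this change):

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
)

func main() {
	// The empty key holds the documentation for the type itself; the other
	// keys are JSON field names.
	docs := (appsv1.DaemonSet{}).SwaggerDoc()
	fmt.Println(docs[""])
	fmt.Println(docs["spec"])
}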
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "items": "A list of daemon sets.", +} + +func (DaemonSetList) SwaggerDoc() map[string]string { + return map_DaemonSetList +} + +var map_DaemonSetSpec = map[string]string{ + "": "DaemonSetSpec is the specification of a daemon set.", + "selector": "A label query over pods that are managed by the daemon set. Must match in order to be controlled. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "template": "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", + "updateStrategy": "An update strategy to replace existing DaemonSet pods with new pods.", + "minReadySeconds": "The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).", + "revisionHistoryLimit": "The number of old history to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.", +} + +func (DaemonSetSpec) SwaggerDoc() map[string]string { + return map_DaemonSetSpec +} + +var map_DaemonSetStatus = map[string]string{ + "": "DaemonSetStatus represents the current status of a daemon set.", + "currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", + "numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", + "desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", + "numberReady": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready.", + "observedGeneration": "The most recent generation observed by the daemon set controller.", + "updatedNumberScheduled": "The total number of nodes that are running updated daemon pod", + "numberAvailable": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds)", + "numberUnavailable": "The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds)", + "collisionCount": "Count of hash collisions for the DaemonSet. 
The DaemonSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", + "conditions": "Represents the latest available observations of a DaemonSet's current state.", +} + +func (DaemonSetStatus) SwaggerDoc() map[string]string { + return map_DaemonSetStatus +} + +var map_DaemonSetUpdateStrategy = map[string]string{ + "": "DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet.", + "type": "Type of daemon set update. Can be \"RollingUpdate\" or \"OnDelete\". Default is RollingUpdate.", + "rollingUpdate": "Rolling update config params. Present only if type = \"RollingUpdate\".", +} + +func (DaemonSetUpdateStrategy) SwaggerDoc() map[string]string { + return map_DaemonSetUpdateStrategy +} + +var map_Deployment = map[string]string{ + "": "Deployment enables declarative updates for Pods and ReplicaSets.", + "metadata": "Standard object metadata.", + "spec": "Specification of the desired behavior of the Deployment.", + "status": "Most recently observed status of the Deployment.", +} + +func (Deployment) SwaggerDoc() map[string]string { + return map_Deployment +} + +var map_DeploymentCondition = map[string]string{ + "": "DeploymentCondition describes the state of a deployment at a certain point.", + "type": "Type of deployment condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastUpdateTime": "The last time this condition was updated.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (DeploymentCondition) SwaggerDoc() map[string]string { + return map_DeploymentCondition +} + +var map_DeploymentList = map[string]string{ + "": "DeploymentList is a list of Deployments.", + "metadata": "Standard list metadata.", + "items": "Items is the list of Deployments.", +} + +func (DeploymentList) SwaggerDoc() map[string]string { + return map_DeploymentList +} + +var map_DeploymentSpec = map[string]string{ + "": "DeploymentSpec is the specification of the desired behavior of the Deployment.", + "replicas": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", + "selector": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment. It must match the pod template's labels.", + "template": "Template describes the pods that will be created.", + "strategy": "The deployment strategy to use to replace existing pods with new ones.", + "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.", + "paused": "Indicates that the deployment is paused.", + "progressDeadlineSeconds": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. 
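A short sketch, with assumed names and images, of how the DeploymentSpec fields documented here fit together in Go; the 600-second deadline and the history limit of 10 simply mirror the stated defaults:

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func int32Ptr(i int32) *int32 { return &i }

func main() {
	labels := map[string]string{"app": "demo"}
	spec := appsv1.DeploymentSpec{
		Replicas: int32Ptr(3),
		Selector: &metav1.LabelSelector{MatchLabels: labels},
		Template: corev1.PodTemplateSpec{
			ObjectMeta: metav1.ObjectMeta{Labels: labels},
			Spec: corev1.PodSpec{
				Containers: []corev1.Container{{Name: "demo", Image: "nginx:1.13"}},
			},
		},
		Strategy:                appsv1.DeploymentStrategy{Type: appsv1.RollingUpdateDeploymentStrategyType},
		MinReadySeconds:         5,
		RevisionHistoryLimit:    int32Ptr(10),
		ProgressDeadlineSeconds: int32Ptr(600),
	}
	fmt.Println(*spec.Replicas, spec.Strategy.Type)
}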
Note that progress will not be estimated during the time a deployment is paused. Defaults to 600s.", +} + +func (DeploymentSpec) SwaggerDoc() map[string]string { + return map_DeploymentSpec +} + +var map_DeploymentStatus = map[string]string{ + "": "DeploymentStatus is the most recently observed status of the Deployment.", + "observedGeneration": "The generation observed by the deployment controller.", + "replicas": "Total number of non-terminated pods targeted by this deployment (their labels match the selector).", + "updatedReplicas": "Total number of non-terminated pods targeted by this deployment that have the desired template spec.", + "readyReplicas": "Total number of ready pods targeted by this deployment.", + "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.", + "unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.", + "conditions": "Represents the latest available observations of a deployment's current state.", + "collisionCount": "Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.", +} + +func (DeploymentStatus) SwaggerDoc() map[string]string { + return map_DeploymentStatus +} + +var map_DeploymentStrategy = map[string]string{ + "": "DeploymentStrategy describes how to replace existing pods with new ones.", + "type": "Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". Default is RollingUpdate.", + "rollingUpdate": "Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate.", +} + +func (DeploymentStrategy) SwaggerDoc() map[string]string { + return map_DeploymentStrategy +} + +var map_ReplicaSet = map[string]string{ + "": "ReplicaSet ensures that a specified number of pod replicas are running at any given time.", + "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", +} + +func (ReplicaSet) SwaggerDoc() map[string]string { + return map_ReplicaSet +} + +var map_ReplicaSetCondition = map[string]string{ + "": "ReplicaSetCondition describes the state of a replica set at a certain point.", + "type": "Type of replica set condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "The last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (ReplicaSetCondition) SwaggerDoc() map[string]string { + return map_ReplicaSetCondition +} + +var map_ReplicaSetList = map[string]string{ + "": "ReplicaSetList is a collection of ReplicaSets.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "items": "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller", +} + +func (ReplicaSetList) SwaggerDoc() map[string]string { + return map_ReplicaSetList +} + +var map_ReplicaSetSpec = map[string]string{ + "": "ReplicaSetSpec is the specification of a ReplicaSet.", + "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", + "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + "selector": "Selector is a label query over pods that should match the replica count. Label keys and values that must match in order to be controlled by this replica set. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", +} + +func (ReplicaSetSpec) SwaggerDoc() map[string]string { + return map_ReplicaSetSpec +} + +var map_ReplicaSetStatus = map[string]string{ + "": "ReplicaSetStatus represents the current status of a ReplicaSet.", + "replicas": "Replicas is the most recently observed number of replicas.
More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", + "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replicaset.", + "readyReplicas": "The number of ready replicas for this replica set.", + "availableReplicas": "The number of available replicas (ready for at least minReadySeconds) for this replica set.", + "observedGeneration": "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.", + "conditions": "Represents the latest available observations of a replica set's current state.", +} + +func (ReplicaSetStatus) SwaggerDoc() map[string]string { + return map_ReplicaSetStatus +} + +var map_RollingUpdateDaemonSet = map[string]string{ + "": "Spec to control the desired behavior of daemon set rolling update.", + "maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0. Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.", +} + +func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string { + return map_RollingUpdateDaemonSet +} + +var map_RollingUpdateDeployment = map[string]string{ + "": "Spec to control the desired behavior of rolling update.", + "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.", + "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new RC can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. 
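A brief sketch of how these percentage values resolve to pod counts; the 25% figures and the desired count of 10 are assumed, and the rounding directions follow the descriptions above (down for maxUnavailable, up for maxSurge):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	desired := 10
	maxUnavailable := intstr.FromString("25%")
	maxSurge := intstr.FromString("25%")

	// maxUnavailable is rounded down, maxSurge is rounded up.
	unavailable, _ := intstr.GetValueFromIntOrPercent(&maxUnavailable, desired, false)
	surge, _ := intstr.GetValueFromIntOrPercent(&maxSurge, desired, true)

	fmt.Printf("at least %d pods stay available, at most %d pods exist\n",
		desired-unavailable, desired+surge)
}

So with 10 desired pods, at least 8 must remain available and at most 13 may exist at any point during the rollout.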
Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods.", +} + +func (RollingUpdateDeployment) SwaggerDoc() map[string]string { + return map_RollingUpdateDeployment +} + +var map_RollingUpdateStatefulSetStrategy = map[string]string{ + "": "RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.", + "partition": "Partition indicates the ordinal at which the StatefulSet should be partitioned. Default value is 0.", +} + +func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string { + return map_RollingUpdateStatefulSetStrategy +} + +var map_StatefulSet = map[string]string{ + "": "StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", + "spec": "Spec defines the desired identities of pods in this set.", + "status": "Status is the current status of Pods in this StatefulSet. This data may be out of date by some window of time.", +} + +func (StatefulSet) SwaggerDoc() map[string]string { + return map_StatefulSet +} + +var map_StatefulSetCondition = map[string]string{ + "": "StatefulSetCondition describes the state of a statefulset at a certain point.", + "type": "Type of statefulset condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (StatefulSetCondition) SwaggerDoc() map[string]string { + return map_StatefulSetCondition +} + +var map_StatefulSetList = map[string]string{ + "": "StatefulSetList is a collection of StatefulSets.", +} + +func (StatefulSetList) SwaggerDoc() map[string]string { + return map_StatefulSetList +} + +var map_StatefulSetSpec = map[string]string{ + "": "A StatefulSetSpec is the specification of a StatefulSet.", + "replicas": "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", + "selector": "selector is a label query over pods that should match the replica count. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.", + "volumeClaimTemplates": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.", + "serviceName": "serviceName is the name of the service that governs this StatefulSet.
This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", + "podManagementPolicy": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.", + "updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.", + "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", +} + +func (StatefulSetSpec) SwaggerDoc() map[string]string { + return map_StatefulSetSpec +} + +var map_StatefulSetStatus = map[string]string{ + "": "StatefulSetStatus represents the current state of a StatefulSet.", + "observedGeneration": "observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the StatefulSet's generation, which is updated on mutation by the API Server.", + "replicas": "replicas is the number of Pods created by the StatefulSet controller.", + "readyReplicas": "readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.", + "currentReplicas": "currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by currentRevision.", + "updatedReplicas": "updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by updateRevision.", + "currentRevision": "currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas).", + "updateRevision": "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", + "collisionCount": "collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", + "conditions": "Represents the latest available observations of a statefulset's current state.", +} + +func (StatefulSetStatus) SwaggerDoc() map[string]string { + return map_StatefulSetStatus +} + +var map_StatefulSetUpdateStrategy = map[string]string{ + "": "StatefulSetUpdateStrategy indicates the strategy that the StatefulSet controller will use to perform updates. It includes any additional parameters necessary to perform the update for the indicated strategy.", + "type": "Type indicates the type of the StatefulSetUpdateStrategy. 
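A minimal sketch, with assumed values, of choosing the rolling-update strategy together with a partition so that only ordinals at or above the partition move to the updated revision:

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
)

func main() {
	partition := int32(3)
	strategy := appsv1.StatefulSetUpdateStrategy{
		Type: appsv1.RollingUpdateStatefulSetStrategyType,
		RollingUpdate: &appsv1.RollingUpdateStatefulSetStrategy{
			Partition: &partition,
		},
	}
	// With 5 replicas and partition=3, only pod-3 and pod-4 are rolled to the
	// updated revision; pod-0 through pod-2 stay on the current revision.
	fmt.Println(strategy.Type, *strategy.RollingUpdate.Partition)
}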
Default is RollingUpdate.", + "rollingUpdate": "RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.", +} + +func (StatefulSetUpdateStrategy) SwaggerDoc() map[string]string { + return map_StatefulSetUpdateStrategy +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000000..c41db2981551 --- /dev/null +++ b/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go @@ -0,0 +1,866 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1 + +import ( + core_v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerRevision) DeepCopyInto(out *ControllerRevision) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Data.DeepCopyInto(&out.Data) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRevision. +func (in *ControllerRevision) DeepCopy() *ControllerRevision { + if in == nil { + return nil + } + out := new(ControllerRevision) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ControllerRevision) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerRevisionList) DeepCopyInto(out *ControllerRevisionList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ControllerRevision, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRevisionList. +func (in *ControllerRevisionList) DeepCopy() *ControllerRevisionList { + if in == nil { + return nil + } + out := new(ControllerRevisionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ControllerRevisionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DaemonSet) DeepCopyInto(out *DaemonSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSet. +func (in *DaemonSet) DeepCopy() *DaemonSet { + if in == nil { + return nil + } + out := new(DaemonSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DaemonSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetCondition) DeepCopyInto(out *DaemonSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetCondition. +func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition { + if in == nil { + return nil + } + out := new(DaemonSetCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DaemonSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetList. +func (in *DaemonSetList) DeepCopy() *DaemonSetList { + if in == nil { + return nil + } + out := new(DaemonSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DaemonSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetSpec) DeepCopyInto(out *DaemonSetSpec) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if *in == nil { + *out = nil + } else { + *out = new(meta_v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + in.Template.DeepCopyInto(&out.Template) + in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy) + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetSpec. +func (in *DaemonSetSpec) DeepCopy() *DaemonSetSpec { + if in == nil { + return nil + } + out := new(DaemonSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
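// DeepCopy is typically used to take a private copy of an object that came
// from a shared informer cache before mutating it; for example (the variable
// names below are assumptions for illustration, not part of the generated code):
//
//	updated := cachedDaemonSet.DeepCopy()
//	updated.Labels = map[string]string{"canary": "true"}
//	// the cached original is left untouched; only the copy is modified
//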
+func (in *DaemonSetStatus) DeepCopyInto(out *DaemonSetStatus) { + *out = *in + if in.CollisionCount != nil { + in, out := &in.CollisionCount, &out.CollisionCount + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DaemonSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetStatus. +func (in *DaemonSetStatus) DeepCopy() *DaemonSetStatus { + if in == nil { + return nil + } + out := new(DaemonSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetUpdateStrategy) DeepCopyInto(out *DaemonSetUpdateStrategy) { + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + if *in == nil { + *out = nil + } else { + *out = new(RollingUpdateDaemonSet) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetUpdateStrategy. +func (in *DaemonSetUpdateStrategy) DeepCopy() *DaemonSetUpdateStrategy { + if in == nil { + return nil + } + out := new(DaemonSetUpdateStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Deployment) DeepCopyInto(out *Deployment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Deployment. +func (in *Deployment) DeepCopy() *Deployment { + if in == nil { + return nil + } + out := new(Deployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Deployment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentCondition) DeepCopyInto(out *DeploymentCondition) { + *out = *in + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCondition. +func (in *DeploymentCondition) DeepCopy() *DeploymentCondition { + if in == nil { + return nil + } + out := new(DeploymentCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentList) DeepCopyInto(out *DeploymentList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Deployment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentList. 
+func (in *DeploymentList) DeepCopy() *DeploymentList { + if in == nil { + return nil + } + out := new(DeploymentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeploymentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if *in == nil { + *out = nil + } else { + *out = new(meta_v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + in.Template.DeepCopyInto(&out.Template) + in.Strategy.DeepCopyInto(&out.Strategy) + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.ProgressDeadlineSeconds != nil { + in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentSpec. +func (in *DeploymentSpec) DeepCopy() *DeploymentSpec { + if in == nil { + return nil + } + out := new(DeploymentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DeploymentCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CollisionCount != nil { + in, out := &in.CollisionCount, &out.CollisionCount + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStatus. +func (in *DeploymentStatus) DeepCopy() *DeploymentStatus { + if in == nil { + return nil + } + out := new(DeploymentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentStrategy) DeepCopyInto(out *DeploymentStrategy) { + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + if *in == nil { + *out = nil + } else { + *out = new(RollingUpdateDeployment) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStrategy. +func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy { + if in == nil { + return nil + } + out := new(DeploymentStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReplicaSet) DeepCopyInto(out *ReplicaSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSet. +func (in *ReplicaSet) DeepCopy() *ReplicaSet { + if in == nil { + return nil + } + out := new(ReplicaSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReplicaSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSetCondition) DeepCopyInto(out *ReplicaSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetCondition. +func (in *ReplicaSetCondition) DeepCopy() *ReplicaSetCondition { + if in == nil { + return nil + } + out := new(ReplicaSetCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSetList) DeepCopyInto(out *ReplicaSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ReplicaSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetList. +func (in *ReplicaSetList) DeepCopy() *ReplicaSetList { + if in == nil { + return nil + } + out := new(ReplicaSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReplicaSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSetSpec) DeepCopyInto(out *ReplicaSetSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if *in == nil { + *out = nil + } else { + *out = new(meta_v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + in.Template.DeepCopyInto(&out.Template) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetSpec. +func (in *ReplicaSetSpec) DeepCopy() *ReplicaSetSpec { + if in == nil { + return nil + } + out := new(ReplicaSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReplicaSetStatus) DeepCopyInto(out *ReplicaSetStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ReplicaSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetStatus. +func (in *ReplicaSetStatus) DeepCopy() *ReplicaSetStatus { + if in == nil { + return nil + } + out := new(ReplicaSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollingUpdateDaemonSet) DeepCopyInto(out *RollingUpdateDaemonSet) { + *out = *in + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + if *in == nil { + *out = nil + } else { + *out = new(intstr.IntOrString) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDaemonSet. +func (in *RollingUpdateDaemonSet) DeepCopy() *RollingUpdateDaemonSet { + if in == nil { + return nil + } + out := new(RollingUpdateDaemonSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollingUpdateDeployment) DeepCopyInto(out *RollingUpdateDeployment) { + *out = *in + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + if *in == nil { + *out = nil + } else { + *out = new(intstr.IntOrString) + **out = **in + } + } + if in.MaxSurge != nil { + in, out := &in.MaxSurge, &out.MaxSurge + if *in == nil { + *out = nil + } else { + *out = new(intstr.IntOrString) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDeployment. +func (in *RollingUpdateDeployment) DeepCopy() *RollingUpdateDeployment { + if in == nil { + return nil + } + out := new(RollingUpdateDeployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollingUpdateStatefulSetStrategy) DeepCopyInto(out *RollingUpdateStatefulSetStrategy) { + *out = *in + if in.Partition != nil { + in, out := &in.Partition, &out.Partition + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateStatefulSetStrategy. +func (in *RollingUpdateStatefulSetStrategy) DeepCopy() *RollingUpdateStatefulSetStrategy { + if in == nil { + return nil + } + out := new(RollingUpdateStatefulSetStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSet) DeepCopyInto(out *StatefulSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSet. 
+func (in *StatefulSet) DeepCopy() *StatefulSet { + if in == nil { + return nil + } + out := new(StatefulSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StatefulSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetCondition) DeepCopyInto(out *StatefulSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetCondition. +func (in *StatefulSetCondition) DeepCopy() *StatefulSetCondition { + if in == nil { + return nil + } + out := new(StatefulSetCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StatefulSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetList. +func (in *StatefulSetList) DeepCopy() *StatefulSetList { + if in == nil { + return nil + } + out := new(StatefulSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StatefulSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if *in == nil { + *out = nil + } else { + *out = new(meta_v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + in.Template.DeepCopyInto(&out.Template) + if in.VolumeClaimTemplates != nil { + in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates + *out = make([]core_v1.PersistentVolumeClaim, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy) + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetSpec. +func (in *StatefulSetSpec) DeepCopy() *StatefulSetSpec { + if in == nil { + return nil + } + out := new(StatefulSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StatefulSetStatus) DeepCopyInto(out *StatefulSetStatus) { + *out = *in + if in.CollisionCount != nil { + in, out := &in.CollisionCount, &out.CollisionCount + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]StatefulSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetStatus. +func (in *StatefulSetStatus) DeepCopy() *StatefulSetStatus { + if in == nil { + return nil + } + out := new(StatefulSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetUpdateStrategy) DeepCopyInto(out *StatefulSetUpdateStrategy) { + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + if *in == nil { + *out = nil + } else { + *out = new(RollingUpdateStatefulSetStrategy) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetUpdateStrategy. +func (in *StatefulSetUpdateStrategy) DeepCopy() *StatefulSetUpdateStrategy { + if in == nil { + return nil + } + out := new(StatefulSetUpdateStrategy) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/apps/v1beta1/BUILD b/vendor/k8s.io/api/apps/v1beta1/BUILD index dd256cbd3102..f40a41e3e20b 100644 --- a/vendor/k8s.io/api/apps/v1beta1/BUILD +++ b/vendor/k8s.io/api/apps/v1beta1/BUILD @@ -15,12 +15,12 @@ go_library( "types_swagger_doc_generated.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/api/apps/v1beta1", deps = [ "//vendor/github.com/gogo/protobuf/proto:go_default_library", "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", diff --git a/vendor/k8s.io/api/apps/v1beta1/doc.go b/vendor/k8s.io/api/apps/v1beta1/doc.go index 82af692e6248..f763ac1a62d9 100644 --- a/vendor/k8s.io/api/apps/v1beta1/doc.go +++ b/vendor/k8s.io/api/apps/v1beta1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true package v1beta1 diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go index 6d5e1f7a1ebe..baee7a9750a9 100644 --- a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go @@ -41,6 +41,7 @@ limitations under the License. 
ScaleSpec ScaleStatus StatefulSet + StatefulSetCondition StatefulSetList StatefulSetSpec StatefulSetStatus @@ -144,22 +145,26 @@ func (m *StatefulSet) Reset() { *m = StatefulSet{} } func (*StatefulSet) ProtoMessage() {} func (*StatefulSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } +func (*StatefulSetCondition) ProtoMessage() {} +func (*StatefulSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } + func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } func (*StatefulSetList) ProtoMessage() {} -func (*StatefulSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func (*StatefulSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } func (*StatefulSetSpec) ProtoMessage() {} -func (*StatefulSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +func (*StatefulSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } func (*StatefulSetStatus) ProtoMessage() {} -func (*StatefulSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (*StatefulSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } func (*StatefulSetUpdateStrategy) ProtoMessage() {} func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{19} + return fileDescriptorGenerated, []int{20} } func init() { @@ -179,6 +184,7 @@ func init() { proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.apps.v1beta1.ScaleSpec") proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.apps.v1beta1.ScaleStatus") proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1beta1.StatefulSet") + proto.RegisterType((*StatefulSetCondition)(nil), "k8s.io.api.apps.v1beta1.StatefulSetCondition") proto.RegisterType((*StatefulSetList)(nil), "k8s.io.api.apps.v1beta1.StatefulSetList") proto.RegisterType((*StatefulSetSpec)(nil), "k8s.io.api.apps.v1beta1.StatefulSetSpec") proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1beta1.StatefulSetStatus") @@ -840,6 +846,48 @@ func (m *StatefulSet) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n24, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil +} + func (m *StatefulSetList) Marshal() (dAtA []byte, err error) 
{ size := m.Size() dAtA = make([]byte, size) @@ -858,11 +906,11 @@ func (m *StatefulSetList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n24, err := m.ListMeta.MarshalTo(dAtA[i:]) + n25, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n24 + i += n25 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -902,20 +950,20 @@ func (m *StatefulSetSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n25, err := m.Selector.MarshalTo(dAtA[i:]) + n26, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n25 + i += n26 } dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n26, err := m.Template.MarshalTo(dAtA[i:]) + n27, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n26 + i += n27 if len(m.VolumeClaimTemplates) > 0 { for _, msg := range m.VolumeClaimTemplates { dAtA[i] = 0x22 @@ -939,11 +987,11 @@ func (m *StatefulSetSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n27, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) + n28, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n27 + i += n28 if m.RevisionHistoryLimit != nil { dAtA[i] = 0x40 i++ @@ -997,6 +1045,18 @@ func (m *StatefulSetStatus) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) } + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -1023,11 +1083,11 @@ func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n28, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + n29, err := m.RollingUpdate.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n28 + i += n29 } return i, nil } @@ -1286,6 +1346,22 @@ func (m *StatefulSet) Size() (n int) { return n } +func (m *StatefulSetCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *StatefulSetList) Size() (n int) { var l int _ = l @@ -1347,6 +1423,12 @@ func (m *StatefulSetStatus) Size() (n int) { if m.CollisionCount != nil { n += 1 + sovGenerated(uint64(*m.CollisionCount)) } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -1591,6 +1673,20 @@ func (this *StatefulSet) String() string { }, "") return s } +func (this *StatefulSetCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", 
this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} func (this *StatefulSetList) String() string { if this == nil { return "nil" @@ -1632,6 +1728,7 @@ func (this *StatefulSetStatus) String() string { `CurrentRevision:` + fmt.Sprintf("%v", this.CurrentRevision) + `,`, `UpdateRevision:` + fmt.Sprintf("%v", this.UpdateRevision) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "StatefulSetCondition", "StatefulSetCondition", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -4017,6 +4114,202 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } return nil } +func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = StatefulSetConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift 
+= 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *StatefulSetList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -4603,6 +4896,37 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } } m.CollisionCount = &v + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, StatefulSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -4846,119 +5170,122 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 1820 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0x4b, 0x6f, 0x1b, 0xc9, - 0x11, 0xd6, 0x50, 0xa4, 0x44, 0xb6, 0x56, 0x94, 0xd5, 0x52, 0x24, 0xae, 0x36, 0xa1, 0x04, 0x66, - 0xb1, 0x2b, 0xef, 0xae, 0x86, 0x6b, 0xd9, 0x31, 0xfc, 0x00, 0x8c, 0x88, 0x94, 0x63, 0xcb, 0x90, - 0x22, 0xb9, 0x29, 0x39, 0x88, 0x93, 0x00, 0x6e, 0x0e, 0xdb, 0xd4, 0x58, 0xf3, 0xc2, 0x4c, 0x0f, - 0x63, 0x22, 0x97, 0xdc, 0x13, 0xc0, 0x39, 0xe7, 0x57, 0xe4, 0x18, 0x24, 0xb7, 0x9c, 0x74, 0x09, - 0x60, 0xe4, 0x12, 0x9f, 0x84, 0x98, 0xbe, 0xe6, 0x9a, 0x8b, 0x81, 0x00, 0x41, 0xf7, 0xf4, 0xbc, - 0x67, 0x24, 0x2a, 0x40, 0x74, 0xc8, 0x8d, 0xd3, 0x55, 0xf5, 0x55, 0x75, 0x77, 0x75, 0xd5, 0x57, - 0x04, 0x3f, 0x3c, 0xb9, 0xe3, 0xc8, 0xaa, 0xd9, 0x3c, 0x71, 0xbb, 0xc4, 0x36, 0x08, 0x25, 0x4e, - 0x73, 0x40, 0x8c, 0x9e, 0x69, 0x37, 0x85, 0x00, 0x5b, 0x6a, 0x13, 0x5b, 0x96, 0xd3, 0x1c, 0xdc, - 0xe8, 0x12, 0x8a, 0x6f, 0x34, 0xfb, 0xc4, 0x20, 0x36, 0xa6, 0xa4, 0x27, 0x5b, 0xb6, 0x49, 0x4d, - 0xb8, 0xec, 0x29, 0xca, 0xd8, 0x52, 0x65, 0xa6, 
0x28, 0x0b, 0xc5, 0x95, 0x8d, 0xbe, 0x4a, 0x8f, - 0xdd, 0xae, 0xac, 0x98, 0x7a, 0xb3, 0x6f, 0xf6, 0xcd, 0x26, 0xd7, 0xef, 0xba, 0x2f, 0xf9, 0x17, - 0xff, 0xe0, 0xbf, 0x3c, 0x9c, 0x95, 0x46, 0xc4, 0xa1, 0x62, 0xda, 0xa4, 0x39, 0x48, 0xf9, 0x5a, - 0xb9, 0x1e, 0xd1, 0xb1, 0x4c, 0x4d, 0x55, 0x86, 0x79, 0x61, 0xad, 0xdc, 0x0a, 0x55, 0x75, 0xac, - 0x1c, 0xab, 0x06, 0xb1, 0x87, 0x4d, 0xeb, 0xa4, 0xcf, 0x16, 0x9c, 0xa6, 0x4e, 0x28, 0xce, 0x72, - 0xd0, 0xcc, 0xb3, 0xb2, 0x5d, 0x83, 0xaa, 0x3a, 0x49, 0x19, 0xdc, 0xbe, 0xc8, 0xc0, 0x51, 0x8e, - 0x89, 0x8e, 0x53, 0x76, 0x37, 0xf3, 0xec, 0x5c, 0xaa, 0x6a, 0x4d, 0xd5, 0xa0, 0x0e, 0xb5, 0x93, - 0x46, 0x8d, 0x7f, 0x49, 0x00, 0xb6, 0x4d, 0x83, 0xda, 0xa6, 0xa6, 0x11, 0x1b, 0x91, 0x81, 0xea, - 0xa8, 0xa6, 0x01, 0x5f, 0x80, 0x32, 0xdb, 0x4f, 0x0f, 0x53, 0x5c, 0x93, 0xd6, 0xa4, 0xf5, 0x99, - 0xcd, 0x6f, 0xe5, 0xf0, 0x52, 0x02, 0x78, 0xd9, 0x3a, 0xe9, 0xb3, 0x05, 0x47, 0x66, 0xda, 0xf2, - 0xe0, 0x86, 0xbc, 0xdf, 0x7d, 0x45, 0x14, 0xba, 0x47, 0x28, 0x6e, 0xc1, 0xd3, 0xb3, 0xd5, 0x89, - 0xd1, 0xd9, 0x2a, 0x08, 0xd7, 0x50, 0x80, 0x0a, 0xf7, 0x41, 0x91, 0xa3, 0x17, 0x38, 0xfa, 0x46, - 0x2e, 0xba, 0xd8, 0xb4, 0x8c, 0xf0, 0x2f, 0x1f, 0xbe, 0xa6, 0xc4, 0x60, 0xe1, 0xb5, 0x3e, 0x11, - 0xd0, 0xc5, 0x6d, 0x4c, 0x31, 0xe2, 0x40, 0xf0, 0x1b, 0x50, 0xb6, 0x45, 0xf8, 0xb5, 0xc9, 0x35, - 0x69, 0x7d, 0xb2, 0x75, 0x4d, 0x68, 0x95, 0xfd, 0x6d, 0xa1, 0x40, 0xa3, 0x71, 0x2a, 0x81, 0xa5, - 0xf4, 0xbe, 0x77, 0x55, 0x87, 0xc2, 0x9f, 0xa7, 0xf6, 0x2e, 0x8f, 0xb7, 0x77, 0x66, 0xcd, 0x77, - 0x1e, 0x38, 0xf6, 0x57, 0x22, 0xfb, 0x3e, 0x00, 0x25, 0x95, 0x12, 0xdd, 0xa9, 0x15, 0xd6, 0x26, - 0xd7, 0x67, 0x36, 0xbf, 0x96, 0x73, 0x72, 0x5d, 0x4e, 0x47, 0xd7, 0x9a, 0x15, 0xb8, 0xa5, 0x1d, - 0x86, 0x80, 0x3c, 0xa0, 0xc6, 0x6f, 0x0b, 0x00, 0x6c, 0x13, 0x4b, 0x33, 0x87, 0x3a, 0x31, 0xe8, - 0x15, 0x5c, 0xdd, 0x0e, 0x28, 0x3a, 0x16, 0x51, 0xc4, 0xd5, 0x7d, 0x99, 0xbb, 0x83, 0x30, 0xa8, - 0x8e, 0x45, 0x94, 0xf0, 0xd2, 0xd8, 0x17, 0xe2, 0x10, 0xf0, 0x29, 0x98, 0x72, 0x28, 0xa6, 0xae, - 0xc3, 0xaf, 0x6c, 0x66, 0xf3, 0xfa, 0x38, 0x60, 0xdc, 0xa0, 0x55, 0x15, 0x70, 0x53, 0xde, 0x37, - 0x12, 0x40, 0x8d, 0xbf, 0x4f, 0x82, 0x85, 0x50, 0xb9, 0x6d, 0x1a, 0x3d, 0x95, 0xb2, 0x94, 0xbe, - 0x0f, 0x8a, 0x74, 0x68, 0x11, 0x7e, 0x26, 0x95, 0xd6, 0x97, 0x7e, 0x30, 0x87, 0x43, 0x8b, 0x7c, - 0x3c, 0x5b, 0x5d, 0xce, 0x30, 0x61, 0x22, 0xc4, 0x8d, 0xe0, 0x6e, 0x10, 0x67, 0x81, 0x9b, 0xdf, - 0x8a, 0x3b, 0xff, 0x78, 0xb6, 0x9a, 0x51, 0x6b, 0xe4, 0x00, 0x29, 0x1e, 0x22, 0xfc, 0x02, 0x4c, - 0xd9, 0x04, 0x3b, 0xa6, 0x51, 0x2b, 0x72, 0xb4, 0x60, 0x2b, 0x88, 0xaf, 0x22, 0x21, 0x85, 0xd7, - 0xc1, 0xb4, 0x4e, 0x1c, 0x07, 0xf7, 0x49, 0xad, 0xc4, 0x15, 0xe7, 0x84, 0xe2, 0xf4, 0x9e, 0xb7, - 0x8c, 0x7c, 0x39, 0x7c, 0x05, 0xaa, 0x1a, 0x76, 0xe8, 0x91, 0xd5, 0xc3, 0x94, 0x1c, 0xaa, 0x3a, - 0xa9, 0x4d, 0xf1, 0x03, 0xfd, 0x6a, 0xbc, 0xbb, 0x67, 0x16, 0xad, 0x25, 0x81, 0x5e, 0xdd, 0x8d, - 0x21, 0xa1, 0x04, 0x32, 0x1c, 0x00, 0xc8, 0x56, 0x0e, 0x6d, 0x6c, 0x38, 0xde, 0x41, 0x31, 0x7f, - 0xd3, 0x97, 0xf6, 0xb7, 0x22, 0xfc, 0xc1, 0xdd, 0x14, 0x1a, 0xca, 0xf0, 0xd0, 0xf8, 0xa3, 0x04, - 0xaa, 0xe1, 0x35, 0x5d, 0xc1, 0x5b, 0x7d, 0x1c, 0x7f, 0xab, 0xdf, 0x1f, 0x23, 0x39, 0x73, 0xde, - 0xe8, 0x3f, 0x0b, 0x00, 0x86, 0x4a, 0xc8, 0xd4, 0xb4, 0x2e, 0x56, 0x4e, 0xe0, 0x1a, 0x28, 0x1a, - 0x58, 0xf7, 0x73, 0x32, 0x78, 0x20, 0x3f, 0xc6, 0x3a, 0x41, 0x5c, 0x02, 0xdf, 0x48, 0x00, 0xba, - 0xfc, 0xe8, 0x7b, 0x5b, 0x86, 0x61, 0x52, 0xcc, 0x4e, 0xc3, 0x0f, 0xa8, 0x3d, 0x46, 0x40, 0xbe, - 0x2f, 0xf9, 0x28, 0x85, 0xf2, 0xd0, 0xa0, 0xf6, 0x30, 0xbc, 0x85, 0xb4, 
0x02, 0xca, 0x70, 0x0d, - 0x7f, 0x06, 0x80, 0x2d, 0x30, 0x0f, 0x4d, 0xf1, 0x6c, 0xf3, 0x6b, 0x80, 0xef, 0xbe, 0x6d, 0x1a, - 0x2f, 0xd5, 0x7e, 0x58, 0x58, 0x50, 0x00, 0x81, 0x22, 0x70, 0x2b, 0x0f, 0xc1, 0x72, 0x4e, 0x9c, - 0xf0, 0x1a, 0x98, 0x3c, 0x21, 0x43, 0xef, 0xa8, 0x10, 0xfb, 0x09, 0x17, 0x41, 0x69, 0x80, 0x35, - 0x97, 0x78, 0x6f, 0x12, 0x79, 0x1f, 0xf7, 0x0a, 0x77, 0xa4, 0xc6, 0x1f, 0x4a, 0xd1, 0x4c, 0x61, - 0xf5, 0x06, 0xae, 0xb3, 0xf6, 0x60, 0x69, 0xaa, 0x82, 0x1d, 0x8e, 0x51, 0x6a, 0x7d, 0xe2, 0xb5, - 0x06, 0x6f, 0x0d, 0x05, 0x52, 0xf8, 0x0b, 0x50, 0x76, 0x88, 0x46, 0x14, 0x6a, 0xda, 0xa2, 0xc4, - 0xdd, 0x1c, 0x33, 0xa7, 0x70, 0x97, 0x68, 0x1d, 0x61, 0xea, 0xc1, 0xfb, 0x5f, 0x28, 0x80, 0x84, - 0x4f, 0x41, 0x99, 0x12, 0xdd, 0xd2, 0x30, 0x25, 0xe2, 0xf4, 0x62, 0x79, 0xc5, 0x6a, 0x07, 0x03, - 0x3b, 0x30, 0x7b, 0x87, 0x42, 0x8d, 0x57, 0xcf, 0x20, 0x4f, 0xfd, 0x55, 0x14, 0xc0, 0xc0, 0x9f, - 0x82, 0xb2, 0x43, 0x59, 0x57, 0xef, 0x0f, 0x79, 0x45, 0x39, 0xaf, 0xad, 0x44, 0xeb, 0xa8, 0x67, - 0x12, 0x42, 0xfb, 0x2b, 0x28, 0x80, 0x83, 0x5b, 0x60, 0x4e, 0x57, 0x0d, 0x44, 0x70, 0x6f, 0xd8, - 0x21, 0x8a, 0x69, 0xf4, 0x1c, 0x5e, 0x8a, 0x4a, 0xad, 0x65, 0x61, 0x34, 0xb7, 0x17, 0x17, 0xa3, - 0xa4, 0x3e, 0xdc, 0x05, 0x8b, 0x7e, 0xdb, 0x7d, 0xac, 0x3a, 0xd4, 0xb4, 0x87, 0xbb, 0xaa, 0xae, - 0x52, 0x5e, 0xa0, 0x4a, 0xad, 0xda, 0xe8, 0x6c, 0x75, 0x11, 0x65, 0xc8, 0x51, 0xa6, 0x15, 0xab, - 0x9d, 0x16, 0x76, 0x1d, 0xd2, 0xe3, 0x05, 0xa7, 0x1c, 0xd6, 0xce, 0x03, 0xbe, 0x8a, 0x84, 0x14, - 0xfe, 0x24, 0x96, 0xa6, 0xe5, 0xcb, 0xa5, 0x69, 0x35, 0x3f, 0x45, 0xe1, 0x11, 0x58, 0xb6, 0x6c, - 0xb3, 0x6f, 0x13, 0xc7, 0xd9, 0x26, 0xb8, 0xa7, 0xa9, 0x06, 0xf1, 0x4f, 0xa6, 0xc2, 0x77, 0xf4, - 0xd9, 0xe8, 0x6c, 0x75, 0xf9, 0x20, 0x5b, 0x05, 0xe5, 0xd9, 0x36, 0xfe, 0x52, 0x04, 0xd7, 0x92, - 0x3d, 0x0e, 0x3e, 0x01, 0xd0, 0xec, 0x3a, 0xc4, 0x1e, 0x90, 0xde, 0x23, 0x8f, 0xb8, 0x31, 0x76, - 0x23, 0x71, 0x76, 0x13, 0xbc, 0xdb, 0xfd, 0x94, 0x06, 0xca, 0xb0, 0xf2, 0xf8, 0x91, 0x78, 0x00, - 0x05, 0x1e, 0x68, 0x84, 0x1f, 0xa5, 0x1e, 0xc1, 0x16, 0x98, 0x13, 0x6f, 0xdf, 0x17, 0xf2, 0x64, - 0x8d, 0xdc, 0xfb, 0x51, 0x5c, 0x8c, 0x92, 0xfa, 0xf0, 0x11, 0x98, 0xc7, 0x03, 0xac, 0x6a, 0xb8, - 0xab, 0x91, 0x00, 0xa4, 0xc8, 0x41, 0x3e, 0x15, 0x20, 0xf3, 0x5b, 0x49, 0x05, 0x94, 0xb6, 0x81, - 0x7b, 0x60, 0xc1, 0x35, 0xd2, 0x50, 0x5e, 0x1e, 0x7e, 0x26, 0xa0, 0x16, 0x8e, 0xd2, 0x2a, 0x28, - 0xcb, 0x0e, 0xbe, 0x00, 0x40, 0xf1, 0x1b, 0xb3, 0x53, 0x9b, 0xe2, 0x95, 0xf4, 0x9b, 0x31, 0xde, - 0x4b, 0xd0, 0xcd, 0xc3, 0x2a, 0x16, 0x2c, 0x39, 0x28, 0x82, 0x09, 0xef, 0x83, 0x59, 0x9b, 0xbd, - 0x80, 0x20, 0xd4, 0x69, 0x1e, 0xea, 0x77, 0x84, 0xd9, 0x2c, 0x8a, 0x0a, 0x51, 0x5c, 0x17, 0xde, - 0x03, 0x55, 0xc5, 0xd4, 0x34, 0x9e, 0xf9, 0x6d, 0xd3, 0x35, 0x28, 0x4f, 0xde, 0x52, 0x0b, 0xb2, - 0xce, 0xdc, 0x8e, 0x49, 0x50, 0x42, 0xb3, 0xf1, 0x67, 0x29, 0xda, 0x66, 0xfc, 0xe7, 0x0c, 0xef, - 0xc5, 0xa8, 0xcf, 0x17, 0x09, 0xea, 0xb3, 0x94, 0xb6, 0x88, 0x30, 0x1f, 0x15, 0xcc, 0xb2, 0xe4, - 0x57, 0x8d, 0xbe, 0x77, 0xe1, 0xa2, 0x24, 0x7e, 0x7b, 0xee, 0x53, 0x0a, 0xb4, 0x23, 0x8d, 0x71, - 0x9e, 0xef, 0x3c, 0x2a, 0x44, 0x71, 0xe4, 0xc6, 0x03, 0x50, 0x8d, 0xbf, 0xc3, 0x18, 0xa7, 0x97, - 0x2e, 0xe4, 0xf4, 0x1f, 0x24, 0xb0, 0x9c, 0xe3, 0x1d, 0x6a, 0xa0, 0xaa, 0xe3, 0xd7, 0x91, 0x1c, - 0xb9, 0x90, 0x1b, 0xb3, 0xa9, 0x49, 0xf6, 0xa6, 0x26, 0x79, 0xc7, 0xa0, 0xfb, 0x76, 0x87, 0xda, - 0xaa, 0xd1, 0xf7, 0xee, 0x61, 0x2f, 0x86, 0x85, 0x12, 0xd8, 0xf0, 0x39, 0x28, 0xeb, 0xf8, 0x75, - 0xc7, 0xb5, 0xfb, 0x59, 0xe7, 0x35, 0x9e, 0x1f, 0xde, 0x3f, 0xf6, 0x04, 0x0a, 0x0a, 0xf0, 0x1a, - 
0xfb, 0x60, 0x2d, 0xb6, 0x49, 0x56, 0x2a, 0xc8, 0x4b, 0x57, 0xeb, 0x90, 0xf0, 0xc2, 0xbf, 0x06, - 0x15, 0x0b, 0xdb, 0x54, 0x0d, 0xca, 0x45, 0xa9, 0x35, 0x3b, 0x3a, 0x5b, 0xad, 0x1c, 0xf8, 0x8b, - 0x28, 0x94, 0x37, 0xfe, 0x2d, 0x81, 0x52, 0x47, 0xc1, 0x1a, 0xb9, 0x82, 0xd1, 0x61, 0x3b, 0x36, - 0x3a, 0x34, 0x72, 0x93, 0x88, 0xc7, 0x93, 0x3b, 0x35, 0xec, 0x26, 0xa6, 0x86, 0xcf, 0x2f, 0xc0, - 0x39, 0x7f, 0x60, 0xb8, 0x0b, 0x2a, 0x81, 0xbb, 0x58, 0x95, 0x94, 0x2e, 0xaa, 0x92, 0x8d, 0xdf, - 0x17, 0xc0, 0x4c, 0xc4, 0xc5, 0xe5, 0xac, 0xd9, 0x71, 0x47, 0x88, 0x06, 0x2b, 0x43, 0x9b, 0xe3, - 0x6c, 0x44, 0xf6, 0x49, 0x85, 0xc7, 0xdf, 0xc2, 0xee, 0x9d, 0xe6, 0x1a, 0x0f, 0x40, 0x95, 0x62, - 0xbb, 0x4f, 0xa8, 0x2f, 0xe3, 0x07, 0x56, 0x09, 0x99, 0xfe, 0x61, 0x4c, 0x8a, 0x12, 0xda, 0x2b, - 0xf7, 0xc1, 0x6c, 0xcc, 0xd9, 0xa5, 0x48, 0xd8, 0x1b, 0x76, 0x38, 0x61, 0x72, 0x5e, 0x41, 0x76, - 0x3d, 0x89, 0x65, 0xd7, 0x7a, 0xfe, 0x61, 0x46, 0x9e, 0x4c, 0x5e, 0x8e, 0xa1, 0x44, 0x8e, 0x7d, - 0x35, 0x16, 0xda, 0xf9, 0x99, 0xf6, 0x27, 0x09, 0xcc, 0x45, 0xb4, 0xaf, 0x60, 0x82, 0xd9, 0x89, - 0x4f, 0x30, 0x9f, 0x8f, 0xb3, 0x89, 0x9c, 0x11, 0xe6, 0xaf, 0xa5, 0x58, 0xf0, 0xff, 0xf7, 0xa4, - 0xfa, 0x57, 0x60, 0x71, 0x60, 0x6a, 0xae, 0x4e, 0xda, 0x1a, 0x56, 0x75, 0x5f, 0x81, 0x31, 0x98, - 0xc9, 0xe4, 0x1f, 0x15, 0x01, 0x3c, 0xb1, 0x1d, 0xd5, 0xa1, 0xc4, 0xa0, 0xcf, 0x42, 0xcb, 0xd6, - 0x77, 0x85, 0x93, 0xc5, 0x67, 0x19, 0x70, 0x28, 0xd3, 0x09, 0xfc, 0x01, 0x98, 0x61, 0x04, 0x4e, - 0x55, 0x08, 0x9b, 0x05, 0xc5, 0xf4, 0xbf, 0x20, 0x80, 0x66, 0x3a, 0xa1, 0x08, 0x45, 0xf5, 0xe0, - 0x31, 0x58, 0xb0, 0xcc, 0xde, 0x1e, 0x36, 0x70, 0x9f, 0xb0, 0xb6, 0x77, 0xc0, 0xff, 0xd0, 0xe4, - 0x4c, 0xbb, 0xd2, 0xba, 0xed, 0x33, 0xa5, 0x83, 0xb4, 0xca, 0x47, 0x46, 0x59, 0xd3, 0xcb, 0x9c, - 0x07, 0x64, 0x41, 0x42, 0x1b, 0x54, 0x5d, 0xd1, 0x7e, 0xc4, 0xe0, 0xe1, 0xcd, 0xff, 0x9b, 0xe3, - 0x64, 0xd8, 0x51, 0xcc, 0x32, 0xac, 0x46, 0xf1, 0x75, 0x94, 0xf0, 0x90, 0x3b, 0x48, 0x94, 0xff, - 0x9b, 0x41, 0xa2, 0xf1, 0x9b, 0x22, 0x98, 0x4f, 0x3d, 0x5d, 0xf8, 0xa3, 0x73, 0x18, 0xf7, 0xd2, - 0xff, 0x8c, 0x6d, 0xa7, 0x08, 0xe3, 0xe4, 0x25, 0x08, 0xe3, 0x16, 0x98, 0x53, 0x5c, 0xdb, 0x66, - 0xb3, 0x7e, 0x9c, 0x65, 0x07, 0x54, 0xbd, 0x1d, 0x17, 0xa3, 0xa4, 0x7e, 0x16, 0xdb, 0x2f, 0x5d, - 0x92, 0xed, 0x47, 0xa3, 0x10, 0x8c, 0xcd, 0x4b, 0xbb, 0x74, 0x14, 0x82, 0xb8, 0x25, 0xf5, 0x59, - 0xb7, 0xf2, 0x50, 0x03, 0x84, 0xe9, 0x78, 0xb7, 0x3a, 0x8a, 0x49, 0x51, 0x42, 0x3b, 0x83, 0x39, - 0x57, 0xc6, 0x66, 0xce, 0x7f, 0x93, 0xc0, 0xa7, 0xb9, 0x19, 0x0a, 0xb7, 0x62, 0x04, 0x7a, 0x23, - 0x41, 0xa0, 0xbf, 0x97, 0x6b, 0x18, 0xe1, 0xd1, 0x76, 0x36, 0x8f, 0xbe, 0x3b, 0x1e, 0x8f, 0xce, - 0x20, 0x79, 0x17, 0x13, 0xea, 0xd6, 0xc6, 0xe9, 0xfb, 0xfa, 0xc4, 0xdb, 0xf7, 0xf5, 0x89, 0x77, - 0xef, 0xeb, 0x13, 0xbf, 0x1e, 0xd5, 0xa5, 0xd3, 0x51, 0x5d, 0x7a, 0x3b, 0xaa, 0x4b, 0xef, 0x46, - 0x75, 0xe9, 0x1f, 0xa3, 0xba, 0xf4, 0xbb, 0x0f, 0xf5, 0x89, 0xe7, 0xd3, 0xc2, 0xe3, 0x7f, 0x02, - 0x00, 0x00, 0xff, 0xff, 0x3a, 0x2e, 0x29, 0xcd, 0xb9, 0x19, 0x00, 0x00, + // 1871 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0x4f, 0x6f, 0x1b, 0xc7, + 0x15, 0xd7, 0x52, 0xa4, 0x44, 0x3d, 0x45, 0x94, 0x3d, 0x52, 0x2d, 0x46, 0x69, 0x29, 0x61, 0x1b, + 0x24, 0x72, 0x12, 0x2d, 0x63, 0x25, 0x0d, 0x12, 0xbb, 0x08, 0x2a, 0xca, 0x6e, 0xe2, 0x40, 0xaa, + 0x94, 0xa1, 0x94, 0xa2, 0x69, 0x0b, 0x64, 0xb8, 0x1c, 0xd3, 0x1b, 0xed, 0x3f, 0xec, 0x0e, 0x59, + 0x13, 0xbd, 0xf4, 0x03, 0x14, 0x48, 0xcf, 0xfd, 0x14, 0x3d, 0x16, 0xed, 0xad, 0x27, 0x5f, 0x0a, + 
0x04, 0xbd, 0x34, 0x27, 0xa1, 0xa6, 0xaf, 0x6d, 0x6f, 0xbd, 0x18, 0x28, 0x50, 0xcc, 0xec, 0xec, + 0xff, 0x5d, 0x89, 0x2a, 0x20, 0x1d, 0x72, 0xe3, 0xce, 0x7b, 0xef, 0xf7, 0xde, 0xcc, 0xbc, 0xf7, + 0xe6, 0xfd, 0x08, 0x3f, 0x3a, 0x7d, 0xdf, 0xd7, 0x0c, 0xa7, 0x7d, 0x3a, 0xec, 0x51, 0xcf, 0xa6, + 0x8c, 0xfa, 0xed, 0x11, 0xb5, 0xfb, 0x8e, 0xd7, 0x96, 0x02, 0xe2, 0x1a, 0x6d, 0xe2, 0xba, 0x7e, + 0x7b, 0x74, 0xa7, 0x47, 0x19, 0xb9, 0xd3, 0x1e, 0x50, 0x9b, 0x7a, 0x84, 0xd1, 0xbe, 0xe6, 0x7a, + 0x0e, 0x73, 0xd0, 0x5a, 0xa0, 0xa8, 0x11, 0xd7, 0xd0, 0xb8, 0xa2, 0x26, 0x15, 0xd7, 0xb7, 0x07, + 0x06, 0x7b, 0x3c, 0xec, 0x69, 0xba, 0x63, 0xb5, 0x07, 0xce, 0xc0, 0x69, 0x0b, 0xfd, 0xde, 0xf0, + 0x91, 0xf8, 0x12, 0x1f, 0xe2, 0x57, 0x80, 0xb3, 0xae, 0x26, 0x1c, 0xea, 0x8e, 0x47, 0xdb, 0xa3, + 0x9c, 0xaf, 0xf5, 0xdb, 0x09, 0x1d, 0xd7, 0x31, 0x0d, 0x7d, 0x5c, 0x16, 0xd6, 0xfa, 0xbb, 0xb1, + 0xaa, 0x45, 0xf4, 0xc7, 0x86, 0x4d, 0xbd, 0x71, 0xdb, 0x3d, 0x1d, 0xf0, 0x05, 0xbf, 0x6d, 0x51, + 0x46, 0x8a, 0x1c, 0xb4, 0xcb, 0xac, 0xbc, 0xa1, 0xcd, 0x0c, 0x8b, 0xe6, 0x0c, 0xde, 0xbb, 0xc8, + 0xc0, 0xd7, 0x1f, 0x53, 0x8b, 0xe4, 0xec, 0xde, 0x29, 0xb3, 0x1b, 0x32, 0xc3, 0x6c, 0x1b, 0x36, + 0xf3, 0x99, 0x97, 0x35, 0x52, 0xff, 0xa3, 0x00, 0xda, 0x73, 0x6c, 0xe6, 0x39, 0xa6, 0x49, 0x3d, + 0x4c, 0x47, 0x86, 0x6f, 0x38, 0x36, 0xfa, 0x02, 0xea, 0x7c, 0x3f, 0x7d, 0xc2, 0x48, 0x53, 0xd9, + 0x54, 0xb6, 0x16, 0x77, 0xde, 0xd6, 0xe2, 0x4b, 0x89, 0xe0, 0x35, 0xf7, 0x74, 0xc0, 0x17, 0x7c, + 0x8d, 0x6b, 0x6b, 0xa3, 0x3b, 0xda, 0x61, 0xef, 0x4b, 0xaa, 0xb3, 0x03, 0xca, 0x48, 0x07, 0x3d, + 0x3d, 0xdb, 0x98, 0x99, 0x9c, 0x6d, 0x40, 0xbc, 0x86, 0x23, 0x54, 0x74, 0x08, 0x55, 0x81, 0x5e, + 0x11, 0xe8, 0xdb, 0xa5, 0xe8, 0x72, 0xd3, 0x1a, 0x26, 0xbf, 0x7a, 0xf0, 0x84, 0x51, 0x9b, 0x87, + 0xd7, 0x79, 0x49, 0x42, 0x57, 0xef, 0x13, 0x46, 0xb0, 0x00, 0x42, 0x6f, 0x41, 0xdd, 0x93, 0xe1, + 0x37, 0x67, 0x37, 0x95, 0xad, 0xd9, 0xce, 0x0d, 0xa9, 0x55, 0x0f, 0xb7, 0x85, 0x23, 0x0d, 0xf5, + 0xa9, 0x02, 0xb7, 0xf2, 0xfb, 0xde, 0x37, 0x7c, 0x86, 0x7e, 0x91, 0xdb, 0xbb, 0x36, 0xdd, 0xde, + 0xb9, 0xb5, 0xd8, 0x79, 0xe4, 0x38, 0x5c, 0x49, 0xec, 0xfb, 0x08, 0x6a, 0x06, 0xa3, 0x96, 0xdf, + 0xac, 0x6c, 0xce, 0x6e, 0x2d, 0xee, 0xbc, 0xa9, 0x95, 0xe4, 0xba, 0x96, 0x8f, 0xae, 0xb3, 0x24, + 0x71, 0x6b, 0x0f, 0x39, 0x02, 0x0e, 0x80, 0xd4, 0xdf, 0x56, 0x00, 0xee, 0x53, 0xd7, 0x74, 0xc6, + 0x16, 0xb5, 0xd9, 0x35, 0x5c, 0xdd, 0x43, 0xa8, 0xfa, 0x2e, 0xd5, 0xe5, 0xd5, 0xbd, 0x5e, 0xba, + 0x83, 0x38, 0xa8, 0xae, 0x4b, 0xf5, 0xf8, 0xd2, 0xf8, 0x17, 0x16, 0x10, 0xe8, 0x53, 0x98, 0xf3, + 0x19, 0x61, 0x43, 0x5f, 0x5c, 0xd9, 0xe2, 0xce, 0xed, 0x69, 0xc0, 0x84, 0x41, 0xa7, 0x21, 0xe1, + 0xe6, 0x82, 0x6f, 0x2c, 0x81, 0xd4, 0xbf, 0xcf, 0xc2, 0x4a, 0xac, 0xbc, 0xe7, 0xd8, 0x7d, 0x83, + 0xf1, 0x94, 0xbe, 0x07, 0x55, 0x36, 0x76, 0xa9, 0x38, 0x93, 0x85, 0xce, 0xeb, 0x61, 0x30, 0xc7, + 0x63, 0x97, 0xbe, 0x38, 0xdb, 0x58, 0x2b, 0x30, 0xe1, 0x22, 0x2c, 0x8c, 0xd0, 0x7e, 0x14, 0x67, + 0x45, 0x98, 0xbf, 0x9b, 0x76, 0xfe, 0xe2, 0x6c, 0xa3, 0xa0, 0xd7, 0x68, 0x11, 0x52, 0x3a, 0x44, + 0xf4, 0x1a, 0xcc, 0x79, 0x94, 0xf8, 0x8e, 0xdd, 0xac, 0x0a, 0xb4, 0x68, 0x2b, 0x58, 0xac, 0x62, + 0x29, 0x45, 0xb7, 0x61, 0xde, 0xa2, 0xbe, 0x4f, 0x06, 0xb4, 0x59, 0x13, 0x8a, 0xcb, 0x52, 0x71, + 0xfe, 0x20, 0x58, 0xc6, 0xa1, 0x1c, 0x7d, 0x09, 0x0d, 0x93, 0xf8, 0xec, 0xc4, 0xed, 0x13, 0x46, + 0x8f, 0x0d, 0x8b, 0x36, 0xe7, 0xc4, 0x81, 0xbe, 0x31, 0xdd, 0xdd, 0x73, 0x8b, 0xce, 0x2d, 0x89, + 0xde, 0xd8, 0x4f, 0x21, 0xe1, 0x0c, 0x32, 0x1a, 0x01, 0xe2, 0x2b, 0xc7, 0x1e, 0xb1, 0xfd, 0xe0, + 0xa0, 0xb8, 0xbf, 0xf9, 
0x4b, 0xfb, 0x5b, 0x97, 0xfe, 0xd0, 0x7e, 0x0e, 0x0d, 0x17, 0x78, 0x50, + 0xff, 0xa8, 0x40, 0x23, 0xbe, 0xa6, 0x6b, 0xa8, 0xd5, 0x8f, 0xd3, 0xb5, 0xfa, 0xfd, 0x29, 0x92, + 0xb3, 0xa4, 0x46, 0xff, 0x59, 0x01, 0x14, 0x2b, 0x61, 0xc7, 0x34, 0x7b, 0x44, 0x3f, 0x45, 0x9b, + 0x50, 0xb5, 0x89, 0x15, 0xe6, 0x64, 0x54, 0x20, 0x3f, 0x21, 0x16, 0xc5, 0x42, 0x82, 0xbe, 0x52, + 0x00, 0x0d, 0xc5, 0xd1, 0xf7, 0x77, 0x6d, 0xdb, 0x61, 0x84, 0x9f, 0x46, 0x18, 0xd0, 0xde, 0x14, + 0x01, 0x85, 0xbe, 0xb4, 0x93, 0x1c, 0xca, 0x03, 0x9b, 0x79, 0xe3, 0xf8, 0x16, 0xf2, 0x0a, 0xb8, + 0xc0, 0x35, 0xfa, 0x39, 0x80, 0x27, 0x31, 0x8f, 0x1d, 0x59, 0xb6, 0xe5, 0x3d, 0x20, 0x74, 0xbf, + 0xe7, 0xd8, 0x8f, 0x8c, 0x41, 0xdc, 0x58, 0x70, 0x04, 0x81, 0x13, 0x70, 0xeb, 0x0f, 0x60, 0xad, + 0x24, 0x4e, 0x74, 0x03, 0x66, 0x4f, 0xe9, 0x38, 0x38, 0x2a, 0xcc, 0x7f, 0xa2, 0x55, 0xa8, 0x8d, + 0x88, 0x39, 0xa4, 0x41, 0x4d, 0xe2, 0xe0, 0xe3, 0x6e, 0xe5, 0x7d, 0x45, 0xfd, 0x43, 0x2d, 0x99, + 0x29, 0xbc, 0xdf, 0xa0, 0x2d, 0xfe, 0x3c, 0xb8, 0xa6, 0xa1, 0x13, 0x5f, 0x60, 0xd4, 0x3a, 0x2f, + 0x05, 0x4f, 0x43, 0xb0, 0x86, 0x23, 0x29, 0xfa, 0x25, 0xd4, 0x7d, 0x6a, 0x52, 0x9d, 0x39, 0x9e, + 0x6c, 0x71, 0xef, 0x4c, 0x99, 0x53, 0xa4, 0x47, 0xcd, 0xae, 0x34, 0x0d, 0xe0, 0xc3, 0x2f, 0x1c, + 0x41, 0xa2, 0x4f, 0xa1, 0xce, 0xa8, 0xe5, 0x9a, 0x84, 0x51, 0x79, 0x7a, 0xa9, 0xbc, 0xe2, 0xbd, + 0x83, 0x83, 0x1d, 0x39, 0xfd, 0x63, 0xa9, 0x26, 0xba, 0x67, 0x94, 0xa7, 0xe1, 0x2a, 0x8e, 0x60, + 0xd0, 0xcf, 0xa0, 0xee, 0x33, 0xfe, 0xaa, 0x0f, 0xc6, 0xa2, 0xa3, 0x9c, 0xf7, 0xac, 0x24, 0xfb, + 0x68, 0x60, 0x12, 0x43, 0x87, 0x2b, 0x38, 0x82, 0x43, 0xbb, 0xb0, 0x6c, 0x19, 0x36, 0xa6, 0xa4, + 0x3f, 0xee, 0x52, 0xdd, 0xb1, 0xfb, 0xbe, 0x68, 0x45, 0xb5, 0xce, 0x9a, 0x34, 0x5a, 0x3e, 0x48, + 0x8b, 0x71, 0x56, 0x1f, 0xed, 0xc3, 0x6a, 0xf8, 0xec, 0x7e, 0x6c, 0xf8, 0xcc, 0xf1, 0xc6, 0xfb, + 0x86, 0x65, 0x30, 0xd1, 0xa0, 0x6a, 0x9d, 0xe6, 0xe4, 0x6c, 0x63, 0x15, 0x17, 0xc8, 0x71, 0xa1, + 0x15, 0xef, 0x9d, 0x2e, 0x19, 0xfa, 0xb4, 0x2f, 0x1a, 0x4e, 0x3d, 0xee, 0x9d, 0x47, 0x62, 0x15, + 0x4b, 0x29, 0xfa, 0x69, 0x2a, 0x4d, 0xeb, 0x97, 0x4b, 0xd3, 0x46, 0x79, 0x8a, 0xa2, 0x13, 0x58, + 0x73, 0x3d, 0x67, 0xe0, 0x51, 0xdf, 0xbf, 0x4f, 0x49, 0xdf, 0x34, 0x6c, 0x1a, 0x9e, 0xcc, 0x82, + 0xd8, 0xd1, 0x2b, 0x93, 0xb3, 0x8d, 0xb5, 0xa3, 0x62, 0x15, 0x5c, 0x66, 0xab, 0xfe, 0xa5, 0x0a, + 0x37, 0xb2, 0x6f, 0x1c, 0xfa, 0x04, 0x90, 0xd3, 0xf3, 0xa9, 0x37, 0xa2, 0xfd, 0x8f, 0x82, 0xc1, + 0x8d, 0x4f, 0x37, 0x8a, 0x98, 0x6e, 0xa2, 0xba, 0x3d, 0xcc, 0x69, 0xe0, 0x02, 0xab, 0x60, 0x3e, + 0x92, 0x05, 0x50, 0x11, 0x81, 0x26, 0xe6, 0xa3, 0x5c, 0x11, 0xec, 0xc2, 0xb2, 0xac, 0xfd, 0x50, + 0x28, 0x92, 0x35, 0x71, 0xef, 0x27, 0x69, 0x31, 0xce, 0xea, 0xa3, 0x8f, 0xe0, 0x26, 0x19, 0x11, + 0xc3, 0x24, 0x3d, 0x93, 0x46, 0x20, 0x55, 0x01, 0xf2, 0xb2, 0x04, 0xb9, 0xb9, 0x9b, 0x55, 0xc0, + 0x79, 0x1b, 0x74, 0x00, 0x2b, 0x43, 0x3b, 0x0f, 0x15, 0xe4, 0xe1, 0x2b, 0x12, 0x6a, 0xe5, 0x24, + 0xaf, 0x82, 0x8b, 0xec, 0xd0, 0x17, 0x00, 0x7a, 0xf8, 0x30, 0xfb, 0xcd, 0x39, 0xd1, 0x49, 0xdf, + 0x9a, 0xa2, 0x5e, 0xa2, 0xd7, 0x3c, 0xee, 0x62, 0xd1, 0x92, 0x8f, 0x13, 0x98, 0xe8, 0x1e, 0x2c, + 0x79, 0xbc, 0x02, 0xa2, 0x50, 0xe7, 0x45, 0xa8, 0xdf, 0x91, 0x66, 0x4b, 0x38, 0x29, 0xc4, 0x69, + 0x5d, 0x74, 0x17, 0x1a, 0xba, 0x63, 0x9a, 0x22, 0xf3, 0xf7, 0x9c, 0xa1, 0xcd, 0x44, 0xf2, 0xd6, + 0x3a, 0x88, 0xbf, 0xcc, 0x7b, 0x29, 0x09, 0xce, 0x68, 0xaa, 0x7f, 0x56, 0x92, 0xcf, 0x4c, 0x58, + 0xce, 0xe8, 0x6e, 0x6a, 0xf4, 0x79, 0x2d, 0x33, 0xfa, 0xdc, 0xca, 0x5b, 0x24, 0x26, 0x1f, 0x03, + 0x96, 0x78, 0xf2, 0x1b, 0xf6, 0x20, 0xb8, 0x70, 
0xd9, 0x12, 0xdf, 0x3e, 0xb7, 0x94, 0x22, 0xed, + 0xc4, 0xc3, 0x78, 0x53, 0xec, 0x3c, 0x29, 0xc4, 0x69, 0x64, 0xf5, 0x43, 0x68, 0xa4, 0xeb, 0x30, + 0x35, 0xd3, 0x2b, 0x17, 0xce, 0xf4, 0xcf, 0x15, 0x58, 0x2b, 0xf1, 0x8e, 0x4c, 0x68, 0x58, 0xe4, + 0x49, 0x22, 0x47, 0x2e, 0x9c, 0x8d, 0x39, 0x6b, 0xd2, 0x02, 0xd6, 0xa4, 0x3d, 0xb4, 0xd9, 0xa1, + 0xd7, 0x65, 0x9e, 0x61, 0x0f, 0x82, 0x7b, 0x38, 0x48, 0x61, 0xe1, 0x0c, 0x36, 0xfa, 0x1c, 0xea, + 0x16, 0x79, 0xd2, 0x1d, 0x7a, 0x83, 0xa2, 0xf3, 0x9a, 0xce, 0x8f, 0x78, 0x3f, 0x0e, 0x24, 0x0a, + 0x8e, 0xf0, 0xd4, 0x43, 0xd8, 0x4c, 0x6d, 0x92, 0xb7, 0x0a, 0xfa, 0x68, 0x68, 0x76, 0x69, 0x7c, + 0xe1, 0x6f, 0xc2, 0x82, 0x4b, 0x3c, 0x66, 0x44, 0xed, 0xa2, 0xd6, 0x59, 0x9a, 0x9c, 0x6d, 0x2c, + 0x1c, 0x85, 0x8b, 0x38, 0x96, 0xab, 0xff, 0x55, 0xa0, 0xd6, 0xd5, 0x89, 0x49, 0xaf, 0x81, 0x3a, + 0xdc, 0x4f, 0x51, 0x07, 0xb5, 0x34, 0x89, 0x44, 0x3c, 0xa5, 0xac, 0x61, 0x3f, 0xc3, 0x1a, 0x5e, + 0xbd, 0x00, 0xe7, 0x7c, 0xc2, 0xf0, 0x01, 0x2c, 0x44, 0xee, 0x52, 0x5d, 0x52, 0xb9, 0xa8, 0x4b, + 0xaa, 0xbf, 0xaf, 0xc0, 0x62, 0xc2, 0xc5, 0xe5, 0xac, 0xf9, 0x71, 0x27, 0x06, 0x0d, 0xde, 0x86, + 0x76, 0xa6, 0xd9, 0x88, 0x16, 0x0e, 0x15, 0xc1, 0xfc, 0x16, 0xbf, 0xde, 0xf9, 0x59, 0xe3, 0x43, + 0x68, 0x30, 0xe2, 0x0d, 0x28, 0x0b, 0x65, 0xe2, 0xc0, 0x16, 0xe2, 0x49, 0xff, 0x38, 0x25, 0xc5, + 0x19, 0xed, 0xf5, 0x7b, 0xb0, 0x94, 0x72, 0x76, 0xa9, 0x21, 0xec, 0x2b, 0x7e, 0x38, 0x71, 0x72, + 0x5e, 0x43, 0x76, 0x7d, 0x92, 0xca, 0xae, 0xad, 0xf2, 0xc3, 0x4c, 0x94, 0x4c, 0x59, 0x8e, 0xe1, + 0x4c, 0x8e, 0xbd, 0x31, 0x15, 0xda, 0xf9, 0x99, 0xf6, 0xaf, 0x0a, 0xac, 0x26, 0xb4, 0x63, 0x6e, + 0xfa, 0xc3, 0x54, 0x83, 0xde, 0xca, 0x34, 0xe8, 0x66, 0x91, 0xcd, 0x95, 0x91, 0xd3, 0x62, 0x76, + 0x37, 0x7b, 0xd5, 0xec, 0xee, 0x0a, 0x48, 0xb1, 0xfa, 0x27, 0x05, 0x96, 0x13, 0x67, 0x77, 0x0d, + 0x8c, 0xf1, 0x61, 0x9a, 0x31, 0xbe, 0x3a, 0x4d, 0xd2, 0x94, 0x50, 0xc6, 0xbf, 0xd6, 0x52, 0xc1, + 0x7f, 0xeb, 0x49, 0xcc, 0xaf, 0x61, 0x75, 0xe4, 0x98, 0x43, 0x8b, 0xee, 0x99, 0xc4, 0xb0, 0x42, + 0x05, 0x3e, 0x31, 0xce, 0x66, 0xff, 0x18, 0x8a, 0xe0, 0xa9, 0xe7, 0x1b, 0x3e, 0xa3, 0x36, 0xfb, + 0x2c, 0xb6, 0xec, 0x7c, 0x57, 0x3a, 0x59, 0xfd, 0xac, 0x00, 0x0e, 0x17, 0x3a, 0x41, 0x3f, 0x80, + 0x45, 0x3e, 0x30, 0x1b, 0x3a, 0xe5, 0xdc, 0x5b, 0x26, 0xd6, 0x8a, 0x04, 0x5a, 0xec, 0xc6, 0x22, + 0x9c, 0xd4, 0x43, 0x8f, 0x61, 0xc5, 0x75, 0xfa, 0x07, 0xc4, 0x26, 0x03, 0xca, 0xc7, 0x8c, 0x23, + 0xf1, 0x07, 0xb2, 0x60, 0x36, 0x0b, 0x9d, 0xf7, 0xc2, 0xc9, 0xf4, 0x28, 0xaf, 0xf2, 0x82, 0x53, + 0x84, 0xfc, 0xb2, 0x28, 0xea, 0x22, 0x48, 0xe4, 0x41, 0x63, 0x28, 0x9f, 0x7b, 0x49, 0xf4, 0x82, + 0xff, 0x5b, 0x76, 0xa6, 0xc9, 0xb0, 0x93, 0x94, 0x65, 0xdc, 0xfd, 0xd3, 0xeb, 0x38, 0xe3, 0xa1, + 0x94, 0xb8, 0xd5, 0xff, 0x1f, 0xe2, 0xa6, 0xfe, 0xbb, 0x0a, 0x37, 0x73, 0xad, 0x12, 0xfd, 0xf8, + 0x1c, 0x86, 0x73, 0xeb, 0xca, 0xd8, 0x4d, 0x6e, 0x40, 0x9f, 0xbd, 0xc4, 0x80, 0xbe, 0x0b, 0xcb, + 0xfa, 0xd0, 0xf3, 0xa8, 0xcd, 0x32, 0xac, 0x26, 0xa2, 0x46, 0x7b, 0x69, 0x31, 0xce, 0xea, 0x17, + 0xb1, 0xab, 0xda, 0x25, 0xd9, 0x55, 0x32, 0x0a, 0x39, 0x21, 0x07, 0x69, 0x97, 0x8f, 0x42, 0x0e, + 0xca, 0x59, 0x7d, 0x3e, 0x1d, 0x04, 0xa8, 0x11, 0xc2, 0x7c, 0x7a, 0x3a, 0x38, 0x49, 0x49, 0x71, + 0x46, 0xbb, 0x80, 0xa9, 0x2c, 0x4c, 0xcb, 0x54, 0x10, 0x49, 0x91, 0x30, 0x10, 0x35, 0xbe, 0x3d, + 0x4d, 0x2e, 0x4f, 0xcd, 0xc2, 0xd4, 0xbf, 0x29, 0xf0, 0x72, 0x69, 0x11, 0xa0, 0xdd, 0xd4, 0x93, + 0xbb, 0x9d, 0x79, 0x72, 0xbf, 0x57, 0x6a, 0x98, 0x78, 0x77, 0xbd, 0x62, 0x6a, 0xf4, 0xc1, 0x74, + 0xd4, 0xa8, 0x60, 0x6e, 0xbf, 0x98, 0x23, 0x75, 0xb6, 0x9f, 0x3e, 0x6b, 
0xcd, 0x7c, 0xfd, 0xac, + 0x35, 0xf3, 0xcd, 0xb3, 0xd6, 0xcc, 0x6f, 0x26, 0x2d, 0xe5, 0xe9, 0xa4, 0xa5, 0x7c, 0x3d, 0x69, + 0x29, 0xdf, 0x4c, 0x5a, 0xca, 0x3f, 0x26, 0x2d, 0xe5, 0x77, 0xcf, 0x5b, 0x33, 0x9f, 0xcf, 0x4b, + 0x8f, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x9d, 0x5d, 0x9e, 0x04, 0x8c, 0x1b, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.proto b/vendor/k8s.io/api/apps/v1beta1/generated.proto index dcb034dfbd6c..68397a026ba7 100644 --- a/vendor/k8s.io/api/apps/v1beta1/generated.proto +++ b/vendor/k8s.io/api/apps/v1beta1/generated.proto @@ -338,6 +338,27 @@ message StatefulSet { optional StatefulSetStatus status = 3; } +// StatefulSetCondition describes the state of a statefulset at a certain point. +message StatefulSetCondition { + // Type of statefulset condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // Last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. + // +optional + optional string reason = 4; + + // A human readable message indicating details about the transition. + // +optional + optional string message = 5; +} + // StatefulSetList is a collection of StatefulSets. message StatefulSetList { // +optional @@ -442,6 +463,12 @@ message StatefulSetStatus { // newest ControllerRevision. // +optional optional int32 collisionCount = 9; + + // Represents the latest available observations of a statefulset's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated StatefulSetCondition conditions = 10; } // StatefulSetUpdateStrategy indicates the strategy that the StatefulSet diff --git a/vendor/k8s.io/api/apps/v1beta1/register.go b/vendor/k8s.io/api/apps/v1beta1/register.go index 84789be618d8..5b16819f2f68 100644 --- a/vendor/k8s.io/api/apps/v1beta1/register.go +++ b/vendor/k8s.io/api/apps/v1beta1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Deployment{}, diff --git a/vendor/k8s.io/api/apps/v1beta1/types.go b/vendor/k8s.io/api/apps/v1beta1/types.go index 63d1c725d8df..dd9e97e10151 100644 --- a/vendor/k8s.io/api/apps/v1beta1/types.go +++ b/vendor/k8s.io/api/apps/v1beta1/types.go @@ -26,6 +26,7 @@ import ( const ( ControllerRevisionHashLabelKey = "controller-revision-hash" StatefulSetRevisionLabel = ControllerRevisionHashLabelKey + StatefulSetPodNameLabel = "statefulset.kubernetes.io/pod-name" ) // ScaleSpec describes the attributes of a scale subresource @@ -247,6 +248,31 @@ type StatefulSetStatus struct { // newest ControllerRevision. // +optional CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"` + + // Represents the latest available observations of a statefulset's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []StatefulSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` +} + +type StatefulSetConditionType string + +// StatefulSetCondition describes the state of a statefulset at a certain point. +type StatefulSetCondition struct { + // Type of statefulset condition. 
+ Type StatefulSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=StatefulSetConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // Last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go index 72d5e69ce0df..d12baf39f5a8 100644 --- a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go @@ -206,6 +206,19 @@ func (StatefulSet) SwaggerDoc() map[string]string { return map_StatefulSet } +var map_StatefulSetCondition = map[string]string{ + "": "StatefulSetCondition describes the state of a statefulset at a certain point.", + "type": "Type of statefulset condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (StatefulSetCondition) SwaggerDoc() map[string]string { + return map_StatefulSetCondition +} + var map_StatefulSetList = map[string]string{ "": "StatefulSetList is a collection of StatefulSets.", } @@ -240,6 +253,7 @@ var map_StatefulSetStatus = map[string]string{ "currentRevision": "currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas).", "updateRevision": "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", "collisionCount": "collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", + "conditions": "Represents the latest available observations of a statefulset's current state.", } func (StatefulSetStatus) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go index 2f046b02057d..cad744ce0bbc 100644 --- a/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go @@ -23,105 +23,10 @@ package v1beta1 import ( core_v1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" intstr "k8s.io/apimachinery/pkg/util/intstr" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. 
-// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ControllerRevision).DeepCopyInto(out.(*ControllerRevision)) - return nil - }, InType: reflect.TypeOf(&ControllerRevision{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ControllerRevisionList).DeepCopyInto(out.(*ControllerRevisionList)) - return nil - }, InType: reflect.TypeOf(&ControllerRevisionList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Deployment).DeepCopyInto(out.(*Deployment)) - return nil - }, InType: reflect.TypeOf(&Deployment{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentCondition).DeepCopyInto(out.(*DeploymentCondition)) - return nil - }, InType: reflect.TypeOf(&DeploymentCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentList).DeepCopyInto(out.(*DeploymentList)) - return nil - }, InType: reflect.TypeOf(&DeploymentList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentRollback).DeepCopyInto(out.(*DeploymentRollback)) - return nil - }, InType: reflect.TypeOf(&DeploymentRollback{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentSpec).DeepCopyInto(out.(*DeploymentSpec)) - return nil - }, InType: reflect.TypeOf(&DeploymentSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentStatus).DeepCopyInto(out.(*DeploymentStatus)) - return nil - }, InType: reflect.TypeOf(&DeploymentStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentStrategy).DeepCopyInto(out.(*DeploymentStrategy)) - return nil - }, InType: reflect.TypeOf(&DeploymentStrategy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RollbackConfig).DeepCopyInto(out.(*RollbackConfig)) - return nil - }, InType: reflect.TypeOf(&RollbackConfig{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RollingUpdateDeployment).DeepCopyInto(out.(*RollingUpdateDeployment)) - return nil - }, InType: reflect.TypeOf(&RollingUpdateDeployment{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RollingUpdateStatefulSetStrategy).DeepCopyInto(out.(*RollingUpdateStatefulSetStrategy)) - return nil - }, InType: reflect.TypeOf(&RollingUpdateStatefulSetStrategy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Scale).DeepCopyInto(out.(*Scale)) - return nil - }, InType: reflect.TypeOf(&Scale{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ScaleSpec).DeepCopyInto(out.(*ScaleSpec)) - return nil - }, InType: reflect.TypeOf(&ScaleSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in 
interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ScaleStatus).DeepCopyInto(out.(*ScaleStatus)) - return nil - }, InType: reflect.TypeOf(&ScaleStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StatefulSet).DeepCopyInto(out.(*StatefulSet)) - return nil - }, InType: reflect.TypeOf(&StatefulSet{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StatefulSetList).DeepCopyInto(out.(*StatefulSetList)) - return nil - }, InType: reflect.TypeOf(&StatefulSetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StatefulSetSpec).DeepCopyInto(out.(*StatefulSetSpec)) - return nil - }, InType: reflect.TypeOf(&StatefulSetSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StatefulSetStatus).DeepCopyInto(out.(*StatefulSetStatus)) - return nil - }, InType: reflect.TypeOf(&StatefulSetStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StatefulSetUpdateStrategy).DeepCopyInto(out.(*StatefulSetUpdateStrategy)) - return nil - }, InType: reflect.TypeOf(&StatefulSetUpdateStrategy{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ControllerRevision) DeepCopyInto(out *ControllerRevision) { *out = *in @@ -591,6 +496,23 @@ func (in *StatefulSet) DeepCopyObject() runtime.Object { } } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetCondition) DeepCopyInto(out *StatefulSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetCondition. +func (in *StatefulSetCondition) DeepCopy() *StatefulSetCondition { + if in == nil { + return nil + } + out := new(StatefulSetCondition) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) { *out = *in @@ -698,6 +620,13 @@ func (in *StatefulSetStatus) DeepCopyInto(out *StatefulSetStatus) { **out = **in } } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]StatefulSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } diff --git a/vendor/k8s.io/api/apps/v1beta2/BUILD b/vendor/k8s.io/api/apps/v1beta2/BUILD index dd256cbd3102..c13a6ff578e1 100644 --- a/vendor/k8s.io/api/apps/v1beta2/BUILD +++ b/vendor/k8s.io/api/apps/v1beta2/BUILD @@ -15,12 +15,12 @@ go_library( "types_swagger_doc_generated.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/api/apps/v1beta2", deps = [ "//vendor/github.com/gogo/protobuf/proto:go_default_library", "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", diff --git a/vendor/k8s.io/api/apps/v1beta2/doc.go b/vendor/k8s.io/api/apps/v1beta2/doc.go index 0527447bcdf9..ae4a190863b0 100644 --- a/vendor/k8s.io/api/apps/v1beta2/doc.go +++ b/vendor/k8s.io/api/apps/v1beta2/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true package v1beta2 diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go index 07d95d8aca5c..2572ab081365 100644 --- a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go @@ -28,6 +28,7 @@ limitations under the License. ControllerRevision ControllerRevisionList DaemonSet + DaemonSetCondition DaemonSetList DaemonSetSpec DaemonSetStatus @@ -50,6 +51,7 @@ limitations under the License. 
ScaleSpec ScaleStatus StatefulSet + StatefulSetCondition StatefulSetList StatefulSetSpec StatefulSetStatus @@ -97,120 +99,129 @@ func (m *DaemonSet) Reset() { *m = DaemonSet{} } func (*DaemonSet) ProtoMessage() {} func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } +func (*DaemonSetCondition) ProtoMessage() {} +func (*DaemonSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } func (*DaemonSetList) ProtoMessage() {} -func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } func (*DaemonSetSpec) ProtoMessage() {} -func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } func (*DaemonSetStatus) ProtoMessage() {} -func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } func (*DaemonSetUpdateStrategy) ProtoMessage() {} -func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } func (m *Deployment) Reset() { *m = Deployment{} } func (*Deployment) ProtoMessage() {} -func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } func (*DeploymentCondition) ProtoMessage() {} -func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } func (m *DeploymentList) Reset() { *m = DeploymentList{} } func (*DeploymentList) ProtoMessage() {} -func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } func (*DeploymentSpec) ProtoMessage() {} -func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } func (*DeploymentStatus) ProtoMessage() {} -func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } func (*DeploymentStrategy) ProtoMessage() {} -func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, 
[]int{13} } func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } func (*ReplicaSet) ProtoMessage() {} -func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } func (*ReplicaSetCondition) ProtoMessage() {} -func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } func (*ReplicaSetList) ProtoMessage() {} -func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } func (*ReplicaSetSpec) ProtoMessage() {} -func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } func (*ReplicaSetStatus) ProtoMessage() {} -func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } func (*RollingUpdateDaemonSet) ProtoMessage() {} -func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } func (*RollingUpdateDeployment) ProtoMessage() {} func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{19} + return fileDescriptorGenerated, []int{20} } func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} } func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {} func (*RollingUpdateStatefulSetStrategy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{20} + return fileDescriptorGenerated, []int{21} } func (m *Scale) Reset() { *m = Scale{} } func (*Scale) ProtoMessage() {} -func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } +func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } func (*ScaleSpec) ProtoMessage() {} -func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } +func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } func (*ScaleStatus) ProtoMessage() {} -func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } +func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } func (m *StatefulSet) Reset() { *m = StatefulSet{} } func (*StatefulSet) ProtoMessage() {} -func (*StatefulSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } +func (*StatefulSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } + +func (m 
*StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } +func (*StatefulSetCondition) ProtoMessage() {} +func (*StatefulSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } func (*StatefulSetList) ProtoMessage() {} -func (*StatefulSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +func (*StatefulSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } func (*StatefulSetSpec) ProtoMessage() {} -func (*StatefulSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +func (*StatefulSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } func (*StatefulSetStatus) ProtoMessage() {} -func (*StatefulSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } +func (*StatefulSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } func (*StatefulSetUpdateStrategy) ProtoMessage() {} func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{28} + return fileDescriptorGenerated, []int{30} } func init() { proto.RegisterType((*ControllerRevision)(nil), "k8s.io.api.apps.v1beta2.ControllerRevision") proto.RegisterType((*ControllerRevisionList)(nil), "k8s.io.api.apps.v1beta2.ControllerRevisionList") proto.RegisterType((*DaemonSet)(nil), "k8s.io.api.apps.v1beta2.DaemonSet") + proto.RegisterType((*DaemonSetCondition)(nil), "k8s.io.api.apps.v1beta2.DaemonSetCondition") proto.RegisterType((*DaemonSetList)(nil), "k8s.io.api.apps.v1beta2.DaemonSetList") proto.RegisterType((*DaemonSetSpec)(nil), "k8s.io.api.apps.v1beta2.DaemonSetSpec") proto.RegisterType((*DaemonSetStatus)(nil), "k8s.io.api.apps.v1beta2.DaemonSetStatus") @@ -233,6 +244,7 @@ func init() { proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.apps.v1beta2.ScaleSpec") proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.apps.v1beta2.ScaleStatus") proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1beta2.StatefulSet") + proto.RegisterType((*StatefulSetCondition)(nil), "k8s.io.api.apps.v1beta2.StatefulSetCondition") proto.RegisterType((*StatefulSetList)(nil), "k8s.io.api.apps.v1beta2.StatefulSetList") proto.RegisterType((*StatefulSetSpec)(nil), "k8s.io.api.apps.v1beta2.StatefulSetSpec") proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1beta2.StatefulSetStatus") @@ -355,6 +367,48 @@ func (m *DaemonSet) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonSetCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n7, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + dAtA[i] = 0x22 + i++ + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil +} + func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -373,11 +427,11 @@ func (m *DaemonSetList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n7, err := m.ListMeta.MarshalTo(dAtA[i:]) + n8, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n7 + i += n8 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -412,28 +466,28 @@ func (m *DaemonSetSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n8, err := m.Selector.MarshalTo(dAtA[i:]) + n9, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n8 + i += n9 } dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n9, err := m.Template.MarshalTo(dAtA[i:]) + n10, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n9 + i += n10 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n10, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) + n11, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n10 + i += n11 dAtA[i] = 0x20 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) @@ -489,6 +543,18 @@ func (m *DaemonSetStatus) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) } + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -515,11 +581,11 @@ func (m *DaemonSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n11, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + n12, err := m.RollingUpdate.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n11 + i += n12 } return i, nil } @@ -542,27 +608,27 @@ func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n12, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n13, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n12 + i += n13 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n13, err := m.Spec.MarshalTo(dAtA[i:]) + n14, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n13 + i += n14 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n14, err := m.Status.MarshalTo(dAtA[i:]) + n15, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n14 + i += n15 return i, nil } @@ -600,19 +666,19 @@ func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x32 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) - n15, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) + n16, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n15 + i += n16 dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n16, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n17, 
err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n16 + i += n17 return i, nil } @@ -634,11 +700,11 @@ func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n17, err := m.ListMeta.MarshalTo(dAtA[i:]) + n18, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n17 + i += n18 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -678,28 +744,28 @@ func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n18, err := m.Selector.MarshalTo(dAtA[i:]) + n19, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n18 + i += n19 } dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n19, err := m.Template.MarshalTo(dAtA[i:]) + n20, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n19 + i += n20 dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) - n20, err := m.Strategy.MarshalTo(dAtA[i:]) + n21, err := m.Strategy.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n20 + i += n21 dAtA[i] = 0x28 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) @@ -800,11 +866,11 @@ func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n21, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + n22, err := m.RollingUpdate.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n21 + i += n22 } return i, nil } @@ -827,27 +893,27 @@ func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n22, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n23, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n22 + i += n23 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n23, err := m.Spec.MarshalTo(dAtA[i:]) + n24, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n23 + i += n24 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n24, err := m.Status.MarshalTo(dAtA[i:]) + n25, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n24 + i += n25 return i, nil } @@ -877,11 +943,11 @@ func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n25, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n26, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n25 + i += n26 dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -911,11 +977,11 @@ func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n26, err := m.ListMeta.MarshalTo(dAtA[i:]) + n27, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n26 + i += n27 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -955,20 +1021,20 @@ func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n27, err := m.Selector.MarshalTo(dAtA[i:]) + n28, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n27 + i += n28 } 
dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n28, err := m.Template.MarshalTo(dAtA[i:]) + n29, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n28 + i += n29 dAtA[i] = 0x20 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) @@ -1039,11 +1105,11 @@ func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n29, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) + n30, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n29 + i += n30 } return i, nil } @@ -1067,21 +1133,21 @@ func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n30, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) + n31, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n30 + i += n31 } if m.MaxSurge != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSurge.Size())) - n31, err := m.MaxSurge.MarshalTo(dAtA[i:]) + n32, err := m.MaxSurge.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n31 + i += n32 } return i, nil } @@ -1127,27 +1193,27 @@ func (m *Scale) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n32, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n33, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n32 + i += n33 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n33, err := m.Spec.MarshalTo(dAtA[i:]) + n34, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n33 + i += n34 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n34, err := m.Status.MarshalTo(dAtA[i:]) + n35, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n34 + i += n35 return i, nil } @@ -1237,27 +1303,69 @@ func (m *StatefulSet) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n35, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n36, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n35 + i += n36 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n36, err := m.Spec.MarshalTo(dAtA[i:]) + n37, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n36 + i += n37 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n37, err := m.Status.MarshalTo(dAtA[i:]) + n38, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n37 + i += n38 + return i, nil +} + +func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n39, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n39 + dAtA[i] = 
0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) return i, nil } @@ -1279,11 +1387,11 @@ func (m *StatefulSetList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n38, err := m.ListMeta.MarshalTo(dAtA[i:]) + n40, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n38 + i += n40 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -1323,20 +1431,20 @@ func (m *StatefulSetSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n39, err := m.Selector.MarshalTo(dAtA[i:]) + n41, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n39 + i += n41 } dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n40, err := m.Template.MarshalTo(dAtA[i:]) + n42, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n40 + i += n42 if len(m.VolumeClaimTemplates) > 0 { for _, msg := range m.VolumeClaimTemplates { dAtA[i] = 0x22 @@ -1360,11 +1468,11 @@ func (m *StatefulSetSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n41, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) + n43, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n41 + i += n43 if m.RevisionHistoryLimit != nil { dAtA[i] = 0x40 i++ @@ -1416,6 +1524,18 @@ func (m *StatefulSetStatus) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) } + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -1442,11 +1562,11 @@ func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n42, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + n44, err := m.RollingUpdate.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n42 + i += n44 } return i, nil } @@ -1515,6 +1635,22 @@ func (m *DaemonSet) Size() (n int) { return n } +func (m *DaemonSetCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *DaemonSetList) Size() (n int) { var l int _ = l @@ -1561,6 +1697,12 @@ func (m *DaemonSetStatus) Size() (n int) { if m.CollisionCount != nil { n += 1 + sovGenerated(uint64(*m.CollisionCount)) } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -1834,6 +1976,22 @@ func (m *StatefulSet) Size() (n int) { return n } +func (m *StatefulSetCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = 
len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *StatefulSetList) Size() (n int) { var l int _ = l @@ -1893,6 +2051,12 @@ func (m *StatefulSetStatus) Size() (n int) { if m.CollisionCount != nil { n += 1 + sovGenerated(uint64(*m.CollisionCount)) } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -1956,6 +2120,20 @@ func (this *DaemonSet) String() string { }, "") return s } +func (this *DaemonSetCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} func (this *DaemonSetList) String() string { if this == nil { return "nil" @@ -1995,6 +2173,7 @@ func (this *DaemonSetStatus) String() string { `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`, `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2245,6 +2424,20 @@ func (this *StatefulSet) String() string { }, "") return s } +func (this *StatefulSetCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} func (this *StatefulSetList) String() string { if this == nil { return "nil" @@ -2286,6 +2479,7 @@ func (this *StatefulSetStatus) String() string { `CurrentRevision:` + fmt.Sprintf("%v", this.CurrentRevision) + `,`, `UpdateRevision:` + fmt.Sprintf("%v", this.UpdateRevision) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "StatefulSetCondition", "StatefulSetCondition", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2660,13 +2854,209 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 
+ for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSetCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DaemonSetConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if 
b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -3183,6 +3573,37 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } } m.CollisionCount = &v + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, DaemonSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -5969,6 +6390,202 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } return nil } +func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = StatefulSetConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { 
+ if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *StatefulSetList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -6554,6 +7171,37 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } } m.CollisionCount = &v + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, StatefulSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -6797,138 +7445,142 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 2119 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x5a, 0xcb, 0x6f, 0x1c, 0xb7, - 0x19, 0xd7, 0xec, 0x43, 0xda, 0xa5, 0x2c, 0xc9, 0xa6, 0x54, 0x49, 0x91, 0xdb, 0x95, 0xb0, 0x09, - 0x1c, 0x39, 0xb6, 0x67, 0x6d, 0xe5, 0x81, 0xc4, 0x06, 0xda, 0x6a, 0xa5, 0xd4, 0x76, 0xa0, 0x57, - 0x28, 0xc9, 0x40, 0x83, 0x16, 0x30, 0xb5, 0x4b, 0xaf, 0x26, 0x9a, 0x17, 0x66, 0x38, 0x5b, 0x2f, - 0x7a, 
0xe9, 0xa9, 0x40, 0x81, 0x02, 0xe9, 0xb9, 0xff, 0x44, 0x7b, 0x2a, 0x8a, 0xf6, 0x56, 0x04, - 0x85, 0x2f, 0x05, 0x82, 0x5e, 0x92, 0x93, 0x50, 0x6f, 0x8e, 0x3d, 0xf7, 0x12, 0xa0, 0x40, 0x41, - 0x0e, 0xe7, 0xc1, 0x79, 0x58, 0x23, 0x25, 0xde, 0x14, 0xb9, 0x69, 0xf9, 0xfd, 0xbe, 0x1f, 0x3f, - 0x0e, 0x3f, 0x7e, 0xdf, 0x6f, 0x38, 0x02, 0x3f, 0x3e, 0x79, 0xd7, 0x55, 0x35, 0xab, 0x75, 0xe2, - 0x1d, 0x11, 0xc7, 0x24, 0x94, 0xb8, 0xad, 0x3e, 0x31, 0xbb, 0x96, 0xd3, 0x12, 0x06, 0x6c, 0x6b, - 0x2d, 0x6c, 0xdb, 0x6e, 0xab, 0x7f, 0xe7, 0x88, 0x50, 0xbc, 0xd6, 0xea, 0x11, 0x93, 0x38, 0x98, - 0x92, 0xae, 0x6a, 0x3b, 0x16, 0xb5, 0xe0, 0x82, 0x0f, 0x54, 0xb1, 0xad, 0xa9, 0x0c, 0xa8, 0x0a, - 0xe0, 0xd2, 0xad, 0x9e, 0x46, 0x8f, 0xbd, 0x23, 0xb5, 0x63, 0x19, 0xad, 0x9e, 0xd5, 0xb3, 0x5a, - 0x1c, 0x7f, 0xe4, 0x3d, 0xe1, 0xbf, 0xf8, 0x0f, 0xfe, 0x97, 0xcf, 0xb3, 0xd4, 0x8c, 0x4d, 0xd8, - 0xb1, 0x1c, 0xd2, 0xea, 0xdf, 0x49, 0xce, 0xb5, 0x74, 0x3d, 0x86, 0xb1, 0x2d, 0x5d, 0xeb, 0x0c, - 0x44, 0x58, 0x69, 0xe8, 0x5b, 0x11, 0xd4, 0xc0, 0x9d, 0x63, 0xcd, 0x24, 0xce, 0xa0, 0x65, 0x9f, - 0xf4, 0xd8, 0x80, 0xdb, 0x32, 0x08, 0xc5, 0x59, 0x13, 0xb4, 0xf2, 0xbc, 0x1c, 0xcf, 0xa4, 0x9a, - 0x41, 0x52, 0x0e, 0xef, 0x9c, 0xe5, 0xe0, 0x76, 0x8e, 0x89, 0x81, 0x53, 0x7e, 0x6f, 0xe6, 0xf9, - 0x79, 0x54, 0xd3, 0x5b, 0x9a, 0x49, 0x5d, 0xea, 0x24, 0x9d, 0x9a, 0xff, 0x51, 0x00, 0xdc, 0xb0, - 0x4c, 0xea, 0x58, 0xba, 0x4e, 0x1c, 0x44, 0xfa, 0x9a, 0xab, 0x59, 0x26, 0x7c, 0x0c, 0x6a, 0x6c, - 0x3d, 0x5d, 0x4c, 0xf1, 0xa2, 0xb2, 0xa2, 0xac, 0x4e, 0xae, 0xdd, 0x56, 0xa3, 0x4d, 0x09, 0xe9, - 0x55, 0xfb, 0xa4, 0xc7, 0x06, 0x5c, 0x95, 0xa1, 0xd5, 0xfe, 0x1d, 0x75, 0xf7, 0xe8, 0x63, 0xd2, - 0xa1, 0xdb, 0x84, 0xe2, 0x36, 0x7c, 0x76, 0xba, 0x3c, 0x36, 0x3c, 0x5d, 0x06, 0xd1, 0x18, 0x0a, - 0x59, 0xe1, 0x2e, 0xa8, 0x70, 0xf6, 0x12, 0x67, 0xbf, 0x95, 0xcb, 0x2e, 0x16, 0xad, 0x22, 0xfc, - 0x8b, 0xf7, 0x9f, 0x52, 0x62, 0xb2, 0xf0, 0xda, 0x97, 0x04, 0x75, 0x65, 0x13, 0x53, 0x8c, 0x38, - 0x11, 0xbc, 0x09, 0x6a, 0x8e, 0x08, 0x7f, 0xb1, 0xbc, 0xa2, 0xac, 0x96, 0xdb, 0x97, 0x05, 0xaa, - 0x16, 0x2c, 0x0b, 0x85, 0x88, 0xe6, 0x33, 0x05, 0xcc, 0xa7, 0xd7, 0xbd, 0xa5, 0xb9, 0x14, 0xfe, - 0x2c, 0xb5, 0x76, 0xb5, 0xd8, 0xda, 0x99, 0x37, 0x5f, 0x79, 0x38, 0x71, 0x30, 0x12, 0x5b, 0xf7, - 0x1e, 0xa8, 0x6a, 0x94, 0x18, 0xee, 0x62, 0x69, 0xa5, 0xbc, 0x3a, 0xb9, 0x76, 0x43, 0xcd, 0xc9, - 0x75, 0x35, 0x1d, 0x5d, 0x7b, 0x4a, 0xf0, 0x56, 0x1f, 0x32, 0x06, 0xe4, 0x13, 0x35, 0x7f, 0x53, - 0x02, 0xf5, 0x4d, 0x4c, 0x0c, 0xcb, 0xdc, 0x27, 0x74, 0x04, 0x3b, 0xf7, 0x00, 0x54, 0x5c, 0x9b, - 0x74, 0xc4, 0xce, 0x5d, 0xcb, 0x5d, 0x40, 0x18, 0xd3, 0xbe, 0x4d, 0x3a, 0xd1, 0x96, 0xb1, 0x5f, - 0x88, 0x33, 0xc0, 0x3d, 0x30, 0xee, 0x52, 0x4c, 0x3d, 0x97, 0x6f, 0xd8, 0xe4, 0xda, 0x6a, 0x01, - 0x2e, 0x8e, 0x6f, 0x4f, 0x0b, 0xb6, 0x71, 0xff, 0x37, 0x12, 0x3c, 0xcd, 0x3f, 0x29, 0x60, 0x2a, - 0xc4, 0x8e, 0x60, 0x37, 0xef, 0xcb, 0xbb, 0xd9, 0x3c, 0x7b, 0x01, 0x39, 0x9b, 0xf8, 0x69, 0x39, - 0x16, 0x38, 0x7b, 0x44, 0xf0, 0xe7, 0xa0, 0xe6, 0x12, 0x9d, 0x74, 0xa8, 0xe5, 0x88, 0xc0, 0xdf, - 0x2c, 0x18, 0x38, 0x3e, 0x22, 0xfa, 0xbe, 0x70, 0x6d, 0x5f, 0x62, 0x91, 0x07, 0xbf, 0x50, 0x48, - 0x09, 0x3f, 0x04, 0x35, 0x4a, 0x0c, 0x5b, 0xc7, 0x94, 0x88, 0x9d, 0x7c, 0x35, 0x1e, 0x3c, 0x2b, - 0x97, 0x8c, 0x6c, 0xcf, 0xea, 0x1e, 0x08, 0x18, 0xdf, 0xc6, 0xf0, 0x61, 0x04, 0xa3, 0x28, 0xa4, - 0x81, 0x36, 0x98, 0xf6, 0xec, 0x2e, 0x43, 0x52, 0x56, 0x62, 0x7a, 0x03, 0xb1, 0xad, 0xb7, 0xcf, - 0x7e, 0x2a, 0x87, 0x92, 0x5f, 0x7b, 0x5e, 0xcc, 0x32, 0x2d, 0x8f, 0xa3, 0x04, 0x3f, 0x5c, 0x07, - 0x33, 0x86, 0x66, 0x22, 0x82, 
0xbb, 0x83, 0x7d, 0xd2, 0xb1, 0xcc, 0xae, 0xbb, 0x58, 0x59, 0x51, - 0x56, 0xab, 0xed, 0x05, 0x41, 0x30, 0xb3, 0x2d, 0x9b, 0x51, 0x12, 0x0f, 0xb7, 0xc0, 0x5c, 0x50, - 0x14, 0x1e, 0x68, 0x2e, 0xb5, 0x9c, 0xc1, 0x96, 0x66, 0x68, 0x74, 0x71, 0x9c, 0xf3, 0x2c, 0x0e, - 0x4f, 0x97, 0xe7, 0x50, 0x86, 0x1d, 0x65, 0x7a, 0x35, 0xff, 0x58, 0x05, 0x33, 0x89, 0x5c, 0x85, - 0x8f, 0xc0, 0x7c, 0xc7, 0x73, 0x1c, 0x62, 0xd2, 0x1d, 0xcf, 0x38, 0x22, 0xce, 0x7e, 0xe7, 0x98, - 0x74, 0x3d, 0x9d, 0x74, 0xf9, 0xb6, 0x56, 0xdb, 0x0d, 0x11, 0xeb, 0xfc, 0x46, 0x26, 0x0a, 0xe5, - 0x78, 0xc3, 0x0f, 0x00, 0x34, 0xf9, 0xd0, 0xb6, 0xe6, 0xba, 0x21, 0x67, 0x89, 0x73, 0x2e, 0x09, - 0x4e, 0xb8, 0x93, 0x42, 0xa0, 0x0c, 0x2f, 0x16, 0x63, 0x97, 0xb8, 0x9a, 0x43, 0xba, 0xc9, 0x18, - 0xcb, 0x72, 0x8c, 0x9b, 0x99, 0x28, 0x94, 0xe3, 0x0d, 0xdf, 0x06, 0x93, 0xfe, 0x6c, 0xfc, 0x99, - 0x8b, 0xcd, 0x99, 0x15, 0x64, 0x93, 0x3b, 0x91, 0x09, 0xc5, 0x71, 0x6c, 0x69, 0xd6, 0x91, 0x4b, - 0x9c, 0x3e, 0xe9, 0xde, 0xf7, 0x1b, 0x16, 0xab, 0xea, 0x55, 0x5e, 0xd5, 0xc3, 0xa5, 0xed, 0xa6, - 0x10, 0x28, 0xc3, 0x8b, 0x2d, 0xcd, 0xcf, 0x9a, 0xd4, 0xd2, 0xc6, 0xe5, 0xa5, 0x1d, 0x66, 0xa2, - 0x50, 0x8e, 0x37, 0xcb, 0x3d, 0x3f, 0xe4, 0xf5, 0x3e, 0xd6, 0x74, 0x7c, 0xa4, 0x93, 0xc5, 0x09, - 0x39, 0xf7, 0x76, 0x64, 0x33, 0x4a, 0xe2, 0xe1, 0x7d, 0x70, 0xc5, 0x1f, 0x3a, 0x34, 0x71, 0x48, - 0x52, 0xe3, 0x24, 0xaf, 0x08, 0x92, 0x2b, 0x3b, 0x49, 0x00, 0x4a, 0xfb, 0xc0, 0xbb, 0x60, 0xba, - 0x63, 0xe9, 0x3a, 0xcf, 0xc7, 0x0d, 0xcb, 0x33, 0xe9, 0x62, 0x9d, 0xb3, 0x40, 0x76, 0x86, 0x36, - 0x24, 0x0b, 0x4a, 0x20, 0x9b, 0x9f, 0x2a, 0x60, 0x21, 0xe7, 0x1c, 0xc2, 0x1f, 0x81, 0x0a, 0x1d, - 0xd8, 0x84, 0x27, 0x6a, 0xbd, 0x7d, 0x23, 0x28, 0xe1, 0x07, 0x03, 0x9b, 0x7c, 0x75, 0xba, 0x7c, - 0x35, 0xc7, 0x8d, 0x99, 0x11, 0x77, 0x84, 0xc7, 0x60, 0x8a, 0xf5, 0x30, 0xcd, 0xec, 0xf9, 0x10, - 0x51, 0x6a, 0x5a, 0xb9, 0x15, 0x01, 0xc5, 0xd1, 0x51, 0xd1, 0xbc, 0x32, 0x3c, 0x5d, 0x9e, 0x92, - 0x6c, 0x48, 0x26, 0x6e, 0xfe, 0xb6, 0x04, 0xc0, 0x26, 0xb1, 0x75, 0x6b, 0x60, 0x10, 0x73, 0x14, - 0x6d, 0xf0, 0xa1, 0xd4, 0x06, 0x5f, 0xcf, 0xaf, 0x71, 0x61, 0x50, 0xb9, 0x7d, 0xf0, 0xc3, 0x44, - 0x1f, 0xbc, 0x5e, 0x84, 0xec, 0xc5, 0x8d, 0xf0, 0xf3, 0x32, 0x98, 0x8d, 0xc0, 0x1b, 0x96, 0xd9, - 0xd5, 0xf8, 0x69, 0xb8, 0x27, 0xed, 0xe8, 0xeb, 0x89, 0x1d, 0x5d, 0xc8, 0x70, 0x89, 0xed, 0xe6, - 0x56, 0x18, 0x67, 0x89, 0xbb, 0xbf, 0x25, 0x4f, 0xfe, 0xd5, 0xe9, 0x72, 0x86, 0xe2, 0x56, 0x43, - 0x26, 0x39, 0x44, 0x78, 0x0d, 0x8c, 0x3b, 0x04, 0xbb, 0x96, 0xc9, 0xcb, 0x42, 0x3d, 0x5a, 0x0a, - 0xe2, 0xa3, 0x48, 0x58, 0xe1, 0x75, 0x30, 0x61, 0x10, 0xd7, 0xc5, 0x3d, 0xc2, 0x2b, 0x40, 0xbd, - 0x3d, 0x23, 0x80, 0x13, 0xdb, 0xfe, 0x30, 0x0a, 0xec, 0xf0, 0x63, 0x30, 0xad, 0x63, 0x57, 0xa4, - 0xe3, 0x81, 0x66, 0x10, 0x7e, 0xc6, 0x27, 0xd7, 0xde, 0x28, 0xb6, 0xf7, 0xcc, 0x23, 0xea, 0x3d, - 0x5b, 0x12, 0x13, 0x4a, 0x30, 0xc3, 0x3e, 0x80, 0x6c, 0xe4, 0xc0, 0xc1, 0xa6, 0xeb, 0x3f, 0x28, - 0x36, 0xdf, 0xc4, 0xb9, 0xe7, 0x0b, 0xeb, 0xd9, 0x56, 0x8a, 0x0d, 0x65, 0xcc, 0xd0, 0xfc, 0xb3, - 0x02, 0xa6, 0xa3, 0x6d, 0x1a, 0x81, 0xc6, 0x79, 0x20, 0x6b, 0x9c, 0x57, 0x0b, 0x24, 0x67, 0x8e, - 0xc8, 0xf9, 0xbc, 0x12, 0x0f, 0x9d, 0xab, 0x9c, 0x55, 0xa6, 0xda, 0x6d, 0x5d, 0xeb, 0x60, 0x57, - 0xb4, 0xc3, 0x4b, 0xbe, 0x62, 0xf7, 0xc7, 0x50, 0x68, 0x95, 0xf4, 0x50, 0xe9, 0xe5, 0xea, 0xa1, - 0xf2, 0x37, 0xa3, 0x87, 0x7e, 0x0a, 0x6a, 0x6e, 0xa0, 0x84, 0x2a, 0x9c, 0xf2, 0x46, 0xa1, 0x83, - 0x2d, 0x44, 0x50, 0x48, 0x1d, 0xca, 0x9f, 0x90, 0x2e, 0x4b, 0xf8, 0x54, 0xbf, 0x4d, 0xe1, 0xc3, - 0x0e, 0xb3, 0x8d, 0x3d, 0x97, 0x74, 0xf9, 0x09, 0xa8, 
0x45, 0x87, 0x79, 0x8f, 0x8f, 0x22, 0x61, - 0x85, 0x87, 0x60, 0xc1, 0x76, 0xac, 0x9e, 0x43, 0x5c, 0x77, 0x93, 0xe0, 0xae, 0xae, 0x99, 0x24, - 0x58, 0x80, 0xdf, 0xb2, 0xae, 0x0e, 0x4f, 0x97, 0x17, 0xf6, 0xb2, 0x21, 0x28, 0xcf, 0xb7, 0xf9, - 0xb7, 0x0a, 0xb8, 0x9c, 0xac, 0x8d, 0x39, 0x2a, 0x42, 0xb9, 0x90, 0x8a, 0xb8, 0x19, 0xcb, 0x53, - 0x5f, 0x62, 0xc5, 0xde, 0x2e, 0x53, 0xb9, 0xba, 0x0e, 0x66, 0x84, 0x6a, 0x08, 0x8c, 0x42, 0x47, - 0x85, 0xdb, 0x73, 0x28, 0x9b, 0x51, 0x12, 0xcf, 0xb4, 0x41, 0xd4, 0xf2, 0x03, 0x92, 0x8a, 0xac, - 0x0d, 0xd6, 0x93, 0x00, 0x94, 0xf6, 0x81, 0xdb, 0x60, 0xd6, 0x33, 0xd3, 0x54, 0x7e, 0xba, 0x5c, - 0x15, 0x54, 0xb3, 0x87, 0x69, 0x08, 0xca, 0xf2, 0x83, 0x8f, 0x01, 0xe8, 0x04, 0x05, 0xdd, 0x5d, - 0x1c, 0xe7, 0x25, 0xe1, 0x66, 0x81, 0xb4, 0x0e, 0xbb, 0x40, 0xd4, 0x56, 0xc3, 0x21, 0x17, 0xc5, - 0x38, 0xe1, 0x3d, 0x30, 0xe5, 0x70, 0x49, 0x18, 0x84, 0xea, 0xcb, 0xaa, 0xef, 0x09, 0xb7, 0x29, - 0x14, 0x37, 0x22, 0x19, 0x9b, 0xa1, 0x84, 0x6a, 0x85, 0x95, 0xd0, 0x5f, 0x15, 0x00, 0xd3, 0xe7, - 0x10, 0xde, 0x95, 0x5a, 0xe6, 0xb5, 0x44, 0xcb, 0x9c, 0x4f, 0x7b, 0xc4, 0x3a, 0xa6, 0x96, 0xad, - 0x7f, 0x6e, 0x17, 0xd4, 0x3f, 0x51, 0x41, 0x2d, 0x26, 0x80, 0xc4, 0x63, 0x18, 0xcd, 0x3d, 0x40, - 0x51, 0x01, 0x14, 0x05, 0xf5, 0x0d, 0x08, 0xa0, 0x18, 0xd9, 0x8b, 0x05, 0xd0, 0xbf, 0x4b, 0x60, - 0x36, 0x02, 0x17, 0x16, 0x40, 0x19, 0x2e, 0x2f, 0x4d, 0x00, 0x65, 0x2b, 0x88, 0xf2, 0xcb, 0x56, - 0x10, 0x2f, 0x41, 0x78, 0x71, 0x51, 0x12, 0x3d, 0xba, 0xff, 0x27, 0x51, 0x12, 0x45, 0x95, 0x23, - 0x4a, 0xfe, 0x50, 0x8a, 0x87, 0xfe, 0x9d, 0x17, 0x25, 0x5f, 0xff, 0xca, 0xa4, 0xf9, 0xf7, 0x32, - 0xb8, 0x9c, 0x3c, 0x87, 0x52, 0x83, 0x54, 0xce, 0x6c, 0x90, 0x7b, 0x60, 0xee, 0x89, 0xa7, 0xeb, - 0x03, 0xfe, 0x18, 0x62, 0x5d, 0xd2, 0x6f, 0xad, 0xdf, 0x17, 0x9e, 0x73, 0x3f, 0xc9, 0xc0, 0xa0, - 0x4c, 0xcf, 0x9c, 0x66, 0x5f, 0xbe, 0x50, 0xb3, 0x4f, 0x75, 0xa0, 0xca, 0x39, 0x3a, 0x50, 0x66, - 0xe3, 0xae, 0x5e, 0xa0, 0x71, 0x9f, 0xaf, 0xd3, 0x66, 0x14, 0xae, 0xb3, 0x3a, 0x6d, 0xf3, 0xd7, - 0x0a, 0x98, 0xcf, 0x7e, 0xe1, 0x86, 0x3a, 0x98, 0x36, 0xf0, 0xd3, 0xf8, 0xbd, 0xc4, 0x59, 0x4d, - 0xc4, 0xa3, 0x9a, 0xae, 0xfa, 0x5f, 0x19, 0xd4, 0x87, 0x26, 0xdd, 0x75, 0xf6, 0xa9, 0xa3, 0x99, - 0x3d, 0xbf, 0xf3, 0x6e, 0x4b, 0x5c, 0x28, 0xc1, 0xdd, 0xfc, 0x52, 0x01, 0x0b, 0x39, 0x9d, 0x6f, - 0xb4, 0x91, 0xc0, 0x8f, 0x40, 0xcd, 0xc0, 0x4f, 0xf7, 0x3d, 0xa7, 0x97, 0xd5, 0xab, 0x8b, 0xcd, - 0xc3, 0x4f, 0xf3, 0xb6, 0x60, 0x41, 0x21, 0x5f, 0x73, 0x17, 0xac, 0x48, 0x8b, 0x64, 0x27, 0x87, - 0x3c, 0xf1, 0x74, 0x7e, 0x88, 0x84, 0xd8, 0xb8, 0x01, 0xea, 0x36, 0x76, 0xa8, 0x16, 0x4a, 0xd5, - 0x6a, 0x7b, 0x6a, 0x78, 0xba, 0x5c, 0xdf, 0x0b, 0x06, 0x51, 0x64, 0x6f, 0xfe, 0x57, 0x01, 0xd5, - 0xfd, 0x0e, 0xd6, 0xc9, 0x08, 0xba, 0xfd, 0xa6, 0xd4, 0xed, 0xf3, 0x2f, 0xba, 0x79, 0x3c, 0xb9, - 0x8d, 0x7e, 0x2b, 0xd1, 0xe8, 0x5f, 0x3b, 0x83, 0xe7, 0xc5, 0x3d, 0xfe, 0x3d, 0x50, 0x0f, 0xa7, - 0x3b, 0x5f, 0x01, 0x6a, 0xfe, 0xbe, 0x04, 0x26, 0x63, 0x53, 0x9c, 0xb3, 0x7c, 0x3d, 0x96, 0xca, - 0x3e, 0x3b, 0x98, 0x6b, 0x45, 0x16, 0xa2, 0x06, 0x25, 0xfe, 0x7d, 0x93, 0x3a, 0xf1, 0x17, 0xbc, - 0x74, 0xe5, 0xff, 0x21, 0x98, 0xa6, 0xd8, 0xe9, 0x11, 0x1a, 0xd8, 0xf8, 0x03, 0xab, 0x47, 0xb7, - 0x13, 0x07, 0x92, 0x15, 0x25, 0xd0, 0x4b, 0xf7, 0xc0, 0x94, 0x34, 0x19, 0xbc, 0x0c, 0xca, 0x27, - 0x64, 0xe0, 0xcb, 0x1e, 0xc4, 0xfe, 0x84, 0x73, 0xa0, 0xda, 0xc7, 0xba, 0xe7, 0xe7, 0x79, 0x1d, - 0xf9, 0x3f, 0xee, 0x96, 0xde, 0x55, 0x9a, 0x9f, 0xb0, 0x87, 0x13, 0x25, 0xe7, 0x08, 0xb2, 0xeb, - 0x03, 0x29, 0xbb, 0xf2, 0xbf, 0x03, 0xc5, 0x8f, 0x4c, 0x5e, 0x8e, 0xa1, 0x44, 
0x8e, 0xbd, 0x51, - 0x88, 0xed, 0xc5, 0x99, 0xf6, 0x17, 0x05, 0xcc, 0xc4, 0xd0, 0x23, 0x10, 0x38, 0x0f, 0x65, 0x81, - 0xf3, 0x5a, 0x91, 0x45, 0xe4, 0x28, 0x9c, 0x7f, 0x54, 0xa5, 0xe0, 0xbf, 0xf3, 0x12, 0xe7, 0x97, - 0x60, 0xae, 0x6f, 0xe9, 0x9e, 0x41, 0x36, 0x74, 0xac, 0x19, 0x01, 0x80, 0x75, 0xf1, 0x72, 0xf2, - 0xdd, 0x22, 0xa4, 0x27, 0x8e, 0xab, 0xb9, 0x94, 0x98, 0xf4, 0x51, 0xe4, 0x19, 0xe9, 0x90, 0x47, - 0x19, 0x74, 0x28, 0x73, 0x12, 0xf8, 0x36, 0x98, 0x64, 0x7a, 0x42, 0xeb, 0x90, 0x1d, 0x6c, 0x04, - 0xc2, 0x39, 0xfc, 0xe2, 0xb1, 0x1f, 0x99, 0x50, 0x1c, 0x07, 0x8f, 0xc1, 0xac, 0x6d, 0x75, 0xb7, - 0xb1, 0x89, 0x7b, 0x84, 0xb5, 0xbd, 0x3d, 0xfe, 0xaf, 0x08, 0xfc, 0x32, 0xa6, 0xde, 0x7e, 0x27, - 0x78, 0x4b, 0xdf, 0x4b, 0x43, 0xd8, 0x4b, 0x4b, 0xc6, 0x30, 0x7f, 0x69, 0xc9, 0xa2, 0x84, 0x4e, - 0xea, 0x2b, 0x9d, 0x7f, 0x67, 0xb9, 0x56, 0x24, 0xc3, 0x2e, 0xf8, 0x9d, 0x2e, 0xef, 0xae, 0xa9, - 0x76, 0xa1, 0x8f, 0x6c, 0x9f, 0x54, 0xc0, 0x95, 0xd4, 0xd1, 0xfd, 0x16, 0x6f, 0x7b, 0x52, 0x72, - 0xb1, 0x7c, 0x0e, 0xb9, 0xb8, 0x0e, 0x66, 0xc4, 0xf7, 0xbd, 0x84, 0xda, 0x0c, 0xf5, 0xf8, 0x86, - 0x6c, 0x46, 0x49, 0x7c, 0xd6, 0x6d, 0x53, 0xf5, 0x9c, 0xb7, 0x4d, 0xf1, 0x28, 0xc4, 0xff, 0x50, - 0xf8, 0xa9, 0x97, 0x8e, 0x42, 0xfc, 0x2b, 0x45, 0x12, 0xcf, 0x3a, 0x96, 0xcf, 0x1a, 0x32, 0x4c, - 0xc8, 0x1d, 0xeb, 0x50, 0xb2, 0xa2, 0x04, 0xfa, 0x6b, 0x7d, 0xc3, 0xfa, 0xa7, 0x02, 0x5e, 0xc9, - 0xcd, 0x52, 0xb8, 0x2e, 0xbd, 0xf2, 0xdf, 0x4a, 0xbc, 0xf2, 0xff, 0x20, 0xd7, 0x31, 0xf6, 0xe2, - 0xef, 0x64, 0xdf, 0xe3, 0xbc, 0x57, 0xec, 0x1e, 0x27, 0x43, 0xe8, 0x9d, 0x7d, 0xa1, 0xd3, 0xbe, - 0xf5, 0xec, 0x79, 0x63, 0xec, 0xb3, 0xe7, 0x8d, 0xb1, 0x2f, 0x9e, 0x37, 0xc6, 0x7e, 0x35, 0x6c, - 0x28, 0xcf, 0x86, 0x0d, 0xe5, 0xb3, 0x61, 0x43, 0xf9, 0x62, 0xd8, 0x50, 0xfe, 0x35, 0x6c, 0x28, - 0xbf, 0xfb, 0xb2, 0x31, 0xf6, 0xd1, 0x84, 0x98, 0xf1, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xe8, - 0x1d, 0x40, 0xdd, 0x77, 0x25, 0x00, 0x00, + // 2186 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcb, 0x6f, 0x1c, 0xb7, + 0x19, 0xf7, 0xec, 0x43, 0x5a, 0x51, 0x96, 0x64, 0x53, 0xaa, 0xb4, 0x91, 0xdb, 0x95, 0xb1, 0x09, + 0x1c, 0x39, 0xb6, 0x66, 0x6d, 0xe5, 0x81, 0xc4, 0x2e, 0xda, 0x6a, 0xa5, 0xd4, 0x76, 0xa0, 0x57, + 0x28, 0xcb, 0x40, 0x83, 0x16, 0x35, 0xb5, 0x4b, 0xaf, 0x26, 0x9a, 0x17, 0x66, 0x38, 0x5b, 0x2f, + 0x7a, 0xe9, 0xa9, 0x40, 0x81, 0x02, 0x6d, 0xaf, 0xfd, 0x27, 0x7a, 0x2b, 0x8a, 0xf6, 0x56, 0x04, + 0x85, 0x2f, 0x05, 0x82, 0x5e, 0x92, 0x93, 0x50, 0x6f, 0x4e, 0x45, 0xd1, 0x4b, 0x81, 0x5e, 0x02, + 0x14, 0x28, 0xc8, 0xe1, 0x3c, 0x38, 0x0f, 0xef, 0x48, 0xb1, 0x95, 0x22, 0xc8, 0x6d, 0x87, 0xfc, + 0x7d, 0x3f, 0x7e, 0x24, 0xbf, 0x8f, 0xdf, 0x6f, 0x38, 0x0b, 0xbe, 0x77, 0xf4, 0xb6, 0xab, 0x6a, + 0x56, 0xeb, 0xc8, 0x3b, 0x20, 0x8e, 0x49, 0x28, 0x71, 0x5b, 0x7d, 0x62, 0x76, 0x2d, 0xa7, 0x25, + 0x3a, 0xb0, 0xad, 0xb5, 0xb0, 0x6d, 0xbb, 0xad, 0xfe, 0xcd, 0x03, 0x42, 0xf1, 0x6a, 0xab, 0x47, + 0x4c, 0xe2, 0x60, 0x4a, 0xba, 0xaa, 0xed, 0x58, 0xd4, 0x82, 0x0b, 0x3e, 0x50, 0xc5, 0xb6, 0xa6, + 0x32, 0xa0, 0x2a, 0x80, 0x8b, 0x2b, 0x3d, 0x8d, 0x1e, 0x7a, 0x07, 0x6a, 0xc7, 0x32, 0x5a, 0x3d, + 0xab, 0x67, 0xb5, 0x38, 0xfe, 0xc0, 0x7b, 0xc4, 0x9f, 0xf8, 0x03, 0xff, 0xe5, 0xf3, 0x2c, 0x36, + 0x63, 0x03, 0x76, 0x2c, 0x87, 0xb4, 0xfa, 0x37, 0x93, 0x63, 0x2d, 0x5e, 0x8d, 0x61, 0x6c, 0x4b, + 0xd7, 0x3a, 0x03, 0xe1, 0x56, 0x1a, 0xfa, 0x46, 0x04, 0x35, 0x70, 0xe7, 0x50, 0x33, 0x89, 0x33, + 0x68, 0xd9, 0x47, 0x3d, 0xd6, 0xe0, 0xb6, 0x0c, 0x42, 0x71, 0xd6, 0x00, 0xad, 0x3c, 0x2b, 0xc7, + 0x33, 0xa9, 
0x66, 0x90, 0x94, 0xc1, 0x5b, 0xa3, 0x0c, 0xdc, 0xce, 0x21, 0x31, 0x70, 0xca, 0xee, + 0xf5, 0x3c, 0x3b, 0x8f, 0x6a, 0x7a, 0x4b, 0x33, 0xa9, 0x4b, 0x9d, 0xa4, 0x51, 0xf3, 0x3f, 0x0a, + 0x80, 0xeb, 0x96, 0x49, 0x1d, 0x4b, 0xd7, 0x89, 0x83, 0x48, 0x5f, 0x73, 0x35, 0xcb, 0x84, 0x0f, + 0x41, 0x8d, 0xcd, 0xa7, 0x8b, 0x29, 0xae, 0x2b, 0x97, 0x95, 0xe5, 0xc9, 0xd5, 0x1b, 0x6a, 0xb4, + 0x29, 0x21, 0xbd, 0x6a, 0x1f, 0xf5, 0x58, 0x83, 0xab, 0x32, 0xb4, 0xda, 0xbf, 0xa9, 0xee, 0x1c, + 0x7c, 0x48, 0x3a, 0x74, 0x8b, 0x50, 0xdc, 0x86, 0x4f, 0x8e, 0x97, 0xce, 0x0d, 0x8f, 0x97, 0x40, + 0xd4, 0x86, 0x42, 0x56, 0xb8, 0x03, 0x2a, 0x9c, 0xbd, 0xc4, 0xd9, 0x57, 0x72, 0xd9, 0xc5, 0xa4, + 0x55, 0x84, 0x7f, 0xf2, 0xee, 0x63, 0x4a, 0x4c, 0xe6, 0x5e, 0xfb, 0xbc, 0xa0, 0xae, 0x6c, 0x60, + 0x8a, 0x11, 0x27, 0x82, 0xd7, 0x41, 0xcd, 0x11, 0xee, 0xd7, 0xcb, 0x97, 0x95, 0xe5, 0x72, 0xfb, + 0x82, 0x40, 0xd5, 0x82, 0x69, 0xa1, 0x10, 0xd1, 0x7c, 0xa2, 0x80, 0xf9, 0xf4, 0xbc, 0x37, 0x35, + 0x97, 0xc2, 0x1f, 0xa6, 0xe6, 0xae, 0x16, 0x9b, 0x3b, 0xb3, 0xe6, 0x33, 0x0f, 0x07, 0x0e, 0x5a, + 0x62, 0xf3, 0xde, 0x05, 0x55, 0x8d, 0x12, 0xc3, 0xad, 0x97, 0x2e, 0x97, 0x97, 0x27, 0x57, 0xaf, + 0xa9, 0x39, 0xb1, 0xae, 0xa6, 0xbd, 0x6b, 0x4f, 0x09, 0xde, 0xea, 0x3d, 0xc6, 0x80, 0x7c, 0xa2, + 0xe6, 0x2f, 0x4a, 0x60, 0x62, 0x03, 0x13, 0xc3, 0x32, 0xf7, 0x08, 0x3d, 0x83, 0x9d, 0xbb, 0x0b, + 0x2a, 0xae, 0x4d, 0x3a, 0x62, 0xe7, 0xae, 0xe4, 0x4e, 0x20, 0xf4, 0x69, 0xcf, 0x26, 0x9d, 0x68, + 0xcb, 0xd8, 0x13, 0xe2, 0x0c, 0x70, 0x17, 0x8c, 0xb9, 0x14, 0x53, 0xcf, 0xe5, 0x1b, 0x36, 0xb9, + 0xba, 0x5c, 0x80, 0x8b, 0xe3, 0xdb, 0xd3, 0x82, 0x6d, 0xcc, 0x7f, 0x46, 0x82, 0xa7, 0xf9, 0x8f, + 0x12, 0x80, 0x21, 0x76, 0xdd, 0x32, 0xbb, 0x1a, 0x65, 0xe1, 0x7c, 0x0b, 0x54, 0xe8, 0xc0, 0x26, + 0x7c, 0x41, 0x26, 0xda, 0x57, 0x02, 0x57, 0xee, 0x0f, 0x6c, 0xf2, 0xf9, 0xf1, 0xd2, 0x7c, 0xda, + 0x82, 0xf5, 0x20, 0x6e, 0x03, 0x37, 0x43, 0x27, 0x4b, 0xdc, 0xfa, 0x0d, 0x79, 0xe8, 0xcf, 0x8f, + 0x97, 0x32, 0x8e, 0x19, 0x35, 0x64, 0x92, 0x1d, 0x84, 0x7d, 0x00, 0x75, 0xec, 0xd2, 0xfb, 0x0e, + 0x36, 0x5d, 0x7f, 0x24, 0xcd, 0x20, 0x62, 0xfa, 0xaf, 0x15, 0xdb, 0x28, 0x66, 0xd1, 0x5e, 0x14, + 0x5e, 0xc0, 0xcd, 0x14, 0x1b, 0xca, 0x18, 0x01, 0x5e, 0x01, 0x63, 0x0e, 0xc1, 0xae, 0x65, 0xd6, + 0x2b, 0x7c, 0x16, 0xe1, 0x02, 0x22, 0xde, 0x8a, 0x44, 0x2f, 0xbc, 0x0a, 0xc6, 0x0d, 0xe2, 0xba, + 0xb8, 0x47, 0xea, 0x55, 0x0e, 0x9c, 0x11, 0xc0, 0xf1, 0x2d, 0xbf, 0x19, 0x05, 0xfd, 0xcd, 0xdf, + 0x2b, 0x60, 0x2a, 0x5c, 0xb9, 0x33, 0xc8, 0x9c, 0x3b, 0x72, 0xe6, 0x34, 0x47, 0x07, 0x4b, 0x4e, + 0xc2, 0x7c, 0x54, 0x8e, 0x39, 0xce, 0xc2, 0x11, 0xfe, 0x08, 0xd4, 0x5c, 0xa2, 0x93, 0x0e, 0xb5, + 0x1c, 0xe1, 0xf8, 0xeb, 0x05, 0x1d, 0xc7, 0x07, 0x44, 0xdf, 0x13, 0xa6, 0xed, 0xf3, 0xcc, 0xf3, + 0xe0, 0x09, 0x85, 0x94, 0xf0, 0x7d, 0x50, 0xa3, 0xc4, 0xb0, 0x75, 0x4c, 0x89, 0xc8, 0x9a, 0x97, + 0xe3, 0xce, 0xb3, 0x98, 0x61, 0x64, 0xbb, 0x56, 0xf7, 0xbe, 0x80, 0xf1, 0x94, 0x09, 0x17, 0x23, + 0x68, 0x45, 0x21, 0x0d, 0xb4, 0xc1, 0xb4, 0x67, 0x77, 0x19, 0x92, 0xb2, 0xe3, 0xbc, 0x37, 0x10, + 0x31, 0x74, 0x63, 0xf4, 0xaa, 0xec, 0x4b, 0x76, 0xed, 0x79, 0x31, 0xca, 0xb4, 0xdc, 0x8e, 0x12, + 0xfc, 0x70, 0x0d, 0xcc, 0x18, 0x9a, 0x89, 0x08, 0xee, 0x0e, 0xf6, 0x48, 0xc7, 0x32, 0xbb, 0x2e, + 0x0f, 0xa5, 0x6a, 0x7b, 0x41, 0x10, 0xcc, 0x6c, 0xc9, 0xdd, 0x28, 0x89, 0x87, 0x9b, 0x60, 0x2e, + 0x38, 0x80, 0xef, 0x6a, 0x2e, 0xb5, 0x9c, 0xc1, 0xa6, 0x66, 0x68, 0xb4, 0x3e, 0xc6, 0x79, 0xea, + 0xc3, 0xe3, 0xa5, 0x39, 0x94, 0xd1, 0x8f, 0x32, 0xad, 0x9a, 0xbf, 0x19, 0x03, 0x33, 0x89, 0x73, + 0x01, 0x3e, 0x00, 0xf3, 0x1d, 0xcf, 
0x71, 0x88, 0x49, 0xb7, 0x3d, 0xe3, 0x80, 0x38, 0x7b, 0x9d, + 0x43, 0xd2, 0xf5, 0x74, 0xd2, 0xe5, 0xdb, 0x5a, 0x6d, 0x37, 0x84, 0xaf, 0xf3, 0xeb, 0x99, 0x28, + 0x94, 0x63, 0x0d, 0xdf, 0x03, 0xd0, 0xe4, 0x4d, 0x5b, 0x9a, 0xeb, 0x86, 0x9c, 0x25, 0xce, 0x19, + 0xa6, 0xe2, 0x76, 0x0a, 0x81, 0x32, 0xac, 0x98, 0x8f, 0x5d, 0xe2, 0x6a, 0x0e, 0xe9, 0x26, 0x7d, + 0x2c, 0xcb, 0x3e, 0x6e, 0x64, 0xa2, 0x50, 0x8e, 0x35, 0x7c, 0x13, 0x4c, 0xfa, 0xa3, 0xf1, 0x35, + 0x17, 0x9b, 0x33, 0x2b, 0xc8, 0x26, 0xb7, 0xa3, 0x2e, 0x14, 0xc7, 0xb1, 0xa9, 0x59, 0x07, 0x2e, + 0x71, 0xfa, 0xa4, 0x7b, 0xc7, 0x17, 0x07, 0xac, 0x82, 0x56, 0x79, 0x05, 0x0d, 0xa7, 0xb6, 0x93, + 0x42, 0xa0, 0x0c, 0x2b, 0x36, 0x35, 0x3f, 0x6a, 0x52, 0x53, 0x1b, 0x93, 0xa7, 0xb6, 0x9f, 0x89, + 0x42, 0x39, 0xd6, 0x2c, 0xf6, 0x7c, 0x97, 0xd7, 0xfa, 0x58, 0xd3, 0xf1, 0x81, 0x4e, 0xea, 0xe3, + 0x72, 0xec, 0x6d, 0xcb, 0xdd, 0x28, 0x89, 0x87, 0x77, 0xc0, 0x45, 0xbf, 0x69, 0xdf, 0xc4, 0x21, + 0x49, 0x8d, 0x93, 0xbc, 0x24, 0x48, 0x2e, 0x6e, 0x27, 0x01, 0x28, 0x6d, 0x03, 0x6f, 0x81, 0xe9, + 0x8e, 0xa5, 0xeb, 0x3c, 0x1e, 0xd7, 0x2d, 0xcf, 0xa4, 0xf5, 0x09, 0xce, 0x02, 0x59, 0x0e, 0xad, + 0x4b, 0x3d, 0x28, 0x81, 0x84, 0x3f, 0x06, 0xa0, 0x13, 0x14, 0x06, 0xb7, 0x0e, 0x46, 0x28, 0x80, + 0x74, 0x59, 0x8a, 0x2a, 0x73, 0xd8, 0xe4, 0xa2, 0x18, 0x65, 0xf3, 0x23, 0x05, 0x2c, 0xe4, 0x24, + 0x3a, 0xfc, 0xae, 0x54, 0x04, 0xaf, 0x25, 0x8a, 0xe0, 0xa5, 0x1c, 0xb3, 0x58, 0x25, 0x3c, 0x04, + 0x53, 0x4c, 0x90, 0x68, 0x66, 0xcf, 0x87, 0x88, 0xb3, 0xac, 0x95, 0x3b, 0x01, 0x14, 0x47, 0x47, + 0xa7, 0xf2, 0xc5, 0xe1, 0xf1, 0xd2, 0x94, 0xd4, 0x87, 0x64, 0xe2, 0xe6, 0x2f, 0x4b, 0x00, 0x6c, + 0x10, 0x5b, 0xb7, 0x06, 0x06, 0x31, 0xcf, 0x42, 0xd3, 0xdc, 0x93, 0x34, 0xcd, 0xab, 0xf9, 0x5b, + 0x12, 0x3a, 0x95, 0x2b, 0x6a, 0xde, 0x4f, 0x88, 0x9a, 0xab, 0x45, 0xc8, 0x9e, 0xad, 0x6a, 0x3e, + 0x29, 0x83, 0xd9, 0x08, 0x1c, 0xc9, 0x9a, 0xdb, 0xd2, 0x8e, 0xbe, 0x9a, 0xd8, 0xd1, 0x85, 0x0c, + 0x93, 0x17, 0xa6, 0x6b, 0x9e, 0xbf, 0xbe, 0x80, 0x1f, 0x82, 0x69, 0x26, 0x64, 0xfc, 0x90, 0xe0, + 0x32, 0x69, 0xec, 0xc4, 0x32, 0x29, 0x2c, 0x6e, 0x9b, 0x12, 0x13, 0x4a, 0x30, 0xe7, 0xc8, 0xb2, + 0xf1, 0x17, 0x2d, 0xcb, 0x9a, 0x7f, 0x50, 0xc0, 0x74, 0xb4, 0x4d, 0x67, 0x20, 0xa2, 0xee, 0xca, + 0x22, 0xea, 0xe5, 0x02, 0xc1, 0x99, 0xa3, 0xa2, 0x3e, 0xa9, 0xc4, 0x5d, 0xe7, 0x32, 0x6a, 0x99, + 0xbd, 0x82, 0xd9, 0xba, 0xd6, 0xc1, 0xae, 0xa8, 0xb7, 0xe7, 0xfd, 0xd7, 0x2f, 0xbf, 0x0d, 0x85, + 0xbd, 0x92, 0xe0, 0x2a, 0xbd, 0x58, 0xc1, 0x55, 0x7e, 0x3e, 0x82, 0xeb, 0x07, 0xa0, 0xe6, 0x06, + 0x52, 0xab, 0xc2, 0x29, 0xaf, 0x15, 0x4a, 0x6c, 0xa1, 0xb2, 0x42, 0xea, 0x50, 0x5f, 0x85, 0x74, + 0x59, 0xca, 0xaa, 0xfa, 0x65, 0x2a, 0x2b, 0x96, 0xcc, 0x36, 0xf6, 0x5c, 0xd2, 0xe5, 0x19, 0x50, + 0x8b, 0x92, 0x79, 0x97, 0xb7, 0x22, 0xd1, 0x0b, 0xf7, 0xc1, 0x82, 0xed, 0x58, 0x3d, 0x87, 0xb8, + 0xee, 0x06, 0xc1, 0x5d, 0x5d, 0x33, 0x49, 0x30, 0x01, 0xbf, 0x26, 0x5e, 0x1a, 0x1e, 0x2f, 0x2d, + 0xec, 0x66, 0x43, 0x50, 0x9e, 0x6d, 0xf3, 0xcf, 0x15, 0x70, 0x21, 0x79, 0x36, 0xe6, 0xc8, 0x14, + 0xe5, 0x54, 0x32, 0xe5, 0x7a, 0x2c, 0x4e, 0x7d, 0x0d, 0x17, 0xbb, 0x2a, 0x48, 0xc5, 0xea, 0x1a, + 0x98, 0x11, 0xb2, 0x24, 0xe8, 0x14, 0x42, 0x2d, 0xdc, 0x9e, 0x7d, 0xb9, 0x1b, 0x25, 0xf1, 0x4c, + 0x7c, 0x44, 0x9a, 0x22, 0x20, 0xa9, 0xc8, 0xe2, 0x63, 0x2d, 0x09, 0x40, 0x69, 0x1b, 0xb8, 0x05, + 0x66, 0x3d, 0x33, 0x4d, 0xe5, 0x87, 0xcb, 0x25, 0x41, 0x35, 0xbb, 0x9f, 0x86, 0xa0, 0x2c, 0x3b, + 0xf8, 0x50, 0xd2, 0x23, 0x63, 0xfc, 0x48, 0xb8, 0x5e, 0x20, 0xac, 0x0b, 0x0b, 0x12, 0x78, 0x1b, + 0x4c, 0x39, 0x5c, 0x73, 0x06, 0xae, 0xfa, 0xba, 0xed, 0x1b, 
0xc2, 0x6c, 0x0a, 0xc5, 0x3b, 0x91, + 0x8c, 0xcd, 0x90, 0x5a, 0xb5, 0xa2, 0x52, 0xab, 0xf9, 0x27, 0x05, 0xc0, 0x74, 0x1e, 0x8e, 0xbc, + 0x09, 0x48, 0x59, 0xc4, 0x2a, 0xa6, 0x96, 0xad, 0x7f, 0x6e, 0x14, 0xd4, 0x3f, 0xd1, 0x81, 0x5a, + 0x4c, 0x00, 0x89, 0x65, 0x38, 0x9b, 0x4b, 0x9d, 0xa2, 0x02, 0x28, 0x72, 0xea, 0x39, 0x08, 0xa0, + 0x18, 0xd9, 0xb3, 0x05, 0xd0, 0x3f, 0x4b, 0x60, 0x36, 0x02, 0x17, 0x16, 0x40, 0x19, 0x26, 0x5f, + 0x5f, 0xec, 0x8c, 0xbe, 0xd8, 0x61, 0xa2, 0x24, 0x5a, 0xba, 0xff, 0x27, 0x51, 0x12, 0x79, 0x95, + 0x23, 0x4a, 0x7e, 0x57, 0x8a, 0xbb, 0xfe, 0x95, 0x17, 0x25, 0x5f, 0xfc, 0x4e, 0xa6, 0xf9, 0x97, + 0x32, 0xb8, 0x90, 0xcc, 0x43, 0xa9, 0x40, 0x2a, 0x23, 0x0b, 0xe4, 0x2e, 0x98, 0x7b, 0xe4, 0xe9, + 0xfa, 0x80, 0x2f, 0x43, 0xac, 0x4a, 0xfa, 0xa5, 0xf5, 0x9b, 0xc2, 0x72, 0xee, 0xfb, 0x19, 0x18, + 0x94, 0x69, 0x99, 0x53, 0xec, 0xcb, 0xa7, 0x2a, 0xf6, 0xa9, 0x0a, 0x54, 0x39, 0x41, 0x05, 0xca, + 0x2c, 0xdc, 0xd5, 0x53, 0x14, 0xee, 0x93, 0x55, 0xda, 0x8c, 0x83, 0x6b, 0xe4, 0xab, 0xff, 0xcf, + 0x15, 0x30, 0x9f, 0xfd, 0xc2, 0x0d, 0x75, 0x30, 0x6d, 0xe0, 0xc7, 0xf1, 0x8b, 0x8f, 0x51, 0x45, + 0xc4, 0xa3, 0x9a, 0xae, 0xfa, 0x9f, 0x8c, 0xd4, 0x7b, 0x26, 0xdd, 0x71, 0xf6, 0xa8, 0xa3, 0x99, + 0x3d, 0xbf, 0xf2, 0x6e, 0x49, 0x5c, 0x28, 0xc1, 0xdd, 0xfc, 0x4c, 0x01, 0x0b, 0x39, 0x95, 0xef, + 0x6c, 0x3d, 0x81, 0x1f, 0x80, 0x9a, 0x81, 0x1f, 0xef, 0x79, 0x4e, 0x2f, 0xab, 0x56, 0x17, 0x1b, + 0x87, 0x67, 0xf3, 0x96, 0x60, 0x41, 0x21, 0x5f, 0x73, 0x07, 0x5c, 0x96, 0x26, 0xc9, 0x32, 0x87, + 0x3c, 0xf2, 0x74, 0x9e, 0x44, 0x42, 0x6c, 0x5c, 0x03, 0x13, 0x36, 0x76, 0xa8, 0x16, 0x4a, 0xd5, + 0x6a, 0x7b, 0x6a, 0x78, 0xbc, 0x34, 0xb1, 0x1b, 0x34, 0xa2, 0xa8, 0xbf, 0xf9, 0x5f, 0x05, 0x54, + 0xf7, 0x3a, 0x58, 0x27, 0x67, 0x50, 0xed, 0x37, 0xa4, 0x6a, 0x9f, 0x7f, 0x93, 0xce, 0xfd, 0xc9, + 0x2d, 0xf4, 0x9b, 0x89, 0x42, 0xff, 0xca, 0x08, 0x9e, 0x67, 0xd7, 0xf8, 0x77, 0xc0, 0x44, 0x38, + 0xdc, 0xc9, 0x0e, 0xa0, 0xe6, 0x6f, 0x4b, 0x60, 0x32, 0x36, 0xc4, 0x09, 0x8f, 0xaf, 0x87, 0xd2, + 0xb1, 0xcf, 0x12, 0x73, 0xb5, 0xc8, 0x44, 0xd4, 0xe0, 0x88, 0x7f, 0xd7, 0xa4, 0x4e, 0xfc, 0x05, + 0x2f, 0x7d, 0xf2, 0x7f, 0x07, 0x4c, 0x53, 0xec, 0xf4, 0x08, 0x0d, 0xfa, 0xf8, 0x82, 0x4d, 0x44, + 0xb7, 0x13, 0xf7, 0xa5, 0x5e, 0x94, 0x40, 0x2f, 0xde, 0x06, 0x53, 0xd2, 0x60, 0xf0, 0x02, 0x28, + 0x1f, 0x91, 0x81, 0x2f, 0x7b, 0x10, 0xfb, 0x09, 0xe7, 0x40, 0xb5, 0x8f, 0x75, 0xcf, 0x8f, 0xf3, + 0x09, 0xe4, 0x3f, 0xdc, 0x2a, 0xbd, 0xad, 0x34, 0x7f, 0xc5, 0x16, 0x27, 0x0a, 0xce, 0x33, 0x88, + 0xae, 0xf7, 0xa4, 0xe8, 0xca, 0xff, 0xa8, 0x17, 0x4f, 0x99, 0xbc, 0x18, 0x43, 0x89, 0x18, 0x7b, + 0xad, 0x10, 0xdb, 0xb3, 0x23, 0xed, 0x5f, 0x25, 0x30, 0x17, 0x43, 0x47, 0x72, 0xf2, 0xdb, 0x92, + 0x9c, 0x5c, 0x4e, 0xc8, 0xc9, 0x7a, 0x96, 0xcd, 0xd7, 0x7a, 0x72, 0xb4, 0x9e, 0xfc, 0xa3, 0x02, + 0x66, 0x62, 0x6b, 0x77, 0x06, 0x82, 0xf2, 0x9e, 0x2c, 0x28, 0x5f, 0x29, 0x12, 0x34, 0x39, 0x8a, + 0xf2, 0xaf, 0x55, 0xc9, 0xf9, 0xaf, 0xbc, 0xa4, 0xfc, 0x29, 0x98, 0xeb, 0x5b, 0xba, 0x67, 0x90, + 0x75, 0x1d, 0x6b, 0x46, 0x00, 0x60, 0xaa, 0xa9, 0x9c, 0x7c, 0x97, 0x0b, 0xe9, 0x89, 0xe3, 0x6a, + 0x2e, 0x25, 0x26, 0x7d, 0x10, 0x59, 0x46, 0xba, 0xef, 0x41, 0x06, 0x1d, 0xca, 0x1c, 0x04, 0xbe, + 0x09, 0x26, 0x99, 0x7e, 0xd3, 0x3a, 0x64, 0x1b, 0x1b, 0x41, 0x60, 0x85, 0x9f, 0xb0, 0xf6, 0xa2, + 0x2e, 0x14, 0xc7, 0xc1, 0x43, 0x30, 0x6b, 0x5b, 0xdd, 0x2d, 0x6c, 0xe2, 0x1e, 0x61, 0x32, 0x63, + 0x97, 0xff, 0x8f, 0x87, 0x5f, 0x7e, 0x4d, 0xb4, 0xdf, 0x0a, 0x6e, 0x45, 0x76, 0xd3, 0x10, 0xf6, + 0x92, 0x98, 0xd1, 0xcc, 0x93, 0x3a, 0x8b, 0x12, 0x3a, 0xa9, 0xcf, 0xae, 0xfe, 0x1d, 
0xf1, 0x6a, + 0x91, 0x08, 0x3b, 0xe5, 0x87, 0xd7, 0xbc, 0xbb, 0xbd, 0xda, 0xa9, 0xbe, 0x9a, 0xfe, 0xbb, 0x02, + 0x2e, 0xa6, 0x8e, 0xca, 0x2f, 0xf1, 0x76, 0x2d, 0x25, 0xcf, 0xcb, 0x27, 0x90, 0xe7, 0x6b, 0x60, + 0x46, 0x7c, 0xb0, 0x4d, 0xa8, 0xfb, 0xf0, 0xfd, 0x67, 0x5d, 0xee, 0x46, 0x49, 0x7c, 0xd6, 0xed, + 0x5e, 0xf5, 0x84, 0xb7, 0x7b, 0x71, 0x2f, 0xc4, 0x1f, 0x90, 0xfc, 0xd0, 0x4b, 0x7b, 0x21, 0xfe, + 0x87, 0x94, 0xc4, 0x33, 0x85, 0xe0, 0xb3, 0x86, 0x0c, 0xe3, 0xb2, 0x42, 0xd8, 0x97, 0x7a, 0x51, + 0x02, 0xfd, 0x85, 0x3e, 0x4a, 0xe2, 0x8c, 0x8f, 0x92, 0x2b, 0x45, 0xe2, 0xb9, 0xf8, 0xbb, 0xc9, + 0xdf, 0x14, 0xf0, 0x52, 0x6e, 0x22, 0xc0, 0x35, 0xa9, 0xec, 0xae, 0x24, 0xca, 0xee, 0xb7, 0x72, + 0x0d, 0x63, 0xb5, 0xd7, 0xc9, 0xbe, 0x9a, 0x7b, 0xa7, 0xd8, 0xd5, 0x5c, 0x86, 0x76, 0x1f, 0x7d, + 0x47, 0xd7, 0x5e, 0x79, 0xf2, 0xb4, 0x71, 0xee, 0xe3, 0xa7, 0x8d, 0x73, 0x9f, 0x3e, 0x6d, 0x9c, + 0xfb, 0xd9, 0xb0, 0xa1, 0x3c, 0x19, 0x36, 0x94, 0x8f, 0x87, 0x0d, 0xe5, 0xd3, 0x61, 0x43, 0xf9, + 0xfb, 0xb0, 0xa1, 0xfc, 0xfa, 0xb3, 0xc6, 0xb9, 0x0f, 0xc6, 0xc5, 0x88, 0xff, 0x0b, 0x00, 0x00, + 0xff, 0xff, 0xe4, 0x8f, 0x6a, 0x57, 0x17, 0x29, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.proto b/vendor/k8s.io/api/apps/v1beta2/generated.proto index 1cdc69a75fbb..d8fac403ac8f 100644 --- a/vendor/k8s.io/api/apps/v1beta2/generated.proto +++ b/vendor/k8s.io/api/apps/v1beta2/generated.proto @@ -31,6 +31,8 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1beta2"; +// DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1/ControllerRevision. See the +// release notes for more information. // ControllerRevision implements an immutable snapshot of state data. Clients // are responsible for serializing and deserializing the objects that contain // their internal state. @@ -63,6 +65,8 @@ message ControllerRevisionList { repeated ControllerRevision items = 2; } +// DEPRECATED - This group version of DaemonSet is deprecated by apps/v1/DaemonSet. See the release notes for +// more information. // DaemonSet represents the configuration of a daemon set. message DaemonSet { // Standard object's metadata. @@ -84,6 +88,27 @@ message DaemonSet { optional DaemonSetStatus status = 3; } +// DaemonSetCondition describes the state of a DaemonSet at a certain point. +message DaemonSetCondition { + // Type of DaemonSet condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // Last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. + // +optional + optional string reason = 4; + + // A human readable message indicating details about the transition. + // +optional + optional string message = 5; +} + // DaemonSetList is a collection of daemon sets. message DaemonSetList { // Standard list metadata. @@ -99,9 +124,8 @@ message DaemonSetList { message DaemonSetSpec { // A label query over pods that are managed by the daemon set. // Must match in order to be controlled. - // If empty, defaulted to labels on Pod template. + // It must match the pod template's labels. 
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; // An object that describes the pod that will be created. @@ -175,6 +199,12 @@ message DaemonSetStatus { // create the name for the newest ControllerRevision. // +optional optional int32 collisionCount = 9; + + // Represents the latest available observations of a DaemonSet's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated DaemonSetCondition conditions = 10; } // DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet. @@ -192,6 +222,8 @@ message DaemonSetUpdateStrategy { optional RollingUpdateDaemonSet rollingUpdate = 2; } +// DEPRECATED - This group version of Deployment is deprecated by apps/v1/Deployment. See the release notes for +// more information. // Deployment enables declarative updates for Pods and ReplicaSets. message Deployment { // Standard object metadata. @@ -247,7 +279,7 @@ message DeploymentSpec { // Label selector for pods. Existing ReplicaSets whose pods are // selected by this will be the ones affected by this deployment. - // +optional + // It must match the pod template's labels. optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template describes the pods that will be created. @@ -336,7 +368,9 @@ message DeploymentStrategy { optional RollingUpdateDeployment rollingUpdate = 2; } -// ReplicaSet represents the configuration of a ReplicaSet. +// DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1/ReplicaSet. See the release notes for +// more information. +// ReplicaSet ensures that a specified number of pod replicas are running at any given time. message ReplicaSet { // If the Labels of a ReplicaSet are empty, they are defaulted to // be the same as the Pod(s) that the ReplicaSet manages. @@ -407,10 +441,9 @@ message ReplicaSetSpec { optional int32 minReadySeconds = 4; // Selector is a label query over pods that should match the replica count. - // If the selector is empty, it is defaulted to the labels present on the pod template. // Label keys and values that must match in order to be controlled by this replica set. + // It must match the pod template's labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template is the object that describes the pod that will be created if @@ -549,6 +582,8 @@ message ScaleStatus { optional string targetSelector = 3; } +// DEPRECATED - This group version of StatefulSet is deprecated by apps/v1/StatefulSet. See the release notes for +// more information. // StatefulSet represents a set of pods with consistent identities. // Identities are defined as: // - Network: A single stable DNS and hostname. @@ -569,6 +604,27 @@ message StatefulSet { optional StatefulSetStatus status = 3; } +// StatefulSetCondition describes the state of a statefulset at a certain point. +message StatefulSetCondition { + // Type of statefulset condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // Last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. 
+ // +optional + optional string reason = 4; + + // A human readable message indicating details about the transition. + // +optional + optional string message = 5; +} + // StatefulSetList is a collection of StatefulSets. message StatefulSetList { // +optional @@ -588,9 +644,8 @@ message StatefulSetSpec { optional int32 replicas = 1; // selector is a label query over pods that should match the replica count. - // If empty, defaulted to labels on the pod template. + // It must match the pod template's labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // template is the object that describes the pod that will be created if @@ -673,6 +728,12 @@ message StatefulSetStatus { // newest ControllerRevision. // +optional optional int32 collisionCount = 9; + + // Represents the latest available observations of a statefulset's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated StatefulSetCondition conditions = 10; } // StatefulSetUpdateStrategy indicates the strategy that the StatefulSet diff --git a/vendor/k8s.io/api/apps/v1beta2/register.go b/vendor/k8s.io/api/apps/v1beta2/register.go index e207c7dc5a9a..2784ee37712e 100644 --- a/vendor/k8s.io/api/apps/v1beta2/register.go +++ b/vendor/k8s.io/api/apps/v1beta2/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Deployment{}, diff --git a/vendor/k8s.io/api/apps/v1beta2/types.go b/vendor/k8s.io/api/apps/v1beta2/types.go index 925191586a32..c8be2aacaeb4 100644 --- a/vendor/k8s.io/api/apps/v1beta2/types.go +++ b/vendor/k8s.io/api/apps/v1beta2/types.go @@ -28,6 +28,7 @@ const ( StatefulSetRevisionLabel = ControllerRevisionHashLabelKey DeprecatedRollbackTo = "deprecated.deployment.rollback.to" DeprecatedTemplateGeneration = "deprecated.daemonset.template.generation" + StatefulSetPodNameLabel = "statefulset.kubernetes.io/pod-name" ) // ScaleSpec describes the attributes of a scale subresource @@ -81,6 +82,8 @@ type Scale struct { // +genclient:method=UpdateScale,verb=update,subresource=scale,input=Scale,result=Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// DEPRECATED - This group version of StatefulSet is deprecated by apps/v1/StatefulSet. See the release notes for +// more information. // StatefulSet represents a set of pods with consistent identities. // Identities are defined as: // - Network: A single stable DNS and hostname. @@ -169,10 +172,9 @@ type StatefulSetSpec struct { Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` // selector is a label query over pods that should match the replica count. - // If empty, defaulted to labels on the pod template. + // It must match the pod template's labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - // +optional - Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` + Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"` // template is the object that describes the pod that will be created if // insufficient replicas are detected. 
Each pod stamped out by the StatefulSet @@ -254,6 +256,31 @@ type StatefulSetStatus struct { // newest ControllerRevision. // +optional CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"` + + // Represents the latest available observations of a statefulset's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []StatefulSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` +} + +type StatefulSetConditionType string + +// StatefulSetCondition describes the state of a statefulset at a certain point. +type StatefulSetCondition struct { + // Type of statefulset condition. + Type StatefulSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=StatefulSetConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // Last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -269,6 +296,8 @@ type StatefulSetList struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// DEPRECATED - This group version of Deployment is deprecated by apps/v1/Deployment. See the release notes for +// more information. // Deployment enables declarative updates for Pods and ReplicaSets. type Deployment struct { metav1.TypeMeta `json:",inline"` @@ -294,8 +323,8 @@ type DeploymentSpec struct { // Label selector for pods. Existing ReplicaSets whose pods are // selected by this will be the ones affected by this deployment. - // +optional - Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` + // It must match the pod template's labels. + Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"` // Template describes the pods that will be created. Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` @@ -525,10 +554,9 @@ type RollingUpdateDaemonSet struct { type DaemonSetSpec struct { // A label query over pods that are managed by the daemon set. // Must match in order to be controlled. - // If empty, defaulted to labels on Pod template. + // It must match the pod template's labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - // +optional - Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,1,opt,name=selector"` + Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,1,opt,name=selector"` // An object that describes the pod that will be created. // The DaemonSet will create exactly one copy of this pod on every node @@ -601,11 +629,40 @@ type DaemonSetStatus struct { // create the name for the newest ControllerRevision. 
// +optional CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"` + + // Represents the latest available observations of a DaemonSet's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []DaemonSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` +} + +type DaemonSetConditionType string + +// TODO: Add valid condition types of a DaemonSet. + +// DaemonSetCondition describes the state of a DaemonSet at a certain point. +type DaemonSetCondition struct { + // Type of DaemonSet condition. + Type DaemonSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DaemonSetConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // Last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` } // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// DEPRECATED - This group version of DaemonSet is deprecated by apps/v1/DaemonSet. See the release notes for +// more information. // DaemonSet represents the configuration of a daemon set. type DaemonSet struct { metav1.TypeMeta `json:",inline"` @@ -629,12 +686,6 @@ type DaemonSet struct { } const ( - // DEPRECATED: DefaultDaemonSetUniqueLabelKey is used instead. - // DaemonSetTemplateGenerationKey is the key of the labels that is added - // to daemon set pods to distinguish between old and new pod templates - // during DaemonSet template update. - DaemonSetTemplateGenerationKey string = "pod-template-generation" - // DefaultDaemonSetUniqueLabelKey is the default label key that is added // to existing DaemonSet pods to distinguish between old and new // DaemonSet pods during DaemonSet template updates. @@ -658,7 +709,9 @@ type DaemonSetList struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// ReplicaSet represents the configuration of a ReplicaSet. +// DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1/ReplicaSet. See the release notes for +// more information. +// ReplicaSet ensures that a specified number of pod replicas are running at any given time. type ReplicaSet struct { metav1.TypeMeta `json:",inline"` @@ -713,11 +766,10 @@ type ReplicaSetSpec struct { MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"` // Selector is a label query over pods that should match the replica count. - // If the selector is empty, it is defaulted to the labels present on the pod template. // Label keys and values that must match in order to be controlled by this replica set. + // It must match the pod template's labels. 
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - // +optional - Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` + Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"` // Template is the object that describes the pod that will be created if // insufficient replicas are detected. @@ -785,6 +837,8 @@ type ReplicaSetCondition struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1/ControllerRevision. See the +// release notes for more information. // ControllerRevision implements an immutable snapshot of state data. Clients // are responsible for serializing and deserializing the objects that contain // their internal state. diff --git a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go index 65730c71ecc5..e2f133b5181f 100644 --- a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go @@ -28,7 +28,7 @@ package v1beta2 // AUTO-GENERATED FUNCTIONS START HERE var map_ControllerRevision = map[string]string{ - "": "ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.", + "": "DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1/ControllerRevision. See the release notes for more information. ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "data": "Data is the serialized representation of the state.", "revision": "Revision indicates the revision of the state represented by Data.", @@ -49,7 +49,7 @@ func (ControllerRevisionList) SwaggerDoc() map[string]string { } var map_DaemonSet = map[string]string{ - "": "DaemonSet represents the configuration of a daemon set.", + "": "DEPRECATED - This group version of DaemonSet is deprecated by apps/v1/DaemonSet. See the release notes for more information. 
DaemonSet represents the configuration of a daemon set.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "spec": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", "status": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", @@ -59,6 +59,19 @@ func (DaemonSet) SwaggerDoc() map[string]string { return map_DaemonSet } +var map_DaemonSetCondition = map[string]string{ + "": "DaemonSetCondition describes the state of a DaemonSet at a certain point.", + "type": "Type of DaemonSet condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (DaemonSetCondition) SwaggerDoc() map[string]string { + return map_DaemonSetCondition +} + var map_DaemonSetList = map[string]string{ "": "DaemonSetList is a collection of daemon sets.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", @@ -71,7 +84,7 @@ func (DaemonSetList) SwaggerDoc() map[string]string { var map_DaemonSetSpec = map[string]string{ "": "DaemonSetSpec is the specification of a daemon set.", - "selector": "A label query over pods that are managed by the daemon set. Must match in order to be controlled. If empty, defaulted to labels on Pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "selector": "A label query over pods that are managed by the daemon set. Must match in order to be controlled. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", "template": "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", "updateStrategy": "An update strategy to replace existing DaemonSet pods with new pods.", "minReadySeconds": "The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).", @@ -93,6 +106,7 @@ var map_DaemonSetStatus = map[string]string{ "numberAvailable": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds)", "numberUnavailable": "The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds)", "collisionCount": "Count of hash collisions for the DaemonSet. 
The DaemonSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", + "conditions": "Represents the latest available observations of a DaemonSet's current state.", } func (DaemonSetStatus) SwaggerDoc() map[string]string { @@ -110,7 +124,7 @@ func (DaemonSetUpdateStrategy) SwaggerDoc() map[string]string { } var map_Deployment = map[string]string{ - "": "Deployment enables declarative updates for Pods and ReplicaSets.", + "": "DEPRECATED - This group version of Deployment is deprecated by apps/v1/Deployment. See the release notes for more information. Deployment enables declarative updates for Pods and ReplicaSets.", "metadata": "Standard object metadata.", "spec": "Specification of the desired behavior of the Deployment.", "status": "Most recently observed status of the Deployment.", @@ -147,7 +161,7 @@ func (DeploymentList) SwaggerDoc() map[string]string { var map_DeploymentSpec = map[string]string{ "": "DeploymentSpec is the specification of the desired behavior of the Deployment.", "replicas": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", - "selector": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment.", + "selector": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment. It must match the pod template's labels.", "template": "Template describes the pods that will be created.", "strategy": "The deployment strategy to use to replace existing pods with new ones.", "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", @@ -187,7 +201,7 @@ func (DeploymentStrategy) SwaggerDoc() map[string]string { } var map_ReplicaSet = map[string]string{ - "": "ReplicaSet represents the configuration of a ReplicaSet.", + "": "DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1/ReplicaSet. See the release notes for more information. ReplicaSet ensures that a specified number of pod replicas are running at any given time.", "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", @@ -224,7 +238,7 @@ var map_ReplicaSetSpec = map[string]string{ "": "ReplicaSetSpec is the specification of a ReplicaSet.", "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", - "selector": "Selector is a label query over pods that should match the replica count. If the selector is empty, it is defaulted to the labels present on the pod template. Label keys and values that must match in order to be controlled by this replica set. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "selector": "Selector is a label query over pods that should match the replica count. Label keys and values that must match in order to be controlled by this replica set. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", } @@ -306,7 +320,7 @@ func (ScaleStatus) SwaggerDoc() map[string]string { } var map_StatefulSet = map[string]string{ - "": "StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", + "": "DEPRECATED - This group version of StatefulSet is deprecated by apps/v1/StatefulSet. See the release notes for more information. StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", "spec": "Spec defines the desired identities of pods in this set.", "status": "Status is the current status of Pods in this StatefulSet. This data may be out of date by some window of time.", } @@ -315,6 +329,19 @@ func (StatefulSet) SwaggerDoc() map[string]string { return map_StatefulSet } +var map_StatefulSetCondition = map[string]string{ + "": "StatefulSetCondition describes the state of a statefulset at a certain point.", + "type": "Type of statefulset condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (StatefulSetCondition) SwaggerDoc() map[string]string { + return map_StatefulSetCondition +} + var map_StatefulSetList = map[string]string{ "": "StatefulSetList is a collection of StatefulSets.", } @@ -326,7 +353,7 @@ func (StatefulSetList) SwaggerDoc() map[string]string { var map_StatefulSetSpec = map[string]string{ "": "A StatefulSetSpec is the specification of a StatefulSet.", "replicas": "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. 
If unspecified, defaults to 1.", - "selector": "selector is a label query over pods that should match the replica count. If empty, defaulted to labels on the pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "selector": "selector is a label query over pods that should match the replica count. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.", "volumeClaimTemplates": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.", "serviceName": "serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", @@ -349,6 +376,7 @@ var map_StatefulSetStatus = map[string]string{ "currentRevision": "currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas).", "updateRevision": "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", "collisionCount": "collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", + "conditions": "Represents the latest available observations of a statefulset's current state.", } func (StatefulSetStatus) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go index 124f84a9d4ef..d25c869bb7f7 100644 --- a/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go @@ -23,141 +23,10 @@ package v1beta2 import ( core_v1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" intstr "k8s.io/apimachinery/pkg/util/intstr" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ControllerRevision).DeepCopyInto(out.(*ControllerRevision)) - return nil - }, InType: reflect.TypeOf(&ControllerRevision{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ControllerRevisionList).DeepCopyInto(out.(*ControllerRevisionList)) - return nil - }, InType: reflect.TypeOf(&ControllerRevisionList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSet).DeepCopyInto(out.(*DaemonSet)) - return nil - }, InType: reflect.TypeOf(&DaemonSet{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSetList).DeepCopyInto(out.(*DaemonSetList)) - return nil - }, InType: reflect.TypeOf(&DaemonSetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSetSpec).DeepCopyInto(out.(*DaemonSetSpec)) - return nil - }, InType: reflect.TypeOf(&DaemonSetSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSetStatus).DeepCopyInto(out.(*DaemonSetStatus)) - return nil - }, InType: reflect.TypeOf(&DaemonSetStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSetUpdateStrategy).DeepCopyInto(out.(*DaemonSetUpdateStrategy)) - return nil - }, InType: reflect.TypeOf(&DaemonSetUpdateStrategy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Deployment).DeepCopyInto(out.(*Deployment)) - return nil - }, InType: reflect.TypeOf(&Deployment{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentCondition).DeepCopyInto(out.(*DeploymentCondition)) - return nil - }, InType: reflect.TypeOf(&DeploymentCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentList).DeepCopyInto(out.(*DeploymentList)) - return nil - }, InType: reflect.TypeOf(&DeploymentList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentSpec).DeepCopyInto(out.(*DeploymentSpec)) - return nil - }, InType: reflect.TypeOf(&DeploymentSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentStatus).DeepCopyInto(out.(*DeploymentStatus)) - return nil - }, InType: reflect.TypeOf(&DeploymentStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentStrategy).DeepCopyInto(out.(*DeploymentStrategy)) - return nil - }, InType: reflect.TypeOf(&DeploymentStrategy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicaSet).DeepCopyInto(out.(*ReplicaSet)) - return nil - }, InType: reflect.TypeOf(&ReplicaSet{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicaSetCondition).DeepCopyInto(out.(*ReplicaSetCondition)) - return nil - }, InType: 
reflect.TypeOf(&ReplicaSetCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicaSetList).DeepCopyInto(out.(*ReplicaSetList)) - return nil - }, InType: reflect.TypeOf(&ReplicaSetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicaSetSpec).DeepCopyInto(out.(*ReplicaSetSpec)) - return nil - }, InType: reflect.TypeOf(&ReplicaSetSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicaSetStatus).DeepCopyInto(out.(*ReplicaSetStatus)) - return nil - }, InType: reflect.TypeOf(&ReplicaSetStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RollingUpdateDaemonSet).DeepCopyInto(out.(*RollingUpdateDaemonSet)) - return nil - }, InType: reflect.TypeOf(&RollingUpdateDaemonSet{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RollingUpdateDeployment).DeepCopyInto(out.(*RollingUpdateDeployment)) - return nil - }, InType: reflect.TypeOf(&RollingUpdateDeployment{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RollingUpdateStatefulSetStrategy).DeepCopyInto(out.(*RollingUpdateStatefulSetStrategy)) - return nil - }, InType: reflect.TypeOf(&RollingUpdateStatefulSetStrategy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Scale).DeepCopyInto(out.(*Scale)) - return nil - }, InType: reflect.TypeOf(&Scale{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ScaleSpec).DeepCopyInto(out.(*ScaleSpec)) - return nil - }, InType: reflect.TypeOf(&ScaleSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ScaleStatus).DeepCopyInto(out.(*ScaleStatus)) - return nil - }, InType: reflect.TypeOf(&ScaleStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StatefulSet).DeepCopyInto(out.(*StatefulSet)) - return nil - }, InType: reflect.TypeOf(&StatefulSet{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StatefulSetList).DeepCopyInto(out.(*StatefulSetList)) - return nil - }, InType: reflect.TypeOf(&StatefulSetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StatefulSetSpec).DeepCopyInto(out.(*StatefulSetSpec)) - return nil - }, InType: reflect.TypeOf(&StatefulSetSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StatefulSetStatus).DeepCopyInto(out.(*StatefulSetStatus)) - return nil - }, InType: reflect.TypeOf(&StatefulSetStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StatefulSetUpdateStrategy).DeepCopyInto(out.(*StatefulSetUpdateStrategy)) - return nil - }, InType: reflect.TypeOf(&StatefulSetUpdateStrategy{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ControllerRevision) DeepCopyInto(out *ControllerRevision) { *out = *in @@ -249,6 +118,23 @@ func (in *DaemonSet) DeepCopyObject() runtime.Object { } } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetCondition) DeepCopyInto(out *DaemonSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetCondition. +func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition { + if in == nil { + return nil + } + out := new(DaemonSetCondition) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) { *out = *in @@ -331,6 +217,13 @@ func (in *DaemonSetStatus) DeepCopyInto(out *DaemonSetStatus) { **out = **in } } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DaemonSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -880,6 +773,23 @@ func (in *StatefulSet) DeepCopyObject() runtime.Object { } } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetCondition) DeepCopyInto(out *StatefulSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetCondition. +func (in *StatefulSetCondition) DeepCopy() *StatefulSetCondition { + if in == nil { + return nil + } + out := new(StatefulSetCondition) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) { *out = *in @@ -978,6 +888,13 @@ func (in *StatefulSetStatus) DeepCopyInto(out *StatefulSetStatus) { **out = **in } } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]StatefulSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } diff --git a/vendor/k8s.io/api/authentication/v1/BUILD b/vendor/k8s.io/api/authentication/v1/BUILD index 745ff2609dae..f2e2b1a989c1 100644 --- a/vendor/k8s.io/api/authentication/v1/BUILD +++ b/vendor/k8s.io/api/authentication/v1/BUILD @@ -15,11 +15,11 @@ go_library( "types_swagger_doc_generated.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/api/authentication/v1", deps = [ "//vendor/github.com/gogo/protobuf/proto:go_default_library", "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], diff --git a/vendor/k8s.io/api/authentication/v1/doc.go b/vendor/k8s.io/api/authentication/v1/doc.go index b354f2da18f8..55cffaa1a88c 100644 --- a/vendor/k8s.io/api/authentication/v1/doc.go +++ b/vendor/k8s.io/api/authentication/v1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +groupName=authentication.k8s.io // +k8s:openapi-gen=true package v1 diff --git a/vendor/k8s.io/api/authentication/v1/register.go b/vendor/k8s.io/api/authentication/v1/register.go index 936237c2be8d..2ca79a620a55 100644 --- a/vendor/k8s.io/api/authentication/v1/register.go +++ b/vendor/k8s.io/api/authentication/v1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &TokenReview{}, diff --git a/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go index c1717c1cd862..243de7590fc4 100644 --- a/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go @@ -21,40 +21,9 @@ limitations under the License. package v1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*TokenReview).DeepCopyInto(out.(*TokenReview)) - return nil - }, InType: reflect.TypeOf(&TokenReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*TokenReviewSpec).DeepCopyInto(out.(*TokenReviewSpec)) - return nil - }, InType: reflect.TypeOf(&TokenReviewSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*TokenReviewStatus).DeepCopyInto(out.(*TokenReviewStatus)) - return nil - }, InType: reflect.TypeOf(&TokenReviewStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*UserInfo).DeepCopyInto(out.(*UserInfo)) - return nil - }, InType: reflect.TypeOf(&UserInfo{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *TokenReview) DeepCopyInto(out *TokenReview) { *out = *in diff --git a/vendor/k8s.io/api/authentication/v1beta1/BUILD b/vendor/k8s.io/api/authentication/v1beta1/BUILD index 745ff2609dae..998d793fef71 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/BUILD +++ b/vendor/k8s.io/api/authentication/v1beta1/BUILD @@ -15,11 +15,11 @@ go_library( "types_swagger_doc_generated.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/api/authentication/v1beta1", deps = [ "//vendor/github.com/gogo/protobuf/proto:go_default_library", "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], diff --git a/vendor/k8s.io/api/authentication/v1beta1/doc.go b/vendor/k8s.io/api/authentication/v1beta1/doc.go index c2e3656405ab..0efef36703d5 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/doc.go +++ b/vendor/k8s.io/api/authentication/v1beta1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +groupName=authentication.k8s.io // +k8s:openapi-gen=true package v1beta1 diff --git a/vendor/k8s.io/api/authentication/v1beta1/register.go b/vendor/k8s.io/api/authentication/v1beta1/register.go index 7d89e5ef70cd..ed23e50f7e95 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/register.go +++ b/vendor/k8s.io/api/authentication/v1beta1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &TokenReview{}, diff --git a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go index 1b94ececa1f2..aa8d2ef3adb9 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go @@ -21,40 +21,9 @@ limitations under the License. package v1beta1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*TokenReview).DeepCopyInto(out.(*TokenReview)) - return nil - }, InType: reflect.TypeOf(&TokenReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*TokenReviewSpec).DeepCopyInto(out.(*TokenReviewSpec)) - return nil - }, InType: reflect.TypeOf(&TokenReviewSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*TokenReviewStatus).DeepCopyInto(out.(*TokenReviewStatus)) - return nil - }, InType: reflect.TypeOf(&TokenReviewStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*UserInfo).DeepCopyInto(out.(*UserInfo)) - return nil - }, InType: reflect.TypeOf(&UserInfo{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TokenReview) DeepCopyInto(out *TokenReview) { *out = *in diff --git a/vendor/k8s.io/api/authorization/v1/BUILD b/vendor/k8s.io/api/authorization/v1/BUILD index 745ff2609dae..af9e74a63560 100644 --- a/vendor/k8s.io/api/authorization/v1/BUILD +++ b/vendor/k8s.io/api/authorization/v1/BUILD @@ -15,11 +15,11 @@ go_library( "types_swagger_doc_generated.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/api/authorization/v1", deps = [ "//vendor/github.com/gogo/protobuf/proto:go_default_library", "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], diff --git a/vendor/k8s.io/api/authorization/v1/doc.go b/vendor/k8s.io/api/authorization/v1/doc.go index b2908215a16d..6fa0c3cbdd4c 100644 --- a/vendor/k8s.io/api/authorization/v1/doc.go +++ b/vendor/k8s.io/api/authorization/v1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true // +groupName=authorization.k8s.io diff --git a/vendor/k8s.io/api/authorization/v1/generated.pb.go b/vendor/k8s.io/api/authorization/v1/generated.pb.go index 0795a6454920..d34d105f4c41 100644 --- a/vendor/k8s.io/api/authorization/v1/generated.pb.go +++ b/vendor/k8s.io/api/authorization/v1/generated.pb.go @@ -730,6 +730,14 @@ func (m *SubjectAccessReviewStatus) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) i += copy(dAtA[i:], m.EvaluationError) + dAtA[i] = 0x20 + i++ + if m.Denied { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ return i, nil } @@ -1015,6 +1023,7 @@ func (m *SubjectAccessReviewStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.EvaluationError) n += 1 + l + sovGenerated(uint64(l)) + n += 2 return n } @@ -1205,6 +1214,7 @@ func (this *SubjectAccessReviewStatus) String() string { `Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `EvaluationError:` + fmt.Sprintf("%v", this.EvaluationError) + `,`, + `Denied:` + fmt.Sprintf("%v", this.Denied) + `,`, `}`, }, "") return s @@ -3130,6 +3140,26 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } m.EvaluationError = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Denied", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Denied = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -3422,77 +3452,77 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 1137 bytes of a gzipped FileDescriptorProto + // 1152 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0x4f, 0x6f, 0x1b, 0x45, - 0x14, 0xf7, 0xae, 0xed, 0xc4, 0x1e, 0x37, 0x24, 0x9d, 0x28, 0xcd, 0x36, 0x15, 0x76, 0xb4, 0x48, - 0x90, 0x8a, 0xb2, 0x4b, 0x4c, 0xdb, 0x44, 0x95, 0x2a, 0x14, 0xab, 0x11, 0x8a, 0xd4, 0x96, 0x6a, + 0x14, 0xf7, 0xae, 0xed, 0xd4, 0x1e, 0x37, 0x24, 0x9d, 0x28, 0xcd, 0x36, 0x15, 0x76, 0xb4, 0x48, + 0x90, 0x8a, 0xb2, 0x4b, 0x4c, 0xdb, 0x44, 0x95, 0x2a, 0x14, 0x2b, 0x11, 0x8a, 0xd4, 0x96, 0x6a, 0xa2, 0x44, 0xa2, 0x08, 0xc4, 0x78, 0x3d, 0xb1, 0x97, 0xd8, 0xbb, 0xcb, 0xcc, 0xac, 0x43, 0x38, - 0x55, 0xe2, 0x0b, 0x70, 0xe4, 0xc0, 0x81, 0x6f, 0x80, 0x90, 0x90, 0xb8, 0x71, 0xe0, 0x80, 0x72, - 0xec, 0xb1, 0x07, 0x64, 0x91, 0xe5, 0xcc, 0x77, 0x40, 0x33, 0x3b, 0xf6, 0xae, 0x93, 0xb5, 0x9b, - 0x70, 0xa0, 0x97, 0xde, 0x76, 0xdf, 0xef, 0xf7, 0xfe, 0xcc, 0x7b, 0x6f, 0xde, 0x3c, 0xf0, 0xe0, - 0x70, 0x93, 0x59, 0xae, 0x6f, 0x1f, 0x86, 0x4d, 0x42, 0x3d, 0xc2, 0x09, 0xb3, 0xfb, 0xc4, 0x6b, - 0xf9, 0xd4, 0x56, 0x00, 0x0e, 0x5c, 0x1b, 0x87, 0xbc, 0xe3, 0x53, 0xf7, 0x1b, 0xcc, 0x5d, 0xdf, - 0xb3, 0xfb, 0xeb, 0x76, 0x9b, 0x78, 0x84, 0x62, 0x4e, 0x5a, 0x56, 0x40, 0x7d, 0xee, 0xc3, 0x1b, - 0x31, 0xd9, 0xc2, 0x81, 0x6b, 0x8d, 0x91, 0xad, 0xfe, 0xfa, 0xca, 0x7b, 0x6d, 0x97, 0x77, 0xc2, - 0xa6, 0xe5, 0xf8, 0x3d, 0xbb, 0xed, 0xb7, 0x7d, 0x5b, 0xea, 0x34, 0xc3, 0x03, 0xf9, 0x27, 0x7f, - 0xe4, 0x57, 0x6c, 0x6b, 0xe5, 0x76, 0xe2, 0xb8, 0x87, 0x9d, 0x8e, 0xeb, 0x11, 0x7a, 0x6c, 0x07, - 0x87, 0x6d, 0x21, 0x60, 0x76, 0x8f, 0x70, 0x9c, 0x11, 0xc1, 
0x8a, 0x3d, 0x49, 0x8b, 0x86, 0x1e, - 0x77, 0x7b, 0xe4, 0x9c, 0xc2, 0xdd, 0x97, 0x29, 0x30, 0xa7, 0x43, 0x7a, 0xf8, 0x9c, 0xde, 0x07, - 0x93, 0xf4, 0x42, 0xee, 0x76, 0x6d, 0xd7, 0xe3, 0x8c, 0xd3, 0xb3, 0x4a, 0xe6, 0x06, 0x00, 0xdb, - 0x5f, 0x73, 0x8a, 0xf7, 0x71, 0x37, 0x24, 0xb0, 0x06, 0x8a, 0x2e, 0x27, 0x3d, 0x66, 0x68, 0xab, - 0xf9, 0xb5, 0x72, 0xa3, 0x1c, 0x0d, 0x6a, 0xc5, 0x1d, 0x21, 0x40, 0xb1, 0xfc, 0x5e, 0xe9, 0xfb, - 0x1f, 0x6b, 0xb9, 0x67, 0x7f, 0xae, 0xe6, 0xcc, 0x5f, 0x74, 0x60, 0x3c, 0xf4, 0x1d, 0xdc, 0xdd, - 0x0d, 0x9b, 0x5f, 0x12, 0x87, 0x6f, 0x39, 0x0e, 0x61, 0x0c, 0x91, 0xbe, 0x4b, 0x8e, 0xe0, 0x17, - 0xa0, 0x24, 0xd2, 0xd1, 0xc2, 0x1c, 0x1b, 0xda, 0xaa, 0xb6, 0x56, 0xa9, 0xbf, 0x6f, 0x25, 0x85, - 0x18, 0x45, 0x67, 0x05, 0x87, 0x6d, 0x21, 0x60, 0x96, 0x60, 0x5b, 0xfd, 0x75, 0xeb, 0x63, 0x69, - 0xeb, 0x11, 0xe1, 0xb8, 0x01, 0x4f, 0x06, 0xb5, 0x5c, 0x34, 0xa8, 0x81, 0x44, 0x86, 0x46, 0x56, - 0xe1, 0x3e, 0x28, 0xb0, 0x80, 0x38, 0x86, 0x2e, 0xad, 0xdf, 0xb6, 0xa6, 0x94, 0xd9, 0xca, 0x88, - 0x70, 0x37, 0x20, 0x4e, 0xe3, 0x8a, 0xf2, 0x50, 0x10, 0x7f, 0x48, 0xda, 0x83, 0x9f, 0x83, 0x19, - 0xc6, 0x31, 0x0f, 0x99, 0x91, 0x97, 0x96, 0xef, 0x5e, 0xda, 0xb2, 0xd4, 0x6e, 0xbc, 0xa1, 0x6c, - 0xcf, 0xc4, 0xff, 0x48, 0x59, 0x35, 0x3f, 0x05, 0x4b, 0x8f, 0x7d, 0x0f, 0x11, 0xe6, 0x87, 0xd4, - 0x21, 0x5b, 0x9c, 0x53, 0xb7, 0x19, 0x72, 0xc2, 0xe0, 0x2a, 0x28, 0x04, 0x98, 0x77, 0x64, 0xba, - 0xca, 0x49, 0x68, 0x4f, 0x30, 0xef, 0x20, 0x89, 0x08, 0x46, 0x9f, 0xd0, 0xa6, 0x3c, 0x72, 0x8a, - 0xb1, 0x4f, 0x68, 0x13, 0x49, 0xc4, 0xfc, 0x0a, 0xcc, 0xa7, 0x8c, 0xa3, 0xb0, 0x2b, 0x2b, 0x2a, - 0xa0, 0xb1, 0x8a, 0x0a, 0x0d, 0x86, 0x62, 0x39, 0xbc, 0x0f, 0xe6, 0xbd, 0x44, 0x67, 0x0f, 0x3d, - 0x64, 0x86, 0x2e, 0xa9, 0x8b, 0xd1, 0xa0, 0x96, 0x36, 0x27, 0x20, 0x74, 0x96, 0x6b, 0xfe, 0xa6, - 0x03, 0x98, 0x71, 0x1a, 0x1b, 0x94, 0x3d, 0xdc, 0x23, 0x2c, 0xc0, 0x0e, 0x51, 0x47, 0xba, 0xaa, - 0x02, 0x2e, 0x3f, 0x1e, 0x02, 0x28, 0xe1, 0xbc, 0xfc, 0x70, 0xf0, 0x2d, 0x50, 0x6c, 0x53, 0x3f, - 0x0c, 0x64, 0x61, 0xca, 0x8d, 0x39, 0x45, 0x29, 0x7e, 0x24, 0x84, 0x28, 0xc6, 0xe0, 0x4d, 0x30, - 0xdb, 0x27, 0x94, 0xb9, 0xbe, 0x67, 0x14, 0x24, 0x6d, 0x5e, 0xd1, 0x66, 0xf7, 0x63, 0x31, 0x1a, - 0xe2, 0xf0, 0x16, 0x28, 0x51, 0x15, 0xb8, 0x51, 0x94, 0xdc, 0x05, 0xc5, 0x2d, 0x8d, 0x32, 0x38, - 0x62, 0xc0, 0x3b, 0xa0, 0xc2, 0xc2, 0xe6, 0x48, 0x61, 0x46, 0x2a, 0x2c, 0x2a, 0x85, 0xca, 0x6e, - 0x02, 0xa1, 0x34, 0x4f, 0x1c, 0x4b, 0x9c, 0xd1, 0x98, 0x1d, 0x3f, 0x96, 0x48, 0x01, 0x92, 0x88, - 0xf9, 0xbb, 0x06, 0xae, 0x5c, 0xae, 0x62, 0xef, 0x82, 0x32, 0x0e, 0x5c, 0x79, 0xec, 0x61, 0xad, - 0xe6, 0x44, 0x5e, 0xb7, 0x9e, 0xec, 0xc4, 0x42, 0x94, 0xe0, 0x82, 0x3c, 0x0c, 0x46, 0xb4, 0xf4, - 0x88, 0x3c, 0x74, 0xc9, 0x50, 0x82, 0xc3, 0x0d, 0x30, 0x37, 0xfc, 0x91, 0x45, 0x32, 0x0a, 0x52, - 0xe1, 0x6a, 0x34, 0xa8, 0xcd, 0xa1, 0x34, 0x80, 0xc6, 0x79, 0xe6, 0xaf, 0x3a, 0x58, 0xde, 0x25, - 0xdd, 0x83, 0x57, 0x33, 0x0b, 0x9e, 0x8e, 0xcd, 0x82, 0xcd, 0xe9, 0x37, 0x36, 0x3b, 0xca, 0x57, - 0x36, 0x0f, 0x7e, 0xd0, 0xc1, 0x8d, 0x29, 0x31, 0xc1, 0x23, 0x00, 0xe9, 0xb9, 0xeb, 0xa5, 0xf2, - 0x68, 0x4f, 0x8d, 0xe5, 0xfc, 0xad, 0x6c, 0x5c, 0x8b, 0x06, 0xb5, 0x8c, 0xdb, 0x8a, 0x32, 0x5c, - 0xc0, 0x6f, 0x35, 0xb0, 0xe4, 0x65, 0x4d, 0x2a, 0x95, 0xe6, 0xfa, 0x54, 0xe7, 0x99, 0x33, 0xae, - 0x71, 0x3d, 0x1a, 0xd4, 0xb2, 0xc7, 0x1f, 0xca, 0xf6, 0x25, 0x5e, 0x99, 0x6b, 0xa9, 0xf4, 0x88, - 0x0b, 0xf2, 0xff, 0xf5, 0xd5, 0x27, 0x63, 0x7d, 0xb5, 0x71, 0xd1, 0xbe, 0x4a, 0x05, 0x39, 0xb1, - 0xad, 0x3e, 0x3b, 0xd3, 0x56, 0x77, 0x2e, 0xd2, 0x56, 0x69, 0xc3, 0xd3, 0xbb, 0xea, 
0x11, 0x58, - 0x99, 0x1c, 0xd0, 0xa5, 0x87, 0xb3, 0xf9, 0x93, 0x0e, 0x16, 0x5f, 0x3f, 0xf3, 0x97, 0xb9, 0xd6, - 0x7f, 0x14, 0xc0, 0xf2, 0xeb, 0x2b, 0x3d, 0x69, 0xd1, 0x09, 0x19, 0xa1, 0xea, 0x19, 0x1f, 0x15, - 0x67, 0x8f, 0x11, 0x8a, 0x24, 0x02, 0x4d, 0x30, 0xd3, 0x8e, 0x5f, 0xb7, 0xf8, 0xfd, 0x01, 0x22, - 0xc1, 0xea, 0x69, 0x53, 0x08, 0x6c, 0x81, 0x22, 0x11, 0x7b, 0xab, 0x51, 0x5c, 0xcd, 0xaf, 0x55, - 0xea, 0x1f, 0xfe, 0x97, 0xce, 0xb0, 0xe4, 0xe6, 0xbb, 0xed, 0x71, 0x7a, 0x9c, 0xac, 0x13, 0x52, - 0x86, 0x62, 0xe3, 0xf0, 0x4d, 0x90, 0x0f, 0xdd, 0x96, 0x7a, 0xed, 0x2b, 0x8a, 0x92, 0xdf, 0xdb, - 0x79, 0x80, 0x84, 0x7c, 0x05, 0xab, 0xe5, 0x59, 0x9a, 0x80, 0x0b, 0x20, 0x7f, 0x48, 0x8e, 0xe3, - 0x0b, 0x85, 0xc4, 0x27, 0xbc, 0x0f, 0x8a, 0x7d, 0xb1, 0x57, 0xab, 0xfc, 0xbe, 0x33, 0x35, 0xc8, - 0x64, 0x0d, 0x47, 0xb1, 0xd6, 0x3d, 0x7d, 0x53, 0x33, 0x7f, 0xd6, 0xc0, 0xf5, 0x89, 0xed, 0x27, - 0xd6, 0x1d, 0xdc, 0xed, 0xfa, 0x47, 0xa4, 0x25, 0xdd, 0x96, 0x92, 0x75, 0x67, 0x2b, 0x16, 0xa3, - 0x21, 0x0e, 0xdf, 0x06, 0x33, 0x94, 0x60, 0xe6, 0x7b, 0x6a, 0xc5, 0x1a, 0x75, 0x2e, 0x92, 0x52, - 0xa4, 0x50, 0xb8, 0x05, 0xe6, 0x89, 0x70, 0x2f, 0xe3, 0xda, 0xa6, 0xd4, 0x1f, 0x56, 0x6a, 0x59, - 0x29, 0xcc, 0x6f, 0x8f, 0xc3, 0xe8, 0x2c, 0xdf, 0xfc, 0x47, 0x07, 0xc6, 0xa4, 0x91, 0x05, 0x0f, - 0x92, 0x1d, 0x43, 0x82, 0x72, 0xcd, 0xa9, 0xd4, 0x6f, 0x5e, 0xa8, 0xf1, 0x85, 0x46, 0x63, 0x49, - 0x05, 0x32, 0x97, 0x96, 0xa6, 0x56, 0x12, 0xf9, 0x0b, 0x29, 0x58, 0xf0, 0xc6, 0x77, 0xe1, 0x78, - 0x59, 0xaa, 0xd4, 0x6f, 0x5d, 0xb4, 0xcd, 0xa5, 0x37, 0x43, 0x79, 0x5b, 0x38, 0x03, 0x30, 0x74, - 0xce, 0x3e, 0xac, 0x03, 0xe0, 0x7a, 0x8e, 0xdf, 0x0b, 0xba, 0x84, 0x13, 0x99, 0xb6, 0x52, 0x32, - 0xdf, 0x76, 0x46, 0x08, 0x4a, 0xb1, 0xb2, 0xf2, 0x5d, 0xb8, 0x5c, 0xbe, 0x1b, 0x6b, 0x27, 0xa7, - 0xd5, 0xdc, 0xf3, 0xd3, 0x6a, 0xee, 0xc5, 0x69, 0x35, 0xf7, 0x2c, 0xaa, 0x6a, 0x27, 0x51, 0x55, - 0x7b, 0x1e, 0x55, 0xb5, 0x17, 0x51, 0x55, 0xfb, 0x2b, 0xaa, 0x6a, 0xdf, 0xfd, 0x5d, 0xcd, 0x3d, - 0xd5, 0xfb, 0xeb, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0xd9, 0x3f, 0xd9, 0x21, 0x54, 0x0f, 0x00, - 0x00, + 0x55, 0xe2, 0x0b, 0x70, 0xe4, 0xc0, 0x81, 0x6f, 0xc0, 0x05, 0x89, 0x1b, 0x07, 0x0e, 0x28, 0xc7, + 0x1e, 0x8b, 0x84, 0x2c, 0xb2, 0x9c, 0xf9, 0x0e, 0x68, 0x66, 0xc7, 0xde, 0x75, 0xb2, 0x76, 0x13, + 0x0e, 0xf4, 0xd2, 0xdb, 0xee, 0xfb, 0xfd, 0xde, 0x9f, 0x79, 0x7f, 0x66, 0x1e, 0xd8, 0x3a, 0xdc, + 0x60, 0x96, 0xeb, 0xdb, 0x87, 0x61, 0x93, 0x50, 0x8f, 0x70, 0xc2, 0xec, 0x3e, 0xf1, 0x5a, 0x3e, + 0xb5, 0x15, 0x80, 0x03, 0xd7, 0xc6, 0x21, 0xef, 0xf8, 0xd4, 0xfd, 0x06, 0x73, 0xd7, 0xf7, 0xec, + 0xfe, 0x9a, 0xdd, 0x26, 0x1e, 0xa1, 0x98, 0x93, 0x96, 0x15, 0x50, 0x9f, 0xfb, 0xf0, 0x66, 0x4c, + 0xb6, 0x70, 0xe0, 0x5a, 0x63, 0x64, 0xab, 0xbf, 0xb6, 0xfc, 0x5e, 0xdb, 0xe5, 0x9d, 0xb0, 0x69, + 0x39, 0x7e, 0xcf, 0x6e, 0xfb, 0x6d, 0xdf, 0x96, 0x3a, 0xcd, 0xf0, 0x40, 0xfe, 0xc9, 0x1f, 0xf9, + 0x15, 0xdb, 0x5a, 0xbe, 0x93, 0x38, 0xee, 0x61, 0xa7, 0xe3, 0x7a, 0x84, 0x1e, 0xdb, 0xc1, 0x61, + 0x5b, 0x08, 0x98, 0xdd, 0x23, 0x1c, 0x67, 0x44, 0xb0, 0x6c, 0x4f, 0xd2, 0xa2, 0xa1, 0xc7, 0xdd, + 0x1e, 0x39, 0xa7, 0x70, 0xef, 0x65, 0x0a, 0xcc, 0xe9, 0x90, 0x1e, 0x3e, 0xa7, 0xf7, 0xc1, 0x24, + 0xbd, 0x90, 0xbb, 0x5d, 0xdb, 0xf5, 0x38, 0xe3, 0xf4, 0xac, 0x92, 0xb9, 0x0e, 0xc0, 0xf6, 0xd7, + 0x9c, 0xe2, 0x7d, 0xdc, 0x0d, 0x09, 0xac, 0x81, 0xa2, 0xcb, 0x49, 0x8f, 0x19, 0xda, 0x4a, 0x7e, + 0xb5, 0xdc, 0x28, 0x47, 0x83, 0x5a, 0x71, 0x47, 0x08, 0x50, 0x2c, 0xbf, 0x5f, 0xfa, 0xfe, 0xc7, + 0x5a, 0xee, 0xd9, 0x9f, 0x2b, 0x39, 0xf3, 0x67, 0x1d, 0x18, 0x0f, 0x7d, 0x07, 0x77, 0x77, 0xc3, + 
0xe6, 0x97, 0xc4, 0xe1, 0x9b, 0x8e, 0x43, 0x18, 0x43, 0xa4, 0xef, 0x92, 0x23, 0xf8, 0x05, 0x28, + 0x89, 0x74, 0xb4, 0x30, 0xc7, 0x86, 0xb6, 0xa2, 0xad, 0x56, 0xea, 0xef, 0x5b, 0x49, 0x21, 0x46, + 0xd1, 0x59, 0xc1, 0x61, 0x5b, 0x08, 0x98, 0x25, 0xd8, 0x56, 0x7f, 0xcd, 0xfa, 0x58, 0xda, 0x7a, + 0x44, 0x38, 0x6e, 0xc0, 0x93, 0x41, 0x2d, 0x17, 0x0d, 0x6a, 0x20, 0x91, 0xa1, 0x91, 0x55, 0xb8, + 0x0f, 0x0a, 0x2c, 0x20, 0x8e, 0xa1, 0x4b, 0xeb, 0x77, 0xac, 0x29, 0x65, 0xb6, 0x32, 0x22, 0xdc, + 0x0d, 0x88, 0xd3, 0xb8, 0xaa, 0x3c, 0x14, 0xc4, 0x1f, 0x92, 0xf6, 0xe0, 0xe7, 0x60, 0x86, 0x71, + 0xcc, 0x43, 0x66, 0xe4, 0xa5, 0xe5, 0x7b, 0x97, 0xb6, 0x2c, 0xb5, 0x1b, 0x6f, 0x28, 0xdb, 0x33, + 0xf1, 0x3f, 0x52, 0x56, 0xcd, 0x4f, 0xc1, 0xe2, 0x63, 0xdf, 0x43, 0x84, 0xf9, 0x21, 0x75, 0xc8, + 0x26, 0xe7, 0xd4, 0x6d, 0x86, 0x9c, 0x30, 0xb8, 0x02, 0x0a, 0x01, 0xe6, 0x1d, 0x99, 0xae, 0x72, + 0x12, 0xda, 0x13, 0xcc, 0x3b, 0x48, 0x22, 0x82, 0xd1, 0x27, 0xb4, 0x29, 0x8f, 0x9c, 0x62, 0xec, + 0x13, 0xda, 0x44, 0x12, 0x31, 0xbf, 0x02, 0x73, 0x29, 0xe3, 0x28, 0xec, 0xca, 0x8a, 0x0a, 0x68, + 0xac, 0xa2, 0x42, 0x83, 0xa1, 0x58, 0x0e, 0x1f, 0x80, 0x39, 0x2f, 0xd1, 0xd9, 0x43, 0x0f, 0x99, + 0xa1, 0x4b, 0xea, 0x42, 0x34, 0xa8, 0xa5, 0xcd, 0x09, 0x08, 0x9d, 0xe5, 0x9a, 0xbf, 0xea, 0x00, + 0x66, 0x9c, 0xc6, 0x06, 0x65, 0x0f, 0xf7, 0x08, 0x0b, 0xb0, 0x43, 0xd4, 0x91, 0xae, 0xa9, 0x80, + 0xcb, 0x8f, 0x87, 0x00, 0x4a, 0x38, 0x2f, 0x3f, 0x1c, 0x7c, 0x0b, 0x14, 0xdb, 0xd4, 0x0f, 0x03, + 0x59, 0x98, 0x72, 0x63, 0x56, 0x51, 0x8a, 0x1f, 0x09, 0x21, 0x8a, 0x31, 0x78, 0x0b, 0x5c, 0xe9, + 0x13, 0xca, 0x5c, 0xdf, 0x33, 0x0a, 0x92, 0x36, 0xa7, 0x68, 0x57, 0xf6, 0x63, 0x31, 0x1a, 0xe2, + 0xf0, 0x36, 0x28, 0x51, 0x15, 0xb8, 0x51, 0x94, 0xdc, 0x79, 0xc5, 0x2d, 0x8d, 0x32, 0x38, 0x62, + 0xc0, 0xbb, 0xa0, 0xc2, 0xc2, 0xe6, 0x48, 0x61, 0x46, 0x2a, 0x2c, 0x28, 0x85, 0xca, 0x6e, 0x02, + 0xa1, 0x34, 0x4f, 0x1c, 0x4b, 0x9c, 0xd1, 0xb8, 0x32, 0x7e, 0x2c, 0x91, 0x02, 0x24, 0x11, 0xf3, + 0x37, 0x0d, 0x5c, 0xbd, 0x5c, 0xc5, 0xde, 0x05, 0x65, 0x1c, 0xb8, 0xf2, 0xd8, 0xc3, 0x5a, 0xcd, + 0x8a, 0xbc, 0x6e, 0x3e, 0xd9, 0x89, 0x85, 0x28, 0xc1, 0x05, 0x79, 0x18, 0x8c, 0x68, 0xe9, 0x11, + 0x79, 0xe8, 0x92, 0xa1, 0x04, 0x87, 0xeb, 0x60, 0x76, 0xf8, 0x23, 0x8b, 0x64, 0x14, 0xa4, 0xc2, + 0xb5, 0x68, 0x50, 0x9b, 0x45, 0x69, 0x00, 0x8d, 0xf3, 0xcc, 0x5f, 0x74, 0xb0, 0xb4, 0x4b, 0xba, + 0x07, 0xaf, 0xe6, 0x2e, 0x78, 0x3a, 0x76, 0x17, 0x6c, 0x4c, 0x9f, 0xd8, 0xec, 0x28, 0x5f, 0xd9, + 0x7d, 0xf0, 0x83, 0x0e, 0x6e, 0x4e, 0x89, 0x09, 0x1e, 0x01, 0x48, 0xcf, 0x8d, 0x97, 0xca, 0xa3, + 0x3d, 0x35, 0x96, 0xf3, 0x53, 0xd9, 0xb8, 0x1e, 0x0d, 0x6a, 0x19, 0xd3, 0x8a, 0x32, 0x5c, 0xc0, + 0x6f, 0x35, 0xb0, 0xe8, 0x65, 0xdd, 0x54, 0x2a, 0xcd, 0xf5, 0xa9, 0xce, 0x33, 0xef, 0xb8, 0xc6, + 0x8d, 0x68, 0x50, 0xcb, 0xbe, 0xfe, 0x50, 0xb6, 0x2f, 0xf1, 0xca, 0x5c, 0x4f, 0xa5, 0x47, 0x0c, + 0xc8, 0xff, 0xd7, 0x57, 0x9f, 0x8c, 0xf5, 0xd5, 0xfa, 0x45, 0xfb, 0x2a, 0x15, 0xe4, 0xc4, 0xb6, + 0xfa, 0xec, 0x4c, 0x5b, 0xdd, 0xbd, 0x48, 0x5b, 0xa5, 0x0d, 0x4f, 0xef, 0xaa, 0x47, 0x60, 0x79, + 0x72, 0x40, 0x97, 0xbe, 0x9c, 0xcd, 0x9f, 0x74, 0xb0, 0xf0, 0xfa, 0x99, 0xbf, 0xcc, 0x58, 0xff, + 0x5e, 0x00, 0x4b, 0xaf, 0x47, 0x7a, 0xd2, 0xa2, 0x13, 0x32, 0x42, 0xd5, 0x33, 0x3e, 0x2a, 0xce, + 0x1e, 0x23, 0x14, 0x49, 0x04, 0x9a, 0x60, 0xa6, 0x1d, 0xbf, 0x6e, 0xf1, 0xfb, 0x03, 0x44, 0x82, + 0xd5, 0xd3, 0xa6, 0x10, 0xd8, 0x02, 0x45, 0x22, 0xf6, 0x56, 0xa3, 0xb8, 0x92, 0x5f, 0xad, 0xd4, + 0x3f, 0xfc, 0x2f, 0x9d, 0x61, 0xc9, 0xcd, 0x77, 0xdb, 0xe3, 0xf4, 0x38, 0x59, 0x27, 0xa4, 0x0c, + 0xc5, 0xc6, 0xe1, 0x9b, 
0x20, 0x1f, 0xba, 0x2d, 0xf5, 0xda, 0x57, 0x14, 0x25, 0xbf, 0xb7, 0xb3, + 0x85, 0x84, 0x7c, 0x19, 0xab, 0xe5, 0x59, 0x9a, 0x80, 0xf3, 0x20, 0x7f, 0x48, 0x8e, 0xe3, 0x81, + 0x42, 0xe2, 0x13, 0x3e, 0x00, 0xc5, 0xbe, 0xd8, 0xab, 0x55, 0x7e, 0xdf, 0x99, 0x1a, 0x64, 0xb2, + 0x86, 0xa3, 0x58, 0xeb, 0xbe, 0xbe, 0xa1, 0x99, 0x7f, 0x68, 0xe0, 0xc6, 0xc4, 0xf6, 0x13, 0xeb, + 0x0e, 0xee, 0x76, 0xfd, 0x23, 0xd2, 0x92, 0x6e, 0x4b, 0xc9, 0xba, 0xb3, 0x19, 0x8b, 0xd1, 0x10, + 0x87, 0x6f, 0x83, 0x19, 0x4a, 0x30, 0xf3, 0x3d, 0xb5, 0x62, 0x8d, 0x3a, 0x17, 0x49, 0x29, 0x52, + 0x28, 0xdc, 0x04, 0x73, 0x44, 0xb8, 0x97, 0x71, 0x6d, 0x53, 0xea, 0x0f, 0x2b, 0xb5, 0xa4, 0x14, + 0xe6, 0xb6, 0xc7, 0x61, 0x74, 0x96, 0x2f, 0x5c, 0xb5, 0x88, 0xe7, 0x92, 0x96, 0xdc, 0xc1, 0x4a, + 0x89, 0xab, 0x2d, 0x29, 0x45, 0x0a, 0x35, 0xff, 0xd1, 0x81, 0x31, 0xe9, 0x6a, 0x83, 0x07, 0xc9, + 0x2e, 0x22, 0x41, 0xb9, 0x0e, 0x55, 0xea, 0xb7, 0x2e, 0x34, 0x20, 0x42, 0xa3, 0xb1, 0xa8, 0xdc, + 0xce, 0xa6, 0xa5, 0xa9, 0xd5, 0x45, 0xfe, 0x42, 0x0a, 0xe6, 0xbd, 0xf1, 0x9d, 0x39, 0x5e, 0xaa, + 0x2a, 0xf5, 0xdb, 0x17, 0x1d, 0x07, 0xe9, 0xcd, 0x50, 0xde, 0xe6, 0xcf, 0x00, 0x0c, 0x9d, 0xb3, + 0x0f, 0xeb, 0x00, 0xb8, 0x9e, 0xe3, 0xf7, 0x82, 0x2e, 0xe1, 0x44, 0xa6, 0xb7, 0x94, 0xdc, 0x83, + 0x3b, 0x23, 0x04, 0xa5, 0x58, 0x59, 0x75, 0x29, 0x5c, 0xae, 0x2e, 0x8d, 0xd5, 0x93, 0xd3, 0x6a, + 0xee, 0xf9, 0x69, 0x35, 0xf7, 0xe2, 0xb4, 0x9a, 0x7b, 0x16, 0x55, 0xb5, 0x93, 0xa8, 0xaa, 0x3d, + 0x8f, 0xaa, 0xda, 0x8b, 0xa8, 0xaa, 0xfd, 0x15, 0x55, 0xb5, 0xef, 0xfe, 0xae, 0xe6, 0x9e, 0xea, + 0xfd, 0xb5, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xd6, 0x0e, 0xab, 0x82, 0x7c, 0x0f, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/authorization/v1/generated.proto b/vendor/k8s.io/api/authorization/v1/generated.proto index 7f31d599a81b..28c8d660365c 100644 --- a/vendor/k8s.io/api/authorization/v1/generated.proto +++ b/vendor/k8s.io/api/authorization/v1/generated.proto @@ -121,7 +121,8 @@ message ResourceRule { // +optional repeated string apiGroups = 2; - // Resources is a list of resources this rule applies to. ResourceAll represents all resources. "*" means all. + // Resources is a list of resources this rule applies to. "*" means all in the specified apiGroups. + // "*/foo" represents the subresource 'foo' for all resources in the specified apiGroups. // +optional repeated string resources = 3; @@ -225,9 +226,16 @@ message SubjectAccessReviewSpec { // SubjectAccessReviewStatus message SubjectAccessReviewStatus { - // Allowed is required. True if the action would be allowed, false otherwise. + // Allowed is required. True if the action would be allowed, false otherwise. optional bool allowed = 1; + // Denied is optional. True if the action would be denied, otherwise + // false. If both allowed is false and denied is false, then the + // authorizer has no opinion on whether to authorize the action. Denied + // may not be true if Allowed is true. + // +optional + optional bool denied = 4; + // Reason is optional. It indicates why a request was allowed or denied. // +optional optional string reason = 2; diff --git a/vendor/k8s.io/api/authorization/v1/register.go b/vendor/k8s.io/api/authorization/v1/register.go index d716eaa98da0..59311981e12b 100644 --- a/vendor/k8s.io/api/authorization/v1/register.go +++ b/vendor/k8s.io/api/authorization/v1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. 
func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &SelfSubjectRulesReview{}, diff --git a/vendor/k8s.io/api/authorization/v1/types.go b/vendor/k8s.io/api/authorization/v1/types.go index 99ec3bcbf7f4..86b05c54e37b 100644 --- a/vendor/k8s.io/api/authorization/v1/types.go +++ b/vendor/k8s.io/api/authorization/v1/types.go @@ -169,8 +169,14 @@ type SelfSubjectAccessReviewSpec struct { // SubjectAccessReviewStatus type SubjectAccessReviewStatus struct { - // Allowed is required. True if the action would be allowed, false otherwise. + // Allowed is required. True if the action would be allowed, false otherwise. Allowed bool `json:"allowed" protobuf:"varint,1,opt,name=allowed"` + // Denied is optional. True if the action would be denied, otherwise + // false. If both allowed is false and denied is false, then the + // authorizer has no opinion on whether to authorize the action. Denied + // may not be true if Allowed is true. + // +optional + Denied bool `json:"denied,omitempty" protobuf:"varint,4,opt,name=denied"` // Reason is optional. It indicates why a request was allowed or denied. // +optional Reason string `json:"reason,omitempty" protobuf:"bytes,2,opt,name=reason"` @@ -241,7 +247,8 @@ type ResourceRule struct { // the enumerated resources in any API group will be allowed. "*" means all. // +optional APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,2,rep,name=apiGroups"` - // Resources is a list of resources this rule applies to. ResourceAll represents all resources. "*" means all. + // Resources is a list of resources this rule applies to. "*" means all in the specified apiGroups. + // "*/foo" represents the subresource 'foo' for all resources in the specified apiGroups. // +optional Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. "*" means all. diff --git a/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go index 8a0fb8a857b8..85503660c58b 100644 --- a/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go @@ -76,7 +76,7 @@ var map_ResourceRule = map[string]string{ "": "ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.", "verbs": "Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. \"*\" means all.", "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. \"*\" means all.", - "resources": "Resources is a list of resources this rule applies to. ResourceAll represents all resources. \"*\" means all.", + "resources": "Resources is a list of resources this rule applies to. \"*\" means all in the specified apiGroups.\n \"*/foo\" represents the subresource 'foo' for all resources in the specified apiGroups.", "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. 
\"*\" means all.", } @@ -148,7 +148,8 @@ func (SubjectAccessReviewSpec) SwaggerDoc() map[string]string { var map_SubjectAccessReviewStatus = map[string]string{ "": "SubjectAccessReviewStatus", - "allowed": "Allowed is required. True if the action would be allowed, false otherwise.", + "allowed": "Allowed is required. True if the action would be allowed, false otherwise.", + "denied": "Denied is optional. True if the action would be denied, otherwise false. If both allowed is false and denied is false, then the authorizer has no opinion on whether to authorize the action. Denied may not be true if Allowed is true.", "reason": "Reason is optional. It indicates why a request was allowed or denied.", "evaluationError": "EvaluationError is an indication that some error occurred during the authorization check. It is entirely possible to get an error and be able to continue determine authorization status in spite of it. For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request.", } diff --git a/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go index 916974ffcf85..a415b2b1d622 100644 --- a/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go @@ -21,76 +21,9 @@ limitations under the License. package v1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LocalSubjectAccessReview).DeepCopyInto(out.(*LocalSubjectAccessReview)) - return nil - }, InType: reflect.TypeOf(&LocalSubjectAccessReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NonResourceAttributes).DeepCopyInto(out.(*NonResourceAttributes)) - return nil - }, InType: reflect.TypeOf(&NonResourceAttributes{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NonResourceRule).DeepCopyInto(out.(*NonResourceRule)) - return nil - }, InType: reflect.TypeOf(&NonResourceRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceAttributes).DeepCopyInto(out.(*ResourceAttributes)) - return nil - }, InType: reflect.TypeOf(&ResourceAttributes{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceRule).DeepCopyInto(out.(*ResourceRule)) - return nil - }, InType: reflect.TypeOf(&ResourceRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SelfSubjectAccessReview).DeepCopyInto(out.(*SelfSubjectAccessReview)) - return nil - }, InType: reflect.TypeOf(&SelfSubjectAccessReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SelfSubjectAccessReviewSpec).DeepCopyInto(out.(*SelfSubjectAccessReviewSpec)) - return nil - }, InType: reflect.TypeOf(&SelfSubjectAccessReviewSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SelfSubjectRulesReview).DeepCopyInto(out.(*SelfSubjectRulesReview)) - return nil - }, InType: reflect.TypeOf(&SelfSubjectRulesReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SelfSubjectRulesReviewSpec).DeepCopyInto(out.(*SelfSubjectRulesReviewSpec)) - return nil - }, InType: reflect.TypeOf(&SelfSubjectRulesReviewSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SubjectAccessReview).DeepCopyInto(out.(*SubjectAccessReview)) - return nil - }, InType: reflect.TypeOf(&SubjectAccessReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SubjectAccessReviewSpec).DeepCopyInto(out.(*SubjectAccessReviewSpec)) - return nil - }, InType: reflect.TypeOf(&SubjectAccessReviewSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SubjectAccessReviewStatus).DeepCopyInto(out.(*SubjectAccessReviewStatus)) - return nil - }, InType: reflect.TypeOf(&SubjectAccessReviewStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SubjectRulesReviewStatus).DeepCopyInto(out.(*SubjectRulesReviewStatus)) - return nil - }, InType: reflect.TypeOf(&SubjectRulesReviewStatus{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *LocalSubjectAccessReview) DeepCopyInto(out *LocalSubjectAccessReview) { *out = *in diff --git a/vendor/k8s.io/api/authorization/v1beta1/BUILD b/vendor/k8s.io/api/authorization/v1beta1/BUILD index 745ff2609dae..06c953f2a908 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/BUILD +++ b/vendor/k8s.io/api/authorization/v1beta1/BUILD @@ -15,11 +15,11 @@ go_library( "types_swagger_doc_generated.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/api/authorization/v1beta1", deps = [ "//vendor/github.com/gogo/protobuf/proto:go_default_library", "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], diff --git a/vendor/k8s.io/api/authorization/v1beta1/doc.go b/vendor/k8s.io/api/authorization/v1beta1/doc.go index 48f1289d51ed..e609a445e396 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/doc.go +++ b/vendor/k8s.io/api/authorization/v1beta1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true // +groupName=authorization.k8s.io diff --git a/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go b/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go index e1f49d464678..9b1ad299e2ab 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go @@ -730,6 +730,14 @@ func (m *SubjectAccessReviewStatus) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) i += copy(dAtA[i:], m.EvaluationError) + dAtA[i] = 0x20 + i++ + if m.Denied { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ return i, nil } @@ -1015,6 +1023,7 @@ func (m *SubjectAccessReviewStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.EvaluationError) n += 1 + l + sovGenerated(uint64(l)) + n += 2 return n } @@ -1205,6 +1214,7 @@ func (this *SubjectAccessReviewStatus) String() string { `Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `EvaluationError:` + fmt.Sprintf("%v", this.EvaluationError) + `,`, + `Denied:` + fmt.Sprintf("%v", this.Denied) + `,`, `}`, }, "") return s @@ -3130,6 +3140,26 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } m.EvaluationError = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Denied", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Denied = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -3422,77 +3452,78 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 1139 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0xcf, 0x6f, 0x1b, 0x45, - 0x14, 0xf6, 0xfa, 0x47, 0x62, 0x8f, 0x1b, 0x92, 0x4e, 0x94, 0x66, 0x1b, 0x84, 0x6d, 0x19, 0x09, - 0x05, 0xd1, 0xee, 0x92, 0x50, 0x48, 0x09, 0xf4, 0x10, 0xab, 0x11, 0x8a, 0xd4, 0x96, 0x6a, 
0xa2, - 0xe4, 0x40, 0x25, 0x60, 0x76, 0x33, 0xb1, 0x17, 0xdb, 0xbb, 0xcb, 0xcc, 0xac, 0x43, 0x10, 0x87, - 0x1e, 0x39, 0x72, 0xe4, 0xc8, 0x89, 0x3b, 0x47, 0x2e, 0x48, 0x70, 0xca, 0xb1, 0xc7, 0x1c, 0x90, - 0x45, 0x96, 0x3f, 0x82, 0x2b, 0x9a, 0xd9, 0xb1, 0x77, 0x1d, 0xaf, 0xe3, 0x24, 0x87, 0xf6, 0xd2, - 0xdb, 0xce, 0xfb, 0xde, 0xf7, 0xde, 0x9b, 0x37, 0xef, 0xbd, 0x7d, 0x60, 0xa7, 0x7d, 0x9f, 0x19, - 0x8e, 0x67, 0xb6, 0x03, 0x8b, 0x50, 0x97, 0x70, 0xc2, 0xcc, 0x1e, 0x71, 0x0f, 0x3c, 0x6a, 0x2a, - 0x00, 0xfb, 0x8e, 0x89, 0x03, 0xde, 0xf2, 0xa8, 0xf3, 0x3d, 0xe6, 0x8e, 0xe7, 0x9a, 0xbd, 0x35, - 0x8b, 0x70, 0xbc, 0x66, 0x36, 0x89, 0x4b, 0x28, 0xe6, 0xe4, 0xc0, 0xf0, 0xa9, 0xc7, 0x3d, 0x58, - 0x8b, 0x18, 0x06, 0xf6, 0x1d, 0x63, 0x84, 0x61, 0x28, 0xc6, 0xca, 0xdd, 0xa6, 0xc3, 0x5b, 0x81, - 0x65, 0xd8, 0x5e, 0xd7, 0x6c, 0x7a, 0x4d, 0xcf, 0x94, 0x44, 0x2b, 0x38, 0x94, 0x27, 0x79, 0x90, - 0x5f, 0x91, 0xc1, 0x95, 0x7b, 0x71, 0x08, 0x5d, 0x6c, 0xb7, 0x1c, 0x97, 0xd0, 0x63, 0xd3, 0x6f, - 0x37, 0x85, 0x80, 0x99, 0x5d, 0xc2, 0xb1, 0xd9, 0x1b, 0x0b, 0x63, 0xc5, 0x9c, 0xc4, 0xa2, 0x81, - 0xcb, 0x9d, 0x2e, 0x19, 0x23, 0x7c, 0x34, 0x8d, 0xc0, 0xec, 0x16, 0xe9, 0xe2, 0x31, 0xde, 0x07, - 0x93, 0x78, 0x01, 0x77, 0x3a, 0xa6, 0xe3, 0x72, 0xc6, 0xe9, 0x79, 0x52, 0x7d, 0x03, 0x80, 0xed, - 0xef, 0x38, 0xc5, 0xfb, 0xb8, 0x13, 0x10, 0x58, 0x05, 0x05, 0x87, 0x93, 0x2e, 0xd3, 0xb5, 0x5a, - 0x6e, 0xb5, 0xd4, 0x28, 0x85, 0xfd, 0x6a, 0x61, 0x47, 0x08, 0x50, 0x24, 0xdf, 0x2c, 0xfe, 0xfc, - 0x4b, 0x35, 0xf3, 0xfc, 0xef, 0x5a, 0xa6, 0xfe, 0x47, 0x16, 0xe8, 0x8f, 0x3c, 0x1b, 0x77, 0x76, - 0x03, 0xeb, 0x1b, 0x62, 0xf3, 0x2d, 0xdb, 0x26, 0x8c, 0x21, 0xd2, 0x73, 0xc8, 0x11, 0xfc, 0x1a, - 0x14, 0x45, 0x3a, 0x0e, 0x30, 0xc7, 0xba, 0x56, 0xd3, 0x56, 0xcb, 0xeb, 0xef, 0x1b, 0xf1, 0x6b, - 0x0c, 0xa3, 0x33, 0xfc, 0x76, 0x53, 0x08, 0x98, 0x21, 0xb4, 0x8d, 0xde, 0x9a, 0xf1, 0xb9, 0xb4, - 0xf5, 0x98, 0x70, 0xdc, 0x80, 0x27, 0xfd, 0x6a, 0x26, 0xec, 0x57, 0x41, 0x2c, 0x43, 0x43, 0xab, - 0xf0, 0x19, 0xc8, 0x33, 0x9f, 0xd8, 0x7a, 0x56, 0x5a, 0xff, 0xd8, 0x98, 0xf6, 0xd6, 0x46, 0x4a, - 0x98, 0xbb, 0x3e, 0xb1, 0x1b, 0x37, 0x94, 0x9b, 0xbc, 0x38, 0x21, 0x69, 0x14, 0xda, 0x60, 0x86, - 0x71, 0xcc, 0x03, 0xa6, 0xe7, 0xa4, 0xf9, 0x4f, 0xae, 0x67, 0x5e, 0x9a, 0x68, 0xbc, 0xa1, 0x1c, - 0xcc, 0x44, 0x67, 0xa4, 0x4c, 0xd7, 0x9f, 0x81, 0xa5, 0x27, 0x9e, 0x8b, 0x08, 0xf3, 0x02, 0x6a, - 0x93, 0x2d, 0xce, 0xa9, 0x63, 0x05, 0x9c, 0x30, 0x58, 0x03, 0x79, 0x1f, 0xf3, 0x96, 0x4c, 0x5c, - 0x29, 0x8e, 0xef, 0x29, 0xe6, 0x2d, 0x24, 0x11, 0xa1, 0xd1, 0x23, 0xd4, 0x92, 0x97, 0x4f, 0x68, - 0xec, 0x13, 0x6a, 0x21, 0x89, 0xd4, 0xbf, 0x05, 0xf3, 0x09, 0xe3, 0x28, 0xe8, 0xc8, 0xb7, 0x15, - 0xd0, 0xc8, 0xdb, 0x0a, 0x06, 0x43, 0x91, 0x1c, 0x3e, 0x00, 0xf3, 0x6e, 0xcc, 0xd9, 0x43, 0x8f, - 0x98, 0x9e, 0x95, 0xaa, 0x8b, 0x61, 0xbf, 0x9a, 0x34, 0x27, 0x20, 0x74, 0x5e, 0x57, 0x14, 0x04, - 0x4c, 0xb9, 0x8d, 0x09, 0x4a, 0x2e, 0xee, 0x12, 0xe6, 0x63, 0x9b, 0xa8, 0x2b, 0xdd, 0x54, 0x01, - 0x97, 0x9e, 0x0c, 0x00, 0x14, 0xeb, 0x4c, 0xbf, 0x1c, 0x7c, 0x1b, 0x14, 0x9a, 0xd4, 0x0b, 0x7c, - 0xf9, 0x3a, 0xa5, 0xc6, 0x9c, 0x52, 0x29, 0x7c, 0x26, 0x84, 0x28, 0xc2, 0xe0, 0xbb, 0x60, 0xb6, - 0x47, 0x28, 0x73, 0x3c, 0x57, 0xcf, 0x4b, 0xb5, 0x79, 0xa5, 0x36, 0xbb, 0x1f, 0x89, 0xd1, 0x00, - 0x87, 0x77, 0x40, 0x91, 0xaa, 0xc0, 0xf5, 0x82, 0xd4, 0x5d, 0x50, 0xba, 0xc5, 0x61, 0x06, 0x87, - 0x1a, 0xf0, 0x43, 0x50, 0x66, 0x81, 0x35, 0x24, 0xcc, 0x48, 0xc2, 0xa2, 0x22, 0x94, 0x77, 0x63, - 0x08, 0x25, 0xf5, 0xc4, 0xb5, 0xc4, 0x1d, 0xf5, 0xd9, 0xd1, 0x6b, 0x89, 0x14, 0x20, 0x89, 0xd4, - 0xff, 0xd2, 0xc0, 
0x8d, 0xab, 0xbd, 0xd8, 0x7b, 0xa0, 0x84, 0x7d, 0x47, 0x5e, 0x7b, 0xf0, 0x56, - 0x73, 0x22, 0xaf, 0x5b, 0x4f, 0x77, 0x22, 0x21, 0x8a, 0x71, 0xa1, 0x3c, 0x08, 0x46, 0xd4, 0xf5, - 0x50, 0x79, 0xe0, 0x92, 0xa1, 0x18, 0x87, 0x1b, 0x60, 0x6e, 0x70, 0x90, 0x8f, 0xa4, 0xe7, 0x25, - 0xe1, 0x66, 0xd8, 0xaf, 0xce, 0xa1, 0x24, 0x80, 0x46, 0xf5, 0xea, 0x7f, 0x66, 0xc1, 0xf2, 0x2e, - 0xe9, 0x1c, 0xbe, 0x9a, 0xa9, 0xf0, 0xd5, 0xc8, 0x54, 0x78, 0x70, 0x89, 0xb6, 0x4d, 0x0f, 0xf5, - 0xd5, 0x4e, 0x86, 0x5f, 0xb3, 0xe0, 0xcd, 0x0b, 0x02, 0x83, 0x3f, 0x00, 0x48, 0xc7, 0x1a, 0x4d, - 0x65, 0xf4, 0xde, 0xf4, 0x80, 0xc6, 0x9b, 0xb4, 0x71, 0x2b, 0xec, 0x57, 0x53, 0x9a, 0x17, 0xa5, - 0xf8, 0x81, 0x3f, 0x6a, 0x60, 0xc9, 0x4d, 0x1b, 0x5c, 0x2a, 0xeb, 0x1b, 0xd3, 0x23, 0x48, 0x9d, - 0x7b, 0x8d, 0xdb, 0x61, 0xbf, 0x9a, 0x3e, 0x12, 0x51, 0xba, 0x43, 0x31, 0x72, 0x6e, 0x25, 0x12, - 0x25, 0x9a, 0xe6, 0xe5, 0xd5, 0xda, 0x97, 0x23, 0xb5, 0xf6, 0xe9, 0x95, 0x6a, 0x2d, 0x11, 0xe9, - 0xc4, 0x52, 0xb3, 0xce, 0x95, 0xda, 0xe6, 0xa5, 0x4b, 0x2d, 0x69, 0xfd, 0xe2, 0x4a, 0x7b, 0x0c, - 0x56, 0x26, 0x47, 0x75, 0xe5, 0xd1, 0x5d, 0xff, 0x3d, 0x0b, 0x16, 0x5f, 0xaf, 0x03, 0xd7, 0x6b, - 0xfa, 0xd3, 0x3c, 0x58, 0x7e, 0xdd, 0xf0, 0x17, 0x37, 0xbc, 0xf8, 0x89, 0x06, 0x8c, 0x50, 0xf5, - 0xe3, 0x1f, 0xbe, 0xd5, 0x1e, 0x23, 0x14, 0x49, 0x04, 0xd6, 0x06, 0xbb, 0x41, 0xf4, 0xc3, 0x02, - 0x22, 0xd3, 0xea, 0x5f, 0xa8, 0x16, 0x03, 0x07, 0x14, 0x88, 0xd8, 0x78, 0xf5, 0x42, 0x2d, 0xb7, - 0x5a, 0x5e, 0x7f, 0x78, 0xed, 0x5a, 0x31, 0xe4, 0xe2, 0xbc, 0xed, 0x72, 0x7a, 0x1c, 0xef, 0x20, - 0x52, 0x86, 0x22, 0x0f, 0xf0, 0x2d, 0x90, 0x0b, 0x9c, 0x03, 0xb5, 0x22, 0x94, 0x95, 0x4a, 0x6e, - 0x6f, 0xe7, 0x21, 0x12, 0xf2, 0x95, 0x43, 0xb5, 0x7b, 0x4b, 0x13, 0x70, 0x01, 0xe4, 0xda, 0xe4, - 0x38, 0xea, 0x33, 0x24, 0x3e, 0x61, 0x03, 0x14, 0x7a, 0x62, 0x2d, 0x57, 0x79, 0xbe, 0x33, 0x3d, - 0xd2, 0x78, 0x95, 0x47, 0x11, 0x75, 0x33, 0x7b, 0x5f, 0xab, 0xff, 0xa6, 0x81, 0xdb, 0x13, 0x0b, - 0x52, 0x2c, 0x4a, 0xb8, 0xd3, 0xf1, 0x8e, 0xc8, 0x81, 0xf4, 0x5d, 0x8c, 0x17, 0xa5, 0xad, 0x48, - 0x8c, 0x06, 0x38, 0x7c, 0x07, 0xcc, 0x50, 0x82, 0x99, 0xe7, 0xaa, 0xe5, 0x6c, 0x58, 0xcb, 0x48, - 0x4a, 0x91, 0x42, 0xe1, 0x16, 0x98, 0x27, 0xc2, 0xbd, 0x0c, 0x6e, 0x9b, 0x52, 0x6f, 0xf0, 0x62, - 0xcb, 0x8a, 0x30, 0xbf, 0x3d, 0x0a, 0xa3, 0xf3, 0xfa, 0xf5, 0xff, 0xb2, 0x40, 0x9f, 0x34, 0xce, - 0x60, 0x3b, 0xde, 0x4e, 0x24, 0x28, 0x17, 0xa4, 0xf2, 0xba, 0x71, 0xf9, 0x56, 0x10, 0xb4, 0xc6, - 0x92, 0x8a, 0x66, 0x2e, 0x29, 0x4d, 0x6c, 0x34, 0xf2, 0x08, 0x8f, 0xc0, 0x82, 0x3b, 0xba, 0x4a, - 0x47, 0xbb, 0x56, 0x79, 0x7d, 0xed, 0x4a, 0x85, 0x2f, 0x5d, 0xea, 0xca, 0xe5, 0xc2, 0x39, 0x80, - 0xa1, 0x31, 0x27, 0x70, 0x1d, 0x00, 0xc7, 0xb5, 0xbd, 0xae, 0xdf, 0x21, 0x9c, 0xc8, 0x04, 0x16, - 0xe3, 0x29, 0xb8, 0x33, 0x44, 0x50, 0x42, 0x2b, 0x2d, 0xf3, 0xf9, 0xab, 0x65, 0xbe, 0x71, 0xf7, - 0xe4, 0xac, 0x92, 0x79, 0x71, 0x56, 0xc9, 0x9c, 0x9e, 0x55, 0x32, 0xcf, 0xc3, 0x8a, 0x76, 0x12, - 0x56, 0xb4, 0x17, 0x61, 0x45, 0x3b, 0x0d, 0x2b, 0xda, 0x3f, 0x61, 0x45, 0xfb, 0xe9, 0xdf, 0x4a, - 0xe6, 0x8b, 0x59, 0x75, 0xc3, 0xff, 0x03, 0x00, 0x00, 0xff, 0xff, 0x29, 0xa9, 0x9d, 0x7c, 0xb1, - 0x0f, 0x00, 0x00, + // 1154 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0x4d, 0x6f, 0x1b, 0xc5, + 0x1b, 0xf7, 0xfa, 0x25, 0xb1, 0xc7, 0xcd, 0x3f, 0xe9, 0x44, 0x69, 0xb6, 0xf9, 0x0b, 0xdb, 0x32, + 0x12, 0x0a, 0xa2, 0xdd, 0x25, 0xa1, 0x90, 0x12, 0xe8, 0x21, 0x56, 0x22, 0x14, 0xa9, 0x2d, 0xd5, + 0x44, 0xc9, 0x81, 0x4a, 0xc0, 0x78, 0x3d, 0xb1, 0x17, 0xdb, 0xbb, 0xcb, 
0xcc, 0xac, 0x43, 0x10, + 0x87, 0x1e, 0x39, 0x72, 0xe4, 0xc8, 0x89, 0xef, 0xc0, 0x05, 0x09, 0x4e, 0x39, 0xf6, 0x18, 0x24, + 0x64, 0x91, 0xe5, 0x43, 0x70, 0x45, 0x33, 0x3b, 0xf6, 0xae, 0xe3, 0x75, 0x1c, 0xe7, 0x40, 0x2f, + 0xbd, 0xed, 0x3c, 0xbf, 0xe7, 0x6d, 0x9e, 0x97, 0xd9, 0x1f, 0xd8, 0x6f, 0x3f, 0x64, 0x86, 0xed, + 0x9a, 0x6d, 0xbf, 0x4e, 0xa8, 0x43, 0x38, 0x61, 0x66, 0x8f, 0x38, 0x0d, 0x97, 0x9a, 0x0a, 0xc0, + 0x9e, 0x6d, 0x62, 0x9f, 0xb7, 0x5c, 0x6a, 0x7f, 0x8b, 0xb9, 0xed, 0x3a, 0x66, 0x6f, 0xa3, 0x4e, + 0x38, 0xde, 0x30, 0x9b, 0xc4, 0x21, 0x14, 0x73, 0xd2, 0x30, 0x3c, 0xea, 0x72, 0x17, 0x56, 0x42, + 0x0b, 0x03, 0x7b, 0xb6, 0x31, 0x62, 0x61, 0x28, 0x8b, 0xb5, 0xfb, 0x4d, 0x9b, 0xb7, 0xfc, 0xba, + 0x61, 0xb9, 0x5d, 0xb3, 0xe9, 0x36, 0x5d, 0x53, 0x1a, 0xd6, 0xfd, 0x63, 0x79, 0x92, 0x07, 0xf9, + 0x15, 0x3a, 0x5c, 0x7b, 0x10, 0xa5, 0xd0, 0xc5, 0x56, 0xcb, 0x76, 0x08, 0x3d, 0x35, 0xbd, 0x76, + 0x53, 0x08, 0x98, 0xd9, 0x25, 0x1c, 0x9b, 0xbd, 0xb1, 0x34, 0xd6, 0xcc, 0x49, 0x56, 0xd4, 0x77, + 0xb8, 0xdd, 0x25, 0x63, 0x06, 0x1f, 0x4c, 0x33, 0x60, 0x56, 0x8b, 0x74, 0xf1, 0x98, 0xdd, 0x7b, + 0x93, 0xec, 0x7c, 0x6e, 0x77, 0x4c, 0xdb, 0xe1, 0x8c, 0xd3, 0xcb, 0x46, 0xd5, 0x2d, 0x00, 0xf6, + 0xbe, 0xe1, 0x14, 0x1f, 0xe1, 0x8e, 0x4f, 0x60, 0x19, 0xe4, 0x6c, 0x4e, 0xba, 0x4c, 0xd7, 0x2a, + 0x99, 0xf5, 0x42, 0xad, 0x10, 0xf4, 0xcb, 0xb9, 0x7d, 0x21, 0x40, 0xa1, 0x7c, 0x3b, 0xff, 0xe3, + 0x4f, 0xe5, 0xd4, 0x8b, 0x3f, 0x2b, 0xa9, 0xea, 0xaf, 0x69, 0xa0, 0x3f, 0x76, 0x2d, 0xdc, 0x39, + 0xf0, 0xeb, 0x5f, 0x11, 0x8b, 0xef, 0x58, 0x16, 0x61, 0x0c, 0x91, 0x9e, 0x4d, 0x4e, 0xe0, 0x97, + 0x20, 0x2f, 0xca, 0xd1, 0xc0, 0x1c, 0xeb, 0x5a, 0x45, 0x5b, 0x2f, 0x6e, 0xbe, 0x6b, 0x44, 0xdd, + 0x18, 0x66, 0x67, 0x78, 0xed, 0xa6, 0x10, 0x30, 0x43, 0x68, 0x1b, 0xbd, 0x0d, 0xe3, 0x53, 0xe9, + 0xeb, 0x09, 0xe1, 0xb8, 0x06, 0xcf, 0xfa, 0xe5, 0x54, 0xd0, 0x2f, 0x83, 0x48, 0x86, 0x86, 0x5e, + 0xe1, 0x73, 0x90, 0x65, 0x1e, 0xb1, 0xf4, 0xb4, 0xf4, 0xfe, 0xa1, 0x31, 0xad, 0xd7, 0x46, 0x42, + 0x9a, 0x07, 0x1e, 0xb1, 0x6a, 0xb7, 0x54, 0x98, 0xac, 0x38, 0x21, 0xe9, 0x14, 0x5a, 0x60, 0x8e, + 0x71, 0xcc, 0x7d, 0xa6, 0x67, 0xa4, 0xfb, 0x8f, 0x6e, 0xe6, 0x5e, 0xba, 0xa8, 0xfd, 0x4f, 0x05, + 0x98, 0x0b, 0xcf, 0x48, 0xb9, 0xae, 0x3e, 0x07, 0x2b, 0x4f, 0x5d, 0x07, 0x11, 0xe6, 0xfa, 0xd4, + 0x22, 0x3b, 0x9c, 0x53, 0xbb, 0xee, 0x73, 0xc2, 0x60, 0x05, 0x64, 0x3d, 0xcc, 0x5b, 0xb2, 0x70, + 0x85, 0x28, 0xbf, 0x67, 0x98, 0xb7, 0x90, 0x44, 0x84, 0x46, 0x8f, 0xd0, 0xba, 0xbc, 0x7c, 0x4c, + 0xe3, 0x88, 0xd0, 0x3a, 0x92, 0x48, 0xf5, 0x6b, 0xb0, 0x18, 0x73, 0x8e, 0xfc, 0x8e, 0xec, 0xad, + 0x80, 0x46, 0x7a, 0x2b, 0x2c, 0x18, 0x0a, 0xe5, 0xf0, 0x11, 0x58, 0x74, 0x22, 0x9b, 0x43, 0xf4, + 0x98, 0xe9, 0x69, 0xa9, 0xba, 0x1c, 0xf4, 0xcb, 0x71, 0x77, 0x02, 0x42, 0x97, 0x75, 0xc5, 0x40, + 0xc0, 0x84, 0xdb, 0x98, 0xa0, 0xe0, 0xe0, 0x2e, 0x61, 0x1e, 0xb6, 0x88, 0xba, 0xd2, 0x6d, 0x95, + 0x70, 0xe1, 0xe9, 0x00, 0x40, 0x91, 0xce, 0xf4, 0xcb, 0xc1, 0x37, 0x41, 0xae, 0x49, 0x5d, 0xdf, + 0x93, 0xdd, 0x29, 0xd4, 0x16, 0x94, 0x4a, 0xee, 0x13, 0x21, 0x44, 0x21, 0x06, 0xdf, 0x06, 0xf3, + 0x3d, 0x42, 0x99, 0xed, 0x3a, 0x7a, 0x56, 0xaa, 0x2d, 0x2a, 0xb5, 0xf9, 0xa3, 0x50, 0x8c, 0x06, + 0x38, 0xbc, 0x07, 0xf2, 0x54, 0x25, 0xae, 0xe7, 0xa4, 0xee, 0x92, 0xd2, 0xcd, 0x0f, 0x2b, 0x38, + 0xd4, 0x80, 0xef, 0x83, 0x22, 0xf3, 0xeb, 0x43, 0x83, 0x39, 0x69, 0xb0, 0xac, 0x0c, 0x8a, 0x07, + 0x11, 0x84, 0xe2, 0x7a, 0xe2, 0x5a, 0xe2, 0x8e, 0xfa, 0xfc, 0xe8, 0xb5, 0x44, 0x09, 0x90, 0x44, + 0xaa, 0xbf, 0x6b, 0xe0, 0xd6, 0x6c, 0x1d, 0x7b, 0x07, 0x14, 0xb0, 0x67, 0xcb, 0x6b, 0x0f, 0x7a, + 
0xb5, 0x20, 0xea, 0xba, 0xf3, 0x6c, 0x3f, 0x14, 0xa2, 0x08, 0x17, 0xca, 0x83, 0x64, 0xc4, 0x5c, + 0x0f, 0x95, 0x07, 0x21, 0x19, 0x8a, 0x70, 0xb8, 0x05, 0x16, 0x06, 0x07, 0xd9, 0x24, 0x3d, 0x2b, + 0x0d, 0x6e, 0x07, 0xfd, 0xf2, 0x02, 0x8a, 0x03, 0x68, 0x54, 0xaf, 0xfa, 0x5b, 0x1a, 0xac, 0x1e, + 0x90, 0xce, 0xf1, 0xab, 0x79, 0x15, 0xbe, 0x18, 0x79, 0x15, 0x1e, 0x5d, 0x63, 0x6d, 0x93, 0x53, + 0x7d, 0xb5, 0x2f, 0xc3, 0xcf, 0x69, 0xf0, 0xff, 0x2b, 0x12, 0x83, 0xdf, 0x01, 0x48, 0xc7, 0x16, + 0x4d, 0x55, 0xf4, 0xc1, 0xf4, 0x84, 0xc6, 0x97, 0xb4, 0x76, 0x27, 0xe8, 0x97, 0x13, 0x96, 0x17, + 0x25, 0xc4, 0x81, 0xdf, 0x6b, 0x60, 0xc5, 0x49, 0x7a, 0xb8, 0x54, 0xd5, 0xb7, 0xa6, 0x67, 0x90, + 0xf8, 0xee, 0xd5, 0xee, 0x06, 0xfd, 0x72, 0xf2, 0x93, 0x88, 0x92, 0x03, 0x8a, 0x27, 0xe7, 0x4e, + 0xac, 0x50, 0x62, 0x69, 0xfe, 0xbb, 0x59, 0xfb, 0x7c, 0x64, 0xd6, 0x3e, 0x9e, 0x69, 0xd6, 0x62, + 0x99, 0x4e, 0x1c, 0xb5, 0xfa, 0xa5, 0x51, 0xdb, 0xbe, 0xf6, 0xa8, 0xc5, 0xbd, 0x5f, 0x3d, 0x69, + 0x4f, 0xc0, 0xda, 0xe4, 0xac, 0x66, 0x7e, 0xba, 0xab, 0xbf, 0xa4, 0xc1, 0xf2, 0x6b, 0x3a, 0x70, + 0xb3, 0xa5, 0x3f, 0xcf, 0x82, 0xd5, 0xd7, 0x0b, 0x7f, 0xf5, 0xc2, 0x8b, 0x9f, 0xa8, 0xcf, 0x08, + 0x55, 0x3f, 0xfe, 0x61, 0xaf, 0x0e, 0x19, 0xa1, 0x48, 0x22, 0xb0, 0x32, 0xe0, 0x06, 0xe1, 0x0f, + 0x0b, 0x88, 0x4a, 0xab, 0x7f, 0xa1, 0x22, 0x06, 0x36, 0xc8, 0x11, 0xc1, 0x78, 0xf5, 0x5c, 0x25, + 0xb3, 0x5e, 0xdc, 0xdc, 0xbd, 0xf1, 0xac, 0x18, 0x92, 0x38, 0xef, 0x39, 0x9c, 0x9e, 0x46, 0x1c, + 0x44, 0xca, 0x50, 0x18, 0x01, 0xbe, 0x01, 0x32, 0xbe, 0xdd, 0x50, 0x14, 0xa1, 0xa8, 0x54, 0x32, + 0x87, 0xfb, 0xbb, 0x48, 0xc8, 0xd7, 0x8e, 0x15, 0xf7, 0x96, 0x2e, 0xe0, 0x12, 0xc8, 0xb4, 0xc9, + 0x69, 0xb8, 0x67, 0x48, 0x7c, 0xc2, 0x1a, 0xc8, 0xf5, 0x04, 0x2d, 0x57, 0x75, 0xbe, 0x37, 0x3d, + 0xd3, 0x88, 0xca, 0xa3, 0xd0, 0x74, 0x3b, 0xfd, 0x50, 0xab, 0xfe, 0xa1, 0x81, 0xbb, 0x13, 0x07, + 0x52, 0x10, 0x25, 0xdc, 0xe9, 0xb8, 0x27, 0xa4, 0x21, 0x63, 0xe7, 0x23, 0xa2, 0xb4, 0x13, 0x8a, + 0xd1, 0x00, 0x87, 0x6f, 0x81, 0x39, 0x4a, 0x30, 0x73, 0x1d, 0x45, 0xce, 0x86, 0xb3, 0x8c, 0xa4, + 0x14, 0x29, 0x14, 0xee, 0x80, 0x45, 0x22, 0xc2, 0xcb, 0xe4, 0xf6, 0x28, 0x75, 0x07, 0x1d, 0x5b, + 0x55, 0x06, 0x8b, 0x7b, 0xa3, 0x30, 0xba, 0xac, 0x2f, 0x42, 0x35, 0x88, 0x63, 0x93, 0x86, 0x64, + 0x6f, 0xf9, 0x28, 0xd4, 0xae, 0x94, 0x22, 0x85, 0x56, 0xff, 0x49, 0x03, 0x7d, 0xd2, 0xb3, 0x07, + 0xdb, 0x11, 0x8b, 0x91, 0xa0, 0x24, 0x52, 0xc5, 0x4d, 0xe3, 0xfa, 0x2b, 0x23, 0xcc, 0x6a, 0x2b, + 0x2a, 0xf6, 0x42, 0x5c, 0x1a, 0x63, 0x3e, 0xf2, 0x08, 0x4f, 0xc0, 0x92, 0x33, 0x4a, 0xb9, 0x43, + 0x4e, 0x56, 0xdc, 0xdc, 0x98, 0x69, 0x41, 0x64, 0x48, 0x5d, 0x85, 0x5c, 0xba, 0x04, 0x30, 0x34, + 0x16, 0x04, 0x6e, 0x02, 0x60, 0x3b, 0x96, 0xdb, 0xf5, 0x3a, 0x84, 0x13, 0x59, 0xe8, 0x7c, 0xf4, + 0x5a, 0xee, 0x0f, 0x11, 0x14, 0xd3, 0x4a, 0xea, 0x50, 0x76, 0xb6, 0x0e, 0xd5, 0xee, 0x9f, 0x5d, + 0x94, 0x52, 0x2f, 0x2f, 0x4a, 0xa9, 0xf3, 0x8b, 0x52, 0xea, 0x45, 0x50, 0xd2, 0xce, 0x82, 0x92, + 0xf6, 0x32, 0x28, 0x69, 0xe7, 0x41, 0x49, 0xfb, 0x2b, 0x28, 0x69, 0x3f, 0xfc, 0x5d, 0x4a, 0x7d, + 0x36, 0xaf, 0x6e, 0xf8, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xcc, 0xb3, 0x5e, 0x05, 0xd9, 0x0f, + 0x00, 0x00, } diff --git a/vendor/k8s.io/api/authorization/v1beta1/generated.proto b/vendor/k8s.io/api/authorization/v1beta1/generated.proto index 9e9942f367a1..59df0b6d44fa 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/generated.proto +++ b/vendor/k8s.io/api/authorization/v1beta1/generated.proto @@ -121,7 +121,8 @@ message ResourceRule { // +optional repeated string apiGroups = 2; - // Resources is a list 
of resources this rule applies to. ResourceAll represents all resources. "*" means all. + // Resources is a list of resources this rule applies to. "*" means all in the specified apiGroups. + // "*/foo" represents the subresource 'foo' for all resources in the specified apiGroups. // +optional repeated string resources = 3; @@ -225,9 +226,16 @@ message SubjectAccessReviewSpec { // SubjectAccessReviewStatus message SubjectAccessReviewStatus { - // Allowed is required. True if the action would be allowed, false otherwise. + // Allowed is required. True if the action would be allowed, false otherwise. optional bool allowed = 1; + // Denied is optional. True if the action would be denied, otherwise + // false. If both allowed is false and denied is false, then the + // authorizer has no opinion on whether to authorize the action. Denied + // may not be true if Allowed is true. + // +optional + optional bool denied = 4; + // Reason is optional. It indicates why a request was allowed or denied. // +optional optional string reason = 2; diff --git a/vendor/k8s.io/api/authorization/v1beta1/register.go b/vendor/k8s.io/api/authorization/v1beta1/register.go index d8116d5a47ac..84255dd6c90f 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/register.go +++ b/vendor/k8s.io/api/authorization/v1beta1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &SelfSubjectRulesReview{}, diff --git a/vendor/k8s.io/api/authorization/v1beta1/types.go b/vendor/k8s.io/api/authorization/v1beta1/types.go index a0659d519c04..618ff8c0f1e5 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/types.go +++ b/vendor/k8s.io/api/authorization/v1beta1/types.go @@ -169,8 +169,14 @@ type SelfSubjectAccessReviewSpec struct { // SubjectAccessReviewStatus type SubjectAccessReviewStatus struct { - // Allowed is required. True if the action would be allowed, false otherwise. + // Allowed is required. True if the action would be allowed, false otherwise. Allowed bool `json:"allowed" protobuf:"varint,1,opt,name=allowed"` + // Denied is optional. True if the action would be denied, otherwise + // false. If both allowed is false and denied is false, then the + // authorizer has no opinion on whether to authorize the action. Denied + // may not be true if Allowed is true. + // +optional + Denied bool `json:"denied,omitempty" protobuf:"varint,4,opt,name=denied"` // Reason is optional. It indicates why a request was allowed or denied. // +optional Reason string `json:"reason,omitempty" protobuf:"bytes,2,opt,name=reason"` @@ -241,7 +247,8 @@ type ResourceRule struct { // the enumerated resources in any API group will be allowed. "*" means all. // +optional APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,2,rep,name=apiGroups"` - // Resources is a list of resources this rule applies to. ResourceAll represents all resources. "*" means all. + // Resources is a list of resources this rule applies to. "*" means all in the specified apiGroups. + // "*/foo" represents the subresource 'foo' for all resources in the specified apiGroups. // +optional Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. "*" means all. 
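The hunks above add a Denied field to SubjectAccessReviewStatus in both authorization/v1 and authorization/v1beta1, turning the status into a tri-state: allowed, explicitly denied, or no opinion when both flags are false. As a rough illustration of how a caller might interpret the new field, here is a minimal Go sketch against the updated k8s.io/api/authorization/v1 types; it assumes the vendored module is on the import path, and the decision helper and its return strings are hypothetical, not part of the vendored API.

package main

import (
	"fmt"

	authorizationv1 "k8s.io/api/authorization/v1"
)

// decision is a hypothetical helper that maps the tri-state status
// (Allowed / Denied / neither) onto a human-readable verdict.
func decision(st authorizationv1.SubjectAccessReviewStatus) string {
	switch {
	case st.Allowed:
		// Per the field comment, Denied may not be true when Allowed is true.
		return "allowed"
	case st.Denied:
		// Explicit denial from the authorizer.
		return "denied"
	default:
		// Both flags false: the authorizer has no opinion, so another
		// authorizer in a chain may still decide the request.
		return "no opinion"
	}
}

func main() {
	fmt.Println(decision(authorizationv1.SubjectAccessReviewStatus{Allowed: true}))
	fmt.Println(decision(authorizationv1.SubjectAccessReviewStatus{Denied: true, Reason: "not permitted"}))
	fmt.Println(decision(authorizationv1.SubjectAccessReviewStatus{}))
}

This sketch only exercises the struct fields introduced by the diff; real authorization webhooks would populate the status server-side rather than constructing it by hand.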
diff --git a/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go index 1d8bb9849ba7..2371b21c676b 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go @@ -76,7 +76,7 @@ var map_ResourceRule = map[string]string{ "": "ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.", "verbs": "Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. \"*\" means all.", "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. \"*\" means all.", - "resources": "Resources is a list of resources this rule applies to. ResourceAll represents all resources. \"*\" means all.", + "resources": "Resources is a list of resources this rule applies to. \"*\" means all in the specified apiGroups.\n \"*/foo\" represents the subresource 'foo' for all resources in the specified apiGroups.", "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. \"*\" means all.", } @@ -148,7 +148,8 @@ func (SubjectAccessReviewSpec) SwaggerDoc() map[string]string { var map_SubjectAccessReviewStatus = map[string]string{ "": "SubjectAccessReviewStatus", - "allowed": "Allowed is required. True if the action would be allowed, false otherwise.", + "allowed": "Allowed is required. True if the action would be allowed, false otherwise.", + "denied": "Denied is optional. True if the action would be denied, otherwise false. If both allowed is false and denied is false, then the authorizer has no opinion on whether to authorize the action. Denied may not be true if Allowed is true.", "reason": "Reason is optional. It indicates why a request was allowed or denied.", "evaluationError": "EvaluationError is an indication that some error occurred during the authorization check. It is entirely possible to get an error and be able to continue determine authorization status in spite of it. For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request.", } diff --git a/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go index aeb77ddbc861..fcb0067a3818 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go @@ -21,76 +21,9 @@ limitations under the License. package v1beta1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LocalSubjectAccessReview).DeepCopyInto(out.(*LocalSubjectAccessReview)) - return nil - }, InType: reflect.TypeOf(&LocalSubjectAccessReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NonResourceAttributes).DeepCopyInto(out.(*NonResourceAttributes)) - return nil - }, InType: reflect.TypeOf(&NonResourceAttributes{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NonResourceRule).DeepCopyInto(out.(*NonResourceRule)) - return nil - }, InType: reflect.TypeOf(&NonResourceRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceAttributes).DeepCopyInto(out.(*ResourceAttributes)) - return nil - }, InType: reflect.TypeOf(&ResourceAttributes{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceRule).DeepCopyInto(out.(*ResourceRule)) - return nil - }, InType: reflect.TypeOf(&ResourceRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SelfSubjectAccessReview).DeepCopyInto(out.(*SelfSubjectAccessReview)) - return nil - }, InType: reflect.TypeOf(&SelfSubjectAccessReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SelfSubjectAccessReviewSpec).DeepCopyInto(out.(*SelfSubjectAccessReviewSpec)) - return nil - }, InType: reflect.TypeOf(&SelfSubjectAccessReviewSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SelfSubjectRulesReview).DeepCopyInto(out.(*SelfSubjectRulesReview)) - return nil - }, InType: reflect.TypeOf(&SelfSubjectRulesReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SelfSubjectRulesReviewSpec).DeepCopyInto(out.(*SelfSubjectRulesReviewSpec)) - return nil - }, InType: reflect.TypeOf(&SelfSubjectRulesReviewSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SubjectAccessReview).DeepCopyInto(out.(*SubjectAccessReview)) - return nil - }, InType: reflect.TypeOf(&SubjectAccessReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SubjectAccessReviewSpec).DeepCopyInto(out.(*SubjectAccessReviewSpec)) - return nil - }, InType: reflect.TypeOf(&SubjectAccessReviewSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SubjectAccessReviewStatus).DeepCopyInto(out.(*SubjectAccessReviewStatus)) - return nil - }, InType: reflect.TypeOf(&SubjectAccessReviewStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SubjectRulesReviewStatus).DeepCopyInto(out.(*SubjectRulesReviewStatus)) - return nil - }, InType: reflect.TypeOf(&SubjectRulesReviewStatus{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *LocalSubjectAccessReview) DeepCopyInto(out *LocalSubjectAccessReview) { *out = *in diff --git a/vendor/k8s.io/api/autoscaling/v1/BUILD b/vendor/k8s.io/api/autoscaling/v1/BUILD index 3eb3cb296c88..ccf587be09e5 100644 --- a/vendor/k8s.io/api/autoscaling/v1/BUILD +++ b/vendor/k8s.io/api/autoscaling/v1/BUILD @@ -15,12 +15,12 @@ go_library( "types_swagger_doc_generated.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/api/autoscaling/v1", deps = [ "//vendor/github.com/gogo/protobuf/proto:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], diff --git a/vendor/k8s.io/api/autoscaling/v1/doc.go b/vendor/k8s.io/api/autoscaling/v1/doc.go index d567450ef24b..70cb1607b67a 100644 --- a/vendor/k8s.io/api/autoscaling/v1/doc.go +++ b/vendor/k8s.io/api/autoscaling/v1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true package v1 diff --git a/vendor/k8s.io/api/autoscaling/v1/register.go b/vendor/k8s.io/api/autoscaling/v1/register.go index 521c0e4a3dc6..8dfe361edf54 100644 --- a/vendor/k8s.io/api/autoscaling/v1/register.go +++ b/vendor/k8s.io/api/autoscaling/v1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &HorizontalPodAutoscaler{}, diff --git a/vendor/k8s.io/api/autoscaling/v1/types.go b/vendor/k8s.io/api/autoscaling/v1/types.go index a18c3730c7e9..e726c140399a 100644 --- a/vendor/k8s.io/api/autoscaling/v1/types.go +++ b/vendor/k8s.io/api/autoscaling/v1/types.go @@ -264,7 +264,7 @@ var ( // ScalingActive indicates that the HPA controller is able to scale if necessary: // it's correctly configured, can fetch the desired metrics, and isn't disabled. ScalingActive HorizontalPodAutoscalerConditionType = "ScalingActive" - // AbleToScale indicates a lack of transient issues which prevent scaling from occuring, + // AbleToScale indicates a lack of transient issues which prevent scaling from occurring, // such as being in a backoff window, or being unable to access/update the target scale. AbleToScale HorizontalPodAutoscalerConditionType = "AbleToScale" // ScalingLimited indicates that the calculated scale based on metrics would be above or diff --git a/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go index 20848f20c840..de4e6daa3825 100644 --- a/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go @@ -23,92 +23,9 @@ package v1 import ( resource "k8s.io/apimachinery/pkg/api/resource" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. 
Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CrossVersionObjectReference).DeepCopyInto(out.(*CrossVersionObjectReference)) - return nil - }, InType: reflect.TypeOf(&CrossVersionObjectReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HorizontalPodAutoscaler).DeepCopyInto(out.(*HorizontalPodAutoscaler)) - return nil - }, InType: reflect.TypeOf(&HorizontalPodAutoscaler{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HorizontalPodAutoscalerCondition).DeepCopyInto(out.(*HorizontalPodAutoscalerCondition)) - return nil - }, InType: reflect.TypeOf(&HorizontalPodAutoscalerCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HorizontalPodAutoscalerList).DeepCopyInto(out.(*HorizontalPodAutoscalerList)) - return nil - }, InType: reflect.TypeOf(&HorizontalPodAutoscalerList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HorizontalPodAutoscalerSpec).DeepCopyInto(out.(*HorizontalPodAutoscalerSpec)) - return nil - }, InType: reflect.TypeOf(&HorizontalPodAutoscalerSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HorizontalPodAutoscalerStatus).DeepCopyInto(out.(*HorizontalPodAutoscalerStatus)) - return nil - }, InType: reflect.TypeOf(&HorizontalPodAutoscalerStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*MetricSpec).DeepCopyInto(out.(*MetricSpec)) - return nil - }, InType: reflect.TypeOf(&MetricSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*MetricStatus).DeepCopyInto(out.(*MetricStatus)) - return nil - }, InType: reflect.TypeOf(&MetricStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ObjectMetricSource).DeepCopyInto(out.(*ObjectMetricSource)) - return nil - }, InType: reflect.TypeOf(&ObjectMetricSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ObjectMetricStatus).DeepCopyInto(out.(*ObjectMetricStatus)) - return nil - }, InType: reflect.TypeOf(&ObjectMetricStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodsMetricSource).DeepCopyInto(out.(*PodsMetricSource)) - return nil - }, InType: reflect.TypeOf(&PodsMetricSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodsMetricStatus).DeepCopyInto(out.(*PodsMetricStatus)) - return nil - }, InType: reflect.TypeOf(&PodsMetricStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceMetricSource).DeepCopyInto(out.(*ResourceMetricSource)) - return nil - }, InType: reflect.TypeOf(&ResourceMetricSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out 
interface{}, c *conversion.Cloner) error { - in.(*ResourceMetricStatus).DeepCopyInto(out.(*ResourceMetricStatus)) - return nil - }, InType: reflect.TypeOf(&ResourceMetricStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Scale).DeepCopyInto(out.(*Scale)) - return nil - }, InType: reflect.TypeOf(&Scale{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ScaleSpec).DeepCopyInto(out.(*ScaleSpec)) - return nil - }, InType: reflect.TypeOf(&ScaleSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ScaleStatus).DeepCopyInto(out.(*ScaleStatus)) - return nil - }, InType: reflect.TypeOf(&ScaleStatus{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CrossVersionObjectReference) DeepCopyInto(out *CrossVersionObjectReference) { *out = *in diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/BUILD b/vendor/k8s.io/api/autoscaling/v2beta1/BUILD index 3eb3cb296c88..32fc333eb5d5 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/BUILD +++ b/vendor/k8s.io/api/autoscaling/v2beta1/BUILD @@ -15,12 +15,12 @@ go_library( "types_swagger_doc_generated.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/api/autoscaling/v2beta1", deps = [ "//vendor/github.com/gogo/protobuf/proto:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/doc.go b/vendor/k8s.io/api/autoscaling/v2beta1/doc.go index 4758ad848661..18dbf5a5a591 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/doc.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true package v2beta1 diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/register.go b/vendor/k8s.io/api/autoscaling/v2beta1/register.go index 9025b421c822..12d697f0130d 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/register.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &HorizontalPodAutoscaler{}, diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/types.go b/vendor/k8s.io/api/autoscaling/v2beta1/types.go index d7b262ffa25a..9c72ae25ca7f 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/types.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/types.go @@ -180,7 +180,7 @@ var ( // ScalingActive indicates that the HPA controller is able to scale if necessary: // it's correctly configured, can fetch the desired metrics, and isn't disabled. 
ScalingActive HorizontalPodAutoscalerConditionType = "ScalingActive" - // AbleToScale indicates a lack of transient issues which prevent scaling from occuring, + // AbleToScale indicates a lack of transient issues which prevent scaling from occurring, // such as being in a backoff window, or being unable to access/update the target scale. AbleToScale HorizontalPodAutoscalerConditionType = "AbleToScale" // ScalingLimited indicates that the calculated scale based on metrics would be above or diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go index b538e5b40c9e..0bb3dd30c635 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go @@ -23,80 +23,9 @@ package v2beta1 import ( resource "k8s.io/apimachinery/pkg/api/resource" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CrossVersionObjectReference).DeepCopyInto(out.(*CrossVersionObjectReference)) - return nil - }, InType: reflect.TypeOf(&CrossVersionObjectReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HorizontalPodAutoscaler).DeepCopyInto(out.(*HorizontalPodAutoscaler)) - return nil - }, InType: reflect.TypeOf(&HorizontalPodAutoscaler{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HorizontalPodAutoscalerCondition).DeepCopyInto(out.(*HorizontalPodAutoscalerCondition)) - return nil - }, InType: reflect.TypeOf(&HorizontalPodAutoscalerCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HorizontalPodAutoscalerList).DeepCopyInto(out.(*HorizontalPodAutoscalerList)) - return nil - }, InType: reflect.TypeOf(&HorizontalPodAutoscalerList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HorizontalPodAutoscalerSpec).DeepCopyInto(out.(*HorizontalPodAutoscalerSpec)) - return nil - }, InType: reflect.TypeOf(&HorizontalPodAutoscalerSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HorizontalPodAutoscalerStatus).DeepCopyInto(out.(*HorizontalPodAutoscalerStatus)) - return nil - }, InType: reflect.TypeOf(&HorizontalPodAutoscalerStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*MetricSpec).DeepCopyInto(out.(*MetricSpec)) - return nil - }, InType: reflect.TypeOf(&MetricSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*MetricStatus).DeepCopyInto(out.(*MetricStatus)) - return nil - }, InType: reflect.TypeOf(&MetricStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out 
interface{}, c *conversion.Cloner) error { - in.(*ObjectMetricSource).DeepCopyInto(out.(*ObjectMetricSource)) - return nil - }, InType: reflect.TypeOf(&ObjectMetricSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ObjectMetricStatus).DeepCopyInto(out.(*ObjectMetricStatus)) - return nil - }, InType: reflect.TypeOf(&ObjectMetricStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodsMetricSource).DeepCopyInto(out.(*PodsMetricSource)) - return nil - }, InType: reflect.TypeOf(&PodsMetricSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodsMetricStatus).DeepCopyInto(out.(*PodsMetricStatus)) - return nil - }, InType: reflect.TypeOf(&PodsMetricStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceMetricSource).DeepCopyInto(out.(*ResourceMetricSource)) - return nil - }, InType: reflect.TypeOf(&ResourceMetricSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceMetricStatus).DeepCopyInto(out.(*ResourceMetricStatus)) - return nil - }, InType: reflect.TypeOf(&ResourceMetricStatus{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CrossVersionObjectReference) DeepCopyInto(out *CrossVersionObjectReference) { *out = *in diff --git a/vendor/k8s.io/api/batch/v1/BUILD b/vendor/k8s.io/api/batch/v1/BUILD index 5cc4f6f7b339..a7ca7a23267e 100644 --- a/vendor/k8s.io/api/batch/v1/BUILD +++ b/vendor/k8s.io/api/batch/v1/BUILD @@ -15,11 +15,11 @@ go_library( "types_swagger_doc_generated.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/api/batch/v1", deps = [ "//vendor/github.com/gogo/protobuf/proto:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], diff --git a/vendor/k8s.io/api/batch/v1/doc.go b/vendor/k8s.io/api/batch/v1/doc.go index d567450ef24b..70cb1607b67a 100644 --- a/vendor/k8s.io/api/batch/v1/doc.go +++ b/vendor/k8s.io/api/batch/v1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true package v1 diff --git a/vendor/k8s.io/api/batch/v1/generated.proto b/vendor/k8s.io/api/batch/v1/generated.proto index 08635ad2b2c1..0f43d2fd5a50 100644 --- a/vendor/k8s.io/api/batch/v1/generated.proto +++ b/vendor/k8s.io/api/batch/v1/generated.proto @@ -128,7 +128,7 @@ message JobSpec { // and other jobs to not function correctly. However, You may see // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` // API. 
- // More info: https://git.k8s.io/community/contributors/design-proposals/selector-generation.md + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector // +optional optional bool manualSelector = 5; diff --git a/vendor/k8s.io/api/batch/v1/register.go b/vendor/k8s.io/api/batch/v1/register.go index 2cdfc98ca42a..32fa51f0e4a3 100644 --- a/vendor/k8s.io/api/batch/v1/register.go +++ b/vendor/k8s.io/api/batch/v1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Job{}, diff --git a/vendor/k8s.io/api/batch/v1/types.go b/vendor/k8s.io/api/batch/v1/types.go index 4f3b83e8a8d1..84abb1a9015a 100644 --- a/vendor/k8s.io/api/batch/v1/types.go +++ b/vendor/k8s.io/api/batch/v1/types.go @@ -107,7 +107,7 @@ type JobSpec struct { // and other jobs to not function correctly. However, You may see // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` // API. - // More info: https://git.k8s.io/community/contributors/design-proposals/selector-generation.md + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector // +optional ManualSelector *bool `json:"manualSelector,omitempty" protobuf:"varint,5,opt,name=manualSelector"` diff --git a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go index 53b2d634fd74..0ddf4b116442 100644 --- a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go @@ -69,7 +69,7 @@ var map_JobSpec = map[string]string{ "activeDeadlineSeconds": "Specifies the duration in seconds relative to the startTime that the job may be active before the system tries to terminate it; value must be positive integer", "backoffLimit": "Specifies the number of retries before marking this job failed. Defaults to 6", "selector": "A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "manualSelector": "manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://git.k8s.io/community/contributors/design-proposals/selector-generation.md", + "manualSelector": "manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. 
However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector", "template": "Describes the pod that will be created when executing a job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", } diff --git a/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go index 8738bd5d2c36..fa9ff4f96cb6 100644 --- a/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go @@ -22,44 +22,9 @@ package v1 import ( meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Job).DeepCopyInto(out.(*Job)) - return nil - }, InType: reflect.TypeOf(&Job{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*JobCondition).DeepCopyInto(out.(*JobCondition)) - return nil - }, InType: reflect.TypeOf(&JobCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*JobList).DeepCopyInto(out.(*JobList)) - return nil - }, InType: reflect.TypeOf(&JobList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*JobSpec).DeepCopyInto(out.(*JobSpec)) - return nil - }, InType: reflect.TypeOf(&JobSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*JobStatus).DeepCopyInto(out.(*JobStatus)) - return nil - }, InType: reflect.TypeOf(&JobStatus{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Job) DeepCopyInto(out *Job) { *out = *in diff --git a/vendor/k8s.io/api/batch/v1beta1/BUILD b/vendor/k8s.io/api/batch/v1beta1/BUILD index d421dd6227c3..3f0197b9c6c2 100644 --- a/vendor/k8s.io/api/batch/v1beta1/BUILD +++ b/vendor/k8s.io/api/batch/v1beta1/BUILD @@ -1,7 +1,5 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", @@ -17,13 +15,12 @@ go_library( "types_swagger_doc_generated.go", "zz_generated.deepcopy.go", ], - tags = ["automanaged"], + importpath = "k8s.io/api/batch/v1beta1", deps = [ "//vendor/github.com/gogo/protobuf/proto:go_default_library", "//vendor/k8s.io/api/batch/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], diff --git a/vendor/k8s.io/api/batch/v1beta1/doc.go b/vendor/k8s.io/api/batch/v1beta1/doc.go index 882b0295cd63..5dd34c6d0660 100644 --- a/vendor/k8s.io/api/batch/v1beta1/doc.go +++ b/vendor/k8s.io/api/batch/v1beta1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true package v1beta1 diff --git a/vendor/k8s.io/api/batch/v1beta1/generated.proto b/vendor/k8s.io/api/batch/v1beta1/generated.proto index f7b871632b2c..11fa4e2aea15 100644 --- a/vendor/k8s.io/api/batch/v1beta1/generated.proto +++ b/vendor/k8s.io/api/batch/v1beta1/generated.proto @@ -71,7 +71,10 @@ message CronJobSpec { optional int64 startingDeadlineSeconds = 2; // Specifies how to treat concurrent executions of a Job. - // Defaults to Allow. + // Valid values are: + // - "Allow" (default): allows CronJobs to run concurrently; + // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; + // - "Replace": cancels currently running job and replaces it with a new one // +optional optional string concurrencyPolicy = 3; diff --git a/vendor/k8s.io/api/batch/v1beta1/register.go b/vendor/k8s.io/api/batch/v1beta1/register.go index 569817d3e4fd..226de49f4d2a 100644 --- a/vendor/k8s.io/api/batch/v1beta1/register.go +++ b/vendor/k8s.io/api/batch/v1beta1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &JobTemplate{}, diff --git a/vendor/k8s.io/api/batch/v1beta1/types.go b/vendor/k8s.io/api/batch/v1beta1/types.go index ad13bba2f921..cb5c9bad2946 100644 --- a/vendor/k8s.io/api/batch/v1beta1/types.go +++ b/vendor/k8s.io/api/batch/v1beta1/types.go @@ -100,7 +100,10 @@ type CronJobSpec struct { StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty" protobuf:"varint,2,opt,name=startingDeadlineSeconds"` // Specifies how to treat concurrent executions of a Job. - // Defaults to Allow. 
+ // Valid values are: + // - "Allow" (default): allows CronJobs to run concurrently; + // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; + // - "Replace": cancels currently running job and replaces it with a new one // +optional ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty" protobuf:"bytes,3,opt,name=concurrencyPolicy,casttype=ConcurrencyPolicy"` diff --git a/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go index f4c74e4327e9..3b53ac08a7d2 100644 --- a/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go @@ -52,7 +52,7 @@ var map_CronJobSpec = map[string]string{ "": "CronJobSpec describes how the job execution will look like and when it will actually run.", "schedule": "The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.", "startingDeadlineSeconds": "Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones.", - "concurrencyPolicy": "Specifies how to treat concurrent executions of a Job. Defaults to Allow.", + "concurrencyPolicy": "Specifies how to treat concurrent executions of a Job. Valid values are: - \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one", "suspend": "This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false.", "jobTemplate": "Specifies the job that will be created when executing a CronJob.", "successfulJobsHistoryLimit": "The number of successful finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. Defaults to 3.", diff --git a/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go index c730ca982e83..0f8562f8f438 100644 --- a/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go @@ -23,48 +23,9 @@ package v1beta1 import ( v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CronJob).DeepCopyInto(out.(*CronJob)) - return nil - }, InType: reflect.TypeOf(&CronJob{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CronJobList).DeepCopyInto(out.(*CronJobList)) - return nil - }, InType: reflect.TypeOf(&CronJobList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CronJobSpec).DeepCopyInto(out.(*CronJobSpec)) - return nil - }, InType: reflect.TypeOf(&CronJobSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CronJobStatus).DeepCopyInto(out.(*CronJobStatus)) - return nil - }, InType: reflect.TypeOf(&CronJobStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*JobTemplate).DeepCopyInto(out.(*JobTemplate)) - return nil - }, InType: reflect.TypeOf(&JobTemplate{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*JobTemplateSpec).DeepCopyInto(out.(*JobTemplateSpec)) - return nil - }, InType: reflect.TypeOf(&JobTemplateSpec{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CronJob) DeepCopyInto(out *CronJob) { *out = *in diff --git a/vendor/k8s.io/api/batch/v2alpha1/BUILD b/vendor/k8s.io/api/batch/v2alpha1/BUILD index 9bec42868bd7..0fc0ab57ac5e 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/BUILD +++ b/vendor/k8s.io/api/batch/v2alpha1/BUILD @@ -15,12 +15,12 @@ go_library( "types_swagger_doc_generated.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/api/batch/v2alpha1", deps = [ "//vendor/github.com/gogo/protobuf/proto:go_default_library", "//vendor/k8s.io/api/batch/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], diff --git a/vendor/k8s.io/api/batch/v2alpha1/doc.go b/vendor/k8s.io/api/batch/v2alpha1/doc.go index 81396d1ee2dc..0c4ec7f7f79f 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/doc.go +++ b/vendor/k8s.io/api/batch/v2alpha1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true package v2alpha1 diff --git a/vendor/k8s.io/api/batch/v2alpha1/generated.proto b/vendor/k8s.io/api/batch/v2alpha1/generated.proto index 0bdb247e3119..cc90d4190969 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/generated.proto +++ b/vendor/k8s.io/api/batch/v2alpha1/generated.proto @@ -71,7 +71,10 @@ message CronJobSpec { optional int64 startingDeadlineSeconds = 2; // Specifies how to treat concurrent executions of a Job. - // Defaults to Allow. 
+ // Valid values are: + // - "Allow" (default): allows CronJobs to run concurrently; + // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; + // - "Replace": cancels currently running job and replaces it with a new one // +optional optional string concurrencyPolicy = 3; diff --git a/vendor/k8s.io/api/batch/v2alpha1/register.go b/vendor/k8s.io/api/batch/v2alpha1/register.go index 4b02c0f481c2..ac7fa5087a94 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/register.go +++ b/vendor/k8s.io/api/batch/v2alpha1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &JobTemplate{}, diff --git a/vendor/k8s.io/api/batch/v2alpha1/types.go b/vendor/k8s.io/api/batch/v2alpha1/types.go index 451df40e6373..cccff94ff2ac 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/types.go +++ b/vendor/k8s.io/api/batch/v2alpha1/types.go @@ -100,7 +100,10 @@ type CronJobSpec struct { StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty" protobuf:"varint,2,opt,name=startingDeadlineSeconds"` // Specifies how to treat concurrent executions of a Job. - // Defaults to Allow. + // Valid values are: + // - "Allow" (default): allows CronJobs to run concurrently; + // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; + // - "Replace": cancels currently running job and replaces it with a new one // +optional ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty" protobuf:"bytes,3,opt,name=concurrencyPolicy,casttype=ConcurrencyPolicy"` diff --git a/vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go index 2b3ed4c56020..d166b807faea 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go @@ -52,7 +52,7 @@ var map_CronJobSpec = map[string]string{ "": "CronJobSpec describes how the job execution will look like and when it will actually run.", "schedule": "The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.", "startingDeadlineSeconds": "Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones.", - "concurrencyPolicy": "Specifies how to treat concurrent executions of a Job. Defaults to Allow.", + "concurrencyPolicy": "Specifies how to treat concurrent executions of a Job. Valid values are: - \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one", "suspend": "This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false.", "jobTemplate": "Specifies the job that will be created when executing a CronJob.", "successfulJobsHistoryLimit": "The number of successful finished jobs to retain. 
This is a pointer to distinguish between explicit zero and not specified.", diff --git a/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go index b572f9ca3aed..5474235d1994 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go @@ -23,48 +23,9 @@ package v2alpha1 import ( v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CronJob).DeepCopyInto(out.(*CronJob)) - return nil - }, InType: reflect.TypeOf(&CronJob{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CronJobList).DeepCopyInto(out.(*CronJobList)) - return nil - }, InType: reflect.TypeOf(&CronJobList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CronJobSpec).DeepCopyInto(out.(*CronJobSpec)) - return nil - }, InType: reflect.TypeOf(&CronJobSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CronJobStatus).DeepCopyInto(out.(*CronJobStatus)) - return nil - }, InType: reflect.TypeOf(&CronJobStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*JobTemplate).DeepCopyInto(out.(*JobTemplate)) - return nil - }, InType: reflect.TypeOf(&JobTemplate{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*JobTemplateSpec).DeepCopyInto(out.(*JobTemplateSpec)) - return nil - }, InType: reflect.TypeOf(&JobTemplateSpec{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *CronJob) DeepCopyInto(out *CronJob) { *out = *in diff --git a/vendor/k8s.io/api/certificates/v1beta1/BUILD b/vendor/k8s.io/api/certificates/v1beta1/BUILD index 745ff2609dae..4c94dd064816 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/BUILD +++ b/vendor/k8s.io/api/certificates/v1beta1/BUILD @@ -15,11 +15,11 @@ go_library( "types_swagger_doc_generated.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/api/certificates/v1beta1", deps = [ "//vendor/github.com/gogo/protobuf/proto:go_default_library", "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], diff --git a/vendor/k8s.io/api/certificates/v1beta1/doc.go b/vendor/k8s.io/api/certificates/v1beta1/doc.go index 3f4281ec32e1..8250a24e484b 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/doc.go +++ b/vendor/k8s.io/api/certificates/v1beta1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true // +groupName=certificates.k8s.io diff --git a/vendor/k8s.io/api/certificates/v1beta1/register.go b/vendor/k8s.io/api/certificates/v1beta1/register.go index b5ba3d7eca85..b4f3af9b9ca0 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/register.go +++ b/vendor/k8s.io/api/certificates/v1beta1/register.go @@ -46,7 +46,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &CertificateSigningRequest{}, diff --git a/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go index 6ecc13118167..de0715db6247 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go @@ -21,44 +21,9 @@ limitations under the License. package v1beta1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CertificateSigningRequest).DeepCopyInto(out.(*CertificateSigningRequest)) - return nil - }, InType: reflect.TypeOf(&CertificateSigningRequest{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CertificateSigningRequestCondition).DeepCopyInto(out.(*CertificateSigningRequestCondition)) - return nil - }, InType: reflect.TypeOf(&CertificateSigningRequestCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CertificateSigningRequestList).DeepCopyInto(out.(*CertificateSigningRequestList)) - return nil - }, InType: reflect.TypeOf(&CertificateSigningRequestList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CertificateSigningRequestSpec).DeepCopyInto(out.(*CertificateSigningRequestSpec)) - return nil - }, InType: reflect.TypeOf(&CertificateSigningRequestSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CertificateSigningRequestStatus).DeepCopyInto(out.(*CertificateSigningRequestStatus)) - return nil - }, InType: reflect.TypeOf(&CertificateSigningRequestStatus{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CertificateSigningRequest) DeepCopyInto(out *CertificateSigningRequest) { *out = *in diff --git a/vendor/k8s.io/api/core/v1/BUILD b/vendor/k8s.io/api/core/v1/BUILD index bc5e9549e9aa..2ffdd3453094 100644 --- a/vendor/k8s.io/api/core/v1/BUILD +++ b/vendor/k8s.io/api/core/v1/BUILD @@ -12,6 +12,7 @@ go_test( "taint_test.go", "toleration_test.go", ], + importpath = "k8s.io/api/core/v1", library = ":go_default_library", ) @@ -31,12 +32,12 @@ go_library( "types_swagger_doc_generated.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/api/core/v1", deps = [ "//vendor/github.com/gogo/protobuf/proto:go_default_library", "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", diff --git a/vendor/k8s.io/api/core/v1/annotation_key_constants.go b/vendor/k8s.io/api/core/v1/annotation_key_constants.go index e623913fdd12..de4e3cee4473 100644 --- a/vendor/k8s.io/api/core/v1/annotation_key_constants.go +++ b/vendor/k8s.io/api/core/v1/annotation_key_constants.go @@ -45,12 +45,6 @@ const ( // to one container of a pod. SeccompContainerAnnotationKeyPrefix string = "container.seccomp.security.alpha.kubernetes.io/" - // CreatedByAnnotation represents the key used to store the spec(json) - // used to create the resource. - // This field is deprecated in favor of ControllerRef (see #44407). - // TODO(#50720): Remove this field in v1.9. - CreatedByAnnotation = "kubernetes.io/created-by" - // PreferAvoidPodsAnnotationKey represents the key of preferAvoidPods data (json serialized) // in the Annotations of a Node. 
PreferAvoidPodsAnnotationKey string = "scheduler.alpha.kubernetes.io/preferAvoidPods" diff --git a/vendor/k8s.io/api/core/v1/doc.go b/vendor/k8s.io/api/core/v1/doc.go index 9f9a27c3c4eb..1c2c6fc09fa0 100644 --- a/vendor/k8s.io/api/core/v1/doc.go +++ b/vendor/k8s.io/api/core/v1/doc.go @@ -15,7 +15,7 @@ limitations under the License. */ // +k8s:openapi-gen=true -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // Package v1 is the v1 version of the core API. package v1 diff --git a/vendor/k8s.io/api/core/v1/generated.pb.go b/vendor/k8s.io/api/core/v1/generated.pb.go index 6db917375e45..c2fe6bc5ceb9 100644 --- a/vendor/k8s.io/api/core/v1/generated.pb.go +++ b/vendor/k8s.io/api/core/v1/generated.pb.go @@ -33,6 +33,7 @@ limitations under the License. AzureFilePersistentVolumeSource AzureFileVolumeSource Binding + CSIPersistentVolumeSource Capabilities CephFSPersistentVolumeSource CephFSVolumeSource @@ -71,6 +72,7 @@ limitations under the License. EnvVarSource Event EventList + EventSeries EventSource ExecAction FCVolumeSource @@ -84,6 +86,7 @@ limitations under the License. Handler HostAlias HostPathVolumeSource + ISCSIPersistentVolumeSource ISCSIVolumeSource KeyToPath Lifecycle @@ -138,6 +141,8 @@ limitations under the License. PodAntiAffinity PodAttachOptions PodCondition + PodDNSConfig + PodDNSConfigOption PodExecOptions PodList PodLogOptions @@ -158,6 +163,7 @@ limitations under the License. Probe ProjectedVolumeSource QuobyteVolumeSource + RBDPersistentVolumeSource RBDVolumeSource RangeAllocation ReplicationController @@ -172,6 +178,7 @@ limitations under the License. ResourceQuotaStatus ResourceRequirements SELinuxOptions + ScaleIOPersistentVolumeSource ScaleIOVolumeSource Secret SecretEnvSource @@ -198,6 +205,7 @@ limitations under the License. 
Taint Toleration Volume + VolumeDevice VolumeMount VolumeProjection VolumeSource @@ -270,726 +278,766 @@ func (m *Binding) Reset() { *m = Binding{} } func (*Binding) ProtoMessage() {} func (*Binding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (m *CSIPersistentVolumeSource) Reset() { *m = CSIPersistentVolumeSource{} } +func (*CSIPersistentVolumeSource) ProtoMessage() {} +func (*CSIPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{8} +} + func (m *Capabilities) Reset() { *m = Capabilities{} } func (*Capabilities) ProtoMessage() {} -func (*Capabilities) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (*Capabilities) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } func (m *CephFSPersistentVolumeSource) Reset() { *m = CephFSPersistentVolumeSource{} } func (*CephFSPersistentVolumeSource) ProtoMessage() {} func (*CephFSPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{9} + return fileDescriptorGenerated, []int{10} } func (m *CephFSVolumeSource) Reset() { *m = CephFSVolumeSource{} } func (*CephFSVolumeSource) ProtoMessage() {} -func (*CephFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (*CephFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } func (m *CinderVolumeSource) Reset() { *m = CinderVolumeSource{} } func (*CinderVolumeSource) ProtoMessage() {} -func (*CinderVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +func (*CinderVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } func (m *ClientIPConfig) Reset() { *m = ClientIPConfig{} } func (*ClientIPConfig) ProtoMessage() {} -func (*ClientIPConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (*ClientIPConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } func (m *ComponentCondition) Reset() { *m = ComponentCondition{} } func (*ComponentCondition) ProtoMessage() {} -func (*ComponentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +func (*ComponentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } func (m *ComponentStatus) Reset() { *m = ComponentStatus{} } func (*ComponentStatus) ProtoMessage() {} -func (*ComponentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (*ComponentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } func (m *ComponentStatusList) Reset() { *m = ComponentStatusList{} } func (*ComponentStatusList) ProtoMessage() {} -func (*ComponentStatusList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +func (*ComponentStatusList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } func (m *ConfigMap) Reset() { *m = ConfigMap{} } func (*ConfigMap) ProtoMessage() {} -func (*ConfigMap) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func (*ConfigMap) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } func (m *ConfigMapEnvSource) Reset() { *m = ConfigMapEnvSource{} } func (*ConfigMapEnvSource) ProtoMessage() {} -func (*ConfigMapEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +func (*ConfigMapEnvSource) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{18} } func (m *ConfigMapKeySelector) Reset() { *m = ConfigMapKeySelector{} } func (*ConfigMapKeySelector) ProtoMessage() {} -func (*ConfigMapKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (*ConfigMapKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } func (m *ConfigMapList) Reset() { *m = ConfigMapList{} } func (*ConfigMapList) ProtoMessage() {} -func (*ConfigMapList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +func (*ConfigMapList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } func (m *ConfigMapProjection) Reset() { *m = ConfigMapProjection{} } func (*ConfigMapProjection) ProtoMessage() {} -func (*ConfigMapProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } +func (*ConfigMapProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } func (m *ConfigMapVolumeSource) Reset() { *m = ConfigMapVolumeSource{} } func (*ConfigMapVolumeSource) ProtoMessage() {} -func (*ConfigMapVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } +func (*ConfigMapVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } func (m *Container) Reset() { *m = Container{} } func (*Container) ProtoMessage() {} -func (*Container) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } +func (*Container) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } func (m *ContainerImage) Reset() { *m = ContainerImage{} } func (*ContainerImage) ProtoMessage() {} -func (*ContainerImage) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } +func (*ContainerImage) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } func (m *ContainerPort) Reset() { *m = ContainerPort{} } func (*ContainerPort) ProtoMessage() {} -func (*ContainerPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } +func (*ContainerPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } func (m *ContainerState) Reset() { *m = ContainerState{} } func (*ContainerState) ProtoMessage() {} -func (*ContainerState) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +func (*ContainerState) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } func (m *ContainerStateRunning) Reset() { *m = ContainerStateRunning{} } func (*ContainerStateRunning) ProtoMessage() {} -func (*ContainerStateRunning) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +func (*ContainerStateRunning) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } func (m *ContainerStateTerminated) Reset() { *m = ContainerStateTerminated{} } func (*ContainerStateTerminated) ProtoMessage() {} func (*ContainerStateTerminated) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{27} + return fileDescriptorGenerated, []int{28} } func (m *ContainerStateWaiting) Reset() { *m = ContainerStateWaiting{} } func (*ContainerStateWaiting) ProtoMessage() {} -func (*ContainerStateWaiting) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } +func (*ContainerStateWaiting) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } func (m *ContainerStatus) Reset() { *m = ContainerStatus{} } func (*ContainerStatus) ProtoMessage() {} -func (*ContainerStatus) 
Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } +func (*ContainerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } func (m *DaemonEndpoint) Reset() { *m = DaemonEndpoint{} } func (*DaemonEndpoint) ProtoMessage() {} -func (*DaemonEndpoint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } +func (*DaemonEndpoint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } func (m *DeleteOptions) Reset() { *m = DeleteOptions{} } func (*DeleteOptions) ProtoMessage() {} -func (*DeleteOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } +func (*DeleteOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } func (m *DownwardAPIProjection) Reset() { *m = DownwardAPIProjection{} } func (*DownwardAPIProjection) ProtoMessage() {} -func (*DownwardAPIProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } +func (*DownwardAPIProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } func (m *DownwardAPIVolumeFile) Reset() { *m = DownwardAPIVolumeFile{} } func (*DownwardAPIVolumeFile) ProtoMessage() {} -func (*DownwardAPIVolumeFile) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } +func (*DownwardAPIVolumeFile) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } func (m *DownwardAPIVolumeSource) Reset() { *m = DownwardAPIVolumeSource{} } func (*DownwardAPIVolumeSource) ProtoMessage() {} func (*DownwardAPIVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{34} + return fileDescriptorGenerated, []int{35} } func (m *EmptyDirVolumeSource) Reset() { *m = EmptyDirVolumeSource{} } func (*EmptyDirVolumeSource) ProtoMessage() {} -func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } +func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } func (m *EndpointAddress) Reset() { *m = EndpointAddress{} } func (*EndpointAddress) ProtoMessage() {} -func (*EndpointAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } +func (*EndpointAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } func (m *EndpointPort) Reset() { *m = EndpointPort{} } func (*EndpointPort) ProtoMessage() {} -func (*EndpointPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } +func (*EndpointPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } func (m *EndpointSubset) Reset() { *m = EndpointSubset{} } func (*EndpointSubset) ProtoMessage() {} -func (*EndpointSubset) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } +func (*EndpointSubset) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } func (m *Endpoints) Reset() { *m = Endpoints{} } func (*Endpoints) ProtoMessage() {} -func (*Endpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } +func (*Endpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } func (m *EndpointsList) Reset() { *m = EndpointsList{} } func (*EndpointsList) ProtoMessage() {} -func (*EndpointsList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } +func (*EndpointsList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } func (m *EnvFromSource) Reset() { *m = EnvFromSource{} } 
func (*EnvFromSource) ProtoMessage() {} -func (*EnvFromSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } +func (*EnvFromSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } func (m *EnvVar) Reset() { *m = EnvVar{} } func (*EnvVar) ProtoMessage() {} -func (*EnvVar) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } +func (*EnvVar) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } func (m *EnvVarSource) Reset() { *m = EnvVarSource{} } func (*EnvVarSource) ProtoMessage() {} -func (*EnvVarSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } +func (*EnvVarSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} } func (m *Event) Reset() { *m = Event{} } func (*Event) ProtoMessage() {} -func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} } +func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} } func (m *EventList) Reset() { *m = EventList{} } func (*EventList) ProtoMessage() {} -func (*EventList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} } +func (*EventList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } + +func (m *EventSeries) Reset() { *m = EventSeries{} } +func (*EventSeries) ProtoMessage() {} +func (*EventSeries) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{47} } func (m *EventSource) Reset() { *m = EventSource{} } func (*EventSource) ProtoMessage() {} -func (*EventSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } +func (*EventSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{48} } func (m *ExecAction) Reset() { *m = ExecAction{} } func (*ExecAction) ProtoMessage() {} -func (*ExecAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{47} } +func (*ExecAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{49} } func (m *FCVolumeSource) Reset() { *m = FCVolumeSource{} } func (*FCVolumeSource) ProtoMessage() {} -func (*FCVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{48} } +func (*FCVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} } func (m *FlexVolumeSource) Reset() { *m = FlexVolumeSource{} } func (*FlexVolumeSource) ProtoMessage() {} -func (*FlexVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{49} } +func (*FlexVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{51} } func (m *FlockerVolumeSource) Reset() { *m = FlockerVolumeSource{} } func (*FlockerVolumeSource) ProtoMessage() {} -func (*FlockerVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} } +func (*FlockerVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{52} } func (m *GCEPersistentDiskVolumeSource) Reset() { *m = GCEPersistentDiskVolumeSource{} } func (*GCEPersistentDiskVolumeSource) ProtoMessage() {} func (*GCEPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{51} + return fileDescriptorGenerated, []int{53} } func (m *GitRepoVolumeSource) Reset() { *m = GitRepoVolumeSource{} } func (*GitRepoVolumeSource) ProtoMessage() {} -func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{52} } +func (*GitRepoVolumeSource) Descriptor() 
([]byte, []int) { return fileDescriptorGenerated, []int{54} } func (m *GlusterfsVolumeSource) Reset() { *m = GlusterfsVolumeSource{} } func (*GlusterfsVolumeSource) ProtoMessage() {} -func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{53} } +func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} } func (m *HTTPGetAction) Reset() { *m = HTTPGetAction{} } func (*HTTPGetAction) ProtoMessage() {} -func (*HTTPGetAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } +func (*HTTPGetAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{56} } func (m *HTTPHeader) Reset() { *m = HTTPHeader{} } func (*HTTPHeader) ProtoMessage() {} -func (*HTTPHeader) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} } +func (*HTTPHeader) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{57} } func (m *Handler) Reset() { *m = Handler{} } func (*Handler) ProtoMessage() {} -func (*Handler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{56} } +func (*Handler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{58} } func (m *HostAlias) Reset() { *m = HostAlias{} } func (*HostAlias) ProtoMessage() {} -func (*HostAlias) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{57} } +func (*HostAlias) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{59} } func (m *HostPathVolumeSource) Reset() { *m = HostPathVolumeSource{} } func (*HostPathVolumeSource) ProtoMessage() {} -func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{58} } +func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{60} } + +func (m *ISCSIPersistentVolumeSource) Reset() { *m = ISCSIPersistentVolumeSource{} } +func (*ISCSIPersistentVolumeSource) ProtoMessage() {} +func (*ISCSIPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{61} +} func (m *ISCSIVolumeSource) Reset() { *m = ISCSIVolumeSource{} } func (*ISCSIVolumeSource) ProtoMessage() {} -func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{59} } +func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{62} } func (m *KeyToPath) Reset() { *m = KeyToPath{} } func (*KeyToPath) ProtoMessage() {} -func (*KeyToPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{60} } +func (*KeyToPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{63} } func (m *Lifecycle) Reset() { *m = Lifecycle{} } func (*Lifecycle) ProtoMessage() {} -func (*Lifecycle) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{61} } +func (*Lifecycle) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{64} } func (m *LimitRange) Reset() { *m = LimitRange{} } func (*LimitRange) ProtoMessage() {} -func (*LimitRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{62} } +func (*LimitRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{65} } func (m *LimitRangeItem) Reset() { *m = LimitRangeItem{} } func (*LimitRangeItem) ProtoMessage() {} -func (*LimitRangeItem) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{63} } +func (*LimitRangeItem) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{66} } func (m *LimitRangeList) 
Reset() { *m = LimitRangeList{} } func (*LimitRangeList) ProtoMessage() {} -func (*LimitRangeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{64} } +func (*LimitRangeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{67} } func (m *LimitRangeSpec) Reset() { *m = LimitRangeSpec{} } func (*LimitRangeSpec) ProtoMessage() {} -func (*LimitRangeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{65} } +func (*LimitRangeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{68} } func (m *List) Reset() { *m = List{} } func (*List) ProtoMessage() {} -func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{66} } +func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{69} } func (m *ListOptions) Reset() { *m = ListOptions{} } func (*ListOptions) ProtoMessage() {} -func (*ListOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{67} } +func (*ListOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{70} } func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} } func (*LoadBalancerIngress) ProtoMessage() {} -func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{68} } +func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{71} } func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} } func (*LoadBalancerStatus) ProtoMessage() {} -func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{69} } +func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{72} } func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} } func (*LocalObjectReference) ProtoMessage() {} -func (*LocalObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{70} } +func (*LocalObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{73} } func (m *LocalVolumeSource) Reset() { *m = LocalVolumeSource{} } func (*LocalVolumeSource) ProtoMessage() {} -func (*LocalVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{71} } +func (*LocalVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{74} } func (m *NFSVolumeSource) Reset() { *m = NFSVolumeSource{} } func (*NFSVolumeSource) ProtoMessage() {} -func (*NFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{72} } +func (*NFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{75} } func (m *Namespace) Reset() { *m = Namespace{} } func (*Namespace) ProtoMessage() {} -func (*Namespace) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{73} } +func (*Namespace) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{76} } func (m *NamespaceList) Reset() { *m = NamespaceList{} } func (*NamespaceList) ProtoMessage() {} -func (*NamespaceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{74} } +func (*NamespaceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{77} } func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} } func (*NamespaceSpec) ProtoMessage() {} -func (*NamespaceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{75} } +func (*NamespaceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{78} } func (m 
*NamespaceStatus) Reset() { *m = NamespaceStatus{} } func (*NamespaceStatus) ProtoMessage() {} -func (*NamespaceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{76} } +func (*NamespaceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{79} } func (m *Node) Reset() { *m = Node{} } func (*Node) ProtoMessage() {} -func (*Node) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{77} } +func (*Node) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{80} } func (m *NodeAddress) Reset() { *m = NodeAddress{} } func (*NodeAddress) ProtoMessage() {} -func (*NodeAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{78} } +func (*NodeAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{81} } func (m *NodeAffinity) Reset() { *m = NodeAffinity{} } func (*NodeAffinity) ProtoMessage() {} -func (*NodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{79} } +func (*NodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{82} } func (m *NodeCondition) Reset() { *m = NodeCondition{} } func (*NodeCondition) ProtoMessage() {} -func (*NodeCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{80} } +func (*NodeCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{83} } func (m *NodeConfigSource) Reset() { *m = NodeConfigSource{} } func (*NodeConfigSource) ProtoMessage() {} -func (*NodeConfigSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{81} } +func (*NodeConfigSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{84} } func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} } func (*NodeDaemonEndpoints) ProtoMessage() {} -func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{82} } +func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{85} } func (m *NodeList) Reset() { *m = NodeList{} } func (*NodeList) ProtoMessage() {} -func (*NodeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{83} } +func (*NodeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{86} } func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} } func (*NodeProxyOptions) ProtoMessage() {} -func (*NodeProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{84} } +func (*NodeProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{87} } func (m *NodeResources) Reset() { *m = NodeResources{} } func (*NodeResources) ProtoMessage() {} -func (*NodeResources) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{85} } +func (*NodeResources) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{88} } func (m *NodeSelector) Reset() { *m = NodeSelector{} } func (*NodeSelector) ProtoMessage() {} -func (*NodeSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{86} } +func (*NodeSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{89} } func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} } func (*NodeSelectorRequirement) ProtoMessage() {} func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{87} + return fileDescriptorGenerated, []int{90} } func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} } func 
(*NodeSelectorTerm) ProtoMessage() {} -func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{88} } +func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{91} } func (m *NodeSpec) Reset() { *m = NodeSpec{} } func (*NodeSpec) ProtoMessage() {} -func (*NodeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{89} } +func (*NodeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{92} } func (m *NodeStatus) Reset() { *m = NodeStatus{} } func (*NodeStatus) ProtoMessage() {} -func (*NodeStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{90} } +func (*NodeStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{93} } func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} } func (*NodeSystemInfo) ProtoMessage() {} -func (*NodeSystemInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{91} } +func (*NodeSystemInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{94} } func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} } func (*ObjectFieldSelector) ProtoMessage() {} -func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{92} } +func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{95} } func (m *ObjectMeta) Reset() { *m = ObjectMeta{} } func (*ObjectMeta) ProtoMessage() {} -func (*ObjectMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{93} } +func (*ObjectMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{96} } func (m *ObjectReference) Reset() { *m = ObjectReference{} } func (*ObjectReference) ProtoMessage() {} -func (*ObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{94} } +func (*ObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{97} } func (m *PersistentVolume) Reset() { *m = PersistentVolume{} } func (*PersistentVolume) ProtoMessage() {} -func (*PersistentVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{95} } +func (*PersistentVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{98} } func (m *PersistentVolumeClaim) Reset() { *m = PersistentVolumeClaim{} } func (*PersistentVolumeClaim) ProtoMessage() {} -func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{96} } +func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{99} } func (m *PersistentVolumeClaimCondition) Reset() { *m = PersistentVolumeClaimCondition{} } func (*PersistentVolumeClaimCondition) ProtoMessage() {} func (*PersistentVolumeClaimCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{97} + return fileDescriptorGenerated, []int{100} } func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} } func (*PersistentVolumeClaimList) ProtoMessage() {} func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{98} + return fileDescriptorGenerated, []int{101} } func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} } func (*PersistentVolumeClaimSpec) ProtoMessage() {} func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{99} + return fileDescriptorGenerated, []int{102} } func (m *PersistentVolumeClaimStatus) 
Reset() { *m = PersistentVolumeClaimStatus{} } func (*PersistentVolumeClaimStatus) ProtoMessage() {} func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{100} + return fileDescriptorGenerated, []int{103} } func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVolumeClaimVolumeSource{} } func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {} func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{101} + return fileDescriptorGenerated, []int{104} } func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} } func (*PersistentVolumeList) ProtoMessage() {} -func (*PersistentVolumeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{102} } +func (*PersistentVolumeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{105} } func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} } func (*PersistentVolumeSource) ProtoMessage() {} func (*PersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{103} + return fileDescriptorGenerated, []int{106} } func (m *PersistentVolumeSpec) Reset() { *m = PersistentVolumeSpec{} } func (*PersistentVolumeSpec) ProtoMessage() {} -func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{104} } +func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{107} } func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} } func (*PersistentVolumeStatus) ProtoMessage() {} func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{105} + return fileDescriptorGenerated, []int{108} } func (m *PhotonPersistentDiskVolumeSource) Reset() { *m = PhotonPersistentDiskVolumeSource{} } func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {} func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{106} + return fileDescriptorGenerated, []int{109} } func (m *Pod) Reset() { *m = Pod{} } func (*Pod) ProtoMessage() {} -func (*Pod) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{107} } +func (*Pod) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{110} } func (m *PodAffinity) Reset() { *m = PodAffinity{} } func (*PodAffinity) ProtoMessage() {} -func (*PodAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{108} } +func (*PodAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{111} } func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} } func (*PodAffinityTerm) ProtoMessage() {} -func (*PodAffinityTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{109} } +func (*PodAffinityTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{112} } func (m *PodAntiAffinity) Reset() { *m = PodAntiAffinity{} } func (*PodAntiAffinity) ProtoMessage() {} -func (*PodAntiAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{110} } +func (*PodAntiAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{113} } func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} } func (*PodAttachOptions) ProtoMessage() {} -func (*PodAttachOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{111} } +func (*PodAttachOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, 
[]int{114} } func (m *PodCondition) Reset() { *m = PodCondition{} } func (*PodCondition) ProtoMessage() {} -func (*PodCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{112} } +func (*PodCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{115} } + +func (m *PodDNSConfig) Reset() { *m = PodDNSConfig{} } +func (*PodDNSConfig) ProtoMessage() {} +func (*PodDNSConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{116} } + +func (m *PodDNSConfigOption) Reset() { *m = PodDNSConfigOption{} } +func (*PodDNSConfigOption) ProtoMessage() {} +func (*PodDNSConfigOption) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{117} } func (m *PodExecOptions) Reset() { *m = PodExecOptions{} } func (*PodExecOptions) ProtoMessage() {} -func (*PodExecOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{113} } +func (*PodExecOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{118} } func (m *PodList) Reset() { *m = PodList{} } func (*PodList) ProtoMessage() {} -func (*PodList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{114} } +func (*PodList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{119} } func (m *PodLogOptions) Reset() { *m = PodLogOptions{} } func (*PodLogOptions) ProtoMessage() {} -func (*PodLogOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{115} } +func (*PodLogOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{120} } func (m *PodPortForwardOptions) Reset() { *m = PodPortForwardOptions{} } func (*PodPortForwardOptions) ProtoMessage() {} -func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{116} } +func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{121} } func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} } func (*PodProxyOptions) ProtoMessage() {} -func (*PodProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{117} } +func (*PodProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{122} } func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} } func (*PodSecurityContext) ProtoMessage() {} -func (*PodSecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{118} } +func (*PodSecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{123} } func (m *PodSignature) Reset() { *m = PodSignature{} } func (*PodSignature) ProtoMessage() {} -func (*PodSignature) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{119} } +func (*PodSignature) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{124} } func (m *PodSpec) Reset() { *m = PodSpec{} } func (*PodSpec) ProtoMessage() {} -func (*PodSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{120} } +func (*PodSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{125} } func (m *PodStatus) Reset() { *m = PodStatus{} } func (*PodStatus) ProtoMessage() {} -func (*PodStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{121} } +func (*PodStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{126} } func (m *PodStatusResult) Reset() { *m = PodStatusResult{} } func (*PodStatusResult) ProtoMessage() {} -func (*PodStatusResult) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{122} } +func (*PodStatusResult) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{127} } func (m *PodTemplate) Reset() { *m = PodTemplate{} } func (*PodTemplate) ProtoMessage() {} -func (*PodTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{123} } +func (*PodTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{128} } func (m *PodTemplateList) Reset() { *m = PodTemplateList{} } func (*PodTemplateList) ProtoMessage() {} -func (*PodTemplateList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{124} } +func (*PodTemplateList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{129} } func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} } func (*PodTemplateSpec) ProtoMessage() {} -func (*PodTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{125} } +func (*PodTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{130} } func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} } func (*PortworxVolumeSource) ProtoMessage() {} -func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{126} } +func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{131} } func (m *Preconditions) Reset() { *m = Preconditions{} } func (*Preconditions) ProtoMessage() {} -func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{127} } +func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{132} } func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} } func (*PreferAvoidPodsEntry) ProtoMessage() {} -func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{128} } +func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{133} } func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} } func (*PreferredSchedulingTerm) ProtoMessage() {} func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{129} + return fileDescriptorGenerated, []int{134} } func (m *Probe) Reset() { *m = Probe{} } func (*Probe) ProtoMessage() {} -func (*Probe) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{130} } +func (*Probe) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{135} } func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} } func (*ProjectedVolumeSource) ProtoMessage() {} -func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{131} } +func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{136} } func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} } func (*QuobyteVolumeSource) ProtoMessage() {} -func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{132} } +func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{137} } + +func (m *RBDPersistentVolumeSource) Reset() { *m = RBDPersistentVolumeSource{} } +func (*RBDPersistentVolumeSource) ProtoMessage() {} +func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{138} +} func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} } func (*RBDVolumeSource) ProtoMessage() {} -func 
(*RBDVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{133} } +func (*RBDVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{139} } func (m *RangeAllocation) Reset() { *m = RangeAllocation{} } func (*RangeAllocation) ProtoMessage() {} -func (*RangeAllocation) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{134} } +func (*RangeAllocation) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{140} } func (m *ReplicationController) Reset() { *m = ReplicationController{} } func (*ReplicationController) ProtoMessage() {} -func (*ReplicationController) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{135} } +func (*ReplicationController) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{141} } func (m *ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} } func (*ReplicationControllerCondition) ProtoMessage() {} func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{136} + return fileDescriptorGenerated, []int{142} } func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} } func (*ReplicationControllerList) ProtoMessage() {} func (*ReplicationControllerList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{137} + return fileDescriptorGenerated, []int{143} } func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} } func (*ReplicationControllerSpec) ProtoMessage() {} func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{138} + return fileDescriptorGenerated, []int{144} } func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} } func (*ReplicationControllerStatus) ProtoMessage() {} func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{139} + return fileDescriptorGenerated, []int{145} } func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} } func (*ResourceFieldSelector) ProtoMessage() {} -func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{140} } +func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{146} } func (m *ResourceQuota) Reset() { *m = ResourceQuota{} } func (*ResourceQuota) ProtoMessage() {} -func (*ResourceQuota) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{141} } +func (*ResourceQuota) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{147} } func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} } func (*ResourceQuotaList) ProtoMessage() {} -func (*ResourceQuotaList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{142} } +func (*ResourceQuotaList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{148} } func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} } func (*ResourceQuotaSpec) ProtoMessage() {} -func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{143} } +func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{149} } func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} } func (*ResourceQuotaStatus) ProtoMessage() {} -func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{144} } +func (*ResourceQuotaStatus) Descriptor() ([]byte, 
[]int) { return fileDescriptorGenerated, []int{150} } func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } func (*ResourceRequirements) ProtoMessage() {} -func (*ResourceRequirements) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{145} } +func (*ResourceRequirements) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{151} } func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} } func (*SELinuxOptions) ProtoMessage() {} -func (*SELinuxOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{146} } +func (*SELinuxOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{152} } + +func (m *ScaleIOPersistentVolumeSource) Reset() { *m = ScaleIOPersistentVolumeSource{} } +func (*ScaleIOPersistentVolumeSource) ProtoMessage() {} +func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{153} +} func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} } func (*ScaleIOVolumeSource) ProtoMessage() {} -func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{147} } +func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{154} } func (m *Secret) Reset() { *m = Secret{} } func (*Secret) ProtoMessage() {} -func (*Secret) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{148} } +func (*Secret) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{155} } func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} } func (*SecretEnvSource) ProtoMessage() {} -func (*SecretEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{149} } +func (*SecretEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{156} } func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} } func (*SecretKeySelector) ProtoMessage() {} -func (*SecretKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{150} } +func (*SecretKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{157} } func (m *SecretList) Reset() { *m = SecretList{} } func (*SecretList) ProtoMessage() {} -func (*SecretList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{151} } +func (*SecretList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{158} } func (m *SecretProjection) Reset() { *m = SecretProjection{} } func (*SecretProjection) ProtoMessage() {} -func (*SecretProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{152} } +func (*SecretProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{159} } func (m *SecretReference) Reset() { *m = SecretReference{} } func (*SecretReference) ProtoMessage() {} -func (*SecretReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{153} } +func (*SecretReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{160} } func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} } func (*SecretVolumeSource) ProtoMessage() {} -func (*SecretVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{154} } +func (*SecretVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{161} } func (m *SecurityContext) Reset() { *m = SecurityContext{} } func (*SecurityContext) ProtoMessage() {} -func (*SecurityContext) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{155} } +func (*SecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{162} } func (m *SerializedReference) Reset() { *m = SerializedReference{} } func (*SerializedReference) ProtoMessage() {} -func (*SerializedReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{156} } +func (*SerializedReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{163} } func (m *Service) Reset() { *m = Service{} } func (*Service) ProtoMessage() {} -func (*Service) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{157} } +func (*Service) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{164} } func (m *ServiceAccount) Reset() { *m = ServiceAccount{} } func (*ServiceAccount) ProtoMessage() {} -func (*ServiceAccount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{158} } +func (*ServiceAccount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{165} } func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} } func (*ServiceAccountList) ProtoMessage() {} -func (*ServiceAccountList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{159} } +func (*ServiceAccountList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{166} } func (m *ServiceList) Reset() { *m = ServiceList{} } func (*ServiceList) ProtoMessage() {} -func (*ServiceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{160} } +func (*ServiceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{167} } func (m *ServicePort) Reset() { *m = ServicePort{} } func (*ServicePort) ProtoMessage() {} -func (*ServicePort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{161} } +func (*ServicePort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{168} } func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} } func (*ServiceProxyOptions) ProtoMessage() {} -func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{162} } +func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{169} } func (m *ServiceSpec) Reset() { *m = ServiceSpec{} } func (*ServiceSpec) ProtoMessage() {} -func (*ServiceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{163} } +func (*ServiceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{170} } func (m *ServiceStatus) Reset() { *m = ServiceStatus{} } func (*ServiceStatus) ProtoMessage() {} -func (*ServiceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{164} } +func (*ServiceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{171} } func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} } func (*SessionAffinityConfig) ProtoMessage() {} -func (*SessionAffinityConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{165} } +func (*SessionAffinityConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{172} } func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} } func (*StorageOSPersistentVolumeSource) ProtoMessage() {} func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{166} + return fileDescriptorGenerated, []int{173} } func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} } func 
(*StorageOSVolumeSource) ProtoMessage() {} -func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{167} } +func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{174} } func (m *Sysctl) Reset() { *m = Sysctl{} } func (*Sysctl) ProtoMessage() {} -func (*Sysctl) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{168} } +func (*Sysctl) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{175} } func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} } func (*TCPSocketAction) ProtoMessage() {} -func (*TCPSocketAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{169} } +func (*TCPSocketAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{176} } func (m *Taint) Reset() { *m = Taint{} } func (*Taint) ProtoMessage() {} -func (*Taint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{170} } +func (*Taint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{177} } func (m *Toleration) Reset() { *m = Toleration{} } func (*Toleration) ProtoMessage() {} -func (*Toleration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{171} } +func (*Toleration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{178} } func (m *Volume) Reset() { *m = Volume{} } func (*Volume) ProtoMessage() {} -func (*Volume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{172} } +func (*Volume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{179} } + +func (m *VolumeDevice) Reset() { *m = VolumeDevice{} } +func (*VolumeDevice) ProtoMessage() {} +func (*VolumeDevice) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{180} } func (m *VolumeMount) Reset() { *m = VolumeMount{} } func (*VolumeMount) ProtoMessage() {} -func (*VolumeMount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{173} } +func (*VolumeMount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{181} } func (m *VolumeProjection) Reset() { *m = VolumeProjection{} } func (*VolumeProjection) ProtoMessage() {} -func (*VolumeProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{174} } +func (*VolumeProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{182} } func (m *VolumeSource) Reset() { *m = VolumeSource{} } func (*VolumeSource) ProtoMessage() {} -func (*VolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{175} } +func (*VolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{183} } func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} } func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {} func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{176} + return fileDescriptorGenerated, []int{184} } func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} } func (*WeightedPodAffinityTerm) ProtoMessage() {} func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{177} + return fileDescriptorGenerated, []int{185} } func init() { @@ -1001,6 +1049,7 @@ func init() { proto.RegisterType((*AzureFilePersistentVolumeSource)(nil), "k8s.io.api.core.v1.AzureFilePersistentVolumeSource") proto.RegisterType((*AzureFileVolumeSource)(nil), "k8s.io.api.core.v1.AzureFileVolumeSource") 
proto.RegisterType((*Binding)(nil), "k8s.io.api.core.v1.Binding") + proto.RegisterType((*CSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CSIPersistentVolumeSource") proto.RegisterType((*Capabilities)(nil), "k8s.io.api.core.v1.Capabilities") proto.RegisterType((*CephFSPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CephFSPersistentVolumeSource") proto.RegisterType((*CephFSVolumeSource)(nil), "k8s.io.api.core.v1.CephFSVolumeSource") @@ -1039,6 +1088,7 @@ func init() { proto.RegisterType((*EnvVarSource)(nil), "k8s.io.api.core.v1.EnvVarSource") proto.RegisterType((*Event)(nil), "k8s.io.api.core.v1.Event") proto.RegisterType((*EventList)(nil), "k8s.io.api.core.v1.EventList") + proto.RegisterType((*EventSeries)(nil), "k8s.io.api.core.v1.EventSeries") proto.RegisterType((*EventSource)(nil), "k8s.io.api.core.v1.EventSource") proto.RegisterType((*ExecAction)(nil), "k8s.io.api.core.v1.ExecAction") proto.RegisterType((*FCVolumeSource)(nil), "k8s.io.api.core.v1.FCVolumeSource") @@ -1052,6 +1102,7 @@ func init() { proto.RegisterType((*Handler)(nil), "k8s.io.api.core.v1.Handler") proto.RegisterType((*HostAlias)(nil), "k8s.io.api.core.v1.HostAlias") proto.RegisterType((*HostPathVolumeSource)(nil), "k8s.io.api.core.v1.HostPathVolumeSource") + proto.RegisterType((*ISCSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIPersistentVolumeSource") proto.RegisterType((*ISCSIVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIVolumeSource") proto.RegisterType((*KeyToPath)(nil), "k8s.io.api.core.v1.KeyToPath") proto.RegisterType((*Lifecycle)(nil), "k8s.io.api.core.v1.Lifecycle") @@ -1106,6 +1157,8 @@ func init() { proto.RegisterType((*PodAntiAffinity)(nil), "k8s.io.api.core.v1.PodAntiAffinity") proto.RegisterType((*PodAttachOptions)(nil), "k8s.io.api.core.v1.PodAttachOptions") proto.RegisterType((*PodCondition)(nil), "k8s.io.api.core.v1.PodCondition") + proto.RegisterType((*PodDNSConfig)(nil), "k8s.io.api.core.v1.PodDNSConfig") + proto.RegisterType((*PodDNSConfigOption)(nil), "k8s.io.api.core.v1.PodDNSConfigOption") proto.RegisterType((*PodExecOptions)(nil), "k8s.io.api.core.v1.PodExecOptions") proto.RegisterType((*PodList)(nil), "k8s.io.api.core.v1.PodList") proto.RegisterType((*PodLogOptions)(nil), "k8s.io.api.core.v1.PodLogOptions") @@ -1126,6 +1179,7 @@ func init() { proto.RegisterType((*Probe)(nil), "k8s.io.api.core.v1.Probe") proto.RegisterType((*ProjectedVolumeSource)(nil), "k8s.io.api.core.v1.ProjectedVolumeSource") proto.RegisterType((*QuobyteVolumeSource)(nil), "k8s.io.api.core.v1.QuobyteVolumeSource") + proto.RegisterType((*RBDPersistentVolumeSource)(nil), "k8s.io.api.core.v1.RBDPersistentVolumeSource") proto.RegisterType((*RBDVolumeSource)(nil), "k8s.io.api.core.v1.RBDVolumeSource") proto.RegisterType((*RangeAllocation)(nil), "k8s.io.api.core.v1.RangeAllocation") proto.RegisterType((*ReplicationController)(nil), "k8s.io.api.core.v1.ReplicationController") @@ -1140,6 +1194,7 @@ func init() { proto.RegisterType((*ResourceQuotaStatus)(nil), "k8s.io.api.core.v1.ResourceQuotaStatus") proto.RegisterType((*ResourceRequirements)(nil), "k8s.io.api.core.v1.ResourceRequirements") proto.RegisterType((*SELinuxOptions)(nil), "k8s.io.api.core.v1.SELinuxOptions") + proto.RegisterType((*ScaleIOPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ScaleIOPersistentVolumeSource") proto.RegisterType((*ScaleIOVolumeSource)(nil), "k8s.io.api.core.v1.ScaleIOVolumeSource") proto.RegisterType((*Secret)(nil), "k8s.io.api.core.v1.Secret") proto.RegisterType((*SecretEnvSource)(nil), "k8s.io.api.core.v1.SecretEnvSource") @@ 
-1166,6 +1221,7 @@ func init() { proto.RegisterType((*Taint)(nil), "k8s.io.api.core.v1.Taint") proto.RegisterType((*Toleration)(nil), "k8s.io.api.core.v1.Toleration") proto.RegisterType((*Volume)(nil), "k8s.io.api.core.v1.Volume") + proto.RegisterType((*VolumeDevice)(nil), "k8s.io.api.core.v1.VolumeDevice") proto.RegisterType((*VolumeMount)(nil), "k8s.io.api.core.v1.VolumeMount") proto.RegisterType((*VolumeProjection)(nil), "k8s.io.api.core.v1.VolumeProjection") proto.RegisterType((*VolumeSource)(nil), "k8s.io.api.core.v1.VolumeSource") @@ -1475,6 +1531,40 @@ func (m *Binding) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *CSIPersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CSIPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i += copy(dAtA[i:], m.Driver) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeHandle))) + i += copy(dAtA[i:], m.VolumeHandle) + dAtA[i] = 0x18 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil +} + func (m *Capabilities) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2278,6 +2368,20 @@ func (m *Container) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.TerminationMessagePolicy))) i += copy(dAtA[i:], m.TerminationMessagePolicy) + if len(m.VolumeDevices) > 0 { + for _, msg := range m.VolumeDevices { + dAtA[i] = 0xaa + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -3187,6 +3291,46 @@ func (m *Event) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.EventTime.Size())) + n48, err := m.EventTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n48 + if m.Series != nil { + dAtA[i] = 0x5a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Series.Size())) + n49, err := m.Series.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n49 + } + dAtA[i] = 0x62 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Action))) + i += copy(dAtA[i:], m.Action) + if m.Related != nil { + dAtA[i] = 0x6a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Related.Size())) + n50, err := m.Related.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n50 + } + dAtA[i] = 0x72 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingController))) + i += copy(dAtA[i:], m.ReportingController) + dAtA[i] = 0x7a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingInstance))) + i += copy(dAtA[i:], m.ReportingInstance) return i, nil } @@ -3208,11 +3352,11 @@ func (m *EventList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n48, err := m.ListMeta.MarshalTo(dAtA[i:]) + n51, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n48 + i += n51 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -3228,6 +3372,39 @@ func (m *EventList) MarshalTo(dAtA []byte) (int, error) { return i, nil } 
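Note on the churn above (illustration, not part of the vendored file): the new core/v1 messages added in this bump — CSIPersistentVolumeSource, EventSeries, ISCSIPersistentVolumeSource, PodDNSConfig, PodDNSConfigOption, RBDPersistentVolumeSource, ScaleIOPersistentVolumeSource, VolumeDevice — shift the position of every later message in fileDescriptorGenerated, which is why the Descriptor() indices and the nNN temporaries are renumbered wholesale while the surrounding logic is unchanged. The hard-coded bytes in the new marshalers (0x8, 0x12, 0x1a in EventSeries above, 0xd2 0x1 for PodSpec's DNSConfig further down) are ordinary protobuf field keys, (fieldNumber<<3 | wireType) encoded as a varint. A minimal sketch of that derivation follows; fieldKey and appendVarint are ad-hoc helper names for this illustration, not symbols from the vendored package.

// Illustration only: how the generated marshalers' tag constants are derived.
package main

import "fmt"

// appendVarint writes x in protobuf base-128 varint form, the same loop the
// generated code inlines as `for l >= 1<<7 { ... }` when length-prefixing strings.
func appendVarint(b []byte, x uint64) []byte {
	for x >= 1<<7 {
		b = append(b, byte(x&0x7f|0x80))
		x >>= 7
	}
	return append(b, byte(x))
}

// fieldKey builds a wire-format field key: (fieldNumber<<3 | wireType), itself
// varint-encoded. Wire type 2 is length-delimited (strings, bytes, embedded
// messages); wire type 0 is varint (bools, integers).
func fieldKey(fieldNumber, wireType uint64) []byte {
	return appendVarint(nil, fieldNumber<<3|wireType)
}

func main() {
	fmt.Printf("% x\n", fieldKey(1, 0))  // 08    -> EventSeries.count (varint)
	fmt.Printf("% x\n", fieldKey(2, 2))  // 12    -> EventSeries.lastObservedTime
	fmt.Printf("% x\n", fieldKey(3, 2))  // 1a    -> EventSeries.state
	fmt.Printf("% x\n", fieldKey(26, 2)) // d2 01 -> PodSpec dnsConfig; two bytes once the key exceeds 0x7f
}

The same reading explains the renumbered n48 -> n51 style temporaries: they are just sequential counters emitted by the generator, so inserting new fields or messages earlier in the file bumps every later one.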
+func (m *EventSeries) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventSeries) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastObservedTime.Size())) + n52, err := m.LastObservedTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n52 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.State))) + i += copy(dAtA[i:], m.State) + return i, nil +} + func (m *EventSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -3379,11 +3556,11 @@ func (m *FlexVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n49, err := m.SecretRef.MarshalTo(dAtA[i:]) + n53, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n49 + i += n53 } dAtA[i] = 0x20 i++ @@ -3567,11 +3744,11 @@ func (m *HTTPGetAction) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) - n50, err := m.Port.MarshalTo(dAtA[i:]) + n54, err := m.Port.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n50 + i += n54 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) @@ -3640,31 +3817,31 @@ func (m *Handler) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Exec.Size())) - n51, err := m.Exec.MarshalTo(dAtA[i:]) + n55, err := m.Exec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n51 + i += n55 } if m.HTTPGet != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.HTTPGet.Size())) - n52, err := m.HTTPGet.MarshalTo(dAtA[i:]) + n56, err := m.HTTPGet.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n52 + i += n56 } if m.TCPSocket != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.TCPSocket.Size())) - n53, err := m.TCPSocket.MarshalTo(dAtA[i:]) + n57, err := m.TCPSocket.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n53 + i += n57 } return i, nil } @@ -3734,6 +3911,98 @@ func (m *HostPathVolumeSource) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *ISCSIPersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ISCSIPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetPortal))) + i += copy(dAtA[i:], m.TargetPortal) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IQN))) + i += copy(dAtA[i:], m.IQN) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Lun)) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ISCSIInterface))) + i += copy(dAtA[i:], m.ISCSIInterface) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + dAtA[i] = 0x30 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if len(m.Portals) > 0 { + for _, s := range m.Portals { + dAtA[i] = 0x3a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = 
uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + dAtA[i] = 0x40 + i++ + if m.DiscoveryCHAPAuth { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.SecretRef != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n58, err := m.SecretRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n58 + } + dAtA[i] = 0x58 + i++ + if m.SessionCHAPAuth { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.InitiatorName != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.InitiatorName))) + i += copy(dAtA[i:], *m.InitiatorName) + } + return i, nil +} + func (m *ISCSIVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -3803,11 +4072,11 @@ func (m *ISCSIVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x52 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n54, err := m.SecretRef.MarshalTo(dAtA[i:]) + n59, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n54 + i += n59 } dAtA[i] = 0x58 i++ @@ -3876,21 +4145,21 @@ func (m *Lifecycle) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PostStart.Size())) - n55, err := m.PostStart.MarshalTo(dAtA[i:]) + n60, err := m.PostStart.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n55 + i += n60 } if m.PreStop != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PreStop.Size())) - n56, err := m.PreStop.MarshalTo(dAtA[i:]) + n61, err := m.PreStop.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n56 + i += n61 } return i, nil } @@ -3913,19 +4182,19 @@ func (m *LimitRange) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n57, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n62, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n57 + i += n62 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n58, err := m.Spec.MarshalTo(dAtA[i:]) + n63, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n58 + i += n63 return i, nil } @@ -3972,11 +4241,11 @@ func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n59, err := (&v).MarshalTo(dAtA[i:]) + n64, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n59 + i += n64 } } if len(m.Min) > 0 { @@ -4003,11 +4272,11 @@ func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n60, err := (&v).MarshalTo(dAtA[i:]) + n65, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n60 + i += n65 } } if len(m.Default) > 0 { @@ -4034,11 +4303,11 @@ func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n61, err := (&v).MarshalTo(dAtA[i:]) + n66, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n61 + i += n66 } } if len(m.DefaultRequest) > 0 { @@ -4065,11 +4334,11 @@ func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n62, err := (&v).MarshalTo(dAtA[i:]) + n67, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n62 + i += n67 } } if 
len(m.MaxLimitRequestRatio) > 0 { @@ -4096,11 +4365,11 @@ func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n63, err := (&v).MarshalTo(dAtA[i:]) + n68, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n63 + i += n68 } } return i, nil @@ -4124,11 +4393,11 @@ func (m *LimitRangeList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n64, err := m.ListMeta.MarshalTo(dAtA[i:]) + n69, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n64 + i += n69 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -4192,11 +4461,11 @@ func (m *List) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n65, err := m.ListMeta.MarshalTo(dAtA[i:]) + n70, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n65 + i += n70 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -4415,27 +4684,27 @@ func (m *Namespace) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n66, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n71, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n66 + i += n71 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n67, err := m.Spec.MarshalTo(dAtA[i:]) + n72, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n67 + i += n72 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n68, err := m.Status.MarshalTo(dAtA[i:]) + n73, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n68 + i += n73 return i, nil } @@ -4457,11 +4726,11 @@ func (m *NamespaceList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n69, err := m.ListMeta.MarshalTo(dAtA[i:]) + n74, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n69 + i += n74 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -4550,27 +4819,27 @@ func (m *Node) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n70, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n75, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n70 + i += n75 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n71, err := m.Spec.MarshalTo(dAtA[i:]) + n76, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n71 + i += n76 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n72, err := m.Status.MarshalTo(dAtA[i:]) + n77, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n72 + i += n77 return i, nil } @@ -4619,11 +4888,11 @@ func (m *NodeAffinity) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RequiredDuringSchedulingIgnoredDuringExecution.Size())) - n73, err := m.RequiredDuringSchedulingIgnoredDuringExecution.MarshalTo(dAtA[i:]) + n78, err := m.RequiredDuringSchedulingIgnoredDuringExecution.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n73 + i += n78 } if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { @@ 
-4666,19 +4935,19 @@ func (m *NodeCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastHeartbeatTime.Size())) - n74, err := m.LastHeartbeatTime.MarshalTo(dAtA[i:]) + n79, err := m.LastHeartbeatTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n74 + i += n79 dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n75, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n80, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n75 + i += n80 dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -4709,11 +4978,11 @@ func (m *NodeConfigSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMapRef.Size())) - n76, err := m.ConfigMapRef.MarshalTo(dAtA[i:]) + n81, err := m.ConfigMapRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n76 + i += n81 } return i, nil } @@ -4736,11 +5005,11 @@ func (m *NodeDaemonEndpoints) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.KubeletEndpoint.Size())) - n77, err := m.KubeletEndpoint.MarshalTo(dAtA[i:]) + n82, err := m.KubeletEndpoint.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n77 + i += n82 return i, nil } @@ -4762,11 +5031,11 @@ func (m *NodeList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n78, err := m.ListMeta.MarshalTo(dAtA[i:]) + n83, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n78 + i += n83 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -4843,11 +5112,11 @@ func (m *NodeResources) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n79, err := (&v).MarshalTo(dAtA[i:]) + n84, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n79 + i += n84 } } return i, nil @@ -5005,11 +5274,11 @@ func (m *NodeSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x32 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigSource.Size())) - n80, err := m.ConfigSource.MarshalTo(dAtA[i:]) + n85, err := m.ConfigSource.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n80 + i += n85 } return i, nil } @@ -5053,11 +5322,11 @@ func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n81, err := (&v).MarshalTo(dAtA[i:]) + n86, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n81 + i += n86 } } if len(m.Allocatable) > 0 { @@ -5084,11 +5353,11 @@ func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n82, err := (&v).MarshalTo(dAtA[i:]) + n87, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n82 + i += n87 } } dAtA[i] = 0x1a @@ -5122,19 +5391,19 @@ func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x32 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.DaemonEndpoints.Size())) - n83, err := m.DaemonEndpoints.MarshalTo(dAtA[i:]) + n88, err := m.DaemonEndpoints.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n83 + i += n88 dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.NodeInfo.Size())) - n84, err := m.NodeInfo.MarshalTo(dAtA[i:]) + n89, err := m.NodeInfo.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += 
n84 + i += n89 if len(m.Images) > 0 { for _, msg := range m.Images { dAtA[i] = 0x42 @@ -5306,20 +5575,20 @@ func (m *ObjectMeta) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x42 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.CreationTimestamp.Size())) - n85, err := m.CreationTimestamp.MarshalTo(dAtA[i:]) + n90, err := m.CreationTimestamp.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n85 + i += n90 if m.DeletionTimestamp != nil { dAtA[i] = 0x4a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.DeletionTimestamp.Size())) - n86, err := m.DeletionTimestamp.MarshalTo(dAtA[i:]) + n91, err := m.DeletionTimestamp.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n86 + i += n91 } if m.DeletionGracePeriodSeconds != nil { dAtA[i] = 0x50 @@ -5407,11 +5676,11 @@ func (m *ObjectMeta) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Initializers.Size())) - n87, err := m.Initializers.MarshalTo(dAtA[i:]) + n92, err := m.Initializers.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n87 + i += n92 } return i, nil } @@ -5480,27 +5749,27 @@ func (m *PersistentVolume) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n88, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n93, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n88 + i += n93 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n89, err := m.Spec.MarshalTo(dAtA[i:]) + n94, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n89 + i += n94 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n90, err := m.Status.MarshalTo(dAtA[i:]) + n95, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n90 + i += n95 return i, nil } @@ -5522,27 +5791,27 @@ func (m *PersistentVolumeClaim) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n91, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n96, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n91 + i += n96 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n92, err := m.Spec.MarshalTo(dAtA[i:]) + n97, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n92 + i += n97 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n93, err := m.Status.MarshalTo(dAtA[i:]) + n98, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n93 + i += n98 return i, nil } @@ -5572,19 +5841,19 @@ func (m *PersistentVolumeClaimCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastProbeTime.Size())) - n94, err := m.LastProbeTime.MarshalTo(dAtA[i:]) + n99, err := m.LastProbeTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n94 + i += n99 dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n95, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n100, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n95 + i += n100 dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -5614,11 +5883,11 @@ func (m *PersistentVolumeClaimList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n96, err := m.ListMeta.MarshalTo(dAtA[i:]) + n101, err := 
m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n96 + i += n101 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -5667,11 +5936,11 @@ func (m *PersistentVolumeClaimSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Resources.Size())) - n97, err := m.Resources.MarshalTo(dAtA[i:]) + n102, err := m.Resources.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n97 + i += n102 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) @@ -5680,11 +5949,11 @@ func (m *PersistentVolumeClaimSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n98, err := m.Selector.MarshalTo(dAtA[i:]) + n103, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n98 + i += n103 } if m.StorageClassName != nil { dAtA[i] = 0x2a @@ -5692,6 +5961,12 @@ func (m *PersistentVolumeClaimSpec) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StorageClassName))) i += copy(dAtA[i:], *m.StorageClassName) } + if m.VolumeMode != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeMode))) + i += copy(dAtA[i:], *m.VolumeMode) + } return i, nil } @@ -5753,11 +6028,11 @@ func (m *PersistentVolumeClaimStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n99, err := (&v).MarshalTo(dAtA[i:]) + n104, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n99 + i += n104 } } if len(m.Conditions) > 0 { @@ -5823,11 +6098,11 @@ func (m *PersistentVolumeList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n100, err := m.ListMeta.MarshalTo(dAtA[i:]) + n105, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n100 + i += n105 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -5862,151 +6137,151 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.GCEPersistentDisk.Size())) - n101, err := m.GCEPersistentDisk.MarshalTo(dAtA[i:]) + n106, err := m.GCEPersistentDisk.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n101 + i += n106 } if m.AWSElasticBlockStore != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.AWSElasticBlockStore.Size())) - n102, err := m.AWSElasticBlockStore.MarshalTo(dAtA[i:]) + n107, err := m.AWSElasticBlockStore.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n102 + i += n107 } if m.HostPath != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.HostPath.Size())) - n103, err := m.HostPath.MarshalTo(dAtA[i:]) + n108, err := m.HostPath.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n103 + i += n108 } if m.Glusterfs != nil { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Glusterfs.Size())) - n104, err := m.Glusterfs.MarshalTo(dAtA[i:]) + n109, err := m.Glusterfs.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n104 + i += n109 } if m.NFS != nil { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.NFS.Size())) - n105, err := m.NFS.MarshalTo(dAtA[i:]) + n110, err := m.NFS.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n105 + i += n110 } if m.RBD != nil { dAtA[i] = 0x32 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RBD.Size())) - n106, err := 
m.RBD.MarshalTo(dAtA[i:]) + n111, err := m.RBD.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n106 + i += n111 } if m.ISCSI != nil { dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ISCSI.Size())) - n107, err := m.ISCSI.MarshalTo(dAtA[i:]) + n112, err := m.ISCSI.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n107 + i += n112 } if m.Cinder != nil { dAtA[i] = 0x42 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Cinder.Size())) - n108, err := m.Cinder.MarshalTo(dAtA[i:]) + n113, err := m.Cinder.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n108 + i += n113 } if m.CephFS != nil { dAtA[i] = 0x4a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.CephFS.Size())) - n109, err := m.CephFS.MarshalTo(dAtA[i:]) + n114, err := m.CephFS.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n109 + i += n114 } if m.FC != nil { dAtA[i] = 0x52 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.FC.Size())) - n110, err := m.FC.MarshalTo(dAtA[i:]) + n115, err := m.FC.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n110 + i += n115 } if m.Flocker != nil { dAtA[i] = 0x5a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Flocker.Size())) - n111, err := m.Flocker.MarshalTo(dAtA[i:]) + n116, err := m.Flocker.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n111 + i += n116 } if m.FlexVolume != nil { dAtA[i] = 0x62 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.FlexVolume.Size())) - n112, err := m.FlexVolume.MarshalTo(dAtA[i:]) + n117, err := m.FlexVolume.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n112 + i += n117 } if m.AzureFile != nil { dAtA[i] = 0x6a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.AzureFile.Size())) - n113, err := m.AzureFile.MarshalTo(dAtA[i:]) + n118, err := m.AzureFile.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n113 + i += n118 } if m.VsphereVolume != nil { dAtA[i] = 0x72 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.VsphereVolume.Size())) - n114, err := m.VsphereVolume.MarshalTo(dAtA[i:]) + n119, err := m.VsphereVolume.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n114 + i += n119 } if m.Quobyte != nil { dAtA[i] = 0x7a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Quobyte.Size())) - n115, err := m.Quobyte.MarshalTo(dAtA[i:]) + n120, err := m.Quobyte.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n115 + i += n120 } if m.AzureDisk != nil { dAtA[i] = 0x82 @@ -6014,11 +6289,11 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.AzureDisk.Size())) - n116, err := m.AzureDisk.MarshalTo(dAtA[i:]) + n121, err := m.AzureDisk.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n116 + i += n121 } if m.PhotonPersistentDisk != nil { dAtA[i] = 0x8a @@ -6026,11 +6301,11 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PhotonPersistentDisk.Size())) - n117, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:]) + n122, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n117 + i += n122 } if m.PortworxVolume != nil { dAtA[i] = 0x92 @@ -6038,11 +6313,11 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PortworxVolume.Size())) - n118, err := m.PortworxVolume.MarshalTo(dAtA[i:]) + n123, err := m.PortworxVolume.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n118 + i += n123 } if 
m.ScaleIO != nil { dAtA[i] = 0x9a @@ -6050,11 +6325,11 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleIO.Size())) - n119, err := m.ScaleIO.MarshalTo(dAtA[i:]) + n124, err := m.ScaleIO.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n119 + i += n124 } if m.Local != nil { dAtA[i] = 0xa2 @@ -6062,11 +6337,11 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Local.Size())) - n120, err := m.Local.MarshalTo(dAtA[i:]) + n125, err := m.Local.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n120 + i += n125 } if m.StorageOS != nil { dAtA[i] = 0xaa @@ -6074,11 +6349,23 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.StorageOS.Size())) - n121, err := m.StorageOS.MarshalTo(dAtA[i:]) + n126, err := m.StorageOS.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n121 + i += n126 + } + if m.CSI != nil { + dAtA[i] = 0xb2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CSI.Size())) + n127, err := m.CSI.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n127 } return i, nil } @@ -6122,21 +6409,21 @@ func (m *PersistentVolumeSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n122, err := (&v).MarshalTo(dAtA[i:]) + n128, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n122 + i += n128 } } dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PersistentVolumeSource.Size())) - n123, err := m.PersistentVolumeSource.MarshalTo(dAtA[i:]) + n129, err := m.PersistentVolumeSource.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n123 + i += n129 if len(m.AccessModes) > 0 { for _, s := range m.AccessModes { dAtA[i] = 0x1a @@ -6156,11 +6443,11 @@ func (m *PersistentVolumeSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ClaimRef.Size())) - n124, err := m.ClaimRef.MarshalTo(dAtA[i:]) + n130, err := m.ClaimRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n124 + i += n130 } dAtA[i] = 0x2a i++ @@ -6185,6 +6472,12 @@ func (m *PersistentVolumeSpec) MarshalTo(dAtA []byte) (int, error) { i += copy(dAtA[i:], s) } } + if m.VolumeMode != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeMode))) + i += copy(dAtA[i:], *m.VolumeMode) + } return i, nil } @@ -6262,27 +6555,27 @@ func (m *Pod) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n125, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n131, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n125 + i += n131 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n126, err := m.Spec.MarshalTo(dAtA[i:]) + n132, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n126 + i += n132 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n127, err := m.Status.MarshalTo(dAtA[i:]) + n133, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n127 + i += n133 return i, nil } @@ -6347,11 +6640,11 @@ func (m *PodAffinityTerm) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LabelSelector.Size())) - n128, err := 
m.LabelSelector.MarshalTo(dAtA[i:]) + n134, err := m.LabelSelector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n128 + i += n134 } if len(m.Namespaces) > 0 { for _, s := range m.Namespaces { @@ -6497,19 +6790,19 @@ func (m *PodCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastProbeTime.Size())) - n129, err := m.LastProbeTime.MarshalTo(dAtA[i:]) + n135, err := m.LastProbeTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n129 + i += n135 dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n130, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n136, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n130 + i += n136 dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -6521,6 +6814,94 @@ func (m *PodCondition) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *PodDNSConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodDNSConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Nameservers) > 0 { + for _, s := range m.Nameservers { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Searches) > 0 { + for _, s := range m.Searches { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Options) > 0 { + for _, msg := range m.Options { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodDNSConfigOption) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodDNSConfigOption) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + if m.Value != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Value))) + i += copy(dAtA[i:], *m.Value) + } + return i, nil +} + func (m *PodExecOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -6608,11 +6989,11 @@ func (m *PodList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n131, err := m.ListMeta.MarshalTo(dAtA[i:]) + n137, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n131 + i += n137 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -6672,11 +7053,11 @@ func (m *PodLogOptions) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SinceTime.Size())) - n132, err := m.SinceTime.MarshalTo(dAtA[i:]) + n138, err := m.SinceTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n132 + i += n138 } dAtA[i] = 0x30 i++ @@ -6765,11 +7146,11 @@ func (m *PodSecurityContext) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = 
encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size())) - n133, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) + n139, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n133 + i += n139 } if m.RunAsUser != nil { dAtA[i] = 0x10 @@ -6820,11 +7201,11 @@ func (m *PodSignature) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PodController.Size())) - n134, err := m.PodController.MarshalTo(dAtA[i:]) + n140, err := m.PodController.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n134 + i += n140 } return i, nil } @@ -6948,11 +7329,11 @@ func (m *PodSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x72 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecurityContext.Size())) - n135, err := m.SecurityContext.MarshalTo(dAtA[i:]) + n141, err := m.SecurityContext.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n135 + i += n141 } if len(m.ImagePullSecrets) > 0 { for _, msg := range m.ImagePullSecrets { @@ -6984,11 +7365,11 @@ func (m *PodSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Affinity.Size())) - n136, err := m.Affinity.MarshalTo(dAtA[i:]) + n142, err := m.Affinity.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n136 + i += n142 } dAtA[i] = 0x9a i++ @@ -7063,6 +7444,18 @@ func (m *PodSpec) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.Priority)) } + if m.DNSConfig != nil { + dAtA[i] = 0xd2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DNSConfig.Size())) + n143, err := m.DNSConfig.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n143 + } return i, nil } @@ -7117,11 +7510,11 @@ func (m *PodStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.StartTime.Size())) - n137, err := m.StartTime.MarshalTo(dAtA[i:]) + n144, err := m.StartTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n137 + i += n144 } if len(m.ContainerStatuses) > 0 { for _, msg := range m.ContainerStatuses { @@ -7172,19 +7565,19 @@ func (m *PodStatusResult) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n138, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n145, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n138 + i += n145 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n139, err := m.Status.MarshalTo(dAtA[i:]) + n146, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n139 + i += n146 return i, nil } @@ -7206,19 +7599,19 @@ func (m *PodTemplate) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n140, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n147, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n140 + i += n147 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n141, err := m.Template.MarshalTo(dAtA[i:]) + n148, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n141 + i += n148 return i, nil } @@ -7240,11 +7633,11 @@ func (m *PodTemplateList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n142, err := m.ListMeta.MarshalTo(dAtA[i:]) + n149, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += 
n142 + i += n149 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -7278,19 +7671,19 @@ func (m *PodTemplateSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n143, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n150, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n143 + i += n150 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n144, err := m.Spec.MarshalTo(dAtA[i:]) + n151, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n144 + i += n151 return i, nil } @@ -7370,19 +7763,19 @@ func (m *PreferAvoidPodsEntry) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PodSignature.Size())) - n145, err := m.PodSignature.MarshalTo(dAtA[i:]) + n152, err := m.PodSignature.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n145 + i += n152 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.EvictionTime.Size())) - n146, err := m.EvictionTime.MarshalTo(dAtA[i:]) + n153, err := m.EvictionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n146 + i += n153 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -7415,11 +7808,11 @@ func (m *PreferredSchedulingTerm) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Preference.Size())) - n147, err := m.Preference.MarshalTo(dAtA[i:]) + n154, err := m.Preference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n147 + i += n154 return i, nil } @@ -7441,11 +7834,11 @@ func (m *Probe) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Handler.Size())) - n148, err := m.Handler.MarshalTo(dAtA[i:]) + n155, err := m.Handler.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n148 + i += n155 dAtA[i] = 0x10 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.InitialDelaySeconds)) @@ -7541,6 +7934,77 @@ func (m *QuobyteVolumeSource) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *RBDPersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RBDPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.CephMonitors) > 0 { + for _, s := range m.CephMonitors { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDImage))) + i += copy(dAtA[i:], m.RBDImage) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDPool))) + i += copy(dAtA[i:], m.RBDPool) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RadosUser))) + i += copy(dAtA[i:], m.RadosUser) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Keyring))) + i += copy(dAtA[i:], m.Keyring) + if m.SecretRef != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n156, err := m.SecretRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n156 + } + dAtA[i] = 0x40 + i++ + if m.ReadOnly { + 
dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil +} + func (m *RBDVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -7595,11 +8059,11 @@ func (m *RBDVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n149, err := m.SecretRef.MarshalTo(dAtA[i:]) + n157, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n149 + i += n157 } dAtA[i] = 0x40 i++ @@ -7630,11 +8094,11 @@ func (m *RangeAllocation) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n150, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n158, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n150 + i += n158 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Range))) @@ -7666,27 +8130,27 @@ func (m *ReplicationController) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n151, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n159, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n151 + i += n159 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n152, err := m.Spec.MarshalTo(dAtA[i:]) + n160, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n152 + i += n160 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n153, err := m.Status.MarshalTo(dAtA[i:]) + n161, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n153 + i += n161 return i, nil } @@ -7716,11 +8180,11 @@ func (m *ReplicationControllerCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n154, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n162, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n154 + i += n162 dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -7750,11 +8214,11 @@ func (m *ReplicationControllerList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n155, err := m.ListMeta.MarshalTo(dAtA[i:]) + n163, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n155 + i += n163 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -7816,11 +8280,11 @@ func (m *ReplicationControllerSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n156, err := m.Template.MarshalTo(dAtA[i:]) + n164, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n156 + i += n164 } dAtA[i] = 0x20 i++ @@ -7899,11 +8363,11 @@ func (m *ResourceFieldSelector) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Divisor.Size())) - n157, err := m.Divisor.MarshalTo(dAtA[i:]) + n165, err := m.Divisor.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n157 + i += n165 return i, nil } @@ -7925,27 +8389,27 @@ func (m *ResourceQuota) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n158, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n166, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n158 + i += n166 dAtA[i] = 0x12 i++ i 
= encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n159, err := m.Spec.MarshalTo(dAtA[i:]) + n167, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n159 + i += n167 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n160, err := m.Status.MarshalTo(dAtA[i:]) + n168, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n160 + i += n168 return i, nil } @@ -7967,11 +8431,11 @@ func (m *ResourceQuotaList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n161, err := m.ListMeta.MarshalTo(dAtA[i:]) + n169, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n161 + i += n169 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -8026,11 +8490,11 @@ func (m *ResourceQuotaSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n162, err := (&v).MarshalTo(dAtA[i:]) + n170, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n162 + i += n170 } } if len(m.Scopes) > 0 { @@ -8090,11 +8554,11 @@ func (m *ResourceQuotaStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n163, err := (&v).MarshalTo(dAtA[i:]) + n171, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n163 + i += n171 } } if len(m.Used) > 0 { @@ -8121,11 +8585,11 @@ func (m *ResourceQuotaStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n164, err := (&v).MarshalTo(dAtA[i:]) + n172, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n164 + i += n172 } } return i, nil @@ -8170,11 +8634,11 @@ func (m *ResourceRequirements) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n165, err := (&v).MarshalTo(dAtA[i:]) + n173, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n165 + i += n173 } } if len(m.Requests) > 0 { @@ -8201,11 +8665,11 @@ func (m *ResourceRequirements) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n166, err := (&v).MarshalTo(dAtA[i:]) + n174, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n166 + i += n174 } } return i, nil @@ -8245,6 +8709,78 @@ func (m *SELinuxOptions) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *ScaleIOPersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScaleIOPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Gateway))) + i += copy(dAtA[i:], m.Gateway) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.System))) + i += copy(dAtA[i:], m.System) + if m.SecretRef != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n175, err := m.SecretRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n175 + } + dAtA[i] = 0x20 + i++ + if m.SSLEnabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProtectionDomain))) + i += copy(dAtA[i:], 
m.ProtectionDomain) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePool))) + i += copy(dAtA[i:], m.StoragePool) + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageMode))) + i += copy(dAtA[i:], m.StorageMode) + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) + i += copy(dAtA[i:], m.VolumeName) + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + dAtA[i] = 0x50 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil +} + func (m *ScaleIOVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -8272,11 +8808,11 @@ func (m *ScaleIOVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n167, err := m.SecretRef.MarshalTo(dAtA[i:]) + n176, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n167 + i += n176 } dAtA[i] = 0x20 i++ @@ -8335,11 +8871,11 @@ func (m *Secret) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n168, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n177, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n168 + i += n177 if len(m.Data) > 0 { keysForData := make([]string, 0, len(m.Data)) for k := range m.Data { @@ -8415,11 +8951,11 @@ func (m *SecretEnvSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n169, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + n178, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n169 + i += n178 if m.Optional != nil { dAtA[i] = 0x10 i++ @@ -8451,11 +8987,11 @@ func (m *SecretKeySelector) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n170, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + n179, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n170 + i += n179 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) @@ -8491,11 +9027,11 @@ func (m *SecretList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n171, err := m.ListMeta.MarshalTo(dAtA[i:]) + n180, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n171 + i += n180 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -8529,11 +9065,11 @@ func (m *SecretProjection) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n172, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + n181, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n172 + i += n181 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -8653,11 +9189,11 @@ func (m *SecurityContext) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Capabilities.Size())) - n173, err := m.Capabilities.MarshalTo(dAtA[i:]) + n182, err := m.Capabilities.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n173 + i += n182 } if m.Privileged != nil { dAtA[i] = 0x10 @@ -8673,11 +9209,11 @@ func (m *SecurityContext) MarshalTo(dAtA []byte) (int, error) 
{ dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size())) - n174, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) + n183, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n174 + i += n183 } if m.RunAsUser != nil { dAtA[i] = 0x20 @@ -8735,11 +9271,11 @@ func (m *SerializedReference) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Reference.Size())) - n175, err := m.Reference.MarshalTo(dAtA[i:]) + n184, err := m.Reference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n175 + i += n184 return i, nil } @@ -8761,27 +9297,27 @@ func (m *Service) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n176, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n185, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n176 + i += n185 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n177, err := m.Spec.MarshalTo(dAtA[i:]) + n186, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n177 + i += n186 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n178, err := m.Status.MarshalTo(dAtA[i:]) + n187, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n178 + i += n187 return i, nil } @@ -8803,11 +9339,11 @@ func (m *ServiceAccount) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n179, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n188, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n179 + i += n188 if len(m.Secrets) > 0 { for _, msg := range m.Secrets { dAtA[i] = 0x12 @@ -8863,11 +9399,11 @@ func (m *ServiceAccountList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n180, err := m.ListMeta.MarshalTo(dAtA[i:]) + n189, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n180 + i += n189 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -8901,11 +9437,11 @@ func (m *ServiceList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n181, err := m.ListMeta.MarshalTo(dAtA[i:]) + n190, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n181 + i += n190 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -8950,11 +9486,11 @@ func (m *ServicePort) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.TargetPort.Size())) - n182, err := m.TargetPort.MarshalTo(dAtA[i:]) + n191, err := m.TargetPort.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n182 + i += n191 dAtA[i] = 0x28 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.NodePort)) @@ -9101,11 +9637,11 @@ func (m *ServiceSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x72 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SessionAffinityConfig.Size())) - n183, err := m.SessionAffinityConfig.MarshalTo(dAtA[i:]) + n192, err := m.SessionAffinityConfig.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n183 + i += n192 } return i, nil } @@ -9128,11 +9664,11 @@ func (m *ServiceStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LoadBalancer.Size())) - n184, err := m.LoadBalancer.MarshalTo(dAtA[i:]) + n193, err := 
m.LoadBalancer.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n184 + i += n193 return i, nil } @@ -9155,11 +9691,11 @@ func (m *SessionAffinityConfig) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ClientIP.Size())) - n185, err := m.ClientIP.MarshalTo(dAtA[i:]) + n194, err := m.ClientIP.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n185 + i += n194 } return i, nil } @@ -9203,11 +9739,11 @@ func (m *StorageOSPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n186, err := m.SecretRef.MarshalTo(dAtA[i:]) + n195, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n186 + i += n195 } return i, nil } @@ -9251,11 +9787,11 @@ func (m *StorageOSVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n187, err := m.SecretRef.MarshalTo(dAtA[i:]) + n196, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n187 + i += n196 } return i, nil } @@ -9304,11 +9840,11 @@ func (m *TCPSocketAction) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) - n188, err := m.Port.MarshalTo(dAtA[i:]) + n197, err := m.Port.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n188 + i += n197 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) @@ -9343,14 +9879,16 @@ func (m *Taint) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Effect))) i += copy(dAtA[i:], m.Effect) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TimeAdded.Size())) - n189, err := m.TimeAdded.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.TimeAdded != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.TimeAdded.Size())) + n198, err := m.TimeAdded.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n198 } - i += n189 return i, nil } @@ -9415,11 +9953,37 @@ func (m *Volume) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.VolumeSource.Size())) - n190, err := m.VolumeSource.MarshalTo(dAtA[i:]) + n199, err := m.VolumeSource.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n190 + i += n199 + return i, nil +} + +func (m *VolumeDevice) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeDevice) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DevicePath))) + i += copy(dAtA[i:], m.DevicePath) return i, nil } @@ -9486,31 +10050,31 @@ func (m *VolumeProjection) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Secret.Size())) - n191, err := m.Secret.MarshalTo(dAtA[i:]) + n200, err := m.Secret.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n191 + i += n200 } if m.DownwardAPI != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.DownwardAPI.Size())) - n192, err := m.DownwardAPI.MarshalTo(dAtA[i:]) + n201, err := m.DownwardAPI.MarshalTo(dAtA[i:]) if err != nil { return 0, err 
} - i += n192 + i += n201 } if m.ConfigMap != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMap.Size())) - n193, err := m.ConfigMap.MarshalTo(dAtA[i:]) + n202, err := m.ConfigMap.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n193 + i += n202 } return i, nil } @@ -9534,151 +10098,151 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.HostPath.Size())) - n194, err := m.HostPath.MarshalTo(dAtA[i:]) + n203, err := m.HostPath.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n194 + i += n203 } if m.EmptyDir != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.EmptyDir.Size())) - n195, err := m.EmptyDir.MarshalTo(dAtA[i:]) + n204, err := m.EmptyDir.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n195 + i += n204 } if m.GCEPersistentDisk != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.GCEPersistentDisk.Size())) - n196, err := m.GCEPersistentDisk.MarshalTo(dAtA[i:]) + n205, err := m.GCEPersistentDisk.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n196 + i += n205 } if m.AWSElasticBlockStore != nil { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.AWSElasticBlockStore.Size())) - n197, err := m.AWSElasticBlockStore.MarshalTo(dAtA[i:]) + n206, err := m.AWSElasticBlockStore.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n197 + i += n206 } if m.GitRepo != nil { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.GitRepo.Size())) - n198, err := m.GitRepo.MarshalTo(dAtA[i:]) + n207, err := m.GitRepo.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n198 + i += n207 } if m.Secret != nil { dAtA[i] = 0x32 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Secret.Size())) - n199, err := m.Secret.MarshalTo(dAtA[i:]) + n208, err := m.Secret.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n199 + i += n208 } if m.NFS != nil { dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.NFS.Size())) - n200, err := m.NFS.MarshalTo(dAtA[i:]) + n209, err := m.NFS.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n200 + i += n209 } if m.ISCSI != nil { dAtA[i] = 0x42 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ISCSI.Size())) - n201, err := m.ISCSI.MarshalTo(dAtA[i:]) + n210, err := m.ISCSI.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n201 + i += n210 } if m.Glusterfs != nil { dAtA[i] = 0x4a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Glusterfs.Size())) - n202, err := m.Glusterfs.MarshalTo(dAtA[i:]) + n211, err := m.Glusterfs.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n202 + i += n211 } if m.PersistentVolumeClaim != nil { dAtA[i] = 0x52 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PersistentVolumeClaim.Size())) - n203, err := m.PersistentVolumeClaim.MarshalTo(dAtA[i:]) + n212, err := m.PersistentVolumeClaim.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n203 + i += n212 } if m.RBD != nil { dAtA[i] = 0x5a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RBD.Size())) - n204, err := m.RBD.MarshalTo(dAtA[i:]) + n213, err := m.RBD.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n204 + i += n213 } if m.FlexVolume != nil { dAtA[i] = 0x62 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.FlexVolume.Size())) - n205, err := m.FlexVolume.MarshalTo(dAtA[i:]) + n214, err := m.FlexVolume.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n205 + i += n214 } if m.Cinder != nil { dAtA[i] = 0x6a i++ i = 
encodeVarintGenerated(dAtA, i, uint64(m.Cinder.Size())) - n206, err := m.Cinder.MarshalTo(dAtA[i:]) + n215, err := m.Cinder.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n206 + i += n215 } if m.CephFS != nil { dAtA[i] = 0x72 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.CephFS.Size())) - n207, err := m.CephFS.MarshalTo(dAtA[i:]) + n216, err := m.CephFS.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n207 + i += n216 } if m.Flocker != nil { dAtA[i] = 0x7a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Flocker.Size())) - n208, err := m.Flocker.MarshalTo(dAtA[i:]) + n217, err := m.Flocker.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n208 + i += n217 } if m.DownwardAPI != nil { dAtA[i] = 0x82 @@ -9686,11 +10250,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.DownwardAPI.Size())) - n209, err := m.DownwardAPI.MarshalTo(dAtA[i:]) + n218, err := m.DownwardAPI.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n209 + i += n218 } if m.FC != nil { dAtA[i] = 0x8a @@ -9698,11 +10262,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.FC.Size())) - n210, err := m.FC.MarshalTo(dAtA[i:]) + n219, err := m.FC.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n210 + i += n219 } if m.AzureFile != nil { dAtA[i] = 0x92 @@ -9710,11 +10274,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.AzureFile.Size())) - n211, err := m.AzureFile.MarshalTo(dAtA[i:]) + n220, err := m.AzureFile.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n211 + i += n220 } if m.ConfigMap != nil { dAtA[i] = 0x9a @@ -9722,11 +10286,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMap.Size())) - n212, err := m.ConfigMap.MarshalTo(dAtA[i:]) + n221, err := m.ConfigMap.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n212 + i += n221 } if m.VsphereVolume != nil { dAtA[i] = 0xa2 @@ -9734,11 +10298,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.VsphereVolume.Size())) - n213, err := m.VsphereVolume.MarshalTo(dAtA[i:]) + n222, err := m.VsphereVolume.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n213 + i += n222 } if m.Quobyte != nil { dAtA[i] = 0xaa @@ -9746,11 +10310,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Quobyte.Size())) - n214, err := m.Quobyte.MarshalTo(dAtA[i:]) + n223, err := m.Quobyte.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n214 + i += n223 } if m.AzureDisk != nil { dAtA[i] = 0xb2 @@ -9758,11 +10322,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.AzureDisk.Size())) - n215, err := m.AzureDisk.MarshalTo(dAtA[i:]) + n224, err := m.AzureDisk.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n215 + i += n224 } if m.PhotonPersistentDisk != nil { dAtA[i] = 0xba @@ -9770,11 +10334,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PhotonPersistentDisk.Size())) - n216, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:]) + n225, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:]) if err != nil { 
return 0, err } - i += n216 + i += n225 } if m.PortworxVolume != nil { dAtA[i] = 0xc2 @@ -9782,11 +10346,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PortworxVolume.Size())) - n217, err := m.PortworxVolume.MarshalTo(dAtA[i:]) + n226, err := m.PortworxVolume.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n217 + i += n226 } if m.ScaleIO != nil { dAtA[i] = 0xca @@ -9794,11 +10358,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleIO.Size())) - n218, err := m.ScaleIO.MarshalTo(dAtA[i:]) + n227, err := m.ScaleIO.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n218 + i += n227 } if m.Projected != nil { dAtA[i] = 0xd2 @@ -9806,11 +10370,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Projected.Size())) - n219, err := m.Projected.MarshalTo(dAtA[i:]) + n228, err := m.Projected.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n219 + i += n228 } if m.StorageOS != nil { dAtA[i] = 0xda @@ -9818,11 +10382,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.StorageOS.Size())) - n220, err := m.StorageOS.MarshalTo(dAtA[i:]) + n229, err := m.StorageOS.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n220 + i += n229 } return i, nil } @@ -9882,11 +10446,11 @@ func (m *WeightedPodAffinityTerm) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PodAffinityTerm.Size())) - n221, err := m.PodAffinityTerm.MarshalTo(dAtA[i:]) + n230, err := m.PodAffinityTerm.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n221 + i += n230 return i, nil } @@ -10030,6 +10594,17 @@ func (m *Binding) Size() (n int) { return n } +func (m *CSIPersistentVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeHandle) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + func (m *Capabilities) Size() (n int) { var l int _ = l @@ -10319,6 +10894,12 @@ func (m *Container) Size() (n int) { } l = len(m.TerminationMessagePolicy) n += 2 + l + sovGenerated(uint64(l)) + if len(m.VolumeDevices) > 0 { + for _, e := range m.VolumeDevices { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } return n } @@ -10661,6 +11242,22 @@ func (m *Event) Size() (n int) { n += 1 + sovGenerated(uint64(m.Count)) l = len(m.Type) n += 1 + l + sovGenerated(uint64(l)) + l = m.EventTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Series != nil { + l = m.Series.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Action) + n += 1 + l + sovGenerated(uint64(l)) + if m.Related != nil { + l = m.Related.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.ReportingController) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ReportingInstance) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -10678,6 +11275,17 @@ func (m *EventList) Size() (n int) { return n } +func (m *EventSeries) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Count)) + l = m.LastObservedTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.State) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *EventSource) Size() (n int) { var l int _ = l @@ -10866,6 +11474,38 @@ func (m *HostPathVolumeSource) Size() (n int) { return n 
} +func (m *ISCSIPersistentVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.TargetPortal) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.IQN) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Lun)) + l = len(m.ISCSIInterface) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if len(m.Portals) > 0 { + for _, s := range m.Portals { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if m.InitiatorName != nil { + l = len(*m.InitiatorName) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *ISCSIVolumeSource) Size() (n int) { var l int _ = l @@ -11569,6 +12209,10 @@ func (m *PersistentVolumeClaimSpec) Size() (n int) { l = len(*m.StorageClassName) n += 1 + l + sovGenerated(uint64(l)) } + if m.VolumeMode != nil { + l = len(*m.VolumeMode) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -11711,6 +12355,10 @@ func (m *PersistentVolumeSource) Size() (n int) { l = m.StorageOS.Size() n += 2 + l + sovGenerated(uint64(l)) } + if m.CSI != nil { + l = m.CSI.Size() + n += 2 + l + sovGenerated(uint64(l)) + } return n } @@ -11748,6 +12396,10 @@ func (m *PersistentVolumeSpec) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.VolumeMode != nil { + l = len(*m.VolumeMode) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -11869,6 +12521,42 @@ func (m *PodCondition) Size() (n int) { return n } +func (m *PodDNSConfig) Size() (n int) { + var l int + _ = l + if len(m.Nameservers) > 0 { + for _, s := range m.Nameservers { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Searches) > 0 { + for _, s := range m.Searches { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodDNSConfigOption) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Value != nil { + l = len(*m.Value) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *PodExecOptions) Size() (n int) { var l int _ = l @@ -12066,6 +12754,10 @@ func (m *PodSpec) Size() (n int) { if m.Priority != nil { n += 2 + sovGenerated(uint64(*m.Priority)) } + if m.DNSConfig != nil { + l = m.DNSConfig.Size() + n += 2 + l + sovGenerated(uint64(l)) + } return n } @@ -12240,6 +12932,33 @@ func (m *QuobyteVolumeSource) Size() (n int) { return n } +func (m *RBDPersistentVolumeSource) Size() (n int) { + var l int + _ = l + if len(m.CephMonitors) > 0 { + for _, s := range m.CephMonitors { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.RBDImage) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RBDPool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RadosUser) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Keyring) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + return n +} + func (m *RBDVolumeSource) Size() (n int) { var l int _ = l @@ -12483,6 +13202,32 @@ func (m *SELinuxOptions) Size() (n int) { return n } +func (m *ScaleIOPersistentVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.Gateway) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.System) + n += 1 + l + 
sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + l = len(m.ProtectionDomain) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StoragePool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StorageMode) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + func (m *ScaleIOVolumeSource) Size() (n int) { var l int _ = l @@ -12877,8 +13622,10 @@ func (m *Taint) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Effect) n += 1 + l + sovGenerated(uint64(l)) - l = m.TimeAdded.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.TimeAdded != nil { + l = m.TimeAdded.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -12909,6 +13656,16 @@ func (m *Volume) Size() (n int) { return n } +func (m *VolumeDevice) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DevicePath) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *VolumeMount) Size() (n int) { var l int _ = l @@ -13191,6 +13948,18 @@ func (this *Binding) String() string { }, "") return s } +func (this *CSIPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CSIPersistentVolumeSource{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `VolumeHandle:` + fmt.Sprintf("%v", this.VolumeHandle) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} func (this *Capabilities) String() string { if this == nil { return "nil" @@ -13394,6 +14163,7 @@ func (this *Container) String() string { `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, `EnvFrom:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EnvFrom), "EnvFromSource", "EnvFromSource", 1), `&`, ``, 1) + `,`, `TerminationMessagePolicy:` + fmt.Sprintf("%v", this.TerminationMessagePolicy) + `,`, + `VolumeDevices:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeDevices), "VolumeDevice", "VolumeDevice", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -13667,6 +14437,12 @@ func (this *Event) String() string { `LastTimestamp:` + strings.Replace(strings.Replace(this.LastTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, `Count:` + fmt.Sprintf("%v", this.Count) + `,`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `EventTime:` + strings.Replace(strings.Replace(this.EventTime.String(), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1), `&`, ``, 1) + `,`, + `Series:` + strings.Replace(fmt.Sprintf("%v", this.Series), "EventSeries", "EventSeries", 1) + `,`, + `Action:` + fmt.Sprintf("%v", this.Action) + `,`, + `Related:` + strings.Replace(fmt.Sprintf("%v", this.Related), "ObjectReference", "ObjectReference", 1) + `,`, + `ReportingController:` + fmt.Sprintf("%v", this.ReportingController) + `,`, + `ReportingInstance:` + fmt.Sprintf("%v", this.ReportingInstance) + `,`, `}`, }, "") return s @@ -13682,6 +14458,18 @@ func (this *EventList) String() string { }, "") return s } +func (this *EventSeries) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EventSeries{`, + `Count:` + fmt.Sprintf("%v", this.Count) + `,`, + `LastObservedTime:` + strings.Replace(strings.Replace(this.LastObservedTime.String(), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1), `&`, ``, 1) + `,`, + `State:` + 
fmt.Sprintf("%v", this.State) + `,`, + `}`, + }, "") + return s +} func (this *EventSource) String() string { if this == nil { return "nil" @@ -13848,6 +14636,26 @@ func (this *HostPathVolumeSource) String() string { }, "") return s } +func (this *ISCSIPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ISCSIPersistentVolumeSource{`, + `TargetPortal:` + fmt.Sprintf("%v", this.TargetPortal) + `,`, + `IQN:` + fmt.Sprintf("%v", this.IQN) + `,`, + `Lun:` + fmt.Sprintf("%v", this.Lun) + `,`, + `ISCSIInterface:` + fmt.Sprintf("%v", this.ISCSIInterface) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `Portals:` + fmt.Sprintf("%v", this.Portals) + `,`, + `DiscoveryCHAPAuth:` + fmt.Sprintf("%v", this.DiscoveryCHAPAuth) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "SecretReference", 1) + `,`, + `SessionCHAPAuth:` + fmt.Sprintf("%v", this.SessionCHAPAuth) + `,`, + `InitiatorName:` + valueToStringGenerated(this.InitiatorName) + `,`, + `}`, + }, "") + return s +} func (this *ISCSIVolumeSource) String() string { if this == nil { return "nil" @@ -14457,6 +15265,7 @@ func (this *PersistentVolumeClaimSpec) String() string { `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, `StorageClassName:` + valueToStringGenerated(this.StorageClassName) + `,`, + `VolumeMode:` + valueToStringGenerated(this.VolumeMode) + `,`, `}`, }, "") return s @@ -14516,8 +15325,8 @@ func (this *PersistentVolumeSource) String() string { `HostPath:` + strings.Replace(fmt.Sprintf("%v", this.HostPath), "HostPathVolumeSource", "HostPathVolumeSource", 1) + `,`, `Glusterfs:` + strings.Replace(fmt.Sprintf("%v", this.Glusterfs), "GlusterfsVolumeSource", "GlusterfsVolumeSource", 1) + `,`, `NFS:` + strings.Replace(fmt.Sprintf("%v", this.NFS), "NFSVolumeSource", "NFSVolumeSource", 1) + `,`, - `RBD:` + strings.Replace(fmt.Sprintf("%v", this.RBD), "RBDVolumeSource", "RBDVolumeSource", 1) + `,`, - `ISCSI:` + strings.Replace(fmt.Sprintf("%v", this.ISCSI), "ISCSIVolumeSource", "ISCSIVolumeSource", 1) + `,`, + `RBD:` + strings.Replace(fmt.Sprintf("%v", this.RBD), "RBDPersistentVolumeSource", "RBDPersistentVolumeSource", 1) + `,`, + `ISCSI:` + strings.Replace(fmt.Sprintf("%v", this.ISCSI), "ISCSIPersistentVolumeSource", "ISCSIPersistentVolumeSource", 1) + `,`, `Cinder:` + strings.Replace(fmt.Sprintf("%v", this.Cinder), "CinderVolumeSource", "CinderVolumeSource", 1) + `,`, `CephFS:` + strings.Replace(fmt.Sprintf("%v", this.CephFS), "CephFSPersistentVolumeSource", "CephFSPersistentVolumeSource", 1) + `,`, `FC:` + strings.Replace(fmt.Sprintf("%v", this.FC), "FCVolumeSource", "FCVolumeSource", 1) + `,`, @@ -14529,9 +15338,10 @@ func (this *PersistentVolumeSource) String() string { `AzureDisk:` + strings.Replace(fmt.Sprintf("%v", this.AzureDisk), "AzureDiskVolumeSource", "AzureDiskVolumeSource", 1) + `,`, `PhotonPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.PhotonPersistentDisk), "PhotonPersistentDiskVolumeSource", "PhotonPersistentDiskVolumeSource", 1) + `,`, `PortworxVolume:` + strings.Replace(fmt.Sprintf("%v", this.PortworxVolume), "PortworxVolumeSource", "PortworxVolumeSource", 1) + `,`, - `ScaleIO:` + strings.Replace(fmt.Sprintf("%v", this.ScaleIO), "ScaleIOVolumeSource", "ScaleIOVolumeSource", 1) + `,`, + `ScaleIO:` 
+ strings.Replace(fmt.Sprintf("%v", this.ScaleIO), "ScaleIOPersistentVolumeSource", "ScaleIOPersistentVolumeSource", 1) + `,`, `Local:` + strings.Replace(fmt.Sprintf("%v", this.Local), "LocalVolumeSource", "LocalVolumeSource", 1) + `,`, `StorageOS:` + strings.Replace(fmt.Sprintf("%v", this.StorageOS), "StorageOSPersistentVolumeSource", "StorageOSPersistentVolumeSource", 1) + `,`, + `CSI:` + strings.Replace(fmt.Sprintf("%v", this.CSI), "CSIPersistentVolumeSource", "CSIPersistentVolumeSource", 1) + `,`, `}`, }, "") return s @@ -14558,6 +15368,7 @@ func (this *PersistentVolumeSpec) String() string { `PersistentVolumeReclaimPolicy:` + fmt.Sprintf("%v", this.PersistentVolumeReclaimPolicy) + `,`, `StorageClassName:` + fmt.Sprintf("%v", this.StorageClassName) + `,`, `MountOptions:` + fmt.Sprintf("%v", this.MountOptions) + `,`, + `VolumeMode:` + valueToStringGenerated(this.VolumeMode) + `,`, `}`, }, "") return s @@ -14660,6 +15471,29 @@ func (this *PodCondition) String() string { }, "") return s } +func (this *PodDNSConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodDNSConfig{`, + `Nameservers:` + fmt.Sprintf("%v", this.Nameservers) + `,`, + `Searches:` + fmt.Sprintf("%v", this.Searches) + `,`, + `Options:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Options), "PodDNSConfigOption", "PodDNSConfigOption", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodDNSConfigOption) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodDNSConfigOption{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + valueToStringGenerated(this.Value) + `,`, + `}`, + }, "") + return s +} func (this *PodExecOptions) String() string { if this == nil { return "nil" @@ -14787,6 +15621,7 @@ func (this *PodSpec) String() string { `HostAliases:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.HostAliases), "HostAlias", "HostAlias", 1), `&`, ``, 1) + `,`, `PriorityClassName:` + fmt.Sprintf("%v", this.PriorityClassName) + `,`, `Priority:` + valueToStringGenerated(this.Priority) + `,`, + `DNSConfig:` + strings.Replace(fmt.Sprintf("%v", this.DNSConfig), "PodDNSConfig", "PodDNSConfig", 1) + `,`, `}`, }, "") return s @@ -14940,6 +15775,23 @@ func (this *QuobyteVolumeSource) String() string { }, "") return s } +func (this *RBDPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RBDPersistentVolumeSource{`, + `CephMonitors:` + fmt.Sprintf("%v", this.CephMonitors) + `,`, + `RBDImage:` + fmt.Sprintf("%v", this.RBDImage) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `RBDPool:` + fmt.Sprintf("%v", this.RBDPool) + `,`, + `RadosUser:` + fmt.Sprintf("%v", this.RadosUser) + `,`, + `Keyring:` + fmt.Sprintf("%v", this.Keyring) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "SecretReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} func (this *RBDVolumeSource) String() string { if this == nil { return "nil" @@ -15175,6 +16027,25 @@ func (this *SELinuxOptions) String() string { }, "") return s } +func (this *ScaleIOPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScaleIOPersistentVolumeSource{`, + `Gateway:` + fmt.Sprintf("%v", this.Gateway) + `,`, + `System:` + fmt.Sprintf("%v", this.System) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", 
"SecretReference", 1) + `,`, + `SSLEnabled:` + fmt.Sprintf("%v", this.SSLEnabled) + `,`, + `ProtectionDomain:` + fmt.Sprintf("%v", this.ProtectionDomain) + `,`, + `StoragePool:` + fmt.Sprintf("%v", this.StoragePool) + `,`, + `StorageMode:` + fmt.Sprintf("%v", this.StorageMode) + `,`, + `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} func (this *ScaleIOVolumeSource) String() string { if this == nil { return "nil" @@ -15504,7 +16375,7 @@ func (this *Taint) String() string { `Key:` + fmt.Sprintf("%v", this.Key) + `,`, `Value:` + fmt.Sprintf("%v", this.Value) + `,`, `Effect:` + fmt.Sprintf("%v", this.Effect) + `,`, - `TimeAdded:` + strings.Replace(strings.Replace(this.TimeAdded.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `TimeAdded:` + strings.Replace(fmt.Sprintf("%v", this.TimeAdded), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, `}`, }, "") return s @@ -15534,6 +16405,17 @@ func (this *Volume) String() string { }, "") return s } +func (this *VolumeDevice) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeDevice{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `DevicePath:` + fmt.Sprintf("%v", this.DevicePath) + `,`, + `}`, + }, "") + return s +} func (this *VolumeMount) String() string { if this == nil { return "nil" @@ -16728,7 +17610,7 @@ func (m *Binding) Unmarshal(dAtA []byte) error { } return nil } -func (m *Capabilities) Unmarshal(dAtA []byte) error { +func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -16751,15 +17633,15 @@ func (m *Capabilities) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Capabilities: wiretype end group for non-group") + return fmt.Errorf("proto: CSIPersistentVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Capabilities: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CSIPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Add", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -16784,11 +17666,11 @@ func (m *Capabilities) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Add = append(m.Add, Capability(dAtA[iNdEx:postIndex])) + m.Driver = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Drop", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VolumeHandle", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -16813,8 +17695,28 @@ func (m *Capabilities) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Drop = append(m.Drop, Capability(dAtA[iNdEx:postIndex])) + m.VolumeHandle = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF 
+ } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -16836,7 +17738,7 @@ func (m *Capabilities) Unmarshal(dAtA []byte) error { } return nil } -func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { +func (m *Capabilities) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -16859,15 +17761,15 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CephFSPersistentVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: Capabilities: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CephFSPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Capabilities: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Monitors", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Add", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -16892,69 +17794,11 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Monitors = append(m.Monitors, string(dAtA[iNdEx:postIndex])) + m.Add = append(m.Add, Capability(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.User = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretFile", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Drop", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -16979,61 +17823,8 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.SecretFile = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b 
:= dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretRef == nil { - m.SecretRef = &SecretReference{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Drop = append(m.Drop, Capability(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -17055,7 +17846,7 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { +func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -17078,10 +17869,10 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CephFSVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: CephFSPersistentVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CephFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CephFSPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -17227,7 +18018,7 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.SecretRef == nil { - m.SecretRef = &LocalObjectReference{} + m.SecretRef = &SecretReference{} } if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -17274,7 +18065,7 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { +func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -17297,15 +18088,15 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CinderVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: CephFSVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CinderVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CephFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Monitors", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17330,11 +18121,11 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeID = string(dAtA[iNdEx:postIndex]) + m.Monitors = append(m.Monitors, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + return 
fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17359,13 +18150,13 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.FSType = string(dAtA[iNdEx:postIndex]) + m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -17375,67 +18166,88 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - if skippy < 0 { + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretFile", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { + postIndex := iNdEx + intStringLen + if postIndex > l { return io.ErrUnexpectedEOF } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + m.SecretFile = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) } - if iNdEx >= l { + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + if m.SecretRef == nil { + m.SecretRef = &LocalObjectReference{} } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClientIPConfig: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClientIPConfig: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + return fmt.Errorf("proto: wrong 
wireType = %d for field ReadOnly", wireType) } - var v int32 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -17445,12 +18257,12 @@ func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - m.TimeoutSeconds = &v + m.ReadOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -17472,7 +18284,7 @@ func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { } return nil } -func (m *ComponentCondition) Unmarshal(dAtA []byte) error { +func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -17495,15 +18307,15 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ComponentCondition: wiretype end group for non-group") + return fmt.Errorf("proto: CinderVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ComponentCondition: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CinderVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17528,11 +18340,11 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = ComponentConditionType(dAtA[iNdEx:postIndex]) + m.VolumeID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17557,42 +18369,13 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + m.FSType = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -17602,21 +18385,12 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - 
postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Error = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + m.ReadOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -17638,7 +18412,7 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { } return nil } -func (m *ComponentStatus) Unmarshal(dAtA []byte) error { +func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -17661,47 +18435,17 @@ func (m *ComponentStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ComponentStatus: wiretype end group for non-group") + return fmt.Errorf("proto: ClientIPConfig: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ComponentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ClientIPConfig: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) } - var msglen int + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -17711,23 +18455,289 @@ func (m *ComponentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, ComponentCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + m.TimeoutSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ComponentCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ComponentCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ComponentCondition: illegal tag %d (wire type %d)", 
fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = ComponentConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ComponentStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ComponentStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ComponentStatus: illegal tag %d 
(wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, ComponentCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -19289,6 +20299,37 @@ func (m *Container) Unmarshal(dAtA []byte) error { } m.TerminationMessagePolicy = TerminationMessagePolicy(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeDevices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeDevices = append(m.VolumeDevices, VolumeDevice{}) + if err := m.VolumeDevices[len(m.VolumeDevices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -22518,59 +23559,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { } m.Type = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EventList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EventList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EventList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - 
case 1: + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EventTime", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -22594,13 +23585,13 @@ func (m *EventList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.EventTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 11: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Series", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -22624,64 +23615,16 @@ func (m *EventList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, Event{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + if m.Series == nil { + m.Series = &EventSeries{} } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { + if err := m.Series.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EventSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EventSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EventSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 12: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Component", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -22706,13 +23649,13 @@ func (m *EventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Component = string(dAtA[iNdEx:postIndex]) + m.Action = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 13: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Related", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -22722,74 +23665,28 @@ func (m *EventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Host = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - 
skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExecAction) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + if m.Related == nil { + m.Related = &ObjectReference{} } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + if err := m.Related.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExecAction: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExecAction: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 14: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ReportingController", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -22814,61 +23711,516 @@ func (m *ExecAction) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) + m.ReportingController = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: FCVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: FCVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 15: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetWWNs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ReportingInstance", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ReportingInstance = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Event{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventSeries) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventSeries: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventSeries: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Count |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 
2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastObservedTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastObservedTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.State = EventSeriesState(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Component", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Component = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = 
preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecAction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FCVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FCVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetWWNs", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -24430,11 +25782,120 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := 
iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := HostPathType(dAtA[iNdEx:postIndex]) + m.Type = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ISCSIPersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ISCSIPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetPortal", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetPortal = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IQN", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -24459,8 +25920,236 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := HostPathType(dAtA[iNdEx:postIndex]) - m.Type = &s + m.IQN = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Lun", wireType) + } + m.Lun = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Lun |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ISCSIInterface", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ISCSIInterface = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Portals", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Portals = append(m.Portals, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DiscoveryCHAPAuth", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DiscoveryCHAPAuth = bool(v != 0) + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &SecretReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionCHAPAuth", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.SessionCHAPAuth = bool(v != 0) + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InitiatorName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.InitiatorName = &s iNdEx = postIndex default: iNdEx = preIndex @@ -31586,6 +33275,36 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.StorageClassName = &s iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := PersistentVolumeMode(dAtA[iNdEx:postIndex]) + m.VolumeMode = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -32298,7 +34017,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.RBD == nil { - m.RBD = &RBDVolumeSource{} + m.RBD = &RBDPersistentVolumeSource{} } if err := m.RBD.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -32331,7 +34050,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.ISCSI == nil { - m.ISCSI = &ISCSIVolumeSource{} + m.ISCSI = &ISCSIPersistentVolumeSource{} } if err := m.ISCSI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -32727,7 +34446,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.ScaleIO == nil { - m.ScaleIO = &ScaleIOVolumeSource{} + m.ScaleIO = &ScaleIOPersistentVolumeSource{} } if err := m.ScaleIO.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -32799,6 +34518,39 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CSI", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CSI == nil { + m.CSI = &CSIPersistentVolumeSource{} + } + if err := m.CSI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -33149,6 +34901,36 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } m.MountOptions = append(m.MountOptions, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := 
PersistentVolumeMode(dAtA[iNdEx:postIndex]) + m.VolumeMode = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -33193,17 +34975,262 @@ func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PersistentVolumeStatus: wiretype end group for non-group") + return fmt.Errorf("proto: PersistentVolumeStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Phase = PersistentVolumePhase(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PhotonPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PhotonPersistentDiskVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PhotonPersistentDiskVolumeSource: 
illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PdID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PdID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Pod) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Pod: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PersistentVolumeStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Pod: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -33213,26 +35240,27 @@ func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Phase = PersistentVolumePhase(dAtA[iNdEx:postIndex]) + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } - var stringLen uint64 + var msglen int for 
shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -33242,26 +35270,27 @@ func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(dAtA[iNdEx:postIndex]) + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -33271,20 +35300,21 @@ func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Reason = string(dAtA[iNdEx:postIndex]) + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -33307,7 +35337,7 @@ func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *PhotonPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { +func (m *PodAffinity) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -33330,17 +35360,17 @@ func (m *PhotonPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PhotonPersistentDiskVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: PodAffinity: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PhotonPersistentDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodAffinity: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PdID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RequiredDuringSchedulingIgnoredDuringExecution", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -33350,26 +35380,28 @@ func (m *PhotonPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.PdID = string(dAtA[iNdEx:postIndex]) + m.RequiredDuringSchedulingIgnoredDuringExecution = append(m.RequiredDuringSchedulingIgnoredDuringExecution, PodAffinityTerm{}) + if err := 
m.RequiredDuringSchedulingIgnoredDuringExecution[len(m.RequiredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PreferredDuringSchedulingIgnoredDuringExecution", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -33379,20 +35411,22 @@ func (m *PhotonPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.FSType = string(dAtA[iNdEx:postIndex]) + m.PreferredDuringSchedulingIgnoredDuringExecution = append(m.PreferredDuringSchedulingIgnoredDuringExecution, WeightedPodAffinityTerm{}) + if err := m.PreferredDuringSchedulingIgnoredDuringExecution[len(m.PreferredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -33415,7 +35449,7 @@ func (m *PhotonPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *Pod) Unmarshal(dAtA []byte) error { +func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -33438,15 +35472,15 @@ func (m *Pod) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Pod: wiretype end group for non-group") + return fmt.Errorf("proto: PodAffinityTerm: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Pod: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodAffinityTerm: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -33470,15 +35504,18 @@ func (m *Pod) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.LabelSelector == nil { + m.LabelSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.LabelSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Namespaces", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -33488,27 +35525,26 @@ func (m *Pod) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return 
io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Namespaces = append(m.Namespaces, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TopologyKey", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -33518,21 +35554,20 @@ func (m *Pod) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.TopologyKey = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -33555,7 +35590,7 @@ func (m *Pod) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodAffinity) Unmarshal(dAtA []byte) error { +func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -33578,10 +35613,10 @@ func (m *PodAffinity) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodAffinity: wiretype end group for non-group") + return fmt.Errorf("proto: PodAntiAffinity: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodAffinity: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodAntiAffinity: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -33667,7 +35702,7 @@ func (m *PodAffinity) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { +func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -33690,17 +35725,17 @@ func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodAffinityTerm: wiretype end group for non-group") + return fmt.Errorf("proto: PodAttachOptions: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodAffinityTerm: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodAttachOptions: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -33710,30 +35745,37 @@ func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.LabelSelector == nil { - m.LabelSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + m.Stdin = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong 
wireType = %d for field Stdout", wireType) } - if err := m.LabelSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespaces", wireType) + m.Stdout = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -33743,24 +35785,35 @@ func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated + m.Stderr = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType) } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } - m.Namespaces = append(m.Namespaces, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 3: + m.TTY = bool(v != 0) + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TopologyKey", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -33785,7 +35838,7 @@ func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TopologyKey = string(dAtA[iNdEx:postIndex]) + m.Container = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -33808,7 +35861,7 @@ func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error { +func (m *PodCondition) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -33831,15 +35884,73 @@ func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodAntiAffinity: wiretype end group for non-group") + return fmt.Errorf("proto: PodCondition: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodAntiAffinity: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodCondition: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequiredDuringSchedulingIgnoredDuringExecution", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = PodConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -33863,14 +35974,13 @@ func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.RequiredDuringSchedulingIgnoredDuringExecution = append(m.RequiredDuringSchedulingIgnoredDuringExecution, PodAffinityTerm{}) - if err := m.RequiredDuringSchedulingIgnoredDuringExecution[len(m.RequiredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.LastProbeTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PreferredDuringSchedulingIgnoredDuringExecution", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -33894,106 +36004,15 @@ func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.PreferredDuringSchedulingIgnoredDuringExecution = append(m.PreferredDuringSchedulingIgnoredDuringExecution, WeightedPodAffinityTerm{}) - if err := m.PreferredDuringSchedulingIgnoredDuringExecution[len(m.PreferredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodAttachOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodAttachOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) - } - var v int - for shift := uint(0); ; 
shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Stdin = bool(v != 0) - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Stdout = bool(v != 0) - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -34003,35 +36022,24 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - m.Stderr = bool(v != 0) - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF } - m.TTY = bool(v != 0) - case 5: + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -34056,7 +36064,7 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Container = string(dAtA[iNdEx:postIndex]) + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -34079,7 +36087,7 @@ func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodCondition) Unmarshal(dAtA []byte) error { +func (m *PodDNSConfig) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -34102,15 +36110,15 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodCondition: wiretype end group for non-group") + return fmt.Errorf("proto: PodDNSConfig: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodCondition: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodDNSConfig: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Nameservers", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -34135,11 +36143,11 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = PodConditionType(dAtA[iNdEx:postIndex]) + 
m.Nameservers = append(m.Nameservers, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Searches", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -34164,11 +36172,11 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + m.Searches = append(m.Searches, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -34192,43 +36200,64 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LastProbeTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Options = append(m.Options, PodDNSConfigOption{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - if msglen < 0 { + if skippy < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodDNSConfigOption) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - iNdEx = postIndex - case 5: + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodDNSConfigOption: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodDNSConfigOption: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -34253,11 +36282,11 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Reason = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 6: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -34282,7 +36311,8 @@ func (m 
*PodCondition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.Value = &s iNdEx = postIndex default: iNdEx = preIndex @@ -36115,6 +38145,39 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } } m.Priority = &v + case 26: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DNSConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DNSConfig == nil { + m.DNSConfig = &PodDNSConfig{} + } + if err := m.DNSConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -37864,6 +39927,283 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { } return nil } +func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RBDPersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RBDPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CephMonitors", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CephMonitors = append(m.CephMonitors, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RBDImage", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RBDImage = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) 
& 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RBDPool", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RBDPool = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RadosUser", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RadosUser = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyring", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyring = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &SecretReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if 
iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -40371,11 +42711,317 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.User = string(dAtA[iNdEx:postIndex]) + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Role = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Level = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScaleIOPersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScaleIOPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Gateway", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + 
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Gateway = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field System", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.System = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &SecretReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SSLEnabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.SSLEnabled = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProtectionDomain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProtectionDomain = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StoragePool", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StoragePool = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + return 
fmt.Errorf("proto: wrong wireType = %d for field StorageMode", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -40400,11 +43046,11 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Role = string(dAtA[iNdEx:postIndex]) + m.StorageMode = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VolumeName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -40429,11 +43075,11 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = string(dAtA[iNdEx:postIndex]) + m.VolumeName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -40458,8 +43104,28 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Level = string(dAtA[iNdEx:postIndex]) + m.FSType = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -44029,15 +46695,232 @@ func (m *Sysctl) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Sysctl: wiretype end group for non-group") + return fmt.Errorf("proto: Sysctl: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Sysctl: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TCPSocketAction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TCPSocketAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TCPSocketAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Taint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Taint: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Sysctl: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Taint: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -44062,7 +46945,7 @@ func (m *Sysctl) 
Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Key = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -44093,61 +46976,11 @@ func (m *Sysctl) Unmarshal(dAtA []byte) error { } m.Value = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TCPSocketAction) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TCPSocketAction: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TCPSocketAction: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Effect", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -44157,27 +46990,26 @@ func (m *TCPSocketAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Effect = TaintEffect(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TimeAdded", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -44187,20 +47019,24 @@ func (m *TCPSocketAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Host = string(dAtA[iNdEx:postIndex]) + if m.TimeAdded == nil { + m.TimeAdded = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + } + if err := m.TimeAdded.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -44223,7 +47059,7 @@ func (m *TCPSocketAction) Unmarshal(dAtA []byte) error { } return nil } -func (m *Taint) Unmarshal(dAtA []byte) error { +func (m *Toleration) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -44246,10 +47082,10 @@ func (m 
*Taint) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Taint: wiretype end group for non-group") + return fmt.Errorf("proto: Toleration: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Taint: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Toleration: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -44283,7 +47119,7 @@ func (m *Taint) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -44308,11 +47144,11 @@ func (m *Taint) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Value = string(dAtA[iNdEx:postIndex]) + m.Operator = TolerationOperator(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Effect", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -44337,13 +47173,13 @@ func (m *Taint) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Effect = TaintEffect(dAtA[iNdEx:postIndex]) + m.Value = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeAdded", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Effect", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -44353,22 +47189,41 @@ func (m *Taint) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.TimeAdded.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Effect = TaintEffect(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TolerationSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TolerationSeconds = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -44390,7 +47245,7 @@ func (m *Taint) Unmarshal(dAtA []byte) error { } return nil } -func (m *Toleration) Unmarshal(dAtA []byte) error { +func (m *Volume) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -44413,15 +47268,15 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Toleration: wiretype end group for non-group") + return fmt.Errorf("proto: Volume: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Toleration: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: 
Volume: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -44446,42 +47301,13 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Key = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Operator = TolerationOperator(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VolumeSource", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -44491,70 +47317,22 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Value = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Effect", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF + if err := m.VolumeSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.Effect = TaintEffect(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TolerationSeconds", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.TolerationSeconds = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -44576,7 +47354,7 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { } return nil } -func (m *Volume) Unmarshal(dAtA []byte) error { +func (m *VolumeDevice) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -44599,10 +47377,10 @@ func (m *Volume) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 
0x7) if wireType == 4 { - return fmt.Errorf("proto: Volume: wiretype end group for non-group") + return fmt.Errorf("proto: VolumeDevice: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Volume: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VolumeDevice: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -44636,9 +47414,9 @@ func (m *Volume) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeSource", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DevicePath", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -44648,21 +47426,20 @@ func (m *Volume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.VolumeSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.DevicePath = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -46337,745 +49114,777 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 11834 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x6b, 0x70, 0x24, 0xd7, - 0x75, 0x18, 0xac, 0x9e, 0xc1, 0x6b, 0x0e, 0xde, 0x77, 0xb1, 0xe4, 0x10, 0x24, 0x17, 0xcb, 0xa6, - 0x44, 0x2e, 0x5f, 0x80, 0xb8, 0x24, 0x45, 0x5a, 0xa4, 0x68, 0x03, 0x18, 0x60, 0x17, 0xdc, 0xc5, - 0xee, 0xf0, 0x0e, 0x76, 0x69, 0x52, 0x34, 0x3f, 0x35, 0xa6, 0x2f, 0x80, 0x26, 0x1a, 0xdd, 0xc3, - 0xee, 0x1e, 0xec, 0x82, 0x65, 0x57, 0x7d, 0x51, 0x64, 0xe5, 0x21, 0xff, 0x70, 0x25, 0xaa, 0xc4, - 0xb1, 0x54, 0x4e, 0x55, 0x1e, 0x65, 0x2b, 0x4e, 0x52, 0x71, 0xe4, 0xd8, 0x8e, 0xe4, 0x54, 0x1c, - 0xe7, 0x51, 0xf2, 0x1f, 0xc5, 0xce, 0x1f, 0xa9, 0x2a, 0x15, 0xd8, 0x5a, 0xa5, 0x92, 0xf2, 0x8f, - 0xa4, 0x92, 0xf8, 0x97, 0x11, 0x27, 0x4a, 0xdd, 0x67, 0xdf, 0xdb, 0xd3, 0x3d, 0x33, 0x58, 0x62, - 0x41, 0x4a, 0xe5, 0x7f, 0x33, 0xe7, 0x9c, 0x7b, 0xee, 0xed, 0xfb, 0x38, 0xf7, 0xdc, 0x73, 0xcf, - 0x39, 0x17, 0x5e, 0xde, 0x7d, 0x29, 0x9e, 0xf7, 0xc2, 0x85, 0xdd, 0xf6, 0x26, 0x89, 0x02, 0x92, - 0x90, 0x78, 0x61, 0x9f, 0x04, 0x6e, 0x18, 0x2d, 0x08, 0x84, 0xd3, 0xf2, 0x16, 0x9a, 0x61, 0x44, - 0x16, 0xf6, 0x9f, 0x5d, 0xd8, 0x26, 0x01, 0x89, 0x9c, 0x84, 0xb8, 0xf3, 0xad, 0x28, 0x4c, 0x42, - 0x84, 0x38, 0xcd, 0xbc, 0xd3, 0xf2, 0xe6, 0x29, 0xcd, 0xfc, 0xfe, 0xb3, 0xb3, 0xcf, 0x6c, 0x7b, - 0xc9, 0x4e, 0x7b, 0x73, 0xbe, 0x19, 0xee, 0x2d, 0x6c, 0x87, 0xdb, 0xe1, 0x02, 0x23, 0xdd, 0x6c, - 0x6f, 0xb1, 0x7f, 0xec, 0x0f, 0xfb, 0xc5, 0x59, 0xcc, 0xae, 0xa7, 0xd5, 0x90, 0xdb, 0x09, 0x09, - 0x62, 0x2f, 0x0c, 0xe2, 0x67, 0x9c, 0x96, 0x17, 0x93, 0x68, 0x9f, 0x44, 0x0b, 0xad, 0xdd, 0x6d, - 0x8a, 0x8b, 0x4d, 0x82, 0x85, 0xfd, 0x67, 0x37, 0x49, 0xe2, 0x74, 0xb4, 0x68, 0xf6, 0xf9, 0x94, - 0xdd, 0x9e, 0xd3, 0xdc, 0xf1, 0x02, 0x12, 0x1d, 0x48, 0x1e, 0x0b, 0x11, 0x89, 0xc3, 0x76, 0xd4, - 0x24, 0xc7, 0x2a, 0x15, 0x2f, 0xec, 0x91, 0xc4, 0xc9, 0xf9, 0xfa, 0xd9, 0x85, 0xa2, 0x52, 0x51, - 0x3b, 0x48, 0xbc, 0xbd, 0xce, 0x6a, 0x3e, 0xd5, 0xab, 0x40, 0xdc, 0xdc, 0x21, 0x7b, 0x4e, 0x47, - 0xb9, 0xe7, 0x8a, 0xca, 0xb5, 0x13, 0xcf, 
0x5f, 0xf0, 0x82, 0x24, 0x4e, 0xa2, 0x6c, 0x21, 0xfb, - 0x3b, 0x16, 0x9c, 0x5f, 0x7c, 0xa3, 0xb1, 0xe2, 0x3b, 0x71, 0xe2, 0x35, 0x97, 0xfc, 0xb0, 0xb9, - 0xdb, 0x48, 0xc2, 0x88, 0xdc, 0x0c, 0xfd, 0xf6, 0x1e, 0x69, 0xb0, 0x8e, 0x40, 0x4f, 0xc3, 0xc8, - 0x3e, 0xfb, 0xbf, 0x56, 0xab, 0x5a, 0xe7, 0xad, 0x0b, 0x95, 0xa5, 0xa9, 0x6f, 0x1d, 0xce, 0x7d, - 0xec, 0xce, 0xe1, 0xdc, 0xc8, 0x4d, 0x01, 0xc7, 0x8a, 0x02, 0x3d, 0x06, 0x43, 0x5b, 0xf1, 0xc6, - 0x41, 0x8b, 0x54, 0x4b, 0x8c, 0x76, 0x42, 0xd0, 0x0e, 0xad, 0x36, 0x28, 0x14, 0x0b, 0x2c, 0x5a, - 0x80, 0x4a, 0xcb, 0x89, 0x12, 0x2f, 0xf1, 0xc2, 0xa0, 0x5a, 0x3e, 0x6f, 0x5d, 0x18, 0x5c, 0x9a, - 0x16, 0xa4, 0x95, 0xba, 0x44, 0xe0, 0x94, 0x86, 0x36, 0x23, 0x22, 0x8e, 0x7b, 0x3d, 0xf0, 0x0f, - 0xaa, 0x03, 0xe7, 0xad, 0x0b, 0x23, 0x69, 0x33, 0xb0, 0x80, 0x63, 0x45, 0x61, 0xff, 0x62, 0x09, - 0x46, 0x16, 0xb7, 0xb6, 0xbc, 0xc0, 0x4b, 0x0e, 0xd0, 0x4d, 0x18, 0x0b, 0x42, 0x97, 0xc8, 0xff, - 0xec, 0x2b, 0x46, 0x2f, 0x9e, 0x9f, 0xef, 0x9c, 0x99, 0xf3, 0xd7, 0x34, 0xba, 0xa5, 0xa9, 0x3b, - 0x87, 0x73, 0x63, 0x3a, 0x04, 0x1b, 0x7c, 0x10, 0x86, 0xd1, 0x56, 0xe8, 0x2a, 0xb6, 0x25, 0xc6, - 0x76, 0x2e, 0x8f, 0x6d, 0x3d, 0x25, 0x5b, 0x9a, 0xbc, 0x73, 0x38, 0x37, 0xaa, 0x01, 0xb0, 0xce, - 0x04, 0x6d, 0xc2, 0x24, 0xfd, 0x1b, 0x24, 0x9e, 0xe2, 0x5b, 0x66, 0x7c, 0x1f, 0x2d, 0xe2, 0xab, - 0x91, 0x2e, 0x9d, 0xb9, 0x73, 0x38, 0x37, 0x99, 0x01, 0xe2, 0x2c, 0x43, 0xfb, 0x7d, 0x98, 0x58, - 0x4c, 0x12, 0xa7, 0xb9, 0x43, 0x5c, 0x3e, 0x82, 0xe8, 0x79, 0x18, 0x08, 0x9c, 0x3d, 0x22, 0xc6, - 0xf7, 0xbc, 0xe8, 0xd8, 0x81, 0x6b, 0xce, 0x1e, 0x39, 0x3a, 0x9c, 0x9b, 0xba, 0x11, 0x78, 0xef, - 0xb5, 0xc5, 0xac, 0xa0, 0x30, 0xcc, 0xa8, 0xd1, 0x45, 0x00, 0x97, 0xec, 0x7b, 0x4d, 0x52, 0x77, - 0x92, 0x1d, 0x31, 0xde, 0x48, 0x94, 0x85, 0x9a, 0xc2, 0x60, 0x8d, 0xca, 0xbe, 0x0d, 0x95, 0xc5, - 0xfd, 0xd0, 0x73, 0xeb, 0xa1, 0x1b, 0xa3, 0x5d, 0x98, 0x6c, 0x45, 0x64, 0x8b, 0x44, 0x0a, 0x54, - 0xb5, 0xce, 0x97, 0x2f, 0x8c, 0x5e, 0xbc, 0x90, 0xfb, 0xb1, 0x26, 0xe9, 0x4a, 0x90, 0x44, 0x07, - 0x4b, 0xf7, 0x8b, 0xfa, 0x26, 0x33, 0x58, 0x9c, 0xe5, 0x6c, 0xff, 0x9b, 0x12, 0x9c, 0x5d, 0x7c, - 0xbf, 0x1d, 0x91, 0x9a, 0x17, 0xef, 0x66, 0x67, 0xb8, 0xeb, 0xc5, 0xbb, 0xd7, 0xd2, 0x1e, 0x50, - 0x53, 0xab, 0x26, 0xe0, 0x58, 0x51, 0xa0, 0x67, 0x60, 0x98, 0xfe, 0xbe, 0x81, 0xd7, 0xc4, 0x27, - 0x9f, 0x11, 0xc4, 0xa3, 0x35, 0x27, 0x71, 0x6a, 0x1c, 0x85, 0x25, 0x0d, 0x5a, 0x87, 0xd1, 0x26, - 0x5b, 0x90, 0xdb, 0xeb, 0xa1, 0x4b, 0xd8, 0x60, 0x56, 0x96, 0x9e, 0xa2, 0xe4, 0xcb, 0x29, 0xf8, - 0xe8, 0x70, 0xae, 0xca, 0xdb, 0x26, 0x58, 0x68, 0x38, 0xac, 0x97, 0x47, 0xb6, 0x5a, 0x5f, 0x03, - 0x8c, 0x13, 0xe4, 0xac, 0xad, 0x0b, 0xda, 0x52, 0x19, 0x64, 0x4b, 0x65, 0x2c, 0x7f, 0x99, 0xa0, - 0x67, 0x61, 0x60, 0xd7, 0x0b, 0xdc, 0xea, 0x10, 0xe3, 0xf5, 0x30, 0x1d, 0xf3, 0x2b, 0x5e, 0xe0, - 0x1e, 0x1d, 0xce, 0x4d, 0x1b, 0xcd, 0xa1, 0x40, 0xcc, 0x48, 0xed, 0x3f, 0xb1, 0x60, 0x8e, 0xe1, - 0x56, 0x3d, 0x9f, 0xd4, 0x49, 0x14, 0x7b, 0x71, 0x42, 0x82, 0xc4, 0xe8, 0xd0, 0x8b, 0x00, 0x31, - 0x69, 0x46, 0x24, 0xd1, 0xba, 0x54, 0x4d, 0x8c, 0x86, 0xc2, 0x60, 0x8d, 0x8a, 0x0a, 0x84, 0x78, - 0xc7, 0x89, 0xd8, 0xfc, 0x12, 0x1d, 0xab, 0x04, 0x42, 0x43, 0x22, 0x70, 0x4a, 0x63, 0x08, 0x84, - 0x72, 0x2f, 0x81, 0x80, 0x3e, 0x03, 0x93, 0x69, 0x65, 0x71, 0xcb, 0x69, 0xca, 0x0e, 0x64, 0x4b, - 0xa6, 0x61, 0xa2, 0x70, 0x96, 0xd6, 0xfe, 0x07, 0x96, 0x98, 0x3c, 0xf4, 0xab, 0x3f, 0xe2, 0xdf, - 0x6a, 0xff, 0x96, 0x05, 0xc3, 0x4b, 0x5e, 0xe0, 0x7a, 0xc1, 0x36, 0xfa, 0x1c, 0x8c, 0xd0, 0xbd, - 0xc9, 0x75, 0x12, 0x47, 0xc8, 0xbd, 0x4f, 0x6a, 0x6b, 0x4b, 0x6d, 
0x15, 0xf3, 0xad, 0xdd, 0x6d, - 0x0a, 0x88, 0xe7, 0x29, 0x35, 0x5d, 0x6d, 0xd7, 0x37, 0xdf, 0x25, 0xcd, 0x64, 0x9d, 0x24, 0x4e, - 0xfa, 0x39, 0x29, 0x0c, 0x2b, 0xae, 0xe8, 0x0a, 0x0c, 0x25, 0x4e, 0xb4, 0x4d, 0x12, 0x21, 0x00, - 0x73, 0x05, 0x15, 0x2f, 0x89, 0xe9, 0x8a, 0x24, 0x41, 0x93, 0xa4, 0xdb, 0xc2, 0x06, 0x2b, 0x8a, - 0x05, 0x0b, 0xbb, 0x09, 0x63, 0xcb, 0x4e, 0xcb, 0xd9, 0xf4, 0x7c, 0x2f, 0xf1, 0x48, 0x8c, 0x1e, - 0x87, 0xb2, 0xe3, 0xba, 0x4c, 0x2a, 0x54, 0x96, 0xce, 0xde, 0x39, 0x9c, 0x2b, 0x2f, 0xba, 0x74, - 0x7a, 0x82, 0xa2, 0x3a, 0xc0, 0x94, 0x02, 0x3d, 0x09, 0x03, 0x6e, 0x14, 0xb6, 0xaa, 0x25, 0x46, - 0x79, 0x1f, 0x9d, 0xc9, 0xb5, 0x28, 0x6c, 0x65, 0x48, 0x19, 0x8d, 0xfd, 0x3b, 0x25, 0x78, 0x68, - 0x99, 0xb4, 0x76, 0x56, 0x1b, 0x05, 0xf3, 0xf7, 0x02, 0x8c, 0xec, 0x85, 0x81, 0x97, 0x84, 0x51, - 0x2c, 0xaa, 0x66, 0x0b, 0x68, 0x5d, 0xc0, 0xb0, 0xc2, 0xa2, 0xf3, 0x30, 0xd0, 0x4a, 0x85, 0xdf, - 0x98, 0x14, 0x9c, 0x4c, 0xec, 0x31, 0x0c, 0xa5, 0x68, 0xc7, 0x24, 0x12, 0x0b, 0x5f, 0x51, 0xdc, - 0x88, 0x49, 0x84, 0x19, 0x26, 0x9d, 0x41, 0x74, 0x6e, 0x89, 0x59, 0x99, 0x99, 0x41, 0x14, 0x83, - 0x35, 0x2a, 0x54, 0x87, 0x0a, 0xff, 0x87, 0xc9, 0x16, 0x5b, 0xe3, 0x05, 0xfd, 0xde, 0x90, 0x44, - 0xa2, 0xdf, 0xc7, 0xd9, 0x14, 0x93, 0x40, 0x9c, 0x32, 0x31, 0xa6, 0xd8, 0x50, 0xcf, 0x29, 0xf6, - 0xcd, 0x12, 0x20, 0xde, 0x85, 0x3f, 0x64, 0x1d, 0x77, 0xa3, 0xb3, 0xe3, 0x72, 0x37, 0x9b, 0xab, - 0x61, 0xd3, 0xf1, 0xb3, 0xb3, 0xf6, 0xa4, 0x7a, 0xef, 0x17, 0x2c, 0x40, 0xcb, 0x5e, 0xe0, 0x92, - 0xe8, 0x14, 0x34, 0xad, 0xe3, 0xc9, 0x8e, 0xab, 0x30, 0xb1, 0xec, 0x7b, 0x24, 0x48, 0xd6, 0xea, - 0xcb, 0x61, 0xb0, 0xe5, 0x6d, 0xa3, 0x4f, 0xc3, 0x04, 0x55, 0x3c, 0xc3, 0x76, 0xd2, 0x20, 0xcd, - 0x30, 0x60, 0x7b, 0x34, 0x55, 0xd7, 0xd0, 0x9d, 0xc3, 0xb9, 0x89, 0x0d, 0x03, 0x83, 0x33, 0x94, - 0xf6, 0x7f, 0xa4, 0x1f, 0x1a, 0xee, 0xb5, 0xc2, 0x80, 0x04, 0xc9, 0x72, 0x18, 0xb8, 0x5c, 0x97, - 0xfb, 0x34, 0x0c, 0x24, 0xb4, 0xe1, 0xfc, 0x23, 0x1f, 0x93, 0x43, 0x4b, 0x9b, 0x7b, 0x74, 0x38, - 0x77, 0x5f, 0x67, 0x09, 0xf6, 0x41, 0xac, 0x0c, 0xfa, 0x31, 0x18, 0x8a, 0x13, 0x27, 0x69, 0xc7, - 0xe2, 0xb3, 0x1f, 0x91, 0x9f, 0xdd, 0x60, 0xd0, 0xa3, 0xc3, 0xb9, 0x49, 0x55, 0x8c, 0x83, 0xb0, - 0x28, 0x80, 0x9e, 0x80, 0xe1, 0x3d, 0x12, 0xc7, 0xce, 0xb6, 0xdc, 0x86, 0x27, 0x45, 0xd9, 0xe1, - 0x75, 0x0e, 0xc6, 0x12, 0x8f, 0x1e, 0x85, 0x41, 0x12, 0x45, 0x61, 0x24, 0x66, 0xd5, 0xb8, 0x20, - 0x1c, 0x5c, 0xa1, 0x40, 0xcc, 0x71, 0xf6, 0xbf, 0xb7, 0x60, 0x52, 0xb5, 0x95, 0xd7, 0x75, 0x0a, - 0xf2, 0xf6, 0x2d, 0x80, 0xa6, 0xfc, 0xc0, 0x98, 0xc9, 0xbb, 0xd1, 0x8b, 0x8f, 0xe5, 0x4d, 0xe1, - 0xce, 0x6e, 0x4c, 0x39, 0x2b, 0x50, 0x8c, 0x35, 0x6e, 0xf6, 0xbf, 0xb0, 0xe0, 0x4c, 0xe6, 0x8b, - 0xae, 0x7a, 0x71, 0x82, 0xde, 0xee, 0xf8, 0xaa, 0xf9, 0xfe, 0xbe, 0x8a, 0x96, 0x66, 0xdf, 0xa4, - 0xe6, 0x9c, 0x84, 0x68, 0x5f, 0x74, 0x19, 0x06, 0xbd, 0x84, 0xec, 0xc9, 0x8f, 0x79, 0xb4, 0xeb, - 0xc7, 0xf0, 0x56, 0xa5, 0x23, 0xb2, 0x46, 0x4b, 0x62, 0xce, 0xc0, 0xfe, 0x9f, 0x16, 0x54, 0xf8, - 0xb4, 0x5d, 0x77, 0x5a, 0xa7, 0x30, 0x16, 0x6b, 0x30, 0xc0, 0xb8, 0xf3, 0x86, 0x3f, 0x9e, 0xdf, - 0x70, 0xd1, 0x9c, 0x79, 0xaa, 0x4c, 0x71, 0xa5, 0x55, 0x09, 0x33, 0x0a, 0xc2, 0x8c, 0xc5, 0xec, - 0x8b, 0x50, 0x51, 0x04, 0x68, 0x0a, 0xca, 0xbb, 0x84, 0x1f, 0x54, 0x2a, 0x98, 0xfe, 0x44, 0x33, - 0x30, 0xb8, 0xef, 0xf8, 0x6d, 0xb1, 0xd8, 0x31, 0xff, 0xf3, 0xe9, 0xd2, 0x4b, 0x96, 0xfd, 0x0d, - 0xb6, 0xc6, 0x44, 0x25, 0x2b, 0xc1, 0xbe, 0x10, 0x26, 0xef, 0xc3, 0x8c, 0x9f, 0x23, 0xc3, 0x44, - 0x47, 0xf4, 0x2f, 0xf3, 0x1e, 0x12, 0x6d, 0x9d, 0xc9, 0xc3, 0xe2, 0xdc, 0x3a, 0xe8, 0x36, 
0x10, - 0xb6, 0xe8, 0x8c, 0x72, 0x7c, 0xd6, 0x5e, 0xa1, 0x80, 0x5e, 0x17, 0x30, 0xac, 0xb0, 0x54, 0x40, - 0xcc, 0xa8, 0xc6, 0x5f, 0x21, 0x07, 0x0d, 0xe2, 0x93, 0x66, 0x12, 0x46, 0x1f, 0x6a, 0xf3, 0x1f, - 0xe6, 0xbd, 0xcf, 0xe5, 0xcb, 0xa8, 0x60, 0x50, 0xbe, 0x42, 0x0e, 0xf8, 0x50, 0xe8, 0x5f, 0x57, - 0xee, 0xfa, 0x75, 0xbf, 0x66, 0xc1, 0xb8, 0xfa, 0xba, 0x53, 0x58, 0x48, 0x4b, 0xe6, 0x42, 0x7a, - 0xb8, 0xeb, 0x7c, 0x2c, 0x58, 0x42, 0x3f, 0x60, 0x22, 0x40, 0xd0, 0xd4, 0xa3, 0x90, 0x76, 0x0d, - 0x95, 0xd9, 0x1f, 0xe6, 0x80, 0xf4, 0xf3, 0x5d, 0x57, 0xc8, 0xc1, 0x46, 0x48, 0xd5, 0x87, 0xfc, - 0xef, 0x32, 0x46, 0x6d, 0xa0, 0xeb, 0xa8, 0xfd, 0x7a, 0x09, 0xce, 0xaa, 0x1e, 0x30, 0x36, 0xe8, - 0x1f, 0xf6, 0x3e, 0x78, 0x16, 0x46, 0x5d, 0xb2, 0xe5, 0xb4, 0xfd, 0x44, 0x9d, 0x45, 0x07, 0xb9, - 0x3d, 0xa2, 0x96, 0x82, 0xb1, 0x4e, 0x73, 0x8c, 0x6e, 0xfb, 0xb7, 0xc0, 0x64, 0x6f, 0xe2, 0xd0, - 0x19, 0x4c, 0xb5, 0x37, 0xcd, 0xa2, 0x30, 0xa6, 0x5b, 0x14, 0x84, 0xf5, 0xe0, 0x51, 0x18, 0xf4, - 0xf6, 0xe8, 0x5e, 0x5c, 0x32, 0xb7, 0xd8, 0x35, 0x0a, 0xc4, 0x1c, 0x87, 0x3e, 0x01, 0xc3, 0xcd, - 0x70, 0x6f, 0xcf, 0x09, 0xdc, 0x6a, 0x99, 0xe9, 0x93, 0xa3, 0x74, 0xbb, 0x5e, 0xe6, 0x20, 0x2c, - 0x71, 0xe8, 0x21, 0x18, 0x70, 0xa2, 0xed, 0xb8, 0x3a, 0xc0, 0x68, 0x46, 0x68, 0x4d, 0x8b, 0xd1, - 0x76, 0x8c, 0x19, 0x94, 0xea, 0x89, 0xb7, 0xc2, 0x68, 0xd7, 0x0b, 0xb6, 0x6b, 0x5e, 0xc4, 0x94, - 0x3e, 0x4d, 0x4f, 0x7c, 0x43, 0x61, 0xb0, 0x46, 0x85, 0x56, 0x61, 0xb0, 0x15, 0x46, 0x49, 0x5c, - 0x1d, 0x62, 0xdd, 0xfd, 0x48, 0xc1, 0x52, 0xe2, 0x5f, 0x5b, 0x0f, 0xa3, 0x24, 0xfd, 0x00, 0xfa, - 0x2f, 0xc6, 0xbc, 0x38, 0xfa, 0x31, 0x28, 0x93, 0x60, 0xbf, 0x3a, 0xcc, 0xb8, 0xcc, 0xe6, 0x71, - 0x59, 0x09, 0xf6, 0x6f, 0x3a, 0x51, 0x2a, 0x67, 0x56, 0x82, 0x7d, 0x4c, 0xcb, 0xa0, 0x37, 0xa1, - 0x22, 0xad, 0x91, 0x71, 0x75, 0xa4, 0x78, 0x8a, 0x61, 0x41, 0x84, 0xc9, 0x7b, 0x6d, 0x2f, 0x22, - 0x7b, 0x24, 0x48, 0xe2, 0xf4, 0x3c, 0x29, 0xb1, 0x31, 0x4e, 0xb9, 0xa1, 0x37, 0x61, 0x8c, 0xeb, - 0x91, 0xeb, 0x61, 0x3b, 0x48, 0xe2, 0x6a, 0x85, 0x35, 0x2f, 0xd7, 0x74, 0x75, 0x33, 0xa5, 0x5b, - 0x9a, 0x11, 0x4c, 0xc7, 0x34, 0x60, 0x8c, 0x0d, 0x56, 0x08, 0xc3, 0xb8, 0xef, 0xed, 0x93, 0x80, - 0xc4, 0x71, 0x3d, 0x0a, 0x37, 0x49, 0x15, 0x58, 0xcb, 0x1f, 0xc8, 0xb7, 0xe8, 0x84, 0x9b, 0x64, - 0x69, 0xfa, 0xce, 0xe1, 0xdc, 0xf8, 0x55, 0xbd, 0x0c, 0x36, 0x59, 0xa0, 0x1b, 0x30, 0x41, 0x15, - 0x54, 0x2f, 0x65, 0x3a, 0xda, 0x8b, 0x29, 0xd3, 0x4e, 0xb1, 0x51, 0x08, 0x67, 0x98, 0xa0, 0xd7, - 0xa0, 0xe2, 0x7b, 0x5b, 0xa4, 0x79, 0xd0, 0xf4, 0x49, 0x75, 0x8c, 0x71, 0xcc, 0x5d, 0x56, 0x57, - 0x25, 0x11, 0x3f, 0x00, 0xa8, 0xbf, 0x38, 0x2d, 0x8e, 0x6e, 0xc2, 0x7d, 0x09, 0x89, 0xf6, 0xbc, - 0xc0, 0xa1, 0xcb, 0x41, 0xe8, 0x93, 0xcc, 0x2e, 0x36, 0xce, 0xe6, 0xdb, 0x39, 0xd1, 0x75, 0xf7, - 0x6d, 0xe4, 0x52, 0xe1, 0x82, 0xd2, 0xe8, 0x3a, 0x4c, 0xb2, 0x95, 0x50, 0x6f, 0xfb, 0x7e, 0x3d, - 0xf4, 0xbd, 0xe6, 0x41, 0x75, 0x82, 0x31, 0xfc, 0x84, 0x34, 0x7c, 0xad, 0x99, 0x68, 0x7a, 0xe2, - 0x4d, 0xff, 0xe1, 0x6c, 0x69, 0xb4, 0xc9, 0x0c, 0x21, 0xed, 0xc8, 0x4b, 0x0e, 0xe8, 0xfc, 0x25, - 0xb7, 0x93, 0xea, 0x64, 0xd7, 0xf3, 0xa3, 0x4e, 0xaa, 0xac, 0x25, 0x3a, 0x10, 0x67, 0x19, 0xd2, - 0xa5, 0x1d, 0x27, 0xae, 0x17, 0x54, 0xa7, 0x98, 0xc4, 0x50, 0x2b, 0xa3, 0x41, 0x81, 0x98, 0xe3, - 0x98, 0x11, 0x84, 0xfe, 0xb8, 0x4e, 0x25, 0xe8, 0x34, 0x23, 0x4c, 0x8d, 0x20, 0x12, 0x81, 0x53, - 0x1a, 0xba, 0x2d, 0x27, 0xc9, 0x41, 0x15, 0x31, 0x52, 0xb5, 0x5c, 0x36, 0x36, 0xde, 0xc4, 0x14, - 0x8e, 0xae, 0xc2, 0x30, 0x09, 0xf6, 0x57, 0xa3, 0x70, 0xaf, 0x7a, 0xa6, 0x78, 0xcd, 0xae, 0x70, - 0x12, 0x2e, 0xd0, 
0xd3, 0x03, 0x80, 0x00, 0x63, 0xc9, 0x02, 0xdd, 0x86, 0x6a, 0xce, 0x88, 0xf0, - 0x01, 0x98, 0x61, 0x03, 0xf0, 0x8a, 0x28, 0x5b, 0xdd, 0x28, 0xa0, 0x3b, 0xea, 0x82, 0xc3, 0x85, - 0xdc, 0xed, 0x4d, 0x98, 0x50, 0x82, 0x85, 0x8d, 0x2d, 0x9a, 0x83, 0x41, 0x2a, 0x31, 0xe5, 0x91, - 0xba, 0x42, 0xbb, 0x92, 0x99, 0xa6, 0x30, 0x87, 0xb3, 0xae, 0xf4, 0xde, 0x27, 0x4b, 0x07, 0x09, - 0xe1, 0xc7, 0xa2, 0xb2, 0xd6, 0x95, 0x12, 0x81, 0x53, 0x1a, 0xfb, 0xff, 0x72, 0xc5, 0x24, 0x95, - 0x5e, 0x7d, 0xc8, 0xeb, 0xa7, 0x61, 0x64, 0x27, 0x8c, 0x13, 0x4a, 0xcd, 0xea, 0x18, 0x4c, 0x55, - 0x91, 0xcb, 0x02, 0x8e, 0x15, 0x05, 0x7a, 0x19, 0xc6, 0x9b, 0x7a, 0x05, 0x62, 0xb3, 0x39, 0x2b, - 0x8a, 0x98, 0xb5, 0x63, 0x93, 0x16, 0xbd, 0x04, 0x23, 0xec, 0x82, 0xa2, 0x19, 0xfa, 0xe2, 0x00, - 0x26, 0x77, 0xcc, 0x91, 0xba, 0x80, 0x1f, 0x69, 0xbf, 0xb1, 0xa2, 0xa6, 0x87, 0x62, 0xda, 0x84, - 0xb5, 0xba, 0x10, 0xf3, 0xea, 0x50, 0x7c, 0x99, 0x41, 0xb1, 0xc0, 0xda, 0x7f, 0xad, 0xa4, 0xf5, - 0x32, 0x3d, 0x52, 0x10, 0x54, 0x87, 0xe1, 0x5b, 0x8e, 0x97, 0x78, 0xc1, 0xb6, 0xd8, 0xcf, 0x9f, - 0xe8, 0x2a, 0xf3, 0x59, 0xa1, 0x37, 0x78, 0x01, 0xbe, 0x2b, 0x89, 0x3f, 0x58, 0xb2, 0xa1, 0x1c, - 0xa3, 0x76, 0x10, 0x50, 0x8e, 0xa5, 0x7e, 0x39, 0x62, 0x5e, 0x80, 0x73, 0x14, 0x7f, 0xb0, 0x64, - 0x83, 0xde, 0x06, 0x90, 0xf3, 0x86, 0xb8, 0xe2, 0x62, 0xe0, 0xe9, 0xde, 0x4c, 0x37, 0x54, 0x99, - 0xa5, 0x09, 0xba, 0xe7, 0xa5, 0xff, 0xb1, 0xc6, 0xcf, 0x4e, 0x98, 0xde, 0xd3, 0xd9, 0x18, 0xf4, - 0x59, 0xba, 0x54, 0x9d, 0x28, 0x21, 0xee, 0x62, 0x22, 0x3a, 0xe7, 0xc9, 0xfe, 0xd4, 0xd6, 0x0d, - 0x6f, 0x8f, 0xe8, 0xcb, 0x5a, 0x30, 0xc1, 0x29, 0x3f, 0xfb, 0x37, 0xcb, 0x50, 0x2d, 0x6a, 0x2e, - 0x9d, 0x74, 0xe4, 0xb6, 0x97, 0x2c, 0x53, 0x75, 0xc5, 0x32, 0x27, 0xdd, 0x8a, 0x80, 0x63, 0x45, - 0x41, 0x47, 0x3f, 0xf6, 0xb6, 0xe5, 0xa9, 0x63, 0x30, 0x1d, 0xfd, 0x06, 0x83, 0x62, 0x81, 0xa5, - 0x74, 0x11, 0x71, 0x62, 0x71, 0xf3, 0xa4, 0xcd, 0x12, 0xcc, 0xa0, 0x58, 0x60, 0x75, 0x83, 0xc1, - 0x40, 0x0f, 0x83, 0x81, 0xd1, 0x45, 0x83, 0x27, 0xdb, 0x45, 0xe8, 0x1d, 0x80, 0x2d, 0x2f, 0xf0, - 0xe2, 0x1d, 0xc6, 0x7d, 0xe8, 0xd8, 0xdc, 0x95, 0xb2, 0xb3, 0xaa, 0xb8, 0x60, 0x8d, 0x23, 0x7a, - 0x01, 0x46, 0xd5, 0x02, 0x5c, 0xab, 0x55, 0x87, 0xcd, 0x6b, 0x8d, 0x54, 0x1a, 0xd5, 0xb0, 0x4e, - 0x67, 0xbf, 0x9b, 0x9d, 0x2f, 0x62, 0x05, 0x68, 0xfd, 0x6b, 0xf5, 0xdb, 0xbf, 0xa5, 0xee, 0xfd, - 0x6b, 0xff, 0x6e, 0x19, 0x26, 0x8d, 0xca, 0xda, 0x71, 0x1f, 0x32, 0xeb, 0x12, 0xdd, 0x88, 0x9c, - 0x84, 0x88, 0xf5, 0x67, 0xf7, 0x5e, 0x2a, 0xfa, 0x66, 0x45, 0x57, 0x00, 0x2f, 0x8f, 0xde, 0x81, - 0x8a, 0xef, 0xc4, 0xcc, 0xf8, 0x40, 0xc4, 0xba, 0xeb, 0x87, 0x59, 0xaa, 0xe8, 0x3b, 0x71, 0xa2, - 0xed, 0x05, 0x9c, 0x77, 0xca, 0x92, 0xee, 0x98, 0x54, 0x39, 0x91, 0x57, 0x9b, 0xaa, 0x11, 0x54, - 0x83, 0x39, 0xc0, 0x1c, 0x87, 0x5e, 0x82, 0xb1, 0x88, 0xb0, 0x59, 0xb1, 0x4c, 0x75, 0x2d, 0x36, - 0xcd, 0x06, 0x53, 0xa5, 0x0c, 0x6b, 0x38, 0x6c, 0x50, 0xa6, 0xba, 0xf6, 0x50, 0x17, 0x5d, 0xfb, - 0x09, 0x18, 0x66, 0x3f, 0xd4, 0x0c, 0x50, 0xa3, 0xb1, 0xc6, 0xc1, 0x58, 0xe2, 0xb3, 0x13, 0x66, - 0xa4, 0xcf, 0x09, 0xf3, 0x24, 0x4c, 0xd4, 0x1c, 0xb2, 0x17, 0x06, 0x2b, 0x81, 0xdb, 0x0a, 0xbd, - 0x20, 0x41, 0x55, 0x18, 0x60, 0xbb, 0x03, 0x5f, 0xdb, 0x03, 0x94, 0x03, 0x1e, 0xa0, 0x9a, 0xb3, - 0xfd, 0x07, 0x25, 0x18, 0xaf, 0x11, 0x9f, 0x24, 0x84, 0x9f, 0x35, 0x62, 0xb4, 0x0a, 0x68, 0x3b, - 0x72, 0x9a, 0xa4, 0x4e, 0x22, 0x2f, 0x74, 0x75, 0x63, 0x64, 0x99, 0x19, 0xfc, 0xd1, 0xa5, 0x0e, - 0x2c, 0xce, 0x29, 0x81, 0xde, 0x82, 0xf1, 0x56, 0x44, 0x0c, 0x1b, 0x9a, 0x55, 0xa4, 0x2e, 0xd4, - 0x75, 0x42, 0xae, 0xa9, 0x1a, 0x20, 0x6c, 
0xb2, 0x42, 0x3f, 0x01, 0x53, 0x61, 0xd4, 0xda, 0x71, - 0x82, 0x1a, 0x69, 0x91, 0xc0, 0xa5, 0xaa, 0xb8, 0xb0, 0x11, 0xcc, 0xdc, 0x39, 0x9c, 0x9b, 0xba, - 0x9e, 0xc1, 0xe1, 0x0e, 0x6a, 0xf4, 0x16, 0x4c, 0xb7, 0xa2, 0xb0, 0xe5, 0x6c, 0xb3, 0x89, 0x22, - 0x34, 0x0e, 0x2e, 0x7d, 0x9e, 0xbe, 0x73, 0x38, 0x37, 0x5d, 0xcf, 0x22, 0x8f, 0x0e, 0xe7, 0xce, - 0xb0, 0x8e, 0xa2, 0x90, 0x14, 0x89, 0x3b, 0xd9, 0xd8, 0xdb, 0x70, 0xb6, 0x16, 0xde, 0x0a, 0x6e, - 0x39, 0x91, 0xbb, 0x58, 0x5f, 0xd3, 0x0e, 0xf7, 0xd7, 0xe4, 0xe1, 0x92, 0x5f, 0xbf, 0xe6, 0xee, - 0x53, 0x5a, 0x49, 0xae, 0xfe, 0xaf, 0x7a, 0x3e, 0x29, 0x30, 0x22, 0xfc, 0xcd, 0x92, 0x51, 0x53, - 0x4a, 0xaf, 0xec, 0xfe, 0x56, 0xa1, 0xdd, 0xff, 0x75, 0x18, 0xd9, 0xf2, 0x88, 0xef, 0x62, 0xb2, - 0x25, 0x46, 0xe6, 0xf1, 0xe2, 0x1b, 0xa5, 0x55, 0x4a, 0x29, 0x8d, 0x46, 0xfc, 0x68, 0xba, 0x2a, - 0x0a, 0x63, 0xc5, 0x06, 0xed, 0xc2, 0x94, 0x3c, 0xfb, 0x48, 0xac, 0x58, 0xc4, 0x4f, 0x74, 0x3b, - 0x50, 0x99, 0xcc, 0xd9, 0x00, 0xe2, 0x0c, 0x1b, 0xdc, 0xc1, 0x98, 0x9e, 0x45, 0xf7, 0xe8, 0x76, - 0x35, 0xc0, 0xa6, 0x34, 0x3b, 0x8b, 0xb2, 0x63, 0x35, 0x83, 0xda, 0x5f, 0xb5, 0xe0, 0xfe, 0x8e, - 0x9e, 0x11, 0xe6, 0x85, 0x13, 0x1e, 0x85, 0xec, 0x71, 0xbf, 0xd4, 0xfb, 0xb8, 0x6f, 0xff, 0x43, - 0x0b, 0x66, 0x56, 0xf6, 0x5a, 0xc9, 0x41, 0xcd, 0x33, 0xef, 0x26, 0x5e, 0x84, 0xa1, 0x3d, 0xe2, - 0x7a, 0xed, 0x3d, 0x31, 0x72, 0x73, 0x52, 0xa4, 0xaf, 0x33, 0xe8, 0xd1, 0xe1, 0xdc, 0x78, 0x23, - 0x09, 0x23, 0x67, 0x9b, 0x70, 0x00, 0x16, 0xe4, 0x6c, 0x63, 0xf4, 0xde, 0x27, 0x57, 0xbd, 0x3d, - 0x4f, 0xde, 0x10, 0x76, 0x35, 0x79, 0xcd, 0xcb, 0x0e, 0x9d, 0x7f, 0xbd, 0xed, 0x04, 0x89, 0x97, - 0x1c, 0x88, 0x6b, 0x17, 0xc9, 0x04, 0xa7, 0xfc, 0xec, 0xef, 0x58, 0x30, 0x29, 0x65, 0xc9, 0xa2, - 0xeb, 0x46, 0x24, 0x8e, 0xd1, 0x2c, 0x94, 0xbc, 0x96, 0x68, 0x25, 0x88, 0x56, 0x96, 0xd6, 0xea, - 0xb8, 0xe4, 0xb5, 0x50, 0x1d, 0x2a, 0xfc, 0xa2, 0x31, 0x9d, 0x5c, 0x7d, 0x5d, 0x57, 0xb2, 0x16, - 0x6c, 0xc8, 0x92, 0x38, 0x65, 0x22, 0xb5, 0x62, 0xb6, 0x0f, 0x95, 0xcd, 0x3b, 0x9b, 0xcb, 0x02, - 0x8e, 0x15, 0x05, 0xba, 0x00, 0x23, 0x41, 0xe8, 0xf2, 0x7b, 0x5f, 0xbe, 0xa6, 0xd9, 0x94, 0xbd, - 0x26, 0x60, 0x58, 0x61, 0xed, 0x9f, 0xb3, 0x60, 0x4c, 0x7e, 0x59, 0x9f, 0x0a, 0x3a, 0x5d, 0x5a, - 0xa9, 0x72, 0x9e, 0x2e, 0x2d, 0xaa, 0x60, 0x33, 0x8c, 0xa1, 0x57, 0x97, 0x8f, 0xa3, 0x57, 0xdb, - 0x5f, 0x29, 0xc1, 0x84, 0x6c, 0x4e, 0xa3, 0xbd, 0x19, 0x93, 0x04, 0x6d, 0x40, 0xc5, 0xe1, 0x5d, - 0x4e, 0xe4, 0x8c, 0x7d, 0x34, 0xff, 0xc4, 0x65, 0x8c, 0x4f, 0xaa, 0xea, 0x2c, 0xca, 0xd2, 0x38, - 0x65, 0x84, 0x7c, 0x98, 0x0e, 0xc2, 0x84, 0x6d, 0x7b, 0x0a, 0xdf, 0xed, 0x5e, 0x20, 0xcb, 0xfd, - 0x01, 0xc1, 0x7d, 0xfa, 0x5a, 0x96, 0x0b, 0xee, 0x64, 0x8c, 0x56, 0xa4, 0x95, 0xa7, 0xcc, 0x6a, - 0x38, 0xdf, 0xad, 0x86, 0x62, 0x23, 0x8f, 0xfd, 0xdb, 0x16, 0x54, 0x24, 0xd9, 0x69, 0x5c, 0x01, - 0xad, 0xc3, 0x70, 0xcc, 0x06, 0x41, 0x76, 0x8d, 0xdd, 0xad, 0xe1, 0x7c, 0xbc, 0xd2, 0xdd, 0x9c, - 0xff, 0x8f, 0xb1, 0xe4, 0xc1, 0xcc, 0xd4, 0xaa, 0xf9, 0x1f, 0x11, 0x33, 0xb5, 0x6a, 0x4f, 0xc1, - 0x0e, 0xf3, 0x5f, 0x59, 0x9b, 0xb5, 0xb3, 0x3c, 0x55, 0x3a, 0x5b, 0x11, 0xd9, 0xf2, 0x6e, 0x67, - 0x95, 0xce, 0x3a, 0x83, 0x62, 0x81, 0x45, 0x6f, 0xc3, 0x58, 0x53, 0x5a, 0x77, 0x53, 0x31, 0xf0, - 0x58, 0x57, 0x5b, 0xb9, 0xba, 0x56, 0xe1, 0x3e, 0x61, 0xcb, 0x5a, 0x79, 0x6c, 0x70, 0x33, 0x2f, - 0xe6, 0xcb, 0xbd, 0x2e, 0xe6, 0x53, 0xbe, 0x85, 0x57, 0xcb, 0xf6, 0x2f, 0x59, 0x30, 0xc4, 0x6d, - 0x84, 0xfd, 0x19, 0x55, 0xb5, 0x6b, 0xa2, 0xb4, 0xef, 0x6e, 0x52, 0xa0, 0xb8, 0x35, 0x42, 0xeb, - 0x50, 0x61, 0x3f, 0x98, 0xad, 0xa4, 0x5c, 0xec, 0x0c, 0xc7, 0x6b, 
0xd5, 0x1b, 0x78, 0x53, 0x16, - 0xc3, 0x29, 0x07, 0xfb, 0xcb, 0x65, 0x2a, 0xaa, 0x52, 0x52, 0x63, 0x07, 0xb7, 0xee, 0xdd, 0x0e, - 0x5e, 0xba, 0x57, 0x3b, 0xf8, 0x36, 0x4c, 0x36, 0xb5, 0x3b, 0xa9, 0x74, 0x24, 0x2f, 0x74, 0x9d, - 0x24, 0xda, 0xf5, 0x15, 0xb7, 0x93, 0x2d, 0x9b, 0x4c, 0x70, 0x96, 0x2b, 0xfa, 0x2c, 0x8c, 0xf1, - 0x71, 0x16, 0xb5, 0x0c, 0xb0, 0x5a, 0x3e, 0x51, 0x3c, 0x5f, 0xf4, 0x2a, 0xd8, 0x4c, 0x6c, 0x68, - 0xc5, 0xb1, 0xc1, 0xcc, 0xfe, 0xe2, 0x20, 0x0c, 0xae, 0xec, 0x93, 0x20, 0x39, 0x05, 0x81, 0xd4, - 0x84, 0x09, 0x2f, 0xd8, 0x0f, 0xfd, 0x7d, 0xe2, 0x72, 0xfc, 0x71, 0x36, 0xd7, 0xfb, 0x04, 0xeb, - 0x89, 0x35, 0x83, 0x05, 0xce, 0xb0, 0xbc, 0x17, 0xa7, 0xf6, 0x4b, 0x30, 0xc4, 0xc7, 0x5e, 0x1c, - 0xd9, 0x73, 0x2d, 0xe0, 0xac, 0x13, 0xc5, 0x2a, 0x48, 0x2d, 0x0a, 0xdc, 0xe4, 0x2e, 0x8a, 0xa3, - 0x77, 0x61, 0x62, 0xcb, 0x8b, 0xe2, 0x84, 0x1e, 0xb7, 0xe3, 0xc4, 0xd9, 0x6b, 0xdd, 0xc5, 0x29, - 0x5d, 0xf5, 0xc3, 0xaa, 0xc1, 0x09, 0x67, 0x38, 0xa3, 0x6d, 0x18, 0xa7, 0x07, 0xc7, 0xb4, 0xaa, - 0xe1, 0x63, 0x57, 0xa5, 0xcc, 0x70, 0x57, 0x75, 0x46, 0xd8, 0xe4, 0x4b, 0x85, 0x49, 0x93, 0x1d, - 0x34, 0x47, 0x98, 0x46, 0xa1, 0x84, 0x09, 0x3f, 0x61, 0x72, 0x1c, 0x95, 0x49, 0xcc, 0x97, 0xa3, - 0x62, 0xca, 0xa4, 0xd4, 0x63, 0xc3, 0xfe, 0x1a, 0xdd, 0x1d, 0x69, 0x1f, 0x9e, 0xc2, 0xd6, 0xf2, - 0xaa, 0xb9, 0xb5, 0x3c, 0x50, 0x38, 0x9e, 0x05, 0xdb, 0xca, 0xe7, 0x60, 0x54, 0x1b, 0x6e, 0xb4, - 0x00, 0x95, 0xa6, 0x74, 0x3c, 0x10, 0x52, 0x57, 0xa9, 0x2f, 0xca, 0x23, 0x01, 0xa7, 0x34, 0xb4, - 0x37, 0xa8, 0xb2, 0x97, 0x75, 0x6b, 0xa2, 0xaa, 0x20, 0x66, 0x18, 0xfb, 0x39, 0x80, 0x95, 0xdb, - 0xa4, 0xb9, 0xc8, 0x0f, 0x5e, 0xda, 0xfd, 0x96, 0x55, 0x7c, 0xbf, 0x65, 0xff, 0x07, 0x0b, 0x26, - 0x56, 0x97, 0x0d, 0x85, 0x7c, 0x1e, 0x80, 0x6b, 0xa1, 0x6f, 0xbc, 0x71, 0x4d, 0x5a, 0x86, 0xb9, - 0x71, 0x4f, 0x41, 0xb1, 0x46, 0x81, 0x1e, 0x80, 0xb2, 0xdf, 0x0e, 0x84, 0x72, 0x38, 0x7c, 0xe7, - 0x70, 0xae, 0x7c, 0xb5, 0x1d, 0x60, 0x0a, 0xd3, 0x3c, 0x89, 0xca, 0x7d, 0x7b, 0x12, 0xf5, 0x74, - 0xc1, 0x46, 0x73, 0x30, 0x78, 0xeb, 0x96, 0xe7, 0xc6, 0xd5, 0xc1, 0xd4, 0x6a, 0xfd, 0xc6, 0x1b, - 0x6b, 0xb5, 0x18, 0x73, 0xb8, 0xfd, 0x17, 0xca, 0x30, 0xb5, 0xea, 0x93, 0xdb, 0xc6, 0x67, 0x3d, - 0x06, 0x43, 0x6e, 0xe4, 0xed, 0x93, 0x28, 0xbb, 0x8b, 0xd7, 0x18, 0x14, 0x0b, 0x6c, 0xdf, 0xde, - 0x4f, 0x37, 0x3a, 0xf7, 0xe3, 0x93, 0xf6, 0xf7, 0xea, 0xdd, 0x15, 0x6f, 0xc3, 0x30, 0xbf, 0x26, - 0xe5, 0x9d, 0x31, 0x7a, 0xf1, 0xd9, 0xbc, 0x26, 0x64, 0xfb, 0x62, 0x5e, 0x18, 0x3e, 0xb8, 0xcf, - 0x88, 0x12, 0x62, 0x02, 0x8a, 0x25, 0xcb, 0xd9, 0x4f, 0xc3, 0x98, 0x4e, 0x79, 0x2c, 0xe7, 0x91, - 0xbf, 0x68, 0xc1, 0x99, 0x55, 0x3f, 0x6c, 0xee, 0x66, 0x5c, 0xd1, 0x5e, 0x80, 0x51, 0xba, 0x9e, - 0x62, 0xc3, 0xad, 0xd5, 0x70, 0x74, 0x16, 0x28, 0xac, 0xd3, 0x69, 0xc5, 0x6e, 0xdc, 0x58, 0xab, - 0xe5, 0xf9, 0x47, 0x0b, 0x14, 0xd6, 0xe9, 0xec, 0x6f, 0x5b, 0xf0, 0xf0, 0xa5, 0xe5, 0x95, 0xd4, - 0x1b, 0xb3, 0xc3, 0x45, 0x9b, 0x2a, 0x77, 0xae, 0xd6, 0x94, 0x54, 0xb9, 0xab, 0xb1, 0x56, 0x08, - 0xec, 0x47, 0x25, 0xfc, 0xe0, 0x57, 0x2c, 0x38, 0x73, 0xc9, 0x4b, 0x30, 0x69, 0x85, 0x59, 0x67, - 0xe1, 0x88, 0xb4, 0xc2, 0xd8, 0x4b, 0xc2, 0xe8, 0x20, 0xeb, 0x2c, 0x8c, 0x15, 0x06, 0x6b, 0x54, - 0xbc, 0xe6, 0x7d, 0x2f, 0xa6, 0x2d, 0x2d, 0x99, 0x27, 0x4c, 0x2c, 0xe0, 0x58, 0x51, 0xd0, 0x0f, - 0x73, 0xbd, 0x88, 0x69, 0x08, 0x07, 0x62, 0x39, 0xab, 0x0f, 0xab, 0x49, 0x04, 0x4e, 0x69, 0xec, - 0xaf, 0x5a, 0x70, 0xf6, 0x92, 0xdf, 0x8e, 0x13, 0x12, 0x6d, 0xc5, 0x46, 0x63, 0x9f, 0x83, 0x0a, - 0x91, 0x5a, 0xb8, 0x68, 0xab, 0xda, 0x37, 0x94, 0x7a, 0xce, 0x3d, 0x95, 0x15, 0x5d, 0x1f, 
0x7e, - 0x9d, 0xc7, 0xf3, 0x47, 0xfc, 0x7a, 0x09, 0xc6, 0x2f, 0x6f, 0x6c, 0xd4, 0x2f, 0x91, 0x44, 0x88, - 0xcc, 0xde, 0x16, 0x24, 0xac, 0x1d, 0x84, 0xbb, 0xe9, 0x3a, 0xed, 0xc4, 0xf3, 0xe7, 0x79, 0x68, - 0xcc, 0xfc, 0x5a, 0x90, 0x5c, 0x8f, 0x1a, 0x49, 0xe4, 0x05, 0xdb, 0xb9, 0x47, 0x67, 0x29, 0xd8, - 0xcb, 0x45, 0x82, 0x1d, 0x3d, 0x07, 0x43, 0x2c, 0x36, 0x47, 0x6a, 0x1d, 0x0f, 0x2a, 0x55, 0x81, - 0x41, 0x8f, 0x0e, 0xe7, 0x2a, 0x37, 0xf0, 0x1a, 0xff, 0x83, 0x05, 0x29, 0xba, 0x01, 0xa3, 0x3b, - 0x49, 0xd2, 0xba, 0x4c, 0x1c, 0x97, 0x44, 0x52, 0x3a, 0x9c, 0xcb, 0x93, 0x0e, 0xb4, 0x13, 0x38, - 0x59, 0xba, 0xa0, 0x52, 0x58, 0x8c, 0x75, 0x3e, 0x76, 0x03, 0x20, 0xc5, 0x9d, 0xd0, 0xb1, 0xc1, - 0xfe, 0xbe, 0x05, 0xc3, 0x97, 0x9d, 0xc0, 0xf5, 0x49, 0x84, 0x5e, 0x81, 0x01, 0x72, 0x9b, 0x34, - 0xc5, 0x0e, 0x9e, 0xdb, 0xe0, 0x74, 0x97, 0xe3, 0x46, 0x30, 0xfa, 0x1f, 0xb3, 0x52, 0xe8, 0x32, - 0x0c, 0xd3, 0xd6, 0x5e, 0x52, 0x3e, 0xe3, 0x8f, 0x14, 0x7d, 0xb1, 0x1a, 0x76, 0xbe, 0x31, 0x0a, - 0x10, 0x96, 0xc5, 0x99, 0x41, 0xa7, 0xd9, 0x6a, 0x50, 0x01, 0x96, 0x74, 0x3b, 0x6e, 0x6d, 0x2c, - 0xd7, 0x39, 0x91, 0xe0, 0xc6, 0x0d, 0x3a, 0x12, 0x88, 0x53, 0x26, 0xf6, 0x06, 0x54, 0xe8, 0xa0, - 0x2e, 0xfa, 0x9e, 0xd3, 0xdd, 0x96, 0xf4, 0x14, 0x54, 0xa4, 0x5d, 0x27, 0x16, 0x6e, 0xe7, 0x8c, - 0xab, 0x34, 0xfb, 0xc4, 0x38, 0xc5, 0xdb, 0x5b, 0x30, 0xc3, 0x2e, 0x49, 0x9d, 0x64, 0xc7, 0x58, - 0x63, 0xbd, 0x27, 0xf3, 0xd3, 0x42, 0xbf, 0xe2, 0x23, 0x53, 0xd5, 0xfc, 0x64, 0xc7, 0x24, 0x47, - 0x4d, 0xd7, 0xfa, 0xcf, 0x03, 0x30, 0xbd, 0xd6, 0x58, 0x6e, 0x98, 0x86, 0xc5, 0x97, 0x60, 0x8c, - 0x6b, 0x02, 0x74, 0x42, 0x3b, 0xbe, 0xa8, 0x4d, 0x5d, 0x1c, 0x6c, 0x68, 0x38, 0x6c, 0x50, 0xa2, - 0x87, 0xa1, 0xec, 0xbd, 0x17, 0x64, 0x5d, 0xe1, 0xd6, 0x5e, 0xbf, 0x86, 0x29, 0x9c, 0xa2, 0xa9, - 0x52, 0xc1, 0x05, 0xa8, 0x42, 0x2b, 0xc5, 0xe2, 0x55, 0x98, 0xf0, 0xe2, 0x66, 0xec, 0xad, 0x05, - 0x54, 0xba, 0xa4, 0x31, 0x17, 0xa9, 0xc6, 0x4f, 0x9b, 0xaa, 0xb0, 0x38, 0x43, 0xad, 0x49, 0xf3, - 0xc1, 0xbe, 0x15, 0x93, 0x9e, 0xde, 0xd7, 0x54, 0xe7, 0x6a, 0xb1, 0xaf, 0x8b, 0x99, 0x5b, 0x8e, - 0xd0, 0xb9, 0xf8, 0x07, 0xc7, 0x58, 0xe2, 0xd0, 0x25, 0x98, 0x6e, 0xee, 0x38, 0xad, 0xc5, 0x76, - 0xb2, 0x53, 0xf3, 0xe2, 0x66, 0xb8, 0x4f, 0xa2, 0x03, 0xa6, 0x09, 0x8f, 0xa4, 0x46, 0x26, 0x85, - 0x58, 0xbe, 0xbc, 0x58, 0xa7, 0x94, 0xb8, 0xb3, 0x8c, 0xa9, 0x82, 0xc0, 0x89, 0xa9, 0x20, 0x8b, - 0x30, 0x29, 0xeb, 0x6a, 0x90, 0x98, 0x6d, 0x0f, 0xa3, 0xac, 0x75, 0x2a, 0x24, 0x4a, 0x80, 0x55, - 0xdb, 0xb2, 0xf4, 0xe8, 0x45, 0x18, 0xf7, 0x02, 0x2f, 0xf1, 0x9c, 0x24, 0x8c, 0xd8, 0xe6, 0x3a, - 0xc6, 0x37, 0x0c, 0x2a, 0xe1, 0xd7, 0x74, 0x04, 0x36, 0xe9, 0xec, 0x77, 0xa1, 0xa2, 0x7c, 0xcd, - 0xa4, 0xbb, 0xa4, 0x55, 0xe0, 0x2e, 0xd9, 0x7b, 0x47, 0x90, 0x16, 0xf3, 0x72, 0xae, 0xc5, 0xfc, - 0x6f, 0x59, 0x90, 0xba, 0xdc, 0xa0, 0xcb, 0x50, 0x69, 0x85, 0xec, 0xd6, 0x2c, 0x92, 0x57, 0xd1, - 0x0f, 0xe6, 0x0a, 0x0f, 0x2e, 0xa8, 0x78, 0xff, 0xd5, 0x65, 0x09, 0x9c, 0x16, 0x46, 0x4b, 0x30, - 0xdc, 0x8a, 0x48, 0x23, 0x61, 0x41, 0x23, 0x3d, 0xf9, 0xf0, 0x39, 0xc2, 0xe9, 0xb1, 0x2c, 0x68, - 0xff, 0xba, 0x05, 0xc0, 0x8d, 0xd2, 0x4e, 0xb0, 0x4d, 0x4e, 0xe1, 0xa0, 0x5d, 0x83, 0x81, 0xb8, - 0x45, 0x9a, 0xdd, 0xee, 0x33, 0xd3, 0xf6, 0x34, 0x5a, 0xa4, 0x99, 0x76, 0x38, 0xfd, 0x87, 0x59, - 0x69, 0xfb, 0x67, 0x01, 0x26, 0x52, 0x32, 0x7a, 0x00, 0x42, 0xcf, 0x18, 0x2e, 0xf9, 0x0f, 0x64, - 0x5c, 0xf2, 0x2b, 0x8c, 0x5a, 0xf3, 0xc2, 0x7f, 0x17, 0xca, 0x7b, 0xce, 0x6d, 0x71, 0xca, 0x7a, - 0xaa, 0x7b, 0x33, 0x28, 0xff, 0xf9, 0x75, 0xe7, 0x36, 0xd7, 0x63, 0x9f, 0x92, 0x13, 0x64, 0xdd, - 0xb9, 0x7d, 0xc4, 
0x6f, 0x2d, 0x99, 0x90, 0xa2, 0x87, 0xb9, 0xcf, 0xff, 0x61, 0xfa, 0x9f, 0x4d, - 0x3b, 0x5a, 0x09, 0xab, 0xcb, 0x0b, 0x84, 0x89, 0xb6, 0xaf, 0xba, 0xbc, 0x20, 0x5b, 0x97, 0x17, - 0xf4, 0x51, 0x97, 0x17, 0xa0, 0xf7, 0x61, 0x58, 0x5c, 0x87, 0x30, 0x5f, 0xc2, 0xd1, 0x8b, 0x0b, - 0x7d, 0xd4, 0x27, 0x6e, 0x53, 0x78, 0x9d, 0x0b, 0x52, 0x4f, 0x17, 0xd0, 0x9e, 0xf5, 0xca, 0x0a, - 0xd1, 0xdf, 0xb0, 0x60, 0x42, 0xfc, 0xc6, 0xe4, 0xbd, 0x36, 0x89, 0x13, 0xa1, 0x0f, 0x7c, 0xaa, - 0xff, 0x36, 0x88, 0x82, 0xbc, 0x29, 0x9f, 0x92, 0x62, 0xd6, 0x44, 0xf6, 0x6c, 0x51, 0xa6, 0x15, - 0xe8, 0x1f, 0x5b, 0x30, 0xb3, 0xe7, 0xdc, 0xe6, 0x35, 0x72, 0x18, 0x76, 0x12, 0x2f, 0x14, 0xbe, - 0x91, 0xaf, 0xf4, 0x37, 0xfc, 0x1d, 0xc5, 0x79, 0x23, 0xa5, 0x1b, 0xd5, 0x4c, 0x1e, 0x49, 0xcf, - 0xa6, 0xe6, 0xb6, 0x6b, 0x76, 0x0b, 0x46, 0xe4, 0x7c, 0xcb, 0x39, 0x0d, 0xd5, 0x74, 0x65, 0xe7, - 0xd8, 0xb7, 0x51, 0xda, 0xe9, 0x89, 0xd5, 0x23, 0xe6, 0xda, 0x3d, 0xad, 0xe7, 0x5d, 0x18, 0xd3, - 0xe7, 0xd8, 0x3d, 0xad, 0xeb, 0x3d, 0x38, 0x93, 0x33, 0x97, 0xee, 0x69, 0x95, 0xb7, 0xe0, 0x81, - 0xc2, 0xf9, 0x71, 0x2f, 0x2b, 0xb6, 0xbf, 0x6e, 0xe9, 0x72, 0xf0, 0x14, 0xcc, 0x53, 0xcb, 0xa6, - 0x79, 0xea, 0x5c, 0xf7, 0x95, 0x53, 0x60, 0xa3, 0x7a, 0x5b, 0x6f, 0x34, 0x95, 0xea, 0xe8, 0x35, - 0x18, 0xf2, 0x29, 0x44, 0xde, 0xc3, 0xd9, 0xbd, 0x57, 0x64, 0xaa, 0x4b, 0x31, 0x78, 0x8c, 0x05, - 0x07, 0xfb, 0xb7, 0x2c, 0x18, 0x38, 0x85, 0x9e, 0xc0, 0x66, 0x4f, 0x3c, 0x53, 0xc8, 0x5a, 0xe4, - 0x3d, 0x98, 0xc7, 0xce, 0xad, 0x15, 0x99, 0xdb, 0xa1, 0xa0, 0x63, 0xfe, 0x4f, 0x09, 0x46, 0x69, - 0x55, 0xd2, 0x61, 0xe4, 0x65, 0x18, 0xf7, 0x9d, 0x4d, 0xe2, 0x4b, 0x93, 0x79, 0xf6, 0x10, 0x7b, - 0x55, 0x47, 0x62, 0x93, 0x96, 0x16, 0xde, 0xd2, 0x6f, 0x0f, 0x84, 0xfe, 0xa2, 0x0a, 0x1b, 0x57, - 0x0b, 0xd8, 0xa4, 0xa5, 0xe7, 0xa9, 0x5b, 0x4e, 0xd2, 0xdc, 0x11, 0x07, 0x5c, 0xd5, 0xdc, 0x37, - 0x28, 0x10, 0x73, 0x1c, 0x55, 0xe0, 0xe4, 0xec, 0xbc, 0x49, 0x22, 0xa6, 0xc0, 0x71, 0xf5, 0x58, - 0x29, 0x70, 0xd8, 0x44, 0xe3, 0x2c, 0x7d, 0x4e, 0x6c, 0xde, 0x20, 0x73, 0x87, 0xe9, 0x23, 0x36, - 0x0f, 0xd5, 0x61, 0xc6, 0x0b, 0x9a, 0x7e, 0xdb, 0x25, 0x37, 0x02, 0xae, 0xdd, 0xf9, 0xde, 0xfb, - 0xc4, 0x15, 0x0a, 0xb4, 0xf2, 0x5c, 0x5a, 0xcb, 0xa1, 0xc1, 0xb9, 0x25, 0xed, 0xff, 0x0f, 0xce, - 0x5c, 0x0d, 0x1d, 0x77, 0xc9, 0xf1, 0x9d, 0xa0, 0x49, 0xa2, 0xb5, 0x60, 0xbb, 0xe7, 0x85, 0xbc, - 0x7e, 0x7d, 0x5e, 0xea, 0x75, 0x7d, 0x6e, 0xef, 0x00, 0xd2, 0x2b, 0x10, 0x6e, 0x60, 0x18, 0x86, - 0x3d, 0x5e, 0x95, 0x98, 0xfe, 0x8f, 0xe7, 0x6b, 0xd7, 0x1d, 0x2d, 0xd3, 0x1c, 0x9c, 0x38, 0x00, - 0x4b, 0x46, 0xf6, 0x4b, 0x90, 0x1b, 0x9b, 0xd1, 0xfb, 0x28, 0x6d, 0xbf, 0x00, 0xd3, 0xac, 0xe4, - 0xf1, 0x8e, 0x79, 0xf6, 0x5f, 0xb1, 0x60, 0xf2, 0x5a, 0x26, 0x9a, 0xf6, 0x31, 0x18, 0xe2, 0x19, - 0x4e, 0xb2, 0x46, 0xaf, 0x06, 0x83, 0x62, 0x81, 0x3d, 0x71, 0x9b, 0xcb, 0x0f, 0x2c, 0xa8, 0xa8, - 0xd0, 0xf7, 0x53, 0x50, 0x6a, 0x97, 0x0d, 0xa5, 0x36, 0xd7, 0x16, 0xa0, 0x9a, 0x53, 0xa4, 0xd3, - 0xa2, 0x2b, 0x2a, 0x2e, 0xb4, 0x8b, 0x19, 0x20, 0x65, 0xc3, 0xa3, 0x08, 0x27, 0xcc, 0xe0, 0x51, - 0x19, 0x29, 0xca, 0x6e, 0xc4, 0x15, 0xed, 0x47, 0xe4, 0x46, 0x5c, 0xb5, 0xa7, 0x40, 0xfa, 0xd5, - 0xb5, 0x26, 0xb3, 0x5d, 0xe1, 0xc7, 0x99, 0xd7, 0x28, 0x5b, 0x9b, 0x2a, 0x1c, 0x7b, 0x4e, 0x78, - 0x81, 0x0a, 0xe8, 0x11, 0x13, 0x64, 0xe2, 0x1f, 0x4f, 0x53, 0x90, 0x16, 0xb1, 0x2f, 0xc3, 0x64, - 0xa6, 0xc3, 0xd0, 0x0b, 0x30, 0xd8, 0xda, 0x71, 0x62, 0x92, 0xf1, 0x02, 0x1a, 0xac, 0x53, 0xe0, - 0xd1, 0xe1, 0xdc, 0x84, 0x2a, 0xc0, 0x20, 0x98, 0x53, 0xdb, 0xff, 0xc3, 0x82, 0x81, 0x6b, 0xa1, - 0x7b, 0x1a, 0x93, 0xe9, 0x55, 0x63, 0x32, 
0x3d, 0x54, 0x94, 0xe4, 0xa5, 0x70, 0x1e, 0xad, 0x66, - 0xe6, 0xd1, 0xb9, 0x42, 0x0e, 0xdd, 0xa7, 0xd0, 0x1e, 0x8c, 0xb2, 0xd4, 0x31, 0xc2, 0x2b, 0xe9, - 0x39, 0xe3, 0x7c, 0x35, 0x97, 0x39, 0x5f, 0x4d, 0x6a, 0xa4, 0xda, 0x29, 0xeb, 0x09, 0x18, 0x16, - 0x9e, 0x31, 0x59, 0xff, 0x58, 0x41, 0x8b, 0x25, 0xde, 0xfe, 0xa5, 0x32, 0x18, 0xa9, 0x6a, 0xd0, - 0x6f, 0x5b, 0x30, 0x1f, 0xf1, 0x88, 0x20, 0xb7, 0xd6, 0x8e, 0xbc, 0x60, 0xbb, 0xd1, 0xdc, 0x21, - 0x6e, 0xdb, 0xf7, 0x82, 0xed, 0xb5, 0xed, 0x20, 0x54, 0xe0, 0x95, 0xdb, 0xa4, 0xd9, 0x66, 0x76, - 0xf0, 0x1e, 0x79, 0x71, 0xd4, 0xcd, 0xf3, 0xc5, 0x3b, 0x87, 0x73, 0xf3, 0xf8, 0x58, 0xbc, 0xf1, - 0x31, 0xdb, 0x82, 0xbe, 0x6d, 0xc1, 0x02, 0xcf, 0xe0, 0xd2, 0x7f, 0xfb, 0xbb, 0x9c, 0x46, 0xeb, - 0x92, 0x55, 0xca, 0x64, 0x83, 0x44, 0x7b, 0x4b, 0x2f, 0x8a, 0x0e, 0x5d, 0xa8, 0x1f, 0xaf, 0x2e, - 0x7c, 0xdc, 0xc6, 0xd9, 0xff, 0xaa, 0x0c, 0xe3, 0xb4, 0x17, 0xd3, 0x28, 0xf8, 0x17, 0x8c, 0x29, - 0xf1, 0x48, 0x66, 0x4a, 0x4c, 0x1b, 0xc4, 0x27, 0x13, 0x00, 0x1f, 0xc3, 0xb4, 0xef, 0xc4, 0xc9, - 0x65, 0xe2, 0x44, 0xc9, 0x26, 0x71, 0xd8, 0x55, 0xaf, 0x98, 0xe6, 0xc7, 0xb9, 0x3d, 0x56, 0xe6, - 0xaf, 0xab, 0x59, 0x66, 0xb8, 0x93, 0x3f, 0xda, 0x07, 0xc4, 0xae, 0x95, 0x23, 0x27, 0x88, 0xf9, - 0xb7, 0x78, 0xc2, 0x46, 0x7e, 0xbc, 0x5a, 0x67, 0x45, 0xad, 0xe8, 0x6a, 0x07, 0x37, 0x9c, 0x53, - 0x83, 0xe6, 0x2e, 0x30, 0xd8, 0xaf, 0xbb, 0xc0, 0x50, 0x0f, 0x27, 0xf4, 0x3d, 0x98, 0x12, 0xa3, - 0xb2, 0xe5, 0x6d, 0x8b, 0x4d, 0xfa, 0xcd, 0x8c, 0x3b, 0x91, 0xd5, 0xbf, 0xe3, 0x43, 0x0f, 0x5f, - 0x22, 0xfb, 0xa7, 0xe1, 0x0c, 0xad, 0xce, 0x74, 0x99, 0x8e, 0x11, 0x81, 0xc9, 0xdd, 0xf6, 0x26, - 0xf1, 0x49, 0x22, 0x61, 0xa2, 0xd2, 0x5c, 0xb5, 0xdf, 0x2c, 0x9d, 0xea, 0x96, 0x57, 0x4c, 0x16, - 0x38, 0xcb, 0xd3, 0xfe, 0x65, 0x0b, 0x98, 0x63, 0xe2, 0x29, 0x6c, 0x7f, 0x9f, 0x31, 0xb7, 0xbf, - 0x6a, 0x91, 0x04, 0x2a, 0xd8, 0xf9, 0x9e, 0xe7, 0xc3, 0x52, 0x8f, 0xc2, 0xdb, 0x07, 0x52, 0xf7, - 0xef, 0xad, 0x71, 0xfd, 0x6f, 0x8b, 0x2f, 0x48, 0x15, 0x20, 0x89, 0x7e, 0x06, 0x46, 0x9a, 0x4e, - 0xcb, 0x69, 0xf2, 0x1c, 0x61, 0x85, 0xd6, 0x1f, 0xa3, 0xd0, 0xfc, 0xb2, 0x28, 0xc1, 0xad, 0x19, - 0x9f, 0x94, 0x5f, 0x29, 0xc1, 0x3d, 0x2d, 0x18, 0xaa, 0xca, 0xd9, 0x5d, 0x18, 0x37, 0x98, 0xdd, - 0xd3, 0xa3, 0xef, 0xcf, 0xf0, 0xed, 0x42, 0x9d, 0x58, 0xf6, 0x60, 0x3a, 0xd0, 0xfe, 0x53, 0xe1, - 0x28, 0xd5, 0xe9, 0x8f, 0xf7, 0xda, 0x10, 0x98, 0x24, 0xd5, 0x1c, 0x2f, 0x33, 0x6c, 0x70, 0x27, - 0x67, 0xfb, 0xef, 0x58, 0x70, 0xbf, 0x4e, 0xa8, 0xc5, 0xae, 0xf6, 0xb2, 0x27, 0xd7, 0x60, 0x24, - 0x6c, 0x91, 0xc8, 0x49, 0xcf, 0x64, 0x17, 0x64, 0xa7, 0x5f, 0x17, 0xf0, 0xa3, 0xc3, 0xb9, 0x19, - 0x9d, 0xbb, 0x84, 0x63, 0x55, 0x12, 0xd9, 0x30, 0xc4, 0x3a, 0x23, 0x16, 0x71, 0xc5, 0x2c, 0x8f, - 0x16, 0xbb, 0xee, 0x8a, 0xb1, 0xc0, 0xd8, 0x3f, 0x6b, 0xf1, 0x89, 0xa5, 0x37, 0x1d, 0xbd, 0x07, - 0x53, 0x7b, 0xf4, 0xf8, 0xb6, 0x72, 0xbb, 0x15, 0x71, 0x33, 0xba, 0xec, 0xa7, 0xa7, 0x7a, 0xf5, - 0x93, 0xf6, 0x91, 0x4b, 0x55, 0xd1, 0xe6, 0xa9, 0xf5, 0x0c, 0x33, 0xdc, 0xc1, 0xde, 0xfe, 0xd3, - 0x12, 0x5f, 0x89, 0x4c, 0xab, 0x7b, 0x02, 0x86, 0x5b, 0xa1, 0xbb, 0xbc, 0x56, 0xc3, 0xa2, 0x87, - 0x94, 0xb8, 0xaa, 0x73, 0x30, 0x96, 0x78, 0x74, 0x11, 0x80, 0xdc, 0x4e, 0x48, 0x14, 0x38, 0xbe, - 0xba, 0x8c, 0x57, 0xca, 0xd3, 0x8a, 0xc2, 0x60, 0x8d, 0x8a, 0x96, 0x69, 0x45, 0xe1, 0xbe, 0xe7, - 0xb2, 0xc0, 0x8e, 0xb2, 0x59, 0xa6, 0xae, 0x30, 0x58, 0xa3, 0xa2, 0x47, 0xe5, 0x76, 0x10, 0xf3, - 0x0d, 0xd0, 0xd9, 0x14, 0xa9, 0x78, 0x46, 0xd2, 0xa3, 0xf2, 0x0d, 0x1d, 0x89, 0x4d, 0x5a, 0xb4, - 0x08, 0x43, 0x89, 0xc3, 0xae, 0x98, 0x07, 0x8b, 0x5d, 0x76, 0x36, 
0x28, 0x85, 0x9e, 0x34, 0x8a, - 0x16, 0xc0, 0xa2, 0x20, 0x7a, 0x4b, 0x8a, 0x60, 0x2e, 0x92, 0x85, 0xeb, 0x55, 0xe1, 0xb4, 0xd5, - 0xc5, 0xb7, 0x2e, 0x83, 0x85, 0x4b, 0x97, 0xc1, 0xcb, 0xfe, 0x42, 0x05, 0x20, 0xd5, 0xf6, 0xd0, - 0xfb, 0x1d, 0x22, 0xe2, 0xe9, 0xee, 0xfa, 0xe1, 0xc9, 0xc9, 0x07, 0xf4, 0x45, 0x0b, 0x46, 0x1d, - 0xdf, 0x0f, 0x9b, 0x4e, 0xc2, 0x7a, 0xb9, 0xd4, 0x5d, 0x44, 0x89, 0xfa, 0x17, 0xd3, 0x12, 0xbc, - 0x09, 0xcf, 0xc9, 0xdb, 0x63, 0x0d, 0xd3, 0xb3, 0x15, 0x7a, 0xc5, 0xe8, 0x93, 0xf2, 0x10, 0xc0, - 0xa7, 0xc7, 0x6c, 0xf6, 0x10, 0x50, 0x61, 0xd2, 0x58, 0xd3, 0xff, 0xd1, 0x0d, 0x23, 0x67, 0xcd, - 0x40, 0x71, 0x78, 0xae, 0xa1, 0xf4, 0xf4, 0x4a, 0x57, 0x83, 0xea, 0xba, 0x0b, 0xfa, 0x60, 0x71, - 0x0c, 0xbb, 0xa6, 0x5d, 0xf7, 0x70, 0x3f, 0x7f, 0x17, 0x26, 0x5d, 0x73, 0xbb, 0x15, 0xb3, 0xe9, - 0xf1, 0x22, 0xbe, 0x99, 0xdd, 0x39, 0xdd, 0x60, 0x33, 0x08, 0x9c, 0x65, 0x8c, 0xea, 0x3c, 0x18, - 0x60, 0x2d, 0xd8, 0x0a, 0x85, 0x0b, 0x9f, 0x5d, 0x38, 0x96, 0x07, 0x71, 0x42, 0xf6, 0x28, 0x65, - 0xba, 0x8f, 0x5e, 0x13, 0x65, 0xb1, 0xe2, 0x82, 0x5e, 0x83, 0x21, 0x16, 0xa1, 0x15, 0x57, 0x47, - 0x8a, 0xed, 0x80, 0x66, 0x70, 0x71, 0xba, 0xa8, 0xd8, 0xdf, 0x18, 0x0b, 0x0e, 0xe8, 0xb2, 0x4c, - 0x11, 0x10, 0xaf, 0x05, 0x37, 0x62, 0xc2, 0x52, 0x04, 0x54, 0x96, 0x3e, 0x9e, 0x46, 0xff, 0x73, - 0x78, 0x6e, 0x7a, 0x48, 0xa3, 0x24, 0xd5, 0x57, 0xc4, 0x7f, 0x99, 0x75, 0xb2, 0x0a, 0xc5, 0xcd, - 0x33, 0x33, 0x53, 0xa6, 0xdd, 0x79, 0xd3, 0x64, 0x81, 0xb3, 0x3c, 0x4f, 0x75, 0xfb, 0x9c, 0x0d, - 0x60, 0x2a, 0xbb, 0xb0, 0xee, 0xe9, 0x76, 0xfd, 0xfd, 0x01, 0x98, 0x30, 0x27, 0x02, 0x5a, 0x80, - 0x8a, 0x60, 0xa2, 0xd2, 0x85, 0xa9, 0xb9, 0xbd, 0x2e, 0x11, 0x38, 0xa5, 0x61, 0xe9, 0xd2, 0x58, - 0x71, 0xcd, 0x37, 0x2b, 0x4d, 0x97, 0xa6, 0x30, 0x58, 0xa3, 0xa2, 0x4a, 0xf4, 0x66, 0x18, 0x26, - 0x6a, 0x2b, 0x50, 0xb3, 0x65, 0x89, 0x41, 0xb1, 0xc0, 0xd2, 0x2d, 0x60, 0x97, 0x44, 0x01, 0xf1, - 0x4d, 0x4b, 0xa6, 0xda, 0x02, 0xae, 0xe8, 0x48, 0x6c, 0xd2, 0xd2, 0x2d, 0x2d, 0x8c, 0xd9, 0xf4, - 0x13, 0xaa, 0x7a, 0xea, 0xeb, 0xd6, 0xe0, 0x11, 0x8a, 0x12, 0x8f, 0xde, 0x84, 0xfb, 0x55, 0x40, - 0x21, 0xe6, 0x96, 0x61, 0x59, 0xe3, 0x90, 0x71, 0xb2, 0xbe, 0x7f, 0x39, 0x9f, 0x0c, 0x17, 0x95, - 0x47, 0xaf, 0xc2, 0x84, 0x50, 0x81, 0x25, 0xc7, 0x61, 0xd3, 0x59, 0xe1, 0x8a, 0x81, 0xc5, 0x19, - 0x6a, 0x54, 0x83, 0x29, 0x0a, 0x61, 0x5a, 0xa8, 0xe4, 0xc0, 0x03, 0x23, 0xd5, 0x5e, 0x7f, 0x25, - 0x83, 0xc7, 0x1d, 0x25, 0xd0, 0x22, 0x4c, 0x72, 0x1d, 0x85, 0x9e, 0x29, 0xd9, 0x38, 0x08, 0xcf, - 0x5a, 0xb5, 0x10, 0xae, 0x9b, 0x68, 0x9c, 0xa5, 0x47, 0x2f, 0xc1, 0x98, 0x13, 0x35, 0x77, 0xbc, - 0x84, 0x34, 0x93, 0x76, 0xc4, 0x13, 0x70, 0x68, 0xde, 0x1e, 0x8b, 0x1a, 0x0e, 0x1b, 0x94, 0xf6, - 0xfb, 0x70, 0x26, 0xc7, 0x29, 0x9f, 0x4e, 0x1c, 0xa7, 0xe5, 0xc9, 0x6f, 0xca, 0x78, 0xad, 0x2d, - 0xd6, 0xd7, 0xe4, 0xd7, 0x68, 0x54, 0x74, 0x76, 0x32, 0x93, 0xb8, 0x96, 0x1a, 0x56, 0xcd, 0xce, - 0x55, 0x89, 0xc0, 0x29, 0x8d, 0xfd, 0x7b, 0x00, 0x9a, 0x41, 0xa7, 0x0f, 0x9f, 0xa5, 0x97, 0x60, - 0x4c, 0xe6, 0x33, 0xd6, 0xf2, 0x68, 0xaa, 0xcf, 0xbc, 0xa4, 0xe1, 0xb0, 0x41, 0x49, 0xdb, 0x16, - 0xa8, 0x2c, 0xa0, 0x19, 0x1f, 0xb9, 0x34, 0x07, 0x68, 0x4a, 0x83, 0x9e, 0x86, 0x91, 0x98, 0xf8, - 0x5b, 0x57, 0xbd, 0x60, 0x57, 0x4c, 0x6c, 0x25, 0x85, 0x1b, 0x02, 0x8e, 0x15, 0x05, 0x5a, 0x82, - 0x72, 0xdb, 0x73, 0xc5, 0x54, 0x96, 0x1b, 0x7e, 0xf9, 0xc6, 0x5a, 0xed, 0xe8, 0x70, 0xee, 0x91, - 0xa2, 0x34, 0xcd, 0xf4, 0x68, 0x1f, 0xcf, 0xd3, 0xe5, 0x47, 0x0b, 0xe7, 0xdd, 0x0d, 0x0c, 0x1d, - 0xf3, 0x6e, 0xe0, 0x22, 0x80, 0xf8, 0x6a, 0x39, 0x97, 0xcb, 0xe9, 0xa8, 0x5d, 0x52, 0x18, 
0xac, - 0x51, 0xa1, 0x18, 0xa6, 0x9b, 0x11, 0x71, 0xe4, 0x19, 0x9a, 0xbb, 0x97, 0x8f, 0xdc, 0xbd, 0x81, - 0x60, 0x39, 0xcb, 0x0c, 0x77, 0xf2, 0x47, 0x21, 0x4c, 0xbb, 0x22, 0x7e, 0x35, 0xad, 0xb4, 0x72, - 0x7c, 0x9f, 0x76, 0xe6, 0x90, 0x93, 0x65, 0x84, 0x3b, 0x79, 0xa3, 0x77, 0x60, 0x56, 0x02, 0x3b, - 0x43, 0x86, 0xd9, 0x72, 0x29, 0x2f, 0x9d, 0xbb, 0x73, 0x38, 0x37, 0x5b, 0x2b, 0xa4, 0xc2, 0x5d, - 0x38, 0x20, 0x0c, 0x43, 0xec, 0x2e, 0x29, 0xae, 0x8e, 0xb2, 0x7d, 0xee, 0xc9, 0x62, 0x63, 0x00, - 0x9d, 0xeb, 0xf3, 0xec, 0x1e, 0x4a, 0xb8, 0xf9, 0xa6, 0xd7, 0x72, 0x0c, 0x88, 0x05, 0x27, 0xb4, - 0x05, 0xa3, 0x4e, 0x10, 0x84, 0x89, 0xc3, 0x55, 0xa8, 0xb1, 0x62, 0xdd, 0x4f, 0x63, 0xbc, 0x98, - 0x96, 0xe0, 0xdc, 0x95, 0xe7, 0xa0, 0x86, 0xc1, 0x3a, 0x63, 0x74, 0x0b, 0x26, 0xc3, 0x5b, 0x54, - 0x38, 0x4a, 0x2b, 0x45, 0x5c, 0x1d, 0x67, 0x75, 0x3d, 0xdf, 0xa7, 0x9d, 0xd6, 0x28, 0xac, 0x49, - 0x2d, 0x93, 0x29, 0xce, 0xd6, 0x82, 0xe6, 0x0d, 0x6b, 0xf5, 0x44, 0xea, 0xcf, 0x9e, 0x5a, 0xab, - 0x75, 0xe3, 0x34, 0x0b, 0x41, 0xe7, 0x6e, 0xab, 0x6c, 0xf5, 0x4f, 0x66, 0x42, 0xd0, 0x53, 0x14, - 0xd6, 0xe9, 0xd0, 0x0e, 0x8c, 0xa5, 0x57, 0x56, 0x51, 0xcc, 0x32, 0xd4, 0x8c, 0x5e, 0xbc, 0xd8, - 0xdf, 0xc7, 0xad, 0x69, 0x25, 0xf9, 0xc9, 0x41, 0x87, 0x60, 0x83, 0xf3, 0xec, 0x8f, 0xc1, 0xa8, - 0x36, 0xb0, 0xc7, 0xf1, 0xca, 0x9e, 0x7d, 0x15, 0xa6, 0xb2, 0x43, 0x77, 0x2c, 0xaf, 0xee, 0xff, - 0x55, 0x82, 0xc9, 0x9c, 0x9b, 0x2b, 0x96, 0xea, 0x39, 0x23, 0x50, 0xd3, 0xcc, 0xce, 0xa6, 0x58, - 0x2c, 0xf5, 0x21, 0x16, 0xa5, 0x8c, 0x2e, 0x17, 0xca, 0x68, 0x21, 0x0a, 0x07, 0x3e, 0x88, 0x28, - 0x34, 0x77, 0x9f, 0xc1, 0xbe, 0x76, 0x9f, 0x13, 0x10, 0x9f, 0xc6, 0x06, 0x36, 0xdc, 0xc7, 0x06, - 0xf6, 0xe5, 0x12, 0x4c, 0x65, 0xf3, 0x09, 0x9f, 0xc2, 0x7d, 0xc7, 0x6b, 0xc6, 0x7d, 0x47, 0x7e, - 0xe2, 0xf4, 0x6c, 0x96, 0xe3, 0xa2, 0xbb, 0x0f, 0x9c, 0xb9, 0xfb, 0x78, 0xb2, 0x2f, 0x6e, 0xdd, - 0xef, 0x41, 0xfe, 0x6e, 0x09, 0xce, 0x66, 0x8b, 0x2c, 0xfb, 0x8e, 0xb7, 0x77, 0x0a, 0x7d, 0x73, - 0xdd, 0xe8, 0x9b, 0x67, 0xfa, 0xf9, 0x1a, 0xd6, 0xb4, 0xc2, 0x0e, 0x7a, 0x23, 0xd3, 0x41, 0x0b, - 0xfd, 0xb3, 0xec, 0xde, 0x4b, 0xdf, 0x29, 0xc3, 0xb9, 0xdc, 0x72, 0xe9, 0x75, 0xc1, 0xaa, 0x71, - 0x5d, 0x70, 0x31, 0x73, 0x5d, 0x60, 0x77, 0x2f, 0x7d, 0x32, 0xf7, 0x07, 0x22, 0xf2, 0x8c, 0x65, - 0x4f, 0xbb, 0xcb, 0xbb, 0x03, 0x23, 0xf2, 0x4c, 0x31, 0xc2, 0x26, 0xdf, 0x1f, 0xa5, 0x3b, 0x83, - 0xdf, 0xb3, 0xe0, 0x81, 0xdc, 0xb1, 0x39, 0x05, 0xbb, 0xfa, 0x35, 0xd3, 0xae, 0xfe, 0x44, 0xdf, - 0xb3, 0xb5, 0xc0, 0xd0, 0xfe, 0xd5, 0x72, 0xc1, 0xb7, 0x30, 0xcb, 0xe4, 0x75, 0x18, 0x75, 0x9a, - 0x4d, 0x12, 0xc7, 0xeb, 0xa1, 0xab, 0x92, 0x95, 0x3d, 0xc3, 0xb4, 0x8d, 0x14, 0x7c, 0x74, 0x38, - 0x37, 0x9b, 0x65, 0x91, 0xa2, 0xb1, 0xce, 0xc1, 0x4c, 0x80, 0x58, 0x3a, 0xd1, 0x04, 0x88, 0x17, - 0x01, 0xf6, 0x95, 0xbd, 0x22, 0x6b, 0xe6, 0xd4, 0x2c, 0x19, 0x1a, 0x15, 0xfa, 0x29, 0x76, 0x0a, - 0xe0, 0xce, 0x40, 0x7c, 0x2a, 0x3e, 0xd7, 0xe7, 0x58, 0xe9, 0x8e, 0x45, 0x3c, 0xc4, 0x59, 0x99, - 0x84, 0x15, 0x4b, 0xf4, 0x13, 0x30, 0x15, 0xf3, 0x0c, 0x1a, 0xcb, 0xbe, 0x13, 0xb3, 0xc0, 0x1a, - 0x31, 0x0b, 0x59, 0xdc, 0x72, 0x23, 0x83, 0xc3, 0x1d, 0xd4, 0xf6, 0x97, 0x07, 0xe0, 0xc1, 0x2e, - 0xc2, 0x07, 0x2d, 0x9a, 0x97, 0xf7, 0x4f, 0x65, 0xed, 0x76, 0xb3, 0xb9, 0x85, 0x0d, 0x43, 0x5e, - 0x66, 0x8c, 0x4b, 0x1f, 0x78, 0x8c, 0xbf, 0x64, 0x69, 0x16, 0x55, 0xee, 0xe2, 0xfb, 0x99, 0x63, - 0x0a, 0xd5, 0x13, 0x34, 0xb1, 0x6e, 0xe5, 0xd8, 0x29, 0x2f, 0xf6, 0xdd, 0x9c, 0xbe, 0x0d, 0x97, - 0xa7, 0x7b, 0xd5, 0xf3, 0x79, 0x0b, 0x1e, 0xc9, 0x6d, 0xaf, 0xe1, 0x6c, 0xb4, 0x00, 0x95, 0x26, - 0x05, 0x6a, 0x41, 
0x76, 0x69, 0xa8, 0xab, 0x44, 0xe0, 0x94, 0xc6, 0xf0, 0x29, 0x2a, 0xf5, 0xf4, - 0x29, 0xfa, 0x97, 0x16, 0xcc, 0x64, 0x1b, 0x71, 0x0a, 0x12, 0x70, 0xcd, 0x94, 0x80, 0x1f, 0xef, - 0x67, 0x2c, 0x0b, 0x84, 0xdf, 0x1f, 0x4f, 0xc0, 0x7d, 0x05, 0xef, 0x45, 0xec, 0xc3, 0xf4, 0x76, - 0x93, 0x98, 0xe1, 0x8b, 0xe2, 0x63, 0x72, 0x23, 0x3d, 0xbb, 0xc6, 0x3a, 0xf2, 0x83, 0x6c, 0x07, - 0x09, 0xee, 0xac, 0x02, 0x7d, 0xde, 0x82, 0x19, 0xe7, 0x56, 0xdc, 0xf1, 0x7e, 0x93, 0x98, 0x33, - 0xcf, 0xe7, 0xda, 0x57, 0x7b, 0xbc, 0xf7, 0xc4, 0x42, 0x8c, 0x66, 0xf2, 0xa8, 0x70, 0x6e, 0x5d, - 0x08, 0x8b, 0xbc, 0x90, 0x54, 0x4f, 0xee, 0x12, 0x60, 0x9b, 0x17, 0xfe, 0xc4, 0x65, 0xa1, 0xc4, - 0x60, 0xc5, 0x07, 0xdd, 0x84, 0xca, 0xb6, 0x8c, 0x49, 0x14, 0xb2, 0x36, 0x77, 0xf3, 0xca, 0x0d, - 0x5c, 0xe4, 0x31, 0x1f, 0x0a, 0x85, 0x53, 0x56, 0xe8, 0x55, 0x28, 0x07, 0x5b, 0x71, 0xb7, 0x07, - 0x33, 0x32, 0x3e, 0x78, 0x3c, 0x52, 0xfa, 0xda, 0x6a, 0x03, 0xd3, 0x82, 0xb4, 0x7c, 0xb4, 0xe9, - 0x8a, 0x2b, 0x81, 0xdc, 0xf2, 0x78, 0xa9, 0xd6, 0x59, 0x1e, 0x2f, 0xd5, 0x30, 0x2d, 0x88, 0x56, - 0x61, 0x90, 0x85, 0x38, 0x09, 0x7b, 0x7f, 0x6e, 0xa6, 0x87, 0x8e, 0xf0, 0x2d, 0x1e, 0x3a, 0xcd, - 0xc0, 0x98, 0x17, 0x47, 0xaf, 0xc1, 0x50, 0x93, 0xbd, 0x1f, 0x21, 0x8c, 0x33, 0xf9, 0xd9, 0x4b, - 0x3a, 0x5e, 0x98, 0xe0, 0xb7, 0x9c, 0x1c, 0x8e, 0x05, 0x07, 0xb4, 0x01, 0x43, 0x4d, 0xd2, 0xda, - 0xd9, 0x8a, 0x85, 0xcd, 0xe5, 0x93, 0xb9, 0xbc, 0xba, 0x3c, 0x97, 0x22, 0xb8, 0x32, 0x0a, 0x2c, - 0x78, 0xa1, 0x4f, 0x43, 0x69, 0xab, 0x29, 0xa2, 0x9d, 0x72, 0xed, 0xfc, 0x66, 0x38, 0xfb, 0xd2, - 0xd0, 0x9d, 0xc3, 0xb9, 0xd2, 0xea, 0x32, 0x2e, 0x6d, 0x35, 0xd1, 0x35, 0x18, 0xde, 0xe2, 0x31, - 0xc9, 0x22, 0xcf, 0xef, 0xe3, 0xf9, 0xe1, 0xd2, 0x1d, 0x61, 0xcb, 0x3c, 0x4a, 0x47, 0x20, 0xb0, - 0x64, 0x82, 0x36, 0x00, 0xb6, 0x54, 0x6c, 0xb5, 0x48, 0xf4, 0xfb, 0xf1, 0x7e, 0x22, 0xb0, 0x85, - 0x01, 0x42, 0x41, 0xb1, 0xc6, 0x07, 0x7d, 0x0e, 0x2a, 0x8e, 0x7c, 0x11, 0x88, 0x25, 0xf9, 0x35, - 0xf5, 0x81, 0x74, 0xc1, 0x75, 0x7f, 0x2c, 0x89, 0xcf, 0x56, 0x45, 0x84, 0x53, 0xa6, 0x68, 0x17, - 0xc6, 0xf7, 0xe3, 0xd6, 0x0e, 0x91, 0x0b, 0x94, 0x65, 0xfe, 0x2d, 0xd8, 0x90, 0x6e, 0x0a, 0x42, - 0x2f, 0x4a, 0xda, 0x8e, 0xdf, 0x21, 0x53, 0x58, 0x48, 0xd7, 0x4d, 0x9d, 0x19, 0x36, 0x79, 0xd3, - 0x4e, 0x7f, 0xaf, 0x1d, 0x6e, 0x1e, 0x24, 0x44, 0xe4, 0x03, 0xce, 0xed, 0xf4, 0xd7, 0x39, 0x49, - 0x67, 0xa7, 0x0b, 0x04, 0x96, 0x4c, 0xe8, 0x12, 0x76, 0xe4, 0x6b, 0x5b, 0xc2, 0xca, 0xf2, 0x44, - 0x61, 0xf7, 0x74, 0xb4, 0x37, 0xed, 0x14, 0x26, 0xfb, 0x52, 0x56, 0x4c, 0xe6, 0xb5, 0x76, 0xc2, - 0x24, 0x0c, 0x32, 0xf2, 0x76, 0xba, 0x58, 0xe6, 0xd5, 0x73, 0xe8, 0x3b, 0x65, 0x5e, 0x1e, 0x15, - 0xce, 0xad, 0x0b, 0xb9, 0x30, 0xd1, 0x0a, 0xa3, 0xe4, 0x56, 0x18, 0xc9, 0x59, 0x85, 0xba, 0x1c, - 0xbf, 0x0d, 0x4a, 0x51, 0x23, 0xf3, 0xd0, 0x36, 0x31, 0x38, 0xc3, 0x93, 0x0e, 0x49, 0xdc, 0x74, - 0x7c, 0xb2, 0x76, 0xbd, 0x7a, 0xa6, 0x78, 0x48, 0x1a, 0x9c, 0xa4, 0x73, 0x48, 0x04, 0x02, 0x4b, - 0x26, 0x54, 0xfa, 0xb0, 0xd4, 0xf2, 0x2c, 0x81, 0x71, 0x81, 0xf4, 0xe9, 0xf0, 0x5d, 0xe6, 0xd2, - 0x87, 0x81, 0x31, 0x2f, 0x4e, 0x67, 0xbe, 0xd0, 0x3d, 0xc3, 0xb8, 0x7a, 0xb6, 0x78, 0xe6, 0x0b, - 0x95, 0xf5, 0x7a, 0xa3, 0xdb, 0xcc, 0x57, 0x44, 0x38, 0x65, 0x6a, 0x7f, 0x63, 0xa8, 0x53, 0x5b, - 0x60, 0x67, 0x8c, 0x2f, 0x58, 0x1d, 0x17, 0xf0, 0x9f, 0xea, 0xd7, 0xe4, 0x71, 0x82, 0x7a, 0xe2, - 0xe7, 0x2d, 0xb8, 0xaf, 0x95, 0xfb, 0x51, 0x62, 0xeb, 0xed, 0xcf, 0x72, 0xc2, 0xbb, 0x41, 0xa5, - 0x06, 0xcf, 0xc7, 0xe3, 0x82, 0x9a, 0xb2, 0xba, 0x78, 0xf9, 0x03, 0xeb, 0xe2, 0xeb, 0x30, 0xc2, - 0xd4, 0xbb, 0x34, 0x15, 0x51, 0x5f, 0x6e, 
0x6c, 0x6c, 0x13, 0x5f, 0x16, 0x05, 0xb1, 0x62, 0x81, - 0x7e, 0xce, 0x82, 0x87, 0xb3, 0x4d, 0xc7, 0x84, 0xa1, 0x45, 0x5a, 0x4b, 0x7e, 0xbc, 0x59, 0x15, - 0xdf, 0xff, 0x70, 0xbd, 0x1b, 0xf1, 0x51, 0x2f, 0x02, 0xdc, 0xbd, 0x32, 0x54, 0xcb, 0x39, 0x5f, - 0x0d, 0x99, 0xf7, 0x73, 0xbd, 0xcf, 0x58, 0xe8, 0x79, 0x18, 0xdb, 0x0b, 0xdb, 0x81, 0x8c, 0x31, - 0x11, 0x11, 0xc4, 0xcc, 0x16, 0xbc, 0xae, 0xc1, 0xb1, 0x41, 0x75, 0xba, 0xfa, 0xfe, 0xd7, 0xac, - 0x1c, 0x45, 0x95, 0x9f, 0x00, 0x5f, 0x31, 0x4f, 0x80, 0x8f, 0x65, 0x4f, 0x80, 0x1d, 0x96, 0x3a, - 0xe3, 0xf0, 0xd7, 0x7f, 0xba, 0xde, 0x7e, 0x73, 0x35, 0xd9, 0x3e, 0x9c, 0xef, 0x25, 0x9c, 0x99, - 0x2b, 0x9f, 0xab, 0xee, 0xb8, 0x53, 0x57, 0x3e, 0x77, 0xad, 0x86, 0x19, 0xa6, 0xdf, 0xac, 0x1f, - 0xf6, 0x7f, 0xb3, 0xa0, 0x5c, 0x0f, 0xdd, 0x53, 0xb0, 0x3c, 0x7e, 0xc6, 0xb0, 0x3c, 0x3e, 0x58, - 0xf0, 0x76, 0x67, 0xa1, 0x9d, 0x71, 0x25, 0x63, 0x67, 0x7c, 0xb8, 0x88, 0x41, 0x77, 0xab, 0xe2, - 0xdf, 0x2b, 0x83, 0xfe, 0xd2, 0x28, 0xfa, 0xd7, 0x77, 0xe3, 0x13, 0x5e, 0xee, 0xf6, 0xf8, 0xa8, - 0xe0, 0xcc, 0x3c, 0x00, 0x65, 0xb8, 0xe9, 0x0f, 0x99, 0x6b, 0xf8, 0x1b, 0xc4, 0xdb, 0xde, 0x49, - 0x88, 0x9b, 0xfd, 0x9c, 0xd3, 0x73, 0x0d, 0xff, 0x2f, 0x16, 0x4c, 0x66, 0x6a, 0x47, 0x7e, 0x5e, - 0xec, 0xda, 0x5d, 0x5a, 0x9c, 0xa6, 0x7b, 0x06, 0xbb, 0xcd, 0x03, 0xa8, 0x6b, 0x1d, 0x69, 0xd5, - 0x61, 0xba, 0xaf, 0xba, 0xf7, 0x89, 0xb1, 0x46, 0x81, 0x5e, 0x80, 0xd1, 0x24, 0x6c, 0x85, 0x7e, - 0xb8, 0x7d, 0x70, 0x85, 0xc8, 0x3c, 0x33, 0xea, 0xf2, 0x6d, 0x23, 0x45, 0x61, 0x9d, 0xce, 0xfe, - 0x95, 0x32, 0x64, 0x5f, 0xa7, 0xfd, 0xf3, 0x39, 0xf9, 0xd1, 0x9c, 0x93, 0xdf, 0xb1, 0x60, 0x8a, - 0xd6, 0xce, 0xbc, 0xab, 0xa4, 0x53, 0xb5, 0x7a, 0xd7, 0xc3, 0xea, 0xf2, 0xae, 0xc7, 0x63, 0x54, - 0x76, 0xb9, 0x61, 0x3b, 0x11, 0x56, 0x21, 0x4d, 0x38, 0x51, 0x28, 0x16, 0x58, 0x41, 0x47, 0xa2, - 0x48, 0x44, 0xa4, 0xe9, 0x74, 0x24, 0x8a, 0xb0, 0xc0, 0xca, 0x67, 0x3f, 0x06, 0x0a, 0x9e, 0xfd, - 0x60, 0x29, 0xda, 0x84, 0x47, 0x8f, 0x50, 0x28, 0xb4, 0x14, 0x6d, 0xd2, 0xd5, 0x27, 0xa5, 0xb1, - 0xbf, 0x5e, 0x86, 0xb1, 0x7a, 0xe8, 0xa6, 0x17, 0x2b, 0xcf, 0x1b, 0x17, 0x2b, 0xe7, 0x33, 0x17, - 0x2b, 0x53, 0x3a, 0xed, 0x9f, 0x5f, 0xa3, 0x7c, 0x58, 0xd7, 0x28, 0x7f, 0x66, 0xc1, 0x44, 0x3d, - 0x74, 0xe9, 0x04, 0xfd, 0x51, 0x9a, 0x8d, 0x7a, 0x02, 0xc0, 0xa1, 0x2e, 0x09, 0x00, 0xff, 0xbe, - 0x05, 0xc3, 0xf5, 0xd0, 0x3d, 0x05, 0x8b, 0xe9, 0x2b, 0xa6, 0xc5, 0xf4, 0xfe, 0x02, 0x29, 0x5b, - 0x60, 0x24, 0xfd, 0x8d, 0x32, 0x8c, 0xd3, 0x76, 0x86, 0xdb, 0x72, 0x94, 0x8c, 0x1e, 0xb1, 0xfa, - 0xe8, 0x11, 0xaa, 0xcc, 0x85, 0xbe, 0x1f, 0xde, 0xca, 0x8e, 0xd8, 0x2a, 0x83, 0x62, 0x81, 0x45, - 0x4f, 0xc3, 0x48, 0x2b, 0x22, 0xfb, 0x5e, 0xd8, 0x8e, 0xb3, 0x31, 0xad, 0x75, 0x01, 0xc7, 0x8a, - 0x82, 0xea, 0xed, 0xb1, 0x17, 0x34, 0x89, 0xf4, 0xf2, 0x19, 0x60, 0x5e, 0x3e, 0x3c, 0x87, 0xaa, - 0x06, 0xc7, 0x06, 0x15, 0x7a, 0x03, 0x2a, 0xec, 0x3f, 0x5b, 0x37, 0xc7, 0x7f, 0xd5, 0x43, 0x24, - 0x2e, 0x17, 0x0c, 0x70, 0xca, 0x0b, 0x5d, 0x04, 0x48, 0xa4, 0x3f, 0x52, 0x2c, 0x42, 0xae, 0x95, - 0x46, 0xa9, 0x3c, 0x95, 0x62, 0xac, 0x51, 0xa1, 0xa7, 0xa0, 0x92, 0x38, 0x9e, 0x7f, 0xd5, 0x0b, - 0x48, 0x2c, 0xfc, 0xb9, 0x44, 0x5e, 0x72, 0x01, 0xc4, 0x29, 0x9e, 0xee, 0xe8, 0x2c, 0xa0, 0x9f, - 0xbf, 0x09, 0x34, 0xc2, 0xa8, 0xd9, 0x8e, 0x7e, 0x55, 0x41, 0xb1, 0x46, 0x61, 0xbf, 0x04, 0x67, - 0xeb, 0xa1, 0x5b, 0x0f, 0xa3, 0x64, 0x35, 0x8c, 0x6e, 0x39, 0x91, 0x2b, 0xc7, 0x6f, 0x4e, 0xa6, - 0xc8, 0xa6, 0xbb, 0xee, 0x20, 0xb7, 0x06, 0x18, 0xc9, 0xaf, 0x9f, 0x63, 0x7b, 0xfa, 0x31, 0x83, - 0x6f, 0xfe, 0x5d, 0x09, 0x50, 0x9d, 0x79, 0x4c, 0x19, 0x0f, 0x47, 
0xbd, 0x03, 0x13, 0x31, 0xb9, - 0xea, 0x05, 0xed, 0xdb, 0xf2, 0x7c, 0xd5, 0x25, 0xb2, 0xa9, 0xb1, 0xa2, 0x53, 0x72, 0x8b, 0x8a, - 0x09, 0xc3, 0x19, 0x6e, 0xb4, 0x0b, 0xa3, 0x76, 0xb0, 0x18, 0xdf, 0x88, 0x49, 0x24, 0x1e, 0x4a, - 0x62, 0x5d, 0x88, 0x25, 0x10, 0xa7, 0x78, 0x3a, 0x65, 0xd8, 0x9f, 0x6b, 0x61, 0x80, 0xc3, 0x30, - 0x91, 0x93, 0x8c, 0x3d, 0xb5, 0xa1, 0xc1, 0xb1, 0x41, 0x85, 0x56, 0x01, 0xc5, 0xed, 0x56, 0xcb, - 0x67, 0xd7, 0x90, 0x8e, 0x7f, 0x29, 0x0a, 0xdb, 0x2d, 0x7e, 0x95, 0x24, 0x5e, 0xa9, 0x68, 0x74, - 0x60, 0x71, 0x4e, 0x09, 0x2a, 0x18, 0xb6, 0x62, 0xf6, 0x5b, 0xc4, 0xf4, 0x73, 0xdb, 0x66, 0x83, - 0x81, 0xb0, 0xc4, 0xd9, 0x3f, 0xc3, 0x36, 0x33, 0xf6, 0xbe, 0x4d, 0xd2, 0x8e, 0x08, 0xda, 0x83, - 0xf1, 0x16, 0xdb, 0xb0, 0x92, 0x28, 0xf4, 0x7d, 0x22, 0xf5, 0xc6, 0xbb, 0xf3, 0xde, 0xe2, 0xef, - 0x5d, 0xe8, 0xec, 0xb0, 0xc9, 0xdd, 0xfe, 0xc2, 0x24, 0x93, 0x4b, 0x0d, 0x7e, 0x68, 0x19, 0x16, - 0x3e, 0xd9, 0x42, 0x43, 0x9b, 0x2d, 0x7e, 0x4f, 0x2e, 0x95, 0xf4, 0xc2, 0xaf, 0x1b, 0xcb, 0xb2, - 0xe8, 0x75, 0x76, 0x07, 0xc7, 0x85, 0x41, 0xaf, 0x97, 0x2c, 0x39, 0x95, 0x71, 0xdd, 0x26, 0x0a, - 0x62, 0x8d, 0x09, 0xba, 0x0a, 0xe3, 0xe2, 0x39, 0x14, 0x61, 0x78, 0x28, 0x1b, 0xc7, 0xdf, 0x71, - 0xac, 0x23, 0x8f, 0xb2, 0x00, 0x6c, 0x16, 0x46, 0xdb, 0xf0, 0xb0, 0xf6, 0x78, 0x57, 0x8e, 0x07, - 0x21, 0x97, 0x2d, 0x8f, 0xdc, 0x39, 0x9c, 0x7b, 0x78, 0xa3, 0x1b, 0x21, 0xee, 0xce, 0x07, 0x5d, - 0x87, 0xb3, 0x4e, 0x33, 0xf1, 0xf6, 0x49, 0x8d, 0x38, 0xae, 0xef, 0x05, 0xc4, 0x4c, 0xf2, 0xf0, - 0xc0, 0x9d, 0xc3, 0xb9, 0xb3, 0x8b, 0x79, 0x04, 0x38, 0xbf, 0x1c, 0x7a, 0x05, 0x2a, 0x6e, 0x10, - 0x8b, 0x3e, 0x18, 0x32, 0xde, 0xa5, 0xab, 0xd4, 0xae, 0x35, 0xd4, 0xf7, 0xa7, 0x7f, 0x70, 0x5a, - 0x00, 0x6d, 0xc3, 0x98, 0x1e, 0xc8, 0x25, 0xde, 0x34, 0x7c, 0xa6, 0xcb, 0xd9, 0xd6, 0x88, 0x7e, - 0xe2, 0x56, 0x37, 0xe5, 0x9f, 0x6b, 0x04, 0x46, 0x19, 0x8c, 0xd1, 0x6b, 0x80, 0x62, 0x12, 0xed, - 0x7b, 0x4d, 0xb2, 0xd8, 0x64, 0x49, 0x86, 0x99, 0xad, 0x66, 0xc4, 0x08, 0x36, 0x41, 0x8d, 0x0e, - 0x0a, 0x9c, 0x53, 0x0a, 0x5d, 0xa6, 0x12, 0x45, 0x87, 0x0a, 0x77, 0x6a, 0xa9, 0xe6, 0x55, 0x6b, - 0xa4, 0x15, 0x91, 0xa6, 0x93, 0x10, 0xd7, 0xe4, 0x88, 0x33, 0xe5, 0xe8, 0x7e, 0xa3, 0xde, 0x6e, - 0x00, 0xd3, 0x09, 0xb8, 0xf3, 0xfd, 0x06, 0x7a, 0x42, 0xda, 0x09, 0xe3, 0xe4, 0x1a, 0x49, 0x6e, - 0x85, 0xd1, 0xae, 0xc8, 0xcc, 0x96, 0x26, 0x6e, 0x4c, 0x51, 0x58, 0xa7, 0xa3, 0x1a, 0x11, 0xbb, - 0x04, 0x5b, 0xab, 0xb1, 0x7b, 0x8a, 0x91, 0x74, 0x9d, 0x5c, 0xe6, 0x60, 0x2c, 0xf1, 0x92, 0x74, - 0xad, 0xbe, 0xcc, 0x6e, 0x1f, 0x32, 0xa4, 0x6b, 0xf5, 0x65, 0x2c, 0xf1, 0x88, 0x74, 0xbe, 0xf9, - 0x37, 0x51, 0x7c, 0x6f, 0xd4, 0x29, 0x97, 0xfb, 0x7c, 0xf6, 0x2f, 0x80, 0x29, 0xf5, 0xda, 0x20, - 0x4f, 0x59, 0x17, 0x57, 0x27, 0xd9, 0x24, 0xe9, 0x3f, 0xdf, 0x9d, 0xb2, 0xc5, 0xad, 0x65, 0x38, - 0xe1, 0x0e, 0xde, 0x46, 0xf2, 0x90, 0xa9, 0x9e, 0x6f, 0x6f, 0x2c, 0x40, 0x25, 0x6e, 0x6f, 0xba, - 0xe1, 0x9e, 0xe3, 0x05, 0xec, 0xb2, 0x40, 0x53, 0x44, 0x1a, 0x12, 0x81, 0x53, 0x1a, 0xb4, 0x0a, - 0x23, 0x8e, 0x38, 0x7c, 0x09, 0xf3, 0x7e, 0x6e, 0x36, 0x01, 0x79, 0x40, 0xe3, 0x76, 0x50, 0xf9, - 0x0f, 0xab, 0xb2, 0xe8, 0x65, 0x18, 0x17, 0x01, 0x6f, 0xc2, 0x57, 0xf5, 0x8c, 0x19, 0x1b, 0xd1, - 0xd0, 0x91, 0xd8, 0xa4, 0x45, 0x3f, 0x05, 0x13, 0x94, 0x4b, 0x2a, 0xd8, 0xaa, 0x33, 0xfd, 0x48, - 0x44, 0x2d, 0xa7, 0xba, 0x5e, 0x18, 0x67, 0x98, 0x21, 0x17, 0x1e, 0x72, 0xda, 0x49, 0xc8, 0x8c, - 0x95, 0xe6, 0xfc, 0xdf, 0x08, 0x77, 0x49, 0xc0, 0xac, 0xfb, 0x23, 0x4b, 0xe7, 0xef, 0x1c, 0xce, - 0x3d, 0xb4, 0xd8, 0x85, 0x0e, 0x77, 0xe5, 0x82, 0x6e, 0xc0, 0x68, 0x12, 0xfa, 0xc2, 0xc9, 
0x3c, - 0xae, 0xde, 0x57, 0x9c, 0xfc, 0x68, 0x43, 0x91, 0xe9, 0xe6, 0x04, 0x55, 0x14, 0xeb, 0x7c, 0xd0, - 0x06, 0x5f, 0x63, 0x2c, 0x55, 0x27, 0x89, 0xab, 0xf7, 0x17, 0x77, 0x8c, 0xca, 0xe8, 0x69, 0x2e, - 0x41, 0x51, 0x12, 0xeb, 0x6c, 0xd0, 0x25, 0x98, 0x6e, 0x45, 0x5e, 0xc8, 0x26, 0xb6, 0x32, 0x14, - 0x57, 0x8d, 0xb4, 0x78, 0xd3, 0xf5, 0x2c, 0x01, 0xee, 0x2c, 0x83, 0x2e, 0x50, 0x05, 0x95, 0x03, - 0xab, 0x0f, 0xf0, 0x37, 0x59, 0xb8, 0x72, 0xca, 0x61, 0x58, 0x61, 0x67, 0x7f, 0x1c, 0xa6, 0x3b, - 0x24, 0xe5, 0xb1, 0x1c, 0x7e, 0xff, 0xc9, 0x20, 0x54, 0x94, 0x39, 0x10, 0x2d, 0x98, 0x56, 0xde, - 0x07, 0xb2, 0x56, 0xde, 0x11, 0xaa, 0xaf, 0xe9, 0x86, 0xdd, 0x8d, 0x9c, 0x27, 0xe5, 0xcf, 0x17, - 0x88, 0x86, 0xfe, 0xa3, 0xf3, 0x8e, 0xf1, 0xdc, 0x7e, 0x7a, 0x60, 0x1c, 0xe8, 0x7a, 0x60, 0xec, - 0xf3, 0x79, 0x47, 0x7a, 0x34, 0x6c, 0x85, 0xee, 0x5a, 0x3d, 0xfb, 0xde, 0x59, 0x9d, 0x02, 0x31, - 0xc7, 0x31, 0xe5, 0x9e, 0x6e, 0xeb, 0x4c, 0xb9, 0x1f, 0xbe, 0x4b, 0xe5, 0x5e, 0x32, 0xc0, 0x29, - 0x2f, 0xe4, 0xc3, 0x74, 0xd3, 0x7c, 0xaa, 0x4e, 0x45, 0xe4, 0x3d, 0xda, 0xf3, 0xd1, 0xb8, 0xb6, - 0xf6, 0x86, 0xcd, 0x72, 0x96, 0x0b, 0xee, 0x64, 0x8c, 0x5e, 0x86, 0x91, 0xf7, 0xc2, 0x98, 0x4d, - 0x3b, 0xb1, 0xb7, 0xc9, 0x18, 0xa8, 0x91, 0xd7, 0xaf, 0x37, 0x18, 0xfc, 0xe8, 0x70, 0x6e, 0xb4, - 0x1e, 0xba, 0xf2, 0x2f, 0x56, 0x05, 0xd0, 0x6d, 0x38, 0x6b, 0x48, 0x04, 0xd5, 0x5c, 0xe8, 0xbf, - 0xb9, 0x0f, 0x8b, 0xea, 0xce, 0xae, 0xe5, 0x71, 0xc2, 0xf9, 0x15, 0xd8, 0xdf, 0xe0, 0x46, 0x4f, - 0x61, 0x1a, 0x21, 0x71, 0xdb, 0x3f, 0x8d, 0x87, 0x2a, 0x56, 0x0c, 0xab, 0xcd, 0x5d, 0x1b, 0xd6, - 0x7f, 0xd7, 0x62, 0x86, 0xf5, 0x0d, 0xb2, 0xd7, 0xf2, 0x9d, 0xe4, 0x34, 0xdc, 0xbc, 0x5f, 0x87, - 0x91, 0x44, 0xd4, 0xd6, 0xed, 0x6d, 0x0d, 0xad, 0x51, 0xec, 0x72, 0x41, 0x6d, 0x88, 0x12, 0x8a, - 0x15, 0x1b, 0xfb, 0x9f, 0xf1, 0x11, 0x90, 0x98, 0x53, 0xb0, 0x2d, 0xd4, 0x4c, 0xdb, 0xc2, 0x5c, - 0x8f, 0x2f, 0x28, 0xb0, 0x31, 0xfc, 0x53, 0xb3, 0xdd, 0xec, 0xec, 0xf1, 0x51, 0xbf, 0xd1, 0xb1, - 0x7f, 0xd1, 0x82, 0x99, 0x3c, 0x47, 0x00, 0xaa, 0xc4, 0xf0, 0x93, 0x8f, 0xba, 0xe1, 0x52, 0x3d, - 0x78, 0x53, 0xc0, 0xb1, 0xa2, 0xe8, 0x3b, 0xbf, 0xfd, 0xf1, 0x12, 0x7e, 0x5d, 0x07, 0xf3, 0x55, - 0x43, 0xf4, 0x2a, 0x8f, 0xdb, 0xb0, 0xd4, 0xb3, 0x83, 0xc7, 0x8b, 0xd9, 0xb0, 0x7f, 0xb5, 0x04, - 0x33, 0xdc, 0x44, 0xbd, 0xb8, 0x1f, 0x7a, 0x6e, 0x3d, 0x74, 0x45, 0x14, 0xcb, 0x5b, 0x30, 0xd6, - 0xd2, 0x8e, 0xab, 0xdd, 0x52, 0x0e, 0xe9, 0xc7, 0xda, 0xf4, 0xd8, 0xa0, 0x43, 0xb1, 0xc1, 0x0b, - 0xb9, 0x30, 0x46, 0xf6, 0xbd, 0xa6, 0xb2, 0x73, 0x96, 0x8e, 0x2d, 0xd2, 0x55, 0x2d, 0x2b, 0x1a, - 0x1f, 0x6c, 0x70, 0xbd, 0x07, 0xaf, 0xd0, 0xd8, 0x5f, 0xb1, 0xe0, 0xfe, 0x82, 0x04, 0x45, 0xb4, - 0xba, 0x5b, 0xec, 0x32, 0x40, 0x3c, 0x91, 0xa9, 0xaa, 0xe3, 0x57, 0x04, 0x58, 0x60, 0xd1, 0x4f, - 0x02, 0x70, 0x13, 0x3f, 0xd5, 0xa2, 0xc5, 0xa7, 0xf7, 0x97, 0xb8, 0x43, 0xcb, 0xee, 0x20, 0xcb, - 0x63, 0x8d, 0x97, 0xfd, 0xcb, 0x65, 0x18, 0xe4, 0xef, 0xa5, 0xaf, 0xc2, 0xf0, 0x0e, 0x4f, 0x87, - 0xdc, 0x4f, 0xe6, 0xe5, 0xf4, 0x38, 0xc2, 0x01, 0x58, 0x16, 0x46, 0xeb, 0x70, 0x46, 0x44, 0x4a, - 0xd5, 0x88, 0xef, 0x1c, 0xc8, 0x53, 0x2d, 0x7f, 0x9a, 0x44, 0xa6, 0xcd, 0x3f, 0xb3, 0xd6, 0x49, - 0x82, 0xf3, 0xca, 0xa1, 0x57, 0x3b, 0x92, 0x20, 0xf2, 0x44, 0xd2, 0x4a, 0x07, 0xee, 0x91, 0x08, - 0xf1, 0x65, 0x18, 0x6f, 0x75, 0x9c, 0xdf, 0xb5, 0xa7, 0xaa, 0xcd, 0x33, 0xbb, 0x49, 0xcb, 0xbc, - 0x0a, 0xda, 0xcc, 0x87, 0x62, 0x63, 0x27, 0x22, 0xf1, 0x4e, 0xe8, 0xbb, 0xe2, 0x5d, 0xd6, 0xd4, - 0xab, 0x20, 0x83, 0xc7, 0x1d, 0x25, 0x28, 0x97, 0x2d, 0xc7, 0xf3, 0xdb, 0x11, 0x49, 0xb9, 0x0c, - 0x99, 0x5c, 0x56, 
0x33, 0x78, 0xdc, 0x51, 0x82, 0xce, 0xa3, 0xb3, 0xe2, 0x51, 0x4f, 0x19, 0x3f, - 0xaf, 0x5c, 0x45, 0x86, 0xa5, 0x1f, 0x7d, 0x97, 0x9c, 0x2e, 0xe2, 0xca, 0x5f, 0x3d, 0x0b, 0xaa, - 0x3d, 0x19, 0x27, 0x3c, 0xe8, 0x25, 0x97, 0xbb, 0x79, 0x5a, 0xf2, 0x8f, 0x2c, 0x38, 0x93, 0xe3, - 0x3e, 0xc6, 0x45, 0xd5, 0xb6, 0x17, 0x27, 0xea, 0x45, 0x0c, 0x4d, 0x54, 0x71, 0x38, 0x56, 0x14, - 0x74, 0x3d, 0x70, 0x61, 0x98, 0x15, 0x80, 0xc2, 0xe5, 0x43, 0x60, 0x8f, 0x27, 0x00, 0xd1, 0x79, - 0x18, 0x68, 0xc7, 0x24, 0x92, 0x6f, 0x32, 0x4a, 0xf9, 0xcd, 0x2c, 0x82, 0x0c, 0x43, 0x35, 0xca, - 0x6d, 0x65, 0x8c, 0xd3, 0x34, 0x4a, 0x6e, 0x8e, 0xe3, 0x38, 0xfb, 0x4b, 0x65, 0x98, 0xcc, 0x38, - 0x80, 0xd2, 0x86, 0xec, 0x85, 0x81, 0x97, 0x84, 0x2a, 0x07, 0x1f, 0x4f, 0x39, 0x42, 0x5a, 0x3b, - 0xeb, 0x02, 0x8e, 0x15, 0x05, 0x7a, 0x4c, 0x3e, 0xd4, 0x9b, 0x7d, 0xe9, 0x63, 0xa9, 0x66, 0xbc, - 0xd5, 0xdb, 0xef, 0x93, 0x3d, 0x8f, 0xc2, 0x40, 0x2b, 0x54, 0xaf, 0xa8, 0xab, 0xf1, 0xc4, 0x4b, - 0xb5, 0x7a, 0x18, 0xfa, 0x98, 0x21, 0xd1, 0x27, 0xc4, 0xd7, 0x67, 0xee, 0x2b, 0xb0, 0xe3, 0x86, - 0xb1, 0xd6, 0x05, 0x4f, 0xc0, 0xf0, 0x2e, 0x39, 0x88, 0xbc, 0x60, 0x3b, 0x7b, 0x5b, 0x73, 0x85, - 0x83, 0xb1, 0xc4, 0x9b, 0x29, 0xef, 0x87, 0xef, 0xc9, 0xab, 0x3b, 0x23, 0x3d, 0x77, 0xb5, 0xdf, - 0xb0, 0x60, 0x92, 0xe5, 0xbb, 0x15, 0x99, 0x1a, 0xbc, 0x30, 0x38, 0x05, 0x3d, 0xe1, 0x51, 0x18, - 0x8c, 0x68, 0xa5, 0xd9, 0xa7, 0x34, 0x58, 0x4b, 0x30, 0xc7, 0xa1, 0x87, 0x60, 0x80, 0x35, 0x81, - 0x0e, 0xde, 0x18, 0xcf, 0x78, 0x5f, 0x73, 0x12, 0x07, 0x33, 0x28, 0x0b, 0x99, 0xc3, 0xa4, 0xe5, - 0x7b, 0xbc, 0xd1, 0xa9, 0xb9, 0xf5, 0xa3, 0x11, 0x32, 0x97, 0xdb, 0xb4, 0x0f, 0x16, 0x32, 0x97, - 0xcf, 0xb2, 0xbb, 0x0e, 0xfe, 0xdf, 0x4b, 0x70, 0x2e, 0xb7, 0x5c, 0xdf, 0x21, 0x73, 0xdd, 0x4b, - 0x9f, 0xcc, 0x5d, 0x6f, 0xfe, 0x15, 0x6c, 0xf9, 0x14, 0xaf, 0x60, 0x07, 0xfa, 0x55, 0x53, 0x06, - 0xfb, 0x88, 0x64, 0xcb, 0xed, 0xb2, 0x8f, 0x48, 0x24, 0x5b, 0x6e, 0xdb, 0x0a, 0xce, 0x10, 0x3f, - 0x28, 0x15, 0x7c, 0x0b, 0x3b, 0x4d, 0x5c, 0xa0, 0x72, 0x86, 0x21, 0x63, 0xa1, 0x76, 0x8d, 0x71, - 0x19, 0xc3, 0x61, 0x58, 0x61, 0x91, 0xa7, 0xc5, 0x84, 0xf1, 0xa6, 0xbd, 0x7c, 0xac, 0x25, 0x33, - 0x6f, 0x5a, 0xc7, 0xf5, 0xb4, 0x12, 0xd9, 0xf8, 0xb0, 0x75, 0xed, 0x04, 0x58, 0xee, 0xff, 0x04, - 0x38, 0x96, 0x7f, 0xfa, 0x43, 0x8b, 0x30, 0xb9, 0xe7, 0x05, 0xec, 0x3d, 0x5c, 0x53, 0xef, 0x51, - 0x21, 0xd2, 0xeb, 0x26, 0x1a, 0x67, 0xe9, 0x67, 0x5f, 0x86, 0xf1, 0xbb, 0x37, 0x59, 0x7d, 0xa7, - 0x0c, 0x0f, 0x76, 0x59, 0xf6, 0x5c, 0xd6, 0x1b, 0x63, 0xa0, 0xc9, 0xfa, 0x8e, 0x71, 0xa8, 0xc3, - 0xcc, 0x56, 0xdb, 0xf7, 0x0f, 0x98, 0x97, 0x13, 0x71, 0x25, 0x85, 0x50, 0x4c, 0x54, 0x32, 0xeb, - 0xd5, 0x1c, 0x1a, 0x9c, 0x5b, 0x12, 0xbd, 0x06, 0x28, 0xdc, 0x64, 0x09, 0x96, 0xdd, 0x34, 0x59, - 0x06, 0xeb, 0xf8, 0x72, 0xba, 0x18, 0xaf, 0x77, 0x50, 0xe0, 0x9c, 0x52, 0x54, 0xc3, 0x64, 0x2f, - 0xf8, 0xab, 0x66, 0x65, 0x34, 0x4c, 0xac, 0x23, 0xb1, 0x49, 0x8b, 0x2e, 0xc1, 0xb4, 0xb3, 0xef, - 0x78, 0x3c, 0x79, 0x9a, 0x64, 0xc0, 0x55, 0x4c, 0x65, 0x28, 0x5a, 0xcc, 0x12, 0xe0, 0xce, 0x32, - 0x99, 0xe0, 0xb6, 0xa1, 0xe2, 0xe0, 0xb6, 0xee, 0x72, 0xb1, 0x97, 0xdd, 0xcf, 0xfe, 0x4f, 0x16, - 0xdd, 0xbe, 0x72, 0x1e, 0x60, 0xa5, 0xfd, 0xa0, 0xec, 0x57, 0x5a, 0x9c, 0x99, 0xea, 0x87, 0x65, - 0x1d, 0x89, 0x4d, 0x5a, 0x3e, 0x21, 0xe2, 0xd4, 0xc9, 0xda, 0xd0, 0x13, 0x45, 0x00, 0xa8, 0xa2, - 0x40, 0x6f, 0xc2, 0xb0, 0xeb, 0xed, 0x7b, 0x71, 0x18, 0x89, 0xc5, 0x72, 0xdc, 0x47, 0xc7, 0x95, - 0x1c, 0xac, 0x71, 0x36, 0x58, 0xf2, 0xb3, 0xbf, 0x54, 0x82, 0x71, 0x59, 0xe3, 0xeb, 0xed, 0x30, - 0x71, 0x4e, 0x61, 0x5b, 0xbe, 0x64, 0x6c, 
0xcb, 0x9f, 0xe8, 0x16, 0x05, 0xcb, 0x9a, 0x54, 0xb8, - 0x1d, 0x5f, 0xcf, 0x6c, 0xc7, 0x8f, 0xf7, 0x66, 0xd5, 0x7d, 0x1b, 0xfe, 0xe7, 0x16, 0x4c, 0x1b, - 0xf4, 0xa7, 0xb0, 0x1b, 0xac, 0x9a, 0xbb, 0xc1, 0x23, 0x3d, 0xbf, 0xa1, 0x60, 0x17, 0xf8, 0x5a, - 0x29, 0xd3, 0x76, 0x26, 0xfd, 0xdf, 0x83, 0x81, 0x1d, 0x27, 0x72, 0xbb, 0xa5, 0x00, 0xed, 0x28, - 0x34, 0x7f, 0xd9, 0x89, 0x5c, 0x2e, 0xc3, 0x9f, 0x56, 0x6f, 0xc3, 0x39, 0x91, 0xdb, 0x33, 0xa6, - 0x80, 0x55, 0x85, 0x5e, 0x82, 0xa1, 0xb8, 0x19, 0xb6, 0x94, 0xef, 0xe5, 0x79, 0xfe, 0x6e, 0x1c, - 0x85, 0x1c, 0x1d, 0xce, 0x21, 0xb3, 0x3a, 0x0a, 0xc6, 0x82, 0x7e, 0x76, 0x1b, 0x2a, 0xaa, 0xea, - 0x7b, 0xea, 0x55, 0xfe, 0x07, 0x65, 0x38, 0x93, 0x33, 0x2f, 0x50, 0x6c, 0xf4, 0xd6, 0xb3, 0x7d, - 0x4e, 0xa7, 0x0f, 0xd8, 0x5f, 0x31, 0x3b, 0xb1, 0xb8, 0x62, 0xfc, 0xfb, 0xae, 0xf4, 0x46, 0x4c, - 0xb2, 0x95, 0x52, 0x50, 0xef, 0x4a, 0x69, 0x65, 0xa7, 0xd6, 0xd5, 0xb4, 0x22, 0xd5, 0xd2, 0x7b, - 0x3a, 0xa6, 0x7f, 0x52, 0x86, 0x99, 0xbc, 0xe0, 0x79, 0xf4, 0xd3, 0x99, 0x07, 0x45, 0x9e, 0xef, - 0x37, 0xec, 0x9e, 0xbf, 0x32, 0x22, 0xb2, 0x0d, 0xcd, 0x9b, 0x4f, 0x8c, 0xf4, 0xec, 0x66, 0x51, - 0x27, 0x0b, 0xf2, 0x89, 0xf8, 0x43, 0x30, 0x72, 0x89, 0x7f, 0xaa, 0xef, 0x06, 0x88, 0x17, 0x64, - 0xe2, 0x4c, 0x90, 0x8f, 0x04, 0xf7, 0x0e, 0xf2, 0x91, 0x35, 0xcf, 0x7a, 0x30, 0xaa, 0x7d, 0xcd, - 0x3d, 0x1d, 0xf1, 0x5d, 0xba, 0xa3, 0x68, 0xed, 0xbe, 0xa7, 0xa3, 0xfe, 0x15, 0x0b, 0x32, 0x7e, - 0x52, 0xca, 0xfe, 0x61, 0x15, 0xda, 0x3f, 0xce, 0xc3, 0x40, 0x14, 0xfa, 0x24, 0xfb, 0xc6, 0x04, - 0x0e, 0x7d, 0x82, 0x19, 0x46, 0x3d, 0x04, 0x5d, 0x2e, 0x7a, 0x08, 0x9a, 0x1e, 0x8d, 0x7d, 0xb2, - 0x4f, 0xa4, 0x35, 0x42, 0xc9, 0xe4, 0xab, 0x14, 0x88, 0x39, 0xce, 0xfe, 0xb5, 0x01, 0x38, 0x93, - 0x13, 0xd2, 0x46, 0x0f, 0x2a, 0xdb, 0x4e, 0x42, 0x6e, 0x39, 0x07, 0xd9, 0xbc, 0xb7, 0x97, 0x38, - 0x18, 0x4b, 0x3c, 0xf3, 0xe5, 0xe4, 0xa9, 0xf3, 0x32, 0x36, 0x22, 0x91, 0x31, 0x4f, 0x60, 0xef, - 0xd5, 0xdb, 0xc0, 0x17, 0x01, 0xe2, 0xd8, 0x5f, 0x09, 0xa8, 0xf2, 0xe5, 0x0a, 0x4f, 0xd1, 0x34, - 0xcf, 0x62, 0xe3, 0xaa, 0xc0, 0x60, 0x8d, 0x0a, 0xd5, 0x60, 0xaa, 0x15, 0x85, 0x09, 0xb7, 0xbb, - 0xd5, 0xb8, 0x8f, 0xc2, 0xa0, 0x19, 0x9c, 0x54, 0xcf, 0xe0, 0x71, 0x47, 0x09, 0xf4, 0x02, 0x8c, - 0x8a, 0x80, 0xa5, 0x7a, 0x18, 0xfa, 0xc2, 0x4a, 0xa3, 0x6e, 0xbc, 0x1b, 0x29, 0x0a, 0xeb, 0x74, - 0x5a, 0x31, 0x66, 0xcc, 0x1b, 0xce, 0x2d, 0xc6, 0x0d, 0x7a, 0x1a, 0x5d, 0x26, 0x87, 0xc6, 0x48, - 0x5f, 0x39, 0x34, 0x52, 0xbb, 0x55, 0xa5, 0xef, 0xfb, 0x0b, 0xe8, 0x69, 0xe9, 0xf9, 0x46, 0x19, - 0x86, 0xf8, 0x50, 0x9c, 0x82, 0x2a, 0xb6, 0x2a, 0x6c, 0x37, 0x5d, 0x32, 0x0a, 0xf0, 0xb6, 0xcc, - 0xd7, 0x9c, 0xc4, 0xe1, 0x62, 0x48, 0xad, 0x86, 0xd4, 0xca, 0x83, 0xe6, 0x8d, 0xf5, 0x32, 0x9b, - 0x31, 0x4e, 0x00, 0xe7, 0xa1, 0xad, 0x9e, 0x77, 0x00, 0x62, 0xf6, 0x3e, 0x2d, 0xe5, 0x21, 0x72, - 0x53, 0x3c, 0xd9, 0xa5, 0xf6, 0x86, 0x22, 0xe6, 0x6d, 0x48, 0xa7, 0xa0, 0x42, 0x60, 0x8d, 0xe3, - 0xec, 0x8b, 0x50, 0x51, 0xc4, 0xbd, 0x4e, 0x72, 0x63, 0xba, 0xf0, 0xfa, 0x0c, 0x4c, 0x66, 0xea, - 0x3a, 0xd6, 0x41, 0xf0, 0x37, 0x2d, 0x98, 0xe4, 0x4d, 0x5e, 0x09, 0xf6, 0xc5, 0x62, 0x7f, 0x1f, - 0x66, 0xfc, 0x9c, 0x45, 0x27, 0x46, 0xb4, 0xff, 0x45, 0xaa, 0x0e, 0x7e, 0x79, 0x58, 0x9c, 0x5b, - 0x07, 0x3d, 0xfc, 0xf3, 0x97, 0xb5, 0x1d, 0x5f, 0x78, 0x20, 0x8f, 0xf1, 0xdc, 0xe2, 0x1c, 0x86, - 0x15, 0xd6, 0xfe, 0xae, 0x05, 0xd3, 0xbc, 0xe5, 0x57, 0xc8, 0x81, 0x3a, 0xe4, 0x7c, 0x98, 0x6d, - 0x17, 0xa9, 0xd3, 0x4b, 0x05, 0xa9, 0xd3, 0xf5, 0x4f, 0x2b, 0x77, 0xfd, 0xb4, 0x5f, 0xb5, 0x40, - 0xcc, 0xc0, 0x53, 0x50, 0xe7, 0x7f, 0xdc, 0x54, 0xe7, 0x67, 0x8b, 
0x27, 0x75, 0x81, 0x1e, 0xff, - 0x67, 0x16, 0x4c, 0x71, 0x82, 0xf4, 0xf2, 0xe2, 0x43, 0x1d, 0x87, 0x7e, 0xde, 0xf3, 0x51, 0x0f, - 0xa8, 0xe6, 0x7f, 0x94, 0x31, 0x58, 0x03, 0x5d, 0x07, 0xcb, 0x95, 0x0b, 0xe8, 0x18, 0xef, 0x54, - 0x1d, 0x3b, 0xdb, 0x9f, 0xfd, 0xc7, 0x16, 0x20, 0x5e, 0x4d, 0xf6, 0x49, 0x73, 0xbe, 0xf5, 0x69, - 0x07, 0xfa, 0x54, 0xd4, 0x28, 0x0c, 0xd6, 0xa8, 0x4e, 0xa4, 0x7b, 0x32, 0x37, 0x50, 0xe5, 0xde, - 0x37, 0x50, 0xc7, 0xe8, 0xd1, 0xbf, 0x3a, 0x00, 0x59, 0x77, 0x47, 0x74, 0x13, 0xc6, 0x9a, 0x4e, - 0xcb, 0xd9, 0xf4, 0x7c, 0x2f, 0xf1, 0x48, 0xdc, 0xed, 0xea, 0x7a, 0x59, 0xa3, 0x13, 0xd7, 0x3d, - 0x1a, 0x04, 0x1b, 0x7c, 0xd0, 0x3c, 0x40, 0x2b, 0xf2, 0xf6, 0x3d, 0x9f, 0x6c, 0xb3, 0x13, 0x0d, - 0x8b, 0x79, 0xe0, 0xf7, 0xb1, 0x12, 0x8a, 0x35, 0x8a, 0x1c, 0x1f, 0xf9, 0xf2, 0xbd, 0xf3, 0x91, - 0x1f, 0x38, 0xa6, 0x8f, 0xfc, 0x60, 0x5f, 0x3e, 0xf2, 0x18, 0xee, 0x93, 0x7b, 0x37, 0xfd, 0xbf, - 0xea, 0xf9, 0x44, 0x28, 0x6c, 0x3c, 0x12, 0x62, 0xf6, 0xce, 0xe1, 0xdc, 0x7d, 0x38, 0x97, 0x02, - 0x17, 0x94, 0x44, 0x3f, 0x09, 0x55, 0xc7, 0xf7, 0xc3, 0x5b, 0xaa, 0xd7, 0x56, 0xe2, 0xa6, 0xe3, - 0xa7, 0xc9, 0x6f, 0x47, 0x96, 0x1e, 0xba, 0x73, 0x38, 0x57, 0x5d, 0x2c, 0xa0, 0xc1, 0x85, 0xa5, - 0xed, 0x5d, 0x38, 0xd3, 0x20, 0x91, 0x7c, 0xfa, 0x4e, 0x2d, 0xb1, 0x0d, 0xa8, 0x44, 0x19, 0xa1, - 0xd2, 0x57, 0xb8, 0xbc, 0x96, 0x98, 0x4c, 0x0a, 0x91, 0x94, 0x91, 0xfd, 0xa7, 0x16, 0x0c, 0x0b, - 0x17, 0xca, 0x53, 0xd0, 0x65, 0x16, 0x0d, 0xb3, 0xd2, 0x5c, 0xbe, 0xe0, 0x65, 0x8d, 0x29, 0x34, - 0x28, 0xad, 0x65, 0x0c, 0x4a, 0x8f, 0x74, 0x63, 0xd2, 0xdd, 0x94, 0xf4, 0x0b, 0x65, 0x98, 0x30, - 0xdd, 0x47, 0x4f, 0xa1, 0x0b, 0xae, 0xc1, 0x70, 0x2c, 0x7c, 0x95, 0x4b, 0xc5, 0x3e, 0x6f, 0xd9, - 0x41, 0x4c, 0x6f, 0xc6, 0x85, 0x77, 0xb2, 0x64, 0x92, 0xeb, 0x04, 0x5d, 0xbe, 0x87, 0x4e, 0xd0, - 0xbd, 0x3c, 0x78, 0x07, 0x4e, 0xc2, 0x83, 0xd7, 0xfe, 0x26, 0x13, 0xfe, 0x3a, 0xfc, 0x14, 0xf4, - 0x82, 0x4b, 0xe6, 0x36, 0x61, 0x77, 0x99, 0x59, 0xa2, 0x51, 0x05, 0xfa, 0xc1, 0x3f, 0xb2, 0x60, - 0x54, 0x10, 0x9e, 0x42, 0xb3, 0x7f, 0xc2, 0x6c, 0xf6, 0x83, 0x5d, 0x9a, 0x5d, 0xd0, 0xde, 0xbf, - 0x5d, 0x52, 0xed, 0xad, 0x87, 0x51, 0xd2, 0x57, 0x32, 0xf4, 0x11, 0x7a, 0x1a, 0x0c, 0x9b, 0xa1, - 0x2f, 0x36, 0xf3, 0x87, 0xd2, 0x60, 0x38, 0x0e, 0x3f, 0xd2, 0x7e, 0x63, 0x45, 0xcd, 0x62, 0xb5, - 0xc2, 0x28, 0x11, 0x1b, 0x68, 0x1a, 0xab, 0x15, 0x46, 0x09, 0x66, 0x18, 0xe4, 0x02, 0x24, 0x4e, - 0xb4, 0x4d, 0x12, 0x0a, 0x13, 0xd1, 0xa3, 0xc5, 0xab, 0xb0, 0x9d, 0x78, 0xfe, 0xbc, 0x17, 0x24, - 0x71, 0x12, 0xcd, 0xaf, 0x05, 0xc9, 0xf5, 0x88, 0x9f, 0x0d, 0xb4, 0xe8, 0x36, 0xc5, 0x0b, 0x6b, - 0x7c, 0x65, 0x78, 0x05, 0xab, 0x63, 0xd0, 0xbc, 0xef, 0xb9, 0x26, 0xe0, 0x58, 0x51, 0xd8, 0x2f, - 0x32, 0x99, 0xcc, 0x3a, 0xe8, 0x78, 0x81, 0x67, 0xdf, 0x1e, 0x51, 0x5d, 0xcb, 0x8c, 0xbd, 0x35, - 0x3d, 0xbc, 0xad, 0xbb, 0x08, 0xa4, 0x15, 0xeb, 0xae, 0xc4, 0x69, 0x0c, 0x1c, 0xfa, 0x6c, 0xc7, - 0x35, 0xe0, 0x33, 0x3d, 0x64, 0xe9, 0x31, 0x2e, 0xfe, 0x58, 0x66, 0x3e, 0x96, 0xc1, 0x6c, 0xad, - 0x9e, 0x4d, 0x57, 0xbf, 0x2c, 0x11, 0x38, 0xa5, 0x41, 0x0b, 0xe2, 0x64, 0xc9, 0xcd, 0x2c, 0x0f, - 0x66, 0x4e, 0x96, 0xf2, 0xf3, 0xb5, 0xa3, 0xe5, 0xb3, 0x30, 0xaa, 0x9e, 0x00, 0xaa, 0xf3, 0x97, - 0x54, 0x2a, 0x5c, 0x97, 0x5a, 0x49, 0xc1, 0x58, 0xa7, 0x41, 0x1b, 0x30, 0x19, 0xf3, 0xf7, 0x89, - 0x64, 0xc4, 0x83, 0xb0, 0x1b, 0x3c, 0x29, 0xaf, 0x0f, 0x1b, 0x26, 0xfa, 0x88, 0x81, 0xf8, 0x62, - 0x95, 0x31, 0x12, 0x59, 0x16, 0xe8, 0x55, 0x98, 0xf0, 0xf5, 0x77, 0x5a, 0xeb, 0xc2, 0xac, 0xa0, - 0x5c, 0xb9, 0x8c, 0x57, 0x5c, 0xeb, 0x38, 0x43, 0x4d, 0x95, 0x00, 0x1d, 0x22, 0x12, 0xe8, 
0x38, - 0xc1, 0x36, 0x89, 0xc5, 0x03, 0x26, 0x4c, 0x09, 0xb8, 0x5a, 0x40, 0x83, 0x0b, 0x4b, 0xa3, 0x97, - 0x60, 0x4c, 0x7e, 0xbe, 0x16, 0x01, 0x94, 0x3a, 0x0c, 0x6a, 0x38, 0x6c, 0x50, 0xa2, 0x5b, 0x70, - 0x56, 0xfe, 0xdf, 0x88, 0x9c, 0xad, 0x2d, 0xaf, 0x29, 0x02, 0xb0, 0x46, 0x19, 0x8b, 0x45, 0xe9, - 0x3d, 0xbd, 0x92, 0x47, 0x74, 0x74, 0x38, 0x77, 0x5e, 0xf4, 0x5a, 0x2e, 0x9e, 0x0d, 0x62, 0x3e, - 0x7f, 0xb4, 0x0e, 0x67, 0x76, 0x88, 0xe3, 0x27, 0x3b, 0xcb, 0x3b, 0xa4, 0xb9, 0x2b, 0x17, 0x11, - 0x8b, 0x2b, 0xd2, 0xdc, 0xec, 0x2e, 0x77, 0x92, 0xe0, 0xbc, 0x72, 0xe8, 0x6d, 0xa8, 0xb6, 0xda, - 0x9b, 0xbe, 0x17, 0xef, 0x5c, 0x0b, 0x13, 0x76, 0x63, 0xa9, 0x5e, 0xd0, 0x11, 0x01, 0x48, 0x2a, - 0xa6, 0xaa, 0x5e, 0x40, 0x87, 0x0b, 0x39, 0xa0, 0xf7, 0xe1, 0x6c, 0x66, 0x32, 0xf0, 0x47, 0x99, - 0x44, 0xa0, 0xd2, 0x13, 0xf9, 0xcb, 0x29, 0xa7, 0x00, 0x0f, 0x8b, 0xcb, 0x45, 0xe1, 0xfc, 0x2a, - 0x3e, 0xd8, 0x3d, 0xf6, 0x7b, 0xb4, 0xb0, 0xa6, 0xdd, 0xa0, 0xcf, 0xc1, 0x98, 0x3e, 0x8b, 0xc4, - 0x06, 0xf3, 0x58, 0xaf, 0x37, 0x89, 0x85, 0x6e, 0xa4, 0x66, 0x94, 0x8e, 0xc3, 0x06, 0x47, 0x9b, - 0x40, 0xfe, 0xf7, 0xa1, 0xab, 0x30, 0xd2, 0xf4, 0x3d, 0x12, 0x24, 0x6b, 0xf5, 0x6e, 0x81, 0xb3, - 0xcb, 0x82, 0x46, 0x74, 0x98, 0xc8, 0xdf, 0xc4, 0x61, 0x58, 0x71, 0xb0, 0x7f, 0xa7, 0x04, 0x73, - 0x3d, 0x52, 0x78, 0x65, 0x6c, 0x80, 0x56, 0x5f, 0x36, 0xc0, 0x45, 0xf9, 0x1e, 0xd0, 0xb5, 0xcc, - 0xf9, 0x33, 0xf3, 0xd6, 0x4f, 0x7a, 0x0a, 0xcd, 0xd2, 0xf7, 0xed, 0xfe, 0xa6, 0x9b, 0x11, 0x07, - 0x7a, 0x7a, 0x01, 0xd6, 0x75, 0x7b, 0xf0, 0x60, 0xff, 0x1a, 0x7d, 0xa1, 0x29, 0xd8, 0xfe, 0x66, - 0x09, 0xce, 0xaa, 0x2e, 0xfc, 0xd1, 0xed, 0xb8, 0x1b, 0x9d, 0x1d, 0x77, 0x02, 0x86, 0x74, 0xfb, - 0x3a, 0x0c, 0x35, 0x0e, 0xe2, 0x66, 0xe2, 0xf7, 0xa1, 0x00, 0x3d, 0x6a, 0x2c, 0xd0, 0x74, 0x9b, - 0x66, 0x4f, 0xfa, 0x89, 0xf5, 0x6a, 0xff, 0x25, 0x0b, 0x26, 0x37, 0x96, 0xeb, 0x8d, 0xb0, 0xb9, - 0x4b, 0x92, 0x45, 0x6e, 0x26, 0xc2, 0x42, 0xff, 0xb1, 0xee, 0x52, 0xaf, 0xc9, 0xd3, 0x98, 0xce, - 0xc3, 0xc0, 0x4e, 0x18, 0x27, 0xd9, 0xcb, 0x92, 0xcb, 0x61, 0x9c, 0x60, 0x86, 0xb1, 0xff, 0xd0, - 0x82, 0x41, 0xf6, 0x8a, 0x5d, 0xaf, 0xd7, 0x0e, 0xfb, 0xf9, 0x2e, 0xf4, 0x02, 0x0c, 0x91, 0xad, - 0x2d, 0xd2, 0x4c, 0xc4, 0xa8, 0xca, 0x88, 0x9c, 0xa1, 0x15, 0x06, 0xa5, 0x9b, 0x3e, 0xab, 0x8c, - 0xff, 0xc5, 0x82, 0x18, 0x7d, 0x16, 0x2a, 0x89, 0xb7, 0x47, 0x16, 0x5d, 0x57, 0xdc, 0x53, 0x1c, - 0xcf, 0x25, 0x4d, 0x29, 0x21, 0x1b, 0x92, 0x09, 0x4e, 0xf9, 0xd9, 0x3f, 0x5f, 0x02, 0x48, 0x23, - 0xf7, 0x7a, 0x7d, 0xe6, 0x52, 0xc7, 0xa3, 0x8e, 0x8f, 0xe5, 0x3c, 0xea, 0x88, 0x52, 0x86, 0x39, - 0x4f, 0x3a, 0xaa, 0xae, 0x2a, 0xf7, 0xd5, 0x55, 0x03, 0xc7, 0xe9, 0xaa, 0x65, 0x98, 0x4e, 0x23, - 0x0f, 0xcd, 0x30, 0x6c, 0x96, 0xae, 0x77, 0x23, 0x8b, 0xc4, 0x9d, 0xf4, 0xf6, 0x17, 0x2d, 0x10, - 0x6e, 0xca, 0x7d, 0x4c, 0xe8, 0xb7, 0xe4, 0xfb, 0x6b, 0x46, 0x5e, 0xc1, 0xf3, 0xc5, 0x7e, 0xdb, - 0x22, 0x9b, 0xa0, 0xda, 0x40, 0x8c, 0x1c, 0x82, 0x06, 0x2f, 0xfb, 0xaf, 0x97, 0x60, 0x94, 0xa3, - 0x59, 0xce, 0xba, 0x3e, 0x5a, 0x73, 0xac, 0x64, 0xcf, 0xec, 0x69, 0x32, 0xca, 0x58, 0xe5, 0x04, - 0xd6, 0x9f, 0x26, 0x93, 0x08, 0x9c, 0xd2, 0xa0, 0x27, 0x60, 0x38, 0x6e, 0x6f, 0x32, 0xf2, 0x8c, - 0xa7, 0x72, 0x83, 0x83, 0xb1, 0xc4, 0xa3, 0x9f, 0x84, 0x29, 0x5e, 0x2e, 0x0a, 0x5b, 0xce, 0x36, - 0xb7, 0xef, 0x0c, 0xaa, 0x38, 0x95, 0xa9, 0xf5, 0x0c, 0xee, 0xe8, 0x70, 0x6e, 0x26, 0x0b, 0x63, - 0x96, 0xc1, 0x0e, 0x2e, 0x74, 0xc6, 0x4e, 0x65, 0x5d, 0xe0, 0xd1, 0x65, 0x18, 0xe2, 0x02, 0x49, - 0x08, 0x88, 0x2e, 0xf7, 0x3d, 0x9a, 0xe3, 0x3c, 0xf0, 0x57, 0xfa, 0x99, 0x20, 0x13, 0xe5, 0xd1, - 0xdb, 0x30, 0xea, 
0x86, 0xb7, 0x82, 0x5b, 0x4e, 0xe4, 0x2e, 0xd6, 0xd7, 0xc4, 0x78, 0xe6, 0xea, - 0x35, 0xb5, 0x94, 0x4c, 0x77, 0xc6, 0x67, 0xb6, 0xcd, 0x14, 0x85, 0x75, 0x76, 0x68, 0x83, 0xa5, - 0x59, 0xe1, 0x6f, 0x07, 0x77, 0x73, 0xed, 0x51, 0xcf, 0x0d, 0x6b, 0x9c, 0xc7, 0x45, 0x2e, 0x16, - 0xf1, 0xf2, 0x70, 0xca, 0xc8, 0xfe, 0xfc, 0x19, 0x30, 0xe6, 0x91, 0x91, 0xec, 0xd9, 0x3a, 0xa1, - 0x64, 0xcf, 0x18, 0x46, 0xc8, 0x5e, 0x2b, 0x39, 0xa8, 0x79, 0x51, 0xb7, 0x2c, 0xff, 0x2b, 0x82, - 0xa6, 0x93, 0xa7, 0xc4, 0x60, 0xc5, 0x27, 0x3f, 0x23, 0x77, 0xf9, 0x43, 0xcc, 0xc8, 0x3d, 0x70, - 0x8a, 0x19, 0xb9, 0xaf, 0xc1, 0xf0, 0xb6, 0x97, 0x60, 0xd2, 0x0a, 0xc5, 0x66, 0x9c, 0x3b, 0x13, - 0x2e, 0x71, 0x92, 0xce, 0xbc, 0xb1, 0x02, 0x81, 0x25, 0x13, 0xf4, 0x9a, 0x5a, 0x03, 0x43, 0xc5, - 0xba, 0x6c, 0xe7, 0xd5, 0x40, 0xee, 0x2a, 0x10, 0x19, 0xb8, 0x87, 0xef, 0x36, 0x03, 0xb7, 0xca, - 0xa0, 0x3d, 0xf2, 0xc1, 0x32, 0x68, 0x1b, 0x19, 0xc6, 0x2b, 0x27, 0x97, 0x61, 0xfc, 0x8b, 0x16, - 0x9c, 0x6d, 0xe5, 0x25, 0xdb, 0x17, 0xb9, 0xb0, 0x5f, 0xe8, 0xfb, 0x35, 0x01, 0xa3, 0x42, 0x76, - 0xa8, 0xc9, 0x25, 0xc3, 0xf9, 0xd5, 0xc9, 0x54, 0xe5, 0xa3, 0x77, 0x9b, 0xaa, 0xfc, 0xde, 0x24, - 0xcd, 0x4e, 0x13, 0x97, 0x8f, 0x7f, 0xe0, 0xc4, 0xe5, 0xaf, 0xa9, 0xc4, 0xe5, 0x5d, 0x92, 0x59, - 0xf0, 0xb4, 0xe4, 0x3d, 0xd3, 0x95, 0x6b, 0x29, 0xc7, 0x27, 0x4f, 0x22, 0xe5, 0xf8, 0x3b, 0xa6, - 0xb0, 0xe7, 0xf9, 0xaf, 0x9f, 0xea, 0x21, 0xec, 0x0d, 0xbe, 0xdd, 0xc5, 0x3d, 0x4f, 0xaf, 0x3e, - 0x7d, 0x57, 0xe9, 0xd5, 0x6f, 0xea, 0x89, 0xcb, 0x51, 0x8f, 0xcc, 0xdc, 0x94, 0xa8, 0xcf, 0x74, - 0xe5, 0x37, 0xf5, 0x2d, 0xe8, 0x4c, 0x31, 0x5f, 0xb5, 0xd3, 0x74, 0xf2, 0xcd, 0xdb, 0x84, 0x3a, - 0xd3, 0xa0, 0xcf, 0x9c, 0x4e, 0x1a, 0xf4, 0xb3, 0x27, 0x9e, 0x06, 0xfd, 0xbe, 0x53, 0x48, 0x83, - 0x7e, 0xff, 0x87, 0x9a, 0x06, 0xbd, 0x7a, 0x6f, 0xd3, 0xa0, 0x3f, 0x70, 0x12, 0x69, 0xd0, 0x6f, - 0x42, 0xa5, 0x25, 0xa3, 0x24, 0xab, 0xb3, 0xc5, 0x43, 0x92, 0x1b, 0x4a, 0xc9, 0x87, 0x44, 0xa1, - 0x70, 0xca, 0x8a, 0xf2, 0x4d, 0xd3, 0xa2, 0x3f, 0xd8, 0xc5, 0x6c, 0x95, 0x67, 0x10, 0xe8, 0x92, - 0x0c, 0xfd, 0x2f, 0x97, 0xe0, 0x5c, 0xf7, 0x79, 0x9d, 0x5a, 0x13, 0xea, 0xa9, 0xf5, 0x3b, 0x63, - 0x4d, 0x60, 0x4a, 0x97, 0x46, 0xd5, 0x77, 0x28, 0xf9, 0x25, 0x98, 0x56, 0xce, 0x62, 0xbe, 0xd7, - 0x3c, 0xd0, 0x5e, 0x4c, 0x52, 0xf1, 0x07, 0x8d, 0x2c, 0x01, 0xee, 0x2c, 0x83, 0x16, 0x61, 0xd2, - 0x00, 0xae, 0xd5, 0x84, 0xb2, 0xaf, 0xcc, 0x17, 0x0d, 0x13, 0x8d, 0xb3, 0xf4, 0xf6, 0xd7, 0x2c, - 0xb8, 0xbf, 0x20, 0x37, 0x6a, 0xdf, 0x91, 0xd2, 0x5b, 0x30, 0xd9, 0x32, 0x8b, 0xf6, 0x48, 0xa8, - 0x60, 0x64, 0x60, 0x55, 0x6d, 0xcd, 0x20, 0x70, 0x96, 0xe9, 0xd2, 0x85, 0x6f, 0x7d, 0xef, 0xdc, - 0xc7, 0x7e, 0xff, 0x7b, 0xe7, 0x3e, 0xf6, 0xdd, 0xef, 0x9d, 0xfb, 0xd8, 0xff, 0x7f, 0xe7, 0x9c, - 0xf5, 0xad, 0x3b, 0xe7, 0xac, 0xdf, 0xbf, 0x73, 0xce, 0xfa, 0xee, 0x9d, 0x73, 0xd6, 0x1f, 0xdd, - 0x39, 0x67, 0xfd, 0xfc, 0xf7, 0xcf, 0x7d, 0xec, 0xad, 0xd2, 0xfe, 0xb3, 0xff, 0x2f, 0x00, 0x00, - 0xff, 0xff, 0x98, 0xcc, 0x13, 0x07, 0x60, 0xd1, 0x00, 0x00, + // 12346 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x6d, 0x90, 0x24, 0x47, + 0x56, 0xd8, 0x55, 0xf7, 0x7c, 0xf5, 0x9b, 0xef, 0xdc, 0x5d, 0xa9, 0x77, 0x24, 0x6d, 0xaf, 0x4a, + 0x77, 0xd2, 0xea, 0x6b, 0xe6, 0xb4, 0x92, 0x4e, 0xcb, 0xe9, 0x4e, 0x30, 0x33, 0x3d, 0xb3, 0xdb, + 0xda, 0x9d, 0xd9, 0x56, 0xf6, 0xec, 0xee, 0x9d, 0x10, 0xe7, 0xab, 0xe9, 0xce, 0x99, 0x29, 0x4d, + 0x4d, 0x55, 0xab, 0xaa, 0x7a, 0x76, 0x47, 0x01, 0x11, 0xf6, 0x19, 0xf0, 0x07, 0xfc, 0x20, 0x6c, + 0xc2, 0xc6, 0x40, 0xe0, 0x08, 
0x1b, 0x07, 0x9c, 0xb1, 0x1d, 0xc6, 0x60, 0xc0, 0x80, 0x6d, 0x8c, + 0x1d, 0x0e, 0xf8, 0x83, 0xc1, 0xfe, 0x71, 0x44, 0x10, 0x1e, 0xc3, 0x40, 0xd8, 0xc1, 0x0f, 0x3b, + 0x6c, 0xf3, 0x8b, 0x31, 0x36, 0x8e, 0xfc, 0xac, 0xcc, 0xea, 0xaa, 0xee, 0x9e, 0xd5, 0xec, 0x48, + 0x10, 0xf7, 0xaf, 0x3b, 0xdf, 0xcb, 0x97, 0x59, 0xf9, 0xf1, 0xf2, 0xe5, 0xcb, 0xf7, 0x01, 0x6f, + 0xee, 0x5e, 0x8b, 0xe6, 0xdd, 0x60, 0x61, 0xb7, 0xb3, 0x49, 0x42, 0x9f, 0xc4, 0x24, 0x5a, 0xd8, + 0x27, 0x7e, 0x2b, 0x08, 0x17, 0x04, 0xc0, 0x69, 0xbb, 0x0b, 0xcd, 0x20, 0x24, 0x0b, 0xfb, 0xaf, + 0x2c, 0x6c, 0x13, 0x9f, 0x84, 0x4e, 0x4c, 0x5a, 0xf3, 0xed, 0x30, 0x88, 0x03, 0x84, 0x38, 0xce, + 0xbc, 0xd3, 0x76, 0xe7, 0x29, 0xce, 0xfc, 0xfe, 0x2b, 0x73, 0x2f, 0x6f, 0xbb, 0xf1, 0x4e, 0x67, + 0x73, 0xbe, 0x19, 0xec, 0x2d, 0x6c, 0x07, 0xdb, 0xc1, 0x02, 0x43, 0xdd, 0xec, 0x6c, 0xb1, 0x7f, + 0xec, 0x0f, 0xfb, 0xc5, 0x49, 0xcc, 0xad, 0x25, 0xcd, 0x90, 0x07, 0x31, 0xf1, 0x23, 0x37, 0xf0, + 0xa3, 0x97, 0x9d, 0xb6, 0x1b, 0x91, 0x70, 0x9f, 0x84, 0x0b, 0xed, 0xdd, 0x6d, 0x0a, 0x8b, 0x4c, + 0x84, 0x85, 0xfd, 0x57, 0x36, 0x49, 0xec, 0x74, 0xf5, 0x68, 0xee, 0xb5, 0x84, 0xdc, 0x9e, 0xd3, + 0xdc, 0x71, 0x7d, 0x12, 0x1e, 0x48, 0x1a, 0x0b, 0x21, 0x89, 0x82, 0x4e, 0xd8, 0x24, 0x27, 0xaa, + 0x15, 0x2d, 0xec, 0x91, 0xd8, 0xc9, 0xf8, 0xfa, 0xb9, 0x85, 0xbc, 0x5a, 0x61, 0xc7, 0x8f, 0xdd, + 0xbd, 0xee, 0x66, 0x3e, 0xd7, 0xaf, 0x42, 0xd4, 0xdc, 0x21, 0x7b, 0x4e, 0x57, 0xbd, 0x57, 0xf3, + 0xea, 0x75, 0x62, 0xd7, 0x5b, 0x70, 0xfd, 0x38, 0x8a, 0xc3, 0x74, 0x25, 0xfb, 0x1b, 0x16, 0x5c, + 0x5e, 0xbc, 0xd7, 0x58, 0xf1, 0x9c, 0x28, 0x76, 0x9b, 0x4b, 0x5e, 0xd0, 0xdc, 0x6d, 0xc4, 0x41, + 0x48, 0xee, 0x06, 0x5e, 0x67, 0x8f, 0x34, 0xd8, 0x40, 0xa0, 0x97, 0x60, 0x6c, 0x9f, 0xfd, 0xaf, + 0x55, 0xcb, 0xd6, 0x65, 0xeb, 0x4a, 0x69, 0x69, 0xe6, 0xd7, 0x0e, 0x2b, 0x9f, 0x3a, 0x3a, 0xac, + 0x8c, 0xdd, 0x15, 0xe5, 0x58, 0x61, 0xa0, 0x67, 0x61, 0x64, 0x2b, 0xda, 0x38, 0x68, 0x93, 0x72, + 0x81, 0xe1, 0x4e, 0x09, 0xdc, 0x91, 0xd5, 0x06, 0x2d, 0xc5, 0x02, 0x8a, 0x16, 0xa0, 0xd4, 0x76, + 0xc2, 0xd8, 0x8d, 0xdd, 0xc0, 0x2f, 0x17, 0x2f, 0x5b, 0x57, 0x86, 0x97, 0x66, 0x05, 0x6a, 0xa9, + 0x2e, 0x01, 0x38, 0xc1, 0xa1, 0xdd, 0x08, 0x89, 0xd3, 0xba, 0xed, 0x7b, 0x07, 0xe5, 0xa1, 0xcb, + 0xd6, 0x95, 0xb1, 0xa4, 0x1b, 0x58, 0x94, 0x63, 0x85, 0x61, 0xff, 0x70, 0x01, 0xc6, 0x16, 0xb7, + 0xb6, 0x5c, 0xdf, 0x8d, 0x0f, 0xd0, 0x5d, 0x98, 0xf0, 0x83, 0x16, 0x91, 0xff, 0xd9, 0x57, 0x8c, + 0x5f, 0xbd, 0x3c, 0xdf, 0xbd, 0x32, 0xe7, 0xd7, 0x35, 0xbc, 0xa5, 0x99, 0xa3, 0xc3, 0xca, 0x84, + 0x5e, 0x82, 0x0d, 0x3a, 0x08, 0xc3, 0x78, 0x3b, 0x68, 0x29, 0xb2, 0x05, 0x46, 0xb6, 0x92, 0x45, + 0xb6, 0x9e, 0xa0, 0x2d, 0x4d, 0x1f, 0x1d, 0x56, 0xc6, 0xb5, 0x02, 0xac, 0x13, 0x41, 0x9b, 0x30, + 0x4d, 0xff, 0xfa, 0xb1, 0xab, 0xe8, 0x16, 0x19, 0xdd, 0x67, 0xf2, 0xe8, 0x6a, 0xa8, 0x4b, 0xe7, + 0x8e, 0x0e, 0x2b, 0xd3, 0xa9, 0x42, 0x9c, 0x26, 0x68, 0x7f, 0x08, 0x53, 0x8b, 0x71, 0xec, 0x34, + 0x77, 0x48, 0x8b, 0xcf, 0x20, 0x7a, 0x0d, 0x86, 0x7c, 0x67, 0x8f, 0x88, 0xf9, 0xbd, 0x2c, 0x06, + 0x76, 0x68, 0xdd, 0xd9, 0x23, 0xc7, 0x87, 0x95, 0x99, 0x3b, 0xbe, 0xfb, 0x41, 0x47, 0xac, 0x0a, + 0x5a, 0x86, 0x19, 0x36, 0xba, 0x0a, 0xd0, 0x22, 0xfb, 0x6e, 0x93, 0xd4, 0x9d, 0x78, 0x47, 0xcc, + 0x37, 0x12, 0x75, 0xa1, 0xaa, 0x20, 0x58, 0xc3, 0xb2, 0x1f, 0x40, 0x69, 0x71, 0x3f, 0x70, 0x5b, + 0xf5, 0xa0, 0x15, 0xa1, 0x5d, 0x98, 0x6e, 0x87, 0x64, 0x8b, 0x84, 0xaa, 0xa8, 0x6c, 0x5d, 0x2e, + 0x5e, 0x19, 0xbf, 0x7a, 0x25, 0xf3, 0x63, 0x4d, 0xd4, 0x15, 0x3f, 0x0e, 0x0f, 0x96, 0x1e, 0x17, + 0xed, 0x4d, 0xa7, 0xa0, 0x38, 0x4d, 0xd9, 0xfe, 0x77, 
0x05, 0xb8, 0xb0, 0xf8, 0x61, 0x27, 0x24, + 0x55, 0x37, 0xda, 0x4d, 0xaf, 0xf0, 0x96, 0x1b, 0xed, 0xae, 0x27, 0x23, 0xa0, 0x96, 0x56, 0x55, + 0x94, 0x63, 0x85, 0x81, 0x5e, 0x86, 0x51, 0xfa, 0xfb, 0x0e, 0xae, 0x89, 0x4f, 0x3e, 0x27, 0x90, + 0xc7, 0xab, 0x4e, 0xec, 0x54, 0x39, 0x08, 0x4b, 0x1c, 0xb4, 0x06, 0xe3, 0x4d, 0xb6, 0x21, 0xb7, + 0xd7, 0x82, 0x16, 0x61, 0x93, 0x59, 0x5a, 0x7a, 0x91, 0xa2, 0x2f, 0x27, 0xc5, 0xc7, 0x87, 0x95, + 0x32, 0xef, 0x9b, 0x20, 0xa1, 0xc1, 0xb0, 0x5e, 0x1f, 0xd9, 0x6a, 0x7f, 0x0d, 0x31, 0x4a, 0x90, + 0xb1, 0xb7, 0xae, 0x68, 0x5b, 0x65, 0x98, 0x6d, 0x95, 0x89, 0xec, 0x6d, 0x82, 0x5e, 0x81, 0xa1, + 0x5d, 0xd7, 0x6f, 0x95, 0x47, 0x18, 0xad, 0xa7, 0xe8, 0x9c, 0xdf, 0x74, 0xfd, 0xd6, 0xf1, 0x61, + 0x65, 0xd6, 0xe8, 0x0e, 0x2d, 0xc4, 0x0c, 0xd5, 0xfe, 0x23, 0x0b, 0x2a, 0x0c, 0xb6, 0xea, 0x7a, + 0xa4, 0x4e, 0xc2, 0xc8, 0x8d, 0x62, 0xe2, 0xc7, 0xc6, 0x80, 0x5e, 0x05, 0x88, 0x48, 0x33, 0x24, + 0xb1, 0x36, 0xa4, 0x6a, 0x61, 0x34, 0x14, 0x04, 0x6b, 0x58, 0x94, 0x21, 0x44, 0x3b, 0x4e, 0xc8, + 0xd6, 0x97, 0x18, 0x58, 0xc5, 0x10, 0x1a, 0x12, 0x80, 0x13, 0x1c, 0x83, 0x21, 0x14, 0xfb, 0x31, + 0x04, 0xf4, 0x45, 0x98, 0x4e, 0x1a, 0x8b, 0xda, 0x4e, 0x53, 0x0e, 0x20, 0xdb, 0x32, 0x0d, 0x13, + 0x84, 0xd3, 0xb8, 0xf6, 0x3f, 0xb4, 0xc4, 0xe2, 0xa1, 0x5f, 0xfd, 0x09, 0xff, 0x56, 0xfb, 0x17, + 0x2c, 0x18, 0x5d, 0x72, 0xfd, 0x96, 0xeb, 0x6f, 0xa3, 0xaf, 0xc2, 0x18, 0x3d, 0x9b, 0x5a, 0x4e, + 0xec, 0x08, 0xbe, 0xf7, 0x59, 0x6d, 0x6f, 0xa9, 0xa3, 0x62, 0xbe, 0xbd, 0xbb, 0x4d, 0x0b, 0xa2, + 0x79, 0x8a, 0x4d, 0x77, 0xdb, 0xed, 0xcd, 0xf7, 0x49, 0x33, 0x5e, 0x23, 0xb1, 0x93, 0x7c, 0x4e, + 0x52, 0x86, 0x15, 0x55, 0x74, 0x13, 0x46, 0x62, 0x27, 0xdc, 0x26, 0xb1, 0x60, 0x80, 0x99, 0x8c, + 0x8a, 0xd7, 0xc4, 0x74, 0x47, 0x12, 0xbf, 0x49, 0x92, 0x63, 0x61, 0x83, 0x55, 0xc5, 0x82, 0x84, + 0xfd, 0x53, 0x16, 0x5c, 0x5c, 0x6e, 0xd4, 0x72, 0xd6, 0xd5, 0xb3, 0x30, 0xd2, 0x0a, 0xdd, 0x7d, + 0x12, 0x8a, 0x71, 0x56, 0x54, 0xaa, 0xac, 0x14, 0x0b, 0x28, 0xba, 0x06, 0x13, 0xfc, 0x40, 0xba, + 0xe1, 0xf8, 0x2d, 0x4f, 0x0e, 0xf1, 0x79, 0x81, 0x3d, 0x71, 0x57, 0x83, 0x61, 0x03, 0xf3, 0x84, + 0x03, 0xdd, 0x84, 0x89, 0x65, 0xa7, 0xed, 0x6c, 0xba, 0x9e, 0x1b, 0xbb, 0x24, 0x42, 0xcf, 0x41, + 0xd1, 0x69, 0xb5, 0x18, 0x0f, 0x2b, 0x2d, 0x5d, 0x38, 0x3a, 0xac, 0x14, 0x17, 0x5b, 0x74, 0x33, + 0x81, 0xc2, 0x3a, 0xc0, 0x14, 0x03, 0xbd, 0x00, 0x43, 0xad, 0x30, 0x68, 0x97, 0x0b, 0x0c, 0xf3, + 0x31, 0xba, 0xef, 0xaa, 0x61, 0xd0, 0x4e, 0xa1, 0x32, 0x1c, 0xfb, 0x57, 0x0a, 0xf0, 0xe4, 0x32, + 0x69, 0xef, 0xac, 0x36, 0x72, 0x46, 0xe5, 0x0a, 0x8c, 0xed, 0x05, 0xbe, 0x1b, 0x07, 0x61, 0x24, + 0x9a, 0x66, 0xdb, 0x7d, 0x4d, 0x94, 0x61, 0x05, 0x45, 0x97, 0x61, 0xa8, 0x9d, 0xb0, 0xea, 0x09, + 0xc9, 0xe6, 0x19, 0x93, 0x66, 0x10, 0x8a, 0xd1, 0x89, 0x48, 0x28, 0xd8, 0x94, 0xc2, 0xb8, 0x13, + 0x91, 0x10, 0x33, 0x48, 0xb2, 0xde, 0xe9, 0x4e, 0x10, 0x7b, 0x28, 0xb5, 0xde, 0x29, 0x04, 0x6b, + 0x58, 0xa8, 0x0e, 0x25, 0xfe, 0x0f, 0x93, 0x2d, 0xc6, 0x91, 0x72, 0x56, 0x49, 0x43, 0x22, 0x89, + 0x55, 0x32, 0xc9, 0x36, 0x84, 0x2c, 0xc4, 0x09, 0x11, 0x63, 0x9e, 0x46, 0xfa, 0xce, 0xd3, 0x2f, + 0x15, 0x00, 0xf1, 0x21, 0xfc, 0x33, 0x36, 0x70, 0x77, 0xba, 0x07, 0x2e, 0xf3, 0x68, 0xbc, 0x15, + 0x34, 0x1d, 0x2f, 0xbd, 0xc7, 0x4e, 0x6b, 0xf4, 0x7e, 0xc8, 0x02, 0xb4, 0xec, 0xfa, 0x2d, 0x12, + 0x9e, 0x81, 0x5c, 0x78, 0xb2, 0x0d, 0x78, 0x0b, 0xa6, 0x96, 0x3d, 0x97, 0xf8, 0x71, 0xad, 0xbe, + 0x1c, 0xf8, 0x5b, 0xee, 0x36, 0xfa, 0x3c, 0x4c, 0x51, 0x31, 0x39, 0xe8, 0xc4, 0x0d, 0xd2, 0x0c, + 0x7c, 0x26, 0x51, 0x50, 0xe1, 0x12, 0x1d, 0x1d, 0x56, 0xa6, 0x36, 0x0c, 0x08, 
0x4e, 0x61, 0xda, + 0xbf, 0x43, 0x3f, 0x34, 0xd8, 0x6b, 0x07, 0x3e, 0xf1, 0xe3, 0xe5, 0xc0, 0x6f, 0x71, 0xc9, 0xf3, + 0xf3, 0x30, 0x14, 0xd3, 0x8e, 0xf3, 0x8f, 0x7c, 0x56, 0x4e, 0x2d, 0xed, 0xee, 0xf1, 0x61, 0xe5, + 0xb1, 0xee, 0x1a, 0xec, 0x83, 0x58, 0x1d, 0xf4, 0x2d, 0x30, 0x12, 0xc5, 0x4e, 0xdc, 0x89, 0xc4, + 0x67, 0x3f, 0x2d, 0x3f, 0xbb, 0xc1, 0x4a, 0x8f, 0x0f, 0x2b, 0xd3, 0xaa, 0x1a, 0x2f, 0xc2, 0xa2, + 0x02, 0x7a, 0x1e, 0x46, 0xf7, 0x48, 0x14, 0x39, 0xdb, 0x52, 0x68, 0x98, 0x16, 0x75, 0x47, 0xd7, + 0x78, 0x31, 0x96, 0x70, 0xf4, 0x0c, 0x0c, 0x93, 0x30, 0x0c, 0x42, 0xb1, 0xaa, 0x26, 0x05, 0xe2, + 0xf0, 0x0a, 0x2d, 0xc4, 0x1c, 0x66, 0xff, 0x07, 0x0b, 0xa6, 0x55, 0x5f, 0x79, 0x5b, 0x67, 0x70, + 0x3a, 0xbc, 0x0b, 0xd0, 0x94, 0x1f, 0x18, 0x31, 0x7e, 0x37, 0x7e, 0xf5, 0xd9, 0xac, 0x25, 0xdc, + 0x3d, 0x8c, 0x09, 0x65, 0x55, 0x14, 0x61, 0x8d, 0x9a, 0xfd, 0x2f, 0x2d, 0x38, 0x97, 0xfa, 0xa2, + 0x5b, 0x6e, 0x14, 0xa3, 0xf7, 0xba, 0xbe, 0x6a, 0x7e, 0xb0, 0xaf, 0xa2, 0xb5, 0xd9, 0x37, 0xa9, + 0x35, 0x27, 0x4b, 0xb4, 0x2f, 0xba, 0x01, 0xc3, 0x6e, 0x4c, 0xf6, 0xe4, 0xc7, 0x3c, 0xd3, 0xf3, + 0x63, 0x78, 0xaf, 0x92, 0x19, 0xa9, 0xd1, 0x9a, 0x98, 0x13, 0xb0, 0xff, 0x97, 0x05, 0x25, 0xbe, + 0x6c, 0xd7, 0x9c, 0xf6, 0x19, 0xcc, 0x45, 0x0d, 0x86, 0x18, 0x75, 0xde, 0xf1, 0xe7, 0xb2, 0x3b, + 0x2e, 0xba, 0x33, 0x4f, 0x45, 0x3f, 0x2e, 0x62, 0x2b, 0x66, 0x46, 0x8b, 0x30, 0x23, 0x31, 0xf7, + 0x06, 0x94, 0x14, 0x02, 0x9a, 0x81, 0xe2, 0x2e, 0xe1, 0xd7, 0xaa, 0x12, 0xa6, 0x3f, 0xd1, 0x79, + 0x18, 0xde, 0x77, 0xbc, 0x8e, 0xd8, 0xec, 0x98, 0xff, 0xf9, 0x7c, 0xe1, 0x9a, 0x65, 0xff, 0x22, + 0xdb, 0x63, 0xa2, 0x91, 0x15, 0x7f, 0x5f, 0x30, 0x93, 0x0f, 0xe1, 0xbc, 0x97, 0xc1, 0xc3, 0xc4, + 0x40, 0x0c, 0xce, 0xf3, 0x9e, 0x14, 0x7d, 0x3d, 0x9f, 0x05, 0xc5, 0x99, 0x6d, 0xd0, 0x63, 0x20, + 0x68, 0xd3, 0x15, 0xe5, 0x78, 0xac, 0xbf, 0x42, 0x5c, 0xbe, 0x2d, 0xca, 0xb0, 0x82, 0x52, 0x06, + 0x71, 0x5e, 0x75, 0xfe, 0x26, 0x39, 0x68, 0x10, 0x8f, 0x34, 0xe3, 0x20, 0xfc, 0x58, 0xbb, 0xff, + 0x14, 0x1f, 0x7d, 0xce, 0x5f, 0xc6, 0x05, 0x81, 0xe2, 0x4d, 0x72, 0xc0, 0xa7, 0x42, 0xff, 0xba, + 0x62, 0xcf, 0xaf, 0xfb, 0x69, 0x0b, 0x26, 0xd5, 0xd7, 0x9d, 0xc1, 0x46, 0x5a, 0x32, 0x37, 0xd2, + 0x53, 0x3d, 0xd7, 0x63, 0xce, 0x16, 0xfa, 0x53, 0xc6, 0x02, 0x04, 0x4e, 0x3d, 0x0c, 0xe8, 0xd0, + 0x50, 0x9e, 0xfd, 0x71, 0x4e, 0xc8, 0x20, 0xdf, 0x75, 0x93, 0x1c, 0x6c, 0x04, 0x54, 0x7c, 0xc8, + 0xfe, 0x2e, 0x63, 0xd6, 0x86, 0x7a, 0xce, 0xda, 0xcf, 0x16, 0xe0, 0x82, 0x1a, 0x01, 0xe3, 0x80, + 0xfe, 0xb3, 0x3e, 0x06, 0xaf, 0xc0, 0x78, 0x8b, 0x6c, 0x39, 0x1d, 0x2f, 0x56, 0x37, 0xe7, 0x61, + 0xae, 0x3d, 0xa9, 0x26, 0xc5, 0x58, 0xc7, 0x39, 0xc1, 0xb0, 0xfd, 0xf8, 0x38, 0xe3, 0xbd, 0xb1, + 0x43, 0x57, 0x30, 0x95, 0xde, 0x34, 0xfd, 0xc7, 0x84, 0xae, 0xff, 0x10, 0xba, 0x8e, 0x67, 0x60, + 0xd8, 0xdd, 0xa3, 0x67, 0x71, 0xc1, 0x3c, 0x62, 0x6b, 0xb4, 0x10, 0x73, 0x18, 0xfa, 0x0c, 0x8c, + 0x36, 0x83, 0xbd, 0x3d, 0xc7, 0x6f, 0x95, 0x8b, 0x4c, 0x9e, 0x1c, 0xa7, 0xc7, 0xf5, 0x32, 0x2f, + 0xc2, 0x12, 0x86, 0x9e, 0x84, 0x21, 0x27, 0xdc, 0x8e, 0xca, 0x43, 0x0c, 0x67, 0x8c, 0xb6, 0xb4, + 0x18, 0x6e, 0x47, 0x98, 0x95, 0x52, 0x39, 0xf1, 0x7e, 0x10, 0xee, 0xba, 0xfe, 0x76, 0xd5, 0x0d, + 0x99, 0xd0, 0xa7, 0xc9, 0x89, 0xf7, 0x14, 0x04, 0x6b, 0x58, 0x68, 0x15, 0x86, 0xdb, 0x41, 0x18, + 0x47, 0xe5, 0x11, 0x36, 0xdc, 0x4f, 0xe7, 0x6c, 0x25, 0xfe, 0xb5, 0xf5, 0x20, 0x8c, 0x93, 0x0f, + 0xa0, 0xff, 0x22, 0xcc, 0xab, 0xa3, 0x6f, 0x81, 0x22, 0xf1, 0xf7, 0xcb, 0xa3, 0x8c, 0xca, 0x5c, + 0x16, 0x95, 0x15, 0x7f, 0xff, 0xae, 0x13, 0x26, 0x7c, 0x66, 0xc5, 0xdf, 0xc7, 0xb4, 0x0e, 0xfa, + 0x32, 
0x94, 0xa4, 0xee, 0x34, 0x2a, 0x8f, 0xe5, 0x2f, 0x31, 0x2c, 0x90, 0x30, 0xf9, 0xa0, 0xe3, + 0x86, 0x64, 0x8f, 0xf8, 0x71, 0x94, 0xdc, 0x7e, 0x25, 0x34, 0xc2, 0x09, 0x35, 0xf4, 0x65, 0x79, + 0x9d, 0x5b, 0x0b, 0x3a, 0x7e, 0x1c, 0x95, 0x4b, 0xac, 0x7b, 0x99, 0x8a, 0xb6, 0xbb, 0x09, 0x5e, + 0xfa, 0xbe, 0xc7, 0x2b, 0x63, 0x83, 0x14, 0xc2, 0x30, 0xe9, 0xb9, 0xfb, 0xc4, 0x27, 0x51, 0x54, + 0x0f, 0x83, 0x4d, 0x52, 0x06, 0xd6, 0xf3, 0x8b, 0xd9, 0xfa, 0xa7, 0x60, 0x93, 0x2c, 0xcd, 0x1e, + 0x1d, 0x56, 0x26, 0x6f, 0xe9, 0x75, 0xb0, 0x49, 0x02, 0xdd, 0x81, 0x29, 0x2a, 0xa0, 0xba, 0x09, + 0xd1, 0xf1, 0x7e, 0x44, 0x99, 0x74, 0x8a, 0x8d, 0x4a, 0x38, 0x45, 0x04, 0xbd, 0x0d, 0x25, 0xcf, + 0xdd, 0x22, 0xcd, 0x83, 0xa6, 0x47, 0xca, 0x13, 0x8c, 0x62, 0xe6, 0xb6, 0xba, 0x25, 0x91, 0xf8, + 0x05, 0x40, 0xfd, 0xc5, 0x49, 0x75, 0x74, 0x17, 0x1e, 0x8b, 0x49, 0xb8, 0xe7, 0xfa, 0x0e, 0xdd, + 0x0e, 0x42, 0x9e, 0x64, 0x5a, 0xbc, 0x49, 0xb6, 0xde, 0x2e, 0x89, 0xa1, 0x7b, 0x6c, 0x23, 0x13, + 0x0b, 0xe7, 0xd4, 0x46, 0xb7, 0x61, 0x9a, 0xed, 0x84, 0x7a, 0xc7, 0xf3, 0xea, 0x81, 0xe7, 0x36, + 0x0f, 0xca, 0x53, 0x8c, 0xe0, 0x67, 0xa4, 0x9a, 0xae, 0x66, 0x82, 0xe9, 0x8d, 0x37, 0xf9, 0x87, + 0xd3, 0xb5, 0xd1, 0x26, 0x53, 0xdb, 0x74, 0x42, 0x37, 0x3e, 0xa0, 0xeb, 0x97, 0x3c, 0x88, 0xcb, + 0xd3, 0x3d, 0xef, 0x8f, 0x3a, 0xaa, 0xd2, 0xed, 0xe8, 0x85, 0x38, 0x4d, 0x90, 0x6e, 0xed, 0x28, + 0x6e, 0xb9, 0x7e, 0x79, 0x86, 0x71, 0x0c, 0xb5, 0x33, 0x1a, 0xb4, 0x10, 0x73, 0x18, 0x53, 0xd9, + 0xd0, 0x1f, 0xb7, 0x29, 0x07, 0x9d, 0x65, 0x88, 0x89, 0xca, 0x46, 0x02, 0x70, 0x82, 0x43, 0x8f, + 0xe5, 0x38, 0x3e, 0x28, 0x23, 0x86, 0xaa, 0xb6, 0xcb, 0xc6, 0xc6, 0x97, 0x31, 0x2d, 0x47, 0xb7, + 0x60, 0x94, 0xf8, 0xfb, 0xab, 0x61, 0xb0, 0x57, 0x3e, 0x97, 0xbf, 0x67, 0x57, 0x38, 0x0a, 0x67, + 0xe8, 0xc9, 0x05, 0x40, 0x14, 0x63, 0x49, 0x02, 0x3d, 0x80, 0x72, 0xc6, 0x8c, 0xf0, 0x09, 0x38, + 0xcf, 0x26, 0xe0, 0x0b, 0xa2, 0x6e, 0x79, 0x23, 0x07, 0xef, 0xb8, 0x07, 0x0c, 0xe7, 0x52, 0x47, + 0xdf, 0x01, 0x93, 0x7c, 0x43, 0x71, 0x7d, 0x6f, 0x54, 0xbe, 0xc0, 0xbe, 0xe6, 0x72, 0xfe, 0xe6, + 0xe4, 0x88, 0x4b, 0x17, 0x44, 0x87, 0x26, 0xf5, 0xd2, 0x08, 0x9b, 0xd4, 0xec, 0x4d, 0x98, 0x52, + 0x7c, 0x8b, 0x2d, 0x1d, 0x54, 0x81, 0x61, 0xca, 0x90, 0xe5, 0x8d, 0xbd, 0x44, 0x67, 0x8a, 0xe9, + 0xe9, 0x30, 0x2f, 0x67, 0x33, 0xe5, 0x7e, 0x48, 0x96, 0x0e, 0x62, 0xc2, 0x6f, 0x5d, 0x45, 0x6d, + 0xa6, 0x24, 0x00, 0x27, 0x38, 0xf6, 0xff, 0xe3, 0x72, 0x4f, 0xc2, 0x1c, 0x07, 0x38, 0x0e, 0x5e, + 0x82, 0xb1, 0x9d, 0x20, 0x8a, 0x29, 0x36, 0x6b, 0x63, 0x38, 0x91, 0x74, 0x6e, 0x88, 0x72, 0xac, + 0x30, 0xd0, 0x9b, 0x30, 0xd9, 0xd4, 0x1b, 0x10, 0x67, 0x99, 0x1a, 0x02, 0xa3, 0x75, 0x6c, 0xe2, + 0xa2, 0x6b, 0x30, 0xc6, 0x5e, 0x6b, 0x9a, 0x81, 0x27, 0xee, 0x77, 0xf2, 0x40, 0x1e, 0xab, 0x8b, + 0xf2, 0x63, 0xed, 0x37, 0x56, 0xd8, 0xf4, 0xce, 0x4d, 0xbb, 0x50, 0xab, 0x8b, 0x53, 0x44, 0xdd, + 0xb9, 0x6f, 0xb0, 0x52, 0x2c, 0xa0, 0xf6, 0xdf, 0x28, 0x68, 0xa3, 0x4c, 0x6f, 0x2c, 0x04, 0xd5, + 0x61, 0xf4, 0xbe, 0xe3, 0xc6, 0xae, 0xbf, 0x2d, 0xc4, 0x85, 0xe7, 0x7b, 0x1e, 0x29, 0xac, 0xd2, + 0x3d, 0x5e, 0x81, 0x1f, 0x7a, 0xe2, 0x0f, 0x96, 0x64, 0x28, 0xc5, 0xb0, 0xe3, 0xfb, 0x94, 0x62, + 0x61, 0x50, 0x8a, 0x98, 0x57, 0xe0, 0x14, 0xc5, 0x1f, 0x2c, 0xc9, 0xa0, 0xf7, 0x00, 0xe4, 0xb2, + 0x24, 0x2d, 0xf1, 0x4a, 0xf2, 0x52, 0x7f, 0xa2, 0x1b, 0xaa, 0xce, 0xd2, 0x14, 0x3d, 0x52, 0x93, + 0xff, 0x58, 0xa3, 0x67, 0xc7, 0x4c, 0xac, 0xea, 0xee, 0x0c, 0xfa, 0x76, 0xca, 0x09, 0x9c, 0x30, + 0x26, 0xad, 0xc5, 0x58, 0x0c, 0xce, 0x0b, 0x83, 0x49, 0xc5, 0x1b, 0xee, 0x1e, 0xd1, 0xb9, 0x86, + 0x20, 0x82, 0x13, 0x7a, 0xf6, 
0xcf, 0x17, 0xa1, 0x9c, 0xd7, 0x5d, 0xba, 0xe8, 0xc8, 0x03, 0x37, + 0x5e, 0xa6, 0xd2, 0x90, 0x65, 0x2e, 0xba, 0x15, 0x51, 0x8e, 0x15, 0x06, 0x9d, 0xfd, 0xc8, 0xdd, + 0x96, 0x97, 0x9a, 0xe1, 0x64, 0xf6, 0x1b, 0xac, 0x14, 0x0b, 0x28, 0xc5, 0x0b, 0x89, 0x13, 0x89, + 0x67, 0x38, 0x6d, 0x95, 0x60, 0x56, 0x8a, 0x05, 0x54, 0xd7, 0x47, 0x0c, 0xf5, 0xd1, 0x47, 0x18, + 0x43, 0x34, 0x7c, 0xba, 0x43, 0x84, 0xbe, 0x02, 0xb0, 0xe5, 0xfa, 0x6e, 0xb4, 0xc3, 0xa8, 0x8f, + 0x9c, 0x98, 0xba, 0x92, 0xa5, 0x56, 0x15, 0x15, 0xac, 0x51, 0x44, 0xaf, 0xc3, 0xb8, 0xda, 0x80, + 0xb5, 0x6a, 0x79, 0xd4, 0x7c, 0xe3, 0x49, 0xb8, 0x51, 0x15, 0xeb, 0x78, 0xf6, 0xfb, 0xe9, 0xf5, + 0x22, 0x76, 0x80, 0x36, 0xbe, 0xd6, 0xa0, 0xe3, 0x5b, 0xe8, 0x3d, 0xbe, 0xf6, 0xaf, 0x16, 0x61, + 0xda, 0x68, 0xac, 0x13, 0x0d, 0xc0, 0xb3, 0xae, 0xd3, 0x73, 0xce, 0x89, 0x89, 0xd8, 0x7f, 0x76, + 0xff, 0xad, 0xa2, 0x9f, 0x85, 0x74, 0x07, 0xf0, 0xfa, 0xe8, 0x2b, 0x50, 0xf2, 0x9c, 0x88, 0xe9, + 0x36, 0x88, 0xd8, 0x77, 0x83, 0x10, 0x4b, 0xee, 0x11, 0x4e, 0x14, 0x6b, 0x47, 0x0d, 0xa7, 0x9d, + 0x90, 0xa4, 0x07, 0x32, 0x95, 0x7d, 0xe4, 0x3b, 0xaf, 0xea, 0x04, 0x15, 0x90, 0x0e, 0x30, 0x87, + 0xa1, 0x6b, 0x30, 0x11, 0x12, 0xb6, 0x2a, 0x96, 0xa9, 0x28, 0xc7, 0x96, 0xd9, 0x70, 0x22, 0xf3, + 0x61, 0x0d, 0x86, 0x0d, 0xcc, 0x44, 0x94, 0x1f, 0xe9, 0x21, 0xca, 0x3f, 0x0f, 0xa3, 0xec, 0x87, + 0x5a, 0x01, 0x6a, 0x36, 0x6a, 0xbc, 0x18, 0x4b, 0x78, 0x7a, 0xc1, 0x8c, 0x0d, 0xb8, 0x60, 0x5e, + 0x80, 0xa9, 0xaa, 0x43, 0xf6, 0x02, 0x7f, 0xc5, 0x6f, 0xb5, 0x03, 0xd7, 0x8f, 0x51, 0x19, 0x86, + 0xd8, 0xe9, 0xc0, 0xf7, 0xf6, 0x10, 0xa5, 0x80, 0x87, 0xa8, 0x60, 0x6e, 0xff, 0x56, 0x01, 0x26, + 0xab, 0xc4, 0x23, 0x31, 0xe1, 0x57, 0x99, 0x08, 0xad, 0x02, 0xda, 0x0e, 0x9d, 0x26, 0xa9, 0x93, + 0xd0, 0x0d, 0x5a, 0xba, 0xae, 0xb3, 0xc8, 0xde, 0x13, 0xd0, 0xf5, 0x2e, 0x28, 0xce, 0xa8, 0x81, + 0xde, 0x85, 0xc9, 0x76, 0x48, 0x0c, 0x15, 0x9d, 0x95, 0x27, 0x8d, 0xd4, 0x75, 0x44, 0x2e, 0x08, + 0x1b, 0x45, 0xd8, 0x24, 0x85, 0xbe, 0x0d, 0x66, 0x82, 0xb0, 0xbd, 0xe3, 0xf8, 0x55, 0xd2, 0x26, + 0x7e, 0x8b, 0x4a, 0xfa, 0x42, 0x05, 0x71, 0xfe, 0xe8, 0xb0, 0x32, 0x73, 0x3b, 0x05, 0xc3, 0x5d, + 0xd8, 0xe8, 0x5d, 0x98, 0x6d, 0x87, 0x41, 0xdb, 0xd9, 0x66, 0x0b, 0x45, 0x08, 0x34, 0x9c, 0xfb, + 0xbc, 0x74, 0x74, 0x58, 0x99, 0xad, 0xa7, 0x81, 0xc7, 0x87, 0x95, 0x73, 0x6c, 0xa0, 0x68, 0x49, + 0x02, 0xc4, 0xdd, 0x64, 0xec, 0x6d, 0xb8, 0x50, 0x0d, 0xee, 0xfb, 0xf7, 0x9d, 0xb0, 0xb5, 0x58, + 0xaf, 0x69, 0xba, 0x83, 0x75, 0x79, 0x77, 0xe5, 0x6f, 0xd1, 0x99, 0xe7, 0x94, 0x56, 0x93, 0xcb, + 0x2f, 0xab, 0xae, 0x47, 0x72, 0x74, 0x14, 0x7f, 0xbb, 0x60, 0xb4, 0x94, 0xe0, 0xab, 0x67, 0x05, + 0x2b, 0xf7, 0x59, 0xe1, 0x1d, 0x18, 0xdb, 0x72, 0x89, 0xd7, 0xc2, 0x64, 0x4b, 0xcc, 0xcc, 0x73, + 0xf9, 0xcf, 0x6b, 0xab, 0x14, 0x53, 0xea, 0xa4, 0xf8, 0xcd, 0x77, 0x55, 0x54, 0xc6, 0x8a, 0x0c, + 0xda, 0x85, 0x19, 0x79, 0xb5, 0x92, 0x50, 0xb1, 0x89, 0x9f, 0xef, 0x75, 0x5f, 0x33, 0x89, 0xb3, + 0x09, 0xc4, 0x29, 0x32, 0xb8, 0x8b, 0x30, 0xbd, 0xea, 0xee, 0xd1, 0xe3, 0x6a, 0x88, 0x2d, 0x69, + 0x76, 0xd5, 0x65, 0xb7, 0x76, 0x56, 0x6a, 0xff, 0xa8, 0x05, 0x8f, 0x77, 0x8d, 0x8c, 0xd0, 0x5e, + 0x9c, 0xf2, 0x2c, 0xa4, 0xb5, 0x09, 0x85, 0xfe, 0xda, 0x04, 0xfb, 0x1f, 0x59, 0x70, 0x7e, 0x65, + 0xaf, 0x1d, 0x1f, 0x54, 0x5d, 0xf3, 0xe9, 0xe3, 0x0d, 0x18, 0xd9, 0x23, 0x2d, 0xb7, 0xb3, 0x27, + 0x66, 0xae, 0x22, 0x59, 0xfa, 0x1a, 0x2b, 0x3d, 0x3e, 0xac, 0x4c, 0x36, 0xe2, 0x20, 0x74, 0xb6, + 0x09, 0x2f, 0xc0, 0x02, 0x9d, 0x1d, 0x8c, 0xee, 0x87, 0xe4, 0x96, 0xbb, 0xe7, 0xca, 0xe7, 0xd2, + 0x9e, 0x1a, 0xb5, 0x79, 0x39, 0xa0, 0xf3, 0xef, 0x74, 
0x1c, 0x3f, 0x76, 0xe3, 0x03, 0xf1, 0xaa, + 0x23, 0x89, 0xe0, 0x84, 0x9e, 0xfd, 0x0d, 0x0b, 0xa6, 0x25, 0x2f, 0x59, 0x6c, 0xb5, 0x42, 0x12, + 0x45, 0x68, 0x0e, 0x0a, 0x6e, 0x5b, 0xf4, 0x12, 0x44, 0x2f, 0x0b, 0xb5, 0x3a, 0x2e, 0xb8, 0x6d, + 0x54, 0x87, 0x12, 0x7f, 0x75, 0x4d, 0x16, 0xd7, 0x40, 0x6f, 0xb7, 0xac, 0x07, 0x1b, 0xb2, 0x26, + 0x4e, 0x88, 0x48, 0xa9, 0x98, 0x9d, 0x43, 0x45, 0xf3, 0x49, 0xe8, 0x86, 0x28, 0xc7, 0x0a, 0x03, + 0x5d, 0x81, 0x31, 0x3f, 0x68, 0xf1, 0x47, 0x70, 0xbe, 0xa7, 0xd9, 0x92, 0x5d, 0x17, 0x65, 0x58, + 0x41, 0xed, 0xef, 0xb7, 0x60, 0x42, 0x7e, 0xd9, 0x80, 0x02, 0x3a, 0xdd, 0x5a, 0x89, 0x70, 0x9e, + 0x6c, 0x2d, 0x2a, 0x60, 0x33, 0x88, 0x21, 0x57, 0x17, 0x4f, 0x22, 0x57, 0xdb, 0x3f, 0x52, 0x80, + 0x29, 0xd9, 0x9d, 0x46, 0x67, 0x33, 0x22, 0x31, 0xda, 0x80, 0x92, 0xc3, 0x87, 0x9c, 0xc8, 0x15, + 0xfb, 0x4c, 0xf6, 0x85, 0xce, 0x98, 0x9f, 0x44, 0xd4, 0x59, 0x94, 0xb5, 0x71, 0x42, 0x08, 0x79, + 0x30, 0xeb, 0x07, 0x31, 0x3b, 0xf6, 0x14, 0xbc, 0xd7, 0xb3, 0x43, 0x9a, 0xfa, 0x45, 0x41, 0x7d, + 0x76, 0x3d, 0x4d, 0x05, 0x77, 0x13, 0x46, 0x2b, 0x52, 0x89, 0x54, 0xcc, 0xbf, 0xc2, 0xe9, 0xb3, + 0x90, 0xad, 0x43, 0xb2, 0x7f, 0xd9, 0x82, 0x92, 0x44, 0x3b, 0x8b, 0x17, 0xa6, 0x35, 0x18, 0x8d, + 0xd8, 0x24, 0xc8, 0xa1, 0xb1, 0x7b, 0x75, 0x9c, 0xcf, 0x57, 0x72, 0x9a, 0xf3, 0xff, 0x11, 0x96, + 0x34, 0x98, 0x16, 0x5c, 0x75, 0xff, 0x13, 0xa2, 0x05, 0x57, 0xfd, 0xc9, 0x39, 0x61, 0xfe, 0x1b, + 0xeb, 0xb3, 0xa6, 0x2a, 0xa0, 0x42, 0x67, 0x3b, 0x24, 0x5b, 0xee, 0x83, 0xb4, 0xd0, 0x59, 0x67, + 0xa5, 0x58, 0x40, 0xd1, 0x7b, 0x30, 0xd1, 0x94, 0xca, 0xe3, 0x84, 0x0d, 0x3c, 0xdb, 0x53, 0x15, + 0xaf, 0x5e, 0x6d, 0xb8, 0x81, 0xdc, 0xb2, 0x56, 0x1f, 0x1b, 0xd4, 0xcc, 0x77, 0xff, 0x62, 0xbf, + 0x77, 0xff, 0x84, 0x6e, 0xee, 0xcb, 0xb5, 0xfd, 0x63, 0x16, 0x8c, 0x70, 0x15, 0xe4, 0x60, 0x3a, + 0x5b, 0xed, 0x15, 0x2a, 0x19, 0xbb, 0xbb, 0xb4, 0x50, 0x3c, 0x4a, 0xa1, 0x35, 0x28, 0xb1, 0x1f, + 0x4c, 0x15, 0x53, 0xcc, 0xb7, 0x0c, 0xe4, 0xad, 0xea, 0x1d, 0xbc, 0x2b, 0xab, 0xe1, 0x84, 0x82, + 0xfd, 0x83, 0x45, 0xca, 0xaa, 0x12, 0x54, 0xe3, 0x04, 0xb7, 0x1e, 0xdd, 0x09, 0x5e, 0x78, 0x54, + 0x27, 0xf8, 0x36, 0x4c, 0x37, 0xb5, 0x27, 0xaf, 0x64, 0x26, 0xaf, 0xf4, 0x5c, 0x24, 0xda, 0xeb, + 0x18, 0x57, 0xc3, 0x2d, 0x9b, 0x44, 0x70, 0x9a, 0x2a, 0xfa, 0x76, 0x98, 0xe0, 0xf3, 0x2c, 0x5a, + 0x19, 0x62, 0xad, 0x7c, 0x26, 0x7f, 0xbd, 0xe8, 0x4d, 0xb0, 0x95, 0xd8, 0xd0, 0xaa, 0x63, 0x83, + 0x98, 0xfd, 0xf3, 0x63, 0x30, 0xbc, 0xb2, 0x4f, 0xfc, 0xf8, 0x0c, 0x18, 0x52, 0x13, 0xa6, 0x5c, + 0x7f, 0x3f, 0xf0, 0xf6, 0x49, 0x8b, 0xc3, 0x4f, 0x72, 0xb8, 0x3e, 0x26, 0x48, 0x4f, 0xd5, 0x0c, + 0x12, 0x38, 0x45, 0xf2, 0x51, 0xdc, 0xda, 0xaf, 0xc3, 0x08, 0x9f, 0x7b, 0x71, 0x65, 0xcf, 0x54, + 0xb0, 0xb3, 0x41, 0x14, 0xbb, 0x20, 0xd1, 0x28, 0x70, 0x8d, 0xbe, 0xa8, 0x8e, 0xde, 0x87, 0xa9, + 0x2d, 0x37, 0x8c, 0x62, 0x7a, 0xdd, 0x8e, 0x62, 0x67, 0xaf, 0xfd, 0x10, 0xb7, 0x74, 0x35, 0x0e, + 0xab, 0x06, 0x25, 0x9c, 0xa2, 0x8c, 0xb6, 0x61, 0x92, 0x5e, 0x1c, 0x93, 0xa6, 0x46, 0x4f, 0xdc, + 0x94, 0x52, 0xc3, 0xdd, 0xd2, 0x09, 0x61, 0x93, 0x2e, 0x65, 0x26, 0x4d, 0x76, 0xd1, 0x1c, 0x63, + 0x12, 0x85, 0x62, 0x26, 0xfc, 0x86, 0xc9, 0x61, 0x94, 0x27, 0x31, 0x53, 0x91, 0x92, 0xc9, 0x93, + 0x34, 0x83, 0x90, 0xaf, 0x42, 0x89, 0xd0, 0x21, 0xa4, 0x84, 0xc5, 0x63, 0xc3, 0xc2, 0x60, 0x7d, + 0x5d, 0x73, 0x9b, 0x61, 0x60, 0xea, 0x47, 0x56, 0x24, 0x25, 0x9c, 0x10, 0x45, 0xcb, 0x30, 0x12, + 0x91, 0xd0, 0x25, 0x91, 0x78, 0x76, 0xe8, 0x31, 0x8d, 0x0c, 0x8d, 0x9b, 0x90, 0xf2, 0xdf, 0x58, + 0x54, 0xa5, 0xcb, 0xcb, 0x61, 0xb7, 0x21, 0xf6, 0xd2, 0xa0, 0x2d, 0xaf, 0x45, 
0x56, 0x8a, 0x05, + 0x14, 0xbd, 0x0d, 0xa3, 0x21, 0xf1, 0x98, 0x02, 0x6e, 0x72, 0xf0, 0x45, 0xce, 0xf5, 0x79, 0xbc, + 0x1e, 0x96, 0x04, 0xd0, 0x4d, 0x40, 0x21, 0xa1, 0x32, 0x84, 0xeb, 0x6f, 0x2b, 0x03, 0x0a, 0xf1, + 0x7e, 0xf0, 0x84, 0x68, 0xff, 0x1c, 0x4e, 0x30, 0xfc, 0x38, 0x0c, 0x3c, 0x8f, 0x84, 0x38, 0xa3, + 0x1a, 0xba, 0x0e, 0xb3, 0xaa, 0xb4, 0xe6, 0x47, 0xb1, 0xe3, 0x37, 0x09, 0x7b, 0x3a, 0x28, 0x25, + 0x52, 0x11, 0x4e, 0x23, 0xe0, 0xee, 0x3a, 0xf6, 0xd7, 0xa9, 0x38, 0x43, 0x47, 0xeb, 0x0c, 0x64, + 0x81, 0xb7, 0x4c, 0x59, 0xe0, 0x62, 0xee, 0xcc, 0xe5, 0xc8, 0x01, 0x47, 0x16, 0x8c, 0x6b, 0x33, + 0x9b, 0xac, 0x59, 0xab, 0xc7, 0x9a, 0xed, 0xc0, 0x0c, 0x5d, 0xe9, 0xb7, 0x37, 0x99, 0x37, 0x45, + 0x8b, 0x2d, 0xcc, 0xc2, 0xc3, 0x2d, 0xcc, 0xb2, 0x68, 0x60, 0xe6, 0x56, 0x8a, 0x20, 0xee, 0x6a, + 0x02, 0xbd, 0x21, 0xb5, 0x51, 0x45, 0xc3, 0x30, 0x8a, 0x6b, 0x9a, 0x8e, 0x0f, 0x2b, 0x33, 0xda, + 0x87, 0xe8, 0xda, 0x27, 0xfb, 0xab, 0xf2, 0x1b, 0x39, 0xb3, 0x59, 0x80, 0x52, 0x53, 0x2d, 0x16, + 0xcb, 0xb4, 0xa5, 0x55, 0xcb, 0x01, 0x27, 0x38, 0x74, 0x8f, 0xd2, 0x2b, 0x48, 0xda, 0x96, 0x8f, + 0x5e, 0x50, 0x30, 0x83, 0xd8, 0xaf, 0x02, 0xac, 0x3c, 0x20, 0x4d, 0xbe, 0xd4, 0xf5, 0x47, 0x5d, + 0x2b, 0xff, 0x51, 0xd7, 0xfe, 0x8f, 0x16, 0x4c, 0xad, 0x2e, 0x1b, 0xd7, 0xc4, 0x79, 0x00, 0x7e, + 0x37, 0xba, 0x77, 0x6f, 0x5d, 0xbe, 0x57, 0x70, 0x95, 0xb3, 0x2a, 0xc5, 0x1a, 0x06, 0xba, 0x08, + 0x45, 0xaf, 0xe3, 0x8b, 0x2b, 0xcb, 0xe8, 0xd1, 0x61, 0xa5, 0x78, 0xab, 0xe3, 0x63, 0x5a, 0xa6, + 0x99, 0xcf, 0x15, 0x07, 0x36, 0x9f, 0xeb, 0xeb, 0x25, 0x81, 0x2a, 0x30, 0x7c, 0xff, 0xbe, 0xdb, + 0x8a, 0xca, 0xc3, 0xc9, 0x5b, 0xca, 0xbd, 0x7b, 0xb5, 0x6a, 0x84, 0x79, 0xb9, 0xfd, 0x97, 0x8a, + 0x30, 0xb3, 0xea, 0x91, 0x07, 0x0f, 0x65, 0x85, 0x3b, 0xa8, 0xc9, 0xdf, 0x9d, 0x6e, 0x29, 0xf1, + 0xb4, 0x8d, 0x1c, 0xfb, 0x0f, 0xc5, 0x7b, 0x30, 0xca, 0x6d, 0x03, 0xf8, 0x60, 0x8c, 0x5f, 0x7d, + 0x25, 0xab, 0x0b, 0xe9, 0xb1, 0x98, 0x17, 0xea, 0x38, 0x6e, 0x28, 0xa5, 0x8e, 0x56, 0x51, 0x8a, + 0x25, 0xc9, 0xb9, 0xcf, 0xc3, 0x84, 0x8e, 0x79, 0x22, 0x8b, 0xa9, 0xbf, 0x6c, 0xc1, 0xb9, 0x55, + 0x2f, 0x68, 0xee, 0xa6, 0xec, 0x2f, 0x5f, 0x87, 0x71, 0xca, 0x34, 0x22, 0xc3, 0xf2, 0xdc, 0xf0, + 0x45, 0x10, 0x20, 0xac, 0xe3, 0x69, 0xd5, 0xee, 0xdc, 0xa9, 0x55, 0xb3, 0x5c, 0x18, 0x04, 0x08, + 0xeb, 0x78, 0xf6, 0x6f, 0x58, 0xf0, 0xd4, 0xf5, 0xe5, 0x95, 0xc4, 0x04, 0xb9, 0xcb, 0x8b, 0x82, + 0x5e, 0x39, 0x5a, 0x5a, 0x57, 0x92, 0x2b, 0x47, 0x95, 0xf5, 0x42, 0x40, 0x3f, 0x29, 0x1e, 0x42, + 0x3f, 0x69, 0xc1, 0xb9, 0xeb, 0x6e, 0x4c, 0xcf, 0x80, 0xb4, 0x3d, 0x3f, 0x3d, 0x04, 0x22, 0x37, + 0x0e, 0xc2, 0x83, 0xb4, 0x3d, 0x3f, 0x56, 0x10, 0xac, 0x61, 0xf1, 0x96, 0xf7, 0xdd, 0x88, 0xf6, + 0xb4, 0x60, 0xea, 0x3d, 0xb0, 0x28, 0xc7, 0x0a, 0x83, 0x7e, 0x58, 0xcb, 0x0d, 0x99, 0xdc, 0x7a, + 0x20, 0xb6, 0xb3, 0xfa, 0xb0, 0xaa, 0x04, 0xe0, 0x04, 0xc7, 0xfe, 0x51, 0x0b, 0x2e, 0x5c, 0xf7, + 0x3a, 0x51, 0x4c, 0xc2, 0xad, 0xc8, 0xe8, 0xec, 0xab, 0x50, 0x22, 0xf2, 0x6e, 0x28, 0xfa, 0xaa, + 0xa4, 0x19, 0x75, 0x69, 0xe4, 0xce, 0x04, 0x0a, 0x6f, 0x00, 0x63, 0xe6, 0x93, 0x19, 0xe1, 0xfe, + 0x4c, 0x01, 0x26, 0x6f, 0x6c, 0x6c, 0xd4, 0xaf, 0x93, 0x58, 0xb0, 0xcc, 0xfe, 0x7a, 0x4d, 0xac, + 0xa9, 0x67, 0x7a, 0x49, 0xe0, 0x9d, 0xd8, 0xf5, 0xe6, 0xb9, 0xf7, 0xda, 0x7c, 0xcd, 0x8f, 0x6f, + 0x87, 0x8d, 0x38, 0x74, 0xfd, 0xed, 0x4c, 0x85, 0x8e, 0x64, 0xec, 0xc5, 0x3c, 0xc6, 0x8e, 0x5e, + 0x85, 0x11, 0xe6, 0x3e, 0x27, 0x65, 0xe1, 0x27, 0x94, 0x00, 0xcb, 0x4a, 0x8f, 0x0f, 0x2b, 0xa5, + 0x3b, 0xb8, 0xc6, 0xff, 0x60, 0x81, 0x8a, 0xee, 0xc0, 0xf8, 0x4e, 0x1c, 0xb7, 0x6f, 0x10, 0xa7, + 0x45, 
0x42, 0xc9, 0x1d, 0x2e, 0x65, 0x71, 0x07, 0x3a, 0x08, 0x1c, 0x2d, 0xd9, 0x50, 0x49, 0x59, + 0x84, 0x75, 0x3a, 0x76, 0x03, 0x20, 0x81, 0x9d, 0xd2, 0x65, 0xd6, 0xfe, 0x7d, 0x0b, 0x46, 0xb9, + 0x27, 0x43, 0x88, 0xbe, 0x00, 0x43, 0xe4, 0x01, 0x69, 0x0a, 0x31, 0x25, 0xb3, 0xc3, 0xc9, 0x29, + 0xc7, 0x55, 0xb3, 0xf4, 0x3f, 0x66, 0xb5, 0xd0, 0x0d, 0x18, 0xa5, 0xbd, 0xbd, 0xae, 0xdc, 0x3a, + 0x9e, 0xce, 0xfb, 0x62, 0x35, 0xed, 0xfc, 0x60, 0x14, 0x45, 0x58, 0x56, 0x67, 0x6a, 0xc6, 0x66, + 0xbb, 0x41, 0x19, 0x58, 0xdc, 0x4b, 0x09, 0xb0, 0xb1, 0x5c, 0xe7, 0x48, 0x82, 0x1a, 0x57, 0x33, + 0xca, 0x42, 0x9c, 0x10, 0xb1, 0x37, 0xa0, 0x44, 0x27, 0x75, 0xd1, 0x73, 0x9d, 0xde, 0x1a, 0xce, + 0x17, 0xa1, 0x24, 0xb5, 0x8d, 0x91, 0xf0, 0xb5, 0x60, 0x54, 0xa5, 0x32, 0x32, 0xc2, 0x09, 0xdc, + 0xde, 0x82, 0xf3, 0xec, 0xe9, 0xde, 0x89, 0x77, 0x8c, 0x3d, 0xd6, 0x7f, 0x31, 0xbf, 0x24, 0xa4, + 0x7e, 0x3e, 0x33, 0x65, 0xcd, 0x38, 0x7c, 0x42, 0x52, 0x4c, 0x6e, 0x00, 0xf6, 0x1f, 0x0e, 0xc1, + 0x13, 0xb5, 0x46, 0xbe, 0x93, 0xcb, 0x35, 0x98, 0xe0, 0x32, 0x01, 0x5d, 0xda, 0x8e, 0x27, 0xda, + 0x55, 0x0f, 0x5b, 0x1b, 0x1a, 0x0c, 0x1b, 0x98, 0xe8, 0x29, 0x28, 0xba, 0x1f, 0xf8, 0x69, 0x4b, + 0xd0, 0xda, 0x3b, 0xeb, 0x98, 0x96, 0x53, 0x30, 0x15, 0x2f, 0x38, 0x2b, 0x55, 0x60, 0x25, 0x62, + 0xbc, 0x05, 0x53, 0x6e, 0xd4, 0x8c, 0xdc, 0x9a, 0x4f, 0xf9, 0x4c, 0xe2, 0x20, 0x95, 0xdc, 0x48, + 0x69, 0xa7, 0x15, 0x14, 0xa7, 0xb0, 0x35, 0xbe, 0x3e, 0x3c, 0xb0, 0x88, 0xd2, 0xd7, 0xf9, 0x80, + 0x4a, 0x5f, 0x6d, 0xf6, 0x75, 0x11, 0xb3, 0x4a, 0x13, 0xd2, 0x17, 0xff, 0xe0, 0x08, 0x4b, 0x18, + 0x15, 0xf7, 0x9b, 0x3b, 0x4e, 0x7b, 0xb1, 0x13, 0xef, 0x54, 0xdd, 0xa8, 0x19, 0xec, 0x93, 0xf0, + 0x80, 0xdd, 0xd4, 0xc6, 0x12, 0x71, 0x5f, 0x01, 0x96, 0x6f, 0x2c, 0xd6, 0x29, 0x26, 0xee, 0xae, + 0x63, 0xaa, 0xac, 0xe0, 0x34, 0x5c, 0x55, 0x16, 0x61, 0x5a, 0x36, 0xd3, 0x20, 0x11, 0x3b, 0x23, + 0xc6, 0x59, 0xc7, 0x94, 0xeb, 0xa2, 0x28, 0x56, 0xdd, 0x4a, 0xe3, 0xa3, 0x37, 0x60, 0xd2, 0xf5, + 0xdd, 0xd8, 0x75, 0xe2, 0x20, 0x64, 0x27, 0x2c, 0xbf, 0x94, 0xb1, 0x17, 0xb8, 0x9a, 0x0e, 0xc0, + 0x26, 0x9e, 0xfd, 0x07, 0x43, 0x30, 0xcb, 0xa6, 0xed, 0x9b, 0x2b, 0xec, 0x13, 0xb3, 0xc2, 0xee, + 0x74, 0xaf, 0xb0, 0xd3, 0x10, 0x77, 0x3f, 0xce, 0x65, 0xf6, 0x3e, 0x94, 0x94, 0x31, 0xaf, 0xb4, + 0x47, 0xb7, 0x72, 0xec, 0xd1, 0xfb, 0x4b, 0x1f, 0xf2, 0xcd, 0xb0, 0x98, 0xf9, 0x66, 0xf8, 0x77, + 0x2c, 0x48, 0x6c, 0x1a, 0xd1, 0x0d, 0x28, 0xb5, 0x03, 0x66, 0x37, 0x10, 0x4a, 0x63, 0x9c, 0x27, + 0x32, 0x0f, 0x2a, 0x7e, 0x28, 0xf2, 0xf1, 0xab, 0xcb, 0x1a, 0x38, 0xa9, 0x8c, 0x96, 0x60, 0xb4, + 0x1d, 0x92, 0x46, 0xcc, 0xbc, 0xf2, 0xfa, 0xd2, 0xe1, 0x6b, 0x84, 0xe3, 0x63, 0x59, 0xd1, 0xfe, + 0x59, 0x0b, 0x80, 0x3f, 0xcb, 0x39, 0xfe, 0x36, 0x39, 0x03, 0x55, 0x63, 0x15, 0x86, 0xa2, 0x36, + 0x69, 0xf6, 0xb2, 0xe8, 0x48, 0xfa, 0xd3, 0x68, 0x93, 0x66, 0x32, 0xe0, 0xf4, 0x1f, 0x66, 0xb5, + 0xed, 0xef, 0x01, 0x98, 0x4a, 0xd0, 0x6a, 0x31, 0xd9, 0x43, 0x2f, 0x1b, 0x3e, 0x4f, 0x17, 0x53, + 0x3e, 0x4f, 0x25, 0x86, 0xad, 0x69, 0xb5, 0xde, 0x87, 0xe2, 0x9e, 0xf3, 0x40, 0xa8, 0x2d, 0x5e, + 0xec, 0xdd, 0x0d, 0x4a, 0x7f, 0x7e, 0xcd, 0x79, 0xc0, 0xef, 0x4c, 0x2f, 0xca, 0x05, 0xb2, 0xe6, + 0x3c, 0x38, 0xe6, 0x76, 0x1b, 0x8c, 0x49, 0xdd, 0x72, 0xa3, 0xf8, 0x6b, 0xff, 0x25, 0xf9, 0xcf, + 0x96, 0x1d, 0x6d, 0x84, 0xb5, 0xe5, 0xfa, 0xe2, 0x91, 0x6a, 0xa0, 0xb6, 0x5c, 0x3f, 0xdd, 0x96, + 0xeb, 0x0f, 0xd0, 0x96, 0xeb, 0xa3, 0x0f, 0x61, 0x54, 0x3c, 0x08, 0x33, 0x63, 0x6d, 0x53, 0x25, + 0x92, 0xd7, 0x9e, 0x78, 0x4f, 0xe6, 0x6d, 0x2e, 0xc8, 0x3b, 0xa1, 0x28, 0xed, 0xdb, 0xae, 0x6c, + 0x10, 0xfd, 0x2d, 0x0b, 0xa6, 
0xc4, 0x6f, 0x4c, 0x3e, 0xe8, 0x90, 0x28, 0x16, 0xb2, 0xe7, 0xe7, + 0x06, 0xef, 0x83, 0xa8, 0xc8, 0xbb, 0xf2, 0x39, 0xc9, 0x66, 0x4d, 0x60, 0xdf, 0x1e, 0xa5, 0x7a, + 0x81, 0xfe, 0x89, 0x05, 0xe7, 0xf7, 0x9c, 0x07, 0xbc, 0x45, 0x5e, 0x86, 0x9d, 0xd8, 0x0d, 0x84, + 0xf1, 0xf9, 0x17, 0x06, 0x9b, 0xfe, 0xae, 0xea, 0xbc, 0x93, 0xd2, 0x4e, 0xf5, 0x7c, 0x16, 0x4a, + 0xdf, 0xae, 0x66, 0xf6, 0x6b, 0x6e, 0x0b, 0xc6, 0xe4, 0x7a, 0xcb, 0xb8, 0x79, 0x57, 0x75, 0xc1, + 0xfa, 0xc4, 0xef, 0xf1, 0xda, 0x4d, 0x9d, 0xb5, 0x23, 0xd6, 0xda, 0x23, 0x6d, 0xe7, 0x7d, 0x98, + 0xd0, 0xd7, 0xd8, 0x23, 0x6d, 0xeb, 0x03, 0x38, 0x97, 0xb1, 0x96, 0x1e, 0x69, 0x93, 0xf7, 0xe1, + 0x62, 0xee, 0xfa, 0x78, 0x94, 0x0d, 0xdb, 0x3f, 0x63, 0xe9, 0x7c, 0xf0, 0x0c, 0xf4, 0xbd, 0xcb, + 0xa6, 0xbe, 0xf7, 0x52, 0xef, 0x9d, 0x93, 0xa3, 0xf4, 0x7d, 0x4f, 0xef, 0x34, 0xe5, 0xea, 0xe8, + 0x6d, 0x18, 0xf1, 0x68, 0x89, 0xb4, 0x44, 0xb0, 0xfb, 0xef, 0xc8, 0x44, 0x96, 0x62, 0xe5, 0x11, + 0x16, 0x14, 0xec, 0x5f, 0xb0, 0x60, 0xe8, 0x0c, 0x46, 0x02, 0x9b, 0x23, 0xf1, 0x72, 0x2e, 0x69, + 0x11, 0x06, 0x67, 0x1e, 0x3b, 0xf7, 0x57, 0x64, 0xa8, 0x9f, 0x9c, 0x81, 0xf9, 0xbf, 0x05, 0x18, + 0xa7, 0x4d, 0x49, 0x93, 0xb9, 0x37, 0x61, 0xd2, 0x73, 0x36, 0x89, 0x27, 0x1f, 0x0d, 0xd3, 0x0a, + 0x93, 0x5b, 0x3a, 0x10, 0x9b, 0xb8, 0xb4, 0xf2, 0x96, 0xfe, 0x7e, 0x2a, 0xe4, 0x17, 0x55, 0xd9, + 0x78, 0x5c, 0xc5, 0x26, 0x2e, 0xbd, 0xbb, 0xdf, 0x77, 0xe2, 0xe6, 0x8e, 0x50, 0xa6, 0xa8, 0xee, + 0xde, 0xa3, 0x85, 0x98, 0xc3, 0xa8, 0x00, 0x27, 0x57, 0xe7, 0x5d, 0x7a, 0x33, 0x0c, 0x7c, 0x21, + 0x1e, 0x2b, 0x01, 0x0e, 0x9b, 0x60, 0x9c, 0xc6, 0xcf, 0x70, 0x7e, 0x1e, 0x66, 0x06, 0x81, 0x03, + 0x38, 0x3f, 0xa3, 0x3a, 0x9c, 0x77, 0xfd, 0xa6, 0xd7, 0x69, 0x91, 0x3b, 0x3e, 0x97, 0xee, 0x3c, + 0xf7, 0x43, 0xd2, 0x12, 0x02, 0xb4, 0xb2, 0xdd, 0xac, 0x65, 0xe0, 0xe0, 0xcc, 0x9a, 0xf6, 0x5f, + 0x80, 0x73, 0xb7, 0x02, 0xa7, 0xb5, 0xe4, 0x78, 0x8e, 0xdf, 0x24, 0x61, 0xcd, 0xdf, 0xee, 0x6b, + 0x92, 0xa4, 0x1b, 0x10, 0x15, 0xfa, 0x19, 0x10, 0xd9, 0x3b, 0x80, 0xf4, 0x06, 0x84, 0x21, 0x2c, + 0x86, 0x51, 0x97, 0x37, 0x25, 0x96, 0xff, 0x73, 0xd9, 0xd2, 0x75, 0x57, 0xcf, 0x34, 0x13, 0x4f, + 0x5e, 0x80, 0x25, 0x21, 0xfb, 0x1a, 0x64, 0x3a, 0xbf, 0xf5, 0x57, 0xdb, 0xd8, 0xaf, 0xc3, 0x2c, + 0xab, 0x79, 0x32, 0x95, 0x82, 0xfd, 0xd7, 0x2c, 0x98, 0x5e, 0x4f, 0x85, 0x2b, 0x78, 0x96, 0x3d, + 0xec, 0x65, 0xe8, 0xdd, 0x1b, 0xac, 0x14, 0x0b, 0xe8, 0xa9, 0xeb, 0xf7, 0xfe, 0xd4, 0x82, 0x92, + 0x8a, 0x84, 0x72, 0x06, 0x42, 0xed, 0xb2, 0x21, 0xd4, 0x66, 0xea, 0x9d, 0x54, 0x77, 0xf2, 0x64, + 0x5a, 0x74, 0x53, 0x39, 0xde, 0xf7, 0x50, 0x39, 0x25, 0x64, 0xb8, 0x9b, 0xf6, 0x94, 0xe9, 0x9d, + 0x2f, 0x5d, 0xf1, 0x99, 0x4d, 0x90, 0xc2, 0xfd, 0x84, 0xd8, 0x04, 0xa9, 0xfe, 0xe4, 0x70, 0xbf, + 0xba, 0xd6, 0x65, 0x76, 0x2a, 0x7c, 0x2b, 0xb3, 0x9b, 0x67, 0x7b, 0x53, 0xc5, 0xbb, 0xa8, 0x08, + 0x3b, 0x78, 0x51, 0x7a, 0xcc, 0x18, 0x99, 0xf8, 0xc7, 0xa3, 0xd6, 0x24, 0x55, 0xec, 0x1b, 0x30, + 0x9d, 0x1a, 0x30, 0xf4, 0x3a, 0x0c, 0xb7, 0x77, 0x9c, 0x88, 0xa4, 0xec, 0x20, 0x87, 0xeb, 0xb4, + 0xf0, 0xf8, 0xb0, 0x32, 0xa5, 0x2a, 0xb0, 0x12, 0xcc, 0xb1, 0xed, 0xff, 0x69, 0xc1, 0xd0, 0x7a, + 0xd0, 0x3a, 0x8b, 0xc5, 0xf4, 0x96, 0xb1, 0x98, 0x9e, 0xcc, 0x8b, 0xf9, 0x95, 0xbb, 0x8e, 0x56, + 0x53, 0xeb, 0xe8, 0x52, 0x2e, 0x85, 0xde, 0x4b, 0x68, 0x0f, 0xc6, 0x59, 0x24, 0x31, 0x61, 0x97, + 0xf9, 0xaa, 0x71, 0xbf, 0xaa, 0xa4, 0xee, 0x57, 0xd3, 0x1a, 0xaa, 0x76, 0xcb, 0x7a, 0x1e, 0x46, + 0x85, 0x6d, 0x60, 0xda, 0x43, 0x40, 0xe0, 0x62, 0x09, 0xb7, 0x7f, 0xac, 0x08, 0x46, 0xe4, 0x32, + 0xf4, 0xcb, 0x16, 0xcc, 0x87, 0xdc, 0xe5, 0xb2, 0x55, 
0xed, 0x84, 0xae, 0xbf, 0xdd, 0x68, 0xee, + 0x90, 0x56, 0xc7, 0x73, 0xfd, 0xed, 0xda, 0xb6, 0x1f, 0xa8, 0xe2, 0x95, 0x07, 0xa4, 0xd9, 0x61, + 0x6f, 0x2e, 0x7d, 0xc2, 0xa4, 0x29, 0xdb, 0x9b, 0xab, 0x47, 0x87, 0x95, 0x79, 0x7c, 0x22, 0xda, + 0xf8, 0x84, 0x7d, 0x41, 0xbf, 0x61, 0xc1, 0x02, 0x0f, 0xe8, 0x35, 0x78, 0xff, 0x7b, 0xdc, 0x46, + 0xeb, 0x92, 0x54, 0x42, 0x64, 0x83, 0x84, 0x7b, 0x4b, 0x6f, 0x88, 0x01, 0x5d, 0xa8, 0x9f, 0xac, + 0x2d, 0x7c, 0xd2, 0xce, 0xd9, 0xff, 0xa6, 0x08, 0x93, 0x74, 0x14, 0x93, 0x30, 0x23, 0xaf, 0x1b, + 0x4b, 0xe2, 0xe9, 0xd4, 0x92, 0x98, 0x35, 0x90, 0x4f, 0x27, 0xc2, 0x48, 0x04, 0xb3, 0x9e, 0x13, + 0xc5, 0x37, 0x88, 0x13, 0xc6, 0x9b, 0xc4, 0xe1, 0x36, 0x29, 0xc5, 0x13, 0xdb, 0xcf, 0x28, 0xf5, + 0xd7, 0xad, 0x34, 0x31, 0xdc, 0x4d, 0x1f, 0xed, 0x03, 0x62, 0x86, 0x35, 0xa1, 0xe3, 0x47, 0xfc, + 0x5b, 0x5c, 0xf1, 0x1e, 0x73, 0xb2, 0x56, 0xe7, 0x44, 0xab, 0xe8, 0x56, 0x17, 0x35, 0x9c, 0xd1, + 0x82, 0x66, 0x30, 0x35, 0x3c, 0xa8, 0xc1, 0xd4, 0x48, 0x1f, 0x37, 0x9c, 0x3d, 0x98, 0x11, 0xb3, + 0xb2, 0xe5, 0x6e, 0x8b, 0x43, 0xfa, 0xcb, 0x29, 0x83, 0x4a, 0x6b, 0x70, 0xab, 0x98, 0x3e, 0xd6, + 0x94, 0xf6, 0x77, 0xc2, 0x39, 0xda, 0x9c, 0xe9, 0x34, 0x12, 0x21, 0x02, 0xd3, 0xbb, 0x9d, 0x4d, + 0xe2, 0x91, 0x58, 0x96, 0x89, 0x46, 0x33, 0xc5, 0x7e, 0xb3, 0x76, 0x22, 0x5b, 0xde, 0x34, 0x49, + 0xe0, 0x34, 0x4d, 0xfb, 0x27, 0x2c, 0x60, 0xa6, 0xd9, 0x67, 0x70, 0xfc, 0x7d, 0xd1, 0x3c, 0xfe, + 0xca, 0x79, 0x1c, 0x28, 0xe7, 0xe4, 0x7b, 0x8d, 0x4f, 0x4b, 0x3d, 0x0c, 0x1e, 0x1c, 0x48, 0xd9, + 0xbf, 0xbf, 0xc4, 0xf5, 0x7f, 0x2c, 0xbe, 0x21, 0x95, 0x07, 0x3a, 0xfa, 0x2e, 0x18, 0x6b, 0x3a, + 0x6d, 0xa7, 0xc9, 0x43, 0x46, 0xe6, 0x6a, 0x7f, 0x8c, 0x4a, 0xf3, 0xcb, 0xa2, 0x06, 0xd7, 0x66, + 0x7c, 0x56, 0x7e, 0xa5, 0x2c, 0xee, 0xab, 0xc1, 0x50, 0x4d, 0xce, 0xed, 0xc2, 0xa4, 0x41, 0xec, + 0x91, 0x5e, 0x7d, 0xbf, 0x8b, 0x1f, 0x17, 0xea, 0xc6, 0xb2, 0x07, 0xb3, 0xbe, 0xf6, 0x9f, 0x32, + 0x47, 0x29, 0x4e, 0x7f, 0xba, 0xdf, 0x81, 0xc0, 0x38, 0xa9, 0x66, 0x7a, 0x9e, 0x22, 0x83, 0xbb, + 0x29, 0xdb, 0x7f, 0xcf, 0x82, 0xc7, 0x75, 0x44, 0x2d, 0x38, 0x40, 0x3f, 0x7d, 0x72, 0x15, 0xc6, + 0x82, 0x36, 0x09, 0x9d, 0xe4, 0x4e, 0x76, 0x45, 0x0e, 0xfa, 0x6d, 0x51, 0x7e, 0x7c, 0x58, 0x39, + 0xaf, 0x53, 0x97, 0xe5, 0x58, 0xd5, 0x44, 0x36, 0x8c, 0xb0, 0xc1, 0x88, 0x44, 0xe0, 0x06, 0x66, + 0x13, 0xc7, 0x9e, 0x56, 0x23, 0x2c, 0x20, 0xf6, 0xf7, 0x58, 0x7c, 0x61, 0xe9, 0x5d, 0x47, 0x1f, + 0xc0, 0xcc, 0x1e, 0xbd, 0xbe, 0xad, 0x3c, 0x68, 0x87, 0x5c, 0x8d, 0x2e, 0xc7, 0xe9, 0xc5, 0x7e, + 0xe3, 0xa4, 0x7d, 0x64, 0x62, 0x39, 0xb5, 0x96, 0x22, 0x86, 0xbb, 0xc8, 0xdb, 0x7f, 0x5c, 0xe0, + 0x3b, 0x91, 0x49, 0x75, 0xcf, 0xc3, 0x68, 0x3b, 0x68, 0x2d, 0xd7, 0xaa, 0x58, 0x8c, 0x90, 0x62, + 0x57, 0x75, 0x5e, 0x8c, 0x25, 0x1c, 0x5d, 0x05, 0x20, 0x0f, 0x62, 0x12, 0xfa, 0x8e, 0xa7, 0x0c, + 0x3f, 0x94, 0xf0, 0xb4, 0xa2, 0x20, 0x58, 0xc3, 0xa2, 0x75, 0xda, 0x61, 0xb0, 0xef, 0xb6, 0x98, + 0x6b, 0x5b, 0xd1, 0xac, 0x53, 0x57, 0x10, 0xac, 0x61, 0xd1, 0xab, 0x72, 0xc7, 0x8f, 0xf8, 0x01, + 0xe8, 0x6c, 0x8a, 0x58, 0x67, 0x63, 0xc9, 0x55, 0xf9, 0x8e, 0x0e, 0xc4, 0x26, 0x2e, 0x5a, 0x84, + 0x91, 0xd8, 0x61, 0xe6, 0x0c, 0xc3, 0xf9, 0x36, 0x70, 0x1b, 0x14, 0x43, 0x8f, 0x21, 0x48, 0x2b, + 0x60, 0x51, 0x11, 0xbd, 0x2b, 0x59, 0x30, 0x67, 0xc9, 0xc2, 0xf8, 0x34, 0x77, 0xd9, 0xea, 0xec, + 0x5b, 0xe7, 0xc1, 0xc2, 0xa8, 0xd5, 0xa0, 0x65, 0x7f, 0x77, 0x09, 0x20, 0x91, 0xf6, 0xd0, 0x87, + 0x5d, 0x2c, 0xe2, 0xa5, 0xde, 0xf2, 0xe1, 0xe9, 0xf1, 0x07, 0xf4, 0xbd, 0x16, 0x8c, 0x3b, 0x9e, + 0x17, 0x34, 0x9d, 0x98, 0x8d, 0x72, 0xa1, 0x37, 0x8b, 0x12, 0xed, 0x2f, 0x26, 
0x35, 0x78, 0x17, + 0x5e, 0x95, 0x96, 0x0a, 0x1a, 0xa4, 0x6f, 0x2f, 0xf4, 0x86, 0xd1, 0x67, 0xe5, 0x25, 0x80, 0x2f, + 0x8f, 0xb9, 0xf4, 0x25, 0xa0, 0xc4, 0xb8, 0xb1, 0x26, 0xff, 0xa3, 0x3b, 0x46, 0x50, 0xb0, 0xa1, + 0xfc, 0xf8, 0x07, 0x86, 0xd0, 0xd3, 0x2f, 0x1e, 0x18, 0xaa, 0xeb, 0x4e, 0x38, 0xc3, 0xf9, 0x41, + 0x42, 0x34, 0xe9, 0xba, 0x8f, 0x03, 0xce, 0xfb, 0x30, 0xdd, 0x32, 0x8f, 0x5b, 0xb1, 0x9a, 0x9e, + 0xcb, 0xa3, 0x9b, 0x3a, 0x9d, 0x93, 0x03, 0x36, 0x05, 0xc0, 0x69, 0xc2, 0xa8, 0xce, 0xdd, 0xa1, + 0x6a, 0xfe, 0x56, 0x20, 0x8c, 0x98, 0xed, 0xdc, 0xb9, 0x3c, 0x88, 0x62, 0xb2, 0x47, 0x31, 0x93, + 0x73, 0x74, 0x5d, 0xd4, 0xc5, 0x8a, 0x0a, 0x7a, 0x1b, 0x46, 0x98, 0x8f, 0x6a, 0x54, 0x1e, 0xcb, + 0xd7, 0x03, 0x9a, 0xe1, 0x15, 0x92, 0x4d, 0xc5, 0xfe, 0x46, 0x58, 0x50, 0x40, 0x37, 0x64, 0x0c, + 0x96, 0xa8, 0xe6, 0xdf, 0x89, 0x08, 0x8b, 0xc1, 0x52, 0x5a, 0xfa, 0x74, 0x12, 0x5e, 0x85, 0x97, + 0x67, 0x46, 0x0b, 0x36, 0x6a, 0x52, 0x79, 0x45, 0xfc, 0x97, 0x41, 0x88, 0xcb, 0x90, 0xdf, 0x3d, + 0x33, 0x50, 0x71, 0x32, 0x9c, 0x77, 0x4d, 0x12, 0x38, 0x4d, 0xf3, 0x4c, 0x8f, 0xcf, 0x39, 0x1f, + 0x66, 0xd2, 0x1b, 0xeb, 0x91, 0x1e, 0xd7, 0xbf, 0x3f, 0x04, 0x53, 0xe6, 0x42, 0x40, 0x0b, 0x50, + 0x12, 0x44, 0x54, 0x3c, 0x46, 0xb5, 0xb6, 0xd7, 0x24, 0x00, 0x27, 0x38, 0x2c, 0x1e, 0x25, 0xab, + 0xae, 0xd9, 0x01, 0x26, 0xf1, 0x28, 0x15, 0x04, 0x6b, 0x58, 0x54, 0x88, 0xde, 0x0c, 0x82, 0x58, + 0x1d, 0x05, 0x6a, 0xb5, 0x2c, 0xb1, 0x52, 0x2c, 0xa0, 0xf4, 0x08, 0xd8, 0x25, 0xa1, 0x4f, 0x3c, + 0x53, 0x93, 0xa9, 0x8e, 0x80, 0x9b, 0x3a, 0x10, 0x9b, 0xb8, 0xf4, 0x48, 0x0b, 0x22, 0xb6, 0xfc, + 0x84, 0xa8, 0x9e, 0xd8, 0x55, 0x36, 0xb8, 0x8f, 0xb6, 0x84, 0xa3, 0x2f, 0xc3, 0xe3, 0xca, 0xa5, + 0x1a, 0x73, 0xcd, 0xb0, 0x6c, 0x71, 0xc4, 0xb8, 0x59, 0x3f, 0xbe, 0x9c, 0x8d, 0x86, 0xf3, 0xea, + 0xa3, 0xb7, 0x60, 0x4a, 0x88, 0xc0, 0x92, 0xe2, 0xa8, 0x69, 0xac, 0x70, 0xd3, 0x80, 0xe2, 0x14, + 0x36, 0xaa, 0xc2, 0x0c, 0x2d, 0x61, 0x52, 0xa8, 0xa4, 0xc0, 0x5d, 0xc3, 0xd5, 0x59, 0x7f, 0x33, + 0x05, 0xc7, 0x5d, 0x35, 0xd0, 0x22, 0x4c, 0x73, 0x19, 0x85, 0xde, 0x29, 0xd9, 0x3c, 0x08, 0xdf, + 0x02, 0xb5, 0x11, 0x6e, 0x9b, 0x60, 0x9c, 0xc6, 0x47, 0xd7, 0x60, 0xc2, 0x09, 0x9b, 0x3b, 0x6e, + 0x4c, 0x9a, 0x71, 0x27, 0xe4, 0x4e, 0x07, 0x9a, 0xb5, 0xc7, 0xa2, 0x06, 0xc3, 0x06, 0xa6, 0xfd, + 0x21, 0x9c, 0xcb, 0x70, 0x4b, 0xa2, 0x0b, 0xc7, 0x69, 0xbb, 0xf2, 0x9b, 0x52, 0x16, 0x92, 0x8b, + 0xf5, 0x9a, 0xfc, 0x1a, 0x0d, 0x8b, 0xae, 0x4e, 0xa6, 0x12, 0xd7, 0x22, 0x85, 0xab, 0xd5, 0xb9, + 0x2a, 0x01, 0x38, 0xc1, 0xb1, 0x7f, 0x1d, 0x40, 0x53, 0xe8, 0x0c, 0x60, 0x1f, 0x77, 0x0d, 0x26, + 0x64, 0x78, 0x7b, 0x2d, 0xac, 0xb2, 0xfa, 0xcc, 0xeb, 0x1a, 0x0c, 0x1b, 0x98, 0xb4, 0x6f, 0xbe, + 0x0a, 0x0a, 0x9d, 0xb2, 0xc7, 0x4c, 0x42, 0x42, 0x27, 0x38, 0xe8, 0x25, 0x18, 0x8b, 0x88, 0xb7, + 0x75, 0xcb, 0xf5, 0x77, 0xc5, 0xc2, 0x56, 0x5c, 0xb8, 0x21, 0xca, 0xb1, 0xc2, 0x40, 0x4b, 0x50, + 0xec, 0xb8, 0x2d, 0xb1, 0x94, 0xe5, 0x81, 0x5f, 0xbc, 0x53, 0xab, 0x1e, 0x1f, 0x56, 0x9e, 0xce, + 0x8b, 0xda, 0x4f, 0xaf, 0xf6, 0xd1, 0x3c, 0xdd, 0x7e, 0xb4, 0x72, 0xd6, 0xdb, 0xc0, 0xc8, 0x09, + 0xdf, 0x06, 0xae, 0x02, 0x88, 0xaf, 0x96, 0x6b, 0xb9, 0x98, 0xcc, 0xda, 0x75, 0x05, 0xc1, 0x1a, + 0x16, 0x8a, 0x60, 0xb6, 0x19, 0x12, 0x47, 0xde, 0xa1, 0xb9, 0x83, 0xcd, 0xd8, 0xc3, 0x2b, 0x08, + 0x96, 0xd3, 0xc4, 0x70, 0x37, 0x7d, 0x14, 0xc0, 0x6c, 0x4b, 0x78, 0xf0, 0x27, 0x8d, 0x96, 0x4e, + 0xee, 0xd5, 0xc3, 0x0c, 0x72, 0xd2, 0x84, 0x70, 0x37, 0x6d, 0xf4, 0x15, 0x98, 0x93, 0x85, 0xdd, + 0x41, 0x13, 0xd8, 0x76, 0x29, 0x2e, 0x5d, 0x3a, 0x3a, 0xac, 0xcc, 0x55, 0x73, 0xb1, 0x70, 0x0f, + 0x0a, 
0x08, 0xc3, 0x08, 0x7b, 0x4b, 0x8a, 0xca, 0xe3, 0xec, 0x9c, 0x7b, 0x21, 0x5f, 0x19, 0x40, + 0xd7, 0xfa, 0x3c, 0x7b, 0x87, 0x12, 0x26, 0xe5, 0xc9, 0xb3, 0x1c, 0x2b, 0xc4, 0x82, 0x12, 0xda, + 0x82, 0x71, 0xc7, 0xf7, 0x83, 0xd8, 0xe1, 0x22, 0xd4, 0x44, 0xbe, 0xec, 0xa7, 0x11, 0x5e, 0x4c, + 0x6a, 0x70, 0xea, 0xca, 0x4a, 0x55, 0x83, 0x60, 0x9d, 0x30, 0xba, 0x0f, 0xd3, 0xc1, 0x7d, 0xca, + 0x1c, 0xa5, 0x96, 0x22, 0x2a, 0x4f, 0xb2, 0xb6, 0x5e, 0x1b, 0x50, 0x4f, 0x6b, 0x54, 0xd6, 0xb8, + 0x96, 0x49, 0x14, 0xa7, 0x5b, 0x41, 0xf3, 0x86, 0xb6, 0x7a, 0x2a, 0xf1, 0x9d, 0x48, 0xb4, 0xd5, + 0xba, 0x72, 0x9a, 0x05, 0xe1, 0xe0, 0x26, 0xd2, 0x6c, 0xf7, 0x4f, 0xa7, 0x82, 0x70, 0x24, 0x20, + 0xac, 0xe3, 0xa1, 0x1d, 0x98, 0x48, 0x9e, 0xac, 0xc2, 0x88, 0x85, 0x00, 0x1b, 0xbf, 0x7a, 0x75, + 0xb0, 0x8f, 0xab, 0x69, 0x35, 0xf9, 0xcd, 0x41, 0x2f, 0xc1, 0x06, 0xe5, 0xb9, 0x6f, 0x81, 0x71, + 0x6d, 0x62, 0x4f, 0xe2, 0x01, 0x30, 0xf7, 0x16, 0xcc, 0xa4, 0xa7, 0xee, 0x44, 0x1e, 0x04, 0xff, + 0xbb, 0x00, 0xd3, 0x19, 0x2f, 0x57, 0x2c, 0xf2, 0x7f, 0x8a, 0xa1, 0x26, 0x81, 0xfe, 0x4d, 0xb6, + 0x58, 0x18, 0x80, 0x2d, 0x4a, 0x1e, 0x5d, 0xcc, 0xe5, 0xd1, 0x82, 0x15, 0x0e, 0x7d, 0x14, 0x56, + 0x68, 0x9e, 0x3e, 0xc3, 0x03, 0x9d, 0x3e, 0xa7, 0xc0, 0x3e, 0x8d, 0x03, 0x6c, 0x74, 0x80, 0x03, + 0xec, 0x07, 0x0b, 0x30, 0x93, 0xb6, 0xf0, 0x3d, 0x83, 0xf7, 0x8e, 0xb7, 0x8d, 0xf7, 0x8e, 0xec, + 0x3c, 0x1a, 0x69, 0xbb, 0xe3, 0xbc, 0xb7, 0x0f, 0x9c, 0x7a, 0xfb, 0x78, 0x61, 0x20, 0x6a, 0xbd, + 0xdf, 0x41, 0xfe, 0x7e, 0x01, 0x2e, 0xa4, 0xab, 0x2c, 0x7b, 0x8e, 0xbb, 0x77, 0x06, 0x63, 0x73, + 0xdb, 0x18, 0x9b, 0x97, 0x07, 0xf9, 0x1a, 0xd6, 0xb5, 0xdc, 0x01, 0xba, 0x97, 0x1a, 0xa0, 0x85, + 0xc1, 0x49, 0xf6, 0x1e, 0xa5, 0x6f, 0x14, 0xe1, 0x52, 0x66, 0xbd, 0xe4, 0xb9, 0x60, 0xd5, 0x78, + 0x2e, 0xb8, 0x9a, 0x7a, 0x2e, 0xb0, 0x7b, 0xd7, 0x3e, 0x9d, 0xf7, 0x03, 0xe1, 0x7b, 0xcb, 0xc2, + 0x53, 0x3e, 0xe4, 0xdb, 0x81, 0xe1, 0x7b, 0xab, 0x08, 0x61, 0x93, 0xee, 0x9f, 0xa7, 0x37, 0x83, + 0x5f, 0xb7, 0xe0, 0x62, 0xe6, 0xdc, 0x9c, 0x81, 0x5e, 0x7d, 0xdd, 0xd4, 0xab, 0x3f, 0x3f, 0xf0, + 0x6a, 0xcd, 0x51, 0xb4, 0xff, 0x41, 0x31, 0xe7, 0x5b, 0x98, 0x66, 0xf2, 0x36, 0x8c, 0x3b, 0xcd, + 0x26, 0x89, 0xa2, 0xb5, 0xa0, 0xa5, 0xc2, 0x35, 0xbe, 0xcc, 0xa4, 0x8d, 0xa4, 0xf8, 0xf8, 0xb0, + 0x32, 0x97, 0x26, 0x91, 0x80, 0xb1, 0x4e, 0xc1, 0x8c, 0x30, 0x5b, 0x38, 0xd5, 0x08, 0xb3, 0x57, + 0x01, 0xf6, 0x95, 0xbe, 0x22, 0xad, 0xe6, 0xd4, 0x34, 0x19, 0x1a, 0x16, 0xfa, 0x0e, 0x76, 0x0b, + 0xe0, 0xc6, 0x40, 0x7c, 0x29, 0xbe, 0x3a, 0xe0, 0x5c, 0xe9, 0x86, 0x45, 0x3c, 0xc8, 0x83, 0x52, + 0x09, 0x2b, 0x92, 0xe8, 0xdb, 0x60, 0x26, 0xe2, 0x31, 0x84, 0x96, 0x3d, 0x27, 0x62, 0x4e, 0x5c, + 0x62, 0x15, 0xb2, 0xc8, 0x0d, 0x8d, 0x14, 0x0c, 0x77, 0x61, 0xa3, 0x55, 0xf9, 0x51, 0x2c, 0xe0, + 0x11, 0x5f, 0x98, 0xcf, 0x26, 0x1f, 0x24, 0xf2, 0x0e, 0x9d, 0x4f, 0x0f, 0x3f, 0x1b, 0x78, 0xad, + 0xa6, 0xfd, 0x83, 0x43, 0xf0, 0x44, 0x0f, 0x26, 0x86, 0x16, 0x4d, 0x23, 0x80, 0x17, 0xd3, 0xfa, + 0xbf, 0xb9, 0xcc, 0xca, 0x86, 0x42, 0x30, 0xb5, 0x56, 0x0a, 0x1f, 0x79, 0xad, 0x7c, 0x9f, 0xa5, + 0x69, 0x66, 0xb9, 0xa9, 0xf0, 0x17, 0x4f, 0xc8, 0x9c, 0x4f, 0x51, 0x55, 0xbb, 0x95, 0xa1, 0xef, + 0xbc, 0x3a, 0x70, 0x77, 0x06, 0x56, 0x80, 0x9e, 0xed, 0x93, 0xd1, 0xd7, 0x2c, 0x78, 0x3a, 0xb3, + 0xbf, 0x86, 0xd1, 0xd2, 0x02, 0x94, 0x9a, 0xb4, 0x50, 0x73, 0x0c, 0x4d, 0xdc, 0xb3, 0x25, 0x00, + 0x27, 0x38, 0x86, 0x6d, 0x52, 0xa1, 0xaf, 0x6d, 0xd2, 0xbf, 0xb6, 0xa0, 0x6b, 0x01, 0x9f, 0x01, + 0x27, 0xad, 0x99, 0x9c, 0xf4, 0xd3, 0x83, 0xcc, 0x65, 0x0e, 0x13, 0xfd, 0xad, 0x69, 0x78, 0x2c, + 0xc7, 0x13, 0x6c, 0x1f, 0x66, 
0xb7, 0x9b, 0xc4, 0x74, 0xb9, 0x15, 0x1f, 0x93, 0xe9, 0x9d, 0xdc, + 0xd3, 0x3f, 0x97, 0x5f, 0x88, 0xbb, 0x50, 0x70, 0x77, 0x13, 0xe8, 0x6b, 0x16, 0x9c, 0x77, 0xee, + 0x47, 0x5d, 0x69, 0x01, 0xc5, 0x9a, 0x79, 0x2d, 0x53, 0x4f, 0xdb, 0x27, 0x8d, 0x20, 0x73, 0x8b, + 0x3b, 0x9f, 0x85, 0x85, 0x33, 0xdb, 0x42, 0x58, 0x44, 0xd8, 0xa5, 0xf2, 0x76, 0x0f, 0xa7, 0xf0, + 0x2c, 0x97, 0x3d, 0xce, 0x53, 0x25, 0x04, 0x2b, 0x3a, 0xe8, 0x2e, 0x94, 0xb6, 0xa5, 0x1f, 0xad, + 0xe0, 0xd9, 0x99, 0x87, 0x60, 0xa6, 0xb3, 0x2d, 0xf7, 0x1d, 0x51, 0x20, 0x9c, 0x90, 0x42, 0x6f, + 0x41, 0xd1, 0xdf, 0x8a, 0x7a, 0x65, 0x36, 0x4a, 0xd9, 0xf2, 0x71, 0xef, 0xfe, 0xf5, 0xd5, 0x06, + 0xa6, 0x15, 0xd1, 0x0d, 0x28, 0x86, 0x9b, 0x2d, 0xf1, 0xb4, 0x90, 0x29, 0x97, 0xe2, 0xa5, 0x6a, + 0xf6, 0x22, 0xe1, 0x94, 0xf0, 0x52, 0x15, 0x53, 0x12, 0xa8, 0x0e, 0xc3, 0xcc, 0x69, 0x4a, 0xbc, + 0x20, 0x64, 0x0a, 0xa4, 0x3d, 0x9c, 0x0f, 0x79, 0x08, 0x00, 0x86, 0x80, 0x39, 0x21, 0xf4, 0x36, + 0x8c, 0x34, 0x59, 0xf2, 0x1f, 0xa1, 0xf8, 0xc9, 0x8e, 0x0d, 0xd5, 0x95, 0x1e, 0x88, 0xbf, 0xa0, + 0xf2, 0x72, 0x2c, 0x28, 0xa0, 0x0d, 0x18, 0x69, 0x92, 0xf6, 0xce, 0x56, 0x24, 0xf4, 0x39, 0x9f, + 0xcd, 0xa4, 0xd5, 0x23, 0xd7, 0x95, 0xa0, 0xca, 0x30, 0xb0, 0xa0, 0x85, 0x3e, 0x0f, 0x85, 0xad, + 0xa6, 0xf0, 0xa4, 0xca, 0x7c, 0x43, 0x30, 0xc3, 0x32, 0x2c, 0x8d, 0x1c, 0x1d, 0x56, 0x0a, 0xab, + 0xcb, 0xb8, 0xb0, 0xd5, 0x44, 0xeb, 0x30, 0xba, 0xc5, 0x7d, 0xeb, 0x45, 0xb4, 0x94, 0xe7, 0xb2, + 0xdd, 0xfe, 0xbb, 0xdc, 0xef, 0xb9, 0x07, 0x90, 0x00, 0x60, 0x49, 0x04, 0x6d, 0x00, 0x6c, 0xa9, + 0x18, 0x01, 0x22, 0x4a, 0xfb, 0xa7, 0x07, 0x89, 0x24, 0x20, 0x94, 0x1b, 0xaa, 0x14, 0x6b, 0x74, + 0xd0, 0x57, 0xa1, 0xe4, 0xc8, 0xe4, 0x73, 0x22, 0xce, 0xca, 0xab, 0x99, 0x9b, 0xb0, 0x77, 0x5e, + 0x3e, 0xbe, 0x82, 0x15, 0x12, 0x4e, 0x88, 0xa2, 0x5d, 0x98, 0xdc, 0x8f, 0xda, 0x3b, 0x44, 0x6e, + 0x5a, 0x16, 0x76, 0x25, 0xe7, 0x90, 0xba, 0x2b, 0x10, 0xdd, 0x30, 0xee, 0x38, 0x5e, 0x17, 0x9f, + 0x61, 0xee, 0x62, 0x77, 0x75, 0x62, 0xd8, 0xa4, 0x4d, 0x07, 0xfd, 0x83, 0x4e, 0xb0, 0x79, 0x10, + 0x13, 0x11, 0xcc, 0x3d, 0x73, 0xd0, 0xdf, 0xe1, 0x28, 0xdd, 0x83, 0x2e, 0x00, 0x58, 0x12, 0xa1, + 0xdb, 0xda, 0x91, 0x89, 0x1d, 0x85, 0x06, 0xe7, 0xf9, 0xdc, 0xe1, 0xe9, 0xea, 0x6f, 0x32, 0x28, + 0x8c, 0x1f, 0x26, 0xa4, 0x18, 0x1f, 0x6c, 0xef, 0x04, 0x71, 0xe0, 0xa7, 0x78, 0xf0, 0x6c, 0x3e, + 0x1f, 0xac, 0x67, 0xe0, 0x77, 0xf3, 0xc1, 0x2c, 0x2c, 0x9c, 0xd9, 0x16, 0x6a, 0xc1, 0x54, 0x3b, + 0x08, 0xe3, 0xfb, 0x41, 0x28, 0x57, 0x15, 0xea, 0x71, 0xb5, 0x37, 0x30, 0x45, 0x8b, 0xcc, 0xfa, + 0xdb, 0x84, 0xe0, 0x14, 0x4d, 0xf4, 0x25, 0x18, 0x8d, 0x9a, 0x8e, 0x47, 0x6a, 0xb7, 0xcb, 0xe7, + 0xf2, 0x0f, 0x98, 0x06, 0x47, 0xc9, 0x59, 0x5d, 0x6c, 0x72, 0x04, 0x0a, 0x96, 0xe4, 0xd0, 0x2a, + 0x0c, 0xb3, 0x0c, 0x21, 0x2c, 0x0e, 0x7d, 0x4e, 0x3c, 0xaf, 0x2e, 0x0b, 0x69, 0xce, 0x87, 0x58, + 0x31, 0xe6, 0xd5, 0xe9, 0x1e, 0x10, 0x12, 0x6e, 0x10, 0x95, 0x2f, 0xe4, 0xef, 0x01, 0x21, 0x18, + 0xdf, 0x6e, 0xf4, 0xda, 0x03, 0x0a, 0x09, 0x27, 0x44, 0x29, 0x17, 0xa6, 0x9c, 0xf3, 0xb1, 0x7c, + 0x2e, 0x9c, 0xcf, 0x37, 0x19, 0x17, 0xa6, 0x5c, 0x93, 0x92, 0xb0, 0xbf, 0x36, 0xda, 0x2d, 0x95, + 0xb0, 0x3b, 0xd1, 0x77, 0x5b, 0x5d, 0x06, 0x03, 0x9f, 0x1b, 0x54, 0x45, 0x73, 0x8a, 0xf2, 0xe8, + 0xd7, 0x2c, 0x78, 0xac, 0x9d, 0xf9, 0x21, 0xe2, 0x88, 0x1f, 0x4c, 0xd3, 0xc3, 0x3f, 0x5d, 0xe5, + 0x8a, 0xc8, 0x86, 0xe3, 0x9c, 0x96, 0xd2, 0x32, 0x7f, 0xf1, 0x23, 0xcb, 0xfc, 0x6b, 0x30, 0xc6, + 0xc4, 0xc8, 0x24, 0x78, 0xdc, 0x40, 0x66, 0x77, 0x4c, 0x58, 0x58, 0x16, 0x15, 0xb1, 0x22, 0x81, + 0xbe, 0xdf, 0x82, 0xa7, 0xd2, 0x5d, 0xc7, 0x84, 0x81, 
0x45, 0x20, 0x62, 0x7e, 0x1d, 0x5b, 0x15, + 0xdf, 0xff, 0x54, 0xbd, 0x17, 0xf2, 0x71, 0x3f, 0x04, 0xdc, 0xbb, 0x31, 0x54, 0xcd, 0xb8, 0x0f, + 0x8e, 0x98, 0xef, 0x89, 0x03, 0xdc, 0x09, 0x5f, 0x83, 0x89, 0xbd, 0xa0, 0xe3, 0x4b, 0x9f, 0x18, + 0xe1, 0xf1, 0xcc, 0x74, 0xd7, 0x6b, 0x5a, 0x39, 0x36, 0xb0, 0x52, 0x37, 0xc9, 0xb1, 0x87, 0xbd, + 0x49, 0x9e, 0xed, 0xfd, 0xe4, 0xeb, 0x56, 0x86, 0x60, 0xcd, 0x6f, 0xac, 0x5f, 0x30, 0x6f, 0xac, + 0xcf, 0xa6, 0x6f, 0xac, 0x5d, 0x1a, 0x4a, 0xe3, 0xb2, 0x3a, 0x78, 0xa0, 0xf6, 0x41, 0xa3, 0xf4, + 0xd9, 0x1e, 0x5c, 0xee, 0x77, 0x70, 0x30, 0x13, 0xc6, 0x96, 0x7a, 0xdb, 0x4f, 0x4c, 0x18, 0x5b, + 0xb5, 0x2a, 0x66, 0x90, 0x41, 0x23, 0xeb, 0xd8, 0xff, 0xdd, 0x82, 0x62, 0x3d, 0x68, 0x9d, 0x81, + 0xc6, 0xf5, 0x8b, 0x86, 0xc6, 0xf5, 0x89, 0x9c, 0x14, 0xd6, 0xb9, 0xfa, 0xd5, 0x95, 0x94, 0x7e, + 0xf5, 0xa9, 0x3c, 0x02, 0xbd, 0xb5, 0xa9, 0x3f, 0x5e, 0x04, 0x3d, 0xe1, 0x36, 0xfa, 0xb7, 0x0f, + 0x63, 0x0b, 0x5f, 0xec, 0x95, 0x83, 0x5b, 0x50, 0x66, 0x96, 0x8f, 0xd2, 0xcd, 0xf6, 0xcf, 0x98, + 0x49, 0xfc, 0x3d, 0xe2, 0x6e, 0xef, 0xc4, 0xa4, 0x95, 0xfe, 0x9c, 0xb3, 0x33, 0x89, 0xff, 0xaf, + 0x16, 0x4c, 0xa7, 0x5a, 0x47, 0x5e, 0x96, 0xcf, 0xde, 0x43, 0x6a, 0xda, 0x66, 0xfb, 0x3a, 0xf9, + 0xcd, 0x03, 0xa8, 0xe7, 0x2c, 0xa9, 0x85, 0x62, 0x72, 0xb9, 0x7a, 0xef, 0x8a, 0xb0, 0x86, 0x81, + 0x5e, 0x87, 0xf1, 0x38, 0x68, 0x07, 0x5e, 0xb0, 0x7d, 0x70, 0x93, 0xc8, 0x58, 0x4e, 0xea, 0xd1, + 0x71, 0x23, 0x01, 0x61, 0x1d, 0xcf, 0xfe, 0xc9, 0x22, 0xa4, 0x93, 0xb4, 0x7f, 0x73, 0x4d, 0x7e, + 0x32, 0xd7, 0xe4, 0x37, 0x2c, 0x98, 0xa1, 0xad, 0x33, 0xab, 0x32, 0x79, 0x1c, 0xaa, 0x84, 0x51, + 0x56, 0x8f, 0x84, 0x51, 0xcf, 0x52, 0xde, 0xd5, 0x0a, 0x3a, 0xb1, 0xd0, 0x62, 0x69, 0xcc, 0x89, + 0x96, 0x62, 0x01, 0x15, 0x78, 0x24, 0x0c, 0x85, 0x27, 0x9e, 0x8e, 0x47, 0xc2, 0x10, 0x0b, 0xa8, + 0xcc, 0x27, 0x35, 0x94, 0x93, 0x4f, 0x8a, 0x85, 0x41, 0x14, 0x96, 0x4c, 0x42, 0x30, 0xd1, 0xc2, + 0x20, 0x4a, 0x13, 0xa7, 0x04, 0xc7, 0xfe, 0x99, 0x22, 0x4c, 0xd4, 0x83, 0x56, 0xf2, 0xa0, 0xf4, + 0x9a, 0xf1, 0xa0, 0x74, 0x39, 0xf5, 0xa0, 0x34, 0xa3, 0xe3, 0x7e, 0xf3, 0xf9, 0xe8, 0xe3, 0x7a, + 0x3e, 0xfa, 0x57, 0x16, 0x9b, 0xb5, 0xea, 0x7a, 0x43, 0xe4, 0x3b, 0x7e, 0x05, 0xc6, 0x19, 0x43, + 0x62, 0xae, 0x9f, 0xf2, 0x95, 0x85, 0xa5, 0x35, 0x58, 0x4f, 0x8a, 0xb1, 0x8e, 0x83, 0xae, 0xc0, + 0x58, 0x44, 0x9c, 0xb0, 0xb9, 0xa3, 0x78, 0x9c, 0x78, 0x83, 0xe0, 0x65, 0x58, 0x41, 0xd1, 0x3b, + 0x49, 0x50, 0xc4, 0x62, 0x7e, 0xe6, 0x5e, 0xbd, 0x3f, 0x7c, 0x8b, 0xe4, 0x47, 0x42, 0xb4, 0xef, + 0x01, 0xea, 0xc6, 0x1f, 0xc0, 0xbc, 0xab, 0x62, 0x86, 0x3f, 0x2b, 0x75, 0x85, 0x3e, 0xfb, 0x13, + 0x0b, 0xa6, 0xea, 0x41, 0x8b, 0x6e, 0xdd, 0x3f, 0x4f, 0xfb, 0x54, 0x0f, 0x3f, 0x3a, 0xd2, 0x23, + 0xfc, 0xe8, 0x3f, 0xb0, 0x60, 0xb4, 0x1e, 0xb4, 0xce, 0x40, 0xf7, 0xfd, 0x05, 0x53, 0xf7, 0xfd, + 0x78, 0xce, 0x92, 0xc8, 0x51, 0x77, 0xff, 0x5c, 0x11, 0x26, 0x69, 0x3f, 0x83, 0x6d, 0x39, 0x4b, + 0xc6, 0x88, 0x58, 0x03, 0x8c, 0x08, 0x15, 0x73, 0x03, 0xcf, 0x0b, 0xee, 0xa7, 0x67, 0x6c, 0x95, + 0x95, 0x62, 0x01, 0x45, 0x2f, 0xc1, 0x58, 0x3b, 0x24, 0xfb, 0x6e, 0xd0, 0x89, 0xd2, 0x5e, 0xce, + 0x75, 0x51, 0x8e, 0x15, 0x06, 0xbd, 0x19, 0x45, 0xae, 0xdf, 0x24, 0xd2, 0xee, 0x6b, 0x88, 0xd9, + 0x7d, 0xf1, 0xb8, 0xe2, 0x5a, 0x39, 0x36, 0xb0, 0xd0, 0x3d, 0x28, 0xb1, 0xff, 0x8c, 0xa3, 0x9c, + 0x3c, 0xd3, 0x95, 0x48, 0xe6, 0x21, 0x08, 0xe0, 0x84, 0x16, 0xba, 0x0a, 0x10, 0x4b, 0x0b, 0xb5, + 0x48, 0x38, 0xe1, 0x2b, 0x59, 0x5b, 0xd9, 0xae, 0x45, 0x58, 0xc3, 0x42, 0x2f, 0x42, 0x29, 0x76, + 0x5c, 0xef, 0x96, 0xeb, 0x93, 0x48, 0x58, 0xf8, 0x89, 0x5c, 0x1d, 0xa2, 0x10, 
0x27, 0x70, 0x2a, + 0xeb, 0xb0, 0x10, 0x0f, 0x3c, 0x4f, 0xde, 0x18, 0xc3, 0x66, 0xb2, 0xce, 0x2d, 0x55, 0x8a, 0x35, + 0x0c, 0xfb, 0x1a, 0x5c, 0xa8, 0x07, 0xad, 0x7a, 0x10, 0xc6, 0xab, 0x41, 0x78, 0xdf, 0x09, 0x5b, + 0x72, 0xfe, 0x2a, 0x32, 0x6d, 0x04, 0xe5, 0x3d, 0xc3, 0x7c, 0x67, 0x1a, 0x09, 0x21, 0x5e, 0x65, + 0xd2, 0xce, 0x09, 0xdd, 0xb1, 0xfe, 0x7d, 0x81, 0x31, 0x8a, 0x54, 0xf2, 0x46, 0xf4, 0x15, 0x98, + 0x8a, 0xc8, 0x2d, 0xd7, 0xef, 0x3c, 0x90, 0x37, 0xd8, 0x1e, 0xbe, 0x6e, 0x8d, 0x15, 0x1d, 0x93, + 0xeb, 0xc1, 0xcc, 0x32, 0x9c, 0xa2, 0x46, 0x87, 0x30, 0xec, 0xf8, 0x8b, 0xd1, 0x9d, 0x88, 0x84, + 0x22, 0x79, 0x20, 0x1b, 0x42, 0x2c, 0x0b, 0x71, 0x02, 0xa7, 0x4b, 0x86, 0xfd, 0x59, 0x0f, 0x7c, + 0x1c, 0x04, 0xb1, 0x5c, 0x64, 0x2c, 0xfd, 0x94, 0x56, 0x8e, 0x0d, 0x2c, 0xb4, 0x0a, 0x28, 0xea, + 0xb4, 0xdb, 0x1e, 0x7b, 0x98, 0x76, 0xbc, 0xeb, 0x61, 0xd0, 0x69, 0xf3, 0x47, 0x41, 0x91, 0xb9, + 0xa9, 0xd1, 0x05, 0xc5, 0x19, 0x35, 0x28, 0x63, 0xd8, 0x8a, 0xd8, 0x6f, 0x11, 0xe5, 0x81, 0x6b, + 0xa4, 0x1b, 0xac, 0x08, 0x4b, 0x98, 0xfd, 0x5d, 0xec, 0xc0, 0x60, 0x39, 0xdf, 0xe2, 0x4e, 0x48, + 0xd0, 0x1e, 0x4c, 0xb6, 0xd9, 0x51, 0x2e, 0xa2, 0x67, 0x8b, 0x01, 0x7c, 0x38, 0x7b, 0x3e, 0x9e, + 0x03, 0x4a, 0x27, 0x87, 0x4d, 0xea, 0xf6, 0x7f, 0x9a, 0x66, 0x7c, 0xa9, 0xc1, 0xaf, 0x73, 0xa3, + 0xc2, 0x4a, 0x5f, 0xc8, 0xae, 0x73, 0xf9, 0x59, 0x22, 0x93, 0x23, 0x44, 0x58, 0xfa, 0x63, 0x59, + 0x17, 0xbd, 0xc3, 0x5e, 0x53, 0x39, 0x33, 0xe8, 0x97, 0x3c, 0x9a, 0x63, 0x19, 0x0f, 0xa7, 0xa2, + 0x22, 0xd6, 0x88, 0xa0, 0x5b, 0x30, 0x29, 0x52, 0x84, 0x09, 0xd5, 0x4e, 0xd1, 0x50, 0x0c, 0x4c, + 0x62, 0x1d, 0x78, 0x9c, 0x2e, 0xc0, 0x66, 0x65, 0xb4, 0x0d, 0x4f, 0x69, 0xf9, 0x32, 0x33, 0x6c, + 0x4a, 0x39, 0x6f, 0x79, 0xfa, 0xe8, 0xb0, 0xf2, 0xd4, 0x46, 0x2f, 0x44, 0xdc, 0x9b, 0x0e, 0xba, + 0x0d, 0x17, 0x9c, 0x66, 0xec, 0xee, 0x93, 0x2a, 0x71, 0x5a, 0x9e, 0xeb, 0x13, 0x33, 0xec, 0xc7, + 0xc5, 0xa3, 0xc3, 0xca, 0x85, 0xc5, 0x2c, 0x04, 0x9c, 0x5d, 0x0f, 0x7d, 0x01, 0x4a, 0x2d, 0x3f, + 0x12, 0x63, 0x30, 0x62, 0xa4, 0x82, 0x2d, 0x55, 0xd7, 0x1b, 0xea, 0xfb, 0x93, 0x3f, 0x38, 0xa9, + 0x80, 0xb6, 0x61, 0x42, 0x77, 0xed, 0x13, 0x69, 0x84, 0x5f, 0xee, 0x71, 0xeb, 0x37, 0xfc, 0xe1, + 0xb8, 0x5e, 0x53, 0x59, 0x6c, 0x1b, 0xae, 0x72, 0x06, 0x61, 0xf4, 0x36, 0x20, 0x2a, 0xcc, 0xb8, + 0x4d, 0xb2, 0xd8, 0x64, 0x41, 0xcc, 0x99, 0x36, 0x6c, 0xcc, 0x70, 0x3f, 0x42, 0x8d, 0x2e, 0x0c, + 0x9c, 0x51, 0x0b, 0xdd, 0xa0, 0x1c, 0x45, 0x2f, 0x15, 0x06, 0xf6, 0x52, 0x00, 0x2e, 0x57, 0x49, + 0x3b, 0x24, 0x4d, 0x27, 0x26, 0x2d, 0x93, 0x22, 0x4e, 0xd5, 0xa3, 0xe7, 0x8d, 0xca, 0x67, 0x04, + 0xa6, 0x59, 0x78, 0x77, 0x4e, 0x23, 0x7a, 0x77, 0xdc, 0x09, 0xa2, 0x78, 0x9d, 0xc4, 0xf7, 0x83, + 0x70, 0x57, 0xc4, 0xea, 0x4b, 0xc2, 0xc6, 0x26, 0x20, 0xac, 0xe3, 0x51, 0x59, 0x91, 0x3d, 0x67, + 0xd6, 0xaa, 0xec, 0x75, 0x69, 0x2c, 0xd9, 0x27, 0x37, 0x78, 0x31, 0x96, 0x70, 0x89, 0x5a, 0xab, + 0x2f, 0xb3, 0x37, 0xa3, 0x14, 0x6a, 0xad, 0xbe, 0x8c, 0x25, 0x1c, 0x91, 0xee, 0x34, 0xbb, 0x53, + 0xf9, 0xaf, 0x7d, 0xdd, 0x7c, 0x79, 0xc0, 0x4c, 0xbb, 0x3e, 0xcc, 0xa8, 0x04, 0xbf, 0x3c, 0x88, + 0x61, 0x54, 0x9e, 0x66, 0x8b, 0x64, 0xf0, 0x08, 0x88, 0x4a, 0xdb, 0x59, 0x4b, 0x51, 0xc2, 0x5d, + 0xb4, 0x8d, 0x70, 0x32, 0x33, 0x7d, 0xf3, 0x51, 0x2d, 0x40, 0x29, 0xea, 0x6c, 0xb6, 0x82, 0x3d, + 0xc7, 0xf5, 0xd9, 0x13, 0x8f, 0x26, 0x88, 0x34, 0x24, 0x00, 0x27, 0x38, 0x68, 0x15, 0xc6, 0x1c, + 0x71, 0x2d, 0x15, 0x8f, 0x32, 0x99, 0xf1, 0x25, 0xe4, 0xd5, 0x95, 0x8b, 0xd9, 0xf2, 0x1f, 0x56, + 0x75, 0xd1, 0x9b, 0x30, 0x29, 0x5c, 0x20, 0x85, 0xf5, 0xf2, 0x39, 0xd3, 0x5b, 0xa6, 0xa1, 0x03, + 0xb1, 
0x89, 0x8b, 0xbe, 0x03, 0xa6, 0x28, 0x95, 0x84, 0xb1, 0x95, 0xcf, 0x0f, 0xc2, 0x11, 0xb5, + 0x3c, 0x23, 0x7a, 0x65, 0x9c, 0x22, 0x86, 0x5a, 0xf0, 0xa4, 0xd3, 0x89, 0x03, 0xa6, 0x0e, 0x36, + 0xd7, 0xff, 0x46, 0xb0, 0x4b, 0x7c, 0xf6, 0x12, 0x33, 0xb6, 0x74, 0xf9, 0xe8, 0xb0, 0xf2, 0xe4, + 0x62, 0x0f, 0x3c, 0xdc, 0x93, 0x0a, 0xba, 0x03, 0xe3, 0x71, 0xe0, 0x09, 0xb7, 0x83, 0xa8, 0xfc, + 0x58, 0x7e, 0x38, 0xac, 0x0d, 0x85, 0xa6, 0x2b, 0x5a, 0x54, 0x55, 0xac, 0xd3, 0x41, 0x1b, 0x7c, + 0x8f, 0xb1, 0x40, 0xc1, 0x24, 0x2a, 0x3f, 0x9e, 0x3f, 0x30, 0x2a, 0x9e, 0xb0, 0xb9, 0x05, 0x45, + 0x4d, 0xac, 0x93, 0x41, 0xd7, 0x61, 0xb6, 0x1d, 0xba, 0x01, 0x5b, 0xd8, 0x4a, 0x15, 0x5f, 0x36, + 0x53, 0x4b, 0xd4, 0xd3, 0x08, 0xb8, 0xbb, 0x0e, 0xbd, 0x88, 0xc9, 0xc2, 0xf2, 0x45, 0x9e, 0xa7, + 0x8c, 0x0b, 0xa7, 0xbc, 0x0c, 0x2b, 0x28, 0x5a, 0x63, 0x7c, 0x99, 0x5f, 0x99, 0xca, 0x73, 0xf9, + 0x71, 0x39, 0xf4, 0xab, 0x15, 0x17, 0x5c, 0xd4, 0x5f, 0x9c, 0x50, 0x98, 0xfb, 0x56, 0x98, 0xed, + 0x62, 0xbc, 0x27, 0xb2, 0x28, 0xff, 0xa7, 0xc3, 0x50, 0x52, 0x7a, 0x57, 0xb4, 0x60, 0xaa, 0xd3, + 0x2f, 0xa6, 0xd5, 0xe9, 0x63, 0x54, 0xfc, 0xd3, 0x35, 0xe8, 0x1b, 0x86, 0x3d, 0x54, 0x21, 0x3f, + 0xdd, 0x98, 0xae, 0x74, 0xe8, 0xeb, 0xfe, 0xa9, 0x5d, 0xa3, 0x8b, 0x03, 0xeb, 0xe5, 0x87, 0x7a, + 0xde, 0xcc, 0x07, 0xcc, 0xa0, 0x4c, 0x6f, 0x9a, 0xed, 0xa0, 0x55, 0xab, 0xa7, 0x53, 0x8a, 0xd6, + 0x69, 0x21, 0xe6, 0x30, 0x76, 0x57, 0xa0, 0x52, 0x02, 0xbb, 0x2b, 0x8c, 0x3e, 0xe4, 0x5d, 0x41, + 0x12, 0xc0, 0x09, 0x2d, 0xe4, 0xc1, 0x6c, 0xd3, 0xcc, 0x06, 0xab, 0x5c, 0x3e, 0x9f, 0xe9, 0x9b, + 0x97, 0xb5, 0xa3, 0xa5, 0x89, 0x5b, 0x4e, 0x53, 0xc1, 0xdd, 0x84, 0xd1, 0x9b, 0x30, 0xf6, 0x41, + 0x10, 0xb1, 0x55, 0x2c, 0x8e, 0x4a, 0xe9, 0x64, 0x37, 0xf6, 0xce, 0xed, 0x06, 0x2b, 0x3f, 0x3e, + 0xac, 0x8c, 0xd7, 0x83, 0x96, 0xfc, 0x8b, 0x55, 0x05, 0xf4, 0x00, 0x2e, 0x18, 0x0c, 0x46, 0x75, + 0x17, 0x06, 0xef, 0xee, 0x53, 0xa2, 0xb9, 0x0b, 0xb5, 0x2c, 0x4a, 0x38, 0xbb, 0x01, 0xfb, 0x17, + 0xb9, 0x76, 0x59, 0xe8, 0xa0, 0x48, 0xd4, 0xf1, 0xce, 0x22, 0x17, 0xd4, 0x8a, 0xa1, 0x1e, 0x7b, + 0xe8, 0x17, 0x8c, 0x5f, 0xb5, 0xd8, 0x0b, 0xc6, 0x06, 0xd9, 0x6b, 0x7b, 0x4e, 0x7c, 0x16, 0x7e, + 0x04, 0xef, 0xc0, 0x58, 0x2c, 0x5a, 0xeb, 0x95, 0xbe, 0x4a, 0xeb, 0x14, 0x7b, 0xc5, 0x51, 0xe7, + 0xab, 0x2c, 0xc5, 0x8a, 0x8c, 0xfd, 0xcf, 0xf9, 0x0c, 0x48, 0xc8, 0x19, 0xa8, 0x2a, 0xaa, 0xa6, + 0xaa, 0xa2, 0xd2, 0xe7, 0x0b, 0x72, 0x54, 0x16, 0xff, 0xcc, 0xec, 0x37, 0xbb, 0xca, 0x7c, 0xd2, + 0x9f, 0xce, 0xec, 0x1f, 0xb6, 0xe0, 0x7c, 0x96, 0x35, 0x08, 0x95, 0x89, 0xf8, 0x45, 0x4a, 0x3d, + 0x25, 0xaa, 0x11, 0xbc, 0x2b, 0xca, 0xb1, 0xc2, 0x18, 0x38, 0x59, 0xc7, 0xc9, 0x22, 0xca, 0xdd, + 0x06, 0x33, 0x71, 0x30, 0x7a, 0x8b, 0x3b, 0x06, 0x59, 0x2a, 0xb3, 0xef, 0xc9, 0x9c, 0x82, 0xec, + 0x9f, 0x2a, 0xc0, 0x79, 0xfe, 0x16, 0xb0, 0xb8, 0x1f, 0xb8, 0xad, 0x7a, 0xd0, 0x12, 0x6e, 0x52, + 0xef, 0xc2, 0x44, 0x5b, 0xbb, 0xfd, 0xf6, 0x8a, 0x69, 0xa5, 0xdf, 0x92, 0x93, 0x5b, 0x88, 0x5e, + 0x8a, 0x0d, 0x5a, 0xa8, 0x05, 0x13, 0x64, 0xdf, 0x6d, 0x2a, 0x85, 0x72, 0xe1, 0xc4, 0x2c, 0x5d, + 0xb5, 0xb2, 0xa2, 0xd1, 0xc1, 0x06, 0xd5, 0x47, 0x90, 0xe8, 0xcd, 0xfe, 0x11, 0x0b, 0x1e, 0xcf, + 0x89, 0x80, 0x45, 0x9b, 0xbb, 0xcf, 0x5e, 0x5d, 0x44, 0xce, 0x28, 0xd5, 0x1c, 0x7f, 0x8b, 0xc1, + 0x02, 0x8a, 0xbe, 0x04, 0xc0, 0xdf, 0x52, 0xa8, 0x50, 0x2e, 0x3e, 0x7d, 0xb0, 0xc8, 0x30, 0x5a, + 0xf8, 0x10, 0x59, 0x1f, 0x6b, 0xb4, 0xec, 0x9f, 0x28, 0xc2, 0x30, 0xd3, 0xdd, 0xa3, 0x55, 0x18, + 0xdd, 0xe1, 0xf1, 0xb6, 0x07, 0x09, 0xed, 0x9d, 0xdc, 0x6e, 0x78, 0x01, 0x96, 0x95, 0xd1, 0x1a, + 0x9c, 0x13, 0xae, 0x78, 0x55, 
0xe2, 0x39, 0x07, 0xf2, 0x92, 0xcc, 0xf3, 0x2c, 0xa9, 0xcc, 0x62, + 0xb5, 0x6e, 0x14, 0x9c, 0x55, 0x0f, 0xbd, 0xd5, 0x15, 0x65, 0x93, 0x47, 0x2a, 0x57, 0x22, 0x75, + 0x9f, 0x48, 0x9b, 0x6f, 0xc2, 0x64, 0xbb, 0x4b, 0x1d, 0x30, 0x9c, 0x88, 0xfb, 0xa6, 0x0a, 0xc0, + 0xc4, 0x65, 0x66, 0x20, 0x1d, 0x66, 0xf4, 0xb2, 0xb1, 0x13, 0x92, 0x68, 0x27, 0xf0, 0x5a, 0x22, + 0xf5, 0x79, 0x62, 0x06, 0x92, 0x82, 0xe3, 0xae, 0x1a, 0x94, 0xca, 0x96, 0xe3, 0x7a, 0x9d, 0x90, + 0x24, 0x54, 0x46, 0x4c, 0x2a, 0xab, 0x29, 0x38, 0xee, 0xaa, 0x41, 0xd7, 0xd1, 0x05, 0x91, 0x37, + 0x5b, 0x06, 0x68, 0x50, 0xb6, 0x3d, 0xa3, 0xd2, 0x51, 0xa3, 0x47, 0xd0, 0x20, 0x61, 0x5b, 0xa1, + 0x32, 0x6f, 0x6b, 0x59, 0x59, 0x85, 0x8b, 0x86, 0xa4, 0xf2, 0x30, 0xd9, 0x9b, 0x7f, 0xd7, 0x82, + 0x73, 0x19, 0x36, 0x84, 0x9c, 0x55, 0x6d, 0xbb, 0x51, 0xac, 0xd2, 0xfb, 0x68, 0xac, 0x8a, 0x97, + 0x63, 0x85, 0x41, 0xf7, 0x03, 0x67, 0x86, 0x69, 0x06, 0x28, 0x6c, 0x74, 0x04, 0xf4, 0x64, 0x0c, + 0x10, 0x5d, 0x86, 0xa1, 0x4e, 0x44, 0x42, 0x99, 0xf6, 0x58, 0xf2, 0x6f, 0xa6, 0x60, 0x64, 0x10, + 0x2a, 0x51, 0x6e, 0x2b, 0xdd, 0x9e, 0x26, 0x51, 0x72, 0xed, 0x1e, 0x87, 0xd9, 0x3f, 0x50, 0x84, + 0x8b, 0xb9, 0x96, 0xc1, 0xb4, 0x4b, 0x7b, 0x81, 0xef, 0xc6, 0x81, 0x7a, 0x17, 0xe2, 0xd1, 0x6d, + 0x48, 0x7b, 0x67, 0x4d, 0x94, 0x63, 0x85, 0x81, 0x9e, 0x95, 0x59, 0xf1, 0xd3, 0x09, 0x8c, 0x96, + 0xaa, 0x46, 0x62, 0xfc, 0x41, 0x33, 0x91, 0x3d, 0x03, 0x43, 0xed, 0x20, 0xf0, 0xd2, 0xcc, 0x88, + 0x76, 0x37, 0x08, 0x3c, 0xcc, 0x80, 0xe8, 0x33, 0x62, 0x1c, 0x52, 0x0f, 0x21, 0xd8, 0x69, 0x05, + 0x91, 0x36, 0x18, 0xcf, 0xc3, 0xe8, 0x2e, 0x39, 0x08, 0x5d, 0x7f, 0x3b, 0xfd, 0x40, 0x76, 0x93, + 0x17, 0x63, 0x09, 0x37, 0xf3, 0x77, 0x8c, 0x9e, 0x46, 0xfe, 0x0e, 0x7d, 0x66, 0xc7, 0xfa, 0x1e, + 0x6d, 0xdf, 0x57, 0x84, 0x69, 0xbc, 0x54, 0xfd, 0xe6, 0x44, 0xdc, 0xe9, 0x9e, 0x88, 0xd3, 0xce, + 0xea, 0xd6, 0x7f, 0x36, 0x7e, 0xce, 0x82, 0x69, 0x16, 0xe3, 0x5a, 0x44, 0x67, 0x71, 0x03, 0xff, + 0x0c, 0x44, 0xb7, 0x67, 0x60, 0x38, 0xa4, 0x8d, 0xa6, 0x53, 0x35, 0xb1, 0x9e, 0x60, 0x0e, 0x43, + 0x4f, 0xc2, 0x10, 0xeb, 0x02, 0x9d, 0xbc, 0x09, 0x9e, 0xe5, 0xa2, 0xea, 0xc4, 0x0e, 0x66, 0xa5, + 0xcc, 0x4d, 0x16, 0x93, 0xb6, 0xe7, 0xf2, 0x4e, 0x27, 0x0a, 0xf5, 0x4f, 0x86, 0x9b, 0x6c, 0x66, + 0xd7, 0x3e, 0x9a, 0x9b, 0x6c, 0x36, 0xc9, 0xde, 0xd7, 0xa2, 0xff, 0x51, 0x80, 0x4b, 0x99, 0xf5, + 0x06, 0x76, 0x93, 0xed, 0x5d, 0xfb, 0x74, 0xec, 0x1c, 0xb2, 0xcd, 0x0f, 0x8a, 0x67, 0x68, 0x7e, + 0x30, 0x34, 0xa8, 0xe4, 0x38, 0x3c, 0x80, 0xf7, 0x6a, 0xe6, 0x90, 0x7d, 0x42, 0xbc, 0x57, 0x33, + 0xfb, 0x96, 0x73, 0xad, 0xfb, 0xd3, 0x42, 0xce, 0xb7, 0xb0, 0x0b, 0xde, 0x15, 0xca, 0x67, 0x18, + 0x30, 0x12, 0x92, 0xf0, 0x04, 0xe7, 0x31, 0xbc, 0x0c, 0x2b, 0x28, 0x72, 0x35, 0x3f, 0x50, 0xde, + 0xb5, 0x37, 0x4f, 0xb4, 0x65, 0xe6, 0xcd, 0xf7, 0x0f, 0x3d, 0x94, 0x4c, 0xda, 0x27, 0x74, 0x4d, + 0xbb, 0x94, 0x17, 0x07, 0xbf, 0x94, 0x4f, 0x64, 0x5f, 0xc8, 0xd1, 0x22, 0x4c, 0xef, 0xb9, 0x3e, + 0x65, 0x9b, 0x07, 0xa6, 0x28, 0xaa, 0xc2, 0x22, 0xac, 0x99, 0x60, 0x9c, 0xc6, 0x9f, 0x7b, 0x13, + 0x26, 0x1f, 0x5e, 0x8b, 0xf8, 0x8d, 0x22, 0x3c, 0xd1, 0x63, 0xdb, 0x73, 0x5e, 0x6f, 0xcc, 0x81, + 0xc6, 0xeb, 0xbb, 0xe6, 0xa1, 0x0e, 0xe7, 0xb7, 0x3a, 0x9e, 0x77, 0xc0, 0x2c, 0xfc, 0x48, 0x4b, + 0x62, 0x08, 0x59, 0x51, 0x05, 0xb0, 0x5f, 0xcd, 0xc0, 0xc1, 0x99, 0x35, 0xd1, 0xdb, 0x80, 0x02, + 0x91, 0xb2, 0x36, 0x09, 0x90, 0xc3, 0x06, 0xbe, 0x98, 0x6c, 0xc6, 0xdb, 0x5d, 0x18, 0x38, 0xa3, + 0x16, 0x15, 0xfa, 0xe9, 0xa9, 0x74, 0xa0, 0xba, 0x95, 0x12, 0xfa, 0xb1, 0x0e, 0xc4, 0x26, 0x2e, + 0xba, 0x0e, 0xb3, 0xce, 0xbe, 0xe3, 0xf2, 0x80, 0x89, 
0x92, 0x00, 0x97, 0xfa, 0x95, 0xee, 0x6e, + 0x31, 0x8d, 0x80, 0xbb, 0xeb, 0xa4, 0x1c, 0x51, 0x47, 0xf2, 0x1d, 0x51, 0x7b, 0xf3, 0xc5, 0x7e, + 0xaa, 0x58, 0xfb, 0x3f, 0x5b, 0xf4, 0xf8, 0xca, 0x48, 0x3b, 0x4f, 0xc7, 0x41, 0xa9, 0x14, 0x35, + 0x9f, 0x50, 0x35, 0x0e, 0xcb, 0x3a, 0x10, 0x9b, 0xb8, 0x7c, 0x41, 0x44, 0x89, 0xa3, 0x82, 0x21, + 0xba, 0x0b, 0xa7, 0x6f, 0x85, 0x81, 0xbe, 0x0c, 0xa3, 0x2d, 0x77, 0xdf, 0x8d, 0x82, 0x50, 0x6c, + 0x96, 0x13, 0x1a, 0x93, 0x27, 0x7c, 0xb0, 0xca, 0xc9, 0x60, 0x49, 0xcf, 0xfe, 0xbe, 0x02, 0x4c, + 0xca, 0x16, 0xdf, 0xe9, 0x04, 0xb1, 0x73, 0x06, 0xc7, 0xf2, 0x75, 0xe3, 0x58, 0xfe, 0x4c, 0x2f, + 0xcf, 0x77, 0xd6, 0xa5, 0xdc, 0xe3, 0xf8, 0x76, 0xea, 0x38, 0x7e, 0xae, 0x3f, 0xa9, 0xde, 0xc7, + 0xf0, 0xbf, 0xb0, 0x60, 0xd6, 0xc0, 0x3f, 0x83, 0xd3, 0x60, 0xd5, 0x3c, 0x0d, 0x9e, 0xee, 0xfb, + 0x0d, 0x39, 0xa7, 0xc0, 0xd7, 0x0b, 0xa9, 0xbe, 0x33, 0xee, 0xff, 0x01, 0x0c, 0xed, 0x38, 0x61, + 0xab, 0x57, 0xd8, 0xdf, 0xae, 0x4a, 0xf3, 0x37, 0x9c, 0xb0, 0xc5, 0x79, 0xf8, 0x4b, 0x2a, 0xf7, + 0xa8, 0x13, 0xb6, 0xfa, 0xfa, 0xe5, 0xb0, 0xa6, 0xd0, 0x35, 0x18, 0x89, 0x9a, 0x41, 0x5b, 0xd9, + 0xe4, 0x5d, 0xe6, 0x79, 0x49, 0x69, 0xc9, 0xf1, 0x61, 0x05, 0x99, 0xcd, 0xd1, 0x62, 0x2c, 0xf0, + 0xe7, 0xb6, 0xa1, 0xa4, 0x9a, 0x7e, 0xa4, 0x1e, 0x15, 0xbf, 0x55, 0x84, 0x73, 0x19, 0xeb, 0x02, + 0x45, 0xc6, 0x68, 0xbd, 0x32, 0xe0, 0x72, 0xfa, 0x88, 0xe3, 0x15, 0xb1, 0x1b, 0x4b, 0x4b, 0xcc, + 0xff, 0xc0, 0x8d, 0xde, 0x89, 0x48, 0xba, 0x51, 0x5a, 0xd4, 0xbf, 0x51, 0xda, 0xd8, 0x99, 0x0d, + 0x35, 0x6d, 0x48, 0xf5, 0xf4, 0x91, 0xce, 0xe9, 0x1f, 0x15, 0xe1, 0x7c, 0x56, 0xc0, 0x0c, 0xf4, + 0x9d, 0xa9, 0x24, 0x42, 0xaf, 0x0d, 0x1a, 0x6a, 0x83, 0x67, 0x16, 0x12, 0x11, 0xc6, 0xe6, 0xcd, + 0xb4, 0x42, 0x7d, 0x87, 0x59, 0xb4, 0xc9, 0x1c, 0xe5, 0x42, 0x9e, 0xfc, 0x49, 0x6e, 0xf1, 0xcf, + 0x0d, 0xdc, 0x01, 0x91, 0x35, 0x2a, 0x4a, 0x39, 0xca, 0xc9, 0xe2, 0xfe, 0x8e, 0x72, 0xb2, 0xe5, + 0x39, 0x17, 0xc6, 0xb5, 0xaf, 0x79, 0xa4, 0x33, 0xbe, 0x4b, 0x4f, 0x14, 0xad, 0xdf, 0x8f, 0x74, + 0xd6, 0x7f, 0xc4, 0x82, 0x94, 0x25, 0x9c, 0x52, 0x49, 0x59, 0xb9, 0x2a, 0xa9, 0xcb, 0x30, 0x14, + 0x06, 0x1e, 0x49, 0xe7, 0x95, 0xc1, 0x81, 0x47, 0x30, 0x83, 0x50, 0x8c, 0x38, 0x51, 0x48, 0x4c, + 0xe8, 0x97, 0x2d, 0x71, 0x8d, 0x7a, 0x06, 0x86, 0x3d, 0xb2, 0x4f, 0xa4, 0x36, 0x42, 0xf1, 0xe4, + 0x5b, 0xb4, 0x10, 0x73, 0x98, 0xfd, 0x73, 0x43, 0xf0, 0x54, 0x4f, 0x57, 0x53, 0x7a, 0x65, 0xd9, + 0x76, 0x62, 0x72, 0xdf, 0x39, 0x48, 0x47, 0xbd, 0xbe, 0xce, 0x8b, 0xb1, 0x84, 0x33, 0xbb, 0x5d, + 0x1e, 0x38, 0x33, 0xa5, 0xc0, 0x13, 0xf1, 0x32, 0x05, 0xd4, 0x54, 0x1c, 0x15, 0x4f, 0x43, 0x71, + 0x74, 0x15, 0x20, 0x8a, 0xbc, 0x15, 0x9f, 0x4a, 0x60, 0x2d, 0x61, 0x10, 0x9c, 0x04, 0x58, 0x6d, + 0xdc, 0x12, 0x10, 0xac, 0x61, 0xa1, 0x2a, 0xcc, 0xb4, 0xc3, 0x20, 0xe6, 0xfa, 0xd0, 0x2a, 0x37, + 0x45, 0x19, 0x36, 0xbd, 0xfc, 0xea, 0x29, 0x38, 0xee, 0xaa, 0x81, 0x5e, 0x87, 0x71, 0xe1, 0xf9, + 0x57, 0x0f, 0x02, 0x4f, 0xa8, 0x6a, 0x94, 0x61, 0x43, 0x23, 0x01, 0x61, 0x1d, 0x4f, 0xab, 0xc6, + 0x94, 0xac, 0xa3, 0x99, 0xd5, 0xb8, 0xa2, 0x55, 0xc3, 0x4b, 0x05, 0xcf, 0x19, 0x1b, 0x28, 0x78, + 0x4e, 0xa2, 0xbc, 0x2a, 0x0d, 0xfc, 0xae, 0x04, 0x7d, 0xd5, 0x3d, 0x3f, 0x3d, 0x04, 0xe7, 0xc4, + 0xc2, 0x79, 0xd4, 0xcb, 0xe5, 0x4e, 0xf7, 0x72, 0x39, 0x0d, 0xf5, 0xd6, 0x37, 0xd7, 0xcc, 0x59, + 0xaf, 0x99, 0x5f, 0x2c, 0xc2, 0x08, 0x9f, 0x8a, 0x33, 0x90, 0xe1, 0x57, 0x85, 0xd2, 0xaf, 0x47, + 0xd8, 0x18, 0xde, 0x97, 0xf9, 0xaa, 0x13, 0x3b, 0xfc, 0xfc, 0x52, 0x6c, 0x34, 0x51, 0x0f, 0xa2, + 0x79, 0x83, 0xd1, 0xce, 0xa5, 0xb4, 0x5a, 0xc0, 0x69, 0x68, 0x6c, 0xf7, 0x2b, 
0x00, 0x11, 0x4b, + 0x9c, 0x4f, 0x69, 0x88, 0x00, 0x44, 0x2f, 0xf4, 0x68, 0xbd, 0xa1, 0x90, 0x79, 0x1f, 0x92, 0x25, + 0xa8, 0x00, 0x58, 0xa3, 0x38, 0xf7, 0x06, 0x94, 0x14, 0x72, 0x3f, 0x15, 0xc0, 0x84, 0x7e, 0xea, + 0x7d, 0x11, 0xa6, 0x53, 0x6d, 0x9d, 0x48, 0x83, 0xf0, 0xf3, 0x16, 0x4c, 0xf3, 0x2e, 0xaf, 0xf8, + 0xfb, 0x62, 0xb3, 0x7f, 0x08, 0xe7, 0xbd, 0x8c, 0x4d, 0x27, 0x66, 0x74, 0xf0, 0x4d, 0xaa, 0x34, + 0x06, 0x59, 0x50, 0x9c, 0xd9, 0x06, 0xba, 0x02, 0x63, 0xdc, 0xd1, 0xc5, 0xf1, 0x84, 0x73, 0xc2, + 0x04, 0x4f, 0x44, 0xc1, 0xcb, 0xb0, 0x82, 0xda, 0xbf, 0x6d, 0xc1, 0x2c, 0xef, 0xf9, 0x4d, 0x72, + 0xa0, 0x6e, 0xc7, 0x1f, 0x67, 0xdf, 0x45, 0x9e, 0x8d, 0x42, 0x4e, 0x9e, 0x0d, 0xfd, 0xd3, 0x8a, + 0x3d, 0x3f, 0xed, 0xa7, 0x2c, 0x10, 0x2b, 0xf0, 0x0c, 0xee, 0x81, 0xdf, 0x6a, 0xde, 0x03, 0xe7, + 0xf2, 0x17, 0x75, 0xce, 0x05, 0xf0, 0x4f, 0x2c, 0x98, 0xe1, 0x08, 0xc9, 0x43, 0xe4, 0xc7, 0x3a, + 0x0f, 0x83, 0x24, 0x7f, 0x53, 0xd9, 0xb6, 0xb3, 0x3f, 0xca, 0x98, 0xac, 0xa1, 0x9e, 0x93, 0xd5, + 0x92, 0x1b, 0xe8, 0x04, 0x49, 0x0d, 0x4f, 0x1c, 0x1a, 0xd6, 0xfe, 0x43, 0x0b, 0x10, 0x6f, 0xc6, + 0x38, 0x97, 0xe9, 0x69, 0xc7, 0x4a, 0x35, 0x4d, 0x50, 0xc2, 0x6a, 0x14, 0x04, 0x6b, 0x58, 0xa7, + 0x32, 0x3c, 0xa9, 0xd7, 0xe4, 0x62, 0xff, 0xd7, 0xe4, 0x13, 0x8c, 0xe8, 0x5f, 0x1f, 0x82, 0xb4, + 0x25, 0x34, 0xba, 0x0b, 0x13, 0x4d, 0xa7, 0xed, 0x6c, 0xba, 0x9e, 0x1b, 0xbb, 0x24, 0xea, 0x65, + 0x86, 0xb2, 0xac, 0xe1, 0x89, 0x77, 0x42, 0xad, 0x04, 0x1b, 0x74, 0xd0, 0x3c, 0x40, 0x3b, 0x74, + 0xf7, 0x5d, 0x8f, 0x6c, 0xb3, 0xab, 0x30, 0x73, 0x87, 0xe2, 0xb6, 0x15, 0xb2, 0x14, 0x6b, 0x18, + 0x19, 0xee, 0x33, 0xc5, 0x47, 0xe7, 0x3e, 0x33, 0x74, 0x42, 0xf7, 0x99, 0xe1, 0x81, 0xdc, 0x67, + 0x30, 0x3c, 0x26, 0xcf, 0x6e, 0xfa, 0x7f, 0xd5, 0xf5, 0x88, 0x10, 0xd8, 0xb8, 0x93, 0xd4, 0xdc, + 0xd1, 0x61, 0xe5, 0x31, 0x9c, 0x89, 0x81, 0x73, 0x6a, 0xa2, 0x2f, 0x41, 0xd9, 0xf1, 0xbc, 0xe0, + 0xbe, 0x1a, 0xb5, 0x95, 0xa8, 0xe9, 0x78, 0x49, 0xa4, 0xf4, 0xb1, 0xa5, 0x27, 0x8f, 0x0e, 0x2b, + 0xe5, 0xc5, 0x1c, 0x1c, 0x9c, 0x5b, 0xdb, 0xde, 0x85, 0x73, 0x0d, 0x12, 0xca, 0x3c, 0xa9, 0x6a, + 0x8b, 0x6d, 0x40, 0x29, 0x4c, 0x31, 0x95, 0x81, 0x62, 0x95, 0x68, 0x51, 0x2c, 0x25, 0x13, 0x49, + 0x08, 0xd9, 0x7f, 0x6c, 0xc1, 0xa8, 0xb0, 0xae, 0x3e, 0x03, 0x59, 0x66, 0xd1, 0xd0, 0x47, 0x56, + 0xb2, 0x19, 0x2f, 0xeb, 0x4c, 0xae, 0x26, 0xb2, 0x96, 0xd2, 0x44, 0x3e, 0xdd, 0x8b, 0x48, 0x6f, + 0x1d, 0xe4, 0x0f, 0x15, 0x61, 0xca, 0xb4, 0x2c, 0x3f, 0x83, 0x21, 0x58, 0x87, 0xd1, 0x48, 0xb8, + 0x31, 0x14, 0xf2, 0xed, 0x57, 0xd3, 0x93, 0x98, 0x58, 0xb9, 0x08, 0xc7, 0x05, 0x49, 0x24, 0xd3, + 0x3f, 0xa2, 0xf8, 0x08, 0xfd, 0x23, 0xfa, 0x19, 0xf7, 0x0f, 0x9d, 0x86, 0x71, 0xbf, 0xfd, 0x4b, + 0x8c, 0xf9, 0xeb, 0xe5, 0x67, 0x20, 0x17, 0x5c, 0x37, 0x8f, 0x09, 0xbb, 0xc7, 0xca, 0x12, 0x9d, + 0xca, 0x91, 0x0f, 0xfe, 0xb1, 0x05, 0xe3, 0x02, 0xf1, 0x0c, 0xba, 0xfd, 0x6d, 0x66, 0xb7, 0x9f, + 0xe8, 0xd1, 0xed, 0x9c, 0xfe, 0xfe, 0xdd, 0x82, 0xea, 0x6f, 0x3d, 0x08, 0xe3, 0x81, 0x32, 0x67, + 0x8c, 0xd1, 0xdb, 0x60, 0xd0, 0x0c, 0x3c, 0x71, 0x98, 0x3f, 0x99, 0xf8, 0xc9, 0xf2, 0xf2, 0x63, + 0xed, 0x37, 0x56, 0xd8, 0xcc, 0x8d, 0x33, 0x08, 0x63, 0x71, 0x80, 0x26, 0x6e, 0x9c, 0x41, 0x18, + 0x63, 0x06, 0x41, 0x2d, 0x80, 0xd8, 0x09, 0xb7, 0x49, 0x4c, 0xcb, 0x84, 0xcb, 0x7d, 0xfe, 0x2e, + 0xec, 0xc4, 0xae, 0x37, 0xef, 0xfa, 0x71, 0x14, 0x87, 0xf3, 0x35, 0x3f, 0xbe, 0x1d, 0xf2, 0xbb, + 0x81, 0xe6, 0xf8, 0xaa, 0x68, 0x61, 0x8d, 0xae, 0xf4, 0xbc, 0x62, 0x6d, 0x0c, 0x9b, 0x0f, 0x85, + 0xeb, 0xa2, 0x1c, 0x2b, 0x0c, 0xfb, 0x0d, 0xc6, 0x93, 0xd9, 0x00, 0x9d, 0xcc, 0x27, 0xf5, 0x37, + 0xc6, 
0xd4, 0xd0, 0xb2, 0x57, 0x82, 0xaa, 0xee, 0xf9, 0xda, 0x9b, 0x05, 0xd2, 0x86, 0x75, 0xb7, + 0x80, 0xc4, 0x3d, 0x16, 0x7d, 0x7b, 0xd7, 0xfb, 0xf1, 0xcb, 0x7d, 0x78, 0xe9, 0x09, 0x5e, 0x8c, + 0x59, 0xf8, 0x55, 0x16, 0xa6, 0xb2, 0x56, 0x4f, 0xe7, 0x36, 0x59, 0x96, 0x00, 0x9c, 0xe0, 0xa0, + 0x05, 0x71, 0xb3, 0xe4, 0xfa, 0xb9, 0x27, 0x52, 0x37, 0x4b, 0xf9, 0xf9, 0xda, 0xd5, 0xf2, 0x15, + 0x18, 0x57, 0xf9, 0xe2, 0xea, 0x3c, 0xed, 0x96, 0x08, 0x40, 0xb0, 0x92, 0x14, 0x63, 0x1d, 0x07, + 0x6d, 0xc0, 0x74, 0xc4, 0x93, 0xd9, 0x49, 0x67, 0x28, 0xa1, 0x37, 0x78, 0x41, 0xbe, 0x3b, 0x37, + 0x4c, 0xf0, 0x31, 0x2b, 0xe2, 0x9b, 0x55, 0xba, 0x4f, 0xa5, 0x49, 0xa0, 0xb7, 0x60, 0xca, 0xd3, + 0x93, 0x7a, 0xd7, 0x85, 0x5a, 0x41, 0x99, 0x65, 0x1a, 0x29, 0xbf, 0xeb, 0x38, 0x85, 0x4d, 0x85, + 0x00, 0xbd, 0x44, 0x44, 0x2f, 0x73, 0xfc, 0x6d, 0x12, 0x89, 0x6c, 0x57, 0x4c, 0x08, 0xb8, 0x95, + 0x83, 0x83, 0x73, 0x6b, 0xa3, 0x6b, 0x30, 0x21, 0x3f, 0x5f, 0x73, 0x0e, 0x4c, 0x8c, 0x7f, 0x35, + 0x18, 0x36, 0x30, 0xd1, 0x7d, 0xb8, 0x20, 0xff, 0x6f, 0x84, 0xce, 0xd6, 0x96, 0xdb, 0x14, 0xbe, + 0x99, 0xe3, 0x8c, 0xc4, 0xa2, 0xf4, 0x84, 0x58, 0xc9, 0x42, 0x3a, 0x3e, 0xac, 0x5c, 0x16, 0xa3, + 0x96, 0x09, 0x67, 0x93, 0x98, 0x4d, 0x1f, 0xad, 0xc1, 0xb9, 0x1d, 0xe2, 0x78, 0xf1, 0xce, 0xf2, + 0x0e, 0x69, 0xee, 0xca, 0x4d, 0xc4, 0x5c, 0x0e, 0x35, 0x93, 0xd9, 0x1b, 0xdd, 0x28, 0x38, 0xab, + 0x1e, 0x7a, 0x0f, 0xca, 0xed, 0xce, 0xa6, 0xe7, 0x46, 0x3b, 0xeb, 0x41, 0xcc, 0x9e, 0xba, 0x55, + 0xba, 0x35, 0xe1, 0x9b, 0xa8, 0xdc, 0x2d, 0xeb, 0x39, 0x78, 0x38, 0x97, 0x02, 0xfa, 0x10, 0x2e, + 0xa4, 0x16, 0x83, 0xf0, 0x94, 0x9a, 0xca, 0x8f, 0x05, 0xd9, 0xc8, 0xaa, 0xc0, 0x3d, 0x66, 0x33, + 0x41, 0x38, 0xbb, 0x89, 0x8f, 0x66, 0x00, 0xf1, 0x01, 0xad, 0xac, 0x49, 0x37, 0xe8, 0xab, 0x30, + 0xa1, 0xaf, 0x22, 0x71, 0xc0, 0x3c, 0xdb, 0x2f, 0x81, 0xbd, 0x90, 0x8d, 0xd4, 0x8a, 0xd2, 0x61, + 0xd8, 0xa0, 0x68, 0x13, 0xc8, 0xfe, 0x3e, 0x74, 0x0b, 0xc6, 0x9a, 0x9e, 0x4b, 0xfc, 0xb8, 0x56, + 0xef, 0xe5, 0x53, 0xbf, 0x2c, 0x70, 0xc4, 0x80, 0x89, 0xe0, 0x79, 0xbc, 0x0c, 0x2b, 0x0a, 0xf6, + 0xaf, 0x14, 0xa0, 0xd2, 0x27, 0x12, 0x63, 0x4a, 0x07, 0x68, 0x0d, 0xa4, 0x03, 0x5c, 0x94, 0xc9, + 0xe3, 0xd6, 0x53, 0xf7, 0xcf, 0x54, 0x62, 0xb8, 0xe4, 0x16, 0x9a, 0xc6, 0x1f, 0xd8, 0x6e, 0x52, + 0x57, 0x23, 0x0e, 0xf5, 0xb5, 0xe8, 0x35, 0x9e, 0x0f, 0x86, 0x07, 0x97, 0xe8, 0x73, 0x55, 0xc1, + 0xf6, 0x2f, 0x15, 0xe0, 0x82, 0x1a, 0xc2, 0x3f, 0xbf, 0x03, 0x77, 0xa7, 0x7b, 0xe0, 0x4e, 0x41, + 0x91, 0x6e, 0xdf, 0x86, 0x91, 0xc6, 0x41, 0xd4, 0x8c, 0xbd, 0x01, 0x04, 0xa0, 0x67, 0xcc, 0xd8, + 0x32, 0xea, 0x98, 0x36, 0xe2, 0xcb, 0xfc, 0x15, 0x0b, 0xa6, 0x37, 0x96, 0xeb, 0x8d, 0xa0, 0xb9, + 0x4b, 0xe2, 0x45, 0xae, 0x26, 0xc2, 0x42, 0xfe, 0xb1, 0x1e, 0x52, 0xae, 0xc9, 0x92, 0x98, 0x2e, + 0xc3, 0xd0, 0x4e, 0x10, 0xc5, 0xe9, 0x57, 0xb6, 0x1b, 0x41, 0x14, 0x63, 0x06, 0xb1, 0x7f, 0xc7, + 0x82, 0x61, 0x96, 0xf2, 0xb4, 0x5f, 0x6a, 0xdc, 0x41, 0xbe, 0x0b, 0xbd, 0x0e, 0x23, 0x64, 0x6b, + 0x8b, 0x34, 0x63, 0x31, 0xab, 0xd2, 0xbb, 0x6e, 0x64, 0x85, 0x95, 0xd2, 0x43, 0x9f, 0x35, 0xc6, + 0xff, 0x62, 0x81, 0x8c, 0xee, 0x41, 0x29, 0x76, 0xf7, 0xc8, 0x62, 0xab, 0x25, 0xde, 0x29, 0x1e, + 0xc2, 0x99, 0x71, 0x43, 0x12, 0xc0, 0x09, 0x2d, 0xfb, 0x07, 0x0a, 0x00, 0x89, 0x43, 0x6f, 0xbf, + 0x4f, 0x5c, 0xea, 0xca, 0xfe, 0xfb, 0x6c, 0x46, 0xf6, 0x5f, 0x94, 0x10, 0xcc, 0xc8, 0xfd, 0xab, + 0x86, 0xa9, 0x38, 0xd0, 0x30, 0x0d, 0x9d, 0x64, 0x98, 0x96, 0x61, 0x36, 0x71, 0x48, 0x36, 0xa3, + 0x33, 0xb0, 0x78, 0xec, 0x1b, 0x69, 0x20, 0xee, 0xc6, 0xb7, 0xbf, 0xd7, 0x02, 0xe1, 0x6e, 0x30, + 0xc0, 0x62, 0x7e, 0x57, 0x26, 
0xea, 0x34, 0x02, 0xba, 0x5e, 0xce, 0xf7, 0xbf, 0x10, 0x61, 0x5c, + 0xd5, 0xe1, 0x61, 0x04, 0x6f, 0x35, 0x68, 0xd9, 0x2d, 0x10, 0xd0, 0x2a, 0x61, 0x4a, 0x86, 0xfe, + 0xbd, 0xb9, 0x0a, 0xd0, 0x62, 0xb8, 0x5a, 0xe2, 0x3f, 0xc5, 0xaa, 0xaa, 0x0a, 0x82, 0x35, 0x2c, + 0xfb, 0x6f, 0x16, 0x60, 0x5c, 0x06, 0x10, 0xa5, 0xf7, 0xf8, 0xfe, 0xad, 0x9c, 0x28, 0x67, 0x00, + 0xcb, 0x94, 0x49, 0x09, 0xab, 0xd0, 0xf2, 0x7a, 0xa6, 0x4c, 0x09, 0xc0, 0x09, 0x0e, 0x7a, 0x1e, + 0x46, 0xa3, 0xce, 0x26, 0x43, 0x4f, 0x19, 0xd1, 0x37, 0x78, 0x31, 0x96, 0x70, 0xf4, 0x25, 0x98, + 0xe1, 0xf5, 0xc2, 0xa0, 0xed, 0x6c, 0x73, 0x0d, 0xd2, 0xb0, 0xf2, 0x6a, 0x9b, 0x59, 0x4b, 0xc1, + 0x8e, 0x0f, 0x2b, 0xe7, 0xd3, 0x65, 0x4c, 0xf7, 0xd8, 0x45, 0x85, 0xee, 0x8b, 0x99, 0xb4, 0xc3, + 0x0c, 0xba, 0x01, 0x23, 0x9c, 0xe5, 0x09, 0x16, 0xd4, 0xe3, 0x45, 0x49, 0x73, 0xb3, 0x61, 0x41, + 0xd4, 0x05, 0xd7, 0x14, 0xf5, 0xd1, 0x7b, 0x30, 0xde, 0x0a, 0xee, 0xfb, 0xf7, 0x9d, 0xb0, 0xb5, + 0x58, 0xaf, 0x89, 0x55, 0x93, 0x29, 0x39, 0x55, 0x13, 0x34, 0xdd, 0x75, 0x87, 0x69, 0x4f, 0x13, + 0x10, 0xd6, 0xc9, 0xa1, 0x0d, 0x16, 0xe3, 0x89, 0xa7, 0xb2, 0xef, 0x65, 0x75, 0xa6, 0xb2, 0xdf, + 0x6b, 0x94, 0x27, 0x45, 0x20, 0x28, 0x91, 0x08, 0x3f, 0x21, 0x64, 0x7f, 0xed, 0x1c, 0x18, 0xab, + 0xd5, 0xc8, 0x19, 0x60, 0x9d, 0x52, 0xce, 0x00, 0x0c, 0x63, 0x64, 0xaf, 0x1d, 0x1f, 0x54, 0xdd, + 0xb0, 0x57, 0xd2, 0x99, 0x15, 0x81, 0xd3, 0x4d, 0x53, 0x42, 0xb0, 0xa2, 0x93, 0x9d, 0xd8, 0xa1, + 0xf8, 0x31, 0x26, 0x76, 0x18, 0x3a, 0xc3, 0xc4, 0x0e, 0xeb, 0x30, 0xba, 0xed, 0xc6, 0x98, 0xb4, + 0x03, 0x71, 0xdc, 0x67, 0xae, 0x84, 0xeb, 0x1c, 0xa5, 0x3b, 0xc0, 0xb8, 0x00, 0x60, 0x49, 0x04, + 0xbd, 0xad, 0xf6, 0xc0, 0x48, 0xbe, 0xb4, 0xdc, 0xfd, 0xf8, 0x90, 0xb9, 0x0b, 0x44, 0x22, 0x87, + 0xd1, 0x87, 0x4d, 0xe4, 0xb0, 0x2a, 0xd3, 0x2f, 0x8c, 0xe5, 0x1b, 0x69, 0xb2, 0xec, 0x0a, 0x7d, + 0x92, 0x2e, 0x18, 0x89, 0x2a, 0x4a, 0xa7, 0x97, 0xa8, 0xe2, 0x7b, 0x2d, 0xb8, 0xd0, 0xce, 0xca, + 0xd9, 0x22, 0xd2, 0x27, 0xbc, 0x3e, 0x70, 0x52, 0x1a, 0xa3, 0x41, 0x76, 0x6d, 0xca, 0x44, 0xc3, + 0xd9, 0xcd, 0xd1, 0x81, 0x0e, 0x37, 0x5b, 0x22, 0xe7, 0xc2, 0x33, 0x39, 0x19, 0x2f, 0x7a, 0xe4, + 0xb9, 0x78, 0x34, 0x79, 0x16, 0x92, 0x5c, 0x17, 0x93, 0x1f, 0x39, 0xd7, 0xc5, 0xdb, 0x2a, 0xd7, + 0x45, 0x8f, 0x48, 0x3a, 0x3c, 0x93, 0x45, 0xdf, 0x0c, 0x17, 0x5a, 0x96, 0x8a, 0xe9, 0xd3, 0xc8, + 0x52, 0xf1, 0x15, 0x93, 0xd9, 0xf3, 0x94, 0x09, 0x2f, 0xf6, 0x61, 0xf6, 0x06, 0xdd, 0xde, 0xec, + 0x9e, 0x67, 0xe4, 0x98, 0x7d, 0xa8, 0x8c, 0x1c, 0x77, 0xf5, 0x5c, 0x17, 0xa8, 0x4f, 0x32, 0x07, + 0x8a, 0x34, 0x60, 0x86, 0x8b, 0xbb, 0xfa, 0x11, 0x74, 0x2e, 0x9f, 0xae, 0x3a, 0x69, 0xba, 0xe9, + 0x66, 0x1d, 0x42, 0xdd, 0x99, 0x33, 0xce, 0x9f, 0x4d, 0xe6, 0x8c, 0x0b, 0xa7, 0x9e, 0x39, 0xe3, + 0xb1, 0x33, 0xc8, 0x9c, 0xf1, 0xf8, 0xc7, 0x9a, 0x39, 0xa3, 0xfc, 0x08, 0x32, 0x67, 0xac, 0x27, + 0x99, 0x33, 0x2e, 0xe6, 0x4f, 0x49, 0x86, 0x55, 0x5a, 0x4e, 0xbe, 0x8c, 0xbb, 0x50, 0x6a, 0x4b, + 0x9f, 0x6a, 0x11, 0xea, 0x27, 0x3b, 0x51, 0x5f, 0x96, 0xe3, 0x35, 0x9f, 0x12, 0x05, 0xc2, 0x09, + 0x29, 0x4a, 0x37, 0xc9, 0x9f, 0xf1, 0x44, 0x0f, 0xc5, 0x58, 0x96, 0xca, 0x21, 0x3f, 0x6b, 0x86, + 0xfd, 0x57, 0x0b, 0x70, 0xa9, 0xf7, 0xba, 0x4e, 0xf4, 0x15, 0xf5, 0x44, 0xbf, 0x9e, 0xd2, 0x57, + 0xf0, 0x4b, 0x40, 0x82, 0x35, 0x70, 0xe0, 0x89, 0xeb, 0x30, 0xab, 0xcc, 0xd1, 0x3c, 0xb7, 0x79, + 0xa0, 0x25, 0xf0, 0x53, 0xae, 0x31, 0x8d, 0x34, 0x02, 0xee, 0xae, 0x83, 0x16, 0x61, 0xda, 0x28, + 0xac, 0x55, 0x85, 0xb0, 0xaf, 0x14, 0x24, 0x0d, 0x13, 0x8c, 0xd3, 0xf8, 0xf6, 0xd7, 0x2d, 0x78, + 0x3c, 0x27, 0x64, 0xf5, 0xc0, 0x71, 0x15, 0xb6, 0x60, 
0xba, 0x6d, 0x56, 0xed, 0x13, 0x7e, 0xc5, + 0x08, 0x8c, 0xad, 0xfa, 0x9a, 0x02, 0xe0, 0x34, 0xd1, 0xa5, 0x2b, 0xbf, 0xf6, 0x7b, 0x97, 0x3e, + 0xf5, 0x9b, 0xbf, 0x77, 0xe9, 0x53, 0xbf, 0xfd, 0x7b, 0x97, 0x3e, 0xf5, 0x17, 0x8f, 0x2e, 0x59, + 0xbf, 0x76, 0x74, 0xc9, 0xfa, 0xcd, 0xa3, 0x4b, 0xd6, 0x6f, 0x1f, 0x5d, 0xb2, 0x7e, 0xf7, 0xe8, + 0x92, 0xf5, 0x03, 0xbf, 0x7f, 0xe9, 0x53, 0xef, 0x16, 0xf6, 0x5f, 0xf9, 0xff, 0x01, 0x00, 0x00, + 0xff, 0xff, 0x99, 0x05, 0xfa, 0x71, 0xfe, 0xdd, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/core/v1/generated.proto b/vendor/k8s.io/api/core/v1/generated.proto index 6f04f8009eb7..de2c1e690e8c 100644 --- a/vendor/k8s.io/api/core/v1/generated.proto +++ b/vendor/k8s.io/api/core/v1/generated.proto @@ -121,7 +121,7 @@ message AzureDiskVolumeSource { // +optional optional bool readOnly = 5; - // Expected values Shared: mulitple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared + // Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared optional string kind = 6; } @@ -170,6 +170,23 @@ message Binding { optional ObjectReference target = 2; } +// Represents storage that is managed by an external CSI volume driver +message CSIPersistentVolumeSource { + // Driver is the name of the driver to use for this volume. + // Required. + optional string driver = 1; + + // VolumeHandle is the unique volume name returned by the CSI volume + // plugin’s CreateVolume to refer to the volume on all subsequent calls. + // Required. + optional string volumeHandle = 2; + + // Optional: The value to pass to ControllerPublishVolumeRequest. + // Defaults to false (read/write). + // +optional + optional bool readOnly = 3; +} + // Adds and removes POSIX capabilities from running containers. message Capabilities { // Added capabilities @@ -516,6 +533,13 @@ message Container { // +patchStrategy=merge repeated VolumeMount volumeMounts = 9; + // volumeDevices is the list of block devices to be used by the container. + // This is an alpha feature and may change in the future. + // +patchMergeKey=devicePath + // +patchStrategy=merge + // +optional + repeated VolumeDevice volumeDevices = 21; + // Periodic probe of container liveness. // Container will be restarted if the probe fails. // Cannot be updated. @@ -565,7 +589,7 @@ message Container { // Security options the pod should run with. // More info: https://kubernetes.io/docs/concepts/policy/security-context/ - // More info: https://git.k8s.io/community/contributors/design-proposals/security_context.md + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ // +optional optional SecurityContext securityContext = 15; @@ -765,6 +789,10 @@ message DeleteOptions { // Either this field or OrphanDependents may be set, but not both. // The default policy is decided by the existing finalizer set in the // metadata.finalizers and the resource-specific default policy. + // Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - + // allow the garbage collector to delete the dependents in the background; + // 'Foreground' - a cascading policy that deletes all dependents in the + // foreground. // +optional optional string propagationPolicy = 4; } @@ -1001,7 +1029,6 @@ message EnvVarSource { } // Event is a report of an event somewhere in the cluster. 
-// TODO: Decide whether to store these separately or with the object they apply to. message Event { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata @@ -1040,6 +1067,30 @@ message Event { // Type of this event (Normal, Warning), new types could be added in the future // +optional optional string type = 9; + + // Time when this Event was first observed. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime eventTime = 10; + + // Data about the Event series this event represents or nil if it's a singleton Event. + // +optional + optional EventSeries series = 11; + + // What action was taken/failed regarding to the Regarding object. + // +optional + optional string action = 12; + + // Optional secondary object for more complex actions. + // +optional + optional ObjectReference related = 13; + + // Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. + // +optional + optional string reportingComponent = 14; + + // ID of the controller instance, e.g. `kubelet-xyzf`. + // +optional + optional string reportingInstance = 15; } // EventList is a list of events. @@ -1053,6 +1104,19 @@ message EventList { repeated Event items = 2; } +// EventSeries contain information on series of events, i.e. thing that was/is happening +// continously for some time. +message EventSeries { + // Number of occurrences in this series up to the last heartbeat time + optional int32 count = 1; + + // Time of the last occurence observed + optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2; + + // State of this Series: Ongoing or Finished + optional string state = 3; +} + // EventSource contains information for an event. message EventSource { // Component from which the event is generated. @@ -1106,7 +1170,7 @@ message FCVolumeSource { } // FlexVolume represents a generic volume resource that is -// provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. +// provisioned/attached using an exec based plugin. message FlexVolumeSource { // Driver is the name of the driver to use for this volume. optional string driver = 1; @@ -1299,21 +1363,78 @@ message HostPathVolumeSource { optional string type = 2; } +// ISCSIPersistentVolumeSource represents an ISCSI disk. +// ISCSI volumes can only be mounted as read/write once. +// ISCSI volumes support ownership management and SELinux relabeling. +message ISCSIPersistentVolumeSource { + // iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + // is other than default (typically TCP ports 860 and 3260). + optional string targetPortal = 1; + + // Target iSCSI Qualified Name. + optional string iqn = 2; + + // iSCSI Target Lun number. + optional int32 lun = 3; + + // iSCSI Interface Name that uses an iSCSI transport. + // Defaults to 'default' (tcp). + // +optional + optional string iscsiInterface = 4; + + // Filesystem type of the volume that you want to mount. + // Tip: Ensure that the filesystem type is supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + optional string fsType = 5; + + // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // Defaults to false. 
+ // +optional + optional bool readOnly = 6; + + // iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port + // is other than default (typically TCP ports 860 and 3260). + // +optional + repeated string portals = 7; + + // whether support iSCSI Discovery CHAP authentication + // +optional + optional bool chapAuthDiscovery = 8; + + // whether support iSCSI Session CHAP authentication + // +optional + optional bool chapAuthSession = 11; + + // CHAP Secret for iSCSI target and initiator authentication + // +optional + optional SecretReference secretRef = 10; + + // Custom iSCSI Initiator Name. + // If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + // : will be created for the connection. + // +optional + optional string initiatorName = 12; +} + // Represents an ISCSI disk. // ISCSI volumes can only be mounted as read/write once. // ISCSI volumes support ownership management and SELinux relabeling. message ISCSIVolumeSource { - // iSCSI target portal. The portal is either an IP or ip_addr:port if the port + // iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port // is other than default (typically TCP ports 860 and 3260). optional string targetPortal = 1; // Target iSCSI Qualified Name. optional string iqn = 2; - // iSCSI target lun number. + // iSCSI Target Lun number. optional int32 lun = 3; - // Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport. + // iSCSI Interface Name that uses an iSCSI transport. + // Defaults to 'default' (tcp). // +optional optional string iscsiInterface = 4; @@ -1330,7 +1451,7 @@ message ISCSIVolumeSource { // +optional optional bool readOnly = 6; - // iSCSI target portal List. The portal is either an IP or ip_addr:port if the port + // iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port // is other than default (typically TCP ports 860 and 3260). // +optional repeated string portals = 7; @@ -1343,11 +1464,11 @@ message ISCSIVolumeSource { // +optional optional bool chapAuthSession = 11; - // CHAP secret for iSCSI target and initiator authentication + // CHAP Secret for iSCSI target and initiator authentication // +optional optional LocalObjectReference secretRef = 10; - // Custom iSCSI initiator name. + // Custom iSCSI Initiator Name. // If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface // : will be created for the connection. // +optional @@ -1442,7 +1563,7 @@ message LimitRangeList { optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of LimitRange objects. - // More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_limit_range.md + // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ repeated LimitRange items = 2; } @@ -1593,7 +1714,7 @@ message NamespaceList { // NamespaceSpec describes the attributes on a Namespace. message NamespaceSpec { // Finalizers is an opaque list of values that must be empty to permanently remove object from storage. - // More info: https://git.k8s.io/community/contributors/design-proposals/namespaces.md#finalizers + // More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/ // +optional repeated string finalizers = 1; } @@ -1601,7 +1722,7 @@ message NamespaceSpec { // NamespaceStatus is information about the current status of a Namespace. 
message NamespaceStatus { // Phase is the current lifecycle phase of the namespace. - // More info: https://git.k8s.io/community/contributors/design-proposals/namespaces.md#phases + // More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/ // +optional optional string phase = 1; } @@ -2198,6 +2319,12 @@ message PersistentVolumeClaimSpec { // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 // +optional optional string storageClassName = 5; + + // volumeMode defines what type of volume is required by the claim. + // Value of Filesystem is implied when not included in claim spec. + // This is an alpha feature and may change in the future. + // +optional + optional string volumeMode = 6; } // PersistentVolumeClaimStatus is the current status of a persistent volume claim. @@ -2287,12 +2414,12 @@ message PersistentVolumeSource { // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md // +optional - optional RBDVolumeSource rbd = 6; + optional RBDPersistentVolumeSource rbd = 6; // ISCSI represents an ISCSI Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. Provisioned by an admin. // +optional - optional ISCSIVolumeSource iscsi = 7; + optional ISCSIPersistentVolumeSource iscsi = 7; // Cinder represents a cinder volume attached and mounted on kubelets host machine // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md @@ -2312,8 +2439,7 @@ message PersistentVolumeSource { optional FlockerVolumeSource flocker = 11; // FlexVolume represents a generic volume resource that is - // provisioned/attached using an exec based plugin. This is an - // alpha feature and may change in future. + // provisioned/attached using an exec based plugin. // +optional optional FlexVolumeSource flexVolume = 12; @@ -2342,7 +2468,7 @@ message PersistentVolumeSource { // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. // +optional - optional ScaleIOVolumeSource scaleIO = 19; + optional ScaleIOPersistentVolumeSource scaleIO = 19; // Local represents directly-attached storage with node affinity // +optional @@ -2352,6 +2478,10 @@ message PersistentVolumeSource { // More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md // +optional optional StorageOSPersistentVolumeSource storageos = 21; + + // CSI represents storage that handled by an external CSI driver + // +optional + optional CSIPersistentVolumeSource csi = 22; } // PersistentVolumeSpec is the specification of a persistent volume. @@ -2393,6 +2523,12 @@ message PersistentVolumeSpec { // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options // +optional repeated string mountOptions = 7; + + // volumeMode defines if a volume is intended to be used with a formatted filesystem + // or to remain in raw block state. Value of Filesystem is implied when not included in spec. + // This is an alpha feature and may change in the future. + // +optional + optional string volumeMode = 8; } // PersistentVolumeStatus is the current status of a persistent volume. 
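For orientation on the new API surface being vendored above (the CSIPersistentVolumeSource and the alpha volumeMode field on PersistentVolumeSpec/PersistentVolumeClaimSpec), a minimal Go sketch of how a client could populate these k8s.io/api/core/v1 types. The driver name, volume handle, object names, and capacity are placeholder values, and raw block mode is alpha as the field comments note.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// volumeMode is new in this API revision; Block keeps the volume as a raw device.
	blockMode := v1.PersistentVolumeBlock

	// A PersistentVolume backed by the new CSIPersistentVolumeSource.
	// "example.csi.driver" and "vol-0123" are placeholder values.
	pv := v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{Name: "example-csi-pv"},
		Spec: v1.PersistentVolumeSpec{
			Capacity: v1.ResourceList{
				v1.ResourceStorage: resource.MustParse("10Gi"),
			},
			AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
			VolumeMode:  &blockMode,
			PersistentVolumeSource: v1.PersistentVolumeSource{
				CSI: &v1.CSIPersistentVolumeSource{
					Driver:       "example.csi.driver",
					VolumeHandle: "vol-0123",
					ReadOnly:     false,
				},
			},
		},
	}

	// A claim that requests the same raw block mode via the new volumeMode field.
	pvc := v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "example-block-claim"},
		Spec: v1.PersistentVolumeClaimSpec{
			AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
			VolumeMode:  &blockMode,
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceStorage: resource.MustParse("10Gi"),
				},
			},
		},
	}

	fmt.Println(pv.Name, pvc.Name, *pvc.Spec.VolumeMode)
}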
@@ -2474,7 +2610,7 @@ message PodAffinity { // relative to the given namespace(s)) that this pod should be // co-located (affinity) or not co-located (anti-affinity) with, // where co-located is defined as running on a node whose value of -// the label with key tches that of any node on which +// the label with key matches that of any node on which // a pod of the set of pods is running message PodAffinityTerm { // A label query over a set of resources, in this case pods. @@ -2489,10 +2625,7 @@ message PodAffinityTerm { // the labelSelector in the specified namespaces, where co-located is defined as running on a node // whose value of the label with key topologyKey matches that of any node on which any of the // selected pods is running. - // For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as "all topologies" - // ("all topologies" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); - // for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed. - // +optional + // Empty topologyKey is not allowed. optional string topologyKey = 3; } @@ -2583,6 +2716,38 @@ message PodCondition { optional string message = 6; } +// PodDNSConfig defines the DNS parameters of a pod in addition to +// those generated from DNSPolicy. +message PodDNSConfig { + // A list of DNS name server IP addresses. + // This will be appended to the base nameservers generated from DNSPolicy. + // Duplicated nameservers will be removed. + // +optional + repeated string nameservers = 1; + + // A list of DNS search domains for host-name lookup. + // This will be appended to the base search paths generated from DNSPolicy. + // Duplicated search paths will be removed. + // +optional + repeated string searches = 2; + + // A list of DNS resolver options. + // This will be merged with the base options generated from DNSPolicy. + // Duplicated entries will be removed. Resolution options given in Options + // will override those that appear in the base DNSPolicy. + // +optional + repeated PodDNSConfigOption options = 3; +} + +// PodDNSConfigOption defines DNS resolver options of a pod. +message PodDNSConfigOption { + // Required. + optional string name = 1; + + // +optional + optional string value = 2; +} + // PodExecOptions is the query options to a Pod's remote exec call. // --- // TODO: This is largely identical to PodAttachOptions above, make sure they stay in sync and see about merging @@ -2807,10 +2972,13 @@ message PodSpec { // +optional optional int64 activeDeadlineSeconds = 5; - // Set DNS policy for containers within the pod. - // One of 'ClusterFirstWithHostNet', 'ClusterFirst' or 'Default'. + // Set DNS policy for the pod. // Defaults to "ClusterFirst". - // To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. + // Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + // DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + // To have DNS options set along with hostNetwork, you have to specify DNS policy + // explicitly to 'ClusterFirstWithHostNet'. + // Note that 'None' policy is an alpha feature introduced in v1.9 and CustomPodDNS feature gate must be enabled to use it. // +optional optional string dnsPolicy = 6; @@ -2919,6 +3087,13 @@ message PodSpec { // The higher the value, the higher the priority. 
// +optional optional int32 priority = 25; + + // Specifies the DNS parameters of a pod. + // Parameters specified here will be merged to the generated DNS + // configuration based on DNSPolicy. + // This is an alpha feature introduced in v1.9 and CustomPodDNS feature gate must be enabled to use it. + // +optional + optional PodDNSConfig dnsConfig = 26; } // PodStatus represents information about the status of a pod. Status may trail the actual @@ -3157,6 +3332,57 @@ message QuobyteVolumeSource { optional string group = 5; } +// Represents a Rados Block Device mount that lasts the lifetime of a pod. +// RBD volumes support ownership management and SELinux relabeling. +message RBDPersistentVolumeSource { + // A collection of Ceph monitors. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + repeated string monitors = 1; + + // The rados image name. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + optional string image = 2; + + // Filesystem type of the volume that you want to mount. + // Tip: Ensure that the filesystem type is supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + optional string fsType = 3; + + // The rados pool name. + // Default is rbd. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + optional string pool = 4; + + // The rados user name. + // Default is admin. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + optional string user = 5; + + // Keyring is the path to key ring for RBDUser. + // Default is /etc/ceph/keyring. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + optional string keyring = 6; + + // SecretRef is name of the authentication secret for RBDUser. If provided + // overrides keyring. + // Default is nil. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + optional SecretReference secretRef = 7; + + // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // Defaults to false. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + optional bool readOnly = 8; +} + // Represents a Rados Block Device mount that lasts the lifetime of a pod. // RBD volumes support ownership management and SELinux relabeling. message RBDVolumeSource { @@ -3377,14 +3603,14 @@ message ResourceQuotaList { optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of ResourceQuota objects. - // More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_resource_quota.md + // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ repeated ResourceQuota items = 2; } // ResourceQuotaSpec defines the desired hard limits to enforce for Quota. message ResourceQuotaSpec { // Hard is the set of desired hard limits for each named resource. 
- // More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_resource_quota.md + // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ // +optional map hard = 1; @@ -3397,7 +3623,7 @@ message ResourceQuotaSpec { // ResourceQuotaStatus defines the enforced hard limits and observed use. message ResourceQuotaStatus { // Hard is the set of enforced hard limits for each named resource. - // More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_resource_quota.md + // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ // +optional map hard = 1; @@ -3440,6 +3666,50 @@ message SELinuxOptions { optional string level = 4; } +// ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume +message ScaleIOPersistentVolumeSource { + // The host address of the ScaleIO API Gateway. + optional string gateway = 1; + + // The name of the storage system as configured in ScaleIO. + optional string system = 2; + + // SecretRef references to the secret for ScaleIO user and other + // sensitive information. If this is not provided, Login operation will fail. + optional SecretReference secretRef = 3; + + // Flag to enable/disable SSL communication with Gateway, default false + // +optional + optional bool sslEnabled = 4; + + // The name of the ScaleIO Protection Domain for the configured storage. + // +optional + optional string protectionDomain = 5; + + // The ScaleIO Storage Pool associated with the protection domain. + // +optional + optional string storagePool = 6; + + // Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + // +optional + optional string storageMode = 7; + + // The name of a volume already created in the ScaleIO system + // that is associated with this volume source. + optional string volumeName = 8; + + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + optional string fsType = 9; + + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + optional bool readOnly = 10; +} + // ScaleIOVolumeSource represents a persistent ScaleIO volume message ScaleIOVolumeSource { // The host address of the ScaleIO API Gateway. @@ -3456,15 +3726,15 @@ message ScaleIOVolumeSource { // +optional optional bool sslEnabled = 4; - // The name of the Protection Domain for the configured storage (defaults to "default"). + // The name of the ScaleIO Protection Domain for the configured storage. // +optional optional string protectionDomain = 5; - // The Storage Pool associated with the protection domain (defaults to "default"). + // The ScaleIO Storage Pool associated with the protection domain. // +optional optional string storagePool = 6; - // Indicates whether the storage for a volume should be thick or thin (defaults to "thin"). + // Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. // +optional optional string storageMode = 7; @@ -3883,7 +4153,8 @@ message ServiceSpec { // externalName is the external reference that kubedns or equivalent will // return as a CNAME record for this service. No proxying will be involved. - // Must be a valid DNS name and requires Type to be ExternalName. + // Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) + // and requires Type to be ExternalName. 
// +optional optional string externalName = 10; @@ -4088,6 +4359,15 @@ message Volume { optional VolumeSource volumeSource = 2; } +// volumeDevice describes a mapping of a raw block device within a container. +message VolumeDevice { + // name must match the name of a persistentVolumeClaim in the pod + optional string name = 1; + + // devicePath is the path inside of the container that the device will be mapped to. + optional string devicePath = 2; +} + // VolumeMount describes a mounting of a Volume within a container. message VolumeMount { // This must match the Name of a Volume. @@ -4196,8 +4476,7 @@ message VolumeSource { optional RBDVolumeSource rbd = 11; // FlexVolume represents a generic volume resource that is - // provisioned/attached using an exec based plugin. This is an - // alpha feature and may change in future. + // provisioned/attached using an exec based plugin. // +optional optional FlexVolumeSource flexVolume = 12; diff --git a/vendor/k8s.io/api/core/v1/register.go b/vendor/k8s.io/api/core/v1/register.go index 4916ee3c23cf..62bf076ebc05 100644 --- a/vendor/k8s.io/api/core/v1/register.go +++ b/vendor/k8s.io/api/core/v1/register.go @@ -43,7 +43,7 @@ var ( AddToScheme = SchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Pod{}, diff --git a/vendor/k8s.io/api/core/v1/types.go b/vendor/k8s.io/api/core/v1/types.go index d1a8f89cd2b4..728cbd5a62b0 100644 --- a/vendor/k8s.io/api/core/v1/types.go +++ b/vendor/k8s.io/api/core/v1/types.go @@ -302,8 +302,7 @@ type VolumeSource struct { // +optional RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,11,opt,name=rbd"` // FlexVolume represents a generic volume resource that is - // provisioned/attached using an exec based plugin. This is an - // alpha feature and may change in future. + // provisioned/attached using an exec based plugin. // +optional FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"` // Cinder represents a cinder volume attached and mounted on kubelets host machine @@ -398,11 +397,11 @@ type PersistentVolumeSource struct { // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md // +optional - RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,6,opt,name=rbd"` + RBD *RBDPersistentVolumeSource `json:"rbd,omitempty" protobuf:"bytes,6,opt,name=rbd"` // ISCSI represents an ISCSI Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. Provisioned by an admin. // +optional - ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,7,opt,name=iscsi"` + ISCSI *ISCSIPersistentVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,7,opt,name=iscsi"` // Cinder represents a cinder volume attached and mounted on kubelets host machine // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md // +optional @@ -417,8 +416,7 @@ type PersistentVolumeSource struct { // +optional Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,11,opt,name=flocker"` // FlexVolume represents a generic volume resource that is - // provisioned/attached using an exec based plugin. This is an - // alpha feature and may change in future. + // provisioned/attached using an exec based plugin. 
// +optional FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"` // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. @@ -440,7 +438,7 @@ type PersistentVolumeSource struct { PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,18,opt,name=portworxVolume"` // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. // +optional - ScaleIO *ScaleIOVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,19,opt,name=scaleIO"` + ScaleIO *ScaleIOPersistentVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,19,opt,name=scaleIO"` // Local represents directly-attached storage with node affinity // +optional Local *LocalVolumeSource `json:"local,omitempty" protobuf:"bytes,20,opt,name=local"` @@ -448,6 +446,9 @@ type PersistentVolumeSource struct { // More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md // +optional StorageOS *StorageOSPersistentVolumeSource `json:"storageos,omitempty" protobuf:"bytes,21,opt,name=storageos"` + // CSI represents storage that handled by an external CSI driver + // +optional + CSI *CSIPersistentVolumeSource `json:"csi,omitempty" protobuf:"bytes,22,opt,name=csi"` } const ( @@ -524,6 +525,11 @@ type PersistentVolumeSpec struct { // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options // +optional MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,7,opt,name=mountOptions"` + // volumeMode defines if a volume is intended to be used with a formatted filesystem + // or to remain in raw block state. Value of Filesystem is implied when not included in spec. + // This is an alpha feature and may change in the future. + // +optional + VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,8,opt,name=volumeMode,casttype=PersistentVolumeMode"` } // PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes. @@ -541,6 +547,16 @@ const ( PersistentVolumeReclaimRetain PersistentVolumeReclaimPolicy = "Retain" ) +// PersistentVolumeMode describes how a volume is intended to be consumed, either Block or Filesystem. +type PersistentVolumeMode string + +const ( + // PersistentVolumeBlock means the volume will not be formatted with a filesystem and will remain a raw block device. + PersistentVolumeBlock PersistentVolumeMode = "Block" + // PersistentVolumeFilesystem means the volume will be or is formatted with a filesystem. + PersistentVolumeFilesystem PersistentVolumeMode = "Filesystem" +) + // PersistentVolumeStatus is the current status of a persistent volume. type PersistentVolumeStatus struct { // Phase indicates if a volume is available, bound to a claim, or released by a claim. @@ -628,6 +644,11 @@ type PersistentVolumeClaimSpec struct { // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 // +optional StorageClassName *string `json:"storageClassName,omitempty" protobuf:"bytes,5,opt,name=storageClassName"` + // volumeMode defines what type of volume is required by the claim. + // Value of Filesystem is implied when not included in claim spec. + // This is an alpha feature and may change in the future. 
+ // +optional + VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,6,opt,name=volumeMode,casttype=PersistentVolumeMode"` } // PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type @@ -838,6 +859,50 @@ type RBDVolumeSource struct { ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,8,opt,name=readOnly"` } +// Represents a Rados Block Device mount that lasts the lifetime of a pod. +// RBD volumes support ownership management and SELinux relabeling. +type RBDPersistentVolumeSource struct { + // A collection of Ceph monitors. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` + // The rados image name. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + RBDImage string `json:"image" protobuf:"bytes,2,opt,name=image"` + // Filesystem type of the volume that you want to mount. + // Tip: Ensure that the filesystem type is supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` + // The rados pool name. + // Default is rbd. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"` + // The rados user name. + // Default is admin. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"` + // Keyring is the path to key ring for RBDUser. + // Default is /etc/ceph/keyring. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"` + // SecretRef is name of the authentication secret for RBDUser. If provided + // overrides keyring. + // Default is nil. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,7,opt,name=secretRef"` + // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // Defaults to false. + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,8,opt,name=readOnly"` +} + // Represents a cinder volume resource in Openstack. // A Cinder volume must exist before mounting to a container. // The volume must also be in the same region as the kubelet. @@ -945,7 +1010,7 @@ type StorageMedium string const ( StorageMediumDefault StorageMedium = "" // use whatever the default is for the node StorageMediumMemory StorageMedium = "Memory" // use memory (tmpfs) - StorageMediumHugepages StorageMedium = "HugePages" // use hugepages + StorageMediumHugePages StorageMedium = "HugePages" // use hugepages ) // Protocol defines network protocols supported for things like container ports. @@ -1017,7 +1082,7 @@ type QuobyteVolumeSource struct { } // FlexVolume represents a generic volume resource that is -// provisioned/attached using an exec based plugin. 
This is an alpha feature and may change in future. +// provisioned/attached using an exec based plugin. type FlexVolumeSource struct { // Driver is the name of the driver to use for this volume. Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` @@ -1169,14 +1234,15 @@ type NFSVolumeSource struct { // ISCSI volumes can only be mounted as read/write once. // ISCSI volumes support ownership management and SELinux relabeling. type ISCSIVolumeSource struct { - // iSCSI target portal. The portal is either an IP or ip_addr:port if the port + // iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port // is other than default (typically TCP ports 860 and 3260). TargetPortal string `json:"targetPortal" protobuf:"bytes,1,opt,name=targetPortal"` // Target iSCSI Qualified Name. IQN string `json:"iqn" protobuf:"bytes,2,opt,name=iqn"` - // iSCSI target lun number. + // iSCSI Target Lun number. Lun int32 `json:"lun" protobuf:"varint,3,opt,name=lun"` - // Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport. + // iSCSI Interface Name that uses an iSCSI transport. + // Defaults to 'default' (tcp). // +optional ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"` // Filesystem type of the volume that you want to mount. @@ -1190,7 +1256,7 @@ type ISCSIVolumeSource struct { // Defaults to false. // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"` - // iSCSI target portal List. The portal is either an IP or ip_addr:port if the port + // iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port // is other than default (typically TCP ports 860 and 3260). // +optional Portals []string `json:"portals,omitempty" protobuf:"bytes,7,opt,name=portals"` @@ -1200,10 +1266,56 @@ type ISCSIVolumeSource struct { // whether support iSCSI Session CHAP authentication // +optional SessionCHAPAuth bool `json:"chapAuthSession,omitempty" protobuf:"varint,11,opt,name=chapAuthSession"` - // CHAP secret for iSCSI target and initiator authentication + // CHAP Secret for iSCSI target and initiator authentication // +optional SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,10,opt,name=secretRef"` - // Custom iSCSI initiator name. + // Custom iSCSI Initiator Name. + // If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + // : will be created for the connection. + // +optional + InitiatorName *string `json:"initiatorName,omitempty" protobuf:"bytes,12,opt,name=initiatorName"` +} + +// ISCSIPersistentVolumeSource represents an ISCSI disk. +// ISCSI volumes can only be mounted as read/write once. +// ISCSI volumes support ownership management and SELinux relabeling. +type ISCSIPersistentVolumeSource struct { + // iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + // is other than default (typically TCP ports 860 and 3260). + TargetPortal string `json:"targetPortal" protobuf:"bytes,1,opt,name=targetPortal"` + // Target iSCSI Qualified Name. + IQN string `json:"iqn" protobuf:"bytes,2,opt,name=iqn"` + // iSCSI Target Lun number. + Lun int32 `json:"lun" protobuf:"varint,3,opt,name=lun"` + // iSCSI Interface Name that uses an iSCSI transport. + // Defaults to 'default' (tcp). + // +optional + ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"` + // Filesystem type of the volume that you want to mount. 
+ // Tip: Ensure that the filesystem type is supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,5,opt,name=fsType"` + // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // Defaults to false. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"` + // iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port + // is other than default (typically TCP ports 860 and 3260). + // +optional + Portals []string `json:"portals,omitempty" protobuf:"bytes,7,opt,name=portals"` + // whether support iSCSI Discovery CHAP authentication + // +optional + DiscoveryCHAPAuth bool `json:"chapAuthDiscovery,omitempty" protobuf:"varint,8,opt,name=chapAuthDiscovery"` + // whether support iSCSI Session CHAP authentication + // +optional + SessionCHAPAuth bool `json:"chapAuthSession,omitempty" protobuf:"varint,11,opt,name=chapAuthSession"` + // CHAP Secret for iSCSI target and initiator authentication + // +optional + SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,10,opt,name=secretRef"` + // Custom iSCSI Initiator Name. // If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface // : will be created for the connection. // +optional @@ -1322,7 +1434,7 @@ type AzureDiskVolumeSource struct { // the ReadOnly setting in VolumeMounts. // +optional ReadOnly *bool `json:"readOnly,omitempty" protobuf:"varint,5,opt,name=readOnly"` - // Expected values Shared: mulitple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared + // Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared Kind *AzureDataDiskKind `json:"kind,omitempty" protobuf:"bytes,6,opt,name=kind,casttype=AzureDataDiskKind"` } @@ -1352,13 +1464,48 @@ type ScaleIOVolumeSource struct { // Flag to enable/disable SSL communication with Gateway, default false // +optional SSLEnabled bool `json:"sslEnabled,omitempty" protobuf:"varint,4,opt,name=sslEnabled"` - // The name of the Protection Domain for the configured storage (defaults to "default"). + // The name of the ScaleIO Protection Domain for the configured storage. + // +optional + ProtectionDomain string `json:"protectionDomain,omitempty" protobuf:"bytes,5,opt,name=protectionDomain"` + // The ScaleIO Storage Pool associated with the protection domain. + // +optional + StoragePool string `json:"storagePool,omitempty" protobuf:"bytes,6,opt,name=storagePool"` + // Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + // +optional + StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"` + // The name of a volume already created in the ScaleIO system + // that is associated with this volume source. + VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,8,opt,name=volumeName"` + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". 
Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"` + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,10,opt,name=readOnly"` +} + +// ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume +type ScaleIOPersistentVolumeSource struct { + // The host address of the ScaleIO API Gateway. + Gateway string `json:"gateway" protobuf:"bytes,1,opt,name=gateway"` + // The name of the storage system as configured in ScaleIO. + System string `json:"system" protobuf:"bytes,2,opt,name=system"` + // SecretRef references to the secret for ScaleIO user and other + // sensitive information. If this is not provided, Login operation will fail. + SecretRef *SecretReference `json:"secretRef" protobuf:"bytes,3,opt,name=secretRef"` + // Flag to enable/disable SSL communication with Gateway, default false + // +optional + SSLEnabled bool `json:"sslEnabled,omitempty" protobuf:"varint,4,opt,name=sslEnabled"` + // The name of the ScaleIO Protection Domain for the configured storage. // +optional ProtectionDomain string `json:"protectionDomain,omitempty" protobuf:"bytes,5,opt,name=protectionDomain"` - // The Storage Pool associated with the protection domain (defaults to "default"). + // The ScaleIO Storage Pool associated with the protection domain. // +optional StoragePool string `json:"storagePool,omitempty" protobuf:"bytes,6,opt,name=storagePool"` - // Indicates whether the storage for a volume should be thick or thin (defaults to "thin"). + // Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. // +optional StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"` // The name of a volume already created in the ScaleIO system @@ -1542,6 +1689,23 @@ type LocalVolumeSource struct { Path string `json:"path" protobuf:"bytes,1,opt,name=path"` } +// Represents storage that is managed by an external CSI volume driver +type CSIPersistentVolumeSource struct { + // Driver is the name of the driver to use for this volume. + // Required. + Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` + + // VolumeHandle is the unique volume name returned by the CSI volume + // plugin’s CreateVolume to refer to the volume on all subsequent calls. + // Required. + VolumeHandle string `json:"volumeHandle" protobuf:"bytes,2,opt,name=volumeHandle"` + + // Optional: The value to pass to ControllerPublishVolumeRequest. + // Defaults to false (read/write). + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` +} + // ContainerPort represents a network port in a single container. type ContainerPort struct { // If specified, this must be an IANA_SVC_NAME and unique within the pod. Each @@ -1610,6 +1774,14 @@ const ( MountPropagationBidirectional MountPropagationMode = "Bidirectional" ) +// volumeDevice describes a mapping of a raw block device within a container. +type VolumeDevice struct { + // name must match the name of a persistentVolumeClaim in the pod + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // devicePath is the path inside of the container that the device will be mapped to. + DevicePath string `json:"devicePath" protobuf:"bytes,2,opt,name=devicePath"` +} + // EnvVar represents an environment variable present in a Container. 
type EnvVar struct { // Name of the environment variable. Must be a C_IDENTIFIER. @@ -1953,6 +2125,12 @@ type Container struct { // +patchMergeKey=mountPath // +patchStrategy=merge VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"` + // volumeDevices is the list of block devices to be used by the container. + // This is an alpha feature and may change in the future. + // +patchMergeKey=devicePath + // +patchStrategy=merge + // +optional + VolumeDevices []VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"` // Periodic probe of container liveness. // Container will be restarted if the probe fails. // Cannot be updated. @@ -1996,7 +2174,7 @@ type Container struct { ImagePullPolicy PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"` // Security options the pod should run with. // More info: https://kubernetes.io/docs/concepts/policy/security-context/ - // More info: https://git.k8s.io/community/contributors/design-proposals/security_context.md + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ // +optional SecurityContext *SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"` @@ -2251,6 +2429,11 @@ const ( // determined by kubelet) DNS settings. DNSDefault DNSPolicy = "Default" + // DNSNone indicates that the pod should use empty DNS settings. DNS + // parameters such as nameservers and search paths should be defined via + // DNSConfig. + DNSNone DNSPolicy = "None" + DefaultTerminationGracePeriodSeconds = 30 ) @@ -2394,7 +2577,7 @@ type WeightedPodAffinityTerm struct { // relative to the given namespace(s)) that this pod should be // co-located (affinity) or not co-located (anti-affinity) with, // where co-located is defined as running on a node whose value of -// the label with key tches that of any node on which +// the label with key matches that of any node on which // a pod of the set of pods is running type PodAffinityTerm struct { // A label query over a set of resources, in this case pods. @@ -2407,11 +2590,8 @@ type PodAffinityTerm struct { // the labelSelector in the specified namespaces, where co-located is defined as running on a node // whose value of the label with key topologyKey matches that of any node on which any of the // selected pods is running. - // For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as "all topologies" - // ("all topologies" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); - // for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed. - // +optional - TopologyKey string `json:"topologyKey,omitempty" protobuf:"bytes,3,opt,name=topologyKey"` + // Empty topologyKey is not allowed. + TopologyKey string `json:"topologyKey" protobuf:"bytes,3,opt,name=topologyKey"` } // Node affinity is a group of node affinity scheduling rules. @@ -2469,7 +2649,7 @@ type Taint struct { // TimeAdded represents the time at which the taint was added. // It is only written for NoExecute taints. 
// +optional - TimeAdded metav1.Time `json:"timeAdded,omitempty" protobuf:"bytes,4,opt,name=timeAdded"` + TimeAdded *metav1.Time `json:"timeAdded,omitempty" protobuf:"bytes,4,opt,name=timeAdded"` } type TaintEffect string @@ -2583,10 +2763,13 @@ type PodSpec struct { // Value must be a positive integer. // +optional ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,5,opt,name=activeDeadlineSeconds"` - // Set DNS policy for containers within the pod. - // One of 'ClusterFirstWithHostNet', 'ClusterFirst' or 'Default'. + // Set DNS policy for the pod. // Defaults to "ClusterFirst". - // To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. + // Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + // DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + // To have DNS options set along with hostNetwork, you have to specify DNS policy + // explicitly to 'ClusterFirstWithHostNet'. + // Note that 'None' policy is an alpha feature introduced in v1.9 and CustomPodDNS feature gate must be enabled to use it. // +optional DNSPolicy DNSPolicy `json:"dnsPolicy,omitempty" protobuf:"bytes,6,opt,name=dnsPolicy,casttype=DNSPolicy"` // NodeSelector is a selector which must be true for the pod to fit on a node. @@ -2679,6 +2862,12 @@ type PodSpec struct { // The higher the value, the higher the priority. // +optional Priority *int32 `json:"priority,omitempty" protobuf:"bytes,25,opt,name=priority"` + // Specifies the DNS parameters of a pod. + // Parameters specified here will be merged to the generated DNS + // configuration based on DNSPolicy. + // This is an alpha feature introduced in v1.9 and CustomPodDNS feature gate must be enabled to use it. + // +optional + DNSConfig *PodDNSConfig `json:"dnsConfig,omitempty" protobuf:"bytes,26,opt,name=dnsConfig"` } // HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the @@ -2746,6 +2935,35 @@ const ( PodQOSBestEffort PodQOSClass = "BestEffort" ) +// PodDNSConfig defines the DNS parameters of a pod in addition to +// those generated from DNSPolicy. +type PodDNSConfig struct { + // A list of DNS name server IP addresses. + // This will be appended to the base nameservers generated from DNSPolicy. + // Duplicated nameservers will be removed. + // +optional + Nameservers []string `json:"nameservers,omitempty" protobuf:"bytes,1,rep,name=nameservers"` + // A list of DNS search domains for host-name lookup. + // This will be appended to the base search paths generated from DNSPolicy. + // Duplicated search paths will be removed. + // +optional + Searches []string `json:"searches,omitempty" protobuf:"bytes,2,rep,name=searches"` + // A list of DNS resolver options. + // This will be merged with the base options generated from DNSPolicy. + // Duplicated entries will be removed. Resolution options given in Options + // will override those that appear in the base DNSPolicy. + // +optional + Options []PodDNSConfigOption `json:"options,omitempty" protobuf:"bytes,3,rep,name=options"` +} + +// PodDNSConfigOption defines DNS resolver options of a pod. +type PodDNSConfigOption struct { + // Required. + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + // +optional + Value *string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` +} + // PodStatus represents information about the status of a pod. Status may trail the actual // state of a system. 
type PodStatus struct { @@ -3209,7 +3427,8 @@ type ServiceSpec struct { // externalName is the external reference that kubedns or equivalent will // return as a CNAME record for this service. No proxying will be involved. - // Must be a valid DNS name and requires Type to be ExternalName. + // Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) + // and requires Type to be ExternalName. // +optional ExternalName string `json:"externalName,omitempty" protobuf:"bytes,10,opt,name=externalName"` @@ -3762,8 +3981,6 @@ const ( ) const ( - // Namespace prefix for opaque counted resources (alpha). - ResourceOpaqueIntPrefix = "pod.alpha.kubernetes.io/opaque-int-resource-" // Default namespace prefix. ResourceDefaultNamespacePrefix = "kubernetes.io/" // Name prefix for huge page resources (alpha). @@ -3825,7 +4042,7 @@ const ( // NamespaceSpec describes the attributes on a Namespace. type NamespaceSpec struct { // Finalizers is an opaque list of values that must be empty to permanently remove object from storage. - // More info: https://git.k8s.io/community/contributors/design-proposals/namespaces.md#finalizers + // More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/ // +optional Finalizers []FinalizerName `json:"finalizers,omitempty" protobuf:"bytes,1,rep,name=finalizers,casttype=FinalizerName"` } @@ -3833,7 +4050,7 @@ type NamespaceSpec struct { // NamespaceStatus is information about the current status of a Namespace. type NamespaceStatus struct { // Phase is the current lifecycle phase of the namespace. - // More info: https://git.k8s.io/community/contributors/design-proposals/namespaces.md#phases + // More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/ // +optional Phase NamespacePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=NamespacePhase"` } @@ -3955,6 +4172,10 @@ type DeleteOptions struct { // Either this field or OrphanDependents may be set, but not both. // The default policy is decided by the existing finalizer set in the // metadata.finalizers and the resource-specific default policy. + // Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - + // allow the garbage collector to delete the dependents in the background; + // 'Foreground' - a cascading policy that deletes all dependents in the + // foreground. // +optional PropagationPolicy *DeletionPropagation `protobuf:"bytes,4,opt,name=propagationPolicy,casttype=DeletionPropagation"` } @@ -4247,7 +4468,6 @@ const ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Event is a report of an event somewhere in the cluster. -// TODO: Decide whether to store these separately or with the object they apply to. type Event struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -4287,8 +4507,51 @@ type Event struct { // Type of this event (Normal, Warning), new types could be added in the future // +optional Type string `json:"type,omitempty" protobuf:"bytes,9,opt,name=type"` + + // Time when this Event was first observed. + // +optional + EventTime metav1.MicroTime `json:"eventTime,omitempty" protobuf:"bytes,10,opt,name=eventTime"` + + // Data about the Event series this event represents or nil if it's a singleton Event. + // +optional + Series *EventSeries `json:"series,omitempty" protobuf:"bytes,11,opt,name=series"` + + // What action was taken/failed regarding to the Regarding object. 
+ // +optional + Action string `json:"action,omitempty" protobuf:"bytes,12,opt,name=action"` + + // Optional secondary object for more complex actions. + // +optional + Related *ObjectReference `json:"related,omitempty" protobuf:"bytes,13,opt,name=related"` + + // Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. + // +optional + ReportingController string `json:"reportingComponent" protobuf:"bytes,14,opt,name=reportingComponent"` + + // ID of the controller instance, e.g. `kubelet-xyzf`. + // +optional + ReportingInstance string `json:"reportingInstance" protobuf:"bytes,15,opt,name=reportingInstance"` } +// EventSeries contains information on a series of events, i.e. a thing that was/is happening +// continuously for some time. +type EventSeries struct { + // Number of occurrences in this series up to the last heartbeat time + Count int32 `json:"count,omitempty" protobuf:"varint,1,name=count"` + // Time of the last occurrence observed + LastObservedTime metav1.MicroTime `json:"lastObservedTime,omitempty" protobuf:"bytes,2,name=lastObservedTime"` + // State of this Series: Ongoing or Finished + State EventSeriesState `json:"state,omitempty" protobuf:"bytes,3,name=state"` +} + +type EventSeriesState string + +const ( + EventSeriesStateOngoing EventSeriesState = "Ongoing" + EventSeriesStateFinished EventSeriesState = "Finished" + EventSeriesStateUnknown EventSeriesState = "Unknown" +) + // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // EventList is a list of events. @@ -4376,7 +4639,7 @@ type LimitRangeList struct { metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is a list of LimitRange objects. - // More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_limit_range.md + // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ Items []LimitRange `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -4416,6 +4679,13 @@ const ( ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage" ) +// The following identify resource prefix for Kubernetes object types +const ( + // HugePages request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + // As burst is not supported for HugePages, we would only quota its request, and ignore the limit. + ResourceRequestsHugePagesPrefix = "requests.hugepages-" +) + // A ResourceQuotaScope defines a filter that must match each object tracked by a quota type ResourceQuotaScope string @@ -4433,7 +4703,7 @@ const ( // ResourceQuotaSpec defines the desired hard limits to enforce for Quota. type ResourceQuotaSpec struct { // Hard is the set of desired hard limits for each named resource. - // More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_resource_quota.md + // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ // +optional Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"` // A collection of filters that must match each object tracked by a quota. @@ -4445,7 +4715,7 @@ type ResourceQuotaSpec struct { // ResourceQuotaStatus defines the enforced hard limits and observed use. type ResourceQuotaStatus struct { // Hard is the set of enforced hard limits for each named resource.
- // More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_resource_quota.md + // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ // +optional Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"` // Used is the current observed total usage of the resource in the namespace. @@ -4486,7 +4756,7 @@ type ResourceQuotaList struct { metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is a list of ResourceQuota objects. - // More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_resource_quota.md + // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ Items []ResourceQuota `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -4844,7 +5114,7 @@ const ( // corresponding to every RequiredDuringScheduling affinity rule. // When the --hard-pod-affinity-weight scheduler flag is not specified, // DefaultHardPodAffinityWeight defines the weight of the implicit PreferredDuringScheduling affinity rule. - DefaultHardPodAffinitySymmetricWeight int = 1 + DefaultHardPodAffinitySymmetricWeight int32 = 1 ) // Sysctl defines a kernel parameter to be set diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go index 4fc4deaa4a7c..c50dd0a0520b 100644 --- a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -76,7 +76,7 @@ var map_AzureDiskVolumeSource = map[string]string{ "cachingMode": "Host Caching mode: None, Read Only, Read Write.", "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", - "kind": "Expected values Shared: mulitple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared", + "kind": "Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared", } func (AzureDiskVolumeSource) SwaggerDoc() map[string]string { @@ -116,6 +116,17 @@ func (Binding) SwaggerDoc() map[string]string { return map_Binding } +var map_CSIPersistentVolumeSource = map[string]string{ + "": "Represents storage that is managed by an external CSI volume driver", + "driver": "Driver is the name of the driver to use for this volume. Required.", + "volumeHandle": "VolumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required.", + "readOnly": "Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write).", +} + +func (CSIPersistentVolumeSource) SwaggerDoc() map[string]string { + return map_CSIPersistentVolumeSource +} + var map_Capabilities = map[string]string{ "": "Adds and removes POSIX capabilities from running containers.", "add": "Added capabilities", @@ -278,13 +289,14 @@ var map_Container = map[string]string{ "env": "List of environment variables to set in the container. Cannot be updated.", "resources": "Compute Resources required by this container. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources", "volumeMounts": "Pod volumes to mount into the container's filesystem. Cannot be updated.", + "volumeDevices": "volumeDevices is the list of block devices to be used by the container. This is an alpha feature and may change in the future.", "livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "readinessProbe": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "lifecycle": "Actions that the management system should take in response to container lifecycle events. Cannot be updated.", "terminationMessagePath": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", "terminationMessagePolicy": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", "imagePullPolicy": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", - "securityContext": "Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://git.k8s.io/community/contributors/design-proposals/security_context.md", + "securityContext": "Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", "stdin": "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", "stdinOnce": "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", "tty": "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. 
Default is false.", @@ -392,7 +404,7 @@ var map_DeleteOptions = map[string]string{ "gracePeriodSeconds": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.", "preconditions": "Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be returned.", "orphanDependents": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.", - "PropagationPolicy": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.", + "PropagationPolicy": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.", } func (DeleteOptions) SwaggerDoc() map[string]string { @@ -529,16 +541,22 @@ func (EnvVarSource) SwaggerDoc() map[string]string { } var map_Event = map[string]string{ - "": "Event is a report of an event somewhere in the cluster.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "involvedObject": "The object that this event is about.", - "reason": "This should be a short, machine understandable string that gives the reason for the transition into the object's current status.", - "message": "A human-readable description of the status of this operation.", - "source": "The component reporting this event. Should be a short machine understandable string.", - "firstTimestamp": "The time at which the event was first recorded. (Time of server receipt is in TypeMeta.)", - "lastTimestamp": "The time at which the most recent occurrence of this event was recorded.", - "count": "The number of times this event has occurred.", - "type": "Type of this event (Normal, Warning), new types could be added in the future", + "": "Event is a report of an event somewhere in the cluster.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "involvedObject": "The object that this event is about.", + "reason": "This should be a short, machine understandable string that gives the reason for the transition into the object's current status.", + "message": "A human-readable description of the status of this operation.", + "source": "The component reporting this event. Should be a short machine understandable string.", + "firstTimestamp": "The time at which the event was first recorded. 
(Time of server receipt is in TypeMeta.)", + "lastTimestamp": "The time at which the most recent occurrence of this event was recorded.", + "count": "The number of times this event has occurred.", + "type": "Type of this event (Normal, Warning), new types could be added in the future", + "eventTime": "Time when this Event was first observed.", + "series": "Data about the Event series this event represents or nil if it's a singleton Event.", + "action": "What action was taken/failed regarding to the Regarding object.", + "related": "Optional secondary object for more complex actions.", + "reportingComponent": "Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.", + "reportingInstance": "ID of the controller instance, e.g. `kubelet-xyzf`.", } func (Event) SwaggerDoc() map[string]string { @@ -555,6 +573,17 @@ func (EventList) SwaggerDoc() map[string]string { return map_EventList } +var map_EventSeries = map[string]string{ + "": "EventSeries contains information on a series of events, i.e. a thing that was/is happening continuously for some time.", + "count": "Number of occurrences in this series up to the last heartbeat time", + "lastObservedTime": "Time of the last occurrence observed", + "state": "State of this Series: Ongoing or Finished", +} + +func (EventSeries) SwaggerDoc() map[string]string { + return map_EventSeries +} + var map_EventSource = map[string]string{ "": "EventSource contains information for an event.", "component": "Component from which the event is generated.", @@ -588,7 +617,7 @@ func (FCVolumeSource) SwaggerDoc() map[string]string { } var map_FlexVolumeSource = map[string]string{ - "": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.", + "": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", "driver": "Driver is the name of the driver to use for this volume.", "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.", "secretRef": "Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.", @@ -698,19 +727,38 @@ func (HostPathVolumeSource) SwaggerDoc() map[string]string { return map_HostPathVolumeSource } +var map_ISCSIPersistentVolumeSource = map[string]string{ + "": "ISCSIPersistentVolumeSource represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.", + "targetPortal": "iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + "iqn": "Target iSCSI Qualified Name.", + "lun": "iSCSI Target Lun number.", + "iscsiInterface": "iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).", + "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi", + "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts.
Defaults to false.", + "portals": "iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + "chapAuthDiscovery": "whether support iSCSI Discovery CHAP authentication", + "chapAuthSession": "whether support iSCSI Session CHAP authentication", + "secretRef": "CHAP Secret for iSCSI target and initiator authentication", + "initiatorName": "Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection.", +} + +func (ISCSIPersistentVolumeSource) SwaggerDoc() map[string]string { + return map_ISCSIPersistentVolumeSource +} + var map_ISCSIVolumeSource = map[string]string{ "": "Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.", - "targetPortal": "iSCSI target portal. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + "targetPortal": "iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", "iqn": "Target iSCSI Qualified Name.", - "lun": "iSCSI target lun number.", - "iscsiInterface": "Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport.", + "lun": "iSCSI Target Lun number.", + "iscsiInterface": "iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).", "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi", "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.", - "portals": "iSCSI target portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + "portals": "iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", "chapAuthDiscovery": "whether support iSCSI Discovery CHAP authentication", "chapAuthSession": "whether support iSCSI Session CHAP authentication", - "secretRef": "CHAP secret for iSCSI target and initiator authentication", - "initiatorName": "Custom iSCSI initiator name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection.", + "secretRef": "CHAP Secret for iSCSI target and initiator authentication", + "initiatorName": "Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection.", } func (ISCSIVolumeSource) SwaggerDoc() map[string]string { @@ -765,7 +813,7 @@ func (LimitRangeItem) SwaggerDoc() map[string]string { var map_LimitRangeList = map[string]string{ "": "LimitRangeList is a list of LimitRange items.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "items": "Items is a list of LimitRange objects. More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_limit_range.md", + "items": "Items is a list of LimitRange objects. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", } func (LimitRangeList) SwaggerDoc() map[string]string { @@ -866,7 +914,7 @@ func (NamespaceList) SwaggerDoc() map[string]string { var map_NamespaceSpec = map[string]string{ "": "NamespaceSpec describes the attributes on a Namespace.", - "finalizers": "Finalizers is an opaque list of values that must be empty to permanently remove object from storage. More info: https://git.k8s.io/community/contributors/design-proposals/namespaces.md#finalizers", + "finalizers": "Finalizers is an opaque list of values that must be empty to permanently remove object from storage. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/", } func (NamespaceSpec) SwaggerDoc() map[string]string { @@ -875,7 +923,7 @@ func (NamespaceSpec) SwaggerDoc() map[string]string { var map_NamespaceStatus = map[string]string{ "": "NamespaceStatus is information about the current status of a Namespace.", - "phase": "Phase is the current lifecycle phase of the namespace. More info: https://git.k8s.io/community/contributors/design-proposals/namespaces.md#phases", + "phase": "Phase is the current lifecycle phase of the namespace. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/", } func (NamespaceStatus) SwaggerDoc() map[string]string { @@ -1151,6 +1199,7 @@ var map_PersistentVolumeClaimSpec = map[string]string{ "resources": "Resources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources", "volumeName": "VolumeName is the binding reference to the PersistentVolume backing this claim.", "storageClassName": "Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1", + "volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. This is an alpha feature and may change in the future.", } func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string { @@ -1202,7 +1251,7 @@ var map_PersistentVolumeSource = map[string]string{ "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", "fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running", - "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. 
This is an alpha feature and may change in future.", + "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", "azureFile": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", "vsphereVolume": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", "quobyte": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime", @@ -1212,6 +1261,7 @@ var map_PersistentVolumeSource = map[string]string{ "scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", "local": "Local represents directly-attached storage with node affinity", "storageos": "StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md", + "csi": "CSI represents storage that is handled by an external CSI driver", } func (PersistentVolumeSource) SwaggerDoc() map[string]string { @@ -1226,6 +1276,7 @@ var map_PersistentVolumeSpec = map[string]string{ "persistentVolumeReclaimPolicy": "What happens to a persistent volume when released from its claim. Valid options are Retain (default) and Recycle. Recycling must be supported by the volume plugin underlying this persistent volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming", "storageClassName": "Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass.", "mountOptions": "A list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options", + "volumeMode": "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec. This is an alpha feature and may change in the future.", } func (PersistentVolumeSpec) SwaggerDoc() map[string]string { @@ -1275,10 +1326,10 @@ func (PodAffinity) SwaggerDoc() map[string]string { } var map_PodAffinityTerm = map[string]string{ - "": "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key tches that of any node on which a pod of the set of pods is running", + "": "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running", "labelSelector": "A label query over a set of resources, in this case pods.", "namespaces": "namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means \"this pod's namespace\"", - "topologyKey": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running.
For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as \"all topologies\" (\"all topologies\" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed.", + "topologyKey": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.", } func (PodAffinityTerm) SwaggerDoc() map[string]string { @@ -1322,6 +1373,26 @@ func (PodCondition) SwaggerDoc() map[string]string { return map_PodCondition } +var map_PodDNSConfig = map[string]string{ + "": "PodDNSConfig defines the DNS parameters of a pod in addition to those generated from DNSPolicy.", + "nameservers": "A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed.", + "searches": "A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed.", + "options": "A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy.", +} + +func (PodDNSConfig) SwaggerDoc() map[string]string { + return map_PodDNSConfig +} + +var map_PodDNSConfigOption = map[string]string{ + "": "PodDNSConfigOption defines DNS resolver options of a pod.", + "name": "Required.", +} + +func (PodDNSConfigOption) SwaggerDoc() map[string]string { + return map_PodDNSConfigOption +} + var map_PodExecOptions = map[string]string{ "": "PodExecOptions is the query options to a Pod's remote exec call.", "stdin": "Redirect the standard input stream of the pod for this call. Defaults to false.", @@ -1410,7 +1481,7 @@ var map_PodSpec = map[string]string{ "restartPolicy": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy", "terminationGracePeriodSeconds": "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.", "activeDeadlineSeconds": "Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.", - "dnsPolicy": "Set DNS policy for containers within the pod. One of 'ClusterFirstWithHostNet', 'ClusterFirst' or 'Default'. Defaults to \"ClusterFirst\". To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.", + "dnsPolicy": "Set DNS policy for the pod. 
Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. Note that 'None' policy is an alpha feature introduced in v1.9 and CustomPodDNS feature gate must be enabled to use it.", "nodeSelector": "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", "serviceAccountName": "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", "serviceAccount": "DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.", @@ -1429,6 +1500,7 @@ var map_PodSpec = map[string]string{ "hostAliases": "HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.", "priorityClassName": "If specified, indicates the pod's priority. \"SYSTEM\" is a special keyword which indicates the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.", "priority": "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.", + "dnsConfig": "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy. This is an alpha feature introduced in v1.9 and CustomPodDNS feature gate must be enabled to use it.", } func (PodSpec) SwaggerDoc() map[string]string { @@ -1571,6 +1643,22 @@ func (QuobyteVolumeSource) SwaggerDoc() map[string]string { return map_QuobyteVolumeSource } +var map_RBDPersistentVolumeSource = map[string]string{ + "": "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.", + "monitors": "A collection of Ceph monitors. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "image": "The rados image name. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd", + "pool": "The rados pool name. Default is rbd. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "user": "The rados user name. Default is admin. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "keyring": "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. 
More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "secretRef": "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", +} + +func (RBDPersistentVolumeSource) SwaggerDoc() map[string]string { + return map_RBDPersistentVolumeSource +} + var map_RBDVolumeSource = map[string]string{ "": "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.", "monitors": "A collection of Ceph monitors. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", @@ -1683,7 +1771,7 @@ func (ResourceQuota) SwaggerDoc() map[string]string { var map_ResourceQuotaList = map[string]string{ "": "ResourceQuotaList is a list of ResourceQuota items.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "items": "Items is a list of ResourceQuota objects. More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_resource_quota.md", + "items": "Items is a list of ResourceQuota objects. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/", } func (ResourceQuotaList) SwaggerDoc() map[string]string { @@ -1692,7 +1780,7 @@ func (ResourceQuotaList) SwaggerDoc() map[string]string { var map_ResourceQuotaSpec = map[string]string{ "": "ResourceQuotaSpec defines the desired hard limits to enforce for Quota.", - "hard": "Hard is the set of desired hard limits for each named resource. More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_resource_quota.md", + "hard": "Hard is the set of desired hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/", "scopes": "A collection of filters that must match each object tracked by a quota. If not specified, the quota matches all objects.", } @@ -1702,7 +1790,7 @@ func (ResourceQuotaSpec) SwaggerDoc() map[string]string { var map_ResourceQuotaStatus = map[string]string{ "": "ResourceQuotaStatus defines the enforced hard limits and observed use.", - "hard": "Hard is the set of enforced hard limits for each named resource. More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_resource_quota.md", + "hard": "Hard is the set of enforced hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/", "used": "Used is the current observed total usage of the resource in the namespace.", } @@ -1732,15 +1820,33 @@ func (SELinuxOptions) SwaggerDoc() map[string]string { return map_SELinuxOptions } +var map_ScaleIOPersistentVolumeSource = map[string]string{ + "": "ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume", + "gateway": "The host address of the ScaleIO API Gateway.", + "system": "The name of the storage system as configured in ScaleIO.", + "secretRef": "SecretRef references to the secret for ScaleIO user and other sensitive information. 
If this is not provided, Login operation will fail.", + "sslEnabled": "Flag to enable/disable SSL communication with Gateway, default false", + "protectionDomain": "The name of the ScaleIO Protection Domain for the configured storage.", + "storagePool": "The ScaleIO Storage Pool associated with the protection domain.", + "storageMode": "Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.", + "volumeName": "The name of a volume already created in the ScaleIO system that is associated with this volume source.", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", +} + +func (ScaleIOPersistentVolumeSource) SwaggerDoc() map[string]string { + return map_ScaleIOPersistentVolumeSource +} + var map_ScaleIOVolumeSource = map[string]string{ "": "ScaleIOVolumeSource represents a persistent ScaleIO volume", "gateway": "The host address of the ScaleIO API Gateway.", "system": "The name of the storage system as configured in ScaleIO.", "secretRef": "SecretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail.", "sslEnabled": "Flag to enable/disable SSL communication with Gateway, default false", - "protectionDomain": "The name of the Protection Domain for the configured storage (defaults to \"default\").", - "storagePool": "The Storage Pool associated with the protection domain (defaults to \"default\").", - "storageMode": "Indicates whether the storage for a volume should be thick or thin (defaults to \"thin\").", + "protectionDomain": "The name of the ScaleIO Protection Domain for the configured storage.", + "storagePool": "The ScaleIO Storage Pool associated with the protection domain.", + "storageMode": "Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.", "volumeName": "The name of a volume already created in the ScaleIO system that is associated with this volume source.", "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", @@ -1922,7 +2028,7 @@ var map_ServiceSpec = map[string]string{ "sessionAffinity": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", "loadBalancerIP": "Only applies to Service Type: LoadBalancer LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature.", "loadBalancerSourceRanges": "If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. 
This field will be ignored if the cloud-provider does not support the feature.\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/", - "externalName": "externalName is the external reference that kubedns or equivalent will return as a CNAME record for this service. No proxying will be involved. Must be a valid DNS name and requires Type to be ExternalName.", + "externalName": "externalName is the external reference that kubedns or equivalent will return as a CNAME record for this service. No proxying will be involved. Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) and requires Type to be ExternalName.", "externalTrafficPolicy": "externalTrafficPolicy denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints. \"Local\" preserves the client source IP and avoids a second hop for LoadBalancer and Nodeport type services, but risks potentially imbalanced traffic spreading. \"Cluster\" obscures the client source IP and may cause a second hop to another node, but should have good overall load-spreading.", "healthCheckNodePort": "healthCheckNodePort specifies the healthcheck nodePort for the service. If not specified, HealthCheckNodePort is created by the service api backend with the allocated nodePort. Will use user-specified nodePort value if specified by the client. Only effects when Type is set to LoadBalancer and ExternalTrafficPolicy is set to Local.", "publishNotReadyAddresses": "publishNotReadyAddresses, when set to true, indicates that DNS implementations must publish the notReadyAddresses of subsets for the Endpoints associated with the Service. The default value is false. The primary use case for setting this field is to use a StatefulSet's Headless Service to propagate SRV records for its Pods without respect to their readiness for purpose of peer discovery. This field will replace the service.alpha.kubernetes.io/tolerate-unready-endpoints when that annotation is deprecated and all clients have been converted to use this field.", @@ -2031,6 +2137,16 @@ func (Volume) SwaggerDoc() map[string]string { return map_Volume } +var map_VolumeDevice = map[string]string{ + "": "volumeDevice describes a mapping of a raw block device within a container.", + "name": "name must match the name of a persistentVolumeClaim in the pod", + "devicePath": "devicePath is the path inside of the container that the device will be mapped to.", +} + +func (VolumeDevice) SwaggerDoc() map[string]string { + return map_VolumeDevice +} + var map_VolumeMount = map[string]string{ "": "VolumeMount describes a mounting of a Volume within a container.", "name": "This must match the Name of a Volume.", @@ -2068,7 +2184,7 @@ var map_VolumeSource = map[string]string{ "glusterfs": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md", "persistentVolumeClaim": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md", - "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. 
This is an alpha feature and may change in future.", + "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running", diff --git a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go index 66d4e8e1fa79..951b2e264796 100644 --- a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go @@ -23,737 +23,10 @@ package v1 import ( resource "k8s.io/apimachinery/pkg/api/resource" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" types "k8s.io/apimachinery/pkg/types" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AWSElasticBlockStoreVolumeSource).DeepCopyInto(out.(*AWSElasticBlockStoreVolumeSource)) - return nil - }, InType: reflect.TypeOf(&AWSElasticBlockStoreVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Affinity).DeepCopyInto(out.(*Affinity)) - return nil - }, InType: reflect.TypeOf(&Affinity{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AttachedVolume).DeepCopyInto(out.(*AttachedVolume)) - return nil - }, InType: reflect.TypeOf(&AttachedVolume{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AvoidPods).DeepCopyInto(out.(*AvoidPods)) - return nil - }, InType: reflect.TypeOf(&AvoidPods{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AzureDiskVolumeSource).DeepCopyInto(out.(*AzureDiskVolumeSource)) - return nil - }, InType: reflect.TypeOf(&AzureDiskVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AzureFilePersistentVolumeSource).DeepCopyInto(out.(*AzureFilePersistentVolumeSource)) - return nil - }, InType: reflect.TypeOf(&AzureFilePersistentVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AzureFileVolumeSource).DeepCopyInto(out.(*AzureFileVolumeSource)) - return nil - }, InType: reflect.TypeOf(&AzureFileVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Binding).DeepCopyInto(out.(*Binding)) - return nil - }, InType: reflect.TypeOf(&Binding{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - 
in.(*Capabilities).DeepCopyInto(out.(*Capabilities)) - return nil - }, InType: reflect.TypeOf(&Capabilities{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CephFSPersistentVolumeSource).DeepCopyInto(out.(*CephFSPersistentVolumeSource)) - return nil - }, InType: reflect.TypeOf(&CephFSPersistentVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CephFSVolumeSource).DeepCopyInto(out.(*CephFSVolumeSource)) - return nil - }, InType: reflect.TypeOf(&CephFSVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CinderVolumeSource).DeepCopyInto(out.(*CinderVolumeSource)) - return nil - }, InType: reflect.TypeOf(&CinderVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClientIPConfig).DeepCopyInto(out.(*ClientIPConfig)) - return nil - }, InType: reflect.TypeOf(&ClientIPConfig{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ComponentCondition).DeepCopyInto(out.(*ComponentCondition)) - return nil - }, InType: reflect.TypeOf(&ComponentCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ComponentStatus).DeepCopyInto(out.(*ComponentStatus)) - return nil - }, InType: reflect.TypeOf(&ComponentStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ComponentStatusList).DeepCopyInto(out.(*ComponentStatusList)) - return nil - }, InType: reflect.TypeOf(&ComponentStatusList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMap).DeepCopyInto(out.(*ConfigMap)) - return nil - }, InType: reflect.TypeOf(&ConfigMap{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMapEnvSource).DeepCopyInto(out.(*ConfigMapEnvSource)) - return nil - }, InType: reflect.TypeOf(&ConfigMapEnvSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMapKeySelector).DeepCopyInto(out.(*ConfigMapKeySelector)) - return nil - }, InType: reflect.TypeOf(&ConfigMapKeySelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMapList).DeepCopyInto(out.(*ConfigMapList)) - return nil - }, InType: reflect.TypeOf(&ConfigMapList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMapProjection).DeepCopyInto(out.(*ConfigMapProjection)) - return nil - }, InType: reflect.TypeOf(&ConfigMapProjection{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMapVolumeSource).DeepCopyInto(out.(*ConfigMapVolumeSource)) - return nil - }, InType: reflect.TypeOf(&ConfigMapVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Container).DeepCopyInto(out.(*Container)) - return nil - }, InType: reflect.TypeOf(&Container{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - 
in.(*ContainerImage).DeepCopyInto(out.(*ContainerImage)) - return nil - }, InType: reflect.TypeOf(&ContainerImage{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerPort).DeepCopyInto(out.(*ContainerPort)) - return nil - }, InType: reflect.TypeOf(&ContainerPort{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerState).DeepCopyInto(out.(*ContainerState)) - return nil - }, InType: reflect.TypeOf(&ContainerState{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerStateRunning).DeepCopyInto(out.(*ContainerStateRunning)) - return nil - }, InType: reflect.TypeOf(&ContainerStateRunning{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerStateTerminated).DeepCopyInto(out.(*ContainerStateTerminated)) - return nil - }, InType: reflect.TypeOf(&ContainerStateTerminated{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerStateWaiting).DeepCopyInto(out.(*ContainerStateWaiting)) - return nil - }, InType: reflect.TypeOf(&ContainerStateWaiting{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerStatus).DeepCopyInto(out.(*ContainerStatus)) - return nil - }, InType: reflect.TypeOf(&ContainerStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonEndpoint).DeepCopyInto(out.(*DaemonEndpoint)) - return nil - }, InType: reflect.TypeOf(&DaemonEndpoint{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeleteOptions).DeepCopyInto(out.(*DeleteOptions)) - return nil - }, InType: reflect.TypeOf(&DeleteOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DownwardAPIProjection).DeepCopyInto(out.(*DownwardAPIProjection)) - return nil - }, InType: reflect.TypeOf(&DownwardAPIProjection{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DownwardAPIVolumeFile).DeepCopyInto(out.(*DownwardAPIVolumeFile)) - return nil - }, InType: reflect.TypeOf(&DownwardAPIVolumeFile{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DownwardAPIVolumeSource).DeepCopyInto(out.(*DownwardAPIVolumeSource)) - return nil - }, InType: reflect.TypeOf(&DownwardAPIVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EmptyDirVolumeSource).DeepCopyInto(out.(*EmptyDirVolumeSource)) - return nil - }, InType: reflect.TypeOf(&EmptyDirVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EndpointAddress).DeepCopyInto(out.(*EndpointAddress)) - return nil - }, InType: reflect.TypeOf(&EndpointAddress{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EndpointPort).DeepCopyInto(out.(*EndpointPort)) - return nil - }, InType: reflect.TypeOf(&EndpointPort{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c 
*conversion.Cloner) error { - in.(*EndpointSubset).DeepCopyInto(out.(*EndpointSubset)) - return nil - }, InType: reflect.TypeOf(&EndpointSubset{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Endpoints).DeepCopyInto(out.(*Endpoints)) - return nil - }, InType: reflect.TypeOf(&Endpoints{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EndpointsList).DeepCopyInto(out.(*EndpointsList)) - return nil - }, InType: reflect.TypeOf(&EndpointsList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EnvFromSource).DeepCopyInto(out.(*EnvFromSource)) - return nil - }, InType: reflect.TypeOf(&EnvFromSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EnvVar).DeepCopyInto(out.(*EnvVar)) - return nil - }, InType: reflect.TypeOf(&EnvVar{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EnvVarSource).DeepCopyInto(out.(*EnvVarSource)) - return nil - }, InType: reflect.TypeOf(&EnvVarSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Event).DeepCopyInto(out.(*Event)) - return nil - }, InType: reflect.TypeOf(&Event{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EventList).DeepCopyInto(out.(*EventList)) - return nil - }, InType: reflect.TypeOf(&EventList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EventSource).DeepCopyInto(out.(*EventSource)) - return nil - }, InType: reflect.TypeOf(&EventSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ExecAction).DeepCopyInto(out.(*ExecAction)) - return nil - }, InType: reflect.TypeOf(&ExecAction{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*FCVolumeSource).DeepCopyInto(out.(*FCVolumeSource)) - return nil - }, InType: reflect.TypeOf(&FCVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*FlexVolumeSource).DeepCopyInto(out.(*FlexVolumeSource)) - return nil - }, InType: reflect.TypeOf(&FlexVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*FlockerVolumeSource).DeepCopyInto(out.(*FlockerVolumeSource)) - return nil - }, InType: reflect.TypeOf(&FlockerVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GCEPersistentDiskVolumeSource).DeepCopyInto(out.(*GCEPersistentDiskVolumeSource)) - return nil - }, InType: reflect.TypeOf(&GCEPersistentDiskVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GitRepoVolumeSource).DeepCopyInto(out.(*GitRepoVolumeSource)) - return nil - }, InType: reflect.TypeOf(&GitRepoVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GlusterfsVolumeSource).DeepCopyInto(out.(*GlusterfsVolumeSource)) - return nil - }, InType: 
reflect.TypeOf(&GlusterfsVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HTTPGetAction).DeepCopyInto(out.(*HTTPGetAction)) - return nil - }, InType: reflect.TypeOf(&HTTPGetAction{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HTTPHeader).DeepCopyInto(out.(*HTTPHeader)) - return nil - }, InType: reflect.TypeOf(&HTTPHeader{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Handler).DeepCopyInto(out.(*Handler)) - return nil - }, InType: reflect.TypeOf(&Handler{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HostAlias).DeepCopyInto(out.(*HostAlias)) - return nil - }, InType: reflect.TypeOf(&HostAlias{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HostPathVolumeSource).DeepCopyInto(out.(*HostPathVolumeSource)) - return nil - }, InType: reflect.TypeOf(&HostPathVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ISCSIVolumeSource).DeepCopyInto(out.(*ISCSIVolumeSource)) - return nil - }, InType: reflect.TypeOf(&ISCSIVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*KeyToPath).DeepCopyInto(out.(*KeyToPath)) - return nil - }, InType: reflect.TypeOf(&KeyToPath{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Lifecycle).DeepCopyInto(out.(*Lifecycle)) - return nil - }, InType: reflect.TypeOf(&Lifecycle{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LimitRange).DeepCopyInto(out.(*LimitRange)) - return nil - }, InType: reflect.TypeOf(&LimitRange{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LimitRangeItem).DeepCopyInto(out.(*LimitRangeItem)) - return nil - }, InType: reflect.TypeOf(&LimitRangeItem{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LimitRangeList).DeepCopyInto(out.(*LimitRangeList)) - return nil - }, InType: reflect.TypeOf(&LimitRangeList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LimitRangeSpec).DeepCopyInto(out.(*LimitRangeSpec)) - return nil - }, InType: reflect.TypeOf(&LimitRangeSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*List).DeepCopyInto(out.(*List)) - return nil - }, InType: reflect.TypeOf(&List{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ListOptions).DeepCopyInto(out.(*ListOptions)) - return nil - }, InType: reflect.TypeOf(&ListOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LoadBalancerIngress).DeepCopyInto(out.(*LoadBalancerIngress)) - return nil - }, InType: reflect.TypeOf(&LoadBalancerIngress{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LoadBalancerStatus).DeepCopyInto(out.(*LoadBalancerStatus)) - 
return nil - }, InType: reflect.TypeOf(&LoadBalancerStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LocalObjectReference).DeepCopyInto(out.(*LocalObjectReference)) - return nil - }, InType: reflect.TypeOf(&LocalObjectReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LocalVolumeSource).DeepCopyInto(out.(*LocalVolumeSource)) - return nil - }, InType: reflect.TypeOf(&LocalVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NFSVolumeSource).DeepCopyInto(out.(*NFSVolumeSource)) - return nil - }, InType: reflect.TypeOf(&NFSVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Namespace).DeepCopyInto(out.(*Namespace)) - return nil - }, InType: reflect.TypeOf(&Namespace{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NamespaceList).DeepCopyInto(out.(*NamespaceList)) - return nil - }, InType: reflect.TypeOf(&NamespaceList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NamespaceSpec).DeepCopyInto(out.(*NamespaceSpec)) - return nil - }, InType: reflect.TypeOf(&NamespaceSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NamespaceStatus).DeepCopyInto(out.(*NamespaceStatus)) - return nil - }, InType: reflect.TypeOf(&NamespaceStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Node).DeepCopyInto(out.(*Node)) - return nil - }, InType: reflect.TypeOf(&Node{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeAddress).DeepCopyInto(out.(*NodeAddress)) - return nil - }, InType: reflect.TypeOf(&NodeAddress{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeAffinity).DeepCopyInto(out.(*NodeAffinity)) - return nil - }, InType: reflect.TypeOf(&NodeAffinity{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeCondition).DeepCopyInto(out.(*NodeCondition)) - return nil - }, InType: reflect.TypeOf(&NodeCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeConfigSource).DeepCopyInto(out.(*NodeConfigSource)) - return nil - }, InType: reflect.TypeOf(&NodeConfigSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeDaemonEndpoints).DeepCopyInto(out.(*NodeDaemonEndpoints)) - return nil - }, InType: reflect.TypeOf(&NodeDaemonEndpoints{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeList).DeepCopyInto(out.(*NodeList)) - return nil - }, InType: reflect.TypeOf(&NodeList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeProxyOptions).DeepCopyInto(out.(*NodeProxyOptions)) - return nil - }, InType: reflect.TypeOf(&NodeProxyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c 
*conversion.Cloner) error { - in.(*NodeResources).DeepCopyInto(out.(*NodeResources)) - return nil - }, InType: reflect.TypeOf(&NodeResources{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeSelector).DeepCopyInto(out.(*NodeSelector)) - return nil - }, InType: reflect.TypeOf(&NodeSelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeSelectorRequirement).DeepCopyInto(out.(*NodeSelectorRequirement)) - return nil - }, InType: reflect.TypeOf(&NodeSelectorRequirement{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeSelectorTerm).DeepCopyInto(out.(*NodeSelectorTerm)) - return nil - }, InType: reflect.TypeOf(&NodeSelectorTerm{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeSpec).DeepCopyInto(out.(*NodeSpec)) - return nil - }, InType: reflect.TypeOf(&NodeSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeStatus).DeepCopyInto(out.(*NodeStatus)) - return nil - }, InType: reflect.TypeOf(&NodeStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeSystemInfo).DeepCopyInto(out.(*NodeSystemInfo)) - return nil - }, InType: reflect.TypeOf(&NodeSystemInfo{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ObjectFieldSelector).DeepCopyInto(out.(*ObjectFieldSelector)) - return nil - }, InType: reflect.TypeOf(&ObjectFieldSelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ObjectMeta).DeepCopyInto(out.(*ObjectMeta)) - return nil - }, InType: reflect.TypeOf(&ObjectMeta{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ObjectReference).DeepCopyInto(out.(*ObjectReference)) - return nil - }, InType: reflect.TypeOf(&ObjectReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolume).DeepCopyInto(out.(*PersistentVolume)) - return nil - }, InType: reflect.TypeOf(&PersistentVolume{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaim).DeepCopyInto(out.(*PersistentVolumeClaim)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaim{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaimCondition).DeepCopyInto(out.(*PersistentVolumeClaimCondition)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaimCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaimList).DeepCopyInto(out.(*PersistentVolumeClaimList)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaimList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaimSpec).DeepCopyInto(out.(*PersistentVolumeClaimSpec)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaimSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out 
interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaimStatus).DeepCopyInto(out.(*PersistentVolumeClaimStatus)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaimStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaimVolumeSource).DeepCopyInto(out.(*PersistentVolumeClaimVolumeSource)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaimVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeList).DeepCopyInto(out.(*PersistentVolumeList)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeSource).DeepCopyInto(out.(*PersistentVolumeSource)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeSpec).DeepCopyInto(out.(*PersistentVolumeSpec)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeStatus).DeepCopyInto(out.(*PersistentVolumeStatus)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PhotonPersistentDiskVolumeSource).DeepCopyInto(out.(*PhotonPersistentDiskVolumeSource)) - return nil - }, InType: reflect.TypeOf(&PhotonPersistentDiskVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Pod).DeepCopyInto(out.(*Pod)) - return nil - }, InType: reflect.TypeOf(&Pod{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodAffinity).DeepCopyInto(out.(*PodAffinity)) - return nil - }, InType: reflect.TypeOf(&PodAffinity{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodAffinityTerm).DeepCopyInto(out.(*PodAffinityTerm)) - return nil - }, InType: reflect.TypeOf(&PodAffinityTerm{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodAntiAffinity).DeepCopyInto(out.(*PodAntiAffinity)) - return nil - }, InType: reflect.TypeOf(&PodAntiAffinity{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodAttachOptions).DeepCopyInto(out.(*PodAttachOptions)) - return nil - }, InType: reflect.TypeOf(&PodAttachOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodCondition).DeepCopyInto(out.(*PodCondition)) - return nil - }, InType: reflect.TypeOf(&PodCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodExecOptions).DeepCopyInto(out.(*PodExecOptions)) - return nil - }, InType: reflect.TypeOf(&PodExecOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodList).DeepCopyInto(out.(*PodList)) - return nil - }, InType: reflect.TypeOf(&PodList{})}, - 
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodLogOptions).DeepCopyInto(out.(*PodLogOptions)) - return nil - }, InType: reflect.TypeOf(&PodLogOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodPortForwardOptions).DeepCopyInto(out.(*PodPortForwardOptions)) - return nil - }, InType: reflect.TypeOf(&PodPortForwardOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodProxyOptions).DeepCopyInto(out.(*PodProxyOptions)) - return nil - }, InType: reflect.TypeOf(&PodProxyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodSecurityContext).DeepCopyInto(out.(*PodSecurityContext)) - return nil - }, InType: reflect.TypeOf(&PodSecurityContext{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodSignature).DeepCopyInto(out.(*PodSignature)) - return nil - }, InType: reflect.TypeOf(&PodSignature{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodSpec).DeepCopyInto(out.(*PodSpec)) - return nil - }, InType: reflect.TypeOf(&PodSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodStatus).DeepCopyInto(out.(*PodStatus)) - return nil - }, InType: reflect.TypeOf(&PodStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodStatusResult).DeepCopyInto(out.(*PodStatusResult)) - return nil - }, InType: reflect.TypeOf(&PodStatusResult{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodTemplate).DeepCopyInto(out.(*PodTemplate)) - return nil - }, InType: reflect.TypeOf(&PodTemplate{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodTemplateList).DeepCopyInto(out.(*PodTemplateList)) - return nil - }, InType: reflect.TypeOf(&PodTemplateList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodTemplateSpec).DeepCopyInto(out.(*PodTemplateSpec)) - return nil - }, InType: reflect.TypeOf(&PodTemplateSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PortworxVolumeSource).DeepCopyInto(out.(*PortworxVolumeSource)) - return nil - }, InType: reflect.TypeOf(&PortworxVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Preconditions).DeepCopyInto(out.(*Preconditions)) - return nil - }, InType: reflect.TypeOf(&Preconditions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PreferAvoidPodsEntry).DeepCopyInto(out.(*PreferAvoidPodsEntry)) - return nil - }, InType: reflect.TypeOf(&PreferAvoidPodsEntry{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PreferredSchedulingTerm).DeepCopyInto(out.(*PreferredSchedulingTerm)) - return nil - }, InType: reflect.TypeOf(&PreferredSchedulingTerm{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c 
*conversion.Cloner) error { - in.(*Probe).DeepCopyInto(out.(*Probe)) - return nil - }, InType: reflect.TypeOf(&Probe{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ProjectedVolumeSource).DeepCopyInto(out.(*ProjectedVolumeSource)) - return nil - }, InType: reflect.TypeOf(&ProjectedVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*QuobyteVolumeSource).DeepCopyInto(out.(*QuobyteVolumeSource)) - return nil - }, InType: reflect.TypeOf(&QuobyteVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RBDVolumeSource).DeepCopyInto(out.(*RBDVolumeSource)) - return nil - }, InType: reflect.TypeOf(&RBDVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RangeAllocation).DeepCopyInto(out.(*RangeAllocation)) - return nil - }, InType: reflect.TypeOf(&RangeAllocation{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicationController).DeepCopyInto(out.(*ReplicationController)) - return nil - }, InType: reflect.TypeOf(&ReplicationController{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicationControllerCondition).DeepCopyInto(out.(*ReplicationControllerCondition)) - return nil - }, InType: reflect.TypeOf(&ReplicationControllerCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicationControllerList).DeepCopyInto(out.(*ReplicationControllerList)) - return nil - }, InType: reflect.TypeOf(&ReplicationControllerList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicationControllerSpec).DeepCopyInto(out.(*ReplicationControllerSpec)) - return nil - }, InType: reflect.TypeOf(&ReplicationControllerSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicationControllerStatus).DeepCopyInto(out.(*ReplicationControllerStatus)) - return nil - }, InType: reflect.TypeOf(&ReplicationControllerStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceFieldSelector).DeepCopyInto(out.(*ResourceFieldSelector)) - return nil - }, InType: reflect.TypeOf(&ResourceFieldSelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceQuota).DeepCopyInto(out.(*ResourceQuota)) - return nil - }, InType: reflect.TypeOf(&ResourceQuota{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceQuotaList).DeepCopyInto(out.(*ResourceQuotaList)) - return nil - }, InType: reflect.TypeOf(&ResourceQuotaList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceQuotaSpec).DeepCopyInto(out.(*ResourceQuotaSpec)) - return nil - }, InType: reflect.TypeOf(&ResourceQuotaSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceQuotaStatus).DeepCopyInto(out.(*ResourceQuotaStatus)) - return nil - }, InType: 
reflect.TypeOf(&ResourceQuotaStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceRequirements).DeepCopyInto(out.(*ResourceRequirements)) - return nil - }, InType: reflect.TypeOf(&ResourceRequirements{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SELinuxOptions).DeepCopyInto(out.(*SELinuxOptions)) - return nil - }, InType: reflect.TypeOf(&SELinuxOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ScaleIOVolumeSource).DeepCopyInto(out.(*ScaleIOVolumeSource)) - return nil - }, InType: reflect.TypeOf(&ScaleIOVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Secret).DeepCopyInto(out.(*Secret)) - return nil - }, InType: reflect.TypeOf(&Secret{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretEnvSource).DeepCopyInto(out.(*SecretEnvSource)) - return nil - }, InType: reflect.TypeOf(&SecretEnvSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretKeySelector).DeepCopyInto(out.(*SecretKeySelector)) - return nil - }, InType: reflect.TypeOf(&SecretKeySelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretList).DeepCopyInto(out.(*SecretList)) - return nil - }, InType: reflect.TypeOf(&SecretList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretProjection).DeepCopyInto(out.(*SecretProjection)) - return nil - }, InType: reflect.TypeOf(&SecretProjection{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretReference).DeepCopyInto(out.(*SecretReference)) - return nil - }, InType: reflect.TypeOf(&SecretReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretVolumeSource).DeepCopyInto(out.(*SecretVolumeSource)) - return nil - }, InType: reflect.TypeOf(&SecretVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecurityContext).DeepCopyInto(out.(*SecurityContext)) - return nil - }, InType: reflect.TypeOf(&SecurityContext{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SerializedReference).DeepCopyInto(out.(*SerializedReference)) - return nil - }, InType: reflect.TypeOf(&SerializedReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Service).DeepCopyInto(out.(*Service)) - return nil - }, InType: reflect.TypeOf(&Service{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceAccount).DeepCopyInto(out.(*ServiceAccount)) - return nil - }, InType: reflect.TypeOf(&ServiceAccount{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceAccountList).DeepCopyInto(out.(*ServiceAccountList)) - return nil - }, InType: reflect.TypeOf(&ServiceAccountList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in 
interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceList).DeepCopyInto(out.(*ServiceList)) - return nil - }, InType: reflect.TypeOf(&ServiceList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServicePort).DeepCopyInto(out.(*ServicePort)) - return nil - }, InType: reflect.TypeOf(&ServicePort{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceProxyOptions).DeepCopyInto(out.(*ServiceProxyOptions)) - return nil - }, InType: reflect.TypeOf(&ServiceProxyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceSpec).DeepCopyInto(out.(*ServiceSpec)) - return nil - }, InType: reflect.TypeOf(&ServiceSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceStatus).DeepCopyInto(out.(*ServiceStatus)) - return nil - }, InType: reflect.TypeOf(&ServiceStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SessionAffinityConfig).DeepCopyInto(out.(*SessionAffinityConfig)) - return nil - }, InType: reflect.TypeOf(&SessionAffinityConfig{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StorageOSPersistentVolumeSource).DeepCopyInto(out.(*StorageOSPersistentVolumeSource)) - return nil - }, InType: reflect.TypeOf(&StorageOSPersistentVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StorageOSVolumeSource).DeepCopyInto(out.(*StorageOSVolumeSource)) - return nil - }, InType: reflect.TypeOf(&StorageOSVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Sysctl).DeepCopyInto(out.(*Sysctl)) - return nil - }, InType: reflect.TypeOf(&Sysctl{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*TCPSocketAction).DeepCopyInto(out.(*TCPSocketAction)) - return nil - }, InType: reflect.TypeOf(&TCPSocketAction{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Taint).DeepCopyInto(out.(*Taint)) - return nil - }, InType: reflect.TypeOf(&Taint{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Toleration).DeepCopyInto(out.(*Toleration)) - return nil - }, InType: reflect.TypeOf(&Toleration{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Volume).DeepCopyInto(out.(*Volume)) - return nil - }, InType: reflect.TypeOf(&Volume{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*VolumeMount).DeepCopyInto(out.(*VolumeMount)) - return nil - }, InType: reflect.TypeOf(&VolumeMount{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*VolumeProjection).DeepCopyInto(out.(*VolumeProjection)) - return nil - }, InType: reflect.TypeOf(&VolumeProjection{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*VolumeSource).DeepCopyInto(out.(*VolumeSource)) - return nil - }, InType: 
reflect.TypeOf(&VolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*VsphereVirtualDiskVolumeSource).DeepCopyInto(out.(*VsphereVirtualDiskVolumeSource)) - return nil - }, InType: reflect.TypeOf(&VsphereVirtualDiskVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*WeightedPodAffinityTerm).DeepCopyInto(out.(*WeightedPodAffinityTerm)) - return nil - }, InType: reflect.TypeOf(&WeightedPodAffinityTerm{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AWSElasticBlockStoreVolumeSource) DeepCopyInto(out *AWSElasticBlockStoreVolumeSource) { *out = *in @@ -973,6 +246,22 @@ func (in *Binding) DeepCopyObject() runtime.Object { } } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSIPersistentVolumeSource) DeepCopyInto(out *CSIPersistentVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIPersistentVolumeSource. +func (in *CSIPersistentVolumeSource) DeepCopy() *CSIPersistentVolumeSource { + if in == nil { + return nil + } + out := new(CSIPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Capabilities) DeepCopyInto(out *Capabilities) { *out = *in @@ -1417,6 +706,11 @@ func (in *Container) DeepCopyInto(out *Container) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.VolumeDevices != nil { + in, out := &in.VolumeDevices, &out.VolumeDevices + *out = make([]VolumeDevice, len(*in)) + copy(*out, *in) + } if in.LivenessProbe != nil { in, out := &in.LivenessProbe, &out.LivenessProbe if *in == nil { @@ -2089,6 +1383,25 @@ func (in *Event) DeepCopyInto(out *Event) { out.Source = in.Source in.FirstTimestamp.DeepCopyInto(&out.FirstTimestamp) in.LastTimestamp.DeepCopyInto(&out.LastTimestamp) + in.EventTime.DeepCopyInto(&out.EventTime) + if in.Series != nil { + in, out := &in.Series, &out.Series + if *in == nil { + *out = nil + } else { + *out = new(EventSeries) + (*in).DeepCopyInto(*out) + } + } + if in.Related != nil { + in, out := &in.Related, &out.Related + if *in == nil { + *out = nil + } else { + *out = new(ObjectReference) + **out = **in + } + } return } @@ -2145,6 +1458,23 @@ func (in *EventList) DeepCopyObject() runtime.Object { } } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventSeries) DeepCopyInto(out *EventSeries) { + *out = *in + in.LastObservedTime.DeepCopyInto(&out.LastObservedTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSeries. +func (in *EventSeries) DeepCopy() *EventSeries { + if in == nil { + return nil + } + out := new(EventSeries) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EventSource) DeepCopyInto(out *EventSource) { *out = *in @@ -2440,6 +1770,45 @@ func (in *HostPathVolumeSource) DeepCopy() *HostPathVolumeSource { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ISCSIPersistentVolumeSource) DeepCopyInto(out *ISCSIPersistentVolumeSource) { + *out = *in + if in.Portals != nil { + in, out := &in.Portals, &out.Portals + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(SecretReference) + **out = **in + } + } + if in.InitiatorName != nil { + in, out := &in.InitiatorName, &out.InitiatorName + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ISCSIPersistentVolumeSource. +func (in *ISCSIPersistentVolumeSource) DeepCopy() *ISCSIPersistentVolumeSource { + if in == nil { + return nil + } + out := new(ISCSIPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ISCSIVolumeSource) DeepCopyInto(out *ISCSIVolumeSource) { *out = *in @@ -3582,6 +2951,15 @@ func (in *PersistentVolumeClaimSpec) DeepCopyInto(out *PersistentVolumeClaimSpec **out = **in } } + if in.VolumeMode != nil { + in, out := &in.VolumeMode, &out.VolumeMode + if *in == nil { + *out = nil + } else { + *out = new(PersistentVolumeMode) + **out = **in + } + } return } @@ -3733,7 +3111,7 @@ func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) { if *in == nil { *out = nil } else { - *out = new(RBDVolumeSource) + *out = new(RBDPersistentVolumeSource) (*in).DeepCopyInto(*out) } } @@ -3742,7 +3120,7 @@ func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) { if *in == nil { *out = nil } else { - *out = new(ISCSIVolumeSource) + *out = new(ISCSIPersistentVolumeSource) (*in).DeepCopyInto(*out) } } @@ -3850,7 +3228,7 @@ func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) { if *in == nil { *out = nil } else { - *out = new(ScaleIOVolumeSource) + *out = new(ScaleIOPersistentVolumeSource) (*in).DeepCopyInto(*out) } } @@ -3872,6 +3250,15 @@ func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) { (*in).DeepCopyInto(*out) } } + if in.CSI != nil { + in, out := &in.CSI, &out.CSI + if *in == nil { + *out = nil + } else { + *out = new(CSIPersistentVolumeSource) + **out = **in + } + } return } @@ -3915,6 +3302,15 @@ func (in *PersistentVolumeSpec) DeepCopyInto(out *PersistentVolumeSpec) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.VolumeMode != nil { + in, out := &in.VolumeMode, &out.VolumeMode + if *in == nil { + *out = nil + } else { + *out = new(PersistentVolumeMode) + **out = **in + } + } return } @@ -4123,6 +3519,64 @@ func (in *PodCondition) DeepCopy() *PodCondition { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodDNSConfig) DeepCopyInto(out *PodDNSConfig) { + *out = *in + if in.Nameservers != nil { + in, out := &in.Nameservers, &out.Nameservers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Searches != nil { + in, out := &in.Searches, &out.Searches + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make([]PodDNSConfigOption, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDNSConfig. 
+func (in *PodDNSConfig) DeepCopy() *PodDNSConfig { + if in == nil { + return nil + } + out := new(PodDNSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodDNSConfigOption) DeepCopyInto(out *PodDNSConfigOption) { + *out = *in + if in.Value != nil { + in, out := &in.Value, &out.Value + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDNSConfigOption. +func (in *PodDNSConfigOption) DeepCopy() *PodDNSConfigOption { + if in == nil { + return nil + } + out := new(PodDNSConfigOption) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PodExecOptions) DeepCopyInto(out *PodExecOptions) { *out = *in @@ -4493,6 +3947,15 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) { **out = **in } } + if in.DNSConfig != nil { + in, out := &in.DNSConfig, &out.DNSConfig + if *in == nil { + *out = nil + } else { + *out = new(PodDNSConfig) + (*in).DeepCopyInto(*out) + } + } return } @@ -4801,6 +4264,36 @@ func (in *QuobyteVolumeSource) DeepCopy() *QuobyteVolumeSource { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RBDPersistentVolumeSource) DeepCopyInto(out *RBDPersistentVolumeSource) { + *out = *in + if in.CephMonitors != nil { + in, out := &in.CephMonitors, &out.CephMonitors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(SecretReference) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RBDPersistentVolumeSource. +func (in *RBDPersistentVolumeSource) DeepCopy() *RBDPersistentVolumeSource { + if in == nil { + return nil + } + out := new(RBDPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RBDVolumeSource) DeepCopyInto(out *RBDVolumeSource) { *out = *in @@ -5191,6 +4684,31 @@ func (in *SELinuxOptions) DeepCopy() *SELinuxOptions { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleIOPersistentVolumeSource) DeepCopyInto(out *ScaleIOPersistentVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(SecretReference) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleIOPersistentVolumeSource. +func (in *ScaleIOPersistentVolumeSource) DeepCopy() *ScaleIOPersistentVolumeSource { + if in == nil { + return nil + } + out := new(ScaleIOPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ScaleIOVolumeSource) DeepCopyInto(out *ScaleIOVolumeSource) { *out = *in @@ -5905,7 +5423,15 @@ func (in *TCPSocketAction) DeepCopy() *TCPSocketAction { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Taint) DeepCopyInto(out *Taint) { *out = *in - in.TimeAdded.DeepCopyInto(&out.TimeAdded) + if in.TimeAdded != nil { + in, out := &in.TimeAdded, &out.TimeAdded + if *in == nil { + *out = nil + } else { + *out = new(meta_v1.Time) + (*in).DeepCopyInto(*out) + } + } return } @@ -5961,6 +5487,22 @@ func (in *Volume) DeepCopy() *Volume { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeDevice) DeepCopyInto(out *VolumeDevice) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeDevice. +func (in *VolumeDevice) DeepCopy() *VolumeDevice { + if in == nil { + return nil + } + out := new(VolumeDevice) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VolumeMount) DeepCopyInto(out *VolumeMount) { *out = *in diff --git a/vendor/k8s.io/api/events/v1beta1/BUILD b/vendor/k8s.io/api/events/v1beta1/BUILD new file mode 100644 index 000000000000..851874e78cbb --- /dev/null +++ b/vendor/k8s.io/api/events/v1beta1/BUILD @@ -0,0 +1,42 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +filegroup( + name = "go_default_library_protos", + srcs = ["generated.proto"], + visibility = ["//visibility:public"], +) + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "generated.pb.go", + "register.go", + "types.go", + "types_swagger_doc_generated.go", + "zz_generated.deepcopy.go", + ], + importpath = "k8s.io/api/events/v1beta1", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/gogo/protobuf/proto:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/api/events/v1beta1/doc.go b/vendor/k8s.io/api/events/v1beta1/doc.go new file mode 100644 index 000000000000..0841008b743f --- /dev/null +++ b/vendor/k8s.io/api/events/v1beta1/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true + +// +groupName=events.k8s.io +package v1beta1 diff --git a/vendor/k8s.io/api/events/v1beta1/generated.pb.go b/vendor/k8s.io/api/events/v1beta1/generated.pb.go new file mode 100644 index 000000000000..4861697c09d9 --- /dev/null +++ b/vendor/k8s.io/api/events/v1beta1/generated.pb.go @@ -0,0 +1,1306 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/api/events/v1beta1/generated.proto +// DO NOT EDIT! + +/* + Package v1beta1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/events/v1beta1/generated.proto + + It has these top-level messages: + Event + EventList + EventSeries +*/ +package v1beta1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_api_core_v1 "k8s.io/api/core/v1" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *Event) Reset() { *m = Event{} } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *EventList) Reset() { *m = EventList{} } +func (*EventList) ProtoMessage() {} +func (*EventList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *EventSeries) Reset() { *m = EventSeries{} } +func (*EventSeries) ProtoMessage() {} +func (*EventSeries) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func init() { + proto.RegisterType((*Event)(nil), "k8s.io.api.events.v1beta1.Event") + proto.RegisterType((*EventList)(nil), "k8s.io.api.events.v1beta1.EventList") + proto.RegisterType((*EventSeries)(nil), "k8s.io.api.events.v1beta1.EventSeries") +} +func (m *Event) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Event) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.EventTime.Size())) + n2, err := m.EventTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + if m.Series != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Series.Size())) + n3, err := m.Series.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingController))) + i += copy(dAtA[i:], m.ReportingController) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingInstance))) + i += copy(dAtA[i:], m.ReportingInstance) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Action))) + i += copy(dAtA[i:], m.Action) + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Regarding.Size())) + n4, err := m.Regarding.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + if m.Related != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Related.Size())) + n5, err := m.Related.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Note))) + i += copy(dAtA[i:], m.Note) + dAtA[i] = 0x5a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x62 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DeprecatedSource.Size())) + n6, err := m.DeprecatedSource.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + dAtA[i] = 0x6a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DeprecatedFirstTimestamp.Size())) + n7, err := m.DeprecatedFirstTimestamp.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + dAtA[i] = 0x72 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DeprecatedLastTimestamp.Size())) + n8, err := m.DeprecatedLastTimestamp.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + dAtA[i] = 0x78 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DeprecatedCount)) + 
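+	// DeprecatedCount (field 15) is a plain varint, so only its value follows the 0x78 key; i now holds the total number of bytes written.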
return i, nil +} + +func (m *EventList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n9, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *EventSeries) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventSeries) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastObservedTime.Size())) + n10, err := m.LastObservedTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.State))) + i += copy(dAtA[i:], m.State) + return i, nil +} + +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Event) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.EventTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Series != nil { + l = m.Series.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.ReportingController) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ReportingInstance) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Action) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Regarding.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Related != nil { + l = m.Related.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Note) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = m.DeprecatedSource.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.DeprecatedFirstTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.DeprecatedLastTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.DeprecatedCount)) + return n +} + +func (m *EventList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + } + } + return n +} + +func (m *EventSeries) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Count)) + l = m.LastObservedTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.State) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Event) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Event{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `EventTime:` + strings.Replace(strings.Replace(this.EventTime.String(), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1), `&`, ``, 1) + `,`, + `Series:` + strings.Replace(fmt.Sprintf("%v", this.Series), "EventSeries", "EventSeries", 1) + `,`, + `ReportingController:` + fmt.Sprintf("%v", this.ReportingController) + `,`, + `ReportingInstance:` + fmt.Sprintf("%v", this.ReportingInstance) + `,`, + `Action:` + fmt.Sprintf("%v", this.Action) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Regarding:` + strings.Replace(strings.Replace(this.Regarding.String(), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1), `&`, ``, 1) + `,`, + `Related:` + strings.Replace(fmt.Sprintf("%v", this.Related), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1) + `,`, + `Note:` + fmt.Sprintf("%v", this.Note) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `DeprecatedSource:` + strings.Replace(strings.Replace(this.DeprecatedSource.String(), "EventSource", "k8s_io_api_core_v1.EventSource", 1), `&`, ``, 1) + `,`, + `DeprecatedFirstTimestamp:` + strings.Replace(strings.Replace(this.DeprecatedFirstTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `DeprecatedLastTimestamp:` + strings.Replace(strings.Replace(this.DeprecatedLastTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `DeprecatedCount:` + fmt.Sprintf("%v", this.DeprecatedCount) + `,`, + `}`, + }, "") + return s +} +func (this *EventList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EventList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Event", "Event", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *EventSeries) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EventSeries{`, + `Count:` + fmt.Sprintf("%v", this.Count) + `,`, + `LastObservedTime:` + strings.Replace(strings.Replace(this.LastObservedTime.String(), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1), `&`, ``, 1) + `,`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Event) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
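+				// A varint that needs more than 64 bits of shift is malformed input.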
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Event: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EventTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.EventTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Series", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Series == nil { + m.Series = &EventSeries{} + } + if err := m.Series.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReportingController", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ReportingController = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReportingInstance", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + 
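+			// stringLen is the byte length of the ReportingInstance value; postIndex marks where it ends in dAtA.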
postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ReportingInstance = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Action = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Regarding", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Regarding.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Related", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Related == nil { + m.Related = &k8s_io_api_core_v1.ObjectReference{} + } + if err := m.Related.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Note", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Note = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift 
+= 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedSource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DeprecatedSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedFirstTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DeprecatedFirstTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedLastTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DeprecatedLastTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedCount", wireType) + } + m.DeprecatedCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DeprecatedCount |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) 
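+		// The key just decoded packs the field number in its high bits and the wire type in its low three bits.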
+ wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Event{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventSeries) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventSeries: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventSeries: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Count |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastObservedTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastObservedTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + 
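+			// Advance past the embedded MicroTime message that was just decoded.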
iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.State = EventSeriesState(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/events/v1beta1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 814 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xcd, 0x6e, 0xdb, 0x46, + 0x10, 0x16, 0x13, 0x4b, 0xb6, 0x56, 0x49, 0x2c, 0x6f, 0x0e, 0xde, 0xb8, 0x00, 0xa5, 0x3a, 0x40, + 0x60, 0x14, 0x08, 0x59, 0xa7, 0x45, 0xdb, 0x6b, 0x18, 0xbb, 0x45, 0x02, 0xbb, 0x01, 0xd6, 0x3e, + 0x15, 0x3d, 0x64, 0x45, 0x4d, 0xe8, 0xad, 0xa5, 0x5d, 0x62, 0x77, 0x29, 0xc0, 0xb7, 0x5e, 0x0a, + 
0xf4, 0xd8, 0x67, 0xe8, 0x13, 0xf4, 0x31, 0x7c, 0xcc, 0x31, 0x27, 0xa1, 0x66, 0xdf, 0xa2, 0xa7, + 0x82, 0xcb, 0x95, 0x28, 0x8b, 0x16, 0xec, 0x22, 0x37, 0x72, 0xe6, 0xfb, 0x99, 0x19, 0x0e, 0x07, + 0x45, 0xe7, 0xdf, 0xe9, 0x80, 0xcb, 0xf0, 0x3c, 0x1b, 0x80, 0x12, 0x60, 0x40, 0x87, 0x13, 0x10, + 0x43, 0xa9, 0x42, 0x97, 0x60, 0x29, 0x0f, 0x61, 0x02, 0xc2, 0xe8, 0x70, 0xb2, 0x3f, 0x00, 0xc3, + 0xf6, 0xc3, 0x04, 0x04, 0x28, 0x66, 0x60, 0x18, 0xa4, 0x4a, 0x1a, 0x89, 0x9f, 0x94, 0xd0, 0x80, + 0xa5, 0x3c, 0x28, 0xa1, 0x81, 0x83, 0xee, 0x3c, 0x4f, 0xb8, 0x39, 0xcb, 0x06, 0x41, 0x2c, 0xc7, + 0x61, 0x22, 0x13, 0x19, 0x5a, 0xc6, 0x20, 0x7b, 0x6f, 0xdf, 0xec, 0x8b, 0x7d, 0x2a, 0x95, 0x76, + 0x76, 0x17, 0x4c, 0x63, 0xa9, 0x20, 0x9c, 0xd4, 0xdc, 0x76, 0xbe, 0xae, 0x30, 0x63, 0x16, 0x9f, + 0x71, 0x01, 0xea, 0x22, 0x4c, 0xcf, 0x93, 0x22, 0xa0, 0xc3, 0x31, 0x18, 0x76, 0x13, 0x2b, 0x5c, + 0xc5, 0x52, 0x99, 0x30, 0x7c, 0x0c, 0x35, 0xc2, 0x37, 0xb7, 0x11, 0x74, 0x7c, 0x06, 0x63, 0x56, + 0xe3, 0x7d, 0xb5, 0x8a, 0x97, 0x19, 0x3e, 0x0a, 0xb9, 0x30, 0xda, 0xa8, 0x65, 0xd2, 0xee, 0x9f, + 0x6d, 0xd4, 0x3c, 0x2c, 0x26, 0x87, 0xdf, 0xa1, 0x8d, 0xa2, 0x85, 0x21, 0x33, 0x8c, 0x78, 0x7d, + 0x6f, 0xaf, 0xf3, 0xe2, 0xcb, 0xa0, 0x1a, 0xef, 0x5c, 0x31, 0x48, 0xcf, 0x93, 0x22, 0xa0, 0x83, + 0x02, 0x1d, 0x4c, 0xf6, 0x83, 0xb7, 0x83, 0x5f, 0x20, 0x36, 0xc7, 0x60, 0x58, 0x84, 0x2f, 0xa7, + 0xbd, 0x46, 0x3e, 0xed, 0xa1, 0x2a, 0x46, 0xe7, 0xaa, 0xf8, 0x1d, 0x6a, 0xdb, 0x8f, 0x74, 0xca, + 0xc7, 0x40, 0xee, 0x59, 0x8b, 0xf0, 0x6e, 0x16, 0xc7, 0x3c, 0x56, 0xb2, 0xa0, 0x45, 0x5b, 0xce, + 0xa1, 0x7d, 0x38, 0x53, 0xa2, 0x95, 0x28, 0x7e, 0x83, 0x5a, 0x1a, 0x14, 0x07, 0x4d, 0xee, 0x5b, + 0xf9, 0x67, 0xc1, 0xca, 0x05, 0x09, 0xac, 0xc0, 0x89, 0x45, 0x47, 0x28, 0x9f, 0xf6, 0x5a, 0xe5, + 0x33, 0x75, 0x0a, 0xf8, 0x18, 0x3d, 0x56, 0x90, 0x4a, 0x65, 0xb8, 0x48, 0x5e, 0x49, 0x61, 0x94, + 0x1c, 0x8d, 0x40, 0x91, 0xb5, 0xbe, 0xb7, 0xd7, 0x8e, 0x3e, 0x73, 0x65, 0x3c, 0xa6, 0x75, 0x08, + 0xbd, 0x89, 0x87, 0x7f, 0x40, 0x5b, 0xf3, 0xf0, 0x6b, 0xa1, 0x0d, 0x13, 0x31, 0x90, 0xa6, 0x15, + 0x7b, 0xe2, 0xc4, 0xb6, 0xe8, 0x32, 0x80, 0xd6, 0x39, 0xf8, 0x19, 0x6a, 0xb1, 0xd8, 0x70, 0x29, + 0x48, 0xcb, 0xb2, 0x1f, 0x39, 0x76, 0xeb, 0xa5, 0x8d, 0x52, 0x97, 0x2d, 0x70, 0x0a, 0x98, 0x96, + 0x82, 0xac, 0x5f, 0xc7, 0x51, 0x1b, 0xa5, 0x2e, 0x8b, 0x4f, 0x51, 0x5b, 0x41, 0xc2, 0xd4, 0x90, + 0x8b, 0x84, 0x6c, 0xd8, 0xb1, 0x3d, 0x5d, 0x1c, 0x5b, 0xf1, 0x37, 0x54, 0x9f, 0x99, 0xc2, 0x7b, + 0x50, 0x20, 0xe2, 0x85, 0x2f, 0x41, 0x67, 0x6c, 0x5a, 0x09, 0xe1, 0x37, 0x68, 0x5d, 0xc1, 0xa8, + 0x58, 0x34, 0xd2, 0xbe, 0xbb, 0x66, 0x27, 0x9f, 0xf6, 0xd6, 0x69, 0xc9, 0xa3, 0x33, 0x01, 0xdc, + 0x47, 0x6b, 0x42, 0x1a, 0x20, 0xc8, 0xf6, 0xf1, 0xc0, 0xf9, 0xae, 0xfd, 0x28, 0x0d, 0x50, 0x9b, + 0x29, 0x10, 0xe6, 0x22, 0x05, 0xd2, 0xb9, 0x8e, 0x38, 0xbd, 0x48, 0x81, 0xda, 0x0c, 0x06, 0xd4, + 0x1d, 0x42, 0xaa, 0x20, 0x2e, 0x14, 0x4f, 0x64, 0xa6, 0x62, 0x20, 0x0f, 0x6c, 0x61, 0xbd, 0x9b, + 0x0a, 0x2b, 0x97, 0xc3, 0xc2, 0x22, 0xe2, 0xe4, 0xba, 0x07, 0x4b, 0x02, 0xb4, 0x26, 0x89, 0x7f, + 0xf7, 0x10, 0xa9, 0x82, 0xdf, 0x73, 0xa5, 0xed, 0x62, 0x6a, 0xc3, 0xc6, 0x29, 0x79, 0x68, 0xfd, + 0xbe, 0xb8, 0xdb, 0xca, 0xdb, 0x6d, 0xef, 0x3b, 0x6b, 0x72, 0xb0, 0x42, 0x93, 0xae, 0x74, 0xc3, + 0xbf, 0x79, 0x68, 0xbb, 0x4a, 0x1e, 0xb1, 0xc5, 0x4a, 0x1e, 0xfd, 0xef, 0x4a, 0x7a, 0xae, 0x92, + 0xed, 0x83, 0x9b, 0x25, 0xe9, 0x2a, 0x2f, 0xfc, 0x12, 0x6d, 0x56, 0xa9, 0x57, 0x32, 0x13, 0x86, + 0x6c, 0xf6, 0xbd, 0xbd, 0x66, 0xb4, 0xed, 0x24, 0x37, 0x0f, 0xae, 0xa7, 0xe9, 0x32, 0x7e, 0xf7, + 0x2f, 0x0f, 0x95, 0xff, 
0xfb, 0x11, 0xd7, 0x06, 0xff, 0x5c, 0x3b, 0x54, 0xc1, 0xdd, 0x1a, 0x29, + 0xd8, 0xf6, 0x4c, 0x75, 0x9d, 0xf3, 0xc6, 0x2c, 0xb2, 0x70, 0xa4, 0x0e, 0x51, 0x93, 0x1b, 0x18, + 0x6b, 0x72, 0xaf, 0x7f, 0x7f, 0xaf, 0xf3, 0xa2, 0x7f, 0xdb, 0x05, 0x89, 0x1e, 0x3a, 0xb1, 0xe6, + 0xeb, 0x82, 0x46, 0x4b, 0xf6, 0x6e, 0xee, 0xa1, 0xce, 0xc2, 0x85, 0xc1, 0x4f, 0x51, 0x33, 0xb6, + 0xbd, 0x7b, 0xb6, 0xf7, 0x39, 0xa9, 0xec, 0xb8, 0xcc, 0xe1, 0x0c, 0x75, 0x47, 0x4c, 0x9b, 0xb7, + 0x03, 0x0d, 0x6a, 0x02, 0xc3, 0x4f, 0xb9, 0x93, 0xf3, 0xa5, 0x3d, 0x5a, 0x12, 0xa4, 0x35, 0x0b, + 0xfc, 0x2d, 0x6a, 0x6a, 0xc3, 0x0c, 0xd8, 0xa3, 0xd9, 0x8e, 0x3e, 0x9f, 0xd5, 0x76, 0x52, 0x04, + 0xff, 0x9d, 0xf6, 0xba, 0x0b, 0x8d, 0xd8, 0x18, 0x2d, 0xf1, 0xd1, 0xf3, 0xcb, 0x2b, 0xbf, 0xf1, + 0xe1, 0xca, 0x6f, 0x7c, 0xbc, 0xf2, 0x1b, 0xbf, 0xe6, 0xbe, 0x77, 0x99, 0xfb, 0xde, 0x87, 0xdc, + 0xf7, 0x3e, 0xe6, 0xbe, 0xf7, 0x77, 0xee, 0x7b, 0x7f, 0xfc, 0xe3, 0x37, 0x7e, 0x5a, 0x77, 0xf3, + 0xfa, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x69, 0xa9, 0x7b, 0x6e, 0xf2, 0x07, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/events/v1beta1/generated.proto b/vendor/k8s.io/api/events/v1beta1/generated.proto new file mode 100644 index 000000000000..c13a12a5eb7a --- /dev/null +++ b/vendor/k8s.io/api/events/v1beta1/generated.proto @@ -0,0 +1,122 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.events.v1beta1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system. +message Event { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Required. Time when this Event was first observed. + optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime eventTime = 2; + + // Data about the Event series this event represents or nil if it's a singleton Event. + // +optional + optional EventSeries series = 3; + + // Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. + // +optional + optional string reportingController = 4; + + // ID of the controller instance, e.g. `kubelet-xyzf`. + // +optional + optional string reportingInstance = 5; + + // What action was taken/failed regarding to the regarding object. + // +optional + optional string action = 6; + + // Why the action was taken. + optional string reason = 7; + + // The object this Event is about. In most cases it's an Object reporting controller implements. + // E.g. 
ReplicaSetController implements ReplicaSets and this event is emitted because + // it acts on some changes in a ReplicaSet object. + // +optional + optional k8s.io.api.core.v1.ObjectReference regarding = 8; + + // Optional secondary object for more complex actions. E.g. when regarding object triggers + // a creation or deletion of related object. + // +optional + optional k8s.io.api.core.v1.ObjectReference related = 9; + + // Optional. A human-readable description of the status of this operation. + // Maximal length of the note is 1kB, but libraries should be prepared to + // handle values up to 64kB. + // +optional + optional string note = 10; + + // Type of this event (Normal, Warning), new types could be added in the + // future. + // +optional + optional string type = 11; + + // Deprecated field assuring backward compatibility with core.v1 Event type + // +optional + optional k8s.io.api.core.v1.EventSource deprecatedSource = 12; + + // Deprecated field assuring backward compatibility with core.v1 Event type + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedFirstTimestamp = 13; + + // Deprecated field assuring backward compatibility with core.v1 Event type + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedLastTimestamp = 14; + + // Deprecated field assuring backward compatibility with core.v1 Event type + // +optional + optional int32 deprecatedCount = 15; +} + +// EventList is a list of Event objects. +message EventList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of schema objects. + repeated Event items = 2; +} + +// EventSeries contain information on series of events, i.e. thing that was/is happening +// continously for some time. +message EventSeries { + // Number of occurrences in this series up to the last heartbeat time + optional int32 count = 1; + + // Time when last Event from the series was seen before last heartbeat. + optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2; + + // Information whether this series is ongoing or finished. + optional string state = 3; +} + diff --git a/vendor/k8s.io/api/events/v1beta1/register.go b/vendor/k8s.io/api/events/v1beta1/register.go new file mode 100644 index 000000000000..4506782914c1 --- /dev/null +++ b/vendor/k8s.io/api/events/v1beta1/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "events.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Event{}, + &EventList{}, + ) + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/events/v1beta1/types.go b/vendor/k8s.io/api/events/v1beta1/types.go new file mode 100644 index 000000000000..1b68bd743e97 --- /dev/null +++ b/vendor/k8s.io/api/events/v1beta1/types.go @@ -0,0 +1,122 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system. +type Event struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Required. Time when this Event was first observed. + EventTime metav1.MicroTime `json:"eventTime" protobuf:"bytes,2,opt,name=eventTime"` + + // Data about the Event series this event represents or nil if it's a singleton Event. + // +optional + Series *EventSeries `json:"series,omitempty" protobuf:"bytes,3,opt,name=series"` + + // Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. + // +optional + ReportingController string `json:"reportingController,omitempty" protobuf:"bytes,4,opt,name=reportingController"` + + // ID of the controller instance, e.g. `kubelet-xyzf`. + // +optional + ReportingInstance string `json:"reportingInstance,omitemtpy" protobuf:"bytes,5,opt,name=reportingInstance"` + + // What action was taken/failed regarding to the regarding object. + // +optional + Action string `json:"action,omitemtpy" protobuf:"bytes,6,name=action"` + + // Why the action was taken. + Reason string `json:"reason,omitempty" protobuf:"bytes,7,name=reason"` + + // The object this Event is about. 
In most cases it's an Object reporting controller implements. + // E.g. ReplicaSetController implements ReplicaSets and this event is emitted because + // it acts on some changes in a ReplicaSet object. + // +optional + Regarding corev1.ObjectReference `json:"regarding,omitempty" protobuf:"bytes,8,opt,name=regarding"` + + // Optional secondary object for more complex actions. E.g. when regarding object triggers + // a creation or deletion of related object. + // +optional + Related *corev1.ObjectReference `json:"related,omitempty" protobuf:"bytes,9,opt,name=related"` + + // Optional. A human-readable description of the status of this operation. + // Maximal length of the note is 1kB, but libraries should be prepared to + // handle values up to 64kB. + // +optional + Note string `json:"note,omitempty" protobuf:"bytes,10,opt,name=note"` + + // Type of this event (Normal, Warning), new types could be added in the + // future. + // +optional + Type string `json:"type,omitempty" protobuf:"bytes,11,opt,name=type"` + + // Deprecated field assuring backward compatibility with core.v1 Event type + // +optional + DeprecatedSource corev1.EventSource `json:"deprecatedSource,omitempty" protobuf:"bytes,12,opt,name=deprecatedSource"` + // Deprecated field assuring backward compatibility with core.v1 Event type + // +optional + DeprecatedFirstTimestamp metav1.Time `json:"deprecatedFirstTimestamp,omitempty" protobuf:"bytes,13,opt,name=deprecatedFirstTimestamp"` + // Deprecated field assuring backward compatibility with core.v1 Event type + // +optional + DeprecatedLastTimestamp metav1.Time `json:"deprecatedLastTimestamp,omitempty" protobuf:"bytes,14,opt,name=deprecatedLastTimestamp"` + // Deprecated field assuring backward compatibility with core.v1 Event type + // +optional + DeprecatedCount int32 `json:"deprecatedCount,omitempty" protobuf:"varint,15,opt,name=deprecatedCount"` +} + +// EventSeries contain information on series of events, i.e. thing that was/is happening +// continously for some time. +type EventSeries struct { + // Number of occurrences in this series up to the last heartbeat time + Count int32 `json:"count" protobuf:"varint,1,opt,name=count"` + // Time when last Event from the series was seen before last heartbeat. + LastObservedTime metav1.MicroTime `json:"lastObservedTime" protobuf:"bytes,2,opt,name=lastObservedTime"` + // Information whether this series is ongoing or finished. + State EventSeriesState `json:"state" protobuf:"bytes,3,opt,name=state"` +} + +type EventSeriesState string + +const ( + EventSeriesStateOngoing EventSeriesState = "Ongoing" + EventSeriesStateFinished EventSeriesState = "Finished" + EventSeriesStateUnknown EventSeriesState = "Unknown" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EventList is a list of Event objects. +type EventList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of schema objects. 
+ Items []Event `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 000000000000..04a4a91227f8 --- /dev/null +++ b/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,73 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_Event = map[string]string{ + "": "Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system.", + "eventTime": "Required. Time when this Event was first observed.", + "series": "Data about the Event series this event represents or nil if it's a singleton Event.", + "reportingController": "Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.", + "reportingInstance": "ID of the controller instance, e.g. `kubelet-xyzf`.", + "action": "What action was taken/failed regarding to the regarding object.", + "reason": "Why the action was taken.", + "regarding": "The object this Event is about. In most cases it's an Object reporting controller implements. E.g. ReplicaSetController implements ReplicaSets and this event is emitted because it acts on some changes in a ReplicaSet object.", + "related": "Optional secondary object for more complex actions. E.g. when regarding object triggers a creation or deletion of related object.", + "note": "Optional. A human-readable description of the status of this operation. Maximal length of the note is 1kB, but libraries should be prepared to handle values up to 64kB.", + "type": "Type of this event (Normal, Warning), new types could be added in the future.", + "deprecatedSource": "Deprecated field assuring backward compatibility with core.v1 Event type", + "deprecatedFirstTimestamp": "Deprecated field assuring backward compatibility with core.v1 Event type", + "deprecatedLastTimestamp": "Deprecated field assuring backward compatibility with core.v1 Event type", + "deprecatedCount": "Deprecated field assuring backward compatibility with core.v1 Event type", +} + +func (Event) SwaggerDoc() map[string]string { + return map_Event +} + +var map_EventList = map[string]string{ + "": "EventList is a list of Event objects.", + "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "items": "Items is a list of schema objects.", +} + +func (EventList) SwaggerDoc() map[string]string { + return map_EventList +} + +var map_EventSeries = map[string]string{ + "": "EventSeries contain information on series of events, i.e. thing that was/is happening continously for some time.", + "count": "Number of occurrences in this series up to the last heartbeat time", + "lastObservedTime": "Time when last Event from the series was seen before last heartbeat.", + "state": "Information whether this series is ongoing or finished.", +} + +func (EventSeries) SwaggerDoc() map[string]string { + return map_EventSeries +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000000..2886eba13c8c --- /dev/null +++ b/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,127 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1beta1 + +import ( + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Event) DeepCopyInto(out *Event) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.EventTime.DeepCopyInto(&out.EventTime) + if in.Series != nil { + in, out := &in.Series, &out.Series + if *in == nil { + *out = nil + } else { + *out = new(EventSeries) + (*in).DeepCopyInto(*out) + } + } + out.Regarding = in.Regarding + if in.Related != nil { + in, out := &in.Related, &out.Related + if *in == nil { + *out = nil + } else { + *out = new(v1.ObjectReference) + **out = **in + } + } + out.DeprecatedSource = in.DeprecatedSource + in.DeprecatedFirstTimestamp.DeepCopyInto(&out.DeprecatedFirstTimestamp) + in.DeprecatedLastTimestamp.DeepCopyInto(&out.DeprecatedLastTimestamp) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Event. +func (in *Event) DeepCopy() *Event { + if in == nil { + return nil + } + out := new(Event) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Event) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
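Taken together, the files above add the events.k8s.io/v1beta1 types and their generated deep-copy helpers to the vendor tree. A minimal sketch of how a caller might construct an Event and take an independent copy, assuming only the vendored packages in this diff (field values and the controller name are illustrative):

package main

import (
	"fmt"

	eventsv1beta1 "k8s.io/api/events/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	ev := &eventsv1beta1.Event{
		ObjectMeta:          metav1.ObjectMeta{Name: "example-event", Namespace: "default"},
		EventTime:           metav1.NowMicro(),
		ReportingController: "example.io/controller", // illustrative controller name
		ReportingInstance:   "controller-0",
		Action:              "Scheduled",
		Reason:              "Example",
		Type:                "Normal",
	}

	// DeepCopy returns an independent object: mutating the copy leaves ev untouched.
	cp := ev.DeepCopy()
	cp.Note = "only set on the copy"
	fmt.Println(ev.Note == cp.Note) // false
}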
+func (in *EventList) DeepCopyInto(out *EventList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Event, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventList. +func (in *EventList) DeepCopy() *EventList { + if in == nil { + return nil + } + out := new(EventList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EventList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventSeries) DeepCopyInto(out *EventSeries) { + *out = *in + in.LastObservedTime.DeepCopyInto(&out.LastObservedTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSeries. +func (in *EventSeries) DeepCopy() *EventSeries { + if in == nil { + return nil + } + out := new(EventSeries) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/extensions/v1beta1/BUILD b/vendor/k8s.io/api/extensions/v1beta1/BUILD index f9974f2328bc..360ad999c944 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/BUILD +++ b/vendor/k8s.io/api/extensions/v1beta1/BUILD @@ -15,6 +15,7 @@ go_library( "types_swagger_doc_generated.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/api/extensions/v1beta1", deps = [ "//vendor/github.com/gogo/protobuf/proto:go_default_library", "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", @@ -22,7 +23,6 @@ go_library( "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", diff --git a/vendor/k8s.io/api/extensions/v1beta1/doc.go b/vendor/k8s.io/api/extensions/v1beta1/doc.go index 82af692e6248..f763ac1a62d9 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/doc.go +++ b/vendor/k8s.io/api/extensions/v1beta1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true package v1beta1 diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go index 63d492fc85fd..ba5998e2773f 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go @@ -25,13 +25,14 @@ limitations under the License. k8s.io/kubernetes/vendor/k8s.io/api/extensions/v1beta1/generated.proto It has these top-level messages: - APIVersion + AllowedFlexVolume AllowedHostPath CustomMetricCurrentStatus CustomMetricCurrentStatusList CustomMetricTarget CustomMetricTargetList DaemonSet + DaemonSetCondition DaemonSetList DaemonSetSpec DaemonSetStatus @@ -82,10 +83,6 @@ limitations under the License. 
ScaleSpec ScaleStatus SupplementalGroupsStrategyOptions - ThirdPartyResource - ThirdPartyResourceData - ThirdPartyResourceDataList - ThirdPartyResourceList */ package v1beta1 @@ -117,9 +114,9 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *APIVersion) Reset() { *m = APIVersion{} } -func (*APIVersion) ProtoMessage() {} -func (*APIVersion) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } +func (*AllowedFlexVolume) ProtoMessage() {} +func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } func (m *AllowedHostPath) Reset() { *m = AllowedHostPath{} } func (*AllowedHostPath) ProtoMessage() {} @@ -149,246 +146,233 @@ func (m *DaemonSet) Reset() { *m = DaemonSet{} } func (*DaemonSet) ProtoMessage() {} func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } +func (*DaemonSetCondition) ProtoMessage() {} +func (*DaemonSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } func (*DaemonSetList) ProtoMessage() {} -func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } func (*DaemonSetSpec) ProtoMessage() {} -func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } func (*DaemonSetStatus) ProtoMessage() {} -func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } func (*DaemonSetUpdateStrategy) ProtoMessage() {} func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{10} + return fileDescriptorGenerated, []int{11} } func (m *Deployment) Reset() { *m = Deployment{} } func (*Deployment) ProtoMessage() {} -func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } func (*DeploymentCondition) ProtoMessage() {} -func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } func (m *DeploymentList) Reset() { *m = DeploymentList{} } func (*DeploymentList) ProtoMessage() {} -func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } func (*DeploymentRollback) ProtoMessage() {} -func (*DeploymentRollback) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (*DeploymentRollback) 
Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } func (*DeploymentSpec) ProtoMessage() {} -func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } func (*DeploymentStatus) ProtoMessage() {} -func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } func (*DeploymentStrategy) ProtoMessage() {} -func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } func (*FSGroupStrategyOptions) ProtoMessage() {} -func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } func (*HTTPIngressPath) ProtoMessage() {} -func (*HTTPIngressPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +func (*HTTPIngressPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } func (*HTTPIngressRuleValue) ProtoMessage() {} -func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } +func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } func (m *HostPortRange) Reset() { *m = HostPortRange{} } func (*HostPortRange) ProtoMessage() {} -func (*HostPortRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } +func (*HostPortRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } func (m *IDRange) Reset() { *m = IDRange{} } func (*IDRange) ProtoMessage() {} -func (*IDRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } +func (*IDRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } func (m *IPBlock) Reset() { *m = IPBlock{} } func (*IPBlock) ProtoMessage() {} -func (*IPBlock) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } +func (*IPBlock) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } func (m *Ingress) Reset() { *m = Ingress{} } func (*Ingress) ProtoMessage() {} -func (*Ingress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } +func (*Ingress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } func (m *IngressBackend) Reset() { *m = IngressBackend{} } func (*IngressBackend) ProtoMessage() {} -func (*IngressBackend) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +func (*IngressBackend) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } func (m *IngressList) Reset() { *m = IngressList{} } func (*IngressList) ProtoMessage() {} -func (*IngressList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +func 
(*IngressList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } func (m *IngressRule) Reset() { *m = IngressRule{} } func (*IngressRule) ProtoMessage() {} -func (*IngressRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } +func (*IngressRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } func (*IngressRuleValue) ProtoMessage() {} -func (*IngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } +func (*IngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } func (m *IngressSpec) Reset() { *m = IngressSpec{} } func (*IngressSpec) ProtoMessage() {} -func (*IngressSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } +func (*IngressSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } func (m *IngressStatus) Reset() { *m = IngressStatus{} } func (*IngressStatus) ProtoMessage() {} -func (*IngressStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } +func (*IngressStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } func (m *IngressTLS) Reset() { *m = IngressTLS{} } func (*IngressTLS) ProtoMessage() {} -func (*IngressTLS) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } +func (*IngressTLS) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } func (*NetworkPolicy) ProtoMessage() {} -func (*NetworkPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } +func (*NetworkPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} } func (*NetworkPolicyEgressRule) ProtoMessage() {} func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{33} + return fileDescriptorGenerated, []int{34} } func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} } func (*NetworkPolicyIngressRule) ProtoMessage() {} func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{34} + return fileDescriptorGenerated, []int{35} } func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } func (*NetworkPolicyList) ProtoMessage() {} -func (*NetworkPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } +func (*NetworkPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } func (*NetworkPolicyPeer) ProtoMessage() {} -func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } +func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } func (*NetworkPolicyPort) ProtoMessage() {} -func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } +func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } func (*NetworkPolicySpec) ProtoMessage() {} -func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } +func 
(*NetworkPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } func (*PodSecurityPolicy) ProtoMessage() {} -func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } +func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } func (*PodSecurityPolicyList) ProtoMessage() {} -func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } +func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } func (*PodSecurityPolicySpec) ProtoMessage() {} -func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } +func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } func (*ReplicaSet) ProtoMessage() {} -func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } +func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } func (*ReplicaSetCondition) ProtoMessage() {} -func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } +func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} } func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } func (*ReplicaSetList) ProtoMessage() {} -func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} } +func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} } func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } func (*ReplicaSetSpec) ProtoMessage() {} -func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} } +func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } func (*ReplicaSetStatus) ProtoMessage() {} -func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } +func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{47} } func (m *ReplicationControllerDummy) Reset() { *m = ReplicationControllerDummy{} } func (*ReplicationControllerDummy) ProtoMessage() {} func (*ReplicationControllerDummy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{47} + return fileDescriptorGenerated, []int{48} } func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } func (*RollbackConfig) ProtoMessage() {} -func (*RollbackConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{48} } +func (*RollbackConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{49} } func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } func (*RollingUpdateDaemonSet) ProtoMessage() {} -func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{49} } +func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} } func (m *RollingUpdateDeployment) Reset() { *m 
= RollingUpdateDeployment{} } func (*RollingUpdateDeployment) ProtoMessage() {} func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{50} + return fileDescriptorGenerated, []int{51} } func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} } func (*RunAsUserStrategyOptions) ProtoMessage() {} func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{51} + return fileDescriptorGenerated, []int{52} } func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } func (*SELinuxStrategyOptions) ProtoMessage() {} -func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{52} } +func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{53} } func (m *Scale) Reset() { *m = Scale{} } func (*Scale) ProtoMessage() {} -func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{53} } +func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } func (*ScaleSpec) ProtoMessage() {} -func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } +func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} } func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } func (*ScaleStatus) ProtoMessage() {} -func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} } +func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{56} } func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} } func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{56} -} - -func (m *ThirdPartyResource) Reset() { *m = ThirdPartyResource{} } -func (*ThirdPartyResource) ProtoMessage() {} -func (*ThirdPartyResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{57} } - -func (m *ThirdPartyResourceData) Reset() { *m = ThirdPartyResourceData{} } -func (*ThirdPartyResourceData) ProtoMessage() {} -func (*ThirdPartyResourceData) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{58} } - -func (m *ThirdPartyResourceDataList) Reset() { *m = ThirdPartyResourceDataList{} } -func (*ThirdPartyResourceDataList) ProtoMessage() {} -func (*ThirdPartyResourceDataList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{59} + return fileDescriptorGenerated, []int{57} } -func (m *ThirdPartyResourceList) Reset() { *m = ThirdPartyResourceList{} } -func (*ThirdPartyResourceList) ProtoMessage() {} -func (*ThirdPartyResourceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{60} } - func init() { - proto.RegisterType((*APIVersion)(nil), "k8s.io.api.extensions.v1beta1.APIVersion") + proto.RegisterType((*AllowedFlexVolume)(nil), "k8s.io.api.extensions.v1beta1.AllowedFlexVolume") proto.RegisterType((*AllowedHostPath)(nil), "k8s.io.api.extensions.v1beta1.AllowedHostPath") proto.RegisterType((*CustomMetricCurrentStatus)(nil), "k8s.io.api.extensions.v1beta1.CustomMetricCurrentStatus") proto.RegisterType((*CustomMetricCurrentStatusList)(nil), "k8s.io.api.extensions.v1beta1.CustomMetricCurrentStatusList") proto.RegisterType((*CustomMetricTarget)(nil), "k8s.io.api.extensions.v1beta1.CustomMetricTarget") 
proto.RegisterType((*CustomMetricTargetList)(nil), "k8s.io.api.extensions.v1beta1.CustomMetricTargetList") proto.RegisterType((*DaemonSet)(nil), "k8s.io.api.extensions.v1beta1.DaemonSet") + proto.RegisterType((*DaemonSetCondition)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetCondition") proto.RegisterType((*DaemonSetList)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetList") proto.RegisterType((*DaemonSetSpec)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetSpec") proto.RegisterType((*DaemonSetStatus)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetStatus") @@ -439,12 +423,8 @@ func init() { proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.extensions.v1beta1.ScaleSpec") proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.extensions.v1beta1.ScaleStatus") proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.SupplementalGroupsStrategyOptions") - proto.RegisterType((*ThirdPartyResource)(nil), "k8s.io.api.extensions.v1beta1.ThirdPartyResource") - proto.RegisterType((*ThirdPartyResourceData)(nil), "k8s.io.api.extensions.v1beta1.ThirdPartyResourceData") - proto.RegisterType((*ThirdPartyResourceDataList)(nil), "k8s.io.api.extensions.v1beta1.ThirdPartyResourceDataList") - proto.RegisterType((*ThirdPartyResourceList)(nil), "k8s.io.api.extensions.v1beta1.ThirdPartyResourceList") } -func (m *APIVersion) Marshal() (dAtA []byte, err error) { +func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -454,15 +434,15 @@ func (m *APIVersion) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *APIVersion) MarshalTo(dAtA []byte) (int, error) { +func (m *AllowedFlexVolume) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l dAtA[i] = 0xa i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i += copy(dAtA[i:], m.Driver) return i, nil } @@ -650,6 +630,48 @@ func (m *DaemonSet) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonSetCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n6, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil +} + func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -668,11 +690,11 @@ func (m *DaemonSetList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) + n7, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n6 + i += n7 if len(m.Items) > 0 { for _, msg := range m.Items { 
dAtA[i] = 0x12 @@ -707,28 +729,28 @@ func (m *DaemonSetSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n7, err := m.Selector.MarshalTo(dAtA[i:]) + n8, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n7 + i += n8 } dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n8, err := m.Template.MarshalTo(dAtA[i:]) + n9, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n8 + i += n9 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n9, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) + n10, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n9 + i += n10 dAtA[i] = 0x20 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) @@ -787,6 +809,18 @@ func (m *DaemonSetStatus) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) } + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -813,11 +847,11 @@ func (m *DaemonSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n10, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + n11, err := m.RollingUpdate.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n10 + i += n11 } return i, nil } @@ -840,27 +874,27 @@ func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n11, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n12, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n11 + i += n12 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n12, err := m.Spec.MarshalTo(dAtA[i:]) + n13, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n12 + i += n13 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n13, err := m.Status.MarshalTo(dAtA[i:]) + n14, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n13 + i += n14 return i, nil } @@ -898,19 +932,19 @@ func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x32 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) - n14, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) + n15, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n14 + i += n15 dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n15, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n16, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n15 + i += n16 return i, nil } @@ -932,11 +966,11 @@ func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n16, err := m.ListMeta.MarshalTo(dAtA[i:]) + n17, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n16 + i += n17 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -996,11 +1030,11 @@ func (m *DeploymentRollback) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RollbackTo.Size())) - n17, err := 
m.RollbackTo.MarshalTo(dAtA[i:]) + n18, err := m.RollbackTo.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n17 + i += n18 return i, nil } @@ -1028,28 +1062,28 @@ func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n18, err := m.Selector.MarshalTo(dAtA[i:]) + n19, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n18 + i += n19 } dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n19, err := m.Template.MarshalTo(dAtA[i:]) + n20, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n19 + i += n20 dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) - n20, err := m.Strategy.MarshalTo(dAtA[i:]) + n21, err := m.Strategy.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n20 + i += n21 dAtA[i] = 0x28 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) @@ -1070,11 +1104,11 @@ func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x42 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RollbackTo.Size())) - n21, err := m.RollbackTo.MarshalTo(dAtA[i:]) + n22, err := m.RollbackTo.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n21 + i += n22 } if m.ProgressDeadlineSeconds != nil { dAtA[i] = 0x48 @@ -1160,11 +1194,11 @@ func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n22, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + n23, err := m.RollingUpdate.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n22 + i += n23 } return i, nil } @@ -1225,11 +1259,11 @@ func (m *HTTPIngressPath) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Backend.Size())) - n23, err := m.Backend.MarshalTo(dAtA[i:]) + n24, err := m.Backend.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n23 + i += n24 return i, nil } @@ -1366,27 +1400,27 @@ func (m *Ingress) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n24, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n25, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n24 + i += n25 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n25, err := m.Spec.MarshalTo(dAtA[i:]) + n26, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n25 + i += n26 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n26, err := m.Status.MarshalTo(dAtA[i:]) + n27, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n26 + i += n27 return i, nil } @@ -1412,11 +1446,11 @@ func (m *IngressBackend) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ServicePort.Size())) - n27, err := m.ServicePort.MarshalTo(dAtA[i:]) + n28, err := m.ServicePort.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n27 + i += n28 return i, nil } @@ -1438,11 +1472,11 @@ func (m *IngressList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n28, err := m.ListMeta.MarshalTo(dAtA[i:]) + n29, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n28 + i += n29 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -1480,11 +1514,11 @@ func (m 
*IngressRule) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.IngressRuleValue.Size())) - n29, err := m.IngressRuleValue.MarshalTo(dAtA[i:]) + n30, err := m.IngressRuleValue.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n29 + i += n30 return i, nil } @@ -1507,11 +1541,11 @@ func (m *IngressRuleValue) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.HTTP.Size())) - n30, err := m.HTTP.MarshalTo(dAtA[i:]) + n31, err := m.HTTP.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n30 + i += n31 } return i, nil } @@ -1535,11 +1569,11 @@ func (m *IngressSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Backend.Size())) - n31, err := m.Backend.MarshalTo(dAtA[i:]) + n32, err := m.Backend.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n31 + i += n32 } if len(m.TLS) > 0 { for _, msg := range m.TLS { @@ -1586,11 +1620,11 @@ func (m *IngressStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LoadBalancer.Size())) - n32, err := m.LoadBalancer.MarshalTo(dAtA[i:]) + n33, err := m.LoadBalancer.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n32 + i += n33 return i, nil } @@ -1649,19 +1683,19 @@ func (m *NetworkPolicy) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n33, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n34, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n33 + i += n34 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n34, err := m.Spec.MarshalTo(dAtA[i:]) + n35, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n34 + i += n35 return i, nil } @@ -1767,11 +1801,11 @@ func (m *NetworkPolicyList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n35, err := m.ListMeta.MarshalTo(dAtA[i:]) + n36, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n35 + i += n36 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -1806,31 +1840,31 @@ func (m *NetworkPolicyPeer) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PodSelector.Size())) - n36, err := m.PodSelector.MarshalTo(dAtA[i:]) + n37, err := m.PodSelector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n36 + i += n37 } if m.NamespaceSelector != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.NamespaceSelector.Size())) - n37, err := m.NamespaceSelector.MarshalTo(dAtA[i:]) + n38, err := m.NamespaceSelector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n37 + i += n38 } if m.IPBlock != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.IPBlock.Size())) - n38, err := m.IPBlock.MarshalTo(dAtA[i:]) + n39, err := m.IPBlock.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n38 + i += n39 } return i, nil } @@ -1860,11 +1894,11 @@ func (m *NetworkPolicyPort) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) - n39, err := m.Port.MarshalTo(dAtA[i:]) + n40, err := m.Port.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n39 + i += n40 } return i, nil } @@ -1887,11 +1921,11 @@ func (m *NetworkPolicySpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa 
i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PodSelector.Size())) - n40, err := m.PodSelector.MarshalTo(dAtA[i:]) + n41, err := m.PodSelector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n40 + i += n41 if len(m.Ingress) > 0 { for _, msg := range m.Ingress { dAtA[i] = 0x12 @@ -1952,19 +1986,19 @@ func (m *PodSecurityPolicy) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n41, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n42, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n41 + i += n42 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n42, err := m.Spec.MarshalTo(dAtA[i:]) + n43, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n42 + i += n43 return i, nil } @@ -1986,11 +2020,11 @@ func (m *PodSecurityPolicyList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n43, err := m.ListMeta.MarshalTo(dAtA[i:]) + n44, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n43 + i += n44 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -2128,35 +2162,35 @@ func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x52 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SELinux.Size())) - n44, err := m.SELinux.MarshalTo(dAtA[i:]) + n45, err := m.SELinux.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n44 + i += n45 dAtA[i] = 0x5a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RunAsUser.Size())) - n45, err := m.RunAsUser.MarshalTo(dAtA[i:]) + n46, err := m.RunAsUser.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n45 + i += n46 dAtA[i] = 0x62 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SupplementalGroups.Size())) - n46, err := m.SupplementalGroups.MarshalTo(dAtA[i:]) + n47, err := m.SupplementalGroups.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n46 + i += n47 dAtA[i] = 0x6a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.FSGroup.Size())) - n47, err := m.FSGroup.MarshalTo(dAtA[i:]) + n48, err := m.FSGroup.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n47 + i += n48 dAtA[i] = 0x70 i++ if m.ReadOnlyRootFilesystem { @@ -2175,16 +2209,18 @@ func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) { } i++ } - dAtA[i] = 0x80 - i++ - dAtA[i] = 0x1 - i++ - if m.AllowPrivilegeEscalation { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.AllowPrivilegeEscalation != nil { + dAtA[i] = 0x80 + i++ + dAtA[i] = 0x1 + i++ + if *m.AllowPrivilegeEscalation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ } - i++ if len(m.AllowedHostPaths) > 0 { for _, msg := range m.AllowedHostPaths { dAtA[i] = 0x8a @@ -2199,6 +2235,20 @@ func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) { i += n } } + if len(m.AllowedFlexVolumes) > 0 { + for _, msg := range m.AllowedFlexVolumes { + dAtA[i] = 0x92 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -2220,27 +2270,27 @@ func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n48, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n49, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n48 + i += n49 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, 
uint64(m.Spec.Size())) - n49, err := m.Spec.MarshalTo(dAtA[i:]) + n50, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n49 + i += n50 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n50, err := m.Status.MarshalTo(dAtA[i:]) + n51, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n50 + i += n51 return i, nil } @@ -2270,11 +2320,11 @@ func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n51, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n52, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n51 + i += n52 dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -2304,11 +2354,11 @@ func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n52, err := m.ListMeta.MarshalTo(dAtA[i:]) + n53, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n52 + i += n53 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -2348,20 +2398,20 @@ func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n53, err := m.Selector.MarshalTo(dAtA[i:]) + n54, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n53 + i += n54 } dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n54, err := m.Template.MarshalTo(dAtA[i:]) + n55, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n54 + i += n55 dAtA[i] = 0x20 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) @@ -2471,11 +2521,11 @@ func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n55, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) + n56, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n55 + i += n56 } return i, nil } @@ -2499,21 +2549,21 @@ func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n56, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) + n57, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n56 + i += n57 } if m.MaxSurge != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSurge.Size())) - n57, err := m.MaxSurge.MarshalTo(dAtA[i:]) + n58, err := m.MaxSurge.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n57 + i += n58 } return i, nil } @@ -2575,11 +2625,11 @@ func (m *SELinuxStrategyOptions) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size())) - n58, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) + n59, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n58 + i += n59 } return i, nil } @@ -2602,27 +2652,27 @@ func (m *Scale) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n59, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n60, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n59 + i += n60 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n60, err := 
m.Spec.MarshalTo(dAtA[i:]) + n61, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n60 + i += n61 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n61, err := m.Status.MarshalTo(dAtA[i:]) + n62, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n61 + i += n62 return i, nil } @@ -2728,207 +2778,57 @@ func (m *SupplementalGroupsStrategyOptions) MarshalTo(dAtA []byte) (int, error) return i, nil } -func (m *ThirdPartyResource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ } - return dAtA[:n], nil + dAtA[offset] = uint8(v) + return offset + 1 } - -func (m *ThirdPartyResource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i +func (m *AllowedFlexVolume) Size() (n int) { var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n62, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n62 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) - i += copy(dAtA[i:], m.Description) - if len(m.Versions) > 0 { - for _, msg := range m.Versions { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *ThirdPartyResourceData) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil +func (m *AllowedHostPath) Size() (n int) { + var l int + _ = l + l = len(m.PathPrefix) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *ThirdPartyResourceData) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i +func (m *CustomMetricCurrentStatus) Size() (n int) { var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n63, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n63 - if m.Data != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Data))) - i += copy(dAtA[i:], m.Data) - } - return i, nil -} - -func (m *ThirdPartyResourceDataList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ThirdPartyResourceDataList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n64, err := 
m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n64 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ThirdPartyResourceList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ThirdPartyResourceList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n65, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n65 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *APIVersion) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *AllowedHostPath) Size() (n int) { - var l int - _ = l - l = len(m.PathPrefix) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *CustomMetricCurrentStatus) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = m.CurrentValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.CurrentValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } func (m *CustomMetricCurrentStatusList) Size() (n int) { @@ -2977,6 +2877,22 @@ func (m *DaemonSet) Size() (n int) { return n } +func (m *DaemonSetCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *DaemonSetList) Size() (n int) { var l int _ = l @@ -3024,6 +2940,12 @@ func (m *DaemonSetStatus) Size() (n int) { if m.CollisionCount != nil { n += 1 + sovGenerated(uint64(*m.CollisionCount)) } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -3520,13 +3442,21 @@ func (m *PodSecurityPolicySpec) Size() (n int) { if m.DefaultAllowPrivilegeEscalation != nil { n += 2 } - n += 3 + if m.AllowPrivilegeEscalation != nil { + n += 3 + } if 
len(m.AllowedHostPaths) > 0 { for _, e := range m.AllowedHostPaths { l = e.Size() n += 2 + l + sovGenerated(uint64(l)) } } + if len(m.AllowedFlexVolumes) > 0 { + for _, e := range m.AllowedFlexVolumes { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } return n } @@ -3718,62 +3648,6 @@ func (m *SupplementalGroupsStrategyOptions) Size() (n int) { return n } -func (m *ThirdPartyResource) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Description) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Versions) > 0 { - for _, e := range m.Versions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ThirdPartyResourceData) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Data != nil { - l = len(m.Data) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ThirdPartyResourceDataList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ThirdPartyResourceList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - func sovGenerated(x uint64) (n int) { for { n++ @@ -3787,12 +3661,12 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (this *APIVersion) String() string { +func (this *AllowedFlexVolume) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&APIVersion{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + s := strings.Join([]string{`&AllowedFlexVolume{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, `}`, }, "") return s @@ -3861,6 +3735,20 @@ func (this *DaemonSet) String() string { }, "") return s } +func (this *DaemonSetCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} func (this *DaemonSetList) String() string { if this == nil { return "nil" @@ -3901,6 +3789,7 @@ func (this *DaemonSetStatus) String() string { `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`, `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -4297,8 +4186,9 @@ func (this *PodSecurityPolicySpec) String() string { `FSGroup:` + strings.Replace(strings.Replace(this.FSGroup.String(), "FSGroupStrategyOptions", "FSGroupStrategyOptions", 1), `&`, ``, 1) + `,`, `ReadOnlyRootFilesystem:` + fmt.Sprintf("%v", this.ReadOnlyRootFilesystem) + `,`, `DefaultAllowPrivilegeEscalation:` + 
valueToStringGenerated(this.DefaultAllowPrivilegeEscalation) + `,`, - `AllowPrivilegeEscalation:` + fmt.Sprintf("%v", this.AllowPrivilegeEscalation) + `,`, + `AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`, `AllowedHostPaths:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedHostPaths), "AllowedHostPath", "AllowedHostPath", 1), `&`, ``, 1) + `,`, + `AllowedFlexVolumes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedFlexVolumes), "AllowedFlexVolume", "AllowedFlexVolume", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -4485,51 +4375,6 @@ func (this *SupplementalGroupsStrategyOptions) String() string { }, "") return s } -func (this *ThirdPartyResource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ThirdPartyResource{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Description:` + fmt.Sprintf("%v", this.Description) + `,`, - `Versions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Versions), "APIVersion", "APIVersion", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ThirdPartyResourceData) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ThirdPartyResourceData{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Data:` + valueToStringGenerated(this.Data) + `,`, - `}`, - }, "") - return s -} -func (this *ThirdPartyResourceDataList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ThirdPartyResourceDataList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ThirdPartyResourceData", "ThirdPartyResourceData", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ThirdPartyResourceList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ThirdPartyResourceList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ThirdPartyResource", "ThirdPartyResource", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} func valueToStringGenerated(v interface{}) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -4538,7 +4383,7 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } -func (m *APIVersion) Unmarshal(dAtA []byte) error { +func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4561,15 +4406,15 @@ func (m *APIVersion) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: APIVersion: wiretype end group for non-group") + return fmt.Errorf("proto: AllowedFlexVolume: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: APIVersion: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: AllowedFlexVolume: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { 
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4594,7 +4439,7 @@ func (m *APIVersion) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Driver = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -5216,7 +5061,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } return nil } -func (m *DaemonSetList) Unmarshal(dAtA []byte) error { +func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5239,17 +5084,17 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DaemonSetList: wiretype end group for non-group") + return fmt.Errorf("proto: DaemonSetCondition: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5259,21 +5104,217 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Type = DaemonSetConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSetList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { @@ -5729,6 +5770,37 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } } m.CollisionCount = &v + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, DaemonSetCondition{}) + if err := 
m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -10170,7 +10242,8 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { break } } - m.AllowPrivilegeEscalation = bool(v != 0) + b := bool(v != 0) + m.AllowPrivilegeEscalation = &b case 17: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field AllowedHostPaths", wireType) @@ -10202,6 +10275,37 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedFlexVolumes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedFlexVolumes = append(m.AllowedFlexVolumes, AllowedFlexVolume{}) + if err := m.AllowedFlexVolumes[len(m.AllowedFlexVolumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -12071,479 +12175,6 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *ThirdPartyResource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ThirdPartyResource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ThirdPartyResource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Description = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - 
return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Versions = append(m.Versions, APIVersion{}) - if err := m.Versions[len(m.Versions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ThirdPartyResourceData) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ThirdPartyResourceData: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ThirdPartyResourceData: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
- if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ThirdPartyResourceDataList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ThirdPartyResourceDataList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ThirdPartyResourceDataList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ThirdPartyResourceData{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ThirdPartyResourceList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ThirdPartyResourceList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ThirdPartyResourceList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field 
ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ThirdPartyResource{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 @@ -12654,233 +12285,229 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 3634 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x5b, 0xcd, 0x6f, 0x23, 0xc9, - 0x75, 0x9f, 0xe6, 0x87, 0x48, 0x3d, 0x8d, 0xbe, 0x4a, 0xb3, 0x12, 0xad, 0xdd, 0x11, 0xe5, 0x5e, - 0x60, 0x32, 0xbb, 0xd9, 0x25, 0x77, 0xb4, 0x3b, 0xeb, 0xcd, 0x2e, 0x62, 0x47, 0x94, 0xe6, 0x43, - 0x8e, 0x3e, 0xb8, 0x45, 0x4a, 0x4e, 0x16, 0x1e, 0x67, 0x5b, 0x64, 0x89, 0xea, 0x51, 0xb3, 0xbb, - 0xdd, 0x5d, 0x2d, 0x8b, 0x97, 0x20, 0x87, 0xc0, 0x40, 0x80, 0x04, 0x49, 0x0e, 0x0e, 0x1c, 0x20, - 0x87, 0xf8, 0x92, 0x53, 0x82, 0xf8, 0x96, 0x1c, 0x0c, 0x03, 0x01, 0x1c, 0x60, 0x10, 0x38, 0x81, - 0x4f, 0x89, 0x4f, 0x42, 0x56, 0x3e, 0xe6, 0x1f, 0x08, 0xe6, 0x10, 0x04, 0x55, 0x5d, 0xfd, 0xdd, - 0x2d, 0x92, 0xda, 0x91, 0x10, 0xe4, 0xc6, 0xae, 0xf7, 0xde, 0xef, 0xbd, 0x7a, 0x55, 0xf5, 0xde, - 0xab, 0x0f, 0xc2, 0xe3, 0x93, 0x8f, 0xec, 0x9a, 0x6a, 0xd4, 0x4f, 0x9c, 0x43, 0x62, 0xe9, 0x84, - 0x12, 0xbb, 0x7e, 0x4a, 0xf4, 0xae, 0x61, 0xd5, 0x05, 0x41, 0x31, 0xd5, 0x3a, 0x39, 0xa3, 0x44, - 0xb7, 0x55, 0x43, 0xb7, 0xeb, 0xa7, 0x0f, 0x0e, 0x09, 0x55, 0x1e, 0xd4, 0x7b, 0x44, 0x27, 0x96, - 0x42, 0x49, 0xb7, 0x66, 0x5a, 0x06, 0x35, 0xd0, 0x5d, 0x97, 0xbd, 0xa6, 0x98, 0x6a, 0x2d, 0x60, - 0xaf, 0x09, 0xf6, 0xe5, 0x77, 0x7b, 0x2a, 0x3d, 0x76, 0x0e, 0x6b, 0x1d, 0xa3, 0x5f, 0xef, 0x19, - 0x3d, 0xa3, 0xce, 0xa5, 0x0e, 0x9d, 0x23, 0xfe, 0xc5, 0x3f, 0xf8, 0x2f, 0x17, 0x6d, 0x59, 0x0e, - 0x29, 0xef, 0x18, 0x16, 0xa9, 0x9f, 0x26, 0x34, 0x2e, 0xbf, 0x15, 0xe2, 0x31, 0x0d, 0x4d, 0xed, - 0x0c, 0xb2, 0x8c, 0x5b, 0xfe, 0x20, 0x60, 0xed, 0x2b, 0x9d, 0x63, 0x55, 0x27, 0xd6, 0xa0, 0x6e, - 0x9e, 0xf4, 0xb8, 0xac, 0x45, 0x6c, 0xc3, 0xb1, 0x3a, 0x64, 0x2c, 0x29, 0xbb, 0xde, 0x27, 0x54, - 0x49, 0x33, 0xab, 0x9e, 0x25, 0x65, 0x39, 0x3a, 0x55, 0xfb, 0x49, 0x35, 0x1f, 0x0e, 0x13, 0xb0, - 0x3b, 0xc7, 0xa4, 0xaf, 0x24, 0xe4, 
0xde, 0xcf, 0x92, 0x73, 0xa8, 0xaa, 0xd5, 0x55, 0x9d, 0xda, - 0xd4, 0x8a, 0x0b, 0xc9, 0x35, 0x80, 0xf5, 0xe6, 0xd6, 0x01, 0xb1, 0xd8, 0xf0, 0xa0, 0x55, 0x28, - 0xe8, 0x4a, 0x9f, 0x54, 0xa4, 0x55, 0xe9, 0xfe, 0x64, 0xe3, 0xf6, 0x8b, 0xf3, 0xea, 0xad, 0x8b, - 0xf3, 0x6a, 0x61, 0x57, 0xe9, 0x13, 0xcc, 0x29, 0xf2, 0x23, 0x98, 0x5d, 0xd7, 0x34, 0xe3, 0x7b, - 0xa4, 0xfb, 0xd4, 0xb0, 0x69, 0x53, 0xa1, 0xc7, 0x68, 0x0d, 0xc0, 0x54, 0xe8, 0x71, 0xd3, 0x22, - 0x47, 0xea, 0x99, 0x10, 0x45, 0x42, 0x14, 0x9a, 0x3e, 0x05, 0x87, 0xb8, 0xe4, 0xbf, 0x94, 0xe0, - 0x2b, 0x1b, 0x8e, 0x4d, 0x8d, 0xfe, 0x0e, 0xa1, 0x96, 0xda, 0xd9, 0x70, 0x2c, 0x8b, 0xe8, 0xb4, - 0x45, 0x15, 0xea, 0xd8, 0xc3, 0xcd, 0x40, 0x9f, 0x41, 0xf1, 0x54, 0xd1, 0x1c, 0x52, 0xc9, 0xad, - 0x4a, 0xf7, 0xa7, 0xd6, 0x6a, 0xb5, 0x60, 0xb6, 0xf9, 0x7d, 0xaf, 0x99, 0x27, 0x3d, 0x3e, 0xfd, - 0xbc, 0x01, 0xad, 0x7d, 0xea, 0x28, 0x3a, 0x55, 0xe9, 0xa0, 0x71, 0x47, 0x40, 0xde, 0x16, 0x7a, - 0x0f, 0x18, 0x16, 0x76, 0x21, 0xe5, 0xdf, 0x87, 0xbb, 0x99, 0xa6, 0x6d, 0xab, 0x36, 0x45, 0xcf, - 0xa0, 0xa8, 0x52, 0xd2, 0xb7, 0x2b, 0xd2, 0x6a, 0xfe, 0xfe, 0xd4, 0xda, 0x47, 0xb5, 0x4b, 0xa7, - 0x7a, 0x2d, 0x13, 0xac, 0x31, 0x2d, 0xcc, 0x28, 0x6e, 0x31, 0x38, 0xec, 0xa2, 0xca, 0x7f, 0x2e, - 0x01, 0x0a, 0xcb, 0xb4, 0x15, 0xab, 0x47, 0xe8, 0x08, 0x4e, 0xf9, 0xdd, 0x2f, 0xe7, 0x94, 0x05, - 0x01, 0x39, 0xe5, 0x2a, 0x8c, 0xf8, 0xc4, 0x84, 0xc5, 0xa4, 0x49, 0xdc, 0x19, 0x07, 0x51, 0x67, - 0x3c, 0x18, 0xc3, 0x19, 0x2e, 0x4a, 0x86, 0x17, 0x7e, 0x90, 0x83, 0xc9, 0x4d, 0x85, 0xf4, 0x0d, - 0xbd, 0x45, 0x28, 0xfa, 0x1c, 0xca, 0x6c, 0x7d, 0x75, 0x15, 0xaa, 0x70, 0x07, 0x4c, 0xad, 0xbd, - 0x77, 0x59, 0xef, 0xec, 0x1a, 0xe3, 0xae, 0x9d, 0x3e, 0xa8, 0xed, 0x1d, 0x3e, 0x27, 0x1d, 0xba, - 0x43, 0xa8, 0x12, 0xcc, 0xc9, 0xa0, 0x0d, 0xfb, 0xa8, 0x68, 0x17, 0x0a, 0xb6, 0x49, 0x3a, 0xc2, - 0x77, 0xef, 0x0c, 0xe9, 0x86, 0x6f, 0x59, 0xcb, 0x24, 0x9d, 0x60, 0x30, 0xd8, 0x17, 0xe6, 0x38, - 0xe8, 0x00, 0x26, 0x6c, 0x3e, 0xca, 0x95, 0x7c, 0x62, 0x34, 0x2e, 0x47, 0x74, 0xe7, 0xc6, 0x8c, - 0xc0, 0x9c, 0x70, 0xbf, 0xb1, 0x40, 0x93, 0x7f, 0x22, 0xc1, 0xb4, 0xcf, 0xcb, 0x47, 0xe0, 0xdb, - 0x09, 0xdf, 0xd4, 0x46, 0xf3, 0x0d, 0x93, 0xe6, 0x9e, 0x99, 0x13, 0xba, 0xca, 0x5e, 0x4b, 0xc8, - 0x2f, 0x3b, 0xde, 0xf8, 0xe6, 0xf8, 0xf8, 0xde, 0x1f, 0xb5, 0x1b, 0x19, 0xc3, 0xfa, 0x17, 0x85, - 0x90, 0xf9, 0xcc, 0x5d, 0xe8, 0x19, 0x94, 0x6d, 0xa2, 0x91, 0x0e, 0x35, 0x2c, 0x61, 0xfe, 0xfb, - 0x23, 0x9a, 0xaf, 0x1c, 0x12, 0xad, 0x25, 0x44, 0x1b, 0xb7, 0x99, 0xfd, 0xde, 0x17, 0xf6, 0x21, - 0xd1, 0xa7, 0x50, 0xa6, 0xa4, 0x6f, 0x6a, 0x0a, 0xf5, 0xd6, 0xc5, 0x9b, 0xe1, 0x2e, 0xb0, 0x64, - 0xc2, 0xc0, 0x9a, 0x46, 0xb7, 0x2d, 0xd8, 0xf8, 0x90, 0xfa, 0x2e, 0xf1, 0x5a, 0xb1, 0x0f, 0x83, - 0x4e, 0x61, 0xc6, 0x31, 0xbb, 0x8c, 0x93, 0xb2, 0x50, 0xda, 0x1b, 0x88, 0x21, 0xfe, 0x70, 0x54, - 0xdf, 0xec, 0x47, 0xa4, 0x1b, 0x8b, 0x42, 0xd7, 0x4c, 0xb4, 0x1d, 0xc7, 0xb4, 0xa0, 0x75, 0x98, - 0xed, 0xab, 0x3a, 0x26, 0x4a, 0x77, 0xd0, 0x22, 0x1d, 0x43, 0xef, 0xda, 0x95, 0xc2, 0xaa, 0x74, - 0xbf, 0xd8, 0x58, 0x12, 0x00, 0xb3, 0x3b, 0x51, 0x32, 0x8e, 0xf3, 0xa3, 0x6f, 0x02, 0xf2, 0xba, - 0xf1, 0xc4, 0xcd, 0x04, 0xaa, 0xa1, 0x57, 0x8a, 0xab, 0xd2, 0xfd, 0x7c, 0x63, 0x59, 0xa0, 0xa0, - 0x76, 0x82, 0x03, 0xa7, 0x48, 0xa1, 0x6d, 0xb8, 0x63, 0x91, 0x53, 0x95, 0xf5, 0xf1, 0xa9, 0x6a, - 0x53, 0xc3, 0x1a, 0x6c, 0xab, 0x7d, 0x95, 0x56, 0x26, 0xb8, 0x4d, 0x95, 0x8b, 0xf3, 0xea, 0x1d, - 0x9c, 0x42, 0xc7, 0xa9, 0x52, 0xf2, 0x8f, 0x8b, 0x30, 0x1b, 0x5b, 0x03, 0xe8, 0x00, 0x16, 0x3b, - 0x6e, 0xc0, 0xdc, 0x75, 0xfa, 0x87, 0xc4, 0x6a, 0x75, 0x8e, 
0x49, 0xd7, 0xd1, 0x48, 0x97, 0x4f, - 0x94, 0x62, 0x63, 0x45, 0x58, 0xbc, 0xb8, 0x91, 0xca, 0x85, 0x33, 0xa4, 0x99, 0x17, 0x74, 0xde, - 0xb4, 0xa3, 0xda, 0xb6, 0x8f, 0x99, 0xe3, 0x98, 0xbe, 0x17, 0x76, 0x13, 0x1c, 0x38, 0x45, 0x8a, - 0xd9, 0xd8, 0x25, 0xb6, 0x6a, 0x91, 0x6e, 0xdc, 0xc6, 0x7c, 0xd4, 0xc6, 0xcd, 0x54, 0x2e, 0x9c, - 0x21, 0x8d, 0x1e, 0xc2, 0x94, 0xab, 0x8d, 0x8f, 0x9f, 0x18, 0x68, 0x3f, 0x44, 0xef, 0x06, 0x24, - 0x1c, 0xe6, 0x63, 0x5d, 0x33, 0x0e, 0x6d, 0x62, 0x9d, 0x92, 0x6e, 0xf6, 0x00, 0xef, 0x25, 0x38, - 0x70, 0x8a, 0x14, 0xeb, 0x9a, 0x3b, 0x03, 0x13, 0x5d, 0x9b, 0x88, 0x76, 0x6d, 0x3f, 0x95, 0x0b, - 0x67, 0x48, 0xb3, 0x79, 0xec, 0x9a, 0xbc, 0x7e, 0xaa, 0xa8, 0x9a, 0x72, 0xa8, 0x91, 0x4a, 0x29, - 0x3a, 0x8f, 0x77, 0xa3, 0x64, 0x1c, 0xe7, 0x47, 0x4f, 0x60, 0xde, 0x6d, 0xda, 0xd7, 0x15, 0x1f, - 0xa4, 0xcc, 0x41, 0xbe, 0x22, 0x40, 0xe6, 0x77, 0xe3, 0x0c, 0x38, 0x29, 0x83, 0x3e, 0x86, 0x99, - 0x8e, 0xa1, 0x69, 0x7c, 0x3e, 0x6e, 0x18, 0x8e, 0x4e, 0x2b, 0x93, 0x1c, 0x05, 0xb1, 0xf5, 0xb8, - 0x11, 0xa1, 0xe0, 0x18, 0xa7, 0xfc, 0x2f, 0x12, 0x2c, 0x65, 0xac, 0x69, 0xf4, 0x0d, 0x28, 0xd0, - 0x81, 0xe9, 0x65, 0xeb, 0x5f, 0xf7, 0x12, 0x44, 0x7b, 0x60, 0x92, 0x97, 0xe7, 0xd5, 0xd7, 0x33, - 0xc4, 0x18, 0x19, 0x73, 0x41, 0xa4, 0xc3, 0xb4, 0xc5, 0xd4, 0xe9, 0x3d, 0x97, 0x45, 0x04, 0xaf, - 0x87, 0x43, 0x62, 0x0c, 0x0e, 0xcb, 0x04, 0xc1, 0x78, 0xfe, 0xe2, 0xbc, 0x3a, 0x1d, 0xa1, 0xe1, - 0x28, 0xbc, 0xfc, 0xc3, 0x1c, 0xc0, 0x26, 0x31, 0x35, 0x63, 0xd0, 0x27, 0xfa, 0x4d, 0x24, 0xdc, - 0xbd, 0x48, 0xc2, 0x7d, 0x77, 0x58, 0xec, 0xf4, 0x4d, 0xcb, 0xcc, 0xb8, 0xdf, 0x8a, 0x65, 0xdc, - 0xfa, 0xe8, 0x90, 0x97, 0xa7, 0xdc, 0xff, 0xc8, 0xc3, 0x42, 0xc0, 0xbc, 0x61, 0xe8, 0x5d, 0x95, - 0xaf, 0x8f, 0x4f, 0x22, 0x63, 0xfc, 0x6b, 0xb1, 0x31, 0x5e, 0x4a, 0x11, 0x09, 0x8d, 0xef, 0xb6, - 0x6f, 0x6d, 0x8e, 0x8b, 0x7f, 0x10, 0x55, 0xfe, 0xf2, 0xbc, 0x9a, 0xb2, 0xe7, 0xa9, 0xf9, 0x48, - 0x51, 0x13, 0xd1, 0x3d, 0x98, 0xb0, 0x88, 0x62, 0x1b, 0x3a, 0x0f, 0x14, 0x93, 0x41, 0x57, 0x30, - 0x6f, 0xc5, 0x82, 0x8a, 0xde, 0x82, 0x52, 0x9f, 0xd8, 0xb6, 0xd2, 0x23, 0x3c, 0x26, 0x4c, 0x36, - 0x66, 0x05, 0x63, 0x69, 0xc7, 0x6d, 0xc6, 0x1e, 0x1d, 0x3d, 0x87, 0x19, 0x4d, 0xb1, 0xc5, 0x04, - 0x6d, 0xab, 0x7d, 0xc2, 0x57, 0xfd, 0xd4, 0xda, 0xdb, 0xa3, 0xcd, 0x03, 0x26, 0x11, 0x64, 0xb6, - 0xed, 0x08, 0x12, 0x8e, 0x21, 0xa3, 0x53, 0x40, 0xac, 0xa5, 0x6d, 0x29, 0xba, 0xed, 0x3a, 0x8a, - 0xe9, 0x2b, 0x8d, 0xad, 0xcf, 0x8f, 0x70, 0xdb, 0x09, 0x34, 0x9c, 0xa2, 0x41, 0xfe, 0xa9, 0x04, - 0x33, 0xc1, 0x30, 0xdd, 0x40, 0x35, 0xb5, 0x1b, 0xad, 0xa6, 0xde, 0x1a, 0x79, 0x8a, 0x66, 0x94, - 0x53, 0xff, 0x9d, 0x03, 0x14, 0x30, 0xb1, 0x05, 0x7e, 0xa8, 0x74, 0x4e, 0x46, 0xd8, 0x2b, 0xfc, - 0x40, 0x02, 0x24, 0xc2, 0xf3, 0xba, 0xae, 0x1b, 0x94, 0x47, 0x7c, 0xcf, 0xac, 0xad, 0x91, 0xcd, - 0xf2, 0x34, 0xd6, 0xf6, 0x13, 0x58, 0x8f, 0x74, 0x6a, 0x0d, 0x82, 0x11, 0x49, 0x32, 0xe0, 0x14, - 0x03, 0x90, 0x02, 0x60, 0x09, 0xcc, 0xb6, 0x21, 0x16, 0xf2, 0xbb, 0x23, 0xc4, 0x3c, 0x26, 0xb0, - 0x61, 0xe8, 0x47, 0x6a, 0x2f, 0x08, 0x3b, 0xd8, 0x07, 0xc2, 0x21, 0xd0, 0xe5, 0x47, 0xb0, 0x94, - 0x61, 0x2d, 0x9a, 0x83, 0xfc, 0x09, 0x19, 0xb8, 0x6e, 0xc3, 0xec, 0x27, 0xba, 0x13, 0xde, 0x53, - 0x4d, 0x8a, 0xed, 0xd0, 0xc7, 0xb9, 0x8f, 0x24, 0xf9, 0x27, 0xc5, 0xf0, 0xdc, 0xe1, 0xa5, 0xec, - 0x7d, 0x28, 0x5b, 0xc4, 0xd4, 0xd4, 0x8e, 0x62, 0x8b, 0x0a, 0x85, 0x57, 0xa5, 0x58, 0xb4, 0x61, - 0x9f, 0x1a, 0x29, 0x7a, 0x73, 0xd7, 0x5b, 0xf4, 0xe6, 0x5f, 0x4d, 0xd1, 0xfb, 0x7b, 0x50, 0xb6, - 0xbd, 0x72, 0xb7, 0xc0, 0x21, 0x1f, 0x8c, 0x11, 0x5f, 0x45, 0xa5, 0xeb, 0x2b, 0xf0, 
0x6b, 0x5c, - 0x1f, 0x34, 0xad, 0xba, 0x2d, 0x8e, 0x59, 0xdd, 0xbe, 0xd2, 0x8a, 0x94, 0xc5, 0x54, 0x53, 0x71, - 0x6c, 0xd2, 0xe5, 0x81, 0xa8, 0x1c, 0xc4, 0xd4, 0x26, 0x6f, 0xc5, 0x82, 0x8a, 0x9e, 0x45, 0xa6, - 0x6c, 0xf9, 0x2a, 0x53, 0x76, 0x26, 0x7b, 0xba, 0xa2, 0x7d, 0x58, 0x32, 0x2d, 0xa3, 0x67, 0x11, - 0xdb, 0xde, 0x24, 0x4a, 0x57, 0x53, 0x75, 0xe2, 0xf9, 0xc7, 0x2d, 0x55, 0x5e, 0xbf, 0x38, 0xaf, - 0x2e, 0x35, 0xd3, 0x59, 0x70, 0x96, 0xac, 0xfc, 0xa2, 0x00, 0x73, 0xf1, 0x0c, 0x98, 0x51, 0x3d, - 0x4a, 0x57, 0xaa, 0x1e, 0xdf, 0x09, 0x2d, 0x06, 0xb7, 0xb4, 0xf6, 0x47, 0x3f, 0x65, 0x41, 0xac, - 0xc3, 0xac, 0x88, 0x06, 0x1e, 0x51, 0xd4, 0xcf, 0xfe, 0xe8, 0xef, 0x47, 0xc9, 0x38, 0xce, 0xcf, - 0x6a, 0xc2, 0xa0, 0xd4, 0xf3, 0x40, 0x0a, 0xd1, 0x9a, 0x70, 0x3d, 0xce, 0x80, 0x93, 0x32, 0x68, - 0x07, 0x16, 0x1c, 0x3d, 0x09, 0xe5, 0xce, 0xc6, 0xd7, 0x05, 0xd4, 0xc2, 0x7e, 0x92, 0x05, 0xa7, - 0xc9, 0xa1, 0x23, 0x80, 0x8e, 0x97, 0xb6, 0xed, 0xca, 0x04, 0x8f, 0xb0, 0x6b, 0x23, 0xaf, 0x1d, - 0x3f, 0xe3, 0x07, 0x71, 0xcd, 0x6f, 0xb2, 0x71, 0x08, 0x19, 0x7d, 0x02, 0xd3, 0x16, 0xdf, 0x10, - 0x78, 0x06, 0xbb, 0x45, 0xf5, 0x6b, 0x42, 0x6c, 0x1a, 0x87, 0x89, 0x38, 0xca, 0x9b, 0x52, 0x07, - 0x97, 0x47, 0xae, 0x83, 0xff, 0x49, 0x0a, 0x27, 0x21, 0xbf, 0x04, 0xfe, 0x38, 0x52, 0x1e, 0xdd, - 0x8b, 0x95, 0x47, 0x8b, 0x49, 0x89, 0x50, 0x75, 0x64, 0xa4, 0x57, 0xbf, 0x1f, 0x8e, 0x55, 0xfd, - 0x06, 0xc9, 0x73, 0x78, 0xf9, 0xfb, 0x23, 0x09, 0x16, 0x1f, 0xb7, 0x9e, 0x58, 0x86, 0x63, 0x7a, - 0xe6, 0xec, 0x99, 0xae, 0x5f, 0xbf, 0x06, 0x05, 0xcb, 0xd1, 0xbc, 0x7e, 0xbc, 0xe9, 0xf5, 0x03, - 0x3b, 0x1a, 0xeb, 0xc7, 0x42, 0x4c, 0xca, 0xed, 0x04, 0x13, 0x40, 0xbb, 0x30, 0x61, 0x29, 0x7a, - 0x8f, 0x78, 0x69, 0xf5, 0xde, 0x10, 0xeb, 0xb7, 0x36, 0x31, 0x63, 0x0f, 0x15, 0x6f, 0x5c, 0x1a, - 0x0b, 0x14, 0xf9, 0x4f, 0x24, 0x98, 0x7d, 0xda, 0x6e, 0x37, 0xb7, 0x74, 0xbe, 0xa2, 0xf9, 0xe1, - 0xeb, 0x2a, 0x14, 0x4c, 0x85, 0x1e, 0xc7, 0x33, 0x3d, 0xa3, 0x61, 0x4e, 0x41, 0xbf, 0x03, 0x25, - 0x16, 0x49, 0x88, 0xde, 0x1d, 0xb1, 0xd4, 0x16, 0xf0, 0x0d, 0x57, 0x28, 0xa8, 0x10, 0x45, 0x03, - 0xf6, 0xe0, 0xe4, 0x13, 0xb8, 0x13, 0x32, 0x87, 0xf9, 0x83, 0x9f, 0x19, 0xa2, 0x16, 0x14, 0x99, - 0x66, 0xef, 0x48, 0x70, 0xd8, 0xc9, 0x57, 0xac, 0x4b, 0x41, 0xa5, 0xc3, 0xbe, 0x6c, 0xec, 0x62, - 0xc9, 0x3b, 0x30, 0xcd, 0x4f, 0x9c, 0x0d, 0x8b, 0x72, 0xb7, 0xa0, 0xbb, 0x90, 0xef, 0xab, 0xba, - 0xc8, 0xb3, 0x53, 0x42, 0x26, 0xcf, 0x72, 0x04, 0x6b, 0xe7, 0x64, 0xe5, 0x4c, 0x44, 0x9e, 0x80, - 0xac, 0x9c, 0x61, 0xd6, 0x2e, 0x3f, 0x81, 0x92, 0x70, 0x77, 0x18, 0x28, 0x7f, 0x39, 0x50, 0x3e, - 0x05, 0x68, 0x0f, 0x4a, 0x5b, 0xcd, 0x86, 0x66, 0xb8, 0x55, 0x57, 0x47, 0xed, 0x5a, 0xf1, 0xb1, - 0xd8, 0xd8, 0xda, 0xc4, 0x98, 0x53, 0x90, 0x0c, 0x13, 0xe4, 0xac, 0x43, 0x4c, 0xca, 0x67, 0xc4, - 0x64, 0x03, 0xd8, 0x28, 0x3f, 0xe2, 0x2d, 0x58, 0x50, 0xe4, 0x3f, 0xcd, 0x41, 0x49, 0xb8, 0xe3, - 0x06, 0x76, 0x61, 0xdb, 0x91, 0x5d, 0xd8, 0xdb, 0xa3, 0x4d, 0x8d, 0xcc, 0x2d, 0x58, 0x3b, 0xb6, - 0x05, 0x7b, 0x67, 0x44, 0xbc, 0xcb, 0xf7, 0x5f, 0x3f, 0x96, 0x60, 0x26, 0x3a, 0x29, 0xd1, 0x43, - 0x98, 0x62, 0x09, 0x47, 0xed, 0x90, 0xdd, 0xa0, 0xce, 0xf5, 0x4f, 0x47, 0x5a, 0x01, 0x09, 0x87, - 0xf9, 0x50, 0xcf, 0x17, 0x63, 0xf3, 0x48, 0x74, 0x3a, 0xdb, 0xa5, 0x0e, 0x55, 0xb5, 0x9a, 0x7b, - 0x71, 0x52, 0xdb, 0xd2, 0xe9, 0x9e, 0xd5, 0xa2, 0x96, 0xaa, 0xf7, 0x12, 0x8a, 0xf8, 0xa4, 0x0c, - 0x23, 0xcb, 0xff, 0x28, 0xc1, 0x94, 0x30, 0xf9, 0x06, 0x76, 0x15, 0xbf, 0x1d, 0xdd, 0x55, 0xdc, - 0x1b, 0x71, 0x81, 0xa7, 0x6f, 0x29, 0xfe, 0x26, 0x30, 0x9d, 0x2d, 0x69, 0x36, 0xab, 0x8f, 0x0d, - 0x9b, 0xc6, 
0x67, 0x35, 0x5b, 0x8c, 0x98, 0x53, 0x90, 0x03, 0x73, 0x6a, 0x2c, 0x06, 0x08, 0xd7, - 0xd6, 0x47, 0xb3, 0xc4, 0x17, 0x6b, 0x54, 0x04, 0xfc, 0x5c, 0x9c, 0x82, 0x13, 0x2a, 0x64, 0x02, - 0x09, 0x2e, 0xf4, 0x29, 0x14, 0x8e, 0x29, 0x35, 0x53, 0x0e, 0x92, 0x87, 0x44, 0x9e, 0xc0, 0x84, - 0x32, 0xef, 0x5d, 0xbb, 0xdd, 0xc4, 0x1c, 0x4a, 0xfe, 0x9f, 0xc0, 0x1f, 0x2d, 0x77, 0x8e, 0xfb, - 0xf1, 0x54, 0xba, 0x4a, 0x3c, 0x9d, 0x4a, 0x8b, 0xa5, 0xe8, 0x29, 0xe4, 0xa9, 0x36, 0xea, 0xb6, - 0x50, 0x20, 0xb6, 0xb7, 0x5b, 0x41, 0x40, 0x6a, 0x6f, 0xb7, 0x30, 0x83, 0x40, 0x7b, 0x50, 0x64, - 0xd9, 0x87, 0x2d, 0xc1, 0xfc, 0xe8, 0x4b, 0x9a, 0xf5, 0x3f, 0x98, 0x10, 0xec, 0xcb, 0xc6, 0x2e, - 0x8e, 0xfc, 0x5d, 0x98, 0x8e, 0xac, 0x53, 0xf4, 0x39, 0xdc, 0xd6, 0x0c, 0xa5, 0xdb, 0x50, 0x34, - 0x45, 0xef, 0x10, 0xef, 0xd4, 0xfe, 0x5e, 0xda, 0x0e, 0x63, 0x3b, 0xc4, 0x27, 0x56, 0xb9, 0x7f, - 0xf7, 0x16, 0xa6, 0xe1, 0x08, 0xa2, 0xac, 0x00, 0x04, 0x7d, 0x44, 0x55, 0x28, 0xb2, 0x79, 0xe6, - 0xe6, 0x93, 0xc9, 0xc6, 0x24, 0xb3, 0x90, 0x4d, 0x3f, 0x1b, 0xbb, 0xed, 0x68, 0x0d, 0xc0, 0x26, - 0x1d, 0x8b, 0x50, 0x1e, 0x0c, 0x72, 0xd1, 0x1b, 0xc8, 0x96, 0x4f, 0xc1, 0x21, 0x2e, 0xf9, 0x9f, - 0x25, 0x98, 0xde, 0x25, 0xf4, 0x7b, 0x86, 0x75, 0xd2, 0xe4, 0x97, 0xc5, 0x37, 0x10, 0x6c, 0x71, - 0x24, 0xd8, 0xbe, 0x37, 0x64, 0x64, 0x22, 0xd6, 0x65, 0x85, 0x5c, 0xf9, 0xa7, 0x12, 0x2c, 0x45, - 0x38, 0x1f, 0x05, 0x4b, 0x77, 0x1f, 0x8a, 0xa6, 0x61, 0x51, 0x2f, 0x11, 0x8f, 0xa5, 0x90, 0x85, - 0xb1, 0x50, 0x2a, 0x66, 0x30, 0xd8, 0x45, 0x43, 0xdb, 0x90, 0xa3, 0x86, 0x98, 0xaa, 0xe3, 0x61, - 0x12, 0x62, 0x35, 0x40, 0x60, 0xe6, 0xda, 0x06, 0xce, 0x51, 0x83, 0x0d, 0x44, 0x25, 0xc2, 0x15, - 0x0e, 0x3e, 0xd7, 0xd4, 0x03, 0x0c, 0x85, 0x23, 0xcb, 0xe8, 0x5f, 0xb9, 0x0f, 0xfe, 0x40, 0x3c, - 0xb6, 0x8c, 0x3e, 0xe6, 0x58, 0xf2, 0xcf, 0x24, 0x98, 0x8f, 0x70, 0xde, 0x40, 0xe0, 0xff, 0x34, - 0x1a, 0xf8, 0xdf, 0x19, 0xa7, 0x23, 0x19, 0xe1, 0xff, 0x67, 0xb9, 0x58, 0x37, 0x58, 0x87, 0xd1, - 0x11, 0x4c, 0x99, 0x46, 0xb7, 0xf5, 0x0a, 0xee, 0xe9, 0x66, 0x59, 0xde, 0x6c, 0x06, 0x58, 0x38, - 0x0c, 0x8c, 0xce, 0x60, 0x5e, 0x57, 0xfa, 0xc4, 0x36, 0x95, 0x0e, 0x69, 0xbd, 0x82, 0x03, 0x92, - 0xd7, 0xf8, 0x45, 0x40, 0x1c, 0x11, 0x27, 0x95, 0xa0, 0x1d, 0x28, 0xa9, 0x26, 0xaf, 0xe3, 0x44, - 0xed, 0x32, 0x34, 0x8b, 0xba, 0x55, 0x9f, 0x1b, 0xcf, 0xc5, 0x07, 0xf6, 0x30, 0xe4, 0xbf, 0x8d, - 0xcf, 0x06, 0x36, 0xff, 0xd0, 0x13, 0x28, 0xf3, 0x67, 0x17, 0x1d, 0x43, 0xf3, 0x6e, 0x06, 0xd8, - 0xc8, 0x36, 0x45, 0xdb, 0xcb, 0xf3, 0xea, 0xeb, 0x29, 0x87, 0xbe, 0x1e, 0x19, 0xfb, 0xc2, 0x68, - 0x17, 0x0a, 0xe6, 0x97, 0xa9, 0x60, 0x78, 0x92, 0xe3, 0x65, 0x0b, 0xc7, 0x91, 0xff, 0x30, 0x1f, - 0x33, 0x97, 0xa7, 0xba, 0xe7, 0xaf, 0x6c, 0xd4, 0xfd, 0x8a, 0x29, 0x73, 0xe4, 0x0f, 0xa1, 0x24, - 0x32, 0xbc, 0x98, 0xcc, 0x5f, 0x1b, 0x67, 0x32, 0x87, 0xb3, 0x98, 0xbf, 0x61, 0xf1, 0x1a, 0x3d, - 0x60, 0xf4, 0x1d, 0x98, 0x20, 0xae, 0x0a, 0x37, 0x37, 0x7e, 0x38, 0x8e, 0x8a, 0x20, 0xae, 0x06, - 0x85, 0xaa, 0x68, 0x13, 0xa8, 0xe8, 0x1b, 0xcc, 0x5f, 0x8c, 0x97, 0x6d, 0x02, 0xed, 0x4a, 0x81, - 0xa7, 0xab, 0xbb, 0x6e, 0xb7, 0xfd, 0xe6, 0x97, 0xe7, 0x55, 0x08, 0x3e, 0x71, 0x58, 0x42, 0xfe, - 0x57, 0x09, 0xe6, 0xb9, 0x87, 0x3a, 0x8e, 0xa5, 0xd2, 0xc1, 0x8d, 0x25, 0xa6, 0x83, 0x48, 0x62, - 0xfa, 0x60, 0x88, 0x5b, 0x12, 0x16, 0x66, 0x26, 0xa7, 0x9f, 0x4b, 0xf0, 0x5a, 0x82, 0xfb, 0x06, - 0xe2, 0xe2, 0x7e, 0x34, 0x2e, 0xbe, 0x37, 0x6e, 0x87, 0x32, 0x62, 0xe3, 0x5f, 0xdd, 0x4e, 0xe9, - 0x0e, 0x5f, 0x29, 0x6b, 0x00, 0xa6, 0xa5, 0x9e, 0xaa, 0x1a, 0xe9, 0x89, 0xdb, 0xe9, 0x72, 0xe8, - 0x0d, 0x94, 0x4f, 0xc1, 0x21, 0x2e, 
0x64, 0xc3, 0x62, 0x97, 0x1c, 0x29, 0x8e, 0x46, 0xd7, 0xbb, - 0xdd, 0x0d, 0xc5, 0x54, 0x0e, 0x55, 0x4d, 0xa5, 0xaa, 0x38, 0x2e, 0x98, 0x6c, 0x7c, 0xe2, 0xde, - 0x1a, 0xa7, 0x71, 0xbc, 0x3c, 0xaf, 0xde, 0x4d, 0xbb, 0x1d, 0xf2, 0x58, 0x06, 0x38, 0x03, 0x1a, - 0x0d, 0xa0, 0x62, 0x91, 0xef, 0x3a, 0xaa, 0x45, 0xba, 0x9b, 0x96, 0x61, 0x46, 0xd4, 0xe6, 0xb9, - 0xda, 0xdf, 0xbc, 0x38, 0xaf, 0x56, 0x70, 0x06, 0xcf, 0x70, 0xc5, 0x99, 0xf0, 0xe8, 0x39, 0x2c, - 0x28, 0xee, 0xd3, 0xb1, 0x88, 0x56, 0x77, 0x95, 0x7c, 0x74, 0x71, 0x5e, 0x5d, 0x58, 0x4f, 0x92, - 0x87, 0x2b, 0x4c, 0x03, 0x45, 0x75, 0x28, 0x9d, 0x1a, 0x9a, 0xd3, 0x27, 0x76, 0xa5, 0xc8, 0xf1, - 0x59, 0x22, 0x28, 0x1d, 0xb8, 0x4d, 0x2f, 0xcf, 0xab, 0x13, 0x8f, 0x5b, 0x7c, 0xf5, 0x79, 0x5c, - 0x6c, 0x43, 0xc9, 0x6a, 0x49, 0xb1, 0xe2, 0xf9, 0x89, 0x71, 0x39, 0x88, 0x5a, 0x4f, 0x03, 0x12, - 0x0e, 0xf3, 0xa1, 0x67, 0x30, 0x79, 0x2c, 0x4e, 0x25, 0xec, 0x4a, 0x69, 0xa4, 0x24, 0x1c, 0x39, - 0xc5, 0x68, 0xcc, 0x0b, 0x15, 0x93, 0x5e, 0xb3, 0x8d, 0x03, 0x44, 0xf4, 0x16, 0x94, 0xf8, 0xc7, - 0xd6, 0x26, 0x3f, 0x8e, 0x2b, 0x07, 0xb1, 0xed, 0xa9, 0xdb, 0x8c, 0x3d, 0xba, 0xc7, 0xba, 0xd5, - 0xdc, 0xe0, 0xc7, 0xc2, 0x31, 0xd6, 0xad, 0xe6, 0x06, 0xf6, 0xe8, 0xe8, 0x73, 0x28, 0xd9, 0x64, - 0x5b, 0xd5, 0x9d, 0xb3, 0x0a, 0x8c, 0x74, 0xa9, 0xdc, 0x7a, 0xc4, 0xb9, 0x63, 0x07, 0x63, 0x81, - 0x06, 0x41, 0xc7, 0x1e, 0x2c, 0x3a, 0x86, 0x49, 0xcb, 0xd1, 0xd7, 0xed, 0x7d, 0x9b, 0x58, 0x95, - 0x29, 0xae, 0x63, 0x58, 0x38, 0xc7, 0x1e, 0x7f, 0x5c, 0x8b, 0xef, 0x21, 0x9f, 0x03, 0x07, 0xe0, - 0xe8, 0x8f, 0x25, 0x40, 0xb6, 0x63, 0x9a, 0x1a, 0xe9, 0x13, 0x9d, 0x2a, 0x1a, 0x3f, 0x8b, 0xb3, - 0x2b, 0xb7, 0xb9, 0xce, 0xdf, 0x1a, 0xd6, 0xaf, 0x84, 0x60, 0x5c, 0xb9, 0x7f, 0xe8, 0x9d, 0x64, - 0xc5, 0x29, 0x7a, 0x99, 0x6b, 0x8f, 0x6c, 0xfe, 0xbb, 0x32, 0x3d, 0x92, 0x6b, 0xd3, 0xcf, 0x1c, - 0x03, 0xd7, 0x0a, 0x3a, 0xf6, 0x60, 0xd1, 0x01, 0x2c, 0x5a, 0x44, 0xe9, 0xee, 0xe9, 0xda, 0x00, - 0x1b, 0x06, 0x7d, 0xac, 0x6a, 0xc4, 0x1e, 0xd8, 0x94, 0xf4, 0x2b, 0x33, 0x7c, 0xd8, 0xfd, 0x47, - 0x19, 0x38, 0x95, 0x0b, 0x67, 0x48, 0xa3, 0x3e, 0x54, 0xbd, 0x90, 0xc1, 0xd6, 0x93, 0x1f, 0xb3, - 0x1e, 0xd9, 0x1d, 0x45, 0x73, 0xef, 0x01, 0x66, 0xb9, 0x82, 0x37, 0x2f, 0xce, 0xab, 0xd5, 0xcd, - 0xcb, 0x59, 0xf1, 0x30, 0x2c, 0xf4, 0x6d, 0xa8, 0x28, 0x59, 0x7a, 0xe6, 0xb8, 0x9e, 0x55, 0xd1, - 0x91, 0x4a, 0xa6, 0x92, 0x4c, 0x04, 0x44, 0x61, 0x4e, 0x89, 0xbe, 0x52, 0xb5, 0x2b, 0xf3, 0x23, - 0x1d, 0x46, 0xc6, 0x1e, 0xb7, 0x06, 0x07, 0x12, 0x31, 0x82, 0x8d, 0x13, 0x1a, 0xf8, 0x13, 0x0a, - 0x71, 0xa0, 0x7e, 0x33, 0x6f, 0x16, 0xc7, 0x7b, 0x42, 0x11, 0x98, 0xf6, 0xca, 0x9e, 0x50, 0x84, - 0x20, 0x2f, 0x3f, 0xc2, 0xfb, 0xaf, 0x1c, 0x2c, 0x04, 0xcc, 0x23, 0x3f, 0xa1, 0x48, 0x11, 0xb9, - 0xb6, 0x27, 0x14, 0xe9, 0x6f, 0x10, 0xf2, 0xd7, 0xfd, 0x06, 0xe1, 0x1a, 0x9e, 0x6e, 0xf0, 0x67, - 0x0d, 0x81, 0xeb, 0xfe, 0xef, 0x3d, 0x6b, 0x08, 0x6c, 0xcb, 0x28, 0xb4, 0xfe, 0x3e, 0x17, 0xee, - 0xc0, 0xff, 0xfb, 0xbb, 0xf5, 0x2f, 0xff, 0xb0, 0x53, 0xfe, 0x79, 0x1e, 0xe6, 0xe2, 0xab, 0x31, - 0x72, 0x05, 0x2b, 0x0d, 0xbd, 0x82, 0x6d, 0xc2, 0x9d, 0x23, 0x47, 0xd3, 0x06, 0xdc, 0x0d, 0xa1, - 0x7b, 0x58, 0xf7, 0x0a, 0xe5, 0x0d, 0x21, 0x79, 0xe7, 0x71, 0x0a, 0x0f, 0x4e, 0x95, 0xcc, 0xb8, - 0x4e, 0xce, 0x5f, 0xe9, 0x3a, 0x39, 0x71, 0xbb, 0x59, 0x18, 0xe3, 0x76, 0x33, 0xf5, 0x6a, 0xb8, - 0x78, 0x85, 0xab, 0xe1, 0xab, 0xdc, 0xe5, 0xa6, 0x04, 0xb1, 0x61, 0x77, 0xb9, 0xf2, 0x1b, 0xb0, - 0x2c, 0xc4, 0x28, 0xbf, 0x66, 0xd5, 0xa9, 0x65, 0x68, 0x1a, 0xb1, 0x36, 0x9d, 0x7e, 0x7f, 0x20, - 0x7f, 0x1d, 0x66, 0xa2, 0x0f, 0x08, 0xdc, 0x91, 0x76, 0xdf, 
0x30, 0x88, 0x8b, 0xac, 0xd0, 0x48, - 0xbb, 0xed, 0xd8, 0xe7, 0x90, 0xbf, 0x2f, 0xc1, 0x62, 0xfa, 0x43, 0x41, 0xa4, 0xc1, 0x4c, 0x5f, - 0x39, 0x0b, 0xbf, 0xaa, 0x94, 0xae, 0x78, 0xc4, 0xc0, 0x6f, 0x8e, 0x77, 0x22, 0x58, 0x38, 0x86, - 0x2d, 0xff, 0x4a, 0x82, 0xa5, 0x8c, 0x3b, 0xdb, 0x9b, 0xb5, 0x04, 0x7d, 0x06, 0xe5, 0xbe, 0x72, - 0xd6, 0x72, 0xac, 0x1e, 0xb9, 0xf2, 0xa1, 0x0a, 0x8f, 0x18, 0x3b, 0x02, 0x05, 0xfb, 0x78, 0xf2, - 0x8f, 0x24, 0xa8, 0x64, 0x95, 0xb7, 0xe8, 0x61, 0xe4, 0x76, 0xf9, 0xab, 0xb1, 0xdb, 0xe5, 0xf9, - 0x84, 0xdc, 0x35, 0xdd, 0x2d, 0xff, 0x9d, 0x04, 0x8b, 0xe9, 0x65, 0x3e, 0x7a, 0x3f, 0x62, 0x61, - 0x35, 0x66, 0xe1, 0x6c, 0x4c, 0x4a, 0xd8, 0xf7, 0x1d, 0x98, 0x11, 0x9b, 0x01, 0x01, 0x23, 0xbc, - 0x2a, 0xa7, 0xc5, 0x4a, 0x01, 0xe1, 0x15, 0xbf, 0x7c, 0xbc, 0xa2, 0x6d, 0x38, 0x86, 0x26, 0xff, - 0x51, 0x0e, 0x8a, 0xad, 0x8e, 0xa2, 0x91, 0x1b, 0x28, 0xb3, 0xbe, 0x19, 0x29, 0xb3, 0x86, 0xfd, - 0x03, 0x82, 0x5b, 0x95, 0x59, 0x61, 0xe1, 0x58, 0x85, 0xf5, 0xf6, 0x48, 0x68, 0x97, 0x17, 0x57, - 0xbf, 0x01, 0x93, 0xbe, 0xd2, 0xf1, 0x62, 0xbe, 0xfc, 0xd7, 0x39, 0x98, 0x0a, 0xa9, 0x18, 0x33, - 0x63, 0x1c, 0x45, 0x32, 0xed, 0x28, 0xff, 0x85, 0x0a, 0xe9, 0xaa, 0x79, 0xb9, 0xd5, 0x7d, 0x28, - 0x18, 0x3c, 0x0d, 0x4b, 0xa6, 0xdc, 0xaf, 0xc3, 0x0c, 0xe5, 0xff, 0x15, 0xf2, 0x8f, 0x22, 0xf3, - 0x7c, 0x2e, 0xfa, 0xcf, 0x4b, 0xdb, 0x11, 0x2a, 0x8e, 0x71, 0x2f, 0x7f, 0x02, 0xd3, 0x11, 0x65, - 0x63, 0xbd, 0xf3, 0xfb, 0x07, 0x09, 0xbe, 0x3a, 0x74, 0xa3, 0x88, 0x1a, 0x91, 0x45, 0x52, 0x8b, - 0x2d, 0x92, 0x95, 0x6c, 0x80, 0x6b, 0x7c, 0x2f, 0xf2, 0xfd, 0x1c, 0xa0, 0xf6, 0xb1, 0x6a, 0x75, - 0x9b, 0x8a, 0x45, 0x07, 0x58, 0xfc, 0xe1, 0xeb, 0x06, 0x16, 0xcc, 0x43, 0x98, 0xea, 0x12, 0xbb, - 0x63, 0xa9, 0xdc, 0x39, 0xa2, 0x3a, 0xf7, 0x0f, 0x53, 0x36, 0x03, 0x12, 0x0e, 0xf3, 0xa1, 0x6f, - 0x41, 0xf9, 0xd4, 0xfd, 0x23, 0xa2, 0x77, 0x40, 0x3b, 0xac, 0x90, 0x0c, 0xfe, 0xba, 0x18, 0xcc, - 0x1f, 0xd1, 0x60, 0x63, 0x1f, 0x4c, 0xfe, 0xa1, 0x04, 0x8b, 0x49, 0x47, 0x6c, 0x32, 0x53, 0xaf, - 0xdf, 0x19, 0x6f, 0x40, 0x81, 0xa3, 0x33, 0x2f, 0xdc, 0x76, 0x0f, 0xde, 0x99, 0x66, 0xcc, 0x5b, - 0xe5, 0x7f, 0x97, 0x60, 0x39, 0xdd, 0xb4, 0x1b, 0x28, 0xdb, 0x3f, 0x8b, 0x96, 0xed, 0xc3, 0xce, - 0x2a, 0xd2, 0xed, 0xcc, 0x28, 0xe1, 0xff, 0x2d, 0xd5, 0xe7, 0x37, 0xd0, 0xa9, 0x83, 0x68, 0xa7, - 0x1e, 0x8c, 0xdd, 0xa9, 0xf4, 0x0e, 0x35, 0xde, 0x7d, 0xf1, 0xc5, 0xca, 0xad, 0x5f, 0x7c, 0xb1, - 0x72, 0xeb, 0x97, 0x5f, 0xac, 0xdc, 0xfa, 0x83, 0x8b, 0x15, 0xe9, 0xc5, 0xc5, 0x8a, 0xf4, 0x8b, - 0x8b, 0x15, 0xe9, 0x97, 0x17, 0x2b, 0xd2, 0x7f, 0x5e, 0xac, 0x48, 0x7f, 0xf6, 0xab, 0x95, 0x5b, - 0x9f, 0x95, 0x04, 0xee, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x57, 0xe8, 0x50, 0x1c, 0x48, 0x3d, - 0x00, 0x00, + // 3571 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0xcd, 0x6f, 0x1c, 0x47, + 0x76, 0x57, 0xcf, 0x0c, 0x39, 0xc3, 0x47, 0xf1, 0xab, 0x28, 0x93, 0x63, 0xca, 0xe2, 0xc8, 0x6d, + 0x40, 0x91, 0x1c, 0x69, 0xc6, 0x92, 0x2d, 0x59, 0xb1, 0x10, 0x3b, 0x1c, 0x52, 0x1f, 0x74, 0xf8, + 0xa5, 0x1a, 0x52, 0x71, 0x8c, 0xc8, 0x71, 0x73, 0xa6, 0x38, 0x6c, 0xb1, 0xa7, 0xbb, 0xdd, 0x5d, + 0x4d, 0x73, 0x80, 0x20, 0xc8, 0x21, 0x08, 0x10, 0x20, 0x41, 0x92, 0x83, 0xf3, 0x71, 0x8b, 0x2f, + 0x39, 0x25, 0x48, 0x6e, 0xc9, 0xc1, 0x30, 0x10, 0xc0, 0x0b, 0x08, 0x0b, 0x2f, 0xe0, 0xdb, 0xfa, + 0x44, 0xac, 0xe9, 0xd3, 0x62, 0xff, 0x81, 0x85, 0x0e, 0x8b, 0x45, 0x55, 0x57, 0x7f, 0x77, 0x73, + 0x66, 0x68, 0x89, 0x58, 0x2c, 0xf6, 0xc6, 0xa9, 0xf7, 0xde, 0xef, 0xbd, 0x7a, 0xf5, 0xea, 0xbd, + 0xd7, 0x55, 0x45, 0xb8, 
0xb7, 0x77, 0xdb, 0xae, 0xaa, 0x46, 0x6d, 0xcf, 0xd9, 0x26, 0x96, 0x4e, + 0x28, 0xb1, 0x6b, 0xfb, 0x44, 0x6f, 0x19, 0x56, 0x4d, 0x10, 0x14, 0x53, 0xad, 0x91, 0x03, 0x4a, + 0x74, 0x5b, 0x35, 0x74, 0xbb, 0xb6, 0x7f, 0x7d, 0x9b, 0x50, 0xe5, 0x7a, 0xad, 0x4d, 0x74, 0x62, + 0x29, 0x94, 0xb4, 0xaa, 0xa6, 0x65, 0x50, 0x03, 0x5d, 0x70, 0xd9, 0xab, 0x8a, 0xa9, 0x56, 0x03, + 0xf6, 0xaa, 0x60, 0x9f, 0xbb, 0xd6, 0x56, 0xe9, 0xae, 0xb3, 0x5d, 0x6d, 0x1a, 0x9d, 0x5a, 0xdb, + 0x68, 0x1b, 0x35, 0x2e, 0xb5, 0xed, 0xec, 0xf0, 0x5f, 0xfc, 0x07, 0xff, 0xcb, 0x45, 0x9b, 0x93, + 0x43, 0xca, 0x9b, 0x86, 0x45, 0x6a, 0xfb, 0x09, 0x8d, 0x73, 0x57, 0x42, 0x3c, 0xa6, 0xa1, 0xa9, + 0xcd, 0x6e, 0x96, 0x71, 0x73, 0x6f, 0x05, 0xac, 0x1d, 0xa5, 0xb9, 0xab, 0xea, 0xc4, 0xea, 0xd6, + 0xcc, 0xbd, 0x36, 0x97, 0xb5, 0x88, 0x6d, 0x38, 0x56, 0x93, 0x0c, 0x24, 0x65, 0xd7, 0x3a, 0x84, + 0x2a, 0x69, 0x66, 0xd5, 0xb2, 0xa4, 0x2c, 0x47, 0xa7, 0x6a, 0x27, 0xa9, 0xe6, 0x56, 0x2f, 0x01, + 0xbb, 0xb9, 0x4b, 0x3a, 0x4a, 0x42, 0xee, 0xcd, 0x2c, 0x39, 0x87, 0xaa, 0x5a, 0x4d, 0xd5, 0xa9, + 0x4d, 0xad, 0xb8, 0x90, 0x7c, 0x07, 0xa6, 0x16, 0x34, 0xcd, 0xf8, 0x94, 0xb4, 0xee, 0x69, 0xe4, + 0xe0, 0x91, 0xa1, 0x39, 0x1d, 0x82, 0x2e, 0xc1, 0x70, 0xcb, 0x52, 0xf7, 0x89, 0x55, 0x96, 0x2e, + 0x4a, 0x97, 0x47, 0xea, 0xe3, 0x4f, 0x0f, 0x2b, 0x67, 0x8e, 0x0e, 0x2b, 0xc3, 0x4b, 0x7c, 0x14, + 0x0b, 0xaa, 0x7c, 0x17, 0x26, 0x84, 0xf0, 0x03, 0xc3, 0xa6, 0x1b, 0x0a, 0xdd, 0x45, 0x37, 0x00, + 0x4c, 0x85, 0xee, 0x6e, 0x58, 0x64, 0x47, 0x3d, 0x10, 0xe2, 0x48, 0x88, 0xc3, 0x86, 0x4f, 0xc1, + 0x21, 0x2e, 0xf9, 0xdf, 0x24, 0x78, 0x79, 0xd1, 0xb1, 0xa9, 0xd1, 0x59, 0x25, 0xd4, 0x52, 0x9b, + 0x8b, 0x8e, 0x65, 0x11, 0x9d, 0x36, 0xa8, 0x42, 0x1d, 0x1b, 0x5d, 0x84, 0x82, 0xae, 0x74, 0x88, + 0xc0, 0x3a, 0x2b, 0xb0, 0x0a, 0x6b, 0x4a, 0x87, 0x60, 0x4e, 0x41, 0x1f, 0xc2, 0xd0, 0xbe, 0xa2, + 0x39, 0xa4, 0x9c, 0xbb, 0x28, 0x5d, 0x1e, 0xbd, 0x51, 0xad, 0x06, 0xa1, 0xe7, 0x3b, 0xa2, 0x6a, + 0xee, 0xb5, 0x79, 0x2c, 0x7a, 0xab, 0x5b, 0x7d, 0xe8, 0x28, 0x3a, 0x55, 0x69, 0xb7, 0x7e, 0x4e, + 0x40, 0x9e, 0x15, 0x7a, 0x1f, 0x31, 0x2c, 0xec, 0x42, 0xca, 0x7f, 0x09, 0x17, 0x32, 0x4d, 0x5b, + 0x51, 0x6d, 0x8a, 0x1e, 0xc3, 0x90, 0x4a, 0x49, 0xc7, 0x2e, 0x4b, 0x17, 0xf3, 0x97, 0x47, 0x6f, + 0xdc, 0xae, 0x1e, 0x1b, 0xf7, 0xd5, 0x4c, 0xb0, 0xfa, 0x98, 0x30, 0x63, 0x68, 0x99, 0xc1, 0x61, + 0x17, 0x55, 0xfe, 0x27, 0x09, 0x50, 0x58, 0x66, 0x53, 0xb1, 0xda, 0x84, 0xf6, 0xe1, 0x94, 0x3f, + 0xfd, 0x61, 0x4e, 0x99, 0x16, 0x90, 0xa3, 0xae, 0xc2, 0x88, 0x4f, 0x4c, 0x98, 0x49, 0x9a, 0xc4, + 0x9d, 0xf1, 0x28, 0xea, 0x8c, 0xeb, 0x03, 0x38, 0xc3, 0x45, 0xc9, 0xf0, 0xc2, 0x67, 0x39, 0x18, + 0x59, 0x52, 0x48, 0xc7, 0xd0, 0x1b, 0x84, 0xa2, 0x8f, 0xa1, 0xc4, 0x36, 0x5b, 0x4b, 0xa1, 0x0a, + 0x77, 0xc0, 0xe8, 0x8d, 0x37, 0x8e, 0x9b, 0x9d, 0x5d, 0x65, 0xdc, 0xd5, 0xfd, 0xeb, 0xd5, 0xf5, + 0xed, 0x27, 0xa4, 0x49, 0x57, 0x09, 0x55, 0x82, 0x98, 0x0c, 0xc6, 0xb0, 0x8f, 0x8a, 0xd6, 0xa0, + 0x60, 0x9b, 0xa4, 0x29, 0x7c, 0x77, 0xb5, 0xc7, 0x34, 0x7c, 0xcb, 0x1a, 0x26, 0x69, 0x06, 0x8b, + 0xc1, 0x7e, 0x61, 0x8e, 0x83, 0x1e, 0xc1, 0xb0, 0xcd, 0x57, 0xb9, 0x9c, 0x4f, 0xac, 0xc6, 0xf1, + 0x88, 0x6e, 0x6c, 0xf8, 0x1b, 0xd0, 0xfd, 0x8d, 0x05, 0x9a, 0xfc, 0xf3, 0x1c, 0x20, 0x9f, 0x77, + 0xd1, 0xd0, 0x5b, 0x2a, 0x55, 0x0d, 0x1d, 0xbd, 0x03, 0x05, 0xda, 0x35, 0xbd, 0xe8, 0xb8, 0xe4, + 0x19, 0xb4, 0xd9, 0x35, 0xc9, 0xb3, 0xc3, 0xca, 0x4c, 0x52, 0x82, 0x51, 0x30, 0x97, 0x41, 0x2b, + 0xbe, 0xa9, 0x39, 0x2e, 0xfd, 0x56, 0x54, 0xf5, 0xb3, 0xc3, 0x4a, 0x4a, 0x2e, 0xae, 0xfa, 0x48, + 0x51, 0x03, 0xd1, 0x3e, 0x20, 0x4d, 0xb1, 0xe9, 
0xa6, 0xa5, 0xe8, 0xb6, 0xab, 0x49, 0xed, 0x10, + 0xe1, 0x84, 0xd7, 0xfb, 0x5b, 0x34, 0x26, 0x51, 0x9f, 0x13, 0x56, 0xa0, 0x95, 0x04, 0x1a, 0x4e, + 0xd1, 0xc0, 0x32, 0x98, 0x45, 0x14, 0xdb, 0xd0, 0xcb, 0x85, 0x68, 0x06, 0xc3, 0x7c, 0x14, 0x0b, + 0x2a, 0xba, 0x02, 0xc5, 0x0e, 0xb1, 0x6d, 0xa5, 0x4d, 0xca, 0x43, 0x9c, 0x71, 0x42, 0x30, 0x16, + 0x57, 0xdd, 0x61, 0xec, 0xd1, 0xe5, 0x2f, 0x24, 0x18, 0xf3, 0x3d, 0xc7, 0xa3, 0xfd, 0xcf, 0x12, + 0x71, 0x58, 0xed, 0x6f, 0x4a, 0x4c, 0x9a, 0x47, 0xe1, 0xa4, 0xd0, 0x56, 0xf2, 0x46, 0x42, 0x31, + 0xb8, 0xea, 0xed, 0xa5, 0x1c, 0xdf, 0x4b, 0x97, 0xfb, 0x0d, 0x99, 0x8c, 0x2d, 0xf4, 0xcf, 0x85, + 0x90, 0xf9, 0x2c, 0x34, 0xd1, 0x63, 0x28, 0xd9, 0x44, 0x23, 0x4d, 0x6a, 0x58, 0xc2, 0xfc, 0x37, + 0xfb, 0x34, 0x5f, 0xd9, 0x26, 0x5a, 0x43, 0x88, 0xd6, 0xcf, 0x32, 0xfb, 0xbd, 0x5f, 0xd8, 0x87, + 0x44, 0x0f, 0xa1, 0x44, 0x49, 0xc7, 0xd4, 0x14, 0xea, 0xe5, 0xa0, 0xd7, 0xc2, 0x53, 0x60, 0x91, + 0xc3, 0xc0, 0x36, 0x8c, 0xd6, 0xa6, 0x60, 0xe3, 0xdb, 0xc7, 0x77, 0x89, 0x37, 0x8a, 0x7d, 0x18, + 0xb4, 0x0f, 0xe3, 0x8e, 0xd9, 0x62, 0x9c, 0x94, 0xd5, 0xb0, 0x76, 0x57, 0x44, 0xd2, 0xad, 0x7e, + 0x7d, 0xb3, 0x15, 0x91, 0xae, 0xcf, 0x08, 0x5d, 0xe3, 0xd1, 0x71, 0x1c, 0xd3, 0x82, 0x16, 0x60, + 0xa2, 0xa3, 0xea, 0x98, 0x28, 0xad, 0x6e, 0x83, 0x34, 0x0d, 0xbd, 0x65, 0xf3, 0xb0, 0x1a, 0xaa, + 0xcf, 0x0a, 0x80, 0x89, 0xd5, 0x28, 0x19, 0xc7, 0xf9, 0xd1, 0xfb, 0x80, 0xbc, 0x69, 0xdc, 0x77, + 0x4b, 0xb0, 0x6a, 0xe8, 0x3c, 0xe6, 0xf2, 0x41, 0x70, 0x6f, 0x26, 0x38, 0x70, 0x8a, 0x14, 0x5a, + 0x81, 0x73, 0x16, 0xd9, 0x57, 0xd9, 0x1c, 0x1f, 0xa8, 0x36, 0x35, 0xac, 0xee, 0x8a, 0xda, 0x51, + 0x69, 0x79, 0x98, 0xdb, 0x54, 0x3e, 0x3a, 0xac, 0x9c, 0xc3, 0x29, 0x74, 0x9c, 0x2a, 0x25, 0xff, + 0xcb, 0x30, 0x4c, 0xc4, 0xf2, 0x0d, 0x7a, 0x04, 0x33, 0x4d, 0xb7, 0x38, 0xad, 0x39, 0x9d, 0x6d, + 0x62, 0x35, 0x9a, 0xbb, 0xa4, 0xe5, 0x68, 0xa4, 0xc5, 0x03, 0x65, 0xa8, 0x3e, 0x2f, 0x2c, 0x9e, + 0x59, 0x4c, 0xe5, 0xc2, 0x19, 0xd2, 0xcc, 0x0b, 0x3a, 0x1f, 0x5a, 0x55, 0x6d, 0xdb, 0xc7, 0xcc, + 0x71, 0x4c, 0xdf, 0x0b, 0x6b, 0x09, 0x0e, 0x9c, 0x22, 0xc5, 0x6c, 0x6c, 0x11, 0x5b, 0xb5, 0x48, + 0x2b, 0x6e, 0x63, 0x3e, 0x6a, 0xe3, 0x52, 0x2a, 0x17, 0xce, 0x90, 0x46, 0x37, 0x61, 0xd4, 0xd5, + 0xc6, 0xd7, 0x4f, 0x2c, 0xb4, 0x5f, 0x0e, 0xd7, 0x02, 0x12, 0x0e, 0xf3, 0xb1, 0xa9, 0x19, 0xdb, + 0x36, 0xb1, 0xf6, 0x49, 0x2b, 0x7b, 0x81, 0xd7, 0x13, 0x1c, 0x38, 0x45, 0x8a, 0x4d, 0xcd, 0x8d, + 0xc0, 0xc4, 0xd4, 0x86, 0xa3, 0x53, 0xdb, 0x4a, 0xe5, 0xc2, 0x19, 0xd2, 0x2c, 0x8e, 0x5d, 0x93, + 0x17, 0xf6, 0x15, 0x55, 0x53, 0xb6, 0x35, 0x52, 0x2e, 0x46, 0xe3, 0x78, 0x2d, 0x4a, 0xc6, 0x71, + 0x7e, 0x74, 0x1f, 0xa6, 0xdc, 0xa1, 0x2d, 0x5d, 0xf1, 0x41, 0x4a, 0x1c, 0xe4, 0x65, 0x01, 0x32, + 0xb5, 0x16, 0x67, 0xc0, 0x49, 0x19, 0xf4, 0x0e, 0x8c, 0x37, 0x0d, 0x4d, 0xe3, 0xf1, 0xb8, 0x68, + 0x38, 0x3a, 0x2d, 0x8f, 0x70, 0x14, 0xc4, 0xf6, 0xe3, 0x62, 0x84, 0x82, 0x63, 0x9c, 0x88, 0x00, + 0x34, 0xbd, 0x82, 0x63, 0x97, 0xa1, 0xaf, 0x5e, 0x23, 0x59, 0xf4, 0x82, 0x1e, 0xc0, 0x1f, 0xb2, + 0x71, 0x08, 0x58, 0xfe, 0xb1, 0x04, 0xb3, 0x19, 0xa9, 0x03, 0xbd, 0x17, 0x29, 0xb1, 0xbf, 0x1f, + 0x2b, 0xb1, 0xe7, 0x33, 0xc4, 0x42, 0x75, 0x56, 0x87, 0x31, 0x8b, 0xcd, 0x4a, 0x6f, 0xbb, 0x2c, + 0x22, 0x47, 0xde, 0xec, 0x31, 0x0d, 0x1c, 0x96, 0x09, 0x72, 0xfe, 0xd4, 0xd1, 0x61, 0x65, 0x2c, + 0x42, 0xc3, 0x51, 0x78, 0xf9, 0x5f, 0x73, 0x00, 0x4b, 0xc4, 0xd4, 0x8c, 0x6e, 0x87, 0xe8, 0xa7, + 0xd1, 0x43, 0xad, 0x47, 0x7a, 0xa8, 0x6b, 0xbd, 0x96, 0xc7, 0x37, 0x2d, 0xb3, 0x89, 0xfa, 0x93, + 0x58, 0x13, 0x55, 0xeb, 0x1f, 0xf2, 0xf8, 0x2e, 0xea, 0xa7, 0x79, 0x98, 
0x0e, 0x98, 0x83, 0x36, + 0xea, 0x4e, 0x64, 0x8d, 0x7f, 0x2f, 0xb6, 0xc6, 0xb3, 0x29, 0x22, 0x2f, 0xac, 0x8f, 0x7a, 0xfe, + 0xfd, 0x0c, 0x7a, 0x02, 0xe3, 0xac, 0x71, 0x72, 0xc3, 0x83, 0xb7, 0x65, 0xc3, 0x03, 0xb7, 0x65, + 0x7e, 0x01, 0x5d, 0x89, 0x20, 0xe1, 0x18, 0x72, 0x46, 0x1b, 0x58, 0x7c, 0xd1, 0x6d, 0xa0, 0xfc, + 0xa5, 0x04, 0xe3, 0xc1, 0x32, 0x9d, 0x42, 0xd3, 0xb6, 0x16, 0x6d, 0xda, 0xae, 0xf4, 0x1d, 0xa2, + 0x19, 0x5d, 0xdb, 0x2f, 0x59, 0x83, 0xef, 0x33, 0xb1, 0x0d, 0xbe, 0xad, 0x34, 0xf7, 0xfa, 0xf8, + 0xfc, 0xfb, 0x4c, 0x02, 0x24, 0xaa, 0xc0, 0x82, 0xae, 0x1b, 0x54, 0x71, 0x73, 0xa5, 0x6b, 0xd6, + 0x72, 0xdf, 0x66, 0x79, 0x1a, 0xab, 0x5b, 0x09, 0xac, 0xbb, 0x3a, 0xb5, 0xba, 0xc1, 0x8a, 0x24, + 0x19, 0x70, 0x8a, 0x01, 0x48, 0x01, 0xb0, 0x04, 0xe6, 0xa6, 0x21, 0x36, 0xf2, 0xb5, 0x3e, 0x72, + 0x1e, 0x13, 0x58, 0x34, 0xf4, 0x1d, 0xb5, 0x1d, 0xa4, 0x1d, 0xec, 0x03, 0xe1, 0x10, 0xe8, 0xdc, + 0x5d, 0x98, 0xcd, 0xb0, 0x16, 0x4d, 0x42, 0x7e, 0x8f, 0x74, 0x5d, 0xb7, 0x61, 0xf6, 0x27, 0x3a, + 0x17, 0xfe, 0x4c, 0x1e, 0x11, 0x5f, 0xb8, 0xef, 0xe4, 0x6e, 0x4b, 0xf2, 0x17, 0x43, 0xe1, 0xd8, + 0xe1, 0x1d, 0xf3, 0x65, 0x28, 0x59, 0xc4, 0xd4, 0xd4, 0xa6, 0x62, 0x8b, 0x46, 0x88, 0x37, 0xbf, + 0x58, 0x8c, 0x61, 0x9f, 0x1a, 0xe9, 0xad, 0x73, 0x2f, 0xb6, 0xb7, 0xce, 0x3f, 0x9f, 0xde, 0xfa, + 0xcf, 0xa1, 0x64, 0x7b, 0x5d, 0x75, 0x81, 0x43, 0x5e, 0x1f, 0x20, 0xbf, 0x8a, 0x86, 0xda, 0x57, + 0xe0, 0xb7, 0xd2, 0x3e, 0x68, 0x5a, 0x13, 0x3d, 0x34, 0x60, 0x13, 0xfd, 0x5c, 0x1b, 0x5f, 0x96, + 0x53, 0x4d, 0xc5, 0xb1, 0x49, 0x8b, 0x27, 0xa2, 0x52, 0x90, 0x53, 0x37, 0xf8, 0x28, 0x16, 0x54, + 0xf4, 0x38, 0x12, 0xb2, 0xa5, 0x93, 0x84, 0xec, 0x78, 0x76, 0xb8, 0xa2, 0x2d, 0x98, 0x35, 0x2d, + 0xa3, 0x6d, 0x11, 0xdb, 0x5e, 0x22, 0x4a, 0x4b, 0x53, 0x75, 0xe2, 0xf9, 0xc7, 0xed, 0x88, 0xce, + 0x1f, 0x1d, 0x56, 0x66, 0x37, 0xd2, 0x59, 0x70, 0x96, 0xac, 0xfc, 0xb4, 0x00, 0x93, 0xf1, 0x0a, + 0x98, 0xd1, 0xa4, 0x4a, 0x27, 0x6a, 0x52, 0xaf, 0x86, 0x36, 0x83, 0xdb, 0xc1, 0xfb, 0xab, 0x9f, + 0xb2, 0x21, 0x16, 0x60, 0x42, 0x64, 0x03, 0x8f, 0x28, 0xda, 0x74, 0x7f, 0xf5, 0xb7, 0xa2, 0x64, + 0x1c, 0xe7, 0x67, 0xad, 0x67, 0xd0, 0x51, 0x7a, 0x20, 0x85, 0x68, 0xeb, 0xb9, 0x10, 0x67, 0xc0, + 0x49, 0x19, 0xb4, 0x0a, 0xd3, 0x8e, 0x9e, 0x84, 0x72, 0xa3, 0xf1, 0xbc, 0x80, 0x9a, 0xde, 0x4a, + 0xb2, 0xe0, 0x34, 0x39, 0xb4, 0x13, 0xe9, 0x46, 0x87, 0x79, 0x86, 0xbd, 0xd1, 0xf7, 0xde, 0xe9, + 0xbb, 0x1d, 0x45, 0x77, 0x60, 0xcc, 0xe2, 0xdf, 0x1d, 0x9e, 0xc1, 0x6e, 0xef, 0xfe, 0x92, 0x10, + 0x1b, 0xc3, 0x61, 0x22, 0x8e, 0xf2, 0xa6, 0xb4, 0xdb, 0xa5, 0x7e, 0xdb, 0x6d, 0xf9, 0xff, 0xa5, + 0x70, 0x11, 0xf2, 0x5b, 0xe0, 0x5e, 0xa7, 0x4c, 0x09, 0x89, 0x50, 0x77, 0x64, 0xa4, 0x77, 0xbf, + 0xb7, 0x06, 0xea, 0x7e, 0x83, 0xe2, 0xd9, 0xbb, 0xfd, 0xfd, 0x5c, 0x82, 0x99, 0x7b, 0x8d, 0xfb, + 0x96, 0xe1, 0x98, 0x9e, 0x39, 0xeb, 0xa6, 0xeb, 0xd7, 0xb7, 0xa1, 0x60, 0x39, 0x9a, 0x37, 0x8f, + 0xd7, 0xbc, 0x79, 0x60, 0x47, 0x63, 0xf3, 0x98, 0x8e, 0x49, 0xb9, 0x93, 0x60, 0x02, 0x68, 0x0d, + 0x86, 0x2d, 0x45, 0x6f, 0x13, 0xaf, 0xac, 0x5e, 0xea, 0x61, 0xfd, 0xf2, 0x12, 0x66, 0xec, 0xa1, + 0xe6, 0x8d, 0x4b, 0x63, 0x81, 0x22, 0xff, 0xbd, 0x04, 0x13, 0x0f, 0x36, 0x37, 0x37, 0x96, 0x75, + 0xbe, 0xa3, 0xf9, 0x79, 0xfa, 0x45, 0x28, 0x98, 0x0a, 0xdd, 0x8d, 0x57, 0x7a, 0x46, 0xc3, 0x9c, + 0x82, 0x3e, 0x80, 0x22, 0xcb, 0x24, 0x44, 0x6f, 0xf5, 0xd9, 0x6a, 0x0b, 0xf8, 0xba, 0x2b, 0x14, + 0x74, 0x88, 0x62, 0x00, 0x7b, 0x70, 0xf2, 0x1e, 0x9c, 0x0b, 0x99, 0xc3, 0xfc, 0xc1, 0x8f, 0x81, + 0x51, 0x03, 0x86, 0x98, 0x66, 0xef, 0x94, 0xb7, 0xd7, 0x61, 0x66, 0x6c, 0x4a, 0x41, 0xa7, 0xc3, + 
0x7e, 0xd9, 0xd8, 0xc5, 0x92, 0x57, 0x61, 0x8c, 0x5f, 0x22, 0x18, 0x16, 0xe5, 0x6e, 0x41, 0x17, + 0x20, 0xdf, 0x51, 0x75, 0x51, 0x67, 0x47, 0x85, 0x4c, 0x9e, 0xd5, 0x08, 0x36, 0xce, 0xc9, 0xca, + 0x81, 0xc8, 0x3c, 0x01, 0x59, 0x39, 0xc0, 0x6c, 0x5c, 0xbe, 0x0f, 0x45, 0xe1, 0xee, 0x30, 0x50, + 0xfe, 0x78, 0xa0, 0x7c, 0x0a, 0xd0, 0x3a, 0x14, 0x97, 0x37, 0xea, 0x9a, 0xe1, 0x76, 0x5d, 0x4d, + 0xb5, 0x65, 0xc5, 0xd7, 0x62, 0x71, 0x79, 0x09, 0x63, 0x4e, 0x41, 0x32, 0x0c, 0x93, 0x83, 0x26, + 0x31, 0x29, 0x8f, 0x88, 0x91, 0x3a, 0xb0, 0x55, 0xbe, 0xcb, 0x47, 0xb0, 0xa0, 0xc8, 0xff, 0x90, + 0x83, 0xa2, 0x70, 0xc7, 0x29, 0x7c, 0x85, 0xad, 0x44, 0xbe, 0xc2, 0x5e, 0xef, 0x2f, 0x34, 0x32, + 0x3f, 0xc1, 0x36, 0x63, 0x9f, 0x60, 0x57, 0xfb, 0xc4, 0x3b, 0xfe, 0xfb, 0xeb, 0x7f, 0x24, 0x18, + 0x8f, 0x06, 0x25, 0xba, 0x09, 0xa3, 0xac, 0xe0, 0xa8, 0x4d, 0xb2, 0x16, 0xf4, 0xb9, 0xfe, 0x21, + 0x4c, 0x23, 0x20, 0xe1, 0x30, 0x1f, 0x6a, 0xfb, 0x62, 0x2c, 0x8e, 0xc4, 0xa4, 0xb3, 0x5d, 0xea, + 0x50, 0x55, 0xab, 0xba, 0x17, 0x63, 0xd5, 0x65, 0x9d, 0xae, 0x5b, 0x0d, 0x6a, 0xa9, 0x7a, 0x3b, + 0xa1, 0x88, 0x07, 0x65, 0x18, 0x59, 0xfe, 0x3f, 0x09, 0x46, 0x85, 0xc9, 0xa7, 0xf0, 0x55, 0xf1, + 0xc7, 0xd1, 0xaf, 0x8a, 0x4b, 0x7d, 0x6e, 0xf0, 0xf4, 0x4f, 0x8a, 0xff, 0x08, 0x4c, 0x67, 0x5b, + 0x9a, 0x45, 0xf5, 0xae, 0x61, 0xd3, 0x78, 0x54, 0xb3, 0xcd, 0x88, 0x39, 0x05, 0x39, 0x30, 0xa9, + 0xc6, 0x72, 0x80, 0x70, 0x6d, 0xad, 0x3f, 0x4b, 0x7c, 0xb1, 0x7a, 0x59, 0xc0, 0x4f, 0xc6, 0x29, + 0x38, 0xa1, 0x42, 0x26, 0x90, 0xe0, 0x42, 0x0f, 0xa1, 0xb0, 0x4b, 0xa9, 0x99, 0x72, 0x5e, 0xdd, + 0x23, 0xf3, 0x04, 0x26, 0x94, 0xf8, 0xec, 0x36, 0x37, 0x37, 0x30, 0x87, 0x92, 0x7f, 0x15, 0xf8, + 0xa3, 0xe1, 0xc6, 0xb8, 0x9f, 0x4f, 0xa5, 0x93, 0xe4, 0xd3, 0xd1, 0xb4, 0x5c, 0x8a, 0x1e, 0x40, + 0x9e, 0x6a, 0xfd, 0x7e, 0x16, 0x0a, 0xc4, 0xcd, 0x95, 0x46, 0x90, 0x90, 0x36, 0x57, 0x1a, 0x98, + 0x41, 0xa0, 0x75, 0x18, 0x62, 0xd5, 0x87, 0x6d, 0xc1, 0x7c, 0xff, 0x5b, 0x9a, 0xcd, 0x3f, 0x08, + 0x08, 0xf6, 0xcb, 0xc6, 0x2e, 0x8e, 0xfc, 0x09, 0x8c, 0x45, 0xf6, 0x29, 0xfa, 0x18, 0xce, 0x6a, + 0x86, 0xd2, 0xaa, 0x2b, 0x9a, 0xa2, 0x37, 0x89, 0x77, 0x39, 0x70, 0x29, 0xed, 0x0b, 0x63, 0x25, + 0xc4, 0x27, 0x76, 0xb9, 0x7f, 0x9d, 0x1a, 0xa6, 0xe1, 0x08, 0xa2, 0xac, 0x00, 0x04, 0x73, 0x44, + 0x15, 0x18, 0x62, 0x71, 0xe6, 0xd6, 0x93, 0x91, 0xfa, 0x08, 0xb3, 0x90, 0x85, 0x9f, 0x8d, 0xdd, + 0x71, 0x74, 0x03, 0xc0, 0x26, 0x4d, 0x8b, 0x50, 0x9e, 0x0c, 0x72, 0xd1, 0x4b, 0xe5, 0x86, 0x4f, + 0xc1, 0x21, 0x2e, 0xf9, 0x47, 0x12, 0x8c, 0xad, 0x11, 0xfa, 0xa9, 0x61, 0xed, 0x6d, 0xf0, 0xc7, + 0x00, 0xa7, 0x90, 0x6c, 0x71, 0x24, 0xd9, 0xbe, 0xd1, 0x63, 0x65, 0x22, 0xd6, 0x65, 0xa5, 0x5c, + 0xf9, 0x4b, 0x09, 0x66, 0x23, 0x9c, 0x77, 0x83, 0xad, 0xbb, 0x05, 0x43, 0xa6, 0x61, 0x51, 0xaf, + 0x10, 0x0f, 0xa4, 0x90, 0xa5, 0xb1, 0x50, 0x29, 0x66, 0x30, 0xd8, 0x45, 0x43, 0x2b, 0x90, 0xa3, + 0x86, 0x08, 0xd5, 0xc1, 0x30, 0x09, 0xb1, 0xea, 0x20, 0x30, 0x73, 0x9b, 0x06, 0xce, 0x51, 0x83, + 0x2d, 0x44, 0x39, 0xc2, 0x15, 0x4e, 0x3e, 0x2f, 0x68, 0x06, 0x18, 0x0a, 0x3b, 0x96, 0xd1, 0x39, + 0xf1, 0x1c, 0xfc, 0x85, 0xb8, 0x67, 0x19, 0x1d, 0xcc, 0xb1, 0xe4, 0xaf, 0x24, 0x98, 0x8a, 0x70, + 0x9e, 0x42, 0xe2, 0x7f, 0x18, 0x4d, 0xfc, 0x57, 0x07, 0x99, 0x48, 0x46, 0xfa, 0xff, 0x2a, 0x17, + 0x9b, 0x06, 0x9b, 0x30, 0xda, 0x81, 0x51, 0xd3, 0x68, 0x35, 0x9e, 0xc3, 0x75, 0xe0, 0x04, 0xab, + 0x9b, 0x1b, 0x01, 0x16, 0x0e, 0x03, 0xa3, 0x03, 0x98, 0xd2, 0x95, 0x0e, 0xb1, 0x4d, 0xa5, 0x49, + 0x1a, 0xcf, 0xe1, 0x80, 0xe4, 0x25, 0x7e, 0xdf, 0x10, 0x47, 0xc4, 0x49, 0x25, 0x68, 0x15, 0x8a, + 0xaa, 0xc9, 0xfb, 0x38, 
0xd1, 0xbb, 0xf4, 0xac, 0xa2, 0x6e, 0xd7, 0xe7, 0xe6, 0x73, 0xf1, 0x03, + 0x7b, 0x18, 0xf2, 0x7f, 0xc6, 0xa3, 0x81, 0xc5, 0x1f, 0xba, 0x0f, 0x25, 0xfe, 0xac, 0xa6, 0x69, + 0x68, 0xde, 0xcd, 0x00, 0x5b, 0xd9, 0x0d, 0x31, 0xf6, 0xec, 0xb0, 0x72, 0x3e, 0xe5, 0xd0, 0xd7, + 0x23, 0x63, 0x5f, 0x18, 0xad, 0x41, 0xc1, 0xfc, 0x21, 0x1d, 0x0c, 0x2f, 0x72, 0xbc, 0x6d, 0xe1, + 0x38, 0xf2, 0x5f, 0xe7, 0x63, 0xe6, 0xf2, 0x52, 0xf7, 0xe4, 0xb9, 0xad, 0xba, 0xdf, 0x31, 0x65, + 0xae, 0xfc, 0x36, 0x14, 0x45, 0x85, 0x17, 0xc1, 0xfc, 0xf6, 0x20, 0xc1, 0x1c, 0xae, 0x62, 0xfe, + 0x07, 0x8b, 0x37, 0xe8, 0x01, 0xa3, 0x8f, 0x60, 0x98, 0xb8, 0x2a, 0xdc, 0xda, 0x78, 0x6b, 0x10, + 0x15, 0x41, 0x5e, 0x0d, 0x1a, 0x55, 0x31, 0x26, 0x50, 0xd1, 0x7b, 0xcc, 0x5f, 0x8c, 0x97, 0x7d, + 0x04, 0xda, 0xe5, 0x02, 0x2f, 0x57, 0x17, 0xdc, 0x69, 0xfb, 0xc3, 0xcf, 0x0e, 0x2b, 0x10, 0xfc, + 0xc4, 0x61, 0x09, 0xf9, 0x27, 0x12, 0x4c, 0x71, 0x0f, 0x35, 0x1d, 0x4b, 0xa5, 0xdd, 0x53, 0x2b, + 0x4c, 0x8f, 0x22, 0x85, 0xe9, 0xad, 0x1e, 0x6e, 0x49, 0x58, 0x98, 0x59, 0x9c, 0xbe, 0x96, 0xe0, + 0xa5, 0x04, 0xf7, 0x29, 0xe4, 0xc5, 0xad, 0x68, 0x5e, 0x7c, 0x63, 0xd0, 0x09, 0x65, 0xbd, 0x91, + 0x18, 0x4b, 0x99, 0x0e, 0xdf, 0x29, 0x37, 0x00, 0x4c, 0x4b, 0xdd, 0x57, 0x35, 0xd2, 0x16, 0x97, + 0xe0, 0xa5, 0xd0, 0xb3, 0x36, 0x9f, 0x82, 0x43, 0x5c, 0xc8, 0x86, 0x99, 0x16, 0xd9, 0x51, 0x1c, + 0x8d, 0x2e, 0xb4, 0x5a, 0x8b, 0x8a, 0xa9, 0x6c, 0xab, 0x9a, 0x4a, 0x55, 0x71, 0x5c, 0x30, 0x52, + 0xbf, 0xe3, 0x5e, 0x4e, 0xa7, 0x71, 0x3c, 0x3b, 0xac, 0x5c, 0x48, 0xbb, 0x1d, 0xf2, 0x58, 0xba, + 0x38, 0x03, 0x1a, 0x75, 0xa1, 0x6c, 0x91, 0x4f, 0x1c, 0xd5, 0x22, 0xad, 0x25, 0xcb, 0x30, 0x23, + 0x6a, 0xf3, 0x5c, 0xed, 0x1f, 0x1e, 0x1d, 0x56, 0xca, 0x38, 0x83, 0xa7, 0xb7, 0xe2, 0x4c, 0x78, + 0xf4, 0x04, 0xa6, 0x15, 0xf7, 0x35, 0x60, 0x44, 0xab, 0xbb, 0x4b, 0x6e, 0x1f, 0x1d, 0x56, 0xa6, + 0x17, 0x92, 0xe4, 0xde, 0x0a, 0xd3, 0x40, 0x51, 0x0d, 0x8a, 0xfb, 0xfc, 0xad, 0xa2, 0x5d, 0x1e, + 0xe2, 0xf8, 0xac, 0x10, 0x14, 0xdd, 0xe7, 0x8b, 0x0c, 0x73, 0xf8, 0x5e, 0x83, 0xef, 0x3e, 0x8f, + 0x8b, 0x7d, 0x50, 0xb2, 0x5e, 0x52, 0xec, 0x78, 0x7e, 0x62, 0x5c, 0x0a, 0xb2, 0xd6, 0x83, 0x80, + 0x84, 0xc3, 0x7c, 0xe8, 0x31, 0x8c, 0xec, 0x8a, 0x53, 0x09, 0xbb, 0x5c, 0xec, 0xab, 0x08, 0x47, + 0x4e, 0x31, 0xea, 0x53, 0x42, 0xc5, 0x88, 0x37, 0x6c, 0xe3, 0x00, 0x11, 0x5d, 0x81, 0x22, 0xff, + 0xb1, 0xbc, 0xc4, 0x8f, 0xe3, 0x4a, 0x41, 0x6e, 0x7b, 0xe0, 0x0e, 0x63, 0x8f, 0xee, 0xb1, 0x2e, + 0x6f, 0x2c, 0xf2, 0x63, 0xe1, 0x18, 0xeb, 0xf2, 0xc6, 0x22, 0xf6, 0xe8, 0xe8, 0x63, 0x28, 0xda, + 0x64, 0x45, 0xd5, 0x9d, 0x83, 0x32, 0xf4, 0x75, 0xa9, 0xdc, 0xb8, 0xcb, 0xb9, 0x63, 0x07, 0x63, + 0x81, 0x06, 0x41, 0xc7, 0x1e, 0x2c, 0xda, 0x85, 0x11, 0xcb, 0xd1, 0x17, 0xec, 0x2d, 0x9b, 0x58, + 0xe5, 0x51, 0xae, 0xa3, 0x57, 0x3a, 0xc7, 0x1e, 0x7f, 0x5c, 0x8b, 0xef, 0x21, 0x9f, 0x03, 0x07, + 0xe0, 0xe8, 0xef, 0x24, 0x40, 0xb6, 0x63, 0x9a, 0x1a, 0xe9, 0x10, 0x9d, 0x2a, 0x1a, 0x3f, 0x8b, + 0xb3, 0xcb, 0x67, 0xb9, 0xce, 0x3f, 0xea, 0x35, 0xaf, 0x84, 0x60, 0x5c, 0xb9, 0x7f, 0xe8, 0x9d, + 0x64, 0xc5, 0x29, 0x7a, 0x99, 0x6b, 0x77, 0x6c, 0xfe, 0x77, 0x79, 0xac, 0x2f, 0xd7, 0xa6, 0x9f, + 0x39, 0x06, 0xae, 0x15, 0x74, 0xec, 0xc1, 0xa2, 0x47, 0x30, 0x63, 0x11, 0xa5, 0xb5, 0xae, 0x6b, + 0x5d, 0x6c, 0x18, 0xf4, 0x9e, 0xaa, 0x11, 0xbb, 0x6b, 0x53, 0xd2, 0x29, 0x8f, 0xf3, 0x65, 0xf7, + 0xdf, 0x7e, 0xe0, 0x54, 0x2e, 0x9c, 0x21, 0x8d, 0x3a, 0x50, 0xf1, 0x52, 0x06, 0xdb, 0x4f, 0x7e, + 0xce, 0xba, 0x6b, 0x37, 0x15, 0xcd, 0xbd, 0x07, 0x98, 0xe0, 0x0a, 0x5e, 0x3b, 0x3a, 0xac, 0x54, + 0x96, 0x8e, 0x67, 0xc5, 0xbd, 0xb0, 0xd0, 0x07, 
0x50, 0x56, 0xb2, 0xf4, 0x4c, 0x72, 0x3d, 0xaf, + 0xb0, 0x3c, 0x94, 0xa9, 0x20, 0x53, 0x1a, 0x51, 0x98, 0x54, 0xa2, 0x8f, 0x8e, 0xed, 0xf2, 0x54, + 0x5f, 0x07, 0x91, 0xb1, 0xb7, 0xca, 0xc1, 0x61, 0x44, 0x8c, 0x60, 0xe3, 0x84, 0x06, 0xf4, 0x17, + 0x80, 0x94, 0xf8, 0x3b, 0x69, 0xbb, 0x8c, 0xfa, 0x2a, 0x3f, 0x89, 0x07, 0xd6, 0x41, 0xd8, 0x25, + 0x48, 0x36, 0x4e, 0xd1, 0xc3, 0x1f, 0x6f, 0x88, 0xa3, 0xfc, 0xd3, 0x79, 0x00, 0x3b, 0xd8, 0xe3, + 0x8d, 0xc0, 0xb4, 0xe7, 0xf6, 0x78, 0x23, 0x04, 0x79, 0xfc, 0xe1, 0xe1, 0x2f, 0x72, 0x30, 0x1d, + 0x30, 0xf7, 0xfd, 0x78, 0x23, 0x45, 0xe4, 0x77, 0x8f, 0x60, 0x7b, 0x3f, 0x82, 0xfd, 0x52, 0x82, + 0xf1, 0xc0, 0x75, 0xbf, 0x79, 0x0f, 0x2a, 0x02, 0xdb, 0x32, 0x5a, 0xbc, 0xff, 0xce, 0x85, 0x27, + 0xf0, 0x5b, 0x7f, 0xab, 0xff, 0xc3, 0x5f, 0xae, 0xca, 0x5f, 0xe7, 0x61, 0x32, 0xbe, 0x1b, 0x23, + 0x97, 0xbf, 0x52, 0xcf, 0xcb, 0xdf, 0x0d, 0x38, 0xb7, 0xe3, 0x68, 0x5a, 0x97, 0xbb, 0x21, 0x74, + 0x03, 0xec, 0x5e, 0xde, 0xbc, 0x22, 0x24, 0xcf, 0xdd, 0x4b, 0xe1, 0xc1, 0xa9, 0x92, 0x19, 0x17, + 0xd9, 0xf9, 0x13, 0x5d, 0x64, 0x27, 0xee, 0x55, 0x0b, 0x03, 0xdc, 0xab, 0xa6, 0x5e, 0x4a, 0x0f, + 0x9d, 0xe0, 0x52, 0xfa, 0x24, 0xb7, 0xc8, 0x29, 0x49, 0xac, 0xe7, 0xa3, 0xc6, 0x57, 0x60, 0x4e, + 0x88, 0x51, 0x7e, 0xc1, 0xab, 0x53, 0xcb, 0xd0, 0x34, 0x62, 0x2d, 0x39, 0x9d, 0x4e, 0x57, 0x7e, + 0x17, 0xc6, 0xa3, 0x4f, 0x17, 0xdc, 0x95, 0x76, 0x5f, 0x4f, 0x88, 0x2b, 0xb4, 0xd0, 0x4a, 0xbb, + 0xe3, 0xd8, 0xe7, 0x90, 0xff, 0x46, 0x82, 0x99, 0xf4, 0x27, 0x8a, 0x48, 0x83, 0xf1, 0x8e, 0x72, + 0x10, 0x7e, 0x36, 0x2a, 0x9d, 0xf0, 0x70, 0x83, 0xdf, 0x59, 0xaf, 0x46, 0xb0, 0x70, 0x0c, 0x5b, + 0xfe, 0x5e, 0x82, 0xd9, 0x8c, 0xdb, 0xe2, 0xd3, 0xb5, 0x04, 0x7d, 0x08, 0xa5, 0x8e, 0x72, 0xd0, + 0x70, 0xac, 0x36, 0x39, 0xf1, 0x71, 0x0e, 0xcf, 0x18, 0xab, 0x02, 0x05, 0xfb, 0x78, 0xf2, 0xe7, + 0x12, 0x94, 0xb3, 0x1a, 0x6b, 0x74, 0x33, 0x72, 0xaf, 0xfd, 0x6a, 0xec, 0x5e, 0x7b, 0x2a, 0x21, + 0xf7, 0x82, 0x6e, 0xb5, 0xff, 0x4b, 0x82, 0x99, 0xf4, 0x0f, 0x0c, 0xf4, 0x66, 0xc4, 0xc2, 0x4a, + 0xcc, 0xc2, 0x89, 0x98, 0x94, 0xb0, 0xef, 0x23, 0x18, 0x17, 0x9f, 0x21, 0x02, 0x46, 0x78, 0x55, + 0x4e, 0xcb, 0x95, 0x02, 0xc2, 0x6b, 0xbb, 0xf9, 0x7a, 0x45, 0xc7, 0x70, 0x0c, 0x4d, 0xfe, 0xdb, + 0x1c, 0x0c, 0x35, 0x9a, 0x8a, 0x46, 0x4e, 0xa1, 0xcd, 0x7a, 0x3f, 0xd2, 0x66, 0xf5, 0xfa, 0x17, + 0x0f, 0x6e, 0x55, 0x66, 0x87, 0x85, 0x63, 0x1d, 0xd6, 0xeb, 0x7d, 0xa1, 0x1d, 0xdf, 0x5c, 0xfd, + 0x01, 0x8c, 0xf8, 0x4a, 0x07, 0xcb, 0xf9, 0xf2, 0xbf, 0xe7, 0x60, 0x34, 0xa4, 0x62, 0xc0, 0x8a, + 0xb1, 0x13, 0xa9, 0xb4, 0xfd, 0xfc, 0x63, 0x5d, 0x48, 0x57, 0xd5, 0xab, 0xad, 0xee, 0x13, 0xc5, + 0xe0, 0x51, 0x5a, 0xb2, 0xe4, 0xbe, 0x0b, 0xe3, 0x94, 0xff, 0xe3, 0x99, 0x7f, 0x08, 0x9a, 0xe7, + 0xb1, 0xe8, 0x3f, 0x6c, 0xdd, 0x8c, 0x50, 0x71, 0x8c, 0x7b, 0xee, 0x0e, 0x8c, 0x45, 0x94, 0x0d, + 0xf4, 0xc2, 0xf0, 0x7f, 0x25, 0x78, 0xb5, 0xe7, 0x27, 0x2a, 0xaa, 0x47, 0x36, 0x49, 0x35, 0xb6, + 0x49, 0xe6, 0xb3, 0x01, 0x5e, 0xdc, 0x4b, 0x95, 0xfa, 0xb5, 0xa7, 0xdf, 0xcd, 0x9f, 0xf9, 0xe6, + 0xbb, 0xf9, 0x33, 0xdf, 0x7e, 0x37, 0x7f, 0xe6, 0xaf, 0x8e, 0xe6, 0xa5, 0xa7, 0x47, 0xf3, 0xd2, + 0x37, 0x47, 0xf3, 0xd2, 0xb7, 0x47, 0xf3, 0xd2, 0xcf, 0x8e, 0xe6, 0xa5, 0x7f, 0xfc, 0x7e, 0xfe, + 0xcc, 0x87, 0x45, 0x01, 0xf7, 0xeb, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb1, 0xb3, 0xc8, 0xe2, 0x54, + 0x3c, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.proto b/vendor/k8s.io/api/extensions/v1beta1/generated.proto index 252a9db88a36..7a3e70290d9a 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/generated.proto +++ 
b/vendor/k8s.io/api/extensions/v1beta1/generated.proto @@ -32,11 +32,10 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1beta1"; -// An APIVersion represents a single concrete version of an object model. -message APIVersion { - // Name of this version (e.g. 'v1'). - // +optional - optional string name = 1; +// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. +message AllowedFlexVolume { + // Driver is the name of the Flexvolume driver. + optional string driver = 1; } // defines the host volume conditions that will be enabled by a policy @@ -100,6 +99,27 @@ message DaemonSet { optional DaemonSetStatus status = 3; } +// DaemonSetCondition describes the state of a DaemonSet at a certain point. +message DaemonSetCondition { + // Type of DaemonSet condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // Last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. + // +optional + optional string reason = 4; + + // A human readable message indicating details about the transition. + // +optional + optional string message = 5; +} + // DaemonSetList is a collection of daemon sets. message DaemonSetList { // Standard list metadata. @@ -197,6 +217,12 @@ message DaemonSetStatus { // create the name for the newest ControllerRevision. // +optional optional int32 collisionCount = 9; + + // Represents the latest available observations of a DaemonSet's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated DaemonSetCondition conditions = 10; } message DaemonSetUpdateStrategy { @@ -441,6 +467,7 @@ message IDRange { optional int64 max = 2; } +// DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. // IPBlock describes a particular CIDR (Ex. "192.168.1.1/24") that is allowed to the pods // matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should // not be included within this rule. @@ -582,6 +609,7 @@ message IngressTLS { optional string secretName = 2; } +// DEPRECATED 1.9 - This group version of NetworkPolicy is deprecated by networking/v1/NetworkPolicy. // NetworkPolicy describes what network traffic is allowed for a set of Pods message NetworkPolicy { // Standard object's metadata. @@ -594,6 +622,7 @@ message NetworkPolicy { optional NetworkPolicySpec spec = 2; } +// DEPRECATED 1.9 - This group version of NetworkPolicyEgressRule is deprecated by networking/v1/NetworkPolicyEgressRule. // NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods // matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. // This type is beta-level in 1.8 @@ -615,6 +644,7 @@ message NetworkPolicyEgressRule { repeated NetworkPolicyPeer to = 2; } +// DEPRECATED 1.9 - This group version of NetworkPolicyIngressRule is deprecated by networking/v1/NetworkPolicyIngressRule. // This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from. message NetworkPolicyIngressRule { // List of ports which should be made accessible on the pods selected for this rule. 
@@ -634,6 +664,7 @@ message NetworkPolicyIngressRule { repeated NetworkPolicyPeer from = 2; } +// DEPRECATED 1.9 - This group version of NetworkPolicyList is deprecated by networking/v1/NetworkPolicyList. // Network Policy List is a list of NetworkPolicy objects. message NetworkPolicyList { // Standard list metadata. @@ -645,6 +676,7 @@ message NetworkPolicyList { repeated NetworkPolicy items = 2; } +// DEPRECATED 1.9 - This group version of NetworkPolicyPeer is deprecated by networking/v1/NetworkPolicyPeer. message NetworkPolicyPeer { // This is a label selector which selects Pods in this namespace. // This field follows standard label selector semantics. @@ -664,6 +696,7 @@ message NetworkPolicyPeer { optional IPBlock ipBlock = 3; } +// DEPRECATED 1.9 - This group version of NetworkPolicyPort is deprecated by networking/v1/NetworkPolicyPort. message NetworkPolicyPort { // Optional. The protocol (TCP or UDP) which traffic must match. // If not specified, this field defaults to TCP. @@ -679,6 +712,7 @@ message NetworkPolicyPort { optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2; } +// DEPRECATED 1.9 - This group version of NetworkPolicySpec is deprecated by networking/v1/NetworkPolicySpec. message NetworkPolicySpec { // Selects the pods to which this NetworkPolicy object applies. The array of ingress rules // is applied to any pods selected by this field. Multiple network policies can select the @@ -752,8 +786,9 @@ message PodSecurityPolicySpec { optional bool privileged = 1; // DefaultAddCapabilities is the default set of capabilities that will be added to the container - // unless the pod spec specifically drops the capability. You may not list a capabiility in both - // DefaultAddCapabilities and RequiredDropCapabilities. + // unless the pod spec specifically drops the capability. You may not list a capability in both + // DefaultAddCapabilities and RequiredDropCapabilities. Capabilities added here are implicitly + // allowed, and need not be included in the AllowedCapabilities list. // +optional repeated string defaultAddCapabilities = 2; @@ -815,18 +850,24 @@ message PodSecurityPolicySpec { optional bool defaultAllowPrivilegeEscalation = 15; // AllowPrivilegeEscalation determines if a pod can request to allow - // privilege escalation. + // privilege escalation. If unspecified, defaults to true. // +optional optional bool allowPrivilegeEscalation = 16; // is a white list of allowed host paths. Empty indicates that all host paths may be used. // +optional repeated AllowedHostPath allowedHostPaths = 17; + + // AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all + // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes + // is allowed in the "Volumes" field. + // +optional + repeated AllowedFlexVolume allowedFlexVolumes = 18; } // DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for // more information. -// ReplicaSet represents the configuration of a ReplicaSet. +// ReplicaSet ensures that a specified number of pod replicas are running at any given time. message ReplicaSet { // If the Labels of a ReplicaSet are empty, they are defaulted to // be the same as the Pod(s) that the ReplicaSet manages. 
@@ -1016,7 +1057,7 @@ message SELinuxStrategyOptions { optional string rule = 1; // seLinuxOptions required to run as; required for MustRunAs - // More info: https://git.k8s.io/community/contributors/design-proposals/security_context.md + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ // +optional optional k8s.io.api.core.v1.SELinuxOptions seLinuxOptions = 2; } @@ -1074,51 +1115,3 @@ message SupplementalGroupsStrategyOptions { repeated IDRange ranges = 2; } -// A ThirdPartyResource is a generic representation of a resource, it is used by add-ons and plugins to add new resource -// types to the API. It consists of one or more Versions of the api. -message ThirdPartyResource { - // Standard object metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Description is the description of this object. - // +optional - optional string description = 2; - - // Versions are versions for this third party object - // +optional - repeated APIVersion versions = 3; -} - -// An internal object, used for versioned storage in etcd. Not exposed to the end user. -message ThirdPartyResourceData { - // Standard object metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Data is the raw JSON data for this data. - // +optional - optional bytes data = 2; -} - -// ThirdPartyResrouceDataList is a list of ThirdPartyResourceData. -message ThirdPartyResourceDataList { - // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of ThirdpartyResourceData. - repeated ThirdPartyResourceData items = 2; -} - -// ThirdPartyResourceList is a list of ThirdPartyResources. -message ThirdPartyResourceList { - // Standard list metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of ThirdPartyResources. - repeated ThirdPartyResource items = 2; -} - diff --git a/vendor/k8s.io/api/extensions/v1beta1/register.go b/vendor/k8s.io/api/extensions/v1beta1/register.go index 626701cf013c..7625f678137b 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/register.go +++ b/vendor/k8s.io/api/extensions/v1beta1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. 
func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Deployment{}, @@ -49,12 +49,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &DeploymentRollback{}, &ReplicationControllerDummy{}, &Scale{}, - &ThirdPartyResource{}, - &ThirdPartyResourceList{}, &DaemonSetList{}, &DaemonSet{}, - &ThirdPartyResourceData{}, - &ThirdPartyResourceDataList{}, &Ingress{}, &IngressList{}, &ReplicaSet{}, diff --git a/vendor/k8s.io/api/extensions/v1beta1/types.go b/vendor/k8s.io/api/extensions/v1beta1/types.go index 8b552ae27f3d..c3d9f72d7340 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/types.go +++ b/vendor/k8s.io/api/extensions/v1beta1/types.go @@ -100,63 +100,6 @@ type CustomMetricCurrentStatusList struct { Items []CustomMetricCurrentStatus `json:"items" protobuf:"bytes,1,rep,name=items"` } -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// A ThirdPartyResource is a generic representation of a resource, it is used by add-ons and plugins to add new resource -// types to the API. It consists of one or more Versions of the api. -type ThirdPartyResource struct { - metav1.TypeMeta `json:",inline"` - - // Standard object metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Description is the description of this object. - // +optional - Description string `json:"description,omitempty" protobuf:"bytes,2,opt,name=description"` - - // Versions are versions for this third party object - // +optional - Versions []APIVersion `json:"versions,omitempty" protobuf:"bytes,3,rep,name=versions"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ThirdPartyResourceList is a list of ThirdPartyResources. -type ThirdPartyResourceList struct { - metav1.TypeMeta `json:",inline"` - - // Standard list metadata. - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of ThirdPartyResources. - Items []ThirdPartyResource `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// An APIVersion represents a single concrete version of an object model. -type APIVersion struct { - // Name of this version (e.g. 'v1'). - // +optional - Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// An internal object, used for versioned storage in etcd. Not exposed to the end user. -type ThirdPartyResourceData struct { - metav1.TypeMeta `json:",inline"` - // Standard object metadata. - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Data is the raw JSON data for this data. - // +optional - Data []byte `json:"data,omitempty" protobuf:"bytes,2,opt,name=data"` -} - // +genclient // +genclient:method=GetScale,verb=get,subresource=scale,result=Scale // +genclient:method=UpdateScale,verb=update,subresource=scale,input=Scale,result=Scale @@ -532,6 +475,33 @@ type DaemonSetStatus struct { // create the name for the newest ControllerRevision. // +optional CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"` + + // Represents the latest available observations of a DaemonSet's current state. 
+ // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []DaemonSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` +} + +type DaemonSetConditionType string + +// TODO: Add valid condition types of a DaemonSet. + +// DaemonSetCondition describes the state of a DaemonSet at a certain point. +type DaemonSetCondition struct { + // Type of DaemonSet condition. + Type DaemonSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DaemonSetConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // Last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` } // +genclient @@ -588,20 +558,6 @@ type DaemonSetList struct { Items []DaemonSet `json:"items" protobuf:"bytes,2,rep,name=items"` } -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ThirdPartyResrouceDataList is a list of ThirdPartyResourceData. -type ThirdPartyResourceDataList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of ThirdpartyResourceData. - Items []ThirdPartyResourceData `json:"items" protobuf:"bytes,2,rep,name=items"` -} - // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -778,7 +734,7 @@ type IngressBackend struct { // DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for // more information. -// ReplicaSet represents the configuration of a ReplicaSet. +// ReplicaSet ensures that a specified number of pod replicas are running at any given time. type ReplicaSet struct { metav1.TypeMeta `json:",inline"` @@ -926,8 +882,9 @@ type PodSecurityPolicySpec struct { // +optional Privileged bool `json:"privileged,omitempty" protobuf:"varint,1,opt,name=privileged"` // DefaultAddCapabilities is the default set of capabilities that will be added to the container - // unless the pod spec specifically drops the capability. You may not list a capabiility in both - // DefaultAddCapabilities and RequiredDropCapabilities. + // unless the pod spec specifically drops the capability. You may not list a capability in both + // DefaultAddCapabilities and RequiredDropCapabilities. Capabilities added here are implicitly + // allowed, and need not be included in the AllowedCapabilities list. // +optional DefaultAddCapabilities []v1.Capability `json:"defaultAddCapabilities,omitempty" protobuf:"bytes,2,rep,name=defaultAddCapabilities,casttype=k8s.io/api/core/v1.Capability"` // RequiredDropCapabilities are the capabilities that will be dropped from the container. 
These @@ -975,12 +932,17 @@ type PodSecurityPolicySpec struct { // +optional DefaultAllowPrivilegeEscalation *bool `json:"defaultAllowPrivilegeEscalation,omitempty" protobuf:"varint,15,opt,name=defaultAllowPrivilegeEscalation"` // AllowPrivilegeEscalation determines if a pod can request to allow - // privilege escalation. + // privilege escalation. If unspecified, defaults to true. // +optional - AllowPrivilegeEscalation bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,16,opt,name=allowPrivilegeEscalation"` + AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,16,opt,name=allowPrivilegeEscalation"` // is a white list of allowed host paths. Empty indicates that all host paths may be used. // +optional AllowedHostPaths []AllowedHostPath `json:"allowedHostPaths,omitempty" protobuf:"bytes,17,rep,name=allowedHostPaths"` + // AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all + // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes + // is allowed in the "Volumes" field. + // +optional + AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,18,rep,name=allowedFlexVolumes"` } // defines the host volume conditions that will be enabled by a policy @@ -1024,6 +986,12 @@ var ( All FSType = "*" ) +// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. +type AllowedFlexVolume struct { + // Driver is the name of the Flexvolume driver. + Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` +} + // Host Port Range defines a range of host ports that will be enabled by a policy // for pods to use. It requires both the start and end to be defined. type HostPortRange struct { @@ -1038,7 +1006,7 @@ type SELinuxStrategyOptions struct { // type is the strategy that will dictate the allowable labels that may be set. Rule SELinuxStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=SELinuxStrategy"` // seLinuxOptions required to run as; required for MustRunAs - // More info: https://git.k8s.io/community/contributors/design-proposals/security_context.md + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ // +optional SELinuxOptions *v1.SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,2,opt,name=seLinuxOptions"` } @@ -1144,6 +1112,7 @@ type PodSecurityPolicyList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// DEPRECATED 1.9 - This group version of NetworkPolicy is deprecated by networking/v1/NetworkPolicy. // NetworkPolicy describes what network traffic is allowed for a set of Pods type NetworkPolicy struct { metav1.TypeMeta `json:",inline"` @@ -1157,6 +1126,7 @@ type NetworkPolicy struct { Spec NetworkPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` } +// DEPRECATED 1.9 - This group version of PolicyType is deprecated by networking/v1/PolicyType. // Policy Type string describes the NetworkPolicy type // This type is beta-level in 1.8 type PolicyType string @@ -1168,6 +1138,7 @@ const ( PolicyTypeEgress PolicyType = "Egress" ) +// DEPRECATED 1.9 - This group version of NetworkPolicySpec is deprecated by networking/v1/NetworkPolicySpec. type NetworkPolicySpec struct { // Selects the pods to which this NetworkPolicy object applies. The array of ingress rules // is applied to any pods selected by this field. 
Multiple network policies can select the @@ -1210,6 +1181,7 @@ type NetworkPolicySpec struct { PolicyTypes []PolicyType `json:"policyTypes,omitempty" protobuf:"bytes,4,rep,name=policyTypes,casttype=PolicyType"` } +// DEPRECATED 1.9 - This group version of NetworkPolicyIngressRule is deprecated by networking/v1/NetworkPolicyIngressRule. // This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from. type NetworkPolicyIngressRule struct { // List of ports which should be made accessible on the pods selected for this rule. @@ -1229,6 +1201,7 @@ type NetworkPolicyIngressRule struct { From []NetworkPolicyPeer `json:"from,omitempty" protobuf:"bytes,2,rep,name=from"` } +// DEPRECATED 1.9 - This group version of NetworkPolicyEgressRule is deprecated by networking/v1/NetworkPolicyEgressRule. // NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods // matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. // This type is beta-level in 1.8 @@ -1250,6 +1223,7 @@ type NetworkPolicyEgressRule struct { To []NetworkPolicyPeer `json:"to,omitempty" protobuf:"bytes,2,rep,name=to"` } +// DEPRECATED 1.9 - This group version of NetworkPolicyPort is deprecated by networking/v1/NetworkPolicyPort. type NetworkPolicyPort struct { // Optional. The protocol (TCP or UDP) which traffic must match. // If not specified, this field defaults to TCP. @@ -1265,6 +1239,7 @@ type NetworkPolicyPort struct { Port *intstr.IntOrString `json:"port,omitempty" protobuf:"bytes,2,opt,name=port"` } +// DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. // IPBlock describes a particular CIDR (Ex. "192.168.1.1/24") that is allowed to the pods // matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should // not be included within this rule. @@ -1279,6 +1254,7 @@ type IPBlock struct { Except []string `json:"except,omitempty" protobuf:"bytes,2,rep,name=except"` } +// DEPRECATED 1.9 - This group version of NetworkPolicyPeer is deprecated by networking/v1/NetworkPolicyPeer. type NetworkPolicyPeer struct { // Exactly one of the following must be specified. @@ -1302,6 +1278,7 @@ type NetworkPolicyPeer struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// DEPRECATED 1.9 - This group version of NetworkPolicyList is deprecated by networking/v1/NetworkPolicyList. // Network Policy List is a list of NetworkPolicy objects. type NetworkPolicyList struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go index bcbd54dac25e..236d934fa247 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go @@ -27,13 +27,13 @@ package v1beta1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE -var map_APIVersion = map[string]string{ - "": "An APIVersion represents a single concrete version of an object model.", - "name": "Name of this version (e.g. 
'v1').", +var map_AllowedFlexVolume = map[string]string{ + "": "AllowedFlexVolume represents a single Flexvolume that is allowed to be used.", + "driver": "Driver is the name of the Flexvolume driver.", } -func (APIVersion) SwaggerDoc() map[string]string { - return map_APIVersion +func (AllowedFlexVolume) SwaggerDoc() map[string]string { + return map_AllowedFlexVolume } var map_AllowedHostPath = map[string]string{ @@ -75,6 +75,19 @@ func (DaemonSet) SwaggerDoc() map[string]string { return map_DaemonSet } +var map_DaemonSetCondition = map[string]string{ + "": "DaemonSetCondition describes the state of a DaemonSet at a certain point.", + "type": "Type of DaemonSet condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (DaemonSetCondition) SwaggerDoc() map[string]string { + return map_DaemonSetCondition +} + var map_DaemonSetList = map[string]string{ "": "DaemonSetList is a collection of daemon sets.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", @@ -110,6 +123,7 @@ var map_DaemonSetStatus = map[string]string{ "numberAvailable": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds)", "numberUnavailable": "The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds)", "collisionCount": "Count of hash collisions for the DaemonSet. The DaemonSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", + "conditions": "Represents the latest available observations of a DaemonSet's current state.", } func (DaemonSetStatus) SwaggerDoc() map[string]string { @@ -264,7 +278,7 @@ func (IDRange) SwaggerDoc() map[string]string { } var map_IPBlock = map[string]string{ - "": "IPBlock describes a particular CIDR (Ex. \"192.168.1.1/24\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.", + "": "DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. IPBlock describes a particular CIDR (Ex. \"192.168.1.1/24\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.", "cidr": "CIDR is a string representing the IP Block Valid examples are \"192.168.1.1/24\"", "except": "Except is a slice of CIDRs that should not be included within an IP Block Valid examples are \"192.168.1.1/24\" Except values will be rejected if they are outside the CIDR range", } @@ -352,7 +366,7 @@ func (IngressTLS) SwaggerDoc() map[string]string { } var map_NetworkPolicy = map[string]string{ - "": "NetworkPolicy describes what network traffic is allowed for a set of Pods", + "": "DEPRECATED 1.9 - This group version of NetworkPolicy is deprecated by networking/v1/NetworkPolicy. NetworkPolicy describes what network traffic is allowed for a set of Pods", "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "spec": "Specification of the desired behavior for this NetworkPolicy.", } @@ -362,7 +376,7 @@ func (NetworkPolicy) SwaggerDoc() map[string]string { } var map_NetworkPolicyEgressRule = map[string]string{ - "": "NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. This type is beta-level in 1.8", + "": "DEPRECATED 1.9 - This group version of NetworkPolicyEgressRule is deprecated by networking/v1/NetworkPolicyEgressRule. NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. This type is beta-level in 1.8", "ports": "List of destination ports for outgoing traffic. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", "to": "List of destinations for outgoing traffic of pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all destinations (traffic not restricted by destination). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the to list.", } @@ -372,7 +386,7 @@ func (NetworkPolicyEgressRule) SwaggerDoc() map[string]string { } var map_NetworkPolicyIngressRule = map[string]string{ - "": "This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from.", + "": "DEPRECATED 1.9 - This group version of NetworkPolicyIngressRule is deprecated by networking/v1/NetworkPolicyIngressRule. This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from.", "ports": "List of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", "from": "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least on item, this rule allows traffic only if the traffic matches at least one item in the from list.", } @@ -382,7 +396,7 @@ func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string { } var map_NetworkPolicyList = map[string]string{ - "": "Network Policy List is a list of NetworkPolicy objects.", + "": "DEPRECATED 1.9 - This group version of NetworkPolicyList is deprecated by networking/v1/NetworkPolicyList. Network Policy List is a list of NetworkPolicy objects.", "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "items": "Items is a list of schema objects.", } @@ -392,6 +406,7 @@ func (NetworkPolicyList) SwaggerDoc() map[string]string { } var map_NetworkPolicyPeer = map[string]string{ + "": "DEPRECATED 1.9 - This group version of NetworkPolicyPeer is deprecated by networking/v1/NetworkPolicyPeer.", "podSelector": "This is a label selector which selects Pods in this namespace. This field follows standard label selector semantics. If present but empty, this selector selects all pods in this namespace.", "namespaceSelector": "Selects Namespaces using cluster scoped-labels. This matches all pods in all namespaces selected by this label selector. This field follows standard label selector semantics. If present but empty, this selector selects all namespaces.", "ipBlock": "IPBlock defines policy on a particular IPBlock", @@ -402,6 +417,7 @@ func (NetworkPolicyPeer) SwaggerDoc() map[string]string { } var map_NetworkPolicyPort = map[string]string{ + "": "DEPRECATED 1.9 - This group version of NetworkPolicyPort is deprecated by networking/v1/NetworkPolicyPort.", "protocol": "Optional. The protocol (TCP or UDP) which traffic must match. If not specified, this field defaults to TCP.", "port": "If specified, the port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.", } @@ -411,6 +427,7 @@ func (NetworkPolicyPort) SwaggerDoc() map[string]string { } var map_NetworkPolicySpec = map[string]string{ + "": "DEPRECATED 1.9 - This group version of NetworkPolicySpec is deprecated by networking/v1/NetworkPolicySpec.", "podSelector": "Selects the pods to which this NetworkPolicy object applies. The array of ingress rules is applied to any pods selected by this field. Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is NOT optional and follows standard label selector semantics. An empty podSelector matches all pods in this namespace.", "ingress": "List of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default).", "egress": "List of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). 
This field is beta-level in 1.8", @@ -444,7 +461,7 @@ func (PodSecurityPolicyList) SwaggerDoc() map[string]string { var map_PodSecurityPolicySpec = map[string]string{ "": "Pod Security Policy Spec defines the policy enforced.", "privileged": "privileged determines if a pod can request to be run as privileged.", - "defaultAddCapabilities": "DefaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capabiility in both DefaultAddCapabilities and RequiredDropCapabilities.", + "defaultAddCapabilities": "DefaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capability in both DefaultAddCapabilities and RequiredDropCapabilities. Capabilities added here are implicitly allowed, and need not be included in the AllowedCapabilities list.", "requiredDropCapabilities": "RequiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added.", "allowedCapabilities": "AllowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities.", "volumes": "volumes is a white list of allowed volume plugins. Empty indicates that all plugins may be used.", @@ -458,8 +475,9 @@ var map_PodSecurityPolicySpec = map[string]string{ "fsGroup": "FSGroup is the strategy that will dictate what fs group is used by the SecurityContext.", "readOnlyRootFilesystem": "ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.", "defaultAllowPrivilegeEscalation": "DefaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process.", - "allowPrivilegeEscalation": "AllowPrivilegeEscalation determines if a pod can request to allow privilege escalation.", + "allowPrivilegeEscalation": "AllowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.", "allowedHostPaths": "is a white list of allowed host paths. Empty indicates that all host paths may be used.", + "allowedFlexVolumes": "AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"Volumes\" field.", } func (PodSecurityPolicySpec) SwaggerDoc() map[string]string { @@ -467,7 +485,7 @@ func (PodSecurityPolicySpec) SwaggerDoc() map[string]string { } var map_ReplicaSet = map[string]string{ - "": "DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for more information. ReplicaSet represents the configuration of a ReplicaSet.", + "": "DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for more information. 
ReplicaSet ensures that a specified number of pod replicas are running at any given time.", "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", @@ -575,7 +593,7 @@ func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string { var map_SELinuxStrategyOptions = map[string]string{ "": "SELinux Strategy Options defines the strategy type and any options used to create the strategy.", "rule": "type is the strategy that will dictate the allowable labels that may be set.", - "seLinuxOptions": "seLinuxOptions required to run as; required for MustRunAs More info: https://git.k8s.io/community/contributors/design-proposals/security_context.md", + "seLinuxOptions": "seLinuxOptions required to run as; required for MustRunAs More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", } func (SELinuxStrategyOptions) SwaggerDoc() map[string]string { @@ -623,45 +641,4 @@ func (SupplementalGroupsStrategyOptions) SwaggerDoc() map[string]string { return map_SupplementalGroupsStrategyOptions } -var map_ThirdPartyResource = map[string]string{ - "": "A ThirdPartyResource is a generic representation of a resource, it is used by add-ons and plugins to add new resource types to the API. It consists of one or more Versions of the api.", - "metadata": "Standard object metadata", - "description": "Description is the description of this object.", - "versions": "Versions are versions for this third party object", -} - -func (ThirdPartyResource) SwaggerDoc() map[string]string { - return map_ThirdPartyResource -} - -var map_ThirdPartyResourceData = map[string]string{ - "": "An internal object, used for versioned storage in etcd. 
Not exposed to the end user.", - "metadata": "Standard object metadata.", - "data": "Data is the raw JSON data for this data.", -} - -func (ThirdPartyResourceData) SwaggerDoc() map[string]string { - return map_ThirdPartyResourceData -} - -var map_ThirdPartyResourceDataList = map[string]string{ - "": "ThirdPartyResrouceDataList is a list of ThirdPartyResourceData.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "items": "Items is the list of ThirdpartyResourceData.", -} - -func (ThirdPartyResourceDataList) SwaggerDoc() map[string]string { - return map_ThirdPartyResourceDataList -} - -var map_ThirdPartyResourceList = map[string]string{ - "": "ThirdPartyResourceList is a list of ThirdPartyResources.", - "metadata": "Standard list metadata.", - "items": "Items is the list of ThirdPartyResources.", -} - -func (ThirdPartyResourceList) SwaggerDoc() map[string]string { - return map_ThirdPartyResourceList -} - // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go index 45b56997bf87..02a84cccd2ce 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go @@ -23,281 +23,22 @@ package v1beta1 import ( core_v1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" intstr "k8s.io/apimachinery/pkg/util/intstr" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*APIVersion).DeepCopyInto(out.(*APIVersion)) - return nil - }, InType: reflect.TypeOf(&APIVersion{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AllowedHostPath).DeepCopyInto(out.(*AllowedHostPath)) - return nil - }, InType: reflect.TypeOf(&AllowedHostPath{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CustomMetricCurrentStatus).DeepCopyInto(out.(*CustomMetricCurrentStatus)) - return nil - }, InType: reflect.TypeOf(&CustomMetricCurrentStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CustomMetricCurrentStatusList).DeepCopyInto(out.(*CustomMetricCurrentStatusList)) - return nil - }, InType: reflect.TypeOf(&CustomMetricCurrentStatusList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CustomMetricTarget).DeepCopyInto(out.(*CustomMetricTarget)) - return nil - }, InType: reflect.TypeOf(&CustomMetricTarget{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CustomMetricTargetList).DeepCopyInto(out.(*CustomMetricTargetList)) - return nil - }, InType: reflect.TypeOf(&CustomMetricTargetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSet).DeepCopyInto(out.(*DaemonSet)) - return nil - }, InType: reflect.TypeOf(&DaemonSet{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSetList).DeepCopyInto(out.(*DaemonSetList)) - return nil - }, InType: reflect.TypeOf(&DaemonSetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSetSpec).DeepCopyInto(out.(*DaemonSetSpec)) - return nil - }, InType: reflect.TypeOf(&DaemonSetSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSetStatus).DeepCopyInto(out.(*DaemonSetStatus)) - return nil - }, InType: reflect.TypeOf(&DaemonSetStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSetUpdateStrategy).DeepCopyInto(out.(*DaemonSetUpdateStrategy)) - return nil - }, InType: reflect.TypeOf(&DaemonSetUpdateStrategy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Deployment).DeepCopyInto(out.(*Deployment)) - return nil - }, InType: reflect.TypeOf(&Deployment{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentCondition).DeepCopyInto(out.(*DeploymentCondition)) - return nil - }, InType: reflect.TypeOf(&DeploymentCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentList).DeepCopyInto(out.(*DeploymentList)) - return nil - }, InType: reflect.TypeOf(&DeploymentList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - 
in.(*DeploymentRollback).DeepCopyInto(out.(*DeploymentRollback)) - return nil - }, InType: reflect.TypeOf(&DeploymentRollback{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentSpec).DeepCopyInto(out.(*DeploymentSpec)) - return nil - }, InType: reflect.TypeOf(&DeploymentSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentStatus).DeepCopyInto(out.(*DeploymentStatus)) - return nil - }, InType: reflect.TypeOf(&DeploymentStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentStrategy).DeepCopyInto(out.(*DeploymentStrategy)) - return nil - }, InType: reflect.TypeOf(&DeploymentStrategy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*FSGroupStrategyOptions).DeepCopyInto(out.(*FSGroupStrategyOptions)) - return nil - }, InType: reflect.TypeOf(&FSGroupStrategyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HTTPIngressPath).DeepCopyInto(out.(*HTTPIngressPath)) - return nil - }, InType: reflect.TypeOf(&HTTPIngressPath{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HTTPIngressRuleValue).DeepCopyInto(out.(*HTTPIngressRuleValue)) - return nil - }, InType: reflect.TypeOf(&HTTPIngressRuleValue{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HostPortRange).DeepCopyInto(out.(*HostPortRange)) - return nil - }, InType: reflect.TypeOf(&HostPortRange{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IDRange).DeepCopyInto(out.(*IDRange)) - return nil - }, InType: reflect.TypeOf(&IDRange{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IPBlock).DeepCopyInto(out.(*IPBlock)) - return nil - }, InType: reflect.TypeOf(&IPBlock{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Ingress).DeepCopyInto(out.(*Ingress)) - return nil - }, InType: reflect.TypeOf(&Ingress{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IngressBackend).DeepCopyInto(out.(*IngressBackend)) - return nil - }, InType: reflect.TypeOf(&IngressBackend{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IngressList).DeepCopyInto(out.(*IngressList)) - return nil - }, InType: reflect.TypeOf(&IngressList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IngressRule).DeepCopyInto(out.(*IngressRule)) - return nil - }, InType: reflect.TypeOf(&IngressRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IngressRuleValue).DeepCopyInto(out.(*IngressRuleValue)) - return nil - }, InType: reflect.TypeOf(&IngressRuleValue{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IngressSpec).DeepCopyInto(out.(*IngressSpec)) - return nil - }, InType: reflect.TypeOf(&IngressSpec{})}, - 
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IngressStatus).DeepCopyInto(out.(*IngressStatus)) - return nil - }, InType: reflect.TypeOf(&IngressStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IngressTLS).DeepCopyInto(out.(*IngressTLS)) - return nil - }, InType: reflect.TypeOf(&IngressTLS{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicy).DeepCopyInto(out.(*NetworkPolicy)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicyEgressRule).DeepCopyInto(out.(*NetworkPolicyEgressRule)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicyEgressRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicyIngressRule).DeepCopyInto(out.(*NetworkPolicyIngressRule)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicyIngressRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicyList).DeepCopyInto(out.(*NetworkPolicyList)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicyList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicyPeer).DeepCopyInto(out.(*NetworkPolicyPeer)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicyPeer{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicyPort).DeepCopyInto(out.(*NetworkPolicyPort)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicyPort{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicySpec).DeepCopyInto(out.(*NetworkPolicySpec)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicySpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodSecurityPolicy).DeepCopyInto(out.(*PodSecurityPolicy)) - return nil - }, InType: reflect.TypeOf(&PodSecurityPolicy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodSecurityPolicyList).DeepCopyInto(out.(*PodSecurityPolicyList)) - return nil - }, InType: reflect.TypeOf(&PodSecurityPolicyList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodSecurityPolicySpec).DeepCopyInto(out.(*PodSecurityPolicySpec)) - return nil - }, InType: reflect.TypeOf(&PodSecurityPolicySpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicaSet).DeepCopyInto(out.(*ReplicaSet)) - return nil - }, InType: reflect.TypeOf(&ReplicaSet{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicaSetCondition).DeepCopyInto(out.(*ReplicaSetCondition)) - return nil - }, InType: reflect.TypeOf(&ReplicaSetCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicaSetList).DeepCopyInto(out.(*ReplicaSetList)) - return nil - }, InType: reflect.TypeOf(&ReplicaSetList{})}, - 
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicaSetSpec).DeepCopyInto(out.(*ReplicaSetSpec)) - return nil - }, InType: reflect.TypeOf(&ReplicaSetSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicaSetStatus).DeepCopyInto(out.(*ReplicaSetStatus)) - return nil - }, InType: reflect.TypeOf(&ReplicaSetStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicationControllerDummy).DeepCopyInto(out.(*ReplicationControllerDummy)) - return nil - }, InType: reflect.TypeOf(&ReplicationControllerDummy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RollbackConfig).DeepCopyInto(out.(*RollbackConfig)) - return nil - }, InType: reflect.TypeOf(&RollbackConfig{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RollingUpdateDaemonSet).DeepCopyInto(out.(*RollingUpdateDaemonSet)) - return nil - }, InType: reflect.TypeOf(&RollingUpdateDaemonSet{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RollingUpdateDeployment).DeepCopyInto(out.(*RollingUpdateDeployment)) - return nil - }, InType: reflect.TypeOf(&RollingUpdateDeployment{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RunAsUserStrategyOptions).DeepCopyInto(out.(*RunAsUserStrategyOptions)) - return nil - }, InType: reflect.TypeOf(&RunAsUserStrategyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SELinuxStrategyOptions).DeepCopyInto(out.(*SELinuxStrategyOptions)) - return nil - }, InType: reflect.TypeOf(&SELinuxStrategyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Scale).DeepCopyInto(out.(*Scale)) - return nil - }, InType: reflect.TypeOf(&Scale{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ScaleSpec).DeepCopyInto(out.(*ScaleSpec)) - return nil - }, InType: reflect.TypeOf(&ScaleSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ScaleStatus).DeepCopyInto(out.(*ScaleStatus)) - return nil - }, InType: reflect.TypeOf(&ScaleStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SupplementalGroupsStrategyOptions).DeepCopyInto(out.(*SupplementalGroupsStrategyOptions)) - return nil - }, InType: reflect.TypeOf(&SupplementalGroupsStrategyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ThirdPartyResource).DeepCopyInto(out.(*ThirdPartyResource)) - return nil - }, InType: reflect.TypeOf(&ThirdPartyResource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ThirdPartyResourceData).DeepCopyInto(out.(*ThirdPartyResourceData)) - return nil - }, InType: reflect.TypeOf(&ThirdPartyResourceData{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - 
in.(*ThirdPartyResourceDataList).DeepCopyInto(out.(*ThirdPartyResourceDataList)) - return nil - }, InType: reflect.TypeOf(&ThirdPartyResourceDataList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ThirdPartyResourceList).DeepCopyInto(out.(*ThirdPartyResourceList)) - return nil - }, InType: reflect.TypeOf(&ThirdPartyResourceList{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *APIVersion) DeepCopyInto(out *APIVersion) { +func (in *AllowedFlexVolume) DeepCopyInto(out *AllowedFlexVolume) { *out = *in return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIVersion. -func (in *APIVersion) DeepCopy() *APIVersion { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedFlexVolume. +func (in *AllowedFlexVolume) DeepCopy() *AllowedFlexVolume { if in == nil { return nil } - out := new(APIVersion) + out := new(AllowedFlexVolume) in.DeepCopyInto(out) return out } @@ -427,6 +168,23 @@ func (in *DaemonSet) DeepCopyObject() runtime.Object { } } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetCondition) DeepCopyInto(out *DaemonSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetCondition. +func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition { + if in == nil { + return nil + } + out := new(DaemonSetCondition) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) { *out = *in @@ -509,6 +267,13 @@ func (in *DaemonSetStatus) DeepCopyInto(out *DaemonSetStatus) { **out = **in } } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DaemonSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -1431,11 +1196,25 @@ func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { **out = **in } } + if in.AllowPrivilegeEscalation != nil { + in, out := &in.AllowPrivilegeEscalation, &out.AllowPrivilegeEscalation + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } if in.AllowedHostPaths != nil { in, out := &in.AllowedHostPaths, &out.AllowedHostPaths *out = make([]AllowedHostPath, len(*in)) copy(*out, *in) } + if in.AllowedFlexVolumes != nil { + in, out := &in.AllowedFlexVolumes, &out.AllowedFlexVolumes + *out = make([]AllowedFlexVolume, len(*in)) + copy(*out, *in) + } return } @@ -1822,135 +1601,3 @@ func (in *SupplementalGroupsStrategyOptions) DeepCopy() *SupplementalGroupsStrat in.DeepCopyInto(out) return out } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ThirdPartyResource) DeepCopyInto(out *ThirdPartyResource) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Versions != nil { - in, out := &in.Versions, &out.Versions - *out = make([]APIVersion, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThirdPartyResource. 
-func (in *ThirdPartyResource) DeepCopy() *ThirdPartyResource { - if in == nil { - return nil - } - out := new(ThirdPartyResource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ThirdPartyResource) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ThirdPartyResourceData) DeepCopyInto(out *ThirdPartyResourceData) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Data != nil { - in, out := &in.Data, &out.Data - *out = make([]byte, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThirdPartyResourceData. -func (in *ThirdPartyResourceData) DeepCopy() *ThirdPartyResourceData { - if in == nil { - return nil - } - out := new(ThirdPartyResourceData) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ThirdPartyResourceData) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ThirdPartyResourceDataList) DeepCopyInto(out *ThirdPartyResourceDataList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ThirdPartyResourceData, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThirdPartyResourceDataList. -func (in *ThirdPartyResourceDataList) DeepCopy() *ThirdPartyResourceDataList { - if in == nil { - return nil - } - out := new(ThirdPartyResourceDataList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ThirdPartyResourceDataList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ThirdPartyResourceList) DeepCopyInto(out *ThirdPartyResourceList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ThirdPartyResource, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThirdPartyResourceList. -func (in *ThirdPartyResourceList) DeepCopy() *ThirdPartyResourceList { - if in == nil { - return nil - } - out := new(ThirdPartyResourceList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *ThirdPartyResourceList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/BUILD b/vendor/k8s.io/api/imagepolicy/v1alpha1/BUILD index 745ff2609dae..455205f7adfc 100644 --- a/vendor/k8s.io/api/imagepolicy/v1alpha1/BUILD +++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/BUILD @@ -15,11 +15,11 @@ go_library( "types_swagger_doc_generated.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/api/imagepolicy/v1alpha1", deps = [ "//vendor/github.com/gogo/protobuf/proto:go_default_library", "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go index 1d6c4f2e22d1..b0c6b3713a32 100644 --- a/vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go +++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true // +groupName=imagepolicy.k8s.io diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/register.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/register.go index 3e762bb4f94a..477571bbb270 100644 --- a/vendor/k8s.io/api/imagepolicy/v1alpha1/register.go +++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &ImageReview{}, diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/zz_generated.deepcopy.go index 95b204e0eb91..21fcd39e418a 100644 --- a/vendor/k8s.io/api/imagepolicy/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/zz_generated.deepcopy.go @@ -21,40 +21,9 @@ limitations under the License. package v1alpha1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ImageReview).DeepCopyInto(out.(*ImageReview)) - return nil - }, InType: reflect.TypeOf(&ImageReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ImageReviewContainerSpec).DeepCopyInto(out.(*ImageReviewContainerSpec)) - return nil - }, InType: reflect.TypeOf(&ImageReviewContainerSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ImageReviewSpec).DeepCopyInto(out.(*ImageReviewSpec)) - return nil - }, InType: reflect.TypeOf(&ImageReviewSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ImageReviewStatus).DeepCopyInto(out.(*ImageReviewStatus)) - return nil - }, InType: reflect.TypeOf(&ImageReviewStatus{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ImageReview) DeepCopyInto(out *ImageReview) { *out = *in diff --git a/vendor/k8s.io/api/networking/v1/BUILD b/vendor/k8s.io/api/networking/v1/BUILD index 5ca0fae6817a..beb3afcaf713 100644 --- a/vendor/k8s.io/api/networking/v1/BUILD +++ b/vendor/k8s.io/api/networking/v1/BUILD @@ -15,11 +15,11 @@ go_library( "types_swagger_doc_generated.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/api/networking/v1", deps = [ "//vendor/github.com/gogo/protobuf/proto:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", diff --git a/vendor/k8s.io/api/networking/v1/doc.go b/vendor/k8s.io/api/networking/v1/doc.go index b4076cbbf1d5..9111e722dc39 100644 --- a/vendor/k8s.io/api/networking/v1/doc.go +++ b/vendor/k8s.io/api/networking/v1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true // +groupName=networking.k8s.io package v1 diff --git a/vendor/k8s.io/api/networking/v1/register.go b/vendor/k8s.io/api/networking/v1/register.go index 2ba9951d8ad9..f47f22e9e883 100644 --- a/vendor/k8s.io/api/networking/v1/register.go +++ b/vendor/k8s.io/api/networking/v1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. 
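The register.go comment fix above, together with the addKnownTypes context that follows, is the whole registration story for these packages: each group/version owns an addKnownTypes and exposes it through the exported AddToScheme. A rough sketch of how a caller might consume that, using k8s.io/api/networking/v1 as the example (the helper name is invented):

// Illustrative sketch only; newNetworkingScheme is a hypothetical helper, not part of the patch.
package example

import (
	networkingv1 "k8s.io/api/networking/v1"
	runtime "k8s.io/apimachinery/pkg/runtime"
)

// newNetworkingScheme builds a scheme and lets the package register its known
// types (NetworkPolicy and friends) into it via AddToScheme.
func newNetworkingScheme() (*runtime.Scheme, error) {
	scheme := runtime.NewScheme()
	if err := networkingv1.AddToScheme(scheme); err != nil {
		return nil, err
	}
	return scheme, nil
}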
func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &NetworkPolicy{}, diff --git a/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go index 0e6709667de4..955e74344efd 100644 --- a/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go @@ -23,57 +23,10 @@ package v1 import ( core_v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" intstr "k8s.io/apimachinery/pkg/util/intstr" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IPBlock).DeepCopyInto(out.(*IPBlock)) - return nil - }, InType: reflect.TypeOf(&IPBlock{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicy).DeepCopyInto(out.(*NetworkPolicy)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicyEgressRule).DeepCopyInto(out.(*NetworkPolicyEgressRule)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicyEgressRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicyIngressRule).DeepCopyInto(out.(*NetworkPolicyIngressRule)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicyIngressRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicyList).DeepCopyInto(out.(*NetworkPolicyList)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicyList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicyPeer).DeepCopyInto(out.(*NetworkPolicyPeer)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicyPeer{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicyPort).DeepCopyInto(out.(*NetworkPolicyPort)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicyPort{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicySpec).DeepCopyInto(out.(*NetworkPolicySpec)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicySpec{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *IPBlock) DeepCopyInto(out *IPBlock) { *out = *in diff --git a/vendor/k8s.io/api/policy/v1beta1/BUILD b/vendor/k8s.io/api/policy/v1beta1/BUILD index 66faa776eb95..6e4a07deb67c 100644 --- a/vendor/k8s.io/api/policy/v1beta1/BUILD +++ b/vendor/k8s.io/api/policy/v1beta1/BUILD @@ -15,11 +15,11 @@ go_library( "types_swagger_doc_generated.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/api/policy/v1beta1", deps = [ "//vendor/github.com/gogo/protobuf/proto:go_default_library", "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", diff --git a/vendor/k8s.io/api/policy/v1beta1/doc.go b/vendor/k8s.io/api/policy/v1beta1/doc.go index 3492435add62..972192786dd0 100644 --- a/vendor/k8s.io/api/policy/v1beta1/doc.go +++ b/vendor/k8s.io/api/policy/v1beta1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // Package policy is for any kind of policy object. Suitable examples, even if // they aren't all here, are PodDisruptionBudget, PodSecurityPolicy, diff --git a/vendor/k8s.io/api/policy/v1beta1/register.go b/vendor/k8s.io/api/policy/v1beta1/register.go index e0c6247a7904..d77f13040706 100644 --- a/vendor/k8s.io/api/policy/v1beta1/register.go +++ b/vendor/k8s.io/api/policy/v1beta1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &PodDisruptionBudget{}, diff --git a/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go index 93c201e2dd52..70872f09817e 100644 --- a/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go @@ -22,45 +22,10 @@ package v1beta1 import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" intstr "k8s.io/apimachinery/pkg/util/intstr" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Eviction).DeepCopyInto(out.(*Eviction)) - return nil - }, InType: reflect.TypeOf(&Eviction{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodDisruptionBudget).DeepCopyInto(out.(*PodDisruptionBudget)) - return nil - }, InType: reflect.TypeOf(&PodDisruptionBudget{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodDisruptionBudgetList).DeepCopyInto(out.(*PodDisruptionBudgetList)) - return nil - }, InType: reflect.TypeOf(&PodDisruptionBudgetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodDisruptionBudgetSpec).DeepCopyInto(out.(*PodDisruptionBudgetSpec)) - return nil - }, InType: reflect.TypeOf(&PodDisruptionBudgetSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodDisruptionBudgetStatus).DeepCopyInto(out.(*PodDisruptionBudgetStatus)) - return nil - }, InType: reflect.TypeOf(&PodDisruptionBudgetStatus{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Eviction) DeepCopyInto(out *Eviction) { *out = *in diff --git a/vendor/k8s.io/api/rbac/v1/BUILD b/vendor/k8s.io/api/rbac/v1/BUILD index 6a38064dd240..539afb7ae85a 100644 --- a/vendor/k8s.io/api/rbac/v1/BUILD +++ b/vendor/k8s.io/api/rbac/v1/BUILD @@ -15,10 +15,10 @@ go_library( "types_swagger_doc_generated.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/api/rbac/v1", deps = [ "//vendor/github.com/gogo/protobuf/proto:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], diff --git a/vendor/k8s.io/api/rbac/v1/doc.go b/vendor/k8s.io/api/rbac/v1/doc.go index 7b5c3250a3e7..eca05ac68c2e 100644 --- a/vendor/k8s.io/api/rbac/v1/doc.go +++ b/vendor/k8s.io/api/rbac/v1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true // +groupName=rbac.authorization.k8s.io diff --git a/vendor/k8s.io/api/rbac/v1/generated.pb.go b/vendor/k8s.io/api/rbac/v1/generated.pb.go index dba0ec95bef3..1530d379c086 100644 --- a/vendor/k8s.io/api/rbac/v1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1/generated.pb.go @@ -25,6 +25,7 @@ limitations under the License. k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1/generated.proto It has these top-level messages: + AggregationRule ClusterRole ClusterRoleBinding ClusterRoleBindingList @@ -43,6 +44,8 @@ import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + import strings "strings" import reflect "reflect" @@ -59,51 +62,56 @@ var _ = math.Inf // proto package needs to be updated. 
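In the generated Marshal/Unmarshal hunks that follow, the new aggregationRule field is written with the literal tag byte 0x1a and read back by splitting the same byte into a field number and wire type. A small standalone sketch of that arithmetic (plain Go, not taken from the patch):

// Illustrative sketch only; the generated code hard-codes these values instead of computing them.
package main

import "fmt"

// protoKey forms the one-byte key used for small field numbers:
// (fieldNumber << 3) | wireType.
func protoKey(fieldNumber, wireType int) byte {
	return byte(fieldNumber<<3 | wireType)
}

func main() {
	// Wire type 2 means length-delimited, used for embedded messages such as
	// ClusterRole's new aggregationRule field (field number 3).
	key := protoKey(3, 2)
	fmt.Printf("key=%#x field=%d wiretype=%d\n", key, key>>3, key&0x7) // key=0x1a field=3 wiretype=2
}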
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +func (m *AggregationRule) Reset() { *m = AggregationRule{} } +func (*AggregationRule) ProtoMessage() {} +func (*AggregationRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + func (m *ClusterRole) Reset() { *m = ClusterRole{} } func (*ClusterRole) ProtoMessage() {} -func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } func (*ClusterRoleBinding) ProtoMessage() {} -func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } func (*ClusterRoleBindingList) ProtoMessage() {} -func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } func (*ClusterRoleList) ProtoMessage() {} -func (*ClusterRoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (*ClusterRoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } func (m *PolicyRule) Reset() { *m = PolicyRule{} } func (*PolicyRule) ProtoMessage() {} -func (*PolicyRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (*PolicyRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } func (m *Role) Reset() { *m = Role{} } func (*Role) ProtoMessage() {} -func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } func (m *RoleBinding) Reset() { *m = RoleBinding{} } func (*RoleBinding) ProtoMessage() {} -func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } func (*RoleBindingList) ProtoMessage() {} -func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } func (m *RoleList) Reset() { *m = RoleList{} } func (*RoleList) ProtoMessage() {} -func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } func (m *RoleRef) Reset() { *m = RoleRef{} } func (*RoleRef) ProtoMessage() {} -func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } func (m *Subject) Reset() { *m = Subject{} } func (*Subject) ProtoMessage() {} -func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } func init() { + proto.RegisterType((*AggregationRule)(nil), "k8s.io.api.rbac.v1.AggregationRule") proto.RegisterType((*ClusterRole)(nil), 
"k8s.io.api.rbac.v1.ClusterRole") proto.RegisterType((*ClusterRoleBinding)(nil), "k8s.io.api.rbac.v1.ClusterRoleBinding") proto.RegisterType((*ClusterRoleBindingList)(nil), "k8s.io.api.rbac.v1.ClusterRoleBindingList") @@ -116,6 +124,36 @@ func init() { proto.RegisterType((*RoleRef)(nil), "k8s.io.api.rbac.v1.RoleRef") proto.RegisterType((*Subject)(nil), "k8s.io.api.rbac.v1.Subject") } +func (m *AggregationRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AggregationRule) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ClusterRoleSelectors) > 0 { + for _, msg := range m.ClusterRoleSelectors { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + func (m *ClusterRole) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -151,6 +189,16 @@ func (m *ClusterRole) MarshalTo(dAtA []byte) (int, error) { i += n } } + if m.AggregationRule != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AggregationRule.Size())) + n2, err := m.AggregationRule.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } return i, nil } @@ -172,11 +220,11 @@ func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n2, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n2 + i += n3 if len(m.Subjects) > 0 { for _, msg := range m.Subjects { dAtA[i] = 0x12 @@ -192,11 +240,11 @@ func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n3, err := m.RoleRef.MarshalTo(dAtA[i:]) + n4, err := m.RoleRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n3 + i += n4 return i, nil } @@ -218,11 +266,11 @@ func (m *ClusterRoleBindingList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n4, err := m.ListMeta.MarshalTo(dAtA[i:]) + n5, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n4 + i += n5 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -256,11 +304,11 @@ func (m *ClusterRoleList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(dAtA[i:]) + n6, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n5 + i += n6 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -387,11 +435,11 @@ func (m *Role) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n6, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n6 + i += n7 if len(m.Rules) > 0 { for _, msg := range m.Rules { dAtA[i] = 0x12 @@ -425,11 +473,11 @@ func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n8, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n7 + i += 
n8 if len(m.Subjects) > 0 { for _, msg := range m.Subjects { dAtA[i] = 0x12 @@ -445,11 +493,11 @@ func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n8, err := m.RoleRef.MarshalTo(dAtA[i:]) + n9, err := m.RoleRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n8 + i += n9 return i, nil } @@ -471,11 +519,11 @@ func (m *RoleBindingList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n9, err := m.ListMeta.MarshalTo(dAtA[i:]) + n10, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n9 + i += n10 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -509,11 +557,11 @@ func (m *RoleList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n10, err := m.ListMeta.MarshalTo(dAtA[i:]) + n11, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n10 + i += n11 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -620,6 +668,18 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return offset + 1 } +func (m *AggregationRule) Size() (n int) { + var l int + _ = l + if len(m.ClusterRoleSelectors) > 0 { + for _, e := range m.ClusterRoleSelectors { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *ClusterRole) Size() (n int) { var l int _ = l @@ -631,6 +691,10 @@ func (m *ClusterRole) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.AggregationRule != nil { + l = m.AggregationRule.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -811,6 +875,16 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *AggregationRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AggregationRule{`, + `ClusterRoleSelectors:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ClusterRoleSelectors), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} func (this *ClusterRole) String() string { if this == nil { return "nil" @@ -818,6 +892,7 @@ func (this *ClusterRole) String() string { s := strings.Join([]string{`&ClusterRole{`, `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, + `AggregationRule:` + strings.Replace(fmt.Sprintf("%v", this.AggregationRule), "AggregationRule", "AggregationRule", 1) + `,`, `}`, }, "") return s @@ -948,6 +1023,87 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } +func (m *AggregationRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: AggregationRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AggregationRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterRoleSelectors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{}) + if err := m.ClusterRoleSelectors[len(m.ClusterRoleSelectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ClusterRole) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -1038,6 +1194,39 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AggregationRule", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AggregationRule == nil { + m.AggregationRule = &AggregationRule{} + } + if err := m.AggregationRule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -2504,52 +2693,57 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 751 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x94, 0xcd, 0x6e, 0xd3, 0x4a, - 0x14, 0xc7, 0xe3, 0x7c, 0x28, 0xf1, 0xe4, 0x46, 0xb9, 0xf5, 0x95, 0xee, 0xb5, 0x2a, 0x5d, 0x27, - 0x0a, 0x2c, 0x2a, 0x95, 0xda, 0xb4, 0x20, 0x60, 0x83, 0x04, 0x66, 0x01, 0x55, 0x4b, 0xa8, 0x06, - 0xc1, 0x02, 0xb1, 0x60, 0xe2, 0x4c, 0xd3, 0x21, 0xf1, 0x87, 0x66, 0xc6, 0x91, 0x2a, 0x36, 0x3c, - 0x00, 0x0b, 0x24, 0x5e, 0x83, 0x15, 0x3b, 0x78, 0x82, 0x2c, 0xbb, 0xec, 0x2a, 0xa2, 0xe6, 0x41, - 0x40, 0x33, 0xb6, 0xe3, 0xa4, 0x69, 0xda, 0xac, 0x22, 0x21, 0xb1, 0x4a, 0xe6, 0x9c, 0xdf, 0xf9, - 0x9f, 0x0f, 0xcf, 0x1c, 0xf0, 0xa0, 0x7f, 0x8f, 0x99, 0xc4, 0xb7, 0xfa, 0x61, 0x07, 0x53, 0x0f, - 0x73, 0xcc, 0xac, 0x21, 0xf6, 0xba, 0x3e, 0xb5, 0x12, 0x07, 0x0a, 0x88, 0x45, 0x3b, 0xc8, 0xb1, - 0x86, 0xdb, 0x1d, 0xcc, 0xd1, 0xb6, 0xd5, 0xc3, 0x1e, 0xa6, 0x88, 0xe3, 0xae, 0x19, 0x50, 0x9f, - 0xfb, 0xda, 0x7f, 0x31, 0x68, 0xa2, 0x80, 0x98, 0x02, 0x34, 0x13, 0x70, 0x7d, 0xab, 0x47, 0xf8, - 0x51, 0xd8, 0x31, 0x1d, 0xdf, 0xb5, 0x7a, 0x7e, 0xcf, 0xb7, 0x24, 0xdf, 0x09, 0x0f, 0xe5, 0x49, - 0x1e, 0xe4, 0xbf, 0x58, 0x67, 0xfd, 0x76, 0x96, 0xd0, 
0x45, 0xce, 0x11, 0xf1, 0x30, 0x3d, 0xb6, - 0x82, 0x7e, 0x4f, 0x18, 0x98, 0xe5, 0x62, 0x8e, 0xac, 0xe1, 0x5c, 0xf6, 0x75, 0x6b, 0x51, 0x14, - 0x0d, 0x3d, 0x4e, 0x5c, 0x3c, 0x17, 0x70, 0xe7, 0xaa, 0x00, 0xe6, 0x1c, 0x61, 0x17, 0xcd, 0xc5, - 0xdd, 0x5a, 0x14, 0x17, 0x72, 0x32, 0xb0, 0x88, 0xc7, 0x19, 0xa7, 0xe7, 0x83, 0x5a, 0x5f, 0x15, - 0x50, 0x7d, 0x34, 0x08, 0x19, 0xc7, 0x14, 0xfa, 0x03, 0xac, 0xbd, 0x01, 0x15, 0xd1, 0x48, 0x17, - 0x71, 0xa4, 0x2b, 0x4d, 0x65, 0xa3, 0xba, 0x73, 0xd3, 0xcc, 0xc6, 0x37, 0xd1, 0x35, 0x83, 0x7e, - 0x4f, 0x18, 0x98, 0x29, 0x68, 0x73, 0xb8, 0x6d, 0x3e, 0xeb, 0xbc, 0xc5, 0x0e, 0x7f, 0x8a, 0x39, - 0xb2, 0xb5, 0xd1, 0xb8, 0x91, 0x8b, 0xc6, 0x0d, 0x90, 0xd9, 0xe0, 0x44, 0x55, 0x7b, 0x02, 0x4a, - 0x34, 0x1c, 0x60, 0xa6, 0xe7, 0x9b, 0x85, 0x8d, 0xea, 0xce, 0x35, 0x73, 0xc1, 0xd7, 0x31, 0x0f, - 0xfc, 0x01, 0x71, 0x8e, 0x61, 0x38, 0xc0, 0x76, 0x2d, 0x51, 0x2c, 0x89, 0x13, 0x83, 0xb1, 0x40, - 0xeb, 0x53, 0x1e, 0x68, 0x53, 0xb5, 0xdb, 0xc4, 0xeb, 0x12, 0xaf, 0xb7, 0x82, 0x16, 0xda, 0xa0, - 0xc2, 0x42, 0xe9, 0x48, 0xbb, 0x68, 0x2e, 0xec, 0xe2, 0x79, 0x0c, 0xda, 0x7f, 0x27, 0x8a, 0x95, - 0xc4, 0xc0, 0xe0, 0x44, 0x43, 0xdb, 0x03, 0x65, 0xea, 0x0f, 0x30, 0xc4, 0x87, 0x7a, 0x41, 0x16, - 0xbc, 0x58, 0x0e, 0xc6, 0x9c, 0x5d, 0x4f, 0xe4, 0xca, 0x89, 0x01, 0xa6, 0x0a, 0xad, 0x91, 0x02, - 0xfe, 0x9d, 0x9f, 0xca, 0x3e, 0x61, 0x5c, 0x7b, 0x3d, 0x37, 0x19, 0x73, 0xb9, 0xc9, 0x88, 0x68, - 0x39, 0x97, 0x49, 0x17, 0xa9, 0x65, 0x6a, 0x2a, 0x07, 0xa0, 0x44, 0x38, 0x76, 0xd3, 0x91, 0x6c, - 0x2e, 0xec, 0x61, 0xbe, 0xba, 0xec, 0x03, 0xef, 0x0a, 0x05, 0x18, 0x0b, 0xb5, 0xbe, 0x29, 0xa0, - 0x3e, 0x05, 0xaf, 0xa0, 0x87, 0xdd, 0xd9, 0x1e, 0xae, 0x2f, 0xd5, 0xc3, 0xc5, 0xc5, 0xff, 0x54, - 0x00, 0xc8, 0xae, 0xb0, 0xd6, 0x00, 0xa5, 0x21, 0xa6, 0x1d, 0xa6, 0x2b, 0xcd, 0xc2, 0x86, 0x6a, - 0xab, 0x82, 0x7f, 0x29, 0x0c, 0x30, 0xb6, 0x6b, 0x9b, 0x40, 0x45, 0x01, 0x79, 0x4c, 0xfd, 0x30, - 0x88, 0xd3, 0xab, 0x76, 0x2d, 0x1a, 0x37, 0xd4, 0x87, 0x07, 0xbb, 0xb1, 0x11, 0x66, 0x7e, 0x01, - 0x53, 0xcc, 0xfc, 0x90, 0x3a, 0x98, 0xe9, 0x85, 0x0c, 0x86, 0xa9, 0x11, 0x66, 0x7e, 0xed, 0x2e, - 0xa8, 0xa5, 0x87, 0x36, 0x72, 0x31, 0xd3, 0x8b, 0x32, 0x60, 0x2d, 0x1a, 0x37, 0x6a, 0x70, 0xda, - 0x01, 0x67, 0x39, 0xed, 0x3e, 0xa8, 0x7b, 0xbe, 0x97, 0x22, 0x2f, 0xe0, 0x3e, 0xd3, 0x4b, 0x32, - 0xf4, 0x9f, 0x68, 0xdc, 0xa8, 0xb7, 0x67, 0x5d, 0xf0, 0x3c, 0xdb, 0xfa, 0xa2, 0x80, 0xe2, 0x6f, - 0xb7, 0x54, 0x3e, 0xe4, 0x41, 0xf5, 0xcf, 0x36, 0x99, 0x6c, 0x13, 0xf1, 0x04, 0x57, 0xbb, 0x46, - 0x96, 0x7e, 0x82, 0x57, 0xef, 0x8f, 0xcf, 0x0a, 0xa8, 0xac, 0x68, 0x71, 0xd8, 0xb3, 0x55, 0xff, - 0x7f, 0x79, 0xd5, 0x17, 0x97, 0xfb, 0x0e, 0xa4, 0xf3, 0xd7, 0x6e, 0x80, 0x4a, 0xfa, 0xd8, 0x65, - 0xb1, 0x6a, 0x96, 0x3c, 0xdd, 0x07, 0x70, 0x42, 0x68, 0x4d, 0x50, 0xec, 0x13, 0xaf, 0xab, 0xe7, - 0x25, 0xf9, 0x57, 0x42, 0x16, 0xf7, 0x88, 0xd7, 0x85, 0xd2, 0x23, 0x08, 0x0f, 0xb9, 0x58, 0x5e, - 0x88, 0x29, 0x42, 0x3c, 0x73, 0x28, 0x3d, 0x62, 0x56, 0xe5, 0xe4, 0x32, 0x4d, 0xf4, 0x94, 0x85, - 0x7a, 0xd3, 0xf5, 0xe5, 0x97, 0xa9, 0xef, 0xf2, 0xec, 0x9a, 0x05, 0x54, 0xf1, 0xcb, 0x02, 0xe4, - 0x60, 0xbd, 0x28, 0xb1, 0xb5, 0x04, 0x53, 0xdb, 0xa9, 0x03, 0x66, 0x8c, 0xbd, 0x35, 0x3a, 0x33, - 0x72, 0x27, 0x67, 0x46, 0xee, 0xf4, 0xcc, 0xc8, 0xbd, 0x8f, 0x0c, 0x65, 0x14, 0x19, 0xca, 0x49, - 0x64, 0x28, 0xa7, 0x91, 0xa1, 0x7c, 0x8f, 0x0c, 0xe5, 0xe3, 0x0f, 0x23, 0xf7, 0xaa, 0x9c, 0x4c, - 0xfd, 0x57, 0x00, 0x00, 0x00, 0xff, 0xff, 0x74, 0x24, 0x6a, 0xfa, 0x45, 0x0a, 0x00, 0x00, + // 827 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x02, 0xff, 0xe4, 0x55, 0xcf, 0x8b, 0x23, 0x45, + 0x18, 0x4d, 0x65, 0x12, 0x26, 0x5d, 0x31, 0xc4, 0x2d, 0x17, 0x69, 0xa2, 0x74, 0x86, 0x16, 0x24, + 0xa0, 0x76, 0x9b, 0x5d, 0x51, 0x41, 0xf6, 0xb0, 0xbd, 0xa2, 0x0c, 0x3b, 0x8e, 0x4b, 0x2d, 0x7a, + 0x10, 0x0f, 0x56, 0x77, 0x6a, 0x3b, 0x65, 0xfa, 0x17, 0x55, 0xd5, 0x81, 0xc5, 0x8b, 0x08, 0x1e, + 0xbc, 0x79, 0xd4, 0xbf, 0xc0, 0x8b, 0x1e, 0xfd, 0x0b, 0xbc, 0xcc, 0x71, 0x8f, 0x7b, 0x0a, 0x4e, + 0xfb, 0x87, 0x28, 0xfd, 0x2b, 0x9d, 0xa4, 0x3b, 0x4e, 0x4e, 0x01, 0xf1, 0x34, 0x53, 0xdf, 0xf7, + 0xde, 0xfb, 0x5e, 0xbf, 0xa9, 0xaf, 0x06, 0x7e, 0xb0, 0x78, 0x5f, 0x18, 0x2c, 0x34, 0x17, 0xb1, + 0x4d, 0x79, 0x40, 0x25, 0x15, 0xe6, 0x92, 0x06, 0xb3, 0x90, 0x9b, 0x45, 0x83, 0x44, 0xcc, 0xe4, + 0x36, 0x71, 0xcc, 0xe5, 0xd4, 0x74, 0x69, 0x40, 0x39, 0x91, 0x74, 0x66, 0x44, 0x3c, 0x94, 0x21, + 0x42, 0x39, 0xc6, 0x20, 0x11, 0x33, 0x52, 0x8c, 0xb1, 0x9c, 0x8e, 0xde, 0x72, 0x99, 0x9c, 0xc7, + 0xb6, 0xe1, 0x84, 0xbe, 0xe9, 0x86, 0x6e, 0x68, 0x66, 0x50, 0x3b, 0x7e, 0x92, 0x9d, 0xb2, 0x43, + 0xf6, 0x5b, 0x2e, 0x31, 0x9a, 0xd4, 0xc7, 0x10, 0x2f, 0x9a, 0x93, 0xda, 0xb0, 0xd1, 0x3b, 0x15, + 0xd2, 0x27, 0xce, 0x9c, 0x05, 0x94, 0x3f, 0x35, 0xa3, 0x85, 0x9b, 0x16, 0x84, 0xe9, 0x53, 0x49, + 0x1a, 0x2c, 0x8e, 0xcc, 0x7d, 0x2c, 0x1e, 0x07, 0x92, 0xf9, 0xb4, 0x46, 0x78, 0xf7, 0x26, 0x82, + 0x70, 0xe6, 0xd4, 0x27, 0x35, 0xde, 0xdd, 0x7d, 0xbc, 0x58, 0x32, 0xcf, 0x64, 0x81, 0x14, 0x92, + 0xef, 0x92, 0xf4, 0x9f, 0x01, 0x1c, 0xde, 0x77, 0x5d, 0x4e, 0x5d, 0x22, 0x59, 0x18, 0xe0, 0xd8, + 0xa3, 0xe8, 0x7b, 0x00, 0x6f, 0x3b, 0x5e, 0x2c, 0x24, 0xe5, 0x38, 0xf4, 0xe8, 0x63, 0xea, 0x51, + 0x47, 0x86, 0x5c, 0xa8, 0xe0, 0xec, 0x64, 0xd2, 0xbf, 0x73, 0xd7, 0xa8, 0x42, 0x5f, 0x0f, 0x32, + 0xa2, 0x85, 0x9b, 0x16, 0x84, 0x91, 0xe6, 0x60, 0x2c, 0xa7, 0xc6, 0x05, 0xb1, 0xa9, 0x57, 0x72, + 0xad, 0x57, 0xaf, 0x56, 0xe3, 0x56, 0xb2, 0x1a, 0xdf, 0x7e, 0xd0, 0x20, 0x8c, 0x1b, 0xc7, 0xe9, + 0x3f, 0xb5, 0x61, 0x7f, 0x03, 0x8e, 0xbe, 0x82, 0xbd, 0x54, 0x7c, 0x46, 0x24, 0x51, 0xc1, 0x19, + 0x98, 0xf4, 0xef, 0xbc, 0x7d, 0x98, 0x95, 0x4f, 0xed, 0xaf, 0xa9, 0x23, 0x3f, 0xa1, 0x92, 0x58, + 0xa8, 0xf0, 0x01, 0xab, 0x1a, 0x5e, 0xab, 0xa2, 0x07, 0xb0, 0xcb, 0x63, 0x8f, 0x0a, 0xb5, 0x9d, + 0x7d, 0xa9, 0x66, 0xd4, 0xaf, 0x97, 0xf1, 0x28, 0xf4, 0x98, 0xf3, 0x34, 0x0d, 0xca, 0x1a, 0x14, + 0x62, 0xdd, 0xf4, 0x24, 0x70, 0xce, 0x45, 0x36, 0x1c, 0x92, 0xed, 0x44, 0xd5, 0x93, 0xcc, 0xed, + 0x6b, 0x4d, 0x72, 0x3b, 0xe1, 0x5b, 0x2f, 0x25, 0xab, 0xf1, 0xee, 0x5f, 0x04, 0xef, 0x0a, 0xea, + 0x3f, 0xb4, 0x21, 0xda, 0x88, 0xc6, 0x62, 0xc1, 0x8c, 0x05, 0xee, 0x11, 0x12, 0x3a, 0x87, 0x3d, + 0x11, 0x67, 0x8d, 0x32, 0xa4, 0x57, 0x9a, 0xbe, 0xea, 0x71, 0x8e, 0xb1, 0x5e, 0x2c, 0xc4, 0x7a, + 0x45, 0x41, 0xe0, 0x35, 0x1d, 0x7d, 0x04, 0x4f, 0x79, 0xe8, 0x51, 0x4c, 0x9f, 0x14, 0xf9, 0x34, + 0x2a, 0xe1, 0x1c, 0x62, 0x0d, 0x0b, 0xa5, 0xd3, 0xa2, 0x80, 0x4b, 0xb2, 0xfe, 0x07, 0x80, 0x2f, + 0xd7, 0xb3, 0xb8, 0x60, 0x42, 0xa2, 0x2f, 0x6b, 0x79, 0x18, 0x07, 0x5e, 0x5e, 0x26, 0xf2, 0x34, + 0xd6, 0x1f, 0x50, 0x56, 0x36, 0xb2, 0x78, 0x08, 0xbb, 0x4c, 0x52, 0xbf, 0x0c, 0xe2, 0xf5, 0x26, + 0xfb, 0x75, 0x63, 0xd5, 0xad, 0x39, 0x4f, 0xc9, 0x38, 0xd7, 0xd0, 0x7f, 0x07, 0x70, 0xb8, 0x01, + 0x3e, 0x82, 0xfd, 0x0f, 0xb7, 0xed, 0x8f, 0x6f, 0xb2, 0xdf, 0xec, 0xfb, 0x6f, 0x00, 0x61, 0xb5, + 0x12, 0x68, 0x0c, 0xbb, 0x4b, 0xca, 0xed, 0xfc, 0xad, 0x50, 0x2c, 0x25, 0xc5, 0x7f, 0x9e, 0x16, + 0x70, 0x5e, 0x47, 0x6f, 0x40, 0x85, 0x44, 0xec, 0x63, 0x1e, 0xc6, 0x51, 0x3e, 0x59, 0xb1, 0x06, + 0xc9, 0x6a, 0xac, 0xdc, 0x7f, 0x74, 0x9e, 0x17, 0x71, 0xd5, 
0x4f, 0xc1, 0x9c, 0x8a, 0x30, 0xe6, + 0x0e, 0x15, 0xea, 0x49, 0x05, 0xc6, 0x65, 0x11, 0x57, 0x7d, 0xf4, 0x1e, 0x1c, 0x94, 0x87, 0x4b, + 0xe2, 0x53, 0xa1, 0x76, 0x32, 0xc2, 0xad, 0x64, 0x35, 0x1e, 0xe0, 0xcd, 0x06, 0xde, 0xc6, 0xa1, + 0x7b, 0x70, 0x18, 0x84, 0x41, 0x09, 0xf9, 0x0c, 0x5f, 0x08, 0xb5, 0x9b, 0x51, 0xb3, 0x5d, 0xbc, + 0xdc, 0x6e, 0xe1, 0x5d, 0xac, 0xfe, 0x1b, 0x80, 0x9d, 0xff, 0xd0, 0xfb, 0xa4, 0x7f, 0xd7, 0x86, + 0xfd, 0xff, 0xfd, 0xa3, 0x91, 0xae, 0xdb, 0x71, 0x5f, 0x8b, 0x43, 0xd6, 0xed, 0xe6, 0x67, 0xe2, + 0x17, 0x00, 0x7b, 0x47, 0x7a, 0x1f, 0xee, 0x6d, 0x1b, 0x56, 0xf7, 0x1a, 0x6e, 0x76, 0xfa, 0x0d, + 0x2c, 0x53, 0x47, 0x6f, 0xc2, 0x5e, 0xb9, 0xd3, 0x99, 0x4f, 0xa5, 0x9a, 0x5b, 0xae, 0x3d, 0x5e, + 0x23, 0xd0, 0x19, 0xec, 0x2c, 0x58, 0x30, 0x53, 0xdb, 0x19, 0xf2, 0x85, 0x02, 0xd9, 0x79, 0xc8, + 0x82, 0x19, 0xce, 0x3a, 0x29, 0x22, 0x20, 0x7e, 0xfe, 0x6f, 0x75, 0x03, 0x91, 0x6e, 0x33, 0xce, + 0x3a, 0xfa, 0xaf, 0x00, 0x9e, 0x16, 0xb7, 0x67, 0xad, 0x07, 0xf6, 0xea, 0x6d, 0xfa, 0x6b, 0x1f, + 0xe2, 0xef, 0xdf, 0xa7, 0x23, 0x13, 0x2a, 0xe9, 0x4f, 0x11, 0x11, 0x87, 0xaa, 0x9d, 0x0c, 0x76, + 0xab, 0x80, 0x29, 0x97, 0x65, 0x03, 0x57, 0x18, 0x6b, 0x72, 0x75, 0xad, 0xb5, 0x9e, 0x5d, 0x6b, + 0xad, 0xe7, 0xd7, 0x5a, 0xeb, 0xdb, 0x44, 0x03, 0x57, 0x89, 0x06, 0x9e, 0x25, 0x1a, 0x78, 0x9e, + 0x68, 0xe0, 0xcf, 0x44, 0x03, 0x3f, 0xfe, 0xa5, 0xb5, 0xbe, 0x68, 0x2f, 0xa7, 0xff, 0x04, 0x00, + 0x00, 0xff, 0xff, 0x32, 0xe3, 0x23, 0xf8, 0x2e, 0x0b, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/rbac/v1/generated.proto b/vendor/k8s.io/api/rbac/v1/generated.proto index 29aa3d5eebd5..6edb2779a64a 100644 --- a/vendor/k8s.io/api/rbac/v1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1/generated.proto @@ -21,6 +21,7 @@ syntax = 'proto2'; package k8s.io.api.rbac.v1; +import "k8s.io/api/rbac/v1alpha1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -29,6 +30,14 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1"; +// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole +message AggregationRule { + // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. + // If any of the selectors match, then the ClusterRole's permissions will be added + // +optional + repeated k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1; +} + // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. message ClusterRole { // Standard object's metadata. @@ -37,6 +46,12 @@ message ClusterRole { // Rules holds all the PolicyRules for this ClusterRole repeated PolicyRule rules = 2; + + // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. + // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be + // stomped by the controller. + // +optional + optional AggregationRule aggregationRule = 3; } // ClusterRoleBinding references a ClusterRole, but not contain it. 
It can reference a ClusterRole in the global namespace, diff --git a/vendor/k8s.io/api/rbac/v1/register.go b/vendor/k8s.io/api/rbac/v1/register.go index 7336b5455e65..8f1fd460a211 100644 --- a/vendor/k8s.io/api/rbac/v1/register.go +++ b/vendor/k8s.io/api/rbac/v1/register.go @@ -40,7 +40,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Role{}, diff --git a/vendor/k8s.io/api/rbac/v1/types.go b/vendor/k8s.io/api/rbac/v1/types.go index 8dbd1a8b89aa..91990548bc4f 100644 --- a/vendor/k8s.io/api/rbac/v1/types.go +++ b/vendor/k8s.io/api/rbac/v1/types.go @@ -170,6 +170,20 @@ type ClusterRole struct { // Rules holds all the PolicyRules for this ClusterRole Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` + + // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. + // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be + // stomped by the controller. + // +optional + AggregationRule *AggregationRule `json:"aggregationRule,omitempty" protobuf:"bytes,3,opt,name=aggregationRule"` +} + +// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole +type AggregationRule struct { + // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. + // If any of the selectors match, then the ClusterRole's permissions will be added + // +optional + ClusterRoleSelectors []metav1.LabelSelector `json:"clusterRoleSelectors,omitempty" protobuf:"bytes,1,rep,name=clusterRoleSelectors"` } // +genclient diff --git a/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go index 7770d4085b16..280ae5a82f10 100644 --- a/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go @@ -27,10 +27,20 @@ package v1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE +var map_AggregationRule = map[string]string{ + "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", + "clusterRoleSelectors": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added", +} + +func (AggregationRule) SwaggerDoc() map[string]string { + return map_AggregationRule +} + var map_ClusterRole = map[string]string{ - "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", - "metadata": "Standard object's metadata.", - "rules": "Rules holds all the PolicyRules for this ClusterRole", + "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", + "metadata": "Standard object's metadata.", + "rules": "Rules holds all the PolicyRules for this ClusterRole", + "aggregationRule": "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. 
If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.", } func (ClusterRole) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go index 7ffc81869d2c..e1aab58145d7 100644 --- a/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go @@ -21,66 +21,31 @@ limitations under the License. package v1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterRole).DeepCopyInto(out.(*ClusterRole)) - return nil - }, InType: reflect.TypeOf(&ClusterRole{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterRoleBinding).DeepCopyInto(out.(*ClusterRoleBinding)) - return nil - }, InType: reflect.TypeOf(&ClusterRoleBinding{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterRoleBindingList).DeepCopyInto(out.(*ClusterRoleBindingList)) - return nil - }, InType: reflect.TypeOf(&ClusterRoleBindingList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterRoleList).DeepCopyInto(out.(*ClusterRoleList)) - return nil - }, InType: reflect.TypeOf(&ClusterRoleList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PolicyRule).DeepCopyInto(out.(*PolicyRule)) - return nil - }, InType: reflect.TypeOf(&PolicyRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Role).DeepCopyInto(out.(*Role)) - return nil - }, InType: reflect.TypeOf(&Role{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RoleBinding).DeepCopyInto(out.(*RoleBinding)) - return nil - }, InType: reflect.TypeOf(&RoleBinding{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RoleBindingList).DeepCopyInto(out.(*RoleBindingList)) - return nil - }, InType: reflect.TypeOf(&RoleBindingList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RoleList).DeepCopyInto(out.(*RoleList)) - return nil - }, InType: reflect.TypeOf(&RoleList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RoleRef).DeepCopyInto(out.(*RoleRef)) - return nil - }, InType: reflect.TypeOf(&RoleRef{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Subject).DeepCopyInto(out.(*Subject)) - return nil - }, InType: reflect.TypeOf(&Subject{})}, - ) +// DeepCopyInto is an autogenerated deepcopy 
function, copying the receiver, writing into out. in must be non-nil. +func (in *AggregationRule) DeepCopyInto(out *AggregationRule) { + *out = *in + if in.ClusterRoleSelectors != nil { + in, out := &in.ClusterRoleSelectors, &out.ClusterRoleSelectors + *out = make([]meta_v1.LabelSelector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AggregationRule. +func (in *AggregationRule) DeepCopy() *AggregationRule { + if in == nil { + return nil + } + out := new(AggregationRule) + in.DeepCopyInto(out) + return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -95,6 +60,15 @@ func (in *ClusterRole) DeepCopyInto(out *ClusterRole) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.AggregationRule != nil { + in, out := &in.AggregationRule, &out.AggregationRule + if *in == nil { + *out = nil + } else { + *out = new(AggregationRule) + (*in).DeepCopyInto(*out) + } + } return } diff --git a/vendor/k8s.io/api/rbac/v1alpha1/BUILD b/vendor/k8s.io/api/rbac/v1alpha1/BUILD index 6a38064dd240..68d7cdfbb1d9 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/BUILD +++ b/vendor/k8s.io/api/rbac/v1alpha1/BUILD @@ -15,10 +15,10 @@ go_library( "types_swagger_doc_generated.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/api/rbac/v1alpha1", deps = [ "//vendor/github.com/gogo/protobuf/proto:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], diff --git a/vendor/k8s.io/api/rbac/v1alpha1/doc.go b/vendor/k8s.io/api/rbac/v1alpha1/doc.go index 3c3c703755c8..d8b4bdef8a7e 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/doc.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true // +groupName=rbac.authorization.k8s.io diff --git a/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go b/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go index 31e68aeedaae..c66cadd95b2e 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go @@ -25,6 +25,7 @@ limitations under the License. k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1alpha1/generated.proto It has these top-level messages: + AggregationRule ClusterRole ClusterRoleBinding ClusterRoleBindingList @@ -43,6 +44,8 @@ import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + import strings "strings" import reflect "reflect" @@ -59,51 +62,56 @@ var _ = math.Inf // proto package needs to be updated. 
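The AggregationRule field introduced for rbac/v1 above (and mirrored for rbac/v1alpha1 in the hunks that follow) marks a ClusterRole's Rules as controller managed: the rules of every ClusterRole matched by ClusterRoleSelectors are unioned in, and direct edits to Rules get stomped. A minimal sketch of constructing such an object with the new field (the object name and label key are invented for illustration):

// Illustrative sketch only; "monitoring" and the label key are hypothetical.
package example

import (
	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// aggregatedClusterRole leaves Rules empty on purpose: with AggregationRule
// set they are filled in (and overwritten) by the aggregation controller.
func aggregatedClusterRole() *rbacv1.ClusterRole {
	return &rbacv1.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{Name: "monitoring"},
		AggregationRule: &rbacv1.AggregationRule{
			ClusterRoleSelectors: []metav1.LabelSelector{
				{MatchLabels: map[string]string{"rbac.example.com/aggregate-to-monitoring": "true"}},
			},
		},
	}
}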
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +func (m *AggregationRule) Reset() { *m = AggregationRule{} } +func (*AggregationRule) ProtoMessage() {} +func (*AggregationRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + func (m *ClusterRole) Reset() { *m = ClusterRole{} } func (*ClusterRole) ProtoMessage() {} -func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } func (*ClusterRoleBinding) ProtoMessage() {} -func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } func (*ClusterRoleBindingList) ProtoMessage() {} -func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } func (*ClusterRoleList) ProtoMessage() {} -func (*ClusterRoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (*ClusterRoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } func (m *PolicyRule) Reset() { *m = PolicyRule{} } func (*PolicyRule) ProtoMessage() {} -func (*PolicyRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (*PolicyRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } func (m *Role) Reset() { *m = Role{} } func (*Role) ProtoMessage() {} -func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } func (m *RoleBinding) Reset() { *m = RoleBinding{} } func (*RoleBinding) ProtoMessage() {} -func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } func (*RoleBindingList) ProtoMessage() {} -func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } func (m *RoleList) Reset() { *m = RoleList{} } func (*RoleList) ProtoMessage() {} -func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } func (m *RoleRef) Reset() { *m = RoleRef{} } func (*RoleRef) ProtoMessage() {} -func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } func (m *Subject) Reset() { *m = Subject{} } func (*Subject) ProtoMessage() {} -func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } func init() { + proto.RegisterType((*AggregationRule)(nil), "k8s.io.api.rbac.v1alpha1.AggregationRule") 
proto.RegisterType((*ClusterRole)(nil), "k8s.io.api.rbac.v1alpha1.ClusterRole") proto.RegisterType((*ClusterRoleBinding)(nil), "k8s.io.api.rbac.v1alpha1.ClusterRoleBinding") proto.RegisterType((*ClusterRoleBindingList)(nil), "k8s.io.api.rbac.v1alpha1.ClusterRoleBindingList") @@ -116,6 +124,36 @@ func init() { proto.RegisterType((*RoleRef)(nil), "k8s.io.api.rbac.v1alpha1.RoleRef") proto.RegisterType((*Subject)(nil), "k8s.io.api.rbac.v1alpha1.Subject") } +func (m *AggregationRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AggregationRule) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ClusterRoleSelectors) > 0 { + for _, msg := range m.ClusterRoleSelectors { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + func (m *ClusterRole) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -151,6 +189,16 @@ func (m *ClusterRole) MarshalTo(dAtA []byte) (int, error) { i += n } } + if m.AggregationRule != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AggregationRule.Size())) + n2, err := m.AggregationRule.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } return i, nil } @@ -172,11 +220,11 @@ func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n2, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n2 + i += n3 if len(m.Subjects) > 0 { for _, msg := range m.Subjects { dAtA[i] = 0x12 @@ -192,11 +240,11 @@ func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n3, err := m.RoleRef.MarshalTo(dAtA[i:]) + n4, err := m.RoleRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n3 + i += n4 return i, nil } @@ -218,11 +266,11 @@ func (m *ClusterRoleBindingList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n4, err := m.ListMeta.MarshalTo(dAtA[i:]) + n5, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n4 + i += n5 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -256,11 +304,11 @@ func (m *ClusterRoleList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(dAtA[i:]) + n6, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n5 + i += n6 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -387,11 +435,11 @@ func (m *Role) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n6, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n6 + i += n7 if len(m.Rules) > 0 { for _, msg := range m.Rules { dAtA[i] = 0x12 @@ -425,11 +473,11 @@ func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n8, err := 
m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n7 + i += n8 if len(m.Subjects) > 0 { for _, msg := range m.Subjects { dAtA[i] = 0x12 @@ -445,11 +493,11 @@ func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n8, err := m.RoleRef.MarshalTo(dAtA[i:]) + n9, err := m.RoleRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n8 + i += n9 return i, nil } @@ -471,11 +519,11 @@ func (m *RoleBindingList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n9, err := m.ListMeta.MarshalTo(dAtA[i:]) + n10, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n9 + i += n10 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -509,11 +557,11 @@ func (m *RoleList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n10, err := m.ListMeta.MarshalTo(dAtA[i:]) + n11, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n10 + i += n11 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -620,6 +668,18 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return offset + 1 } +func (m *AggregationRule) Size() (n int) { + var l int + _ = l + if len(m.ClusterRoleSelectors) > 0 { + for _, e := range m.ClusterRoleSelectors { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *ClusterRole) Size() (n int) { var l int _ = l @@ -631,6 +691,10 @@ func (m *ClusterRole) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.AggregationRule != nil { + l = m.AggregationRule.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -811,6 +875,16 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *AggregationRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AggregationRule{`, + `ClusterRoleSelectors:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ClusterRoleSelectors), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} func (this *ClusterRole) String() string { if this == nil { return "nil" @@ -818,6 +892,7 @@ func (this *ClusterRole) String() string { s := strings.Join([]string{`&ClusterRole{`, `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, + `AggregationRule:` + strings.Replace(fmt.Sprintf("%v", this.AggregationRule), "AggregationRule", "AggregationRule", 1) + `,`, `}`, }, "") return s @@ -948,6 +1023,87 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } +func (m *AggregationRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := 
int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AggregationRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AggregationRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterRoleSelectors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{}) + if err := m.ClusterRoleSelectors[len(m.ClusterRoleSelectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ClusterRole) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -1038,6 +1194,39 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AggregationRule", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AggregationRule == nil { + m.AggregationRule = &AggregationRule{} + } + if err := m.AggregationRule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -2504,53 +2693,58 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 766 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x94, 0xcd, 0x6e, 0xd3, 0x40, - 0x10, 0xc7, 0xb3, 0xf9, 0xa0, 0xc9, 0x86, 0x28, 0xd4, 0x48, 0xc8, 0xea, 0xc1, 0x09, 0x11, 0x48, - 0x95, 0x28, 0x36, 0x2d, 0x08, 0xb8, 0x70, 0x68, 0x7a, 0x40, 0x81, 0xd2, 0x96, 0x45, 0xf4, 0x80, - 0x38, 0xb0, 0x71, 0xb6, 0xc9, 0x12, 0x7f, 0x69, 0xd7, 0x8e, 0x54, 0x71, 0xe1, 0x09, 0x10, 0x17, - 0x1e, 0x83, 0x0b, 0xdc, 0xe0, 0x05, 0xca, 0xad, 0xc7, 0x9e, 0x22, 0x6a, 0x1e, 0x04, 0xb4, 0x6b, - 0x3b, 0x4e, 0x9a, 0x86, 0xf4, 0x14, 0x09, 0x89, 0x93, 0xbd, 0x33, 0xbf, 0xf9, 0xef, 0xcc, 0xec, - 0xee, 0xc0, 0xcd, 0xfe, 0x43, 0xae, 0x53, 0xd7, 0xe8, 0x07, 0x6d, 0xc2, 0x1c, 0xe2, 0x13, 0x6e, - 0x0c, 0x88, 0xd3, 0x71, 0x99, 0x11, 0x3b, 0xb0, 0x47, 0x0d, 0xd6, 0xc6, 0xa6, 0x31, 0x58, 0xc7, - 0x96, 0xd7, 0xc3, 0xeb, 0x46, 0x97, 0x38, 0x84, 0x61, 0x9f, 0x74, 0x74, 0x8f, 0xb9, 0xbe, 0xab, - 0xa8, 0x11, 0xa9, 0x63, 0x8f, 0xea, 0x82, 0xd4, 0x13, 0x72, 0xe5, 0x76, 0x97, 0xfa, 0xbd, 0xa0, - 0xad, 0x9b, 0xae, 0x6d, 0x74, 0xdd, 0xae, 0x6b, 0xc8, 0x80, 0x76, 0x70, 
0x20, 0x57, 0x72, 0x21, - 0xff, 0x22, 0xa1, 0x95, 0x7b, 0xe9, 0x96, 0x36, 0x36, 0x7b, 0xd4, 0x21, 0xec, 0xd0, 0xf0, 0xfa, - 0x5d, 0x61, 0xe0, 0x86, 0x4d, 0x7c, 0x6c, 0x0c, 0xa6, 0xb6, 0x5f, 0x31, 0x66, 0x45, 0xb1, 0xc0, - 0xf1, 0xa9, 0x4d, 0xa6, 0x02, 0xee, 0xcf, 0x0b, 0xe0, 0x66, 0x8f, 0xd8, 0x78, 0x2a, 0xee, 0xee, - 0xac, 0xb8, 0xc0, 0xa7, 0x96, 0x41, 0x1d, 0x9f, 0xfb, 0xec, 0x6c, 0x50, 0xe3, 0x1b, 0x80, 0xe5, - 0x2d, 0x2b, 0xe0, 0x3e, 0x61, 0xc8, 0xb5, 0x88, 0xf2, 0x06, 0x16, 0x45, 0x21, 0x1d, 0xec, 0x63, - 0x15, 0xd4, 0xc1, 0x6a, 0x79, 0xe3, 0x8e, 0x9e, 0xf6, 0x6f, 0xa4, 0xab, 0x7b, 0xfd, 0xae, 0x30, - 0x70, 0x5d, 0xd0, 0xfa, 0x60, 0x5d, 0xdf, 0x6d, 0xbf, 0x25, 0xa6, 0xff, 0x8c, 0xf8, 0xb8, 0xa9, - 0x1c, 0x0d, 0x6b, 0x99, 0x70, 0x58, 0x83, 0xa9, 0x0d, 0x8d, 0x54, 0x95, 0x16, 0x2c, 0xb0, 0xc0, - 0x22, 0x5c, 0xcd, 0xd6, 0x73, 0xab, 0xe5, 0x8d, 0x1b, 0xfa, 0xac, 0xe3, 0xd1, 0xf7, 0x5c, 0x8b, - 0x9a, 0x87, 0x28, 0xb0, 0x48, 0xb3, 0x12, 0x4b, 0x16, 0xc4, 0x8a, 0xa3, 0x48, 0xa1, 0xf1, 0x29, - 0x0b, 0x95, 0xb1, 0xe4, 0x9b, 0xd4, 0xe9, 0x50, 0xa7, 0xbb, 0x80, 0x1a, 0x76, 0x61, 0x91, 0x07, - 0xd2, 0x91, 0x94, 0x71, 0x7d, 0x76, 0x19, 0x2f, 0x22, 0xb2, 0x79, 0x25, 0x96, 0x2c, 0xc6, 0x06, - 0x8e, 0x46, 0x22, 0xca, 0x36, 0x5c, 0x62, 0xae, 0x45, 0x10, 0x39, 0x50, 0x73, 0x32, 0xe3, 0xbf, - 0xe8, 0xa1, 0x08, 0x6c, 0x56, 0x63, 0xbd, 0xa5, 0xd8, 0x80, 0x12, 0x89, 0xc6, 0x0f, 0x00, 0xaf, - 0x4d, 0xf7, 0x65, 0x9b, 0x72, 0x5f, 0x79, 0x3d, 0xd5, 0x1b, 0xfd, 0x62, 0xbd, 0x11, 0xd1, 0xb2, - 0x33, 0xa3, 0x32, 0x12, 0xcb, 0x58, 0x5f, 0x9e, 0xc3, 0x02, 0xf5, 0x89, 0x9d, 0x34, 0x65, 0x6d, - 0x76, 0x11, 0xd3, 0xe9, 0xa5, 0x67, 0xdc, 0x12, 0x12, 0x28, 0x52, 0x6a, 0x7c, 0x07, 0xb0, 0x3a, - 0x06, 0x2f, 0xa0, 0x88, 0x27, 0x93, 0x45, 0xdc, 0xbc, 0x58, 0x11, 0xe7, 0x67, 0xff, 0x1b, 0x40, - 0x98, 0x5e, 0x63, 0xa5, 0x06, 0x0b, 0x03, 0xc2, 0xda, 0x5c, 0x05, 0xf5, 0xdc, 0x6a, 0xa9, 0x59, - 0x12, 0xfc, 0xbe, 0x30, 0xa0, 0xc8, 0xae, 0xdc, 0x82, 0x25, 0xec, 0xd1, 0xc7, 0xcc, 0x0d, 0x3c, - 0xae, 0xe6, 0x24, 0x54, 0x09, 0x87, 0xb5, 0xd2, 0xe6, 0x5e, 0x2b, 0x32, 0xa2, 0xd4, 0x2f, 0x60, - 0x46, 0xb8, 0x1b, 0x30, 0x93, 0x70, 0x35, 0x9f, 0xc2, 0x28, 0x31, 0xa2, 0xd4, 0xaf, 0x3c, 0x80, - 0x95, 0x64, 0xb1, 0x83, 0x6d, 0xc2, 0xd5, 0x82, 0x0c, 0x58, 0x0e, 0x87, 0xb5, 0x0a, 0x1a, 0x77, - 0xa0, 0x49, 0x4e, 0x79, 0x04, 0xab, 0x8e, 0xeb, 0x24, 0xc8, 0x4b, 0xb4, 0xcd, 0xd5, 0x4b, 0x32, - 0xf4, 0x6a, 0x38, 0xac, 0x55, 0x77, 0x26, 0x5d, 0xe8, 0x2c, 0xdb, 0xf8, 0x0a, 0x60, 0xfe, 0xdf, - 0x9b, 0x2c, 0x1f, 0xb2, 0xb0, 0xfc, 0x7f, 0xa4, 0x8c, 0x8d, 0x14, 0xf1, 0x0c, 0x17, 0x3b, 0x4b, - 0x2e, 0xfe, 0x0c, 0xe7, 0x0f, 0x91, 0xcf, 0x00, 0x16, 0x17, 0x34, 0x3d, 0xb6, 0x26, 0xd3, 0xd6, - 0xe6, 0xa4, 0x7d, 0x7e, 0xbe, 0xef, 0x60, 0x72, 0x02, 0xca, 0x1a, 0x2c, 0x26, 0x2f, 0x5e, 0x66, - 0x5b, 0x4a, 0x77, 0x4f, 0x86, 0x02, 0x1a, 0x11, 0x4a, 0x1d, 0xe6, 0xfb, 0xd4, 0xe9, 0xa8, 0x59, - 0x49, 0x5e, 0x8e, 0xc9, 0xfc, 0x53, 0xea, 0x74, 0x90, 0xf4, 0x08, 0xc2, 0xc1, 0x36, 0x91, 0x77, - 0x62, 0x8c, 0x10, 0x6f, 0x1d, 0x49, 0x4f, 0xe3, 0x0b, 0x80, 0x4b, 0xf1, 0x7d, 0x1a, 0xe9, 0x81, - 0x99, 0x7a, 0x1b, 0x10, 0x62, 0x8f, 0xee, 0x13, 0xc6, 0xa9, 0xeb, 0xc4, 0xfb, 0x8e, 0x6e, 0xfa, - 0xe6, 0x5e, 0x2b, 0xf6, 0xa0, 0x31, 0x6a, 0x7e, 0x0e, 0x8a, 0x01, 0x4b, 0xe2, 0xcb, 0x3d, 0x6c, - 0x12, 0x35, 0x2f, 0xb1, 0xe5, 0x18, 0x2b, 0xed, 0x24, 0x0e, 0x94, 0x32, 0x4d, 0xfd, 0xe8, 0x54, - 0xcb, 0x1c, 0x9f, 0x6a, 0x99, 0x93, 0x53, 0x2d, 0xf3, 0x3e, 0xd4, 0xc0, 0x51, 0xa8, 0x81, 0xe3, - 0x50, 0x03, 0x27, 0xa1, 0x06, 0x7e, 0x86, 0x1a, 0xf8, 0xf8, 0x4b, 0xcb, 0xbc, 0x2a, 0x26, 0xcd, - 
0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0x1d, 0x38, 0x05, 0x46, 0x58, 0x0a, 0x00, 0x00, + // 844 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xbf, 0x8f, 0xe3, 0x44, + 0x14, 0xce, 0x64, 0x13, 0x36, 0x99, 0x65, 0x15, 0x6e, 0x38, 0x21, 0x6b, 0x85, 0x9c, 0xc5, 0x02, + 0xe9, 0x10, 0x87, 0xcd, 0xee, 0x21, 0xa0, 0xa1, 0x58, 0x5f, 0x81, 0x16, 0x96, 0xbd, 0x65, 0x4e, + 0x5c, 0x81, 0x28, 0x98, 0x38, 0x73, 0xce, 0x10, 0xdb, 0x63, 0xcd, 0x8c, 0x23, 0x9d, 0x68, 0x68, + 0x68, 0x11, 0x0d, 0x05, 0x3d, 0x2d, 0x0d, 0x94, 0xfc, 0x03, 0x4b, 0x77, 0xe5, 0x56, 0x11, 0x6b, + 0xfe, 0x10, 0x90, 0xc7, 0x76, 0xec, 0xfc, 0x22, 0xa9, 0x22, 0x21, 0x51, 0x25, 0xf3, 0xde, 0xf7, + 0xbe, 0xf7, 0xde, 0x37, 0xf3, 0x9e, 0xe1, 0xd9, 0xf8, 0x03, 0x69, 0x33, 0xee, 0x8c, 0x93, 0x01, + 0x15, 0x11, 0x55, 0x54, 0x3a, 0x13, 0x1a, 0x0d, 0xb9, 0x70, 0x0a, 0x07, 0x89, 0x99, 0x23, 0x06, + 0xc4, 0x73, 0x26, 0x27, 0x24, 0x88, 0x47, 0xe4, 0xc4, 0xf1, 0x69, 0x44, 0x05, 0x51, 0x74, 0x68, + 0xc7, 0x82, 0x2b, 0x8e, 0x8c, 0x1c, 0x69, 0x93, 0x98, 0xd9, 0x19, 0xd2, 0x2e, 0x91, 0x47, 0x6f, + 0xfb, 0x4c, 0x8d, 0x92, 0x81, 0xed, 0xf1, 0xd0, 0xf1, 0xb9, 0xcf, 0x1d, 0x1d, 0x30, 0x48, 0x9e, + 0xea, 0x93, 0x3e, 0xe8, 0x7f, 0x39, 0xd1, 0xd1, 0xbb, 0x55, 0xca, 0x90, 0x78, 0x23, 0x16, 0x51, + 0xf1, 0xcc, 0x89, 0xc7, 0x7e, 0x66, 0x90, 0x4e, 0x48, 0x15, 0x71, 0x26, 0x4b, 0xe9, 0x8f, 0x9c, + 0x75, 0x51, 0x22, 0x89, 0x14, 0x0b, 0xe9, 0x52, 0xc0, 0x7b, 0x9b, 0x02, 0xa4, 0x37, 0xa2, 0x21, + 0x59, 0x8a, 0x7b, 0xb0, 0x2e, 0x2e, 0x51, 0x2c, 0x70, 0x58, 0xa4, 0xa4, 0x12, 0x8b, 0x41, 0xd6, + 0x4f, 0x00, 0xf6, 0xce, 0x7c, 0x5f, 0x50, 0x9f, 0x28, 0xc6, 0x23, 0x9c, 0x04, 0x14, 0x7d, 0x07, + 0xe0, 0x5d, 0x2f, 0x48, 0xa4, 0xa2, 0x02, 0xf3, 0x80, 0x3e, 0xa6, 0x01, 0xf5, 0x14, 0x17, 0xd2, + 0x00, 0xc7, 0x7b, 0xf7, 0x0e, 0x4e, 0x1f, 0xd8, 0x95, 0xa0, 0xb3, 0x44, 0x76, 0x3c, 0xf6, 0x33, + 0x83, 0xb4, 0x33, 0x1d, 0xec, 0xc9, 0x89, 0x7d, 0x41, 0x06, 0x34, 0x28, 0x63, 0xdd, 0x57, 0xaf, + 0xa7, 0xfd, 0x46, 0x3a, 0xed, 0xdf, 0x7d, 0xb8, 0x82, 0x18, 0xaf, 0x4c, 0x67, 0xfd, 0xdc, 0x84, + 0x07, 0x35, 0x38, 0xfa, 0x0a, 0x76, 0x32, 0xf2, 0x21, 0x51, 0xc4, 0x00, 0xc7, 0xe0, 0xde, 0xc1, + 0xe9, 0x3b, 0xdb, 0x95, 0xf2, 0x68, 0xf0, 0x35, 0xf5, 0xd4, 0xa7, 0x54, 0x11, 0x17, 0x15, 0x75, + 0xc0, 0xca, 0x86, 0x67, 0xac, 0xe8, 0x1c, 0xb6, 0x45, 0x12, 0x50, 0x69, 0x34, 0x75, 0xa7, 0xaf, + 0xdb, 0xeb, 0x9e, 0x8e, 0x7d, 0xc5, 0x03, 0xe6, 0x3d, 0xcb, 0xe4, 0x72, 0x0f, 0x0b, 0xca, 0x76, + 0x76, 0x92, 0x38, 0x67, 0x40, 0x23, 0xd8, 0x23, 0xf3, 0xba, 0x1a, 0x7b, 0xba, 0xe6, 0x37, 0xd7, + 0x93, 0x2e, 0x5c, 0x84, 0xfb, 0x72, 0x3a, 0xed, 0x2f, 0xde, 0x0e, 0x5e, 0xa4, 0xb5, 0x7e, 0x6c, + 0x42, 0x54, 0x93, 0xc9, 0x65, 0xd1, 0x90, 0x45, 0xfe, 0x0e, 0xd4, 0x7a, 0x04, 0x3b, 0x32, 0xd1, + 0x8e, 0x52, 0xb0, 0xd7, 0xd6, 0xf7, 0xf6, 0x38, 0x47, 0xba, 0x2f, 0x15, 0x94, 0x9d, 0xc2, 0x20, + 0xf1, 0x8c, 0x04, 0x5d, 0xc0, 0x7d, 0xc1, 0x03, 0x8a, 0xe9, 0xd3, 0x42, 0xab, 0x7f, 0xe1, 0xc3, + 0x39, 0xd0, 0xed, 0x15, 0x7c, 0xfb, 0x85, 0x01, 0x97, 0x14, 0xd6, 0x1f, 0x00, 0xbe, 0xb2, 0xac, + 0xcb, 0x05, 0x93, 0x0a, 0x7d, 0xb9, 0xa4, 0x8d, 0xbd, 0xe5, 0xa3, 0x66, 0x32, 0x57, 0x66, 0xd6, + 0x46, 0x69, 0xa9, 0xe9, 0xf2, 0x19, 0x6c, 0x33, 0x45, 0xc3, 0x52, 0x94, 0xfb, 0xeb, 0x9b, 0x58, + 0x2e, 0xaf, 0x7a, 0x4d, 0xe7, 0x19, 0x05, 0xce, 0x99, 0xac, 0xdf, 0x01, 0xec, 0xd5, 0xc0, 0x3b, + 0x68, 0xe2, 0xe3, 0xf9, 0x26, 0xde, 0xd8, 0xae, 0x89, 0xd5, 0xd5, 0xff, 0x0d, 0x20, 0xac, 0x06, + 0x06, 0xf5, 0x61, 0x7b, 0x42, 0xc5, 0x20, 0xdf, 0x27, 0x5d, 0xb7, 0x9b, 0xe1, 0x9f, 
0x64, 0x06, + 0x9c, 0xdb, 0xd1, 0x5b, 0xb0, 0x4b, 0x62, 0xf6, 0x91, 0xe0, 0x49, 0x2c, 0x8d, 0x3d, 0x0d, 0x3a, + 0x4c, 0xa7, 0xfd, 0xee, 0xd9, 0xd5, 0x79, 0x6e, 0xc4, 0x95, 0x3f, 0x03, 0x0b, 0x2a, 0x79, 0x22, + 0x3c, 0x2a, 0x8d, 0x56, 0x05, 0xc6, 0xa5, 0x11, 0x57, 0x7e, 0xf4, 0x3e, 0x3c, 0x2c, 0x0f, 0x97, + 0x24, 0xa4, 0xd2, 0x68, 0xeb, 0x80, 0x3b, 0xe9, 0xb4, 0x7f, 0x88, 0xeb, 0x0e, 0x3c, 0x8f, 0x43, + 0x1f, 0xc2, 0x5e, 0xc4, 0xa3, 0x12, 0xf2, 0x39, 0xbe, 0x90, 0xc6, 0x0b, 0x3a, 0x54, 0xcf, 0xe8, + 0xe5, 0xbc, 0x0b, 0x2f, 0x62, 0xad, 0xdf, 0x00, 0x6c, 0xfd, 0xe7, 0x76, 0x98, 0xf5, 0x7d, 0x13, + 0x1e, 0xfc, 0xbf, 0x52, 0x6a, 0x2b, 0x25, 0x1b, 0xc3, 0xdd, 0xee, 0x92, 0xed, 0xc7, 0x70, 0xf3, + 0x12, 0xf9, 0x05, 0xc0, 0xce, 0x8e, 0xb6, 0xc7, 0xc3, 0xf9, 0xb2, 0xcd, 0x0d, 0x65, 0xaf, 0xae, + 0xf7, 0x1b, 0x58, 0xde, 0x00, 0xba, 0x0f, 0x3b, 0xe5, 0xc4, 0xeb, 0x6a, 0xbb, 0x55, 0xf6, 0x72, + 0x29, 0xe0, 0x19, 0x02, 0x1d, 0xc3, 0xd6, 0x98, 0x45, 0x43, 0xa3, 0xa9, 0x91, 0x2f, 0x16, 0xc8, + 0xd6, 0x27, 0x2c, 0x1a, 0x62, 0xed, 0xc9, 0x10, 0x11, 0x09, 0xf3, 0x4f, 0x72, 0x0d, 0x91, 0xcd, + 0x3a, 0xd6, 0x1e, 0xeb, 0x57, 0x00, 0xf7, 0x8b, 0xf7, 0x34, 0xe3, 0x03, 0x6b, 0xf9, 0x4e, 0x21, + 0x24, 0x31, 0x7b, 0x42, 0x85, 0x64, 0x3c, 0x2a, 0xf2, 0xce, 0x5e, 0xfa, 0xd9, 0xd5, 0x79, 0xe1, + 0xc1, 0x35, 0xd4, 0xe6, 0x1a, 0x90, 0x03, 0xbb, 0xd9, 0xaf, 0x8c, 0x89, 0x47, 0x8d, 0x96, 0x86, + 0xdd, 0x29, 0x60, 0xdd, 0xcb, 0xd2, 0x81, 0x2b, 0x8c, 0x6b, 0x5f, 0xdf, 0x9a, 0x8d, 0xe7, 0xb7, + 0x66, 0xe3, 0xe6, 0xd6, 0x6c, 0x7c, 0x9b, 0x9a, 0xe0, 0x3a, 0x35, 0xc1, 0xf3, 0xd4, 0x04, 0x37, + 0xa9, 0x09, 0xfe, 0x4c, 0x4d, 0xf0, 0xc3, 0x5f, 0x66, 0xe3, 0x8b, 0x4e, 0x29, 0xfe, 0x3f, 0x01, + 0x00, 0x00, 0xff, 0xff, 0xeb, 0xcc, 0xe2, 0x61, 0x5e, 0x0b, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/rbac/v1alpha1/generated.proto b/vendor/k8s.io/api/rbac/v1alpha1/generated.proto index 89f45b28e71e..28a4ae3d010e 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1alpha1/generated.proto @@ -29,6 +29,14 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1alpha1"; +// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole +message AggregationRule { + // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. + // If any of the selectors match, then the ClusterRole's permissions will be added + // +optional + repeated k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1; +} + // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. message ClusterRole { // Standard object's metadata. @@ -37,6 +45,12 @@ message ClusterRole { // Rules holds all the PolicyRules for this ClusterRole repeated PolicyRule rules = 2; + + // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. + // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be + // stomped by the controller. + // +optional + optional AggregationRule aggregationRule = 3; } // ClusterRoleBinding references a ClusterRole, but not contain it. 
It can reference a ClusterRole in the global namespace, diff --git a/vendor/k8s.io/api/rbac/v1alpha1/register.go b/vendor/k8s.io/api/rbac/v1alpha1/register.go index 9cfd71f451e6..0c6977685bef 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/register.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/register.go @@ -40,7 +40,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Role{}, diff --git a/vendor/k8s.io/api/rbac/v1alpha1/types.go b/vendor/k8s.io/api/rbac/v1alpha1/types.go index 06fa6ce8e8f8..843d998ec9e9 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/types.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/types.go @@ -172,6 +172,20 @@ type ClusterRole struct { // Rules holds all the PolicyRules for this ClusterRole Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` + + // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. + // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be + // stomped by the controller. + // +optional + AggregationRule *AggregationRule `json:"aggregationRule,omitempty" protobuf:"bytes,3,opt,name=aggregationRule"` +} + +// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole +type AggregationRule struct { + // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. + // If any of the selectors match, then the ClusterRole's permissions will be added + // +optional + ClusterRoleSelectors []metav1.LabelSelector `json:"clusterRoleSelectors,omitempty" protobuf:"bytes,1,rep,name=clusterRoleSelectors"` } // +genclient diff --git a/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go index d58a722af17e..e56cd0f10159 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go @@ -27,10 +27,20 @@ package v1alpha1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE +var map_AggregationRule = map[string]string{ + "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", + "clusterRoleSelectors": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added", +} + +func (AggregationRule) SwaggerDoc() map[string]string { + return map_AggregationRule +} + var map_ClusterRole = map[string]string{ - "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", - "metadata": "Standard object's metadata.", - "rules": "Rules holds all the PolicyRules for this ClusterRole", + "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", + "metadata": "Standard object's metadata.", + "rules": "Rules holds all the PolicyRules for this ClusterRole", + "aggregationRule": "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. 
If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.", } func (ClusterRole) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go index e4cab8325349..abbb994fdad9 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go @@ -21,66 +21,31 @@ limitations under the License. package v1alpha1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterRole).DeepCopyInto(out.(*ClusterRole)) - return nil - }, InType: reflect.TypeOf(&ClusterRole{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterRoleBinding).DeepCopyInto(out.(*ClusterRoleBinding)) - return nil - }, InType: reflect.TypeOf(&ClusterRoleBinding{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterRoleBindingList).DeepCopyInto(out.(*ClusterRoleBindingList)) - return nil - }, InType: reflect.TypeOf(&ClusterRoleBindingList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterRoleList).DeepCopyInto(out.(*ClusterRoleList)) - return nil - }, InType: reflect.TypeOf(&ClusterRoleList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PolicyRule).DeepCopyInto(out.(*PolicyRule)) - return nil - }, InType: reflect.TypeOf(&PolicyRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Role).DeepCopyInto(out.(*Role)) - return nil - }, InType: reflect.TypeOf(&Role{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RoleBinding).DeepCopyInto(out.(*RoleBinding)) - return nil - }, InType: reflect.TypeOf(&RoleBinding{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RoleBindingList).DeepCopyInto(out.(*RoleBindingList)) - return nil - }, InType: reflect.TypeOf(&RoleBindingList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RoleList).DeepCopyInto(out.(*RoleList)) - return nil - }, InType: reflect.TypeOf(&RoleList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RoleRef).DeepCopyInto(out.(*RoleRef)) - return nil - }, InType: reflect.TypeOf(&RoleRef{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Subject).DeepCopyInto(out.(*Subject)) - return nil - }, InType: reflect.TypeOf(&Subject{})}, - ) +// DeepCopyInto is an 
autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AggregationRule) DeepCopyInto(out *AggregationRule) { + *out = *in + if in.ClusterRoleSelectors != nil { + in, out := &in.ClusterRoleSelectors, &out.ClusterRoleSelectors + *out = make([]v1.LabelSelector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AggregationRule. +func (in *AggregationRule) DeepCopy() *AggregationRule { + if in == nil { + return nil + } + out := new(AggregationRule) + in.DeepCopyInto(out) + return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -95,6 +60,15 @@ func (in *ClusterRole) DeepCopyInto(out *ClusterRole) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.AggregationRule != nil { + in, out := &in.AggregationRule, &out.AggregationRule + if *in == nil { + *out = nil + } else { + *out = new(AggregationRule) + (*in).DeepCopyInto(*out) + } + } return } diff --git a/vendor/k8s.io/api/rbac/v1beta1/BUILD b/vendor/k8s.io/api/rbac/v1beta1/BUILD index 6a38064dd240..c9aa763f410f 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/BUILD +++ b/vendor/k8s.io/api/rbac/v1beta1/BUILD @@ -15,10 +15,10 @@ go_library( "types_swagger_doc_generated.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/api/rbac/v1beta1", deps = [ "//vendor/github.com/gogo/protobuf/proto:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], diff --git a/vendor/k8s.io/api/rbac/v1beta1/doc.go b/vendor/k8s.io/api/rbac/v1beta1/doc.go index 5633737172d0..e2d442626ff1 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/doc.go +++ b/vendor/k8s.io/api/rbac/v1beta1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true // +groupName=rbac.authorization.k8s.io diff --git a/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go b/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go index 9cb4935c022a..8cb2c4bec6e7 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go @@ -25,6 +25,7 @@ limitations under the License. k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1beta1/generated.proto It has these top-level messages: + AggregationRule ClusterRole ClusterRoleBinding ClusterRoleBindingList @@ -43,6 +44,8 @@ import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + import strings "strings" import reflect "reflect" @@ -59,51 +62,56 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +func (m *AggregationRule) Reset() { *m = AggregationRule{} } +func (*AggregationRule) ProtoMessage() {} +func (*AggregationRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + func (m *ClusterRole) Reset() { *m = ClusterRole{} } func (*ClusterRole) ProtoMessage() {} -func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } func (*ClusterRoleBinding) ProtoMessage() {} -func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } func (*ClusterRoleBindingList) ProtoMessage() {} -func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } func (*ClusterRoleList) ProtoMessage() {} -func (*ClusterRoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (*ClusterRoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } func (m *PolicyRule) Reset() { *m = PolicyRule{} } func (*PolicyRule) ProtoMessage() {} -func (*PolicyRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (*PolicyRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } func (m *Role) Reset() { *m = Role{} } func (*Role) ProtoMessage() {} -func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } func (m *RoleBinding) Reset() { *m = RoleBinding{} } func (*RoleBinding) ProtoMessage() {} -func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } func (*RoleBindingList) ProtoMessage() {} -func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } func (m *RoleList) Reset() { *m = RoleList{} } func (*RoleList) ProtoMessage() {} -func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } func (m *RoleRef) Reset() { *m = RoleRef{} } func (*RoleRef) ProtoMessage() {} -func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } func (m *Subject) Reset() { *m = Subject{} } func (*Subject) ProtoMessage() {} -func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } func init() { + proto.RegisterType((*AggregationRule)(nil), "k8s.io.api.rbac.v1beta1.AggregationRule") 
proto.RegisterType((*ClusterRole)(nil), "k8s.io.api.rbac.v1beta1.ClusterRole") proto.RegisterType((*ClusterRoleBinding)(nil), "k8s.io.api.rbac.v1beta1.ClusterRoleBinding") proto.RegisterType((*ClusterRoleBindingList)(nil), "k8s.io.api.rbac.v1beta1.ClusterRoleBindingList") @@ -116,6 +124,36 @@ func init() { proto.RegisterType((*RoleRef)(nil), "k8s.io.api.rbac.v1beta1.RoleRef") proto.RegisterType((*Subject)(nil), "k8s.io.api.rbac.v1beta1.Subject") } +func (m *AggregationRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AggregationRule) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ClusterRoleSelectors) > 0 { + for _, msg := range m.ClusterRoleSelectors { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + func (m *ClusterRole) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -151,6 +189,16 @@ func (m *ClusterRole) MarshalTo(dAtA []byte) (int, error) { i += n } } + if m.AggregationRule != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AggregationRule.Size())) + n2, err := m.AggregationRule.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } return i, nil } @@ -172,11 +220,11 @@ func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n2, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n2 + i += n3 if len(m.Subjects) > 0 { for _, msg := range m.Subjects { dAtA[i] = 0x12 @@ -192,11 +240,11 @@ func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n3, err := m.RoleRef.MarshalTo(dAtA[i:]) + n4, err := m.RoleRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n3 + i += n4 return i, nil } @@ -218,11 +266,11 @@ func (m *ClusterRoleBindingList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n4, err := m.ListMeta.MarshalTo(dAtA[i:]) + n5, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n4 + i += n5 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -256,11 +304,11 @@ func (m *ClusterRoleList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(dAtA[i:]) + n6, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n5 + i += n6 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -387,11 +435,11 @@ func (m *Role) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n6, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n6 + i += n7 if len(m.Rules) > 0 { for _, msg := range m.Rules { dAtA[i] = 0x12 @@ -425,11 +473,11 @@ func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n8, err := 
m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n7 + i += n8 if len(m.Subjects) > 0 { for _, msg := range m.Subjects { dAtA[i] = 0x12 @@ -445,11 +493,11 @@ func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n8, err := m.RoleRef.MarshalTo(dAtA[i:]) + n9, err := m.RoleRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n8 + i += n9 return i, nil } @@ -471,11 +519,11 @@ func (m *RoleBindingList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n9, err := m.ListMeta.MarshalTo(dAtA[i:]) + n10, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n9 + i += n10 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -509,11 +557,11 @@ func (m *RoleList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n10, err := m.ListMeta.MarshalTo(dAtA[i:]) + n11, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n10 + i += n11 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -620,6 +668,18 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return offset + 1 } +func (m *AggregationRule) Size() (n int) { + var l int + _ = l + if len(m.ClusterRoleSelectors) > 0 { + for _, e := range m.ClusterRoleSelectors { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *ClusterRole) Size() (n int) { var l int _ = l @@ -631,6 +691,10 @@ func (m *ClusterRole) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.AggregationRule != nil { + l = m.AggregationRule.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -811,6 +875,16 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *AggregationRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AggregationRule{`, + `ClusterRoleSelectors:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ClusterRoleSelectors), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} func (this *ClusterRole) String() string { if this == nil { return "nil" @@ -818,6 +892,7 @@ func (this *ClusterRole) String() string { s := strings.Join([]string{`&ClusterRole{`, `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, + `AggregationRule:` + strings.Replace(fmt.Sprintf("%v", this.AggregationRule), "AggregationRule", "AggregationRule", 1) + `,`, `}`, }, "") return s @@ -948,6 +1023,87 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } +func (m *AggregationRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := 
int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AggregationRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AggregationRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterRoleSelectors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{}) + if err := m.ClusterRoleSelectors[len(m.ClusterRoleSelectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ClusterRole) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -1038,6 +1194,39 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AggregationRule", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AggregationRule == nil { + m.AggregationRule = &AggregationRule{} + } + if err := m.AggregationRule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -2504,52 +2693,58 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 751 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x94, 0xcd, 0x6e, 0xd3, 0x4a, - 0x14, 0xc7, 0xe3, 0x7c, 0x28, 0xf1, 0xe4, 0x46, 0xb9, 0xf5, 0x95, 0xee, 0xb5, 0x2a, 0x5d, 0x27, - 0x0a, 0x2c, 0x2a, 0x95, 0xda, 0xb4, 0x20, 0x60, 0x83, 0x04, 0x66, 0x01, 0x55, 0x4b, 0xa8, 0x06, - 0xc1, 0x02, 0xb1, 0x60, 0xe2, 0x4c, 0xd3, 0x21, 0xf1, 0x87, 0x66, 0xc6, 0x91, 0x2a, 0x36, 0x3c, - 0x00, 0x0b, 0x24, 0x5e, 0x83, 0x15, 0x3b, 0x78, 0x82, 0x2c, 0xbb, 0xec, 0x2a, 0xa2, 0xe6, 0x41, - 0x40, 0x33, 0xb6, 0xe3, 0xa4, 0x69, 0xda, 0xac, 0x22, 0x21, 0xb1, 0x4a, 0xe6, 0x9c, 0xdf, 0xf9, - 0x9f, 0x0f, 0xcf, 0x1c, 0xf0, 0xa0, 0x7f, 0x8f, 0x99, 0xc4, 0xb7, 0xfa, 0x61, 0x07, 0x53, 0x0f, - 0x73, 0xcc, 0xac, 0x21, 0xf6, 0xba, 0x3e, 0xb5, 0x12, 0x07, 0x0a, 0x88, 0x45, 0x3b, 0xc8, 0xb1, - 0x86, 0xdb, 0x1d, 0xcc, 0xd1, 0xb6, 0xd5, 0xc3, 0x1e, 0xa6, 0x88, 0xe3, 0xae, 0x19, 0x50, 0x9f, - 0xfb, 0xda, 0x7f, 0x31, 0x68, 0xa2, 0x80, 0x98, 0x02, 0x34, 0x13, 0x70, 0x7d, 0xab, 0x47, 0xf8, - 0x51, 0xd8, 0x31, 0x1d, 0xdf, 0xb5, 0x7a, 0x7e, 0xcf, 0xb7, 0x24, 0xdf, 
0x09, 0x0f, 0xe5, 0x49, - 0x1e, 0xe4, 0xbf, 0x58, 0x67, 0xfd, 0x76, 0x96, 0xd0, 0x45, 0xce, 0x11, 0xf1, 0x30, 0x3d, 0xb6, - 0x82, 0x7e, 0x4f, 0x18, 0x98, 0xe5, 0x62, 0x8e, 0xac, 0xe1, 0x5c, 0xf6, 0x75, 0x6b, 0x51, 0x14, - 0x0d, 0x3d, 0x4e, 0x5c, 0x3c, 0x17, 0x70, 0xe7, 0xaa, 0x00, 0xe6, 0x1c, 0x61, 0x17, 0xcd, 0xc5, - 0xdd, 0x5a, 0x14, 0x17, 0x72, 0x32, 0xb0, 0x88, 0xc7, 0x19, 0xa7, 0xe7, 0x83, 0x5a, 0x5f, 0x15, - 0x50, 0x7d, 0x34, 0x08, 0x19, 0xc7, 0x14, 0xfa, 0x03, 0xac, 0xbd, 0x01, 0x15, 0xd1, 0x48, 0x17, - 0x71, 0xa4, 0x2b, 0x4d, 0x65, 0xa3, 0xba, 0x73, 0xd3, 0xcc, 0xc6, 0x37, 0xd1, 0x35, 0x83, 0x7e, - 0x4f, 0x18, 0x98, 0x29, 0x68, 0x73, 0xb8, 0x6d, 0x3e, 0xeb, 0xbc, 0xc5, 0x0e, 0x7f, 0x8a, 0x39, - 0xb2, 0xb5, 0xd1, 0xb8, 0x91, 0x8b, 0xc6, 0x0d, 0x90, 0xd9, 0xe0, 0x44, 0x55, 0x7b, 0x02, 0x4a, - 0x34, 0x1c, 0x60, 0xa6, 0xe7, 0x9b, 0x85, 0x8d, 0xea, 0xce, 0x35, 0x73, 0xc1, 0xd7, 0x31, 0x0f, - 0xfc, 0x01, 0x71, 0x8e, 0x61, 0x38, 0xc0, 0x76, 0x2d, 0x51, 0x2c, 0x89, 0x13, 0x83, 0xb1, 0x40, - 0xeb, 0x53, 0x1e, 0x68, 0x53, 0xb5, 0xdb, 0xc4, 0xeb, 0x12, 0xaf, 0xb7, 0x82, 0x16, 0xda, 0xa0, - 0xc2, 0x42, 0xe9, 0x48, 0xbb, 0x68, 0x2e, 0xec, 0xe2, 0x79, 0x0c, 0xda, 0x7f, 0x27, 0x8a, 0x95, - 0xc4, 0xc0, 0xe0, 0x44, 0x43, 0xdb, 0x03, 0x65, 0xea, 0x0f, 0x30, 0xc4, 0x87, 0x7a, 0x41, 0x16, - 0xbc, 0x58, 0x0e, 0xc6, 0x9c, 0x5d, 0x4f, 0xe4, 0xca, 0x89, 0x01, 0xa6, 0x0a, 0xad, 0x91, 0x02, - 0xfe, 0x9d, 0x9f, 0xca, 0x3e, 0x61, 0x5c, 0x7b, 0x3d, 0x37, 0x19, 0x73, 0xb9, 0xc9, 0x88, 0x68, - 0x39, 0x97, 0x49, 0x17, 0xa9, 0x65, 0x6a, 0x2a, 0x07, 0xa0, 0x44, 0x38, 0x76, 0xd3, 0x91, 0x6c, - 0x2e, 0xec, 0x61, 0xbe, 0xba, 0xec, 0x03, 0xef, 0x0a, 0x05, 0x18, 0x0b, 0xb5, 0xbe, 0x29, 0xa0, - 0x3e, 0x05, 0xaf, 0xa0, 0x87, 0xdd, 0xd9, 0x1e, 0xae, 0x2f, 0xd5, 0xc3, 0xc5, 0xc5, 0xff, 0x54, - 0x00, 0xc8, 0xae, 0xb0, 0xd6, 0x00, 0xa5, 0x21, 0xa6, 0x1d, 0xa6, 0x2b, 0xcd, 0xc2, 0x86, 0x6a, - 0xab, 0x82, 0x7f, 0x29, 0x0c, 0x30, 0xb6, 0x6b, 0x9b, 0x40, 0x45, 0x01, 0x79, 0x4c, 0xfd, 0x30, - 0x88, 0xd3, 0xab, 0x76, 0x2d, 0x1a, 0x37, 0xd4, 0x87, 0x07, 0xbb, 0xb1, 0x11, 0x66, 0x7e, 0x01, - 0x53, 0xcc, 0xfc, 0x90, 0x3a, 0x98, 0xe9, 0x85, 0x0c, 0x86, 0xa9, 0x11, 0x66, 0x7e, 0xed, 0x2e, - 0xa8, 0xa5, 0x87, 0x36, 0x72, 0x31, 0xd3, 0x8b, 0x32, 0x60, 0x2d, 0x1a, 0x37, 0x6a, 0x70, 0xda, - 0x01, 0x67, 0x39, 0xed, 0x3e, 0xa8, 0x7b, 0xbe, 0x97, 0x22, 0x2f, 0xe0, 0x3e, 0xd3, 0x4b, 0x32, - 0xf4, 0x9f, 0x68, 0xdc, 0xa8, 0xb7, 0x67, 0x5d, 0xf0, 0x3c, 0xdb, 0xfa, 0xa2, 0x80, 0xe2, 0x6f, - 0xb7, 0x54, 0x3e, 0xe4, 0x41, 0xf5, 0xcf, 0x36, 0x99, 0x6c, 0x13, 0xf1, 0x04, 0x57, 0xbb, 0x46, - 0x96, 0x7e, 0x82, 0x57, 0xef, 0x8f, 0xcf, 0x0a, 0xa8, 0xac, 0x68, 0x71, 0xd8, 0xb3, 0x55, 0xff, - 0x7f, 0x79, 0xd5, 0x17, 0x97, 0xfb, 0x0e, 0xa4, 0xf3, 0xd7, 0x6e, 0x80, 0x4a, 0xfa, 0xd8, 0x65, - 0xb1, 0x6a, 0x96, 0x3c, 0xdd, 0x07, 0x70, 0x42, 0x68, 0x4d, 0x50, 0xec, 0x13, 0xaf, 0xab, 0xe7, - 0x25, 0xf9, 0x57, 0x42, 0x16, 0xf7, 0x88, 0xd7, 0x85, 0xd2, 0x23, 0x08, 0x0f, 0xb9, 0x58, 0x5e, - 0x88, 0x29, 0x42, 0x3c, 0x73, 0x28, 0x3d, 0x62, 0x56, 0xe5, 0xe4, 0x32, 0x4d, 0xf4, 0x94, 0x85, - 0x7a, 0xd3, 0xf5, 0xe5, 0x97, 0xa9, 0xef, 0xf2, 0xec, 0x9a, 0x05, 0x54, 0xf1, 0xcb, 0x02, 0xe4, - 0x60, 0xbd, 0x28, 0xb1, 0xb5, 0x04, 0x53, 0xdb, 0xa9, 0x03, 0x66, 0x8c, 0xbd, 0x35, 0x3a, 0x33, - 0x72, 0x27, 0x67, 0x46, 0xee, 0xf4, 0xcc, 0xc8, 0xbd, 0x8f, 0x0c, 0x65, 0x14, 0x19, 0xca, 0x49, - 0x64, 0x28, 0xa7, 0x91, 0xa1, 0x7c, 0x8f, 0x0c, 0xe5, 0xe3, 0x0f, 0x23, 0xf7, 0xaa, 0x9c, 0x4c, - 0xfd, 0x57, 0x00, 0x00, 0x00, 0xff, 0xff, 0x74, 0x24, 0x6a, 0xfa, 0x45, 0x0a, 0x00, 0x00, + // 833 
bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xbf, 0x8f, 0xe3, 0x44, + 0x14, 0xce, 0x64, 0x13, 0x6d, 0x3c, 0xcb, 0x2a, 0xdc, 0x70, 0x02, 0x6b, 0x05, 0xce, 0x2a, 0x50, + 0x44, 0x3a, 0xce, 0x66, 0xef, 0x10, 0xd0, 0x20, 0x71, 0xa6, 0x80, 0xd5, 0x2d, 0x61, 0x35, 0x27, + 0x28, 0x10, 0x05, 0x63, 0x67, 0xce, 0x19, 0xe2, 0x5f, 0x9a, 0x19, 0x47, 0x3a, 0xd1, 0xd0, 0xd0, + 0x51, 0x20, 0x51, 0xd1, 0x52, 0x53, 0x51, 0xf2, 0x17, 0xa4, 0xbc, 0xf2, 0xaa, 0x88, 0x35, 0x7f, + 0x08, 0x68, 0xfc, 0x23, 0x4e, 0xe2, 0xf8, 0x2e, 0x55, 0x24, 0xa4, 0xab, 0x76, 0xe7, 0xbd, 0xef, + 0x7d, 0xef, 0x7b, 0x9f, 0x67, 0x5e, 0xe0, 0x27, 0xb3, 0x8f, 0x84, 0xc9, 0x22, 0x6b, 0x96, 0x38, + 0x94, 0x87, 0x54, 0x52, 0x61, 0xcd, 0x69, 0x38, 0x89, 0xb8, 0x55, 0x24, 0x48, 0xcc, 0x2c, 0xee, + 0x10, 0xd7, 0x9a, 0x5f, 0x38, 0x54, 0x92, 0x0b, 0xcb, 0xa3, 0x21, 0xe5, 0x44, 0xd2, 0x89, 0x19, + 0xf3, 0x48, 0x46, 0xe8, 0x8d, 0x1c, 0x68, 0x92, 0x98, 0x99, 0x0a, 0x68, 0x16, 0xc0, 0xb3, 0xbb, + 0x1e, 0x93, 0xd3, 0xc4, 0x31, 0xdd, 0x28, 0xb0, 0xbc, 0xc8, 0x8b, 0xac, 0x0c, 0xef, 0x24, 0x8f, + 0xb3, 0x53, 0x76, 0xc8, 0xfe, 0xcb, 0x79, 0xce, 0x46, 0xf5, 0x86, 0xc4, 0x8f, 0xa7, 0xf5, 0x8e, + 0x67, 0xef, 0x57, 0xc8, 0x80, 0xb8, 0x53, 0x16, 0x52, 0xfe, 0xc4, 0x8a, 0x67, 0x9e, 0x0a, 0x08, + 0x2b, 0xa0, 0x92, 0x58, 0xf3, 0x7a, 0x95, 0xd5, 0x54, 0xc5, 0x93, 0x50, 0xb2, 0x80, 0xd6, 0x0a, + 0x3e, 0x78, 0x51, 0x81, 0x70, 0xa7, 0x34, 0x20, 0xb5, 0xba, 0xfb, 0x4d, 0x75, 0x89, 0x64, 0xbe, + 0xc5, 0x42, 0x29, 0x24, 0xdf, 0x2e, 0x1a, 0xfe, 0x06, 0x60, 0xff, 0x81, 0xe7, 0x71, 0xea, 0x11, + 0xc9, 0xa2, 0x10, 0x27, 0x3e, 0x45, 0x3f, 0x01, 0x78, 0xdb, 0xf5, 0x13, 0x21, 0x29, 0xc7, 0x91, + 0x4f, 0x1f, 0x51, 0x9f, 0xba, 0x32, 0xe2, 0x42, 0x07, 0xe7, 0x47, 0xa3, 0x93, 0x7b, 0xf7, 0xcd, + 0xca, 0xf9, 0x55, 0x23, 0x33, 0x9e, 0x79, 0x2a, 0x20, 0x4c, 0xe5, 0x83, 0x39, 0xbf, 0x30, 0xaf, + 0x88, 0x43, 0xfd, 0xb2, 0xd6, 0x7e, 0x73, 0xb1, 0x1c, 0xb4, 0xd2, 0xe5, 0xe0, 0xf6, 0xa7, 0x3b, + 0x88, 0xf1, 0xce, 0x76, 0xc3, 0xdf, 0xdb, 0xf0, 0x64, 0x0d, 0x8e, 0xbe, 0x83, 0x3d, 0x45, 0x3e, + 0x21, 0x92, 0xe8, 0xe0, 0x1c, 0x8c, 0x4e, 0xee, 0xbd, 0xb7, 0x9f, 0x94, 0x2f, 0x9d, 0xef, 0xa9, + 0x2b, 0xbf, 0xa0, 0x92, 0xd8, 0xa8, 0xd0, 0x01, 0xab, 0x18, 0x5e, 0xb1, 0xa2, 0xcf, 0x61, 0x97, + 0x27, 0x3e, 0x15, 0x7a, 0x3b, 0x9b, 0xf4, 0x6d, 0xb3, 0xe1, 0x8e, 0x99, 0xd7, 0x91, 0xcf, 0xdc, + 0x27, 0xca, 0x2d, 0xfb, 0xb4, 0x60, 0xec, 0xaa, 0x93, 0xc0, 0x39, 0x01, 0xf2, 0x60, 0x9f, 0x6c, + 0xda, 0xaa, 0x1f, 0x65, 0x92, 0x47, 0x8d, 0x9c, 0x5b, 0x9f, 0xc1, 0x7e, 0x2d, 0x5d, 0x0e, 0xb6, + 0xbf, 0x0d, 0xde, 0x66, 0x1d, 0xfe, 0xda, 0x86, 0x68, 0xcd, 0x24, 0x9b, 0x85, 0x13, 0x16, 0x7a, + 0x07, 0xf0, 0x6a, 0x0c, 0x7b, 0x22, 0xc9, 0x12, 0xa5, 0x5d, 0xe7, 0x8d, 0xa3, 0x3d, 0xca, 0x81, + 0xf6, 0xab, 0x05, 0x63, 0xaf, 0x08, 0x08, 0xbc, 0xe2, 0x40, 0x0f, 0xe1, 0x31, 0x8f, 0x7c, 0x8a, + 0xe9, 0xe3, 0xc2, 0xa9, 0x66, 0x3a, 0x9c, 0xe3, 0xec, 0x7e, 0x41, 0x77, 0x5c, 0x04, 0x70, 0xc9, + 0x30, 0x5c, 0x00, 0xf8, 0x7a, 0xdd, 0x95, 0x2b, 0x26, 0x24, 0xfa, 0xb6, 0xe6, 0x8c, 0xb9, 0xe7, + 0x85, 0x66, 0x22, 0xf7, 0x65, 0x35, 0x45, 0x19, 0x59, 0x73, 0xe5, 0x1a, 0x76, 0x99, 0xa4, 0x41, + 0x69, 0xc9, 0x9d, 0xc6, 0x19, 0xea, 0xea, 0xaa, 0x9b, 0x74, 0xa9, 0x18, 0x70, 0x4e, 0x34, 0xfc, + 0x0b, 0xc0, 0xfe, 0x1a, 0xf8, 0x00, 0x33, 0x5c, 0x6e, 0xce, 0xf0, 0xce, 0x5e, 0x33, 0xec, 0x16, + 0xff, 0x2f, 0x80, 0xb0, 0x7a, 0x2b, 0x68, 0x00, 0xbb, 0x73, 0xca, 0x9d, 0x7c, 0x93, 0x68, 0xb6, + 0xa6, 0xf0, 0x5f, 0xab, 0x00, 0xce, 0xe3, 0xe8, 0x0e, 0xd4, 0x48, 0xcc, 0x3e, 0xe3, 
0x51, 0x12, + 0xe7, 0xed, 0x35, 0xfb, 0x34, 0x5d, 0x0e, 0xb4, 0x07, 0xd7, 0x97, 0x79, 0x10, 0x57, 0x79, 0x05, + 0xe6, 0x54, 0x44, 0x09, 0x77, 0xa9, 0xd0, 0x8f, 0x2a, 0x30, 0x2e, 0x83, 0xb8, 0xca, 0xa3, 0x0f, + 0xe1, 0x69, 0x79, 0x18, 0x93, 0x80, 0x0a, 0xbd, 0x93, 0x15, 0xdc, 0x4a, 0x97, 0x83, 0x53, 0xbc, + 0x9e, 0xc0, 0x9b, 0x38, 0xf4, 0x31, 0xec, 0x87, 0x51, 0x58, 0x42, 0xbe, 0xc2, 0x57, 0x42, 0xef, + 0x66, 0xa5, 0xd9, 0xfb, 0x1c, 0x6f, 0xa6, 0xf0, 0x36, 0x76, 0xf8, 0x27, 0x80, 0x9d, 0xff, 0xdb, + 0xf6, 0x1a, 0xfe, 0xdc, 0x86, 0x27, 0x2f, 0xb7, 0xc9, 0x6a, 0x9b, 0xa8, 0x27, 0x78, 0xd8, 0x35, + 0xb2, 0xf7, 0x13, 0x7c, 0xf1, 0xfe, 0xf8, 0x03, 0xc0, 0xde, 0x81, 0x16, 0x87, 0xbd, 0xa9, 0xfa, + 0xad, 0xe7, 0xab, 0xde, 0x2d, 0xf7, 0x07, 0x58, 0xfa, 0x8f, 0xde, 0x85, 0xbd, 0xf2, 0xb1, 0x67, + 0x62, 0xb5, 0xaa, 0x79, 0xb9, 0x0f, 0xf0, 0x0a, 0x81, 0xce, 0x61, 0x67, 0xc6, 0xc2, 0x89, 0xde, + 0xce, 0x90, 0xaf, 0x14, 0xc8, 0xce, 0x43, 0x16, 0x4e, 0x70, 0x96, 0x51, 0x88, 0x90, 0x04, 0xf9, + 0x0f, 0xf1, 0x1a, 0x42, 0x3d, 0x73, 0x9c, 0x65, 0x94, 0x57, 0xc7, 0xc5, 0x65, 0x5a, 0xf1, 0x81, + 0x46, 0xbe, 0x75, 0x7d, 0xed, 0x7d, 0xf4, 0x3d, 0xbf, 0x3b, 0xb2, 0xa0, 0xa6, 0xfe, 0x8a, 0x98, + 0xb8, 0x54, 0xef, 0x64, 0xb0, 0x5b, 0x05, 0x4c, 0x1b, 0x97, 0x09, 0x5c, 0x61, 0xec, 0xbb, 0x8b, + 0x1b, 0xa3, 0xf5, 0xf4, 0xc6, 0x68, 0x3d, 0xbb, 0x31, 0x5a, 0x3f, 0xa6, 0x06, 0x58, 0xa4, 0x06, + 0x78, 0x9a, 0x1a, 0xe0, 0x59, 0x6a, 0x80, 0xbf, 0x53, 0x03, 0xfc, 0xf2, 0x8f, 0xd1, 0xfa, 0xe6, + 0xb8, 0x70, 0xfd, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd1, 0x99, 0xaf, 0xff, 0x74, 0x0b, 0x00, + 0x00, } diff --git a/vendor/k8s.io/api/rbac/v1beta1/generated.proto b/vendor/k8s.io/api/rbac/v1beta1/generated.proto index 6469de720a52..975de1096b45 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1beta1/generated.proto @@ -21,6 +21,7 @@ syntax = 'proto2'; package k8s.io.api.rbac.v1beta1; +import "k8s.io/api/rbac/v1alpha1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -29,6 +30,14 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1beta1"; +// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole +message AggregationRule { + // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. + // If any of the selectors match, then the ClusterRole's permissions will be added + // +optional + repeated k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1; +} + // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. message ClusterRole { // Standard object's metadata. @@ -37,6 +46,12 @@ message ClusterRole { // Rules holds all the PolicyRules for this ClusterRole repeated PolicyRule rules = 2; + + // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. + // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be + // stomped by the controller. + // +optional + optional AggregationRule aggregationRule = 3; } // ClusterRoleBinding references a ClusterRole, but not contain it. 
It can reference a ClusterRole in the global namespace, @@ -85,7 +100,8 @@ message PolicyRule { // +optional repeated string apiGroups = 2; - // Resources is a list of resources this rule applies to. ResourceAll represents all resources. + // Resources is a list of resources this rule applies to. '*' represents all resources in the specified apiGroups. + // '*/foo' represents the subresource 'foo' for all resources in the specified apiGroups. // +optional repeated string resources = 3; diff --git a/vendor/k8s.io/api/rbac/v1beta1/register.go b/vendor/k8s.io/api/rbac/v1beta1/register.go index 8f60f01953f0..c8526a656bac 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/register.go +++ b/vendor/k8s.io/api/rbac/v1beta1/register.go @@ -40,7 +40,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Role{}, diff --git a/vendor/k8s.io/api/rbac/v1beta1/types.go b/vendor/k8s.io/api/rbac/v1beta1/types.go index 30f95a774053..091fc1dc95f9 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/types.go +++ b/vendor/k8s.io/api/rbac/v1beta1/types.go @@ -54,7 +54,8 @@ type PolicyRule struct { // the enumerated resources in any API group will be allowed. // +optional APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,2,rep,name=apiGroups"` - // Resources is a list of resources this rule applies to. ResourceAll represents all resources. + // Resources is a list of resources this rule applies to. '*' represents all resources in the specified apiGroups. + // '*/foo' represents the subresource 'foo' for all resources in the specified apiGroups. // +optional Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. @@ -170,6 +171,19 @@ type ClusterRole struct { // Rules holds all the PolicyRules for this ClusterRole Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` + // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. + // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be + // stomped by the controller. + // +optional + AggregationRule *AggregationRule `json:"aggregationRule,omitempty" protobuf:"bytes,3,opt,name=aggregationRule"` +} + +// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole +type AggregationRule struct { + // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. 
+ // If any of the selectors match, then the ClusterRole's permissions will be added + // +optional + ClusterRoleSelectors []metav1.LabelSelector `json:"clusterRoleSelectors,omitempty" protobuf:"bytes,1,rep,name=clusterRoleSelectors"` } // +genclient diff --git a/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go index 1463d8feac07..6180d6d43e7e 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go @@ -27,10 +27,20 @@ package v1beta1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE +var map_AggregationRule = map[string]string{ + "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", + "clusterRoleSelectors": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added", +} + +func (AggregationRule) SwaggerDoc() map[string]string { + return map_AggregationRule +} + var map_ClusterRole = map[string]string{ - "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", - "metadata": "Standard object's metadata.", - "rules": "Rules holds all the PolicyRules for this ClusterRole", + "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", + "metadata": "Standard object's metadata.", + "rules": "Rules holds all the PolicyRules for this ClusterRole", + "aggregationRule": "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.", } func (ClusterRole) SwaggerDoc() map[string]string { @@ -72,7 +82,7 @@ var map_PolicyRule = map[string]string{ "": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.", "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.", "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.", - "resources": "Resources is a list of resources this rule applies to. ResourceAll represents all resources.", + "resources": "Resources is a list of resources this rule applies to. '*' represents all resources in the specified apiGroups. '*/foo' represents the subresource 'foo' for all resources in the specified apiGroups.", "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", "nonResourceURLs": "NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. 
Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both.", } diff --git a/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go index 922727646f24..ac23895615d9 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go @@ -21,66 +21,31 @@ limitations under the License. package v1beta1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterRole).DeepCopyInto(out.(*ClusterRole)) - return nil - }, InType: reflect.TypeOf(&ClusterRole{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterRoleBinding).DeepCopyInto(out.(*ClusterRoleBinding)) - return nil - }, InType: reflect.TypeOf(&ClusterRoleBinding{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterRoleBindingList).DeepCopyInto(out.(*ClusterRoleBindingList)) - return nil - }, InType: reflect.TypeOf(&ClusterRoleBindingList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterRoleList).DeepCopyInto(out.(*ClusterRoleList)) - return nil - }, InType: reflect.TypeOf(&ClusterRoleList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PolicyRule).DeepCopyInto(out.(*PolicyRule)) - return nil - }, InType: reflect.TypeOf(&PolicyRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Role).DeepCopyInto(out.(*Role)) - return nil - }, InType: reflect.TypeOf(&Role{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RoleBinding).DeepCopyInto(out.(*RoleBinding)) - return nil - }, InType: reflect.TypeOf(&RoleBinding{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RoleBindingList).DeepCopyInto(out.(*RoleBindingList)) - return nil - }, InType: reflect.TypeOf(&RoleBindingList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RoleList).DeepCopyInto(out.(*RoleList)) - return nil - }, InType: reflect.TypeOf(&RoleList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RoleRef).DeepCopyInto(out.(*RoleRef)) - return nil - }, InType: reflect.TypeOf(&RoleRef{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Subject).DeepCopyInto(out.(*Subject)) - return nil - }, InType: reflect.TypeOf(&Subject{})}, - ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, 
writing into out. in must be non-nil. +func (in *AggregationRule) DeepCopyInto(out *AggregationRule) { + *out = *in + if in.ClusterRoleSelectors != nil { + in, out := &in.ClusterRoleSelectors, &out.ClusterRoleSelectors + *out = make([]v1.LabelSelector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AggregationRule. +func (in *AggregationRule) DeepCopy() *AggregationRule { + if in == nil { + return nil + } + out := new(AggregationRule) + in.DeepCopyInto(out) + return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -95,6 +60,15 @@ func (in *ClusterRole) DeepCopyInto(out *ClusterRole) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.AggregationRule != nil { + in, out := &in.AggregationRule, &out.AggregationRule + if *in == nil { + *out = nil + } else { + *out = new(AggregationRule) + (*in).DeepCopyInto(*out) + } + } return } diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/BUILD b/vendor/k8s.io/api/scheduling/v1alpha1/BUILD index 6a38064dd240..c8c197664726 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/BUILD +++ b/vendor/k8s.io/api/scheduling/v1alpha1/BUILD @@ -15,10 +15,10 @@ go_library( "types_swagger_doc_generated.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/api/scheduling/v1alpha1", deps = [ "//vendor/github.com/gogo/protobuf/proto:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/doc.go b/vendor/k8s.io/api/scheduling/v1alpha1/doc.go index 8c401bcf2fa7..31206998bd7a 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/doc.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true // +groupName=scheduling.k8s.io diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/register.go b/vendor/k8s.io/api/scheduling/v1alpha1/register.go index 91ce6e0cc3a0..24689f0ad841 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/register.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &PriorityClass{}, diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go index a3ae5c0ff960..fad4db662d5c 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go @@ -21,32 +21,9 @@ limitations under the License. package v1alpha1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. 
-// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PriorityClass).DeepCopyInto(out.(*PriorityClass)) - return nil - }, InType: reflect.TypeOf(&PriorityClass{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PriorityClassList).DeepCopyInto(out.(*PriorityClassList)) - return nil - }, InType: reflect.TypeOf(&PriorityClassList{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PriorityClass) DeepCopyInto(out *PriorityClass) { *out = *in diff --git a/vendor/k8s.io/api/settings/v1alpha1/BUILD b/vendor/k8s.io/api/settings/v1alpha1/BUILD index 5cc4f6f7b339..d7457427dfc4 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/BUILD +++ b/vendor/k8s.io/api/settings/v1alpha1/BUILD @@ -15,11 +15,11 @@ go_library( "types_swagger_doc_generated.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/api/settings/v1alpha1", deps = [ "//vendor/github.com/gogo/protobuf/proto:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], diff --git a/vendor/k8s.io/api/settings/v1alpha1/doc.go b/vendor/k8s.io/api/settings/v1alpha1/doc.go index 605b355574ea..483a9207c4a0 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/doc.go +++ b/vendor/k8s.io/api/settings/v1alpha1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true // +groupName=settings.k8s.io diff --git a/vendor/k8s.io/api/settings/v1alpha1/register.go b/vendor/k8s.io/api/settings/v1alpha1/register.go index 2f39af0f9b2a..eee278d95928 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/register.go +++ b/vendor/k8s.io/api/settings/v1alpha1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &PodPreset{}, diff --git a/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go index 07e6ab72d0e9..2c925622b7e9 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go @@ -22,36 +22,9 @@ package v1alpha1 import ( v1 "k8s.io/api/core/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodPreset).DeepCopyInto(out.(*PodPreset)) - return nil - }, InType: reflect.TypeOf(&PodPreset{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodPresetList).DeepCopyInto(out.(*PodPresetList)) - return nil - }, InType: reflect.TypeOf(&PodPresetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodPresetSpec).DeepCopyInto(out.(*PodPresetSpec)) - return nil - }, InType: reflect.TypeOf(&PodPresetSpec{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PodPreset) DeepCopyInto(out *PodPreset) { *out = *in diff --git a/vendor/k8s.io/api/storage/v1/BUILD b/vendor/k8s.io/api/storage/v1/BUILD index 03d98c7a1f4b..50a09f0eba2c 100644 --- a/vendor/k8s.io/api/storage/v1/BUILD +++ b/vendor/k8s.io/api/storage/v1/BUILD @@ -10,13 +10,13 @@ go_library( "types_swagger_doc_generated.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/api/storage/v1", visibility = ["//visibility:public"], deps = [ "//vendor/github.com/gogo/protobuf/proto:go_default_library", "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], diff --git a/vendor/k8s.io/api/storage/v1/doc.go b/vendor/k8s.io/api/storage/v1/doc.go index 0672c58e14e0..8f4a4045c43b 100644 --- a/vendor/k8s.io/api/storage/v1/doc.go +++ b/vendor/k8s.io/api/storage/v1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +groupName=storage.k8s.io // +k8s:openapi-gen=true package v1 diff --git a/vendor/k8s.io/api/storage/v1/generated.pb.go b/vendor/k8s.io/api/storage/v1/generated.pb.go index 4befedff1ef9..7157b72ff285 100644 --- a/vendor/k8s.io/api/storage/v1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1/generated.pb.go @@ -146,6 +146,12 @@ func (m *StorageClass) MarshalTo(dAtA []byte) (int, error) { } i++ } + if m.VolumeBindingMode != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeBindingMode))) + i += copy(dAtA[i:], *m.VolumeBindingMode) + } return i, nil } @@ -242,6 +248,10 @@ func (m *StorageClass) Size() (n int) { if m.AllowVolumeExpansion != nil { n += 2 } + if m.VolumeBindingMode != nil { + l = len(*m.VolumeBindingMode) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -293,6 +303,7 @@ func (this *StorageClass) String() string { `ReclaimPolicy:` + valueToStringGenerated(this.ReclaimPolicy) + `,`, `MountOptions:` + fmt.Sprintf("%v", this.MountOptions) + `,`, `AllowVolumeExpansion:` + valueToStringGenerated(this.AllowVolumeExpansion) + `,`, + `VolumeBindingMode:` + valueToStringGenerated(this.VolumeBindingMode) + `,`, `}`, }, "") return s @@ -600,6 +611,36 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := bool(v != 0) m.AllowVolumeExpansion = &b + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeBindingMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := VolumeBindingMode(dAtA[iNdEx:postIndex]) + m.VolumeBindingMode = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -842,43 +883,44 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 593 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x41, 0x6f, 0xd3, 0x3e, - 0x18, 0xc6, 0x9b, 0x76, 0xfd, 0x6b, 0x73, 0x37, 0xfd, 0xab, 0x30, 0xa4, 0xa8, 0x87, 0xb4, 0x1a, - 0x97, 0x0a, 0x09, 0x7b, 0xdd, 0x06, 0x9a, 0x90, 0x40, 0xa2, 0x68, 0x12, 0x48, 0x9b, 0x56, 0x05, - 0x89, 0x03, 0xe2, 0x80, 0x9b, 0xbd, 0x64, 0x26, 0x89, 0x1d, 0xd9, 0x4e, 0xa0, 0x37, 0x3e, 0x02, - 0x9f, 0x87, 0x13, 0xc7, 0x1d, 0x77, 0xdc, 0x29, 0x62, 0xe1, 0x5b, 0x70, 0x42, 0x49, 0xca, 0x92, - 0xad, 0x9d, 0xd8, 0xcd, 0x7e, 0xdf, 0xe7, 0xf7, 0xd8, 0x7e, 0xfd, 0xa0, 0xe7, 0xfe, 0xbe, 0xc2, - 0x4c, 0x10, 0x3f, 0x9e, 0x82, 0xe4, 0xa0, 0x41, 0x91, 0x04, 0xf8, 0x89, 0x90, 0x64, 0xde, 0xa0, - 0x11, 0x23, 0x4a, 0x0b, 0x49, 0x3d, 0x20, 0xc9, 0x88, 0x78, 0xc0, 0x41, 0x52, 0x0d, 0x27, 0x38, - 0x92, 0x42, 0x0b, 0xf3, 0x7e, 0x29, 0xc3, 0x34, 0x62, 0x78, 0x2e, 0xc3, 0xc9, 0xa8, 0xf7, 0xc8, - 0x63, 0xfa, 0x34, 0x9e, 0x62, 0x57, 0x84, 0xc4, 0x13, 0x9e, 0x20, 0x85, 0x7a, 0x1a, 0x7f, 0x2c, - 0x76, 0xc5, 0xa6, 0x58, 0x95, 0x2e, 0xbd, 0x87, 0x4b, 0x0f, 0x9b, 0x82, 0xa6, 0x0b, 0x27, 0xf6, - 0xf6, 0x2a, 0x6d, 0x48, 0xdd, 0x53, 0xc6, 0x41, 0xce, 0x48, 0xe4, 0x7b, 0x79, 0x41, 0x91, 0x10, - 0x34, 0x5d, 0x72, 0xcf, 0x1e, 0xb9, 0x8d, 0x92, 0x31, 0xd7, 0x2c, 0x84, 0x05, 0xe0, 0xc9, 0xbf, - 0x00, 0xe5, 
0x9e, 0x42, 0x48, 0x17, 0xb8, 0xdd, 0xdb, 0xb8, 0x58, 0xb3, 0x80, 0x30, 0xae, 0x95, - 0x96, 0x37, 0xa1, 0xad, 0x1f, 0x2b, 0x68, 0xfd, 0x4d, 0xf9, 0xee, 0x97, 0x01, 0x55, 0xca, 0xfc, - 0x80, 0x56, 0xf3, 0x97, 0x9c, 0x50, 0x4d, 0x2d, 0x63, 0x60, 0x0c, 0x3b, 0x3b, 0xdb, 0xb8, 0x9a, - 0xf4, 0x95, 0x31, 0x8e, 0x7c, 0x2f, 0x2f, 0x28, 0x9c, 0xab, 0x71, 0x32, 0xc2, 0xc7, 0xd3, 0x4f, - 0xe0, 0xea, 0x23, 0xd0, 0x74, 0x6c, 0x9e, 0xa5, 0xfd, 0x46, 0x96, 0xf6, 0x51, 0x55, 0x73, 0xae, - 0x5c, 0xcd, 0xc7, 0xa8, 0x13, 0x49, 0x91, 0x30, 0xc5, 0x04, 0x07, 0x69, 0x35, 0x07, 0xc6, 0x70, - 0x6d, 0x7c, 0x6f, 0x8e, 0x74, 0x26, 0x55, 0xcb, 0xa9, 0xeb, 0x4c, 0x0f, 0xa1, 0x88, 0x4a, 0x1a, - 0x82, 0x06, 0xa9, 0xac, 0xd6, 0xa0, 0x35, 0xec, 0xec, 0xec, 0xe2, 0xa5, 0x21, 0xc0, 0xf5, 0x17, - 0xe1, 0xc9, 0x15, 0x75, 0xc0, 0xb5, 0x9c, 0x55, 0xb7, 0xab, 0x1a, 0x4e, 0xcd, 0xda, 0xf4, 0xd1, - 0x86, 0x04, 0x37, 0xa0, 0x2c, 0x9c, 0x88, 0x80, 0xb9, 0x33, 0x6b, 0xa5, 0xb8, 0xe1, 0x41, 0x96, - 0xf6, 0x37, 0x9c, 0x7a, 0xe3, 0x77, 0xda, 0xdf, 0xae, 0xc5, 0xc7, 0x15, 0x32, 0xcf, 0x0e, 0x9e, - 0x80, 0x54, 0x4c, 0x69, 0xe0, 0xfa, 0xad, 0x08, 0xe2, 0x10, 0xae, 0x31, 0xce, 0x75, 0x6f, 0x73, - 0x0f, 0xad, 0x87, 0x22, 0xe6, 0xfa, 0x38, 0xd2, 0x4c, 0x70, 0x65, 0xb5, 0x07, 0xad, 0xe1, 0xda, - 0xb8, 0x9b, 0xa5, 0xfd, 0xf5, 0xa3, 0x5a, 0xdd, 0xb9, 0xa6, 0x32, 0x0f, 0xd1, 0x26, 0x0d, 0x02, - 0xf1, 0xb9, 0x3c, 0xe0, 0xe0, 0x4b, 0x44, 0x79, 0x3e, 0x25, 0xeb, 0xbf, 0x81, 0x31, 0x5c, 0x1d, - 0x5b, 0x59, 0xda, 0xdf, 0x7c, 0xb1, 0xa4, 0xef, 0x2c, 0xa5, 0x7a, 0xcf, 0xd0, 0xff, 0x37, 0x66, - 0x64, 0x76, 0x51, 0xcb, 0x87, 0x59, 0x11, 0x80, 0x35, 0x27, 0x5f, 0x9a, 0x9b, 0xa8, 0x9d, 0xd0, - 0x20, 0x86, 0xf2, 0xbf, 0x9c, 0x72, 0xf3, 0xb4, 0xb9, 0x6f, 0x6c, 0x7d, 0x37, 0x50, 0xb7, 0x3e, - 0xf0, 0x43, 0xa6, 0xb4, 0xf9, 0x7e, 0x21, 0x46, 0xf8, 0x6e, 0x31, 0xca, 0xe9, 0x22, 0x44, 0xdd, - 0xf9, 0x37, 0xad, 0xfe, 0xad, 0xd4, 0x22, 0xf4, 0x0a, 0xb5, 0x99, 0x86, 0x50, 0x59, 0xcd, 0x22, - 0x06, 0x0f, 0xee, 0x10, 0x83, 0xf1, 0xc6, 0xdc, 0xaf, 0xfd, 0x3a, 0x27, 0x9d, 0xd2, 0x60, 0x3c, - 0x3c, 0xbb, 0xb4, 0x1b, 0xe7, 0x97, 0x76, 0xe3, 0xe2, 0xd2, 0x6e, 0x7c, 0xcd, 0x6c, 0xe3, 0x2c, - 0xb3, 0x8d, 0xf3, 0xcc, 0x36, 0x2e, 0x32, 0xdb, 0xf8, 0x99, 0xd9, 0xc6, 0xb7, 0x5f, 0x76, 0xe3, - 0x5d, 0x33, 0x19, 0xfd, 0x09, 0x00, 0x00, 0xff, 0xff, 0xbb, 0x57, 0xe7, 0x15, 0xb0, 0x04, 0x00, - 0x00, + // 623 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x52, 0x4f, 0x6f, 0xd3, 0x3c, + 0x18, 0x6f, 0xda, 0xb7, 0x2f, 0x9b, 0xbb, 0x89, 0x2e, 0x0c, 0x29, 0xea, 0x21, 0xa9, 0xc6, 0xa5, + 0x9a, 0x84, 0xb3, 0x6e, 0x03, 0x4d, 0x48, 0x20, 0x11, 0x34, 0x09, 0xa4, 0x4d, 0xab, 0x82, 0x34, + 0x21, 0xc4, 0x01, 0x37, 0x7d, 0xc8, 0x4c, 0x13, 0x3b, 0xb2, 0x9d, 0x40, 0x6f, 0x7c, 0x04, 0xce, + 0x7c, 0x14, 0x3e, 0xc1, 0x8e, 0x3b, 0xee, 0x14, 0xb1, 0xf0, 0x2d, 0x76, 0x42, 0x49, 0xca, 0x9a, + 0xad, 0x9d, 0xd8, 0x2d, 0xfe, 0xfd, 0xb3, 0x9f, 0x27, 0x3f, 0xf4, 0x62, 0xbc, 0x27, 0x31, 0xe5, + 0xf6, 0x38, 0x1e, 0x82, 0x60, 0xa0, 0x40, 0xda, 0x09, 0xb0, 0x11, 0x17, 0xf6, 0x94, 0x20, 0x11, + 0xb5, 0xa5, 0xe2, 0x82, 0xf8, 0x60, 0x27, 0x7d, 0xdb, 0x07, 0x06, 0x82, 0x28, 0x18, 0xe1, 0x48, + 0x70, 0xc5, 0xf5, 0x87, 0xa5, 0x0c, 0x93, 0x88, 0xe2, 0xa9, 0x0c, 0x27, 0xfd, 0xce, 0x63, 0x9f, + 0xaa, 0x93, 0x78, 0x88, 0x3d, 0x1e, 0xda, 0x3e, 0xf7, 0xb9, 0x5d, 0xa8, 0x87, 0xf1, 0xa7, 0xe2, + 0x54, 0x1c, 0x8a, 0xaf, 0x32, 0xa5, 0xb3, 0xb9, 0xf0, 0xb2, 0x21, 0x28, 0x32, 0x77, 0x63, 0x67, + 0x77, 0xa6, 0x0d, 0x89, 0x77, 0x42, 0x19, 0x88, 0x89, 0x1d, 0x8d, 0xfd, 0x1c, 
0x90, 0x76, 0x08, + 0x8a, 0x2c, 0x78, 0x67, 0xc7, 0xbe, 0xcd, 0x25, 0x62, 0xa6, 0x68, 0x08, 0x73, 0x86, 0xa7, 0xff, + 0x32, 0x48, 0xef, 0x04, 0x42, 0x32, 0xe7, 0xdb, 0xb9, 0xcd, 0x17, 0x2b, 0x1a, 0xd8, 0x94, 0x29, + 0xa9, 0xc4, 0x4d, 0xd3, 0xc6, 0x8f, 0x26, 0x5a, 0x79, 0x5b, 0xce, 0xfd, 0x2a, 0x20, 0x52, 0xea, + 0x1f, 0xd1, 0x52, 0x3e, 0xc9, 0x88, 0x28, 0x62, 0x68, 0x5d, 0xad, 0xd7, 0xda, 0xde, 0xc2, 0xb3, + 0x4d, 0x5f, 0x05, 0xe3, 0x68, 0xec, 0xe7, 0x80, 0xc4, 0xb9, 0x1a, 0x27, 0x7d, 0x7c, 0x34, 0xfc, + 0x0c, 0x9e, 0x3a, 0x04, 0x45, 0x1c, 0xfd, 0x34, 0xb5, 0x6a, 0x59, 0x6a, 0xa1, 0x19, 0xe6, 0x5e, + 0xa5, 0xea, 0x4f, 0x50, 0x2b, 0x12, 0x3c, 0xa1, 0x92, 0x72, 0x06, 0xc2, 0xa8, 0x77, 0xb5, 0xde, + 0xb2, 0xf3, 0x60, 0x6a, 0x69, 0x0d, 0x66, 0x94, 0x5b, 0xd5, 0xe9, 0x3e, 0x42, 0x11, 0x11, 0x24, + 0x04, 0x05, 0x42, 0x1a, 0x8d, 0x6e, 0xa3, 0xd7, 0xda, 0xde, 0xc1, 0x0b, 0x4b, 0x80, 0xab, 0x13, + 0xe1, 0xc1, 0x95, 0x6b, 0x9f, 0x29, 0x31, 0x99, 0xbd, 0x6e, 0x46, 0xb8, 0x95, 0x68, 0x7d, 0x8c, + 0x56, 0x05, 0x78, 0x01, 0xa1, 0xe1, 0x80, 0x07, 0xd4, 0x9b, 0x18, 0xff, 0x15, 0x2f, 0xdc, 0xcf, + 0x52, 0x6b, 0xd5, 0xad, 0x12, 0x97, 0xa9, 0xb5, 0x55, 0xa9, 0x8f, 0xc7, 0x45, 0xde, 0x1d, 0x3c, + 0x00, 0x21, 0xa9, 0x54, 0xc0, 0xd4, 0x31, 0x0f, 0xe2, 0x10, 0xae, 0x79, 0xdc, 0xeb, 0xd9, 0xfa, + 0x2e, 0x5a, 0x09, 0x79, 0xcc, 0xd4, 0x51, 0xa4, 0x28, 0x67, 0xd2, 0x68, 0x76, 0x1b, 0xbd, 0x65, + 0xa7, 0x9d, 0xa5, 0xd6, 0xca, 0x61, 0x05, 0x77, 0xaf, 0xa9, 0xf4, 0x03, 0xb4, 0x4e, 0x82, 0x80, + 0x7f, 0x29, 0x2f, 0xd8, 0xff, 0x1a, 0x11, 0x96, 0x6f, 0xc9, 0xf8, 0xbf, 0xab, 0xf5, 0x96, 0x1c, + 0x23, 0x4b, 0xad, 0xf5, 0x97, 0x0b, 0x78, 0x77, 0xa1, 0x4b, 0x7f, 0x87, 0xd6, 0x92, 0x02, 0x72, + 0x28, 0x1b, 0x51, 0xe6, 0x1f, 0xf2, 0x11, 0x18, 0xf7, 0x8a, 0xa1, 0x37, 0xb3, 0xd4, 0x5a, 0x3b, + 0xbe, 0x49, 0x5e, 0x2e, 0x02, 0xdd, 0xf9, 0x90, 0xce, 0x73, 0x74, 0xff, 0xc6, 0xf6, 0xf5, 0x36, + 0x6a, 0x8c, 0x61, 0x52, 0x54, 0x6b, 0xd9, 0xcd, 0x3f, 0xf5, 0x75, 0xd4, 0x4c, 0x48, 0x10, 0x43, + 0xd9, 0x04, 0xb7, 0x3c, 0x3c, 0xab, 0xef, 0x69, 0x1b, 0x3f, 0x35, 0xd4, 0xae, 0xfe, 0xca, 0x03, + 0x2a, 0x95, 0xfe, 0x61, 0xae, 0xa0, 0xf8, 0x6e, 0x05, 0xcd, 0xdd, 0x45, 0x3d, 0xdb, 0xd3, 0x02, + 0x2c, 0xfd, 0x45, 0x2a, 0xe5, 0x7c, 0x8d, 0x9a, 0x54, 0x41, 0x28, 0x8d, 0x7a, 0x51, 0xb0, 0x47, + 0x77, 0x28, 0x98, 0xb3, 0x3a, 0xcd, 0x6b, 0xbe, 0xc9, 0x9d, 0x6e, 0x19, 0xe0, 0xf4, 0x4e, 0x2f, + 0xcc, 0xda, 0xd9, 0x85, 0x59, 0x3b, 0xbf, 0x30, 0x6b, 0xdf, 0x32, 0x53, 0x3b, 0xcd, 0x4c, 0xed, + 0x2c, 0x33, 0xb5, 0xf3, 0xcc, 0xd4, 0x7e, 0x65, 0xa6, 0xf6, 0xfd, 0xb7, 0x59, 0x7b, 0x5f, 0x4f, + 0xfa, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xee, 0x56, 0xcc, 0xfd, 0x0a, 0x05, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/storage/v1/generated.proto b/vendor/k8s.io/api/storage/v1/generated.proto index 6a335e5268ae..939ebde685f8 100644 --- a/vendor/k8s.io/api/storage/v1/generated.proto +++ b/vendor/k8s.io/api/storage/v1/generated.proto @@ -63,6 +63,13 @@ message StorageClass { // AllowVolumeExpansion shows whether the storage class allow volume expand // +optional optional bool allowVolumeExpansion = 6; + + // VolumeBindingMode indicates how PersistentVolumeClaims should be + // provisioned and bound. When unset, VolumeBindingImmediate is used. + // This field is alpha-level and is only honored by servers that enable + // the VolumeScheduling feature. + // +optional + optional string volumeBindingMode = 7; } // StorageClassList is a collection of storage classes. 
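Editor's note (illustrative, not part of the patch): the storage/v1 hunks above and below introduce a new VolumeBindingMode field on StorageClass, with the constants VolumeBindingImmediate and VolumeBindingWaitForFirstConsumer defined in types.go. The minimal Go sketch that follows shows how client code might populate the new field once this vendored package is used; the object name "standard-delayed" and the provisioner string are hypothetical placeholders, and leaving VolumeBindingMode nil falls back to the Immediate default described in the field comment.

package main

import (
	"fmt"

	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// WaitForFirstConsumer defers provisioning and binding until a Pod that
	// references the PersistentVolumeClaim is scheduled; a nil field keeps
	// the default Immediate behavior.
	mode := storagev1.VolumeBindingWaitForFirstConsumer

	sc := storagev1.StorageClass{
		ObjectMeta:        metav1.ObjectMeta{Name: "standard-delayed"}, // hypothetical name
		Provisioner:       "kubernetes.io/gce-pd",                      // hypothetical provisioner
		VolumeBindingMode: &mode,
	}
	fmt.Printf("storage class %q uses binding mode %s\n", sc.Name, *sc.VolumeBindingMode)
}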
diff --git a/vendor/k8s.io/api/storage/v1/register.go b/vendor/k8s.io/api/storage/v1/register.go index 59c4e59ba19c..c058add84001 100644 --- a/vendor/k8s.io/api/storage/v1/register.go +++ b/vendor/k8s.io/api/storage/v1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &StorageClass{}, diff --git a/vendor/k8s.io/api/storage/v1/types.go b/vendor/k8s.io/api/storage/v1/types.go index 9afdafb62ac2..288d40abb8f5 100644 --- a/vendor/k8s.io/api/storage/v1/types.go +++ b/vendor/k8s.io/api/storage/v1/types.go @@ -59,6 +59,13 @@ type StorageClass struct { // AllowVolumeExpansion shows whether the storage class allow volume expand // +optional AllowVolumeExpansion *bool `json:"allowVolumeExpansion,omitempty" protobuf:"varint,6,opt,name=allowVolumeExpansion"` + + // VolumeBindingMode indicates how PersistentVolumeClaims should be + // provisioned and bound. When unset, VolumeBindingImmediate is used. + // This field is alpha-level and is only honored by servers that enable + // the VolumeScheduling feature. + // +optional + VolumeBindingMode *VolumeBindingMode `json:"volumeBindingMode,omitempty" protobuf:"bytes,7,opt,name=volumeBindingMode"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -74,3 +81,18 @@ type StorageClassList struct { // Items is the list of StorageClasses Items []StorageClass `json:"items" protobuf:"bytes,2,rep,name=items"` } + +// VolumeBindingMode indicates how PersistentVolumeClaims should be bound. +type VolumeBindingMode string + +const ( + // VolumeBindingImmediate indicates that PersistentVolumeClaims should be + // immediately provisioned and bound. This is the default mode. + VolumeBindingImmediate VolumeBindingMode = "Immediate" + + // VolumeBindingWaitForFirstConsumer indicates that PersistentVolumeClaims + // should not be provisioned and bound until the first Pod is created that + // references the PeristentVolumeClaim. The volume provisioning and + // binding will occur during Pod scheduing. + VolumeBindingWaitForFirstConsumer VolumeBindingMode = "WaitForFirstConsumer" +) diff --git a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go index b4be857dd1a6..3eb9bdab7698 100644 --- a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go @@ -35,6 +35,7 @@ var map_StorageClass = map[string]string{ "reclaimPolicy": "Dynamically provisioned PersistentVolumes of this storage class are created with this reclaimPolicy. Defaults to Delete.", "mountOptions": "Dynamically provisioned PersistentVolumes of this storage class are created with these mountOptions, e.g. [\"ro\", \"soft\"]. Not validated - mount of the PVs will simply fail if one is invalid.", "allowVolumeExpansion": "AllowVolumeExpansion shows whether the storage class allow volume expand", + "volumeBindingMode": "VolumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. 
This field is alpha-level and is only honored by servers that enable the VolumeScheduling feature.", } func (StorageClass) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go index 50c707abb9d3..a2b2f8e71527 100644 --- a/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go @@ -22,32 +22,9 @@ package v1 import ( core_v1 "k8s.io/api/core/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StorageClass).DeepCopyInto(out.(*StorageClass)) - return nil - }, InType: reflect.TypeOf(&StorageClass{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StorageClassList).DeepCopyInto(out.(*StorageClassList)) - return nil - }, InType: reflect.TypeOf(&StorageClassList{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StorageClass) DeepCopyInto(out *StorageClass) { *out = *in @@ -83,6 +60,15 @@ func (in *StorageClass) DeepCopyInto(out *StorageClass) { **out = **in } } + if in.VolumeBindingMode != nil { + in, out := &in.VolumeBindingMode, &out.VolumeBindingMode + if *in == nil { + *out = nil + } else { + *out = new(VolumeBindingMode) + **out = **in + } + } return } diff --git a/vendor/k8s.io/api/storage/v1alpha1/BUILD b/vendor/k8s.io/api/storage/v1alpha1/BUILD new file mode 100644 index 000000000000..98342a4262d1 --- /dev/null +++ b/vendor/k8s.io/api/storage/v1alpha1/BUILD @@ -0,0 +1,42 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +filegroup( + name = "go_default_library_protos", + srcs = ["generated.proto"], + visibility = ["//visibility:public"], +) + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "generated.pb.go", + "register.go", + "types.go", + "types_swagger_doc_generated.go", + "zz_generated.deepcopy.go", + ], + importpath = "k8s.io/api/storage/v1alpha1", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/gogo/protobuf/proto:go_default_library", + "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/api/storage/v1alpha1/doc.go b/vendor/k8s.io/api/storage/v1alpha1/doc.go new file mode 100644 index 000000000000..dce99a3344b7 --- /dev/null +++ b/vendor/k8s.io/api/storage/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package,register +// +groupName=storage.k8s.io +// +k8s:openapi-gen=true +package v1alpha1 diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go b/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go new file mode 100644 index 000000000000..3d55c6ec4304 --- /dev/null +++ b/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go @@ -0,0 +1,1523 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/api/storage/v1alpha1/generated.proto +// DO NOT EDIT! + +/* + Package v1alpha1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/storage/v1alpha1/generated.proto + + It has these top-level messages: + VolumeAttachment + VolumeAttachmentList + VolumeAttachmentSource + VolumeAttachmentSpec + VolumeAttachmentStatus + VolumeError +*/ +package v1alpha1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } +func (*VolumeAttachment) ProtoMessage() {} +func (*VolumeAttachment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } +func (*VolumeAttachmentList) ProtoMessage() {} +func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } +func (*VolumeAttachmentSource) ProtoMessage() {} +func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } +func (*VolumeAttachmentSpec) ProtoMessage() {} +func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } +func (*VolumeAttachmentStatus) ProtoMessage() {} +func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *VolumeError) Reset() { *m = VolumeError{} } +func (*VolumeError) ProtoMessage() {} +func (*VolumeError) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func init() { + proto.RegisterType((*VolumeAttachment)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachment") + proto.RegisterType((*VolumeAttachmentList)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentList") + proto.RegisterType((*VolumeAttachmentSource)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentSource") + proto.RegisterType((*VolumeAttachmentSpec)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentSpec") + proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentStatus") + proto.RegisterType((*VolumeError)(nil), "k8s.io.api.storage.v1alpha1.VolumeError") +} +func (m *VolumeAttachment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachment) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *VolumeAttachmentList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n4, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := 
msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *VolumeAttachmentSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.PersistentVolumeName != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PersistentVolumeName))) + i += copy(dAtA[i:], *m.PersistentVolumeName) + } + return i, nil +} + +func (m *VolumeAttachmentSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Attacher))) + i += copy(dAtA[i:], m.Attacher) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Source.Size())) + n5, err := m.Source.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) + i += copy(dAtA[i:], m.NodeName) + return i, nil +} + +func (m *VolumeAttachmentStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + if m.Attached { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if len(m.AttachmentMetadata) > 0 { + keysForAttachmentMetadata := make([]string, 0, len(m.AttachmentMetadata)) + for k := range m.AttachmentMetadata { + keysForAttachmentMetadata = append(keysForAttachmentMetadata, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) + for _, k := range keysForAttachmentMetadata { + dAtA[i] = 0x12 + i++ + v := m.AttachmentMetadata[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if m.AttachError != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AttachError.Size())) + n6, err := m.AttachError.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + if m.DetachError != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DetachError.Size())) + n7, err := m.DetachError.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + return i, nil +} + +func (m *VolumeError) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeError) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Time.Size())) + n8, err := m.Time.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + dAtA[i] = 0x12 + i++ + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil +} + +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *VolumeAttachment) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeAttachmentList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *VolumeAttachmentSource) Size() (n int) { + var l int + _ = l + if m.PersistentVolumeName != nil { + l = len(*m.PersistentVolumeName) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VolumeAttachmentSpec) Size() (n int) { + var l int + _ = l + l = len(m.Attacher) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Source.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeAttachmentStatus) Size() (n int) { + var l int + _ = l + n += 2 + if len(m.AttachmentMetadata) > 0 { + for k, v := range m.AttachmentMetadata { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.AttachError != nil { + l = m.AttachError.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DetachError != nil { + l = m.DetachError.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VolumeError) Size() (n int) { + var l int + _ = l + l = m.Time.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *VolumeAttachment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeAttachment{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "VolumeAttachmentSpec", "VolumeAttachmentSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "VolumeAttachmentStatus", "VolumeAttachmentStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentList) String() string { + if this == nil { + return "nil" + 
} + s := strings.Join([]string{`&VolumeAttachmentList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "VolumeAttachment", "VolumeAttachment", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeAttachmentSource{`, + `PersistentVolumeName:` + valueToStringGenerated(this.PersistentVolumeName) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeAttachmentSpec{`, + `Attacher:` + fmt.Sprintf("%v", this.Attacher) + `,`, + `Source:` + strings.Replace(strings.Replace(this.Source.String(), "VolumeAttachmentSource", "VolumeAttachmentSource", 1), `&`, ``, 1) + `,`, + `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentStatus) String() string { + if this == nil { + return "nil" + } + keysForAttachmentMetadata := make([]string, 0, len(this.AttachmentMetadata)) + for k := range this.AttachmentMetadata { + keysForAttachmentMetadata = append(keysForAttachmentMetadata, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) + mapStringForAttachmentMetadata := "map[string]string{" + for _, k := range keysForAttachmentMetadata { + mapStringForAttachmentMetadata += fmt.Sprintf("%v: %v,", k, this.AttachmentMetadata[k]) + } + mapStringForAttachmentMetadata += "}" + s := strings.Join([]string{`&VolumeAttachmentStatus{`, + `Attached:` + fmt.Sprintf("%v", this.Attached) + `,`, + `AttachmentMetadata:` + mapStringForAttachmentMetadata + `,`, + `AttachError:` + strings.Replace(fmt.Sprintf("%v", this.AttachError), "VolumeError", "VolumeError", 1) + `,`, + `DetachError:` + strings.Replace(fmt.Sprintf("%v", this.DetachError), "VolumeError", "VolumeError", 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeError) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeError{`, + `Time:` + strings.Replace(strings.Replace(this.Time.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachmentList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachmentList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { 
+ return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, VolumeAttachment{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachmentSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachmentSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.PersistentVolumeName = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachmentSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachmentSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attacher", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + 
break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attacher = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachmentStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachmentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Attached", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Attached = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AttachmentMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { 
+ return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.AttachmentMetadata == nil { + m.AttachmentMetadata = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.AttachmentMetadata[mapkey] = mapvalue + } else { + var mapvalue string + m.AttachmentMetadata[mapkey] = mapvalue + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AttachError", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AttachError == nil { + m.AttachError = &VolumeError{} + } + if err := m.AttachError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DetachError", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DetachError == nil { + m.DetachError = &VolumeError{} + } + if err := m.DetachError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeError) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeError: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeError: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Time.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1alpha1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 745 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0x3d, 0x6f, 0xdb, 0x46, + 0x18, 0xc7, 0x45, 0x49, 0xb6, 0xe5, 0x53, 0x5f, 0x8c, 0x83, 0xd0, 0x0a, 0x2a, 0x40, 0x19, 0x9a, + 0xdc, 0xa2, 0x3e, 0x56, 0x76, 0x51, 0x18, 0xdd, 0x44, 0xd8, 0x43, 0x51, 0xcb, 0x2d, 0xe8, 0xa2, + 0x43, 0xdb, 0xa1, 0x27, 0xf2, 0x31, 0x45, 0x4b, 0x7c, 0xc1, 0xdd, 0x51, 0x88, 0xb7, 0x4c, 0x99, + 0xb3, 0xe5, 0x1b, 0xe4, 0xb3, 0x68, 0x8b, 0x47, 0x4f, 0x42, 0xcc, 0x7c, 0x8b, 0x2c, 0x09, 0x78, + 0x3c, 0x89, 0xb2, 0x29, 0x25, 0xb6, 0x37, 0x3e, 0xcf, 0x3d, 0xff, 0xdf, 0xf3, 0x76, 0x47, 0x74, + 0x3c, 0x3a, 0xe2, 0xc4, 0x0b, 0x8d, 0x51, 0x3c, 0x00, 0x16, 0x80, 0x00, 0x6e, 0x4c, 0x20, 0x70, + 0x42, 0x66, 0xa8, 0x03, 0x1a, 0x79, 0x06, 0x17, 0x21, 0xa3, 0x2e, 0x18, 0x93, 0x2e, 0x1d, 0x47, + 0x43, 0xda, 0x35, 0x5c, 0x08, 0x80, 0x51, 0x01, 0x0e, 0x89, 0x58, 0x28, 0x42, 0xfc, 0x5d, 0x16, + 0x4c, 0x68, 0xe4, 0x11, 0x15, 0x4c, 0xe6, 0xc1, 0xad, 0x7d, 0xd7, 0x13, 0xc3, 0x78, 0x40, 0xec, + 0xd0, 0x37, 0xdc, 0xd0, 0x0d, 0x0d, 0xa9, 0x19, 0xc4, 0x17, 0xd2, 0x92, 0x86, 0xfc, 0xca, 0x58, + 0xad, 0x7e, 0x9e, 0x18, 0x9e, 0x09, 0x08, 0xb8, 0x17, 0x06, 0x7c, 0x9f, 0x46, 0x1e, 0x07, 0x36, + 0x01, 0x66, 0x44, 0x23, 0x37, 0x3d, 0xe3, 0x77, 0x03, 0x8c, 0x49, 0x77, 0x00, 0xa2, 0x58, 0x5a, + 0xeb, 0xe7, 0x1c, 0xe7, 0x53, 0x7b, 0xe8, 0x05, 0xc0, 0xae, 0x72, 0x86, 0x0f, 0x82, 0x1a, 0x93, + 0xa2, 0xca, 0x58, 0xa7, 0x62, 0x71, 0x20, 0x3c, 0x1f, 0x0a, 0x82, 0x5f, 0x3e, 0x27, 0xe0, 0xf6, + 0x10, 0x7c, 0x5a, 0xd0, 0x1d, 0xae, 0xd3, 0xc5, 0xc2, 0x1b, 0x1b, 0x5e, 0x20, 0xb8, 0x60, 0xf7, + 0x45, 0x9d, 0xd7, 0x65, 0xb4, 0xf3, 0x77, 0x38, 0x8e, 0x7d, 0xe8, 0x09, 0x41, 0xed, 0xa1, 0x0f, + 0x81, 0xc0, 0xff, 0xa3, 0x5a, 0xda, 0x8d, 0x43, 0x05, 0x6d, 0x6a, 0xbb, 0xda, 0x5e, 0xfd, 0xe0, + 0x27, 0x92, 0xaf, 0x65, 0x01, 0x27, 0xd1, 0xc8, 0x4d, 0x1d, 0x9c, 0xa4, 0xd1, 0x64, 0xd2, 0x25, + 0x7f, 0x0c, 0x2e, 0xc1, 0x16, 0x7d, 0x10, 0xd4, 0xc4, 0xd3, 0x59, 0xbb, 0x94, 0xcc, 0xda, 0x28, + 0xf7, 0x59, 0x0b, 0x2a, 0x3e, 0x47, 0x55, 0x1e, 0x81, 0xdd, 0x2c, 0x4b, 0x7a, 0x97, 0x7c, 0x62, + 0xe9, 0xe4, 0x7e, 0x79, 0xe7, 0x11, 0xd8, 0xe6, 0x17, 0x0a, 0x5f, 0x4d, 0x2d, 0x4b, 0xc2, 0xf0, + 0xbf, 0x68, 0x93, 0x0b, 0x2a, 0x62, 0xde, 0xac, 0x48, 0xec, 0xe1, 0xe3, 0xb0, 0x52, 0x6a, 0x7e, + 0xa5, 0xc0, 0x9b, 0x99, 0x6d, 0x29, 0x64, 0x67, 0xaa, 0xa1, 0xc6, 0x7d, 0xc9, 0xa9, 0xc7, 0x05, + 0xfe, 0xaf, 0x30, 0x2c, 0xf2, 0xb0, 0x61, 0xa5, 0x6a, 0x39, 0xaa, 0x1d, 0x95, 0xb2, 0x36, 0xf7, + 0x2c, 0x0d, 0xca, 0x42, 0x1b, 0x9e, 0x00, 0x9f, 0x37, 0xcb, 0xbb, 0x95, 0xbd, 0xfa, 0xc1, 0xfe, + 
0xa3, 0x5a, 0x32, 0xbf, 0x54, 0xe4, 0x8d, 0xdf, 0x52, 0x86, 0x95, 0xa1, 0x3a, 0x17, 0xe8, 0x9b, + 0x42, 0xf3, 0x61, 0xcc, 0x6c, 0xc0, 0xa7, 0xa8, 0x11, 0x01, 0xe3, 0x1e, 0x17, 0x10, 0x88, 0x2c, + 0xe6, 0x8c, 0xfa, 0x20, 0xfb, 0xda, 0x36, 0x9b, 0xc9, 0xac, 0xdd, 0xf8, 0x73, 0xc5, 0xb9, 0xb5, + 0x52, 0xd5, 0x79, 0xb3, 0x62, 0x64, 0xe9, 0xba, 0xf0, 0x8f, 0xa8, 0x46, 0xa5, 0x07, 0x98, 0x42, + 0x2f, 0x46, 0xd0, 0x53, 0x7e, 0x6b, 0x11, 0x21, 0xd7, 0x2a, 0xcb, 0x53, 0xb7, 0xe5, 0x91, 0x6b, + 0x95, 0xd2, 0xa5, 0xb5, 0x4a, 0xdb, 0x52, 0xc8, 0xb4, 0x94, 0x20, 0x74, 0xb2, 0x2e, 0x2b, 0x77, + 0x4b, 0x39, 0x53, 0x7e, 0x6b, 0x11, 0xd1, 0xf9, 0x50, 0x59, 0x31, 0x3a, 0x79, 0x3f, 0x96, 0x7a, + 0x72, 0x64, 0x4f, 0xb5, 0x42, 0x4f, 0xce, 0xa2, 0x27, 0x07, 0xbf, 0xd2, 0x10, 0xa6, 0x0b, 0x44, + 0x7f, 0x7e, 0x7f, 0xb2, 0x25, 0xff, 0xfe, 0x84, 0x7b, 0x4b, 0x7a, 0x05, 0xda, 0x49, 0x20, 0xd8, + 0x95, 0xd9, 0x52, 0x55, 0xe0, 0x62, 0x80, 0xb5, 0xa2, 0x04, 0x7c, 0x89, 0xea, 0x99, 0xf7, 0x84, + 0xb1, 0x90, 0xa9, 0x97, 0xb4, 0xf7, 0x80, 0x8a, 0x64, 0xbc, 0xa9, 0x27, 0xb3, 0x76, 0xbd, 0x97, + 0x03, 0xde, 0xcf, 0xda, 0xf5, 0xa5, 0x73, 0x6b, 0x19, 0x9e, 0xe6, 0x72, 0x20, 0xcf, 0x55, 0x7d, + 0x4a, 0xae, 0x63, 0x58, 0x9f, 0x6b, 0x09, 0xde, 0x3a, 0x41, 0xdf, 0xae, 0x19, 0x11, 0xde, 0x41, + 0x95, 0x11, 0x5c, 0x65, 0x37, 0xd1, 0x4a, 0x3f, 0x71, 0x03, 0x6d, 0x4c, 0xe8, 0x38, 0xce, 0x6e, + 0xdc, 0xb6, 0x95, 0x19, 0xbf, 0x96, 0x8f, 0xb4, 0xce, 0x0b, 0x0d, 0x2d, 0xe7, 0xc0, 0xa7, 0xa8, + 0x9a, 0xfe, 0x93, 0xd5, 0xcb, 0xff, 0xe1, 0x61, 0x2f, 0xff, 0x2f, 0xcf, 0x87, 0xfc, 0x0f, 0x96, + 0x5a, 0x96, 0xa4, 0xe0, 0xef, 0xd1, 0x96, 0x0f, 0x9c, 0x53, 0x57, 0x65, 0x36, 0xbf, 0x56, 0x41, + 0x5b, 0xfd, 0xcc, 0x6d, 0xcd, 0xcf, 0x4d, 0x32, 0xbd, 0xd5, 0x4b, 0xd7, 0xb7, 0x7a, 0xe9, 0xe6, + 0x56, 0x2f, 0x3d, 0x4f, 0x74, 0x6d, 0x9a, 0xe8, 0xda, 0x75, 0xa2, 0x6b, 0x37, 0x89, 0xae, 0xbd, + 0x4d, 0x74, 0xed, 0xe5, 0x3b, 0xbd, 0xf4, 0x4f, 0x6d, 0x3e, 0xb8, 0x8f, 0x01, 0x00, 0x00, 0xff, + 0xff, 0x68, 0x82, 0x7b, 0x73, 0x9e, 0x07, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.proto b/vendor/k8s.io/api/storage/v1alpha1/generated.proto new file mode 100644 index 000000000000..c9421cf06bba --- /dev/null +++ b/vendor/k8s.io/api/storage/v1alpha1/generated.proto @@ -0,0 +1,128 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.storage.v1alpha1; + +import "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1alpha1"; + +// VolumeAttachment captures the intent to attach or detach the specified volume +// to/from the specified node. 
+// +// VolumeAttachment objects are non-namespaced. +message VolumeAttachment { + // Standard object metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired attach/detach volume behavior. + // Populated by the Kubernetes system. + optional VolumeAttachmentSpec spec = 2; + + // Status of the VolumeAttachment request. + // Populated by the entity completing the attach or detach + // operation, i.e. the external-attacher. + // +optional + optional VolumeAttachmentStatus status = 3; +} + +// VolumeAttachmentList is a collection of VolumeAttachment objects. +message VolumeAttachmentList { + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of VolumeAttachments + repeated VolumeAttachment items = 2; +} + +// VolumeAttachmentSource represents a volume that should be attached. +// Right now only PersistenVolumes can be attached via external attacher, +// in future we may allow also inline volumes in pods. +// Exactly one member can be set. +message VolumeAttachmentSource { + // Name of the persistent volume to attach. + // +optional + optional string persistentVolumeName = 1; +} + +// VolumeAttachmentSpec is the specification of a VolumeAttachment request. +message VolumeAttachmentSpec { + // Attacher indicates the name of the volume driver that MUST handle this + // request. This is the name returned by GetPluginName(). + optional string attacher = 1; + + // Source represents the volume that should be attached. + optional VolumeAttachmentSource source = 2; + + // The node that the volume should be attached to. + optional string nodeName = 3; +} + +// VolumeAttachmentStatus is the status of a VolumeAttachment request. +message VolumeAttachmentStatus { + // Indicates the volume is successfully attached. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + optional bool attached = 1; + + // Upon successful attach, this field is populated with any + // information returned by the attach operation that must be passed + // into subsequent WaitForAttach or Mount calls. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + // +optional + map attachmentMetadata = 2; + + // The last error encountered during attach operation, if any. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + // +optional + optional VolumeError attachError = 3; + + // The last error encountered during detach operation, if any. + // This field must only be set by the entity completing the detach + // operation, i.e. the external-attacher. + // +optional + optional VolumeError detachError = 4; +} + +// VolumeError captures an error encountered during a volume operation. +message VolumeError { + // Time the error was encountered. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; + + // String detailing the error encountered during Attach or Detach operation. + // This string maybe logged, so it should not contain sensitive + // information. 
+ // +optional + optional string message = 2; +} + diff --git a/vendor/k8s.io/api/storage/v1alpha1/register.go b/vendor/k8s.io/api/storage/v1alpha1/register.go new file mode 100644 index 000000000000..7b81ee49c2bc --- /dev/null +++ b/vendor/k8s.io/api/storage/v1alpha1/register.go @@ -0,0 +1,50 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "storage.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &VolumeAttachment{}, + &VolumeAttachmentList{}, + ) + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/storage/v1alpha1/types.go b/vendor/k8s.io/api/storage/v1alpha1/types.go new file mode 100644 index 000000000000..964bb5f7b17c --- /dev/null +++ b/vendor/k8s.io/api/storage/v1alpha1/types.go @@ -0,0 +1,126 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeAttachment captures the intent to attach or detach the specified volume +// to/from the specified node. +// +// VolumeAttachment objects are non-namespaced. +type VolumeAttachment struct { + metav1.TypeMeta `json:",inline"` + + // Standard object metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired attach/detach volume behavior. + // Populated by the Kubernetes system. 
+ Spec VolumeAttachmentSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status of the VolumeAttachment request. + // Populated by the entity completing the attach or detach + // operation, i.e. the external-attacher. + // +optional + Status VolumeAttachmentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeAttachmentList is a collection of VolumeAttachment objects. +type VolumeAttachmentList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of VolumeAttachments + Items []VolumeAttachment `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// VolumeAttachmentSpec is the specification of a VolumeAttachment request. +type VolumeAttachmentSpec struct { + // Attacher indicates the name of the volume driver that MUST handle this + // request. This is the name returned by GetPluginName(). + Attacher string `json:"attacher" protobuf:"bytes,1,opt,name=attacher"` + + // Source represents the volume that should be attached. + Source VolumeAttachmentSource `json:"source" protobuf:"bytes,2,opt,name=source"` + + // The node that the volume should be attached to. + NodeName string `json:"nodeName" protobuf:"bytes,3,opt,name=nodeName"` +} + +// VolumeAttachmentSource represents a volume that should be attached. +// Right now only PersistenVolumes can be attached via external attacher, +// in future we may allow also inline volumes in pods. +// Exactly one member can be set. +type VolumeAttachmentSource struct { + // Name of the persistent volume to attach. + // +optional + PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"` + + // Placeholder for *VolumeSource to accommodate inline volumes in pods. +} + +// VolumeAttachmentStatus is the status of a VolumeAttachment request. +type VolumeAttachmentStatus struct { + // Indicates the volume is successfully attached. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + Attached bool `json:"attached" protobuf:"varint,1,opt,name=attached"` + + // Upon successful attach, this field is populated with any + // information returned by the attach operation that must be passed + // into subsequent WaitForAttach or Mount calls. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + // +optional + AttachmentMetadata map[string]string `json:"attachmentMetadata,omitempty" protobuf:"bytes,2,rep,name=attachmentMetadata"` + + // The last error encountered during attach operation, if any. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + // +optional + AttachError *VolumeError `json:"attachError,omitempty" protobuf:"bytes,3,opt,name=attachError,casttype=VolumeError"` + + // The last error encountered during detach operation, if any. + // This field must only be set by the entity completing the detach + // operation, i.e. the external-attacher. + // +optional + DetachError *VolumeError `json:"detachError,omitempty" protobuf:"bytes,4,opt,name=detachError,casttype=VolumeError"` +} + +// VolumeError captures an error encountered during a volume operation. 
+type VolumeError struct { + // Time the error was encountered. + // +optional + Time metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"` + + // String detailing the error encountered during Attach or Detach operation. + // This string maybe logged, so it should not contain sensitive + // information. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` +} diff --git a/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 000000000000..faca8e939eac --- /dev/null +++ b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,93 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_VolumeAttachment = map[string]string{ + "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", + "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", + "status": "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", +} + +func (VolumeAttachment) SwaggerDoc() map[string]string { + return map_VolumeAttachment +} + +var map_VolumeAttachmentList = map[string]string{ + "": "VolumeAttachmentList is a collection of VolumeAttachment objects.", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "items": "Items is the list of VolumeAttachments", +} + +func (VolumeAttachmentList) SwaggerDoc() map[string]string { + return map_VolumeAttachmentList +} + +var map_VolumeAttachmentSource = map[string]string{ + "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. 
Exactly one member can be set.", + "persistentVolumeName": "Name of the persistent volume to attach.", +} + +func (VolumeAttachmentSource) SwaggerDoc() map[string]string { + return map_VolumeAttachmentSource +} + +var map_VolumeAttachmentSpec = map[string]string{ + "": "VolumeAttachmentSpec is the specification of a VolumeAttachment request.", + "attacher": "Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().", + "source": "Source represents the volume that should be attached.", + "nodeName": "The node that the volume should be attached to.", +} + +func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { + return map_VolumeAttachmentSpec +} + +var map_VolumeAttachmentStatus = map[string]string{ + "": "VolumeAttachmentStatus is the status of a VolumeAttachment request.", + "attached": "Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachmentMetadata": "Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachError": "The last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "detachError": "The last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", +} + +func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { + return map_VolumeAttachmentStatus +} + +var map_VolumeError = map[string]string{ + "": "VolumeError captures an error encountered during a volume operation.", + "time": "Time the error was encountered.", + "message": "String detailing the error encountered during Attach or Detach operation. This string maybe logged, so it should not contain sensitive information.", +} + +func (VolumeError) SwaggerDoc() map[string]string { + return map_VolumeError +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000000..19ae948535dd --- /dev/null +++ b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,188 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VolumeAttachment) DeepCopyInto(out *VolumeAttachment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachment. +func (in *VolumeAttachment) DeepCopy() *VolumeAttachment { + if in == nil { + return nil + } + out := new(VolumeAttachment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeAttachment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachmentList) DeepCopyInto(out *VolumeAttachmentList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VolumeAttachment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentList. +func (in *VolumeAttachmentList) DeepCopy() *VolumeAttachmentList { + if in == nil { + return nil + } + out := new(VolumeAttachmentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeAttachmentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachmentSource) DeepCopyInto(out *VolumeAttachmentSource) { + *out = *in + if in.PersistentVolumeName != nil { + in, out := &in.PersistentVolumeName, &out.PersistentVolumeName + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentSource. +func (in *VolumeAttachmentSource) DeepCopy() *VolumeAttachmentSource { + if in == nil { + return nil + } + out := new(VolumeAttachmentSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachmentSpec) DeepCopyInto(out *VolumeAttachmentSpec) { + *out = *in + in.Source.DeepCopyInto(&out.Source) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentSpec. +func (in *VolumeAttachmentSpec) DeepCopy() *VolumeAttachmentSpec { + if in == nil { + return nil + } + out := new(VolumeAttachmentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VolumeAttachmentStatus) DeepCopyInto(out *VolumeAttachmentStatus) { + *out = *in + if in.AttachmentMetadata != nil { + in, out := &in.AttachmentMetadata, &out.AttachmentMetadata + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.AttachError != nil { + in, out := &in.AttachError, &out.AttachError + if *in == nil { + *out = nil + } else { + *out = new(VolumeError) + (*in).DeepCopyInto(*out) + } + } + if in.DetachError != nil { + in, out := &in.DetachError, &out.DetachError + if *in == nil { + *out = nil + } else { + *out = new(VolumeError) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentStatus. +func (in *VolumeAttachmentStatus) DeepCopy() *VolumeAttachmentStatus { + if in == nil { + return nil + } + out := new(VolumeAttachmentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeError) DeepCopyInto(out *VolumeError) { + *out = *in + in.Time.DeepCopyInto(&out.Time) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeError. +func (in *VolumeError) DeepCopy() *VolumeError { + if in == nil { + return nil + } + out := new(VolumeError) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/storage/v1beta1/BUILD b/vendor/k8s.io/api/storage/v1beta1/BUILD index 03d98c7a1f4b..e659e9de0155 100644 --- a/vendor/k8s.io/api/storage/v1beta1/BUILD +++ b/vendor/k8s.io/api/storage/v1beta1/BUILD @@ -10,13 +10,13 @@ go_library( "types_swagger_doc_generated.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/api/storage/v1beta1", visibility = ["//visibility:public"], deps = [ "//vendor/github.com/gogo/protobuf/proto:go_default_library", "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], diff --git a/vendor/k8s.io/api/storage/v1beta1/doc.go b/vendor/k8s.io/api/storage/v1beta1/doc.go index 58d2548ae2a3..545d636fdfc6 100644 --- a/vendor/k8s.io/api/storage/v1beta1/doc.go +++ b/vendor/k8s.io/api/storage/v1beta1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +groupName=storage.k8s.io // +k8s:openapi-gen=true package v1beta1 diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.pb.go b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go index b31d6f122968..f2c8ea960055 100644 --- a/vendor/k8s.io/api/storage/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go @@ -146,6 +146,12 @@ func (m *StorageClass) MarshalTo(dAtA []byte) (int, error) { } i++ } + if m.VolumeBindingMode != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeBindingMode))) + i += copy(dAtA[i:], *m.VolumeBindingMode) + } return i, nil } @@ -242,6 +248,10 @@ func (m *StorageClass) Size() (n int) { if m.AllowVolumeExpansion != nil { n += 2 } + if m.VolumeBindingMode != nil { + l = len(*m.VolumeBindingMode) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -293,6 +303,7 @@ func (this *StorageClass) String() string { `ReclaimPolicy:` + valueToStringGenerated(this.ReclaimPolicy) + `,`, `MountOptions:` + fmt.Sprintf("%v", this.MountOptions) + `,`, `AllowVolumeExpansion:` + valueToStringGenerated(this.AllowVolumeExpansion) + `,`, + `VolumeBindingMode:` + valueToStringGenerated(this.VolumeBindingMode) + `,`, `}`, }, "") return s @@ -600,6 +611,36 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := bool(v != 0) m.AllowVolumeExpansion = &b + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeBindingMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := VolumeBindingMode(dAtA[iNdEx:postIndex]) + m.VolumeBindingMode = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -842,42 +883,44 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 589 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x4f, 0x4f, 0xd4, 0x40, - 0x18, 0xc6, 0xb7, 0x2c, 0xab, 0x30, 0x0b, 0x71, 0x53, 0x39, 0x34, 0x7b, 0xe8, 0x6e, 0x38, 0xf5, - 0xc2, 0x0c, 0x20, 0x1a, 0x62, 0xe2, 0xc1, 0x12, 0x0e, 0x26, 0x10, 0x36, 0x35, 0xf1, 0x60, 0x3c, - 0x38, 0x5b, 0x5e, 0xcb, 0xd8, 0x76, 0xa6, 0x99, 0x99, 0xae, 0xee, 0xcd, 0x8f, 0xe0, 0x37, 0xf2, - 0x64, 0xc2, 0x91, 0x23, 0xa7, 0x46, 0xea, 0xb7, 0xf0, 0x64, 0xfa, 0x47, 0x5a, 0x58, 0x88, 0xdc, - 0x3a, 0xef, 0xfb, 0xfc, 0x9e, 0xb7, 0xf3, 0xce, 0x83, 0x0e, 0xc2, 0x7d, 0x85, 0x99, 0x20, 0x61, - 0x3a, 0x05, 0xc9, 0x41, 0x83, 0x22, 0x33, 0xe0, 0xa7, 0x42, 0x92, 0xba, 0x41, 0x13, 0x46, 0x94, - 0x16, 0x92, 0x06, 0x40, 0x66, 0x3b, 0x53, 0xd0, 0x74, 0x87, 0x04, 0xc0, 0x41, 0x52, 0x0d, 0xa7, - 0x38, 0x91, 0x42, 0x0b, 0x73, 0x58, 0x69, 0x31, 0x4d, 0x18, 0xae, 0xb5, 0xb8, 0xd6, 0x0e, 0xb7, - 0x02, 0xa6, 0xcf, 0xd2, 0x29, 0xf6, 0x45, 0x4c, 0x02, 0x11, 0x08, 0x52, 0x22, 0xd3, 0xf4, 0x53, - 0x79, 0x2a, 0x0f, 0xe5, 0x57, 0x65, 0x35, 0xdc, 0x6c, 0x8d, 0xf5, 0x85, 0x2c, 0x66, 0xde, 0x1e, - 0x37, 0xdc, 0x6b, 0x34, 0x31, 0xf5, 0xcf, 0x18, 0x07, 0x39, 0x27, 0x49, 0x18, 0x14, 0x05, 0x45, - 0x62, 0xd0, 0xf4, 0x2e, 0x8a, 0xdc, 0x47, 0xc9, 0x94, 0x6b, 0x16, 0xc3, 0x02, 0xf0, 
0xe2, 0x7f, - 0x80, 0xf2, 0xcf, 0x20, 0xa6, 0x0b, 0xdc, 0xb3, 0xfb, 0xb8, 0x54, 0xb3, 0x88, 0x30, 0xae, 0x95, - 0x96, 0xb7, 0xa1, 0xcd, 0x9f, 0xcb, 0x68, 0xed, 0x6d, 0xb5, 0xba, 0x83, 0x88, 0x2a, 0x65, 0x7e, - 0x44, 0x2b, 0xc5, 0x4d, 0x4e, 0xa9, 0xa6, 0x96, 0x31, 0x36, 0x9c, 0xfe, 0xee, 0x36, 0x6e, 0xd6, - 0x7c, 0x6d, 0x8c, 0x93, 0x30, 0x28, 0x0a, 0x0a, 0x17, 0x6a, 0x3c, 0xdb, 0xc1, 0x27, 0xd3, 0xcf, - 0xe0, 0xeb, 0x63, 0xd0, 0xd4, 0x35, 0xcf, 0xb3, 0x51, 0x27, 0xcf, 0x46, 0xa8, 0xa9, 0x79, 0xd7, - 0xae, 0xe6, 0x73, 0xd4, 0x4f, 0xa4, 0x98, 0x31, 0xc5, 0x04, 0x07, 0x69, 0x2d, 0x8d, 0x0d, 0x67, - 0xd5, 0x7d, 0x5a, 0x23, 0xfd, 0x49, 0xd3, 0xf2, 0xda, 0x3a, 0x33, 0x42, 0x28, 0xa1, 0x92, 0xc6, - 0xa0, 0x41, 0x2a, 0xab, 0x3b, 0xee, 0x3a, 0xfd, 0xdd, 0x7d, 0x7c, 0x7f, 0x02, 0x70, 0xfb, 0x5a, - 0x78, 0x72, 0x8d, 0x1e, 0x72, 0x2d, 0xe7, 0xcd, 0x2f, 0x36, 0x0d, 0xaf, 0xe5, 0x6f, 0x86, 0x68, - 0x5d, 0x82, 0x1f, 0x51, 0x16, 0x4f, 0x44, 0xc4, 0xfc, 0xb9, 0xb5, 0x5c, 0xfe, 0xe6, 0x61, 0x9e, - 0x8d, 0xd6, 0xbd, 0x76, 0xe3, 0x4f, 0x36, 0xda, 0x5e, 0xcc, 0x0e, 0x9e, 0x80, 0x54, 0x4c, 0x69, - 0xe0, 0xfa, 0x9d, 0x88, 0xd2, 0x18, 0x6e, 0x30, 0xde, 0x4d, 0x6f, 0x73, 0x0f, 0xad, 0xc5, 0x22, - 0xe5, 0xfa, 0x24, 0xd1, 0x4c, 0x70, 0x65, 0xf5, 0xc6, 0x5d, 0x67, 0xd5, 0x1d, 0xe4, 0xd9, 0x68, - 0xed, 0xb8, 0x55, 0xf7, 0x6e, 0xa8, 0xcc, 0x23, 0xb4, 0x41, 0xa3, 0x48, 0x7c, 0xa9, 0x06, 0x1c, - 0x7e, 0x4d, 0x28, 0x2f, 0x56, 0x65, 0x3d, 0x1a, 0x1b, 0xce, 0x8a, 0x6b, 0xe5, 0xd9, 0x68, 0xe3, - 0xf5, 0x1d, 0x7d, 0xef, 0x4e, 0x6a, 0xf8, 0x0a, 0x3d, 0xb9, 0xb5, 0x23, 0x73, 0x80, 0xba, 0x21, - 0xcc, 0xcb, 0x14, 0xac, 0x7a, 0xc5, 0xa7, 0xb9, 0x81, 0x7a, 0x33, 0x1a, 0xa5, 0x50, 0x3d, 0x9a, - 0x57, 0x1d, 0x5e, 0x2e, 0xed, 0x1b, 0x9b, 0x3f, 0x0c, 0x34, 0x68, 0x2f, 0xfc, 0x88, 0x29, 0x6d, - 0x7e, 0x58, 0xc8, 0x12, 0x7e, 0x58, 0x96, 0x0a, 0xba, 0x4c, 0xd2, 0xa0, 0x7e, 0xa6, 0x95, 0x7f, - 0x95, 0x56, 0x8e, 0x8e, 0x51, 0x8f, 0x69, 0x88, 0x95, 0xb5, 0x54, 0x66, 0xc1, 0x79, 0x68, 0x16, - 0xdc, 0xf5, 0xda, 0xb4, 0xf7, 0xa6, 0xc0, 0xbd, 0xca, 0xc5, 0xdd, 0x3a, 0xbf, 0xb2, 0x3b, 0x17, - 0x57, 0x76, 0xe7, 0xf2, 0xca, 0xee, 0x7c, 0xcb, 0x6d, 0xe3, 0x3c, 0xb7, 0x8d, 0x8b, 0xdc, 0x36, - 0x2e, 0x73, 0xdb, 0xf8, 0x95, 0xdb, 0xc6, 0xf7, 0xdf, 0x76, 0xe7, 0xfd, 0xe3, 0xda, 0xf1, 0x6f, - 0x00, 0x00, 0x00, 0xff, 0xff, 0x1b, 0xae, 0x44, 0x72, 0xc1, 0x04, 0x00, 0x00, + // 622 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xcd, 0x6e, 0xd3, 0x40, + 0x14, 0x85, 0xe3, 0x86, 0xd0, 0x76, 0xd2, 0x8a, 0xd4, 0x74, 0x61, 0x65, 0x61, 0x47, 0x5d, 0x45, + 0x48, 0x1d, 0xb7, 0xa5, 0xa0, 0x0a, 0x89, 0x05, 0xae, 0xba, 0x40, 0x6a, 0xd4, 0xc8, 0x48, 0x15, + 0x42, 0x2c, 0x98, 0x38, 0x17, 0x77, 0x88, 0x3d, 0x63, 0xcd, 0x8c, 0x03, 0xd9, 0xf1, 0x08, 0xbc, + 0x01, 0x8f, 0xc2, 0xb6, 0xcb, 0x2e, 0xbb, 0xb2, 0xa8, 0x79, 0x8b, 0xae, 0x90, 0x7f, 0x68, 0xdc, + 0xfc, 0x88, 0xee, 0x3c, 0xf7, 0x9e, 0xef, 0xdc, 0x99, 0xeb, 0x83, 0x8e, 0x47, 0x47, 0x12, 0x53, + 0x6e, 0x8f, 0xe2, 0x01, 0x08, 0x06, 0x0a, 0xa4, 0x3d, 0x06, 0x36, 0xe4, 0xc2, 0x2e, 0x1b, 0x24, + 0xa2, 0xb6, 0x54, 0x5c, 0x10, 0x1f, 0xec, 0xf1, 0xfe, 0x00, 0x14, 0xd9, 0xb7, 0x7d, 0x60, 0x20, + 0x88, 0x82, 0x21, 0x8e, 0x04, 0x57, 0x5c, 0x6f, 0x17, 0x5a, 0x4c, 0x22, 0x8a, 0x4b, 0x2d, 0x2e, + 0xb5, 0xed, 0x5d, 0x9f, 0xaa, 0x8b, 0x78, 0x80, 0x3d, 0x1e, 0xda, 0x3e, 0xf7, 0xb9, 0x9d, 0x23, + 0x83, 0xf8, 0x73, 0x7e, 0xca, 0x0f, 0xf9, 0x57, 0x61, 0xd5, 0xde, 0xa9, 0x8c, 0xf5, 0xb8, 0xc8, + 0x66, 0xce, 0x8e, 0x6b, 0x1f, 0x4e, 0x35, 0x21, 0xf1, 0x2e, 0x28, 0x03, 0x31, 
0xb1, 0xa3, 0x91, + 0x9f, 0x15, 0xa4, 0x1d, 0x82, 0x22, 0x8b, 0x28, 0x7b, 0x19, 0x25, 0x62, 0xa6, 0x68, 0x08, 0x73, + 0xc0, 0xcb, 0xff, 0x01, 0xd2, 0xbb, 0x80, 0x90, 0xcc, 0x71, 0xcf, 0x97, 0x71, 0xb1, 0xa2, 0x81, + 0x4d, 0x99, 0x92, 0x4a, 0xcc, 0x42, 0x3b, 0x3f, 0x1b, 0x68, 0xe3, 0x5d, 0xb1, 0xba, 0xe3, 0x80, + 0x48, 0xa9, 0x7f, 0x42, 0x6b, 0xd9, 0x4b, 0x86, 0x44, 0x11, 0x43, 0xeb, 0x68, 0xdd, 0xe6, 0xc1, + 0x1e, 0x9e, 0xae, 0xf9, 0xce, 0x18, 0x47, 0x23, 0x3f, 0x2b, 0x48, 0x9c, 0xa9, 0xf1, 0x78, 0x1f, + 0x9f, 0x0d, 0xbe, 0x80, 0xa7, 0x7a, 0xa0, 0x88, 0xa3, 0x5f, 0x26, 0x56, 0x2d, 0x4d, 0x2c, 0x34, + 0xad, 0xb9, 0x77, 0xae, 0xfa, 0x0b, 0xd4, 0x8c, 0x04, 0x1f, 0x53, 0x49, 0x39, 0x03, 0x61, 0xac, + 0x74, 0xb4, 0xee, 0xba, 0xf3, 0xb4, 0x44, 0x9a, 0xfd, 0x69, 0xcb, 0xad, 0xea, 0xf4, 0x00, 0xa1, + 0x88, 0x08, 0x12, 0x82, 0x02, 0x21, 0x8d, 0x7a, 0xa7, 0xde, 0x6d, 0x1e, 0x1c, 0xe1, 0xe5, 0x09, + 0xc0, 0xd5, 0x67, 0xe1, 0xfe, 0x1d, 0x7a, 0xc2, 0x94, 0x98, 0x4c, 0xaf, 0x38, 0x6d, 0xb8, 0x15, + 0x7f, 0x7d, 0x84, 0x36, 0x05, 0x78, 0x01, 0xa1, 0x61, 0x9f, 0x07, 0xd4, 0x9b, 0x18, 0x8f, 0xf2, + 0x6b, 0x9e, 0xa4, 0x89, 0xb5, 0xe9, 0x56, 0x1b, 0xb7, 0x89, 0xb5, 0x37, 0x9f, 0x1d, 0xdc, 0x07, + 0x21, 0xa9, 0x54, 0xc0, 0xd4, 0x39, 0x0f, 0xe2, 0x10, 0xee, 0x31, 0xee, 0x7d, 0x6f, 0xfd, 0x10, + 0x6d, 0x84, 0x3c, 0x66, 0xea, 0x2c, 0x52, 0x94, 0x33, 0x69, 0x34, 0x3a, 0xf5, 0xee, 0xba, 0xd3, + 0x4a, 0x13, 0x6b, 0xa3, 0x57, 0xa9, 0xbb, 0xf7, 0x54, 0xfa, 0x29, 0xda, 0x26, 0x41, 0xc0, 0xbf, + 0x16, 0x03, 0x4e, 0xbe, 0x45, 0x84, 0x65, 0xab, 0x32, 0x1e, 0x77, 0xb4, 0xee, 0x9a, 0x63, 0xa4, + 0x89, 0xb5, 0xfd, 0x66, 0x41, 0xdf, 0x5d, 0x48, 0xe9, 0xef, 0xd1, 0xd6, 0x38, 0x2f, 0x39, 0x94, + 0x0d, 0x29, 0xf3, 0x7b, 0x7c, 0x08, 0xc6, 0x6a, 0xfe, 0xe8, 0x67, 0x69, 0x62, 0x6d, 0x9d, 0xcf, + 0x36, 0x6f, 0x17, 0x15, 0xdd, 0x79, 0x93, 0xf6, 0x6b, 0xf4, 0x64, 0x66, 0xfb, 0x7a, 0x0b, 0xd5, + 0x47, 0x30, 0xc9, 0xf3, 0xb5, 0xee, 0x66, 0x9f, 0xfa, 0x36, 0x6a, 0x8c, 0x49, 0x10, 0x43, 0x11, + 0x07, 0xb7, 0x38, 0xbc, 0x5a, 0x39, 0xd2, 0x76, 0x7e, 0x69, 0xa8, 0x55, 0xfd, 0x95, 0xa7, 0x54, + 0x2a, 0xfd, 0xe3, 0x5c, 0x4a, 0xf1, 0xc3, 0x52, 0x9a, 0xd1, 0x79, 0x46, 0x5b, 0x65, 0x00, 0xd6, + 0xfe, 0x55, 0x2a, 0x09, 0xed, 0xa1, 0x06, 0x55, 0x10, 0x4a, 0x63, 0x25, 0x4f, 0x59, 0xf7, 0xa1, + 0x29, 0x73, 0x36, 0x4b, 0xd3, 0xc6, 0xdb, 0x0c, 0x77, 0x0b, 0x17, 0x67, 0xf7, 0xf2, 0xc6, 0xac, + 0x5d, 0xdd, 0x98, 0xb5, 0xeb, 0x1b, 0xb3, 0xf6, 0x3d, 0x35, 0xb5, 0xcb, 0xd4, 0xd4, 0xae, 0x52, + 0x53, 0xbb, 0x4e, 0x4d, 0xed, 0x77, 0x6a, 0x6a, 0x3f, 0xfe, 0x98, 0xb5, 0x0f, 0xab, 0xa5, 0xe3, + 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x66, 0xe2, 0x8e, 0x84, 0x1b, 0x05, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.proto b/vendor/k8s.io/api/storage/v1beta1/generated.proto index f8070b67cf41..b0e030c01b4a 100644 --- a/vendor/k8s.io/api/storage/v1beta1/generated.proto +++ b/vendor/k8s.io/api/storage/v1beta1/generated.proto @@ -63,6 +63,13 @@ message StorageClass { // AllowVolumeExpansion shows whether the storage class allow volume expand // +optional optional bool allowVolumeExpansion = 6; + + // VolumeBindingMode indicates how PersistentVolumeClaims should be + // provisioned and bound. When unset, VolumeBindingImmediate is used. + // This field is alpha-level and is only honored by servers that enable + // the VolumeScheduling feature. + // +optional + optional string volumeBindingMode = 7; } // StorageClassList is a collection of storage classes. 
diff --git a/vendor/k8s.io/api/storage/v1beta1/register.go b/vendor/k8s.io/api/storage/v1beta1/register.go index 759ba81aa9b7..7f1f0c8e8359 100644 --- a/vendor/k8s.io/api/storage/v1beta1/register.go +++ b/vendor/k8s.io/api/storage/v1beta1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &StorageClass{}, diff --git a/vendor/k8s.io/api/storage/v1beta1/types.go b/vendor/k8s.io/api/storage/v1beta1/types.go index e5036b55b335..7fb9ad98077b 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types.go +++ b/vendor/k8s.io/api/storage/v1beta1/types.go @@ -59,6 +59,13 @@ type StorageClass struct { // AllowVolumeExpansion shows whether the storage class allow volume expand // +optional AllowVolumeExpansion *bool `json:"allowVolumeExpansion,omitempty" protobuf:"varint,6,opt,name=allowVolumeExpansion"` + + // VolumeBindingMode indicates how PersistentVolumeClaims should be + // provisioned and bound. When unset, VolumeBindingImmediate is used. + // This field is alpha-level and is only honored by servers that enable + // the VolumeScheduling feature. + // +optional + VolumeBindingMode *VolumeBindingMode `json:"volumeBindingMode,omitempty" protobuf:"bytes,7,opt,name=volumeBindingMode"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -74,3 +81,18 @@ type StorageClassList struct { // Items is the list of StorageClasses Items []StorageClass `json:"items" protobuf:"bytes,2,rep,name=items"` } + +// VolumeBindingMode indicates how PersistentVolumeClaims should be bound. +type VolumeBindingMode string + +const ( + // VolumeBindingImmediate indicates that PersistentVolumeClaims should be + // immediately provisioned and bound. This is the default mode. + VolumeBindingImmediate VolumeBindingMode = "Immediate" + + // VolumeBindingWaitForFirstConsumer indicates that PersistentVolumeClaims + // should not be provisioned and bound until the first Pod is created that + // references the PeristentVolumeClaim. The volume provisioning and + // binding will occur during Pod scheduing. + VolumeBindingWaitForFirstConsumer VolumeBindingMode = "WaitForFirstConsumer" +) diff --git a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go index e2148c231f45..85886f7dfb3f 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go @@ -35,6 +35,7 @@ var map_StorageClass = map[string]string{ "reclaimPolicy": "Dynamically provisioned PersistentVolumes of this storage class are created with this reclaimPolicy. Defaults to Delete.", "mountOptions": "Dynamically provisioned PersistentVolumes of this storage class are created with these mountOptions, e.g. [\"ro\", \"soft\"]. Not validated - mount of the PVs will simply fail if one is invalid.", "allowVolumeExpansion": "AllowVolumeExpansion shows whether the storage class allow volume expand", + "volumeBindingMode": "VolumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. 
This field is alpha-level and is only honored by servers that enable the VolumeScheduling feature.", } func (StorageClass) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go index bf1f91a67d77..9d1e79823b01 100644 --- a/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go @@ -22,32 +22,9 @@ package v1beta1 import ( v1 "k8s.io/api/core/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StorageClass).DeepCopyInto(out.(*StorageClass)) - return nil - }, InType: reflect.TypeOf(&StorageClass{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StorageClassList).DeepCopyInto(out.(*StorageClassList)) - return nil - }, InType: reflect.TypeOf(&StorageClassList{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StorageClass) DeepCopyInto(out *StorageClass) { *out = *in @@ -83,6 +60,15 @@ func (in *StorageClass) DeepCopyInto(out *StorageClass) { **out = **in } } + if in.VolumeBindingMode != nil { + in, out := &in.VolumeBindingMode, &out.VolumeBindingMode + if *in == nil { + *out = nil + } else { + *out = new(VolumeBindingMode) + **out = **in + } + } return } diff --git a/vendor/k8s.io/apiextensions-apiserver/LICENSE b/vendor/k8s.io/apiextensions-apiserver/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/BUILD b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/BUILD index a7b948801fe4..303d94b93822 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/BUILD +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/BUILD @@ -17,9 +17,9 @@ go_library( "types_jsonschema.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions", deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], @@ -28,6 +28,7 @@ go_library( go_test( name = "go_default_test", srcs = ["helpers_test.go"], + importpath = "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions", library = ":go_default_library", deps = ["//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library"], ) diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/doc.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/doc.go index f53f641317ee..c94916a7b8e1 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/doc.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // Package apiextensions is the internal version of the API. // +groupName=apiextensions.k8s.io diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/helpers.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/helpers.go index 299ddc22b45d..8dc7f72d6603 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/helpers.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/helpers.go @@ -16,11 +16,18 @@ limitations under the License. package apiextensions +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + // SetCRDCondition sets the status condition. 
It either overwrites the existing one or // creates a new one func SetCRDCondition(crd *CustomResourceDefinition, newCondition CustomResourceDefinitionCondition) { existingCondition := FindCRDCondition(crd, newCondition.Type) if existingCondition == nil { + newCondition.LastTransitionTime = metav1.NewTime(time.Now()) crd.Status.Conditions = append(crd.Status.Conditions, newCondition) return } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install/BUILD b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install/BUILD index 33c068fd7396..3a0a056a2830 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install/BUILD +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["roundtrip_test.go"], + importpath = "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install", library = ":go_default_library", deps = [ "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/fuzzer:go_default_library", @@ -19,6 +20,7 @@ go_test( go_library( name = "go_default_library", srcs = ["install.go"], + importpath = "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install", deps = [ "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library", diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/register.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/register.go index df3bc363f3e1..273f7f123b64 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/register.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = SchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. 
func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &CustomResourceDefinition{}, diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go index fba9c90bc26b..880da65ce956 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go @@ -47,7 +47,7 @@ type CustomResourceDefinitionNames struct { ListKind string } -// ResourceScope is an enum defining the different scopes availabe to a custom resource +// ResourceScope is an enum defining the different scopes available to a custom resource type ResourceScope string const ( diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/BUILD b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/BUILD index 93a07cfe7a1d..32f2ab8fed5b 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/BUILD +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/BUILD @@ -22,6 +22,7 @@ go_library( "zz_generated.deepcopy.go", "zz_generated.defaults.go", ], + importpath = "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1", deps = [ "//vendor/github.com/gogo/protobuf/proto:go_default_library", "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", @@ -59,6 +60,7 @@ go_test( "conversion_test.go", "marshal_test.go", ], + importpath = "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1", library = ":go_default_library", deps = [ "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions:go_default_library", diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/doc.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/doc.go index 2aae533f5f50..5ca4e85f3450 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/doc.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:conversion-gen=k8s.io/apiextensions-apiserver/pkg/apis/apiextensions // +k8s:defaulter-gen=TypeMeta diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto index 57a69e60c1b2..7cfc44c4cf12 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto @@ -105,7 +105,6 @@ message CustomResourceDefinitionSpec { optional string scope = 4; // Validation describes the validation methods for CustomResources - // This field is alpha-level and should only be sent to servers that enable the CustomResourceValidation feature. 
// +optional optional CustomResourceValidation validation = 5; } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/register.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/register.go index 8ced548fa968..77f849975f87 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/register.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/register.go @@ -43,7 +43,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &CustomResourceDefinition{}, diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go index fe61f32151cd..9ac37efe0fb1 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go @@ -29,7 +29,6 @@ type CustomResourceDefinitionSpec struct { // Scope indicates whether this resource is cluster or namespace scoped. Default is namespaced Scope ResourceScope `json:"scope" protobuf:"bytes,4,opt,name=scope,casttype=ResourceScope"` // Validation describes the validation methods for CustomResources - // This field is alpha-level and should only be sent to servers that enable the CustomResourceValidation feature. // +optional Validation *CustomResourceValidation `json:"validation,omitempty" protobuf:"bytes,5,opt,name=validation"` } @@ -49,7 +48,7 @@ type CustomResourceDefinitionNames struct { ListKind string `json:"listKind,omitempty" protobuf:"bytes,5,opt,name=listKind"` } -// ResourceScope is an enum defining the different scopes availabe to a custom resource +// ResourceScope is an enum defining the different scopes available to a custom resource type ResourceScope string const ( diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go index 6f97eb6db634..697f87417fc8 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go @@ -21,76 +21,9 @@ limitations under the License. package v1beta1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CustomResourceDefinition).DeepCopyInto(out.(*CustomResourceDefinition)) - return nil - }, InType: reflect.TypeOf(&CustomResourceDefinition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CustomResourceDefinitionCondition).DeepCopyInto(out.(*CustomResourceDefinitionCondition)) - return nil - }, InType: reflect.TypeOf(&CustomResourceDefinitionCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CustomResourceDefinitionList).DeepCopyInto(out.(*CustomResourceDefinitionList)) - return nil - }, InType: reflect.TypeOf(&CustomResourceDefinitionList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CustomResourceDefinitionNames).DeepCopyInto(out.(*CustomResourceDefinitionNames)) - return nil - }, InType: reflect.TypeOf(&CustomResourceDefinitionNames{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CustomResourceDefinitionSpec).DeepCopyInto(out.(*CustomResourceDefinitionSpec)) - return nil - }, InType: reflect.TypeOf(&CustomResourceDefinitionSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CustomResourceDefinitionStatus).DeepCopyInto(out.(*CustomResourceDefinitionStatus)) - return nil - }, InType: reflect.TypeOf(&CustomResourceDefinitionStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CustomResourceValidation).DeepCopyInto(out.(*CustomResourceValidation)) - return nil - }, InType: reflect.TypeOf(&CustomResourceValidation{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ExternalDocumentation).DeepCopyInto(out.(*ExternalDocumentation)) - return nil - }, InType: reflect.TypeOf(&ExternalDocumentation{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*JSON).DeepCopyInto(out.(*JSON)) - return nil - }, InType: reflect.TypeOf(&JSON{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*JSONSchemaProps).DeepCopyInto(out.(*JSONSchemaProps)) - return nil - }, InType: reflect.TypeOf(&JSONSchemaProps{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*JSONSchemaPropsOrArray).DeepCopyInto(out.(*JSONSchemaPropsOrArray)) - return nil - }, InType: reflect.TypeOf(&JSONSchemaPropsOrArray{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*JSONSchemaPropsOrBool).DeepCopyInto(out.(*JSONSchemaPropsOrBool)) - return nil - }, InType: reflect.TypeOf(&JSONSchemaPropsOrBool{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*JSONSchemaPropsOrStringArray).DeepCopyInto(out.(*JSONSchemaPropsOrStringArray)) - return nil - }, InType: reflect.TypeOf(&JSONSchemaPropsOrStringArray{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. func (in *CustomResourceDefinition) DeepCopyInto(out *CustomResourceDefinition) { *out = *in diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/BUILD b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/BUILD index 201115a4ef44..25b1bbd5919c 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/BUILD +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/BUILD @@ -9,6 +9,7 @@ load( go_library( name = "go_default_library", srcs = ["validation.go"], + importpath = "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation", deps = [ "//vendor/github.com/go-openapi/spec:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions:go_default_library", @@ -23,6 +24,7 @@ go_library( go_test( name = "go_default_test", srcs = ["validation_test.go"], + importpath = "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation", library = ":go_default_library", deps = [ "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions:go_default_library", diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go index ae6f2d79f89d..f7d57c22e8d9 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go @@ -21,72 +21,9 @@ limitations under the License. package apiextensions import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CustomResourceDefinition).DeepCopyInto(out.(*CustomResourceDefinition)) - return nil - }, InType: reflect.TypeOf(&CustomResourceDefinition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CustomResourceDefinitionCondition).DeepCopyInto(out.(*CustomResourceDefinitionCondition)) - return nil - }, InType: reflect.TypeOf(&CustomResourceDefinitionCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CustomResourceDefinitionList).DeepCopyInto(out.(*CustomResourceDefinitionList)) - return nil - }, InType: reflect.TypeOf(&CustomResourceDefinitionList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CustomResourceDefinitionNames).DeepCopyInto(out.(*CustomResourceDefinitionNames)) - return nil - }, InType: reflect.TypeOf(&CustomResourceDefinitionNames{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CustomResourceDefinitionSpec).DeepCopyInto(out.(*CustomResourceDefinitionSpec)) - return nil - }, InType: reflect.TypeOf(&CustomResourceDefinitionSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CustomResourceDefinitionStatus).DeepCopyInto(out.(*CustomResourceDefinitionStatus)) - return nil - }, InType: reflect.TypeOf(&CustomResourceDefinitionStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CustomResourceValidation).DeepCopyInto(out.(*CustomResourceValidation)) - return nil - }, InType: reflect.TypeOf(&CustomResourceValidation{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ExternalDocumentation).DeepCopyInto(out.(*ExternalDocumentation)) - return nil - }, InType: reflect.TypeOf(&ExternalDocumentation{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*JSONSchemaProps).DeepCopyInto(out.(*JSONSchemaProps)) - return nil - }, InType: reflect.TypeOf(&JSONSchemaProps{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*JSONSchemaPropsOrArray).DeepCopyInto(out.(*JSONSchemaPropsOrArray)) - return nil - }, InType: reflect.TypeOf(&JSONSchemaPropsOrArray{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*JSONSchemaPropsOrBool).DeepCopyInto(out.(*JSONSchemaPropsOrBool)) - return nil - }, InType: reflect.TypeOf(&JSONSchemaPropsOrBool{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*JSONSchemaPropsOrStringArray).DeepCopyInto(out.(*JSONSchemaPropsOrStringArray)) - return nil - }, InType: reflect.TypeOf(&JSONSchemaPropsOrStringArray{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *CustomResourceDefinition) DeepCopyInto(out *CustomResourceDefinition) { *out = *in diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/BUILD b/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/BUILD index eb29291c7d42..7b5d2d283374 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/BUILD +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/BUILD @@ -3,6 +3,7 @@ package(default_visibility = ["//visibility:public"]) load( "@io_bazel_rules_go//go:def.bzl", "go_library", + "go_test", ) go_library( @@ -13,6 +14,7 @@ go_library( "customresource_discovery_controller.go", "customresource_handler.go", ], + importpath = "k8s.io/apiextensions-apiserver/pkg/apiserver", deps = [ "//vendor/github.com/go-openapi/spec:go_default_library", "//vendor/github.com/go-openapi/strfmt:go_default_library", @@ -32,6 +34,7 @@ go_library( "//vendor/k8s.io/apiextensions-apiserver/pkg/controller/status:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/registry/customresource:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", @@ -78,3 +81,11 @@ filegroup( ], tags = ["automanaged"], ) + +go_test( + name = "go_default_test", + srcs = ["customresource_handler_test.go"], + importpath = "k8s.io/apiextensions-apiserver/pkg/apiserver", + library = ":go_default_library", + deps = ["//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library"], +) diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go index a64f0260efe6..773a06577265 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go @@ -17,7 +17,6 @@ limitations under the License. package apiserver import ( - "bytes" "fmt" "io" "net/http" @@ -31,6 +30,7 @@ import ( "github.com/go-openapi/validate" "github.com/golang/glog" + apiequality "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -80,6 +80,11 @@ type crdHandler struct { // crdInfo stores enough information to serve the storage for the custom resource type crdInfo struct { + // spec and acceptedNames are used to compare against if a change is made on a CRD. We only update + // the storage if one of these changes. 
+ spec *apiextensions.CustomResourceDefinitionSpec + acceptedNames *apiextensions.CustomResourceDefinitionNames + storage *customresource.REST requestScope handlers.RequestScope } @@ -109,6 +114,9 @@ func NewCustomResourceDefinitionHandler( crdInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ UpdateFunc: ret.updateCustomResourceDefinition, + DeleteFunc: func(obj interface{}) { + ret.removeDeadStorage() + }, }) ret.customStorage.Store(crdStorageMap{}) @@ -246,7 +254,7 @@ func (r *crdHandler) removeDeadStorage() { return } - for uid := range storageMap { + for uid, s := range storageMap { found := false for _, crd := range allCustomResourceDefinitions { if crd.UID == uid { @@ -255,6 +263,8 @@ func (r *crdHandler) removeDeadStorage() { } } if !found { + glog.V(4).Infof("Removing dead CRD storage for %v", s.requestScope.Resource) + s.storage.DestroyFunc() delete(storageMap, uid) } } @@ -299,10 +309,9 @@ func (r *crdHandler) getServingInfoFor(crd *apiextensions.CustomResourceDefiniti &metav1.GetOptions{}, &metav1.DeleteOptions{}, ) - parameterScheme.AddGeneratedDeepCopyFuncs(metav1.GetGeneratedDeepCopyFuncs()...) parameterCodec := runtime.NewParameterCodec(parameterScheme) - kind := schema.GroupVersionKind{Group: crd.Spec.Group, Version: crd.Spec.Version, Kind: crd.Spec.Names.Kind} + kind := schema.GroupVersionKind{Group: crd.Spec.Group, Version: crd.Spec.Version, Kind: crd.Status.AcceptedNames.Kind} typer := unstructuredObjectTyper{ delegate: parameterScheme, unstructuredTyper: discovery.NewUnstructuredObjectTyper(nil), @@ -320,9 +329,8 @@ func (r *crdHandler) getServingInfoFor(crd *apiextensions.CustomResourceDefiniti validator := validate.NewSchemaValidator(openapiSchema, nil, "", strfmt.Default) storage := customresource.NewREST( - schema.GroupResource{Group: crd.Spec.Group, Resource: crd.Spec.Names.Plural}, - schema.GroupVersionKind{Group: crd.Spec.Group, Version: crd.Spec.Version, Kind: crd.Spec.Names.ListKind}, - UnstructuredCopier{}, + schema.GroupResource{Group: crd.Spec.Group, Resource: crd.Status.AcceptedNames.Plural}, + schema.GroupVersionKind{Group: crd.Spec.Group, Version: crd.Spec.Version, Kind: crd.Status.AcceptedNames.ListKind}, customresource.NewStrategy( typer, crd.Spec.Scope == apiextensions.NamespaceScoped, @@ -340,6 +348,8 @@ func (r *crdHandler) getServingInfoFor(crd *apiextensions.CustomResourceDefiniti selfLinkPrefix = "/" + path.Join("apis", crd.Spec.Group, crd.Spec.Version, "namespaces") + "/" } + clusterScoped := crd.Spec.Scope == apiextensions.ClusterScoped + requestScope := handlers.RequestScope{ Namer: handlers.ContextBasedNaming{ GetContext: func(req *http.Request) apirequest.Context { @@ -347,7 +357,7 @@ func (r *crdHandler) getServingInfoFor(crd *apiextensions.CustomResourceDefiniti return ret }, SelfLinker: meta.NewAccessor(), - ClusterScoped: crd.Spec.Scope == apiextensions.ClusterScoped, + ClusterScoped: clusterScoped, SelfLinkPathPrefix: selfLinkPrefix, }, ContextFunc: func(req *http.Request) apirequest.Context { @@ -358,14 +368,16 @@ func (r *crdHandler) getServingInfoFor(crd *apiextensions.CustomResourceDefiniti Serializer: unstructuredNegotiatedSerializer{typer: typer, creator: creator}, ParameterCodec: parameterCodec, - Creater: creator, - Convertor: unstructured.UnstructuredObjectConverter{}, + Creater: creator, + Convertor: crdObjectConverter{ + UnstructuredObjectConverter: unstructured.UnstructuredObjectConverter{}, + clusterScoped: clusterScoped, + }, Defaulter: unstructuredDefaulter{parameterScheme}, - Copier: 
UnstructuredCopier{}, Typer: typer, UnsafeConvertor: unstructured.UnstructuredObjectConverter{}, - Resource: schema.GroupVersionResource{Group: crd.Spec.Group, Version: crd.Spec.Version, Resource: crd.Spec.Names.Plural}, + Resource: schema.GroupVersionResource{Group: crd.Spec.Group, Version: crd.Spec.Version, Resource: crd.Status.AcceptedNames.Plural}, Kind: kind, Subresource: "", @@ -373,6 +385,9 @@ func (r *crdHandler) getServingInfoFor(crd *apiextensions.CustomResourceDefiniti } ret = &crdInfo{ + spec: &crd.Spec, + acceptedNames: &crd.Status.AcceptedNames, + storage: storage, requestScope: requestScope, } @@ -390,23 +405,54 @@ func (r *crdHandler) getServingInfoFor(crd *apiextensions.CustomResourceDefiniti return ret, nil } -func (c *crdHandler) updateCustomResourceDefinition(oldObj, _ interface{}) { +// crdObjectConverter is a converter that supports field selectors for CRDs. +type crdObjectConverter struct { + unstructured.UnstructuredObjectConverter + clusterScoped bool +} + +func (c crdObjectConverter) ConvertFieldLabel(version, kind, label, value string) (string, string, error) { + // We currently only support metadata.namespace and metadata.name. + switch { + case label == "metadata.name": + return label, value, nil + case !c.clusterScoped && label == "metadata.namespace": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } +} + +func (c *crdHandler) updateCustomResourceDefinition(oldObj, newObj interface{}) { oldCRD := oldObj.(*apiextensions.CustomResourceDefinition) - glog.V(4).Infof("Updating customresourcedefinition %s", oldCRD.Name) + newCRD := newObj.(*apiextensions.CustomResourceDefinition) c.customStorageLock.Lock() defer c.customStorageLock.Unlock() - storageMap := c.customStorage.Load().(crdStorageMap) + + oldInfo, found := storageMap[newCRD.UID] + if !found { + return + } + if apiequality.Semantic.DeepEqual(&newCRD.Spec, oldInfo.spec) && apiequality.Semantic.DeepEqual(&newCRD.Status.AcceptedNames, oldInfo.acceptedNames) { + glog.V(6).Infof("Ignoring customresourcedefinition %s update because neither spec, nor accepted names changed", oldCRD.Name) + return + } + + glog.V(4).Infof("Updating customresourcedefinition %s", oldCRD.Name) storageMap2 := make(crdStorageMap, len(storageMap)) // Copy because we cannot write to storageMap without a race // as it is used without locking elsewhere for k, v := range storageMap { + if k == oldCRD.UID { + v.storage.DestroyFunc() + continue + } storageMap2[k] = v } - delete(storageMap2, oldCRD.UID) c.customStorage.Store(storageMap2) } @@ -499,28 +545,6 @@ func (c unstructuredCreator) New(kind schema.GroupVersionKind) (runtime.Object, return ret, nil } -type UnstructuredCopier struct{} - -func (UnstructuredCopier) Copy(obj runtime.Object) (runtime.Object, error) { - if _, ok := obj.(runtime.Unstructured); !ok { - // Callers should not use this UnstructuredCopier for things other than Unstructured. - // If they do, the copy they get back will become Unstructured, which can lead to - // difficult-to-debug errors downstream. To make such errors more obvious, - // we explicitly reject anything that isn't Unstructured. 
- return nil, fmt.Errorf("UnstructuredCopier can't copy type %T", obj) - } - - // serialize and deserialize to ensure a clean copy - buf := &bytes.Buffer{} - err := unstructured.UnstructuredJSONScheme.Encode(obj, buf) - if err != nil { - return nil, err - } - out := &unstructured.Unstructured{} - result, _, err := unstructured.UnstructuredJSONScheme.Decode(buf.Bytes(), nil, out) - return result, err -} - type unstructuredDefaulter struct { delegate runtime.ObjectDefaulter } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/BUILD b/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/BUILD index b64622758b0a..a85afaef5135 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/BUILD +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/BUILD @@ -1,16 +1,15 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", + "go_test", ) go_library( name = "go_default_library", srcs = ["validation.go"], - tags = ["automanaged"], + importpath = "k8s.io/apiextensions-apiserver/pkg/apiserver/validation", deps = [ "//vendor/github.com/go-openapi/spec:go_default_library", "//vendor/github.com/go-openapi/validate:go_default_library", @@ -30,3 +29,21 @@ filegroup( srcs = [":package-srcs"], tags = ["automanaged"], ) + +go_test( + name = "go_default_test", + srcs = ["validation_test.go"], + importpath = "k8s.io/apiextensions-apiserver/pkg/apiserver/validation", + library = ":go_default_library", + deps = [ + "//vendor/github.com/go-openapi/spec:go_default_library", + "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions:go_default_library", + "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/fuzzer:go_default_library", + "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/testing/fuzzer:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", + ], +) diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/BUILD b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/BUILD index 494522755ca3..c6312b7301f0 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/BUILD +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/BUILD @@ -11,6 +11,7 @@ go_library( "clientset.go", "doc.go", ], + importpath = "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1:go_default_library", diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/doc.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/doc.go index 7d72e7fb20a3..7f670fed477b 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/doc.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. 
- // This package has the automatically generated clientset. package clientset diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/BUILD b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/BUILD index c37ee3f5c0a2..9906c9c1a13e 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/BUILD +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/BUILD @@ -11,6 +11,7 @@ go_library( "doc.go", "register.go", ], + importpath = "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme", deps = [ "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/doc.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/doc.go index 5d8ec824f0fb..3ec2200d0990 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/doc.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. - // This package contains the scheme of the automatically generated clientset. package scheme diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/BUILD b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/BUILD index 3cb4e1f6219b..054ceeca81a3 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/BUILD +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/BUILD @@ -13,6 +13,7 @@ go_library( "doc.go", "generated_expansion.go", ], + importpath = "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1", deps = [ "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme:go_default_library", diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/doc.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/doc.go index 11b523897207..1b50aa199700 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/doc.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. - // This package has the automatically generated typed clients. 
package v1beta1 diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/BUILD b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/BUILD index 97024ea635d8..a31718d0518c 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/BUILD +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/BUILD @@ -11,6 +11,7 @@ go_library( "clientset.go", "doc.go", ], + importpath = "k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion:go_default_library", diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/doc.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/doc.go index 309c091bfd33..b667dd5157a5 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/doc.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. - // This package has the automatically generated clientset. package internalclientset diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/scheme/BUILD b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/scheme/BUILD index 040a7fcdc48c..ae5835d47253 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/scheme/BUILD +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/scheme/BUILD @@ -11,6 +11,7 @@ go_library( "doc.go", "register.go", ], + importpath = "k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/scheme", deps = [ "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/scheme/doc.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/scheme/doc.go index 5d8ec824f0fb..3ec2200d0990 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/scheme/doc.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/scheme/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. - // This package contains the scheme of the automatically generated clientset. 
package scheme diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion/BUILD b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion/BUILD index 75584d5a391e..d92943a88d7a 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion/BUILD +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion/BUILD @@ -13,6 +13,7 @@ go_library( "doc.go", "generated_expansion.go", ], + importpath = "k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion", deps = [ "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/scheme:go_default_library", diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion/doc.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion/doc.go index 57e5b70cf564..3adf06d8934f 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion/doc.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. - // This package has the automatically generated typed clients. package internalversion diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/BUILD b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/BUILD index 94f642a2c990..57f38ce2590c 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/BUILD +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/BUILD @@ -11,11 +11,13 @@ go_library( "factory.go", "generic.go", ], + importpath = "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions", deps = [ "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/internalinterfaces:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/BUILD b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/BUILD index f8613a82dbf4..a0b8cee3dbd2 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/BUILD +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["interface.go"], + 
importpath = "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions", deps = [ "//vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/internalinterfaces:go_default_library", diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/interface.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/interface.go index de7f6e34e65c..e1ad6afcc089 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/interface.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/interface.go @@ -30,15 +30,17 @@ type Interface interface { } type group struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &group{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // V1beta1 returns a new v1beta1.Interface. func (g *group) V1beta1() v1beta1.Interface { - return v1beta1.New(g.SharedInformerFactory) + return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1/BUILD b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1/BUILD index 69f89a94bfa0..bd2d06784ca8 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1/BUILD +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1/BUILD @@ -11,6 +11,7 @@ go_library( "customresourcedefinition.go", "interface.go", ], + importpath = "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1", deps = [ "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library", diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1/customresourcedefinition.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1/customresourcedefinition.go index e28ec5db662c..cc86c42e957d 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1/customresourcedefinition.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1/customresourcedefinition.go @@ -38,19 +38,33 @@ type CustomResourceDefinitionInformer interface { } type customResourceDefinitionInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc } // NewCustomResourceDefinitionInformer constructs a new informer for CustomResourceDefinition type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. 
This reduces memory footprint and number of connections to the server. func NewCustomResourceDefinitionInformer(client clientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredCustomResourceDefinitionInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredCustomResourceDefinitionInformer constructs a new informer for CustomResourceDefinition type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredCustomResourceDefinitionInformer(client clientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.ApiextensionsV1beta1().CustomResourceDefinitions().List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.ApiextensionsV1beta1().CustomResourceDefinitions().Watch(options) }, }, @@ -60,12 +74,12 @@ func NewCustomResourceDefinitionInformer(client clientset.Interface, resyncPerio ) } -func defaultCustomResourceDefinitionInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewCustomResourceDefinitionInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *customResourceDefinitionInformer) defaultInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredCustomResourceDefinitionInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *customResourceDefinitionInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&apiextensions_v1beta1.CustomResourceDefinition{}, defaultCustomResourceDefinitionInformer) + return f.factory.InformerFor(&apiextensions_v1beta1.CustomResourceDefinition{}, f.defaultInformer) } func (f *customResourceDefinitionInformer) Lister() v1beta1.CustomResourceDefinitionLister { diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1/interface.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1/interface.go index cada14028b4a..2a582702655f 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1/interface.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1/interface.go @@ -29,15 +29,17 @@ type Interface interface { } type version struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. 
-func New(f internalinterfaces.SharedInformerFactory) Interface { - return &version{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // CustomResourceDefinitions returns a CustomResourceDefinitionInformer. func (v *version) CustomResourceDefinitions() CustomResourceDefinitionInformer { - return &customResourceDefinitionInformer{factory: v.SharedInformerFactory} + return &customResourceDefinitionInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/factory.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/factory.go index 0f94a664d927..fbaa2c65d3ca 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/factory.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/factory.go @@ -22,6 +22,7 @@ import ( clientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" apiextensions "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions" internalinterfaces "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/internalinterfaces" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" cache "k8s.io/client-go/tools/cache" @@ -31,9 +32,11 @@ import ( ) type sharedInformerFactory struct { - client clientset.Interface - lock sync.Mutex - defaultResync time.Duration + client clientset.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration informers map[reflect.Type]cache.SharedIndexInformer // startedInformers is used for tracking which informers have been started. @@ -43,8 +46,17 @@ type sharedInformerFactory struct { // NewSharedInformerFactory constructs a new instance of sharedInformerFactory func NewSharedInformerFactory(client clientset.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewFilteredSharedInformerFactory(client, defaultResync, v1.NamespaceAll, nil) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. 
+func NewFilteredSharedInformerFactory(client clientset.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { return &sharedInformerFactory{ client: client, + namespace: namespace, + tweakListOptions: tweakListOptions, defaultResync: defaultResync, informers: make(map[reflect.Type]cache.SharedIndexInformer), startedInformers: make(map[reflect.Type]bool), @@ -114,5 +126,5 @@ type SharedInformerFactory interface { } func (f *sharedInformerFactory) Apiextensions() apiextensions.Interface { - return apiextensions.New(f) + return apiextensions.New(f, f.namespace, f.tweakListOptions) } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/generic.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/generic.go index 06abdaa9de03..059fd57247a7 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/generic.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/generic.go @@ -51,7 +51,7 @@ func (f *genericInformer) Lister() cache.GenericLister { // TODO extend this to unknown resources with a client pool func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { switch resource { - // Group=Apiextensions, Version=V1beta1 + // Group=apiextensions.k8s.io, Version=v1beta1 case v1beta1.SchemeGroupVersion.WithResource("customresourcedefinitions"): return &genericInformer{resource: resource.GroupResource(), informer: f.Apiextensions().V1beta1().CustomResourceDefinitions().Informer()}, nil diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/internalinterfaces/BUILD b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/internalinterfaces/BUILD index 3c686edf4735..56bb1f8d8001 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/internalinterfaces/BUILD +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/internalinterfaces/BUILD @@ -8,8 +8,10 @@ load( go_library( name = "go_default_library", srcs = ["factory_interfaces.go"], + importpath = "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/internalinterfaces", deps = [ "//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", ], diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go index 6f7b0a81069d..80b53e77253b 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -20,6 +20,7 @@ package internalinterfaces import ( clientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" cache "k8s.io/client-go/tools/cache" time "time" @@ -32,3 +33,5 @@ type SharedInformerFactory interface { Start(stopCh <-chan struct{}) InformerFor(obj runtime.Object, 
newFunc NewInformerFunc) cache.SharedIndexInformer } + +type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/BUILD b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/BUILD index 3855ac6236ba..ec24afd538f0 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/BUILD +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/BUILD @@ -11,11 +11,13 @@ go_library( "factory.go", "generic.go", ], + importpath = "k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion", deps = [ "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/internalinterfaces:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/BUILD b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/BUILD index 3d8440cb1181..b1e3dfa022ab 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/BUILD +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["interface.go"], + importpath = "k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions", deps = [ "//vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/internalversion:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/internalinterfaces:go_default_library", diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/interface.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/interface.go index 8137e94b71ea..e422eb4404bd 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/interface.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/interface.go @@ -30,15 +30,17 @@ type Interface interface { } type group struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &group{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // InternalVersion returns a new internalversion.Interface. 
func (g *group) InternalVersion() internalversion.Interface { - return internalversion.New(g.SharedInformerFactory) + return internalversion.New(g.factory, g.namespace, g.tweakListOptions) } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/internalversion/BUILD b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/internalversion/BUILD index ba99018061a9..1432233320af 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/internalversion/BUILD +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/internalversion/BUILD @@ -11,6 +11,7 @@ go_library( "customresourcedefinition.go", "interface.go", ], + importpath = "k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/internalversion", deps = [ "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset:go_default_library", diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/internalversion/customresourcedefinition.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/internalversion/customresourcedefinition.go index 4c14fd1119fa..20c86312598a 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/internalversion/customresourcedefinition.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/internalversion/customresourcedefinition.go @@ -38,19 +38,33 @@ type CustomResourceDefinitionInformer interface { } type customResourceDefinitionInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc } // NewCustomResourceDefinitionInformer constructs a new informer for CustomResourceDefinition type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewCustomResourceDefinitionInformer(client internalclientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredCustomResourceDefinitionInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredCustomResourceDefinitionInformer constructs a new informer for CustomResourceDefinition type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredCustomResourceDefinitionInformer(client internalclientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Apiextensions().CustomResourceDefinitions().List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Apiextensions().CustomResourceDefinitions().Watch(options) }, }, @@ -60,12 +74,12 @@ func NewCustomResourceDefinitionInformer(client internalclientset.Interface, res ) } -func defaultCustomResourceDefinitionInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewCustomResourceDefinitionInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *customResourceDefinitionInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredCustomResourceDefinitionInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *customResourceDefinitionInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&apiextensions.CustomResourceDefinition{}, defaultCustomResourceDefinitionInformer) + return f.factory.InformerFor(&apiextensions.CustomResourceDefinition{}, f.defaultInformer) } func (f *customResourceDefinitionInformer) Lister() internalversion.CustomResourceDefinitionLister { diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/internalversion/interface.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/internalversion/interface.go index e9ba1c90a2ee..4183e973d4a0 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/internalversion/interface.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/internalversion/interface.go @@ -29,15 +29,17 @@ type Interface interface { } type version struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &version{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // CustomResourceDefinitions returns a CustomResourceDefinitionInformer. 
func (v *version) CustomResourceDefinitions() CustomResourceDefinitionInformer { - return &customResourceDefinitionInformer{factory: v.SharedInformerFactory} + return &customResourceDefinitionInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/factory.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/factory.go index fc5fda9fc48b..ee83d6dfc850 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/factory.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/factory.go @@ -22,6 +22,7 @@ import ( internalclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset" apiextensions "k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions" internalinterfaces "k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/internalinterfaces" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" cache "k8s.io/client-go/tools/cache" @@ -31,9 +32,11 @@ import ( ) type sharedInformerFactory struct { - client internalclientset.Interface - lock sync.Mutex - defaultResync time.Duration + client internalclientset.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration informers map[reflect.Type]cache.SharedIndexInformer // startedInformers is used for tracking which informers have been started. @@ -43,8 +46,17 @@ type sharedInformerFactory struct { // NewSharedInformerFactory constructs a new instance of sharedInformerFactory func NewSharedInformerFactory(client internalclientset.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewFilteredSharedInformerFactory(client, defaultResync, v1.NamespaceAll, nil) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. 
+func NewFilteredSharedInformerFactory(client internalclientset.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { return &sharedInformerFactory{ client: client, + namespace: namespace, + tweakListOptions: tweakListOptions, defaultResync: defaultResync, informers: make(map[reflect.Type]cache.SharedIndexInformer), startedInformers: make(map[reflect.Type]bool), @@ -114,5 +126,5 @@ type SharedInformerFactory interface { } func (f *sharedInformerFactory) Apiextensions() apiextensions.Interface { - return apiextensions.New(f) + return apiextensions.New(f, f.namespace, f.tweakListOptions) } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/generic.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/generic.go index a338c57c5392..4a013283ae5c 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/generic.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/generic.go @@ -51,7 +51,7 @@ func (f *genericInformer) Lister() cache.GenericLister { // TODO extend this to unknown resources with a client pool func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { switch resource { - // Group=Apiextensions, Version=InternalVersion + // Group=apiextensions.k8s.io, Version=internalVersion case apiextensions.SchemeGroupVersion.WithResource("customresourcedefinitions"): return &genericInformer{resource: resource.GroupResource(), informer: f.Apiextensions().InternalVersion().CustomResourceDefinitions().Informer()}, nil diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/internalinterfaces/BUILD b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/internalinterfaces/BUILD index deda85345e4f..398d1f2f8e12 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/internalinterfaces/BUILD +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/internalinterfaces/BUILD @@ -8,8 +8,10 @@ load( go_library( name = "go_default_library", srcs = ["factory_interfaces.go"], + importpath = "k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/internalinterfaces", deps = [ "//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", ], diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/internalinterfaces/factory_interfaces.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/internalinterfaces/factory_interfaces.go index 395b69ff8971..e93f43f50032 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/internalinterfaces/factory_interfaces.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/internalinterfaces/factory_interfaces.go @@ -20,6 +20,7 @@ package internalinterfaces import ( internalclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" cache "k8s.io/client-go/tools/cache" time "time" @@ -32,3 +33,5 @@ type SharedInformerFactory interface { Start(stopCh 
<-chan struct{}) InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer } + +type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/internalversion/BUILD b/vendor/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/internalversion/BUILD index f76dfc119dbc..54732b7f9c85 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/internalversion/BUILD +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/internalversion/BUILD @@ -11,10 +11,10 @@ go_library( "customresourcedefinition.go", "expansion_generated.go", ], + importpath = "k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/internalversion", deps = [ "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", ], diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/internalversion/customresourcedefinition.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/internalversion/customresourcedefinition.go index 27bb4cad884a..1bd8480514eb 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/internalversion/customresourcedefinition.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/internalversion/customresourcedefinition.go @@ -21,7 +21,6 @@ package internalversion import ( apiextensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" "k8s.io/apimachinery/pkg/api/errors" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" ) @@ -55,8 +54,7 @@ func (s *customResourceDefinitionLister) List(selector labels.Selector) (ret []* // Get retrieves the CustomResourceDefinition from the index for a given name. 
func (s *customResourceDefinitionLister) Get(name string) (*apiextensions.CustomResourceDefinition, error) { - key := &apiextensions.CustomResourceDefinition{ObjectMeta: v1.ObjectMeta{Name: name}} - obj, exists, err := s.indexer.Get(key) + obj, exists, err := s.indexer.GetByKey(name) if err != nil { return nil, err } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1/BUILD b/vendor/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1/BUILD index 9b69d966d559..f57e6d77fcf8 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1/BUILD +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1/BUILD @@ -11,10 +11,10 @@ go_library( "customresourcedefinition.go", "expansion_generated.go", ], + importpath = "k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1", deps = [ "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", ], diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1/customresourcedefinition.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1/customresourcedefinition.go index f9b2a8f1e4b0..316721bd68e9 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1/customresourcedefinition.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1/customresourcedefinition.go @@ -21,7 +21,6 @@ package v1beta1 import ( v1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" "k8s.io/apimachinery/pkg/api/errors" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" ) @@ -55,8 +54,7 @@ func (s *customResourceDefinitionLister) List(selector labels.Selector) (ret []* // Get retrieves the CustomResourceDefinition from the index for a given name. 
func (s *customResourceDefinitionLister) Get(name string) (*v1beta1.CustomResourceDefinition, error) { - key := &v1beta1.CustomResourceDefinition{ObjectMeta: v1.ObjectMeta{Name: name}} - obj, exists, err := s.indexer.Get(key) + obj, exists, err := s.indexer.GetByKey(name) if err != nil { return nil, err } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/cmd/server/BUILD b/vendor/k8s.io/apiextensions-apiserver/pkg/cmd/server/BUILD index b4bbd16bcb5a..a5cd39b43771 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/cmd/server/BUILD +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/cmd/server/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["start.go"], + importpath = "k8s.io/apiextensions-apiserver/pkg/cmd/server", deps = [ "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library", diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/cmd/server/start.go b/vendor/k8s.io/apiextensions-apiserver/pkg/cmd/server/start.go index ccf52cfeb7a8..6b41a2e06de3 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/cmd/server/start.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/cmd/server/start.go @@ -43,7 +43,7 @@ type CustomResourceDefinitionsServerOptions struct { func NewCustomResourceDefinitionsServerOptions(out, errOut io.Writer) *CustomResourceDefinitionsServerOptions { o := &CustomResourceDefinitionsServerOptions{ - RecommendedOptions: genericoptions.NewRecommendedOptions(defaultEtcdPathPrefix, apiserver.Scheme, apiserver.Codecs.LegacyCodec(v1beta1.SchemeGroupVersion)), + RecommendedOptions: genericoptions.NewRecommendedOptions(defaultEtcdPathPrefix, apiserver.Codecs.LegacyCodec(v1beta1.SchemeGroupVersion)), StdOut: out, StdErr: errOut, @@ -121,7 +121,6 @@ func NewCRDRESTOptionsGetter(etcdOptions genericoptions.EtcdOptions) genericregi DeleteCollectionWorkers: etcdOptions.DeleteCollectionWorkers, } ret.StorageConfig.Codec = unstructured.UnstructuredJSONScheme - ret.StorageConfig.Copier = apiserver.UnstructuredCopier{} return ret } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/controller/finalizer/BUILD b/vendor/k8s.io/apiextensions-apiserver/pkg/controller/finalizer/BUILD index 058f6e256238..452dfefdf5f4 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/controller/finalizer/BUILD +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/controller/finalizer/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["crd_finalizer.go"], + importpath = "k8s.io/apiextensions-apiserver/pkg/controller/finalizer", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions:go_default_library", diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/controller/status/BUILD b/vendor/k8s.io/apiextensions-apiserver/pkg/controller/status/BUILD index 2737a2efa49c..2abd62ff18d1 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/controller/status/BUILD +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/controller/status/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["naming_controller_test.go"], + importpath = "k8s.io/apiextensions-apiserver/pkg/controller/status", library = ":go_default_library", deps = [ "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions:go_default_library", @@ -21,6 +22,7 @@ go_test( go_library( name = "go_default_library", srcs = ["naming_controller.go"], + importpath = "k8s.io/apiextensions-apiserver/pkg/controller/status", deps = [ 
"//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions:go_default_library", @@ -28,7 +30,6 @@ go_library( "//vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/internalversion:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/internalversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/controller/status/naming_controller.go b/vendor/k8s.io/apiextensions-apiserver/pkg/controller/status/naming_controller.go index 271d11adf867..c0a7cc86f9d1 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/controller/status/naming_controller.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/controller/status/naming_controller.go @@ -25,7 +25,6 @@ import ( "github.com/golang/glog" apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" utilerrors "k8s.io/apimachinery/pkg/util/errors" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -192,22 +191,20 @@ func (c *NamingConditionController) calculateNamesAndConditions(in *apiextension // set EstablishedCondition to true if all names are accepted. Never set it back to false. establishedCondition := apiextensions.CustomResourceDefinitionCondition{ - Type: apiextensions.Established, - Status: apiextensions.ConditionFalse, - Reason: "NotAccepted", - Message: "not all names are accepted", - LastTransitionTime: metav1.NewTime(time.Now()), + Type: apiextensions.Established, + Status: apiextensions.ConditionFalse, + Reason: "NotAccepted", + Message: "not all names are accepted", } if old := apiextensions.FindCRDCondition(in, apiextensions.Established); old != nil { establishedCondition = *old } if establishedCondition.Status != apiextensions.ConditionTrue && namesAcceptedCondition.Status == apiextensions.ConditionTrue { establishedCondition = apiextensions.CustomResourceDefinitionCondition{ - Type: apiextensions.Established, - Status: apiextensions.ConditionTrue, - Reason: "InitialNamesAccepted", - Message: "the initial names have been accepted", - LastTransitionTime: metav1.NewTime(time.Now()), + Type: apiextensions.Established, + Status: apiextensions.ConditionTrue, + Reason: "InitialNamesAccepted", + Message: "the initial names have been accepted", } } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/features/BUILD b/vendor/k8s.io/apiextensions-apiserver/pkg/features/BUILD index 3b415effc454..c6438cb40c70 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/features/BUILD +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/features/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["kube_features.go"], + importpath = "k8s.io/apiextensions-apiserver/pkg/features", deps = ["//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library"], ) diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go b/vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go index fa78ce6ab516..80b37bf7bce7 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go +++ 
b/vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go @@ -29,6 +29,7 @@ const ( // owner: @sttts, @nikhita // alpha: v1.8 + // beta: v1.9 // // CustomResourceValidation is a list of validation methods for CustomResources CustomResourceValidation utilfeature.Feature = "CustomResourceValidation" @@ -42,5 +43,5 @@ func init() { // To add a new feature, define a key for it above and add it here. The features will be // available throughout Kubernetes binaries. var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureSpec{ - CustomResourceValidation: {Default: false, PreRelease: utilfeature.Alpha}, + CustomResourceValidation: {Default: true, PreRelease: utilfeature.Beta}, } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/registry/customresource/BUILD b/vendor/k8s.io/apiextensions-apiserver/pkg/registry/customresource/BUILD index 90eb4120ecfa..84d77bd5bf7e 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/registry/customresource/BUILD +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/registry/customresource/BUILD @@ -11,6 +11,7 @@ go_library( "etcd.go", "strategy.go", ], + importpath = "k8s.io/apiextensions-apiserver/pkg/registry/customresource", deps = [ "//vendor/github.com/go-openapi/validate:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/validation:go_default_library", diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/registry/customresource/etcd.go b/vendor/k8s.io/apiextensions-apiserver/pkg/registry/customresource/etcd.go index 5f953b1c699a..0cbf3b1122e4 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/registry/customresource/etcd.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/registry/customresource/etcd.go @@ -30,9 +30,8 @@ type REST struct { } // NewREST returns a RESTStorage object that will work against API services. 
-func NewREST(resource schema.GroupResource, listKind schema.GroupVersionKind, copier runtime.ObjectCopier, strategy customResourceDefinitionStorageStrategy, optsGetter generic.RESTOptionsGetter) *REST { +func NewREST(resource schema.GroupResource, listKind schema.GroupVersionKind, strategy customResourceDefinitionStorageStrategy, optsGetter generic.RESTOptionsGetter) *REST { store := &genericregistry.Store{ - Copier: copier, NewFunc: func() runtime.Object { return &unstructured.Unstructured{} }, NewListFunc: func() runtime.Object { // lists are never stored, only manufactured, so stomp in the right kind diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/registry/customresource/strategy.go b/vendor/k8s.io/apiextensions-apiserver/pkg/registry/customresource/strategy.go index 18c6c2b74543..f001dc57a0fc 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/registry/customresource/strategy.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/registry/customresource/strategy.go @@ -135,7 +135,7 @@ func (a customResourceValidator) Validate(ctx genericapirequest.Context, obj run return field.ErrorList{field.Invalid(field.NewPath("kind"), typeAccessor.GetKind(), fmt.Sprintf("must be %v", a.kind.Kind))} } if typeAccessor.GetAPIVersion() != a.kind.Group+"/"+a.kind.Version { - return field.ErrorList{field.Invalid(field.NewPath("apiVersion"), typeAccessor.GetKind(), fmt.Sprintf("must be %v", a.kind.Group+"/"+a.kind.Version))} + return field.ErrorList{field.Invalid(field.NewPath("apiVersion"), typeAccessor.GetAPIVersion(), fmt.Sprintf("must be %v", a.kind.Group+"/"+a.kind.Version))} } customResourceObject, ok := obj.(*unstructured.Unstructured) @@ -169,7 +169,7 @@ func (a customResourceValidator) ValidateUpdate(ctx genericapirequest.Context, o return field.ErrorList{field.Invalid(field.NewPath("kind"), typeAccessor.GetKind(), fmt.Sprintf("must be %v", a.kind.Kind))} } if typeAccessor.GetAPIVersion() != a.kind.Group+"/"+a.kind.Version { - return field.ErrorList{field.Invalid(field.NewPath("apiVersion"), typeAccessor.GetKind(), fmt.Sprintf("must be %v", a.kind.Group+"/"+a.kind.Version))} + return field.ErrorList{field.Invalid(field.NewPath("apiVersion"), typeAccessor.GetAPIVersion(), fmt.Sprintf("must be %v", a.kind.Group+"/"+a.kind.Version))} } customResourceObject, ok := obj.(*unstructured.Unstructured) diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition/BUILD b/vendor/k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition/BUILD index 5a433a3e0abb..2a77d83b3252 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition/BUILD +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition/BUILD @@ -11,6 +11,7 @@ go_library( "etcd.go", "strategy.go", ], + importpath = "k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition", deps = [ "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation:go_default_library", diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition/etcd.go b/vendor/k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition/etcd.go index 901406bfef24..85d981f1a3b2 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition/etcd.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition/etcd.go @@ -41,7 +41,6 @@ func NewREST(scheme *runtime.Scheme, optsGetter 
generic.RESTOptionsGetter) *REST strategy := NewStrategy(scheme) store := &genericregistry.Store{ - Copier: scheme, NewFunc: func() runtime.Object { return &apiextensions.CustomResourceDefinition{} }, NewListFunc: func() runtime.Object { return &apiextensions.CustomResourceDefinitionList{} }, PredicateFunc: MatchCustomResourceDefinition, @@ -168,6 +167,6 @@ func (r *StatusREST) New() runtime.Object { } // Update alters the status subset of an object. -func (r *StatusREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) { - return r.store.Update(ctx, name, objInfo) +func (r *StatusREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (runtime.Object, bool, error) { + return r.store.Update(ctx, name, objInfo, createValidation, updateValidation) } diff --git a/vendor/k8s.io/apimachinery/LICENSE b/vendor/k8s.io/apimachinery/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/k8s.io/apimachinery/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
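Illustrative usage sketch (not part of the vendored diff): the filtered informer constructors introduced earlier in this patch add a namespace argument and a TweakListOptionsFunc hook, which the generated informers apply to every List and Watch they issue. The constructor signature, the TweakListOptionsFunc type, and the import paths below come from the diff itself; the helper name newFilteredCRDInformerFactory, the 10-minute resync period, and the example label selector are invented for illustration.

package example

import (
	"time"

	clientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
	informers "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// newFilteredCRDInformerFactory is a hypothetical helper showing how the new
// NewFilteredSharedInformerFactory constructor is called. The tweak function
// is applied to every List and Watch request issued by informers obtained
// from the returned factory.
func newFilteredCRDInformerFactory(client clientset.Interface) informers.SharedInformerFactory {
	tweak := func(options *metav1.ListOptions) {
		// Example filter (assumed label): only watch objects carrying it.
		options.LabelSelector = "example.com/managed=true"
	}
	return informers.NewFilteredSharedInformerFactory(client, 10*time.Minute, metav1.NamespaceAll, tweak)
}

Since CustomResourceDefinitions are cluster-scoped, the namespace argument is effectively metav1.NamespaceAll for this client; the namespace and tweak plumbing is the generic pattern that client-gen now emits for all generated informer factories, which is why it appears throughout these vendored packages.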
diff --git a/vendor/k8s.io/apimachinery/pkg/api/equality/BUILD b/vendor/k8s.io/apimachinery/pkg/api/equality/BUILD index f4282535715d..75051f35b0b6 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/equality/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/api/equality/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["semantic.go"], + importpath = "k8s.io/apimachinery/pkg/api/equality", deps = [ "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/BUILD b/vendor/k8s.io/apimachinery/pkg/api/errors/BUILD index bae266c4b12f..80e205320ade 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/errors/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["errors_test.go"], + importpath = "k8s.io/apimachinery/pkg/api/errors", library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -24,6 +25,7 @@ go_library( "doc.go", "errors.go", ], + importpath = "k8s.io/apimachinery/pkg/api/errors", deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS index ff82254f8312..af32c1fdf758 100755 --- a/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS @@ -20,7 +20,6 @@ reviewers: - dims - hongchaodeng - krousey -- satnam6502 - cjcullen - david-mcmahon - goltermann diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go index d5503fac5d98..9960600be337 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go @@ -405,84 +405,84 @@ func NewGenericServerResponse(code int, verb string, qualifiedResource schema.Gr // IsNotFound returns true if the specified error was created by NewNotFound. func IsNotFound(err error) bool { - return reasonForError(err) == metav1.StatusReasonNotFound + return ReasonForError(err) == metav1.StatusReasonNotFound } // IsAlreadyExists determines if the err is an error which indicates that a specified resource already exists. func IsAlreadyExists(err error) bool { - return reasonForError(err) == metav1.StatusReasonAlreadyExists + return ReasonForError(err) == metav1.StatusReasonAlreadyExists } // IsConflict determines if the err is an error which indicates the provided update conflicts. func IsConflict(err error) bool { - return reasonForError(err) == metav1.StatusReasonConflict + return ReasonForError(err) == metav1.StatusReasonConflict } // IsInvalid determines if the err is an error which indicates the provided resource is not valid. func IsInvalid(err error) bool { - return reasonForError(err) == metav1.StatusReasonInvalid + return ReasonForError(err) == metav1.StatusReasonInvalid } // IsGone is true if the error indicates the requested resource is no longer available. func IsGone(err error) bool { - return reasonForError(err) == metav1.StatusReasonGone + return ReasonForError(err) == metav1.StatusReasonGone } // IsResourceExpired is true if the error indicates the resource has expired and the current action is // no longer possible. 
func IsResourceExpired(err error) bool { - return reasonForError(err) == metav1.StatusReasonExpired + return ReasonForError(err) == metav1.StatusReasonExpired } // IsMethodNotSupported determines if the err is an error which indicates the provided action could not // be performed because it is not supported by the server. func IsMethodNotSupported(err error) bool { - return reasonForError(err) == metav1.StatusReasonMethodNotAllowed + return ReasonForError(err) == metav1.StatusReasonMethodNotAllowed } // IsServiceUnavailable is true if the error indicates the underlying service is no longer available. func IsServiceUnavailable(err error) bool { - return reasonForError(err) == metav1.StatusReasonServiceUnavailable + return ReasonForError(err) == metav1.StatusReasonServiceUnavailable } // IsBadRequest determines if err is an error which indicates that the request is invalid. func IsBadRequest(err error) bool { - return reasonForError(err) == metav1.StatusReasonBadRequest + return ReasonForError(err) == metav1.StatusReasonBadRequest } // IsUnauthorized determines if err is an error which indicates that the request is unauthorized and // requires authentication by the user. func IsUnauthorized(err error) bool { - return reasonForError(err) == metav1.StatusReasonUnauthorized + return ReasonForError(err) == metav1.StatusReasonUnauthorized } // IsForbidden determines if err is an error which indicates that the request is forbidden and cannot // be completed as requested. func IsForbidden(err error) bool { - return reasonForError(err) == metav1.StatusReasonForbidden + return ReasonForError(err) == metav1.StatusReasonForbidden } // IsTimeout determines if err is an error which indicates that request times out due to long // processing. func IsTimeout(err error) bool { - return reasonForError(err) == metav1.StatusReasonTimeout + return ReasonForError(err) == metav1.StatusReasonTimeout } // IsServerTimeout determines if err is an error which indicates that the request needs to be retried // by the client. func IsServerTimeout(err error) bool { - return reasonForError(err) == metav1.StatusReasonServerTimeout + return ReasonForError(err) == metav1.StatusReasonServerTimeout } // IsInternalError determines if err is an error which indicates an internal server error. func IsInternalError(err error) bool { - return reasonForError(err) == metav1.StatusReasonInternalError + return ReasonForError(err) == metav1.StatusReasonInternalError } // IsTooManyRequests determines if err is an error which indicates that there are too many requests // that the server cannot handle. func IsTooManyRequests(err error) bool { - if reasonForError(err) == metav1.StatusReasonTooManyRequests { + if ReasonForError(err) == metav1.StatusReasonTooManyRequests { return true } switch t := err.(type) { @@ -536,7 +536,8 @@ func SuggestsClientDelay(err error) (int, bool) { return 0, false } -func reasonForError(err error) metav1.StatusReason { +// ReasonForError returns the HTTP status for a particular error. 
+func ReasonForError(err error) metav1.StatusReason { switch t := err.(type) { case APIStatus: return t.Status().Reason diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/BUILD b/vendor/k8s.io/apimachinery/pkg/api/meta/BUILD index f7efd8e8e88a..f42cd77fbecc 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/BUILD @@ -14,6 +14,7 @@ go_test( "priority_test.go", "restmapper_test.go", ], + importpath = "k8s.io/apimachinery/pkg/api/meta", library = ":go_default_library", deps = [ "//vendor/github.com/google/gofuzz:go_default_library", @@ -33,12 +34,14 @@ go_library( "firsthit_restmapper.go", "help.go", "interfaces.go", + "lazy.go", "meta.go", "multirestmapper.go", "priority.go", "restmapper.go", "unstructured.go", ], + importpath = "k8s.io/apimachinery/pkg/api/meta", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go b/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go index bf1723693a21..5dc9d89e67f1 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go @@ -75,6 +75,9 @@ type MetadataAccessor interface { Annotations(obj runtime.Object) (map[string]string, error) SetAnnotations(obj runtime.Object, annotations map[string]string) error + Continue(obj runtime.Object) (string, error) + SetContinue(obj runtime.Object, c string) error + runtime.ResourceVersioner } diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go b/vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go new file mode 100644 index 000000000000..7f92f39a43f0 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go @@ -0,0 +1,121 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package meta + +import ( + "sync" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// lazyObject defers loading the mapper and typer until necessary. +type lazyObject struct { + loader func() (RESTMapper, runtime.ObjectTyper, error) + + lock sync.Mutex + loaded bool + err error + mapper RESTMapper + typer runtime.ObjectTyper +} + +// NewLazyObjectLoader handles unrecoverable errors when creating a RESTMapper / ObjectTyper by +// returning those initialization errors when the interface methods are invoked. This defers the +// initialization and any server calls until a client actually needs to perform the action. +func NewLazyObjectLoader(fn func() (RESTMapper, runtime.ObjectTyper, error)) (RESTMapper, runtime.ObjectTyper) { + obj := &lazyObject{loader: fn} + return obj, obj +} + +// init lazily loads the mapper and typer, returning an error if initialization has failed. 
+func (o *lazyObject) init() error { + o.lock.Lock() + defer o.lock.Unlock() + if o.loaded { + return o.err + } + o.mapper, o.typer, o.err = o.loader() + o.loaded = true + return o.err +} + +var _ RESTMapper = &lazyObject{} +var _ runtime.ObjectTyper = &lazyObject{} + +func (o *lazyObject) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + if err := o.init(); err != nil { + return schema.GroupVersionKind{}, err + } + return o.mapper.KindFor(resource) +} + +func (o *lazyObject) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { + if err := o.init(); err != nil { + return []schema.GroupVersionKind{}, err + } + return o.mapper.KindsFor(resource) +} + +func (o *lazyObject) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) { + if err := o.init(); err != nil { + return schema.GroupVersionResource{}, err + } + return o.mapper.ResourceFor(input) +} + +func (o *lazyObject) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { + if err := o.init(); err != nil { + return []schema.GroupVersionResource{}, err + } + return o.mapper.ResourcesFor(input) +} + +func (o *lazyObject) RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) { + if err := o.init(); err != nil { + return nil, err + } + return o.mapper.RESTMapping(gk, versions...) +} + +func (o *lazyObject) RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) { + if err := o.init(); err != nil { + return nil, err + } + return o.mapper.RESTMappings(gk, versions...) +} + +func (o *lazyObject) ResourceSingularizer(resource string) (singular string, err error) { + if err := o.init(); err != nil { + return "", err + } + return o.mapper.ResourceSingularizer(resource) +} + +func (o *lazyObject) ObjectKinds(obj runtime.Object) ([]schema.GroupVersionKind, bool, error) { + if err := o.init(); err != nil { + return nil, false, err + } + return o.typer.ObjectKinds(obj) +} + +func (o *lazyObject) Recognizes(gvk schema.GroupVersionKind) bool { + if err := o.init(); err != nil { + return false + } + return o.typer.Recognizes(gvk) +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go index 1889b951262f..c2d51b43c73b 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go @@ -89,7 +89,7 @@ func ListAccessor(obj interface{}) (List, error) { } return nil, errNotList default: - panic(fmt.Errorf("%T does not implement the List interface", obj)) + return nil, errNotList } } @@ -367,6 +367,23 @@ func (resourceAccessor) SetResourceVersion(obj runtime.Object, version string) e return nil } +func (resourceAccessor) Continue(obj runtime.Object) (string, error) { + accessor, err := ListAccessor(obj) + if err != nil { + return "", err + } + return accessor.GetContinue(), nil +} + +func (resourceAccessor) SetContinue(obj runtime.Object, version string) error { + accessor, err := ListAccessor(obj) + if err != nil { + return err + } + accessor.SetContinue(version) + return nil +} + // extractFromOwnerReference extracts v to o. v is the OwnerReferences field of an object. 
func extractFromOwnerReference(v reflect.Value, o *metav1.OwnerReference) error { if err := runtime.Field(v, "APIVersion", &o.APIVersion); err != nil { diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/unstructured.go b/vendor/k8s.io/apimachinery/pkg/api/meta/unstructured.go index 3ebf2481573d..4e13efea3c3e 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/unstructured.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/unstructured.go @@ -21,8 +21,24 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" ) +// InterfacesForUnstructuredConversion returns VersionInterfaces suitable for +// dealing with unstructured.Unstructured objects and supports conversion +// from typed objects (provided by parent) to untyped objects. +func InterfacesForUnstructuredConversion(parent VersionInterfacesFunc) VersionInterfacesFunc { + return func(version schema.GroupVersion) (*VersionInterfaces, error) { + if i, err := parent(version); err == nil { + return &VersionInterfaces{ + ObjectConvertor: i.ObjectConvertor, + MetadataAccessor: NewAccessor(), + }, nil + } + return InterfacesForUnstructured(version) + } +} + // InterfacesForUnstructured returns VersionInterfaces suitable for -// dealing with unstructured.Unstructured objects. +// dealing with unstructured.Unstructured objects. It will return errors for +// other conversions. func InterfacesForUnstructured(schema.GroupVersion) (*VersionInterfaces, error) { return &VersionInterfaces{ ObjectConvertor: &unstructured.UnstructuredObjectConverter{}, diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/BUILD b/vendor/k8s.io/apimachinery/pkg/api/resource/BUILD index 840c575e747f..1fb88704e32e 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/BUILD @@ -15,6 +15,7 @@ go_test( "quantity_test.go", "scale_int_test.go", ], + importpath = "k8s.io/apimachinery/pkg/api/resource", library = ":go_default_library", deps = [ "//vendor/github.com/google/gofuzz:go_default_library", @@ -33,7 +34,9 @@ go_library( "quantity_proto.go", "scale_int.go", "suffix.go", + "zz_generated.deepcopy.go", ], + importpath = "k8s.io/apimachinery/pkg/api/resource", deps = [ "//vendor/github.com/go-openapi/spec:go_default_library", "//vendor/github.com/gogo/protobuf/proto:go_default_library", @@ -46,6 +49,7 @@ go_library( go_test( name = "go_default_xtest", srcs = ["quantity_example_test.go"], + importpath = "k8s.io/apimachinery/pkg/api/resource_test", deps = ["//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library"], ) diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto index 608299da4d7a..091d11bdba14 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto @@ -87,6 +87,7 @@ option go_package = "resource"; // +protobuf.embed=string // +protobuf.options.marshal=false // +protobuf.options.(gogoproto.goproto_stringer)=false +// +k8s:deepcopy-gen=true // +k8s:openapi-gen=true message Quantity { optional string string = 1; diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go index 1839d78a34b5..682ee9aa646a 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go @@ -93,6 +93,7 @@ import ( // +protobuf.embed=string // +protobuf.options.marshal=false // +protobuf.options.(gogoproto.goproto_stringer)=false +// 
+k8s:deepcopy-gen=true // +k8s:openapi-gen=true type Quantity struct { // i is the quantity in int64 scaled form, if d.Dec == nil @@ -415,7 +416,7 @@ func (_ Quantity) OpenAPIDefinition() openapi.OpenAPIDefinition { // Note about BinarySI: // * If q.Format is set to BinarySI and q.Amount represents a non-zero value between // -1 and +1, it will be emitted as if q.Format were DecimalSI. -// * Otherwise, if q.Format is set to BinarySI, frational parts of q.Amount will be +// * Otherwise, if q.Format is set to BinarySI, fractional parts of q.Amount will be // rounded up. (1.1i becomes 2i.) func (q *Quantity) CanonicalizeBytes(out []byte) (result, suffix []byte) { if q.IsZero() { diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go new file mode 100644 index 000000000000..eb49f819943c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go @@ -0,0 +1,27 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package resource + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Quantity) DeepCopyInto(out *Quantity) { + *out = in.DeepCopy() + return +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/validation/BUILD b/vendor/k8s.io/apimachinery/pkg/api/validation/BUILD index 9de7826bc805..8546d1a5932b 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/validation/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/api/validation/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["objectmeta_test.go"], + importpath = "k8s.io/apimachinery/pkg/api/validation", library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -23,6 +24,7 @@ go_library( "generic.go", "objectmeta.go", ], + importpath = "k8s.io/apimachinery/pkg/api/validation", deps = [ "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", diff --git a/vendor/k8s.io/apimachinery/pkg/api/validation/path/BUILD b/vendor/k8s.io/apimachinery/pkg/api/validation/path/BUILD index 1aff05d0513e..e50632c50922 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/validation/path/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/api/validation/path/BUILD @@ -9,12 +9,14 @@ load( go_test( name = "go_default_test", srcs = ["name_test.go"], + importpath = "k8s.io/apimachinery/pkg/api/validation/path", library = ":go_default_library", ) go_library( name = "go_default_library", srcs = ["name.go"], + importpath = "k8s.io/apimachinery/pkg/api/validation/path", ) filegroup( diff --git a/vendor/k8s.io/apimachinery/pkg/apimachinery/BUILD b/vendor/k8s.io/apimachinery/pkg/apimachinery/BUILD index fc983842e1bc..2c6753d141e6 100644 --- a/vendor/k8s.io/apimachinery/pkg/apimachinery/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/apimachinery/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["types_test.go"], + importpath = "k8s.io/apimachinery/pkg/apimachinery", library = ":go_default_library", deps = ["//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library"], ) @@ -19,6 +20,7 @@ go_library( "doc.go", "types.go", ], + importpath = "k8s.io/apimachinery/pkg/apimachinery", deps = [ "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/BUILD b/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/BUILD index 466f86e1fdf5..314ddcad227d 100644 --- a/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["announced_test.go"], + importpath = "k8s.io/apimachinery/pkg/apimachinery/announced", library = ":go_default_library", deps = ["//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library"], ) @@ -19,6 +20,7 @@ go_library( "announced.go", "group_factory.go", ], + importpath = "k8s.io/apimachinery/pkg/apimachinery/announced", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", diff --git a/vendor/k8s.io/apimachinery/pkg/apimachinery/registered/BUILD b/vendor/k8s.io/apimachinery/pkg/apimachinery/registered/BUILD index e4c6856471d1..8982b562911d 100644 --- a/vendor/k8s.io/apimachinery/pkg/apimachinery/registered/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/apimachinery/registered/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["registered_test.go"], + importpath = "k8s.io/apimachinery/pkg/apimachinery/registered", 
library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/apimachinery:go_default_library", @@ -19,6 +20,7 @@ go_test( go_library( name = "go_default_library", srcs = ["registered.go"], + importpath = "k8s.io/apimachinery/pkg/apimachinery/registered", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", diff --git a/vendor/k8s.io/apimachinery/pkg/apimachinery/registered/registered.go b/vendor/k8s.io/apimachinery/pkg/apimachinery/registered/registered.go index 2d5946f68486..e4c6cf40d920 100644 --- a/vendor/k8s.io/apimachinery/pkg/apimachinery/registered/registered.go +++ b/vendor/k8s.io/apimachinery/pkg/apimachinery/registered/registered.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package to keep track of API Versions that can be registered and are enabled in api.Scheme. +// Package to keep track of API Versions that can be registered and are enabled in a Scheme. package registered import ( diff --git a/vendor/k8s.io/apimachinery/pkg/apimachinery/types.go b/vendor/k8s.io/apimachinery/pkg/apimachinery/types.go index 213e34bc00e3..baca784fad12 100644 --- a/vendor/k8s.io/apimachinery/pkg/apimachinery/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apimachinery/types.go @@ -38,7 +38,7 @@ type GroupMeta struct { // to go through the InterfacesFor method below. SelfLinker runtime.SelfLinker - // RESTMapper provides the default mapping between REST paths and the objects declared in api.Scheme and all known + // RESTMapper provides the default mapping between REST paths and the objects declared in a Scheme and all known // versions. RESTMapper meta.RESTMapper diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/BUILD b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/BUILD index 5caf075fa2fc..636707fe2dc5 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/BUILD @@ -12,6 +12,7 @@ go_test( "register_test.go", "roundtrip_test.go", ], + importpath = "k8s.io/apimachinery/pkg/apis/meta/internalversion", library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/api/testing/roundtrip:go_default_library", @@ -31,6 +32,7 @@ go_library( "zz_generated.conversion.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/apimachinery/pkg/apis/meta/internalversion", deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1:go_default_library", diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go index 4d45f81e39b9..db79fb0cb5e5 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go @@ -38,9 +38,6 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Copier exposes copying on this scheme. -var Copier runtime.ObjectCopier = scheme - // Codecs provides access to encoding and decoding for the scheme. 
var Codecs = serializer.NewCodecFactory(scheme) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go index f13ff9fc50de..deaf5309d639 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go @@ -21,27 +21,9 @@ limitations under the License. package internalversion import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { - return []conversion.GeneratedDeepCopyFunc{ - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*List).DeepCopyInto(out.(*List)) - return nil - }, InType: reflect.TypeOf(&List{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ListOptions).DeepCopyInto(out.(*ListOptions)) - return nil - }, InType: reflect.TypeOf(&ListOptions{})}, - } -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *List) DeepCopyInto(out *List) { *out = *in diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD index 12fdf76a676a..4a96c3f948c5 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD @@ -18,6 +18,7 @@ go_test( "time_test.go", "types_test.go", ], + importpath = "k8s.io/apimachinery/pkg/apis/meta/v1", library = ":go_default_library", deps = [ "//vendor/github.com/ghodss/yaml:go_default_library", @@ -50,6 +51,7 @@ go_library( "zz_generated.deepcopy.go", "zz_generated.defaults.go", ], + importpath = "k8s.io/apimachinery/pkg/apis/meta/v1", deps = [ "//vendor/github.com/go-openapi/spec:go_default_library", "//vendor/github.com/gogo/protobuf/proto:go_default_library", @@ -91,3 +93,13 @@ filegroup( srcs = ["generated.proto"], visibility = ["//visibility:public"], ) + +go_test( + name = "go_default_xtest", + srcs = ["conversion_test.go"], + importpath = "k8s.io/apimachinery/pkg/apis/meta/v1_test", + deps = [ + "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + ], +) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go index 7049c9a33bac..c62f853351ed 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go @@ -38,6 +38,7 @@ func AddConversionFuncs(scheme *runtime.Scheme) error { Convert_intstr_IntOrString_To_intstr_IntOrString, Convert_unversioned_Time_To_unversioned_Time, + Convert_unversioned_MicroTime_To_unversioned_MicroTime, Convert_Pointer_v1_Duration_To_v1_Duration, Convert_v1_Duration_To_Pointer_v1_Duration, @@ -199,6 +200,12 @@ func Convert_v1_Duration_To_Pointer_v1_Duration(in *Duration, out **Duration, s return nil } +func Convert_unversioned_MicroTime_To_unversioned_MicroTime(in *MicroTime, out *MicroTime, s conversion.Scope) error { + // Cannot deep copy these, because time.Time has unexported fields. 
+ *out = *in + return nil +} + // Convert_Slice_string_To_unversioned_Time allows converting a URL query parameter value func Convert_Slice_string_To_unversioned_Time(input *[]string, out *Time, s conversion.Scope) error { str := "" @@ -252,7 +259,6 @@ func Convert_map_to_unversioned_LabelSelector(in *map[string]string, out *LabelS if in == nil { return nil } - out = new(LabelSelector) for labelKey, labelValue := range *in { AddLabelToSelector(out, labelKey, labelValue) } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go index 653b30237b9c..b8218469f40f 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go @@ -50,6 +50,7 @@ limitations under the License. MicroTime ObjectMeta OwnerReference + Patch Preconditions RootPaths ServerAddressByClientCIDR @@ -196,51 +197,55 @@ func (m *OwnerReference) Reset() { *m = OwnerReference{} } func (*OwnerReference) ProtoMessage() {} func (*OwnerReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } +func (m *Patch) Reset() { *m = Patch{} } +func (*Patch) ProtoMessage() {} +func (*Patch) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } + func (m *Preconditions) Reset() { *m = Preconditions{} } func (*Preconditions) ProtoMessage() {} -func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } func (m *RootPaths) Reset() { *m = RootPaths{} } func (*RootPaths) ProtoMessage() {} -func (*RootPaths) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +func (*RootPaths) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} } func (*ServerAddressByClientCIDR) ProtoMessage() {} func (*ServerAddressByClientCIDR) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{27} + return fileDescriptorGenerated, []int{28} } func (m *Status) Reset() { *m = Status{} } func (*Status) ProtoMessage() {} -func (*Status) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } +func (*Status) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } func (m *StatusCause) Reset() { *m = StatusCause{} } func (*StatusCause) ProtoMessage() {} -func (*StatusCause) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } +func (*StatusCause) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } func (m *StatusDetails) Reset() { *m = StatusDetails{} } func (*StatusDetails) ProtoMessage() {} -func (*StatusDetails) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } +func (*StatusDetails) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } func (m *Time) Reset() { *m = Time{} } func (*Time) ProtoMessage() {} -func (*Time) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } +func (*Time) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } func (m *Timestamp) Reset() { *m = Timestamp{} } func (*Timestamp) ProtoMessage() {} -func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } +func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } func (m *TypeMeta) Reset() { *m 
= TypeMeta{} } func (*TypeMeta) ProtoMessage() {} -func (*TypeMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } +func (*TypeMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } func (m *Verbs) Reset() { *m = Verbs{} } func (*Verbs) ProtoMessage() {} -func (*Verbs) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } +func (*Verbs) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } func (m *WatchEvent) Reset() { *m = WatchEvent{} } func (*WatchEvent) ProtoMessage() {} -func (*WatchEvent) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } +func (*WatchEvent) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } func init() { proto.RegisterType((*APIGroup)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIGroup") @@ -268,6 +273,7 @@ func init() { proto.RegisterType((*MicroTime)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime") proto.RegisterType((*ObjectMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta") proto.RegisterType((*OwnerReference)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference") + proto.RegisterType((*Patch)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Patch") proto.RegisterType((*Preconditions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Preconditions") proto.RegisterType((*RootPaths)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.RootPaths") proto.RegisterType((*ServerAddressByClientCIDR)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ServerAddressByClientCIDR") @@ -1317,6 +1323,24 @@ func (m *OwnerReference) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *Patch) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Patch) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + func (m *Preconditions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2065,6 +2089,12 @@ func (m *OwnerReference) Size() (n int) { return n } +func (m *Patch) Size() (n int) { + var l int + _ = l + return n +} + func (m *Preconditions) Size() (n int) { var l int _ = l @@ -2464,6 +2494,15 @@ func (this *OwnerReference) String() string { }, "") return s } +func (this *Patch) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Patch{`, + `}`, + }, "") + return s +} func (this *Preconditions) String() string { if this == nil { return "nil" @@ -6382,6 +6421,56 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { } return nil } +func (m *Patch) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Patch: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Patch: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + 
return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *Preconditions) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -7715,7 +7804,7 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 2428 bytes of a gzipped FileDescriptorProto + // 2435 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x59, 0x4d, 0x6c, 0x23, 0x49, 0x15, 0x4e, 0xdb, 0xb1, 0x63, 0x3f, 0xc7, 0xf9, 0xa9, 0xcd, 0x80, 0x37, 0x02, 0x3b, 0xdb, 0x8b, 0x56, 0x59, 0x98, 0xb5, 0x49, 0x16, 0x56, 0xc3, 0x00, 0x03, 0xe9, 0x38, 0x33, 0x8a, 0x76, 0x32, @@ -7723,7 +7812,7 @@ var fileDescriptorGenerated = []byte{ 0x33, 0x09, 0x1c, 0xd8, 0x03, 0x48, 0x1c, 0x10, 0x9a, 0x23, 0x27, 0xb4, 0x23, 0xb8, 0x70, 0xe5, 0xc4, 0x05, 0x4e, 0x48, 0xcc, 0x71, 0x24, 0x2e, 0x7b, 0x40, 0xd6, 0x8e, 0xf7, 0xc0, 0x09, 0x71, 0xcf, 0x09, 0x55, 0x75, 0xf5, 0x9f, 0x1d, 0x4f, 0xda, 0x3b, 0x0b, 0xe2, 0x14, 0xf7, 0xfb, 0xf9, - 0xde, 0xab, 0xaa, 0xf7, 0x5e, 0xbd, 0x7a, 0x81, 0xbd, 0xe3, 0x6b, 0xac, 0x6e, 0x7b, 0x8d, 0xe3, + 0xde, 0xab, 0x57, 0xaf, 0x5e, 0xbd, 0x7a, 0x81, 0xbd, 0xe3, 0x6b, 0xac, 0x6e, 0x7b, 0x8d, 0xe3, 0xfe, 0x01, 0xa1, 0x2e, 0xe1, 0x84, 0x35, 0x4e, 0x88, 0xdb, 0xf6, 0x68, 0x43, 0x31, 0xcc, 0x9e, 0xdd, 0x35, 0xad, 0x23, 0xdb, 0x25, 0xf4, 0xac, 0xd1, 0x3b, 0xee, 0x08, 0x02, 0x6b, 0x74, 0x09, 0x37, 0x1b, 0x27, 0x1b, 0x8d, 0x0e, 0x71, 0x09, 0x35, 0x39, 0x69, 0xd7, 0x7b, 0xd4, 0xe3, 0x1e, @@ -7735,137 +7824,138 @@ var fileDescriptorGenerated = []byte{ 0xff, 0x96, 0x85, 0xc2, 0x56, 0x6b, 0xf7, 0x16, 0xf5, 0xfa, 0x3d, 0xb4, 0x06, 0xb3, 0xae, 0xd9, 0x25, 0x15, 0x6d, 0x4d, 0x5b, 0x2f, 0x1a, 0xf3, 0x4f, 0x07, 0xb5, 0x99, 0xe1, 0xa0, 0x36, 0x7b, 0xc7, 0xec, 0x12, 0x2c, 0x39, 0xc8, 0x81, 0xc2, 0x09, 0xa1, 0xcc, 0xf6, 0x5c, 0x56, 0xc9, 0xac, - 0x65, 0xd7, 0x4b, 0x9b, 0x37, 0xea, 0x69, 0x36, 0xad, 0x2e, 0x0d, 0xdc, 0xf7, 0x55, 0x6f, 0x7a, - 0xb4, 0x69, 0x33, 0xcb, 0x3b, 0x21, 0xf4, 0xcc, 0x58, 0x52, 0x56, 0x0a, 0x8a, 0xc9, 0x70, 0x68, - 0x01, 0xfd, 0x5c, 0x83, 0xa5, 0x1e, 0x25, 0x87, 0x84, 0x52, 0xd2, 0x56, 0xfc, 0x4a, 0x76, 0x4d, - 0xfb, 0x0c, 0xcc, 0x56, 0x94, 0xd9, 0xa5, 0xd6, 0x08, 0x3e, 0x1e, 0xb3, 0x88, 0x7e, 0xa7, 0xc1, - 0x2a, 0x23, 0xf4, 0x84, 0xd0, 0xad, 0x76, 0x9b, 0x12, 0xc6, 0x8c, 0xb3, 0x6d, 0xc7, 0x26, 0x2e, - 0xdf, 0xde, 0x6d, 0x62, 0x56, 0x99, 0x95, 0xfb, 0xf0, 0x9d, 0x74, 0x0e, 0xed, 0x4f, 0xc2, 0x31, - 0x74, 0xe5, 0xd1, 0xea, 0x44, 0x11, 0x86, 0x5f, 0xe0, 0x86, 0x7e, 0x08, 0xf3, 0xc1, 0x41, 0xde, - 0xb6, 0x19, 0x47, 0xf7, 0x21, 0xdf, 0x11, 0x1f, 0xac, 0xa2, 0x49, 0x07, 0xeb, 0xe9, 0x1c, 0x0c, - 0x30, 0x8c, 0x05, 0xe5, 0x4f, 0x5e, 0x7e, 0x32, 0xac, 0xd0, 0xf4, 0x3f, 0x67, 0xa1, 0xb4, 0xd5, - 0xda, 0xc5, 0x84, 0x79, 0x7d, 0x6a, 0x91, 0x14, 0x41, 0xb3, 0x09, 0x20, 0xfe, 0xb2, 0x9e, 0x69, - 0x91, 0x76, 0x25, 0xb3, 0xa6, 0xad, 0x17, 0x0c, 0xa4, 0xe4, 0xe0, 0x4e, 0xc8, 0xc1, 0x31, 0x29, - 0x81, 0x7a, 0x6c, 0xbb, 0x6d, 0x79, 0xda, 0x31, 0xd4, 0x77, 0x6d, 0xb7, 0x8d, 0x25, 0x07, 0xdd, - 0x86, 0xdc, 0x09, 0xa1, 0x07, 0x62, 0xff, 0x45, 0x40, 0x7c, 0x25, 0xdd, 0xf2, 0xee, 0x0b, 0x15, - 0xa3, 0x38, 0x1c, 0xd4, 0x72, 0xf2, 0x27, 0xf6, 0x41, 0x50, 0x1d, 0x80, 0x1d, 0x79, 0x94, 0x4b, - 0x77, 0x2a, 0xb9, 0xb5, 0xec, 0x7a, 0xd1, 0x58, 0x10, 0xfe, 0xed, 0x87, 0x54, 0x1c, 0x93, 0x40, - 0xd7, 0x60, 0x9e, 0xd9, 0x6e, 0xa7, 0xef, 0x98, 0x54, 0x10, 0x2a, 0x79, 0xe9, 0xe7, 0x8a, 0xf2, - 0x73, 0x7e, 0x3f, 0xc6, 0xc3, 0x09, 0x49, 0x61, 0xc9, 0x32, 0x39, 0xe9, 0x78, 0xd4, 0x26, 0xac, - 0x32, 0x17, 0x59, 0xda, 0x0e, 
0xa9, 0x38, 0x26, 0x81, 0x5e, 0x87, 0x9c, 0xdc, 0xf9, 0x4a, 0x41, - 0x9a, 0x28, 0x2b, 0x13, 0x39, 0x79, 0x2c, 0xd8, 0xe7, 0xa1, 0x37, 0x61, 0x4e, 0x65, 0x4d, 0xa5, - 0x28, 0xc5, 0x16, 0x95, 0xd8, 0x5c, 0x10, 0xd6, 0x01, 0x5f, 0xff, 0xa3, 0x06, 0x8b, 0xb1, 0xf3, - 0x93, 0xb1, 0x72, 0x0d, 0xe6, 0x3b, 0xb1, 0x4c, 0x51, 0x67, 0x19, 0xae, 0x26, 0x9e, 0x45, 0x38, - 0x21, 0x89, 0x08, 0x14, 0xa9, 0x42, 0x0a, 0x2a, 0xc2, 0x46, 0xea, 0x40, 0x0b, 0x7c, 0x88, 0x2c, - 0xc5, 0x88, 0x0c, 0x47, 0xc8, 0xfa, 0x3f, 0x35, 0x19, 0x74, 0x41, 0x8d, 0x40, 0xeb, 0xb1, 0x3a, - 0xa4, 0xc9, 0x2d, 0x9c, 0x9f, 0x50, 0x43, 0x2e, 0x49, 0xde, 0xcc, 0xff, 0x45, 0xf2, 0x5e, 0x2f, + 0x65, 0xd7, 0x4b, 0x9b, 0x37, 0xea, 0x69, 0x82, 0x56, 0x97, 0x06, 0xee, 0xfb, 0xaa, 0x37, 0x3d, + 0xda, 0xb4, 0x99, 0xe5, 0x9d, 0x10, 0x7a, 0x66, 0x2c, 0x29, 0x2b, 0x05, 0xc5, 0x64, 0x38, 0xb4, + 0x80, 0x7e, 0xae, 0xc1, 0x52, 0x8f, 0x92, 0x43, 0x42, 0x29, 0x69, 0x2b, 0x7e, 0x25, 0xbb, 0xa6, + 0x7d, 0x06, 0x66, 0x2b, 0xca, 0xec, 0x52, 0x6b, 0x04, 0x1f, 0x8f, 0x59, 0x44, 0xbf, 0xd3, 0x60, + 0x95, 0x11, 0x7a, 0x42, 0xe8, 0x56, 0xbb, 0x4d, 0x09, 0x63, 0xc6, 0xd9, 0xb6, 0x63, 0x13, 0x97, + 0x6f, 0xef, 0x36, 0x31, 0xab, 0xcc, 0xca, 0x38, 0x7c, 0x27, 0x9d, 0x43, 0xfb, 0x93, 0x70, 0x0c, + 0x5d, 0x79, 0xb4, 0x3a, 0x51, 0x84, 0xe1, 0x17, 0xb8, 0xa1, 0x1f, 0xc2, 0x7c, 0xb0, 0x91, 0xb7, + 0x6d, 0xc6, 0xd1, 0x7d, 0xc8, 0x77, 0xc4, 0x07, 0xab, 0x68, 0xd2, 0xc1, 0x7a, 0x3a, 0x07, 0x03, + 0x0c, 0x63, 0x41, 0xf9, 0x93, 0x97, 0x9f, 0x0c, 0x2b, 0x34, 0xfd, 0xcf, 0x59, 0x28, 0x6d, 0xb5, + 0x76, 0x31, 0x61, 0x5e, 0x9f, 0x5a, 0x24, 0x45, 0xd2, 0x6c, 0x02, 0x88, 0xbf, 0xac, 0x67, 0x5a, + 0xa4, 0x5d, 0xc9, 0xac, 0x69, 0xeb, 0x05, 0x03, 0x29, 0x39, 0xb8, 0x13, 0x72, 0x70, 0x4c, 0x4a, + 0xa0, 0x1e, 0xdb, 0x6e, 0x5b, 0xee, 0x76, 0x0c, 0xf5, 0x5d, 0xdb, 0x6d, 0x63, 0xc9, 0x41, 0xb7, + 0x21, 0x77, 0x42, 0xe8, 0x81, 0x88, 0xbf, 0x48, 0x88, 0xaf, 0xa4, 0x5b, 0xde, 0x7d, 0xa1, 0x62, + 0x14, 0x87, 0x83, 0x5a, 0x4e, 0xfe, 0xc4, 0x3e, 0x08, 0xaa, 0x03, 0xb0, 0x23, 0x8f, 0x72, 0xe9, + 0x4e, 0x25, 0xb7, 0x96, 0x5d, 0x2f, 0x1a, 0x0b, 0xc2, 0xbf, 0xfd, 0x90, 0x8a, 0x63, 0x12, 0xe8, + 0x1a, 0xcc, 0x33, 0xdb, 0xed, 0xf4, 0x1d, 0x93, 0x0a, 0x42, 0x25, 0x2f, 0xfd, 0x5c, 0x51, 0x7e, + 0xce, 0xef, 0xc7, 0x78, 0x38, 0x21, 0x29, 0x2c, 0x59, 0x26, 0x27, 0x1d, 0x8f, 0xda, 0x84, 0x55, + 0xe6, 0x22, 0x4b, 0xdb, 0x21, 0x15, 0xc7, 0x24, 0xd0, 0xeb, 0x90, 0x93, 0x91, 0xaf, 0x14, 0xa4, + 0x89, 0xb2, 0x32, 0x91, 0x93, 0xdb, 0x82, 0x7d, 0x1e, 0x7a, 0x13, 0xe6, 0xd4, 0xa9, 0xa9, 0x14, + 0xa5, 0xd8, 0xa2, 0x12, 0x9b, 0x0b, 0xd2, 0x3a, 0xe0, 0xeb, 0x7f, 0xd4, 0x60, 0x31, 0xb6, 0x7f, + 0x32, 0x57, 0xae, 0xc1, 0x7c, 0x27, 0x76, 0x52, 0xd4, 0x5e, 0x86, 0xab, 0x89, 0x9f, 0x22, 0x9c, + 0x90, 0x44, 0x04, 0x8a, 0x54, 0x21, 0x05, 0x15, 0x61, 0x23, 0x75, 0xa2, 0x05, 0x3e, 0x44, 0x96, + 0x62, 0x44, 0x86, 0x23, 0x64, 0xfd, 0x9f, 0x9a, 0x4c, 0xba, 0xa0, 0x46, 0xa0, 0xf5, 0x58, 0x1d, + 0xd2, 0x64, 0x08, 0xe7, 0x27, 0xd4, 0x90, 0x4b, 0x0e, 0x6f, 0xe6, 0xff, 0xe2, 0xf0, 0x5e, 0x2f, 0xfc, 0xe6, 0xc3, 0xda, 0xcc, 0x07, 0xff, 0x58, 0x9b, 0xd1, 0x3f, 0xc9, 0x40, 0xb9, 0x49, 0x1c, 0xc2, 0xc9, 0xdd, 0x1e, 0x97, 0x2b, 0xb8, 0x09, 0xa8, 0x43, 0x4d, 0x8b, 0xb4, 0x08, 0xb5, 0xbd, - 0xf6, 0x3e, 0xb1, 0x3c, 0xb7, 0xcd, 0xe4, 0x11, 0x65, 0x8d, 0xcf, 0x0d, 0x07, 0x35, 0x74, 0x6b, - 0x8c, 0x8b, 0x2f, 0xd0, 0x40, 0x0e, 0x94, 0x7b, 0x54, 0xfe, 0xb6, 0xb9, 0x2a, 0xe0, 0x22, 0x71, - 0xde, 0x4e, 0xb7, 0xf6, 0x56, 0x5c, 0xd5, 0x58, 0x1e, 0x0e, 0x6a, 0xe5, 0x04, 0x09, 0x27, 0xc1, - 0xd1, 0x77, 0x61, 0xc9, 0xa3, 0xbd, 0x23, 0xd3, 0x6d, 0x92, 
0x1e, 0x71, 0xdb, 0xc4, 0xe5, 0x4c, - 0x26, 0x73, 0xc1, 0x58, 0x11, 0x65, 0xf7, 0xee, 0x08, 0x0f, 0x8f, 0x49, 0xa3, 0x07, 0xb0, 0xdc, - 0xa3, 0x5e, 0xcf, 0xec, 0x98, 0x02, 0xb1, 0xe5, 0x39, 0xb6, 0x75, 0x26, 0x93, 0xbd, 0x68, 0x5c, - 0x1d, 0x0e, 0x6a, 0xcb, 0xad, 0x51, 0xe6, 0xf9, 0xa0, 0xf6, 0x8a, 0xdc, 0x3a, 0x41, 0x89, 0x98, - 0x78, 0x1c, 0x46, 0xdf, 0x85, 0x42, 0xb3, 0x4f, 0x25, 0x05, 0x7d, 0x1b, 0x0a, 0x6d, 0xf5, 0x5b, - 0xed, 0xea, 0x6b, 0xc1, 0x9d, 0x14, 0xc8, 0x9c, 0x0f, 0x6a, 0x65, 0x71, 0xf5, 0xd6, 0x03, 0x02, - 0x0e, 0x55, 0xf4, 0x87, 0x50, 0xde, 0x39, 0xed, 0x79, 0x94, 0x07, 0xe7, 0xf5, 0x06, 0xe4, 0x89, - 0x24, 0x48, 0xb4, 0x42, 0x54, 0x48, 0x7d, 0x31, 0xac, 0xb8, 0x22, 0xb1, 0xc9, 0xa9, 0x69, 0x71, - 0x55, 0x11, 0xc3, 0xc4, 0xde, 0x11, 0x44, 0xec, 0xf3, 0xf4, 0x27, 0x1a, 0xc0, 0x2d, 0x12, 0x62, - 0x6f, 0xc1, 0x62, 0x90, 0x14, 0xc9, 0x5c, 0xfd, 0xbc, 0xd2, 0x5e, 0xc4, 0x49, 0x36, 0x1e, 0x95, - 0x47, 0x2d, 0x58, 0xb1, 0x5d, 0xcb, 0xe9, 0xb7, 0xc9, 0x3d, 0xd7, 0x76, 0x6d, 0x6e, 0x9b, 0x8e, - 0xfd, 0x93, 0xb0, 0x2e, 0x7f, 0x41, 0xe1, 0xac, 0xec, 0x5e, 0x20, 0x83, 0x2f, 0xd4, 0xd4, 0x1f, - 0x42, 0x51, 0x56, 0x08, 0x51, 0x9c, 0xa3, 0x72, 0xa5, 0xbd, 0xa0, 0x5c, 0x05, 0xd5, 0x3d, 0x33, - 0xa9, 0xba, 0xc7, 0x12, 0xc2, 0x81, 0xb2, 0xaf, 0x1b, 0x5c, 0x38, 0xa9, 0x2c, 0x5c, 0x85, 0x42, - 0xb0, 0x70, 0x65, 0x25, 0x6c, 0x34, 0x02, 0x20, 0x1c, 0x4a, 0xc4, 0xac, 0x1d, 0x41, 0xa2, 0xda, - 0xa5, 0x33, 0x16, 0xab, 0xbe, 0x99, 0x17, 0x57, 0xdf, 0x98, 0xa5, 0x9f, 0x41, 0x65, 0x52, 0x77, - 0xf2, 0x12, 0xf5, 0x38, 0xbd, 0x2b, 0xfa, 0xaf, 0x35, 0x58, 0x8a, 0x23, 0xa5, 0x3f, 0xbe, 0xf4, - 0x46, 0x2e, 0xbf, 0xc7, 0x63, 0x3b, 0xf2, 0x5b, 0x0d, 0x56, 0x12, 0x4b, 0x9b, 0xea, 0xc4, 0xa7, - 0x70, 0x2a, 0x1e, 0x1c, 0xd9, 0x29, 0x82, 0xa3, 0x01, 0xa5, 0xdd, 0x30, 0xee, 0xe9, 0xe5, 0x9d, - 0x8f, 0xfe, 0x17, 0x0d, 0xe6, 0x63, 0x1a, 0x0c, 0x3d, 0x84, 0x39, 0x51, 0xdf, 0x6c, 0xb7, 0xa3, - 0xba, 0xb2, 0x94, 0x97, 0x65, 0x0c, 0x24, 0x5a, 0x57, 0xcb, 0x47, 0xc2, 0x01, 0x24, 0x6a, 0x41, - 0x9e, 0x12, 0xd6, 0x77, 0xb8, 0x2a, 0xed, 0x57, 0x53, 0x5e, 0x6b, 0xdc, 0xe4, 0x7d, 0x66, 0x80, - 0xa8, 0x51, 0x58, 0xea, 0x63, 0x85, 0xa3, 0xff, 0x3d, 0x03, 0xe5, 0xdb, 0xe6, 0x01, 0x71, 0xf6, - 0x89, 0x43, 0x2c, 0xee, 0x51, 0xf4, 0x53, 0x28, 0x75, 0x4d, 0x6e, 0x1d, 0x49, 0x6a, 0xd0, 0x5b, - 0x36, 0xd3, 0x19, 0x4a, 0x20, 0xd5, 0xf7, 0x22, 0x98, 0x1d, 0x97, 0xd3, 0x33, 0xe3, 0x15, 0xb5, - 0xb0, 0x52, 0x8c, 0x83, 0xe3, 0xd6, 0xe4, 0x83, 0x40, 0x7e, 0xef, 0x9c, 0xf6, 0xc4, 0x25, 0x3a, - 0xfd, 0x3b, 0x24, 0xe1, 0x02, 0x26, 0xef, 0xf7, 0x6d, 0x4a, 0xba, 0xc4, 0xe5, 0xd1, 0x83, 0x60, - 0x6f, 0x04, 0x1f, 0x8f, 0x59, 0x5c, 0xbd, 0x01, 0x4b, 0xa3, 0xce, 0xa3, 0x25, 0xc8, 0x1e, 0x93, - 0x33, 0x3f, 0x16, 0xb0, 0xf8, 0x89, 0x56, 0x20, 0x77, 0x62, 0x3a, 0x7d, 0x55, 0x7f, 0xb0, 0xff, - 0x71, 0x3d, 0x73, 0x4d, 0xd3, 0x7f, 0xaf, 0x41, 0x65, 0x92, 0x23, 0xe8, 0x8b, 0x31, 0x20, 0xa3, - 0xa4, 0xbc, 0xca, 0xbe, 0x4b, 0xce, 0x7c, 0xd4, 0x1d, 0x28, 0x78, 0x3d, 0xf1, 0x84, 0xf3, 0xa8, - 0x8a, 0xf3, 0x37, 0x83, 0xd8, 0xbd, 0xab, 0xe8, 0xe7, 0x83, 0xda, 0x95, 0x04, 0x7c, 0xc0, 0xc0, - 0xa1, 0x2a, 0xd2, 0x21, 0x2f, 0xfd, 0x11, 0x97, 0xb2, 0x68, 0x9f, 0xe4, 0xe1, 0xdf, 0x97, 0x14, - 0xac, 0x38, 0xfa, 0x9f, 0x34, 0x98, 0x95, 0xed, 0xe1, 0x43, 0x28, 0x88, 0xfd, 0x6b, 0x9b, 0xdc, - 0x94, 0x7e, 0xa5, 0x7e, 0x4c, 0x08, 0xed, 0x3d, 0xc2, 0xcd, 0x28, 0xbf, 0x02, 0x0a, 0x0e, 0x11, - 0x11, 0x86, 0x9c, 0xcd, 0x49, 0x37, 0x38, 0xc8, 0xb7, 0x26, 0x42, 0xab, 0xf7, 0x6f, 0x1d, 0x9b, - 0x8f, 0x76, 0x4e, 0x39, 0x71, 0xc5, 0x61, 0x44, 0xc5, 0x60, 0x57, 0x60, 0x60, 0x1f, 
0x4a, 0xff, - 0x83, 0x06, 0xa1, 0x29, 0x91, 0xee, 0x8c, 0x38, 0x87, 0xb7, 0x6d, 0xf7, 0x58, 0x6d, 0x6b, 0xe8, - 0xce, 0xbe, 0xa2, 0xe3, 0x50, 0xe2, 0xa2, 0x2b, 0x36, 0x33, 0xe5, 0x15, 0x7b, 0x15, 0x0a, 0x96, - 0xe7, 0x72, 0xdb, 0xed, 0x8f, 0xd5, 0x97, 0x6d, 0x45, 0xc7, 0xa1, 0x84, 0xfe, 0x2c, 0x0b, 0x25, - 0xe1, 0x6b, 0x70, 0xc7, 0x7f, 0x13, 0xca, 0x4e, 0xfc, 0xf4, 0x94, 0xcf, 0x57, 0x14, 0x44, 0x32, - 0x1f, 0x71, 0x52, 0x56, 0x28, 0x1f, 0xda, 0xc4, 0x69, 0x87, 0xca, 0x99, 0xa4, 0xf2, 0xcd, 0x38, - 0x13, 0x27, 0x65, 0x45, 0x9d, 0x7d, 0x24, 0xe2, 0x5a, 0x35, 0x6a, 0xe1, 0xd6, 0x7e, 0x4f, 0x10, - 0xb1, 0xcf, 0xbb, 0x68, 0x7f, 0x66, 0xa7, 0xdc, 0x9f, 0xeb, 0xb0, 0x20, 0x0e, 0xd2, 0xeb, 0xf3, - 0xa0, 0x9b, 0xcd, 0xc9, 0xbe, 0x0b, 0x0d, 0x07, 0xb5, 0x85, 0xf7, 0x12, 0x1c, 0x3c, 0x22, 0x39, - 0xb1, 0x7d, 0xc9, 0x7f, 0xda, 0xf6, 0x45, 0xac, 0xda, 0xb1, 0xbb, 0x36, 0xaf, 0xcc, 0x49, 0x27, - 0xc2, 0x55, 0xdf, 0x16, 0x44, 0xec, 0xf3, 0x12, 0x47, 0x5a, 0xb8, 0xf4, 0x48, 0xdf, 0x87, 0xe2, - 0x9e, 0x6d, 0x51, 0x4f, 0xac, 0x45, 0x5c, 0x4c, 0x2c, 0xd1, 0xb4, 0x87, 0x05, 0x3c, 0x58, 0x63, - 0xc0, 0x17, 0xae, 0xb8, 0xa6, 0xeb, 0xf9, 0xad, 0x79, 0x2e, 0x72, 0xe5, 0x8e, 0x20, 0x62, 0x9f, - 0x77, 0x7d, 0x45, 0xdc, 0x47, 0xbf, 0x7c, 0x52, 0x9b, 0x79, 0xfc, 0xa4, 0x36, 0xf3, 0xe1, 0x13, - 0x75, 0x37, 0xfd, 0x0b, 0x00, 0xee, 0x1e, 0xfc, 0x98, 0x58, 0x7e, 0xcc, 0x5f, 0xfe, 0x2a, 0x17, - 0x3d, 0x86, 0x1a, 0x06, 0xc9, 0x17, 0x6c, 0x66, 0xa4, 0xc7, 0x88, 0xf1, 0x70, 0x42, 0x12, 0x35, - 0xa0, 0x18, 0xbe, 0xd4, 0x55, 0x7c, 0x2f, 0x2b, 0xb5, 0x62, 0xf8, 0x9c, 0xc7, 0x91, 0x4c, 0x22, - 0x01, 0x67, 0x2f, 0x4d, 0x40, 0x03, 0xb2, 0x7d, 0xbb, 0x2d, 0x43, 0xa2, 0x68, 0x7c, 0x35, 0x28, - 0x80, 0xf7, 0x76, 0x9b, 0xe7, 0x83, 0xda, 0x6b, 0x93, 0x66, 0x5c, 0xfc, 0xac, 0x47, 0x58, 0xfd, - 0xde, 0x6e, 0x13, 0x0b, 0xe5, 0x8b, 0x82, 0x34, 0x3f, 0x65, 0x90, 0x6e, 0x02, 0xa8, 0x55, 0x0b, - 0x6d, 0x3f, 0x36, 0xc2, 0xa9, 0xc5, 0xad, 0x90, 0x83, 0x63, 0x52, 0x88, 0xc1, 0xb2, 0x45, 0x89, - 0xfc, 0x2d, 0x8e, 0x9e, 0x71, 0xb3, 0xeb, 0xbf, 0xdb, 0x4b, 0x9b, 0x5f, 0x4e, 0x57, 0x31, 0x85, - 0x9a, 0xf1, 0xaa, 0x32, 0xb3, 0xbc, 0x3d, 0x0a, 0x86, 0xc7, 0xf1, 0x91, 0x07, 0xcb, 0x6d, 0xf5, - 0xea, 0x89, 0x8c, 0x16, 0xa7, 0x36, 0x7a, 0x45, 0x18, 0x6c, 0x8e, 0x02, 0xe1, 0x71, 0x6c, 0xf4, - 0x43, 0x58, 0x0d, 0x88, 0xe3, 0x4f, 0xcf, 0x0a, 0xc8, 0x9d, 0xaa, 0x8a, 0xc7, 0x70, 0x73, 0xa2, - 0x14, 0x7e, 0x01, 0x02, 0x6a, 0x43, 0xde, 0xf1, 0xbb, 0x8b, 0x92, 0xbc, 0x11, 0xbe, 0x95, 0x6e, - 0x15, 0x51, 0xf4, 0xd7, 0xe3, 0x5d, 0x45, 0xf8, 0xfc, 0x52, 0x0d, 0x85, 0xc2, 0x46, 0xa7, 0x50, - 0x32, 0x5d, 0xd7, 0xe3, 0xa6, 0xff, 0x18, 0x9e, 0x97, 0xa6, 0xb6, 0xa6, 0x36, 0xb5, 0x15, 0x61, - 0x8c, 0x74, 0x31, 0x31, 0x0e, 0x8e, 0x9b, 0x42, 0x8f, 0x60, 0xd1, 0x7b, 0xe4, 0x12, 0x8a, 0xc9, - 0x21, 0xa1, 0xc4, 0xb5, 0x08, 0xab, 0x94, 0xa5, 0xf5, 0xaf, 0xa5, 0xb4, 0x9e, 0x50, 0x8e, 0x42, - 0x3a, 0x49, 0x67, 0x78, 0xd4, 0x0a, 0xaa, 0x03, 0x1c, 0xda, 0xae, 0xea, 0x45, 0x2b, 0x0b, 0xd1, - 0xe8, 0xe9, 0x66, 0x48, 0xc5, 0x31, 0x09, 0xf4, 0x75, 0x28, 0x59, 0x4e, 0x9f, 0x71, 0xe2, 0xcf, - 0xb8, 0x16, 0x65, 0x06, 0x85, 0xeb, 0xdb, 0x8e, 0x58, 0x38, 0x2e, 0x87, 0x8e, 0x60, 0xde, 0x8e, - 0x35, 0xbd, 0x95, 0x25, 0x19, 0x8b, 0x9b, 0x53, 0x77, 0xba, 0xcc, 0x58, 0x12, 0x95, 0x28, 0x4e, - 0xc1, 0x09, 0xe4, 0xd5, 0x6f, 0x40, 0xe9, 0x53, 0xf6, 0x60, 0xa2, 0x87, 0x1b, 0x3d, 0xba, 0xa9, - 0x7a, 0xb8, 0xbf, 0x66, 0x60, 0x21, 0xb9, 0xe1, 0xe1, 0x5b, 0x47, 0x9b, 0x38, 0xb3, 0x0c, 0xaa, - 0x72, 0x76, 0x62, 0x55, 0x56, 0xc5, 0x6f, 0xf6, 0x65, 0x8a, 0xdf, 0x26, 0x80, 0xd9, 0xb3, 0x83, - 0xba, 0xe7, 
0xd7, 0xd1, 0xb0, 0x72, 0x45, 0x53, 0x34, 0x1c, 0x93, 0x92, 0x53, 0x49, 0xcf, 0xe5, - 0xd4, 0x73, 0x1c, 0x42, 0xd5, 0x65, 0xea, 0x4f, 0x25, 0x43, 0x2a, 0x8e, 0x49, 0xa0, 0x9b, 0x80, - 0x0e, 0x1c, 0xcf, 0x3a, 0x96, 0x5b, 0x10, 0xe4, 0xb9, 0xac, 0x92, 0x05, 0x7f, 0x28, 0x65, 0x8c, - 0x71, 0xf1, 0x05, 0x1a, 0xfa, 0x5d, 0x48, 0x8e, 0x91, 0xd0, 0x0d, 0x7f, 0x03, 0xb4, 0x70, 0xce, - 0x33, 0xdd, 0xe2, 0xf5, 0xab, 0x50, 0xc4, 0x9e, 0xc7, 0x5b, 0x26, 0x3f, 0x62, 0xa8, 0x06, 0xb9, - 0x9e, 0xf8, 0xa1, 0x66, 0x84, 0x72, 0xec, 0x2b, 0x39, 0xd8, 0xa7, 0xeb, 0xbf, 0xd2, 0xe0, 0xd5, - 0x89, 0x23, 0x3b, 0xb1, 0x91, 0x56, 0xf8, 0xa5, 0x5c, 0x0a, 0x37, 0x32, 0x92, 0xc3, 0x31, 0x29, - 0xd1, 0x80, 0x25, 0xe6, 0x7c, 0xa3, 0x0d, 0x58, 0xc2, 0x1a, 0x4e, 0xca, 0xea, 0xff, 0xce, 0x40, - 0xde, 0x7f, 0x8d, 0xfd, 0x97, 0x7b, 0xee, 0x37, 0x20, 0xcf, 0xa4, 0x1d, 0xe5, 0x5e, 0x58, 0x24, - 0x7d, 0xeb, 0x58, 0x71, 0x45, 0xef, 0xd2, 0x25, 0x8c, 0x99, 0x9d, 0x20, 0x66, 0xc3, 0xde, 0x65, - 0xcf, 0x27, 0xe3, 0x80, 0x8f, 0xde, 0x11, 0x8f, 0x4f, 0x93, 0x85, 0xed, 0x60, 0x35, 0x80, 0xc4, - 0x92, 0x7a, 0x3e, 0xa8, 0xcd, 0x2b, 0x70, 0xf9, 0x8d, 0x95, 0x34, 0x7a, 0x00, 0x73, 0x6d, 0xc2, - 0x4d, 0xdb, 0xf1, 0xbb, 0xc0, 0xd4, 0x03, 0x49, 0x1f, 0xac, 0xe9, 0xab, 0x1a, 0x25, 0xe1, 0x93, - 0xfa, 0xc0, 0x01, 0xa0, 0xc8, 0x37, 0xcb, 0x6b, 0xfb, 0xd3, 0xf9, 0x5c, 0x94, 0x6f, 0xdb, 0x5e, - 0x9b, 0x60, 0xc9, 0xd1, 0x1f, 0x6b, 0x50, 0xf2, 0x91, 0xb6, 0xcd, 0x3e, 0x23, 0x68, 0x23, 0x5c, - 0x85, 0x7f, 0xdc, 0xc1, 0x55, 0x3c, 0xfb, 0xde, 0x59, 0x8f, 0x9c, 0x0f, 0x6a, 0x45, 0x29, 0x26, - 0x3e, 0xc2, 0x05, 0xc4, 0xf6, 0x28, 0x73, 0xc9, 0x1e, 0xbd, 0x0e, 0x39, 0xd9, 0x71, 0xab, 0xcd, - 0x0c, 0xfb, 0x3b, 0xd9, 0x95, 0x63, 0x9f, 0xa7, 0x7f, 0x9c, 0x81, 0x72, 0x62, 0x71, 0x29, 0x9a, - 0xb9, 0x70, 0x42, 0x92, 0x49, 0x31, 0x75, 0x9b, 0xfc, 0x3f, 0x95, 0xef, 0x43, 0xde, 0x12, 0xeb, - 0x0b, 0xfe, 0xa9, 0xb5, 0x31, 0xcd, 0x51, 0xc8, 0x9d, 0x89, 0x22, 0x49, 0x7e, 0x32, 0xac, 0x00, - 0xd1, 0x2d, 0x58, 0xa6, 0x84, 0xd3, 0xb3, 0xad, 0x43, 0x4e, 0x68, 0xbc, 0xed, 0xcf, 0x45, 0xed, - 0x0e, 0x1e, 0x15, 0xc0, 0xe3, 0x3a, 0x41, 0x85, 0xcc, 0xbf, 0x44, 0x85, 0xd4, 0x1d, 0x98, 0xfd, - 0x1f, 0xb6, 0xe6, 0x3f, 0x80, 0x62, 0xd4, 0x3c, 0x7d, 0xc6, 0x26, 0xf5, 0x1f, 0x41, 0x41, 0x44, - 0x63, 0xd0, 0xf4, 0x5f, 0x72, 0x01, 0x25, 0xaf, 0x86, 0x4c, 0x9a, 0xab, 0x41, 0xdf, 0x04, 0xff, - 0x5f, 0x65, 0xa2, 0x9a, 0xfa, 0x0f, 0xf5, 0x58, 0x35, 0x8d, 0xbf, 0xba, 0x63, 0x93, 0xb2, 0x5f, - 0x68, 0x00, 0xf2, 0xd5, 0xb8, 0x73, 0x42, 0x5c, 0x2e, 0x1c, 0x13, 0x27, 0x30, 0xea, 0x98, 0x4c, - 0x23, 0xc9, 0x41, 0xf7, 0x20, 0xef, 0xc9, 0xa6, 0x4a, 0x8d, 0xae, 0xa6, 0x9c, 0x02, 0x84, 0x51, - 0xe7, 0x77, 0x66, 0x58, 0x81, 0x19, 0xeb, 0x4f, 0x9f, 0x57, 0x67, 0x9e, 0x3d, 0xaf, 0xce, 0x7c, - 0xf4, 0xbc, 0x3a, 0xf3, 0xc1, 0xb0, 0xaa, 0x3d, 0x1d, 0x56, 0xb5, 0x67, 0xc3, 0xaa, 0xf6, 0xd1, - 0xb0, 0xaa, 0x7d, 0x3c, 0xac, 0x6a, 0x8f, 0x3f, 0xa9, 0xce, 0x3c, 0xc8, 0x9c, 0x6c, 0xfc, 0x27, - 0x00, 0x00, 0xff, 0xff, 0x66, 0xe7, 0x2a, 0x84, 0x4b, 0x20, 0x00, 0x00, + 0xf6, 0x3e, 0xb1, 0x3c, 0xb7, 0xcd, 0xe4, 0x16, 0x65, 0x8d, 0xcf, 0x0d, 0x07, 0x35, 0x74, 0x6b, + 0x8c, 0x8b, 0x2f, 0xd0, 0x40, 0x0e, 0x94, 0x7b, 0x54, 0xfe, 0xb6, 0xb9, 0x2a, 0xe0, 0xe2, 0xe0, + 0xbc, 0x9d, 0x6e, 0xed, 0xad, 0xb8, 0xaa, 0xb1, 0x3c, 0x1c, 0xd4, 0xca, 0x09, 0x12, 0x4e, 0x82, + 0xa3, 0xef, 0xc2, 0x92, 0x47, 0x7b, 0x47, 0xa6, 0xdb, 0x24, 0x3d, 0xe2, 0xb6, 0x89, 0xcb, 0x99, + 0x3c, 0xcc, 0x05, 0x63, 0x45, 0x94, 0xdd, 0xbb, 0x23, 0x3c, 0x3c, 0x26, 0x8d, 0x1e, 0xc0, 0x72, + 0x8f, 0x7a, 0x3d, 0xb3, 0x63, 0x0a, 0xc4, 0x96, 0xe7, 0xd8, 
0xd6, 0x99, 0x3c, 0xec, 0x45, 0xe3, + 0xea, 0x70, 0x50, 0x5b, 0x6e, 0x8d, 0x32, 0xcf, 0x07, 0xb5, 0x57, 0x64, 0xe8, 0x04, 0x25, 0x62, + 0xe2, 0x71, 0x18, 0x7d, 0x17, 0x0a, 0xcd, 0x3e, 0x95, 0x14, 0xf4, 0x6d, 0x28, 0xb4, 0xd5, 0x6f, + 0x15, 0xd5, 0xd7, 0x82, 0x3b, 0x29, 0x90, 0x39, 0x1f, 0xd4, 0xca, 0xe2, 0xea, 0xad, 0x07, 0x04, + 0x1c, 0xaa, 0xe8, 0x0f, 0xa1, 0xbc, 0x73, 0xda, 0xf3, 0x28, 0x0f, 0xf6, 0xeb, 0x0d, 0xc8, 0x13, + 0x49, 0x90, 0x68, 0x85, 0xa8, 0x90, 0xfa, 0x62, 0x58, 0x71, 0xc5, 0xc1, 0x26, 0xa7, 0xa6, 0xc5, + 0x55, 0x45, 0x0c, 0x0f, 0xf6, 0x8e, 0x20, 0x62, 0x9f, 0xa7, 0x3f, 0xd1, 0x00, 0x6e, 0x91, 0x10, + 0x7b, 0x0b, 0x16, 0x83, 0x43, 0x91, 0x3c, 0xab, 0x9f, 0x57, 0xda, 0x8b, 0x38, 0xc9, 0xc6, 0xa3, + 0xf2, 0xa8, 0x05, 0x2b, 0xb6, 0x6b, 0x39, 0xfd, 0x36, 0xb9, 0xe7, 0xda, 0xae, 0xcd, 0x6d, 0xd3, + 0xb1, 0x7f, 0x12, 0xd6, 0xe5, 0x2f, 0x28, 0x9c, 0x95, 0xdd, 0x0b, 0x64, 0xf0, 0x85, 0x9a, 0xfa, + 0x43, 0x28, 0xca, 0x0a, 0x21, 0x8a, 0x73, 0x54, 0xae, 0xb4, 0x17, 0x94, 0xab, 0xa0, 0xba, 0x67, + 0x26, 0x55, 0xf7, 0xd8, 0x81, 0x70, 0xa0, 0xec, 0xeb, 0x06, 0x17, 0x4e, 0x2a, 0x0b, 0x57, 0xa1, + 0x10, 0x2c, 0x5c, 0x59, 0x09, 0x1b, 0x8d, 0x00, 0x08, 0x87, 0x12, 0x31, 0x6b, 0x47, 0x90, 0xa8, + 0x76, 0xe9, 0x8c, 0xc5, 0xaa, 0x6f, 0xe6, 0xc5, 0xd5, 0x37, 0x66, 0xe9, 0x67, 0x50, 0x99, 0xd4, + 0x9d, 0xbc, 0x44, 0x3d, 0x4e, 0xef, 0x8a, 0xfe, 0x6b, 0x0d, 0x96, 0xe2, 0x48, 0xe9, 0xb7, 0x2f, + 0xbd, 0x91, 0xcb, 0xef, 0xf1, 0x58, 0x44, 0x7e, 0xab, 0xc1, 0x4a, 0x62, 0x69, 0x53, 0xed, 0xf8, + 0x14, 0x4e, 0xc5, 0x93, 0x23, 0x3b, 0x45, 0x72, 0x34, 0xa0, 0xb4, 0x1b, 0xe6, 0x3d, 0xbd, 0xbc, + 0xf3, 0xd1, 0xff, 0xa2, 0xc1, 0x7c, 0x4c, 0x83, 0xa1, 0x87, 0x30, 0x27, 0xea, 0x9b, 0xed, 0x76, + 0x54, 0x57, 0x96, 0xf2, 0xb2, 0x8c, 0x81, 0x44, 0xeb, 0x6a, 0xf9, 0x48, 0x38, 0x80, 0x44, 0x2d, + 0xc8, 0x53, 0xc2, 0xfa, 0x0e, 0x57, 0xa5, 0xfd, 0x6a, 0xca, 0x6b, 0x8d, 0x9b, 0xbc, 0xcf, 0x0c, + 0x10, 0x35, 0x0a, 0x4b, 0x7d, 0xac, 0x70, 0xf4, 0xbf, 0x67, 0xa0, 0x7c, 0xdb, 0x3c, 0x20, 0xce, + 0x3e, 0x71, 0x88, 0xc5, 0x3d, 0x8a, 0x7e, 0x0a, 0xa5, 0xae, 0xc9, 0xad, 0x23, 0x49, 0x0d, 0x7a, + 0xcb, 0x66, 0x3a, 0x43, 0x09, 0xa4, 0xfa, 0x5e, 0x04, 0xb3, 0xe3, 0x72, 0x7a, 0x66, 0xbc, 0xa2, + 0x16, 0x56, 0x8a, 0x71, 0x70, 0xdc, 0x9a, 0x7c, 0x10, 0xc8, 0xef, 0x9d, 0xd3, 0x9e, 0xb8, 0x44, + 0xa7, 0x7f, 0x87, 0x24, 0x5c, 0xc0, 0xe4, 0xfd, 0xbe, 0x4d, 0x49, 0x97, 0xb8, 0x3c, 0x7a, 0x10, + 0xec, 0x8d, 0xe0, 0xe3, 0x31, 0x8b, 0xab, 0x37, 0x60, 0x69, 0xd4, 0x79, 0xb4, 0x04, 0xd9, 0x63, + 0x72, 0xe6, 0xe7, 0x02, 0x16, 0x3f, 0xd1, 0x0a, 0xe4, 0x4e, 0x4c, 0xa7, 0xaf, 0xea, 0x0f, 0xf6, + 0x3f, 0xae, 0x67, 0xae, 0x69, 0xfa, 0xef, 0x35, 0xa8, 0x4c, 0x72, 0x04, 0x7d, 0x31, 0x06, 0x64, + 0x94, 0x94, 0x57, 0xd9, 0x77, 0xc9, 0x99, 0x8f, 0xba, 0x03, 0x05, 0xaf, 0x27, 0x9e, 0x70, 0x1e, + 0x55, 0x79, 0xfe, 0x66, 0x90, 0xbb, 0x77, 0x15, 0xfd, 0x7c, 0x50, 0xbb, 0x92, 0x80, 0x0f, 0x18, + 0x38, 0x54, 0x45, 0x3a, 0xe4, 0xa5, 0x3f, 0xe2, 0x52, 0x16, 0xed, 0x93, 0xdc, 0xfc, 0xfb, 0x92, + 0x82, 0x15, 0x47, 0xff, 0x93, 0x06, 0xb3, 0xb2, 0x3d, 0x7c, 0x08, 0x05, 0x11, 0xbf, 0xb6, 0xc9, + 0x4d, 0xe9, 0x57, 0xea, 0xc7, 0x84, 0xd0, 0xde, 0x23, 0xdc, 0x8c, 0xce, 0x57, 0x40, 0xc1, 0x21, + 0x22, 0xc2, 0x90, 0xb3, 0x39, 0xe9, 0x06, 0x1b, 0xf9, 0xd6, 0x44, 0x68, 0xf5, 0xfe, 0xad, 0x63, + 0xf3, 0xd1, 0xce, 0x29, 0x27, 0xae, 0xd8, 0x8c, 0xa8, 0x18, 0xec, 0x0a, 0x0c, 0xec, 0x43, 0xe9, + 0x7f, 0xd0, 0x20, 0x34, 0x25, 0x8e, 0x3b, 0x23, 0xce, 0xe1, 0x6d, 0xdb, 0x3d, 0x56, 0x61, 0x0d, + 0xdd, 0xd9, 0x57, 0x74, 0x1c, 0x4a, 0x5c, 0x74, 0xc5, 0x66, 0xa6, 0xbc, 0x62, 0xaf, 
0x42, 0xc1, + 0xf2, 0x5c, 0x6e, 0xbb, 0xfd, 0xb1, 0xfa, 0xb2, 0xad, 0xe8, 0x38, 0x94, 0xd0, 0x9f, 0x65, 0xa1, + 0x24, 0x7c, 0x0d, 0xee, 0xf8, 0x6f, 0x42, 0xd9, 0x89, 0xef, 0x9e, 0xf2, 0xf9, 0x8a, 0x82, 0x48, + 0x9e, 0x47, 0x9c, 0x94, 0x15, 0xca, 0x87, 0x36, 0x71, 0xda, 0xa1, 0x72, 0x26, 0xa9, 0x7c, 0x33, + 0xce, 0xc4, 0x49, 0x59, 0x51, 0x67, 0x1f, 0x89, 0xbc, 0x56, 0x8d, 0x5a, 0x18, 0xda, 0xef, 0x09, + 0x22, 0xf6, 0x79, 0x17, 0xc5, 0x67, 0x76, 0xca, 0xf8, 0x5c, 0x87, 0x05, 0xb1, 0x91, 0x5e, 0x9f, + 0x07, 0xdd, 0x6c, 0x4e, 0xf6, 0x5d, 0x68, 0x38, 0xa8, 0x2d, 0xbc, 0x97, 0xe0, 0xe0, 0x11, 0xc9, + 0x89, 0xed, 0x4b, 0xfe, 0xd3, 0xb6, 0x2f, 0x62, 0xd5, 0x8e, 0xdd, 0xb5, 0x79, 0x65, 0x4e, 0x3a, + 0x11, 0xae, 0xfa, 0xb6, 0x20, 0x62, 0x9f, 0x97, 0xd8, 0xd2, 0xc2, 0xa5, 0x5b, 0xfa, 0x3e, 0x14, + 0xf7, 0x6c, 0x8b, 0x7a, 0x62, 0x2d, 0xe2, 0x62, 0x62, 0x89, 0xa6, 0x3d, 0x2c, 0xe0, 0xc1, 0x1a, + 0x03, 0xbe, 0x70, 0xc5, 0x35, 0x5d, 0xcf, 0x6f, 0xcd, 0x73, 0x91, 0x2b, 0x77, 0x04, 0x11, 0xfb, + 0xbc, 0xeb, 0x2b, 0xe2, 0x3e, 0xfa, 0xe5, 0x93, 0xda, 0xcc, 0xe3, 0x27, 0xb5, 0x99, 0x0f, 0x9f, + 0xa8, 0xbb, 0xe9, 0x5f, 0x00, 0x70, 0xf7, 0xe0, 0xc7, 0xc4, 0xf2, 0x73, 0xfe, 0xf2, 0x57, 0xb9, + 0xe8, 0x31, 0xd4, 0x30, 0x48, 0xbe, 0x60, 0x33, 0x23, 0x3d, 0x46, 0x8c, 0x87, 0x13, 0x92, 0xa8, + 0x01, 0xc5, 0xf0, 0xa5, 0xae, 0xf2, 0x7b, 0x59, 0xa9, 0x15, 0xc3, 0xe7, 0x3c, 0x8e, 0x64, 0x12, + 0x07, 0x70, 0xf6, 0xd2, 0x03, 0x68, 0x40, 0xb6, 0x6f, 0xb7, 0x65, 0x4a, 0x14, 0x8d, 0xaf, 0x06, + 0x05, 0xf0, 0xde, 0x6e, 0xf3, 0x7c, 0x50, 0x7b, 0x6d, 0xd2, 0x8c, 0x8b, 0x9f, 0xf5, 0x08, 0xab, + 0xdf, 0xdb, 0x6d, 0x62, 0xa1, 0x7c, 0x51, 0x92, 0xe6, 0xa7, 0x4c, 0xd2, 0x4d, 0x00, 0xb5, 0x6a, + 0xa1, 0xed, 0xe7, 0x46, 0x38, 0xb5, 0xb8, 0x15, 0x72, 0x70, 0x4c, 0x0a, 0x31, 0x58, 0xb6, 0x28, + 0x91, 0xbf, 0xc5, 0xd6, 0x33, 0x6e, 0x76, 0xfd, 0x77, 0x7b, 0x69, 0xf3, 0xcb, 0xe9, 0x2a, 0xa6, + 0x50, 0x33, 0x5e, 0x55, 0x66, 0x96, 0xb7, 0x47, 0xc1, 0xf0, 0x38, 0x3e, 0xf2, 0x60, 0xb9, 0xad, + 0x5e, 0x3d, 0x91, 0xd1, 0xe2, 0xd4, 0x46, 0xaf, 0x08, 0x83, 0xcd, 0x51, 0x20, 0x3c, 0x8e, 0x8d, + 0x7e, 0x08, 0xab, 0x01, 0x71, 0xfc, 0xe9, 0x59, 0x01, 0x19, 0xa9, 0xaa, 0x78, 0x0c, 0x37, 0x27, + 0x4a, 0xe1, 0x17, 0x20, 0xa0, 0x36, 0xe4, 0x1d, 0xbf, 0xbb, 0x28, 0xc9, 0x1b, 0xe1, 0x5b, 0xe9, + 0x56, 0x11, 0x65, 0x7f, 0x3d, 0xde, 0x55, 0x84, 0xcf, 0x2f, 0xd5, 0x50, 0x28, 0x6c, 0x74, 0x0a, + 0x25, 0xd3, 0x75, 0x3d, 0x6e, 0xfa, 0x8f, 0xe1, 0x79, 0x69, 0x6a, 0x6b, 0x6a, 0x53, 0x5b, 0x11, + 0xc6, 0x48, 0x17, 0x13, 0xe3, 0xe0, 0xb8, 0x29, 0xf4, 0x08, 0x16, 0xbd, 0x47, 0x2e, 0xa1, 0x98, + 0x1c, 0x12, 0x4a, 0x5c, 0x8b, 0xb0, 0x4a, 0x59, 0x5a, 0xff, 0x5a, 0x4a, 0xeb, 0x09, 0xe5, 0x28, + 0xa5, 0x93, 0x74, 0x86, 0x47, 0xad, 0xa0, 0x3a, 0xc0, 0xa1, 0xed, 0xaa, 0x5e, 0xb4, 0xb2, 0x10, + 0x8d, 0x9e, 0x6e, 0x86, 0x54, 0x1c, 0x93, 0x40, 0x5f, 0x87, 0x92, 0xe5, 0xf4, 0x19, 0x27, 0xfe, + 0x8c, 0x6b, 0x51, 0x9e, 0xa0, 0x70, 0x7d, 0xdb, 0x11, 0x0b, 0xc7, 0xe5, 0xd0, 0x11, 0xcc, 0xdb, + 0xb1, 0xa6, 0xb7, 0xb2, 0x24, 0x73, 0x71, 0x73, 0xea, 0x4e, 0x97, 0x19, 0x4b, 0xa2, 0x12, 0xc5, + 0x29, 0x38, 0x81, 0xbc, 0xfa, 0x0d, 0x28, 0x7d, 0xca, 0x1e, 0x4c, 0xf4, 0x70, 0xa3, 0x5b, 0x37, + 0x55, 0x0f, 0xf7, 0xd7, 0x0c, 0x2c, 0x24, 0x03, 0x1e, 0xbe, 0x75, 0xb4, 0x89, 0x33, 0xcb, 0xa0, + 0x2a, 0x67, 0x27, 0x56, 0x65, 0x55, 0xfc, 0x66, 0x5f, 0xa6, 0xf8, 0x6d, 0x02, 0x98, 0x3d, 0x3b, + 0xa8, 0x7b, 0x7e, 0x1d, 0x0d, 0x2b, 0x57, 0x34, 0x45, 0xc3, 0x31, 0x29, 0x39, 0x95, 0xf4, 0x5c, + 0x4e, 0x3d, 0xc7, 0x21, 0x54, 0x5d, 0xa6, 0xfe, 0x54, 0x32, 0xa4, 0xe2, 0x98, 0x04, 0xba, 0x09, + 0xe8, 0xc0, 
0xf1, 0xac, 0x63, 0x19, 0x82, 0xe0, 0x9c, 0xcb, 0x2a, 0x59, 0xf0, 0x87, 0x52, 0xc6, + 0x18, 0x17, 0x5f, 0xa0, 0xa1, 0xcf, 0x41, 0xae, 0x25, 0xda, 0x0a, 0xfd, 0x2e, 0x24, 0xe7, 0x49, + 0xe8, 0x86, 0x1f, 0x09, 0x2d, 0x1c, 0xf8, 0x4c, 0x17, 0x05, 0xfd, 0x2a, 0x14, 0xb1, 0xe7, 0xf1, + 0x96, 0xc9, 0x8f, 0x18, 0xaa, 0x41, 0xae, 0x27, 0x7e, 0xa8, 0x61, 0xa1, 0x9c, 0xff, 0x4a, 0x0e, + 0xf6, 0xe9, 0xfa, 0xaf, 0x34, 0x78, 0x75, 0xe2, 0xec, 0x4e, 0x44, 0xd4, 0x0a, 0xbf, 0x94, 0x4b, + 0x61, 0x44, 0x23, 0x39, 0x1c, 0x93, 0x12, 0x9d, 0x58, 0x62, 0xe0, 0x37, 0xda, 0x89, 0x25, 0xac, + 0xe1, 0xa4, 0xac, 0xfe, 0xef, 0x0c, 0xe4, 0xfd, 0x67, 0xd9, 0x7f, 0xb9, 0xf9, 0x7e, 0x03, 0xf2, + 0x4c, 0xda, 0x51, 0xee, 0x85, 0xd5, 0xd2, 0xb7, 0x8e, 0x15, 0x57, 0x34, 0x31, 0x5d, 0xc2, 0x98, + 0xd9, 0x09, 0x92, 0x37, 0x6c, 0x62, 0xf6, 0x7c, 0x32, 0x0e, 0xf8, 0xe8, 0x1d, 0xf1, 0x0a, 0x35, + 0x59, 0xd8, 0x17, 0x56, 0x03, 0x48, 0x2c, 0xa9, 0xe7, 0x83, 0xda, 0xbc, 0x02, 0x97, 0xdf, 0x58, + 0x49, 0xa3, 0x07, 0x30, 0xd7, 0x26, 0xdc, 0xb4, 0x1d, 0xbf, 0x1d, 0x4c, 0x3d, 0x99, 0xf4, 0xc1, + 0x9a, 0xbe, 0xaa, 0x51, 0x12, 0x3e, 0xa9, 0x0f, 0x1c, 0x00, 0x8a, 0x83, 0x67, 0x79, 0x6d, 0x7f, + 0x4c, 0x9f, 0x8b, 0x0e, 0xde, 0xb6, 0xd7, 0x26, 0x58, 0x72, 0xf4, 0xc7, 0x1a, 0x94, 0x7c, 0xa4, + 0x6d, 0xb3, 0xcf, 0x08, 0xda, 0x08, 0x57, 0xe1, 0x6f, 0x77, 0x70, 0x27, 0xcf, 0xbe, 0x77, 0xd6, + 0x23, 0xe7, 0x83, 0x5a, 0x51, 0x8a, 0x89, 0x8f, 0x70, 0x01, 0xb1, 0x18, 0x65, 0x2e, 0x89, 0xd1, + 0xeb, 0x90, 0x93, 0xad, 0xb7, 0x0a, 0x66, 0xd8, 0xe8, 0xc9, 0xf6, 0x1c, 0xfb, 0x3c, 0xfd, 0xe3, + 0x0c, 0x94, 0x13, 0x8b, 0x4b, 0xd1, 0xd5, 0x85, 0xa3, 0x92, 0x4c, 0x8a, 0xf1, 0xdb, 0xe4, 0x7f, + 0xae, 0x7c, 0x1f, 0xf2, 0x96, 0x58, 0x5f, 0xf0, 0xdf, 0xad, 0x8d, 0x69, 0xb6, 0x42, 0x46, 0x26, + 0xca, 0x24, 0xf9, 0xc9, 0xb0, 0x02, 0x44, 0xb7, 0x60, 0x99, 0x12, 0x4e, 0xcf, 0xb6, 0x0e, 0x39, + 0xa1, 0xf1, 0xfe, 0x3f, 0x17, 0xf5, 0x3d, 0x78, 0x54, 0x00, 0x8f, 0xeb, 0x04, 0xa5, 0x32, 0xff, + 0x12, 0xa5, 0x52, 0x77, 0x60, 0xf6, 0x7f, 0xd8, 0xa3, 0xff, 0x00, 0x8a, 0x51, 0x17, 0xf5, 0x19, + 0x9b, 0xd4, 0x7f, 0x04, 0x05, 0x91, 0x8d, 0x41, 0xf7, 0x7f, 0xc9, 0x4d, 0x94, 0xbc, 0x23, 0x32, + 0x69, 0xee, 0x08, 0x7d, 0x13, 0xfc, 0xff, 0x99, 0x89, 0x6a, 0xea, 0xbf, 0xd8, 0x63, 0xd5, 0x34, + 0xfe, 0xfc, 0x8e, 0x8d, 0xcc, 0x7e, 0xa1, 0x01, 0xc8, 0xe7, 0xe3, 0xce, 0x09, 0x71, 0xb9, 0x70, + 0x4c, 0xec, 0xc0, 0xa8, 0x63, 0xf2, 0x18, 0x49, 0x0e, 0xba, 0x07, 0x79, 0x4f, 0x76, 0x57, 0x6a, + 0x86, 0x35, 0xe5, 0x38, 0x20, 0xcc, 0x3a, 0xbf, 0x45, 0xc3, 0x0a, 0xcc, 0x58, 0x7f, 0xfa, 0xbc, + 0x3a, 0xf3, 0xec, 0x79, 0x75, 0xe6, 0xa3, 0xe7, 0xd5, 0x99, 0x0f, 0x86, 0x55, 0xed, 0xe9, 0xb0, + 0xaa, 0x3d, 0x1b, 0x56, 0xb5, 0x8f, 0x86, 0x55, 0xed, 0xe3, 0x61, 0x55, 0x7b, 0xfc, 0x49, 0x75, + 0xe6, 0x41, 0xe6, 0x64, 0xe3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x6c, 0xc5, 0x28, 0xb2, 0x54, + 0x20, 0x00, 0x00, } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto index ea48226b73f2..ab23a0713b37 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto @@ -149,6 +149,10 @@ message DeleteOptions { // Either this field or OrphanDependents may be set, but not both. // The default policy is decided by the existing finalizer set in the // metadata.finalizers and the resource-specific default policy. 
+ // Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - + // allow the garbage collector to delete the dependents in the background; + // 'Foreground' - a cascading policy that deletes all dependents in the + // foreground. // +optional optional string propagationPolicy = 4; } @@ -510,15 +514,16 @@ message ObjectMeta { // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This // field is set by the server when a graceful deletion is requested by the user, and is not // directly settable by a client. The resource is expected to be deleted (no longer visible - // from resource lists, and not reachable by name) after the time in this field. Once set, - // this value may not be unset or be set further into the future, although it may be shortened - // or the resource may be deleted prior to this time. For example, a user may request that - // a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination - // signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard - // termination signal (SIGKILL) to the container and after cleanup, remove the pod from the - // API. In the presence of network partitions, this object may still exist after this - // timestamp, until an administrator or automated process can determine the resource is - // fully terminated. + // from resource lists, and not reachable by name) after the time in this field, once the + // finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. + // Once the deletionTimestamp is set, this value may not be unset or be set further into the + // future, although it may be shortened or the resource may be deleted prior to this time. + // For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react + // by sending a graceful termination signal to the containers in the pod. After that 30 seconds, + // the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, + // remove the pod from the API. In the presence of network partitions, this object may still + // exist after this timestamp, until an administrator or automated process can determine the + // resource is fully terminated. // If not set, graceful deletion of the object has not been requested. // // Populated by the system when a graceful deletion is requested. @@ -616,6 +621,10 @@ message OwnerReference { optional bool blockOwnerDeletion = 7; } +// Patch is provided to give a concrete name and type to the Kubernetes PATCH request body. +message Patch { +} + // Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. message Preconditions { // Specifies the target UID. diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go index dbe206704f89..a09d79571c31 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go @@ -175,10 +175,10 @@ func (t *MicroTime) Fuzz(c fuzz.Continue) { if t == nil { return } - // Allow for about 1000 years of randomness. Leave off nanoseconds - // because JSON doesn't represent them so they can't round-trip - // properly. - t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60*1000*1000), 0) + // Allow for about 1000 years of randomness. Accurate to a tenth of + // micro second. 
Leave off nanoseconds because JSON doesn't + // represent them so they can't round-trip properly. + t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60), 1000*c.Rand.Int63n(1000000)) } var _ fuzz.Interface = &MicroTime{} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go index 6e449a436a14..b300d37015a9 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go @@ -70,7 +70,6 @@ func AddToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion) ) // register manually. This usually goes through the SchemeBuilder, which we cannot use here. - scheme.AddGeneratedDeepCopyFuncs(GetGeneratedDeepCopyFuncs()...) AddConversionFuncs(scheme) RegisterDefaults(scheme) } @@ -90,6 +89,5 @@ func init() { ) // register manually. This usually goes through the SchemeBuilder, which we cannot use here. - scheme.AddGeneratedDeepCopyFuncs(GetGeneratedDeepCopyFuncs()...) RegisterDefaults(scheme) } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go index 435f6a8f5994..0a9f2a377562 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go @@ -80,7 +80,13 @@ func (t *Time) Before(u *Time) bool { // Equal reports whether the time instant t is equal to u. func (t *Time) Equal(u *Time) bool { - return t.Time.Equal(u.Time) + if t == nil && u == nil { + return true + } + if t != nil && u != nil { + return t.Time.Equal(u.Time) + } + return false } // Unix returns the local time corresponding to the given Unix time diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go index 13ae66c6f47c..c8ee4e5d65b2 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -177,15 +177,16 @@ type ObjectMeta struct { // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This // field is set by the server when a graceful deletion is requested by the user, and is not // directly settable by a client. The resource is expected to be deleted (no longer visible - // from resource lists, and not reachable by name) after the time in this field. Once set, - // this value may not be unset or be set further into the future, although it may be shortened - // or the resource may be deleted prior to this time. For example, a user may request that - // a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination - // signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard - // termination signal (SIGKILL) to the container and after cleanup, remove the pod from the - // API. In the presence of network partitions, this object may still exist after this - // timestamp, until an administrator or automated process can determine the resource is - // fully terminated. + // from resource lists, and not reachable by name) after the time in this field, once the + // finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. + // Once the deletionTimestamp is set, this value may not be unset or be set further into the + // future, although it may be shortened or the resource may be deleted prior to this time. + // For example, a user may request that a pod is deleted in 30 seconds. 
The Kubelet will react + // by sending a graceful termination signal to the containers in the pod. After that 30 seconds, + // the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, + // remove the pod from the API. In the presence of network partitions, this object may still + // exist after this timestamp, until an administrator or automated process can determine the + // resource is fully terminated. // If not set, graceful deletion of the object has not been requested. // // Populated by the system when a graceful deletion is requested. @@ -445,6 +446,10 @@ type DeleteOptions struct { // Either this field or OrphanDependents may be set, but not both. // The default policy is decided by the existing finalizer set in the // metadata.finalizers and the resource-specific default policy. + // Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - + // allow the garbage collector to delete the dependents in the background; + // 'Foreground' - a cascading policy that deletes all dependents in the + // foreground. // +optional PropagationPolicy *DeletionPropagation `json:"propagationPolicy,omitempty" protobuf:"varint,4,opt,name=propagationPolicy"` } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go index 49d2de1ef794..5dbba4b02f51 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go @@ -90,7 +90,7 @@ var map_DeleteOptions = map[string]string{ "gracePeriodSeconds": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.", "preconditions": "Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be returned.", "orphanDependents": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.", - "propagationPolicy": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.", + "propagationPolicy": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.", } func (DeleteOptions) SwaggerDoc() map[string]string { @@ -214,7 +214,7 @@ var map_ObjectMeta = map[string]string{ "resourceVersion": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. 
May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency", "generation": "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.", "creationTimestamp": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "deletionTimestamp": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field. Once set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "deletionTimestamp": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. 
Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "deletionGracePeriodSeconds": "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.", "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels", "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations", diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/BUILD b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/BUILD index 9c2154a3aa78..7ae9cb0f9d89 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/BUILD @@ -8,22 +8,30 @@ load( go_test( name = "go_default_test", - srcs = ["unstructured_test.go"], + srcs = [ + "helpers_test.go", + "unstructured_list_test.go", + ], + importpath = "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured", library = ":go_default_library", - deps = ["//vendor/github.com/stretchr/testify/assert:go_default_library"], + deps = [ + "//vendor/github.com/stretchr/testify/assert:go_default_library", + "//vendor/github.com/stretchr/testify/require:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + ], ) go_library( name = "go_default_library", srcs = [ + "helpers.go", "unstructured.go", + "unstructured_list.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured", deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion/unstructured:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go new file mode 100644 index 000000000000..8d03525474d6 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go @@ -0,0 +1,453 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package unstructured + +import ( + gojson "encoding/json" + "errors" + "fmt" + "io" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/json" +) + +// NestedFieldCopy returns a deep copy of the value of a nested field. +// false is returned if the value is missing. +// nil, true is returned for a nil field. +func NestedFieldCopy(obj map[string]interface{}, fields ...string) (interface{}, bool) { + val, ok := nestedFieldNoCopy(obj, fields...) + if !ok { + return nil, false + } + return runtime.DeepCopyJSONValue(val), true +} + +func nestedFieldNoCopy(obj map[string]interface{}, fields ...string) (interface{}, bool) { + var val interface{} = obj + for _, field := range fields { + if m, ok := val.(map[string]interface{}); ok { + val, ok = m[field] + if !ok { + return nil, false + } + } else { + // Expected map[string]interface{}, got something else + return nil, false + } + } + return val, true +} + +// NestedString returns the string value of a nested field. +// Returns false if value is not found or is not a string. +func NestedString(obj map[string]interface{}, fields ...string) (string, bool) { + val, ok := nestedFieldNoCopy(obj, fields...) + if !ok { + return "", false + } + s, ok := val.(string) + return s, ok +} + +// NestedBool returns the bool value of a nested field. +// Returns false if value is not found or is not a bool. +func NestedBool(obj map[string]interface{}, fields ...string) (bool, bool) { + val, ok := nestedFieldNoCopy(obj, fields...) + if !ok { + return false, false + } + b, ok := val.(bool) + return b, ok +} + +// NestedFloat64 returns the bool value of a nested field. +// Returns false if value is not found or is not a float64. +func NestedFloat64(obj map[string]interface{}, fields ...string) (float64, bool) { + val, ok := nestedFieldNoCopy(obj, fields...) + if !ok { + return 0, false + } + f, ok := val.(float64) + return f, ok +} + +// NestedInt64 returns the int64 value of a nested field. +// Returns false if value is not found or is not an int64. +func NestedInt64(obj map[string]interface{}, fields ...string) (int64, bool) { + val, ok := nestedFieldNoCopy(obj, fields...) + if !ok { + return 0, false + } + i, ok := val.(int64) + return i, ok +} + +// NestedStringSlice returns a copy of []string value of a nested field. +// Returns false if value is not found, is not a []interface{} or contains non-string items in the slice. +func NestedStringSlice(obj map[string]interface{}, fields ...string) ([]string, bool) { + val, ok := nestedFieldNoCopy(obj, fields...) + if !ok { + return nil, false + } + if m, ok := val.([]interface{}); ok { + strSlice := make([]string, 0, len(m)) + for _, v := range m { + if str, ok := v.(string); ok { + strSlice = append(strSlice, str) + } else { + return nil, false + } + } + return strSlice, true + } + return nil, false +} + +// NestedSlice returns a deep copy of []interface{} value of a nested field. +// Returns false if value is not found or is not a []interface{}. +func NestedSlice(obj map[string]interface{}, fields ...string) ([]interface{}, bool) { + val, ok := nestedFieldNoCopy(obj, fields...) + if !ok { + return nil, false + } + if _, ok := val.([]interface{}); ok { + return runtime.DeepCopyJSONValue(val).([]interface{}), true + } + return nil, false +} + +// NestedStringMap returns a copy of map[string]string value of a nested field. 
+// Returns false if value is not found, is not a map[string]interface{} or contains non-string values in the map. +func NestedStringMap(obj map[string]interface{}, fields ...string) (map[string]string, bool) { + m, ok := nestedMapNoCopy(obj, fields...) + if !ok { + return nil, false + } + strMap := make(map[string]string, len(m)) + for k, v := range m { + if str, ok := v.(string); ok { + strMap[k] = str + } else { + return nil, false + } + } + return strMap, true +} + +// NestedMap returns a deep copy of map[string]interface{} value of a nested field. +// Returns false if value is not found or is not a map[string]interface{}. +func NestedMap(obj map[string]interface{}, fields ...string) (map[string]interface{}, bool) { + m, ok := nestedMapNoCopy(obj, fields...) + if !ok { + return nil, false + } + return runtime.DeepCopyJSON(m), true +} + +// nestedMapNoCopy returns a map[string]interface{} value of a nested field. +// Returns false if value is not found or is not a map[string]interface{}. +func nestedMapNoCopy(obj map[string]interface{}, fields ...string) (map[string]interface{}, bool) { + val, ok := nestedFieldNoCopy(obj, fields...) + if !ok { + return nil, false + } + m, ok := val.(map[string]interface{}) + return m, ok +} + +// SetNestedField sets the value of a nested field to a deep copy of the value provided. +// Returns false if value cannot be set because one of the nesting levels is not a map[string]interface{}. +func SetNestedField(obj map[string]interface{}, value interface{}, fields ...string) bool { + return setNestedFieldNoCopy(obj, runtime.DeepCopyJSONValue(value), fields...) +} + +func setNestedFieldNoCopy(obj map[string]interface{}, value interface{}, fields ...string) bool { + m := obj + for _, field := range fields[:len(fields)-1] { + if val, ok := m[field]; ok { + if valMap, ok := val.(map[string]interface{}); ok { + m = valMap + } else { + return false + } + } else { + newVal := make(map[string]interface{}) + m[field] = newVal + m = newVal + } + } + m[fields[len(fields)-1]] = value + return true +} + +// SetNestedStringSlice sets the string slice value of a nested field. +// Returns false if value cannot be set because one of the nesting levels is not a map[string]interface{}. +func SetNestedStringSlice(obj map[string]interface{}, value []string, fields ...string) bool { + m := make([]interface{}, 0, len(value)) // convert []string into []interface{} + for _, v := range value { + m = append(m, v) + } + return setNestedFieldNoCopy(obj, m, fields...) +} + +// SetNestedSlice sets the slice value of a nested field. +// Returns false if value cannot be set because one of the nesting levels is not a map[string]interface{}. +func SetNestedSlice(obj map[string]interface{}, value []interface{}, fields ...string) bool { + return SetNestedField(obj, value, fields...) +} + +// SetNestedStringMap sets the map[string]string value of a nested field. +// Returns false if value cannot be set because one of the nesting levels is not a map[string]interface{}. +func SetNestedStringMap(obj map[string]interface{}, value map[string]string, fields ...string) bool { + m := make(map[string]interface{}, len(value)) // convert map[string]string into map[string]interface{} + for k, v := range value { + m[k] = v + } + return setNestedFieldNoCopy(obj, m, fields...) +} + +// SetNestedMap sets the map[string]interface{} value of a nested field. +// Returns false if value cannot be set because one of the nesting levels is not a map[string]interface{}. 
+func SetNestedMap(obj map[string]interface{}, value map[string]interface{}, fields ...string) bool { + return SetNestedField(obj, value, fields...) +} + +// RemoveNestedField removes the nested field from the obj. +func RemoveNestedField(obj map[string]interface{}, fields ...string) { + m := obj + for _, field := range fields[:len(fields)-1] { + if x, ok := m[field].(map[string]interface{}); ok { + m = x + } else { + return + } + } + delete(m, fields[len(fields)-1]) +} + +func getNestedString(obj map[string]interface{}, fields ...string) string { + val, ok := NestedString(obj, fields...) + if !ok { + return "" + } + return val +} + +func extractOwnerReference(v map[string]interface{}) metav1.OwnerReference { + // though this field is a *bool, but when decoded from JSON, it's + // unmarshalled as bool. + var controllerPtr *bool + if controller, ok := NestedBool(v, "controller"); ok { + controllerPtr = &controller + } + var blockOwnerDeletionPtr *bool + if blockOwnerDeletion, ok := NestedBool(v, "blockOwnerDeletion"); ok { + blockOwnerDeletionPtr = &blockOwnerDeletion + } + return metav1.OwnerReference{ + Kind: getNestedString(v, "kind"), + Name: getNestedString(v, "name"), + APIVersion: getNestedString(v, "apiVersion"), + UID: types.UID(getNestedString(v, "uid")), + Controller: controllerPtr, + BlockOwnerDeletion: blockOwnerDeletionPtr, + } +} + +// UnstructuredJSONScheme is capable of converting JSON data into the Unstructured +// type, which can be used for generic access to objects without a predefined scheme. +// TODO: move into serializer/json. +var UnstructuredJSONScheme runtime.Codec = unstructuredJSONScheme{} + +type unstructuredJSONScheme struct{} + +func (s unstructuredJSONScheme) Decode(data []byte, _ *schema.GroupVersionKind, obj runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { + var err error + if obj != nil { + err = s.decodeInto(data, obj) + } else { + obj, err = s.decode(data) + } + + if err != nil { + return nil, nil, err + } + + gvk := obj.GetObjectKind().GroupVersionKind() + if len(gvk.Kind) == 0 { + return nil, &gvk, runtime.NewMissingKindErr(string(data)) + } + + return obj, &gvk, nil +} + +func (unstructuredJSONScheme) Encode(obj runtime.Object, w io.Writer) error { + switch t := obj.(type) { + case *Unstructured: + return json.NewEncoder(w).Encode(t.Object) + case *UnstructuredList: + items := make([]interface{}, 0, len(t.Items)) + for _, i := range t.Items { + items = append(items, i.Object) + } + listObj := make(map[string]interface{}, len(t.Object)+1) + for k, v := range t.Object { // Make a shallow copy + listObj[k] = v + } + listObj["items"] = items + return json.NewEncoder(w).Encode(listObj) + case *runtime.Unknown: + // TODO: Unstructured needs to deal with ContentType. + _, err := w.Write(t.Raw) + return err + default: + return json.NewEncoder(w).Encode(t) + } +} + +func (s unstructuredJSONScheme) decode(data []byte) (runtime.Object, error) { + type detector struct { + Items gojson.RawMessage + } + var det detector + if err := json.Unmarshal(data, &det); err != nil { + return nil, err + } + + if det.Items != nil { + list := &UnstructuredList{} + err := s.decodeToList(data, list) + return list, err + } + + // No Items field, so it wasn't a list. 
+ unstruct := &Unstructured{} + err := s.decodeToUnstructured(data, unstruct) + return unstruct, err +} + +func (s unstructuredJSONScheme) decodeInto(data []byte, obj runtime.Object) error { + switch x := obj.(type) { + case *Unstructured: + return s.decodeToUnstructured(data, x) + case *UnstructuredList: + return s.decodeToList(data, x) + case *runtime.VersionedObjects: + o, err := s.decode(data) + if err == nil { + x.Objects = []runtime.Object{o} + } + return err + default: + return json.Unmarshal(data, x) + } +} + +func (unstructuredJSONScheme) decodeToUnstructured(data []byte, unstruct *Unstructured) error { + m := make(map[string]interface{}) + if err := json.Unmarshal(data, &m); err != nil { + return err + } + + unstruct.Object = m + + return nil +} + +func (s unstructuredJSONScheme) decodeToList(data []byte, list *UnstructuredList) error { + type decodeList struct { + Items []gojson.RawMessage + } + + var dList decodeList + if err := json.Unmarshal(data, &dList); err != nil { + return err + } + + if err := json.Unmarshal(data, &list.Object); err != nil { + return err + } + + // For typed lists, e.g., a PodList, API server doesn't set each item's + // APIVersion and Kind. We need to set it. + listAPIVersion := list.GetAPIVersion() + listKind := list.GetKind() + itemKind := strings.TrimSuffix(listKind, "List") + + delete(list.Object, "items") + list.Items = make([]Unstructured, 0, len(dList.Items)) + for _, i := range dList.Items { + unstruct := &Unstructured{} + if err := s.decodeToUnstructured([]byte(i), unstruct); err != nil { + return err + } + // This is hacky. Set the item's Kind and APIVersion to those inferred + // from the List. + if len(unstruct.GetKind()) == 0 && len(unstruct.GetAPIVersion()) == 0 { + unstruct.SetKind(itemKind) + unstruct.SetAPIVersion(listAPIVersion) + } + list.Items = append(list.Items, *unstruct) + } + return nil +} + +// UnstructuredObjectConverter is an ObjectConverter for use with +// Unstructured objects. Since it has no schema or type information, +// it will only succeed for no-op conversions. This is provided as a +// sane implementation for APIs that require an object converter. +type UnstructuredObjectConverter struct{} + +func (UnstructuredObjectConverter) Convert(in, out, context interface{}) error { + unstructIn, ok := in.(*Unstructured) + if !ok { + return fmt.Errorf("input type %T in not valid for unstructured conversion", in) + } + + unstructOut, ok := out.(*Unstructured) + if !ok { + return fmt.Errorf("output type %T in not valid for unstructured conversion", out) + } + + // maybe deep copy the map? It is documented in the + // ObjectConverter interface that this function is not + // guaranteed to not mutate the input. Or maybe set the input + // object to nil. + unstructOut.Object = unstructIn.Object + return nil +} + +func (UnstructuredObjectConverter) ConvertToVersion(in runtime.Object, target runtime.GroupVersioner) (runtime.Object, error) { + if kind := in.GetObjectKind().GroupVersionKind(); !kind.Empty() { + gvk, ok := target.KindForGroupVersionKinds([]schema.GroupVersionKind{kind}) + if !ok { + // TODO: should this be a typed error? 
+ return nil, fmt.Errorf("%v is unstructured and is not suitable for converting to %q", kind, target) + } + in.GetObjectKind().SetGroupVersionKind(gvk) + } + return in, nil +} + +func (UnstructuredObjectConverter) ConvertFieldLabel(version, kind, label, value string) (string, string, error) { + return "", "", errors.New("unstructured cannot convert field labels") +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go index 588230f40903..36e769bd602f 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go @@ -18,20 +18,13 @@ package unstructured import ( "bytes" - gojson "encoding/json" "errors" "fmt" - "io" - "strings" - - "github.com/golang/glog" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/conversion/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/json" utilruntime "k8s.io/apimachinery/pkg/util/runtime" ) @@ -54,39 +47,31 @@ type Unstructured struct { var _ metav1.Object = &Unstructured{} var _ runtime.Unstructured = &Unstructured{} -var _ runtime.Unstructured = &UnstructuredList{} - -func (obj *Unstructured) GetObjectKind() schema.ObjectKind { return obj } -func (obj *UnstructuredList) GetObjectKind() schema.ObjectKind { return obj } -func (obj *Unstructured) IsUnstructuredObject() {} -func (obj *UnstructuredList) IsUnstructuredObject() {} +func (obj *Unstructured) GetObjectKind() schema.ObjectKind { return obj } func (obj *Unstructured) IsList() bool { - if obj.Object != nil { - _, ok := obj.Object["items"] - return ok + field, ok := obj.Object["items"] + if !ok { + return false } - return false + _, ok = field.([]interface{}) + return ok } -func (obj *UnstructuredList) IsList() bool { return true } func (obj *Unstructured) EachListItem(fn func(runtime.Object) error) error { - if obj.Object == nil { - return fmt.Errorf("content is not a list") - } field, ok := obj.Object["items"] if !ok { - return fmt.Errorf("content is not a list") + return errors.New("content is not a list") } items, ok := field.([]interface{}) if !ok { - return nil + return fmt.Errorf("content is not a list: %T", field) } for _, item := range items { child, ok := item.(map[string]interface{}) if !ok { - return fmt.Errorf("items member is not an object") + return fmt.Errorf("items member is not an object: %T", child) } if err := fn(&Unstructured{Object: child}); err != nil { return err @@ -95,15 +80,6 @@ func (obj *Unstructured) EachListItem(fn func(runtime.Object) error) error { return nil } -func (obj *UnstructuredList) EachListItem(fn func(runtime.Object) error) error { - for i := range obj.Items { - if err := fn(&obj.Items[i]); err != nil { - return err - } - } - return nil -} - func (obj *Unstructured) UnstructuredContent() map[string]interface{} { if obj.Object == nil { obj.Object = make(map[string]interface{}) @@ -111,23 +87,8 @@ func (obj *Unstructured) UnstructuredContent() map[string]interface{} { return obj.Object } -// UnstructuredContent returns a map contain an overlay of the Items field onto -// the Object field. Items always overwrites overlay. Changing "items" in the -// returned object will affect items in the underlying Items field, but changing -// the "items" slice itself will have no effect. 
-// TODO: expose SetUnstructuredContent on runtime.Unstructured that allows -// items to be changed. -func (obj *UnstructuredList) UnstructuredContent() map[string]interface{} { - out := obj.Object - if out == nil { - out = make(map[string]interface{}) - } - items := make([]interface{}, len(obj.Items)) - for i, item := range obj.Items { - items[i] = item.Object - } - out["items"] = items - return out +func (obj *Unstructured) SetUnstructuredContent(content map[string]interface{}) { + obj.Object = content } // MarshalJSON ensures that the unstructured object produces proper @@ -145,253 +106,67 @@ func (u *Unstructured) UnmarshalJSON(b []byte) error { return err } -func deepCopyJSON(x interface{}) interface{} { - switch x := x.(type) { - case map[string]interface{}: - clone := make(map[string]interface{}, len(x)) - for k, v := range x { - clone[k] = deepCopyJSON(v) - } - return clone - case []interface{}: - clone := make([]interface{}, len(x)) - for i := range x { - clone[i] = deepCopyJSON(x[i]) - } - return clone - default: - // only non-pointer values (float64, int64, bool, string) are left. These can be copied by-value. - return x - } -} - func (in *Unstructured) DeepCopy() *Unstructured { if in == nil { return nil } out := new(Unstructured) *out = *in - out.Object = deepCopyJSON(in.Object).(map[string]interface{}) + out.Object = runtime.DeepCopyJSON(in.Object) return out } -func (in *UnstructuredList) DeepCopy() *UnstructuredList { - if in == nil { - return nil - } - out := new(UnstructuredList) - *out = *in - out.Object = deepCopyJSON(in.Object).(map[string]interface{}) - out.Items = make([]Unstructured, len(in.Items)) - for i := range in.Items { - in.Items[i].DeepCopyInto(&out.Items[i]) - } - return out -} - -func getNestedField(obj map[string]interface{}, fields ...string) interface{} { - var val interface{} = obj - for _, field := range fields { - if _, ok := val.(map[string]interface{}); !ok { - return nil - } - val = val.(map[string]interface{})[field] - } - return val -} - -func getNestedString(obj map[string]interface{}, fields ...string) string { - if str, ok := getNestedField(obj, fields...).(string); ok { - return str - } - return "" -} - -func getNestedInt64(obj map[string]interface{}, fields ...string) int64 { - if str, ok := getNestedField(obj, fields...).(int64); ok { - return str - } - return 0 -} - -func getNestedInt64Pointer(obj map[string]interface{}, fields ...string) *int64 { - nested := getNestedField(obj, fields...) 
- switch n := nested.(type) { - case int64: - return &n - case *int64: - return n - default: - return nil - } -} - -func getNestedSlice(obj map[string]interface{}, fields ...string) []string { - if m, ok := getNestedField(obj, fields...).([]interface{}); ok { - strSlice := make([]string, 0, len(m)) - for _, v := range m { - if str, ok := v.(string); ok { - strSlice = append(strSlice, str) - } - } - return strSlice - } - return nil -} - -func getNestedMap(obj map[string]interface{}, fields ...string) map[string]string { - if m, ok := getNestedField(obj, fields...).(map[string]interface{}); ok { - strMap := make(map[string]string, len(m)) - for k, v := range m { - if str, ok := v.(string); ok { - strMap[k] = str - } - } - return strMap - } - return nil -} - -func setNestedField(obj map[string]interface{}, value interface{}, fields ...string) { - m := obj - if len(fields) > 1 { - for _, field := range fields[0 : len(fields)-1] { - if _, ok := m[field].(map[string]interface{}); !ok { - m[field] = make(map[string]interface{}) - } - m = m[field].(map[string]interface{}) - } - } - m[fields[len(fields)-1]] = value -} - -func setNestedSlice(obj map[string]interface{}, value []string, fields ...string) { - m := make([]interface{}, 0, len(value)) - for _, v := range value { - m = append(m, v) - } - setNestedField(obj, m, fields...) -} - -func setNestedMap(obj map[string]interface{}, value map[string]string, fields ...string) { - m := make(map[string]interface{}, len(value)) - for k, v := range value { - m[k] = v - } - setNestedField(obj, m, fields...) -} - func (u *Unstructured) setNestedField(value interface{}, fields ...string) { if u.Object == nil { u.Object = make(map[string]interface{}) } - setNestedField(u.Object, value, fields...) + SetNestedField(u.Object, value, fields...) } func (u *Unstructured) setNestedSlice(value []string, fields ...string) { if u.Object == nil { u.Object = make(map[string]interface{}) } - setNestedSlice(u.Object, value, fields...) + SetNestedStringSlice(u.Object, value, fields...) } func (u *Unstructured) setNestedMap(value map[string]string, fields ...string) { if u.Object == nil { u.Object = make(map[string]interface{}) } - setNestedMap(u.Object, value, fields...) + SetNestedStringMap(u.Object, value, fields...) } -func extractOwnerReference(src interface{}) metav1.OwnerReference { - v := src.(map[string]interface{}) - // though this field is a *bool, but when decoded from JSON, it's - // unmarshalled as bool. 
- var controllerPtr *bool - controller, ok := (getNestedField(v, "controller")).(bool) - if !ok { - controllerPtr = nil - } else { - controllerCopy := controller - controllerPtr = &controllerCopy - } - var blockOwnerDeletionPtr *bool - blockOwnerDeletion, ok := (getNestedField(v, "blockOwnerDeletion")).(bool) +func (u *Unstructured) GetOwnerReferences() []metav1.OwnerReference { + field, ok := nestedFieldNoCopy(u.Object, "metadata", "ownerReferences") if !ok { - blockOwnerDeletionPtr = nil - } else { - blockOwnerDeletionCopy := blockOwnerDeletion - blockOwnerDeletionPtr = &blockOwnerDeletionCopy - } - return metav1.OwnerReference{ - Kind: getNestedString(v, "kind"), - Name: getNestedString(v, "name"), - APIVersion: getNestedString(v, "apiVersion"), - UID: (types.UID)(getNestedString(v, "uid")), - Controller: controllerPtr, - BlockOwnerDeletion: blockOwnerDeletionPtr, - } -} - -func setOwnerReference(src metav1.OwnerReference) map[string]interface{} { - ret := make(map[string]interface{}) - setNestedField(ret, src.Kind, "kind") - setNestedField(ret, src.Name, "name") - setNestedField(ret, src.APIVersion, "apiVersion") - setNestedField(ret, string(src.UID), "uid") - // json.Unmarshal() extracts boolean json fields as bool, not as *bool and hence extractOwnerReference() - // expects bool or a missing field, not *bool. So if pointer is nil, fields are omitted from the ret object. - // If pointer is non-nil, they are set to the referenced value. - if src.Controller != nil { - setNestedField(ret, *src.Controller, "controller") - } - if src.BlockOwnerDeletion != nil { - setNestedField(ret, *src.BlockOwnerDeletion, "blockOwnerDeletion") - } - return ret -} - -func getOwnerReferences(object map[string]interface{}) ([]map[string]interface{}, error) { - field := getNestedField(object, "metadata", "ownerReferences") - if field == nil { - return nil, fmt.Errorf("cannot find field metadata.ownerReferences in %v", object) - } - ownerReferences, ok := field.([]map[string]interface{}) - if ok { - return ownerReferences, nil + return nil } - // TODO: This is hacky... 
- interfaces, ok := field.([]interface{}) + original, ok := field.([]interface{}) if !ok { - return nil, fmt.Errorf("expect metadata.ownerReferences to be a slice in %#v", object) - } - ownerReferences = make([]map[string]interface{}, 0, len(interfaces)) - for i := 0; i < len(interfaces); i++ { - r, ok := interfaces[i].(map[string]interface{}) - if !ok { - return nil, fmt.Errorf("expect element metadata.ownerReferences to be a map[string]interface{} in %#v", object) - } - ownerReferences = append(ownerReferences, r) - } - return ownerReferences, nil -} - -func (u *Unstructured) GetOwnerReferences() []metav1.OwnerReference { - original, err := getOwnerReferences(u.Object) - if err != nil { - glog.V(6).Info(err) return nil } ret := make([]metav1.OwnerReference, 0, len(original)) - for i := 0; i < len(original); i++ { - ret = append(ret, extractOwnerReference(original[i])) + for _, obj := range original { + o, ok := obj.(map[string]interface{}) + if !ok { + // expected map[string]interface{}, got something else + return nil + } + ret = append(ret, extractOwnerReference(o)) } return ret } func (u *Unstructured) SetOwnerReferences(references []metav1.OwnerReference) { - var newReferences = make([]map[string]interface{}, 0, len(references)) - for i := 0; i < len(references); i++ { - newReferences = append(newReferences, setOwnerReference(references[i])) + newReferences := make([]interface{}, 0, len(references)) + for _, reference := range references { + out, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&reference) + if err != nil { + utilruntime.HandleError(fmt.Errorf("unable to convert Owner Reference: %v", err)) + continue + } + newReferences = append(newReferences, out) } u.setNestedField(newReferences, "metadata", "ownerReferences") } @@ -453,7 +228,11 @@ func (u *Unstructured) SetResourceVersion(version string) { } func (u *Unstructured) GetGeneration() int64 { - return getNestedInt64(u.Object, "metadata", "generation") + val, ok := NestedInt64(u.Object, "metadata", "generation") + if !ok { + return 0 + } + return val } func (u *Unstructured) SetGeneration(generation int64) { @@ -484,6 +263,10 @@ func (u *Unstructured) GetCreationTimestamp() metav1.Time { func (u *Unstructured) SetCreationTimestamp(timestamp metav1.Time) { ts, _ := timestamp.MarshalQueryParameter() + if len(ts) == 0 || timestamp.Time.IsZero() { + RemoveNestedField(u.Object, "metadata", "creationTimestamp") + return + } u.setNestedField(ts, "metadata", "creationTimestamp") } @@ -498,7 +281,7 @@ func (u *Unstructured) GetDeletionTimestamp() *metav1.Time { func (u *Unstructured) SetDeletionTimestamp(timestamp *metav1.Time) { if timestamp == nil { - u.setNestedField(nil, "metadata", "deletionTimestamp") + RemoveNestedField(u.Object, "metadata", "deletionTimestamp") return } ts, _ := timestamp.MarshalQueryParameter() @@ -506,15 +289,24 @@ func (u *Unstructured) SetDeletionTimestamp(timestamp *metav1.Time) { } func (u *Unstructured) GetDeletionGracePeriodSeconds() *int64 { - return getNestedInt64Pointer(u.Object, "metadata", "deletionGracePeriodSeconds") + val, ok := NestedInt64(u.Object, "metadata", "deletionGracePeriodSeconds") + if !ok { + return nil + } + return &val } func (u *Unstructured) SetDeletionGracePeriodSeconds(deletionGracePeriodSeconds *int64) { - u.setNestedField(deletionGracePeriodSeconds, "metadata", "deletionGracePeriodSeconds") + if deletionGracePeriodSeconds == nil { + RemoveNestedField(u.Object, "metadata", "deletionGracePeriodSeconds") + return + } + 
u.setNestedField(*deletionGracePeriodSeconds, "metadata", "deletionGracePeriodSeconds") } func (u *Unstructured) GetLabels() map[string]string { - return getNestedMap(u.Object, "metadata", "labels") + m, _ := NestedStringMap(u.Object, "metadata", "labels") + return m } func (u *Unstructured) SetLabels(labels map[string]string) { @@ -522,7 +314,8 @@ func (u *Unstructured) SetLabels(labels map[string]string) { } func (u *Unstructured) GetAnnotations() map[string]string { - return getNestedMap(u.Object, "metadata", "annotations") + m, _ := NestedStringMap(u.Object, "metadata", "annotations") + return m } func (u *Unstructured) SetAnnotations(annotations map[string]string) { @@ -543,41 +336,34 @@ func (u *Unstructured) GroupVersionKind() schema.GroupVersionKind { return gvk } -var converter = unstructured.NewConverter(false) - func (u *Unstructured) GetInitializers() *metav1.Initializers { - field := getNestedField(u.Object, "metadata", "initializers") - if field == nil { - return nil - } - obj, ok := field.(map[string]interface{}) + m, ok := nestedMapNoCopy(u.Object, "metadata", "initializers") if !ok { return nil } out := &metav1.Initializers{} - if err := converter.FromUnstructured(obj, out); err != nil { + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(m, out); err != nil { utilruntime.HandleError(fmt.Errorf("unable to retrieve initializers for object: %v", err)) + return nil } return out } func (u *Unstructured) SetInitializers(initializers *metav1.Initializers) { - if u.Object == nil { - u.Object = make(map[string]interface{}) - } if initializers == nil { - setNestedField(u.Object, nil, "metadata", "initializers") + RemoveNestedField(u.Object, "metadata", "initializers") return } - out, err := converter.ToUnstructured(initializers) + out, err := runtime.DefaultUnstructuredConverter.ToUnstructured(initializers) if err != nil { utilruntime.HandleError(fmt.Errorf("unable to retrieve initializers for object: %v", err)) } - setNestedField(u.Object, out, "metadata", "initializers") + u.setNestedField(out, "metadata", "initializers") } func (u *Unstructured) GetFinalizers() []string { - return getNestedSlice(u.Object, "metadata", "finalizers") + val, _ := NestedStringSlice(u.Object, "metadata", "finalizers") + return val } func (u *Unstructured) SetFinalizers(finalizers []string) { @@ -591,272 +377,3 @@ func (u *Unstructured) GetClusterName() string { func (u *Unstructured) SetClusterName(clusterName string) { u.setNestedField(clusterName, "metadata", "clusterName") } - -// UnstructuredList allows lists that do not have Golang structs -// registered to be manipulated generically. This can be used to deal -// with the API lists from a plug-in. -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:deepcopy-gen=true -type UnstructuredList struct { - Object map[string]interface{} - - // Items is a list of unstructured objects. - Items []Unstructured `json:"items"` -} - -var _ metav1.ListInterface = &UnstructuredList{} - -// MarshalJSON ensures that the unstructured list object produces proper -// JSON when passed to Go's standard JSON library. -func (u *UnstructuredList) MarshalJSON() ([]byte, error) { - var buf bytes.Buffer - err := UnstructuredJSONScheme.Encode(u, &buf) - return buf.Bytes(), err -} - -// UnmarshalJSON ensures that the unstructured list object properly -// decodes JSON when passed to Go's standard JSON library. 
-func (u *UnstructuredList) UnmarshalJSON(b []byte) error { - _, _, err := UnstructuredJSONScheme.Decode(b, nil, u) - return err -} - -func (u *UnstructuredList) setNestedField(value interface{}, fields ...string) { - if u.Object == nil { - u.Object = make(map[string]interface{}) - } - setNestedField(u.Object, value, fields...) -} - -func (u *UnstructuredList) GetAPIVersion() string { - return getNestedString(u.Object, "apiVersion") -} - -func (u *UnstructuredList) SetAPIVersion(version string) { - u.setNestedField(version, "apiVersion") -} - -func (u *UnstructuredList) GetKind() string { - return getNestedString(u.Object, "kind") -} - -func (u *UnstructuredList) SetKind(kind string) { - u.setNestedField(kind, "kind") -} - -func (u *UnstructuredList) GetResourceVersion() string { - return getNestedString(u.Object, "metadata", "resourceVersion") -} - -func (u *UnstructuredList) SetResourceVersion(version string) { - u.setNestedField(version, "metadata", "resourceVersion") -} - -func (u *UnstructuredList) GetSelfLink() string { - return getNestedString(u.Object, "metadata", "selfLink") -} - -func (u *UnstructuredList) SetSelfLink(selfLink string) { - u.setNestedField(selfLink, "metadata", "selfLink") -} - -func (u *UnstructuredList) GetContinue() string { - return getNestedString(u.Object, "metadata", "continue") -} - -func (u *UnstructuredList) SetContinue(c string) { - u.setNestedField(c, "metadata", "continue") -} - -func (u *UnstructuredList) SetGroupVersionKind(gvk schema.GroupVersionKind) { - u.SetAPIVersion(gvk.GroupVersion().String()) - u.SetKind(gvk.Kind) -} - -func (u *UnstructuredList) GroupVersionKind() schema.GroupVersionKind { - gv, err := schema.ParseGroupVersion(u.GetAPIVersion()) - if err != nil { - return schema.GroupVersionKind{} - } - gvk := gv.WithKind(u.GetKind()) - return gvk -} - -// UnstructuredJSONScheme is capable of converting JSON data into the Unstructured -// type, which can be used for generic access to objects without a predefined scheme. -// TODO: move into serializer/json. -var UnstructuredJSONScheme runtime.Codec = unstructuredJSONScheme{} - -type unstructuredJSONScheme struct{} - -func (s unstructuredJSONScheme) Decode(data []byte, _ *schema.GroupVersionKind, obj runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { - var err error - if obj != nil { - err = s.decodeInto(data, obj) - } else { - obj, err = s.decode(data) - } - - if err != nil { - return nil, nil, err - } - - gvk := obj.GetObjectKind().GroupVersionKind() - if len(gvk.Kind) == 0 { - return nil, &gvk, runtime.NewMissingKindErr(string(data)) - } - - return obj, &gvk, nil -} - -func (unstructuredJSONScheme) Encode(obj runtime.Object, w io.Writer) error { - switch t := obj.(type) { - case *Unstructured: - return json.NewEncoder(w).Encode(t.Object) - case *UnstructuredList: - items := make([]map[string]interface{}, 0, len(t.Items)) - for _, i := range t.Items { - items = append(items, i.Object) - } - listObj := make(map[string]interface{}, len(t.Object)+1) - for k, v := range t.Object { // Make a shallow copy - listObj[k] = v - } - listObj["items"] = items - return json.NewEncoder(w).Encode(listObj) - case *runtime.Unknown: - // TODO: Unstructured needs to deal with ContentType. 
- _, err := w.Write(t.Raw) - return err - default: - return json.NewEncoder(w).Encode(t) - } -} - -func (s unstructuredJSONScheme) decode(data []byte) (runtime.Object, error) { - type detector struct { - Items gojson.RawMessage - } - var det detector - if err := json.Unmarshal(data, &det); err != nil { - return nil, err - } - - if det.Items != nil { - list := &UnstructuredList{} - err := s.decodeToList(data, list) - return list, err - } - - // No Items field, so it wasn't a list. - unstruct := &Unstructured{} - err := s.decodeToUnstructured(data, unstruct) - return unstruct, err -} - -func (s unstructuredJSONScheme) decodeInto(data []byte, obj runtime.Object) error { - switch x := obj.(type) { - case *Unstructured: - return s.decodeToUnstructured(data, x) - case *UnstructuredList: - return s.decodeToList(data, x) - case *runtime.VersionedObjects: - o, err := s.decode(data) - if err == nil { - x.Objects = []runtime.Object{o} - } - return err - default: - return json.Unmarshal(data, x) - } -} - -func (unstructuredJSONScheme) decodeToUnstructured(data []byte, unstruct *Unstructured) error { - m := make(map[string]interface{}) - if err := json.Unmarshal(data, &m); err != nil { - return err - } - - unstruct.Object = m - - return nil -} - -func (s unstructuredJSONScheme) decodeToList(data []byte, list *UnstructuredList) error { - type decodeList struct { - Items []gojson.RawMessage - } - - var dList decodeList - if err := json.Unmarshal(data, &dList); err != nil { - return err - } - - if err := json.Unmarshal(data, &list.Object); err != nil { - return err - } - - // For typed lists, e.g., a PodList, API server doesn't set each item's - // APIVersion and Kind. We need to set it. - listAPIVersion := list.GetAPIVersion() - listKind := list.GetKind() - itemKind := strings.TrimSuffix(listKind, "List") - - delete(list.Object, "items") - list.Items = nil - for _, i := range dList.Items { - unstruct := &Unstructured{} - if err := s.decodeToUnstructured([]byte(i), unstruct); err != nil { - return err - } - // This is hacky. Set the item's Kind and APIVersion to those inferred - // from the List. - if len(unstruct.GetKind()) == 0 && len(unstruct.GetAPIVersion()) == 0 { - unstruct.SetKind(itemKind) - unstruct.SetAPIVersion(listAPIVersion) - } - list.Items = append(list.Items, *unstruct) - } - return nil -} - -// UnstructuredObjectConverter is an ObjectConverter for use with -// Unstructured objects. Since it has no schema or type information, -// it will only succeed for no-op conversions. This is provided as a -// sane implementation for APIs that require an object converter. -type UnstructuredObjectConverter struct{} - -func (UnstructuredObjectConverter) Convert(in, out, context interface{}) error { - unstructIn, ok := in.(*Unstructured) - if !ok { - return fmt.Errorf("input type %T in not valid for unstructured conversion", in) - } - - unstructOut, ok := out.(*Unstructured) - if !ok { - return fmt.Errorf("output type %T in not valid for unstructured conversion", out) - } - - // maybe deep copy the map? It is documented in the - // ObjectConverter interface that this function is not - // guaranteeed to not mutate the input. Or maybe set the input - // object to nil. 
- unstructOut.Object = unstructIn.Object - return nil -} - -func (UnstructuredObjectConverter) ConvertToVersion(in runtime.Object, target runtime.GroupVersioner) (runtime.Object, error) { - if kind := in.GetObjectKind().GroupVersionKind(); !kind.Empty() { - gvk, ok := target.KindForGroupVersionKinds([]schema.GroupVersionKind{kind}) - if !ok { - // TODO: should this be a typed error? - return nil, fmt.Errorf("%v is unstructured and is not suitable for converting to %q", kind, target) - } - in.GetObjectKind().SetGroupVersionKind(gvk) - } - return in, nil -} - -func (UnstructuredObjectConverter) ConvertFieldLabel(version, kind, label, value string) (string, string, error) { - return "", "", errors.New("unstructured cannot convert field labels") -} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go new file mode 100644 index 000000000000..57d78a09dcc7 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go @@ -0,0 +1,189 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unstructured + +import ( + "bytes" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var _ runtime.Unstructured = &UnstructuredList{} +var _ metav1.ListInterface = &UnstructuredList{} + +// UnstructuredList allows lists that do not have Golang structs +// registered to be manipulated generically. This can be used to deal +// with the API lists from a plug-in. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:deepcopy-gen=true +type UnstructuredList struct { + Object map[string]interface{} + + // Items is a list of unstructured objects. + Items []Unstructured `json:"items"` +} + +func (u *UnstructuredList) GetObjectKind() schema.ObjectKind { return u } + +func (u *UnstructuredList) IsList() bool { return true } + +func (u *UnstructuredList) EachListItem(fn func(runtime.Object) error) error { + for i := range u.Items { + if err := fn(&u.Items[i]); err != nil { + return err + } + } + return nil +} + +// UnstructuredContent returns a map contain an overlay of the Items field onto +// the Object field. Items always overwrites overlay. Changing "items" in the +// returned object will affect items in the underlying Items field, but changing +// the "items" slice itself will have no effect. +// TODO: expose SetUnstructuredContent on runtime.Unstructured that allows +// items to be changed. +func (u *UnstructuredList) UnstructuredContent() map[string]interface{} { + out := u.Object + if out == nil { + out = make(map[string]interface{}) + } + items := make([]interface{}, len(u.Items)) + for i, item := range u.Items { + items[i] = item.Object + } + out["items"] = items + return out +} + +// SetUnstructuredContent obeys the conventions of List and keeps Items and the items +// array in sync. 
If items is not an array of objects in the incoming map, then any +// mismatched item will be removed. +func (obj *UnstructuredList) SetUnstructuredContent(content map[string]interface{}) { + obj.Object = content + if content == nil { + obj.Items = nil + return + } + items, ok := obj.Object["items"].([]interface{}) + if !ok || items == nil { + items = []interface{}{} + } + unstructuredItems := make([]Unstructured, 0, len(items)) + newItems := make([]interface{}, 0, len(items)) + for _, item := range items { + o, ok := item.(map[string]interface{}) + if !ok { + continue + } + unstructuredItems = append(unstructuredItems, Unstructured{Object: o}) + newItems = append(newItems, o) + } + obj.Items = unstructuredItems + obj.Object["items"] = newItems +} + +func (u *UnstructuredList) DeepCopy() *UnstructuredList { + if u == nil { + return nil + } + out := new(UnstructuredList) + *out = *u + out.Object = runtime.DeepCopyJSON(u.Object) + out.Items = make([]Unstructured, len(u.Items)) + for i := range u.Items { + u.Items[i].DeepCopyInto(&out.Items[i]) + } + return out +} + +// MarshalJSON ensures that the unstructured list object produces proper +// JSON when passed to Go's standard JSON library. +func (u *UnstructuredList) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + err := UnstructuredJSONScheme.Encode(u, &buf) + return buf.Bytes(), err +} + +// UnmarshalJSON ensures that the unstructured list object properly +// decodes JSON when passed to Go's standard JSON library. +func (u *UnstructuredList) UnmarshalJSON(b []byte) error { + _, _, err := UnstructuredJSONScheme.Decode(b, nil, u) + return err +} + +func (u *UnstructuredList) GetAPIVersion() string { + return getNestedString(u.Object, "apiVersion") +} + +func (u *UnstructuredList) SetAPIVersion(version string) { + u.setNestedField(version, "apiVersion") +} + +func (u *UnstructuredList) GetKind() string { + return getNestedString(u.Object, "kind") +} + +func (u *UnstructuredList) SetKind(kind string) { + u.setNestedField(kind, "kind") +} + +func (u *UnstructuredList) GetResourceVersion() string { + return getNestedString(u.Object, "metadata", "resourceVersion") +} + +func (u *UnstructuredList) SetResourceVersion(version string) { + u.setNestedField(version, "metadata", "resourceVersion") +} + +func (u *UnstructuredList) GetSelfLink() string { + return getNestedString(u.Object, "metadata", "selfLink") +} + +func (u *UnstructuredList) SetSelfLink(selfLink string) { + u.setNestedField(selfLink, "metadata", "selfLink") +} + +func (u *UnstructuredList) GetContinue() string { + return getNestedString(u.Object, "metadata", "continue") +} + +func (u *UnstructuredList) SetContinue(c string) { + u.setNestedField(c, "metadata", "continue") +} + +func (u *UnstructuredList) SetGroupVersionKind(gvk schema.GroupVersionKind) { + u.SetAPIVersion(gvk.GroupVersion().String()) + u.SetKind(gvk.Kind) +} + +func (u *UnstructuredList) GroupVersionKind() schema.GroupVersionKind { + gv, err := schema.ParseGroupVersion(u.GetAPIVersion()) + if err != nil { + return schema.GroupVersionKind{} + } + gvk := gv.WithKind(u.GetKind()) + return gvk +} + +func (u *UnstructuredList) setNestedField(value interface{}, fields ...string) { + if u.Object == nil { + u.Object = make(map[string]interface{}) + } + SetNestedField(u.Object, value, fields...) 
+} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go index 248acf989bfe..8f6a17bf6a6c 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go @@ -21,27 +21,9 @@ limitations under the License. package unstructured import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { - return []conversion.GeneratedDeepCopyFunc{ - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Unstructured).DeepCopyInto(out.(*Unstructured)) - return nil - }, InType: reflect.TypeOf(&Unstructured{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*UnstructuredList).DeepCopyInto(out.(*UnstructuredList)) - return nil - }, InType: reflect.TypeOf(&UnstructuredList{})}, - } -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Unstructured) DeepCopyInto(out *Unstructured) { clone := in.DeepCopy() diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/BUILD b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/BUILD index 2544ce6e1509..216ef6368b29 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["validation_test.go"], + importpath = "k8s.io/apimachinery/pkg/apis/meta/v1/validation", library = ":go_default_library", deps = ["//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library"], ) @@ -16,6 +17,7 @@ go_test( go_library( name = "go_default_library", srcs = ["validation.go"], + importpath = "k8s.io/apimachinery/pkg/apis/meta/v1/validation", deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go index c73e777b50b5..ed4585502162 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go @@ -21,164 +21,10 @@ limitations under the License. package v1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" types "k8s.io/apimachinery/pkg/types" - reflect "reflect" ) -// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { - return []conversion.GeneratedDeepCopyFunc{ - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*APIGroup).DeepCopyInto(out.(*APIGroup)) - return nil - }, InType: reflect.TypeOf(&APIGroup{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*APIGroupList).DeepCopyInto(out.(*APIGroupList)) - return nil - }, InType: reflect.TypeOf(&APIGroupList{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*APIResource).DeepCopyInto(out.(*APIResource)) - return nil - }, InType: reflect.TypeOf(&APIResource{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*APIResourceList).DeepCopyInto(out.(*APIResourceList)) - return nil - }, InType: reflect.TypeOf(&APIResourceList{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*APIVersions).DeepCopyInto(out.(*APIVersions)) - return nil - }, InType: reflect.TypeOf(&APIVersions{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeleteOptions).DeepCopyInto(out.(*DeleteOptions)) - return nil - }, InType: reflect.TypeOf(&DeleteOptions{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Duration).DeepCopyInto(out.(*Duration)) - return nil - }, InType: reflect.TypeOf(&Duration{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ExportOptions).DeepCopyInto(out.(*ExportOptions)) - return nil - }, InType: reflect.TypeOf(&ExportOptions{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GetOptions).DeepCopyInto(out.(*GetOptions)) - return nil - }, InType: reflect.TypeOf(&GetOptions{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GroupKind).DeepCopyInto(out.(*GroupKind)) - return nil - }, InType: reflect.TypeOf(&GroupKind{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GroupResource).DeepCopyInto(out.(*GroupResource)) - return nil - }, InType: reflect.TypeOf(&GroupResource{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GroupVersion).DeepCopyInto(out.(*GroupVersion)) - return nil - }, InType: reflect.TypeOf(&GroupVersion{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GroupVersionForDiscovery).DeepCopyInto(out.(*GroupVersionForDiscovery)) - return nil - }, InType: reflect.TypeOf(&GroupVersionForDiscovery{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GroupVersionKind).DeepCopyInto(out.(*GroupVersionKind)) - return nil - }, InType: reflect.TypeOf(&GroupVersionKind{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GroupVersionResource).DeepCopyInto(out.(*GroupVersionResource)) - return nil - }, InType: reflect.TypeOf(&GroupVersionResource{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Initializer).DeepCopyInto(out.(*Initializer)) - return nil - }, InType: reflect.TypeOf(&Initializer{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Initializers).DeepCopyInto(out.(*Initializers)) - return nil - }, InType: reflect.TypeOf(&Initializers{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*InternalEvent).DeepCopyInto(out.(*InternalEvent)) - return nil - }, InType: 
reflect.TypeOf(&InternalEvent{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LabelSelector).DeepCopyInto(out.(*LabelSelector)) - return nil - }, InType: reflect.TypeOf(&LabelSelector{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LabelSelectorRequirement).DeepCopyInto(out.(*LabelSelectorRequirement)) - return nil - }, InType: reflect.TypeOf(&LabelSelectorRequirement{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*List).DeepCopyInto(out.(*List)) - return nil - }, InType: reflect.TypeOf(&List{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ListMeta).DeepCopyInto(out.(*ListMeta)) - return nil - }, InType: reflect.TypeOf(&ListMeta{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ListOptions).DeepCopyInto(out.(*ListOptions)) - return nil - }, InType: reflect.TypeOf(&ListOptions{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*MicroTime).DeepCopyInto(out.(*MicroTime)) - return nil - }, InType: reflect.TypeOf(&MicroTime{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ObjectMeta).DeepCopyInto(out.(*ObjectMeta)) - return nil - }, InType: reflect.TypeOf(&ObjectMeta{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*OwnerReference).DeepCopyInto(out.(*OwnerReference)) - return nil - }, InType: reflect.TypeOf(&OwnerReference{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Patch).DeepCopyInto(out.(*Patch)) - return nil - }, InType: reflect.TypeOf(&Patch{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Preconditions).DeepCopyInto(out.(*Preconditions)) - return nil - }, InType: reflect.TypeOf(&Preconditions{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RootPaths).DeepCopyInto(out.(*RootPaths)) - return nil - }, InType: reflect.TypeOf(&RootPaths{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServerAddressByClientCIDR).DeepCopyInto(out.(*ServerAddressByClientCIDR)) - return nil - }, InType: reflect.TypeOf(&ServerAddressByClientCIDR{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Status).DeepCopyInto(out.(*Status)) - return nil - }, InType: reflect.TypeOf(&Status{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StatusCause).DeepCopyInto(out.(*StatusCause)) - return nil - }, InType: reflect.TypeOf(&StatusCause{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StatusDetails).DeepCopyInto(out.(*StatusDetails)) - return nil - }, InType: reflect.TypeOf(&StatusDetails{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Time).DeepCopyInto(out.(*Time)) - return nil - }, InType: reflect.TypeOf(&Time{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Timestamp).DeepCopyInto(out.(*Timestamp)) - return nil - }, InType: reflect.TypeOf(&Timestamp{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*WatchEvent).DeepCopyInto(out.(*WatchEvent)) - return nil - }, InType: reflect.TypeOf(&WatchEvent{})}, - } -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *APIGroup) DeepCopyInto(out *APIGroup) { *out = *in diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/BUILD b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/BUILD index e981dea096c6..ab36b18731f6 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/BUILD @@ -18,6 +18,7 @@ go_library( "zz_generated.deepcopy.go", "zz_generated.defaults.go", ], + importpath = "k8s.io/apimachinery/pkg/apis/meta/v1alpha1", deps = [ "//vendor/github.com/gogo/protobuf/proto:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/types.go index 9c3c2a35e41a..1c97414a9f9b 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/types.go @@ -98,7 +98,7 @@ type TableRowCondition struct { type RowConditionType string // These are valid conditions of a row. This list is not exhaustive and new conditions may be -// inculded by other resources. +// included by other resources. const ( // RowCompleted means the underlying resource has reached completion and may be given less // visual priority than other resources. diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/zz_generated.deepcopy.go index 043456cdb1d6..4ae545d9119a 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/zz_generated.deepcopy.go @@ -21,47 +21,9 @@ limitations under the License. package v1alpha1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { - return []conversion.GeneratedDeepCopyFunc{ - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PartialObjectMetadata).DeepCopyInto(out.(*PartialObjectMetadata)) - return nil - }, InType: reflect.TypeOf(&PartialObjectMetadata{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PartialObjectMetadataList).DeepCopyInto(out.(*PartialObjectMetadataList)) - return nil - }, InType: reflect.TypeOf(&PartialObjectMetadataList{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Table).DeepCopyInto(out.(*Table)) - return nil - }, InType: reflect.TypeOf(&Table{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*TableColumnDefinition).DeepCopyInto(out.(*TableColumnDefinition)) - return nil - }, InType: reflect.TypeOf(&TableColumnDefinition{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*TableOptions).DeepCopyInto(out.(*TableOptions)) - return nil - }, InType: reflect.TypeOf(&TableOptions{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*TableRow).DeepCopyInto(out.(*TableRow)) - return nil - }, InType: reflect.TypeOf(&TableRow{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*TableRowCondition).DeepCopyInto(out.(*TableRowCondition)) - return nil - }, InType: reflect.TypeOf(&TableRowCondition{})}, - } -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PartialObjectMetadata) DeepCopyInto(out *PartialObjectMetadata) { *out = *in diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/BUILD b/vendor/k8s.io/apimachinery/pkg/conversion/BUILD index eb7f1749660b..184dafbff1d8 100644 --- a/vendor/k8s.io/apimachinery/pkg/conversion/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/conversion/BUILD @@ -10,9 +10,9 @@ go_test( name = "go_default_test", srcs = [ "converter_test.go", - "deep_copy_test.go", "helper_test.go", ], + importpath = "k8s.io/apimachinery/pkg/conversion", library = ":go_default_library", deps = [ "//vendor/github.com/google/gofuzz:go_default_library", @@ -24,12 +24,12 @@ go_test( go_library( name = "go_default_library", srcs = [ - "cloner.go", "converter.go", "deep_equal.go", "doc.go", "helper.go", ], + importpath = "k8s.io/apimachinery/pkg/conversion", deps = ["//vendor/k8s.io/apimachinery/third_party/forked/golang/reflect:go_default_library"], ) @@ -45,7 +45,6 @@ filegroup( srcs = [ ":package-srcs", "//staging/src/k8s.io/apimachinery/pkg/conversion/queryparams:all-srcs", - "//staging/src/k8s.io/apimachinery/pkg/conversion/unstructured:all-srcs", ], tags = ["automanaged"], ) diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/cloner.go b/vendor/k8s.io/apimachinery/pkg/conversion/cloner.go deleted file mode 100644 index c5dec1f31e41..000000000000 --- a/vendor/k8s.io/apimachinery/pkg/conversion/cloner.go +++ /dev/null @@ -1,249 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package conversion - -import ( - "fmt" - "reflect" -) - -// Cloner knows how to copy one type to another. -type Cloner struct { - // Map from the type to a function which can do the deep copy. - deepCopyFuncs map[reflect.Type]reflect.Value - generatedDeepCopyFuncs map[reflect.Type]func(in interface{}, out interface{}, c *Cloner) error -} - -// NewCloner creates a new Cloner object. -func NewCloner() *Cloner { - c := &Cloner{ - deepCopyFuncs: map[reflect.Type]reflect.Value{}, - generatedDeepCopyFuncs: map[reflect.Type]func(in interface{}, out interface{}, c *Cloner) error{}, - } - if err := c.RegisterDeepCopyFunc(byteSliceDeepCopy); err != nil { - // If one of the deep-copy functions is malformed, detect it immediately. - panic(err) - } - return c -} - -// Prevent recursing into every byte... -func byteSliceDeepCopy(in *[]byte, out *[]byte, c *Cloner) error { - if *in != nil { - *out = make([]byte, len(*in)) - copy(*out, *in) - } else { - *out = nil - } - return nil -} - -// Verifies whether a deep-copy function has a correct signature. -func verifyDeepCopyFunctionSignature(ft reflect.Type) error { - if ft.Kind() != reflect.Func { - return fmt.Errorf("expected func, got: %v", ft) - } - if ft.NumIn() != 3 { - return fmt.Errorf("expected three 'in' params, got %v", ft) - } - if ft.NumOut() != 1 { - return fmt.Errorf("expected one 'out' param, got %v", ft) - } - if ft.In(0).Kind() != reflect.Ptr { - return fmt.Errorf("expected pointer arg for 'in' param 0, got: %v", ft) - } - if ft.In(1) != ft.In(0) { - return fmt.Errorf("expected 'in' param 0 the same as param 1, got: %v", ft) - } - var forClonerType Cloner - if expected := reflect.TypeOf(&forClonerType); ft.In(2) != expected { - return fmt.Errorf("expected '%v' arg for 'in' param 2, got: '%v'", expected, ft.In(2)) - } - var forErrorType error - // This convolution is necessary, otherwise TypeOf picks up on the fact - // that forErrorType is nil - errorType := reflect.TypeOf(&forErrorType).Elem() - if ft.Out(0) != errorType { - return fmt.Errorf("expected error return, got: %v", ft) - } - return nil -} - -// RegisterGeneratedDeepCopyFunc registers a copying func with the Cloner. -// deepCopyFunc must take three parameters: a type input, a pointer to a -// type output, and a pointer to Cloner. It should return an error. -// -// Example: -// c.RegisterGeneratedDeepCopyFunc( -// func(in Pod, out *Pod, c *Cloner) error { -// // deep copy logic... -// return nil -// }) -func (c *Cloner) RegisterDeepCopyFunc(deepCopyFunc interface{}) error { - fv := reflect.ValueOf(deepCopyFunc) - ft := fv.Type() - if err := verifyDeepCopyFunctionSignature(ft); err != nil { - return err - } - c.deepCopyFuncs[ft.In(0)] = fv - return nil -} - -// GeneratedDeepCopyFunc bundles an untyped generated deep-copy function of a type -// with a reflection type object used as a key to lookup the deep-copy function. -type GeneratedDeepCopyFunc struct { - Fn func(in interface{}, out interface{}, c *Cloner) error - InType reflect.Type -} - -// Similar to RegisterDeepCopyFunc, but registers deep copy function that were -// automatically generated. -func (c *Cloner) RegisterGeneratedDeepCopyFunc(fn GeneratedDeepCopyFunc) error { - c.generatedDeepCopyFuncs[fn.InType] = fn.Fn - return nil -} - -// DeepCopy will perform a deep copy of a given object. 
-func (c *Cloner) DeepCopy(in interface{}) (interface{}, error) { - // Can be invalid if we run DeepCopy(X) where X is a nil interface type. - // For example, we get an invalid value when someone tries to deep-copy - // a nil labels.Selector. - // This does not occur if X is nil and is a pointer to a concrete type. - if in == nil { - return nil, nil - } - inValue := reflect.ValueOf(in) - outValue, err := c.deepCopy(inValue) - if err != nil { - return nil, err - } - return outValue.Interface(), nil -} - -func (c *Cloner) deepCopy(src reflect.Value) (reflect.Value, error) { - inType := src.Type() - - switch src.Kind() { - case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: - if src.IsNil() { - return src, nil - } - } - - if fv, ok := c.deepCopyFuncs[inType]; ok { - return c.customDeepCopy(src, fv) - } - if fv, ok := c.generatedDeepCopyFuncs[inType]; ok { - var outValue reflect.Value - outValue = reflect.New(inType.Elem()) - err := fv(src.Interface(), outValue.Interface(), c) - return outValue, err - } - return c.defaultDeepCopy(src) -} - -func (c *Cloner) customDeepCopy(src, fv reflect.Value) (reflect.Value, error) { - outValue := reflect.New(src.Type().Elem()) - args := []reflect.Value{src, outValue, reflect.ValueOf(c)} - result := fv.Call(args)[0].Interface() - // This convolution is necessary because nil interfaces won't convert - // to error. - if result == nil { - return outValue, nil - } - return outValue, result.(error) -} - -func (c *Cloner) defaultDeepCopy(src reflect.Value) (reflect.Value, error) { - switch src.Kind() { - case reflect.Chan, reflect.Func, reflect.UnsafePointer, reflect.Uintptr: - return src, fmt.Errorf("cannot deep copy kind: %s", src.Kind()) - case reflect.Array: - dst := reflect.New(src.Type()) - for i := 0; i < src.Len(); i++ { - copyVal, err := c.deepCopy(src.Index(i)) - if err != nil { - return src, err - } - dst.Elem().Index(i).Set(copyVal) - } - return dst.Elem(), nil - case reflect.Interface: - if src.IsNil() { - return src, nil - } - return c.deepCopy(src.Elem()) - case reflect.Map: - if src.IsNil() { - return src, nil - } - dst := reflect.MakeMap(src.Type()) - for _, k := range src.MapKeys() { - copyVal, err := c.deepCopy(src.MapIndex(k)) - if err != nil { - return src, err - } - dst.SetMapIndex(k, copyVal) - } - return dst, nil - case reflect.Ptr: - if src.IsNil() { - return src, nil - } - dst := reflect.New(src.Type().Elem()) - copyVal, err := c.deepCopy(src.Elem()) - if err != nil { - return src, err - } - dst.Elem().Set(copyVal) - return dst, nil - case reflect.Slice: - if src.IsNil() { - return src, nil - } - dst := reflect.MakeSlice(src.Type(), 0, src.Len()) - for i := 0; i < src.Len(); i++ { - copyVal, err := c.deepCopy(src.Index(i)) - if err != nil { - return src, err - } - dst = reflect.Append(dst, copyVal) - } - return dst, nil - case reflect.Struct: - dst := reflect.New(src.Type()) - for i := 0; i < src.NumField(); i++ { - if !dst.Elem().Field(i).CanSet() { - // Can't set private fields. At this point, the - // best we can do is a shallow copy. For - // example, time.Time is a value type with - // private members that can be shallow copied. - return src, nil - } - copyVal, err := c.deepCopy(src.Field(i)) - if err != nil { - return src, err - } - dst.Elem().Field(i).Set(copyVal) - } - return dst.Elem(), nil - - default: - // Value types like numbers, booleans, and strings. 
- return src, nil - } -} diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/BUILD b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/BUILD index e2beb3e0a10d..8b871ab126cb 100644 --- a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/BUILD @@ -12,11 +12,13 @@ go_library( "convert.go", "doc.go", ], + importpath = "k8s.io/apimachinery/pkg/conversion/queryparams", ) go_test( name = "go_default_xtest", srcs = ["convert_test.go"], + importpath = "k8s.io/apimachinery/pkg/conversion/queryparams_test", deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/conversion/queryparams:go_default_library", diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go index 30f717b2cecb..17b366617049 100644 --- a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go +++ b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go @@ -90,7 +90,14 @@ func customMarshalValue(value reflect.Value) (reflect.Value, bool) { marshaler, ok := value.Interface().(Marshaler) if !ok { - return reflect.Value{}, false + if !isPointerKind(value.Kind()) && value.CanAddr() { + marshaler, ok = value.Addr().Interface().(Marshaler) + if !ok { + return reflect.Value{}, false + } + } else { + return reflect.Value{}, false + } } // Don't invoke functions on nil pointers diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/unstructured/BUILD b/vendor/k8s.io/apimachinery/pkg/conversion/unstructured/BUILD deleted file mode 100644 index a98e4232620f..000000000000 --- a/vendor/k8s.io/apimachinery/pkg/conversion/unstructured/BUILD +++ /dev/null @@ -1,37 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "converter.go", - "doc.go", - ], - deps = [ - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [ - ":package-srcs", - "//staging/src/k8s.io/apimachinery/pkg/conversion/unstructured/testing:all-srcs", - ], - tags = ["automanaged"], -) diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/unstructured/doc.go b/vendor/k8s.io/apimachinery/pkg/conversion/unstructured/doc.go deleted file mode 100644 index 75128804e939..000000000000 --- a/vendor/k8s.io/apimachinery/pkg/conversion/unstructured/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// Package unstructured provides conversion from runtime objects -// to map[string]interface{} representation. -package unstructured diff --git a/vendor/k8s.io/apimachinery/pkg/fields/BUILD b/vendor/k8s.io/apimachinery/pkg/fields/BUILD index 637ecc4bb01b..2bae1350393e 100644 --- a/vendor/k8s.io/apimachinery/pkg/fields/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/fields/BUILD @@ -12,6 +12,7 @@ go_test( "fields_test.go", "selector_test.go", ], + importpath = "k8s.io/apimachinery/pkg/fields", library = ":go_default_library", ) @@ -23,6 +24,7 @@ go_library( "requirements.go", "selector.go", ], + importpath = "k8s.io/apimachinery/pkg/fields", deps = ["//vendor/k8s.io/apimachinery/pkg/selection:go_default_library"], ) diff --git a/vendor/k8s.io/apimachinery/pkg/labels/BUILD b/vendor/k8s.io/apimachinery/pkg/labels/BUILD index f49d05a27d90..fba6648e00f9 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/labels/BUILD @@ -12,6 +12,7 @@ go_test( "labels_test.go", "selector_test.go", ], + importpath = "k8s.io/apimachinery/pkg/labels", library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/selection:go_default_library", @@ -27,9 +28,9 @@ go_library( "selector.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/apimachinery/pkg/labels", deps = [ "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/selection:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", diff --git a/vendor/k8s.io/apimachinery/pkg/labels/selector.go b/vendor/k8s.io/apimachinery/pkg/labels/selector.go index ac123033a0f2..b301b4284037 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/selector.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/selector.go @@ -550,7 +550,7 @@ func (p *Parser) lookahead(context ParserContext) (Token, string) { return tok, lit } -// consume returns current token and string. Increments the the position +// consume returns current token and string. Increments the position func (p *Parser) consume(context ParserContext) (Token, string) { p.position++ tok, lit := p.scannedItems[p.position-1].tok, p.scannedItems[p.position-1].literal diff --git a/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go index 80ba3fb751d2..823ef32a34e4 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go @@ -20,23 +20,6 @@ limitations under the License. package labels -import ( - conversion "k8s.io/apimachinery/pkg/conversion" - reflect "reflect" -) - -// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { - return []conversion.GeneratedDeepCopyFunc{ - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Requirement).DeepCopyInto(out.(*Requirement)) - return nil - }, InType: reflect.TypeOf(&Requirement{})}, - } -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Requirement) DeepCopyInto(out *Requirement) { *out = *in diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/BUILD b/vendor/k8s.io/apimachinery/pkg/runtime/BUILD index ef9dc9af6f99..93c6dcbfc778 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/runtime/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["swagger_doc_generator_test.go"], + importpath = "k8s.io/apimachinery/pkg/runtime", library = ":go_default_library", ) @@ -18,6 +19,7 @@ go_library( "codec.go", "codec_check.go", "conversion.go", + "converter.go", "doc.go", "embedded.go", "error.go", @@ -33,12 +35,16 @@ go_library( "types_proto.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/apimachinery/pkg/runtime", deps = [ "//vendor/github.com/gogo/protobuf/proto:go_default_library", + "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/conversion/queryparams:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", ], ) @@ -46,20 +52,26 @@ go_test( name = "go_default_xtest", srcs = [ "conversion_test.go", + "converter_test.go", "embedded_test.go", "extension_test.go", "scheme_test.go", ], + importpath = "k8s.io/apimachinery/pkg/runtime_test", deps = [ "//vendor/github.com/google/gofuzz:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/github.com/stretchr/testify/assert:go_default_library", + "//vendor/github.com/stretchr/testify/require:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/testing:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", ], ) diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/codec.go b/vendor/k8s.io/apimachinery/pkg/runtime/codec.go index d9748f0664cc..5b3080aa5845 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/codec.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/codec.go @@ -139,6 +139,7 @@ func NewParameterCodec(scheme *Scheme) ParameterCodec { typer: scheme, convertor: scheme, creator: scheme, + defaulter: scheme, } } @@ -147,6 +148,7 @@ type parameterCodec struct { typer ObjectTyper convertor ObjectConvertor creator ObjectCreater + defaulter ObjectDefaulter } var _ ParameterCodec = &parameterCodec{} @@ -163,9 +165,17 @@ func (c *parameterCodec) DecodeParameters(parameters url.Values, from schema.Gro } for i := range targetGVKs { if targetGVKs[i].GroupVersion() == from { - return c.convertor.Convert(&parameters, into, nil) + if err := c.convertor.Convert(&parameters, into, nil); err != nil { + return err + } + // in the case where we going into the same object we're receiving, default on the outbound object + if c.defaulter != nil { + c.defaulter.Default(into) + } + return nil } } + input, err := 
c.creator.New(from.WithKind(targetGVKs[0].Kind)) if err != nil { return err @@ -173,6 +183,10 @@ func (c *parameterCodec) DecodeParameters(parameters url.Values, from schema.Gro if err := c.convertor.Convert(&parameters, input, nil); err != nil { return err } + // if we have defaulter, default the input before converting to output + if c.defaulter != nil { + c.defaulter.Default(input) + } return c.convertor.Convert(input, into, nil) } diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/unstructured/converter.go b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go similarity index 83% rename from vendor/k8s.io/apimachinery/pkg/conversion/unstructured/converter.go rename to vendor/k8s.io/apimachinery/pkg/runtime/converter.go index 84dca4ac7c2d..f6f7c10de627 100644 --- a/vendor/k8s.io/apimachinery/pkg/conversion/unstructured/converter.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package unstructured +package runtime import ( "bytes" @@ -27,18 +27,18 @@ import ( "strings" "sync" "sync/atomic" + "time" - apiequality "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/util/diff" + "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/util/json" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "github.com/golang/glog" ) -// Converter is an interface for converting between interface{} +// UnstructuredConverter is an interface for converting between interface{} // and map[string]interface representation. -type Converter interface { +type UnstructuredConverter interface { ToUnstructured(obj interface{}) (map[string]interface{}, error) FromUnstructured(u map[string]interface{}, obj interface{}) error } @@ -77,7 +77,16 @@ var ( float64Type = reflect.TypeOf(float64(0)) boolType = reflect.TypeOf(bool(false)) fieldCache = newFieldsCache() - DefaultConverter = NewConverter(parseBool(os.Getenv("KUBE_PATCH_CONVERSION_DETECTOR"))) + + // DefaultUnstructuredConverter performs unstructured to Go typed object conversions. + DefaultUnstructuredConverter = &unstructuredConverter{ + mismatchDetection: parseBool(os.Getenv("KUBE_PATCH_CONVERSION_DETECTOR")), + comparison: conversion.EqualitiesOrDie( + func(a, b time.Time) bool { + return a.UTC() == b.UTC() + }, + ), + } ) func parseBool(key string) bool { @@ -91,22 +100,30 @@ func parseBool(key string) bool { return value } -// ConverterImpl knows how to convert between interface{} and +// unstructuredConverter knows how to convert between interface{} and // Unstructured in both ways. -type converterImpl struct { +type unstructuredConverter struct { // If true, we will be additionally running conversion via json // to ensure that the result is true. // This is supposed to be set only in tests. mismatchDetection bool + // comparison is the default test logic used to compare + comparison conversion.Equalities } -func NewConverter(mismatchDetection bool) Converter { - return &converterImpl{ - mismatchDetection: mismatchDetection, +// NewTestUnstructuredConverter creates an UnstructuredConverter that accepts JSON typed maps and translates them +// to Go types via reflection. It performs mismatch detection automatically and is intended for use by external +// test tools. Use DefaultUnstructuredConverter if you do not explicitly need mismatch detection. 
+func NewTestUnstructuredConverter(comparison conversion.Equalities) UnstructuredConverter { + return &unstructuredConverter{ + mismatchDetection: true, + comparison: comparison, } } -func (c *converterImpl) FromUnstructured(u map[string]interface{}, obj interface{}) error { +// FromUnstructured converts an object from map[string]interface{} representation into a concrete type. +// It uses encoding/json/Unmarshaler if object implements it or reflection if not. +func (c *unstructuredConverter) FromUnstructured(u map[string]interface{}, obj interface{}) error { t := reflect.TypeOf(obj) value := reflect.ValueOf(obj) if t.Kind() != reflect.Ptr || value.IsNil() { @@ -119,8 +136,8 @@ func (c *converterImpl) FromUnstructured(u map[string]interface{}, obj interface if (err != nil) != (newErr != nil) { glog.Fatalf("FromUnstructured unexpected error for %v: error: %v", u, err) } - if err == nil && !apiequality.Semantic.DeepEqual(obj, newObj) { - glog.Fatalf("FromUnstructured mismatch for %#v, diff: %v", obj, diff.ObjectReflectDiff(obj, newObj)) + if err == nil && !c.comparison.DeepEqual(obj, newObj) { + glog.Fatalf("FromUnstructured mismatch\nobj1: %#v\nobj2: %#v", obj, newObj) } } return err @@ -388,28 +405,66 @@ func interfaceFromUnstructured(sv, dv reflect.Value) error { return nil } -func (c *converterImpl) ToUnstructured(obj interface{}) (map[string]interface{}, error) { - t := reflect.TypeOf(obj) - value := reflect.ValueOf(obj) - if t.Kind() != reflect.Ptr || value.IsNil() { - return nil, fmt.Errorf("ToUnstructured requires a non-nil pointer to an object, got %v", t) +// ToUnstructured converts an object into map[string]interface{} representation. +// It uses encoding/json/Marshaler if object implements it or reflection if not. +func (c *unstructuredConverter) ToUnstructured(obj interface{}) (map[string]interface{}, error) { + var u map[string]interface{} + var err error + if unstr, ok := obj.(Unstructured); ok { + // UnstructuredContent() mutates the object so we need to make a copy first + u = unstr.DeepCopyObject().(Unstructured).UnstructuredContent() + } else { + t := reflect.TypeOf(obj) + value := reflect.ValueOf(obj) + if t.Kind() != reflect.Ptr || value.IsNil() { + return nil, fmt.Errorf("ToUnstructured requires a non-nil pointer to an object, got %v", t) + } + u = map[string]interface{}{} + err = toUnstructured(value.Elem(), reflect.ValueOf(&u).Elem()) } - u := &map[string]interface{}{} - err := toUnstructured(value.Elem(), reflect.ValueOf(u).Elem()) if c.mismatchDetection { - newUnstr := &map[string]interface{}{} - newErr := toUnstructuredViaJSON(obj, newUnstr) + newUnstr := map[string]interface{}{} + newErr := toUnstructuredViaJSON(obj, &newUnstr) if (err != nil) != (newErr != nil) { - glog.Fatalf("ToUnstructured unexpected error for %v: error: %v", obj, err) + glog.Fatalf("ToUnstructured unexpected error for %v: error: %v; newErr: %v", obj, err, newErr) } - if err == nil && !apiequality.Semantic.DeepEqual(u, newUnstr) { - glog.Fatalf("ToUnstructured mismatch for %#v, diff: %v", u, diff.ObjectReflectDiff(u, newUnstr)) + if err == nil && !c.comparison.DeepEqual(u, newUnstr) { + glog.Fatalf("ToUnstructured mismatch\nobj1: %#v\nobj2: %#v", u, newUnstr) } } if err != nil { return nil, err } - return *u, nil + return u, nil +} + +// DeepCopyJSON deep copies the passed value, assuming it is a valid JSON representation i.e. only contains +// types produced by json.Unmarshal(). 
+func DeepCopyJSON(x map[string]interface{}) map[string]interface{} { + return DeepCopyJSONValue(x).(map[string]interface{}) +} + +// DeepCopyJSONValue deep copies the passed value, assuming it is a valid JSON representation i.e. only contains +// types produced by json.Unmarshal(). +func DeepCopyJSONValue(x interface{}) interface{} { + switch x := x.(type) { + case map[string]interface{}: + clone := make(map[string]interface{}, len(x)) + for k, v := range x { + clone[k] = DeepCopyJSONValue(v) + } + return clone + case []interface{}: + clone := make([]interface{}, len(x)) + for i, v := range x { + clone[i] = DeepCopyJSONValue(v) + } + return clone + case string, int64, bool, float64, nil, encodingjson.Number: + return x + default: + panic(fmt.Errorf("cannot deep copy %T", x)) + } } func toUnstructuredViaJSON(obj interface{}, u *map[string]interface{}) error { diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/error.go b/vendor/k8s.io/apimachinery/pkg/runtime/error.go index 21a3557077bb..86b24840f0ae 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/error.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/error.go @@ -94,8 +94,6 @@ type missingVersionErr struct { data string } -// IsMissingVersion returns true if the error indicates that the provided object -// is missing a 'Version' field. func NewMissingVersionErr(data string) error { return &missingVersionErr{data} } @@ -104,6 +102,8 @@ func (k *missingVersionErr) Error() string { return fmt.Sprintf("Object 'apiVersion' is missing in '%s'", k.data) } +// IsMissingVersion returns true if the error indicates that the provided object +// is missing a 'Version' field. func IsMissingVersion(err error) bool { if err == nil { return false diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go index db4c2807e674..9d00f1650e9e 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go @@ -203,13 +203,6 @@ type ObjectCreater interface { New(kind schema.GroupVersionKind) (out Object, err error) } -// ObjectCopier duplicates an object. -type ObjectCopier interface { - // Copy returns an exact copy of the provided Object, or an error if the - // copy could not be completed. - Copy(Object) (Object, error) -} - // ResourceVersioner provides methods for setting and retrieving // the resource version from an API object. type ResourceVersioner interface { @@ -240,13 +233,13 @@ type Object interface { // Unstructured objects store values as map[string]interface{}, with only values that can be serialized // to JSON allowed. type Unstructured interface { - // IsUnstructuredObject is a marker interface to allow objects that can be serialized but not introspected - // to bypass conversion. - IsUnstructuredObject() + Object // UnstructuredContent returns a non-nil, mutable map of the contents of this object. Values may be // []interface{}, map[string]interface{}, or any primitive type. Contents are typically serialized to // and from JSON. UnstructuredContent() map[string]interface{} + // SetUnstructuredContent updates the object content to match the provided map. + SetUnstructuredContent(map[string]interface{}) // IsList returns true if this type is a list or matches the list convention - has an array called "items". IsList() bool // EachListItem should pass a single item out of the list as an Object to the provided function. 
Any diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/BUILD b/vendor/k8s.io/apimachinery/pkg/runtime/schema/BUILD index 07faa4dc5fe8..032d866edb4c 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/schema/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["group_version_test.go"], + importpath = "k8s.io/apimachinery/pkg/runtime/schema", library = ":go_default_library", ) @@ -19,6 +20,7 @@ go_library( "group_version.go", "interfaces.go", ], + importpath = "k8s.io/apimachinery/pkg/runtime/schema", deps = ["//vendor/github.com/gogo/protobuf/proto:go_default_library"], ) diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go index c3d4b7f5f8ee..08b7553810bb 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go @@ -68,10 +68,6 @@ type Scheme struct { // converter stores all registered conversion functions. It also has // default coverting behavior. converter *conversion.Converter - - // cloner stores all registered copy functions. It also has default - // deep copy behavior. - cloner *conversion.Cloner } // Function to convert a field selector to internal representation. @@ -80,11 +76,10 @@ type FieldLabelConversionFunc func(label, value string) (internalLabel, internal // NewScheme creates a new Scheme. This scheme is pluggable by default. func NewScheme() *Scheme { s := &Scheme{ - gvkToType: map[schema.GroupVersionKind]reflect.Type{}, - typeToGVK: map[reflect.Type][]schema.GroupVersionKind{}, - unversionedTypes: map[reflect.Type]schema.GroupVersionKind{}, - unversionedKinds: map[string]reflect.Type{}, - cloner: conversion.NewCloner(), + gvkToType: map[schema.GroupVersionKind]reflect.Type{}, + typeToGVK: map[reflect.Type][]schema.GroupVersionKind{}, + unversionedTypes: map[reflect.Type]schema.GroupVersionKind{}, + unversionedKinds: map[string]reflect.Type{}, fieldLabelConversionFuncs: map[string]map[string]FieldLabelConversionFunc{}, defaulterFuncs: map[reflect.Type]func(interface{}){}, } @@ -222,19 +217,22 @@ func (s *Scheme) AllKnownTypes() map[schema.GroupVersionKind]reflect.Type { return s.gvkToType } -// ObjectKind returns the group,version,kind of the go object and true if this object -// is considered unversioned, or an error if it's not a pointer or is unregistered. -func (s *Scheme) ObjectKind(obj Object) (schema.GroupVersionKind, bool, error) { - gvks, unversionedType, err := s.ObjectKinds(obj) - if err != nil { - return schema.GroupVersionKind{}, false, err - } - return gvks[0], unversionedType, nil -} - // ObjectKinds returns all possible group,version,kind of the go object, true if the // object is considered unversioned, or an error if it's not a pointer or is unregistered. 
func (s *Scheme) ObjectKinds(obj Object) ([]schema.GroupVersionKind, bool, error) { + // Unstructured objects are always considered to have their declared GVK + if _, ok := obj.(Unstructured); ok { + // we require that the GVK be populated in order to recognize the object + gvk := obj.GetObjectKind().GroupVersionKind() + if len(gvk.Kind) == 0 { + return nil, false, NewMissingKindErr("unstructured object has no kind") + } + if len(gvk.Version) == 0 { + return nil, false, NewMissingVersionErr("unstructured object has no version") + } + return []schema.GroupVersionKind{gvk}, false, nil + } + v, err := conversion.EnforcePtr(obj) if err != nil { return nil, false, err @@ -343,7 +341,7 @@ func (s *Scheme) AddConversionFuncs(conversionFuncs ...interface{}) error { return nil } -// Similar to AddConversionFuncs, but registers conversion functions that were +// AddGeneratedConversionFuncs registers conversion functions that were // automatically generated. func (s *Scheme) AddGeneratedConversionFuncs(conversionFuncs ...interface{}) error { for _, f := range conversionFuncs { @@ -354,29 +352,6 @@ func (s *Scheme) AddGeneratedConversionFuncs(conversionFuncs ...interface{}) err return nil } -// AddDeepCopyFuncs adds a function to the list of deep-copy functions. -// For the expected format of deep-copy function, see the comment for -// Copier.RegisterDeepCopyFunction. -func (s *Scheme) AddDeepCopyFuncs(deepCopyFuncs ...interface{}) error { - for _, f := range deepCopyFuncs { - if err := s.cloner.RegisterDeepCopyFunc(f); err != nil { - return err - } - } - return nil -} - -// Similar to AddDeepCopyFuncs, but registers deep-copy functions that were -// automatically generated. -func (s *Scheme) AddGeneratedDeepCopyFuncs(deepCopyFuncs ...conversion.GeneratedDeepCopyFunc) error { - for _, fn := range deepCopyFuncs { - if err := s.cloner.RegisterGeneratedDeepCopyFunc(fn); err != nil { - return err - } - } - return nil -} - // AddFieldLabelConversionFunc adds a conversion function to convert field selectors // of the given kind from the given version to internal version representation. func (s *Scheme) AddFieldLabelConversionFunc(version, kind string, conversionFunc FieldLabelConversionFunc) error { @@ -420,28 +395,72 @@ func (s *Scheme) Default(src Object) { } } -// Copy does a deep copy of an API object. -func (s *Scheme) Copy(src Object) (Object, error) { - dst, err := s.DeepCopy(src) - if err != nil { - return nil, err - } - return dst.(Object), nil -} - -// Performs a deep copy of the given object. -func (s *Scheme) DeepCopy(src interface{}) (interface{}, error) { - return s.cloner.DeepCopy(src) -} - // Convert will attempt to convert in into out. Both must be pointers. For easy // testing of conversion functions. Returns an error if the conversion isn't // possible. You can call this with types that haven't been registered (for example, // a to test conversion of types that are nested within registered types). The -// context interface is passed to the convertor. -// TODO: identify whether context should be hidden, or behind a formal context/scope -// interface +// context interface is passed to the convertor. Convert also supports Unstructured +// types and will convert them intelligently. 
func (s *Scheme) Convert(in, out interface{}, context interface{}) error { + unstructuredIn, okIn := in.(Unstructured) + unstructuredOut, okOut := out.(Unstructured) + switch { + case okIn && okOut: + // converting unstructured input to an unstructured output is a straight copy - unstructured + // is a "smart holder" and the contents are passed by reference between the two objects + unstructuredOut.SetUnstructuredContent(unstructuredIn.UnstructuredContent()) + return nil + + case okOut: + // if the output is an unstructured object, use the standard Go type to unstructured + // conversion. The object must not be internal. + obj, ok := in.(Object) + if !ok { + return fmt.Errorf("unable to convert object type %T to Unstructured, must be a runtime.Object", in) + } + gvks, unversioned, err := s.ObjectKinds(obj) + if err != nil { + return err + } + gvk := gvks[0] + + // if no conversion is necessary, convert immediately + if unversioned || gvk.Version != APIVersionInternal { + content, err := DefaultUnstructuredConverter.ToUnstructured(in) + if err != nil { + return err + } + unstructuredOut.SetUnstructuredContent(content) + return nil + } + + // attempt to convert the object to an external version first. + target, ok := context.(GroupVersioner) + if !ok { + return fmt.Errorf("unable to convert the internal object type %T to Unstructured without providing a preferred version to convert to", in) + } + // Convert is implicitly unsafe, so we don't need to perform a safe conversion + versioned, err := s.UnsafeConvertToVersion(obj, target) + if err != nil { + return err + } + content, err := DefaultUnstructuredConverter.ToUnstructured(versioned) + if err != nil { + return err + } + unstructuredOut.SetUnstructuredContent(content) + return nil + + case okIn: + // converting an unstructured object to any type is modeled by first converting + // the input to a versioned type, then running standard conversions + typed, err := s.unstructuredToTyped(unstructuredIn) + if err != nil { + return err + } + in = typed + } + flags, meta := s.generateConvertMeta(in) meta.Context = context if flags == 0 { @@ -450,8 +469,8 @@ func (s *Scheme) Convert(in, out interface{}, context interface{}) error { return s.converter.Convert(in, out, flags, meta) } -// Converts the given field label and value for an kind field selector from -// versioned representation to an unversioned one. +// ConvertFieldLabel alters the given field label and value for an kind field selector from +// versioned representation to an unversioned one or returns an error. func (s *Scheme) ConvertFieldLabel(version, kind, label, value string) (string, string, error) { if s.fieldLabelConversionFuncs[version] == nil { return DefaultMetaV1FieldSelectorConversion(label, value) @@ -481,15 +500,30 @@ func (s *Scheme) UnsafeConvertToVersion(in Object, target GroupVersioner) (Objec // convertToVersion handles conversion with an optional copy. func (s *Scheme) convertToVersion(copy bool, in Object, target GroupVersioner) (Object, error) { - // determine the incoming kinds with as few allocations as possible. 
- t := reflect.TypeOf(in) - if t.Kind() != reflect.Ptr { - return nil, fmt.Errorf("only pointer types may be converted: %v", t) - } - t = t.Elem() - if t.Kind() != reflect.Struct { - return nil, fmt.Errorf("only pointers to struct types may be converted: %v", t) + var t reflect.Type + + if u, ok := in.(Unstructured); ok { + typed, err := s.unstructuredToTyped(u) + if err != nil { + return nil, err + } + + in = typed + // unstructuredToTyped returns an Object, which must be a pointer to a struct. + t = reflect.TypeOf(in).Elem() + + } else { + // determine the incoming kinds with as few allocations as possible. + t = reflect.TypeOf(in) + if t.Kind() != reflect.Ptr { + return nil, fmt.Errorf("only pointer types may be converted: %v", t) + } + t = t.Elem() + if t.Kind() != reflect.Struct { + return nil, fmt.Errorf("only pointers to struct types may be converted: %v", t) + } } + kinds, ok := s.typeToGVK[t] if !ok || len(kinds) == 0 { return nil, NewNotRegisteredErrForType(t) @@ -501,27 +535,26 @@ func (s *Scheme) convertToVersion(copy bool, in Object, target GroupVersioner) ( // TODO: when we move to server API versions, we should completely remove the unversioned concept if unversionedKind, ok := s.unversionedTypes[t]; ok { if gvk, ok := target.KindForGroupVersionKinds([]schema.GroupVersionKind{unversionedKind}); ok { - return copyAndSetTargetKind(copy, s, in, gvk) + return copyAndSetTargetKind(copy, in, gvk) } - return copyAndSetTargetKind(copy, s, in, unversionedKind) + return copyAndSetTargetKind(copy, in, unversionedKind) } - return nil, NewNotRegisteredErrForTarget(t, target) } // target wants to use the existing type, set kind and return (no conversion necessary) for _, kind := range kinds { if gvk == kind { - return copyAndSetTargetKind(copy, s, in, gvk) + return copyAndSetTargetKind(copy, in, gvk) } } // type is unversioned, no conversion necessary if unversionedKind, ok := s.unversionedTypes[t]; ok { if gvk, ok := target.KindForGroupVersionKinds([]schema.GroupVersionKind{unversionedKind}); ok { - return copyAndSetTargetKind(copy, s, in, gvk) + return copyAndSetTargetKind(copy, in, gvk) } - return copyAndSetTargetKind(copy, s, in, unversionedKind) + return copyAndSetTargetKind(copy, in, unversionedKind) } out, err := s.New(gvk) @@ -543,13 +576,32 @@ func (s *Scheme) convertToVersion(copy bool, in Object, target GroupVersioner) ( return out, nil } +// unstructuredToTyped attempts to transform an unstructured object to a typed +// object if possible. It will return an error if conversion is not possible, or the versioned +// Go form of the object. Note that this conversion will lose fields. +func (s *Scheme) unstructuredToTyped(in Unstructured) (Object, error) { + // the type must be something we recognize + gvks, _, err := s.ObjectKinds(in) + if err != nil { + return nil, err + } + typed, err := s.New(gvks[0]) + if err != nil { + return nil, err + } + if err := DefaultUnstructuredConverter.FromUnstructured(in.UnstructuredContent(), typed); err != nil { + return nil, fmt.Errorf("unable to convert unstructured object to %v: %v", gvks[0], err) + } + return typed, nil +} + // generateConvertMeta constructs the meta value we pass to Convert. func (s *Scheme) generateConvertMeta(in interface{}) (conversion.FieldMatchingFlags, *conversion.Meta) { return s.converter.DefaultMeta(reflect.TypeOf(in)) } // copyAndSetTargetKind performs a conditional copy before returning the object, or an error if copy was not successful. 
-func copyAndSetTargetKind(copy bool, copier ObjectCopier, obj Object, kind schema.GroupVersionKind) (Object, error) { +func copyAndSetTargetKind(copy bool, obj Object, kind schema.GroupVersionKind) (Object, error) { if copy { obj = obj.DeepCopyObject() } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/BUILD b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/BUILD index 4a0a8f812bee..9403c3376f49 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["codec_test.go"], + importpath = "k8s.io/apimachinery/pkg/runtime/serializer", library = ":go_default_library", deps = [ "//vendor/github.com/ghodss/yaml:go_default_library", @@ -30,6 +31,7 @@ go_library( "negotiated_codec.go", "protobuf_extension.go", ], + importpath = "k8s.io/apimachinery/pkg/runtime/serializer", deps = [ "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/BUILD b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/BUILD index d8a2b8d6541c..0d43ce95429f 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["meta_test.go"], + importpath = "k8s.io/apimachinery/pkg/runtime/serializer/json", library = ":go_default_library", ) @@ -18,6 +19,7 @@ go_library( "json.go", "meta.go", ], + importpath = "k8s.io/apimachinery/pkg/runtime/serializer/json", deps = [ "//vendor/github.com/ghodss/yaml:go_default_library", "//vendor/github.com/json-iterator/go:go_default_library", @@ -32,6 +34,7 @@ go_library( go_test( name = "go_default_xtest", srcs = ["json_test.go"], + importpath = "k8s.io/apimachinery/pkg/runtime/serializer/json_test", deps = [ "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go index ce3d77c2b3bb..8a217f32e318 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go @@ -150,9 +150,10 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i } if into != nil { + _, isUnstructured := into.(runtime.Unstructured) types, _, err := s.typer.ObjectKinds(into) switch { - case runtime.IsNotRegisteredError(err): + case runtime.IsNotRegisteredError(err), isUnstructured: if err := jsoniter.ConfigFastest.Unmarshal(data, into); err != nil { return nil, actual, err } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/BUILD b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/BUILD index dba23107dc86..3eb91d8626cc 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/BUILD @@ -11,6 +11,7 @@ go_library( "doc.go", "protobuf.go", ], + importpath = "k8s.io/apimachinery/pkg/runtime/serializer/protobuf", deps = [ "//vendor/github.com/gogo/protobuf/proto:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/BUILD 
b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/BUILD index 95239b02822c..de54abb3d27d 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["recognizer.go"], + importpath = "k8s.io/apimachinery/pkg/runtime/serializer/recognizer", deps = [ "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/BUILD b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/BUILD index f3991e494a73..4903338fa6bb 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["streaming_test.go"], + importpath = "k8s.io/apimachinery/pkg/runtime/serializer/streaming", library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", @@ -20,6 +21,7 @@ go_test( go_library( name = "go_default_library", srcs = ["streaming.go"], + importpath = "k8s.io/apimachinery/pkg/runtime/serializer/streaming", deps = [ "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/BUILD b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/BUILD index f9406b4bce25..a1b0e6eb2774 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["versioning_test.go"], + importpath = "k8s.io/apimachinery/pkg/runtime/serializer/versioning", library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", @@ -20,10 +21,10 @@ go_test( go_library( name = "go_default_library", srcs = ["versioning.go"], + importpath = "k8s.io/apimachinery/pkg/runtime/serializer/versioning", deps = [ "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", ], ) diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go index 74ad84dd95ed..b717fe8fe6cb 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go @@ -21,7 +21,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" ) // NewCodecForScheme is a convenience method for callers that are using a scheme. @@ -33,7 +32,7 @@ func NewCodecForScheme( encodeVersion runtime.GroupVersioner, decodeVersion runtime.GroupVersioner, ) runtime.Codec { - return NewCodec(encoder, decoder, runtime.UnsafeObjectConvertor(scheme), scheme, scheme, scheme, nil, encodeVersion, decodeVersion) + return NewCodec(encoder, decoder, runtime.UnsafeObjectConvertor(scheme), scheme, scheme, nil, encodeVersion, decodeVersion) } // NewDefaultingCodecForScheme is a convenience method for callers that are using a scheme. 
@@ -45,7 +44,7 @@ func NewDefaultingCodecForScheme( encodeVersion runtime.GroupVersioner, decodeVersion runtime.GroupVersioner, ) runtime.Codec { - return NewCodec(encoder, decoder, runtime.UnsafeObjectConvertor(scheme), scheme, scheme, scheme, scheme, encodeVersion, decodeVersion) + return NewCodec(encoder, decoder, runtime.UnsafeObjectConvertor(scheme), scheme, scheme, scheme, encodeVersion, decodeVersion) } // NewCodec takes objects in their internal versions and converts them to external versions before @@ -56,7 +55,6 @@ func NewCodec( decoder runtime.Decoder, convertor runtime.ObjectConvertor, creater runtime.ObjectCreater, - copier runtime.ObjectCopier, typer runtime.ObjectTyper, defaulter runtime.ObjectDefaulter, encodeVersion runtime.GroupVersioner, @@ -67,7 +65,6 @@ func NewCodec( decoder: decoder, convertor: convertor, creater: creater, - copier: copier, typer: typer, defaulter: defaulter, @@ -82,7 +79,6 @@ type codec struct { decoder runtime.Decoder convertor runtime.ObjectConvertor creater runtime.ObjectCreater - copier runtime.ObjectCopier typer runtime.ObjectTyper defaulter runtime.ObjectDefaulter @@ -123,12 +119,7 @@ func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into ru if c.defaulter != nil { // create a copy to ensure defaulting is not applied to the original versioned objects if isVersioned { - copied, err := c.copier.Copy(obj) - if err != nil { - utilruntime.HandleError(err) - copied = obj - } - versioned.Objects = []runtime.Object{copied} + versioned.Objects = []runtime.Object{obj.DeepCopyObject()} } c.defaulter.Default(obj) } else { @@ -151,12 +142,7 @@ func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into ru // Convert if needed. if isVersioned { // create a copy, because ConvertToVersion does not guarantee non-mutation of objects - copied, err := c.copier.Copy(obj) - if err != nil { - utilruntime.HandleError(err) - copied = obj - } - versioned.Objects = []runtime.Object{copied} + versioned.Objects = []runtime.Object{obj.DeepCopyObject()} } // perform defaulting if requested diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go index d347461ac69d..929c67a9d130 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go @@ -20,31 +20,6 @@ limitations under the License. package runtime -import ( - conversion "k8s.io/apimachinery/pkg/conversion" - reflect "reflect" -) - -// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { - return []conversion.GeneratedDeepCopyFunc{ - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RawExtension).DeepCopyInto(out.(*RawExtension)) - return nil - }, InType: reflect.TypeOf(&RawExtension{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Unknown).DeepCopyInto(out.(*Unknown)) - return nil - }, InType: reflect.TypeOf(&Unknown{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*VersionedObjects).DeepCopyInto(out.(*VersionedObjects)) - return nil - }, InType: reflect.TypeOf(&VersionedObjects{})}, - } -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *RawExtension) DeepCopyInto(out *RawExtension) { *out = *in diff --git a/vendor/k8s.io/apimachinery/pkg/selection/BUILD b/vendor/k8s.io/apimachinery/pkg/selection/BUILD index 29027a0b9ec0..3790df9af2ae 100644 --- a/vendor/k8s.io/apimachinery/pkg/selection/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/selection/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["operator.go"], + importpath = "k8s.io/apimachinery/pkg/selection", ) filegroup( diff --git a/vendor/k8s.io/apimachinery/pkg/types/BUILD b/vendor/k8s.io/apimachinery/pkg/types/BUILD index 63d4f6b15ab0..3db635c8a3dc 100644 --- a/vendor/k8s.io/apimachinery/pkg/types/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/types/BUILD @@ -14,6 +14,7 @@ go_library( "patch.go", "uid.go", ], + importpath = "k8s.io/apimachinery/pkg/types", ) filegroup( diff --git a/vendor/k8s.io/apimachinery/pkg/util/cache/BUILD b/vendor/k8s.io/apimachinery/pkg/util/cache/BUILD index ecc2cd93bf74..d589c0d152a0 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/cache/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/util/cache/BUILD @@ -12,6 +12,7 @@ go_test( "cache_test.go", "lruexpirecache_test.go", ], + importpath = "k8s.io/apimachinery/pkg/util/cache", library = ":go_default_library", deps = [ "//vendor/github.com/golang/groupcache/lru:go_default_library", @@ -25,6 +26,7 @@ go_library( "cache.go", "lruexpirecache.go", ], + importpath = "k8s.io/apimachinery/pkg/util/cache", deps = ["//vendor/github.com/hashicorp/golang-lru:go_default_library"], ) diff --git a/vendor/k8s.io/apimachinery/pkg/util/clock/BUILD b/vendor/k8s.io/apimachinery/pkg/util/clock/BUILD index 90d28c490769..62ad5a87b183 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/clock/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/util/clock/BUILD @@ -9,12 +9,14 @@ load( go_test( name = "go_default_test", srcs = ["clock_test.go"], + importpath = "k8s.io/apimachinery/pkg/util/clock", library = ":go_default_library", ) go_library( name = "go_default_library", srcs = ["clock.go"], + importpath = "k8s.io/apimachinery/pkg/util/clock", ) filegroup( diff --git a/vendor/k8s.io/apimachinery/pkg/util/diff/BUILD b/vendor/k8s.io/apimachinery/pkg/util/diff/BUILD index 225d0b6f771f..4ba69bc6354b 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/diff/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/util/diff/BUILD @@ -9,12 +9,14 @@ load( go_test( name = "go_default_test", srcs = ["diff_test.go"], + importpath = "k8s.io/apimachinery/pkg/util/diff", library = ":go_default_library", ) go_library( name = "go_default_library", srcs = ["diff.go"], + importpath = "k8s.io/apimachinery/pkg/util/diff", deps = [ "//vendor/github.com/davecgh/go-spew/spew:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", diff --git a/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go b/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go index 0f730875e857..3d5ec14bfa94 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go +++ b/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go @@ -142,10 +142,6 @@ func objectReflectDiff(path *field.Path, a, b reflect.Value) []diff { } if sub := objectReflectDiff(path.Child(a.Type().Field(i).Name), a.Field(i), b.Field(i)); len(sub) > 0 { changes = append(changes, sub...) 
- } else { - if !reflect.DeepEqual(a.Field(i).Interface(), b.Field(i).Interface()) { - changes = append(changes, diff{path: path, a: a.Field(i).Interface(), b: b.Field(i).Interface()}) - } } } return changes @@ -178,21 +174,18 @@ func objectReflectDiff(path *field.Path, a, b reflect.Value) []diff { } return nil } + var diffs []diff for i := 0; i < l; i++ { if !reflect.DeepEqual(a.Index(i), b.Index(i)) { - return objectReflectDiff(path.Index(i), a.Index(i), b.Index(i)) + diffs = append(diffs, objectReflectDiff(path.Index(i), a.Index(i), b.Index(i))...) } } - var diffs []diff for i := l; i < lA; i++ { diffs = append(diffs, diff{path: path.Index(i), a: a.Index(i), b: nil}) } for i := l; i < lB; i++ { diffs = append(diffs, diff{path: path.Index(i), a: nil, b: b.Index(i)}) } - if len(diffs) == 0 { - diffs = append(diffs, diff{path: path, a: a, b: b}) - } return diffs case reflect.Map: if reflect.DeepEqual(a.Interface(), b.Interface()) { diff --git a/vendor/k8s.io/apimachinery/pkg/util/errors/BUILD b/vendor/k8s.io/apimachinery/pkg/util/errors/BUILD index 8859a8e47d1f..d13ff2407197 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/errors/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/util/errors/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["errors_test.go"], + importpath = "k8s.io/apimachinery/pkg/util/errors", library = ":go_default_library", ) @@ -18,6 +19,7 @@ go_library( "doc.go", "errors.go", ], + importpath = "k8s.io/apimachinery/pkg/util/errors", ) filegroup( diff --git a/vendor/k8s.io/apimachinery/pkg/util/framer/BUILD b/vendor/k8s.io/apimachinery/pkg/util/framer/BUILD index 7b6827195197..f0b7cdec52a3 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/framer/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/util/framer/BUILD @@ -9,12 +9,14 @@ load( go_test( name = "go_default_test", srcs = ["framer_test.go"], + importpath = "k8s.io/apimachinery/pkg/util/framer", library = ":go_default_library", ) go_library( name = "go_default_library", srcs = ["framer.go"], + importpath = "k8s.io/apimachinery/pkg/util/framer", ) filegroup( diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/BUILD b/vendor/k8s.io/apimachinery/pkg/util/httpstream/BUILD index fa913eb774c4..94c1d94a2498 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["httpstream_test.go"], + importpath = "k8s.io/apimachinery/pkg/util/httpstream", library = ":go_default_library", ) @@ -18,6 +19,7 @@ go_library( "doc.go", "httpstream.go", ], + importpath = "k8s.io/apimachinery/pkg/util/httpstream", ) filegroup( diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/BUILD b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/BUILD index 3d82eb526219..8342083ad9f0 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/BUILD @@ -13,6 +13,7 @@ go_test( "roundtripper_test.go", "upgrade_test.go", ], + importpath = "k8s.io/apimachinery/pkg/util/httpstream/spdy", library = ":go_default_library", deps = [ "//vendor/github.com/elazarl/goproxy:go_default_library", @@ -27,6 +28,7 @@ go_library( "roundtripper.go", "upgrade.go", ], + importpath = "k8s.io/apimachinery/pkg/util/httpstream/spdy", deps = [ "//vendor/github.com/docker/spdystream:go_default_library", "//vendor/github.com/golang/glog:go_default_library", diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go 
b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go index 12bef075dab3..d2d3ad8cb016 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go +++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go @@ -110,7 +110,7 @@ func (s *SpdyRoundTripper) Dial(req *http.Request) (net.Conn, error) { func (s *SpdyRoundTripper) dial(req *http.Request) (net.Conn, error) { proxier := s.proxier if proxier == nil { - proxier = http.ProxyFromEnvironment + proxier = utilnet.NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment) } proxyURL, err := proxier(req) if err != nil { diff --git a/vendor/k8s.io/apimachinery/pkg/util/initialization/BUILD b/vendor/k8s.io/apimachinery/pkg/util/initialization/BUILD index 8c0f05b66a70..37691997b197 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/initialization/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/util/initialization/BUILD @@ -3,6 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = ["initialization.go"], + importpath = "k8s.io/apimachinery/pkg/util/initialization", visibility = ["//visibility:public"], deps = [ "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/BUILD b/vendor/k8s.io/apimachinery/pkg/util/intstr/BUILD index 9eece8f8059a..2e3fe6516194 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["intstr_test.go"], + importpath = "k8s.io/apimachinery/pkg/util/intstr", library = ":go_default_library", deps = ["//vendor/github.com/ghodss/yaml:go_default_library"], ) @@ -19,6 +20,7 @@ go_library( "generated.pb.go", "intstr.go", ], + importpath = "k8s.io/apimachinery/pkg/util/intstr", deps = [ "//vendor/github.com/go-openapi/spec:go_default_library", "//vendor/github.com/gogo/protobuf/proto:go_default_library", diff --git a/vendor/k8s.io/apimachinery/pkg/util/json/BUILD b/vendor/k8s.io/apimachinery/pkg/util/json/BUILD index cc30f70404db..c9b57bcba3f5 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/json/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/util/json/BUILD @@ -9,11 +9,13 @@ load( go_library( name = "go_default_library", srcs = ["json.go"], + importpath = "k8s.io/apimachinery/pkg/util/json", ) go_test( name = "go_default_test", srcs = ["json_test.go"], + importpath = "k8s.io/apimachinery/pkg/util/json", library = ":go_default_library", ) diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/BUILD b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/BUILD index 775b5b2b62e2..007159566464 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["util_test.go"], + importpath = "k8s.io/apimachinery/pkg/util/mergepatch", library = ":go_default_library", ) @@ -18,6 +19,7 @@ go_library( "errors.go", "util.go", ], + importpath = "k8s.io/apimachinery/pkg/util/mergepatch", deps = [ "//vendor/github.com/davecgh/go-spew/spew:go_default_library", "//vendor/github.com/ghodss/yaml:go_default_library", diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go index ac3c1e8cfcf5..16501d5afe9c 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go @@ -29,6 +29,7 @@ var ( 
ErrBadPatchFormatForRetainKeys = errors.New("invalid patch format of retainKeys") ErrBadPatchFormatForSetElementOrderList = errors.New("invalid patch format of setElementOrder list") ErrPatchContentNotMatchRetainKeys = errors.New("patch content doesn't match retainKeys list") + ErrUnsupportedStrategicMergePatchFormat = errors.New("strategic merge patch format is not supported") ) func ErrNoMergeKey(m map[string]interface{}, k string) error { diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/BUILD b/vendor/k8s.io/apimachinery/pkg/util/net/BUILD index bdd086a61ed0..d7390ed5c29c 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/util/net/BUILD @@ -15,6 +15,7 @@ go_test( "port_split_test.go", "util_test.go", ], + importpath = "k8s.io/apimachinery/pkg/util/net", library = ":go_default_library", deps = ["//vendor/github.com/spf13/pflag:go_default_library"], ) @@ -28,6 +29,7 @@ go_library( "port_split.go", "util.go", ], + importpath = "k8s.io/apimachinery/pkg/util/net", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/net/http2:go_default_library", diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/http.go b/vendor/k8s.io/apimachinery/pkg/util/net/http.go index 41be5b2fe259..bc2a531b9d24 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/http.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/http.go @@ -256,8 +256,11 @@ func isDefault(transportProxier func(*http.Request) (*url.URL, error)) bool { // NewProxierWithNoProxyCIDR constructs a Proxier function that respects CIDRs in NO_PROXY and delegates if // no matching CIDRs are found func NewProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error)) func(req *http.Request) (*url.URL, error) { - // we wrap the default method, so we only need to perform our check if the NO_PROXY envvar has a CIDR in it + // we wrap the default method, so we only need to perform our check if the NO_PROXY (or no_proxy) envvar has a CIDR in it noProxyEnv := os.Getenv("NO_PROXY") + if noProxyEnv == "" { + noProxyEnv = os.Getenv("no_proxy") + } noProxyRules := strings.Split(noProxyEnv, ",") cidrs := []*net.IPNet{} @@ -273,17 +276,7 @@ func NewProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error } return func(req *http.Request) (*url.URL, error) { - host := req.URL.Host - // for some urls, the Host is already the host, not the host:port - if net.ParseIP(host) == nil { - var err error - host, _, err = net.SplitHostPort(req.URL.Host) - if err != nil { - return delegate(req) - } - } - - ip := net.ParseIP(host) + ip := net.ParseIP(req.URL.Hostname()) if ip == nil { return delegate(req) } diff --git a/vendor/k8s.io/apimachinery/pkg/util/proxy/BUILD b/vendor/k8s.io/apimachinery/pkg/util/proxy/BUILD index ff2aafdbaea5..368915c2c81c 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/proxy/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/util/proxy/BUILD @@ -13,6 +13,7 @@ go_test( "transport_test.go", "upgradeaware_test.go", ], + importpath = "k8s.io/apimachinery/pkg/util/proxy", library = ":go_default_library", deps = [ "//vendor/github.com/stretchr/testify/assert:go_default_library", @@ -32,6 +33,7 @@ go_library( "transport.go", "upgradeaware.go", ], + importpath = "k8s.io/apimachinery/pkg/util/proxy", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/mxk/go-flowrate/flowrate:go_default_library", diff --git a/vendor/k8s.io/apimachinery/pkg/util/proxy/transport.go b/vendor/k8s.io/apimachinery/pkg/util/proxy/transport.go index 
5bf229697414..6c34ab5241de 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/proxy/transport.go +++ b/vendor/k8s.io/apimachinery/pkg/util/proxy/transport.go @@ -109,7 +109,7 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { } if redirect := resp.Header.Get("Location"); redirect != "" { - resp.Header.Set("Location", t.rewriteURL(redirect, req.URL)) + resp.Header.Set("Location", t.rewriteURL(redirect, req.URL, req.Host)) return resp, nil } @@ -131,21 +131,39 @@ func (rt *Transport) WrappedRoundTripper() http.RoundTripper { // rewriteURL rewrites a single URL to go through the proxy, if the URL refers // to the same host as sourceURL, which is the page on which the target URL -// occurred. If any error occurs (e.g. parsing), it returns targetURL. -func (t *Transport) rewriteURL(targetURL string, sourceURL *url.URL) string { +// occurred, or if the URL matches the sourceRequestHost. If any error occurs (e.g. +// parsing), it returns targetURL. +func (t *Transport) rewriteURL(targetURL string, sourceURL *url.URL, sourceRequestHost string) string { url, err := url.Parse(targetURL) if err != nil { return targetURL } - isDifferentHost := url.Host != "" && url.Host != sourceURL.Host + // Example: + // When API server processes a proxy request to a service (e.g. /api/v1/namespace/foo/service/bar/proxy/), + // the sourceURL.Host (i.e. req.URL.Host) is the endpoint IP address of the service. The + // sourceRequestHost (i.e. req.Host) is the Host header that specifies the host on which the + // URL is sought, which can be different from sourceURL.Host. For example, if user sends the + // request through "kubectl proxy" locally (i.e. localhost:8001/api/v1/namespace/foo/service/bar/proxy/), + // sourceRequestHost is "localhost:8001". + // + // If the service's response URL contains non-empty host, and url.Host is equal to either sourceURL.Host + // or sourceRequestHost, we should not consider the returned URL to be a completely different host. + // It's the API server's responsibility to rewrite a same-host-and-absolute-path URL and append the + // necessary URL prefix (i.e. /api/v1/namespace/foo/service/bar/proxy/). + isDifferentHost := url.Host != "" && url.Host != sourceURL.Host && url.Host != sourceRequestHost isRelative := !strings.HasPrefix(url.Path, "/") if isDifferentHost || isRelative { return targetURL } - url.Scheme = t.Scheme - url.Host = t.Host + // Do not rewrite scheme and host if the Transport has empty scheme and host + // when targetURL already contains the sourceRequestHost + if !(url.Host == sourceRequestHost && t.Scheme == "" && t.Host == "") { + url.Scheme = t.Scheme + url.Host = t.Host + } + origPath := url.Path // Do not rewrite URL if the sourceURL already contains the necessary prefix. 
if strings.HasPrefix(url.Path, t.PathPrepend) { @@ -223,7 +241,7 @@ func (t *Transport) rewriteResponse(req *http.Request, resp *http.Response) (*ht } urlRewriter := func(targetUrl string) string { - return t.rewriteURL(targetUrl, req.URL) + return t.rewriteURL(targetUrl, req.URL, req.Host) } err := rewriteHTML(reader, writer, urlRewriter) if err != nil { diff --git a/vendor/k8s.io/apimachinery/pkg/util/rand/BUILD b/vendor/k8s.io/apimachinery/pkg/util/rand/BUILD index a92d241f9a29..b7769be86e46 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/rand/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/util/rand/BUILD @@ -9,12 +9,14 @@ load( go_test( name = "go_default_test", srcs = ["rand_test.go"], + importpath = "k8s.io/apimachinery/pkg/util/rand", library = ":go_default_library", ) go_library( name = "go_default_library", srcs = ["rand.go"], + importpath = "k8s.io/apimachinery/pkg/util/rand", ) filegroup( diff --git a/vendor/k8s.io/apimachinery/pkg/util/rand/rand.go b/vendor/k8s.io/apimachinery/pkg/util/rand/rand.go index 4c593b3c4b3f..9421edae866a 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/rand/rand.go +++ b/vendor/k8s.io/apimachinery/pkg/util/rand/rand.go @@ -70,16 +70,41 @@ func Perm(n int) []int { return rng.rand.Perm(n) } -// We omit vowels from the set of available characters to reduce the chances -// of "bad words" being formed. -var alphanums = []rune("bcdfghjklmnpqrstvwxz2456789") +const ( + // We omit vowels from the set of available characters to reduce the chances + // of "bad words" being formed. + alphanums = "bcdfghjklmnpqrstvwxz2456789" + // No. of bits required to index into alphanums string. + alphanumsIdxBits = 5 + // Mask used to extract last alphanumsIdxBits of an int. + alphanumsIdxMask = 1<<alphanumsIdxBits - 1 + // No. of letters we can extract for one int63. + maxAlphanumsPerInt = 63 / alphanumsIdxBits +) -// String generates a random alphanumeric string n characters long.  This will -// panic if n is less than zero. -func String(length int) string { - b := make([]rune, length) - for i := range b { - b[i] = alphanums[Intn(len(alphanums))] +// String generates a random alphanumeric string, without vowels, which is n +// characters long.  This will panic if n is less than zero. +func String(n int) string { + b := make([]byte, n) + rng.Lock() + defer rng.Unlock() + randomInt63 := rng.rand.Int63() + remaining := maxAlphanumsPerInt + for i := 0; i < n; { + if remaining == 0 { + randomInt63, remaining = rng.rand.Int63(), maxAlphanumsPerInt + } + if idx := int(randomInt63 & alphanumsIdxMask); idx < len(alphanums) { + b[i] = alphanums[idx] + i++ + } + randomInt63 >>= alphanumsIdxBits + remaining-- } return string(b) } @@ -87,7 +112,7 @@ func String(length int) string { // SafeEncodeString encodes s using the same characters as rand.String. This reduces the chances of bad words and // ensures that strings generated from hash functions appear consistent throughout the API.
func SafeEncodeString(s string) string { - r := make([]rune, len(s)) + r := make([]byte, len(s)) for i, b := range []rune(s) { r[i] = alphanums[(int(b) % len(alphanums))] } diff --git a/vendor/k8s.io/apimachinery/pkg/util/remotecommand/BUILD b/vendor/k8s.io/apimachinery/pkg/util/remotecommand/BUILD index 0293e0ddad33..9919ad54a618 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/remotecommand/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/util/remotecommand/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["constants.go"], + importpath = "k8s.io/apimachinery/pkg/util/remotecommand", deps = ["//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library"], ) diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/BUILD b/vendor/k8s.io/apimachinery/pkg/util/runtime/BUILD index 55109577d9c4..40892fa783cb 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/runtime/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/BUILD @@ -9,12 +9,14 @@ load( go_test( name = "go_default_test", srcs = ["runtime_test.go"], + importpath = "k8s.io/apimachinery/pkg/util/runtime", library = ":go_default_library", ) go_library( name = "go_default_library", srcs = ["runtime.go"], + importpath = "k8s.io/apimachinery/pkg/util/runtime", deps = ["//vendor/github.com/golang/glog:go_default_library"], ) diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go index 748174e19191..442dde7df2c8 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go +++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go @@ -128,7 +128,9 @@ func (r *rudimentaryErrorBackoff) OnError(error) { r.lastErrorTimeLock.Lock() defer r.lastErrorTimeLock.Unlock() d := time.Since(r.lastErrorTime) - if d < r.minPeriod { + if d < r.minPeriod && d >= 0 { + // If the time moves backwards for any reason, do nothing + // TODO: remove check "d >= 0" after go 1.8 is no longer supported time.Sleep(r.minPeriod - d) } r.lastErrorTime = time.Now() diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/BUILD b/vendor/k8s.io/apimachinery/pkg/util/sets/BUILD index 3bf4ebc13b67..5a6175ad4fd6 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/BUILD @@ -17,6 +17,7 @@ go_library( "int64.go", "string.go", ], + importpath = "k8s.io/apimachinery/pkg/util/sets", ) go_genrule( @@ -50,6 +51,7 @@ $(location //vendor/k8s.io/code-generator/cmd/set-gen) \ go_test( name = "go_default_test", srcs = ["set_test.go"], + importpath = "k8s.io/apimachinery/pkg/util/sets", library = ":go_default_library", ) diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/BUILD b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/BUILD index eebfbf69a751..e69fe0abb1da 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/BUILD @@ -9,23 +9,38 @@ load( go_test( name = "go_default_test", srcs = ["patch_test.go"], + data = [ + "testdata/swagger-merge-item.json", + "testdata/swagger-precision-item.json", + ], + importpath = "k8s.io/apimachinery/pkg/util/strategicpatch", library = ":go_default_library", deps = [ "//vendor/github.com/davecgh/go-spew/spew:go_default_library", "//vendor/github.com/ghodss/yaml:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/mergepatch:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/strategicpatch/testing:go_default_library", ], ) go_library( name = "go_default_library", - srcs = ["patch.go"], + srcs = [ + "errors.go", + "meta.go", + "patch.go", + "types.go", + ], + importpath = "k8s.io/apimachinery/pkg/util/strategicpatch", deps = [ + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/mergepatch:go_default_library", "//vendor/k8s.io/apimachinery/third_party/forked/golang/json:go_default_library", + "//vendor/k8s.io/kube-openapi/pkg/util/proto:go_default_library", ], ) @@ -38,6 +53,9 @@ filegroup( filegroup( name = "all-srcs", - srcs = [":package-srcs"], + srcs = [ + ":package-srcs", + "//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/testing:all-srcs", + ], tags = ["automanaged"], ) diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/errors.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/errors.go new file mode 100644 index 000000000000..ab66d04523ab --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/errors.go @@ -0,0 +1,49 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package strategicpatch + +import ( + "fmt" +) + +type LookupPatchMetaError struct { + Path string + Err error +} + +func (e LookupPatchMetaError) Error() string { + return fmt.Sprintf("LookupPatchMetaError(%s): %v", e.Path, e.Err) +} + +type FieldNotFoundError struct { + Path string + Field string +} + +func (e FieldNotFoundError) Error() string { + return fmt.Sprintf("unable to find api field %q in %s", e.Field, e.Path) +} + +type InvalidTypeError struct { + Path string + Expected string + Actual string +} + +func (e InvalidTypeError) Error() string { + return fmt.Sprintf("invalid type for %s: got %q, expected %q", e.Path, e.Actual, e.Expected) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go new file mode 100644 index 000000000000..c31de15e7aa3 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go @@ -0,0 +1,194 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package strategicpatch + +import ( + "errors" + "fmt" + "reflect" + + "k8s.io/apimachinery/pkg/util/mergepatch" + forkedjson "k8s.io/apimachinery/third_party/forked/golang/json" + openapi "k8s.io/kube-openapi/pkg/util/proto" +) + +type PatchMeta struct { + patchStrategies []string + patchMergeKey string +} + +func (pm PatchMeta) GetPatchStrategies() []string { + if pm.patchStrategies == nil { + return []string{} + } + return pm.patchStrategies +} + +func (pm PatchMeta) SetPatchStrategies(ps []string) { + pm.patchStrategies = ps +} + +func (pm PatchMeta) GetPatchMergeKey() string { + return pm.patchMergeKey +} + +func (pm PatchMeta) SetPatchMergeKey(pmk string) { + pm.patchMergeKey = pmk +} + +type LookupPatchMeta interface { + // LookupPatchMetadataForStruct gets subschema and the patch metadata (e.g. patch strategy and merge key) for map. + LookupPatchMetadataForStruct(key string) (LookupPatchMeta, PatchMeta, error) + // LookupPatchMetadataForSlice get subschema and the patch metadata for slice. + LookupPatchMetadataForSlice(key string) (LookupPatchMeta, PatchMeta, error) + // Get the type name of the field + Name() string +} + +type PatchMetaFromStruct struct { + T reflect.Type +} + +func NewPatchMetaFromStruct(dataStruct interface{}) (PatchMetaFromStruct, error) { + t, err := getTagStructType(dataStruct) + return PatchMetaFromStruct{T: t}, err +} + +var _ LookupPatchMeta = PatchMetaFromStruct{} + +func (s PatchMetaFromStruct) LookupPatchMetadataForStruct(key string) (LookupPatchMeta, PatchMeta, error) { + fieldType, fieldPatchStrategies, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadataForStruct(s.T, key) + if err != nil { + return nil, PatchMeta{}, err + } + + return PatchMetaFromStruct{T: fieldType}, + PatchMeta{ + patchStrategies: fieldPatchStrategies, + patchMergeKey: fieldPatchMergeKey, + }, nil +} + +func (s PatchMetaFromStruct) LookupPatchMetadataForSlice(key string) (LookupPatchMeta, PatchMeta, error) { + subschema, patchMeta, err := s.LookupPatchMetadataForStruct(key) + if err != nil { + return nil, PatchMeta{}, err + } + elemPatchMetaFromStruct := subschema.(PatchMetaFromStruct) + t := elemPatchMetaFromStruct.T + + var elemType reflect.Type + switch t.Kind() { + // If t is an array or a slice, get the element type. + // If element is still an array or a slice, return an error. + // Otherwise, return element type. + case reflect.Array, reflect.Slice: + elemType = t.Elem() + if elemType.Kind() == reflect.Array || elemType.Kind() == reflect.Slice { + return nil, PatchMeta{}, errors.New("unexpected slice of slice") + } + // If t is an pointer, get the underlying element. + // If the underlying element is neither an array nor a slice, the pointer is pointing to a slice, + // e.g. https://github.com/kubernetes/kubernetes/blob/bc22e206c79282487ea0bf5696d5ccec7e839a76/staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/patch_test.go#L2782-L2822 + // If the underlying element is either an array or a slice, return its element type. 
+ case reflect.Ptr: + t = t.Elem() + if t.Kind() == reflect.Array || t.Kind() == reflect.Slice { + t = t.Elem() + } + elemType = t + default: + return nil, PatchMeta{}, fmt.Errorf("expected slice or array type, but got: %s", s.T.Kind().String()) + } + + return PatchMetaFromStruct{T: elemType}, patchMeta, nil +} + +func (s PatchMetaFromStruct) Name() string { + return s.T.Kind().String() +} + +func getTagStructType(dataStruct interface{}) (reflect.Type, error) { + if dataStruct == nil { + return nil, mergepatch.ErrBadArgKind(struct{}{}, nil) + } + + t := reflect.TypeOf(dataStruct) + // Get the underlying type for pointers + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + + if t.Kind() != reflect.Struct { + return nil, mergepatch.ErrBadArgKind(struct{}{}, dataStruct) + } + + return t, nil +} + +func GetTagStructTypeOrDie(dataStruct interface{}) reflect.Type { + t, err := getTagStructType(dataStruct) + if err != nil { + panic(err) + } + return t +} + +type PatchMetaFromOpenAPI struct { + Schema openapi.Schema +} + +func NewPatchMetaFromOpenAPI(s openapi.Schema) PatchMetaFromOpenAPI { + return PatchMetaFromOpenAPI{Schema: s} +} + +var _ LookupPatchMeta = PatchMetaFromOpenAPI{} + +func (s PatchMetaFromOpenAPI) LookupPatchMetadataForStruct(key string) (LookupPatchMeta, PatchMeta, error) { + if s.Schema == nil { + return nil, PatchMeta{}, nil + } + kindItem := NewKindItem(key, s.Schema.GetPath()) + s.Schema.Accept(kindItem) + + err := kindItem.Error() + if err != nil { + return nil, PatchMeta{}, err + } + return PatchMetaFromOpenAPI{Schema: kindItem.subschema}, + kindItem.patchmeta, nil +} + +func (s PatchMetaFromOpenAPI) LookupPatchMetadataForSlice(key string) (LookupPatchMeta, PatchMeta, error) { + if s.Schema == nil { + return nil, PatchMeta{}, nil + } + sliceItem := NewSliceItem(key, s.Schema.GetPath()) + s.Schema.Accept(sliceItem) + + err := sliceItem.Error() + if err != nil { + return nil, PatchMeta{}, err + } + return PatchMetaFromOpenAPI{Schema: sliceItem.subschema}, + sliceItem.patchmeta, nil +} + +func (s PatchMetaFromOpenAPI) Name() string { + schema := s.Schema + return schema.GetName() +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go index 8884c738ed93..fd08759f3910 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go @@ -22,9 +22,9 @@ import ( "sort" "strings" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/util/json" "k8s.io/apimachinery/pkg/util/mergepatch" - forkedjson "k8s.io/apimachinery/third_party/forked/golang/json" ) // An alternate implementation of JSON Merge Patch @@ -92,6 +92,16 @@ type MergeOptions struct { // return a patch that yields the modified document when applied to the original document, or an error // if either of the two documents is invalid. func CreateTwoWayMergePatch(original, modified []byte, dataStruct interface{}, fns ...mergepatch.PreconditionFunc) ([]byte, error) { + schema, err := NewPatchMetaFromStruct(dataStruct) + if err != nil { + return nil, err + } + + return CreateTwoWayMergePatchUsingLookupPatchMeta(original, modified, schema, fns...) 
+} + +func CreateTwoWayMergePatchUsingLookupPatchMeta( + original, modified []byte, schema LookupPatchMeta, fns ...mergepatch.PreconditionFunc) ([]byte, error) { originalMap := map[string]interface{}{} if len(original) > 0 { if err := json.Unmarshal(original, &originalMap); err != nil { @@ -106,7 +116,7 @@ func CreateTwoWayMergePatch(original, modified []byte, dataStruct interface{}, f } } - patchMap, err := CreateTwoWayMergeMapPatch(originalMap, modifiedMap, dataStruct, fns...) + patchMap, err := CreateTwoWayMergeMapPatchUsingLookupPatchMeta(originalMap, modifiedMap, schema, fns...) if err != nil { return nil, err } @@ -118,15 +128,19 @@ func CreateTwoWayMergePatch(original, modified []byte, dataStruct interface{}, f // encoded JSONMap. // The serialized version of the map can then be passed to StrategicMergeMapPatch. func CreateTwoWayMergeMapPatch(original, modified JSONMap, dataStruct interface{}, fns ...mergepatch.PreconditionFunc) (JSONMap, error) { - t, err := getTagStructType(dataStruct) + schema, err := NewPatchMetaFromStruct(dataStruct) if err != nil { return nil, err } + return CreateTwoWayMergeMapPatchUsingLookupPatchMeta(original, modified, schema, fns...) +} + +func CreateTwoWayMergeMapPatchUsingLookupPatchMeta(original, modified JSONMap, schema LookupPatchMeta, fns ...mergepatch.PreconditionFunc) (JSONMap, error) { diffOptions := DiffOptions{ SetElementOrder: true, } - patchMap, err := diffMaps(original, modified, t, diffOptions) + patchMap, err := diffMaps(original, modified, schema, diffOptions) if err != nil { return nil, err } @@ -151,12 +165,9 @@ func CreateTwoWayMergeMapPatch(original, modified JSONMap, dataStruct interface{ // - IFF list of primitives && merge strategy - use parallel deletion list // - IFF list of maps or primitives with replace strategy (default) - set patch value to the value in modified // - Build $retainKeys directive for fields with retainKeys patch strategy -func diffMaps(original, modified map[string]interface{}, t reflect.Type, diffOptions DiffOptions) (map[string]interface{}, error) { +func diffMaps(original, modified map[string]interface{}, schema LookupPatchMeta, diffOptions DiffOptions) (map[string]interface{}, error) { patch := map[string]interface{}{} - // Get the underlying type for pointers - if t.Kind() == reflect.Ptr { - t = t.Elem() - } + // This will be used to build the $retainKeys directive sent in the patch retainKeysList := make([]interface{}, 0, len(modified)) @@ -198,10 +209,10 @@ func diffMaps(original, modified map[string]interface{}, t reflect.Type, diffOpt switch originalValueTyped := originalValue.(type) { case map[string]interface{}: modifiedValueTyped := modifiedValue.(map[string]interface{}) - err = handleMapDiff(key, originalValueTyped, modifiedValueTyped, patch, t, diffOptions) + err = handleMapDiff(key, originalValueTyped, modifiedValueTyped, patch, schema, diffOptions) case []interface{}: modifiedValueTyped := modifiedValue.([]interface{}) - err = handleSliceDiff(key, originalValueTyped, modifiedValueTyped, patch, t, diffOptions) + err = handleSliceDiff(key, originalValueTyped, modifiedValueTyped, patch, schema, diffOptions) default: replacePatchFieldIfNotEqual(key, originalValue, modifiedValue, patch, diffOptions) } @@ -248,8 +259,9 @@ func handleDirectiveMarker(key string, originalValue, modifiedValue interface{}, // patch is the patch map that contains key and the updated value, and it is the parent of originalValue, modifiedValue // diffOptions contains multiple options to control how we do the diff. 
func handleMapDiff(key string, originalValue, modifiedValue, patch map[string]interface{}, - t reflect.Type, diffOptions DiffOptions) error { - fieldType, fieldPatchStrategies, _, err := forkedjson.LookupPatchMetadata(t, key) + schema LookupPatchMeta, diffOptions DiffOptions) error { + subschema, patchMeta, err := schema.LookupPatchMetadataForStruct(key) + if err != nil { // We couldn't look up metadata for the field // If the values are identical, this doesn't matter, no patch is needed @@ -259,7 +271,7 @@ func handleMapDiff(key string, originalValue, modifiedValue, patch map[string]in // Otherwise, return the error return err } - retainKeys, patchStrategy, err := extractRetainKeysPatchStrategy(fieldPatchStrategies) + retainKeys, patchStrategy, err := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) if err != nil { return err } @@ -271,7 +283,7 @@ func handleMapDiff(key string, originalValue, modifiedValue, patch map[string]in patch[key] = modifiedValue } default: - patchValue, err := diffMaps(originalValue, modifiedValue, fieldType, diffOptions) + patchValue, err := diffMaps(originalValue, modifiedValue, subschema, diffOptions) if err != nil { return err } @@ -290,8 +302,8 @@ func handleMapDiff(key string, originalValue, modifiedValue, patch map[string]in // patch is the patch map that contains key and the updated value, and it is the parent of originalValue, modifiedValue // diffOptions contains multiple options to control how we do the diff. func handleSliceDiff(key string, originalValue, modifiedValue []interface{}, patch map[string]interface{}, - t reflect.Type, diffOptions DiffOptions) error { - fieldType, fieldPatchStrategies, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadata(t, key) + schema LookupPatchMeta, diffOptions DiffOptions) error { + subschema, patchMeta, err := schema.LookupPatchMetadataForSlice(key) if err != nil { // We couldn't look up metadata for the field // If the values are identical, this doesn't matter, no patch is needed @@ -301,7 +313,7 @@ func handleSliceDiff(key string, originalValue, modifiedValue []interface{}, pat // Otherwise, return the error return err } - retainKeys, patchStrategy, err := extractRetainKeysPatchStrategy(fieldPatchStrategies) + retainKeys, patchStrategy, err := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) if err != nil { return err } @@ -309,7 +321,7 @@ func handleSliceDiff(key string, originalValue, modifiedValue []interface{}, pat // Merge the 2 slices using mergePatchKey case mergeDirective: diffOptions.BuildRetainKeysDirective = retainKeys - addList, deletionList, setOrderList, err := diffLists(originalValue, modifiedValue, fieldType.Elem(), fieldPatchMergeKey, diffOptions) + addList, deletionList, setOrderList, err := diffLists(originalValue, modifiedValue, subschema, patchMeta.GetPatchMergeKey(), diffOptions) if err != nil { return err } @@ -515,6 +527,9 @@ func normalizeSliceOrder(toSort, order []interface{}, mergeKey string, kind refl return nil, err } toSort, toDelete, err = extractToDeleteItems(toSort) + if err != nil { + return nil, err + } } sort.SliceStable(toSort, func(i, j int) bool { @@ -533,7 +548,7 @@ func normalizeSliceOrder(toSort, order []interface{}, mergeKey string, kind refl // another list to set the order of the list // Only list of primitives with merge strategy will generate a parallel deletion list. // These two lists should yield modified when applied to original, for lists with merge semantics. 
-func diffLists(original, modified []interface{}, t reflect.Type, mergeKey string, diffOptions DiffOptions) ([]interface{}, []interface{}, []interface{}, error) { +func diffLists(original, modified []interface{}, schema LookupPatchMeta, mergeKey string, diffOptions DiffOptions) ([]interface{}, []interface{}, []interface{}, error) { if len(original) == 0 { // Both slices are empty - do nothing if len(modified) == 0 || diffOptions.IgnoreChangesAndAdditions { @@ -553,8 +568,14 @@ func diffLists(original, modified []interface{}, t reflect.Type, mergeKey string kind := elementType.Kind() switch kind { case reflect.Map: - patchList, deleteList, err = diffListsOfMaps(original, modified, t, mergeKey, diffOptions) + patchList, deleteList, err = diffListsOfMaps(original, modified, schema, mergeKey, diffOptions) + if err != nil { + return nil, nil, nil, err + } patchList, err = normalizeSliceOrder(patchList, modified, mergeKey, kind) + if err != nil { + return nil, nil, nil, err + } orderSame, err := isOrderSame(original, modified, mergeKey) if err != nil { return nil, nil, nil, err @@ -580,6 +601,9 @@ func diffLists(original, modified []interface{}, t reflect.Type, mergeKey string return nil, nil, nil, mergepatch.ErrNoListOfLists default: patchList, deleteList, err = diffListsOfScalars(original, modified, diffOptions) + if err != nil { + return nil, nil, nil, err + } patchList, err = normalizeSliceOrder(patchList, modified, mergeKey, kind) // generate the setElementOrder list when there are content changes or order changes if diffOptions.SetElementOrder && ((!diffOptions.IgnoreDeletions && len(deleteList) > 0) || @@ -690,15 +714,15 @@ func compareListValuesAtIndex(list1Inbounds, list2Inbounds bool, list1Value, lis // diffListsOfMaps takes a pair of lists and // returns a (recursive) strategic merge patch list contains additions and changes and // a deletion list contains deletions -func diffListsOfMaps(original, modified []interface{}, t reflect.Type, mergeKey string, diffOptions DiffOptions) ([]interface{}, []interface{}, error) { +func diffListsOfMaps(original, modified []interface{}, schema LookupPatchMeta, mergeKey string, diffOptions DiffOptions) ([]interface{}, []interface{}, error) { patch := make([]interface{}, 0, len(modified)) deletionList := make([]interface{}, 0, len(original)) - originalSorted, err := sortMergeListsByNameArray(original, t, mergeKey, false) + originalSorted, err := sortMergeListsByNameArray(original, schema, mergeKey, false) if err != nil { return nil, nil, err } - modifiedSorted, err := sortMergeListsByNameArray(modified, t, mergeKey, false) + modifiedSorted, err := sortMergeListsByNameArray(modified, schema, mergeKey, false) if err != nil { return nil, nil, err } @@ -733,7 +757,7 @@ func diffListsOfMaps(original, modified []interface{}, t reflect.Type, mergeKey switch { case bothInBounds && ItemMatchesOriginalAndModifiedSlice(originalElementMergeKeyValueString, modifiedElementMergeKeyValueString): // Merge key values are equal, so recurse - patchValue, err := diffMaps(originalElement, modifiedElement, t, diffOptions) + patchValue, err := diffMaps(originalElement, modifiedElement, schema, diffOptions) if err != nil { return nil, nil, err } @@ -786,6 +810,15 @@ func getMapAndMergeKeyValueByIndex(index int, mergeKey string, listOfMaps []inte // must be json encoded content. A patch can be created from an original and a modified document // by calling CreateStrategicMergePatch. 
func StrategicMergePatch(original, patch []byte, dataStruct interface{}) ([]byte, error) { + schema, err := NewPatchMetaFromStruct(dataStruct) + if err != nil { + return nil, err + } + + return StrategicMergePatchUsingLookupPatchMeta(original, patch, schema) +} + +func StrategicMergePatchUsingLookupPatchMeta(original, patch []byte, schema LookupPatchMeta) ([]byte, error) { originalMap, err := handleUnmarshal(original) if err != nil { return nil, err @@ -795,7 +828,7 @@ func StrategicMergePatch(original, patch []byte, dataStruct interface{}) ([]byte return nil, err } - result, err := StrategicMergeMapPatch(originalMap, patchMap, dataStruct) + result, err := StrategicMergeMapPatchUsingLookupPatchMeta(originalMap, patchMap, schema) if err != nil { return nil, err } @@ -816,38 +849,35 @@ func handleUnmarshal(j []byte) (map[string]interface{}, error) { return m, nil } -// StrategicMergePatch applies a strategic merge patch. The original and patch documents +// StrategicMergeMapPatch applies a strategic merge patch. The original and patch documents // must be JSONMap. A patch can be created from an original and modified document by // calling CreateTwoWayMergeMapPatch. // Warning: the original and patch JSONMap objects are mutated by this function and should not be reused. func StrategicMergeMapPatch(original, patch JSONMap, dataStruct interface{}) (JSONMap, error) { - t, err := getTagStructType(dataStruct) + schema, err := NewPatchMetaFromStruct(dataStruct) if err != nil { return nil, err } - mergeOptions := MergeOptions{ - MergeParallelList: true, - IgnoreUnmatchedNulls: true, - } - return mergeMap(original, patch, t, mergeOptions) -} -func getTagStructType(dataStruct interface{}) (reflect.Type, error) { - if dataStruct == nil { - return nil, mergepatch.ErrBadArgKind(struct{}{}, nil) - } + // We need the go struct tags `patchMergeKey` and `patchStrategy` for fields that support a strategic merge patch. + // For native resources, we can easily figure out these tags since we know the fields. - t := reflect.TypeOf(dataStruct) - // Get the underlying type for pointers - if t.Kind() == reflect.Ptr { - t = t.Elem() + // Because custom resources are decoded as Unstructured and because we're missing the metadata about how to handle + // each field in a strategic merge patch, we can't find the go struct tags. Hence, we can't easily do a strategic merge + // for custom resources. So we should fail fast and return an error. + if _, ok := dataStruct.(*unstructured.Unstructured); ok { + return nil, mergepatch.ErrUnsupportedStrategicMergePatchFormat } - if t.Kind() != reflect.Struct { - return nil, mergepatch.ErrBadArgKind(struct{}{}, dataStruct) - } + return StrategicMergeMapPatchUsingLookupPatchMeta(original, patch, schema) +} - return t, nil +func StrategicMergeMapPatchUsingLookupPatchMeta(original, patch JSONMap, schema LookupPatchMeta) (JSONMap, error) { + mergeOptions := MergeOptions{ + MergeParallelList: true, + IgnoreUnmatchedNulls: true, + } + return mergeMap(original, patch, schema, mergeOptions) } // handleDirectiveInMergeMap handles the patch directive when merging 2 maps. @@ -1054,8 +1084,8 @@ func applyRetainKeysDirective(original, patch map[string]interface{}, options Me // Then, sort them by the relative order in setElementOrder, patch list and live list. // The precedence is $setElementOrder > order in patch list > order in live list. // This function will delete the item after merging it to prevent process it again in the future. 
-// Ref: https://git.k8s.io/community/contributors/design-proposals/preserve-order-in-strategic-merge-patch.md -func mergePatchIntoOriginal(original, patch map[string]interface{}, t reflect.Type, mergeOptions MergeOptions) error { +// Ref: https://git.k8s.io/community/contributors/design-proposals/cli/preserve-order-in-strategic-merge-patch.md +func mergePatchIntoOriginal(original, patch map[string]interface{}, schema LookupPatchMeta, mergeOptions MergeOptions) error { for key, patchV := range patch { // Do nothing if there is no ordering directive if !strings.HasPrefix(key, setElementOrderDirectivePrefix) { @@ -1082,9 +1112,9 @@ func mergePatchIntoOriginal(original, patch map[string]interface{}, t reflect.Ty var ( ok bool originalFieldValue, patchFieldValue, merged []interface{} - patchStrategy, mergeKey string - patchStrategies []string - fieldType reflect.Type + patchStrategy string + patchMeta PatchMeta + subschema LookupPatchMeta ) typedSetElementOrderList, ok := setElementOrderInPatch.([]interface{}) if !ok { @@ -1110,16 +1140,16 @@ func mergePatchIntoOriginal(original, patch map[string]interface{}, t reflect.Ty return mergepatch.ErrBadArgType(patchFieldValue, patchList) } } - fieldType, patchStrategies, mergeKey, err = forkedjson.LookupPatchMetadata(t, originalKey) + subschema, patchMeta, err = schema.LookupPatchMetadataForSlice(originalKey) if err != nil { return err } - _, patchStrategy, err = extractRetainKeysPatchStrategy(patchStrategies) + _, patchStrategy, err = extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) if err != nil { return err } // Check for consistency between the element order list and the field it applies to - err = validatePatchWithSetOrderList(patchFieldValue, typedSetElementOrderList, mergeKey) + err = validatePatchWithSetOrderList(patchFieldValue, typedSetElementOrderList, patchMeta.GetPatchMergeKey()) if err != nil { return err } @@ -1132,8 +1162,8 @@ func mergePatchIntoOriginal(original, patch map[string]interface{}, t reflect.Ty // list was added merged = patchFieldValue case foundOriginal && foundPatch: - merged, err = mergeSliceHandler(originalList, patchList, fieldType, - patchStrategy, mergeKey, false, mergeOptions) + merged, err = mergeSliceHandler(originalList, patchList, subschema, + patchStrategy, patchMeta.GetPatchMergeKey(), false, mergeOptions) if err != nil { return err } @@ -1143,13 +1173,13 @@ func mergePatchIntoOriginal(original, patch map[string]interface{}, t reflect.Ty // Split all items into patch items and server-only items and then enforce the order. var patchItems, serverOnlyItems []interface{} - if len(mergeKey) == 0 { + if len(patchMeta.GetPatchMergeKey()) == 0 { // Primitives doesn't need merge key to do partitioning. patchItems, serverOnlyItems = partitionPrimitivesByPresentInList(merged, typedSetElementOrderList) } else { // Maps need merge key to do partitioning. 
- patchItems, serverOnlyItems, err = partitionMapsByPresentInList(merged, typedSetElementOrderList, mergeKey) + patchItems, serverOnlyItems, err = partitionMapsByPresentInList(merged, typedSetElementOrderList, patchMeta.GetPatchMergeKey()) if err != nil { return err } @@ -1163,7 +1193,7 @@ func mergePatchIntoOriginal(original, patch map[string]interface{}, t reflect.Ty // normalize merged list // typedSetElementOrderList contains all the relative order in typedPatchList, // so don't need to use typedPatchList - both, err := normalizeElementOrder(patchItems, serverOnlyItems, typedSetElementOrderList, originalFieldValue, mergeKey, kind) + both, err := normalizeElementOrder(patchItems, serverOnlyItems, typedSetElementOrderList, originalFieldValue, patchMeta.GetPatchMergeKey(), kind) if err != nil { return err } @@ -1225,7 +1255,7 @@ func partitionMapsByPresentInList(original, partitionBy []interface{}, mergeKey // If patch contains any null field (e.g. field_1: null) that is not // present in original, then to propagate it to the end result use // mergeOptions.IgnoreUnmatchedNulls == false. -func mergeMap(original, patch map[string]interface{}, t reflect.Type, mergeOptions MergeOptions) (map[string]interface{}, error) { +func mergeMap(original, patch map[string]interface{}, schema LookupPatchMeta, mergeOptions MergeOptions) (map[string]interface{}, error) { if v, ok := patch[directiveMarker]; ok { return handleDirectiveInMergeMap(v, patch) } @@ -1245,7 +1275,7 @@ func mergeMap(original, patch map[string]interface{}, t reflect.Type, mergeOptio // When not merging the directive, it will make sure $setElementOrder list exist only in original. // When merging the directive, it will process $setElementOrder and its patch list together. // This function will delete the merged elements from patch so they will not be reprocessed - err = mergePatchIntoOriginal(original, patch, t, mergeOptions) + err = mergePatchIntoOriginal(original, patch, schema, mergeOptions) if err != nil { return nil, err } @@ -1283,11 +1313,6 @@ func mergeMap(original, patch map[string]interface{}, t reflect.Type, mergeOptio continue } - // If the data type is a pointer, resolve the element. - if t.Kind() == reflect.Ptr { - t = t.Elem() - } - originalType := reflect.TypeOf(original[k]) patchType := reflect.TypeOf(patchV) if originalType != patchType { @@ -1295,22 +1320,27 @@ func mergeMap(original, patch map[string]interface{}, t reflect.Type, mergeOptio continue } // If they're both maps or lists, recurse into the value. - // First find the fieldPatchStrategy and fieldPatchMergeKey. 
- fieldType, fieldPatchStrategies, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadata(t, k) - if err != nil { - return nil, err - } - _, patchStrategy, err := extractRetainKeysPatchStrategy(fieldPatchStrategies) - if err != nil { - return nil, err - } - switch originalType.Kind() { case reflect.Map: - - original[k], err = mergeMapHandler(original[k], patchV, fieldType, patchStrategy, mergeOptions) + subschema, patchMeta, err := schema.LookupPatchMetadataForStruct(k) + if err != nil { + return nil, err + } + _, patchStrategy, err := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) + if err != nil { + return nil, err + } + original[k], err = mergeMapHandler(original[k], patchV, subschema, patchStrategy, mergeOptions) case reflect.Slice: - original[k], err = mergeSliceHandler(original[k], patchV, fieldType, patchStrategy, fieldPatchMergeKey, isDeleteList, mergeOptions) + subschema, patchMeta, err := schema.LookupPatchMetadataForSlice(k) + if err != nil { + return nil, err + } + _, patchStrategy, err := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) + if err != nil { + return nil, err + } + original[k], err = mergeSliceHandler(original[k], patchV, subschema, patchStrategy, patchMeta.GetPatchMergeKey(), isDeleteList, mergeOptions) default: original[k] = patchV } @@ -1323,7 +1353,7 @@ func mergeMap(original, patch map[string]interface{}, t reflect.Type, mergeOptio // mergeMapHandler handles how to merge `patchV` whose key is `key` with `original` respecting // fieldPatchStrategy and mergeOptions. -func mergeMapHandler(original, patch interface{}, fieldType reflect.Type, +func mergeMapHandler(original, patch interface{}, schema LookupPatchMeta, fieldPatchStrategy string, mergeOptions MergeOptions) (map[string]interface{}, error) { typedOriginal, typedPatch, err := mapTypeAssertion(original, patch) if err != nil { @@ -1331,7 +1361,7 @@ func mergeMapHandler(original, patch interface{}, fieldType reflect.Type, } if fieldPatchStrategy != replaceDirective { - return mergeMap(typedOriginal, typedPatch, fieldType, mergeOptions) + return mergeMap(typedOriginal, typedPatch, schema, mergeOptions) } else { return typedPatch, nil } @@ -1339,7 +1369,7 @@ func mergeMapHandler(original, patch interface{}, fieldType reflect.Type, // mergeSliceHandler handles how to merge `patchV` whose key is `key` with `original` respecting // fieldPatchStrategy, fieldPatchMergeKey, isDeleteList and mergeOptions. -func mergeSliceHandler(original, patch interface{}, fieldType reflect.Type, +func mergeSliceHandler(original, patch interface{}, schema LookupPatchMeta, fieldPatchStrategy, fieldPatchMergeKey string, isDeleteList bool, mergeOptions MergeOptions) ([]interface{}, error) { typedOriginal, typedPatch, err := sliceTypeAssertion(original, patch) if err != nil { @@ -1347,8 +1377,7 @@ func mergeSliceHandler(original, patch interface{}, fieldType reflect.Type, } if fieldPatchStrategy == mergeDirective { - elemType := fieldType.Elem() - return mergeSlice(typedOriginal, typedPatch, elemType, fieldPatchMergeKey, mergeOptions, isDeleteList) + return mergeSlice(typedOriginal, typedPatch, schema, fieldPatchMergeKey, mergeOptions, isDeleteList) } else { return typedPatch, nil } @@ -1357,7 +1386,7 @@ func mergeSliceHandler(original, patch interface{}, fieldType reflect.Type, // Merge two slices together. Note: This may modify both the original slice and // the patch because getting a deep copy of a slice in golang is highly // non-trivial. 
-func mergeSlice(original, patch []interface{}, elemType reflect.Type, mergeKey string, mergeOptions MergeOptions, isDeleteList bool) ([]interface{}, error) { +func mergeSlice(original, patch []interface{}, schema LookupPatchMeta, mergeKey string, mergeOptions MergeOptions, isDeleteList bool) ([]interface{}, error) { if len(original) == 0 && len(patch) == 0 { return original, nil } @@ -1382,7 +1411,7 @@ func mergeSlice(original, patch []interface{}, elemType reflect.Type, mergeKey s } else { if mergeKey == "" { - return nil, fmt.Errorf("cannot merge lists without merge key for type %s", elemType.Kind().String()) + return nil, fmt.Errorf("cannot merge lists without merge key for %s", schema.Name()) } original, patch, err = mergeSliceWithSpecialElements(original, patch, mergeKey) @@ -1390,7 +1419,7 @@ func mergeSlice(original, patch []interface{}, elemType reflect.Type, mergeKey s return nil, err } - merged, err = mergeSliceWithoutSpecialElements(original, patch, mergeKey, elemType, mergeOptions) + merged, err = mergeSliceWithoutSpecialElements(original, patch, mergeKey, schema, mergeOptions) if err != nil { return nil, err } @@ -1468,7 +1497,7 @@ func deleteMatchingEntries(original []interface{}, mergeKey string, mergeValue i // mergeSliceWithoutSpecialElements merges slices with non-special elements. // original and patch must be slices of maps, they should be checked before calling this function. -func mergeSliceWithoutSpecialElements(original, patch []interface{}, mergeKey string, elemType reflect.Type, mergeOptions MergeOptions) ([]interface{}, error) { +func mergeSliceWithoutSpecialElements(original, patch []interface{}, mergeKey string, schema LookupPatchMeta, mergeOptions MergeOptions) ([]interface{}, error) { for _, v := range patch { typedV := v.(map[string]interface{}) mergeValue, ok := typedV[mergeKey] @@ -1487,7 +1516,7 @@ func mergeSliceWithoutSpecialElements(original, patch []interface{}, mergeKey st var mergedMaps interface{} var err error // Merge into original. - mergedMaps, err = mergeMap(originalMap, typedV, elemType, mergeOptions) + mergedMaps, err = mergeMap(originalMap, typedV, schema, mergeOptions) if err != nil { return nil, err } @@ -1520,7 +1549,7 @@ func findMapInSliceBasedOnKeyValue(m []interface{}, key string, value interface{ for k, v := range m { typedV, ok := v.(map[string]interface{}) if !ok { - return nil, 0, false, fmt.Errorf("value for key %v is not a map.", k) + return nil, 0, false, fmt.Errorf("value for key %v is not a map", k) } valueToMatch, ok := typedV[key] @@ -1536,14 +1565,14 @@ func findMapInSliceBasedOnKeyValue(m []interface{}, key string, value interface{ // by key. This is needed by tests because in JSON, list order is significant, // but in Strategic Merge Patch, merge lists do not have significant order. // Sorting the lists allows for order-insensitive comparison of patched maps. -func sortMergeListsByName(mapJSON []byte, dataStruct interface{}) ([]byte, error) { +func sortMergeListsByName(mapJSON []byte, schema LookupPatchMeta) ([]byte, error) { var m map[string]interface{} err := json.Unmarshal(mapJSON, &m) if err != nil { - return nil, err + return nil, mergepatch.ErrBadJSONDoc } - newM, err := sortMergeListsByNameMap(m, reflect.TypeOf(dataStruct)) + newM, err := sortMergeListsByNameMap(m, schema) if err != nil { return nil, err } @@ -1552,7 +1581,7 @@ func sortMergeListsByName(mapJSON []byte, dataStruct interface{}) ([]byte, error } // Function sortMergeListsByNameMap recursively sorts the merge lists by its mergeKey in a map. 
-func sortMergeListsByNameMap(s map[string]interface{}, t reflect.Type) (map[string]interface{}, error) { +func sortMergeListsByNameMap(s map[string]interface{}, schema LookupPatchMeta) (map[string]interface{}, error) { newS := map[string]interface{}{} for k, v := range s { if k == retainKeysDirective { @@ -1573,26 +1602,29 @@ func sortMergeListsByNameMap(s map[string]interface{}, t reflect.Type) (map[stri return nil, mergepatch.ErrBadPatchFormatForSetElementOrderList } } else if k != directiveMarker { - fieldType, fieldPatchStrategies, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadata(t, k) - if err != nil { - return nil, err - } - _, patchStrategy, err := extractRetainKeysPatchStrategy(fieldPatchStrategies) - if err != nil { - return nil, err - } - - // If v is a map or a merge slice, recurse. - if typedV, ok := v.(map[string]interface{}); ok { - var err error - v, err = sortMergeListsByNameMap(typedV, fieldType) + // recurse for map and slice. + switch typedV := v.(type) { + case map[string]interface{}: + subschema, _, err := schema.LookupPatchMetadataForStruct(k) + if err != nil { + return nil, err + } + v, err = sortMergeListsByNameMap(typedV, subschema) + if err != nil { + return nil, err + } + case []interface{}: + subschema, patchMeta, err := schema.LookupPatchMetadataForSlice(k) + if err != nil { + return nil, err + } + _, patchStrategy, err := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) if err != nil { return nil, err } - } else if typedV, ok := v.([]interface{}); ok { if patchStrategy == mergeDirective { var err error - v, err = sortMergeListsByNameArray(typedV, fieldType.Elem(), fieldPatchMergeKey, true) + v, err = sortMergeListsByNameArray(typedV, subschema, patchMeta.GetPatchMergeKey(), true) if err != nil { return nil, err } @@ -1607,7 +1639,7 @@ func sortMergeListsByNameMap(s map[string]interface{}, t reflect.Type) (map[stri } // Function sortMergeListsByNameMap recursively sorts the merge lists by its mergeKey in an array. -func sortMergeListsByNameArray(s []interface{}, elemType reflect.Type, mergeKey string, recurse bool) ([]interface{}, error) { +func sortMergeListsByNameArray(s []interface{}, schema LookupPatchMeta, mergeKey string, recurse bool) ([]interface{}, error) { if len(s) == 0 { return s, nil } @@ -1630,7 +1662,7 @@ func sortMergeListsByNameArray(s []interface{}, elemType reflect.Type, mergeKey for _, elem := range s { if recurse { typedElem := elem.(map[string]interface{}) - newElem, err := sortMergeListsByNameMap(typedElem, elemType) + newElem, err := sortMergeListsByNameMap(typedElem, schema) if err != nil { return nil, err } @@ -1776,18 +1808,13 @@ func sliceElementType(slices ...[]interface{}) (reflect.Type, error) { // objects overlap with different values in any key. All keys are required to be // strings. Since patches of the same Type have congruent keys, this is valid // for multiple patch types. This method supports strategic merge patch semantics. 
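A toy version of the conflict rule described above, independent of the schema plumbing: a conflict exists only when both patches set the same key to different values, with nested maps checked recursively:

package main

import (
	"fmt"
	"reflect"
)

// haveConflicts is an illustrative sketch of the conflict check: keys touched
// by only one patch never conflict, nested maps recurse, and everything else
// is compared by deep equality.
func haveConflicts(left, right map[string]interface{}) bool {
	for key, lv := range left {
		rv, ok := right[key]
		if !ok {
			continue // only one patch touches this key: no conflict
		}
		lm, lIsMap := lv.(map[string]interface{})
		rm, rIsMap := rv.(map[string]interface{})
		if lIsMap && rIsMap {
			if haveConflicts(lm, rm) {
				return true
			}
			continue
		}
		if !reflect.DeepEqual(lv, rv) {
			return true
		}
	}
	return false
}

func main() {
	a := map[string]interface{}{"metadata": map[string]interface{}{"labels": map[string]interface{}{"app": "web"}}}
	b := map[string]interface{}{"metadata": map[string]interface{}{"labels": map[string]interface{}{"app": "db"}}}
	fmt.Println(haveConflicts(a, b)) // true: both set metadata.labels.app, with different values
}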
-func MergingMapsHaveConflicts(left, right map[string]interface{}, dataStruct interface{}) (bool, error) { - t, err := getTagStructType(dataStruct) - if err != nil { - return true, err - } - - return mergingMapFieldsHaveConflicts(left, right, t, "", "") +func MergingMapsHaveConflicts(left, right map[string]interface{}, schema LookupPatchMeta) (bool, error) { + return mergingMapFieldsHaveConflicts(left, right, schema, "", "") } func mergingMapFieldsHaveConflicts( left, right interface{}, - fieldType reflect.Type, + schema LookupPatchMeta, fieldPatchStrategy, fieldPatchMergeKey string, ) (bool, error) { switch leftType := left.(type) { @@ -1818,15 +1845,14 @@ func mergingMapFieldsHaveConflicts( return false, nil } // Check the individual keys. - return mapsHaveConflicts(leftType, rightType, fieldType) + return mapsHaveConflicts(leftType, rightType, schema) case []interface{}: rightType, ok := right.([]interface{}) if !ok { return true, nil } - return slicesHaveConflicts(leftType, rightType, fieldType, fieldPatchStrategy, fieldPatchMergeKey) - + return slicesHaveConflicts(leftType, rightType, schema, fieldPatchStrategy, fieldPatchMergeKey) case string, float64, bool, int, int64, nil: return !reflect.DeepEqual(left, right), nil default: @@ -1834,21 +1860,37 @@ func mergingMapFieldsHaveConflicts( } } -func mapsHaveConflicts(typedLeft, typedRight map[string]interface{}, structType reflect.Type) (bool, error) { +func mapsHaveConflicts(typedLeft, typedRight map[string]interface{}, schema LookupPatchMeta) (bool, error) { for key, leftValue := range typedLeft { if key != directiveMarker && key != retainKeysDirective { if rightValue, ok := typedRight[key]; ok { - fieldType, fieldPatchStrategies, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadata(structType, key) - if err != nil { - return true, err - } - _, patchStrategy, err := extractRetainKeysPatchStrategy(fieldPatchStrategies) - if err != nil { - return true, err + var subschema LookupPatchMeta + var patchMeta PatchMeta + var patchStrategy string + var err error + switch leftValue.(type) { + case []interface{}: + subschema, patchMeta, err = schema.LookupPatchMetadataForSlice(key) + if err != nil { + return true, err + } + _, patchStrategy, err = extractRetainKeysPatchStrategy(patchMeta.patchStrategies) + if err != nil { + return true, err + } + case map[string]interface{}: + subschema, patchMeta, err = schema.LookupPatchMetadataForStruct(key) + if err != nil { + return true, err + } + _, patchStrategy, err = extractRetainKeysPatchStrategy(patchMeta.patchStrategies) + if err != nil { + return true, err + } } if hasConflicts, err := mergingMapFieldsHaveConflicts(leftValue, rightValue, - fieldType, patchStrategy, fieldPatchMergeKey); hasConflicts { + subschema, patchStrategy, patchMeta.GetPatchMergeKey()); hasConflicts { return true, err } } @@ -1860,7 +1902,7 @@ func mapsHaveConflicts(typedLeft, typedRight map[string]interface{}, structType func slicesHaveConflicts( typedLeft, typedRight []interface{}, - fieldType reflect.Type, + schema LookupPatchMeta, fieldPatchStrategy, fieldPatchMergeKey string, ) (bool, error) { elementType, err := sliceElementType(typedLeft, typedRight) @@ -1868,7 +1910,6 @@ func slicesHaveConflicts( return true, err } - valueType := fieldType.Elem() if fieldPatchStrategy == mergeDirective { // Merging lists of scalars have no conflicts by definition // So we only need to check further if the elements are maps @@ -1887,7 +1928,7 @@ func slicesHaveConflicts( return true, err } - return mapsOfMapsHaveConflicts(leftMap, 
rightMap, valueType) + return mapsOfMapsHaveConflicts(leftMap, rightMap, schema) } // Either we don't have type information, or these are non-merging lists @@ -1905,7 +1946,7 @@ func slicesHaveConflicts( // Compare the slices element by element in order // This test will fail if the slices are not sorted for i := range typedLeft { - if hasConflicts, err := mergingMapFieldsHaveConflicts(typedLeft[i], typedRight[i], valueType, "", ""); hasConflicts { + if hasConflicts, err := mergingMapFieldsHaveConflicts(typedLeft[i], typedRight[i], schema, "", ""); hasConflicts { return true, err } } @@ -1932,10 +1973,10 @@ func sliceOfMapsToMapOfMaps(slice []interface{}, mergeKey string) (map[string]in return result, nil } -func mapsOfMapsHaveConflicts(typedLeft, typedRight map[string]interface{}, structType reflect.Type) (bool, error) { +func mapsOfMapsHaveConflicts(typedLeft, typedRight map[string]interface{}, schema LookupPatchMeta) (bool, error) { for key, leftValue := range typedLeft { if rightValue, ok := typedRight[key]; ok { - if hasConflicts, err := mergingMapFieldsHaveConflicts(leftValue, rightValue, structType, "", ""); hasConflicts { + if hasConflicts, err := mergingMapFieldsHaveConflicts(leftValue, rightValue, schema, "", ""); hasConflicts { return true, err } } @@ -1955,7 +1996,7 @@ func mapsOfMapsHaveConflicts(typedLeft, typedRight map[string]interface{}, struc // in a way that is different from how it is changed in current (e.g., deleting it, changing its // value). We also propagate values fields that do not exist in original but are explicitly // defined in modified. -func CreateThreeWayMergePatch(original, modified, current []byte, dataStruct interface{}, overwrite bool, fns ...mergepatch.PreconditionFunc) ([]byte, error) { +func CreateThreeWayMergePatch(original, modified, current []byte, schema LookupPatchMeta, overwrite bool, fns ...mergepatch.PreconditionFunc) ([]byte, error) { originalMap := map[string]interface{}{} if len(original) > 0 { if err := json.Unmarshal(original, &originalMap); err != nil { @@ -1977,11 +2018,6 @@ func CreateThreeWayMergePatch(original, modified, current []byte, dataStruct int } } - t, err := getTagStructType(dataStruct) - if err != nil { - return nil, err - } - // The patch is the difference from current to modified without deletions, plus deletions // from original to modified. 
To find it, we compute deletions, which are the deletions from // original to modified, and delta, which is the difference from current to modified without @@ -1990,7 +2026,7 @@ func CreateThreeWayMergePatch(original, modified, current []byte, dataStruct int IgnoreDeletions: true, SetElementOrder: true, } - deltaMap, err := diffMaps(currentMap, modifiedMap, t, deltaMapDiffOptions) + deltaMap, err := diffMaps(currentMap, modifiedMap, schema, deltaMapDiffOptions) if err != nil { return nil, err } @@ -1998,13 +2034,13 @@ func CreateThreeWayMergePatch(original, modified, current []byte, dataStruct int SetElementOrder: true, IgnoreChangesAndAdditions: true, } - deletionsMap, err := diffMaps(originalMap, modifiedMap, t, deletionsMapDiffOptions) + deletionsMap, err := diffMaps(originalMap, modifiedMap, schema, deletionsMapDiffOptions) if err != nil { return nil, err } mergeOptions := MergeOptions{} - patchMap, err := mergeMap(deletionsMap, deltaMap, t, mergeOptions) + patchMap, err := mergeMap(deletionsMap, deltaMap, schema, mergeOptions) if err != nil { return nil, err } @@ -2020,12 +2056,12 @@ func CreateThreeWayMergePatch(original, modified, current []byte, dataStruct int // then return a conflict error. if !overwrite { changeMapDiffOptions := DiffOptions{} - changedMap, err := diffMaps(originalMap, currentMap, t, changeMapDiffOptions) + changedMap, err := diffMaps(originalMap, currentMap, schema, changeMapDiffOptions) if err != nil { return nil, err } - hasConflicts, err := MergingMapsHaveConflicts(patchMap, changedMap, dataStruct) + hasConflicts, err := MergingMapsHaveConflicts(patchMap, changedMap, schema) if err != nil { return nil, err } diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/types.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/types.go new file mode 100644 index 000000000000..f84d65aacb3d --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/types.go @@ -0,0 +1,193 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package strategicpatch + +import ( + "errors" + "strings" + + "k8s.io/apimachinery/pkg/util/mergepatch" + openapi "k8s.io/kube-openapi/pkg/util/proto" +) + +const ( + patchStrategyOpenapiextensionKey = "x-kubernetes-patch-strategy" + patchMergeKeyOpenapiextensionKey = "x-kubernetes-patch-merge-key" +) + +type LookupPatchItem interface { + openapi.SchemaVisitor + + Error() error + Path() *openapi.Path +} + +type kindItem struct { + key string + path *openapi.Path + err error + patchmeta PatchMeta + subschema openapi.Schema + hasVisitKind bool +} + +func NewKindItem(key string, path *openapi.Path) *kindItem { + return &kindItem{ + key: key, + path: path, + } +} + +var _ LookupPatchItem = &kindItem{} + +func (item *kindItem) Error() error { + return item.err +} + +func (item *kindItem) Path() *openapi.Path { + return item.path +} + +func (item *kindItem) VisitPrimitive(schema *openapi.Primitive) { + item.err = errors.New("expected kind, but got primitive") +} + +func (item *kindItem) VisitArray(schema *openapi.Array) { + item.err = errors.New("expected kind, but got slice") +} + +func (item *kindItem) VisitMap(schema *openapi.Map) { + item.err = errors.New("expected kind, but got map") +} + +func (item *kindItem) VisitReference(schema openapi.Reference) { + if !item.hasVisitKind { + schema.SubSchema().Accept(item) + } +} + +func (item *kindItem) VisitKind(schema *openapi.Kind) { + subschema, ok := schema.Fields[item.key] + if !ok { + item.err = FieldNotFoundError{Path: schema.GetPath().String(), Field: item.key} + return + } + + mergeKey, patchStrategies, err := parsePatchMetadata(subschema.GetExtensions()) + if err != nil { + item.err = err + return + } + item.patchmeta = PatchMeta{ + patchStrategies: patchStrategies, + patchMergeKey: mergeKey, + } + item.subschema = subschema +} + +type sliceItem struct { + key string + path *openapi.Path + err error + patchmeta PatchMeta + subschema openapi.Schema + hasVisitKind bool +} + +func NewSliceItem(key string, path *openapi.Path) *sliceItem { + return &sliceItem{ + key: key, + path: path, + } +} + +var _ LookupPatchItem = &sliceItem{} + +func (item *sliceItem) Error() error { + return item.err +} + +func (item *sliceItem) Path() *openapi.Path { + return item.path +} + +func (item *sliceItem) VisitPrimitive(schema *openapi.Primitive) { + item.err = errors.New("expected slice, but got primitive") +} + +func (item *sliceItem) VisitArray(schema *openapi.Array) { + if !item.hasVisitKind { + item.err = errors.New("expected visit kind first, then visit array") + } + subschema := schema.SubType + item.subschema = subschema +} + +func (item *sliceItem) VisitMap(schema *openapi.Map) { + item.err = errors.New("expected slice, but got map") +} + +func (item *sliceItem) VisitReference(schema openapi.Reference) { + if !item.hasVisitKind { + schema.SubSchema().Accept(item) + } else { + item.subschema = schema.SubSchema() + } +} + +func (item *sliceItem) VisitKind(schema *openapi.Kind) { + subschema, ok := schema.Fields[item.key] + if !ok { + item.err = FieldNotFoundError{Path: schema.GetPath().String(), Field: item.key} + return + } + + mergeKey, patchStrategies, err := parsePatchMetadata(subschema.GetExtensions()) + if err != nil { + item.err = err + return + } + item.patchmeta = PatchMeta{ + patchStrategies: patchStrategies, + patchMergeKey: mergeKey, + } + item.hasVisitKind = true + subschema.Accept(item) +} + +func parsePatchMetadata(extensions map[string]interface{}) (string, []string, error) { + ps, foundPS := 
extensions[patchStrategyOpenapiextensionKey] + var patchStrategies []string + var mergeKey, patchStrategy string + var ok bool + if foundPS { + patchStrategy, ok = ps.(string) + if ok { + patchStrategies = strings.Split(patchStrategy, ",") + } else { + return "", nil, mergepatch.ErrBadArgType(patchStrategy, ps) + } + } + mk, foundMK := extensions[patchMergeKeyOpenapiextensionKey] + if foundMK { + mergeKey, ok = mk.(string) + if !ok { + return "", nil, mergepatch.ErrBadArgType(mergeKey, mk) + } + } + return mergeKey, patchStrategies, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/uuid/BUILD b/vendor/k8s.io/apimachinery/pkg/util/uuid/BUILD index 5ee2b075b7df..786d8fbe3967 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/uuid/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/util/uuid/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["uuid.go"], + importpath = "k8s.io/apimachinery/pkg/util/uuid", deps = [ "//vendor/github.com/pborman/uuid:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/BUILD b/vendor/k8s.io/apimachinery/pkg/util/validation/BUILD index 252ee5308629..9680c1fa7b7c 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["validation_test.go"], + importpath = "k8s.io/apimachinery/pkg/util/validation", library = ":go_default_library", deps = ["//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library"], ) @@ -16,6 +17,7 @@ go_test( go_library( name = "go_default_library", srcs = ["validation.go"], + importpath = "k8s.io/apimachinery/pkg/util/validation", deps = ["//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library"], ) diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/BUILD b/vendor/k8s.io/apimachinery/pkg/util/validation/field/BUILD index 9138331856da..5508ab94c8e3 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/field/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/BUILD @@ -12,6 +12,7 @@ go_test( "errors_test.go", "path_test.go", ], + importpath = "k8s.io/apimachinery/pkg/util/validation/field", library = ":go_default_library", ) @@ -21,6 +22,7 @@ go_library( "errors.go", "path.go", ], + importpath = "k8s.io/apimachinery/pkg/util/validation/field", deps = [ "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go index 43c779a11b76..31705dee3866 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go @@ -19,6 +19,7 @@ package field import ( "fmt" "reflect" + "strconv" "strings" utilerrors "k8s.io/apimachinery/pkg/util/errors" @@ -175,7 +176,11 @@ func Invalid(field *Path, value interface{}, detail string) *Error { func NotSupported(field *Path, value interface{}, validValues []string) *Error { detail := "" if validValues != nil && len(validValues) > 0 { - detail = "supported values: " + strings.Join(validValues, ", ") + quotedValues := make([]string, len(validValues)) + for i, v := range validValues { + quotedValues[i] = strconv.Quote(v) + } + detail = "supported values: " + strings.Join(quotedValues, ", ") } return &Error{ErrorTypeNotSupported, 
field.String(), value, detail} } diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/BUILD b/vendor/k8s.io/apimachinery/pkg/util/wait/BUILD index dfbfb2a7c078..6eca13c02b57 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/wait/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["wait_test.go"], + importpath = "k8s.io/apimachinery/pkg/util/wait", library = ":go_default_library", deps = ["//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library"], ) @@ -19,6 +20,7 @@ go_library( "doc.go", "wait.go", ], + importpath = "k8s.io/apimachinery/pkg/util/wait", deps = ["//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library"], ) diff --git a/vendor/k8s.io/apimachinery/pkg/util/waitgroup/BUILD b/vendor/k8s.io/apimachinery/pkg/util/waitgroup/BUILD new file mode 100644 index 000000000000..a35c520103b7 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/waitgroup/BUILD @@ -0,0 +1,32 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "waitgroup.go", + ], + importpath = "k8s.io/apimachinery/pkg/util/waitgroup", + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = ["waitgroup_test.go"], + importpath = "k8s.io/apimachinery/pkg/util/waitgroup", + library = ":go_default_library", +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/util/waitgroup/doc.go b/vendor/k8s.io/apimachinery/pkg/util/waitgroup/doc.go new file mode 100644 index 000000000000..6eb7903a73b4 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/waitgroup/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package waitgroup implements SafeWaitGroup wrap of sync.WaitGroup. +// Add with positive delta when waiting will fail, to prevent sync.WaitGroup race issue. +package waitgroup diff --git a/vendor/k8s.io/apimachinery/pkg/util/waitgroup/waitgroup.go b/vendor/k8s.io/apimachinery/pkg/util/waitgroup/waitgroup.go new file mode 100644 index 000000000000..488f563407d2 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/waitgroup/waitgroup.go @@ -0,0 +1,57 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package waitgroup + +import ( + "fmt" + "sync" +) + +// SafeWaitGroup must not be copied after first use. +type SafeWaitGroup struct { + wg sync.WaitGroup + mu sync.RWMutex + // wait indicate whether Wait is called, if true, + // then any Add with positive delta will return error. + wait bool +} + +// Add adds delta, which may be negative, similar to sync.WaitGroup. +// If Add with a positive delta happens after Wait, it will return error, +// which prevent unsafe Add. +func (wg *SafeWaitGroup) Add(delta int) error { + wg.mu.RLock() + defer wg.mu.RUnlock() + if wg.wait && delta > 0 { + return fmt.Errorf("add with postive delta after Wait is forbidden") + } + wg.wg.Add(delta) + return nil +} + +// Done decrements the WaitGroup counter. +func (wg *SafeWaitGroup) Done() { + wg.wg.Done() +} + +// Wait blocks until the WaitGroup counter is zero. +func (wg *SafeWaitGroup) Wait() { + wg.mu.Lock() + wg.wait = true + wg.mu.Unlock() + wg.wg.Wait() +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/yaml/BUILD b/vendor/k8s.io/apimachinery/pkg/util/yaml/BUILD index 433c3eacb6b8..e660edfe7cb8 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/yaml/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/util/yaml/BUILD @@ -9,12 +9,14 @@ load( go_test( name = "go_default_test", srcs = ["decoder_test.go"], + importpath = "k8s.io/apimachinery/pkg/util/yaml", library = ":go_default_library", ) go_library( name = "go_default_library", srcs = ["decoder.go"], + importpath = "k8s.io/apimachinery/pkg/util/yaml", deps = [ "//vendor/github.com/ghodss/yaml:go_default_library", "//vendor/github.com/golang/glog:go_default_library", diff --git a/vendor/k8s.io/apimachinery/pkg/version/BUILD b/vendor/k8s.io/apimachinery/pkg/version/BUILD index 8a548064ae16..bdccf7b3a7a9 100644 --- a/vendor/k8s.io/apimachinery/pkg/version/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/version/BUILD @@ -11,6 +11,7 @@ go_library( "doc.go", "types.go", ], + importpath = "k8s.io/apimachinery/pkg/version", ) filegroup( diff --git a/vendor/k8s.io/apimachinery/pkg/watch/BUILD b/vendor/k8s.io/apimachinery/pkg/watch/BUILD index 6285679f3661..3e850d2dda07 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/watch/BUILD @@ -17,9 +17,9 @@ go_library( "watch.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/apimachinery/pkg/watch", deps = [ "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", @@ -36,6 +36,7 @@ go_test( "streamwatcher_test.go", "watch_test.go", ], + importpath = "k8s.io/apimachinery/pkg/watch_test", deps = [ "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", @@ -47,6 +48,7 @@ go_test( go_test( name = "go_default_test", srcs = ["until_test.go"], + importpath = "k8s.io/apimachinery/pkg/watch", library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go index 322923d4a035..ab590e135313 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go +++ 
b/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go @@ -20,23 +20,6 @@ limitations under the License. package watch -import ( - conversion "k8s.io/apimachinery/pkg/conversion" - reflect "reflect" -) - -// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { - return []conversion.GeneratedDeepCopyFunc{ - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Event).DeepCopyInto(out.(*Event)) - return nil - }, InType: reflect.TypeOf(&Event{})}, - } -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Event) DeepCopyInto(out *Event) { *out = *in diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/BUILD b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/BUILD index 9754f28e9321..4c20d9771d1d 100644 --- a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/BUILD +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/BUILD @@ -9,11 +9,13 @@ load( go_library( name = "go_default_library", srcs = ["fields.go"], + importpath = "k8s.io/apimachinery/third_party/forked/golang/json", ) go_test( name = "go_default_test", srcs = ["fields_test.go"], + importpath = "k8s.io/apimachinery/third_party/forked/golang/json", library = ":go_default_library", ) diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go index 006972ecafd1..8205a4dd1388 100644 --- a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go @@ -26,17 +26,14 @@ const ( // struct field given the struct type and the JSON name of the field. // It returns field type, a slice of patch strategies, merge key and error. // TODO: fix the returned errors to be introspectable. 
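The reflect-based lookup below reads patch metadata directly from struct tags. A small sketch of that mechanism on hypothetical types, using the conventional `patchStrategy` and `patchMergeKey` tag names:

package main

import (
	"fmt"
	"reflect"
)

// PodSpec and Container are hypothetical types carrying the kind of struct
// tags the reflect-based lookup inspects.
type PodSpec struct {
	Containers []Container `json:"containers" patchStrategy:"merge" patchMergeKey:"name"`
}

type Container struct {
	Name  string `json:"name"`
	Image string `json:"image"`
}

func main() {
	t := reflect.TypeOf(PodSpec{})
	field, _ := t.FieldByName("Containers")
	fmt.Println("patchStrategy:", field.Tag.Get("patchStrategy")) // merge
	fmt.Println("patchMergeKey:", field.Tag.Get("patchMergeKey")) // name
}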
-func LookupPatchMetadata(t reflect.Type, jsonField string) ( +func LookupPatchMetadataForStruct(t reflect.Type, jsonField string) ( elemType reflect.Type, patchStrategies []string, patchMergeKey string, e error) { if t.Kind() == reflect.Ptr { t = t.Elem() } - if t.Kind() == reflect.Map { - elemType = t.Elem() - return - } + if t.Kind() != reflect.Struct { - e = fmt.Errorf("merging an object in json but data type is not map or struct, instead is: %s", + e = fmt.Errorf("merging an object in json but data type is not struct, instead is: %s", t.Kind().String()) return } diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/BUILD b/vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/BUILD index b48ba03127e2..1df290578f71 100644 --- a/vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/BUILD +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["addr.go"], + importpath = "k8s.io/apimachinery/third_party/forked/golang/netutil", ) filegroup( diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/BUILD b/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/BUILD index b5f3d74658b0..9f09628b6272 100644 --- a/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/BUILD +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/BUILD @@ -9,12 +9,14 @@ load( go_test( name = "go_default_test", srcs = ["deep_equal_test.go"], + importpath = "k8s.io/apimachinery/third_party/forked/golang/reflect", library = ":go_default_library", ) go_library( name = "go_default_library", srcs = ["deep_equal.go"], + importpath = "k8s.io/apimachinery/third_party/forked/golang/reflect", ) filegroup( diff --git a/vendor/k8s.io/apiserver/LICENSE b/vendor/k8s.io/apiserver/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/k8s.io/apiserver/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/k8s.io/apiserver/pkg/admission/BUILD b/vendor/k8s.io/apiserver/pkg/admission/BUILD index 8c380dd88927..65542c02016b 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/BUILD +++ b/vendor/k8s.io/apiserver/pkg/admission/BUILD @@ -12,12 +12,16 @@ go_test( "chain_test.go", "config_test.go", "errors_test.go", + "handler_test.go", ], + importpath = "k8s.io/apiserver/pkg/admission", library = ":go_default_library", deps = [ + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apiserver/pkg/apis/apiserver:go_default_library", + "//vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1:go_default_library", ], ) @@ -32,20 +36,18 @@ go_library( "interfaces.go", "plugins.go", ], + importpath = "k8s.io/apiserver/pkg/admission", deps = [ "//vendor/github.com/ghodss/yaml:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apiserver/pkg/apis/apiserver:go_default_library", - "//vendor/k8s.io/apiserver/pkg/apis/apiserver/install:go_default_library", "//vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", ], @@ -62,8 +64,21 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", + "//staging/src/k8s.io/apiserver/pkg/admission/configuration:all-srcs", "//staging/src/k8s.io/apiserver/pkg/admission/initializer:all-srcs", + "//staging/src/k8s.io/apiserver/pkg/admission/metrics:all-srcs", + "//staging/src/k8s.io/apiserver/pkg/admission/plugin/initialization:all-srcs", "//staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle:all-srcs", + "//staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config:all-srcs", + "//staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/errors:all-srcs", + "//staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/initializer:all-srcs", + "//staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating:all-srcs", + "//staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace:all-srcs", + "//staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/request:all-srcs", + "//staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/rules:all-srcs", + "//staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/testcerts:all-srcs", + "//staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating:all-srcs", + "//staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/versioned:all-srcs", ], tags = ["automanaged"], ) diff --git a/vendor/k8s.io/apiserver/pkg/admission/chain.go b/vendor/k8s.io/apiserver/pkg/admission/chain.go index 45c7f72f9cf6..011641ff0656 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/chain.go +++ b/vendor/k8s.io/apiserver/pkg/admission/chain.go @@ -16,11 +16,12 @@ limitations under the 
License. package admission -// chainAdmissionHandler is an instance of admission.Interface that performs admission control using a chain of admission handlers +// chainAdmissionHandler is an instance of admission.NamedHandler that performs admission control using +// a chain of admission handlers type chainAdmissionHandler []Interface // NewChainHandler creates a new chain handler from an array of handlers. Used for testing. -func NewChainHandler(handlers ...Interface) Interface { +func NewChainHandler(handlers ...Interface) chainAdmissionHandler { return chainAdmissionHandler(handlers) } @@ -30,9 +31,27 @@ func (admissionHandler chainAdmissionHandler) Admit(a Attributes) error { if !handler.Handles(a.GetOperation()) { continue } - err := handler.Admit(a) - if err != nil { - return err + if mutator, ok := handler.(MutationInterface); ok { + err := mutator.Admit(a) + if err != nil { + return err + } + } + } + return nil +} + +// Validate performs an admission control check using a chain of handlers, and returns immediately on first error +func (admissionHandler chainAdmissionHandler) Validate(a Attributes) error { + for _, handler := range admissionHandler { + if !handler.Handles(a.GetOperation()) { + continue + } + if validator, ok := handler.(ValidationInterface); ok { + err := validator.Validate(a) + if err != nil { + return err + } } } return nil diff --git a/vendor/k8s.io/apiserver/pkg/admission/config.go b/vendor/k8s.io/apiserver/pkg/admission/config.go index d233c7cb92bc..72da98fe2631 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/config.go +++ b/vendor/k8s.io/apiserver/pkg/admission/config.go @@ -29,27 +29,14 @@ import ( "bytes" - "k8s.io/apimachinery/pkg/apimachinery/announced" - "k8s.io/apimachinery/pkg/apimachinery/registered" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apiserver/pkg/apis/apiserver" - "k8s.io/apiserver/pkg/apis/apiserver/install" apiserverv1alpha1 "k8s.io/apiserver/pkg/apis/apiserver/v1alpha1" ) -var ( - groupFactoryRegistry = make(announced.APIGroupFactoryRegistry) - registry = registered.NewOrDie(os.Getenv("KUBE_API_VERSIONS")) - scheme = runtime.NewScheme() - codecs = serializer.NewCodecFactory(scheme) -) - -func init() { - install.Install(groupFactoryRegistry, registry, scheme) -} - func makeAbs(path, base string) (string, error) { if filepath.IsAbs(path) { return path, nil @@ -70,7 +57,7 @@ func makeAbs(path, base string) (string, error) { // set of pluginNames whose config location references the specified configFilePath. // It does this to preserve backward compatibility when admission control files were opaque. // It returns an error if the file did not exist. 
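Earlier in this hunk the chain handler starts dispatching by capability: each handler is type-asserted to the mutation or validation interface and only the matching phase runs. A stdlib-only sketch of that pattern with toy interfaces (none of these names are the real admission API):

package main

import "fmt"

// Toy stand-ins: a handler may implement mutation, validation, or both, and
// the chain only invokes the phase a handler actually supports.
type attributes struct{ operation string }

type mutator interface{ Admit(a attributes) error }
type validator interface{ Validate(a attributes) error }

type chain []interface{}

func (c chain) Admit(a attributes) error {
	for _, h := range c {
		if m, ok := h.(mutator); ok {
			if err := m.Admit(a); err != nil {
				return err
			}
		}
	}
	return nil
}

func (c chain) Validate(a attributes) error {
	for _, h := range c {
		if v, ok := h.(validator); ok {
			if err := v.Validate(a); err != nil {
				return err
			}
		}
	}
	return nil
}

type addLabel struct{}

func (addLabel) Admit(a attributes) error { fmt.Println("mutating", a.operation); return nil }

type quotaCheck struct{}

func (quotaCheck) Validate(a attributes) error { fmt.Println("validating", a.operation); return nil }

func main() {
	c := chain{addLabel{}, quotaCheck{}}
	a := attributes{operation: "CREATE"}
	_ = c.Admit(a)    // only addLabel runs
	_ = c.Validate(a) // only quotaCheck runs
}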
-func ReadAdmissionConfiguration(pluginNames []string, configFilePath string) (ConfigProvider, error) { +func ReadAdmissionConfiguration(pluginNames []string, configFilePath string, configScheme *runtime.Scheme) (ConfigProvider, error) { if configFilePath == "" { return configProvider{config: &apiserver.AdmissionConfiguration{}}, nil } @@ -79,6 +66,7 @@ func ReadAdmissionConfiguration(pluginNames []string, configFilePath string) (Co if err != nil { return nil, fmt.Errorf("unable to read admission control configuration from %q [%v]", configFilePath, err) } + codecs := serializer.NewCodecFactory(configScheme) decoder := codecs.UniversalDecoder() decodedObj, err := runtime.Decode(decoder, data) // we were able to decode the file successfully @@ -99,7 +87,10 @@ func ReadAdmissionConfiguration(pluginNames []string, configFilePath string) (Co } decodedConfig.Plugins[i].Path = absPath } - return configProvider{config: decodedConfig}, nil + return configProvider{ + config: decodedConfig, + scheme: configScheme, + }, nil } // we got an error where the decode wasn't related to a missing type if !(runtime.IsMissingVersion(err) || runtime.IsMissingKind(err) || runtime.IsNotRegisteredError(err)) { @@ -119,25 +110,29 @@ func ReadAdmissionConfiguration(pluginNames []string, configFilePath string) (Co Path: configFilePath}) } } - scheme.Default(externalConfig) + configScheme.Default(externalConfig) internalConfig := &apiserver.AdmissionConfiguration{} - if err := scheme.Convert(externalConfig, internalConfig, nil); err != nil { + if err := configScheme.Convert(externalConfig, internalConfig, nil); err != nil { return nil, err } - return configProvider{config: internalConfig}, nil + return configProvider{ + config: internalConfig, + scheme: configScheme, + }, nil } type configProvider struct { config *apiserver.AdmissionConfiguration + scheme *runtime.Scheme } // GetAdmissionPluginConfigurationFor returns a reader that holds the admission plugin configuration. -func GetAdmissionPluginConfigurationFor(pluginCfg apiserver.AdmissionPluginConfiguration) (io.Reader, error) { +func GetAdmissionPluginConfigurationFor(pluginCfg apiserver.AdmissionPluginConfiguration, scheme *runtime.Scheme) (io.Reader, error) { // if there is nothing nested in the object, we return the named location obj := pluginCfg.Configuration if obj != nil { // serialize the configuration and build a reader for it - content, err := writeYAML(obj) + content, err := writeYAML(obj, scheme) if err != nil { return nil, err } @@ -168,7 +163,7 @@ func (p configProvider) ConfigFor(pluginName string) (io.Reader, error) { if pluginName != pluginCfg.Name { continue } - pluginConfig, err := GetAdmissionPluginConfigurationFor(pluginCfg) + pluginConfig, err := GetAdmissionPluginConfigurationFor(pluginCfg, p.scheme) if err != nil { return nil, err } @@ -179,8 +174,17 @@ func (p configProvider) ConfigFor(pluginName string) (io.Reader, error) { } // writeYAML writes the specified object to a byte array as yaml. 
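Both ReadAdmissionConfiguration and writeYAML now take the scheme from the caller and build their codecs per call instead of relying on package globals. A hedged sketch of that pattern, assuming only the apimachinery packages already imported in this hunk (the function names here are illustrative):

package admissionsketch

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/runtime/serializer"
)

// decodeWithScheme shows the per-call pattern: the caller supplies a
// *runtime.Scheme with its types registered, and the codec factory is built
// from it on the spot rather than from a package-level global.
func decodeWithScheme(data []byte, configScheme *runtime.Scheme) (runtime.Object, error) {
	codecs := serializer.NewCodecFactory(configScheme)
	return runtime.Decode(codecs.UniversalDecoder(), data)
}

// encodeWithScheme mirrors the encoding side: group-versions are derived from
// the caller's scheme for the object being written, then used to pick a codec.
func encodeWithScheme(obj runtime.Object, configScheme *runtime.Scheme) ([]byte, error) {
	gvks, _, err := configScheme.ObjectKinds(obj)
	if err != nil {
		return nil, err
	}
	gvs := make([]schema.GroupVersion, 0, len(gvks))
	for _, gvk := range gvks {
		gvs = append(gvs, gvk.GroupVersion())
	}
	codecs := serializer.NewCodecFactory(configScheme)
	return runtime.Encode(codecs.LegacyCodec(gvs...), obj)
}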
-func writeYAML(obj runtime.Object) ([]byte, error) { - json, err := runtime.Encode(codecs.LegacyCodec(), obj) +func writeYAML(obj runtime.Object, scheme *runtime.Scheme) ([]byte, error) { + gvks, _, err := scheme.ObjectKinds(obj) + if err != nil { + return nil, err + } + gvs := []schema.GroupVersion{} + for _, gvk := range gvks { + gvs = append(gvs, gvk.GroupVersion()) + } + codecs := serializer.NewCodecFactory(scheme) + json, err := runtime.Encode(codecs.LegacyCodec(gvs...), obj) if err != nil { return nil, err } diff --git a/vendor/k8s.io/apiserver/pkg/admission/configuration/BUILD b/vendor/k8s.io/apiserver/pkg/admission/configuration/BUILD new file mode 100644 index 000000000000..d6d2f3ee199d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/configuration/BUILD @@ -0,0 +1,61 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = [ + "configuration_manager_test.go", + "initializer_manager_test.go", + "mutating_webhook_manager_test.go", + "validating_webhook_manager_test.go", + ], + importpath = "k8s.io/apiserver/pkg/admission/configuration", + library = ":go_default_library", + deps = [ + "//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library", + "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + ], +) + +go_library( + name = "go_default_library", + srcs = [ + "configuration_manager.go", + "initializer_manager.go", + "mutating_webhook_manager.go", + "validating_webhook_manager.go", + ], + importpath = "k8s.io/apiserver/pkg/admission/configuration", + deps = [ + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library", + "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/admission/configuration/configuration_manager.go b/vendor/k8s.io/apiserver/pkg/admission/configuration/configuration_manager.go similarity index 99% rename from vendor/k8s.io/kubernetes/pkg/kubeapiserver/admission/configuration/configuration_manager.go rename to vendor/k8s.io/apiserver/pkg/admission/configuration/configuration_manager.go index d31b391c0b7d..4c4bf74c92f7 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/admission/configuration/configuration_manager.go +++ b/vendor/k8s.io/apiserver/pkg/admission/configuration/configuration_manager.go @@ -104,6 +104,7 @@ func (a *poller) bootstrapping() { // bootstrapGracePeriod is read-only, so no lock is required timer := time.NewTimer(a.bootstrapGracePeriod) go func() { + defer timer.Stop() <-timer.C a.lock.Lock() defer 
a.lock.Unlock() diff --git a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/admission/configuration/initializer_manager.go b/vendor/k8s.io/apiserver/pkg/admission/configuration/initializer_manager.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/kubeapiserver/admission/configuration/initializer_manager.go rename to vendor/k8s.io/apiserver/pkg/admission/configuration/initializer_manager.go diff --git a/vendor/k8s.io/apiserver/pkg/admission/configuration/mutating_webhook_manager.go b/vendor/k8s.io/apiserver/pkg/admission/configuration/mutating_webhook_manager.go new file mode 100644 index 000000000000..bf4d0eabf986 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/configuration/mutating_webhook_manager.go @@ -0,0 +1,101 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package configuration + +import ( + "fmt" + "reflect" + "sort" + + "github.com/golang/glog" + + "k8s.io/api/admissionregistration/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +type MutatingWebhookConfigurationLister interface { + List(opts metav1.ListOptions) (*v1beta1.MutatingWebhookConfigurationList, error) +} + +// MutatingWebhookConfigurationManager collects the mutating webhook objects so that they can be called. +type MutatingWebhookConfigurationManager struct { + *poller +} + +func NewMutatingWebhookConfigurationManager(c MutatingWebhookConfigurationLister) *MutatingWebhookConfigurationManager { + getFn := func() (runtime.Object, error) { + list, err := c.List(metav1.ListOptions{}) + if err != nil { + if errors.IsNotFound(err) || errors.IsForbidden(err) { + glog.V(5).Infof("MutatingWebhookConfiguration are disabled due to an error: %v", err) + return nil, ErrDisabled + } + return nil, err + } + return mergeMutatingWebhookConfigurations(list), nil + } + + return &MutatingWebhookConfigurationManager{ + newPoller(getFn), + } +} + +// Webhooks returns the merged MutatingWebhookConfiguration. +func (im *MutatingWebhookConfigurationManager) Webhooks() (*v1beta1.MutatingWebhookConfiguration, error) { + configuration, err := im.poller.configuration() + if err != nil { + return nil, err + } + mutatingWebhookConfiguration, ok := configuration.(*v1beta1.MutatingWebhookConfiguration) + if !ok { + return nil, fmt.Errorf("expected type %v, got type %v", reflect.TypeOf(mutatingWebhookConfiguration), reflect.TypeOf(configuration)) + } + return mutatingWebhookConfiguration, nil +} + +func (im *MutatingWebhookConfigurationManager) Run(stopCh <-chan struct{}) { + im.poller.Run(stopCh) +} + +func mergeMutatingWebhookConfigurations( + list *v1beta1.MutatingWebhookConfigurationList, +) *v1beta1.MutatingWebhookConfiguration { + configurations := append([]v1beta1.MutatingWebhookConfiguration{}, list.Items...) + var ret v1beta1.MutatingWebhookConfiguration + // The internal order of webhooks for each configuration is provided by the user + // but configurations themselves can be in any order. 
As we are going to run these + // webhooks in serial, they are sorted here to have a deterministic order. + sort.Sort(byName(configurations)) + for _, c := range configurations { + ret.Webhooks = append(ret.Webhooks, c.Webhooks...) + } + return &ret +} + +// byName sorts MutatingWebhookConfiguration by name. These objects are all in +// cluster namespace (aka no namespace) thus they all have unique names. +type byName []v1beta1.MutatingWebhookConfiguration + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + return x[i].ObjectMeta.Name < x[j].ObjectMeta.Name +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/configuration/validating_webhook_manager.go b/vendor/k8s.io/apiserver/pkg/admission/configuration/validating_webhook_manager.go new file mode 100644 index 000000000000..8f9fd34daae3 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/configuration/validating_webhook_manager.go @@ -0,0 +1,84 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package configuration + +import ( + "fmt" + "reflect" + + "github.com/golang/glog" + + "k8s.io/api/admissionregistration/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +type ValidatingWebhookConfigurationLister interface { + List(opts metav1.ListOptions) (*v1beta1.ValidatingWebhookConfigurationList, error) +} + +// ValidatingWebhookConfigurationManager collects the validating webhook objects so that they can be called. +type ValidatingWebhookConfigurationManager struct { + *poller +} + +func NewValidatingWebhookConfigurationManager(c ValidatingWebhookConfigurationLister) *ValidatingWebhookConfigurationManager { + getFn := func() (runtime.Object, error) { + list, err := c.List(metav1.ListOptions{}) + if err != nil { + if errors.IsNotFound(err) || errors.IsForbidden(err) { + glog.V(5).Infof("ValidatingWebhookConfiguration are disabled due to an error: %v", err) + return nil, ErrDisabled + } + return nil, err + } + return mergeValidatingWebhookConfigurations(list), nil + } + + return &ValidatingWebhookConfigurationManager{ + newPoller(getFn), + } +} + +// Webhooks returns the merged ValidatingWebhookConfiguration. 
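The mutating manager above flattens every configuration's webhooks into a single list, sorting configurations by name first so serial execution is deterministic. A stdlib-only sketch of that merge step with stand-in types:

package main

import (
	"fmt"
	"sort"
)

// webhookConfig is a stand-in for a webhook configuration object: a
// cluster-scoped resource with a unique name and an ordered list of webhooks.
type webhookConfig struct {
	Name     string
	Webhooks []string
}

// mergeConfigs sorts configurations by name, then concatenates their webhooks,
// preserving each configuration's internal (user-provided) order.
func mergeConfigs(configs []webhookConfig) []string {
	sorted := append([]webhookConfig{}, configs...) // avoid mutating the input
	sort.Slice(sorted, func(i, j int) bool { return sorted[i].Name < sorted[j].Name })
	var merged []string
	for _, c := range sorted {
		merged = append(merged, c.Webhooks...)
	}
	return merged
}

func main() {
	configs := []webhookConfig{
		{Name: "zz-policy", Webhooks: []string{"deny-latest-tag"}},
		{Name: "aa-defaults", Webhooks: []string{"add-labels", "set-resources"}},
	}
	fmt.Println(mergeConfigs(configs)) // [add-labels set-resources deny-latest-tag]
}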
+func (im *ValidatingWebhookConfigurationManager) Webhooks() (*v1beta1.ValidatingWebhookConfiguration, error) { + configuration, err := im.poller.configuration() + if err != nil { + return nil, err + } + validatingWebhookConfiguration, ok := configuration.(*v1beta1.ValidatingWebhookConfiguration) + if !ok { + return nil, fmt.Errorf("expected type %v, got type %v", reflect.TypeOf(validatingWebhookConfiguration), reflect.TypeOf(configuration)) + } + return validatingWebhookConfiguration, nil +} + +func (im *ValidatingWebhookConfigurationManager) Run(stopCh <-chan struct{}) { + im.poller.Run(stopCh) +} + +func mergeValidatingWebhookConfigurations( + list *v1beta1.ValidatingWebhookConfigurationList, +) *v1beta1.ValidatingWebhookConfiguration { + configurations := list.Items + var ret v1beta1.ValidatingWebhookConfiguration + for _, c := range configurations { + ret.Webhooks = append(ret.Webhooks, c.Webhooks...) + } + return &ret +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/handler.go b/vendor/k8s.io/apiserver/pkg/admission/handler.go index 5de066f0587b..d2a9e7d4cf7c 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/handler.go +++ b/vendor/k8s.io/apiserver/pkg/admission/handler.go @@ -66,14 +66,8 @@ func (h *Handler) WaitForReady() bool { if h.readyFunc == nil { return true } - return h.waitForReadyInternal(time.After(timeToWaitForReady)) -} -func (h *Handler) waitForReadyInternal(timeout <-chan time.Time) bool { - // there is no configured ready func, so return immediately - if h.readyFunc == nil { - return true - } + timeout := time.After(timeToWaitForReady) for !h.readyFunc() { select { case <-time.After(100 * time.Millisecond): diff --git a/vendor/k8s.io/apiserver/pkg/admission/initializer/BUILD b/vendor/k8s.io/apiserver/pkg/admission/initializer/BUILD index 2755ab090161..6884d643a4e2 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/initializer/BUILD +++ b/vendor/k8s.io/apiserver/pkg/admission/initializer/BUILD @@ -12,7 +12,9 @@ go_library( "initializer.go", "interfaces.go", ], + importpath = "k8s.io/apiserver/pkg/admission/initializer", deps = [ + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", "//vendor/k8s.io/client-go/informers:go_default_library", @@ -23,7 +25,9 @@ go_library( go_test( name = "go_default_xtest", srcs = ["initializer_test.go"], + importpath = "k8s.io/apiserver/pkg/admission/initializer_test", deps = [ + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission/initializer:go_default_library", "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/admission/initializer/initializer.go b/vendor/k8s.io/apiserver/pkg/admission/initializer/initializer.go index c0b1e1d25b75..abe764bb94dc 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/initializer/initializer.go +++ b/vendor/k8s.io/apiserver/pkg/admission/initializer/initializer.go @@ -17,6 +17,7 @@ limitations under the License. 
package initializer import ( + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/admission" "k8s.io/apiserver/pkg/authorization/authorizer" "k8s.io/client-go/informers" @@ -27,15 +28,23 @@ type pluginInitializer struct { externalClient kubernetes.Interface externalInformers informers.SharedInformerFactory authorizer authorizer.Authorizer + scheme *runtime.Scheme } // New creates an instance of admission plugins initializer. -func New(extClientset kubernetes.Interface, extInformers informers.SharedInformerFactory, authz authorizer.Authorizer) (pluginInitializer, error) { +// TODO(p0lyn0mial): make the parameters public, this construction seems to be redundant. +func New( + extClientset kubernetes.Interface, + extInformers informers.SharedInformerFactory, + authz authorizer.Authorizer, + scheme *runtime.Scheme, +) pluginInitializer { return pluginInitializer{ externalClient: extClientset, externalInformers: extInformers, authorizer: authz, - }, nil + scheme: scheme, + } } // Initialize checks the initialization interfaces implemented by a plugin @@ -52,6 +61,10 @@ func (i pluginInitializer) Initialize(plugin admission.Interface) { if wants, ok := plugin.(WantsAuthorizer); ok { wants.SetAuthorizer(i.authorizer) } + + if wants, ok := plugin.(WantsScheme); ok { + wants.SetScheme(i.scheme) + } } var _ admission.PluginInitializer = pluginInitializer{} diff --git a/vendor/k8s.io/apiserver/pkg/admission/initializer/interfaces.go b/vendor/k8s.io/apiserver/pkg/admission/initializer/interfaces.go index a216c9888fab..98a07585406c 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/initializer/interfaces.go +++ b/vendor/k8s.io/apiserver/pkg/admission/initializer/interfaces.go @@ -17,6 +17,7 @@ limitations under the License. package initializer import ( + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/admission" "k8s.io/apiserver/pkg/authorization/authorizer" "k8s.io/client-go/informers" @@ -26,17 +27,23 @@ import ( // WantsExternalKubeClientSet defines a function which sets external ClientSet for admission plugins that need it type WantsExternalKubeClientSet interface { SetExternalKubeClientSet(kubernetes.Interface) - admission.Validator + admission.InitializationValidator } // WantsExternalKubeInformerFactory defines a function which sets InformerFactory for admission plugins that need it type WantsExternalKubeInformerFactory interface { SetExternalKubeInformerFactory(informers.SharedInformerFactory) - admission.Validator + admission.InitializationValidator } // WantsAuthorizer defines a function which sets Authorizer for admission plugins that need it. type WantsAuthorizer interface { SetAuthorizer(authorizer.Authorizer) - admission.Validator + admission.InitializationValidator +} + +// WantsScheme defines a function that accepts runtime.Scheme for admission plugins that need it. +type WantsScheme interface { + SetScheme(*runtime.Scheme) + admission.InitializationValidator } diff --git a/vendor/k8s.io/apiserver/pkg/admission/interfaces.go b/vendor/k8s.io/apiserver/pkg/admission/interfaces.go index a8e671db5140..76d3864e275d 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/interfaces.go +++ b/vendor/k8s.io/apiserver/pkg/admission/interfaces.go @@ -53,14 +53,26 @@ type Attributes interface { // Interface is an abstract, pluggable interface for Admission Control decisions. 
type Interface interface { - // Admit makes an admission decision based on the request attributes - Admit(a Attributes) (err error) - // Handles returns true if this admission controller can handle the given operation // where operation can be one of CREATE, UPDATE, DELETE, or CONNECT Handles(operation Operation) bool } +type MutationInterface interface { + Interface + + // Admit makes an admission decision based on the request attributes + Admit(a Attributes) (err error) +} + +// ValidationInterface is an abstract, pluggable interface for Admission Control decisions. +type ValidationInterface interface { + Interface + + // Validate makes an admission decision based on the request attributes. It is NOT allowed to mutate + Validate(a Attributes) (err error) +} + // Operation is the type of resource operation being checked for admission control type Operation string @@ -78,10 +90,10 @@ type PluginInitializer interface { Initialize(plugin Interface) } -// Validator holds Validate functions, which are responsible for validation of initialized shared resources -// and should be implemented on admission plugins -type Validator interface { - Validate() error +// InitializationValidator holds ValidateInitialization functions, which are responsible for validation of initialized +// shared resources and should be implemented on admission plugins +type InitializationValidator interface { + ValidateInitialization() error } // ConfigProvider provides a way to get configuration for an admission plugin based on its name diff --git a/vendor/k8s.io/apiserver/pkg/admission/metrics/BUILD b/vendor/k8s.io/apiserver/pkg/admission/metrics/BUILD new file mode 100644 index 000000000000..32feb2f9f246 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/metrics/BUILD @@ -0,0 +1,42 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["metrics.go"], + importpath = "k8s.io/apiserver/pkg/admission/metrics", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "metrics_test.go", + "testutil_test.go", + ], + importpath = "k8s.io/apiserver/pkg/admission/metrics", + library = ":go_default_library", + deps = [ + "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + "//vendor/github.com/prometheus/client_model/go:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/apiserver/pkg/admission/metrics/metrics.go b/vendor/k8s.io/apiserver/pkg/admission/metrics/metrics.go new file mode 100644 index 000000000000..0955a98c9b88 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/metrics/metrics.go @@ -0,0 +1,217 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "fmt" + "strconv" + "time" + + "github.com/prometheus/client_golang/prometheus" + + "k8s.io/apiserver/pkg/admission" +) + +const ( + namespace = "apiserver" + subsystem = "admission" +) + +var ( + // Use buckets ranging from 25 ms to ~2.5 seconds. + latencyBuckets = prometheus.ExponentialBuckets(25000, 2.5, 5) + latencySummaryMaxAge = 5 * time.Hour + + // Metrics provides access to all admission metrics. + Metrics = newAdmissionMetrics() +) + +// ObserverFunc is a func that emits metrics. +type ObserverFunc func(elapsed time.Duration, rejected bool, attr admission.Attributes, stepType string, extraLabels ...string) + +const ( + stepValidate = "validate" + stepAdmit = "admit" +) + +// WithControllerMetrics is a decorator for named admission handlers. +func WithControllerMetrics(i admission.Interface, name string) admission.Interface { + return WithMetrics(i, Metrics.ObserveAdmissionController, name) +} + +// WithStepMetrics is a decorator for a whole admission phase, i.e. admit or validation.admission step. +func WithStepMetrics(i admission.Interface) admission.Interface { + return WithMetrics(i, Metrics.ObserveAdmissionStep) +} + +// WithMetrics is a decorator for admission handlers with a generic observer func. +func WithMetrics(i admission.Interface, observer ObserverFunc, extraLabels ...string) admission.Interface { + return &pluginHandlerWithMetrics{ + Interface: i, + observer: observer, + extraLabels: extraLabels, + } +} + +// pluginHandlerWithMetrics decorates a admission handler with metrics. +type pluginHandlerWithMetrics struct { + admission.Interface + observer ObserverFunc + extraLabels []string +} + +// Admit performs a mutating admission control check and emit metrics. +func (p pluginHandlerWithMetrics) Admit(a admission.Attributes) error { + mutatingHandler, ok := p.Interface.(admission.MutationInterface) + if !ok { + return nil + } + + start := time.Now() + err := mutatingHandler.Admit(a) + p.observer(time.Since(start), err != nil, a, stepAdmit, p.extraLabels...) + return err +} + +// Validate performs a non-mutating admission control check and emits metrics. +func (p pluginHandlerWithMetrics) Validate(a admission.Attributes) error { + validatingHandler, ok := p.Interface.(admission.ValidationInterface) + if !ok { + return nil + } + + start := time.Now() + err := validatingHandler.Validate(a) + p.observer(time.Since(start), err != nil, a, stepValidate, p.extraLabels...) + return err +} + +// AdmissionMetrics instruments admission with prometheus metrics. +type AdmissionMetrics struct { + step *metricSet + controller *metricSet + webhook *metricSet +} + +// newAdmissionMetrics create a new AdmissionMetrics, configured with default metric names. +func newAdmissionMetrics() *AdmissionMetrics { + // Admission metrics for a step of the admission flow. The entire admission flow is broken down into a series of steps + // Each step is identified by a distinct type label value. 
+ step := newMetricSet("step", + []string{"type", "operation", "group", "version", "resource", "subresource", "rejected"}, + "Admission sub-step %s, broken out for each operation and API resource and step type (validate or admit).", true) + + // Built-in admission controller metrics. Each admission controller is identified by name. + controller := newMetricSet("controller", + []string{"name", "type", "operation", "group", "version", "resource", "subresource", "rejected"}, + "Admission controller %s, identified by name and broken out for each operation and API resource and type (validate or admit).", false) + + // Admission webhook metrics. Each webhook is identified by name. + webhook := newMetricSet("webhook", + []string{"name", "type", "operation", "group", "version", "resource", "subresource", "rejected"}, + "Admission webhook %s, identified by name and broken out for each operation and API resource and type (validate or admit).", false) + + step.mustRegister() + controller.mustRegister() + webhook.mustRegister() + return &AdmissionMetrics{step: step, controller: controller, webhook: webhook} +} + +func (m *AdmissionMetrics) reset() { + m.step.reset() + m.controller.reset() + m.webhook.reset() +} + +// ObserveAdmissionStep records admission related metrics for a admission step, identified by step type. +func (m *AdmissionMetrics) ObserveAdmissionStep(elapsed time.Duration, rejected bool, attr admission.Attributes, stepType string, extraLabels ...string) { + gvr := attr.GetResource() + m.step.observe(elapsed, append(extraLabels, stepType, string(attr.GetOperation()), gvr.Group, gvr.Version, gvr.Resource, attr.GetSubresource(), strconv.FormatBool(rejected))...) +} + +// ObserveAdmissionController records admission related metrics for a built-in admission controller, identified by it's plugin handler name. +func (m *AdmissionMetrics) ObserveAdmissionController(elapsed time.Duration, rejected bool, attr admission.Attributes, stepType string, extraLabels ...string) { + gvr := attr.GetResource() + m.controller.observe(elapsed, append(extraLabels, stepType, string(attr.GetOperation()), gvr.Group, gvr.Version, gvr.Resource, attr.GetSubresource(), strconv.FormatBool(rejected))...) +} + +// ObserveWebhook records admission related metrics for a admission webhook. +func (m *AdmissionMetrics) ObserveWebhook(elapsed time.Duration, rejected bool, attr admission.Attributes, stepType string, extraLabels ...string) { + gvr := attr.GetResource() + m.webhook.observe(elapsed, append(extraLabels, stepType, string(attr.GetOperation()), gvr.Group, gvr.Version, gvr.Resource, attr.GetSubresource(), strconv.FormatBool(rejected))...) 
+} + +type metricSet struct { + latencies *prometheus.HistogramVec + latenciesSummary *prometheus.SummaryVec +} + +func newMetricSet(name string, labels []string, helpTemplate string, hasSummary bool) *metricSet { + var summary *prometheus.SummaryVec + if hasSummary { + summary = prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: fmt.Sprintf("%s_admission_latencies_seconds_summary", name), + Help: fmt.Sprintf(helpTemplate, "latency summary"), + MaxAge: latencySummaryMaxAge, + }, + labels, + ) + } + + return &metricSet{ + latencies: prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: fmt.Sprintf("%s_admission_latencies_seconds", name), + Help: fmt.Sprintf(helpTemplate, "latency histogram"), + Buckets: latencyBuckets, + }, + labels, + ), + + latenciesSummary: summary, + } +} + +// MustRegister registers all the prometheus metrics in the metricSet. +func (m *metricSet) mustRegister() { + prometheus.MustRegister(m.latencies) + if m.latenciesSummary != nil { + prometheus.MustRegister(m.latenciesSummary) + } +} + +// Reset resets all the prometheus metrics in the metricSet. +func (m *metricSet) reset() { + m.latencies.Reset() + if m.latenciesSummary != nil { + m.latenciesSummary.Reset() + } +} + +// Observe records an observed admission event to all metrics in the metricSet. +func (m *metricSet) observe(elapsed time.Duration, labels ...string) { + elapsedMicroseconds := float64(elapsed / time.Microsecond) + m.latencies.WithLabelValues(labels...).Observe(elapsedMicroseconds) + if m.latenciesSummary != nil { + m.latenciesSummary.WithLabelValues(labels...).Observe(elapsedMicroseconds) + } +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/initialization/BUILD b/vendor/k8s.io/apiserver/pkg/admission/plugin/initialization/BUILD new file mode 100644 index 000000000000..6479a44881cf --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/initialization/BUILD @@ -0,0 +1,61 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_library( + name = "go_default_library", + srcs = ["initialization.go"], + importpath = "k8s.io/apiserver/pkg/admission/plugin/initialization", + deps = [ + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/validation:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission/configuration:go_default_library", + "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", + "//vendor/k8s.io/apiserver/pkg/features:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + 
+filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) + +go_test( + name = "go_default_test", + srcs = ["initialization_test.go"], + importpath = "k8s.io/apiserver/pkg/admission/plugin/initialization", + library = ":go_default_library", + deps = [ + "//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", + "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/initialization/initialization.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/initialization/initialization.go similarity index 89% rename from vendor/k8s.io/kubernetes/plugin/pkg/admission/initialization/initialization.go rename to vendor/k8s.io/apiserver/pkg/admission/plugin/initialization/initialization.go index 8c56f51e3b3f..1bb59da5d539 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/initialization/initialization.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/initialization/initialization.go @@ -24,6 +24,7 @@ import ( "github.com/golang/glog" "k8s.io/api/admissionregistration/v1alpha1" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/api/validation" @@ -32,17 +33,21 @@ import ( "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/configuration" "k8s.io/apiserver/pkg/authorization/authorizer" "k8s.io/apiserver/pkg/features" utilfeature "k8s.io/apiserver/pkg/util/feature" clientset "k8s.io/client-go/kubernetes" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/kubeapiserver/admission/configuration" +) + +const ( + // Name of admission plug-in + PluginName = "Initializers" ) // Register registers a plugin func Register(plugins *admission.Plugins) { - plugins.Register("Initializers", func(config io.Reader) (admission.Interface, error) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { return NewInitializer(), nil }) } @@ -69,7 +74,8 @@ func NewInitializer() admission.Interface { return &initializer{} } -func (i *initializer) Validate() error { +// ValidateInitialization implements the InitializationValidator interface. +func (i *initializer) ValidateInitialization() error { if i.config == nil { return fmt.Errorf("the Initializer admission plugin requires a Kubernetes client to be provided") } @@ -89,10 +95,12 @@ func (i *initializer) Validate() error { return nil } +// SetExternalKubeClientSet implements the WantsExternalKubeClientSet interface. func (i *initializer) SetExternalKubeClientSet(client clientset.Interface) { - i.config = configuration.NewInitializerConfigurationManager(client.Admissionregistration().InitializerConfigurations()) + i.config = configuration.NewInitializerConfigurationManager(client.AdmissionregistrationV1alpha1().InitializerConfigurations()) } +// SetAuthorizer implements the WantsAuthorizer interface. 
func (i *initializer) SetAuthorizer(a authorizer.Authorizer) { i.authorizer = a } @@ -183,8 +191,13 @@ func (i *initializer) Admit(a admission.Attributes) (err error) { // Mirror pods are exempt from initialization because they are created and initialized // on the Kubelet before they appear in the API. // TODO: once this moves to REST storage layer, this becomes a pod specific concern - if pod, ok := a.GetObject().(*api.Pod); ok && pod != nil { - if _, isMirror := pod.Annotations[api.MirrorPodAnnotationKey]; isMirror { + if a.GetKind().GroupKind() == v1.SchemeGroupVersion.WithKind("Pod").GroupKind() { + accessor, err := meta.Accessor(a.GetObject()) + if err != nil { + return err + } + annotations := accessor.GetAnnotations() + if _, isMirror := annotations[v1.MirrorPodAnnotationKey]; isMirror { return nil } } @@ -208,6 +221,13 @@ func (i *initializer) Admit(a admission.Attributes) (err error) { } updated := accessor.GetInitializers() + // controllers deployed with an empty initializers.pending have their initializers set to nil + // but should be able to update without changing their manifest + if updated != nil && len(updated.Pending) == 0 && updated.Result == nil { + accessor.SetInitializers(nil) + updated = nil + } + existingAccessor, err := meta.Accessor(a.GetOldObject()) if err != nil { // if the old object does not have an accessor, but the new one does, error out @@ -222,9 +242,6 @@ func (i *initializer) Admit(a admission.Attributes) (err error) { glog.V(5).Infof("Modifying uninitialized resource %s", a.GetResource()) - if updated != nil && len(updated.Pending) == 0 && updated.Result == nil { - accessor.SetInitializers(nil) - } // because we are called before validation, we need to ensure the update transition is valid. if errs := validation.ValidateInitializersUpdate(updated, existing, initializerFieldPath); len(errs) > 0 { return errors.NewInvalid(a.GetKind().GroupKind(), a.GetName(), errs) @@ -243,7 +260,7 @@ func (i *initializer) Admit(a admission.Attributes) (err error) { func (i *initializer) canInitialize(a admission.Attributes, message string) error { // caller must have the ability to mutate un-initialized resources - authorized, reason, err := i.authorizer.Authorize(authorizer.AttributesRecord{ + decision, reason, err := i.authorizer.Authorize(authorizer.AttributesRecord{ Name: a.GetName(), ResourceRequest: true, User: a.GetUserInfo(), @@ -256,12 +273,14 @@ func (i *initializer) canInitialize(a admission.Attributes, message string) erro if err != nil { return err } - if !authorized { + if decision != authorizer.DecisionAllow { return errors.NewForbidden(a.GetResource().GroupResource(), a.GetName(), fmt.Errorf("%s: %s", message, reason)) } return nil } +// Handles returns true if this admission controller can handle the given operation +// where operation can be one of CREATE, UPDATE, DELETE, or CONNECT func (i *initializer) Handles(op admission.Operation) bool { return op == admission.Create || op == admission.Update } diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/BUILD b/vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/BUILD index 96d15ce08337..1c821c23cc06 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/BUILD +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/BUILD @@ -9,6 +9,7 @@ load( go_library( name = "go_default_library", srcs = ["admission.go"], + importpath = "k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle", deps = [ 
"//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", @@ -29,6 +30,7 @@ go_library( go_test( name = "go_default_test", srcs = ["admission_test.go"], + importpath = "k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle", library = ":go_default_library", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go index 20e7bd88d179..158f41642761 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go @@ -57,9 +57,9 @@ func Register(plugins *admission.Plugins) { }) } -// lifecycle is an implementation of admission.Interface. +// Lifecycle is an implementation of admission.Interface. // It enforces life-cycle constraints around a Namespace depending on its Phase -type lifecycle struct { +type Lifecycle struct { *admission.Handler client kubernetes.Interface immortalNamespaces sets.String @@ -73,8 +73,8 @@ type forceLiveLookupEntry struct { expiry time.Time } -var _ = initializer.WantsExternalKubeInformerFactory(&lifecycle{}) -var _ = initializer.WantsExternalKubeClientSet(&lifecycle{}) +var _ = initializer.WantsExternalKubeInformerFactory(&Lifecycle{}) +var _ = initializer.WantsExternalKubeClientSet(&Lifecycle{}) func makeNamespaceKey(namespace string) *v1.Namespace { return &v1.Namespace{ @@ -85,7 +85,7 @@ func makeNamespaceKey(namespace string) *v1.Namespace { } } -func (l *lifecycle) Admit(a admission.Attributes) error { +func (l *Lifecycle) Admit(a admission.Attributes) error { // prevent deletion of immortal namespaces if a.GetOperation() == admission.Delete && a.GetKind().GroupKind() == v1.SchemeGroupVersion.WithKind("Namespace").GroupKind() && l.immortalNamespaces.Has(a.GetName()) { return errors.NewForbidden(a.GetResource().GroupResource(), a.GetName(), fmt.Errorf("this namespace may not be deleted")) @@ -165,7 +165,7 @@ func (l *lifecycle) Admit(a admission.Attributes) error { // refuse to operate on non-existent namespaces if !exists || forceLiveLookup { // as a last resort, make a call directly to storage - namespace, err = l.client.Core().Namespaces().Get(a.GetNamespace(), metav1.GetOptions{}) + namespace, err = l.client.CoreV1().Namespaces().Get(a.GetNamespace(), metav1.GetOptions{}) switch { case errors.IsNotFound(err): return err @@ -188,31 +188,34 @@ func (l *lifecycle) Admit(a admission.Attributes) error { return nil } -// NewLifecycle creates a new namespace lifecycle admission control handler -func NewLifecycle(immortalNamespaces sets.String) (admission.Interface, error) { +// NewLifecycle creates a new namespace Lifecycle admission control handler +func NewLifecycle(immortalNamespaces sets.String) (*Lifecycle, error) { return newLifecycleWithClock(immortalNamespaces, clock.RealClock{}) } -func newLifecycleWithClock(immortalNamespaces sets.String, clock utilcache.Clock) (admission.Interface, error) { +func newLifecycleWithClock(immortalNamespaces sets.String, clock utilcache.Clock) (*Lifecycle, error) { forceLiveLookupCache := utilcache.NewLRUExpireCacheWithClock(100, clock) - return &lifecycle{ + return &Lifecycle{ Handler: admission.NewHandler(admission.Create, admission.Update, admission.Delete), immortalNamespaces: immortalNamespaces, forceLiveLookupCache: forceLiveLookupCache, }, nil } -func (l *lifecycle) 
SetExternalKubeInformerFactory(f informers.SharedInformerFactory) { +// SetExternalKubeInformerFactory implements the WantsExternalKubeInformerFactory interface. +func (l *Lifecycle) SetExternalKubeInformerFactory(f informers.SharedInformerFactory) { namespaceInformer := f.Core().V1().Namespaces() l.namespaceLister = namespaceInformer.Lister() l.SetReadyFunc(namespaceInformer.Informer().HasSynced) } -func (l *lifecycle) SetExternalKubeClientSet(client kubernetes.Interface) { +// SetExternalKubeClientSet implements the WantsExternalKubeClientSet interface. +func (l *Lifecycle) SetExternalKubeClientSet(client kubernetes.Interface) { l.client = client } -func (l *lifecycle) Validate() error { +// ValidateInitialization implements the InitializationValidator interface. +func (l *Lifecycle) ValidateInitialization() error { if l.namespaceLister == nil { return fmt.Errorf("missing namespaceLister") } diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/BUILD b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/BUILD new file mode 100644 index 000000000000..76c2fd3dcade --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/BUILD @@ -0,0 +1,60 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "authentication.go", + "client.go", + "kubeconfig.go", + "serviceresolver.go", + ], + importpath = "k8s.io/apiserver/pkg/admission/plugin/webhook/config", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/hashicorp/golang-lru:go_default_library", + "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/errors:go_default_library", + "//vendor/k8s.io/client-go/rest:go_default_library", + "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library", + "//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "authentication_test.go", + "serviceresolver_test.go", + ], + importpath = "k8s.io/apiserver/pkg/admission/plugin/webhook/config", + library = ":go_default_library", + deps = [ + "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", + "//vendor/k8s.io/client-go/rest:go_default_library", + "//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/BUILD b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/BUILD new file 
mode 100644 index 000000000000..3b6bdc2abeeb --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/BUILD @@ -0,0 +1,36 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "register.go", + "types.go", + "zz_generated.deepcopy.go", + ], + importpath = "k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/install:all-srcs", + "//staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/doc.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/doc.go new file mode 100644 index 000000000000..63ab310393c9 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package + +package webhookadmission diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/register.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/register.go new file mode 100644 index 000000000000..c958d15baa5d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/register.go @@ -0,0 +1,51 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package webhookadmission + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// GroupName is the group name use in this package +const GroupName = "apiserver.config.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +func addKnownTypes(scheme *runtime.Scheme) error { + // TODO this will get cleaned up with the scheme types are fixed + scheme.AddKnownTypes(SchemeGroupVersion, + &WebhookAdmission{}, + ) + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/types.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/types.go new file mode 100644 index 000000000000..71ce47b1f42e --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/types.go @@ -0,0 +1,29 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhookadmission + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// WebhookAdmission provides configuration for the webhook admission controller. +type WebhookAdmission struct { + metav1.TypeMeta + + // KubeConfigFile is the path to the kubeconfig file. 
+ KubeConfigFile string +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/BUILD b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/BUILD new file mode 100644 index 000000000000..01d39435ecb7 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/BUILD @@ -0,0 +1,36 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "register.go", + "types.go", + "zz_generated.conversion.go", + "zz_generated.deepcopy.go", + "zz_generated.defaults.go", + ], + importpath = "k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/doc.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/doc.go new file mode 100644 index 000000000000..04c376f77950 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:conversion-gen=k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission +// +k8s:defaulter-gen=TypeMeta + +// Package v1alpha1 is the v1alpha1 version of the API. +// +groupName=apiserver.config.k8s.io +package v1alpha1 diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/register.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/register.go new file mode 100644 index 000000000000..56489f7804cc --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/register.go @@ -0,0 +1,50 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "apiserver.config.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes) +} + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &WebhookAdmission{}, + ) + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/types.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/types.go new file mode 100644 index 000000000000..a49a6a813ea4 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/types.go @@ -0,0 +1,29 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// WebhookAdmission provides configuration for the webhook admission controller. +type WebhookAdmission struct { + metav1.TypeMeta `json:",inline"` + + // KubeConfigFile is the path to the kubeconfig file. + KubeConfigFile string `json:"kubeConfigFile"` +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.conversion.go new file mode 100644 index 000000000000..1adab17ac9ed --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.conversion.go @@ -0,0 +1,60 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1alpha1 + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + webhookadmission "k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1alpha1_WebhookAdmission_To_webhookadmission_WebhookAdmission, + Convert_webhookadmission_WebhookAdmission_To_v1alpha1_WebhookAdmission, + ) +} + +func autoConvert_v1alpha1_WebhookAdmission_To_webhookadmission_WebhookAdmission(in *WebhookAdmission, out *webhookadmission.WebhookAdmission, s conversion.Scope) error { + out.KubeConfigFile = in.KubeConfigFile + return nil +} + +// Convert_v1alpha1_WebhookAdmission_To_webhookadmission_WebhookAdmission is an autogenerated conversion function. +func Convert_v1alpha1_WebhookAdmission_To_webhookadmission_WebhookAdmission(in *WebhookAdmission, out *webhookadmission.WebhookAdmission, s conversion.Scope) error { + return autoConvert_v1alpha1_WebhookAdmission_To_webhookadmission_WebhookAdmission(in, out, s) +} + +func autoConvert_webhookadmission_WebhookAdmission_To_v1alpha1_WebhookAdmission(in *webhookadmission.WebhookAdmission, out *WebhookAdmission, s conversion.Scope) error { + out.KubeConfigFile = in.KubeConfigFile + return nil +} + +// Convert_webhookadmission_WebhookAdmission_To_v1alpha1_WebhookAdmission is an autogenerated conversion function. +func Convert_webhookadmission_WebhookAdmission_To_v1alpha1_WebhookAdmission(in *webhookadmission.WebhookAdmission, out *WebhookAdmission, s conversion.Scope) error { + return autoConvert_webhookadmission_WebhookAdmission_To_v1alpha1_WebhookAdmission(in, out, s) +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000000..7c875e120227 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,51 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookAdmission) DeepCopyInto(out *WebhookAdmission) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookAdmission. +func (in *WebhookAdmission) DeepCopy() *WebhookAdmission { + if in == nil { + return nil + } + out := new(WebhookAdmission) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WebhookAdmission) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admission/v1alpha1/zz_generated.defaults.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.defaults.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/apis/admission/v1alpha1/zz_generated.defaults.go rename to vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.defaults.go diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/zz_generated.deepcopy.go new file mode 100644 index 000000000000..19da07207b29 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/zz_generated.deepcopy.go @@ -0,0 +1,51 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package webhookadmission + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookAdmission) DeepCopyInto(out *WebhookAdmission) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookAdmission. +func (in *WebhookAdmission) DeepCopy() *WebhookAdmission { + if in == nil { + return nil + } + out := new(WebhookAdmission) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *WebhookAdmission) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/authentication.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/authentication.go new file mode 100644 index 000000000000..dd956f140a74 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/authentication.go @@ -0,0 +1,156 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "fmt" + "io/ioutil" + "strings" + "time" + + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" +) + +// AuthenticationInfoResolverWrapper can be used to inject Dial function to the +// rest.Config generated by the resolver. +type AuthenticationInfoResolverWrapper func(AuthenticationInfoResolver) AuthenticationInfoResolver + +// AuthenticationInfoResolver builds rest.Config base on the server name. +type AuthenticationInfoResolver interface { + ClientConfigFor(server string) (*rest.Config, error) +} + +// AuthenticationInfoResolverFunc implements AuthenticationInfoResolver. +type AuthenticationInfoResolverFunc func(server string) (*rest.Config, error) + +//ClientConfigFor implements AuthenticationInfoResolver. +func (a AuthenticationInfoResolverFunc) ClientConfigFor(server string) (*rest.Config, error) { + return a(server) +} + +type defaultAuthenticationInfoResolver struct { + kubeconfig clientcmdapi.Config +} + +// NewDefaultAuthenticationInfoResolver generates an AuthenticationInfoResolver +// that builds rest.Config based on the kubeconfig file. kubeconfigFile is the +// path to the kubeconfig. +func NewDefaultAuthenticationInfoResolver(kubeconfigFile string) (AuthenticationInfoResolver, error) { + if len(kubeconfigFile) == 0 { + return &defaultAuthenticationInfoResolver{}, nil + } + + loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() + loadingRules.ExplicitPath = kubeconfigFile + loader := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &clientcmd.ConfigOverrides{}) + clientConfig, err := loader.RawConfig() + if err != nil { + return nil, err + } + + return &defaultAuthenticationInfoResolver{kubeconfig: clientConfig}, nil +} + +func (c *defaultAuthenticationInfoResolver) ClientConfigFor(server string) (*rest.Config, error) { + // exact match + if authConfig, ok := c.kubeconfig.AuthInfos[server]; ok { + return restConfigFromKubeconfig(authConfig) + } + + // star prefixed match + serverSteps := strings.Split(server, ".") + for i := 1; i < len(serverSteps); i++ { + nickName := "*." 
+ strings.Join(serverSteps[i:], ".") + if authConfig, ok := c.kubeconfig.AuthInfos[nickName]; ok { + return restConfigFromKubeconfig(authConfig) + } + } + + // if we're trying to hit the kube-apiserver and there wasn't an explicit config, use the in-cluster config + if server == "kubernetes.default.svc" { + // if we can find an in-cluster-config use that. If we can't, fall through. + inClusterConfig, err := rest.InClusterConfig() + if err == nil { + return setGlobalDefaults(inClusterConfig), nil + } + } + + // star (default) match + if authConfig, ok := c.kubeconfig.AuthInfos["*"]; ok { + return restConfigFromKubeconfig(authConfig) + } + + // use the current context from the kubeconfig if possible + if len(c.kubeconfig.CurrentContext) > 0 { + if currContext, ok := c.kubeconfig.Contexts[c.kubeconfig.CurrentContext]; ok { + if len(currContext.AuthInfo) > 0 { + if currAuth, ok := c.kubeconfig.AuthInfos[currContext.AuthInfo]; ok { + return restConfigFromKubeconfig(currAuth) + } + } + } + } + + // anonymous + return setGlobalDefaults(&rest.Config{}), nil +} + +func restConfigFromKubeconfig(configAuthInfo *clientcmdapi.AuthInfo) (*rest.Config, error) { + config := &rest.Config{} + + // blindly overwrite existing values based on precedence + if len(configAuthInfo.Token) > 0 { + config.BearerToken = configAuthInfo.Token + } else if len(configAuthInfo.TokenFile) > 0 { + tokenBytes, err := ioutil.ReadFile(configAuthInfo.TokenFile) + if err != nil { + return nil, err + } + config.BearerToken = string(tokenBytes) + } + if len(configAuthInfo.Impersonate) > 0 { + config.Impersonate = rest.ImpersonationConfig{ + UserName: configAuthInfo.Impersonate, + Groups: configAuthInfo.ImpersonateGroups, + Extra: configAuthInfo.ImpersonateUserExtra, + } + } + if len(configAuthInfo.ClientCertificate) > 0 || len(configAuthInfo.ClientCertificateData) > 0 { + config.CertFile = configAuthInfo.ClientCertificate + config.CertData = configAuthInfo.ClientCertificateData + config.KeyFile = configAuthInfo.ClientKey + config.KeyData = configAuthInfo.ClientKeyData + } + if len(configAuthInfo.Username) > 0 || len(configAuthInfo.Password) > 0 { + config.Username = configAuthInfo.Username + config.Password = configAuthInfo.Password + } + if configAuthInfo.AuthProvider != nil { + return nil, fmt.Errorf("auth provider not supported") + } + + return setGlobalDefaults(config), nil +} + +func setGlobalDefaults(config *rest.Config) *rest.Config { + config.UserAgent = "kube-apiserver-admission" + config.Timeout = 30 * time.Second + + return config +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/client.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/client.go new file mode 100644 index 000000000000..28fac414e176 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/client.go @@ -0,0 +1,175 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package config + +import ( + "encoding/json" + "errors" + "fmt" + "net" + "net/url" + + lru "github.com/hashicorp/golang-lru" + "k8s.io/api/admissionregistration/v1beta1" + "k8s.io/apimachinery/pkg/runtime" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + webhookerrors "k8s.io/apiserver/pkg/admission/plugin/webhook/errors" + "k8s.io/client-go/rest" +) + +const ( + defaultCacheSize = 200 +) + +var ( + ErrNeedServiceOrURL = errors.New("webhook configuration must have either service or URL") +) + +// ClientManager builds REST clients to talk to webhooks. It caches the clients +// to avoid duplicate creation. +type ClientManager struct { + authInfoResolver AuthenticationInfoResolver + serviceResolver ServiceResolver + negotiatedSerializer runtime.NegotiatedSerializer + cache *lru.Cache +} + +// NewClientManager creates a ClientManager. +func NewClientManager() (ClientManager, error) { + cache, err := lru.New(defaultCacheSize) + if err != nil { + return ClientManager{}, err + } + return ClientManager{ + cache: cache, + }, nil +} + +// SetAuthenticationInfoResolverWrapper sets the +// AuthenticationInfoResolverWrapper. +func (cm *ClientManager) SetAuthenticationInfoResolverWrapper(wrapper AuthenticationInfoResolverWrapper) { + if wrapper != nil { + cm.authInfoResolver = wrapper(cm.authInfoResolver) + } +} + +// SetAuthenticationInfoResolver sets the AuthenticationInfoResolver. +func (cm *ClientManager) SetAuthenticationInfoResolver(resolver AuthenticationInfoResolver) { + cm.authInfoResolver = resolver +} + +// SetServiceResolver sets the ServiceResolver. +func (cm *ClientManager) SetServiceResolver(sr ServiceResolver) { + if sr != nil { + cm.serviceResolver = sr + } +} + +// SetNegotiatedSerializer sets the NegotiatedSerializer. +func (cm *ClientManager) SetNegotiatedSerializer(n runtime.NegotiatedSerializer) { + cm.negotiatedSerializer = n +} + +// Validate checks if ClientManager is properly set up. +func (cm *ClientManager) Validate() error { + var errs []error + if cm.negotiatedSerializer == nil { + errs = append(errs, fmt.Errorf("the ClientManager requires a negotiatedSerializer")) + } + if cm.serviceResolver == nil { + errs = append(errs, fmt.Errorf("the ClientManager requires a serviceResolver")) + } + if cm.authInfoResolver == nil { + errs = append(errs, fmt.Errorf("the ClientManager requires an authInfoResolver")) + } + return utilerrors.NewAggregate(errs) +} + +// HookClient get a RESTClient from the cache, or constructs one based on the +// webhook configuration. +func (cm *ClientManager) HookClient(h *v1beta1.Webhook) (*rest.RESTClient, error) { + cacheKey, err := json.Marshal(h.ClientConfig) + if err != nil { + return nil, err + } + if client, ok := cm.cache.Get(string(cacheKey)); ok { + return client.(*rest.RESTClient), nil + } + + complete := func(cfg *rest.Config) (*rest.RESTClient, error) { + cfg.TLSClientConfig.CAData = h.ClientConfig.CABundle + cfg.ContentConfig.NegotiatedSerializer = cm.negotiatedSerializer + cfg.ContentConfig.ContentType = runtime.ContentTypeJSON + client, err := rest.UnversionedRESTClientFor(cfg) + if err == nil { + cm.cache.Add(string(cacheKey), client) + } + return client, err + } + + if svc := h.ClientConfig.Service; svc != nil { + serverName := svc.Name + "." 
+ svc.Namespace + ".svc" + restConfig, err := cm.authInfoResolver.ClientConfigFor(serverName) + if err != nil { + return nil, err + } + cfg := rest.CopyConfig(restConfig) + host := serverName + ":443" + cfg.Host = "https://" + host + if svc.Path != nil { + cfg.APIPath = *svc.Path + } + cfg.TLSClientConfig.ServerName = serverName + + delegateDialer := cfg.Dial + if delegateDialer == nil { + delegateDialer = net.Dial + } + cfg.Dial = func(network, addr string) (net.Conn, error) { + if addr == host { + u, err := cm.serviceResolver.ResolveEndpoint(svc.Namespace, svc.Name) + if err != nil { + return nil, err + } + addr = u.Host + } + return delegateDialer(network, addr) + } + + return complete(cfg) + } + + if h.ClientConfig.URL == nil { + return nil, &webhookerrors.ErrCallingWebhook{WebhookName: h.Name, Reason: ErrNeedServiceOrURL} + } + + u, err := url.Parse(*h.ClientConfig.URL) + if err != nil { + return nil, &webhookerrors.ErrCallingWebhook{WebhookName: h.Name, Reason: fmt.Errorf("Unparsable URL: %v", err)} + } + + restConfig, err := cm.authInfoResolver.ClientConfigFor(u.Host) + if err != nil { + return nil, err + } + + cfg := rest.CopyConfig(restConfig) + cfg.Host = u.Host + cfg.APIPath = u.Path + + return complete(cfg) +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/kubeconfig.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/kubeconfig.go new file mode 100644 index 000000000000..7cf0d319345a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/kubeconfig.go @@ -0,0 +1,68 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "fmt" + "io" + "io/ioutil" + "path" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission" + "k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1" +) + +var ( + scheme = runtime.NewScheme() + codecs = serializer.NewCodecFactory(scheme) +) + +func init() { + webhookadmission.AddToScheme(scheme) + v1alpha1.AddToScheme(scheme) +} + +// LoadConfig extract the KubeConfigFile from configFile +func LoadConfig(configFile io.Reader) (string, error) { + var kubeconfigFile string + if configFile != nil { + // we have a config so parse it. 
+ data, err := ioutil.ReadAll(configFile) + if err != nil { + return "", err + } + decoder := codecs.UniversalDecoder() + decodedObj, err := runtime.Decode(decoder, data) + if err != nil { + return "", err + } + config, ok := decodedObj.(*webhookadmission.WebhookAdmission) + if !ok { + return "", fmt.Errorf("unexpected type: %T", decodedObj) + } + + if !path.IsAbs(config.KubeConfigFile) { + return "", field.Invalid(field.NewPath("kubeConfigFile"), config.KubeConfigFile, "must be an absolute file path") + } + + kubeconfigFile = config.KubeConfigFile + } + return kubeconfigFile, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/serviceresolver.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/serviceresolver.go new file mode 100644 index 000000000000..47b96a709fe5 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/serviceresolver.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "errors" + "fmt" + "net/url" +) + +// ServiceResolver knows how to convert a service reference into an actual location. +type ServiceResolver interface { + ResolveEndpoint(namespace, name string) (*url.URL, error) +} + +type defaultServiceResolver struct{} + +func NewDefaultServiceResolver() ServiceResolver { + return &defaultServiceResolver{} +} + +// ResolveEndpoint constructs a service URL from a given namespace and name +// note that the name and namespace are required and by default all created addresses use HTTPS scheme. 
+// for example: +// name=ross namespace=andromeda resolves to https://ross.andromeda.svc:443 +func (sr defaultServiceResolver) ResolveEndpoint(namespace, name string) (*url.URL, error) { + if len(name) == 0 || len(namespace) == 0 { + return nil, errors.New("cannot resolve an empty service name or namespace") + } + return &url.URL{Scheme: "https", Host: fmt.Sprintf("%s.%s.svc:443", name, namespace)}, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/errors/BUILD b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/errors/BUILD new file mode 100644 index 000000000000..288007238431 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/errors/BUILD @@ -0,0 +1,38 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "errors.go", + "statuserror.go", + ], + importpath = "k8s.io/apiserver/pkg/admission/plugin/webhook/errors", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["statuserror_test.go"], + importpath = "k8s.io/apiserver/pkg/admission/plugin/webhook/errors", + library = ":go_default_library", + deps = ["//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/errors/doc.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/errors/doc.go new file mode 100644 index 000000000000..cac04f5b26c5 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/errors/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package errors contains utilities for admission webhook specific errors +package errors diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/errors/errors.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/errors/errors.go new file mode 100644 index 000000000000..2396152286c2 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/errors/errors.go @@ -0,0 +1,34 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package errors + +import "fmt" + +// ErrCallingWebhook is returned for transport-layer errors calling webhooks. It +// represents a failure to talk to the webhook, not the webhook rejecting a +// request. +type ErrCallingWebhook struct { + WebhookName string + Reason error +} + +func (e *ErrCallingWebhook) Error() string { + if e.Reason != nil { + return fmt.Sprintf("failed calling admission webhook %q: %v", e.WebhookName, e.Reason) + } + return fmt.Sprintf("failed calling admission webhook %q; no further details available", e.WebhookName) +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/errors/statuserror.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/errors/statuserror.go new file mode 100644 index 000000000000..f37dec0177b4 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/errors/statuserror.go @@ -0,0 +1,47 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package errors + +import ( + "fmt" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ToStatusErr returns a StatusError with information about the webhook plugin +func ToStatusErr(webhookName string, result *metav1.Status) *apierrors.StatusError { + deniedBy := fmt.Sprintf("admission webhook %q denied the request", webhookName) + const noExp = "without explanation" + + if result == nil { + result = &metav1.Status{Status: metav1.StatusFailure} + } + + switch { + case len(result.Message) > 0: + result.Message = fmt.Sprintf("%s: %s", deniedBy, result.Message) + case len(result.Reason) > 0: + result.Message = fmt.Sprintf("%s: %s", deniedBy, result.Reason) + default: + result.Message = fmt.Sprintf("%s %s", deniedBy, noExp) + } + + return &apierrors.StatusError{ + ErrStatus: *result, + } +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/initializer/BUILD b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/initializer/BUILD new file mode 100644 index 000000000000..14fa71007c7a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/initializer/BUILD @@ -0,0 +1,37 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["initializer.go"], + importpath = "k8s.io/apiserver/pkg/admission/plugin/webhook/initializer", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["initializer_test.go"], + importpath = "k8s.io/apiserver/pkg/admission/plugin/webhook/initializer", + library = ":go_default_library", + deps = [ + "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config:go_default_library", + ], +) + +filegroup( + name = 
"package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/initializer/initializer.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/initializer/initializer.go new file mode 100644 index 000000000000..398200b92cae --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/initializer/initializer.go @@ -0,0 +1,76 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package initializer + +import ( + "net/url" + + "k8s.io/apiserver/pkg/admission" + webhookconfig "k8s.io/apiserver/pkg/admission/plugin/webhook/config" +) + +// WantsServiceResolver defines a fuction that accepts a ServiceResolver for +// admission plugins that need to make calls to services. +type WantsServiceResolver interface { + SetServiceResolver(webhookconfig.ServiceResolver) +} + +// ServiceResolver knows how to convert a service reference into an actual +// location. +type ServiceResolver interface { + ResolveEndpoint(namespace, name string) (*url.URL, error) +} + +// WantsAuthenticationInfoResolverWrapper defines a function that wraps the standard AuthenticationInfoResolver +// to allow the apiserver to control what is returned as auth info +type WantsAuthenticationInfoResolverWrapper interface { + SetAuthenticationInfoResolverWrapper(webhookconfig.AuthenticationInfoResolverWrapper) + admission.InitializationValidator +} + +// PluginInitializer is used for initialization of the webhook admission plugin. 
+type PluginInitializer struct { + serviceResolver webhookconfig.ServiceResolver + authenticationInfoResolverWrapper webhookconfig.AuthenticationInfoResolverWrapper +} + +var _ admission.PluginInitializer = &PluginInitializer{} + +// NewPluginInitializer constructs new instance of PluginInitializer +func NewPluginInitializer( + authenticationInfoResolverWrapper webhookconfig.AuthenticationInfoResolverWrapper, + serviceResolver webhookconfig.ServiceResolver, +) *PluginInitializer { + return &PluginInitializer{ + authenticationInfoResolverWrapper: authenticationInfoResolverWrapper, + serviceResolver: serviceResolver, + } +} + +// Initialize checks the initialization interfaces implemented by each plugin +// and provide the appropriate initialization data +func (i *PluginInitializer) Initialize(plugin admission.Interface) { + if wants, ok := plugin.(WantsServiceResolver); ok { + wants.SetServiceResolver(i.serviceResolver) + } + + if wants, ok := plugin.(WantsAuthenticationInfoResolverWrapper); ok { + if i.authenticationInfoResolverWrapper != nil { + wants.SetAuthenticationInfoResolverWrapper(i.authenticationInfoResolverWrapper) + } + } +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/BUILD b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/BUILD new file mode 100644 index 000000000000..d94dc0f6b005 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/BUILD @@ -0,0 +1,71 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "admission.go", + "doc.go", + ], + importpath = "k8s.io/apiserver/pkg/admission/plugin/webhook/mutating", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/evanphx/json-patch:go_default_library", + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/api/admission/v1beta1:go_default_library", + "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/serializer/json:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission/configuration:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission/initializer:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission/metrics:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/errors:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/request:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/rules:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/versioned:go_default_library", + "//vendor/k8s.io/client-go/informers:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["admission_test.go"], + importpath = 
"k8s.io/apiserver/pkg/admission/plugin/webhook/mutating", + library = ":go_default_library", + deps = [ + "//vendor/k8s.io/api/admission/v1beta1:go_default_library", + "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/testcerts:go_default_library", + "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", + "//vendor/k8s.io/client-go/rest:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/admission.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/admission.go new file mode 100644 index 000000000000..f944152770d2 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/admission.go @@ -0,0 +1,321 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package mutating delegates admission checks to dynamically configured +// mutating webhooks. 
+package mutating + +import ( + "context" + "fmt" + "io" + "time" + + jsonpatch "github.com/evanphx/json-patch" + "github.com/golang/glog" + + admissionv1beta1 "k8s.io/api/admission/v1beta1" + "k8s.io/api/admissionregistration/v1beta1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/runtime/serializer/json" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/configuration" + genericadmissioninit "k8s.io/apiserver/pkg/admission/initializer" + admissionmetrics "k8s.io/apiserver/pkg/admission/metrics" + "k8s.io/apiserver/pkg/admission/plugin/webhook/config" + webhookerrors "k8s.io/apiserver/pkg/admission/plugin/webhook/errors" + "k8s.io/apiserver/pkg/admission/plugin/webhook/namespace" + "k8s.io/apiserver/pkg/admission/plugin/webhook/request" + "k8s.io/apiserver/pkg/admission/plugin/webhook/rules" + "k8s.io/apiserver/pkg/admission/plugin/webhook/versioned" + "k8s.io/client-go/informers" + clientset "k8s.io/client-go/kubernetes" +) + +const ( + // Name of admission plug-in + PluginName = "MutatingAdmissionWebhook" +) + +// Register registers a plugin +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(configFile io.Reader) (admission.Interface, error) { + plugin, err := NewMutatingWebhook(configFile) + if err != nil { + return nil, err + } + + return plugin, nil + }) +} + +// WebhookSource can list dynamic webhook plugins. +type WebhookSource interface { + Run(stopCh <-chan struct{}) + Webhooks() (*v1beta1.MutatingWebhookConfiguration, error) +} + +// NewMutatingWebhook returns a generic admission webhook plugin. +func NewMutatingWebhook(configFile io.Reader) (*MutatingWebhook, error) { + kubeconfigFile, err := config.LoadConfig(configFile) + if err != nil { + return nil, err + } + + cm, err := config.NewClientManager() + if err != nil { + return nil, err + } + authInfoResolver, err := config.NewDefaultAuthenticationInfoResolver(kubeconfigFile) + if err != nil { + return nil, err + } + // Set defaults which may be overridden later. + cm.SetAuthenticationInfoResolver(authInfoResolver) + cm.SetServiceResolver(config.NewDefaultServiceResolver()) + + return &MutatingWebhook{ + Handler: admission.NewHandler( + admission.Connect, + admission.Create, + admission.Delete, + admission.Update, + ), + clientManager: cm, + }, nil +} + +var _ admission.MutationInterface = &MutatingWebhook{} + +// MutatingWebhook is an implementation of admission.Interface. +type MutatingWebhook struct { + *admission.Handler + hookSource WebhookSource + namespaceMatcher namespace.Matcher + clientManager config.ClientManager + convertor versioned.Convertor + jsonSerializer runtime.Serializer +} + +var ( + _ = genericadmissioninit.WantsExternalKubeClientSet(&MutatingWebhook{}) +) + +// TODO find a better way wire this, but keep this pull small for now. +func (a *MutatingWebhook) SetAuthenticationInfoResolverWrapper(wrapper config.AuthenticationInfoResolverWrapper) { + a.clientManager.SetAuthenticationInfoResolverWrapper(wrapper) +} + +// SetServiceResolver sets a service resolver for the webhook admission plugin. +// Passing a nil resolver does not have an effect, instead a default one will be used. 
+func (a *MutatingWebhook) SetServiceResolver(sr config.ServiceResolver) { + a.clientManager.SetServiceResolver(sr) +} + +// SetScheme sets a serializer(NegotiatedSerializer) which is derived from the scheme +func (a *MutatingWebhook) SetScheme(scheme *runtime.Scheme) { + if scheme != nil { + a.clientManager.SetNegotiatedSerializer(serializer.NegotiatedSerializerWrapper(runtime.SerializerInfo{ + Serializer: serializer.NewCodecFactory(scheme).LegacyCodec(admissionv1beta1.SchemeGroupVersion), + })) + a.convertor.Scheme = scheme + a.jsonSerializer = json.NewSerializer(json.DefaultMetaFactory, scheme, scheme, false) + } +} + +// WantsExternalKubeClientSet defines a function which sets external ClientSet for admission plugins that need it +func (a *MutatingWebhook) SetExternalKubeClientSet(client clientset.Interface) { + a.namespaceMatcher.Client = client + a.hookSource = configuration.NewMutatingWebhookConfigurationManager(client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations()) +} + +// SetExternalKubeInformerFactory implements the WantsExternalKubeInformerFactory interface. +func (a *MutatingWebhook) SetExternalKubeInformerFactory(f informers.SharedInformerFactory) { + namespaceInformer := f.Core().V1().Namespaces() + a.namespaceMatcher.NamespaceLister = namespaceInformer.Lister() + a.SetReadyFunc(namespaceInformer.Informer().HasSynced) +} + +// ValidateInitialization implements the InitializationValidator interface. +func (a *MutatingWebhook) ValidateInitialization() error { + if a.hookSource == nil { + return fmt.Errorf("MutatingWebhook admission plugin requires a Kubernetes client to be provided") + } + if a.jsonSerializer == nil { + return fmt.Errorf("MutatingWebhook admission plugin's jsonSerializer is not properly setup") + } + if err := a.namespaceMatcher.Validate(); err != nil { + return fmt.Errorf("MutatingWebhook.namespaceMatcher is not properly setup: %v", err) + } + if err := a.clientManager.Validate(); err != nil { + return fmt.Errorf("MutatingWebhook.clientManager is not properly setup: %v", err) + } + if err := a.convertor.Validate(); err != nil { + return fmt.Errorf("MutatingWebhook.convertor is not properly setup: %v", err) + } + go a.hookSource.Run(wait.NeverStop) + return nil +} + +func (a *MutatingWebhook) loadConfiguration(attr admission.Attributes) (*v1beta1.MutatingWebhookConfiguration, error) { + hookConfig, err := a.hookSource.Webhooks() + // if Webhook configuration is disabled, fail open + if err == configuration.ErrDisabled { + return &v1beta1.MutatingWebhookConfiguration{}, nil + } + if err != nil { + e := apierrors.NewServerTimeout(attr.GetResource().GroupResource(), string(attr.GetOperation()), 1) + e.ErrStatus.Message = fmt.Sprintf("Unable to refresh the Webhook configuration: %v", err) + e.ErrStatus.Reason = "LoadingConfiguration" + e.ErrStatus.Details.Causes = append(e.ErrStatus.Details.Causes, metav1.StatusCause{ + Type: "MutatingWebhookConfigurationFailure", + Message: "An error has occurred while refreshing the MutatingWebhook configuration, no resources can be created/updated/deleted/connected until a refresh succeeds.", + }) + return nil, e + } + return hookConfig, nil +} + +// Admit makes an admission decision based on the request attributes. 
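A hedged construction sketch for the setters above; the only assumption is a caller-supplied *runtime.Scheme, and the client and informer wiring from SetExternalKubeClientSet / SetExternalKubeInformerFactory would still be required before ValidateInitialization passes.

func newWiredMutatingWebhook(scheme *runtime.Scheme) (*MutatingWebhook, error) {
	// A nil reader means no plugin configuration file; LoadConfig then yields an empty kubeconfig path.
	plugin, err := NewMutatingWebhook(nil)
	if err != nil {
		return nil, err
	}
	// Derive the webhook payload serializers from the caller's scheme.
	plugin.SetScheme(scheme)
	// NewMutatingWebhook already installed the default <name>.<namespace>.svc resolver;
	// this call only shows where a custom resolver would be plugged in.
	plugin.SetServiceResolver(config.NewDefaultServiceResolver())
	return plugin, nil
}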
+func (a *MutatingWebhook) Admit(attr admission.Attributes) error { + hookConfig, err := a.loadConfiguration(attr) + if err != nil { + return err + } + hooks := hookConfig.Webhooks + ctx := context.TODO() + + var relevantHooks []*v1beta1.Webhook + for i := range hooks { + call, err := a.shouldCallHook(&hooks[i], attr) + if err != nil { + return err + } + if call { + relevantHooks = append(relevantHooks, &hooks[i]) + } + } + + if len(relevantHooks) == 0 { + // no matching hooks + return nil + } + + // convert the object to the external version before sending it to the webhook + versionedAttr := versioned.Attributes{ + Attributes: attr, + } + if oldObj := attr.GetOldObject(); oldObj != nil { + out, err := a.convertor.ConvertToGVK(oldObj, attr.GetKind()) + if err != nil { + return apierrors.NewInternalError(err) + } + versionedAttr.OldObject = out + } + if obj := attr.GetObject(); obj != nil { + out, err := a.convertor.ConvertToGVK(obj, attr.GetKind()) + if err != nil { + return apierrors.NewInternalError(err) + } + versionedAttr.Object = out + } + + for _, hook := range relevantHooks { + t := time.Now() + err := a.callAttrMutatingHook(ctx, hook, versionedAttr) + admissionmetrics.Metrics.ObserveWebhook(time.Since(t), err != nil, attr, "admit", hook.Name) + if err == nil { + continue + } + + ignoreClientCallFailures := hook.FailurePolicy != nil && *hook.FailurePolicy == v1beta1.Ignore + if callErr, ok := err.(*webhookerrors.ErrCallingWebhook); ok { + if ignoreClientCallFailures { + glog.Warningf("Failed calling webhook, failing open %v: %v", hook.Name, callErr) + utilruntime.HandleError(callErr) + continue + } + glog.Warningf("Failed calling webhook, failing closed %v: %v", hook.Name, err) + } + return apierrors.NewInternalError(err) + } + + // convert attr.Object to the internal version + return a.convertor.Convert(versionedAttr.Object, attr.GetObject()) +} + +// TODO: factor into a common place along with the validating webhook version. 
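The patch handling in callAttrMutatingHook below is plain RFC 6902 JSON Patch; a small sketch using the jsonpatch import already present in this file, with invented JSON literals.

func applyExamplePatch() ([]byte, error) {
	// Both literals are made up for illustration only.
	objJS := []byte(`{"metadata":{"name":"pod1"}}`)
	patchJS := []byte(`[{"op":"add","path":"/metadata/labels","value":{"injected":"true"}}]`)

	patchObj, err := jsonpatch.DecodePatch(patchJS)
	if err != nil {
		return nil, err
	}
	// The result carries metadata.labels.injected == "true" alongside the original name.
	return patchObj.Apply(objJS)
}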
+func (a *MutatingWebhook) shouldCallHook(h *v1beta1.Webhook, attr admission.Attributes) (bool, *apierrors.StatusError) { + var matches bool + for _, r := range h.Rules { + m := rules.Matcher{Rule: r, Attr: attr} + if m.Matches() { + matches = true + break + } + } + if !matches { + return false, nil + } + + return a.namespaceMatcher.MatchNamespaceSelector(h, attr) +} + +// note that callAttrMutatingHook updates attr +func (a *MutatingWebhook) callAttrMutatingHook(ctx context.Context, h *v1beta1.Webhook, attr versioned.Attributes) error { + // Make the webhook request + request := request.CreateAdmissionReview(attr) + client, err := a.clientManager.HookClient(h) + if err != nil { + return &webhookerrors.ErrCallingWebhook{WebhookName: h.Name, Reason: err} + } + response := &admissionv1beta1.AdmissionReview{} + if err := client.Post().Context(ctx).Body(&request).Do().Into(response); err != nil { + return &webhookerrors.ErrCallingWebhook{WebhookName: h.Name, Reason: err} + } + + if !response.Response.Allowed { + return webhookerrors.ToStatusErr(h.Name, response.Response.Result) + } + + patchJS := response.Response.Patch + if len(patchJS) == 0 { + return nil + } + patchObj, err := jsonpatch.DecodePatch(patchJS) + if err != nil { + return apierrors.NewInternalError(err) + } + objJS, err := runtime.Encode(a.jsonSerializer, attr.Object) + if err != nil { + return apierrors.NewInternalError(err) + } + patchedJS, err := patchObj.Apply(objJS) + if err != nil { + return apierrors.NewInternalError(err) + } + // TODO: if we have multiple mutating webhooks, we can remember the json + // instead of encoding and decoding for each one. + if _, _, err := a.jsonSerializer.Decode(patchedJS, nil, attr.Object); err != nil { + return apierrors.NewInternalError(err) + } + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/doc.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/doc.go new file mode 100644 index 000000000000..664a9a69a354 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package mutating makes calls to mutating webhooks during the admission +// process. 
+package mutating diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/BUILD b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/BUILD new file mode 100644 index 000000000000..a75286a6886b --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/BUILD @@ -0,0 +1,52 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "matcher.go", + ], + importpath = "k8s.io/apiserver/pkg/admission/plugin/webhook/namespace", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/listers/core/v1:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["matcher_test.go"], + importpath = "k8s.io/apiserver/pkg/admission/plugin/webhook/namespace", + library = ":go_default_library", + deps = [ + "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/doc.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/doc.go new file mode 100644 index 000000000000..6899b55d83a2 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package namespace defines the utilities that are used by the webhook +// plugin to decide if a webhook should be applied to an object based on its +// namespace. 
+package namespace diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/matcher.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/matcher.go new file mode 100644 index 000000000000..b9157b9ba76a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/matcher.go @@ -0,0 +1,117 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package namespace + +import ( + "fmt" + + "k8s.io/api/admissionregistration/v1beta1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apiserver/pkg/admission" + clientset "k8s.io/client-go/kubernetes" + corelisters "k8s.io/client-go/listers/core/v1" +) + +// Matcher decides if a request is exempted by the NamespaceSelector of a +// webhook configuration. +type Matcher struct { + NamespaceLister corelisters.NamespaceLister + Client clientset.Interface +} + +// Validate checks if the Matcher has a NamespaceLister and Client. +func (m *Matcher) Validate() error { + var errs []error + if m.NamespaceLister == nil { + errs = append(errs, fmt.Errorf("the namespace matcher requires a namespaceLister")) + } + if m.Client == nil { + errs = append(errs, fmt.Errorf("the namespace matcher requires a namespaceLister")) + } + return utilerrors.NewAggregate(errs) +} + +// GetNamespaceLabels gets the labels of the namespace related to the attr. +func (m *Matcher) GetNamespaceLabels(attr admission.Attributes) (map[string]string, error) { + // If the request itself is creating or updating a namespace, then get the + // labels from attr.Object, because namespaceLister doesn't have the latest + // namespace yet. + // + // However, if the request is deleting a namespace, then get the label from + // the namespace in the namespaceLister, because a delete request is not + // going to change the object, and attr.Object will be a DeleteOptions + // rather than a namespace object. + if attr.GetResource().Resource == "namespaces" && + len(attr.GetSubresource()) == 0 && + (attr.GetOperation() == admission.Create || attr.GetOperation() == admission.Update) { + accessor, err := meta.Accessor(attr.GetObject()) + if err != nil { + return nil, err + } + return accessor.GetLabels(), nil + } + + namespaceName := attr.GetNamespace() + namespace, err := m.NamespaceLister.Get(namespaceName) + if err != nil && !apierrors.IsNotFound(err) { + return nil, err + } + if apierrors.IsNotFound(err) { + // in case of latency in our caches, make a call direct to storage to verify that it truly exists or not + namespace, err = m.Client.CoreV1().Namespaces().Get(namespaceName, metav1.GetOptions{}) + if err != nil { + return nil, err + } + } + return namespace.Labels, nil +} + +// MatchNamespaceSelector decides whether the request matches the +// namespaceSelector of the webhook. Only when they match, the webhook is called.
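The final step of MatchNamespaceSelector below is ordinary label-selector matching; a short sketch with an invented selector and namespace label set, using only the metav1 and labels imports already present in this file.

func exampleSelectorMatch() (bool, error) {
	// Invented selector and labels, purely for illustration.
	ls := &metav1.LabelSelector{MatchLabels: map[string]string{"env": "prod"}}
	selector, err := metav1.LabelSelectorAsSelector(ls)
	if err != nil {
		return false, err
	}
	// Returns true: the namespace carries env=prod; extra labels are ignored.
	return selector.Matches(labels.Set{"env": "prod", "team": "infra"}), nil
}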
+func (m *Matcher) MatchNamespaceSelector(h *v1beta1.Webhook, attr admission.Attributes) (bool, *apierrors.StatusError) { + namespaceName := attr.GetNamespace() + if len(namespaceName) == 0 && attr.GetResource().Resource != "namespaces" { + // If the request is about a cluster scoped resource, and it is not a + // namespace, it is exempted from all webhooks for now. + // TODO: figure out a way selective exempt cluster scoped resources. + // Also update the comment in types.go + return false, nil + } + namespaceLabels, err := m.GetNamespaceLabels(attr) + // this means the namespace is not found, for backwards compatibility, + // return a 404 + if apierrors.IsNotFound(err) { + status, ok := err.(apierrors.APIStatus) + if !ok { + return false, apierrors.NewInternalError(err) + } + return false, &apierrors.StatusError{status.Status()} + } + if err != nil { + return false, apierrors.NewInternalError(err) + } + // TODO: adding an LRU cache to cache the translation + selector, err := metav1.LabelSelectorAsSelector(h.NamespaceSelector) + if err != nil { + return false, apierrors.NewInternalError(err) + } + return selector.Matches(labels.Set(namespaceLabels)), nil +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/request/BUILD b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/request/BUILD new file mode 100644 index 000000000000..036015274f01 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/request/BUILD @@ -0,0 +1,33 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "admissionreview.go", + "doc.go", + ], + importpath = "k8s.io/apiserver/pkg/admission/plugin/webhook/request", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/api/admission/v1beta1:go_default_library", + "//vendor/k8s.io/api/authentication/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/request/admissionreview.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/request/admissionreview.go new file mode 100644 index 000000000000..5b8a41db29e8 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/request/admissionreview.go @@ -0,0 +1,71 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package request + +import ( + admissionv1beta1 "k8s.io/api/admission/v1beta1" + authenticationv1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/apiserver/pkg/admission" +) + +// CreateAdmissionReview creates an AdmissionReview for the provided admission.Attributes +func CreateAdmissionReview(attr admission.Attributes) admissionv1beta1.AdmissionReview { + gvk := attr.GetKind() + gvr := attr.GetResource() + aUserInfo := attr.GetUserInfo() + userInfo := authenticationv1.UserInfo{ + Extra: make(map[string]authenticationv1.ExtraValue), + Groups: aUserInfo.GetGroups(), + UID: aUserInfo.GetUID(), + Username: aUserInfo.GetName(), + } + + // Convert the extra information in the user object + for key, val := range aUserInfo.GetExtra() { + userInfo.Extra[key] = authenticationv1.ExtraValue(val) + } + + return admissionv1beta1.AdmissionReview{ + Request: &admissionv1beta1.AdmissionRequest{ + UID: uuid.NewUUID(), + Kind: metav1.GroupVersionKind{ + Group: gvk.Group, + Kind: gvk.Kind, + Version: gvk.Version, + }, + Resource: metav1.GroupVersionResource{ + Group: gvr.Group, + Resource: gvr.Resource, + Version: gvr.Version, + }, + SubResource: attr.GetSubresource(), + Name: attr.GetName(), + Namespace: attr.GetNamespace(), + Operation: admissionv1beta1.Operation(attr.GetOperation()), + UserInfo: userInfo, + Object: runtime.RawExtension{ + Object: attr.GetObject(), + }, + OldObject: runtime.RawExtension{ + Object: attr.GetOldObject(), + }, + }, + } +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/request/doc.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/request/doc.go new file mode 100644 index 000000000000..8e207e6271e3 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/request/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package request creates admissionReview request based on admission attributes. 
+package request diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/rules/BUILD b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/rules/BUILD new file mode 100644 index 000000000000..6152811d1368 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/rules/BUILD @@ -0,0 +1,38 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["rules.go"], + importpath = "k8s.io/apiserver/pkg/admission/plugin/webhook/rules", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["rules_test.go"], + importpath = "k8s.io/apiserver/pkg/admission/plugin/webhook/rules", + library = ":go_default_library", + deps = [ + "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/webhook/rules.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/rules/rules.go similarity index 79% rename from vendor/k8s.io/kubernetes/plugin/pkg/admission/webhook/rules.go rename to vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/rules/rules.go index 6fc734e8de76..eb99357569d4 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/webhook/rules.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/rules/rules.go @@ -14,22 +14,23 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package webhook checks a webhook for configured operation admission -package webhook +package rules import ( "strings" - "k8s.io/api/admissionregistration/v1alpha1" + "k8s.io/api/admissionregistration/v1beta1" "k8s.io/apiserver/pkg/admission" ) -type RuleMatcher struct { - Rule v1alpha1.RuleWithOperations +// Matcher determines if the Attr matches the Rule. +type Matcher struct { + Rule v1beta1.RuleWithOperations Attr admission.Attributes } -func (r *RuleMatcher) Matches() bool { +// Matches returns if the Attr matches the Rule. +func (r *Matcher) Matches() bool { return r.operation() && r.group() && r.version() && @@ -49,23 +50,23 @@ func exactOrWildcard(items []string, requested string) bool { return false } -func (r *RuleMatcher) group() bool { +func (r *Matcher) group() bool { return exactOrWildcard(r.Rule.APIGroups, r.Attr.GetResource().Group) } -func (r *RuleMatcher) version() bool { +func (r *Matcher) version() bool { return exactOrWildcard(r.Rule.APIVersions, r.Attr.GetResource().Version) } -func (r *RuleMatcher) operation() bool { +func (r *Matcher) operation() bool { attrOp := r.Attr.GetOperation() for _, op := range r.Rule.Operations { - if op == v1alpha1.OperationAll { + if op == v1beta1.OperationAll { return true } // The constants are the same such that this is a valid cast (and this // is tested). 
- if op == v1alpha1.OperationType(attrOp) { + if op == v1beta1.OperationType(attrOp) { return true } } @@ -80,7 +81,7 @@ func splitResource(resSub string) (res, sub string) { return parts[0], "" } -func (r *RuleMatcher) resource() bool { +func (r *Matcher) resource() bool { opRes, opSub := r.Attr.GetResource().Resource, r.Attr.GetSubresource() for _, res := range r.Rule.Resources { res, sub := splitResource(res) diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/BUILD b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/BUILD new file mode 100644 index 000000000000..f4a982d121a2 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/BUILD @@ -0,0 +1,71 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "admission.go", + "doc.go", + ], + importpath = "k8s.io/apiserver/pkg/admission/plugin/webhook/validating", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/api/admission/v1beta1:go_default_library", + "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission/configuration:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission/initializer:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission/metrics:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/errors:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/request:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/rules:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/versioned:go_default_library", + "//vendor/k8s.io/client-go/informers:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["admission_test.go"], + importpath = "k8s.io/apiserver/pkg/admission/plugin/webhook/validating", + library = ":go_default_library", + deps = [ + "//vendor/k8s.io/api/admission/v1beta1:go_default_library", + "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", + 
"//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/testcerts:go_default_library", + "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", + "//vendor/k8s.io/client-go/rest:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/admission.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/admission.go new file mode 100644 index 000000000000..b88556631cfb --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/admission.go @@ -0,0 +1,326 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package validating delegates admission checks to dynamically configured +// validating webhooks. +package validating + +import ( + "context" + "fmt" + "io" + "sync" + "time" + + "github.com/golang/glog" + + admissionv1beta1 "k8s.io/api/admission/v1beta1" + "k8s.io/api/admissionregistration/v1beta1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/configuration" + genericadmissioninit "k8s.io/apiserver/pkg/admission/initializer" + admissionmetrics "k8s.io/apiserver/pkg/admission/metrics" + "k8s.io/apiserver/pkg/admission/plugin/webhook/config" + webhookadmissionapi "k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission" + webhookadmissionapiv1alpha1 "k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1" + webhookerrors "k8s.io/apiserver/pkg/admission/plugin/webhook/errors" + "k8s.io/apiserver/pkg/admission/plugin/webhook/namespace" + "k8s.io/apiserver/pkg/admission/plugin/webhook/request" + "k8s.io/apiserver/pkg/admission/plugin/webhook/rules" + "k8s.io/apiserver/pkg/admission/plugin/webhook/versioned" + "k8s.io/client-go/informers" + clientset "k8s.io/client-go/kubernetes" +) + +const ( + // Name of admission plug-in + PluginName = "ValidatingAdmissionWebhook" +) + +// Register registers a plugin +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(configFile io.Reader) (admission.Interface, error) { + plugin, err := NewValidatingAdmissionWebhook(configFile) + if err != nil { + return nil, err + } + + return plugin, nil + }) + // add our config types + webhookadmissionapi.AddToScheme(plugins.ConfigScheme) + webhookadmissionapiv1alpha1.AddToScheme(plugins.ConfigScheme) +} + +// WebhookSource can list dynamic webhook plugins. 
+type WebhookSource interface { + Run(stopCh <-chan struct{}) + Webhooks() (*v1beta1.ValidatingWebhookConfiguration, error) +} + +// NewValidatingAdmissionWebhook returns a generic admission webhook plugin. +func NewValidatingAdmissionWebhook(configFile io.Reader) (*ValidatingAdmissionWebhook, error) { + kubeconfigFile, err := config.LoadConfig(configFile) + if err != nil { + return nil, err + } + + cm, err := config.NewClientManager() + if err != nil { + return nil, err + } + authInfoResolver, err := config.NewDefaultAuthenticationInfoResolver(kubeconfigFile) + if err != nil { + return nil, err + } + // Set defaults which may be overridden later. + cm.SetAuthenticationInfoResolver(authInfoResolver) + cm.SetServiceResolver(config.NewDefaultServiceResolver()) + + return &ValidatingAdmissionWebhook{ + Handler: admission.NewHandler( + admission.Connect, + admission.Create, + admission.Delete, + admission.Update, + ), + clientManager: cm, + }, nil +} + +var _ admission.ValidationInterface = &ValidatingAdmissionWebhook{} + +// ValidatingAdmissionWebhook is an implementation of admission.Interface. +type ValidatingAdmissionWebhook struct { + *admission.Handler + hookSource WebhookSource + namespaceMatcher namespace.Matcher + clientManager config.ClientManager + convertor versioned.Convertor +} + +var ( + _ = genericadmissioninit.WantsExternalKubeClientSet(&ValidatingAdmissionWebhook{}) +) + +// TODO find a better way wire this, but keep this pull small for now. +func (a *ValidatingAdmissionWebhook) SetAuthenticationInfoResolverWrapper(wrapper config.AuthenticationInfoResolverWrapper) { + a.clientManager.SetAuthenticationInfoResolverWrapper(wrapper) +} + +// SetServiceResolver sets a service resolver for the webhook admission plugin. +// Passing a nil resolver does not have an effect, instead a default one will be used. +func (a *ValidatingAdmissionWebhook) SetServiceResolver(sr config.ServiceResolver) { + a.clientManager.SetServiceResolver(sr) +} + +// SetScheme sets a serializer(NegotiatedSerializer) which is derived from the scheme +func (a *ValidatingAdmissionWebhook) SetScheme(scheme *runtime.Scheme) { + if scheme != nil { + a.clientManager.SetNegotiatedSerializer(serializer.NegotiatedSerializerWrapper(runtime.SerializerInfo{ + Serializer: serializer.NewCodecFactory(scheme).LegacyCodec(admissionv1beta1.SchemeGroupVersion), + })) + a.convertor.Scheme = scheme + } +} + +// WantsExternalKubeClientSet defines a function which sets external ClientSet for admission plugins that need it +func (a *ValidatingAdmissionWebhook) SetExternalKubeClientSet(client clientset.Interface) { + a.namespaceMatcher.Client = client + a.hookSource = configuration.NewValidatingWebhookConfigurationManager(client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations()) +} + +// SetExternalKubeInformerFactory implements the WantsExternalKubeInformerFactory interface. +func (a *ValidatingAdmissionWebhook) SetExternalKubeInformerFactory(f informers.SharedInformerFactory) { + namespaceInformer := f.Core().V1().Namespaces() + a.namespaceMatcher.NamespaceLister = namespaceInformer.Lister() + a.SetReadyFunc(namespaceInformer.Informer().HasSynced) +} + +// ValidateInitialization implements the InitializationValidator interface. 
+func (a *ValidatingAdmissionWebhook) ValidateInitialization() error { + if a.hookSource == nil { + return fmt.Errorf("ValidatingAdmissionWebhook admission plugin requires a Kubernetes client to be provided") + } + if err := a.namespaceMatcher.Validate(); err != nil { + return fmt.Errorf("ValidatingAdmissionWebhook.namespaceMatcher is not properly setup: %v", err) + } + if err := a.clientManager.Validate(); err != nil { + return fmt.Errorf("ValidatingAdmissionWebhook.clientManager is not properly setup: %v", err) + } + if err := a.convertor.Validate(); err != nil { + return fmt.Errorf("ValidatingAdmissionWebhook.convertor is not properly setup: %v", err) + } + go a.hookSource.Run(wait.NeverStop) + return nil +} + +func (a *ValidatingAdmissionWebhook) loadConfiguration(attr admission.Attributes) (*v1beta1.ValidatingWebhookConfiguration, error) { + hookConfig, err := a.hookSource.Webhooks() + // if Webhook configuration is disabled, fail open + if err == configuration.ErrDisabled { + return &v1beta1.ValidatingWebhookConfiguration{}, nil + } + if err != nil { + e := apierrors.NewServerTimeout(attr.GetResource().GroupResource(), string(attr.GetOperation()), 1) + e.ErrStatus.Message = fmt.Sprintf("Unable to refresh the Webhook configuration: %v", err) + e.ErrStatus.Reason = "LoadingConfiguration" + e.ErrStatus.Details.Causes = append(e.ErrStatus.Details.Causes, metav1.StatusCause{ + Type: "ValidatingWebhookConfigurationFailure", + Message: "An error has occurred while refreshing the ValidatingWebhook configuration, no resources can be created/updated/deleted/connected until a refresh succeeds.", + }) + return nil, e + } + return hookConfig, nil +} + +// Validate makes an admission decision based on the request attributes. +func (a *ValidatingAdmissionWebhook) Validate(attr admission.Attributes) error { + hookConfig, err := a.loadConfiguration(attr) + if err != nil { + return err + } + hooks := hookConfig.Webhooks + ctx := context.TODO() + + var relevantHooks []*v1beta1.Webhook + for i := range hooks { + call, err := a.shouldCallHook(&hooks[i], attr) + if err != nil { + return err + } + if call { + relevantHooks = append(relevantHooks, &hooks[i]) + } + } + + if len(relevantHooks) == 0 { + // no matching hooks + return nil + } + + // convert the object to the external version before sending it to the webhook + versionedAttr := versioned.Attributes{ + Attributes: attr, + } + if oldObj := attr.GetOldObject(); oldObj != nil { + out, err := a.convertor.ConvertToGVK(oldObj, attr.GetKind()) + if err != nil { + return apierrors.NewInternalError(err) + } + versionedAttr.OldObject = out + } + if obj := attr.GetObject(); obj != nil { + out, err := a.convertor.ConvertToGVK(obj, attr.GetKind()) + if err != nil { + return apierrors.NewInternalError(err) + } + versionedAttr.Object = out + } + + wg := sync.WaitGroup{} + errCh := make(chan error, len(relevantHooks)) + wg.Add(len(relevantHooks)) + for i := range relevantHooks { + go func(hook *v1beta1.Webhook) { + defer wg.Done() + + t := time.Now() + err := a.callHook(ctx, hook, versionedAttr) + admissionmetrics.Metrics.ObserveWebhook(time.Since(t), err != nil, attr, "validating", hook.Name) + if err == nil { + return + } + + ignoreClientCallFailures := hook.FailurePolicy != nil && *hook.FailurePolicy == v1beta1.Ignore + if callErr, ok := err.(*webhookerrors.ErrCallingWebhook); ok { + if ignoreClientCallFailures { + glog.Warningf("Failed calling webhook, failing open %v: %v", hook.Name, callErr) + utilruntime.HandleError(callErr) + return + } + + 
glog.Warningf("Failed calling webhook, failing closed %v: %v", hook.Name, err) + errCh <- apierrors.NewInternalError(err) + return + } + + glog.Warningf("rejected by webhook %q: %#v", hook.Name, err) + errCh <- err + }(relevantHooks[i]) + } + wg.Wait() + close(errCh) + + var errs []error + for e := range errCh { + errs = append(errs, e) + } + if len(errs) == 0 { + return nil + } + if len(errs) > 1 { + for i := 1; i < len(errs); i++ { + // TODO: merge status errors; until then, just return the first one. + utilruntime.HandleError(errs[i]) + } + } + return errs[0] +} + +// TODO: factor into a common place along with the validating webhook version. +func (a *ValidatingAdmissionWebhook) shouldCallHook(h *v1beta1.Webhook, attr admission.Attributes) (bool, *apierrors.StatusError) { + var matches bool + for _, r := range h.Rules { + m := rules.Matcher{Rule: r, Attr: attr} + if m.Matches() { + matches = true + break + } + } + if !matches { + return false, nil + } + + return a.namespaceMatcher.MatchNamespaceSelector(h, attr) +} + +func (a *ValidatingAdmissionWebhook) callHook(ctx context.Context, h *v1beta1.Webhook, attr admission.Attributes) error { + // Make the webhook request + request := request.CreateAdmissionReview(attr) + client, err := a.clientManager.HookClient(h) + if err != nil { + return &webhookerrors.ErrCallingWebhook{WebhookName: h.Name, Reason: err} + } + response := &admissionv1beta1.AdmissionReview{} + if err := client.Post().Context(ctx).Body(&request).Do().Into(response); err != nil { + return &webhookerrors.ErrCallingWebhook{WebhookName: h.Name, Reason: err} + } + + if response.Response == nil { + return &webhookerrors.ErrCallingWebhook{WebhookName: h.Name, Reason: fmt.Errorf("Webhook response was absent")} + } + if response.Response.Allowed { + return nil + } + return webhookerrors.ToStatusErr(h.Name, response.Response.Result) +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/doc.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/doc.go new file mode 100644 index 000000000000..1dcc4b696f5e --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package validating makes calls to validating (i.e., non-mutating) webhooks +// during the admission process. 
+package validating diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/versioned/BUILD b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/versioned/BUILD new file mode 100644 index 000000000000..4bd5efbf3839 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/versioned/BUILD @@ -0,0 +1,47 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "attributes.go", + "conversion.go", + "doc.go", + ], + importpath = "k8s.io/apiserver/pkg/admission/plugin/webhook/versioned", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["conversion_test.go"], + importpath = "k8s.io/apiserver/pkg/admission/plugin/webhook/versioned", + library = ":go_default_library", + deps = [ + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apiserver/pkg/apis/example:go_default_library", + "//vendor/k8s.io/apiserver/pkg/apis/example/v1:go_default_library", + "//vendor/k8s.io/apiserver/pkg/apis/example2/v1:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/versioned/attributes.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/versioned/attributes.go new file mode 100644 index 000000000000..58f8ae6aa378 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/versioned/attributes.go @@ -0,0 +1,42 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package versioned + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/admission" +) + +// Attributes is a wrapper around the original admission attributes. It allows +// override the internal objects with the versioned ones. +type Attributes struct { + admission.Attributes + OldObject runtime.Object + Object runtime.Object +} + +// GetObject overrides the original GetObjects() and it returns the versioned +// object. +func (v Attributes) GetObject() runtime.Object { + return v.Object +} + +// GetOldObject overrides the original GetOldObjects() and it returns the +// versioned oldObject. 
+func (v Attributes) GetOldObject() runtime.Object { + return v.OldObject +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/versioned/conversion.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/versioned/conversion.go new file mode 100644 index 000000000000..a1ba712fc743 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/versioned/conversion.go @@ -0,0 +1,67 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package versioned + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// Convertor converts objects to the desired version. +type Convertor struct { + Scheme *runtime.Scheme +} + +// Convert converts the in object to the out object and returns an error if the +// conversion fails. +func (c Convertor) Convert(in runtime.Object, out runtime.Object) error { + // For custom resources, because ConvertToGVK reuses the passed in object as + // the output. c.Scheme.Convert resets the objects to empty if in == out, so + // we skip the conversion if that's the case. + if in == out { + return nil + } + return c.Scheme.Convert(in, out, nil) +} + +// ConvertToGVK converts object to the desired gvk. +func (c Convertor) ConvertToGVK(obj runtime.Object, gvk schema.GroupVersionKind) (runtime.Object, error) { + // Unlike other resources, custom resources do not have internal version, so + // if obj is a custom resource, it should not need conversion. + if obj.GetObjectKind().GroupVersionKind() == gvk { + return obj, nil + } + out, err := c.Scheme.New(gvk) + if err != nil { + return nil, err + } + err = c.Scheme.Convert(obj, out, nil) + if err != nil { + return nil, err + } + return out, nil +} + +// Validate checks if the conversion has a scheme. +func (c *Convertor) Validate() error { + if c.Scheme == nil { + return fmt.Errorf("the Convertor requires a scheme") + } + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/versioned/doc.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/versioned/doc.go new file mode 100644 index 000000000000..7abea78744bd --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/versioned/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package versioned provides tools for making sure the objects sent to a +// webhook are in a version the webhook understands. 
+package versioned diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugins.go b/vendor/k8s.io/apiserver/pkg/admission/plugins.go index 5ddfc7e1f84b..3ede44a173fa 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugins.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugins.go @@ -25,6 +25,8 @@ import ( "sort" "sync" + "k8s.io/apimachinery/pkg/runtime" + "github.com/golang/glog" ) @@ -37,6 +39,16 @@ type Factory func(config io.Reader) (Interface, error) type Plugins struct { lock sync.Mutex registry map[string]Factory + + // ConfigScheme is used to parse the admission plugin config file. + // It is exposed to act as a hook for extending server providing their own config. + ConfigScheme *runtime.Scheme +} + +func NewPlugins() *Plugins { + return &Plugins{ + ConfigScheme: runtime.NewScheme(), + } } // All registered admission options. @@ -118,10 +130,12 @@ func splitStream(config io.Reader) (io.Reader, io.Reader, error) { return bytes.NewBuffer(configBytes), bytes.NewBuffer(configBytes), nil } +type Decorator func(handler Interface, name string) Interface + // NewFromPlugins returns an admission.Interface that will enforce admission control decisions of all // the given plugins. -func (ps *Plugins) NewFromPlugins(pluginNames []string, configProvider ConfigProvider, pluginInitializer PluginInitializer) (Interface, error) { - plugins := []Interface{} +func (ps *Plugins) NewFromPlugins(pluginNames []string, configProvider ConfigProvider, pluginInitializer PluginInitializer, decorator Decorator) (Interface, error) { + handlers := []Interface{} for _, pluginName := range pluginNames { pluginConfig, err := configProvider.ConfigFor(pluginName) if err != nil { @@ -133,10 +147,14 @@ func (ps *Plugins) NewFromPlugins(pluginNames []string, configProvider ConfigPro return nil, err } if plugin != nil { - plugins = append(plugins, plugin) + if decorator != nil { + handlers = append(handlers, decorator(plugin, pluginName)) + } else { + handlers = append(handlers, plugin) + } } } - return chainAdmissionHandler(plugins), nil + return chainAdmissionHandler(handlers), nil } // InitPlugin creates an instance of the named interface. @@ -156,18 +174,18 @@ func (ps *Plugins) InitPlugin(name string, config io.Reader, pluginInitializer P pluginInitializer.Initialize(plugin) // ensure that plugins have been properly initialized - if err := Validate(plugin); err != nil { + if err := ValidateInitialization(plugin); err != nil { return nil, err } return plugin, nil } -// Validate will call the Validate function in each plugin if they implement -// the Validator interface. -func Validate(plugin Interface) error { - if validater, ok := plugin.(Validator); ok { - err := validater.Validate() +// ValidateInitialization will call the InitializationValidate function in each plugin if they implement +// the InitializationValidator interface. 
+func ValidateInitialization(plugin Interface) error { + if validater, ok := plugin.(InitializationValidator); ok { + err := validater.ValidateInitialization() if err != nil { return err } diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/BUILD b/vendor/k8s.io/apiserver/pkg/apis/apiserver/BUILD index d8709cb0f48b..d8956d72be53 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/BUILD +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/BUILD @@ -13,9 +13,9 @@ go_library( "types.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/apiserver/pkg/apis/apiserver", deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/doc.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/doc.go index 98944f0cd7b4..004efce15ec5 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/doc.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // Package apiserver is the internal version of the API. // +groupName=apiserver.k8s.io diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/install/BUILD b/vendor/k8s.io/apiserver/pkg/apis/apiserver/install/BUILD index b5236bef9959..e92ce5e57190 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/install/BUILD +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/install/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["install.go"], + importpath = "k8s.io/apiserver/pkg/apis/apiserver/install", deps = [ "//vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/register.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/register.go index 1410518b9ed1..ffe9942a6ccf 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/register.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = SchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. 
func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &AdmissionConfiguration{}, diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/BUILD b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/BUILD index a4d1b7659436..2640ff64b642 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/BUILD +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = [ + "conversion.go", "doc.go", "register.go", "types.go", @@ -15,6 +16,7 @@ go_library( "zz_generated.deepcopy.go", "zz_generated.defaults.go", ], + importpath = "k8s.io/apiserver/pkg/apis/apiserver/v1alpha1", deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/conversion.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/conversion.go new file mode 100644 index 000000000000..378cc080d3a4 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/conversion.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +var _ runtime.NestedObjectDecoder = &AdmissionConfiguration{} + +// DecodeNestedObjects handles encoding RawExtensions on the AdmissionConfiguration, ensuring the +// objects are decoded with the provided decoder. +func (c *AdmissionConfiguration) DecodeNestedObjects(d runtime.Decoder) error { + // decoding failures result in a runtime.Unknown object being created in Object and passed + // to conversion + for k, v := range c.Plugins { + decodeNestedRawExtensionOrUnknown(d, &v.Configuration) + c.Plugins[k] = v + } + return nil +} + +var _ runtime.NestedObjectEncoder = &AdmissionConfiguration{} + +// EncodeNestedObjects handles encoding RawExtensions on the AdmissionConfiguration, ensuring the +// objects are encoded with the provided encoder. +func (c *AdmissionConfiguration) EncodeNestedObjects(e runtime.Encoder) error { + for k, v := range c.Plugins { + if err := encodeNestedRawExtension(e, &v.Configuration); err != nil { + return err + } + c.Plugins[k] = v + } + return nil +} + +// decodeNestedRawExtensionOrUnknown decodes the raw extension into an object once. If called +// On a RawExtension that has already been decoded (has an object), it will not run again. 
+func decodeNestedRawExtensionOrUnknown(d runtime.Decoder, ext *runtime.RawExtension) { + if ext.Raw == nil || ext.Object != nil { + return + } + obj, gvk, err := d.Decode(ext.Raw, nil, nil) + if err != nil { + unk := &runtime.Unknown{Raw: ext.Raw} + if runtime.IsNotRegisteredError(err) { + if _, gvk, err := d.Decode(ext.Raw, nil, unk); err == nil { + unk.APIVersion = gvk.GroupVersion().String() + unk.Kind = gvk.Kind + ext.Object = unk + return + } + } + // TODO: record mime-type with the object + if gvk != nil { + unk.APIVersion = gvk.GroupVersion().String() + unk.Kind = gvk.Kind + } + obj = unk + } + ext.Object = obj +} + +func encodeNestedRawExtension(e runtime.Encoder, ext *runtime.RawExtension) error { + if ext.Raw != nil || ext.Object == nil { + return nil + } + data, err := runtime.Encode(e, ext.Object) + if err != nil { + return err + } + ext.Raw = data + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/doc.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/doc.go index d6035f24b548..bd9a47a8264d 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/doc.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:conversion-gen=k8s.io/apiserver/pkg/apis/apiserver // +k8s:defaulter-gen=TypeMeta diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/register.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/register.go index 090adec3201a..466b19ae5bbb 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/register.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/register.go @@ -42,7 +42,7 @@ func init() { localSchemeBuilder.Register(addKnownTypes) } -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &AdmissionConfiguration{}, diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go index 35164ce77d16..ba634e485ce0 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go @@ -21,32 +21,9 @@ limitations under the License. package v1alpha1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AdmissionConfiguration).DeepCopyInto(out.(*AdmissionConfiguration)) - return nil - }, InType: reflect.TypeOf(&AdmissionConfiguration{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AdmissionPluginConfiguration).DeepCopyInto(out.(*AdmissionPluginConfiguration)) - return nil - }, InType: reflect.TypeOf(&AdmissionPluginConfiguration{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AdmissionConfiguration) DeepCopyInto(out *AdmissionConfiguration) { *out = *in diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go index b455115d81a8..5a67e612dfc0 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go @@ -21,32 +21,9 @@ limitations under the License. package apiserver import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AdmissionConfiguration).DeepCopyInto(out.(*AdmissionConfiguration)) - return nil - }, InType: reflect.TypeOf(&AdmissionConfiguration{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AdmissionPluginConfiguration).DeepCopyInto(out.(*AdmissionPluginConfiguration)) - return nil - }, InType: reflect.TypeOf(&AdmissionPluginConfiguration{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *AdmissionConfiguration) DeepCopyInto(out *AdmissionConfiguration) { *out = *in diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/BUILD b/vendor/k8s.io/apiserver/pkg/apis/audit/BUILD index b6556662a8c5..2388d79f8ce5 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/BUILD +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/BUILD @@ -14,9 +14,9 @@ go_library( "types.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/apiserver/pkg/apis/audit", deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/install/BUILD b/vendor/k8s.io/apiserver/pkg/apis/audit/install/BUILD index 76f39a88093e..f6489a534163 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/install/BUILD +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/install/BUILD @@ -9,6 +9,7 @@ load( go_library( name = "go_default_library", srcs = ["install.go"], + importpath = "k8s.io/apiserver/pkg/apis/audit/install", deps = [ "//vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", @@ -36,6 +37,7 @@ filegroup( go_test( name = "go_default_test", srcs = ["roundtrip_test.go"], + importpath = "k8s.io/apiserver/pkg/apis/audit/install", library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/api/testing/roundtrip:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/register.go b/vendor/k8s.io/apiserver/pkg/apis/audit/register.go index e14b82c1b137..9abf739ae0c3 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/register.go +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/register.go @@ -49,6 +49,5 @@ func addKnownTypes(scheme *runtime.Scheme) error { &Policy{}, &PolicyList{}, ) - scheme.AddGeneratedDeepCopyFuncs(GetGeneratedDeepCopyFuncs()...) return nil } diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/types.go b/vendor/k8s.io/apiserver/pkg/apis/audit/types.go index 5e9ead5ac57a..2b318d5754d5 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/types.go +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/types.go @@ -68,7 +68,7 @@ const ( // The stage for events generated once the response body has been completed, and no more bytes // will be sent. StageResponseComplete = "ResponseComplete" - // The stage for events generated when a panic occured. + // The stage for events generated when a panic occurred. StagePanic = "Panic" ) @@ -77,15 +77,10 @@ const ( // Event captures all the information that can be included in an API audit log. type Event struct { metav1.TypeMeta - // ObjectMeta is included for interoperability with API infrastructure. - // +optional - metav1.ObjectMeta // AuditLevel at which event was generated Level Level - // Time the request reached the apiserver. - Timestamp metav1.Time // Unique audit ID, generated for each request. AuditID types.UID // Stage of the request handling when this event instance was generated. @@ -121,10 +116,15 @@ type Event struct { // +optional RequestObject *runtime.Unknown // API object returned in the response, in JSON. The ResponseObject is recorded after conversion - // to the external type, and serialized as JSON. Omitted for non-resource requests. Only logged + // to the external type, and serialized as JSON. 
Omitted for non-resource requests. Only logged // at Response Level. // +optional ResponseObject *runtime.Unknown + + // Time the request reached the apiserver. + RequestReceivedTimestamp metav1.MicroTime + // Time the request reached current audit stage. + StageTimestamp metav1.MicroTime } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -153,6 +153,11 @@ type Policy struct { // The default audit level is None, but can be overridden by a catch-all rule at the end of the list. // PolicyRules are strictly ordered. Rules []PolicyRule + + // OmitStages is a list of stages for which no events are created. Note that this can also + // be specified per rule in which case the union of both are omitted. + // +optional + OmitStages []Stage } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -208,8 +213,10 @@ type PolicyRule struct { // +optional NonResourceURLs []string - // OmitStages specify events generated in which stages will not be emitted to backend. + // OmitStages is a list of stages for which no events are created. Note that this can also + // be specified policy wide in which case the union of both are omitted. // An empty list means no restrictions will apply. + // +optional OmitStages []Stage } diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/BUILD b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/BUILD index 654b9f0f4afe..04b9b943db9f 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/BUILD +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/BUILD @@ -18,6 +18,7 @@ go_library( "zz_generated.deepcopy.go", "zz_generated.defaults.go", ], + importpath = "k8s.io/apiserver/pkg/apis/audit/v1alpha1", deps = [ "//vendor/github.com/gogo/protobuf/proto:go_default_library", "//vendor/k8s.io/api/authentication/v1:go_default_library", @@ -52,8 +53,10 @@ filegroup( go_test( name = "go_default_test", srcs = ["conversion_test.go"], + importpath = "k8s.io/apiserver/pkg/apis/audit/v1alpha1", library = ":go_default_library", deps = [ + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apiserver/pkg/apis/audit:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/conversion.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/conversion.go index 5f9cc5c1e11e..78e5eab09dc1 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/conversion.go +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/conversion.go @@ -19,6 +19,7 @@ package v1alpha1 import ( "strings" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/conversion" "k8s.io/apiserver/pkg/apis/audit" ) @@ -52,3 +53,26 @@ func Convert_v1alpha1_ObjectReference_To_audit_ObjectReference(in *ObjectReferen } return nil } + +func Convert_v1alpha1_Event_To_audit_Event(in *Event, out *audit.Event, s conversion.Scope) error { + if err := autoConvert_v1alpha1_Event_To_audit_Event(in, out, s); err != nil { + return err + } + if out.StageTimestamp.IsZero() { + out.StageTimestamp = metav1.NewMicroTime(in.CreationTimestamp.Time) + } + if out.RequestReceivedTimestamp.IsZero() { + out.RequestReceivedTimestamp = metav1.NewMicroTime(in.Timestamp.Time) + } + return nil +} + +func Convert_audit_Event_To_v1alpha1_Event(in *audit.Event, out *Event, s conversion.Scope) error { + if err := autoConvert_audit_Event_To_v1alpha1_Event(in, out, s); err != nil { + return err + } + 
out.CreationTimestamp = metav1.NewTime(in.StageTimestamp.Time) + out.Timestamp = metav1.NewTime(in.RequestReceivedTimestamp.Time) + return nil + +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/doc.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/doc.go index 661f59e59f3d..51dbef645cab 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/doc.go +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:conversion-gen=k8s.io/apiserver/pkg/apis/audit // +k8s:openapi-gen=true // +k8s:defaulter-gen=TypeMeta diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.pb.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.pb.go index 9247ef6a2ddb..a4fc41537e66 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.pb.go @@ -222,6 +222,24 @@ func (m *Event) MarshalTo(dAtA []byte) (int, error) { } i += n8 } + dAtA[i] = 0x7a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RequestReceivedTimestamp.Size())) + n9, err := m.RequestReceivedTimestamp.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.StageTimestamp.Size())) + n10, err := m.StageTimestamp.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 return i, nil } @@ -243,11 +261,11 @@ func (m *EventList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n9, err := m.ListMeta.MarshalTo(dAtA[i:]) + n11, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n9 + i += n11 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -379,11 +397,11 @@ func (m *Policy) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n10, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n12, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n10 + i += n12 if len(m.Rules) > 0 { for _, msg := range m.Rules { dAtA[i] = 0x12 @@ -396,6 +414,21 @@ func (m *Policy) MarshalTo(dAtA []byte) (int, error) { i += n } } + if len(m.OmitStages) > 0 { + for _, s := range m.OmitStages { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } return i, nil } @@ -417,11 +450,11 @@ func (m *PolicyList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n11, err := m.ListMeta.MarshalTo(dAtA[i:]) + n13, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n11 + i += n13 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -633,6 +666,10 @@ func (m *Event) Size() (n int) { l = m.ResponseObject.Size() n += 1 + l + sovGenerated(uint64(l)) } + l = m.RequestReceivedTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.StageTimestamp.Size() + n += 2 + l + sovGenerated(uint64(l)) return n } @@ -701,6 +738,12 @@ func (m *Policy) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if len(m.OmitStages) > 0 { + for _, s := range m.OmitStages { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } 
@@ -800,6 +843,8 @@ func (this *Event) String() string { `ResponseStatus:` + strings.Replace(fmt.Sprintf("%v", this.ResponseStatus), "Status", "k8s_io_apimachinery_pkg_apis_meta_v1.Status", 1) + `,`, `RequestObject:` + strings.Replace(fmt.Sprintf("%v", this.RequestObject), "Unknown", "k8s_io_apimachinery_pkg_runtime.Unknown", 1) + `,`, `ResponseObject:` + strings.Replace(fmt.Sprintf("%v", this.ResponseObject), "Unknown", "k8s_io_apimachinery_pkg_runtime.Unknown", 1) + `,`, + `RequestReceivedTimestamp:` + strings.Replace(strings.Replace(this.RequestReceivedTimestamp.String(), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1), `&`, ``, 1) + `,`, + `StageTimestamp:` + strings.Replace(strings.Replace(this.StageTimestamp.String(), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -850,6 +895,7 @@ func (this *Policy) String() string { s := strings.Join([]string{`&Policy{`, `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, + `OmitStages:` + fmt.Sprintf("%v", this.OmitStages) + `,`, `}`, }, "") return s @@ -1348,6 +1394,66 @@ func (m *Event) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestReceivedTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RequestReceivedTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StageTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.StageTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1960,6 +2066,35 @@ func (m *Policy) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OmitStages", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OmitStages = append(m.OmitStages, Stage(dAtA[iNdEx:postIndex])) + iNdEx = postIndex 
default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -2486,76 +2621,80 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 1129 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0xcd, 0x6e, 0x23, 0x45, - 0x10, 0xce, 0xac, 0xe3, 0x8d, 0xa7, 0xb3, 0xf9, 0xd9, 0x5e, 0xc4, 0x8e, 0x72, 0xb0, 0x83, 0x91, - 0x90, 0x05, 0x61, 0x26, 0x09, 0x81, 0x5d, 0x0e, 0x1c, 0x62, 0x2d, 0x02, 0x4b, 0x21, 0x84, 0x4e, - 0xbc, 0x12, 0x3f, 0x07, 0xc6, 0x76, 0xc5, 0x1e, 0xe2, 0xf9, 0xa1, 0xbb, 0xc7, 0x28, 0x37, 0x0e, - 0x3c, 0x00, 0x77, 0x1e, 0x66, 0x85, 0x04, 0x52, 0x8e, 0x7b, 0xdc, 0x93, 0x45, 0xcc, 0x5b, 0xe4, - 0x84, 0xba, 0xa7, 0x7b, 0x7a, 0xec, 0xac, 0x85, 0x73, 0xd9, 0xdb, 0x74, 0xd5, 0xf7, 0x7d, 0x5d, - 0x55, 0x53, 0x55, 0x33, 0xe8, 0x9b, 0x8b, 0xa7, 0xcc, 0x0d, 0x62, 0xef, 0x22, 0xed, 0x00, 0x8d, - 0x80, 0x03, 0xf3, 0x46, 0x10, 0xf5, 0x62, 0xea, 0x29, 0x87, 0x9f, 0x04, 0x0c, 0xe8, 0x08, 0xa8, - 0x97, 0x5c, 0xf4, 0xe5, 0xc9, 0xf3, 0xd3, 0x5e, 0xc0, 0xbd, 0xd1, 0x9e, 0x3f, 0x4c, 0x06, 0xfe, - 0x9e, 0xd7, 0x87, 0x08, 0xa8, 0xcf, 0xa1, 0xe7, 0x26, 0x34, 0xe6, 0x31, 0x6e, 0x64, 0x4c, 0x37, - 0x67, 0xba, 0xc9, 0x45, 0x5f, 0x9e, 0x5c, 0xc9, 0x74, 0x35, 0x73, 0xeb, 0xc3, 0x7e, 0xc0, 0x07, - 0x69, 0xc7, 0xed, 0xc6, 0xa1, 0xd7, 0x8f, 0xfb, 0xb1, 0x27, 0x05, 0x3a, 0xe9, 0xb9, 0x3c, 0xc9, - 0x83, 0x7c, 0xca, 0x84, 0xb7, 0x76, 0x4c, 0x48, 0x9e, 0x9f, 0xf2, 0x01, 0x44, 0x3c, 0xe8, 0xfa, - 0x3c, 0x88, 0x23, 0x6f, 0x74, 0x2b, 0x8c, 0xad, 0x03, 0x83, 0x0e, 0xfd, 0xee, 0x20, 0x88, 0x80, - 0x5e, 0x9a, 0x1c, 0x42, 0xe0, 0xfe, 0xeb, 0x58, 0xde, 0x3c, 0x16, 0x4d, 0x23, 0x1e, 0x84, 0x70, - 0x8b, 0xf0, 0xc9, 0xff, 0x11, 0x58, 0x77, 0x00, 0xa1, 0x7f, 0x8b, 0xf7, 0xd1, 0x3c, 0x5e, 0xca, - 0x83, 0xa1, 0x17, 0x44, 0x9c, 0x71, 0x3a, 0x4b, 0xaa, 0xff, 0x55, 0x41, 0xe5, 0xcf, 0x47, 0x10, - 0x71, 0xfc, 0x23, 0xaa, 0x88, 0x14, 0x7a, 0x3e, 0xf7, 0x1d, 0x6b, 0xdb, 0x6a, 0xac, 0xee, 0xef, - 0xba, 0xa6, 0xee, 0xb9, 0xa2, 0x29, 0xbd, 0x40, 0xbb, 0xa3, 0x3d, 0xf7, 0xeb, 0xce, 0x4f, 0xd0, - 0xe5, 0x5f, 0x01, 0xf7, 0x9b, 0xf8, 0x6a, 0x5c, 0x5b, 0x9a, 0x8c, 0x6b, 0xc8, 0xd8, 0x48, 0xae, - 0x8a, 0x77, 0x50, 0x79, 0x08, 0x23, 0x18, 0x3a, 0xf7, 0xb6, 0xad, 0x86, 0xdd, 0x7c, 0x5b, 0x81, - 0xcb, 0x47, 0xc2, 0x78, 0xa3, 0x1f, 0x48, 0x06, 0xc2, 0xdf, 0x23, 0x5b, 0x64, 0xcb, 0xb8, 0x1f, - 0x26, 0x4e, 0x49, 0x06, 0xf4, 0xfe, 0x62, 0x01, 0x9d, 0x05, 0x21, 0x34, 0x1f, 0x2a, 0x75, 0xfb, - 0x4c, 0x8b, 0x10, 0xa3, 0x87, 0x8f, 0xd1, 0x8a, 0xec, 0x9c, 0xd6, 0x33, 0x67, 0x59, 0x06, 0x73, - 0xa0, 0xe0, 0x2b, 0x87, 0x99, 0xf9, 0x66, 0x5c, 0x7b, 0x67, 0x5e, 0x3d, 0xf9, 0x65, 0x02, 0xcc, - 0x6d, 0xb7, 0x9e, 0x11, 0x2d, 0x22, 0x52, 0x63, 0xdc, 0xef, 0x83, 0x53, 0x9e, 0x4e, 0xed, 0x54, - 0x18, 0x6f, 0xf4, 0x03, 0xc9, 0x40, 0x78, 0x1f, 0x21, 0x0a, 0x3f, 0xa7, 0xc0, 0x78, 0x9b, 0xb4, - 0x9c, 0xfb, 0x92, 0x92, 0x97, 0x8e, 0xe4, 0x1e, 0x52, 0x40, 0xe1, 0x6d, 0xb4, 0x3c, 0x02, 0xda, - 0x71, 0x56, 0x24, 0xfa, 0x81, 0x42, 0x2f, 0x3f, 0x07, 0xda, 0x21, 0xd2, 0x83, 0xbf, 0x44, 0xcb, - 0x29, 0x03, 0xea, 0x54, 0x64, 0xad, 0xde, 0x2b, 0xd4, 0xca, 0x9d, 0xee, 0x6d, 0x51, 0xa3, 0x36, - 0x03, 0xda, 0x8a, 0xce, 0x63, 0xa3, 0x24, 0x2c, 0x44, 0x2a, 0xe0, 0x01, 0xda, 0x0c, 0xc2, 0x04, - 0x28, 0x8b, 0x23, 0xd1, 0x2a, 0xc2, 0xe3, 0xd8, 0x77, 0x52, 0x7d, 0x6b, 0x32, 0xae, 0x6d, 0xb6, - 0x66, 0x34, 0xc8, 0x2d, 0x55, 0xfc, 0x01, 0xb2, 0x59, 0x9c, 0xd2, 0x2e, 0xb4, 0x4e, 0x98, 0x83, - 0xb6, 0x4b, 0x0d, 0xbb, 0xb9, 0x26, 0x5e, 0xda, 0xa9, 0x36, 0x12, 0xe3, 0xc7, 0xe7, 0xc8, 0x8e, - 0x65, 0x5f, 0x11, 0x38, 
0x77, 0x56, 0x65, 0x3c, 0x9f, 0xba, 0x8b, 0xae, 0x06, 0xd5, 0xa6, 0x04, - 0xce, 0x81, 0x42, 0xd4, 0x85, 0xec, 0x9e, 0xdc, 0x48, 0x8c, 0x34, 0x1e, 0xa0, 0x75, 0x0a, 0x2c, - 0x89, 0x23, 0x06, 0xa7, 0xdc, 0xe7, 0x29, 0x73, 0x1e, 0xc8, 0xcb, 0x76, 0x16, 0x6b, 0xbf, 0x8c, - 0xd3, 0xc4, 0x93, 0x71, 0x6d, 0x9d, 0x4c, 0xe9, 0x90, 0x19, 0x5d, 0xec, 0xa3, 0x35, 0xf5, 0x8a, - 0xb3, 0x40, 0x9c, 0x35, 0x79, 0x51, 0x63, 0xee, 0x45, 0x6a, 0x05, 0xb8, 0xed, 0xe8, 0x22, 0x8a, - 0x7f, 0x89, 0x9a, 0x0f, 0x27, 0xe3, 0xda, 0x1a, 0x29, 0x4a, 0x90, 0x69, 0x45, 0xdc, 0x33, 0xc9, - 0xa8, 0x3b, 0xd6, 0xef, 0x78, 0xc7, 0x54, 0x22, 0xea, 0x92, 0x19, 0xcd, 0xfa, 0x0b, 0x0b, 0xd9, - 0x72, 0x8d, 0x1c, 0x05, 0x8c, 0xe3, 0x1f, 0x6e, 0xad, 0x12, 0x77, 0xb1, 0xd2, 0x09, 0xb6, 0x5c, - 0x24, 0x9b, 0xaa, 0x2b, 0x2b, 0xda, 0x52, 0x58, 0x23, 0x67, 0xa8, 0x1c, 0x70, 0x08, 0x99, 0x73, - 0x6f, 0xbb, 0xd4, 0x58, 0xdd, 0xf7, 0x16, 0x6f, 0x01, 0x19, 0x61, 0x73, 0x4d, 0x0f, 0x67, 0x4b, - 0xa8, 0x90, 0x4c, 0xac, 0xfe, 0x87, 0x85, 0xd6, 0xbf, 0xa0, 0x71, 0x9a, 0x10, 0xc8, 0x3a, 0x8e, - 0xe1, 0x77, 0x51, 0xb9, 0x2f, 0x2c, 0x32, 0x07, 0xdb, 0xf0, 0x32, 0x58, 0xe6, 0x13, 0x1d, 0x4c, - 0x35, 0x43, 0x46, 0xa4, 0x3a, 0x38, 0x97, 0x21, 0xc6, 0x8f, 0x9f, 0x88, 0xf7, 0x9d, 0x1d, 0x8e, - 0xfd, 0x10, 0x98, 0x53, 0x92, 0x04, 0xf5, 0x16, 0x0b, 0x0e, 0x32, 0x8d, 0xab, 0xff, 0x56, 0x42, - 0x1b, 0x33, 0x0d, 0x8c, 0x77, 0x50, 0x45, 0x83, 0x54, 0x84, 0x79, 0xd5, 0xb4, 0x16, 0xc9, 0x11, - 0xd8, 0x43, 0x76, 0x24, 0xa4, 0x12, 0xbf, 0x0b, 0x6a, 0x01, 0xe7, 0x2b, 0xf2, 0x58, 0x3b, 0x88, - 0xc1, 0x88, 0x85, 0x23, 0x0e, 0x72, 0xf5, 0x16, 0x16, 0x8e, 0xc0, 0x12, 0xe9, 0xc1, 0x4d, 0x54, - 0x4a, 0x83, 0x9e, 0x5a, 0xa0, 0xbb, 0x0a, 0x50, 0x6a, 0x2f, 0xba, 0x3c, 0x05, 0x59, 0xac, 0x42, - 0x3f, 0x09, 0x9e, 0x03, 0x65, 0x41, 0x1c, 0xa9, 0xed, 0x99, 0xaf, 0xc2, 0xc3, 0x93, 0x96, 0xf2, - 0x90, 0x02, 0x0a, 0x1f, 0xa2, 0x0d, 0x9d, 0x96, 0x26, 0x66, 0x3b, 0xf4, 0xb1, 0x22, 0x6e, 0x90, - 0x69, 0x37, 0x99, 0xc5, 0xe3, 0x8f, 0xd1, 0x2a, 0x4b, 0x3b, 0x79, 0xf9, 0xb2, 0xa5, 0xfa, 0x48, - 0xd1, 0x57, 0x4f, 0x8d, 0x8b, 0x14, 0x71, 0xf5, 0xbf, 0x2d, 0x74, 0xff, 0x24, 0x1e, 0x06, 0xdd, - 0xcb, 0x37, 0xf0, 0xb9, 0xfc, 0x16, 0x95, 0x69, 0x3a, 0x04, 0xdd, 0xe7, 0x07, 0x8b, 0xf7, 0x79, - 0x16, 0x22, 0x49, 0x87, 0x60, 0x9a, 0x56, 0x9c, 0x18, 0xc9, 0x14, 0xeb, 0x7f, 0x5a, 0x08, 0x65, - 0xa0, 0x37, 0x30, 0xaf, 0xed, 0xe9, 0x79, 0xdd, 0xbd, 0x6b, 0x1e, 0x73, 0x06, 0xf6, 0x45, 0x49, - 0xe7, 0x20, 0x52, 0x33, 0x3f, 0x17, 0xd6, 0x22, 0x3f, 0x17, 0x35, 0x54, 0x16, 0x5f, 0x3a, 0x3d, - 0xb1, 0xb6, 0x40, 0x8a, 0x0f, 0x12, 0x23, 0x99, 0x1d, 0xbb, 0x08, 0x89, 0x07, 0x39, 0xea, 0x7a, - 0x4c, 0xd7, 0xc5, 0xab, 0x6a, 0xe7, 0x56, 0x52, 0x40, 0x08, 0x41, 0xf1, 0x11, 0x66, 0xce, 0xb2, - 0x11, 0x14, 0xdf, 0x66, 0x46, 0x32, 0x3b, 0x0e, 0x8a, 0x7b, 0xa2, 0x2c, 0x2b, 0xf1, 0x74, 0xf1, - 0x4a, 0x4c, 0x6f, 0x26, 0x33, 0xb9, 0xaf, 0xdd, 0x32, 0x2e, 0x42, 0xf9, 0x18, 0x33, 0xe7, 0xbe, - 0x89, 0x3d, 0x9f, 0x73, 0x46, 0x0a, 0x08, 0xfc, 0x19, 0xda, 0x88, 0xe2, 0x48, 0x4b, 0xb5, 0xc9, - 0x11, 0x73, 0x56, 0x24, 0xe9, 0x91, 0x98, 0xa5, 0xe3, 0x69, 0x17, 0x99, 0xc5, 0xe2, 0x27, 0x08, - 0xc5, 0x61, 0xc0, 0xe5, 0x1f, 0x0e, 0x73, 0x2a, 0x92, 0xf9, 0x58, 0x76, 0x75, 0x6e, 0x35, 0x7f, - 0x40, 0x05, 0x68, 0xd3, 0xbd, 0xba, 0xae, 0x2e, 0xbd, 0xbc, 0xae, 0x2e, 0xbd, 0xba, 0xae, 0x2e, - 0xfd, 0x3a, 0xa9, 0x5a, 0x57, 0x93, 0xaa, 0xf5, 0x72, 0x52, 0xb5, 0x5e, 0x4d, 0xaa, 0xd6, 0x3f, - 0x93, 0xaa, 0xf5, 0xfb, 0xbf, 0xd5, 0xa5, 0xef, 0x2a, 0xba, 0x08, 0xff, 0x05, 0x00, 0x00, 0xff, - 0xff, 0x92, 0xdb, 0xa5, 0x50, 0x5a, 0x0c, 0x00, 
0x00, + // 1190 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0xcf, 0xd6, 0x71, 0x62, 0x4f, 0x1a, 0x27, 0x9d, 0x22, 0xba, 0xca, 0xc1, 0x36, 0x46, 0x42, + 0x11, 0x84, 0xdd, 0xa4, 0x04, 0x5a, 0x0e, 0x1c, 0x62, 0x15, 0x81, 0xa5, 0x34, 0x84, 0x49, 0x5c, + 0x89, 0x3f, 0x07, 0xd6, 0xf6, 0x8b, 0xbd, 0xc4, 0xde, 0x5d, 0x66, 0x66, 0x8d, 0x72, 0xe3, 0xc0, + 0x15, 0x89, 0x3b, 0x1f, 0xa6, 0xe2, 0x80, 0x94, 0x63, 0x8f, 0x3d, 0x59, 0xc4, 0x7c, 0x8b, 0x1c, + 0x10, 0x9a, 0xd9, 0x99, 0x9d, 0x5d, 0xa7, 0x56, 0x1d, 0x0e, 0xbd, 0xed, 0xbc, 0xf7, 0x7b, 0xbf, + 0xf7, 0xe6, 0xed, 0xfb, 0x33, 0xe8, 0xeb, 0xf3, 0xc7, 0xcc, 0xf1, 0x43, 0xf7, 0x3c, 0xee, 0x00, + 0x0d, 0x80, 0x03, 0x73, 0xc7, 0x10, 0xf4, 0x42, 0xea, 0x2a, 0x85, 0x17, 0xf9, 0x0c, 0xe8, 0x18, + 0xa8, 0x1b, 0x9d, 0xf7, 0xe5, 0xc9, 0xf5, 0xe2, 0x9e, 0xcf, 0xdd, 0xf1, 0x9e, 0x37, 0x8c, 0x06, + 0xde, 0x9e, 0xdb, 0x87, 0x00, 0xa8, 0xc7, 0xa1, 0xe7, 0x44, 0x34, 0xe4, 0x21, 0xde, 0x4e, 0x2c, + 0x9d, 0xd4, 0xd2, 0x89, 0xce, 0xfb, 0xf2, 0xe4, 0x48, 0x4b, 0x47, 0x5b, 0x6e, 0x7d, 0xd8, 0xf7, + 0xf9, 0x20, 0xee, 0x38, 0xdd, 0x70, 0xe4, 0xf6, 0xc3, 0x7e, 0xe8, 0x4a, 0x82, 0x4e, 0x7c, 0x26, + 0x4f, 0xf2, 0x20, 0xbf, 0x12, 0xe2, 0xad, 0x1d, 0x13, 0x92, 0xeb, 0xc5, 0x7c, 0x00, 0x01, 0xf7, + 0xbb, 0x1e, 0xf7, 0xc3, 0xc0, 0x1d, 0xdf, 0x08, 0x63, 0x6b, 0xdf, 0xa0, 0x47, 0x5e, 0x77, 0xe0, + 0x07, 0x40, 0x2f, 0xcc, 0x1d, 0x46, 0xc0, 0xbd, 0x57, 0x59, 0xb9, 0xf3, 0xac, 0x68, 0x1c, 0x70, + 0x7f, 0x04, 0x37, 0x0c, 0x3e, 0x79, 0x9d, 0x01, 0xeb, 0x0e, 0x60, 0xe4, 0xdd, 0xb0, 0xfb, 0x68, + 0x9e, 0x5d, 0xcc, 0xfd, 0xa1, 0xeb, 0x07, 0x9c, 0x71, 0x3a, 0x6b, 0xd4, 0xf8, 0x0b, 0xa1, 0xe2, + 0xe7, 0x63, 0x08, 0x38, 0xfe, 0x01, 0x95, 0xc4, 0x15, 0x7a, 0x1e, 0xf7, 0x6c, 0xab, 0x6e, 0x6d, + 0xaf, 0x3d, 0xdc, 0x75, 0x4c, 0xde, 0x53, 0x46, 0x93, 0x7a, 0x81, 0x76, 0xc6, 0x7b, 0xce, 0x57, + 0x9d, 0x1f, 0xa1, 0xcb, 0x9f, 0x02, 0xf7, 0x9a, 0xf8, 0x72, 0x52, 0x5b, 0x9a, 0x4e, 0x6a, 0xc8, + 0xc8, 0x48, 0xca, 0x8a, 0x77, 0x50, 0x71, 0x08, 0x63, 0x18, 0xda, 0x77, 0xea, 0xd6, 0x76, 0xb9, + 0xf9, 0xb6, 0x02, 0x17, 0x0f, 0x85, 0xf0, 0x5a, 0x7f, 0x90, 0x04, 0x84, 0xbf, 0x43, 0x65, 0x71, + 0x5b, 0xc6, 0xbd, 0x51, 0x64, 0x17, 0x64, 0x40, 0xef, 0x2f, 0x16, 0xd0, 0xa9, 0x3f, 0x82, 0xe6, + 0x3d, 0xc5, 0x5e, 0x3e, 0xd5, 0x24, 0xc4, 0xf0, 0xe1, 0x23, 0xb4, 0x2a, 0x2b, 0xa7, 0xf5, 0xc4, + 0x5e, 0x96, 0xc1, 0xec, 0x2b, 0xf8, 0xea, 0x41, 0x22, 0xbe, 0x9e, 0xd4, 0xde, 0x99, 0x97, 0x4f, + 0x7e, 0x11, 0x01, 0x73, 0xda, 0xad, 0x27, 0x44, 0x93, 0x88, 0xab, 0x31, 0xee, 0xf5, 0xc1, 0x2e, + 0xe6, 0xaf, 0x76, 0x22, 0x84, 0xd7, 0xfa, 0x83, 0x24, 0x20, 0xfc, 0x10, 0x21, 0x0a, 0x3f, 0xc5, + 0xc0, 0x78, 0x9b, 0xb4, 0xec, 0x15, 0x69, 0x92, 0xa6, 0x8e, 0xa4, 0x1a, 0x92, 0x41, 0xe1, 0x3a, + 0x5a, 0x1e, 0x03, 0xed, 0xd8, 0xab, 0x12, 0x7d, 0x57, 0xa1, 0x97, 0x9f, 0x01, 0xed, 0x10, 0xa9, + 0xc1, 0x5f, 0xa2, 0xe5, 0x98, 0x01, 0xb5, 0x4b, 0x32, 0x57, 0xef, 0x65, 0x72, 0xe5, 0xe4, 0x6b, + 0x5b, 0xe4, 0xa8, 0xcd, 0x80, 0xb6, 0x82, 0xb3, 0xd0, 0x30, 0x09, 0x09, 0x91, 0x0c, 0x78, 0x80, + 0x36, 0xfd, 0x51, 0x04, 0x94, 0x85, 0x81, 0x28, 0x15, 0xa1, 0xb1, 0xcb, 0xb7, 0x62, 0x7d, 0x6b, + 0x3a, 0xa9, 0x6d, 0xb6, 0x66, 0x38, 0xc8, 0x0d, 0x56, 0xfc, 0x01, 0x2a, 0xb3, 0x30, 0xa6, 0x5d, + 0x68, 0x1d, 0x33, 0x1b, 0xd5, 0x0b, 0xdb, 0xe5, 0xe6, 0xba, 0xf8, 0x69, 0x27, 0x5a, 0x48, 0x8c, + 0x1e, 0x9f, 0xa1, 0x72, 0x28, 0xeb, 0x8a, 0xc0, 0x99, 0xbd, 0x26, 0xe3, 0xf9, 0xd4, 0x59, 0x74, + 0x34, 0xa8, 0x32, 0x25, 0x70, 0x06, 0x14, 0x82, 0x2e, 0x24, 0x7e, 
0x52, 0x21, 0x31, 0xd4, 0x78, + 0x80, 0x2a, 0x14, 0x58, 0x14, 0x06, 0x0c, 0x4e, 0xb8, 0xc7, 0x63, 0x66, 0xdf, 0x95, 0xce, 0x76, + 0x16, 0x2b, 0xbf, 0xc4, 0xa6, 0x89, 0xa7, 0x93, 0x5a, 0x85, 0xe4, 0x78, 0xc8, 0x0c, 0x2f, 0xf6, + 0xd0, 0xba, 0xfa, 0xc5, 0x49, 0x20, 0xf6, 0xba, 0x74, 0xb4, 0x3d, 0xd7, 0x91, 0x1a, 0x01, 0x4e, + 0x3b, 0x38, 0x0f, 0xc2, 0x9f, 0x83, 0xe6, 0xbd, 0xe9, 0xa4, 0xb6, 0x4e, 0xb2, 0x14, 0x24, 0xcf, + 0x88, 0x7b, 0xe6, 0x32, 0xca, 0x47, 0xe5, 0x96, 0x3e, 0x72, 0x17, 0x51, 0x4e, 0x66, 0x38, 0xf1, + 0x6f, 0x16, 0xb2, 0x95, 0x5f, 0x02, 0x5d, 0xf0, 0xc7, 0xd0, 0x4b, 0xfb, 0xce, 0xde, 0x90, 0x0e, + 0xdd, 0xc5, 0xb2, 0xf7, 0xd4, 0xef, 0xd2, 0x50, 0x76, 0x70, 0x5d, 0x55, 0xa6, 0x4d, 0xe6, 0x10, + 0x93, 0xb9, 0x2e, 0x71, 0x88, 0x2a, 0xb2, 0xd5, 0x4c, 0x10, 0x9b, 0xff, 0x2f, 0x08, 0xdd, 0xc9, + 0x95, 0x93, 0x1c, 0x1d, 0x99, 0xa1, 0x6f, 0x3c, 0xb7, 0x50, 0x59, 0xce, 0xd1, 0x43, 0x9f, 0x71, + 0xfc, 0xfd, 0x8d, 0x59, 0xea, 0x2c, 0xe6, 0x58, 0x58, 0xcb, 0x49, 0xba, 0xa9, 0xfc, 0x96, 0xb4, + 0x24, 0x33, 0x47, 0x4f, 0x51, 0xd1, 0xe7, 0x30, 0x62, 0xf6, 0x9d, 0x7a, 0x61, 0xe6, 0x4e, 0xaf, + 0xe9, 0x01, 0x19, 0x61, 0x73, 0x5d, 0x4f, 0xa7, 0x96, 0x60, 0x21, 0x09, 0x59, 0xe3, 0x0f, 0x0b, + 0x55, 0xbe, 0xa0, 0x61, 0x1c, 0x11, 0x48, 0x5a, 0x8e, 0xe1, 0x77, 0x51, 0xb1, 0x2f, 0x24, 0xf2, + 0x0e, 0x65, 0x63, 0x97, 0xc0, 0x12, 0x9d, 0x68, 0x61, 0xaa, 0x2d, 0x64, 0x44, 0xaa, 0x85, 0x53, + 0x1a, 0x62, 0xf4, 0xf8, 0x91, 0x28, 0xf8, 0xe4, 0x70, 0xe4, 0x8d, 0x80, 0xd9, 0x05, 0x69, 0xa0, + 0xca, 0x38, 0xa3, 0x20, 0x79, 0x5c, 0xe3, 0xd7, 0x02, 0xda, 0x98, 0xe9, 0x60, 0xbc, 0x83, 0x4a, + 0x1a, 0xa4, 0x22, 0x4c, 0xb3, 0xa6, 0xb9, 0x48, 0x8a, 0xc0, 0x2e, 0x2a, 0x07, 0x82, 0x2a, 0xf2, + 0xba, 0xa0, 0x36, 0x50, 0xba, 0x23, 0x8e, 0xb4, 0x82, 0x18, 0x8c, 0x98, 0xb8, 0xe2, 0x20, 0x77, + 0x4f, 0x66, 0xe2, 0x0a, 0x2c, 0x91, 0x1a, 0xdc, 0x44, 0x85, 0xd8, 0xef, 0xa9, 0x0d, 0xb2, 0xab, + 0x00, 0x85, 0xf6, 0xa2, 0xdb, 0x43, 0x18, 0x8b, 0x5d, 0xe0, 0x45, 0xfe, 0x33, 0xa0, 0xcc, 0x0f, + 0x03, 0xb5, 0x3e, 0xd2, 0x5d, 0x70, 0x70, 0xdc, 0x52, 0x1a, 0x92, 0x41, 0xe1, 0x03, 0xb4, 0xa1, + 0xaf, 0xa5, 0x0d, 0x93, 0x25, 0xf2, 0x40, 0x19, 0x6e, 0x90, 0xbc, 0x9a, 0xcc, 0xe2, 0xf1, 0xc7, + 0x68, 0x8d, 0xc5, 0x9d, 0x34, 0x7d, 0xc9, 0x56, 0xb9, 0xaf, 0xcc, 0xd7, 0x4e, 0x8c, 0x8a, 0x64, + 0x71, 0x8d, 0x7f, 0x2d, 0xb4, 0x72, 0x1c, 0x0e, 0xfd, 0xee, 0xc5, 0x1b, 0x78, 0x2f, 0x7c, 0x83, + 0x8a, 0x34, 0x1e, 0x82, 0xae, 0xf3, 0xfd, 0xc5, 0xeb, 0x3c, 0x09, 0x91, 0xc4, 0x43, 0x30, 0x45, + 0x2b, 0x4e, 0x8c, 0x24, 0x8c, 0xf8, 0x11, 0x42, 0xe1, 0xc8, 0xe7, 0xb2, 0xa9, 0x75, 0x11, 0x3e, + 0x90, 0x81, 0xa4, 0x52, 0xb3, 0xb5, 0x33, 0xd0, 0xc6, 0x9f, 0x16, 0x42, 0x09, 0xfb, 0x1b, 0x68, + 0xf4, 0x76, 0xbe, 0xd1, 0x77, 0x6f, 0x9b, 0x80, 0x39, 0x9d, 0xfe, 0xbc, 0xa0, 0xef, 0x20, 0x72, + 0x62, 0x9e, 0x65, 0xd6, 0x22, 0xcf, 0xb2, 0x1a, 0x2a, 0x8a, 0x37, 0x82, 0x6e, 0xf5, 0xb2, 0x40, + 0x8a, 0x55, 0xce, 0x48, 0x22, 0xc7, 0x0e, 0x42, 0xe2, 0x43, 0xce, 0x08, 0x9d, 0xda, 0x8a, 0x48, + 0x6d, 0x3b, 0x95, 0x92, 0x0c, 0x42, 0x10, 0x8a, 0xe7, 0x0b, 0xb3, 0x97, 0x0d, 0xa1, 0x78, 0xd5, + 0x30, 0x92, 0xc8, 0xb1, 0x9f, 0x1d, 0x30, 0x45, 0x99, 0x89, 0xc7, 0x8b, 0x67, 0x22, 0x3f, 0xd2, + 0x4c, 0xcb, 0xbf, 0x72, 0x3c, 0x39, 0x08, 0xa5, 0xfd, 0xcf, 0xec, 0x15, 0x13, 0x7b, 0x3a, 0x20, + 0x18, 0xc9, 0x20, 0xf0, 0x67, 0x68, 0x23, 0x08, 0x03, 0x4d, 0xd5, 0x26, 0x87, 0xcc, 0x5e, 0x95, + 0x46, 0xf7, 0x45, 0x13, 0x1e, 0xe5, 0x55, 0x64, 0x16, 0x3b, 0x53, 0x85, 0xa5, 0x85, 0xab, 0xb0, + 0xe9, 0x5c, 0x5e, 0x55, 0x97, 0x5e, 0x5c, 0x55, 0x97, 0x5e, 0x5e, 0x55, 0x97, 0x7e, 0x99, 
0x56, + 0xad, 0xcb, 0x69, 0xd5, 0x7a, 0x31, 0xad, 0x5a, 0x2f, 0xa7, 0x55, 0xeb, 0xef, 0x69, 0xd5, 0xfa, + 0xfd, 0x9f, 0xea, 0xd2, 0xb7, 0x25, 0x9d, 0x84, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x55, 0x14, + 0x18, 0x3e, 0x94, 0x0d, 0x00, 0x00, } diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.proto b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.proto index 3ab6575311ec..32a5c73640f6 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.proto +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.proto @@ -89,6 +89,14 @@ message Event { // at Response Level. // +optional optional k8s.io.apimachinery.pkg.runtime.Unknown responseObject = 14; + + // Time the request reached the apiserver. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime requestReceivedTimestamp = 15; + + // Time the request reached current audit stage. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime stageTimestamp = 16; } // EventList is a list of audit Events. @@ -156,6 +164,11 @@ message Policy { // The default audit level is None, but can be overridden by a catch-all rule at the end of the list. // PolicyRules are strictly ordered. repeated PolicyRule rules = 2; + + // OmitStages is a list of stages for which no events are created. Note that this can also + // be specified per rule in which case the union of both are omitted. + // +optional + repeated string omitStages = 3; } // PolicyList is a list of audit Policies. @@ -206,8 +219,10 @@ message PolicyRule { // +optional repeated string nonResourceURLs = 7; - // OmitStages specify events generated in which stages will not be emitted to backend. + // OmitStages is a list of stages for which no events are created. Note that this can also + // be specified policy wide in which case the union of both are omitted. // An empty list means no restrictions will apply. + // +optional repeated string omitStages = 8; } diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/types.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/types.go index 768a515b0cd8..21f10c78d48b 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/types.go +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/types.go @@ -69,7 +69,7 @@ const ( // The stage for events generated once the response body has been completed, and no more bytes // will be sent. StageResponseComplete = "ResponseComplete" - // The stage for events generated when a panic occured. + // The stage for events generated when a panic occurred. StagePanic = "Panic" ) @@ -126,6 +126,12 @@ type Event struct { // at Response Level. // +optional ResponseObject *runtime.Unknown `json:"responseObject,omitempty" protobuf:"bytes,14,opt,name=responseObject"` + // Time the request reached the apiserver. + // +optional + RequestReceivedTimestamp metav1.MicroTime `json:"requestReceivedTimestamp" protobuf:"bytes,15,opt,name=requestReceivedTimestamp"` + // Time the request reached current audit stage. + // +optional + StageTimestamp metav1.MicroTime `json:"stageTimestamp" protobuf:"bytes,16,opt,name=stageTimestamp"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -154,6 +160,11 @@ type Policy struct { // The default audit level is None, but can be overridden by a catch-all rule at the end of the list. // PolicyRules are strictly ordered. Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` + + // OmitStages is a list of stages for which no events are created. 
Note that this can also + // be specified per rule in which case the union of both are omitted. + // +optional + OmitStages []Stage `json:"omitStages,omitempty" protobuf:"bytes,3,rep,name=omitStages"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -209,8 +220,10 @@ type PolicyRule struct { // +optional NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,7,rep,name=nonResourceURLs"` - // OmitStages specify events generated in which stages will not be emitted to backend. + // OmitStages is a list of stages for which no events are created. Note that this can also + // be specified policy wide in which case the union of both are omitted. // An empty list means no restrictions will apply. + // +optional OmitStages []Stage `json:"omitStages,omitempty" protobuf:"bytes,8,rep,name=omitStages"` } diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.conversion.go index 107268f4a542..166c17bd9409 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.conversion.go +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.conversion.go @@ -56,9 +56,9 @@ func RegisterConversions(scheme *runtime.Scheme) error { } func autoConvert_v1alpha1_Event_To_audit_Event(in *Event, out *audit.Event, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta + // WARNING: in.ObjectMeta requires manual conversion: does not exist in peer-type out.Level = audit.Level(in.Level) - out.Timestamp = in.Timestamp + // WARNING: in.Timestamp requires manual conversion: does not exist in peer-type out.AuditID = types.UID(in.AuditID) out.Stage = audit.Stage(in.Stage) out.RequestURI = in.RequestURI @@ -81,18 +81,13 @@ func autoConvert_v1alpha1_Event_To_audit_Event(in *Event, out *audit.Event, s co out.ResponseStatus = (*v1.Status)(unsafe.Pointer(in.ResponseStatus)) out.RequestObject = (*runtime.Unknown)(unsafe.Pointer(in.RequestObject)) out.ResponseObject = (*runtime.Unknown)(unsafe.Pointer(in.ResponseObject)) + out.RequestReceivedTimestamp = in.RequestReceivedTimestamp + out.StageTimestamp = in.StageTimestamp return nil } -// Convert_v1alpha1_Event_To_audit_Event is an autogenerated conversion function. -func Convert_v1alpha1_Event_To_audit_Event(in *Event, out *audit.Event, s conversion.Scope) error { - return autoConvert_v1alpha1_Event_To_audit_Event(in, out, s) -} - func autoConvert_audit_Event_To_v1alpha1_Event(in *audit.Event, out *Event, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta out.Level = Level(in.Level) - out.Timestamp = in.Timestamp out.AuditID = types.UID(in.AuditID) out.Stage = Stage(in.Stage) out.RequestURI = in.RequestURI @@ -115,14 +110,11 @@ func autoConvert_audit_Event_To_v1alpha1_Event(in *audit.Event, out *Event, s co out.ResponseStatus = (*v1.Status)(unsafe.Pointer(in.ResponseStatus)) out.RequestObject = (*runtime.Unknown)(unsafe.Pointer(in.RequestObject)) out.ResponseObject = (*runtime.Unknown)(unsafe.Pointer(in.ResponseObject)) + out.RequestReceivedTimestamp = in.RequestReceivedTimestamp + out.StageTimestamp = in.StageTimestamp return nil } -// Convert_audit_Event_To_v1alpha1_Event is an autogenerated conversion function. 
-func Convert_audit_Event_To_v1alpha1_Event(in *audit.Event, out *Event, s conversion.Scope) error { - return autoConvert_audit_Event_To_v1alpha1_Event(in, out, s) -} - func autoConvert_v1alpha1_EventList_To_audit_EventList(in *EventList, out *audit.EventList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { @@ -215,6 +207,7 @@ func autoConvert_audit_ObjectReference_To_v1alpha1_ObjectReference(in *audit.Obj func autoConvert_v1alpha1_Policy_To_audit_Policy(in *Policy, out *audit.Policy, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta out.Rules = *(*[]audit.PolicyRule)(unsafe.Pointer(&in.Rules)) + out.OmitStages = *(*[]audit.Stage)(unsafe.Pointer(&in.OmitStages)) return nil } @@ -226,6 +219,7 @@ func Convert_v1alpha1_Policy_To_audit_Policy(in *Policy, out *audit.Policy, s co func autoConvert_audit_Policy_To_v1alpha1_Policy(in *audit.Policy, out *Policy, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta out.Rules = *(*[]PolicyRule)(unsafe.Pointer(&in.Rules)) + out.OmitStages = *(*[]Stage)(unsafe.Pointer(&in.OmitStages)) return nil } diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.deepcopy.go index 7b0029e933a1..e914b98ec35b 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.deepcopy.go @@ -23,52 +23,9 @@ package v1alpha1 import ( v1 "k8s.io/api/authentication/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Event).DeepCopyInto(out.(*Event)) - return nil - }, InType: reflect.TypeOf(&Event{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EventList).DeepCopyInto(out.(*EventList)) - return nil - }, InType: reflect.TypeOf(&EventList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GroupResources).DeepCopyInto(out.(*GroupResources)) - return nil - }, InType: reflect.TypeOf(&GroupResources{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ObjectReference).DeepCopyInto(out.(*ObjectReference)) - return nil - }, InType: reflect.TypeOf(&ObjectReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Policy).DeepCopyInto(out.(*Policy)) - return nil - }, InType: reflect.TypeOf(&Policy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PolicyList).DeepCopyInto(out.(*PolicyList)) - return nil - }, InType: reflect.TypeOf(&PolicyList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PolicyRule).DeepCopyInto(out.(*PolicyRule)) - return nil - }, InType: reflect.TypeOf(&PolicyRule{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Event) DeepCopyInto(out *Event) { *out = *in @@ -126,6 +83,8 @@ func (in *Event) DeepCopyInto(out *Event) { (*in).DeepCopyInto(*out) } } + in.RequestReceivedTimestamp.DeepCopyInto(&out.RequestReceivedTimestamp) + in.StageTimestamp.DeepCopyInto(&out.StageTimestamp) return } @@ -236,6 +195,11 @@ func (in *Policy) DeepCopyInto(out *Policy) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.OmitStages != nil { + in, out := &in.OmitStages, &out.OmitStages + *out = make([]Stage, len(*in)) + copy(*out, *in) + } return } diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/BUILD b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/BUILD index a08dd171a9a3..07e14e74c034 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/BUILD +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/BUILD @@ -1,15 +1,15 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", + "go_test", ) go_library( name = "go_default_library", srcs = [ + "conversion.go", "doc.go", "generated.pb.go", "register.go", @@ -18,7 +18,7 @@ go_library( "zz_generated.deepcopy.go", "zz_generated.defaults.go", ], - tags = ["automanaged"], + importpath = "k8s.io/apiserver/pkg/apis/audit/v1beta1", deps = [ "//vendor/github.com/gogo/protobuf/proto:go_default_library", "//vendor/k8s.io/api/authentication/v1:go_default_library", @@ -50,3 +50,16 @@ filegroup( srcs = ["generated.proto"], visibility = ["//visibility:public"], ) + +go_test( + name = "go_default_test", + srcs = ["conversion_test.go"], + importpath = "k8s.io/apiserver/pkg/apis/audit/v1beta1", + library = ":go_default_library", + deps = [ + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + 
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apiserver/pkg/apis/audit:go_default_library", + ], +) diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/conversion.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/conversion.go new file mode 100644 index 000000000000..bccdcb72437b --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/conversion.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apiserver/pkg/apis/audit" +) + +func Convert_v1beta1_Event_To_audit_Event(in *Event, out *audit.Event, s conversion.Scope) error { + if err := autoConvert_v1beta1_Event_To_audit_Event(in, out, s); err != nil { + return err + } + if out.StageTimestamp.IsZero() { + out.StageTimestamp = metav1.NewMicroTime(in.CreationTimestamp.Time) + } + if out.RequestReceivedTimestamp.IsZero() { + out.RequestReceivedTimestamp = metav1.NewMicroTime(in.Timestamp.Time) + } + return nil +} + +func Convert_audit_Event_To_v1beta1_Event(in *audit.Event, out *Event, s conversion.Scope) error { + if err := autoConvert_audit_Event_To_v1beta1_Event(in, out, s); err != nil { + return err + } + out.CreationTimestamp = metav1.NewTime(in.StageTimestamp.Time) + out.Timestamp = metav1.NewTime(in.RequestReceivedTimestamp.Time) + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/doc.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/doc.go index dfc148edfd68..f2e3c67d7ad0 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/doc.go +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:conversion-gen=k8s.io/apiserver/pkg/apis/audit // +k8s:openapi-gen=true // +k8s:defaulter-gen=TypeMeta diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.pb.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.pb.go index eaf98883bb58..1a6639416916 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.pb.go +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.pb.go @@ -222,6 +222,24 @@ func (m *Event) MarshalTo(dAtA []byte) (int, error) { } i += n8 } + dAtA[i] = 0x7a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RequestReceivedTimestamp.Size())) + n9, err := m.RequestReceivedTimestamp.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.StageTimestamp.Size())) + n10, err := m.StageTimestamp.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 return i, nil } @@ -243,11 +261,11 @@ func (m *EventList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n9, err := m.ListMeta.MarshalTo(dAtA[i:]) + n11, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n9 + i += n11 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -383,11 +401,11 @@ func (m *Policy) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n10, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n12, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n10 + i += n12 if len(m.Rules) > 0 { for _, msg := range m.Rules { dAtA[i] = 0x12 @@ -400,6 +418,21 @@ func (m *Policy) MarshalTo(dAtA []byte) (int, error) { i += n } } + if len(m.OmitStages) > 0 { + for _, s := range m.OmitStages { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } return i, nil } @@ -421,11 +454,11 @@ func (m *PolicyList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n11, err := m.ListMeta.MarshalTo(dAtA[i:]) + n13, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n11 + i += n13 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -637,6 +670,10 @@ func (m *Event) Size() (n int) { l = m.ResponseObject.Size() n += 1 + l + sovGenerated(uint64(l)) } + l = m.RequestReceivedTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.StageTimestamp.Size() + n += 2 + l + sovGenerated(uint64(l)) return n } @@ -707,6 +744,12 @@ func (m *Policy) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if len(m.OmitStages) > 0 { + for _, s := range m.OmitStages { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -806,6 +849,8 @@ func (this *Event) String() string { `ResponseStatus:` + strings.Replace(fmt.Sprintf("%v", this.ResponseStatus), "Status", "k8s_io_apimachinery_pkg_apis_meta_v1.Status", 1) + `,`, `RequestObject:` + strings.Replace(fmt.Sprintf("%v", this.RequestObject), "Unknown", "k8s_io_apimachinery_pkg_runtime.Unknown", 1) + `,`, `ResponseObject:` + strings.Replace(fmt.Sprintf("%v", this.ResponseObject), "Unknown", "k8s_io_apimachinery_pkg_runtime.Unknown", 1) + `,`, + `RequestReceivedTimestamp:` + 
strings.Replace(strings.Replace(this.RequestReceivedTimestamp.String(), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1), `&`, ``, 1) + `,`, + `StageTimestamp:` + strings.Replace(strings.Replace(this.StageTimestamp.String(), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -857,6 +902,7 @@ func (this *Policy) String() string { s := strings.Join([]string{`&Policy{`, `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, + `OmitStages:` + fmt.Sprintf("%v", this.OmitStages) + `,`, `}`, }, "") return s @@ -1355,6 +1401,66 @@ func (m *Event) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestReceivedTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RequestReceivedTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StageTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.StageTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1996,6 +2102,35 @@ func (m *Policy) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OmitStages", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OmitStages = append(m.OmitStages, Stage(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -2522,78 +2657,81 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 1153 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4f, 0x6f, 0x1b, 0x45, - 0x14, 0xcf, 0xd6, 0x71, 0xe2, 0x9d, 0x34, 0x7f, 0x3a, 0x45, 0x74, 0x95, 0x83, 0x1d, 0x8c, 0x04, - 0x11, 0xa4, 0xbb, 0x4d, 0x5b, 0x48, 0x2e, 0x1c, 0x62, 0x15, 0x81, 0xa5, 0x10, 0xa2, 0x49, 0x5c, - 0x21, 0xe0, 0xc0, 
0xd8, 0x7e, 0xb1, 0x97, 0x78, 0xff, 0x30, 0x33, 0x6b, 0x94, 0x1b, 0x1f, 0x81, - 0x3b, 0xdf, 0x82, 0x0f, 0x50, 0x21, 0xc1, 0x21, 0xc7, 0x1e, 0x7b, 0xb2, 0x88, 0xf9, 0x16, 0x39, - 0xa1, 0x99, 0x9d, 0xdd, 0x59, 0xdb, 0x4d, 0xeb, 0x5c, 0x7a, 0xdb, 0x7d, 0xef, 0xf7, 0xfb, 0xcd, - 0x7b, 0x6f, 0xde, 0x7b, 0xbb, 0xe8, 0xf8, 0x7c, 0x9f, 0xbb, 0x7e, 0xe4, 0x9d, 0x27, 0x6d, 0x60, - 0x21, 0x08, 0xe0, 0xde, 0x10, 0xc2, 0x6e, 0xc4, 0x3c, 0xed, 0xa0, 0xb1, 0xcf, 0x81, 0x0d, 0x81, - 0x79, 0xf1, 0x79, 0x4f, 0xbd, 0x79, 0x34, 0xe9, 0xfa, 0xc2, 0x1b, 0xee, 0xb6, 0x41, 0xd0, 0x5d, - 0xaf, 0x07, 0x21, 0x30, 0x2a, 0xa0, 0xeb, 0xc6, 0x2c, 0x12, 0x11, 0xfe, 0x38, 0x25, 0xba, 0x39, - 0xd1, 0x8d, 0xcf, 0x7b, 0xea, 0xcd, 0x55, 0x44, 0x57, 0x13, 0x37, 0x1f, 0xf6, 0x7c, 0xd1, 0x4f, - 0xda, 0x6e, 0x27, 0x0a, 0xbc, 0x5e, 0xd4, 0x8b, 0x3c, 0xc5, 0x6f, 0x27, 0x67, 0xea, 0x4d, 0xbd, - 0xa8, 0xa7, 0x54, 0x77, 0x73, 0xc7, 0x04, 0xe4, 0xd1, 0x44, 0xf4, 0x21, 0x14, 0x7e, 0x87, 0x0a, - 0x3f, 0x0a, 0xbd, 0xe1, 0x4c, 0x14, 0x9b, 0x4f, 0x0d, 0x3a, 0xa0, 0x9d, 0xbe, 0x1f, 0x02, 0xbb, - 0x30, 0x19, 0x04, 0x20, 0xe8, 0xeb, 0x58, 0xde, 0x4d, 0x2c, 0x96, 0x84, 0xc2, 0x0f, 0x60, 0x86, - 0xf0, 0xf9, 0xdb, 0x08, 0xbc, 0xd3, 0x87, 0x80, 0xce, 0xf0, 0x9e, 0xdc, 0xc4, 0x4b, 0x84, 0x3f, - 0xf0, 0xfc, 0x50, 0x70, 0xc1, 0x66, 0x48, 0xfb, 0x6f, 0xbf, 0x12, 0x3a, 0x88, 0xfb, 0xb3, 0x77, - 0x52, 0xff, 0xbb, 0x82, 0xca, 0x5f, 0x0e, 0x21, 0x14, 0xf8, 0x27, 0x54, 0x91, 0xc9, 0x77, 0xa9, - 0xa0, 0x8e, 0xb5, 0x65, 0x6d, 0xaf, 0x3c, 0x7e, 0xe4, 0x9a, 0x0b, 0xcb, 0x63, 0x31, 0x77, 0x26, - 0xd1, 0xee, 0x70, 0xd7, 0xfd, 0xb6, 0xfd, 0x33, 0x74, 0xc4, 0x37, 0x20, 0x68, 0x03, 0x5f, 0x8e, - 0x6a, 0x0b, 0xe3, 0x51, 0x0d, 0x19, 0x1b, 0xc9, 0x55, 0xf1, 0x0e, 0x2a, 0x0f, 0x60, 0x08, 0x03, - 0xe7, 0xce, 0x96, 0xb5, 0x6d, 0x37, 0xde, 0xd7, 0xe0, 0xf2, 0xa1, 0x34, 0x5e, 0x67, 0x0f, 0x24, - 0x05, 0xe1, 0x1f, 0x90, 0x2d, 0xeb, 0xc4, 0x05, 0x0d, 0x62, 0xa7, 0xa4, 0x02, 0xfa, 0x64, 0xbe, - 0x80, 0x4e, 0xfd, 0x00, 0x1a, 0xf7, 0xb4, 0xba, 0x7d, 0x9a, 0x89, 0x10, 0xa3, 0x87, 0x8f, 0xd0, - 0xb2, 0x2a, 0x4c, 0xf3, 0x99, 0xb3, 0xa8, 0x82, 0x79, 0xaa, 0xe1, 0xcb, 0x07, 0xa9, 0xf9, 0x7a, - 0x54, 0xfb, 0xe0, 0xa6, 0x9b, 0x10, 0x17, 0x31, 0x70, 0xb7, 0xd5, 0x7c, 0x46, 0x32, 0x11, 0x99, - 0x1a, 0x17, 0xb4, 0x07, 0x4e, 0x79, 0x32, 0xb5, 0x13, 0x69, 0xbc, 0xce, 0x1e, 0x48, 0x0a, 0xc2, - 0x8f, 0x11, 0x62, 0xf0, 0x4b, 0x02, 0x5c, 0xb4, 0x48, 0xd3, 0x59, 0x52, 0x94, 0xbc, 0x74, 0x24, - 0xf7, 0x90, 0x02, 0x0a, 0x6f, 0xa1, 0xc5, 0x21, 0xb0, 0xb6, 0xb3, 0xac, 0xd0, 0x77, 0x35, 0x7a, - 0xf1, 0x39, 0xb0, 0x36, 0x51, 0x1e, 0xfc, 0x35, 0x5a, 0x4c, 0x38, 0x30, 0xa7, 0xa2, 0x6a, 0xf5, - 0x51, 0xa1, 0x56, 0xee, 0xe4, 0x54, 0xc8, 0x1a, 0xb5, 0x38, 0xb0, 0x66, 0x78, 0x16, 0x19, 0x25, - 0x69, 0x21, 0x4a, 0x01, 0xf7, 0xd1, 0x86, 0x1f, 0xc4, 0xc0, 0x78, 0x14, 0xca, 0x56, 0x91, 0x1e, - 0xc7, 0xbe, 0x95, 0xea, 0x7b, 0xe3, 0x51, 0x6d, 0xa3, 0x39, 0xa5, 0x41, 0x66, 0x54, 0xf1, 0xa7, - 0xc8, 0xe6, 0x51, 0xc2, 0x3a, 0xd0, 0x3c, 0xe6, 0x0e, 0xda, 0x2a, 0x6d, 0xdb, 0x8d, 0x55, 0x79, - 0x69, 0x27, 0x99, 0x91, 0x18, 0x3f, 0x06, 0x64, 0x47, 0xaa, 0xaf, 0x08, 0x9c, 0x39, 0x2b, 0x2a, - 0x9e, 0x7d, 0x77, 0xce, 0x9d, 0xa2, 0xbb, 0x94, 0xc0, 0x19, 0x30, 0x08, 0x3b, 0x90, 0x1e, 0x93, - 0x1b, 0x89, 0x51, 0xc6, 0x7d, 0xb4, 0xc6, 0x80, 0xc7, 0x51, 0xc8, 0xe1, 0x44, 0x50, 0x91, 0x70, - 0xe7, 0xae, 0x3a, 0x6b, 0x67, 0xbe, 0xee, 0x4b, 0x39, 0x0d, 0x3c, 0x1e, 0xd5, 0xd6, 0xc8, 0x84, - 0x0e, 0x99, 0xd2, 0xc5, 0x14, 0xad, 0xea, 0x1b, 0x4e, 0x03, 0x71, 0x56, 0xd5, 0x41, 0xdb, 0x37, - 0x1e, 0xa4, 0x77, 0x87, 0xdb, 0x0a, 0xcf, 
0xc3, 0xe8, 0xd7, 0xb0, 0x71, 0x6f, 0x3c, 0xaa, 0xad, - 0x92, 0xa2, 0x04, 0x99, 0x54, 0xc4, 0x5d, 0x93, 0x8c, 0x3e, 0x63, 0xed, 0x96, 0x67, 0x4c, 0x24, - 0xa2, 0x0f, 0x99, 0xd2, 0xac, 0xbf, 0xb0, 0x90, 0xad, 0xb6, 0xc8, 0xa1, 0xcf, 0x05, 0xfe, 0x71, - 0x66, 0x93, 0xb8, 0xf3, 0x95, 0x4e, 0xb2, 0xd5, 0x1e, 0xd9, 0xd0, 0x4d, 0x59, 0xc9, 0x2c, 0x85, - 0x2d, 0x72, 0x82, 0xca, 0xbe, 0x80, 0x80, 0x3b, 0x77, 0xb6, 0x4a, 0x53, 0xd2, 0x6f, 0xee, 0x00, - 0x15, 0x60, 0x63, 0x35, 0x1b, 0xcd, 0xa6, 0x14, 0x21, 0xa9, 0x56, 0xfd, 0x0f, 0x0b, 0xad, 0x7d, - 0xc5, 0xa2, 0x24, 0x26, 0x90, 0xf6, 0x1b, 0xc7, 0x1f, 0xa2, 0x72, 0x4f, 0x5a, 0x54, 0x0a, 0xb6, - 0xe1, 0xa5, 0xb0, 0xd4, 0x27, 0xfb, 0x97, 0x65, 0x0c, 0x15, 0x90, 0xee, 0xdf, 0x5c, 0x86, 0x18, - 0x3f, 0xde, 0x93, 0xd7, 0x9d, 0xbe, 0x1c, 0xd1, 0x00, 0xb8, 0x53, 0x52, 0x04, 0x7d, 0x89, 0x05, - 0x07, 0x99, 0xc4, 0xd5, 0xff, 0x2c, 0xa1, 0xf5, 0xa9, 0xfe, 0xc5, 0x3b, 0xa8, 0x92, 0x81, 0x74, - 0x84, 0x79, 0xd1, 0x32, 0x2d, 0x92, 0x23, 0xb0, 0x87, 0xec, 0x50, 0x4a, 0xc5, 0xb4, 0x03, 0x7a, - 0xfd, 0xe6, 0x0b, 0xf2, 0x28, 0x73, 0x10, 0x83, 0x91, 0xeb, 0x46, 0xbe, 0xa8, 0xc5, 0x5b, 0x58, - 0x37, 0x12, 0x4b, 0x94, 0x07, 0x37, 0x50, 0x29, 0xf1, 0xbb, 0x7a, 0x7d, 0x3e, 0xd2, 0x80, 0x52, - 0x6b, 0xde, 0xd5, 0x29, 0xc9, 0x32, 0x09, 0x1a, 0xfb, 0xaa, 0xa2, 0x7a, 0x73, 0xe6, 0x49, 0x1c, - 0x1c, 0x37, 0xd3, 0x4a, 0xe7, 0x08, 0xb9, 0x36, 0x69, 0xec, 0x3f, 0x07, 0xc6, 0xfd, 0x28, 0x9c, - 0x5e, 0x9b, 0x07, 0xc7, 0x4d, 0xed, 0x21, 0x05, 0x14, 0x3e, 0x40, 0xeb, 0x59, 0x11, 0x32, 0x62, - 0xba, 0x41, 0x1f, 0x68, 0xe2, 0x3a, 0x99, 0x74, 0x93, 0x69, 0x3c, 0xfe, 0x0c, 0xad, 0xf0, 0xa4, - 0x9d, 0x17, 0xbb, 0xa2, 0xe8, 0xf7, 0x35, 0x7d, 0xe5, 0xc4, 0xb8, 0x48, 0x11, 0x57, 0xff, 0xc7, - 0x42, 0x4b, 0xc7, 0xd1, 0xc0, 0xef, 0x5c, 0xbc, 0x83, 0x4f, 0xeb, 0x77, 0xa8, 0xcc, 0x92, 0x01, - 0x64, 0x43, 0xf1, 0x64, 0xee, 0xa1, 0x48, 0x23, 0x24, 0xc9, 0x00, 0x4c, 0x87, 0xcb, 0x37, 0x4e, - 0x52, 0xc1, 0xfa, 0x5f, 0x16, 0x42, 0x29, 0xe8, 0x1d, 0xcc, 0xf6, 0xe9, 0xe4, 0x6c, 0x7b, 0xb7, - 0x4c, 0xe3, 0x86, 0xe1, 0x7e, 0x51, 0xca, 0x52, 0x90, 0x99, 0x99, 0xdf, 0x10, 0x6b, 0x9e, 0xdf, - 0x90, 0x1a, 0x2a, 0xcb, 0x6f, 0x62, 0x36, 0xdd, 0xb6, 0x44, 0xca, 0x4f, 0x17, 0x27, 0xa9, 0x1d, - 0xbb, 0x08, 0xc9, 0x07, 0xd5, 0xa2, 0xd9, 0x48, 0xaf, 0xc9, 0x8b, 0x6a, 0xe5, 0x56, 0x52, 0x40, - 0x48, 0x41, 0xf9, 0xb9, 0xe6, 0xce, 0xa2, 0x11, 0x94, 0x5f, 0x71, 0x4e, 0x52, 0x3b, 0xee, 0x17, - 0x77, 0x4a, 0x59, 0x15, 0x62, 0x6f, 0xee, 0x42, 0x4c, 0x2e, 0x31, 0x33, 0xe4, 0xaf, 0x5d, 0x48, - 0x2e, 0x42, 0xf9, 0xc4, 0x73, 0x67, 0xc9, 0x84, 0x9e, 0xaf, 0x04, 0x4e, 0x0a, 0x08, 0xfc, 0x05, - 0x5a, 0x0f, 0xa3, 0x30, 0x93, 0x6a, 0x91, 0x43, 0xee, 0x2c, 0x2b, 0xd2, 0x7d, 0x39, 0x48, 0x47, - 0x93, 0x2e, 0x32, 0x8d, 0xc5, 0x7b, 0x08, 0x45, 0x81, 0x2f, 0xd4, 0xaf, 0x10, 0x77, 0x2a, 0x8a, - 0xf9, 0x40, 0xb5, 0x74, 0x6e, 0x35, 0xbf, 0x4a, 0x05, 0x68, 0xe3, 0xe1, 0xe5, 0x55, 0x75, 0xe1, - 0xe5, 0x55, 0x75, 0xe1, 0xd5, 0x55, 0x75, 0xe1, 0xb7, 0x71, 0xd5, 0xba, 0x1c, 0x57, 0xad, 0x97, - 0xe3, 0xaa, 0xf5, 0x6a, 0x5c, 0xb5, 0xfe, 0x1d, 0x57, 0xad, 0xdf, 0xff, 0xab, 0x2e, 0x7c, 0xbf, - 0xac, 0x6b, 0xf0, 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x19, 0x44, 0x81, 0xf3, 0xba, 0x0c, 0x00, - 0x00, + // 1216 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x41, 0x6f, 0x1b, 0x45, + 0x14, 0xce, 0xd6, 0x71, 0x63, 0x4f, 0x1a, 0x27, 0x9d, 0x22, 0xba, 0xca, 0xc1, 0x36, 0x46, 0x82, + 0x08, 0xd2, 0xdd, 0xa6, 0x2d, 0x24, 0x17, 0x0e, 0xb1, 0x8a, 0xc0, 0x52, 0x1a, 0xa2, 0x71, 0x5c, + 0x21, 0xe0, 
0xc0, 0xda, 0x7e, 0xb1, 0x87, 0xd8, 0xbb, 0xcb, 0xcc, 0xac, 0x51, 0x6e, 0xfc, 0x01, + 0x24, 0xee, 0xfc, 0x0b, 0x7e, 0x40, 0xc5, 0x81, 0x43, 0x8e, 0x3d, 0xf6, 0x64, 0x11, 0xf3, 0x2f, + 0x22, 0x21, 0xa1, 0x99, 0x9d, 0xdd, 0x59, 0xdb, 0x35, 0x75, 0x38, 0xf4, 0xb6, 0xf3, 0xde, 0xf7, + 0x7d, 0xf3, 0xe6, 0xcd, 0x7b, 0x6f, 0x16, 0x9d, 0x9c, 0x1f, 0x70, 0x87, 0x06, 0xee, 0x79, 0xd4, + 0x06, 0xe6, 0x83, 0x00, 0xee, 0x8e, 0xc0, 0xef, 0x06, 0xcc, 0xd5, 0x0e, 0x2f, 0xa4, 0x1c, 0xd8, + 0x08, 0x98, 0x1b, 0x9e, 0xf7, 0xd4, 0xca, 0xf5, 0xa2, 0x2e, 0x15, 0xee, 0x68, 0xaf, 0x0d, 0xc2, + 0xdb, 0x73, 0x7b, 0xe0, 0x03, 0xf3, 0x04, 0x74, 0x9d, 0x90, 0x05, 0x22, 0xc0, 0x1f, 0xc6, 0x44, + 0x27, 0x25, 0x3a, 0xe1, 0x79, 0x4f, 0xad, 0x1c, 0x45, 0x74, 0x34, 0x71, 0xfb, 0x41, 0x8f, 0x8a, + 0x7e, 0xd4, 0x76, 0x3a, 0xc1, 0xd0, 0xed, 0x05, 0xbd, 0xc0, 0x55, 0xfc, 0x76, 0x74, 0xa6, 0x56, + 0x6a, 0xa1, 0xbe, 0x62, 0xdd, 0xed, 0x5d, 0x13, 0x90, 0xeb, 0x45, 0xa2, 0x0f, 0xbe, 0xa0, 0x1d, + 0x4f, 0xd0, 0xc0, 0x77, 0x47, 0x73, 0x51, 0x6c, 0x3f, 0x31, 0xe8, 0xa1, 0xd7, 0xe9, 0x53, 0x1f, + 0xd8, 0x85, 0x39, 0xc1, 0x10, 0x84, 0xf7, 0x3a, 0x96, 0xbb, 0x88, 0xc5, 0x22, 0x5f, 0xd0, 0x21, + 0xcc, 0x11, 0x3e, 0x7d, 0x13, 0x81, 0x77, 0xfa, 0x30, 0xf4, 0xe6, 0x78, 0x8f, 0x17, 0xf1, 0x22, + 0x41, 0x07, 0x2e, 0xf5, 0x05, 0x17, 0x6c, 0x8e, 0x74, 0xf0, 0xe6, 0x2b, 0xf1, 0x06, 0x61, 0x7f, + 0xfe, 0x4e, 0x6a, 0x7f, 0x22, 0x94, 0xff, 0x7c, 0x04, 0xbe, 0xc0, 0xdf, 0xa3, 0x82, 0x3c, 0x7c, + 0xd7, 0x13, 0x9e, 0x6d, 0x55, 0xad, 0x9d, 0xf5, 0x47, 0x0f, 0x1d, 0x73, 0x61, 0x69, 0x2c, 0xe6, + 0xce, 0x24, 0xda, 0x19, 0xed, 0x39, 0x5f, 0xb5, 0x7f, 0x80, 0x8e, 0x78, 0x06, 0xc2, 0xab, 0xe3, + 0xcb, 0x71, 0x65, 0x65, 0x32, 0xae, 0x20, 0x63, 0x23, 0xa9, 0x2a, 0xde, 0x45, 0xf9, 0x01, 0x8c, + 0x60, 0x60, 0xdf, 0xaa, 0x5a, 0x3b, 0xc5, 0xfa, 0xbb, 0x1a, 0x9c, 0x3f, 0x92, 0xc6, 0xeb, 0xe4, + 0x83, 0xc4, 0x20, 0xfc, 0x2d, 0x2a, 0xca, 0x3c, 0x71, 0xe1, 0x0d, 0x43, 0x3b, 0xa7, 0x02, 0xfa, + 0x68, 0xb9, 0x80, 0x4e, 0xe9, 0x10, 0xea, 0x77, 0xb5, 0x7a, 0xf1, 0x34, 0x11, 0x21, 0x46, 0x0f, + 0x1f, 0xa3, 0x35, 0x95, 0x98, 0xc6, 0x53, 0x7b, 0x55, 0x05, 0xf3, 0x44, 0xc3, 0xd7, 0x0e, 0x63, + 0xf3, 0xf5, 0xb8, 0xf2, 0xde, 0xa2, 0x9b, 0x10, 0x17, 0x21, 0x70, 0xa7, 0xd5, 0x78, 0x4a, 0x12, + 0x11, 0x79, 0x34, 0x2e, 0xbc, 0x1e, 0xd8, 0xf9, 0xe9, 0xa3, 0x35, 0xa5, 0xf1, 0x3a, 0xf9, 0x20, + 0x31, 0x08, 0x3f, 0x42, 0x88, 0xc1, 0x8f, 0x11, 0x70, 0xd1, 0x22, 0x0d, 0xfb, 0xb6, 0xa2, 0xa4, + 0xa9, 0x23, 0xa9, 0x87, 0x64, 0x50, 0xb8, 0x8a, 0x56, 0x47, 0xc0, 0xda, 0xf6, 0x9a, 0x42, 0xdf, + 0xd1, 0xe8, 0xd5, 0xe7, 0xc0, 0xda, 0x44, 0x79, 0xf0, 0x97, 0x68, 0x35, 0xe2, 0xc0, 0xec, 0x82, + 0xca, 0xd5, 0x07, 0x99, 0x5c, 0x39, 0xd3, 0x5d, 0x21, 0x73, 0xd4, 0xe2, 0xc0, 0x1a, 0xfe, 0x59, + 0x60, 0x94, 0xa4, 0x85, 0x28, 0x05, 0xdc, 0x47, 0x5b, 0x74, 0x18, 0x02, 0xe3, 0x81, 0x2f, 0x4b, + 0x45, 0x7a, 0xec, 0xe2, 0x8d, 0x54, 0xdf, 0x99, 0x8c, 0x2b, 0x5b, 0x8d, 0x19, 0x0d, 0x32, 0xa7, + 0x8a, 0x3f, 0x46, 0x45, 0x1e, 0x44, 0xac, 0x03, 0x8d, 0x13, 0x6e, 0xa3, 0x6a, 0x6e, 0xa7, 0x58, + 0xdf, 0x90, 0x97, 0xd6, 0x4c, 0x8c, 0xc4, 0xf8, 0x31, 0xa0, 0x62, 0xa0, 0xea, 0x8a, 0xc0, 0x99, + 0xbd, 0xae, 0xe2, 0x39, 0x70, 0x96, 0x9c, 0x29, 0xba, 0x4a, 0x09, 0x9c, 0x01, 0x03, 0xbf, 0x03, + 0xf1, 0x36, 0xa9, 0x91, 0x18, 0x65, 0xdc, 0x47, 0x25, 0x06, 0x3c, 0x0c, 0x7c, 0x0e, 0x4d, 0xe1, + 0x89, 0x88, 0xdb, 0x77, 0xd4, 0x5e, 0xbb, 0xcb, 0x55, 0x5f, 0xcc, 0xa9, 0xe3, 0xc9, 0xb8, 0x52, + 0x22, 0x53, 0x3a, 0x64, 0x46, 0x17, 0x7b, 0x68, 0x43, 0xdf, 0x70, 0x1c, 0x88, 0xbd, 0xa1, 0x36, + 0xda, 0x59, 0xb8, 0x91, 0x9e, 0x1d, 
0x4e, 0xcb, 0x3f, 0xf7, 0x83, 0x9f, 0xfc, 0xfa, 0xdd, 0xc9, + 0xb8, 0xb2, 0x41, 0xb2, 0x12, 0x64, 0x5a, 0x11, 0x77, 0xcd, 0x61, 0xf4, 0x1e, 0xa5, 0x1b, 0xee, + 0x31, 0x75, 0x10, 0xbd, 0xc9, 0x8c, 0x26, 0xfe, 0xc5, 0x42, 0xb6, 0xde, 0x97, 0x40, 0x07, 0xe8, + 0x08, 0xba, 0x69, 0xdb, 0xd9, 0x9b, 0x6a, 0x43, 0x77, 0xb9, 0xec, 0x3d, 0xa3, 0x1d, 0x16, 0xa8, + 0x06, 0xae, 0xea, 0xc2, 0xb4, 0xc9, 0x02, 0x61, 0xb2, 0x70, 0x4b, 0x1c, 0xa0, 0x92, 0xea, 0x34, + 0x13, 0xc4, 0xd6, 0xff, 0x0b, 0x22, 0x69, 0xe4, 0x52, 0x73, 0x4a, 0x8e, 0xcc, 0xc8, 0xd7, 0x5e, + 0x58, 0xa8, 0xa8, 0xc6, 0xe8, 0x11, 0xe5, 0x02, 0x7f, 0x37, 0x37, 0x4a, 0x9d, 0xe5, 0x36, 0x96, + 0x6c, 0x35, 0x48, 0xb7, 0xf4, 0xbe, 0x85, 0xc4, 0x92, 0x19, 0xa3, 0x4d, 0x94, 0xa7, 0x02, 0x86, + 0xdc, 0xbe, 0x55, 0xcd, 0xcd, 0x48, 0xff, 0x77, 0x0b, 0xa8, 0x00, 0xeb, 0x1b, 0xc9, 0x6c, 0x6a, + 0x48, 0x11, 0x12, 0x6b, 0xd5, 0x7e, 0xb3, 0x50, 0xe9, 0x0b, 0x16, 0x44, 0x21, 0x81, 0xb8, 0xe1, + 0x38, 0x7e, 0x1f, 0xe5, 0x7b, 0xd2, 0xa2, 0x8e, 0x50, 0x34, 0xbc, 0x18, 0x16, 0xfb, 0x64, 0x03, + 0xb3, 0x84, 0xa1, 0x02, 0xd2, 0x0d, 0x9c, 0xca, 0x10, 0xe3, 0xc7, 0xfb, 0xb2, 0xde, 0xe3, 0xc5, + 0xb1, 0x37, 0x04, 0x6e, 0xe7, 0x14, 0x41, 0x57, 0x71, 0xc6, 0x41, 0xa6, 0x71, 0xb5, 0xdf, 0x73, + 0x68, 0x73, 0xa6, 0x81, 0xf1, 0x2e, 0x2a, 0x24, 0x20, 0x1d, 0x61, 0x9a, 0xb4, 0x44, 0x8b, 0xa4, + 0x08, 0xec, 0xa2, 0xa2, 0x2f, 0xa5, 0x42, 0xaf, 0x03, 0xfa, 0xfd, 0x49, 0x5f, 0x88, 0xe3, 0xc4, + 0x41, 0x0c, 0x46, 0xce, 0x5b, 0xb9, 0x50, 0x2f, 0x4f, 0x66, 0xde, 0x4a, 0x2c, 0x51, 0x1e, 0x5c, + 0x47, 0xb9, 0x88, 0x76, 0xf5, 0xfb, 0xf1, 0x50, 0x03, 0x72, 0xad, 0x65, 0xdf, 0x0e, 0x49, 0x96, + 0x87, 0xf0, 0x42, 0xaa, 0x32, 0xaa, 0x9f, 0x8e, 0xf4, 0x10, 0x87, 0x27, 0x8d, 0x38, 0xd3, 0x29, + 0x42, 0xbe, 0x1b, 0x5e, 0x48, 0x9f, 0x03, 0xe3, 0x34, 0xf0, 0x67, 0xdf, 0x8d, 0xc3, 0x93, 0x86, + 0xf6, 0x90, 0x0c, 0x0a, 0x1f, 0xa2, 0xcd, 0x24, 0x09, 0x09, 0x31, 0x7e, 0x42, 0xee, 0x6b, 0xe2, + 0x26, 0x99, 0x76, 0x93, 0x59, 0x3c, 0xfe, 0x04, 0xad, 0xf3, 0xa8, 0x9d, 0x26, 0xbb, 0xa0, 0xe8, + 0xf7, 0x34, 0x7d, 0xbd, 0x69, 0x5c, 0x24, 0x8b, 0xab, 0xfd, 0x63, 0xa1, 0xdb, 0x27, 0xc1, 0x80, + 0x76, 0x2e, 0xde, 0xc2, 0xbf, 0xc5, 0xd7, 0x28, 0xcf, 0xa2, 0x01, 0x24, 0x4d, 0xf1, 0x78, 0xe9, + 0xa6, 0x88, 0x23, 0x24, 0xd1, 0x00, 0x4c, 0x85, 0xcb, 0x15, 0x27, 0xb1, 0x20, 0xde, 0x47, 0x28, + 0x18, 0x52, 0xa1, 0x06, 0x40, 0x52, 0xb1, 0xf7, 0x55, 0x1c, 0xa9, 0xd5, 0x3c, 0xf0, 0x19, 0x68, + 0xed, 0x0f, 0x0b, 0xa1, 0x58, 0xfd, 0x2d, 0x0c, 0x85, 0xd3, 0xe9, 0xa1, 0xe0, 0xde, 0xf0, 0xfc, + 0x0b, 0xa6, 0xc2, 0x8b, 0x5c, 0x72, 0x04, 0x99, 0x12, 0xf3, 0x03, 0x67, 0x2d, 0xf3, 0x03, 0x57, + 0x41, 0x79, 0xf9, 0x37, 0x91, 0x8c, 0x85, 0xa2, 0x44, 0xca, 0x47, 0x9f, 0x93, 0xd8, 0x8e, 0x1d, + 0x84, 0xe4, 0x87, 0xaa, 0xed, 0x24, 0xb3, 0x25, 0x99, 0xd9, 0x56, 0x6a, 0x25, 0x19, 0x84, 0x14, + 0x94, 0x3f, 0x3a, 0xdc, 0x5e, 0x35, 0x82, 0xf2, 0xff, 0x87, 0x93, 0xd8, 0x8e, 0xfb, 0xd9, 0x61, + 0x94, 0x57, 0x89, 0xd8, 0x5f, 0x3a, 0x11, 0xd3, 0xd3, 0xcf, 0x4c, 0x87, 0xd7, 0x4e, 0x32, 0x07, + 0xa1, 0x74, 0x54, 0x70, 0xfb, 0xb6, 0x09, 0x3d, 0x9d, 0x25, 0x9c, 0x64, 0x10, 0xf8, 0x33, 0xb4, + 0xe9, 0x07, 0x7e, 0x22, 0xd5, 0x22, 0x47, 0xdc, 0x5e, 0x53, 0xa4, 0x7b, 0xb2, 0x03, 0x8f, 0xa7, + 0x5d, 0x64, 0x16, 0x3b, 0x53, 0x83, 0x85, 0xa5, 0x6b, 0xb0, 0xfe, 0xe0, 0xf2, 0xaa, 0xbc, 0xf2, + 0xf2, 0xaa, 0xbc, 0xf2, 0xea, 0xaa, 0xbc, 0xf2, 0xf3, 0xa4, 0x6c, 0x5d, 0x4e, 0xca, 0xd6, 0xcb, + 0x49, 0xd9, 0x7a, 0x35, 0x29, 0x5b, 0x7f, 0x4d, 0xca, 0xd6, 0xaf, 0x7f, 0x97, 0x57, 0xbe, 0x59, + 0xd3, 0x39, 0xf8, 0x37, 0x00, 0x00, 0xff, 0xff, 0x04, 0xbb, 
0x40, 0x37, 0xf4, 0x0d, 0x00, 0x00, } diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.proto b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.proto index efbb037c5429..dfe4e947803e 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.proto +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.proto @@ -35,12 +35,15 @@ option go_package = "v1beta1"; message Event { // ObjectMeta is included for interoperability with API infrastructure. // +optional + // DEPRECATED: Use StageTimestamp which supports micro second instead of ObjectMeta.CreateTimestamp + // and the rest of the object is not used optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // AuditLevel at which event was generated optional string level = 2; // Time the request reached the apiserver. + // DEPRECATED: Use RequestReceivedTimestamp which supports micro second instead. optional k8s.io.apimachinery.pkg.apis.meta.v1.Time timestamp = 3; // Unique audit ID, generated for each request. @@ -90,6 +93,14 @@ message Event { // at Response Level. // +optional optional k8s.io.apimachinery.pkg.runtime.Unknown responseObject = 14; + + // Time the request reached the apiserver. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime requestReceivedTimestamp = 15; + + // Time the request reached current audit stage. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime stageTimestamp = 16; } // EventList is a list of audit Events. @@ -163,6 +174,11 @@ message Policy { // The default audit level is None, but can be overridden by a catch-all rule at the end of the list. // PolicyRules are strictly ordered. repeated PolicyRule rules = 2; + + // OmitStages is a list of stages for which no events are created. Note that this can also + // be specified per rule in which case the union of both are omitted. + // +optional + repeated string omitStages = 3; } // PolicyList is a list of audit Policies. @@ -213,8 +229,10 @@ message PolicyRule { // +optional repeated string nonResourceURLs = 7; - // OmitStages specify events generated in which stages will not be emitted to backend. + // OmitStages is a list of stages for which no events are created. Note that this can also + // be specified policy wide in which case the union of both are omitted. // An empty list means no restrictions will apply. + // +optional repeated string omitStages = 8; } diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/types.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/types.go index 1de8d75748cb..259599c050eb 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/types.go +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/types.go @@ -73,12 +73,15 @@ type Event struct { metav1.TypeMeta `json:",inline"` // ObjectMeta is included for interoperability with API infrastructure. // +optional + // DEPRECATED: Use StageTimestamp which supports micro second instead of ObjectMeta.CreateTimestamp + // and the rest of the object is not used metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // AuditLevel at which event was generated Level Level `json:"level" protobuf:"bytes,2,opt,name=level,casttype=Level"` // Time the request reached the apiserver. + // DEPRECATED: Use RequestReceivedTimestamp which supports micro second instead. Timestamp metav1.Time `json:"timestamp" protobuf:"bytes,3,opt,name=timestamp"` // Unique audit ID, generated for each request. 
AuditID types.UID `json:"auditID" protobuf:"bytes,4,opt,name=auditID,casttype=k8s.io/apimachinery/pkg/types.UID"` @@ -119,6 +122,12 @@ type Event struct { // at Response Level. // +optional ResponseObject *runtime.Unknown `json:"responseObject,omitempty" protobuf:"bytes,14,opt,name=responseObject"` + // Time the request reached the apiserver. + // +optional + RequestReceivedTimestamp metav1.MicroTime `json:"requestReceivedTimestamp" protobuf:"bytes,15,opt,name=requestReceivedTimestamp"` + // Time the request reached current audit stage. + // +optional + StageTimestamp metav1.MicroTime `json:"stageTimestamp" protobuf:"bytes,16,opt,name=stageTimestamp"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -147,6 +156,11 @@ type Policy struct { // The default audit level is None, but can be overridden by a catch-all rule at the end of the list. // PolicyRules are strictly ordered. Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` + + // OmitStages is a list of stages for which no events are created. Note that this can also + // be specified per rule in which case the union of both are omitted. + // +optional + OmitStages []Stage `json:"omitStages,omitempty" protobuf:"bytes,3,rep,name=omitStages"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -202,8 +216,10 @@ type PolicyRule struct { // +optional NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,7,rep,name=nonResourceURLs"` - // OmitStages specify events generated in which stages will not be emitted to backend. + // OmitStages is a list of stages for which no events are created. Note that this can also + // be specified policy wide in which case the union of both are omitted. // An empty list means no restrictions will apply. + // +optional OmitStages []Stage `json:"omitStages,omitempty" protobuf:"bytes,8,rep,name=omitStages"` } diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.conversion.go index 81d9270d2046..e608d13817d9 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.conversion.go +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.conversion.go @@ -56,9 +56,9 @@ func RegisterConversions(scheme *runtime.Scheme) error { } func autoConvert_v1beta1_Event_To_audit_Event(in *Event, out *audit.Event, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta + // WARNING: in.ObjectMeta requires manual conversion: does not exist in peer-type out.Level = audit.Level(in.Level) - out.Timestamp = in.Timestamp + // WARNING: in.Timestamp requires manual conversion: does not exist in peer-type out.AuditID = types.UID(in.AuditID) out.Stage = audit.Stage(in.Stage) out.RequestURI = in.RequestURI @@ -73,18 +73,13 @@ func autoConvert_v1beta1_Event_To_audit_Event(in *Event, out *audit.Event, s con out.ResponseStatus = (*v1.Status)(unsafe.Pointer(in.ResponseStatus)) out.RequestObject = (*runtime.Unknown)(unsafe.Pointer(in.RequestObject)) out.ResponseObject = (*runtime.Unknown)(unsafe.Pointer(in.ResponseObject)) + out.RequestReceivedTimestamp = in.RequestReceivedTimestamp + out.StageTimestamp = in.StageTimestamp return nil } -// Convert_v1beta1_Event_To_audit_Event is an autogenerated conversion function. 
-func Convert_v1beta1_Event_To_audit_Event(in *Event, out *audit.Event, s conversion.Scope) error { - return autoConvert_v1beta1_Event_To_audit_Event(in, out, s) -} - func autoConvert_audit_Event_To_v1beta1_Event(in *audit.Event, out *Event, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta out.Level = Level(in.Level) - out.Timestamp = in.Timestamp out.AuditID = types.UID(in.AuditID) out.Stage = Stage(in.Stage) out.RequestURI = in.RequestURI @@ -99,17 +94,24 @@ func autoConvert_audit_Event_To_v1beta1_Event(in *audit.Event, out *Event, s con out.ResponseStatus = (*v1.Status)(unsafe.Pointer(in.ResponseStatus)) out.RequestObject = (*runtime.Unknown)(unsafe.Pointer(in.RequestObject)) out.ResponseObject = (*runtime.Unknown)(unsafe.Pointer(in.ResponseObject)) + out.RequestReceivedTimestamp = in.RequestReceivedTimestamp + out.StageTimestamp = in.StageTimestamp return nil } -// Convert_audit_Event_To_v1beta1_Event is an autogenerated conversion function. -func Convert_audit_Event_To_v1beta1_Event(in *audit.Event, out *Event, s conversion.Scope) error { - return autoConvert_audit_Event_To_v1beta1_Event(in, out, s) -} - func autoConvert_v1beta1_EventList_To_audit_EventList(in *EventList, out *audit.EventList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]audit.Event)(unsafe.Pointer(&in.Items)) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]audit.Event, len(*in)) + for i := range *in { + if err := Convert_v1beta1_Event_To_audit_Event(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } return nil } @@ -120,7 +122,17 @@ func Convert_v1beta1_EventList_To_audit_EventList(in *EventList, out *audit.Even func autoConvert_audit_EventList_To_v1beta1_EventList(in *audit.EventList, out *EventList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]Event)(unsafe.Pointer(&in.Items)) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Event, len(*in)) + for i := range *in { + if err := Convert_audit_Event_To_v1beta1_Event(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } return nil } @@ -190,6 +202,7 @@ func Convert_audit_ObjectReference_To_v1beta1_ObjectReference(in *audit.ObjectRe func autoConvert_v1beta1_Policy_To_audit_Policy(in *Policy, out *audit.Policy, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta out.Rules = *(*[]audit.PolicyRule)(unsafe.Pointer(&in.Rules)) + out.OmitStages = *(*[]audit.Stage)(unsafe.Pointer(&in.OmitStages)) return nil } @@ -201,6 +214,7 @@ func Convert_v1beta1_Policy_To_audit_Policy(in *Policy, out *audit.Policy, s con func autoConvert_audit_Policy_To_v1beta1_Policy(in *audit.Policy, out *Policy, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta out.Rules = *(*[]PolicyRule)(unsafe.Pointer(&in.Rules)) + out.OmitStages = *(*[]Stage)(unsafe.Pointer(&in.OmitStages)) return nil } diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.deepcopy.go index 7da86d5399ad..e088db37ab12 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.deepcopy.go @@ -23,52 +23,9 @@ package v1beta1 import ( v1 "k8s.io/api/authentication/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { 
- SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Event).DeepCopyInto(out.(*Event)) - return nil - }, InType: reflect.TypeOf(&Event{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EventList).DeepCopyInto(out.(*EventList)) - return nil - }, InType: reflect.TypeOf(&EventList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GroupResources).DeepCopyInto(out.(*GroupResources)) - return nil - }, InType: reflect.TypeOf(&GroupResources{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ObjectReference).DeepCopyInto(out.(*ObjectReference)) - return nil - }, InType: reflect.TypeOf(&ObjectReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Policy).DeepCopyInto(out.(*Policy)) - return nil - }, InType: reflect.TypeOf(&Policy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PolicyList).DeepCopyInto(out.(*PolicyList)) - return nil - }, InType: reflect.TypeOf(&PolicyList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PolicyRule).DeepCopyInto(out.(*PolicyRule)) - return nil - }, InType: reflect.TypeOf(&PolicyRule{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Event) DeepCopyInto(out *Event) { *out = *in @@ -126,6 +83,8 @@ func (in *Event) DeepCopyInto(out *Event) { (*in).DeepCopyInto(*out) } } + in.RequestReceivedTimestamp.DeepCopyInto(&out.RequestReceivedTimestamp) + in.StageTimestamp.DeepCopyInto(&out.StageTimestamp) return } @@ -236,6 +195,11 @@ func (in *Policy) DeepCopyInto(out *Policy) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.OmitStages != nil { + in, out := &in.OmitStages, &out.OmitStages + *out = make([]Stage, len(*in)) + copy(*out, *in) + } return } diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/validation/BUILD b/vendor/k8s.io/apiserver/pkg/apis/audit/validation/BUILD index 38ab70d076c2..956db3591a8a 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/validation/BUILD +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/validation/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["validation_test.go"], + importpath = "k8s.io/apiserver/pkg/apis/audit/validation", library = ":go_default_library", deps = ["//vendor/k8s.io/apiserver/pkg/apis/audit:go_default_library"], ) @@ -16,6 +17,7 @@ go_test( go_library( name = "go_default_library", srcs = ["validation.go"], + importpath = "k8s.io/apiserver/pkg/apis/audit/validation", deps = [ "//vendor/k8s.io/apimachinery/pkg/api/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/validation/validation.go b/vendor/k8s.io/apiserver/pkg/apis/audit/validation/validation.go index 6520a7639488..f80aba01ffec 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/validation/validation.go +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/validation/validation.go @@ -26,6 +26,7 @@ import ( func ValidatePolicy(policy *audit.Policy) field.ErrorList { var allErrs field.ErrorList + allErrs = append(allErrs, validateOmitStages(policy.OmitStages, field.NewPath("omitStages"))...) rulePath := field.NewPath("rules") for i, rule := range policy.Rules { allErrs = append(allErrs, validatePolicyRule(rule, rulePath.Index(i))...) diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/audit/zz_generated.deepcopy.go index 907e51b1248d..ebc89e66b7d9 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/zz_generated.deepcopy.go @@ -22,57 +22,13 @@ package audit import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { - return []conversion.GeneratedDeepCopyFunc{ - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Event).DeepCopyInto(out.(*Event)) - return nil - }, InType: reflect.TypeOf(&Event{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EventList).DeepCopyInto(out.(*EventList)) - return nil - }, InType: reflect.TypeOf(&EventList{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GroupResources).DeepCopyInto(out.(*GroupResources)) - return nil - }, InType: reflect.TypeOf(&GroupResources{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ObjectReference).DeepCopyInto(out.(*ObjectReference)) - return nil - }, InType: reflect.TypeOf(&ObjectReference{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Policy).DeepCopyInto(out.(*Policy)) - return nil - }, InType: reflect.TypeOf(&Policy{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PolicyList).DeepCopyInto(out.(*PolicyList)) - return nil - }, InType: reflect.TypeOf(&PolicyList{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PolicyRule).DeepCopyInto(out.(*PolicyRule)) - return nil - }, InType: reflect.TypeOf(&PolicyRule{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*UserInfo).DeepCopyInto(out.(*UserInfo)) - return nil - }, InType: reflect.TypeOf(&UserInfo{})}, - } -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Event) DeepCopyInto(out *Event) { *out = *in out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Timestamp.DeepCopyInto(&out.Timestamp) in.User.DeepCopyInto(&out.User) if in.ImpersonatedUser != nil { in, out := &in.ImpersonatedUser, &out.ImpersonatedUser @@ -124,6 +80,8 @@ func (in *Event) DeepCopyInto(out *Event) { (*in).DeepCopyInto(*out) } } + in.RequestReceivedTimestamp.DeepCopyInto(&out.RequestReceivedTimestamp) + in.StageTimestamp.DeepCopyInto(&out.StageTimestamp) return } @@ -234,6 +192,11 @@ func (in *Policy) DeepCopyInto(out *Policy) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.OmitStages != nil { + in, out := &in.OmitStages, &out.OmitStages + *out = make([]Stage, len(*in)) + copy(*out, *in) + } return } diff --git a/vendor/k8s.io/apiserver/pkg/audit/BUILD b/vendor/k8s.io/apiserver/pkg/audit/BUILD index 7574915a00e9..525424cdded0 100644 --- a/vendor/k8s.io/apiserver/pkg/audit/BUILD +++ b/vendor/k8s.io/apiserver/pkg/audit/BUILD @@ -16,6 +16,7 @@ go_library( "types.go", "union.go", ], + importpath = "k8s.io/apiserver/pkg/audit", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/pborman/uuid:go_default_library", @@ -38,6 +39,7 @@ go_library( go_test( name = "go_default_test", srcs = ["union_test.go"], + importpath = "k8s.io/apiserver/pkg/audit", library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/audit/format.go b/vendor/k8s.io/apiserver/pkg/audit/format.go index 62076d695c54..bf805f525734 100644 --- a/vendor/k8s.io/apiserver/pkg/audit/format.go +++ b/vendor/k8s.io/apiserver/pkg/audit/format.go @@ -61,7 +61,7 @@ func EventString(ev *auditinternal.Event) string { } return fmt.Sprintf("%s AUDIT: id=%q stage=%q ip=%q method=%q user=%q groups=%q as=%q 
asgroups=%q namespace=%q uri=%q response=\"%s\"", - ev.Timestamp.Format(time.RFC3339Nano), ev.AuditID, ev.Stage, ip, ev.Verb, username, groups, asuser, asgroups, namespace, ev.RequestURI, response) + ev.RequestReceivedTimestamp.Format(time.RFC3339Nano), ev.AuditID, ev.Stage, ip, ev.Verb, username, groups, asuser, asgroups, namespace, ev.RequestURI, response) } func auditStringSlice(inList []string) string { diff --git a/vendor/k8s.io/apiserver/pkg/audit/policy/BUILD b/vendor/k8s.io/apiserver/pkg/audit/policy/BUILD index a0e731f624af..17d5881b0669 100644 --- a/vendor/k8s.io/apiserver/pkg/audit/policy/BUILD +++ b/vendor/k8s.io/apiserver/pkg/audit/policy/BUILD @@ -12,6 +12,7 @@ go_test( "checker_test.go", "reader_test.go", ], + importpath = "k8s.io/apiserver/pkg/audit/policy", library = ":go_default_library", deps = [ "//vendor/github.com/stretchr/testify/assert:go_default_library", @@ -30,9 +31,10 @@ go_library( "checker.go", "reader.go", ], + importpath = "k8s.io/apiserver/pkg/audit/policy", deps = [ "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apiserver/pkg/apis/audit:go_default_library", "//vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1:go_default_library", "//vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/audit/policy/checker.go b/vendor/k8s.io/apiserver/pkg/audit/policy/checker.go index b92ffe26b680..3259013ad722 100644 --- a/vendor/k8s.io/apiserver/pkg/audit/policy/checker.go +++ b/vendor/k8s.io/apiserver/pkg/audit/policy/checker.go @@ -36,9 +36,26 @@ type Checker interface { // NewChecker creates a new policy checker. func NewChecker(policy *audit.Policy) Checker { + for i, rule := range policy.Rules { + policy.Rules[i].OmitStages = unionStages(policy.OmitStages, rule.OmitStages) + } return &policyChecker{*policy} } +func unionStages(stageLists ...[]audit.Stage) []audit.Stage { + m := make(map[audit.Stage]bool) + for _, sl := range stageLists { + for _, s := range sl { + m[s] = true + } + } + result := make([]audit.Stage, 0, len(m)) + for key := range m { + result = append(result, key) + } + return result +} + // FakeChecker creates a checker that returns a constant level for all requests (for testing). func FakeChecker(level audit.Level, stage []audit.Stage) Checker { return &fakeChecker{level, stage} @@ -54,7 +71,7 @@ func (p *policyChecker) LevelAndStages(attrs authorizer.Attributes) (audit.Level return rule.Level, rule.OmitStages } } - return DefaultAuditLevel, nil + return DefaultAuditLevel, p.OmitStages } // Check whether the rule matches the request attrs. 
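The checker.go hunk above makes the policy-wide OmitStages flow into each rule at construction time (NewChecker unions the two lists per rule) and returns the policy-wide list alone when no rule matches a request. A minimal, self-contained sketch of that union semantics — using local stand-in types rather than the vendored k8s.io/apiserver audit package — looks like this:

// Sketch of the OmitStages union behavior introduced in checker.go.
// Stage and unionStages here are local stand-ins, not the vendored API.
package main

import "fmt"

type Stage string

// unionStages mirrors the helper added in checker.go: it deduplicates the
// policy-wide and per-rule stage lists into a single slice.
func unionStages(stageLists ...[]Stage) []Stage {
	seen := make(map[Stage]bool)
	var result []Stage
	for _, list := range stageLists {
		for _, s := range list {
			if !seen[s] {
				seen[s] = true
				result = append(result, s)
			}
		}
	}
	return result
}

func main() {
	policyWide := []Stage{"RequestReceived"}
	perRule := []Stage{"RequestReceived", "ResponseStarted"}

	// A matched rule omits the union of both lists; an unmatched request
	// falls back to the policy-wide list with the default level
	// (see LevelAndStages in the hunk above).
	fmt.Println(unionStages(policyWide, perRule)) // [RequestReceived ResponseStarted]
	fmt.Println(unionStages(policyWide))          // [RequestReceived]
}

Note that the vendored helper collects stages into a map before flattening, so its output order is unspecified; this sketch keeps insertion order only to make the example deterministic.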
diff --git a/vendor/k8s.io/apiserver/pkg/audit/policy/reader.go b/vendor/k8s.io/apiserver/pkg/audit/policy/reader.go index b748e8649cc6..1d02e1a3fb9f 100644 --- a/vendor/k8s.io/apiserver/pkg/audit/policy/reader.go +++ b/vendor/k8s.io/apiserver/pkg/audit/policy/reader.go @@ -20,7 +20,7 @@ import ( "fmt" "io/ioutil" - "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" auditinternal "k8s.io/apiserver/pkg/apis/audit" auditv1alpha1 "k8s.io/apiserver/pkg/apis/audit/v1alpha1" auditv1beta1 "k8s.io/apiserver/pkg/apis/audit/v1beta1" @@ -30,6 +30,20 @@ import ( "github.com/golang/glog" ) +var ( + apiGroupVersions = []schema.GroupVersion{ + auditv1beta1.SchemeGroupVersion, + auditv1alpha1.SchemeGroupVersion, + } + apiGroupVersionSet = map[schema.GroupVersion]bool{} +) + +func init() { + for _, gv := range apiGroupVersions { + apiGroupVersionSet[gv] = true + } +} + func LoadPolicyFromFile(filePath string) (*auditinternal.Policy, error) { if filePath == "" { return nil, fmt.Errorf("file path not specified") @@ -40,11 +54,18 @@ func LoadPolicyFromFile(filePath string) (*auditinternal.Policy, error) { } policy := &auditinternal.Policy{} - decoder := audit.Codecs.UniversalDecoder(auditv1beta1.SchemeGroupVersion, auditv1alpha1.SchemeGroupVersion) - if err := runtime.DecodeInto(decoder, policyDef, policy); err != nil { + decoder := audit.Codecs.UniversalDecoder(apiGroupVersions...) + + _, gvk, err := decoder.Decode(policyDef, nil, policy) + if err != nil { return nil, fmt.Errorf("failed decoding file %q: %v", filePath, err) } + // Ensure the policy file contained an apiVersion and kind. + if !apiGroupVersionSet[schema.GroupVersion{Group: gvk.Group, Version: gvk.Version}] { + return nil, fmt.Errorf("unknown group version field %v in policy file %s", gvk, filePath) + } + if err := validation.ValidatePolicy(policy); err != nil { return nil, err.ToAggregate() } diff --git a/vendor/k8s.io/apiserver/pkg/audit/request.go b/vendor/k8s.io/apiserver/pkg/audit/request.go index 955cef2e1046..b738076a74ff 100644 --- a/vendor/k8s.io/apiserver/pkg/audit/request.go +++ b/vendor/k8s.io/apiserver/pkg/audit/request.go @@ -40,7 +40,7 @@ import ( func NewEventFromRequest(req *http.Request, level auditinternal.Level, attribs authorizer.Attributes) (*auditinternal.Event, error) { ev := &auditinternal.Event{ - Timestamp: metav1.NewTime(time.Now()), + RequestReceivedTimestamp: metav1.NewMicroTime(time.Now()), Verb: attribs.GetVerb(), RequestURI: req.URL.RequestURI(), } diff --git a/vendor/k8s.io/apiserver/pkg/authentication/authenticator/BUILD b/vendor/k8s.io/apiserver/pkg/authentication/authenticator/BUILD index e49523f29989..268bb29f1aa2 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/authenticator/BUILD +++ b/vendor/k8s.io/apiserver/pkg/authentication/authenticator/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["interfaces.go"], + importpath = "k8s.io/apiserver/pkg/authentication/authenticator", deps = ["//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library"], ) diff --git a/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/BUILD b/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/BUILD index 19084bed46a3..7782bf5d34f6 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/BUILD +++ b/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/BUILD @@ -12,6 +12,7 @@ go_library( "loopback.go", "requestheader.go", ], + importpath = "k8s.io/apiserver/pkg/authentication/authenticatorfactory", deps = [ 
"//vendor/github.com/go-openapi/spec:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/authentication/group/BUILD b/vendor/k8s.io/apiserver/pkg/authentication/group/BUILD index bfe3ebdf46cf..ea84fb9dcd91 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/group/BUILD +++ b/vendor/k8s.io/apiserver/pkg/authentication/group/BUILD @@ -12,6 +12,7 @@ go_test( "group_adder_test.go", "token_group_adder_test.go", ], + importpath = "k8s.io/apiserver/pkg/authentication/group", library = ":go_default_library", deps = [ "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", @@ -26,6 +27,7 @@ go_library( "group_adder.go", "token_group_adder.go", ], + importpath = "k8s.io/apiserver/pkg/authentication/group", deps = [ "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/anonymous/BUILD b/vendor/k8s.io/apiserver/pkg/authentication/request/anonymous/BUILD index e0538757bc7d..c20d90c88c84 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/request/anonymous/BUILD +++ b/vendor/k8s.io/apiserver/pkg/authentication/request/anonymous/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["anonymous_test.go"], + importpath = "k8s.io/apiserver/pkg/authentication/request/anonymous", library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", @@ -20,6 +21,7 @@ go_test( go_library( name = "go_default_library", srcs = ["anonymous.go"], + importpath = "k8s.io/apiserver/pkg/authentication/request/anonymous", deps = [ "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken/BUILD b/vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken/BUILD index 37a5d33443d5..93f6ad9e9320 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken/BUILD +++ b/vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["bearertoken_test.go"], + importpath = "k8s.io/apiserver/pkg/authentication/request/bearertoken", library = ":go_default_library", deps = [ "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", @@ -19,6 +20,7 @@ go_test( go_library( name = "go_default_library", srcs = ["bearertoken.go"], + importpath = "k8s.io/apiserver/pkg/authentication/request/bearertoken", deps = [ "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/BUILD b/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/BUILD index 93a0164bf86a..a37735742f55 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/BUILD +++ b/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["requestheader_test.go"], + importpath = "k8s.io/apiserver/pkg/authentication/request/headerrequest", library = ":go_default_library", deps = ["//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library"], ) @@ -16,6 +17,7 @@ go_test( go_library( name 
= "go_default_library", srcs = ["requestheader.go"], + importpath = "k8s.io/apiserver/pkg/authentication/request/headerrequest", deps = [ "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/union/BUILD b/vendor/k8s.io/apiserver/pkg/authentication/request/union/BUILD index c3c0131e7e8c..4b2dcadf2dfc 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/request/union/BUILD +++ b/vendor/k8s.io/apiserver/pkg/authentication/request/union/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["unionauth_test.go"], + importpath = "k8s.io/apiserver/pkg/authentication/request/union", library = ":go_default_library", deps = ["//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library"], ) @@ -16,6 +17,7 @@ go_test( go_library( name = "go_default_library", srcs = ["union.go"], + importpath = "k8s.io/apiserver/pkg/authentication/request/union", deps = [ "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/websocket/BUILD b/vendor/k8s.io/apiserver/pkg/authentication/request/websocket/BUILD index ccb9d7424637..1ea8e4c5b4c4 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/request/websocket/BUILD +++ b/vendor/k8s.io/apiserver/pkg/authentication/request/websocket/BUILD @@ -9,6 +9,7 @@ load( go_library( name = "go_default_library", srcs = ["protocol.go"], + importpath = "k8s.io/apiserver/pkg/authentication/request/websocket", deps = [ "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", @@ -19,6 +20,7 @@ go_library( go_test( name = "go_default_test", srcs = ["protocol_test.go"], + importpath = "k8s.io/apiserver/pkg/authentication/request/websocket", library = ":go_default_library", deps = [ "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/x509/BUILD b/vendor/k8s.io/apiserver/pkg/authentication/request/x509/BUILD index faa86bde6c87..2114dc00bd7a 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/request/x509/BUILD +++ b/vendor/k8s.io/apiserver/pkg/authentication/request/x509/BUILD @@ -15,6 +15,7 @@ go_test( "testdata/intermediate.pem", "testdata/root.pem", ], + importpath = "k8s.io/apiserver/pkg/authentication/request/x509", library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", @@ -29,6 +30,7 @@ go_library( "doc.go", "x509.go", ], + importpath = "k8s.io/apiserver/pkg/authentication/request/x509", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/x509/x509.go b/vendor/k8s.io/apiserver/pkg/authentication/request/x509/x509.go index d583a3218468..708a89e9eace 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/request/x509/x509.go +++ b/vendor/k8s.io/apiserver/pkg/authentication/request/x509/x509.go @@ -47,6 +47,10 @@ var clientCertificateExpirationHistogram = prometheus.NewHistogram( (2 * 24 * time.Hour).Seconds(), (4 * 24 * time.Hour).Seconds(), (7 * 24 * time.Hour).Seconds(), + (30 * 24 * time.Hour).Seconds(), + (3 * 30 * 24 * time.Hour).Seconds(), + (6 * 30 
* 24 * time.Hour).Seconds(), + (12 * 30 * 24 * time.Hour).Seconds(), }, }, ) diff --git a/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/BUILD b/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/BUILD index 9f63af9e36ac..db06fbde332a 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/BUILD +++ b/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/BUILD @@ -9,12 +9,14 @@ load( go_test( name = "go_default_test", srcs = ["util_test.go"], + importpath = "k8s.io/apiserver/pkg/authentication/serviceaccount", library = ":go_default_library", ) go_library( name = "go_default_library", srcs = ["util.go"], + importpath = "k8s.io/apiserver/pkg/authentication/serviceaccount", deps = ["//vendor/k8s.io/apimachinery/pkg/api/validation:go_default_library"], ) diff --git a/vendor/k8s.io/apiserver/pkg/authentication/token/cache/BUILD b/vendor/k8s.io/apiserver/pkg/authentication/token/cache/BUILD index 3384859f7236..59cd4322f72e 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/token/cache/BUILD +++ b/vendor/k8s.io/apiserver/pkg/authentication/token/cache/BUILD @@ -1,7 +1,5 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", @@ -14,8 +12,8 @@ go_test( "cache_test.go", "cached_token_authenticator_test.go", ], + importpath = "k8s.io/apiserver/pkg/authentication/token/cache", library = ":go_default_library", - tags = ["automanaged"], deps = [ "//vendor/github.com/pborman/uuid:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library", @@ -31,7 +29,7 @@ go_library( "cache_striped.go", "cached_token_authenticator.go", ], - tags = ["automanaged"], + importpath = "k8s.io/apiserver/pkg/authentication/token/cache", deps = [ "//vendor/k8s.io/apimachinery/pkg/util/cache:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/authentication/token/tokenfile/BUILD b/vendor/k8s.io/apiserver/pkg/authentication/token/tokenfile/BUILD index f298f15b490c..4d32dd1d31c1 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/token/tokenfile/BUILD +++ b/vendor/k8s.io/apiserver/pkg/authentication/token/tokenfile/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["tokenfile_test.go"], + importpath = "k8s.io/apiserver/pkg/authentication/token/tokenfile", library = ":go_default_library", deps = ["//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library"], ) @@ -16,6 +17,7 @@ go_test( go_library( name = "go_default_library", srcs = ["tokenfile.go"], + importpath = "k8s.io/apiserver/pkg/authentication/token/tokenfile", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/authentication/token/tokenfile/tokenfile.go b/vendor/k8s.io/apiserver/pkg/authentication/token/tokenfile/tokenfile.go index 43ff764de9e3..57bb6c596d8e 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/token/tokenfile/tokenfile.go +++ b/vendor/k8s.io/apiserver/pkg/authentication/token/tokenfile/tokenfile.go @@ -62,11 +62,17 @@ func NewCSV(path string) (*TokenAuthenticator, error) { if len(record) < 3 { return nil, fmt.Errorf("token file '%s' must have at least 3 columns (token, user name, user uid), found %d", path, len(record)) } + + recordNum++ + if record[0] == "" { + glog.Warningf("empty token has been found in token file '%s', record number '%d'", path, recordNum) + continue + } 
+ obj := &user.DefaultInfo{ Name: record[1], UID: record[2], } - recordNum++ if _, exist := tokens[record[0]]; exist { glog.Warningf("duplicate token has been found in token file '%s', record number '%d'", path, recordNum) } diff --git a/vendor/k8s.io/apiserver/pkg/authentication/token/union/BUILD b/vendor/k8s.io/apiserver/pkg/authentication/token/union/BUILD index 7a6b23db96b4..0f940c33475c 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/token/union/BUILD +++ b/vendor/k8s.io/apiserver/pkg/authentication/token/union/BUILD @@ -1,7 +1,5 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", @@ -11,15 +9,15 @@ load( go_test( name = "go_default_test", srcs = ["unionauth_test.go"], + importpath = "k8s.io/apiserver/pkg/authentication/token/union", library = ":go_default_library", - tags = ["automanaged"], deps = ["//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library"], ) go_library( name = "go_default_library", srcs = ["union.go"], - tags = ["automanaged"], + importpath = "k8s.io/apiserver/pkg/authentication/token/union", deps = [ "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/authentication/user/BUILD b/vendor/k8s.io/apiserver/pkg/authentication/user/BUILD index 8f8d940ea824..f22095c254c8 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/user/BUILD +++ b/vendor/k8s.io/apiserver/pkg/authentication/user/BUILD @@ -11,6 +11,7 @@ go_library( "doc.go", "user.go", ], + importpath = "k8s.io/apiserver/pkg/authentication/user", ) filegroup( diff --git a/vendor/k8s.io/apiserver/pkg/authorization/authorizer/BUILD b/vendor/k8s.io/apiserver/pkg/authorization/authorizer/BUILD index be1b16ea16f5..2a7508f4d253 100644 --- a/vendor/k8s.io/apiserver/pkg/authorization/authorizer/BUILD +++ b/vendor/k8s.io/apiserver/pkg/authorization/authorizer/BUILD @@ -11,6 +11,7 @@ go_library( "interfaces.go", "rule.go", ], + importpath = "k8s.io/apiserver/pkg/authorization/authorizer", deps = ["//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library"], ) diff --git a/vendor/k8s.io/apiserver/pkg/authorization/authorizer/interfaces.go b/vendor/k8s.io/apiserver/pkg/authorization/authorizer/interfaces.go index e94da3e1a44e..594109096b0f 100644 --- a/vendor/k8s.io/apiserver/pkg/authorization/authorizer/interfaces.go +++ b/vendor/k8s.io/apiserver/pkg/authorization/authorizer/interfaces.go @@ -67,12 +67,12 @@ type Attributes interface { // zero or more calls to methods of the Attributes interface. It returns nil when an action is // authorized, otherwise it returns an error. type Authorizer interface { - Authorize(a Attributes) (authorized bool, reason string, err error) + Authorize(a Attributes) (authorized Decision, reason string, err error) } -type AuthorizerFunc func(a Attributes) (bool, string, error) +type AuthorizerFunc func(a Attributes) (Decision, string, error) -func (f AuthorizerFunc) Authorize(a Attributes) (bool, string, error) { +func (f AuthorizerFunc) Authorize(a Attributes) (Decision, string, error) { return f(a) } @@ -144,3 +144,15 @@ func (a AttributesRecord) IsResourceRequest() bool { func (a AttributesRecord) GetPath() string { return a.Path } + +type Decision int + +const ( + // DecisionDeny means that an authorizer decided to deny the action. + DecisionDeny Decision = iota + // DecisionAllow means that an authorizer decided to allow the action. 
+ DecisionAllow + // DecisionNoOpionion means that an authorizer has no opinion on wether + // to allow or deny an action. + DecisionNoOpinion +) diff --git a/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/BUILD b/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/BUILD index 4dd0801ca066..7fff4c90a325 100644 --- a/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/BUILD +++ b/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/BUILD @@ -8,7 +8,8 @@ load( go_test( name = "go_default_test", - srcs = ["authz_test.go"], + srcs = ["builtin_test.go"], + importpath = "k8s.io/apiserver/pkg/authorization/authorizerfactory", library = ":go_default_library", deps = [ "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", @@ -22,6 +23,7 @@ go_library( "builtin.go", "delegating.go", ], + importpath = "k8s.io/apiserver/pkg/authorization/authorizerfactory", deps = [ "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/builtin.go b/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/builtin.go index 8381e83f4376..fc36bc0bc933 100644 --- a/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/builtin.go +++ b/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/builtin.go @@ -28,8 +28,8 @@ import ( // It is useful in tests and when using kubernetes in an open manner. type alwaysAllowAuthorizer struct{} -func (alwaysAllowAuthorizer) Authorize(a authorizer.Attributes) (authorized bool, reason string, err error) { - return true, "", nil +func (alwaysAllowAuthorizer) Authorize(a authorizer.Attributes) (authorized authorizer.Decision, reason string, err error) { + return authorizer.DecisionAllow, "", nil } func (alwaysAllowAuthorizer) RulesFor(user user.Info, namespace string) ([]authorizer.ResourceRuleInfo, []authorizer.NonResourceRuleInfo, bool, error) { @@ -56,8 +56,8 @@ func NewAlwaysAllowAuthorizer() *alwaysAllowAuthorizer { // It is useful in unit tests to force an operation to be forbidden. type alwaysDenyAuthorizer struct{} -func (alwaysDenyAuthorizer) Authorize(a authorizer.Attributes) (authorized bool, reason string, err error) { - return false, "Everything is forbidden.", nil +func (alwaysDenyAuthorizer) Authorize(a authorizer.Attributes) (decision authorizer.Decision, reason string, err error) { + return authorizer.DecisionNoOpinion, "Everything is forbidden.", nil } func (alwaysDenyAuthorizer) RulesFor(user user.Info, namespace string) ([]authorizer.ResourceRuleInfo, []authorizer.NonResourceRuleInfo, bool, error) { @@ -68,35 +68,22 @@ func NewAlwaysDenyAuthorizer() *alwaysDenyAuthorizer { return new(alwaysDenyAuthorizer) } -// alwaysFailAuthorizer is an implementation of authorizer.Attributes -// which always says no to an authorization request. -// It is useful in unit tests to force an operation to fail with error. 
-type alwaysFailAuthorizer struct{} - -func (alwaysFailAuthorizer) Authorize(a authorizer.Attributes) (authorized bool, reason string, err error) { - return false, "", errors.New("Authorization failure.") -} - -func NewAlwaysFailAuthorizer() authorizer.Authorizer { - return new(alwaysFailAuthorizer) -} - type privilegedGroupAuthorizer struct { groups []string } -func (r *privilegedGroupAuthorizer) Authorize(attr authorizer.Attributes) (bool, string, error) { +func (r *privilegedGroupAuthorizer) Authorize(attr authorizer.Attributes) (authorizer.Decision, string, error) { if attr.GetUser() == nil { - return false, "Error", errors.New("no user on request.") + return authorizer.DecisionNoOpinion, "Error", errors.New("no user on request.") } for _, attr_group := range attr.GetUser().GetGroups() { for _, priv_group := range r.groups { if priv_group == attr_group { - return true, "", nil + return authorizer.DecisionAllow, "", nil } } } - return false, "", nil + return authorizer.DecisionNoOpinion, "", nil } // NewPrivilegedGroups is for use in loopback scenarios diff --git a/vendor/k8s.io/apiserver/pkg/authorization/union/BUILD b/vendor/k8s.io/apiserver/pkg/authorization/union/BUILD index c533b9bf829a..0614f330cc5d 100644 --- a/vendor/k8s.io/apiserver/pkg/authorization/union/BUILD +++ b/vendor/k8s.io/apiserver/pkg/authorization/union/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["union_test.go"], + importpath = "k8s.io/apiserver/pkg/authorization/union", library = ":go_default_library", deps = [ "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", @@ -19,6 +20,7 @@ go_test( go_library( name = "go_default_library", srcs = ["union.go"], + importpath = "k8s.io/apiserver/pkg/authorization/union", deps = [ "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/authorization/union/union.go b/vendor/k8s.io/apiserver/pkg/authorization/union/union.go index 367da59d1be2..3246e4c0759e 100644 --- a/vendor/k8s.io/apiserver/pkg/authorization/union/union.go +++ b/vendor/k8s.io/apiserver/pkg/authorization/union/union.go @@ -14,6 +14,14 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package union implements an authorizer that combines multiple subauthorizer. +// The union authorizer iterates over each subauthorizer and returns the first +// decision that is either an Allow decision or a Deny decision. If a +// subauthorizer returns a NoOpinion, then the union authorizer moves onto the +// next authorizer or, if the subauthorizer was the last authorizer, returns +// NoOpinion as the aggregate decision. I.e. union authorizer creates an +// aggregate decision and supports short-circut allows and denies from +// subauthorizers. 
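// The preceding hunks replace the old boolean "authorized" result with the
// tri-state authorizer.Decision, and the union authorizer that follows
// short-circuits on the first Allow or Deny while treating NoOpinion as
// "defer to the next authorizer"; request filters such as WithAuthorization
// then admit a request only when the aggregate decision is DecisionAllow.
// The sketch below is a cut-down illustration of that aggregation, using
// stand-in types rather than the real apiserver interfaces.
package decisionsketch

type Decision int

const (
	DecisionDeny Decision = iota
	DecisionAllow
	DecisionNoOpinion
)

// Authorizer is a minimal stand-in for authorizer.Authorizer.
type Authorizer interface {
	Authorize() (Decision, string, error)
}

// Union walks the chain and returns the first authoritative decision.
func Union(chain ...Authorizer) (Decision, string, error) {
	for _, a := range chain {
		decision, reason, err := a.Authorize()
		// Allow and Deny both stop the chain; NoOpinion moves on.
		if decision == DecisionAllow || decision == DecisionDeny {
			return decision, reason, err
		}
	}
	// The vendored implementation also aggregates the per-authorizer reasons
	// and errors; that bookkeeping is elided from this sketch.
	return DecisionNoOpinion, "", nil
}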
package union import ( @@ -33,14 +41,14 @@ func New(authorizationHandlers ...authorizer.Authorizer) authorizer.Authorizer { } // Authorizes against a chain of authorizer.Authorizer objects and returns nil if successful and returns error if unsuccessful -func (authzHandler unionAuthzHandler) Authorize(a authorizer.Attributes) (bool, string, error) { +func (authzHandler unionAuthzHandler) Authorize(a authorizer.Attributes) (authorizer.Decision, string, error) { var ( errlist []error reasonlist []string ) for _, currAuthzHandler := range authzHandler { - authorized, reason, err := currAuthzHandler.Authorize(a) + decision, reason, err := currAuthzHandler.Authorize(a) if err != nil { errlist = append(errlist, err) @@ -48,13 +56,15 @@ func (authzHandler unionAuthzHandler) Authorize(a authorizer.Attributes) (bool, if len(reason) != 0 { reasonlist = append(reasonlist, reason) } - if !authorized { - continue + switch decision { + case authorizer.DecisionAllow, authorizer.DecisionDeny: + return decision, reason, err + case authorizer.DecisionNoOpinion: + // continue to the next authorizer } - return true, reason, nil } - return false, strings.Join(reasonlist, "\n"), utilerrors.NewAggregate(errlist) + return authorizer.DecisionNoOpinion, strings.Join(reasonlist, "\n"), utilerrors.NewAggregate(errlist) } // unionAuthzRulesHandler authorizer against a chain of authorizer.RuleResolver diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/BUILD b/vendor/k8s.io/apiserver/pkg/endpoints/BUILD index 65996a8900f7..f73d619255a1 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/BUILD +++ b/vendor/k8s.io/apiserver/pkg/endpoints/BUILD @@ -15,6 +15,7 @@ go_test( "proxy_test.go", "watch_test.go", ], + importpath = "k8s.io/apiserver/pkg/endpoints", library = ":go_default_library", deps = [ "//vendor/github.com/emicklei/go-restful:go_default_library", @@ -64,6 +65,7 @@ go_library( "groupversion.go", "installer.go", ], + importpath = "k8s.io/apiserver/pkg/endpoints", deps = [ "//vendor/github.com/emicklei/go-restful:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/discovery/BUILD b/vendor/k8s.io/apiserver/pkg/endpoints/discovery/BUILD index 3623cd9e4a51..008dcff56797 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/discovery/BUILD +++ b/vendor/k8s.io/apiserver/pkg/endpoints/discovery/BUILD @@ -12,6 +12,7 @@ go_test( "addresses_test.go", "root_test.go", ], + importpath = "k8s.io/apiserver/pkg/endpoints/discovery", library = ":go_default_library", deps = [ "//vendor/github.com/stretchr/testify/assert:go_default_library", @@ -36,6 +37,7 @@ go_library( "util.go", "version.go", ], + importpath = "k8s.io/apiserver/pkg/endpoints/discovery", deps = [ "//vendor/github.com/emicklei/go-restful:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/BUILD b/vendor/k8s.io/apiserver/pkg/endpoints/filters/BUILD index ec1116dae3bd..664756aba156 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/filters/BUILD +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/BUILD @@ -17,6 +17,7 @@ go_test( "legacy_audit_test.go", "requestinfo_test.go", ], + importpath = "k8s.io/apiserver/pkg/endpoints/filters", library = ":go_default_library", deps = [ "//vendor/github.com/pborman/uuid:go_default_library", @@ -50,6 +51,7 @@ go_library( "legacy_audit.go", "requestinfo.go", ], + importpath = "k8s.io/apiserver/pkg/endpoints/filters", deps = [ 
"//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/pborman/uuid:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/audit.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/audit.go index 26af50b1d371..01fc957c1ce5 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/filters/audit.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/audit.go @@ -148,7 +148,12 @@ func processAuditEvent(sink audit.Sink, ev *auditinternal.Event, omitStages []au return } } - ev.CreationTimestamp = metav1.NewTime(time.Now()) + + if ev.Stage == auditinternal.StageRequestReceived { + ev.StageTimestamp = metav1.NewMicroTime(ev.RequestReceivedTimestamp.Time) + } else { + ev.StageTimestamp = metav1.NewMicroTime(time.Now()) + } audit.ObserveEvent() sink.ProcessEvents(ev) } diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/authorization.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/authorization.go index d244257a04d5..f60756b6fd14 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/filters/authorization.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/authorization.go @@ -47,7 +47,7 @@ func WithAuthorization(handler http.Handler, requestContextMapper request.Reques return } authorized, reason, err := a.Authorize(attributes) - if authorized { + if authorized == authorizer.DecisionAllow { handler.ServeHTTP(w, req) return } diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go index 8f67caf86fef..9af292e1cef2 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go @@ -110,8 +110,8 @@ func WithImpersonation(handler http.Handler, requestContextMapper request.Reques return } - allowed, reason, err := a.Authorize(actingAsAttributes) - if err != nil || !allowed { + decision, reason, err := a.Authorize(actingAsAttributes) + if err != nil || decision != authorizer.DecisionAllow { glog.V(4).Infof("Forbidden: %#v, Reason: %s, Error: %v", req.RequestURI, reason, err) responsewriters.Forbidden(ctx, actingAsAttributes, w, req, reason, s) return diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/groupversion.go b/vendor/k8s.io/apiserver/pkg/endpoints/groupversion.go index 851a570093ab..4b8823758176 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/groupversion.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/groupversion.go @@ -66,7 +66,6 @@ type APIGroupVersion struct { Typer runtime.ObjectTyper Creater runtime.ObjectCreater Convertor runtime.ObjectConvertor - Copier runtime.ObjectCopier Defaulter runtime.ObjectDefaulter Linker runtime.SelfLinker UnsafeConvertor runtime.ObjectConvertor @@ -76,12 +75,6 @@ type APIGroupVersion struct { MinRequestTimeout time.Duration - // SubresourceGroupVersionKind contains the GroupVersionKind overrides for each subresource that is - // accessible from this API group version. The GroupVersionKind is that of the external version of - // the subresource. The key of this map should be the path of the subresource. The keys here should - // match the keys in the Storage map above for subresources. 
- SubresourceGroupVersionKind map[string]schema.GroupVersionKind - // EnableAPIResponseCompression indicates whether API Responses should support compression // if the client requests it via Accept-Encoding EnableAPIResponseCompression bool diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/BUILD b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/BUILD index f60cf7b64628..7e2cdcc1b197 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/BUILD +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/BUILD @@ -12,6 +12,7 @@ go_test( "namer_test.go", "rest_test.go", ], + importpath = "k8s.io/apiserver/pkg/endpoints/handlers", library = ":go_default_library", deps = [ "//vendor/github.com/evanphx/json-patch:go_default_library", @@ -20,6 +21,7 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", @@ -36,14 +38,19 @@ go_test( go_library( name = "go_default_library", srcs = [ + "create.go", + "delete.go", "doc.go", + "get.go", "namer.go", "patch.go", "proxy.go", "response.go", "rest.go", + "update.go", "watch.go", ], + importpath = "k8s.io/apiserver/pkg/endpoints/handlers", deps = [ "//vendor/github.com/evanphx/json-patch:go_default_library", "//vendor/github.com/golang/glog:go_default_library", @@ -53,7 +60,6 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion/unstructured:go_default_library", "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/create.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/create.go new file mode 100644 index 000000000000..dc3560623ea9 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/create.go @@ -0,0 +1,168 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package handlers + +import ( + "fmt" + "net/http" + "time" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/audit" + "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" + "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/registry/rest" + utiltrace "k8s.io/apiserver/pkg/util/trace" +) + +func createHandler(r rest.NamedCreater, scope RequestScope, typer runtime.ObjectTyper, admit admission.Interface, includeName bool) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + // For performance tracking purposes. + trace := utiltrace.New("Create " + req.URL.Path) + defer trace.LogIfLong(500 * time.Millisecond) + + // TODO: we either want to remove timeout or document it (if we document, move timeout out of this function and declare it in api_installer) + timeout := parseTimeout(req.URL.Query().Get("timeout")) + + var ( + namespace, name string + err error + ) + if includeName { + namespace, name, err = scope.Namer.Name(req) + } else { + namespace, err = scope.Namer.Namespace(req) + } + if err != nil { + scope.err(err, w, req) + return + } + + ctx := scope.ContextFunc(req) + ctx = request.WithNamespace(ctx, namespace) + + gv := scope.Kind.GroupVersion() + s, err := negotiation.NegotiateInputSerializer(req, scope.Serializer) + if err != nil { + scope.err(err, w, req) + return + } + decoder := scope.Serializer.DecoderToVersion(s.Serializer, schema.GroupVersion{Group: gv.Group, Version: runtime.APIVersionInternal}) + + body, err := readBody(req) + if err != nil { + scope.err(err, w, req) + return + } + + defaultGVK := scope.Kind + original := r.New() + trace.Step("About to convert to expected version") + obj, gvk, err := decoder.Decode(body, &defaultGVK, original) + if err != nil { + err = transformDecodeError(typer, err, original, gvk, body) + scope.err(err, w, req) + return + } + if gvk.GroupVersion() != gv { + err = errors.NewBadRequest(fmt.Sprintf("the API version in the data (%s) does not match the expected API version (%v)", gvk.GroupVersion().String(), gv.String())) + scope.err(err, w, req) + return + } + trace.Step("Conversion done") + + ae := request.AuditEventFrom(ctx) + audit.LogRequestObject(ae, obj, scope.Resource, scope.Subresource, scope.Serializer) + + userInfo, _ := request.UserFrom(ctx) + admissionAttributes := admission.NewAttributesRecord(obj, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Create, userInfo) + if mutatingAdmission, ok := admit.(admission.MutationInterface); ok && mutatingAdmission.Handles(admission.Create) { + err = mutatingAdmission.Admit(admissionAttributes) + if err != nil { + scope.err(err, w, req) + return + } + } + + // TODO: replace with content type negotiation? 
+ includeUninitialized := req.URL.Query().Get("includeUninitialized") == "1" + + trace.Step("About to store object in database") + result, err := finishRequest(timeout, func() (runtime.Object, error) { + return r.Create( + ctx, + name, + obj, + rest.AdmissionToValidateObjectFunc(admit, admissionAttributes), + includeUninitialized, + ) + }) + if err != nil { + scope.err(err, w, req) + return + } + trace.Step("Object stored in database") + + requestInfo, ok := request.RequestInfoFrom(ctx) + if !ok { + scope.err(fmt.Errorf("missing requestInfo"), w, req) + return + } + if err := setSelfLink(result, requestInfo, scope.Namer); err != nil { + scope.err(err, w, req) + return + } + trace.Step("Self-link added") + + // If the object is partially initialized, always indicate it via StatusAccepted + code := http.StatusCreated + if accessor, err := meta.Accessor(result); err == nil { + if accessor.GetInitializers() != nil { + code = http.StatusAccepted + } + } + status, ok := result.(*metav1.Status) + if ok && err == nil && status.Code == 0 { + status.Code = int32(code) + } + + transformResponseObject(ctx, scope, req, w, code, result) + } +} + +// CreateNamedResource returns a function that will handle a resource creation with name. +func CreateNamedResource(r rest.NamedCreater, scope RequestScope, typer runtime.ObjectTyper, admission admission.Interface) http.HandlerFunc { + return createHandler(r, scope, typer, admission, true) +} + +// CreateResource returns a function that will handle a resource creation. +func CreateResource(r rest.Creater, scope RequestScope, typer runtime.ObjectTyper, admission admission.Interface) http.HandlerFunc { + return createHandler(&namedCreaterAdapter{r}, scope, typer, admission, false) +} + +type namedCreaterAdapter struct { + rest.Creater +} + +func (c *namedCreaterAdapter) Create(ctx request.Context, name string, obj runtime.Object, createValidatingAdmission rest.ValidateObjectFunc, includeUninitialized bool) (runtime.Object, error) { + return c.Creater.Create(ctx, obj, createValidatingAdmission, includeUninitialized) +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/delete.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/delete.go new file mode 100644 index 000000000000..0bc5a659b55e --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/delete.go @@ -0,0 +1,282 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package handlers + +import ( + "fmt" + "net/http" + "time" + + "k8s.io/apimachinery/pkg/api/errors" + metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/audit" + "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" + "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/registry/rest" + utiltrace "k8s.io/apiserver/pkg/util/trace" +) + +// DeleteResource returns a function that will handle a resource deletion +// TODO admission here becomes solely validating admission +func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope RequestScope, admit admission.Interface) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + // For performance tracking purposes. + trace := utiltrace.New("Delete " + req.URL.Path) + defer trace.LogIfLong(500 * time.Millisecond) + + // TODO: we either want to remove timeout or document it (if we document, move timeout out of this function and declare it in api_installer) + timeout := parseTimeout(req.URL.Query().Get("timeout")) + + namespace, name, err := scope.Namer.Name(req) + if err != nil { + scope.err(err, w, req) + return + } + ctx := scope.ContextFunc(req) + ctx = request.WithNamespace(ctx, namespace) + + options := &metav1.DeleteOptions{} + if allowsOptions { + body, err := readBody(req) + if err != nil { + scope.err(err, w, req) + return + } + if len(body) > 0 { + s, err := negotiation.NegotiateInputSerializer(req, metainternalversion.Codecs) + if err != nil { + scope.err(err, w, req) + return + } + // For backwards compatibility, we need to allow existing clients to submit per group DeleteOptions + // It is also allowed to pass a body with meta.k8s.io/v1.DeleteOptions + defaultGVK := scope.MetaGroupVersion.WithKind("DeleteOptions") + obj, _, err := metainternalversion.Codecs.DecoderToVersion(s.Serializer, defaultGVK.GroupVersion()).Decode(body, &defaultGVK, options) + if err != nil { + scope.err(err, w, req) + return + } + if obj != options { + scope.err(fmt.Errorf("decoded object cannot be converted to DeleteOptions"), w, req) + return + } + trace.Step("Decoded delete options") + + ae := request.AuditEventFrom(ctx) + audit.LogRequestObject(ae, obj, scope.Resource, scope.Subresource, scope.Serializer) + trace.Step("Recorded the audit event") + } else { + if values := req.URL.Query(); len(values) > 0 { + if err := metainternalversion.ParameterCodec.DecodeParameters(values, scope.MetaGroupVersion, options); err != nil { + err = errors.NewBadRequest(err.Error()) + scope.err(err, w, req) + return + } + } + } + } + + trace.Step("About to check admission control") + if admit != nil && admit.Handles(admission.Delete) { + userInfo, _ := request.UserFrom(ctx) + attrs := admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Delete, userInfo) + if mutatingAdmission, ok := admit.(admission.MutationInterface); ok { + if err := mutatingAdmission.Admit(attrs); err != nil { + scope.err(err, w, req) + return + } + } + if validatingAdmission, ok := admit.(admission.ValidationInterface); ok { + if err := validatingAdmission.Validate(attrs); err != nil { + scope.err(err, w, req) + return + } + } + } + + trace.Step("About to delete object from database") + wasDeleted := true + result, err := finishRequest(timeout, func() (runtime.Object, error) { + obj, deleted, err := r.Delete(ctx, name, options) + 
wasDeleted = deleted + return obj, err + }) + if err != nil { + scope.err(err, w, req) + return + } + trace.Step("Object deleted from database") + + status := http.StatusOK + // Return http.StatusAccepted if the resource was not deleted immediately and + // user requested cascading deletion by setting OrphanDependents=false. + // Note: We want to do this always if resource was not deleted immediately, but + // that will break existing clients. + // Other cases where resource is not instantly deleted are: namespace deletion + // and pod graceful deletion. + if !wasDeleted && options.OrphanDependents != nil && *options.OrphanDependents == false { + status = http.StatusAccepted + } + // if the rest.Deleter returns a nil object, fill out a status. Callers may return a valid + // object with the response. + if result == nil { + result = &metav1.Status{ + Status: metav1.StatusSuccess, + Code: int32(status), + Details: &metav1.StatusDetails{ + Name: name, + Kind: scope.Kind.Kind, + }, + } + } else { + // when a non-status response is returned, set the self link + requestInfo, ok := request.RequestInfoFrom(ctx) + if !ok { + scope.err(fmt.Errorf("missing requestInfo"), w, req) + return + } + if _, ok := result.(*metav1.Status); !ok { + if err := setSelfLink(result, requestInfo, scope.Namer); err != nil { + scope.err(err, w, req) + return + } + } + } + + transformResponseObject(ctx, scope, req, w, status, result) + } +} + +// DeleteCollection returns a function that will handle a collection deletion +func DeleteCollection(r rest.CollectionDeleter, checkBody bool, scope RequestScope, admit admission.Interface) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + // TODO: we either want to remove timeout or document it (if we document, move timeout out of this function and declare it in api_installer) + timeout := parseTimeout(req.URL.Query().Get("timeout")) + + namespace, err := scope.Namer.Namespace(req) + if err != nil { + scope.err(err, w, req) + return + } + + ctx := scope.ContextFunc(req) + ctx = request.WithNamespace(ctx, namespace) + + if mutatingAdmission, ok := admit.(admission.MutationInterface); ok && mutatingAdmission.Handles(admission.Delete) { + userInfo, _ := request.UserFrom(ctx) + + err = mutatingAdmission.Admit(admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, "", scope.Resource, scope.Subresource, admission.Delete, userInfo)) + if err != nil { + scope.err(err, w, req) + return + } + } + // TODO: avoid calling Handles twice + if validatingAdmission, ok := admit.(admission.ValidationInterface); ok && validatingAdmission.Handles(admission.Delete) { + userInfo, _ := request.UserFrom(ctx) + + err = validatingAdmission.Validate(admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, "", scope.Resource, scope.Subresource, admission.Delete, userInfo)) + if err != nil { + scope.err(err, w, req) + return + } + } + + listOptions := metainternalversion.ListOptions{} + if err := metainternalversion.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, &listOptions); err != nil { + err = errors.NewBadRequest(err.Error()) + scope.err(err, w, req) + return + } + + // transform fields + // TODO: DecodeParametersInto should do this. 
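// Both DeleteCollection here and ListResource in get.go rewrite incoming
// field selectors through scope.Convertor.ConvertFieldLabel so that
// user-facing field names are translated to their internal equivalents
// before hitting storage. A tiny isolated illustration of the same
// fields.Selector.Transform hook, with a hard-coded mapping standing in for
// the convertor (the field names are hypothetical):
package fieldselsketch

import "k8s.io/apimachinery/pkg/fields"

// rewriteFieldLabels maps an assumed external field name to an internal one;
// the real handlers delegate this decision to ConvertFieldLabel.
func rewriteFieldLabels(sel fields.Selector) (fields.Selector, error) {
	return sel.Transform(func(field, value string) (string, string, error) {
		if field == "spec.host" { // hypothetical external name
			return "spec.nodeName", value, nil
		}
		return field, value, nil
	})
}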
+ if listOptions.FieldSelector != nil { + fn := func(label, value string) (newLabel, newValue string, err error) { + return scope.Convertor.ConvertFieldLabel(scope.Kind.GroupVersion().String(), scope.Kind.Kind, label, value) + } + if listOptions.FieldSelector, err = listOptions.FieldSelector.Transform(fn); err != nil { + // TODO: allow bad request to set field causes based on query parameters + err = errors.NewBadRequest(err.Error()) + scope.err(err, w, req) + return + } + } + + options := &metav1.DeleteOptions{} + if checkBody { + body, err := readBody(req) + if err != nil { + scope.err(err, w, req) + return + } + if len(body) > 0 { + s, err := negotiation.NegotiateInputSerializer(req, scope.Serializer) + if err != nil { + scope.err(err, w, req) + return + } + defaultGVK := scope.Kind.GroupVersion().WithKind("DeleteOptions") + obj, _, err := scope.Serializer.DecoderToVersion(s.Serializer, defaultGVK.GroupVersion()).Decode(body, &defaultGVK, options) + if err != nil { + scope.err(err, w, req) + return + } + if obj != options { + scope.err(fmt.Errorf("decoded object cannot be converted to DeleteOptions"), w, req) + return + } + + ae := request.AuditEventFrom(ctx) + audit.LogRequestObject(ae, obj, scope.Resource, scope.Subresource, scope.Serializer) + } + } + + result, err := finishRequest(timeout, func() (runtime.Object, error) { + return r.DeleteCollection(ctx, options, &listOptions) + }) + if err != nil { + scope.err(err, w, req) + return + } + + // if the rest.Deleter returns a nil object, fill out a status. Callers may return a valid + // object with the response. + if result == nil { + result = &metav1.Status{ + Status: metav1.StatusSuccess, + Code: http.StatusOK, + Details: &metav1.StatusDetails{ + Kind: scope.Kind.Kind, + }, + } + } else { + // when a non-status response is returned, set the self link + if _, ok := result.(*metav1.Status); !ok { + if _, err := setListSelfLink(result, ctx, req, scope.Namer); err != nil { + scope.err(err, w, req) + return + } + } + } + + transformResponseObject(ctx, scope, req, w, http.StatusOK, result) + } +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/get.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/get.go new file mode 100644 index 000000000000..7461ece64c2d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/get.go @@ -0,0 +1,278 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package handlers + +import ( + "fmt" + "math/rand" + "net/http" + "net/url" + "strings" + "time" + + "github.com/golang/glog" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/endpoints/metrics" + "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/registry/rest" + utiltrace "k8s.io/apiserver/pkg/util/trace" +) + +// getterFunc performs a get request with the given context and object name. The request +// may be used to deserialize an options object to pass to the getter. +type getterFunc func(ctx request.Context, name string, req *http.Request, trace *utiltrace.Trace) (runtime.Object, error) + +// getResourceHandler is an HTTP handler function for get requests. It delegates to the +// passed-in getterFunc to perform the actual get. +func getResourceHandler(scope RequestScope, getter getterFunc) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + trace := utiltrace.New("Get " + req.URL.Path) + defer trace.LogIfLong(500 * time.Millisecond) + + namespace, name, err := scope.Namer.Name(req) + if err != nil { + scope.err(err, w, req) + return + } + ctx := scope.ContextFunc(req) + ctx = request.WithNamespace(ctx, namespace) + + result, err := getter(ctx, name, req, trace) + if err != nil { + scope.err(err, w, req) + return + } + requestInfo, ok := request.RequestInfoFrom(ctx) + if !ok { + scope.err(fmt.Errorf("missing requestInfo"), w, req) + return + } + if err := setSelfLink(result, requestInfo, scope.Namer); err != nil { + scope.err(err, w, req) + return + } + + trace.Step("About to write a response") + transformResponseObject(ctx, scope, req, w, http.StatusOK, result) + } +} + +// GetResource returns a function that handles retrieving a single resource from a rest.Storage object. +func GetResource(r rest.Getter, e rest.Exporter, scope RequestScope) http.HandlerFunc { + return getResourceHandler(scope, + func(ctx request.Context, name string, req *http.Request, trace *utiltrace.Trace) (runtime.Object, error) { + // check for export + options := metav1.GetOptions{} + if values := req.URL.Query(); len(values) > 0 { + exports := metav1.ExportOptions{} + if err := metainternalversion.ParameterCodec.DecodeParameters(values, scope.MetaGroupVersion, &exports); err != nil { + err = errors.NewBadRequest(err.Error()) + return nil, err + } + if exports.Export { + if e == nil { + return nil, errors.NewBadRequest(fmt.Sprintf("export of %q is not supported", scope.Resource.Resource)) + } + return e.Export(ctx, name, exports) + } + if err := metainternalversion.ParameterCodec.DecodeParameters(values, scope.MetaGroupVersion, &options); err != nil { + err = errors.NewBadRequest(err.Error()) + return nil, err + } + } + if trace != nil { + trace.Step("About to Get from storage") + } + return r.Get(ctx, name, &options) + }) +} + +// GetResourceWithOptions returns a function that handles retrieving a single resource from a rest.Storage object. 
+func GetResourceWithOptions(r rest.GetterWithOptions, scope RequestScope, isSubresource bool) http.HandlerFunc { + return getResourceHandler(scope, + func(ctx request.Context, name string, req *http.Request, trace *utiltrace.Trace) (runtime.Object, error) { + opts, subpath, subpathKey := r.NewGetOptions() + trace.Step("About to process Get options") + if err := getRequestOptions(req, scope, opts, subpath, subpathKey, isSubresource); err != nil { + err = errors.NewBadRequest(err.Error()) + return nil, err + } + if trace != nil { + trace.Step("About to Get from storage") + } + return r.Get(ctx, name, opts) + }) +} + +// getRequestOptions parses out options and can include path information. The path information shouldn't include the subresource. +func getRequestOptions(req *http.Request, scope RequestScope, into runtime.Object, subpath bool, subpathKey string, isSubresource bool) error { + if into == nil { + return nil + } + + query := req.URL.Query() + if subpath { + newQuery := make(url.Values) + for k, v := range query { + newQuery[k] = v + } + + ctx := scope.ContextFunc(req) + requestInfo, _ := request.RequestInfoFrom(ctx) + startingIndex := 2 + if isSubresource { + startingIndex = 3 + } + + p := strings.Join(requestInfo.Parts[startingIndex:], "/") + + // ensure non-empty subpaths correctly reflect a leading slash + if len(p) > 0 && !strings.HasPrefix(p, "/") { + p = "/" + p + } + + // ensure subpaths correctly reflect the presence of a trailing slash on the original request + if strings.HasSuffix(requestInfo.Path, "/") && !strings.HasSuffix(p, "/") { + p += "/" + } + + newQuery[subpathKey] = []string{p} + query = newQuery + } + return scope.ParameterCodec.DecodeParameters(query, scope.Kind.GroupVersion(), into) +} + +func ListResource(r rest.Lister, rw rest.Watcher, scope RequestScope, forceWatch bool, minRequestTimeout time.Duration) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + // For performance tracking purposes. + trace := utiltrace.New("List " + req.URL.Path) + + namespace, err := scope.Namer.Namespace(req) + if err != nil { + scope.err(err, w, req) + return + } + + // Watches for single objects are routed to this function. + // Treat a name parameter the same as a field selector entry. + hasName := true + _, name, err := scope.Namer.Name(req) + if err != nil { + hasName = false + } + + ctx := scope.ContextFunc(req) + ctx = request.WithNamespace(ctx, namespace) + + opts := metainternalversion.ListOptions{} + if err := metainternalversion.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, &opts); err != nil { + err = errors.NewBadRequest(err.Error()) + scope.err(err, w, req) + return + } + + // transform fields + // TODO: DecodeParametersInto should do this. + if opts.FieldSelector != nil { + fn := func(label, value string) (newLabel, newValue string, err error) { + return scope.Convertor.ConvertFieldLabel(scope.Kind.GroupVersion().String(), scope.Kind.Kind, label, value) + } + if opts.FieldSelector, err = opts.FieldSelector.Transform(fn); err != nil { + // TODO: allow bad request to set field causes based on query parameters + err = errors.NewBadRequest(err.Error()) + scope.err(err, w, req) + return + } + } + + if hasName { + // metadata.name is the canonical internal name. + // SelectionPredicate will notice that this is + // a request for a single object and optimize the + // storage query accordingly. 
+ nameSelector := fields.OneTermEqualSelector("metadata.name", name) + if opts.FieldSelector != nil && !opts.FieldSelector.Empty() { + // It doesn't make sense to ask for both a name + // and a field selector, since just the name is + // sufficient to narrow down the request to a + // single object. + scope.err(errors.NewBadRequest("both a name and a field selector provided; please provide one or the other."), w, req) + return + } + opts.FieldSelector = nameSelector + } + + if opts.Watch || forceWatch { + if rw == nil { + scope.err(errors.NewMethodNotSupported(scope.Resource.GroupResource(), "watch"), w, req) + return + } + // TODO: Currently we explicitly ignore ?timeout= and use only ?timeoutSeconds=. + timeout := time.Duration(0) + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + if timeout == 0 && minRequestTimeout > 0 { + timeout = time.Duration(float64(minRequestTimeout) * (rand.Float64() + 1.0)) + } + glog.V(2).Infof("Starting watch for %s, rv=%s labels=%s fields=%s timeout=%s", req.URL.Path, opts.ResourceVersion, opts.LabelSelector, opts.FieldSelector, timeout) + + watcher, err := rw.Watch(ctx, &opts) + if err != nil { + scope.err(err, w, req) + return + } + requestInfo, _ := request.RequestInfoFrom(ctx) + metrics.RecordLongRunning(req, requestInfo, func() { + serveWatch(watcher, scope, req, w, timeout) + }) + return + } + + // Log only long List requests (ignore Watch). + defer trace.LogIfLong(500 * time.Millisecond) + trace.Step("About to List from storage") + result, err := r.List(ctx, &opts) + if err != nil { + scope.err(err, w, req) + return + } + trace.Step("Listing from storage done") + numberOfItems, err := setListSelfLink(result, ctx, req, scope.Namer) + if err != nil { + scope.err(err, w, req) + return + } + trace.Step("Self-linking done") + // Ensure empty lists return a non-nil items slice + if numberOfItems == 0 && meta.IsListType(result) { + if err := meta.SetList(result, []runtime.Object{}); err != nil { + scope.err(err, w, req) + return + } + } + + transformResponseObject(ctx, scope, req, w, http.StatusOK, result) + trace.Step(fmt.Sprintf("Writing http response done (%d items)", numberOfItems)) + } +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/BUILD b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/BUILD index aed172285db5..c0ceb323d8d8 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/BUILD +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["negotiate_test.go"], + importpath = "k8s.io/apiserver/pkg/endpoints/handlers/negotiation", library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -23,6 +24,7 @@ go_library( "errors.go", "negotiate.go", ], + importpath = "k8s.io/apiserver/pkg/endpoints/handlers/negotiation", deps = [ "//vendor/bitbucket.org/ww/goautoneg:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/negotiate.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/negotiate.go index 896961b6ba13..7f4225a5b93f 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/negotiate.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/negotiate.go @@ -40,6 +40,8 @@ func MediaTypesForSerializer(ns runtime.NegotiatedSerializer) (mediaTypes, strea return mediaTypes, 
streamMediaTypes } +// NegotiateOutputMediaType negotiates the output structured media type and a serializer, or +// returns an error. func NegotiateOutputMediaType(req *http.Request, ns runtime.NegotiatedSerializer, restrictions EndpointRestrictions) (MediaTypeOptions, runtime.SerializerInfo, error) { mediaType, ok := NegotiateMediaTypeOptions(req.Header.Get("Accept"), AcceptedMediaTypesForEndpoint(ns), restrictions) if !ok { @@ -54,11 +56,13 @@ func NegotiateOutputMediaType(req *http.Request, ns runtime.NegotiatedSerializer return mediaType, info, nil } +// NegotiateOutputSerializer returns a serializer for the output. func NegotiateOutputSerializer(req *http.Request, ns runtime.NegotiatedSerializer) (runtime.SerializerInfo, error) { _, info, err := NegotiateOutputMediaType(req, ns, DefaultEndpointRestrictions) return info, err } +// NegotiateOutputStreamSerializer returns a stream serializer for the given request. func NegotiateOutputStreamSerializer(req *http.Request, ns runtime.NegotiatedSerializer) (runtime.SerializerInfo, error) { mediaType, ok := NegotiateMediaTypeOptions(req.Header.Get("Accept"), AcceptedMediaTypesForEndpoint(ns), DefaultEndpointRestrictions) if !ok || mediaType.Accepted.Serializer.StreamSerializer == nil { @@ -68,9 +72,15 @@ func NegotiateOutputStreamSerializer(req *http.Request, ns runtime.NegotiatedSer return mediaType.Accepted.Serializer, nil } +// NegotiateInputSerializer returns the input serializer for the provided request. func NegotiateInputSerializer(req *http.Request, ns runtime.NegotiatedSerializer) (runtime.SerializerInfo, error) { - mediaTypes := ns.SupportedMediaTypes() mediaType := req.Header.Get("Content-Type") + return NegotiateInputSerializerForMediaType(mediaType, ns) +} + +// NegotiateInputSerializerForMediaType returns the appropriate serializer for the given media type or an error. +func NegotiateInputSerializerForMediaType(mediaType string, ns runtime.NegotiatedSerializer) (runtime.SerializerInfo, error) { + mediaTypes := ns.SupportedMediaTypes() if len(mediaType) == 0 { mediaType = mediaTypes[0].MediaType } @@ -263,6 +273,13 @@ func acceptMediaTypeOptions(params map[string]string, accepts *AcceptedMediaType return options, true } +type candidateMediaType struct { + accepted *AcceptedMediaType + clauses goautoneg.Accept +} + +type candidateMediaTypeSlice []candidateMediaType + // NegotiateMediaTypeOptions returns the most appropriate content type given the accept header and // a list of alternatives along with the accepted media type parameters. func NegotiateMediaTypeOptions(header string, accepted []AcceptedMediaType, endpoint EndpointRestrictions) (MediaTypeOptions, bool) { @@ -272,6 +289,7 @@ func NegotiateMediaTypeOptions(header string, accepted []AcceptedMediaType, endp }, true } + var candidates candidateMediaTypeSlice clauses := goautoneg.ParseAccept(header) for _, clause := range clauses { for i := range accepted { @@ -280,12 +298,17 @@ func NegotiateMediaTypeOptions(header string, accepted []AcceptedMediaType, endp case clause.Type == accepts.Type && clause.SubType == accepts.SubType, clause.Type == accepts.Type && clause.SubType == "*", clause.Type == "*" && clause.SubType == "*": - // TODO: should we prefer the first type with no unrecognized options? Do we need to ignore unrecognized - // parameters. 
- return acceptMediaTypeOptions(clause.Params, accepts, endpoint) + candidates = append(candidates, candidateMediaType{accepted: accepts, clauses: clause}) } } } + + for _, v := range candidates { + if retVal, ret := acceptMediaTypeOptions(v.clauses.Params, v.accepted, endpoint); ret { + return retVal, true + } + } + return MediaTypeOptions{}, false } diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/patch.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/patch.go index ca585e6ddf4a..1ac736d09dda 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/patch.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/patch.go @@ -18,16 +18,353 @@ package handlers import ( "fmt" + "net/http" + "strings" + "time" - "k8s.io/apimachinery/pkg/conversion/unstructured" + "github.com/evanphx/json-patch" + "github.com/golang/glog" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/json" + "k8s.io/apimachinery/pkg/util/mergepatch" "k8s.io/apimachinery/pkg/util/strategicpatch" - - "github.com/evanphx/json-patch" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/audit" + "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/registry/rest" ) +// PatchResource returns a function that will handle a resource patch +// TODO: Eventually PatchResource should just use GuaranteedUpdate and this routine should be a bit cleaner +func PatchResource(r rest.Patcher, scope RequestScope, admit admission.Interface, converter runtime.ObjectConvertor) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + // TODO: we either want to remove timeout or document it (if we + // document, move timeout out of this function and declare it in + // api_installer) + timeout := parseTimeout(req.URL.Query().Get("timeout")) + + namespace, name, err := scope.Namer.Name(req) + if err != nil { + scope.err(err, w, req) + return + } + + ctx := scope.ContextFunc(req) + ctx = request.WithNamespace(ctx, namespace) + + versionedObj, err := converter.ConvertToVersion(r.New(), scope.Kind.GroupVersion()) + if err != nil { + scope.err(err, w, req) + return + } + + // TODO: handle this in negotiation + contentType := req.Header.Get("Content-Type") + // Remove "; charset=" if included in header. 
+ if idx := strings.Index(contentType, ";"); idx > 0 { + contentType = contentType[:idx] + } + patchType := types.PatchType(contentType) + + patchJS, err := readBody(req) + if err != nil { + scope.err(err, w, req) + return + } + + ae := request.AuditEventFrom(ctx) + audit.LogRequestPatch(ae, patchJS) + + s, ok := runtime.SerializerInfoForMediaType(scope.Serializer.SupportedMediaTypes(), runtime.ContentTypeJSON) + if !ok { + scope.err(fmt.Errorf("no serializer defined for JSON"), w, req) + return + } + gv := scope.Kind.GroupVersion() + codec := runtime.NewCodec( + scope.Serializer.EncoderForVersion(s.Serializer, gv), + scope.Serializer.DecoderToVersion(s.Serializer, schema.GroupVersion{Group: gv.Group, Version: runtime.APIVersionInternal}), + ) + + userInfo, _ := request.UserFrom(ctx) + staticAdmissionAttributes := admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Update, userInfo) + updateMutation := func(updatedObject runtime.Object, currentObject runtime.Object) error { + if mutatingAdmission, ok := admit.(admission.MutationInterface); ok && admit.Handles(admission.Update) { + return mutatingAdmission.Admit(admission.NewAttributesRecord(updatedObject, currentObject, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Update, userInfo)) + } + return nil + } + + result, err := patchResource( + ctx, + updateMutation, + rest.AdmissionToValidateObjectFunc(admit, staticAdmissionAttributes), + rest.AdmissionToValidateObjectUpdateFunc(admit, staticAdmissionAttributes), + timeout, versionedObj, + r, + name, + patchType, + patchJS, + scope.Namer, scope.Creater, scope.Defaulter, scope.UnsafeConvertor, scope.Kind, scope.Resource, codec) + if err != nil { + scope.err(err, w, req) + return + } + + requestInfo, ok := request.RequestInfoFrom(ctx) + if !ok { + scope.err(fmt.Errorf("missing requestInfo"), w, req) + return + } + if err := setSelfLink(result, requestInfo, scope.Namer); err != nil { + scope.err(err, w, req) + return + } + + transformResponseObject(ctx, scope, req, w, http.StatusOK, result) + } +} + +type mutateObjectUpdateFunc func(obj, old runtime.Object) error + +// patchResource divides PatchResource for easier unit testing +func patchResource( + ctx request.Context, + updateMutation mutateObjectUpdateFunc, + createValidation rest.ValidateObjectFunc, + updateValidation rest.ValidateObjectUpdateFunc, + timeout time.Duration, + versionedObj runtime.Object, + patcher rest.Patcher, + name string, + patchType types.PatchType, + patchJS []byte, + namer ScopeNamer, + creater runtime.ObjectCreater, + defaulter runtime.ObjectDefaulter, + unsafeConvertor runtime.ObjectConvertor, + kind schema.GroupVersionKind, + resource schema.GroupVersionResource, + codec runtime.Codec, +) (runtime.Object, error) { + + namespace := request.NamespaceValue(ctx) + + var ( + originalObjJS []byte + originalPatchedObjJS []byte + originalObjMap map[string]interface{} + getOriginalPatchMap func() (map[string]interface{}, error) + lastConflictErr error + originalResourceVersion string + ) + + // applyPatch is called every time GuaranteedUpdate asks for the updated object, + // and is given the currently persisted object as input. 
+ applyPatch := func(_ request.Context, _, currentObject runtime.Object) (runtime.Object, error) { + // Make sure we actually have a persisted currentObject + if hasUID, err := hasUID(currentObject); err != nil { + return nil, err + } else if !hasUID { + return nil, errors.NewNotFound(resource.GroupResource(), name) + } + + currentResourceVersion := "" + if currentMetadata, err := meta.Accessor(currentObject); err == nil { + currentResourceVersion = currentMetadata.GetResourceVersion() + } + + switch { + case originalObjJS == nil && originalObjMap == nil: + // first time through, + // 1. apply the patch + // 2. save the original and patched to detect whether there were conflicting changes on retries + + originalResourceVersion = currentResourceVersion + objToUpdate := patcher.New() + + // For performance reasons, in case of strategicpatch, we avoid json + // marshaling and unmarshaling and operate just on map[string]interface{}. + // In case of other patch types, we still have to operate on JSON + // representations. + switch patchType { + case types.JSONPatchType, types.MergePatchType: + originalJS, patchedJS, err := patchObjectJSON(patchType, codec, currentObject, patchJS, objToUpdate, versionedObj) + if err != nil { + return nil, interpretPatchError(err) + } + originalObjJS, originalPatchedObjJS = originalJS, patchedJS + + // Make a getter that can return a fresh strategic patch map if needed for conflict retries + // We have to rebuild it each time we need it, because the map gets mutated when being applied + var originalPatchBytes []byte + getOriginalPatchMap = func() (map[string]interface{}, error) { + if originalPatchBytes == nil { + // Compute once + originalPatchBytes, err = strategicpatch.CreateTwoWayMergePatch(originalObjJS, originalPatchedObjJS, versionedObj) + if err != nil { + return nil, interpretPatchError(err) + } + } + // Return a fresh map every time + originalPatchMap := make(map[string]interface{}) + if err := json.Unmarshal(originalPatchBytes, &originalPatchMap); err != nil { + return nil, errors.NewBadRequest(err.Error()) + } + return originalPatchMap, nil + } + + case types.StrategicMergePatchType: + // Since the patch is applied on versioned objects, we need to convert the + // current object to versioned representation first. + currentVersionedObject, err := unsafeConvertor.ConvertToVersion(currentObject, kind.GroupVersion()) + if err != nil { + return nil, err + } + versionedObjToUpdate, err := creater.New(kind) + if err != nil { + return nil, err + } + // Capture the original object map and patch for possible retries. + originalMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(currentVersionedObject) + if err != nil { + return nil, err + } + if err := strategicPatchObject(codec, defaulter, currentVersionedObject, patchJS, versionedObjToUpdate, versionedObj); err != nil { + return nil, err + } + // Convert the object back to unversioned. + gvk := kind.GroupKind().WithVersion(runtime.APIVersionInternal) + unversionedObjToUpdate, err := unsafeConvertor.ConvertToVersion(versionedObjToUpdate, gvk.GroupVersion()) + if err != nil { + return nil, err + } + objToUpdate = unversionedObjToUpdate + // Store unstructured representation for possible retries. 
+				originalObjMap = originalMap
+				// Make a getter that can return a fresh strategic patch map if needed for conflict retries
+				// We have to rebuild it each time we need it, because the map gets mutated when being applied
+				getOriginalPatchMap = func() (map[string]interface{}, error) {
+					patchMap := make(map[string]interface{})
+					if err := json.Unmarshal(patchJS, &patchMap); err != nil {
+						return nil, errors.NewBadRequest(err.Error())
+					}
+					return patchMap, nil
+				}
+			}
+			if err := checkName(objToUpdate, name, namespace, namer); err != nil {
+				return nil, err
+			}
+			return objToUpdate, nil
+
+		default:
+			// on a conflict,
+			// 1. build a strategic merge patch from originalJS and the patchedJS. Different patch types can
+			// be specified, but a strategic merge patch should be expressive enough handle them. Build the
+			// patch with this type to handle those cases.
+			// 2. build a strategic merge patch from originalJS and the currentJS
+			// 3. ensure no conflicts between the two patches
+			// 4. apply the #1 patch to the currentJS object
+
+			// Since the patch is applied on versioned objects, we need to convert the
+			// current object to versioned representation first.
+			currentVersionedObject, err := unsafeConvertor.ConvertToVersion(currentObject, kind.GroupVersion())
+			if err != nil {
+				return nil, err
+			}
+			currentObjMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(currentVersionedObject)
+			if err != nil {
+				return nil, err
+			}
+
+			var currentPatchMap map[string]interface{}
+			if originalObjMap != nil {
+				var err error
+				currentPatchMap, err = strategicpatch.CreateTwoWayMergeMapPatch(originalObjMap, currentObjMap, versionedObj)
+				if err != nil {
+					return nil, interpretPatchError(err)
+				}
+			} else {
+				// Compute current patch.
+				currentObjJS, err := runtime.Encode(codec, currentObject)
+				if err != nil {
+					return nil, err
+				}
+				currentPatch, err := strategicpatch.CreateTwoWayMergePatch(originalObjJS, currentObjJS, versionedObj)
+				if err != nil {
+					return nil, interpretPatchError(err)
+				}
+				currentPatchMap = make(map[string]interface{})
+				if err := json.Unmarshal(currentPatch, &currentPatchMap); err != nil {
+					return nil, errors.NewBadRequest(err.Error())
+				}
+			}
+
+			// Get a fresh copy of the original strategic patch each time through, since applying it mutates the map
+			originalPatchMap, err := getOriginalPatchMap()
+			if err != nil {
+				return nil, err
+			}
+
+			hasConflicts, err := mergepatch.HasConflicts(originalPatchMap, currentPatchMap)
+			if err != nil {
+				return nil, err
+			}
+
+			if hasConflicts {
+				diff1, _ := json.Marshal(currentPatchMap)
+				diff2, _ := json.Marshal(originalPatchMap)
+				patchDiffErr := fmt.Errorf("there is a meaningful conflict (firstResourceVersion: %q, currentResourceVersion: %q):\n diff1=%v\n, diff2=%v\n", originalResourceVersion, currentResourceVersion, string(diff1), string(diff2))
+				glog.V(4).Infof("patchResource failed for resource %s, because there is a meaningful conflict(firstResourceVersion: %q, currentResourceVersion: %q):\n diff1=%v\n, diff2=%v\n", name, originalResourceVersion, currentResourceVersion, string(diff1), string(diff2))
+
+				// Return the last conflict error we got if we have one
+				if lastConflictErr != nil {
+					return nil, lastConflictErr
+				}
+				// Otherwise manufacture one of our own
+				return nil, errors.NewConflict(resource.GroupResource(), name, patchDiffErr)
+			}
+
+			versionedObjToUpdate, err := creater.New(kind)
+			if err != nil {
+				return nil, err
+			}
+			if err := applyPatchToObject(codec, defaulter, currentObjMap, originalPatchMap, versionedObjToUpdate, versionedObj); err != nil {
+				return nil, err
+			}
+			// Convert the object back to unversioned.
+			gvk := kind.GroupKind().WithVersion(runtime.APIVersionInternal)
+			objToUpdate, err := unsafeConvertor.ConvertToVersion(versionedObjToUpdate, gvk.GroupVersion())
+			if err != nil {
+				return nil, err
+			}
+
+			return objToUpdate, nil
+		}
+	}
+
+	// applyAdmission is called every time GuaranteedUpdate asks for the updated object,
+	// and is given the currently persisted object and the patched object as input.
+	applyAdmission := func(ctx request.Context, patchedObject runtime.Object, currentObject runtime.Object) (runtime.Object, error) {
+		return patchedObject, updateMutation(patchedObject, currentObject)
+	}
+	updatedObjectInfo := rest.DefaultUpdatedObjectInfo(nil, applyPatch, applyAdmission)
+
+	return finishRequest(timeout, func() (runtime.Object, error) {
+		updateObject, _, updateErr := patcher.Update(ctx, name, updatedObjectInfo, createValidation, updateValidation)
+		for i := 0; i < MaxRetryWhenPatchConflicts && (errors.IsConflict(updateErr)); i++ {
+			lastConflictErr = updateErr
+			updateObject, _, updateErr = patcher.Update(ctx, name, updatedObjectInfo, createValidation, updateValidation)
+		}
+		return updateObject, updateErr
+	})
+}
+
 // patchObjectJSON patches the <originalObject> with <patchJS> and stores
 // the result in <objToUpdate>.
 // Currently it also returns the original and patched objects serialized to
@@ -87,14 +424,14 @@ func strategicPatchObject(
 	objToUpdate runtime.Object,
 	versionedObj runtime.Object,
 ) error {
-	originalObjMap, err := unstructured.DefaultConverter.ToUnstructured(originalObject)
+	originalObjMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(originalObject)
 	if err != nil {
 		return err
 	}
 
 	patchMap := make(map[string]interface{})
 	if err := json.Unmarshal(patchJS, &patchMap); err != nil {
-		return err
+		return errors.NewBadRequest(err.Error())
 	}
 
 	if err := applyPatchToObject(codec, defaulter, originalObjMap, patchMap, objToUpdate, versionedObj); err != nil {
@@ -116,11 +453,11 @@ func applyPatchToObject(
 ) error {
 	patchedObjMap, err := strategicpatch.StrategicMergeMapPatch(originalMap, patchMap, versionedObj)
 	if err != nil {
-		return err
+		return interpretPatchError(err)
 	}
 	// Rather than serialize the patched map to JSON, then decode it to an object, we go directly from a map to an object
-	if err := unstructured.DefaultConverter.FromUnstructured(patchedObjMap, objToUpdate); err != nil {
+	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(patchedObjMap, objToUpdate); err != nil {
		return err
 	}
 	// Decoding from JSON to a versioned object would apply defaults, so we do the same here
@@ -128,3 +465,15 @@ func applyPatchToObject(
 
 	return nil
 }
+
+// interpretPatchError interprets the error type and returns an error with appropriate HTTP code.
+func interpretPatchError(err error) error { + switch err { + case mergepatch.ErrBadJSONDoc, mergepatch.ErrBadPatchFormatForPrimitiveList, mergepatch.ErrBadPatchFormatForRetainKeys, mergepatch.ErrBadPatchFormatForSetElementOrderList, mergepatch.ErrUnsupportedStrategicMergePatchFormat: + return errors.NewBadRequest(err.Error()) + case mergepatch.ErrNoListOfLists, mergepatch.ErrPatchContentNotMatchRetainKeys: + return errors.NewGenericServerResponse(http.StatusUnprocessableEntity, "", schema.GroupResource{}, "", err.Error(), 0, false) + default: + return err + } +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/proxy.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/proxy.go index e8ee6d5f083e..3e8c917a7589 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/proxy.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/proxy.go @@ -54,22 +54,18 @@ type ProxyHandler struct { func (r *ProxyHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { reqStart := time.Now() - proxyHandlerTraceID := rand.Int63() - var verb, apiResource, subresource, scope string var httpCode int - + var requestInfo *request.RequestInfo defer func() { responseLength := 0 if rw, ok := w.(*metrics.ResponseWriterDelegator); ok { responseLength = rw.ContentLength() + if httpCode == 0 { + httpCode = rw.Status() + } } - metrics.Monitor( - verb, apiResource, subresource, scope, - net.GetHTTPClient(req), - w.Header().Get("Content-Type"), - httpCode, responseLength, reqStart, - ) + metrics.Record(req, requestInfo, w.Header().Get("Content-Type"), httpCode, responseLength, time.Now().Sub(reqStart)) }() ctx, ok := r.Mapper.Get(req) @@ -79,32 +75,32 @@ func (r *ProxyHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { return } - requestInfo, ok := request.RequestInfoFrom(ctx) + requestInfo, ok = request.RequestInfoFrom(ctx) if !ok { responsewriters.InternalError(w, req, errors.New("Error getting RequestInfo from context")) httpCode = http.StatusInternalServerError return } + + metrics.RecordLongRunning(req, requestInfo, func() { + httpCode = r.serveHTTP(w, req, ctx, requestInfo) + }) +} + +// serveHTTP performs proxy handling and returns the status code of the operation. 
+func (r *ProxyHandler) serveHTTP(w http.ResponseWriter, req *http.Request, ctx request.Context, requestInfo *request.RequestInfo) int { + proxyHandlerTraceID := rand.Int63() + if !requestInfo.IsResourceRequest { responsewriters.NotFound(w, req) - httpCode = http.StatusNotFound - return - } - verb = requestInfo.Verb - namespace, resource, subresource, parts := requestInfo.Namespace, requestInfo.Resource, requestInfo.Subresource, requestInfo.Parts - scope = "cluster" - if namespace != "" { - scope = "namespace" - } - if requestInfo.Name != "" { - scope = "resource" + return http.StatusNotFound } + namespace, resource, parts := requestInfo.Namespace, requestInfo.Resource, requestInfo.Parts ctx = request.WithNamespace(ctx, namespace) if len(parts) < 2 { responsewriters.NotFound(w, req) - httpCode = http.StatusNotFound - return + return http.StatusNotFound } id := parts[1] remainder := "" @@ -122,31 +118,26 @@ func (r *ProxyHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { if !ok { httplog.LogOf(req, w).Addf("'%v' has no storage object", resource) responsewriters.NotFound(w, req) - httpCode = http.StatusNotFound - return + return http.StatusNotFound } - apiResource = resource gv := schema.GroupVersion{Group: requestInfo.APIGroup, Version: requestInfo.APIVersion} redirector, ok := storage.(rest.Redirector) if !ok { httplog.LogOf(req, w).Addf("'%v' is not a redirector", resource) - httpCode = responsewriters.ErrorNegotiated(ctx, apierrors.NewMethodNotSupported(schema.GroupResource{Resource: resource}, "proxy"), r.Serializer, gv, w, req) - return + return responsewriters.ErrorNegotiated(ctx, apierrors.NewMethodNotSupported(schema.GroupResource{Resource: resource}, "proxy"), r.Serializer, gv, w, req) } location, roundTripper, err := redirector.ResourceLocation(ctx, id) if err != nil { httplog.LogOf(req, w).Addf("Error getting ResourceLocation: %v", err) - httpCode = responsewriters.ErrorNegotiated(ctx, err, r.Serializer, gv, w, req) - return + return responsewriters.ErrorNegotiated(ctx, err, r.Serializer, gv, w, req) } if location == nil { httplog.LogOf(req, w).Addf("ResourceLocation for %v returned nil", id) responsewriters.NotFound(w, req) - httpCode = http.StatusNotFound - return + return http.StatusNotFound } if roundTripper != nil { @@ -179,7 +170,7 @@ func (r *ProxyHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { // https://github.com/openshift/origin/blob/master/pkg/util/httpproxy/upgradeawareproxy.go. // That proxy needs to be modified to support multiple backends, not just 1. if r.tryUpgrade(ctx, w, req, newReq, location, roundTripper, gv) { - return + return http.StatusSwitchingProtocols } // Redirect requests of the form "/{resource}/{name}" to "/{resource}/{name}/" @@ -192,7 +183,7 @@ func (r *ProxyHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { } w.Header().Set("Location", req.URL.Path+"/"+queryPart) w.WriteHeader(http.StatusMovedPermanently) - return + return http.StatusMovedPermanently } start := time.Now() @@ -224,6 +215,7 @@ func (r *ProxyHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { proxy.Transport = roundTripper proxy.FlushInterval = 200 * time.Millisecond proxy.ServeHTTP(w, newReq) + return 0 } // tryUpgrade returns true if the request was handled. 
diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/BUILD b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/BUILD index 4ee001070e49..31963fd3b6d1 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/BUILD +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/BUILD @@ -12,6 +12,7 @@ go_test( "errors_test.go", "status_test.go", ], + importpath = "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters", library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", @@ -33,6 +34,7 @@ go_library( "status.go", "writers.go", ], + importpath = "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters", deps = [ "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -42,6 +44,7 @@ go_library( "//vendor/k8s.io/apiserver/pkg/audit:go_default_library", "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation:go_default_library", + "//vendor/k8s.io/apiserver/pkg/endpoints/metrics:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/status.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/status.go index 2f67cb6f9d62..e37a8ea4786e 100755 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/status.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/status.go @@ -46,6 +46,8 @@ func ErrorToAPIStatus(err error) *metav1.Status { status.Code = http.StatusInternalServerError } } + status.Kind = "Status" + status.APIVersion = "v1" //TODO: check for invalid responses return &status default: @@ -61,6 +63,10 @@ func ErrorToAPIStatus(err error) *metav1.Status { // cases. 
runtime.HandleError(fmt.Errorf("apiserver received an error that is not an metav1.Status: %v", err)) return &metav1.Status{ + TypeMeta: metav1.TypeMeta{ + Kind: "Status", + APIVersion: "v1", + }, Status: metav1.StatusFailure, Code: int32(status), Reason: metav1.StatusReasonUnknown, diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/writers.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/writers.go index 420e430669c8..58dac561f4aa 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/writers.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/writers.go @@ -28,6 +28,7 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apiserver/pkg/audit" "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" + "k8s.io/apiserver/pkg/endpoints/metrics" "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/apiserver/pkg/util/flushwriter" @@ -42,7 +43,10 @@ import ( func WriteObject(ctx request.Context, statusCode int, gv schema.GroupVersion, s runtime.NegotiatedSerializer, object runtime.Object, w http.ResponseWriter, req *http.Request) { stream, ok := object.(rest.ResourceStreamer) if ok { - StreamObject(ctx, statusCode, gv, s, stream, w, req) + requestInfo, _ := request.RequestInfoFrom(ctx) + metrics.RecordLongRunning(req, requestInfo, func() { + StreamObject(ctx, statusCode, gv, s, stream, w, req) + }) return } WriteObjectNegotiated(ctx, s, gv, w, req, statusCode, object) @@ -100,6 +104,12 @@ func SerializeObject(mediaType string, encoder runtime.Encoder, w http.ResponseW func WriteObjectNegotiated(ctx request.Context, s runtime.NegotiatedSerializer, gv schema.GroupVersion, w http.ResponseWriter, req *http.Request, statusCode int, object runtime.Object) { serializer, err := negotiation.NegotiateOutputSerializer(req, s) if err != nil { + // if original statusCode was not successful we need to return the original error + // we cannot hide it behind negotiation problems + if statusCode < http.StatusOK || statusCode >= http.StatusBadRequest { + WriteRawJSON(int(statusCode), object, w) + return + } status := ErrorToAPIStatus(err) WriteRawJSON(int(status.Code), status, w) return diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/rest.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/rest.go index dabbd85cbeaa..768005daa237 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/rest.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/rest.go @@ -20,35 +20,23 @@ import ( "encoding/hex" "fmt" "io/ioutil" - "math/rand" "net/http" - "net/url" - "strings" "time" "github.com/golang/glog" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" - metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1alpha1 "k8s.io/apimachinery/pkg/apis/meta/v1alpha1" - "k8s.io/apimachinery/pkg/conversion/unstructured" - "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/json" - "k8s.io/apimachinery/pkg/util/mergepatch" utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/apiserver/pkg/admission" - "k8s.io/apiserver/pkg/audit" - "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" + "k8s.io/apiserver/pkg/endpoints/metrics" "k8s.io/apiserver/pkg/endpoints/request" 
"k8s.io/apiserver/pkg/registry/rest" - utiltrace "k8s.io/apiserver/pkg/util/trace" ) // RequestScope encapsulates common fields across all RESTful handler methods. @@ -62,7 +50,6 @@ type RequestScope struct { Creater runtime.ObjectCreater Convertor runtime.ObjectConvertor Defaulter runtime.ObjectDefaulter - Copier runtime.ObjectCopier Typer runtime.ObjectTyper UnsafeConvertor runtime.ObjectConvertor @@ -104,133 +91,9 @@ func (scope *RequestScope) AllowsStreamSchema(s string) bool { return s == "watch" } -// getterFunc performs a get request with the given context and object name. The request -// may be used to deserialize an options object to pass to the getter. -type getterFunc func(ctx request.Context, name string, req *http.Request, trace *utiltrace.Trace) (runtime.Object, error) - // MaxRetryWhenPatchConflicts is the maximum number of conflicts retry during a patch operation before returning failure const MaxRetryWhenPatchConflicts = 5 -// getResourceHandler is an HTTP handler function for get requests. It delegates to the -// passed-in getterFunc to perform the actual get. -func getResourceHandler(scope RequestScope, getter getterFunc) http.HandlerFunc { - return func(w http.ResponseWriter, req *http.Request) { - trace := utiltrace.New("Get " + req.URL.Path) - defer trace.LogIfLong(500 * time.Millisecond) - - namespace, name, err := scope.Namer.Name(req) - if err != nil { - scope.err(err, w, req) - return - } - ctx := scope.ContextFunc(req) - ctx = request.WithNamespace(ctx, namespace) - - result, err := getter(ctx, name, req, trace) - if err != nil { - scope.err(err, w, req) - return - } - requestInfo, ok := request.RequestInfoFrom(ctx) - if !ok { - scope.err(fmt.Errorf("missing requestInfo"), w, req) - return - } - if err := setSelfLink(result, requestInfo, scope.Namer); err != nil { - scope.err(err, w, req) - return - } - - trace.Step("About to write a response") - transformResponseObject(ctx, scope, req, w, http.StatusOK, result) - } -} - -// GetResource returns a function that handles retrieving a single resource from a rest.Storage object. -func GetResource(r rest.Getter, e rest.Exporter, scope RequestScope) http.HandlerFunc { - return getResourceHandler(scope, - func(ctx request.Context, name string, req *http.Request, trace *utiltrace.Trace) (runtime.Object, error) { - // check for export - options := metav1.GetOptions{} - if values := req.URL.Query(); len(values) > 0 { - exports := metav1.ExportOptions{} - if err := metainternalversion.ParameterCodec.DecodeParameters(values, scope.MetaGroupVersion, &exports); err != nil { - err = errors.NewBadRequest(err.Error()) - return nil, err - } - if exports.Export { - if e == nil { - return nil, errors.NewBadRequest(fmt.Sprintf("export of %q is not supported", scope.Resource.Resource)) - } - return e.Export(ctx, name, exports) - } - if err := metainternalversion.ParameterCodec.DecodeParameters(values, scope.MetaGroupVersion, &options); err != nil { - err = errors.NewBadRequest(err.Error()) - return nil, err - } - } - if trace != nil { - trace.Step("About to Get from storage") - } - return r.Get(ctx, name, &options) - }) -} - -// GetResourceWithOptions returns a function that handles retrieving a single resource from a rest.Storage object. 
-func GetResourceWithOptions(r rest.GetterWithOptions, scope RequestScope, isSubresource bool) http.HandlerFunc { - return getResourceHandler(scope, - func(ctx request.Context, name string, req *http.Request, trace *utiltrace.Trace) (runtime.Object, error) { - opts, subpath, subpathKey := r.NewGetOptions() - trace.Step("About to process Get options") - if err := getRequestOptions(req, scope, opts, subpath, subpathKey, isSubresource); err != nil { - err = errors.NewBadRequest(err.Error()) - return nil, err - } - if trace != nil { - trace.Step("About to Get from storage") - } - return r.Get(ctx, name, opts) - }) -} - -// getRequestOptions parses out options and can include path information. The path information shouldn't include the subresource. -func getRequestOptions(req *http.Request, scope RequestScope, into runtime.Object, subpath bool, subpathKey string, isSubresource bool) error { - if into == nil { - return nil - } - - query := req.URL.Query() - if subpath { - newQuery := make(url.Values) - for k, v := range query { - newQuery[k] = v - } - - ctx := scope.ContextFunc(req) - requestInfo, _ := request.RequestInfoFrom(ctx) - startingIndex := 2 - if isSubresource { - startingIndex = 3 - } - - p := strings.Join(requestInfo.Parts[startingIndex:], "/") - - // ensure non-empty subpaths correctly reflect a leading slash - if len(p) > 0 && !strings.HasPrefix(p, "/") { - p = "/" + p - } - - // ensure subpaths correctly reflect the presence of a trailing slash on the original request - if strings.HasSuffix(requestInfo.Path, "/") && !strings.HasSuffix(p, "/") { - p += "/" - } - - newQuery[subpathKey] = []string{p} - query = newQuery - } - return scope.ParameterCodec.DecodeParameters(query, scope.Kind.GroupVersion(), into) -} - // ConnectResource returns a function that handles a connect request on a rest.Storage object. 
func ConnectResource(connecter rest.Connecter, scope RequestScope, admit admission.Interface, restPath string, isSubresource bool) http.HandlerFunc { return func(w http.ResponseWriter, req *http.Request) { @@ -254,19 +117,31 @@ func ConnectResource(connecter rest.Connecter, scope RequestScope, admit admissi ResourcePath: restPath, } userInfo, _ := request.UserFrom(ctx) - - err = admit.Admit(admission.NewAttributesRecord(connectRequest, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Connect, userInfo)) + // TODO: remove the mutating admission here as soon as we have ported all plugin that handle CONNECT + if mutatingAdmit, ok := admit.(admission.MutationInterface); ok { + err = mutatingAdmit.Admit(admission.NewAttributesRecord(connectRequest, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Connect, userInfo)) + if err != nil { + scope.err(err, w, req) + return + } + } + if mutatingAdmit, ok := admit.(admission.ValidationInterface); ok { + err = mutatingAdmit.Validate(admission.NewAttributesRecord(connectRequest, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Connect, userInfo)) + if err != nil { + scope.err(err, w, req) + return + } + } + } + requestInfo, _ := request.RequestInfoFrom(ctx) + metrics.RecordLongRunning(req, requestInfo, func() { + handler, err := connecter.Connect(ctx, name, opts, &responder{scope: scope, req: req, w: w}) if err != nil { scope.err(err, w, req) return } - } - handler, err := connecter.Connect(ctx, name, opts, &responder{scope: scope, req: req, w: w}) - if err != nil { - scope.err(err, w, req) - return - } - handler.ServeHTTP(w, req) + handler.ServeHTTP(w, req) + }) } } @@ -286,886 +161,6 @@ func (r *responder) Error(err error) { r.scope.err(err, r.w, r.req) } -func ListResource(r rest.Lister, rw rest.Watcher, scope RequestScope, forceWatch bool, minRequestTimeout time.Duration) http.HandlerFunc { - return func(w http.ResponseWriter, req *http.Request) { - // For performance tracking purposes. - trace := utiltrace.New("List " + req.URL.Path) - - namespace, err := scope.Namer.Namespace(req) - if err != nil { - scope.err(err, w, req) - return - } - - // Watches for single objects are routed to this function. - // Treat a name parameter the same as a field selector entry. - hasName := true - _, name, err := scope.Namer.Name(req) - if err != nil { - hasName = false - } - - ctx := scope.ContextFunc(req) - ctx = request.WithNamespace(ctx, namespace) - - opts := metainternalversion.ListOptions{} - if err := metainternalversion.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, &opts); err != nil { - err = errors.NewBadRequest(err.Error()) - scope.err(err, w, req) - return - } - - // transform fields - // TODO: DecodeParametersInto should do this. - if opts.FieldSelector != nil { - fn := func(label, value string) (newLabel, newValue string, err error) { - return scope.Convertor.ConvertFieldLabel(scope.Kind.GroupVersion().String(), scope.Kind.Kind, label, value) - } - if opts.FieldSelector, err = opts.FieldSelector.Transform(fn); err != nil { - // TODO: allow bad request to set field causes based on query parameters - err = errors.NewBadRequest(err.Error()) - scope.err(err, w, req) - return - } - } - - if hasName { - // metadata.name is the canonical internal name. - // SelectionPredicate will notice that this is - // a request for a single object and optimize the - // storage query accordingly. 
- nameSelector := fields.OneTermEqualSelector("metadata.name", name) - if opts.FieldSelector != nil && !opts.FieldSelector.Empty() { - // It doesn't make sense to ask for both a name - // and a field selector, since just the name is - // sufficient to narrow down the request to a - // single object. - scope.err(errors.NewBadRequest("both a name and a field selector provided; please provide one or the other."), w, req) - return - } - opts.FieldSelector = nameSelector - } - - if opts.Watch || forceWatch { - if rw == nil { - scope.err(errors.NewMethodNotSupported(scope.Resource.GroupResource(), "watch"), w, req) - return - } - // TODO: Currently we explicitly ignore ?timeout= and use only ?timeoutSeconds=. - timeout := time.Duration(0) - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - if timeout == 0 && minRequestTimeout > 0 { - timeout = time.Duration(float64(minRequestTimeout) * (rand.Float64() + 1.0)) - } - glog.V(2).Infof("Starting watch for %s, rv=%s labels=%s fields=%s timeout=%s", req.URL.Path, opts.ResourceVersion, opts.LabelSelector, opts.FieldSelector, timeout) - - watcher, err := rw.Watch(ctx, &opts) - if err != nil { - scope.err(err, w, req) - return - } - serveWatch(watcher, scope, req, w, timeout) - return - } - - // Log only long List requests (ignore Watch). - defer trace.LogIfLong(500 * time.Millisecond) - trace.Step("About to List from storage") - result, err := r.List(ctx, &opts) - if err != nil { - scope.err(err, w, req) - return - } - trace.Step("Listing from storage done") - numberOfItems, err := setListSelfLink(result, ctx, req, scope.Namer) - if err != nil { - scope.err(err, w, req) - return - } - trace.Step("Self-linking done") - // Ensure empty lists return a non-nil items slice - if numberOfItems == 0 && meta.IsListType(result) { - if err := meta.SetList(result, []runtime.Object{}); err != nil { - scope.err(err, w, req) - return - } - } - - transformResponseObject(ctx, scope, req, w, http.StatusOK, result) - trace.Step(fmt.Sprintf("Writing http response done (%d items)", numberOfItems)) - } -} - -func createHandler(r rest.NamedCreater, scope RequestScope, typer runtime.ObjectTyper, admit admission.Interface, includeName bool) http.HandlerFunc { - return func(w http.ResponseWriter, req *http.Request) { - // For performance tracking purposes. 
- trace := utiltrace.New("Create " + req.URL.Path) - defer trace.LogIfLong(500 * time.Millisecond) - - // TODO: we either want to remove timeout or document it (if we document, move timeout out of this function and declare it in api_installer) - timeout := parseTimeout(req.URL.Query().Get("timeout")) - - var ( - namespace, name string - err error - ) - if includeName { - namespace, name, err = scope.Namer.Name(req) - } else { - namespace, err = scope.Namer.Namespace(req) - } - if err != nil { - scope.err(err, w, req) - return - } - - ctx := scope.ContextFunc(req) - ctx = request.WithNamespace(ctx, namespace) - - gv := scope.Kind.GroupVersion() - s, err := negotiation.NegotiateInputSerializer(req, scope.Serializer) - if err != nil { - scope.err(err, w, req) - return - } - decoder := scope.Serializer.DecoderToVersion(s.Serializer, schema.GroupVersion{Group: gv.Group, Version: runtime.APIVersionInternal}) - - body, err := readBody(req) - if err != nil { - scope.err(err, w, req) - return - } - - defaultGVK := scope.Kind - original := r.New() - trace.Step("About to convert to expected version") - obj, gvk, err := decoder.Decode(body, &defaultGVK, original) - if err != nil { - err = transformDecodeError(typer, err, original, gvk, body) - scope.err(err, w, req) - return - } - if gvk.GroupVersion() != gv { - err = errors.NewBadRequest(fmt.Sprintf("the API version in the data (%s) does not match the expected API version (%v)", gvk.GroupVersion().String(), gv.String())) - scope.err(err, w, req) - return - } - trace.Step("Conversion done") - - ae := request.AuditEventFrom(ctx) - audit.LogRequestObject(ae, obj, scope.Resource, scope.Subresource, scope.Serializer) - - if admit != nil && admit.Handles(admission.Create) { - userInfo, _ := request.UserFrom(ctx) - - err = admit.Admit(admission.NewAttributesRecord(obj, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Create, userInfo)) - if err != nil { - scope.err(err, w, req) - return - } - } - - // TODO: replace with content type negotiation? - includeUninitialized := req.URL.Query().Get("includeUninitialized") == "1" - - trace.Step("About to store object in database") - result, err := finishRequest(timeout, func() (runtime.Object, error) { - return r.Create(ctx, name, obj, includeUninitialized) - }) - if err != nil { - scope.err(err, w, req) - return - } - trace.Step("Object stored in database") - - requestInfo, ok := request.RequestInfoFrom(ctx) - if !ok { - scope.err(fmt.Errorf("missing requestInfo"), w, req) - return - } - if err := setSelfLink(result, requestInfo, scope.Namer); err != nil { - scope.err(err, w, req) - return - } - trace.Step("Self-link added") - - // If the object is partially initialized, always indicate it via StatusAccepted - code := http.StatusCreated - if accessor, err := meta.Accessor(result); err == nil { - if accessor.GetInitializers() != nil { - code = http.StatusAccepted - } - } - status, ok := result.(*metav1.Status) - if ok && err == nil && status.Code == 0 { - status.Code = int32(code) - } - - transformResponseObject(ctx, scope, req, w, code, result) - } -} - -// CreateNamedResource returns a function that will handle a resource creation with name. -func CreateNamedResource(r rest.NamedCreater, scope RequestScope, typer runtime.ObjectTyper, admit admission.Interface) http.HandlerFunc { - return createHandler(r, scope, typer, admit, true) -} - -// CreateResource returns a function that will handle a resource creation. 
-func CreateResource(r rest.Creater, scope RequestScope, typer runtime.ObjectTyper, admit admission.Interface) http.HandlerFunc { - return createHandler(&namedCreaterAdapter{r}, scope, typer, admit, false) -} - -type namedCreaterAdapter struct { - rest.Creater -} - -func (c *namedCreaterAdapter) Create(ctx request.Context, name string, obj runtime.Object, includeUninitialized bool) (runtime.Object, error) { - return c.Creater.Create(ctx, obj, includeUninitialized) -} - -// PatchResource returns a function that will handle a resource patch -// TODO: Eventually PatchResource should just use GuaranteedUpdate and this routine should be a bit cleaner -func PatchResource(r rest.Patcher, scope RequestScope, admit admission.Interface, converter runtime.ObjectConvertor) http.HandlerFunc { - return func(w http.ResponseWriter, req *http.Request) { - // TODO: we either want to remove timeout or document it (if we - // document, move timeout out of this function and declare it in - // api_installer) - timeout := parseTimeout(req.URL.Query().Get("timeout")) - - namespace, name, err := scope.Namer.Name(req) - if err != nil { - scope.err(err, w, req) - return - } - - ctx := scope.ContextFunc(req) - ctx = request.WithNamespace(ctx, namespace) - - versionedObj, err := converter.ConvertToVersion(r.New(), scope.Kind.GroupVersion()) - if err != nil { - scope.err(err, w, req) - return - } - - // TODO: handle this in negotiation - contentType := req.Header.Get("Content-Type") - // Remove "; charset=" if included in header. - if idx := strings.Index(contentType, ";"); idx > 0 { - contentType = contentType[:idx] - } - patchType := types.PatchType(contentType) - - patchJS, err := readBody(req) - if err != nil { - scope.err(err, w, req) - return - } - - ae := request.AuditEventFrom(ctx) - audit.LogRequestPatch(ae, patchJS) - - s, ok := runtime.SerializerInfoForMediaType(scope.Serializer.SupportedMediaTypes(), runtime.ContentTypeJSON) - if !ok { - scope.err(fmt.Errorf("no serializer defined for JSON"), w, req) - return - } - gv := scope.Kind.GroupVersion() - codec := runtime.NewCodec( - scope.Serializer.EncoderForVersion(s.Serializer, gv), - scope.Serializer.DecoderToVersion(s.Serializer, schema.GroupVersion{Group: gv.Group, Version: runtime.APIVersionInternal}), - ) - - updateAdmit := func(updatedObject runtime.Object, currentObject runtime.Object) error { - if admit != nil && admit.Handles(admission.Update) { - userInfo, _ := request.UserFrom(ctx) - return admit.Admit(admission.NewAttributesRecord(updatedObject, currentObject, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Update, userInfo)) - } - - return nil - } - - result, err := patchResource(ctx, updateAdmit, timeout, versionedObj, r, name, patchType, patchJS, - scope.Namer, scope.Copier, scope.Creater, scope.Defaulter, scope.UnsafeConvertor, scope.Kind, scope.Resource, codec) - if err != nil { - scope.err(err, w, req) - return - } - - requestInfo, ok := request.RequestInfoFrom(ctx) - if !ok { - scope.err(fmt.Errorf("missing requestInfo"), w, req) - return - } - if err := setSelfLink(result, requestInfo, scope.Namer); err != nil { - scope.err(err, w, req) - return - } - - transformResponseObject(ctx, scope, req, w, http.StatusOK, result) - } -} - -type updateAdmissionFunc func(updatedObject runtime.Object, currentObject runtime.Object) error - -// patchResource divides PatchResource for easier unit testing -func patchResource( - ctx request.Context, - admit updateAdmissionFunc, - timeout time.Duration, - versionedObj runtime.Object, 
- patcher rest.Patcher, - name string, - patchType types.PatchType, - patchJS []byte, - namer ScopeNamer, - copier runtime.ObjectCopier, - creater runtime.ObjectCreater, - defaulter runtime.ObjectDefaulter, - unsafeConvertor runtime.ObjectConvertor, - kind schema.GroupVersionKind, - resource schema.GroupVersionResource, - codec runtime.Codec, -) (runtime.Object, error) { - - namespace := request.NamespaceValue(ctx) - - var ( - originalObjJS []byte - originalPatchedObjJS []byte - originalObjMap map[string]interface{} - getOriginalPatchMap func() (map[string]interface{}, error) - lastConflictErr error - originalResourceVersion string - ) - - // applyPatch is called every time GuaranteedUpdate asks for the updated object, - // and is given the currently persisted object as input. - applyPatch := func(_ request.Context, _, currentObject runtime.Object) (runtime.Object, error) { - // Make sure we actually have a persisted currentObject - if hasUID, err := hasUID(currentObject); err != nil { - return nil, err - } else if !hasUID { - return nil, errors.NewNotFound(resource.GroupResource(), name) - } - - currentResourceVersion := "" - if currentMetadata, err := meta.Accessor(currentObject); err == nil { - currentResourceVersion = currentMetadata.GetResourceVersion() - } - - switch { - case originalObjJS == nil && originalObjMap == nil: - // first time through, - // 1. apply the patch - // 2. save the original and patched to detect whether there were conflicting changes on retries - - originalResourceVersion = currentResourceVersion - objToUpdate := patcher.New() - - // For performance reasons, in case of strategicpatch, we avoid json - // marshaling and unmarshaling and operate just on map[string]interface{}. - // In case of other patch types, we still have to operate on JSON - // representations. - switch patchType { - case types.JSONPatchType, types.MergePatchType: - originalJS, patchedJS, err := patchObjectJSON(patchType, codec, currentObject, patchJS, objToUpdate, versionedObj) - if err != nil { - return nil, err - } - originalObjJS, originalPatchedObjJS = originalJS, patchedJS - - // Make a getter that can return a fresh strategic patch map if needed for conflict retries - // We have to rebuild it each time we need it, because the map gets mutated when being applied - var originalPatchBytes []byte - getOriginalPatchMap = func() (map[string]interface{}, error) { - if originalPatchBytes == nil { - // Compute once - originalPatchBytes, err = strategicpatch.CreateTwoWayMergePatch(originalObjJS, originalPatchedObjJS, versionedObj) - if err != nil { - return nil, err - } - } - // Return a fresh map every time - originalPatchMap := make(map[string]interface{}) - if err := json.Unmarshal(originalPatchBytes, &originalPatchMap); err != nil { - return nil, err - } - return originalPatchMap, nil - } - - case types.StrategicMergePatchType: - // Since the patch is applied on versioned objects, we need to convert the - // current object to versioned representation first. - currentVersionedObject, err := unsafeConvertor.ConvertToVersion(currentObject, kind.GroupVersion()) - if err != nil { - return nil, err - } - versionedObjToUpdate, err := creater.New(kind) - if err != nil { - return nil, err - } - // Capture the original object map and patch for possible retries. 
- originalMap, err := unstructured.DefaultConverter.ToUnstructured(currentVersionedObject) - if err != nil { - return nil, err - } - if err := strategicPatchObject(codec, defaulter, currentVersionedObject, patchJS, versionedObjToUpdate, versionedObj); err != nil { - return nil, err - } - // Convert the object back to unversioned. - gvk := kind.GroupKind().WithVersion(runtime.APIVersionInternal) - unversionedObjToUpdate, err := unsafeConvertor.ConvertToVersion(versionedObjToUpdate, gvk.GroupVersion()) - if err != nil { - return nil, err - } - objToUpdate = unversionedObjToUpdate - // Store unstructured representation for possible retries. - originalObjMap = originalMap - // Make a getter that can return a fresh strategic patch map if needed for conflict retries - // We have to rebuild it each time we need it, because the map gets mutated when being applied - getOriginalPatchMap = func() (map[string]interface{}, error) { - patchMap := make(map[string]interface{}) - if err := json.Unmarshal(patchJS, &patchMap); err != nil { - return nil, err - } - return patchMap, nil - } - } - if err := checkName(objToUpdate, name, namespace, namer); err != nil { - return nil, err - } - return objToUpdate, nil - - default: - // on a conflict, - // 1. build a strategic merge patch from originalJS and the patchedJS. Different patch types can - // be specified, but a strategic merge patch should be expressive enough handle them. Build the - // patch with this type to handle those cases. - // 2. build a strategic merge patch from originalJS and the currentJS - // 3. ensure no conflicts between the two patches - // 4. apply the #1 patch to the currentJS object - - // Since the patch is applied on versioned objects, we need to convert the - // current object to versioned representation first. - currentVersionedObject, err := unsafeConvertor.ConvertToVersion(currentObject, kind.GroupVersion()) - if err != nil { - return nil, err - } - currentObjMap, err := unstructured.DefaultConverter.ToUnstructured(currentVersionedObject) - if err != nil { - return nil, err - } - - var currentPatchMap map[string]interface{} - if originalObjMap != nil { - var err error - currentPatchMap, err = strategicpatch.CreateTwoWayMergeMapPatch(originalObjMap, currentObjMap, versionedObj) - if err != nil { - return nil, err - } - } else { - // Compute current patch. 
-				currentObjJS, err := runtime.Encode(codec, currentObject)
-				if err != nil {
-					return nil, err
-				}
-				currentPatch, err := strategicpatch.CreateTwoWayMergePatch(originalObjJS, currentObjJS, versionedObj)
-				if err != nil {
-					return nil, err
-				}
-				currentPatchMap = make(map[string]interface{})
-				if err := json.Unmarshal(currentPatch, &currentPatchMap); err != nil {
-					return nil, err
-				}
-			}
-
-			// Get a fresh copy of the original strategic patch each time through, since applying it mutates the map
-			originalPatchMap, err := getOriginalPatchMap()
-			if err != nil {
-				return nil, err
-			}
-
-			hasConflicts, err := mergepatch.HasConflicts(originalPatchMap, currentPatchMap)
-			if err != nil {
-				return nil, err
-			}
-
-			if hasConflicts {
-				diff1, _ := json.Marshal(currentPatchMap)
-				diff2, _ := json.Marshal(originalPatchMap)
-				patchDiffErr := fmt.Errorf("there is a meaningful conflict (firstResourceVersion: %q, currentResourceVersion: %q):\n diff1=%v\n, diff2=%v\n", originalResourceVersion, currentResourceVersion, string(diff1), string(diff2))
-				glog.V(4).Infof("patchResource failed for resource %s, because there is a meaningful conflict(firstResourceVersion: %q, currentResourceVersion: %q):\n diff1=%v\n, diff2=%v\n", name, originalResourceVersion, currentResourceVersion, string(diff1), string(diff2))
-
-				// Return the last conflict error we got if we have one
-				if lastConflictErr != nil {
-					return nil, lastConflictErr
-				}
-				// Otherwise manufacture one of our own
-				return nil, errors.NewConflict(resource.GroupResource(), name, patchDiffErr)
-			}
-
-			versionedObjToUpdate, err := creater.New(kind)
-			if err != nil {
-				return nil, err
-			}
-			if err := applyPatchToObject(codec, defaulter, currentObjMap, originalPatchMap, versionedObjToUpdate, versionedObj); err != nil {
-				return nil, err
-			}
-			// Convert the object back to unversioned.
-			gvk := kind.GroupKind().WithVersion(runtime.APIVersionInternal)
-			objToUpdate, err := unsafeConvertor.ConvertToVersion(versionedObjToUpdate, gvk.GroupVersion())
-			if err != nil {
-				return nil, err
-			}
-
-			return objToUpdate, nil
-		}
-	}
-
-	// applyAdmission is called every time GuaranteedUpdate asks for the updated object,
-	// and is given the currently persisted object and the patched object as input.
-	applyAdmission := func(ctx request.Context, patchedObject runtime.Object, currentObject runtime.Object) (runtime.Object, error) {
-		return patchedObject, admit(patchedObject, currentObject)
-	}
-
-	updatedObjectInfo := rest.DefaultUpdatedObjectInfo(nil, copier, applyPatch, applyAdmission)
-
-	return finishRequest(timeout, func() (runtime.Object, error) {
-		updateObject, _, updateErr := patcher.Update(ctx, name, updatedObjectInfo)
-		for i := 0; i < MaxRetryWhenPatchConflicts && (errors.IsConflict(updateErr)); i++ {
-			lastConflictErr = updateErr
-			updateObject, _, updateErr = patcher.Update(ctx, name, updatedObjectInfo)
-		}
-		return updateObject, updateErr
-	})
-}
-
-// UpdateResource returns a function that will handle a resource update
-func UpdateResource(r rest.Updater, scope RequestScope, typer runtime.ObjectTyper, admit admission.Interface) http.HandlerFunc {
-	return func(w http.ResponseWriter, req *http.Request) {
-		// For performance tracking purposes.
- trace := utiltrace.New("Update " + req.URL.Path) - defer trace.LogIfLong(500 * time.Millisecond) - - // TODO: we either want to remove timeout or document it (if we document, move timeout out of this function and declare it in api_installer) - timeout := parseTimeout(req.URL.Query().Get("timeout")) - - namespace, name, err := scope.Namer.Name(req) - if err != nil { - scope.err(err, w, req) - return - } - ctx := scope.ContextFunc(req) - ctx = request.WithNamespace(ctx, namespace) - - body, err := readBody(req) - if err != nil { - scope.err(err, w, req) - return - } - - s, err := negotiation.NegotiateInputSerializer(req, scope.Serializer) - if err != nil { - scope.err(err, w, req) - return - } - defaultGVK := scope.Kind - original := r.New() - trace.Step("About to convert to expected version") - decoder := scope.Serializer.DecoderToVersion(s.Serializer, schema.GroupVersion{Group: defaultGVK.Group, Version: runtime.APIVersionInternal}) - obj, gvk, err := decoder.Decode(body, &defaultGVK, original) - if err != nil { - err = transformDecodeError(typer, err, original, gvk, body) - scope.err(err, w, req) - return - } - if gvk.GroupVersion() != defaultGVK.GroupVersion() { - err = errors.NewBadRequest(fmt.Sprintf("the API version in the data (%s) does not match the expected API version (%s)", gvk.GroupVersion(), defaultGVK.GroupVersion())) - scope.err(err, w, req) - return - } - trace.Step("Conversion done") - - ae := request.AuditEventFrom(ctx) - audit.LogRequestObject(ae, obj, scope.Resource, scope.Subresource, scope.Serializer) - - if err := checkName(obj, name, namespace, scope.Namer); err != nil { - scope.err(err, w, req) - return - } - - var transformers []rest.TransformFunc - if admit != nil && admit.Handles(admission.Update) { - transformers = append(transformers, func(ctx request.Context, newObj, oldObj runtime.Object) (runtime.Object, error) { - userInfo, _ := request.UserFrom(ctx) - return newObj, admit.Admit(admission.NewAttributesRecord(newObj, oldObj, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Update, userInfo)) - }) - } - - trace.Step("About to store object in database") - wasCreated := false - result, err := finishRequest(timeout, func() (runtime.Object, error) { - obj, created, err := r.Update(ctx, name, rest.DefaultUpdatedObjectInfo(obj, scope.Copier, transformers...)) - wasCreated = created - return obj, err - }) - if err != nil { - scope.err(err, w, req) - return - } - trace.Step("Object stored in database") - - requestInfo, ok := request.RequestInfoFrom(ctx) - if !ok { - scope.err(fmt.Errorf("missing requestInfo"), w, req) - return - } - if err := setSelfLink(result, requestInfo, scope.Namer); err != nil { - scope.err(err, w, req) - return - } - trace.Step("Self-link added") - - status := http.StatusOK - if wasCreated { - status = http.StatusCreated - } - - transformResponseObject(ctx, scope, req, w, status, result) - } -} - -// DeleteResource returns a function that will handle a resource deletion -func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope RequestScope, admit admission.Interface) http.HandlerFunc { - return func(w http.ResponseWriter, req *http.Request) { - // For performance tracking purposes. 
- trace := utiltrace.New("Delete " + req.URL.Path) - defer trace.LogIfLong(500 * time.Millisecond) - - // TODO: we either want to remove timeout or document it (if we document, move timeout out of this function and declare it in api_installer) - timeout := parseTimeout(req.URL.Query().Get("timeout")) - - namespace, name, err := scope.Namer.Name(req) - if err != nil { - scope.err(err, w, req) - return - } - ctx := scope.ContextFunc(req) - ctx = request.WithNamespace(ctx, namespace) - - options := &metav1.DeleteOptions{} - if allowsOptions { - body, err := readBody(req) - if err != nil { - scope.err(err, w, req) - return - } - if len(body) > 0 { - s, err := negotiation.NegotiateInputSerializer(req, metainternalversion.Codecs) - if err != nil { - scope.err(err, w, req) - return - } - // For backwards compatibility, we need to allow existing clients to submit per group DeleteOptions - // It is also allowed to pass a body with meta.k8s.io/v1.DeleteOptions - defaultGVK := scope.MetaGroupVersion.WithKind("DeleteOptions") - obj, _, err := metainternalversion.Codecs.DecoderToVersion(s.Serializer, defaultGVK.GroupVersion()).Decode(body, &defaultGVK, options) - if err != nil { - scope.err(err, w, req) - return - } - if obj != options { - scope.err(fmt.Errorf("decoded object cannot be converted to DeleteOptions"), w, req) - return - } - - trace.Step("About to record audit event") - ae := request.AuditEventFrom(ctx) - audit.LogRequestObject(ae, obj, scope.Resource, scope.Subresource, scope.Serializer) - } else { - if values := req.URL.Query(); len(values) > 0 { - if err := metainternalversion.ParameterCodec.DecodeParameters(values, scope.MetaGroupVersion, options); err != nil { - err = errors.NewBadRequest(err.Error()) - scope.err(err, w, req) - return - } - } - } - } - - if admit != nil && admit.Handles(admission.Delete) { - trace.Step("About to check admission control") - userInfo, _ := request.UserFrom(ctx) - - err = admit.Admit(admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Delete, userInfo)) - if err != nil { - scope.err(err, w, req) - return - } - } - - trace.Step("About to delete object from database") - wasDeleted := true - result, err := finishRequest(timeout, func() (runtime.Object, error) { - obj, deleted, err := r.Delete(ctx, name, options) - wasDeleted = deleted - return obj, err - }) - if err != nil { - scope.err(err, w, req) - return - } - trace.Step("Object deleted from database") - - status := http.StatusOK - // Return http.StatusAccepted if the resource was not deleted immediately and - // user requested cascading deletion by setting OrphanDependents=false. - // Note: We want to do this always if resource was not deleted immediately, but - // that will break existing clients. - // Other cases where resource is not instantly deleted are: namespace deletion - // and pod graceful deletion. - if !wasDeleted && options.OrphanDependents != nil && *options.OrphanDependents == false { - status = http.StatusAccepted - } - // if the rest.Deleter returns a nil object, fill out a status. Callers may return a valid - // object with the response. 
- if result == nil { - result = &metav1.Status{ - Status: metav1.StatusSuccess, - Code: int32(status), - Details: &metav1.StatusDetails{ - Name: name, - Kind: scope.Kind.Kind, - }, - } - } else { - // when a non-status response is returned, set the self link - requestInfo, ok := request.RequestInfoFrom(ctx) - if !ok { - scope.err(fmt.Errorf("missing requestInfo"), w, req) - return - } - if _, ok := result.(*metav1.Status); !ok { - if err := setSelfLink(result, requestInfo, scope.Namer); err != nil { - scope.err(err, w, req) - return - } - } - } - - transformResponseObject(ctx, scope, req, w, status, result) - } -} - -// DeleteCollection returns a function that will handle a collection deletion -func DeleteCollection(r rest.CollectionDeleter, checkBody bool, scope RequestScope, admit admission.Interface) http.HandlerFunc { - return func(w http.ResponseWriter, req *http.Request) { - // TODO: we either want to remove timeout or document it (if we document, move timeout out of this function and declare it in api_installer) - timeout := parseTimeout(req.URL.Query().Get("timeout")) - - namespace, err := scope.Namer.Namespace(req) - if err != nil { - scope.err(err, w, req) - return - } - - ctx := scope.ContextFunc(req) - ctx = request.WithNamespace(ctx, namespace) - - if admit != nil && admit.Handles(admission.Delete) { - userInfo, _ := request.UserFrom(ctx) - - err = admit.Admit(admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, "", scope.Resource, scope.Subresource, admission.Delete, userInfo)) - if err != nil { - scope.err(err, w, req) - return - } - } - - listOptions := metainternalversion.ListOptions{} - if err := metainternalversion.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, &listOptions); err != nil { - err = errors.NewBadRequest(err.Error()) - scope.err(err, w, req) - return - } - - // transform fields - // TODO: DecodeParametersInto should do this. - if listOptions.FieldSelector != nil { - fn := func(label, value string) (newLabel, newValue string, err error) { - return scope.Convertor.ConvertFieldLabel(scope.Kind.GroupVersion().String(), scope.Kind.Kind, label, value) - } - if listOptions.FieldSelector, err = listOptions.FieldSelector.Transform(fn); err != nil { - // TODO: allow bad request to set field causes based on query parameters - err = errors.NewBadRequest(err.Error()) - scope.err(err, w, req) - return - } - } - - options := &metav1.DeleteOptions{} - if checkBody { - body, err := readBody(req) - if err != nil { - scope.err(err, w, req) - return - } - if len(body) > 0 { - s, err := negotiation.NegotiateInputSerializer(req, scope.Serializer) - if err != nil { - scope.err(err, w, req) - return - } - defaultGVK := scope.Kind.GroupVersion().WithKind("DeleteOptions") - obj, _, err := scope.Serializer.DecoderToVersion(s.Serializer, defaultGVK.GroupVersion()).Decode(body, &defaultGVK, options) - if err != nil { - scope.err(err, w, req) - return - } - if obj != options { - scope.err(fmt.Errorf("decoded object cannot be converted to DeleteOptions"), w, req) - return - } - - ae := request.AuditEventFrom(ctx) - audit.LogRequestObject(ae, obj, scope.Resource, scope.Subresource, scope.Serializer) - } - } - - result, err := finishRequest(timeout, func() (runtime.Object, error) { - return r.DeleteCollection(ctx, options, &listOptions) - }) - if err != nil { - scope.err(err, w, req) - return - } - - // if the rest.Deleter returns a nil object, fill out a status. Callers may return a valid - // object with the response. 
- if result == nil { - result = &metav1.Status{ - Status: metav1.StatusSuccess, - Code: http.StatusOK, - Details: &metav1.StatusDetails{ - Kind: scope.Kind.Kind, - }, - } - } else { - // when a non-status response is returned, set the self link - if _, ok := result.(*metav1.Status); !ok { - if _, err := setListSelfLink(result, ctx, req, scope.Namer); err != nil { - scope.err(err, w, req) - return - } - } - } - - transformResponseObject(ctx, scope, req, w, http.StatusOK, result) - } -} - // resultFunc is a function that returns a rest result and can be run in a goroutine type resultFunc func() (runtime.Object, error) diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/update.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/update.go new file mode 100644 index 000000000000..319bfd51b7cb --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/update.go @@ -0,0 +1,135 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handlers + +import ( + "fmt" + "net/http" + "time" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/audit" + "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" + "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/registry/rest" + utiltrace "k8s.io/apiserver/pkg/util/trace" +) + +// UpdateResource returns a function that will handle a resource update +func UpdateResource(r rest.Updater, scope RequestScope, typer runtime.ObjectTyper, admit admission.Interface) http.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request) { + // For performance tracking purposes. 
+ trace := utiltrace.New("Update " + req.URL.Path) + defer trace.LogIfLong(500 * time.Millisecond) + + // TODO: we either want to remove timeout or document it (if we document, move timeout out of this function and declare it in api_installer) + timeout := parseTimeout(req.URL.Query().Get("timeout")) + + namespace, name, err := scope.Namer.Name(req) + if err != nil { + scope.err(err, w, req) + return + } + ctx := scope.ContextFunc(req) + ctx = request.WithNamespace(ctx, namespace) + + body, err := readBody(req) + if err != nil { + scope.err(err, w, req) + return + } + + s, err := negotiation.NegotiateInputSerializer(req, scope.Serializer) + if err != nil { + scope.err(err, w, req) + return + } + defaultGVK := scope.Kind + original := r.New() + trace.Step("About to convert to expected version") + decoder := scope.Serializer.DecoderToVersion(s.Serializer, schema.GroupVersion{Group: defaultGVK.Group, Version: runtime.APIVersionInternal}) + obj, gvk, err := decoder.Decode(body, &defaultGVK, original) + if err != nil { + err = transformDecodeError(typer, err, original, gvk, body) + scope.err(err, w, req) + return + } + if gvk.GroupVersion() != defaultGVK.GroupVersion() { + err = errors.NewBadRequest(fmt.Sprintf("the API version in the data (%s) does not match the expected API version (%s)", gvk.GroupVersion(), defaultGVK.GroupVersion())) + scope.err(err, w, req) + return + } + trace.Step("Conversion done") + + ae := request.AuditEventFrom(ctx) + audit.LogRequestObject(ae, obj, scope.Resource, scope.Subresource, scope.Serializer) + + if err := checkName(obj, name, namespace, scope.Namer); err != nil { + scope.err(err, w, req) + return + } + + userInfo, _ := request.UserFrom(ctx) + staticAdmissionAttributes := admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Update, userInfo) + var transformers []rest.TransformFunc + if mutatingAdmission, ok := admit.(admission.MutationInterface); ok && mutatingAdmission.Handles(admission.Update) { + transformers = append(transformers, func(ctx request.Context, newObj, oldObj runtime.Object) (runtime.Object, error) { + return newObj, mutatingAdmission.Admit(admission.NewAttributesRecord(newObj, oldObj, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Update, userInfo)) + }) + } + + trace.Step("About to store object in database") + wasCreated := false + result, err := finishRequest(timeout, func() (runtime.Object, error) { + obj, created, err := r.Update( + ctx, + name, + rest.DefaultUpdatedObjectInfo(obj, transformers...), + rest.AdmissionToValidateObjectFunc(admit, staticAdmissionAttributes), + rest.AdmissionToValidateObjectUpdateFunc(admit, staticAdmissionAttributes), + ) + wasCreated = created + return obj, err + }) + if err != nil { + scope.err(err, w, req) + return + } + trace.Step("Object stored in database") + + requestInfo, ok := request.RequestInfoFrom(ctx) + if !ok { + scope.err(fmt.Errorf("missing requestInfo"), w, req) + return + } + if err := setSelfLink(result, requestInfo, scope.Namer); err != nil { + scope.err(err, w, req) + return + } + trace.Step("Self-link added") + + status := http.StatusOK + if wasCreated { + status = http.StatusCreated + } + + transformResponseObject(ctx, scope, req, w, status, result) + } +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/installer.go b/vendor/k8s.io/apiserver/pkg/endpoints/installer.go index 38926a253b7a..87bf1700d2bb 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/installer.go +++ 
b/vendor/k8s.io/apiserver/pkg/endpoints/installer.go @@ -144,8 +144,9 @@ func (a *APIInstaller) newWebService() *restful.WebService { // object. If the storage object is a subresource and has an override supplied for it, it returns // the group version kind supplied in the override. func (a *APIInstaller) getResourceKind(path string, storage rest.Storage) (schema.GroupVersionKind, error) { - if fqKindToRegister, ok := a.group.SubresourceGroupVersionKind[path]; ok { - return fqKindToRegister, nil + // Let the storage tell us exactly what GVK it has + if gvkProvider, ok := storage.(rest.GroupVersionKindProvider); ok { + return gvkProvider.GroupVersionKind(a.group.GroupVersion), nil } object := storage.New() @@ -162,12 +163,6 @@ func (a *APIInstaller) getResourceKind(path string, storage rest.Storage) (schem fqKindToRegister = a.group.GroupVersion.WithKind(fqKind.Kind) break } - - // TODO: keep rid of extensions api group dependency here - // This keeps it doing what it was doing before, but it doesn't feel right. - if fqKind.Group == "extensions" && fqKind.Kind == "ThirdPartyResourceData" { - fqKindToRegister = a.group.GroupVersion.WithKind(fqKind.Kind) - } } if fqKindToRegister.Empty() { return schema.GroupVersionKind{}, fmt.Errorf("unable to locate fully qualified kind for %v: found %v when registering for %v", reflect.TypeOf(object), fqKinds, a.group.GroupVersion) @@ -316,9 +311,12 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag return nil, err } getOptionsInternalKind = getOptionsInternalKinds[0] - versionedGetOptions, err = a.group.Creater.New(optionsExternalVersion.WithKind(getOptionsInternalKind.Kind)) + versionedGetOptions, err = a.group.Creater.New(a.group.GroupVersion.WithKind(getOptionsInternalKind.Kind)) if err != nil { - return nil, err + versionedGetOptions, err = a.group.Creater.New(optionsExternalVersion.WithKind(getOptionsInternalKind.Kind)) + if err != nil { + return nil, err + } } isGetter = true } @@ -347,9 +345,12 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag } connectOptionsInternalKind = connectOptionsInternalKinds[0] - versionedConnectOptions, err = a.group.Creater.New(optionsExternalVersion.WithKind(connectOptionsInternalKind.Kind)) + versionedConnectOptions, err = a.group.Creater.New(a.group.GroupVersion.WithKind(connectOptionsInternalKind.Kind)) if err != nil { - return nil, err + versionedConnectOptions, err = a.group.Creater.New(optionsExternalVersion.WithKind(connectOptionsInternalKind.Kind)) + if err != nil { + return nil, err + } } } } @@ -526,7 +527,6 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag ParameterCodec: a.group.ParameterCodec, Creater: a.group.Creater, Convertor: a.group.Convertor, - Copier: a.group.Copier, Defaulter: a.group.Defaulter, Typer: a.group.Typer, UnsafeConvertor: a.group.UnsafeConvertor, @@ -545,9 +545,9 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag reqScope.MetaGroupVersion = *a.group.MetaGroupVersion } for _, action := range actions { - versionedObject := storageMeta.ProducesObject(action.Verb) - if versionedObject == nil { - versionedObject = defaultVersionedObject + producedObject := storageMeta.ProducesObject(action.Verb) + if producedObject == nil { + producedObject = defaultVersionedObject } reqScope.Namer = action.Namer @@ -617,8 +617,8 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag Param(ws.QueryParameter("pretty", "If 'true', then the output is 
pretty printed.")). Operation("read"+namespaced+kind+strings.Title(subresource)+operationSuffix). Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). - Returns(http.StatusOK, "OK", versionedObject). - Writes(versionedObject) + Returns(http.StatusOK, "OK", producedObject). + Writes(producedObject) if isGetterWithOptions { if err := addObjectParams(ws, route, versionedGetOptions); err != nil { return nil, err @@ -677,9 +677,12 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). Operation("replace"+namespaced+kind+strings.Title(subresource)+operationSuffix). Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). - Returns(http.StatusOK, "OK", versionedObject). - Reads(versionedObject). - Writes(versionedObject) + Returns(http.StatusOK, "OK", producedObject). + // TODO: in some cases, the API may return a v1.Status instead of the versioned object + // but currently go-restful can't handle multiple different objects being returned. + Returns(http.StatusCreated, "Created", producedObject). + Reads(defaultVersionedObject). + Writes(producedObject) addParams(route, action.Params) routes = append(routes, route) case "PATCH": // Partially update a resource @@ -694,9 +697,9 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag Consumes(string(types.JSONPatchType), string(types.MergePatchType), string(types.StrategicMergePatchType)). Operation("patch"+namespaced+kind+strings.Title(subresource)+operationSuffix). Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). - Returns(http.StatusOK, "OK", versionedObject). + Returns(http.StatusOK, "OK", producedObject). Reads(metav1.Patch{}). - Writes(versionedObject) + Writes(producedObject) addParams(route, action.Params) routes = append(routes, route) case "POST": // Create a resource. @@ -717,9 +720,13 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). Operation("create"+namespaced+kind+strings.Title(subresource)+operationSuffix). Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). - Returns(http.StatusOK, "OK", versionedObject). - Reads(versionedObject). - Writes(versionedObject) + Returns(http.StatusOK, "OK", producedObject). + // TODO: in some cases, the API may return a v1.Status instead of the versioned object + // but currently go-restful can't handle multiple different objects being returned. + Returns(http.StatusCreated, "Created", producedObject). + Returns(http.StatusAccepted, "Accepted", producedObject). + Reads(defaultVersionedObject). + Writes(producedObject) addParams(route, action.Params) routes = append(routes, route) case "DELETE": // Delete a resource. 
@@ -814,6 +821,10 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag routes = append(routes, buildProxyRoute(ws, "OPTIONS", a.prefix, action.Path, kind, resource, subresource, namespaced, requestScope, hasSubresource, action.Params, proxyHandler, operationSuffix)) case "CONNECT": for _, method := range connecter.ConnectMethods() { + connectProducedObject := storageMeta.ProducesObject(method) + if connectProducedObject == nil { + connectProducedObject = "string" + } doc := "connect " + method + " requests to " + kind if hasSubresource { doc = "connect " + method + " requests to " + subresource + " of " + kind @@ -825,7 +836,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag Operation("connect" + strings.Title(strings.ToLower(method)) + namespaced + kind + strings.Title(subresource) + operationSuffix). Produces("*/*"). Consumes("*/*"). - Writes("string") + Writes(connectProducedObject) if versionedConnectOptions != nil { if err := addObjectParams(ws, route, versionedConnectOptions); err != nil { return nil, err @@ -862,7 +873,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag apiResource.Categories = categoriesProvider.Categories() } if gvkProvider, ok := storage.(rest.GroupVersionKindProvider); ok { - gvk := gvkProvider.GroupVersionKind() + gvk := gvkProvider.GroupVersionKind(a.group.GroupVersion) apiResource.Group = gvk.Group apiResource.Version = gvk.Version apiResource.Kind = gvk.Kind diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/metrics/BUILD b/vendor/k8s.io/apiserver/pkg/endpoints/metrics/BUILD index bcc239904e73..8d57d273e3f5 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/metrics/BUILD +++ b/vendor/k8s.io/apiserver/pkg/endpoints/metrics/BUILD @@ -9,16 +9,19 @@ load( go_test( name = "go_default_test", srcs = ["metrics_test.go"], + importpath = "k8s.io/apiserver/pkg/endpoints/metrics", library = ":go_default_library", ) go_library( name = "go_default_library", srcs = ["metrics.go"], + importpath = "k8s.io/apiserver/pkg/endpoints/metrics", deps = [ "//vendor/github.com/emicklei/go-restful:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", + "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", ], ) diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go b/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go index 90eaed114c28..65e651a33175 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go @@ -27,7 +27,7 @@ import ( "time" utilnet "k8s.io/apimachinery/pkg/util/net" - //utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/endpoints/request" "github.com/emicklei/go-restful" "github.com/prometheus/client_golang/prometheus" @@ -43,6 +43,13 @@ var ( }, []string{"verb", "resource", "subresource", "scope", "client", "contentType", "code"}, ) + longRunningRequestGauge = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "apiserver_longrunning_gauge", + Help: "Gauge of all active long-running apiserver requests broken out by verb, API resource, and scope. Not all requests are tracked this way.", + }, + []string{"verb", "resource", "subresource", "scope"}, + ) requestLatencies = prometheus.NewHistogramVec( prometheus.HistogramOpts{ Name: "apiserver_request_latencies", @@ -77,43 +84,59 @@ var ( // Register all metrics. 
func Register() { prometheus.MustRegister(requestCounter) + prometheus.MustRegister(longRunningRequestGauge) prometheus.MustRegister(requestLatencies) prometheus.MustRegister(requestLatenciesSummary) prometheus.MustRegister(responseSizes) } -// Monitor records a request to the apiserver endpoints that follow the Kubernetes API conventions. verb must be -// uppercase to be backwards compatible with existing monitoring tooling. -func Monitor(verb, resource, subresource, scope, client, contentType string, httpCode, respSize int, reqStart time.Time) { - elapsed := float64((time.Since(reqStart)) / time.Microsecond) - requestCounter.WithLabelValues(verb, resource, subresource, scope, client, contentType, codeToString(httpCode)).Inc() - requestLatencies.WithLabelValues(verb, resource, subresource, scope).Observe(elapsed) - requestLatenciesSummary.WithLabelValues(verb, resource, subresource, scope).Observe(elapsed) - // We are only interested in response sizes of read requests. - if verb == "GET" || verb == "LIST" { - responseSizes.WithLabelValues(verb, resource, subresource, scope).Observe(float64(respSize)) +// Record records a single request to the standard metrics endpoints. For use by handlers that perform their own +// processing. All API paths should use InstrumentRouteFunc implicitly. Use this instead of MonitorRequest if +// you already have a RequestInfo object. +func Record(req *http.Request, requestInfo *request.RequestInfo, contentType string, code int, responseSizeInBytes int, elapsed time.Duration) { + if requestInfo == nil { + requestInfo = &request.RequestInfo{Verb: req.Method, Path: req.URL.Path} + } + scope := cleanScope(requestInfo) + if requestInfo.IsResourceRequest { + MonitorRequest(req, strings.ToUpper(requestInfo.Verb), requestInfo.Resource, requestInfo.Subresource, contentType, scope, code, responseSizeInBytes, elapsed) + } else { + MonitorRequest(req, strings.ToUpper(requestInfo.Verb), "", requestInfo.Path, contentType, scope, code, responseSizeInBytes, elapsed) } } -// MonitorRequest handles standard transformations for client and the reported verb and then invokes Monitor to record -// a request. verb must be uppercase to be backwards compatible with existing monitoring tooling. -func MonitorRequest(request *http.Request, verb, resource, subresource, scope, contentType string, httpCode, respSize int, reqStart time.Time) { - reportedVerb := verb - if verb == "LIST" { - // see apimachinery/pkg/runtime/conversion.go Convert_Slice_string_To_bool - if values := request.URL.Query()["watch"]; len(values) > 0 { - if value := strings.ToLower(values[0]); value != "0" && value != "false" { - reportedVerb = "WATCH" - } - } +// RecordLongRunning tracks the execution of a long running request against the API server. It provides an accurate count +// of the total number of open long running requests. requestInfo may be nil if the caller is not in the normal request flow. 
+func RecordLongRunning(req *http.Request, requestInfo *request.RequestInfo, fn func()) { + if requestInfo == nil { + requestInfo = &request.RequestInfo{Verb: req.Method, Path: req.URL.Path} } - // normalize the legacy WATCHLIST to WATCH to ensure users aren't surprised by metrics - if verb == "WATCHLIST" { - reportedVerb = "WATCH" + var g prometheus.Gauge + scope := cleanScope(requestInfo) + reportedVerb := cleanVerb(strings.ToUpper(requestInfo.Verb), req) + if requestInfo.IsResourceRequest { + g = longRunningRequestGauge.WithLabelValues(reportedVerb, requestInfo.Resource, requestInfo.Subresource, scope) + } else { + g = longRunningRequestGauge.WithLabelValues(reportedVerb, "", requestInfo.Path, scope) } + g.Inc() + defer g.Dec() + fn() +} - client := cleanUserAgent(utilnet.GetHTTPClient(request)) - Monitor(reportedVerb, resource, subresource, scope, client, contentType, httpCode, respSize, reqStart) +// MonitorRequest handles standard transformations for client and the reported verb and then invokes Monitor to record +// a request. verb must be uppercase to be backwards compatible with existing monitoring tooling. +func MonitorRequest(req *http.Request, verb, resource, subresource, scope, contentType string, httpCode, respSize int, elapsed time.Duration) { + reportedVerb := cleanVerb(verb, req) + client := cleanUserAgent(utilnet.GetHTTPClient(req)) + elapsedMicroseconds := float64(elapsed / time.Microsecond) + requestCounter.WithLabelValues(reportedVerb, resource, subresource, scope, client, contentType, codeToString(httpCode)).Inc() + requestLatencies.WithLabelValues(reportedVerb, resource, subresource, scope).Observe(elapsedMicroseconds) + requestLatenciesSummary.WithLabelValues(reportedVerb, resource, subresource, scope).Observe(elapsedMicroseconds) + // We are only interested in response sizes of read requests. + if verb == "GET" || verb == "LIST" { + responseSizes.WithLabelValues(reportedVerb, resource, subresource, scope).Observe(float64(respSize)) + } } func Reset() { @@ -144,10 +167,41 @@ func InstrumentRouteFunc(verb, resource, subresource, scope string, routeFunc re routeFunc(request, response) - MonitorRequest(request.Request, verb, resource, subresource, scope, delegate.Header().Get("Content-Type"), delegate.Status(), delegate.ContentLength(), now) + MonitorRequest(request.Request, verb, resource, subresource, scope, delegate.Header().Get("Content-Type"), delegate.Status(), delegate.ContentLength(), time.Now().Sub(now)) }) } +func cleanScope(requestInfo *request.RequestInfo) string { + if requestInfo.Namespace != "" { + return "namespace" + } + if requestInfo.Name != "" { + return "resource" + } + if requestInfo.IsResourceRequest { + return "cluster" + } + // this is the empty scope + return "" +} + +func cleanVerb(verb string, request *http.Request) string { + reportedVerb := verb + if verb == "LIST" { + // see apimachinery/pkg/runtime/conversion.go Convert_Slice_string_To_bool + if values := request.URL.Query()["watch"]; len(values) > 0 { + if value := strings.ToLower(values[0]); value != "0" && value != "false" { + reportedVerb = "WATCH" + } + } + } + // normalize the legacy WATCHLIST to WATCH to ensure users aren't surprised by metrics + if verb == "WATCHLIST" { + reportedVerb = "WATCH" + } + return reportedVerb +} + func cleanUserAgent(ua string) string { // We collapse all "web browser"-type user agents into one "browser" to reduce metric cardinality. 
if strings.HasPrefix(ua, "Mozilla/") { diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/openapi/BUILD b/vendor/k8s.io/apiserver/pkg/endpoints/openapi/BUILD index 619e36e25821..0e972d7f9889 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/openapi/BUILD +++ b/vendor/k8s.io/apiserver/pkg/endpoints/openapi/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["openapi_test.go"], + importpath = "k8s.io/apiserver/pkg/endpoints/openapi", library = ":go_default_library", deps = [ "//vendor/github.com/go-openapi/spec:go_default_library", @@ -21,6 +22,7 @@ go_test( go_library( name = "go_default_library", srcs = ["openapi.go"], + importpath = "k8s.io/apiserver/pkg/endpoints/openapi", deps = [ "//vendor/github.com/emicklei/go-restful:go_default_library", "//vendor/github.com/go-openapi/spec:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/request/BUILD b/vendor/k8s.io/apiserver/pkg/endpoints/request/BUILD index a9fff53ccd7e..ca759c1b66b0 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/request/BUILD +++ b/vendor/k8s.io/apiserver/pkg/endpoints/request/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["requestinfo_test.go"], + importpath = "k8s.io/apiserver/pkg/endpoints/request", library = ":go_default_library", deps = ["//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library"], ) @@ -21,6 +22,7 @@ go_library( "requestcontext.go", "requestinfo.go", ], + importpath = "k8s.io/apiserver/pkg/endpoints/request", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/net/context:go_default_library", @@ -34,6 +36,7 @@ go_library( go_test( name = "go_default_xtest", srcs = ["context_test.go"], + importpath = "k8s.io/apiserver/pkg/endpoints/request_test", deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/features/BUILD b/vendor/k8s.io/apiserver/pkg/features/BUILD index 3b415effc454..1d45a428ec78 100644 --- a/vendor/k8s.io/apiserver/pkg/features/BUILD +++ b/vendor/k8s.io/apiserver/pkg/features/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["kube_features.go"], + importpath = "k8s.io/apiserver/pkg/features", deps = ["//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library"], ) diff --git a/vendor/k8s.io/apiserver/pkg/features/kube_features.go b/vendor/k8s.io/apiserver/pkg/features/kube_features.go index d5b52fd87586..57bab8b00242 100644 --- a/vendor/k8s.io/apiserver/pkg/features/kube_features.go +++ b/vendor/k8s.io/apiserver/pkg/features/kube_features.go @@ -58,6 +58,7 @@ const ( // owner: @smarterclayton // alpha: v1.8 + // beta: v1.9 // // Allow API clients to retrieve resource lists in chunks rather than // all at once. 
@@ -76,5 +77,5 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS AdvancedAuditing: {Default: true, PreRelease: utilfeature.Beta}, APIResponseCompression: {Default: false, PreRelease: utilfeature.Alpha}, Initializers: {Default: false, PreRelease: utilfeature.Alpha}, - APIListChunking: {Default: false, PreRelease: utilfeature.Alpha}, + APIListChunking: {Default: true, PreRelease: utilfeature.Beta}, } diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/BUILD b/vendor/k8s.io/apiserver/pkg/registry/generic/BUILD index 8d2212a4d8ad..43471c04376d 100644 --- a/vendor/k8s.io/apiserver/pkg/registry/generic/BUILD +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/BUILD @@ -13,6 +13,7 @@ go_library( "options.go", "storage_decorator.go", ], + importpath = "k8s.io/apiserver/pkg/registry/generic", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -38,6 +39,7 @@ filegroup( ":package-srcs", "//staging/src/k8s.io/apiserver/pkg/registry/generic/registry:all-srcs", "//staging/src/k8s.io/apiserver/pkg/registry/generic/rest:all-srcs", + "//staging/src/k8s.io/apiserver/pkg/registry/generic/testing:all-srcs", ], tags = ["automanaged"], ) diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/BUILD b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/BUILD index 2a397147f73e..02c6becc7331 100644 --- a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/BUILD +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/BUILD @@ -12,6 +12,7 @@ go_test( "decorated_watcher_test.go", "store_test.go", ], + importpath = "k8s.io/apiserver/pkg/registry/generic/registry", library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", @@ -23,6 +24,7 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", "//vendor/k8s.io/apimachinery/pkg/selection:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", @@ -53,6 +55,7 @@ go_library( "storage_factory.go", "store.go", ], + importpath = "k8s.io/apiserver/pkg/registry/generic/registry", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/net/context:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/storage_factory.go b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/storage_factory.go index c193f01cc2a8..c1ef906f66d8 100644 --- a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/storage_factory.go +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/storage_factory.go @@ -17,6 +17,8 @@ limitations under the License. package registry import ( + "sync" + "github.com/golang/glog" "k8s.io/apimachinery/pkg/runtime" @@ -30,7 +32,6 @@ import ( // Creates a cacher based given storageConfig. 
func StorageWithCacher(capacity int) generic.StorageDecorator { return func( - copier runtime.ObjectCopier, storageConfig *storagebackend.Config, objectType runtime.Object, resourcePrefix string, @@ -52,7 +53,6 @@ func StorageWithCacher(capacity int) generic.StorageDecorator { CacheCapacity: capacity, Storage: s, Versioner: etcdstorage.APIObjectVersioner{}, - Copier: copier, Type: objectType, ResourcePrefix: resourcePrefix, KeyFunc: keyFunc, @@ -67,6 +67,53 @@ func StorageWithCacher(capacity int) generic.StorageDecorator { d() } + // TODO : Remove RegisterStorageCleanup below when PR + // https://github.com/kubernetes/kubernetes/pull/50690 + // merges as that shuts down storage properly + RegisterStorageCleanup(destroyFunc) + return cacher, destroyFunc } } + +// TODO : Remove all the code below when PR +// https://github.com/kubernetes/kubernetes/pull/50690 +// merges as that shuts down storage properly +// HACK ALERT : Track the destroy methods to call them +// from the test harness. TrackStorageCleanup will be called +// only from the test harness, so Register/Cleanup will be +// no-op at runtime. + +var cleanupLock sync.Mutex +var cleanup []func() = nil + +func TrackStorageCleanup() { + cleanupLock.Lock() + defer cleanupLock.Unlock() + + if cleanup != nil { + panic("Conflicting storage tracking") + } + cleanup = make([]func(), 0) +} + +func RegisterStorageCleanup(fn func()) { + cleanupLock.Lock() + defer cleanupLock.Unlock() + + if cleanup == nil { + return + } + cleanup = append(cleanup, fn) +} + +func CleanupStorage() { + cleanupLock.Lock() + old := cleanup + cleanup = nil + cleanupLock.Unlock() + + for _, d := range old { + d() + } +} diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go index 0ec4f76f0586..db77f4efea84 100644 --- a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go @@ -76,9 +76,6 @@ type GenericStore interface { // // TODO: make the default exposed methods exactly match a generic RESTStorage type Store struct { - // Copier is used to make some storage caching decorators work - Copier runtime.ObjectCopier - // NewFunc returns a new instance of the type this registry returns for a // GET of a single object, e.g.: // @@ -151,11 +148,13 @@ type Store struct { // AfterCreate implements a further operation to run after a resource is // created and before it is decorated, optional. AfterCreate ObjectFunc + // UpdateStrategy implements resource-specific behavior during updates. UpdateStrategy rest.RESTUpdateStrategy // AfterUpdate implements a further operation to run after a resource is // updated and before it is decorated, optional. AfterUpdate ObjectFunc + // DeleteStrategy implements resource-specific behavior during deletion. DeleteStrategy rest.RESTDeleteStrategy // AfterDelete implements a further operation to run after a resource is @@ -306,10 +305,18 @@ func (e *Store) ListPredicate(ctx genericapirequest.Context, p storage.Selection } // Create inserts a new item according to the unique key from the object. 
-func (e *Store) Create(ctx genericapirequest.Context, obj runtime.Object, includeUninitialized bool) (runtime.Object, error) { +func (e *Store) Create(ctx genericapirequest.Context, obj runtime.Object, createValidation rest.ValidateObjectFunc, includeUninitialized bool) (runtime.Object, error) { if err := rest.BeforeCreate(e.CreateStrategy, ctx, obj); err != nil { return nil, err } + // at this point we have a fully formed object. It is time to call the validators that the apiserver + // handling chain wants to enforce. + if createValidation != nil { + if err := createValidation(obj.DeepCopyObject()); err != nil { + return nil, err + } + } + name, err := e.ObjectNameFunc(obj) if err != nil { return nil, err @@ -497,7 +504,7 @@ func (e *Store) deleteWithoutFinalizers(ctx genericapirequest.Context, name, key // Update performs an atomic update and set of the object. Returns the result of the update // or an error. If the registry allows create-on-update, the create flow will be executed. // A bool is returned along with the object and any errors, to indicate object creation. -func (e *Store) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) { +func (e *Store) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (runtime.Object, bool, error) { key, err := e.KeyFunc(ctx, name) if err != nil { return nil, false, err @@ -547,10 +554,18 @@ func (e *Store) Update(ctx genericapirequest.Context, name string, objInfo rest. if err := rest.BeforeCreate(e.CreateStrategy, ctx, obj); err != nil { return nil, nil, err } + // at this point we have a fully formed object. It is time to call the validators that the apiserver + // handling chain wants to enforce. + if createValidation != nil { + if err := createValidation(obj.DeepCopyObject()); err != nil { + return nil, nil, err + } + } ttl, err := e.calculateTTL(obj, 0, false) if err != nil { return nil, nil, err } + return obj, &ttl, nil } @@ -585,6 +600,13 @@ func (e *Store) Update(ctx genericapirequest.Context, name string, objInfo rest. if err := rest.BeforeUpdate(e.UpdateStrategy, ctx, obj, existing); err != nil { return nil, nil, err } + // at this point we have a fully formed object. It is time to call the validators that the apiserver + // handling chain wants to enforce. + if updateValidation != nil { + if err := updateValidation(obj.DeepCopyObject(), existing.DeepCopyObject()); err != nil { + return nil, nil, err + } + } if e.shouldDeleteDuringUpdate(ctx, key, obj, existing) { deleteObj = obj return nil, nil, errEmptiedFinalizers @@ -677,12 +699,17 @@ var ( // priority, there are three factors affect whether to add/remove the // FinalizerOrphanDependents: options, existing finalizers of the object, // and e.DeleteStrategy.DefaultGarbageCollectionPolicy. 
-func shouldOrphanDependents(e *Store, accessor metav1.Object, options *metav1.DeleteOptions) bool { - if gcStrategy, ok := e.DeleteStrategy.(rest.GarbageCollectionDeleteStrategy); ok { - if gcStrategy.DefaultGarbageCollectionPolicy() == rest.Unsupported { - // return false to indicate that we should NOT orphan - return false - } +func shouldOrphanDependents(ctx genericapirequest.Context, e *Store, accessor metav1.Object, options *metav1.DeleteOptions) bool { + // Get default GC policy from this REST object type + gcStrategy, ok := e.DeleteStrategy.(rest.GarbageCollectionDeleteStrategy) + var defaultGCPolicy rest.GarbageCollectionPolicy + if ok { + defaultGCPolicy = gcStrategy.DefaultGarbageCollectionPolicy(ctx) + } + + if defaultGCPolicy == rest.Unsupported { + // return false to indicate that we should NOT orphan + return false } // An explicit policy was set at deletion time, that overrides everything @@ -711,10 +738,8 @@ func shouldOrphanDependents(e *Store, accessor metav1.Object, options *metav1.De } // Get default orphan policy from this REST object type if it exists - if gcStrategy, ok := e.DeleteStrategy.(rest.GarbageCollectionDeleteStrategy); ok { - if gcStrategy.DefaultGarbageCollectionPolicy() == rest.OrphanDependents { - return true - } + if defaultGCPolicy == rest.OrphanDependents { + return true } return false } @@ -724,9 +749,9 @@ func shouldOrphanDependents(e *Store, accessor metav1.Object, options *metav1.De // priority, there are three factors affect whether to add/remove the // FinalizerDeleteDependents: options, existing finalizers of the object, and // e.DeleteStrategy.DefaultGarbageCollectionPolicy. -func shouldDeleteDependents(e *Store, accessor metav1.Object, options *metav1.DeleteOptions) bool { - // Get default orphan policy from this REST object type - if gcStrategy, ok := e.DeleteStrategy.(rest.GarbageCollectionDeleteStrategy); ok && gcStrategy.DefaultGarbageCollectionPolicy() == rest.Unsupported { +func shouldDeleteDependents(ctx genericapirequest.Context, e *Store, accessor metav1.Object, options *metav1.DeleteOptions) bool { + // Get default GC policy from this REST object type + if gcStrategy, ok := e.DeleteStrategy.(rest.GarbageCollectionDeleteStrategy); ok && gcStrategy.DefaultGarbageCollectionPolicy(ctx) == rest.Unsupported { // return false to indicate that we should NOT delete in foreground return false } @@ -767,12 +792,12 @@ func shouldDeleteDependents(e *Store, accessor metav1.Object, options *metav1.De // The finalizers returned are intended to be handled by the garbage collector. // If garbage collection is disabled for the store, this function returns false // to ensure finalizers aren't set which will never be cleared. -func deletionFinalizersForGarbageCollection(e *Store, accessor metav1.Object, options *metav1.DeleteOptions) (bool, []string) { +func deletionFinalizersForGarbageCollection(ctx genericapirequest.Context, e *Store, accessor metav1.Object, options *metav1.DeleteOptions) (bool, []string) { if !e.EnableGarbageCollection { return false, []string{} } - shouldOrphan := shouldOrphanDependents(e, accessor, options) - shouldDeleteDependentInForeground := shouldDeleteDependents(e, accessor, options) + shouldOrphan := shouldOrphanDependents(ctx, e, accessor, options) + shouldDeleteDependentInForeground := shouldDeleteDependents(ctx, e, accessor, options) newFinalizers := []string{} // first remove both finalizers, add them back if needed. 
@@ -858,7 +883,7 @@ func (e *Store) updateForGracefulDeletionAndFinalizers(ctx genericapirequest.Con if err != nil { return nil, err } - needsUpdate, newFinalizers := deletionFinalizersForGarbageCollection(e, existingAccessor, options) + needsUpdate, newFinalizers := deletionFinalizersForGarbageCollection(ctx, e, existingAccessor, options) if needsUpdate { existingAccessor.SetFinalizers(newFinalizers) } @@ -950,7 +975,7 @@ func (e *Store) Delete(ctx genericapirequest.Context, name string, options *meta // Handle combinations of graceful deletion and finalization by issuing // the correct updates. - shouldUpdateFinalizers, _ := deletionFinalizersForGarbageCollection(e, accessor, options) + shouldUpdateFinalizers, _ := deletionFinalizersForGarbageCollection(ctx, e, accessor, options) // TODO: remove the check, because we support no-op updates now. if graceful || pendingFinalizers || shouldUpdateFinalizers { err, ignoreNotFound, deleteImmediately, out, lastExisting = e.updateForGracefulDeletionAndFinalizers(ctx, name, key, options, preconditions, obj) @@ -1331,7 +1356,6 @@ func (e *Store) CompleteWithOptions(options *generic.StoreOptions) error { if e.Storage == nil { e.Storage, e.DestroyFunc = opts.Decorator( - e.Copier, opts.StorageConfig, e.NewFunc(), prefix, diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/rest/BUILD b/vendor/k8s.io/apiserver/pkg/registry/generic/rest/BUILD index 06fb0e46e081..2bf06aae705a 100644 --- a/vendor/k8s.io/apiserver/pkg/registry/generic/rest/BUILD +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/rest/BUILD @@ -12,6 +12,7 @@ go_test( "response_checker_test.go", "streamer_test.go", ], + importpath = "k8s.io/apiserver/pkg/registry/generic/rest", library = ":go_default_library", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", @@ -27,6 +28,7 @@ go_library( "response_checker.go", "streamer.go", ], + importpath = "k8s.io/apiserver/pkg/registry/generic/rest", deps = [ "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/storage_decorator.go b/vendor/k8s.io/apiserver/pkg/registry/generic/storage_decorator.go index 6c65230f35be..94a4794422c0 100644 --- a/vendor/k8s.io/apiserver/pkg/registry/generic/storage_decorator.go +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/storage_decorator.go @@ -27,7 +27,6 @@ import ( // StorageDecorator is a function signature for producing a storage.Interface // and an associated DestroyFunc from given parameters. type StorageDecorator func( - copier runtime.ObjectCopier, config *storagebackend.Config, objectType runtime.Object, resourcePrefix string, @@ -39,7 +38,6 @@ type StorageDecorator func( // UndecoratedStorage returns the given a new storage from the given config // without any decoration. 
func UndecoratedStorage( - copier runtime.ObjectCopier, config *storagebackend.Config, objectType runtime.Object, resourcePrefix string, diff --git a/vendor/k8s.io/apiserver/pkg/registry/rest/BUILD b/vendor/k8s.io/apiserver/pkg/registry/rest/BUILD index 860516b3bacd..e662a4ee7d7b 100644 --- a/vendor/k8s.io/apiserver/pkg/registry/rest/BUILD +++ b/vendor/k8s.io/apiserver/pkg/registry/rest/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["meta_test.go"], + importpath = "k8s.io/apiserver/pkg/registry/rest", library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", @@ -32,6 +33,7 @@ go_library( "update.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/apiserver/pkg/registry/rest", deps = [ "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", @@ -41,12 +43,12 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", "//vendor/k8s.io/apiserver/pkg/features:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/names:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/registry/rest/OWNERS b/vendor/k8s.io/apiserver/pkg/registry/rest/OWNERS index e9de724aa6b7..9d8627ad1243 100755 --- a/vendor/k8s.io/apiserver/pkg/registry/rest/OWNERS +++ b/vendor/k8s.io/apiserver/pkg/registry/rest/OWNERS @@ -22,7 +22,6 @@ reviewers: - euank - markturansky - fgrzadkowski -- satnam6502 - fabioy - ingvagabund - david-mcmahon diff --git a/vendor/k8s.io/apiserver/pkg/registry/rest/create.go b/vendor/k8s.io/apiserver/pkg/registry/rest/create.go index 55b628f03089..1f998332e9cf 100644 --- a/vendor/k8s.io/apiserver/pkg/registry/rest/create.go +++ b/vendor/k8s.io/apiserver/pkg/registry/rest/create.go @@ -25,6 +25,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/features" "k8s.io/apiserver/pkg/storage/names" @@ -151,3 +152,28 @@ type NamespaceScopedStrategy interface { // NamespaceScoped returns if the object must be in a namespace. 
NamespaceScoped() bool } + +// AdmissionToValidateObjectFunc converts validating admission to a rest validate object func +func AdmissionToValidateObjectFunc(admit admission.Interface, staticAttributes admission.Attributes) ValidateObjectFunc { + validatingAdmission, ok := admit.(admission.ValidationInterface) + if !ok { + return func(obj runtime.Object) error { return nil } + } + return func(obj runtime.Object) error { + finalAttributes := admission.NewAttributesRecord( + obj, + staticAttributes.GetOldObject(), + staticAttributes.GetKind(), + staticAttributes.GetNamespace(), + staticAttributes.GetName(), + staticAttributes.GetResource(), + staticAttributes.GetSubresource(), + staticAttributes.GetOperation(), + staticAttributes.GetUserInfo(), + ) + if !validatingAdmission.Handles(finalAttributes.GetOperation()) { + return nil + } + return validatingAdmission.Validate(finalAttributes) + } +} diff --git a/vendor/k8s.io/apiserver/pkg/registry/rest/delete.go b/vendor/k8s.io/apiserver/pkg/registry/rest/delete.go index d7782a279fcd..11a1474c8407 100644 --- a/vendor/k8s.io/apiserver/pkg/registry/rest/delete.go +++ b/vendor/k8s.io/apiserver/pkg/registry/rest/delete.go @@ -48,7 +48,7 @@ const ( // orphan dependents by default. type GarbageCollectionDeleteStrategy interface { // DefaultGarbageCollectionPolicy returns the default garbage collection behavior. - DefaultGarbageCollectionPolicy() GarbageCollectionPolicy + DefaultGarbageCollectionPolicy(ctx genericapirequest.Context) GarbageCollectionPolicy } // RESTGracefulDeleteStrategy must be implemented by the registry that supports diff --git a/vendor/k8s.io/apiserver/pkg/registry/rest/rest.go b/vendor/k8s.io/apiserver/pkg/registry/rest/rest.go index 42a3b0973c39..1155c8ade275 100644 --- a/vendor/k8s.io/apiserver/pkg/registry/rest/rest.go +++ b/vendor/k8s.io/apiserver/pkg/registry/rest/rest.go @@ -82,7 +82,7 @@ type CategoriesProvider interface { // This trumps KindProvider since it is capable of providing the information required. // TODO KindProvider (only used by federation) should be removed and replaced with this, but that presents greater risk late in 1.8. type GroupVersionKindProvider interface { - GroupVersionKind() schema.GroupVersionKind + GroupVersionKind(containingGV schema.GroupVersion) schema.GroupVersionKind } // Lister is an object that can retrieve resources that match the provided field and label criteria. @@ -192,7 +192,7 @@ type Creater interface { // Create creates a new version of a resource. If includeUninitialized is set, the object may be returned // without completing initialization. - Create(ctx genericapirequest.Context, obj runtime.Object, includeUninitialized bool) (runtime.Object, error) + Create(ctx genericapirequest.Context, obj runtime.Object, createValidation ValidateObjectFunc, includeUninitialized bool) (runtime.Object, error) } // NamedCreater is an object that can create an instance of a RESTful object using a name parameter. @@ -205,7 +205,7 @@ type NamedCreater interface { // This is needed for create operations on subresources which include the name of the parent // resource in the path. If includeUninitialized is set, the object may be returned without // completing initialization. 
- Create(ctx genericapirequest.Context, name string, obj runtime.Object, includeUninitialized bool) (runtime.Object, error) + Create(ctx genericapirequest.Context, name string, obj runtime.Object, createValidation ValidateObjectFunc, includeUninitialized bool) (runtime.Object, error) } // UpdatedObjectInfo provides information about an updated object to an Updater. @@ -221,6 +221,26 @@ type UpdatedObjectInfo interface { UpdatedObject(ctx genericapirequest.Context, oldObj runtime.Object) (newObj runtime.Object, err error) } +// ValidateObjectFunc is a function to act on a given object. An error may be returned +// if the hook cannot be completed. An ObjectFunc may NOT transform the provided +// object. +type ValidateObjectFunc func(obj runtime.Object) error + +// ValidateAllObjectFunc is a "admit everything" instance of ValidateObjectFunc. +func ValidateAllObjectFunc(obj runtime.Object) error { + return nil +} + +// ValidateObjectUpdateFunc is a function to act on a given object and its predecessor. +// An error may be returned if the hook cannot be completed. An UpdateObjectFunc +// may NOT transform the provided object. +type ValidateObjectUpdateFunc func(obj, old runtime.Object) error + +// ValidateAllObjectUpdateFunc is a "admit everything" instance of ValidateObjectUpdateFunc. +func ValidateAllObjectUpdateFunc(obj, old runtime.Object) error { + return nil +} + // Updater is an object that can update an instance of a RESTful object. type Updater interface { // New returns an empty object that can be used with Update after request data has been put into it. @@ -230,14 +250,14 @@ type Updater interface { // Update finds a resource in the storage and updates it. Some implementations // may allow updates creates the object - they should set the created boolean // to true. - Update(ctx genericapirequest.Context, name string, objInfo UpdatedObjectInfo) (runtime.Object, bool, error) + Update(ctx genericapirequest.Context, name string, objInfo UpdatedObjectInfo, createValidation ValidateObjectFunc, updateValidation ValidateObjectUpdateFunc) (runtime.Object, bool, error) } // CreaterUpdater is a storage object that must support both create and update. // Go prevents embedded interfaces that implement the same method. type CreaterUpdater interface { Creater - Update(ctx genericapirequest.Context, name string, objInfo UpdatedObjectInfo) (runtime.Object, bool, error) + Update(ctx genericapirequest.Context, name string, objInfo UpdatedObjectInfo, createValidation ValidateObjectFunc, updateValidation ValidateObjectUpdateFunc) (runtime.Object, bool, error) } // CreaterUpdater must satisfy the Updater interface. 
diff --git a/vendor/k8s.io/apiserver/pkg/registry/rest/table.go b/vendor/k8s.io/apiserver/pkg/registry/rest/table.go index 087421e9f3d5..4bc1b36685ef 100644 --- a/vendor/k8s.io/apiserver/pkg/registry/rest/table.go +++ b/vendor/k8s.io/apiserver/pkg/registry/rest/table.go @@ -63,6 +63,16 @@ func (c defaultTableConvertor) ConvertToTable(ctx genericapirequest.Context, obj return nil, err } } + if m, err := meta.ListAccessor(object); err == nil { + table.ResourceVersion = m.GetResourceVersion() + table.SelfLink = m.GetSelfLink() + table.Continue = m.GetContinue() + } else { + if m, err := meta.CommonAccessor(object); err == nil { + table.ResourceVersion = m.GetResourceVersion() + table.SelfLink = m.GetSelfLink() + } + } table.ColumnDefinitions = []metav1alpha1.TableColumnDefinition{ {Name: "Name", Type: "string", Format: "name", Description: swaggerMetadataDescriptions["name"]}, {Name: "Created At", Type: "date", Description: swaggerMetadataDescriptions["creationTimestamp"]}, diff --git a/vendor/k8s.io/apiserver/pkg/registry/rest/update.go b/vendor/k8s.io/apiserver/pkg/registry/rest/update.go index cc80db19575b..aaee5800c296 100644 --- a/vendor/k8s.io/apiserver/pkg/registry/rest/update.go +++ b/vendor/k8s.io/apiserver/pkg/registry/rest/update.go @@ -26,6 +26,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/features" utilfeature "k8s.io/apiserver/pkg/util/feature" @@ -136,19 +137,14 @@ type defaultUpdatedObjectInfo struct { // obj is the updated object obj runtime.Object - // copier makes a copy of the object before returning it. - // this allows repeated calls to UpdatedObject() to return - // pristine data, even if the returned value is mutated. - copier runtime.ObjectCopier - // transformers is an optional list of transforming functions that modify or // replace obj using information from the context, old object, or other sources. transformers []TransformFunc } // DefaultUpdatedObjectInfo returns an UpdatedObjectInfo impl based on the specified object. -func DefaultUpdatedObjectInfo(obj runtime.Object, copier runtime.ObjectCopier, transformers ...TransformFunc) UpdatedObjectInfo { - return &defaultUpdatedObjectInfo{obj, copier, transformers} +func DefaultUpdatedObjectInfo(obj runtime.Object, transformers ...TransformFunc) UpdatedObjectInfo { + return &defaultUpdatedObjectInfo{obj, transformers} } // Preconditions satisfies the UpdatedObjectInfo interface. 
@@ -234,3 +230,28 @@ func (i *wrappedUpdatedObjectInfo) UpdatedObject(ctx genericapirequest.Context, return newObj, nil } + +// AdmissionToValidateObjectUpdateFunc converts validating admission to a rest validate object update func +func AdmissionToValidateObjectUpdateFunc(admit admission.Interface, staticAttributes admission.Attributes) ValidateObjectUpdateFunc { + validatingAdmission, ok := admit.(admission.ValidationInterface) + if !ok { + return func(obj, old runtime.Object) error { return nil } + } + return func(obj, old runtime.Object) error { + finalAttributes := admission.NewAttributesRecord( + obj, + old, + staticAttributes.GetKind(), + staticAttributes.GetNamespace(), + staticAttributes.GetName(), + staticAttributes.GetResource(), + staticAttributes.GetSubresource(), + staticAttributes.GetOperation(), + staticAttributes.GetUserInfo(), + ) + if !validatingAdmission.Handles(finalAttributes.GetOperation()) { + return nil + } + return validatingAdmission.Validate(finalAttributes) + } +} diff --git a/vendor/k8s.io/apiserver/pkg/registry/rest/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/registry/rest/zz_generated.deepcopy.go index fbc6c0558547..fd5212b3f344 100644 --- a/vendor/k8s.io/apiserver/pkg/registry/rest/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apiserver/pkg/registry/rest/zz_generated.deepcopy.go @@ -21,23 +21,9 @@ limitations under the License. package rest import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { - return []conversion.GeneratedDeepCopyFunc{ - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConnectRequest).DeepCopyInto(out.(*ConnectRequest)) - return nil - }, InType: reflect.TypeOf(&ConnectRequest{})}, - } -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ConnectRequest) DeepCopyInto(out *ConnectRequest) { *out = *in diff --git a/vendor/k8s.io/apiserver/pkg/server/BUILD b/vendor/k8s.io/apiserver/pkg/server/BUILD index e4ae0d1783a3..40bc126b1949 100644 --- a/vendor/k8s.io/apiserver/pkg/server/BUILD +++ b/vendor/k8s.io/apiserver/pkg/server/BUILD @@ -13,6 +13,7 @@ go_test( "config_test.go", "genericapiserver_test.go", ], + importpath = "k8s.io/apiserver/pkg/server", library = ":go_default_library", deps = [ "//vendor/github.com/stretchr/testify/assert:go_default_library", @@ -29,8 +30,10 @@ go_test( "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/discovery:go_default_library", + "//vendor/k8s.io/apiserver/pkg/endpoints/filters:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", + "//vendor/k8s.io/apiserver/pkg/server/filters:go_default_library", "//vendor/k8s.io/apiserver/pkg/server/healthz:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/etcd/testing:go_default_library", "//vendor/k8s.io/client-go/informers:go_default_library", @@ -60,6 +63,7 @@ go_library( ], "//conditions:default": [], }), + importpath = "k8s.io/apiserver/pkg/server", deps = [ "//vendor/github.com/coreos/go-systemd/daemon:go_default_library", "//vendor/github.com/emicklei/go-restful:go_default_library", @@ -67,7 +71,6 @@ go_library( "//vendor/github.com/go-openapi/spec:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/pborman/uuid:go_default_library", - "//vendor/github.com/pkg/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apimachinery:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", @@ -75,12 +78,17 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/waitgroup:go_default_library", "//vendor/k8s.io/apimachinery/pkg/version:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission/plugin/initialization:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating:go_default_library", "//vendor/k8s.io/apiserver/pkg/apis/apiserver/install:go_default_library", "//vendor/k8s.io/apiserver/pkg/audit:go_default_library", "//vendor/k8s.io/apiserver/pkg/audit/policy:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/server/config.go b/vendor/k8s.io/apiserver/pkg/server/config.go index 69c0e4497eb3..877071ad3b21 100644 --- a/vendor/k8s.io/apiserver/pkg/server/config.go +++ b/vendor/k8s.io/apiserver/pkg/server/config.go @@ -36,6 +36,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" 
"k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apimachinery/pkg/util/sets" + utilwaitgroup "k8s.io/apimachinery/pkg/util/waitgroup" "k8s.io/apimachinery/pkg/version" "k8s.io/apiserver/pkg/admission" "k8s.io/apiserver/pkg/audit" @@ -67,7 +68,7 @@ import ( ) const ( - // DefaultLegacyAPIPrefix is where the the legacy APIs will be located. + // DefaultLegacyAPIPrefix is where the legacy APIs will be located. DefaultLegacyAPIPrefix = "/api" // APIGroupPrefix is where non-legacy API group will be located. @@ -128,6 +129,8 @@ type Config struct { // BuildHandlerChainFunc allows you to build custom handler chains by decorating the apiHandler. BuildHandlerChainFunc func(apiHandler http.Handler, c *Config) (secure http.Handler) + // HandlerChainWaitGroup allows you to wait for all chain handlers exit after the server shutdown. + HandlerChainWaitGroup *utilwaitgroup.SafeWaitGroup // DiscoveryAddresses is used to build the IPs pass to discovery. If nil, the ExternalAddress is // always reported DiscoveryAddresses discovery.Addresses @@ -192,14 +195,16 @@ type RecommendedConfig struct { // RecommendedOptions.CoreAPI.ApplyTo called by RecommendedOptions.ApplyTo. It uses an in-cluster client config // by default, or the kubeconfig given with kubeconfig command line flag. SharedInformerFactory informers.SharedInformerFactory + + // ClientConfig holds the kubernetes client configuration. + // This value is set by RecommendedOptions.CoreAPI.ApplyTo called by RecommendedOptions.ApplyTo. + // By default in-cluster client config is used. + ClientConfig *restclient.Config } type SecureServingInfo struct { - // BindAddress is the ip:port to serve on - BindAddress string - // BindNetwork is the type of network to bind to - defaults to "tcp", accepts "tcp", - // "tcp4", and "tcp6". - BindNetwork string + // Listener is the secure server network listener. + Listener net.Listener // Cert is the main server cert which is used if SNI does not match. Cert must be non-nil and is // allowed to be in SNICerts. 
@@ -231,6 +236,7 @@ func NewConfig(codecs serializer.CodecFactory) *Config { ReadWritePort: 443, RequestContextMapper: apirequest.NewRequestContextMapper(), BuildHandlerChainFunc: DefaultBuildHandlerChain, + HandlerChainWaitGroup: new(utilwaitgroup.SafeWaitGroup), LegacyAPIGroupPrefixes: sets.NewString(DefaultLegacyAPIPrefix), DisabledPostStartHooks: sets.NewString(), HealthzChecks: []healthz.HealthzChecker{healthz.PingHealthz}, @@ -441,8 +447,10 @@ func (c completedConfig) New(name string, delegationTarget DelegationTarget) (*G Serializer: c.Serializer, AuditBackend: c.AuditBackend, delegationTarget: delegationTarget, + HandlerChainWaitGroup: c.HandlerChainWaitGroup, minRequestTimeout: time.Duration(c.MinRequestTimeout) * time.Second, + ShutdownTimeout: c.RequestTimeout, SecureServingInfo: c.SecureServingInfo, ExternalAddress: c.ExternalAddress, @@ -455,6 +463,7 @@ func (c completedConfig) New(name string, delegationTarget DelegationTarget) (*G openAPIConfig: c.OpenAPIConfig, postStartHooks: map[string]postStartHookEntry{}, + preShutdownHooks: map[string]preShutdownHookEntry{}, disabledPostStartHooks: c.DisabledPostStartHooks, healthzChecks: c.HealthzChecks, @@ -468,8 +477,12 @@ func (c completedConfig) New(name string, delegationTarget DelegationTarget) (*G s.postStartHooks[k] = v } + for k, v := range delegationTarget.PreShutdownHooks() { + s.preShutdownHooks[k] = v + } + genericApiServerHookName := "generic-apiserver-start-informers" - if c.SharedInformerFactory != nil && !s.isHookRegistered(genericApiServerHookName) { + if c.SharedInformerFactory != nil && !s.isPostStartHookRegistered(genericApiServerHookName) { err := s.AddPostStartHook(genericApiServerHookName, func(context PostStartHookContext) error { c.SharedInformerFactory.Start(context.StopCh) return nil @@ -478,6 +491,7 @@ func (c completedConfig) New(name string, delegationTarget DelegationTarget) (*G return nil, err } } + for _, delegateCheck := range delegationTarget.HealthzChecks() { skip := false for _, existingCheck := range c.HealthzChecks { @@ -525,6 +539,7 @@ func DefaultBuildHandlerChain(apiHandler http.Handler, c *Config) http.Handler { handler = genericapifilters.WithAuthentication(handler, c.RequestContextMapper, c.Authenticator, failedHandler) handler = genericfilters.WithCORS(handler, c.CorsAllowedOriginList, nil, nil, nil, "true") handler = genericfilters.WithTimeoutForNonLongRunningRequests(handler, c.RequestContextMapper, c.LongRunningFunc, c.RequestTimeout) + handler = genericfilters.WithWaitGroup(handler, c.RequestContextMapper, c.LongRunningFunc, c.HandlerChainWaitGroup) handler = genericapifilters.WithRequestInfo(handler, c.RequestInfoResolver, c.RequestContextMapper) handler = apirequest.WithRequestContext(handler, c.RequestContextMapper) handler = genericfilters.WithPanicRecovery(handler) diff --git a/vendor/k8s.io/apiserver/pkg/server/config_selfclient.go b/vendor/k8s.io/apiserver/pkg/server/config_selfclient.go index 5e37c1ad267e..bf09161c9fcb 100644 --- a/vendor/k8s.io/apiserver/pkg/server/config_selfclient.go +++ b/vendor/k8s.io/apiserver/pkg/server/config_selfclient.go @@ -32,7 +32,7 @@ func (s *SecureServingInfo) NewLoopbackClientConfig(token string, loopbackCert [ return nil, nil } - host, port, err := LoopbackHostPort(s.BindAddress) + host, port, err := LoopbackHostPort(s.Listener.Addr().String()) if err != nil { return nil, err } @@ -64,7 +64,7 @@ func LoopbackHostPort(bindAddress string) (string, string, error) { } // Value is expected to be an IP or DNS name, not "0.0.0.0". 
- if host == "0.0.0.0" { + if host == "0.0.0.0" || host == "::" { host = "localhost" // Get ip of local interface, but fall back to "localhost". // Note that "localhost" is resolved with the external nameserver first with Go's stdlib. diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/BUILD b/vendor/k8s.io/apiserver/pkg/server/filters/BUILD index a4051a5edc37..bf72edc372fd 100644 --- a/vendor/k8s.io/apiserver/pkg/server/filters/BUILD +++ b/vendor/k8s.io/apiserver/pkg/server/filters/BUILD @@ -14,6 +14,7 @@ go_test( "maxinflight_test.go", "timeout_test.go", ], + importpath = "k8s.io/apiserver/pkg/server/filters", library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", @@ -36,14 +37,17 @@ go_library( "longrunning.go", "maxinflight.go", "timeout.go", + "waitgroup.go", "wrap.go", ], + importpath = "k8s.io/apiserver/pkg/server/filters", deps = [ "//vendor/github.com/emicklei/go-restful:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/waitgroup:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/metrics:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/maxinflight.go b/vendor/k8s.io/apiserver/pkg/server/filters/maxinflight.go index a0208ed034df..47ed949131d6 100644 --- a/vendor/k8s.io/apiserver/pkg/server/filters/maxinflight.go +++ b/vendor/k8s.io/apiserver/pkg/server/filters/maxinflight.go @@ -19,14 +19,11 @@ package filters import ( "fmt" "net/http" - "strings" - "time" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/endpoints/metrics" apirequest "k8s.io/apiserver/pkg/endpoints/request" - genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "github.com/golang/glog" ) @@ -48,7 +45,7 @@ func WithMaxInFlightLimit( handler http.Handler, nonMutatingLimit int, mutatingLimit int, - requestContextMapper genericapirequest.RequestContextMapper, + requestContextMapper apirequest.RequestContextMapper, longRunningRequestCheck apirequest.LongRunningRequestCheck, ) http.Handler { if nonMutatingLimit == 0 && mutatingLimit == 0 { @@ -108,18 +105,7 @@ func WithMaxInFlightLimit( } } } - scope := "cluster" - if requestInfo.Namespace != "" { - scope = "namespace" - } - if requestInfo.Name != "" { - scope = "resource" - } - if requestInfo.IsResourceRequest { - metrics.MonitorRequest(r, strings.ToUpper(requestInfo.Verb), requestInfo.Resource, requestInfo.Subresource, "", scope, http.StatusTooManyRequests, 0, time.Now()) - } else { - metrics.MonitorRequest(r, strings.ToUpper(requestInfo.Verb), "", requestInfo.Path, "", scope, http.StatusTooManyRequests, 0, time.Now()) - } + metrics.Record(r, requestInfo, "", http.StatusTooManyRequests, 0, 0) tooManyRequests(r, w) } } diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/timeout.go b/vendor/k8s.io/apiserver/pkg/server/filters/timeout.go index 5bf10d9fca55..0e67da25293c 100644 --- a/vendor/k8s.io/apiserver/pkg/server/filters/timeout.go +++ b/vendor/k8s.io/apiserver/pkg/server/filters/timeout.go @@ -22,7 +22,6 @@ import ( "fmt" "net" "net/http" - "strings" "sync" "time" @@ -55,20 +54,8 @@ func 
WithTimeoutForNonLongRunningRequests(handler http.Handler, requestContextMa if longRunning(req, requestInfo) { return nil, nil, nil } - now := time.Now() metricFn := func() { - scope := "cluster" - if requestInfo.Namespace != "" { - scope = "namespace" - } - if requestInfo.Name != "" { - scope = "resource" - } - if requestInfo.IsResourceRequest { - metrics.MonitorRequest(req, strings.ToUpper(requestInfo.Verb), requestInfo.Resource, requestInfo.Subresource, "", scope, http.StatusGatewayTimeout, 0, now) - } else { - metrics.MonitorRequest(req, strings.ToUpper(requestInfo.Verb), "", requestInfo.Path, "", scope, http.StatusGatewayTimeout, 0, now) - } + metrics.Record(req, requestInfo, "", http.StatusGatewayTimeout, 0, 0) } return time.After(timeout), metricFn, apierrors.NewTimeoutError(fmt.Sprintf("request did not complete within %s", timeout), 0) } diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/waitgroup.go b/vendor/k8s.io/apiserver/pkg/server/filters/waitgroup.go new file mode 100644 index 000000000000..be73a2c9d973 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/filters/waitgroup.go @@ -0,0 +1,53 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filters + +import ( + "net/http" + + utilwaitgroup "k8s.io/apimachinery/pkg/util/waitgroup" + apirequest "k8s.io/apiserver/pkg/endpoints/request" +) + +// WithWaitGroup adds all non long-running requests to wait group, which is used for graceful shutdown. 
+func WithWaitGroup(handler http.Handler, requestContextMapper apirequest.RequestContextMapper, longRunning apirequest.LongRunningRequestCheck, wg *utilwaitgroup.SafeWaitGroup) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + ctx, ok := requestContextMapper.Get(req) + if !ok { + // if this happens, the handler chain isn't setup correctly because there is no context mapper + handler.ServeHTTP(w, req) + return + } + + requestInfo, ok := apirequest.RequestInfoFrom(ctx) + if !ok { + // if this happens, the handler chain isn't setup correctly because there is no request info + handler.ServeHTTP(w, req) + return + } + + if !longRunning(req, requestInfo) { + if err := wg.Add(1); err != nil { + http.Error(w, "Apisever is shutting down.", http.StatusInternalServerError) + return + } + defer wg.Done() + } + + handler.ServeHTTP(w, req) + }) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/genericapiserver.go b/vendor/k8s.io/apiserver/pkg/server/genericapiserver.go index 9be725d4e269..8235b26540f5 100644 --- a/vendor/k8s.io/apiserver/pkg/server/genericapiserver.go +++ b/vendor/k8s.io/apiserver/pkg/server/genericapiserver.go @@ -34,6 +34,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apimachinery/pkg/util/sets" + utilwaitgroup "k8s.io/apimachinery/pkg/util/waitgroup" "k8s.io/apiserver/pkg/admission" "k8s.io/apiserver/pkg/audit" genericapi "k8s.io/apiserver/pkg/endpoints" @@ -70,12 +71,6 @@ type APIGroupInfo struct { NegotiatedSerializer runtime.NegotiatedSerializer // ParameterCodec performs conversions for query parameters passed to API calls ParameterCodec runtime.ParameterCodec - - // SubresourceGroupVersionKind contains the GroupVersionKind overrides for each subresource that is - // accessible from this API group version. The GroupVersionKind is that of the external version of - // the subresource. The key of this map should be the path of the subresource. The keys here should - // match the keys in the Storage map above for subresources. - SubresourceGroupVersionKind map[string]schema.GroupVersionKind } // GenericAPIServer contains state for a Kubernetes cluster api server. @@ -89,6 +84,10 @@ type GenericAPIServer struct { // minRequestTimeout is how short the request timeout can be. This is used to build the RESTHandler minRequestTimeout time.Duration + // ShutdownTimeout is the timeout used for server shutdown. This specifies the timeout before server + // gracefully shutdown returns. + ShutdownTimeout time.Duration + // legacyAPIGroupPrefixes is used to set up URL parsing for authorization and for validating requests // to InstallLegacyAPIGroup legacyAPIGroupPrefixes sets.String @@ -101,9 +100,6 @@ type GenericAPIServer struct { SecureServingInfo *SecureServingInfo - // numerical ports, set after listening - effectiveSecurePort int - // ExternalAddress is the address (hostname or IP and port) that should be used in // external (public internet) URLs for this GenericAPIServer. 
ExternalAddress string @@ -134,6 +130,10 @@ type GenericAPIServer struct { postStartHooksCalled bool disabledPostStartHooks sets.String + preShutdownHookLock sync.Mutex + preShutdownHooks map[string]preShutdownHookEntry + preShutdownHooksCalled bool + // healthz checks healthzLock sync.Mutex healthzChecks []healthz.HealthzChecker @@ -148,6 +148,9 @@ type GenericAPIServer struct { // delegationTarget is the next delegate in the chain or nil delegationTarget DelegationTarget + + // HandlerChainWaitGroup allows you to wait for all chain handlers finish after the server shutdown. + HandlerChainWaitGroup *utilwaitgroup.SafeWaitGroup } // DelegationTarget is an interface which allows for composition of API servers with top level handling that works @@ -163,6 +166,9 @@ type DelegationTarget interface { // PostStartHooks returns the post-start hooks that need to be combined PostStartHooks() map[string]postStartHookEntry + // PreShutdownHooks returns the pre-stop hooks that need to be combined + PreShutdownHooks() map[string]preShutdownHookEntry + // HealthzChecks returns the healthz checks that need to be combined HealthzChecks() []healthz.HealthzChecker @@ -180,6 +186,9 @@ func (s *GenericAPIServer) UnprotectedHandler() http.Handler { func (s *GenericAPIServer) PostStartHooks() map[string]postStartHookEntry { return s.postStartHooks } +func (s *GenericAPIServer) PreShutdownHooks() map[string]preShutdownHookEntry { + return s.preShutdownHooks +} func (s *GenericAPIServer) HealthzChecks() []healthz.HealthzChecker { return s.healthzChecks } @@ -205,6 +214,9 @@ func (s emptyDelegate) UnprotectedHandler() http.Handler { func (s emptyDelegate) PostStartHooks() map[string]postStartHookEntry { return map[string]postStartHookEntry{} } +func (s emptyDelegate) PreShutdownHooks() map[string]preShutdownHookEntry { + return map[string]preShutdownHookEntry{} +} func (s emptyDelegate) HealthzChecks() []healthz.HealthzChecker { return []healthz.HealthzChecker{} } @@ -253,6 +265,14 @@ func (s *GenericAPIServer) PrepareRun() preparedGenericAPIServer { // Run spawns the secure http server. It only returns if stopCh is closed // or the secure port cannot be listened on initially. func (s preparedGenericAPIServer) Run(stopCh <-chan struct{}) error { + // Register audit backend preShutdownHook. + if s.AuditBackend != nil { + s.AddPreShutdownHook("audit-backend", func() error { + s.AuditBackend.Shutdown() + return nil + }) + } + err := s.NonBlockingRun(stopCh) if err != nil { return err @@ -260,16 +280,32 @@ func (s preparedGenericAPIServer) Run(stopCh <-chan struct{}) error { <-stopCh - if s.GenericAPIServer.AuditBackend != nil { - s.GenericAPIServer.AuditBackend.Shutdown() + err = s.RunPreShutdownHooks() + if err != nil { + return err } + // Wait for all requests to finish, which are bounded by the RequestTimeout variable. + s.HandlerChainWaitGroup.Wait() + return nil } // NonBlockingRun spawns the secure http server. An error is // returned if the secure port cannot be listened on. func (s preparedGenericAPIServer) NonBlockingRun(stopCh <-chan struct{}) error { + // Use an stop channel to allow graceful shutdown without dropping audit events + // after http server shutdown. + auditStopCh := make(chan struct{}) + + // Start the audit backend before any request comes in. This means we must call Backend.Run + // before http server start serving. Otherwise the Backend.ProcessEvents call might block. 
+ if s.AuditBackend != nil { + if err := s.AuditBackend.Run(auditStopCh); err != nil { + return fmt.Errorf("failed to run the audit backend: %v", err) + } + } + // Use an internal stop channel to allow cleanup of the listeners on error. internalStopCh := make(chan struct{}) @@ -286,16 +322,10 @@ func (s preparedGenericAPIServer) NonBlockingRun(stopCh <-chan struct{}) error { go func() { <-stopCh close(internalStopCh) + s.HandlerChainWaitGroup.Wait() + close(auditStopCh) }() - // Start the audit backend before any request comes in. This means we cannot turn it into a - // post start hook because without calling Backend.Run the Backend.ProcessEvents call might block. - if s.AuditBackend != nil { - if err := s.AuditBackend.Run(stopCh); err != nil { - return fmt.Errorf("failed to run the audit backend: %v", err) - } - } - s.RunPostStartHooks(stopCh) if _, err := systemd.SdNotify(true, "READY=1\n"); err != nil { @@ -305,11 +335,6 @@ func (s preparedGenericAPIServer) NonBlockingRun(stopCh <-chan struct{}) error { return nil } -// EffectiveSecurePort returns the secure port we bound to. -func (s *GenericAPIServer) EffectiveSecurePort() int { - return s.effectiveSecurePort -} - // installAPIResources is a private method for installing the REST storage backing each api groupversionresource func (s *GenericAPIServer) installAPIResources(apiPrefix string, apiGroupInfo *APIGroupInfo) error { for _, groupVersion := range apiGroupInfo.GroupMeta.GroupVersions { @@ -416,12 +441,10 @@ func (s *GenericAPIServer) newAPIGroupVersion(apiGroupInfo *APIGroupInfo, groupV Creater: apiGroupInfo.Scheme, Convertor: apiGroupInfo.Scheme, UnsafeConvertor: runtime.UnsafeObjectConvertor(apiGroupInfo.Scheme), - Copier: apiGroupInfo.Scheme, Defaulter: apiGroupInfo.Scheme, Typer: apiGroupInfo.Scheme, - SubresourceGroupVersionKind: apiGroupInfo.SubresourceGroupVersionKind, - Linker: apiGroupInfo.GroupMeta.SelfLinker, - Mapper: apiGroupInfo.GroupMeta.RESTMapper, + Linker: apiGroupInfo.GroupMeta.SelfLinker, + Mapper: apiGroupInfo.GroupMeta.RESTMapper, Admit: s.admissionControl, Context: s.RequestContextMapper(), diff --git a/vendor/k8s.io/apiserver/pkg/server/handler.go b/vendor/k8s.io/apiserver/pkg/server/handler.go index 48d1408a45cb..5368e2e0fbff 100644 --- a/vendor/k8s.io/apiserver/pkg/server/handler.go +++ b/vendor/k8s.io/apiserver/pkg/server/handler.go @@ -62,7 +62,7 @@ type APIServerHandler struct { // which we don't fit into and it still muddies up swagger. Trying to switch the webservices into a route doesn't work because the // containing webservice faces all the same problems listed above. // This leads to the crazy thing done here. Our mux does what we need, so we'll place it in front of gorestful. It will introspect to - // decide if the the route is likely to be handled by goresful and route there if needed. Otherwise, it goes to PostGoRestful mux in + // decide if the route is likely to be handled by goresful and route there if needed. Otherwise, it goes to PostGoRestful mux in // order to handle "normal" paths and delegation. Hopefully no API consumers will ever have to deal with this level of detail. I think // we should consider completely removing gorestful. // Other servers should only use this opaquely to delegate to an API server. 
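Taken together, the genericapiserver changes above give Run a fixed shutdown order: wait for stopCh, run the registered pre-shutdown hooks (the audit backend registers one), then wait for in-flight requests to drain via the handler-chain wait group. A standard-library stand-in for that sequence, with hypothetical hook and function names, just to make the ordering explicit:

```go
package main

import (
	"fmt"
	"sync"
)

// run is a standard-library stand-in for the shutdown ordering: wait for the
// stop signal, run pre-shutdown hooks, then drain in-flight requests.
func run(stopCh <-chan struct{}, preShutdownHooks map[string]func() error, inflight *sync.WaitGroup) error {
	<-stopCh
	for name, hook := range preShutdownHooks {
		if err := hook(); err != nil {
			return fmt.Errorf("PreShutdownHook %q failed: %v", name, err)
		}
	}
	inflight.Wait()
	return nil
}

func main() {
	stopCh := make(chan struct{})
	var inflight sync.WaitGroup

	hooks := map[string]func() error{
		"audit-backend": func() error { fmt.Println("flushing buffered audit events"); return nil },
	}

	close(stopCh) // simulate the server being told to stop
	if err := run(stopCh, hooks, &inflight); err != nil {
		fmt.Println("shutdown error:", err)
		return
	}
	fmt.Println("in-flight requests drained, safe to exit")
}
```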
diff --git a/vendor/k8s.io/apiserver/pkg/server/healthz/BUILD b/vendor/k8s.io/apiserver/pkg/server/healthz/BUILD index ae2d20238bc5..8b3901bb8b21 100644 --- a/vendor/k8s.io/apiserver/pkg/server/healthz/BUILD +++ b/vendor/k8s.io/apiserver/pkg/server/healthz/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["healthz_test.go"], + importpath = "k8s.io/apiserver/pkg/server/healthz", library = ":go_default_library", ) @@ -18,6 +19,7 @@ go_library( "doc.go", "healthz.go", ], + importpath = "k8s.io/apiserver/pkg/server/healthz", deps = ["//vendor/github.com/golang/glog:go_default_library"], ) diff --git a/vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go b/vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go index d5dbb5ccdd7a..bd8bfe7b53d9 100644 --- a/vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go +++ b/vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go @@ -106,9 +106,10 @@ func handleRootHealthz(checks ...HealthzChecker) http.HandlerFunc { failed := false var verboseOut bytes.Buffer for _, check := range checks { - if check.Check(r) != nil { + if err := check.Check(r); err != nil { // don't include the error since this endpoint is public. If someone wants more detail // they should have explicit permission to the detailed checks. + glog.V(6).Infof("healthz check %v failed: %v", check.Name(), err) fmt.Fprintf(&verboseOut, "[-]%v failed: reason withheld\n", check.Name()) failed = true } else { diff --git a/vendor/k8s.io/apiserver/pkg/server/hooks.go b/vendor/k8s.io/apiserver/pkg/server/hooks.go index a190f5622022..ccf8ee17ad02 100644 --- a/vendor/k8s.io/apiserver/pkg/server/hooks.go +++ b/vendor/k8s.io/apiserver/pkg/server/hooks.go @@ -23,6 +23,7 @@ import ( "github.com/golang/glog" + utilerrors "k8s.io/apimachinery/pkg/util/errors" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apiserver/pkg/server/healthz" restclient "k8s.io/client-go/rest" @@ -39,6 +40,9 @@ import ( // until it becomes easier to use. type PostStartHookFunc func(context PostStartHookContext) error +// PreShutdownHookFunc is a function that can be added to the shutdown logic. +type PreShutdownHookFunc func() error + // PostStartHookContext provides information about this API server to a PostStartHookFunc type PostStartHookContext struct { // LoopbackClientConfig is a config for a privileged loopback connection to the API server @@ -59,6 +63,10 @@ type postStartHookEntry struct { done chan struct{} } +type preShutdownHookEntry struct { + hook PreShutdownHookFunc +} + // AddPostStartHook allows you to add a PostStartHook. func (s *GenericAPIServer) AddPostStartHook(name string, hook PostStartHookFunc) error { if len(name) == 0 { @@ -97,6 +105,37 @@ func (s *GenericAPIServer) AddPostStartHookOrDie(name string, hook PostStartHook } } +// AddPreShutdownHook allows you to add a PreShutdownHook. 
+func (s *GenericAPIServer) AddPreShutdownHook(name string, hook PreShutdownHookFunc) error { + if len(name) == 0 { + return fmt.Errorf("missing name") + } + if hook == nil { + return nil + } + + s.preShutdownHookLock.Lock() + defer s.preShutdownHookLock.Unlock() + + if s.preShutdownHooksCalled { + return fmt.Errorf("unable to add %q because PreShutdownHooks have already been called", name) + } + if _, exists := s.preShutdownHooks[name]; exists { + return fmt.Errorf("unable to add %q because it is already registered", name) + } + + s.preShutdownHooks[name] = preShutdownHookEntry{hook: hook} + + return nil +} + +// AddPreShutdownHookOrDie allows you to add a PostStartHook, but dies on failure +func (s *GenericAPIServer) AddPreShutdownHookOrDie(name string, hook PreShutdownHookFunc) { + if err := s.AddPreShutdownHook(name, hook); err != nil { + glog.Fatalf("Error registering PreShutdownHook %q: %v", name, err) + } +} + // RunPostStartHooks runs the PostStartHooks for the server func (s *GenericAPIServer) RunPostStartHooks(stopCh <-chan struct{}) { s.postStartHookLock.Lock() @@ -113,8 +152,24 @@ func (s *GenericAPIServer) RunPostStartHooks(stopCh <-chan struct{}) { } } -// isHookRegistered checks whether a given hook is registered -func (s *GenericAPIServer) isHookRegistered(name string) bool { +// RunPreShutdownHooks runs the PreShutdownHooks for the server +func (s *GenericAPIServer) RunPreShutdownHooks() error { + var errorList []error + + s.preShutdownHookLock.Lock() + defer s.preShutdownHookLock.Unlock() + s.preShutdownHooksCalled = true + + for hookName, hookEntry := range s.preShutdownHooks { + if err := runPreShutdownHook(hookName, hookEntry); err != nil { + errorList = append(errorList, err) + } + } + return utilerrors.NewAggregate(errorList) +} + +// isPostStartHookRegistered checks whether a given PostStartHook is registered +func (s *GenericAPIServer) isPostStartHookRegistered(name string) bool { s.postStartHookLock.Lock() defer s.postStartHookLock.Unlock() _, exists := s.postStartHooks[name] @@ -135,6 +190,19 @@ func runPostStartHook(name string, entry postStartHookEntry, context PostStartHo close(entry.done) } +func runPreShutdownHook(name string, entry preShutdownHookEntry) error { + var err error + func() { + // don't let the hook *accidentally* panic and kill the server + defer utilruntime.HandleCrash() + err = entry.hook() + }() + if err != nil { + return fmt.Errorf("PreShutdownHook %q failed: %v", name, err) + } + return nil +} + // postStartHookHealthz implements a healthz check for poststarthooks. It will return a "hookNotFinished" // error until the poststarthook is finished. 
type postStartHookHealthz struct { diff --git a/vendor/k8s.io/apiserver/pkg/server/httplog/BUILD b/vendor/k8s.io/apiserver/pkg/server/httplog/BUILD index 92c63dcadf62..16ea0eb730a8 100644 --- a/vendor/k8s.io/apiserver/pkg/server/httplog/BUILD +++ b/vendor/k8s.io/apiserver/pkg/server/httplog/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["httplog_test.go"], + importpath = "k8s.io/apiserver/pkg/server/httplog", library = ":go_default_library", ) @@ -18,6 +19,7 @@ go_library( "doc.go", "httplog.go", ], + importpath = "k8s.io/apiserver/pkg/server/httplog", deps = ["//vendor/github.com/golang/glog:go_default_library"], ) diff --git a/vendor/k8s.io/apiserver/pkg/server/mux/BUILD b/vendor/k8s.io/apiserver/pkg/server/mux/BUILD index acbbf22cce31..fecbbe7b7222 100644 --- a/vendor/k8s.io/apiserver/pkg/server/mux/BUILD +++ b/vendor/k8s.io/apiserver/pkg/server/mux/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["pathrecorder_test.go"], + importpath = "k8s.io/apiserver/pkg/server/mux", library = ":go_default_library", deps = ["//vendor/github.com/stretchr/testify/assert:go_default_library"], ) @@ -19,6 +20,7 @@ go_library( "doc.go", "pathrecorder.go", ], + importpath = "k8s.io/apiserver/pkg/server/mux", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/server/options/BUILD b/vendor/k8s.io/apiserver/pkg/server/options/BUILD index c7d7c2d5afbc..7a9196f14d68 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/BUILD +++ b/vendor/k8s.io/apiserver/pkg/server/options/BUILD @@ -1,28 +1,4 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_test( - name = "go_default_test", - srcs = ["serving_test.go"], - data = glob(["testdata/**"]), - library = ":go_default_library", - deps = [ - "//vendor/github.com/stretchr/testify/assert:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/version:go_default_library", - "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", - "//vendor/k8s.io/apiserver/pkg/server:go_default_library", - "//vendor/k8s.io/apiserver/pkg/util/flag:go_default_library", - "//vendor/k8s.io/client-go/discovery:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - ], -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -39,6 +15,8 @@ go_library( "server_run_options.go", "serving.go", ], + importpath = "k8s.io/apiserver/pkg/server/options", + visibility = ["//visibility:public"], deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/pborman/uuid:go_default_library", @@ -52,6 +30,13 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission/initializer:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission/metrics:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission/plugin/initialization:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating:go_default_library", + 
"//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating:go_default_library", + "//vendor/k8s.io/apiserver/pkg/apis/apiserver:go_default_library", + "//vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1:go_default_library", "//vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1:go_default_library", "//vendor/k8s.io/apiserver/pkg/audit:go_default_library", "//vendor/k8s.io/apiserver/pkg/audit/policy:go_default_library", @@ -80,9 +65,26 @@ go_library( ], ) -filegroup( - name = "testdata", - srcs = glob(["testdata/*"]), +go_test( + name = "go_default_test", + srcs = [ + "admission_test.go", + "serving_test.go", + ], + data = glob(["testdata/**"]), + importpath = "k8s.io/apiserver/pkg/server/options", + library = ":go_default_library", + deps = [ + "//vendor/github.com/stretchr/testify/assert:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/version:go_default_library", + "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", + "//vendor/k8s.io/apiserver/pkg/server:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/flag:go_default_library", + "//vendor/k8s.io/client-go/discovery:go_default_library", + "//vendor/k8s.io/client-go/rest:go_default_library", + ], ) filegroup( @@ -99,4 +101,5 @@ filegroup( "//staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig:all-srcs", ], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/k8s.io/apiserver/pkg/server/options/admission.go b/vendor/k8s.io/apiserver/pkg/server/options/admission.go index 40c34ce3d3ae..6232567f7a4b 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/admission.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/admission.go @@ -21,29 +21,55 @@ import ( "strings" "github.com/spf13/pflag" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/admission" "k8s.io/apiserver/pkg/admission/initializer" + admissionmetrics "k8s.io/apiserver/pkg/admission/metrics" + "k8s.io/apiserver/pkg/admission/plugin/initialization" + "k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle" + mutatingwebhook "k8s.io/apiserver/pkg/admission/plugin/webhook/mutating" + validatingwebhook "k8s.io/apiserver/pkg/admission/plugin/webhook/validating" + apiserverapi "k8s.io/apiserver/pkg/apis/apiserver" + apiserverapiv1alpha1 "k8s.io/apiserver/pkg/apis/apiserver/v1alpha1" "k8s.io/apiserver/pkg/server" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" ) // AdmissionOptions holds the admission options type AdmissionOptions struct { - PluginNames []string - ConfigFile string - Plugins *admission.Plugins + // RecommendedPluginOrder holds an ordered list of plugin names we recommend to use by default + RecommendedPluginOrder []string + // DefaultOffPlugins a list of plugin names that should be disabled by default + DefaultOffPlugins []string + PluginNames []string + ConfigFile string + + Plugins *admission.Plugins } // NewAdmissionOptions creates a new instance of AdmissionOptions // Note: -// In addition it calls RegisterAllAdmissionPlugins to register -// all generic admission plugins. +// In addition it calls RegisterAllAdmissionPlugins to register +// all generic admission plugins. +// +// Provides the list of RecommendedPluginOrder that holds sane values +// that can be used by servers that don't care about admission chain. +// Servers that do care can overwrite/append that field after creation. 
func NewAdmissionOptions() *AdmissionOptions { options := &AdmissionOptions{ - Plugins: &admission.Plugins{}, + Plugins: admission.NewPlugins(), PluginNames: []string{}, + // This list is mix of mutating admission plugins and validating + // admission plugins. The apiserver always runs the validating ones + // after all the mutating ones, so their relative order in this list + // doesn't matter. + RecommendedPluginOrder: []string{lifecycle.PluginName, initialization.PluginName, mutatingwebhook.PluginName, validatingwebhook.PluginName}, + DefaultOffPlugins: []string{initialization.PluginName, mutatingwebhook.PluginName, validatingwebhook.PluginName}, } + apiserverapi.AddToScheme(options.Plugins.ConfigScheme) + apiserverapiv1alpha1.AddToScheme(options.Plugins.ConfigScheme) server.RegisterAllAdmissionPlugins(options.Plugins) return options } @@ -51,43 +77,54 @@ func NewAdmissionOptions() *AdmissionOptions { // AddFlags adds flags related to admission for a specific APIServer to the specified FlagSet func (a *AdmissionOptions) AddFlags(fs *pflag.FlagSet) { fs.StringSliceVar(&a.PluginNames, "admission-control", a.PluginNames, ""+ - "Ordered list of plug-ins to do admission control of resources into cluster. "+ + "Admission is divided into two phases. "+ + "In the first phase, only mutating admission plugins run. "+ + "In the second phase, only validating admission plugins run. "+ + "The names in the below list may represent a validating plugin, a mutating plugin, or both. "+ + "Within each phase, the plugins will run in the order in which they are passed to this flag. "+ "Comma-delimited list of: "+strings.Join(a.Plugins.Registered(), ", ")+".") fs.StringVar(&a.ConfigFile, "admission-control-config-file", a.ConfigFile, "File with admission control configuration.") } -// ApplyTo adds the admission chain to the server configuration -// the method lazily initializes a generic plugin that is appended to the list of pluginInitializers +// ApplyTo adds the admission chain to the server configuration. +// In case admission plugin names were not provided by a custer-admin they will be prepared from the recommended/default values. 
+// In addition the method lazily initializes a generic plugin that is appended to the list of pluginInitializers // note this method uses: -// genericconfig.LoopbackClientConfig -// genericconfig.SharedInformerFactory // genericconfig.Authorizer -func (a *AdmissionOptions) ApplyTo(c *server.Config, informers informers.SharedInformerFactory, pluginInitializers ...admission.PluginInitializer) error { - pluginsConfigProvider, err := admission.ReadAdmissionConfiguration(a.PluginNames, a.ConfigFile) - if err != nil { - return fmt.Errorf("failed to read plugin config: %v", err) +func (a *AdmissionOptions) ApplyTo( + c *server.Config, + informers informers.SharedInformerFactory, + kubeAPIServerClientConfig *rest.Config, + scheme *runtime.Scheme, + pluginInitializers ...admission.PluginInitializer, +) error { + pluginNames := a.PluginNames + if len(a.PluginNames) == 0 { + pluginNames = a.enabledPluginNames() } - clientset, err := kubernetes.NewForConfig(c.LoopbackClientConfig) + pluginsConfigProvider, err := admission.ReadAdmissionConfiguration(pluginNames, a.ConfigFile, a.Plugins.ConfigScheme) if err != nil { - return err + return fmt.Errorf("failed to read plugin config: %v", err) } - genericInitializer, err := initializer.New(clientset, informers, c.Authorizer) + + clientset, err := kubernetes.NewForConfig(kubeAPIServerClientConfig) if err != nil { return err } + genericInitializer := initializer.New(clientset, informers, c.Authorizer, scheme) initializersChain := admission.PluginInitializers{} pluginInitializers = append(pluginInitializers, genericInitializer) initializersChain = append(initializersChain, pluginInitializers...) - admissionChain, err := a.Plugins.NewFromPlugins(a.PluginNames, pluginsConfigProvider, initializersChain) + admissionChain, err := a.Plugins.NewFromPlugins(pluginNames, pluginsConfigProvider, initializersChain, admissionmetrics.WithControllerMetrics) if err != nil { return err } - c.AdmissionControl = admissionChain + c.AdmissionControl = admissionmetrics.WithStepMetrics(admissionChain) return nil } @@ -95,3 +132,33 @@ func (a *AdmissionOptions) Validate() []error { errs := []error{} return errs } + +// enabledPluginNames makes use of RecommendedPluginOrder and DefaultOffPlugins fields +// to prepare a list of plugin names that are enabled. +// +// TODO(p0lyn0mial): In the end we will introduce two new flags: +// --disable-admission-plugin this would be a list of admission plugins that a cluster-admin wants to explicitly disable. +// --enable-admission-plugin this would be a list of admission plugins that a cluster-admin wants to explicitly enable. +// both flags are going to be handled by this method +func (a *AdmissionOptions) enabledPluginNames() []string { + //TODO(p0lyn0mial): first subtract plugins that a user wants to explicitly enable from allOffPlugins (DefaultOffPlugins) + //TODO(p0lyn0miial): then add/append plugins that a user wants to explicitly disable to allOffPlugins + //TODO(p0lyn0mial): so that --off=three --on=one,three default-off=one,two results in "one" being enabled. 
+ allOffPlugins := a.DefaultOffPlugins + onlyEnabledPluginNames := []string{} + for _, pluginName := range a.RecommendedPluginOrder { + disablePlugin := false + for _, disabledPluginName := range allOffPlugins { + if pluginName == disabledPluginName { + disablePlugin = true + break + } + } + if disablePlugin { + continue + } + onlyEnabledPluginNames = append(onlyEnabledPluginNames, pluginName) + } + + return onlyEnabledPluginNames +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/audit.go b/vendor/k8s.io/apiserver/pkg/server/options/audit.go index 833e91e5307d..e43ebb12bc90 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/audit.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/audit.go @@ -77,12 +77,17 @@ type AuditWebhookOptions struct { // // Defaults to asynchronous batch events. Mode string + // Configuration for batching webhook. Only used in batch mode. + BatchConfig pluginwebhook.BatchBackendConfig } func NewAuditOptions() *AuditOptions { return &AuditOptions{ - WebhookOptions: AuditWebhookOptions{Mode: pluginwebhook.ModeBatch}, - LogOptions: AuditLogOptions{Format: pluginlog.FormatJson}, + WebhookOptions: AuditWebhookOptions{ + Mode: pluginwebhook.ModeBatch, + BatchConfig: pluginwebhook.NewDefaultBatchBackendConfig(), + }, + LogOptions: AuditLogOptions{Format: pluginlog.FormatJson}, } } @@ -102,7 +107,7 @@ func (o *AuditOptions) Validate() []error { allErrors = append(allErrors, fmt.Errorf("feature '%s' must be enabled to set option --audit-webhook-config-file", features.AdvancedAuditing)) } } else { - // check webhook mode + // Check webhook mode validMode := false for _, m := range pluginwebhook.AllowedModes { if m == o.WebhookOptions.Mode { @@ -114,7 +119,21 @@ func (o *AuditOptions) Validate() []error { allErrors = append(allErrors, fmt.Errorf("invalid audit webhook mode %s, allowed modes are %q", o.WebhookOptions.Mode, strings.Join(pluginwebhook.AllowedModes, ","))) } - // check log format + // Check webhook batch configuration + if o.WebhookOptions.BatchConfig.BufferSize <= 0 { + allErrors = append(allErrors, fmt.Errorf("invalid audit batch webhook buffer size %v, must be a positive number", o.WebhookOptions.BatchConfig.BufferSize)) + } + if o.WebhookOptions.BatchConfig.MaxBatchSize <= 0 { + allErrors = append(allErrors, fmt.Errorf("invalid audit batch webhook max batch size %v, must be a positive number", o.WebhookOptions.BatchConfig.MaxBatchSize)) + } + if o.WebhookOptions.BatchConfig.ThrottleQPS <= 0 { + allErrors = append(allErrors, fmt.Errorf("invalid audit batch webhook throttle QPS %v, must be a positive number", o.WebhookOptions.BatchConfig.ThrottleQPS)) + } + if o.WebhookOptions.BatchConfig.ThrottleBurst <= 0 { + allErrors = append(allErrors, fmt.Errorf("invalid audit batch webhook throttle burst %v, must be a positive number", o.WebhookOptions.BatchConfig.ThrottleBurst)) + } + + // Check log format validFormat := false for _, f := range pluginlog.AllowedFormats { if f == o.LogOptions.Format { @@ -249,6 +268,24 @@ func (o *AuditWebhookOptions) AddFlags(fs *pflag.FlagSet) { "Strategy for sending audit events. Blocking indicates sending events should block"+ " server responses. Batch causes the webhook to buffer and send events"+ " asynchronously. Known modes are "+strings.Join(pluginwebhook.AllowedModes, ",")+".") + fs.IntVar(&o.BatchConfig.BufferSize, "audit-webhook-batch-buffer-size", + o.BatchConfig.BufferSize, "The size of the buffer to store events before "+ + "batching and sending to the webhook. 
Only used in batch mode.") + fs.IntVar(&o.BatchConfig.MaxBatchSize, "audit-webhook-batch-max-size", + o.BatchConfig.MaxBatchSize, "The maximum size of a batch sent to the webhook. "+ + "Only used in batch mode.") + fs.DurationVar(&o.BatchConfig.MaxBatchWait, "audit-webhook-batch-max-wait", + o.BatchConfig.MaxBatchWait, "The amount of time to wait before force sending the "+ + "batch that hadn't reached the max size. Only used in batch mode.") + fs.Float32Var(&o.BatchConfig.ThrottleQPS, "audit-webhook-batch-throttle-qps", + o.BatchConfig.ThrottleQPS, "Maximum average number of requests per second. "+ + "Only used in batch mode.") + fs.IntVar(&o.BatchConfig.ThrottleBurst, "audit-webhook-batch-throttle-burst", + o.BatchConfig.ThrottleBurst, "Maximum number of requests sent at the same "+ + "moment if ThrottleQPS was not utilized before. Only used in batch mode.") + fs.DurationVar(&o.BatchConfig.InitialBackoff, "audit-webhook-batch-initial-backoff", + o.BatchConfig.InitialBackoff, "The amount of time to wait before retrying the "+ + "first failed requests. Only used in batch mode.") } func (o *AuditWebhookOptions) applyTo(c *server.Config) error { @@ -256,7 +293,7 @@ func (o *AuditWebhookOptions) applyTo(c *server.Config) error { return nil } - webhook, err := pluginwebhook.NewBackend(o.ConfigFile, o.Mode, auditv1beta1.SchemeGroupVersion) + webhook, err := pluginwebhook.NewBackend(o.ConfigFile, o.Mode, auditv1beta1.SchemeGroupVersion, o.BatchConfig) if err != nil { return fmt.Errorf("initializing audit webhook: %v", err) } diff --git a/vendor/k8s.io/apiserver/pkg/server/options/coreapi.go b/vendor/k8s.io/apiserver/pkg/server/options/coreapi.go index edcf3a7b0587..d46dece4a78c 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/coreapi.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/coreapi.go @@ -73,6 +73,7 @@ func (o *CoreAPIOptions) ApplyTo(config *server.RecommendedConfig) error { if err != nil { return fmt.Errorf("failed to create Kubernetes clientset: %v", err) } + config.ClientConfig = kubeconfig config.SharedInformerFactory = clientgoinformers.NewSharedInformerFactory(clientgoExternalClient, 10*time.Minute) return nil diff --git a/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/BUILD b/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/BUILD index 442550bb97fa..bfea74f88431 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/BUILD +++ b/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/BUILD @@ -13,6 +13,7 @@ go_library( "plugins.go", "types.go", ], + importpath = "k8s.io/apiserver/pkg/server/options/encryptionconfig", deps = [ "//vendor/github.com/ghodss/yaml:go_default_library", "//vendor/github.com/golang/glog:go_default_library", @@ -27,7 +28,8 @@ go_library( go_test( name = "go_default_test", - srcs = ["encryptionconfig_test.go"], + srcs = ["config_test.go"], + importpath = "k8s.io/apiserver/pkg/server/options/encryptionconfig", library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/server/options/etcd.go b/vendor/k8s.io/apiserver/pkg/server/options/etcd.go index 4919c1e82c2a..d522cde9bcb8 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/etcd.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/etcd.go @@ -144,10 +144,15 @@ func (s *EtcdOptions) AddFlags(fs *pflag.FlagSet) { "SSL Certificate Authority file used to secure etcd communication.") fs.BoolVar(&s.StorageConfig.Quorum, "etcd-quorum-read", s.StorageConfig.Quorum, 
- "If true, enable quorum read.") + "If true, enable quorum read. It defaults to true and is strongly recommended not setting to false.") + fs.MarkDeprecated("etcd-quorum-read", "This flag is deprecated and the ability to switch off quorum read will be removed in a future release.") fs.StringVar(&s.EncryptionProviderConfigFilepath, "experimental-encryption-provider-config", s.EncryptionProviderConfigFilepath, "The file containing configuration for encryption providers to be used for storing secrets in etcd") + + fs.DurationVar(&s.StorageConfig.CompactionInterval, "etcd-compaction-interval", s.StorageConfig.CompactionInterval, + "The interval of compaction requests. If 0, the compaction request from apiserver is disabled.") + } func (s *EtcdOptions) ApplyTo(c *server.Config) error { diff --git a/vendor/k8s.io/apiserver/pkg/server/options/recommended.go b/vendor/k8s.io/apiserver/pkg/server/options/recommended.go index 97460b877929..21c3dd76159f 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/recommended.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/recommended.go @@ -37,9 +37,9 @@ type RecommendedOptions struct { CoreAPI *CoreAPIOptions } -func NewRecommendedOptions(prefix string, copier runtime.ObjectCopier, codec runtime.Codec) *RecommendedOptions { +func NewRecommendedOptions(prefix string, codec runtime.Codec) *RecommendedOptions { return &RecommendedOptions{ - Etcd: NewEtcdOptions(storagebackend.NewDefaultConfig(prefix, copier, codec)), + Etcd: NewEtcdOptions(storagebackend.NewDefaultConfig(prefix, codec)), SecureServing: NewSecureServingOptions(), Authentication: NewDelegatingAuthenticationOptions(), Authorization: NewDelegatingAuthorizationOptions(), diff --git a/vendor/k8s.io/apiserver/pkg/server/options/serving.go b/vendor/k8s.io/apiserver/pkg/server/options/serving.go index 775476c3d9f1..43042fde97bf 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/serving.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/serving.go @@ -38,6 +38,14 @@ import ( type SecureServingOptions struct { BindAddress net.IP BindPort int + // BindNetwork is the type of network to bind to - defaults to "tcp", accepts "tcp", + // "tcp4", and "tcp6". + BindNetwork string + + // Listener is the secure server network listener. + // either Listener or BindAddress/BindPort/BindNetwork is set, + // if Listener is set, use it and omit BindAddress/BindPort/BindNetwork. + Listener net.Listener // ServerCert is the TLS cert info for serving secure traffic ServerCert GeneratableKeyCert @@ -116,7 +124,7 @@ func (s *SecureServingOptions) AddFlags(fs *pflag.FlagSet) { "File containing the default x509 Certificate for HTTPS. (CA cert, if any, concatenated "+ "after server cert). 
If HTTPS serving is enabled, and --tls-cert-file and "+ "--tls-private-key-file are not provided, a self-signed certificate and key "+ - "are generated for the public address and saved to /var/run/kubernetes.") + "are generated for the public address and saved to the directory specified by --cert-dir.") fs.StringVar(&s.ServerCert.CertKey.KeyFile, "tls-private-key-file", s.ServerCert.CertKey.KeyFile, "File containing the default x509 private key matching --tls-cert-file.") @@ -147,14 +155,25 @@ func (s *SecureServingOptions) ApplyTo(c *server.Config) error { if s == nil { return nil } - if s.BindPort <= 0 { return nil } + + if s.Listener == nil { + var err error + addr := net.JoinHostPort(s.BindAddress.String(), strconv.Itoa(s.BindPort)) + s.Listener, s.BindPort, err = CreateListener(s.BindNetwork, addr) + if err != nil { + return fmt.Errorf("failed to create listener: %v", err) + } + } + if err := s.applyServingInfoTo(c); err != nil { return err } + c.SecureServingInfo.Listener = s.Listener + // create self-signed cert+key with the fake server.LoopbackClientServerNameOverride and // let the server return it when the loopback client connects. certPem, keyPem, err := certutil.GenerateSelfSignedCertKey(server.LoopbackClientServerNameOverride, nil, nil) @@ -184,16 +203,9 @@ func (s *SecureServingOptions) ApplyTo(c *server.Config) error { } func (s *SecureServingOptions) applyServingInfoTo(c *server.Config) error { - if s.BindPort <= 0 { - return nil - } - - secureServingInfo := &server.SecureServingInfo{ - BindAddress: net.JoinHostPort(s.BindAddress.String(), strconv.Itoa(s.BindPort)), - } + secureServingInfo := &server.SecureServingInfo{} serverCertFile, serverKeyFile := s.ServerCert.CertKey.CertFile, s.ServerCert.CertKey.KeyFile - // load main cert if len(serverCertFile) != 0 || len(serverKeyFile) != 0 { tlsCert, err := tls.LoadX509KeyPair(serverCertFile, serverKeyFile) @@ -250,7 +262,7 @@ func (s *SecureServingOptions) MaybeDefaultWithSelfSignedCerts(publicAddress str return nil } keyCert := &s.ServerCert.CertKey - if s.BindPort == 0 || len(keyCert.CertFile) != 0 || len(keyCert.KeyFile) != 0 { + if len(keyCert.CertFile) != 0 || len(keyCert.KeyFile) != 0 { return nil } @@ -286,3 +298,22 @@ func (s *SecureServingOptions) MaybeDefaultWithSelfSignedCerts(publicAddress str return nil } + +func CreateListener(network, addr string) (net.Listener, int, error) { + if len(network) == 0 { + network = "tcp" + } + ln, err := net.Listen(network, addr) + if err != nil { + return nil, 0, fmt.Errorf("failed to listen on %v: %v", addr, err) + } + + // get port + tcpAddr, ok := ln.Addr().(*net.TCPAddr) + if !ok { + ln.Close() + return nil, 0, fmt.Errorf("invalid listen address: %q", ln.Addr().String()) + } + + return ln, tcpAddr.Port, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/server/plugins.go b/vendor/k8s.io/apiserver/pkg/server/plugins.go index 404e8afc4c8a..84813aafe6b3 100644 --- a/vendor/k8s.io/apiserver/pkg/server/plugins.go +++ b/vendor/k8s.io/apiserver/pkg/server/plugins.go @@ -19,10 +19,16 @@ package server // This file exists to force the desired plugin implementations to be linked into genericapi pkg. 
import ( "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/plugin/initialization" "k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle" + mutatingwebhook "k8s.io/apiserver/pkg/admission/plugin/webhook/mutating" + validatingwebhook "k8s.io/apiserver/pkg/admission/plugin/webhook/validating" ) // RegisterAllAdmissionPlugins registers all admission plugins func RegisterAllAdmissionPlugins(plugins *admission.Plugins) { lifecycle.Register(plugins) + initialization.Register(plugins) + validatingwebhook.Register(plugins) + mutatingwebhook.Register(plugins) } diff --git a/vendor/k8s.io/apiserver/pkg/server/routes/BUILD b/vendor/k8s.io/apiserver/pkg/server/routes/BUILD index bf0f862403a2..e7ffd916bfd0 100644 --- a/vendor/k8s.io/apiserver/pkg/server/routes/BUILD +++ b/vendor/k8s.io/apiserver/pkg/server/routes/BUILD @@ -17,6 +17,7 @@ go_library( "swaggerui.go", "version.go", ], + importpath = "k8s.io/apiserver/pkg/server/routes", deps = [ "//vendor/github.com/elazarl/go-bindata-assetfs:go_default_library", "//vendor/github.com/emicklei/go-restful:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/server/routes/data/swagger/BUILD b/vendor/k8s.io/apiserver/pkg/server/routes/data/swagger/BUILD index 1b6d661a2e68..f715802fad34 100644 --- a/vendor/k8s.io/apiserver/pkg/server/routes/data/swagger/BUILD +++ b/vendor/k8s.io/apiserver/pkg/server/routes/data/swagger/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["datafile.go"], + importpath = "k8s.io/apiserver/pkg/server/routes/data/swagger", ) filegroup( diff --git a/vendor/k8s.io/apiserver/pkg/server/serve.go b/vendor/k8s.io/apiserver/pkg/server/serve.go index 90f4078c7532..dcb4de1e5926 100644 --- a/vendor/k8s.io/apiserver/pkg/server/serve.go +++ b/vendor/k8s.io/apiserver/pkg/server/serve.go @@ -17,6 +17,7 @@ limitations under the License. package server import ( + "context" "crypto/tls" "crypto/x509" "fmt" @@ -25,11 +26,10 @@ import ( "strings" "time" + "github.com/golang/glog" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/validation" - - "github.com/golang/glog" - "github.com/pkg/errors" ) const ( @@ -40,8 +40,12 @@ const ( // be loaded or the initial listen call fails. The actual server loop (stoppable by closing // stopCh) runs in a go routine, i.e. serveSecurely does not block. func (s *GenericAPIServer) serveSecurely(stopCh <-chan struct{}) error { + if s.SecureServingInfo.Listener == nil { + return fmt.Errorf("listener must not be nil") + } + secureServer := &http.Server{ - Addr: s.SecureServingInfo.BindAddress, + Addr: s.SecureServingInfo.Listener.Addr().String(), Handler: s.Handler, MaxHeaderBytes: 1 << 20, TLSConfig: &tls.Config{ @@ -82,39 +86,30 @@ func (s *GenericAPIServer) serveSecurely(stopCh <-chan struct{}) error { secureServer.TLSConfig.ClientCAs = s.SecureServingInfo.ClientCA } - glog.Infof("Serving securely on %s", s.SecureServingInfo.BindAddress) - var err error - s.effectiveSecurePort, err = RunServer(secureServer, s.SecureServingInfo.BindNetwork, stopCh) + glog.Infof("Serving securely on %s", secureServer.Addr) + err := RunServer(secureServer, s.SecureServingInfo.Listener, s.ShutdownTimeout, stopCh) return err } -// RunServer listens on the given port, then spawns a go-routine continuously serving -// until the stopCh is closed. The port is returned. This function does not block. 
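The RunServer rework just below stops the server by calling http.Server.Shutdown with a timeout instead of closing the listener. A minimal standalone sketch of that graceful-shutdown pattern, assuming a caller-supplied listener and stop channel (all names here are illustrative):

package main

import (
	"context"
	"fmt"
	"net"
	"net/http"
	"time"
)

// runServer serves on ln and, when stopCh is closed, drains in-flight
// requests via Shutdown instead of abruptly closing the listener.
func runServer(server *http.Server, ln net.Listener, shutdownTimeout time.Duration, stopCh <-chan struct{}) error {
	if ln == nil {
		return fmt.Errorf("listener must not be nil")
	}
	go func() {
		<-stopCh
		ctx, cancel := context.WithTimeout(context.Background(), shutdownTimeout)
		defer cancel()
		server.Shutdown(ctx)
	}()
	go func() {
		if err := server.Serve(ln); err != nil && err != http.ErrServerClosed {
			fmt.Println("server stopped:", err)
		}
	}()
	return nil
}

func main() {
	ln, _ := net.Listen("tcp", "127.0.0.1:0")
	stopCh := make(chan struct{})
	_ = runServer(&http.Server{Handler: http.NotFoundHandler()}, ln, 5*time.Second, stopCh)
	close(stopCh)           // trigger graceful shutdown
	time.Sleep(time.Second) // give the shutdown goroutine time to finish
}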
-func RunServer(server *http.Server, network string, stopCh <-chan struct{}) (int, error) { - if len(server.Addr) == 0 { - return 0, errors.New("address cannot be empty") - } - - if len(network) == 0 { - network = "tcp" - } - - ln, err := net.Listen(network, server.Addr) - if err != nil { - return 0, fmt.Errorf("failed to listen on %v: %v", server.Addr, err) - } - - // get port - tcpAddr, ok := ln.Addr().(*net.TCPAddr) - if !ok { - ln.Close() - return 0, fmt.Errorf("invalid listen address: %q", ln.Addr().String()) +// RunServer listens on the given port if listener is not given, +// then spawns a go-routine continuously serving +// until the stopCh is closed. This function does not block. +func RunServer( + server *http.Server, + ln net.Listener, + shutDownTimeout time.Duration, + stopCh <-chan struct{}, +) error { + if ln == nil { + return fmt.Errorf("listener must not be nil") } - // Stop the server by closing the listener + // Shutdown server gracefully. go func() { <-stopCh - ln.Close() + ctx, cancel := context.WithTimeout(context.Background(), shutDownTimeout) + server.Shutdown(ctx) + cancel() }() go func() { @@ -128,7 +123,7 @@ func RunServer(server *http.Server, network string, stopCh <-chan struct{}) (int err := server.Serve(listener) - msg := fmt.Sprintf("Stopped listening on %s", tcpAddr.String()) + msg := fmt.Sprintf("Stopped listening on %s", ln.Addr().String()) select { case <-stopCh: glog.Info(msg) @@ -137,7 +132,7 @@ func RunServer(server *http.Server, network string, stopCh <-chan struct{}) (int } }() - return tcpAddr.Port, nil + return nil } type NamedTLSCert struct { diff --git a/vendor/k8s.io/apiserver/pkg/server/storage/BUILD b/vendor/k8s.io/apiserver/pkg/server/storage/BUILD index 9f9194d76f50..186764721076 100644 --- a/vendor/k8s.io/apiserver/pkg/server/storage/BUILD +++ b/vendor/k8s.io/apiserver/pkg/server/storage/BUILD @@ -12,6 +12,7 @@ go_test( "resource_config_test.go", "storage_factory_test.go", ], + importpath = "k8s.io/apiserver/pkg/server/storage", library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", @@ -36,6 +37,7 @@ go_library( "storage_codec.go", "storage_factory.go", ], + importpath = "k8s.io/apiserver/pkg/server/storage", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/server/storage/storage_factory.go b/vendor/k8s.io/apiserver/pkg/server/storage/storage_factory.go index efa445182276..429e14d5ca23 100644 --- a/vendor/k8s.io/apiserver/pkg/server/storage/storage_factory.go +++ b/vendor/k8s.io/apiserver/pkg/server/storage/storage_factory.go @@ -151,19 +151,7 @@ var _ StorageFactory = &DefaultStorageFactory{} const AllResources = "*" -// specialDefaultResourcePrefixes are prefixes compiled into Kubernetes. 
-// TODO: move out of this package, it is not generic -var specialDefaultResourcePrefixes = map[schema.GroupResource]string{ - {Group: "", Resource: "replicationControllers"}: "controllers", - {Group: "", Resource: "replicationcontrollers"}: "controllers", - {Group: "", Resource: "endpoints"}: "services/endpoints", - {Group: "", Resource: "nodes"}: "minions", - {Group: "", Resource: "services"}: "services/specs", - {Group: "extensions", Resource: "ingresses"}: "ingress", - {Group: "extensions", Resource: "podsecuritypolicies"}: "podsecuritypolicy", -} - -func NewDefaultStorageFactory(config storagebackend.Config, defaultMediaType string, defaultSerializer runtime.StorageSerializer, resourceEncodingConfig ResourceEncodingConfig, resourceConfig APIResourceConfigSource) *DefaultStorageFactory { +func NewDefaultStorageFactory(config storagebackend.Config, defaultMediaType string, defaultSerializer runtime.StorageSerializer, resourceEncodingConfig ResourceEncodingConfig, resourceConfig APIResourceConfigSource, specialDefaultResourcePrefixes map[schema.GroupResource]string) *DefaultStorageFactory { config.Paging = utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking) if len(defaultMediaType) == 0 { defaultMediaType = runtime.ContentTypeJSON diff --git a/vendor/k8s.io/apiserver/pkg/storage/BUILD b/vendor/k8s.io/apiserver/pkg/storage/BUILD index f85dff5f977a..1838e84d85ce 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/BUILD +++ b/vendor/k8s.io/apiserver/pkg/storage/BUILD @@ -15,6 +15,7 @@ go_test( "util_test.go", "watch_cache_test.go", ], + importpath = "k8s.io/apiserver/pkg/storage", library = ":go_default_library", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", @@ -30,7 +31,6 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", ], ) @@ -47,6 +47,7 @@ go_library( "util.go", "watch_cache.go", ], + importpath = "k8s.io/apiserver/pkg/storage", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/net/context:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher.go b/vendor/k8s.io/apiserver/pkg/storage/cacher.go index d2f39030a1b9..e3c787d08420 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/cacher.go +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher.go @@ -54,8 +54,6 @@ type CacherConfig struct { // An underlying storage.Versioner. Versioner Versioner - Copier runtime.ObjectCopier - // The Cache will be caching objects of a given Type and assumes that they // are all stored under ResourcePrefix directory in the underlying database. Type interface{} @@ -161,8 +159,6 @@ type Cacher struct { // Underlying storage.Interface. storage Interface - copier runtime.ObjectCopier - // Expected type of objects in the underlying cache. 
objectType reflect.Type @@ -212,7 +208,6 @@ func NewCacherFromConfig(config CacherConfig) *Cacher { cacher := &Cacher{ ready: newReady(), storage: config.Storage, - copier: config.Copier, objectType: reflect.TypeOf(config.Type), watchCache: watchCache, reflector: cache.NewNamedReflector(reflectorName, listerWatcher, config.Type, watchCache, 0), @@ -343,7 +338,7 @@ func (c *Cacher) Watch(ctx context.Context, key string, resourceVersion string, c.Lock() defer c.Unlock() forget := forgetWatcher(c, c.watcherIdx, triggerValue, triggerSupported) - watcher := newCacheWatcher(c.copier, watchRV, chanSize, initEvents, watchFilterFunction(key, pred), forget) + watcher := newCacheWatcher(watchRV, chanSize, initEvents, watchFilterFunction(key, pred), forget) c.watchers.addWatcher(watcher, c.watcherIdx, triggerValue, triggerSupported) c.watcherIdx++ @@ -473,10 +468,15 @@ func (c *Cacher) GetToList(ctx context.Context, key string, resourceVersion stri // Implements storage.Interface. func (c *Cacher) List(ctx context.Context, key string, resourceVersion string, pred SelectionPredicate, listObj runtime.Object) error { pagingEnabled := utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking) - if resourceVersion == "" || (pagingEnabled && (len(pred.Continue) > 0 || pred.Limit > 0)) { + hasContinuation := pagingEnabled && len(pred.Continue) > 0 + hasLimit := pagingEnabled && pred.Limit > 0 && resourceVersion != "0" + if resourceVersion == "" || hasContinuation || hasLimit { // If resourceVersion is not specified, serve it from underlying - // storage (for backward compatibility). If a continuation or limit is + // storage (for backward compatibility). If a continuation is // requested, serve it from the underlying storage as well. + // Limits are only sent to storage when resourceVersion is non-zero + // since the watch cache isn't able to perform continuations, and + // limits are ignored when resource version is zero. 
return c.storage.List(ctx, key, resourceVersion, pred, listObj) } @@ -649,7 +649,15 @@ func (c *Cacher) isStopped() bool { } func (c *Cacher) Stop() { + // avoid stopping twice (note: cachers are shared with subresources) + if c.isStopped() { + return + } c.stopLock.Lock() + if c.stopped { + // avoid that it was locked meanwhile as isStopped only read-locks + return + } c.stopped = true c.stopLock.Unlock() close(c.stopCh) @@ -778,7 +786,6 @@ func (c *errWatcher) Stop() { // cacherWatch implements watch.Interface type cacheWatcher struct { sync.Mutex - copier runtime.ObjectCopier input chan *watchCacheEvent result chan watch.Event done chan struct{} @@ -787,9 +794,8 @@ type cacheWatcher struct { forget func(bool) } -func newCacheWatcher(copier runtime.ObjectCopier, resourceVersion uint64, chanSize int, initEvents []*watchCacheEvent, filter watchFilterFunc, forget func(bool)) *cacheWatcher { +func newCacheWatcher(resourceVersion uint64, chanSize int, initEvents []*watchCacheEvent, filter watchFilterFunc, forget func(bool)) *cacheWatcher { watcher := &cacheWatcher{ - copier: copier, input: make(chan *watchCacheEvent, chanSize), result: make(chan watch.Event, chanSize), done: make(chan struct{}), diff --git a/vendor/k8s.io/apiserver/pkg/storage/errors/BUILD b/vendor/k8s.io/apiserver/pkg/storage/errors/BUILD index 41f19e21bc1b..dd07c9c3724f 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/errors/BUILD +++ b/vendor/k8s.io/apiserver/pkg/storage/errors/BUILD @@ -11,6 +11,7 @@ go_library( "doc.go", "storage.go", ], + importpath = "k8s.io/apiserver/pkg/storage/errors", deps = [ "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd/BUILD b/vendor/k8s.io/apiserver/pkg/storage/etcd/BUILD index c1f36633edc8..7ac2e7102408 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd/BUILD +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd/BUILD @@ -13,6 +13,7 @@ go_test( "etcd_helper_test.go", "etcd_watcher_test.go", ], + importpath = "k8s.io/apiserver/pkg/storage/etcd", library = ":go_default_library", deps = [ "//vendor/github.com/coreos/etcd/client:go_default_library", @@ -46,6 +47,7 @@ go_library( "etcd_helper.go", "etcd_watcher.go", ], + importpath = "k8s.io/apiserver/pkg/storage/etcd", deps = [ "//vendor/github.com/coreos/etcd/client:go_default_library", "//vendor/github.com/golang/glog:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd/etcd_helper.go b/vendor/k8s.io/apiserver/pkg/storage/etcd/etcd_helper.go index 9c09c071c3e3..322ccefabd1c 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd/etcd_helper.go +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd/etcd_helper.go @@ -62,13 +62,12 @@ var IdentityTransformer ValueTransformer = identityTransformer{} // Creates a new storage interface from the client // TODO: deprecate in favor of storage.Config abstraction over time -func NewEtcdStorage(client etcd.Client, codec runtime.Codec, prefix string, quorum bool, cacheSize int, copier runtime.ObjectCopier, transformer ValueTransformer) storage.Interface { +func NewEtcdStorage(client etcd.Client, codec runtime.Codec, prefix string, quorum bool, cacheSize int, transformer ValueTransformer) storage.Interface { return &etcdHelper{ etcdMembersAPI: etcd.NewMembersAPI(client), etcdKeysAPI: etcd.NewKeysAPI(client), codec: codec, versioner: APIObjectVersioner{}, - copier: copier, transformer: transformer, pathPrefix: path.Join("/", prefix), quorum: quorum, @@ -81,7 
+80,6 @@ type etcdHelper struct { etcdMembersAPI etcd.MembersAPI etcdKeysAPI etcd.KeysAPI codec runtime.Codec - copier runtime.ObjectCopier transformer ValueTransformer // Note that versioner is required for etcdHelper to work correctly. // The public constructors (NewStorage & NewEtcdStorage) are setting it diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd/metrics/BUILD b/vendor/k8s.io/apiserver/pkg/storage/etcd/metrics/BUILD index b9badf163a2a..f4edcf221b54 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd/metrics/BUILD +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd/metrics/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["metrics.go"], + importpath = "k8s.io/apiserver/pkg/storage/etcd/metrics", deps = ["//vendor/github.com/prometheus/client_golang/prometheus:go_default_library"], ) diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd/util/BUILD b/vendor/k8s.io/apiserver/pkg/storage/etcd/util/BUILD index 935b7faebcfc..f1405e872969 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd/util/BUILD +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd/util/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["etcd_util_test.go"], + importpath = "k8s.io/apiserver/pkg/storage/etcd/util", library = ":go_default_library", deps = [ "//vendor/github.com/coreos/etcd/client:go_default_library", @@ -22,6 +23,7 @@ go_library( "doc.go", "etcd_util.go", ], + importpath = "k8s.io/apiserver/pkg/storage/etcd/util", deps = ["//vendor/github.com/coreos/etcd/client:go_default_library"], ) diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/BUILD b/vendor/k8s.io/apiserver/pkg/storage/etcd3/BUILD index ad5b06d819cb..0b7c4311fdce 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd3/BUILD +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/BUILD @@ -13,11 +13,13 @@ go_test( "store_test.go", "watcher_test.go", ], + importpath = "k8s.io/apiserver/pkg/storage/etcd3", library = ":go_default_library", deps = [ "//vendor/github.com/coreos/etcd/clientv3:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes:go_default_library", "//vendor/github.com/coreos/etcd/integration:go_default_library", + "//vendor/github.com/coreos/pkg/capnslog:go_default_library", "//vendor/golang.org/x/net/context:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/testing:go_default_library", @@ -47,6 +49,7 @@ go_library( "store.go", "watcher.go", ], + importpath = "k8s.io/apiserver/pkg/storage/etcd3", deps = [ "//vendor/github.com/coreos/etcd/clientv3:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/compact.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/compact.go index c778119e77d4..2bceacc8ee2e 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd3/compact.go +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/compact.go @@ -27,8 +27,7 @@ import ( ) const ( - compactInterval = 5 * time.Minute - compactRevKey = "compact_rev_key" + compactRevKey = "compact_rev_key" ) var ( @@ -44,7 +43,7 @@ func init() { // By default, we save the most recent 10 minutes data and compact versions > 10minutes ago. // It should be enough for slow watchers and to tolerate burst. // TODO: We might keep a longer history (12h) in the future once storage API can take advantage of past version of keys. 
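For the StartCompactor change below: the compaction interval is now a parameter (fed from --etcd-compaction-interval via storagebackend.Config.CompactionInterval), and a value of 0 skips starting the compactor. A hedged sketch of calling it directly; the etcd endpoint is a placeholder:

package main

import (
	"context"
	"time"

	"github.com/coreos/etcd/clientv3"
	"k8s.io/apiserver/pkg/storage/etcd3"
)

func main() {
	// Placeholder endpoint; in the apiserver this client comes from the
	// configured storage backend.
	client, err := clientv3.New(clientv3.Config{Endpoints: []string{"http://127.0.0.1:2379"}})
	if err != nil {
		panic(err)
	}
	defer client.Close()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Request compaction every 10 minutes; passing 0 would skip starting
	// the compactor goroutine entirely.
	etcd3.StartCompactor(ctx, client, 10*time.Minute)
}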
-func StartCompactor(ctx context.Context, client *clientv3.Client) { +func StartCompactor(ctx context.Context, client *clientv3.Client, compactInterval time.Duration) { endpointsMapMu.Lock() defer endpointsMapMu.Unlock() @@ -60,7 +59,9 @@ func StartCompactor(ctx context.Context, client *clientv3.Client) { endpointsMap[ep] = struct{}{} } - go compactor(ctx, client, compactInterval) + if compactInterval != 0 { + go compactor(ctx, client, compactInterval) + } } // compactor periodically compacts historical versions of keys in etcd. diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/preflight/BUILD b/vendor/k8s.io/apiserver/pkg/storage/etcd3/preflight/BUILD index b1392aa408d0..864a1141d65f 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd3/preflight/BUILD +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/preflight/BUILD @@ -9,11 +9,13 @@ load( go_library( name = "go_default_library", srcs = ["checks.go"], + importpath = "k8s.io/apiserver/pkg/storage/etcd3/preflight", ) go_test( name = "go_default_test", srcs = ["checks_test.go"], + importpath = "k8s.io/apiserver/pkg/storage/etcd3/preflight", library = ":go_default_library", deps = ["//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library"], ) diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go index e5a67f043ec2..2350a5526b87 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go @@ -332,8 +332,11 @@ func (s *store) GuaranteedUpdate( if err != nil { return err } - mustCheckData = false - continue + if !bytes.Equal(data, origState.data) { + // original data changed, restart loop + mustCheckData = false + continue + } } return decode(s.codec, s.versioner, origState.data, out, origState.rev) } @@ -396,11 +399,11 @@ func (s *store) GetToList(ctx context.Context, key string, resourceVersion strin if err != nil { return storage.NewInternalError(err.Error()) } - elems := []*elemForDecode{{ - data: data, - rev: uint64(getResp.Kvs[0].ModRevision), - }} - if err := decodeList(elems, storage.SimpleFilter(pred), listPtr, s.codec, s.versioner); err != nil { + v, err := conversion.EnforcePtr(listPtr) + if err != nil || v.Kind() != reflect.Slice { + panic("need ptr to slice") + } + if err := appendListItem(v, data, uint64(getResp.Kvs[0].ModRevision), storage.SimpleFilter(pred), s.codec, s.versioner); err != nil { return err } // update version with cluster level revision @@ -429,25 +432,26 @@ func decodeContinue(continueValue, keyPrefix string) (fromKey string, rv int64, return "", 0, fmt.Errorf("continue key is not valid: %v", err) } switch c.APIVersion { - case "v1alpha1": + case "meta.k8s.io/v1": if c.ResourceVersion == 0 { - return "", 0, fmt.Errorf("continue key is not valid: incorrect encoded start resourceVersion (version v1alpha1)") + return "", 0, fmt.Errorf("continue key is not valid: incorrect encoded start resourceVersion (version meta.k8s.io/v1)") } if len(c.StartKey) == 0 { - return "", 0, fmt.Errorf("continue key is not valid: encoded start key empty (version v1alpha1)") + return "", 0, fmt.Errorf("continue key is not valid: encoded start key empty (version meta.k8s.io/v1)") } // defend against path traversal attacks by clients - path.Clean will ensure that startKey cannot // be at a higher level of the hierarchy, and so when we append the key prefix we will end up with // continue start key that is fully qualified and cannot range over anything less specific than // keyPrefix. 
- cleaned := path.Clean(c.StartKey) - if cleaned != c.StartKey || cleaned == "." || cleaned == "/" { - return "", 0, fmt.Errorf("continue key is not valid: %s", cleaned) + key := c.StartKey + if !strings.HasPrefix(key, "/") { + key = "/" + key } - if len(cleaned) == 0 { - return "", 0, fmt.Errorf("continue key is not valid: encoded start key empty (version 0)") + cleaned := path.Clean(key) + if cleaned != key { + return "", 0, fmt.Errorf("continue key is not valid: %s", c.StartKey) } - return keyPrefix + cleaned, c.ResourceVersion, nil + return keyPrefix + cleaned[1:], c.ResourceVersion, nil default: return "", 0, fmt.Errorf("continue key is not valid: server does not recognize this encoded version %q", c.APIVersion) } @@ -459,7 +463,7 @@ func encodeContinue(key, keyPrefix string, resourceVersion int64) (string, error if nextKey == key { return "", fmt.Errorf("unable to encode next field: the key and key prefix do not match") } - out, err := json.Marshal(&continueToken{APIVersion: "v1alpha1", ResourceVersion: resourceVersion, StartKey: nextKey}) + out, err := json.Marshal(&continueToken{APIVersion: "meta.k8s.io/v1", ResourceVersion: resourceVersion, StartKey: nextKey}) if err != nil { return "", err } @@ -472,7 +476,14 @@ func (s *store) List(ctx context.Context, key, resourceVersion string, pred stor if err != nil { return err } - key = path.Join(s.pathPrefix, key) + v, err := conversion.EnforcePtr(listPtr) + if err != nil || v.Kind() != reflect.Slice { + panic("need ptr to slice") + } + + if s.pathPrefix != "" { + key = path.Join(s.pathPrefix, key) + } // We need to make sure the key ended with "/" so that we only get children "directories". // e.g. if we have key "/a", "/a/b", "/ab", getting keys with prefix "/a" will return all three, // while with prefix "/a/" will return only "/a/b" which is the correct answer. 
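The decodeContinue/encodeContinue changes here move the token version to meta.k8s.io/v1 and keep the decoded start key anchored under the key prefix. A self-contained round-trip sketch under those rules; the struct shape, JSON tags, and base64 variant are assumptions for illustration, not the exact wire format:

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"path"
	"strings"
)

// continueToken mirrors the shape used by encodeContinue/decodeContinue;
// the JSON field names here are illustrative, not the exact wire tags.
type continueToken struct {
	APIVersion      string `json:"apiVersion"`
	ResourceVersion int64  `json:"resourceVersion"`
	StartKey        string `json:"startKey"`
}

func encode(startKey string, rv int64) (string, error) {
	out, err := json.Marshal(&continueToken{APIVersion: "meta.k8s.io/v1", ResourceVersion: rv, StartKey: startKey})
	if err != nil {
		return "", err
	}
	return base64.RawURLEncoding.EncodeToString(out), nil
}

func decode(token, keyPrefix string) (string, int64, error) {
	data, err := base64.RawURLEncoding.DecodeString(token)
	if err != nil {
		return "", 0, err
	}
	var c continueToken
	if err := json.Unmarshal(data, &c); err != nil {
		return "", 0, err
	}
	// Defend against path traversal: the cleaned start key must not escape keyPrefix.
	key := c.StartKey
	if !strings.HasPrefix(key, "/") {
		key = "/" + key
	}
	if cleaned := path.Clean(key); cleaned != key {
		return "", 0, fmt.Errorf("continue key is not valid: %s", c.StartKey)
	}
	return keyPrefix + key[1:], c.ResourceVersion, nil
}

func main() {
	// "\x00" appended to the last returned key means "start immediately after it".
	token, _ := encode("pods/default/pod-42\x00", 12345)
	key, rv, _ := decode(token, "/registry/")
	fmt.Printf("%q %d\n", key, rv)
}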
@@ -481,81 +492,166 @@ func (s *store) List(ctx context.Context, key, resourceVersion string, pred stor } keyPrefix := key + filter := storage.SimpleFilter(pred) + // set the appropriate clientv3 options to filter the returned data set + var paging bool options := make([]clientv3.OpOption, 0, 4) if s.pagingEnabled && pred.Limit > 0 { + paging = true options = append(options, clientv3.WithLimit(pred.Limit)) } + var returnedRV int64 switch { case s.pagingEnabled && len(pred.Continue) > 0: continueKey, continueRV, err := decodeContinue(pred.Continue, keyPrefix) if err != nil { - return err + return apierrors.NewBadRequest(fmt.Sprintf("invalid continue token: %v", err)) + } + + if len(resourceVersion) > 0 && resourceVersion != "0" { + return apierrors.NewBadRequest("specifying resource version is not allowed when using continue") } - options = append(options, clientv3.WithRange(clientv3.GetPrefixRangeEnd(key))) + rangeEnd := clientv3.GetPrefixRangeEnd(keyPrefix) + options = append(options, clientv3.WithRange(rangeEnd)) key = continueKey options = append(options, clientv3.WithRev(continueRV)) returnedRV = continueRV - case len(resourceVersion) > 0: - fromRV, err := strconv.ParseInt(resourceVersion, 10, 64) - if err != nil { - return fmt.Errorf("invalid resource version: %v", err) + case s.pagingEnabled && pred.Limit > 0: + if len(resourceVersion) > 0 { + fromRV, err := strconv.ParseInt(resourceVersion, 10, 64) + if err != nil { + return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err)) + } + if fromRV > 0 { + options = append(options, clientv3.WithRev(fromRV)) + } + returnedRV = fromRV } - options = append(options, clientv3.WithPrefix(), clientv3.WithRev(fromRV)) - returnedRV = fromRV + rangeEnd := clientv3.GetPrefixRangeEnd(keyPrefix) + options = append(options, clientv3.WithRange(rangeEnd)) default: - options = append(options, clientv3.WithPrefix()) - } + if len(resourceVersion) > 0 { + fromRV, err := strconv.ParseInt(resourceVersion, 10, 64) + if err != nil { + return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err)) + } + if fromRV > 0 { + options = append(options, clientv3.WithRev(fromRV)) + } + returnedRV = fromRV + } - getResp, err := s.client.KV.Get(ctx, key, options...) - if err != nil { - return interpretListError(err, len(pred.Continue) > 0) + options = append(options, clientv3.WithPrefix()) } - elems := make([]*elemForDecode, 0, len(getResp.Kvs)) - for _, kv := range getResp.Kvs { - data, _, err := s.transformer.TransformFromStorage(kv.Value, authenticatedDataString(kv.Key)) + // loop until we have filled the requested limit from etcd or there are no more results + var lastKey []byte + var hasMore bool + for { + getResp, err := s.client.KV.Get(ctx, key, options...) 
if err != nil { - utilruntime.HandleError(fmt.Errorf("unable to transform key %q: %v", kv.Key, err)) - continue + return interpretListError(err, len(pred.Continue) > 0) } + hasMore = getResp.More - elems = append(elems, &elemForDecode{ - data: data, - rev: uint64(kv.ModRevision), - }) - } + if len(getResp.Kvs) == 0 && getResp.More { + return fmt.Errorf("no results were found, but etcd indicated there were more values remaining") + } - if err := decodeList(elems, storage.SimpleFilter(pred), listPtr, s.codec, s.versioner); err != nil { - return err - } + // avoid small allocations for the result slice, since this can be called in many + // different contexts and we don't know how significantly the result will be filtered + if pred.Empty() { + growSlice(v, len(getResp.Kvs)) + } else { + growSlice(v, 2048, len(getResp.Kvs)) + } + + // take items from the response until the bucket is full, filtering as we go + for _, kv := range getResp.Kvs { + if paging && int64(v.Len()) >= pred.Limit { + hasMore = true + break + } + lastKey = kv.Key + + data, _, err := s.transformer.TransformFromStorage(kv.Value, authenticatedDataString(kv.Key)) + if err != nil { + utilruntime.HandleError(fmt.Errorf("unable to transform key %q: %v", kv.Key, err)) + continue + } - // indicate to the client which resource version was returned - if returnedRV == 0 { - returnedRV = getResp.Header.Revision + if err := appendListItem(v, data, uint64(kv.ModRevision), filter, s.codec, s.versioner); err != nil { + return err + } + } + + // indicate to the client which resource version was returned + if returnedRV == 0 { + returnedRV = getResp.Header.Revision + } + + // no more results remain or we didn't request paging + if !hasMore || !paging { + break + } + // we're paging but we have filled our bucket + if int64(v.Len()) >= pred.Limit { + break + } + key = string(lastKey) + "\x00" } - switch { - case !getResp.More: - // no continuation - return s.versioner.UpdateList(listObj, uint64(returnedRV), "") - case len(getResp.Kvs) == 0: - return fmt.Errorf("no results were found, but etcd indicated there were more values") - default: + + // instruct the client to begin querying from immediately after the last key we returned + // we never return a key that the client wouldn't be allowed to see + if hasMore { // we want to start immediately after the last key - // TODO: this reveals info about certain keys - key := string(getResp.Kvs[len(getResp.Kvs)-1].Key) - next, err := encodeContinue(key+"\x00", keyPrefix, returnedRV) + next, err := encodeContinue(string(lastKey)+"\x00", keyPrefix, returnedRV) if err != nil { return err } return s.versioner.UpdateList(listObj, uint64(returnedRV), next) } + + // no continuation + return s.versioner.UpdateList(listObj, uint64(returnedRV), "") +} + +// growSlice takes a slice value and grows its capacity up +// to the maximum of the passed sizes or maxCapacity, whichever +// is smaller. Above maxCapacity decisions about allocation are left +// to the Go runtime on append. This allows a caller to make an +// educated guess about the potential size of the total list while +// still avoiding overly aggressive initial allocation. If sizes +// is empty maxCapacity will be used as the size to grow. 
+func growSlice(v reflect.Value, maxCapacity int, sizes ...int) { + cap := v.Cap() + max := cap + for _, size := range sizes { + if size > max { + max = size + } + } + if len(sizes) == 0 || max > maxCapacity { + max = maxCapacity + } + if max <= cap { + return + } + if v.Len() > 0 { + extra := reflect.MakeSlice(v.Type(), 0, max) + reflect.Copy(extra, v) + v.Set(extra) + } else { + extra := reflect.MakeSlice(v.Type(), 0, max) + v.Set(extra) + } } // Watch implements storage.Interface.Watch. @@ -677,23 +773,16 @@ func decode(codec runtime.Codec, versioner storage.Versioner, value []byte, objP return nil } -// decodeList decodes a list of values into a list of objects, with resource version set to corresponding rev. -// On success, ListPtr would be set to the list of objects. -func decodeList(elems []*elemForDecode, filter storage.FilterFunc, listPtr interface{}, codec runtime.Codec, versioner storage.Versioner) error { - v, err := conversion.EnforcePtr(listPtr) - if err != nil || v.Kind() != reflect.Slice { - panic("need ptr to slice") +// appendListItem decodes and appends the object (if it passes filter) to v, which must be a slice. +func appendListItem(v reflect.Value, data []byte, rev uint64, filter storage.FilterFunc, codec runtime.Codec, versioner storage.Versioner) error { + obj, _, err := codec.Decode(data, nil, reflect.New(v.Type().Elem()).Interface().(runtime.Object)) + if err != nil { + return err } - for _, elem := range elems { - obj, _, err := codec.Decode(elem.data, nil, reflect.New(v.Type().Elem()).Interface().(runtime.Object)) - if err != nil { - return err - } - // being unable to set the version does not prevent the object from being extracted - versioner.UpdateObject(obj, elem.rev) - if filter(obj) { - v.Set(reflect.Append(v, reflect.ValueOf(obj).Elem())) - } + // being unable to set the version does not prevent the object from being extracted + versioner.UpdateObject(obj, rev) + if filter(obj) { + v.Set(reflect.Append(v, reflect.ValueOf(obj).Elem())) } return nil } diff --git a/vendor/k8s.io/apiserver/pkg/storage/names/BUILD b/vendor/k8s.io/apiserver/pkg/storage/names/BUILD index 88ee18bb1577..4e7511282f31 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/names/BUILD +++ b/vendor/k8s.io/apiserver/pkg/storage/names/BUILD @@ -9,12 +9,14 @@ load( go_test( name = "go_default_test", srcs = ["generate_test.go"], + importpath = "k8s.io/apiserver/pkg/storage/names", library = ":go_default_library", ) go_library( name = "go_default_library", srcs = ["generate.go"], + importpath = "k8s.io/apiserver/pkg/storage/names", deps = ["//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library"], ) diff --git a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/BUILD b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/BUILD index 1609b74c2b96..6b5bc7dc9a5d 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/BUILD +++ b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["config.go"], + importpath = "k8s.io/apiserver/pkg/storage/storagebackend", deps = [ "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/value:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/config.go b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/config.go index 919b11571cb6..b725985f693f 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/config.go +++ b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/config.go @@ -17,6 +17,8 
@@ limitations under the License. package storagebackend import ( + "time" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/storage/value" ) @@ -25,6 +27,8 @@ const ( StorageTypeUnset = "" StorageTypeETCD2 = "etcd2" StorageTypeETCD3 = "etcd3" + + DefaultCompactInterval = 5 * time.Minute ) // Config is configuration for creating a storage backend. @@ -51,19 +55,23 @@ type Config struct { // We will drop the cache once using protobuf. DeserializationCacheSize int - Codec runtime.Codec - Copier runtime.ObjectCopier + Codec runtime.Codec // Transformer allows the value to be transformed prior to persisting into etcd. Transformer value.Transformer + + // CompactionInterval is an interval of requesting compaction from apiserver. + // If the value is 0, no compaction will be issued. + CompactionInterval time.Duration } -func NewDefaultConfig(prefix string, copier runtime.ObjectCopier, codec runtime.Codec) *Config { +func NewDefaultConfig(prefix string, codec runtime.Codec) *Config { return &Config{ Prefix: prefix, // Default cache size to 0 - if unset, its size will be set based on target // memory usage. DeserializationCacheSize: 0, - Copier: copier, - Codec: codec, + Codec: codec, + CompactionInterval: DefaultCompactInterval, + Quorum: true, } } diff --git a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/BUILD b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/BUILD index 38f9bb641052..474d8990339a 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/BUILD +++ b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["tls_test.go"], + importpath = "k8s.io/apiserver/pkg/storage/storagebackend/factory", library = ":go_default_library", deps = [ "//vendor/github.com/coreos/etcd/integration:go_default_library", @@ -32,6 +33,7 @@ go_library( "etcd3.go", "factory.go", ], + importpath = "k8s.io/apiserver/pkg/storage/storagebackend/factory", deps = [ "//vendor/github.com/coreos/etcd/client:go_default_library", "//vendor/github.com/coreos/etcd/clientv3:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd2.go b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd2.go index 874ff53b892d..84b0381049b9 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd2.go +++ b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd2.go @@ -39,7 +39,7 @@ func newETCD2Storage(c storagebackend.Config) (storage.Interface, DestroyFunc, e if err != nil { return nil, nil, err } - s := etcd.NewEtcdStorage(client, c.Codec, c.Prefix, c.Quorum, c.DeserializationCacheSize, c.Copier, etcd.IdentityTransformer) + s := etcd.NewEtcdStorage(client, c.Codec, c.Prefix, c.Quorum, c.DeserializationCacheSize, etcd.IdentityTransformer) return s, tr.CloseIdleConnections, nil } diff --git a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go index 9ec49f7df01c..a5ccbf2fdd49 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go +++ b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go @@ -51,7 +51,7 @@ func newETCD3Storage(c storagebackend.Config) (storage.Interface, DestroyFunc, e return nil, nil, err } ctx, cancel := context.WithCancel(context.Background()) - etcd3.StartCompactor(ctx, client) + etcd3.StartCompactor(ctx, client, c.CompactionInterval) destroyFunc := func() { cancel() client.Close() diff --git 
a/vendor/k8s.io/apiserver/pkg/storage/value/BUILD b/vendor/k8s.io/apiserver/pkg/storage/value/BUILD index 0933e97415b6..22d5ec1761a1 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/value/BUILD +++ b/vendor/k8s.io/apiserver/pkg/storage/value/BUILD @@ -9,12 +9,14 @@ load( go_test( name = "go_default_test", srcs = ["transformer_test.go"], + importpath = "k8s.io/apiserver/pkg/storage/value", library = ":go_default_library", ) go_library( name = "go_default_library", srcs = ["transformer.go"], + importpath = "k8s.io/apiserver/pkg/storage/value", ) filegroup( diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/aes/BUILD b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/aes/BUILD index 430d6438de9a..4c3bc7a875ff 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/aes/BUILD +++ b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/aes/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["aes_test.go"], + importpath = "k8s.io/apiserver/pkg/storage/value/encrypt/aes", library = ":go_default_library", deps = ["//vendor/k8s.io/apiserver/pkg/storage/value:go_default_library"], ) @@ -16,6 +17,7 @@ go_test( go_library( name = "go_default_library", srcs = ["aes.go"], + importpath = "k8s.io/apiserver/pkg/storage/value/encrypt/aes", deps = ["//vendor/k8s.io/apiserver/pkg/storage/value:go_default_library"], ) diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/BUILD b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/BUILD index 3d45be121cfe..3ceb65a02c64 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/BUILD +++ b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/BUILD @@ -9,6 +9,7 @@ load( go_library( name = "go_default_library", srcs = ["envelope.go"], + importpath = "k8s.io/apiserver/pkg/storage/value/encrypt/envelope", deps = [ "//vendor/github.com/hashicorp/golang-lru:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/value:go_default_library", @@ -18,6 +19,7 @@ go_library( go_test( name = "go_default_test", srcs = ["envelope_test.go"], + importpath = "k8s.io/apiserver/pkg/storage/value/encrypt/envelope", library = ":go_default_library", deps = [ "//vendor/k8s.io/apiserver/pkg/storage/value:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/envelope.go b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/envelope.go index acb26e6f5edb..9782e44b66d3 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/envelope.go +++ b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/envelope.go @@ -46,9 +46,6 @@ type envelopeTransformer struct { // transformers is a thread-safe LRU cache which caches decrypted DEKs indexed by their encrypted form. transformers *lru.Cache - // cacheSize is the maximum number of DEKs that are cached. - cacheSize int - // baseTransformerFunc creates a new transformer for encrypting the data with the DEK. 
baseTransformerFunc func(cipher.Block) value.Transformer } @@ -68,7 +65,6 @@ func NewEnvelopeTransformer(envelopeService Service, cacheSize int, baseTransfor return &envelopeTransformer{ envelopeService: envelopeService, transformers: cache, - cacheSize: cacheSize, baseTransformerFunc: baseTransformerFunc, }, nil } diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/identity/BUILD b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/identity/BUILD index 570402289905..69c617f105ed 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/identity/BUILD +++ b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/identity/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["identity.go"], + importpath = "k8s.io/apiserver/pkg/storage/value/encrypt/identity", deps = ["//vendor/k8s.io/apiserver/pkg/storage/value:go_default_library"], ) diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/secretbox/BUILD b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/secretbox/BUILD index 79308921a350..7c26c0808c03 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/secretbox/BUILD +++ b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/secretbox/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["secretbox_test.go"], + importpath = "k8s.io/apiserver/pkg/storage/value/encrypt/secretbox", library = ":go_default_library", deps = ["//vendor/k8s.io/apiserver/pkg/storage/value:go_default_library"], ) @@ -16,6 +17,7 @@ go_test( go_library( name = "go_default_library", srcs = ["secretbox.go"], + importpath = "k8s.io/apiserver/pkg/storage/value/encrypt/secretbox", deps = [ "//vendor/golang.org/x/crypto/nacl/secretbox:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/value:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/secretbox/secretbox.go b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/secretbox/secretbox.go index f53aa2c3701d..0eaa6282457e 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/secretbox/secretbox.go +++ b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/secretbox/secretbox.go @@ -27,7 +27,7 @@ import ( ) // secretbox implements at rest encryption of the provided values given a 32 byte secret key. -// Uses a standard 24 byte nonce (placed at the the beginning of the cipher text) generated +// Uses a standard 24 byte nonce (placed at the beginning of the cipher text) generated // from crypto/rand. Does not perform authentication of the data at rest. 
type secretboxTransformer struct { key [32]byte diff --git a/vendor/k8s.io/apiserver/pkg/util/feature/BUILD b/vendor/k8s.io/apiserver/pkg/util/feature/BUILD index 7788cf0af4be..60a1a866d83e 100644 --- a/vendor/k8s.io/apiserver/pkg/util/feature/BUILD +++ b/vendor/k8s.io/apiserver/pkg/util/feature/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["feature_gate_test.go"], + importpath = "k8s.io/apiserver/pkg/util/feature", library = ":go_default_library", deps = ["//vendor/github.com/spf13/pflag:go_default_library"], ) @@ -16,6 +17,7 @@ go_test( go_library( name = "go_default_library", srcs = ["feature_gate.go"], + importpath = "k8s.io/apiserver/pkg/util/feature", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go b/vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go index ca926d0eec66..95ca0e77aaae 100644 --- a/vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go +++ b/vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go @@ -22,6 +22,7 @@ import ( "strconv" "strings" "sync" + "sync/atomic" "github.com/golang/glog" "github.com/spf13/pflag" @@ -46,7 +47,7 @@ var ( } // Special handling for a few gates. - specialFeatures = map[Feature]func(f *featureGate, val bool){ + specialFeatures = map[Feature]func(known map[Feature]FeatureSpec, enabled map[Feature]bool, val bool){ allAlphaGate: setUnsetAlphaGates, } @@ -76,6 +77,8 @@ type FeatureGate interface { // Set parses and stores flag gates for known features // from a string like feature1=true,feature2=false,... Set(value string) error + // SetFromMap stores flag gates for known features from a map[string]bool or returns an error + SetFromMap(m map[string]bool) error // Enabled returns true if the key is enabled. Enabled(key Feature) bool // Add adds features to the featureGate. @@ -86,21 +89,23 @@ type FeatureGate interface { // featureGate implements FeatureGate as well as pflag.Value for flag parsing. type featureGate struct { - lock sync.RWMutex - - known map[Feature]FeatureSpec - special map[Feature]func(*featureGate, bool) - enabled map[Feature]bool - - // is set to true when AddFlag is called. 
Note: initialization is not go-routine safe, lookup is + special map[Feature]func(map[Feature]FeatureSpec, map[Feature]bool, bool) + + // lock guards writes to known, enabled, and reads/writes of closed + lock sync.Mutex + // known holds a map[Feature]FeatureSpec + known *atomic.Value + // enabled holds a map[Feature]bool + enabled *atomic.Value + // closed is set to true when AddFlag is called, and prevents subsequent calls to Add closed bool } -func setUnsetAlphaGates(f *featureGate, val bool) { - for k, v := range f.known { +func setUnsetAlphaGates(known map[Feature]FeatureSpec, enabled map[Feature]bool, val bool) { + for k, v := range known { if v.PreRelease == Alpha { - if _, found := f.enabled[k]; !found { - f.enabled[k] = val + if _, found := enabled[k]; !found { + enabled[k] = val } } } @@ -110,30 +115,49 @@ func setUnsetAlphaGates(f *featureGate, val bool) { var _ pflag.Value = &featureGate{} func NewFeatureGate() *featureGate { + known := map[Feature]FeatureSpec{} + for k, v := range defaultFeatures { + known[k] = v + } + + knownValue := &atomic.Value{} + knownValue.Store(known) + + enabled := map[Feature]bool{} + enabledValue := &atomic.Value{} + enabledValue.Store(enabled) + f := &featureGate{ - known: map[Feature]FeatureSpec{}, + known: knownValue, special: specialFeatures, - enabled: map[Feature]bool{}, - } - for k, v := range defaultFeatures { - f.known[k] = v + enabled: enabledValue, } return f } -// Set Parses a string of the form "key1=value1,key2=value2,..." into a +// Set parses a string of the form "key1=value1,key2=value2,..." into a // map[string]bool of known keys or returns an error. func (f *featureGate) Set(value string) error { f.lock.Lock() defer f.lock.Unlock() + // Copy existing state + known := map[Feature]FeatureSpec{} + for k, v := range f.known.Load().(map[Feature]FeatureSpec) { + known[k] = v + } + enabled := map[Feature]bool{} + for k, v := range f.enabled.Load().(map[Feature]bool) { + enabled[k] = v + } + for _, s := range strings.Split(value, ",") { if len(s) == 0 { continue } arr := strings.SplitN(s, "=", 2) k := Feature(strings.TrimSpace(arr[0])) - _, ok := f.known[Feature(k)] + _, ok := known[k] if !ok { return fmt.Errorf("unrecognized key: %s", k) } @@ -145,25 +169,62 @@ func (f *featureGate) Set(value string) error { if err != nil { return fmt.Errorf("invalid value of %s: %s, err: %v", k, v, err) } - f.enabled[k] = boolValue + enabled[k] = boolValue + + // Handle "special" features like "all alpha gates" + if fn, found := f.special[k]; found { + fn(known, enabled, boolValue) + } + } + + // Persist changes + f.known.Store(known) + f.enabled.Store(enabled) + + glog.Infof("feature gates: %v", enabled) + return nil +} + +// SetFromMap stores flag gates for known features from a map[string]bool or returns an error +func (f *featureGate) SetFromMap(m map[string]bool) error { + f.lock.Lock() + defer f.lock.Unlock() + // Copy existing state + known := map[Feature]FeatureSpec{} + for k, v := range f.known.Load().(map[Feature]FeatureSpec) { + known[k] = v + } + enabled := map[Feature]bool{} + for k, v := range f.enabled.Load().(map[Feature]bool) { + enabled[k] = v + } + + for k, v := range m { + k := Feature(k) + _, ok := known[k] + if !ok { + return fmt.Errorf("unrecognized key: %s", k) + } + enabled[k] = v // Handle "special" features like "all alpha gates" if fn, found := f.special[k]; found { - fn(f, boolValue) + fn(known, enabled, v) } } + // Persist changes + f.known.Store(known) + f.enabled.Store(enabled) + glog.Infof("feature gates: %v", f.enabled) 
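SetFromMap above is the programmatic counterpart to the key1=true,key2=false syntax handled by Set, and both now copy-on-write the atomic.Value-backed maps. A short usage sketch, assuming the built-in all-alpha gate is registered under the name AllAlpha:

package main

import (
	"fmt"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
)

func main() {
	gate := utilfeature.NewFeatureGate()
	// "AllAlpha" is assumed to be the special gate that toggles every
	// alpha feature; unknown keys make SetFromMap return an error.
	if err := gate.SetFromMap(map[string]bool{"AllAlpha": true}); err != nil {
		fmt.Println("unrecognized gate:", err)
		return
	}
	fmt.Println("AllAlpha enabled:", gate.Enabled("AllAlpha"))
}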
return nil } // String returns a string containing all enabled feature gates, formatted as "key1=value1,key2=value2,...". func (f *featureGate) String() string { - f.lock.RLock() - defer f.lock.RUnlock() - pairs := []string{} - for k, v := range f.enabled { + for k, v := range f.enabled.Load().(map[Feature]bool) { pairs = append(pairs, fmt.Sprintf("%s=%t", k, v)) } sort.Strings(pairs) @@ -183,31 +244,35 @@ func (f *featureGate) Add(features map[Feature]FeatureSpec) error { return fmt.Errorf("cannot add a feature gate after adding it to the flag set") } + // Copy existing state + known := map[Feature]FeatureSpec{} + for k, v := range f.known.Load().(map[Feature]FeatureSpec) { + known[k] = v + } + for name, spec := range features { - if existingSpec, found := f.known[name]; found { + if existingSpec, found := known[name]; found { if existingSpec == spec { continue } return fmt.Errorf("feature gate %q with different spec already exists: %v", name, existingSpec) } - f.known[name] = spec + known[name] = spec } + + // Persist updated state + f.known.Store(known) + return nil } // Enabled returns true if the key is enabled. func (f *featureGate) Enabled(key Feature) bool { - f.lock.RLock() - defer f.lock.RUnlock() - - defaultValue := f.known[key].Default - if f.enabled != nil { - if v, ok := f.enabled[key]; ok { - return v - } + if v, ok := f.enabled.Load().(map[Feature]bool)[key]; ok { + return v } - return defaultValue + return f.known.Load().(map[Feature]FeatureSpec)[key].Default } // AddFlag adds a flag for setting global feature gates to the specified FlagSet. @@ -224,10 +289,8 @@ func (f *featureGate) AddFlag(fs *pflag.FlagSet) { // KnownFeatures returns a slice of strings describing the FeatureGate's known features. func (f *featureGate) KnownFeatures() []string { - f.lock.RLock() - defer f.lock.RUnlock() var known []string - for k, v := range f.known { + for k, v := range f.known.Load().(map[Feature]FeatureSpec) { pre := "" if v.PreRelease != GA { pre = fmt.Sprintf("%s - ", v.PreRelease) diff --git a/vendor/k8s.io/apiserver/pkg/util/flag/BUILD b/vendor/k8s.io/apiserver/pkg/util/flag/BUILD index 83a353d935df..57ab4058da16 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flag/BUILD +++ b/vendor/k8s.io/apiserver/pkg/util/flag/BUILD @@ -8,7 +8,14 @@ load( go_test( name = "go_default_test", - srcs = ["namedcertkey_flag_test.go"], + srcs = [ + "colon_separated_multimap_string_string_test.go", + "langle_separated_map_string_string_test.go", + "map_string_bool_test.go", + "map_string_string_test.go", + "namedcertkey_flag_test.go", + ], + importpath = "k8s.io/apiserver/pkg/util/flag", library = ":go_default_library", deps = ["//vendor/github.com/spf13/pflag:go_default_library"], ) @@ -16,12 +23,18 @@ go_test( go_library( name = "go_default_library", srcs = [ + "colon_separated_multimap_string_string.go", "configuration_map.go", "flags.go", + "langle_separated_map_string_string.go", + "map_string_bool.go", + "map_string_string.go", "namedcertkey_flag.go", + "omitempty.go", "string_flag.go", "tristate.go", ], + importpath = "k8s.io/apiserver/pkg/util/flag", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", diff --git a/vendor/k8s.io/apiserver/pkg/util/flag/colon_separated_multimap_string_string.go b/vendor/k8s.io/apiserver/pkg/util/flag/colon_separated_multimap_string_string.go new file mode 100644 index 000000000000..bd2cf5f87521 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flag/colon_separated_multimap_string_string.go @@ -0,0 
+1,102 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flag + +import ( + "fmt" + "sort" + "strings" +) + +// ColonSeparatedMultimapStringString supports setting a map[string][]string from an encoding +// that separates keys from values with ':' and separates key-value pairs with ','. +// A key can be repeated multiple times, in which case the values are appended to a +// slice of strings associated with that key. Items in the list associated with a given +// key will appear in the order provided. +// For example: `a:hello,b:again,c:world,b:beautiful` results in `{"a": ["hello"], "b": ["again", "beautiful"], "c": ["world"]}` +// The first call to Set will clear the map before adding entries; subsequent calls will simply append to the map. +// This makes it possible to override default values with a command-line option rather than appending to defaults, +// while still allowing the distribution of key-value pairs across multiple flag invocations. +// For example: `--flag "a:hello" --flag "b:again" --flag "b:beautiful" --flag "c:world"` results in `{"a": ["hello"], "b": ["again", "beautiful"], "c": ["world"]}` +type ColonSeparatedMultimapStringString struct { + Multimap *map[string][]string + initialized bool // set to true after the first Set call +} + +// NewColonSeparatedMultimapStringString takes a pointer to a map[string][]string and returns the +// ColonSeparatedMultimapStringString flag parsing shim for that map. 
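A short usage sketch of the ColonSeparatedMultimapStringString type documented above, wired into a pflag FlagSet; the flag name multi and the default map contents are made up for illustration:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
	utilflag "k8s.io/apiserver/pkg/util/flag"
)

func main() {
	// Default entries are cleared by the first --multi on the command line.
	target := map[string][]string{"a": {"default"}}

	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
	fs.Var(utilflag.NewColonSeparatedMultimapStringString(&target), "multi", "repeatable key:value pairs")

	// Repeated keys accumulate across flag occurrences.
	if err := fs.Parse([]string{"--multi", "a:hello,b:again", "--multi", "b:beautiful"}); err != nil {
		panic(err)
	}
	fmt.Println(target["a"], target["b"]) // [hello] [again beautiful]
}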
+func NewColonSeparatedMultimapStringString(m *map[string][]string) *ColonSeparatedMultimapStringString { + return &ColonSeparatedMultimapStringString{Multimap: m} +} + +// Set implements github.com/spf13/pflag.Value +func (m *ColonSeparatedMultimapStringString) Set(value string) error { + if m.Multimap == nil { + return fmt.Errorf("no target (nil pointer to map[string][]string)") + } + if !m.initialized || *m.Multimap == nil { + // clear default values, or allocate if no existing map + *m.Multimap = make(map[string][]string) + m.initialized = true + } + for _, pair := range strings.Split(value, ",") { + if len(pair) == 0 { + continue + } + kv := strings.SplitN(pair, ":", 2) + if len(kv) != 2 { + return fmt.Errorf("malformed pair, expect string:string") + } + k := strings.TrimSpace(kv[0]) + v := strings.TrimSpace(kv[1]) + (*m.Multimap)[k] = append((*m.Multimap)[k], v) + } + return nil +} + +// String implements github.com/spf13/pflag.Value +func (m *ColonSeparatedMultimapStringString) String() string { + type kv struct { + k string + v string + } + kvs := make([]kv, 0, len(*m.Multimap)) + for k, vs := range *m.Multimap { + for i := range vs { + kvs = append(kvs, kv{k: k, v: vs[i]}) + } + } + // stable sort by keys, order of values should be preserved + sort.SliceStable(kvs, func(i, j int) bool { + return kvs[i].k < kvs[j].k + }) + pairs := make([]string, 0, len(kvs)) + for i := range kvs { + pairs = append(pairs, fmt.Sprintf("%s:%s", kvs[i].k, kvs[i].v)) + } + return strings.Join(pairs, ",") +} + +// Type implements github.com/spf13/pflag.Value +func (m *ColonSeparatedMultimapStringString) Type() string { + return "colonSeparatedMultimapStringString" +} + +// Empty implements OmitEmpty +func (m *ColonSeparatedMultimapStringString) Empty() bool { + return len(*m.Multimap) == 0 +} diff --git a/vendor/k8s.io/apiserver/pkg/util/flag/flags.go b/vendor/k8s.io/apiserver/pkg/util/flag/flags.go index 6dd62bdc0891..55a3ed34a8e7 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flag/flags.go +++ b/vendor/k8s.io/apiserver/pkg/util/flag/flags.go @@ -49,6 +49,6 @@ func InitFlags() { pflag.CommandLine.AddGoFlagSet(goflag.CommandLine) pflag.Parse() pflag.VisitAll(func(flag *pflag.Flag) { - glog.V(4).Infof("FLAG: --%s=%q", flag.Name, flag.Value) + glog.V(2).Infof("FLAG: --%s=%q", flag.Name, flag.Value) }) } diff --git a/vendor/k8s.io/apiserver/pkg/util/flag/langle_separated_map_string_string.go b/vendor/k8s.io/apiserver/pkg/util/flag/langle_separated_map_string_string.go new file mode 100644 index 000000000000..bf8dbfb9bfc8 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/flag/langle_separated_map_string_string.go @@ -0,0 +1,82 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package flag + +import ( + "fmt" + "sort" + "strings" +) + +// LangleSeparatedMapStringString can be set from the command line with the format `--flag "string 0 && version == v1.SchemeGroupVersion { - path = "/swaggerapi" + d.LegacyPrefix + "/" + version.Version - } else { - path = "/swaggerapi/apis/" + version.Group + "/" + version.Version - } - - body, err := d.restClient.Get().AbsPath(path).Do().Raw() - if err != nil { - return nil, err - } - var schema swagger.ApiDeclaration - err = json.Unmarshal(body, &schema) - if err != nil { - return nil, fmt.Errorf("got '%s': %v", string(body), err) - } - return &schema, nil -} - // OpenAPISchema fetches the open api schema using a rest client and parses the proto. func (d *DiscoveryClient) OpenAPISchema() (*openapi_v2.Document, error) { data, err := d.restClient.Get().AbsPath("/swagger-2.0.0.pb-v1").Do().Raw() diff --git a/vendor/k8s.io/client-go/discovery/unstructured.go b/vendor/k8s.io/client-go/discovery/unstructured.go index ee72d668bcfa..fa7f2ec061b0 100644 --- a/vendor/k8s.io/client-go/discovery/unstructured.go +++ b/vendor/k8s.io/client-go/discovery/unstructured.go @@ -17,22 +17,28 @@ limitations under the License. package discovery import ( - "fmt" + "reflect" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) -// UnstructuredObjectTyper provides a runtime.ObjectTyper implmentation for +// UnstructuredObjectTyper provides a runtime.ObjectTyper implementation for // runtime.Unstructured object based on discovery information. type UnstructuredObjectTyper struct { registered map[schema.GroupVersionKind]bool + typers []runtime.ObjectTyper } // NewUnstructuredObjectTyper returns a runtime.ObjectTyper for -// unstructred objects based on discovery information. -func NewUnstructuredObjectTyper(groupResources []*APIGroupResources) *UnstructuredObjectTyper { - dot := &UnstructuredObjectTyper{registered: make(map[schema.GroupVersionKind]bool)} +// unstructured objects based on discovery information. It accepts a list of fallback typers +// for handling objects that are not runtime.Unstructured. It does not delegate the Recognizes +// check, only ObjectKinds. +func NewUnstructuredObjectTyper(groupResources []*APIGroupResources, typers ...runtime.ObjectTyper) *UnstructuredObjectTyper { + dot := &UnstructuredObjectTyper{ + registered: make(map[schema.GroupVersionKind]bool), + typers: typers, + } for _, group := range groupResources { for _, discoveryVersion := range group.Group.Versions { resources, ok := group.VersionedResources[discoveryVersion.Version] @@ -49,29 +55,35 @@ func NewUnstructuredObjectTyper(groupResources []*APIGroupResources) *Unstructur return dot } -// ObjectKind returns the group,version,kind of the provided object, or an error -// if the object in not runtime.Unstructured or has no group,version,kind -// information. -func (d *UnstructuredObjectTyper) ObjectKind(obj runtime.Object) (schema.GroupVersionKind, error) { - if _, ok := obj.(runtime.Unstructured); !ok { - return schema.GroupVersionKind{}, fmt.Errorf("type %T is invalid for dynamic object typer", obj) - } - - return obj.GetObjectKind().GroupVersionKind(), nil -} - // ObjectKinds returns a slice of one element with the group,version,kind of the // provided object, or an error if the object is not runtime.Unstructured or // has no group,version,kind information. unversionedType will always be false // because runtime.Unstructured object should always have group,version,kind // information set. 
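The ObjectKinds rework below lets UnstructuredObjectTyper delegate non-Unstructured objects to fallback typers. A hedged construction sketch using a scheme-based fallback and an empty group-resources list:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/kubernetes/scheme"
)

func main() {
	// scheme.Scheme implements runtime.ObjectTyper, so typed objects that the
	// unstructured typer would otherwise reject fall through to it.
	typer := discovery.NewUnstructuredObjectTyper(nil, scheme.Scheme)

	u := &unstructured.Unstructured{}
	u.SetGroupVersionKind(schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"})

	gvks, unversioned, err := typer.ObjectKinds(u)
	fmt.Println(gvks, unversioned, err)
}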
func (d *UnstructuredObjectTyper) ObjectKinds(obj runtime.Object) (gvks []schema.GroupVersionKind, unversionedType bool, err error) { - gvk, err := d.ObjectKind(obj) - if err != nil { - return nil, false, err + if _, ok := obj.(runtime.Unstructured); ok { + gvk := obj.GetObjectKind().GroupVersionKind() + if len(gvk.Kind) == 0 { + return nil, false, runtime.NewMissingKindErr("object has no kind field ") + } + if len(gvk.Version) == 0 { + return nil, false, runtime.NewMissingVersionErr("object has no apiVersion field") + } + return []schema.GroupVersionKind{gvk}, false, nil } - - return []schema.GroupVersionKind{gvk}, false, nil + var lastErr error + for _, typer := range d.typers { + gvks, unversioned, err := typer.ObjectKinds(obj) + if err != nil { + lastErr = err + continue + } + return gvks, unversioned, nil + } + if lastErr == nil { + lastErr = runtime.NewNotRegisteredErrForType(reflect.TypeOf(obj)) + } + return nil, false, lastErr } // Recognizes returns true if the provided group,version,kind was in the @@ -80,16 +92,4 @@ func (d *UnstructuredObjectTyper) Recognizes(gvk schema.GroupVersionKind) bool { return d.registered[gvk] } -// IsUnversioned returns false always because runtime.Unstructured objects -// should always have group,version,kind information set. ok will be true if the -// object's group,version,kind is api.Registry. -func (d *UnstructuredObjectTyper) IsUnversioned(obj runtime.Object) (unversioned bool, ok bool) { - gvk, err := d.ObjectKind(obj) - if err != nil { - return false, false - } - - return false, d.registered[gvk] -} - var _ runtime.ObjectTyper = &UnstructuredObjectTyper{} diff --git a/vendor/k8s.io/client-go/dynamic/BUILD b/vendor/k8s.io/client-go/dynamic/BUILD index 788462039df1..a0271859e3b2 100644 --- a/vendor/k8s.io/client-go/dynamic/BUILD +++ b/vendor/k8s.io/client-go/dynamic/BUILD @@ -12,6 +12,7 @@ go_test( "client_test.go", "dynamic_util_test.go", ], + importpath = "k8s.io/client-go/dynamic", library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -33,6 +34,7 @@ go_library( "client_pool.go", "dynamic_util.go", ], + importpath = "k8s.io/client-go/dynamic", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", diff --git a/vendor/k8s.io/client-go/dynamic/client_pool.go b/vendor/k8s.io/client-go/dynamic/client_pool.go index 277dad49cc68..a5e1b2978c5e 100644 --- a/vendor/k8s.io/client-go/dynamic/client_pool.go +++ b/vendor/k8s.io/client-go/dynamic/client_pool.go @@ -26,7 +26,7 @@ import ( // ClientPool manages a pool of dynamic clients. type ClientPool interface { - // ClientForGroupVersionKind returns a client configured for the specified groupVersionResource. + // ClientForGroupVersionResource returns a client configured for the specified groupVersionResource. // Resource may be empty. ClientForGroupVersionResource(resource schema.GroupVersionResource) (Interface, error) // ClientForGroupVersionKind returns a client configured for the specified groupVersionKind. @@ -56,7 +56,7 @@ type clientPoolImpl struct { mapper meta.RESTMapper } -// NewClientPool returns a ClientPool from the specified config. It reuses clients for the the same +// NewClientPool returns a ClientPool from the specified config. It reuses clients for the same // group version. It is expected this type may be wrapped by specific logic that special cases certain // resources or groups. 
func NewClientPool(config *restclient.Config, mapper meta.RESTMapper, apiPathResolverFunc APIPathResolverFunc) ClientPool { diff --git a/vendor/k8s.io/client-go/dynamic/dynamic_util.go b/vendor/k8s.io/client-go/dynamic/dynamic_util.go index c834dbb0dffa..c2cf0daeae09 100644 --- a/vendor/k8s.io/client-go/dynamic/dynamic_util.go +++ b/vendor/k8s.io/client-go/dynamic/dynamic_util.go @@ -84,7 +84,7 @@ func NewObjectTyper(resources []*metav1.APIResourceList) (runtime.ObjectTyper, e // information. func (ot *ObjectTyper) ObjectKinds(obj runtime.Object) ([]schema.GroupVersionKind, bool, error) { if _, ok := obj.(*unstructured.Unstructured); !ok { - return nil, false, fmt.Errorf("type %T is invalid for dynamic object typer", obj) + return nil, false, fmt.Errorf("type %T is invalid for determining dynamic object types", obj) } return []schema.GroupVersionKind{obj.GetObjectKind().GroupVersionKind()}, false, nil } diff --git a/vendor/k8s.io/client-go/informers/BUILD b/vendor/k8s.io/client-go/informers/BUILD index 1b1df1450b86..99486a1c8d15 100644 --- a/vendor/k8s.io/client-go/informers/BUILD +++ b/vendor/k8s.io/client-go/informers/BUILD @@ -11,8 +11,11 @@ go_library( "factory.go", "generic.go", ], + importpath = "k8s.io/client-go/informers", deps = [ "//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library", + "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", + "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/apps/v1beta1:go_default_library", "//vendor/k8s.io/api/apps/v1beta2:go_default_library", "//vendor/k8s.io/api/autoscaling/v1:go_default_library", @@ -22,6 +25,7 @@ go_library( "//vendor/k8s.io/api/batch/v2alpha1:go_default_library", "//vendor/k8s.io/api/certificates/v1beta1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/api/events/v1beta1:go_default_library", "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/api/networking/v1:go_default_library", "//vendor/k8s.io/api/policy/v1beta1:go_default_library", @@ -31,7 +35,9 @@ go_library( "//vendor/k8s.io/api/scheduling/v1alpha1:go_default_library", "//vendor/k8s.io/api/settings/v1alpha1:go_default_library", "//vendor/k8s.io/api/storage/v1:go_default_library", + "//vendor/k8s.io/api/storage/v1alpha1:go_default_library", "//vendor/k8s.io/api/storage/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/client-go/informers/admissionregistration:go_default_library", @@ -40,6 +46,7 @@ go_library( "//vendor/k8s.io/client-go/informers/batch:go_default_library", "//vendor/k8s.io/client-go/informers/certificates:go_default_library", "//vendor/k8s.io/client-go/informers/core:go_default_library", + "//vendor/k8s.io/client-go/informers/events:go_default_library", "//vendor/k8s.io/client-go/informers/extensions:go_default_library", "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", "//vendor/k8s.io/client-go/informers/networking:go_default_library", @@ -70,6 +77,7 @@ filegroup( "//staging/src/k8s.io/client-go/informers/batch:all-srcs", "//staging/src/k8s.io/client-go/informers/certificates:all-srcs", "//staging/src/k8s.io/client-go/informers/core:all-srcs", + "//staging/src/k8s.io/client-go/informers/events:all-srcs", "//staging/src/k8s.io/client-go/informers/extensions:all-srcs", 
"//staging/src/k8s.io/client-go/informers/internalinterfaces:all-srcs", "//staging/src/k8s.io/client-go/informers/networking:all-srcs", diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/BUILD b/vendor/k8s.io/client-go/informers/admissionregistration/BUILD index 283605130986..6f93c10a26da 100644 --- a/vendor/k8s.io/client-go/informers/admissionregistration/BUILD +++ b/vendor/k8s.io/client-go/informers/admissionregistration/BUILD @@ -8,8 +8,10 @@ load( go_library( name = "go_default_library", srcs = ["interface.go"], + importpath = "k8s.io/client-go/informers/admissionregistration", deps = [ "//vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1:go_default_library", + "//vendor/k8s.io/client-go/informers/admissionregistration/v1beta1:go_default_library", "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", ], ) @@ -26,6 +28,7 @@ filegroup( srcs = [ ":package-srcs", "//staging/src/k8s.io/client-go/informers/admissionregistration/v1alpha1:all-srcs", + "//staging/src/k8s.io/client-go/informers/admissionregistration/v1beta1:all-srcs", ], tags = ["automanaged"], ) diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/interface.go b/vendor/k8s.io/client-go/informers/admissionregistration/interface.go index 19099ba0225b..74bfb60198ac 100644 --- a/vendor/k8s.io/client-go/informers/admissionregistration/interface.go +++ b/vendor/k8s.io/client-go/informers/admissionregistration/interface.go @@ -20,6 +20,7 @@ package admissionregistration import ( v1alpha1 "k8s.io/client-go/informers/admissionregistration/v1alpha1" + v1beta1 "k8s.io/client-go/informers/admissionregistration/v1beta1" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" ) @@ -27,18 +28,27 @@ import ( type Interface interface { // V1alpha1 provides access to shared informers for resources in V1alpha1. V1alpha1() v1alpha1.Interface + // V1beta1 provides access to shared informers for resources in V1beta1. + V1beta1() v1beta1.Interface } type group struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &group{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // V1alpha1 returns a new v1alpha1.Interface. func (g *group) V1alpha1() v1alpha1.Interface { - return v1alpha1.New(g.SharedInformerFactory) + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} + +// V1beta1 returns a new v1beta1.Interface. 
+func (g *group) V1beta1() v1beta1.Interface { + return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) } diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/BUILD b/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/BUILD index 8f405a3a5a8d..7292f8ca2fb3 100644 --- a/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/BUILD +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/BUILD @@ -1,17 +1,13 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ - "externaladmissionhookconfiguration.go", "initializerconfiguration.go", "interface.go", ], + importpath = "k8s.io/client-go/informers/admissionregistration/v1alpha1", + visibility = ["//visibility:public"], deps = [ "//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -35,4 +31,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/externaladmissionhookconfiguration.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/externaladmissionhookconfiguration.go deleted file mode 100644 index cdc03d99331a..000000000000 --- a/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/externaladmissionhookconfiguration.go +++ /dev/null @@ -1,73 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was automatically generated by informer-gen - -package v1alpha1 - -import ( - admissionregistration_v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - internalinterfaces "k8s.io/client-go/informers/internalinterfaces" - kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/admissionregistration/v1alpha1" - cache "k8s.io/client-go/tools/cache" - time "time" -) - -// ExternalAdmissionHookConfigurationInformer provides access to a shared informer and lister for -// ExternalAdmissionHookConfigurations. -type ExternalAdmissionHookConfigurationInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1alpha1.ExternalAdmissionHookConfigurationLister -} - -type externalAdmissionHookConfigurationInformer struct { - factory internalinterfaces.SharedInformerFactory -} - -// NewExternalAdmissionHookConfigurationInformer constructs a new informer for ExternalAdmissionHookConfiguration type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. 
-func NewExternalAdmissionHookConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - return client.AdmissionregistrationV1alpha1().ExternalAdmissionHookConfigurations().List(options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - return client.AdmissionregistrationV1alpha1().ExternalAdmissionHookConfigurations().Watch(options) - }, - }, - &admissionregistration_v1alpha1.ExternalAdmissionHookConfiguration{}, - resyncPeriod, - indexers, - ) -} - -func defaultExternalAdmissionHookConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewExternalAdmissionHookConfigurationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) -} - -func (f *externalAdmissionHookConfigurationInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&admissionregistration_v1alpha1.ExternalAdmissionHookConfiguration{}, defaultExternalAdmissionHookConfigurationInformer) -} - -func (f *externalAdmissionHookConfigurationInformer) Lister() v1alpha1.ExternalAdmissionHookConfigurationLister { - return v1alpha1.NewExternalAdmissionHookConfigurationLister(f.Informer().GetIndexer()) -} diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/initializerconfiguration.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/initializerconfiguration.go index 90e0078fc07d..0f55c737f53d 100644 --- a/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/initializerconfiguration.go +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/initializerconfiguration.go @@ -38,19 +38,33 @@ type InitializerConfigurationInformer interface { } type initializerConfigurationInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc } // NewInitializerConfigurationInformer constructs a new informer for InitializerConfiguration type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewInitializerConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredInitializerConfigurationInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredInitializerConfigurationInformer constructs a new informer for InitializerConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredInitializerConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.AdmissionregistrationV1alpha1().InitializerConfigurations().List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.AdmissionregistrationV1alpha1().InitializerConfigurations().Watch(options) }, }, @@ -60,12 +74,12 @@ func NewInitializerConfigurationInformer(client kubernetes.Interface, resyncPeri ) } -func defaultInitializerConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewInitializerConfigurationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *initializerConfigurationInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredInitializerConfigurationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *initializerConfigurationInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&admissionregistration_v1alpha1.InitializerConfiguration{}, defaultInitializerConfigurationInformer) + return f.factory.InformerFor(&admissionregistration_v1alpha1.InitializerConfiguration{}, f.defaultInformer) } func (f *initializerConfigurationInformer) Lister() v1alpha1.InitializerConfigurationLister { diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/interface.go index 33ff70766c57..44da0479673c 100644 --- a/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/interface.go +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/interface.go @@ -24,27 +24,22 @@ import ( // Interface provides access to all the informers in this group version. type Interface interface { - // ExternalAdmissionHookConfigurations returns a ExternalAdmissionHookConfigurationInformer. - ExternalAdmissionHookConfigurations() ExternalAdmissionHookConfigurationInformer // InitializerConfigurations returns a InitializerConfigurationInformer. InitializerConfigurations() InitializerConfigurationInformer } type version struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &version{f} -} - -// ExternalAdmissionHookConfigurations returns a ExternalAdmissionHookConfigurationInformer. -func (v *version) ExternalAdmissionHookConfigurations() ExternalAdmissionHookConfigurationInformer { - return &externalAdmissionHookConfigurationInformer{factory: v.SharedInformerFactory} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // InitializerConfigurations returns a InitializerConfigurationInformer. 
func (v *version) InitializerConfigurations() InitializerConfigurationInformer { - return &initializerConfigurationInformer{factory: v.SharedInformerFactory} + return &initializerConfigurationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/BUILD b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/BUILD new file mode 100644 index 000000000000..e66e6eb44021 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/BUILD @@ -0,0 +1,36 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "interface.go", + "mutatingwebhookconfiguration.go", + "validatingwebhookconfiguration.go", + ], + importpath = "k8s.io/client-go/informers/admissionregistration/v1beta1", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", + "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/listers/admissionregistration/v1beta1:go_default_library", + "//vendor/k8s.io/client-go/tools/cache:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/interface.go new file mode 100644 index 000000000000..4f08d69a01d5 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/interface.go @@ -0,0 +1,52 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // MutatingWebhookConfigurations returns a MutatingWebhookConfigurationInformer. + MutatingWebhookConfigurations() MutatingWebhookConfigurationInformer + // ValidatingWebhookConfigurations returns a ValidatingWebhookConfigurationInformer. + ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. 
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// MutatingWebhookConfigurations returns a MutatingWebhookConfigurationInformer. +func (v *version) MutatingWebhookConfigurations() MutatingWebhookConfigurationInformer { + return &mutatingWebhookConfigurationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// ValidatingWebhookConfigurations returns a ValidatingWebhookConfigurationInformer. +func (v *version) ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInformer { + return &validatingWebhookConfigurationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go new file mode 100644 index 000000000000..31a2a865cb61 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + admissionregistration_v1beta1 "k8s.io/api/admissionregistration/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/admissionregistration/v1beta1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// MutatingWebhookConfigurationInformer provides access to a shared informer and lister for +// MutatingWebhookConfigurations. +type MutatingWebhookConfigurationInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.MutatingWebhookConfigurationLister +} + +type mutatingWebhookConfigurationInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewMutatingWebhookConfigurationInformer constructs a new informer for MutatingWebhookConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewMutatingWebhookConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredMutatingWebhookConfigurationInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredMutatingWebhookConfigurationInformer constructs a new informer for MutatingWebhookConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. 
This reduces memory footprint and number of connections to the server. +func NewFilteredMutatingWebhookConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Watch(options) + }, + }, + &admissionregistration_v1beta1.MutatingWebhookConfiguration{}, + resyncPeriod, + indexers, + ) +} + +func (f *mutatingWebhookConfigurationInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredMutatingWebhookConfigurationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *mutatingWebhookConfigurationInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&admissionregistration_v1beta1.MutatingWebhookConfiguration{}, f.defaultInformer) +} + +func (f *mutatingWebhookConfigurationInformer) Lister() v1beta1.MutatingWebhookConfigurationLister { + return v1beta1.NewMutatingWebhookConfigurationLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingwebhookconfiguration.go new file mode 100644 index 000000000000..d87ab900285b --- /dev/null +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingwebhookconfiguration.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + admissionregistration_v1beta1 "k8s.io/api/admissionregistration/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/admissionregistration/v1beta1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// ValidatingWebhookConfigurationInformer provides access to a shared informer and lister for +// ValidatingWebhookConfigurations. 
+type ValidatingWebhookConfigurationInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.ValidatingWebhookConfigurationLister +} + +type validatingWebhookConfigurationInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewValidatingWebhookConfigurationInformer constructs a new informer for ValidatingWebhookConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewValidatingWebhookConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredValidatingWebhookConfigurationInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredValidatingWebhookConfigurationInformer constructs a new informer for ValidatingWebhookConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredValidatingWebhookConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Watch(options) + }, + }, + &admissionregistration_v1beta1.ValidatingWebhookConfiguration{}, + resyncPeriod, + indexers, + ) +} + +func (f *validatingWebhookConfigurationInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredValidatingWebhookConfigurationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *validatingWebhookConfigurationInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&admissionregistration_v1beta1.ValidatingWebhookConfiguration{}, f.defaultInformer) +} + +func (f *validatingWebhookConfigurationInformer) Lister() v1beta1.ValidatingWebhookConfigurationLister { + return v1beta1.NewValidatingWebhookConfigurationLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/apps/BUILD b/vendor/k8s.io/client-go/informers/apps/BUILD index f7c0d1151dc0..80615b108ec6 100644 --- a/vendor/k8s.io/client-go/informers/apps/BUILD +++ b/vendor/k8s.io/client-go/informers/apps/BUILD @@ -8,7 +8,9 @@ load( go_library( name = "go_default_library", srcs = ["interface.go"], + importpath = "k8s.io/client-go/informers/apps", deps = [ + "//vendor/k8s.io/client-go/informers/apps/v1:go_default_library", "//vendor/k8s.io/client-go/informers/apps/v1beta1:go_default_library", "//vendor/k8s.io/client-go/informers/apps/v1beta2:go_default_library", "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", @@ -26,6 +28,7 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", + 
"//staging/src/k8s.io/client-go/informers/apps/v1:all-srcs", "//staging/src/k8s.io/client-go/informers/apps/v1beta1:all-srcs", "//staging/src/k8s.io/client-go/informers/apps/v1beta2:all-srcs", ], diff --git a/vendor/k8s.io/client-go/informers/apps/interface.go b/vendor/k8s.io/client-go/informers/apps/interface.go index 8be06cfd201c..fdd32de0f3fb 100644 --- a/vendor/k8s.io/client-go/informers/apps/interface.go +++ b/vendor/k8s.io/client-go/informers/apps/interface.go @@ -19,6 +19,7 @@ limitations under the License. package apps import ( + v1 "k8s.io/client-go/informers/apps/v1" v1beta1 "k8s.io/client-go/informers/apps/v1beta1" v1beta2 "k8s.io/client-go/informers/apps/v1beta2" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" @@ -26,6 +27,8 @@ import ( // Interface provides access to each of this group's versions. type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface // V1beta1 provides access to shared informers for resources in V1beta1. V1beta1() v1beta1.Interface // V1beta2 provides access to shared informers for resources in V1beta2. @@ -33,20 +36,27 @@ type Interface interface { } type group struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &group{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) } // V1beta1 returns a new v1beta1.Interface. func (g *group) V1beta1() v1beta1.Interface { - return v1beta1.New(g.SharedInformerFactory) + return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) } // V1beta2 returns a new v1beta2.Interface. 
func (g *group) V1beta2() v1beta2.Interface { - return v1beta2.New(g.SharedInformerFactory) + return v1beta2.New(g.factory, g.namespace, g.tweakListOptions) } diff --git a/vendor/k8s.io/client-go/informers/apps/v1/BUILD b/vendor/k8s.io/client-go/informers/apps/v1/BUILD new file mode 100644 index 000000000000..233e26101d80 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/apps/v1/BUILD @@ -0,0 +1,39 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "controllerrevision.go", + "daemonset.go", + "deployment.go", + "interface.go", + "replicaset.go", + "statefulset.go", + ], + importpath = "k8s.io/client-go/informers/apps/v1", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/api/apps/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", + "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/listers/apps/v1:go_default_library", + "//vendor/k8s.io/client-go/tools/cache:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/client-go/informers/apps/v1/controllerrevision.go b/vendor/k8s.io/client-go/informers/apps/v1/controllerrevision.go new file mode 100644 index 000000000000..a69be9c70f47 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/apps/v1/controllerrevision.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + apps_v1 "k8s.io/api/apps/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/apps/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// ControllerRevisionInformer provides access to a shared informer and lister for +// ControllerRevisions. +type ControllerRevisionInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ControllerRevisionLister +} + +type controllerRevisionInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewControllerRevisionInformer constructs a new informer for ControllerRevision type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. 
This reduces memory footprint and number of connections to the server. +func NewControllerRevisionInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredControllerRevisionInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredControllerRevisionInformer constructs a new informer for ControllerRevision type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredControllerRevisionInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1().ControllerRevisions(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1().ControllerRevisions(namespace).Watch(options) + }, + }, + &apps_v1.ControllerRevision{}, + resyncPeriod, + indexers, + ) +} + +func (f *controllerRevisionInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredControllerRevisionInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *controllerRevisionInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apps_v1.ControllerRevision{}, f.defaultInformer) +} + +func (f *controllerRevisionInformer) Lister() v1.ControllerRevisionLister { + return v1.NewControllerRevisionLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/apps/v1/daemonset.go b/vendor/k8s.io/client-go/informers/apps/v1/daemonset.go new file mode 100644 index 000000000000..1c7abf7d09fc --- /dev/null +++ b/vendor/k8s.io/client-go/informers/apps/v1/daemonset.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + apps_v1 "k8s.io/api/apps/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/apps/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// DaemonSetInformer provides access to a shared informer and lister for +// DaemonSets. 
+type DaemonSetInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.DaemonSetLister +} + +type daemonSetInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewDaemonSetInformer constructs a new informer for DaemonSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewDaemonSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredDaemonSetInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredDaemonSetInformer constructs a new informer for DaemonSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredDaemonSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1().DaemonSets(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1().DaemonSets(namespace).Watch(options) + }, + }, + &apps_v1.DaemonSet{}, + resyncPeriod, + indexers, + ) +} + +func (f *daemonSetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredDaemonSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *daemonSetInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apps_v1.DaemonSet{}, f.defaultInformer) +} + +func (f *daemonSetInformer) Lister() v1.DaemonSetLister { + return v1.NewDaemonSetLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/apps/v1/deployment.go b/vendor/k8s.io/client-go/informers/apps/v1/deployment.go new file mode 100644 index 000000000000..9f6beed6e078 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/apps/v1/deployment.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + apps_v1 "k8s.io/api/apps/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/apps/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// DeploymentInformer provides access to a shared informer and lister for +// Deployments. +type DeploymentInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.DeploymentLister +} + +type deploymentInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewDeploymentInformer constructs a new informer for Deployment type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewDeploymentInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredDeploymentInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredDeploymentInformer constructs a new informer for Deployment type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredDeploymentInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1().Deployments(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1().Deployments(namespace).Watch(options) + }, + }, + &apps_v1.Deployment{}, + resyncPeriod, + indexers, + ) +} + +func (f *deploymentInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredDeploymentInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *deploymentInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apps_v1.Deployment{}, f.defaultInformer) +} + +func (f *deploymentInformer) Lister() v1.DeploymentLister { + return v1.NewDeploymentLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/apps/v1/interface.go b/vendor/k8s.io/client-go/informers/apps/v1/interface.go new file mode 100644 index 000000000000..6145fd6ccd9c --- /dev/null +++ b/vendor/k8s.io/client-go/informers/apps/v1/interface.go @@ -0,0 +1,73 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // ControllerRevisions returns a ControllerRevisionInformer. + ControllerRevisions() ControllerRevisionInformer + // DaemonSets returns a DaemonSetInformer. + DaemonSets() DaemonSetInformer + // Deployments returns a DeploymentInformer. + Deployments() DeploymentInformer + // ReplicaSets returns a ReplicaSetInformer. + ReplicaSets() ReplicaSetInformer + // StatefulSets returns a StatefulSetInformer. + StatefulSets() StatefulSetInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// ControllerRevisions returns a ControllerRevisionInformer. +func (v *version) ControllerRevisions() ControllerRevisionInformer { + return &controllerRevisionInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// DaemonSets returns a DaemonSetInformer. +func (v *version) DaemonSets() DaemonSetInformer { + return &daemonSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Deployments returns a DeploymentInformer. +func (v *version) Deployments() DeploymentInformer { + return &deploymentInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// ReplicaSets returns a ReplicaSetInformer. +func (v *version) ReplicaSets() ReplicaSetInformer { + return &replicaSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// StatefulSets returns a StatefulSetInformer. +func (v *version) StatefulSets() StatefulSetInformer { + return &statefulSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/apps/v1/replicaset.go b/vendor/k8s.io/client-go/informers/apps/v1/replicaset.go new file mode 100644 index 000000000000..1ac50607f2ea --- /dev/null +++ b/vendor/k8s.io/client-go/informers/apps/v1/replicaset.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + apps_v1 "k8s.io/api/apps/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/apps/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// ReplicaSetInformer provides access to a shared informer and lister for +// ReplicaSets. +type ReplicaSetInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ReplicaSetLister +} + +type replicaSetInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewReplicaSetInformer constructs a new informer for ReplicaSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewReplicaSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredReplicaSetInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredReplicaSetInformer constructs a new informer for ReplicaSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredReplicaSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1().ReplicaSets(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1().ReplicaSets(namespace).Watch(options) + }, + }, + &apps_v1.ReplicaSet{}, + resyncPeriod, + indexers, + ) +} + +func (f *replicaSetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredReplicaSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *replicaSetInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apps_v1.ReplicaSet{}, f.defaultInformer) +} + +func (f *replicaSetInformer) Lister() v1.ReplicaSetLister { + return v1.NewReplicaSetLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/apps/v1/statefulset.go b/vendor/k8s.io/client-go/informers/apps/v1/statefulset.go new file mode 100644 index 000000000000..535790df9749 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/apps/v1/statefulset.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + apps_v1 "k8s.io/api/apps/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/apps/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// StatefulSetInformer provides access to a shared informer and lister for +// StatefulSets. +type StatefulSetInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.StatefulSetLister +} + +type statefulSetInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewStatefulSetInformer constructs a new informer for StatefulSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewStatefulSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredStatefulSetInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredStatefulSetInformer constructs a new informer for StatefulSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredStatefulSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1().StatefulSets(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1().StatefulSets(namespace).Watch(options) + }, + }, + &apps_v1.StatefulSet{}, + resyncPeriod, + indexers, + ) +} + +func (f *statefulSetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredStatefulSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *statefulSetInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apps_v1.StatefulSet{}, f.defaultInformer) +} + +func (f *statefulSetInformer) Lister() v1.StatefulSetLister { + return v1.NewStatefulSetLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta1/BUILD b/vendor/k8s.io/client-go/informers/apps/v1beta1/BUILD index c6eaee5e3ab2..80ca939e1ad1 100644 --- a/vendor/k8s.io/client-go/informers/apps/v1beta1/BUILD +++ b/vendor/k8s.io/client-go/informers/apps/v1beta1/BUILD @@ -13,6 +13,7 @@ go_library( "interface.go", "statefulset.go", ], + importpath = "k8s.io/client-go/informers/apps/v1beta1", deps = [ "//vendor/k8s.io/api/apps/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta1/controllerrevision.go b/vendor/k8s.io/client-go/informers/apps/v1beta1/controllerrevision.go index 1a433f84efb2..1e2de416bc5e 100644 --- a/vendor/k8s.io/client-go/informers/apps/v1beta1/controllerrevision.go +++ b/vendor/k8s.io/client-go/informers/apps/v1beta1/controllerrevision.go @@ -38,19 +38,34 @@ type ControllerRevisionInformer interface { } type controllerRevisionInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewControllerRevisionInformer constructs a new informer for ControllerRevision type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewControllerRevisionInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredControllerRevisionInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredControllerRevisionInformer constructs a new informer for ControllerRevision type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredControllerRevisionInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.AppsV1beta1().ControllerRevisions(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.AppsV1beta1().ControllerRevisions(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewControllerRevisionInformer(client kubernetes.Interface, namespace string ) } -func defaultControllerRevisionInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewControllerRevisionInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *controllerRevisionInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredControllerRevisionInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *controllerRevisionInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&apps_v1beta1.ControllerRevision{}, defaultControllerRevisionInformer) + return f.factory.InformerFor(&apps_v1beta1.ControllerRevision{}, f.defaultInformer) } func (f *controllerRevisionInformer) Lister() v1beta1.ControllerRevisionLister { diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta1/deployment.go b/vendor/k8s.io/client-go/informers/apps/v1beta1/deployment.go index d77f3b3c2639..4d2dea575ad1 100644 --- a/vendor/k8s.io/client-go/informers/apps/v1beta1/deployment.go +++ b/vendor/k8s.io/client-go/informers/apps/v1beta1/deployment.go @@ -38,19 +38,34 @@ type DeploymentInformer interface { } type deploymentInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewDeploymentInformer constructs a new informer for Deployment type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewDeploymentInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredDeploymentInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredDeploymentInformer constructs a new informer for Deployment type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredDeploymentInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.AppsV1beta1().Deployments(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.AppsV1beta1().Deployments(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewDeploymentInformer(client kubernetes.Interface, namespace string, resync ) } -func defaultDeploymentInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewDeploymentInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *deploymentInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredDeploymentInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *deploymentInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&apps_v1beta1.Deployment{}, defaultDeploymentInformer) + return f.factory.InformerFor(&apps_v1beta1.Deployment{}, f.defaultInformer) } func (f *deploymentInformer) Lister() v1beta1.DeploymentLister { diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/apps/v1beta1/interface.go index 11f55faad9d0..3a51a1f5b48b 100644 --- a/vendor/k8s.io/client-go/informers/apps/v1beta1/interface.go +++ b/vendor/k8s.io/client-go/informers/apps/v1beta1/interface.go @@ -33,25 +33,27 @@ type Interface interface { } type version struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &version{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // ControllerRevisions returns a ControllerRevisionInformer. func (v *version) ControllerRevisions() ControllerRevisionInformer { - return &controllerRevisionInformer{factory: v.SharedInformerFactory} + return &controllerRevisionInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // Deployments returns a DeploymentInformer. func (v *version) Deployments() DeploymentInformer { - return &deploymentInformer{factory: v.SharedInformerFactory} + return &deploymentInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // StatefulSets returns a StatefulSetInformer. 
func (v *version) StatefulSets() StatefulSetInformer { - return &statefulSetInformer{factory: v.SharedInformerFactory} + return &statefulSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta1/statefulset.go b/vendor/k8s.io/client-go/informers/apps/v1beta1/statefulset.go index 3bd8af7d733f..779ae2c60daf 100644 --- a/vendor/k8s.io/client-go/informers/apps/v1beta1/statefulset.go +++ b/vendor/k8s.io/client-go/informers/apps/v1beta1/statefulset.go @@ -38,19 +38,34 @@ type StatefulSetInformer interface { } type statefulSetInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewStatefulSetInformer constructs a new informer for StatefulSet type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewStatefulSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredStatefulSetInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredStatefulSetInformer constructs a new informer for StatefulSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredStatefulSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.AppsV1beta1().StatefulSets(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.AppsV1beta1().StatefulSets(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewStatefulSetInformer(client kubernetes.Interface, namespace string, resyn ) } -func defaultStatefulSetInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewStatefulSetInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *statefulSetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredStatefulSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *statefulSetInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&apps_v1beta1.StatefulSet{}, defaultStatefulSetInformer) + return f.factory.InformerFor(&apps_v1beta1.StatefulSet{}, f.defaultInformer) } func (f *statefulSetInformer) Lister() v1beta1.StatefulSetLister { diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta2/BUILD b/vendor/k8s.io/client-go/informers/apps/v1beta2/BUILD index 1ad6b8f2ae33..5b0137bb26f2 100644 --- a/vendor/k8s.io/client-go/informers/apps/v1beta2/BUILD +++ b/vendor/k8s.io/client-go/informers/apps/v1beta2/BUILD @@ -15,6 +15,7 @@ go_library( 
"replicaset.go", "statefulset.go", ], + importpath = "k8s.io/client-go/informers/apps/v1beta2", deps = [ "//vendor/k8s.io/api/apps/v1beta2:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta2/controllerrevision.go b/vendor/k8s.io/client-go/informers/apps/v1beta2/controllerrevision.go index ba1be506be2d..a7d55ab4c6d0 100644 --- a/vendor/k8s.io/client-go/informers/apps/v1beta2/controllerrevision.go +++ b/vendor/k8s.io/client-go/informers/apps/v1beta2/controllerrevision.go @@ -38,19 +38,34 @@ type ControllerRevisionInformer interface { } type controllerRevisionInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewControllerRevisionInformer constructs a new informer for ControllerRevision type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewControllerRevisionInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredControllerRevisionInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredControllerRevisionInformer constructs a new informer for ControllerRevision type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredControllerRevisionInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.AppsV1beta2().ControllerRevisions(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.AppsV1beta2().ControllerRevisions(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewControllerRevisionInformer(client kubernetes.Interface, namespace string ) } -func defaultControllerRevisionInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewControllerRevisionInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *controllerRevisionInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredControllerRevisionInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *controllerRevisionInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&apps_v1beta2.ControllerRevision{}, defaultControllerRevisionInformer) + return f.factory.InformerFor(&apps_v1beta2.ControllerRevision{}, f.defaultInformer) } func (f *controllerRevisionInformer) Lister() v1beta2.ControllerRevisionLister { diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta2/daemonset.go b/vendor/k8s.io/client-go/informers/apps/v1beta2/daemonset.go index 
5c440f239ae0..5d3288026e40 100644 --- a/vendor/k8s.io/client-go/informers/apps/v1beta2/daemonset.go +++ b/vendor/k8s.io/client-go/informers/apps/v1beta2/daemonset.go @@ -38,19 +38,34 @@ type DaemonSetInformer interface { } type daemonSetInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewDaemonSetInformer constructs a new informer for DaemonSet type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewDaemonSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredDaemonSetInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredDaemonSetInformer constructs a new informer for DaemonSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredDaemonSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.AppsV1beta2().DaemonSets(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.AppsV1beta2().DaemonSets(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewDaemonSetInformer(client kubernetes.Interface, namespace string, resyncP ) } -func defaultDaemonSetInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewDaemonSetInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *daemonSetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredDaemonSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *daemonSetInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&apps_v1beta2.DaemonSet{}, defaultDaemonSetInformer) + return f.factory.InformerFor(&apps_v1beta2.DaemonSet{}, f.defaultInformer) } func (f *daemonSetInformer) Lister() v1beta2.DaemonSetLister { diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta2/deployment.go b/vendor/k8s.io/client-go/informers/apps/v1beta2/deployment.go index 031bcddd238c..6b6cd60352dd 100644 --- a/vendor/k8s.io/client-go/informers/apps/v1beta2/deployment.go +++ b/vendor/k8s.io/client-go/informers/apps/v1beta2/deployment.go @@ -38,19 +38,34 @@ type DeploymentInformer interface { } type deploymentInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewDeploymentInformer constructs a new informer for Deployment type. 
// Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewDeploymentInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredDeploymentInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredDeploymentInformer constructs a new informer for Deployment type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredDeploymentInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.AppsV1beta2().Deployments(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.AppsV1beta2().Deployments(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewDeploymentInformer(client kubernetes.Interface, namespace string, resync ) } -func defaultDeploymentInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewDeploymentInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *deploymentInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredDeploymentInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *deploymentInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&apps_v1beta2.Deployment{}, defaultDeploymentInformer) + return f.factory.InformerFor(&apps_v1beta2.Deployment{}, f.defaultInformer) } func (f *deploymentInformer) Lister() v1beta2.DeploymentLister { diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta2/interface.go b/vendor/k8s.io/client-go/informers/apps/v1beta2/interface.go index 3f12f50325b3..59a6e73d4ad8 100644 --- a/vendor/k8s.io/client-go/informers/apps/v1beta2/interface.go +++ b/vendor/k8s.io/client-go/informers/apps/v1beta2/interface.go @@ -37,35 +37,37 @@ type Interface interface { } type version struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &version{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // ControllerRevisions returns a ControllerRevisionInformer. 
func (v *version) ControllerRevisions() ControllerRevisionInformer { - return &controllerRevisionInformer{factory: v.SharedInformerFactory} + return &controllerRevisionInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // DaemonSets returns a DaemonSetInformer. func (v *version) DaemonSets() DaemonSetInformer { - return &daemonSetInformer{factory: v.SharedInformerFactory} + return &daemonSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // Deployments returns a DeploymentInformer. func (v *version) Deployments() DeploymentInformer { - return &deploymentInformer{factory: v.SharedInformerFactory} + return &deploymentInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // ReplicaSets returns a ReplicaSetInformer. func (v *version) ReplicaSets() ReplicaSetInformer { - return &replicaSetInformer{factory: v.SharedInformerFactory} + return &replicaSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // StatefulSets returns a StatefulSetInformer. func (v *version) StatefulSets() StatefulSetInformer { - return &statefulSetInformer{factory: v.SharedInformerFactory} + return &statefulSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta2/replicaset.go b/vendor/k8s.io/client-go/informers/apps/v1beta2/replicaset.go index 1eec8e8c9952..988a3e4fbb9b 100644 --- a/vendor/k8s.io/client-go/informers/apps/v1beta2/replicaset.go +++ b/vendor/k8s.io/client-go/informers/apps/v1beta2/replicaset.go @@ -38,19 +38,34 @@ type ReplicaSetInformer interface { } type replicaSetInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewReplicaSetInformer constructs a new informer for ReplicaSet type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewReplicaSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredReplicaSetInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredReplicaSetInformer constructs a new informer for ReplicaSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredReplicaSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.AppsV1beta2().ReplicaSets(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.AppsV1beta2().ReplicaSets(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewReplicaSetInformer(client kubernetes.Interface, namespace string, resync ) } -func defaultReplicaSetInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewReplicaSetInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *replicaSetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredReplicaSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *replicaSetInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&apps_v1beta2.ReplicaSet{}, defaultReplicaSetInformer) + return f.factory.InformerFor(&apps_v1beta2.ReplicaSet{}, f.defaultInformer) } func (f *replicaSetInformer) Lister() v1beta2.ReplicaSetLister { diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta2/statefulset.go b/vendor/k8s.io/client-go/informers/apps/v1beta2/statefulset.go index 004e6b8ecf3c..dff9c24083f9 100644 --- a/vendor/k8s.io/client-go/informers/apps/v1beta2/statefulset.go +++ b/vendor/k8s.io/client-go/informers/apps/v1beta2/statefulset.go @@ -38,19 +38,34 @@ type StatefulSetInformer interface { } type statefulSetInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewStatefulSetInformer constructs a new informer for StatefulSet type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewStatefulSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredStatefulSetInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredStatefulSetInformer constructs a new informer for StatefulSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredStatefulSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.AppsV1beta2().StatefulSets(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.AppsV1beta2().StatefulSets(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewStatefulSetInformer(client kubernetes.Interface, namespace string, resyn ) } -func defaultStatefulSetInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewStatefulSetInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *statefulSetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredStatefulSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *statefulSetInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&apps_v1beta2.StatefulSet{}, defaultStatefulSetInformer) + return f.factory.InformerFor(&apps_v1beta2.StatefulSet{}, f.defaultInformer) } func (f *statefulSetInformer) Lister() v1beta2.StatefulSetLister { diff --git a/vendor/k8s.io/client-go/informers/autoscaling/BUILD b/vendor/k8s.io/client-go/informers/autoscaling/BUILD index 81faa5955fe1..7ad9e975e8ab 100644 --- a/vendor/k8s.io/client-go/informers/autoscaling/BUILD +++ b/vendor/k8s.io/client-go/informers/autoscaling/BUILD @@ -3,6 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = ["interface.go"], + importpath = "k8s.io/client-go/informers/autoscaling", visibility = ["//visibility:public"], deps = [ "//vendor/k8s.io/client-go/informers/autoscaling/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/informers/autoscaling/interface.go b/vendor/k8s.io/client-go/informers/autoscaling/interface.go index 520c0be3cc68..63a5c0ccda2c 100644 --- a/vendor/k8s.io/client-go/informers/autoscaling/interface.go +++ b/vendor/k8s.io/client-go/informers/autoscaling/interface.go @@ -33,20 +33,22 @@ type Interface interface { } type group struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &group{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // V1 returns a new v1.Interface. func (g *group) V1() v1.Interface { - return v1.New(g.SharedInformerFactory) + return v1.New(g.factory, g.namespace, g.tweakListOptions) } // V2beta1 returns a new v2beta1.Interface. 
func (g *group) V2beta1() v2beta1.Interface { - return v2beta1.New(g.SharedInformerFactory) + return v2beta1.New(g.factory, g.namespace, g.tweakListOptions) } diff --git a/vendor/k8s.io/client-go/informers/autoscaling/v1/BUILD b/vendor/k8s.io/client-go/informers/autoscaling/v1/BUILD index 2a802abd05e1..5c3ba46bc94b 100644 --- a/vendor/k8s.io/client-go/informers/autoscaling/v1/BUILD +++ b/vendor/k8s.io/client-go/informers/autoscaling/v1/BUILD @@ -6,6 +6,7 @@ go_library( "horizontalpodautoscaler.go", "interface.go", ], + importpath = "k8s.io/client-go/informers/autoscaling/v1", visibility = ["//visibility:public"], deps = [ "//vendor/k8s.io/api/autoscaling/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/informers/autoscaling/v1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/informers/autoscaling/v1/horizontalpodautoscaler.go index d1f8cae617e8..7d875e73566d 100644 --- a/vendor/k8s.io/client-go/informers/autoscaling/v1/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/informers/autoscaling/v1/horizontalpodautoscaler.go @@ -38,19 +38,34 @@ type HorizontalPodAutoscalerInformer interface { } type horizontalPodAutoscalerInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewHorizontalPodAutoscalerInformer constructs a new informer for HorizontalPodAutoscaler type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewHorizontalPodAutoscalerInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredHorizontalPodAutoscalerInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredHorizontalPodAutoscalerInformer constructs a new informer for HorizontalPodAutoscaler type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredHorizontalPodAutoscalerInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.AutoscalingV1().HorizontalPodAutoscalers(namespace).List(options) }, WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.AutoscalingV1().HorizontalPodAutoscalers(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewHorizontalPodAutoscalerInformer(client kubernetes.Interface, namespace s ) } -func defaultHorizontalPodAutoscalerInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewHorizontalPodAutoscalerInformer(client, meta_v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *horizontalPodAutoscalerInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredHorizontalPodAutoscalerInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *horizontalPodAutoscalerInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&autoscaling_v1.HorizontalPodAutoscaler{}, defaultHorizontalPodAutoscalerInformer) + return f.factory.InformerFor(&autoscaling_v1.HorizontalPodAutoscaler{}, f.defaultInformer) } func (f *horizontalPodAutoscalerInformer) Lister() v1.HorizontalPodAutoscalerLister { diff --git a/vendor/k8s.io/client-go/informers/autoscaling/v1/interface.go b/vendor/k8s.io/client-go/informers/autoscaling/v1/interface.go index 45f4bb35a8c9..5ba907012076 100644 --- a/vendor/k8s.io/client-go/informers/autoscaling/v1/interface.go +++ b/vendor/k8s.io/client-go/informers/autoscaling/v1/interface.go @@ -29,15 +29,17 @@ type Interface interface { } type version struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &version{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // HorizontalPodAutoscalers returns a HorizontalPodAutoscalerInformer. 
func (v *version) HorizontalPodAutoscalers() HorizontalPodAutoscalerInformer { - return &horizontalPodAutoscalerInformer{factory: v.SharedInformerFactory} + return &horizontalPodAutoscalerInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } diff --git a/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/BUILD b/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/BUILD index 2e31e337bc4b..3439a4ed2b0f 100644 --- a/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/BUILD +++ b/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/BUILD @@ -6,6 +6,7 @@ go_library( "horizontalpodautoscaler.go", "interface.go", ], + importpath = "k8s.io/client-go/informers/autoscaling/v2beta1", visibility = ["//visibility:public"], deps = [ "//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library", diff --git a/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/horizontalpodautoscaler.go index 9291b8050740..9865f8e13339 100644 --- a/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/horizontalpodautoscaler.go @@ -38,19 +38,34 @@ type HorizontalPodAutoscalerInformer interface { } type horizontalPodAutoscalerInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewHorizontalPodAutoscalerInformer constructs a new informer for HorizontalPodAutoscaler type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewHorizontalPodAutoscalerInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredHorizontalPodAutoscalerInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredHorizontalPodAutoscalerInformer constructs a new informer for HorizontalPodAutoscaler type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredHorizontalPodAutoscalerInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.AutoscalingV2beta1().HorizontalPodAutoscalers(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.AutoscalingV2beta1().HorizontalPodAutoscalers(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewHorizontalPodAutoscalerInformer(client kubernetes.Interface, namespace s ) } -func defaultHorizontalPodAutoscalerInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewHorizontalPodAutoscalerInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *horizontalPodAutoscalerInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredHorizontalPodAutoscalerInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *horizontalPodAutoscalerInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&autoscaling_v2beta1.HorizontalPodAutoscaler{}, defaultHorizontalPodAutoscalerInformer) + return f.factory.InformerFor(&autoscaling_v2beta1.HorizontalPodAutoscaler{}, f.defaultInformer) } func (f *horizontalPodAutoscalerInformer) Lister() v2beta1.HorizontalPodAutoscalerLister { diff --git a/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/interface.go b/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/interface.go index 4733291b6aa8..4c9ea84999fc 100644 --- a/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/interface.go +++ b/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/interface.go @@ -29,15 +29,17 @@ type Interface interface { } type version struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &version{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // HorizontalPodAutoscalers returns a HorizontalPodAutoscalerInformer. 
func (v *version) HorizontalPodAutoscalers() HorizontalPodAutoscalerInformer { - return &horizontalPodAutoscalerInformer{factory: v.SharedInformerFactory} + return &horizontalPodAutoscalerInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } diff --git a/vendor/k8s.io/client-go/informers/batch/BUILD b/vendor/k8s.io/client-go/informers/batch/BUILD index 58a0ff24383b..9199107c7ebe 100644 --- a/vendor/k8s.io/client-go/informers/batch/BUILD +++ b/vendor/k8s.io/client-go/informers/batch/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["interface.go"], + importpath = "k8s.io/client-go/informers/batch", deps = [ "//vendor/k8s.io/client-go/informers/batch/v1:go_default_library", "//vendor/k8s.io/client-go/informers/batch/v1beta1:go_default_library", diff --git a/vendor/k8s.io/client-go/informers/batch/interface.go b/vendor/k8s.io/client-go/informers/batch/interface.go index 159e75665ced..bbaec7964833 100644 --- a/vendor/k8s.io/client-go/informers/batch/interface.go +++ b/vendor/k8s.io/client-go/informers/batch/interface.go @@ -36,25 +36,27 @@ type Interface interface { } type group struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &group{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // V1 returns a new v1.Interface. func (g *group) V1() v1.Interface { - return v1.New(g.SharedInformerFactory) + return v1.New(g.factory, g.namespace, g.tweakListOptions) } // V1beta1 returns a new v1beta1.Interface. func (g *group) V1beta1() v1beta1.Interface { - return v1beta1.New(g.SharedInformerFactory) + return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) } // V2alpha1 returns a new v2alpha1.Interface. func (g *group) V2alpha1() v2alpha1.Interface { - return v2alpha1.New(g.SharedInformerFactory) + return v2alpha1.New(g.factory, g.namespace, g.tweakListOptions) } diff --git a/vendor/k8s.io/client-go/informers/batch/v1/BUILD b/vendor/k8s.io/client-go/informers/batch/v1/BUILD index b5c3bcd56c24..ed4825488419 100644 --- a/vendor/k8s.io/client-go/informers/batch/v1/BUILD +++ b/vendor/k8s.io/client-go/informers/batch/v1/BUILD @@ -11,6 +11,7 @@ go_library( "interface.go", "job.go", ], + importpath = "k8s.io/client-go/informers/batch/v1", deps = [ "//vendor/k8s.io/api/batch/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/informers/batch/v1/interface.go b/vendor/k8s.io/client-go/informers/batch/v1/interface.go index d4d7d56f624d..41c08ea2d978 100644 --- a/vendor/k8s.io/client-go/informers/batch/v1/interface.go +++ b/vendor/k8s.io/client-go/informers/batch/v1/interface.go @@ -29,15 +29,17 @@ type Interface interface { } type version struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. 
-func New(f internalinterfaces.SharedInformerFactory) Interface { - return &version{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // Jobs returns a JobInformer. func (v *version) Jobs() JobInformer { - return &jobInformer{factory: v.SharedInformerFactory} + return &jobInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } diff --git a/vendor/k8s.io/client-go/informers/batch/v1/job.go b/vendor/k8s.io/client-go/informers/batch/v1/job.go index a5a35680aa91..8a2e5f0d8bfb 100644 --- a/vendor/k8s.io/client-go/informers/batch/v1/job.go +++ b/vendor/k8s.io/client-go/informers/batch/v1/job.go @@ -38,19 +38,34 @@ type JobInformer interface { } type jobInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewJobInformer constructs a new informer for Job type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewJobInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredJobInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredJobInformer constructs a new informer for Job type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
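
Another illustrative sketch, again outside the vendored patch: most callers still go through the shared informer factory, and the regenerated interface.go files above simply thread the factory's namespace and tweakListOptions down through each group and version to the individual informers; this client-go revision appears to add a filtered factory constructor for setting them, while the plain factory used here leaves both at their defaults. The sketch assumes a configured kubernetes.Interface named client and an externally managed stop channel; the resync period and handler body are placeholders.

package informerexample

import (
	"time"

	batch_v1 "k8s.io/api/batch/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// watchJobs requests the shared Job informer from the factory, registers an
// event handler, starts the factory, and waits for the initial cache sync.
func watchJobs(client kubernetes.Interface, stopCh <-chan struct{}) {
	factory := informers.NewSharedInformerFactory(client, 10*time.Minute)

	jobInformer := factory.Batch().V1().Jobs().Informer()
	jobInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			if job, ok := obj.(*batch_v1.Job); ok {
				_ = job // e.g. enqueue job.Namespace + "/" + job.Name for processing
			}
		},
	})

	factory.Start(stopCh)
	cache.WaitForCacheSync(stopCh, jobInformer.HasSynced)
}
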
+func NewFilteredJobInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.BatchV1().Jobs(namespace).List(options) }, WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.BatchV1().Jobs(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewJobInformer(client kubernetes.Interface, namespace string, resyncPeriod ) } -func defaultJobInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewJobInformer(client, meta_v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *jobInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredJobInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *jobInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&batch_v1.Job{}, defaultJobInformer) + return f.factory.InformerFor(&batch_v1.Job{}, f.defaultInformer) } func (f *jobInformer) Lister() v1.JobLister { diff --git a/vendor/k8s.io/client-go/informers/batch/v1beta1/BUILD b/vendor/k8s.io/client-go/informers/batch/v1beta1/BUILD index e917e1d4ee7a..20f4ac3c4830 100644 --- a/vendor/k8s.io/client-go/informers/batch/v1beta1/BUILD +++ b/vendor/k8s.io/client-go/informers/batch/v1beta1/BUILD @@ -1,7 +1,5 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", @@ -13,7 +11,7 @@ go_library( "cronjob.go", "interface.go", ], - tags = ["automanaged"], + importpath = "k8s.io/client-go/informers/batch/v1beta1", deps = [ "//vendor/k8s.io/api/batch/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/informers/batch/v1beta1/cronjob.go b/vendor/k8s.io/client-go/informers/batch/v1beta1/cronjob.go index 11bf035d3479..4edfd4153d0b 100644 --- a/vendor/k8s.io/client-go/informers/batch/v1beta1/cronjob.go +++ b/vendor/k8s.io/client-go/informers/batch/v1beta1/cronjob.go @@ -38,19 +38,34 @@ type CronJobInformer interface { } type cronJobInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewCronJobInformer constructs a new informer for CronJob type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewCronJobInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredCronJobInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredCronJobInformer constructs a new informer for CronJob type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. 
This reduces memory footprint and number of connections to the server. +func NewFilteredCronJobInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.BatchV1beta1().CronJobs(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.BatchV1beta1().CronJobs(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewCronJobInformer(client kubernetes.Interface, namespace string, resyncPer ) } -func defaultCronJobInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewCronJobInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *cronJobInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredCronJobInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *cronJobInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&batch_v1beta1.CronJob{}, defaultCronJobInformer) + return f.factory.InformerFor(&batch_v1beta1.CronJob{}, f.defaultInformer) } func (f *cronJobInformer) Lister() v1beta1.CronJobLister { diff --git a/vendor/k8s.io/client-go/informers/batch/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/batch/v1beta1/interface.go index 10b2f9f659bb..0ba1935dc6d6 100644 --- a/vendor/k8s.io/client-go/informers/batch/v1beta1/interface.go +++ b/vendor/k8s.io/client-go/informers/batch/v1beta1/interface.go @@ -29,15 +29,17 @@ type Interface interface { } type version struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &version{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // CronJobs returns a CronJobInformer. 
func (v *version) CronJobs() CronJobInformer { - return &cronJobInformer{factory: v.SharedInformerFactory} + return &cronJobInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } diff --git a/vendor/k8s.io/client-go/informers/batch/v2alpha1/BUILD b/vendor/k8s.io/client-go/informers/batch/v2alpha1/BUILD index 74c94faea841..4684ca4b49d1 100644 --- a/vendor/k8s.io/client-go/informers/batch/v2alpha1/BUILD +++ b/vendor/k8s.io/client-go/informers/batch/v2alpha1/BUILD @@ -11,6 +11,7 @@ go_library( "cronjob.go", "interface.go", ], + importpath = "k8s.io/client-go/informers/batch/v2alpha1", deps = [ "//vendor/k8s.io/api/batch/v2alpha1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/informers/batch/v2alpha1/cronjob.go b/vendor/k8s.io/client-go/informers/batch/v2alpha1/cronjob.go index 26ce57e5b7ea..03a6e6f883e7 100644 --- a/vendor/k8s.io/client-go/informers/batch/v2alpha1/cronjob.go +++ b/vendor/k8s.io/client-go/informers/batch/v2alpha1/cronjob.go @@ -38,19 +38,34 @@ type CronJobInformer interface { } type cronJobInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewCronJobInformer constructs a new informer for CronJob type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewCronJobInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredCronJobInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredCronJobInformer constructs a new informer for CronJob type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredCronJobInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.BatchV2alpha1().CronJobs(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.BatchV2alpha1().CronJobs(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewCronJobInformer(client kubernetes.Interface, namespace string, resyncPer ) } -func defaultCronJobInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewCronJobInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *cronJobInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredCronJobInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *cronJobInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&batch_v2alpha1.CronJob{}, defaultCronJobInformer) + return f.factory.InformerFor(&batch_v2alpha1.CronJob{}, f.defaultInformer) } func (f *cronJobInformer) Lister() v2alpha1.CronJobLister { diff --git a/vendor/k8s.io/client-go/informers/batch/v2alpha1/interface.go b/vendor/k8s.io/client-go/informers/batch/v2alpha1/interface.go index 261330bf0897..39b6f33f05f7 100644 --- a/vendor/k8s.io/client-go/informers/batch/v2alpha1/interface.go +++ b/vendor/k8s.io/client-go/informers/batch/v2alpha1/interface.go @@ -29,15 +29,17 @@ type Interface interface { } type version struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &version{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // CronJobs returns a CronJobInformer. 
func (v *version) CronJobs() CronJobInformer { - return &cronJobInformer{factory: v.SharedInformerFactory} + return &cronJobInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } diff --git a/vendor/k8s.io/client-go/informers/certificates/BUILD b/vendor/k8s.io/client-go/informers/certificates/BUILD index d87e8883afd9..bbe39da8b794 100644 --- a/vendor/k8s.io/client-go/informers/certificates/BUILD +++ b/vendor/k8s.io/client-go/informers/certificates/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["interface.go"], + importpath = "k8s.io/client-go/informers/certificates", deps = [ "//vendor/k8s.io/client-go/informers/certificates/v1beta1:go_default_library", "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", diff --git a/vendor/k8s.io/client-go/informers/certificates/interface.go b/vendor/k8s.io/client-go/informers/certificates/interface.go index 4d9f8ea50461..1eefe479737b 100644 --- a/vendor/k8s.io/client-go/informers/certificates/interface.go +++ b/vendor/k8s.io/client-go/informers/certificates/interface.go @@ -30,15 +30,17 @@ type Interface interface { } type group struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &group{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // V1beta1 returns a new v1beta1.Interface. func (g *group) V1beta1() v1beta1.Interface { - return v1beta1.New(g.SharedInformerFactory) + return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) } diff --git a/vendor/k8s.io/client-go/informers/certificates/v1beta1/BUILD b/vendor/k8s.io/client-go/informers/certificates/v1beta1/BUILD index eee6b25088d0..d8ada3891ee9 100644 --- a/vendor/k8s.io/client-go/informers/certificates/v1beta1/BUILD +++ b/vendor/k8s.io/client-go/informers/certificates/v1beta1/BUILD @@ -11,6 +11,7 @@ go_library( "certificatesigningrequest.go", "interface.go", ], + importpath = "k8s.io/client-go/informers/certificates/v1beta1", deps = [ "//vendor/k8s.io/api/certificates/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/informers/certificates/v1beta1/certificatesigningrequest.go b/vendor/k8s.io/client-go/informers/certificates/v1beta1/certificatesigningrequest.go index 3d949742d89d..44aac5c72453 100644 --- a/vendor/k8s.io/client-go/informers/certificates/v1beta1/certificatesigningrequest.go +++ b/vendor/k8s.io/client-go/informers/certificates/v1beta1/certificatesigningrequest.go @@ -38,19 +38,33 @@ type CertificateSigningRequestInformer interface { } type certificateSigningRequestInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc } // NewCertificateSigningRequestInformer constructs a new informer for CertificateSigningRequest type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. 
func NewCertificateSigningRequestInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredCertificateSigningRequestInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredCertificateSigningRequestInformer constructs a new informer for CertificateSigningRequest type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredCertificateSigningRequestInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.CertificatesV1beta1().CertificateSigningRequests().List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.CertificatesV1beta1().CertificateSigningRequests().Watch(options) }, }, @@ -60,12 +74,12 @@ func NewCertificateSigningRequestInformer(client kubernetes.Interface, resyncPer ) } -func defaultCertificateSigningRequestInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewCertificateSigningRequestInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *certificateSigningRequestInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredCertificateSigningRequestInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *certificateSigningRequestInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&certificates_v1beta1.CertificateSigningRequest{}, defaultCertificateSigningRequestInformer) + return f.factory.InformerFor(&certificates_v1beta1.CertificateSigningRequest{}, f.defaultInformer) } func (f *certificateSigningRequestInformer) Lister() v1beta1.CertificateSigningRequestLister { diff --git a/vendor/k8s.io/client-go/informers/certificates/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/certificates/v1beta1/interface.go index 8facec1276e3..8578023c789f 100644 --- a/vendor/k8s.io/client-go/informers/certificates/v1beta1/interface.go +++ b/vendor/k8s.io/client-go/informers/certificates/v1beta1/interface.go @@ -29,15 +29,17 @@ type Interface interface { } type version struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &version{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // CertificateSigningRequests returns a CertificateSigningRequestInformer. 
func (v *version) CertificateSigningRequests() CertificateSigningRequestInformer { - return &certificateSigningRequestInformer{factory: v.SharedInformerFactory} + return &certificateSigningRequestInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } diff --git a/vendor/k8s.io/client-go/informers/core/BUILD b/vendor/k8s.io/client-go/informers/core/BUILD index 1b4345ea6318..6945899090fb 100644 --- a/vendor/k8s.io/client-go/informers/core/BUILD +++ b/vendor/k8s.io/client-go/informers/core/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["interface.go"], + importpath = "k8s.io/client-go/informers/core", deps = [ "//vendor/k8s.io/client-go/informers/core/v1:go_default_library", "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", diff --git a/vendor/k8s.io/client-go/informers/core/interface.go b/vendor/k8s.io/client-go/informers/core/interface.go index f90c24851a2d..7fc2a5cd5fa5 100644 --- a/vendor/k8s.io/client-go/informers/core/interface.go +++ b/vendor/k8s.io/client-go/informers/core/interface.go @@ -30,15 +30,17 @@ type Interface interface { } type group struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &group{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // V1 returns a new v1.Interface. func (g *group) V1() v1.Interface { - return v1.New(g.SharedInformerFactory) + return v1.New(g.factory, g.namespace, g.tweakListOptions) } diff --git a/vendor/k8s.io/client-go/informers/core/v1/BUILD b/vendor/k8s.io/client-go/informers/core/v1/BUILD index 697caaf139ce..1d459c5db718 100644 --- a/vendor/k8s.io/client-go/informers/core/v1/BUILD +++ b/vendor/k8s.io/client-go/informers/core/v1/BUILD @@ -26,6 +26,7 @@ go_library( "service.go", "serviceaccount.go", ], + importpath = "k8s.io/client-go/informers/core/v1", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/informers/core/v1/componentstatus.go b/vendor/k8s.io/client-go/informers/core/v1/componentstatus.go index f07f9120be52..77b17fd3ee8b 100644 --- a/vendor/k8s.io/client-go/informers/core/v1/componentstatus.go +++ b/vendor/k8s.io/client-go/informers/core/v1/componentstatus.go @@ -38,19 +38,33 @@ type ComponentStatusInformer interface { } type componentStatusInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc } // NewComponentStatusInformer constructs a new informer for ComponentStatus type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewComponentStatusInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredComponentStatusInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredComponentStatusInformer constructs a new informer for ComponentStatus type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredComponentStatusInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.CoreV1().ComponentStatuses().List(options) }, WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.CoreV1().ComponentStatuses().Watch(options) }, }, @@ -60,12 +74,12 @@ func NewComponentStatusInformer(client kubernetes.Interface, resyncPeriod time.D ) } -func defaultComponentStatusInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewComponentStatusInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *componentStatusInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredComponentStatusInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *componentStatusInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&core_v1.ComponentStatus{}, defaultComponentStatusInformer) + return f.factory.InformerFor(&core_v1.ComponentStatus{}, f.defaultInformer) } func (f *componentStatusInformer) Lister() v1.ComponentStatusLister { diff --git a/vendor/k8s.io/client-go/informers/core/v1/configmap.go b/vendor/k8s.io/client-go/informers/core/v1/configmap.go index 7a14f1db4035..ed0f4c2d92c1 100644 --- a/vendor/k8s.io/client-go/informers/core/v1/configmap.go +++ b/vendor/k8s.io/client-go/informers/core/v1/configmap.go @@ -38,19 +38,34 @@ type ConfigMapInformer interface { } type configMapInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewConfigMapInformer constructs a new informer for ConfigMap type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewConfigMapInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredConfigMapInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredConfigMapInformer constructs a new informer for ConfigMap type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredConfigMapInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.CoreV1().ConfigMaps(namespace).List(options) }, WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.CoreV1().ConfigMaps(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewConfigMapInformer(client kubernetes.Interface, namespace string, resyncP ) } -func defaultConfigMapInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewConfigMapInformer(client, meta_v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *configMapInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredConfigMapInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *configMapInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&core_v1.ConfigMap{}, defaultConfigMapInformer) + return f.factory.InformerFor(&core_v1.ConfigMap{}, f.defaultInformer) } func (f *configMapInformer) Lister() v1.ConfigMapLister { diff --git a/vendor/k8s.io/client-go/informers/core/v1/endpoints.go b/vendor/k8s.io/client-go/informers/core/v1/endpoints.go index 248aedfee59c..8a7228bafb40 100644 --- a/vendor/k8s.io/client-go/informers/core/v1/endpoints.go +++ b/vendor/k8s.io/client-go/informers/core/v1/endpoints.go @@ -38,19 +38,34 @@ type EndpointsInformer interface { } type endpointsInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewEndpointsInformer constructs a new informer for Endpoints type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewEndpointsInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredEndpointsInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredEndpointsInformer constructs a new informer for Endpoints type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredEndpointsInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.CoreV1().Endpoints(namespace).List(options) }, WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.CoreV1().Endpoints(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewEndpointsInformer(client kubernetes.Interface, namespace string, resyncP ) } -func defaultEndpointsInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewEndpointsInformer(client, meta_v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *endpointsInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredEndpointsInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *endpointsInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&core_v1.Endpoints{}, defaultEndpointsInformer) + return f.factory.InformerFor(&core_v1.Endpoints{}, f.defaultInformer) } func (f *endpointsInformer) Lister() v1.EndpointsLister { diff --git a/vendor/k8s.io/client-go/informers/core/v1/event.go b/vendor/k8s.io/client-go/informers/core/v1/event.go index 0751775c3e74..23f5ead665d9 100644 --- a/vendor/k8s.io/client-go/informers/core/v1/event.go +++ b/vendor/k8s.io/client-go/informers/core/v1/event.go @@ -38,19 +38,34 @@ type EventInformer interface { } type eventInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewEventInformer constructs a new informer for Event type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewEventInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredEventInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredEventInformer constructs a new informer for Event type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredEventInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.CoreV1().Events(namespace).List(options) }, WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.CoreV1().Events(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewEventInformer(client kubernetes.Interface, namespace string, resyncPerio ) } -func defaultEventInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewEventInformer(client, meta_v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *eventInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredEventInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *eventInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&core_v1.Event{}, defaultEventInformer) + return f.factory.InformerFor(&core_v1.Event{}, f.defaultInformer) } func (f *eventInformer) Lister() v1.EventLister { diff --git a/vendor/k8s.io/client-go/informers/core/v1/interface.go b/vendor/k8s.io/client-go/informers/core/v1/interface.go index 5f2ff96b2cff..e560b12f8098 100644 --- a/vendor/k8s.io/client-go/informers/core/v1/interface.go +++ b/vendor/k8s.io/client-go/informers/core/v1/interface.go @@ -59,90 +59,92 @@ type Interface interface { } type version struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &version{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // ComponentStatuses returns a ComponentStatusInformer. func (v *version) ComponentStatuses() ComponentStatusInformer { - return &componentStatusInformer{factory: v.SharedInformerFactory} + return &componentStatusInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } // ConfigMaps returns a ConfigMapInformer. func (v *version) ConfigMaps() ConfigMapInformer { - return &configMapInformer{factory: v.SharedInformerFactory} + return &configMapInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // Endpoints returns a EndpointsInformer. func (v *version) Endpoints() EndpointsInformer { - return &endpointsInformer{factory: v.SharedInformerFactory} + return &endpointsInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // Events returns a EventInformer. func (v *version) Events() EventInformer { - return &eventInformer{factory: v.SharedInformerFactory} + return &eventInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // LimitRanges returns a LimitRangeInformer. 
func (v *version) LimitRanges() LimitRangeInformer { - return &limitRangeInformer{factory: v.SharedInformerFactory} + return &limitRangeInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // Namespaces returns a NamespaceInformer. func (v *version) Namespaces() NamespaceInformer { - return &namespaceInformer{factory: v.SharedInformerFactory} + return &namespaceInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } // Nodes returns a NodeInformer. func (v *version) Nodes() NodeInformer { - return &nodeInformer{factory: v.SharedInformerFactory} + return &nodeInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } // PersistentVolumes returns a PersistentVolumeInformer. func (v *version) PersistentVolumes() PersistentVolumeInformer { - return &persistentVolumeInformer{factory: v.SharedInformerFactory} + return &persistentVolumeInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } // PersistentVolumeClaims returns a PersistentVolumeClaimInformer. func (v *version) PersistentVolumeClaims() PersistentVolumeClaimInformer { - return &persistentVolumeClaimInformer{factory: v.SharedInformerFactory} + return &persistentVolumeClaimInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // Pods returns a PodInformer. func (v *version) Pods() PodInformer { - return &podInformer{factory: v.SharedInformerFactory} + return &podInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // PodTemplates returns a PodTemplateInformer. func (v *version) PodTemplates() PodTemplateInformer { - return &podTemplateInformer{factory: v.SharedInformerFactory} + return &podTemplateInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // ReplicationControllers returns a ReplicationControllerInformer. func (v *version) ReplicationControllers() ReplicationControllerInformer { - return &replicationControllerInformer{factory: v.SharedInformerFactory} + return &replicationControllerInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // ResourceQuotas returns a ResourceQuotaInformer. func (v *version) ResourceQuotas() ResourceQuotaInformer { - return &resourceQuotaInformer{factory: v.SharedInformerFactory} + return &resourceQuotaInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // Secrets returns a SecretInformer. func (v *version) Secrets() SecretInformer { - return &secretInformer{factory: v.SharedInformerFactory} + return &secretInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // Services returns a ServiceInformer. func (v *version) Services() ServiceInformer { - return &serviceInformer{factory: v.SharedInformerFactory} + return &serviceInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // ServiceAccounts returns a ServiceAccountInformer. 
func (v *version) ServiceAccounts() ServiceAccountInformer { - return &serviceAccountInformer{factory: v.SharedInformerFactory} + return &serviceAccountInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } diff --git a/vendor/k8s.io/client-go/informers/core/v1/limitrange.go b/vendor/k8s.io/client-go/informers/core/v1/limitrange.go index 77ffbb6dc1c3..9588b9402176 100644 --- a/vendor/k8s.io/client-go/informers/core/v1/limitrange.go +++ b/vendor/k8s.io/client-go/informers/core/v1/limitrange.go @@ -38,19 +38,34 @@ type LimitRangeInformer interface { } type limitRangeInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewLimitRangeInformer constructs a new informer for LimitRange type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewLimitRangeInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredLimitRangeInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredLimitRangeInformer constructs a new informer for LimitRange type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredLimitRangeInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.CoreV1().LimitRanges(namespace).List(options) }, WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.CoreV1().LimitRanges(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewLimitRangeInformer(client kubernetes.Interface, namespace string, resync ) } -func defaultLimitRangeInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewLimitRangeInformer(client, meta_v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *limitRangeInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredLimitRangeInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *limitRangeInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&core_v1.LimitRange{}, defaultLimitRangeInformer) + return f.factory.InformerFor(&core_v1.LimitRange{}, f.defaultInformer) } func (f *limitRangeInformer) Lister() v1.LimitRangeLister { diff --git a/vendor/k8s.io/client-go/informers/core/v1/namespace.go b/vendor/k8s.io/client-go/informers/core/v1/namespace.go index e37dccfda54b..eb841b157b48 100644 --- a/vendor/k8s.io/client-go/informers/core/v1/namespace.go +++ b/vendor/k8s.io/client-go/informers/core/v1/namespace.go @@ -38,19 +38,33 @@ type NamespaceInformer interface { } type 
namespaceInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc } // NewNamespaceInformer constructs a new informer for Namespace type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewNamespaceInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredNamespaceInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredNamespaceInformer constructs a new informer for Namespace type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredNamespaceInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.CoreV1().Namespaces().List(options) }, WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.CoreV1().Namespaces().Watch(options) }, }, @@ -60,12 +74,12 @@ func NewNamespaceInformer(client kubernetes.Interface, resyncPeriod time.Duratio ) } -func defaultNamespaceInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewNamespaceInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *namespaceInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredNamespaceInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *namespaceInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&core_v1.Namespace{}, defaultNamespaceInformer) + return f.factory.InformerFor(&core_v1.Namespace{}, f.defaultInformer) } func (f *namespaceInformer) Lister() v1.NamespaceLister { diff --git a/vendor/k8s.io/client-go/informers/core/v1/node.go b/vendor/k8s.io/client-go/informers/core/v1/node.go index 9cd065b22c1a..3c70e52b039e 100644 --- a/vendor/k8s.io/client-go/informers/core/v1/node.go +++ b/vendor/k8s.io/client-go/informers/core/v1/node.go @@ -38,19 +38,33 @@ type NodeInformer interface { } type nodeInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc } // NewNodeInformer constructs a new informer for Node type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewNodeInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredNodeInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredNodeInformer constructs a new informer for Node type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredNodeInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.CoreV1().Nodes().List(options) }, WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.CoreV1().Nodes().Watch(options) }, }, @@ -60,12 +74,12 @@ func NewNodeInformer(client kubernetes.Interface, resyncPeriod time.Duration, in ) } -func defaultNodeInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewNodeInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *nodeInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredNodeInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *nodeInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&core_v1.Node{}, defaultNodeInformer) + return f.factory.InformerFor(&core_v1.Node{}, f.defaultInformer) } func (f *nodeInformer) Lister() v1.NodeLister { diff --git a/vendor/k8s.io/client-go/informers/core/v1/persistentvolume.go b/vendor/k8s.io/client-go/informers/core/v1/persistentvolume.go index c41e22f37407..e944560f7939 100644 --- a/vendor/k8s.io/client-go/informers/core/v1/persistentvolume.go +++ b/vendor/k8s.io/client-go/informers/core/v1/persistentvolume.go @@ -38,19 +38,33 @@ type PersistentVolumeInformer interface { } type persistentVolumeInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc } // NewPersistentVolumeInformer constructs a new informer for PersistentVolume type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewPersistentVolumeInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPersistentVolumeInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredPersistentVolumeInformer constructs a new informer for PersistentVolume type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredPersistentVolumeInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.CoreV1().PersistentVolumes().List(options) }, WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.CoreV1().PersistentVolumes().Watch(options) }, }, @@ -60,12 +74,12 @@ func NewPersistentVolumeInformer(client kubernetes.Interface, resyncPeriod time. ) } -func defaultPersistentVolumeInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewPersistentVolumeInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *persistentVolumeInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPersistentVolumeInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *persistentVolumeInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&core_v1.PersistentVolume{}, defaultPersistentVolumeInformer) + return f.factory.InformerFor(&core_v1.PersistentVolume{}, f.defaultInformer) } func (f *persistentVolumeInformer) Lister() v1.PersistentVolumeLister { diff --git a/vendor/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go b/vendor/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go index 2d0364caa89a..136884d4c999 100644 --- a/vendor/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go +++ b/vendor/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go @@ -38,19 +38,34 @@ type PersistentVolumeClaimInformer interface { } type persistentVolumeClaimInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewPersistentVolumeClaimInformer constructs a new informer for PersistentVolumeClaim type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewPersistentVolumeClaimInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPersistentVolumeClaimInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredPersistentVolumeClaimInformer constructs a new informer for PersistentVolumeClaim type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredPersistentVolumeClaimInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.CoreV1().PersistentVolumeClaims(namespace).List(options) }, WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.CoreV1().PersistentVolumeClaims(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewPersistentVolumeClaimInformer(client kubernetes.Interface, namespace str ) } -func defaultPersistentVolumeClaimInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewPersistentVolumeClaimInformer(client, meta_v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *persistentVolumeClaimInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPersistentVolumeClaimInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *persistentVolumeClaimInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&core_v1.PersistentVolumeClaim{}, defaultPersistentVolumeClaimInformer) + return f.factory.InformerFor(&core_v1.PersistentVolumeClaim{}, f.defaultInformer) } func (f *persistentVolumeClaimInformer) Lister() v1.PersistentVolumeClaimLister { diff --git a/vendor/k8s.io/client-go/informers/core/v1/pod.go b/vendor/k8s.io/client-go/informers/core/v1/pod.go index d926fbd0f89d..b9720829024c 100644 --- a/vendor/k8s.io/client-go/informers/core/v1/pod.go +++ b/vendor/k8s.io/client-go/informers/core/v1/pod.go @@ -38,19 +38,34 @@ type PodInformer interface { } type podInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewPodInformer constructs a new informer for Pod type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewPodInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPodInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredPodInformer constructs a new informer for Pod type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredPodInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.CoreV1().Pods(namespace).List(options) }, WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.CoreV1().Pods(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewPodInformer(client kubernetes.Interface, namespace string, resyncPeriod ) } -func defaultPodInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewPodInformer(client, meta_v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *podInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPodInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *podInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&core_v1.Pod{}, defaultPodInformer) + return f.factory.InformerFor(&core_v1.Pod{}, f.defaultInformer) } func (f *podInformer) Lister() v1.PodLister { diff --git a/vendor/k8s.io/client-go/informers/core/v1/podtemplate.go b/vendor/k8s.io/client-go/informers/core/v1/podtemplate.go index b61f8667bc90..c05753850c72 100644 --- a/vendor/k8s.io/client-go/informers/core/v1/podtemplate.go +++ b/vendor/k8s.io/client-go/informers/core/v1/podtemplate.go @@ -38,19 +38,34 @@ type PodTemplateInformer interface { } type podTemplateInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewPodTemplateInformer constructs a new informer for PodTemplate type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewPodTemplateInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPodTemplateInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredPodTemplateInformer constructs a new informer for PodTemplate type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredPodTemplateInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.CoreV1().PodTemplates(namespace).List(options) }, WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.CoreV1().PodTemplates(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewPodTemplateInformer(client kubernetes.Interface, namespace string, resyn ) } -func defaultPodTemplateInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewPodTemplateInformer(client, meta_v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *podTemplateInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPodTemplateInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *podTemplateInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&core_v1.PodTemplate{}, defaultPodTemplateInformer) + return f.factory.InformerFor(&core_v1.PodTemplate{}, f.defaultInformer) } func (f *podTemplateInformer) Lister() v1.PodTemplateLister { diff --git a/vendor/k8s.io/client-go/informers/core/v1/replicationcontroller.go b/vendor/k8s.io/client-go/informers/core/v1/replicationcontroller.go index f305994cc52a..e04cd146992d 100644 --- a/vendor/k8s.io/client-go/informers/core/v1/replicationcontroller.go +++ b/vendor/k8s.io/client-go/informers/core/v1/replicationcontroller.go @@ -38,19 +38,34 @@ type ReplicationControllerInformer interface { } type replicationControllerInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewReplicationControllerInformer constructs a new informer for ReplicationController type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewReplicationControllerInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredReplicationControllerInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredReplicationControllerInformer constructs a new informer for ReplicationController type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredReplicationControllerInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.CoreV1().ReplicationControllers(namespace).List(options) }, WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.CoreV1().ReplicationControllers(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewReplicationControllerInformer(client kubernetes.Interface, namespace str ) } -func defaultReplicationControllerInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewReplicationControllerInformer(client, meta_v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *replicationControllerInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredReplicationControllerInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *replicationControllerInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&core_v1.ReplicationController{}, defaultReplicationControllerInformer) + return f.factory.InformerFor(&core_v1.ReplicationController{}, f.defaultInformer) } func (f *replicationControllerInformer) Lister() v1.ReplicationControllerLister { diff --git a/vendor/k8s.io/client-go/informers/core/v1/resourcequota.go b/vendor/k8s.io/client-go/informers/core/v1/resourcequota.go index ac69106c8e9e..3ef4f4c12ccf 100644 --- a/vendor/k8s.io/client-go/informers/core/v1/resourcequota.go +++ b/vendor/k8s.io/client-go/informers/core/v1/resourcequota.go @@ -38,19 +38,34 @@ type ResourceQuotaInformer interface { } type resourceQuotaInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewResourceQuotaInformer constructs a new informer for ResourceQuota type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewResourceQuotaInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredResourceQuotaInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredResourceQuotaInformer constructs a new informer for ResourceQuota type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredResourceQuotaInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.CoreV1().ResourceQuotas(namespace).List(options) }, WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.CoreV1().ResourceQuotas(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewResourceQuotaInformer(client kubernetes.Interface, namespace string, res ) } -func defaultResourceQuotaInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewResourceQuotaInformer(client, meta_v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *resourceQuotaInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredResourceQuotaInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *resourceQuotaInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&core_v1.ResourceQuota{}, defaultResourceQuotaInformer) + return f.factory.InformerFor(&core_v1.ResourceQuota{}, f.defaultInformer) } func (f *resourceQuotaInformer) Lister() v1.ResourceQuotaLister { diff --git a/vendor/k8s.io/client-go/informers/core/v1/secret.go b/vendor/k8s.io/client-go/informers/core/v1/secret.go index 0c7464bc88d9..7bc6395a4483 100644 --- a/vendor/k8s.io/client-go/informers/core/v1/secret.go +++ b/vendor/k8s.io/client-go/informers/core/v1/secret.go @@ -38,19 +38,34 @@ type SecretInformer interface { } type secretInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewSecretInformer constructs a new informer for Secret type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewSecretInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredSecretInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredSecretInformer constructs a new informer for Secret type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredSecretInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.CoreV1().Secrets(namespace).List(options) }, WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.CoreV1().Secrets(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewSecretInformer(client kubernetes.Interface, namespace string, resyncPeri ) } -func defaultSecretInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewSecretInformer(client, meta_v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *secretInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredSecretInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *secretInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&core_v1.Secret{}, defaultSecretInformer) + return f.factory.InformerFor(&core_v1.Secret{}, f.defaultInformer) } func (f *secretInformer) Lister() v1.SecretLister { diff --git a/vendor/k8s.io/client-go/informers/core/v1/service.go b/vendor/k8s.io/client-go/informers/core/v1/service.go index bba8ab9dc432..d1b5ed02f982 100644 --- a/vendor/k8s.io/client-go/informers/core/v1/service.go +++ b/vendor/k8s.io/client-go/informers/core/v1/service.go @@ -38,19 +38,34 @@ type ServiceInformer interface { } type serviceInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewServiceInformer constructs a new informer for Service type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewServiceInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredServiceInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredServiceInformer constructs a new informer for Service type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredServiceInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.CoreV1().Services(namespace).List(options) }, WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.CoreV1().Services(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewServiceInformer(client kubernetes.Interface, namespace string, resyncPer ) } -func defaultServiceInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewServiceInformer(client, meta_v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *serviceInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredServiceInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *serviceInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&core_v1.Service{}, defaultServiceInformer) + return f.factory.InformerFor(&core_v1.Service{}, f.defaultInformer) } func (f *serviceInformer) Lister() v1.ServiceLister { diff --git a/vendor/k8s.io/client-go/informers/core/v1/serviceaccount.go b/vendor/k8s.io/client-go/informers/core/v1/serviceaccount.go index d41667bb2981..fb9c50aa3550 100644 --- a/vendor/k8s.io/client-go/informers/core/v1/serviceaccount.go +++ b/vendor/k8s.io/client-go/informers/core/v1/serviceaccount.go @@ -38,19 +38,34 @@ type ServiceAccountInformer interface { } type serviceAccountInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewServiceAccountInformer constructs a new informer for ServiceAccount type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewServiceAccountInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredServiceAccountInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredServiceAccountInformer constructs a new informer for ServiceAccount type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredServiceAccountInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.CoreV1().ServiceAccounts(namespace).List(options) }, WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.CoreV1().ServiceAccounts(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewServiceAccountInformer(client kubernetes.Interface, namespace string, re ) } -func defaultServiceAccountInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewServiceAccountInformer(client, meta_v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *serviceAccountInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredServiceAccountInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *serviceAccountInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&core_v1.ServiceAccount{}, defaultServiceAccountInformer) + return f.factory.InformerFor(&core_v1.ServiceAccount{}, f.defaultInformer) } func (f *serviceAccountInformer) Lister() v1.ServiceAccountLister { diff --git a/vendor/k8s.io/client-go/informers/events/BUILD b/vendor/k8s.io/client-go/informers/events/BUILD new file mode 100644 index 000000000000..cf87d7c56da1 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/events/BUILD @@ -0,0 +1,29 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["interface.go"], + importpath = "k8s.io/client-go/informers/events", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/client-go/informers/events/v1beta1:go_default_library", + "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//staging/src/k8s.io/client-go/informers/events/v1beta1:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/client-go/informers/events/interface.go b/vendor/k8s.io/client-go/informers/events/interface.go new file mode 100644 index 000000000000..81f6646f7be9 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/events/interface.go @@ -0,0 +1,46 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by informer-gen + +package events + +import ( + v1beta1 "k8s.io/client-go/informers/events/v1beta1" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1beta1 provides access to shared informers for resources in V1beta1. + V1beta1() v1beta1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1beta1 returns a new v1beta1.Interface. +func (g *group) V1beta1() v1beta1.Interface { + return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/events/v1beta1/BUILD b/vendor/k8s.io/client-go/informers/events/v1beta1/BUILD new file mode 100644 index 000000000000..7f398d3ef3a5 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/events/v1beta1/BUILD @@ -0,0 +1,35 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "event.go", + "interface.go", + ], + importpath = "k8s.io/client-go/informers/events/v1beta1", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/api/events/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", + "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/listers/events/v1beta1:go_default_library", + "//vendor/k8s.io/client-go/tools/cache:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/client-go/informers/events/v1beta1/event.go b/vendor/k8s.io/client-go/informers/events/v1beta1/event.go new file mode 100644 index 000000000000..d604b4cf0da2 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/events/v1beta1/event.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + events_v1beta1 "k8s.io/api/events/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/events/v1beta1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// EventInformer provides access to a shared informer and lister for +// Events. +type EventInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.EventLister +} + +type eventInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewEventInformer constructs a new informer for Event type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewEventInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredEventInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredEventInformer constructs a new informer for Event type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredEventInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.EventsV1beta1().Events(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.EventsV1beta1().Events(namespace).Watch(options) + }, + }, + &events_v1beta1.Event{}, + resyncPeriod, + indexers, + ) +} + +func (f *eventInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredEventInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *eventInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&events_v1beta1.Event{}, f.defaultInformer) +} + +func (f *eventInformer) Lister() v1beta1.EventLister { + return v1beta1.NewEventLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/events/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/events/v1beta1/interface.go new file mode 100644 index 000000000000..d079afed593e --- /dev/null +++ b/vendor/k8s.io/client-go/informers/events/v1beta1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // Events returns a EventInformer. + Events() EventInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// Events returns a EventInformer. +func (v *version) Events() EventInformer { + return &eventInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/extensions/BUILD b/vendor/k8s.io/client-go/informers/extensions/BUILD index 214883a11bf1..070358e205dd 100644 --- a/vendor/k8s.io/client-go/informers/extensions/BUILD +++ b/vendor/k8s.io/client-go/informers/extensions/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["interface.go"], + importpath = "k8s.io/client-go/informers/extensions", deps = [ "//vendor/k8s.io/client-go/informers/extensions/v1beta1:go_default_library", "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", diff --git a/vendor/k8s.io/client-go/informers/extensions/interface.go b/vendor/k8s.io/client-go/informers/extensions/interface.go index 009a89b945af..a6bfc3b44d36 100644 --- a/vendor/k8s.io/client-go/informers/extensions/interface.go +++ b/vendor/k8s.io/client-go/informers/extensions/interface.go @@ -30,15 +30,17 @@ type Interface interface { } type group struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &group{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // V1beta1 returns a new v1beta1.Interface. 
func (g *group) V1beta1() v1beta1.Interface { - return v1beta1.New(g.SharedInformerFactory) + return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) } diff --git a/vendor/k8s.io/client-go/informers/extensions/v1beta1/BUILD b/vendor/k8s.io/client-go/informers/extensions/v1beta1/BUILD index 195f023eadf3..7f11d7bdf799 100644 --- a/vendor/k8s.io/client-go/informers/extensions/v1beta1/BUILD +++ b/vendor/k8s.io/client-go/informers/extensions/v1beta1/BUILD @@ -14,8 +14,8 @@ go_library( "interface.go", "podsecuritypolicy.go", "replicaset.go", - "thirdpartyresource.go", ], + importpath = "k8s.io/client-go/informers/extensions/v1beta1", deps = [ "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/informers/extensions/v1beta1/daemonset.go b/vendor/k8s.io/client-go/informers/extensions/v1beta1/daemonset.go index e203ea6f20cc..c64b14c3da74 100644 --- a/vendor/k8s.io/client-go/informers/extensions/v1beta1/daemonset.go +++ b/vendor/k8s.io/client-go/informers/extensions/v1beta1/daemonset.go @@ -38,19 +38,34 @@ type DaemonSetInformer interface { } type daemonSetInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewDaemonSetInformer constructs a new informer for DaemonSet type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewDaemonSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredDaemonSetInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredDaemonSetInformer constructs a new informer for DaemonSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredDaemonSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.ExtensionsV1beta1().DaemonSets(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.ExtensionsV1beta1().DaemonSets(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewDaemonSetInformer(client kubernetes.Interface, namespace string, resyncP ) } -func defaultDaemonSetInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewDaemonSetInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *daemonSetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredDaemonSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *daemonSetInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&extensions_v1beta1.DaemonSet{}, defaultDaemonSetInformer) + return f.factory.InformerFor(&extensions_v1beta1.DaemonSet{}, f.defaultInformer) } func (f *daemonSetInformer) Lister() v1beta1.DaemonSetLister { diff --git a/vendor/k8s.io/client-go/informers/extensions/v1beta1/deployment.go b/vendor/k8s.io/client-go/informers/extensions/v1beta1/deployment.go index 4d20f8abe2d3..4bcfc5c252be 100644 --- a/vendor/k8s.io/client-go/informers/extensions/v1beta1/deployment.go +++ b/vendor/k8s.io/client-go/informers/extensions/v1beta1/deployment.go @@ -38,19 +38,34 @@ type DeploymentInformer interface { } type deploymentInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewDeploymentInformer constructs a new informer for Deployment type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewDeploymentInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredDeploymentInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredDeploymentInformer constructs a new informer for Deployment type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredDeploymentInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.ExtensionsV1beta1().Deployments(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.ExtensionsV1beta1().Deployments(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewDeploymentInformer(client kubernetes.Interface, namespace string, resync ) } -func defaultDeploymentInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewDeploymentInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *deploymentInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredDeploymentInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *deploymentInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&extensions_v1beta1.Deployment{}, defaultDeploymentInformer) + return f.factory.InformerFor(&extensions_v1beta1.Deployment{}, f.defaultInformer) } func (f *deploymentInformer) Lister() v1beta1.DeploymentLister { diff --git a/vendor/k8s.io/client-go/informers/extensions/v1beta1/ingress.go b/vendor/k8s.io/client-go/informers/extensions/v1beta1/ingress.go index 9280ffe37993..22dac92b9cef 100644 --- a/vendor/k8s.io/client-go/informers/extensions/v1beta1/ingress.go +++ b/vendor/k8s.io/client-go/informers/extensions/v1beta1/ingress.go @@ -38,19 +38,34 @@ type IngressInformer interface { } type ingressInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewIngressInformer constructs a new informer for Ingress type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewIngressInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredIngressInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredIngressInformer constructs a new informer for Ingress type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredIngressInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.ExtensionsV1beta1().Ingresses(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.ExtensionsV1beta1().Ingresses(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewIngressInformer(client kubernetes.Interface, namespace string, resyncPer ) } -func defaultIngressInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewIngressInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *ingressInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredIngressInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *ingressInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&extensions_v1beta1.Ingress{}, defaultIngressInformer) + return f.factory.InformerFor(&extensions_v1beta1.Ingress{}, f.defaultInformer) } func (f *ingressInformer) Lister() v1beta1.IngressLister { diff --git a/vendor/k8s.io/client-go/informers/extensions/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/extensions/v1beta1/interface.go index ef226bae04f2..ce060e0d90fb 100644 --- a/vendor/k8s.io/client-go/informers/extensions/v1beta1/interface.go +++ b/vendor/k8s.io/client-go/informers/extensions/v1beta1/interface.go @@ -34,45 +34,40 @@ type Interface interface { PodSecurityPolicies() PodSecurityPolicyInformer // ReplicaSets returns a ReplicaSetInformer. ReplicaSets() ReplicaSetInformer - // ThirdPartyResources returns a ThirdPartyResourceInformer. - ThirdPartyResources() ThirdPartyResourceInformer } type version struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &version{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // DaemonSets returns a DaemonSetInformer. func (v *version) DaemonSets() DaemonSetInformer { - return &daemonSetInformer{factory: v.SharedInformerFactory} + return &daemonSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // Deployments returns a DeploymentInformer. func (v *version) Deployments() DeploymentInformer { - return &deploymentInformer{factory: v.SharedInformerFactory} + return &deploymentInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // Ingresses returns a IngressInformer. 
func (v *version) Ingresses() IngressInformer { - return &ingressInformer{factory: v.SharedInformerFactory} + return &ingressInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // PodSecurityPolicies returns a PodSecurityPolicyInformer. func (v *version) PodSecurityPolicies() PodSecurityPolicyInformer { - return &podSecurityPolicyInformer{factory: v.SharedInformerFactory} + return &podSecurityPolicyInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } // ReplicaSets returns a ReplicaSetInformer. func (v *version) ReplicaSets() ReplicaSetInformer { - return &replicaSetInformer{factory: v.SharedInformerFactory} -} - -// ThirdPartyResources returns a ThirdPartyResourceInformer. -func (v *version) ThirdPartyResources() ThirdPartyResourceInformer { - return &thirdPartyResourceInformer{factory: v.SharedInformerFactory} + return &replicaSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } diff --git a/vendor/k8s.io/client-go/informers/extensions/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/informers/extensions/v1beta1/podsecuritypolicy.go index 59336b0e46a5..18ef2735b576 100644 --- a/vendor/k8s.io/client-go/informers/extensions/v1beta1/podsecuritypolicy.go +++ b/vendor/k8s.io/client-go/informers/extensions/v1beta1/podsecuritypolicy.go @@ -38,19 +38,33 @@ type PodSecurityPolicyInformer interface { } type podSecurityPolicyInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc } // NewPodSecurityPolicyInformer constructs a new informer for PodSecurityPolicy type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewPodSecurityPolicyInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPodSecurityPolicyInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredPodSecurityPolicyInformer constructs a new informer for PodSecurityPolicy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredPodSecurityPolicyInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.ExtensionsV1beta1().PodSecurityPolicies().List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.ExtensionsV1beta1().PodSecurityPolicies().Watch(options) }, }, @@ -60,12 +74,12 @@ func NewPodSecurityPolicyInformer(client kubernetes.Interface, resyncPeriod time ) } -func defaultPodSecurityPolicyInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewPodSecurityPolicyInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *podSecurityPolicyInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPodSecurityPolicyInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *podSecurityPolicyInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&extensions_v1beta1.PodSecurityPolicy{}, defaultPodSecurityPolicyInformer) + return f.factory.InformerFor(&extensions_v1beta1.PodSecurityPolicy{}, f.defaultInformer) } func (f *podSecurityPolicyInformer) Lister() v1beta1.PodSecurityPolicyLister { diff --git a/vendor/k8s.io/client-go/informers/extensions/v1beta1/replicaset.go b/vendor/k8s.io/client-go/informers/extensions/v1beta1/replicaset.go index 8d1af811990d..856cb30bab54 100644 --- a/vendor/k8s.io/client-go/informers/extensions/v1beta1/replicaset.go +++ b/vendor/k8s.io/client-go/informers/extensions/v1beta1/replicaset.go @@ -38,19 +38,34 @@ type ReplicaSetInformer interface { } type replicaSetInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewReplicaSetInformer constructs a new informer for ReplicaSet type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewReplicaSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredReplicaSetInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredReplicaSetInformer constructs a new informer for ReplicaSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
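The generated comments keep repeating the same guidance: prefer the shared-informer factory over these standalone constructors so that callers share one cache and one watch per type. A rough sketch of that preferred path (illustrative, not from this diff; the handler body and resync value are assumptions):

package example

import (
	"time"

	extensions_v1beta1 "k8s.io/api/extensions/v1beta1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// watchReplicaSets attaches a handler to the factory's shared ReplicaSet
// informer; repeated requests for the same type reuse the same informer.
func watchReplicaSets(client kubernetes.Interface, stopCh <-chan struct{}) {
	factory := informers.NewSharedInformerFactory(client, 10*time.Minute)

	rsInformer := factory.Extensions().V1beta1().ReplicaSets().Informer()
	rsInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		UpdateFunc: func(oldObj, newObj interface{}) {
			if rs, ok := newObj.(*extensions_v1beta1.ReplicaSet); ok {
				_ = rs // react to spec/status changes here
			}
		},
	})

	// Start only runs informers that have been requested from the factory.
	factory.Start(stopCh)
	cache.WaitForCacheSync(stopCh, rsInformer.HasSynced)
}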
+func NewFilteredReplicaSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.ExtensionsV1beta1().ReplicaSets(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.ExtensionsV1beta1().ReplicaSets(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewReplicaSetInformer(client kubernetes.Interface, namespace string, resync ) } -func defaultReplicaSetInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewReplicaSetInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *replicaSetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredReplicaSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *replicaSetInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&extensions_v1beta1.ReplicaSet{}, defaultReplicaSetInformer) + return f.factory.InformerFor(&extensions_v1beta1.ReplicaSet{}, f.defaultInformer) } func (f *replicaSetInformer) Lister() v1beta1.ReplicaSetLister { diff --git a/vendor/k8s.io/client-go/informers/extensions/v1beta1/thirdpartyresource.go b/vendor/k8s.io/client-go/informers/extensions/v1beta1/thirdpartyresource.go deleted file mode 100644 index 62d02a548a65..000000000000 --- a/vendor/k8s.io/client-go/informers/extensions/v1beta1/thirdpartyresource.go +++ /dev/null @@ -1,73 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was automatically generated by informer-gen - -package v1beta1 - -import ( - extensions_v1beta1 "k8s.io/api/extensions/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - internalinterfaces "k8s.io/client-go/informers/internalinterfaces" - kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/extensions/v1beta1" - cache "k8s.io/client-go/tools/cache" - time "time" -) - -// ThirdPartyResourceInformer provides access to a shared informer and lister for -// ThirdPartyResources. -type ThirdPartyResourceInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1beta1.ThirdPartyResourceLister -} - -type thirdPartyResourceInformer struct { - factory internalinterfaces.SharedInformerFactory -} - -// NewThirdPartyResourceInformer constructs a new informer for ThirdPartyResource type. 
-// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewThirdPartyResourceInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - return client.ExtensionsV1beta1().ThirdPartyResources().List(options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - return client.ExtensionsV1beta1().ThirdPartyResources().Watch(options) - }, - }, - &extensions_v1beta1.ThirdPartyResource{}, - resyncPeriod, - indexers, - ) -} - -func defaultThirdPartyResourceInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewThirdPartyResourceInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) -} - -func (f *thirdPartyResourceInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&extensions_v1beta1.ThirdPartyResource{}, defaultThirdPartyResourceInformer) -} - -func (f *thirdPartyResourceInformer) Lister() v1beta1.ThirdPartyResourceLister { - return v1beta1.NewThirdPartyResourceLister(f.Informer().GetIndexer()) -} diff --git a/vendor/k8s.io/client-go/informers/factory.go b/vendor/k8s.io/client-go/informers/factory.go index c28c7bc41b3d..e922c1276cb8 100644 --- a/vendor/k8s.io/client-go/informers/factory.go +++ b/vendor/k8s.io/client-go/informers/factory.go @@ -19,6 +19,7 @@ limitations under the License. package informers import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" admissionregistration "k8s.io/client-go/informers/admissionregistration" @@ -27,6 +28,7 @@ import ( batch "k8s.io/client-go/informers/batch" certificates "k8s.io/client-go/informers/certificates" core "k8s.io/client-go/informers/core" + events "k8s.io/client-go/informers/events" extensions "k8s.io/client-go/informers/extensions" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" networking "k8s.io/client-go/informers/networking" @@ -43,9 +45,11 @@ import ( ) type sharedInformerFactory struct { - client kubernetes.Interface - lock sync.Mutex - defaultResync time.Duration + client kubernetes.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration informers map[reflect.Type]cache.SharedIndexInformer // startedInformers is used for tracking which informers have been started. @@ -55,8 +59,17 @@ type sharedInformerFactory struct { // NewSharedInformerFactory constructs a new instance of sharedInformerFactory func NewSharedInformerFactory(client kubernetes.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewFilteredSharedInformerFactory(client, defaultResync, v1.NamespaceAll, nil) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. 
+func NewFilteredSharedInformerFactory(client kubernetes.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { return &sharedInformerFactory{ client: client, + namespace: namespace, + tweakListOptions: tweakListOptions, defaultResync: defaultResync, informers: make(map[reflect.Type]cache.SharedIndexInformer), startedInformers: make(map[reflect.Type]bool), @@ -128,6 +141,7 @@ type SharedInformerFactory interface { Batch() batch.Interface Certificates() certificates.Interface Core() core.Interface + Events() events.Interface Extensions() extensions.Interface Networking() networking.Interface Policy() policy.Interface @@ -138,53 +152,57 @@ type SharedInformerFactory interface { } func (f *sharedInformerFactory) Admissionregistration() admissionregistration.Interface { - return admissionregistration.New(f) + return admissionregistration.New(f, f.namespace, f.tweakListOptions) } func (f *sharedInformerFactory) Apps() apps.Interface { - return apps.New(f) + return apps.New(f, f.namespace, f.tweakListOptions) } func (f *sharedInformerFactory) Autoscaling() autoscaling.Interface { - return autoscaling.New(f) + return autoscaling.New(f, f.namespace, f.tweakListOptions) } func (f *sharedInformerFactory) Batch() batch.Interface { - return batch.New(f) + return batch.New(f, f.namespace, f.tweakListOptions) } func (f *sharedInformerFactory) Certificates() certificates.Interface { - return certificates.New(f) + return certificates.New(f, f.namespace, f.tweakListOptions) } func (f *sharedInformerFactory) Core() core.Interface { - return core.New(f) + return core.New(f, f.namespace, f.tweakListOptions) +} + +func (f *sharedInformerFactory) Events() events.Interface { + return events.New(f, f.namespace, f.tweakListOptions) } func (f *sharedInformerFactory) Extensions() extensions.Interface { - return extensions.New(f) + return extensions.New(f, f.namespace, f.tweakListOptions) } func (f *sharedInformerFactory) Networking() networking.Interface { - return networking.New(f) + return networking.New(f, f.namespace, f.tweakListOptions) } func (f *sharedInformerFactory) Policy() policy.Interface { - return policy.New(f) + return policy.New(f, f.namespace, f.tweakListOptions) } func (f *sharedInformerFactory) Rbac() rbac.Interface { - return rbac.New(f) + return rbac.New(f, f.namespace, f.tweakListOptions) } func (f *sharedInformerFactory) Scheduling() scheduling.Interface { - return scheduling.New(f) + return scheduling.New(f, f.namespace, f.tweakListOptions) } func (f *sharedInformerFactory) Settings() settings.Interface { - return settings.New(f) + return settings.New(f, f.namespace, f.tweakListOptions) } func (f *sharedInformerFactory) Storage() storage.Interface { - return storage.New(f) + return storage.New(f, f.namespace, f.tweakListOptions) } diff --git a/vendor/k8s.io/client-go/informers/generic.go b/vendor/k8s.io/client-go/informers/generic.go index 23c4440c33ae..70ed43317d7a 100644 --- a/vendor/k8s.io/client-go/informers/generic.go +++ b/vendor/k8s.io/client-go/informers/generic.go @@ -21,15 +21,18 @@ package informers import ( "fmt" v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" - v1beta1 "k8s.io/api/apps/v1beta1" + v1beta1 "k8s.io/api/admissionregistration/v1beta1" + v1 "k8s.io/api/apps/v1" + apps_v1beta1 "k8s.io/api/apps/v1beta1" v1beta2 "k8s.io/api/apps/v1beta2" - v1 "k8s.io/api/autoscaling/v1" + autoscaling_v1 "k8s.io/api/autoscaling/v1" v2beta1 "k8s.io/api/autoscaling/v2beta1" batch_v1 
"k8s.io/api/batch/v1" batch_v1beta1 "k8s.io/api/batch/v1beta1" v2alpha1 "k8s.io/api/batch/v2alpha1" certificates_v1beta1 "k8s.io/api/certificates/v1beta1" core_v1 "k8s.io/api/core/v1" + events_v1beta1 "k8s.io/api/events/v1beta1" extensions_v1beta1 "k8s.io/api/extensions/v1beta1" networking_v1 "k8s.io/api/networking/v1" policy_v1beta1 "k8s.io/api/policy/v1beta1" @@ -39,6 +42,7 @@ import ( scheduling_v1alpha1 "k8s.io/api/scheduling/v1alpha1" settings_v1alpha1 "k8s.io/api/settings/v1alpha1" storage_v1 "k8s.io/api/storage/v1" + storage_v1alpha1 "k8s.io/api/storage/v1alpha1" storage_v1beta1 "k8s.io/api/storage/v1beta1" schema "k8s.io/apimachinery/pkg/runtime/schema" cache "k8s.io/client-go/tools/cache" @@ -70,21 +74,37 @@ func (f *genericInformer) Lister() cache.GenericLister { // TODO extend this to unknown resources with a client pool func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { switch resource { - // Group=Admissionregistration, Version=V1alpha1 - case v1alpha1.SchemeGroupVersion.WithResource("externaladmissionhookconfigurations"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1alpha1().ExternalAdmissionHookConfigurations().Informer()}, nil + // Group=admissionregistration.k8s.io, Version=v1alpha1 case v1alpha1.SchemeGroupVersion.WithResource("initializerconfigurations"): return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1alpha1().InitializerConfigurations().Informer()}, nil - // Group=Apps, Version=V1beta1 - case v1beta1.SchemeGroupVersion.WithResource("controllerrevisions"): + // Group=admissionregistration.k8s.io, Version=v1beta1 + case v1beta1.SchemeGroupVersion.WithResource("mutatingwebhookconfigurations"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1beta1().MutatingWebhookConfigurations().Informer()}, nil + case v1beta1.SchemeGroupVersion.WithResource("validatingwebhookconfigurations"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1beta1().ValidatingWebhookConfigurations().Informer()}, nil + + // Group=apps, Version=v1 + case v1.SchemeGroupVersion.WithResource("controllerrevisions"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1().ControllerRevisions().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("daemonsets"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1().DaemonSets().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("deployments"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1().Deployments().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("replicasets"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1().ReplicaSets().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("statefulsets"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1().StatefulSets().Informer()}, nil + + // Group=apps, Version=v1beta1 + case apps_v1beta1.SchemeGroupVersion.WithResource("controllerrevisions"): return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1beta1().ControllerRevisions().Informer()}, nil - case v1beta1.SchemeGroupVersion.WithResource("deployments"): + case apps_v1beta1.SchemeGroupVersion.WithResource("deployments"): return &genericInformer{resource: resource.GroupResource(), 
informer: f.Apps().V1beta1().Deployments().Informer()}, nil - case v1beta1.SchemeGroupVersion.WithResource("statefulsets"): + case apps_v1beta1.SchemeGroupVersion.WithResource("statefulsets"): return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1beta1().StatefulSets().Informer()}, nil - // Group=Apps, Version=V1beta2 + // Group=apps, Version=v1beta2 case v1beta2.SchemeGroupVersion.WithResource("controllerrevisions"): return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1beta2().ControllerRevisions().Informer()}, nil case v1beta2.SchemeGroupVersion.WithResource("daemonsets"): @@ -96,31 +116,31 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case v1beta2.SchemeGroupVersion.WithResource("statefulsets"): return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1beta2().StatefulSets().Informer()}, nil - // Group=Autoscaling, Version=V1 - case v1.SchemeGroupVersion.WithResource("horizontalpodautoscalers"): + // Group=autoscaling, Version=v1 + case autoscaling_v1.SchemeGroupVersion.WithResource("horizontalpodautoscalers"): return &genericInformer{resource: resource.GroupResource(), informer: f.Autoscaling().V1().HorizontalPodAutoscalers().Informer()}, nil - // Group=Autoscaling, Version=V2beta1 + // Group=autoscaling, Version=v2beta1 case v2beta1.SchemeGroupVersion.WithResource("horizontalpodautoscalers"): return &genericInformer{resource: resource.GroupResource(), informer: f.Autoscaling().V2beta1().HorizontalPodAutoscalers().Informer()}, nil - // Group=Batch, Version=V1 + // Group=batch, Version=v1 case batch_v1.SchemeGroupVersion.WithResource("jobs"): return &genericInformer{resource: resource.GroupResource(), informer: f.Batch().V1().Jobs().Informer()}, nil - // Group=Batch, Version=V1beta1 + // Group=batch, Version=v1beta1 case batch_v1beta1.SchemeGroupVersion.WithResource("cronjobs"): return &genericInformer{resource: resource.GroupResource(), informer: f.Batch().V1beta1().CronJobs().Informer()}, nil - // Group=Batch, Version=V2alpha1 + // Group=batch, Version=v2alpha1 case v2alpha1.SchemeGroupVersion.WithResource("cronjobs"): return &genericInformer{resource: resource.GroupResource(), informer: f.Batch().V2alpha1().CronJobs().Informer()}, nil - // Group=Certificates, Version=V1beta1 + // Group=certificates.k8s.io, Version=v1beta1 case certificates_v1beta1.SchemeGroupVersion.WithResource("certificatesigningrequests"): return &genericInformer{resource: resource.GroupResource(), informer: f.Certificates().V1beta1().CertificateSigningRequests().Informer()}, nil - // Group=Core, Version=V1 + // Group=core, Version=v1 case core_v1.SchemeGroupVersion.WithResource("componentstatuses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().ComponentStatuses().Informer()}, nil case core_v1.SchemeGroupVersion.WithResource("configmaps"): @@ -154,7 +174,11 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case core_v1.SchemeGroupVersion.WithResource("serviceaccounts"): return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().ServiceAccounts().Informer()}, nil - // Group=Extensions, Version=V1beta1 + // Group=events.k8s.io, Version=v1beta1 + case events_v1beta1.SchemeGroupVersion.WithResource("events"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Events().V1beta1().Events().Informer()}, nil + + // Group=extensions, Version=v1beta1 case 
extensions_v1beta1.SchemeGroupVersion.WithResource("daemonsets"): return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().DaemonSets().Informer()}, nil case extensions_v1beta1.SchemeGroupVersion.WithResource("deployments"): @@ -165,18 +189,16 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().PodSecurityPolicies().Informer()}, nil case extensions_v1beta1.SchemeGroupVersion.WithResource("replicasets"): return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().ReplicaSets().Informer()}, nil - case extensions_v1beta1.SchemeGroupVersion.WithResource("thirdpartyresources"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().ThirdPartyResources().Informer()}, nil - // Group=Networking, Version=V1 + // Group=networking.k8s.io, Version=v1 case networking_v1.SchemeGroupVersion.WithResource("networkpolicies"): return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1().NetworkPolicies().Informer()}, nil - // Group=Policy, Version=V1beta1 + // Group=policy, Version=v1beta1 case policy_v1beta1.SchemeGroupVersion.WithResource("poddisruptionbudgets"): return &genericInformer{resource: resource.GroupResource(), informer: f.Policy().V1beta1().PodDisruptionBudgets().Informer()}, nil - // Group=Rbac, Version=V1 + // Group=rbac.authorization.k8s.io, Version=v1 case rbac_v1.SchemeGroupVersion.WithResource("clusterroles"): return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1().ClusterRoles().Informer()}, nil case rbac_v1.SchemeGroupVersion.WithResource("clusterrolebindings"): @@ -186,7 +208,7 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case rbac_v1.SchemeGroupVersion.WithResource("rolebindings"): return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1().RoleBindings().Informer()}, nil - // Group=Rbac, Version=V1alpha1 + // Group=rbac.authorization.k8s.io, Version=v1alpha1 case rbac_v1alpha1.SchemeGroupVersion.WithResource("clusterroles"): return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1alpha1().ClusterRoles().Informer()}, nil case rbac_v1alpha1.SchemeGroupVersion.WithResource("clusterrolebindings"): @@ -196,7 +218,7 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case rbac_v1alpha1.SchemeGroupVersion.WithResource("rolebindings"): return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1alpha1().RoleBindings().Informer()}, nil - // Group=Rbac, Version=V1beta1 + // Group=rbac.authorization.k8s.io, Version=v1beta1 case rbac_v1beta1.SchemeGroupVersion.WithResource("clusterroles"): return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1beta1().ClusterRoles().Informer()}, nil case rbac_v1beta1.SchemeGroupVersion.WithResource("clusterrolebindings"): @@ -206,19 +228,23 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case rbac_v1beta1.SchemeGroupVersion.WithResource("rolebindings"): return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1beta1().RoleBindings().Informer()}, nil - // Group=Scheduling, Version=V1alpha1 + // Group=scheduling.k8s.io, Version=v1alpha1 case scheduling_v1alpha1.SchemeGroupVersion.WithResource("priorityclasses"): return &genericInformer{resource: 
resource.GroupResource(), informer: f.Scheduling().V1alpha1().PriorityClasses().Informer()}, nil - // Group=Settings, Version=V1alpha1 + // Group=settings.k8s.io, Version=v1alpha1 case settings_v1alpha1.SchemeGroupVersion.WithResource("podpresets"): return &genericInformer{resource: resource.GroupResource(), informer: f.Settings().V1alpha1().PodPresets().Informer()}, nil - // Group=Storage, Version=V1 + // Group=storage.k8s.io, Version=v1 case storage_v1.SchemeGroupVersion.WithResource("storageclasses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1().StorageClasses().Informer()}, nil - // Group=Storage, Version=V1beta1 + // Group=storage.k8s.io, Version=v1alpha1 + case storage_v1alpha1.SchemeGroupVersion.WithResource("volumeattachments"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1alpha1().VolumeAttachments().Informer()}, nil + + // Group=storage.k8s.io, Version=v1beta1 case storage_v1beta1.SchemeGroupVersion.WithResource("storageclasses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1beta1().StorageClasses().Informer()}, nil diff --git a/vendor/k8s.io/client-go/informers/internalinterfaces/BUILD b/vendor/k8s.io/client-go/informers/internalinterfaces/BUILD index 1f3323abccec..5b20c78f76f5 100644 --- a/vendor/k8s.io/client-go/informers/internalinterfaces/BUILD +++ b/vendor/k8s.io/client-go/informers/internalinterfaces/BUILD @@ -8,7 +8,9 @@ load( go_library( name = "go_default_library", srcs = ["factory_interfaces.go"], + importpath = "k8s.io/client-go/informers/internalinterfaces", deps = [ + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", diff --git a/vendor/k8s.io/client-go/informers/internalinterfaces/factory_interfaces.go b/vendor/k8s.io/client-go/informers/internalinterfaces/factory_interfaces.go index bfe354a59319..61155f7404d0 100644 --- a/vendor/k8s.io/client-go/informers/internalinterfaces/factory_interfaces.go +++ b/vendor/k8s.io/client-go/informers/internalinterfaces/factory_interfaces.go @@ -19,6 +19,7 @@ limitations under the License. 
 package internalinterfaces
 
 import (
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	runtime "k8s.io/apimachinery/pkg/runtime"
 	kubernetes "k8s.io/client-go/kubernetes"
 	cache "k8s.io/client-go/tools/cache"
@@ -32,3 +33,5 @@ type SharedInformerFactory interface {
 	Start(stopCh <-chan struct{})
 	InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer
 }
+
+type TweakListOptionsFunc func(*v1.ListOptions)
diff --git a/vendor/k8s.io/client-go/informers/networking/BUILD b/vendor/k8s.io/client-go/informers/networking/BUILD
index 89809fa6c34d..3219e7d02b5a 100644
--- a/vendor/k8s.io/client-go/informers/networking/BUILD
+++ b/vendor/k8s.io/client-go/informers/networking/BUILD
@@ -8,6 +8,7 @@ load(
 go_library(
     name = "go_default_library",
     srcs = ["interface.go"],
+    importpath = "k8s.io/client-go/informers/networking",
     deps = [
         "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library",
         "//vendor/k8s.io/client-go/informers/networking/v1:go_default_library",
diff --git a/vendor/k8s.io/client-go/informers/networking/interface.go b/vendor/k8s.io/client-go/informers/networking/interface.go
index a5df58b0418f..79e0d0c15122 100644
--- a/vendor/k8s.io/client-go/informers/networking/interface.go
+++ b/vendor/k8s.io/client-go/informers/networking/interface.go
@@ -30,15 +30,17 @@ type Interface interface {
 }
 
 type group struct {
-	internalinterfaces.SharedInformerFactory
+	factory          internalinterfaces.SharedInformerFactory
+	namespace        string
+	tweakListOptions internalinterfaces.TweakListOptionsFunc
 }
 
 // New returns a new Interface.
-func New(f internalinterfaces.SharedInformerFactory) Interface {
-	return &group{f}
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
+	return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
 }
 
 // V1 returns a new v1.Interface.
 func (g *group) V1() v1.Interface {
-	return v1.New(g.SharedInformerFactory)
+	return v1.New(g.factory, g.namespace, g.tweakListOptions)
 }
diff --git a/vendor/k8s.io/client-go/informers/networking/v1/BUILD b/vendor/k8s.io/client-go/informers/networking/v1/BUILD
index 43d2bbf83754..dc6f32696cb1 100644
--- a/vendor/k8s.io/client-go/informers/networking/v1/BUILD
+++ b/vendor/k8s.io/client-go/informers/networking/v1/BUILD
@@ -11,6 +11,7 @@ go_library(
         "interface.go",
         "networkpolicy.go",
     ],
+    importpath = "k8s.io/client-go/informers/networking/v1",
     deps = [
         "//vendor/k8s.io/api/networking/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
diff --git a/vendor/k8s.io/client-go/informers/networking/v1/interface.go b/vendor/k8s.io/client-go/informers/networking/v1/interface.go
index 2ae314a5a51a..980a7be9938b 100644
--- a/vendor/k8s.io/client-go/informers/networking/v1/interface.go
+++ b/vendor/k8s.io/client-go/informers/networking/v1/interface.go
@@ -29,15 +29,17 @@ type Interface interface {
 }
 
 type version struct {
-	internalinterfaces.SharedInformerFactory
+	factory          internalinterfaces.SharedInformerFactory
+	namespace        string
+	tweakListOptions internalinterfaces.TweakListOptionsFunc
 }
 
 // New returns a new Interface.
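TweakListOptionsFunc is the hook that the new NewFilteredSharedInformerFactory (earlier in this diff, in factory.go) threads through every group, version, and informer shown above, so one factory can scope all of its informers to a namespace plus an extra ListOptions filter. A minimal usage sketch, assuming a caller-chosen label selector and helper name; it is not code from this change:

package example

import (
	"time"

	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// newScopedFactory builds a factory whose informers only see objects in the
// given namespace that match the (assumed) label selector below.
func newScopedFactory(client kubernetes.Interface, namespace string, stopCh <-chan struct{}) informers.SharedInformerFactory {
	factory := informers.NewFilteredSharedInformerFactory(
		client,
		30*time.Second,
		namespace,
		func(options *meta_v1.ListOptions) {
			options.LabelSelector = "app.kubernetes.io/managed-by=example" // assumed selector
		},
	)

	// Request informers before Start so the factory knows to run them.
	pods := factory.Core().V1().Pods()
	podLister := pods.Lister()

	factory.Start(stopCh)
	cache.WaitForCacheSync(stopCh, pods.Informer().HasSynced)

	// The lister now serves only the filtered, namespace-scoped objects.
	if list, err := podLister.Pods(namespace).List(labels.Everything()); err == nil {
		_ = list // e.g. inspect len(list) of matching Pods
	}
	return factory
}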
-func New(f internalinterfaces.SharedInformerFactory) Interface { - return &version{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // NetworkPolicies returns a NetworkPolicyInformer. func (v *version) NetworkPolicies() NetworkPolicyInformer { - return &networkPolicyInformer{factory: v.SharedInformerFactory} + return &networkPolicyInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } diff --git a/vendor/k8s.io/client-go/informers/networking/v1/networkpolicy.go b/vendor/k8s.io/client-go/informers/networking/v1/networkpolicy.go index d85384596ae8..b712ba0305b6 100644 --- a/vendor/k8s.io/client-go/informers/networking/v1/networkpolicy.go +++ b/vendor/k8s.io/client-go/informers/networking/v1/networkpolicy.go @@ -38,19 +38,34 @@ type NetworkPolicyInformer interface { } type networkPolicyInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewNetworkPolicyInformer constructs a new informer for NetworkPolicy type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewNetworkPolicyInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredNetworkPolicyInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredNetworkPolicyInformer constructs a new informer for NetworkPolicy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredNetworkPolicyInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.NetworkingV1().NetworkPolicies(namespace).List(options) }, WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.NetworkingV1().NetworkPolicies(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewNetworkPolicyInformer(client kubernetes.Interface, namespace string, res ) } -func defaultNetworkPolicyInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewNetworkPolicyInformer(client, meta_v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *networkPolicyInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredNetworkPolicyInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *networkPolicyInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&networking_v1.NetworkPolicy{}, defaultNetworkPolicyInformer) + return f.factory.InformerFor(&networking_v1.NetworkPolicy{}, f.defaultInformer) } func (f *networkPolicyInformer) Lister() v1.NetworkPolicyLister { diff --git a/vendor/k8s.io/client-go/informers/policy/BUILD b/vendor/k8s.io/client-go/informers/policy/BUILD index 66dc7d48b7b6..882d54d5ef5b 100644 --- a/vendor/k8s.io/client-go/informers/policy/BUILD +++ b/vendor/k8s.io/client-go/informers/policy/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["interface.go"], + importpath = "k8s.io/client-go/informers/policy", deps = [ "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", "//vendor/k8s.io/client-go/informers/policy/v1beta1:go_default_library", diff --git a/vendor/k8s.io/client-go/informers/policy/interface.go b/vendor/k8s.io/client-go/informers/policy/interface.go index c03b9bfd7bb0..f893c3d5b9f2 100644 --- a/vendor/k8s.io/client-go/informers/policy/interface.go +++ b/vendor/k8s.io/client-go/informers/policy/interface.go @@ -30,15 +30,17 @@ type Interface interface { } type group struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &group{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // V1beta1 returns a new v1beta1.Interface. 
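[Editor's note — illustrative aside, not part of the patch.] For namespaced kinds, the generated NewFiltered<Kind>Informer constructors shown above take both a namespace and the tweak hook. A hedged usage sketch for the NetworkPolicy informer outside a factory, using a fake clientset and made-up namespace/selector so it runs without a cluster:

package main

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	networkingv1 "k8s.io/client-go/informers/networking/v1"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// Fake clientset so the example needs no running API server.
	client := fake.NewSimpleClientset()

	// Standalone filtered informer: only NetworkPolicies in "default" that
	// match the tweaked label selector are listed and watched.
	inf := networkingv1.NewFilteredNetworkPolicyInformer(
		client,
		"default",
		30*time.Second,
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
		func(o *metav1.ListOptions) { o.LabelSelector = "app=web" },
	)

	stop := make(chan struct{})
	defer close(stop)
	go inf.Run(stop)
	cache.WaitForCacheSync(stop, inf.HasSynced)
}

As the generated comments stress, the shared informer factory remains the preferred entry point; the filtered constructor is chiefly what each informer's new defaultInformer method delegates to.
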
func (g *group) V1beta1() v1beta1.Interface { - return v1beta1.New(g.SharedInformerFactory) + return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) } diff --git a/vendor/k8s.io/client-go/informers/policy/v1beta1/BUILD b/vendor/k8s.io/client-go/informers/policy/v1beta1/BUILD index a1d036432323..39a7a9d34d9b 100644 --- a/vendor/k8s.io/client-go/informers/policy/v1beta1/BUILD +++ b/vendor/k8s.io/client-go/informers/policy/v1beta1/BUILD @@ -11,6 +11,7 @@ go_library( "interface.go", "poddisruptionbudget.go", ], + importpath = "k8s.io/client-go/informers/policy/v1beta1", deps = [ "//vendor/k8s.io/api/policy/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/informers/policy/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/policy/v1beta1/interface.go index 39b1abbeddfe..f235ee1d0c82 100644 --- a/vendor/k8s.io/client-go/informers/policy/v1beta1/interface.go +++ b/vendor/k8s.io/client-go/informers/policy/v1beta1/interface.go @@ -29,15 +29,17 @@ type Interface interface { } type version struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &version{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // PodDisruptionBudgets returns a PodDisruptionBudgetInformer. func (v *version) PodDisruptionBudgets() PodDisruptionBudgetInformer { - return &podDisruptionBudgetInformer{factory: v.SharedInformerFactory} + return &podDisruptionBudgetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } diff --git a/vendor/k8s.io/client-go/informers/policy/v1beta1/poddisruptionbudget.go b/vendor/k8s.io/client-go/informers/policy/v1beta1/poddisruptionbudget.go index b10ec42f340e..ba0da35b1eee 100644 --- a/vendor/k8s.io/client-go/informers/policy/v1beta1/poddisruptionbudget.go +++ b/vendor/k8s.io/client-go/informers/policy/v1beta1/poddisruptionbudget.go @@ -38,19 +38,34 @@ type PodDisruptionBudgetInformer interface { } type podDisruptionBudgetInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewPodDisruptionBudgetInformer constructs a new informer for PodDisruptionBudget type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewPodDisruptionBudgetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPodDisruptionBudgetInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredPodDisruptionBudgetInformer constructs a new informer for PodDisruptionBudget type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredPodDisruptionBudgetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.PolicyV1beta1().PodDisruptionBudgets(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.PolicyV1beta1().PodDisruptionBudgets(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewPodDisruptionBudgetInformer(client kubernetes.Interface, namespace strin ) } -func defaultPodDisruptionBudgetInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewPodDisruptionBudgetInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *podDisruptionBudgetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPodDisruptionBudgetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *podDisruptionBudgetInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&policy_v1beta1.PodDisruptionBudget{}, defaultPodDisruptionBudgetInformer) + return f.factory.InformerFor(&policy_v1beta1.PodDisruptionBudget{}, f.defaultInformer) } func (f *podDisruptionBudgetInformer) Lister() v1beta1.PodDisruptionBudgetLister { diff --git a/vendor/k8s.io/client-go/informers/rbac/BUILD b/vendor/k8s.io/client-go/informers/rbac/BUILD index 107e22b2d02b..e2afef9346c1 100644 --- a/vendor/k8s.io/client-go/informers/rbac/BUILD +++ b/vendor/k8s.io/client-go/informers/rbac/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["interface.go"], + importpath = "k8s.io/client-go/informers/rbac", deps = [ "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", "//vendor/k8s.io/client-go/informers/rbac/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/informers/rbac/interface.go b/vendor/k8s.io/client-go/informers/rbac/interface.go index dfa6fcb4c896..df7adfcd3cb4 100644 --- a/vendor/k8s.io/client-go/informers/rbac/interface.go +++ b/vendor/k8s.io/client-go/informers/rbac/interface.go @@ -36,25 +36,27 @@ type Interface interface { } type group struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &group{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // V1 returns a new v1.Interface. func (g *group) V1() v1.Interface { - return v1.New(g.SharedInformerFactory) + return v1.New(g.factory, g.namespace, g.tweakListOptions) } // V1alpha1 returns a new v1alpha1.Interface. 
func (g *group) V1alpha1() v1alpha1.Interface { - return v1alpha1.New(g.SharedInformerFactory) + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) } // V1beta1 returns a new v1beta1.Interface. func (g *group) V1beta1() v1beta1.Interface { - return v1beta1.New(g.SharedInformerFactory) + return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) } diff --git a/vendor/k8s.io/client-go/informers/rbac/v1/BUILD b/vendor/k8s.io/client-go/informers/rbac/v1/BUILD index 6a5ef536f0fb..4278890e876d 100644 --- a/vendor/k8s.io/client-go/informers/rbac/v1/BUILD +++ b/vendor/k8s.io/client-go/informers/rbac/v1/BUILD @@ -14,6 +14,7 @@ go_library( "role.go", "rolebinding.go", ], + importpath = "k8s.io/client-go/informers/rbac/v1", deps = [ "//vendor/k8s.io/api/rbac/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/informers/rbac/v1/clusterrole.go b/vendor/k8s.io/client-go/informers/rbac/v1/clusterrole.go index 1d1f93404259..ac75abbc8ce6 100644 --- a/vendor/k8s.io/client-go/informers/rbac/v1/clusterrole.go +++ b/vendor/k8s.io/client-go/informers/rbac/v1/clusterrole.go @@ -38,19 +38,33 @@ type ClusterRoleInformer interface { } type clusterRoleInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc } // NewClusterRoleInformer constructs a new informer for ClusterRole type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewClusterRoleInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterRoleInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterRoleInformer constructs a new informer for ClusterRole type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredClusterRoleInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.RbacV1().ClusterRoles().List(options) }, WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.RbacV1().ClusterRoles().Watch(options) }, }, @@ -60,12 +74,12 @@ func NewClusterRoleInformer(client kubernetes.Interface, resyncPeriod time.Durat ) } -func defaultClusterRoleInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewClusterRoleInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *clusterRoleInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterRoleInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *clusterRoleInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&rbac_v1.ClusterRole{}, defaultClusterRoleInformer) + return f.factory.InformerFor(&rbac_v1.ClusterRole{}, f.defaultInformer) } func (f *clusterRoleInformer) Lister() v1.ClusterRoleLister { diff --git a/vendor/k8s.io/client-go/informers/rbac/v1/clusterrolebinding.go b/vendor/k8s.io/client-go/informers/rbac/v1/clusterrolebinding.go index 1c8cdda89656..a3c73e586c3f 100644 --- a/vendor/k8s.io/client-go/informers/rbac/v1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/informers/rbac/v1/clusterrolebinding.go @@ -38,19 +38,33 @@ type ClusterRoleBindingInformer interface { } type clusterRoleBindingInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc } // NewClusterRoleBindingInformer constructs a new informer for ClusterRoleBinding type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewClusterRoleBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterRoleBindingInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterRoleBindingInformer constructs a new informer for ClusterRoleBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredClusterRoleBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.RbacV1().ClusterRoleBindings().List(options) }, WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.RbacV1().ClusterRoleBindings().Watch(options) }, }, @@ -60,12 +74,12 @@ func NewClusterRoleBindingInformer(client kubernetes.Interface, resyncPeriod tim ) } -func defaultClusterRoleBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewClusterRoleBindingInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *clusterRoleBindingInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterRoleBindingInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *clusterRoleBindingInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&rbac_v1.ClusterRoleBinding{}, defaultClusterRoleBindingInformer) + return f.factory.InformerFor(&rbac_v1.ClusterRoleBinding{}, f.defaultInformer) } func (f *clusterRoleBindingInformer) Lister() v1.ClusterRoleBindingLister { diff --git a/vendor/k8s.io/client-go/informers/rbac/v1/interface.go b/vendor/k8s.io/client-go/informers/rbac/v1/interface.go index 38900ef1e2e3..1e46b039bdc5 100644 --- a/vendor/k8s.io/client-go/informers/rbac/v1/interface.go +++ b/vendor/k8s.io/client-go/informers/rbac/v1/interface.go @@ -35,30 +35,32 @@ type Interface interface { } type version struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &version{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // ClusterRoles returns a ClusterRoleInformer. func (v *version) ClusterRoles() ClusterRoleInformer { - return &clusterRoleInformer{factory: v.SharedInformerFactory} + return &clusterRoleInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } // ClusterRoleBindings returns a ClusterRoleBindingInformer. func (v *version) ClusterRoleBindings() ClusterRoleBindingInformer { - return &clusterRoleBindingInformer{factory: v.SharedInformerFactory} + return &clusterRoleBindingInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } // Roles returns a RoleInformer. func (v *version) Roles() RoleInformer { - return &roleInformer{factory: v.SharedInformerFactory} + return &roleInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // RoleBindings returns a RoleBindingInformer. 
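[Editor's note — illustrative aside, not part of the patch.] Cluster-scoped kinds (ClusterRole, ClusterRoleBinding, and later PriorityClass) get the same treatment minus the namespace: their informer structs carry only factory and tweakListOptions, and their NewFiltered constructors drop the namespace argument. A sketch mirroring the previous example, same fake client and hypothetical selector:

package main

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	rbacv1 "k8s.io/client-go/informers/rbac/v1"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/tools/cache"
)

func main() {
	client := fake.NewSimpleClientset()

	// No namespace parameter for a cluster-scoped resource; the tweak hook
	// still filters every List/Watch the informer performs.
	inf := rbacv1.NewFilteredClusterRoleInformer(
		client,
		30*time.Second,
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
		func(o *metav1.ListOptions) { o.LabelSelector = "app=web" },
	)

	stop := make(chan struct{})
	defer close(stop)
	go inf.Run(stop)
	cache.WaitForCacheSync(stop, inf.HasSynced)
}
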
func (v *version) RoleBindings() RoleBindingInformer { - return &roleBindingInformer{factory: v.SharedInformerFactory} + return &roleBindingInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } diff --git a/vendor/k8s.io/client-go/informers/rbac/v1/role.go b/vendor/k8s.io/client-go/informers/rbac/v1/role.go index 79f33eabc3bf..fb1de46145a6 100644 --- a/vendor/k8s.io/client-go/informers/rbac/v1/role.go +++ b/vendor/k8s.io/client-go/informers/rbac/v1/role.go @@ -38,19 +38,34 @@ type RoleInformer interface { } type roleInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewRoleInformer constructs a new informer for Role type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewRoleInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredRoleInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredRoleInformer constructs a new informer for Role type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredRoleInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.RbacV1().Roles(namespace).List(options) }, WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.RbacV1().Roles(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewRoleInformer(client kubernetes.Interface, namespace string, resyncPeriod ) } -func defaultRoleInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewRoleInformer(client, meta_v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *roleInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredRoleInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *roleInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&rbac_v1.Role{}, defaultRoleInformer) + return f.factory.InformerFor(&rbac_v1.Role{}, f.defaultInformer) } func (f *roleInformer) Lister() v1.RoleLister { diff --git a/vendor/k8s.io/client-go/informers/rbac/v1/rolebinding.go b/vendor/k8s.io/client-go/informers/rbac/v1/rolebinding.go index 71548e1a3723..78c78fa1ac82 100644 --- a/vendor/k8s.io/client-go/informers/rbac/v1/rolebinding.go +++ b/vendor/k8s.io/client-go/informers/rbac/v1/rolebinding.go @@ -38,19 +38,34 @@ type RoleBindingInformer interface { } type roleBindingInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions 
internalinterfaces.TweakListOptionsFunc + namespace string } // NewRoleBindingInformer constructs a new informer for RoleBinding type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewRoleBindingInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredRoleBindingInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredRoleBindingInformer constructs a new informer for RoleBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredRoleBindingInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.RbacV1().RoleBindings(namespace).List(options) }, WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.RbacV1().RoleBindings(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewRoleBindingInformer(client kubernetes.Interface, namespace string, resyn ) } -func defaultRoleBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewRoleBindingInformer(client, meta_v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *roleBindingInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredRoleBindingInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *roleBindingInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&rbac_v1.RoleBinding{}, defaultRoleBindingInformer) + return f.factory.InformerFor(&rbac_v1.RoleBinding{}, f.defaultInformer) } func (f *roleBindingInformer) Lister() v1.RoleBindingLister { diff --git a/vendor/k8s.io/client-go/informers/rbac/v1alpha1/BUILD b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/BUILD index 52d662231a3f..345e10c6fed5 100644 --- a/vendor/k8s.io/client-go/informers/rbac/v1alpha1/BUILD +++ b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/BUILD @@ -14,6 +14,7 @@ go_library( "role.go", "rolebinding.go", ], + importpath = "k8s.io/client-go/informers/rbac/v1alpha1", deps = [ "//vendor/k8s.io/api/rbac/v1alpha1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrole.go b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrole.go index 435bec5a2ba7..ec257965b753 100644 --- a/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrole.go +++ b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrole.go @@ -38,19 +38,33 @@ type ClusterRoleInformer interface { } type clusterRoleInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions 
internalinterfaces.TweakListOptionsFunc } // NewClusterRoleInformer constructs a new informer for ClusterRole type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewClusterRoleInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterRoleInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterRoleInformer constructs a new informer for ClusterRole type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredClusterRoleInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.RbacV1alpha1().ClusterRoles().List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.RbacV1alpha1().ClusterRoles().Watch(options) }, }, @@ -60,12 +74,12 @@ func NewClusterRoleInformer(client kubernetes.Interface, resyncPeriod time.Durat ) } -func defaultClusterRoleInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewClusterRoleInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *clusterRoleInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterRoleInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *clusterRoleInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&rbac_v1alpha1.ClusterRole{}, defaultClusterRoleInformer) + return f.factory.InformerFor(&rbac_v1alpha1.ClusterRole{}, f.defaultInformer) } func (f *clusterRoleInformer) Lister() v1alpha1.ClusterRoleLister { diff --git a/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrolebinding.go b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrolebinding.go index 167a1887087e..a2d0c3960727 100644 --- a/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrolebinding.go @@ -38,19 +38,33 @@ type ClusterRoleBindingInformer interface { } type clusterRoleBindingInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc } // NewClusterRoleBindingInformer constructs a new informer for ClusterRoleBinding type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. 
func NewClusterRoleBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterRoleBindingInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterRoleBindingInformer constructs a new informer for ClusterRoleBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredClusterRoleBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.RbacV1alpha1().ClusterRoleBindings().List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.RbacV1alpha1().ClusterRoleBindings().Watch(options) }, }, @@ -60,12 +74,12 @@ func NewClusterRoleBindingInformer(client kubernetes.Interface, resyncPeriod tim ) } -func defaultClusterRoleBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewClusterRoleBindingInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *clusterRoleBindingInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterRoleBindingInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *clusterRoleBindingInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&rbac_v1alpha1.ClusterRoleBinding{}, defaultClusterRoleBindingInformer) + return f.factory.InformerFor(&rbac_v1alpha1.ClusterRoleBinding{}, f.defaultInformer) } func (f *clusterRoleBindingInformer) Lister() v1alpha1.ClusterRoleBindingLister { diff --git a/vendor/k8s.io/client-go/informers/rbac/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/interface.go index 4c84b9a54c87..586283d4a2e7 100644 --- a/vendor/k8s.io/client-go/informers/rbac/v1alpha1/interface.go +++ b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/interface.go @@ -35,30 +35,32 @@ type Interface interface { } type version struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &version{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // ClusterRoles returns a ClusterRoleInformer. func (v *version) ClusterRoles() ClusterRoleInformer { - return &clusterRoleInformer{factory: v.SharedInformerFactory} + return &clusterRoleInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } // ClusterRoleBindings returns a ClusterRoleBindingInformer. 
func (v *version) ClusterRoleBindings() ClusterRoleBindingInformer { - return &clusterRoleBindingInformer{factory: v.SharedInformerFactory} + return &clusterRoleBindingInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } // Roles returns a RoleInformer. func (v *version) Roles() RoleInformer { - return &roleInformer{factory: v.SharedInformerFactory} + return &roleInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // RoleBindings returns a RoleBindingInformer. func (v *version) RoleBindings() RoleBindingInformer { - return &roleBindingInformer{factory: v.SharedInformerFactory} + return &roleBindingInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } diff --git a/vendor/k8s.io/client-go/informers/rbac/v1alpha1/role.go b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/role.go index 81ef493a2f39..4564b3361657 100644 --- a/vendor/k8s.io/client-go/informers/rbac/v1alpha1/role.go +++ b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/role.go @@ -38,19 +38,34 @@ type RoleInformer interface { } type roleInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewRoleInformer constructs a new informer for Role type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewRoleInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredRoleInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredRoleInformer constructs a new informer for Role type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredRoleInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.RbacV1alpha1().Roles(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.RbacV1alpha1().Roles(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewRoleInformer(client kubernetes.Interface, namespace string, resyncPeriod ) } -func defaultRoleInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewRoleInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *roleInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredRoleInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *roleInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&rbac_v1alpha1.Role{}, defaultRoleInformer) + return f.factory.InformerFor(&rbac_v1alpha1.Role{}, f.defaultInformer) } func (f *roleInformer) Lister() v1alpha1.RoleLister { diff --git a/vendor/k8s.io/client-go/informers/rbac/v1alpha1/rolebinding.go b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/rolebinding.go index bfc896578df8..556f966a86f3 100644 --- a/vendor/k8s.io/client-go/informers/rbac/v1alpha1/rolebinding.go +++ b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/rolebinding.go @@ -38,19 +38,34 @@ type RoleBindingInformer interface { } type roleBindingInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewRoleBindingInformer constructs a new informer for RoleBinding type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewRoleBindingInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredRoleBindingInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredRoleBindingInformer constructs a new informer for RoleBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredRoleBindingInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.RbacV1alpha1().RoleBindings(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.RbacV1alpha1().RoleBindings(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewRoleBindingInformer(client kubernetes.Interface, namespace string, resyn ) } -func defaultRoleBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewRoleBindingInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *roleBindingInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredRoleBindingInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *roleBindingInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&rbac_v1alpha1.RoleBinding{}, defaultRoleBindingInformer) + return f.factory.InformerFor(&rbac_v1alpha1.RoleBinding{}, f.defaultInformer) } func (f *roleBindingInformer) Lister() v1alpha1.RoleBindingLister { diff --git a/vendor/k8s.io/client-go/informers/rbac/v1beta1/BUILD b/vendor/k8s.io/client-go/informers/rbac/v1beta1/BUILD index 229b451e4f89..0ed73f4ec643 100644 --- a/vendor/k8s.io/client-go/informers/rbac/v1beta1/BUILD +++ b/vendor/k8s.io/client-go/informers/rbac/v1beta1/BUILD @@ -14,6 +14,7 @@ go_library( "role.go", "rolebinding.go", ], + importpath = "k8s.io/client-go/informers/rbac/v1beta1", deps = [ "//vendor/k8s.io/api/rbac/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrole.go b/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrole.go index 9798b9b25ffb..821746b90d78 100644 --- a/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrole.go +++ b/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrole.go @@ -38,19 +38,33 @@ type ClusterRoleInformer interface { } type clusterRoleInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc } // NewClusterRoleInformer constructs a new informer for ClusterRole type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewClusterRoleInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterRoleInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterRoleInformer constructs a new informer for ClusterRole type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredClusterRoleInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.RbacV1beta1().ClusterRoles().List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.RbacV1beta1().ClusterRoles().Watch(options) }, }, @@ -60,12 +74,12 @@ func NewClusterRoleInformer(client kubernetes.Interface, resyncPeriod time.Durat ) } -func defaultClusterRoleInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewClusterRoleInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *clusterRoleInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterRoleInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *clusterRoleInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&rbac_v1beta1.ClusterRole{}, defaultClusterRoleInformer) + return f.factory.InformerFor(&rbac_v1beta1.ClusterRole{}, f.defaultInformer) } func (f *clusterRoleInformer) Lister() v1beta1.ClusterRoleLister { diff --git a/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrolebinding.go b/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrolebinding.go index 52370de1f92a..c517ac456104 100644 --- a/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrolebinding.go @@ -38,19 +38,33 @@ type ClusterRoleBindingInformer interface { } type clusterRoleBindingInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc } // NewClusterRoleBindingInformer constructs a new informer for ClusterRoleBinding type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewClusterRoleBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterRoleBindingInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterRoleBindingInformer constructs a new informer for ClusterRoleBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredClusterRoleBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.RbacV1beta1().ClusterRoleBindings().List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.RbacV1beta1().ClusterRoleBindings().Watch(options) }, }, @@ -60,12 +74,12 @@ func NewClusterRoleBindingInformer(client kubernetes.Interface, resyncPeriod tim ) } -func defaultClusterRoleBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewClusterRoleBindingInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *clusterRoleBindingInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterRoleBindingInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *clusterRoleBindingInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&rbac_v1beta1.ClusterRoleBinding{}, defaultClusterRoleBindingInformer) + return f.factory.InformerFor(&rbac_v1beta1.ClusterRoleBinding{}, f.defaultInformer) } func (f *clusterRoleBindingInformer) Lister() v1beta1.ClusterRoleBindingLister { diff --git a/vendor/k8s.io/client-go/informers/rbac/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/rbac/v1beta1/interface.go index f8d082a3f4c3..9d375d947c27 100644 --- a/vendor/k8s.io/client-go/informers/rbac/v1beta1/interface.go +++ b/vendor/k8s.io/client-go/informers/rbac/v1beta1/interface.go @@ -35,30 +35,32 @@ type Interface interface { } type version struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &version{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // ClusterRoles returns a ClusterRoleInformer. func (v *version) ClusterRoles() ClusterRoleInformer { - return &clusterRoleInformer{factory: v.SharedInformerFactory} + return &clusterRoleInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } // ClusterRoleBindings returns a ClusterRoleBindingInformer. func (v *version) ClusterRoleBindings() ClusterRoleBindingInformer { - return &clusterRoleBindingInformer{factory: v.SharedInformerFactory} + return &clusterRoleBindingInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } // Roles returns a RoleInformer. func (v *version) Roles() RoleInformer { - return &roleInformer{factory: v.SharedInformerFactory} + return &roleInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // RoleBindings returns a RoleBindingInformer. 
func (v *version) RoleBindings() RoleBindingInformer { - return &roleBindingInformer{factory: v.SharedInformerFactory} + return &roleBindingInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } diff --git a/vendor/k8s.io/client-go/informers/rbac/v1beta1/role.go b/vendor/k8s.io/client-go/informers/rbac/v1beta1/role.go index 7fcf97f25c89..0f13d3aaf6a4 100644 --- a/vendor/k8s.io/client-go/informers/rbac/v1beta1/role.go +++ b/vendor/k8s.io/client-go/informers/rbac/v1beta1/role.go @@ -38,19 +38,34 @@ type RoleInformer interface { } type roleInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewRoleInformer constructs a new informer for Role type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewRoleInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredRoleInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredRoleInformer constructs a new informer for Role type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredRoleInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.RbacV1beta1().Roles(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.RbacV1beta1().Roles(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewRoleInformer(client kubernetes.Interface, namespace string, resyncPeriod ) } -func defaultRoleInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewRoleInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *roleInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredRoleInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *roleInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&rbac_v1beta1.Role{}, defaultRoleInformer) + return f.factory.InformerFor(&rbac_v1beta1.Role{}, f.defaultInformer) } func (f *roleInformer) Lister() v1beta1.RoleLister { diff --git a/vendor/k8s.io/client-go/informers/rbac/v1beta1/rolebinding.go b/vendor/k8s.io/client-go/informers/rbac/v1beta1/rolebinding.go index d6e93bec6631..c951d97d5019 100644 --- a/vendor/k8s.io/client-go/informers/rbac/v1beta1/rolebinding.go +++ b/vendor/k8s.io/client-go/informers/rbac/v1beta1/rolebinding.go @@ -38,19 +38,34 @@ type RoleBindingInformer interface { } type roleBindingInformer struct { - factory internalinterfaces.SharedInformerFactory + factory 
internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewRoleBindingInformer constructs a new informer for RoleBinding type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewRoleBindingInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredRoleBindingInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredRoleBindingInformer constructs a new informer for RoleBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredRoleBindingInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.RbacV1beta1().RoleBindings(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.RbacV1beta1().RoleBindings(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewRoleBindingInformer(client kubernetes.Interface, namespace string, resyn ) } -func defaultRoleBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewRoleBindingInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *roleBindingInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredRoleBindingInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *roleBindingInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&rbac_v1beta1.RoleBinding{}, defaultRoleBindingInformer) + return f.factory.InformerFor(&rbac_v1beta1.RoleBinding{}, f.defaultInformer) } func (f *roleBindingInformer) Lister() v1beta1.RoleBindingLister { diff --git a/vendor/k8s.io/client-go/informers/scheduling/BUILD b/vendor/k8s.io/client-go/informers/scheduling/BUILD index dec09ae459e4..14c2a70b904e 100644 --- a/vendor/k8s.io/client-go/informers/scheduling/BUILD +++ b/vendor/k8s.io/client-go/informers/scheduling/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["interface.go"], + importpath = "k8s.io/client-go/informers/scheduling", deps = [ "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", "//vendor/k8s.io/client-go/informers/scheduling/v1alpha1:go_default_library", diff --git a/vendor/k8s.io/client-go/informers/scheduling/interface.go b/vendor/k8s.io/client-go/informers/scheduling/interface.go index df6a93eb1fb1..60b63e8e5c33 100644 --- a/vendor/k8s.io/client-go/informers/scheduling/interface.go +++ b/vendor/k8s.io/client-go/informers/scheduling/interface.go @@ -30,15 +30,17 @@ type Interface interface { } type group struct { - internalinterfaces.SharedInformerFactory + factory 
internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &group{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // V1alpha1 returns a new v1alpha1.Interface. func (g *group) V1alpha1() v1alpha1.Interface { - return v1alpha1.New(g.SharedInformerFactory) + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) } diff --git a/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/BUILD b/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/BUILD index 34676949cff7..b168cda49ab0 100644 --- a/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/BUILD +++ b/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/BUILD @@ -11,6 +11,7 @@ go_library( "interface.go", "priorityclass.go", ], + importpath = "k8s.io/client-go/informers/scheduling/v1alpha1", deps = [ "//vendor/k8s.io/api/scheduling/v1alpha1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/interface.go index a97d5f477c5c..1cceef7b2561 100644 --- a/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/interface.go +++ b/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/interface.go @@ -29,15 +29,17 @@ type Interface interface { } type version struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &version{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // PriorityClasses returns a PriorityClassInformer. func (v *version) PriorityClasses() PriorityClassInformer { - return &priorityClassInformer{factory: v.SharedInformerFactory} + return &priorityClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } diff --git a/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/priorityclass.go b/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/priorityclass.go index 9b1361c06540..5c90f43df0a5 100644 --- a/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/priorityclass.go +++ b/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/priorityclass.go @@ -38,19 +38,33 @@ type PriorityClassInformer interface { } type priorityClassInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc } // NewPriorityClassInformer constructs a new informer for PriorityClass type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. 
func NewPriorityClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPriorityClassInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredPriorityClassInformer constructs a new informer for PriorityClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredPriorityClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.SchedulingV1alpha1().PriorityClasses().List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.SchedulingV1alpha1().PriorityClasses().Watch(options) }, }, @@ -60,12 +74,12 @@ func NewPriorityClassInformer(client kubernetes.Interface, resyncPeriod time.Dur ) } -func defaultPriorityClassInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewPriorityClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *priorityClassInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPriorityClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *priorityClassInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&scheduling_v1alpha1.PriorityClass{}, defaultPriorityClassInformer) + return f.factory.InformerFor(&scheduling_v1alpha1.PriorityClass{}, f.defaultInformer) } func (f *priorityClassInformer) Lister() v1alpha1.PriorityClassLister { diff --git a/vendor/k8s.io/client-go/informers/settings/BUILD b/vendor/k8s.io/client-go/informers/settings/BUILD index 4c84b1758c8c..5dc3cfd1479f 100644 --- a/vendor/k8s.io/client-go/informers/settings/BUILD +++ b/vendor/k8s.io/client-go/informers/settings/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["interface.go"], + importpath = "k8s.io/client-go/informers/settings", deps = [ "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", "//vendor/k8s.io/client-go/informers/settings/v1alpha1:go_default_library", diff --git a/vendor/k8s.io/client-go/informers/settings/interface.go b/vendor/k8s.io/client-go/informers/settings/interface.go index 00d051eac61c..53bc66217092 100644 --- a/vendor/k8s.io/client-go/informers/settings/interface.go +++ b/vendor/k8s.io/client-go/informers/settings/interface.go @@ -30,15 +30,17 @@ type Interface interface { } type group struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. 
-func New(f internalinterfaces.SharedInformerFactory) Interface { - return &group{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // V1alpha1 returns a new v1alpha1.Interface. func (g *group) V1alpha1() v1alpha1.Interface { - return v1alpha1.New(g.SharedInformerFactory) + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) } diff --git a/vendor/k8s.io/client-go/informers/settings/v1alpha1/BUILD b/vendor/k8s.io/client-go/informers/settings/v1alpha1/BUILD index c09df485dca1..fd24549e84ff 100644 --- a/vendor/k8s.io/client-go/informers/settings/v1alpha1/BUILD +++ b/vendor/k8s.io/client-go/informers/settings/v1alpha1/BUILD @@ -11,6 +11,7 @@ go_library( "interface.go", "podpreset.go", ], + importpath = "k8s.io/client-go/informers/settings/v1alpha1", deps = [ "//vendor/k8s.io/api/settings/v1alpha1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/informers/settings/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/settings/v1alpha1/interface.go index 58114f6e20c7..39007ebe25fd 100644 --- a/vendor/k8s.io/client-go/informers/settings/v1alpha1/interface.go +++ b/vendor/k8s.io/client-go/informers/settings/v1alpha1/interface.go @@ -29,15 +29,17 @@ type Interface interface { } type version struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &version{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // PodPresets returns a PodPresetInformer. func (v *version) PodPresets() PodPresetInformer { - return &podPresetInformer{factory: v.SharedInformerFactory} + return &podPresetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } diff --git a/vendor/k8s.io/client-go/informers/settings/v1alpha1/podpreset.go b/vendor/k8s.io/client-go/informers/settings/v1alpha1/podpreset.go index 226cec527b8f..2e630c73d032 100644 --- a/vendor/k8s.io/client-go/informers/settings/v1alpha1/podpreset.go +++ b/vendor/k8s.io/client-go/informers/settings/v1alpha1/podpreset.go @@ -38,19 +38,34 @@ type PodPresetInformer interface { } type podPresetInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewPodPresetInformer constructs a new informer for PodPreset type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewPodPresetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPodPresetInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredPodPresetInformer constructs a new informer for PodPreset type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredPodPresetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.SettingsV1alpha1().PodPresets(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.SettingsV1alpha1().PodPresets(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewPodPresetInformer(client kubernetes.Interface, namespace string, resyncP ) } -func defaultPodPresetInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewPodPresetInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *podPresetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPodPresetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *podPresetInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&settings_v1alpha1.PodPreset{}, defaultPodPresetInformer) + return f.factory.InformerFor(&settings_v1alpha1.PodPreset{}, f.defaultInformer) } func (f *podPresetInformer) Lister() v1alpha1.PodPresetLister { diff --git a/vendor/k8s.io/client-go/informers/storage/BUILD b/vendor/k8s.io/client-go/informers/storage/BUILD index 550a51e6c935..56e9e185027c 100644 --- a/vendor/k8s.io/client-go/informers/storage/BUILD +++ b/vendor/k8s.io/client-go/informers/storage/BUILD @@ -8,9 +8,11 @@ load( go_library( name = "go_default_library", srcs = ["interface.go"], + importpath = "k8s.io/client-go/informers/storage", deps = [ "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", "//vendor/k8s.io/client-go/informers/storage/v1:go_default_library", + "//vendor/k8s.io/client-go/informers/storage/v1alpha1:go_default_library", "//vendor/k8s.io/client-go/informers/storage/v1beta1:go_default_library", ], ) @@ -27,6 +29,7 @@ filegroup( srcs = [ ":package-srcs", "//staging/src/k8s.io/client-go/informers/storage/v1:all-srcs", + "//staging/src/k8s.io/client-go/informers/storage/v1alpha1:all-srcs", "//staging/src/k8s.io/client-go/informers/storage/v1beta1:all-srcs", ], tags = ["automanaged"], diff --git a/vendor/k8s.io/client-go/informers/storage/interface.go b/vendor/k8s.io/client-go/informers/storage/interface.go index 7f3a69eb29ae..b91613a9216a 100644 --- a/vendor/k8s.io/client-go/informers/storage/interface.go +++ b/vendor/k8s.io/client-go/informers/storage/interface.go @@ -21,6 +21,7 @@ package storage import ( internalinterfaces "k8s.io/client-go/informers/internalinterfaces" v1 "k8s.io/client-go/informers/storage/v1" + v1alpha1 "k8s.io/client-go/informers/storage/v1alpha1" v1beta1 "k8s.io/client-go/informers/storage/v1beta1" ) @@ -28,25 +29,34 @@ import ( type Interface interface { // V1 provides access to shared informers for resources in V1. 
V1() v1.Interface + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface // V1beta1 provides access to shared informers for resources in V1beta1. V1beta1() v1beta1.Interface } type group struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &group{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // V1 returns a new v1.Interface. func (g *group) V1() v1.Interface { - return v1.New(g.SharedInformerFactory) + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} + +// V1alpha1 returns a new v1alpha1.Interface. +func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) } // V1beta1 returns a new v1beta1.Interface. func (g *group) V1beta1() v1beta1.Interface { - return v1beta1.New(g.SharedInformerFactory) + return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) } diff --git a/vendor/k8s.io/client-go/informers/storage/v1/BUILD b/vendor/k8s.io/client-go/informers/storage/v1/BUILD index 5868626b186f..29c0b883d02d 100644 --- a/vendor/k8s.io/client-go/informers/storage/v1/BUILD +++ b/vendor/k8s.io/client-go/informers/storage/v1/BUILD @@ -11,6 +11,7 @@ go_library( "interface.go", "storageclass.go", ], + importpath = "k8s.io/client-go/informers/storage/v1", deps = [ "//vendor/k8s.io/api/storage/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/informers/storage/v1/interface.go b/vendor/k8s.io/client-go/informers/storage/v1/interface.go index 642d4c494d9d..fadb1a0739b5 100644 --- a/vendor/k8s.io/client-go/informers/storage/v1/interface.go +++ b/vendor/k8s.io/client-go/informers/storage/v1/interface.go @@ -29,15 +29,17 @@ type Interface interface { } type version struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &version{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // StorageClasses returns a StorageClassInformer. 
func (v *version) StorageClasses() StorageClassInformer { - return &storageClassInformer{factory: v.SharedInformerFactory} + return &storageClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } diff --git a/vendor/k8s.io/client-go/informers/storage/v1/storageclass.go b/vendor/k8s.io/client-go/informers/storage/v1/storageclass.go index 397474c31fb8..341549f0fb72 100644 --- a/vendor/k8s.io/client-go/informers/storage/v1/storageclass.go +++ b/vendor/k8s.io/client-go/informers/storage/v1/storageclass.go @@ -38,19 +38,33 @@ type StorageClassInformer interface { } type storageClassInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc } // NewStorageClassInformer constructs a new informer for StorageClass type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewStorageClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredStorageClassInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredStorageClassInformer constructs a new informer for StorageClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredStorageClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.StorageV1().StorageClasses().List(options) }, WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.StorageV1().StorageClasses().Watch(options) }, }, @@ -60,12 +74,12 @@ func NewStorageClassInformer(client kubernetes.Interface, resyncPeriod time.Dura ) } -func defaultStorageClassInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewStorageClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *storageClassInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredStorageClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *storageClassInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&storage_v1.StorageClass{}, defaultStorageClassInformer) + return f.factory.InformerFor(&storage_v1.StorageClass{}, f.defaultInformer) } func (f *storageClassInformer) Lister() v1.StorageClassLister { diff --git a/vendor/k8s.io/client-go/informers/storage/v1alpha1/BUILD b/vendor/k8s.io/client-go/informers/storage/v1alpha1/BUILD new file mode 100644 index 000000000000..1871ff4962a5 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/storage/v1alpha1/BUILD @@ -0,0 +1,35 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "interface.go", + "volumeattachment.go", + 
], + importpath = "k8s.io/client-go/informers/storage/v1alpha1", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/api/storage/v1alpha1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", + "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/listers/storage/v1alpha1:go_default_library", + "//vendor/k8s.io/client-go/tools/cache:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/client-go/informers/storage/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/storage/v1alpha1/interface.go new file mode 100644 index 000000000000..d84eb5fd27e3 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/storage/v1alpha1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1alpha1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // VolumeAttachments returns a VolumeAttachmentInformer. + VolumeAttachments() VolumeAttachmentInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// VolumeAttachments returns a VolumeAttachmentInformer. +func (v *version) VolumeAttachments() VolumeAttachmentInformer { + return &volumeAttachmentInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattachment.go b/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattachment.go new file mode 100644 index 000000000000..cab9ffc4690d --- /dev/null +++ b/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattachment.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1alpha1 + +import ( + storage_v1alpha1 "k8s.io/api/storage/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha1 "k8s.io/client-go/listers/storage/v1alpha1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// VolumeAttachmentInformer provides access to a shared informer and lister for +// VolumeAttachments. +type VolumeAttachmentInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.VolumeAttachmentLister +} + +type volumeAttachmentInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewVolumeAttachmentInformer constructs a new informer for VolumeAttachment type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewVolumeAttachmentInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredVolumeAttachmentInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredVolumeAttachmentInformer constructs a new informer for VolumeAttachment type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredVolumeAttachmentInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.StorageV1alpha1().VolumeAttachments().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.StorageV1alpha1().VolumeAttachments().Watch(options) + }, + }, + &storage_v1alpha1.VolumeAttachment{}, + resyncPeriod, + indexers, + ) +} + +func (f *volumeAttachmentInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredVolumeAttachmentInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *volumeAttachmentInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&storage_v1alpha1.VolumeAttachment{}, f.defaultInformer) +} + +func (f *volumeAttachmentInformer) Lister() v1alpha1.VolumeAttachmentLister { + return v1alpha1.NewVolumeAttachmentLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/storage/v1beta1/BUILD b/vendor/k8s.io/client-go/informers/storage/v1beta1/BUILD index dd0a1ca1fcf4..0a7ad316b60d 100644 --- a/vendor/k8s.io/client-go/informers/storage/v1beta1/BUILD +++ b/vendor/k8s.io/client-go/informers/storage/v1beta1/BUILD @@ -11,6 +11,7 @@ go_library( "interface.go", "storageclass.go", ], + importpath = "k8s.io/client-go/informers/storage/v1beta1", deps = [ "//vendor/k8s.io/api/storage/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/informers/storage/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/storage/v1beta1/interface.go index 49e139f63133..7fa1abf5f607 100644 --- a/vendor/k8s.io/client-go/informers/storage/v1beta1/interface.go +++ b/vendor/k8s.io/client-go/informers/storage/v1beta1/interface.go @@ -29,15 +29,17 @@ type Interface interface { } type version struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &version{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // StorageClasses returns a StorageClassInformer. 
func (v *version) StorageClasses() StorageClassInformer { - return &storageClassInformer{factory: v.SharedInformerFactory} + return &storageClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } diff --git a/vendor/k8s.io/client-go/informers/storage/v1beta1/storageclass.go b/vendor/k8s.io/client-go/informers/storage/v1beta1/storageclass.go index 404e0a164dc1..3e96b2820002 100644 --- a/vendor/k8s.io/client-go/informers/storage/v1beta1/storageclass.go +++ b/vendor/k8s.io/client-go/informers/storage/v1beta1/storageclass.go @@ -38,19 +38,33 @@ type StorageClassInformer interface { } type storageClassInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc } // NewStorageClassInformer constructs a new informer for StorageClass type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewStorageClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredStorageClassInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredStorageClassInformer constructs a new informer for StorageClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredStorageClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.StorageV1beta1().StorageClasses().List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.StorageV1beta1().StorageClasses().Watch(options) }, }, @@ -60,12 +74,12 @@ func NewStorageClassInformer(client kubernetes.Interface, resyncPeriod time.Dura ) } -func defaultStorageClassInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewStorageClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *storageClassInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredStorageClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *storageClassInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&storage_v1beta1.StorageClass{}, defaultStorageClassInformer) + return f.factory.InformerFor(&storage_v1beta1.StorageClass{}, f.defaultInformer) } func (f *storageClassInformer) Lister() v1beta1.StorageClassLister { diff --git a/vendor/k8s.io/client-go/kubernetes/BUILD b/vendor/k8s.io/client-go/kubernetes/BUILD index 2a15cc0aa799..0ef4f867149f 100644 --- a/vendor/k8s.io/client-go/kubernetes/BUILD +++ b/vendor/k8s.io/client-go/kubernetes/BUILD @@ -12,10 +12,13 @@ go_library( "doc.go", "import.go", ], + importpath = "k8s.io/client-go/kubernetes", deps = [ "//vendor/github.com/golang/glog:go_default_library", 
"//vendor/k8s.io/client-go/discovery:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/authentication/v1:go_default_library", @@ -29,6 +32,7 @@ go_library( "//vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/networking/v1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1:go_default_library", @@ -38,6 +42,7 @@ go_library( "//vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/storage/v1:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", "//vendor/k8s.io/client-go/util/flowcontrol:go_default_library", @@ -58,6 +63,8 @@ filegroup( "//staging/src/k8s.io/client-go/kubernetes/fake:all-srcs", "//staging/src/k8s.io/client-go/kubernetes/scheme:all-srcs", "//staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1:all-srcs", + "//staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1:all-srcs", + "//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1:all-srcs", "//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1:all-srcs", "//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta2:all-srcs", "//staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1:all-srcs", @@ -71,6 +78,7 @@ filegroup( "//staging/src/k8s.io/client-go/kubernetes/typed/batch/v2alpha1:all-srcs", "//staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1:all-srcs", "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:all-srcs", + "//staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1:all-srcs", "//staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:all-srcs", "//staging/src/k8s.io/client-go/kubernetes/typed/networking/v1:all-srcs", "//staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1:all-srcs", @@ -80,6 +88,7 @@ filegroup( "//staging/src/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1:all-srcs", "//staging/src/k8s.io/client-go/kubernetes/typed/settings/v1alpha1:all-srcs", "//staging/src/k8s.io/client-go/kubernetes/typed/storage/v1:all-srcs", + "//staging/src/k8s.io/client-go/kubernetes/typed/storage/v1alpha1:all-srcs", "//staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1:all-srcs", ], tags = ["automanaged"], diff --git a/vendor/k8s.io/client-go/kubernetes/clientset.go b/vendor/k8s.io/client-go/kubernetes/clientset.go index d0070914f78a..7dcf86a70285 100644 --- a/vendor/k8s.io/client-go/kubernetes/clientset.go +++ 
b/vendor/k8s.io/client-go/kubernetes/clientset.go @@ -20,6 +20,8 @@ import ( glog "github.com/golang/glog" discovery "k8s.io/client-go/discovery" admissionregistrationv1alpha1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1" + admissionregistrationv1beta1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1" + appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" appsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1" appsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2" authenticationv1 "k8s.io/client-go/kubernetes/typed/authentication/v1" @@ -33,6 +35,7 @@ import ( batchv2alpha1 "k8s.io/client-go/kubernetes/typed/batch/v2alpha1" certificatesv1beta1 "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" + eventsv1beta1 "k8s.io/client-go/kubernetes/typed/events/v1beta1" extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" networkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1" policyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1" @@ -42,6 +45,7 @@ import ( schedulingv1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1" settingsv1alpha1 "k8s.io/client-go/kubernetes/typed/settings/v1alpha1" storagev1 "k8s.io/client-go/kubernetes/typed/storage/v1" + storagev1alpha1 "k8s.io/client-go/kubernetes/typed/storage/v1alpha1" storagev1beta1 "k8s.io/client-go/kubernetes/typed/storage/v1beta1" rest "k8s.io/client-go/rest" flowcontrol "k8s.io/client-go/util/flowcontrol" @@ -50,12 +54,14 @@ import ( type Interface interface { Discovery() discovery.DiscoveryInterface AdmissionregistrationV1alpha1() admissionregistrationv1alpha1.AdmissionregistrationV1alpha1Interface + AdmissionregistrationV1beta1() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface // Deprecated: please explicitly pick a version if possible. - Admissionregistration() admissionregistrationv1alpha1.AdmissionregistrationV1alpha1Interface + Admissionregistration() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface AppsV1beta1() appsv1beta1.AppsV1beta1Interface AppsV1beta2() appsv1beta2.AppsV1beta2Interface + AppsV1() appsv1.AppsV1Interface // Deprecated: please explicitly pick a version if possible. - Apps() appsv1beta2.AppsV1beta2Interface + Apps() appsv1.AppsV1Interface AuthenticationV1() authenticationv1.AuthenticationV1Interface // Deprecated: please explicitly pick a version if possible. Authentication() authenticationv1.AuthenticationV1Interface @@ -79,6 +85,9 @@ type Interface interface { CoreV1() corev1.CoreV1Interface // Deprecated: please explicitly pick a version if possible. Core() corev1.CoreV1Interface + EventsV1beta1() eventsv1beta1.EventsV1beta1Interface + // Deprecated: please explicitly pick a version if possible. + Events() eventsv1beta1.EventsV1beta1Interface ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface // Deprecated: please explicitly pick a version if possible. Extensions() extensionsv1beta1.ExtensionsV1beta1Interface @@ -103,6 +112,7 @@ type Interface interface { StorageV1() storagev1.StorageV1Interface // Deprecated: please explicitly pick a version if possible. Storage() storagev1.StorageV1Interface + StorageV1alpha1() storagev1alpha1.StorageV1alpha1Interface } // Clientset contains the clients for groups. 
Each group has exactly one @@ -110,8 +120,10 @@ type Interface interface { type Clientset struct { *discovery.DiscoveryClient admissionregistrationV1alpha1 *admissionregistrationv1alpha1.AdmissionregistrationV1alpha1Client + admissionregistrationV1beta1 *admissionregistrationv1beta1.AdmissionregistrationV1beta1Client appsV1beta1 *appsv1beta1.AppsV1beta1Client appsV1beta2 *appsv1beta2.AppsV1beta2Client + appsV1 *appsv1.AppsV1Client authenticationV1 *authenticationv1.AuthenticationV1Client authenticationV1beta1 *authenticationv1beta1.AuthenticationV1beta1Client authorizationV1 *authorizationv1.AuthorizationV1Client @@ -123,6 +135,7 @@ type Clientset struct { batchV2alpha1 *batchv2alpha1.BatchV2alpha1Client certificatesV1beta1 *certificatesv1beta1.CertificatesV1beta1Client coreV1 *corev1.CoreV1Client + eventsV1beta1 *eventsv1beta1.EventsV1beta1Client extensionsV1beta1 *extensionsv1beta1.ExtensionsV1beta1Client networkingV1 *networkingv1.NetworkingV1Client policyV1beta1 *policyv1beta1.PolicyV1beta1Client @@ -133,6 +146,7 @@ type Clientset struct { settingsV1alpha1 *settingsv1alpha1.SettingsV1alpha1Client storageV1beta1 *storagev1beta1.StorageV1beta1Client storageV1 *storagev1.StorageV1Client + storageV1alpha1 *storagev1alpha1.StorageV1alpha1Client } // AdmissionregistrationV1alpha1 retrieves the AdmissionregistrationV1alpha1Client @@ -140,10 +154,15 @@ func (c *Clientset) AdmissionregistrationV1alpha1() admissionregistrationv1alpha return c.admissionregistrationV1alpha1 } +// AdmissionregistrationV1beta1 retrieves the AdmissionregistrationV1beta1Client +func (c *Clientset) AdmissionregistrationV1beta1() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface { + return c.admissionregistrationV1beta1 +} + // Deprecated: Admissionregistration retrieves the default version of AdmissionregistrationClient. // Please explicitly pick a version. -func (c *Clientset) Admissionregistration() admissionregistrationv1alpha1.AdmissionregistrationV1alpha1Interface { - return c.admissionregistrationV1alpha1 +func (c *Clientset) Admissionregistration() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface { + return c.admissionregistrationV1beta1 } // AppsV1beta1 retrieves the AppsV1beta1Client @@ -156,10 +175,15 @@ func (c *Clientset) AppsV1beta2() appsv1beta2.AppsV1beta2Interface { return c.appsV1beta2 } +// AppsV1 retrieves the AppsV1Client +func (c *Clientset) AppsV1() appsv1.AppsV1Interface { + return c.appsV1 +} + // Deprecated: Apps retrieves the default version of AppsClient. // Please explicitly pick a version. -func (c *Clientset) Apps() appsv1beta2.AppsV1beta2Interface { - return c.appsV1beta2 +func (c *Clientset) Apps() appsv1.AppsV1Interface { + return c.appsV1 } // AuthenticationV1 retrieves the AuthenticationV1Client @@ -253,6 +277,17 @@ func (c *Clientset) Core() corev1.CoreV1Interface { return c.coreV1 } +// EventsV1beta1 retrieves the EventsV1beta1Client +func (c *Clientset) EventsV1beta1() eventsv1beta1.EventsV1beta1Interface { + return c.eventsV1beta1 +} + +// Deprecated: Events retrieves the default version of EventsClient. +// Please explicitly pick a version. 
+func (c *Clientset) Events() eventsv1beta1.EventsV1beta1Interface { + return c.eventsV1beta1 +} + // ExtensionsV1beta1 retrieves the ExtensionsV1beta1Client func (c *Clientset) ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface { return c.extensionsV1beta1 @@ -345,6 +380,11 @@ func (c *Clientset) Storage() storagev1.StorageV1Interface { return c.storageV1 } +// StorageV1alpha1 retrieves the StorageV1alpha1Client +func (c *Clientset) StorageV1alpha1() storagev1alpha1.StorageV1alpha1Interface { + return c.storageV1alpha1 +} + // Discovery retrieves the DiscoveryClient func (c *Clientset) Discovery() discovery.DiscoveryInterface { if c == nil { @@ -365,6 +405,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + cs.admissionregistrationV1beta1, err = admissionregistrationv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.appsV1beta1, err = appsv1beta1.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -373,6 +417,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + cs.appsV1, err = appsv1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.authenticationV1, err = authenticationv1.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -417,6 +465,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + cs.eventsV1beta1, err = eventsv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.extensionsV1beta1, err = extensionsv1beta1.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -457,6 +509,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + cs.storageV1alpha1, err = storagev1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) if err != nil { @@ -471,8 +527,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { func NewForConfigOrDie(c *rest.Config) *Clientset { var cs Clientset cs.admissionregistrationV1alpha1 = admissionregistrationv1alpha1.NewForConfigOrDie(c) + cs.admissionregistrationV1beta1 = admissionregistrationv1beta1.NewForConfigOrDie(c) cs.appsV1beta1 = appsv1beta1.NewForConfigOrDie(c) cs.appsV1beta2 = appsv1beta2.NewForConfigOrDie(c) + cs.appsV1 = appsv1.NewForConfigOrDie(c) cs.authenticationV1 = authenticationv1.NewForConfigOrDie(c) cs.authenticationV1beta1 = authenticationv1beta1.NewForConfigOrDie(c) cs.authorizationV1 = authorizationv1.NewForConfigOrDie(c) @@ -484,6 +542,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { cs.batchV2alpha1 = batchv2alpha1.NewForConfigOrDie(c) cs.certificatesV1beta1 = certificatesv1beta1.NewForConfigOrDie(c) cs.coreV1 = corev1.NewForConfigOrDie(c) + cs.eventsV1beta1 = eventsv1beta1.NewForConfigOrDie(c) cs.extensionsV1beta1 = extensionsv1beta1.NewForConfigOrDie(c) cs.networkingV1 = networkingv1.NewForConfigOrDie(c) cs.policyV1beta1 = policyv1beta1.NewForConfigOrDie(c) @@ -494,6 +553,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { cs.settingsV1alpha1 = settingsv1alpha1.NewForConfigOrDie(c) cs.storageV1beta1 = storagev1beta1.NewForConfigOrDie(c) cs.storageV1 = storagev1.NewForConfigOrDie(c) + cs.storageV1alpha1 = storagev1alpha1.NewForConfigOrDie(c) cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) return &cs @@ -503,8 +563,10 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { func 
New(c rest.Interface) *Clientset { var cs Clientset cs.admissionregistrationV1alpha1 = admissionregistrationv1alpha1.New(c) + cs.admissionregistrationV1beta1 = admissionregistrationv1beta1.New(c) cs.appsV1beta1 = appsv1beta1.New(c) cs.appsV1beta2 = appsv1beta2.New(c) + cs.appsV1 = appsv1.New(c) cs.authenticationV1 = authenticationv1.New(c) cs.authenticationV1beta1 = authenticationv1beta1.New(c) cs.authorizationV1 = authorizationv1.New(c) @@ -516,6 +578,7 @@ func New(c rest.Interface) *Clientset { cs.batchV2alpha1 = batchv2alpha1.New(c) cs.certificatesV1beta1 = certificatesv1beta1.New(c) cs.coreV1 = corev1.New(c) + cs.eventsV1beta1 = eventsv1beta1.New(c) cs.extensionsV1beta1 = extensionsv1beta1.New(c) cs.networkingV1 = networkingv1.New(c) cs.policyV1beta1 = policyv1beta1.New(c) @@ -526,6 +589,7 @@ func New(c rest.Interface) *Clientset { cs.settingsV1alpha1 = settingsv1alpha1.New(c) cs.storageV1beta1 = storagev1beta1.New(c) cs.storageV1 = storagev1.New(c) + cs.storageV1alpha1 = storagev1alpha1.New(c) cs.DiscoveryClient = discovery.NewDiscoveryClient(c) return &cs diff --git a/vendor/k8s.io/client-go/kubernetes/doc.go b/vendor/k8s.io/client-go/kubernetes/doc.go index 2af84c6699de..d8e920a5cda7 100644 --- a/vendor/k8s.io/client-go/kubernetes/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. - // This package has the automatically generated clientset. package kubernetes diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/BUILD b/vendor/k8s.io/client-go/kubernetes/scheme/BUILD index e0dc5f6f45af..1bb118aa0443 100644 --- a/vendor/k8s.io/client-go/kubernetes/scheme/BUILD +++ b/vendor/k8s.io/client-go/kubernetes/scheme/BUILD @@ -11,8 +11,11 @@ go_library( "doc.go", "register.go", ], + importpath = "k8s.io/client-go/kubernetes/scheme", deps = [ "//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library", + "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", + "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/apps/v1beta1:go_default_library", "//vendor/k8s.io/api/apps/v1beta2:go_default_library", "//vendor/k8s.io/api/authentication/v1:go_default_library", @@ -26,6 +29,7 @@ go_library( "//vendor/k8s.io/api/batch/v2alpha1:go_default_library", "//vendor/k8s.io/api/certificates/v1beta1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/api/events/v1beta1:go_default_library", "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/api/networking/v1:go_default_library", "//vendor/k8s.io/api/policy/v1beta1:go_default_library", @@ -35,6 +39,7 @@ go_library( "//vendor/k8s.io/api/scheduling/v1alpha1:go_default_library", "//vendor/k8s.io/api/settings/v1alpha1:go_default_library", "//vendor/k8s.io/api/storage/v1:go_default_library", + "//vendor/k8s.io/api/storage/v1alpha1:go_default_library", "//vendor/k8s.io/api/storage/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/doc.go b/vendor/k8s.io/client-go/kubernetes/scheme/doc.go index 5d8ec824f0fb..3ec2200d0990 100644 --- a/vendor/k8s.io/client-go/kubernetes/scheme/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/scheme/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing 
permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. - // This package contains the scheme of the automatically generated clientset. package scheme diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/register.go b/vendor/k8s.io/client-go/kubernetes/scheme/register.go index 0ee2638749b9..7bfd3361859e 100644 --- a/vendor/k8s.io/client-go/kubernetes/scheme/register.go +++ b/vendor/k8s.io/client-go/kubernetes/scheme/register.go @@ -18,6 +18,8 @@ package scheme import ( admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" + appsv1 "k8s.io/api/apps/v1" appsv1beta1 "k8s.io/api/apps/v1beta1" appsv1beta2 "k8s.io/api/apps/v1beta2" authenticationv1 "k8s.io/api/authentication/v1" @@ -31,6 +33,7 @@ import ( batchv2alpha1 "k8s.io/api/batch/v2alpha1" certificatesv1beta1 "k8s.io/api/certificates/v1beta1" corev1 "k8s.io/api/core/v1" + eventsv1beta1 "k8s.io/api/events/v1beta1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" networkingv1 "k8s.io/api/networking/v1" policyv1beta1 "k8s.io/api/policy/v1beta1" @@ -40,6 +43,7 @@ import ( schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" settingsv1alpha1 "k8s.io/api/settings/v1alpha1" storagev1 "k8s.io/api/storage/v1" + storagev1alpha1 "k8s.io/api/storage/v1alpha1" storagev1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -72,8 +76,10 @@ func init() { // correctly. func AddToScheme(scheme *runtime.Scheme) { admissionregistrationv1alpha1.AddToScheme(scheme) + admissionregistrationv1beta1.AddToScheme(scheme) appsv1beta1.AddToScheme(scheme) appsv1beta2.AddToScheme(scheme) + appsv1.AddToScheme(scheme) authenticationv1.AddToScheme(scheme) authenticationv1beta1.AddToScheme(scheme) authorizationv1.AddToScheme(scheme) @@ -85,6 +91,7 @@ func AddToScheme(scheme *runtime.Scheme) { batchv2alpha1.AddToScheme(scheme) certificatesv1beta1.AddToScheme(scheme) corev1.AddToScheme(scheme) + eventsv1beta1.AddToScheme(scheme) extensionsv1beta1.AddToScheme(scheme) networkingv1.AddToScheme(scheme) policyv1beta1.AddToScheme(scheme) @@ -95,5 +102,6 @@ func AddToScheme(scheme *runtime.Scheme) { settingsv1alpha1.AddToScheme(scheme) storagev1beta1.AddToScheme(scheme) storagev1.AddToScheme(scheme) + storagev1alpha1.AddToScheme(scheme) } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/BUILD b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/BUILD index f2b505731280..6ad245311e6e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/BUILD +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/BUILD @@ -1,19 +1,15 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ "admissionregistration_client.go", "doc.go", - "externaladmissionhookconfiguration.go", "generated_expansion.go", "initializerconfiguration.go", ], + importpath = "k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1", + visibility = ["//visibility:public"], deps = [ "//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -39,4 +35,5 @@ filegroup( 
"//staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake:all-srcs", ], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go index 95d5339197b4..5150fee3c556 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go @@ -25,7 +25,6 @@ import ( type AdmissionregistrationV1alpha1Interface interface { RESTClient() rest.Interface - ExternalAdmissionHookConfigurationsGetter InitializerConfigurationsGetter } @@ -34,10 +33,6 @@ type AdmissionregistrationV1alpha1Client struct { restClient rest.Interface } -func (c *AdmissionregistrationV1alpha1Client) ExternalAdmissionHookConfigurations() ExternalAdmissionHookConfigurationInterface { - return newExternalAdmissionHookConfigurations(c) -} - func (c *AdmissionregistrationV1alpha1Client) InitializerConfigurations() InitializerConfigurationInterface { return newInitializerConfigurations(c) } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/doc.go index ba8d10d3b666..cdaaf620786b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. - // This package has the automatically generated typed clients. package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/externaladmissionhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/externaladmissionhookconfiguration.go deleted file mode 100644 index 1ddc6eb48c97..000000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/externaladmissionhookconfiguration.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// ExternalAdmissionHookConfigurationsGetter has a method to return a ExternalAdmissionHookConfigurationInterface. -// A group's client should implement this interface. 
-type ExternalAdmissionHookConfigurationsGetter interface { - ExternalAdmissionHookConfigurations() ExternalAdmissionHookConfigurationInterface -} - -// ExternalAdmissionHookConfigurationInterface has methods to work with ExternalAdmissionHookConfiguration resources. -type ExternalAdmissionHookConfigurationInterface interface { - Create(*v1alpha1.ExternalAdmissionHookConfiguration) (*v1alpha1.ExternalAdmissionHookConfiguration, error) - Update(*v1alpha1.ExternalAdmissionHookConfiguration) (*v1alpha1.ExternalAdmissionHookConfiguration, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1alpha1.ExternalAdmissionHookConfiguration, error) - List(opts v1.ListOptions) (*v1alpha1.ExternalAdmissionHookConfigurationList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ExternalAdmissionHookConfiguration, err error) - ExternalAdmissionHookConfigurationExpansion -} - -// externalAdmissionHookConfigurations implements ExternalAdmissionHookConfigurationInterface -type externalAdmissionHookConfigurations struct { - client rest.Interface -} - -// newExternalAdmissionHookConfigurations returns a ExternalAdmissionHookConfigurations -func newExternalAdmissionHookConfigurations(c *AdmissionregistrationV1alpha1Client) *externalAdmissionHookConfigurations { - return &externalAdmissionHookConfigurations{ - client: c.RESTClient(), - } -} - -// Get takes name of the externalAdmissionHookConfiguration, and returns the corresponding externalAdmissionHookConfiguration object, and an error if there is any. -func (c *externalAdmissionHookConfigurations) Get(name string, options v1.GetOptions) (result *v1alpha1.ExternalAdmissionHookConfiguration, err error) { - result = &v1alpha1.ExternalAdmissionHookConfiguration{} - err = c.client.Get(). - Resource("externaladmissionhookconfigurations"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ExternalAdmissionHookConfigurations that match those selectors. -func (c *externalAdmissionHookConfigurations) List(opts v1.ListOptions) (result *v1alpha1.ExternalAdmissionHookConfigurationList, err error) { - result = &v1alpha1.ExternalAdmissionHookConfigurationList{} - err = c.client.Get(). - Resource("externaladmissionhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested externalAdmissionHookConfigurations. -func (c *externalAdmissionHookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("externaladmissionhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a externalAdmissionHookConfiguration and creates it. Returns the server's representation of the externalAdmissionHookConfiguration, and an error, if there is any. -func (c *externalAdmissionHookConfigurations) Create(externalAdmissionHookConfiguration *v1alpha1.ExternalAdmissionHookConfiguration) (result *v1alpha1.ExternalAdmissionHookConfiguration, err error) { - result = &v1alpha1.ExternalAdmissionHookConfiguration{} - err = c.client.Post(). - Resource("externaladmissionhookconfigurations"). 
- Body(externalAdmissionHookConfiguration). - Do(). - Into(result) - return -} - -// Update takes the representation of a externalAdmissionHookConfiguration and updates it. Returns the server's representation of the externalAdmissionHookConfiguration, and an error, if there is any. -func (c *externalAdmissionHookConfigurations) Update(externalAdmissionHookConfiguration *v1alpha1.ExternalAdmissionHookConfiguration) (result *v1alpha1.ExternalAdmissionHookConfiguration, err error) { - result = &v1alpha1.ExternalAdmissionHookConfiguration{} - err = c.client.Put(). - Resource("externaladmissionhookconfigurations"). - Name(externalAdmissionHookConfiguration.Name). - Body(externalAdmissionHookConfiguration). - Do(). - Into(result) - return -} - -// Delete takes name of the externalAdmissionHookConfiguration and deletes it. Returns an error if one occurs. -func (c *externalAdmissionHookConfigurations) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("externaladmissionhookconfigurations"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *externalAdmissionHookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Resource("externaladmissionhookconfigurations"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched externalAdmissionHookConfiguration. -func (c *externalAdmissionHookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ExternalAdmissionHookConfiguration, err error) { - result = &v1alpha1.ExternalAdmissionHookConfiguration{} - err = c.client.Patch(pt). - Resource("externaladmissionhookconfigurations"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go index eef439ab4725..ccdfb43f6b80 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go @@ -16,6 +16,4 @@ limitations under the License. 
package v1alpha1 -type ExternalAdmissionHookConfigurationExpansion interface{} - type InitializerConfigurationExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/BUILD b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/BUILD new file mode 100644 index 000000000000..197f738eba11 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/BUILD @@ -0,0 +1,40 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "admissionregistration_client.go", + "doc.go", + "generated_expansion.go", + "mutatingwebhookconfiguration.go", + "validatingwebhookconfiguration.go", + ], + importpath = "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", + "//vendor/k8s.io/client-go/rest:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go new file mode 100644 index 000000000000..8d3774b4e4dd --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go @@ -0,0 +1,93 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/admissionregistration/v1beta1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type AdmissionregistrationV1beta1Interface interface { + RESTClient() rest.Interface + MutatingWebhookConfigurationsGetter + ValidatingWebhookConfigurationsGetter +} + +// AdmissionregistrationV1beta1Client is used to interact with features provided by the admissionregistration.k8s.io group. 
+type AdmissionregistrationV1beta1Client struct { + restClient rest.Interface +} + +func (c *AdmissionregistrationV1beta1Client) MutatingWebhookConfigurations() MutatingWebhookConfigurationInterface { + return newMutatingWebhookConfigurations(c) +} + +func (c *AdmissionregistrationV1beta1Client) ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInterface { + return newValidatingWebhookConfigurations(c) +} + +// NewForConfig creates a new AdmissionregistrationV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*AdmissionregistrationV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AdmissionregistrationV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new AdmissionregistrationV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AdmissionregistrationV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AdmissionregistrationV1beta1Client for the given RESTClient. +func New(c rest.Interface) *AdmissionregistrationV1beta1Client { + return &AdmissionregistrationV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AdmissionregistrationV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/doc.go new file mode 100644 index 000000000000..1b50aa199700 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go new file mode 100644 index 000000000000..012a8da7e778 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +type MutatingWebhookConfigurationExpansion interface{} + +type ValidatingWebhookConfigurationExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go new file mode 100644 index 000000000000..36711a500929 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go @@ -0,0 +1,145 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/admissionregistration/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// MutatingWebhookConfigurationsGetter has a method to return a MutatingWebhookConfigurationInterface. +// A group's client should implement this interface. +type MutatingWebhookConfigurationsGetter interface { + MutatingWebhookConfigurations() MutatingWebhookConfigurationInterface +} + +// MutatingWebhookConfigurationInterface has methods to work with MutatingWebhookConfiguration resources. 
+type MutatingWebhookConfigurationInterface interface { + Create(*v1beta1.MutatingWebhookConfiguration) (*v1beta1.MutatingWebhookConfiguration, error) + Update(*v1beta1.MutatingWebhookConfiguration) (*v1beta1.MutatingWebhookConfiguration, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.MutatingWebhookConfiguration, error) + List(opts v1.ListOptions) (*v1beta1.MutatingWebhookConfigurationList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error) + MutatingWebhookConfigurationExpansion +} + +// mutatingWebhookConfigurations implements MutatingWebhookConfigurationInterface +type mutatingWebhookConfigurations struct { + client rest.Interface +} + +// newMutatingWebhookConfigurations returns a MutatingWebhookConfigurations +func newMutatingWebhookConfigurations(c *AdmissionregistrationV1beta1Client) *mutatingWebhookConfigurations { + return &mutatingWebhookConfigurations{ + client: c.RESTClient(), + } +} + +// Get takes name of the mutatingWebhookConfiguration, and returns the corresponding mutatingWebhookConfiguration object, and an error if there is any. +func (c *mutatingWebhookConfigurations) Get(name string, options v1.GetOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { + result = &v1beta1.MutatingWebhookConfiguration{} + err = c.client.Get(). + Resource("mutatingwebhookconfigurations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors. +func (c *mutatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1beta1.MutatingWebhookConfigurationList, err error) { + result = &v1beta1.MutatingWebhookConfigurationList{} + err = c.client.Get(). + Resource("mutatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations. +func (c *mutatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("mutatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a mutatingWebhookConfiguration and creates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. +func (c *mutatingWebhookConfigurations) Create(mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration) (result *v1beta1.MutatingWebhookConfiguration, err error) { + result = &v1beta1.MutatingWebhookConfiguration{} + err = c.client.Post(). + Resource("mutatingwebhookconfigurations"). + Body(mutatingWebhookConfiguration). + Do(). + Into(result) + return +} + +// Update takes the representation of a mutatingWebhookConfiguration and updates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. +func (c *mutatingWebhookConfigurations) Update(mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration) (result *v1beta1.MutatingWebhookConfiguration, err error) { + result = &v1beta1.MutatingWebhookConfiguration{} + err = c.client.Put(). 
+ Resource("mutatingwebhookconfigurations"). + Name(mutatingWebhookConfiguration.Name). + Body(mutatingWebhookConfiguration). + Do(). + Into(result) + return +} + +// Delete takes name of the mutatingWebhookConfiguration and deletes it. Returns an error if one occurs. +func (c *mutatingWebhookConfigurations) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("mutatingwebhookconfigurations"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *mutatingWebhookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("mutatingwebhookconfigurations"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched mutatingWebhookConfiguration. +func (c *mutatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error) { + result = &v1beta1.MutatingWebhookConfiguration{} + err = c.client.Patch(pt). + Resource("mutatingwebhookconfigurations"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go new file mode 100644 index 000000000000..d66849225d65 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go @@ -0,0 +1,145 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/admissionregistration/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ValidatingWebhookConfigurationsGetter has a method to return a ValidatingWebhookConfigurationInterface. +// A group's client should implement this interface. +type ValidatingWebhookConfigurationsGetter interface { + ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInterface +} + +// ValidatingWebhookConfigurationInterface has methods to work with ValidatingWebhookConfiguration resources. 
+type ValidatingWebhookConfigurationInterface interface { + Create(*v1beta1.ValidatingWebhookConfiguration) (*v1beta1.ValidatingWebhookConfiguration, error) + Update(*v1beta1.ValidatingWebhookConfiguration) (*v1beta1.ValidatingWebhookConfiguration, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.ValidatingWebhookConfiguration, error) + List(opts v1.ListOptions) (*v1beta1.ValidatingWebhookConfigurationList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error) + ValidatingWebhookConfigurationExpansion +} + +// validatingWebhookConfigurations implements ValidatingWebhookConfigurationInterface +type validatingWebhookConfigurations struct { + client rest.Interface +} + +// newValidatingWebhookConfigurations returns a ValidatingWebhookConfigurations +func newValidatingWebhookConfigurations(c *AdmissionregistrationV1beta1Client) *validatingWebhookConfigurations { + return &validatingWebhookConfigurations{ + client: c.RESTClient(), + } +} + +// Get takes name of the validatingWebhookConfiguration, and returns the corresponding validatingWebhookConfiguration object, and an error if there is any. +func (c *validatingWebhookConfigurations) Get(name string, options v1.GetOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { + result = &v1beta1.ValidatingWebhookConfiguration{} + err = c.client.Get(). + Resource("validatingwebhookconfigurations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors. +func (c *validatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1beta1.ValidatingWebhookConfigurationList, err error) { + result = &v1beta1.ValidatingWebhookConfigurationList{} + err = c.client.Get(). + Resource("validatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations. +func (c *validatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("validatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a validatingWebhookConfiguration and creates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. +func (c *validatingWebhookConfigurations) Create(validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration) (result *v1beta1.ValidatingWebhookConfiguration, err error) { + result = &v1beta1.ValidatingWebhookConfiguration{} + err = c.client.Post(). + Resource("validatingwebhookconfigurations"). + Body(validatingWebhookConfiguration). + Do(). + Into(result) + return +} + +// Update takes the representation of a validatingWebhookConfiguration and updates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. 
+func (c *validatingWebhookConfigurations) Update(validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration) (result *v1beta1.ValidatingWebhookConfiguration, err error) { + result = &v1beta1.ValidatingWebhookConfiguration{} + err = c.client.Put(). + Resource("validatingwebhookconfigurations"). + Name(validatingWebhookConfiguration.Name). + Body(validatingWebhookConfiguration). + Do(). + Into(result) + return +} + +// Delete takes name of the validatingWebhookConfiguration and deletes it. Returns an error if one occurs. +func (c *validatingWebhookConfigurations) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("validatingwebhookconfigurations"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *validatingWebhookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("validatingwebhookconfigurations"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched validatingWebhookConfiguration. +func (c *validatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error) { + result = &v1beta1.ValidatingWebhookConfiguration{} + err = c.client.Patch(pt). + Resource("validatingwebhookconfigurations"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/BUILD b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/BUILD new file mode 100644 index 000000000000..5d77a94df2af --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/BUILD @@ -0,0 +1,43 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "apps_client.go", + "controllerrevision.go", + "daemonset.go", + "deployment.go", + "doc.go", + "generated_expansion.go", + "replicaset.go", + "statefulset.go", + ], + importpath = "k8s.io/client-go/kubernetes/typed/apps/v1", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/api/apps/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", + "//vendor/k8s.io/client-go/rest:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1/fake:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go new file mode 100644 index 000000000000..07936304ea2d --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go @@ -0,0 +1,108 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + v1 "k8s.io/api/apps/v1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type AppsV1Interface interface { + RESTClient() rest.Interface + ControllerRevisionsGetter + DaemonSetsGetter + DeploymentsGetter + ReplicaSetsGetter + StatefulSetsGetter +} + +// AppsV1Client is used to interact with features provided by the apps group. +type AppsV1Client struct { + restClient rest.Interface +} + +func (c *AppsV1Client) ControllerRevisions(namespace string) ControllerRevisionInterface { + return newControllerRevisions(c, namespace) +} + +func (c *AppsV1Client) DaemonSets(namespace string) DaemonSetInterface { + return newDaemonSets(c, namespace) +} + +func (c *AppsV1Client) Deployments(namespace string) DeploymentInterface { + return newDeployments(c, namespace) +} + +func (c *AppsV1Client) ReplicaSets(namespace string) ReplicaSetInterface { + return newReplicaSets(c, namespace) +} + +func (c *AppsV1Client) StatefulSets(namespace string) StatefulSetInterface { + return newStatefulSets(c, namespace) +} + +// NewForConfig creates a new AppsV1Client for the given config. +func NewForConfig(c *rest.Config) (*AppsV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AppsV1Client{client}, nil +} + +// NewForConfigOrDie creates a new AppsV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AppsV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AppsV1Client for the given RESTClient. +func New(c rest.Interface) *AppsV1Client { + return &AppsV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AppsV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go new file mode 100644 index 000000000000..1d9f831346e4 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go @@ -0,0 +1,155 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + v1 "k8s.io/api/apps/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ControllerRevisionsGetter has a method to return a ControllerRevisionInterface. +// A group's client should implement this interface. +type ControllerRevisionsGetter interface { + ControllerRevisions(namespace string) ControllerRevisionInterface +} + +// ControllerRevisionInterface has methods to work with ControllerRevision resources. +type ControllerRevisionInterface interface { + Create(*v1.ControllerRevision) (*v1.ControllerRevision, error) + Update(*v1.ControllerRevision) (*v1.ControllerRevision, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.ControllerRevision, error) + List(opts meta_v1.ListOptions) (*v1.ControllerRevisionList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ControllerRevision, err error) + ControllerRevisionExpansion +} + +// controllerRevisions implements ControllerRevisionInterface +type controllerRevisions struct { + client rest.Interface + ns string +} + +// newControllerRevisions returns a ControllerRevisions +func newControllerRevisions(c *AppsV1Client, namespace string) *controllerRevisions { + return &controllerRevisions{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. +func (c *controllerRevisions) Get(name string, options meta_v1.GetOptions) (result *v1.ControllerRevision, err error) { + result = &v1.ControllerRevision{} + err = c.client.Get(). + Namespace(c.ns). + Resource("controllerrevisions"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. +func (c *controllerRevisions) List(opts meta_v1.ListOptions) (result *v1.ControllerRevisionList, err error) { + result = &v1.ControllerRevisionList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("controllerrevisions"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested controllerRevisions. +func (c *controllerRevisions) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("controllerrevisions"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. 
+func (c *controllerRevisions) Create(controllerRevision *v1.ControllerRevision) (result *v1.ControllerRevision, err error) { + result = &v1.ControllerRevision{} + err = c.client.Post(). + Namespace(c.ns). + Resource("controllerrevisions"). + Body(controllerRevision). + Do(). + Into(result) + return +} + +// Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. +func (c *controllerRevisions) Update(controllerRevision *v1.ControllerRevision) (result *v1.ControllerRevision, err error) { + result = &v1.ControllerRevision{} + err = c.client.Put(). + Namespace(c.ns). + Resource("controllerrevisions"). + Name(controllerRevision.Name). + Body(controllerRevision). + Do(). + Into(result) + return +} + +// Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs. +func (c *controllerRevisions) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("controllerrevisions"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *controllerRevisions) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("controllerrevisions"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched controllerRevision. +func (c *controllerRevisions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ControllerRevision, err error) { + result = &v1.ControllerRevision{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("controllerrevisions"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go new file mode 100644 index 000000000000..fd38c53c5ff4 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + v1 "k8s.io/api/apps/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// DaemonSetsGetter has a method to return a DaemonSetInterface. +// A group's client should implement this interface. +type DaemonSetsGetter interface { + DaemonSets(namespace string) DaemonSetInterface +} + +// DaemonSetInterface has methods to work with DaemonSet resources. 
+type DaemonSetInterface interface { + Create(*v1.DaemonSet) (*v1.DaemonSet, error) + Update(*v1.DaemonSet) (*v1.DaemonSet, error) + UpdateStatus(*v1.DaemonSet) (*v1.DaemonSet, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.DaemonSet, error) + List(opts meta_v1.ListOptions) (*v1.DaemonSetList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.DaemonSet, err error) + DaemonSetExpansion +} + +// daemonSets implements DaemonSetInterface +type daemonSets struct { + client rest.Interface + ns string +} + +// newDaemonSets returns a DaemonSets +func newDaemonSets(c *AppsV1Client, namespace string) *daemonSets { + return &daemonSets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. +func (c *daemonSets) Get(name string, options meta_v1.GetOptions) (result *v1.DaemonSet, err error) { + result = &v1.DaemonSet{} + err = c.client.Get(). + Namespace(c.ns). + Resource("daemonsets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of DaemonSets that match those selectors. +func (c *daemonSets) List(opts meta_v1.ListOptions) (result *v1.DaemonSetList, err error) { + result = &v1.DaemonSetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("daemonsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested daemonSets. +func (c *daemonSets) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("daemonsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. +func (c *daemonSets) Create(daemonSet *v1.DaemonSet) (result *v1.DaemonSet, err error) { + result = &v1.DaemonSet{} + err = c.client.Post(). + Namespace(c.ns). + Resource("daemonsets"). + Body(daemonSet). + Do(). + Into(result) + return +} + +// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. +func (c *daemonSets) Update(daemonSet *v1.DaemonSet) (result *v1.DaemonSet, err error) { + result = &v1.DaemonSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("daemonsets"). + Name(daemonSet.Name). + Body(daemonSet). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *daemonSets) UpdateStatus(daemonSet *v1.DaemonSet) (result *v1.DaemonSet, err error) { + result = &v1.DaemonSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("daemonsets"). + Name(daemonSet.Name). + SubResource("status"). + Body(daemonSet). + Do(). + Into(result) + return +} + +// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. +func (c *daemonSets) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). 
+ Namespace(c.ns). + Resource("daemonsets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *daemonSets) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("daemonsets"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched daemonSet. +func (c *daemonSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.DaemonSet, err error) { + result = &v1.DaemonSet{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("daemonsets"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go new file mode 100644 index 000000000000..34c06c8de513 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + v1 "k8s.io/api/apps/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// DeploymentsGetter has a method to return a DeploymentInterface. +// A group's client should implement this interface. +type DeploymentsGetter interface { + Deployments(namespace string) DeploymentInterface +} + +// DeploymentInterface has methods to work with Deployment resources. +type DeploymentInterface interface { + Create(*v1.Deployment) (*v1.Deployment, error) + Update(*v1.Deployment) (*v1.Deployment, error) + UpdateStatus(*v1.Deployment) (*v1.Deployment, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.Deployment, error) + List(opts meta_v1.ListOptions) (*v1.DeploymentList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Deployment, err error) + DeploymentExpansion +} + +// deployments implements DeploymentInterface +type deployments struct { + client rest.Interface + ns string +} + +// newDeployments returns a Deployments +func newDeployments(c *AppsV1Client, namespace string) *deployments { + return &deployments{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. +func (c *deployments) Get(name string, options meta_v1.GetOptions) (result *v1.Deployment, err error) { + result = &v1.Deployment{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deployments"). 
+ Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Deployments that match those selectors. +func (c *deployments) List(opts meta_v1.ListOptions) (result *v1.DeploymentList, err error) { + result = &v1.DeploymentList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested deployments. +func (c *deployments) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *deployments) Create(deployment *v1.Deployment) (result *v1.Deployment, err error) { + result = &v1.Deployment{} + err = c.client.Post(). + Namespace(c.ns). + Resource("deployments"). + Body(deployment). + Do(). + Into(result) + return +} + +// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *deployments) Update(deployment *v1.Deployment) (result *v1.Deployment, err error) { + result = &v1.Deployment{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deployments"). + Name(deployment.Name). + Body(deployment). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *deployments) UpdateStatus(deployment *v1.Deployment) (result *v1.Deployment, err error) { + result = &v1.Deployment{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deployments"). + Name(deployment.Name). + SubResource("status"). + Body(deployment). + Do(). + Into(result) + return +} + +// Delete takes name of the deployment and deletes it. Returns an error if one occurs. +func (c *deployments) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("deployments"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *deployments) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched deployment. +func (c *deployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Deployment, err error) { + result = &v1.Deployment{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("deployments"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/doc.go new file mode 100644 index 000000000000..b6a2a4672853 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/generated_expansion.go new file mode 100644 index 000000000000..500d67dd3653 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/generated_expansion.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +type ControllerRevisionExpansion interface{} + +type DaemonSetExpansion interface{} + +type DeploymentExpansion interface{} + +type ReplicaSetExpansion interface{} + +type StatefulSetExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go new file mode 100644 index 000000000000..5047b0c5f8df --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + v1 "k8s.io/api/apps/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ReplicaSetsGetter has a method to return a ReplicaSetInterface. +// A group's client should implement this interface. +type ReplicaSetsGetter interface { + ReplicaSets(namespace string) ReplicaSetInterface +} + +// ReplicaSetInterface has methods to work with ReplicaSet resources. 
+type ReplicaSetInterface interface { + Create(*v1.ReplicaSet) (*v1.ReplicaSet, error) + Update(*v1.ReplicaSet) (*v1.ReplicaSet, error) + UpdateStatus(*v1.ReplicaSet) (*v1.ReplicaSet, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.ReplicaSet, error) + List(opts meta_v1.ListOptions) (*v1.ReplicaSetList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicaSet, err error) + ReplicaSetExpansion +} + +// replicaSets implements ReplicaSetInterface +type replicaSets struct { + client rest.Interface + ns string +} + +// newReplicaSets returns a ReplicaSets +func newReplicaSets(c *AppsV1Client, namespace string) *replicaSets { + return &replicaSets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. +func (c *replicaSets) Get(name string, options meta_v1.GetOptions) (result *v1.ReplicaSet, err error) { + result = &v1.ReplicaSet{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. +func (c *replicaSets) List(opts meta_v1.ListOptions) (result *v1.ReplicaSetList, err error) { + result = &v1.ReplicaSetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested replicaSets. +func (c *replicaSets) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. +func (c *replicaSets) Create(replicaSet *v1.ReplicaSet) (result *v1.ReplicaSet, err error) { + result = &v1.ReplicaSet{} + err = c.client.Post(). + Namespace(c.ns). + Resource("replicasets"). + Body(replicaSet). + Do(). + Into(result) + return +} + +// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. +func (c *replicaSets) Update(replicaSet *v1.ReplicaSet) (result *v1.ReplicaSet, err error) { + result = &v1.ReplicaSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicasets"). + Name(replicaSet.Name). + Body(replicaSet). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *replicaSets) UpdateStatus(replicaSet *v1.ReplicaSet) (result *v1.ReplicaSet, err error) { + result = &v1.ReplicaSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicasets"). + Name(replicaSet.Name). + SubResource("status"). + Body(replicaSet). + Do(). + Into(result) + return +} + +// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. 
+func (c *replicaSets) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("replicasets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *replicaSets) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("replicasets"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched replicaSet. +func (c *replicaSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicaSet, err error) { + result = &v1.ReplicaSet{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("replicasets"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go new file mode 100644 index 000000000000..2c927ac0cd13 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + v1 "k8s.io/api/apps/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// StatefulSetsGetter has a method to return a StatefulSetInterface. +// A group's client should implement this interface. +type StatefulSetsGetter interface { + StatefulSets(namespace string) StatefulSetInterface +} + +// StatefulSetInterface has methods to work with StatefulSet resources. +type StatefulSetInterface interface { + Create(*v1.StatefulSet) (*v1.StatefulSet, error) + Update(*v1.StatefulSet) (*v1.StatefulSet, error) + UpdateStatus(*v1.StatefulSet) (*v1.StatefulSet, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.StatefulSet, error) + List(opts meta_v1.ListOptions) (*v1.StatefulSetList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.StatefulSet, err error) + StatefulSetExpansion +} + +// statefulSets implements StatefulSetInterface +type statefulSets struct { + client rest.Interface + ns string +} + +// newStatefulSets returns a StatefulSets +func newStatefulSets(c *AppsV1Client, namespace string) *statefulSets { + return &statefulSets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. 
+func (c *statefulSets) Get(name string, options meta_v1.GetOptions) (result *v1.StatefulSet, err error) { + result = &v1.StatefulSet{} + err = c.client.Get(). + Namespace(c.ns). + Resource("statefulsets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of StatefulSets that match those selectors. +func (c *statefulSets) List(opts meta_v1.ListOptions) (result *v1.StatefulSetList, err error) { + result = &v1.StatefulSetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("statefulsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested statefulSets. +func (c *statefulSets) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("statefulsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. +func (c *statefulSets) Create(statefulSet *v1.StatefulSet) (result *v1.StatefulSet, err error) { + result = &v1.StatefulSet{} + err = c.client.Post(). + Namespace(c.ns). + Resource("statefulsets"). + Body(statefulSet). + Do(). + Into(result) + return +} + +// Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. +func (c *statefulSets) Update(statefulSet *v1.StatefulSet) (result *v1.StatefulSet, err error) { + result = &v1.StatefulSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("statefulsets"). + Name(statefulSet.Name). + Body(statefulSet). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *statefulSets) UpdateStatus(statefulSet *v1.StatefulSet) (result *v1.StatefulSet, err error) { + result = &v1.StatefulSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("statefulsets"). + Name(statefulSet.Name). + SubResource("status"). + Body(statefulSet). + Do(). + Into(result) + return +} + +// Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. +func (c *statefulSets) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("statefulsets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *statefulSets) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("statefulsets"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched statefulSet. +func (c *statefulSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.StatefulSet, err error) { + result = &v1.StatefulSet{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("statefulsets"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/BUILD b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/BUILD index 5b620a0bba9b..18dca71634cb 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/BUILD +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/BUILD @@ -16,6 +16,7 @@ go_library( "scale.go", "statefulset.go", ], + importpath = "k8s.io/client-go/kubernetes/typed/apps/v1beta1", deps = [ "//vendor/k8s.io/api/apps/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/doc.go index 11b523897207..1b50aa199700 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. - // This package has the automatically generated typed clients. package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/BUILD b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/BUILD index aab785f59a07..0af42bcf8c1c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/BUILD +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/BUILD @@ -18,6 +18,7 @@ go_library( "scale.go", "statefulset.go", ], + importpath = "k8s.io/client-go/kubernetes/typed/apps/v1beta2", deps = [ "//vendor/k8s.io/api/apps/v1beta2:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/doc.go index 6ecc1bad012b..e8d65fa241c4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. - // This package has the automatically generated typed clients. package v1beta2 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/BUILD b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/BUILD index 8b47c1079312..b89498dea7be 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/BUILD +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/BUILD @@ -14,6 +14,7 @@ go_library( "tokenreview.go", "tokenreview_expansion.go", ], + importpath = "k8s.io/client-go/kubernetes/typed/authentication/v1", deps = [ "//vendor/k8s.io/api/authentication/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/doc.go index 54673bfa7382..b6a2a4672853 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. - // This package has the automatically generated typed clients. 
package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/BUILD b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/BUILD index 29b88edf5acf..ddbe7e984f61 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/BUILD +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/BUILD @@ -14,6 +14,7 @@ go_library( "tokenreview.go", "tokenreview_expansion.go", ], + importpath = "k8s.io/client-go/kubernetes/typed/authentication/v1beta1", deps = [ "//vendor/k8s.io/api/authentication/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/doc.go index 11b523897207..1b50aa199700 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. - // This package has the automatically generated typed clients. package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/BUILD b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/BUILD index 816aaa3d8fc6..f6255f38c631 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/BUILD +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/BUILD @@ -20,6 +20,7 @@ go_library( "subjectaccessreview.go", "subjectaccessreview_expansion.go", ], + importpath = "k8s.io/client-go/kubernetes/typed/authorization/v1", deps = [ "//vendor/k8s.io/api/authorization/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/doc.go index 54673bfa7382..b6a2a4672853 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. - // This package has the automatically generated typed clients. 
package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/BUILD b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/BUILD index be939575b580..793ec39b50d5 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/BUILD +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/BUILD @@ -20,6 +20,7 @@ go_library( "subjectaccessreview.go", "subjectaccessreview_expansion.go", ], + importpath = "k8s.io/client-go/kubernetes/typed/authorization/v1beta1", deps = [ "//vendor/k8s.io/api/authorization/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/doc.go index 11b523897207..1b50aa199700 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. - // This package has the automatically generated typed clients. package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/BUILD b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/BUILD index c9f72448330d..30e434a68107 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/BUILD +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/BUILD @@ -13,6 +13,7 @@ go_library( "generated_expansion.go", "horizontalpodautoscaler.go", ], + importpath = "k8s.io/client-go/kubernetes/typed/autoscaling/v1", deps = [ "//vendor/k8s.io/api/autoscaling/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/doc.go index 54673bfa7382..b6a2a4672853 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. - // This package has the automatically generated typed clients. package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/BUILD b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/BUILD index c33f07f66500..1bb145072b56 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/BUILD +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/BUILD @@ -8,6 +8,7 @@ go_library( "generated_expansion.go", "horizontalpodautoscaler.go", ], + importpath = "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1", visibility = ["//visibility:public"], deps = [ "//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library", diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/doc.go index a357dc2558be..de272bd126dd 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// This package is generated by client-gen with custom arguments. - // This package has the automatically generated typed clients. package v2beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/BUILD b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/BUILD index ff1e7ea69c39..2af6de36404d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/BUILD +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/BUILD @@ -13,6 +13,7 @@ go_library( "generated_expansion.go", "job.go", ], + importpath = "k8s.io/client-go/kubernetes/typed/batch/v1", deps = [ "//vendor/k8s.io/api/batch/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/doc.go index 54673bfa7382..b6a2a4672853 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. - // This package has the automatically generated typed clients. package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/BUILD b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/BUILD index 9d1e3aee058d..58e0a6afabf2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/BUILD +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/BUILD @@ -1,7 +1,5 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", @@ -15,7 +13,7 @@ go_library( "doc.go", "generated_expansion.go", ], - tags = ["automanaged"], + importpath = "k8s.io/client-go/kubernetes/typed/batch/v1beta1", deps = [ "//vendor/k8s.io/api/batch/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/doc.go index 11b523897207..1b50aa199700 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. - // This package has the automatically generated typed clients. 
package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/BUILD b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/BUILD index 83e35ab4ec70..c698053adf99 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/BUILD +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/BUILD @@ -13,6 +13,7 @@ go_library( "doc.go", "generated_expansion.go", ], + importpath = "k8s.io/client-go/kubernetes/typed/batch/v2alpha1", deps = [ "//vendor/k8s.io/api/batch/v2alpha1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/doc.go index d29bd3f4e1c8..7f1ef91b1607 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. - // This package has the automatically generated typed clients. package v2alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/BUILD b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/BUILD index ba36b6761037..1caeb018f844 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/BUILD +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/BUILD @@ -14,6 +14,7 @@ go_library( "doc.go", "generated_expansion.go", ], + importpath = "k8s.io/client-go/kubernetes/typed/certificates/v1beta1", deps = [ "//vendor/k8s.io/api/certificates/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/doc.go index 11b523897207..1b50aa199700 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. - // This package has the automatically generated typed clients. package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/BUILD b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/BUILD index 93c02b9ec786..dd86cfecd2be 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/BUILD +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/BUILD @@ -33,6 +33,7 @@ go_library( "service_expansion.go", "serviceaccount.go", ], + importpath = "k8s.io/client-go/kubernetes/typed/core/v1", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/doc.go index 54673bfa7382..b6a2a4672853 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. - // This package has the automatically generated typed clients. 
package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/BUILD b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/BUILD index 71e65a8492c1..f62201d87dd7 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/BUILD +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/BUILD @@ -32,6 +32,7 @@ go_library( "fake_service_expansion.go", "fake_serviceaccount.go", ], + importpath = "k8s.io/client-go/kubernetes/typed/core/v1/fake", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/doc.go index c6548330a0d0..c58fac35e4be 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. - // Package fake has the automatically generated clients. package fake diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/BUILD b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/BUILD new file mode 100644 index 000000000000..307884cc4304 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/BUILD @@ -0,0 +1,39 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "event.go", + "events_client.go", + "generated_expansion.go", + ], + importpath = "k8s.io/client-go/kubernetes/typed/events/v1beta1", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/api/events/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", + "//vendor/k8s.io/client-go/rest:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//staging/src/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/doc.go new file mode 100644 index 000000000000..1b50aa199700 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package has the automatically generated typed clients. 
+package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go new file mode 100644 index 000000000000..3473e99c4204 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go @@ -0,0 +1,155 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/events/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// EventsGetter has a method to return a EventInterface. +// A group's client should implement this interface. +type EventsGetter interface { + Events(namespace string) EventInterface +} + +// EventInterface has methods to work with Event resources. +type EventInterface interface { + Create(*v1beta1.Event) (*v1beta1.Event, error) + Update(*v1beta1.Event) (*v1beta1.Event, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.Event, error) + List(opts v1.ListOptions) (*v1beta1.EventList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Event, err error) + EventExpansion +} + +// events implements EventInterface +type events struct { + client rest.Interface + ns string +} + +// newEvents returns a Events +func newEvents(c *EventsV1beta1Client, namespace string) *events { + return &events{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the event, and returns the corresponding event object, and an error if there is any. +func (c *events) Get(name string, options v1.GetOptions) (result *v1beta1.Event, err error) { + result = &v1beta1.Event{} + err = c.client.Get(). + Namespace(c.ns). + Resource("events"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Events that match those selectors. +func (c *events) List(opts v1.ListOptions) (result *v1beta1.EventList, err error) { + result = &v1beta1.EventList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("events"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested events. +func (c *events) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("events"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. 
+func (c *events) Create(event *v1beta1.Event) (result *v1beta1.Event, err error) { + result = &v1beta1.Event{} + err = c.client.Post(). + Namespace(c.ns). + Resource("events"). + Body(event). + Do(). + Into(result) + return +} + +// Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. +func (c *events) Update(event *v1beta1.Event) (result *v1beta1.Event, err error) { + result = &v1beta1.Event{} + err = c.client.Put(). + Namespace(c.ns). + Resource("events"). + Name(event.Name). + Body(event). + Do(). + Into(result) + return +} + +// Delete takes name of the event and deletes it. Returns an error if one occurs. +func (c *events) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("events"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *events) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("events"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched event. +func (c *events) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Event, err error) { + result = &v1beta1.Event{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("events"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go new file mode 100644 index 000000000000..05cee7fb26ca --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/events/v1beta1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type EventsV1beta1Interface interface { + RESTClient() rest.Interface + EventsGetter +} + +// EventsV1beta1Client is used to interact with features provided by the events.k8s.io group. +type EventsV1beta1Client struct { + restClient rest.Interface +} + +func (c *EventsV1beta1Client) Events(namespace string) EventInterface { + return newEvents(c, namespace) +} + +// NewForConfig creates a new EventsV1beta1Client for the given config. 
+func NewForConfig(c *rest.Config) (*EventsV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &EventsV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new EventsV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *EventsV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new EventsV1beta1Client for the given RESTClient. +func New(c rest.Interface) *EventsV1beta1Client { + return &EventsV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *EventsV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go new file mode 100644 index 000000000000..82b2fb4a1f72 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +type EventExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/BUILD b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/BUILD index af61bbeffdc9..5ad976867308 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/BUILD +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/BUILD @@ -19,8 +19,8 @@ go_library( "replicaset.go", "scale.go", "scale_expansion.go", - "thirdpartyresource.go", ], + importpath = "k8s.io/client-go/kubernetes/typed/extensions/v1beta1", deps = [ "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/doc.go index 11b523897207..1b50aa199700 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. - // This package has the automatically generated typed clients. 
package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go index c1798f6737d9..b4f8886ad257 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go @@ -31,7 +31,6 @@ type ExtensionsV1beta1Interface interface { PodSecurityPoliciesGetter ReplicaSetsGetter ScalesGetter - ThirdPartyResourcesGetter } // ExtensionsV1beta1Client is used to interact with features provided by the extensions group. @@ -63,10 +62,6 @@ func (c *ExtensionsV1beta1Client) Scales(namespace string) ScaleInterface { return newScales(c, namespace) } -func (c *ExtensionsV1beta1Client) ThirdPartyResources() ThirdPartyResourceInterface { - return newThirdPartyResources(c) -} - // NewForConfig creates a new ExtensionsV1beta1Client for the given config. func NewForConfig(c *rest.Config) (*ExtensionsV1beta1Client, error) { config := *c diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go index d0a3d64bcff5..e693fe68c1bf 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go @@ -23,5 +23,3 @@ type IngressExpansion interface{} type PodSecurityPolicyExpansion interface{} type ReplicaSetExpansion interface{} - -type ThirdPartyResourceExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/thirdpartyresource.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/thirdpartyresource.go deleted file mode 100644 index 28fb6dcb5a29..000000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/thirdpartyresource.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// ThirdPartyResourcesGetter has a method to return a ThirdPartyResourceInterface. -// A group's client should implement this interface. -type ThirdPartyResourcesGetter interface { - ThirdPartyResources() ThirdPartyResourceInterface -} - -// ThirdPartyResourceInterface has methods to work with ThirdPartyResource resources. 
-type ThirdPartyResourceInterface interface { - Create(*v1beta1.ThirdPartyResource) (*v1beta1.ThirdPartyResource, error) - Update(*v1beta1.ThirdPartyResource) (*v1beta1.ThirdPartyResource, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.ThirdPartyResource, error) - List(opts v1.ListOptions) (*v1beta1.ThirdPartyResourceList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ThirdPartyResource, err error) - ThirdPartyResourceExpansion -} - -// thirdPartyResources implements ThirdPartyResourceInterface -type thirdPartyResources struct { - client rest.Interface -} - -// newThirdPartyResources returns a ThirdPartyResources -func newThirdPartyResources(c *ExtensionsV1beta1Client) *thirdPartyResources { - return &thirdPartyResources{ - client: c.RESTClient(), - } -} - -// Get takes name of the thirdPartyResource, and returns the corresponding thirdPartyResource object, and an error if there is any. -func (c *thirdPartyResources) Get(name string, options v1.GetOptions) (result *v1beta1.ThirdPartyResource, err error) { - result = &v1beta1.ThirdPartyResource{} - err = c.client.Get(). - Resource("thirdpartyresources"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ThirdPartyResources that match those selectors. -func (c *thirdPartyResources) List(opts v1.ListOptions) (result *v1beta1.ThirdPartyResourceList, err error) { - result = &v1beta1.ThirdPartyResourceList{} - err = c.client.Get(). - Resource("thirdpartyresources"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested thirdPartyResources. -func (c *thirdPartyResources) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("thirdpartyresources"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a thirdPartyResource and creates it. Returns the server's representation of the thirdPartyResource, and an error, if there is any. -func (c *thirdPartyResources) Create(thirdPartyResource *v1beta1.ThirdPartyResource) (result *v1beta1.ThirdPartyResource, err error) { - result = &v1beta1.ThirdPartyResource{} - err = c.client.Post(). - Resource("thirdpartyresources"). - Body(thirdPartyResource). - Do(). - Into(result) - return -} - -// Update takes the representation of a thirdPartyResource and updates it. Returns the server's representation of the thirdPartyResource, and an error, if there is any. -func (c *thirdPartyResources) Update(thirdPartyResource *v1beta1.ThirdPartyResource) (result *v1beta1.ThirdPartyResource, err error) { - result = &v1beta1.ThirdPartyResource{} - err = c.client.Put(). - Resource("thirdpartyresources"). - Name(thirdPartyResource.Name). - Body(thirdPartyResource). - Do(). - Into(result) - return -} - -// Delete takes name of the thirdPartyResource and deletes it. Returns an error if one occurs. -func (c *thirdPartyResources) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("thirdpartyresources"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *thirdPartyResources) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Resource("thirdpartyresources"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched thirdPartyResource. -func (c *thirdPartyResources) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ThirdPartyResource, err error) { - result = &v1beta1.ThirdPartyResource{} - err = c.client.Patch(pt). - Resource("thirdpartyresources"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/BUILD b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/BUILD index 275b2fea5fbe..7a2e3e2b12c3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/BUILD +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/BUILD @@ -13,6 +13,7 @@ go_library( "networking_client.go", "networkpolicy.go", ], + importpath = "k8s.io/client-go/kubernetes/typed/networking/v1", deps = [ "//vendor/k8s.io/api/networking/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/doc.go index 54673bfa7382..b6a2a4672853 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. - // This package has the automatically generated typed clients. package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/BUILD b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/BUILD index be2393905eb3..2abeb549d216 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/BUILD +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/BUILD @@ -15,6 +15,7 @@ go_library( "poddisruptionbudget.go", "policy_client.go", ], + importpath = "k8s.io/client-go/kubernetes/typed/policy/v1beta1", deps = [ "//vendor/k8s.io/api/policy/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/doc.go index 11b523897207..1b50aa199700 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. - // This package has the automatically generated typed clients. 
package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/BUILD b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/BUILD index 436c97130896..76e85d4fc631 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/BUILD +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/BUILD @@ -16,6 +16,7 @@ go_library( "role.go", "rolebinding.go", ], + importpath = "k8s.io/client-go/kubernetes/typed/rbac/v1", deps = [ "//vendor/k8s.io/api/rbac/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/doc.go index 54673bfa7382..b6a2a4672853 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. - // This package has the automatically generated typed clients. package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/BUILD b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/BUILD index c61acfed0d8e..11065cf6a4a4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/BUILD +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/BUILD @@ -16,6 +16,7 @@ go_library( "role.go", "rolebinding.go", ], + importpath = "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1", deps = [ "//vendor/k8s.io/api/rbac/v1alpha1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/doc.go index ba8d10d3b666..cdaaf620786b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. - // This package has the automatically generated typed clients. package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/BUILD b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/BUILD index 4d29cb8cda9f..e750f1e17e4c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/BUILD +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/BUILD @@ -16,6 +16,7 @@ go_library( "role.go", "rolebinding.go", ], + importpath = "k8s.io/client-go/kubernetes/typed/rbac/v1beta1", deps = [ "//vendor/k8s.io/api/rbac/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/doc.go index 11b523897207..1b50aa199700 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. - // This package has the automatically generated typed clients. 
package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/BUILD b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/BUILD index e18791017104..db9b36f32af2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/BUILD +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/BUILD @@ -13,6 +13,7 @@ go_library( "priorityclass.go", "scheduling_client.go", ], + importpath = "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1", deps = [ "//vendor/k8s.io/api/scheduling/v1alpha1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/doc.go index ba8d10d3b666..cdaaf620786b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. - // This package has the automatically generated typed clients. package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/BUILD b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/BUILD index a4304afd3c7d..a1abc87e3e18 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/BUILD +++ b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/BUILD @@ -13,6 +13,7 @@ go_library( "podpreset.go", "settings_client.go", ], + importpath = "k8s.io/client-go/kubernetes/typed/settings/v1alpha1", deps = [ "//vendor/k8s.io/api/settings/v1alpha1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/doc.go index ba8d10d3b666..cdaaf620786b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. - // This package has the automatically generated typed clients. package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/BUILD b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/BUILD index 6d26ca505e68..0c5507141674 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/BUILD +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/BUILD @@ -13,6 +13,7 @@ go_library( "storage_client.go", "storageclass.go", ], + importpath = "k8s.io/client-go/kubernetes/typed/storage/v1", deps = [ "//vendor/k8s.io/api/storage/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/doc.go index 54673bfa7382..b6a2a4672853 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. 
- // This package has the automatically generated typed clients. package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/BUILD b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/BUILD new file mode 100644 index 000000000000..155421963f31 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/BUILD @@ -0,0 +1,39 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "generated_expansion.go", + "storage_client.go", + "volumeattachment.go", + ], + importpath = "k8s.io/client-go/kubernetes/typed/storage/v1alpha1", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/api/storage/v1alpha1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", + "//vendor/k8s.io/client-go/rest:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//staging/src/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/doc.go new file mode 100644 index 000000000000..cdaaf620786b --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go new file mode 100644 index 000000000000..afa636a2f09b --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +type VolumeAttachmentExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go new file mode 100644 index 000000000000..3e8c70bf1e95 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/storage/v1alpha1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type StorageV1alpha1Interface interface { + RESTClient() rest.Interface + VolumeAttachmentsGetter +} + +// StorageV1alpha1Client is used to interact with features provided by the storage.k8s.io group. +type StorageV1alpha1Client struct { + restClient rest.Interface +} + +func (c *StorageV1alpha1Client) VolumeAttachments() VolumeAttachmentInterface { + return newVolumeAttachments(c) +} + +// NewForConfig creates a new StorageV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*StorageV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &StorageV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new StorageV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *StorageV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new StorageV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *StorageV1alpha1Client { + return &StorageV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *StorageV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go new file mode 100644 index 000000000000..08bdfb2596d6 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go @@ -0,0 +1,161 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/storage/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// VolumeAttachmentsGetter has a method to return a VolumeAttachmentInterface. +// A group's client should implement this interface. +type VolumeAttachmentsGetter interface { + VolumeAttachments() VolumeAttachmentInterface +} + +// VolumeAttachmentInterface has methods to work with VolumeAttachment resources. +type VolumeAttachmentInterface interface { + Create(*v1alpha1.VolumeAttachment) (*v1alpha1.VolumeAttachment, error) + Update(*v1alpha1.VolumeAttachment) (*v1alpha1.VolumeAttachment, error) + UpdateStatus(*v1alpha1.VolumeAttachment) (*v1alpha1.VolumeAttachment, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.VolumeAttachment, error) + List(opts v1.ListOptions) (*v1alpha1.VolumeAttachmentList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.VolumeAttachment, err error) + VolumeAttachmentExpansion +} + +// volumeAttachments implements VolumeAttachmentInterface +type volumeAttachments struct { + client rest.Interface +} + +// newVolumeAttachments returns a VolumeAttachments +func newVolumeAttachments(c *StorageV1alpha1Client) *volumeAttachments { + return &volumeAttachments{ + client: c.RESTClient(), + } +} + +// Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. +func (c *volumeAttachments) Get(name string, options v1.GetOptions) (result *v1alpha1.VolumeAttachment, err error) { + result = &v1alpha1.VolumeAttachment{} + err = c.client.Get(). + Resource("volumeattachments"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. +func (c *volumeAttachments) List(opts v1.ListOptions) (result *v1alpha1.VolumeAttachmentList, err error) { + result = &v1alpha1.VolumeAttachmentList{} + err = c.client.Get(). + Resource("volumeattachments"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested volumeAttachments. +func (c *volumeAttachments) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("volumeattachments"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. 
+func (c *volumeAttachments) Create(volumeAttachment *v1alpha1.VolumeAttachment) (result *v1alpha1.VolumeAttachment, err error) { + result = &v1alpha1.VolumeAttachment{} + err = c.client.Post(). + Resource("volumeattachments"). + Body(volumeAttachment). + Do(). + Into(result) + return +} + +// Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. +func (c *volumeAttachments) Update(volumeAttachment *v1alpha1.VolumeAttachment) (result *v1alpha1.VolumeAttachment, err error) { + result = &v1alpha1.VolumeAttachment{} + err = c.client.Put(). + Resource("volumeattachments"). + Name(volumeAttachment.Name). + Body(volumeAttachment). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *volumeAttachments) UpdateStatus(volumeAttachment *v1alpha1.VolumeAttachment) (result *v1alpha1.VolumeAttachment, err error) { + result = &v1alpha1.VolumeAttachment{} + err = c.client.Put(). + Resource("volumeattachments"). + Name(volumeAttachment.Name). + SubResource("status"). + Body(volumeAttachment). + Do(). + Into(result) + return +} + +// Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. +func (c *volumeAttachments) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("volumeattachments"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *volumeAttachments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("volumeattachments"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched volumeAttachment. +func (c *volumeAttachments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.VolumeAttachment, err error) { + result = &v1alpha1.VolumeAttachment{} + err = c.client.Patch(pt). + Resource("volumeattachments"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/BUILD b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/BUILD index 9bee7a36ff73..08cb7f6d1ce3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/BUILD +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/BUILD @@ -13,6 +13,7 @@ go_library( "storage_client.go", "storageclass.go", ], + importpath = "k8s.io/client-go/kubernetes/typed/storage/v1beta1", deps = [ "//vendor/k8s.io/api/storage/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/doc.go index 11b523897207..1b50aa199700 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. - // This package has the automatically generated typed clients. 
package v1beta1 diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/BUILD b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/BUILD index cdc0c6e4aefc..df0364232e7d 100644 --- a/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/BUILD +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/BUILD @@ -1,21 +1,16 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ "expansion_generated.go", - "externaladmissionhookconfiguration.go", "initializerconfiguration.go", ], + importpath = "k8s.io/client-go/listers/admissionregistration/v1alpha1", + visibility = ["//visibility:public"], deps = [ "//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", ], @@ -32,4 +27,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/expansion_generated.go index 5d9f14708fff..fb3b00987497 100644 --- a/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/expansion_generated.go +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/expansion_generated.go @@ -18,10 +18,6 @@ limitations under the License. package v1alpha1 -// ExternalAdmissionHookConfigurationListerExpansion allows custom methods to be added to -// ExternalAdmissionHookConfigurationLister. -type ExternalAdmissionHookConfigurationListerExpansion interface{} - // InitializerConfigurationListerExpansion allows custom methods to be added to // InitializerConfigurationLister. type InitializerConfigurationListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/externaladmissionhookconfiguration.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/externaladmissionhookconfiguration.go deleted file mode 100644 index 9a536c99ea6d..000000000000 --- a/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/externaladmissionhookconfiguration.go +++ /dev/null @@ -1,67 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was automatically generated by lister-gen - -package v1alpha1 - -import ( - v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// ExternalAdmissionHookConfigurationLister helps list ExternalAdmissionHookConfigurations. 
-type ExternalAdmissionHookConfigurationLister interface { - // List lists all ExternalAdmissionHookConfigurations in the indexer. - List(selector labels.Selector) (ret []*v1alpha1.ExternalAdmissionHookConfiguration, err error) - // Get retrieves the ExternalAdmissionHookConfiguration from the index for a given name. - Get(name string) (*v1alpha1.ExternalAdmissionHookConfiguration, error) - ExternalAdmissionHookConfigurationListerExpansion -} - -// externalAdmissionHookConfigurationLister implements the ExternalAdmissionHookConfigurationLister interface. -type externalAdmissionHookConfigurationLister struct { - indexer cache.Indexer -} - -// NewExternalAdmissionHookConfigurationLister returns a new ExternalAdmissionHookConfigurationLister. -func NewExternalAdmissionHookConfigurationLister(indexer cache.Indexer) ExternalAdmissionHookConfigurationLister { - return &externalAdmissionHookConfigurationLister{indexer: indexer} -} - -// List lists all ExternalAdmissionHookConfigurations in the indexer. -func (s *externalAdmissionHookConfigurationLister) List(selector labels.Selector) (ret []*v1alpha1.ExternalAdmissionHookConfiguration, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.ExternalAdmissionHookConfiguration)) - }) - return ret, err -} - -// Get retrieves the ExternalAdmissionHookConfiguration from the index for a given name. -func (s *externalAdmissionHookConfigurationLister) Get(name string) (*v1alpha1.ExternalAdmissionHookConfiguration, error) { - key := &v1alpha1.ExternalAdmissionHookConfiguration{ObjectMeta: v1.ObjectMeta{Name: name}} - obj, exists, err := s.indexer.Get(key) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("externaladmissionhookconfiguration"), name) - } - return obj.(*v1alpha1.ExternalAdmissionHookConfiguration), nil -} diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/initializerconfiguration.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/initializerconfiguration.go index a19d2080750f..60b004ef9f72 100644 --- a/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/initializerconfiguration.go +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/initializerconfiguration.go @@ -21,7 +21,6 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" "k8s.io/apimachinery/pkg/api/errors" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" ) @@ -55,8 +54,7 @@ func (s *initializerConfigurationLister) List(selector labels.Selector) (ret []* // Get retrieves the InitializerConfiguration from the index for a given name. 
func (s *initializerConfigurationLister) Get(name string) (*v1alpha1.InitializerConfiguration, error) { - key := &v1alpha1.InitializerConfiguration{ObjectMeta: v1.ObjectMeta{Name: name}} - obj, exists, err := s.indexer.Get(key) + obj, exists, err := s.indexer.GetByKey(name) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/BUILD b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/BUILD new file mode 100644 index 000000000000..d5128618f0ab --- /dev/null +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/BUILD @@ -0,0 +1,32 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "expansion_generated.go", + "mutatingwebhookconfiguration.go", + "validatingwebhookconfiguration.go", + ], + importpath = "k8s.io/client-go/listers/admissionregistration/v1beta1", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/client-go/tools/cache:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/expansion_generated.go new file mode 100644 index 000000000000..c9bf0fa5da61 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +// MutatingWebhookConfigurationListerExpansion allows custom methods to be added to +// MutatingWebhookConfigurationLister. +type MutatingWebhookConfigurationListerExpansion interface{} + +// ValidatingWebhookConfigurationListerExpansion allows custom methods to be added to +// ValidatingWebhookConfigurationLister. +type ValidatingWebhookConfigurationListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go new file mode 100644 index 000000000000..753dd18565c0 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/admissionregistration/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// MutatingWebhookConfigurationLister helps list MutatingWebhookConfigurations. +type MutatingWebhookConfigurationLister interface { + // List lists all MutatingWebhookConfigurations in the indexer. + List(selector labels.Selector) (ret []*v1beta1.MutatingWebhookConfiguration, err error) + // Get retrieves the MutatingWebhookConfiguration from the index for a given name. + Get(name string) (*v1beta1.MutatingWebhookConfiguration, error) + MutatingWebhookConfigurationListerExpansion +} + +// mutatingWebhookConfigurationLister implements the MutatingWebhookConfigurationLister interface. +type mutatingWebhookConfigurationLister struct { + indexer cache.Indexer +} + +// NewMutatingWebhookConfigurationLister returns a new MutatingWebhookConfigurationLister. +func NewMutatingWebhookConfigurationLister(indexer cache.Indexer) MutatingWebhookConfigurationLister { + return &mutatingWebhookConfigurationLister{indexer: indexer} +} + +// List lists all MutatingWebhookConfigurations in the indexer. +func (s *mutatingWebhookConfigurationLister) List(selector labels.Selector) (ret []*v1beta1.MutatingWebhookConfiguration, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.MutatingWebhookConfiguration)) + }) + return ret, err +} + +// Get retrieves the MutatingWebhookConfiguration from the index for a given name. +func (s *mutatingWebhookConfigurationLister) Get(name string) (*v1beta1.MutatingWebhookConfiguration, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("mutatingwebhookconfiguration"), name) + } + return obj.(*v1beta1.MutatingWebhookConfiguration), nil +} diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingwebhookconfiguration.go new file mode 100644 index 000000000000..6cb6067a167d --- /dev/null +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingwebhookconfiguration.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/admissionregistration/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ValidatingWebhookConfigurationLister helps list ValidatingWebhookConfigurations. +type ValidatingWebhookConfigurationLister interface { + // List lists all ValidatingWebhookConfigurations in the indexer. + List(selector labels.Selector) (ret []*v1beta1.ValidatingWebhookConfiguration, err error) + // Get retrieves the ValidatingWebhookConfiguration from the index for a given name. + Get(name string) (*v1beta1.ValidatingWebhookConfiguration, error) + ValidatingWebhookConfigurationListerExpansion +} + +// validatingWebhookConfigurationLister implements the ValidatingWebhookConfigurationLister interface. +type validatingWebhookConfigurationLister struct { + indexer cache.Indexer +} + +// NewValidatingWebhookConfigurationLister returns a new ValidatingWebhookConfigurationLister. +func NewValidatingWebhookConfigurationLister(indexer cache.Indexer) ValidatingWebhookConfigurationLister { + return &validatingWebhookConfigurationLister{indexer: indexer} +} + +// List lists all ValidatingWebhookConfigurations in the indexer. +func (s *validatingWebhookConfigurationLister) List(selector labels.Selector) (ret []*v1beta1.ValidatingWebhookConfiguration, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.ValidatingWebhookConfiguration)) + }) + return ret, err +} + +// Get retrieves the ValidatingWebhookConfiguration from the index for a given name. +func (s *validatingWebhookConfigurationLister) Get(name string) (*v1beta1.ValidatingWebhookConfiguration, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("validatingwebhookconfiguration"), name) + } + return obj.(*v1beta1.ValidatingWebhookConfiguration), nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1/BUILD b/vendor/k8s.io/client-go/listers/apps/v1/BUILD new file mode 100644 index 000000000000..fb3cfbbb6af7 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1/BUILD @@ -0,0 +1,41 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "controllerrevision.go", + "daemonset.go", + "daemonset_expansion.go", + "deployment.go", + "deployment_expansion.go", + "expansion_generated.go", + "replicaset.go", + "replicaset_expansion.go", + "statefulset.go", + "statefulset_expansion.go", + ], + importpath = "k8s.io/client-go/listers/apps/v1", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/api/apps/v1:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/client-go/tools/cache:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/client-go/listers/apps/v1/controllerrevision.go b/vendor/k8s.io/client-go/listers/apps/v1/controllerrevision.go new file mode 
100644 index 000000000000..c05d14c25431 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1/controllerrevision.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ControllerRevisionLister helps list ControllerRevisions. +type ControllerRevisionLister interface { + // List lists all ControllerRevisions in the indexer. + List(selector labels.Selector) (ret []*v1.ControllerRevision, err error) + // ControllerRevisions returns an object that can list and get ControllerRevisions. + ControllerRevisions(namespace string) ControllerRevisionNamespaceLister + ControllerRevisionListerExpansion +} + +// controllerRevisionLister implements the ControllerRevisionLister interface. +type controllerRevisionLister struct { + indexer cache.Indexer +} + +// NewControllerRevisionLister returns a new ControllerRevisionLister. +func NewControllerRevisionLister(indexer cache.Indexer) ControllerRevisionLister { + return &controllerRevisionLister{indexer: indexer} +} + +// List lists all ControllerRevisions in the indexer. +func (s *controllerRevisionLister) List(selector labels.Selector) (ret []*v1.ControllerRevision, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ControllerRevision)) + }) + return ret, err +} + +// ControllerRevisions returns an object that can list and get ControllerRevisions. +func (s *controllerRevisionLister) ControllerRevisions(namespace string) ControllerRevisionNamespaceLister { + return controllerRevisionNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ControllerRevisionNamespaceLister helps list and get ControllerRevisions. +type ControllerRevisionNamespaceLister interface { + // List lists all ControllerRevisions in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.ControllerRevision, err error) + // Get retrieves the ControllerRevision from the indexer for a given namespace and name. + Get(name string) (*v1.ControllerRevision, error) + ControllerRevisionNamespaceListerExpansion +} + +// controllerRevisionNamespaceLister implements the ControllerRevisionNamespaceLister +// interface. +type controllerRevisionNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ControllerRevisions in the indexer for a given namespace. +func (s controllerRevisionNamespaceLister) List(selector labels.Selector) (ret []*v1.ControllerRevision, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ControllerRevision)) + }) + return ret, err +} + +// Get retrieves the ControllerRevision from the indexer for a given namespace and name. 
+func (s controllerRevisionNamespaceLister) Get(name string) (*v1.ControllerRevision, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("controllerrevision"), name) + } + return obj.(*v1.ControllerRevision), nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1/daemonset.go b/vendor/k8s.io/client-go/listers/apps/v1/daemonset.go new file mode 100644 index 000000000000..307f8bc7c6c2 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1/daemonset.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// DaemonSetLister helps list DaemonSets. +type DaemonSetLister interface { + // List lists all DaemonSets in the indexer. + List(selector labels.Selector) (ret []*v1.DaemonSet, err error) + // DaemonSets returns an object that can list and get DaemonSets. + DaemonSets(namespace string) DaemonSetNamespaceLister + DaemonSetListerExpansion +} + +// daemonSetLister implements the DaemonSetLister interface. +type daemonSetLister struct { + indexer cache.Indexer +} + +// NewDaemonSetLister returns a new DaemonSetLister. +func NewDaemonSetLister(indexer cache.Indexer) DaemonSetLister { + return &daemonSetLister{indexer: indexer} +} + +// List lists all DaemonSets in the indexer. +func (s *daemonSetLister) List(selector labels.Selector) (ret []*v1.DaemonSet, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.DaemonSet)) + }) + return ret, err +} + +// DaemonSets returns an object that can list and get DaemonSets. +func (s *daemonSetLister) DaemonSets(namespace string) DaemonSetNamespaceLister { + return daemonSetNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// DaemonSetNamespaceLister helps list and get DaemonSets. +type DaemonSetNamespaceLister interface { + // List lists all DaemonSets in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.DaemonSet, err error) + // Get retrieves the DaemonSet from the indexer for a given namespace and name. + Get(name string) (*v1.DaemonSet, error) + DaemonSetNamespaceListerExpansion +} + +// daemonSetNamespaceLister implements the DaemonSetNamespaceLister +// interface. +type daemonSetNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all DaemonSets in the indexer for a given namespace. +func (s daemonSetNamespaceLister) List(selector labels.Selector) (ret []*v1.DaemonSet, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.DaemonSet)) + }) + return ret, err +} + +// Get retrieves the DaemonSet from the indexer for a given namespace and name. 
+func (s daemonSetNamespaceLister) Get(name string) (*v1.DaemonSet, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("daemonset"), name) + } + return obj.(*v1.DaemonSet), nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1/daemonset_expansion.go b/vendor/k8s.io/client-go/listers/apps/v1/daemonset_expansion.go new file mode 100644 index 000000000000..83435561a14a --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1/daemonset_expansion.go @@ -0,0 +1,113 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + + apps "k8s.io/api/apps/v1" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// DaemonSetListerExpansion allows custom methods to be added to +// DaemonSetLister. +type DaemonSetListerExpansion interface { + GetPodDaemonSets(pod *v1.Pod) ([]*apps.DaemonSet, error) + GetHistoryDaemonSets(history *apps.ControllerRevision) ([]*apps.DaemonSet, error) +} + +// DaemonSetNamespaceListerExpansion allows custom methods to be added to +// DaemonSetNamespaceLister. +type DaemonSetNamespaceListerExpansion interface{} + +// GetPodDaemonSets returns a list of DaemonSets that potentially match a pod. +// Only the one specified in the Pod's ControllerRef will actually manage it. +// Returns an error only if no matching DaemonSets are found. +func (s *daemonSetLister) GetPodDaemonSets(pod *v1.Pod) ([]*apps.DaemonSet, error) { + var selector labels.Selector + var daemonSet *apps.DaemonSet + + if len(pod.Labels) == 0 { + return nil, fmt.Errorf("no daemon sets found for pod %v because it has no labels", pod.Name) + } + + list, err := s.DaemonSets(pod.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var daemonSets []*apps.DaemonSet + for i := range list { + daemonSet = list[i] + if daemonSet.Namespace != pod.Namespace { + continue + } + selector, err = metav1.LabelSelectorAsSelector(daemonSet.Spec.Selector) + if err != nil { + // this should not happen if the DaemonSet passed validation + return nil, err + } + + // If a daemonSet with a nil or empty selector creeps in, it should match nothing, not everything. + if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { + continue + } + daemonSets = append(daemonSets, daemonSet) + } + + if len(daemonSets) == 0 { + return nil, fmt.Errorf("could not find daemon set for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) + } + + return daemonSets, nil +} + +// GetHistoryDaemonSets returns a list of DaemonSets that potentially +// match a ControllerRevision. Only the one specified in the ControllerRevision's ControllerRef +// will actually manage it. +// Returns an error only if no matching DaemonSets are found. 
+func (s *daemonSetLister) GetHistoryDaemonSets(history *apps.ControllerRevision) ([]*apps.DaemonSet, error) { + if len(history.Labels) == 0 { + return nil, fmt.Errorf("no DaemonSet found for ControllerRevision %s because it has no labels", history.Name) + } + + list, err := s.DaemonSets(history.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var daemonSets []*apps.DaemonSet + for _, ds := range list { + selector, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector) + if err != nil { + return nil, fmt.Errorf("invalid label selector: %v", err) + } + // If a DaemonSet with a nil or empty selector creeps in, it should match nothing, not everything. + if selector.Empty() || !selector.Matches(labels.Set(history.Labels)) { + continue + } + daemonSets = append(daemonSets, ds) + } + + if len(daemonSets) == 0 { + return nil, fmt.Errorf("could not find DaemonSets for ControllerRevision %s in namespace %s with labels: %v", history.Name, history.Namespace, history.Labels) + } + + return daemonSets, nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1/deployment.go b/vendor/k8s.io/client-go/listers/apps/v1/deployment.go new file mode 100644 index 000000000000..36af90094787 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1/deployment.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// DeploymentLister helps list Deployments. +type DeploymentLister interface { + // List lists all Deployments in the indexer. + List(selector labels.Selector) (ret []*v1.Deployment, err error) + // Deployments returns an object that can list and get Deployments. + Deployments(namespace string) DeploymentNamespaceLister + DeploymentListerExpansion +} + +// deploymentLister implements the DeploymentLister interface. +type deploymentLister struct { + indexer cache.Indexer +} + +// NewDeploymentLister returns a new DeploymentLister. +func NewDeploymentLister(indexer cache.Indexer) DeploymentLister { + return &deploymentLister{indexer: indexer} +} + +// List lists all Deployments in the indexer. +func (s *deploymentLister) List(selector labels.Selector) (ret []*v1.Deployment, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Deployment)) + }) + return ret, err +} + +// Deployments returns an object that can list and get Deployments. +func (s *deploymentLister) Deployments(namespace string) DeploymentNamespaceLister { + return deploymentNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// DeploymentNamespaceLister helps list and get Deployments. +type DeploymentNamespaceLister interface { + // List lists all Deployments in the indexer for a given namespace. 
+ List(selector labels.Selector) (ret []*v1.Deployment, err error) + // Get retrieves the Deployment from the indexer for a given namespace and name. + Get(name string) (*v1.Deployment, error) + DeploymentNamespaceListerExpansion +} + +// deploymentNamespaceLister implements the DeploymentNamespaceLister +// interface. +type deploymentNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Deployments in the indexer for a given namespace. +func (s deploymentNamespaceLister) List(selector labels.Selector) (ret []*v1.Deployment, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Deployment)) + }) + return ret, err +} + +// Get retrieves the Deployment from the indexer for a given namespace and name. +func (s deploymentNamespaceLister) Get(name string) (*v1.Deployment, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("deployment"), name) + } + return obj.(*v1.Deployment), nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1/deployment_expansion.go b/vendor/k8s.io/client-go/listers/apps/v1/deployment_expansion.go new file mode 100644 index 000000000000..7802eca5a5e1 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1/deployment_expansion.go @@ -0,0 +1,70 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + + apps "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// DeploymentListerExpansion allows custom methods to be added to +// DeploymentLister. +type DeploymentListerExpansion interface { + GetDeploymentsForReplicaSet(rs *apps.ReplicaSet) ([]*apps.Deployment, error) +} + +// DeploymentNamespaceListerExpansion allows custom methods to be added to +// DeploymentNamespaceLister. +type DeploymentNamespaceListerExpansion interface{} + +// GetDeploymentsForReplicaSet returns a list of Deployments that potentially +// match a ReplicaSet. Only the one specified in the ReplicaSet's ControllerRef +// will actually manage it. +// Returns an error only if no matching Deployments are found. +func (s *deploymentLister) GetDeploymentsForReplicaSet(rs *apps.ReplicaSet) ([]*apps.Deployment, error) { + if len(rs.Labels) == 0 { + return nil, fmt.Errorf("no deployments found for ReplicaSet %v because it has no labels", rs.Name) + } + + // TODO: MODIFY THIS METHOD so that it checks for the podTemplateSpecHash label + dList, err := s.Deployments(rs.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var deployments []*apps.Deployment + for _, d := range dList { + selector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector) + if err != nil { + return nil, fmt.Errorf("invalid label selector: %v", err) + } + // If a deployment with a nil or empty selector creeps in, it should match nothing, not everything. 
+ if selector.Empty() || !selector.Matches(labels.Set(rs.Labels)) { + continue + } + deployments = append(deployments, d) + } + + if len(deployments) == 0 { + return nil, fmt.Errorf("could not find deployments set for ReplicaSet %s in namespace %s with labels: %v", rs.Name, rs.Namespace, rs.Labels) + } + + return deployments, nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/apps/v1/expansion_generated.go new file mode 100644 index 000000000000..48917c2c0514 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +// ControllerRevisionListerExpansion allows custom methods to be added to +// ControllerRevisionLister. +type ControllerRevisionListerExpansion interface{} + +// ControllerRevisionNamespaceListerExpansion allows custom methods to be added to +// ControllerRevisionNamespaceLister. +type ControllerRevisionNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/apps/v1/replicaset.go b/vendor/k8s.io/client-go/listers/apps/v1/replicaset.go new file mode 100644 index 000000000000..7e316d6b4dd8 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1/replicaset.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ReplicaSetLister helps list ReplicaSets. +type ReplicaSetLister interface { + // List lists all ReplicaSets in the indexer. + List(selector labels.Selector) (ret []*v1.ReplicaSet, err error) + // ReplicaSets returns an object that can list and get ReplicaSets. + ReplicaSets(namespace string) ReplicaSetNamespaceLister + ReplicaSetListerExpansion +} + +// replicaSetLister implements the ReplicaSetLister interface. +type replicaSetLister struct { + indexer cache.Indexer +} + +// NewReplicaSetLister returns a new ReplicaSetLister. +func NewReplicaSetLister(indexer cache.Indexer) ReplicaSetLister { + return &replicaSetLister{indexer: indexer} +} + +// List lists all ReplicaSets in the indexer. 
+func (s *replicaSetLister) List(selector labels.Selector) (ret []*v1.ReplicaSet, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ReplicaSet)) + }) + return ret, err +} + +// ReplicaSets returns an object that can list and get ReplicaSets. +func (s *replicaSetLister) ReplicaSets(namespace string) ReplicaSetNamespaceLister { + return replicaSetNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ReplicaSetNamespaceLister helps list and get ReplicaSets. +type ReplicaSetNamespaceLister interface { + // List lists all ReplicaSets in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.ReplicaSet, err error) + // Get retrieves the ReplicaSet from the indexer for a given namespace and name. + Get(name string) (*v1.ReplicaSet, error) + ReplicaSetNamespaceListerExpansion +} + +// replicaSetNamespaceLister implements the ReplicaSetNamespaceLister +// interface. +type replicaSetNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ReplicaSets in the indexer for a given namespace. +func (s replicaSetNamespaceLister) List(selector labels.Selector) (ret []*v1.ReplicaSet, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ReplicaSet)) + }) + return ret, err +} + +// Get retrieves the ReplicaSet from the indexer for a given namespace and name. +func (s replicaSetNamespaceLister) Get(name string) (*v1.ReplicaSet, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("replicaset"), name) + } + return obj.(*v1.ReplicaSet), nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1/replicaset_expansion.go b/vendor/k8s.io/client-go/listers/apps/v1/replicaset_expansion.go new file mode 100644 index 000000000000..675e615aecc9 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1/replicaset_expansion.go @@ -0,0 +1,73 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + + apps "k8s.io/api/apps/v1" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// ReplicaSetListerExpansion allows custom methods to be added to +// ReplicaSetLister. +type ReplicaSetListerExpansion interface { + GetPodReplicaSets(pod *v1.Pod) ([]*apps.ReplicaSet, error) +} + +// ReplicaSetNamespaceListerExpansion allows custom methods to be added to +// ReplicaSetNamespaceLister. +type ReplicaSetNamespaceListerExpansion interface{} + +// GetPodReplicaSets returns a list of ReplicaSets that potentially match a pod. +// Only the one specified in the Pod's ControllerRef will actually manage it. +// Returns an error only if no matching ReplicaSets are found. 
+func (s *replicaSetLister) GetPodReplicaSets(pod *v1.Pod) ([]*apps.ReplicaSet, error) { + if len(pod.Labels) == 0 { + return nil, fmt.Errorf("no ReplicaSets found for pod %v because it has no labels", pod.Name) + } + + list, err := s.ReplicaSets(pod.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var rss []*apps.ReplicaSet + for _, rs := range list { + if rs.Namespace != pod.Namespace { + continue + } + selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector) + if err != nil { + return nil, fmt.Errorf("invalid selector: %v", err) + } + + // If a ReplicaSet with a nil or empty selector creeps in, it should match nothing, not everything. + if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { + continue + } + rss = append(rss, rs) + } + + if len(rss) == 0 { + return nil, fmt.Errorf("could not find ReplicaSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) + } + + return rss, nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1/statefulset.go b/vendor/k8s.io/client-go/listers/apps/v1/statefulset.go new file mode 100644 index 000000000000..fe584038e250 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1/statefulset.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// StatefulSetLister helps list StatefulSets. +type StatefulSetLister interface { + // List lists all StatefulSets in the indexer. + List(selector labels.Selector) (ret []*v1.StatefulSet, err error) + // StatefulSets returns an object that can list and get StatefulSets. + StatefulSets(namespace string) StatefulSetNamespaceLister + StatefulSetListerExpansion +} + +// statefulSetLister implements the StatefulSetLister interface. +type statefulSetLister struct { + indexer cache.Indexer +} + +// NewStatefulSetLister returns a new StatefulSetLister. +func NewStatefulSetLister(indexer cache.Indexer) StatefulSetLister { + return &statefulSetLister{indexer: indexer} +} + +// List lists all StatefulSets in the indexer. +func (s *statefulSetLister) List(selector labels.Selector) (ret []*v1.StatefulSet, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.StatefulSet)) + }) + return ret, err +} + +// StatefulSets returns an object that can list and get StatefulSets. +func (s *statefulSetLister) StatefulSets(namespace string) StatefulSetNamespaceLister { + return statefulSetNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// StatefulSetNamespaceLister helps list and get StatefulSets. +type StatefulSetNamespaceLister interface { + // List lists all StatefulSets in the indexer for a given namespace. 
+ List(selector labels.Selector) (ret []*v1.StatefulSet, err error) + // Get retrieves the StatefulSet from the indexer for a given namespace and name. + Get(name string) (*v1.StatefulSet, error) + StatefulSetNamespaceListerExpansion +} + +// statefulSetNamespaceLister implements the StatefulSetNamespaceLister +// interface. +type statefulSetNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all StatefulSets in the indexer for a given namespace. +func (s statefulSetNamespaceLister) List(selector labels.Selector) (ret []*v1.StatefulSet, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.StatefulSet)) + }) + return ret, err +} + +// Get retrieves the StatefulSet from the indexer for a given namespace and name. +func (s statefulSetNamespaceLister) Get(name string) (*v1.StatefulSet, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("statefulset"), name) + } + return obj.(*v1.StatefulSet), nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1/statefulset_expansion.go b/vendor/k8s.io/client-go/listers/apps/v1/statefulset_expansion.go new file mode 100644 index 000000000000..b4912976b69a --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1/statefulset_expansion.go @@ -0,0 +1,77 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + + apps "k8s.io/api/apps/v1" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// StatefulSetListerExpansion allows custom methods to be added to +// StatefulSetLister. +type StatefulSetListerExpansion interface { + GetPodStatefulSets(pod *v1.Pod) ([]*apps.StatefulSet, error) +} + +// StatefulSetNamespaceListerExpansion allows custom methods to be added to +// StatefulSetNamespaceLister. +type StatefulSetNamespaceListerExpansion interface{} + +// GetPodStatefulSets returns a list of StatefulSets that potentially match a pod. +// Only the one specified in the Pod's ControllerRef will actually manage it. +// Returns an error only if no matching StatefulSets are found. 
+func (s *statefulSetLister) GetPodStatefulSets(pod *v1.Pod) ([]*apps.StatefulSet, error) { + var selector labels.Selector + var ps *apps.StatefulSet + + if len(pod.Labels) == 0 { + return nil, fmt.Errorf("no StatefulSets found for pod %v because it has no labels", pod.Name) + } + + list, err := s.StatefulSets(pod.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var psList []*apps.StatefulSet + for i := range list { + ps = list[i] + if ps.Namespace != pod.Namespace { + continue + } + selector, err = metav1.LabelSelectorAsSelector(ps.Spec.Selector) + if err != nil { + return nil, fmt.Errorf("invalid selector: %v", err) + } + + // If a StatefulSet with a nil or empty selector creeps in, it should match nothing, not everything. + if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { + continue + } + psList = append(psList, ps) + } + + if len(psList) == 0 { + return nil, fmt.Errorf("could not find StatefulSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) + } + + return psList, nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta1/BUILD b/vendor/k8s.io/client-go/listers/apps/v1beta1/BUILD index 586e116fd35b..f6ddce5303ee 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1beta1/BUILD +++ b/vendor/k8s.io/client-go/listers/apps/v1beta1/BUILD @@ -15,6 +15,7 @@ go_library( "statefulset.go", "statefulset_expansion.go", ], + importpath = "k8s.io/client-go/listers/apps/v1beta1", deps = [ "//vendor/k8s.io/api/apps/v1beta1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/BUILD b/vendor/k8s.io/client-go/listers/apps/v1beta2/BUILD index f81e6e17418d..50aefcab1812 100644 --- a/vendor/k8s.io/client-go/listers/apps/v1beta2/BUILD +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/BUILD @@ -20,6 +20,7 @@ go_library( "statefulset.go", "statefulset_expansion.go", ], + importpath = "k8s.io/client-go/listers/apps/v1beta2", deps = [ "//vendor/k8s.io/api/apps/v1beta2:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/listers/autoscaling/v1/BUILD b/vendor/k8s.io/client-go/listers/autoscaling/v1/BUILD index 964cc6662e28..8c5bfd95dc6b 100644 --- a/vendor/k8s.io/client-go/listers/autoscaling/v1/BUILD +++ b/vendor/k8s.io/client-go/listers/autoscaling/v1/BUILD @@ -6,6 +6,7 @@ go_library( "expansion_generated.go", "horizontalpodautoscaler.go", ], + importpath = "k8s.io/client-go/listers/autoscaling/v1", visibility = ["//visibility:public"], deps = [ "//vendor/k8s.io/api/autoscaling/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/listers/autoscaling/v2beta1/BUILD b/vendor/k8s.io/client-go/listers/autoscaling/v2beta1/BUILD index dc90768494ac..78aca5847bcb 100644 --- a/vendor/k8s.io/client-go/listers/autoscaling/v2beta1/BUILD +++ b/vendor/k8s.io/client-go/listers/autoscaling/v2beta1/BUILD @@ -6,6 +6,7 @@ go_library( "expansion_generated.go", "horizontalpodautoscaler.go", ], + importpath = "k8s.io/client-go/listers/autoscaling/v2beta1", visibility = ["//visibility:public"], deps = [ "//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library", diff --git a/vendor/k8s.io/client-go/listers/batch/v1/BUILD b/vendor/k8s.io/client-go/listers/batch/v1/BUILD index 8a188ac09229..c695b985687c 100644 --- a/vendor/k8s.io/client-go/listers/batch/v1/BUILD +++ b/vendor/k8s.io/client-go/listers/batch/v1/BUILD @@ -12,6 +12,7 @@ go_library( "job.go", "job_expansion.go", ], + importpath = 
"k8s.io/client-go/listers/batch/v1", deps = [ "//vendor/k8s.io/api/batch/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/listers/batch/v1beta1/BUILD b/vendor/k8s.io/client-go/listers/batch/v1beta1/BUILD index 8d2a39f3fc36..0ae1a2eda9b8 100644 --- a/vendor/k8s.io/client-go/listers/batch/v1beta1/BUILD +++ b/vendor/k8s.io/client-go/listers/batch/v1beta1/BUILD @@ -1,7 +1,5 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", @@ -13,7 +11,7 @@ go_library( "cronjob.go", "expansion_generated.go", ], - tags = ["automanaged"], + importpath = "k8s.io/client-go/listers/batch/v1beta1", deps = [ "//vendor/k8s.io/api/batch/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", diff --git a/vendor/k8s.io/client-go/listers/batch/v2alpha1/BUILD b/vendor/k8s.io/client-go/listers/batch/v2alpha1/BUILD index d4b630172c5b..92220512e16e 100644 --- a/vendor/k8s.io/client-go/listers/batch/v2alpha1/BUILD +++ b/vendor/k8s.io/client-go/listers/batch/v2alpha1/BUILD @@ -11,6 +11,7 @@ go_library( "cronjob.go", "expansion_generated.go", ], + importpath = "k8s.io/client-go/listers/batch/v2alpha1", deps = [ "//vendor/k8s.io/api/batch/v2alpha1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", diff --git a/vendor/k8s.io/client-go/listers/certificates/v1beta1/BUILD b/vendor/k8s.io/client-go/listers/certificates/v1beta1/BUILD index a3a11915cc5c..b7ca4270c198 100644 --- a/vendor/k8s.io/client-go/listers/certificates/v1beta1/BUILD +++ b/vendor/k8s.io/client-go/listers/certificates/v1beta1/BUILD @@ -11,10 +11,10 @@ go_library( "certificatesigningrequest.go", "expansion_generated.go", ], + importpath = "k8s.io/client-go/listers/certificates/v1beta1", deps = [ "//vendor/k8s.io/api/certificates/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", ], diff --git a/vendor/k8s.io/client-go/listers/certificates/v1beta1/certificatesigningrequest.go b/vendor/k8s.io/client-go/listers/certificates/v1beta1/certificatesigningrequest.go index 62af289b2401..425dc6b4d942 100644 --- a/vendor/k8s.io/client-go/listers/certificates/v1beta1/certificatesigningrequest.go +++ b/vendor/k8s.io/client-go/listers/certificates/v1beta1/certificatesigningrequest.go @@ -21,7 +21,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/certificates/v1beta1" "k8s.io/apimachinery/pkg/api/errors" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" ) @@ -55,8 +54,7 @@ func (s *certificateSigningRequestLister) List(selector labels.Selector) (ret [] // Get retrieves the CertificateSigningRequest from the index for a given name. 
func (s *certificateSigningRequestLister) Get(name string) (*v1beta1.CertificateSigningRequest, error) { - key := &v1beta1.CertificateSigningRequest{ObjectMeta: v1.ObjectMeta{Name: name}} - obj, exists, err := s.indexer.Get(key) + obj, exists, err := s.indexer.GetByKey(name) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/listers/core/v1/BUILD b/vendor/k8s.io/client-go/listers/core/v1/BUILD index beb7e6635cc2..313cdc299c6f 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/BUILD +++ b/vendor/k8s.io/client-go/listers/core/v1/BUILD @@ -29,10 +29,10 @@ go_library( "service_expansion.go", "serviceaccount.go", ], + importpath = "k8s.io/client-go/listers/core/v1", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", ], diff --git a/vendor/k8s.io/client-go/listers/core/v1/componentstatus.go b/vendor/k8s.io/client-go/listers/core/v1/componentstatus.go index a612a544cc93..6ba67d0bd562 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/componentstatus.go +++ b/vendor/k8s.io/client-go/listers/core/v1/componentstatus.go @@ -21,7 +21,6 @@ package v1 import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" ) @@ -55,8 +54,7 @@ func (s *componentStatusLister) List(selector labels.Selector) (ret []*v1.Compon // Get retrieves the ComponentStatus from the index for a given name. func (s *componentStatusLister) Get(name string) (*v1.ComponentStatus, error) { - key := &v1.ComponentStatus{ObjectMeta: meta_v1.ObjectMeta{Name: name}} - obj, exists, err := s.indexer.Get(key) + obj, exists, err := s.indexer.GetByKey(name) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/listers/core/v1/namespace.go b/vendor/k8s.io/client-go/listers/core/v1/namespace.go index 9b184f71c3bb..21be6878a384 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/namespace.go +++ b/vendor/k8s.io/client-go/listers/core/v1/namespace.go @@ -21,7 +21,6 @@ package v1 import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" ) @@ -55,8 +54,7 @@ func (s *namespaceLister) List(selector labels.Selector) (ret []*v1.Namespace, e // Get retrieves the Namespace from the index for a given name. func (s *namespaceLister) Get(name string) (*v1.Namespace, error) { - key := &v1.Namespace{ObjectMeta: meta_v1.ObjectMeta{Name: name}} - obj, exists, err := s.indexer.Get(key) + obj, exists, err := s.indexer.GetByKey(name) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/listers/core/v1/node.go b/vendor/k8s.io/client-go/listers/core/v1/node.go index 24a01fbdda8f..d43a682c90b5 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/node.go +++ b/vendor/k8s.io/client-go/listers/core/v1/node.go @@ -21,7 +21,6 @@ package v1 import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" ) @@ -55,8 +54,7 @@ func (s *nodeLister) List(selector labels.Selector) (ret []*v1.Node, err error) // Get retrieves the Node from the index for a given name. 
func (s *nodeLister) Get(name string) (*v1.Node, error) { - key := &v1.Node{ObjectMeta: meta_v1.ObjectMeta{Name: name}} - obj, exists, err := s.indexer.Get(key) + obj, exists, err := s.indexer.GetByKey(name) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/listers/core/v1/persistentvolume.go b/vendor/k8s.io/client-go/listers/core/v1/persistentvolume.go index ea2f79dd7759..593ba14ed185 100644 --- a/vendor/k8s.io/client-go/listers/core/v1/persistentvolume.go +++ b/vendor/k8s.io/client-go/listers/core/v1/persistentvolume.go @@ -21,7 +21,6 @@ package v1 import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" ) @@ -55,8 +54,7 @@ func (s *persistentVolumeLister) List(selector labels.Selector) (ret []*v1.Persi // Get retrieves the PersistentVolume from the index for a given name. func (s *persistentVolumeLister) Get(name string) (*v1.PersistentVolume, error) { - key := &v1.PersistentVolume{ObjectMeta: meta_v1.ObjectMeta{Name: name}} - obj, exists, err := s.indexer.Get(key) + obj, exists, err := s.indexer.GetByKey(name) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/listers/events/v1beta1/BUILD b/vendor/k8s.io/client-go/listers/events/v1beta1/BUILD new file mode 100644 index 000000000000..f4ea35d4af2e --- /dev/null +++ b/vendor/k8s.io/client-go/listers/events/v1beta1/BUILD @@ -0,0 +1,31 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "event.go", + "expansion_generated.go", + ], + importpath = "k8s.io/client-go/listers/events/v1beta1", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/api/events/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/client-go/tools/cache:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/client-go/listers/events/v1beta1/event.go b/vendor/k8s.io/client-go/listers/events/v1beta1/event.go new file mode 100644 index 000000000000..bca3c452adfd --- /dev/null +++ b/vendor/k8s.io/client-go/listers/events/v1beta1/event.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/events/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// EventLister helps list Events. +type EventLister interface { + // List lists all Events in the indexer. 
+ List(selector labels.Selector) (ret []*v1beta1.Event, err error) + // Events returns an object that can list and get Events. + Events(namespace string) EventNamespaceLister + EventListerExpansion +} + +// eventLister implements the EventLister interface. +type eventLister struct { + indexer cache.Indexer +} + +// NewEventLister returns a new EventLister. +func NewEventLister(indexer cache.Indexer) EventLister { + return &eventLister{indexer: indexer} +} + +// List lists all Events in the indexer. +func (s *eventLister) List(selector labels.Selector) (ret []*v1beta1.Event, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Event)) + }) + return ret, err +} + +// Events returns an object that can list and get Events. +func (s *eventLister) Events(namespace string) EventNamespaceLister { + return eventNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// EventNamespaceLister helps list and get Events. +type EventNamespaceLister interface { + // List lists all Events in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta1.Event, err error) + // Get retrieves the Event from the indexer for a given namespace and name. + Get(name string) (*v1beta1.Event, error) + EventNamespaceListerExpansion +} + +// eventNamespaceLister implements the EventNamespaceLister +// interface. +type eventNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Events in the indexer for a given namespace. +func (s eventNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Event, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Event)) + }) + return ret, err +} + +// Get retrieves the Event from the indexer for a given namespace and name. +func (s eventNamespaceLister) Get(name string) (*v1beta1.Event, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("event"), name) + } + return obj.(*v1beta1.Event), nil +} diff --git a/vendor/k8s.io/client-go/listers/events/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/events/v1beta1/expansion_generated.go new file mode 100644 index 000000000000..7e8fb62b1bc0 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/events/v1beta1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +// EventListerExpansion allows custom methods to be added to +// EventLister. +type EventListerExpansion interface{} + +// EventNamespaceListerExpansion allows custom methods to be added to +// EventNamespaceLister. 
+type EventNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/BUILD b/vendor/k8s.io/client-go/listers/extensions/v1beta1/BUILD index 9dec1ff0711f..9bab4a64f830 100644 --- a/vendor/k8s.io/client-go/listers/extensions/v1beta1/BUILD +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/BUILD @@ -19,8 +19,8 @@ go_library( "replicaset.go", "replicaset_expansion.go", "scale.go", - "thirdpartyresource.go", ], + importpath = "k8s.io/client-go/listers/extensions/v1beta1", deps = [ "//vendor/k8s.io/api/apps/v1beta1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", @@ -35,6 +35,7 @@ go_library( go_test( name = "go_default_test", srcs = ["daemonset_expansion_test.go"], + importpath = "k8s.io/client-go/listers/extensions/v1beta1", library = ":go_default_library", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go index b3c29269b263..060c7a35aec8 100644 --- a/vendor/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go @@ -37,7 +37,3 @@ type ScaleListerExpansion interface{} // ScaleNamespaceListerExpansion allows custom methods to be added to // ScaleNamespaceLister. type ScaleNamespaceListerExpansion interface{} - -// ThirdPartyResourceListerExpansion allows custom methods to be added to -// ThirdPartyResourceLister. -type ThirdPartyResourceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/podsecuritypolicy.go index da47dbb52633..3189ff7c9dae 100644 --- a/vendor/k8s.io/client-go/listers/extensions/v1beta1/podsecuritypolicy.go +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/podsecuritypolicy.go @@ -21,7 +21,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/api/errors" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" ) @@ -55,8 +54,7 @@ func (s *podSecurityPolicyLister) List(selector labels.Selector) (ret []*v1beta1 // Get retrieves the PodSecurityPolicy from the index for a given name. func (s *podSecurityPolicyLister) Get(name string) (*v1beta1.PodSecurityPolicy, error) { - key := &v1beta1.PodSecurityPolicy{ObjectMeta: v1.ObjectMeta{Name: name}} - obj, exists, err := s.indexer.Get(key) + obj, exists, err := s.indexer.GetByKey(name) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/thirdpartyresource.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/thirdpartyresource.go deleted file mode 100644 index 3cd699a86371..000000000000 --- a/vendor/k8s.io/client-go/listers/extensions/v1beta1/thirdpartyresource.go +++ /dev/null @@ -1,67 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was automatically generated by lister-gen - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// ThirdPartyResourceLister helps list ThirdPartyResources. -type ThirdPartyResourceLister interface { - // List lists all ThirdPartyResources in the indexer. - List(selector labels.Selector) (ret []*v1beta1.ThirdPartyResource, err error) - // Get retrieves the ThirdPartyResource from the index for a given name. - Get(name string) (*v1beta1.ThirdPartyResource, error) - ThirdPartyResourceListerExpansion -} - -// thirdPartyResourceLister implements the ThirdPartyResourceLister interface. -type thirdPartyResourceLister struct { - indexer cache.Indexer -} - -// NewThirdPartyResourceLister returns a new ThirdPartyResourceLister. -func NewThirdPartyResourceLister(indexer cache.Indexer) ThirdPartyResourceLister { - return &thirdPartyResourceLister{indexer: indexer} -} - -// List lists all ThirdPartyResources in the indexer. -func (s *thirdPartyResourceLister) List(selector labels.Selector) (ret []*v1beta1.ThirdPartyResource, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.ThirdPartyResource)) - }) - return ret, err -} - -// Get retrieves the ThirdPartyResource from the index for a given name. -func (s *thirdPartyResourceLister) Get(name string) (*v1beta1.ThirdPartyResource, error) { - key := &v1beta1.ThirdPartyResource{ObjectMeta: v1.ObjectMeta{Name: name}} - obj, exists, err := s.indexer.Get(key) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("thirdpartyresource"), name) - } - return obj.(*v1beta1.ThirdPartyResource), nil -} diff --git a/vendor/k8s.io/client-go/listers/networking/v1/BUILD b/vendor/k8s.io/client-go/listers/networking/v1/BUILD index 4b6e21832392..64dc638bacd3 100644 --- a/vendor/k8s.io/client-go/listers/networking/v1/BUILD +++ b/vendor/k8s.io/client-go/listers/networking/v1/BUILD @@ -11,6 +11,7 @@ go_library( "expansion_generated.go", "networkpolicy.go", ], + importpath = "k8s.io/client-go/listers/networking/v1", deps = [ "//vendor/k8s.io/api/networking/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", diff --git a/vendor/k8s.io/client-go/listers/policy/v1beta1/BUILD b/vendor/k8s.io/client-go/listers/policy/v1beta1/BUILD index 8bcde940c237..c678177ae40a 100644 --- a/vendor/k8s.io/client-go/listers/policy/v1beta1/BUILD +++ b/vendor/k8s.io/client-go/listers/policy/v1beta1/BUILD @@ -13,6 +13,7 @@ go_library( "poddisruptionbudget.go", "poddisruptionbudget_expansion.go", ], + importpath = "k8s.io/client-go/listers/policy/v1beta1", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/listers/rbac/v1/BUILD b/vendor/k8s.io/client-go/listers/rbac/v1/BUILD index d3c5e2e38e8e..9facdc04dfde 100644 --- a/vendor/k8s.io/client-go/listers/rbac/v1/BUILD +++ b/vendor/k8s.io/client-go/listers/rbac/v1/BUILD @@ -14,10 +14,10 @@ go_library( "role.go", "rolebinding.go", ], + importpath = "k8s.io/client-go/listers/rbac/v1", deps = [ "//vendor/k8s.io/api/rbac/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - 
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", ], diff --git a/vendor/k8s.io/client-go/listers/rbac/v1/clusterrole.go b/vendor/k8s.io/client-go/listers/rbac/v1/clusterrole.go index f95d063bef31..5dc9a225e709 100644 --- a/vendor/k8s.io/client-go/listers/rbac/v1/clusterrole.go +++ b/vendor/k8s.io/client-go/listers/rbac/v1/clusterrole.go @@ -21,7 +21,6 @@ package v1 import ( v1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/errors" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" ) @@ -55,8 +54,7 @@ func (s *clusterRoleLister) List(selector labels.Selector) (ret []*v1.ClusterRol // Get retrieves the ClusterRole from the index for a given name. func (s *clusterRoleLister) Get(name string) (*v1.ClusterRole, error) { - key := &v1.ClusterRole{ObjectMeta: meta_v1.ObjectMeta{Name: name}} - obj, exists, err := s.indexer.Get(key) + obj, exists, err := s.indexer.GetByKey(name) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/listers/rbac/v1/clusterrolebinding.go b/vendor/k8s.io/client-go/listers/rbac/v1/clusterrolebinding.go index cce8a0f48706..bb3186a067b6 100644 --- a/vendor/k8s.io/client-go/listers/rbac/v1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/listers/rbac/v1/clusterrolebinding.go @@ -21,7 +21,6 @@ package v1 import ( v1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/errors" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" ) @@ -55,8 +54,7 @@ func (s *clusterRoleBindingLister) List(selector labels.Selector) (ret []*v1.Clu // Get retrieves the ClusterRoleBinding from the index for a given name. func (s *clusterRoleBindingLister) Get(name string) (*v1.ClusterRoleBinding, error) { - key := &v1.ClusterRoleBinding{ObjectMeta: meta_v1.ObjectMeta{Name: name}} - obj, exists, err := s.indexer.Get(key) + obj, exists, err := s.indexer.GetByKey(name) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/BUILD b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/BUILD index c497f2966b1e..917c535fc54d 100644 --- a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/BUILD +++ b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/BUILD @@ -14,10 +14,10 @@ go_library( "role.go", "rolebinding.go", ], + importpath = "k8s.io/client-go/listers/rbac/v1alpha1", deps = [ "//vendor/k8s.io/api/rbac/v1alpha1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", ], diff --git a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrole.go b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrole.go index 21adfb3512dd..9e20a6d16299 100644 --- a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrole.go +++ b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrole.go @@ -21,7 +21,6 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/rbac/v1alpha1" "k8s.io/apimachinery/pkg/api/errors" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" ) @@ -55,8 +54,7 @@ func (s *clusterRoleLister) List(selector labels.Selector) (ret []*v1alpha1.Clus // Get retrieves the ClusterRole from the index for a given name. 
func (s *clusterRoleLister) Get(name string) (*v1alpha1.ClusterRole, error) { - key := &v1alpha1.ClusterRole{ObjectMeta: v1.ObjectMeta{Name: name}} - obj, exists, err := s.indexer.Get(key) + obj, exists, err := s.indexer.GetByKey(name) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrolebinding.go b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrolebinding.go index 5c1b93a77c32..155666aba025 100644 --- a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrolebinding.go @@ -21,7 +21,6 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/rbac/v1alpha1" "k8s.io/apimachinery/pkg/api/errors" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" ) @@ -55,8 +54,7 @@ func (s *clusterRoleBindingLister) List(selector labels.Selector) (ret []*v1alph // Get retrieves the ClusterRoleBinding from the index for a given name. func (s *clusterRoleBindingLister) Get(name string) (*v1alpha1.ClusterRoleBinding, error) { - key := &v1alpha1.ClusterRoleBinding{ObjectMeta: v1.ObjectMeta{Name: name}} - obj, exists, err := s.indexer.Get(key) + obj, exists, err := s.indexer.GetByKey(name) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/listers/rbac/v1beta1/BUILD b/vendor/k8s.io/client-go/listers/rbac/v1beta1/BUILD index 85de3ea19284..bde3d52e81bb 100644 --- a/vendor/k8s.io/client-go/listers/rbac/v1beta1/BUILD +++ b/vendor/k8s.io/client-go/listers/rbac/v1beta1/BUILD @@ -14,10 +14,10 @@ go_library( "role.go", "rolebinding.go", ], + importpath = "k8s.io/client-go/listers/rbac/v1beta1", deps = [ "//vendor/k8s.io/api/rbac/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", ], diff --git a/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrole.go b/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrole.go index 4e759c494c69..65ec3eb978fa 100644 --- a/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrole.go +++ b/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrole.go @@ -21,7 +21,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/rbac/v1beta1" "k8s.io/apimachinery/pkg/api/errors" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" ) @@ -55,8 +54,7 @@ func (s *clusterRoleLister) List(selector labels.Selector) (ret []*v1beta1.Clust // Get retrieves the ClusterRole from the index for a given name. 
func (s *clusterRoleLister) Get(name string) (*v1beta1.ClusterRole, error) { - key := &v1beta1.ClusterRole{ObjectMeta: v1.ObjectMeta{Name: name}} - obj, exists, err := s.indexer.Get(key) + obj, exists, err := s.indexer.GetByKey(name) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrolebinding.go b/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrolebinding.go index 6781ac60eef4..146f2d7f2949 100644 --- a/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrolebinding.go @@ -21,7 +21,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/rbac/v1beta1" "k8s.io/apimachinery/pkg/api/errors" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" ) @@ -55,8 +54,7 @@ func (s *clusterRoleBindingLister) List(selector labels.Selector) (ret []*v1beta // Get retrieves the ClusterRoleBinding from the index for a given name. func (s *clusterRoleBindingLister) Get(name string) (*v1beta1.ClusterRoleBinding, error) { - key := &v1beta1.ClusterRoleBinding{ObjectMeta: v1.ObjectMeta{Name: name}} - obj, exists, err := s.indexer.Get(key) + obj, exists, err := s.indexer.GetByKey(name) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/BUILD b/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/BUILD index 163c965a32d6..cf60e81322da 100644 --- a/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/BUILD +++ b/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/BUILD @@ -11,10 +11,10 @@ go_library( "expansion_generated.go", "priorityclass.go", ], + importpath = "k8s.io/client-go/listers/scheduling/v1alpha1", deps = [ "//vendor/k8s.io/api/scheduling/v1alpha1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", ], diff --git a/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/priorityclass.go b/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/priorityclass.go index 67b6864e7db2..9ed04fd2ae19 100644 --- a/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/priorityclass.go +++ b/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/priorityclass.go @@ -21,7 +21,6 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/scheduling/v1alpha1" "k8s.io/apimachinery/pkg/api/errors" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" ) @@ -55,8 +54,7 @@ func (s *priorityClassLister) List(selector labels.Selector) (ret []*v1alpha1.Pr // Get retrieves the PriorityClass from the index for a given name. 
func (s *priorityClassLister) Get(name string) (*v1alpha1.PriorityClass, error) { - key := &v1alpha1.PriorityClass{ObjectMeta: v1.ObjectMeta{Name: name}} - obj, exists, err := s.indexer.Get(key) + obj, exists, err := s.indexer.GetByKey(name) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/listers/settings/v1alpha1/BUILD b/vendor/k8s.io/client-go/listers/settings/v1alpha1/BUILD index 436fa356c17f..9979b5e3cc44 100644 --- a/vendor/k8s.io/client-go/listers/settings/v1alpha1/BUILD +++ b/vendor/k8s.io/client-go/listers/settings/v1alpha1/BUILD @@ -11,6 +11,7 @@ go_library( "expansion_generated.go", "podpreset.go", ], + importpath = "k8s.io/client-go/listers/settings/v1alpha1", deps = [ "//vendor/k8s.io/api/settings/v1alpha1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", diff --git a/vendor/k8s.io/client-go/listers/storage/v1/BUILD b/vendor/k8s.io/client-go/listers/storage/v1/BUILD index 4bf813e3b205..b90f06bb7a64 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1/BUILD +++ b/vendor/k8s.io/client-go/listers/storage/v1/BUILD @@ -11,10 +11,10 @@ go_library( "expansion_generated.go", "storageclass.go", ], + importpath = "k8s.io/client-go/listers/storage/v1", deps = [ "//vendor/k8s.io/api/storage/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", ], diff --git a/vendor/k8s.io/client-go/listers/storage/v1/storageclass.go b/vendor/k8s.io/client-go/listers/storage/v1/storageclass.go index 09e659438081..7c37321fd9ee 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1/storageclass.go +++ b/vendor/k8s.io/client-go/listers/storage/v1/storageclass.go @@ -21,7 +21,6 @@ package v1 import ( v1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/errors" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" ) @@ -55,8 +54,7 @@ func (s *storageClassLister) List(selector labels.Selector) (ret []*v1.StorageCl // Get retrieves the StorageClass from the index for a given name. 
func (s *storageClassLister) Get(name string) (*v1.StorageClass, error) { - key := &v1.StorageClass{ObjectMeta: meta_v1.ObjectMeta{Name: name}} - obj, exists, err := s.indexer.Get(key) + obj, exists, err := s.indexer.GetByKey(name) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/listers/storage/v1alpha1/BUILD b/vendor/k8s.io/client-go/listers/storage/v1alpha1/BUILD new file mode 100644 index 000000000000..50592d670784 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/storage/v1alpha1/BUILD @@ -0,0 +1,31 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "expansion_generated.go", + "volumeattachment.go", + ], + importpath = "k8s.io/client-go/listers/storage/v1alpha1", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/api/storage/v1alpha1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/client-go/tools/cache:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/client-go/listers/storage/v1alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/storage/v1alpha1/expansion_generated.go new file mode 100644 index 000000000000..63abe94ab223 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/storage/v1alpha1/expansion_generated.go @@ -0,0 +1,23 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1alpha1 + +// VolumeAttachmentListerExpansion allows custom methods to be added to +// VolumeAttachmentLister. +type VolumeAttachmentListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattachment.go b/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattachment.go new file mode 100644 index 000000000000..02004629a674 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattachment.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by lister-gen + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/storage/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// VolumeAttachmentLister helps list VolumeAttachments. +type VolumeAttachmentLister interface { + // List lists all VolumeAttachments in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.VolumeAttachment, err error) + // Get retrieves the VolumeAttachment from the index for a given name. + Get(name string) (*v1alpha1.VolumeAttachment, error) + VolumeAttachmentListerExpansion +} + +// volumeAttachmentLister implements the VolumeAttachmentLister interface. +type volumeAttachmentLister struct { + indexer cache.Indexer +} + +// NewVolumeAttachmentLister returns a new VolumeAttachmentLister. +func NewVolumeAttachmentLister(indexer cache.Indexer) VolumeAttachmentLister { + return &volumeAttachmentLister{indexer: indexer} +} + +// List lists all VolumeAttachments in the indexer. +func (s *volumeAttachmentLister) List(selector labels.Selector) (ret []*v1alpha1.VolumeAttachment, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.VolumeAttachment)) + }) + return ret, err +} + +// Get retrieves the VolumeAttachment from the index for a given name. +func (s *volumeAttachmentLister) Get(name string) (*v1alpha1.VolumeAttachment, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("volumeattachment"), name) + } + return obj.(*v1alpha1.VolumeAttachment), nil +} diff --git a/vendor/k8s.io/client-go/listers/storage/v1beta1/BUILD b/vendor/k8s.io/client-go/listers/storage/v1beta1/BUILD index 57fcd37c6982..d4b82cbc808c 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1beta1/BUILD +++ b/vendor/k8s.io/client-go/listers/storage/v1beta1/BUILD @@ -11,10 +11,10 @@ go_library( "expansion_generated.go", "storageclass.go", ], + importpath = "k8s.io/client-go/listers/storage/v1beta1", deps = [ "//vendor/k8s.io/api/storage/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", ], diff --git a/vendor/k8s.io/client-go/listers/storage/v1beta1/storageclass.go b/vendor/k8s.io/client-go/listers/storage/v1beta1/storageclass.go index 04efe5f8dadc..9253319bec61 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1beta1/storageclass.go +++ b/vendor/k8s.io/client-go/listers/storage/v1beta1/storageclass.go @@ -21,7 +21,6 @@ package v1beta1 import ( v1beta1 "k8s.io/api/storage/v1beta1" "k8s.io/apimachinery/pkg/api/errors" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" ) @@ -55,8 +54,7 @@ func (s *storageClassLister) List(selector labels.Selector) (ret []*v1beta1.Stor // Get retrieves the StorageClass from the index for a given name. 
func (s *storageClassLister) Get(name string) (*v1beta1.StorageClass, error) { - key := &v1beta1.StorageClass{ObjectMeta: v1.ObjectMeta{Name: name}} - obj, exists, err := s.indexer.Get(key) + obj, exists, err := s.indexer.GetByKey(name) if err != nil { return nil, err } diff --git a/vendor/k8s.io/client-go/pkg/version/BUILD b/vendor/k8s.io/client-go/pkg/version/BUILD index 2bb94c6e225a..ee19d8cde928 100644 --- a/vendor/k8s.io/client-go/pkg/version/BUILD +++ b/vendor/k8s.io/client-go/pkg/version/BUILD @@ -12,6 +12,7 @@ go_library( "doc.go", "version.go", ], + importpath = "k8s.io/client-go/pkg/version", deps = ["//vendor/k8s.io/apimachinery/pkg/version:go_default_library"], ) diff --git a/vendor/k8s.io/client-go/pkg/version/base.go b/vendor/k8s.io/client-go/pkg/version/base.go index 8dc21a97d068..7ab0ed3a7c64 100644 --- a/vendor/k8s.io/client-go/pkg/version/base.go +++ b/vendor/k8s.io/client-go/pkg/version/base.go @@ -39,8 +39,8 @@ var ( // them irrelevant. (Next we'll take it out, which may muck with // scripts consuming the kubectl version output - but most of // these should be looking at gitVersion already anyways.) - gitMajor string = "1" // major version, always numeric - gitMinor string = "8" // minor version, numeric possibly followed by "+" + gitMajor string = "" // major version, always numeric + gitMinor string = "" // minor version, numeric possibly followed by "+" // semantic version, derived by build scripts (see // https://github.com/kubernetes/kubernetes/blob/master/docs/design/versioning.md @@ -51,9 +51,13 @@ var ( // semantic version is a git hash, but the version itself is no // longer the direct output of "git describe", but a slight // translation to be semver compliant. - gitVersion string = "v1.8.0+$Format:%h$" - gitCommit string = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD) - gitTreeState string = "not a git tree" // state of git tree, either "clean" or "dirty" + + // NOTE: The $Format strings are replaced during 'git archive' thanks to the + // companion .gitattributes file containing 'export-subst' in this same + // directory. 
See also https://git-scm.com/docs/gitattributes + gitVersion string = "v0.0.0-master+$Format:%h$" + gitCommit string = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD) + gitTreeState string = "" // state of git tree, either "clean" or "dirty" buildDate string = "1970-01-01T00:00:00Z" // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ') ) diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/BUILD b/vendor/k8s.io/client-go/plugin/pkg/client/auth/BUILD index c4ed1de5aab6..8ab2d745296e 100644 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/BUILD +++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["plugins.go"], + importpath = "k8s.io/client-go/plugin/pkg/client/auth", deps = [ "//vendor/k8s.io/client-go/plugin/pkg/client/auth/azure:go_default_library", "//vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp:go_default_library", diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/azure/BUILD b/vendor/k8s.io/client-go/plugin/pkg/client/auth/azure/BUILD index 78f22c981e43..135ba355f99a 100644 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/azure/BUILD +++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/azure/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["azure_test.go"], + importpath = "k8s.io/client-go/plugin/pkg/client/auth/azure", library = ":go_default_library", deps = ["//vendor/github.com/Azure/go-autorest/autorest/adal:go_default_library"], ) @@ -16,6 +17,7 @@ go_test( go_library( name = "go_default_library", srcs = ["azure.go"], + importpath = "k8s.io/client-go/plugin/pkg/client/auth/azure", deps = [ "//vendor/github.com/Azure/go-autorest/autorest:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest/adal:go_default_library", diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/BUILD b/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/BUILD index e5ed0ffd2b92..d9ef7bded3e2 100644 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/BUILD +++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["gcp_test.go"], + importpath = "k8s.io/client-go/plugin/pkg/client/auth/gcp", library = ":go_default_library", deps = ["//vendor/golang.org/x/oauth2:go_default_library"], ) @@ -16,6 +17,7 @@ go_test( go_library( name = "go_default_library", srcs = ["gcp.go"], + importpath = "k8s.io/client-go/plugin/pkg/client/auth/gcp", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/net/context:go_default_library", diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp.go index d2e622ba7c34..5ed1203b2a6c 100644 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp.go +++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp.go @@ -228,9 +228,11 @@ func newCmdTokenSource(cmd string, args []string, tokenKey, expiryKey, timeFmt s func (c *commandTokenSource) Token() (*oauth2.Token, error) { fullCmd := strings.Join(append([]string{c.cmd}, c.args...), " ") cmd := execCommand(c.cmd, c.args...) 
+ var stderr bytes.Buffer + cmd.Stderr = &stderr output, err := cmd.Output() if err != nil { - return nil, fmt.Errorf("error executing access token command %q: err=%v output=%s", fullCmd, err, output) + return nil, fmt.Errorf("error executing access token command %q: err=%v output=%s stderr=%s", fullCmd, err, output, string(stderr.Bytes())) } token, err := c.parseTokenCmdOutput(output) if err != nil { diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/oidc/BUILD b/vendor/k8s.io/client-go/plugin/pkg/client/auth/oidc/BUILD index 8f855cd0a8aa..03ccf499ce0f 100644 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/oidc/BUILD +++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/oidc/BUILD @@ -9,12 +9,14 @@ load( go_test( name = "go_default_test", srcs = ["oidc_test.go"], + importpath = "k8s.io/client-go/plugin/pkg/client/auth/oidc", library = ":go_default_library", ) go_library( name = "go_default_library", srcs = ["oidc.go"], + importpath = "k8s.io/client-go/plugin/pkg/client/auth/oidc", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/oauth2:go_default_library", diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/oidc/oidc.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/oidc/oidc.go index 27ba722a6bc3..1fe52c5241d5 100644 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/oidc/oidc.go +++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/oidc/oidc.go @@ -258,7 +258,11 @@ func (p *oidcAuthProvider) idToken() (string, error) { idToken, ok := token.Extra("id_token").(string) if !ok { - return "", fmt.Errorf("token response did not contain an id_token") + // id_token isn't a required part of a refresh token response, so some + // providers (Okta) don't return this value. + // + // See https://github.com/kubernetes/kubernetes/issues/36847 + return "", fmt.Errorf("token response did not contain an id_token, either the scope \"openid\" wasn't requested upon login, or the provider doesn't support id_tokens as part of the refresh response.") } // Create a new config to persist. 
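Reviewer note on the lister regeneration above (certificates, core/v1, rbac, scheduling, storage, and the new events/v1beta1 package): Get() no longer builds a stub object and calls indexer.Get(key); it now calls indexer.GetByKey(name) directly, and namespaced listers such as EventNamespaceLister build the key as namespace + "/" + name. A minimal sketch of the resulting lookup path, assuming an indexer keyed by cache.MetaNamespaceKeyFunc; the StorageClass name "standard" is illustrative only, not something this diff introduces:

package main

import (
	"fmt"

	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	storagelisters "k8s.io/client-go/listers/storage/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// MetaNamespaceKeyFunc keys cluster-scoped objects by "<name>" and
	// namespaced objects by "<namespace>/<name>", which matches what the
	// regenerated Get() methods now pass to GetByKey.
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
	if err := indexer.Add(&storagev1.StorageClass{
		ObjectMeta: metav1.ObjectMeta{Name: "standard"}, // hypothetical object
	}); err != nil {
		panic(err)
	}

	lister := storagelisters.NewStorageClassLister(indexer)
	sc, err := lister.Get("standard") // resolved via indexer.GetByKey("standard")
	if err != nil {
		panic(err) // a missing key surfaces as errors.NewNotFound
	}
	fmt.Println(sc.Name)
}
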
diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/openstack/BUILD b/vendor/k8s.io/client-go/plugin/pkg/client/auth/openstack/BUILD index e69128294445..6ebaf302d2e2 100644 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/openstack/BUILD +++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/openstack/BUILD @@ -9,12 +9,14 @@ load( go_test( name = "go_default_test", srcs = ["openstack_test.go"], + importpath = "k8s.io/client-go/plugin/pkg/client/auth/openstack", library = ":go_default_library", ) go_library( name = "go_default_library", srcs = ["openstack.go"], + importpath = "k8s.io/client-go/plugin/pkg/client/auth/openstack", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/gophercloud/gophercloud/openstack:go_default_library", diff --git a/vendor/k8s.io/client-go/rest/BUILD b/vendor/k8s.io/client-go/rest/BUILD index f349bdda4bce..44958631da1e 100644 --- a/vendor/k8s.io/client-go/rest/BUILD +++ b/vendor/k8s.io/client-go/rest/BUILD @@ -16,8 +16,10 @@ go_test( "url_utils_test.go", "urlbackoff_test.go", ], + importpath = "k8s.io/client-go/rest", library = ":go_default_library", deps = [ + "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/google/gofuzz:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", @@ -56,13 +58,13 @@ go_library( "versions.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/client-go/rest", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/net/http2:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming:go_default_library", diff --git a/vendor/k8s.io/client-go/rest/config.go b/vendor/k8s.io/client-go/rest/config.go index 57848c8a74fa..038fee94537c 100644 --- a/vendor/k8s.io/client-go/rest/config.go +++ b/vendor/k8s.io/client-go/rest/config.go @@ -420,5 +420,45 @@ func AnonymousClientConfig(config *Config) *Config { QPS: config.QPS, Burst: config.Burst, Timeout: config.Timeout, + Dial: config.Dial, + } +} + +// CopyConfig returns a copy of the given config +func CopyConfig(config *Config) *Config { + return &Config{ + Host: config.Host, + APIPath: config.APIPath, + Prefix: config.Prefix, + ContentConfig: config.ContentConfig, + Username: config.Username, + Password: config.Password, + BearerToken: config.BearerToken, + CacheDir: config.CacheDir, + Impersonate: ImpersonationConfig{ + Groups: config.Impersonate.Groups, + Extra: config.Impersonate.Extra, + UserName: config.Impersonate.UserName, + }, + AuthProvider: config.AuthProvider, + AuthConfigPersister: config.AuthConfigPersister, + TLSClientConfig: TLSClientConfig{ + Insecure: config.TLSClientConfig.Insecure, + ServerName: config.TLSClientConfig.ServerName, + CertFile: config.TLSClientConfig.CertFile, + KeyFile: config.TLSClientConfig.KeyFile, + CAFile: config.TLSClientConfig.CAFile, + CertData: config.TLSClientConfig.CertData, + KeyData: config.TLSClientConfig.KeyData, + CAData: config.TLSClientConfig.CAData, + }, + UserAgent: config.UserAgent, + Transport: config.Transport, + WrapTransport: config.WrapTransport, + 
QPS: config.QPS, + Burst: config.Burst, + RateLimiter: config.RateLimiter, + Timeout: config.Timeout, + Dial: config.Dial, } } diff --git a/vendor/k8s.io/client-go/rest/request.go b/vendor/k8s.io/client-go/rest/request.go index 1709824a4e03..aac126ccca95 100644 --- a/vendor/k8s.io/client-go/rest/request.go +++ b/vendor/k8s.io/client-go/rest/request.go @@ -179,6 +179,24 @@ func (r *Request) Resource(resource string) *Request { return r } +// BackOff sets the request's backoff manager to the one specified, +// or defaults to the stub implementation if nil is provided +func (r *Request) BackOff(manager BackoffManager) *Request { + if manager == nil { + r.backoffMgr = &NoBackoff{} + return r + } + + r.backoffMgr = manager + return r +} + +// Throttle receives a rate-limiter and sets or replaces an existing request limiter +func (r *Request) Throttle(limiter flowcontrol.RateLimiter) *Request { + r.throttle = limiter + return r +} + // SubResource sets a sub-resource path which can be multiple segments segment after the resource // name but before the suffix. func (r *Request) SubResource(subresources ...string) *Request { @@ -823,6 +841,25 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu } } +// truncateBody decides if the body should be truncated, based on the glog Verbosity. +func truncateBody(body string) string { + max := 0 + switch { + case bool(glog.V(10)): + return body + case bool(glog.V(9)): + max = 10240 + case bool(glog.V(8)): + max = 1024 + } + + if len(body) <= max { + return body + } + + return body[:max] + fmt.Sprintf(" [truncated %d chars]", len(body)-max) +} + // glogBody logs a body output that could be either JSON or protobuf. It explicitly guards against // allocating a new string for the body output unless necessary. Uses a simple heuristic to determine // whether the body is printable. @@ -831,9 +868,9 @@ func glogBody(prefix string, body []byte) { if bytes.IndexFunc(body, func(r rune) bool { return r < 0x0a }) != -1 { - glog.Infof("%s:\n%s", prefix, hex.Dump(body)) + glog.Infof("%s:\n%s", prefix, truncateBody(hex.Dump(body))) } else { - glog.Infof("%s: %s", prefix, string(body)) + glog.Infof("%s: %s", prefix, truncateBody(string(body))) } } } diff --git a/vendor/k8s.io/client-go/rest/url_utils.go b/vendor/k8s.io/client-go/rest/url_utils.go index 14f94650a9e3..a56d1838d8fd 100644 --- a/vendor/k8s.io/client-go/rest/url_utils.go +++ b/vendor/k8s.io/client-go/rest/url_utils.go @@ -56,6 +56,14 @@ func DefaultServerURL(host, apiPath string, groupVersion schema.GroupVersion, de // hostURL.Path should be blank. // // versionedAPIPath, a path relative to baseURL.Path, points to a versioned API base + versionedAPIPath := DefaultVersionedAPIPath(apiPath, groupVersion) + + return hostURL, versionedAPIPath, nil +} + +// DefaultVersionedAPIPathFor constructs the default path for the given group version, assuming the given +// API path, following the standard conventions of the Kubernetes API. +func DefaultVersionedAPIPath(apiPath string, groupVersion schema.GroupVersion) string { versionedAPIPath := path.Join("/", apiPath) // Add the version to the end of the path @@ -64,10 +72,9 @@ func DefaultServerURL(host, apiPath string, groupVersion schema.GroupVersion, de } else { versionedAPIPath = path.Join(versionedAPIPath, groupVersion.Version) - } - return hostURL, versionedAPIPath, nil + return versionedAPIPath } // defaultServerUrlFor is shared between IsConfigTransportTLS and RESTClientFor. 
It diff --git a/vendor/k8s.io/client-go/rest/watch/BUILD b/vendor/k8s.io/client-go/rest/watch/BUILD index 9e0082cc276e..27183286380c 100644 --- a/vendor/k8s.io/client-go/rest/watch/BUILD +++ b/vendor/k8s.io/client-go/rest/watch/BUILD @@ -12,6 +12,7 @@ go_library( "decoder.go", "encoder.go", ], + importpath = "k8s.io/client-go/rest/watch", deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", @@ -26,6 +27,7 @@ go_test( "decoder_test.go", "encoder_test.go", ], + importpath = "k8s.io/client-go/rest/watch_test", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", diff --git a/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go index 1632e1efe3f3..59050fc491a4 100644 --- a/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go +++ b/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go @@ -20,23 +20,6 @@ limitations under the License. package rest -import ( - conversion "k8s.io/apimachinery/pkg/conversion" - reflect "reflect" -) - -// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { - return []conversion.GeneratedDeepCopyFunc{ - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*TLSClientConfig).DeepCopyInto(out.(*TLSClientConfig)) - return nil - }, InType: reflect.TypeOf(&TLSClientConfig{})}, - } -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *TLSClientConfig) DeepCopyInto(out *TLSClientConfig) { *out = *in diff --git a/vendor/k8s.io/client-go/scale/BUILD b/vendor/k8s.io/client-go/scale/BUILD new file mode 100644 index 000000000000..4fb0a949e402 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/BUILD @@ -0,0 +1,77 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "client.go", + "doc.go", + "interfaces.go", + "util.go", + ], + importpath = "k8s.io/client-go/scale", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/api/autoscaling/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", + "//vendor/k8s.io/client-go/discovery:go_default_library", + "//vendor/k8s.io/client-go/dynamic:go_default_library", + "//vendor/k8s.io/client-go/rest:go_default_library", + "//vendor/k8s.io/client-go/scale/scheme:go_default_library", + "//vendor/k8s.io/client-go/scale/scheme/appsint:go_default_library", + "//vendor/k8s.io/client-go/scale/scheme/appsv1beta1:go_default_library", + "//vendor/k8s.io/client-go/scale/scheme/appsv1beta2:go_default_library", + "//vendor/k8s.io/client-go/scale/scheme/autoscalingv1:go_default_library", + "//vendor/k8s.io/client-go/scale/scheme/extensionsint:go_default_library", + "//vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "client_test.go", + "roundtrip_test.go", + ], + importpath = "k8s.io/client-go/scale", + library = ":go_default_library", + deps = [ + "//vendor/github.com/stretchr/testify/assert:go_default_library", + "//vendor/k8s.io/api/apps/v1beta1:go_default_library", + "//vendor/k8s.io/api/apps/v1beta2:go_default_library", + "//vendor/k8s.io/api/autoscaling/v1:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/testing/roundtrip:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", + "//vendor/k8s.io/client-go/discovery:go_default_library", + "//vendor/k8s.io/client-go/discovery/fake:go_default_library", + "//vendor/k8s.io/client-go/dynamic:go_default_library", + "//vendor/k8s.io/client-go/rest/fake:go_default_library", + "//vendor/k8s.io/client-go/testing:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//staging/src/k8s.io/client-go/scale/fake:all-srcs", + "//staging/src/k8s.io/client-go/scale/scheme:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/client-go/scale/client.go b/vendor/k8s.io/client-go/scale/client.go new file mode 100644 index 000000000000..3f85197a0b6f --- /dev/null +++ b/vendor/k8s.io/client-go/scale/client.go @@ -0,0 +1,197 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scale + +import ( + "fmt" + + autoscaling "k8s.io/api/autoscaling/v1" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/dynamic" + restclient "k8s.io/client-go/rest" +) + +var scaleConverter = NewScaleConverter() +var codecs = serializer.NewCodecFactory(scaleConverter.Scheme()) + +// restInterfaceProvider turns a restclient.Config into a restclient.Interface. +// It's overridable for the purposes of testing. +type restInterfaceProvider func(*restclient.Config) (restclient.Interface, error) + +// scaleClient is an implementation of ScalesGetter +// which makes use of a RESTMapper and a generic REST +// client to support an discoverable resource. +// It behaves somewhat similarly to the dynamic ClientPool, +// but is more specifically scoped to Scale. +type scaleClient struct { + mapper meta.RESTMapper + + apiPathResolverFunc dynamic.APIPathResolverFunc + scaleKindResolver ScaleKindResolver + clientBase restclient.Interface +} + +// NewForConfig creates a new ScalesGetter which resolves kinds +// to resources using the given RESTMapper, and API paths using +// the given dynamic.APIPathResolverFunc. +func NewForConfig(cfg *restclient.Config, mapper meta.RESTMapper, resolver dynamic.APIPathResolverFunc, scaleKindResolver ScaleKindResolver) (ScalesGetter, error) { + // so that the RESTClientFor doesn't complain + cfg.GroupVersion = &schema.GroupVersion{} + + cfg.NegotiatedSerializer = serializer.DirectCodecFactory{ + CodecFactory: codecs, + } + if len(cfg.UserAgent) == 0 { + cfg.UserAgent = restclient.DefaultKubernetesUserAgent() + } + + client, err := restclient.RESTClientFor(cfg) + if err != nil { + return nil, err + } + + return New(client, mapper, resolver, scaleKindResolver), nil +} + +// New creates a new ScalesGetter using the given client to make requests. +// The GroupVersion on the client is ignored. 
+func New(baseClient restclient.Interface, mapper meta.RESTMapper, resolver dynamic.APIPathResolverFunc, scaleKindResolver ScaleKindResolver) ScalesGetter { + return &scaleClient{ + mapper: mapper, + + apiPathResolverFunc: resolver, + scaleKindResolver: scaleKindResolver, + clientBase: baseClient, + } +} + +// pathAndVersionFor returns the appropriate base path and the associated full GroupVersionResource +// for the given GroupResource +func (c *scaleClient) pathAndVersionFor(resource schema.GroupResource) (string, schema.GroupVersionResource, error) { + gvr, err := c.mapper.ResourceFor(resource.WithVersion("")) + if err != nil { + return "", gvr, fmt.Errorf("unable to get full preferred group-version-resource for %s: %v", resource.String(), err) + } + + groupVer := gvr.GroupVersion() + + // we need to set the API path based on GroupVersion (defaulting to the legacy path if none is set) + // TODO: we "cheat" here since the API path really only depends on group ATM, but this should + // *probably* take GroupVersionResource and not GroupVersionKind. + apiPath := c.apiPathResolverFunc(groupVer.WithKind("")) + if apiPath == "" { + apiPath = "/api" + } + + path := restclient.DefaultVersionedAPIPath(apiPath, groupVer) + + return path, gvr, nil +} + +// namespacedScaleClient is an ScaleInterface for fetching +// Scales in a given namespace. +type namespacedScaleClient struct { + client *scaleClient + namespace string +} + +func (c *scaleClient) Scales(namespace string) ScaleInterface { + return &namespacedScaleClient{ + client: c, + namespace: namespace, + } +} + +func (c *namespacedScaleClient) Get(resource schema.GroupResource, name string) (*autoscaling.Scale, error) { + // Currently, a /scale endpoint can return different scale types. + // Until we have support for the alternative API representations proposal, + // we need to deal with accepting different API versions. + // In practice, this is autoscaling/v1.Scale and extensions/v1beta1.Scale + + path, gvr, err := c.client.pathAndVersionFor(resource) + if err != nil { + return nil, fmt.Errorf("unable to get client for %s: %v", resource.String(), err) + } + + rawObj, err := c.client.clientBase.Get(). + AbsPath(path). + Namespace(c.namespace). + Resource(gvr.Resource). + Name(name). + SubResource("scale"). + Do(). + Get() + + if err != nil { + return nil, err + } + + // convert whatever this is to autoscaling/v1.Scale + scaleObj, err := scaleConverter.ConvertToVersion(rawObj, autoscaling.SchemeGroupVersion) + if err != nil { + return nil, fmt.Errorf("received an object from a /scale endpoint which was not convertible to autoscaling Scale: %v", err) + } + + return scaleObj.(*autoscaling.Scale), nil +} + +func (c *namespacedScaleClient) Update(resource schema.GroupResource, scale *autoscaling.Scale) (*autoscaling.Scale, error) { + path, gvr, err := c.client.pathAndVersionFor(resource) + if err != nil { + return nil, fmt.Errorf("unable to get client for %s: %v", resource.String(), err) + } + + // Currently, a /scale endpoint can receive and return different scale types. + // Until we hvae support for the alternative API representations proposal, + // we need to deal with sending and accepting differnet API versions. 
+ + // figure out what scale we actually need here + desiredGVK, err := c.client.scaleKindResolver.ScaleForResource(gvr) + if err != nil { + return nil, fmt.Errorf("could not find proper group-version for scale subresource of %s: %v", gvr.String(), err) + } + + // convert this to whatever this endpoint wants + scaleUpdate, err := scaleConverter.ConvertToVersion(scale, desiredGVK.GroupVersion()) + if err != nil { + return nil, fmt.Errorf("could not convert scale update to internal Scale: %v", err) + } + + rawObj, err := c.client.clientBase.Put(). + AbsPath(path). + Namespace(c.namespace). + Resource(gvr.Resource). + Name(scale.Name). + SubResource("scale"). + Body(scaleUpdate). + Do(). + Get() + + if err != nil { + return nil, fmt.Errorf("could not fetch the scale for %s %s: %v", resource.String(), scale.Name, err) + } + + // convert whatever this is back to autoscaling/v1.Scale + scaleObj, err := scaleConverter.ConvertToVersion(rawObj, autoscaling.SchemeGroupVersion) + if err != nil { + return nil, fmt.Errorf("received an object from a /scale endpoint which was not convertible to autoscaling Scale: %v", err) + } + + return scaleObj.(*autoscaling.Scale), err +} diff --git a/vendor/k8s.io/client-go/scale/doc.go b/vendor/k8s.io/client-go/scale/doc.go new file mode 100644 index 000000000000..59fd39146cae --- /dev/null +++ b/vendor/k8s.io/client-go/scale/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package scale provides a polymorphic scale client capable of fetching +// and updating Scale for any resource which implements the `scale` subresource, +// as long as that subresource operates on a version of scale convertable to +// autoscaling.Scale. +package scale diff --git a/vendor/k8s.io/client-go/scale/interfaces.go b/vendor/k8s.io/client-go/scale/interfaces.go new file mode 100644 index 000000000000..4668c7417d18 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/interfaces.go @@ -0,0 +1,39 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scale + +import ( + autoscalingapi "k8s.io/api/autoscaling/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// ScalesGetter can produce a ScaleInterface +// for a particular namespace. +type ScalesGetter interface { + Scales(namespace string) ScaleInterface +} + +// ScaleInterface can fetch and update scales for +// resources in a particular namespace which implement +// the scale subresource. 
+type ScaleInterface interface { + // Get fetches the scale of the given scalable resource. + Get(resource schema.GroupResource, name string) (*autoscalingapi.Scale, error) + + // Update updates the scale of the given scalable resource. + Update(resource schema.GroupResource, scale *autoscalingapi.Scale) (*autoscalingapi.Scale, error) +} diff --git a/vendor/k8s.io/client-go/scale/scheme/BUILD b/vendor/k8s.io/client-go/scale/scheme/BUILD new file mode 100644 index 000000000000..74bd976a1f76 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/BUILD @@ -0,0 +1,41 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "register.go", + "types.go", + "zz_generated.deepcopy.go", + ], + importpath = "k8s.io/client-go/scale/scheme", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/api/autoscaling/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//staging/src/k8s.io/client-go/scale/scheme/appsint:all-srcs", + "//staging/src/k8s.io/client-go/scale/scheme/appsv1beta1:all-srcs", + "//staging/src/k8s.io/client-go/scale/scheme/appsv1beta2:all-srcs", + "//staging/src/k8s.io/client-go/scale/scheme/autoscalingv1:all-srcs", + "//staging/src/k8s.io/client-go/scale/scheme/extensionsint:all-srcs", + "//staging/src/k8s.io/client-go/scale/scheme/extensionsv1beta1:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/client-go/scale/scheme/appsint/BUILD b/vendor/k8s.io/client-go/scale/scheme/appsint/BUILD new file mode 100644 index 000000000000..2fef63f0e058 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/appsint/BUILD @@ -0,0 +1,31 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "register.go", + ], + importpath = "k8s.io/client-go/scale/scheme/appsint", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/api/apps/v1beta2:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/client-go/scale/scheme:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/client-go/scale/scheme/appsint/doc.go b/vendor/k8s.io/client-go/scale/scheme/appsint/doc.go new file mode 100644 index 000000000000..5b1d6841c4c3 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/appsint/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package appsint contains the necessary scaffolding of the +// internal version of apps as required by conversion logic. +// It doesn't have any of its own types -- it's just necessary to +// get the expected behavior out of runtime.Scheme.ConvertToVersion +// and associated methods. +package appsint diff --git a/vendor/k8s.io/client-go/scale/scheme/appsint/register.go b/vendor/k8s.io/client-go/scale/scheme/appsint/register.go new file mode 100644 index 000000000000..bbeaedac549f --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/appsint/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package appsint + +import ( + appsv1beta2 "k8s.io/api/apps/v1beta2" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + scalescheme "k8s.io/client-go/scale/scheme" +) + +// GroupName is the group name use in this package +const GroupName = appsv1beta2.GroupName + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &scalescheme.Scale{}, + ) + return nil +} diff --git a/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/BUILD b/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/BUILD new file mode 100644 index 000000000000..05530e300b24 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/BUILD @@ -0,0 +1,35 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "conversion.go", + "doc.go", + "register.go", + "zz_generated.conversion.go", + ], + importpath = "k8s.io/client-go/scale/scheme/appsv1beta1", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/api/apps/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/client-go/scale/scheme:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/conversion.go b/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/conversion.go new file mode 100644 index 000000000000..af062b3c6350 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/conversion.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package appsv1beta1 + +import ( + "fmt" + + v1beta1 "k8s.io/api/apps/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + scheme "k8s.io/client-go/scale/scheme" +) + +// addConversions registers conversions between the internal version +// of Scale and supported external versions of Scale. 
+func addConversionFuncs(scheme *runtime.Scheme) error { + err := scheme.AddConversionFuncs( + Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus, + Convert_v1beta1_ScaleStatus_To_scheme_ScaleStatus, + ) + if err != nil { + return err + } + + return nil +} +func Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(in *scheme.ScaleStatus, out *v1beta1.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.Selector = nil + out.TargetSelector = "" + if in.Selector != nil { + if in.Selector.MatchExpressions == nil || len(in.Selector.MatchExpressions) == 0 { + out.Selector = in.Selector.MatchLabels + } + + selector, err := metav1.LabelSelectorAsSelector(in.Selector) + if err != nil { + return fmt.Errorf("invalid label selector: %v", err) + } + out.TargetSelector = selector.String() + } + + return nil +} + +func Convert_v1beta1_ScaleStatus_To_scheme_ScaleStatus(in *v1beta1.ScaleStatus, out *scheme.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + + // Normally when 2 fields map to the same internal value we favor the old field, since + // old clients can't be expected to know about new fields but clients that know about the + // new field can be expected to know about the old field (though that's not quite true, due + // to kubectl apply). However, these fields are readonly, so any non-nil value should work. + if in.TargetSelector != "" { + labelSelector, err := metav1.ParseToLabelSelector(in.TargetSelector) + if err != nil { + out.Selector = nil + return fmt.Errorf("failed to parse target selector: %v", err) + } + out.Selector = labelSelector + } else if in.Selector != nil { + out.Selector = new(metav1.LabelSelector) + selector := make(map[string]string) + for key, val := range in.Selector { + selector[key] = val + } + out.Selector.MatchLabels = selector + } else { + out.Selector = nil + } + + return nil +} diff --git a/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/doc.go b/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/doc.go new file mode 100644 index 000000000000..14c365580032 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:conversion-gen=k8s.io/kubernetes/vendor/k8s.io/client-go/scale/scheme +// +k8s:conversion-gen-external-types=k8s.io/api/apps/v1beta1 + +package appsv1beta1 diff --git a/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/register.go b/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/register.go new file mode 100644 index 000000000000..a684f2d535bc --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/register.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package appsv1beta1 + +import ( + appsapiv1beta1 "k8s.io/api/apps/v1beta1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = appsapiv1beta1.GroupName + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + localSchemeBuilder = &appsapiv1beta1.SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addConversionFuncs) +} diff --git a/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/zz_generated.conversion.go new file mode 100644 index 000000000000..93e0e61640d3 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/zz_generated.conversion.go @@ -0,0 +1,110 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package appsv1beta1 + +import ( + v1beta1 "k8s.io/api/apps/v1beta1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + scheme "k8s.io/client-go/scale/scheme" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1beta1_Scale_To_scheme_Scale, + Convert_scheme_Scale_To_v1beta1_Scale, + Convert_v1beta1_ScaleSpec_To_scheme_ScaleSpec, + Convert_scheme_ScaleSpec_To_v1beta1_ScaleSpec, + Convert_v1beta1_ScaleStatus_To_scheme_ScaleStatus, + Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus, + ) +} + +func autoConvert_v1beta1_Scale_To_scheme_Scale(in *v1beta1.Scale, out *scheme.Scale, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_ScaleStatus_To_scheme_ScaleStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_Scale_To_scheme_Scale is an autogenerated conversion function. +func Convert_v1beta1_Scale_To_scheme_Scale(in *v1beta1.Scale, out *scheme.Scale, s conversion.Scope) error { + return autoConvert_v1beta1_Scale_To_scheme_Scale(in, out, s) +} + +func autoConvert_scheme_Scale_To_v1beta1_Scale(in *scheme.Scale, out *v1beta1.Scale, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_scheme_Scale_To_v1beta1_Scale is an autogenerated conversion function. +func Convert_scheme_Scale_To_v1beta1_Scale(in *scheme.Scale, out *v1beta1.Scale, s conversion.Scope) error { + return autoConvert_scheme_Scale_To_v1beta1_Scale(in, out, s) +} + +func autoConvert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(in *v1beta1.ScaleSpec, out *scheme.ScaleSpec, s conversion.Scope) error { + out.Replicas = in.Replicas + return nil +} + +// Convert_v1beta1_ScaleSpec_To_scheme_ScaleSpec is an autogenerated conversion function. +func Convert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(in *v1beta1.ScaleSpec, out *scheme.ScaleSpec, s conversion.Scope) error { + return autoConvert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(in, out, s) +} + +func autoConvert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(in *scheme.ScaleSpec, out *v1beta1.ScaleSpec, s conversion.Scope) error { + out.Replicas = in.Replicas + return nil +} + +// Convert_scheme_ScaleSpec_To_v1beta1_ScaleSpec is an autogenerated conversion function. 
+func Convert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(in *scheme.ScaleSpec, out *v1beta1.ScaleSpec, s conversion.Scope) error { + return autoConvert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(in, out, s) +} + +func autoConvert_v1beta1_ScaleStatus_To_scheme_ScaleStatus(in *v1beta1.ScaleStatus, out *scheme.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + // WARNING: in.Selector requires manual conversion: inconvertible types (map[string]string vs *k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector) + // WARNING: in.TargetSelector requires manual conversion: does not exist in peer-type + return nil +} + +func autoConvert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(in *scheme.ScaleStatus, out *v1beta1.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + // WARNING: in.Selector requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector vs map[string]string) + return nil +} diff --git a/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/BUILD b/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/BUILD new file mode 100644 index 000000000000..7e52bc060b60 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/BUILD @@ -0,0 +1,35 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "conversion.go", + "doc.go", + "register.go", + "zz_generated.conversion.go", + ], + importpath = "k8s.io/client-go/scale/scheme/appsv1beta2", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/api/apps/v1beta2:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/client-go/scale/scheme:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/conversion.go b/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/conversion.go new file mode 100644 index 000000000000..f07de6bdae02 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/conversion.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package appsv1beta2 + +import ( + "fmt" + + v1beta2 "k8s.io/api/apps/v1beta2" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + scheme "k8s.io/client-go/scale/scheme" +) + +// addConversions registers conversions between the internal version +// of Scale and supported external versions of Scale. 
+func addConversionFuncs(scheme *runtime.Scheme) error { + err := scheme.AddConversionFuncs( + Convert_scheme_ScaleStatus_To_v1beta2_ScaleStatus, + Convert_v1beta2_ScaleStatus_To_scheme_ScaleStatus, + ) + if err != nil { + return err + } + + return nil +} +func Convert_scheme_ScaleStatus_To_v1beta2_ScaleStatus(in *scheme.ScaleStatus, out *v1beta2.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.Selector = nil + out.TargetSelector = "" + if in.Selector != nil { + if in.Selector.MatchExpressions == nil || len(in.Selector.MatchExpressions) == 0 { + out.Selector = in.Selector.MatchLabels + } + + selector, err := metav1.LabelSelectorAsSelector(in.Selector) + if err != nil { + return fmt.Errorf("invalid label selector: %v", err) + } + out.TargetSelector = selector.String() + } + + return nil +} + +func Convert_v1beta2_ScaleStatus_To_scheme_ScaleStatus(in *v1beta2.ScaleStatus, out *scheme.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + + // Normally when 2 fields map to the same internal value we favor the old field, since + // old clients can't be expected to know about new fields but clients that know about the + // new field can be expected to know about the old field (though that's not quite true, due + // to kubectl apply). However, these fields are readonly, so any non-nil value should work. + if in.TargetSelector != "" { + labelSelector, err := metav1.ParseToLabelSelector(in.TargetSelector) + if err != nil { + out.Selector = nil + return fmt.Errorf("failed to parse target selector: %v", err) + } + out.Selector = labelSelector + } else if in.Selector != nil { + out.Selector = new(metav1.LabelSelector) + selector := make(map[string]string) + for key, val := range in.Selector { + selector[key] = val + } + out.Selector.MatchLabels = selector + } else { + out.Selector = nil + } + + return nil +} diff --git a/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/doc.go b/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/doc.go new file mode 100644 index 000000000000..aca77688cc32 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:conversion-gen=k8s.io/kubernetes/vendor/k8s.io/client-go/scale/scheme +// +k8s:conversion-gen-external-types=k8s.io/api/apps/v1beta2 + +package appsv1beta2 diff --git a/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/register.go b/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/register.go new file mode 100644 index 000000000000..88de08932937 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/register.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package appsv1beta2 + +import ( + appsapiv1beta2 "k8s.io/api/apps/v1beta2" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = appsapiv1beta2.GroupName + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta2"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + localSchemeBuilder = &appsapiv1beta2.SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addConversionFuncs) +} diff --git a/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/zz_generated.conversion.go b/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/zz_generated.conversion.go new file mode 100644 index 000000000000..410a0d90c5e5 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/zz_generated.conversion.go @@ -0,0 +1,110 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package appsv1beta2 + +import ( + v1beta2 "k8s.io/api/apps/v1beta2" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + scheme "k8s.io/client-go/scale/scheme" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1beta2_Scale_To_scheme_Scale, + Convert_scheme_Scale_To_v1beta2_Scale, + Convert_v1beta2_ScaleSpec_To_scheme_ScaleSpec, + Convert_scheme_ScaleSpec_To_v1beta2_ScaleSpec, + Convert_v1beta2_ScaleStatus_To_scheme_ScaleStatus, + Convert_scheme_ScaleStatus_To_v1beta2_ScaleStatus, + ) +} + +func autoConvert_v1beta2_Scale_To_scheme_Scale(in *v1beta2.Scale, out *scheme.Scale, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta2_ScaleSpec_To_scheme_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta2_ScaleStatus_To_scheme_ScaleStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta2_Scale_To_scheme_Scale is an autogenerated conversion function. +func Convert_v1beta2_Scale_To_scheme_Scale(in *v1beta2.Scale, out *scheme.Scale, s conversion.Scope) error { + return autoConvert_v1beta2_Scale_To_scheme_Scale(in, out, s) +} + +func autoConvert_scheme_Scale_To_v1beta2_Scale(in *scheme.Scale, out *v1beta2.Scale, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_scheme_ScaleSpec_To_v1beta2_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_scheme_ScaleStatus_To_v1beta2_ScaleStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_scheme_Scale_To_v1beta2_Scale is an autogenerated conversion function. +func Convert_scheme_Scale_To_v1beta2_Scale(in *scheme.Scale, out *v1beta2.Scale, s conversion.Scope) error { + return autoConvert_scheme_Scale_To_v1beta2_Scale(in, out, s) +} + +func autoConvert_v1beta2_ScaleSpec_To_scheme_ScaleSpec(in *v1beta2.ScaleSpec, out *scheme.ScaleSpec, s conversion.Scope) error { + out.Replicas = in.Replicas + return nil +} + +// Convert_v1beta2_ScaleSpec_To_scheme_ScaleSpec is an autogenerated conversion function. +func Convert_v1beta2_ScaleSpec_To_scheme_ScaleSpec(in *v1beta2.ScaleSpec, out *scheme.ScaleSpec, s conversion.Scope) error { + return autoConvert_v1beta2_ScaleSpec_To_scheme_ScaleSpec(in, out, s) +} + +func autoConvert_scheme_ScaleSpec_To_v1beta2_ScaleSpec(in *scheme.ScaleSpec, out *v1beta2.ScaleSpec, s conversion.Scope) error { + out.Replicas = in.Replicas + return nil +} + +// Convert_scheme_ScaleSpec_To_v1beta2_ScaleSpec is an autogenerated conversion function. 
+func Convert_scheme_ScaleSpec_To_v1beta2_ScaleSpec(in *scheme.ScaleSpec, out *v1beta2.ScaleSpec, s conversion.Scope) error { + return autoConvert_scheme_ScaleSpec_To_v1beta2_ScaleSpec(in, out, s) +} + +func autoConvert_v1beta2_ScaleStatus_To_scheme_ScaleStatus(in *v1beta2.ScaleStatus, out *scheme.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + // WARNING: in.Selector requires manual conversion: inconvertible types (map[string]string vs *k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector) + // WARNING: in.TargetSelector requires manual conversion: does not exist in peer-type + return nil +} + +func autoConvert_scheme_ScaleStatus_To_v1beta2_ScaleStatus(in *scheme.ScaleStatus, out *v1beta2.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + // WARNING: in.Selector requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector vs map[string]string) + return nil +} diff --git a/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/BUILD b/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/BUILD new file mode 100644 index 000000000000..646a6fdf8e7a --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/BUILD @@ -0,0 +1,35 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "conversion.go", + "doc.go", + "register.go", + "zz_generated.conversion.go", + ], + importpath = "k8s.io/client-go/scale/scheme/autoscalingv1", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/api/autoscaling/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/client-go/scale/scheme:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/conversion.go b/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/conversion.go new file mode 100644 index 000000000000..a775bcc225bc --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/conversion.go @@ -0,0 +1,69 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package autoscalingv1 + +import ( + "fmt" + + v1 "k8s.io/api/autoscaling/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + scheme "k8s.io/client-go/scale/scheme" +) + +// addConversions registers conversions between the internal version +// of Scale and supported external versions of Scale. 
+func addConversionFuncs(scheme *runtime.Scheme) error { + err := scheme.AddConversionFuncs( + Convert_scheme_ScaleStatus_To_v1_ScaleStatus, + Convert_v1_ScaleStatus_To_scheme_ScaleStatus, + ) + if err != nil { + return err + } + + return nil +} + +func Convert_scheme_ScaleStatus_To_v1_ScaleStatus(in *scheme.ScaleStatus, out *v1.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.Selector = "" + if in.Selector != nil { + selector, err := metav1.LabelSelectorAsSelector(in.Selector) + if err != nil { + return fmt.Errorf("invalid label selector: %v", err) + } + out.Selector = selector.String() + } + + return nil +} + +func Convert_v1_ScaleStatus_To_scheme_ScaleStatus(in *v1.ScaleStatus, out *scheme.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + if in.Selector != "" { + labelSelector, err := metav1.ParseToLabelSelector(in.Selector) + if err != nil { + out.Selector = nil + return fmt.Errorf("failed to parse target selector: %v", err) + } + out.Selector = labelSelector + } + + return nil +} diff --git a/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/doc.go b/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/doc.go new file mode 100644 index 000000000000..2e0509952828 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:conversion-gen=k8s.io/kubernetes/vendor/k8s.io/client-go/scale/scheme +// +k8s:conversion-gen-external-types=k8s.io/api/autoscaling/v1 + +package autoscalingv1 diff --git a/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/register.go b/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/register.go new file mode 100644 index 000000000000..b15701c4ff7e --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/register.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package autoscalingv1 + +import ( + autoscalingapiv1 "k8s.io/api/autoscaling/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = autoscalingapiv1.GroupName + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + localSchemeBuilder = &autoscalingapiv1.SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addConversionFuncs) +} diff --git a/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/zz_generated.conversion.go b/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/zz_generated.conversion.go new file mode 100644 index 000000000000..1eaa0d180270 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/zz_generated.conversion.go @@ -0,0 +1,109 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package autoscalingv1 + +import ( + v1 "k8s.io/api/autoscaling/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + scheme "k8s.io/client-go/scale/scheme" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1_Scale_To_scheme_Scale, + Convert_scheme_Scale_To_v1_Scale, + Convert_v1_ScaleSpec_To_scheme_ScaleSpec, + Convert_scheme_ScaleSpec_To_v1_ScaleSpec, + Convert_v1_ScaleStatus_To_scheme_ScaleStatus, + Convert_scheme_ScaleStatus_To_v1_ScaleStatus, + ) +} + +func autoConvert_v1_Scale_To_scheme_Scale(in *v1.Scale, out *scheme.Scale, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_ScaleSpec_To_scheme_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_ScaleStatus_To_scheme_ScaleStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_Scale_To_scheme_Scale is an autogenerated conversion function. 
+func Convert_v1_Scale_To_scheme_Scale(in *v1.Scale, out *scheme.Scale, s conversion.Scope) error { + return autoConvert_v1_Scale_To_scheme_Scale(in, out, s) +} + +func autoConvert_scheme_Scale_To_v1_Scale(in *scheme.Scale, out *v1.Scale, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_scheme_ScaleSpec_To_v1_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_scheme_ScaleStatus_To_v1_ScaleStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_scheme_Scale_To_v1_Scale is an autogenerated conversion function. +func Convert_scheme_Scale_To_v1_Scale(in *scheme.Scale, out *v1.Scale, s conversion.Scope) error { + return autoConvert_scheme_Scale_To_v1_Scale(in, out, s) +} + +func autoConvert_v1_ScaleSpec_To_scheme_ScaleSpec(in *v1.ScaleSpec, out *scheme.ScaleSpec, s conversion.Scope) error { + out.Replicas = in.Replicas + return nil +} + +// Convert_v1_ScaleSpec_To_scheme_ScaleSpec is an autogenerated conversion function. +func Convert_v1_ScaleSpec_To_scheme_ScaleSpec(in *v1.ScaleSpec, out *scheme.ScaleSpec, s conversion.Scope) error { + return autoConvert_v1_ScaleSpec_To_scheme_ScaleSpec(in, out, s) +} + +func autoConvert_scheme_ScaleSpec_To_v1_ScaleSpec(in *scheme.ScaleSpec, out *v1.ScaleSpec, s conversion.Scope) error { + out.Replicas = in.Replicas + return nil +} + +// Convert_scheme_ScaleSpec_To_v1_ScaleSpec is an autogenerated conversion function. +func Convert_scheme_ScaleSpec_To_v1_ScaleSpec(in *scheme.ScaleSpec, out *v1.ScaleSpec, s conversion.Scope) error { + return autoConvert_scheme_ScaleSpec_To_v1_ScaleSpec(in, out, s) +} + +func autoConvert_v1_ScaleStatus_To_scheme_ScaleStatus(in *v1.ScaleStatus, out *scheme.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + // WARNING: in.Selector requires manual conversion: inconvertible types (string vs *k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector) + return nil +} + +func autoConvert_scheme_ScaleStatus_To_v1_ScaleStatus(in *scheme.ScaleStatus, out *v1.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + // WARNING: in.Selector requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector vs string) + return nil +} diff --git a/vendor/k8s.io/client-go/scale/scheme/doc.go b/vendor/k8s.io/client-go/scale/scheme/doc.go new file mode 100644 index 000000000000..0203d6d5a27f --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package + +// Package scheme contains a runtime.Scheme to be used for serializing +// and deserializing different versions of Scale, and for converting +// in between them. 
+package scheme diff --git a/vendor/k8s.io/client-go/scale/scheme/extensionsint/BUILD b/vendor/k8s.io/client-go/scale/scheme/extensionsint/BUILD new file mode 100644 index 000000000000..6174a88b145d --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/extensionsint/BUILD @@ -0,0 +1,31 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "register.go", + ], + importpath = "k8s.io/client-go/scale/scheme/extensionsint", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/client-go/scale/scheme:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/client-go/scale/scheme/extensionsint/doc.go b/vendor/k8s.io/client-go/scale/scheme/extensionsint/doc.go new file mode 100644 index 000000000000..cc92bf18824d --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/extensionsint/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package extensionsint contains the necessary scaffolding of the +// internal version of extensions as required by conversion logic. +// It doesn't have any of its own types -- it's just necessary to +// get the expected behavior out of runtime.Scheme.ConvertToVersion +// and associated methods. +package extensionsint diff --git a/vendor/k8s.io/client-go/scale/scheme/extensionsint/register.go b/vendor/k8s.io/client-go/scale/scheme/extensionsint/register.go new file mode 100644 index 000000000000..5a96ac56140a --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/extensionsint/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package extensionsint + +import ( + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + scalescheme "k8s.io/client-go/scale/scheme" +) + +// GroupName is the group name use in this package +const GroupName = extensionsv1beta1.GroupName + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &scalescheme.Scale{}, + ) + return nil +} diff --git a/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/BUILD b/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/BUILD new file mode 100644 index 000000000000..4c992c7d113e --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/BUILD @@ -0,0 +1,35 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "conversion.go", + "doc.go", + "register.go", + "zz_generated.conversion.go", + ], + importpath = "k8s.io/client-go/scale/scheme/extensionsv1beta1", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/client-go/scale/scheme:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/conversion.go b/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/conversion.go new file mode 100644 index 000000000000..1b6b9e610379 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/conversion.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package extensionsv1beta1 + +import ( + "fmt" + + v1beta1 "k8s.io/api/extensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + scheme "k8s.io/client-go/scale/scheme" +) + +// addConversions registers conversions between the internal version +// of Scale and supported external versions of Scale. +func addConversionFuncs(scheme *runtime.Scheme) error { + err := scheme.AddConversionFuncs( + Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus, + Convert_v1beta1_ScaleStatus_To_scheme_ScaleStatus, + ) + if err != nil { + return err + } + + return nil +} +func Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(in *scheme.ScaleStatus, out *v1beta1.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.Selector = nil + out.TargetSelector = "" + if in.Selector != nil { + if in.Selector.MatchExpressions == nil || len(in.Selector.MatchExpressions) == 0 { + out.Selector = in.Selector.MatchLabels + } + + selector, err := metav1.LabelSelectorAsSelector(in.Selector) + if err != nil { + return fmt.Errorf("invalid label selector: %v", err) + } + out.TargetSelector = selector.String() + } + + return nil +} + +func Convert_v1beta1_ScaleStatus_To_scheme_ScaleStatus(in *v1beta1.ScaleStatus, out *scheme.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + + // Normally when 2 fields map to the same internal value we favor the old field, since + // old clients can't be expected to know about new fields but clients that know about the + // new field can be expected to know about the old field (though that's not quite true, due + // to kubectl apply). However, these fields are readonly, so any non-nil value should work. + if in.TargetSelector != "" { + labelSelector, err := metav1.ParseToLabelSelector(in.TargetSelector) + if err != nil { + out.Selector = nil + return fmt.Errorf("failed to parse target selector: %v", err) + } + out.Selector = labelSelector + } else if in.Selector != nil { + out.Selector = new(metav1.LabelSelector) + selector := make(map[string]string) + for key, val := range in.Selector { + selector[key] = val + } + out.Selector.MatchLabels = selector + } else { + out.Selector = nil + } + + return nil +} diff --git a/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/doc.go b/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/doc.go new file mode 100644 index 000000000000..f41470d6b406 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:conversion-gen=k8s.io/kubernetes/vendor/k8s.io/client-go/scale/scheme +// +k8s:conversion-gen-external-types=k8s.io/api/extensions/v1beta1 + +package extensionsv1beta1 diff --git a/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/register.go b/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/register.go new file mode 100644 index 000000000000..aed1174e023f --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/register.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package extensionsv1beta1 + +import ( + extensionsapiv1beta1 "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = extensionsapiv1beta1.GroupName + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + localSchemeBuilder = &extensionsapiv1beta1.SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addConversionFuncs) +} diff --git a/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/zz_generated.conversion.go new file mode 100644 index 000000000000..848cb8d2aa3a --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/zz_generated.conversion.go @@ -0,0 +1,110 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package extensionsv1beta1 + +import ( + v1beta1 "k8s.io/api/extensions/v1beta1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + scheme "k8s.io/client-go/scale/scheme" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. 
+// Public to allow building arbitrary schemes. +func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1beta1_Scale_To_scheme_Scale, + Convert_scheme_Scale_To_v1beta1_Scale, + Convert_v1beta1_ScaleSpec_To_scheme_ScaleSpec, + Convert_scheme_ScaleSpec_To_v1beta1_ScaleSpec, + Convert_v1beta1_ScaleStatus_To_scheme_ScaleStatus, + Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus, + ) +} + +func autoConvert_v1beta1_Scale_To_scheme_Scale(in *v1beta1.Scale, out *scheme.Scale, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_ScaleStatus_To_scheme_ScaleStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_Scale_To_scheme_Scale is an autogenerated conversion function. +func Convert_v1beta1_Scale_To_scheme_Scale(in *v1beta1.Scale, out *scheme.Scale, s conversion.Scope) error { + return autoConvert_v1beta1_Scale_To_scheme_Scale(in, out, s) +} + +func autoConvert_scheme_Scale_To_v1beta1_Scale(in *scheme.Scale, out *v1beta1.Scale, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_scheme_Scale_To_v1beta1_Scale is an autogenerated conversion function. +func Convert_scheme_Scale_To_v1beta1_Scale(in *scheme.Scale, out *v1beta1.Scale, s conversion.Scope) error { + return autoConvert_scheme_Scale_To_v1beta1_Scale(in, out, s) +} + +func autoConvert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(in *v1beta1.ScaleSpec, out *scheme.ScaleSpec, s conversion.Scope) error { + out.Replicas = in.Replicas + return nil +} + +// Convert_v1beta1_ScaleSpec_To_scheme_ScaleSpec is an autogenerated conversion function. +func Convert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(in *v1beta1.ScaleSpec, out *scheme.ScaleSpec, s conversion.Scope) error { + return autoConvert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(in, out, s) +} + +func autoConvert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(in *scheme.ScaleSpec, out *v1beta1.ScaleSpec, s conversion.Scope) error { + out.Replicas = in.Replicas + return nil +} + +// Convert_scheme_ScaleSpec_To_v1beta1_ScaleSpec is an autogenerated conversion function. 
+func Convert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(in *scheme.ScaleSpec, out *v1beta1.ScaleSpec, s conversion.Scope) error { + return autoConvert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(in, out, s) +} + +func autoConvert_v1beta1_ScaleStatus_To_scheme_ScaleStatus(in *v1beta1.ScaleStatus, out *scheme.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + // WARNING: in.Selector requires manual conversion: inconvertible types (map[string]string vs *k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector) + // WARNING: in.TargetSelector requires manual conversion: does not exist in peer-type + return nil +} + +func autoConvert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(in *scheme.ScaleStatus, out *v1beta1.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + // WARNING: in.Selector requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector vs map[string]string) + return nil +} diff --git a/vendor/k8s.io/client-go/scale/scheme/register.go b/vendor/k8s.io/client-go/scale/scheme/register.go new file mode 100644 index 000000000000..7e6decfff5f8 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/register.go @@ -0,0 +1,52 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheme + +import ( + autoscalingv1 "k8s.io/api/autoscaling/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = autoscalingv1.GroupName + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Scale{}, + ) + return nil +} diff --git a/vendor/k8s.io/client-go/scale/scheme/types.go b/vendor/k8s.io/client-go/scale/scheme/types.go new file mode 100644 index 000000000000..13aec2b3c3cd --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/types.go @@ -0,0 +1,60 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheme + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// This file contains our own "internal" version of scale that we use for conversions, +// since we can't use the main Kubernetes internal versions. + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Scale represents a scaling request for a resource. +type Scale struct { + metav1.TypeMeta + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta + + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // +optional + Spec ScaleSpec + + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. + // +optional + Status ScaleStatus +} + +// ScaleSpec describes the attributes of a scale subresource. +type ScaleSpec struct { + // desired number of instances for the scaled object. + // +optional + Replicas int32 +} + +// ScaleStatus represents the current status of a scale subresource. +type ScaleStatus struct { + // actual number of observed instances of the scaled object. + Replicas int32 + + // label query over pods that should match the replicas count. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + // +optional + Selector *metav1.LabelSelector +} diff --git a/vendor/k8s.io/client-go/scale/scheme/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/scale/scheme/zz_generated.deepcopy.go new file mode 100644 index 000000000000..19b679583502 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/zz_generated.deepcopy.go @@ -0,0 +1,96 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package scheme + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Scale) DeepCopyInto(out *Scale) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scale. 
+func (in *Scale) DeepCopy() *Scale { + if in == nil { + return nil + } + out := new(Scale) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Scale) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleSpec) DeepCopyInto(out *ScaleSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleSpec. +func (in *ScaleSpec) DeepCopy() *ScaleSpec { + if in == nil { + return nil + } + out := new(ScaleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleStatus) DeepCopyInto(out *ScaleStatus) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if *in == nil { + *out = nil + } else { + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleStatus. +func (in *ScaleStatus) DeepCopy() *ScaleStatus { + if in == nil { + return nil + } + out := new(ScaleStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/client-go/scale/util.go b/vendor/k8s.io/client-go/scale/util.go new file mode 100644 index 000000000000..9eb10853605c --- /dev/null +++ b/vendor/k8s.io/client-go/scale/util.go @@ -0,0 +1,167 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scale + +import ( + "fmt" + "strings" + "sync" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" + scalescheme "k8s.io/client-go/scale/scheme" + scaleappsint "k8s.io/client-go/scale/scheme/appsint" + scaleappsv1beta1 "k8s.io/client-go/scale/scheme/appsv1beta1" + scaleappsv1beta2 "k8s.io/client-go/scale/scheme/appsv1beta2" + scaleautoscaling "k8s.io/client-go/scale/scheme/autoscalingv1" + scaleextint "k8s.io/client-go/scale/scheme/extensionsint" + scaleext "k8s.io/client-go/scale/scheme/extensionsv1beta1" +) + +// ScaleKindResolver knows about the relationship between +// resources and the GroupVersionKind of their scale subresources. +type ScaleKindResolver interface { + // ScaleForResource returns the GroupVersionKind of the + // scale subresource for the given GroupVersionResource. 
+ ScaleForResource(resource schema.GroupVersionResource) (scaleVersion schema.GroupVersionKind, err error) +} + +// discoveryScaleResolver is a ScaleKindResolver that uses +// a DiscoveryInterface to associate resources with their +// scale-kinds +type discoveryScaleResolver struct { + discoveryClient discovery.ServerResourcesInterface +} + +func (r *discoveryScaleResolver) ScaleForResource(inputRes schema.GroupVersionResource) (scaleVersion schema.GroupVersionKind, err error) { + groupVerResources, err := r.discoveryClient.ServerResourcesForGroupVersion(inputRes.GroupVersion().String()) + if err != nil { + return schema.GroupVersionKind{}, fmt.Errorf("unable to fetch discovery information for %s: %v", inputRes.String(), err) + } + + for _, resource := range groupVerResources.APIResources { + resourceParts := strings.SplitN(resource.Name, "/", 2) + if len(resourceParts) != 2 || resourceParts[0] != inputRes.Resource || resourceParts[1] != "scale" { + // skip non-scale resources, or scales for resources that we're not looking for + continue + } + + scaleGV := inputRes.GroupVersion() + if resource.Group != "" && resource.Version != "" { + scaleGV = schema.GroupVersion{ + Group: resource.Group, + Version: resource.Version, + } + } + + return scaleGV.WithKind(resource.Kind), nil + } + + return schema.GroupVersionKind{}, fmt.Errorf("could not find scale subresource for %s in discovery information", inputRes.String()) +} + +// cachedScaleKindResolver is a ScaleKindResolver that caches results +// from another ScaleKindResolver, re-fetching on cache misses. +type cachedScaleKindResolver struct { + base ScaleKindResolver + + cache map[schema.GroupVersionResource]schema.GroupVersionKind + mu sync.RWMutex +} + +func (r *cachedScaleKindResolver) ScaleForResource(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + r.mu.RLock() + gvk, isCached := r.cache[resource] + r.mu.RUnlock() + if isCached { + return gvk, nil + } + + // we could have multiple fetches of the same resources, but that's probably + // better than limiting to only one reader at once (mu.Mutex), + // or blocking checks for other resources while we fetch + // (mu.Lock before fetch). + gvk, err := r.base.ScaleForResource(resource) + if err != nil { + return schema.GroupVersionKind{}, err + } + + r.mu.Lock() + defer r.mu.Unlock() + r.cache[resource] = gvk + + return gvk, nil +} + +// NewDiscoveryScaleKindResolver creates a new ScaleKindResolver which uses information from the given +// disovery client to resolve the correct Scale GroupVersionKind for different resources. +func NewDiscoveryScaleKindResolver(client discovery.ServerResourcesInterface) ScaleKindResolver { + base := &discoveryScaleResolver{ + discoveryClient: client, + } + + return &cachedScaleKindResolver{ + base: base, + cache: make(map[schema.GroupVersionResource]schema.GroupVersionKind), + } +} + +// ScaleConverter knows how to convert between external scale versions. +type ScaleConverter struct { + scheme *runtime.Scheme + internalVersioner runtime.GroupVersioner +} + +// NewScaleConverter creates a new ScaleConverter for converting between +// Scales in autoscaling/v1 and extensions/v1beta1. 
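Before NewScaleConverter's body continues below, here is a minimal usage sketch (illustrative only, not part of the vendored files) showing how the scale helpers in this file could fit together: a discovery-backed, cached ScaleKindResolver to find the scale subresource kind for a resource, and a ScaleConverter to move a Scale between external group-versions. The in-cluster rest.Config and the extensions/v1beta1 "deployments" resource are assumptions made purely for the example, and error handling is abbreviated.

package main

import (
	"fmt"

	autoscalingv1 "k8s.io/api/autoscaling/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/scale"
)

func main() {
	// Assumption: cluster access comes from an in-cluster rest.Config; a
	// kubeconfig-based config would work the same way.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	dc, err := discovery.NewDiscoveryClientForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Resolve the GroupVersionKind of the scale subresource for deployments.
	// The resolver caches answers per GroupVersionResource.
	resolver := scale.NewDiscoveryScaleKindResolver(dc)
	gvk, err := resolver.ScaleForResource(schema.GroupVersionResource{
		Group: "extensions", Version: "v1beta1", Resource: "deployments",
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("scale kind:", gvk)

	// Convert an external Scale between group-versions through the internal
	// hub type registered by the scale scheme packages above.
	converter := scale.NewScaleConverter()
	in := &autoscalingv1.Scale{Spec: autoscalingv1.ScaleSpec{Replicas: 3}}
	out, err := converter.ConvertToVersion(in, schema.GroupVersion{Group: "extensions", Version: "v1beta1"})
	if err != nil {
		panic(err)
	}
	fmt.Printf("converted to %T\n", out)
}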
+func NewScaleConverter() *ScaleConverter { + scheme := runtime.NewScheme() + scaleautoscaling.AddToScheme(scheme) + scalescheme.AddToScheme(scheme) + scaleext.AddToScheme(scheme) + scaleextint.AddToScheme(scheme) + scaleappsint.AddToScheme(scheme) + scaleappsv1beta1.AddToScheme(scheme) + scaleappsv1beta2.AddToScheme(scheme) + + return &ScaleConverter{ + scheme: scheme, + internalVersioner: runtime.NewMultiGroupVersioner( + scalescheme.SchemeGroupVersion, + schema.GroupKind{Group: scaleext.GroupName, Kind: "Scale"}, + schema.GroupKind{Group: scaleautoscaling.GroupName, Kind: "Scale"}, + schema.GroupKind{Group: scaleappsv1beta1.GroupName, Kind: "Scale"}, + schema.GroupKind{Group: scaleappsv1beta2.GroupName, Kind: "Scale"}, + ), + } +} + +// Scheme returns the scheme used by this scale converter. +func (c *ScaleConverter) Scheme() *runtime.Scheme { + return c.scheme +} + +// ConvertToVersion converts the given *external* input object to the given output *external* output group-version. +func (c *ScaleConverter) ConvertToVersion(in runtime.Object, outVersion schema.GroupVersion) (runtime.Object, error) { + scaleInt, err := c.scheme.ConvertToVersion(in, c.internalVersioner) + if err != nil { + return nil, err + } + + return c.scheme.ConvertToVersion(scaleInt, outVersion) +} diff --git a/vendor/k8s.io/client-go/testing/BUILD b/vendor/k8s.io/client-go/testing/BUILD index 3eb2728b3b96..b26e662876dc 100644 --- a/vendor/k8s.io/client-go/testing/BUILD +++ b/vendor/k8s.io/client-go/testing/BUILD @@ -12,6 +12,7 @@ go_library( "fake.go", "fixture.go", ], + importpath = "k8s.io/client-go/testing", deps = [ "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", diff --git a/vendor/k8s.io/client-go/testing/fixture.go b/vendor/k8s.io/client-go/testing/fixture.go index a53c960c2311..08379fb0897d 100644 --- a/vendor/k8s.io/client-go/testing/fixture.go +++ b/vendor/k8s.io/client-go/testing/fixture.go @@ -59,7 +59,6 @@ type ObjectTracker interface { // ObjectScheme abstracts the implementation of common operations on objects. type ObjectScheme interface { runtime.ObjectCreater - runtime.ObjectCopier runtime.ObjectTyper } @@ -183,10 +182,7 @@ func (t *tracker) List(gvr schema.GroupVersionResource, gvk schema.GroupVersionK if err := meta.SetList(list, matchingObjs); err != nil { return nil, err } - if list, err = t.scheme.Copy(list); err != nil { - return nil, err - } - return list, nil + return list.DeepCopyObject(), nil } func (t *tracker) Get(gvr schema.GroupVersionResource, ns, name string) (runtime.Object, error) { @@ -214,11 +210,7 @@ func (t *tracker) Get(gvr schema.GroupVersionResource, ns, name string) (runtime // Only one object should match in the tracker if it works // correctly, as Add/Update methods enforce kind/namespace/name // uniqueness. - obj, err := t.scheme.Copy(matchingObjs[0]) - if err != nil { - return nil, err - } - + obj := matchingObjs[0].DeepCopyObject() if status, ok := obj.(*metav1.Status); ok { if status.Status != metav1.StatusSuccess { return nil, &errors.StatusError{ErrStatus: *status} @@ -280,10 +272,7 @@ func (t *tracker) add(gvr schema.GroupVersionResource, obj runtime.Object, ns st // To avoid the object from being accidentally modified by caller // after it's been added to the tracker, we always store the deep // copy. 
- obj, err := t.scheme.Copy(obj) - if err != nil { - return err - } + obj = obj.DeepCopyObject() newMeta, err := meta.Accessor(obj) if err != nil { diff --git a/vendor/k8s.io/client-go/third_party/forked/golang/template/BUILD b/vendor/k8s.io/client-go/third_party/forked/golang/template/BUILD index 26dd36f17edc..cebdf7b63a49 100644 --- a/vendor/k8s.io/client-go/third_party/forked/golang/template/BUILD +++ b/vendor/k8s.io/client-go/third_party/forked/golang/template/BUILD @@ -11,6 +11,7 @@ go_library( "exec.go", "funcs.go", ], + importpath = "k8s.io/client-go/third_party/forked/golang/template", ) filegroup( diff --git a/vendor/k8s.io/client-go/tools/auth/BUILD b/vendor/k8s.io/client-go/tools/auth/BUILD index 5f5238bb52ff..b418469efa59 100644 --- a/vendor/k8s.io/client-go/tools/auth/BUILD +++ b/vendor/k8s.io/client-go/tools/auth/BUILD @@ -9,12 +9,14 @@ load( go_library( name = "go_default_library", srcs = ["clientauth.go"], + importpath = "k8s.io/client-go/tools/auth", deps = ["//vendor/k8s.io/client-go/rest:go_default_library"], ) go_test( name = "go_default_xtest", srcs = ["clientauth_test.go"], + importpath = "k8s.io/client-go/tools/auth_test", deps = ["//vendor/k8s.io/client-go/tools/auth:go_default_library"], ) diff --git a/vendor/k8s.io/client-go/tools/cache/BUILD b/vendor/k8s.io/client-go/tools/cache/BUILD index 3531eba74c1b..79e21e1ea51c 100644 --- a/vendor/k8s.io/client-go/tools/cache/BUILD +++ b/vendor/k8s.io/client-go/tools/cache/BUILD @@ -22,6 +22,8 @@ go_test( "store_test.go", "undelta_store_test.go", ], + features = ["-race"], + importpath = "k8s.io/client-go/tools/cache", library = ":go_default_library", deps = [ "//vendor/github.com/google/gofuzz:go_default_library", @@ -33,7 +35,6 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", "//vendor/k8s.io/client-go/tools/cache/testing:go_default_library", ], ) @@ -61,6 +62,7 @@ go_library( "thread_safe_store.go", "undelta_store.go", ], + importpath = "k8s.io/client-go/tools/cache", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/net/context:go_default_library", @@ -78,9 +80,9 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", "//vendor/k8s.io/client-go/tools/pager:go_default_library", + "//vendor/k8s.io/client-go/util/buffer:go_default_library", ], ) diff --git a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go index a71db60488d7..f06d1c5b1cf0 100644 --- a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go +++ b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go @@ -539,6 +539,10 @@ func (f *DeltaFIFO) Resync() error { f.lock.Lock() defer f.lock.Unlock() + if f.knownObjects == nil { + return nil + } + keys := f.knownObjects.ListKeys() for _, k := range keys { if err := f.syncKeyLocked(k); err != nil { diff --git a/vendor/k8s.io/client-go/tools/cache/fifo.go b/vendor/k8s.io/client-go/tools/cache/fifo.go index 3f6e2a9480a3..ef70b7aca1d6 100644 --- a/vendor/k8s.io/client-go/tools/cache/fifo.go +++ b/vendor/k8s.io/client-go/tools/cache/fifo.go @@ -169,7 +169,7 @@ 
func (f *FIFO) AddIfNotPresent(obj interface{}) error { return nil } -// addIfNotPresent assumes the fifo lock is already held and adds the the provided +// addIfNotPresent assumes the fifo lock is already held and adds the provided // item to the queue under id if it does not already exist. func (f *FIFO) addIfNotPresent(id string, obj interface{}) { f.populated = true diff --git a/vendor/k8s.io/client-go/tools/cache/listwatch.go b/vendor/k8s.io/client-go/tools/cache/listwatch.go index 55a90b631d61..db2329c55a24 100644 --- a/vendor/k8s.io/client-go/tools/cache/listwatch.go +++ b/vendor/k8s.io/client-go/tools/cache/listwatch.go @@ -25,6 +25,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/pager" @@ -51,8 +52,7 @@ type WatchFunc func(options metav1.ListOptions) (watch.Interface, error) type ListWatch struct { ListFunc ListFunc WatchFunc WatchFunc - // DisableChunking requests no chunking for this list watcher. It has no effect in Kubernetes 1.8, but in - // 1.9 will allow a controller to opt out of chunking. + // DisableChunking requests no chunking for this list watcher. DisableChunking bool } @@ -93,9 +93,7 @@ func timeoutFromListOptions(options metav1.ListOptions) time.Duration { // List a set of apiserver resources func (lw *ListWatch) List(options metav1.ListOptions) (runtime.Object, error) { - // chunking will become the default for list watchers starting in Kubernetes 1.9, unless - // otherwise disabled. - if false && !lw.DisableChunking { + if !lw.DisableChunking { return pager.New(pager.SimplePageFunc(lw.ListFunc)).List(context.TODO(), options) } return lw.ListFunc(options) @@ -106,6 +104,8 @@ func (lw *ListWatch) Watch(options metav1.ListOptions) (watch.Interface, error) return lw.WatchFunc(options) } +// ListWatchUntil checks the provided conditions against the items returned by the list watcher, returning wait.ErrWaitTimeout +// if timeout is exceeded without all conditions returning true, or an error if an error occurs. // TODO: check for watch expired error and retry watch from latest point? Same issue exists for Until. func ListWatchUntil(timeout time.Duration, lw ListerWatcher, conditions ...watch.ConditionFunc) (*watch.Event, error) { if len(conditions) == 0 { @@ -169,5 +169,10 @@ func ListWatchUntil(timeout time.Duration, lw ListerWatcher, conditions ...watch return nil, err } - return watch.Until(timeout, watchInterface, remainingConditions...) + evt, err := watch.Until(timeout, watchInterface, remainingConditions...) 
+ if err == watch.ErrWatchClosed { + // present a consistent error interface to callers + err = wait.ErrWaitTimeout + } + return evt, err } diff --git a/vendor/k8s.io/client-go/tools/cache/mutation_detector.go b/vendor/k8s.io/client-go/tools/cache/mutation_detector.go index 8c067cab90c5..8e6338a1ba72 100644 --- a/vendor/k8s.io/client-go/tools/cache/mutation_detector.go +++ b/vendor/k8s.io/client-go/tools/cache/mutation_detector.go @@ -26,7 +26,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/diff" - "k8s.io/client-go/kubernetes/scheme" ) var mutationDetectionEnabled = false @@ -96,18 +95,13 @@ func (d *defaultCacheMutationDetector) AddObject(obj interface{}) { if _, ok := obj.(DeletedFinalStateUnknown); ok { return } - if _, ok := obj.(runtime.Object); !ok { - return - } + if obj, ok := obj.(runtime.Object); ok { + copiedObj := obj.DeepCopyObject() - copiedObj, err := scheme.Scheme.Copy(obj.(runtime.Object)) - if err != nil { - return + d.lock.Lock() + defer d.lock.Unlock() + d.cachedObjs = append(d.cachedObjs, cacheObj{cached: obj, copied: copiedObj}) } - - d.lock.Lock() - defer d.lock.Unlock() - d.cachedObjs = append(d.cachedObjs, cacheObj{cached: obj, copied: copiedObj}) } func (d *defaultCacheMutationDetector) CompareObjects() { diff --git a/vendor/k8s.io/client-go/tools/cache/reflector.go b/vendor/k8s.io/client-go/tools/cache/reflector.go index 4967f98d4600..a97b5f98abb9 100644 --- a/vendor/k8s.io/client-go/tools/cache/reflector.go +++ b/vendor/k8s.io/client-go/tools/cache/reflector.go @@ -109,7 +109,7 @@ func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, r := &Reflector{ name: name, // we need this to be unique per process (some names are still the same)but obvious who it belongs to - metrics: newReflectorMetrics(makeValidPromethusMetricName(fmt.Sprintf("reflector_"+name+"_%d", reflectorSuffix))), + metrics: newReflectorMetrics(makeValidPromethusMetricLabel(fmt.Sprintf("reflector_"+name+"_%d", reflectorSuffix))), listerWatcher: lw, store: store, expectedType: reflect.TypeOf(expectedType), @@ -120,9 +120,9 @@ func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, return r } -func makeValidPromethusMetricName(in string) string { +func makeValidPromethusMetricLabel(in string) string { // this isn't perfect, but it removes our common characters - return strings.NewReplacer("/", "_", ".", "_", "-", "_").Replace(in) + return strings.NewReplacer("/", "_", ".", "_", "-", "_", ":", "_").Replace(in) } // internalPackages are packages that ignored when creating a default reflector name. 
These packages are in the common @@ -295,6 +295,13 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { }() for { + // give the stopCh a chance to stop the loop, even in case of continue statements further down on errors + select { + case <-stopCh: + return nil + default: + } + timemoutseconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0)) options = metav1.ListOptions{ ResourceVersion: resourceVersion, diff --git a/vendor/k8s.io/client-go/tools/cache/shared_informer.go b/vendor/k8s.io/client-go/tools/cache/shared_informer.go index 451ef88a7bb2..b11f0ebdd77c 100644 --- a/vendor/k8s.io/client-go/tools/cache/shared_informer.go +++ b/vendor/k8s.io/client-go/tools/cache/shared_informer.go @@ -25,6 +25,7 @@ import ( "k8s.io/apimachinery/pkg/util/clock" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/buffer" "github.com/golang/glog" ) @@ -92,8 +93,13 @@ func NewSharedIndexInformer(lw ListerWatcher, objType runtime.Object, defaultEve // InformerSynced is a function that can be used to determine if an informer has synced. This is useful for determining if caches have synced. type InformerSynced func() bool -// syncedPollPeriod controls how often you look at the status of your sync funcs -const syncedPollPeriod = 100 * time.Millisecond +const ( + // syncedPollPeriod controls how often you look at the status of your sync funcs + syncedPollPeriod = 100 * time.Millisecond + + // initialBufferSize is the initial number of event notifications that can be buffered. + initialBufferSize = 1024 +) // WaitForCacheSync waits for caches to populate. It returns true if it was successful, false // if the controller should shutdown @@ -313,7 +319,7 @@ func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEv } } - listener := newProcessListener(handler, resyncPeriod, determineResyncPeriod(resyncPeriod, s.resyncCheckPeriod), s.clock.Now()) + listener := newProcessListener(handler, resyncPeriod, determineResyncPeriod(resyncPeriod, s.resyncCheckPeriod), s.clock.Now(), initialBufferSize) if !s.started { s.processor.addListener(listener) @@ -465,6 +471,13 @@ type processorListener struct { handler ResourceEventHandler + // pendingNotifications is an unbounded ring buffer that holds all notifications not yet distributed. + // There is one per listener, but a failing/stalled listener will have infinite pendingNotifications + // added until we OOM. + // TODO: This is no worse than before, since reflectors were backed by unbounded DeltaFIFOs, but + // we should try to do something better. + pendingNotifications buffer.RingGrowing + // requestedResyncPeriod is how frequently the listener wants a full resync from the shared informer requestedResyncPeriod time.Duration // resyncPeriod is how frequently the listener wants a full resync from the shared informer. 
This @@ -477,11 +490,12 @@ type processorListener struct { resyncLock sync.Mutex } -func newProcessListener(handler ResourceEventHandler, requestedResyncPeriod, resyncPeriod time.Duration, now time.Time) *processorListener { +func newProcessListener(handler ResourceEventHandler, requestedResyncPeriod, resyncPeriod time.Duration, now time.Time, bufferSize int) *processorListener { ret := &processorListener{ nextCh: make(chan interface{}), addCh: make(chan interface{}), handler: handler, + pendingNotifications: *buffer.NewRingGrowing(bufferSize), requestedResyncPeriod: requestedResyncPeriod, resyncPeriod: resyncPeriod, } @@ -499,25 +513,16 @@ func (p *processorListener) pop() { defer utilruntime.HandleCrash() defer close(p.nextCh) // Tell .run() to stop - // pendingNotifications is an unbounded slice that holds all notifications not yet distributed - // there is one per listener, but a failing/stalled listener will have infinite pendingNotifications - // added until we OOM. - // TODO This is no worse than before, since reflectors were backed by unbounded DeltaFIFOs, but - // we should try to do something better - var pendingNotifications []interface{} var nextCh chan<- interface{} var notification interface{} for { select { case nextCh <- notification: // Notification dispatched - if len(pendingNotifications) == 0 { // Nothing to pop + var ok bool + notification, ok = p.pendingNotifications.ReadOne() + if !ok { // Nothing to pop nextCh = nil // Disable this select case - notification = nil - } else { - notification = pendingNotifications[0] - pendingNotifications[0] = nil - pendingNotifications = pendingNotifications[1:] } case notificationToAdd, ok := <-p.addCh: if !ok { @@ -528,7 +533,7 @@ func (p *processorListener) pop() { notification = notificationToAdd nextCh = p.nextCh } else { // There is already a notification waiting to be dispatched - pendingNotifications = append(pendingNotifications, notificationToAdd) + p.pendingNotifications.WriteOne(notificationToAdd) } } } diff --git a/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go b/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go index 4eb350c439a0..1c201efb6292 100644 --- a/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go +++ b/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go @@ -241,7 +241,7 @@ func (c *threadSafeMap) AddIndexers(newIndexers Indexers) error { // updateIndices modifies the objects location in the managed indexes, if this is an update, you must provide an oldObj // updateIndices must be called from a function that already has a lock on the cache -func (c *threadSafeMap) updateIndices(oldObj interface{}, newObj interface{}, key string) error { +func (c *threadSafeMap) updateIndices(oldObj interface{}, newObj interface{}, key string) { // if we got an old object, we need to remove it before we add it again if oldObj != nil { c.deleteFromIndices(oldObj, key) @@ -249,7 +249,7 @@ func (c *threadSafeMap) updateIndices(oldObj interface{}, newObj interface{}, ke for name, indexFunc := range c.indexers { indexValues, err := indexFunc(newObj) if err != nil { - return err + panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err)) } index := c.indices[name] if index == nil { @@ -266,16 +266,15 @@ func (c *threadSafeMap) updateIndices(oldObj interface{}, newObj interface{}, ke set.Insert(key) } } - return nil } // deleteFromIndices removes the object from each of the managed indexes // it is intended to be called from a function that already has a lock on the 
cache -func (c *threadSafeMap) deleteFromIndices(obj interface{}, key string) error { +func (c *threadSafeMap) deleteFromIndices(obj interface{}, key string) { for name, indexFunc := range c.indexers { indexValues, err := indexFunc(obj) if err != nil { - return err + panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err)) } index := c.indices[name] @@ -289,7 +288,6 @@ func (c *threadSafeMap) deleteFromIndices(obj interface{}, key string) error { } } } - return nil } func (c *threadSafeMap) Resync() error { diff --git a/vendor/k8s.io/client-go/tools/clientcmd/BUILD b/vendor/k8s.io/client-go/tools/clientcmd/BUILD index 7767cd7d830a..77a8d2229f69 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/BUILD +++ b/vendor/k8s.io/client-go/tools/clientcmd/BUILD @@ -15,6 +15,7 @@ go_test( "overrides_test.go", "validation_test.go", ], + importpath = "k8s.io/client-go/tools/clientcmd", library = ":go_default_library", deps = [ "//vendor/github.com/ghodss/yaml:go_default_library", @@ -42,6 +43,7 @@ go_library( "overrides.go", "validation.go", ], + importpath = "k8s.io/client-go/tools/clientcmd", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/howeyc/gopass:go_default_library", diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/BUILD b/vendor/k8s.io/client-go/tools/clientcmd/api/BUILD index 443b31f79391..d46f4e28fb10 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/BUILD +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/BUILD @@ -12,6 +12,7 @@ go_test( "helpers_test.go", "types_test.go", ], + importpath = "k8s.io/client-go/tools/clientcmd/api", library = ":go_default_library", deps = ["//vendor/github.com/ghodss/yaml:go_default_library"], ) @@ -25,8 +26,8 @@ go_library( "types.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/client-go/tools/clientcmd/api", deps = [ - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go b/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go index 93a891e9543e..0a081871ac82 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go @@ -14,5 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package package api diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/latest/BUILD b/vendor/k8s.io/client-go/tools/clientcmd/api/latest/BUILD index deab93e06c93..308319e22081 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/latest/BUILD +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/latest/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["latest.go"], + importpath = "k8s.io/client-go/tools/clientcmd/api/latest", deps = [ "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/BUILD b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/BUILD index f95ee400e132..2b9daaf08644 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/BUILD +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/BUILD @@ -14,6 +14,7 @@ go_library( "types.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/client-go/tools/clientcmd/api/v1", deps = [ "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go index c8ede58a3424..9750cf73acca 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go @@ -14,5 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package package v1 diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.deepcopy.go index b123a9fbe2e8..8d634671b058 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.deepcopy.go @@ -21,64 +21,9 @@ limitations under the License. package v1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
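The hunks here and below remove the deprecated RegisterDeepCopies scheme registration in favor of the statically generated DeepCopy/DeepCopyInto/DeepCopyObject methods, the same shift seen earlier in fixture.go and mutation_detector.go. As a hedged sketch (assuming the standard generated DeepCopy method on the clientcmd api Config type), the replacement pattern is simply a method call with no scheme lookup and no error return:

package main

import (
	"fmt"

	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

func main() {
	original := clientcmdapi.NewConfig()
	original.CurrentContext = "minikube"

	// Static deepcopy: a generated method on the type itself replaces the
	// old scheme.Copy round trip.
	copied := original.DeepCopy()
	copied.CurrentContext = "other-context"

	// The copy is independent of the original.
	fmt.Println(original.CurrentContext, copied.CurrentContext)
}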
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AuthInfo).DeepCopyInto(out.(*AuthInfo)) - return nil - }, InType: reflect.TypeOf(&AuthInfo{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AuthProviderConfig).DeepCopyInto(out.(*AuthProviderConfig)) - return nil - }, InType: reflect.TypeOf(&AuthProviderConfig{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Cluster).DeepCopyInto(out.(*Cluster)) - return nil - }, InType: reflect.TypeOf(&Cluster{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Config).DeepCopyInto(out.(*Config)) - return nil - }, InType: reflect.TypeOf(&Config{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Context).DeepCopyInto(out.(*Context)) - return nil - }, InType: reflect.TypeOf(&Context{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NamedAuthInfo).DeepCopyInto(out.(*NamedAuthInfo)) - return nil - }, InType: reflect.TypeOf(&NamedAuthInfo{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NamedCluster).DeepCopyInto(out.(*NamedCluster)) - return nil - }, InType: reflect.TypeOf(&NamedCluster{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NamedContext).DeepCopyInto(out.(*NamedContext)) - return nil - }, InType: reflect.TypeOf(&NamedContext{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NamedExtension).DeepCopyInto(out.(*NamedExtension)) - return nil - }, InType: reflect.TypeOf(&NamedExtension{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Preferences).DeepCopyInto(out.(*Preferences)) - return nil - }, InType: reflect.TypeOf(&Preferences{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AuthInfo) DeepCopyInto(out *AuthInfo) { *out = *in diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go index b787f0ddf829..51668f05bbe2 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go @@ -21,48 +21,9 @@ limitations under the License. package api import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AuthInfo).DeepCopyInto(out.(*AuthInfo)) - return nil - }, InType: reflect.TypeOf(&AuthInfo{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AuthProviderConfig).DeepCopyInto(out.(*AuthProviderConfig)) - return nil - }, InType: reflect.TypeOf(&AuthProviderConfig{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Cluster).DeepCopyInto(out.(*Cluster)) - return nil - }, InType: reflect.TypeOf(&Cluster{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Config).DeepCopyInto(out.(*Config)) - return nil - }, InType: reflect.TypeOf(&Config{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Context).DeepCopyInto(out.(*Context)) - return nil - }, InType: reflect.TypeOf(&Context{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Preferences).DeepCopyInto(out.(*Preferences)) - return nil - }, InType: reflect.TypeOf(&Preferences{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AuthInfo) DeepCopyInto(out *AuthInfo) { *out = *in diff --git a/vendor/k8s.io/client-go/tools/leaderelection/BUILD b/vendor/k8s.io/client-go/tools/leaderelection/BUILD index 103c0c5a1830..5ea32efdebf0 100644 --- a/vendor/k8s.io/client-go/tools/leaderelection/BUILD +++ b/vendor/k8s.io/client-go/tools/leaderelection/BUILD @@ -9,6 +9,7 @@ load( go_library( name = "go_default_library", srcs = ["leaderelection.go"], + importpath = "k8s.io/client-go/tools/leaderelection", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", @@ -22,6 +23,7 @@ go_library( go_test( name = "go_default_test", srcs = ["leaderelection_test.go"], + importpath = "k8s.io/client-go/tools/leaderelection", library = ":go_default_library", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/BUILD b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/BUILD index 51a814e350aa..8a9f8104f830 100644 --- a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/BUILD +++ b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/BUILD @@ -12,6 +12,7 @@ go_library( "endpointslock.go", "interface.go", ], + importpath = "k8s.io/client-go/tools/leaderelection/resourcelock", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/client-go/tools/metrics/BUILD b/vendor/k8s.io/client-go/tools/metrics/BUILD index 8a0344488be6..7b06e0f967ef 100644 --- a/vendor/k8s.io/client-go/tools/metrics/BUILD +++ b/vendor/k8s.io/client-go/tools/metrics/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["metrics.go"], + importpath = "k8s.io/client-go/tools/metrics", ) filegroup( diff --git a/vendor/k8s.io/client-go/tools/pager/BUILD b/vendor/k8s.io/client-go/tools/pager/BUILD index e7b95c40da90..c4a2d4d1cac4 100644 --- a/vendor/k8s.io/client-go/tools/pager/BUILD +++ 
b/vendor/k8s.io/client-go/tools/pager/BUILD @@ -1,7 +1,5 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", @@ -11,7 +9,7 @@ load( go_library( name = "go_default_library", srcs = ["pager.go"], - tags = ["automanaged"], + importpath = "k8s.io/client-go/tools/pager", deps = [ "//vendor/golang.org/x/net/context:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", @@ -39,6 +37,7 @@ filegroup( go_test( name = "go_default_test", srcs = ["pager_test.go"], + importpath = "k8s.io/client-go/tools/pager", library = ":go_default_library", deps = [ "//vendor/golang.org/x/net/context:go_default_library", diff --git a/vendor/k8s.io/client-go/tools/record/BUILD b/vendor/k8s.io/client-go/tools/record/BUILD index fe5af78fad0b..f89aa3e2896a 100644 --- a/vendor/k8s.io/client-go/tools/record/BUILD +++ b/vendor/k8s.io/client-go/tools/record/BUILD @@ -12,6 +12,7 @@ go_test( "event_test.go", "events_cache_test.go", ], + importpath = "k8s.io/client-go/tools/record", library = ":go_default_library", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", @@ -35,6 +36,7 @@ go_library( "events_cache.go", "fake.go", ], + importpath = "k8s.io/client-go/tools/record", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/golang/groupcache/lru:go_default_library", diff --git a/vendor/k8s.io/client-go/tools/reference/BUILD b/vendor/k8s.io/client-go/tools/reference/BUILD index ea1c113e07f4..47da958ab92e 100644 --- a/vendor/k8s.io/client-go/tools/reference/BUILD +++ b/vendor/k8s.io/client-go/tools/reference/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["ref.go"], + importpath = "k8s.io/client-go/tools/reference", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", diff --git a/vendor/k8s.io/client-go/tools/remotecommand/BUILD b/vendor/k8s.io/client-go/tools/remotecommand/BUILD index 9d061ed06876..e665af3f4d86 100644 --- a/vendor/k8s.io/client-go/tools/remotecommand/BUILD +++ b/vendor/k8s.io/client-go/tools/remotecommand/BUILD @@ -12,6 +12,7 @@ go_test( "v2_test.go", "v4_test.go", ], + importpath = "k8s.io/client-go/tools/remotecommand", library = ":go_default_library", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", @@ -32,6 +33,7 @@ go_library( "v3.go", "v4.go", ], + importpath = "k8s.io/client-go/tools/remotecommand", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", @@ -40,7 +42,6 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/remotecommand:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/client-go/transport:go_default_library", "//vendor/k8s.io/client-go/transport/spdy:go_default_library", "//vendor/k8s.io/client-go/util/exec:go_default_library", ], diff --git a/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go b/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go index bcbe9fcd4f0e..6b69f366e484 100644 --- a/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go +++ b/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go @@ -27,7 +27,6 @@ import ( "k8s.io/apimachinery/pkg/util/httpstream" "k8s.io/apimachinery/pkg/util/remotecommand" restclient "k8s.io/client-go/rest" - "k8s.io/client-go/transport" spdy "k8s.io/client-go/transport/spdy" ) @@ -72,8 
+71,18 @@ type streamExecutor struct { // NewSPDYExecutor connects to the provided server and upgrades the connection to // multiplexed bidirectional streams. func NewSPDYExecutor(config *restclient.Config, method string, url *url.URL) (Executor, error) { + wrapper, upgradeRoundTripper, err := spdy.RoundTripperFor(config) + if err != nil { + return nil, err + } + return NewSPDYExecutorForTransports(wrapper, upgradeRoundTripper, method, url) +} + +// NewSPDYExecutorForTransports connects to the provided server using the given transport, +// upgrades the response using the given upgrader to multiplexed bidirectional streams. +func NewSPDYExecutorForTransports(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL) (Executor, error) { return NewSPDYExecutorForProtocols( - config, method, url, + transport, upgrader, method, url, remotecommand.StreamProtocolV4Name, remotecommand.StreamProtocolV3Name, remotecommand.StreamProtocolV2Name, @@ -83,16 +92,11 @@ func NewSPDYExecutor(config *restclient.Config, method string, url *url.URL) (Ex // NewSPDYExecutorForProtocols connects to the provided server and upgrades the connection to // multiplexed bidirectional streams using only the provided protocols. Exposed for testing, most -// callers should use NewSPDYExecutor. -func NewSPDYExecutorForProtocols(config *restclient.Config, method string, url *url.URL, protocols ...string) (Executor, error) { - wrapper, upgradeRoundTripper, err := spdy.RoundTripperFor(config) - if err != nil { - return nil, err - } - wrapper = transport.DebugWrappers(wrapper) +// callers should use NewSPDYExecutor or NewSPDYExecutorForTransports. +func NewSPDYExecutorForProtocols(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL, protocols ...string) (Executor, error) { return &streamExecutor{ - upgrader: upgradeRoundTripper, - transport: wrapper, + upgrader: upgrader, + transport: transport, method: method, url: url, protocols: protocols, diff --git a/vendor/k8s.io/client-go/transport/BUILD b/vendor/k8s.io/client-go/transport/BUILD index c03ed48cf5fc..91c3831b2347 100644 --- a/vendor/k8s.io/client-go/transport/BUILD +++ b/vendor/k8s.io/client-go/transport/BUILD @@ -13,6 +13,7 @@ go_test( "round_trippers_test.go", "transport_test.go", ], + importpath = "k8s.io/client-go/transport", library = ":go_default_library", ) @@ -24,6 +25,7 @@ go_library( "round_trippers.go", "transport.go", ], + importpath = "k8s.io/client-go/transport", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/gregjones/httpcache:go_default_library", diff --git a/vendor/k8s.io/client-go/transport/cache.go b/vendor/k8s.io/client-go/transport/cache.go index 561c92c18190..da22cdee5904 100644 --- a/vendor/k8s.io/client-go/transport/cache.go +++ b/vendor/k8s.io/client-go/transport/cache.go @@ -88,5 +88,5 @@ func tlsConfigKey(c *Config) (string, error) { return "", err } // Only include the things that actually affect the tls.Config - return fmt.Sprintf("%v/%x/%x/%x", c.TLS.Insecure, c.TLS.CAData, c.TLS.CertData, c.TLS.KeyData), nil + return fmt.Sprintf("%v/%x/%x/%x/%v", c.TLS.Insecure, c.TLS.CAData, c.TLS.CertData, c.TLS.KeyData, c.TLS.ServerName), nil } diff --git a/vendor/k8s.io/client-go/transport/spdy/BUILD b/vendor/k8s.io/client-go/transport/spdy/BUILD index 7e5145c990e9..bf90084b6513 100644 --- a/vendor/k8s.io/client-go/transport/spdy/BUILD +++ b/vendor/k8s.io/client-go/transport/spdy/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["spdy.go"], 
+ importpath = "k8s.io/client-go/transport/spdy", deps = [ "//vendor/k8s.io/apimachinery/pkg/util/httpstream:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy:go_default_library", diff --git a/vendor/k8s.io/client-go/util/buffer/BUILD b/vendor/k8s.io/client-go/util/buffer/BUILD new file mode 100644 index 000000000000..b5629d5cb978 --- /dev/null +++ b/vendor/k8s.io/client-go/util/buffer/BUILD @@ -0,0 +1,30 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["ring_growing.go"], + importpath = "k8s.io/client-go/util/buffer", + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = ["ring_growing_test.go"], + importpath = "k8s.io/client-go/util/buffer", + library = ":go_default_library", + deps = ["//vendor/github.com/stretchr/testify/assert:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/client-go/util/buffer/ring_growing.go b/vendor/k8s.io/client-go/util/buffer/ring_growing.go new file mode 100644 index 000000000000..86965a51311c --- /dev/null +++ b/vendor/k8s.io/client-go/util/buffer/ring_growing.go @@ -0,0 +1,72 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package buffer + +// RingGrowing is a growing ring buffer. +// Not thread safe. +type RingGrowing struct { + data []interface{} + n int // Size of Data + beg int // First available element + readable int // Number of data items available +} + +// NewRingGrowing constructs a new RingGrowing instance with provided parameters. +func NewRingGrowing(initialSize int) *RingGrowing { + return &RingGrowing{ + data: make([]interface{}, initialSize), + n: initialSize, + } +} + +// ReadOne reads (consumes) first item from the buffer if it is available, otherwise returns false. +func (r *RingGrowing) ReadOne() (data interface{}, ok bool) { + if r.readable == 0 { + return nil, false + } + r.readable-- + element := r.data[r.beg] + r.data[r.beg] = nil // Remove reference to the object to help GC + if r.beg == r.n-1 { + // Was the last element + r.beg = 0 + } else { + r.beg++ + } + return element, true +} + +// WriteOne adds an item to the end of the buffer, growing it if it is full. 
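Before the WriteOne implementation continues below, a small usage sketch for this ring buffer (illustrative only, not part of the vendored file; the initial size of 2 is an arbitrary choice): WriteOne grows the ring when it is full, and ReadOne drains items in FIFO order, reporting ok=false once the buffer is empty.

package main

import (
	"fmt"

	"k8s.io/client-go/util/buffer"
)

func main() {
	// Start with a deliberately small ring; WriteOne doubles the capacity
	// automatically once it fills up.
	ring := buffer.NewRingGrowing(2)
	for i := 0; i < 5; i++ {
		ring.WriteOne(i)
	}

	// ReadOne consumes items in the order they were written.
	for {
		item, ok := ring.ReadOne()
		if !ok {
			break
		}
		fmt.Println(item) // prints 0 1 2 3 4
	}
}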
+func (r *RingGrowing) WriteOne(data interface{}) { + if r.readable == r.n { + // Time to grow + newN := r.n * 2 + newData := make([]interface{}, newN) + to := r.beg + r.readable + if to <= r.n { + copy(newData, r.data[r.beg:to]) + } else { + copied := copy(newData, r.data[r.beg:]) + copy(newData[copied:], r.data[:(to%r.n)]) + } + r.beg = 0 + r.data = newData + r.n = newN + } + r.data[(r.readable+r.beg)%r.n] = data + r.readable++ +} diff --git a/vendor/k8s.io/client-go/util/cert/BUILD b/vendor/k8s.io/client-go/util/cert/BUILD index d8076548ce85..93ca7c9c6c54 100644 --- a/vendor/k8s.io/client-go/util/cert/BUILD +++ b/vendor/k8s.io/client-go/util/cert/BUILD @@ -13,6 +13,7 @@ go_test( "pem_test.go", ], data = glob(["testdata/**"]), + importpath = "k8s.io/client-go/util/cert", library = ":go_default_library", ) @@ -27,6 +28,7 @@ go_library( data = [ "testdata/dontUseThisKey.pem", ], + importpath = "k8s.io/client-go/util/cert", ) filegroup( diff --git a/vendor/k8s.io/client-go/util/cert/io.go b/vendor/k8s.io/client-go/util/cert/io.go index 487456b69c0a..a41f8054a0eb 100644 --- a/vendor/k8s.io/client-go/util/cert/io.go +++ b/vendor/k8s.io/client-go/util/cert/io.go @@ -66,10 +66,7 @@ func WriteCert(certPath string, data []byte) error { if err := os.MkdirAll(filepath.Dir(certPath), os.FileMode(0755)); err != nil { return err } - if err := ioutil.WriteFile(certPath, data, os.FileMode(0644)); err != nil { - return err - } - return nil + return ioutil.WriteFile(certPath, data, os.FileMode(0644)) } // WriteKey writes the pem-encoded key data to keyPath. @@ -80,10 +77,7 @@ func WriteKey(keyPath string, data []byte) error { if err := os.MkdirAll(filepath.Dir(keyPath), os.FileMode(0755)); err != nil { return err } - if err := ioutil.WriteFile(keyPath, data, os.FileMode(0600)); err != nil { - return err - } - return nil + return ioutil.WriteFile(keyPath, data, os.FileMode(0600)) } // LoadOrGenerateKeyFile looks for a key in the file at the given path. 
If it diff --git a/vendor/k8s.io/client-go/util/certificate/BUILD b/vendor/k8s.io/client-go/util/certificate/BUILD new file mode 100644 index 000000000000..f10a2d9e21eb --- /dev/null +++ b/vendor/k8s.io/client-go/util/certificate/BUILD @@ -0,0 +1,67 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = [ + "certificate_manager_test.go", + "certificate_store_test.go", + ], + importpath = "k8s.io/client-go/util/certificate", + library = ":go_default_library", + tags = ["automanaged"], + deps = [ + "//vendor/k8s.io/api/certificates/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1:go_default_library", + "//vendor/k8s.io/client-go/util/cert:go_default_library", + ], +) + +go_library( + name = "go_default_library", + srcs = [ + "certificate_manager.go", + "certificate_store.go", + ], + importpath = "k8s.io/client-go/util/certificate", + tags = ["automanaged"], + deps = [ + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/api/certificates/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1:go_default_library", + "//vendor/k8s.io/client-go/util/cert:go_default_library", + "//vendor/k8s.io/client-go/util/certificate/csr:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//staging/src/k8s.io/client-go/util/certificate/csr:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/client-go/util/certificate/OWNERS b/vendor/k8s.io/client-go/util/certificate/OWNERS new file mode 100644 index 000000000000..2dce803b34dd --- /dev/null +++ b/vendor/k8s.io/client-go/util/certificate/OWNERS @@ -0,0 +1,8 @@ +reviewers: +- mikedanese +- liggit +- smarterclayton +approvers: +- mikedanese +- liggit +- smarterclayton diff --git a/vendor/k8s.io/client-go/util/certificate/certificate_manager.go b/vendor/k8s.io/client-go/util/certificate/certificate_manager.go new file mode 100644 index 000000000000..7ea05ba794a4 --- /dev/null +++ b/vendor/k8s.io/client-go/util/certificate/certificate_manager.go @@ -0,0 +1,440 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package certificate + +import ( + "crypto/ecdsa" + "crypto/elliptic" + cryptorand "crypto/rand" + "crypto/tls" + "crypto/x509" + "encoding/pem" + "fmt" + "sync" + "time" + + "github.com/golang/glog" + + certificates "k8s.io/api/certificates/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + certificatesclient "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" + "k8s.io/client-go/util/cert" + "k8s.io/client-go/util/certificate/csr" +) + +// certificateWaitBackoff controls the amount and timing of retries when the +// watch for certificate approval is interrupted. +var certificateWaitBackoff = wait.Backoff{Duration: 30 * time.Second, Steps: 4, Factor: 1.5, Jitter: 0.1} + +// Manager maintains and updates the certificates in use by this certificate +// manager. In the background it communicates with the API server to get new +// certificates for certificates about to expire. +type Manager interface { + // CertificateSigningRequestClient sets the client interface that is used for + // signing new certificates generated as part of rotation. + SetCertificateSigningRequestClient(certificatesclient.CertificateSigningRequestInterface) error + // Start the API server status sync loop. + Start() + // Current returns the currently selected certificate from the + // certificate manager, as well as the associated certificate and key data + // in PEM format. + Current() *tls.Certificate + // ServerHealthy returns true if the manager is able to communicate with + // the server. This allows a caller to determine whether the cert manager + // thinks it can potentially talk to the API server. The cert manager may + // be very conservative and only return true if recent communication has + // occurred with the server. + ServerHealthy() bool +} + +// Config is the set of configuration parameters available for a new Manager. +type Config struct { + // CertificateSigningRequestClient will be used for signing new certificate + // requests generated when a key rotation occurs. It must be set either at + // initialization or by using CertificateSigningRequestClient before + // Manager.Start() is called. + CertificateSigningRequestClient certificatesclient.CertificateSigningRequestInterface + // Template is the CertificateRequest that will be used as a template for + // generating certificate signing requests for all new keys generated as + // part of rotation. It follows the same rules as the template parameter of + // crypto.x509.CreateCertificateRequest in the Go standard libraries. + Template *x509.CertificateRequest + // Usages is the types of usages that certificates generated by the manager + // can be used for. + Usages []certificates.KeyUsage + // CertificateStore is a persistent store where the current cert/key is + // kept and future cert/key pairs will be persisted after they are + // generated. + CertificateStore Store + // BootstrapCertificatePEM is the certificate data that will be returned + // from the Manager if the CertificateStore doesn't have any cert/key pairs + // currently available and has not yet had a chance to get a new cert/key + // pair from the API. If the CertificateStore does have a cert/key pair, + // this will be ignored. If there is no cert/key pair available in the + // CertificateStore, as soon as Start is called, it will request a new + // cert/key pair from the CertificateSigningRequestClient. 
This is intended + // to allow the first boot of a component to be initialized using a + // generic, multi-use cert/key pair which will be quickly replaced with a + // unique cert/key pair. + BootstrapCertificatePEM []byte + // BootstrapKeyPEM is the key data that will be returned from the Manager + // if the CertificateStore doesn't have any cert/key pairs currently + // available. If the CertificateStore does have a cert/key pair, this will + // be ignored. If the bootstrap cert/key pair are used, they will be + // rotated at the first opportunity, possibly well in advance of expiring. + // This is intended to allow the first boot of a component to be + // initialized using a generic, multi-use cert/key pair which will be + // quickly replaced with a unique cert/key pair. + BootstrapKeyPEM []byte + // CertificateExpiration will record a metric that shows the remaining + // lifetime of the certificate. + CertificateExpiration Gauge +} + +// Store is responsible for getting and updating the current certificate. +// Depending on the concrete implementation, the backing store for this +// behavior may vary. +type Store interface { + // Current returns the currently selected certificate, as well as the + // associated certificate and key data in PEM format. If the Store doesn't + // have a cert/key pair currently, it should return a NoCertKeyError so + // that the Manager can recover by using bootstrap certificates to request + // a new cert/key pair. + Current() (*tls.Certificate, error) + // Update accepts the PEM data for the cert/key pair and makes the new + // cert/key pair the 'current' pair, that will be returned by future calls + // to Current(). + Update(cert, key []byte) (*tls.Certificate, error) +} + +// Gauge will record the remaining lifetime of the certificate each time it is +// updated. +type Gauge interface { + Set(float64) +} + +// NoCertKeyError indicates there is no cert/key currently available. +type NoCertKeyError string + +func (e *NoCertKeyError) Error() string { return string(*e) } + +type manager struct { + certSigningRequestClient certificatesclient.CertificateSigningRequestInterface + template *x509.CertificateRequest + usages []certificates.KeyUsage + certStore Store + certAccessLock sync.RWMutex + cert *tls.Certificate + rotationDeadline time.Time + forceRotation bool + certificateExpiration Gauge + serverHealth bool +} + +// NewManager returns a new certificate manager. A certificate manager is +// responsible for being the authoritative source of certificates in the +// Kubelet and handling updates due to rotation. +func NewManager(config *Config) (Manager, error) { + cert, forceRotation, err := getCurrentCertificateOrBootstrap( + config.CertificateStore, + config.BootstrapCertificatePEM, + config.BootstrapKeyPEM) + if err != nil { + return nil, err + } + + m := manager{ + certSigningRequestClient: config.CertificateSigningRequestClient, + template: config.Template, + usages: config.Usages, + certStore: config.CertificateStore, + cert: cert, + forceRotation: forceRotation, + certificateExpiration: config.CertificateExpiration, + } + + return &m, nil +} + +// Current returns the currently selected certificate from the certificate +// manager. This can be nil if the manager was initialized without a +// certificate and has not yet received one from the +// CertificateSigningRequestClient. 
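Taken together, the Config, Store, and Manager pieces above are wired up by a caller roughly as sketched below. This is a minimal, hypothetical sketch (the certdemo package, memStore type, startCertManager helper, and node identity are illustrative, not part of this package); a real caller would normally pair NewManager with the file-based store from certificate_store.go and a CertificateSigningRequestInterface obtained from a clientset.

package certdemo // hypothetical caller, sketch only

import (
	"crypto/tls"
	"crypto/x509"
	"crypto/x509/pkix"
	"sync"

	certificates "k8s.io/api/certificates/v1beta1"
	certificatesclient "k8s.io/client-go/kubernetes/typed/certificates/v1beta1"
	"k8s.io/client-go/util/certificate"
)

// memStore is a hypothetical in-memory Store, standing in for the file-based
// store defined in certificate_store.go.
type memStore struct {
	mu   sync.Mutex
	cert *tls.Certificate
}

func (s *memStore) Current() (*tls.Certificate, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.cert == nil {
		// Returning a NoCertKeyError lets the Manager fall back to the
		// bootstrap cert/key pair (if any) and request a fresh one.
		noKey := certificate.NoCertKeyError("no cert/key pair stored yet")
		return nil, &noKey
	}
	return s.cert, nil
}

func (s *memStore) Update(certPEM, keyPEM []byte) (*tls.Certificate, error) {
	c, err := tls.X509KeyPair(certPEM, keyPEM)
	if err != nil {
		return nil, err
	}
	// Populate Leaf so the Manager can read NotBefore/NotAfter when it
	// computes the next rotation deadline.
	leaf, err := x509.ParseCertificate(c.Certificate[0])
	if err != nil {
		return nil, err
	}
	c.Leaf = leaf
	s.mu.Lock()
	defer s.mu.Unlock()
	s.cert = &c
	return s.cert, nil
}

// startCertManager wires a Manager to a CSR client and starts the rotation loop.
func startCertManager(csrClient certificatesclient.CertificateSigningRequestInterface) (certificate.Manager, error) {
	m, err := certificate.NewManager(&certificate.Config{
		CertificateSigningRequestClient: csrClient,
		Template: &x509.CertificateRequest{
			Subject: pkix.Name{
				CommonName:   "system:node:example-node", // hypothetical identity
				Organization: []string{"system:nodes"},
			},
		},
		Usages: []certificates.KeyUsage{
			certificates.UsageDigitalSignature,
			certificates.UsageKeyEncipherment,
			certificates.UsageClientAuth,
		},
		CertificateStore: &memStore{},
	})
	if err != nil {
		return nil, err
	}
	m.Start() // Current() may return nil until the first CSR is approved
	return m, nil
}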
+func (m *manager) Current() *tls.Certificate { + m.certAccessLock.RLock() + defer m.certAccessLock.RUnlock() + return m.cert +} + +// ServerHealthy returns true if the cert manager believes the server +// is currently alive. +func (m *manager) ServerHealthy() bool { + m.certAccessLock.RLock() + defer m.certAccessLock.RUnlock() + return m.serverHealth +} + +// SetCertificateSigningRequestClient sets the client interface that is used +// for signing new certificates generated as part of rotation. It must be +// called before Start() and can not be used to change the +// CertificateSigningRequestClient that has already been set. This method is to +// support the one specific scenario where the CertificateSigningRequestClient +// uses the CertificateManager. +func (m *manager) SetCertificateSigningRequestClient(certSigningRequestClient certificatesclient.CertificateSigningRequestInterface) error { + if m.certSigningRequestClient == nil { + m.certSigningRequestClient = certSigningRequestClient + return nil + } + return fmt.Errorf("property CertificateSigningRequestClient is already set") +} + +// Start will start the background work of rotating the certificates. +func (m *manager) Start() { + // Certificate rotation depends on access to the API server certificate + // signing API, so don't start the certificate manager if we don't have a + // client. This will happen on the cluster master, where the kubelet is + // responsible for bootstrapping the pods of the master components. + if m.certSigningRequestClient == nil { + glog.V(2).Infof("Certificate rotation is not enabled, no connection to the apiserver.") + return + } + + glog.V(2).Infof("Certificate rotation is enabled.") + + m.setRotationDeadline() + + // Synchronously request a certificate before entering the background + // loop to allow bootstrap scenarios, where the certificate manager + // doesn't have a certificate at all yet. 
+ if m.shouldRotate() { + glog.V(1).Infof("shouldRotate() is true, forcing immediate rotation") + if _, err := m.rotateCerts(); err != nil { + utilruntime.HandleError(fmt.Errorf("Could not rotate certificates: %v", err)) + } + } + backoff := wait.Backoff{ + Duration: 2 * time.Second, + Factor: 2, + Jitter: 0.1, + Steps: 5, + } + go wait.Forever(func() { + sleepInterval := m.rotationDeadline.Sub(time.Now()) + glog.V(2).Infof("Waiting %v for next certificate rotation", sleepInterval) + time.Sleep(sleepInterval) + if err := wait.ExponentialBackoff(backoff, m.rotateCerts); err != nil { + utilruntime.HandleError(fmt.Errorf("Reached backoff limit, still unable to rotate certs: %v", err)) + wait.PollInfinite(32*time.Second, m.rotateCerts) + } + }, 0) +} + +func getCurrentCertificateOrBootstrap( + store Store, + bootstrapCertificatePEM []byte, + bootstrapKeyPEM []byte) (cert *tls.Certificate, shouldRotate bool, errResult error) { + + currentCert, err := store.Current() + if err == nil { + return currentCert, false, nil + } + + if _, ok := err.(*NoCertKeyError); !ok { + return nil, false, err + } + + if bootstrapCertificatePEM == nil || bootstrapKeyPEM == nil { + return nil, true, nil + } + + bootstrapCert, err := tls.X509KeyPair(bootstrapCertificatePEM, bootstrapKeyPEM) + if err != nil { + return nil, false, err + } + if len(bootstrapCert.Certificate) < 1 { + return nil, false, fmt.Errorf("no cert/key data found") + } + + certs, err := x509.ParseCertificates(bootstrapCert.Certificate[0]) + if err != nil { + return nil, false, fmt.Errorf("unable to parse certificate data: %v", err) + } + bootstrapCert.Leaf = certs[0] + return &bootstrapCert, true, nil +} + +// shouldRotate looks at how close the current certificate is to expiring and +// decides if it is time to rotate or not. +func (m *manager) shouldRotate() bool { + m.certAccessLock.RLock() + defer m.certAccessLock.RUnlock() + if m.cert == nil { + return true + } + if m.forceRotation { + return true + } + return time.Now().After(m.rotationDeadline) +} + +// rotateCerts attempts to request a client cert from the server, wait a reasonable +// period of time for it to be signed, and then update the cert on disk. If it cannot +// retrieve a cert, it will return false. It will only return error in exceptional cases. +// This method also keeps track of "server health" by interpreting the responses it gets +// from the server on the various calls it makes. +func (m *manager) rotateCerts() (bool, error) { + glog.V(2).Infof("Rotating certificates") + + csrPEM, keyPEM, privateKey, err := m.generateCSR() + if err != nil { + utilruntime.HandleError(fmt.Errorf("Unable to generate a certificate signing request: %v", err)) + return false, nil + } + + // Call the Certificate Signing Request API to get a certificate for the + // new private key. + req, err := csr.RequestCertificate(m.certSigningRequestClient, csrPEM, "", m.usages, privateKey) + if err != nil { + utilruntime.HandleError(fmt.Errorf("Failed while requesting a signed certificate from the master: %v", err)) + return false, m.updateServerError(err) + } + + // Wait for the certificate to be signed. Instead of one long watch, we retry with slightly longer + // intervals each time in order to tolerate failures from the server AND to preserve the liveliness + // of the cert manager loop. This creates slightly more traffic against the API server in return + // for bounding the amount of time we wait when a certificate expires.
+ var crtPEM []byte + watchDuration := time.Minute + if err := wait.ExponentialBackoff(certificateWaitBackoff, func() (bool, error) { + data, err := csr.WaitForCertificate(m.certSigningRequestClient, req, watchDuration) + switch { + case err == nil: + crtPEM = data + return true, nil + case err == wait.ErrWaitTimeout: + watchDuration += time.Minute + if watchDuration > 5*time.Minute { + watchDuration = 5 * time.Minute + } + return false, nil + default: + utilruntime.HandleError(fmt.Errorf("Unable to check certificate signing status: %v", err)) + return false, m.updateServerError(err) + } + }); err != nil { + utilruntime.HandleError(fmt.Errorf("Certificate request was not signed: %v", err)) + return false, nil + } + + cert, err := m.certStore.Update(crtPEM, keyPEM) + if err != nil { + utilruntime.HandleError(fmt.Errorf("Unable to store the new cert/key pair: %v", err)) + return false, nil + } + + m.updateCached(cert) + m.setRotationDeadline() + m.forceRotation = false + return true, nil +} + +// setRotationDeadline sets a cached value for the threshold at which the +// current certificate should be rotated, 80%+/-10% of the expiration of the +// certificate. +func (m *manager) setRotationDeadline() { + m.certAccessLock.RLock() + defer m.certAccessLock.RUnlock() + if m.cert == nil { + m.rotationDeadline = time.Now() + return + } + + notAfter := m.cert.Leaf.NotAfter + totalDuration := float64(notAfter.Sub(m.cert.Leaf.NotBefore)) + + m.rotationDeadline = m.cert.Leaf.NotBefore.Add(jitteryDuration(totalDuration)) + glog.V(2).Infof("Certificate expiration is %v, rotation deadline is %v", notAfter, m.rotationDeadline) + if m.certificateExpiration != nil { + m.certificateExpiration.Set(float64(notAfter.Unix())) + } +} + +// jitteryDuration uses some jitter to set the rotation threshold so each node +// will rotate at approximately 70-90% of the total lifetime of the +// certificate. With jitter, if a number of nodes are added to a cluster at +// approximately the same time (such as cluster creation time), they won't all +// try to rotate certificates at the same time for the rest of the life of the +// cluster. +// +// This function is represented as a variable to allow replacement during testing. +var jitteryDuration = func(totalDuration float64) time.Duration { + return wait.Jitter(time.Duration(totalDuration), 0.2) - time.Duration(totalDuration*0.3) +} + +// updateCached sets the most recent retrieved cert. It also sets the server +// as assumed healthy. +func (m *manager) updateCached(cert *tls.Certificate) { + m.certAccessLock.Lock() + defer m.certAccessLock.Unlock() + m.serverHealth = true + m.cert = cert +} + +// updateServerError takes an error returned by the server and infers +// the health of the server based on the error. It will return nil if +// the error does not require immediate termination of any wait loops, +// and otherwise it will return the error. 
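The rotation window produced by jitteryDuration above follows from wait.Jitter(d, 0.2) returning a duration in [d, 1.2d): after subtracting 0.3d the deadline offset lands in [0.7d, 0.9d), i.e. 21 to 27 days into a 30-day certificate. A small sketch of a test that could sit in this package to spot-check those bounds (the test name is illustrative):

package certificate

import (
	"testing"
	"time"
)

// TestJitteryDurationWindow spot-checks that the rotation deadline offset stays
// within roughly 70-90% of the certificate lifetime, as described above.
func TestJitteryDurationWindow(t *testing.T) {
	lifetime := 30 * 24 * time.Hour // e.g. a certificate valid for 30 days
	for i := 0; i < 1000; i++ {
		offset := jitteryDuration(float64(lifetime))
		if offset < time.Duration(float64(lifetime)*0.7) || offset > time.Duration(float64(lifetime)*0.9) {
			t.Fatalf("offset %v falls outside the expected 70%%-90%% window", offset)
		}
	}
}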
+func (m *manager) updateServerError(err error) error { + m.certAccessLock.Lock() + defer m.certAccessLock.Unlock() + switch { + case errors.IsUnauthorized(err): + // SSL terminating proxies may report this error instead of the master + m.serverHealth = true + case errors.IsUnexpectedServerError(err): + // generally indicates a proxy or other load balancer problem, rather than a problem coming + // from the master + m.serverHealth = false + default: + // Identify known errors that could be expected for a cert request that + // indicate everything is working normally + m.serverHealth = errors.IsNotFound(err) || errors.IsForbidden(err) + } + return nil +} + +func (m *manager) generateCSR() (csrPEM []byte, keyPEM []byte, key interface{}, err error) { + // Generate a new private key. + privateKey, err := ecdsa.GenerateKey(elliptic.P256(), cryptorand.Reader) + if err != nil { + return nil, nil, nil, fmt.Errorf("unable to generate a new private key: %v", err) + } + der, err := x509.MarshalECPrivateKey(privateKey) + if err != nil { + return nil, nil, nil, fmt.Errorf("unable to marshal the new key to DER: %v", err) + } + + keyPEM = pem.EncodeToMemory(&pem.Block{Type: cert.ECPrivateKeyBlockType, Bytes: der}) + + csrPEM, err = cert.MakeCSRFromTemplate(privateKey, m.template) + if err != nil { + return nil, nil, nil, fmt.Errorf("unable to create a csr from the private key: %v", err) + } + return csrPEM, keyPEM, privateKey, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/certificate_store.go b/vendor/k8s.io/client-go/util/certificate/certificate_store.go similarity index 91% rename from vendor/k8s.io/kubernetes/pkg/kubelet/certificate/certificate_store.go rename to vendor/k8s.io/client-go/util/certificate/certificate_store.go index a98a9031978d..42a40dcdf009 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/certificate_store.go +++ b/vendor/k8s.io/client-go/util/certificate/certificate_store.go @@ -28,8 +28,6 @@ import ( "time" "github.com/golang/glog" - - utilfile "k8s.io/kubernetes/pkg/util/file" ) const ( @@ -86,7 +84,7 @@ func NewFileStore( func (s *fileStore) recover() error { // If the 'current' file doesn't exist, continue on with the recovery process. 
currentPath := filepath.Join(s.certDirectory, s.filename(currentPair)) - if exists, err := utilfile.FileExists(currentPath); err != nil { + if exists, err := fileExists(currentPath); err != nil { return err } else if exists { return nil @@ -113,18 +111,18 @@ func (s *fileStore) recover() error { func (s *fileStore) Current() (*tls.Certificate, error) { pairFile := filepath.Join(s.certDirectory, s.filename(currentPair)) - if pairFileExists, err := utilfile.FileExists(pairFile); err != nil { + if pairFileExists, err := fileExists(pairFile); err != nil { return nil, err } else if pairFileExists { glog.Infof("Loading cert/key pair from %q.", pairFile) return loadFile(pairFile) } - certFileExists, err := utilfile.FileExists(s.certFile) + certFileExists, err := fileExists(s.certFile) if err != nil { return nil, err } - keyFileExists, err := utilfile.FileExists(s.keyFile) + keyFileExists, err := fileExists(s.keyFile) if err != nil { return nil, err } @@ -135,11 +133,11 @@ func (s *fileStore) Current() (*tls.Certificate, error) { c := filepath.Join(s.certDirectory, s.pairNamePrefix+certExtension) k := filepath.Join(s.keyDirectory, s.pairNamePrefix+keyExtension) - certFileExists, err = utilfile.FileExists(c) + certFileExists, err = fileExists(c) if err != nil { return nil, err } - keyFileExists, err = utilfile.FileExists(k) + keyFileExists, err = fileExists(k) if err != nil { return nil, err } @@ -240,7 +238,7 @@ func (s *fileStore) updateSymlink(filename string) error { return err } } else if fi.Mode()&os.ModeSymlink != os.ModeSymlink { - return fmt.Errorf("expected %q to be a symlink but it is a file.", currentPath) + return fmt.Errorf("expected %q to be a symlink but it is a file", currentPath) } else { currentPathExists = true } @@ -253,7 +251,7 @@ func (s *fileStore) updateSymlink(filename string) error { return err } } else if fi.Mode()&os.ModeSymlink != os.ModeSymlink { - return fmt.Errorf("expected %q to be a symlink but it is a file.", updatedPath) + return fmt.Errorf("expected %q to be a symlink but it is a file", updatedPath) } else { if err := os.Remove(updatedPath); err != nil { return fmt.Errorf("unable to remove %q: %v", updatedPath, err) @@ -262,12 +260,19 @@ func (s *fileStore) updateSymlink(filename string) error { // Check that the new cert/key pair file exists to avoid rotating to an // invalid cert/key. - if filenameExists, err := utilfile.FileExists(filename); err != nil { + if filenameExists, err := fileExists(filename); err != nil { return err } else if !filenameExists { return fmt.Errorf("file %q does not exist so it can not be used as the currently selected cert/key", filename) } + // Ensure the source path is absolute to ensure the symlink target is + // correct when certDirectory is a relative path. + filename, err := filepath.Abs(filename) + if err != nil { + return err + } + // Create the 'updated' symlink pointing to the requested file name. if err := os.Symlink(filename, updatedPath); err != nil { return fmt.Errorf("unable to create a symlink from %q to %q: %v", updatedPath, filename, err) @@ -307,3 +312,13 @@ func loadX509KeyPair(certFile, keyFile string) (*tls.Certificate, error) { cert.Leaf = certs[0] return &cert, nil } + +// FileExists checks if specified file exists. 
+func fileExists(filename string) (bool, error) { + if _, err := os.Stat(filename); os.IsNotExist(err) { + return false, nil + } else if err != nil { + return false, err + } + return true, nil +} diff --git a/vendor/k8s.io/client-go/util/certificate/csr/BUILD b/vendor/k8s.io/client-go/util/certificate/csr/BUILD new file mode 100644 index 000000000000..c6def5bbf0c8 --- /dev/null +++ b/vendor/k8s.io/client-go/util/certificate/csr/BUILD @@ -0,0 +1,54 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_library( + name = "go_default_library", + srcs = ["csr.go"], + importpath = "k8s.io/client-go/util/certificate/csr", + deps = [ + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/api/certificates/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1:go_default_library", + "//vendor/k8s.io/client-go/tools/cache:go_default_library", + "//vendor/k8s.io/client-go/util/cert:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) + +go_test( + name = "go_default_test", + srcs = ["csr_test.go"], + importpath = "k8s.io/client-go/util/certificate/csr", + library = ":go_default_library", + deps = [ + "//vendor/k8s.io/api/certificates/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1:go_default_library", + "//vendor/k8s.io/client-go/util/cert:go_default_library", + ], +) diff --git a/vendor/k8s.io/client-go/util/certificate/csr/csr.go b/vendor/k8s.io/client-go/util/certificate/csr/csr.go new file mode 100644 index 000000000000..22112a5b5b6f --- /dev/null +++ b/vendor/k8s.io/client-go/util/certificate/csr/csr.go @@ -0,0 +1,261 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package csr + +import ( + "crypto" + "crypto/sha512" + "crypto/x509" + "crypto/x509/pkix" + "encoding/base64" + "encoding/pem" + "fmt" + "github.com/golang/glog" + "reflect" + "time" + + certificates "k8s.io/api/certificates/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" + certificatesclient "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" + "k8s.io/client-go/tools/cache" + certutil "k8s.io/client-go/util/cert" +) + +// RequestNodeCertificate will create a certificate signing request for a node +// (Organization and CommonName for the CSR will be set as expected for node +// certificates) and send it to API server, then it will watch the object's +// status, once approved by API server, it will return the API server's issued +// certificate (pem-encoded). If there are any errors, or the watch times out, it +// will return an error. This is intended for use on nodes (kubelet and +// kubeadm). +func RequestNodeCertificate(client certificatesclient.CertificateSigningRequestInterface, privateKeyData []byte, nodeName types.NodeName) (certData []byte, err error) { + subject := &pkix.Name{ + Organization: []string{"system:nodes"}, + CommonName: "system:node:" + string(nodeName), + } + + privateKey, err := certutil.ParsePrivateKeyPEM(privateKeyData) + if err != nil { + return nil, fmt.Errorf("invalid private key for certificate request: %v", err) + } + csrData, err := certutil.MakeCSR(privateKey, subject, nil, nil) + if err != nil { + return nil, fmt.Errorf("unable to generate certificate request: %v", err) + } + + usages := []certificates.KeyUsage{ + certificates.UsageDigitalSignature, + certificates.UsageKeyEncipherment, + certificates.UsageClientAuth, + } + name := digestedName(privateKeyData, subject, usages) + req, err := RequestCertificate(client, csrData, name, usages, privateKey) + if err != nil { + return nil, err + } + return WaitForCertificate(client, req, 3600*time.Second) +} + +// RequestCertificate will either use an existing (if this process has run +// before but not to completion) or create a certificate signing request using the +// PEM encoded CSR and send it to API server, then it will watch the object's +// status, once approved by API server, it will return the API server's issued +// certificate (pem-encoded). If there are any errors, or the watch times out, it +// will return an error. +func RequestCertificate(client certificatesclient.CertificateSigningRequestInterface, csrData []byte, name string, usages []certificates.KeyUsage, privateKey interface{}) (req *certificates.CertificateSigningRequest, err error) { + csr := &certificates.CertificateSigningRequest{ + // Username, UID, Groups will be injected by API server.
+ TypeMeta: metav1.TypeMeta{Kind: "CertificateSigningRequest"}, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: certificates.CertificateSigningRequestSpec{ + Request: csrData, + Usages: usages, + }, + } + if len(csr.Name) == 0 { + csr.GenerateName = "csr-" + } + + req, err = client.Create(csr) + switch { + case err == nil: + case errors.IsAlreadyExists(err) && len(name) > 0: + glog.Infof("csr for this node already exists, reusing") + req, err = client.Get(name, metav1.GetOptions{}) + if err != nil { + return nil, formatError("cannot retrieve certificate signing request: %v", err) + } + if err := ensureCompatible(req, csr, privateKey); err != nil { + return nil, fmt.Errorf("retrieved csr is not compatible: %v", err) + } + glog.Infof("csr for this node is still valid") + default: + return nil, formatError("cannot create certificate signing request: %v", err) + } + return req, nil +} + +// WaitForCertificate waits for a certificate to be issued until timeout, or returns an error. +func WaitForCertificate(client certificatesclient.CertificateSigningRequestInterface, req *certificates.CertificateSigningRequest, timeout time.Duration) (certData []byte, err error) { + fieldSelector := fields.OneTermEqualSelector("metadata.name", req.Name).String() + + event, err := cache.ListWatchUntil( + timeout, + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + options.FieldSelector = fieldSelector + return client.List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + options.FieldSelector = fieldSelector + return client.Watch(options) + }, + }, + func(event watch.Event) (bool, error) { + switch event.Type { + case watch.Modified, watch.Added: + case watch.Deleted: + return false, fmt.Errorf("csr %q was deleted", req.Name) + default: + return false, nil + } + csr := event.Object.(*certificates.CertificateSigningRequest) + if csr.UID != req.UID { + return false, fmt.Errorf("csr %q changed UIDs", csr.Name) + } + for _, c := range csr.Status.Conditions { + if c.Type == certificates.CertificateDenied { + return false, fmt.Errorf("certificate signing request is not approved, reason: %v, message: %v", c.Reason, c.Message) + } + if c.Type == certificates.CertificateApproved && csr.Status.Certificate != nil { + return true, nil + } + } + return false, nil + }, + ) + if err == wait.ErrWaitTimeout { + return nil, wait.ErrWaitTimeout + } + if err != nil { + return nil, formatError("cannot watch on the certificate signing request: %v", err) + } + + return event.Object.(*certificates.CertificateSigningRequest).Status.Certificate, nil +} + +// This digest should include all the relevant pieces of the CSR we care about. +// We can't directly hash the serialized CSR because of random padding that we +// regenerate every loop and we include usages which are not contained in the +// CSR. This needs to be kept up to date as we add new fields to the node +// certificates and with ensureCompatible. +func digestedName(privateKeyData []byte, subject *pkix.Name, usages []certificates.KeyUsage) string { + hash := sha512.New512_256() + + // Here we make sure two different inputs can't write the same stream + // to the hash. This delimiter is not in the base64.URLEncoding + // alphabet so there is no way to have spill over collisions.
Without + // it 'CN:foo,ORG:bar' hashes to the same value as 'CN:foob,ORG:ar' + const delimiter = '|' + encode := base64.RawURLEncoding.EncodeToString + + write := func(data []byte) { + hash.Write([]byte(encode(data))) + hash.Write([]byte{delimiter}) + } + + write(privateKeyData) + write([]byte(subject.CommonName)) + for _, v := range subject.Organization { + write([]byte(v)) + } + for _, v := range usages { + write([]byte(v)) + } + + return "node-csr-" + encode(hash.Sum(nil)) +} + +// ensureCompatible ensures that a CSR object is compatible with an original CSR +func ensureCompatible(new, orig *certificates.CertificateSigningRequest, privateKey interface{}) error { + newCsr, err := ParseCSR(new) + if err != nil { + return fmt.Errorf("unable to parse new csr: %v", err) + } + origCsr, err := ParseCSR(orig) + if err != nil { + return fmt.Errorf("unable to parse original csr: %v", err) + } + if !reflect.DeepEqual(newCsr.Subject, origCsr.Subject) { + return fmt.Errorf("csr subjects differ: new: %#v, orig: %#v", newCsr.Subject, origCsr.Subject) + } + signer, ok := privateKey.(crypto.Signer) + if !ok { + return fmt.Errorf("privateKey is not a signer") + } + newCsr.PublicKey = signer.Public() + if err := newCsr.CheckSignature(); err != nil { + return fmt.Errorf("error validating signature new CSR against old key: %v", err) + } + if len(new.Status.Certificate) > 0 { + certs, err := certutil.ParseCertsPEM(new.Status.Certificate) + if err != nil { + return fmt.Errorf("error parsing signed certificate for CSR: %v", err) + } + now := time.Now() + for _, cert := range certs { + if now.After(cert.NotAfter) { + return fmt.Errorf("one of the certificates for the CSR has expired: %s", cert.NotAfter) + } + } + } + return nil +} + +// formatError preserves the type of an API message but alters the message. Expects +// a single argument format string, and returns the wrapped error. +func formatError(format string, err error) error { + if s, ok := err.(errors.APIStatus); ok { + se := &errors.StatusError{ErrStatus: s.Status()} + se.ErrStatus.Message = fmt.Sprintf(format, se.ErrStatus.Message) + return se + } + return fmt.Errorf(format, err) +} + +// ParseCSR extracts the CSR from the API object and decodes it. 
+func ParseCSR(obj *certificates.CertificateSigningRequest) (*x509.CertificateRequest, error) { + // extract PEM from request object + pemBytes := obj.Spec.Request + block, _ := pem.Decode(pemBytes) + if block == nil || block.Type != "CERTIFICATE REQUEST" { + return nil, fmt.Errorf("PEM block type must be CERTIFICATE REQUEST") + } + csr, err := x509.ParseCertificateRequest(block.Bytes) + if err != nil { + return nil, err + } + return csr, nil +} diff --git a/vendor/k8s.io/client-go/util/exec/BUILD b/vendor/k8s.io/client-go/util/exec/BUILD index 396658dc5b4e..57b58e5cc5f3 100644 --- a/vendor/k8s.io/client-go/util/exec/BUILD +++ b/vendor/k8s.io/client-go/util/exec/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["exec.go"], + importpath = "k8s.io/client-go/util/exec", ) filegroup( diff --git a/vendor/k8s.io/client-go/util/flowcontrol/BUILD b/vendor/k8s.io/client-go/util/flowcontrol/BUILD index ff03657a420a..d74b3f55446d 100644 --- a/vendor/k8s.io/client-go/util/flowcontrol/BUILD +++ b/vendor/k8s.io/client-go/util/flowcontrol/BUILD @@ -12,6 +12,7 @@ go_test( "backoff_test.go", "throttle_test.go", ], + importpath = "k8s.io/client-go/util/flowcontrol", library = ":go_default_library", deps = ["//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library"], ) @@ -22,6 +23,7 @@ go_library( "backoff.go", "throttle.go", ], + importpath = "k8s.io/client-go/util/flowcontrol", deps = [ "//vendor/github.com/juju/ratelimit:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library", diff --git a/vendor/k8s.io/client-go/util/homedir/BUILD b/vendor/k8s.io/client-go/util/homedir/BUILD index 255d866b8b5a..21a1952cf197 100644 --- a/vendor/k8s.io/client-go/util/homedir/BUILD +++ b/vendor/k8s.io/client-go/util/homedir/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["homedir.go"], + importpath = "k8s.io/client-go/util/homedir", ) filegroup( diff --git a/vendor/k8s.io/client-go/util/integer/BUILD b/vendor/k8s.io/client-go/util/integer/BUILD index 25e7d7f94a8a..67f050e422d4 100644 --- a/vendor/k8s.io/client-go/util/integer/BUILD +++ b/vendor/k8s.io/client-go/util/integer/BUILD @@ -9,12 +9,14 @@ load( go_test( name = "go_default_test", srcs = ["integer_test.go"], + importpath = "k8s.io/client-go/util/integer", library = ":go_default_library", ) go_library( name = "go_default_library", srcs = ["integer.go"], + importpath = "k8s.io/client-go/util/integer", ) filegroup( diff --git a/vendor/k8s.io/client-go/util/jsonpath/BUILD b/vendor/k8s.io/client-go/util/jsonpath/BUILD index 8cd93e1c9bc2..0856e29caff0 100644 --- a/vendor/k8s.io/client-go/util/jsonpath/BUILD +++ b/vendor/k8s.io/client-go/util/jsonpath/BUILD @@ -12,6 +12,7 @@ go_test( "jsonpath_test.go", "parser_test.go", ], + importpath = "k8s.io/client-go/util/jsonpath", library = ":go_default_library", ) @@ -23,6 +24,7 @@ go_library( "node.go", "parser.go", ], + importpath = "k8s.io/client-go/util/jsonpath", deps = ["//vendor/k8s.io/client-go/third_party/forked/golang/template:go_default_library"], ) diff --git a/vendor/k8s.io/client-go/util/jsonpath/parser.go b/vendor/k8s.io/client-go/util/jsonpath/parser.go index 2f02de1a315e..ef0f9213a4fe 100644 --- a/vendor/k8s.io/client-go/util/jsonpath/parser.go +++ b/vendor/k8s.io/client-go/util/jsonpath/parser.go @@ -43,7 +43,11 @@ type Parser struct { width int } -var ErrSyntax = errors.New("invalid syntax") +var ( + ErrSyntax = errors.New("invalid syntax") + dictKeyRex = regexp.MustCompile(`^'([^']*)'$`) + sliceOperatorRex = 
regexp.MustCompile(`^(-?[\d]*)(:-?[\d]*)?(:[\d]*)?$`) +) // Parse parsed the given text and return a node Parser. // If an error is encountered, parsing stops and an empty @@ -283,8 +287,7 @@ Loop: } // dict key - reg := regexp.MustCompile(`^'([^']*)'$`) - value := reg.FindStringSubmatch(text) + value := dictKeyRex.FindStringSubmatch(text) if value != nil { parser, err := parseAction("arraydict", fmt.Sprintf(".%s", value[1])) if err != nil { @@ -297,8 +300,7 @@ Loop: } //slice operator - reg = regexp.MustCompile(`^(-?[\d]*)(:-?[\d]*)?(:[\d]*)?$`) - value = reg.FindStringSubmatch(text) + value = sliceOperatorRex.FindStringSubmatch(text) if value == nil { return fmt.Errorf("invalid array index %s", text) } diff --git a/vendor/k8s.io/client-go/util/retry/BUILD b/vendor/k8s.io/client-go/util/retry/BUILD index 9c9bb048763d..9f6f4b84886c 100644 --- a/vendor/k8s.io/client-go/util/retry/BUILD +++ b/vendor/k8s.io/client-go/util/retry/BUILD @@ -9,6 +9,7 @@ load( go_library( name = "go_default_library", srcs = ["util.go"], + importpath = "k8s.io/client-go/util/retry", deps = [ "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", @@ -18,6 +19,7 @@ go_library( go_test( name = "go_default_test", srcs = ["util_test.go"], + importpath = "k8s.io/client-go/util/retry", library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", diff --git a/vendor/k8s.io/client-go/util/workqueue/BUILD b/vendor/k8s.io/client-go/util/workqueue/BUILD index 58adbda042b0..2abd2f82d8bc 100644 --- a/vendor/k8s.io/client-go/util/workqueue/BUILD +++ b/vendor/k8s.io/client-go/util/workqueue/BUILD @@ -13,6 +13,7 @@ go_test( "delaying_queue_test.go", "rate_limitting_queue_test.go", ], + importpath = "k8s.io/client-go/util/workqueue", library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library", @@ -31,6 +32,7 @@ go_library( "queue.go", "rate_limitting_queue.go", ], + importpath = "k8s.io/client-go/util/workqueue", deps = [ "//vendor/github.com/juju/ratelimit:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library", @@ -41,6 +43,7 @@ go_library( go_test( name = "go_default_xtest", srcs = ["queue_test.go"], + importpath = "k8s.io/client-go/util/workqueue_test", deps = ["//vendor/k8s.io/client-go/util/workqueue:go_default_library"], ) diff --git a/vendor/k8s.io/kube-aggregator/LICENSE b/vendor/k8s.io/kube-aggregator/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/k8s.io/kube-aggregator/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/BUILD b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/BUILD index fcbc32a1992a..6fa710dab970 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/BUILD +++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/BUILD @@ -14,9 +14,9 @@ go_library( "types.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/kube-aggregator/pkg/apis/apiregistration", deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/doc.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/doc.go index b6339838e5ea..71354555f7ca 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/doc.go +++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // Package api is the internal version of the API. // +groupName=apiregistration.k8s.io diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/install/BUILD b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/install/BUILD index 1fb42eb62730..f584073c1c4c 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/install/BUILD +++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/install/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["install.go"], + importpath = "k8s.io/kube-aggregator/pkg/apis/apiregistration/install", deps = [ "//vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/register.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/register.go index 6a18624f5113..2ab61e9b4fe2 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/register.go +++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = SchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &APIService{}, diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/types.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/types.go index 1dd68fe6fa83..853d0152061b 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/types.go +++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/types.go @@ -55,7 +55,7 @@ type APIServiceSpec struct { // CABundle is a PEM encoded CA bundle which will be used to validate an API server's serving certificate. CABundle []byte - // GroupPriorityMininum is the priority this group should have at least. Higher priority means that the group is prefered by clients over lower priority ones. + // GroupPriorityMininum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. 
// Note that other versions of this group might specify even higher GroupPriorityMininum values such that the whole group gets a higher priority. // The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). // The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo) diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/BUILD b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/BUILD index b3e620266ba3..362ed42bd686 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/BUILD +++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/BUILD @@ -15,6 +15,7 @@ go_library( "zz_generated.conversion.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1", deps = [ "//vendor/github.com/gogo/protobuf/proto:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/doc.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/doc.go index 57c710a55c79..cc5d9e9e87a9 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/doc.go +++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:conversion-gen=k8s.io/kube-aggregator/pkg/apis/apiregistration // +k8s:openapi-gen=true diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.proto b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.proto index 10b7b2799ba7..dabc9e0fec38 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.proto +++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.proto @@ -91,7 +91,7 @@ message APIServiceSpec { // CABundle is a PEM encoded CA bundle which will be used to validate an API server's serving certificate. optional bytes caBundle = 5; - // GroupPriorityMininum is the priority this group should have at least. Higher priority means that the group is prefered by clients over lower priority ones. + // GroupPriorityMininum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. // Note that other versions of this group might specify even higher GroupPriorityMininum values such that the whole group gets a higher priority. // The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). // The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo) diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/register.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/register.go index a8823629f294..38babafa9ea5 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/register.go +++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/register.go @@ -47,7 +47,7 @@ func init() { localSchemeBuilder.Register(addKnownTypes) } -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. 
func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &APIService{}, diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/types.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/types.go index 66238bc29fa1..2ac807e39c0a 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/types.go +++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/types.go @@ -55,7 +55,7 @@ type APIServiceSpec struct { // CABundle is a PEM encoded CA bundle which will be used to validate an API server's serving certificate. CABundle []byte `json:"caBundle" protobuf:"bytes,5,opt,name=caBundle"` - // GroupPriorityMininum is the priority this group should have at least. Higher priority means that the group is prefered by clients over lower priority ones. + // GroupPriorityMininum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. // Note that other versions of this group might specify even higher GroupPriorityMininum values such that the whole group gets a higher priority. // The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). // The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo) diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.deepcopy.go index 653220a9a4d8..dd64fbd2115b 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.deepcopy.go @@ -21,48 +21,9 @@ limitations under the License. package v1beta1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*APIService).DeepCopyInto(out.(*APIService)) - return nil - }, InType: reflect.TypeOf(&APIService{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*APIServiceCondition).DeepCopyInto(out.(*APIServiceCondition)) - return nil - }, InType: reflect.TypeOf(&APIServiceCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*APIServiceList).DeepCopyInto(out.(*APIServiceList)) - return nil - }, InType: reflect.TypeOf(&APIServiceList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*APIServiceSpec).DeepCopyInto(out.(*APIServiceSpec)) - return nil - }, InType: reflect.TypeOf(&APIServiceSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*APIServiceStatus).DeepCopyInto(out.(*APIServiceStatus)) - return nil - }, InType: reflect.TypeOf(&APIServiceStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceReference).DeepCopyInto(out.(*ServiceReference)) - return nil - }, InType: reflect.TypeOf(&ServiceReference{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *APIService) DeepCopyInto(out *APIService) { *out = *in diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/validation/BUILD b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/validation/BUILD index 040e64bea5f3..3d6243aea1f5 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/validation/BUILD +++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/validation/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["validation.go"], + importpath = "k8s.io/kube-aggregator/pkg/apis/apiregistration/validation", deps = [ "//vendor/k8s.io/apimachinery/pkg/api/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/validation/path:go_default_library", diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/zz_generated.deepcopy.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/zz_generated.deepcopy.go index 30958a6d6722..ee9db9426ef9 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/zz_generated.deepcopy.go @@ -21,48 +21,9 @@ limitations under the License. package apiregistration import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*APIService).DeepCopyInto(out.(*APIService)) - return nil - }, InType: reflect.TypeOf(&APIService{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*APIServiceCondition).DeepCopyInto(out.(*APIServiceCondition)) - return nil - }, InType: reflect.TypeOf(&APIServiceCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*APIServiceList).DeepCopyInto(out.(*APIServiceList)) - return nil - }, InType: reflect.TypeOf(&APIServiceList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*APIServiceSpec).DeepCopyInto(out.(*APIServiceSpec)) - return nil - }, InType: reflect.TypeOf(&APIServiceSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*APIServiceStatus).DeepCopyInto(out.(*APIServiceStatus)) - return nil - }, InType: reflect.TypeOf(&APIServiceStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceReference).DeepCopyInto(out.(*ServiceReference)) - return nil - }, InType: reflect.TypeOf(&ServiceReference{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *APIService) DeepCopyInto(out *APIService) { *out = *in diff --git a/vendor/k8s.io/kube-aggregator/pkg/apiserver/BUILD b/vendor/k8s.io/kube-aggregator/pkg/apiserver/BUILD index 48ff6a7cea68..caddde3de3b4 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/apiserver/BUILD +++ b/vendor/k8s.io/kube-aggregator/pkg/apiserver/BUILD @@ -12,6 +12,7 @@ go_test( "handler_apis_test.go", "handler_proxy_test.go", ], + importpath = "k8s.io/kube-aggregator/pkg/apiserver", library = ":go_default_library", deps = [ "//vendor/golang.org/x/net/websocket:go_default_library", @@ -37,6 +38,7 @@ go_library( "handler_proxy.go", "resolvers.go", ], + importpath = "k8s.io/kube-aggregator/pkg/apiserver", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/vendor/k8s.io/kube-aggregator/pkg/apiserver/apiserver.go b/vendor/k8s.io/kube-aggregator/pkg/apiserver/apiserver.go index 3e70b7a5e223..f9188609ea8d 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/apiserver/apiserver.go +++ b/vendor/k8s.io/kube-aggregator/pkg/apiserver/apiserver.go @@ -207,6 +207,8 @@ func (c completedConfig) NewWithDelegate(delegationTarget genericapiserver.Deleg c.GenericConfig.SharedInformerFactory.Core().V1().Services(), c.GenericConfig.SharedInformerFactory.Core().V1().Endpoints(), apiregistrationClient.Apiregistration(), + c.ExtraConfig.ProxyTransport, + s.serviceResolver, ) s.GenericAPIServer.AddPostStartHook("start-kube-aggregator-informers", func(context genericapiserver.PostStartHookContext) error { @@ -219,7 +221,8 @@ func (c completedConfig) NewWithDelegate(delegationTarget genericapiserver.Deleg return nil }) s.GenericAPIServer.AddPostStartHook("apiservice-status-available-controller", func(context genericapiserver.PostStartHookContext) error { - go availableController.Run(context.StopCh) + // if we end up blocking for long periods of time, we may need to increase threadiness. 
+ go availableController.Run(5, context.StopCh) return nil }) diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/BUILD b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/BUILD index 1565118e39b5..39a91a2dd89a 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/BUILD +++ b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/BUILD @@ -11,6 +11,7 @@ go_library( "clientset.go", "doc.go", ], + importpath = "k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/client-go/discovery:go_default_library", diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/doc.go b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/doc.go index 309c091bfd33..b667dd5157a5 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/doc.go +++ b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. - // This package has the automatically generated clientset. package internalclientset diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/scheme/BUILD b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/scheme/BUILD index 8b598eadc1d8..94a2da323d38 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/scheme/BUILD +++ b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/scheme/BUILD @@ -11,6 +11,7 @@ go_library( "doc.go", "register.go", ], + importpath = "k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/scheme", deps = [ "//vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/scheme/doc.go b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/scheme/doc.go index 5d8ec824f0fb..3ec2200d0990 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/scheme/doc.go +++ b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/scheme/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. - // This package contains the scheme of the automatically generated clientset. 
package scheme diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion/BUILD b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion/BUILD index 1c7ac3f1a7ea..019d173114b4 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion/BUILD +++ b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion/BUILD @@ -13,6 +13,7 @@ go_library( "doc.go", "generated_expansion.go", ], + importpath = "k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion", deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion/doc.go b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion/doc.go index 57e5b70cf564..3adf06d8934f 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion/doc.go +++ b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. - // This package has the automatically generated typed clients. package internalversion diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/informers/internalversion/BUILD b/vendor/k8s.io/kube-aggregator/pkg/client/informers/internalversion/BUILD index 77812a0b2684..6ac03efea32e 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/client/informers/internalversion/BUILD +++ b/vendor/k8s.io/kube-aggregator/pkg/client/informers/internalversion/BUILD @@ -11,7 +11,9 @@ go_library( "factory.go", "generic.go", ], + importpath = "k8s.io/kube-aggregator/pkg/client/informers/internalversion", deps = [ + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/BUILD b/vendor/k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/BUILD index 8701145496c1..e88dbbad5963 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/BUILD +++ b/vendor/k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["interface.go"], + importpath = "k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration", deps = [ "//vendor/k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/internalversion:go_default_library", "//vendor/k8s.io/kube-aggregator/pkg/client/informers/internalversion/internalinterfaces:go_default_library", diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/interface.go 
b/vendor/k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/interface.go index daa61f6fbad1..9d5c0dda9ccf 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/interface.go +++ b/vendor/k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/interface.go @@ -30,15 +30,17 @@ type Interface interface { } type group struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &group{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // InternalVersion returns a new internalversion.Interface. func (g *group) InternalVersion() internalversion.Interface { - return internalversion.New(g.SharedInformerFactory) + return internalversion.New(g.factory, g.namespace, g.tweakListOptions) } diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/internalversion/BUILD b/vendor/k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/internalversion/BUILD index 055da5c8ba27..ec39ea186596 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/internalversion/BUILD +++ b/vendor/k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/internalversion/BUILD @@ -11,6 +11,7 @@ go_library( "apiservice.go", "interface.go", ], + importpath = "k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/internalversion", deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/internalversion/apiservice.go b/vendor/k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/internalversion/apiservice.go index a6afb6a0b1cc..f0e0cfdebcab 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/internalversion/apiservice.go +++ b/vendor/k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/internalversion/apiservice.go @@ -38,19 +38,33 @@ type APIServiceInformer interface { } type aPIServiceInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc } // NewAPIServiceInformer constructs a new informer for APIService type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewAPIServiceInformer(client internalclientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredAPIServiceInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredAPIServiceInformer constructs a new informer for APIService type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredAPIServiceInformer(client internalclientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Apiregistration().APIServices().List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Apiregistration().APIServices().Watch(options) }, }, @@ -60,12 +74,12 @@ func NewAPIServiceInformer(client internalclientset.Interface, resyncPeriod time ) } -func defaultAPIServiceInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewAPIServiceInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *aPIServiceInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredAPIServiceInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *aPIServiceInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&apiregistration.APIService{}, defaultAPIServiceInformer) + return f.factory.InformerFor(&apiregistration.APIService{}, f.defaultInformer) } func (f *aPIServiceInformer) Lister() internalversion.APIServiceLister { diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/internalversion/interface.go b/vendor/k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/internalversion/interface.go index b5ee434578ee..a1d6f9da5eaa 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/internalversion/interface.go +++ b/vendor/k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/internalversion/interface.go @@ -29,15 +29,17 @@ type Interface interface { } type version struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &version{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // APIServices returns a APIServiceInformer. func (v *version) APIServices() APIServiceInformer { - return &aPIServiceInformer{factory: v.SharedInformerFactory} + return &aPIServiceInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/informers/internalversion/factory.go b/vendor/k8s.io/kube-aggregator/pkg/client/informers/internalversion/factory.go index b59cb1f55f51..141c95f35f10 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/client/informers/internalversion/factory.go +++ b/vendor/k8s.io/kube-aggregator/pkg/client/informers/internalversion/factory.go @@ -19,6 +19,7 @@ limitations under the License. 
package internalversion import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" cache "k8s.io/client-go/tools/cache" @@ -31,9 +32,11 @@ import ( ) type sharedInformerFactory struct { - client internalclientset.Interface - lock sync.Mutex - defaultResync time.Duration + client internalclientset.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration informers map[reflect.Type]cache.SharedIndexInformer // startedInformers is used for tracking which informers have been started. @@ -43,8 +46,17 @@ type sharedInformerFactory struct { // NewSharedInformerFactory constructs a new instance of sharedInformerFactory func NewSharedInformerFactory(client internalclientset.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewFilteredSharedInformerFactory(client, defaultResync, v1.NamespaceAll, nil) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. +func NewFilteredSharedInformerFactory(client internalclientset.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { return &sharedInformerFactory{ client: client, + namespace: namespace, + tweakListOptions: tweakListOptions, defaultResync: defaultResync, informers: make(map[reflect.Type]cache.SharedIndexInformer), startedInformers: make(map[reflect.Type]bool), @@ -114,5 +126,5 @@ type SharedInformerFactory interface { } func (f *sharedInformerFactory) Apiregistration() apiregistration.Interface { - return apiregistration.New(f) + return apiregistration.New(f, f.namespace, f.tweakListOptions) } diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/informers/internalversion/generic.go b/vendor/k8s.io/kube-aggregator/pkg/client/informers/internalversion/generic.go index 914836185263..e82fcf4b84c1 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/client/informers/internalversion/generic.go +++ b/vendor/k8s.io/kube-aggregator/pkg/client/informers/internalversion/generic.go @@ -51,7 +51,7 @@ func (f *genericInformer) Lister() cache.GenericLister { // TODO extend this to unknown resources with a client pool func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { switch resource { - // Group=Apiregistration, Version=InternalVersion + // Group=apiregistration.k8s.io, Version=internalVersion case apiregistration.SchemeGroupVersion.WithResource("apiservices"): return &genericInformer{resource: resource.GroupResource(), informer: f.Apiregistration().InternalVersion().APIServices().Informer()}, nil diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/informers/internalversion/internalinterfaces/BUILD b/vendor/k8s.io/kube-aggregator/pkg/client/informers/internalversion/internalinterfaces/BUILD index 78dbde363ee0..93102c52732e 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/client/informers/internalversion/internalinterfaces/BUILD +++ b/vendor/k8s.io/kube-aggregator/pkg/client/informers/internalversion/internalinterfaces/BUILD @@ -8,7 +8,9 @@ load( go_library( name = "go_default_library", srcs = ["factory_interfaces.go"], + importpath = "k8s.io/kube-aggregator/pkg/client/informers/internalversion/internalinterfaces", deps = [ + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", "//vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset:go_default_library", diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/informers/internalversion/internalinterfaces/factory_interfaces.go b/vendor/k8s.io/kube-aggregator/pkg/client/informers/internalversion/internalinterfaces/factory_interfaces.go index b07867e1ce58..9cbdb8008e91 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/client/informers/internalversion/internalinterfaces/factory_interfaces.go +++ b/vendor/k8s.io/kube-aggregator/pkg/client/informers/internalversion/internalinterfaces/factory_interfaces.go @@ -19,6 +19,7 @@ limitations under the License. package internalinterfaces import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" cache "k8s.io/client-go/tools/cache" internalclientset "k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset" @@ -32,3 +33,5 @@ type SharedInformerFactory interface { Start(stopCh <-chan struct{}) InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer } + +type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/listers/apiregistration/internalversion/BUILD b/vendor/k8s.io/kube-aggregator/pkg/client/listers/apiregistration/internalversion/BUILD index 152e0db9ef10..5e8cbddf431c 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/client/listers/apiregistration/internalversion/BUILD +++ b/vendor/k8s.io/kube-aggregator/pkg/client/listers/apiregistration/internalversion/BUILD @@ -11,9 +11,9 @@ go_library( "apiservice.go", "expansion_generated.go", ], + importpath = "k8s.io/kube-aggregator/pkg/client/listers/apiregistration/internalversion", deps = [ "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", "//vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration:go_default_library", diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/listers/apiregistration/internalversion/apiservice.go b/vendor/k8s.io/kube-aggregator/pkg/client/listers/apiregistration/internalversion/apiservice.go index 24f2409d9975..675d8228a96d 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/client/listers/apiregistration/internalversion/apiservice.go +++ b/vendor/k8s.io/kube-aggregator/pkg/client/listers/apiregistration/internalversion/apiservice.go @@ -20,7 +20,6 @@ package internalversion import ( "k8s.io/apimachinery/pkg/api/errors" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" apiregistration "k8s.io/kube-aggregator/pkg/apis/apiregistration" @@ -55,8 +54,7 @@ func (s *aPIServiceLister) List(selector labels.Selector) (ret []*apiregistratio // Get retrieves the APIService from the index for a given name. 
func (s *aPIServiceLister) Get(name string) (*apiregistration.APIService, error) { - key := &apiregistration.APIService{ObjectMeta: v1.ObjectMeta{Name: name}} - obj, exists, err := s.indexer.Get(key) + obj, exists, err := s.indexer.GetByKey(name) if err != nil { return nil, err } diff --git a/vendor/k8s.io/kube-aggregator/pkg/controllers/BUILD b/vendor/k8s.io/kube-aggregator/pkg/controllers/BUILD index ed9d7227bbd5..16ad761c1ca0 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/controllers/BUILD +++ b/vendor/k8s.io/kube-aggregator/pkg/controllers/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["cache.go"], + importpath = "k8s.io/kube-aggregator/pkg/controllers", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", diff --git a/vendor/k8s.io/kube-aggregator/pkg/controllers/autoregister/BUILD b/vendor/k8s.io/kube-aggregator/pkg/controllers/autoregister/BUILD index 0688ca9a459a..3d8a16741b24 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/controllers/autoregister/BUILD +++ b/vendor/k8s.io/kube-aggregator/pkg/controllers/autoregister/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["autoregister_controller_test.go"], + importpath = "k8s.io/kube-aggregator/pkg/controllers/autoregister", library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -24,6 +25,7 @@ go_test( go_library( name = "go_default_library", srcs = ["autoregister_controller.go"], + importpath = "k8s.io/kube-aggregator/pkg/controllers/autoregister", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", diff --git a/vendor/k8s.io/kube-aggregator/pkg/controllers/openapi/BUILD b/vendor/k8s.io/kube-aggregator/pkg/controllers/openapi/BUILD index b45e675eaa4c..48dda1504975 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/controllers/openapi/BUILD +++ b/vendor/k8s.io/kube-aggregator/pkg/controllers/openapi/BUILD @@ -7,6 +7,7 @@ go_library( "controller.go", "downloader.go", ], + importpath = "k8s.io/kube-aggregator/pkg/controllers/openapi", visibility = ["//visibility:public"], deps = [ "//vendor/github.com/emicklei/go-restful:go_default_library", @@ -29,6 +30,7 @@ go_library( go_test( name = "go_default_test", srcs = ["aggregator_test.go"], + importpath = "k8s.io/kube-aggregator/pkg/controllers/openapi", library = ":go_default_library", deps = [ "//vendor/github.com/go-openapi/spec:go_default_library", diff --git a/vendor/k8s.io/kube-aggregator/pkg/controllers/status/BUILD b/vendor/k8s.io/kube-aggregator/pkg/controllers/status/BUILD index 6624253ca11f..c34f9c180533 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/controllers/status/BUILD +++ b/vendor/k8s.io/kube-aggregator/pkg/controllers/status/BUILD @@ -9,6 +9,7 @@ load( go_library( name = "go_default_library", srcs = ["available_controller.go"], + importpath = "k8s.io/kube-aggregator/pkg/controllers/status", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", @@ -34,6 +35,7 @@ go_library( go_test( name = "go_default_test", srcs = ["available_controller_test.go"], + importpath = "k8s.io/kube-aggregator/pkg/controllers/status", library = ":go_default_library", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/vendor/k8s.io/kube-aggregator/pkg/controllers/status/available_controller.go 
b/vendor/k8s.io/kube-aggregator/pkg/controllers/status/available_controller.go index df6a2b0f4de0..a9bbe08a1fd6 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/controllers/status/available_controller.go +++ b/vendor/k8s.io/kube-aggregator/pkg/controllers/status/available_controller.go @@ -17,7 +17,10 @@ limitations under the License. package apiserver import ( + "crypto/tls" "fmt" + "net/http" + "net/url" "time" "github.com/golang/glog" @@ -42,6 +45,10 @@ import ( "k8s.io/kube-aggregator/pkg/controllers" ) +type ServiceResolver interface { + ResolveEndpoint(namespace, name string) (*url.URL, error) +} + type AvailableConditionController struct { apiServiceClient apiregistrationclient.APIServicesGetter @@ -55,6 +62,9 @@ type AvailableConditionController struct { endpointsLister v1listers.EndpointsLister endpointsSynced cache.InformerSynced + discoveryClient *http.Client + serviceResolver ServiceResolver + // To allow injection for testing. syncFn func(key string) error @@ -66,6 +76,8 @@ func NewAvailableConditionController( serviceInformer v1informers.ServiceInformer, endpointsInformer v1informers.EndpointsInformer, apiServiceClient apiregistrationclient.APIServicesGetter, + proxyTransport *http.Transport, + serviceResolver ServiceResolver, ) *AvailableConditionController { c := &AvailableConditionController{ apiServiceClient: apiServiceClient, @@ -75,14 +87,35 @@ func NewAvailableConditionController( servicesSynced: serviceInformer.Informer().HasSynced, endpointsLister: endpointsInformer.Lister(), endpointsSynced: endpointsInformer.Informer().HasSynced, + serviceResolver: serviceResolver, queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "AvailableConditionController"), } - apiServiceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: c.addAPIService, - UpdateFunc: c.updateAPIService, - DeleteFunc: c.deleteAPIService, - }) + // construct an http client that will ignore TLS verification (if someone owns the network and messes with your status + // that's not so bad) and sets a very short timeout. + discoveryClient := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + }, + // the request should happen quickly. + Timeout: 5 * time.Second, + } + if proxyTransport != nil { + discoveryClient.Transport = proxyTransport + } + c.discoveryClient = discoveryClient + + // resync on this one because it is low cardinality and rechecking the actual discovery + // allows us to detect health in a more timely fashion when network connectivity to + // nodes is snipped, but the network still attempts to route there. See + // https://github.com/openshift/origin/issues/17159#issuecomment-341798063 + apiServiceInformer.Informer().AddEventHandlerWithResyncPeriod( + cache.ResourceEventHandlerFuncs{ + AddFunc: c.addAPIService, + UpdateFunc: c.updateAPIService, + DeleteFunc: c.deleteAPIService, + }, + 30*time.Second) serviceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: c.addService, @@ -175,8 +208,45 @@ func (c *AvailableConditionController) sync(key string) error { return err } } + // actually try to hit the discovery endpoint when it isn't local and when we're routing as a service. 
+ if apiService.Spec.Service != nil && c.serviceResolver != nil { + discoveryURL, err := c.serviceResolver.ResolveEndpoint(apiService.Spec.Service.Namespace, apiService.Spec.Service.Name) + if err != nil { + return err + } + + errCh := make(chan error) + go func() { + resp, err := c.discoveryClient.Get(discoveryURL.String()) + if resp != nil { + resp.Body.Close() + } + errCh <- err + }() - // TODO actually try to hit the discovery endpoint + select { + case err = <-errCh: + + // we had trouble with slow dial and DNS responses causing us to wait too long. + // we added this as insurance + case <-time.After(6 * time.Second): + err = fmt.Errorf("timed out waiting for %v", discoveryURL) + } + + if err != nil { + availableCondition.Status = apiregistration.ConditionFalse + availableCondition.Reason = "FailedDiscoveryCheck" + availableCondition.Message = fmt.Sprintf("no response from %v: %v", discoveryURL, err) + apiregistration.SetAPIServiceCondition(apiService, availableCondition) + _, updateErr := c.apiServiceClient.APIServices().UpdateStatus(apiService) + if updateErr != nil { + return updateErr + } + // force a requeue to make it very obvious that this will be retried at some point in the future + // along with other requeues done via service change, endpoint change, and resync + return err + } + } availableCondition.Reason = "Passed" availableCondition.Message = "all checks passed" @@ -185,7 +255,7 @@ func (c *AvailableConditionController) sync(key string) error { return err } -func (c *AvailableConditionController) Run(stopCh <-chan struct{}) { +func (c *AvailableConditionController) Run(threadiness int, stopCh <-chan struct{}) { defer utilruntime.HandleCrash() defer c.queue.ShutDown() @@ -196,9 +266,9 @@ func (c *AvailableConditionController) Run(stopCh <-chan struct{}) { return } - // only start one worker thread since its a slow moving API and the aggregation server adding bits - // aren't threadsafe - go wait.Until(c.runWorker, time.Second, stopCh) + for i := 0; i < threadiness; i++ { + go wait.Until(c.runWorker, time.Second, stopCh) + } <-stopCh } diff --git a/vendor/k8s.io/kube-aggregator/pkg/registry/apiservice/BUILD b/vendor/k8s.io/kube-aggregator/pkg/registry/apiservice/BUILD index f77b75f5bcbb..ff17cc2e9b93 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/registry/apiservice/BUILD +++ b/vendor/k8s.io/kube-aggregator/pkg/registry/apiservice/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["strategy.go"], + importpath = "k8s.io/kube-aggregator/pkg/registry/apiservice", deps = [ "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", diff --git a/vendor/k8s.io/kube-aggregator/pkg/registry/apiservice/etcd/BUILD b/vendor/k8s.io/kube-aggregator/pkg/registry/apiservice/etcd/BUILD index 1a486312dcda..0fb9c4f5ea92 100644 --- a/vendor/k8s.io/kube-aggregator/pkg/registry/apiservice/etcd/BUILD +++ b/vendor/k8s.io/kube-aggregator/pkg/registry/apiservice/etcd/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["etcd.go"], + importpath = "k8s.io/kube-aggregator/pkg/registry/apiservice/etcd", deps = [ "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", diff --git a/vendor/k8s.io/kube-aggregator/pkg/registry/apiservice/etcd/etcd.go b/vendor/k8s.io/kube-aggregator/pkg/registry/apiservice/etcd/etcd.go index 82db5783f2ff..b69f1a39d560 100644 --- 
a/vendor/k8s.io/kube-aggregator/pkg/registry/apiservice/etcd/etcd.go +++ b/vendor/k8s.io/kube-aggregator/pkg/registry/apiservice/etcd/etcd.go @@ -35,7 +35,6 @@ type REST struct { func NewREST(scheme *runtime.Scheme, optsGetter generic.RESTOptionsGetter) *REST { strategy := apiservice.NewStrategy(scheme) store := &genericregistry.Store{ - Copier: scheme, NewFunc: func() runtime.Object { return &apiregistration.APIService{} }, NewListFunc: func() runtime.Object { return &apiregistration.APIServiceList{} }, PredicateFunc: apiservice.MatchAPIService, @@ -73,6 +72,6 @@ func (r *StatusREST) New() runtime.Object { } // Update alters the status subset of an object. -func (r *StatusREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) { - return r.store.Update(ctx, name, objInfo) +func (r *StatusREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (runtime.Object, bool, error) { + return r.store.Update(ctx, name, objInfo, createValidation, updateValidation) } diff --git a/vendor/k8s.io/kube-openapi/pkg/common/common.go b/vendor/k8s.io/kube-openapi/pkg/common/common.go index 94b026edb441..fbe01cabb3b5 100644 --- a/vendor/k8s.io/kube-openapi/pkg/common/common.go +++ b/vendor/k8s.io/kube-openapi/pkg/common/common.go @@ -89,6 +89,30 @@ type Config struct { DefaultSecurity []map[string][]string } +var schemaTypeFormatMap = map[string][]string{ + "uint": {"integer", "int32"}, + "uint8": {"integer", "byte"}, + "uint16": {"integer", "int32"}, + "uint32": {"integer", "int64"}, + "uint64": {"integer", "int64"}, + "int": {"integer", "int32"}, + "int8": {"integer", "byte"}, + "int16": {"integer", "int32"}, + "int32": {"integer", "int32"}, + "int64": {"integer", "int64"}, + "byte": {"integer", "byte"}, + "float64": {"number", "double"}, + "float32": {"number", "float"}, + "bool": {"boolean", ""}, + "time.Time": {"string", "date-time"}, + "string": {"string", ""}, + "integer": {"integer", ""}, + "number": {"number", ""}, + "boolean": {"boolean", ""}, + "[]byte": {"string", "byte"}, // base64 encoded characters + "interface{}": {"object", ""}, +} + // This function is a reference for converting go (or any custom type) to a simple open API type,format pair. There are // two ways to customize spec for a type. If you add it here, a type will be converted to a simple type and the type // comment (the comment that is added before type definition) will be lost. 
The spec will still have the property @@ -129,29 +153,6 @@ type Config struct { // } // func GetOpenAPITypeFormat(typeName string) (string, string) { - schemaTypeFormatMap := map[string][]string{ - "uint": {"integer", "int32"}, - "uint8": {"integer", "byte"}, - "uint16": {"integer", "int32"}, - "uint32": {"integer", "int64"}, - "uint64": {"integer", "int64"}, - "int": {"integer", "int32"}, - "int8": {"integer", "byte"}, - "int16": {"integer", "int32"}, - "int32": {"integer", "int32"}, - "int64": {"integer", "int64"}, - "byte": {"integer", "byte"}, - "float64": {"number", "double"}, - "float32": {"number", "float"}, - "bool": {"boolean", ""}, - "time.Time": {"string", "date-time"}, - "string": {"string", ""}, - "integer": {"integer", ""}, - "number": {"number", ""}, - "boolean": {"boolean", ""}, - "[]byte": {"string", "byte"}, // base64 encoded characters - "interface{}": {"object", ""}, - } mapped, ok := schemaTypeFormatMap[typeName] if !ok { return "", "" diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/doc.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/doc.go new file mode 100644 index 000000000000..11ed8a6b7ccf --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package proto is a collection of libraries for parsing and indexing the type definitions. +// The openapi spec contains the object model definitions and extensions metadata. +package proto diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go new file mode 100644 index 000000000000..5f607c767018 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go @@ -0,0 +1,275 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package proto + +import ( + "fmt" + "sort" + "strings" + + openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2" + yaml "gopkg.in/yaml.v2" +) + +func newSchemaError(path *Path, format string, a ...interface{}) error { + err := fmt.Sprintf(format, a...) + if path.Len() == 0 { + return fmt.Errorf("SchemaError: %v", err) + } + return fmt.Errorf("SchemaError(%v): %v", path, err) +} + +// VendorExtensionToMap converts openapi VendorExtension to a map. 
+func VendorExtensionToMap(e []*openapi_v2.NamedAny) map[string]interface{} { + values := map[string]interface{}{} + + for _, na := range e { + if na.GetName() == "" || na.GetValue() == nil { + continue + } + if na.GetValue().GetYaml() == "" { + continue + } + var value interface{} + err := yaml.Unmarshal([]byte(na.GetValue().GetYaml()), &value) + if err != nil { + continue + } + + values[na.GetName()] = value + } + + return values +} + +// Definitions is an implementation of `Models`. It looks for +// models in an openapi Schema. +type Definitions struct { + models map[string]Schema +} + +var _ Models = &Definitions{} + +// NewOpenAPIData creates a new `Models` out of the openapi document. +func NewOpenAPIData(doc *openapi_v2.Document) (Models, error) { + definitions := Definitions{ + models: map[string]Schema{}, + } + + // Save the list of all models first. This will allow us to + // validate that we don't have any dangling reference. + for _, namedSchema := range doc.GetDefinitions().GetAdditionalProperties() { + definitions.models[namedSchema.GetName()] = nil + } + + // Now, parse each model. We can validate that references exists. + for _, namedSchema := range doc.GetDefinitions().GetAdditionalProperties() { + path := NewPath(namedSchema.GetName()) + schema, err := definitions.ParseSchema(namedSchema.GetValue(), &path) + if err != nil { + return nil, err + } + definitions.models[namedSchema.GetName()] = schema + } + + return &definitions, nil +} + +// We believe the schema is a reference, verify that and returns a new +// Schema +func (d *Definitions) parseReference(s *openapi_v2.Schema, path *Path) (Schema, error) { + if len(s.GetProperties().GetAdditionalProperties()) > 0 { + return nil, newSchemaError(path, "unallowed embedded type definition") + } + if len(s.GetType().GetValue()) > 0 { + return nil, newSchemaError(path, "definition reference can't have a type") + } + + if !strings.HasPrefix(s.GetXRef(), "#/definitions/") { + return nil, newSchemaError(path, "unallowed reference to non-definition %q", s.GetXRef()) + } + reference := strings.TrimPrefix(s.GetXRef(), "#/definitions/") + if _, ok := d.models[reference]; !ok { + return nil, newSchemaError(path, "unknown model in reference: %q", reference) + } + return &Ref{ + BaseSchema: d.parseBaseSchema(s, path), + reference: reference, + definitions: d, + }, nil +} + +func (d *Definitions) parseBaseSchema(s *openapi_v2.Schema, path *Path) BaseSchema { + return BaseSchema{ + Description: s.GetDescription(), + Extensions: VendorExtensionToMap(s.GetVendorExtension()), + Path: *path, + } +} + +// We believe the schema is a map, verify and return a new schema +func (d *Definitions) parseMap(s *openapi_v2.Schema, path *Path) (Schema, error) { + if len(s.GetType().GetValue()) != 0 && s.GetType().GetValue()[0] != object { + return nil, newSchemaError(path, "invalid object type") + } + if s.GetAdditionalProperties().GetSchema() == nil { + return nil, newSchemaError(path, "invalid object doesn't have additional properties") + } + sub, err := d.ParseSchema(s.GetAdditionalProperties().GetSchema(), path) + if err != nil { + return nil, err + } + return &Map{ + BaseSchema: d.parseBaseSchema(s, path), + SubType: sub, + }, nil +} + +func (d *Definitions) parsePrimitive(s *openapi_v2.Schema, path *Path) (Schema, error) { + var t string + if len(s.GetType().GetValue()) > 1 { + return nil, newSchemaError(path, "primitive can't have more than 1 type") + } + if len(s.GetType().GetValue()) == 1 { + t = s.GetType().GetValue()[0] + } + switch t { + case 
String: + case Number: + case Integer: + case Boolean: + case "": // Some models are completely empty, and can be safely ignored. + // Do nothing + default: + return nil, newSchemaError(path, "Unknown primitive type: %q", t) + } + return &Primitive{ + BaseSchema: d.parseBaseSchema(s, path), + Type: t, + Format: s.GetFormat(), + }, nil +} + +func (d *Definitions) parseArray(s *openapi_v2.Schema, path *Path) (Schema, error) { + if len(s.GetType().GetValue()) != 1 { + return nil, newSchemaError(path, "array should have exactly one type") + } + if s.GetType().GetValue()[0] != array { + return nil, newSchemaError(path, `array should have type "array"`) + } + if len(s.GetItems().GetSchema()) != 1 { + return nil, newSchemaError(path, "array should have exactly one sub-item") + } + sub, err := d.ParseSchema(s.GetItems().GetSchema()[0], path) + if err != nil { + return nil, err + } + return &Array{ + BaseSchema: d.parseBaseSchema(s, path), + SubType: sub, + }, nil +} + +func (d *Definitions) parseKind(s *openapi_v2.Schema, path *Path) (Schema, error) { + if len(s.GetType().GetValue()) != 0 && s.GetType().GetValue()[0] != object { + return nil, newSchemaError(path, "invalid object type") + } + if s.GetProperties() == nil { + return nil, newSchemaError(path, "object doesn't have properties") + } + + fields := map[string]Schema{} + + for _, namedSchema := range s.GetProperties().GetAdditionalProperties() { + var err error + path := path.FieldPath(namedSchema.GetName()) + fields[namedSchema.GetName()], err = d.ParseSchema(namedSchema.GetValue(), &path) + if err != nil { + return nil, err + } + } + + return &Kind{ + BaseSchema: d.parseBaseSchema(s, path), + RequiredFields: s.GetRequired(), + Fields: fields, + }, nil +} + +// ParseSchema creates a walkable Schema from an openapi schema. While +// this function is public, it doesn't leak through the interface. +func (d *Definitions) ParseSchema(s *openapi_v2.Schema, path *Path) (Schema, error) { + if len(s.GetType().GetValue()) == 1 { + t := s.GetType().GetValue()[0] + switch t { + case object: + return d.parseMap(s, path) + case array: + return d.parseArray(s, path) + } + + } + if s.GetXRef() != "" { + return d.parseReference(s, path) + } + if s.GetProperties() != nil { + return d.parseKind(s, path) + } + return d.parsePrimitive(s, path) +} + +// LookupModel is public through the interface of Models. It +// returns a visitable schema from the given model name. +func (d *Definitions) LookupModel(model string) Schema { + return d.models[model] +} + +func (d *Definitions) ListModels() []string { + models := []string{} + + for model := range d.models { + models = append(models, model) + } + + sort.Strings(models) + return models +} + +type Ref struct { + BaseSchema + + reference string + definitions *Definitions +} + +var _ Reference = &Ref{} + +func (r *Ref) Reference() string { + return r.reference +} + +func (r *Ref) SubSchema() Schema { + return r.definitions.models[r.reference] +} + +func (r *Ref) Accept(v SchemaVisitor) { + v.VisitReference(r) +} + +func (r *Ref) GetName() string { + return fmt.Sprintf("Reference to %q", r.reference) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go new file mode 100644 index 000000000000..02ab06d6d535 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go @@ -0,0 +1,251 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package proto + +import ( + "fmt" + "sort" + "strings" +) + +// Defines openapi types. +const ( + Integer = "integer" + Number = "number" + String = "string" + Boolean = "boolean" + + // These types are private as they should never leak, and are + // represented by actual structs. + array = "array" + object = "object" +) + +// Models interface describe a model provider. They can give you the +// schema for a specific model. +type Models interface { + LookupModel(string) Schema + ListModels() []string +} + +// SchemaVisitor is an interface that you need to implement if you want +// to "visit" an openapi schema. A dispatch on the Schema type will call +// the appropriate function based on its actual type: +// - Array is a list of one and only one given subtype +// - Map is a map of string to one and only one given subtype +// - Primitive can be string, integer, number and boolean. +// - Kind is an object with specific fields mapping to specific types. +// - Reference is a link to another definition. +type SchemaVisitor interface { + VisitArray(*Array) + VisitMap(*Map) + VisitPrimitive(*Primitive) + VisitKind(*Kind) + VisitReference(Reference) +} + +// Schema is the base definition of an openapi type. +type Schema interface { + // Giving a visitor here will let you visit the actual type. + Accept(SchemaVisitor) + + // Pretty print the name of the type. + GetName() string + // Describes how to access this field. + GetPath() *Path + // Describes the field. + GetDescription() string + // Returns type extensions. + GetExtensions() map[string]interface{} +} + +// Path helps us keep track of type paths +type Path struct { + parent *Path + key string +} + +func NewPath(key string) Path { + return Path{key: key} +} + +func (p *Path) Get() []string { + if p == nil { + return []string{} + } + if p.key == "" { + return p.parent.Get() + } + return append(p.parent.Get(), p.key) +} + +func (p *Path) Len() int { + return len(p.Get()) +} + +func (p *Path) String() string { + return strings.Join(p.Get(), "") +} + +// ArrayPath appends an array index and creates a new path +func (p *Path) ArrayPath(i int) Path { + return Path{ + parent: p, + key: fmt.Sprintf("[%d]", i), + } +} + +// FieldPath appends a field name and creates a new path +func (p *Path) FieldPath(field string) Path { + return Path{ + parent: p, + key: fmt.Sprintf(".%s", field), + } +} + +// BaseSchema holds data used by each types of schema. +type BaseSchema struct { + Description string + Extensions map[string]interface{} + + Path Path +} + +func (b *BaseSchema) GetDescription() string { + return b.Description +} + +func (b *BaseSchema) GetExtensions() map[string]interface{} { + return b.Extensions +} + +func (b *BaseSchema) GetPath() *Path { + return &b.Path +} + +// Array must have all its element of the same `SubType`. 
+type Array struct { + BaseSchema + + SubType Schema +} + +var _ Schema = &Array{} + +func (a *Array) Accept(v SchemaVisitor) { + v.VisitArray(a) +} + +func (a *Array) GetName() string { + return fmt.Sprintf("Array of %s", a.SubType.GetName()) +} + +// Kind is a complex object. It can have multiple different +// subtypes for each field, as defined in the `Fields` field. Mandatory +// fields are listed in `RequiredFields`. The key of the object is +// always of type `string`. +type Kind struct { + BaseSchema + + // Lists names of required fields. + RequiredFields []string + // Maps field names to types. + Fields map[string]Schema +} + +var _ Schema = &Kind{} + +func (k *Kind) Accept(v SchemaVisitor) { + v.VisitKind(k) +} + +func (k *Kind) GetName() string { + properties := []string{} + for key := range k.Fields { + properties = append(properties, key) + } + return fmt.Sprintf("Kind(%v)", properties) +} + +// IsRequired returns true if `field` is a required field for this type. +func (k *Kind) IsRequired(field string) bool { + for _, f := range k.RequiredFields { + if f == field { + return true + } + } + return false +} + +// Keys returns a alphabetically sorted list of keys. +func (k *Kind) Keys() []string { + keys := make([]string, 0) + for key := range k.Fields { + keys = append(keys, key) + } + sort.Strings(keys) + return keys +} + +// Map is an object who values must all be of the same `SubType`. +// The key of the object is always of type `string`. +type Map struct { + BaseSchema + + SubType Schema +} + +var _ Schema = &Map{} + +func (m *Map) Accept(v SchemaVisitor) { + v.VisitMap(m) +} + +func (m *Map) GetName() string { + return fmt.Sprintf("Map of %s", m.SubType.GetName()) +} + +// Primitive is a literal. There can be multiple types of primitives, +// and this subtype can be visited through the `subType` field. +type Primitive struct { + BaseSchema + + // Type of a primitive must be one of: integer, number, string, boolean. + Type string + Format string +} + +var _ Schema = &Primitive{} + +func (p *Primitive) Accept(v SchemaVisitor) { + v.VisitPrimitive(p) +} + +func (p *Primitive) GetName() string { + if p.Format == "" { + return p.Type + } + return fmt.Sprintf("%s (%s)", p.Type, p.Format) +} + +// Reference implementation depends on the type of document. +type Reference interface { + Schema + + Reference() string + SubSchema() Schema +} diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/errors.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/errors.go new file mode 100644 index 000000000000..b1aa8c008af8 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/errors.go @@ -0,0 +1,79 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "fmt" +) + +type errors struct { + errors []error +} + +func (e *errors) Errors() []error { + return e.errors +} + +func (e *errors) AppendErrors(err ...error) { + e.errors = append(e.errors, err...) 
+} + +type ValidationError struct { + Path string + Err error +} + +func (e ValidationError) Error() string { + return fmt.Sprintf("ValidationError(%s): %v", e.Path, e.Err) +} + +type InvalidTypeError struct { + Path string + Expected string + Actual string +} + +func (e InvalidTypeError) Error() string { + return fmt.Sprintf("invalid type for %s: got %q, expected %q", e.Path, e.Actual, e.Expected) +} + +type MissingRequiredFieldError struct { + Path string + Field string +} + +func (e MissingRequiredFieldError) Error() string { + return fmt.Sprintf("missing required field %q in %s", e.Field, e.Path) +} + +type UnknownFieldError struct { + Path string + Field string +} + +func (e UnknownFieldError) Error() string { + return fmt.Sprintf("unknown field %q in %s", e.Field, e.Path) +} + +type InvalidObjectTypeError struct { + Path string + Type string +} + +func (e InvalidObjectTypeError) Error() string { + return fmt.Sprintf("unknown object type %q in %s", e.Type, e.Path) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/types.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/types.go new file mode 100644 index 000000000000..0be7a5302f11 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/types.go @@ -0,0 +1,289 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "reflect" + "sort" + + "k8s.io/kube-openapi/pkg/util/proto" +) + +type validationItem interface { + proto.SchemaVisitor + + Errors() []error + Path() *proto.Path +} + +type baseItem struct { + errors errors + path proto.Path +} + +// Errors returns the list of errors found for this item. +func (item *baseItem) Errors() []error { + return item.errors.Errors() +} + +// AddValidationError wraps the given error into a ValidationError and +// attaches it to this item. +func (item *baseItem) AddValidationError(err error) { + item.errors.AppendErrors(ValidationError{Path: item.path.String(), Err: err}) +} + +// AddError adds a regular (non-validation related) error to the list. +func (item *baseItem) AddError(err error) { + item.errors.AppendErrors(err) +} + +// CopyErrors adds a list of errors to this item. This is useful to copy +// errors from subitems. +func (item *baseItem) CopyErrors(errs []error) { + item.errors.AppendErrors(errs...) +} + +// Path returns the path of this item, helps print useful errors. +func (item *baseItem) Path() *proto.Path { + return &item.path +} + +// mapItem represents a map entry in the yaml. 
+type mapItem struct { + baseItem + + Map map[string]interface{} +} + +func (item *mapItem) sortedKeys() []string { + sortedKeys := []string{} + for key := range item.Map { + sortedKeys = append(sortedKeys, key) + } + sort.Strings(sortedKeys) + return sortedKeys +} + +var _ validationItem = &mapItem{} + +func (item *mapItem) VisitPrimitive(schema *proto.Primitive) { + item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: schema.Type, Actual: "map"}) +} + +func (item *mapItem) VisitArray(schema *proto.Array) { + item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "array", Actual: "map"}) +} + +func (item *mapItem) VisitMap(schema *proto.Map) { + for _, key := range item.sortedKeys() { + subItem, err := itemFactory(item.Path().FieldPath(key), item.Map[key]) + if err != nil { + item.AddError(err) + continue + } + schema.SubType.Accept(subItem) + item.CopyErrors(subItem.Errors()) + } +} + +func (item *mapItem) VisitKind(schema *proto.Kind) { + // Verify each sub-field. + for _, key := range item.sortedKeys() { + if item.Map[key] == nil { + continue + } + subItem, err := itemFactory(item.Path().FieldPath(key), item.Map[key]) + if err != nil { + item.AddError(err) + continue + } + if _, ok := schema.Fields[key]; !ok { + item.AddValidationError(UnknownFieldError{Path: schema.GetPath().String(), Field: key}) + continue + } + schema.Fields[key].Accept(subItem) + item.CopyErrors(subItem.Errors()) + } + + // Verify that all required fields are present. + for _, required := range schema.RequiredFields { + if v, ok := item.Map[required]; !ok || v == nil { + item.AddValidationError(MissingRequiredFieldError{Path: schema.GetPath().String(), Field: required}) + } + } +} + +func (item *mapItem) VisitReference(schema proto.Reference) { + // passthrough + schema.SubSchema().Accept(item) +} + +// arrayItem represents a yaml array. +type arrayItem struct { + baseItem + + Array []interface{} +} + +var _ validationItem = &arrayItem{} + +func (item *arrayItem) VisitPrimitive(schema *proto.Primitive) { + item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: schema.Type, Actual: "array"}) +} + +func (item *arrayItem) VisitArray(schema *proto.Array) { + for i, v := range item.Array { + path := item.Path().ArrayPath(i) + if v == nil { + item.AddValidationError(InvalidObjectTypeError{Type: "nil", Path: path.String()}) + continue + } + subItem, err := itemFactory(path, v) + if err != nil { + item.AddError(err) + continue + } + schema.SubType.Accept(subItem) + item.CopyErrors(subItem.Errors()) + } +} + +func (item *arrayItem) VisitMap(schema *proto.Map) { + item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "array", Actual: "map"}) +} + +func (item *arrayItem) VisitKind(schema *proto.Kind) { + item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "array", Actual: "map"}) +} + +func (item *arrayItem) VisitReference(schema proto.Reference) { + // passthrough + schema.SubSchema().Accept(item) +} + +// primitiveItem represents a yaml value. +type primitiveItem struct { + baseItem + + Value interface{} + Kind string +} + +var _ validationItem = &primitiveItem{} + +func (item *primitiveItem) VisitPrimitive(schema *proto.Primitive) { + // Some types of primitives can match more than one (a number + // can be a string, but not the other way around). 
Return from + // the switch if we have a valid possible type conversion + // NOTE(apelisse): This logic is blindly copied from the + // existing swagger logic, and I'm not sure I agree with it. + switch schema.Type { + case proto.Boolean: + switch item.Kind { + case proto.Boolean: + return + } + case proto.Integer: + switch item.Kind { + case proto.Integer, proto.Number: + return + } + case proto.Number: + switch item.Kind { + case proto.Number: + return + } + case proto.String: + return + } + + item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: schema.Type, Actual: item.Kind}) +} + +func (item *primitiveItem) VisitArray(schema *proto.Array) { + item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "array", Actual: item.Kind}) +} + +func (item *primitiveItem) VisitMap(schema *proto.Map) { + item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "map", Actual: item.Kind}) +} + +func (item *primitiveItem) VisitKind(schema *proto.Kind) { + item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "map", Actual: item.Kind}) +} + +func (item *primitiveItem) VisitReference(schema proto.Reference) { + // passthrough + schema.SubSchema().Accept(item) +} + +// itemFactory creates the relevant item type/visitor based on the current yaml type. +func itemFactory(path proto.Path, v interface{}) (validationItem, error) { + // We need to special case for no-type fields in yaml (e.g. empty item in list) + if v == nil { + return nil, InvalidObjectTypeError{Type: "nil", Path: path.String()} + } + kind := reflect.TypeOf(v).Kind() + switch kind { + case reflect.Bool: + return &primitiveItem{ + baseItem: baseItem{path: path}, + Value: v, + Kind: proto.Boolean, + }, nil + case reflect.Int, + reflect.Int8, + reflect.Int16, + reflect.Int32, + reflect.Int64, + reflect.Uint, + reflect.Uint8, + reflect.Uint16, + reflect.Uint32, + reflect.Uint64: + return &primitiveItem{ + baseItem: baseItem{path: path}, + Value: v, + Kind: proto.Integer, + }, nil + case reflect.Float32, + reflect.Float64: + return &primitiveItem{ + baseItem: baseItem{path: path}, + Value: v, + Kind: proto.Number, + }, nil + case reflect.String: + return &primitiveItem{ + baseItem: baseItem{path: path}, + Value: v, + Kind: proto.String, + }, nil + case reflect.Array, + reflect.Slice: + return &arrayItem{ + baseItem: baseItem{path: path}, + Array: v.([]interface{}), + }, nil + case reflect.Map: + return &mapItem{ + baseItem: baseItem{path: path}, + Map: v.(map[string]interface{}), + }, nil + } + return nil, InvalidObjectTypeError{Type: kind.String(), Path: path.String()} +} diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/validation.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/validation.go new file mode 100644 index 000000000000..35310f637a1a --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/validation.go @@ -0,0 +1,30 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "k8s.io/kube-openapi/pkg/util/proto" +) + +func ValidateModel(obj interface{}, schema proto.Schema, name string) []error { + rootValidation, err := itemFactory(proto.NewPath(name), obj) + if err != nil { + return []error{err} + } + schema.Accept(rootValidation) + return rootValidation.Errors() +} diff --git a/vendor/k8s.io/kube-openapi/pkg/util/util.go b/vendor/k8s.io/kube-openapi/pkg/util/util.go new file mode 100644 index 000000000000..bcc0c4d4bb57 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/util.go @@ -0,0 +1,39 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import "strings" + +// ToCanonicalName converts Golang package/type name into canonical OpenAPI name. +// Examples: +// Input: k8s.io/api/core/v1.Pod +// Output: io.k8s.api.core.v1.Pod +// +// Input: k8s.io/api/core/v1 +// Output: io.k8s.api.core.v1 +func ToCanonicalName(name string) string { + nameParts := strings.Split(name, "/") + // Reverse first part. e.g., io.k8s... instead of k8s.io... + if len(nameParts) > 0 && strings.Contains(nameParts[0], ".") { + parts := strings.Split(nameParts[0], ".") + for i, j := 0, len(parts)-1; i < j; i, j = i+1, j-1 { + parts[i], parts[j] = parts[j], parts[i] + } + nameParts[0] = strings.Join(parts, ".") + } + return strings.Join(nameParts, ".") +} diff --git a/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/BUILD b/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/BUILD index d816bb0a7580..6f2497a161a2 100644 --- a/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/BUILD +++ b/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/BUILD @@ -12,13 +12,18 @@ go_library( "apiextensions.go", "server.go", ], + importpath = "k8s.io/kubernetes/cmd/kube-apiserver/app", deps = [ "//cmd/kube-apiserver/app/options:go_default_library", - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/admissionregistration:go_default_library", "//pkg/apis/apps:go_default_library", "//pkg/apis/batch:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/events:go_default_library", "//pkg/apis/extensions:go_default_library", "//pkg/apis/networking:go_default_library", + "//pkg/apis/storage:go_default_library", "//pkg/capabilities:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/client/informers/informers_generated/internalversion:go_default_library", @@ -33,6 +38,7 @@ go_library( "//pkg/kubeapiserver/server:go_default_library", "//pkg/master:go_default_library", "//pkg/master/controller/crdregistration:go_default_library", + "//pkg/master/reconcilers:go_default_library", "//pkg/master/tunneler:go_default_library", "//pkg/quota/install:go_default_library", "//pkg/registry/cachesize:go_default_library", @@ -56,6 +62,8 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/initializer:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", "//vendor/k8s.io/apiserver/pkg/server:go_default_library", @@ -67,6 +75,7 @@ go_library( "//vendor/k8s.io/apiserver/pkg/storage/etcd3/preflight:go_default_library", "//vendor/k8s.io/client-go/informers:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/rest:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", "//vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration:go_default_library", "//vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/aggregator.go b/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/aggregator.go index 28bb181b71e0..6d6e469434bb 100644 --- a/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/aggregator.go +++ b/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/aggregator.go @@ -59,7 +59,6 @@ func createAggregatorConfig(kubeAPIServerConfig genericapiserver.Config, command // copy the etcd options so we don't mutate originals. etcdOptions := *commandOptions.Etcd etcdOptions.StorageConfig.Codec = aggregatorapiserver.Codecs.LegacyCodec(v1beta1.SchemeGroupVersion) - etcdOptions.StorageConfig.Copier = aggregatorapiserver.Scheme genericConfig.RESTOptionsGetter = &genericoptions.SimpleRestOptionsFactory{Options: etcdOptions} var err error @@ -204,7 +203,9 @@ var apiVersionPriorities = map[schema.GroupVersion]priority{ {Group: "extensions", Version: "v1beta1"}: {group: 17900, version: 1}, // to my knowledge, nothing below here collides {Group: "apps", Version: "v1beta1"}: {group: 17800, version: 1}, - {Group: "apps", Version: "v1beta2"}: {group: 17800, version: 1}, + {Group: "apps", Version: "v1beta2"}: {group: 17800, version: 9}, + {Group: "apps", Version: "v1"}: {group: 17800, version: 15}, + {Group: "events.k8s.io", Version: "v1beta1"}: {group: 17750, version: 5}, {Group: "authentication.k8s.io", Version: "v1"}: {group: 17700, version: 15}, {Group: "authentication.k8s.io", Version: "v1beta1"}: {group: 17700, version: 9}, {Group: "authorization.k8s.io", Version: "v1"}: {group: 17600, version: 15}, @@ -223,8 +224,11 @@ var apiVersionPriorities = map[schema.GroupVersion]priority{ {Group: "settings.k8s.io", Version: "v1alpha1"}: {group: 16900, version: 9}, {Group: "storage.k8s.io", Version: "v1"}: {group: 16800, version: 15}, {Group: "storage.k8s.io", Version: "v1beta1"}: {group: 16800, version: 9}, + {Group: "storage.k8s.io", Version: "v1alpha1"}: {group: 16800, version: 1}, {Group: "apiextensions.k8s.io", Version: "v1beta1"}: {group: 16700, version: 9}, + {Group: "admissionregistration.k8s.io", Version: "v1beta1"}: {group: 16700, version: 12}, {Group: "admissionregistration.k8s.io", Version: "v1alpha1"}: {group: 16700, version: 9}, + {Group: "scheduling.k8s.io", Version: "v1alpha1"}: {group: 16600, version: 9}, } func apiServicesToRegister(delegateAPIServer genericapiserver.DelegationTarget, registration autoregister.AutoAPIServiceRegistration) []*apiregistration.APIService { diff --git a/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/apiextensions.go 
b/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/apiextensions.go index aa9c19fe3db4..1ef2916e7027 100644 --- a/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/apiextensions.go +++ b/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/apiextensions.go @@ -37,7 +37,6 @@ func createAPIExtensionsConfig(kubeAPIServerConfig genericapiserver.Config, exte // copy the etcd options so we don't mutate originals. etcdOptions := *commandOptions.Etcd etcdOptions.StorageConfig.Codec = apiextensionsapiserver.Codecs.LegacyCodec(v1beta1.SchemeGroupVersion) - etcdOptions.StorageConfig.Copier = apiextensionsapiserver.Scheme genericConfig.RESTOptionsGetter = &genericoptions.SimpleRestOptionsFactory{Options: etcdOptions} apiextensionsConfig := &apiextensionsapiserver.Config{ diff --git a/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/options/BUILD b/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/options/BUILD index f65e5b213d46..96087a582ebc 100644 --- a/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/options/BUILD +++ b/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/options/BUILD @@ -13,14 +13,16 @@ go_library( "plugins.go", "validation.go", ], + importpath = "k8s.io/kubernetes/cmd/kube-apiserver/app/options", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/validation:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/validation:go_default_library", "//pkg/cloudprovider/providers:go_default_library", "//pkg/features:go_default_library", "//pkg/kubeapiserver/options:go_default_library", "//pkg/kubelet/client:go_default_library", "//pkg/master/ports:go_default_library", + "//pkg/master/reconcilers:go_default_library", "//plugin/pkg/admission/admit:go_default_library", "//plugin/pkg/admission/alwayspullimages:go_default_library", "//plugin/pkg/admission/antiaffinity:go_default_library", @@ -28,9 +30,9 @@ go_library( "//plugin/pkg/admission/deny:go_default_library", "//plugin/pkg/admission/eventratelimit:go_default_library", "//plugin/pkg/admission/exec:go_default_library", + "//plugin/pkg/admission/extendedresourcetoleration:go_default_library", "//plugin/pkg/admission/gc:go_default_library", "//plugin/pkg/admission/imagepolicy:go_default_library", - "//plugin/pkg/admission/initialization:go_default_library", "//plugin/pkg/admission/initialresources:go_default_library", "//plugin/pkg/admission/limitranger:go_default_library", "//plugin/pkg/admission/namespace/autoprovision:go_default_library", @@ -38,6 +40,7 @@ go_library( "//plugin/pkg/admission/noderestriction:go_default_library", "//plugin/pkg/admission/persistentvolume/label:go_default_library", "//plugin/pkg/admission/persistentvolume/resize:go_default_library", + "//plugin/pkg/admission/persistentvolumeclaim/pvcprotection:go_default_library", "//plugin/pkg/admission/podnodeselector:go_default_library", "//plugin/pkg/admission/podpreset:go_default_library", "//plugin/pkg/admission/podtolerationrestriction:go_default_library", @@ -47,7 +50,6 @@ go_library( "//plugin/pkg/admission/securitycontext/scdeny:go_default_library", "//plugin/pkg/admission/serviceaccount:go_default_library", "//plugin/pkg/admission/storageclass/setdefault:go_default_library", - "//plugin/pkg/admission/webhook:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", @@ -59,16 +61,20 @@ go_library( go_test( name = "go_default_test", srcs = ["options_test.go"], + importpath = 
"k8s.io/kubernetes/cmd/kube-apiserver/app/options", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/kubeapiserver/options:go_default_library", "//pkg/kubelet/client:go_default_library", + "//pkg/master/reconcilers:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", "//vendor/k8s.io/apiserver/pkg/server/options:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/flag:go_default_library", + "//vendor/k8s.io/apiserver/plugin/pkg/audit/webhook:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/options/options.go b/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/options/options.go index 31cc05294340..8b95fb66678e 100644 --- a/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/options/options.go +++ b/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/options/options.go @@ -19,16 +19,18 @@ package options import ( "net" + "strings" "time" utilnet "k8s.io/apimachinery/pkg/util/net" genericoptions "k8s.io/apiserver/pkg/server/options" "k8s.io/apiserver/pkg/storage/storagebackend" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/validation" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/validation" kubeoptions "k8s.io/kubernetes/pkg/kubeapiserver/options" kubeletclient "k8s.io/kubernetes/pkg/kubelet/client" "k8s.io/kubernetes/pkg/master/ports" + "k8s.io/kubernetes/pkg/master/reconcilers" // add the kubernetes feature gates _ "k8s.io/kubernetes/pkg/features" @@ -36,9 +38,6 @@ import ( "github.com/spf13/pflag" ) -// DefaultServiceNodePortRange is the default port range for NodePort services. -var DefaultServiceNodePortRange = utilnet.PortRange{Base: 30000, Size: 2768} - // ServerRunOptions runs a kubernetes api server. 
type ServerRunOptions struct { GenericServerRunOptions *genericoptions.ServerRunOptions @@ -59,7 +58,6 @@ type ServerRunOptions struct { EventTTL time.Duration KubeletConfig kubeletclient.KubeletClientConfig KubernetesServiceNodePort int - MasterCount int MaxConnectionBytesPerSec int64 ServiceClusterIPRange net.IPNet // TODO: make this a list ServiceNodePortRange utilnet.PortRange @@ -70,13 +68,16 @@ type ServerRunOptions struct { ProxyClientKeyFile string EnableAggregatorRouting bool + + MasterCount int + EndpointReconcilerType string } // NewServerRunOptions creates a new ServerRunOptions object with default parameters func NewServerRunOptions() *ServerRunOptions { s := ServerRunOptions{ GenericServerRunOptions: genericoptions.NewServerRunOptions(), - Etcd: genericoptions.NewEtcdOptions(storagebackend.NewDefaultConfig(kubeoptions.DefaultEtcdPathPrefix, api.Scheme, nil)), + Etcd: genericoptions.NewEtcdOptions(storagebackend.NewDefaultConfig(kubeoptions.DefaultEtcdPathPrefix, nil)), SecureServing: kubeoptions.NewSecureServingOptions(), InsecureServing: kubeoptions.NewInsecureServingOptions(), Audit: genericoptions.NewAuditOptions(), @@ -88,9 +89,10 @@ func NewServerRunOptions() *ServerRunOptions { StorageSerialization: kubeoptions.NewStorageSerializationOptions(), APIEnablement: kubeoptions.NewAPIEnablementOptions(), - EnableLogsHandler: true, - EventTTL: 1 * time.Hour, - MasterCount: 1, + EnableLogsHandler: true, + EventTTL: 1 * time.Hour, + MasterCount: 1, + EndpointReconcilerType: string(reconcilers.MasterCountReconcilerType), KubeletConfig: kubeletclient.KubeletClientConfig{ Port: ports.KubeletPort, ReadOnlyPort: ports.KubeletReadOnlyPort, @@ -109,7 +111,7 @@ func NewServerRunOptions() *ServerRunOptions { EnableHttps: true, HTTPTimeout: time.Duration(5) * time.Second, }, - ServiceNodePortRange: DefaultServiceNodePortRange, + ServiceNodePortRange: kubeoptions.DefaultServiceNodePortRange, } // Overwrite the default for storage data format. s.Etcd.DefaultStorageMediaType = "application/vnd.kubernetes.protobuf" @@ -151,11 +153,15 @@ func (s *ServerRunOptions) AddFlags(fs *pflag.FlagSet) { fs.BoolVar(&s.EnableLogsHandler, "enable-logs-handler", s.EnableLogsHandler, "If true, install a /logs handler for the apiserver logs.") + // Deprecated in release 1.9 fs.StringVar(&s.SSHUser, "ssh-user", s.SSHUser, "If non-empty, use secure SSH proxy to the nodes, using this user name") + fs.MarkDeprecated("ssh-user", "This flag will be removed in a future version.") + // Deprecated in release 1.9 fs.StringVar(&s.SSHKeyfile, "ssh-keyfile", s.SSHKeyfile, "If non-empty, use secure SSH proxy to the nodes, using this user keyfile") + fs.MarkDeprecated("ssh-keyfile", "This flag will be removed in a future version.") fs.Int64Var(&s.MaxConnectionBytesPerSec, "max-connection-bytes-per-sec", s.MaxConnectionBytesPerSec, ""+ "If non-zero, throttle each user connection to this number of bytes/sec. "+ @@ -164,6 +170,9 @@ func (s *ServerRunOptions) AddFlags(fs *pflag.FlagSet) { fs.IntVar(&s.MasterCount, "apiserver-count", s.MasterCount, "The number of apiservers running in the cluster, must be a positive number.") + fs.StringVar(&s.EndpointReconcilerType, "endpoint-reconciler-type", string(s.EndpointReconcilerType), + "Use an endpoint reconciler ("+strings.Join(reconcilers.AllTypes.Names(), ", ")+")") + // See #14282 for details on how to test/try this option out. // TODO: remove this comment once this option is tested in CI. 
fs.IntVar(&s.KubernetesServiceNodePort, "kubernetes-service-node-port", s.KubernetesServiceNodePort, ""+ @@ -175,15 +184,9 @@ func (s *ServerRunOptions) AddFlags(fs *pflag.FlagSet) { "A CIDR notation IP range from which to assign service cluster IPs. This must not "+ "overlap with any IP ranges assigned to nodes for pods.") - fs.IPNetVar(&s.ServiceClusterIPRange, "portal-net", s.ServiceClusterIPRange, - "DEPRECATED: see --service-cluster-ip-range instead.") - fs.MarkDeprecated("portal-net", "see --service-cluster-ip-range instead") - fs.Var(&s.ServiceNodePortRange, "service-node-port-range", ""+ "A port range to reserve for services with NodePort visibility. "+ "Example: '30000-32767'. Inclusive at both ends of the range.") - fs.Var(&s.ServiceNodePortRange, "service-node-ports", "DEPRECATED: see --service-node-port-range instead") - fs.MarkDeprecated("service-node-ports", "see --service-node-port-range instead") // Kubelet related flags: fs.BoolVar(&s.KubeletConfig.EnableHttps, "kubelet-https", s.KubeletConfig.EnableHttps, diff --git a/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/options/plugins.go b/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/options/plugins.go index 0e86e7cfdb56..a0d2502e7f58 100644 --- a/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/options/plugins.go +++ b/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/options/plugins.go @@ -32,9 +32,9 @@ import ( "k8s.io/kubernetes/plugin/pkg/admission/deny" "k8s.io/kubernetes/plugin/pkg/admission/eventratelimit" "k8s.io/kubernetes/plugin/pkg/admission/exec" + "k8s.io/kubernetes/plugin/pkg/admission/extendedresourcetoleration" "k8s.io/kubernetes/plugin/pkg/admission/gc" "k8s.io/kubernetes/plugin/pkg/admission/imagepolicy" - "k8s.io/kubernetes/plugin/pkg/admission/initialization" "k8s.io/kubernetes/plugin/pkg/admission/initialresources" "k8s.io/kubernetes/plugin/pkg/admission/limitranger" "k8s.io/kubernetes/plugin/pkg/admission/namespace/autoprovision" @@ -42,6 +42,7 @@ import ( "k8s.io/kubernetes/plugin/pkg/admission/noderestriction" "k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/label" "k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/resize" + "k8s.io/kubernetes/plugin/pkg/admission/persistentvolumeclaim/pvcprotection" "k8s.io/kubernetes/plugin/pkg/admission/podnodeselector" "k8s.io/kubernetes/plugin/pkg/admission/podpreset" "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction" @@ -51,7 +52,6 @@ import ( "k8s.io/kubernetes/plugin/pkg/admission/securitycontext/scdeny" "k8s.io/kubernetes/plugin/pkg/admission/serviceaccount" "k8s.io/kubernetes/plugin/pkg/admission/storageclass/setdefault" - "k8s.io/kubernetes/plugin/pkg/admission/webhook" ) // RegisterAllAdmissionPlugins registers all admission plugins @@ -63,9 +63,9 @@ func RegisterAllAdmissionPlugins(plugins *admission.Plugins) { deny.Register(plugins) eventratelimit.Register(plugins) exec.Register(plugins) + extendedresourcetoleration.Register(plugins) gc.Register(plugins) imagepolicy.Register(plugins) - initialization.Register(plugins) initialresources.Register(plugins) limitranger.Register(plugins) autoprovision.Register(plugins) @@ -81,6 +81,6 @@ func RegisterAllAdmissionPlugins(plugins *admission.Plugins) { scdeny.Register(plugins) serviceaccount.Register(plugins) setdefault.Register(plugins) - webhook.Register(plugins) resize.Register(plugins) + pvcprotection.Register(plugins) } diff --git a/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/server.go b/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/server.go index 143f8cdae5ec..f578ee677b43 
100644 --- a/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/server.go +++ b/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/server.go @@ -44,6 +44,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" utilwait "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/admission" + webhookconfig "k8s.io/apiserver/pkg/admission/plugin/webhook/config" "k8s.io/apiserver/pkg/authentication/authenticator" "k8s.io/apiserver/pkg/authorization/authorizer" genericapiserver "k8s.io/apiserver/pkg/server" @@ -54,16 +55,21 @@ import ( aggregatorapiserver "k8s.io/kube-aggregator/pkg/apiserver" openapi "k8s.io/kube-openapi/pkg/common" + webhookinit "k8s.io/apiserver/pkg/admission/plugin/webhook/initializer" "k8s.io/apiserver/pkg/storage/etcd3/preflight" clientgoinformers "k8s.io/client-go/informers" clientgoclientset "k8s.io/client-go/kubernetes" - clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" "k8s.io/kubernetes/cmd/kube-apiserver/app/options" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" + "k8s.io/kubernetes/pkg/apis/admissionregistration" "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/apis/batch" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/events" "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/apis/networking" + "k8s.io/kubernetes/pkg/apis/storage" "k8s.io/kubernetes/pkg/capabilities" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion" @@ -77,6 +83,7 @@ import ( kubeoptions "k8s.io/kubernetes/pkg/kubeapiserver/options" kubeserver "k8s.io/kubernetes/pkg/kubeapiserver/server" "k8s.io/kubernetes/pkg/master" + "k8s.io/kubernetes/pkg/master/reconcilers" "k8s.io/kubernetes/pkg/master/tunneler" quotainstall "k8s.io/kubernetes/pkg/quota/install" "k8s.io/kubernetes/pkg/registry/cachesize" @@ -153,7 +160,7 @@ func CreateServerChain(runOptions *options.ServerRunOptions, stopCh <-chan struc if len(os.Getenv("KUBE_API_VERSIONS")) > 0 { if insecureServingOptions != nil { insecureHandlerChain := kubeserver.BuildInsecureHandlerChain(kubeAPIServer.GenericAPIServer.UnprotectedHandler(), kubeAPIServerConfig.GenericConfig) - if err := kubeserver.NonBlockingRun(insecureServingOptions, insecureHandlerChain, stopCh); err != nil { + if err := kubeserver.NonBlockingRun(insecureServingOptions, insecureHandlerChain, kubeAPIServerConfig.GenericConfig.RequestTimeout, stopCh); err != nil { return nil, err } } @@ -183,7 +190,7 @@ func CreateServerChain(runOptions *options.ServerRunOptions, stopCh <-chan struc if insecureServingOptions != nil { insecureHandlerChain := kubeserver.BuildInsecureHandlerChain(aggregatorServer.GenericAPIServer.UnprotectedHandler(), kubeAPIServerConfig.GenericConfig) - if err := kubeserver.NonBlockingRun(insecureServingOptions, insecureHandlerChain, stopCh); err != nil { + if err := kubeserver.NonBlockingRun(insecureServingOptions, insecureHandlerChain, kubeAPIServerConfig.GenericConfig.RequestTimeout, stopCh); err != nil { return nil, err } } @@ -262,13 +269,6 @@ func CreateKubeAPIServerConfig(s *options.ServerRunOptions, nodeTunneler tunnele return nil, nil, nil, nil, nil, utilerrors.NewAggregate(errs) } - if s.CloudProvider != nil { - // Initialize the cloudprovider once, to give it a chance to register KMS plugins, if any. 
- _, err := cloudprovider.InitCloudProvider(s.CloudProvider.CloudProvider, s.CloudProvider.CloudConfigFile) - if err != nil { - return nil, nil, nil, nil, nil, err - } - } genericConfig, sharedInformers, versionedInformers, insecureServingOptions, serviceResolver, err := BuildGenericConfig(s, proxyTransport) if err != nil { return nil, nil, nil, nil, nil, err @@ -340,7 +340,8 @@ func CreateKubeAPIServerConfig(s *options.ServerRunOptions, nodeTunneler tunnele ServiceNodePortRange: s.ServiceNodePortRange, KubernetesServiceNodePort: s.KubernetesServiceNodePort, - MasterCount: s.MasterCount, + EndpointReconcilerType: reconcilers.Type(s.EndpointReconcilerType), + MasterCount: s.MasterCount, }, } @@ -354,10 +355,11 @@ func CreateKubeAPIServerConfig(s *options.ServerRunOptions, nodeTunneler tunnele // BuildGenericConfig takes the master server options and produces the genericapiserver.Config associated with it func BuildGenericConfig(s *options.ServerRunOptions, proxyTransport *http.Transport) (*genericapiserver.Config, informers.SharedInformerFactory, clientgoinformers.SharedInformerFactory, *kubeserver.InsecureServingInfo, aggregatorapiserver.ServiceResolver, error) { - genericConfig := genericapiserver.NewConfig(api.Codecs) + genericConfig := genericapiserver.NewConfig(legacyscheme.Codecs) if err := s.GenericServerRunOptions.ApplyTo(genericConfig); err != nil { return nil, nil, nil, nil, nil, err } + insecureServingOptions, err := s.InsecureServing.ApplyTo(genericConfig) if err != nil { return nil, nil, nil, nil, nil, err @@ -375,7 +377,7 @@ func BuildGenericConfig(s *options.ServerRunOptions, proxyTransport *http.Transp return nil, nil, nil, nil, nil, err } - genericConfig.OpenAPIConfig = genericapiserver.DefaultOpenAPIConfig(generatedopenapi.GetOpenAPIDefinitions, api.Scheme) + genericConfig.OpenAPIConfig = genericapiserver.DefaultOpenAPIConfig(generatedopenapi.GetOpenAPIDefinitions, legacyscheme.Scheme) genericConfig.OpenAPIConfig.PostProcessSpec = postProcessOpenAPISpecForBackwardCompatibility genericConfig.OpenAPIConfig.Info.Title = "Kubernetes" genericConfig.SwaggerConfig = genericapiserver.DefaultSwaggerConfig() @@ -415,13 +417,10 @@ func BuildGenericConfig(s *options.ServerRunOptions, proxyTransport *http.Transp // TODO: get rid of KUBE_API_VERSIONS or define sane behaviour if set glog.Errorf("Failed to create clientset with KUBE_API_VERSIONS=%q. KUBE_API_VERSIONS is only for testing. 
Things will break.", kubeAPIVersions) } - externalClient, err := clientset.NewForConfig(genericConfig.LoopbackClientConfig) - if err != nil { - return nil, nil, nil, nil, nil, fmt.Errorf("failed to create external clientset: %v", err) - } - sharedInformers := informers.NewSharedInformerFactory(client, 10*time.Minute) - clientgoExternalClient, err := clientgoclientset.NewForConfig(genericConfig.LoopbackClientConfig) + kubeClientConfig := genericConfig.LoopbackClientConfig + sharedInformers := informers.NewSharedInformerFactory(client, 10*time.Minute) + clientgoExternalClient, err := clientgoclientset.NewForConfig(kubeClientConfig) if err != nil { return nil, nil, nil, nil, nil, fmt.Errorf("failed to create real external clientset: %v", err) } @@ -452,14 +451,27 @@ func BuildGenericConfig(s *options.ServerRunOptions, proxyTransport *http.Transp genericConfig.DisabledPostStartHooks.Insert(rbacrest.PostStartHookName) } - pluginInitializer, err := BuildAdmissionPluginInitializer( + webhookAuthResolver := func(delegate webhookconfig.AuthenticationInfoResolver) webhookconfig.AuthenticationInfoResolver { + return webhookconfig.AuthenticationInfoResolverFunc(func(server string) (*rest.Config, error) { + if server == "kubernetes.default.svc" { + return genericConfig.LoopbackClientConfig, nil + } + ret, err := delegate.ClientConfigFor(server) + if err != nil { + return nil, err + } + if proxyTransport != nil && proxyTransport.Dial != nil { + ret.Dial = proxyTransport.Dial + } + return ret, err + }) + } + pluginInitializers, err := BuildAdmissionPluginInitializers( s, client, - externalClient, sharedInformers, - genericConfig.Authorizer, serviceResolver, - proxyTransport, + webhookAuthResolver, ) if err != nil { return nil, nil, nil, nil, nil, fmt.Errorf("failed to create admission plugin initializer: %v", err) @@ -468,15 +480,18 @@ func BuildGenericConfig(s *options.ServerRunOptions, proxyTransport *http.Transp err = s.Admission.ApplyTo( genericConfig, versionedInformers, - pluginInitializer) + kubeClientConfig, + legacyscheme.Scheme, + pluginInitializers...) if err != nil { return nil, nil, nil, nil, nil, fmt.Errorf("failed to initialize admission: %v", err) } + return genericConfig, sharedInformers, versionedInformers, insecureServingOptions, serviceResolver, nil } -// BuildAdmissionPluginInitializer constructs the admission plugin initializer -func BuildAdmissionPluginInitializer(s *options.ServerRunOptions, client internalclientset.Interface, externalClient clientset.Interface, sharedInformers informers.SharedInformerFactory, apiAuthorizer authorizer.Authorizer, serviceResolver aggregatorapiserver.ServiceResolver, proxyTransport *http.Transport) (admission.PluginInitializer, error) { +// BuildAdmissionPluginInitializers constructs the admission plugin initializer +func BuildAdmissionPluginInitializers(s *options.ServerRunOptions, client internalclientset.Interface, sharedInformers informers.SharedInformerFactory, serviceResolver aggregatorapiserver.ServiceResolver, webhookAuthWrapper webhookconfig.AuthenticationInfoResolverWrapper) ([]admission.PluginInitializer, error) { var cloudConfig []byte if s.CloudProvider.CloudConfigFile != "" { @@ -488,31 +503,14 @@ func BuildAdmissionPluginInitializer(s *options.ServerRunOptions, client interna } // TODO: use a dynamic restmapper. See https://github.com/kubernetes/kubernetes/pull/42615. 
- restMapper := api.Registry.RESTMapper() + restMapper := legacyscheme.Registry.RESTMapper() - // NOTE: we do not provide informers to the quota registry because admission level decisions - // do not require us to open watches for all items tracked by quota. - quotaRegistry := quotainstall.NewRegistry(nil, nil) + quotaConfiguration := quotainstall.NewQuotaConfigurationForAdmission() - pluginInitializer := kubeapiserveradmission.NewPluginInitializer(client, externalClient, sharedInformers, apiAuthorizer, cloudConfig, restMapper, quotaRegistry) + kubePluginInitializer := kubeapiserveradmission.NewPluginInitializer(client, sharedInformers, cloudConfig, restMapper, quotaConfiguration) + webhookPluginInitializer := webhookinit.NewPluginInitializer(webhookAuthWrapper, serviceResolver) - // Read client cert/key for plugins that need to make calls out - if len(s.ProxyClientCertFile) > 0 && len(s.ProxyClientKeyFile) > 0 { - certBytes, err := ioutil.ReadFile(s.ProxyClientCertFile) - if err != nil { - return nil, err - } - keyBytes, err := ioutil.ReadFile(s.ProxyClientKeyFile) - if err != nil { - return nil, err - } - pluginInitializer = pluginInitializer.SetClientCert(certBytes, keyBytes) - } - - pluginInitializer = pluginInitializer.SetServiceResolver(serviceResolver) - pluginInitializer = pluginInitializer.SetProxyTransport(proxyTransport) - - return pluginInitializer, nil + return []admission.PluginInitializer{webhookPluginInitializer, kubePluginInitializer}, nil } // BuildAuthenticator constructs the authenticator @@ -561,20 +559,28 @@ func BuildStorageFactory(s *options.ServerRunOptions) (*serverstorage.DefaultSto return nil, fmt.Errorf("error generating storage version map: %s", err) } storageFactory, err := kubeapiserver.NewStorageFactory( - s.Etcd.StorageConfig, s.Etcd.DefaultStorageMediaType, api.Codecs, - serverstorage.NewDefaultResourceEncodingConfig(api.Registry), storageGroupsToEncodingVersion, + s.Etcd.StorageConfig, s.Etcd.DefaultStorageMediaType, legacyscheme.Codecs, + serverstorage.NewDefaultResourceEncodingConfig(legacyscheme.Registry), storageGroupsToEncodingVersion, + // The list includes resources that need to be stored in a different + // group version than other resources in the groups. 
// FIXME (soltysh): this GroupVersionResource override should be configurable - []schema.GroupVersionResource{batch.Resource("cronjobs").WithVersion("v1beta1")}, + []schema.GroupVersionResource{ + batch.Resource("cronjobs").WithVersion("v1beta1"), + storage.Resource("volumeattachments").WithVersion("v1alpha1"), + admissionregistration.Resource("initializerconfigurations").WithVersion("v1alpha1"), + }, master.DefaultAPIResourceConfigSource(), s.APIEnablement.RuntimeConfig) if err != nil { return nil, fmt.Errorf("error in initializing storage factory: %s", err) } - // keep Deployments, NetworkPolicies, Daemonsets and ReplicaSets in extensions for backwards compatibility, we'll have to migrate at some point, eventually + storageFactory.AddCohabitatingResources(networking.Resource("networkpolicies"), extensions.Resource("networkpolicies")) + + // keep Deployments, Daemonsets and ReplicaSets in extensions for backwards compatibility, we'll have to migrate at some point, eventually storageFactory.AddCohabitatingResources(extensions.Resource("deployments"), apps.Resource("deployments")) storageFactory.AddCohabitatingResources(extensions.Resource("daemonsets"), apps.Resource("daemonsets")) storageFactory.AddCohabitatingResources(extensions.Resource("replicasets"), apps.Resource("replicasets")) - storageFactory.AddCohabitatingResources(extensions.Resource("networkpolicies"), networking.Resource("networkpolicies")) + storageFactory.AddCohabitatingResources(api.Resource("events"), events.Resource("events")) for _, override := range s.Etcd.EtcdServersOverrides { tokens := strings.Split(override, "#") if len(tokens) != 2 { @@ -616,10 +622,11 @@ func defaultOptions(s *options.ServerRunOptions) error { if err := kubeoptions.DefaultAdvertiseAddress(s.GenericServerRunOptions, s.InsecureServing); err != nil { return err } - _, apiServerServiceIP, err := master.DefaultServiceIPRange(s.ServiceClusterIPRange) + serviceIPRange, apiServerServiceIP, err := master.DefaultServiceIPRange(s.ServiceClusterIPRange) if err != nil { return fmt.Errorf("error determining service IP ranges: %v", err) } + s.ServiceClusterIPRange = serviceIPRange if err := s.SecureServing.MaybeDefaultWithSelfSignedCerts(s.GenericServerRunOptions.AdvertiseAddress.String(), []string{"kubernetes.default.svc", "kubernetes.default", "kubernetes"}, []net.IP{apiServerServiceIP}); err != nil { return fmt.Errorf("error creating self-signed certificates: %v", err) } @@ -686,333 +693,333 @@ func readCAorNil(file string) ([]byte, error) { // PostProcessSpec adds removed definitions for backward compatibility func postProcessOpenAPISpecForBackwardCompatibility(s *spec.Swagger) (*spec.Swagger, error) { compatibilityMap := map[string]string{ - "io.k8s.kubernetes.pkg.apis.authorization.v1beta1.SelfSubjectAccessReview": "io.k8s.api.authorization.v1beta1.SelfSubjectAccessReview", - "io.k8s.kubernetes.pkg.api.v1.GitRepoVolumeSource": "io.k8s.api.core.v1.GitRepoVolumeSource", - "io.k8s.kubernetes.pkg.apis.admissionregistration.v1alpha1.ExternalAdmissionHookConfigurationList": "io.k8s.api.admissionregistration.v1alpha1.ExternalAdmissionHookConfigurationList", - "io.k8s.kubernetes.pkg.api.v1.EndpointPort": "io.k8s.api.core.v1.EndpointPort", - "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.SupplementalGroupsStrategyOptions": "io.k8s.api.extensions.v1beta1.SupplementalGroupsStrategyOptions", - "io.k8s.kubernetes.pkg.api.v1.PodStatus": "io.k8s.api.core.v1.PodStatus", - "io.k8s.kubernetes.pkg.apis.rbac.v1beta1.RoleBindingList": 
"io.k8s.api.rbac.v1beta1.RoleBindingList", - "io.k8s.kubernetes.pkg.apis.policy.v1beta1.PodDisruptionBudgetSpec": "io.k8s.api.policy.v1beta1.PodDisruptionBudgetSpec", - "io.k8s.kubernetes.pkg.api.v1.HTTPGetAction": "io.k8s.api.core.v1.HTTPGetAction", - "io.k8s.kubernetes.pkg.apis.authorization.v1.ResourceAttributes": "io.k8s.api.authorization.v1.ResourceAttributes", - "io.k8s.kubernetes.pkg.api.v1.PersistentVolumeList": "io.k8s.api.core.v1.PersistentVolumeList", - "io.k8s.kubernetes.pkg.apis.batch.v2alpha1.CronJobSpec": "io.k8s.api.batch.v2alpha1.CronJobSpec", - "io.k8s.kubernetes.pkg.api.v1.CephFSVolumeSource": "io.k8s.api.core.v1.CephFSVolumeSource", - "io.k8s.kubernetes.pkg.api.v1.Affinity": "io.k8s.api.core.v1.Affinity", - "io.k8s.kubernetes.pkg.apis.rbac.v1beta1.PolicyRule": "io.k8s.api.rbac.v1beta1.PolicyRule", - "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.DaemonSetSpec": "io.k8s.api.extensions.v1beta1.DaemonSetSpec", - "io.k8s.kubernetes.pkg.api.v1.ProjectedVolumeSource": "io.k8s.api.core.v1.ProjectedVolumeSource", - "io.k8s.kubernetes.pkg.api.v1.TCPSocketAction": "io.k8s.api.core.v1.TCPSocketAction", - "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.DaemonSet": "io.k8s.api.extensions.v1beta1.DaemonSet", - "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.IngressList": "io.k8s.api.extensions.v1beta1.IngressList", - "io.k8s.kubernetes.pkg.api.v1.PodSpec": "io.k8s.api.core.v1.PodSpec", - "io.k8s.kubernetes.pkg.apis.authentication.v1.TokenReview": "io.k8s.api.authentication.v1.TokenReview", - "io.k8s.kubernetes.pkg.apis.authorization.v1beta1.SubjectAccessReview": "io.k8s.api.authorization.v1beta1.SubjectAccessReview", - "io.k8s.kubernetes.pkg.apis.rbac.v1alpha1.ClusterRoleBinding": "io.k8s.api.rbac.v1alpha1.ClusterRoleBinding", - "io.k8s.kubernetes.pkg.api.v1.Node": "io.k8s.api.core.v1.Node", - "io.k8s.kubernetes.pkg.apis.admissionregistration.v1alpha1.ServiceReference": "io.k8s.api.admissionregistration.v1alpha1.ServiceReference", - "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.DeploymentStatus": "io.k8s.api.extensions.v1beta1.DeploymentStatus", - "io.k8s.kubernetes.pkg.apis.rbac.v1beta1.RoleRef": "io.k8s.api.rbac.v1beta1.RoleRef", - "io.k8s.kubernetes.pkg.apis.apps.v1beta1.Scale": "io.k8s.api.apps.v1beta1.Scale", - "io.k8s.kubernetes.pkg.apis.admissionregistration.v1alpha1.InitializerConfiguration": "io.k8s.api.admissionregistration.v1alpha1.InitializerConfiguration", - "io.k8s.kubernetes.pkg.api.v1.PhotonPersistentDiskVolumeSource": "io.k8s.api.core.v1.PhotonPersistentDiskVolumeSource", - "io.k8s.kubernetes.pkg.api.v1.PreferredSchedulingTerm": "io.k8s.api.core.v1.PreferredSchedulingTerm", - "io.k8s.kubernetes.pkg.apis.batch.v1.JobSpec": "io.k8s.api.batch.v1.JobSpec", - "io.k8s.kubernetes.pkg.api.v1.EventSource": "io.k8s.api.core.v1.EventSource", - "io.k8s.kubernetes.pkg.api.v1.Container": "io.k8s.api.core.v1.Container", - "io.k8s.kubernetes.pkg.apis.admissionregistration.v1alpha1.AdmissionHookClientConfig": "io.k8s.api.admissionregistration.v1alpha1.AdmissionHookClientConfig", - "io.k8s.kubernetes.pkg.api.v1.ResourceQuota": "io.k8s.api.core.v1.ResourceQuota", - "io.k8s.kubernetes.pkg.api.v1.SecretList": "io.k8s.api.core.v1.SecretList", - "io.k8s.kubernetes.pkg.api.v1.NodeSystemInfo": "io.k8s.api.core.v1.NodeSystemInfo", - "io.k8s.kubernetes.pkg.apis.rbac.v1alpha1.PolicyRule": "io.k8s.api.rbac.v1alpha1.PolicyRule", - "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.ReplicaSetSpec": "io.k8s.api.extensions.v1beta1.ReplicaSetSpec", - "io.k8s.kubernetes.pkg.api.v1.NodeStatus": 
"io.k8s.api.core.v1.NodeStatus", - "io.k8s.kubernetes.pkg.api.v1.ResourceQuotaList": "io.k8s.api.core.v1.ResourceQuotaList", - "io.k8s.kubernetes.pkg.api.v1.HostPathVolumeSource": "io.k8s.api.core.v1.HostPathVolumeSource", - "io.k8s.kubernetes.pkg.apis.certificates.v1beta1.CertificateSigningRequest": "io.k8s.api.certificates.v1beta1.CertificateSigningRequest", - "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.IngressRule": "io.k8s.api.extensions.v1beta1.IngressRule", - "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.NetworkPolicyPeer": "io.k8s.api.extensions.v1beta1.NetworkPolicyPeer", - "io.k8s.kubernetes.pkg.apis.storage.v1.StorageClass": "io.k8s.api.storage.v1.StorageClass", - "io.k8s.kubernetes.pkg.apis.networking.v1.NetworkPolicyPeer": "io.k8s.api.networking.v1.NetworkPolicyPeer", - "io.k8s.kubernetes.pkg.apis.networking.v1.NetworkPolicyIngressRule": "io.k8s.api.networking.v1.NetworkPolicyIngressRule", - "io.k8s.kubernetes.pkg.api.v1.StorageOSPersistentVolumeSource": "io.k8s.api.core.v1.StorageOSPersistentVolumeSource", - "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.NetworkPolicyIngressRule": "io.k8s.api.extensions.v1beta1.NetworkPolicyIngressRule", - "io.k8s.kubernetes.pkg.api.v1.PodAffinity": "io.k8s.api.core.v1.PodAffinity", - "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.RollbackConfig": "io.k8s.api.extensions.v1beta1.RollbackConfig", - "io.k8s.kubernetes.pkg.api.v1.PodList": "io.k8s.api.core.v1.PodList", - "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.ScaleStatus": "io.k8s.api.extensions.v1beta1.ScaleStatus", - "io.k8s.kubernetes.pkg.api.v1.ComponentCondition": "io.k8s.api.core.v1.ComponentCondition", - "io.k8s.kubernetes.pkg.apis.certificates.v1beta1.CertificateSigningRequestList": "io.k8s.api.certificates.v1beta1.CertificateSigningRequestList", - "io.k8s.kubernetes.pkg.apis.rbac.v1alpha1.ClusterRoleBindingList": "io.k8s.api.rbac.v1alpha1.ClusterRoleBindingList", - "io.k8s.kubernetes.pkg.apis.autoscaling.v2alpha1.HorizontalPodAutoscalerCondition": "io.k8s.api.autoscaling.v2alpha1.HorizontalPodAutoscalerCondition", - "io.k8s.kubernetes.pkg.api.v1.ServiceList": "io.k8s.api.core.v1.ServiceList", - "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.PodSecurityPolicy": "io.k8s.api.extensions.v1beta1.PodSecurityPolicy", - "io.k8s.kubernetes.pkg.apis.batch.v1.JobCondition": "io.k8s.api.batch.v1.JobCondition", - "io.k8s.kubernetes.pkg.apis.apps.v1beta1.DeploymentStatus": "io.k8s.api.apps.v1beta1.DeploymentStatus", - "io.k8s.kubernetes.pkg.api.v1.Volume": "io.k8s.api.core.v1.Volume", - "io.k8s.kubernetes.pkg.apis.rbac.v1alpha1.RoleBindingList": "io.k8s.api.rbac.v1alpha1.RoleBindingList", - "io.k8s.kubernetes.pkg.apis.admissionregistration.v1alpha1.Rule": "io.k8s.api.admissionregistration.v1alpha1.Rule", - "io.k8s.kubernetes.pkg.apis.admissionregistration.v1alpha1.InitializerConfigurationList": "io.k8s.api.admissionregistration.v1alpha1.InitializerConfigurationList", - "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.NetworkPolicy": "io.k8s.api.extensions.v1beta1.NetworkPolicy", - "io.k8s.kubernetes.pkg.apis.rbac.v1alpha1.ClusterRoleList": "io.k8s.api.rbac.v1alpha1.ClusterRoleList", - "io.k8s.kubernetes.pkg.api.v1.ObjectFieldSelector": "io.k8s.api.core.v1.ObjectFieldSelector", - "io.k8s.kubernetes.pkg.api.v1.EventList": "io.k8s.api.core.v1.EventList", - "io.k8s.kubernetes.pkg.apis.autoscaling.v2alpha1.MetricStatus": "io.k8s.api.autoscaling.v2alpha1.MetricStatus", - "io.k8s.kubernetes.pkg.apis.networking.v1.NetworkPolicyPort": "io.k8s.api.networking.v1.NetworkPolicyPort", - 
"io.k8s.kubernetes.pkg.apis.rbac.v1beta1.RoleList": "io.k8s.api.rbac.v1beta1.RoleList", - "io.k8s.kubernetes.pkg.apis.rbac.v1alpha1.RoleList": "io.k8s.api.rbac.v1alpha1.RoleList", - "io.k8s.kubernetes.pkg.apis.apps.v1beta1.DeploymentStrategy": "io.k8s.api.apps.v1beta1.DeploymentStrategy", - "io.k8s.kubernetes.pkg.apis.autoscaling.v1.CrossVersionObjectReference": "io.k8s.api.autoscaling.v1.CrossVersionObjectReference", - "io.k8s.kubernetes.pkg.api.v1.ConfigMapProjection": "io.k8s.api.core.v1.ConfigMapProjection", - "io.k8s.kubernetes.pkg.apis.autoscaling.v2alpha1.CrossVersionObjectReference": "io.k8s.api.autoscaling.v2alpha1.CrossVersionObjectReference", - "io.k8s.kubernetes.pkg.api.v1.LoadBalancerStatus": "io.k8s.api.core.v1.LoadBalancerStatus", - "io.k8s.kubernetes.pkg.api.v1.ISCSIVolumeSource": "io.k8s.api.core.v1.ISCSIVolumeSource", - "io.k8s.kubernetes.pkg.apis.apps.v1beta1.ControllerRevisionList": "io.k8s.api.apps.v1beta1.ControllerRevisionList", - "io.k8s.kubernetes.pkg.api.v1.EndpointSubset": "io.k8s.api.core.v1.EndpointSubset", - "io.k8s.kubernetes.pkg.api.v1.SELinuxOptions": "io.k8s.api.core.v1.SELinuxOptions", - "io.k8s.kubernetes.pkg.api.v1.PersistentVolumeClaimVolumeSource": "io.k8s.api.core.v1.PersistentVolumeClaimVolumeSource", - "io.k8s.kubernetes.pkg.apis.autoscaling.v2alpha1.MetricSpec": "io.k8s.api.autoscaling.v2alpha1.MetricSpec", - "io.k8s.kubernetes.pkg.apis.apps.v1beta1.StatefulSetList": "io.k8s.api.apps.v1beta1.StatefulSetList", - "io.k8s.kubernetes.pkg.apis.authorization.v1beta1.ResourceAttributes": "io.k8s.api.authorization.v1beta1.ResourceAttributes", - "io.k8s.kubernetes.pkg.api.v1.Capabilities": "io.k8s.api.core.v1.Capabilities", - "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.Deployment": "io.k8s.api.extensions.v1beta1.Deployment", - "io.k8s.kubernetes.pkg.api.v1.Binding": "io.k8s.api.core.v1.Binding", - "io.k8s.kubernetes.pkg.api.v1.ReplicationControllerList": "io.k8s.api.core.v1.ReplicationControllerList", - "io.k8s.kubernetes.pkg.apis.authorization.v1.SelfSubjectAccessReview": "io.k8s.api.authorization.v1.SelfSubjectAccessReview", - "io.k8s.kubernetes.pkg.apis.authentication.v1beta1.UserInfo": "io.k8s.api.authentication.v1beta1.UserInfo", - "io.k8s.kubernetes.pkg.api.v1.HostAlias": "io.k8s.api.core.v1.HostAlias", - "io.k8s.kubernetes.pkg.apis.apps.v1beta1.StatefulSetUpdateStrategy": "io.k8s.api.apps.v1beta1.StatefulSetUpdateStrategy", - "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.IngressSpec": "io.k8s.api.extensions.v1beta1.IngressSpec", - "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.DeploymentCondition": "io.k8s.api.extensions.v1beta1.DeploymentCondition", - "io.k8s.kubernetes.pkg.api.v1.GCEPersistentDiskVolumeSource": "io.k8s.api.core.v1.GCEPersistentDiskVolumeSource", - "io.k8s.kubernetes.pkg.apis.admissionregistration.v1alpha1.ExternalAdmissionHook": "io.k8s.api.admissionregistration.v1alpha1.ExternalAdmissionHook", - "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.Scale": "io.k8s.api.extensions.v1beta1.Scale", - "io.k8s.kubernetes.pkg.apis.autoscaling.v2alpha1.HorizontalPodAutoscalerStatus": "io.k8s.api.autoscaling.v2alpha1.HorizontalPodAutoscalerStatus", - "io.k8s.kubernetes.pkg.api.v1.FlexVolumeSource": "io.k8s.api.core.v1.FlexVolumeSource", - "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.RollingUpdateDeployment": "io.k8s.api.extensions.v1beta1.RollingUpdateDeployment", - "io.k8s.kubernetes.pkg.apis.autoscaling.v2alpha1.ObjectMetricStatus": "io.k8s.api.autoscaling.v2alpha1.ObjectMetricStatus", - "io.k8s.kubernetes.pkg.api.v1.Event": 
"io.k8s.api.core.v1.Event", - "io.k8s.kubernetes.pkg.api.v1.ResourceQuotaSpec": "io.k8s.api.core.v1.ResourceQuotaSpec", - "io.k8s.kubernetes.pkg.api.v1.Handler": "io.k8s.api.core.v1.Handler", - "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.IngressBackend": "io.k8s.api.extensions.v1beta1.IngressBackend", - "io.k8s.kubernetes.pkg.apis.rbac.v1alpha1.Role": "io.k8s.api.rbac.v1alpha1.Role", - "io.k8s.kubernetes.pkg.apis.autoscaling.v2alpha1.ObjectMetricSource": "io.k8s.api.autoscaling.v2alpha1.ObjectMetricSource", - "io.k8s.kubernetes.pkg.apis.autoscaling.v2alpha1.ResourceMetricStatus": "io.k8s.api.autoscaling.v2alpha1.ResourceMetricStatus", - "io.k8s.kubernetes.pkg.apis.autoscaling.v1.HorizontalPodAutoscalerSpec": "io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerSpec", - "io.k8s.kubernetes.pkg.api.v1.Lifecycle": "io.k8s.api.core.v1.Lifecycle", - "io.k8s.kubernetes.pkg.apis.certificates.v1beta1.CertificateSigningRequestStatus": "io.k8s.api.certificates.v1beta1.CertificateSigningRequestStatus", - "io.k8s.kubernetes.pkg.api.v1.ContainerStateRunning": "io.k8s.api.core.v1.ContainerStateRunning", - "io.k8s.kubernetes.pkg.api.v1.ServiceAccountList": "io.k8s.api.core.v1.ServiceAccountList", - "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.HostPortRange": "io.k8s.api.extensions.v1beta1.HostPortRange", - "io.k8s.kubernetes.pkg.apis.apps.v1beta1.ControllerRevision": "io.k8s.api.apps.v1beta1.ControllerRevision", - "io.k8s.kubernetes.pkg.api.v1.ReplicationControllerSpec": "io.k8s.api.core.v1.ReplicationControllerSpec", - "io.k8s.kubernetes.pkg.api.v1.ContainerStateTerminated": "io.k8s.api.core.v1.ContainerStateTerminated", - "io.k8s.kubernetes.pkg.api.v1.ReplicationControllerStatus": "io.k8s.api.core.v1.ReplicationControllerStatus", - "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.DaemonSetList": "io.k8s.api.extensions.v1beta1.DaemonSetList", - "io.k8s.kubernetes.pkg.apis.authorization.v1.SelfSubjectAccessReviewSpec": "io.k8s.api.authorization.v1.SelfSubjectAccessReviewSpec", - "io.k8s.kubernetes.pkg.api.v1.ComponentStatusList": "io.k8s.api.core.v1.ComponentStatusList", - "io.k8s.kubernetes.pkg.api.v1.ContainerStateWaiting": "io.k8s.api.core.v1.ContainerStateWaiting", - "io.k8s.kubernetes.pkg.api.v1.VolumeMount": "io.k8s.api.core.v1.VolumeMount", - "io.k8s.kubernetes.pkg.api.v1.Secret": "io.k8s.api.core.v1.Secret", - "io.k8s.kubernetes.pkg.apis.rbac.v1beta1.ClusterRoleList": "io.k8s.api.rbac.v1beta1.ClusterRoleList", - "io.k8s.kubernetes.pkg.api.v1.ConfigMapList": "io.k8s.api.core.v1.ConfigMapList", - "io.k8s.kubernetes.pkg.apis.storage.v1beta1.StorageClassList": "io.k8s.api.storage.v1beta1.StorageClassList", - "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.HTTPIngressPath": "io.k8s.api.extensions.v1beta1.HTTPIngressPath", - "io.k8s.kubernetes.pkg.apis.rbac.v1alpha1.ClusterRole": "io.k8s.api.rbac.v1alpha1.ClusterRole", - "io.k8s.kubernetes.pkg.apis.autoscaling.v2alpha1.ResourceMetricSource": "io.k8s.api.autoscaling.v2alpha1.ResourceMetricSource", - "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.DeploymentRollback": "io.k8s.api.extensions.v1beta1.DeploymentRollback", - "io.k8s.kubernetes.pkg.api.v1.PersistentVolumeClaimSpec": "io.k8s.api.core.v1.PersistentVolumeClaimSpec", - "io.k8s.kubernetes.pkg.api.v1.ReplicationController": "io.k8s.api.core.v1.ReplicationController", - "io.k8s.kubernetes.pkg.apis.apps.v1beta1.StatefulSetSpec": "io.k8s.api.apps.v1beta1.StatefulSetSpec", - "io.k8s.kubernetes.pkg.api.v1.SecurityContext": "io.k8s.api.core.v1.SecurityContext", - 
"io.k8s.kubernetes.pkg.apis.networking.v1.NetworkPolicySpec": "io.k8s.api.networking.v1.NetworkPolicySpec", - "io.k8s.kubernetes.pkg.api.v1.LocalObjectReference": "io.k8s.api.core.v1.LocalObjectReference", - "io.k8s.kubernetes.pkg.api.v1.RBDVolumeSource": "io.k8s.api.core.v1.RBDVolumeSource", - "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.NetworkPolicySpec": "io.k8s.api.extensions.v1beta1.NetworkPolicySpec", - "io.k8s.kubernetes.pkg.api.v1.KeyToPath": "io.k8s.api.core.v1.KeyToPath", - "io.k8s.kubernetes.pkg.api.v1.WeightedPodAffinityTerm": "io.k8s.api.core.v1.WeightedPodAffinityTerm", - "io.k8s.kubernetes.pkg.apis.autoscaling.v2alpha1.PodsMetricStatus": "io.k8s.api.autoscaling.v2alpha1.PodsMetricStatus", - "io.k8s.kubernetes.pkg.api.v1.NodeAddress": "io.k8s.api.core.v1.NodeAddress", - "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.Ingress": "io.k8s.api.extensions.v1beta1.Ingress", - "io.k8s.kubernetes.pkg.apis.policy.v1beta1.PodDisruptionBudget": "io.k8s.api.policy.v1beta1.PodDisruptionBudget", - "io.k8s.kubernetes.pkg.api.v1.ServicePort": "io.k8s.api.core.v1.ServicePort", - "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.IDRange": "io.k8s.api.extensions.v1beta1.IDRange", - "io.k8s.kubernetes.pkg.api.v1.SecretEnvSource": "io.k8s.api.core.v1.SecretEnvSource", - "io.k8s.kubernetes.pkg.api.v1.NodeSelector": "io.k8s.api.core.v1.NodeSelector", - "io.k8s.kubernetes.pkg.api.v1.PersistentVolumeClaimStatus": "io.k8s.api.core.v1.PersistentVolumeClaimStatus", - "io.k8s.kubernetes.pkg.apis.apps.v1beta1.DeploymentSpec": "io.k8s.api.apps.v1beta1.DeploymentSpec", - "io.k8s.kubernetes.pkg.apis.authorization.v1.NonResourceAttributes": "io.k8s.api.authorization.v1.NonResourceAttributes", - "io.k8s.kubernetes.pkg.apis.autoscaling.v1.ScaleStatus": "io.k8s.api.autoscaling.v1.ScaleStatus", - "io.k8s.kubernetes.pkg.api.v1.PodCondition": "io.k8s.api.core.v1.PodCondition", - "io.k8s.kubernetes.pkg.api.v1.PodTemplateSpec": "io.k8s.api.core.v1.PodTemplateSpec", - "io.k8s.kubernetes.pkg.apis.apps.v1beta1.StatefulSet": "io.k8s.api.apps.v1beta1.StatefulSet", - "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.NetworkPolicyPort": "io.k8s.api.extensions.v1beta1.NetworkPolicyPort", - "io.k8s.kubernetes.pkg.apis.authentication.v1beta1.TokenReview": "io.k8s.api.authentication.v1beta1.TokenReview", - "io.k8s.kubernetes.pkg.api.v1.LimitRangeSpec": "io.k8s.api.core.v1.LimitRangeSpec", - "io.k8s.kubernetes.pkg.api.v1.FlockerVolumeSource": "io.k8s.api.core.v1.FlockerVolumeSource", - "io.k8s.kubernetes.pkg.apis.policy.v1beta1.Eviction": "io.k8s.api.policy.v1beta1.Eviction", - "io.k8s.kubernetes.pkg.api.v1.PersistentVolumeClaimList": "io.k8s.api.core.v1.PersistentVolumeClaimList", - "io.k8s.kubernetes.pkg.apis.certificates.v1beta1.CertificateSigningRequestCondition": "io.k8s.api.certificates.v1beta1.CertificateSigningRequestCondition", - "io.k8s.kubernetes.pkg.api.v1.DownwardAPIVolumeFile": "io.k8s.api.core.v1.DownwardAPIVolumeFile", - "io.k8s.kubernetes.pkg.apis.authorization.v1beta1.LocalSubjectAccessReview": "io.k8s.api.authorization.v1beta1.LocalSubjectAccessReview", - "io.k8s.kubernetes.pkg.apis.apps.v1beta1.ScaleStatus": "io.k8s.api.apps.v1beta1.ScaleStatus", - "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.HTTPIngressRuleValue": "io.k8s.api.extensions.v1beta1.HTTPIngressRuleValue", - "io.k8s.kubernetes.pkg.apis.batch.v1.Job": "io.k8s.api.batch.v1.Job", - "io.k8s.kubernetes.pkg.apis.admissionregistration.v1alpha1.ExternalAdmissionHookConfiguration": 
"io.k8s.api.admissionregistration.v1alpha1.ExternalAdmissionHookConfiguration", - "io.k8s.kubernetes.pkg.apis.rbac.v1beta1.RoleBinding": "io.k8s.api.rbac.v1beta1.RoleBinding", - "io.k8s.kubernetes.pkg.api.v1.FCVolumeSource": "io.k8s.api.core.v1.FCVolumeSource", - "io.k8s.kubernetes.pkg.api.v1.EndpointAddress": "io.k8s.api.core.v1.EndpointAddress", - "io.k8s.kubernetes.pkg.api.v1.ContainerPort": "io.k8s.api.core.v1.ContainerPort", - "io.k8s.kubernetes.pkg.apis.rbac.v1beta1.ClusterRoleBinding": "io.k8s.api.rbac.v1beta1.ClusterRoleBinding", - "io.k8s.kubernetes.pkg.api.v1.GlusterfsVolumeSource": "io.k8s.api.core.v1.GlusterfsVolumeSource", - "io.k8s.kubernetes.pkg.api.v1.ResourceRequirements": "io.k8s.api.core.v1.ResourceRequirements", - "io.k8s.kubernetes.pkg.apis.apps.v1beta1.RollingUpdateDeployment": "io.k8s.api.apps.v1beta1.RollingUpdateDeployment", - "io.k8s.kubernetes.pkg.api.v1.NamespaceStatus": "io.k8s.api.core.v1.NamespaceStatus", - "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.RunAsUserStrategyOptions": "io.k8s.api.extensions.v1beta1.RunAsUserStrategyOptions", - "io.k8s.kubernetes.pkg.api.v1.Namespace": "io.k8s.api.core.v1.Namespace", - "io.k8s.kubernetes.pkg.apis.authorization.v1.SubjectAccessReviewSpec": "io.k8s.api.authorization.v1.SubjectAccessReviewSpec", - "io.k8s.kubernetes.pkg.apis.autoscaling.v2alpha1.HorizontalPodAutoscaler": "io.k8s.api.autoscaling.v2alpha1.HorizontalPodAutoscaler", - "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.ReplicaSetCondition": "io.k8s.api.extensions.v1beta1.ReplicaSetCondition", - "io.k8s.kubernetes.pkg.apis.autoscaling.v1.HorizontalPodAutoscalerStatus": "io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerStatus", - "io.k8s.kubernetes.pkg.apis.authentication.v1.TokenReviewStatus": "io.k8s.api.authentication.v1.TokenReviewStatus", - "io.k8s.kubernetes.pkg.api.v1.PersistentVolume": "io.k8s.api.core.v1.PersistentVolume", - "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.FSGroupStrategyOptions": "io.k8s.api.extensions.v1beta1.FSGroupStrategyOptions", - "io.k8s.kubernetes.pkg.api.v1.PodSecurityContext": "io.k8s.api.core.v1.PodSecurityContext", - "io.k8s.kubernetes.pkg.api.v1.PodTemplate": "io.k8s.api.core.v1.PodTemplate", - "io.k8s.kubernetes.pkg.apis.authorization.v1.LocalSubjectAccessReview": "io.k8s.api.authorization.v1.LocalSubjectAccessReview", - "io.k8s.kubernetes.pkg.api.v1.StorageOSVolumeSource": "io.k8s.api.core.v1.StorageOSVolumeSource", - "io.k8s.kubernetes.pkg.api.v1.NodeSelectorTerm": "io.k8s.api.core.v1.NodeSelectorTerm", - "io.k8s.kubernetes.pkg.apis.rbac.v1beta1.Role": "io.k8s.api.rbac.v1beta1.Role", - "io.k8s.kubernetes.pkg.api.v1.ContainerStatus": "io.k8s.api.core.v1.ContainerStatus", - "io.k8s.kubernetes.pkg.apis.authorization.v1.SubjectAccessReviewStatus": "io.k8s.api.authorization.v1.SubjectAccessReviewStatus", - "io.k8s.kubernetes.pkg.apis.authentication.v1.TokenReviewSpec": "io.k8s.api.authentication.v1.TokenReviewSpec", - "io.k8s.kubernetes.pkg.api.v1.ConfigMap": "io.k8s.api.core.v1.ConfigMap", - "io.k8s.kubernetes.pkg.api.v1.ServiceStatus": "io.k8s.api.core.v1.ServiceStatus", - "io.k8s.kubernetes.pkg.apis.authorization.v1beta1.SelfSubjectAccessReviewSpec": "io.k8s.api.authorization.v1beta1.SelfSubjectAccessReviewSpec", - "io.k8s.kubernetes.pkg.api.v1.CinderVolumeSource": "io.k8s.api.core.v1.CinderVolumeSource", - "io.k8s.kubernetes.pkg.apis.settings.v1alpha1.PodPresetSpec": "io.k8s.api.settings.v1alpha1.PodPresetSpec", - "io.k8s.kubernetes.pkg.apis.authorization.v1beta1.NonResourceAttributes": 
"io.k8s.api.authorization.v1beta1.NonResourceAttributes", - "io.k8s.kubernetes.pkg.api.v1.ContainerImage": "io.k8s.api.core.v1.ContainerImage", - "io.k8s.kubernetes.pkg.api.v1.ReplicationControllerCondition": "io.k8s.api.core.v1.ReplicationControllerCondition", - "io.k8s.kubernetes.pkg.api.v1.EmptyDirVolumeSource": "io.k8s.api.core.v1.EmptyDirVolumeSource", - "io.k8s.kubernetes.pkg.apis.autoscaling.v1.HorizontalPodAutoscalerList": "io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerList", - "io.k8s.kubernetes.pkg.apis.batch.v1.JobList": "io.k8s.api.batch.v1.JobList", - "io.k8s.kubernetes.pkg.api.v1.NFSVolumeSource": "io.k8s.api.core.v1.NFSVolumeSource", - "io.k8s.kubernetes.pkg.api.v1.Pod": "io.k8s.api.core.v1.Pod", - "io.k8s.kubernetes.pkg.api.v1.ObjectReference": "io.k8s.api.core.v1.ObjectReference", - "io.k8s.kubernetes.pkg.apis.apps.v1beta1.Deployment": "io.k8s.api.apps.v1beta1.Deployment", - "io.k8s.kubernetes.pkg.apis.storage.v1.StorageClassList": "io.k8s.api.storage.v1.StorageClassList", - "io.k8s.kubernetes.pkg.api.v1.AttachedVolume": "io.k8s.api.core.v1.AttachedVolume", - "io.k8s.kubernetes.pkg.api.v1.AWSElasticBlockStoreVolumeSource": "io.k8s.api.core.v1.AWSElasticBlockStoreVolumeSource", - "io.k8s.kubernetes.pkg.apis.batch.v2alpha1.CronJobList": "io.k8s.api.batch.v2alpha1.CronJobList", - "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.DeploymentSpec": "io.k8s.api.extensions.v1beta1.DeploymentSpec", - "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.PodSecurityPolicyList": "io.k8s.api.extensions.v1beta1.PodSecurityPolicyList", - "io.k8s.kubernetes.pkg.api.v1.PodAffinityTerm": "io.k8s.api.core.v1.PodAffinityTerm", - "io.k8s.kubernetes.pkg.api.v1.HTTPHeader": "io.k8s.api.core.v1.HTTPHeader", - "io.k8s.kubernetes.pkg.api.v1.ConfigMapKeySelector": "io.k8s.api.core.v1.ConfigMapKeySelector", - "io.k8s.kubernetes.pkg.api.v1.SecretKeySelector": "io.k8s.api.core.v1.SecretKeySelector", - "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.DeploymentList": "io.k8s.api.extensions.v1beta1.DeploymentList", - "io.k8s.kubernetes.pkg.apis.authentication.v1.UserInfo": "io.k8s.api.authentication.v1.UserInfo", - "io.k8s.kubernetes.pkg.api.v1.LoadBalancerIngress": "io.k8s.api.core.v1.LoadBalancerIngress", - "io.k8s.kubernetes.pkg.api.v1.DaemonEndpoint": "io.k8s.api.core.v1.DaemonEndpoint", - "io.k8s.kubernetes.pkg.api.v1.NodeSelectorRequirement": "io.k8s.api.core.v1.NodeSelectorRequirement", - "io.k8s.kubernetes.pkg.apis.batch.v2alpha1.CronJobStatus": "io.k8s.api.batch.v2alpha1.CronJobStatus", - "io.k8s.kubernetes.pkg.apis.autoscaling.v1.Scale": "io.k8s.api.autoscaling.v1.Scale", - "io.k8s.kubernetes.pkg.api.v1.ScaleIOVolumeSource": "io.k8s.api.core.v1.ScaleIOVolumeSource", - "io.k8s.kubernetes.pkg.api.v1.PodAntiAffinity": "io.k8s.api.core.v1.PodAntiAffinity", - "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.PodSecurityPolicySpec": "io.k8s.api.extensions.v1beta1.PodSecurityPolicySpec", - "io.k8s.kubernetes.pkg.apis.settings.v1alpha1.PodPresetList": "io.k8s.api.settings.v1alpha1.PodPresetList", - "io.k8s.kubernetes.pkg.api.v1.NodeAffinity": "io.k8s.api.core.v1.NodeAffinity", - "io.k8s.kubernetes.pkg.apis.apps.v1beta1.DeploymentCondition": "io.k8s.api.apps.v1beta1.DeploymentCondition", - "io.k8s.kubernetes.pkg.api.v1.NodeSpec": "io.k8s.api.core.v1.NodeSpec", - "io.k8s.kubernetes.pkg.apis.apps.v1beta1.StatefulSetStatus": "io.k8s.api.apps.v1beta1.StatefulSetStatus", - "io.k8s.kubernetes.pkg.apis.admissionregistration.v1alpha1.RuleWithOperations": "io.k8s.api.admissionregistration.v1alpha1.RuleWithOperations", - 
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.IngressStatus": "io.k8s.api.extensions.v1beta1.IngressStatus", - "io.k8s.kubernetes.pkg.api.v1.LimitRangeList": "io.k8s.api.core.v1.LimitRangeList", - "io.k8s.kubernetes.pkg.api.v1.AzureDiskVolumeSource": "io.k8s.api.core.v1.AzureDiskVolumeSource", - "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.ReplicaSetStatus": "io.k8s.api.extensions.v1beta1.ReplicaSetStatus", - "io.k8s.kubernetes.pkg.api.v1.ComponentStatus": "io.k8s.api.core.v1.ComponentStatus", - "io.k8s.kubernetes.pkg.apis.autoscaling.v1.HorizontalPodAutoscaler": "io.k8s.api.autoscaling.v1.HorizontalPodAutoscaler", - "io.k8s.kubernetes.pkg.apis.networking.v1.NetworkPolicy": "io.k8s.api.networking.v1.NetworkPolicy", - "io.k8s.kubernetes.pkg.apis.apps.v1beta1.RollbackConfig": "io.k8s.api.apps.v1beta1.RollbackConfig", - "io.k8s.kubernetes.pkg.api.v1.NodeCondition": "io.k8s.api.core.v1.NodeCondition", - "io.k8s.kubernetes.pkg.api.v1.DownwardAPIProjection": "io.k8s.api.core.v1.DownwardAPIProjection", - "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.SELinuxStrategyOptions": "io.k8s.api.extensions.v1beta1.SELinuxStrategyOptions", - "io.k8s.kubernetes.pkg.api.v1.NamespaceSpec": "io.k8s.api.core.v1.NamespaceSpec", - "io.k8s.kubernetes.pkg.apis.certificates.v1beta1.CertificateSigningRequestSpec": "io.k8s.api.certificates.v1beta1.CertificateSigningRequestSpec", - "io.k8s.kubernetes.pkg.api.v1.ServiceSpec": "io.k8s.api.core.v1.ServiceSpec", - "io.k8s.kubernetes.pkg.apis.authorization.v1.SubjectAccessReview": "io.k8s.api.authorization.v1.SubjectAccessReview", - "io.k8s.kubernetes.pkg.apis.apps.v1beta1.DeploymentList": "io.k8s.api.apps.v1beta1.DeploymentList", - "io.k8s.kubernetes.pkg.api.v1.Toleration": "io.k8s.api.core.v1.Toleration", - "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.NetworkPolicyList": "io.k8s.api.extensions.v1beta1.NetworkPolicyList", - "io.k8s.kubernetes.pkg.apis.autoscaling.v2alpha1.PodsMetricSource": "io.k8s.api.autoscaling.v2alpha1.PodsMetricSource", - "io.k8s.kubernetes.pkg.api.v1.EnvFromSource": "io.k8s.api.core.v1.EnvFromSource", - "io.k8s.kubernetes.pkg.apis.autoscaling.v1.ScaleSpec": "io.k8s.api.autoscaling.v1.ScaleSpec", - "io.k8s.kubernetes.pkg.api.v1.PodTemplateList": "io.k8s.api.core.v1.PodTemplateList", - "io.k8s.kubernetes.pkg.apis.autoscaling.v2alpha1.HorizontalPodAutoscalerSpec": "io.k8s.api.autoscaling.v2alpha1.HorizontalPodAutoscalerSpec", - "io.k8s.kubernetes.pkg.api.v1.SecretProjection": "io.k8s.api.core.v1.SecretProjection", - "io.k8s.kubernetes.pkg.api.v1.ResourceFieldSelector": "io.k8s.api.core.v1.ResourceFieldSelector", - "io.k8s.kubernetes.pkg.api.v1.PersistentVolumeSpec": "io.k8s.api.core.v1.PersistentVolumeSpec", - "io.k8s.kubernetes.pkg.api.v1.ConfigMapVolumeSource": "io.k8s.api.core.v1.ConfigMapVolumeSource", - "io.k8s.kubernetes.pkg.apis.autoscaling.v2alpha1.HorizontalPodAutoscalerList": "io.k8s.api.autoscaling.v2alpha1.HorizontalPodAutoscalerList", - "io.k8s.kubernetes.pkg.apis.authentication.v1beta1.TokenReviewStatus": "io.k8s.api.authentication.v1beta1.TokenReviewStatus", - "io.k8s.kubernetes.pkg.apis.networking.v1.NetworkPolicyList": "io.k8s.api.networking.v1.NetworkPolicyList", - "io.k8s.kubernetes.pkg.api.v1.Endpoints": "io.k8s.api.core.v1.Endpoints", - "io.k8s.kubernetes.pkg.api.v1.LimitRangeItem": "io.k8s.api.core.v1.LimitRangeItem", - "io.k8s.kubernetes.pkg.api.v1.ServiceAccount": "io.k8s.api.core.v1.ServiceAccount", - "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.ScaleSpec": "io.k8s.api.extensions.v1beta1.ScaleSpec", - 
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.IngressTLS": "io.k8s.api.extensions.v1beta1.IngressTLS", - "io.k8s.kubernetes.pkg.apis.batch.v2alpha1.CronJob": "io.k8s.api.batch.v2alpha1.CronJob", - "io.k8s.kubernetes.pkg.apis.rbac.v1alpha1.Subject": "io.k8s.api.rbac.v1alpha1.Subject", - "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.DaemonSetStatus": "io.k8s.api.extensions.v1beta1.DaemonSetStatus", - "io.k8s.kubernetes.pkg.apis.policy.v1beta1.PodDisruptionBudgetList": "io.k8s.api.policy.v1beta1.PodDisruptionBudgetList", - "io.k8s.kubernetes.pkg.api.v1.VsphereVirtualDiskVolumeSource": "io.k8s.api.core.v1.VsphereVirtualDiskVolumeSource", - "io.k8s.kubernetes.pkg.apis.rbac.v1alpha1.RoleRef": "io.k8s.api.rbac.v1alpha1.RoleRef", - "io.k8s.kubernetes.pkg.api.v1.PortworxVolumeSource": "io.k8s.api.core.v1.PortworxVolumeSource", - "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.ReplicaSetList": "io.k8s.api.extensions.v1beta1.ReplicaSetList", - "io.k8s.kubernetes.pkg.api.v1.VolumeProjection": "io.k8s.api.core.v1.VolumeProjection", - "io.k8s.kubernetes.pkg.apis.storage.v1beta1.StorageClass": "io.k8s.api.storage.v1beta1.StorageClass", - "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.ReplicaSet": "io.k8s.api.extensions.v1beta1.ReplicaSet", - "io.k8s.kubernetes.pkg.apis.apps.v1beta1.DeploymentRollback": "io.k8s.api.apps.v1beta1.DeploymentRollback", - "io.k8s.kubernetes.pkg.apis.rbac.v1alpha1.RoleBinding": "io.k8s.api.rbac.v1alpha1.RoleBinding", - "io.k8s.kubernetes.pkg.api.v1.AzureFileVolumeSource": "io.k8s.api.core.v1.AzureFileVolumeSource", - "io.k8s.kubernetes.pkg.apis.policy.v1beta1.PodDisruptionBudgetStatus": "io.k8s.api.policy.v1beta1.PodDisruptionBudgetStatus", - "io.k8s.kubernetes.pkg.apis.authentication.v1beta1.TokenReviewSpec": "io.k8s.api.authentication.v1beta1.TokenReviewSpec", - "io.k8s.kubernetes.pkg.api.v1.EndpointsList": "io.k8s.api.core.v1.EndpointsList", - "io.k8s.kubernetes.pkg.api.v1.ConfigMapEnvSource": "io.k8s.api.core.v1.ConfigMapEnvSource", - "io.k8s.kubernetes.pkg.apis.batch.v2alpha1.JobTemplateSpec": "io.k8s.api.batch.v2alpha1.JobTemplateSpec", - "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.DaemonSetUpdateStrategy": "io.k8s.api.extensions.v1beta1.DaemonSetUpdateStrategy", - "io.k8s.kubernetes.pkg.apis.authorization.v1beta1.SubjectAccessReviewSpec": "io.k8s.api.authorization.v1beta1.SubjectAccessReviewSpec", - "io.k8s.kubernetes.pkg.api.v1.LocalVolumeSource": "io.k8s.api.core.v1.LocalVolumeSource", - "io.k8s.kubernetes.pkg.api.v1.ContainerState": "io.k8s.api.core.v1.ContainerState", - "io.k8s.kubernetes.pkg.api.v1.Service": "io.k8s.api.core.v1.Service", - "io.k8s.kubernetes.pkg.api.v1.ExecAction": "io.k8s.api.core.v1.ExecAction", - "io.k8s.kubernetes.pkg.api.v1.Taint": "io.k8s.api.core.v1.Taint", - "io.k8s.kubernetes.pkg.apis.rbac.v1beta1.Subject": "io.k8s.api.rbac.v1beta1.Subject", - "io.k8s.kubernetes.pkg.apis.authorization.v1beta1.SubjectAccessReviewStatus": "io.k8s.api.authorization.v1beta1.SubjectAccessReviewStatus", - "io.k8s.kubernetes.pkg.apis.rbac.v1beta1.ClusterRoleBindingList": "io.k8s.api.rbac.v1beta1.ClusterRoleBindingList", - "io.k8s.kubernetes.pkg.api.v1.DownwardAPIVolumeSource": "io.k8s.api.core.v1.DownwardAPIVolumeSource", - "io.k8s.kubernetes.pkg.apis.batch.v1.JobStatus": "io.k8s.api.batch.v1.JobStatus", - "io.k8s.kubernetes.pkg.api.v1.ResourceQuotaStatus": "io.k8s.api.core.v1.ResourceQuotaStatus", - "io.k8s.kubernetes.pkg.api.v1.PersistentVolumeStatus": "io.k8s.api.core.v1.PersistentVolumeStatus", - "io.k8s.kubernetes.pkg.api.v1.PersistentVolumeClaim": 
"io.k8s.api.core.v1.PersistentVolumeClaim", - "io.k8s.kubernetes.pkg.api.v1.NodeDaemonEndpoints": "io.k8s.api.core.v1.NodeDaemonEndpoints", - "io.k8s.kubernetes.pkg.api.v1.EnvVar": "io.k8s.api.core.v1.EnvVar", - "io.k8s.kubernetes.pkg.api.v1.SecretVolumeSource": "io.k8s.api.core.v1.SecretVolumeSource", - "io.k8s.kubernetes.pkg.api.v1.EnvVarSource": "io.k8s.api.core.v1.EnvVarSource", - "io.k8s.kubernetes.pkg.apis.apps.v1beta1.RollingUpdateStatefulSetStrategy": "io.k8s.api.apps.v1beta1.RollingUpdateStatefulSetStrategy", - "io.k8s.kubernetes.pkg.apis.rbac.v1beta1.ClusterRole": "io.k8s.api.rbac.v1beta1.ClusterRole", - "io.k8s.kubernetes.pkg.apis.admissionregistration.v1alpha1.Initializer": "io.k8s.api.admissionregistration.v1alpha1.Initializer", - "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.DeploymentStrategy": "io.k8s.api.extensions.v1beta1.DeploymentStrategy", - "io.k8s.kubernetes.pkg.apis.apps.v1beta1.ScaleSpec": "io.k8s.api.apps.v1beta1.ScaleSpec", - "io.k8s.kubernetes.pkg.apis.settings.v1alpha1.PodPreset": "io.k8s.api.settings.v1alpha1.PodPreset", - "io.k8s.kubernetes.pkg.api.v1.Probe": "io.k8s.api.core.v1.Probe", - "io.k8s.kubernetes.pkg.api.v1.NamespaceList": "io.k8s.api.core.v1.NamespaceList", - "io.k8s.kubernetes.pkg.api.v1.QuobyteVolumeSource": "io.k8s.api.core.v1.QuobyteVolumeSource", - "io.k8s.kubernetes.pkg.api.v1.NodeList": "io.k8s.api.core.v1.NodeList", - "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.RollingUpdateDaemonSet": "io.k8s.api.extensions.v1beta1.RollingUpdateDaemonSet", - "io.k8s.kubernetes.pkg.api.v1.LimitRange": "io.k8s.api.core.v1.LimitRange", + "io.k8s.kubernetes.pkg.apis.authorization.v1beta1.SelfSubjectAccessReview": "io.k8s.api.authorization.v1beta1.SelfSubjectAccessReview", + "io.k8s.kubernetes.pkg.api.v1.GitRepoVolumeSource": "io.k8s.api.core.v1.GitRepoVolumeSource", + "io.k8s.kubernetes.pkg.apis.admissionregistration.v1alpha1.ValidatingWebhookConfigurationList": "io.k8s.api.admissionregistration.v1alpha1.ValidatingWebhookConfigurationList", + "io.k8s.kubernetes.pkg.api.v1.EndpointPort": "io.k8s.api.core.v1.EndpointPort", + "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.SupplementalGroupsStrategyOptions": "io.k8s.api.extensions.v1beta1.SupplementalGroupsStrategyOptions", + "io.k8s.kubernetes.pkg.api.v1.PodStatus": "io.k8s.api.core.v1.PodStatus", + "io.k8s.kubernetes.pkg.apis.rbac.v1beta1.RoleBindingList": "io.k8s.api.rbac.v1beta1.RoleBindingList", + "io.k8s.kubernetes.pkg.apis.policy.v1beta1.PodDisruptionBudgetSpec": "io.k8s.api.policy.v1beta1.PodDisruptionBudgetSpec", + "io.k8s.kubernetes.pkg.api.v1.HTTPGetAction": "io.k8s.api.core.v1.HTTPGetAction", + "io.k8s.kubernetes.pkg.apis.authorization.v1.ResourceAttributes": "io.k8s.api.authorization.v1.ResourceAttributes", + "io.k8s.kubernetes.pkg.api.v1.PersistentVolumeList": "io.k8s.api.core.v1.PersistentVolumeList", + "io.k8s.kubernetes.pkg.apis.batch.v2alpha1.CronJobSpec": "io.k8s.api.batch.v2alpha1.CronJobSpec", + "io.k8s.kubernetes.pkg.api.v1.CephFSVolumeSource": "io.k8s.api.core.v1.CephFSVolumeSource", + "io.k8s.kubernetes.pkg.api.v1.Affinity": "io.k8s.api.core.v1.Affinity", + "io.k8s.kubernetes.pkg.apis.rbac.v1beta1.PolicyRule": "io.k8s.api.rbac.v1beta1.PolicyRule", + "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.DaemonSetSpec": "io.k8s.api.extensions.v1beta1.DaemonSetSpec", + "io.k8s.kubernetes.pkg.api.v1.ProjectedVolumeSource": "io.k8s.api.core.v1.ProjectedVolumeSource", + "io.k8s.kubernetes.pkg.api.v1.TCPSocketAction": "io.k8s.api.core.v1.TCPSocketAction", + 
"io.k8s.kubernetes.pkg.apis.extensions.v1beta1.DaemonSet": "io.k8s.api.extensions.v1beta1.DaemonSet", + "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.IngressList": "io.k8s.api.extensions.v1beta1.IngressList", + "io.k8s.kubernetes.pkg.api.v1.PodSpec": "io.k8s.api.core.v1.PodSpec", + "io.k8s.kubernetes.pkg.apis.authentication.v1.TokenReview": "io.k8s.api.authentication.v1.TokenReview", + "io.k8s.kubernetes.pkg.apis.authorization.v1beta1.SubjectAccessReview": "io.k8s.api.authorization.v1beta1.SubjectAccessReview", + "io.k8s.kubernetes.pkg.apis.rbac.v1alpha1.ClusterRoleBinding": "io.k8s.api.rbac.v1alpha1.ClusterRoleBinding", + "io.k8s.kubernetes.pkg.api.v1.Node": "io.k8s.api.core.v1.Node", + "io.k8s.kubernetes.pkg.apis.admissionregistration.v1alpha1.ServiceReference": "io.k8s.api.admissionregistration.v1alpha1.ServiceReference", + "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.DeploymentStatus": "io.k8s.api.extensions.v1beta1.DeploymentStatus", + "io.k8s.kubernetes.pkg.apis.rbac.v1beta1.RoleRef": "io.k8s.api.rbac.v1beta1.RoleRef", + "io.k8s.kubernetes.pkg.apis.apps.v1beta1.Scale": "io.k8s.api.apps.v1beta1.Scale", + "io.k8s.kubernetes.pkg.apis.admissionregistration.v1alpha1.InitializerConfiguration": "io.k8s.api.admissionregistration.v1alpha1.InitializerConfiguration", + "io.k8s.kubernetes.pkg.api.v1.PhotonPersistentDiskVolumeSource": "io.k8s.api.core.v1.PhotonPersistentDiskVolumeSource", + "io.k8s.kubernetes.pkg.api.v1.PreferredSchedulingTerm": "io.k8s.api.core.v1.PreferredSchedulingTerm", + "io.k8s.kubernetes.pkg.apis.batch.v1.JobSpec": "io.k8s.api.batch.v1.JobSpec", + "io.k8s.kubernetes.pkg.api.v1.EventSource": "io.k8s.api.core.v1.EventSource", + "io.k8s.kubernetes.pkg.api.v1.Container": "io.k8s.api.core.v1.Container", + "io.k8s.kubernetes.pkg.apis.admissionregistration.v1alpha1.AdmissionHookClientConfig": "io.k8s.api.admissionregistration.v1alpha1.AdmissionHookClientConfig", + "io.k8s.kubernetes.pkg.api.v1.ResourceQuota": "io.k8s.api.core.v1.ResourceQuota", + "io.k8s.kubernetes.pkg.api.v1.SecretList": "io.k8s.api.core.v1.SecretList", + "io.k8s.kubernetes.pkg.api.v1.NodeSystemInfo": "io.k8s.api.core.v1.NodeSystemInfo", + "io.k8s.kubernetes.pkg.apis.rbac.v1alpha1.PolicyRule": "io.k8s.api.rbac.v1alpha1.PolicyRule", + "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.ReplicaSetSpec": "io.k8s.api.extensions.v1beta1.ReplicaSetSpec", + "io.k8s.kubernetes.pkg.api.v1.NodeStatus": "io.k8s.api.core.v1.NodeStatus", + "io.k8s.kubernetes.pkg.api.v1.ResourceQuotaList": "io.k8s.api.core.v1.ResourceQuotaList", + "io.k8s.kubernetes.pkg.api.v1.HostPathVolumeSource": "io.k8s.api.core.v1.HostPathVolumeSource", + "io.k8s.kubernetes.pkg.apis.certificates.v1beta1.CertificateSigningRequest": "io.k8s.api.certificates.v1beta1.CertificateSigningRequest", + "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.IngressRule": "io.k8s.api.extensions.v1beta1.IngressRule", + "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.NetworkPolicyPeer": "io.k8s.api.extensions.v1beta1.NetworkPolicyPeer", + "io.k8s.kubernetes.pkg.apis.storage.v1.StorageClass": "io.k8s.api.storage.v1.StorageClass", + "io.k8s.kubernetes.pkg.apis.networking.v1.NetworkPolicyPeer": "io.k8s.api.networking.v1.NetworkPolicyPeer", + "io.k8s.kubernetes.pkg.apis.networking.v1.NetworkPolicyIngressRule": "io.k8s.api.networking.v1.NetworkPolicyIngressRule", + "io.k8s.kubernetes.pkg.api.v1.StorageOSPersistentVolumeSource": "io.k8s.api.core.v1.StorageOSPersistentVolumeSource", + "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.NetworkPolicyIngressRule": 
"io.k8s.api.extensions.v1beta1.NetworkPolicyIngressRule", + "io.k8s.kubernetes.pkg.api.v1.PodAffinity": "io.k8s.api.core.v1.PodAffinity", + "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.RollbackConfig": "io.k8s.api.extensions.v1beta1.RollbackConfig", + "io.k8s.kubernetes.pkg.api.v1.PodList": "io.k8s.api.core.v1.PodList", + "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.ScaleStatus": "io.k8s.api.extensions.v1beta1.ScaleStatus", + "io.k8s.kubernetes.pkg.api.v1.ComponentCondition": "io.k8s.api.core.v1.ComponentCondition", + "io.k8s.kubernetes.pkg.apis.certificates.v1beta1.CertificateSigningRequestList": "io.k8s.api.certificates.v1beta1.CertificateSigningRequestList", + "io.k8s.kubernetes.pkg.apis.rbac.v1alpha1.ClusterRoleBindingList": "io.k8s.api.rbac.v1alpha1.ClusterRoleBindingList", + "io.k8s.kubernetes.pkg.apis.autoscaling.v2alpha1.HorizontalPodAutoscalerCondition": "io.k8s.api.autoscaling.v2alpha1.HorizontalPodAutoscalerCondition", + "io.k8s.kubernetes.pkg.api.v1.ServiceList": "io.k8s.api.core.v1.ServiceList", + "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.PodSecurityPolicy": "io.k8s.api.extensions.v1beta1.PodSecurityPolicy", + "io.k8s.kubernetes.pkg.apis.batch.v1.JobCondition": "io.k8s.api.batch.v1.JobCondition", + "io.k8s.kubernetes.pkg.apis.apps.v1beta1.DeploymentStatus": "io.k8s.api.apps.v1beta1.DeploymentStatus", + "io.k8s.kubernetes.pkg.api.v1.Volume": "io.k8s.api.core.v1.Volume", + "io.k8s.kubernetes.pkg.apis.rbac.v1alpha1.RoleBindingList": "io.k8s.api.rbac.v1alpha1.RoleBindingList", + "io.k8s.kubernetes.pkg.apis.admissionregistration.v1alpha1.Rule": "io.k8s.api.admissionregistration.v1alpha1.Rule", + "io.k8s.kubernetes.pkg.apis.admissionregistration.v1alpha1.InitializerConfigurationList": "io.k8s.api.admissionregistration.v1alpha1.InitializerConfigurationList", + "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.NetworkPolicy": "io.k8s.api.extensions.v1beta1.NetworkPolicy", + "io.k8s.kubernetes.pkg.apis.rbac.v1alpha1.ClusterRoleList": "io.k8s.api.rbac.v1alpha1.ClusterRoleList", + "io.k8s.kubernetes.pkg.api.v1.ObjectFieldSelector": "io.k8s.api.core.v1.ObjectFieldSelector", + "io.k8s.kubernetes.pkg.api.v1.EventList": "io.k8s.api.core.v1.EventList", + "io.k8s.kubernetes.pkg.apis.autoscaling.v2alpha1.MetricStatus": "io.k8s.api.autoscaling.v2alpha1.MetricStatus", + "io.k8s.kubernetes.pkg.apis.networking.v1.NetworkPolicyPort": "io.k8s.api.networking.v1.NetworkPolicyPort", + "io.k8s.kubernetes.pkg.apis.rbac.v1beta1.RoleList": "io.k8s.api.rbac.v1beta1.RoleList", + "io.k8s.kubernetes.pkg.apis.rbac.v1alpha1.RoleList": "io.k8s.api.rbac.v1alpha1.RoleList", + "io.k8s.kubernetes.pkg.apis.apps.v1beta1.DeploymentStrategy": "io.k8s.api.apps.v1beta1.DeploymentStrategy", + "io.k8s.kubernetes.pkg.apis.autoscaling.v1.CrossVersionObjectReference": "io.k8s.api.autoscaling.v1.CrossVersionObjectReference", + "io.k8s.kubernetes.pkg.api.v1.ConfigMapProjection": "io.k8s.api.core.v1.ConfigMapProjection", + "io.k8s.kubernetes.pkg.apis.autoscaling.v2alpha1.CrossVersionObjectReference": "io.k8s.api.autoscaling.v2alpha1.CrossVersionObjectReference", + "io.k8s.kubernetes.pkg.api.v1.LoadBalancerStatus": "io.k8s.api.core.v1.LoadBalancerStatus", + "io.k8s.kubernetes.pkg.api.v1.ISCSIVolumeSource": "io.k8s.api.core.v1.ISCSIVolumeSource", + "io.k8s.kubernetes.pkg.apis.apps.v1beta1.ControllerRevisionList": "io.k8s.api.apps.v1beta1.ControllerRevisionList", + "io.k8s.kubernetes.pkg.api.v1.EndpointSubset": "io.k8s.api.core.v1.EndpointSubset", + "io.k8s.kubernetes.pkg.api.v1.SELinuxOptions": 
"io.k8s.api.core.v1.SELinuxOptions", + "io.k8s.kubernetes.pkg.api.v1.PersistentVolumeClaimVolumeSource": "io.k8s.api.core.v1.PersistentVolumeClaimVolumeSource", + "io.k8s.kubernetes.pkg.apis.autoscaling.v2alpha1.MetricSpec": "io.k8s.api.autoscaling.v2alpha1.MetricSpec", + "io.k8s.kubernetes.pkg.apis.apps.v1beta1.StatefulSetList": "io.k8s.api.apps.v1beta1.StatefulSetList", + "io.k8s.kubernetes.pkg.apis.authorization.v1beta1.ResourceAttributes": "io.k8s.api.authorization.v1beta1.ResourceAttributes", + "io.k8s.kubernetes.pkg.api.v1.Capabilities": "io.k8s.api.core.v1.Capabilities", + "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.Deployment": "io.k8s.api.extensions.v1beta1.Deployment", + "io.k8s.kubernetes.pkg.api.v1.Binding": "io.k8s.api.core.v1.Binding", + "io.k8s.kubernetes.pkg.api.v1.ReplicationControllerList": "io.k8s.api.core.v1.ReplicationControllerList", + "io.k8s.kubernetes.pkg.apis.authorization.v1.SelfSubjectAccessReview": "io.k8s.api.authorization.v1.SelfSubjectAccessReview", + "io.k8s.kubernetes.pkg.apis.authentication.v1beta1.UserInfo": "io.k8s.api.authentication.v1beta1.UserInfo", + "io.k8s.kubernetes.pkg.api.v1.HostAlias": "io.k8s.api.core.v1.HostAlias", + "io.k8s.kubernetes.pkg.apis.apps.v1beta1.StatefulSetUpdateStrategy": "io.k8s.api.apps.v1beta1.StatefulSetUpdateStrategy", + "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.IngressSpec": "io.k8s.api.extensions.v1beta1.IngressSpec", + "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.DeploymentCondition": "io.k8s.api.extensions.v1beta1.DeploymentCondition", + "io.k8s.kubernetes.pkg.api.v1.GCEPersistentDiskVolumeSource": "io.k8s.api.core.v1.GCEPersistentDiskVolumeSource", + "io.k8s.kubernetes.pkg.apis.admissionregistration.v1alpha1.Webhook": "io.k8s.api.admissionregistration.v1alpha1.Webhook", + "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.Scale": "io.k8s.api.extensions.v1beta1.Scale", + "io.k8s.kubernetes.pkg.apis.autoscaling.v2alpha1.HorizontalPodAutoscalerStatus": "io.k8s.api.autoscaling.v2alpha1.HorizontalPodAutoscalerStatus", + "io.k8s.kubernetes.pkg.api.v1.FlexVolumeSource": "io.k8s.api.core.v1.FlexVolumeSource", + "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.RollingUpdateDeployment": "io.k8s.api.extensions.v1beta1.RollingUpdateDeployment", + "io.k8s.kubernetes.pkg.apis.autoscaling.v2alpha1.ObjectMetricStatus": "io.k8s.api.autoscaling.v2alpha1.ObjectMetricStatus", + "io.k8s.kubernetes.pkg.api.v1.Event": "io.k8s.api.core.v1.Event", + "io.k8s.kubernetes.pkg.api.v1.ResourceQuotaSpec": "io.k8s.api.core.v1.ResourceQuotaSpec", + "io.k8s.kubernetes.pkg.api.v1.Handler": "io.k8s.api.core.v1.Handler", + "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.IngressBackend": "io.k8s.api.extensions.v1beta1.IngressBackend", + "io.k8s.kubernetes.pkg.apis.rbac.v1alpha1.Role": "io.k8s.api.rbac.v1alpha1.Role", + "io.k8s.kubernetes.pkg.apis.autoscaling.v2alpha1.ObjectMetricSource": "io.k8s.api.autoscaling.v2alpha1.ObjectMetricSource", + "io.k8s.kubernetes.pkg.apis.autoscaling.v2alpha1.ResourceMetricStatus": "io.k8s.api.autoscaling.v2alpha1.ResourceMetricStatus", + "io.k8s.kubernetes.pkg.apis.autoscaling.v1.HorizontalPodAutoscalerSpec": "io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerSpec", + "io.k8s.kubernetes.pkg.api.v1.Lifecycle": "io.k8s.api.core.v1.Lifecycle", + "io.k8s.kubernetes.pkg.apis.certificates.v1beta1.CertificateSigningRequestStatus": "io.k8s.api.certificates.v1beta1.CertificateSigningRequestStatus", + "io.k8s.kubernetes.pkg.api.v1.ContainerStateRunning": "io.k8s.api.core.v1.ContainerStateRunning", + 
"io.k8s.kubernetes.pkg.api.v1.ServiceAccountList": "io.k8s.api.core.v1.ServiceAccountList", + "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.HostPortRange": "io.k8s.api.extensions.v1beta1.HostPortRange", + "io.k8s.kubernetes.pkg.apis.apps.v1beta1.ControllerRevision": "io.k8s.api.apps.v1beta1.ControllerRevision", + "io.k8s.kubernetes.pkg.api.v1.ReplicationControllerSpec": "io.k8s.api.core.v1.ReplicationControllerSpec", + "io.k8s.kubernetes.pkg.api.v1.ContainerStateTerminated": "io.k8s.api.core.v1.ContainerStateTerminated", + "io.k8s.kubernetes.pkg.api.v1.ReplicationControllerStatus": "io.k8s.api.core.v1.ReplicationControllerStatus", + "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.DaemonSetList": "io.k8s.api.extensions.v1beta1.DaemonSetList", + "io.k8s.kubernetes.pkg.apis.authorization.v1.SelfSubjectAccessReviewSpec": "io.k8s.api.authorization.v1.SelfSubjectAccessReviewSpec", + "io.k8s.kubernetes.pkg.api.v1.ComponentStatusList": "io.k8s.api.core.v1.ComponentStatusList", + "io.k8s.kubernetes.pkg.api.v1.ContainerStateWaiting": "io.k8s.api.core.v1.ContainerStateWaiting", + "io.k8s.kubernetes.pkg.api.v1.VolumeMount": "io.k8s.api.core.v1.VolumeMount", + "io.k8s.kubernetes.pkg.api.v1.Secret": "io.k8s.api.core.v1.Secret", + "io.k8s.kubernetes.pkg.apis.rbac.v1beta1.ClusterRoleList": "io.k8s.api.rbac.v1beta1.ClusterRoleList", + "io.k8s.kubernetes.pkg.api.v1.ConfigMapList": "io.k8s.api.core.v1.ConfigMapList", + "io.k8s.kubernetes.pkg.apis.storage.v1beta1.StorageClassList": "io.k8s.api.storage.v1beta1.StorageClassList", + "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.HTTPIngressPath": "io.k8s.api.extensions.v1beta1.HTTPIngressPath", + "io.k8s.kubernetes.pkg.apis.rbac.v1alpha1.ClusterRole": "io.k8s.api.rbac.v1alpha1.ClusterRole", + "io.k8s.kubernetes.pkg.apis.autoscaling.v2alpha1.ResourceMetricSource": "io.k8s.api.autoscaling.v2alpha1.ResourceMetricSource", + "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.DeploymentRollback": "io.k8s.api.extensions.v1beta1.DeploymentRollback", + "io.k8s.kubernetes.pkg.api.v1.PersistentVolumeClaimSpec": "io.k8s.api.core.v1.PersistentVolumeClaimSpec", + "io.k8s.kubernetes.pkg.api.v1.ReplicationController": "io.k8s.api.core.v1.ReplicationController", + "io.k8s.kubernetes.pkg.apis.apps.v1beta1.StatefulSetSpec": "io.k8s.api.apps.v1beta1.StatefulSetSpec", + "io.k8s.kubernetes.pkg.api.v1.SecurityContext": "io.k8s.api.core.v1.SecurityContext", + "io.k8s.kubernetes.pkg.apis.networking.v1.NetworkPolicySpec": "io.k8s.api.networking.v1.NetworkPolicySpec", + "io.k8s.kubernetes.pkg.api.v1.LocalObjectReference": "io.k8s.api.core.v1.LocalObjectReference", + "io.k8s.kubernetes.pkg.api.v1.RBDVolumeSource": "io.k8s.api.core.v1.RBDVolumeSource", + "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.NetworkPolicySpec": "io.k8s.api.extensions.v1beta1.NetworkPolicySpec", + "io.k8s.kubernetes.pkg.api.v1.KeyToPath": "io.k8s.api.core.v1.KeyToPath", + "io.k8s.kubernetes.pkg.api.v1.WeightedPodAffinityTerm": "io.k8s.api.core.v1.WeightedPodAffinityTerm", + "io.k8s.kubernetes.pkg.apis.autoscaling.v2alpha1.PodsMetricStatus": "io.k8s.api.autoscaling.v2alpha1.PodsMetricStatus", + "io.k8s.kubernetes.pkg.api.v1.NodeAddress": "io.k8s.api.core.v1.NodeAddress", + "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.Ingress": "io.k8s.api.extensions.v1beta1.Ingress", + "io.k8s.kubernetes.pkg.apis.policy.v1beta1.PodDisruptionBudget": "io.k8s.api.policy.v1beta1.PodDisruptionBudget", + "io.k8s.kubernetes.pkg.api.v1.ServicePort": "io.k8s.api.core.v1.ServicePort", + "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.IDRange": 
"io.k8s.api.extensions.v1beta1.IDRange", + "io.k8s.kubernetes.pkg.api.v1.SecretEnvSource": "io.k8s.api.core.v1.SecretEnvSource", + "io.k8s.kubernetes.pkg.api.v1.NodeSelector": "io.k8s.api.core.v1.NodeSelector", + "io.k8s.kubernetes.pkg.api.v1.PersistentVolumeClaimStatus": "io.k8s.api.core.v1.PersistentVolumeClaimStatus", + "io.k8s.kubernetes.pkg.apis.apps.v1beta1.DeploymentSpec": "io.k8s.api.apps.v1beta1.DeploymentSpec", + "io.k8s.kubernetes.pkg.apis.authorization.v1.NonResourceAttributes": "io.k8s.api.authorization.v1.NonResourceAttributes", + "io.k8s.kubernetes.pkg.apis.autoscaling.v1.ScaleStatus": "io.k8s.api.autoscaling.v1.ScaleStatus", + "io.k8s.kubernetes.pkg.api.v1.PodCondition": "io.k8s.api.core.v1.PodCondition", + "io.k8s.kubernetes.pkg.api.v1.PodTemplateSpec": "io.k8s.api.core.v1.PodTemplateSpec", + "io.k8s.kubernetes.pkg.apis.apps.v1beta1.StatefulSet": "io.k8s.api.apps.v1beta1.StatefulSet", + "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.NetworkPolicyPort": "io.k8s.api.extensions.v1beta1.NetworkPolicyPort", + "io.k8s.kubernetes.pkg.apis.authentication.v1beta1.TokenReview": "io.k8s.api.authentication.v1beta1.TokenReview", + "io.k8s.kubernetes.pkg.api.v1.LimitRangeSpec": "io.k8s.api.core.v1.LimitRangeSpec", + "io.k8s.kubernetes.pkg.api.v1.FlockerVolumeSource": "io.k8s.api.core.v1.FlockerVolumeSource", + "io.k8s.kubernetes.pkg.apis.policy.v1beta1.Eviction": "io.k8s.api.policy.v1beta1.Eviction", + "io.k8s.kubernetes.pkg.api.v1.PersistentVolumeClaimList": "io.k8s.api.core.v1.PersistentVolumeClaimList", + "io.k8s.kubernetes.pkg.apis.certificates.v1beta1.CertificateSigningRequestCondition": "io.k8s.api.certificates.v1beta1.CertificateSigningRequestCondition", + "io.k8s.kubernetes.pkg.api.v1.DownwardAPIVolumeFile": "io.k8s.api.core.v1.DownwardAPIVolumeFile", + "io.k8s.kubernetes.pkg.apis.authorization.v1beta1.LocalSubjectAccessReview": "io.k8s.api.authorization.v1beta1.LocalSubjectAccessReview", + "io.k8s.kubernetes.pkg.apis.apps.v1beta1.ScaleStatus": "io.k8s.api.apps.v1beta1.ScaleStatus", + "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.HTTPIngressRuleValue": "io.k8s.api.extensions.v1beta1.HTTPIngressRuleValue", + "io.k8s.kubernetes.pkg.apis.batch.v1.Job": "io.k8s.api.batch.v1.Job", + "io.k8s.kubernetes.pkg.apis.admissionregistration.v1alpha1.ValidatingWebhookConfiguration": "io.k8s.api.admissionregistration.v1alpha1.ValidatingWebhookConfiguration", + "io.k8s.kubernetes.pkg.apis.rbac.v1beta1.RoleBinding": "io.k8s.api.rbac.v1beta1.RoleBinding", + "io.k8s.kubernetes.pkg.api.v1.FCVolumeSource": "io.k8s.api.core.v1.FCVolumeSource", + "io.k8s.kubernetes.pkg.api.v1.EndpointAddress": "io.k8s.api.core.v1.EndpointAddress", + "io.k8s.kubernetes.pkg.api.v1.ContainerPort": "io.k8s.api.core.v1.ContainerPort", + "io.k8s.kubernetes.pkg.apis.rbac.v1beta1.ClusterRoleBinding": "io.k8s.api.rbac.v1beta1.ClusterRoleBinding", + "io.k8s.kubernetes.pkg.api.v1.GlusterfsVolumeSource": "io.k8s.api.core.v1.GlusterfsVolumeSource", + "io.k8s.kubernetes.pkg.api.v1.ResourceRequirements": "io.k8s.api.core.v1.ResourceRequirements", + "io.k8s.kubernetes.pkg.apis.apps.v1beta1.RollingUpdateDeployment": "io.k8s.api.apps.v1beta1.RollingUpdateDeployment", + "io.k8s.kubernetes.pkg.api.v1.NamespaceStatus": "io.k8s.api.core.v1.NamespaceStatus", + "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.RunAsUserStrategyOptions": "io.k8s.api.extensions.v1beta1.RunAsUserStrategyOptions", + "io.k8s.kubernetes.pkg.api.v1.Namespace": "io.k8s.api.core.v1.Namespace", + "io.k8s.kubernetes.pkg.apis.authorization.v1.SubjectAccessReviewSpec": 
"io.k8s.api.authorization.v1.SubjectAccessReviewSpec", + "io.k8s.kubernetes.pkg.apis.autoscaling.v2alpha1.HorizontalPodAutoscaler": "io.k8s.api.autoscaling.v2alpha1.HorizontalPodAutoscaler", + "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.ReplicaSetCondition": "io.k8s.api.extensions.v1beta1.ReplicaSetCondition", + "io.k8s.kubernetes.pkg.apis.autoscaling.v1.HorizontalPodAutoscalerStatus": "io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerStatus", + "io.k8s.kubernetes.pkg.apis.authentication.v1.TokenReviewStatus": "io.k8s.api.authentication.v1.TokenReviewStatus", + "io.k8s.kubernetes.pkg.api.v1.PersistentVolume": "io.k8s.api.core.v1.PersistentVolume", + "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.FSGroupStrategyOptions": "io.k8s.api.extensions.v1beta1.FSGroupStrategyOptions", + "io.k8s.kubernetes.pkg.api.v1.PodSecurityContext": "io.k8s.api.core.v1.PodSecurityContext", + "io.k8s.kubernetes.pkg.api.v1.PodTemplate": "io.k8s.api.core.v1.PodTemplate", + "io.k8s.kubernetes.pkg.apis.authorization.v1.LocalSubjectAccessReview": "io.k8s.api.authorization.v1.LocalSubjectAccessReview", + "io.k8s.kubernetes.pkg.api.v1.StorageOSVolumeSource": "io.k8s.api.core.v1.StorageOSVolumeSource", + "io.k8s.kubernetes.pkg.api.v1.NodeSelectorTerm": "io.k8s.api.core.v1.NodeSelectorTerm", + "io.k8s.kubernetes.pkg.apis.rbac.v1beta1.Role": "io.k8s.api.rbac.v1beta1.Role", + "io.k8s.kubernetes.pkg.api.v1.ContainerStatus": "io.k8s.api.core.v1.ContainerStatus", + "io.k8s.kubernetes.pkg.apis.authorization.v1.SubjectAccessReviewStatus": "io.k8s.api.authorization.v1.SubjectAccessReviewStatus", + "io.k8s.kubernetes.pkg.apis.authentication.v1.TokenReviewSpec": "io.k8s.api.authentication.v1.TokenReviewSpec", + "io.k8s.kubernetes.pkg.api.v1.ConfigMap": "io.k8s.api.core.v1.ConfigMap", + "io.k8s.kubernetes.pkg.api.v1.ServiceStatus": "io.k8s.api.core.v1.ServiceStatus", + "io.k8s.kubernetes.pkg.apis.authorization.v1beta1.SelfSubjectAccessReviewSpec": "io.k8s.api.authorization.v1beta1.SelfSubjectAccessReviewSpec", + "io.k8s.kubernetes.pkg.api.v1.CinderVolumeSource": "io.k8s.api.core.v1.CinderVolumeSource", + "io.k8s.kubernetes.pkg.apis.settings.v1alpha1.PodPresetSpec": "io.k8s.api.settings.v1alpha1.PodPresetSpec", + "io.k8s.kubernetes.pkg.apis.authorization.v1beta1.NonResourceAttributes": "io.k8s.api.authorization.v1beta1.NonResourceAttributes", + "io.k8s.kubernetes.pkg.api.v1.ContainerImage": "io.k8s.api.core.v1.ContainerImage", + "io.k8s.kubernetes.pkg.api.v1.ReplicationControllerCondition": "io.k8s.api.core.v1.ReplicationControllerCondition", + "io.k8s.kubernetes.pkg.api.v1.EmptyDirVolumeSource": "io.k8s.api.core.v1.EmptyDirVolumeSource", + "io.k8s.kubernetes.pkg.apis.autoscaling.v1.HorizontalPodAutoscalerList": "io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerList", + "io.k8s.kubernetes.pkg.apis.batch.v1.JobList": "io.k8s.api.batch.v1.JobList", + "io.k8s.kubernetes.pkg.api.v1.NFSVolumeSource": "io.k8s.api.core.v1.NFSVolumeSource", + "io.k8s.kubernetes.pkg.api.v1.Pod": "io.k8s.api.core.v1.Pod", + "io.k8s.kubernetes.pkg.api.v1.ObjectReference": "io.k8s.api.core.v1.ObjectReference", + "io.k8s.kubernetes.pkg.apis.apps.v1beta1.Deployment": "io.k8s.api.apps.v1beta1.Deployment", + "io.k8s.kubernetes.pkg.apis.storage.v1.StorageClassList": "io.k8s.api.storage.v1.StorageClassList", + "io.k8s.kubernetes.pkg.api.v1.AttachedVolume": "io.k8s.api.core.v1.AttachedVolume", + "io.k8s.kubernetes.pkg.api.v1.AWSElasticBlockStoreVolumeSource": "io.k8s.api.core.v1.AWSElasticBlockStoreVolumeSource", + 
"io.k8s.kubernetes.pkg.apis.batch.v2alpha1.CronJobList": "io.k8s.api.batch.v2alpha1.CronJobList", + "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.DeploymentSpec": "io.k8s.api.extensions.v1beta1.DeploymentSpec", + "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.PodSecurityPolicyList": "io.k8s.api.extensions.v1beta1.PodSecurityPolicyList", + "io.k8s.kubernetes.pkg.api.v1.PodAffinityTerm": "io.k8s.api.core.v1.PodAffinityTerm", + "io.k8s.kubernetes.pkg.api.v1.HTTPHeader": "io.k8s.api.core.v1.HTTPHeader", + "io.k8s.kubernetes.pkg.api.v1.ConfigMapKeySelector": "io.k8s.api.core.v1.ConfigMapKeySelector", + "io.k8s.kubernetes.pkg.api.v1.SecretKeySelector": "io.k8s.api.core.v1.SecretKeySelector", + "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.DeploymentList": "io.k8s.api.extensions.v1beta1.DeploymentList", + "io.k8s.kubernetes.pkg.apis.authentication.v1.UserInfo": "io.k8s.api.authentication.v1.UserInfo", + "io.k8s.kubernetes.pkg.api.v1.LoadBalancerIngress": "io.k8s.api.core.v1.LoadBalancerIngress", + "io.k8s.kubernetes.pkg.api.v1.DaemonEndpoint": "io.k8s.api.core.v1.DaemonEndpoint", + "io.k8s.kubernetes.pkg.api.v1.NodeSelectorRequirement": "io.k8s.api.core.v1.NodeSelectorRequirement", + "io.k8s.kubernetes.pkg.apis.batch.v2alpha1.CronJobStatus": "io.k8s.api.batch.v2alpha1.CronJobStatus", + "io.k8s.kubernetes.pkg.apis.autoscaling.v1.Scale": "io.k8s.api.autoscaling.v1.Scale", + "io.k8s.kubernetes.pkg.api.v1.ScaleIOVolumeSource": "io.k8s.api.core.v1.ScaleIOVolumeSource", + "io.k8s.kubernetes.pkg.api.v1.PodAntiAffinity": "io.k8s.api.core.v1.PodAntiAffinity", + "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.PodSecurityPolicySpec": "io.k8s.api.extensions.v1beta1.PodSecurityPolicySpec", + "io.k8s.kubernetes.pkg.apis.settings.v1alpha1.PodPresetList": "io.k8s.api.settings.v1alpha1.PodPresetList", + "io.k8s.kubernetes.pkg.api.v1.NodeAffinity": "io.k8s.api.core.v1.NodeAffinity", + "io.k8s.kubernetes.pkg.apis.apps.v1beta1.DeploymentCondition": "io.k8s.api.apps.v1beta1.DeploymentCondition", + "io.k8s.kubernetes.pkg.api.v1.NodeSpec": "io.k8s.api.core.v1.NodeSpec", + "io.k8s.kubernetes.pkg.apis.apps.v1beta1.StatefulSetStatus": "io.k8s.api.apps.v1beta1.StatefulSetStatus", + "io.k8s.kubernetes.pkg.apis.admissionregistration.v1alpha1.RuleWithOperations": "io.k8s.api.admissionregistration.v1alpha1.RuleWithOperations", + "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.IngressStatus": "io.k8s.api.extensions.v1beta1.IngressStatus", + "io.k8s.kubernetes.pkg.api.v1.LimitRangeList": "io.k8s.api.core.v1.LimitRangeList", + "io.k8s.kubernetes.pkg.api.v1.AzureDiskVolumeSource": "io.k8s.api.core.v1.AzureDiskVolumeSource", + "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.ReplicaSetStatus": "io.k8s.api.extensions.v1beta1.ReplicaSetStatus", + "io.k8s.kubernetes.pkg.api.v1.ComponentStatus": "io.k8s.api.core.v1.ComponentStatus", + "io.k8s.kubernetes.pkg.apis.autoscaling.v1.HorizontalPodAutoscaler": "io.k8s.api.autoscaling.v1.HorizontalPodAutoscaler", + "io.k8s.kubernetes.pkg.apis.networking.v1.NetworkPolicy": "io.k8s.api.networking.v1.NetworkPolicy", + "io.k8s.kubernetes.pkg.apis.apps.v1beta1.RollbackConfig": "io.k8s.api.apps.v1beta1.RollbackConfig", + "io.k8s.kubernetes.pkg.api.v1.NodeCondition": "io.k8s.api.core.v1.NodeCondition", + "io.k8s.kubernetes.pkg.api.v1.DownwardAPIProjection": "io.k8s.api.core.v1.DownwardAPIProjection", + "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.SELinuxStrategyOptions": "io.k8s.api.extensions.v1beta1.SELinuxStrategyOptions", + "io.k8s.kubernetes.pkg.api.v1.NamespaceSpec": 
"io.k8s.api.core.v1.NamespaceSpec", + "io.k8s.kubernetes.pkg.apis.certificates.v1beta1.CertificateSigningRequestSpec": "io.k8s.api.certificates.v1beta1.CertificateSigningRequestSpec", + "io.k8s.kubernetes.pkg.api.v1.ServiceSpec": "io.k8s.api.core.v1.ServiceSpec", + "io.k8s.kubernetes.pkg.apis.authorization.v1.SubjectAccessReview": "io.k8s.api.authorization.v1.SubjectAccessReview", + "io.k8s.kubernetes.pkg.apis.apps.v1beta1.DeploymentList": "io.k8s.api.apps.v1beta1.DeploymentList", + "io.k8s.kubernetes.pkg.api.v1.Toleration": "io.k8s.api.core.v1.Toleration", + "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.NetworkPolicyList": "io.k8s.api.extensions.v1beta1.NetworkPolicyList", + "io.k8s.kubernetes.pkg.apis.autoscaling.v2alpha1.PodsMetricSource": "io.k8s.api.autoscaling.v2alpha1.PodsMetricSource", + "io.k8s.kubernetes.pkg.api.v1.EnvFromSource": "io.k8s.api.core.v1.EnvFromSource", + "io.k8s.kubernetes.pkg.apis.autoscaling.v1.ScaleSpec": "io.k8s.api.autoscaling.v1.ScaleSpec", + "io.k8s.kubernetes.pkg.api.v1.PodTemplateList": "io.k8s.api.core.v1.PodTemplateList", + "io.k8s.kubernetes.pkg.apis.autoscaling.v2alpha1.HorizontalPodAutoscalerSpec": "io.k8s.api.autoscaling.v2alpha1.HorizontalPodAutoscalerSpec", + "io.k8s.kubernetes.pkg.api.v1.SecretProjection": "io.k8s.api.core.v1.SecretProjection", + "io.k8s.kubernetes.pkg.api.v1.ResourceFieldSelector": "io.k8s.api.core.v1.ResourceFieldSelector", + "io.k8s.kubernetes.pkg.api.v1.PersistentVolumeSpec": "io.k8s.api.core.v1.PersistentVolumeSpec", + "io.k8s.kubernetes.pkg.api.v1.ConfigMapVolumeSource": "io.k8s.api.core.v1.ConfigMapVolumeSource", + "io.k8s.kubernetes.pkg.apis.autoscaling.v2alpha1.HorizontalPodAutoscalerList": "io.k8s.api.autoscaling.v2alpha1.HorizontalPodAutoscalerList", + "io.k8s.kubernetes.pkg.apis.authentication.v1beta1.TokenReviewStatus": "io.k8s.api.authentication.v1beta1.TokenReviewStatus", + "io.k8s.kubernetes.pkg.apis.networking.v1.NetworkPolicyList": "io.k8s.api.networking.v1.NetworkPolicyList", + "io.k8s.kubernetes.pkg.api.v1.Endpoints": "io.k8s.api.core.v1.Endpoints", + "io.k8s.kubernetes.pkg.api.v1.LimitRangeItem": "io.k8s.api.core.v1.LimitRangeItem", + "io.k8s.kubernetes.pkg.api.v1.ServiceAccount": "io.k8s.api.core.v1.ServiceAccount", + "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.ScaleSpec": "io.k8s.api.extensions.v1beta1.ScaleSpec", + "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.IngressTLS": "io.k8s.api.extensions.v1beta1.IngressTLS", + "io.k8s.kubernetes.pkg.apis.batch.v2alpha1.CronJob": "io.k8s.api.batch.v2alpha1.CronJob", + "io.k8s.kubernetes.pkg.apis.rbac.v1alpha1.Subject": "io.k8s.api.rbac.v1alpha1.Subject", + "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.DaemonSetStatus": "io.k8s.api.extensions.v1beta1.DaemonSetStatus", + "io.k8s.kubernetes.pkg.apis.policy.v1beta1.PodDisruptionBudgetList": "io.k8s.api.policy.v1beta1.PodDisruptionBudgetList", + "io.k8s.kubernetes.pkg.api.v1.VsphereVirtualDiskVolumeSource": "io.k8s.api.core.v1.VsphereVirtualDiskVolumeSource", + "io.k8s.kubernetes.pkg.apis.rbac.v1alpha1.RoleRef": "io.k8s.api.rbac.v1alpha1.RoleRef", + "io.k8s.kubernetes.pkg.api.v1.PortworxVolumeSource": "io.k8s.api.core.v1.PortworxVolumeSource", + "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.ReplicaSetList": "io.k8s.api.extensions.v1beta1.ReplicaSetList", + "io.k8s.kubernetes.pkg.api.v1.VolumeProjection": "io.k8s.api.core.v1.VolumeProjection", + "io.k8s.kubernetes.pkg.apis.storage.v1beta1.StorageClass": "io.k8s.api.storage.v1beta1.StorageClass", + "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.ReplicaSet": 
"io.k8s.api.extensions.v1beta1.ReplicaSet", + "io.k8s.kubernetes.pkg.apis.apps.v1beta1.DeploymentRollback": "io.k8s.api.apps.v1beta1.DeploymentRollback", + "io.k8s.kubernetes.pkg.apis.rbac.v1alpha1.RoleBinding": "io.k8s.api.rbac.v1alpha1.RoleBinding", + "io.k8s.kubernetes.pkg.api.v1.AzureFileVolumeSource": "io.k8s.api.core.v1.AzureFileVolumeSource", + "io.k8s.kubernetes.pkg.apis.policy.v1beta1.PodDisruptionBudgetStatus": "io.k8s.api.policy.v1beta1.PodDisruptionBudgetStatus", + "io.k8s.kubernetes.pkg.apis.authentication.v1beta1.TokenReviewSpec": "io.k8s.api.authentication.v1beta1.TokenReviewSpec", + "io.k8s.kubernetes.pkg.api.v1.EndpointsList": "io.k8s.api.core.v1.EndpointsList", + "io.k8s.kubernetes.pkg.api.v1.ConfigMapEnvSource": "io.k8s.api.core.v1.ConfigMapEnvSource", + "io.k8s.kubernetes.pkg.apis.batch.v2alpha1.JobTemplateSpec": "io.k8s.api.batch.v2alpha1.JobTemplateSpec", + "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.DaemonSetUpdateStrategy": "io.k8s.api.extensions.v1beta1.DaemonSetUpdateStrategy", + "io.k8s.kubernetes.pkg.apis.authorization.v1beta1.SubjectAccessReviewSpec": "io.k8s.api.authorization.v1beta1.SubjectAccessReviewSpec", + "io.k8s.kubernetes.pkg.api.v1.LocalVolumeSource": "io.k8s.api.core.v1.LocalVolumeSource", + "io.k8s.kubernetes.pkg.api.v1.ContainerState": "io.k8s.api.core.v1.ContainerState", + "io.k8s.kubernetes.pkg.api.v1.Service": "io.k8s.api.core.v1.Service", + "io.k8s.kubernetes.pkg.api.v1.ExecAction": "io.k8s.api.core.v1.ExecAction", + "io.k8s.kubernetes.pkg.api.v1.Taint": "io.k8s.api.core.v1.Taint", + "io.k8s.kubernetes.pkg.apis.rbac.v1beta1.Subject": "io.k8s.api.rbac.v1beta1.Subject", + "io.k8s.kubernetes.pkg.apis.authorization.v1beta1.SubjectAccessReviewStatus": "io.k8s.api.authorization.v1beta1.SubjectAccessReviewStatus", + "io.k8s.kubernetes.pkg.apis.rbac.v1beta1.ClusterRoleBindingList": "io.k8s.api.rbac.v1beta1.ClusterRoleBindingList", + "io.k8s.kubernetes.pkg.api.v1.DownwardAPIVolumeSource": "io.k8s.api.core.v1.DownwardAPIVolumeSource", + "io.k8s.kubernetes.pkg.apis.batch.v1.JobStatus": "io.k8s.api.batch.v1.JobStatus", + "io.k8s.kubernetes.pkg.api.v1.ResourceQuotaStatus": "io.k8s.api.core.v1.ResourceQuotaStatus", + "io.k8s.kubernetes.pkg.api.v1.PersistentVolumeStatus": "io.k8s.api.core.v1.PersistentVolumeStatus", + "io.k8s.kubernetes.pkg.api.v1.PersistentVolumeClaim": "io.k8s.api.core.v1.PersistentVolumeClaim", + "io.k8s.kubernetes.pkg.api.v1.NodeDaemonEndpoints": "io.k8s.api.core.v1.NodeDaemonEndpoints", + "io.k8s.kubernetes.pkg.api.v1.EnvVar": "io.k8s.api.core.v1.EnvVar", + "io.k8s.kubernetes.pkg.api.v1.SecretVolumeSource": "io.k8s.api.core.v1.SecretVolumeSource", + "io.k8s.kubernetes.pkg.api.v1.EnvVarSource": "io.k8s.api.core.v1.EnvVarSource", + "io.k8s.kubernetes.pkg.apis.apps.v1beta1.RollingUpdateStatefulSetStrategy": "io.k8s.api.apps.v1beta1.RollingUpdateStatefulSetStrategy", + "io.k8s.kubernetes.pkg.apis.rbac.v1beta1.ClusterRole": "io.k8s.api.rbac.v1beta1.ClusterRole", + "io.k8s.kubernetes.pkg.apis.admissionregistration.v1alpha1.Initializer": "io.k8s.api.admissionregistration.v1alpha1.Initializer", + "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.DeploymentStrategy": "io.k8s.api.extensions.v1beta1.DeploymentStrategy", + "io.k8s.kubernetes.pkg.apis.apps.v1beta1.ScaleSpec": "io.k8s.api.apps.v1beta1.ScaleSpec", + "io.k8s.kubernetes.pkg.apis.settings.v1alpha1.PodPreset": "io.k8s.api.settings.v1alpha1.PodPreset", + "io.k8s.kubernetes.pkg.api.v1.Probe": "io.k8s.api.core.v1.Probe", + "io.k8s.kubernetes.pkg.api.v1.NamespaceList": 
"io.k8s.api.core.v1.NamespaceList", + "io.k8s.kubernetes.pkg.api.v1.QuobyteVolumeSource": "io.k8s.api.core.v1.QuobyteVolumeSource", + "io.k8s.kubernetes.pkg.api.v1.NodeList": "io.k8s.api.core.v1.NodeList", + "io.k8s.kubernetes.pkg.apis.extensions.v1beta1.RollingUpdateDaemonSet": "io.k8s.api.extensions.v1beta1.RollingUpdateDaemonSet", + "io.k8s.kubernetes.pkg.api.v1.LimitRange": "io.k8s.api.core.v1.LimitRange", } for k, v := range compatibilityMap { diff --git a/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/BUILD b/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/BUILD index d735068257c6..a533b2e29fc1 100644 --- a/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/BUILD +++ b/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/BUILD @@ -20,11 +20,12 @@ go_library( "import_known_versions.go", "plugins.go", "policy.go", + "rbac.go", ], + importpath = "k8s.io/kubernetes/cmd/kube-controller-manager/app", deps = [ "//cmd/kube-controller-manager/app/options:go_default_library", - "//pkg/api:go_default_library", - "//pkg/api/install:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/apps/install:go_default_library", "//pkg/apis/authentication/install:go_default_library", "//pkg/apis/authorization/install:go_default_library", @@ -32,6 +33,8 @@ go_library( "//pkg/apis/batch/install:go_default_library", "//pkg/apis/certificates/install:go_default_library", "//pkg/apis/componentconfig:go_default_library", + "//pkg/apis/core/install:go_default_library", + "//pkg/apis/events/install:go_default_library", "//pkg/apis/extensions/install:go_default_library", "//pkg/apis/policy/install:go_default_library", "//pkg/apis/rbac/install:go_default_library", @@ -40,16 +43,12 @@ go_library( "//pkg/apis/storage/install:go_default_library", "//pkg/cloudprovider:go_default_library", "//pkg/cloudprovider/providers:go_default_library", - "//pkg/cloudprovider/providers/aws:go_default_library", - "//pkg/cloudprovider/providers/azure:go_default_library", - "//pkg/cloudprovider/providers/gce:go_default_library", - "//pkg/cloudprovider/providers/openstack:go_default_library", - "//pkg/cloudprovider/providers/photon:go_default_library", - "//pkg/cloudprovider/providers/vsphere:go_default_library", "//pkg/controller:go_default_library", "//pkg/controller/bootstrap:go_default_library", "//pkg/controller/certificates/approver:go_default_library", + "//pkg/controller/certificates/cleaner:go_default_library", "//pkg/controller/certificates/signer:go_default_library", + "//pkg/controller/clusterroleaggregation:go_default_library", "//pkg/controller/cronjob:go_default_library", "//pkg/controller/daemon:go_default_library", "//pkg/controller/deployment:go_default_library", @@ -74,7 +73,9 @@ go_library( "//pkg/controller/volume/attachdetach:go_default_library", "//pkg/controller/volume/expand:go_default_library", "//pkg/controller/volume/persistentvolume:go_default_library", + "//pkg/controller/volume/pvcprotection:go_default_library", "//pkg/features:go_default_library", + "//pkg/quota/generic:go_default_library", "//pkg/quota/install:go_default_library", "//pkg/serviceaccount:go_default_library", "//pkg/util/configz:go_default_library", @@ -85,6 +86,7 @@ go_library( "//pkg/volume/azure_dd:go_default_library", "//pkg/volume/azure_file:go_default_library", "//pkg/volume/cinder:go_default_library", + "//pkg/volume/csi:go_default_library", "//pkg/volume/fc:go_default_library", "//pkg/volume/flexvolume:go_default_library", "//pkg/volume/flocker:go_default_library", @@ -121,6 
+123,7 @@ go_library( "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", + "//vendor/k8s.io/client-go/scale:go_default_library", "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library", "//vendor/k8s.io/client-go/tools/leaderelection:go_default_library", "//vendor/k8s.io/client-go/tools/leaderelection/resourcelock:go_default_library", @@ -150,6 +153,10 @@ filegroup( go_test( name = "go_default_test", srcs = ["controller_manager_test.go"], + importpath = "k8s.io/kubernetes/cmd/kube-controller-manager/app", library = ":go_default_library", - deps = ["//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library"], + deps = [ + "//vendor/github.com/stretchr/testify/assert:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + ], ) diff --git a/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/autoscaling.go b/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/autoscaling.go index 2a7d83f1db8c..43c1cd1ab009 100644 --- a/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/autoscaling.go +++ b/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/autoscaling.go @@ -21,7 +21,12 @@ limitations under the License. package app import ( + apimeta "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" + discocache "k8s.io/client-go/discovery/cached" // Saturday Night Fever + "k8s.io/client-go/dynamic" + "k8s.io/client-go/scale" "k8s.io/kubernetes/pkg/controller/podautoscaler" "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics" resourceclient "k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1" @@ -63,12 +68,33 @@ func startHPAControllerWithLegacyClient(ctx ControllerContext) (bool, error) { } func startHPAControllerWithMetricsClient(ctx ControllerContext, metricsClient metrics.MetricsClient) (bool, error) { + hpaClientGoClient := ctx.ClientBuilder.ClientGoClientOrDie("horizontal-pod-autoscaler") hpaClient := ctx.ClientBuilder.ClientOrDie("horizontal-pod-autoscaler") - replicaCalc := podautoscaler.NewReplicaCalculator(metricsClient, hpaClient.Core()) + hpaClientConfig := ctx.ClientBuilder.ConfigOrDie("horizontal-pod-autoscaler") + + // TODO: we need something like deferred discovery REST mapper that calls invalidate + // on cache misses. 
+ cachedDiscovery := discocache.NewMemCacheClient(hpaClientGoClient.Discovery()) + restMapper := discovery.NewDeferredDiscoveryRESTMapper(cachedDiscovery, apimeta.InterfacesForUnstructured) + restMapper.Reset() + // we don't use cached discovery because DiscoveryScaleKindResolver does its own caching, + // so we want to re-fetch every time when we actually ask for it + scaleKindResolver := scale.NewDiscoveryScaleKindResolver(hpaClientGoClient.Discovery()) + scaleClient, err := scale.NewForConfig(hpaClientConfig, restMapper, dynamic.LegacyAPIPathResolverFunc, scaleKindResolver) + if err != nil { + return false, err + } + + replicaCalc := podautoscaler.NewReplicaCalculator( + metricsClient, + hpaClient.CoreV1(), + ctx.Options.HorizontalPodAutoscalerTolerance, + ) go podautoscaler.NewHorizontalController( - ctx.ClientBuilder.ClientGoClientOrDie("horizontal-pod-autoscaler").Core(), - hpaClient.Extensions(), - hpaClient.Autoscaling(), + hpaClientGoClient.CoreV1(), + scaleClient, + hpaClient.AutoscalingV1(), + restMapper, replicaCalc, ctx.InformerFactory.Autoscaling().V1().HorizontalPodAutoscalers(), ctx.Options.HorizontalPodAutoscalerSyncPeriod.Duration, diff --git a/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/batch.go b/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/batch.go index 0333206144db..b60d7c149e57 100644 --- a/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/batch.go +++ b/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/batch.go @@ -21,6 +21,8 @@ limitations under the License. package app import ( + "fmt" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/kubernetes/pkg/controller/cronjob" "k8s.io/kubernetes/pkg/controller/job" @@ -42,8 +44,12 @@ func startCronJobController(ctx ControllerContext) (bool, error) { if !ctx.AvailableResources[schema.GroupVersionResource{Group: "batch", Version: "v1beta1", Resource: "cronjobs"}] { return false, nil } - go cronjob.NewCronJobController( + cjc, err := cronjob.NewCronJobController( ctx.ClientBuilder.ClientOrDie("cronjob-controller"), - ).Run(ctx.Stop) + ) + if err != nil { + return true, fmt.Errorf("error creating CronJob controller: %v", err) + } + go cjc.Run(ctx.Stop) return true, nil } diff --git a/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/bootstrap.go b/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/bootstrap.go index 05ff566fd884..046070ecb277 100644 --- a/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/bootstrap.go +++ b/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/bootstrap.go @@ -16,20 +16,32 @@ limitations under the License. 
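The autoscaling.go hunk above replaces the typed Extensions/Autoscaling clients with a polymorphic scale client built from discovery. As a rough standalone sketch of that wiring, assuming the client-go revision vendored by this PR (the package name, the newScaleClient helper, and the restConfig parameter are illustrative only, not part of the patch):

package scalewiring

import (
	apimeta "k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/client-go/discovery"
	discocache "k8s.io/client-go/discovery/cached"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/scale"
)

// newScaleClient mirrors the wiring in the autoscaling.go hunk: a memory-cached
// discovery client backs the deferred REST mapper, while the scale-kind resolver
// is handed the uncached discovery interface because it does its own caching.
func newScaleClient(restConfig *restclient.Config) (scale.ScalesGetter, error) {
	clientset, err := kubernetes.NewForConfig(restConfig)
	if err != nil {
		return nil, err
	}

	cachedDiscovery := discocache.NewMemCacheClient(clientset.Discovery())
	restMapper := discovery.NewDeferredDiscoveryRESTMapper(cachedDiscovery, apimeta.InterfacesForUnstructured)
	restMapper.Reset() // the deferred mapper does not invalidate itself on cache misses

	scaleKindResolver := scale.NewDiscoveryScaleKindResolver(clientset.Discovery())
	return scale.NewForConfig(restConfig, restMapper, dynamic.LegacyAPIPathResolverFunc, scaleKindResolver)
}

The HPA controller then receives this scale client and the REST mapper in place of the old typed clients, as shown in the hunk.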
package app -import "k8s.io/kubernetes/pkg/controller/bootstrap" +import ( + "fmt" + + "k8s.io/kubernetes/pkg/controller/bootstrap" +) func startBootstrapSignerController(ctx ControllerContext) (bool, error) { - go bootstrap.NewBootstrapSigner( + bsc, err := bootstrap.NewBootstrapSigner( ctx.ClientBuilder.ClientGoClientOrDie("bootstrap-signer"), bootstrap.DefaultBootstrapSignerOptions(), - ).Run(ctx.Stop) + ) + if err != nil { + return true, fmt.Errorf("error creating BootstrapSigner controller: %v", err) + } + go bsc.Run(ctx.Stop) return true, nil } func startTokenCleanerController(ctx ControllerContext) (bool, error) { - go bootstrap.NewTokenCleaner( + tcc, err := bootstrap.NewTokenCleaner( ctx.ClientBuilder.ClientGoClientOrDie("token-cleaner"), bootstrap.DefaultTokenCleanerOptions(), - ).Run(ctx.Stop) + ) + if err != nil { + return true, fmt.Errorf("error creating TokenCleaner controller: %v", err) + } + go tcc.Run(ctx.Stop) return true, nil } diff --git a/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/certificates.go b/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/certificates.go index 96819b4b4406..7ef295821752 100644 --- a/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/certificates.go +++ b/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/certificates.go @@ -21,10 +21,15 @@ limitations under the License. package app import ( + "fmt" + "os" + "github.com/golang/glog" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kubernetes/cmd/kube-controller-manager/app/options" "k8s.io/kubernetes/pkg/controller/certificates/approver" + "k8s.io/kubernetes/pkg/controller/certificates/cleaner" "k8s.io/kubernetes/pkg/controller/certificates/signer" ) @@ -35,6 +40,45 @@ func startCSRSigningController(ctx ControllerContext) (bool, error) { if ctx.Options.ClusterSigningCertFile == "" || ctx.Options.ClusterSigningKeyFile == "" { return false, nil } + + // Deprecation warning for old defaults. + // + // * If the signing cert and key are the default paths but the files + // exist, warn that the paths need to be specified explicitly in a + // later release and the defaults will be removed. We don't expect this + // to be the case. + // + // * If the signing cert and key are default paths but the files don't exist, + // bail out of startController without logging. + var keyFileExists, keyUsesDefault, certFileExists, certUsesDefault bool + + _, err := os.Stat(ctx.Options.ClusterSigningCertFile) + certFileExists = !os.IsNotExist(err) + + certUsesDefault = (ctx.Options.ClusterSigningCertFile == options.DefaultClusterSigningCertFile) + + _, err = os.Stat(ctx.Options.ClusterSigningKeyFile) + keyFileExists = !os.IsNotExist(err) + + keyUsesDefault = (ctx.Options.ClusterSigningKeyFile == options.DefaultClusterSigningKeyFile) + + switch { + case (keyFileExists && keyUsesDefault) || (certFileExists && certUsesDefault): + glog.Warningf("You might be using flag defaulting for --cluster-signing-cert-file and" + + " --cluster-signing-key-file. These defaults are deprecated and will be removed" + + " in a subsequent release. Please pass these options explicitly.") + case (!keyFileExists && keyUsesDefault) && (!certFileExists && certUsesDefault): + // This is what we expect right now if people aren't + // setting up the signing controller. This isn't + // actually a problem since the signer is not a + // required controller. + return false, nil + default: + // Note that '!filesExist && !usesDefaults' is obviously + // operator error. 
We don't handle this case here and instead + // allow it to be handled by NewCSR... below. + } + c := ctx.ClientBuilder.ClientOrDie("certificate-controller") signer, err := signer.NewCSRSigningController( @@ -45,8 +89,7 @@ func startCSRSigningController(ctx ControllerContext) (bool, error) { ctx.Options.ClusterSigningDuration.Duration, ) if err != nil { - glog.Errorf("Failed to start certificate controller: %v", err) - return false, nil + return false, fmt.Errorf("failed to start certificate controller: %v", err) } go signer.Run(1, ctx.Stop) @@ -57,19 +100,21 @@ func startCSRApprovingController(ctx ControllerContext) (bool, error) { if !ctx.AvailableResources[schema.GroupVersionResource{Group: "certificates.k8s.io", Version: "v1beta1", Resource: "certificatesigningrequests"}] { return false, nil } - c := ctx.ClientBuilder.ClientOrDie("certificate-controller") - approver, err := approver.NewCSRApprovingController( - c, + approver := approver.NewCSRApprovingController( + ctx.ClientBuilder.ClientOrDie("certificate-controller"), ctx.InformerFactory.Certificates().V1beta1().CertificateSigningRequests(), ) - if err != nil { - // TODO this is failing consistently in test-cmd and local-up-cluster.sh. Fix them and make it consistent with all others which - // cause a crash loop - glog.Errorf("Failed to start certificate controller: %v", err) - return false, nil - } go approver.Run(1, ctx.Stop) return true, nil } + +func startCSRCleanerController(ctx ControllerContext) (bool, error) { + cleaner := cleaner.NewCSRCleanerController( + ctx.ClientBuilder.ClientOrDie("certificate-controller").CertificatesV1beta1().CertificateSigningRequests(), + ctx.InformerFactory.Certificates().V1beta1().CertificateSigningRequests(), + ) + go cleaner.Run(1, ctx.Stop) + return true, nil +} diff --git a/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/controllermanager.go b/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/controllermanager.go index b336c3123235..d08e9658a8df 100644 --- a/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/controllermanager.go +++ b/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/controllermanager.go @@ -41,7 +41,6 @@ import ( "k8s.io/api/core/v1" "k8s.io/client-go/discovery" - "k8s.io/client-go/kubernetes" v1core "k8s.io/client-go/kubernetes/typed/core/v1" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" @@ -53,7 +52,7 @@ import ( "k8s.io/client-go/tools/leaderelection" "k8s.io/client-go/tools/leaderelection/resourcelock" "k8s.io/kubernetes/cmd/kube-controller-manager/app/options" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/controller" serviceaccountcontroller "k8s.io/kubernetes/pkg/controller/serviceaccount" @@ -132,7 +131,7 @@ func Run(s *options.CMServer) error { } var clientBuilder controller.ControllerClientBuilder if s.UseServiceAccountCredentials { - if len(s.ServiceAccountKeyFile) > 0 { + if len(s.ServiceAccountKeyFile) == 0 { // It's possible another controller process is creating the tokens for us. // If one isn't, we'll timeout and exit when our client builder is unable to create the tokens. 
glog.Warningf("--use-service-account-credentials was specified without providing a --service-account-private-key-file") @@ -225,7 +224,7 @@ func createRecorder(kubeClient *clientset.Clientset) record.EventRecorder { eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(glog.Infof) eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")}) - return eventBroadcaster.NewRecorder(api.Scheme, v1.EventSource{Component: "controller-manager"}) + return eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: "controller-manager"}) } func createClients(s *options.CMServer) (*clientset.Clientset, *clientset.Clientset, *restclient.Config, error) { @@ -242,7 +241,7 @@ func createClients(s *options.CMServer) (*clientset.Clientset, *clientset.Client if err != nil { glog.Fatalf("Invalid API configuration: %v", err) } - leaderElectionClient := kubernetes.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "leader-election")) + leaderElectionClient := clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "leader-election")) return kubeClient, leaderElectionClient, kubeconfig, nil } @@ -349,6 +348,7 @@ func NewControllerInitializers() map[string]InitFunc { controllers["cronjob"] = startCronJobController controllers["csrsigning"] = startCSRSigningController controllers["csrapproving"] = startCSRApprovingController + controllers["csrcleaner"] = startCSRCleanerController controllers["ttl"] = startTTLController controllers["bootstrapsigner"] = startBootstrapSignerController controllers["tokencleaner"] = startTokenCleanerController @@ -358,6 +358,8 @@ func NewControllerInitializers() map[string]InitFunc { controllers["persistentvolume-binder"] = startPersistentVolumeBinderController controllers["attachdetach"] = startAttachDetachController controllers["persistentvolume-expander"] = startVolumeExpandController + controllers["clusterrole-aggregation"] = startClusterRoleAggregrationController + controllers["pvc-protection"] = startPVCProtectionController return controllers } @@ -441,6 +443,10 @@ func CreateControllerContext(s *options.CMServer, rootClientBuilder, clientBuild } } + if informerUserCloud, ok := cloud.(cloudprovider.InformerUser); ok { + informerUserCloud.SetInformers(sharedInformers) + } + ctx := ControllerContext{ ClientBuilder: clientBuilder, InformerFactory: sharedInformers, @@ -525,7 +531,7 @@ func (c serviceAccountTokenControllerStarter) startServiceAccountTokenController rootCA = c.rootClientBuilder.ConfigOrDie("tokens-controller").CAData } - controller := serviceaccountcontroller.NewTokensController( + controller, err := serviceaccountcontroller.NewTokensController( ctx.InformerFactory.Core().V1().ServiceAccounts(), ctx.InformerFactory.Core().V1().Secrets(), c.rootClientBuilder.ClientOrDie("tokens-controller"), @@ -534,6 +540,9 @@ func (c serviceAccountTokenControllerStarter) startServiceAccountTokenController RootCA: rootCA, }, ) + if err != nil { + return true, fmt.Errorf("error creating Tokens controller: %v", err) + } go controller.Run(int(ctx.Options.ConcurrentSATokenSyncs), ctx.Stop) // start the first set of informers now so that other controllers can start diff --git a/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/core.go b/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/core.go index 154438743603..819d18e0f013 100644 --- a/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/core.go +++ 
b/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/core.go @@ -36,7 +36,7 @@ import ( cacheddiscovery "k8s.io/client-go/discovery/cached" "k8s.io/client-go/dynamic" clientset "k8s.io/client-go/kubernetes" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/controller" endpointcontroller "k8s.io/kubernetes/pkg/controller/endpoint" "k8s.io/kubernetes/pkg/controller/garbagecollector" @@ -53,7 +53,9 @@ import ( "k8s.io/kubernetes/pkg/controller/volume/attachdetach" "k8s.io/kubernetes/pkg/controller/volume/expand" persistentvolumecontroller "k8s.io/kubernetes/pkg/controller/volume/persistentvolume" + "k8s.io/kubernetes/pkg/controller/volume/pvcprotection" "k8s.io/kubernetes/pkg/features" + "k8s.io/kubernetes/pkg/quota/generic" quotainstall "k8s.io/kubernetes/pkg/quota/install" "k8s.io/kubernetes/pkg/util/metrics" ) @@ -67,6 +69,7 @@ func startServiceController(ctx ControllerContext) (bool, error) { ctx.Options.ClusterName, ) if err != nil { + // This error shouldn't fail. It lives like this as a legacy. glog.Errorf("Failed to start service controller: %v", err) return false, nil } @@ -75,20 +78,22 @@ func startServiceController(ctx ControllerContext) (bool, error) { } func startNodeController(ctx ControllerContext) (bool, error) { - var clusterCIDR *net.IPNet - var err error - if len(strings.TrimSpace(ctx.Options.ClusterCIDR)) != 0 { - _, clusterCIDR, err = net.ParseCIDR(ctx.Options.ClusterCIDR) - if err != nil { - glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", ctx.Options.ClusterCIDR, err) + var clusterCIDR *net.IPNet = nil + var serviceCIDR *net.IPNet = nil + if ctx.Options.AllocateNodeCIDRs { + var err error + if len(strings.TrimSpace(ctx.Options.ClusterCIDR)) != 0 { + _, clusterCIDR, err = net.ParseCIDR(ctx.Options.ClusterCIDR) + if err != nil { + glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", ctx.Options.ClusterCIDR, err) + } } - } - var serviceCIDR *net.IPNet - if len(strings.TrimSpace(ctx.Options.ServiceCIDR)) != 0 { - _, serviceCIDR, err = net.ParseCIDR(ctx.Options.ServiceCIDR) - if err != nil { - glog.Warningf("Unsuccessful parsing of service CIDR %v: %v", ctx.Options.ServiceCIDR, err) + if len(strings.TrimSpace(ctx.Options.ServiceCIDR)) != 0 { + _, serviceCIDR, err = net.ParseCIDR(ctx.Options.ServiceCIDR) + if err != nil { + glog.Warningf("Unsuccessful parsing of service CIDR %v: %v", ctx.Options.ServiceCIDR, err) + } } } @@ -123,10 +128,6 @@ func startNodeController(ctx ControllerContext) (bool, error) { } func startRouteController(ctx ControllerContext) (bool, error) { - _, clusterCIDR, err := net.ParseCIDR(ctx.Options.ClusterCIDR) - if err != nil { - glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", ctx.Options.ClusterCIDR, err) - } if !ctx.Options.AllocateNodeCIDRs || !ctx.Options.ConfigureCloudRoutes { glog.Infof("Will not configure cloud provider routes for allocate-node-cidrs: %v, configure-cloud-routes: %v.", ctx.Options.AllocateNodeCIDRs, ctx.Options.ConfigureCloudRoutes) return false, nil @@ -140,6 +141,10 @@ func startRouteController(ctx ControllerContext) (bool, error) { glog.Warning("configure-cloud-routes is set, but cloud provider does not support routes. 
Will not configure cloud provider routes.") return false, nil } + _, clusterCIDR, err := net.ParseCIDR(ctx.Options.ClusterCIDR) + if err != nil { + glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", ctx.Options.ClusterCIDR, err) + } routeController := routecontroller.New(routes, ctx.ClientBuilder.ClientOrDie("route-controller"), ctx.InformerFactory.Core().V1().Nodes(), ctx.Options.ClusterName, clusterCIDR) go routeController.Run(ctx.Stop, ctx.Options.RouteReconciliationPeriod.Duration) return true, nil @@ -239,37 +244,42 @@ func startPodGCController(ctx ControllerContext) (bool, error) { func startResourceQuotaController(ctx ControllerContext) (bool, error) { resourceQuotaControllerClient := ctx.ClientBuilder.ClientOrDie("resourcequota-controller") - resourceQuotaRegistry := quotainstall.NewRegistry(resourceQuotaControllerClient, ctx.InformerFactory) - groupKindsToReplenish := []schema.GroupKind{ - api.Kind("Pod"), - api.Kind("Service"), - api.Kind("ReplicationController"), - api.Kind("PersistentVolumeClaim"), - api.Kind("Secret"), - api.Kind("ConfigMap"), - } + discoveryFunc := resourceQuotaControllerClient.Discovery().ServerPreferredNamespacedResources + listerFuncForResource := generic.ListerFuncForResourceFunc(ctx.InformerFactory.ForResource) + quotaConfiguration := quotainstall.NewQuotaConfigurationForControllers(listerFuncForResource) + resourceQuotaControllerOptions := &resourcequotacontroller.ResourceQuotaControllerOptions{ - QuotaClient: resourceQuotaControllerClient.Core(), + QuotaClient: resourceQuotaControllerClient.CoreV1(), ResourceQuotaInformer: ctx.InformerFactory.Core().V1().ResourceQuotas(), ResyncPeriod: controller.StaticResyncPeriodFunc(ctx.Options.ResourceQuotaSyncPeriod.Duration), - Registry: resourceQuotaRegistry, - ControllerFactory: resourcequotacontroller.NewReplenishmentControllerFactory(ctx.InformerFactory), + InformerFactory: ctx.InformerFactory, ReplenishmentResyncPeriod: ResyncPeriod(&ctx.Options), - GroupKindsToReplenish: groupKindsToReplenish, + DiscoveryFunc: discoveryFunc, + IgnoredResourcesFunc: quotaConfiguration.IgnoredResources, + InformersStarted: ctx.InformersStarted, + Registry: generic.NewRegistry(quotaConfiguration.Evaluators()), } - if resourceQuotaControllerClient.Core().RESTClient().GetRateLimiter() != nil { - metrics.RegisterMetricAndTrackRateLimiterUsage("resource_quota_controller", resourceQuotaControllerClient.Core().RESTClient().GetRateLimiter()) + if resourceQuotaControllerClient.CoreV1().RESTClient().GetRateLimiter() != nil { + if err := metrics.RegisterMetricAndTrackRateLimiterUsage("resource_quota_controller", resourceQuotaControllerClient.CoreV1().RESTClient().GetRateLimiter()); err != nil { + return true, err + } + } + + resourceQuotaController, err := resourcequotacontroller.NewResourceQuotaController(resourceQuotaControllerOptions) + if err != nil { + return false, err } + go resourceQuotaController.Run(int(ctx.Options.ConcurrentResourceQuotaSyncs), ctx.Stop) + + // Periodically the quota controller to detect new resource types + go resourceQuotaController.Sync(discoveryFunc, 30*time.Second, ctx.Stop) - go resourcequotacontroller.NewResourceQuotaController( - resourceQuotaControllerOptions, - ).Run(int(ctx.Options.ConcurrentResourceQuotaSyncs), ctx.Stop) return true, nil } func startNamespaceController(ctx ControllerContext) (bool, error) { // TODO: should use a dynamic RESTMapper built from the discovery results. 
- restMapper := api.Registry.RESTMapper() + restMapper := legacyscheme.Registry.RESTMapper() // the namespace cleanup controller is very chatty. It makes lots of discovery calls and then it makes lots of delete calls // the ratelimiter negatively affects its speed. Deleting 100 total items in a namespace (that's only a few of each resource @@ -296,12 +306,16 @@ func startNamespaceController(ctx ControllerContext) (bool, error) { } func startServiceAccountController(ctx ControllerContext) (bool, error) { - go serviceaccountcontroller.NewServiceAccountsController( + sac, err := serviceaccountcontroller.NewServiceAccountsController( ctx.InformerFactory.Core().V1().ServiceAccounts(), ctx.InformerFactory.Core().V1().Namespaces(), ctx.ClientBuilder.ClientOrDie("service-account-controller"), serviceaccountcontroller.DefaultServiceAccountsControllerOptions(), - ).Run(1, ctx.Stop) + ) + if err != nil { + return true, fmt.Errorf("error creating ServiceAccount controller: %v", err) + } + go sac.Run(1, ctx.Stop) return true, nil } @@ -335,10 +349,7 @@ func startGarbageCollectorController(ctx ControllerContext) (bool, error) { clientPool := dynamic.NewClientPool(config, restMapper, dynamic.LegacyAPIPathResolverFunc) // Get an initial set of deletable resources to prime the garbage collector. - deletableResources, err := garbagecollector.GetDeletableResources(discoveryClient) - if err != nil { - return true, err - } + deletableResources := garbagecollector.GetDeletableResources(discoveryClient) ignoredResources := make(map[schema.GroupResource]struct{}) for _, r := range ctx.Options.GCIgnoredResources { ignoredResources[schema.GroupResource{Group: r.Group, Resource: r.Resource}] = struct{}{} @@ -366,3 +377,15 @@ func startGarbageCollectorController(ctx ControllerContext) (bool, error) { return true, nil } + +func startPVCProtectionController(ctx ControllerContext) (bool, error) { + if utilfeature.DefaultFeatureGate.Enabled(features.PVCProtection) { + go pvcprotection.NewPVCProtectionController( + ctx.InformerFactory.Core().V1().PersistentVolumeClaims(), + ctx.InformerFactory.Core().V1().Pods(), + ctx.ClientBuilder.ClientOrDie("pvc-protection-controller"), + ).Run(1, ctx.Stop) + return true, nil + } + return false, nil +} diff --git a/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/extensions.go b/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/extensions.go index 7e9be6a87e34..060704bbf530 100644 --- a/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/extensions.go +++ b/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/extensions.go @@ -21,6 +21,8 @@ limitations under the License. 
package app import ( + "fmt" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/kubernetes/pkg/controller/daemon" "k8s.io/kubernetes/pkg/controller/deployment" @@ -31,13 +33,17 @@ func startDaemonSetController(ctx ControllerContext) (bool, error) { if !ctx.AvailableResources[schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "daemonsets"}] { return false, nil } - go daemon.NewDaemonSetsController( + dsc, err := daemon.NewDaemonSetsController( ctx.InformerFactory.Extensions().V1beta1().DaemonSets(), ctx.InformerFactory.Apps().V1beta1().ControllerRevisions(), ctx.InformerFactory.Core().V1().Pods(), ctx.InformerFactory.Core().V1().Nodes(), ctx.ClientBuilder.ClientOrDie("daemon-set-controller"), - ).Run(int(ctx.Options.ConcurrentDaemonSetSyncs), ctx.Stop) + ) + if err != nil { + return true, fmt.Errorf("error creating DaemonSets controller: %v", err) + } + go dsc.Run(int(ctx.Options.ConcurrentDaemonSetSyncs), ctx.Stop) return true, nil } @@ -45,12 +51,16 @@ func startDeploymentController(ctx ControllerContext) (bool, error) { if !ctx.AvailableResources[schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "deployments"}] { return false, nil } - go deployment.NewDeploymentController( + dc, err := deployment.NewDeploymentController( ctx.InformerFactory.Extensions().V1beta1().Deployments(), ctx.InformerFactory.Extensions().V1beta1().ReplicaSets(), ctx.InformerFactory.Core().V1().Pods(), ctx.ClientBuilder.ClientOrDie("deployment-controller"), - ).Run(int(ctx.Options.ConcurrentDeploymentSyncs), ctx.Stop) + ) + if err != nil { + return true, fmt.Errorf("error creating Deployment controller: %v", err) + } + go dc.Run(int(ctx.Options.ConcurrentDeploymentSyncs), ctx.Stop) return true, nil } diff --git a/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/import_known_versions.go b/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/import_known_versions.go index 0f2436ac082e..f63e298a2401 100644 --- a/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/import_known_versions.go +++ b/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/import_known_versions.go @@ -15,21 +15,22 @@ limitations under the License. */ // TODO: Remove this file when namespace controller and garbage collector -// stops using api.Registry.RESTMapper() +// stops using legacyscheme.Registry.RESTMapper() package app // These imports are the API groups the client will support. 
import ( "fmt" - "k8s.io/kubernetes/pkg/api" - _ "k8s.io/kubernetes/pkg/api/install" + "k8s.io/kubernetes/pkg/api/legacyscheme" _ "k8s.io/kubernetes/pkg/apis/apps/install" _ "k8s.io/kubernetes/pkg/apis/authentication/install" _ "k8s.io/kubernetes/pkg/apis/authorization/install" _ "k8s.io/kubernetes/pkg/apis/autoscaling/install" _ "k8s.io/kubernetes/pkg/apis/batch/install" _ "k8s.io/kubernetes/pkg/apis/certificates/install" + _ "k8s.io/kubernetes/pkg/apis/core/install" + _ "k8s.io/kubernetes/pkg/apis/events/install" _ "k8s.io/kubernetes/pkg/apis/extensions/install" _ "k8s.io/kubernetes/pkg/apis/policy/install" _ "k8s.io/kubernetes/pkg/apis/rbac/install" @@ -39,7 +40,7 @@ import ( ) func init() { - if missingVersions := api.Registry.ValidateEnvRequestedVersions(); len(missingVersions) != 0 { + if missingVersions := legacyscheme.Registry.ValidateEnvRequestedVersions(); len(missingVersions) != 0 { panic(fmt.Sprintf("KUBE_API_VERSIONS contains versions that are not installed: %q.", missingVersions)) } } diff --git a/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/options/BUILD b/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/options/BUILD index 6bbaaeb27e6f..588b64c68668 100644 --- a/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/options/BUILD +++ b/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/options/BUILD @@ -3,11 +3,13 @@ package(default_visibility = ["//visibility:public"]) load( "@io_bazel_rules_go//go:def.bzl", "go_library", + "go_test", ) go_library( name = "go_default_library", srcs = ["options.go"], + importpath = "k8s.io/kubernetes/cmd/kube-controller-manager/app/options", deps = [ "//pkg/apis/componentconfig:go_default_library", "//pkg/client/leaderelectionconfig:go_default_library", @@ -35,3 +37,17 @@ filegroup( srcs = [":package-srcs"], tags = ["automanaged"], ) + +go_test( + name = "go_default_test", + srcs = ["options_test.go"], + importpath = "k8s.io/kubernetes/cmd/kube-controller-manager/app/options", + library = ":go_default_library", + tags = ["automanaged"], + deps = [ + "//pkg/apis/componentconfig:go_default_library", + "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/options/options.go b/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/options/options.go index e5acccd066b6..d681bfd18957 100644 --- a/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/options/options.go +++ b/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/options/options.go @@ -39,6 +39,16 @@ import ( "github.com/spf13/pflag" ) +const ( + // These defaults are deprecated and exported so that we can warn if + // they are being used. + + // DefaultClusterSigningCertFile is deprecated. Do not use. + DefaultClusterSigningCertFile = "/etc/kubernetes/ca/ca.pem" + // DefaultClusterSigningKeyFile is deprecated. Do not use. + DefaultClusterSigningKeyFile = "/etc/kubernetes/ca/ca.key" +) + // CMServer is the main context object for the controller manager. type CMServer struct { componentconfig.KubeControllerManagerConfiguration @@ -55,6 +65,8 @@ func NewCMServer() *CMServer { } s := CMServer{ + // Part of these default values also present in 'cmd/cloud-controller-manager/app/options/options.go'. + // Please keep them in sync when doing update. 
KubeControllerManagerConfiguration: componentconfig.KubeControllerManagerConfiguration{ Controllers: []string{"*"}, Port: ports.ControllerManagerPort, @@ -77,6 +89,7 @@ func NewCMServer() *CMServer { HorizontalPodAutoscalerSyncPeriod: metav1.Duration{Duration: 30 * time.Second}, HorizontalPodAutoscalerUpscaleForbiddenWindow: metav1.Duration{Duration: 3 * time.Minute}, HorizontalPodAutoscalerDownscaleForbiddenWindow: metav1.Duration{Duration: 5 * time.Minute}, + HorizontalPodAutoscalerTolerance: 0.1, DeploymentControllerSyncPeriod: metav1.Duration{Duration: 30 * time.Second}, MinResyncPeriod: metav1.Duration{Duration: 12 * time.Hour}, RegisterRetryCount: 10, @@ -108,12 +121,12 @@ func NewCMServer() *CMServer { EnableGarbageCollector: true, ConcurrentGCSyncs: 20, GCIgnoredResources: gcIgnoredResources, - ClusterSigningCertFile: "/etc/kubernetes/ca/ca.pem", - ClusterSigningKeyFile: "/etc/kubernetes/ca/ca.key", + ClusterSigningCertFile: DefaultClusterSigningCertFile, + ClusterSigningKeyFile: DefaultClusterSigningKeyFile, ClusterSigningDuration: metav1.Duration{Duration: helpers.OneYear}, ReconcilerSyncLoopPeriod: metav1.Duration{Duration: 60 * time.Second}, EnableTaintManager: true, - HorizontalPodAutoscalerUseRESTClients: false, + HorizontalPodAutoscalerUseRESTClients: true, }, } s.LeaderElection.LeaderElect = true @@ -164,6 +177,7 @@ func (s *CMServer) AddFlags(fs *pflag.FlagSet, allControllers []string, disabled fs.DurationVar(&s.HorizontalPodAutoscalerSyncPeriod.Duration, "horizontal-pod-autoscaler-sync-period", s.HorizontalPodAutoscalerSyncPeriod.Duration, "The period for syncing the number of pods in horizontal pod autoscaler.") fs.DurationVar(&s.HorizontalPodAutoscalerUpscaleForbiddenWindow.Duration, "horizontal-pod-autoscaler-upscale-delay", s.HorizontalPodAutoscalerUpscaleForbiddenWindow.Duration, "The period since last upscale, before another upscale can be performed in horizontal pod autoscaler.") fs.DurationVar(&s.HorizontalPodAutoscalerDownscaleForbiddenWindow.Duration, "horizontal-pod-autoscaler-downscale-delay", s.HorizontalPodAutoscalerDownscaleForbiddenWindow.Duration, "The period since last downscale, before another downscale can be performed in horizontal pod autoscaler.") + fs.Float64Var(&s.HorizontalPodAutoscalerTolerance, "horizontal-pod-autoscaler-tolerance", s.HorizontalPodAutoscalerTolerance, "The minimum change (from 1.0) in the desired-to-actual metrics ratio for the horizontal pod autoscaler to consider scaling.") fs.DurationVar(&s.DeploymentControllerSyncPeriod.Duration, "deployment-controller-sync-period", s.DeploymentControllerSyncPeriod.Duration, "Period for syncing the deployments.") fs.DurationVar(&s.PodEvictionTimeout.Duration, "pod-eviction-timeout", s.PodEvictionTimeout.Duration, "The grace period for deleting pods on failed nodes.") fs.Float32Var(&s.DeletingPodsQps, "deleting-pods-qps", 0.1, "Number of nodes per second on which pods are deleted in case of node failure.") @@ -191,8 +205,8 @@ func (s *CMServer) AddFlags(fs *pflag.FlagSet, allControllers []string, disabled fs.BoolVar(&s.EnableProfiling, "profiling", true, "Enable profiling via web interface host:port/debug/pprof/") fs.BoolVar(&s.EnableContentionProfiling, "contention-profiling", false, "Enable lock contention profiling, if profiling is enabled") fs.StringVar(&s.ClusterName, "cluster-name", s.ClusterName, "The instance prefix for the cluster") - fs.StringVar(&s.ClusterCIDR, "cluster-cidr", s.ClusterCIDR, "CIDR Range for Pods in cluster.") - fs.StringVar(&s.ServiceCIDR, 
"service-cluster-ip-range", s.ServiceCIDR, "CIDR Range for Services in cluster.") + fs.StringVar(&s.ClusterCIDR, "cluster-cidr", s.ClusterCIDR, "CIDR Range for Pods in cluster. Requires --allocate-node-cidrs to be true") + fs.StringVar(&s.ServiceCIDR, "service-cluster-ip-range", s.ServiceCIDR, "CIDR Range for Services in cluster. Requires --allocate-node-cidrs to be true") fs.Int32Var(&s.NodeCIDRMaskSize, "node-cidr-mask-size", s.NodeCIDRMaskSize, "Mask size for node cidr in cluster.") fs.BoolVar(&s.AllocateNodeCIDRs, "allocate-node-cidrs", false, "Should CIDRs for Pods be allocated and set on the cloud provider.") diff --git a/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/plugins.go b/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/plugins.go index bf9c9fe12993..170c366c90a7 100644 --- a/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/plugins.go +++ b/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/plugins.go @@ -30,17 +30,12 @@ import ( // Volume plugins "github.com/golang/glog" "k8s.io/kubernetes/pkg/cloudprovider" - "k8s.io/kubernetes/pkg/cloudprovider/providers/aws" - "k8s.io/kubernetes/pkg/cloudprovider/providers/azure" - "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" - "k8s.io/kubernetes/pkg/cloudprovider/providers/openstack" - "k8s.io/kubernetes/pkg/cloudprovider/providers/photon" - "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/aws_ebs" "k8s.io/kubernetes/pkg/volume/azure_dd" "k8s.io/kubernetes/pkg/volume/azure_file" "k8s.io/kubernetes/pkg/volume/cinder" + "k8s.io/kubernetes/pkg/volume/csi" "k8s.io/kubernetes/pkg/volume/fc" "k8s.io/kubernetes/pkg/volume/flexvolume" "k8s.io/kubernetes/pkg/volume/flocker" @@ -58,6 +53,9 @@ import ( "k8s.io/kubernetes/pkg/volume/storageos" volumeutil "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/pkg/volume/vsphere_volume" + + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/kubernetes/pkg/features" ) // ProbeAttachableVolumePlugins collects all volume plugins for the attach/ @@ -78,6 +76,10 @@ func ProbeAttachableVolumePlugins() []volume.VolumePlugin { allPlugins = append(allPlugins, storageos.ProbeVolumePlugins()...) allPlugins = append(allPlugins, fc.ProbeVolumePlugins()...) allPlugins = append(allPlugins, iscsi.ProbeVolumePlugins()...) + allPlugins = append(allPlugins, rbd.ProbeVolumePlugins()...) + if utilfeature.DefaultFeatureGate.Enabled(features.CSIPersistentVolume) { + allPlugins = append(allPlugins, csi.ProbeVolumePlugins()...) + } return allPlugins } @@ -104,6 +106,9 @@ func ProbeExpandableVolumePlugins(config componentconfig.VolumeConfiguration) [] allPlugins = append(allPlugins, scaleio.ProbeVolumePlugins()...) allPlugins = append(allPlugins, storageos.ProbeVolumePlugins()...) allPlugins = append(allPlugins, fc.ProbeVolumePlugins()...) + if !utilfeature.DefaultFeatureGate.Enabled(features.CSIPersistentVolume) { + allPlugins = append(allPlugins, csi.ProbeVolumePlugins()...) + } return allPlugins } @@ -154,22 +159,12 @@ func ProbeControllerVolumePlugins(cloud cloudprovider.Interface, config componen allPlugins = append(allPlugins, local.ProbeVolumePlugins()...) allPlugins = append(allPlugins, storageos.ProbeVolumePlugins()...) - if cloud != nil { - switch { - case aws.ProviderName == cloud.ProviderName(): - allPlugins = append(allPlugins, aws_ebs.ProbeVolumePlugins()...) - case gce.ProviderName == cloud.ProviderName(): - allPlugins = append(allPlugins, gce_pd.ProbeVolumePlugins()...) 
- case openstack.ProviderName == cloud.ProviderName(): - allPlugins = append(allPlugins, cinder.ProbeVolumePlugins()...) - case vsphere.ProviderName == cloud.ProviderName(): - allPlugins = append(allPlugins, vsphere_volume.ProbeVolumePlugins()...) - case azure.CloudProviderName == cloud.ProviderName(): - allPlugins = append(allPlugins, azure_dd.ProbeVolumePlugins()...) - case photon.ProviderName == cloud.ProviderName(): - allPlugins = append(allPlugins, photon_pd.ProbeVolumePlugins()...) - } - } + allPlugins = append(allPlugins, aws_ebs.ProbeVolumePlugins()...) + allPlugins = append(allPlugins, gce_pd.ProbeVolumePlugins()...) + allPlugins = append(allPlugins, cinder.ProbeVolumePlugins()...) + allPlugins = append(allPlugins, vsphere_volume.ProbeVolumePlugins()...) + allPlugins = append(allPlugins, azure_dd.ProbeVolumePlugins()...) + allPlugins = append(allPlugins, photon_pd.ProbeVolumePlugins()...) return allPlugins } diff --git a/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/rbac.go b/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/rbac.go new file mode 100644 index 000000000000..b49d3403fe07 --- /dev/null +++ b/vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/rbac.go @@ -0,0 +1,33 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package app + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kubernetes/pkg/controller/clusterroleaggregation" +) + +func startClusterRoleAggregrationController(ctx ControllerContext) (bool, error) { + if !ctx.AvailableResources[schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "clusterroles"}] { + return false, nil + } + go clusterroleaggregation.NewClusterRoleAggregation( + ctx.InformerFactory.Rbac().V1().ClusterRoles(), + ctx.ClientBuilder.ClientOrDie("clusterrole-aggregation-controller").RbacV1(), + ).Run(5, ctx.Stop) + return true, nil +} diff --git a/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/BUILD b/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/BUILD index 33a206756004..4a4cb061112a 100644 --- a/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/BUILD +++ b/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/BUILD @@ -18,10 +18,10 @@ go_library( ], "//conditions:default": [], }), + importpath = "k8s.io/kubernetes/cmd/kube-proxy/app", deps = [ - "//pkg/api:go_default_library", "//pkg/apis/componentconfig:go_default_library", - "//pkg/apis/componentconfig/v1alpha1:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/client/informers/informers_generated/internalversion:go_default_library", "//pkg/features:go_default_library", @@ -29,13 +29,19 @@ go_library( "//pkg/kubelet/qos:go_default_library", "//pkg/master/ports:go_default_library", "//pkg/proxy:go_default_library", + "//pkg/proxy/apis/kubeproxyconfig:go_default_library", + "//pkg/proxy/apis/kubeproxyconfig/scheme:go_default_library", + "//pkg/proxy/apis/kubeproxyconfig/v1alpha1:go_default_library", + "//pkg/proxy/apis/kubeproxyconfig/validation:go_default_library", "//pkg/proxy/config:go_default_library", "//pkg/proxy/healthcheck:go_default_library", "//pkg/proxy/iptables:go_default_library", "//pkg/proxy/ipvs:go_default_library", + "//pkg/proxy/metrics:go_default_library", "//pkg/proxy/userspace:go_default_library", "//pkg/util/configz:go_default_library", "//pkg/util/dbus:go_default_library", + "//pkg/util/ipset:go_default_library", "//pkg/util/iptables:go_default_library", "//pkg/util/ipvs:go_default_library", "//pkg/util/mount:go_default_library", @@ -44,6 +50,7 @@ go_library( "//pkg/util/pointer:go_default_library", "//pkg/util/resourcecontainer:go_default_library", "//pkg/util/sysctl:go_default_library", + "//pkg/version:go_default_library", "//pkg/version/verflag:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", @@ -62,6 +69,7 @@ go_library( "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", + "//vendor/k8s.io/client-go/rest:go_default_library", "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library", "//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library", "//vendor/k8s.io/client-go/tools/record:go_default_library", @@ -79,17 +87,16 @@ go_library( go_test( name = "go_default_test", srcs = ["server_test.go"], + importpath = "k8s.io/kubernetes/cmd/kube-proxy/app", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", - "//pkg/apis/componentconfig:go_default_library", - "//pkg/apis/componentconfig/v1alpha1:go_default_library", + "//pkg/apis/core:go_default_library", + 
"//pkg/proxy/apis/kubeproxyconfig:go_default_library", "//pkg/util/configz:go_default_library", "//pkg/util/iptables:go_default_library", "//pkg/util/pointer:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server.go b/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server.go index 2b66fbea9589..35a7c14e58a8 100644 --- a/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server.go +++ b/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server.go @@ -41,30 +41,36 @@ import ( utilfeature "k8s.io/apiserver/pkg/util/feature" clientgoclientset "k8s.io/client-go/kubernetes" v1core "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" "k8s.io/client-go/tools/record" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/componentconfig" - "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1" + api "k8s.io/kubernetes/pkg/apis/core" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubelet/qos" "k8s.io/kubernetes/pkg/master/ports" "k8s.io/kubernetes/pkg/proxy" - proxyconfig "k8s.io/kubernetes/pkg/proxy/config" + "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig" + "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/scheme" + "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1" + "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/validation" + "k8s.io/kubernetes/pkg/proxy/config" "k8s.io/kubernetes/pkg/proxy/healthcheck" "k8s.io/kubernetes/pkg/proxy/iptables" "k8s.io/kubernetes/pkg/proxy/ipvs" "k8s.io/kubernetes/pkg/proxy/userspace" "k8s.io/kubernetes/pkg/util/configz" + utilipset "k8s.io/kubernetes/pkg/util/ipset" utiliptables "k8s.io/kubernetes/pkg/util/iptables" utilipvs "k8s.io/kubernetes/pkg/util/ipvs" utilnode "k8s.io/kubernetes/pkg/util/node" "k8s.io/kubernetes/pkg/util/oom" utilpointer "k8s.io/kubernetes/pkg/util/pointer" "k8s.io/kubernetes/pkg/util/resourcecontainer" + "k8s.io/kubernetes/pkg/version" "k8s.io/kubernetes/pkg/version/verflag" "k8s.io/utils/exec" @@ -81,15 +87,6 @@ const ( proxyModeKernelspace = "kernelspace" ) -// checkKnownProxyMode returns true if proxyMode is valid. -func checkKnownProxyMode(proxyMode string) bool { - switch proxyMode { - case "", proxyModeUserspace, proxyModeIPTables, proxyModeIPVS, proxyModeKernelspace: - return true - } - return false -} - // Options contains everything necessary to create and run a proxy server. type Options struct { // ConfigFile is the location of the proxy server's configuration file. @@ -98,12 +95,13 @@ type Options struct { WriteConfigTo string // CleanupAndExit, when true, makes the proxy server clean up iptables rules, then exit. CleanupAndExit bool - + // CleanupIPVS, when true, makes the proxy server clean up ipvs rules before running. + CleanupIPVS bool // config is the proxy server's configuration object. - config *componentconfig.KubeProxyConfiguration + config *kubeproxyconfig.KubeProxyConfiguration // The fields below here are placeholders for flags that can't be directly mapped into - // componentconfig.KubeProxyConfiguration. 
+ // kubeproxyconfig.KubeProxyConfiguration. // // TODO remove these fields once the deprecated flags are removed. @@ -120,16 +118,18 @@ type Options struct { func AddFlags(options *Options, fs *pflag.FlagSet) { fs.StringVar(&options.ConfigFile, "config", options.ConfigFile, "The path to the configuration file.") fs.StringVar(&options.WriteConfigTo, "write-config-to", options.WriteConfigTo, "If set, write the default configuration values to this file and exit.") - fs.MarkDeprecated("cleanup-iptables", "This flag is replaced by cleanup-proxyrules.") + fs.BoolVar(&options.CleanupAndExit, "cleanup-iptables", options.CleanupAndExit, "If true cleanup iptables and ipvs rules and exit.") + fs.MarkDeprecated("cleanup-iptables", "This flag is replaced by --cleanup.") fs.BoolVar(&options.CleanupAndExit, "cleanup", options.CleanupAndExit, "If true cleanup iptables and ipvs rules and exit.") + fs.BoolVar(&options.CleanupIPVS, "cleanup-ipvs", options.CleanupIPVS, "If true make kube-proxy cleanup ipvs rules before running. Default is true") // All flags below here are deprecated and will eventually be removed. - fs.Var(componentconfig.IPVar{Val: &options.config.BindAddress}, "bind-address", "The IP address for the proxy server to serve on (set to 0.0.0.0 for all interfaces)") + fs.Var(componentconfig.IPVar{Val: &options.config.BindAddress}, "bind-address", "The IP address for the proxy server to serve on (set to `0.0.0.0` for all IPv4 interfaces and `::` for all IPv6 interfaces)") fs.StringVar(&options.master, "master", options.master, "The address of the Kubernetes API server (overrides any value in kubeconfig)") fs.Int32Var(&options.healthzPort, "healthz-port", options.healthzPort, "The port to bind the health check server. Use 0 to disable.") - fs.Var(componentconfig.IPVar{Val: &options.config.HealthzBindAddress}, "healthz-bind-address", "The IP address and port for the health check server to serve on (set to 0.0.0.0 for all interfaces)") - fs.Var(componentconfig.IPVar{Val: &options.config.MetricsBindAddress}, "metrics-bind-address", "The IP address and port for the metrics server to serve on (set to 0.0.0.0 for all interfaces)") + fs.Var(componentconfig.IPVar{Val: &options.config.HealthzBindAddress}, "healthz-bind-address", "The IP address and port for the health check server to serve on (set to `0.0.0.0` for all IPv4 interfaces and `::` for all IPv6 interfaces)") + fs.Var(componentconfig.IPVar{Val: &options.config.MetricsBindAddress}, "metrics-bind-address", "The IP address and port for the metrics server to serve on (set to `0.0.0.0` for all IPv4 interfaces and `::` for all IPv6 interfaces)") fs.Int32Var(options.config.OOMScoreAdj, "oom-score-adj", utilpointer.Int32PtrDerefOr(options.config.OOMScoreAdj, int32(qos.KubeProxyOOMScoreAdj)), "The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000]") fs.StringVar(&options.config.ResourceContainer, "resource-container", options.config.ResourceContainer, "Absolute name of the resource-only container to create and run the Kube-proxy in (Default: /kube-proxy).") fs.MarkDeprecated("resource-container", "This feature will be removed in a later release.") @@ -147,14 +147,17 @@ func AddFlags(options *Options, fs *pflag.FlagSet) { fs.StringVar(&options.config.ClusterCIDR, "cluster-cidr", options.config.ClusterCIDR, "The CIDR range of pods in the cluster. 
When configured, traffic sent to a Service cluster IP from outside this range will be masqueraded and traffic sent from pods to an external LoadBalancer IP will be directed to the respective cluster IP instead") fs.StringVar(&options.config.ClientConnection.ContentType, "kube-api-content-type", options.config.ClientConnection.ContentType, "Content type of requests sent to apiserver.") fs.Float32Var(&options.config.ClientConnection.QPS, "kube-api-qps", options.config.ClientConnection.QPS, "QPS to use while talking with kubernetes apiserver") - fs.IntVar(&options.config.ClientConnection.Burst, "kube-api-burst", options.config.ClientConnection.Burst, "Burst to use while talking with kubernetes apiserver") + fs.Int32Var(&options.config.ClientConnection.Burst, "kube-api-burst", options.config.ClientConnection.Burst, "Burst to use while talking with kubernetes apiserver") fs.DurationVar(&options.config.UDPIdleTimeout.Duration, "udp-timeout", options.config.UDPIdleTimeout.Duration, "How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). Must be greater than 0. Only applicable for proxy-mode=userspace") - fs.Int32Var(&options.config.Conntrack.Max, "conntrack-max", options.config.Conntrack.Max, + if options.config.Conntrack.Max == nil { + options.config.Conntrack.Max = utilpointer.Int32Ptr(0) + } + fs.Int32Var(options.config.Conntrack.Max, "conntrack-max", *options.config.Conntrack.Max, "Maximum number of NAT connections to track (0 to leave as-is). This overrides conntrack-max-per-core and conntrack-min.") fs.MarkDeprecated("conntrack-max", "This feature will be removed in a later release.") - fs.Int32Var(&options.config.Conntrack.MaxPerCore, "conntrack-max-per-core", options.config.Conntrack.MaxPerCore, + fs.Int32Var(options.config.Conntrack.MaxPerCore, "conntrack-max-per-core", *options.config.Conntrack.MaxPerCore, "Maximum number of NAT connections to track per CPU core (0 to leave the limit as-is and ignore conntrack-min).") - fs.Int32Var(&options.config.Conntrack.Min, "conntrack-min", options.config.Conntrack.Min, + fs.Int32Var(options.config.Conntrack.Min, "conntrack-min", *options.config.Conntrack.Min, "Minimum number of conntrack entries to allocate, regardless of conntrack-max-per-core (set conntrack-max-per-core=0 to leave the limit as-is).") fs.DurationVar(&options.config.Conntrack.TCPEstablishedTimeout.Duration, "conntrack-tcp-timeout-established", options.config.Conntrack.TCPEstablishedTimeout.Duration, "Idle timeout for established TCP connections (0 to leave as-is)") fs.DurationVar( @@ -166,23 +169,14 @@ func AddFlags(options *Options, fs *pflag.FlagSet) { utilfeature.DefaultFeatureGate.AddFlag(fs) } -func NewOptions() (*Options, error) { - o := &Options{ - config: new(componentconfig.KubeProxyConfiguration), +func NewOptions() *Options { + return &Options{ + config: new(kubeproxyconfig.KubeProxyConfiguration), healthzPort: ports.ProxyHealthzPort, + scheme: scheme.Scheme, + codecs: scheme.Codecs, + CleanupIPVS: true, } - - o.scheme = runtime.NewScheme() - o.codecs = serializer.NewCodecFactory(o.scheme) - - if err := componentconfig.AddToScheme(o.scheme); err != nil { - return nil, err - } - if err := v1alpha1.AddToScheme(o.scheme); err != nil { - return nil, err - } - - return o, nil } // Complete completes all the required options. @@ -192,6 +186,17 @@ func (o *Options) Complete() error { o.applyDeprecatedHealthzPortToConfig() } + // Load the config file here in Complete, so that Validate validates the fully-resolved config. 
+ if len(o.ConfigFile) > 0 { + if c, err := o.loadConfigFromFile(o.ConfigFile); err != nil { + return err + } else { + o.config = c + // Make sure we apply the feature gate settings in the config file. + utilfeature.DefaultFeatureGate.Set(o.config.FeatureGates) + } + } + return nil } @@ -201,27 +206,19 @@ func (o *Options) Validate(args []string) error { return errors.New("no arguments are supported") } + if errs := validation.Validate(o.config); len(errs) != 0 { + return errs.ToAggregate() + } + return nil } func (o *Options) Run() error { - config := o.config - if len(o.WriteConfigTo) > 0 { return o.writeConfigFile() } - if len(o.ConfigFile) > 0 { - if c, err := o.loadConfigFromFile(o.ConfigFile); err != nil { - return err - } else { - config = c - // Make sure we apply the feature gate settings in the config file. - utilfeature.DefaultFeatureGate.Set(config.FeatureGates) - } - } - - proxyServer, err := NewProxyServer(config, o.CleanupAndExit, o.scheme, o.master) + proxyServer, err := NewProxyServer(o) if err != nil { return err } @@ -281,7 +278,7 @@ func (o *Options) applyDeprecatedHealthzPortToConfig() { // loadConfigFromFile loads the contents of file and decodes it as a // KubeProxyConfiguration object. -func (o *Options) loadConfigFromFile(file string) (*componentconfig.KubeProxyConfiguration, error) { +func (o *Options) loadConfigFromFile(file string) (*kubeproxyconfig.KubeProxyConfiguration, error) { data, err := ioutil.ReadFile(file) if err != nil { return nil, err @@ -291,19 +288,19 @@ func (o *Options) loadConfigFromFile(file string) (*componentconfig.KubeProxyCon } // loadConfig decodes data as a KubeProxyConfiguration object. -func (o *Options) loadConfig(data []byte) (*componentconfig.KubeProxyConfiguration, error) { +func (o *Options) loadConfig(data []byte) (*kubeproxyconfig.KubeProxyConfiguration, error) { configObj, gvk, err := o.codecs.UniversalDecoder().Decode(data, nil, nil) if err != nil { return nil, err } - config, ok := configObj.(*componentconfig.KubeProxyConfiguration) + config, ok := configObj.(*kubeproxyconfig.KubeProxyConfiguration) if !ok { return nil, fmt.Errorf("got unexpected config type: %v", gvk) } return config, nil } -func (o *Options) ApplyDefaults(in *componentconfig.KubeProxyConfiguration) (*componentconfig.KubeProxyConfiguration, error) { +func (o *Options) ApplyDefaults(in *kubeproxyconfig.KubeProxyConfiguration) (*kubeproxyconfig.KubeProxyConfiguration, error) { external, err := o.scheme.ConvertToVersion(in, v1alpha1.SchemeGroupVersion) if err != nil { return nil, err @@ -311,29 +308,30 @@ func (o *Options) ApplyDefaults(in *componentconfig.KubeProxyConfiguration) (*co o.scheme.Default(external) - internal, err := o.scheme.ConvertToVersion(external, componentconfig.SchemeGroupVersion) + internal, err := o.scheme.ConvertToVersion(external, kubeproxyconfig.SchemeGroupVersion) if err != nil { return nil, err } - out := internal.(*componentconfig.KubeProxyConfiguration) + out := internal.(*kubeproxyconfig.KubeProxyConfiguration) return out, nil } +func (o *Options) SetConfig(in *kubeproxyconfig.KubeProxyConfiguration) { + o.config = in +} + // NewProxyCommand creates a *cobra.Command object with default parameters func NewProxyCommand() *cobra.Command { - opts, err := NewOptions() - if err != nil { - glog.Fatalf("Unable to initialize command options: %v", err) - } + opts := NewOptions() cmd := &cobra.Command{ Use: "kube-proxy", Long: `The Kubernetes network proxy runs on each node. 
This reflects services as defined in the Kubernetes API on each node and can do simple -TCP,UDP stream forwarding or round robin TCP,UDP forwarding across a set of backends. -Service cluster ips and ports are currently found through Docker-links-compatible +TCP and UDP stream forwarding or round robin TCP and UDP forwarding across a set of backends. +Service cluster IPs and ports are currently found through Docker-links-compatible environment variables specifying ports opened by the service proxy. There is an optional addon that provides cluster DNS for these cluster IPs. The user must create a service with the apiserver API to configure the proxy.`, @@ -345,6 +343,7 @@ with the apiserver API to configure the proxy.`, }, } + var err error opts.config, err = opts.ApplyDefaults(opts.config) if err != nil { glog.Fatalf("unable to create flag defaults: %v", err) @@ -365,37 +364,43 @@ type ProxyServer struct { EventClient v1core.EventsGetter IptInterface utiliptables.Interface IpvsInterface utilipvs.Interface + IpsetInterface utilipset.Interface execer exec.Interface Proxier proxy.ProxyProvider Broadcaster record.EventBroadcaster Recorder record.EventRecorder - ConntrackConfiguration componentconfig.KubeProxyConntrackConfiguration + ConntrackConfiguration kubeproxyconfig.KubeProxyConntrackConfiguration Conntracker Conntracker // if nil, ignored ProxyMode string NodeRef *v1.ObjectReference CleanupAndExit bool + CleanupIPVS bool MetricsBindAddress string EnableProfiling bool OOMScoreAdj *int32 ResourceContainer string ConfigSyncPeriod time.Duration - ServiceEventHandler proxyconfig.ServiceHandler - EndpointsEventHandler proxyconfig.EndpointsHandler + ServiceEventHandler config.ServiceHandler + EndpointsEventHandler config.EndpointsHandler HealthzServer *healthcheck.HealthzServer } // createClients creates a kube client and an event client from the given config and masterOverride. // TODO remove masterOverride when CLI flags are removed. -func createClients(config componentconfig.ClientConnectionConfiguration, masterOverride string) (clientset.Interface, v1core.EventsGetter, error) { +func createClients(config kubeproxyconfig.ClientConnectionConfiguration, masterOverride string) (clientset.Interface, v1core.EventsGetter, error) { + var kubeConfig *rest.Config + var err error + if len(config.KubeConfigFile) == 0 && len(masterOverride) == 0 { - glog.Warningf("Neither --kubeconfig nor --master was specified. Using default API client. This might not work.") + glog.Info("Neither kubeconfig file nor master URL was specified. Falling back to in-cluster config.") + kubeConfig, err = rest.InClusterConfig() + } else { + // This creates a client, first loading any specified kubeconfig + // file, and then overriding the Master flag, if non-empty. + kubeConfig, err = clientcmd.NewNonInteractiveDeferredLoadingClientConfig( + &clientcmd.ClientConfigLoadingRules{ExplicitPath: config.KubeConfigFile}, + &clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: masterOverride}}).ClientConfig() } - - // This creates a client, first loading any specified kubeconfig - // file, and then overriding the Master flag, if non-empty. 
- kubeConfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig( - &clientcmd.ClientConfigLoadingRules{ExplicitPath: config.KubeConfigFile}, - &clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: masterOverride}}).ClientConfig() if err != nil { return nil, nil, err } @@ -421,11 +426,13 @@ func createClients(config componentconfig.ClientConnectionConfiguration, masterO // Run runs the specified ProxyServer. This should never exit (unless CleanupAndExit is set). func (s *ProxyServer) Run() error { + // To help debugging, immediately log version + glog.Infof("Version: %+v", version.Get()) // remove iptables rules and exit if s.CleanupAndExit { encounteredError := userspace.CleanupLeftovers(s.IptInterface) encounteredError = iptables.CleanupLeftovers(s.IptInterface) || encounteredError - encounteredError = ipvs.CleanupLeftovers(s.execer, s.IpvsInterface, s.IptInterface) || encounteredError + encounteredError = ipvs.CleanupLeftovers(s.IpvsInterface, s.IptInterface, s.IpsetInterface, s.CleanupIPVS) || encounteredError if encounteredError { return errors.New("encountered an error while tearing down rules.") } @@ -507,14 +514,14 @@ func (s *ProxyServer) Run() error { } } - if s.ConntrackConfiguration.TCPEstablishedTimeout.Duration > 0 { + if s.ConntrackConfiguration.TCPEstablishedTimeout != nil && s.ConntrackConfiguration.TCPEstablishedTimeout.Duration > 0 { timeout := int(s.ConntrackConfiguration.TCPEstablishedTimeout.Duration / time.Second) if err := s.Conntracker.SetTCPEstablishedTimeout(timeout); err != nil { return err } } - if s.ConntrackConfiguration.TCPCloseWaitTimeout.Duration > 0 { + if s.ConntrackConfiguration.TCPCloseWaitTimeout != nil && s.ConntrackConfiguration.TCPCloseWaitTimeout.Duration > 0 { timeout := int(s.ConntrackConfiguration.TCPCloseWaitTimeout.Duration / time.Second) if err := s.Conntracker.SetTCPCloseWaitTimeout(timeout); err != nil { return err @@ -528,11 +535,11 @@ func (s *ProxyServer) Run() error { // Note: RegisterHandler() calls need to happen before creation of Sources because sources // only notify on changes, and the initial update (on process start) may be lost if no handlers // are registered yet. 
- serviceConfig := proxyconfig.NewServiceConfig(informerFactory.Core().InternalVersion().Services(), s.ConfigSyncPeriod) + serviceConfig := config.NewServiceConfig(informerFactory.Core().InternalVersion().Services(), s.ConfigSyncPeriod) serviceConfig.RegisterEventHandler(s.ServiceEventHandler) go serviceConfig.Run(wait.NeverStop) - endpointsConfig := proxyconfig.NewEndpointsConfig(informerFactory.Core().InternalVersion().Endpoints(), s.ConfigSyncPeriod) + endpointsConfig := config.NewEndpointsConfig(informerFactory.Core().InternalVersion().Endpoints(), s.ConfigSyncPeriod) endpointsConfig.RegisterEventHandler(s.EndpointsEventHandler) go endpointsConfig.Run(wait.NeverStop) @@ -552,17 +559,20 @@ func (s *ProxyServer) birthCry() { s.Recorder.Eventf(s.NodeRef, api.EventTypeNormal, "Starting", "Starting kube-proxy.") } -func getConntrackMax(config componentconfig.KubeProxyConntrackConfiguration) (int, error) { - if config.Max > 0 { - if config.MaxPerCore > 0 { +func getConntrackMax(config kubeproxyconfig.KubeProxyConntrackConfiguration) (int, error) { + if config.Max != nil && *config.Max > 0 { + if config.MaxPerCore != nil && *config.MaxPerCore > 0 { return -1, fmt.Errorf("invalid config: Conntrack Max and Conntrack MaxPerCore are mutually exclusive") } glog.V(3).Infof("getConntrackMax: using absolute conntrack-max (deprecated)") - return int(config.Max), nil + return int(*config.Max), nil } - if config.MaxPerCore > 0 { - floor := int(config.Min) - scaled := int(config.MaxPerCore) * goruntime.NumCPU() + if config.MaxPerCore != nil && *config.MaxPerCore > 0 { + floor := 0 + if config.Min != nil { + floor = int(*config.Min) + } + scaled := int(*config.MaxPerCore) * goruntime.NumCPU() if scaled > floor { glog.V(3).Infof("getConntrackMax: using scaled conntrack-max-per-core") return scaled, nil diff --git a/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_others.go b/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_others.go index 02b631fb680f..51bb09fd015e 100644 --- a/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_others.go +++ b/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_others.go @@ -32,16 +32,18 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/tools/record" - "k8s.io/kubernetes/pkg/apis/componentconfig" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/proxy" + proxyconfigapi "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig" proxyconfig "k8s.io/kubernetes/pkg/proxy/config" "k8s.io/kubernetes/pkg/proxy/healthcheck" "k8s.io/kubernetes/pkg/proxy/iptables" "k8s.io/kubernetes/pkg/proxy/ipvs" + "k8s.io/kubernetes/pkg/proxy/metrics" "k8s.io/kubernetes/pkg/proxy/userspace" "k8s.io/kubernetes/pkg/util/configz" utildbus "k8s.io/kubernetes/pkg/util/dbus" + utilipset "k8s.io/kubernetes/pkg/util/ipset" utiliptables "k8s.io/kubernetes/pkg/util/iptables" utilipvs "k8s.io/kubernetes/pkg/util/ipvs" utilnode "k8s.io/kubernetes/pkg/util/node" @@ -52,12 +54,22 @@ import ( ) // NewProxyServer returns a new ProxyServer. 
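For reference, the conntrack sizing logic introduced in the getConntrackMax hunk above can be sketched on its own. This is a minimal illustration, not the vendored function: plain ints stand in for the *int32 kube-proxy configuration fields, conntrackMax is an illustrative name, and the values in the usage example are assumptions chosen only to show the scaling.

package main

import (
	"fmt"
	"runtime"
)

// conntrackMax mirrors the guarded calculation above: an absolute max wins,
// max and max-per-core are mutually exclusive, and the per-core value is
// scaled by CPU count with min acting as a floor.
func conntrackMax(max, maxPerCore, min int) (int, error) {
	if max > 0 {
		if maxPerCore > 0 {
			return -1, fmt.Errorf("conntrack max and max-per-core are mutually exclusive")
		}
		return max, nil
	}
	if maxPerCore > 0 {
		scaled := maxPerCore * runtime.NumCPU()
		if scaled > min {
			return scaled, nil
		}
		return min, nil
	}
	return 0, nil // 0 leaves the existing conntrack limit untouched
}

func main() {
	// Example values only: 32768 per core with a floor of 131072.
	fmt.Println(conntrackMax(0, 32768, 131072))
}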
-func NewProxyServer(config *componentconfig.KubeProxyConfiguration, cleanupAndExit bool, scheme *runtime.Scheme, master string) (*ProxyServer, error) { +func NewProxyServer(o *Options) (*ProxyServer, error) { + return newProxyServer(o.config, o.CleanupAndExit, o.CleanupIPVS, o.scheme, o.master) +} + +func newProxyServer( + config *proxyconfigapi.KubeProxyConfiguration, + cleanupAndExit bool, + cleanupIPVS bool, + scheme *runtime.Scheme, + master string) (*ProxyServer, error) { + if config == nil { return nil, errors.New("config is required") } - if c, err := configz.New("componentconfig"); err == nil { + if c, err := configz.New(proxyconfigapi.GroupName); err == nil { c.Set(config) } else { glog.Warningf("unable to register configz: %s", err) @@ -65,11 +77,13 @@ func NewProxyServer(config *componentconfig.KubeProxyConfiguration, cleanupAndEx protocol := utiliptables.ProtocolIpv4 if net.ParseIP(config.BindAddress).To4() == nil { + glog.V(0).Infof("IPv6 bind address (%s), assume IPv6 operation", config.BindAddress) protocol = utiliptables.ProtocolIpv6 } var iptInterface utiliptables.Interface var ipvsInterface utilipvs.Interface + var ipsetInterface utilipset.Interface var dbus utildbus.Interface // Create a iptables utils. @@ -78,10 +92,17 @@ func NewProxyServer(config *componentconfig.KubeProxyConfiguration, cleanupAndEx dbus = utildbus.New() iptInterface = utiliptables.New(execer, dbus, protocol) ipvsInterface = utilipvs.New(execer) + ipsetInterface = utilipset.New(execer) // We omit creation of pretty much everything if we run in cleanup mode if cleanupAndExit { - return &ProxyServer{IptInterface: iptInterface, IpvsInterface: ipvsInterface, CleanupAndExit: cleanupAndExit}, nil + return &ProxyServer{ + execer: execer, + IptInterface: iptInterface, + IpvsInterface: ipvsInterface, + IpsetInterface: ipsetInterface, + CleanupAndExit: cleanupAndExit, + }, nil } client, eventClient, err := createClients(config.ClientConnection, master) @@ -112,13 +133,11 @@ func NewProxyServer(config *componentconfig.KubeProxyConfiguration, cleanupAndEx var serviceEventHandler proxyconfig.ServiceHandler var endpointsEventHandler proxyconfig.EndpointsHandler - proxyMode := getProxyMode(string(config.Mode), iptInterface, iptables.LinuxKernelCompatTester{}) + proxyMode := getProxyMode(string(config.Mode), iptInterface, ipsetInterface, iptables.LinuxKernelCompatTester{}) if proxyMode == proxyModeIPTables { glog.V(0).Info("Using iptables Proxier.") - var nodeIP net.IP - if config.BindAddress != "0.0.0.0" { - nodeIP = net.ParseIP(config.BindAddress) - } else { + nodeIP := net.ParseIP(config.BindAddress) + if nodeIP.Equal(net.IPv4zero) || nodeIP.Equal(net.IPv6zero) { nodeIP = getNodeIP(client, hostname) } if config.IPTables.MasqueradeBit == nil { @@ -144,7 +163,7 @@ func NewProxyServer(config *componentconfig.KubeProxyConfiguration, cleanupAndEx if err != nil { return nil, fmt.Errorf("unable to create proxier: %v", err) } - iptables.RegisterMetrics() + metrics.RegisterMetrics() proxier = proxierIPTables serviceEventHandler = proxierIPTables endpointsEventHandler = proxierIPTables @@ -152,14 +171,17 @@ func NewProxyServer(config *componentconfig.KubeProxyConfiguration, cleanupAndEx glog.V(0).Info("Tearing down inactive rules.") // TODO this has side effects that should only happen when Run() is invoked. userspace.CleanupLeftovers(iptInterface) - // IPVS Proxier will generate some iptables rules, - // need to clean them before switching to other proxy mode. 
- ipvs.CleanupLeftovers(execer, ipvsInterface, iptInterface) + // IPVS Proxier will generate some iptables rules, need to clean them before switching to other proxy mode. + // Besides, ipvs proxier will create some ipvs rules as well. Because there is no way to tell if a given + // ipvs rule is created by IPVS proxier or not. Users should explicitly specify `--clean-ipvs=true` to flush + // all ipvs rules when kube-proxy starts up. Users should do this operation with caution. + ipvs.CleanupLeftovers(ipvsInterface, iptInterface, ipsetInterface, cleanupIPVS) } else if proxyMode == proxyModeIPVS { glog.V(0).Info("Using ipvs Proxier.") proxierIPVS, err := ipvs.NewProxier( iptInterface, ipvsInterface, + ipsetInterface, utilsysctl.New(), execer, config.IPVS.SyncPeriod.Duration, @@ -176,6 +198,7 @@ func NewProxyServer(config *componentconfig.KubeProxyConfiguration, cleanupAndEx if err != nil { return nil, fmt.Errorf("unable to create proxier: %v", err) } + metrics.RegisterMetrics() proxier = proxierIPVS serviceEventHandler = proxierIPVS endpointsEventHandler = proxierIPVS @@ -212,9 +235,11 @@ func NewProxyServer(config *componentconfig.KubeProxyConfiguration, cleanupAndEx glog.V(0).Info("Tearing down inactive rules.") // TODO this has side effects that should only happen when Run() is invoked. iptables.CleanupLeftovers(iptInterface) - // IPVS Proxier will generate some iptables rules, - // need to clean them before switching to other proxy mode. - ipvs.CleanupLeftovers(execer, ipvsInterface, iptInterface) + // IPVS Proxier will generate some iptables rules, need to clean them before switching to other proxy mode. + // Besides, ipvs proxier will create some ipvs rules as well. Because there is no way to tell if a given + // ipvs rule is created by IPVS proxier or not. Users should explicitly specify `--clean-ipvs=true` to flush + // all ipvs rules when kube-proxy starts up. Users should do this operation with caution.
+ ipvs.CleanupLeftovers(ipvsInterface, iptInterface, ipsetInterface, cleanupIPVS) } iptInterface.AddReloadFunc(proxier.Sync) @@ -224,6 +249,7 @@ func NewProxyServer(config *componentconfig.KubeProxyConfiguration, cleanupAndEx EventClient: eventClient, IptInterface: iptInterface, IpvsInterface: ipvsInterface, + IpsetInterface: ipsetInterface, execer: execer, Proxier: proxier, Broadcaster: eventBroadcaster, @@ -243,7 +269,7 @@ func NewProxyServer(config *componentconfig.KubeProxyConfiguration, cleanupAndEx }, nil } -func getProxyMode(proxyMode string, iptver iptables.IPTablesVersioner, kcompat iptables.KernelCompatTester) string { +func getProxyMode(proxyMode string, iptver iptables.IPTablesVersioner, ipsetver ipvs.IPSetVersioner, kcompat iptables.KernelCompatTester) string { if proxyMode == proxyModeUserspace { return proxyModeUserspace } @@ -254,7 +280,7 @@ func getProxyMode(proxyMode string, iptver iptables.IPTablesVersioner, kcompat i if utilfeature.DefaultFeatureGate.Enabled(features.SupportIPVSProxyMode) { if proxyMode == proxyModeIPVS { - return tryIPVSProxy(iptver, kcompat) + return tryIPVSProxy(iptver, ipsetver, kcompat) } else { glog.Warningf("Can't use ipvs proxier, trying iptables proxier") return tryIPTablesProxy(iptver, kcompat) @@ -264,20 +290,18 @@ func getProxyMode(proxyMode string, iptver iptables.IPTablesVersioner, kcompat i return tryIPTablesProxy(iptver, kcompat) } -func tryIPVSProxy(iptver iptables.IPTablesVersioner, kcompat iptables.KernelCompatTester) string { +func tryIPVSProxy(iptver iptables.IPTablesVersioner, ipsetver ipvs.IPSetVersioner, kcompat iptables.KernelCompatTester) string { // guaranteed false on error, error only necessary for debugging - // IPVS Proxier relies on iptables - useIPVSProxy, err := ipvs.CanUseIPVSProxier() + // IPVS Proxier relies on ipset + useIPVSProxy, err := ipvs.CanUseIPVSProxier(ipsetver) if err != nil { - utilruntime.HandleError(fmt.Errorf("can't determine whether to use ipvs proxy, using userspace proxier: %v", err)) - return proxyModeUserspace + // Try to fallback to iptables before falling back to userspace + utilruntime.HandleError(fmt.Errorf("can't determine whether to use ipvs proxy, error: %v", err)) } if useIPVSProxy { return proxyModeIPVS } - // TODO: Check ipvs version - // Try to fallback to iptables before falling back to userspace glog.V(1).Infof("Can't use ipvs proxier, trying iptables proxier") return tryIPTablesProxy(iptver, kcompat) diff --git a/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_windows.go b/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_windows.go index f4f46386a741..1fb4f4ce7ec1 100644 --- a/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_windows.go +++ b/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server_windows.go @@ -31,8 +31,8 @@ import ( "k8s.io/apimachinery/pkg/types" utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/client-go/tools/record" - "k8s.io/kubernetes/pkg/apis/componentconfig" "k8s.io/kubernetes/pkg/proxy" + proxyconfigapi "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig" proxyconfig "k8s.io/kubernetes/pkg/proxy/config" "k8s.io/kubernetes/pkg/proxy/healthcheck" "k8s.io/kubernetes/pkg/proxy/winkernel" @@ -46,12 +46,16 @@ import ( ) // NewProxyServer returns a new ProxyServer. 
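The getProxyMode/tryIPVSProxy changes above reorder the fallback so that a failed IPVS (ipset-based) check now tries the iptables proxier before dropping to userspace. A rough standalone sketch of that selection order follows; chooseProxyMode is an illustrative name, and the booleans stand in for the real version/kernel compatibility checks and the SupportIPVSProxyMode feature gate, which are simplified away here.

package main

import "fmt"

// chooseProxyMode sketches the selection order: an explicit userspace request
// wins, ipvs is used only when its check passes, and everything else falls
// back to iptables, with userspace as the last resort when the
// iptables/kernel check also fails.
func chooseProxyMode(requested string, ipvsOK, iptablesOK bool) string {
	if requested == "userspace" {
		return "userspace"
	}
	if requested == "ipvs" && ipvsOK {
		return "ipvs"
	}
	if iptablesOK {
		return "iptables"
	}
	return "userspace"
}

func main() {
	fmt.Println(chooseProxyMode("ipvs", false, true)) // falls back to iptables
}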
-func NewProxyServer(config *componentconfig.KubeProxyConfiguration, cleanupAndExit bool, scheme *runtime.Scheme, master string) (*ProxyServer, error) { +func NewProxyServer(o *Options) (*ProxyServer, error) { + return newProxyServer(o.config, o.CleanupAndExit, o.scheme, o.master) +} + +func newProxyServer(config *proxyconfigapi.KubeProxyConfiguration, cleanupAndExit bool, scheme *runtime.Scheme, master string) (*ProxyServer, error) { if config == nil { return nil, errors.New("config is required") } - if c, err := configz.New("componentconfig"); err == nil { + if c, err := configz.New(proxyconfigapi.GroupName); err == nil { c.Set(config) } else { return nil, fmt.Errorf("unable to register configz: %s", err) diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/constants/BUILD b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/constants/BUILD index 5b6884926abb..c1a584258ea9 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/constants/BUILD +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/constants/BUILD @@ -9,7 +9,9 @@ load( go_library( name = "go_default_library", srcs = ["constants.go"], + importpath = "k8s.io/kubernetes/cmd/kubeadm/app/constants", deps = [ + "//pkg/registry/core/service/ipallocator:go_default_library", "//pkg/util/version:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", ], @@ -31,6 +33,7 @@ filegroup( go_test( name = "go_default_test", srcs = ["constants_test.go"], + importpath = "k8s.io/kubernetes/cmd/kubeadm/app/constants", library = ":go_default_library", deps = ["//pkg/util/version:go_default_library"], ) diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/constants/constants.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/constants/constants.go index 4e04af2d2353..4cf9076eb9a7 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/constants/constants.go +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/constants/constants.go @@ -19,11 +19,13 @@ package constants import ( "fmt" "io/ioutil" + "net" "os" "path/filepath" "time" "k8s.io/api/core/v1" + "k8s.io/kubernetes/pkg/registry/core/service/ipallocator" "k8s.io/kubernetes/pkg/util/version" ) @@ -32,58 +34,102 @@ import ( var KubernetesDir = "/etc/kubernetes" const ( + // ManifestsSubDirName defines directory name to store manifests ManifestsSubDirName = "manifests" - TempDirForKubeadm = "/etc/kubernetes/tmp" + // TempDirForKubeadm defines temporary directory for kubeadm + TempDirForKubeadm = "/etc/kubernetes/tmp" + // CACertAndKeyBaseName defines certificate authority base name CACertAndKeyBaseName = "ca" - CACertName = "ca.crt" - CAKeyName = "ca.key" + // CACertName defines certificate name + CACertName = "ca.crt" + // CAKeyName defines certificate name + CAKeyName = "ca.key" + // APIServerCertAndKeyBaseName defines API's server certificate and key base name APIServerCertAndKeyBaseName = "apiserver" - APIServerCertName = "apiserver.crt" - APIServerKeyName = "apiserver.key" - APIServerCertCommonName = "kube-apiserver" //used as subject.commonname attribute (CN) - + // APIServerCertName defines API's server certificate name + APIServerCertName = "apiserver.crt" + // APIServerKeyName defines API's server key name + APIServerKeyName = "apiserver.key" + // APIServerCertCommonName defines API's server certificate common name (CN) + APIServerCertCommonName = "kube-apiserver" + + // APIServerKubeletClientCertAndKeyBaseName defines kubelet client certificate and key base name APIServerKubeletClientCertAndKeyBaseName = "apiserver-kubelet-client" - APIServerKubeletClientCertName = "apiserver-kubelet-client.crt" - 
APIServerKubeletClientKeyName = "apiserver-kubelet-client.key" - APIServerKubeletClientCertCommonName = "kube-apiserver-kubelet-client" //used as subject.commonname attribute (CN) - - ServiceAccountKeyBaseName = "sa" - ServiceAccountPublicKeyName = "sa.pub" + // APIServerKubeletClientCertName defines kubelet client certificate name + APIServerKubeletClientCertName = "apiserver-kubelet-client.crt" + // APIServerKubeletClientKeyName defines kubelet client key name + APIServerKubeletClientKeyName = "apiserver-kubelet-client.key" + // APIServerKubeletClientCertCommonName defines kubelet client certificate common name (CN) + APIServerKubeletClientCertCommonName = "kube-apiserver-kubelet-client" + + // ServiceAccountKeyBaseName defines SA key base name + ServiceAccountKeyBaseName = "sa" + // ServiceAccountPublicKeyName defines SA public key base name + ServiceAccountPublicKeyName = "sa.pub" + // ServiceAccountPrivateKeyName defines SA private key base name ServiceAccountPrivateKeyName = "sa.key" + // FrontProxyCACertAndKeyBaseName defines front proxy CA certificate and key base name FrontProxyCACertAndKeyBaseName = "front-proxy-ca" - FrontProxyCACertName = "front-proxy-ca.crt" - FrontProxyCAKeyName = "front-proxy-ca.key" + // FrontProxyCACertName defines front proxy CA certificate name + FrontProxyCACertName = "front-proxy-ca.crt" + // FrontProxyCAKeyName defines front proxy CA key name + FrontProxyCAKeyName = "front-proxy-ca.key" + // FrontProxyClientCertAndKeyBaseName defines front proxy certificate and key base name FrontProxyClientCertAndKeyBaseName = "front-proxy-client" - FrontProxyClientCertName = "front-proxy-client.crt" - FrontProxyClientKeyName = "front-proxy-client.key" - FrontProxyClientCertCommonName = "front-proxy-client" //used as subject.commonname attribute (CN) - - AdminKubeConfigFileName = "admin.conf" - KubeletBootstrapKubeConfigFileName = "bootstrap-kubelet.conf" - KubeletKubeConfigFileName = "kubelet.conf" + // FrontProxyClientCertName defines front proxy certificate name + FrontProxyClientCertName = "front-proxy-client.crt" + // FrontProxyClientKeyName defines front proxy key name + FrontProxyClientKeyName = "front-proxy-client.key" + // FrontProxyClientCertCommonName defines front proxy certificate common name + FrontProxyClientCertCommonName = "front-proxy-client" //used as subject.commonname attribute (CN) + + // AdminKubeConfigFileName defines name for the KubeConfig aimed to be used by the superuser/admin of the cluster + AdminKubeConfigFileName = "admin.conf" + // KubeletBootstrapKubeConfigFileName defines the file name for the KubeConfig that the kubelet will use to do + // the TLS bootstrap to get itself a unique credential + KubeletBootstrapKubeConfigFileName = "bootstrap-kubelet.conf" + + // KubeletKubeConfigFileName defines the file name for the KubeConfig that the master kubelet will use for talking + // to the API server + KubeletKubeConfigFileName = "kubelet.conf" + // ControllerManagerKubeConfigFileName defines the file name for the controller manager's KubeConfig file ControllerManagerKubeConfigFileName = "controller-manager.conf" - SchedulerKubeConfigFileName = "scheduler.conf" + // SchedulerKubeConfigFileName defines the file name for the scheduler's KubeConfig file + SchedulerKubeConfigFileName = "scheduler.conf" // Some well-known users and groups in the core Kubernetes authorization system - ControllerManagerUser = "system:kube-controller-manager" - SchedulerUser = "system:kube-scheduler" - MastersGroup = "system:masters" - NodesGroup = "system:nodes"
+ // ControllerManagerUser defines the well-known user the controller-manager should be authenticated as + ControllerManagerUser = "system:kube-controller-manager" + // SchedulerUser defines the well-known user the scheduler should be authenticated as + SchedulerUser = "system:kube-scheduler" + // MastersGroup defines the well-known group for the apiservers. This group is also superuser by default + // (i.e. bound to the cluster-admin ClusterRole) + MastersGroup = "system:masters" + // NodesGroup defines the well-known group for all nodes. + NodesGroup = "system:nodes" + // NodesClusterRoleBinding defines the well-known ClusterRoleBinding which binds the too permissive system:node + // ClusterRole to the system:nodes group. Since kubeadm is using the Node Authorizer, this ClusterRoleBinding's + // system:nodes group subject is removed if present. NodesClusterRoleBinding = "system:node" + // KubeletBaseConfigMapRoleName defines the base kubelet configuration ConfigMap. + KubeletBaseConfigMapRoleName = "kubeadm:kubelet-base-configmap" + // APICallRetryInterval defines how long kubeadm should wait before retrying a failed API operation APICallRetryInterval = 500 * time.Millisecond // DiscoveryRetryInterval specifies how long kubeadm should wait before retrying to connect to the master when doing discovery DiscoveryRetryInterval = 5 * time.Second // MarkMasterTimeout specifies how long kubeadm should wait for applying the label and taint on the master before timing out MarkMasterTimeout = 2 * time.Minute + // UpdateNodeTimeout specifies how long kubeadm should wait for updating node with the initial remote configuration of kubelet before timing out + UpdateNodeTimeout = 2 * time.Minute - // Minimum amount of nodes the Service subnet should allow. + // MinimumAddressesInServiceSubnet defines minimum amount of nodes the Service subnet should allow. // We need at least ten, because the DNS service is always at the tenth cluster clusterIP MinimumAddressesInServiceSubnet = 10 @@ -92,7 +138,7 @@ const ( DefaultTokenDuration = 24 * time.Hour // LabelNodeRoleMaster specifies that a node is a master - // It's copied over to kubeadm until it's merged in core: https://github.com/kubernetes/kubernetes/pull/39112 + // This is a duplicate definition of the constant in pkg/controller/service/service_controller.go LabelNodeRoleMaster = "node-role.kubernetes.io/master" // MasterConfigurationConfigMap specifies in what ConfigMap in the kube-system namespace the `kubeadm init` configuration should be stored @@ -101,17 +147,38 @@ const ( // MasterConfigurationConfigMapKey specifies in what ConfigMap key the master configuration should be stored MasterConfigurationConfigMapKey = "MasterConfiguration" + // KubeletBaseConfigurationConfigMap specifies in what ConfigMap in the kube-system namespace the initial remote configuration of kubelet should be stored + KubeletBaseConfigurationConfigMap = "kubelet-base-config-1.9" + + // KubeletBaseConfigurationConfigMapKey specifies in what ConfigMap key the initial remote configuration of kubelet should be stored + // TODO: Use the constant ("kubelet.config.k8s.io") defined in pkg/kubelet/kubeletconfig/util/keys/keys.go + // after https://github.com/kubernetes/kubernetes/pull/53833 being merged. 
+ KubeletBaseConfigurationConfigMapKey = "kubelet" + + // KubeletBaseConfigurationDir specifies the directory on the node where the initial remote configuration of kubelet is stored + KubeletBaseConfigurationDir = "/var/lib/kubelet/config/init" + + // KubeletBaseConfigurationFile specifies the file name on the node which stores the initial remote configuration of kubelet + // TODO: Use the constant ("kubelet.config.k8s.io") defined in pkg/kubelet/kubeletconfig/util/keys/keys.go + // after https://github.com/kubernetes/kubernetes/pull/53833 being merged. + KubeletBaseConfigurationFile = "kubelet" + // MinExternalEtcdVersion indicates minimum external etcd version which kubeadm supports MinExternalEtcdVersion = "3.0.14" // DefaultEtcdVersion indicates the default etcd version that kubeadm uses - DefaultEtcdVersion = "3.0.17" + DefaultEtcdVersion = "3.1.10" - Etcd = "etcd" - KubeAPIServer = "kube-apiserver" + // Etcd defines variable used internally when referring to etcd component + Etcd = "etcd" + // KubeAPIServer defines variable used internally when referring to kube-apiserver component + KubeAPIServer = "kube-apiserver" + // KubeControllerManager defines variable used internally when referring to kube-controller-manager component KubeControllerManager = "kube-controller-manager" - KubeScheduler = "kube-scheduler" - KubeProxy = "kube-proxy" + // KubeScheduler defines variable used internally when referring to kube-scheduler component + KubeScheduler = "kube-scheduler" + // KubeProxy defines variable used internally when referring to kube-proxy component + KubeProxy = "kube-proxy" // SelfHostingPrefix describes the prefix workloads that are self-hosted by kubeadm has SelfHostingPrefix = "self-hosted-" @@ -122,18 +189,19 @@ const ( // KubeConfigVolumeName specifies the name for the Volume that is used for injecting the kubeconfig to talk securely to the api server for a control plane component if applicable KubeConfigVolumeName = "kubeconfig" - // V17NodeBootstrapTokenAuthGroup specifies which group a Node Bootstrap Token should be authenticated in, in v1.7 - V17NodeBootstrapTokenAuthGroup = "system:bootstrappers" - - // V18NodeBootstrapTokenAuthGroup specifies which group a Node Bootstrap Token should be authenticated in, in v1.8 - V18NodeBootstrapTokenAuthGroup = "system:bootstrappers:kubeadm:default-node-token" + // NodeBootstrapTokenAuthGroup specifies which group a Node Bootstrap Token should be authenticated in + NodeBootstrapTokenAuthGroup = "system:bootstrappers:kubeadm:default-node-token" // DefaultCIImageRepository points to image registry where CI uploads images from ci-cross build job DefaultCIImageRepository = "gcr.io/kubernetes-ci-images" + + // CoreDNS defines a variable used internally when referring to the CoreDNS addon for a cluster + CoreDNS = "coredns" + // KubeDNS defines a variable used internally when referring to the kube-dns addon for a cluster + KubeDNS = "kube-dns" ) var ( - // MasterTaint is the taint to apply on the PodSpec for being able to run that Pod on the master MasterTaint = v1.Taint{ Key: LabelNodeRoleMaster, @@ -146,7 +214,9 @@ var ( Effect: v1.TaintEffectNoSchedule, } - AuthorizationPolicyPath = filepath.Join(KubernetesDir, "abac_policy.json") + // AuthorizationPolicyPath defines the supported location of authorization policy file + AuthorizationPolicyPath = filepath.Join(KubernetesDir, "abac_policy.json") + // AuthorizationWebhookConfigPath defines the supported location of webhook config file AuthorizationWebhookConfigPath = filepath.Join(KubernetesDir,
"webhook_authz.conf") // DefaultTokenUsages specifies the default functions a token will get @@ -156,17 +226,40 @@ var ( MasterComponents = []string{KubeAPIServer, KubeControllerManager, KubeScheduler} // MinimumControlPlaneVersion specifies the minimum control plane version kubeadm can deploy - MinimumControlPlaneVersion = version.MustParseSemantic("v1.7.0") + MinimumControlPlaneVersion = version.MustParseSemantic("v1.8.0") + + // MinimumKubeletVersion specifies the minimum version of kubelet which kubeadm supports + MinimumKubeletVersion = version.MustParseSemantic("v1.8.0") - // MinimumCSRAutoApprovalClusterRolesVersion defines whether kubeadm can rely on the built-in CSR approval ClusterRole or not (note, the binding is always created by kubeadm!) - // TODO: Remove this when the v1.9 cycle starts and we bump the minimum supported version to v1.8.0 - MinimumCSRAutoApprovalClusterRolesVersion = version.MustParseSemantic("v1.8.0-alpha.3") + // MinimumKubeProxyComponentConfigVersion specifies the minimum version for the kubeProxyComponent + MinimumKubeProxyComponentConfigVersion = version.MustParseSemantic("v1.9.0-alpha.3") - // UseEnableBootstrapTokenAuthFlagVersion defines the first version where the API server supports the --enable-bootstrap-token-auth flag instead of the old and deprecated flag. - // TODO: Remove this when the v1.9 cycle starts and we bump the minimum supported version to v1.8.0 - UseEnableBootstrapTokenAuthFlagVersion = version.MustParseSemantic("v1.8.0-beta.0") + // SupportedEtcdVersion lists officially supported etcd versions with corresponding kubernetes releases + SupportedEtcdVersion = map[uint8]string{ + 8: "3.0.17", + 9: "3.1.10", + 10: "3.1.10", + } ) +// EtcdSupportedVersion returns officially supported version of etcd for a specific kubernetes release +// if passed version is not listed, the function returns nil and an error +func EtcdSupportedVersion(versionString string) (*version.Version, error) { + kubernetesVersion, err := version.ParseSemantic(versionString) + if err != nil { + return nil, err + } + + if etcdStringVersion, ok := SupportedEtcdVersion[uint8(kubernetesVersion.Minor())]; ok { + etcdVersion, err := version.ParseSemantic(etcdStringVersion) + if err != nil { + return nil, err + } + return etcdVersion, nil + } + return nil, fmt.Errorf("Unsupported or unknown kubernetes version") +} + // GetStaticPodDirectory returns the location on the disk where the Static Pod should be present func GetStaticPodDirectory() string { return filepath.Join(KubernetesDir, ManifestsSubDirName) @@ -201,10 +294,19 @@ func CreateTempDirForKubeadm(dirName string) (string, error) { return tempDir, nil } -// GetNodeBootstrapTokenAuthGroup gets the bootstrap token auth group conditionally based on version -func GetNodeBootstrapTokenAuthGroup(k8sVersion *version.Version) string { - if k8sVersion.AtLeast(UseEnableBootstrapTokenAuthFlagVersion) { - return V18NodeBootstrapTokenAuthGroup +// GetDNSIP returns a dnsIP, which is 10th IP in svcSubnet CIDR range +func GetDNSIP(svcSubnet string) (net.IP, error) { + // Get the service subnet CIDR + _, svcSubnetCIDR, err := net.ParseCIDR(svcSubnet) + if err != nil { + return nil, fmt.Errorf("couldn't parse service subnet CIDR %q: %v", svcSubnet, err) + } + + // Selects the 10th IP in service subnet CIDR range as dnsIP + dnsIP, err := ipallocator.GetIndexedIP(svcSubnetCIDR, 10) + if err != nil { + return nil, fmt.Errorf("unable to get tenth IP address from service subnet CIDR %s: %v", svcSubnetCIDR.String(), err) } - return 
V17NodeBootstrapTokenAuthGroup + + return dnsIP, nil } diff --git a/vendor/k8s.io/kubernetes/cmd/kubelet/app/BUILD b/vendor/k8s.io/kubernetes/cmd/kubelet/app/BUILD index ae004b5f48a5..c7e482f49ea8 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubelet/app/BUILD +++ b/vendor/k8s.io/kubernetes/cmd/kubelet/app/BUILD @@ -9,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["server_test.go"], + importpath = "k8s.io/kubernetes/cmd/kubelet/app", library = ":go_default_library", - deps = ["//pkg/kubelet/apis/kubeletconfig:go_default_library"], ) go_library( @@ -26,9 +26,11 @@ go_library( ], "//conditions:default": [], }), + importpath = "k8s.io/kubernetes/cmd/kubelet/app", deps = [ "//cmd/kubelet/app/options:go_default_library", - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/capabilities:go_default_library", "//pkg/client/chaosclient:go_default_library", "//pkg/cloudprovider:go_default_library", @@ -50,7 +52,6 @@ go_library( "//pkg/kubelet/config:go_default_library", "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/dockershim:go_default_library", - "//pkg/kubelet/dockershim/libdocker:go_default_library", "//pkg/kubelet/dockershim/remote:go_default_library", "//pkg/kubelet/eviction:go_default_library", "//pkg/kubelet/eviction/api:go_default_library", @@ -76,6 +77,7 @@ go_library( "//pkg/volume/cephfs:go_default_library", "//pkg/volume/cinder:go_default_library", "//pkg/volume/configmap:go_default_library", + "//pkg/volume/csi:go_default_library", "//pkg/volume/downwardapi:go_default_library", "//pkg/volume/empty_dir:go_default_library", "//pkg/volume/fc:go_default_library", @@ -102,6 +104,7 @@ go_library( "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", @@ -121,6 +124,7 @@ go_library( "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library", "//vendor/k8s.io/client-go/tools/record:go_default_library", "//vendor/k8s.io/client-go/util/cert:go_default_library", + "//vendor/k8s.io/client-go/util/certificate:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:linux_amd64": [ "//vendor/golang.org/x/exp/inotify:go_default_library", diff --git a/vendor/k8s.io/kubernetes/cmd/kubelet/app/options/BUILD b/vendor/k8s.io/kubernetes/cmd/kubelet/app/options/BUILD index 9b19054d1324..846d42e9c453 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubelet/app/options/BUILD +++ b/vendor/k8s.io/kubernetes/cmd/kubelet/app/options/BUILD @@ -3,6 +3,7 @@ package(default_visibility = ["//visibility:public"]) load( "@io_bazel_rules_go//go:def.bzl", "go_library", + "go_test", ) go_library( @@ -11,13 +12,17 @@ go_library( "container_runtime.go", "options.go", ], + importpath = "k8s.io/kubernetes/cmd/kubelet/app/options", deps = [ "//pkg/apis/componentconfig:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/features:go_default_library", "//pkg/kubelet/apis/kubeletconfig:go_default_library", "//pkg/kubelet/apis/kubeletconfig/scheme:go_default_library", "//pkg/kubelet/apis/kubeletconfig/v1alpha1:go_default_library", "//pkg/kubelet/apis/kubeletconfig/validation:go_default_library", + "//pkg/kubelet/config:go_default_library", 
+ "//pkg/kubelet/types:go_default_library", "//pkg/util/taints:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -38,3 +43,15 @@ filegroup( srcs = [":package-srcs"], tags = ["automanaged"], ) + +go_test( + name = "go_default_test", + srcs = ["options_test.go"], + importpath = "k8s.io/kubernetes/cmd/kubelet/app/options", + library = ":go_default_library", + deps = [ + "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/flag:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/cmd/kubelet/app/options/container_runtime.go b/vendor/k8s.io/kubernetes/cmd/kubelet/app/options/container_runtime.go index 38fa4ef06eac..d1174ea044c2 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubelet/app/options/container_runtime.go +++ b/vendor/k8s.io/kubernetes/cmd/kubelet/app/options/container_runtime.go @@ -20,9 +20,9 @@ import ( "runtime" "time" - "github.com/spf13/pflag" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/kubernetes/pkg/kubelet/config" + kubetypes "k8s.io/kubernetes/pkg/kubelet/types" ) const ( @@ -39,75 +39,18 @@ var ( defaultPodSandboxImageVersion ) -type ContainerRuntimeOptions struct { - // Docker-specific options. - - // DockershimRootDirectory is the path to the dockershim root directory. Defaults to - // /var/lib/dockershim if unset. Exposed for integration testing (e.g. in OpenShift). - DockershimRootDirectory string - // Enable dockershim only mode. - ExperimentalDockershim bool - // This flag, if set, disables use of a shared PID namespace for pods running in the docker CRI runtime. - // A shared PID namespace is the only option in non-docker runtimes and is required by the CRI. The ability to - // disable it for docker will be removed unless a compelling use case is discovered with widespread use. - // TODO: Remove once we no longer support disabling shared PID namespace (https://issues.k8s.io/41938) - DockerDisableSharedPID bool - // PodSandboxImage is the image whose network/ipc namespaces - // containers in each pod will use. - PodSandboxImage string - // DockerEndpoint is the path to the docker endpoint to communicate with. - DockerEndpoint string - // DockerExecHandlerName is the handler to use when executing a command - // in a container. Valid values are 'native' and 'nsenter'. Defaults to - // 'native'. - DockerExecHandlerName string - // If no pulling progress is made before the deadline imagePullProgressDeadline, - // the image pulling will be cancelled. Defaults to 1m0s. - // +optional - ImagePullProgressDeadline metav1.Duration - - // Network plugin options. - - // networkPluginName is the name of the network plugin to be invoked for - // various events in kubelet/pod lifecycle - NetworkPluginName string - // NetworkPluginMTU is the MTU to be passed to the network plugin, - // and overrides the default MTU for cases where it cannot be automatically - // computed (such as IPSEC). - NetworkPluginMTU int32 - // NetworkPluginDir is the full path of the directory in which to search - // for network plugins (and, for backwards-compat, CNI config files) - NetworkPluginDir string - // CNIConfDir is the full path of the directory in which to search for - // CNI config files - CNIConfDir string - // CNIBinDir is the full path of the directory in which to search for - // CNI plugin binaries - CNIBinDir string - - // rkt-specific options. 
- - // rktPath is the path of rkt binary. Leave empty to use the first rkt in $PATH. - RktPath string - // rktApiEndpoint is the endpoint of the rkt API service to communicate with. - RktAPIEndpoint string - // rktStage1Image is the image to use as stage1. Local paths and - // http/https URLs are supported. - RktStage1Image string -} - // NewContainerRuntimeOptions will create a new ContainerRuntimeOptions with // default values. -func NewContainerRuntimeOptions() *ContainerRuntimeOptions { +func NewContainerRuntimeOptions() *config.ContainerRuntimeOptions { dockerEndpoint := "" if runtime.GOOS != "windows" { dockerEndpoint = "unix:///var/run/docker.sock" } - return &ContainerRuntimeOptions{ + return &config.ContainerRuntimeOptions{ + ContainerRuntime: kubetypes.DockerContainerRuntime, DockerEndpoint: dockerEndpoint, DockershimRootDirectory: "/var/lib/dockershim", - DockerExecHandlerName: "native", DockerDisableSharedPID: true, PodSandboxImage: defaultPodSandboxImage, ImagePullProgressDeadline: metav1.Duration{Duration: 1 * time.Minute}, @@ -115,34 +58,3 @@ func NewContainerRuntimeOptions() *ContainerRuntimeOptions { ExperimentalDockershim: false, } } - -func (s *ContainerRuntimeOptions) AddFlags(fs *pflag.FlagSet) { - // Docker-specific settings. - fs.BoolVar(&s.ExperimentalDockershim, "experimental-dockershim", s.ExperimentalDockershim, "Enable dockershim only mode. In this mode, kubelet will only start dockershim without any other functionalities. This flag only serves test purpose, please do not use it unless you are conscious of what you are doing. [default=false]") - fs.MarkHidden("experimental-dockershim") - fs.StringVar(&s.DockershimRootDirectory, "experimental-dockershim-root-directory", s.DockershimRootDirectory, "Path to the dockershim root directory.") - fs.MarkHidden("experimental-dockershim-root-directory") - fs.BoolVar(&s.DockerDisableSharedPID, "docker-disable-shared-pid", s.DockerDisableSharedPID, "The Container Runtime Interface (CRI) defaults to using a shared PID namespace for containers in a pod when running with Docker 1.13.1 or higher. Setting this flag reverts to the previous behavior of isolated PID namespaces. This ability will be removed in a future Kubernetes release.") - fs.StringVar(&s.PodSandboxImage, "pod-infra-container-image", s.PodSandboxImage, "The image whose network/ipc namespaces containers in each pod will use.") - fs.StringVar(&s.DockerEndpoint, "docker-endpoint", s.DockerEndpoint, "Use this for the docker endpoint to communicate with") - // TODO(#40229): Remove the docker-exec-handler flag. - fs.StringVar(&s.DockerExecHandlerName, "docker-exec-handler", s.DockerExecHandlerName, "Handler to use when executing a command in a container. Valid values are 'native' and 'nsenter'. Defaults to 'native'.") - fs.MarkDeprecated("docker-exec-handler", "this flag will be removed and only the 'native' handler will be supported in the future.") - fs.DurationVar(&s.ImagePullProgressDeadline.Duration, "image-pull-progress-deadline", s.ImagePullProgressDeadline.Duration, "If no pulling progress is made before this deadline, the image pulling will be cancelled.") - - // Network plugin settings. Shared by both docker and rkt. - fs.StringVar(&s.NetworkPluginName, "network-plugin", s.NetworkPluginName, " The name of the network plugin to be invoked for various events in kubelet/pod lifecycle") - //TODO(#46410): Remove the network-plugin-dir flag. 
- fs.StringVar(&s.NetworkPluginDir, "network-plugin-dir", s.NetworkPluginDir, " The full path of the directory in which to search for network plugins or CNI config") - fs.MarkDeprecated("network-plugin-dir", "Use --cni-bin-dir instead. This flag will be removed in a future version.") - fs.StringVar(&s.CNIConfDir, "cni-conf-dir", s.CNIConfDir, " The full path of the directory in which to search for CNI config files. Default: /etc/cni/net.d") - fs.StringVar(&s.CNIBinDir, "cni-bin-dir", s.CNIBinDir, " The full path of the directory in which to search for CNI plugin binaries. Default: /opt/cni/bin") - fs.Int32Var(&s.NetworkPluginMTU, "network-plugin-mtu", s.NetworkPluginMTU, " The MTU to be passed to the network plugin, to override the default. Set to 0 to use the default 1460 MTU.") - - // Rkt-specific settings. - fs.StringVar(&s.RktPath, "rkt-path", s.RktPath, "Path of rkt binary. Leave empty to use the first rkt in $PATH. Only used if --container-runtime='rkt'.") - fs.StringVar(&s.RktAPIEndpoint, "rkt-api-endpoint", s.RktAPIEndpoint, "The endpoint of the rkt API service to communicate with. Only used if --container-runtime='rkt'.") - fs.StringVar(&s.RktStage1Image, "rkt-stage1-image", s.RktStage1Image, "image to use as stage1. Local paths and http/https URLs are supported. If empty, the 'stage1.aci' in the same directory as '--rkt-path' will be used.") - fs.MarkDeprecated("rkt-stage1-image", "Will be removed in a future version. The default stage1 image will be specified by the rkt configurations, see https://github.com/coreos/rkt/blob/master/Documentation/configuration.md for more details.") - -} diff --git a/vendor/k8s.io/kubernetes/cmd/kubelet/app/options/options.go b/vendor/k8s.io/kubernetes/cmd/kubelet/app/options/options.go index a227a41d8007..6d8e13abe8cc 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubelet/app/options/options.go +++ b/vendor/k8s.io/kubernetes/cmd/kubelet/app/options/options.go @@ -20,27 +20,24 @@ package options import ( "fmt" _ "net/http/pprof" + "path/filepath" + "runtime" "strings" + "github.com/spf13/pflag" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/apiserver/pkg/util/flag" - utilflag "k8s.io/apiserver/pkg/util/flag" "k8s.io/kubernetes/pkg/apis/componentconfig" + "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig" kubeletscheme "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/scheme" "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1" kubeletconfigvalidation "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/validation" + "k8s.io/kubernetes/pkg/kubelet/config" utiltaints "k8s.io/kubernetes/pkg/util/taints" - - "github.com/spf13/pflag" -) - -const ( - DefaultKubeletPodsDirName = "pods" - DefaultKubeletVolumesDirName = "volumes" - DefaultKubeletPluginsDirName = "plugins" - DefaultKubeletContainersDirName = "containers" ) // A configuration field should go in KubeletFlags instead of KubeletConfiguration if any of these are true: @@ -54,9 +51,6 @@ type KubeletFlags struct { BootstrapKubeconfig string RotateCertificates bool - // RequireKubeConfig is deprecated! A valid KubeConfig is now required if --kubeconfig is provided. - RequireKubeConfig bool - // Insert a probability of random errors during calls to the master. ChaosChance float64 // Crash immediately, rather than eating panics. @@ -81,7 +75,7 @@ type KubeletFlags struct { ProviderID string // Container-runtime-specific options. 
- ContainerRuntimeOptions + config.ContainerRuntimeOptions // certDirectory is the directory where the TLS certs are located (by // default /var/run/kubernetes). If tlsCertFile and tlsPrivateKeyFile @@ -110,25 +104,123 @@ type KubeletFlags struct { // The Kubelet will look in this directory for an init configuration. // The path may be absolute or relative; relative paths are under the Kubelet's current working directory. // Omit this flag to use the combination of built-in default configuration values and flags. - // To use this flag, the DynamicKubeletConfig feature gate must be enabled. + // To use this flag, the KubeletConfigFile feature gate must be enabled. InitConfigDir flag.StringFlag + + // registerNode enables automatic registration with the apiserver. + RegisterNode bool + + // registerWithTaints are an array of taints to add to a node object when + // the kubelet registers itself. This only takes effect when registerNode + // is true and upon the initial registration of the node. + RegisterWithTaints []core.Taint + + // EXPERIMENTAL FLAGS + // Whitelist of unsafe sysctls or sysctl patterns (ending in *). + // +optional + AllowedUnsafeSysctls []string + // containerized should be set to true if kubelet is running in a container. + Containerized bool + // remoteRuntimeEndpoint is the endpoint of remote runtime service + RemoteRuntimeEndpoint string + // remoteImageEndpoint is the endpoint of remote image service + RemoteImageEndpoint string + // experimentalMounterPath is the path of mounter binary. Leave empty to use the default mount path + ExperimentalMounterPath string + // If enabled, the kubelet will integrate with the kernel memcg notification to determine if memory eviction thresholds are crossed rather than polling. + // +optional + ExperimentalKernelMemcgNotification bool + // This flag, if set, enables a check prior to mount operations to verify that the required components + // (binaries, etc.) to mount the volume are available on the underlying node. If the check is enabled + // and fails the mount operation fails. + ExperimentalCheckNodeCapabilitiesBeforeMount bool + // This flag, if set, will avoid including `EvictionHard` limits while computing Node Allocatable. + // Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node-allocatable.md) doc for more information. + ExperimentalNodeAllocatableIgnoreEvictionThreshold bool + // A set of ResourceName=Percentage (e.g. memory=50%) pairs that describe + // how pod resource requests are reserved at the QoS level. + // Currently only memory is supported. [default=none]" + ExperimentalQOSReserved map[string]string + // Node Labels are the node labels to add when registering the node in the cluster + NodeLabels map[string]string + // volumePluginDir is the full path of the directory in which to search + // for additional third party volume plugins + VolumePluginDir string + // lockFilePath is the path that kubelet will use to as a lock file. + // It uses this file as a lock to synchronize with other kubelet processes + // that may be running. + LockFilePath string + // ExitOnLockContention is a flag that signifies to the kubelet that it is running + // in "bootstrap" mode. This requires that 'LockFilePath' has been set. + // This will cause the kubelet to listen to inotify events on the lock file, + // releasing it and exiting when another process tries to open that file. + ExitOnLockContention bool + // seccompProfileRoot is the directory path for seccomp profiles. 
+ SeccompProfileRoot string + // bootstrapCheckpointPath is the path to the directory containing pod checkpoints to + // run on restore + BootstrapCheckpointPath string + + // DEPRECATED FLAGS + // minimumGCAge is the minimum age for a finished container before it is + // garbage collected. + MinimumGCAge metav1.Duration + // maxPerPodContainerCount is the maximum number of old instances to + // retain per container. Each container takes up some disk space. + MaxPerPodContainerCount int32 + // maxContainerCount is the maximum number of old instances of containers + // to retain globally. Each container takes up some disk space. + MaxContainerCount int32 + // masterServiceNamespace is The namespace from which the kubernetes + // master services should be injected into pods. + MasterServiceNamespace string + // registerSchedulable tells the kubelet to register the node as + // schedulable. Won't have any effect if register-node is false. + // DEPRECATED: use registerWithTaints instead + RegisterSchedulable bool + // RequireKubeConfig is deprecated! A valid KubeConfig is now required if --kubeconfig is provided. + RequireKubeConfig bool + // nonMasqueradeCIDR configures masquerading: traffic to IPs outside this range will use IP masquerade. + NonMasqueradeCIDR string + // This flag, if set, instructs the kubelet to keep volumes from terminated pods mounted to the node. + // This can be useful for debugging volume related issues. + KeepTerminatedPodVolumes bool + // enable gathering custom metrics. + EnableCustomMetrics bool } // NewKubeletFlags will create a new KubeletFlags with default values func NewKubeletFlags() *KubeletFlags { + remoteRuntimeEndpoint := "" + if runtime.GOOS == "linux" { + remoteRuntimeEndpoint = "unix:///var/run/dockershim.sock" + } else if runtime.GOOS == "windows" { + remoteRuntimeEndpoint = "tcp://localhost:3735" + } + return &KubeletFlags{ // TODO(#41161:v1.10.0): Remove the default kubeconfig path and --require-kubeconfig. - RequireKubeConfig: false, - KubeConfig: flag.NewStringFlag("/var/lib/kubelet/kubeconfig"), - ContainerRuntimeOptions: *NewContainerRuntimeOptions(), - CertDirectory: "/var/run/kubernetes", - RootDirectory: v1alpha1.DefaultRootDir, - // DEPRECATED: auto detecting cloud providers goes against the initiative - // for out-of-tree cloud providers as we'll now depend on cAdvisor integrations - // with cloud providers instead of in the core repo. - // More details here: https://github.com/kubernetes/kubernetes/issues/50986 - CloudProvider: v1alpha1.AutoDetectCloudProvider, - RotateCertificates: false, + RequireKubeConfig: false, + KubeConfig: flag.NewStringFlag("/var/lib/kubelet/kubeconfig"), + ContainerRuntimeOptions: *NewContainerRuntimeOptions(), + CertDirectory: "/var/lib/kubelet/pki", + RootDirectory: v1alpha1.DefaultRootDir, + MasterServiceNamespace: metav1.NamespaceDefault, + MaxContainerCount: -1, + MaxPerPodContainerCount: 1, + MinimumGCAge: metav1.Duration{Duration: 0}, + NonMasqueradeCIDR: "10.0.0.0/8", + RegisterSchedulable: true, + ExperimentalKernelMemcgNotification: false, + ExperimentalQOSReserved: make(map[string]string), + RemoteRuntimeEndpoint: remoteRuntimeEndpoint, + RotateCertificates: false, + // TODO(#54161:v1.11.0): Remove --enable-custom-metrics flag, it is deprecated. 
+ EnableCustomMetrics: false, + NodeLabels: make(map[string]string), + VolumePluginDir: "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/", + RegisterNode: true, + SeccompProfileRoot: filepath.Join(v1alpha1.DefaultRootDir, "seccomp"), } } @@ -137,7 +229,7 @@ func ValidateKubeletFlags(f *KubeletFlags) error { if f.DynamicConfigDir.Provided() && !utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) { return fmt.Errorf("the DynamicKubeletConfig feature gate must be enabled in order to use the --dynamic-config-dir flag") } - // ensure that nobody sets InitConfigDir if the dynamic config feature gate is turned off + // ensure that nobody sets InitConfigDir if the KubeletConfigFile feature gate is turned off if f.InitConfigDir.Provided() && !utilfeature.DefaultFeatureGate.Enabled(features.KubeletConfigFile) { return fmt.Errorf("the KubeletConfigFile feature gate must be enabled in order to use the --init-config-dir flag") } @@ -227,13 +319,54 @@ func (f *KubeletFlags) AddFlags(fs *pflag.FlagSet) { fs.StringVar(&f.CertDirectory, "cert-dir", f.CertDirectory, "The directory where the TLS certs are located. "+ "If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored.") - fs.StringVar(&f.CloudProvider, "cloud-provider", f.CloudProvider, "The provider for cloud services. By default, kubelet will attempt to auto-detect the cloud provider (deprecated). Specify empty string for running with no cloud provider, this will be the default in upcoming releases.") + fs.StringVar(&f.CloudProvider, "cloud-provider", f.CloudProvider, "The provider for cloud services. Specify empty string for running with no cloud provider.") fs.StringVar(&f.CloudConfigFile, "cloud-config", f.CloudConfigFile, "The path to the cloud provider configuration file. Empty string for no configuration file.") fs.StringVar(&f.RootDirectory, "root-dir", f.RootDirectory, "Directory path for managing kubelet files (volume mounts,etc).") fs.Var(&f.DynamicConfigDir, "dynamic-config-dir", "The Kubelet will use this directory for checkpointing downloaded configurations and tracking configuration health. The Kubelet will create this directory if it does not already exist. The path may be absolute or relative; relative paths start at the Kubelet's current working directory. Providing this flag enables dynamic Kubelet configuration. Presently, you must also enable the DynamicKubeletConfig feature gate to pass this flag.") - fs.Var(&f.InitConfigDir, "init-config-dir", "The Kubelet will look in this directory for the init configuration. The path may be absolute or relative; relative paths start at the Kubelet's current working directory. Omit this argument to use the built-in default configuration values. Presently, you must also enable the DynamicKubeletConfig feature gate to pass this flag.") + fs.Var(&f.InitConfigDir, "init-config-dir", "The Kubelet will look in this directory for the init configuration. The path may be absolute or relative; relative paths start at the Kubelet's current working directory. Omit this argument to use the built-in default configuration values. Presently, you must also enable the KubeletConfigFile feature gate to pass this flag.") + + fs.BoolVar(&f.RegisterNode, "register-node", f.RegisterNode, "Register the node with the apiserver. If --kubeconfig is not provided, this flag is irrelevant, as the Kubelet won't have an apiserver to register with. 
Default=true.") + fs.Var(utiltaints.NewTaintsVar(&f.RegisterWithTaints), "register-with-taints", "Register the node with the given list of taints (comma separated \"=:\"). No-op if register-node is false.") + + // EXPERIMENTAL FLAGS + fs.StringVar(&f.ExperimentalMounterPath, "experimental-mounter-path", f.ExperimentalMounterPath, "[Experimental] Path of mounter binary. Leave empty to use the default mount.") + fs.StringSliceVar(&f.AllowedUnsafeSysctls, "experimental-allowed-unsafe-sysctls", f.AllowedUnsafeSysctls, "Comma-separated whitelist of unsafe sysctls or unsafe sysctl patterns (ending in *). Use these at your own risk.") + fs.BoolVar(&f.Containerized, "containerized", f.Containerized, "Experimental support for running kubelet in a container. Intended for testing.") + fs.BoolVar(&f.ExperimentalKernelMemcgNotification, "experimental-kernel-memcg-notification", f.ExperimentalKernelMemcgNotification, "If enabled, the kubelet will integrate with the kernel memcg notification to determine if memory eviction thresholds are crossed rather than polling.") + fs.StringVar(&f.RemoteRuntimeEndpoint, "container-runtime-endpoint", f.RemoteRuntimeEndpoint, "[Experimental] The endpoint of remote runtime service. Currently unix socket is supported on Linux, and tcp is supported on windows. Examples:'unix:///var/run/dockershim.sock', 'tcp://localhost:3735'") + fs.StringVar(&f.RemoteImageEndpoint, "image-service-endpoint", f.RemoteImageEndpoint, "[Experimental] The endpoint of remote image service. If not specified, it will be the same with container-runtime-endpoint by default. Currently unix socket is supported on Linux, and tcp is supported on windows. Examples:'unix:///var/run/dockershim.sock', 'tcp://localhost:3735'") + fs.BoolVar(&f.ExperimentalCheckNodeCapabilitiesBeforeMount, "experimental-check-node-capabilities-before-mount", f.ExperimentalCheckNodeCapabilitiesBeforeMount, "[Experimental] if set true, the kubelet will check the underlying node for required componenets (binaries, etc.) before performing the mount") + fs.BoolVar(&f.ExperimentalNodeAllocatableIgnoreEvictionThreshold, "experimental-allocatable-ignore-eviction", f.ExperimentalNodeAllocatableIgnoreEvictionThreshold, "When set to 'true', Hard Eviction Thresholds will be ignored while calculating Node Allocatable. See https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/ for more details. [default=false]") + fs.Var(flag.NewMapStringString(&f.ExperimentalQOSReserved), "experimental-qos-reserved", "A set of ResourceName=Percentage (e.g. memory=50%) pairs that describe how pod resource requests are reserved at the QoS level. Currently only memory is supported. [default=none]") + bindableNodeLabels := flag.ConfigurationMap(f.NodeLabels) + fs.Var(&bindableNodeLabels, "node-labels", " Labels to add when registering the node in the cluster. 
Labels must be key=value pairs separated by ','.") + fs.StringVar(&f.VolumePluginDir, "volume-plugin-dir", f.VolumePluginDir, " The full path of the directory in which to search for additional third party volume plugins") + fs.StringVar(&f.LockFilePath, "lock-file", f.LockFilePath, " The path to file for kubelet to use as a lock file.") + fs.BoolVar(&f.ExitOnLockContention, "exit-on-lock-contention", f.ExitOnLockContention, "Whether kubelet should exit upon lock-file contention.") + fs.StringVar(&f.SeccompProfileRoot, "seccomp-profile-root", f.SeccompProfileRoot, " Directory path for seccomp profiles.") + fs.StringVar(&f.BootstrapCheckpointPath, "bootstrap-checkpoint-path", f.BootstrapCheckpointPath, " Path to to the directory where the checkpoints are stored") + + // DEPRECATED FLAGS + fs.DurationVar(&f.MinimumGCAge.Duration, "minimum-container-ttl-duration", f.MinimumGCAge.Duration, "Minimum age for a finished container before it is garbage collected. Examples: '300ms', '10s' or '2h45m'") + fs.MarkDeprecated("minimum-container-ttl-duration", "Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.") + fs.Int32Var(&f.MaxPerPodContainerCount, "maximum-dead-containers-per-container", f.MaxPerPodContainerCount, "Maximum number of old instances to retain per container. Each container takes up some disk space.") + fs.MarkDeprecated("maximum-dead-containers-per-container", "Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.") + fs.Int32Var(&f.MaxContainerCount, "maximum-dead-containers", f.MaxContainerCount, "Maximum number of old instances of containers to retain globally. Each container takes up some disk space. To disable, set to a negative number.") + fs.MarkDeprecated("maximum-dead-containers", "Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.") + fs.StringVar(&f.MasterServiceNamespace, "master-service-namespace", f.MasterServiceNamespace, "The namespace from which the kubernetes master services should be injected into pods") + fs.MarkDeprecated("master-service-namespace", "This flag will be removed in a future version.") + fs.BoolVar(&f.RegisterSchedulable, "register-schedulable", f.RegisterSchedulable, "Register the node as schedulable. Won't have any effect if register-node is false.") + fs.MarkDeprecated("register-schedulable", "will be removed in a future version") + fs.StringVar(&f.NonMasqueradeCIDR, "non-masquerade-cidr", f.NonMasqueradeCIDR, "Traffic to IPs outside this range will use IP masquerade. Set to '0.0.0.0/0' to never masquerade.") + fs.MarkDeprecated("non-masquerade-cidr", "will be removed in a future version") + fs.BoolVar(&f.KeepTerminatedPodVolumes, "keep-terminated-pod-volumes", f.KeepTerminatedPodVolumes, "Keep terminated pod volumes mounted to the node after the pod terminates. Can be useful for debugging volume related issues.") + fs.MarkDeprecated("keep-terminated-pod-volumes", "will be removed in a future version") + // TODO(#54161:v1.11.0): Remove --enable-custom-metrics flag, it is deprecated. 
+ fs.BoolVar(&f.EnableCustomMetrics, "enable-custom-metrics", f.EnableCustomMetrics, "Support for gathering custom metrics.") + fs.MarkDeprecated("enable-custom-metrics", "will be removed in a future version") + } // AddKubeletConfigFlags adds flags for a specific kubeletconfig.KubeletConfiguration to the specified FlagSet @@ -242,14 +375,14 @@ func AddKubeletConfigFlags(fs *pflag.FlagSet, c *kubeletconfig.KubeletConfigurat fs.BoolVar(&c.FailSwapOn, "experimental-fail-swap-on", true, "DEPRECATED: please use --fail-swap-on instead.") fs.MarkDeprecated("experimental-fail-swap-on", "This flag is deprecated and will be removed in future releases. please use --fail-swap-on instead.") - fs.StringVar(&c.PodManifestPath, "pod-manifest-path", c.PodManifestPath, "Path to to the directory containing pod manifest files to run, or the path to a single pod manifest file. Files starting with dots will be ignored.") + fs.StringVar(&c.PodManifestPath, "pod-manifest-path", c.PodManifestPath, "Path to the directory containing pod manifest files to run, or the path to a single pod manifest file. Files starting with dots will be ignored.") fs.DurationVar(&c.SyncFrequency.Duration, "sync-frequency", c.SyncFrequency.Duration, "Max period between synchronizing running containers and config") fs.DurationVar(&c.FileCheckFrequency.Duration, "file-check-frequency", c.FileCheckFrequency.Duration, "Duration between checking config files for new data") fs.DurationVar(&c.HTTPCheckFrequency.Duration, "http-check-frequency", c.HTTPCheckFrequency.Duration, "Duration between checking http for new data") fs.StringVar(&c.ManifestURL, "manifest-url", c.ManifestURL, "URL for accessing the container manifest") - fs.StringVar(&c.ManifestURLHeader, "manifest-url-header", c.ManifestURLHeader, "HTTP header to use when accessing the manifest URL, with the key separated from the value with a ':', as in 'key:value'") + fs.Var(flag.NewColonSeparatedMultimapStringString(&c.ManifestURLHeader), "manifest-url-header", "Comma-separated list of HTTP headers to use when accessing the manifest URL. Multiple headers with the same name will be added in the same order provided. This flag can be repeatedly invoked. 
For example: `--manifest-url-header 'a:hello,b:again,c:world' --manifest-url-header 'b:beautiful'`") fs.BoolVar(&c.EnableServer, "enable-server", c.EnableServer, "Enable the Kubelet's server") - fs.Var(componentconfig.IPVar{Val: &c.Address}, "address", "The IP address for the Kubelet to serve on (set to 0.0.0.0 for all interfaces)") + fs.Var(componentconfig.IPVar{Val: &c.Address}, "address", "The IP address for the Kubelet to serve on (set to `0.0.0.0` for all IPv4 interfaces and `::` for all IPv6 interfaces)") fs.Int32Var(&c.Port, "port", c.Port, "The port for the Kubelet to serve on.") fs.Int32Var(&c.ReadOnlyPort, "read-only-port", c.ReadOnlyPort, "The read-only port for the Kubelet to serve on with no authentication/authorization (set to 0 to disable)") @@ -281,7 +414,6 @@ func AddKubeletConfigFlags(fs *pflag.FlagSet, c *kubeletconfig.KubeletConfigurat "are generated for the public address and saved to the directory passed to --cert-dir.") fs.StringVar(&c.TLSPrivateKeyFile, "tls-private-key-file", c.TLSPrivateKeyFile, "File containing x509 private key matching --tls-cert-file.") - fs.StringVar(&c.SeccompProfileRoot, "seccomp-profile-root", c.SeccompProfileRoot, "Directory path for seccomp profiles.") fs.BoolVar(&c.AllowPrivileged, "allow-privileged", c.AllowPrivileged, "If true, allow containers to request privileged mode.") fs.StringSliceVar(&c.HostNetworkSources, "host-network-sources", c.HostNetworkSources, "Comma-separated list of sources from which the Kubelet allows pods to use of host network.") fs.StringSliceVar(&c.HostPIDSources, "host-pid-sources", c.HostPIDSources, "Comma-separated list of sources from which the Kubelet allows pods to use the host pid namespace.") @@ -293,34 +425,21 @@ func AddKubeletConfigFlags(fs *pflag.FlagSet, c *kubeletconfig.KubeletConfigurat fs.BoolVar(&c.EnableDebuggingHandlers, "enable-debugging-handlers", c.EnableDebuggingHandlers, "Enables server endpoints for log collection and local running of containers and commands") fs.BoolVar(&c.EnableContentionProfiling, "contention-profiling", false, "Enable lock contention profiling, if profiling is enabled") - fs.DurationVar(&c.MinimumGCAge.Duration, "minimum-container-ttl-duration", c.MinimumGCAge.Duration, "Minimum age for a finished container before it is garbage collected. Examples: '300ms', '10s' or '2h45m'") - fs.MarkDeprecated("minimum-container-ttl-duration", "Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.") - fs.Int32Var(&c.MaxPerPodContainerCount, "maximum-dead-containers-per-container", c.MaxPerPodContainerCount, "Maximum number of old instances to retain per container. Each container takes up some disk space.") - fs.MarkDeprecated("maximum-dead-containers-per-container", "Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.") - fs.Int32Var(&c.MaxContainerCount, "maximum-dead-containers", c.MaxContainerCount, "Maximum number of old instances of containers to retain globally. Each container takes up some disk space. To disable, set to a negative number.") - fs.MarkDeprecated("maximum-dead-containers", "Use --eviction-hard or --eviction-soft instead. 
Will be removed in a future version.") fs.Int32Var(&c.CAdvisorPort, "cadvisor-port", c.CAdvisorPort, "The port of the localhost cAdvisor endpoint (set to 0 to disable)") fs.Int32Var(&c.HealthzPort, "healthz-port", c.HealthzPort, "The port of the localhost healthz endpoint (set to 0 to disable)") - fs.Var(componentconfig.IPVar{Val: &c.HealthzBindAddress}, "healthz-bind-address", "The IP address for the healthz server to serve on. (set to 0.0.0.0 for all interfaces)") + fs.Var(componentconfig.IPVar{Val: &c.HealthzBindAddress}, "healthz-bind-address", "The IP address for the healthz server to serve on (set to `0.0.0.0` for all IPv4 interfaces and `::` for all IPv6 interfaces)") fs.Int32Var(&c.OOMScoreAdj, "oom-score-adj", c.OOMScoreAdj, "The oom-score-adj value for kubelet process. Values must be within the range [-1000, 1000]") - fs.BoolVar(&c.RegisterNode, "register-node", c.RegisterNode, "Register the node with the apiserver. If --kubeconfig is not provided, this flag is irrelevant, as the Kubelet won't have an apiserver to register with. Default=true.") fs.StringVar(&c.ClusterDomain, "cluster-domain", c.ClusterDomain, "Domain for this cluster. If set, kubelet will configure all containers to search this domain in addition to the host's search domains") - fs.StringVar(&c.MasterServiceNamespace, "master-service-namespace", c.MasterServiceNamespace, "The namespace from which the kubernetes master services should be injected into pods") - fs.MarkDeprecated("master-service-namespace", "This flag will be removed in a future version.") + fs.StringSliceVar(&c.ClusterDNS, "cluster-dns", c.ClusterDNS, "Comma-separated list of DNS server IP address. This value is used for containers DNS server in case of Pods with \"dnsPolicy=ClusterFirst\". Note: all DNS servers appearing in the list MUST serve the same set of records otherwise name resolution within the cluster may not work correctly. There is no guarantee as to which DNS server may be contacted for name resolution.") fs.DurationVar(&c.StreamingConnectionIdleTimeout.Duration, "streaming-connection-idle-timeout", c.StreamingConnectionIdleTimeout.Duration, "Maximum time a streaming connection can be idle before the connection is automatically closed. 0 indicates no timeout. Example: '5m'") fs.DurationVar(&c.NodeStatusUpdateFrequency.Duration, "node-status-update-frequency", c.NodeStatusUpdateFrequency.Duration, "Specifies how often kubelet posts node status to master. Note: be cautious when changing the constant, it must work with nodeMonitorGracePeriod in nodecontroller.") - c.NodeLabels = make(map[string]string) - bindableNodeLabels := utilflag.ConfigurationMap(c.NodeLabels) - fs.Var(&bindableNodeLabels, "node-labels", " Labels to add when registering the node in the cluster. Labels must be key=value pairs separated by ','.") fs.DurationVar(&c.ImageMinimumGCAge.Duration, "minimum-image-ttl-duration", c.ImageMinimumGCAge.Duration, "Minimum age for an unused image before it is garbage collected. Examples: '300ms', '10s' or '2h45m'.") fs.Int32Var(&c.ImageGCHighThresholdPercent, "image-gc-high-threshold", c.ImageGCHighThresholdPercent, "The percent of disk usage after which image garbage collection is always run.") fs.Int32Var(&c.ImageGCLowThresholdPercent, "image-gc-low-threshold", c.ImageGCLowThresholdPercent, "The percent of disk usage before which image garbage collection is never run. 
Lowest disk usage to garbage collect to.") fs.DurationVar(&c.VolumeStatsAggPeriod.Duration, "volume-stats-agg-period", c.VolumeStatsAggPeriod.Duration, "Specifies interval for kubelet to calculate and cache the volume disk usage for all pods and volumes. To disable volume calculations, set to 0.") - fs.StringVar(&c.VolumePluginDir, "volume-plugin-dir", c.VolumePluginDir, " The full path of the directory in which to search for additional third party volume plugins") - fs.StringVar(&c.FeatureGates, "feature-gates", c.FeatureGates, "A set of key=value pairs that describe feature gates for alpha/experimental features. "+ + fs.Var(flag.NewMapStringBool(&c.FeatureGates), "feature-gates", "A set of key=value pairs that describe feature gates for alpha/experimental features. "+ "Options are:\n"+strings.Join(utilfeature.DefaultFeatureGate.KnownFeatures(), "\n")) - fs.StringVar(&c.KubeletCgroups, "kubelet-cgroups", c.KubeletCgroups, "Optional absolute name of cgroups to create and run the Kubelet in.") fs.StringVar(&c.SystemCgroups, "system-cgroups", c.SystemCgroups, "Optional absolute name of cgroups in which to place all non-kernel processes that are not already inside a cgroup under `/`. Empty for no container. Rolling back the flag requires a reboot.") @@ -329,15 +448,10 @@ func AddKubeletConfigFlags(fs *pflag.FlagSet, c *kubeletconfig.KubeletConfigurat fs.StringVar(&c.CgroupRoot, "cgroup-root", c.CgroupRoot, "Optional root cgroup to use for pods. This is handled by the container runtime on a best effort basis. Default: '', which means use the container runtime default.") fs.StringVar(&c.CPUManagerPolicy, "cpu-manager-policy", c.CPUManagerPolicy, " CPU Manager policy to use. Possible values: 'none', 'static'. Default: 'none'") fs.DurationVar(&c.CPUManagerReconcilePeriod.Duration, "cpu-manager-reconcile-period", c.CPUManagerReconcilePeriod.Duration, " CPU Manager reconciliation period. Examples: '10s', or '1m'. If not supplied, defaults to `NodeStatusUpdateFrequency`") - fs.StringVar(&c.ContainerRuntime, "container-runtime", c.ContainerRuntime, "The container runtime to use. Possible values: 'docker', 'rkt'.") fs.DurationVar(&c.RuntimeRequestTimeout.Duration, "runtime-request-timeout", c.RuntimeRequestTimeout.Duration, "Timeout of all runtime requests except long running request - pull, logs, exec and attach. When timeout exceeded, kubelet will cancel the request, throw out an error and retry later.") - fs.StringVar(&c.LockFilePath, "lock-file", c.LockFilePath, " The path to file for kubelet to use as a lock file.") - fs.BoolVar(&c.ExitOnLockContention, "exit-on-lock-contention", c.ExitOnLockContention, "Whether kubelet should exit upon lock-file contention.") - fs.StringVar(&c.ExperimentalMounterPath, "experimental-mounter-path", c.ExperimentalMounterPath, "[Experimental] Path of mounter binary. Leave empty to use the default mount.") fs.StringVar(&c.HairpinMode, "hairpin-mode", c.HairpinMode, "How should the kubelet setup hairpin NAT. This allows endpoints of a Service to loadbalance back to themselves if they should try to access their own Service. Valid values are \"promiscuous-bridge\", \"hairpin-veth\" and \"none\".") fs.Int32Var(&c.MaxPods, "max-pods", c.MaxPods, "Number of Pods that can run on this Kubelet.") - fs.StringVar(&c.NonMasqueradeCIDR, "non-masquerade-cidr", c.NonMasqueradeCIDR, "Traffic to IPs outside this range will use IP masquerade. 
Set to '0.0.0.0/0' to never masquerade.") - fs.MarkDeprecated("non-masquerade-cidr", "will be removed in a future version") + fs.StringVar(&c.PodCIDR, "pod-cidr", "", "The CIDR to use for pod IP addresses, only used in standalone mode. In cluster mode, this is obtained from the master.") fs.StringVar(&c.ResolverConfig, "resolv-conf", c.ResolverConfig, "Resolver configuration file used as the basis for the container DNS resolution configuration.") fs.BoolVar(&c.CPUCFSQuota, "cpu-cfs-quota", c.CPUCFSQuota, "Enable CPU CFS quota enforcement for containers that specify CPU limits") @@ -345,46 +459,28 @@ func AddKubeletConfigFlags(fs *pflag.FlagSet, c *kubeletconfig.KubeletConfigurat fs.BoolVar(&c.MakeIPTablesUtilChains, "make-iptables-util-chains", c.MakeIPTablesUtilChains, "If true, kubelet will ensure iptables utility rules are present on host.") fs.Int32Var(&c.IPTablesMasqueradeBit, "iptables-masquerade-bit", c.IPTablesMasqueradeBit, "The bit of the fwmark space to mark packets for SNAT. Must be within the range [0, 31]. Please match this parameter with corresponding parameter in kube-proxy.") fs.Int32Var(&c.IPTablesDropBit, "iptables-drop-bit", c.IPTablesDropBit, "The bit of the fwmark space to mark packets for dropping. Must be within the range [0, 31].") - fs.StringSliceVar(&c.AllowedUnsafeSysctls, "experimental-allowed-unsafe-sysctls", c.AllowedUnsafeSysctls, "Comma-separated whitelist of unsafe sysctls or unsafe sysctl patterns (ending in *). Use these at your own risk.") // Flags intended for testing, not recommended used in production environments. - fs.BoolVar(&c.Containerized, "containerized", c.Containerized, "Experimental support for running kubelet in a container. Intended for testing.") fs.Int64Var(&c.MaxOpenFiles, "max-open-files", c.MaxOpenFiles, "Number of files that can be opened by Kubelet process.") - fs.BoolVar(&c.RegisterSchedulable, "register-schedulable", c.RegisterSchedulable, "Register the node as schedulable. Won't have any effect if register-node is false.") - fs.MarkDeprecated("register-schedulable", "will be removed in a future version") - fs.Var(utiltaints.NewTaintsVar(&c.RegisterWithTaints), "register-with-taints", "Register the node with the given list of taints (comma separated \"=:\"). No-op if register-node is false.") + fs.StringVar(&c.ContentType, "kube-api-content-type", c.ContentType, "Content type of requests sent to apiserver.") fs.Int32Var(&c.KubeAPIQPS, "kube-api-qps", c.KubeAPIQPS, "QPS to use while talking with kubernetes apiserver") fs.Int32Var(&c.KubeAPIBurst, "kube-api-burst", c.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver") fs.BoolVar(&c.SerializeImagePulls, "serialize-image-pulls", c.SerializeImagePulls, "Pull images one at a time. We recommend *not* changing the default value on nodes that run docker daemon with version < 1.9 or an Aufs storage backend. Issue #10959 has more details.") - fs.BoolVar(&c.EnableCustomMetrics, "enable-custom-metrics", c.EnableCustomMetrics, "Support for gathering custom metrics.") - fs.StringVar(&c.RuntimeCgroups, "runtime-cgroups", c.RuntimeCgroups, "Optional absolute name of cgroups to create and run the runtime in.") - fs.StringVar(&c.EvictionHard, "eviction-hard", c.EvictionHard, "A set of eviction thresholds (e.g. memory.available<1Gi) that if met would trigger a pod eviction.") - fs.StringVar(&c.EvictionSoft, "eviction-soft", c.EvictionSoft, "A set of eviction thresholds (e.g. 
memory.available<1.5Gi) that if met over a corresponding grace period would trigger a pod eviction.") - fs.StringVar(&c.EvictionSoftGracePeriod, "eviction-soft-grace-period", c.EvictionSoftGracePeriod, "A set of eviction grace periods (e.g. memory.available=1m30s) that correspond to how long a soft eviction threshold must hold before triggering a pod eviction.") + fs.Var(flag.NewLangleSeparatedMapStringString(&c.EvictionHard), "eviction-hard", "A set of eviction thresholds (e.g. memory.available<1Gi) that if met would trigger a pod eviction.") + fs.Var(flag.NewLangleSeparatedMapStringString(&c.EvictionSoft), "eviction-soft", "A set of eviction thresholds (e.g. memory.available<1.5Gi) that if met over a corresponding grace period would trigger a pod eviction.") + fs.Var(flag.NewMapStringString(&c.EvictionSoftGracePeriod), "eviction-soft-grace-period", "A set of eviction grace periods (e.g. memory.available=1m30s) that correspond to how long a soft eviction threshold must hold before triggering a pod eviction.") fs.DurationVar(&c.EvictionPressureTransitionPeriod.Duration, "eviction-pressure-transition-period", c.EvictionPressureTransitionPeriod.Duration, "Duration for which the kubelet has to wait before transitioning out of an eviction pressure condition.") fs.Int32Var(&c.EvictionMaxPodGracePeriod, "eviction-max-pod-grace-period", c.EvictionMaxPodGracePeriod, "Maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. If negative, defer to pod specified value.") - fs.StringVar(&c.EvictionMinimumReclaim, "eviction-minimum-reclaim", c.EvictionMinimumReclaim, "A set of minimum reclaims (e.g. imagefs.available=2Gi) that describes the minimum amount of resource the kubelet will reclaim when performing a pod eviction if that resource is under pressure.") - fs.BoolVar(&c.ExperimentalKernelMemcgNotification, "experimental-kernel-memcg-notification", c.ExperimentalKernelMemcgNotification, "If enabled, the kubelet will integrate with the kernel memcg notification to determine if memory eviction thresholds are crossed rather than polling.") + fs.Var(flag.NewMapStringString(&c.EvictionMinimumReclaim), "eviction-minimum-reclaim", "A set of minimum reclaims (e.g. imagefs.available=2Gi) that describes the minimum amount of resource the kubelet will reclaim when performing a pod eviction if that resource is under pressure.") fs.Int32Var(&c.PodsPerCore, "pods-per-core", c.PodsPerCore, "Number of Pods per core that can run on this Kubelet. The total number of Pods on this Kubelet cannot exceed max-pods, so max-pods will be used if this calculation results in a larger number of Pods allowed on the Kubelet. A value of 0 disables this limit.") fs.BoolVar(&c.ProtectKernelDefaults, "protect-kernel-defaults", c.ProtectKernelDefaults, "Default kubelet behaviour for kernel tuning. If set, kubelet errors if any of kernel tunables is different than kubelet defaults.") - fs.BoolVar(&c.KeepTerminatedPodVolumes, "keep-terminated-pod-volumes", c.KeepTerminatedPodVolumes, "Keep terminated pod volumes mounted to the node after the pod terminates. Can be useful for debugging volume related issues.") - fs.MarkDeprecated("keep-terminated-pod-volumes", "will be removed in a future version") - - // CRI flags. - fs.StringVar(&c.RemoteRuntimeEndpoint, "container-runtime-endpoint", c.RemoteRuntimeEndpoint, "[Experimental] The endpoint of remote runtime service. Currently unix socket is supported on Linux, and tcp is supported on windows. 
Examples:'unix:///var/run/dockershim.sock', 'tcp://localhost:3735'") - fs.StringVar(&c.RemoteImageEndpoint, "image-service-endpoint", c.RemoteImageEndpoint, "[Experimental] The endpoint of remote image service. If not specified, it will be the same with container-runtime-endpoint by default. Currently unix socket is supported on Linux, and tcp is supported on windows. Examples:'unix:///var/run/dockershim.sock', 'tcp://localhost:3735'") - - fs.BoolVar(&c.ExperimentalCheckNodeCapabilitiesBeforeMount, "experimental-check-node-capabilities-before-mount", c.ExperimentalCheckNodeCapabilitiesBeforeMount, "[Experimental] if set true, the kubelet will check the underlying node for required componenets (binaries, etc.) before performing the mount") // Node Allocatable Flags - fs.Var(&c.SystemReserved, "system-reserved", "A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=500Mi) pairs that describe resources reserved for non-kubernetes components. Currently only cpu and memory are supported. See http://kubernetes.io/docs/user-guide/compute-resources for more detail. [default=none]") - fs.Var(&c.KubeReserved, "kube-reserved", "A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=500Mi, storage=1Gi) pairs that describe resources reserved for kubernetes system components. Currently cpu, memory and local storage for root file system are supported. See http://kubernetes.io/docs/user-guide/compute-resources for more detail. [default=none]") - fs.StringSliceVar(&c.EnforceNodeAllocatable, "enforce-node-allocatable", c.EnforceNodeAllocatable, "A comma separated list of levels of node allocatable enforcement to be enforced by kubelet. Acceptible options are 'pods', 'system-reserved' & 'kube-reserved'. If the latter two options are specified, '--system-reserved-cgroup' & '--kube-reserved-cgroup' must also be set respectively. See https://git.k8s.io/community/contributors/design-proposals/node-allocatable.md for more details.") + fs.Var(flag.NewMapStringString(&c.SystemReserved), "system-reserved", "A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=500Mi,ephemeral-storage=1Gi) pairs that describe resources reserved for non-kubernetes components. Currently only cpu and memory are supported. See http://kubernetes.io/docs/user-guide/compute-resources for more detail. [default=none]") + fs.Var(flag.NewMapStringString(&c.KubeReserved), "kube-reserved", "A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=500Mi,ephemeral-storage=1Gi) pairs that describe resources reserved for kubernetes system components. Currently cpu, memory and local ephemeral storage for root file system are supported. See http://kubernetes.io/docs/user-guide/compute-resources for more detail. [default=none]") + fs.StringSliceVar(&c.EnforceNodeAllocatable, "enforce-node-allocatable", c.EnforceNodeAllocatable, "A comma separated list of levels of node allocatable enforcement to be enforced by kubelet. Acceptable options are 'pods', 'system-reserved' & 'kube-reserved'. If the latter two options are specified, '--system-reserved-cgroup' & '--kube-reserved-cgroup' must also be set respectively. See https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/ for more details.") fs.StringVar(&c.SystemReservedCgroup, "system-reserved-cgroup", c.SystemReservedCgroup, "Absolute name of the top level cgroup that is used to manage non-kubernetes components for which compute resources were reserved via '--system-reserved' flag. Ex. '/system-reserved'.
[default='']") fs.StringVar(&c.KubeReservedCgroup, "kube-reserved-cgroup", c.KubeReservedCgroup, "Absolute name of the top level cgroup that is used to manage kubernetes components for which compute resources were reserved via '--kube-reserved' flag. Ex. '/kube-reserved'. [default='']") - fs.BoolVar(&c.ExperimentalNodeAllocatableIgnoreEvictionThreshold, "experimental-allocatable-ignore-eviction", c.ExperimentalNodeAllocatableIgnoreEvictionThreshold, "When set to 'true', Hard Eviction Thresholds will be ignored while calculating Node Allocatable. See https://git.k8s.io/community/contributors/design-proposals/node-allocatable.md for more details. [default=false]") - - fs.Var(&c.ExperimentalQOSReserved, "experimental-qos-reserved", "A set of ResourceName=Percentage (e.g. memory=50%) pairs that describe how pod resource requests are reserved at the QoS level. Currently only memory is supported. [default=none]") } diff --git a/vendor/k8s.io/kubernetes/cmd/kubelet/app/plugins.go b/vendor/k8s.io/kubernetes/cmd/kubelet/app/plugins.go index 13abcd9ff739..ef41bb8e9098 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubelet/app/plugins.go +++ b/vendor/k8s.io/kubernetes/cmd/kubelet/app/plugins.go @@ -35,6 +35,7 @@ import ( "k8s.io/kubernetes/pkg/volume/cephfs" "k8s.io/kubernetes/pkg/volume/cinder" "k8s.io/kubernetes/pkg/volume/configmap" + "k8s.io/kubernetes/pkg/volume/csi" "k8s.io/kubernetes/pkg/volume/downwardapi" "k8s.io/kubernetes/pkg/volume/empty_dir" "k8s.io/kubernetes/pkg/volume/fc" @@ -58,6 +59,9 @@ import ( "k8s.io/kubernetes/pkg/volume/vsphere_volume" // Cloud providers _ "k8s.io/kubernetes/pkg/cloudprovider/providers" + // features check + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/kubernetes/pkg/features" ) // ProbeVolumePlugins collects all volume plugins into an easy to use list. @@ -96,6 +100,9 @@ func ProbeVolumePlugins() []volume.VolumePlugin { allPlugins = append(allPlugins, scaleio.ProbeVolumePlugins()...) allPlugins = append(allPlugins, local.ProbeVolumePlugins()...) allPlugins = append(allPlugins, storageos.ProbeVolumePlugins()...) + if utilfeature.DefaultFeatureGate.Enabled(features.CSIPersistentVolume) { + allPlugins = append(allPlugins, csi.ProbeVolumePlugins()...) + } return allPlugins } @@ -107,21 +114,12 @@ func GetDynamicPluginProber(pluginDir string) volume.DynamicPluginProber { } // ProbeNetworkPlugins collects all compiled-in plugins -func ProbeNetworkPlugins(pluginDir, cniConfDir, cniBinDir string) []network.NetworkPlugin { +func ProbeNetworkPlugins(cniConfDir, cniBinDir string) []network.NetworkPlugin { allPlugins := []network.NetworkPlugin{} - // for backwards-compat, allow pluginDir as a source of CNI config files - if cniConfDir == "" { - cniConfDir = pluginDir - } - - binDir := cniBinDir - if binDir == "" { - binDir = pluginDir - } // for each existing plugin, add to the list - allPlugins = append(allPlugins, cni.ProbeNetworkPlugins(cniConfDir, binDir)...) - allPlugins = append(allPlugins, kubenet.NewPlugin(binDir)) + allPlugins = append(allPlugins, cni.ProbeNetworkPlugins(cniConfDir, cniBinDir)...) 
+ allPlugins = append(allPlugins, kubenet.NewPlugin(cniBinDir)) return allPlugins } diff --git a/vendor/k8s.io/kubernetes/cmd/kubelet/app/server.go b/vendor/k8s.io/kubernetes/cmd/kubelet/app/server.go index 406dba293f62..8100b44afae2 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubelet/app/server.go +++ b/vendor/k8s.io/kubernetes/cmd/kubelet/app/server.go @@ -38,6 +38,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/sets" @@ -45,15 +46,16 @@ import ( "k8s.io/apiserver/pkg/server/healthz" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/apiserver/pkg/util/flag" - clientgoclientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes" v1core "k8s.io/client-go/kubernetes/typed/core/v1" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/tools/record" certutil "k8s.io/client-go/util/cert" + "k8s.io/client-go/util/certificate" "k8s.io/kubernetes/cmd/kubelet/app/options" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/capabilities" "k8s.io/kubernetes/pkg/client/chaosclient" "k8s.io/kubernetes/pkg/cloudprovider" @@ -64,13 +66,12 @@ import ( kubeletscheme "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/scheme" kubeletconfigv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1" "k8s.io/kubernetes/pkg/kubelet/cadvisor" - "k8s.io/kubernetes/pkg/kubelet/certificate" + kubeletcertificate "k8s.io/kubernetes/pkg/kubelet/certificate" "k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap" "k8s.io/kubernetes/pkg/kubelet/cm" "k8s.io/kubernetes/pkg/kubelet/config" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/dockershim" - "k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker" dockerremote "k8s.io/kubernetes/pkg/kubelet/dockershim/remote" "k8s.io/kubernetes/pkg/kubelet/eviction" evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api" @@ -143,12 +144,13 @@ func UnsecuredDependencies(s *options.KubeletServer) (*kubelet.Dependencies, err writer = &kubeio.NsenterWriter{} } - var dockerClient libdocker.Interface + var dockerClientConfig *dockershim.ClientConfig if s.ContainerRuntime == kubetypes.DockerContainerRuntime { - dockerClient = libdocker.ConnectToDockerOrDie(s.DockerEndpoint, s.RuntimeRequestTimeout.Duration, - s.ImagePullProgressDeadline.Duration) - } else { - dockerClient = nil + dockerClientConfig = &dockershim.ClientConfig{ + DockerEndpoint: s.DockerEndpoint, + RuntimeRequestTimeout: s.RuntimeRequestTimeout.Duration, + ImagePullProgressDeadline: s.ImagePullProgressDeadline.Duration, + } } return &kubelet.Dependencies{ @@ -156,13 +158,13 @@ func UnsecuredDependencies(s *options.KubeletServer) (*kubelet.Dependencies, err CAdvisorInterface: nil, // cadvisor.New launches background processes (bg http.ListenAndServe, and some bg cleaners), not set here Cloud: nil, // cloud provider might start background processes ContainerManager: nil, - DockerClient: dockerClient, + DockerClientConfig: dockerClientConfig, KubeClient: nil, HeartbeatClient: nil, ExternalKubeClient: nil, EventClient: nil, Mounter: mounter, - NetworkPlugins: ProbeNetworkPlugins(s.NetworkPluginDir, s.CNIConfDir, s.CNIBinDir), + NetworkPlugins: ProbeNetworkPlugins(s.CNIConfDir, s.CNIBinDir), OOMAdjuster: oom.NewOOMAdjuster(), OSInterface: kubecontainer.RealOS{}, 
Writer: writer, @@ -176,6 +178,8 @@ func UnsecuredDependencies(s *options.KubeletServer) (*kubelet.Dependencies, err // Otherwise, the caller is assumed to have set up the Dependencies object and a default one will // not be generated. func Run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies) error { + // To help debugging, immediately log version + glog.Infof("Version: %+v", version.Get()) if err := run(s, kubeDeps); err != nil { return fmt.Errorf("failed to run Kubelet: %v", err) } @@ -217,13 +221,13 @@ func initConfigz(kc *kubeletconfiginternal.KubeletConfiguration) error { return nil } -// makeEventRecorder sets up kubeDeps.Recorder if its nil. Its a no-op otherwise. +// makeEventRecorder sets up kubeDeps.Recorder if it's nil. It's a no-op otherwise. func makeEventRecorder(kubeDeps *kubelet.Dependencies, nodeName types.NodeName) { if kubeDeps.Recorder != nil { return } eventBroadcaster := record.NewBroadcaster() - kubeDeps.Recorder = eventBroadcaster.NewRecorder(api.Scheme, v1.EventSource{Component: componentKubelet, Host: string(nodeName)}) + kubeDeps.Recorder = eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: componentKubelet, Host: string(nodeName)}) eventBroadcaster.StartLogging(glog.V(3).Infof) if kubeDeps.EventClient != nil { glog.V(4).Infof("Sending events to api server.") @@ -235,7 +239,7 @@ func makeEventRecorder(kubeDeps *kubelet.Dependencies, nodeName types.NodeName) func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies) (err error) { // Set global feature gates based on the value on the initial KubeletServer - err = utilfeature.DefaultFeatureGate.Set(s.KubeletConfiguration.FeatureGates) + err = utilfeature.DefaultFeatureGate.SetFromMap(s.KubeletConfiguration.FeatureGates) if err != nil { return err } @@ -327,18 +331,20 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies) (err error) { var kubeClient clientset.Interface var eventClient v1core.EventsGetter var heartbeatClient v1core.CoreV1Interface - var externalKubeClient clientgoclientset.Interface + var externalKubeClient clientset.Interface clientConfig, err := CreateAPIServerClientConfig(s) var clientCertificateManager certificate.Manager if err == nil { if s.RotateCertificates && utilfeature.DefaultFeatureGate.Enabled(features.RotateKubeletClientCertificate) { - clientCertificateManager, err = certificate.NewKubeletClientCertificateManager(s.CertDirectory, nodeName, clientConfig.CertData, clientConfig.KeyData, clientConfig.CertFile, clientConfig.KeyFile) + clientCertificateManager, err = kubeletcertificate.NewKubeletClientCertificateManager(s.CertDirectory, nodeName, clientConfig.CertData, clientConfig.KeyData, clientConfig.CertFile, clientConfig.KeyFile) if err != nil { return err } - if err := certificate.UpdateTransport(wait.NeverStop, clientConfig, clientCertificateManager); err != nil { + // we set exitIfExpired to true because we use this client configuration to request new certs - if we are unable + // to request new certs, we will be unable to continue normal operation + if err := kubeletcertificate.UpdateTransport(wait.NeverStop, clientConfig, clientCertificateManager, true); err != nil { return err } } @@ -346,12 +352,12 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies) (err error) { kubeClient, err = clientset.NewForConfig(clientConfig) if err != nil { glog.Warningf("New kubeClient from clientConfig error: %v", err) - } else if kubeClient.Certificates() != nil && clientCertificateManager != nil { + } else if 
kubeClient.CertificatesV1beta1() != nil && clientCertificateManager != nil { glog.V(2).Info("Starting client certificate rotation.") - clientCertificateManager.SetCertificateSigningRequestClient(kubeClient.Certificates().CertificateSigningRequests()) + clientCertificateManager.SetCertificateSigningRequestClient(kubeClient.CertificatesV1beta1().CertificateSigningRequests()) clientCertificateManager.Start() } - externalKubeClient, err = clientgoclientset.NewForConfig(clientConfig) + externalKubeClient, err = clientset.NewForConfig(clientConfig) if err != nil { glog.Warningf("New kubeClient from clientConfig error: %v", err) } @@ -408,7 +414,7 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies) (err error) { if kubeDeps.CAdvisorInterface == nil { imageFsInfoProvider := cadvisor.NewImageFsInfoProvider(s.ContainerRuntime, s.RemoteRuntimeEndpoint) - kubeDeps.CAdvisorInterface, err = cadvisor.New(s.Address, uint(s.CAdvisorPort), imageFsInfoProvider, s.RootDirectory) + kubeDeps.CAdvisorInterface, err = cadvisor.New(s.Address, uint(s.CAdvisorPort), imageFsInfoProvider, s.RootDirectory, cadvisor.UsingLegacyCadvisorStats(s.ContainerRuntime, s.RemoteRuntimeEndpoint)) if err != nil { return err } @@ -433,7 +439,7 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies) (err error) { var hardEvictionThresholds []evictionapi.Threshold // If the user requested to ignore eviction thresholds, then do not set valid values for hardEvictionThresholds here. if !s.ExperimentalNodeAllocatableIgnoreEvictionThreshold { - hardEvictionThresholds, err = eviction.ParseThresholdConfig([]string{}, s.EvictionHard, "", "", "") + hardEvictionThresholds, err = eviction.ParseThresholdConfig([]string{}, s.EvictionHard, nil, nil, nil) if err != nil { return err } @@ -456,6 +462,7 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies) (err error) { CgroupsPerQOS: s.CgroupsPerQOS, CgroupRoot: s.CgroupRoot, CgroupDriver: s.CgroupDriver, + KubeletRootDir: s.RootDirectory, ProtectKernelDefaults: s.ProtectKernelDefaults, NodeAllocatableConfig: cm.NodeAllocatableConfig{ KubeReservedCgroupName: s.KubeReservedCgroup, @@ -647,7 +654,7 @@ func addChaosToClientConfig(s *options.KubeletServer, config *restclient.Config) // Eventually, #2 will be replaced with instances of #3 func RunKubelet(kubeFlags *options.KubeletFlags, kubeCfg *kubeletconfiginternal.KubeletConfiguration, kubeDeps *kubelet.Dependencies, runOnce bool) error { hostname := nodeutil.GetHostname(kubeFlags.HostnameOverride) - // Query the cloud provider for our node name, default to hostname if kcfg.Cloud == nil + // Query the cloud provider for our node name, default to hostname if kubeDeps.Cloud == nil nodeName, err := getNodeName(kubeDeps.Cloud, hostname) if err != nil { return err @@ -693,7 +700,37 @@ func RunKubelet(kubeFlags *options.KubeletFlags, kubeCfg *kubeletconfiginternal. 
kubeDeps.OSInterface = kubecontainer.RealOS{} } - k, err := builder(kubeCfg, kubeDeps, &kubeFlags.ContainerRuntimeOptions, kubeFlags.HostnameOverride, kubeFlags.NodeIP, kubeFlags.ProviderID, kubeFlags.CloudProvider, kubeFlags.CertDirectory, kubeFlags.RootDirectory) + k, err := builder(kubeCfg, + kubeDeps, + &kubeFlags.ContainerRuntimeOptions, + kubeFlags.ContainerRuntime, + kubeFlags.RuntimeCgroups, + kubeFlags.HostnameOverride, + kubeFlags.NodeIP, + kubeFlags.ProviderID, + kubeFlags.CloudProvider, + kubeFlags.CertDirectory, + kubeFlags.RootDirectory, + kubeFlags.RegisterNode, + kubeFlags.RegisterWithTaints, + kubeFlags.AllowedUnsafeSysctls, + kubeFlags.Containerized, + kubeFlags.RemoteRuntimeEndpoint, + kubeFlags.RemoteImageEndpoint, + kubeFlags.ExperimentalMounterPath, + kubeFlags.ExperimentalKernelMemcgNotification, + kubeFlags.ExperimentalCheckNodeCapabilitiesBeforeMount, + kubeFlags.ExperimentalNodeAllocatableIgnoreEvictionThreshold, + kubeFlags.MinimumGCAge, + kubeFlags.MaxPerPodContainerCount, + kubeFlags.MaxContainerCount, + kubeFlags.MasterServiceNamespace, + kubeFlags.RegisterSchedulable, + kubeFlags.NonMasqueradeCIDR, + kubeFlags.KeepTerminatedPodVolumes, + kubeFlags.NodeLabels, + kubeFlags.SeccompProfileRoot, + kubeFlags.BootstrapCheckpointPath) if err != nil { return fmt.Errorf("failed to create kubelet: %v", err) } @@ -712,10 +749,10 @@ func RunKubelet(kubeFlags *options.KubeletFlags, kubeCfg *kubeletconfiginternal. if _, err := k.RunOnce(podCfg.Updates()); err != nil { return fmt.Errorf("runonce failed: %v", err) } - glog.Infof("Started kubelet %s as runonce", version.Get().String()) + glog.Infof("Started kubelet as runonce") } else { startKubelet(k, podCfg, kubeCfg, kubeDeps) - glog.Infof("Started kubelet %s", version.Get().String()) + glog.Infof("Started kubelet") } return nil } @@ -739,17 +776,69 @@ func startKubelet(k kubelet.Bootstrap, podCfg *config.PodConfig, kubeCfg *kubele func CreateAndInitKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, kubeDeps *kubelet.Dependencies, - crOptions *options.ContainerRuntimeOptions, - hostnameOverride, - nodeIP, - providerID, - cloudProvider, - certDirectory, - rootDirectory string) (k kubelet.Bootstrap, err error) { + crOptions *config.ContainerRuntimeOptions, + containerRuntime string, + runtimeCgroups string, + hostnameOverride string, + nodeIP string, + providerID string, + cloudProvider string, + certDirectory string, + rootDirectory string, + registerNode bool, + registerWithTaints []api.Taint, + allowedUnsafeSysctls []string, + containerized bool, + remoteRuntimeEndpoint string, + remoteImageEndpoint string, + experimentalMounterPath string, + experimentalKernelMemcgNotification bool, + experimentalCheckNodeCapabilitiesBeforeMount bool, + experimentalNodeAllocatableIgnoreEvictionThreshold bool, + minimumGCAge metav1.Duration, + maxPerPodContainerCount int32, + maxContainerCount int32, + masterServiceNamespace string, + registerSchedulable bool, + nonMasqueradeCIDR string, + keepTerminatedPodVolumes bool, + nodeLabels map[string]string, + seccompProfileRoot string, + bootstrapCheckpointPath string) (k kubelet.Bootstrap, err error) { // TODO: block until all sources have delivered at least one update to the channel, or break the sync loop // up into "per source" synchronizations - k, err = kubelet.NewMainKubelet(kubeCfg, kubeDeps, crOptions, hostnameOverride, nodeIP, providerID, cloudProvider, certDirectory, rootDirectory) + k, err = kubelet.NewMainKubelet(kubeCfg, + kubeDeps, + crOptions, + containerRuntime, + 
runtimeCgroups, + hostnameOverride, + nodeIP, + providerID, + cloudProvider, + certDirectory, + rootDirectory, + registerNode, + registerWithTaints, + allowedUnsafeSysctls, + containerized, + remoteRuntimeEndpoint, + remoteImageEndpoint, + experimentalMounterPath, + experimentalKernelMemcgNotification, + experimentalCheckNodeCapabilitiesBeforeMount, + experimentalNodeAllocatableIgnoreEvictionThreshold, + minimumGCAge, + maxPerPodContainerCount, + maxContainerCount, + masterServiceNamespace, + registerSchedulable, + nonMasqueradeCIDR, + keepTerminatedPodVolumes, + nodeLabels, + seccompProfileRoot, + bootstrapCheckpointPath) if err != nil { return nil, err } @@ -763,7 +852,7 @@ func CreateAndInitKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, // parseResourceList parses the given configuration map into an API // ResourceList or returns an error. -func parseResourceList(m kubeletconfiginternal.ConfigurationMap) (v1.ResourceList, error) { +func parseResourceList(m map[string]string) (v1.ResourceList, error) { if len(m) == 0 { return nil, nil } @@ -823,23 +912,24 @@ func BootstrapKubeletConfigController(defaultConfig *kubeletconfiginternal.Kubel // RunDockershim only starts the dockershim in current process. This is only used for cri validate testing purpose // TODO(random-liu): Move this to a separate binary. -func RunDockershim(c *kubeletconfiginternal.KubeletConfiguration, r *options.ContainerRuntimeOptions) error { - // Create docker client. - dockerClient := libdocker.ConnectToDockerOrDie(r.DockerEndpoint, c.RuntimeRequestTimeout.Duration, - r.ImagePullProgressDeadline.Duration) +func RunDockershim(f *options.KubeletFlags, c *kubeletconfiginternal.KubeletConfiguration) error { + r := &f.ContainerRuntimeOptions - // Initialize network plugin settings. - binDir := r.CNIBinDir - if binDir == "" { - binDir = r.NetworkPluginDir + // Initialize docker client configuration. + dockerClientConfig := &dockershim.ClientConfig{ + DockerEndpoint: r.DockerEndpoint, + RuntimeRequestTimeout: c.RuntimeRequestTimeout.Duration, + ImagePullProgressDeadline: r.ImagePullProgressDeadline.Duration, } + + // Initialize network plugin settings. 
nh := &kubelet.NoOpLegacyHost{} pluginSettings := dockershim.NetworkPluginSettings{ HairpinMode: kubeletconfiginternal.HairpinMode(c.HairpinMode), - NonMasqueradeCIDR: c.NonMasqueradeCIDR, + NonMasqueradeCIDR: f.NonMasqueradeCIDR, PluginName: r.NetworkPluginName, PluginConfDir: r.CNIConfDir, - PluginBinDir: binDir, + PluginBinDir: r.CNIBinDir, MTU: int(r.NetworkPluginMTU), LegacyRuntimeHost: nh, } @@ -854,8 +944,8 @@ func RunDockershim(c *kubeletconfiginternal.KubeletConfiguration, r *options.Con SupportedPortForwardProtocols: streaming.DefaultConfig.SupportedPortForwardProtocols, } - ds, err := dockershim.NewDockerService(dockerClient, r.PodSandboxImage, streamingConfig, &pluginSettings, - c.RuntimeCgroups, c.CgroupDriver, r.DockerExecHandlerName, r.DockershimRootDirectory, r.DockerDisableSharedPID) + ds, err := dockershim.NewDockerService(dockerClientConfig, r.PodSandboxImage, streamingConfig, &pluginSettings, + f.RuntimeCgroups, c.CgroupDriver, r.DockershimRootDirectory, r.DockerDisableSharedPID) if err != nil { return err } @@ -864,7 +954,7 @@ func RunDockershim(c *kubeletconfiginternal.KubeletConfiguration, r *options.Con } glog.V(2).Infof("Starting the GRPC server for the docker CRI shim.") - server := dockerremote.NewDockerServer(c.RemoteRuntimeEndpoint, ds) + server := dockerremote.NewDockerServer(f.RemoteRuntimeEndpoint, ds) if err := server.Start(); err != nil { return err } diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/BUILD b/vendor/k8s.io/kubernetes/federation/apis/federation/BUILD deleted file mode 100644 index 0afb2d90b770..000000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/BUILD +++ /dev/null @@ -1,43 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "annotations.go", - "doc.go", - "register.go", - "types.go", - "zz_generated.deepcopy.go", - ], - deps = [ - "//pkg/api:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [ - ":package-srcs", - "//federation/apis/federation/install:all-srcs", - "//federation/apis/federation/v1beta1:all-srcs", - "//federation/apis/federation/validation:all-srcs", - ], - tags = ["automanaged"], -) diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/annotations.go b/vendor/k8s.io/kubernetes/federation/apis/federation/annotations.go deleted file mode 100644 index 9a9e519f3631..000000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/annotations.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package federation - -// FederationNameAnnotation is the annotation which holds the name of -// the federation that a federation control plane component is associated -// with. It must be applied to all the API types that represent that federations -// control plane's components in the host cluster and in joining clusters. -const FederationNameAnnotation = "federation.alpha.kubernetes.io/federation-name" - -// ClusterNameAnnotation is the annotation which holds the name of -// the cluster that an object is associated with. If the object is -// not associated with any cluster, then this annotation is not -// required. -const ClusterNameAnnotation = "federation.alpha.kubernetes.io/cluster-name" diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/doc.go b/vendor/k8s.io/kubernetes/federation/apis/federation/doc.go deleted file mode 100644 index 7a45fb7bb8ca..000000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package,register - -package federation diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/install/BUILD b/vendor/k8s.io/kubernetes/federation/apis/federation/install/BUILD deleted file mode 100644 index c00f6011194b..000000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/install/BUILD +++ /dev/null @@ -1,48 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_library( - name = "go_default_library", - srcs = ["install.go"], - deps = [ - "//federation/apis/federation:go_default_library", - "//federation/apis/federation/v1beta1:go_default_library", - "//pkg/api:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = ["install_test.go"], - library = ":go_default_library", - deps = [ - "//federation/apis/federation:go_default_library", - "//federation/apis/federation/v1beta1:go_default_library", - "//pkg/api:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/install/install.go 
b/vendor/k8s.io/kubernetes/federation/apis/federation/install/install.go deleted file mode 100644 index a4ceffdb1401..000000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/install/install.go +++ /dev/null @@ -1,51 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package install - -import ( - "k8s.io/apimachinery/pkg/apimachinery/announced" - "k8s.io/apimachinery/pkg/apimachinery/registered" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/pkg/api" - - "k8s.io/kubernetes/federation/apis/federation" - "k8s.io/kubernetes/federation/apis/federation/v1beta1" -) - -func init() { - Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) -} - -// Install registers the API group and adds types to a scheme -func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { - if err := announced.NewGroupMetaFactory( - &announced.GroupMetaFactoryArgs{ - GroupName: federation.GroupName, - VersionPreferenceOrder: []string{v1beta1.SchemeGroupVersion.Version}, - AddInternalObjectsToScheme: federation.AddToScheme, - RootScopedKinds: sets.NewString( - "Cluster", - ), - }, - announced.VersionToSchemeFunc{ - v1beta1.SchemeGroupVersion.Version: v1beta1.AddToScheme, - }, - ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { - panic(err) - } -} diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/register.go b/vendor/k8s.io/kubernetes/federation/apis/federation/register.go deleted file mode 100644 index 9fe9bfc865ae..000000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/register.go +++ /dev/null @@ -1,51 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package federation - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "federation" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} - -// Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) schema.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme -) - -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &Cluster{}, - &ClusterList{}, - ) - return nil -} diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/types.go b/vendor/k8s.io/kubernetes/federation/apis/federation/types.go deleted file mode 100644 index 0e0aeb38f767..000000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/types.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package federation - -import ( - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/pkg/api" -) - -// ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match. -type ServerAddressByClientCIDR struct { - // The CIDR with which clients can match their IP to figure out the server address that they should use. - ClientCIDR string - // Address of this server, suitable for a client that matches the above CIDR. - // This can be a hostname, hostname:port, IP or IP:port. - ServerAddress string -} - -// ClusterSpec describes the attributes of a kubernetes cluster. -type ClusterSpec struct { - // A map of client CIDR to server address. - // This is to help clients reach servers in the most network-efficient way possible. - // Clients can use the appropriate server address as per the CIDR that they match. - // In case of multiple matches, clients should use the longest matching CIDR. - ServerAddressByClientCIDRs []ServerAddressByClientCIDR - // Name of the secret containing kubeconfig to access this cluster. - // The secret is read from the kubernetes cluster that is hosting federation control plane. - // Admin needs to ensure that the required secret exists. Secret should be in the same namespace where federation control plane is hosted and it should have kubeconfig in its data with key "kubeconfig". - // This will later be changed to a reference to secret in federation control plane when the federation control plane supports secrets. - // This can be left empty if the cluster allows insecure access. 
- // +optional - SecretRef *api.LocalObjectReference -} - -type ClusterConditionType string - -// These are valid conditions of a cluster. -const ( - // ClusterReady means the cluster is ready to accept workloads. - ClusterReady ClusterConditionType = "Ready" - // ClusterOffline means the cluster is temporarily down or not reachable - ClusterOffline ClusterConditionType = "Offline" -) - -// ClusterCondition describes current state of a cluster. -type ClusterCondition struct { - // Type of cluster condition, Complete or Failed. - Type ClusterConditionType - // Status of the condition, one of True, False, Unknown. - Status api.ConditionStatus - // Last time the condition was checked. - // +optional - LastProbeTime metav1.Time - // Last time the condition transit from one status to another. - // +optional - LastTransitionTime metav1.Time - // (brief) reason for the condition's last transition. - // +optional - Reason string - // Human readable message indicating details about last transition. - // +optional - Message string -} - -// ClusterStatus is information about the current status of a cluster updated by cluster controller periodically. -type ClusterStatus struct { - // Conditions is an array of current cluster conditions. - // +optional - Conditions []ClusterCondition - // Zones is the list of availability zones in which the nodes of the cluster exist, e.g. 'us-east1-a'. - // These will always be in the same region. - // +optional - Zones []string - // Region is the name of the region in which all of the nodes in the cluster exist. e.g. 'us-east1'. - // +optional - Region string -} - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Information about a registered cluster in a federated kubernetes setup. Clusters are not namespaced and have unique names in the federation. -type Cluster struct { - metav1.TypeMeta - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta - - // Spec defines the behavior of the Cluster. - // +optional - Spec ClusterSpec - // Status describes the current status of a Cluster - // +optional - Status ClusterStatus -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// A list of all the kubernetes clusters registered to the federation -type ClusterList struct { - metav1.TypeMeta - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - metav1.ListMeta - - // List of Cluster objects. - Items []Cluster -} - -// Temporary/alpha structures to support custom replica assignments within Federated workloads. - -// A set of preferences that can be added to federated version of workloads (deployments, replicasets, ..) -// as a json-serialized annotation. The preferences allow the users to express in which clusters they -// want to put their replicas within the mentioned workload objects. -type ReplicaAllocationPreferences struct { - // If set to true then already scheduled and running replicas may be moved to other clusters - // in order to match current state to the specified preferences. Otherwise, if set to false, - // up and running replicas will not be moved. - // +optional - Rebalance bool - - // A mapping between cluster names and preferences regarding a local workload object (dep, rs, .. ) in - // these clusters. 
- // "*" (if provided) applies to all clusters if an explicit mapping is not provided. - // If omitted, clusters without explicit preferences should not have any replicas scheduled. - // +optional - Clusters map[string]ClusterPreferences -} - -// Preferences regarding number of replicas assigned to a cluster workload object (dep, rs, ..) within -// a federated workload object. -type ClusterPreferences struct { - // Minimum number of replicas that should be assigned to this cluster workload object. 0 by default. - // +optional - MinReplicas int64 - - // Maximum number of replicas that should be assigned to this cluster workload object. - // Unbounded if no value provided (default). - // +optional - MaxReplicas *int64 - - // A number expressing the preference to put an additional replica to this cluster workload object. - // 0 by default. - Weight int64 -} - -// Annotation for a federated service to keep record of service loadbalancer ingresses in federated cluster -type FederatedServiceIngress struct { - // List of loadbalancer ingress of a service in all federated clusters - // +optional - Items []ClusterServiceIngress `json:"items,omitempty"` -} - -// Loadbalancer ingresses of a service within a federated cluster -type ClusterServiceIngress struct { - // Cluster is the name of the federated cluster - Cluster string `json:"cluster"` - // List of loadbalancer ingresses of a federated service within a federated cluster - Items []v1.LoadBalancerIngress `json:"items"` -} diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/BUILD b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/BUILD deleted file mode 100644 index ae49d1a9ec45..000000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/BUILD +++ /dev/null @@ -1,50 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "defaults.go", - "doc.go", - "generated.pb.go", - "register.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - deps = [ - "//federation/apis/federation:go_default_library", - "//pkg/api:go_default_library", - "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) - -filegroup( - name = "go_default_library_protos", - srcs = ["generated.proto"], - visibility = ["//visibility:public"], -) diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/defaults.go b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/defaults.go deleted file mode 100644 index 37abb53bd210..000000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/defaults.go +++ /dev/null @@ -1,25 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "k8s.io/apimachinery/pkg/runtime" -) - -func addDefaultingFuncs(scheme *runtime.Scheme) error { - return RegisterDefaults(scheme) -} diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/doc.go b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/doc.go deleted file mode 100644 index 2110288a0c4f..000000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package,register -// +k8s:conversion-gen=k8s.io/kubernetes/federation/apis/federation -// +k8s:openapi-gen=true -// +k8s:defaulter-gen=TypeMeta -package v1beta1 diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/generated.pb.go b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/generated.pb.go deleted file mode 100644 index a93e24c32208..000000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/generated.pb.go +++ /dev/null @@ -1,1766 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/federation/apis/federation/v1beta1/generated.proto -// DO NOT EDIT! - -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/federation/apis/federation/v1beta1/generated.proto - - It has these top-level messages: - Cluster - ClusterCondition - ClusterList - ClusterSelectorRequirement - ClusterSpec - ClusterStatus - ServerAddressByClientCIDR -*/ -package v1beta1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import k8s_io_api_core_v1 "k8s.io/api/core/v1" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -func (m *Cluster) Reset() { *m = Cluster{} } -func (*Cluster) ProtoMessage() {} -func (*Cluster) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *ClusterCondition) Reset() { *m = ClusterCondition{} } -func (*ClusterCondition) ProtoMessage() {} -func (*ClusterCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *ClusterList) Reset() { *m = ClusterList{} } -func (*ClusterList) ProtoMessage() {} -func (*ClusterList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *ClusterSelectorRequirement) Reset() { *m = ClusterSelectorRequirement{} } -func (*ClusterSelectorRequirement) ProtoMessage() {} -func (*ClusterSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{3} -} - -func (m *ClusterSpec) Reset() { *m = ClusterSpec{} } -func (*ClusterSpec) ProtoMessage() {} -func (*ClusterSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } - -func (m *ClusterStatus) Reset() { *m = ClusterStatus{} } -func (*ClusterStatus) ProtoMessage() {} -func (*ClusterStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } - -func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} } -func (*ServerAddressByClientCIDR) ProtoMessage() {} -func (*ServerAddressByClientCIDR) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{6} -} - -func init() { - proto.RegisterType((*Cluster)(nil), "k8s.io.kubernetes.federation.apis.federation.v1beta1.Cluster") - proto.RegisterType((*ClusterCondition)(nil), "k8s.io.kubernetes.federation.apis.federation.v1beta1.ClusterCondition") - proto.RegisterType((*ClusterList)(nil), "k8s.io.kubernetes.federation.apis.federation.v1beta1.ClusterList") - proto.RegisterType((*ClusterSelectorRequirement)(nil), "k8s.io.kubernetes.federation.apis.federation.v1beta1.ClusterSelectorRequirement") - proto.RegisterType((*ClusterSpec)(nil), "k8s.io.kubernetes.federation.apis.federation.v1beta1.ClusterSpec") - proto.RegisterType((*ClusterStatus)(nil), "k8s.io.kubernetes.federation.apis.federation.v1beta1.ClusterStatus") - proto.RegisterType((*ServerAddressByClientCIDR)(nil), "k8s.io.kubernetes.federation.apis.federation.v1beta1.ServerAddressByClientCIDR") -} -func (m *Cluster) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Cluster) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - return i, nil -} 
- -func (m *ClusterCondition) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ClusterCondition) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastProbeTime.Size())) - n4, err := m.LastProbeTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n5, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil -} - -func (m *ClusterList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ClusterList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ClusterSelectorRequirement) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ClusterSelectorRequirement) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) - i += copy(dAtA[i:], m.Operator) - if len(m.Values) > 0 { - for _, s := range m.Values { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func (m *ClusterSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ClusterSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.ServerAddressByClientCIDRs) > 0 { - for _, msg := range m.ServerAddressByClientCIDRs { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.SecretRef != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n7, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - } - return i, nil -} - -func (m 
*ClusterStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ClusterStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Zones) > 0 { - for _, s := range m.Zones { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Region))) - i += copy(dAtA[i:], m.Region) - return i, nil -} - -func (m *ServerAddressByClientCIDR) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ServerAddressByClientCIDR) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClientCIDR))) - i += copy(dAtA[i:], m.ClientCIDR) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServerAddress))) - i += copy(dAtA[i:], m.ServerAddress) - return i, nil -} - -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *Cluster) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ClusterCondition) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastProbeTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ClusterList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ClusterSelectorRequirement) Size() (n int) { - var l int - _ = l - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Operator) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Values) > 0 { - for _, s := range m.Values { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - 
} - } - return n -} - -func (m *ClusterSpec) Size() (n int) { - var l int - _ = l - if len(m.ServerAddressByClientCIDRs) > 0 { - for _, e := range m.ServerAddressByClientCIDRs { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ClusterStatus) Size() (n int) { - var l int - _ = l - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Zones) > 0 { - for _, s := range m.Zones { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.Region) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ServerAddressByClientCIDR) Size() (n int) { - var l int - _ = l - l = len(m.ClientCIDR) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ServerAddress) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Cluster) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Cluster{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterSpec", "ClusterSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ClusterStatus", "ClusterStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ClusterCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClusterCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastProbeTime:` + strings.Replace(strings.Replace(this.LastProbeTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *ClusterList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClusterList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Cluster", "Cluster", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ClusterSelectorRequirement) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClusterSelectorRequirement{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, - `Values:` + fmt.Sprintf("%v", this.Values) + `,`, - `}`, - }, "") - return s -} -func (this *ClusterSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClusterSpec{`, - `ServerAddressByClientCIDRs:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ServerAddressByClientCIDRs), "ServerAddressByClientCIDR", "ServerAddressByClientCIDR", 1), `&`, ``, 1) + `,`, - `SecretRef:` + 
strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "k8s_io_api_core_v1.LocalObjectReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ClusterStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClusterStatus{`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ClusterCondition", "ClusterCondition", 1), `&`, ``, 1) + `,`, - `Zones:` + fmt.Sprintf("%v", this.Zones) + `,`, - `Region:` + fmt.Sprintf("%v", this.Region) + `,`, - `}`, - }, "") - return s -} -func (this *ServerAddressByClientCIDR) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ServerAddressByClientCIDR{`, - `ClientCIDR:` + fmt.Sprintf("%v", this.ClientCIDR) + `,`, - `ServerAddress:` + fmt.Sprintf("%v", this.ServerAddress) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Cluster) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Cluster: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Cluster: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := 
m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClusterCondition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClusterCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = ClusterConditionType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastProbeTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClusterList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClusterList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - 
m.Items = append(m.Items, Cluster{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClusterSelectorRequirement) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClusterSelectorRequirement: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Operator = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Values = append(m.Values, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClusterSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - 
preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClusterSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServerAddressByClientCIDRs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ServerAddressByClientCIDRs = append(m.ServerAddressByClientCIDRs, ServerAddressByClientCIDR{}) - if err := m.ServerAddressByClientCIDRs[len(m.ServerAddressByClientCIDRs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretRef == nil { - m.SecretRef = &k8s_io_api_core_v1.LocalObjectReference{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClusterStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClusterStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + 
msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, ClusterCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Zones", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Zones = append(m.Zones, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Region = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServerAddressByClientCIDR) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServerAddressByClientCIDR: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServerAddressByClientCIDR: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientCIDR", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ClientCIDR = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServerAddress", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= 
(uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ServerAddress = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/federation/apis/federation/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 871 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xcf, 0x6f, 0xe3, 0x44, - 0x14, 0x8e, 0x9b, 0x26, 0x6d, 0xa6, 0x04, 0x56, 0x23, 0x90, 0x42, 0x24, 0x1c, 0x64, 0x21, 0x54, - 0x10, 0x6b, 0xd3, 0x52, 0xa1, 0x95, 0x10, 0x87, 0x75, 0x56, 0x48, 0x2b, 0xba, 0x2a, 0x9a, 0x16, - 0x0e, 0x2b, 0x0e, 0x4c, 0x9c, 0x57, 0x77, 0x48, 0xec, 0x31, 0x33, 0xe3, 0x48, 0xd9, 0x13, 0x7f, - 0x00, 0x48, 0x9c, 0xf9, 0x37, 0x38, 0x73, 0xef, 0x09, 0xf6, 0xc0, 0x61, 0x4f, 0x11, 0x0d, 0xff, - 0xc5, 0x9e, 0xd0, 0x8c, 0x27, 0x4e, 0xbc, 0x69, 0x60, 0xb7, 0xbd, 0x79, 0x3e, 0xbf, 0xef, 0xfb, - 0xde, 0xbc, 0x1f, 0x83, 0x1e, 0x8c, 0xee, 0x49, 0x9f, 0xf1, 0x60, 0x94, 0x0f, 0x40, 0xa4, 0xa0, - 0x40, 0x06, 0xe7, 0x30, 0x04, 0x41, 0x15, 0xe3, 
0x69, 0x40, 0x33, 0x56, 0x39, 0x4f, 0x0e, 0x06, - 0xa0, 0xe8, 0x41, 0x10, 0x43, 0xaa, 0x21, 0x18, 0xfa, 0x99, 0xe0, 0x8a, 0xe3, 0xa3, 0x42, 0xc5, - 0x5f, 0xaa, 0xf8, 0x4b, 0x96, 0xaf, 0x55, 0x56, 0xcf, 0x56, 0xa5, 0x7b, 0x37, 0x66, 0xea, 0x22, - 0x1f, 0xf8, 0x11, 0x4f, 0x82, 0x98, 0xc7, 0x3c, 0x30, 0x62, 0x83, 0xfc, 0xdc, 0x9c, 0xcc, 0xc1, - 0x7c, 0x15, 0x26, 0x5d, 0xcf, 0xa6, 0x4a, 0x33, 0x16, 0x44, 0x5c, 0x40, 0x30, 0x59, 0x4b, 0xa4, - 0x7b, 0xb4, 0x8c, 0x49, 0x68, 0x74, 0xc1, 0x52, 0x10, 0xd3, 0x20, 0x1b, 0xc5, 0xc5, 0x4d, 0x12, - 0x50, 0xf4, 0x3a, 0x56, 0xb0, 0x89, 0x25, 0xf2, 0x54, 0xb1, 0x04, 0xd6, 0x08, 0x9f, 0xfe, 0x1f, - 0x41, 0x46, 0x17, 0x90, 0xd0, 0x35, 0xde, 0x27, 0x9b, 0x78, 0xb9, 0x62, 0xe3, 0x80, 0xa5, 0x4a, - 0x2a, 0xf1, 0x22, 0xc9, 0xfb, 0x7d, 0x0b, 0xed, 0xf4, 0xc7, 0xb9, 0x54, 0x20, 0xf0, 0x77, 0x68, - 0x57, 0x5f, 0x62, 0x48, 0x15, 0xed, 0x38, 0xef, 0x3a, 0xfb, 0x7b, 0x87, 0x1f, 0xfb, 0xb6, 0xf6, - 0xab, 0x9a, 0x7e, 0x36, 0x8a, 0x8b, 0xb2, 0xeb, 0x68, 0x7f, 0x72, 0xe0, 0x9f, 0x0c, 0xbe, 0x87, - 0x48, 0x3d, 0x02, 0x45, 0x43, 0x7c, 0x39, 0xeb, 0xd5, 0xe6, 0xb3, 0x1e, 0x5a, 0x62, 0xa4, 0x54, - 0xc5, 0x11, 0xda, 0x96, 0x19, 0x44, 0x9d, 0x2d, 0xa3, 0x7e, 0xdf, 0xbf, 0x49, 0x67, 0x7d, 0x9b, - 0xee, 0x69, 0x06, 0x51, 0xf8, 0x9a, 0xb5, 0xdb, 0xd6, 0x27, 0x62, 0xc4, 0xf1, 0x08, 0x35, 0xa5, - 0xa2, 0x2a, 0x97, 0x9d, 0xba, 0xb1, 0xe9, 0xdf, 0xce, 0xc6, 0x48, 0x85, 0xaf, 0x5b, 0xa3, 0x66, - 0x71, 0x26, 0xd6, 0xc2, 0xfb, 0xa3, 0x8e, 0xee, 0xd8, 0xc8, 0x3e, 0x4f, 0x87, 0x4c, 0x4b, 0xe0, - 0x7b, 0x68, 0x5b, 0x4d, 0x33, 0x30, 0x45, 0x6c, 0x85, 0xef, 0x2d, 0x72, 0x3c, 0x9b, 0x66, 0xf0, - 0x7c, 0xd6, 0x7b, 0xf3, 0xc5, 0x78, 0x8d, 0x13, 0xc3, 0xc0, 0xc7, 0x65, 0xee, 0x5b, 0x86, 0x7b, - 0x54, 0xb5, 0x7d, 0x3e, 0xeb, 0x5d, 0x33, 0xa8, 0x7e, 0xa9, 0x54, 0x4d, 0x0e, 0xc7, 0xa8, 0x3d, - 0xa6, 0x52, 0x7d, 0x25, 0xf8, 0x00, 0xce, 0x58, 0x02, 0xb6, 0x20, 0x1f, 0xbe, 0x5c, 0x57, 0x35, - 0x23, 0x7c, 0xcb, 0x26, 0xd0, 0x3e, 0x5e, 0x15, 0x22, 0x55, 0x5d, 0x3c, 0x41, 0x58, 0x03, 0x67, - 0x82, 0xa6, 0xb2, 0xb8, 0x92, 0x76, 0xdb, 0x7e, 0x65, 0xb7, 0xae, 0x75, 0xc3, 0xc7, 0x6b, 0x6a, - 0xe4, 0x1a, 0x07, 0xfc, 0x3e, 0x6a, 0x0a, 0xa0, 0x92, 0xa7, 0x9d, 0x86, 0x29, 0x57, 0xd9, 0x25, - 0x62, 0x50, 0x62, 0xff, 0xe2, 0x0f, 0xd0, 0x4e, 0x02, 0x52, 0xd2, 0x18, 0x3a, 0x4d, 0x13, 0xf8, - 0x86, 0x0d, 0xdc, 0x79, 0x54, 0xc0, 0x64, 0xf1, 0xdf, 0xfb, 0xd3, 0x41, 0x7b, 0xb6, 0x41, 0xc7, - 0x4c, 0x2a, 0xfc, 0xed, 0xda, 0x52, 0xf8, 0x2f, 0x77, 0x21, 0xcd, 0x36, 0x2b, 0x71, 0xc7, 0x7a, - 0xed, 0x2e, 0x90, 0x95, 0x85, 0x18, 0xa0, 0x06, 0x53, 0x90, 0xe8, 0x76, 0xd7, 0xf7, 0xf7, 0x0e, - 0x3f, 0xbf, 0xd5, 0xa8, 0x86, 0x6d, 0xeb, 0xd4, 0x78, 0xa8, 0x35, 0x49, 0x21, 0xed, 0xfd, 0xec, - 0xa0, 0xee, 0x62, 0x98, 0x61, 0x0c, 0x91, 0xe2, 0x82, 0xc0, 0x0f, 0x39, 0x13, 0x90, 0x40, 0xaa, - 0xf0, 0x3b, 0xa8, 0x3e, 0x82, 0xa9, 0x9d, 0xd5, 0x3d, 0xab, 0x50, 0xff, 0x12, 0xa6, 0x44, 0xe3, - 0xf8, 0x23, 0xb4, 0xcb, 0x33, 0x6d, 0xc8, 0x85, 0x9d, 0xc9, 0xf2, 0x3e, 0x27, 0x16, 0x27, 0x65, - 0x04, 0xf6, 0x50, 0x73, 0x42, 0xc7, 0x39, 0xe8, 0xdd, 0xab, 0xef, 0xb7, 0x42, 0xa4, 0x9b, 0xf1, - 0x8d, 0x41, 0x88, 0xfd, 0xe3, 0xfd, 0xba, 0x55, 0x56, 0x58, 0x6f, 0x2d, 0xfe, 0xcd, 0x41, 0x5d, - 0x09, 0x62, 0x02, 0xe2, 0xfe, 0x70, 0x28, 0x40, 0xca, 0x70, 0xda, 0x1f, 0x33, 0x48, 0x55, 0xff, - 0xe1, 0x03, 0x22, 0x3b, 0x8e, 0xa9, 0xcc, 0xc9, 0xcd, 0x2a, 0x73, 0xba, 0x49, 0x37, 0xf4, 0xec, - 0x2d, 0xba, 0x1b, 0x43, 0x24, 0xf9, 0x8f, 0xb4, 0xf0, 0xd7, 0xa8, 0x25, 0x21, 0x12, 0xa0, 0x08, - 0x9c, 0xdb, 0xf7, 0x6c, 0x7f, 0x65, 0x30, 0x7c, 0xbd, 0x9b, 0x66, 0x0c, 
0x78, 0x44, 0xc7, 0xc5, - 0x63, 0x48, 0xe0, 0x1c, 0x04, 0xa4, 0x11, 0x84, 0xed, 0xf9, 0xac, 0xd7, 0x3a, 0x5d, 0xd0, 0xc9, - 0x52, 0xc9, 0xfb, 0xcb, 0x41, 0xed, 0xca, 0xcb, 0x83, 0x9f, 0x20, 0x14, 0x2d, 0xf6, 0x7b, 0x51, - 0x8d, 0x2f, 0x6e, 0x35, 0x27, 0xe5, 0x73, 0xb1, 0x7c, 0xad, 0x4b, 0x48, 0x92, 0x15, 0x37, 0xdc, - 0x43, 0x8d, 0x27, 0x3c, 0x05, 0xd9, 0x69, 0x98, 0x6e, 0xb6, 0xf4, 0x6c, 0x3d, 0xd6, 0x00, 0x29, - 0xf0, 0x62, 0x01, 0x63, 0xc6, 0x53, 0xbb, 0x57, 0x2b, 0x0b, 0xa8, 0x51, 0x62, 0xff, 0x7a, 0x3f, - 0x39, 0xe8, 0xed, 0x8d, 0x85, 0xc6, 0x87, 0x08, 0x45, 0xe5, 0xc9, 0x4e, 0xe2, 0x32, 0xb5, 0xf2, - 0x0f, 0x59, 0x89, 0xc2, 0x9f, 0xa1, 0x76, 0xa5, 0x3b, 0x76, 0x38, 0xcb, 0xf7, 0xaa, 0xe2, 0x46, - 0xaa, 0xb1, 0xe1, 0xdd, 0xcb, 0x2b, 0xb7, 0xf6, 0xf4, 0xca, 0xad, 0x3d, 0xbb, 0x72, 0x6b, 0x3f, - 0xce, 0x5d, 0xe7, 0x72, 0xee, 0x3a, 0x4f, 0xe7, 0xae, 0xf3, 0x6c, 0xee, 0x3a, 0x7f, 0xcf, 0x5d, - 0xe7, 0x97, 0x7f, 0xdc, 0xda, 0xe3, 0x1d, 0x5b, 0xb7, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xed, - 0x72, 0x6d, 0x62, 0xc8, 0x08, 0x00, 0x00, -} diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/generated.proto b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/generated.proto deleted file mode 100644 index 5e1f66528740..000000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/generated.proto +++ /dev/null @@ -1,150 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.kubernetes.federation.apis.federation.v1beta1; - -import "k8s.io/api/core/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; -import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1beta1"; - -// Information about a registered cluster in a federated kubernetes setup. Clusters are not namespaced and have unique names in the federation. -message Cluster { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Spec defines the behavior of the Cluster. - // +optional - optional ClusterSpec spec = 2; - - // Status describes the current status of a Cluster - // +optional - optional ClusterStatus status = 3; -} - -// ClusterCondition describes current state of a cluster. -message ClusterCondition { - // Type of cluster condition, Complete or Failed. - optional string type = 1; - - // Status of the condition, one of True, False, Unknown. - optional string status = 2; - - // Last time the condition was checked. 
- // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3; - - // Last time the condition transit from one status to another. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; - - // (brief) reason for the condition's last transition. - // +optional - optional string reason = 5; - - // Human readable message indicating details about last transition. - // +optional - optional string message = 6; -} - -// A list of all the kubernetes clusters registered to the federation -message ClusterList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // List of Cluster objects. - repeated Cluster items = 2; -} - -// ClusterSelectorRequirement contains values, a key, and an operator that relates the key and values. -// The zero value of ClusterSelectorRequirement is invalid. -// ClusterSelectorRequirement implements both set based match and exact match -message ClusterSelectorRequirement { - // +patchMergeKey=key - // +patchStrategy=merge - optional string key = 1; - - // The Operator defines how the Key is matched to the Values. One of "in", "notin", - // "exists", "!", "=", "!=", "gt" or "lt". - optional string operator = 2; - - // An array of string values. If the operator is "in" or "notin", - // the values array must be non-empty. If the operator is "exists" or "!", - // the values array must be empty. If the operator is "gt" or "lt", the values - // array must have a single element, which will be interpreted as an integer. - // This array is replaced during a strategic merge patch. - // +optional - repeated string values = 3; -} - -// ClusterSpec describes the attributes of a kubernetes cluster. -message ClusterSpec { - // A map of client CIDR to server address. - // This is to help clients reach servers in the most network-efficient way possible. - // Clients can use the appropriate server address as per the CIDR that they match. - // In case of multiple matches, clients should use the longest matching CIDR. - // +patchMergeKey=clientCIDR - // +patchStrategy=merge - repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 1; - - // Name of the secret containing kubeconfig to access this cluster. - // The secret is read from the kubernetes cluster that is hosting federation control plane. - // Admin needs to ensure that the required secret exists. Secret should be in the same namespace where federation control plane is hosted and it should have kubeconfig in its data with key "kubeconfig". - // This will later be changed to a reference to secret in federation control plane when the federation control plane supports secrets. - // This can be left empty if the cluster allows insecure access. - // +optional - optional k8s.io.api.core.v1.LocalObjectReference secretRef = 2; -} - -// ClusterStatus is information about the current status of a cluster updated by cluster controller periodically. -message ClusterStatus { - // Conditions is an array of current cluster conditions. - // +optional - repeated ClusterCondition conditions = 1; - - // Zones is the list of availability zones in which the nodes of the cluster exist, e.g. 'us-east1-a'. - // These will always be in the same region. - // +optional - repeated string zones = 5; - - // Region is the name of the region in which all of the nodes in the cluster exist. e.g. 'us-east1'. 
- // +optional - optional string region = 6; -} - -// ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match. -message ServerAddressByClientCIDR { - // The CIDR with which clients can match their IP to figure out the server address that they should use. - optional string clientCIDR = 1; - - // Address of this server, suitable for a client that matches the above CIDR. - // This can be a hostname, hostname:port, IP or IP:port. - optional string serverAddress = 2; -} - diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/register.go b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/register.go deleted file mode 100644 index b14f5b1b0616..000000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/register.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "federation" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} - -var ( - // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - SchemeBuilder runtime.SchemeBuilder - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -func init() { - // We only register manually written functions here. The registration of the - // generated functions takes place in the generated files. The separation - // makes the code compile even when the generated files are missing. - localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs) -} - -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &Cluster{}, - &ClusterList{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/types.go b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/types.go deleted file mode 100644 index b8c9cca3fdb3..000000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/types.go +++ /dev/null @@ -1,161 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match. -type ServerAddressByClientCIDR struct { - // The CIDR with which clients can match their IP to figure out the server address that they should use. - ClientCIDR string `json:"clientCIDR" protobuf:"bytes,1,opt,name=clientCIDR"` - // Address of this server, suitable for a client that matches the above CIDR. - // This can be a hostname, hostname:port, IP or IP:port. - ServerAddress string `json:"serverAddress" protobuf:"bytes,2,opt,name=serverAddress"` -} - -// ClusterSpec describes the attributes of a kubernetes cluster. -type ClusterSpec struct { - // A map of client CIDR to server address. - // This is to help clients reach servers in the most network-efficient way possible. - // Clients can use the appropriate server address as per the CIDR that they match. - // In case of multiple matches, clients should use the longest matching CIDR. - // +patchMergeKey=clientCIDR - // +patchStrategy=merge - ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs" patchStrategy:"merge" patchMergeKey:"clientCIDR" protobuf:"bytes,1,rep,name=serverAddressByClientCIDRs"` - // Name of the secret containing kubeconfig to access this cluster. - // The secret is read from the kubernetes cluster that is hosting federation control plane. - // Admin needs to ensure that the required secret exists. Secret should be in the same namespace where federation control plane is hosted and it should have kubeconfig in its data with key "kubeconfig". - // This will later be changed to a reference to secret in federation control plane when the federation control plane supports secrets. - // This can be left empty if the cluster allows insecure access. - // +optional - SecretRef *v1.LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,2,opt,name=secretRef"` -} - -type ClusterConditionType string - -// These are valid conditions of a cluster. -const ( - // ClusterReady means the cluster is ready to accept workloads. - ClusterReady ClusterConditionType = "Ready" - // ClusterOffline means the cluster is temporarily down or not reachable - ClusterOffline ClusterConditionType = "Offline" -) - -// ClusterCondition describes current state of a cluster. -type ClusterCondition struct { - // Type of cluster condition, Complete or Failed. - Type ClusterConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ClusterConditionType"` - // Status of the condition, one of True, False, Unknown. - Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"` - // Last time the condition was checked. - // +optional - LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` - // Last time the condition transit from one status to another. - // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` - // (brief) reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` - // Human readable message indicating details about last transition. 
- // +optional - Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` -} - -// ClusterStatus is information about the current status of a cluster updated by cluster controller periodically. -type ClusterStatus struct { - // Conditions is an array of current cluster conditions. - // +optional - Conditions []ClusterCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` - // Zones is the list of availability zones in which the nodes of the cluster exist, e.g. 'us-east1-a'. - // These will always be in the same region. - // +optional - Zones []string `json:"zones,omitempty" protobuf:"bytes,5,rep,name=zones"` - // Region is the name of the region in which all of the nodes in the cluster exist. e.g. 'us-east1'. - // +optional - Region string `json:"region,omitempty" protobuf:"bytes,6,opt,name=region"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +genclient:nonNamespaced - -// Information about a registered cluster in a federated kubernetes setup. Clusters are not namespaced and have unique names in the federation. -type Cluster struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec defines the behavior of the Cluster. - // +optional - Spec ClusterSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // Status describes the current status of a Cluster - // +optional - Status ClusterStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// A list of all the kubernetes clusters registered to the federation -type ClusterList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // List of Cluster objects. - Items []Cluster `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// Expressed as value of annotation for selecting the clusters on which a resource is created. -type ClusterSelector []ClusterSelectorRequirement - -// ClusterSelectorRequirement contains values, a key, and an operator that relates the key and values. -// The zero value of ClusterSelectorRequirement is invalid. -// ClusterSelectorRequirement implements both set based match and exact match -type ClusterSelectorRequirement struct { - // +patchMergeKey=key - // +patchStrategy=merge - Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"` - // The Operator defines how the Key is matched to the Values. One of "in", "notin", - // "exists", "!", "=", "!=", "gt" or "lt". - Operator string `json:"operator" protobuf:"bytes,2,opt,name=operator"` - // An array of string values. If the operator is "in" or "notin", - // the values array must be non-empty. If the operator is "exists" or "!", - // the values array must be empty. If the operator is "gt" or "lt", the values - // array must have a single element, which will be interpreted as an integer. - // This array is replaced during a strategic merge patch. 
- // +optional - Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"` -} - -const ( - // FederationNamespaceSystem is the system namespace where we place federation control plane components. - FederationNamespaceSystem string = "federation-system" - - // FederationClusterSelectorAnnotation is used to determine placement of objects on federated clusters - FederationClusterSelectorAnnotation string = "federation.alpha.kubernetes.io/cluster-selector" - - // FederationOnlyClusterSelector is the cluster selector to indicate any object in - // federation having this annotation should not be synced to federated clusters. - FederationOnlyClusterSelector string = "federation.kubernetes.io/federation-control-plane=true" -) diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/types_swagger_doc_generated.go deleted file mode 100644 index dda8cf524b9e..000000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/types_swagger_doc_generated.go +++ /dev/null @@ -1,106 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE -var map_Cluster = map[string]string{ - "": "Information about a registered cluster in a federated kubernetes setup. Clusters are not namespaced and have unique names in the federation.", - "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Spec defines the behavior of the Cluster.", - "status": "Status describes the current status of a Cluster", -} - -func (Cluster) SwaggerDoc() map[string]string { - return map_Cluster -} - -var map_ClusterCondition = map[string]string{ - "": "ClusterCondition describes current state of a cluster.", - "type": "Type of cluster condition, Complete or Failed.", - "status": "Status of the condition, one of True, False, Unknown.", - "lastProbeTime": "Last time the condition was checked.", - "lastTransitionTime": "Last time the condition transit from one status to another.", - "reason": "(brief) reason for the condition's last transition.", - "message": "Human readable message indicating details about last transition.", -} - -func (ClusterCondition) SwaggerDoc() map[string]string { - return map_ClusterCondition -} - -var map_ClusterList = map[string]string{ - "": "A list of all the kubernetes clusters registered to the federation", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "items": "List of Cluster objects.", -} - -func (ClusterList) SwaggerDoc() map[string]string { - return map_ClusterList -} - -var map_ClusterSelectorRequirement = map[string]string{ - "": "ClusterSelectorRequirement contains values, a key, and an operator that relates the key and values. The zero value of ClusterSelectorRequirement is invalid. ClusterSelectorRequirement implements both set based match and exact match", - "operator": "The Operator defines how the Key is matched to the Values. One of \"in\", \"notin\", \"exists\", \"!\", \"=\", \"!=\", \"gt\" or \"lt\".", - "values": "An array of string values. If the operator is \"in\" or \"notin\", the values array must be non-empty. If the operator is \"exists\" or \"!\", the values array must be empty. If the operator is \"gt\" or \"lt\", the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.", -} - -func (ClusterSelectorRequirement) SwaggerDoc() map[string]string { - return map_ClusterSelectorRequirement -} - -var map_ClusterSpec = map[string]string{ - "": "ClusterSpec describes the attributes of a kubernetes cluster.", - "serverAddressByClientCIDRs": "A map of client CIDR to server address. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR.", - "secretRef": "Name of the secret containing kubeconfig to access this cluster. The secret is read from the kubernetes cluster that is hosting federation control plane. Admin needs to ensure that the required secret exists. Secret should be in the same namespace where federation control plane is hosted and it should have kubeconfig in its data with key \"kubeconfig\". This will later be changed to a reference to secret in federation control plane when the federation control plane supports secrets. 
This can be left empty if the cluster allows insecure access.", -} - -func (ClusterSpec) SwaggerDoc() map[string]string { - return map_ClusterSpec -} - -var map_ClusterStatus = map[string]string{ - "": "ClusterStatus is information about the current status of a cluster updated by cluster controller periodically.", - "conditions": "Conditions is an array of current cluster conditions.", - "zones": "Zones is the list of availability zones in which the nodes of the cluster exist, e.g. 'us-east1-a'. These will always be in the same region.", - "region": "Region is the name of the region in which all of the nodes in the cluster exist. e.g. 'us-east1'.", -} - -func (ClusterStatus) SwaggerDoc() map[string]string { - return map_ClusterStatus -} - -var map_ServerAddressByClientCIDR = map[string]string{ - "": "ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match.", - "clientCIDR": "The CIDR with which clients can match their IP to figure out the server address that they should use.", - "serverAddress": "Address of this server, suitable for a client that matches the above CIDR. This can be a hostname, hostname:port, IP or IP:port.", -} - -func (ServerAddressByClientCIDR) SwaggerDoc() map[string]string { - return map_ServerAddressByClientCIDR -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/zz_generated.conversion.go deleted file mode 100644 index 1f9e733ddcdc..000000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/zz_generated.conversion.go +++ /dev/null @@ -1,205 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by conversion-gen. Do not edit it manually! - -package v1beta1 - -import ( - v1 "k8s.io/api/core/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - federation "k8s.io/kubernetes/federation/apis/federation" - api "k8s.io/kubernetes/pkg/api" - unsafe "unsafe" -) - -func init() { - localSchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. 
-func RegisterConversions(scheme *runtime.Scheme) error { - return scheme.AddGeneratedConversionFuncs( - Convert_v1beta1_Cluster_To_federation_Cluster, - Convert_federation_Cluster_To_v1beta1_Cluster, - Convert_v1beta1_ClusterCondition_To_federation_ClusterCondition, - Convert_federation_ClusterCondition_To_v1beta1_ClusterCondition, - Convert_v1beta1_ClusterList_To_federation_ClusterList, - Convert_federation_ClusterList_To_v1beta1_ClusterList, - Convert_v1beta1_ClusterSpec_To_federation_ClusterSpec, - Convert_federation_ClusterSpec_To_v1beta1_ClusterSpec, - Convert_v1beta1_ClusterStatus_To_federation_ClusterStatus, - Convert_federation_ClusterStatus_To_v1beta1_ClusterStatus, - Convert_v1beta1_ServerAddressByClientCIDR_To_federation_ServerAddressByClientCIDR, - Convert_federation_ServerAddressByClientCIDR_To_v1beta1_ServerAddressByClientCIDR, - ) -} - -func autoConvert_v1beta1_Cluster_To_federation_Cluster(in *Cluster, out *federation.Cluster, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1beta1_ClusterSpec_To_federation_ClusterSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1beta1_ClusterStatus_To_federation_ClusterStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1beta1_Cluster_To_federation_Cluster is an autogenerated conversion function. -func Convert_v1beta1_Cluster_To_federation_Cluster(in *Cluster, out *federation.Cluster, s conversion.Scope) error { - return autoConvert_v1beta1_Cluster_To_federation_Cluster(in, out, s) -} - -func autoConvert_federation_Cluster_To_v1beta1_Cluster(in *federation.Cluster, out *Cluster, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_federation_ClusterSpec_To_v1beta1_ClusterSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_federation_ClusterStatus_To_v1beta1_ClusterStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_federation_Cluster_To_v1beta1_Cluster is an autogenerated conversion function. -func Convert_federation_Cluster_To_v1beta1_Cluster(in *federation.Cluster, out *Cluster, s conversion.Scope) error { - return autoConvert_federation_Cluster_To_v1beta1_Cluster(in, out, s) -} - -func autoConvert_v1beta1_ClusterCondition_To_federation_ClusterCondition(in *ClusterCondition, out *federation.ClusterCondition, s conversion.Scope) error { - out.Type = federation.ClusterConditionType(in.Type) - out.Status = api.ConditionStatus(in.Status) - out.LastProbeTime = in.LastProbeTime - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -// Convert_v1beta1_ClusterCondition_To_federation_ClusterCondition is an autogenerated conversion function. 
-func Convert_v1beta1_ClusterCondition_To_federation_ClusterCondition(in *ClusterCondition, out *federation.ClusterCondition, s conversion.Scope) error { - return autoConvert_v1beta1_ClusterCondition_To_federation_ClusterCondition(in, out, s) -} - -func autoConvert_federation_ClusterCondition_To_v1beta1_ClusterCondition(in *federation.ClusterCondition, out *ClusterCondition, s conversion.Scope) error { - out.Type = ClusterConditionType(in.Type) - out.Status = v1.ConditionStatus(in.Status) - out.LastProbeTime = in.LastProbeTime - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -// Convert_federation_ClusterCondition_To_v1beta1_ClusterCondition is an autogenerated conversion function. -func Convert_federation_ClusterCondition_To_v1beta1_ClusterCondition(in *federation.ClusterCondition, out *ClusterCondition, s conversion.Scope) error { - return autoConvert_federation_ClusterCondition_To_v1beta1_ClusterCondition(in, out, s) -} - -func autoConvert_v1beta1_ClusterList_To_federation_ClusterList(in *ClusterList, out *federation.ClusterList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]federation.Cluster)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1beta1_ClusterList_To_federation_ClusterList is an autogenerated conversion function. -func Convert_v1beta1_ClusterList_To_federation_ClusterList(in *ClusterList, out *federation.ClusterList, s conversion.Scope) error { - return autoConvert_v1beta1_ClusterList_To_federation_ClusterList(in, out, s) -} - -func autoConvert_federation_ClusterList_To_v1beta1_ClusterList(in *federation.ClusterList, out *ClusterList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]Cluster)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_federation_ClusterList_To_v1beta1_ClusterList is an autogenerated conversion function. -func Convert_federation_ClusterList_To_v1beta1_ClusterList(in *federation.ClusterList, out *ClusterList, s conversion.Scope) error { - return autoConvert_federation_ClusterList_To_v1beta1_ClusterList(in, out, s) -} - -func autoConvert_v1beta1_ClusterSpec_To_federation_ClusterSpec(in *ClusterSpec, out *federation.ClusterSpec, s conversion.Scope) error { - out.ServerAddressByClientCIDRs = *(*[]federation.ServerAddressByClientCIDR)(unsafe.Pointer(&in.ServerAddressByClientCIDRs)) - out.SecretRef = (*api.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) - return nil -} - -// Convert_v1beta1_ClusterSpec_To_federation_ClusterSpec is an autogenerated conversion function. -func Convert_v1beta1_ClusterSpec_To_federation_ClusterSpec(in *ClusterSpec, out *federation.ClusterSpec, s conversion.Scope) error { - return autoConvert_v1beta1_ClusterSpec_To_federation_ClusterSpec(in, out, s) -} - -func autoConvert_federation_ClusterSpec_To_v1beta1_ClusterSpec(in *federation.ClusterSpec, out *ClusterSpec, s conversion.Scope) error { - out.ServerAddressByClientCIDRs = *(*[]ServerAddressByClientCIDR)(unsafe.Pointer(&in.ServerAddressByClientCIDRs)) - out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) - return nil -} - -// Convert_federation_ClusterSpec_To_v1beta1_ClusterSpec is an autogenerated conversion function. 
-func Convert_federation_ClusterSpec_To_v1beta1_ClusterSpec(in *federation.ClusterSpec, out *ClusterSpec, s conversion.Scope) error { - return autoConvert_federation_ClusterSpec_To_v1beta1_ClusterSpec(in, out, s) -} - -func autoConvert_v1beta1_ClusterStatus_To_federation_ClusterStatus(in *ClusterStatus, out *federation.ClusterStatus, s conversion.Scope) error { - out.Conditions = *(*[]federation.ClusterCondition)(unsafe.Pointer(&in.Conditions)) - out.Zones = *(*[]string)(unsafe.Pointer(&in.Zones)) - out.Region = in.Region - return nil -} - -// Convert_v1beta1_ClusterStatus_To_federation_ClusterStatus is an autogenerated conversion function. -func Convert_v1beta1_ClusterStatus_To_federation_ClusterStatus(in *ClusterStatus, out *federation.ClusterStatus, s conversion.Scope) error { - return autoConvert_v1beta1_ClusterStatus_To_federation_ClusterStatus(in, out, s) -} - -func autoConvert_federation_ClusterStatus_To_v1beta1_ClusterStatus(in *federation.ClusterStatus, out *ClusterStatus, s conversion.Scope) error { - out.Conditions = *(*[]ClusterCondition)(unsafe.Pointer(&in.Conditions)) - out.Zones = *(*[]string)(unsafe.Pointer(&in.Zones)) - out.Region = in.Region - return nil -} - -// Convert_federation_ClusterStatus_To_v1beta1_ClusterStatus is an autogenerated conversion function. -func Convert_federation_ClusterStatus_To_v1beta1_ClusterStatus(in *federation.ClusterStatus, out *ClusterStatus, s conversion.Scope) error { - return autoConvert_federation_ClusterStatus_To_v1beta1_ClusterStatus(in, out, s) -} - -func autoConvert_v1beta1_ServerAddressByClientCIDR_To_federation_ServerAddressByClientCIDR(in *ServerAddressByClientCIDR, out *federation.ServerAddressByClientCIDR, s conversion.Scope) error { - out.ClientCIDR = in.ClientCIDR - out.ServerAddress = in.ServerAddress - return nil -} - -// Convert_v1beta1_ServerAddressByClientCIDR_To_federation_ServerAddressByClientCIDR is an autogenerated conversion function. -func Convert_v1beta1_ServerAddressByClientCIDR_To_federation_ServerAddressByClientCIDR(in *ServerAddressByClientCIDR, out *federation.ServerAddressByClientCIDR, s conversion.Scope) error { - return autoConvert_v1beta1_ServerAddressByClientCIDR_To_federation_ServerAddressByClientCIDR(in, out, s) -} - -func autoConvert_federation_ServerAddressByClientCIDR_To_v1beta1_ServerAddressByClientCIDR(in *federation.ServerAddressByClientCIDR, out *ServerAddressByClientCIDR, s conversion.Scope) error { - out.ClientCIDR = in.ClientCIDR - out.ServerAddress = in.ServerAddress - return nil -} - -// Convert_federation_ServerAddressByClientCIDR_To_v1beta1_ServerAddressByClientCIDR is an autogenerated conversion function. -func Convert_federation_ServerAddressByClientCIDR_To_v1beta1_ServerAddressByClientCIDR(in *federation.ServerAddressByClientCIDR, out *ServerAddressByClientCIDR, s conversion.Scope) error { - return autoConvert_federation_ServerAddressByClientCIDR_To_v1beta1_ServerAddressByClientCIDR(in, out, s) -} diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/zz_generated.deepcopy.go deleted file mode 100644 index 716b3af29f23..000000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/zz_generated.deepcopy.go +++ /dev/null @@ -1,245 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package v1beta1 - -import ( - v1 "k8s.io/api/core/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" -) - -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Cluster).DeepCopyInto(out.(*Cluster)) - return nil - }, InType: reflect.TypeOf(&Cluster{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterCondition).DeepCopyInto(out.(*ClusterCondition)) - return nil - }, InType: reflect.TypeOf(&ClusterCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterList).DeepCopyInto(out.(*ClusterList)) - return nil - }, InType: reflect.TypeOf(&ClusterList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterSelectorRequirement).DeepCopyInto(out.(*ClusterSelectorRequirement)) - return nil - }, InType: reflect.TypeOf(&ClusterSelectorRequirement{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterSpec).DeepCopyInto(out.(*ClusterSpec)) - return nil - }, InType: reflect.TypeOf(&ClusterSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterStatus).DeepCopyInto(out.(*ClusterStatus)) - return nil - }, InType: reflect.TypeOf(&ClusterStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServerAddressByClientCIDR).DeepCopyInto(out.(*ServerAddressByClientCIDR)) - return nil - }, InType: reflect.TypeOf(&ServerAddressByClientCIDR{})}, - ) -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Cluster) DeepCopyInto(out *Cluster) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. -func (in *Cluster) DeepCopy() *Cluster { - if in == nil { - return nil - } - out := new(Cluster) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *Cluster) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterCondition) DeepCopyInto(out *ClusterCondition) { - *out = *in - in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCondition. -func (in *ClusterCondition) DeepCopy() *ClusterCondition { - if in == nil { - return nil - } - out := new(ClusterCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterList) DeepCopyInto(out *ClusterList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Cluster, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterList. -func (in *ClusterList) DeepCopy() *ClusterList { - if in == nil { - return nil - } - out := new(ClusterList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterSelectorRequirement) DeepCopyInto(out *ClusterSelectorRequirement) { - *out = *in - if in.Values != nil { - in, out := &in.Values, &out.Values - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSelectorRequirement. -func (in *ClusterSelectorRequirement) DeepCopy() *ClusterSelectorRequirement { - if in == nil { - return nil - } - out := new(ClusterSelectorRequirement) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { - *out = *in - if in.ServerAddressByClientCIDRs != nil { - in, out := &in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs - *out = make([]ServerAddressByClientCIDR, len(*in)) - copy(*out, *in) - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - if *in == nil { - *out = nil - } else { - *out = new(v1.LocalObjectReference) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec. -func (in *ClusterSpec) DeepCopy() *ClusterSpec { - if in == nil { - return nil - } - out := new(ClusterSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]ClusterCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Zones != nil { - in, out := &in.Zones, &out.Zones - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus. -func (in *ClusterStatus) DeepCopy() *ClusterStatus { - if in == nil { - return nil - } - out := new(ClusterStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServerAddressByClientCIDR) DeepCopyInto(out *ServerAddressByClientCIDR) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerAddressByClientCIDR. -func (in *ServerAddressByClientCIDR) DeepCopy() *ServerAddressByClientCIDR { - if in == nil { - return nil - } - out := new(ServerAddressByClientCIDR) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/federation/apis/federation/zz_generated.deepcopy.go deleted file mode 100644 index a944361d9621..000000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/zz_generated.deepcopy.go +++ /dev/null @@ -1,331 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package federation - -import ( - v1 "k8s.io/api/core/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - api "k8s.io/kubernetes/pkg/api" - reflect "reflect" -) - -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Cluster).DeepCopyInto(out.(*Cluster)) - return nil - }, InType: reflect.TypeOf(&Cluster{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterCondition).DeepCopyInto(out.(*ClusterCondition)) - return nil - }, InType: reflect.TypeOf(&ClusterCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterList).DeepCopyInto(out.(*ClusterList)) - return nil - }, InType: reflect.TypeOf(&ClusterList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterPreferences).DeepCopyInto(out.(*ClusterPreferences)) - return nil - }, InType: reflect.TypeOf(&ClusterPreferences{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterServiceIngress).DeepCopyInto(out.(*ClusterServiceIngress)) - return nil - }, InType: reflect.TypeOf(&ClusterServiceIngress{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterSpec).DeepCopyInto(out.(*ClusterSpec)) - return nil - }, InType: reflect.TypeOf(&ClusterSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterStatus).DeepCopyInto(out.(*ClusterStatus)) - return nil - }, InType: reflect.TypeOf(&ClusterStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*FederatedServiceIngress).DeepCopyInto(out.(*FederatedServiceIngress)) - return nil - }, InType: reflect.TypeOf(&FederatedServiceIngress{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicaAllocationPreferences).DeepCopyInto(out.(*ReplicaAllocationPreferences)) - return nil - }, InType: reflect.TypeOf(&ReplicaAllocationPreferences{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServerAddressByClientCIDR).DeepCopyInto(out.(*ServerAddressByClientCIDR)) - return nil - }, InType: reflect.TypeOf(&ServerAddressByClientCIDR{})}, - ) -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Cluster) DeepCopyInto(out *Cluster) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. -func (in *Cluster) DeepCopy() *Cluster { - if in == nil { - return nil - } - out := new(Cluster) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Cluster) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ClusterCondition) DeepCopyInto(out *ClusterCondition) { - *out = *in - in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCondition. -func (in *ClusterCondition) DeepCopy() *ClusterCondition { - if in == nil { - return nil - } - out := new(ClusterCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterList) DeepCopyInto(out *ClusterList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Cluster, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterList. -func (in *ClusterList) DeepCopy() *ClusterList { - if in == nil { - return nil - } - out := new(ClusterList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterPreferences) DeepCopyInto(out *ClusterPreferences) { - *out = *in - if in.MaxReplicas != nil { - in, out := &in.MaxReplicas, &out.MaxReplicas - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterPreferences. -func (in *ClusterPreferences) DeepCopy() *ClusterPreferences { - if in == nil { - return nil - } - out := new(ClusterPreferences) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterServiceIngress) DeepCopyInto(out *ClusterServiceIngress) { - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]v1.LoadBalancerIngress, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterServiceIngress. -func (in *ClusterServiceIngress) DeepCopy() *ClusterServiceIngress { - if in == nil { - return nil - } - out := new(ClusterServiceIngress) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { - *out = *in - if in.ServerAddressByClientCIDRs != nil { - in, out := &in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs - *out = make([]ServerAddressByClientCIDR, len(*in)) - copy(*out, *in) - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - if *in == nil { - *out = nil - } else { - *out = new(api.LocalObjectReference) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec. 
-func (in *ClusterSpec) DeepCopy() *ClusterSpec { - if in == nil { - return nil - } - out := new(ClusterSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]ClusterCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Zones != nil { - in, out := &in.Zones, &out.Zones - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus. -func (in *ClusterStatus) DeepCopy() *ClusterStatus { - if in == nil { - return nil - } - out := new(ClusterStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FederatedServiceIngress) DeepCopyInto(out *FederatedServiceIngress) { - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ClusterServiceIngress, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FederatedServiceIngress. -func (in *FederatedServiceIngress) DeepCopy() *FederatedServiceIngress { - if in == nil { - return nil - } - out := new(FederatedServiceIngress) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicaAllocationPreferences) DeepCopyInto(out *ReplicaAllocationPreferences) { - *out = *in - if in.Clusters != nil { - in, out := &in.Clusters, &out.Clusters - *out = make(map[string]ClusterPreferences, len(*in)) - for key, val := range *in { - newVal := new(ClusterPreferences) - val.DeepCopyInto(newVal) - (*out)[key] = *newVal - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaAllocationPreferences. -func (in *ReplicaAllocationPreferences) DeepCopy() *ReplicaAllocationPreferences { - if in == nil { - return nil - } - out := new(ReplicaAllocationPreferences) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServerAddressByClientCIDR) DeepCopyInto(out *ServerAddressByClientCIDR) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerAddressByClientCIDR. 
-func (in *ServerAddressByClientCIDR) DeepCopy() *ServerAddressByClientCIDR { - if in == nil { - return nil - } - out := new(ServerAddressByClientCIDR) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/BUILD b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/BUILD deleted file mode 100644 index 751c58a76183..000000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/BUILD +++ /dev/null @@ -1,49 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "clientset.go", - "doc.go", - "import_known_versions.go", - ], - deps = [ - "//federation/apis/federation/install:go_default_library", - "//federation/client/clientset_generated/federation_clientset/typed/autoscaling/v1:go_default_library", - "//federation/client/clientset_generated/federation_clientset/typed/batch/v1:go_default_library", - "//federation/client/clientset_generated/federation_clientset/typed/core/v1:go_default_library", - "//federation/client/clientset_generated/federation_clientset/typed/extensions/v1beta1:go_default_library", - "//federation/client/clientset_generated/federation_clientset/typed/federation/v1beta1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/client-go/discovery:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/client-go/util/flowcontrol:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [ - ":package-srcs", - "//federation/client/clientset_generated/federation_clientset/fake:all-srcs", - "//federation/client/clientset_generated/federation_clientset/scheme:all-srcs", - "//federation/client/clientset_generated/federation_clientset/typed/autoscaling/v1:all-srcs", - "//federation/client/clientset_generated/federation_clientset/typed/batch/v1:all-srcs", - "//federation/client/clientset_generated/federation_clientset/typed/core/v1:all-srcs", - "//federation/client/clientset_generated/federation_clientset/typed/extensions/v1beta1:all-srcs", - "//federation/client/clientset_generated/federation_clientset/typed/federation/v1beta1:all-srcs", - ], - tags = ["automanaged"], -) diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/clientset.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/clientset.go deleted file mode 100644 index 38b461c111a0..000000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/clientset.go +++ /dev/null @@ -1,186 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package federation_clientset - -import ( - glog "github.com/golang/glog" - discovery "k8s.io/client-go/discovery" - rest "k8s.io/client-go/rest" - flowcontrol "k8s.io/client-go/util/flowcontrol" - autoscalingv1 "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/autoscaling/v1" - batchv1 "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/batch/v1" - corev1 "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/core/v1" - extensionsv1beta1 "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/extensions/v1beta1" - federationv1beta1 "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/federation/v1beta1" -) - -type Interface interface { - Discovery() discovery.DiscoveryInterface - AutoscalingV1() autoscalingv1.AutoscalingV1Interface - // Deprecated: please explicitly pick a version if possible. - Autoscaling() autoscalingv1.AutoscalingV1Interface - BatchV1() batchv1.BatchV1Interface - // Deprecated: please explicitly pick a version if possible. - Batch() batchv1.BatchV1Interface - CoreV1() corev1.CoreV1Interface - // Deprecated: please explicitly pick a version if possible. - Core() corev1.CoreV1Interface - ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface - // Deprecated: please explicitly pick a version if possible. - Extensions() extensionsv1beta1.ExtensionsV1beta1Interface - FederationV1beta1() federationv1beta1.FederationV1beta1Interface - // Deprecated: please explicitly pick a version if possible. - Federation() federationv1beta1.FederationV1beta1Interface -} - -// Clientset contains the clients for groups. Each group has exactly one -// version included in a Clientset. -type Clientset struct { - *discovery.DiscoveryClient - autoscalingV1 *autoscalingv1.AutoscalingV1Client - batchV1 *batchv1.BatchV1Client - coreV1 *corev1.CoreV1Client - extensionsV1beta1 *extensionsv1beta1.ExtensionsV1beta1Client - federationV1beta1 *federationv1beta1.FederationV1beta1Client -} - -// AutoscalingV1 retrieves the AutoscalingV1Client -func (c *Clientset) AutoscalingV1() autoscalingv1.AutoscalingV1Interface { - return c.autoscalingV1 -} - -// Deprecated: Autoscaling retrieves the default version of AutoscalingClient. -// Please explicitly pick a version. -func (c *Clientset) Autoscaling() autoscalingv1.AutoscalingV1Interface { - return c.autoscalingV1 -} - -// BatchV1 retrieves the BatchV1Client -func (c *Clientset) BatchV1() batchv1.BatchV1Interface { - return c.batchV1 -} - -// Deprecated: Batch retrieves the default version of BatchClient. -// Please explicitly pick a version. -func (c *Clientset) Batch() batchv1.BatchV1Interface { - return c.batchV1 -} - -// CoreV1 retrieves the CoreV1Client -func (c *Clientset) CoreV1() corev1.CoreV1Interface { - return c.coreV1 -} - -// Deprecated: Core retrieves the default version of CoreClient. -// Please explicitly pick a version. -func (c *Clientset) Core() corev1.CoreV1Interface { - return c.coreV1 -} - -// ExtensionsV1beta1 retrieves the ExtensionsV1beta1Client -func (c *Clientset) ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface { - return c.extensionsV1beta1 -} - -// Deprecated: Extensions retrieves the default version of ExtensionsClient. -// Please explicitly pick a version. 
-func (c *Clientset) Extensions() extensionsv1beta1.ExtensionsV1beta1Interface { - return c.extensionsV1beta1 -} - -// FederationV1beta1 retrieves the FederationV1beta1Client -func (c *Clientset) FederationV1beta1() federationv1beta1.FederationV1beta1Interface { - return c.federationV1beta1 -} - -// Deprecated: Federation retrieves the default version of FederationClient. -// Please explicitly pick a version. -func (c *Clientset) Federation() federationv1beta1.FederationV1beta1Interface { - return c.federationV1beta1 -} - -// Discovery retrieves the DiscoveryClient -func (c *Clientset) Discovery() discovery.DiscoveryInterface { - if c == nil { - return nil - } - return c.DiscoveryClient -} - -// NewForConfig creates a new Clientset for the given config. -func NewForConfig(c *rest.Config) (*Clientset, error) { - configShallowCopy := *c - if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { - configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) - } - var cs Clientset - var err error - cs.autoscalingV1, err = autoscalingv1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.batchV1, err = batchv1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.coreV1, err = corev1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.extensionsV1beta1, err = extensionsv1beta1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.federationV1beta1, err = federationv1beta1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - - cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) - if err != nil { - glog.Errorf("failed to create the DiscoveryClient: %v", err) - return nil, err - } - return &cs, nil -} - -// NewForConfigOrDie creates a new Clientset for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *Clientset { - var cs Clientset - cs.autoscalingV1 = autoscalingv1.NewForConfigOrDie(c) - cs.batchV1 = batchv1.NewForConfigOrDie(c) - cs.coreV1 = corev1.NewForConfigOrDie(c) - cs.extensionsV1beta1 = extensionsv1beta1.NewForConfigOrDie(c) - cs.federationV1beta1 = federationv1beta1.NewForConfigOrDie(c) - - cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) - return &cs -} - -// New creates a new Clientset for the given RESTClient. -func New(c rest.Interface) *Clientset { - var cs Clientset - cs.autoscalingV1 = autoscalingv1.New(c) - cs.batchV1 = batchv1.New(c) - cs.coreV1 = corev1.New(c) - cs.extensionsV1beta1 = extensionsv1beta1.New(c) - cs.federationV1beta1 = federationv1beta1.New(c) - - cs.DiscoveryClient = discovery.NewDiscoveryClient(c) - return &cs -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/doc.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/doc.go deleted file mode 100644 index 3dd8ea7d7de9..000000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This package is generated by client-gen with custom arguments. - -// This package has the automatically generated clientset. -package federation_clientset diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/import_known_versions.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/import_known_versions.go deleted file mode 100644 index eb07236607e8..000000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/import_known_versions.go +++ /dev/null @@ -1,25 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package federation_clientset - -// These imports are the API groups the client will support. -import ( - _ "k8s.io/kubernetes/federation/apis/federation/install" -) - -func init() { -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/scheme/BUILD b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/scheme/BUILD deleted file mode 100644 index dd6f3c565f5c..000000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/scheme/BUILD +++ /dev/null @@ -1,38 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "register.go", - ], - deps = [ - "//federation/apis/federation/v1beta1:go_default_library", - "//vendor/k8s.io/api/autoscaling/v1:go_default_library", - "//vendor/k8s.io/api/batch/v1:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/scheme/doc.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/scheme/doc.go deleted file mode 100644 index 5d8ec824f0fb..000000000000 --- 
a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/scheme/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This package is generated by client-gen with custom arguments. - -// This package contains the scheme of the automatically generated clientset. -package scheme diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/scheme/register.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/scheme/register.go deleted file mode 100644 index db48b8e89b96..000000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/scheme/register.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package scheme - -import ( - autoscalingv1 "k8s.io/api/autoscaling/v1" - batchv1 "k8s.io/api/batch/v1" - corev1 "k8s.io/api/core/v1" - extensionsv1beta1 "k8s.io/api/extensions/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - schema "k8s.io/apimachinery/pkg/runtime/schema" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - federationv1beta1 "k8s.io/kubernetes/federation/apis/federation/v1beta1" -) - -var Scheme = runtime.NewScheme() -var Codecs = serializer.NewCodecFactory(Scheme) -var ParameterCodec = runtime.NewParameterCodec(Scheme) - -func init() { - v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) - AddToScheme(Scheme) -} - -// AddToScheme adds all types of this clientset into the given scheme. This allows composition -// of clientsets, like in: -// -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kuberentes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) -// -// kclientset, _ := kubernetes.NewForConfig(c) -// aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) -// -// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types -// correctly. 
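A compact sketch of the composition pattern the AddToScheme comment above describes, using the same group registrations as the register.go being deleted just below. The function name is illustrative; the ignored AddToScheme return values mirror the generated file.

package example

import (
    autoscalingv1 "k8s.io/api/autoscaling/v1"
    batchv1 "k8s.io/api/batch/v1"
    corev1 "k8s.io/api/core/v1"
    extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/serializer"
    federationv1beta1 "k8s.io/kubernetes/federation/apis/federation/v1beta1"
)

// newFederationScheme mirrors the deleted scheme/register.go: one shared
// runtime.Scheme that every typed group client registers its API types into,
// plus a codec factory built from it for (de)serialization.
func newFederationScheme() (*runtime.Scheme, serializer.CodecFactory) {
    scheme := runtime.NewScheme()
    // Each AddToScheme call registers one group/version's Go types; the
    // generated code ignores the returned values, as done here.
    autoscalingv1.AddToScheme(scheme)
    batchv1.AddToScheme(scheme)
    corev1.AddToScheme(scheme)
    extensionsv1beta1.AddToScheme(scheme)
    federationv1beta1.AddToScheme(scheme)
    return scheme, serializer.NewCodecFactory(scheme)
}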
-func AddToScheme(scheme *runtime.Scheme) { - autoscalingv1.AddToScheme(scheme) - batchv1.AddToScheme(scheme) - corev1.AddToScheme(scheme) - extensionsv1beta1.AddToScheme(scheme) - federationv1beta1.AddToScheme(scheme) - -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/autoscaling/v1/BUILD b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/autoscaling/v1/BUILD deleted file mode 100644 index 35da55d20af6..000000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/autoscaling/v1/BUILD +++ /dev/null @@ -1,41 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "autoscaling_client.go", - "doc.go", - "generated_expansion.go", - "horizontalpodautoscaler.go", - ], - deps = [ - "//federation/client/clientset_generated/federation_clientset/scheme:go_default_library", - "//vendor/k8s.io/api/autoscaling/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [ - ":package-srcs", - "//federation/client/clientset_generated/federation_clientset/typed/autoscaling/v1/fake:all-srcs", - ], - tags = ["automanaged"], -) diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/autoscaling/v1/autoscaling_client.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/autoscaling/v1/autoscaling_client.go deleted file mode 100644 index af02fdc6efb0..000000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/autoscaling/v1/autoscaling_client.go +++ /dev/null @@ -1,88 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - v1 "k8s.io/api/autoscaling/v1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - rest "k8s.io/client-go/rest" - "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/scheme" -) - -type AutoscalingV1Interface interface { - RESTClient() rest.Interface - HorizontalPodAutoscalersGetter -} - -// AutoscalingV1Client is used to interact with features provided by the autoscaling group. 
-type AutoscalingV1Client struct { - restClient rest.Interface -} - -func (c *AutoscalingV1Client) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface { - return newHorizontalPodAutoscalers(c, namespace) -} - -// NewForConfig creates a new AutoscalingV1Client for the given config. -func NewForConfig(c *rest.Config) (*AutoscalingV1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &AutoscalingV1Client{client}, nil -} - -// NewForConfigOrDie creates a new AutoscalingV1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *AutoscalingV1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new AutoscalingV1Client for the given RESTClient. -func New(c rest.Interface) *AutoscalingV1Client { - return &AutoscalingV1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *AutoscalingV1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/autoscaling/v1/doc.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/autoscaling/v1/doc.go deleted file mode 100644 index 54673bfa7382..000000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/autoscaling/v1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This package is generated by client-gen with custom arguments. - -// This package has the automatically generated typed clients. -package v1 diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/autoscaling/v1/generated_expansion.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/autoscaling/v1/generated_expansion.go deleted file mode 100644 index effefbd50b6c..000000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/autoscaling/v1/generated_expansion.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -type HorizontalPodAutoscalerExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/autoscaling/v1/horizontalpodautoscaler.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/autoscaling/v1/horizontalpodautoscaler.go deleted file mode 100644 index 980221240cc0..000000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/autoscaling/v1/horizontalpodautoscaler.go +++ /dev/null @@ -1,172 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - v1 "k8s.io/api/autoscaling/v1" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - scheme "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/scheme" -) - -// HorizontalPodAutoscalersGetter has a method to return a HorizontalPodAutoscalerInterface. -// A group's client should implement this interface. -type HorizontalPodAutoscalersGetter interface { - HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface -} - -// HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources. 
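A rough usage sketch, not part of the diff, of the typed interface defined next: every resource client in these deleted packages exposes the same verb surface (Create, Update, UpdateStatus, Delete, DeleteCollection, Get, List, Watch, Patch) and is obtained from its group client. The AutoscalingV1() accessor and the "default" namespace are assumptions; cs is an already-constructed federation clientset.

package example

import (
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset"
)

// listHPAs walks the HorizontalPodAutoscaler client's read path: the group
// client hands out a namespaced resource interface, List returns typed items.
func listHPAs(cs *fedclientset.Clientset) error {
    hpas := cs.AutoscalingV1().HorizontalPodAutoscalers("default")
    list, err := hpas.List(metav1.ListOptions{})
    if err != nil {
        return err
    }
    for _, hpa := range list.Items {
        fmt.Printf("%s: current=%d desired=%d\n",
            hpa.Name, hpa.Status.CurrentReplicas, hpa.Status.DesiredReplicas)
    }
    return nil
}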
-type HorizontalPodAutoscalerInterface interface { - Create(*v1.HorizontalPodAutoscaler) (*v1.HorizontalPodAutoscaler, error) - Update(*v1.HorizontalPodAutoscaler) (*v1.HorizontalPodAutoscaler, error) - UpdateStatus(*v1.HorizontalPodAutoscaler) (*v1.HorizontalPodAutoscaler, error) - Delete(name string, options *meta_v1.DeleteOptions) error - DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error - Get(name string, options meta_v1.GetOptions) (*v1.HorizontalPodAutoscaler, error) - List(opts meta_v1.ListOptions) (*v1.HorizontalPodAutoscalerList, error) - Watch(opts meta_v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error) - HorizontalPodAutoscalerExpansion -} - -// horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface -type horizontalPodAutoscalers struct { - client rest.Interface - ns string -} - -// newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers -func newHorizontalPodAutoscalers(c *AutoscalingV1Client, namespace string) *horizontalPodAutoscalers { - return &horizontalPodAutoscalers{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. -func (c *horizontalPodAutoscalers) Get(name string, options meta_v1.GetOptions) (result *v1.HorizontalPodAutoscaler, err error) { - result = &v1.HorizontalPodAutoscaler{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. -func (c *horizontalPodAutoscalers) List(opts meta_v1.ListOptions) (result *v1.HorizontalPodAutoscalerList, err error) { - result = &v1.HorizontalPodAutoscalerList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. -func (c *horizontalPodAutoscalers) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Create(horizontalPodAutoscaler *v1.HorizontalPodAutoscaler) (result *v1.HorizontalPodAutoscaler, err error) { - result = &v1.HorizontalPodAutoscaler{} - err = c.client.Post(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Body(horizontalPodAutoscaler). - Do(). - Into(result) - return -} - -// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Update(horizontalPodAutoscaler *v1.HorizontalPodAutoscaler) (result *v1.HorizontalPodAutoscaler, err error) { - result = &v1.HorizontalPodAutoscaler{} - err = c.client.Put(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(horizontalPodAutoscaler.Name). 
- Body(horizontalPodAutoscaler). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *horizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *v1.HorizontalPodAutoscaler) (result *v1.HorizontalPodAutoscaler, err error) { - result = &v1.HorizontalPodAutoscaler{} - err = c.client.Put(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(horizontalPodAutoscaler.Name). - SubResource("status"). - Body(horizontalPodAutoscaler). - Do(). - Into(result) - return -} - -// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. -func (c *horizontalPodAutoscalers) Delete(name string, options *meta_v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *horizontalPodAutoscalers) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched horizontalPodAutoscaler. -func (c *horizontalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error) { - result = &v1.HorizontalPodAutoscaler{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). 
- Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/batch/v1/BUILD b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/batch/v1/BUILD deleted file mode 100644 index d878e764d720..000000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/batch/v1/BUILD +++ /dev/null @@ -1,41 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "batch_client.go", - "doc.go", - "generated_expansion.go", - "job.go", - ], - deps = [ - "//federation/client/clientset_generated/federation_clientset/scheme:go_default_library", - "//vendor/k8s.io/api/batch/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [ - ":package-srcs", - "//federation/client/clientset_generated/federation_clientset/typed/batch/v1/fake:all-srcs", - ], - tags = ["automanaged"], -) diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/batch/v1/batch_client.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/batch/v1/batch_client.go deleted file mode 100644 index d16249a0b115..000000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/batch/v1/batch_client.go +++ /dev/null @@ -1,88 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - v1 "k8s.io/api/batch/v1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - rest "k8s.io/client-go/rest" - "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/scheme" -) - -type BatchV1Interface interface { - RESTClient() rest.Interface - JobsGetter -} - -// BatchV1Client is used to interact with features provided by the batch group. -type BatchV1Client struct { - restClient rest.Interface -} - -func (c *BatchV1Client) Jobs(namespace string) JobInterface { - return newJobs(c, namespace) -} - -// NewForConfig creates a new BatchV1Client for the given config. 
-func NewForConfig(c *rest.Config) (*BatchV1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &BatchV1Client{client}, nil -} - -// NewForConfigOrDie creates a new BatchV1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *BatchV1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new BatchV1Client for the given RESTClient. -func New(c rest.Interface) *BatchV1Client { - return &BatchV1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *BatchV1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/batch/v1/doc.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/batch/v1/doc.go deleted file mode 100644 index 54673bfa7382..000000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/batch/v1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This package is generated by client-gen with custom arguments. - -// This package has the automatically generated typed clients. -package v1 diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/batch/v1/generated_expansion.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/batch/v1/generated_expansion.go deleted file mode 100644 index 68d7741fa0b1..000000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/batch/v1/generated_expansion.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
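A minimal sketch of what the setConfigDefaults pattern in these deleted group clients amounts to: each client pins its GroupVersion, API root, and serializer before building a rest.RESTClient, which is why Jobs are served under /apis/batch/v1/... while the core client further down uses /api. The standalone helper below is illustrative; the generated code reaches the same state inside NewForConfig via rest.RESTClientFor.

package example

import (
    batchv1 "k8s.io/api/batch/v1"
    "k8s.io/apimachinery/pkg/runtime/serializer"
    "k8s.io/client-go/rest"
    "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/scheme"
)

// batchRESTClient mirrors setConfigDefaults in the deleted batch_client.go:
// pin the group/version, the /apis root and the shared codecs, then build the
// low-level REST client that the typed wrappers delegate to.
func batchRESTClient(c *rest.Config) (rest.Interface, error) {
    config := *c
    gv := batchv1.SchemeGroupVersion // batch/v1
    config.GroupVersion = &gv
    config.APIPath = "/apis"
    config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
    if config.UserAgent == "" {
        config.UserAgent = rest.DefaultKubernetesUserAgent()
    }
    return rest.RESTClientFor(&config)
}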
-*/ - -package v1 - -type JobExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/batch/v1/job.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/batch/v1/job.go deleted file mode 100644 index 74e10550f86d..000000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/batch/v1/job.go +++ /dev/null @@ -1,172 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - v1 "k8s.io/api/batch/v1" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - scheme "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/scheme" -) - -// JobsGetter has a method to return a JobInterface. -// A group's client should implement this interface. -type JobsGetter interface { - Jobs(namespace string) JobInterface -} - -// JobInterface has methods to work with Job resources. -type JobInterface interface { - Create(*v1.Job) (*v1.Job, error) - Update(*v1.Job) (*v1.Job, error) - UpdateStatus(*v1.Job) (*v1.Job, error) - Delete(name string, options *meta_v1.DeleteOptions) error - DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error - Get(name string, options meta_v1.GetOptions) (*v1.Job, error) - List(opts meta_v1.ListOptions) (*v1.JobList, error) - Watch(opts meta_v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Job, err error) - JobExpansion -} - -// jobs implements JobInterface -type jobs struct { - client rest.Interface - ns string -} - -// newJobs returns a Jobs -func newJobs(c *BatchV1Client, namespace string) *jobs { - return &jobs{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the job, and returns the corresponding job object, and an error if there is any. -func (c *jobs) Get(name string, options meta_v1.GetOptions) (result *v1.Job, err error) { - result = &v1.Job{} - err = c.client.Get(). - Namespace(c.ns). - Resource("jobs"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Jobs that match those selectors. -func (c *jobs) List(opts meta_v1.ListOptions) (result *v1.JobList, err error) { - result = &v1.JobList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("jobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested jobs. -func (c *jobs) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("jobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a job and creates it. 
Returns the server's representation of the job, and an error, if there is any. -func (c *jobs) Create(job *v1.Job) (result *v1.Job, err error) { - result = &v1.Job{} - err = c.client.Post(). - Namespace(c.ns). - Resource("jobs"). - Body(job). - Do(). - Into(result) - return -} - -// Update takes the representation of a job and updates it. Returns the server's representation of the job, and an error, if there is any. -func (c *jobs) Update(job *v1.Job) (result *v1.Job, err error) { - result = &v1.Job{} - err = c.client.Put(). - Namespace(c.ns). - Resource("jobs"). - Name(job.Name). - Body(job). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *jobs) UpdateStatus(job *v1.Job) (result *v1.Job, err error) { - result = &v1.Job{} - err = c.client.Put(). - Namespace(c.ns). - Resource("jobs"). - Name(job.Name). - SubResource("status"). - Body(job). - Do(). - Into(result) - return -} - -// Delete takes name of the job and deletes it. Returns an error if one occurs. -func (c *jobs) Delete(name string, options *meta_v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("jobs"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *jobs) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("jobs"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched job. -func (c *jobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Job, err error) { - result = &v1.Job{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("jobs"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). 
- Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/core/v1/BUILD b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/core/v1/BUILD deleted file mode 100644 index 078c54b2784a..000000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/core/v1/BUILD +++ /dev/null @@ -1,46 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "configmap.go", - "core_client.go", - "doc.go", - "event.go", - "generated_expansion.go", - "namespace.go", - "namespace_expansion.go", - "secret.go", - "service.go", - ], - deps = [ - "//federation/client/clientset_generated/federation_clientset/scheme:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [ - ":package-srcs", - "//federation/client/clientset_generated/federation_clientset/typed/core/v1/fake:all-srcs", - ], - tags = ["automanaged"], -) diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/core/v1/configmap.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/core/v1/configmap.go deleted file mode 100644 index d1f66980bf2c..000000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/core/v1/configmap.go +++ /dev/null @@ -1,155 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - v1 "k8s.io/api/core/v1" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - scheme "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/scheme" -) - -// ConfigMapsGetter has a method to return a ConfigMapInterface. -// A group's client should implement this interface. -type ConfigMapsGetter interface { - ConfigMaps(namespace string) ConfigMapInterface -} - -// ConfigMapInterface has methods to work with ConfigMap resources. 
-type ConfigMapInterface interface { - Create(*v1.ConfigMap) (*v1.ConfigMap, error) - Update(*v1.ConfigMap) (*v1.ConfigMap, error) - Delete(name string, options *meta_v1.DeleteOptions) error - DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error - Get(name string, options meta_v1.GetOptions) (*v1.ConfigMap, error) - List(opts meta_v1.ListOptions) (*v1.ConfigMapList, error) - Watch(opts meta_v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ConfigMap, err error) - ConfigMapExpansion -} - -// configMaps implements ConfigMapInterface -type configMaps struct { - client rest.Interface - ns string -} - -// newConfigMaps returns a ConfigMaps -func newConfigMaps(c *CoreV1Client, namespace string) *configMaps { - return &configMaps{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the configMap, and returns the corresponding configMap object, and an error if there is any. -func (c *configMaps) Get(name string, options meta_v1.GetOptions) (result *v1.ConfigMap, err error) { - result = &v1.ConfigMap{} - err = c.client.Get(). - Namespace(c.ns). - Resource("configmaps"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ConfigMaps that match those selectors. -func (c *configMaps) List(opts meta_v1.ListOptions) (result *v1.ConfigMapList, err error) { - result = &v1.ConfigMapList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("configmaps"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested configMaps. -func (c *configMaps) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("configmaps"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a configMap and creates it. Returns the server's representation of the configMap, and an error, if there is any. -func (c *configMaps) Create(configMap *v1.ConfigMap) (result *v1.ConfigMap, err error) { - result = &v1.ConfigMap{} - err = c.client.Post(). - Namespace(c.ns). - Resource("configmaps"). - Body(configMap). - Do(). - Into(result) - return -} - -// Update takes the representation of a configMap and updates it. Returns the server's representation of the configMap, and an error, if there is any. -func (c *configMaps) Update(configMap *v1.ConfigMap) (result *v1.ConfigMap, err error) { - result = &v1.ConfigMap{} - err = c.client.Put(). - Namespace(c.ns). - Resource("configmaps"). - Name(configMap.Name). - Body(configMap). - Do(). - Into(result) - return -} - -// Delete takes name of the configMap and deletes it. Returns an error if one occurs. -func (c *configMaps) Delete(name string, options *meta_v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("configmaps"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *configMaps) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("configmaps"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched configMap. 
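A short usage sketch, not part of the diff, of the Patch signature shared by all of these typed clients: the caller supplies raw patch bytes plus a types.PatchType, and the client-side call looks the same regardless of patch flavor. The namespace, object name, and data key below are hypothetical.

package example

import (
    "k8s.io/apimachinery/pkg/types"
    corev1client "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/core/v1"
)

// patchConfigMap applies a strategic-merge patch that sets one data key.
func patchConfigMap(c corev1client.CoreV1Interface) error {
    patch := []byte(`{"data":{"example-key":"example-value"}}`)
    _, err := c.ConfigMaps("default").Patch("app-config", types.StrategicMergePatchType, patch)
    return err
}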
-func (c *configMaps) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ConfigMap, err error) { - result = &v1.ConfigMap{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("configmaps"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/core/v1/core_client.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/core/v1/core_client.go deleted file mode 100644 index a3a96f850457..000000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/core/v1/core_client.go +++ /dev/null @@ -1,108 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - v1 "k8s.io/api/core/v1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - rest "k8s.io/client-go/rest" - "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/scheme" -) - -type CoreV1Interface interface { - RESTClient() rest.Interface - ConfigMapsGetter - EventsGetter - NamespacesGetter - SecretsGetter - ServicesGetter -} - -// CoreV1Client is used to interact with features provided by the group. -type CoreV1Client struct { - restClient rest.Interface -} - -func (c *CoreV1Client) ConfigMaps(namespace string) ConfigMapInterface { - return newConfigMaps(c, namespace) -} - -func (c *CoreV1Client) Events(namespace string) EventInterface { - return newEvents(c, namespace) -} - -func (c *CoreV1Client) Namespaces() NamespaceInterface { - return newNamespaces(c) -} - -func (c *CoreV1Client) Secrets(namespace string) SecretInterface { - return newSecrets(c, namespace) -} - -func (c *CoreV1Client) Services(namespace string) ServiceInterface { - return newServices(c, namespace) -} - -// NewForConfig creates a new CoreV1Client for the given config. -func NewForConfig(c *rest.Config) (*CoreV1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &CoreV1Client{client}, nil -} - -// NewForConfigOrDie creates a new CoreV1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *CoreV1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new CoreV1Client for the given RESTClient. 
-func New(c rest.Interface) *CoreV1Client { - return &CoreV1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/api" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *CoreV1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/core/v1/doc.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/core/v1/doc.go deleted file mode 100644 index 54673bfa7382..000000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/core/v1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This package is generated by client-gen with custom arguments. - -// This package has the automatically generated typed clients. -package v1 diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/core/v1/event.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/core/v1/event.go deleted file mode 100644 index 8c9a91a3481c..000000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/core/v1/event.go +++ /dev/null @@ -1,155 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - v1 "k8s.io/api/core/v1" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - scheme "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/scheme" -) - -// EventsGetter has a method to return a EventInterface. -// A group's client should implement this interface. -type EventsGetter interface { - Events(namespace string) EventInterface -} - -// EventInterface has methods to work with Event resources. 
-type EventInterface interface { - Create(*v1.Event) (*v1.Event, error) - Update(*v1.Event) (*v1.Event, error) - Delete(name string, options *meta_v1.DeleteOptions) error - DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error - Get(name string, options meta_v1.GetOptions) (*v1.Event, error) - List(opts meta_v1.ListOptions) (*v1.EventList, error) - Watch(opts meta_v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Event, err error) - EventExpansion -} - -// events implements EventInterface -type events struct { - client rest.Interface - ns string -} - -// newEvents returns a Events -func newEvents(c *CoreV1Client, namespace string) *events { - return &events{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the event, and returns the corresponding event object, and an error if there is any. -func (c *events) Get(name string, options meta_v1.GetOptions) (result *v1.Event, err error) { - result = &v1.Event{} - err = c.client.Get(). - Namespace(c.ns). - Resource("events"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Events that match those selectors. -func (c *events) List(opts meta_v1.ListOptions) (result *v1.EventList, err error) { - result = &v1.EventList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested events. -func (c *events) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. -func (c *events) Create(event *v1.Event) (result *v1.Event, err error) { - result = &v1.Event{} - err = c.client.Post(). - Namespace(c.ns). - Resource("events"). - Body(event). - Do(). - Into(result) - return -} - -// Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. -func (c *events) Update(event *v1.Event) (result *v1.Event, err error) { - result = &v1.Event{} - err = c.client.Put(). - Namespace(c.ns). - Resource("events"). - Name(event.Name). - Body(event). - Do(). - Into(result) - return -} - -// Delete takes name of the event and deletes it. Returns an error if one occurs. -func (c *events) Delete(name string, options *meta_v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("events"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *events) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched event. -func (c *events) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Event, err error) { - result = &v1.Event{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("events"). - SubResource(subresources...). 
- Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/core/v1/generated_expansion.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/core/v1/generated_expansion.go deleted file mode 100644 index 6c1fd71f351a..000000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/core/v1/generated_expansion.go +++ /dev/null @@ -1,25 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -type ConfigMapExpansion interface{} - -type EventExpansion interface{} - -type SecretExpansion interface{} - -type ServiceExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/core/v1/namespace.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/core/v1/namespace.go deleted file mode 100644 index 34f16aa6d0c7..000000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/core/v1/namespace.go +++ /dev/null @@ -1,161 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - v1 "k8s.io/api/core/v1" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - scheme "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/scheme" -) - -// NamespacesGetter has a method to return a NamespaceInterface. -// A group's client should implement this interface. -type NamespacesGetter interface { - Namespaces() NamespaceInterface -} - -// NamespaceInterface has methods to work with Namespace resources. 
-type NamespaceInterface interface { - Create(*v1.Namespace) (*v1.Namespace, error) - Update(*v1.Namespace) (*v1.Namespace, error) - UpdateStatus(*v1.Namespace) (*v1.Namespace, error) - Delete(name string, options *meta_v1.DeleteOptions) error - DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error - Get(name string, options meta_v1.GetOptions) (*v1.Namespace, error) - List(opts meta_v1.ListOptions) (*v1.NamespaceList, error) - Watch(opts meta_v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Namespace, err error) - NamespaceExpansion -} - -// namespaces implements NamespaceInterface -type namespaces struct { - client rest.Interface -} - -// newNamespaces returns a Namespaces -func newNamespaces(c *CoreV1Client) *namespaces { - return &namespaces{ - client: c.RESTClient(), - } -} - -// Get takes name of the namespace, and returns the corresponding namespace object, and an error if there is any. -func (c *namespaces) Get(name string, options meta_v1.GetOptions) (result *v1.Namespace, err error) { - result = &v1.Namespace{} - err = c.client.Get(). - Resource("namespaces"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Namespaces that match those selectors. -func (c *namespaces) List(opts meta_v1.ListOptions) (result *v1.NamespaceList, err error) { - result = &v1.NamespaceList{} - err = c.client.Get(). - Resource("namespaces"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested namespaces. -func (c *namespaces) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("namespaces"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a namespace and creates it. Returns the server's representation of the namespace, and an error, if there is any. -func (c *namespaces) Create(namespace *v1.Namespace) (result *v1.Namespace, err error) { - result = &v1.Namespace{} - err = c.client.Post(). - Resource("namespaces"). - Body(namespace). - Do(). - Into(result) - return -} - -// Update takes the representation of a namespace and updates it. Returns the server's representation of the namespace, and an error, if there is any. -func (c *namespaces) Update(namespace *v1.Namespace) (result *v1.Namespace, err error) { - result = &v1.Namespace{} - err = c.client.Put(). - Resource("namespaces"). - Name(namespace.Name). - Body(namespace). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *namespaces) UpdateStatus(namespace *v1.Namespace) (result *v1.Namespace, err error) { - result = &v1.Namespace{} - err = c.client.Put(). - Resource("namespaces"). - Name(namespace.Name). - SubResource("status"). - Body(namespace). - Do(). - Into(result) - return -} - -// Delete takes name of the namespace and deletes it. Returns an error if one occurs. -func (c *namespaces) Delete(name string, options *meta_v1.DeleteOptions) error { - return c.client.Delete(). - Resource("namespaces"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *namespaces) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { - return c.client.Delete(). - Resource("namespaces"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched namespace. -func (c *namespaces) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Namespace, err error) { - result = &v1.Namespace{} - err = c.client.Patch(pt). - Resource("namespaces"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/core/v1/namespace_expansion.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/core/v1/namespace_expansion.go deleted file mode 100644 index 17effe29c65a..000000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/core/v1/namespace_expansion.go +++ /dev/null @@ -1,31 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import "k8s.io/api/core/v1" - -// The NamespaceExpansion interface allows manually adding extra methods to the NamespaceInterface. -type NamespaceExpansion interface { - Finalize(item *v1.Namespace) (*v1.Namespace, error) -} - -// Finalize takes the representation of a namespace to update. Returns the server's representation of the namespace, and an error, if it occurs. -func (c *namespaces) Finalize(namespace *v1.Namespace) (result *v1.Namespace, err error) { - result = &v1.Namespace{} - err = c.client.Put().Resource("namespaces").Name(namespace.Name).SubResource("finalize").Body(namespace).Do().Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/core/v1/secret.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/core/v1/secret.go deleted file mode 100644 index 1551a51dedfa..000000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/core/v1/secret.go +++ /dev/null @@ -1,155 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
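A hedged sketch of the expansion pattern shown in the deleted namespace_expansion.go above: each generated interface embeds a <Type>Expansion interface so hand-written helpers such as Finalize(), which PUTs to the finalize subresource, sit alongside the generated verbs. The helper below, and its use of Finalize to clear spec finalizers, is illustrative only.

package example

import (
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    corev1client "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/core/v1"
)

// forceFinalize clears a namespace's spec.finalizers via the finalize
// subresource, the operation the hand-written Finalize expansion wraps.
func forceFinalize(c corev1client.NamespaceInterface, name string) (*v1.Namespace, error) {
    ns, err := c.Get(name, metav1.GetOptions{})
    if err != nil {
        return nil, err
    }
    ns.Spec.Finalizers = nil
    return c.Finalize(ns)
}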
-*/ - -package v1 - -import ( - v1 "k8s.io/api/core/v1" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - scheme "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/scheme" -) - -// SecretsGetter has a method to return a SecretInterface. -// A group's client should implement this interface. -type SecretsGetter interface { - Secrets(namespace string) SecretInterface -} - -// SecretInterface has methods to work with Secret resources. -type SecretInterface interface { - Create(*v1.Secret) (*v1.Secret, error) - Update(*v1.Secret) (*v1.Secret, error) - Delete(name string, options *meta_v1.DeleteOptions) error - DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error - Get(name string, options meta_v1.GetOptions) (*v1.Secret, error) - List(opts meta_v1.ListOptions) (*v1.SecretList, error) - Watch(opts meta_v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Secret, err error) - SecretExpansion -} - -// secrets implements SecretInterface -type secrets struct { - client rest.Interface - ns string -} - -// newSecrets returns a Secrets -func newSecrets(c *CoreV1Client, namespace string) *secrets { - return &secrets{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the secret, and returns the corresponding secret object, and an error if there is any. -func (c *secrets) Get(name string, options meta_v1.GetOptions) (result *v1.Secret, err error) { - result = &v1.Secret{} - err = c.client.Get(). - Namespace(c.ns). - Resource("secrets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Secrets that match those selectors. -func (c *secrets) List(opts meta_v1.ListOptions) (result *v1.SecretList, err error) { - result = &v1.SecretList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("secrets"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested secrets. -func (c *secrets) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("secrets"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a secret and creates it. Returns the server's representation of the secret, and an error, if there is any. -func (c *secrets) Create(secret *v1.Secret) (result *v1.Secret, err error) { - result = &v1.Secret{} - err = c.client.Post(). - Namespace(c.ns). - Resource("secrets"). - Body(secret). - Do(). - Into(result) - return -} - -// Update takes the representation of a secret and updates it. Returns the server's representation of the secret, and an error, if there is any. -func (c *secrets) Update(secret *v1.Secret) (result *v1.Secret, err error) { - result = &v1.Secret{} - err = c.client.Put(). - Namespace(c.ns). - Resource("secrets"). - Name(secret.Name). - Body(secret). - Do(). - Into(result) - return -} - -// Delete takes name of the secret and deletes it. Returns an error if one occurs. -func (c *secrets) Delete(name string, options *meta_v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("secrets"). - Name(name). - Body(options). - Do(). 
- Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *secrets) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("secrets"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched secret. -func (c *secrets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Secret, err error) { - result = &v1.Secret{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("secrets"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/core/v1/service.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/core/v1/service.go deleted file mode 100644 index 47c4ea2407d5..000000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/core/v1/service.go +++ /dev/null @@ -1,172 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - v1 "k8s.io/api/core/v1" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - scheme "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/scheme" -) - -// ServicesGetter has a method to return a ServiceInterface. -// A group's client should implement this interface. -type ServicesGetter interface { - Services(namespace string) ServiceInterface -} - -// ServiceInterface has methods to work with Service resources. -type ServiceInterface interface { - Create(*v1.Service) (*v1.Service, error) - Update(*v1.Service) (*v1.Service, error) - UpdateStatus(*v1.Service) (*v1.Service, error) - Delete(name string, options *meta_v1.DeleteOptions) error - DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error - Get(name string, options meta_v1.GetOptions) (*v1.Service, error) - List(opts meta_v1.ListOptions) (*v1.ServiceList, error) - Watch(opts meta_v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Service, err error) - ServiceExpansion -} - -// services implements ServiceInterface -type services struct { - client rest.Interface - ns string -} - -// newServices returns a Services -func newServices(c *CoreV1Client, namespace string) *services { - return &services{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the service, and returns the corresponding service object, and an error if there is any. -func (c *services) Get(name string, options meta_v1.GetOptions) (result *v1.Service, err error) { - result = &v1.Service{} - err = c.client.Get(). - Namespace(c.ns). - Resource("services"). - Name(name). 
- VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Services that match those selectors. -func (c *services) List(opts meta_v1.ListOptions) (result *v1.ServiceList, err error) { - result = &v1.ServiceList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("services"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested services. -func (c *services) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("services"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a service and creates it. Returns the server's representation of the service, and an error, if there is any. -func (c *services) Create(service *v1.Service) (result *v1.Service, err error) { - result = &v1.Service{} - err = c.client.Post(). - Namespace(c.ns). - Resource("services"). - Body(service). - Do(). - Into(result) - return -} - -// Update takes the representation of a service and updates it. Returns the server's representation of the service, and an error, if there is any. -func (c *services) Update(service *v1.Service) (result *v1.Service, err error) { - result = &v1.Service{} - err = c.client.Put(). - Namespace(c.ns). - Resource("services"). - Name(service.Name). - Body(service). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *services) UpdateStatus(service *v1.Service) (result *v1.Service, err error) { - result = &v1.Service{} - err = c.client.Put(). - Namespace(c.ns). - Resource("services"). - Name(service.Name). - SubResource("status"). - Body(service). - Do(). - Into(result) - return -} - -// Delete takes name of the service and deletes it. Returns an error if one occurs. -func (c *services) Delete(name string, options *meta_v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("services"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *services) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("services"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched service. -func (c *services) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Service, err error) { - result = &v1.Service{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("services"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). 
- Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/extensions/v1beta1/BUILD b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/extensions/v1beta1/BUILD deleted file mode 100644 index b2a7fd93db4d..000000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/extensions/v1beta1/BUILD +++ /dev/null @@ -1,45 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "daemonset.go", - "deployment.go", - "deployment_expansion.go", - "doc.go", - "extensions_client.go", - "generated_expansion.go", - "ingress.go", - "replicaset.go", - ], - deps = [ - "//federation/client/clientset_generated/federation_clientset/scheme:go_default_library", - "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [ - ":package-srcs", - "//federation/client/clientset_generated/federation_clientset/typed/extensions/v1beta1/fake:all-srcs", - ], - tags = ["automanaged"], -) diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/extensions/v1beta1/daemonset.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/extensions/v1beta1/daemonset.go deleted file mode 100644 index f688789d5645..000000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/extensions/v1beta1/daemonset.go +++ /dev/null @@ -1,172 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - scheme "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/scheme" -) - -// DaemonSetsGetter has a method to return a DaemonSetInterface. -// A group's client should implement this interface. -type DaemonSetsGetter interface { - DaemonSets(namespace string) DaemonSetInterface -} - -// DaemonSetInterface has methods to work with DaemonSet resources. 
-type DaemonSetInterface interface { - Create(*v1beta1.DaemonSet) (*v1beta1.DaemonSet, error) - Update(*v1beta1.DaemonSet) (*v1beta1.DaemonSet, error) - UpdateStatus(*v1beta1.DaemonSet) (*v1beta1.DaemonSet, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.DaemonSet, error) - List(opts v1.ListOptions) (*v1beta1.DaemonSetList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.DaemonSet, err error) - DaemonSetExpansion -} - -// daemonSets implements DaemonSetInterface -type daemonSets struct { - client rest.Interface - ns string -} - -// newDaemonSets returns a DaemonSets -func newDaemonSets(c *ExtensionsV1beta1Client, namespace string) *daemonSets { - return &daemonSets{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. -func (c *daemonSets) Get(name string, options v1.GetOptions) (result *v1beta1.DaemonSet, err error) { - result = &v1beta1.DaemonSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of DaemonSets that match those selectors. -func (c *daemonSets) List(opts v1.ListOptions) (result *v1beta1.DaemonSetList, err error) { - result = &v1beta1.DaemonSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested daemonSets. -func (c *daemonSets) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Create(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) { - result = &v1beta1.DaemonSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("daemonsets"). - Body(daemonSet). - Do(). - Into(result) - return -} - -// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Update(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) { - result = &v1beta1.DaemonSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("daemonsets"). - Name(daemonSet.Name). - Body(daemonSet). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *daemonSets) UpdateStatus(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) { - result = &v1beta1.DaemonSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("daemonsets"). - Name(daemonSet.Name). - SubResource("status"). - Body(daemonSet). - Do(). - Into(result) - return -} - -// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. 
-func (c *daemonSets) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *daemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched daemonSet. -func (c *daemonSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.DaemonSet, err error) { - result = &v1beta1.DaemonSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("daemonsets"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/extensions/v1beta1/deployment.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/extensions/v1beta1/deployment.go deleted file mode 100644 index f26630be8f3d..000000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/extensions/v1beta1/deployment.go +++ /dev/null @@ -1,203 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - scheme "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/scheme" -) - -// DeploymentsGetter has a method to return a DeploymentInterface. -// A group's client should implement this interface. -type DeploymentsGetter interface { - Deployments(namespace string) DeploymentInterface -} - -// DeploymentInterface has methods to work with Deployment resources. 
-type DeploymentInterface interface { - Create(*v1beta1.Deployment) (*v1beta1.Deployment, error) - Update(*v1beta1.Deployment) (*v1beta1.Deployment, error) - UpdateStatus(*v1beta1.Deployment) (*v1beta1.Deployment, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.Deployment, error) - List(opts v1.ListOptions) (*v1beta1.DeploymentList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) - GetScale(deploymentName string, options v1.GetOptions) (*v1beta1.Scale, error) - UpdateScale(deploymentName string, scale *v1beta1.Scale) (*v1beta1.Scale, error) - - DeploymentExpansion -} - -// deployments implements DeploymentInterface -type deployments struct { - client rest.Interface - ns string -} - -// newDeployments returns a Deployments -func newDeployments(c *ExtensionsV1beta1Client, namespace string) *deployments { - return &deployments{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. -func (c *deployments) Get(name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Deployments that match those selectors. -func (c *deployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { - result = &v1beta1.DeploymentList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested deployments. -func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Create(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Post(). - Namespace(c.ns). - Resource("deployments"). - Body(deployment). - Do(). - Into(result) - return -} - -// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Update(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - Body(deployment). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *deployments) UpdateStatus(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). 
- Name(deployment.Name). - SubResource("status"). - Body(deployment). - Do(). - Into(result) - return -} - -// Delete takes name of the deployment and deletes it. Returns an error if one occurs. -func (c *deployments) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched deployment. -func (c *deployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("deployments"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} - -// GetScale takes name of the deployment, and returns the corresponding v1beta1.Scale object, and an error if there is any. -func (c *deployments) GetScale(deploymentName string, options v1.GetOptions) (result *v1beta1.Scale, err error) { - result = &v1beta1.Scale{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - Name(deploymentName). - SubResource("scale"). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *deployments) UpdateScale(deploymentName string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) { - result = &v1beta1.Scale{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deploymentName). - SubResource("scale"). - Body(scale). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/extensions/v1beta1/deployment_expansion.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/extensions/v1beta1/deployment_expansion.go deleted file mode 100644 index 24734be6a6e6..000000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/extensions/v1beta1/deployment_expansion.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import "k8s.io/api/extensions/v1beta1" - -// The DeploymentExpansion interface allows manually adding extra methods to the DeploymentInterface. -type DeploymentExpansion interface { - Rollback(*v1beta1.DeploymentRollback) error -} - -// Rollback applied the provided DeploymentRollback to the named deployment in the current namespace. 
-func (c *deployments) Rollback(deploymentRollback *v1beta1.DeploymentRollback) error { - return c.client.Post().Namespace(c.ns).Resource("deployments").Name(deploymentRollback.Name).SubResource("rollback").Body(deploymentRollback).Do().Error() -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/extensions/v1beta1/doc.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/extensions/v1beta1/doc.go deleted file mode 100644 index 11b523897207..000000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/extensions/v1beta1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This package is generated by client-gen with custom arguments. - -// This package has the automatically generated typed clients. -package v1beta1 diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/extensions/v1beta1/extensions_client.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/extensions/v1beta1/extensions_client.go deleted file mode 100644 index 2f50f3bb02b5..000000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/extensions/v1beta1/extensions_client.go +++ /dev/null @@ -1,103 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - rest "k8s.io/client-go/rest" - "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/scheme" -) - -type ExtensionsV1beta1Interface interface { - RESTClient() rest.Interface - DaemonSetsGetter - DeploymentsGetter - IngressesGetter - ReplicaSetsGetter -} - -// ExtensionsV1beta1Client is used to interact with features provided by the extensions group. 
-type ExtensionsV1beta1Client struct { - restClient rest.Interface -} - -func (c *ExtensionsV1beta1Client) DaemonSets(namespace string) DaemonSetInterface { - return newDaemonSets(c, namespace) -} - -func (c *ExtensionsV1beta1Client) Deployments(namespace string) DeploymentInterface { - return newDeployments(c, namespace) -} - -func (c *ExtensionsV1beta1Client) Ingresses(namespace string) IngressInterface { - return newIngresses(c, namespace) -} - -func (c *ExtensionsV1beta1Client) ReplicaSets(namespace string) ReplicaSetInterface { - return newReplicaSets(c, namespace) -} - -// NewForConfig creates a new ExtensionsV1beta1Client for the given config. -func NewForConfig(c *rest.Config) (*ExtensionsV1beta1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &ExtensionsV1beta1Client{client}, nil -} - -// NewForConfigOrDie creates a new ExtensionsV1beta1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *ExtensionsV1beta1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new ExtensionsV1beta1Client for the given RESTClient. -func New(c rest.Interface) *ExtensionsV1beta1Client { - return &ExtensionsV1beta1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *ExtensionsV1beta1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/extensions/v1beta1/generated_expansion.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/extensions/v1beta1/generated_expansion.go deleted file mode 100644 index e67c4e302502..000000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/extensions/v1beta1/generated_expansion.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1beta1 - -type DaemonSetExpansion interface{} - -type IngressExpansion interface{} - -type ReplicaSetExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/extensions/v1beta1/ingress.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/extensions/v1beta1/ingress.go deleted file mode 100644 index 2b5e1f071e63..000000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/extensions/v1beta1/ingress.go +++ /dev/null @@ -1,172 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - scheme "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/scheme" -) - -// IngressesGetter has a method to return a IngressInterface. -// A group's client should implement this interface. -type IngressesGetter interface { - Ingresses(namespace string) IngressInterface -} - -// IngressInterface has methods to work with Ingress resources. -type IngressInterface interface { - Create(*v1beta1.Ingress) (*v1beta1.Ingress, error) - Update(*v1beta1.Ingress) (*v1beta1.Ingress, error) - UpdateStatus(*v1beta1.Ingress) (*v1beta1.Ingress, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.Ingress, error) - List(opts v1.ListOptions) (*v1beta1.IngressList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Ingress, err error) - IngressExpansion -} - -// ingresses implements IngressInterface -type ingresses struct { - client rest.Interface - ns string -} - -// newIngresses returns a Ingresses -func newIngresses(c *ExtensionsV1beta1Client, namespace string) *ingresses { - return &ingresses{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. -func (c *ingresses) Get(name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Ingresses that match those selectors. -func (c *ingresses) List(opts v1.ListOptions) (result *v1beta1.IngressList, err error) { - result = &v1beta1.IngressList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). 
- Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ingresses. -func (c *ingresses) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Create(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Post(). - Namespace(c.ns). - Resource("ingresses"). - Body(ingress). - Do(). - Into(result) - return -} - -// Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Update(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ingresses"). - Name(ingress.Name). - Body(ingress). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *ingresses) UpdateStatus(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ingresses"). - Name(ingress.Name). - SubResource("status"). - Body(ingress). - Do(). - Into(result) - return -} - -// Delete takes name of the ingress and deletes it. Returns an error if one occurs. -func (c *ingresses) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *ingresses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched ingress. -func (c *ingresses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("ingresses"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/extensions/v1beta1/replicaset.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/extensions/v1beta1/replicaset.go deleted file mode 100644 index 390387807391..000000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/extensions/v1beta1/replicaset.go +++ /dev/null @@ -1,203 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - scheme "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/scheme" -) - -// ReplicaSetsGetter has a method to return a ReplicaSetInterface. -// A group's client should implement this interface. -type ReplicaSetsGetter interface { - ReplicaSets(namespace string) ReplicaSetInterface -} - -// ReplicaSetInterface has methods to work with ReplicaSet resources. -type ReplicaSetInterface interface { - Create(*v1beta1.ReplicaSet) (*v1beta1.ReplicaSet, error) - Update(*v1beta1.ReplicaSet) (*v1beta1.ReplicaSet, error) - UpdateStatus(*v1beta1.ReplicaSet) (*v1beta1.ReplicaSet, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.ReplicaSet, error) - List(opts v1.ListOptions) (*v1beta1.ReplicaSetList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ReplicaSet, err error) - GetScale(replicaSetName string, options v1.GetOptions) (*v1beta1.Scale, error) - UpdateScale(replicaSetName string, scale *v1beta1.Scale) (*v1beta1.Scale, error) - - ReplicaSetExpansion -} - -// replicaSets implements ReplicaSetInterface -type replicaSets struct { - client rest.Interface - ns string -} - -// newReplicaSets returns a ReplicaSets -func newReplicaSets(c *ExtensionsV1beta1Client, namespace string) *replicaSets { - return &replicaSets{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. -func (c *replicaSets) Get(name string, options v1.GetOptions) (result *v1beta1.ReplicaSet, err error) { - result = &v1beta1.ReplicaSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. -func (c *replicaSets) List(opts v1.ListOptions) (result *v1beta1.ReplicaSetList, err error) { - result = &v1beta1.ReplicaSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested replicaSets. -func (c *replicaSets) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. 
-func (c *replicaSets) Create(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { - result = &v1beta1.ReplicaSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("replicasets"). - Body(replicaSet). - Do(). - Into(result) - return -} - -// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Update(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { - result = &v1beta1.ReplicaSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSet.Name). - Body(replicaSet). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *replicaSets) UpdateStatus(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { - result = &v1beta1.ReplicaSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSet.Name). - SubResource("status"). - Body(replicaSet). - Do(). - Into(result) - return -} - -// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. -func (c *replicaSets) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *replicaSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched replicaSet. -func (c *replicaSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ReplicaSet, err error) { - result = &v1beta1.ReplicaSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("replicasets"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} - -// GetScale takes name of the replicaSet, and returns the corresponding v1beta1.Scale object, and an error if there is any. -func (c *replicaSets) GetScale(replicaSetName string, options v1.GetOptions) (result *v1beta1.Scale, err error) { - result = &v1beta1.Scale{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSetName). - SubResource("scale"). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *replicaSets) UpdateScale(replicaSetName string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) { - result = &v1beta1.Scale{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSetName). - SubResource("scale"). - Body(scale). - Do(). 
- Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/federation/v1beta1/BUILD b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/federation/v1beta1/BUILD deleted file mode 100644 index b8e9c1b21345..000000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/federation/v1beta1/BUILD +++ /dev/null @@ -1,41 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "cluster.go", - "doc.go", - "federation_client.go", - "generated_expansion.go", - ], - deps = [ - "//federation/apis/federation/v1beta1:go_default_library", - "//federation/client/clientset_generated/federation_clientset/scheme:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [ - ":package-srcs", - "//federation/client/clientset_generated/federation_clientset/typed/federation/v1beta1/fake:all-srcs", - ], - tags = ["automanaged"], -) diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/federation/v1beta1/cluster.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/federation/v1beta1/cluster.go deleted file mode 100644 index d02abdf672b2..000000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/federation/v1beta1/cluster.go +++ /dev/null @@ -1,161 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - v1beta1 "k8s.io/kubernetes/federation/apis/federation/v1beta1" - scheme "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/scheme" -) - -// ClustersGetter has a method to return a ClusterInterface. -// A group's client should implement this interface. -type ClustersGetter interface { - Clusters() ClusterInterface -} - -// ClusterInterface has methods to work with Cluster resources. 
-type ClusterInterface interface { - Create(*v1beta1.Cluster) (*v1beta1.Cluster, error) - Update(*v1beta1.Cluster) (*v1beta1.Cluster, error) - UpdateStatus(*v1beta1.Cluster) (*v1beta1.Cluster, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.Cluster, error) - List(opts v1.ListOptions) (*v1beta1.ClusterList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Cluster, err error) - ClusterExpansion -} - -// clusters implements ClusterInterface -type clusters struct { - client rest.Interface -} - -// newClusters returns a Clusters -func newClusters(c *FederationV1beta1Client) *clusters { - return &clusters{ - client: c.RESTClient(), - } -} - -// Get takes name of the cluster, and returns the corresponding cluster object, and an error if there is any. -func (c *clusters) Get(name string, options v1.GetOptions) (result *v1beta1.Cluster, err error) { - result = &v1beta1.Cluster{} - err = c.client.Get(). - Resource("clusters"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Clusters that match those selectors. -func (c *clusters) List(opts v1.ListOptions) (result *v1beta1.ClusterList, err error) { - result = &v1beta1.ClusterList{} - err = c.client.Get(). - Resource("clusters"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusters. -func (c *clusters) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("clusters"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a cluster and creates it. Returns the server's representation of the cluster, and an error, if there is any. -func (c *clusters) Create(cluster *v1beta1.Cluster) (result *v1beta1.Cluster, err error) { - result = &v1beta1.Cluster{} - err = c.client.Post(). - Resource("clusters"). - Body(cluster). - Do(). - Into(result) - return -} - -// Update takes the representation of a cluster and updates it. Returns the server's representation of the cluster, and an error, if there is any. -func (c *clusters) Update(cluster *v1beta1.Cluster) (result *v1beta1.Cluster, err error) { - result = &v1beta1.Cluster{} - err = c.client.Put(). - Resource("clusters"). - Name(cluster.Name). - Body(cluster). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *clusters) UpdateStatus(cluster *v1beta1.Cluster) (result *v1beta1.Cluster, err error) { - result = &v1beta1.Cluster{} - err = c.client.Put(). - Resource("clusters"). - Name(cluster.Name). - SubResource("status"). - Body(cluster). - Do(). - Into(result) - return -} - -// Delete takes name of the cluster and deletes it. Returns an error if one occurs. -func (c *clusters) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("clusters"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *clusters) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Resource("clusters"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched cluster. -func (c *clusters) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Cluster, err error) { - result = &v1beta1.Cluster{} - err = c.client.Patch(pt). - Resource("clusters"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/federation/v1beta1/doc.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/federation/v1beta1/doc.go deleted file mode 100644 index 11b523897207..000000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/federation/v1beta1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This package is generated by client-gen with custom arguments. - -// This package has the automatically generated typed clients. -package v1beta1 diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/federation/v1beta1/federation_client.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/federation/v1beta1/federation_client.go deleted file mode 100644 index e5c8d1d04094..000000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/federation/v1beta1/federation_client.go +++ /dev/null @@ -1,88 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - rest "k8s.io/client-go/rest" - v1beta1 "k8s.io/kubernetes/federation/apis/federation/v1beta1" - "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/scheme" -) - -type FederationV1beta1Interface interface { - RESTClient() rest.Interface - ClustersGetter -} - -// FederationV1beta1Client is used to interact with features provided by the federation group. 
-type FederationV1beta1Client struct { - restClient rest.Interface -} - -func (c *FederationV1beta1Client) Clusters() ClusterInterface { - return newClusters(c) -} - -// NewForConfig creates a new FederationV1beta1Client for the given config. -func NewForConfig(c *rest.Config) (*FederationV1beta1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &FederationV1beta1Client{client}, nil -} - -// NewForConfigOrDie creates a new FederationV1beta1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *FederationV1beta1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new FederationV1beta1Client for the given RESTClient. -func New(c rest.Interface) *FederationV1beta1Client { - return &FederationV1beta1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FederationV1beta1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/federation/v1beta1/generated_expansion.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/federation/v1beta1/generated_expansion.go deleted file mode 100644 index fca6b514936a..000000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/federation/v1beta1/generated_expansion.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1beta1 - -type ClusterExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/api/BUILD b/vendor/k8s.io/kubernetes/pkg/api/BUILD deleted file mode 100644 index 40261d53aec1..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/BUILD +++ /dev/null @@ -1,116 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "annotation_key_constants.go", - "doc.go", - "field_constants.go", - "json.go", - "objectreference.go", - "register.go", - "resource.go", - "taint.go", - "toleration.go", - "types.go", - "zz_generated.deepcopy.go", - ], - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = ["taint_test.go"], - library = ":go_default_library", -) - -go_test( - name = "go_default_xtest", - srcs = [ - "conversion_test.go", - "copy_test.go", - "deep_copy_test.go", - "defaulting_test.go", - "meta_test.go", - "serialization_proto_test.go", - "serialization_test.go", - "unstructured_test.go", - ], - deps = [ - ":go_default_library", - "//pkg/api/testapi:go_default_library", - "//pkg/api/testing:go_default_library", - "//pkg/api/v1:go_default_library", - "//pkg/apis/extensions:go_default_library", - "//pkg/apis/extensions/v1beta1:go_default_library", - "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/github.com/golang/protobuf/proto:go_default_library", - "//vendor/github.com/google/gofuzz:go_default_library", - "//vendor/github.com/json-iterator/go:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/testing/fuzzer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/testing/roundtrip:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion/unstructured:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", - 
"//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [ - ":package-srcs", - "//pkg/api/endpoints:all-srcs", - "//pkg/api/events:all-srcs", - "//pkg/api/fuzzer:all-srcs", - "//pkg/api/helper:all-srcs", - "//pkg/api/install:all-srcs", - "//pkg/api/persistentvolume:all-srcs", - "//pkg/api/pod:all-srcs", - "//pkg/api/ref:all-srcs", - "//pkg/api/resource:all-srcs", - "//pkg/api/service:all-srcs", - "//pkg/api/testapi:all-srcs", - "//pkg/api/testing:all-srcs", - "//pkg/api/unversioned:all-srcs", - "//pkg/api/util:all-srcs", - "//pkg/api/v1:all-srcs", - "//pkg/api/validation:all-srcs", - ], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/api/doc.go b/vendor/k8s.io/kubernetes/pkg/api/doc.go deleted file mode 100644 index 1507a8823b4c..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/doc.go +++ /dev/null @@ -1,24 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package,register - -// Package api contains the latest (or "internal") version of the -// Kubernetes API objects. This is the API objects as represented in memory. -// The contract presented to clients is located in the versioned packages, -// which are sub-directories. The first one is "v1". Those packages -// describe how a particular version is serialized to storage/network. 
-package api diff --git a/vendor/k8s.io/kubernetes/pkg/api/endpoints/BUILD b/vendor/k8s.io/kubernetes/pkg/api/endpoints/BUILD index cbe2c30730b2..07e3aeef187a 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/endpoints/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/api/endpoints/BUILD @@ -9,8 +9,9 @@ load( go_library( name = "go_default_library", srcs = ["util.go"], + importpath = "k8s.io/kubernetes/pkg/api/endpoints", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/util/hash:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", ], @@ -19,9 +20,10 @@ go_library( go_test( name = "go_default_test", srcs = ["util_test.go"], + importpath = "k8s.io/kubernetes/pkg/api/endpoints", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/github.com/davecgh/go-spew/spew:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/pkg/api/endpoints/util.go b/vendor/k8s.io/kubernetes/pkg/api/endpoints/util.go index 37084d64973a..49bdbc47a195 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/endpoints/util.go +++ b/vendor/k8s.io/kubernetes/pkg/api/endpoints/util.go @@ -24,7 +24,7 @@ import ( "sort" "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" hashutil "k8s.io/kubernetes/pkg/util/hash" ) diff --git a/vendor/k8s.io/kubernetes/pkg/api/events/BUILD b/vendor/k8s.io/kubernetes/pkg/api/events/BUILD index 1bb10b03c89c..96becd0b701e 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/events/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/api/events/BUILD @@ -9,15 +9,17 @@ load( go_library( name = "go_default_library", srcs = ["sorted_event_list.go"], - deps = ["//pkg/api:go_default_library"], + importpath = "k8s.io/kubernetes/pkg/api/events", + deps = ["//pkg/apis/core:go_default_library"], ) go_test( name = "go_default_test", srcs = ["sorted_event_list_test.go"], + importpath = "k8s.io/kubernetes/pkg/api/events", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/api/events/sorted_event_list.go b/vendor/k8s.io/kubernetes/pkg/api/events/sorted_event_list.go index b96c615debaf..9976c10ce734 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/events/sorted_event_list.go +++ b/vendor/k8s.io/kubernetes/pkg/api/events/sorted_event_list.go @@ -17,7 +17,7 @@ limitations under the License. 
package events import ( - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) // SortableEvents implements sort.Interface for []api.Event based on the Timestamp field diff --git a/vendor/k8s.io/kubernetes/pkg/api/helper/BUILD b/vendor/k8s.io/kubernetes/pkg/api/helper/BUILD deleted file mode 100644 index 384d2d096097..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/helper/BUILD +++ /dev/null @@ -1,49 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_test( - name = "go_default_test", - srcs = ["helpers_test.go"], - library = ":go_default_library", - deps = [ - "//pkg/api:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - ], -) - -go_library( - name = "go_default_library", - srcs = ["helpers.go"], - deps = [ - "//pkg/api:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/selection:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [ - ":package-srcs", - "//pkg/api/helper/qos:all-srcs", - ], - tags = ["automanaged"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/api/helper/helpers.go b/vendor/k8s.io/kubernetes/pkg/api/helper/helpers.go deleted file mode 100644 index 636d1dda0ea5..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/helper/helpers.go +++ /dev/null @@ -1,650 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package helper - -import ( - "encoding/json" - "fmt" - "strings" - - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/conversion" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/selection" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/pkg/api" -) - -// IsHugePageResourceName returns true if the resource name has the huge page -// resource prefix. -func IsHugePageResourceName(name api.ResourceName) bool { - return strings.HasPrefix(string(name), api.ResourceHugePagesPrefix) -} - -// HugePageResourceName returns a ResourceName with the canonical hugepage -// prefix prepended for the specified page size. The page size is converted -// to its canonical representation. 
-func HugePageResourceName(pageSize resource.Quantity) api.ResourceName { - return api.ResourceName(fmt.Sprintf("%s%s", api.ResourceHugePagesPrefix, pageSize.String())) -} - -// HugePageSizeFromResourceName returns the page size for the specified huge page -// resource name. If the specified input is not a valid huge page resource name -// an error is returned. -func HugePageSizeFromResourceName(name api.ResourceName) (resource.Quantity, error) { - if !IsHugePageResourceName(name) { - return resource.Quantity{}, fmt.Errorf("resource name: %s is not valid hugepage name", name) - } - pageSize := strings.TrimPrefix(string(name), api.ResourceHugePagesPrefix) - return resource.ParseQuantity(pageSize) -} - -// NonConvertibleFields iterates over the provided map and filters out all but -// any keys with the "non-convertible.kubernetes.io" prefix. -func NonConvertibleFields(annotations map[string]string) map[string]string { - nonConvertibleKeys := map[string]string{} - for key, value := range annotations { - if strings.HasPrefix(key, api.NonConvertibleAnnotationPrefix) { - nonConvertibleKeys[key] = value - } - } - return nonConvertibleKeys -} - -// Semantic can do semantic deep equality checks for api objects. -// Example: apiequality.Semantic.DeepEqual(aPod, aPodWithNonNilButEmptyMaps) == true -var Semantic = conversion.EqualitiesOrDie( - func(a, b resource.Quantity) bool { - // Ignore formatting, only care that numeric value stayed the same. - // TODO: if we decide it's important, it should be safe to start comparing the format. - // - // Uninitialized quantities are equivalent to 0 quantities. - return a.Cmp(b) == 0 - }, - func(a, b metav1.MicroTime) bool { - return a.UTC() == b.UTC() - }, - func(a, b metav1.Time) bool { - return a.UTC() == b.UTC() - }, - func(a, b labels.Selector) bool { - return a.String() == b.String() - }, - func(a, b fields.Selector) bool { - return a.String() == b.String() - }, -) - -var standardResourceQuotaScopes = sets.NewString( - string(api.ResourceQuotaScopeTerminating), - string(api.ResourceQuotaScopeNotTerminating), - string(api.ResourceQuotaScopeBestEffort), - string(api.ResourceQuotaScopeNotBestEffort), -) - -// IsStandardResourceQuotaScope returns true if the scope is a standard value -func IsStandardResourceQuotaScope(str string) bool { - return standardResourceQuotaScopes.Has(str) -} - -var podObjectCountQuotaResources = sets.NewString( - string(api.ResourcePods), -) - -var podComputeQuotaResources = sets.NewString( - string(api.ResourceCPU), - string(api.ResourceMemory), - string(api.ResourceLimitsCPU), - string(api.ResourceLimitsMemory), - string(api.ResourceRequestsCPU), - string(api.ResourceRequestsMemory), -) - -// IsResourceQuotaScopeValidForResource returns true if the resource applies to the specified scope -func IsResourceQuotaScopeValidForResource(scope api.ResourceQuotaScope, resource string) bool { - switch scope { - case api.ResourceQuotaScopeTerminating, api.ResourceQuotaScopeNotTerminating, api.ResourceQuotaScopeNotBestEffort: - return podObjectCountQuotaResources.Has(resource) || podComputeQuotaResources.Has(resource) - case api.ResourceQuotaScopeBestEffort: - return podObjectCountQuotaResources.Has(resource) - default: - return true - } -} - -var standardContainerResources = sets.NewString( - string(api.ResourceCPU), - string(api.ResourceMemory), - string(api.ResourceEphemeralStorage), -) - -// IsStandardContainerResourceName returns true if the container can make a resource request -// for the specified resource -func 
IsStandardContainerResourceName(str string) bool { - return standardContainerResources.Has(str) || IsHugePageResourceName(api.ResourceName(str)) -} - -// IsExtendedResourceName returns true if the resource name is not in the -// default namespace, or it has the opaque integer resource prefix. -func IsExtendedResourceName(name api.ResourceName) bool { - // TODO: Remove OIR part following deprecation. - return !IsDefaultNamespaceResource(name) || IsOpaqueIntResourceName(name) -} - -// IsDefaultNamespaceResource returns true if the resource name is in the -// *kubernetes.io/ namespace. Partially-qualified (unprefixed) names are -// implicitly in the kubernetes.io/ namespace. -func IsDefaultNamespaceResource(name api.ResourceName) bool { - return !strings.Contains(string(name), "/") || - strings.Contains(string(name), api.ResourceDefaultNamespacePrefix) -} - -// IsOpaqueIntResourceName returns true if the resource name has the opaque -// integer resource prefix. -func IsOpaqueIntResourceName(name api.ResourceName) bool { - return strings.HasPrefix(string(name), api.ResourceOpaqueIntPrefix) -} - -// OpaqueIntResourceName returns a ResourceName with the canonical opaque -// integer prefix prepended. If the argument already has the prefix, it is -// returned unmodified. -func OpaqueIntResourceName(name string) api.ResourceName { - if IsOpaqueIntResourceName(api.ResourceName(name)) { - return api.ResourceName(name) - } - return api.ResourceName(fmt.Sprintf("%s%s", api.ResourceOpaqueIntPrefix, name)) -} - -var overcommitBlacklist = sets.NewString(string(api.ResourceNvidiaGPU)) - -// IsOvercommitAllowed returns true if the resource is in the default -// namespace and not blacklisted. -func IsOvercommitAllowed(name api.ResourceName) bool { - return IsDefaultNamespaceResource(name) && - !IsHugePageResourceName(name) && - !overcommitBlacklist.Has(string(name)) -} - -var standardLimitRangeTypes = sets.NewString( - string(api.LimitTypePod), - string(api.LimitTypeContainer), - string(api.LimitTypePersistentVolumeClaim), -) - -// IsStandardLimitRangeType returns true if the type is Pod or Container -func IsStandardLimitRangeType(str string) bool { - return standardLimitRangeTypes.Has(str) -} - -var standardQuotaResources = sets.NewString( - string(api.ResourceCPU), - string(api.ResourceMemory), - string(api.ResourceEphemeralStorage), - string(api.ResourceRequestsCPU), - string(api.ResourceRequestsMemory), - string(api.ResourceRequestsStorage), - string(api.ResourceRequestsEphemeralStorage), - string(api.ResourceLimitsCPU), - string(api.ResourceLimitsMemory), - string(api.ResourceLimitsEphemeralStorage), - string(api.ResourcePods), - string(api.ResourceQuotas), - string(api.ResourceServices), - string(api.ResourceReplicationControllers), - string(api.ResourceSecrets), - string(api.ResourcePersistentVolumeClaims), - string(api.ResourceConfigMaps), - string(api.ResourceServicesNodePorts), - string(api.ResourceServicesLoadBalancers), -) - -// IsStandardQuotaResourceName returns true if the resource is known to -// the quota tracking system -func IsStandardQuotaResourceName(str string) bool { - return standardQuotaResources.Has(str) -} - -var standardResources = sets.NewString( - string(api.ResourceCPU), - string(api.ResourceMemory), - string(api.ResourceEphemeralStorage), - string(api.ResourceRequestsCPU), - string(api.ResourceRequestsMemory), - string(api.ResourceRequestsEphemeralStorage), - string(api.ResourceLimitsCPU), - string(api.ResourceLimitsMemory), - string(api.ResourceLimitsEphemeralStorage), - 
string(api.ResourcePods), - string(api.ResourceQuotas), - string(api.ResourceServices), - string(api.ResourceReplicationControllers), - string(api.ResourceSecrets), - string(api.ResourceConfigMaps), - string(api.ResourcePersistentVolumeClaims), - string(api.ResourceStorage), - string(api.ResourceRequestsStorage), - string(api.ResourceServicesNodePorts), - string(api.ResourceServicesLoadBalancers), -) - -// IsStandardResourceName returns true if the resource is known to the system -func IsStandardResourceName(str string) bool { - return standardResources.Has(str) || IsHugePageResourceName(api.ResourceName(str)) -} - -var integerResources = sets.NewString( - string(api.ResourcePods), - string(api.ResourceQuotas), - string(api.ResourceServices), - string(api.ResourceReplicationControllers), - string(api.ResourceSecrets), - string(api.ResourceConfigMaps), - string(api.ResourcePersistentVolumeClaims), - string(api.ResourceServicesNodePorts), - string(api.ResourceServicesLoadBalancers), -) - -// IsIntegerResourceName returns true if the resource is measured in integer values -func IsIntegerResourceName(str string) bool { - return integerResources.Has(str) || IsExtendedResourceName(api.ResourceName(str)) -} - -// this function aims to check if the service's ClusterIP is set or not -// the objective is not to perform validation here -func IsServiceIPSet(service *api.Service) bool { - return service.Spec.ClusterIP != api.ClusterIPNone && service.Spec.ClusterIP != "" -} - -// this function aims to check if the service's cluster IP is requested or not -func IsServiceIPRequested(service *api.Service) bool { - // ExternalName services are CNAME aliases to external ones. Ignore the IP. - if service.Spec.Type == api.ServiceTypeExternalName { - return false - } - return service.Spec.ClusterIP == "" -} - -var standardFinalizers = sets.NewString( - string(api.FinalizerKubernetes), - metav1.FinalizerOrphanDependents, - metav1.FinalizerDeleteDependents, -) - -// HasAnnotation returns a bool if passed in annotation exists -func HasAnnotation(obj api.ObjectMeta, ann string) bool { - _, found := obj.Annotations[ann] - return found -} - -// SetMetaDataAnnotation sets the annotation and value -func SetMetaDataAnnotation(obj *api.ObjectMeta, ann string, value string) { - if obj.Annotations == nil { - obj.Annotations = make(map[string]string) - } - obj.Annotations[ann] = value -} - -func IsStandardFinalizerName(str string) bool { - return standardFinalizers.Has(str) -} - -// AddToNodeAddresses appends the NodeAddresses to the passed-by-pointer slice, -// only if they do not already exist -func AddToNodeAddresses(addresses *[]api.NodeAddress, addAddresses ...api.NodeAddress) { - for _, add := range addAddresses { - exists := false - for _, existing := range *addresses { - if existing.Address == add.Address && existing.Type == add.Type { - exists = true - break - } - } - if !exists { - *addresses = append(*addresses, add) - } - } -} - -// TODO: make method on LoadBalancerStatus? 
-func LoadBalancerStatusEqual(l, r *api.LoadBalancerStatus) bool { - return ingressSliceEqual(l.Ingress, r.Ingress) -} - -func ingressSliceEqual(lhs, rhs []api.LoadBalancerIngress) bool { - if len(lhs) != len(rhs) { - return false - } - for i := range lhs { - if !ingressEqual(&lhs[i], &rhs[i]) { - return false - } - } - return true -} - -func ingressEqual(lhs, rhs *api.LoadBalancerIngress) bool { - if lhs.IP != rhs.IP { - return false - } - if lhs.Hostname != rhs.Hostname { - return false - } - return true -} - -// TODO: make method on LoadBalancerStatus? -func LoadBalancerStatusDeepCopy(lb *api.LoadBalancerStatus) *api.LoadBalancerStatus { - c := &api.LoadBalancerStatus{} - c.Ingress = make([]api.LoadBalancerIngress, len(lb.Ingress)) - for i := range lb.Ingress { - c.Ingress[i] = lb.Ingress[i] - } - return c -} - -// GetAccessModesAsString returns a string representation of an array of access modes. -// modes, when present, are always in the same order: RWO,ROX,RWX. -func GetAccessModesAsString(modes []api.PersistentVolumeAccessMode) string { - modes = removeDuplicateAccessModes(modes) - modesStr := []string{} - if containsAccessMode(modes, api.ReadWriteOnce) { - modesStr = append(modesStr, "RWO") - } - if containsAccessMode(modes, api.ReadOnlyMany) { - modesStr = append(modesStr, "ROX") - } - if containsAccessMode(modes, api.ReadWriteMany) { - modesStr = append(modesStr, "RWX") - } - return strings.Join(modesStr, ",") -} - -// GetAccessModesAsString returns an array of AccessModes from a string created by GetAccessModesAsString -func GetAccessModesFromString(modes string) []api.PersistentVolumeAccessMode { - strmodes := strings.Split(modes, ",") - accessModes := []api.PersistentVolumeAccessMode{} - for _, s := range strmodes { - s = strings.Trim(s, " ") - switch { - case s == "RWO": - accessModes = append(accessModes, api.ReadWriteOnce) - case s == "ROX": - accessModes = append(accessModes, api.ReadOnlyMany) - case s == "RWX": - accessModes = append(accessModes, api.ReadWriteMany) - } - } - return accessModes -} - -// removeDuplicateAccessModes returns an array of access modes without any duplicates -func removeDuplicateAccessModes(modes []api.PersistentVolumeAccessMode) []api.PersistentVolumeAccessMode { - accessModes := []api.PersistentVolumeAccessMode{} - for _, m := range modes { - if !containsAccessMode(accessModes, m) { - accessModes = append(accessModes, m) - } - } - return accessModes -} - -func containsAccessMode(modes []api.PersistentVolumeAccessMode, mode api.PersistentVolumeAccessMode) bool { - for _, m := range modes { - if m == mode { - return true - } - } - return false -} - -// NodeSelectorRequirementsAsSelector converts the []NodeSelectorRequirement api type into a struct that implements -// labels.Selector. 
-func NodeSelectorRequirementsAsSelector(nsm []api.NodeSelectorRequirement) (labels.Selector, error) { - if len(nsm) == 0 { - return labels.Nothing(), nil - } - selector := labels.NewSelector() - for _, expr := range nsm { - var op selection.Operator - switch expr.Operator { - case api.NodeSelectorOpIn: - op = selection.In - case api.NodeSelectorOpNotIn: - op = selection.NotIn - case api.NodeSelectorOpExists: - op = selection.Exists - case api.NodeSelectorOpDoesNotExist: - op = selection.DoesNotExist - case api.NodeSelectorOpGt: - op = selection.GreaterThan - case api.NodeSelectorOpLt: - op = selection.LessThan - default: - return nil, fmt.Errorf("%q is not a valid node selector operator", expr.Operator) - } - r, err := labels.NewRequirement(expr.Key, op, expr.Values) - if err != nil { - return nil, err - } - selector = selector.Add(*r) - } - return selector, nil -} - -// GetTolerationsFromPodAnnotations gets the json serialized tolerations data from Pod.Annotations -// and converts it to the []Toleration type in api. -func GetTolerationsFromPodAnnotations(annotations map[string]string) ([]api.Toleration, error) { - var tolerations []api.Toleration - if len(annotations) > 0 && annotations[api.TolerationsAnnotationKey] != "" { - err := json.Unmarshal([]byte(annotations[api.TolerationsAnnotationKey]), &tolerations) - if err != nil { - return tolerations, err - } - } - return tolerations, nil -} - -// AddOrUpdateTolerationInPod tries to add a toleration to the pod's toleration list. -// Returns true if something was updated, false otherwise. -func AddOrUpdateTolerationInPod(pod *api.Pod, toleration *api.Toleration) bool { - podTolerations := pod.Spec.Tolerations - - var newTolerations []api.Toleration - updated := false - for i := range podTolerations { - if toleration.MatchToleration(&podTolerations[i]) { - if Semantic.DeepEqual(toleration, podTolerations[i]) { - return false - } - newTolerations = append(newTolerations, *toleration) - updated = true - continue - } - - newTolerations = append(newTolerations, podTolerations[i]) - } - - if !updated { - newTolerations = append(newTolerations, *toleration) - } - - pod.Spec.Tolerations = newTolerations - return true -} - -// TolerationToleratesTaint checks if the toleration tolerates the taint. -func TolerationToleratesTaint(toleration *api.Toleration, taint *api.Taint) bool { - if len(toleration.Effect) != 0 && toleration.Effect != taint.Effect { - return false - } - - if toleration.Key != taint.Key { - return false - } - // TODO: Use proper defaulting when Toleration becomes a field of PodSpec - if (len(toleration.Operator) == 0 || toleration.Operator == api.TolerationOpEqual) && toleration.Value == taint.Value { - return true - } - if toleration.Operator == api.TolerationOpExists { - return true - } - return false -} - -// TaintToleratedByTolerations checks if taint is tolerated by any of the tolerations. -func TaintToleratedByTolerations(taint *api.Taint, tolerations []api.Toleration) bool { - tolerated := false - for i := range tolerations { - if TolerationToleratesTaint(&tolerations[i], taint) { - tolerated = true - break - } - } - return tolerated -} - -// GetTaintsFromNodeAnnotations gets the json serialized taints data from Pod.Annotations -// and converts it to the []Taint type in api. 
-func GetTaintsFromNodeAnnotations(annotations map[string]string) ([]api.Taint, error) { - var taints []api.Taint - if len(annotations) > 0 && annotations[api.TaintsAnnotationKey] != "" { - err := json.Unmarshal([]byte(annotations[api.TaintsAnnotationKey]), &taints) - if err != nil { - return []api.Taint{}, err - } - } - return taints, nil -} - -// SysctlsFromPodAnnotations parses the sysctl annotations into a slice of safe Sysctls -// and a slice of unsafe Sysctls. This is only a convenience wrapper around -// SysctlsFromPodAnnotation. -func SysctlsFromPodAnnotations(a map[string]string) ([]api.Sysctl, []api.Sysctl, error) { - safe, err := SysctlsFromPodAnnotation(a[api.SysctlsPodAnnotationKey]) - if err != nil { - return nil, nil, err - } - unsafe, err := SysctlsFromPodAnnotation(a[api.UnsafeSysctlsPodAnnotationKey]) - if err != nil { - return nil, nil, err - } - - return safe, unsafe, nil -} - -// SysctlsFromPodAnnotation parses an annotation value into a slice of Sysctls. -func SysctlsFromPodAnnotation(annotation string) ([]api.Sysctl, error) { - if len(annotation) == 0 { - return nil, nil - } - - kvs := strings.Split(annotation, ",") - sysctls := make([]api.Sysctl, len(kvs)) - for i, kv := range kvs { - cs := strings.Split(kv, "=") - if len(cs) != 2 || len(cs[0]) == 0 { - return nil, fmt.Errorf("sysctl %q not of the format sysctl_name=value", kv) - } - sysctls[i].Name = cs[0] - sysctls[i].Value = cs[1] - } - return sysctls, nil -} - -// PodAnnotationsFromSysctls creates an annotation value for a slice of Sysctls. -func PodAnnotationsFromSysctls(sysctls []api.Sysctl) string { - if len(sysctls) == 0 { - return "" - } - - kvs := make([]string, len(sysctls)) - for i := range sysctls { - kvs[i] = fmt.Sprintf("%s=%s", sysctls[i].Name, sysctls[i].Value) - } - return strings.Join(kvs, ",") -} - -// GetPersistentVolumeClass returns StorageClassName. -func GetPersistentVolumeClass(volume *api.PersistentVolume) string { - // Use beta annotation first - if class, found := volume.Annotations[api.BetaStorageClassAnnotation]; found { - return class - } - - return volume.Spec.StorageClassName -} - -// GetPersistentVolumeClaimClass returns StorageClassName. If no storage class was -// requested, it returns "". -func GetPersistentVolumeClaimClass(claim *api.PersistentVolumeClaim) string { - // Use beta annotation first - if class, found := claim.Annotations[api.BetaStorageClassAnnotation]; found { - return class - } - - if claim.Spec.StorageClassName != nil { - return *claim.Spec.StorageClassName - } - - return "" -} - -// PersistentVolumeClaimHasClass returns true if given claim has set StorageClassName field. -func PersistentVolumeClaimHasClass(claim *api.PersistentVolumeClaim) bool { - // Use beta annotation first - if _, found := claim.Annotations[api.BetaStorageClassAnnotation]; found { - return true - } - - if claim.Spec.StorageClassName != nil { - return true - } - - return false -} - -// GetStorageNodeAffinityFromAnnotation gets the json serialized data from PersistentVolume.Annotations -// and converts it to the NodeAffinity type in api. 
-// TODO: update when storage node affinity graduates to beta -func GetStorageNodeAffinityFromAnnotation(annotations map[string]string) (*api.NodeAffinity, error) { - if len(annotations) > 0 && annotations[api.AlphaStorageNodeAffinityAnnotation] != "" { - var affinity api.NodeAffinity - err := json.Unmarshal([]byte(annotations[api.AlphaStorageNodeAffinityAnnotation]), &affinity) - if err != nil { - return nil, err - } - return &affinity, nil - } - return nil, nil -} - -// Converts NodeAffinity type to Alpha annotation for use in PersistentVolumes -// TODO: update when storage node affinity graduates to beta -func StorageNodeAffinityToAlphaAnnotation(annotations map[string]string, affinity *api.NodeAffinity) error { - if affinity == nil { - return nil - } - - json, err := json.Marshal(*affinity) - if err != nil { - return err - } - annotations[api.AlphaStorageNodeAffinityAnnotation] = string(json) - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/helper/qos/BUILD b/vendor/k8s.io/kubernetes/pkg/api/helper/qos/BUILD deleted file mode 100644 index 1b2f763a17e1..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/helper/qos/BUILD +++ /dev/null @@ -1,30 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = ["qos.go"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/helper:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/api/helper/qos/qos.go b/vendor/k8s.io/kubernetes/pkg/api/helper/qos/qos.go deleted file mode 100644 index cc58bde0c317..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/helper/qos/qos.go +++ /dev/null @@ -1,97 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// NOTE: DO NOT use those helper functions through client-go, the -// package path will be changed in the future. -package qos - -import ( - "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/helper" -) - -func isSupportedQoSComputeResource(name api.ResourceName) bool { - supportedQoSComputeResources := sets.NewString(string(api.ResourceCPU), string(api.ResourceMemory)) - return supportedQoSComputeResources.Has(string(name)) || helper.IsHugePageResourceName(name) -} - -// GetPodQOS returns the QoS class of a pod. -// A pod is besteffort if none of its containers have specified any requests or limits. -// A pod is guaranteed only when requests and limits are specified for all the containers and they are equal. 
-// A pod is burstable if limits and requests do not match across all containers. -func GetPodQOS(pod *api.Pod) api.PodQOSClass { - requests := api.ResourceList{} - limits := api.ResourceList{} - zeroQuantity := resource.MustParse("0") - isGuaranteed := true - for _, container := range pod.Spec.Containers { - // process requests - for name, quantity := range container.Resources.Requests { - if !isSupportedQoSComputeResource(name) { - continue - } - if quantity.Cmp(zeroQuantity) == 1 { - delta := quantity.Copy() - if _, exists := requests[name]; !exists { - requests[name] = *delta - } else { - delta.Add(requests[name]) - requests[name] = *delta - } - } - } - // process limits - qosLimitsFound := sets.NewString() - for name, quantity := range container.Resources.Limits { - if !isSupportedQoSComputeResource(name) { - continue - } - if quantity.Cmp(zeroQuantity) == 1 { - qosLimitsFound.Insert(string(name)) - delta := quantity.Copy() - if _, exists := limits[name]; !exists { - limits[name] = *delta - } else { - delta.Add(limits[name]) - limits[name] = *delta - } - } - } - - if !qosLimitsFound.HasAll(string(api.ResourceMemory), string(api.ResourceCPU)) { - isGuaranteed = false - } - } - if len(requests) == 0 && len(limits) == 0 { - return api.PodQOSBestEffort - } - // Check is requests match limits for all resources. - if isGuaranteed { - for name, req := range requests { - if lim, exists := limits[name]; !exists || lim.Cmp(req) != 0 { - isGuaranteed = false - break - } - } - } - if isGuaranteed && - len(requests) == len(limits) { - return api.PodQOSGuaranteed - } - return api.PodQOSBurstable -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/install/BUILD b/vendor/k8s.io/kubernetes/pkg/api/install/BUILD deleted file mode 100644 index ad64a56ce606..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/install/BUILD +++ /dev/null @@ -1,46 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_library( - name = "go_default_library", - srcs = ["install.go"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = ["install_test.go"], - library = ":go_default_library", - deps = [ - "//pkg/api:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/api/install/install.go b/vendor/k8s.io/kubernetes/pkg/api/install/install.go deleted file mode 100644 index cd3879ccb6ad..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/install/install.go +++ /dev/null @@ -1,69 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package install installs the v1 monolithic api, making it available as an -// option to all of the API encoding/decoding machinery. -package install - -import ( - "k8s.io/apimachinery/pkg/apimachinery/announced" - "k8s.io/apimachinery/pkg/apimachinery/registered" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/v1" -) - -func init() { - Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) -} - -// Install registers the API group and adds types to a scheme -func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { - if err := announced.NewGroupMetaFactory( - &announced.GroupMetaFactoryArgs{ - GroupName: api.GroupName, - VersionPreferenceOrder: []string{v1.SchemeGroupVersion.Version}, - AddInternalObjectsToScheme: api.AddToScheme, - RootScopedKinds: sets.NewString( - "Node", - "Namespace", - "PersistentVolume", - "ComponentStatus", - ), - IgnoredKinds: sets.NewString( - "ListOptions", - "DeleteOptions", - "Status", - "PodLogOptions", - "PodExecOptions", - "PodAttachOptions", - "PodPortForwardOptions", - "PodProxyOptions", - "NodeProxyOptions", - "ServiceProxyOptions", - "ThirdPartyResource", - "ThirdPartyResourceData", - "ThirdPartyResourceList", - ), - }, - announced.VersionToSchemeFunc{ - v1.SchemeGroupVersion.Version: v1.AddToScheme, - }, - ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { - panic(err) - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/legacyscheme/BUILD b/vendor/k8s.io/kubernetes/pkg/api/legacyscheme/BUILD new file mode 100644 index 000000000000..42136a953426 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/legacyscheme/BUILD @@ -0,0 +1,28 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["scheme.go"], + importpath = "k8s.io/kubernetes/pkg/api/legacyscheme", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/api/legacyscheme/scheme.go b/vendor/k8s.io/kubernetes/pkg/api/legacyscheme/scheme.go new file mode 100644 index 000000000000..64c63a3606e3 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/legacyscheme/scheme.go @@ -0,0 +1,46 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package legacyscheme + +import ( + "os" + + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" +) + +// GroupFactoryRegistry is the APIGroupFactoryRegistry (overlaps a bit with Registry, see comments in package for details) +var GroupFactoryRegistry = make(announced.APIGroupFactoryRegistry) + +// Registry is an instance of an API registry. This is an interim step to start removing the idea of a global +// API registry. +var Registry = registered.NewOrDie(os.Getenv("KUBE_API_VERSIONS")) + +// Scheme is the default instance of runtime.Scheme to which types in the Kubernetes API are already registered. +// NOTE: If you are copying this file to start a new api group, STOP! Copy the +// extensions group instead. This Scheme is special and should appear ONLY in +// the api group, unless you really know what you're doing. +// TODO(lavalamp): make the above error impossible. +var Scheme = runtime.NewScheme() + +// Codecs provides access to encoding and decoding for the scheme +var Codecs = serializer.NewCodecFactory(Scheme) + +// ParameterCodec handles versioning of objects that are converted to query parameters. +var ParameterCodec = runtime.NewParameterCodec(Scheme) diff --git a/vendor/k8s.io/kubernetes/pkg/api/node_example.json b/vendor/k8s.io/kubernetes/pkg/api/node_example.json deleted file mode 100644 index ea249cb78541..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/node_example.json +++ /dev/null @@ -1,49 +0,0 @@ -{ - "kind": "Node", - "apiVersion": "v1", - "metadata": { - "name": "e2e-test-wojtekt-node-etd6", - "selfLink": "/api/v1/nodes/e2e-test-wojtekt-node-etd6", - "uid": "a7e89222-e8e5-11e4-8fde-42010af09327", - "resourceVersion": "379", - "creationTimestamp": "2015-04-22T11:49:39Z" - }, - "spec": { - "externalID": "15488322946290398375" - }, - "status": { - "capacity": { - "cpu": "1", - "memory": "1745152Ki" - }, - "conditions": [ - { - "type": "Ready", - "status": "True", - "lastHeartbeatTime": "2015-04-22T11:58:17Z", - "lastTransitionTime": "2015-04-22T11:49:52Z", - "reason": "kubelet is posting ready status" - } - ], - "addresses": [ - { - "type": "ExternalIP", - "address": "104.197.49.213" - }, - { - "type": "LegacyHostIP", - "address": "104.197.20.11" - } - ], - "nodeInfo": { - "machineID": "", - "systemUUID": "D59FA3FA-7B5B-7287-5E1A-1D79F13CB577", - "bootID": "44a832f3-8cfb-4de5-b7d2-d66030b6cd95", - "kernelVersion": "3.16.0-0.bpo.4-amd64", - "osImage": "Debian GNU/Linux 7 (wheezy)", - "containerRuntimeVersion": "docker://1.5.0", - "kubeletVersion": "v0.15.0-484-g0c8ee980d705a3-dirty", - "kubeProxyVersion": "v0.15.0-484-g0c8ee980d705a3-dirty" - } - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/persistentvolume/BUILD b/vendor/k8s.io/kubernetes/pkg/api/persistentvolume/BUILD index 26471bdb6db2..f65e81e9f27b 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/persistentvolume/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/api/persistentvolume/BUILD @@ -9,7 +9,12 @@ load( go_library( name = "go_default_library", srcs = ["util.go"], - deps = 
["//pkg/api:go_default_library"], + importpath = "k8s.io/kubernetes/pkg/api/persistentvolume", + deps = [ + "//pkg/apis/core:go_default_library", + "//pkg/features:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", + ], ) filegroup( @@ -28,10 +33,13 @@ filegroup( go_test( name = "go_default_test", srcs = ["util_test.go"], + importpath = "k8s.io/kubernetes/pkg/api/persistentvolume", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/features:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/api/persistentvolume/util.go b/vendor/k8s.io/kubernetes/pkg/api/persistentvolume/util.go index 0a8de7c5e68f..fe6dc9b2c6f7 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/persistentvolume/util.go +++ b/vendor/k8s.io/kubernetes/pkg/api/persistentvolume/util.go @@ -17,7 +17,9 @@ limitations under the License. package persistentvolume import ( - "k8s.io/kubernetes/pkg/api" + utilfeature "k8s.io/apiserver/pkg/util/feature" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/features" ) func getClaimRefNamespace(pv *api.PersistentVolume) string { @@ -64,16 +66,38 @@ func VisitPVSecretNames(pv *api.PersistentVolume, visitor Visitor) bool { return false } case source.RBD != nil: - if source.RBD.SecretRef != nil && !visitor(getClaimRefNamespace(pv), source.RBD.SecretRef.Name) { - return false + if source.RBD.SecretRef != nil { + // previously persisted PV objects use claimRef namespace + ns := getClaimRefNamespace(pv) + if len(source.RBD.SecretRef.Namespace) > 0 { + // use the secret namespace if namespace is set + ns = source.RBD.SecretRef.Namespace + } + if !visitor(ns, source.RBD.SecretRef.Name) { + return false + } } case source.ScaleIO != nil: - if source.ScaleIO.SecretRef != nil && !visitor(getClaimRefNamespace(pv), source.ScaleIO.SecretRef.Name) { - return false + if source.ScaleIO.SecretRef != nil { + ns := getClaimRefNamespace(pv) + if source.ScaleIO.SecretRef != nil && len(source.ScaleIO.SecretRef.Namespace) > 0 { + ns = source.ScaleIO.SecretRef.Namespace + } + if !visitor(ns, source.ScaleIO.SecretRef.Name) { + return false + } } case source.ISCSI != nil: - if source.ISCSI.SecretRef != nil && !visitor(getClaimRefNamespace(pv), source.ISCSI.SecretRef.Name) { - return false + if source.ISCSI.SecretRef != nil { + // previously persisted PV objects use claimRef namespace + ns := getClaimRefNamespace(pv) + if len(source.ISCSI.SecretRef.Namespace) > 0 { + // use the secret namespace if namespace is set + ns = source.ISCSI.SecretRef.Namespace + } + if !visitor(ns, source.ISCSI.SecretRef.Name) { + return false + } } case source.StorageOS != nil: if source.StorageOS.SecretRef != nil && !visitor(source.StorageOS.SecretRef.Namespace, source.StorageOS.SecretRef.Name) { @@ -82,3 +106,11 @@ func VisitPVSecretNames(pv *api.PersistentVolume, visitor Visitor) bool { } return true } + +// DropDisabledAlphaFields removes disabled fields from the pv spec. +// This should be called from PrepareForCreate/PrepareForUpdate for all resources containing a pv spec. 
+func DropDisabledAlphaFields(pvSpec *api.PersistentVolumeSpec) { + if !utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + pvSpec.VolumeMode = nil + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/persistentvolumeclaim/BUILD b/vendor/k8s.io/kubernetes/pkg/api/persistentvolumeclaim/BUILD new file mode 100644 index 000000000000..6c1cea847cd0 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/persistentvolumeclaim/BUILD @@ -0,0 +1,43 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_library( + name = "go_default_library", + srcs = ["util.go"], + importpath = "k8s.io/kubernetes/pkg/api/persistentvolumeclaim", + deps = [ + "//pkg/apis/core:go_default_library", + "//pkg/features:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) + +go_test( + name = "go_default_test", + srcs = ["util_test.go"], + importpath = "k8s.io/kubernetes/pkg/api/persistentvolumeclaim", + library = ":go_default_library", + deps = [ + "//pkg/apis/core:go_default_library", + "//pkg/features:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/api/persistentvolumeclaim/OWNERS b/vendor/k8s.io/kubernetes/pkg/api/persistentvolumeclaim/OWNERS new file mode 100755 index 000000000000..8575aa9767a5 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/persistentvolumeclaim/OWNERS @@ -0,0 +1,4 @@ +reviewers: +- smarterclayton +- jsafrane +- david-mcmahon diff --git a/vendor/k8s.io/kubernetes/pkg/api/persistentvolumeclaim/util.go b/vendor/k8s.io/kubernetes/pkg/api/persistentvolumeclaim/util.go new file mode 100644 index 000000000000..a6321d3404fb --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/persistentvolumeclaim/util.go @@ -0,0 +1,31 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package persistentvolumeclaim + +import ( + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/features" +) + +// DropDisabledAlphaFields removes disabled fields from the pvc spec. +// This should be called from PrepareForCreate/PrepareForUpdate for all resources containing a pvc spec. 
+func DropDisabledAlphaFields(pvcSpec *core.PersistentVolumeClaimSpec) { + if !utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + pvcSpec.VolumeMode = nil + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/pod/BUILD b/vendor/k8s.io/kubernetes/pkg/api/pod/BUILD index 71c3c3db22d3..7b189d02effd 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/pod/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/api/pod/BUILD @@ -9,8 +9,9 @@ load( go_library( name = "go_default_library", srcs = ["util.go"], + importpath = "k8s.io/kubernetes/pkg/api/pod", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/features:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", @@ -33,10 +34,13 @@ filegroup( go_test( name = "go_default_test", srcs = ["util_test.go"], + importpath = "k8s.io/kubernetes/pkg/api/pod", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/features:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/api/pod/util.go b/vendor/k8s.io/kubernetes/pkg/api/pod/util.go index 7c8afdf46a88..3cb7d69f399b 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/pod/util.go +++ b/vendor/k8s.io/kubernetes/pkg/api/pod/util.go @@ -19,7 +19,7 @@ package pod import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/features" ) @@ -243,12 +243,15 @@ func DropDisabledAlphaFields(podSpec *api.PodSpec) { } } } + for i := range podSpec.Containers { DropDisabledVolumeMountsAlphaFields(podSpec.Containers[i].VolumeMounts) } for i := range podSpec.InitContainers { DropDisabledVolumeMountsAlphaFields(podSpec.InitContainers[i].VolumeMounts) } + + DropDisabledVolumeDevicesAlphaFields(podSpec) } // DropDisabledVolumeMountsAlphaFields removes disabled fields from []VolumeMount. @@ -260,3 +263,16 @@ func DropDisabledVolumeMountsAlphaFields(volumeMounts []api.VolumeMount) { } } } + +// DropDisabledVolumeDevicesAlphaFields removes disabled fields from []VolumeDevice. 
+// This should be called from PrepareForCreate/PrepareForUpdate for all resources containing a VolumeDevice +func DropDisabledVolumeDevicesAlphaFields(podSpec *api.PodSpec) { + if !utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + for i := range podSpec.Containers { + podSpec.Containers[i].VolumeDevices = nil + } + for i := range podSpec.InitContainers { + podSpec.InitContainers[i].VolumeDevices = nil + } + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/ref/BUILD b/vendor/k8s.io/kubernetes/pkg/api/ref/BUILD index 531f1b65c674..f4d23d86f80c 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/ref/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/api/ref/BUILD @@ -9,9 +9,11 @@ load( go_test( name = "go_default_test", srcs = ["ref_test.go"], + importpath = "k8s.io/kubernetes/pkg/api/ref", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", @@ -21,8 +23,9 @@ go_test( go_library( name = "go_default_library", srcs = ["ref.go"], + importpath = "k8s.io/kubernetes/pkg/api/ref", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/api/ref/ref.go b/vendor/k8s.io/kubernetes/pkg/api/ref/ref.go index a60013a44791..d6576750ad11 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/ref/ref.go +++ b/vendor/k8s.io/kubernetes/pkg/api/ref/ref.go @@ -25,7 +25,7 @@ import ( "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) var ( diff --git a/vendor/k8s.io/kubernetes/pkg/api/register.go b/vendor/k8s.io/kubernetes/pkg/api/register.go deleted file mode 100644 index 39562e5a566f..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/register.go +++ /dev/null @@ -1,124 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package api - -import ( - "os" - - "k8s.io/apimachinery/pkg/apimachinery/announced" - "k8s.io/apimachinery/pkg/apimachinery/registered" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/runtime/serializer" -) - -// GroupFactoryRegistry is the APIGroupFactoryRegistry (overlaps a bit with Registry, see comments in package for details) -var GroupFactoryRegistry = make(announced.APIGroupFactoryRegistry) - -// Registry is an instance of an API registry. This is an interim step to start removing the idea of a global -// API registry. 
-var Registry = registered.NewOrDie(os.Getenv("KUBE_API_VERSIONS")) - -// Scheme is the default instance of runtime.Scheme to which types in the Kubernetes API are already registered. -// NOTE: If you are copying this file to start a new api group, STOP! Copy the -// extensions group instead. This Scheme is special and should appear ONLY in -// the api group, unless you really know what you're doing. -// TODO(lavalamp): make the above error impossible. -var Scheme = runtime.NewScheme() - -// Codecs provides access to encoding and decoding for the scheme -var Codecs = serializer.NewCodecFactory(Scheme) - -// GroupName is the group name use in this package -const GroupName = "" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} - -// ParameterCodec handles versioning of objects that are converted to query parameters. -var ParameterCodec = runtime.NewParameterCodec(Scheme) - -// Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) schema.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme -) - -func addKnownTypes(scheme *runtime.Scheme) error { - if err := scheme.AddIgnoredConversionType(&metav1.TypeMeta{}, &metav1.TypeMeta{}); err != nil { - return err - } - scheme.AddKnownTypes(SchemeGroupVersion, - &Pod{}, - &PodList{}, - &PodStatusResult{}, - &PodTemplate{}, - &PodTemplateList{}, - &ReplicationControllerList{}, - &ReplicationController{}, - &ServiceList{}, - &Service{}, - &ServiceProxyOptions{}, - &NodeList{}, - &Node{}, - &NodeConfigSource{}, - &NodeProxyOptions{}, - &Endpoints{}, - &EndpointsList{}, - &Binding{}, - &Event{}, - &EventList{}, - &List{}, - &LimitRange{}, - &LimitRangeList{}, - &ResourceQuota{}, - &ResourceQuotaList{}, - &Namespace{}, - &NamespaceList{}, - &ServiceAccount{}, - &ServiceAccountList{}, - &Secret{}, - &SecretList{}, - &PersistentVolume{}, - &PersistentVolumeList{}, - &PersistentVolumeClaim{}, - &PersistentVolumeClaimList{}, - &PodAttachOptions{}, - &PodLogOptions{}, - &PodExecOptions{}, - &PodPortForwardOptions{}, - &PodProxyOptions{}, - &ComponentStatus{}, - &ComponentStatusList{}, - &SerializedReference{}, - &RangeAllocation{}, - &ConfigMap{}, - &ConfigMapList{}, - ) - - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/replication_controller_example.json b/vendor/k8s.io/kubernetes/pkg/api/replication_controller_example.json deleted file mode 100644 index 70eef1cff323..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/replication_controller_example.json +++ /dev/null @@ -1,83 +0,0 @@ -{ - "kind": "ReplicationController", - "apiVersion": "v1", - "metadata": { - "name": "elasticsearch-logging-controller", - "namespace": "default", - "selfLink": "/api/v1/namespaces/default/replicationcontrollers/elasticsearch-logging-controller", - "uid": "aa76f162-e8e5-11e4-8fde-42010af09327", - "resourceVersion": "98", - "creationTimestamp": "2015-04-22T11:49:43Z", - "labels": { - "kubernetes.io/cluster-service": "true", - "name": "elasticsearch-logging" - } - }, - "spec": { - "replicas": 1, - "selector": { - "name": "elasticsearch-logging" - }, - "template": { - 
"metadata": { - "creationTimestamp": null, - "labels": { - "kubernetes.io/cluster-service": "true", - "name": "elasticsearch-logging" - } - }, - "spec": { - "volumes": [ - { - "name": "es-persistent-storage", - "hostPath": null, - "emptyDir": { - "medium": "" - }, - "gcePersistentDisk": null, - "awsElasticBlockStore": null, - "gitRepo": null, - "secret": null, - "nfs": null, - "iscsi": null, - "glusterfs": null, - "quobyte": null - } - ], - "containers": [ - { - "name": "elasticsearch-logging", - "image": "gcr.io/google_containers/elasticsearch:1.0", - "ports": [ - { - "name": "db", - "containerPort": 9200, - "protocol": "TCP" - }, - { - "name": "transport", - "containerPort": 9300, - "protocol": "TCP" - } - ], - "resources": {}, - "volumeMounts": [ - { - "name": "es-persistent-storage", - "mountPath": "/data" - } - ], - "terminationMessagePath": "/dev/termination-log", - "imagePullPolicy": "IfNotPresent", - "capabilities": {} - } - ], - "restartPolicy": "Always", - "dnsPolicy": "ClusterFirst" - } - } - }, - "status": { - "replicas": 1 - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/resource/BUILD b/vendor/k8s.io/kubernetes/pkg/api/resource/BUILD index f115af29e9b0..f31dbd9db260 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/resource/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/api/resource/BUILD @@ -9,8 +9,9 @@ load( go_library( name = "go_default_library", srcs = ["helpers.go"], + importpath = "k8s.io/kubernetes/pkg/api/resource", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", ], ) @@ -31,9 +32,10 @@ filegroup( go_test( name = "go_default_test", srcs = ["helpers_test.go"], + importpath = "k8s.io/kubernetes/pkg/api/resource", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/pkg/api/resource/helpers.go b/vendor/k8s.io/kubernetes/pkg/api/resource/helpers.go index eecc26ed222a..78084393eb23 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/resource/helpers.go +++ b/vendor/k8s.io/kubernetes/pkg/api/resource/helpers.go @@ -22,7 +22,7 @@ import ( "strconv" "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) // PodRequestsAndLimits returns a dictionary of all defined resources summed up for all diff --git a/vendor/k8s.io/kubernetes/pkg/api/service/BUILD b/vendor/k8s.io/kubernetes/pkg/api/service/BUILD index 795a97072966..735bf8d5a472 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/service/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/api/service/BUILD @@ -9,8 +9,9 @@ load( go_library( name = "go_default_library", srcs = ["util.go"], + importpath = "k8s.io/kubernetes/pkg/api/service", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/util/net/sets:go_default_library", ], ) @@ -18,9 +19,10 @@ go_library( go_test( name = "go_default_test", srcs = ["util_test.go"], + importpath = "k8s.io/kubernetes/pkg/api/service", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/util/net/sets:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/api/service/util.go b/vendor/k8s.io/kubernetes/pkg/api/service/util.go index 242aab77f1f9..5de5f276574b 100644 --- 
a/vendor/k8s.io/kubernetes/pkg/api/service/util.go +++ b/vendor/k8s.io/kubernetes/pkg/api/service/util.go @@ -18,10 +18,9 @@ package service import ( "fmt" - "strings" - - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" netsets "k8s.io/kubernetes/pkg/util/net/sets" + "strings" ) const ( diff --git a/vendor/k8s.io/kubernetes/pkg/api/types.go b/vendor/k8s.io/kubernetes/pkg/api/types.go deleted file mode 100644 index 92d45de61886..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/types.go +++ /dev/null @@ -1,4287 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package api - -import ( - "k8s.io/apimachinery/pkg/api/resource" - metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/intstr" -) - -// Common string formats -// --------------------- -// Many fields in this API have formatting requirements. The commonly used -// formats are defined here. -// -// C_IDENTIFIER: This is a string that conforms to the definition of an "identifier" -// in the C language. This is captured by the following regex: -// [A-Za-z_][A-Za-z0-9_]* -// This defines the format, but not the length restriction, which should be -// specified at the definition of any field of this type. -// -// DNS_LABEL: This is a string, no more than 63 characters long, that conforms -// to the definition of a "label" in RFCs 1035 and 1123. This is captured -// by the following regex: -// [a-z0-9]([-a-z0-9]*[a-z0-9])? -// -// DNS_SUBDOMAIN: This is a string, no more than 253 characters long, that conforms -// to the definition of a "subdomain" in RFCs 1035 and 1123. This is captured -// by the following regex: -// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)* -// or more simply: -// DNS_LABEL(\.DNS_LABEL)* -// -// IANA_SVC_NAME: This is a string, no more than 15 characters long, that -// conforms to the definition of IANA service name in RFC 6335. -// It must contains at least one letter [a-z] and it must contains only [a-z0-9-]. -// Hypens ('-') cannot be leading or trailing character of the string -// and cannot be adjacent to other hyphens. - -// ObjectMeta is metadata that all persisted resources must have, which includes all objects -// users must create. -// DEPRECATED: Use k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta instead - this type will be removed soon. -type ObjectMeta struct { - // Name is unique within a namespace. Name is required when creating resources, although - // some resources may allow a client to request the generation of an appropriate name - // automatically. Name is primarily intended for creation idempotence and configuration - // definition. - // +optional - Name string - - // GenerateName indicates that the name should be made unique by the server prior to persisting - // it. 
A non-empty value for the field indicates the name will be made unique (and the name - // returned to the client will be different than the name passed). The value of this field will - // be combined with a unique suffix on the server if the Name field has not been provided. - // The provided value must be valid within the rules for Name, and may be truncated by the length - // of the suffix required to make the value unique on the server. - // - // If this field is specified, and Name is not present, the server will NOT return a 409 if the - // generated name exists - instead, it will either return 201 Created or 500 with Reason - // ServerTimeout indicating a unique name could not be found in the time allotted, and the client - // should retry (optionally after the time indicated in the Retry-After header). - // +optional - GenerateName string - - // Namespace defines the space within which name must be unique. An empty namespace is - // equivalent to the "default" namespace, but "default" is the canonical representation. - // Not all objects are required to be scoped to a namespace - the value of this field for - // those objects will be empty. - // +optional - Namespace string - - // SelfLink is a URL representing this object. - // +optional - SelfLink string - - // UID is the unique in time and space value for this object. It is typically generated by - // the server on successful creation of a resource and is not allowed to change on PUT - // operations. - // +optional - UID types.UID - - // An opaque value that represents the version of this resource. May be used for optimistic - // concurrency, change detection, and the watch operation on a resource or set of resources. - // Clients must treat these values as opaque and values may only be valid for a particular - // resource or set of resources. Only servers will generate resource versions. - // +optional - ResourceVersion string - - // A sequence number representing a specific generation of the desired state. - // Populated by the system. Read-only. - // +optional - Generation int64 - - // CreationTimestamp is a timestamp representing the server time when this object was - // created. It is not guaranteed to be set in happens-before order across separate operations. - // Clients may not set this value. It is represented in RFC3339 form and is in UTC. - // +optional - CreationTimestamp metav1.Time - - // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This - // field is set by the server when a graceful deletion is requested by the user, and is not - // directly settable by a client. The resource is expected to be deleted (no longer visible - // from resource lists, and not reachable by name) after the time in this field. Once set, - // this value may not be unset or be set further into the future, although it may be shortened - // or the resource may be deleted prior to this time. For example, a user may request that - // a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination - // signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard - // termination signal (SIGKILL) to the container and after cleanup, remove the pod from the - // API. In the presence of network partitions, this object may still exist after this - // timestamp, until an administrator or automated process can determine the resource is - // fully terminated. - // If not set, graceful deletion of the object has not been requested. 
- // - // Populated by the system when a graceful deletion is requested. - // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - DeletionTimestamp *metav1.Time - - // DeletionGracePeriodSeconds records the graceful deletion value set when graceful deletion - // was requested. Represents the most recent grace period, and may only be shortened once set. - // +optional - DeletionGracePeriodSeconds *int64 - - // Labels are key value pairs that may be used to scope and select individual resources. - // Label keys are of the form: - // label-key ::= prefixed-name | name - // prefixed-name ::= prefix '/' name - // prefix ::= DNS_SUBDOMAIN - // name ::= DNS_LABEL - // The prefix is optional. If the prefix is not specified, the key is assumed to be private - // to the user. Other system components that wish to use labels must specify a prefix. The - // "kubernetes.io/" prefix is reserved for use by kubernetes components. - // +optional - Labels map[string]string - - // Annotations are unstructured key value data stored with a resource that may be set by - // external tooling. They are not queryable and should be preserved when modifying - // objects. Annotation keys have the same formatting restrictions as Label keys. See the - // comments on Labels for details. - // +optional - Annotations map[string]string - - // List of objects depended by this object. If ALL objects in the list have - // been deleted, this object will be garbage collected. If this object is managed by a controller, - // then an entry in this list will point to this controller, with the controller field set to true. - // There cannot be more than one managing controller. - // +optional - OwnerReferences []metav1.OwnerReference - - // An initializer is a controller which enforces some system invariant at object creation time. - // This field is a list of initializers that have not yet acted on this object. If nil or empty, - // this object has been completely initialized. Otherwise, the object is considered uninitialized - // and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to - // observe uninitialized objects. - // - // When an object is created, the system will populate this list with the current set of initializers. - // Only privileged users may set or modify this list. Once it is empty, it may not be modified further - // by any user. - Initializers *metav1.Initializers - - // Must be empty before the object is deleted from the registry. Each entry - // is an identifier for the responsible component that will remove the entry - // from the list. If the deletionTimestamp of the object is non-nil, entries - // in this list can only be removed. - // +optional - Finalizers []string - - // The name of the cluster which the object belongs to. - // This is used to distinguish resources with same name and namespace in different clusters. - // This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. - // +optional - ClusterName string -} - -const ( - // NamespaceDefault means the object is in the default namespace which is applied when not specified by clients - NamespaceDefault string = "default" - // NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces - NamespaceAll string = "" - // NamespaceNone is the argument for a context when there is no namespace. 
- NamespaceNone string = "" - // NamespaceSystem is the system namespace where we place system components. - NamespaceSystem string = "kube-system" - // NamespacePublic is the namespace where we place public info (ConfigMaps) - NamespacePublic string = "kube-public" - // TerminationMessagePathDefault means the default path to capture the application termination message running in a container - TerminationMessagePathDefault string = "/dev/termination-log" -) - -// Volume represents a named volume in a pod that may be accessed by any containers in the pod. -type Volume struct { - // Required: This must be a DNS_LABEL. Each volume in a pod must have - // a unique name. - Name string - // The VolumeSource represents the location and type of a volume to mount. - // This is optional for now. If not specified, the Volume is implied to be an EmptyDir. - // This implied behavior is deprecated and will be removed in a future version. - // +optional - VolumeSource -} - -// VolumeSource represents the source location of a volume to mount. -// Only one of its members may be specified. -type VolumeSource struct { - // HostPath represents file or directory on the host machine that is - // directly exposed to the container. This is generally used for system - // agents or other privileged things that are allowed to see the host - // machine. Most containers will NOT need this. - // --- - // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not - // mount host directories as read/write. - // +optional - HostPath *HostPathVolumeSource - // EmptyDir represents a temporary directory that shares a pod's lifetime. - // +optional - EmptyDir *EmptyDirVolumeSource - // GCEPersistentDisk represents a GCE Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. - // +optional - GCEPersistentDisk *GCEPersistentDiskVolumeSource - // AWSElasticBlockStore represents an AWS EBS disk that is attached to a - // kubelet's host machine and then exposed to the pod. - // +optional - AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource - // GitRepo represents a git repository at a particular revision. - // +optional - GitRepo *GitRepoVolumeSource - // Secret represents a secret that should populate this volume. - // +optional - Secret *SecretVolumeSource - // NFS represents an NFS mount on the host that shares a pod's lifetime - // +optional - NFS *NFSVolumeSource - // ISCSIVolumeSource represents an ISCSI Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. - // +optional - ISCSI *ISCSIVolumeSource - // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime - // +optional - Glusterfs *GlusterfsVolumeSource - // PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace - // +optional - PersistentVolumeClaim *PersistentVolumeClaimVolumeSource - // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime - // +optional - RBD *RBDVolumeSource - - // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime - // +optional - Quobyte *QuobyteVolumeSource - - // FlexVolume represents a generic volume resource that is - // provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. 
- // +optional - FlexVolume *FlexVolumeSource - - // Cinder represents a cinder volume attached and mounted on kubelets host machine - // +optional - Cinder *CinderVolumeSource - - // CephFS represents a Cephfs mount on the host that shares a pod's lifetime - // +optional - CephFS *CephFSVolumeSource - - // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running - // +optional - Flocker *FlockerVolumeSource - - // DownwardAPI represents metadata about the pod that should populate this volume - // +optional - DownwardAPI *DownwardAPIVolumeSource - // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. - // +optional - FC *FCVolumeSource - // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. - // +optional - AzureFile *AzureFileVolumeSource - // ConfigMap represents a configMap that should populate this volume - // +optional - ConfigMap *ConfigMapVolumeSource - // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine - // +optional - VsphereVolume *VsphereVirtualDiskVolumeSource - // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. - // +optional - AzureDisk *AzureDiskVolumeSource - // PhotonPersistentDisk represents a Photon Controller persistent disk attached and mounted on kubelets host machine - PhotonPersistentDisk *PhotonPersistentDiskVolumeSource - // Items for all in one resources secrets, configmaps, and downward API - Projected *ProjectedVolumeSource - // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine - // +optional - PortworxVolume *PortworxVolumeSource - // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. - // +optional - ScaleIO *ScaleIOVolumeSource - // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod - // +optional - StorageOS *StorageOSVolumeSource -} - -// Similar to VolumeSource but meant for the administrator who creates PVs. -// Exactly one of its members must be set. -type PersistentVolumeSource struct { - // GCEPersistentDisk represents a GCE Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. - // +optional - GCEPersistentDisk *GCEPersistentDiskVolumeSource - // AWSElasticBlockStore represents an AWS EBS disk that is attached to a - // kubelet's host machine and then exposed to the pod. - // +optional - AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource - // HostPath represents a directory on the host. - // Provisioned by a developer or tester. - // This is useful for single-node development and testing only! - // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. 
- // +optional - HostPath *HostPathVolumeSource - // Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod - // +optional - Glusterfs *GlusterfsVolumeSource - // NFS represents an NFS mount on the host that shares a pod's lifetime - // +optional - NFS *NFSVolumeSource - // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime - // +optional - RBD *RBDVolumeSource - // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime - // +optional - Quobyte *QuobyteVolumeSource - // ISCSIVolumeSource represents an ISCSI resource that is attached to a - // kubelet's host machine and then exposed to the pod. - // +optional - ISCSI *ISCSIVolumeSource - // FlexVolume represents a generic volume resource that is - // provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. - // +optional - FlexVolume *FlexVolumeSource - // Cinder represents a cinder volume attached and mounted on kubelets host machine - // +optional - Cinder *CinderVolumeSource - // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime - // +optional - CephFS *CephFSPersistentVolumeSource - // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. - // +optional - FC *FCVolumeSource - // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running - // +optional - Flocker *FlockerVolumeSource - // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. - // +optional - AzureFile *AzureFilePersistentVolumeSource - // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine - // +optional - VsphereVolume *VsphereVirtualDiskVolumeSource - // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. - // +optional - AzureDisk *AzureDiskVolumeSource - // PhotonPersistentDisk represents a Photon Controller persistent disk attached and mounted on kubelets host machine - PhotonPersistentDisk *PhotonPersistentDiskVolumeSource - // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine - // +optional - PortworxVolume *PortworxVolumeSource - // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. - // +optional - ScaleIO *ScaleIOVolumeSource - // Local represents directly-attached storage with node affinity - // +optional - Local *LocalVolumeSource - // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod - // More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md - // +optional - StorageOS *StorageOSPersistentVolumeSource -} - -type PersistentVolumeClaimVolumeSource struct { - // ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume - ClaimName string - // Optional: Defaults to false (read/write). ReadOnly here - // will force the ReadOnly setting in VolumeMounts - // +optional - ReadOnly bool -} - -const ( - // BetaStorageClassAnnotation represents the beta/previous StorageClass annotation. 
- // It's currently still used and will be held for backwards compatibility - BetaStorageClassAnnotation = "volume.beta.kubernetes.io/storage-class" - - // MountOptionAnnotation defines mount option annotation used in PVs - MountOptionAnnotation = "volume.beta.kubernetes.io/mount-options" - - // AlphaStorageNodeAffinityAnnotation defines node affinity policies for a PersistentVolume. - // Value is a string of the json representation of type NodeAffinity - AlphaStorageNodeAffinityAnnotation = "volume.alpha.kubernetes.io/node-affinity" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type PersistentVolume struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - //Spec defines a persistent volume owned by the cluster - // +optional - Spec PersistentVolumeSpec - - // Status represents the current information about persistent volume. - // +optional - Status PersistentVolumeStatus -} - -type PersistentVolumeSpec struct { - // Resources represents the actual resources of the volume - Capacity ResourceList - // Source represents the location and type of a volume to mount. - PersistentVolumeSource - // AccessModes contains all ways the volume can be mounted - // +optional - AccessModes []PersistentVolumeAccessMode - // ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. - // ClaimRef is expected to be non-nil when bound. - // claim.VolumeName is the authoritative bind between PV and PVC. - // When set to non-nil value, PVC.Spec.Selector of the referenced PVC is - // ignored, i.e. labels of this PV do not need to match PVC selector. - // +optional - ClaimRef *ObjectReference - // Optional: what happens to a persistent volume when released from its claim. - // +optional - PersistentVolumeReclaimPolicy PersistentVolumeReclaimPolicy - // Name of StorageClass to which this persistent volume belongs. Empty value - // means that this volume does not belong to any StorageClass. - // +optional - StorageClassName string - // A list of mount options, e.g. ["ro", "soft"]. Not validated - mount will - // simply fail if one is invalid. - // +optional - MountOptions []string -} - -// PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes -type PersistentVolumeReclaimPolicy string - -const ( - // PersistentVolumeReclaimRecycle means the volume will be recycled back into the pool of unbound persistent volumes on release from its claim. - // The volume plugin must support Recycling. - PersistentVolumeReclaimRecycle PersistentVolumeReclaimPolicy = "Recycle" - // PersistentVolumeReclaimDelete means the volume will be deleted from Kubernetes on release from its claim. - // The volume plugin must support Deletion. - PersistentVolumeReclaimDelete PersistentVolumeReclaimPolicy = "Delete" - // PersistentVolumeReclaimRetain means the volume will be left in its current phase (Released) for manual reclamation by the administrator. - // The default policy is Retain. - PersistentVolumeReclaimRetain PersistentVolumeReclaimPolicy = "Retain" -) - -type PersistentVolumeStatus struct { - // Phase indicates if a volume is available, bound to a claim, or released by a claim - // +optional - Phase PersistentVolumePhase - // A human-readable message indicating details about why the volume is in this state. 
- // +optional - Message string - // Reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI - // +optional - Reason string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type PersistentVolumeList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - Items []PersistentVolume -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PersistentVolumeClaim is a user's request for and claim to a persistent volume -type PersistentVolumeClaim struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the volume requested by a pod author - // +optional - Spec PersistentVolumeClaimSpec - - // Status represents the current information about a claim - // +optional - Status PersistentVolumeClaimStatus -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type PersistentVolumeClaimList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - Items []PersistentVolumeClaim -} - -// PersistentVolumeClaimSpec describes the common attributes of storage devices -// and allows a Source for provider-specific attributes -type PersistentVolumeClaimSpec struct { - // Contains the types of access modes required - // +optional - AccessModes []PersistentVolumeAccessMode - // A label query over volumes to consider for binding. This selector is - // ignored when VolumeName is set - // +optional - Selector *metav1.LabelSelector - // Resources represents the minimum resources required - // +optional - Resources ResourceRequirements - // VolumeName is the binding reference to the PersistentVolume backing this - // claim. When set to non-empty value Selector is not evaluated - // +optional - VolumeName string - // Name of the StorageClass required by the claim. 
- // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#class-1 - // +optional - StorageClassName *string -} - -type PersistentVolumeClaimConditionType string - -// These are valid conditions of Pvc -const ( - // An user trigger resize of pvc has been started - PersistentVolumeClaimResizing PersistentVolumeClaimConditionType = "Resizing" -) - -type PersistentVolumeClaimCondition struct { - Type PersistentVolumeClaimConditionType - Status ConditionStatus - // +optional - LastProbeTime metav1.Time - // +optional - LastTransitionTime metav1.Time - // +optional - Reason string - // +optional - Message string -} - -type PersistentVolumeClaimStatus struct { - // Phase represents the current phase of PersistentVolumeClaim - // +optional - Phase PersistentVolumeClaimPhase - // AccessModes contains all ways the volume backing the PVC can be mounted - // +optional - AccessModes []PersistentVolumeAccessMode - // Represents the actual resources of the underlying volume - // +optional - Capacity ResourceList - // +optional - Conditions []PersistentVolumeClaimCondition -} - -type PersistentVolumeAccessMode string - -const ( - // can be mounted read/write mode to exactly 1 host - ReadWriteOnce PersistentVolumeAccessMode = "ReadWriteOnce" - // can be mounted in read-only mode to many hosts - ReadOnlyMany PersistentVolumeAccessMode = "ReadOnlyMany" - // can be mounted in read/write mode to many hosts - ReadWriteMany PersistentVolumeAccessMode = "ReadWriteMany" -) - -type PersistentVolumePhase string - -const ( - // used for PersistentVolumes that are not available - VolumePending PersistentVolumePhase = "Pending" - // used for PersistentVolumes that are not yet bound - // Available volumes are held by the binder and matched to PersistentVolumeClaims - VolumeAvailable PersistentVolumePhase = "Available" - // used for PersistentVolumes that are bound - VolumeBound PersistentVolumePhase = "Bound" - // used for PersistentVolumes where the bound PersistentVolumeClaim was deleted - // released volumes must be recycled before becoming available again - // this phase is used by the persistent volume claim binder to signal to another process to reclaim the resource - VolumeReleased PersistentVolumePhase = "Released" - // used for PersistentVolumes that failed to be correctly recycled or deleted after being released from a claim - VolumeFailed PersistentVolumePhase = "Failed" -) - -type PersistentVolumeClaimPhase string - -const ( - // used for PersistentVolumeClaims that are not yet bound - ClaimPending PersistentVolumeClaimPhase = "Pending" - // used for PersistentVolumeClaims that are bound - ClaimBound PersistentVolumeClaimPhase = "Bound" - // used for PersistentVolumeClaims that lost their underlying - // PersistentVolume. The claim was bound to a PersistentVolume and this - // volume does not exist any longer and all data on it was lost. - ClaimLost PersistentVolumeClaimPhase = "Lost" -) - -type HostPathType string - -const ( - // For backwards compatible, leave it empty if unset - HostPathUnset HostPathType = "" - // If nothing exists at the given path, an empty directory will be created there - // as needed with file mode 0755, having the same group and ownership with Kubelet. 
- HostPathDirectoryOrCreate HostPathType = "DirectoryOrCreate" - // A directory must exist at the given path - HostPathDirectory HostPathType = "Directory" - // If nothing exists at the given path, an empty file will be created there - // as needed with file mode 0644, having the same group and ownership with Kubelet. - HostPathFileOrCreate HostPathType = "FileOrCreate" - // A file must exist at the given path - HostPathFile HostPathType = "File" - // A UNIX socket must exist at the given path - HostPathSocket HostPathType = "Socket" - // A character device must exist at the given path - HostPathCharDev HostPathType = "CharDevice" - // A block device must exist at the given path - HostPathBlockDev HostPathType = "BlockDevice" -) - -// Represents a host path mapped into a pod. -// Host path volumes do not support ownership management or SELinux relabeling. -type HostPathVolumeSource struct { - // If the path is a symlink, it will follow the link to the real path. - Path string - // Defaults to "" - Type *HostPathType -} - -// Represents an empty directory for a pod. -// Empty directory volumes support ownership management and SELinux relabeling. -type EmptyDirVolumeSource struct { - // TODO: Longer term we want to represent the selection of underlying - // media more like a scheduling problem - user says what traits they - // need, we give them a backing store that satisfies that. For now - // this will cover the most common needs. - // Optional: what type of storage medium should back this directory. - // The default is "" which means to use the node's default medium. - // +optional - Medium StorageMedium - // Total amount of local storage required for this EmptyDir volume. - // The size limit is also applicable for memory medium. - // The maximum usage on memory medium EmptyDir would be the minimum value between - // the SizeLimit specified here and the sum of memory limits of all containers in a pod. - // The default is nil which means that the limit is undefined. - // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir - // +optional - SizeLimit *resource.Quantity -} - -// StorageMedium defines ways that storage can be allocated to a volume. -type StorageMedium string - -const ( - StorageMediumDefault StorageMedium = "" // use whatever the default is for the node - StorageMediumMemory StorageMedium = "Memory" // use memory (tmpfs) - StorageMediumHugePages StorageMedium = "HugePages" // use hugepages -) - -// Protocol defines network protocols supported for things like container ports. -type Protocol string - -const ( - // ProtocolTCP is the TCP protocol. - ProtocolTCP Protocol = "TCP" - // ProtocolUDP is the UDP protocol. - ProtocolUDP Protocol = "UDP" -) - -// Represents a Persistent Disk resource in Google Compute Engine. -// -// A GCE PD must exist before mounting to a container. The disk must -// also be in the same GCE project and zone as the kubelet. A GCE PD -// can only be mounted as read/write once or read-only many times. GCE -// PDs support ownership management and SELinux relabeling. -type GCEPersistentDiskVolumeSource struct { - // Unique name of the PD resource. Used to identify the disk in GCE - PDName string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // TODO: how do we prevent errors in the filesystem from compromising the machine - // +optional - FSType string - // Optional: Partition on the disk to mount. 
- // If omitted, kubelet will attempt to mount the device name. - // Ex. For /dev/sda1, this field is "1", for /dev/sda, this field is 0 or empty. - // +optional - Partition int32 - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -// Represents an ISCSI disk. -// ISCSI volumes can only be mounted as read/write once. -// ISCSI volumes support ownership management and SELinux relabeling. -type ISCSIVolumeSource struct { - // Required: iSCSI target portal - // the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260) - // +optional - TargetPortal string - // Required: target iSCSI Qualified Name - // +optional - IQN string - // Required: iSCSI target lun number - // +optional - Lun int32 - // Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport. - // +optional - ISCSIInterface string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // TODO: how do we prevent errors in the filesystem from compromising the machine - // +optional - FSType string - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool - // Optional: list of iSCSI target portal ips for high availability. - // the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260) - // +optional - Portals []string - // Optional: whether support iSCSI Discovery CHAP authentication - // +optional - DiscoveryCHAPAuth bool - // Optional: whether support iSCSI Session CHAP authentication - // +optional - SessionCHAPAuth bool - // Optional: CHAP secret for iSCSI target and initiator authentication. - // The secret is used if either DiscoveryCHAPAuth or SessionCHAPAuth is true - // +optional - SecretRef *LocalObjectReference - // Optional: Custom initiator name per volume. - // If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface - // : will be created for the connection. - // +optional - InitiatorName *string -} - -// Represents a Fibre Channel volume. -// Fibre Channel volumes can only be mounted as read/write once. -// Fibre Channel volumes support ownership management and SELinux relabeling. -type FCVolumeSource struct { - // Optional: FC target worldwide names (WWNs) - // +optional - TargetWWNs []string - // Optional: FC target lun number - // +optional - Lun *int32 - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // TODO: how do we prevent errors in the filesystem from compromising the machine - // +optional - FSType string - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool - // Optional: FC volume World Wide Identifiers (WWIDs) - // Either WWIDs or TargetWWNs and Lun must be set, but not both simultaneously. - // +optional - WWIDs []string -} - -// FlexVolume represents a generic volume resource that is -// provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. -type FlexVolumeSource struct { - // Driver is the name of the driver to use for this volume. 
- Driver string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. - // +optional - FSType string - // Optional: SecretRef is reference to the secret object containing - // sensitive information to pass to the plugin scripts. This may be - // empty if no secret object is specified. If the secret object - // contains more than one secret, all secrets are passed to the plugin - // scripts. - // +optional - SecretRef *LocalObjectReference - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool - // Optional: Extra driver options if any. - // +optional - Options map[string]string -} - -// Represents a Persistent Disk resource in AWS. -// -// An AWS EBS disk must exist before mounting to a container. The disk -// must also be in the same AWS zone as the kubelet. An AWS EBS disk -// can only be mounted as read/write once. AWS EBS volumes support -// ownership management and SELinux relabeling. -type AWSElasticBlockStoreVolumeSource struct { - // Unique id of the persistent disk resource. Used to identify the disk in AWS - VolumeID string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // TODO: how do we prevent errors in the filesystem from compromising the machine - // +optional - FSType string - // Optional: Partition on the disk to mount. - // If omitted, kubelet will attempt to mount the device name. - // Ex. For /dev/sda1, this field is "1", for /dev/sda, this field is 0 or empty. - // +optional - Partition int32 - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -// Represents a volume that is populated with the contents of a git repository. -// Git repo volumes do not support ownership management. -// Git repo volumes support SELinux relabeling. -type GitRepoVolumeSource struct { - // Repository URL - Repository string - // Commit hash, this is optional - // +optional - Revision string - // Clone target, this is optional - // Must not contain or start with '..'. If '.' is supplied, the volume directory will be the - // git repository. Otherwise, if specified, the volume will contain the git repository in - // the subdirectory with the given name. - // +optional - Directory string - // TODO: Consider credentials here. -} - -// Adapts a Secret into a volume. -// -// The contents of the target Secret's Data field will be presented in a volume -// as files using the keys in the Data field as the file names. -// Secret volumes support ownership management and SELinux relabeling. -type SecretVolumeSource struct { - // Name of the secret in the pod's namespace to use. - // +optional - SecretName string - // If unspecified, each key-value pair in the Data field of the referenced - // Secret will be projected into the volume as a file whose name is the - // key and content is the value. If specified, the listed keys will be - // projected into the specified paths, and unlisted keys will not be - // present. If a key is specified which is not present in the Secret, - // the volume setup will error unless it is marked optional. Paths must be - // relative and may not contain the '..' path or start with '..'. 
- // +optional - Items []KeyToPath - // Mode bits to use on created files by default. Must be a value between - // 0 and 0777. - // Directories within the path are not affected by this setting. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - // +optional - DefaultMode *int32 - // Specify whether the Secret or its key must be defined - // +optional - Optional *bool -} - -// Adapts a secret into a projected volume. -// -// The contents of the target Secret's Data field will be presented in a -// projected volume as files using the keys in the Data field as the file names. -// Note that this is identical to a secret volume source without the default -// mode. -type SecretProjection struct { - LocalObjectReference - // If unspecified, each key-value pair in the Data field of the referenced - // Secret will be projected into the volume as a file whose name is the - // key and content is the value. If specified, the listed keys will be - // projected into the specified paths, and unlisted keys will not be - // present. If a key is specified which is not present in the Secret, - // the volume setup will error unless it is marked optional. Paths must be - // relative and may not contain the '..' path or start with '..'. - // +optional - Items []KeyToPath - // Specify whether the Secret or its key must be defined - // +optional - Optional *bool -} - -// Represents an NFS mount that lasts the lifetime of a pod. -// NFS volumes do not support ownership management or SELinux relabeling. -type NFSVolumeSource struct { - // Server is the hostname or IP address of the NFS server - Server string - - // Path is the exported NFS share - Path string - - // Optional: Defaults to false (read/write). ReadOnly here will force - // the NFS export to be mounted with read-only permissions - // +optional - ReadOnly bool -} - -// Represents a Quobyte mount that lasts the lifetime of a pod. -// Quobyte volumes do not support ownership management or SELinux relabeling. -type QuobyteVolumeSource struct { - // Registry represents a single or multiple Quobyte Registry services - // specified as a string as host:port pair (multiple entries are separated with commas) - // which acts as the central registry for volumes - Registry string - - // Volume is a string that references an already created Quobyte volume by name. - Volume string - - // Defaults to false (read/write). ReadOnly here will force - // the Quobyte to be mounted with read-only permissions - // +optional - ReadOnly bool - - // User to map volume access to - // Defaults to the root user - // +optional - User string - - // Group to map volume access to - // Default is no group - // +optional - Group string -} - -// Represents a Glusterfs mount that lasts the lifetime of a pod. -// Glusterfs volumes do not support ownership management or SELinux relabeling. -type GlusterfsVolumeSource struct { - // Required: EndpointsName is the endpoint name that details Glusterfs topology - EndpointsName string - - // Required: Path is the Glusterfs volume path - Path string - - // Optional: Defaults to false (read/write). ReadOnly here will force - // the Glusterfs to be mounted with read-only permissions - // +optional - ReadOnly bool -} - -// Represents a Rados Block Device mount that lasts the lifetime of a pod. -// RBD volumes support ownership management and SELinux relabeling. 
-type RBDVolumeSource struct { - // Required: CephMonitors is a collection of Ceph monitors - CephMonitors []string - // Required: RBDImage is the rados image name - RBDImage string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // TODO: how do we prevent errors in the filesystem from compromising the machine - // +optional - FSType string - // Optional: RadosPool is the rados pool name,default is rbd - // +optional - RBDPool string - // Optional: RBDUser is the rados user name, default is admin - // +optional - RadosUser string - // Optional: Keyring is the path to key ring for RBDUser, default is /etc/ceph/keyring - // +optional - Keyring string - // Optional: SecretRef is name of the authentication secret for RBDUser, default is nil. - // +optional - SecretRef *LocalObjectReference - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -// Represents a cinder volume resource in Openstack. A Cinder volume -// must exist before mounting to a container. The volume must also be -// in the same region as the kubelet. Cinder volumes support ownership -// management and SELinux relabeling. -type CinderVolumeSource struct { - // Unique id of the volume used to identify the cinder volume - VolumeID string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // +optional - FSType string - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -// Represents a Ceph Filesystem mount that lasts the lifetime of a pod -// Cephfs volumes do not support ownership management or SELinux relabeling. -type CephFSVolumeSource struct { - // Required: Monitors is a collection of Ceph monitors - Monitors []string - // Optional: Used as the mounted root, rather than the full Ceph tree, default is / - // +optional - Path string - // Optional: User is the rados user name, default is admin - // +optional - User string - // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - // +optional - SecretFile string - // Optional: SecretRef is reference to the authentication secret for User, default is empty. - // +optional - SecretRef *LocalObjectReference - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -// SecretReference represents a Secret Reference. It has enough information to retrieve secret -// in any namespace -type SecretReference struct { - // Name is unique within a namespace to reference a secret resource. - // +optional - Name string - // Namespace defines the space within which the secret name must be unique. - // +optional - Namespace string -} - -// Represents a Ceph Filesystem mount that lasts the lifetime of a pod -// Cephfs volumes do not support ownership management or SELinux relabeling. 
-type CephFSPersistentVolumeSource struct { - // Required: Monitors is a collection of Ceph monitors - Monitors []string - // Optional: Used as the mounted root, rather than the full Ceph tree, default is / - // +optional - Path string - // Optional: User is the rados user name, default is admin - // +optional - User string - // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - // +optional - SecretFile string - // Optional: SecretRef is reference to the authentication secret for User, default is empty. - // +optional - SecretRef *SecretReference - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -// Represents a Flocker volume mounted by the Flocker agent. -// One and only one of datasetName and datasetUUID should be set. -// Flocker volumes do not support ownership management or SELinux relabeling. -type FlockerVolumeSource struct { - // Name of the dataset stored as metadata -> name on the dataset for Flocker - // should be considered as deprecated - // +optional - DatasetName string - // UUID of the dataset. This is unique identifier of a Flocker dataset - // +optional - DatasetUUID string -} - -// Represents a volume containing downward API info. -// Downward API volumes support ownership management and SELinux relabeling. -type DownwardAPIVolumeSource struct { - // Items is a list of DownwardAPIVolume file - // +optional - Items []DownwardAPIVolumeFile - // Mode bits to use on created files by default. Must be a value between - // 0 and 0777. - // Directories within the path are not affected by this setting. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - // +optional - DefaultMode *int32 -} - -// Represents a single file containing information from the downward API -type DownwardAPIVolumeFile struct { - // Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' - Path string - // Required: Selects a field of the pod: only annotations, labels, name, namespace and uid are supported. - // +optional - FieldRef *ObjectFieldSelector - // Selects a resource of the container: only resources limits and requests - // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. - // +optional - ResourceFieldRef *ResourceFieldSelector - // Optional: mode bits to use on this file, must be a value between 0 - // and 0777. If not specified, the volume defaultMode will be used. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - // +optional - Mode *int32 -} - -// Represents downward API info for projecting into a projected volume. -// Note that this is identical to a downwardAPI volume source without the default -// mode. -type DownwardAPIProjection struct { - // Items is a list of DownwardAPIVolume file - // +optional - Items []DownwardAPIVolumeFile -} - -// AzureFile represents an Azure File Service mount on the host and bind mount to the pod. -type AzureFileVolumeSource struct { - // the name of secret that contains Azure Storage Account Name and Key - SecretName string - // Share Name - ShareName string - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. 
- // +optional - ReadOnly bool -} - -// AzureFile represents an Azure File Service mount on the host and bind mount to the pod. -type AzureFilePersistentVolumeSource struct { - // the name of secret that contains Azure Storage Account Name and Key - SecretName string - // Share Name - ShareName string - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool - // the namespace of the secret that contains Azure Storage Account Name and Key - // default is the same as the Pod - // +optional - SecretNamespace *string -} - -// Represents a vSphere volume resource. -type VsphereVirtualDiskVolumeSource struct { - // Path that identifies vSphere volume vmdk - VolumePath string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // +optional - FSType string - // Storage Policy Based Management (SPBM) profile name. - // +optional - StoragePolicyName string - // Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName. - // +optional - StoragePolicyID string -} - -// Represents a Photon Controller persistent disk resource. -type PhotonPersistentDiskVolumeSource struct { - // ID that identifies Photon Controller persistent disk - PdID string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - FSType string -} - -// PortworxVolumeSource represents a Portworx volume resource. -type PortworxVolumeSource struct { - // VolumeID uniquely identifies a Portworx volume - VolumeID string - // FSType represents the filesystem type to mount - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. - // +optional - FSType string - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -type AzureDataDiskCachingMode string -type AzureDataDiskKind string - -const ( - AzureDataDiskCachingNone AzureDataDiskCachingMode = "None" - AzureDataDiskCachingReadOnly AzureDataDiskCachingMode = "ReadOnly" - AzureDataDiskCachingReadWrite AzureDataDiskCachingMode = "ReadWrite" - - AzureSharedBlobDisk AzureDataDiskKind = "Shared" - AzureDedicatedBlobDisk AzureDataDiskKind = "Dedicated" - AzureManagedDisk AzureDataDiskKind = "Managed" -) - -// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. -type AzureDiskVolumeSource struct { - // The Name of the data disk in the blob storage - DiskName string - // The URI of the data disk in the blob storage - DataDiskURI string - // Host Caching mode: None, Read Only, Read Write. - // +optional - CachingMode *AzureDataDiskCachingMode - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // +optional - FSType *string - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly *bool - // Expected values Shared: mulitple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). 
defaults to shared - Kind *AzureDataDiskKind -} - -// ScaleIOVolumeSource represents a persistent ScaleIO volume -type ScaleIOVolumeSource struct { - // The host address of the ScaleIO API Gateway. - Gateway string - // The name of the storage system as configured in ScaleIO. - System string - // SecretRef references to the secret for ScaleIO user and other - // sensitive information. If this is not provided, Login operation will fail. - SecretRef *LocalObjectReference - // Flag to enable/disable SSL communication with Gateway, default false - // +optional - SSLEnabled bool - // The name of the Protection Domain for the configured storage (defaults to "default"). - // +optional - ProtectionDomain string - // The Storage Pool associated with the protection domain (defaults to "default"). - // +optional - StoragePool string - // Indicates whether the storage for a volume should be thick or thin (defaults to "thin"). - // +optional - StorageMode string - // The name of a volume already created in the ScaleIO system - // that is associated with this volume source. - VolumeName string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // +optional - FSType string - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -// Represents a StorageOS persistent volume resource. -type StorageOSVolumeSource struct { - // VolumeName is the human-readable name of the StorageOS volume. Volume - // names are only unique within a namespace. - VolumeName string - // VolumeNamespace specifies the scope of the volume within StorageOS. If no - // namespace is specified then the Pod's namespace will be used. This allows the - // Kubernetes name scoping to be mirrored within StorageOS for tighter integration. - // Set VolumeName to any name to override the default behaviour. - // Set to "default" if you are not using namespaces within StorageOS. - // Namespaces that do not pre-exist within StorageOS will be created. - // +optional - VolumeNamespace string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // +optional - FSType string - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool - // SecretRef specifies the secret to use for obtaining the StorageOS API - // credentials. If not specified, default values will be attempted. - // +optional - SecretRef *LocalObjectReference -} - -// Represents a StorageOS persistent volume resource. -type StorageOSPersistentVolumeSource struct { - // VolumeName is the human-readable name of the StorageOS volume. Volume - // names are only unique within a namespace. - VolumeName string - // VolumeNamespace specifies the scope of the volume within StorageOS. If no - // namespace is specified then the Pod's namespace will be used. This allows the - // Kubernetes name scoping to be mirrored within StorageOS for tighter integration. - // Set VolumeName to any name to override the default behaviour. - // Set to "default" if you are not using namespaces within StorageOS. - // Namespaces that do not pre-exist within StorageOS will be created. - // +optional - VolumeNamespace string - // Filesystem type to mount. 
- // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // +optional - FSType string - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool - // SecretRef specifies the secret to use for obtaining the StorageOS API - // credentials. If not specified, default values will be attempted. - // +optional - SecretRef *ObjectReference -} - -// Adapts a ConfigMap into a volume. -// -// The contents of the target ConfigMap's Data field will be presented in a -// volume as files using the keys in the Data field as the file names, unless -// the items element is populated with specific mappings of keys to paths. -// ConfigMap volumes support ownership management and SELinux relabeling. -type ConfigMapVolumeSource struct { - LocalObjectReference - // If unspecified, each key-value pair in the Data field of the referenced - // ConfigMap will be projected into the volume as a file whose name is the - // key and content is the value. If specified, the listed keys will be - // projected into the specified paths, and unlisted keys will not be - // present. If a key is specified which is not present in the ConfigMap, - // the volume setup will error unless it is marked optional. Paths must be - // relative and may not contain the '..' path or start with '..'. - // +optional - Items []KeyToPath - // Mode bits to use on created files by default. Must be a value between - // 0 and 0777. - // Directories within the path are not affected by this setting. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - // +optional - DefaultMode *int32 - // Specify whether the ConfigMap or it's keys must be defined - // +optional - Optional *bool -} - -// Adapts a ConfigMap into a projected volume. -// -// The contents of the target ConfigMap's Data field will be presented in a -// projected volume as files using the keys in the Data field as the file names, -// unless the items element is populated with specific mappings of keys to paths. -// Note that this is identical to a configmap volume source without the default -// mode. -type ConfigMapProjection struct { - LocalObjectReference - // If unspecified, each key-value pair in the Data field of the referenced - // ConfigMap will be projected into the volume as a file whose name is the - // key and content is the value. If specified, the listed keys will be - // projected into the specified paths, and unlisted keys will not be - // present. If a key is specified which is not present in the ConfigMap, - // the volume setup will error unless it is marked optional. Paths must be - // relative and may not contain the '..' path or start with '..'. - // +optional - Items []KeyToPath - // Specify whether the ConfigMap or it's keys must be defined - // +optional - Optional *bool -} - -// Represents a projected volume source -type ProjectedVolumeSource struct { - // list of volume projections - Sources []VolumeProjection - // Mode bits to use on created files by default. Must be a value between - // 0 and 0777. - // Directories within the path are not affected by this setting. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. 
- // +optional - DefaultMode *int32 -} - -// Projection that may be projected along with other supported volume types -type VolumeProjection struct { - // all types below are the supported types for projection into the same volume - - // information about the secret data to project - Secret *SecretProjection - // information about the downwardAPI data to project - DownwardAPI *DownwardAPIProjection - // information about the configMap data to project - ConfigMap *ConfigMapProjection -} - -// Maps a string key to a path within a volume. -type KeyToPath struct { - // The key to project. - Key string - - // The relative path of the file to map the key to. - // May not be an absolute path. - // May not contain the path element '..'. - // May not start with the string '..'. - Path string - // Optional: mode bits to use on this file, should be a value between 0 - // and 0777. If not specified, the volume defaultMode will be used. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - // +optional - Mode *int32 -} - -// Local represents directly-attached storage with node affinity -type LocalVolumeSource struct { - // The full path to the volume on the node - // For alpha, this path must be a directory - // Once block as a source is supported, then this path can point to a block device - Path string -} - -// ContainerPort represents a network port in a single container -type ContainerPort struct { - // Optional: If specified, this must be an IANA_SVC_NAME Each named port - // in a pod must have a unique name. - // +optional - Name string - // Optional: If specified, this must be a valid port number, 0 < x < 65536. - // If HostNetwork is specified, this must match ContainerPort. - // +optional - HostPort int32 - // Required: This must be a valid port number, 0 < x < 65536. - ContainerPort int32 - // Required: Supports "TCP" and "UDP". - // +optional - Protocol Protocol - // Optional: What host IP to bind the external port to. - // +optional - HostIP string -} - -// VolumeMount describes a mounting of a Volume within a container. -type VolumeMount struct { - // Required: This must match the Name of a Volume [above]. - Name string - // Optional: Defaults to false (read-write). - // +optional - ReadOnly bool - // Required. Must not contain ':'. - MountPath string - // Path within the volume from which the container's volume should be mounted. - // Defaults to "" (volume's root). - // +optional - SubPath string - // mountPropagation determines how mounts are propagated from the host - // to container and the other way around. - // When not set, MountPropagationHostToContainer is used. - // This field is alpha in 1.8 and can be reworked or removed in a future - // release. - // +optional - MountPropagation *MountPropagationMode -} - -// MountPropagationMode describes mount propagation. -type MountPropagationMode string - -const ( - // MountPropagationHostToContainer means that the volume in a container will - // receive new mounts from the host or other containers, but filesystems - // mounted inside the container won't be propagated to the host or other - // containers. - // Note that this mode is recursively applied to all mounts in the volume - // ("rslave" in Linux terminology). 
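// Illustrative sketch (assumes ConfigMapVolumeSource and KeyToPath above): projecting a
// single ConfigMap key to a relative path with restrictive mode bits. The ConfigMap
// name, key and path are hypothetical examples.
var exampleConfigMapFileMode int32 = 0440
var exampleConfigMapVolume = ConfigMapVolumeSource{
	LocalObjectReference: LocalObjectReference{Name: "app-config"},
	Items: []KeyToPath{
		{Key: "config.yaml", Path: "conf/config.yaml", Mode: &exampleConfigMapFileMode},
	},
	DefaultMode: &exampleConfigMapFileMode,
}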
- MountPropagationHostToContainer MountPropagationMode = "HostToContainer" - // MountPropagationBidirectional means that the volume in a container will - // receive new mounts from the host or other containers, and its own mounts - // will be propagated from the container to the host or other containers. - // Note that this mode is recursively applied to all mounts in the volume - // ("rshared" in Linux terminology). - MountPropagationBidirectional MountPropagationMode = "Bidirectional" -) - -// EnvVar represents an environment variable present in a Container. -type EnvVar struct { - // Required: This must be a C_IDENTIFIER. - Name string - // Optional: no more than one of the following may be specified. - // Optional: Defaults to ""; variable references $(VAR_NAME) are expanded - // using the previous defined environment variables in the container and - // any service environment variables. If a variable cannot be resolved, - // the reference in the input string will be unchanged. The $(VAR_NAME) - // syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped - // references will never be expanded, regardless of whether the variable - // exists or not. - // +optional - Value string - // Optional: Specifies a source the value of this var should come from. - // +optional - ValueFrom *EnvVarSource -} - -// EnvVarSource represents a source for the value of an EnvVar. -// Only one of its fields may be set. -type EnvVarSource struct { - // Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, - // metadata.uid, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP. - // +optional - FieldRef *ObjectFieldSelector - // Selects a resource of the container: only resources limits and requests - // (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. - // +optional - ResourceFieldRef *ResourceFieldSelector - // Selects a key of a ConfigMap. - // +optional - ConfigMapKeyRef *ConfigMapKeySelector - // Selects a key of a secret in the pod's namespace. - // +optional - SecretKeyRef *SecretKeySelector -} - -// ObjectFieldSelector selects an APIVersioned field of an object. -type ObjectFieldSelector struct { - // Required: Version of the schema the FieldPath is written in terms of. - // If no value is specified, it will be defaulted to the APIVersion of the - // enclosing object. - APIVersion string - // Required: Path of the field to select in the specified API version - FieldPath string -} - -// ResourceFieldSelector represents container resources (cpu, memory) and their output format -type ResourceFieldSelector struct { - // Container name: required for volumes, optional for env vars - // +optional - ContainerName string - // Required: resource to select - Resource string - // Specifies the output format of the exposed resources, defaults to "1" - // +optional - Divisor resource.Quantity -} - -// Selects a key from a ConfigMap. -type ConfigMapKeySelector struct { - // The ConfigMap to select from. - LocalObjectReference - // The key to select. - Key string - // Specify whether the ConfigMap or it's key must be defined - // +optional - Optional *bool -} - -// SecretKeySelector selects a key of a Secret. -type SecretKeySelector struct { - // The name of the secret in the pod's namespace to select from. - LocalObjectReference - // The key of the secret to select from. Must be a valid secret key. 
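// Illustrative sketch (assumes EnvVar, EnvVarSource and ObjectFieldSelector above): an
// environment variable sourced from the downward API; metadata.name is one of the
// supported field paths listed in the EnvVarSource comment.
var examplePodNameEnv = EnvVar{
	Name: "POD_NAME",
	ValueFrom: &EnvVarSource{
		FieldRef: &ObjectFieldSelector{APIVersion: "v1", FieldPath: "metadata.name"},
	},
}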
- Key string - // Specify whether the Secret or it's key must be defined - // +optional - Optional *bool -} - -// EnvFromSource represents the source of a set of ConfigMaps -type EnvFromSource struct { - // An optional identifier to prepend to each key in the ConfigMap. - // +optional - Prefix string - // The ConfigMap to select from. - //+optional - ConfigMapRef *ConfigMapEnvSource - // The Secret to select from. - //+optional - SecretRef *SecretEnvSource -} - -// ConfigMapEnvSource selects a ConfigMap to populate the environment -// variables with. -// -// The contents of the target ConfigMap's Data field will represent the -// key-value pairs as environment variables. -type ConfigMapEnvSource struct { - // The ConfigMap to select from. - LocalObjectReference - // Specify whether the ConfigMap must be defined - // +optional - Optional *bool -} - -// SecretEnvSource selects a Secret to populate the environment -// variables with. -// -// The contents of the target Secret's Data field will represent the -// key-value pairs as environment variables. -type SecretEnvSource struct { - // The Secret to select from. - LocalObjectReference - // Specify whether the Secret must be defined - // +optional - Optional *bool -} - -// HTTPHeader describes a custom header to be used in HTTP probes -type HTTPHeader struct { - // The header field name - Name string - // The header field value - Value string -} - -// HTTPGetAction describes an action based on HTTP Get requests. -type HTTPGetAction struct { - // Optional: Path to access on the HTTP server. - // +optional - Path string - // Required: Name or number of the port to access on the container. - // +optional - Port intstr.IntOrString - // Optional: Host name to connect to, defaults to the pod IP. You - // probably want to set "Host" in httpHeaders instead. - // +optional - Host string - // Optional: Scheme to use for connecting to the host, defaults to HTTP. - // +optional - Scheme URIScheme - // Optional: Custom headers to set in the request. HTTP allows repeated headers. - // +optional - HTTPHeaders []HTTPHeader -} - -// URIScheme identifies the scheme used for connection to a host for Get actions -type URIScheme string - -const ( - // URISchemeHTTP means that the scheme used will be http:// - URISchemeHTTP URIScheme = "HTTP" - // URISchemeHTTPS means that the scheme used will be https:// - URISchemeHTTPS URIScheme = "HTTPS" -) - -// TCPSocketAction describes an action based on opening a socket -type TCPSocketAction struct { - // Required: Port to connect to. - // +optional - Port intstr.IntOrString - // Optional: Host name to connect to, defaults to the pod IP. - // +optional - Host string -} - -// ExecAction describes a "run in container" action. -type ExecAction struct { - // Command is the command line to execute inside the container, the working directory for the - // command is root ('/') in the container's filesystem. The command is simply exec'd, it is - // not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - // a shell, you need to explicitly call out to that shell. - // +optional - Command []string -} - -// Probe describes a health check to be performed against a container to determine whether it is -// alive or ready to receive traffic. -type Probe struct { - // The action taken to determine the health of a container - Handler - // Length of time before health checking is activated. In seconds. - // +optional - InitialDelaySeconds int32 - // Length of time before health checking times out. 
In seconds. - // +optional - TimeoutSeconds int32 - // How often (in seconds) to perform the probe. - // +optional - PeriodSeconds int32 - // Minimum consecutive successes for the probe to be considered successful after having failed. - // Must be 1 for liveness. - // +optional - SuccessThreshold int32 - // Minimum consecutive failures for the probe to be considered failed after having succeeded. - // +optional - FailureThreshold int32 -} - -// PullPolicy describes a policy for if/when to pull a container image -type PullPolicy string - -const ( - // PullAlways means that kubelet always attempts to pull the latest image. Container will fail If the pull fails. - PullAlways PullPolicy = "Always" - // PullNever means that kubelet never pulls an image, but only uses a local image. Container will fail if the image isn't present - PullNever PullPolicy = "Never" - // PullIfNotPresent means that kubelet pulls if the image isn't present on disk. Container will fail if the image isn't present and the pull fails. - PullIfNotPresent PullPolicy = "IfNotPresent" -) - -// TerminationMessagePolicy describes how termination messages are retrieved from a container. -type TerminationMessagePolicy string - -const ( - // TerminationMessageReadFile is the default behavior and will set the container status message to - // the contents of the container's terminationMessagePath when the container exits. - TerminationMessageReadFile TerminationMessagePolicy = "File" - // TerminationMessageFallbackToLogsOnError will read the most recent contents of the container logs - // for the container status message when the container exits with an error and the - // terminationMessagePath has no contents. - TerminationMessageFallbackToLogsOnError TerminationMessagePolicy = "FallbackToLogsOnError" -) - -// Capability represent POSIX capabilities type -type Capability string - -// Capabilities represent POSIX capabilities that can be added or removed to a running container. -type Capabilities struct { - // Added capabilities - // +optional - Add []Capability - // Removed capabilities - // +optional - Drop []Capability -} - -// ResourceRequirements describes the compute resource requirements. -type ResourceRequirements struct { - // Limits describes the maximum amount of compute resources allowed. - // +optional - Limits ResourceList - // Requests describes the minimum amount of compute resources required. - // If Request is omitted for a container, it defaults to Limits if that is explicitly specified, - // otherwise to an implementation-defined value - // +optional - Requests ResourceList -} - -// Container represents a single container that is expected to be run on the host. -type Container struct { - // Required: This must be a DNS_LABEL. Each container in a pod must - // have a unique name. - Name string - // Required. - Image string - // Optional: The docker image's entrypoint is used if this is not provided; cannot be updated. - // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax - // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - // regardless of whether the variable exists or not. - // +optional - Command []string - // Optional: The docker image's cmd is used if this is not provided; cannot be updated. - // Variable references $(VAR_NAME) are expanded using the container's environment. 
If a variable - // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax - // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - // regardless of whether the variable exists or not. - // +optional - Args []string - // Optional: Defaults to Docker's default. - // +optional - WorkingDir string - // +optional - Ports []ContainerPort - // List of sources to populate environment variables in the container. - // The keys defined within a source must be a C_IDENTIFIER. All invalid keys - // will be reported as an event when the container is starting. When a key exists in multiple - // sources, the value associated with the last source will take precedence. - // Values defined by an Env with a duplicate key will take precedence. - // Cannot be updated. - // +optional - EnvFrom []EnvFromSource - // +optional - Env []EnvVar - // Compute resource requirements. - // +optional - Resources ResourceRequirements - // +optional - VolumeMounts []VolumeMount - // +optional - LivenessProbe *Probe - // +optional - ReadinessProbe *Probe - // +optional - Lifecycle *Lifecycle - // Required. - // +optional - TerminationMessagePath string - // +optional - TerminationMessagePolicy TerminationMessagePolicy - // Required: Policy for pulling images for this container - ImagePullPolicy PullPolicy - // Optional: SecurityContext defines the security options the container should be run with. - // If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. - // +optional - SecurityContext *SecurityContext - - // Variables for interactive containers, these have very specialized use-cases (e.g. debugging) - // and shouldn't be used for general purpose containers. - // +optional - Stdin bool - // +optional - StdinOnce bool - // +optional - TTY bool -} - -// Handler defines a specific action that should be taken -// TODO: pass structured data to these actions, and document that data here. -type Handler struct { - // One and only one of the following should be specified. - // Exec specifies the action to take. - // +optional - Exec *ExecAction - // HTTPGet specifies the http request to perform. - // +optional - HTTPGet *HTTPGetAction - // TCPSocket specifies an action involving a TCP port. - // TODO: implement a realistic TCP lifecycle hook - // +optional - TCPSocket *TCPSocketAction -} - -// Lifecycle describes actions that the management system should take in response to container lifecycle -// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks -// until the action is complete, unless the container process fails, in which case the handler is aborted. -type Lifecycle struct { - // PostStart is called immediately after a container is created. If the handler fails, the container - // is terminated and restarted. - // +optional - PostStart *Handler - // PreStop is called immediately before a container is terminated. The reason for termination is - // passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated. - // +optional - PreStop *Handler -} - -// The below types are used by kube_client and api_server. - -type ConditionStatus string - -// These are valid condition statuses. "ConditionTrue" means a resource is in the condition; -// "ConditionFalse" means a resource is not in the condition; "ConditionUnknown" means kubernetes -// can't decide if a resource is in the condition or not. 
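// Illustrative sketch (assumes Container, Probe, Handler and HTTPGetAction above): a
// container wiring an HTTP liveness probe through the embedded Handler. The image
// name and port are hypothetical examples.
var exampleWebContainer = Container{
	Name:  "web",
	Image: "example.com/web:1.0",
	Ports: []ContainerPort{{Name: "http", ContainerPort: 8080}},
	LivenessProbe: &Probe{
		Handler: Handler{
			HTTPGet: &HTTPGetAction{Path: "/healthz", Port: intstr.FromInt(8080), Scheme: URISchemeHTTP},
		},
		InitialDelaySeconds: 5,
		PeriodSeconds:       10,
		FailureThreshold:    3,
	},
	ImagePullPolicy: PullIfNotPresent,
}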
In the future, we could add other -// intermediate conditions, e.g. ConditionDegraded. -const ( - ConditionTrue ConditionStatus = "True" - ConditionFalse ConditionStatus = "False" - ConditionUnknown ConditionStatus = "Unknown" -) - -type ContainerStateWaiting struct { - // A brief CamelCase string indicating details about why the container is in waiting state. - // +optional - Reason string - // A human-readable message indicating details about why the container is in waiting state. - // +optional - Message string -} - -type ContainerStateRunning struct { - // +optional - StartedAt metav1.Time -} - -type ContainerStateTerminated struct { - ExitCode int32 - // +optional - Signal int32 - // +optional - Reason string - // +optional - Message string - // +optional - StartedAt metav1.Time - // +optional - FinishedAt metav1.Time - // +optional - ContainerID string -} - -// ContainerState holds a possible state of container. -// Only one of its members may be specified. -// If none of them is specified, the default one is ContainerStateWaiting. -type ContainerState struct { - // +optional - Waiting *ContainerStateWaiting - // +optional - Running *ContainerStateRunning - // +optional - Terminated *ContainerStateTerminated -} - -type ContainerStatus struct { - // Each container in a pod must have a unique name. - Name string - // +optional - State ContainerState - // +optional - LastTerminationState ContainerState - // Ready specifies whether the container has passed its readiness check. - Ready bool - // Note that this is calculated from dead containers. But those containers are subject to - // garbage collection. This value will get capped at 5 by GC. - RestartCount int32 - Image string - ImageID string - // +optional - ContainerID string -} - -// PodPhase is a label for the condition of a pod at the current time. -type PodPhase string - -// These are the valid statuses of pods. -const ( - // PodPending means the pod has been accepted by the system, but one or more of the containers - // has not been started. This includes time before being bound to a node, as well as time spent - // pulling images onto the host. - PodPending PodPhase = "Pending" - // PodRunning means the pod has been bound to a node and all of the containers have been started. - // At least one container is still running or is in the process of being restarted. - PodRunning PodPhase = "Running" - // PodSucceeded means that all containers in the pod have voluntarily terminated - // with a container exit code of 0, and the system is not going to restart any of these containers. - PodSucceeded PodPhase = "Succeeded" - // PodFailed means that all containers in the pod have terminated, and at least one container has - // terminated in a failure (exited with a non-zero exit code or was stopped by the system). - PodFailed PodPhase = "Failed" - // PodUnknown means that for some reason the state of the pod could not be obtained, typically due - // to an error in communicating with the host of the pod. - PodUnknown PodPhase = "Unknown" -) - -type PodConditionType string - -// These are valid conditions of pod. -const ( - // PodScheduled represents status of the scheduling process for this pod. - PodScheduled PodConditionType = "PodScheduled" - // PodReady means the pod is able to service requests and should be added to the - // load balancing pools of all matching services. - PodReady PodConditionType = "Ready" - // PodInitialized means that all init containers in the pod have started successfully. 
- PodInitialized PodConditionType = "Initialized" - // PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler - // can't schedule the pod right now, for example due to insufficient resources in the cluster. - PodReasonUnschedulable = "Unschedulable" -) - -type PodCondition struct { - Type PodConditionType - Status ConditionStatus - // +optional - LastProbeTime metav1.Time - // +optional - LastTransitionTime metav1.Time - // +optional - Reason string - // +optional - Message string -} - -// RestartPolicy describes how the container should be restarted. -// Only one of the following restart policies may be specified. -// If none of the following policies is specified, the default one -// is RestartPolicyAlways. -type RestartPolicy string - -const ( - RestartPolicyAlways RestartPolicy = "Always" - RestartPolicyOnFailure RestartPolicy = "OnFailure" - RestartPolicyNever RestartPolicy = "Never" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodList is a list of Pods. -type PodList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []Pod -} - -// DNSPolicy defines how a pod's DNS will be configured. -type DNSPolicy string - -const ( - // DNSClusterFirstWithHostNet indicates that the pod should use cluster DNS - // first, if it is available, then fall back on the default - // (as determined by kubelet) DNS settings. - DNSClusterFirstWithHostNet DNSPolicy = "ClusterFirstWithHostNet" - - // DNSClusterFirst indicates that the pod should use cluster DNS - // first unless hostNetwork is true, if it is available, then - // fall back on the default (as determined by kubelet) DNS settings. - DNSClusterFirst DNSPolicy = "ClusterFirst" - - // DNSDefault indicates that the pod should use the default (as - // determined by kubelet) DNS settings. - DNSDefault DNSPolicy = "Default" -) - -// A node selector represents the union of the results of one or more label queries -// over a set of nodes; that is, it represents the OR of the selectors represented -// by the node selector terms. -type NodeSelector struct { - //Required. A list of node selector terms. The terms are ORed. - NodeSelectorTerms []NodeSelectorTerm -} - -// A null or empty node selector term matches no objects. -type NodeSelectorTerm struct { - //Required. A list of node selector requirements. The requirements are ANDed. - MatchExpressions []NodeSelectorRequirement -} - -// A node selector requirement is a selector that contains values, a key, and an operator -// that relates the key and values. -type NodeSelectorRequirement struct { - // The label key that the selector applies to. - Key string - // Represents a key's relationship to a set of values. - // Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - Operator NodeSelectorOperator - // An array of string values. If the operator is In or NotIn, - // the values array must be non-empty. If the operator is Exists or DoesNotExist, - // the values array must be empty. If the operator is Gt or Lt, the values - // array must have a single element, which will be interpreted as an integer. - // This array is replaced during a strategic merge patch. - // +optional - Values []string -} - -// A node selector operator is the set of operators that can be used in -// a node selector requirement. 
-type NodeSelectorOperator string - -const ( - NodeSelectorOpIn NodeSelectorOperator = "In" - NodeSelectorOpNotIn NodeSelectorOperator = "NotIn" - NodeSelectorOpExists NodeSelectorOperator = "Exists" - NodeSelectorOpDoesNotExist NodeSelectorOperator = "DoesNotExist" - NodeSelectorOpGt NodeSelectorOperator = "Gt" - NodeSelectorOpLt NodeSelectorOperator = "Lt" -) - -// Affinity is a group of affinity scheduling rules. -type Affinity struct { - // Describes node affinity scheduling rules for the pod. - // +optional - NodeAffinity *NodeAffinity - // Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). - // +optional - PodAffinity *PodAffinity - // Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). - // +optional - PodAntiAffinity *PodAntiAffinity -} - -// Pod affinity is a group of inter pod affinity scheduling rules. -type PodAffinity struct { - // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. - // If the affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system will try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. - // +optional - // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm - - // If the affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system may or may not try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. - // +optional - RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm - // The scheduler will prefer to schedule pods to nodes that satisfy - // the affinity expressions specified by this field, but it may choose - // a node that violates one or more of the expressions. The node that is - // most preferred is the one with the greatest sum of weights, i.e. - // for each node that meets all of the scheduling requirements (resource - // request, requiredDuringScheduling affinity expressions, etc.), - // compute a sum by iterating through the elements of this field and adding - // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - // node(s) with the highest sum are the most preferred. - // +optional - PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm -} - -// Pod anti affinity is a group of inter pod anti affinity scheduling rules. -type PodAntiAffinity struct { - // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. - // If the anti-affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the anti-affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. 
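// Illustrative sketch (assumes NodeSelector, NodeSelectorTerm and NodeSelectorRequirement
// above): terms are ORed and requirements within a term are ANDed, so this selector
// matches nodes whose hypothetical zone label is in the given set.
var exampleZoneSelector = NodeSelector{
	NodeSelectorTerms: []NodeSelectorTerm{
		{
			MatchExpressions: []NodeSelectorRequirement{
				{Key: "failure-domain.example.com/zone", Operator: NodeSelectorOpIn, Values: []string{"zone-a", "zone-b"}},
			},
		},
	},
}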
due to a pod label update), the - // system will try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. - // +optional - // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm - - // If the anti-affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the anti-affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system may or may not try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. - // +optional - RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm - // The scheduler will prefer to schedule pods to nodes that satisfy - // the anti-affinity expressions specified by this field, but it may choose - // a node that violates one or more of the expressions. The node that is - // most preferred is the one with the greatest sum of weights, i.e. - // for each node that meets all of the scheduling requirements (resource - // request, requiredDuringScheduling anti-affinity expressions, etc.), - // compute a sum by iterating through the elements of this field and adding - // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - // node(s) with the highest sum are the most preferred. - // +optional - PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm -} - -// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) -type WeightedPodAffinityTerm struct { - // weight associated with matching the corresponding podAffinityTerm, - // in the range 1-100. - Weight int32 - // Required. A pod affinity term, associated with the corresponding weight. - PodAffinityTerm PodAffinityTerm -} - -// Defines a set of pods (namely those matching the labelSelector -// relative to the given namespace(s)) that this pod should be -// co-located (affinity) or not co-located (anti-affinity) with, -// where co-located is defined as running on a node whose value of -// the label with key matches that of any node on which -// a pod of the set of pods is running. -type PodAffinityTerm struct { - // A label query over a set of resources, in this case pods. - // +optional - LabelSelector *metav1.LabelSelector - // namespaces specifies which namespaces the labelSelector applies to (matches against); - // null or empty list means "this pod's namespace" - Namespaces []string - // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - // the labelSelector in the specified namespaces, where co-located is defined as running on a node - // whose value of the label with key topologyKey matches that of any node on which any of the - // selected pods is running. - // Empty topologyKey is not allowed. - // +optional - TopologyKey string -} - -// Node affinity is a group of node affinity scheduling rules. -type NodeAffinity struct { - // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. - // If the affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. 
- // If the affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to an update), the system - // will try to eventually evict the pod from its node. - // +optional - // RequiredDuringSchedulingRequiredDuringExecution *NodeSelector - - // If the affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to an update), the system - // may or may not try to eventually evict the pod from its node. - // +optional - RequiredDuringSchedulingIgnoredDuringExecution *NodeSelector - // The scheduler will prefer to schedule pods to nodes that satisfy - // the affinity expressions specified by this field, but it may choose - // a node that violates one or more of the expressions. The node that is - // most preferred is the one with the greatest sum of weights, i.e. - // for each node that meets all of the scheduling requirements (resource - // request, requiredDuringScheduling affinity expressions, etc.), - // compute a sum by iterating through the elements of this field and adding - // "weight" to the sum if the node matches the corresponding matchExpressions; the - // node(s) with the highest sum are the most preferred. - // +optional - PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTerm -} - -// An empty preferred scheduling term matches all objects with implicit weight 0 -// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). -type PreferredSchedulingTerm struct { - // Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. - Weight int32 - // A node selector term, associated with the corresponding weight. - Preference NodeSelectorTerm -} - -// The node this Taint is attached to has the "effect" on -// any pod that does not tolerate the Taint. -type Taint struct { - // Required. The taint key to be applied to a node. - Key string - // Required. The taint value corresponding to the taint key. - // +optional - Value string - // Required. The effect of the taint on pods - // that do not tolerate the taint. - // Valid effects are NoSchedule, PreferNoSchedule and NoExecute. - Effect TaintEffect - // TimeAdded represents the time at which the taint was added. - // It is only written for NoExecute taints. - // +optional - TimeAdded metav1.Time -} - -type TaintEffect string - -const ( - // Do not allow new pods to schedule onto the node unless they tolerate the taint, - // but allow all pods submitted to Kubelet without going through the scheduler - // to start, and allow all already-running pods to continue running. - // Enforced by the scheduler. - TaintEffectNoSchedule TaintEffect = "NoSchedule" - // Like TaintEffectNoSchedule, but the scheduler tries not to schedule - // new pods onto the node, rather than prohibiting new pods from scheduling - // onto the node entirely. Enforced by the scheduler. - TaintEffectPreferNoSchedule TaintEffect = "PreferNoSchedule" - // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. - // Like TaintEffectNoSchedule, but additionally do not allow pods submitted to - // Kubelet without going through the scheduler to start. - // Enforced by Kubelet and the scheduler. - // TaintEffectNoScheduleNoAdmit TaintEffect = "NoScheduleNoAdmit" - - // Evict any already-running pods that do not tolerate the taint. 
- // Currently enforced by NodeController. - TaintEffectNoExecute TaintEffect = "NoExecute" -) - -// The pod this Toleration is attached to tolerates any taint that matches -// the triple using the matching operator . -type Toleration struct { - // Key is the taint key that the toleration applies to. Empty means match all taint keys. - // If the key is empty, operator must be Exists; this combination means to match all values and all keys. - // +optional - Key string - // Operator represents a key's relationship to the value. - // Valid operators are Exists and Equal. Defaults to Equal. - // Exists is equivalent to wildcard for value, so that a pod can - // tolerate all taints of a particular category. - // +optional - Operator TolerationOperator - // Value is the taint value the toleration matches to. - // If the operator is Exists, the value should be empty, otherwise just a regular string. - // +optional - Value string - // Effect indicates the taint effect to match. Empty means match all taint effects. - // When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. - // +optional - Effect TaintEffect - // TolerationSeconds represents the period of time the toleration (which must be - // of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, - // it is not set, which means tolerate the taint forever (do not evict). Zero and - // negative values will be treated as 0 (evict immediately) by the system. - // +optional - TolerationSeconds *int64 -} - -// A toleration operator is the set of operators that can be used in a toleration. -type TolerationOperator string - -const ( - TolerationOpExists TolerationOperator = "Exists" - TolerationOpEqual TolerationOperator = "Equal" -) - -// PodSpec is a description of a pod -type PodSpec struct { - Volumes []Volume - // List of initialization containers belonging to the pod. - InitContainers []Container - // List of containers belonging to the pod. - Containers []Container - // +optional - RestartPolicy RestartPolicy - // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. - // Value must be non-negative integer. The value zero indicates delete immediately. - // If this value is nil, the default grace period will be used instead. - // The grace period is the duration in seconds after the processes running in the pod are sent - // a termination signal and the time when the processes are forcibly halted with a kill signal. - // Set this value longer than the expected cleanup time for your process. - // +optional - TerminationGracePeriodSeconds *int64 - // Optional duration in seconds relative to the StartTime that the pod may be active on a node - // before the system actively tries to terminate the pod; value must be positive integer - // +optional - ActiveDeadlineSeconds *int64 - // Required: Set DNS policy. - // +optional - DNSPolicy DNSPolicy - // NodeSelector is a selector which must be true for the pod to fit on a node - // +optional - NodeSelector map[string]string - - // ServiceAccountName is the name of the ServiceAccount to use to run this pod - // The pod will be allowed to use secrets referenced by the ServiceAccount - ServiceAccountName string - // AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. - // +optional - AutomountServiceAccountToken *bool - - // NodeName is a request to schedule this pod onto a specific node. 
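// Illustrative sketch (assumes Taint and Toleration above): a NoExecute taint and a
// toleration that matches it on key, value and effect, evicting the pod 300 seconds
// after the taint is added. Key and value are hypothetical examples.
var exampleTaint = Taint{Key: "example.com/maintenance", Value: "planned", Effect: TaintEffectNoExecute}
var exampleTolerationSeconds int64 = 300
var exampleToleration = Toleration{
	Key:               "example.com/maintenance",
	Operator:          TolerationOpEqual,
	Value:             "planned",
	Effect:            TaintEffectNoExecute,
	TolerationSeconds: &exampleTolerationSeconds,
}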
If it is non-empty, - // the scheduler simply schedules this pod onto that node, assuming that it fits resource - // requirements. - // +optional - NodeName string - // SecurityContext holds pod-level security attributes and common container settings. - // Optional: Defaults to empty. See type description for default values of each field. - // +optional - SecurityContext *PodSecurityContext - // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. - // If specified, these secrets will be passed to individual puller implementations for them to use. For example, - // in the case of docker, only DockerConfig type secrets are honored. - // +optional - ImagePullSecrets []LocalObjectReference - // Specifies the hostname of the Pod. - // If not specified, the pod's hostname will be set to a system-defined value. - // +optional - Hostname string - // If specified, the fully qualified Pod hostname will be "...svc.". - // If not specified, the pod will not have a domainname at all. - // +optional - Subdomain string - // If specified, the pod's scheduling constraints - // +optional - Affinity *Affinity - // If specified, the pod will be dispatched by specified scheduler. - // If not specified, the pod will be dispatched by default scheduler. - // +optional - SchedulerName string - // If specified, the pod's tolerations. - // +optional - Tolerations []Toleration - // HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts - // file if specified. This is only valid for non-hostNetwork pods. - // +optional - HostAliases []HostAlias - // If specified, indicates the pod's priority. "SYSTEM" is a special keyword - // which indicates the highest priority. Any other name must be defined by - // creating a PriorityClass object with that name. - // If not specified, the pod priority will be default or zero if there is no - // default. - // +optional - PriorityClassName string - // The priority value. Various system components use this field to find the - // priority of the pod. When Priority Admission Controller is enabled, it - // prevents users from setting this field. The admission controller populates - // this field from PriorityClassName. - // The higher the value, the higher the priority. - // +optional - Priority *int32 -} - -// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the -// pod's hosts file. -type HostAlias struct { - IP string - Hostnames []string -} - -// Sysctl defines a kernel parameter to be set -type Sysctl struct { - // Name of a property to set - Name string - // Value of a property to set - Value string -} - -// PodSecurityContext holds pod-level security attributes and common container settings. -// Some fields are also present in container.securityContext. Field values of -// container.securityContext take precedence over field values of PodSecurityContext. -type PodSecurityContext struct { - // Use the host's network namespace. If this option is set, the ports that will be - // used must be specified. - // Optional: Default to false - // +k8s:conversion-gen=false - // +optional - HostNetwork bool - // Use the host's pid namespace. - // Optional: Default to false. - // +k8s:conversion-gen=false - // +optional - HostPID bool - // Use the host's ipc namespace. - // Optional: Default to false. - // +k8s:conversion-gen=false - // +optional - HostIPC bool - // The SELinux context to be applied to all containers. 
- // If unspecified, the container runtime will allocate a random SELinux context for each - // container. May also be set in SecurityContext. If set in - // both SecurityContext and PodSecurityContext, the value specified in SecurityContext - // takes precedence for that container. - // +optional - SELinuxOptions *SELinuxOptions - // The UID to run the entrypoint of the container process. - // Defaults to user specified in image metadata if unspecified. - // May also be set in SecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence - // for that container. - // +optional - RunAsUser *int64 - // Indicates that the container must run as a non-root user. - // If true, the Kubelet will validate the image at runtime to ensure that it - // does not run as UID 0 (root) and fail to start the container if it does. - // If unset or false, no such validation will be performed. - // May also be set in SecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - // +optional - RunAsNonRoot *bool - // A list of groups applied to the first process run in each container, in addition - // to the container's primary GID. If unspecified, no groups will be added to - // any container. - // +optional - SupplementalGroups []int64 - // A special supplemental group that applies to all containers in a pod. - // Some volume types allow the Kubelet to change the ownership of that volume - // to be owned by the pod: - // - // 1. The owning GID will be the FSGroup - // 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) - // 3. The permission bits are OR'd with rw-rw---- - // - // If unset, the Kubelet will not modify the ownership and permissions of any volume. - // +optional - FSGroup *int64 -} - -// PodQOSClass defines the supported qos classes of Pods. -type PodQOSClass string - -const ( - // PodQOSGuaranteed is the Guaranteed qos class. - PodQOSGuaranteed PodQOSClass = "Guaranteed" - // PodQOSBurstable is the Burstable qos class. - PodQOSBurstable PodQOSClass = "Burstable" - // PodQOSBestEffort is the BestEffort qos class. - PodQOSBestEffort PodQOSClass = "BestEffort" -) - -// PodStatus represents information about the status of a pod. Status may trail the actual -// state of a system. -type PodStatus struct { - // +optional - Phase PodPhase - // +optional - Conditions []PodCondition - // A human readable message indicating details about why the pod is in this state. - // +optional - Message string - // A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted' - // +optional - Reason string - - // +optional - HostIP string - // +optional - PodIP string - - // Date and time at which the object was acknowledged by the Kubelet. - // This is before the Kubelet pulled the container image(s) for the pod. - // +optional - StartTime *metav1.Time - // +optional - QOSClass PodQOSClass - - // The list has one entry per init container in the manifest. The most recent successful - // init container will have ready = true, the most recently started container will have - // startTime set. - // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-and-container-status - InitContainerStatuses []ContainerStatus - // The list has one entry per container in the manifest. Each entry is - // currently the output of `docker inspect`. 
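// Illustrative sketch (assumes PodSecurityContext above): enforcing a non-root UID and
// setting the FSGroup that owns supported volumes, per the field comments. The numeric
// IDs are hypothetical examples.
var exampleRunAsUser int64 = 1000
var exampleRunAsNonRoot = true
var exampleFSGroup int64 = 2000
var examplePodSecurityContext = PodSecurityContext{
	RunAsUser:    &exampleRunAsUser,
	RunAsNonRoot: &exampleRunAsNonRoot,
	FSGroup:      &exampleFSGroup,
}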
This output format is *not* - // final and should not be relied upon. - // TODO: Make real decisions about what our info should look like. Re-enable fuzz test - // when we have done this. - // +optional - ContainerStatuses []ContainerStatus -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded -type PodStatusResult struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - // Status represents the current information about a pod. This data may not be up - // to date. - // +optional - Status PodStatus -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Pod is a collection of containers, used as either input (create, update) or as output (list, get). -type Pod struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the behavior of a pod. - // +optional - Spec PodSpec - - // Status represents the current information about a pod. This data may not be up - // to date. - // +optional - Status PodStatus -} - -// PodTemplateSpec describes the data a pod should have when created from a template -type PodTemplateSpec struct { - // Metadata of the pods created from this template. - // +optional - metav1.ObjectMeta - - // Spec defines the behavior of a pod. - // +optional - Spec PodSpec -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodTemplate describes a template for creating copies of a predefined pod. -type PodTemplate struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Template defines the pods that will be created from this pod template - // +optional - Template PodTemplateSpec -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodTemplateList is a list of PodTemplates. -type PodTemplateList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []PodTemplate -} - -// ReplicationControllerSpec is the specification of a replication controller. -// As the internal representation of a replication controller, it may have either -// a TemplateRef or a Template set. -type ReplicationControllerSpec struct { - // Replicas is the number of desired replicas. - Replicas int32 - - // Minimum number of seconds for which a newly created pod should be ready - // without any of its container crashing, for it to be considered available. - // Defaults to 0 (pod will be considered available as soon as it is ready) - // +optional - MinReadySeconds int32 - - // Selector is a label query over pods that should match the Replicas count. - Selector map[string]string - - // TemplateRef is a reference to an object that describes the pod that will be created if - // insufficient replicas are detected. This reference is ignored if a Template is set. - // Must be set before converting to a versioned API object - // +optional - //TemplateRef *ObjectReference - - // Template is the object that describes the pod that will be created if - // insufficient replicas are detected. Internally, this takes precedence over a - // TemplateRef. - // +optional - Template *PodTemplateSpec -} - -// ReplicationControllerStatus represents the current status of a replication -// controller. -type ReplicationControllerStatus struct { - // Replicas is the number of actual replicas. - Replicas int32 - - // The number of pods that have labels matching the labels of the pod template of the replication controller. 
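// Illustrative sketch (assumes ReplicationControllerSpec, PodTemplateSpec and PodSpec
// above): the Selector matches the labels set on the pod Template, as the field
// comments require. Labels and the container image are hypothetical examples.
var exampleRCSpec = ReplicationControllerSpec{
	Replicas: 3,
	Selector: map[string]string{"app": "web"},
	Template: &PodTemplateSpec{
		ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "web"}},
		Spec: PodSpec{
			RestartPolicy: RestartPolicyAlways,
			DNSPolicy:     DNSClusterFirst,
			Containers:    []Container{{Name: "web", Image: "example.com/web:1.0", ImagePullPolicy: PullIfNotPresent}},
		},
	},
}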
- // +optional - FullyLabeledReplicas int32 - - // The number of ready replicas for this replication controller. - // +optional - ReadyReplicas int32 - - // The number of available replicas (ready for at least minReadySeconds) for this replication controller. - // +optional - AvailableReplicas int32 - - // ObservedGeneration is the most recent generation observed by the controller. - // +optional - ObservedGeneration int64 - - // Represents the latest available observations of a replication controller's current state. - // +optional - Conditions []ReplicationControllerCondition -} - -type ReplicationControllerConditionType string - -// These are valid conditions of a replication controller. -const ( - // ReplicationControllerReplicaFailure is added in a replication controller when one of its pods - // fails to be created due to insufficient quota, limit ranges, pod security policy, node selectors, - // etc. or deleted due to kubelet being down or finalizers are failing. - ReplicationControllerReplicaFailure ReplicationControllerConditionType = "ReplicaFailure" -) - -// ReplicationControllerCondition describes the state of a replication controller at a certain point. -type ReplicationControllerCondition struct { - // Type of replication controller condition. - Type ReplicationControllerConditionType - // Status of the condition, one of True, False, Unknown. - Status ConditionStatus - // The last time the condition transitioned from one status to another. - // +optional - LastTransitionTime metav1.Time - // The reason for the condition's last transition. - // +optional - Reason string - // A human readable message indicating details about the transition. - // +optional - Message string -} - -// +genclient -// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/kubernetes/pkg/apis/extensions.Scale -// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/kubernetes/pkg/apis/extensions.Scale,result=k8s.io/kubernetes/pkg/apis/extensions.Scale -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ReplicationController represents the configuration of a replication controller. -type ReplicationController struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the desired behavior of this replication controller. - // +optional - Spec ReplicationControllerSpec - - // Status is the current status of this replication controller. This data may be - // out of date by some window of time. - // +optional - Status ReplicationControllerStatus -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ReplicationControllerList is a collection of replication controllers. -type ReplicationControllerList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []ReplicationController -} - -const ( - // ClusterIPNone - do not assign a cluster IP - // no proxying required and no environment variables should be created for pods - ClusterIPNone = "None" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ServiceList holds a list of services. -type ServiceList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []Service -} - -// Session Affinity Type string -type ServiceAffinity string - -const ( - // ServiceAffinityClientIP is the Client IP based. - ServiceAffinityClientIP ServiceAffinity = "ClientIP" - - // ServiceAffinityNone - no session affinity. 
- ServiceAffinityNone ServiceAffinity = "None" -) - -const ( - // DefaultClientIPServiceAffinitySeconds is the default timeout seconds - // of Client IP based session affinity - 3 hours. - DefaultClientIPServiceAffinitySeconds int32 = 10800 - // MaxClientIPServiceAffinitySeconds is the max timeout seconds - // of Client IP based session affinity - 1 day. - MaxClientIPServiceAffinitySeconds int32 = 86400 -) - -// SessionAffinityConfig represents the configurations of session affinity. -type SessionAffinityConfig struct { - // clientIP contains the configurations of Client IP based session affinity. - // +optional - ClientIP *ClientIPConfig -} - -// ClientIPConfig represents the configurations of Client IP based session affinity. -type ClientIPConfig struct { - // timeoutSeconds specifies the seconds of ClientIP type session sticky time. - // The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". - // Default value is 10800(for 3 hours). - // +optional - TimeoutSeconds *int32 -} - -// Service Type string describes ingress methods for a service -type ServiceType string - -const ( - // ServiceTypeClusterIP means a service will only be accessible inside the - // cluster, via the ClusterIP. - ServiceTypeClusterIP ServiceType = "ClusterIP" - - // ServiceTypeNodePort means a service will be exposed on one port of - // every node, in addition to 'ClusterIP' type. - ServiceTypeNodePort ServiceType = "NodePort" - - // ServiceTypeLoadBalancer means a service will be exposed via an - // external load balancer (if the cloud provider supports it), in addition - // to 'NodePort' type. - ServiceTypeLoadBalancer ServiceType = "LoadBalancer" - - // ServiceTypeExternalName means a service consists of only a reference to - // an external name that kubedns or equivalent will return as a CNAME - // record, with no exposing or proxying of any pods involved. - ServiceTypeExternalName ServiceType = "ExternalName" -) - -// Service External Traffic Policy Type string -type ServiceExternalTrafficPolicyType string - -const ( - // ServiceExternalTrafficPolicyTypeLocal specifies node-local endpoints behavior. - ServiceExternalTrafficPolicyTypeLocal ServiceExternalTrafficPolicyType = "Local" - // ServiceExternalTrafficPolicyTypeCluster specifies cluster-wide (legacy) behavior. - ServiceExternalTrafficPolicyTypeCluster ServiceExternalTrafficPolicyType = "Cluster" -) - -// ServiceStatus represents the current status of a service -type ServiceStatus struct { - // LoadBalancer contains the current status of the load-balancer, - // if one is present. - // +optional - LoadBalancer LoadBalancerStatus -} - -// LoadBalancerStatus represents the status of a load-balancer -type LoadBalancerStatus struct { - // Ingress is a list containing ingress points for the load-balancer; - // traffic intended for the service should be sent to these ingress points. - // +optional - Ingress []LoadBalancerIngress -} - -// LoadBalancerIngress represents the status of a load-balancer ingress point: -// traffic intended for the service should be sent to an ingress point. 
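// Illustrative sketch (assumes SessionAffinityConfig and ClientIPConfig above): ClientIP
// session affinity configured with the default three-hour sticky time, which stays below
// MaxClientIPServiceAffinitySeconds.
var exampleAffinityTimeout int32 = DefaultClientIPServiceAffinitySeconds
var exampleSessionAffinityConfig = SessionAffinityConfig{
	ClientIP: &ClientIPConfig{TimeoutSeconds: &exampleAffinityTimeout},
}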
-type LoadBalancerIngress struct { - // IP is set for load-balancer ingress points that are IP based - // (typically GCE or OpenStack load-balancers) - // +optional - IP string - - // Hostname is set for load-balancer ingress points that are DNS based - // (typically AWS load-balancers) - // +optional - Hostname string -} - -// ServiceSpec describes the attributes that a user creates on a service -type ServiceSpec struct { - // Type determines how the Service is exposed. Defaults to ClusterIP. Valid - // options are ExternalName, ClusterIP, NodePort, and LoadBalancer. - // "ExternalName" maps to the specified externalName. - // "ClusterIP" allocates a cluster-internal IP address for load-balancing to - // endpoints. Endpoints are determined by the selector or if that is not - // specified, by manual construction of an Endpoints object. If clusterIP is - // "None", no virtual IP is allocated and the endpoints are published as a - // set of endpoints rather than a stable IP. - // "NodePort" builds on ClusterIP and allocates a port on every node which - // routes to the clusterIP. - // "LoadBalancer" builds on NodePort and creates an - // external load-balancer (if supported in the current cloud) which routes - // to the clusterIP. - // More info: https://kubernetes.io/docs/concepts/services-networking/service/ - // +optional - Type ServiceType - - // Required: The list of ports that are exposed by this service. - Ports []ServicePort - - // Route service traffic to pods with label keys and values matching this - // selector. If empty or not present, the service is assumed to have an - // external process managing its endpoints, which Kubernetes will not - // modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. - // Ignored if type is ExternalName. - // More info: https://kubernetes.io/docs/concepts/services-networking/service/ - Selector map[string]string - - // ClusterIP is the IP address of the service and is usually assigned - // randomly by the master. If an address is specified manually and is not in - // use by others, it will be allocated to the service; otherwise, creation - // of the service will fail. This field can not be changed through updates. - // Valid values are "None", empty string (""), or a valid IP address. "None" - // can be specified for headless services when proxying is not required. - // Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if - // type is ExternalName. - // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - // +optional - ClusterIP string - - // ExternalName is the external reference that kubedns or equivalent will - // return as a CNAME record for this service. No proxying will be involved. - // Must be a valid DNS name and requires Type to be ExternalName. - ExternalName string - - // ExternalIPs are used by external load balancers, or can be set by - // users to handle external traffic that arrives at a node. - // +optional - ExternalIPs []string - - // Only applies to Service Type: LoadBalancer - // LoadBalancer will get created with the IP specified in this field. - // This feature depends on whether the underlying cloud-provider supports specifying - // the loadBalancerIP when a load balancer is created. - // This field will be ignored if the cloud-provider does not support the feature. - // +optional - LoadBalancerIP string - - // Optional: Supports "ClientIP" and "None". Used to maintain session affinity. 
- // +optional - SessionAffinity ServiceAffinity - - // sessionAffinityConfig contains the configurations of session affinity. - // +optional - SessionAffinityConfig *SessionAffinityConfig - - // Optional: If specified and supported by the platform, this will restrict traffic through the cloud-provider - // load-balancer will be restricted to the specified client IPs. This field will be ignored if the - // cloud-provider does not support the feature." - // +optional - LoadBalancerSourceRanges []string - - // externalTrafficPolicy denotes if this Service desires to route external - // traffic to node-local or cluster-wide endpoints. "Local" preserves the - // client source IP and avoids a second hop for LoadBalancer and Nodeport - // type services, but risks potentially imbalanced traffic spreading. - // "Cluster" obscures the client source IP and may cause a second hop to - // another node, but should have good overall load-spreading. - // +optional - ExternalTrafficPolicy ServiceExternalTrafficPolicyType - - // healthCheckNodePort specifies the healthcheck nodePort for the service. - // If not specified, HealthCheckNodePort is created by the service api - // backend with the allocated nodePort. Will use user-specified nodePort value - // if specified by the client. Only effects when Type is set to LoadBalancer - // and ExternalTrafficPolicy is set to Local. - // +optional - HealthCheckNodePort int32 - - // publishNotReadyAddresses, when set to true, indicates that DNS implementations - // must publish the notReadyAddresses of subsets for the Endpoints associated with - // the Service. The default value is false. - // The primary use case for setting this field is to use a StatefulSet's Headless Service - // to propagate SRV records for its Pods without respect to their readiness for purpose - // of peer discovery. - // This field will replace the service.alpha.kubernetes.io/tolerate-unready-endpoints - // when that annotation is deprecated and all clients have been converted to use this - // field. - // +optional - PublishNotReadyAddresses bool -} - -type ServicePort struct { - // Optional if only one ServicePort is defined on this service: The - // name of this port within the service. This must be a DNS_LABEL. - // All ports within a ServiceSpec must have unique names. This maps to - // the 'Name' field in EndpointPort objects. - Name string - - // The IP protocol for this port. Supports "TCP" and "UDP". - Protocol Protocol - - // The port that will be exposed on the service. - Port int32 - - // Optional: The target port on pods selected by this service. If this - // is a string, it will be looked up as a named port in the target - // Pod's container ports. If this is not specified, the value - // of the 'port' field is used (an identity map). - // This field is ignored for services with clusterIP=None, and should be - // omitted or set equal to the 'port' field. - TargetPort intstr.IntOrString - - // The port on each node on which this service is exposed. - // Default is to auto-allocate a port if the ServiceType of this Service requires one. - NodePort int32 -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Service is a named abstraction of software service (for example, mysql) consisting of local port -// (for example 3306) that the proxy listens on, and the selector that determines which pods -// will answer requests sent through the proxy. 
-type Service struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the behavior of a service. - // +optional - Spec ServiceSpec - - // Status represents the current status of a service. - // +optional - Status ServiceStatus -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ServiceAccount binds together: -// * a name, understood by users, and perhaps by peripheral systems, for an identity -// * a principal that can be authenticated and authorized -// * a set of secrets -type ServiceAccount struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount - Secrets []ObjectReference - - // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images - // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets - // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. - // +optional - ImagePullSecrets []LocalObjectReference - - // AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. - // Can be overridden at the pod level. - // +optional - AutomountServiceAccountToken *bool -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ServiceAccountList is a list of ServiceAccount objects -type ServiceAccountList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []ServiceAccount -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Endpoints is a collection of endpoints that implement the actual service. Example: -// Name: "mysvc", -// Subsets: [ -// { -// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], -// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] -// }, -// { -// Addresses: [{"ip": "10.10.3.3"}], -// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}] -// }, -// ] -type Endpoints struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // The set of all endpoints is the union of all subsets. - Subsets []EndpointSubset -} - -// EndpointSubset is a group of addresses with a common set of ports. The -// expanded set of endpoints is the Cartesian product of Addresses x Ports. -// For example, given: -// { -// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], -// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] -// } -// The resulting set of endpoints can be viewed as: -// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ], -// b: [ 10.10.1.1:309, 10.10.2.2:309 ] -type EndpointSubset struct { - Addresses []EndpointAddress - NotReadyAddresses []EndpointAddress - Ports []EndpointPort -} - -// EndpointAddress is a tuple that describes single IP address. -type EndpointAddress struct { - // The IP of this endpoint. - // IPv6 is also accepted but not fully supported on all platforms. Also, certain - // kubernetes components, like kube-proxy, are not IPv6 ready. - // TODO: This should allow hostname or IP, see #4447. - IP string - // Optional: Hostname of this endpoint - // Meant to be used by DNS servers etc. - // +optional - Hostname string - // Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node. - // +optional - NodeName *string - // Optional: The kubernetes object related to the entry point. 
- TargetRef *ObjectReference -} - -// EndpointPort is a tuple that describes a single port. -type EndpointPort struct { - // The name of this port (corresponds to ServicePort.Name). Optional - // if only one port is defined. Must be a DNS_LABEL. - Name string - - // The port number. - Port int32 - - // The IP protocol for this port. - Protocol Protocol -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// EndpointsList is a list of endpoints. -type EndpointsList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []Endpoints -} - -// NodeSpec describes the attributes that a node is created with. -type NodeSpec struct { - // PodCIDR represents the pod IP range assigned to the node - // Note: assigning IP ranges to nodes might need to be revisited when we support migratable IPs. - // +optional - PodCIDR string - - // External ID of the node assigned by some machine database (e.g. a cloud provider) - // +optional - ExternalID string - - // ID of the node assigned by the cloud provider - // Note: format is "://" - // +optional - ProviderID string - - // Unschedulable controls node schedulability of new pods. By default node is schedulable. - // +optional - Unschedulable bool - - // If specified, the node's taints. - // +optional - Taints []Taint - - // If specified, the source to get node configuration from - // The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field - // +optional - ConfigSource *NodeConfigSource -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NodeConfigSource specifies a source of node configuration. Exactly one subfield must be non-nil. -type NodeConfigSource struct { - metav1.TypeMeta - ConfigMapRef *ObjectReference -} - -// DaemonEndpoint contains information about a single Daemon endpoint. -type DaemonEndpoint struct { - /* - The port tag was not properly in quotes in earlier releases, so it must be - uppercased for backwards compat (since it was falling back to var name of - 'Port'). - */ - - // Port number of the given endpoint. - Port int32 -} - -// NodeDaemonEndpoints lists ports opened by daemons running on the Node. -type NodeDaemonEndpoints struct { - // Endpoint on which Kubelet is listening. - // +optional - KubeletEndpoint DaemonEndpoint -} - -// NodeSystemInfo is a set of ids/uuids to uniquely identify the node. -type NodeSystemInfo struct { - // MachineID reported by the node. For unique machine identification - // in the cluster this field is preferred. Learn more from man(5) - // machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html - MachineID string - // SystemUUID reported by the node. For unique machine identification - // MachineID is preferred. This field is specific to Red Hat hosts - // https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html - SystemUUID string - // Boot ID reported by the node. - BootID string - // Kernel Version reported by the node. - KernelVersion string - // OS Image reported by the node. - OSImage string - // ContainerRuntime Version reported by the node. - ContainerRuntimeVersion string - // Kubelet Version reported by the node. - KubeletVersion string - // KubeProxy Version reported by the node. - KubeProxyVersion string - // The Operating System reported by the node - OperatingSystem string - // The Architecture reported by the node - Architecture string -} - -// NodeStatus is information about the current status of a node. 
-type NodeStatus struct { - // Capacity represents the total resources of a node. - // +optional - Capacity ResourceList - // Allocatable represents the resources of a node that are available for scheduling. - // +optional - Allocatable ResourceList - // NodePhase is the current lifecycle phase of the node. - // +optional - Phase NodePhase - // Conditions is an array of current node conditions. - // +optional - Conditions []NodeCondition - // Queried from cloud provider, if available. - // +optional - Addresses []NodeAddress - // Endpoints of daemons running on the Node. - // +optional - DaemonEndpoints NodeDaemonEndpoints - // Set of ids/uuids to uniquely identify the node. - // +optional - NodeInfo NodeSystemInfo - // List of container images on this node - // +optional - Images []ContainerImage - // List of attachable volumes in use (mounted) by the node. - // +optional - VolumesInUse []UniqueVolumeName - // List of volumes that are attached to the node. - // +optional - VolumesAttached []AttachedVolume -} - -type UniqueVolumeName string - -// AttachedVolume describes a volume attached to a node -type AttachedVolume struct { - // Name of the attached volume - Name UniqueVolumeName - - // DevicePath represents the device path where the volume should be available - DevicePath string -} - -// AvoidPods describes pods that should avoid this node. This is the value for a -// Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and -// will eventually become a field of NodeStatus. -type AvoidPods struct { - // Bounded-sized list of signatures of pods that should avoid this node, sorted - // in timestamp order from oldest to newest. Size of the slice is unspecified. - // +optional - PreferAvoidPods []PreferAvoidPodsEntry -} - -// Describes a class of pods that should avoid this node. -type PreferAvoidPodsEntry struct { - // The class of pods. - PodSignature PodSignature - // Time at which this entry was added to the list. - // +optional - EvictionTime metav1.Time - // (brief) reason why this entry was added to the list. - // +optional - Reason string - // Human readable message indicating why this entry was added to the list. - // +optional - Message string -} - -// Describes the class of pods that should avoid this node. -// Exactly one field should be set. -type PodSignature struct { - // Reference to controller whose pods should avoid this node. - // +optional - PodController *metav1.OwnerReference -} - -// Describe a container image -type ContainerImage struct { - // Names by which this image is known. - Names []string - // The size of the image in bytes. - // +optional - SizeBytes int64 -} - -type NodePhase string - -// These are the valid phases of node. -const ( - // NodePending means the node has been created/added by the system, but not configured. - NodePending NodePhase = "Pending" - // NodeRunning means the node has been configured and has Kubernetes components running. - NodeRunning NodePhase = "Running" - // NodeTerminated means the node has been removed from the cluster. - NodeTerminated NodePhase = "Terminated" -) - -type NodeConditionType string - -// These are valid conditions of node. Currently, we don't have enough information to decide -// node condition. In the future, we will add more. The proposed set of conditions are: -// NodeReady, NodeReachable -const ( - // NodeReady means kubelet is healthy and ready to accept pods. 
- NodeReady NodeConditionType = "Ready" - // NodeOutOfDisk means the kubelet will not accept new pods due to insufficient free disk - // space on the node. - NodeOutOfDisk NodeConditionType = "OutOfDisk" - // NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory. - NodeMemoryPressure NodeConditionType = "MemoryPressure" - // NodeDiskPressure means the kubelet is under pressure due to insufficient available disk. - NodeDiskPressure NodeConditionType = "DiskPressure" - // NodeNetworkUnavailable means that network for the node is not correctly configured. - NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable" - // NodeConfigOK indicates whether the kubelet is correctly configured - NodeConfigOK NodeConditionType = "ConfigOK" -) - -type NodeCondition struct { - Type NodeConditionType - Status ConditionStatus - // +optional - LastHeartbeatTime metav1.Time - // +optional - LastTransitionTime metav1.Time - // +optional - Reason string - // +optional - Message string -} - -type NodeAddressType string - -const ( - NodeHostName NodeAddressType = "Hostname" - NodeExternalIP NodeAddressType = "ExternalIP" - NodeInternalIP NodeAddressType = "InternalIP" - NodeExternalDNS NodeAddressType = "ExternalDNS" - NodeInternalDNS NodeAddressType = "InternalDNS" -) - -type NodeAddress struct { - Type NodeAddressType - Address string -} - -// NodeResources is an object for conveying resource information about a node. -// see http://releases.k8s.io/HEAD/docs/design/resources.md for more details. -type NodeResources struct { - // Capacity represents the available resources of a node - // +optional - Capacity ResourceList -} - -// ResourceName is the name identifying various resources in a ResourceList. -type ResourceName string - -// Resource names must be not more than 63 characters, consisting of upper- or lower-case alphanumeric characters, -// with the -, _, and . characters allowed anywhere, except the first or last character. -// The default convention, matching that for annotations, is to use lower-case names, with dashes, rather than -// camel case, separating compound words. -// Fully-qualified resource typenames are constructed from a DNS-style subdomain, followed by a slash `/` and a name. -const ( - // CPU, in cores. (500m = .5 cores) - ResourceCPU ResourceName = "cpu" - // Memory, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - ResourceMemory ResourceName = "memory" - // Volume size, in bytes (e,g. 5Gi = 5GiB = 5 * 1024 * 1024 * 1024) - ResourceStorage ResourceName = "storage" - // Local ephemeral storage, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - // The resource name for ResourceEphemeralStorage is alpha and it can change across releases. - ResourceEphemeralStorage ResourceName = "ephemeral-storage" - // NVIDIA GPU, in devices. Alpha, might change: although fractional and allowing values >1, only one whole device per node is assigned. - ResourceNvidiaGPU ResourceName = "alpha.kubernetes.io/nvidia-gpu" -) - -const ( - // Namespace prefix for opaque counted resources (alpha). - ResourceOpaqueIntPrefix = "pod.alpha.kubernetes.io/opaque-int-resource-" - // Default namespace prefix. - ResourceDefaultNamespacePrefix = "kubernetes.io/" - // Name prefix for huge page resources (alpha). - ResourceHugePagesPrefix = "hugepages-" -) - -// ResourceList is a set of (resource name, quantity) pairs. 
-type ResourceList map[ResourceName]resource.Quantity - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Node is a worker node in Kubernetes -// The name of the node according to etcd is in ObjectMeta.Name. -type Node struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the behavior of a node. - // +optional - Spec NodeSpec - - // Status describes the current status of a Node - // +optional - Status NodeStatus -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NodeList is a list of nodes. -type NodeList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []Node -} - -// NamespaceSpec describes the attributes on a Namespace -type NamespaceSpec struct { - // Finalizers is an opaque list of values that must be empty to permanently remove object from storage - Finalizers []FinalizerName -} - -// FinalizerName is the name identifying a finalizer during namespace lifecycle. -type FinalizerName string - -// These are internal finalizer values to Kubernetes, must be qualified name unless defined here or -// in metav1. -const ( - FinalizerKubernetes FinalizerName = "kubernetes" -) - -// NamespaceStatus is information about the current status of a Namespace. -type NamespaceStatus struct { - // Phase is the current lifecycle phase of the namespace. - // +optional - Phase NamespacePhase -} - -type NamespacePhase string - -// These are the valid phases of a namespace. -const ( - // NamespaceActive means the namespace is available for use in the system - NamespaceActive NamespacePhase = "Active" - // NamespaceTerminating means the namespace is undergoing graceful termination - NamespaceTerminating NamespacePhase = "Terminating" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// A namespace provides a scope for Names. -// Use of multiple namespaces is optional -type Namespace struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the behavior of the Namespace. - // +optional - Spec NamespaceSpec - - // Status describes the current status of a Namespace - // +optional - Status NamespaceStatus -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NamespaceList is a list of Namespaces. -type NamespaceList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []Namespace -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Binding ties one object to another; for example, a pod is bound to a node by a scheduler. -// Deprecated in 1.7, please use the bindings subresource of pods instead. -type Binding struct { - metav1.TypeMeta - // ObjectMeta describes the object that is being bound. - // +optional - metav1.ObjectMeta - - // Target is the object to bind to. - Target ObjectReference -} - -// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. -type Preconditions struct { - // Specifies the target UID. - // +optional - UID *types.UID -} - -// DeletionPropagation decides whether and how garbage collection will be performed. -type DeletionPropagation string - -const ( - // Orphans the dependents. - DeletePropagationOrphan DeletionPropagation = "Orphan" - // Deletes the object from the key-value store, the garbage collector will delete the dependents in the background. 
- DeletePropagationBackground DeletionPropagation = "Background" - // The object exists in the key-value store until the garbage collector deletes all the dependents whose ownerReference.blockOwnerDeletion=true from the key-value store. - // API sever will put the "DeletingDependents" finalizer on the object, and sets its deletionTimestamp. - // This policy is cascading, i.e., the dependents will be deleted with Foreground. - DeletePropagationForeground DeletionPropagation = "Foreground" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// DeleteOptions may be provided when deleting an API object -// DEPRECATED: This type has been moved to meta/v1 and will be removed soon. -type DeleteOptions struct { - metav1.TypeMeta - - // Optional duration in seconds before the object should be deleted. Value must be non-negative integer. - // The value zero indicates delete immediately. If this value is nil, the default grace period for the - // specified type will be used. - // +optional - GracePeriodSeconds *int64 - - // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be - // returned. - // +optional - Preconditions *Preconditions - - // Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. - // Should the dependent objects be orphaned. If true/false, the "orphan" - // finalizer will be added to/removed from the object's finalizers list. - // Either this field or PropagationPolicy may be set, but not both. - // +optional - OrphanDependents *bool - - // Whether and how garbage collection will be performed. - // Either this field or OrphanDependents may be set, but not both. - // The default policy is decided by the existing finalizer set in the - // metadata.finalizers and the resource-specific default policy. - // +optional - PropagationPolicy *DeletionPropagation -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ListOptions is the query options to a standard REST list call, and has future support for -// watch calls. -// DEPRECATED: This type has been moved to meta/v1 and will be removed soon. -type ListOptions struct { - metav1.TypeMeta - - // A selector based on labels - LabelSelector labels.Selector - // A selector based on fields - FieldSelector fields.Selector - - // If true, partially initialized resources are included in the response. - IncludeUninitialized bool - - // If true, watch for changes to this list - Watch bool - // When specified with a watch call, shows changes that occur after that particular version of a resource. - // Defaults to changes from the beginning of history. - // When specified for list: - // - if unset, then the result is returned from remote storage based on quorum-read flag; - // - if it's 0, then we simply return what we currently have in cache, no guarantee; - // - if set to non zero, then the result is at least as fresh as given rv. - ResourceVersion string - // Timeout for the list/watch call. - TimeoutSeconds *int64 -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodLogOptions is the query options for a Pod's logs REST call -type PodLogOptions struct { - metav1.TypeMeta - - // Container for which to return logs - Container string - // If true, follow the logs for the pod - Follow bool - // If true, return previous terminated container logs - Previous bool - // A relative time in seconds before the current time from which to show logs. 
If this value - // precedes the time a pod was started, only logs since the pod start will be returned. - // If this value is in the future, no logs will be returned. - // Only one of sinceSeconds or sinceTime may be specified. - SinceSeconds *int64 - // An RFC3339 timestamp from which to show logs. If this value - // precedes the time a pod was started, only logs since the pod start will be returned. - // If this value is in the future, no logs will be returned. - // Only one of sinceSeconds or sinceTime may be specified. - SinceTime *metav1.Time - // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line - // of log output. - Timestamps bool - // If set, the number of lines from the end of the logs to show. If not specified, - // logs are shown from the creation of the container or sinceSeconds or sinceTime - TailLines *int64 - // If set, the number of bytes to read from the server before terminating the - // log output. This may not display a complete final line of logging, and may return - // slightly more or slightly less than the specified limit. - LimitBytes *int64 -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodAttachOptions is the query options to a Pod's remote attach call -// TODO: merge w/ PodExecOptions below for stdin, stdout, etc -type PodAttachOptions struct { - metav1.TypeMeta - - // Stdin if true indicates that stdin is to be redirected for the attach call - // +optional - Stdin bool - - // Stdout if true indicates that stdout is to be redirected for the attach call - // +optional - Stdout bool - - // Stderr if true indicates that stderr is to be redirected for the attach call - // +optional - Stderr bool - - // TTY if true indicates that a tty will be allocated for the attach call - // +optional - TTY bool - - // Container to attach to. - // +optional - Container string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodExecOptions is the query options to a Pod's remote exec call -type PodExecOptions struct { - metav1.TypeMeta - - // Stdin if true indicates that stdin is to be redirected for the exec call - Stdin bool - - // Stdout if true indicates that stdout is to be redirected for the exec call - Stdout bool - - // Stderr if true indicates that stderr is to be redirected for the exec call - Stderr bool - - // TTY if true indicates that a tty will be allocated for the exec call - TTY bool - - // Container in which to execute the command. - Container string - - // Command is the remote command to execute; argv array; not executed within a shell. 
- Command []string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodPortForwardOptions is the query options to a Pod's port forward call -type PodPortForwardOptions struct { - metav1.TypeMeta - - // The list of ports to forward - // +optional - Ports []int32 -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodProxyOptions is the query options to a Pod's proxy call -type PodProxyOptions struct { - metav1.TypeMeta - - // Path is the URL path to use for the current proxy request - Path string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NodeProxyOptions is the query options to a Node's proxy call -type NodeProxyOptions struct { - metav1.TypeMeta - - // Path is the URL path to use for the current proxy request - Path string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ServiceProxyOptions is the query options to a Service's proxy call. -type ServiceProxyOptions struct { - metav1.TypeMeta - - // Path is the part of URLs that include service endpoints, suffixes, - // and parameters to use for the current proxy request to service. - // For example, the whole request URL is - // http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. - // Path is _search?q=user:kimchy. - Path string -} - -// ObjectReference contains enough information to let you inspect or modify the referred object. -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type ObjectReference struct { - // +optional - Kind string - // +optional - Namespace string - // +optional - Name string - // +optional - UID types.UID - // +optional - APIVersion string - // +optional - ResourceVersion string - - // Optional. If referring to a piece of an object instead of an entire object, this string - // should contain information to identify the sub-object. For example, if the object - // reference is to a container within a pod, this would take on a value like: - // "spec.containers{name}" (where "name" refers to the name of the container that triggered - // the event) or if no container name is specified "spec.containers[2]" (container with - // index 2 in this pod). This syntax is chosen only to have some well-defined way of - // referencing a part of an object. - // TODO: this design is not final and this field is subject to change in the future. - // +optional - FieldPath string -} - -// LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. -type LocalObjectReference struct { - //TODO: Add other useful fields. apiVersion, kind, uid? - Name string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type SerializedReference struct { - metav1.TypeMeta - // +optional - Reference ObjectReference -} - -type EventSource struct { - // Component from which the event is generated. - // +optional - Component string - // Node name on which the event is generated. - // +optional - Host string -} - -// Valid values for event types (new types could be added in future) -const ( - // Information only and will not cause any problems - EventTypeNormal string = "Normal" - // These events are to warn that something might go wrong - EventTypeWarning string = "Warning" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Event is a report of an event somewhere in the cluster. 
-// TODO: Decide whether to store these separately or with the object they apply to. -type Event struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Required. The object that this event is about. - // +optional - InvolvedObject ObjectReference - - // Optional; this should be a short, machine understandable string that gives the reason - // for this event being generated. For example, if the event is reporting that a container - // can't start, the Reason might be "ImageNotFound". - // TODO: provide exact specification for format. - // +optional - Reason string - - // Optional. A human-readable description of the status of this operation. - // TODO: decide on maximum length. - // +optional - Message string - - // Optional. The component reporting this event. Should be a short machine understandable string. - // +optional - Source EventSource - - // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.) - // +optional - FirstTimestamp metav1.Time - - // The time at which the most recent occurrence of this event was recorded. - // +optional - LastTimestamp metav1.Time - - // The number of times this event has occurred. - // +optional - Count int32 - - // Type of this event (Normal, Warning), new types could be added in the future. - // +optional - Type string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// EventList is a list of events. -type EventList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []Event -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// List holds a list of objects, which may not be known by the server. -type List metainternalversion.List - -// A type of object that is limited -type LimitType string - -const ( - // Limit that applies to all pods in a namespace - LimitTypePod LimitType = "Pod" - // Limit that applies to all containers in a namespace - LimitTypeContainer LimitType = "Container" - // Limit that applies to all persistent volume claims in a namespace - LimitTypePersistentVolumeClaim LimitType = "PersistentVolumeClaim" -) - -// LimitRangeItem defines a min/max usage limit for any resource that matches on kind -type LimitRangeItem struct { - // Type of resource that this limit applies to - // +optional - Type LimitType - // Max usage constraints on this kind by resource name - // +optional - Max ResourceList - // Min usage constraints on this kind by resource name - // +optional - Min ResourceList - // Default resource requirement limit value by resource name. - // +optional - Default ResourceList - // DefaultRequest resource requirement request value by resource name. - // +optional - DefaultRequest ResourceList - // MaxLimitRequestRatio represents the max burst value for the named resource - // +optional - MaxLimitRequestRatio ResourceList -} - -// LimitRangeSpec defines a min/max usage limit for resources that match on kind -type LimitRangeSpec struct { - // Limits is the list of LimitRangeItem objects that are enforced - Limits []LimitRangeItem -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// LimitRange sets resource usage limits for each kind of resource in a Namespace -type LimitRange struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the limits enforced - // +optional - Spec LimitRangeSpec -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// LimitRangeList is a list of LimitRange items. 
-type LimitRangeList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - // Items is a list of LimitRange objects - Items []LimitRange -} - -// The following identify resource constants for Kubernetes object types -const ( - // Pods, number - ResourcePods ResourceName = "pods" - // Services, number - ResourceServices ResourceName = "services" - // ReplicationControllers, number - ResourceReplicationControllers ResourceName = "replicationcontrollers" - // ResourceQuotas, number - ResourceQuotas ResourceName = "resourcequotas" - // ResourceSecrets, number - ResourceSecrets ResourceName = "secrets" - // ResourceConfigMaps, number - ResourceConfigMaps ResourceName = "configmaps" - // ResourcePersistentVolumeClaims, number - ResourcePersistentVolumeClaims ResourceName = "persistentvolumeclaims" - // ResourceServicesNodePorts, number - ResourceServicesNodePorts ResourceName = "services.nodeports" - // ResourceServicesLoadBalancers, number - ResourceServicesLoadBalancers ResourceName = "services.loadbalancers" - // CPU request, in cores. (500m = .5 cores) - ResourceRequestsCPU ResourceName = "requests.cpu" - // Memory request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - ResourceRequestsMemory ResourceName = "requests.memory" - // Storage request, in bytes - ResourceRequestsStorage ResourceName = "requests.storage" - // Local ephemeral storage request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - ResourceRequestsEphemeralStorage ResourceName = "requests.ephemeral-storage" - // CPU limit, in cores. (500m = .5 cores) - ResourceLimitsCPU ResourceName = "limits.cpu" - // Memory limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - ResourceLimitsMemory ResourceName = "limits.memory" - // Local ephemeral storage limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage" -) - -// A ResourceQuotaScope defines a filter that must match each object tracked by a quota -type ResourceQuotaScope string - -const ( - // Match all pod objects where spec.activeDeadlineSeconds - ResourceQuotaScopeTerminating ResourceQuotaScope = "Terminating" - // Match all pod objects where !spec.activeDeadlineSeconds - ResourceQuotaScopeNotTerminating ResourceQuotaScope = "NotTerminating" - // Match all pod objects that have best effort quality of service - ResourceQuotaScopeBestEffort ResourceQuotaScope = "BestEffort" - // Match all pod objects that do not have best effort quality of service - ResourceQuotaScopeNotBestEffort ResourceQuotaScope = "NotBestEffort" -) - -// ResourceQuotaSpec defines the desired hard limits to enforce for Quota -type ResourceQuotaSpec struct { - // Hard is the set of desired hard limits for each named resource - // +optional - Hard ResourceList - // A collection of filters that must match each object tracked by a quota. - // If not specified, the quota matches all objects. 
- // +optional - Scopes []ResourceQuotaScope -} - -// ResourceQuotaStatus defines the enforced hard limits and observed use -type ResourceQuotaStatus struct { - // Hard is the set of enforced hard limits for each named resource - // +optional - Hard ResourceList - // Used is the current observed total usage of the resource in the namespace - // +optional - Used ResourceList -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ResourceQuota sets aggregate quota restrictions enforced per namespace -type ResourceQuota struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the desired quota - // +optional - Spec ResourceQuotaSpec - - // Status defines the actual enforced quota and its current usage - // +optional - Status ResourceQuotaStatus -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ResourceQuotaList is a list of ResourceQuota items -type ResourceQuotaList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - // Items is a list of ResourceQuota objects - Items []ResourceQuota -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Secret holds secret data of a certain type. The total bytes of the values in -// the Data field must be less than MaxSecretSize bytes. -type Secret struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Data contains the secret data. Each key must consist of alphanumeric - // characters, '-', '_' or '.'. The serialized form of the secret data is a - // base64 encoded string, representing the arbitrary (possibly non-string) - // data value here. - // +optional - Data map[string][]byte - - // Used to facilitate programmatic handling of secret data. - // +optional - Type SecretType -} - -const MaxSecretSize = 1 * 1024 * 1024 - -type SecretType string - -const ( - // SecretTypeOpaque is the default; arbitrary user-defined data - SecretTypeOpaque SecretType = "Opaque" - - // SecretTypeServiceAccountToken contains a token that identifies a service account to the API - // - // Required fields: - // - Secret.Annotations["kubernetes.io/service-account.name"] - the name of the ServiceAccount the token identifies - // - Secret.Annotations["kubernetes.io/service-account.uid"] - the UID of the ServiceAccount the token identifies - // - Secret.Data["token"] - a token that identifies the service account to the API - SecretTypeServiceAccountToken SecretType = "kubernetes.io/service-account-token" - - // ServiceAccountNameKey is the key of the required annotation for SecretTypeServiceAccountToken secrets - ServiceAccountNameKey = "kubernetes.io/service-account.name" - // ServiceAccountUIDKey is the key of the required annotation for SecretTypeServiceAccountToken secrets - ServiceAccountUIDKey = "kubernetes.io/service-account.uid" - // ServiceAccountTokenKey is the key of the required data for SecretTypeServiceAccountToken secrets - ServiceAccountTokenKey = "token" - // ServiceAccountKubeconfigKey is the key of the optional kubeconfig data for SecretTypeServiceAccountToken secrets - ServiceAccountKubeconfigKey = "kubernetes.kubeconfig" - // ServiceAccountRootCAKey is the key of the optional root certificate authority for SecretTypeServiceAccountToken secrets - ServiceAccountRootCAKey = "ca.crt" - // ServiceAccountNamespaceKey is the key of the optional namespace to use as the default for namespaced API calls - ServiceAccountNamespaceKey = "namespace" - - // SecretTypeDockercfg contains a dockercfg file that 
follows the same format rules as ~/.dockercfg - // - // Required fields: - // - Secret.Data[".dockercfg"] - a serialized ~/.dockercfg file - SecretTypeDockercfg SecretType = "kubernetes.io/dockercfg" - - // DockerConfigKey is the key of the required data for SecretTypeDockercfg secrets - DockerConfigKey = ".dockercfg" - - // SecretTypeDockerConfigJson contains a dockercfg file that follows the same format rules as ~/.docker/config.json - // - // Required fields: - // - Secret.Data[".dockerconfigjson"] - a serialized ~/.docker/config.json file - SecretTypeDockerConfigJson SecretType = "kubernetes.io/dockerconfigjson" - - // DockerConfigJsonKey is the key of the required data for SecretTypeDockerConfigJson secrets - DockerConfigJsonKey = ".dockerconfigjson" - - // SecretTypeBasicAuth contains data needed for basic authentication. - // - // Required at least one of fields: - // - Secret.Data["username"] - username used for authentication - // - Secret.Data["password"] - password or token needed for authentication - SecretTypeBasicAuth SecretType = "kubernetes.io/basic-auth" - - // BasicAuthUsernameKey is the key of the username for SecretTypeBasicAuth secrets - BasicAuthUsernameKey = "username" - // BasicAuthPasswordKey is the key of the password or token for SecretTypeBasicAuth secrets - BasicAuthPasswordKey = "password" - - // SecretTypeSSHAuth contains data needed for SSH authetication. - // - // Required field: - // - Secret.Data["ssh-privatekey"] - private SSH key needed for authentication - SecretTypeSSHAuth SecretType = "kubernetes.io/ssh-auth" - - // SSHAuthPrivateKey is the key of the required SSH private key for SecretTypeSSHAuth secrets - SSHAuthPrivateKey = "ssh-privatekey" - - // SecretTypeTLS contains information about a TLS client or server secret. It - // is primarily used with TLS termination of the Ingress resource, but may be - // used in other types. - // - // Required fields: - // - Secret.Data["tls.key"] - TLS private key. - // Secret.Data["tls.crt"] - TLS certificate. - // TODO: Consider supporting different formats, specifying CA/destinationCA. - SecretTypeTLS SecretType = "kubernetes.io/tls" - - // TLSCertKey is the key for tls certificates in a TLS secret. - TLSCertKey = "tls.crt" - // TLSPrivateKeyKey is the key for the private key field in a TLS secret. - TLSPrivateKeyKey = "tls.key" - // SecretTypeBootstrapToken is used during the automated bootstrap process (first - // implemented by kubeadm). It stores tokens that are used to sign well known - // ConfigMaps. They are used for authn. - SecretTypeBootstrapToken SecretType = "bootstrap.kubernetes.io/token" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type SecretList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []Secret -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ConfigMap holds configuration data for components or applications to consume. -type ConfigMap struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Data contains the configuration data. - // Each key must consist of alphanumeric characters, '-', '_' or '.'. - // +optional - Data map[string]string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ConfigMapList is a resource containing a list of ConfigMap objects. -type ConfigMapList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - // Items is the list of ConfigMaps. 
- Items []ConfigMap -} - -// These constants are for remote command execution and port forwarding and are -// used by both the client side and server side components. -// -// This is probably not the ideal place for them, but it didn't seem worth it -// to create pkg/exec and pkg/portforward just to contain a single file with -// constants in it. Suggestions for more appropriate alternatives are -// definitely welcome! -const ( - // Enable stdin for remote command execution - ExecStdinParam = "input" - // Enable stdout for remote command execution - ExecStdoutParam = "output" - // Enable stderr for remote command execution - ExecStderrParam = "error" - // Enable TTY for remote command execution - ExecTTYParam = "tty" - // Command to run for remote command execution - ExecCommandParam = "command" - - // Name of header that specifies stream type - StreamType = "streamType" - // Value for streamType header for stdin stream - StreamTypeStdin = "stdin" - // Value for streamType header for stdout stream - StreamTypeStdout = "stdout" - // Value for streamType header for stderr stream - StreamTypeStderr = "stderr" - // Value for streamType header for data stream - StreamTypeData = "data" - // Value for streamType header for error stream - StreamTypeError = "error" - // Value for streamType header for terminal resize stream - StreamTypeResize = "resize" - - // Name of header that specifies the port being forwarded - PortHeader = "port" - // Name of header that specifies a request ID used to associate the error - // and data streams for a single forwarded connection - PortForwardRequestIDHeader = "requestID" -) - -// Type and constants for component health validation. -type ComponentConditionType string - -// These are the valid conditions for the component. -const ( - ComponentHealthy ComponentConditionType = "Healthy" -) - -type ComponentCondition struct { - Type ComponentConditionType - Status ConditionStatus - // +optional - Message string - // +optional - Error string -} - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ComponentStatus (and ComponentStatusList) holds the cluster validation info. -type ComponentStatus struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // +optional - Conditions []ComponentCondition -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type ComponentStatusList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []ComponentStatus -} - -// SecurityContext holds security configuration that will be applied to a container. -// Some fields are present in both SecurityContext and PodSecurityContext. When both -// are set, the values in SecurityContext take precedence. -type SecurityContext struct { - // The capabilities to add/drop when running containers. - // Defaults to the default set of capabilities granted by the container runtime. - // +optional - Capabilities *Capabilities - // Run container in privileged mode. - // Processes in privileged containers are essentially equivalent to root on the host. - // Defaults to false. - // +optional - Privileged *bool - // The SELinux context to be applied to the container. - // If unspecified, the container runtime will allocate a random SELinux context for each - // container. May also be set in PodSecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. 
- // +optional - SELinuxOptions *SELinuxOptions - // The UID to run the entrypoint of the container process. - // Defaults to user specified in image metadata if unspecified. - // May also be set in PodSecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - // +optional - RunAsUser *int64 - // Indicates that the container must run as a non-root user. - // If true, the Kubelet will validate the image at runtime to ensure that it - // does not run as UID 0 (root) and fail to start the container if it does. - // If unset or false, no such validation will be performed. - // May also be set in PodSecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - // +optional - RunAsNonRoot *bool - // The read-only root filesystem allows you to restrict the locations that an application can write - // files to, ensuring the persistent data can only be written to mounts. - // +optional - ReadOnlyRootFilesystem *bool - // AllowPrivilegeEscalation controls whether a process can gain more - // privileges than its parent process. This bool directly controls if - // the no_new_privs flag will be set on the container process. - // +optional - AllowPrivilegeEscalation *bool -} - -// SELinuxOptions are the labels to be applied to the container. -type SELinuxOptions struct { - // SELinux user label - // +optional - User string - // SELinux role label - // +optional - Role string - // SELinux type label - // +optional - Type string - // SELinux level label. - // +optional - Level string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// RangeAllocation is an opaque API object (not exposed to end users) that can be persisted to record -// the global allocation state of the cluster. The schema of Range and Data generic, in that Range -// should be a string representation of the inputs to a range (for instance, for IP allocation it -// might be a CIDR) and Data is an opaque blob understood by an allocator which is typically a -// binary range. Consumers should use annotations to record additional information (schema version, -// data encoding hints). A range allocation should *ALWAYS* be recreatable at any time by observation -// of the cluster, thus the object is less strongly typed than most. -type RangeAllocation struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - // A string representing a unique label for a range of resources, such as a CIDR "10.0.0.0/8" or - // port range "10000-30000". Range is not strongly schema'd here. The Range is expected to define - // a start and end unless there is an implicit end. - Range string - // A byte array representing the serialized state of a range allocation. Additional clarifiers on - // the type or format of data should be represented with annotations. For IP allocations, this is - // represented as a bit array starting at the base IP of the CIDR in Range, with each bit representing - // a single allocated address (the fifth bit on CIDR 10.0.0.0/8 is 10.0.0.4). - Data []byte -} - -const ( - // "default-scheduler" is the name of default scheduler. - DefaultSchedulerName = "default-scheduler" - - // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule - // corresponding to every RequiredDuringScheduling affinity rule. 
- // When the --hard-pod-affinity-weight scheduler flag is not specified, - // DefaultHardPodAffinityWeight defines the weight of the implicit PreferredDuringScheduling affinity rule. - DefaultHardPodAffinitySymmetricWeight int = 1 -) diff --git a/vendor/k8s.io/kubernetes/pkg/api/util/BUILD b/vendor/k8s.io/kubernetes/pkg/api/util/BUILD deleted file mode 100644 index ae59f2f0ea91..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/util/BUILD +++ /dev/null @@ -1,31 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_library( - name = "go_default_library", - srcs = ["group_version.go"], -) - -go_test( - name = "go_default_test", - srcs = ["group_version_test.go"], - library = ":go_default_library", -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/api/util/OWNERS b/vendor/k8s.io/kubernetes/pkg/api/util/OWNERS deleted file mode 100755 index 18cdb2005a9d..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/util/OWNERS +++ /dev/null @@ -1,3 +0,0 @@ -reviewers: -- caesarxuchao -- david-mcmahon diff --git a/vendor/k8s.io/kubernetes/pkg/api/util/group_version.go b/vendor/k8s.io/kubernetes/pkg/api/util/group_version.go deleted file mode 100644 index fea2f17f811a..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/util/group_version.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// TODO: This GetVersion/GetGroup arrangement is temporary and will be replaced -// with a GroupAndVersion type. -package util - -import "strings" - -func GetVersion(groupVersion string) string { - s := strings.Split(groupVersion, "/") - if len(s) != 2 { - // e.g. return "v1" for groupVersion="v1" - return s[len(s)-1] - } - return s[1] -} - -func GetGroup(groupVersion string) string { - s := strings.Split(groupVersion, "/") - if len(s) == 1 { - // e.g. return "" for groupVersion="v1" - return "" - } - return s[0] -} - -// GetGroupVersion returns the "group/version". It returns "version" is if group -// is empty. It returns "group/" if version is empty. 
-func GetGroupVersion(group, version string) string { - if len(group) == 0 { - return version - } - return group + "/" + version -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/BUILD b/vendor/k8s.io/kubernetes/pkg/api/v1/BUILD deleted file mode 100644 index a80efb82f565..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/BUILD +++ /dev/null @@ -1,82 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_library( - name = "go_default_library", - srcs = [ - "conversion.go", - "defaults.go", - "doc.go", - "generate.go", - "register.go", - "zz_generated.conversion.go", - "zz_generated.defaults.go", - ], - deps = [ - "//pkg/api:go_default_library", - "//pkg/apis/extensions:go_default_library", - "//pkg/util/parsers:go_default_library", - "//pkg/util/pointer:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", - ], -) - -go_test( - name = "go_default_xtest", - srcs = [ - "backward_compatibility_test.go", - "conversion_test.go", - "defaults_test.go", - ], - deps = [ - ":go_default_library", - "//pkg/api:go_default_library", - "//pkg/api/install:go_default_library", - "//pkg/api/testing/compat:go_default_library", - "//pkg/api/validation:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [ - ":package-srcs", - "//pkg/api/v1/endpoints:all-srcs", - "//pkg/api/v1/helper:all-srcs", - "//pkg/api/v1/node:all-srcs", - "//pkg/api/v1/pod:all-srcs", - "//pkg/api/v1/resource:all-srcs", - "//pkg/api/v1/service:all-srcs", - "//pkg/api/v1/validation:all-srcs", - ], - tags = ["automanaged"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/conversion.go b/vendor/k8s.io/kubernetes/pkg/api/v1/conversion.go deleted file mode 100644 index 3076db9ccd79..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/conversion.go +++ /dev/null @@ -1,556 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "fmt" - "reflect" - - "k8s.io/api/core/v1" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/conversion" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" -) - -// This is a "fast-path" that avoids reflection for common types. It focuses on the objects that are -// converted the most in the cluster. -// TODO: generate one of these for every external API group - this is to prove the impact -func addFastPathConversionFuncs(scheme *runtime.Scheme) error { - scheme.AddGenericConversionFunc(func(objA, objB interface{}, s conversion.Scope) (bool, error) { - switch a := objA.(type) { - case *v1.Pod: - switch b := objB.(type) { - case *api.Pod: - return true, Convert_v1_Pod_To_api_Pod(a, b, s) - } - case *api.Pod: - switch b := objB.(type) { - case *v1.Pod: - return true, Convert_api_Pod_To_v1_Pod(a, b, s) - } - - case *v1.Event: - switch b := objB.(type) { - case *api.Event: - return true, Convert_v1_Event_To_api_Event(a, b, s) - } - case *api.Event: - switch b := objB.(type) { - case *v1.Event: - return true, Convert_api_Event_To_v1_Event(a, b, s) - } - - case *v1.ReplicationController: - switch b := objB.(type) { - case *api.ReplicationController: - return true, Convert_v1_ReplicationController_To_api_ReplicationController(a, b, s) - } - case *api.ReplicationController: - switch b := objB.(type) { - case *v1.ReplicationController: - return true, Convert_api_ReplicationController_To_v1_ReplicationController(a, b, s) - } - - case *v1.Node: - switch b := objB.(type) { - case *api.Node: - return true, Convert_v1_Node_To_api_Node(a, b, s) - } - case *api.Node: - switch b := objB.(type) { - case *v1.Node: - return true, Convert_api_Node_To_v1_Node(a, b, s) - } - - case *v1.Namespace: - switch b := objB.(type) { - case *api.Namespace: - return true, Convert_v1_Namespace_To_api_Namespace(a, b, s) - } - case *api.Namespace: - switch b := objB.(type) { - case *v1.Namespace: - return true, Convert_api_Namespace_To_v1_Namespace(a, b, s) - } - - case *v1.Service: - switch b := objB.(type) { - case *api.Service: - return true, Convert_v1_Service_To_api_Service(a, b, s) - } - case *api.Service: - switch b := objB.(type) { - case *v1.Service: - return true, Convert_api_Service_To_v1_Service(a, b, s) - } - - case *v1.Endpoints: - switch b := objB.(type) { - case *api.Endpoints: - return true, Convert_v1_Endpoints_To_api_Endpoints(a, b, s) - } - case *api.Endpoints: - switch b := objB.(type) { - case *v1.Endpoints: - return true, Convert_api_Endpoints_To_v1_Endpoints(a, b, s) - } - - case *metav1.WatchEvent: - switch b := objB.(type) { - case *metav1.InternalEvent: - return true, metav1.Convert_versioned_Event_to_versioned_InternalEvent(a, b, s) - } - case *metav1.InternalEvent: - switch b := objB.(type) { - case *metav1.WatchEvent: - return true, metav1.Convert_versioned_InternalEvent_to_versioned_Event(a, b, s) - } - } - return false, nil - }) - return nil -} - -func addConversionFuncs(scheme *runtime.Scheme) error { - // Add non-generated conversion 
functions - err := scheme.AddConversionFuncs( - Convert_api_Pod_To_v1_Pod, - Convert_api_PodSpec_To_v1_PodSpec, - Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec, - Convert_api_ServiceSpec_To_v1_ServiceSpec, - Convert_v1_Pod_To_api_Pod, - Convert_v1_PodSpec_To_api_PodSpec, - Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec, - Convert_v1_Secret_To_api_Secret, - Convert_v1_ServiceSpec_To_api_ServiceSpec, - Convert_v1_ResourceList_To_api_ResourceList, - Convert_v1_ReplicationController_to_extensions_ReplicaSet, - Convert_v1_ReplicationControllerSpec_to_extensions_ReplicaSetSpec, - Convert_v1_ReplicationControllerStatus_to_extensions_ReplicaSetStatus, - Convert_extensions_ReplicaSet_to_v1_ReplicationController, - Convert_extensions_ReplicaSetSpec_to_v1_ReplicationControllerSpec, - Convert_extensions_ReplicaSetStatus_to_v1_ReplicationControllerStatus, - ) - if err != nil { - return err - } - - // Add field conversion funcs. - err = scheme.AddFieldLabelConversionFunc("v1", "Pod", - func(label, value string) (string, string, error) { - switch label { - case "metadata.annotations", - "metadata.labels", - "metadata.name", - "metadata.namespace", - "metadata.uid", - "spec.nodeName", - "spec.restartPolicy", - "spec.serviceAccountName", - "spec.schedulerName", - "status.phase", - "status.hostIP", - "status.podIP": - return label, value, nil - // This is for backwards compatibility with old v1 clients which send spec.host - case "spec.host": - return "spec.nodeName", value, nil - default: - return "", "", fmt.Errorf("field label not supported: %s", label) - } - }, - ) - if err != nil { - return err - } - err = scheme.AddFieldLabelConversionFunc("v1", "Node", - func(label, value string) (string, string, error) { - switch label { - case "metadata.name": - return label, value, nil - case "spec.unschedulable": - return label, value, nil - default: - return "", "", fmt.Errorf("field label not supported: %s", label) - } - }, - ) - if err != nil { - return err - } - err = scheme.AddFieldLabelConversionFunc("v1", "ReplicationController", - func(label, value string) (string, string, error) { - switch label { - case "metadata.name", - "metadata.namespace", - "status.replicas": - return label, value, nil - default: - return "", "", fmt.Errorf("field label not supported: %s", label) - } - }) - if err != nil { - return err - } - if err := AddFieldLabelConversionsForEvent(scheme); err != nil { - return err - } - if err := AddFieldLabelConversionsForNamespace(scheme); err != nil { - return err - } - if err := AddFieldLabelConversionsForSecret(scheme); err != nil { - return err - } - return nil -} - -func Convert_v1_ReplicationController_to_extensions_ReplicaSet(in *v1.ReplicationController, out *extensions.ReplicaSet, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_ReplicationControllerSpec_to_extensions_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_ReplicationControllerStatus_to_extensions_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1_ReplicationControllerSpec_to_extensions_ReplicaSetSpec(in *v1.ReplicationControllerSpec, out *extensions.ReplicaSetSpec, s conversion.Scope) error { - out.Replicas = *in.Replicas - if in.Selector != nil { - metav1.Convert_map_to_unversioned_LabelSelector(&in.Selector, out.Selector, s) - } - if in.Template != nil { - if err := Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in.Template, 
&out.Template, s); err != nil { - return err - } - } - return nil -} - -func Convert_v1_ReplicationControllerStatus_to_extensions_ReplicaSetStatus(in *v1.ReplicationControllerStatus, out *extensions.ReplicaSetStatus, s conversion.Scope) error { - out.Replicas = in.Replicas - out.FullyLabeledReplicas = in.FullyLabeledReplicas - out.ReadyReplicas = in.ReadyReplicas - out.AvailableReplicas = in.AvailableReplicas - out.ObservedGeneration = in.ObservedGeneration - return nil -} - -func Convert_extensions_ReplicaSet_to_v1_ReplicationController(in *extensions.ReplicaSet, out *v1.ReplicationController, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_extensions_ReplicaSetSpec_to_v1_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil { - fieldErr, ok := err.(*field.Error) - if !ok { - return err - } - if out.Annotations == nil { - out.Annotations = make(map[string]string) - } - out.Annotations[v1.NonConvertibleAnnotationPrefix+"/"+fieldErr.Field] = reflect.ValueOf(fieldErr.BadValue).String() - } - if err := Convert_extensions_ReplicaSetStatus_to_v1_ReplicationControllerStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_extensions_ReplicaSetSpec_to_v1_ReplicationControllerSpec(in *extensions.ReplicaSetSpec, out *v1.ReplicationControllerSpec, s conversion.Scope) error { - out.Replicas = new(int32) - *out.Replicas = in.Replicas - out.MinReadySeconds = in.MinReadySeconds - var invalidErr error - if in.Selector != nil { - invalidErr = metav1.Convert_unversioned_LabelSelector_to_map(in.Selector, &out.Selector, s) - } - out.Template = new(v1.PodTemplateSpec) - if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, out.Template, s); err != nil { - return err - } - return invalidErr -} - -func Convert_extensions_ReplicaSetStatus_to_v1_ReplicationControllerStatus(in *extensions.ReplicaSetStatus, out *v1.ReplicationControllerStatus, s conversion.Scope) error { - out.Replicas = in.Replicas - out.FullyLabeledReplicas = in.FullyLabeledReplicas - out.ReadyReplicas = in.ReadyReplicas - out.AvailableReplicas = in.AvailableReplicas - out.ObservedGeneration = in.ObservedGeneration - return nil -} - -func Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(in *api.ReplicationControllerSpec, out *v1.ReplicationControllerSpec, s conversion.Scope) error { - out.Replicas = &in.Replicas - out.MinReadySeconds = in.MinReadySeconds - out.Selector = in.Selector - if in.Template != nil { - out.Template = new(v1.PodTemplateSpec) - if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in.Template, out.Template, s); err != nil { - return err - } - } else { - out.Template = nil - } - return nil -} - -func Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec(in *v1.ReplicationControllerSpec, out *api.ReplicationControllerSpec, s conversion.Scope) error { - if in.Replicas != nil { - out.Replicas = *in.Replicas - } - out.MinReadySeconds = in.MinReadySeconds - out.Selector = in.Selector - if in.Template != nil { - out.Template = new(api.PodTemplateSpec) - if err := Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in.Template, out.Template, s); err != nil { - return err - } - } else { - out.Template = nil - } - return nil -} - -func Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in *api.PodTemplateSpec, out *v1.PodTemplateSpec, s conversion.Scope) error { - if err := autoConvert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in, out, s); err != nil { - return err - } - - return nil -} - 
-func Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in *v1.PodTemplateSpec, out *api.PodTemplateSpec, s conversion.Scope) error { - if err := autoConvert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in, out, s); err != nil { - return err - } - - return nil -} - -// The following two v1.PodSpec conversions are done here to support v1.ServiceAccount -// as an alias for ServiceAccountName. -func Convert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *v1.PodSpec, s conversion.Scope) error { - if err := autoConvert_api_PodSpec_To_v1_PodSpec(in, out, s); err != nil { - return err - } - - // DeprecatedServiceAccount is an alias for ServiceAccountName. - out.DeprecatedServiceAccount = in.ServiceAccountName - - if in.SecurityContext != nil { - // the host namespace fields have to be handled here for backward compatibility - // with v1.0.0 - out.HostPID = in.SecurityContext.HostPID - out.HostNetwork = in.SecurityContext.HostNetwork - out.HostIPC = in.SecurityContext.HostIPC - } - - return nil -} - -func Convert_v1_PodSpec_To_api_PodSpec(in *v1.PodSpec, out *api.PodSpec, s conversion.Scope) error { - if err := autoConvert_v1_PodSpec_To_api_PodSpec(in, out, s); err != nil { - return err - } - - // We support DeprecatedServiceAccount as an alias for ServiceAccountName. - // If both are specified, ServiceAccountName (the new field) wins. - if in.ServiceAccountName == "" { - out.ServiceAccountName = in.DeprecatedServiceAccount - } - - // the host namespace fields have to be handled specially for backward compatibility - // with v1.0.0 - if out.SecurityContext == nil { - out.SecurityContext = new(api.PodSecurityContext) - } - out.SecurityContext.HostNetwork = in.HostNetwork - out.SecurityContext.HostPID = in.HostPID - out.SecurityContext.HostIPC = in.HostIPC - - return nil -} - -func Convert_api_Pod_To_v1_Pod(in *api.Pod, out *v1.Pod, s conversion.Scope) error { - if err := autoConvert_api_Pod_To_v1_Pod(in, out, s); err != nil { - return err - } - - // drop init container annotations so they don't take effect on legacy kubelets. - // remove this once the oldest supported kubelet no longer honors the annotations over the field. 
- if len(out.Annotations) > 0 { - old := out.Annotations - out.Annotations = make(map[string]string, len(old)) - for k, v := range old { - out.Annotations[k] = v - } - delete(out.Annotations, "pod.beta.kubernetes.io/init-containers") - delete(out.Annotations, "pod.alpha.kubernetes.io/init-containers") - delete(out.Annotations, "pod.beta.kubernetes.io/init-container-statuses") - delete(out.Annotations, "pod.alpha.kubernetes.io/init-container-statuses") - } - - return nil -} - -func Convert_v1_Secret_To_api_Secret(in *v1.Secret, out *api.Secret, s conversion.Scope) error { - if err := autoConvert_v1_Secret_To_api_Secret(in, out, s); err != nil { - return err - } - - // StringData overwrites Data - if len(in.StringData) > 0 { - if out.Data == nil { - out.Data = map[string][]byte{} - } - for k, v := range in.StringData { - out.Data[k] = []byte(v) - } - } - - return nil -} -func Convert_api_SecurityContext_To_v1_SecurityContext(in *api.SecurityContext, out *v1.SecurityContext, s conversion.Scope) error { - if in.Capabilities != nil { - out.Capabilities = new(v1.Capabilities) - if err := Convert_api_Capabilities_To_v1_Capabilities(in.Capabilities, out.Capabilities, s); err != nil { - return err - } - } else { - out.Capabilities = nil - } - out.Privileged = in.Privileged - if in.SELinuxOptions != nil { - out.SELinuxOptions = new(v1.SELinuxOptions) - if err := Convert_api_SELinuxOptions_To_v1_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil { - return err - } - } else { - out.SELinuxOptions = nil - } - out.RunAsUser = in.RunAsUser - out.RunAsNonRoot = in.RunAsNonRoot - out.ReadOnlyRootFilesystem = in.ReadOnlyRootFilesystem - out.AllowPrivilegeEscalation = in.AllowPrivilegeEscalation - return nil -} - -func Convert_api_PodSecurityContext_To_v1_PodSecurityContext(in *api.PodSecurityContext, out *v1.PodSecurityContext, s conversion.Scope) error { - out.SupplementalGroups = in.SupplementalGroups - if in.SELinuxOptions != nil { - out.SELinuxOptions = new(v1.SELinuxOptions) - if err := Convert_api_SELinuxOptions_To_v1_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil { - return err - } - } else { - out.SELinuxOptions = nil - } - out.RunAsUser = in.RunAsUser - out.RunAsNonRoot = in.RunAsNonRoot - out.FSGroup = in.FSGroup - return nil -} - -func Convert_v1_PodSecurityContext_To_api_PodSecurityContext(in *v1.PodSecurityContext, out *api.PodSecurityContext, s conversion.Scope) error { - out.SupplementalGroups = in.SupplementalGroups - if in.SELinuxOptions != nil { - out.SELinuxOptions = new(api.SELinuxOptions) - if err := Convert_v1_SELinuxOptions_To_api_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil { - return err - } - } else { - out.SELinuxOptions = nil - } - out.RunAsUser = in.RunAsUser - out.RunAsNonRoot = in.RunAsNonRoot - out.FSGroup = in.FSGroup - return nil -} - -// +k8s:conversion-fn=copy-only -func Convert_v1_ResourceList_To_api_ResourceList(in *v1.ResourceList, out *api.ResourceList, s conversion.Scope) error { - if *in == nil { - return nil - } - if *out == nil { - *out = make(api.ResourceList, len(*in)) - } - for key, val := range *in { - // Moved to defaults - // TODO(#18538): We round up resource values to milli scale to maintain API compatibility. - // In the future, we should instead reject values that need rounding. 
- // const milliScale = -3 - // val.RoundUp(milliScale) - - (*out)[api.ResourceName(key)] = val - } - return nil -} - -func AddFieldLabelConversionsForEvent(scheme *runtime.Scheme) error { - return scheme.AddFieldLabelConversionFunc("v1", "Event", - func(label, value string) (string, string, error) { - switch label { - case "involvedObject.kind", - "involvedObject.namespace", - "involvedObject.name", - "involvedObject.uid", - "involvedObject.apiVersion", - "involvedObject.resourceVersion", - "involvedObject.fieldPath", - "reason", - "source", - "type", - "metadata.namespace", - "metadata.name": - return label, value, nil - default: - return "", "", fmt.Errorf("field label not supported: %s", label) - } - }) -} - -func AddFieldLabelConversionsForNamespace(scheme *runtime.Scheme) error { - return scheme.AddFieldLabelConversionFunc("v1", "Namespace", - func(label, value string) (string, string, error) { - switch label { - case "status.phase", - "metadata.name": - return label, value, nil - default: - return "", "", fmt.Errorf("field label not supported: %s", label) - } - }) -} - -func AddFieldLabelConversionsForSecret(scheme *runtime.Scheme) error { - return scheme.AddFieldLabelConversionFunc("v1", "Secret", - func(label, value string) (string, string, error) { - switch label { - case "type", - "metadata.namespace", - "metadata.name": - return label, value, nil - default: - return "", "", fmt.Errorf("field label not supported: %s", label) - } - }) -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/defaults.go b/vendor/k8s.io/kubernetes/pkg/api/v1/defaults.go deleted file mode 100644 index 9bf082d3d9ab..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/defaults.go +++ /dev/null @@ -1,391 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/kubernetes/pkg/util/parsers" - utilpointer "k8s.io/kubernetes/pkg/util/pointer" -) - -func addDefaultingFuncs(scheme *runtime.Scheme) error { - return RegisterDefaults(scheme) -} - -func SetDefaults_ResourceList(obj *v1.ResourceList) { - for key, val := range *obj { - // TODO(#18538): We round up resource values to milli scale to maintain API compatibility. - // In the future, we should instead reject values that need rounding. 
- const milliScale = -3 - val.RoundUp(milliScale) - - (*obj)[v1.ResourceName(key)] = val - } -} - -func SetDefaults_PodExecOptions(obj *v1.PodExecOptions) { - obj.Stdout = true - obj.Stderr = true -} -func SetDefaults_PodAttachOptions(obj *v1.PodAttachOptions) { - obj.Stdout = true - obj.Stderr = true -} -func SetDefaults_ReplicationController(obj *v1.ReplicationController) { - var labels map[string]string - if obj.Spec.Template != nil { - labels = obj.Spec.Template.Labels - } - // TODO: support templates defined elsewhere when we support them in the API - if labels != nil { - if len(obj.Spec.Selector) == 0 { - obj.Spec.Selector = labels - } - if len(obj.Labels) == 0 { - obj.Labels = labels - } - } - if obj.Spec.Replicas == nil { - obj.Spec.Replicas = new(int32) - *obj.Spec.Replicas = 1 - } -} -func SetDefaults_Volume(obj *v1.Volume) { - if utilpointer.AllPtrFieldsNil(&obj.VolumeSource) { - obj.VolumeSource = v1.VolumeSource{ - EmptyDir: &v1.EmptyDirVolumeSource{}, - } - } -} -func SetDefaults_ContainerPort(obj *v1.ContainerPort) { - if obj.Protocol == "" { - obj.Protocol = v1.ProtocolTCP - } -} -func SetDefaults_Container(obj *v1.Container) { - if obj.ImagePullPolicy == "" { - // Ignore error and assume it has been validated elsewhere - _, tag, _, _ := parsers.ParseImageName(obj.Image) - - // Check image tag - if tag == "latest" { - obj.ImagePullPolicy = v1.PullAlways - } else { - obj.ImagePullPolicy = v1.PullIfNotPresent - } - } - if obj.TerminationMessagePath == "" { - obj.TerminationMessagePath = v1.TerminationMessagePathDefault - } - if obj.TerminationMessagePolicy == "" { - obj.TerminationMessagePolicy = v1.TerminationMessageReadFile - } -} -func SetDefaults_Service(obj *v1.Service) { - if obj.Spec.SessionAffinity == "" { - obj.Spec.SessionAffinity = v1.ServiceAffinityNone - } - if obj.Spec.SessionAffinity == v1.ServiceAffinityNone { - obj.Spec.SessionAffinityConfig = nil - } - if obj.Spec.SessionAffinity == v1.ServiceAffinityClientIP { - if obj.Spec.SessionAffinityConfig == nil || obj.Spec.SessionAffinityConfig.ClientIP == nil || obj.Spec.SessionAffinityConfig.ClientIP.TimeoutSeconds == nil { - timeoutSeconds := v1.DefaultClientIPServiceAffinitySeconds - obj.Spec.SessionAffinityConfig = &v1.SessionAffinityConfig{ - ClientIP: &v1.ClientIPConfig{ - TimeoutSeconds: &timeoutSeconds, - }, - } - } - } - if obj.Spec.Type == "" { - obj.Spec.Type = v1.ServiceTypeClusterIP - } - for i := range obj.Spec.Ports { - sp := &obj.Spec.Ports[i] - if sp.Protocol == "" { - sp.Protocol = v1.ProtocolTCP - } - if sp.TargetPort == intstr.FromInt(0) || sp.TargetPort == intstr.FromString("") { - sp.TargetPort = intstr.FromInt(int(sp.Port)) - } - } - // Defaults ExternalTrafficPolicy field for NodePort / LoadBalancer service - // to Global for consistency. 
- if (obj.Spec.Type == v1.ServiceTypeNodePort || - obj.Spec.Type == v1.ServiceTypeLoadBalancer) && - obj.Spec.ExternalTrafficPolicy == "" { - obj.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeCluster - } -} -func SetDefaults_Pod(obj *v1.Pod) { - // If limits are specified, but requests are not, default requests to limits - // This is done here rather than a more specific defaulting pass on v1.ResourceRequirements - // because we only want this defaulting semantic to take place on a v1.Pod and not a v1.PodTemplate - for i := range obj.Spec.Containers { - // set requests to limits if requests are not specified, but limits are - if obj.Spec.Containers[i].Resources.Limits != nil { - if obj.Spec.Containers[i].Resources.Requests == nil { - obj.Spec.Containers[i].Resources.Requests = make(v1.ResourceList) - } - for key, value := range obj.Spec.Containers[i].Resources.Limits { - if _, exists := obj.Spec.Containers[i].Resources.Requests[key]; !exists { - obj.Spec.Containers[i].Resources.Requests[key] = *(value.Copy()) - } - } - } - } - for i := range obj.Spec.InitContainers { - if obj.Spec.InitContainers[i].Resources.Limits != nil { - if obj.Spec.InitContainers[i].Resources.Requests == nil { - obj.Spec.InitContainers[i].Resources.Requests = make(v1.ResourceList) - } - for key, value := range obj.Spec.InitContainers[i].Resources.Limits { - if _, exists := obj.Spec.InitContainers[i].Resources.Requests[key]; !exists { - obj.Spec.InitContainers[i].Resources.Requests[key] = *(value.Copy()) - } - } - } - } -} -func SetDefaults_PodSpec(obj *v1.PodSpec) { - if obj.DNSPolicy == "" { - obj.DNSPolicy = v1.DNSClusterFirst - } - if obj.RestartPolicy == "" { - obj.RestartPolicy = v1.RestartPolicyAlways - } - if obj.HostNetwork { - defaultHostNetworkPorts(&obj.Containers) - defaultHostNetworkPorts(&obj.InitContainers) - } - if obj.SecurityContext == nil { - obj.SecurityContext = &v1.PodSecurityContext{} - } - if obj.TerminationGracePeriodSeconds == nil { - period := int64(v1.DefaultTerminationGracePeriodSeconds) - obj.TerminationGracePeriodSeconds = &period - } - if obj.SchedulerName == "" { - obj.SchedulerName = v1.DefaultSchedulerName - } -} -func SetDefaults_Probe(obj *v1.Probe) { - if obj.TimeoutSeconds == 0 { - obj.TimeoutSeconds = 1 - } - if obj.PeriodSeconds == 0 { - obj.PeriodSeconds = 10 - } - if obj.SuccessThreshold == 0 { - obj.SuccessThreshold = 1 - } - if obj.FailureThreshold == 0 { - obj.FailureThreshold = 3 - } -} -func SetDefaults_SecretVolumeSource(obj *v1.SecretVolumeSource) { - if obj.DefaultMode == nil { - perm := int32(v1.SecretVolumeSourceDefaultMode) - obj.DefaultMode = &perm - } -} -func SetDefaults_ConfigMapVolumeSource(obj *v1.ConfigMapVolumeSource) { - if obj.DefaultMode == nil { - perm := int32(v1.ConfigMapVolumeSourceDefaultMode) - obj.DefaultMode = &perm - } -} -func SetDefaults_DownwardAPIVolumeSource(obj *v1.DownwardAPIVolumeSource) { - if obj.DefaultMode == nil { - perm := int32(v1.DownwardAPIVolumeSourceDefaultMode) - obj.DefaultMode = &perm - } -} -func SetDefaults_Secret(obj *v1.Secret) { - if obj.Type == "" { - obj.Type = v1.SecretTypeOpaque - } -} -func SetDefaults_ProjectedVolumeSource(obj *v1.ProjectedVolumeSource) { - if obj.DefaultMode == nil { - perm := int32(v1.ProjectedVolumeSourceDefaultMode) - obj.DefaultMode = &perm - } -} -func SetDefaults_PersistentVolume(obj *v1.PersistentVolume) { - if obj.Status.Phase == "" { - obj.Status.Phase = v1.VolumePending - } - if obj.Spec.PersistentVolumeReclaimPolicy == "" { - obj.Spec.PersistentVolumeReclaimPolicy = 
v1.PersistentVolumeReclaimRetain - } -} -func SetDefaults_PersistentVolumeClaim(obj *v1.PersistentVolumeClaim) { - if obj.Status.Phase == "" { - obj.Status.Phase = v1.ClaimPending - } -} -func SetDefaults_ISCSIVolumeSource(obj *v1.ISCSIVolumeSource) { - if obj.ISCSIInterface == "" { - obj.ISCSIInterface = "default" - } -} -func SetDefaults_AzureDiskVolumeSource(obj *v1.AzureDiskVolumeSource) { - if obj.CachingMode == nil { - obj.CachingMode = new(v1.AzureDataDiskCachingMode) - *obj.CachingMode = v1.AzureDataDiskCachingReadWrite - } - if obj.Kind == nil { - obj.Kind = new(v1.AzureDataDiskKind) - *obj.Kind = v1.AzureSharedBlobDisk - } - if obj.FSType == nil { - obj.FSType = new(string) - *obj.FSType = "ext4" - } - if obj.ReadOnly == nil { - obj.ReadOnly = new(bool) - *obj.ReadOnly = false - } -} -func SetDefaults_Endpoints(obj *v1.Endpoints) { - for i := range obj.Subsets { - ss := &obj.Subsets[i] - for i := range ss.Ports { - ep := &ss.Ports[i] - if ep.Protocol == "" { - ep.Protocol = v1.ProtocolTCP - } - } - } -} -func SetDefaults_HTTPGetAction(obj *v1.HTTPGetAction) { - if obj.Path == "" { - obj.Path = "/" - } - if obj.Scheme == "" { - obj.Scheme = v1.URISchemeHTTP - } -} -func SetDefaults_NamespaceStatus(obj *v1.NamespaceStatus) { - if obj.Phase == "" { - obj.Phase = v1.NamespaceActive - } -} -func SetDefaults_Node(obj *v1.Node) { - if obj.Spec.ExternalID == "" { - obj.Spec.ExternalID = obj.Name - } -} -func SetDefaults_NodeStatus(obj *v1.NodeStatus) { - if obj.Allocatable == nil && obj.Capacity != nil { - obj.Allocatable = make(v1.ResourceList, len(obj.Capacity)) - for key, value := range obj.Capacity { - obj.Allocatable[key] = *(value.Copy()) - } - obj.Allocatable = obj.Capacity - } -} -func SetDefaults_ObjectFieldSelector(obj *v1.ObjectFieldSelector) { - if obj.APIVersion == "" { - obj.APIVersion = "v1" - } -} -func SetDefaults_LimitRangeItem(obj *v1.LimitRangeItem) { - // for container limits, we apply default values - if obj.Type == v1.LimitTypeContainer { - - if obj.Default == nil { - obj.Default = make(v1.ResourceList) - } - if obj.DefaultRequest == nil { - obj.DefaultRequest = make(v1.ResourceList) - } - - // If a default limit is unspecified, but the max is specified, default the limit to the max - for key, value := range obj.Max { - if _, exists := obj.Default[key]; !exists { - obj.Default[key] = *(value.Copy()) - } - } - // If a default limit is specified, but the default request is not, default request to limit - for key, value := range obj.Default { - if _, exists := obj.DefaultRequest[key]; !exists { - obj.DefaultRequest[key] = *(value.Copy()) - } - } - // If a default request is not specified, but the min is provided, default request to the min - for key, value := range obj.Min { - if _, exists := obj.DefaultRequest[key]; !exists { - obj.DefaultRequest[key] = *(value.Copy()) - } - } - } -} -func SetDefaults_ConfigMap(obj *v1.ConfigMap) { - if obj.Data == nil { - obj.Data = make(map[string]string) - } -} - -// With host networking default all container ports to host ports. 
-func defaultHostNetworkPorts(containers *[]v1.Container) { - for i := range *containers { - for j := range (*containers)[i].Ports { - if (*containers)[i].Ports[j].HostPort == 0 { - (*containers)[i].Ports[j].HostPort = (*containers)[i].Ports[j].ContainerPort - } - } - } -} - -func SetDefaults_RBDVolumeSource(obj *v1.RBDVolumeSource) { - if obj.RBDPool == "" { - obj.RBDPool = "rbd" - } - if obj.RadosUser == "" { - obj.RadosUser = "admin" - } - if obj.Keyring == "" { - obj.Keyring = "/etc/ceph/keyring" - } -} - -func SetDefaults_ScaleIOVolumeSource(obj *v1.ScaleIOVolumeSource) { - if obj.ProtectionDomain == "" { - obj.ProtectionDomain = "default" - } - if obj.StoragePool == "" { - obj.StoragePool = "default" - } - if obj.StorageMode == "" { - obj.StorageMode = "ThinProvisioned" - } - if obj.FSType == "" { - obj.FSType = "xfs" - } -} - -func SetDefaults_HostPathVolumeSource(obj *v1.HostPathVolumeSource) { - typeVol := v1.HostPathUnset - if obj.Type == nil { - obj.Type = &typeVol - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/doc.go b/vendor/k8s.io/kubernetes/pkg/api/v1/doc.go deleted file mode 100644 index 09b5d461dec8..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:conversion-gen=k8s.io/kubernetes/pkg/api -// +k8s:conversion-gen-external-types=../../../vendor/k8s.io/api/core/v1 -// +k8s:defaulter-gen=TypeMeta -// +k8s:defaulter-gen-input=../../../vendor/k8s.io/api/core/v1 - -// Package v1 is the v1 version of the API. -package v1 diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/endpoints/BUILD b/vendor/k8s.io/kubernetes/pkg/api/v1/endpoints/BUILD index 10752b7b505a..f9597b402ea8 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/endpoints/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/api/v1/endpoints/BUILD @@ -9,6 +9,7 @@ load( go_library( name = "go_default_library", srcs = ["util.go"], + importpath = "k8s.io/kubernetes/pkg/api/v1/endpoints", deps = [ "//pkg/util/hash:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", @@ -19,6 +20,7 @@ go_library( go_test( name = "go_default_test", srcs = ["util_test.go"], + importpath = "k8s.io/kubernetes/pkg/api/v1/endpoints", library = ":go_default_library", deps = [ "//vendor/github.com/davecgh/go-spew/spew:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/generate.go b/vendor/k8s.io/kubernetes/pkg/api/v1/generate.go deleted file mode 100644 index 5ebe9a0e5169..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/generate.go +++ /dev/null @@ -1,65 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "fmt" - "k8s.io/api/core/v1" - - utilrand "k8s.io/apimachinery/pkg/util/rand" -) - -// NameGenerator generates names for objects. Some backends may have more information -// available to guide selection of new names and this interface hides those details. -type NameGenerator interface { - // GenerateName generates a valid name from the base name, adding a random suffix to the - // the base. If base is valid, the returned name must also be valid. The generator is - // responsible for knowing the maximum valid name length. - GenerateName(base string) string -} - -// GenerateName will resolve the object name of the provided v1.ObjectMeta to a generated version if -// necessary. It expects that validation for v1.ObjectMeta has already completed (that Base is a -// valid name) and that the NameGenerator generates a name that is also valid. -func GenerateName(u NameGenerator, meta *v1.ObjectMeta) { - if len(meta.GenerateName) == 0 || len(meta.Name) != 0 { - return - } - meta.Name = u.GenerateName(meta.GenerateName) -} - -// simpleNameGenerator generates random names. -type simpleNameGenerator struct{} - -// SimpleNameGenerator is a generator that returns the name plus a random suffix of five alphanumerics -// when a name is requested. The string is guaranteed to not exceed the length of a standard Kubernetes -// name (63 characters) -var SimpleNameGenerator NameGenerator = simpleNameGenerator{} - -const ( - // TODO: make this flexible for non-core resources with alternate naming rules. 
- maxNameLength = 63 - randomLength = 5 - maxGeneratedNameLength = maxNameLength - randomLength -) - -func (simpleNameGenerator) GenerateName(base string) string { - if len(base) > maxGeneratedNameLength { - base = base[:maxGeneratedNameLength] - } - return fmt.Sprintf("%s%s", base, utilrand.String(randomLength)) -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/helper/BUILD b/vendor/k8s.io/kubernetes/pkg/api/v1/helper/BUILD deleted file mode 100644 index f7e8849de73c..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/helper/BUILD +++ /dev/null @@ -1,48 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_test( - name = "go_default_test", - srcs = ["helpers_test.go"], - library = ":go_default_library", - deps = [ - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - ], -) - -go_library( - name = "go_default_library", - srcs = ["helpers.go"], - deps = [ - "//pkg/api/helper:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/selection:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [ - ":package-srcs", - "//pkg/api/v1/helper/qos:all-srcs", - ], - tags = ["automanaged"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/helper/helpers.go b/vendor/k8s.io/kubernetes/pkg/api/v1/helper/helpers.go deleted file mode 100644 index 0e6a48d81638..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/helper/helpers.go +++ /dev/null @@ -1,472 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package helper - -import ( - "encoding/json" - "fmt" - "strings" - - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/selection" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/pkg/api/helper" -) - -// IsExtendedResourceName returns true if the resource name is not in the -// default namespace, or it has the opaque integer resource prefix. -func IsExtendedResourceName(name v1.ResourceName) bool { - // TODO: Remove OIR part following deprecation. - return !IsDefaultNamespaceResource(name) || IsOpaqueIntResourceName(name) -} - -// IsDefaultNamespaceResource returns true if the resource name is in the -// *kubernetes.io/ namespace. Partially-qualified (unprefixed) names are -// implicitly in the kubernetes.io/ namespace. 
-func IsDefaultNamespaceResource(name v1.ResourceName) bool { - return !strings.Contains(string(name), "/") || - strings.Contains(string(name), v1.ResourceDefaultNamespacePrefix) - -} - -// IsHugePageResourceName returns true if the resource name has the huge page -// resource prefix. -func IsHugePageResourceName(name v1.ResourceName) bool { - return strings.HasPrefix(string(name), v1.ResourceHugePagesPrefix) -} - -// HugePageResourceName returns a ResourceName with the canonical hugepage -// prefix prepended for the specified page size. The page size is converted -// to its canonical representation. -func HugePageResourceName(pageSize resource.Quantity) v1.ResourceName { - return v1.ResourceName(fmt.Sprintf("%s%s", v1.ResourceHugePagesPrefix, pageSize.String())) -} - -// HugePageSizeFromResourceName returns the page size for the specified huge page -// resource name. If the specified input is not a valid huge page resource name -// an error is returned. -func HugePageSizeFromResourceName(name v1.ResourceName) (resource.Quantity, error) { - if !IsHugePageResourceName(name) { - return resource.Quantity{}, fmt.Errorf("resource name: %s is not valid hugepage name", name) - } - pageSize := strings.TrimPrefix(string(name), v1.ResourceHugePagesPrefix) - return resource.ParseQuantity(pageSize) -} - -// IsOpaqueIntResourceName returns true if the resource name has the opaque -// integer resource prefix. -func IsOpaqueIntResourceName(name v1.ResourceName) bool { - return strings.HasPrefix(string(name), v1.ResourceOpaqueIntPrefix) -} - -// OpaqueIntResourceName returns a ResourceName with the canonical opaque -// integer prefix prepended. If the argument already has the prefix, it is -// returned unmodified. -func OpaqueIntResourceName(name string) v1.ResourceName { - if IsOpaqueIntResourceName(v1.ResourceName(name)) { - return v1.ResourceName(name) - } - return v1.ResourceName(fmt.Sprintf("%s%s", v1.ResourceOpaqueIntPrefix, name)) -} - -var overcommitBlacklist = sets.NewString(string(v1.ResourceNvidiaGPU)) - -// IsOvercommitAllowed returns true if the resource is in the default -// namespace and not blacklisted. -func IsOvercommitAllowed(name v1.ResourceName) bool { - return IsDefaultNamespaceResource(name) && - !overcommitBlacklist.Has(string(name)) -} - -// this function aims to check if the service's ClusterIP is set or not -// the objective is not to perform validation here -func IsServiceIPSet(service *v1.Service) bool { - return service.Spec.ClusterIP != v1.ClusterIPNone && service.Spec.ClusterIP != "" -} - -// this function aims to check if the service's cluster IP is requested or not -func IsServiceIPRequested(service *v1.Service) bool { - // ExternalName services are CNAME aliases to external ones. Ignore the IP. - if service.Spec.Type == v1.ServiceTypeExternalName { - return false - } - return service.Spec.ClusterIP == "" -} - -// AddToNodeAddresses appends the NodeAddresses to the passed-by-pointer slice, -// only if they do not already exist -func AddToNodeAddresses(addresses *[]v1.NodeAddress, addAddresses ...v1.NodeAddress) { - for _, add := range addAddresses { - exists := false - for _, existing := range *addresses { - if existing.Address == add.Address && existing.Type == add.Type { - exists = true - break - } - } - if !exists { - *addresses = append(*addresses, add) - } - } -} - -// TODO: make method on LoadBalancerStatus? 
-func LoadBalancerStatusEqual(l, r *v1.LoadBalancerStatus) bool { - return ingressSliceEqual(l.Ingress, r.Ingress) -} - -func ingressSliceEqual(lhs, rhs []v1.LoadBalancerIngress) bool { - if len(lhs) != len(rhs) { - return false - } - for i := range lhs { - if !ingressEqual(&lhs[i], &rhs[i]) { - return false - } - } - return true -} - -func ingressEqual(lhs, rhs *v1.LoadBalancerIngress) bool { - if lhs.IP != rhs.IP { - return false - } - if lhs.Hostname != rhs.Hostname { - return false - } - return true -} - -// TODO: make method on LoadBalancerStatus? -func LoadBalancerStatusDeepCopy(lb *v1.LoadBalancerStatus) *v1.LoadBalancerStatus { - c := &v1.LoadBalancerStatus{} - c.Ingress = make([]v1.LoadBalancerIngress, len(lb.Ingress)) - for i := range lb.Ingress { - c.Ingress[i] = lb.Ingress[i] - } - return c -} - -// GetAccessModesAsString returns a string representation of an array of access modes. -// modes, when present, are always in the same order: RWO,ROX,RWX. -func GetAccessModesAsString(modes []v1.PersistentVolumeAccessMode) string { - modes = removeDuplicateAccessModes(modes) - modesStr := []string{} - if containsAccessMode(modes, v1.ReadWriteOnce) { - modesStr = append(modesStr, "RWO") - } - if containsAccessMode(modes, v1.ReadOnlyMany) { - modesStr = append(modesStr, "ROX") - } - if containsAccessMode(modes, v1.ReadWriteMany) { - modesStr = append(modesStr, "RWX") - } - return strings.Join(modesStr, ",") -} - -// GetAccessModesAsString returns an array of AccessModes from a string created by GetAccessModesAsString -func GetAccessModesFromString(modes string) []v1.PersistentVolumeAccessMode { - strmodes := strings.Split(modes, ",") - accessModes := []v1.PersistentVolumeAccessMode{} - for _, s := range strmodes { - s = strings.Trim(s, " ") - switch { - case s == "RWO": - accessModes = append(accessModes, v1.ReadWriteOnce) - case s == "ROX": - accessModes = append(accessModes, v1.ReadOnlyMany) - case s == "RWX": - accessModes = append(accessModes, v1.ReadWriteMany) - } - } - return accessModes -} - -// removeDuplicateAccessModes returns an array of access modes without any duplicates -func removeDuplicateAccessModes(modes []v1.PersistentVolumeAccessMode) []v1.PersistentVolumeAccessMode { - accessModes := []v1.PersistentVolumeAccessMode{} - for _, m := range modes { - if !containsAccessMode(accessModes, m) { - accessModes = append(accessModes, m) - } - } - return accessModes -} - -func containsAccessMode(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool { - for _, m := range modes { - if m == mode { - return true - } - } - return false -} - -// NodeSelectorRequirementsAsSelector converts the []NodeSelectorRequirement api type into a struct that implements -// labels.Selector. 
-func NodeSelectorRequirementsAsSelector(nsm []v1.NodeSelectorRequirement) (labels.Selector, error) { - if len(nsm) == 0 { - return labels.Nothing(), nil - } - selector := labels.NewSelector() - for _, expr := range nsm { - var op selection.Operator - switch expr.Operator { - case v1.NodeSelectorOpIn: - op = selection.In - case v1.NodeSelectorOpNotIn: - op = selection.NotIn - case v1.NodeSelectorOpExists: - op = selection.Exists - case v1.NodeSelectorOpDoesNotExist: - op = selection.DoesNotExist - case v1.NodeSelectorOpGt: - op = selection.GreaterThan - case v1.NodeSelectorOpLt: - op = selection.LessThan - default: - return nil, fmt.Errorf("%q is not a valid node selector operator", expr.Operator) - } - r, err := labels.NewRequirement(expr.Key, op, expr.Values) - if err != nil { - return nil, err - } - selector = selector.Add(*r) - } - return selector, nil -} - -// AddOrUpdateTolerationInPodSpec tries to add a toleration to the toleration list in PodSpec. -// Returns true if something was updated, false otherwise. -func AddOrUpdateTolerationInPodSpec(spec *v1.PodSpec, toleration *v1.Toleration) bool { - podTolerations := spec.Tolerations - - var newTolerations []v1.Toleration - updated := false - for i := range podTolerations { - if toleration.MatchToleration(&podTolerations[i]) { - if helper.Semantic.DeepEqual(toleration, podTolerations[i]) { - return false - } - newTolerations = append(newTolerations, *toleration) - updated = true - continue - } - - newTolerations = append(newTolerations, podTolerations[i]) - } - - if !updated { - newTolerations = append(newTolerations, *toleration) - } - - spec.Tolerations = newTolerations - return true -} - -// AddOrUpdateTolerationInPod tries to add a toleration to the pod's toleration list. -// Returns true if something was updated, false otherwise. -func AddOrUpdateTolerationInPod(pod *v1.Pod, toleration *v1.Toleration) bool { - return AddOrUpdateTolerationInPodSpec(&pod.Spec, toleration) -} - -// TolerationsTolerateTaint checks if taint is tolerated by any of the tolerations. -func TolerationsTolerateTaint(tolerations []v1.Toleration, taint *v1.Taint) bool { - for i := range tolerations { - if tolerations[i].ToleratesTaint(taint) { - return true - } - } - return false -} - -type taintsFilterFunc func(*v1.Taint) bool - -// TolerationsTolerateTaintsWithFilter checks if given tolerations tolerates -// all the taints that apply to the filter in given taint list. -func TolerationsTolerateTaintsWithFilter(tolerations []v1.Toleration, taints []v1.Taint, applyFilter taintsFilterFunc) bool { - if len(taints) == 0 { - return true - } - - for i := range taints { - if applyFilter != nil && !applyFilter(&taints[i]) { - continue - } - - if !TolerationsTolerateTaint(tolerations, &taints[i]) { - return false - } - } - - return true -} - -// Returns true and list of Tolerations matching all Taints if all are tolerated, or false otherwise. 
-func GetMatchingTolerations(taints []v1.Taint, tolerations []v1.Toleration) (bool, []v1.Toleration) { - if len(taints) == 0 { - return true, []v1.Toleration{} - } - if len(tolerations) == 0 && len(taints) > 0 { - return false, []v1.Toleration{} - } - result := []v1.Toleration{} - for i := range taints { - tolerated := false - for j := range tolerations { - if tolerations[j].ToleratesTaint(&taints[i]) { - result = append(result, tolerations[j]) - tolerated = true - break - } - } - if !tolerated { - return false, []v1.Toleration{} - } - } - return true, result -} - -func GetAvoidPodsFromNodeAnnotations(annotations map[string]string) (v1.AvoidPods, error) { - var avoidPods v1.AvoidPods - if len(annotations) > 0 && annotations[v1.PreferAvoidPodsAnnotationKey] != "" { - err := json.Unmarshal([]byte(annotations[v1.PreferAvoidPodsAnnotationKey]), &avoidPods) - if err != nil { - return avoidPods, err - } - } - return avoidPods, nil -} - -// SysctlsFromPodAnnotations parses the sysctl annotations into a slice of safe Sysctls -// and a slice of unsafe Sysctls. This is only a convenience wrapper around -// SysctlsFromPodAnnotation. -func SysctlsFromPodAnnotations(a map[string]string) ([]v1.Sysctl, []v1.Sysctl, error) { - safe, err := SysctlsFromPodAnnotation(a[v1.SysctlsPodAnnotationKey]) - if err != nil { - return nil, nil, err - } - unsafe, err := SysctlsFromPodAnnotation(a[v1.UnsafeSysctlsPodAnnotationKey]) - if err != nil { - return nil, nil, err - } - - return safe, unsafe, nil -} - -// SysctlsFromPodAnnotation parses an annotation value into a slice of Sysctls. -func SysctlsFromPodAnnotation(annotation string) ([]v1.Sysctl, error) { - if len(annotation) == 0 { - return nil, nil - } - - kvs := strings.Split(annotation, ",") - sysctls := make([]v1.Sysctl, len(kvs)) - for i, kv := range kvs { - cs := strings.Split(kv, "=") - if len(cs) != 2 || len(cs[0]) == 0 { - return nil, fmt.Errorf("sysctl %q not of the format sysctl_name=value", kv) - } - sysctls[i].Name = cs[0] - sysctls[i].Value = cs[1] - } - return sysctls, nil -} - -// PodAnnotationsFromSysctls creates an annotation value for a slice of Sysctls. -func PodAnnotationsFromSysctls(sysctls []v1.Sysctl) string { - if len(sysctls) == 0 { - return "" - } - - kvs := make([]string, len(sysctls)) - for i := range sysctls { - kvs[i] = fmt.Sprintf("%s=%s", sysctls[i].Name, sysctls[i].Value) - } - return strings.Join(kvs, ",") -} - -// GetPersistentVolumeClass returns StorageClassName. -func GetPersistentVolumeClass(volume *v1.PersistentVolume) string { - // Use beta annotation first - if class, found := volume.Annotations[v1.BetaStorageClassAnnotation]; found { - return class - } - - return volume.Spec.StorageClassName -} - -// GetPersistentVolumeClaimClass returns StorageClassName. If no storage class was -// requested, it returns "". -func GetPersistentVolumeClaimClass(claim *v1.PersistentVolumeClaim) string { - // Use beta annotation first - if class, found := claim.Annotations[v1.BetaStorageClassAnnotation]; found { - return class - } - - if claim.Spec.StorageClassName != nil { - return *claim.Spec.StorageClassName - } - - return "" -} - -// PersistentVolumeClaimHasClass returns true if given claim has set StorageClassName field. 
-func PersistentVolumeClaimHasClass(claim *v1.PersistentVolumeClaim) bool { - // Use beta annotation first - if _, found := claim.Annotations[v1.BetaStorageClassAnnotation]; found { - return true - } - - if claim.Spec.StorageClassName != nil { - return true - } - - return false -} - -// GetStorageNodeAffinityFromAnnotation gets the json serialized data from PersistentVolume.Annotations -// and converts it to the NodeAffinity type in api. -// TODO: update when storage node affinity graduates to beta -func GetStorageNodeAffinityFromAnnotation(annotations map[string]string) (*v1.NodeAffinity, error) { - if len(annotations) > 0 && annotations[v1.AlphaStorageNodeAffinityAnnotation] != "" { - var affinity v1.NodeAffinity - err := json.Unmarshal([]byte(annotations[v1.AlphaStorageNodeAffinityAnnotation]), &affinity) - if err != nil { - return nil, err - } - return &affinity, nil - } - return nil, nil -} - -// Converts NodeAffinity type to Alpha annotation for use in PersistentVolumes -// TODO: update when storage node affinity graduates to beta -func StorageNodeAffinityToAlphaAnnotation(annotations map[string]string, affinity *v1.NodeAffinity) error { - if affinity == nil { - return nil - } - - json, err := json.Marshal(*affinity) - if err != nil { - return err - } - annotations[v1.AlphaStorageNodeAffinityAnnotation] = string(json) - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/helper/qos/BUILD b/vendor/k8s.io/kubernetes/pkg/api/v1/helper/qos/BUILD deleted file mode 100644 index 65eaa353ab32..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/helper/qos/BUILD +++ /dev/null @@ -1,45 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_test( - name = "go_default_test", - srcs = ["qos_test.go"], - library = ":go_default_library", - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/helper/qos:go_default_library", - "//pkg/api/v1:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - ], -) - -go_library( - name = "go_default_library", - srcs = ["qos.go"], - deps = [ - "//pkg/api/v1/helper:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/helper/qos/qos.go b/vendor/k8s.io/kubernetes/pkg/api/v1/helper/qos/qos.go deleted file mode 100644 index 2fc7f22856a0..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/helper/qos/qos.go +++ /dev/null @@ -1,98 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package qos - -import ( - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/apimachinery/pkg/util/sets" - v1helper "k8s.io/kubernetes/pkg/api/v1/helper" -) - -// QOSList is a set of (resource name, QoS class) pairs. -type QOSList map[v1.ResourceName]v1.PodQOSClass - -func isSupportedQoSComputeResource(name v1.ResourceName) bool { - supportedQoSComputeResources := sets.NewString(string(v1.ResourceCPU), string(v1.ResourceMemory)) - return supportedQoSComputeResources.Has(string(name)) || v1helper.IsHugePageResourceName(name) -} - -// GetPodQOS returns the QoS class of a pod. -// A pod is besteffort if none of its containers have specified any requests or limits. -// A pod is guaranteed only when requests and limits are specified for all the containers and they are equal. -// A pod is burstable if limits and requests do not match across all containers. -func GetPodQOS(pod *v1.Pod) v1.PodQOSClass { - requests := v1.ResourceList{} - limits := v1.ResourceList{} - zeroQuantity := resource.MustParse("0") - isGuaranteed := true - for _, container := range pod.Spec.Containers { - // process requests - for name, quantity := range container.Resources.Requests { - if !isSupportedQoSComputeResource(name) { - continue - } - if quantity.Cmp(zeroQuantity) == 1 { - delta := quantity.Copy() - if _, exists := requests[name]; !exists { - requests[name] = *delta - } else { - delta.Add(requests[name]) - requests[name] = *delta - } - } - } - // process limits - qosLimitsFound := sets.NewString() - for name, quantity := range container.Resources.Limits { - if !isSupportedQoSComputeResource(name) { - continue - } - if quantity.Cmp(zeroQuantity) == 1 { - qosLimitsFound.Insert(string(name)) - delta := quantity.Copy() - if _, exists := limits[name]; !exists { - limits[name] = *delta - } else { - delta.Add(limits[name]) - limits[name] = *delta - } - } - } - - if !qosLimitsFound.HasAll(string(v1.ResourceMemory), string(v1.ResourceCPU)) { - isGuaranteed = false - } - } - if len(requests) == 0 && len(limits) == 0 { - return v1.PodQOSBestEffort - } - // Check is requests match limits for all resources. 
- if isGuaranteed { - for name, req := range requests { - if lim, exists := limits[name]; !exists || lim.Cmp(req) != 0 { - isGuaranteed = false - break - } - } - } - if isGuaranteed && - len(requests) == len(limits) { - return v1.PodQOSGuaranteed - } - return v1.PodQOSBurstable -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/node/BUILD b/vendor/k8s.io/kubernetes/pkg/api/v1/node/BUILD index 1c0101263989..52ebef6ccf84 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/node/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/api/v1/node/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["util.go"], + importpath = "k8s.io/kubernetes/pkg/api/v1/node", deps = ["//vendor/k8s.io/api/core/v1:go_default_library"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/pod/BUILD b/vendor/k8s.io/kubernetes/pkg/api/v1/pod/BUILD index b619ba1b0589..86a70f7b2c79 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/pod/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/api/v1/pod/BUILD @@ -9,6 +9,7 @@ load( go_library( name = "go_default_library", srcs = ["util.go"], + importpath = "k8s.io/kubernetes/pkg/api/v1/pod", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -19,6 +20,7 @@ go_library( go_test( name = "go_default_test", srcs = ["util_test.go"], + importpath = "k8s.io/kubernetes/pkg/api/v1/pod", library = ":go_default_library", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/resource/BUILD b/vendor/k8s.io/kubernetes/pkg/api/v1/resource/BUILD index dd60163916d9..21979e2b6cb9 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/resource/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/api/v1/resource/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["helpers_test.go"], + importpath = "k8s.io/kubernetes/pkg/api/v1/resource", library = ":go_default_library", deps = [ "//vendor/github.com/stretchr/testify/assert:go_default_library", @@ -20,6 +21,7 @@ go_test( go_library( name = "go_default_library", srcs = ["helpers.go"], + importpath = "k8s.io/kubernetes/pkg/api/v1/resource", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/resource/helpers.go b/vendor/k8s.io/kubernetes/pkg/api/v1/resource/helpers.go index ad10fc16b7ba..09d9ea041877 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/resource/helpers.go +++ b/vendor/k8s.io/kubernetes/pkg/api/v1/resource/helpers.go @@ -111,13 +111,9 @@ func ExtractResourceValueByContainerName(fs *v1.ResourceFieldSelector, pod *v1.P return ExtractContainerResourceValue(fs, container) } -type deepCopier interface { - DeepCopy(interface{}) (interface{}, error) -} - // ExtractResourceValueByContainerNameAndNodeAllocatable extracts the value of a resource // by providing container name and node allocatable -func ExtractResourceValueByContainerNameAndNodeAllocatable(copier deepCopier, fs *v1.ResourceFieldSelector, pod *v1.Pod, containerName string, nodeAllocatable v1.ResourceList) (string, error) { +func ExtractResourceValueByContainerNameAndNodeAllocatable(fs *v1.ResourceFieldSelector, pod *v1.Pod, containerName string, nodeAllocatable v1.ResourceList) (string, error) { realContainer, err := findContainerInPod(pod, containerName) if err != nil { return "", err @@ -196,7 +192,7 @@ func MergeContainerResourceLimits(container *v1.Container, if container.Resources.Limits == nil { container.Resources.Limits = 
make(v1.ResourceList) } - for _, resource := range []v1.ResourceName{v1.ResourceCPU, v1.ResourceMemory} { + for _, resource := range []v1.ResourceName{v1.ResourceCPU, v1.ResourceMemory, v1.ResourceEphemeralStorage} { if quantity, exists := container.Resources.Limits[resource]; !exists || quantity.IsZero() { if cap, exists := allocatable[resource]; exists { container.Resources.Limits[resource] = *cap.Copy() diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/service/BUILD b/vendor/k8s.io/kubernetes/pkg/api/v1/service/BUILD index 190e84512e68..6c56835787ba 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/service/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/api/v1/service/BUILD @@ -9,6 +9,7 @@ load( go_library( name = "go_default_library", srcs = ["util.go"], + importpath = "k8s.io/kubernetes/pkg/api/v1/service", deps = [ "//pkg/util/net/sets:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", @@ -18,6 +19,7 @@ go_library( go_test( name = "go_default_test", srcs = ["util_test.go"], + importpath = "k8s.io/kubernetes/pkg/api/v1/service", library = ":go_default_library", deps = [ "//pkg/util/net/sets:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/validation/BUILD b/vendor/k8s.io/kubernetes/pkg/api/v1/validation/BUILD deleted file mode 100644 index 14612044e10c..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/validation/BUILD +++ /dev/null @@ -1,47 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_library( - name = "go_default_library", - srcs = ["validation.go"], - deps = [ - "//pkg/api/helper:go_default_library", - "//pkg/api/v1/helper:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) - -go_test( - name = "go_default_test", - srcs = ["validation_test.go"], - library = ":go_default_library", - tags = ["automanaged"], - deps = [ - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/validation/validation.go b/vendor/k8s.io/kubernetes/pkg/api/v1/validation/validation.go deleted file mode 100644 index 109054f804ea..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/validation/validation.go +++ /dev/null @@ -1,171 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package validation - -import ( - "fmt" - "strings" - - "github.com/golang/glog" - - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/validation" - "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/kubernetes/pkg/api/helper" - v1helper "k8s.io/kubernetes/pkg/api/v1/helper" -) - -const isNegativeErrorMsg string = `must be greater than or equal to 0` -const isNotIntegerErrorMsg string = `must be an integer` - -func ValidateResourceRequirements(requirements *v1.ResourceRequirements, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - limPath := fldPath.Child("limits") - reqPath := fldPath.Child("requests") - for resourceName, quantity := range requirements.Limits { - fldPath := limPath.Key(string(resourceName)) - // Validate resource name. - allErrs = append(allErrs, validateContainerResourceName(string(resourceName), fldPath)...) - - // Validate resource quantity. - allErrs = append(allErrs, ValidateResourceQuantityValue(string(resourceName), quantity, fldPath)...) - - } - for resourceName, quantity := range requirements.Requests { - fldPath := reqPath.Key(string(resourceName)) - // Validate resource name. - allErrs = append(allErrs, validateContainerResourceName(string(resourceName), fldPath)...) - // Validate resource quantity. - allErrs = append(allErrs, ValidateResourceQuantityValue(string(resourceName), quantity, fldPath)...) - - // Check that request <= limit. - limitQuantity, exists := requirements.Limits[resourceName] - if exists { - // For GPUs, not only requests can't exceed limits, they also can't be lower, i.e. must be equal. - if quantity.Cmp(limitQuantity) != 0 && !v1helper.IsOvercommitAllowed(resourceName) { - allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be equal to %s limit", resourceName))) - } else if quantity.Cmp(limitQuantity) > 0 { - allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be less than or equal to %s limit", resourceName))) - } - } else if resourceName == v1.ResourceNvidiaGPU { - allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be equal to %s request", v1.ResourceNvidiaGPU))) - } - } - - return allErrs -} - -func validateContainerResourceName(value string, fldPath *field.Path) field.ErrorList { - allErrs := validateResourceName(value, fldPath) - if len(strings.Split(value, "/")) == 1 { - if !helper.IsStandardContainerResourceName(value) { - return append(allErrs, field.Invalid(fldPath, value, "must be a standard resource for containers")) - } - } - return allErrs -} - -// ValidateResourceQuantityValue enforces that specified quantity is valid for specified resource -func ValidateResourceQuantityValue(resource string, value resource.Quantity, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, ValidateNonnegativeQuantity(value, fldPath)...) 
- if helper.IsIntegerResourceName(resource) { - if value.MilliValue()%int64(1000) != int64(0) { - allErrs = append(allErrs, field.Invalid(fldPath, value, isNotIntegerErrorMsg)) - } - } - return allErrs -} - -// Validates that a Quantity is not negative -func ValidateNonnegativeQuantity(value resource.Quantity, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if value.Cmp(resource.Quantity{}) < 0 { - allErrs = append(allErrs, field.Invalid(fldPath, value.String(), isNegativeErrorMsg)) - } - return allErrs -} - -// Validate compute resource typename. -// Refer to docs/design/resources.md for more details. -func validateResourceName(value string, fldPath *field.Path) field.ErrorList { - // Opaque integer resources (OIR) deprecation began in v1.8 - // TODO: Remove warning after OIR deprecation cycle. - if v1helper.IsOpaqueIntResourceName(v1.ResourceName(value)) { - glog.Errorf("DEPRECATION WARNING! Opaque integer resources are deprecated starting with v1.8: %s", value) - } - - allErrs := field.ErrorList{} - for _, msg := range validation.IsQualifiedName(value) { - allErrs = append(allErrs, field.Invalid(fldPath, value, msg)) - } - if len(allErrs) != 0 { - return allErrs - } - - if len(strings.Split(value, "/")) == 1 { - if !helper.IsStandardResourceName(value) { - return append(allErrs, field.Invalid(fldPath, value, "must be a standard resource type or fully qualified")) - } - } - - return allErrs -} - -func ValidatePodLogOptions(opts *v1.PodLogOptions) field.ErrorList { - allErrs := field.ErrorList{} - if opts.TailLines != nil && *opts.TailLines < 0 { - allErrs = append(allErrs, field.Invalid(field.NewPath("tailLines"), *opts.TailLines, isNegativeErrorMsg)) - } - if opts.LimitBytes != nil && *opts.LimitBytes < 1 { - allErrs = append(allErrs, field.Invalid(field.NewPath("limitBytes"), *opts.LimitBytes, "must be greater than 0")) - } - switch { - case opts.SinceSeconds != nil && opts.SinceTime != nil: - allErrs = append(allErrs, field.Forbidden(field.NewPath(""), "at most one of `sinceTime` or `sinceSeconds` may be specified")) - case opts.SinceSeconds != nil: - if *opts.SinceSeconds < 1 { - allErrs = append(allErrs, field.Invalid(field.NewPath("sinceSeconds"), *opts.SinceSeconds, "must be greater than 0")) - } - } - return allErrs -} - -func AccumulateUniqueHostPorts(containers []v1.Container, accumulator *sets.String, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - for ci, ctr := range containers { - idxPath := fldPath.Index(ci) - portsPath := idxPath.Child("ports") - for pi := range ctr.Ports { - idxPath := portsPath.Index(pi) - port := ctr.Ports[pi].HostPort - if port == 0 { - continue - } - str := fmt.Sprintf("%d/%s", port, ctr.Ports[pi].Protocol) - if accumulator.Has(str) { - allErrs = append(allErrs, field.Duplicate(idxPath.Child("hostPort"), str)) - } else { - accumulator.Insert(str) - } - } - } - return allErrs -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.conversion.go deleted file mode 100644 index 716ca05ed77f..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.conversion.go +++ /dev/null @@ -1,5354 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by conversion-gen. Do not edit it manually! - -package v1 - -import ( - v1 "k8s.io/api/core/v1" - resource "k8s.io/apimachinery/pkg/api/resource" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - types "k8s.io/apimachinery/pkg/types" - api "k8s.io/kubernetes/pkg/api" - unsafe "unsafe" -) - -func init() { - localSchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. -func RegisterConversions(scheme *runtime.Scheme) error { - return scheme.AddGeneratedConversionFuncs( - Convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource, - Convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource, - Convert_v1_Affinity_To_api_Affinity, - Convert_api_Affinity_To_v1_Affinity, - Convert_v1_AttachedVolume_To_api_AttachedVolume, - Convert_api_AttachedVolume_To_v1_AttachedVolume, - Convert_v1_AvoidPods_To_api_AvoidPods, - Convert_api_AvoidPods_To_v1_AvoidPods, - Convert_v1_AzureDiskVolumeSource_To_api_AzureDiskVolumeSource, - Convert_api_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource, - Convert_v1_AzureFilePersistentVolumeSource_To_api_AzureFilePersistentVolumeSource, - Convert_api_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource, - Convert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource, - Convert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource, - Convert_v1_Binding_To_api_Binding, - Convert_api_Binding_To_v1_Binding, - Convert_v1_Capabilities_To_api_Capabilities, - Convert_api_Capabilities_To_v1_Capabilities, - Convert_v1_CephFSPersistentVolumeSource_To_api_CephFSPersistentVolumeSource, - Convert_api_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource, - Convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource, - Convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource, - Convert_v1_CinderVolumeSource_To_api_CinderVolumeSource, - Convert_api_CinderVolumeSource_To_v1_CinderVolumeSource, - Convert_v1_ClientIPConfig_To_api_ClientIPConfig, - Convert_api_ClientIPConfig_To_v1_ClientIPConfig, - Convert_v1_ComponentCondition_To_api_ComponentCondition, - Convert_api_ComponentCondition_To_v1_ComponentCondition, - Convert_v1_ComponentStatus_To_api_ComponentStatus, - Convert_api_ComponentStatus_To_v1_ComponentStatus, - Convert_v1_ComponentStatusList_To_api_ComponentStatusList, - Convert_api_ComponentStatusList_To_v1_ComponentStatusList, - Convert_v1_ConfigMap_To_api_ConfigMap, - Convert_api_ConfigMap_To_v1_ConfigMap, - Convert_v1_ConfigMapEnvSource_To_api_ConfigMapEnvSource, - Convert_api_ConfigMapEnvSource_To_v1_ConfigMapEnvSource, - Convert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector, - Convert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector, - Convert_v1_ConfigMapList_To_api_ConfigMapList, - Convert_api_ConfigMapList_To_v1_ConfigMapList, - Convert_v1_ConfigMapProjection_To_api_ConfigMapProjection, - Convert_api_ConfigMapProjection_To_v1_ConfigMapProjection, - 
Convert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource, - Convert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource, - Convert_v1_Container_To_api_Container, - Convert_api_Container_To_v1_Container, - Convert_v1_ContainerImage_To_api_ContainerImage, - Convert_api_ContainerImage_To_v1_ContainerImage, - Convert_v1_ContainerPort_To_api_ContainerPort, - Convert_api_ContainerPort_To_v1_ContainerPort, - Convert_v1_ContainerState_To_api_ContainerState, - Convert_api_ContainerState_To_v1_ContainerState, - Convert_v1_ContainerStateRunning_To_api_ContainerStateRunning, - Convert_api_ContainerStateRunning_To_v1_ContainerStateRunning, - Convert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated, - Convert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated, - Convert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting, - Convert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting, - Convert_v1_ContainerStatus_To_api_ContainerStatus, - Convert_api_ContainerStatus_To_v1_ContainerStatus, - Convert_v1_DaemonEndpoint_To_api_DaemonEndpoint, - Convert_api_DaemonEndpoint_To_v1_DaemonEndpoint, - Convert_v1_DeleteOptions_To_api_DeleteOptions, - Convert_api_DeleteOptions_To_v1_DeleteOptions, - Convert_v1_DownwardAPIProjection_To_api_DownwardAPIProjection, - Convert_api_DownwardAPIProjection_To_v1_DownwardAPIProjection, - Convert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile, - Convert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile, - Convert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource, - Convert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource, - Convert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource, - Convert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource, - Convert_v1_EndpointAddress_To_api_EndpointAddress, - Convert_api_EndpointAddress_To_v1_EndpointAddress, - Convert_v1_EndpointPort_To_api_EndpointPort, - Convert_api_EndpointPort_To_v1_EndpointPort, - Convert_v1_EndpointSubset_To_api_EndpointSubset, - Convert_api_EndpointSubset_To_v1_EndpointSubset, - Convert_v1_Endpoints_To_api_Endpoints, - Convert_api_Endpoints_To_v1_Endpoints, - Convert_v1_EndpointsList_To_api_EndpointsList, - Convert_api_EndpointsList_To_v1_EndpointsList, - Convert_v1_EnvFromSource_To_api_EnvFromSource, - Convert_api_EnvFromSource_To_v1_EnvFromSource, - Convert_v1_EnvVar_To_api_EnvVar, - Convert_api_EnvVar_To_v1_EnvVar, - Convert_v1_EnvVarSource_To_api_EnvVarSource, - Convert_api_EnvVarSource_To_v1_EnvVarSource, - Convert_v1_Event_To_api_Event, - Convert_api_Event_To_v1_Event, - Convert_v1_EventList_To_api_EventList, - Convert_api_EventList_To_v1_EventList, - Convert_v1_EventSource_To_api_EventSource, - Convert_api_EventSource_To_v1_EventSource, - Convert_v1_ExecAction_To_api_ExecAction, - Convert_api_ExecAction_To_v1_ExecAction, - Convert_v1_FCVolumeSource_To_api_FCVolumeSource, - Convert_api_FCVolumeSource_To_v1_FCVolumeSource, - Convert_v1_FlexVolumeSource_To_api_FlexVolumeSource, - Convert_api_FlexVolumeSource_To_v1_FlexVolumeSource, - Convert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource, - Convert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource, - Convert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource, - Convert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource, - Convert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource, - Convert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource, - Convert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource, - 
Convert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource, - Convert_v1_HTTPGetAction_To_api_HTTPGetAction, - Convert_api_HTTPGetAction_To_v1_HTTPGetAction, - Convert_v1_HTTPHeader_To_api_HTTPHeader, - Convert_api_HTTPHeader_To_v1_HTTPHeader, - Convert_v1_Handler_To_api_Handler, - Convert_api_Handler_To_v1_Handler, - Convert_v1_HostAlias_To_api_HostAlias, - Convert_api_HostAlias_To_v1_HostAlias, - Convert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource, - Convert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource, - Convert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource, - Convert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource, - Convert_v1_KeyToPath_To_api_KeyToPath, - Convert_api_KeyToPath_To_v1_KeyToPath, - Convert_v1_Lifecycle_To_api_Lifecycle, - Convert_api_Lifecycle_To_v1_Lifecycle, - Convert_v1_LimitRange_To_api_LimitRange, - Convert_api_LimitRange_To_v1_LimitRange, - Convert_v1_LimitRangeItem_To_api_LimitRangeItem, - Convert_api_LimitRangeItem_To_v1_LimitRangeItem, - Convert_v1_LimitRangeList_To_api_LimitRangeList, - Convert_api_LimitRangeList_To_v1_LimitRangeList, - Convert_v1_LimitRangeSpec_To_api_LimitRangeSpec, - Convert_api_LimitRangeSpec_To_v1_LimitRangeSpec, - Convert_v1_List_To_api_List, - Convert_api_List_To_v1_List, - Convert_v1_ListOptions_To_api_ListOptions, - Convert_api_ListOptions_To_v1_ListOptions, - Convert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress, - Convert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress, - Convert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus, - Convert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus, - Convert_v1_LocalObjectReference_To_api_LocalObjectReference, - Convert_api_LocalObjectReference_To_v1_LocalObjectReference, - Convert_v1_LocalVolumeSource_To_api_LocalVolumeSource, - Convert_api_LocalVolumeSource_To_v1_LocalVolumeSource, - Convert_v1_NFSVolumeSource_To_api_NFSVolumeSource, - Convert_api_NFSVolumeSource_To_v1_NFSVolumeSource, - Convert_v1_Namespace_To_api_Namespace, - Convert_api_Namespace_To_v1_Namespace, - Convert_v1_NamespaceList_To_api_NamespaceList, - Convert_api_NamespaceList_To_v1_NamespaceList, - Convert_v1_NamespaceSpec_To_api_NamespaceSpec, - Convert_api_NamespaceSpec_To_v1_NamespaceSpec, - Convert_v1_NamespaceStatus_To_api_NamespaceStatus, - Convert_api_NamespaceStatus_To_v1_NamespaceStatus, - Convert_v1_Node_To_api_Node, - Convert_api_Node_To_v1_Node, - Convert_v1_NodeAddress_To_api_NodeAddress, - Convert_api_NodeAddress_To_v1_NodeAddress, - Convert_v1_NodeAffinity_To_api_NodeAffinity, - Convert_api_NodeAffinity_To_v1_NodeAffinity, - Convert_v1_NodeCondition_To_api_NodeCondition, - Convert_api_NodeCondition_To_v1_NodeCondition, - Convert_v1_NodeConfigSource_To_api_NodeConfigSource, - Convert_api_NodeConfigSource_To_v1_NodeConfigSource, - Convert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints, - Convert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints, - Convert_v1_NodeList_To_api_NodeList, - Convert_api_NodeList_To_v1_NodeList, - Convert_v1_NodeProxyOptions_To_api_NodeProxyOptions, - Convert_api_NodeProxyOptions_To_v1_NodeProxyOptions, - Convert_v1_NodeResources_To_api_NodeResources, - Convert_api_NodeResources_To_v1_NodeResources, - Convert_v1_NodeSelector_To_api_NodeSelector, - Convert_api_NodeSelector_To_v1_NodeSelector, - Convert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement, - Convert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement, - Convert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm, - Convert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm, - 
Convert_v1_NodeSpec_To_api_NodeSpec, - Convert_api_NodeSpec_To_v1_NodeSpec, - Convert_v1_NodeStatus_To_api_NodeStatus, - Convert_api_NodeStatus_To_v1_NodeStatus, - Convert_v1_NodeSystemInfo_To_api_NodeSystemInfo, - Convert_api_NodeSystemInfo_To_v1_NodeSystemInfo, - Convert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector, - Convert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector, - Convert_v1_ObjectMeta_To_api_ObjectMeta, - Convert_api_ObjectMeta_To_v1_ObjectMeta, - Convert_v1_ObjectReference_To_api_ObjectReference, - Convert_api_ObjectReference_To_v1_ObjectReference, - Convert_v1_PersistentVolume_To_api_PersistentVolume, - Convert_api_PersistentVolume_To_v1_PersistentVolume, - Convert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim, - Convert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim, - Convert_v1_PersistentVolumeClaimCondition_To_api_PersistentVolumeClaimCondition, - Convert_api_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition, - Convert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList, - Convert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList, - Convert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec, - Convert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec, - Convert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus, - Convert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus, - Convert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource, - Convert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource, - Convert_v1_PersistentVolumeList_To_api_PersistentVolumeList, - Convert_api_PersistentVolumeList_To_v1_PersistentVolumeList, - Convert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource, - Convert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource, - Convert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec, - Convert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec, - Convert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus, - Convert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus, - Convert_v1_PhotonPersistentDiskVolumeSource_To_api_PhotonPersistentDiskVolumeSource, - Convert_api_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource, - Convert_v1_Pod_To_api_Pod, - Convert_api_Pod_To_v1_Pod, - Convert_v1_PodAffinity_To_api_PodAffinity, - Convert_api_PodAffinity_To_v1_PodAffinity, - Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm, - Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm, - Convert_v1_PodAntiAffinity_To_api_PodAntiAffinity, - Convert_api_PodAntiAffinity_To_v1_PodAntiAffinity, - Convert_v1_PodAttachOptions_To_api_PodAttachOptions, - Convert_api_PodAttachOptions_To_v1_PodAttachOptions, - Convert_v1_PodCondition_To_api_PodCondition, - Convert_api_PodCondition_To_v1_PodCondition, - Convert_v1_PodExecOptions_To_api_PodExecOptions, - Convert_api_PodExecOptions_To_v1_PodExecOptions, - Convert_v1_PodList_To_api_PodList, - Convert_api_PodList_To_v1_PodList, - Convert_v1_PodLogOptions_To_api_PodLogOptions, - Convert_api_PodLogOptions_To_v1_PodLogOptions, - Convert_v1_PodPortForwardOptions_To_api_PodPortForwardOptions, - Convert_api_PodPortForwardOptions_To_v1_PodPortForwardOptions, - Convert_v1_PodProxyOptions_To_api_PodProxyOptions, - Convert_api_PodProxyOptions_To_v1_PodProxyOptions, - Convert_v1_PodSecurityContext_To_api_PodSecurityContext, - Convert_api_PodSecurityContext_To_v1_PodSecurityContext, - Convert_v1_PodSignature_To_api_PodSignature, - 
Convert_api_PodSignature_To_v1_PodSignature, - Convert_v1_PodSpec_To_api_PodSpec, - Convert_api_PodSpec_To_v1_PodSpec, - Convert_v1_PodStatus_To_api_PodStatus, - Convert_api_PodStatus_To_v1_PodStatus, - Convert_v1_PodStatusResult_To_api_PodStatusResult, - Convert_api_PodStatusResult_To_v1_PodStatusResult, - Convert_v1_PodTemplate_To_api_PodTemplate, - Convert_api_PodTemplate_To_v1_PodTemplate, - Convert_v1_PodTemplateList_To_api_PodTemplateList, - Convert_api_PodTemplateList_To_v1_PodTemplateList, - Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec, - Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec, - Convert_v1_PortworxVolumeSource_To_api_PortworxVolumeSource, - Convert_api_PortworxVolumeSource_To_v1_PortworxVolumeSource, - Convert_v1_Preconditions_To_api_Preconditions, - Convert_api_Preconditions_To_v1_Preconditions, - Convert_v1_PreferAvoidPodsEntry_To_api_PreferAvoidPodsEntry, - Convert_api_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry, - Convert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm, - Convert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm, - Convert_v1_Probe_To_api_Probe, - Convert_api_Probe_To_v1_Probe, - Convert_v1_ProjectedVolumeSource_To_api_ProjectedVolumeSource, - Convert_api_ProjectedVolumeSource_To_v1_ProjectedVolumeSource, - Convert_v1_QuobyteVolumeSource_To_api_QuobyteVolumeSource, - Convert_api_QuobyteVolumeSource_To_v1_QuobyteVolumeSource, - Convert_v1_RBDVolumeSource_To_api_RBDVolumeSource, - Convert_api_RBDVolumeSource_To_v1_RBDVolumeSource, - Convert_v1_RangeAllocation_To_api_RangeAllocation, - Convert_api_RangeAllocation_To_v1_RangeAllocation, - Convert_v1_ReplicationController_To_api_ReplicationController, - Convert_api_ReplicationController_To_v1_ReplicationController, - Convert_v1_ReplicationControllerCondition_To_api_ReplicationControllerCondition, - Convert_api_ReplicationControllerCondition_To_v1_ReplicationControllerCondition, - Convert_v1_ReplicationControllerList_To_api_ReplicationControllerList, - Convert_api_ReplicationControllerList_To_v1_ReplicationControllerList, - Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec, - Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec, - Convert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus, - Convert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus, - Convert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector, - Convert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector, - Convert_v1_ResourceQuota_To_api_ResourceQuota, - Convert_api_ResourceQuota_To_v1_ResourceQuota, - Convert_v1_ResourceQuotaList_To_api_ResourceQuotaList, - Convert_api_ResourceQuotaList_To_v1_ResourceQuotaList, - Convert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec, - Convert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec, - Convert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus, - Convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus, - Convert_v1_ResourceRequirements_To_api_ResourceRequirements, - Convert_api_ResourceRequirements_To_v1_ResourceRequirements, - Convert_v1_SELinuxOptions_To_api_SELinuxOptions, - Convert_api_SELinuxOptions_To_v1_SELinuxOptions, - Convert_v1_ScaleIOVolumeSource_To_api_ScaleIOVolumeSource, - Convert_api_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource, - Convert_v1_Secret_To_api_Secret, - Convert_api_Secret_To_v1_Secret, - Convert_v1_SecretEnvSource_To_api_SecretEnvSource, - Convert_api_SecretEnvSource_To_v1_SecretEnvSource, - Convert_v1_SecretKeySelector_To_api_SecretKeySelector, - 
Convert_api_SecretKeySelector_To_v1_SecretKeySelector, - Convert_v1_SecretList_To_api_SecretList, - Convert_api_SecretList_To_v1_SecretList, - Convert_v1_SecretProjection_To_api_SecretProjection, - Convert_api_SecretProjection_To_v1_SecretProjection, - Convert_v1_SecretReference_To_api_SecretReference, - Convert_api_SecretReference_To_v1_SecretReference, - Convert_v1_SecretVolumeSource_To_api_SecretVolumeSource, - Convert_api_SecretVolumeSource_To_v1_SecretVolumeSource, - Convert_v1_SecurityContext_To_api_SecurityContext, - Convert_api_SecurityContext_To_v1_SecurityContext, - Convert_v1_SerializedReference_To_api_SerializedReference, - Convert_api_SerializedReference_To_v1_SerializedReference, - Convert_v1_Service_To_api_Service, - Convert_api_Service_To_v1_Service, - Convert_v1_ServiceAccount_To_api_ServiceAccount, - Convert_api_ServiceAccount_To_v1_ServiceAccount, - Convert_v1_ServiceAccountList_To_api_ServiceAccountList, - Convert_api_ServiceAccountList_To_v1_ServiceAccountList, - Convert_v1_ServiceList_To_api_ServiceList, - Convert_api_ServiceList_To_v1_ServiceList, - Convert_v1_ServicePort_To_api_ServicePort, - Convert_api_ServicePort_To_v1_ServicePort, - Convert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions, - Convert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions, - Convert_v1_ServiceSpec_To_api_ServiceSpec, - Convert_api_ServiceSpec_To_v1_ServiceSpec, - Convert_v1_ServiceStatus_To_api_ServiceStatus, - Convert_api_ServiceStatus_To_v1_ServiceStatus, - Convert_v1_SessionAffinityConfig_To_api_SessionAffinityConfig, - Convert_api_SessionAffinityConfig_To_v1_SessionAffinityConfig, - Convert_v1_StorageOSPersistentVolumeSource_To_api_StorageOSPersistentVolumeSource, - Convert_api_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource, - Convert_v1_StorageOSVolumeSource_To_api_StorageOSVolumeSource, - Convert_api_StorageOSVolumeSource_To_v1_StorageOSVolumeSource, - Convert_v1_Sysctl_To_api_Sysctl, - Convert_api_Sysctl_To_v1_Sysctl, - Convert_v1_TCPSocketAction_To_api_TCPSocketAction, - Convert_api_TCPSocketAction_To_v1_TCPSocketAction, - Convert_v1_Taint_To_api_Taint, - Convert_api_Taint_To_v1_Taint, - Convert_v1_Toleration_To_api_Toleration, - Convert_api_Toleration_To_v1_Toleration, - Convert_v1_Volume_To_api_Volume, - Convert_api_Volume_To_v1_Volume, - Convert_v1_VolumeMount_To_api_VolumeMount, - Convert_api_VolumeMount_To_v1_VolumeMount, - Convert_v1_VolumeProjection_To_api_VolumeProjection, - Convert_api_VolumeProjection_To_v1_VolumeProjection, - Convert_v1_VolumeSource_To_api_VolumeSource, - Convert_api_VolumeSource_To_v1_VolumeSource, - Convert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource, - Convert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource, - Convert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm, - Convert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm, - ) -} - -func autoConvert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in *v1.AWSElasticBlockStoreVolumeSource, out *api.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { - out.VolumeID = in.VolumeID - out.FSType = in.FSType - out.Partition = in.Partition - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource is an autogenerated conversion function. 
-func Convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in *v1.AWSElasticBlockStoreVolumeSource, out *api.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { - return autoConvert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in, out, s) -} - -func autoConvert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in *api.AWSElasticBlockStoreVolumeSource, out *v1.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { - out.VolumeID = in.VolumeID - out.FSType = in.FSType - out.Partition = in.Partition - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource is an autogenerated conversion function. -func Convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in *api.AWSElasticBlockStoreVolumeSource, out *v1.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { - return autoConvert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in, out, s) -} - -func autoConvert_v1_Affinity_To_api_Affinity(in *v1.Affinity, out *api.Affinity, s conversion.Scope) error { - out.NodeAffinity = (*api.NodeAffinity)(unsafe.Pointer(in.NodeAffinity)) - out.PodAffinity = (*api.PodAffinity)(unsafe.Pointer(in.PodAffinity)) - out.PodAntiAffinity = (*api.PodAntiAffinity)(unsafe.Pointer(in.PodAntiAffinity)) - return nil -} - -// Convert_v1_Affinity_To_api_Affinity is an autogenerated conversion function. -func Convert_v1_Affinity_To_api_Affinity(in *v1.Affinity, out *api.Affinity, s conversion.Scope) error { - return autoConvert_v1_Affinity_To_api_Affinity(in, out, s) -} - -func autoConvert_api_Affinity_To_v1_Affinity(in *api.Affinity, out *v1.Affinity, s conversion.Scope) error { - out.NodeAffinity = (*v1.NodeAffinity)(unsafe.Pointer(in.NodeAffinity)) - out.PodAffinity = (*v1.PodAffinity)(unsafe.Pointer(in.PodAffinity)) - out.PodAntiAffinity = (*v1.PodAntiAffinity)(unsafe.Pointer(in.PodAntiAffinity)) - return nil -} - -// Convert_api_Affinity_To_v1_Affinity is an autogenerated conversion function. -func Convert_api_Affinity_To_v1_Affinity(in *api.Affinity, out *v1.Affinity, s conversion.Scope) error { - return autoConvert_api_Affinity_To_v1_Affinity(in, out, s) -} - -func autoConvert_v1_AttachedVolume_To_api_AttachedVolume(in *v1.AttachedVolume, out *api.AttachedVolume, s conversion.Scope) error { - out.Name = api.UniqueVolumeName(in.Name) - out.DevicePath = in.DevicePath - return nil -} - -// Convert_v1_AttachedVolume_To_api_AttachedVolume is an autogenerated conversion function. -func Convert_v1_AttachedVolume_To_api_AttachedVolume(in *v1.AttachedVolume, out *api.AttachedVolume, s conversion.Scope) error { - return autoConvert_v1_AttachedVolume_To_api_AttachedVolume(in, out, s) -} - -func autoConvert_api_AttachedVolume_To_v1_AttachedVolume(in *api.AttachedVolume, out *v1.AttachedVolume, s conversion.Scope) error { - out.Name = v1.UniqueVolumeName(in.Name) - out.DevicePath = in.DevicePath - return nil -} - -// Convert_api_AttachedVolume_To_v1_AttachedVolume is an autogenerated conversion function. 
-func Convert_api_AttachedVolume_To_v1_AttachedVolume(in *api.AttachedVolume, out *v1.AttachedVolume, s conversion.Scope) error { - return autoConvert_api_AttachedVolume_To_v1_AttachedVolume(in, out, s) -} - -func autoConvert_v1_AvoidPods_To_api_AvoidPods(in *v1.AvoidPods, out *api.AvoidPods, s conversion.Scope) error { - out.PreferAvoidPods = *(*[]api.PreferAvoidPodsEntry)(unsafe.Pointer(&in.PreferAvoidPods)) - return nil -} - -// Convert_v1_AvoidPods_To_api_AvoidPods is an autogenerated conversion function. -func Convert_v1_AvoidPods_To_api_AvoidPods(in *v1.AvoidPods, out *api.AvoidPods, s conversion.Scope) error { - return autoConvert_v1_AvoidPods_To_api_AvoidPods(in, out, s) -} - -func autoConvert_api_AvoidPods_To_v1_AvoidPods(in *api.AvoidPods, out *v1.AvoidPods, s conversion.Scope) error { - out.PreferAvoidPods = *(*[]v1.PreferAvoidPodsEntry)(unsafe.Pointer(&in.PreferAvoidPods)) - return nil -} - -// Convert_api_AvoidPods_To_v1_AvoidPods is an autogenerated conversion function. -func Convert_api_AvoidPods_To_v1_AvoidPods(in *api.AvoidPods, out *v1.AvoidPods, s conversion.Scope) error { - return autoConvert_api_AvoidPods_To_v1_AvoidPods(in, out, s) -} - -func autoConvert_v1_AzureDiskVolumeSource_To_api_AzureDiskVolumeSource(in *v1.AzureDiskVolumeSource, out *api.AzureDiskVolumeSource, s conversion.Scope) error { - out.DiskName = in.DiskName - out.DataDiskURI = in.DataDiskURI - out.CachingMode = (*api.AzureDataDiskCachingMode)(unsafe.Pointer(in.CachingMode)) - out.FSType = (*string)(unsafe.Pointer(in.FSType)) - out.ReadOnly = (*bool)(unsafe.Pointer(in.ReadOnly)) - out.Kind = (*api.AzureDataDiskKind)(unsafe.Pointer(in.Kind)) - return nil -} - -// Convert_v1_AzureDiskVolumeSource_To_api_AzureDiskVolumeSource is an autogenerated conversion function. -func Convert_v1_AzureDiskVolumeSource_To_api_AzureDiskVolumeSource(in *v1.AzureDiskVolumeSource, out *api.AzureDiskVolumeSource, s conversion.Scope) error { - return autoConvert_v1_AzureDiskVolumeSource_To_api_AzureDiskVolumeSource(in, out, s) -} - -func autoConvert_api_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource(in *api.AzureDiskVolumeSource, out *v1.AzureDiskVolumeSource, s conversion.Scope) error { - out.DiskName = in.DiskName - out.DataDiskURI = in.DataDiskURI - out.CachingMode = (*v1.AzureDataDiskCachingMode)(unsafe.Pointer(in.CachingMode)) - out.FSType = (*string)(unsafe.Pointer(in.FSType)) - out.ReadOnly = (*bool)(unsafe.Pointer(in.ReadOnly)) - out.Kind = (*v1.AzureDataDiskKind)(unsafe.Pointer(in.Kind)) - return nil -} - -// Convert_api_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource is an autogenerated conversion function. -func Convert_api_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource(in *api.AzureDiskVolumeSource, out *v1.AzureDiskVolumeSource, s conversion.Scope) error { - return autoConvert_api_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource(in, out, s) -} - -func autoConvert_v1_AzureFilePersistentVolumeSource_To_api_AzureFilePersistentVolumeSource(in *v1.AzureFilePersistentVolumeSource, out *api.AzureFilePersistentVolumeSource, s conversion.Scope) error { - out.SecretName = in.SecretName - out.ShareName = in.ShareName - out.ReadOnly = in.ReadOnly - out.SecretNamespace = (*string)(unsafe.Pointer(in.SecretNamespace)) - return nil -} - -// Convert_v1_AzureFilePersistentVolumeSource_To_api_AzureFilePersistentVolumeSource is an autogenerated conversion function. 
-func Convert_v1_AzureFilePersistentVolumeSource_To_api_AzureFilePersistentVolumeSource(in *v1.AzureFilePersistentVolumeSource, out *api.AzureFilePersistentVolumeSource, s conversion.Scope) error { - return autoConvert_v1_AzureFilePersistentVolumeSource_To_api_AzureFilePersistentVolumeSource(in, out, s) -} - -func autoConvert_api_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource(in *api.AzureFilePersistentVolumeSource, out *v1.AzureFilePersistentVolumeSource, s conversion.Scope) error { - out.SecretName = in.SecretName - out.ShareName = in.ShareName - out.ReadOnly = in.ReadOnly - out.SecretNamespace = (*string)(unsafe.Pointer(in.SecretNamespace)) - return nil -} - -// Convert_api_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource is an autogenerated conversion function. -func Convert_api_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource(in *api.AzureFilePersistentVolumeSource, out *v1.AzureFilePersistentVolumeSource, s conversion.Scope) error { - return autoConvert_api_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource(in, out, s) -} - -func autoConvert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(in *v1.AzureFileVolumeSource, out *api.AzureFileVolumeSource, s conversion.Scope) error { - out.SecretName = in.SecretName - out.ShareName = in.ShareName - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource is an autogenerated conversion function. -func Convert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(in *v1.AzureFileVolumeSource, out *api.AzureFileVolumeSource, s conversion.Scope) error { - return autoConvert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(in, out, s) -} - -func autoConvert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in *api.AzureFileVolumeSource, out *v1.AzureFileVolumeSource, s conversion.Scope) error { - out.SecretName = in.SecretName - out.ShareName = in.ShareName - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource is an autogenerated conversion function. -func Convert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in *api.AzureFileVolumeSource, out *v1.AzureFileVolumeSource, s conversion.Scope) error { - return autoConvert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in, out, s) -} - -func autoConvert_v1_Binding_To_api_Binding(in *v1.Binding, out *api.Binding, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_ObjectReference_To_api_ObjectReference(&in.Target, &out.Target, s); err != nil { - return err - } - return nil -} - -// Convert_v1_Binding_To_api_Binding is an autogenerated conversion function. -func Convert_v1_Binding_To_api_Binding(in *v1.Binding, out *api.Binding, s conversion.Scope) error { - return autoConvert_v1_Binding_To_api_Binding(in, out, s) -} - -func autoConvert_api_Binding_To_v1_Binding(in *api.Binding, out *v1.Binding, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_ObjectReference_To_v1_ObjectReference(&in.Target, &out.Target, s); err != nil { - return err - } - return nil -} - -// Convert_api_Binding_To_v1_Binding is an autogenerated conversion function. 
-func Convert_api_Binding_To_v1_Binding(in *api.Binding, out *v1.Binding, s conversion.Scope) error { - return autoConvert_api_Binding_To_v1_Binding(in, out, s) -} - -func autoConvert_v1_Capabilities_To_api_Capabilities(in *v1.Capabilities, out *api.Capabilities, s conversion.Scope) error { - out.Add = *(*[]api.Capability)(unsafe.Pointer(&in.Add)) - out.Drop = *(*[]api.Capability)(unsafe.Pointer(&in.Drop)) - return nil -} - -// Convert_v1_Capabilities_To_api_Capabilities is an autogenerated conversion function. -func Convert_v1_Capabilities_To_api_Capabilities(in *v1.Capabilities, out *api.Capabilities, s conversion.Scope) error { - return autoConvert_v1_Capabilities_To_api_Capabilities(in, out, s) -} - -func autoConvert_api_Capabilities_To_v1_Capabilities(in *api.Capabilities, out *v1.Capabilities, s conversion.Scope) error { - out.Add = *(*[]v1.Capability)(unsafe.Pointer(&in.Add)) - out.Drop = *(*[]v1.Capability)(unsafe.Pointer(&in.Drop)) - return nil -} - -// Convert_api_Capabilities_To_v1_Capabilities is an autogenerated conversion function. -func Convert_api_Capabilities_To_v1_Capabilities(in *api.Capabilities, out *v1.Capabilities, s conversion.Scope) error { - return autoConvert_api_Capabilities_To_v1_Capabilities(in, out, s) -} - -func autoConvert_v1_CephFSPersistentVolumeSource_To_api_CephFSPersistentVolumeSource(in *v1.CephFSPersistentVolumeSource, out *api.CephFSPersistentVolumeSource, s conversion.Scope) error { - out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors)) - out.Path = in.Path - out.User = in.User - out.SecretFile = in.SecretFile - out.SecretRef = (*api.SecretReference)(unsafe.Pointer(in.SecretRef)) - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_v1_CephFSPersistentVolumeSource_To_api_CephFSPersistentVolumeSource is an autogenerated conversion function. -func Convert_v1_CephFSPersistentVolumeSource_To_api_CephFSPersistentVolumeSource(in *v1.CephFSPersistentVolumeSource, out *api.CephFSPersistentVolumeSource, s conversion.Scope) error { - return autoConvert_v1_CephFSPersistentVolumeSource_To_api_CephFSPersistentVolumeSource(in, out, s) -} - -func autoConvert_api_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource(in *api.CephFSPersistentVolumeSource, out *v1.CephFSPersistentVolumeSource, s conversion.Scope) error { - out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors)) - out.Path = in.Path - out.User = in.User - out.SecretFile = in.SecretFile - out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_api_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource is an autogenerated conversion function. -func Convert_api_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource(in *api.CephFSPersistentVolumeSource, out *v1.CephFSPersistentVolumeSource, s conversion.Scope) error { - return autoConvert_api_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource(in, out, s) -} - -func autoConvert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in *v1.CephFSVolumeSource, out *api.CephFSVolumeSource, s conversion.Scope) error { - out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors)) - out.Path = in.Path - out.User = in.User - out.SecretFile = in.SecretFile - out.SecretRef = (*api.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource is an autogenerated conversion function. 
-func Convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in *v1.CephFSVolumeSource, out *api.CephFSVolumeSource, s conversion.Scope) error { - return autoConvert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in, out, s) -} - -func autoConvert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in *api.CephFSVolumeSource, out *v1.CephFSVolumeSource, s conversion.Scope) error { - out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors)) - out.Path = in.Path - out.User = in.User - out.SecretFile = in.SecretFile - out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource is an autogenerated conversion function. -func Convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in *api.CephFSVolumeSource, out *v1.CephFSVolumeSource, s conversion.Scope) error { - return autoConvert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in, out, s) -} - -func autoConvert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in *v1.CinderVolumeSource, out *api.CinderVolumeSource, s conversion.Scope) error { - out.VolumeID = in.VolumeID - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_v1_CinderVolumeSource_To_api_CinderVolumeSource is an autogenerated conversion function. -func Convert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in *v1.CinderVolumeSource, out *api.CinderVolumeSource, s conversion.Scope) error { - return autoConvert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in, out, s) -} - -func autoConvert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in *api.CinderVolumeSource, out *v1.CinderVolumeSource, s conversion.Scope) error { - out.VolumeID = in.VolumeID - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_api_CinderVolumeSource_To_v1_CinderVolumeSource is an autogenerated conversion function. -func Convert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in *api.CinderVolumeSource, out *v1.CinderVolumeSource, s conversion.Scope) error { - return autoConvert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in, out, s) -} - -func autoConvert_v1_ClientIPConfig_To_api_ClientIPConfig(in *v1.ClientIPConfig, out *api.ClientIPConfig, s conversion.Scope) error { - out.TimeoutSeconds = (*int32)(unsafe.Pointer(in.TimeoutSeconds)) - return nil -} - -// Convert_v1_ClientIPConfig_To_api_ClientIPConfig is an autogenerated conversion function. -func Convert_v1_ClientIPConfig_To_api_ClientIPConfig(in *v1.ClientIPConfig, out *api.ClientIPConfig, s conversion.Scope) error { - return autoConvert_v1_ClientIPConfig_To_api_ClientIPConfig(in, out, s) -} - -func autoConvert_api_ClientIPConfig_To_v1_ClientIPConfig(in *api.ClientIPConfig, out *v1.ClientIPConfig, s conversion.Scope) error { - out.TimeoutSeconds = (*int32)(unsafe.Pointer(in.TimeoutSeconds)) - return nil -} - -// Convert_api_ClientIPConfig_To_v1_ClientIPConfig is an autogenerated conversion function. 
-func Convert_api_ClientIPConfig_To_v1_ClientIPConfig(in *api.ClientIPConfig, out *v1.ClientIPConfig, s conversion.Scope) error { - return autoConvert_api_ClientIPConfig_To_v1_ClientIPConfig(in, out, s) -} - -func autoConvert_v1_ComponentCondition_To_api_ComponentCondition(in *v1.ComponentCondition, out *api.ComponentCondition, s conversion.Scope) error { - out.Type = api.ComponentConditionType(in.Type) - out.Status = api.ConditionStatus(in.Status) - out.Message = in.Message - out.Error = in.Error - return nil -} - -// Convert_v1_ComponentCondition_To_api_ComponentCondition is an autogenerated conversion function. -func Convert_v1_ComponentCondition_To_api_ComponentCondition(in *v1.ComponentCondition, out *api.ComponentCondition, s conversion.Scope) error { - return autoConvert_v1_ComponentCondition_To_api_ComponentCondition(in, out, s) -} - -func autoConvert_api_ComponentCondition_To_v1_ComponentCondition(in *api.ComponentCondition, out *v1.ComponentCondition, s conversion.Scope) error { - out.Type = v1.ComponentConditionType(in.Type) - out.Status = v1.ConditionStatus(in.Status) - out.Message = in.Message - out.Error = in.Error - return nil -} - -// Convert_api_ComponentCondition_To_v1_ComponentCondition is an autogenerated conversion function. -func Convert_api_ComponentCondition_To_v1_ComponentCondition(in *api.ComponentCondition, out *v1.ComponentCondition, s conversion.Scope) error { - return autoConvert_api_ComponentCondition_To_v1_ComponentCondition(in, out, s) -} - -func autoConvert_v1_ComponentStatus_To_api_ComponentStatus(in *v1.ComponentStatus, out *api.ComponentStatus, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Conditions = *(*[]api.ComponentCondition)(unsafe.Pointer(&in.Conditions)) - return nil -} - -// Convert_v1_ComponentStatus_To_api_ComponentStatus is an autogenerated conversion function. -func Convert_v1_ComponentStatus_To_api_ComponentStatus(in *v1.ComponentStatus, out *api.ComponentStatus, s conversion.Scope) error { - return autoConvert_v1_ComponentStatus_To_api_ComponentStatus(in, out, s) -} - -func autoConvert_api_ComponentStatus_To_v1_ComponentStatus(in *api.ComponentStatus, out *v1.ComponentStatus, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Conditions = *(*[]v1.ComponentCondition)(unsafe.Pointer(&in.Conditions)) - return nil -} - -// Convert_api_ComponentStatus_To_v1_ComponentStatus is an autogenerated conversion function. -func Convert_api_ComponentStatus_To_v1_ComponentStatus(in *api.ComponentStatus, out *v1.ComponentStatus, s conversion.Scope) error { - return autoConvert_api_ComponentStatus_To_v1_ComponentStatus(in, out, s) -} - -func autoConvert_v1_ComponentStatusList_To_api_ComponentStatusList(in *v1.ComponentStatusList, out *api.ComponentStatusList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]api.ComponentStatus)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1_ComponentStatusList_To_api_ComponentStatusList is an autogenerated conversion function. 
-func Convert_v1_ComponentStatusList_To_api_ComponentStatusList(in *v1.ComponentStatusList, out *api.ComponentStatusList, s conversion.Scope) error { - return autoConvert_v1_ComponentStatusList_To_api_ComponentStatusList(in, out, s) -} - -func autoConvert_api_ComponentStatusList_To_v1_ComponentStatusList(in *api.ComponentStatusList, out *v1.ComponentStatusList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1.ComponentStatus)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_api_ComponentStatusList_To_v1_ComponentStatusList is an autogenerated conversion function. -func Convert_api_ComponentStatusList_To_v1_ComponentStatusList(in *api.ComponentStatusList, out *v1.ComponentStatusList, s conversion.Scope) error { - return autoConvert_api_ComponentStatusList_To_v1_ComponentStatusList(in, out, s) -} - -func autoConvert_v1_ConfigMap_To_api_ConfigMap(in *v1.ConfigMap, out *api.ConfigMap, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Data = *(*map[string]string)(unsafe.Pointer(&in.Data)) - return nil -} - -// Convert_v1_ConfigMap_To_api_ConfigMap is an autogenerated conversion function. -func Convert_v1_ConfigMap_To_api_ConfigMap(in *v1.ConfigMap, out *api.ConfigMap, s conversion.Scope) error { - return autoConvert_v1_ConfigMap_To_api_ConfigMap(in, out, s) -} - -func autoConvert_api_ConfigMap_To_v1_ConfigMap(in *api.ConfigMap, out *v1.ConfigMap, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Data = *(*map[string]string)(unsafe.Pointer(&in.Data)) - return nil -} - -// Convert_api_ConfigMap_To_v1_ConfigMap is an autogenerated conversion function. -func Convert_api_ConfigMap_To_v1_ConfigMap(in *api.ConfigMap, out *v1.ConfigMap, s conversion.Scope) error { - return autoConvert_api_ConfigMap_To_v1_ConfigMap(in, out, s) -} - -func autoConvert_v1_ConfigMapEnvSource_To_api_ConfigMapEnvSource(in *v1.ConfigMapEnvSource, out *api.ConfigMapEnvSource, s conversion.Scope) error { - if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_v1_ConfigMapEnvSource_To_api_ConfigMapEnvSource is an autogenerated conversion function. -func Convert_v1_ConfigMapEnvSource_To_api_ConfigMapEnvSource(in *v1.ConfigMapEnvSource, out *api.ConfigMapEnvSource, s conversion.Scope) error { - return autoConvert_v1_ConfigMapEnvSource_To_api_ConfigMapEnvSource(in, out, s) -} - -func autoConvert_api_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(in *api.ConfigMapEnvSource, out *v1.ConfigMapEnvSource, s conversion.Scope) error { - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_api_ConfigMapEnvSource_To_v1_ConfigMapEnvSource is an autogenerated conversion function. 
-func Convert_api_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(in *api.ConfigMapEnvSource, out *v1.ConfigMapEnvSource, s conversion.Scope) error { - return autoConvert_api_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(in, out, s) -} - -func autoConvert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector(in *v1.ConfigMapKeySelector, out *api.ConfigMapKeySelector, s conversion.Scope) error { - if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Key = in.Key - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector is an autogenerated conversion function. -func Convert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector(in *v1.ConfigMapKeySelector, out *api.ConfigMapKeySelector, s conversion.Scope) error { - return autoConvert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector(in, out, s) -} - -func autoConvert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in *api.ConfigMapKeySelector, out *v1.ConfigMapKeySelector, s conversion.Scope) error { - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Key = in.Key - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector is an autogenerated conversion function. -func Convert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in *api.ConfigMapKeySelector, out *v1.ConfigMapKeySelector, s conversion.Scope) error { - return autoConvert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in, out, s) -} - -func autoConvert_v1_ConfigMapList_To_api_ConfigMapList(in *v1.ConfigMapList, out *api.ConfigMapList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]api.ConfigMap)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1_ConfigMapList_To_api_ConfigMapList is an autogenerated conversion function. -func Convert_v1_ConfigMapList_To_api_ConfigMapList(in *v1.ConfigMapList, out *api.ConfigMapList, s conversion.Scope) error { - return autoConvert_v1_ConfigMapList_To_api_ConfigMapList(in, out, s) -} - -func autoConvert_api_ConfigMapList_To_v1_ConfigMapList(in *api.ConfigMapList, out *v1.ConfigMapList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1.ConfigMap)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_api_ConfigMapList_To_v1_ConfigMapList is an autogenerated conversion function. -func Convert_api_ConfigMapList_To_v1_ConfigMapList(in *api.ConfigMapList, out *v1.ConfigMapList, s conversion.Scope) error { - return autoConvert_api_ConfigMapList_To_v1_ConfigMapList(in, out, s) -} - -func autoConvert_v1_ConfigMapProjection_To_api_ConfigMapProjection(in *v1.ConfigMapProjection, out *api.ConfigMapProjection, s conversion.Scope) error { - if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Items = *(*[]api.KeyToPath)(unsafe.Pointer(&in.Items)) - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_v1_ConfigMapProjection_To_api_ConfigMapProjection is an autogenerated conversion function. 
-func Convert_v1_ConfigMapProjection_To_api_ConfigMapProjection(in *v1.ConfigMapProjection, out *api.ConfigMapProjection, s conversion.Scope) error { - return autoConvert_v1_ConfigMapProjection_To_api_ConfigMapProjection(in, out, s) -} - -func autoConvert_api_ConfigMapProjection_To_v1_ConfigMapProjection(in *api.ConfigMapProjection, out *v1.ConfigMapProjection, s conversion.Scope) error { - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Items = *(*[]v1.KeyToPath)(unsafe.Pointer(&in.Items)) - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_api_ConfigMapProjection_To_v1_ConfigMapProjection is an autogenerated conversion function. -func Convert_api_ConfigMapProjection_To_v1_ConfigMapProjection(in *api.ConfigMapProjection, out *v1.ConfigMapProjection, s conversion.Scope) error { - return autoConvert_api_ConfigMapProjection_To_v1_ConfigMapProjection(in, out, s) -} - -func autoConvert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(in *v1.ConfigMapVolumeSource, out *api.ConfigMapVolumeSource, s conversion.Scope) error { - if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Items = *(*[]api.KeyToPath)(unsafe.Pointer(&in.Items)) - out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource is an autogenerated conversion function. -func Convert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(in *v1.ConfigMapVolumeSource, out *api.ConfigMapVolumeSource, s conversion.Scope) error { - return autoConvert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(in, out, s) -} - -func autoConvert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in *api.ConfigMapVolumeSource, out *v1.ConfigMapVolumeSource, s conversion.Scope) error { - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Items = *(*[]v1.KeyToPath)(unsafe.Pointer(&in.Items)) - out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource is an autogenerated conversion function. 
-func Convert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in *api.ConfigMapVolumeSource, out *v1.ConfigMapVolumeSource, s conversion.Scope) error { - return autoConvert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in, out, s) -} - -func autoConvert_v1_Container_To_api_Container(in *v1.Container, out *api.Container, s conversion.Scope) error { - out.Name = in.Name - out.Image = in.Image - out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) - out.Args = *(*[]string)(unsafe.Pointer(&in.Args)) - out.WorkingDir = in.WorkingDir - out.Ports = *(*[]api.ContainerPort)(unsafe.Pointer(&in.Ports)) - out.EnvFrom = *(*[]api.EnvFromSource)(unsafe.Pointer(&in.EnvFrom)) - out.Env = *(*[]api.EnvVar)(unsafe.Pointer(&in.Env)) - if err := Convert_v1_ResourceRequirements_To_api_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { - return err - } - out.VolumeMounts = *(*[]api.VolumeMount)(unsafe.Pointer(&in.VolumeMounts)) - out.LivenessProbe = (*api.Probe)(unsafe.Pointer(in.LivenessProbe)) - out.ReadinessProbe = (*api.Probe)(unsafe.Pointer(in.ReadinessProbe)) - out.Lifecycle = (*api.Lifecycle)(unsafe.Pointer(in.Lifecycle)) - out.TerminationMessagePath = in.TerminationMessagePath - out.TerminationMessagePolicy = api.TerminationMessagePolicy(in.TerminationMessagePolicy) - out.ImagePullPolicy = api.PullPolicy(in.ImagePullPolicy) - if in.SecurityContext != nil { - in, out := &in.SecurityContext, &out.SecurityContext - *out = new(api.SecurityContext) - if err := Convert_v1_SecurityContext_To_api_SecurityContext(*in, *out, s); err != nil { - return err - } - } else { - out.SecurityContext = nil - } - out.Stdin = in.Stdin - out.StdinOnce = in.StdinOnce - out.TTY = in.TTY - return nil -} - -// Convert_v1_Container_To_api_Container is an autogenerated conversion function. -func Convert_v1_Container_To_api_Container(in *v1.Container, out *api.Container, s conversion.Scope) error { - return autoConvert_v1_Container_To_api_Container(in, out, s) -} - -func autoConvert_api_Container_To_v1_Container(in *api.Container, out *v1.Container, s conversion.Scope) error { - out.Name = in.Name - out.Image = in.Image - out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) - out.Args = *(*[]string)(unsafe.Pointer(&in.Args)) - out.WorkingDir = in.WorkingDir - out.Ports = *(*[]v1.ContainerPort)(unsafe.Pointer(&in.Ports)) - out.EnvFrom = *(*[]v1.EnvFromSource)(unsafe.Pointer(&in.EnvFrom)) - out.Env = *(*[]v1.EnvVar)(unsafe.Pointer(&in.Env)) - if err := Convert_api_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { - return err - } - out.VolumeMounts = *(*[]v1.VolumeMount)(unsafe.Pointer(&in.VolumeMounts)) - out.LivenessProbe = (*v1.Probe)(unsafe.Pointer(in.LivenessProbe)) - out.ReadinessProbe = (*v1.Probe)(unsafe.Pointer(in.ReadinessProbe)) - out.Lifecycle = (*v1.Lifecycle)(unsafe.Pointer(in.Lifecycle)) - out.TerminationMessagePath = in.TerminationMessagePath - out.TerminationMessagePolicy = v1.TerminationMessagePolicy(in.TerminationMessagePolicy) - out.ImagePullPolicy = v1.PullPolicy(in.ImagePullPolicy) - if in.SecurityContext != nil { - in, out := &in.SecurityContext, &out.SecurityContext - *out = new(v1.SecurityContext) - if err := Convert_api_SecurityContext_To_v1_SecurityContext(*in, *out, s); err != nil { - return err - } - } else { - out.SecurityContext = nil - } - out.Stdin = in.Stdin - out.StdinOnce = in.StdinOnce - out.TTY = in.TTY - return nil -} - -// Convert_api_Container_To_v1_Container is an autogenerated conversion function. 
-func Convert_api_Container_To_v1_Container(in *api.Container, out *v1.Container, s conversion.Scope) error { - return autoConvert_api_Container_To_v1_Container(in, out, s) -} - -func autoConvert_v1_ContainerImage_To_api_ContainerImage(in *v1.ContainerImage, out *api.ContainerImage, s conversion.Scope) error { - out.Names = *(*[]string)(unsafe.Pointer(&in.Names)) - out.SizeBytes = in.SizeBytes - return nil -} - -// Convert_v1_ContainerImage_To_api_ContainerImage is an autogenerated conversion function. -func Convert_v1_ContainerImage_To_api_ContainerImage(in *v1.ContainerImage, out *api.ContainerImage, s conversion.Scope) error { - return autoConvert_v1_ContainerImage_To_api_ContainerImage(in, out, s) -} - -func autoConvert_api_ContainerImage_To_v1_ContainerImage(in *api.ContainerImage, out *v1.ContainerImage, s conversion.Scope) error { - out.Names = *(*[]string)(unsafe.Pointer(&in.Names)) - out.SizeBytes = in.SizeBytes - return nil -} - -// Convert_api_ContainerImage_To_v1_ContainerImage is an autogenerated conversion function. -func Convert_api_ContainerImage_To_v1_ContainerImage(in *api.ContainerImage, out *v1.ContainerImage, s conversion.Scope) error { - return autoConvert_api_ContainerImage_To_v1_ContainerImage(in, out, s) -} - -func autoConvert_v1_ContainerPort_To_api_ContainerPort(in *v1.ContainerPort, out *api.ContainerPort, s conversion.Scope) error { - out.Name = in.Name - out.HostPort = in.HostPort - out.ContainerPort = in.ContainerPort - out.Protocol = api.Protocol(in.Protocol) - out.HostIP = in.HostIP - return nil -} - -// Convert_v1_ContainerPort_To_api_ContainerPort is an autogenerated conversion function. -func Convert_v1_ContainerPort_To_api_ContainerPort(in *v1.ContainerPort, out *api.ContainerPort, s conversion.Scope) error { - return autoConvert_v1_ContainerPort_To_api_ContainerPort(in, out, s) -} - -func autoConvert_api_ContainerPort_To_v1_ContainerPort(in *api.ContainerPort, out *v1.ContainerPort, s conversion.Scope) error { - out.Name = in.Name - out.HostPort = in.HostPort - out.ContainerPort = in.ContainerPort - out.Protocol = v1.Protocol(in.Protocol) - out.HostIP = in.HostIP - return nil -} - -// Convert_api_ContainerPort_To_v1_ContainerPort is an autogenerated conversion function. -func Convert_api_ContainerPort_To_v1_ContainerPort(in *api.ContainerPort, out *v1.ContainerPort, s conversion.Scope) error { - return autoConvert_api_ContainerPort_To_v1_ContainerPort(in, out, s) -} - -func autoConvert_v1_ContainerState_To_api_ContainerState(in *v1.ContainerState, out *api.ContainerState, s conversion.Scope) error { - out.Waiting = (*api.ContainerStateWaiting)(unsafe.Pointer(in.Waiting)) - out.Running = (*api.ContainerStateRunning)(unsafe.Pointer(in.Running)) - out.Terminated = (*api.ContainerStateTerminated)(unsafe.Pointer(in.Terminated)) - return nil -} - -// Convert_v1_ContainerState_To_api_ContainerState is an autogenerated conversion function. 
-func Convert_v1_ContainerState_To_api_ContainerState(in *v1.ContainerState, out *api.ContainerState, s conversion.Scope) error { - return autoConvert_v1_ContainerState_To_api_ContainerState(in, out, s) -} - -func autoConvert_api_ContainerState_To_v1_ContainerState(in *api.ContainerState, out *v1.ContainerState, s conversion.Scope) error { - out.Waiting = (*v1.ContainerStateWaiting)(unsafe.Pointer(in.Waiting)) - out.Running = (*v1.ContainerStateRunning)(unsafe.Pointer(in.Running)) - out.Terminated = (*v1.ContainerStateTerminated)(unsafe.Pointer(in.Terminated)) - return nil -} - -// Convert_api_ContainerState_To_v1_ContainerState is an autogenerated conversion function. -func Convert_api_ContainerState_To_v1_ContainerState(in *api.ContainerState, out *v1.ContainerState, s conversion.Scope) error { - return autoConvert_api_ContainerState_To_v1_ContainerState(in, out, s) -} - -func autoConvert_v1_ContainerStateRunning_To_api_ContainerStateRunning(in *v1.ContainerStateRunning, out *api.ContainerStateRunning, s conversion.Scope) error { - out.StartedAt = in.StartedAt - return nil -} - -// Convert_v1_ContainerStateRunning_To_api_ContainerStateRunning is an autogenerated conversion function. -func Convert_v1_ContainerStateRunning_To_api_ContainerStateRunning(in *v1.ContainerStateRunning, out *api.ContainerStateRunning, s conversion.Scope) error { - return autoConvert_v1_ContainerStateRunning_To_api_ContainerStateRunning(in, out, s) -} - -func autoConvert_api_ContainerStateRunning_To_v1_ContainerStateRunning(in *api.ContainerStateRunning, out *v1.ContainerStateRunning, s conversion.Scope) error { - out.StartedAt = in.StartedAt - return nil -} - -// Convert_api_ContainerStateRunning_To_v1_ContainerStateRunning is an autogenerated conversion function. -func Convert_api_ContainerStateRunning_To_v1_ContainerStateRunning(in *api.ContainerStateRunning, out *v1.ContainerStateRunning, s conversion.Scope) error { - return autoConvert_api_ContainerStateRunning_To_v1_ContainerStateRunning(in, out, s) -} - -func autoConvert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated(in *v1.ContainerStateTerminated, out *api.ContainerStateTerminated, s conversion.Scope) error { - out.ExitCode = in.ExitCode - out.Signal = in.Signal - out.Reason = in.Reason - out.Message = in.Message - out.StartedAt = in.StartedAt - out.FinishedAt = in.FinishedAt - out.ContainerID = in.ContainerID - return nil -} - -// Convert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated is an autogenerated conversion function. -func Convert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated(in *v1.ContainerStateTerminated, out *api.ContainerStateTerminated, s conversion.Scope) error { - return autoConvert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated(in, out, s) -} - -func autoConvert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated(in *api.ContainerStateTerminated, out *v1.ContainerStateTerminated, s conversion.Scope) error { - out.ExitCode = in.ExitCode - out.Signal = in.Signal - out.Reason = in.Reason - out.Message = in.Message - out.StartedAt = in.StartedAt - out.FinishedAt = in.FinishedAt - out.ContainerID = in.ContainerID - return nil -} - -// Convert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated is an autogenerated conversion function. 
-func Convert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated(in *api.ContainerStateTerminated, out *v1.ContainerStateTerminated, s conversion.Scope) error { - return autoConvert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated(in, out, s) -} - -func autoConvert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting(in *v1.ContainerStateWaiting, out *api.ContainerStateWaiting, s conversion.Scope) error { - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -// Convert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting is an autogenerated conversion function. -func Convert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting(in *v1.ContainerStateWaiting, out *api.ContainerStateWaiting, s conversion.Scope) error { - return autoConvert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting(in, out, s) -} - -func autoConvert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting(in *api.ContainerStateWaiting, out *v1.ContainerStateWaiting, s conversion.Scope) error { - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -// Convert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting is an autogenerated conversion function. -func Convert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting(in *api.ContainerStateWaiting, out *v1.ContainerStateWaiting, s conversion.Scope) error { - return autoConvert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting(in, out, s) -} - -func autoConvert_v1_ContainerStatus_To_api_ContainerStatus(in *v1.ContainerStatus, out *api.ContainerStatus, s conversion.Scope) error { - out.Name = in.Name - if err := Convert_v1_ContainerState_To_api_ContainerState(&in.State, &out.State, s); err != nil { - return err - } - if err := Convert_v1_ContainerState_To_api_ContainerState(&in.LastTerminationState, &out.LastTerminationState, s); err != nil { - return err - } - out.Ready = in.Ready - out.RestartCount = in.RestartCount - out.Image = in.Image - out.ImageID = in.ImageID - out.ContainerID = in.ContainerID - return nil -} - -// Convert_v1_ContainerStatus_To_api_ContainerStatus is an autogenerated conversion function. -func Convert_v1_ContainerStatus_To_api_ContainerStatus(in *v1.ContainerStatus, out *api.ContainerStatus, s conversion.Scope) error { - return autoConvert_v1_ContainerStatus_To_api_ContainerStatus(in, out, s) -} - -func autoConvert_api_ContainerStatus_To_v1_ContainerStatus(in *api.ContainerStatus, out *v1.ContainerStatus, s conversion.Scope) error { - out.Name = in.Name - if err := Convert_api_ContainerState_To_v1_ContainerState(&in.State, &out.State, s); err != nil { - return err - } - if err := Convert_api_ContainerState_To_v1_ContainerState(&in.LastTerminationState, &out.LastTerminationState, s); err != nil { - return err - } - out.Ready = in.Ready - out.RestartCount = in.RestartCount - out.Image = in.Image - out.ImageID = in.ImageID - out.ContainerID = in.ContainerID - return nil -} - -// Convert_api_ContainerStatus_To_v1_ContainerStatus is an autogenerated conversion function. -func Convert_api_ContainerStatus_To_v1_ContainerStatus(in *api.ContainerStatus, out *v1.ContainerStatus, s conversion.Scope) error { - return autoConvert_api_ContainerStatus_To_v1_ContainerStatus(in, out, s) -} - -func autoConvert_v1_DaemonEndpoint_To_api_DaemonEndpoint(in *v1.DaemonEndpoint, out *api.DaemonEndpoint, s conversion.Scope) error { - out.Port = in.Port - return nil -} - -// Convert_v1_DaemonEndpoint_To_api_DaemonEndpoint is an autogenerated conversion function. 
-func Convert_v1_DaemonEndpoint_To_api_DaemonEndpoint(in *v1.DaemonEndpoint, out *api.DaemonEndpoint, s conversion.Scope) error { - return autoConvert_v1_DaemonEndpoint_To_api_DaemonEndpoint(in, out, s) -} - -func autoConvert_api_DaemonEndpoint_To_v1_DaemonEndpoint(in *api.DaemonEndpoint, out *v1.DaemonEndpoint, s conversion.Scope) error { - out.Port = in.Port - return nil -} - -// Convert_api_DaemonEndpoint_To_v1_DaemonEndpoint is an autogenerated conversion function. -func Convert_api_DaemonEndpoint_To_v1_DaemonEndpoint(in *api.DaemonEndpoint, out *v1.DaemonEndpoint, s conversion.Scope) error { - return autoConvert_api_DaemonEndpoint_To_v1_DaemonEndpoint(in, out, s) -} - -func autoConvert_v1_DeleteOptions_To_api_DeleteOptions(in *v1.DeleteOptions, out *api.DeleteOptions, s conversion.Scope) error { - out.GracePeriodSeconds = (*int64)(unsafe.Pointer(in.GracePeriodSeconds)) - out.Preconditions = (*api.Preconditions)(unsafe.Pointer(in.Preconditions)) - out.OrphanDependents = (*bool)(unsafe.Pointer(in.OrphanDependents)) - out.PropagationPolicy = (*api.DeletionPropagation)(unsafe.Pointer(in.PropagationPolicy)) - return nil -} - -// Convert_v1_DeleteOptions_To_api_DeleteOptions is an autogenerated conversion function. -func Convert_v1_DeleteOptions_To_api_DeleteOptions(in *v1.DeleteOptions, out *api.DeleteOptions, s conversion.Scope) error { - return autoConvert_v1_DeleteOptions_To_api_DeleteOptions(in, out, s) -} - -func autoConvert_api_DeleteOptions_To_v1_DeleteOptions(in *api.DeleteOptions, out *v1.DeleteOptions, s conversion.Scope) error { - out.GracePeriodSeconds = (*int64)(unsafe.Pointer(in.GracePeriodSeconds)) - out.Preconditions = (*v1.Preconditions)(unsafe.Pointer(in.Preconditions)) - out.OrphanDependents = (*bool)(unsafe.Pointer(in.OrphanDependents)) - out.PropagationPolicy = (*v1.DeletionPropagation)(unsafe.Pointer(in.PropagationPolicy)) - return nil -} - -// Convert_api_DeleteOptions_To_v1_DeleteOptions is an autogenerated conversion function. -func Convert_api_DeleteOptions_To_v1_DeleteOptions(in *api.DeleteOptions, out *v1.DeleteOptions, s conversion.Scope) error { - return autoConvert_api_DeleteOptions_To_v1_DeleteOptions(in, out, s) -} - -func autoConvert_v1_DownwardAPIProjection_To_api_DownwardAPIProjection(in *v1.DownwardAPIProjection, out *api.DownwardAPIProjection, s conversion.Scope) error { - out.Items = *(*[]api.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1_DownwardAPIProjection_To_api_DownwardAPIProjection is an autogenerated conversion function. -func Convert_v1_DownwardAPIProjection_To_api_DownwardAPIProjection(in *v1.DownwardAPIProjection, out *api.DownwardAPIProjection, s conversion.Scope) error { - return autoConvert_v1_DownwardAPIProjection_To_api_DownwardAPIProjection(in, out, s) -} - -func autoConvert_api_DownwardAPIProjection_To_v1_DownwardAPIProjection(in *api.DownwardAPIProjection, out *v1.DownwardAPIProjection, s conversion.Scope) error { - out.Items = *(*[]v1.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_api_DownwardAPIProjection_To_v1_DownwardAPIProjection is an autogenerated conversion function. 
-func Convert_api_DownwardAPIProjection_To_v1_DownwardAPIProjection(in *api.DownwardAPIProjection, out *v1.DownwardAPIProjection, s conversion.Scope) error { - return autoConvert_api_DownwardAPIProjection_To_v1_DownwardAPIProjection(in, out, s) -} - -func autoConvert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(in *v1.DownwardAPIVolumeFile, out *api.DownwardAPIVolumeFile, s conversion.Scope) error { - out.Path = in.Path - out.FieldRef = (*api.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) - out.ResourceFieldRef = (*api.ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) - out.Mode = (*int32)(unsafe.Pointer(in.Mode)) - return nil -} - -// Convert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile is an autogenerated conversion function. -func Convert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(in *v1.DownwardAPIVolumeFile, out *api.DownwardAPIVolumeFile, s conversion.Scope) error { - return autoConvert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(in, out, s) -} - -func autoConvert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in *api.DownwardAPIVolumeFile, out *v1.DownwardAPIVolumeFile, s conversion.Scope) error { - out.Path = in.Path - out.FieldRef = (*v1.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) - out.ResourceFieldRef = (*v1.ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) - out.Mode = (*int32)(unsafe.Pointer(in.Mode)) - return nil -} - -// Convert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile is an autogenerated conversion function. -func Convert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in *api.DownwardAPIVolumeFile, out *v1.DownwardAPIVolumeFile, s conversion.Scope) error { - return autoConvert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in, out, s) -} - -func autoConvert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource(in *v1.DownwardAPIVolumeSource, out *api.DownwardAPIVolumeSource, s conversion.Scope) error { - out.Items = *(*[]api.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) - out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) - return nil -} - -// Convert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource is an autogenerated conversion function. -func Convert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource(in *v1.DownwardAPIVolumeSource, out *api.DownwardAPIVolumeSource, s conversion.Scope) error { - return autoConvert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource(in, out, s) -} - -func autoConvert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in *api.DownwardAPIVolumeSource, out *v1.DownwardAPIVolumeSource, s conversion.Scope) error { - out.Items = *(*[]v1.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) - out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) - return nil -} - -// Convert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource is an autogenerated conversion function. 
-func Convert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in *api.DownwardAPIVolumeSource, out *v1.DownwardAPIVolumeSource, s conversion.Scope) error { - return autoConvert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in, out, s) -} - -func autoConvert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in *v1.EmptyDirVolumeSource, out *api.EmptyDirVolumeSource, s conversion.Scope) error { - out.Medium = api.StorageMedium(in.Medium) - out.SizeLimit = (*resource.Quantity)(unsafe.Pointer(in.SizeLimit)) - return nil -} - -// Convert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource is an autogenerated conversion function. -func Convert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in *v1.EmptyDirVolumeSource, out *api.EmptyDirVolumeSource, s conversion.Scope) error { - return autoConvert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in, out, s) -} - -func autoConvert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in *api.EmptyDirVolumeSource, out *v1.EmptyDirVolumeSource, s conversion.Scope) error { - out.Medium = v1.StorageMedium(in.Medium) - out.SizeLimit = (*resource.Quantity)(unsafe.Pointer(in.SizeLimit)) - return nil -} - -// Convert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource is an autogenerated conversion function. -func Convert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in *api.EmptyDirVolumeSource, out *v1.EmptyDirVolumeSource, s conversion.Scope) error { - return autoConvert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in, out, s) -} - -func autoConvert_v1_EndpointAddress_To_api_EndpointAddress(in *v1.EndpointAddress, out *api.EndpointAddress, s conversion.Scope) error { - out.IP = in.IP - out.Hostname = in.Hostname - out.NodeName = (*string)(unsafe.Pointer(in.NodeName)) - out.TargetRef = (*api.ObjectReference)(unsafe.Pointer(in.TargetRef)) - return nil -} - -// Convert_v1_EndpointAddress_To_api_EndpointAddress is an autogenerated conversion function. -func Convert_v1_EndpointAddress_To_api_EndpointAddress(in *v1.EndpointAddress, out *api.EndpointAddress, s conversion.Scope) error { - return autoConvert_v1_EndpointAddress_To_api_EndpointAddress(in, out, s) -} - -func autoConvert_api_EndpointAddress_To_v1_EndpointAddress(in *api.EndpointAddress, out *v1.EndpointAddress, s conversion.Scope) error { - out.IP = in.IP - out.Hostname = in.Hostname - out.NodeName = (*string)(unsafe.Pointer(in.NodeName)) - out.TargetRef = (*v1.ObjectReference)(unsafe.Pointer(in.TargetRef)) - return nil -} - -// Convert_api_EndpointAddress_To_v1_EndpointAddress is an autogenerated conversion function. -func Convert_api_EndpointAddress_To_v1_EndpointAddress(in *api.EndpointAddress, out *v1.EndpointAddress, s conversion.Scope) error { - return autoConvert_api_EndpointAddress_To_v1_EndpointAddress(in, out, s) -} - -func autoConvert_v1_EndpointPort_To_api_EndpointPort(in *v1.EndpointPort, out *api.EndpointPort, s conversion.Scope) error { - out.Name = in.Name - out.Port = in.Port - out.Protocol = api.Protocol(in.Protocol) - return nil -} - -// Convert_v1_EndpointPort_To_api_EndpointPort is an autogenerated conversion function. 
-func Convert_v1_EndpointPort_To_api_EndpointPort(in *v1.EndpointPort, out *api.EndpointPort, s conversion.Scope) error { - return autoConvert_v1_EndpointPort_To_api_EndpointPort(in, out, s) -} - -func autoConvert_api_EndpointPort_To_v1_EndpointPort(in *api.EndpointPort, out *v1.EndpointPort, s conversion.Scope) error { - out.Name = in.Name - out.Port = in.Port - out.Protocol = v1.Protocol(in.Protocol) - return nil -} - -// Convert_api_EndpointPort_To_v1_EndpointPort is an autogenerated conversion function. -func Convert_api_EndpointPort_To_v1_EndpointPort(in *api.EndpointPort, out *v1.EndpointPort, s conversion.Scope) error { - return autoConvert_api_EndpointPort_To_v1_EndpointPort(in, out, s) -} - -func autoConvert_v1_EndpointSubset_To_api_EndpointSubset(in *v1.EndpointSubset, out *api.EndpointSubset, s conversion.Scope) error { - out.Addresses = *(*[]api.EndpointAddress)(unsafe.Pointer(&in.Addresses)) - out.NotReadyAddresses = *(*[]api.EndpointAddress)(unsafe.Pointer(&in.NotReadyAddresses)) - out.Ports = *(*[]api.EndpointPort)(unsafe.Pointer(&in.Ports)) - return nil -} - -// Convert_v1_EndpointSubset_To_api_EndpointSubset is an autogenerated conversion function. -func Convert_v1_EndpointSubset_To_api_EndpointSubset(in *v1.EndpointSubset, out *api.EndpointSubset, s conversion.Scope) error { - return autoConvert_v1_EndpointSubset_To_api_EndpointSubset(in, out, s) -} - -func autoConvert_api_EndpointSubset_To_v1_EndpointSubset(in *api.EndpointSubset, out *v1.EndpointSubset, s conversion.Scope) error { - out.Addresses = *(*[]v1.EndpointAddress)(unsafe.Pointer(&in.Addresses)) - out.NotReadyAddresses = *(*[]v1.EndpointAddress)(unsafe.Pointer(&in.NotReadyAddresses)) - out.Ports = *(*[]v1.EndpointPort)(unsafe.Pointer(&in.Ports)) - return nil -} - -// Convert_api_EndpointSubset_To_v1_EndpointSubset is an autogenerated conversion function. -func Convert_api_EndpointSubset_To_v1_EndpointSubset(in *api.EndpointSubset, out *v1.EndpointSubset, s conversion.Scope) error { - return autoConvert_api_EndpointSubset_To_v1_EndpointSubset(in, out, s) -} - -func autoConvert_v1_Endpoints_To_api_Endpoints(in *v1.Endpoints, out *api.Endpoints, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Subsets = *(*[]api.EndpointSubset)(unsafe.Pointer(&in.Subsets)) - return nil -} - -// Convert_v1_Endpoints_To_api_Endpoints is an autogenerated conversion function. -func Convert_v1_Endpoints_To_api_Endpoints(in *v1.Endpoints, out *api.Endpoints, s conversion.Scope) error { - return autoConvert_v1_Endpoints_To_api_Endpoints(in, out, s) -} - -func autoConvert_api_Endpoints_To_v1_Endpoints(in *api.Endpoints, out *v1.Endpoints, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Subsets = *(*[]v1.EndpointSubset)(unsafe.Pointer(&in.Subsets)) - return nil -} - -// Convert_api_Endpoints_To_v1_Endpoints is an autogenerated conversion function. -func Convert_api_Endpoints_To_v1_Endpoints(in *api.Endpoints, out *v1.Endpoints, s conversion.Scope) error { - return autoConvert_api_Endpoints_To_v1_Endpoints(in, out, s) -} - -func autoConvert_v1_EndpointsList_To_api_EndpointsList(in *v1.EndpointsList, out *api.EndpointsList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]api.Endpoints)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1_EndpointsList_To_api_EndpointsList is an autogenerated conversion function. 
-func Convert_v1_EndpointsList_To_api_EndpointsList(in *v1.EndpointsList, out *api.EndpointsList, s conversion.Scope) error { - return autoConvert_v1_EndpointsList_To_api_EndpointsList(in, out, s) -} - -func autoConvert_api_EndpointsList_To_v1_EndpointsList(in *api.EndpointsList, out *v1.EndpointsList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1.Endpoints)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_api_EndpointsList_To_v1_EndpointsList is an autogenerated conversion function. -func Convert_api_EndpointsList_To_v1_EndpointsList(in *api.EndpointsList, out *v1.EndpointsList, s conversion.Scope) error { - return autoConvert_api_EndpointsList_To_v1_EndpointsList(in, out, s) -} - -func autoConvert_v1_EnvFromSource_To_api_EnvFromSource(in *v1.EnvFromSource, out *api.EnvFromSource, s conversion.Scope) error { - out.Prefix = in.Prefix - out.ConfigMapRef = (*api.ConfigMapEnvSource)(unsafe.Pointer(in.ConfigMapRef)) - out.SecretRef = (*api.SecretEnvSource)(unsafe.Pointer(in.SecretRef)) - return nil -} - -// Convert_v1_EnvFromSource_To_api_EnvFromSource is an autogenerated conversion function. -func Convert_v1_EnvFromSource_To_api_EnvFromSource(in *v1.EnvFromSource, out *api.EnvFromSource, s conversion.Scope) error { - return autoConvert_v1_EnvFromSource_To_api_EnvFromSource(in, out, s) -} - -func autoConvert_api_EnvFromSource_To_v1_EnvFromSource(in *api.EnvFromSource, out *v1.EnvFromSource, s conversion.Scope) error { - out.Prefix = in.Prefix - out.ConfigMapRef = (*v1.ConfigMapEnvSource)(unsafe.Pointer(in.ConfigMapRef)) - out.SecretRef = (*v1.SecretEnvSource)(unsafe.Pointer(in.SecretRef)) - return nil -} - -// Convert_api_EnvFromSource_To_v1_EnvFromSource is an autogenerated conversion function. -func Convert_api_EnvFromSource_To_v1_EnvFromSource(in *api.EnvFromSource, out *v1.EnvFromSource, s conversion.Scope) error { - return autoConvert_api_EnvFromSource_To_v1_EnvFromSource(in, out, s) -} - -func autoConvert_v1_EnvVar_To_api_EnvVar(in *v1.EnvVar, out *api.EnvVar, s conversion.Scope) error { - out.Name = in.Name - out.Value = in.Value - out.ValueFrom = (*api.EnvVarSource)(unsafe.Pointer(in.ValueFrom)) - return nil -} - -// Convert_v1_EnvVar_To_api_EnvVar is an autogenerated conversion function. -func Convert_v1_EnvVar_To_api_EnvVar(in *v1.EnvVar, out *api.EnvVar, s conversion.Scope) error { - return autoConvert_v1_EnvVar_To_api_EnvVar(in, out, s) -} - -func autoConvert_api_EnvVar_To_v1_EnvVar(in *api.EnvVar, out *v1.EnvVar, s conversion.Scope) error { - out.Name = in.Name - out.Value = in.Value - out.ValueFrom = (*v1.EnvVarSource)(unsafe.Pointer(in.ValueFrom)) - return nil -} - -// Convert_api_EnvVar_To_v1_EnvVar is an autogenerated conversion function. -func Convert_api_EnvVar_To_v1_EnvVar(in *api.EnvVar, out *v1.EnvVar, s conversion.Scope) error { - return autoConvert_api_EnvVar_To_v1_EnvVar(in, out, s) -} - -func autoConvert_v1_EnvVarSource_To_api_EnvVarSource(in *v1.EnvVarSource, out *api.EnvVarSource, s conversion.Scope) error { - out.FieldRef = (*api.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) - out.ResourceFieldRef = (*api.ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) - out.ConfigMapKeyRef = (*api.ConfigMapKeySelector)(unsafe.Pointer(in.ConfigMapKeyRef)) - out.SecretKeyRef = (*api.SecretKeySelector)(unsafe.Pointer(in.SecretKeyRef)) - return nil -} - -// Convert_v1_EnvVarSource_To_api_EnvVarSource is an autogenerated conversion function. 
-func Convert_v1_EnvVarSource_To_api_EnvVarSource(in *v1.EnvVarSource, out *api.EnvVarSource, s conversion.Scope) error { - return autoConvert_v1_EnvVarSource_To_api_EnvVarSource(in, out, s) -} - -func autoConvert_api_EnvVarSource_To_v1_EnvVarSource(in *api.EnvVarSource, out *v1.EnvVarSource, s conversion.Scope) error { - out.FieldRef = (*v1.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) - out.ResourceFieldRef = (*v1.ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) - out.ConfigMapKeyRef = (*v1.ConfigMapKeySelector)(unsafe.Pointer(in.ConfigMapKeyRef)) - out.SecretKeyRef = (*v1.SecretKeySelector)(unsafe.Pointer(in.SecretKeyRef)) - return nil -} - -// Convert_api_EnvVarSource_To_v1_EnvVarSource is an autogenerated conversion function. -func Convert_api_EnvVarSource_To_v1_EnvVarSource(in *api.EnvVarSource, out *v1.EnvVarSource, s conversion.Scope) error { - return autoConvert_api_EnvVarSource_To_v1_EnvVarSource(in, out, s) -} - -func autoConvert_v1_Event_To_api_Event(in *v1.Event, out *api.Event, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_ObjectReference_To_api_ObjectReference(&in.InvolvedObject, &out.InvolvedObject, s); err != nil { - return err - } - out.Reason = in.Reason - out.Message = in.Message - if err := Convert_v1_EventSource_To_api_EventSource(&in.Source, &out.Source, s); err != nil { - return err - } - out.FirstTimestamp = in.FirstTimestamp - out.LastTimestamp = in.LastTimestamp - out.Count = in.Count - out.Type = in.Type - return nil -} - -// Convert_v1_Event_To_api_Event is an autogenerated conversion function. -func Convert_v1_Event_To_api_Event(in *v1.Event, out *api.Event, s conversion.Scope) error { - return autoConvert_v1_Event_To_api_Event(in, out, s) -} - -func autoConvert_api_Event_To_v1_Event(in *api.Event, out *v1.Event, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_ObjectReference_To_v1_ObjectReference(&in.InvolvedObject, &out.InvolvedObject, s); err != nil { - return err - } - out.Reason = in.Reason - out.Message = in.Message - if err := Convert_api_EventSource_To_v1_EventSource(&in.Source, &out.Source, s); err != nil { - return err - } - out.FirstTimestamp = in.FirstTimestamp - out.LastTimestamp = in.LastTimestamp - out.Count = in.Count - out.Type = in.Type - return nil -} - -// Convert_api_Event_To_v1_Event is an autogenerated conversion function. -func Convert_api_Event_To_v1_Event(in *api.Event, out *v1.Event, s conversion.Scope) error { - return autoConvert_api_Event_To_v1_Event(in, out, s) -} - -func autoConvert_v1_EventList_To_api_EventList(in *v1.EventList, out *api.EventList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]api.Event)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1_EventList_To_api_EventList is an autogenerated conversion function. -func Convert_v1_EventList_To_api_EventList(in *v1.EventList, out *api.EventList, s conversion.Scope) error { - return autoConvert_v1_EventList_To_api_EventList(in, out, s) -} - -func autoConvert_api_EventList_To_v1_EventList(in *api.EventList, out *v1.EventList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1.Event)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_api_EventList_To_v1_EventList is an autogenerated conversion function. 
-func Convert_api_EventList_To_v1_EventList(in *api.EventList, out *v1.EventList, s conversion.Scope) error { - return autoConvert_api_EventList_To_v1_EventList(in, out, s) -} - -func autoConvert_v1_EventSource_To_api_EventSource(in *v1.EventSource, out *api.EventSource, s conversion.Scope) error { - out.Component = in.Component - out.Host = in.Host - return nil -} - -// Convert_v1_EventSource_To_api_EventSource is an autogenerated conversion function. -func Convert_v1_EventSource_To_api_EventSource(in *v1.EventSource, out *api.EventSource, s conversion.Scope) error { - return autoConvert_v1_EventSource_To_api_EventSource(in, out, s) -} - -func autoConvert_api_EventSource_To_v1_EventSource(in *api.EventSource, out *v1.EventSource, s conversion.Scope) error { - out.Component = in.Component - out.Host = in.Host - return nil -} - -// Convert_api_EventSource_To_v1_EventSource is an autogenerated conversion function. -func Convert_api_EventSource_To_v1_EventSource(in *api.EventSource, out *v1.EventSource, s conversion.Scope) error { - return autoConvert_api_EventSource_To_v1_EventSource(in, out, s) -} - -func autoConvert_v1_ExecAction_To_api_ExecAction(in *v1.ExecAction, out *api.ExecAction, s conversion.Scope) error { - out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) - return nil -} - -// Convert_v1_ExecAction_To_api_ExecAction is an autogenerated conversion function. -func Convert_v1_ExecAction_To_api_ExecAction(in *v1.ExecAction, out *api.ExecAction, s conversion.Scope) error { - return autoConvert_v1_ExecAction_To_api_ExecAction(in, out, s) -} - -func autoConvert_api_ExecAction_To_v1_ExecAction(in *api.ExecAction, out *v1.ExecAction, s conversion.Scope) error { - out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) - return nil -} - -// Convert_api_ExecAction_To_v1_ExecAction is an autogenerated conversion function. -func Convert_api_ExecAction_To_v1_ExecAction(in *api.ExecAction, out *v1.ExecAction, s conversion.Scope) error { - return autoConvert_api_ExecAction_To_v1_ExecAction(in, out, s) -} - -func autoConvert_v1_FCVolumeSource_To_api_FCVolumeSource(in *v1.FCVolumeSource, out *api.FCVolumeSource, s conversion.Scope) error { - out.TargetWWNs = *(*[]string)(unsafe.Pointer(&in.TargetWWNs)) - out.Lun = (*int32)(unsafe.Pointer(in.Lun)) - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - out.WWIDs = *(*[]string)(unsafe.Pointer(&in.WWIDs)) - return nil -} - -// Convert_v1_FCVolumeSource_To_api_FCVolumeSource is an autogenerated conversion function. -func Convert_v1_FCVolumeSource_To_api_FCVolumeSource(in *v1.FCVolumeSource, out *api.FCVolumeSource, s conversion.Scope) error { - return autoConvert_v1_FCVolumeSource_To_api_FCVolumeSource(in, out, s) -} - -func autoConvert_api_FCVolumeSource_To_v1_FCVolumeSource(in *api.FCVolumeSource, out *v1.FCVolumeSource, s conversion.Scope) error { - out.TargetWWNs = *(*[]string)(unsafe.Pointer(&in.TargetWWNs)) - out.Lun = (*int32)(unsafe.Pointer(in.Lun)) - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - out.WWIDs = *(*[]string)(unsafe.Pointer(&in.WWIDs)) - return nil -} - -// Convert_api_FCVolumeSource_To_v1_FCVolumeSource is an autogenerated conversion function. 
-func Convert_api_FCVolumeSource_To_v1_FCVolumeSource(in *api.FCVolumeSource, out *v1.FCVolumeSource, s conversion.Scope) error { - return autoConvert_api_FCVolumeSource_To_v1_FCVolumeSource(in, out, s) -} - -func autoConvert_v1_FlexVolumeSource_To_api_FlexVolumeSource(in *v1.FlexVolumeSource, out *api.FlexVolumeSource, s conversion.Scope) error { - out.Driver = in.Driver - out.FSType = in.FSType - out.SecretRef = (*api.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) - out.ReadOnly = in.ReadOnly - out.Options = *(*map[string]string)(unsafe.Pointer(&in.Options)) - return nil -} - -// Convert_v1_FlexVolumeSource_To_api_FlexVolumeSource is an autogenerated conversion function. -func Convert_v1_FlexVolumeSource_To_api_FlexVolumeSource(in *v1.FlexVolumeSource, out *api.FlexVolumeSource, s conversion.Scope) error { - return autoConvert_v1_FlexVolumeSource_To_api_FlexVolumeSource(in, out, s) -} - -func autoConvert_api_FlexVolumeSource_To_v1_FlexVolumeSource(in *api.FlexVolumeSource, out *v1.FlexVolumeSource, s conversion.Scope) error { - out.Driver = in.Driver - out.FSType = in.FSType - out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) - out.ReadOnly = in.ReadOnly - out.Options = *(*map[string]string)(unsafe.Pointer(&in.Options)) - return nil -} - -// Convert_api_FlexVolumeSource_To_v1_FlexVolumeSource is an autogenerated conversion function. -func Convert_api_FlexVolumeSource_To_v1_FlexVolumeSource(in *api.FlexVolumeSource, out *v1.FlexVolumeSource, s conversion.Scope) error { - return autoConvert_api_FlexVolumeSource_To_v1_FlexVolumeSource(in, out, s) -} - -func autoConvert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(in *v1.FlockerVolumeSource, out *api.FlockerVolumeSource, s conversion.Scope) error { - out.DatasetName = in.DatasetName - out.DatasetUUID = in.DatasetUUID - return nil -} - -// Convert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource is an autogenerated conversion function. -func Convert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(in *v1.FlockerVolumeSource, out *api.FlockerVolumeSource, s conversion.Scope) error { - return autoConvert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(in, out, s) -} - -func autoConvert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource(in *api.FlockerVolumeSource, out *v1.FlockerVolumeSource, s conversion.Scope) error { - out.DatasetName = in.DatasetName - out.DatasetUUID = in.DatasetUUID - return nil -} - -// Convert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource is an autogenerated conversion function. -func Convert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource(in *api.FlockerVolumeSource, out *v1.FlockerVolumeSource, s conversion.Scope) error { - return autoConvert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource(in, out, s) -} - -func autoConvert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in *v1.GCEPersistentDiskVolumeSource, out *api.GCEPersistentDiskVolumeSource, s conversion.Scope) error { - out.PDName = in.PDName - out.FSType = in.FSType - out.Partition = in.Partition - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource is an autogenerated conversion function. 
-func Convert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in *v1.GCEPersistentDiskVolumeSource, out *api.GCEPersistentDiskVolumeSource, s conversion.Scope) error { - return autoConvert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in, out, s) -} - -func autoConvert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in *api.GCEPersistentDiskVolumeSource, out *v1.GCEPersistentDiskVolumeSource, s conversion.Scope) error { - out.PDName = in.PDName - out.FSType = in.FSType - out.Partition = in.Partition - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource is an autogenerated conversion function. -func Convert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in *api.GCEPersistentDiskVolumeSource, out *v1.GCEPersistentDiskVolumeSource, s conversion.Scope) error { - return autoConvert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in, out, s) -} - -func autoConvert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(in *v1.GitRepoVolumeSource, out *api.GitRepoVolumeSource, s conversion.Scope) error { - out.Repository = in.Repository - out.Revision = in.Revision - out.Directory = in.Directory - return nil -} - -// Convert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource is an autogenerated conversion function. -func Convert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(in *v1.GitRepoVolumeSource, out *api.GitRepoVolumeSource, s conversion.Scope) error { - return autoConvert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(in, out, s) -} - -func autoConvert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in *api.GitRepoVolumeSource, out *v1.GitRepoVolumeSource, s conversion.Scope) error { - out.Repository = in.Repository - out.Revision = in.Revision - out.Directory = in.Directory - return nil -} - -// Convert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource is an autogenerated conversion function. -func Convert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in *api.GitRepoVolumeSource, out *v1.GitRepoVolumeSource, s conversion.Scope) error { - return autoConvert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in, out, s) -} - -func autoConvert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(in *v1.GlusterfsVolumeSource, out *api.GlusterfsVolumeSource, s conversion.Scope) error { - out.EndpointsName = in.EndpointsName - out.Path = in.Path - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource is an autogenerated conversion function. -func Convert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(in *v1.GlusterfsVolumeSource, out *api.GlusterfsVolumeSource, s conversion.Scope) error { - return autoConvert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(in, out, s) -} - -func autoConvert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in *api.GlusterfsVolumeSource, out *v1.GlusterfsVolumeSource, s conversion.Scope) error { - out.EndpointsName = in.EndpointsName - out.Path = in.Path - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource is an autogenerated conversion function. 
-func Convert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in *api.GlusterfsVolumeSource, out *v1.GlusterfsVolumeSource, s conversion.Scope) error { - return autoConvert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in, out, s) -} - -func autoConvert_v1_HTTPGetAction_To_api_HTTPGetAction(in *v1.HTTPGetAction, out *api.HTTPGetAction, s conversion.Scope) error { - out.Path = in.Path - out.Port = in.Port - out.Host = in.Host - out.Scheme = api.URIScheme(in.Scheme) - out.HTTPHeaders = *(*[]api.HTTPHeader)(unsafe.Pointer(&in.HTTPHeaders)) - return nil -} - -// Convert_v1_HTTPGetAction_To_api_HTTPGetAction is an autogenerated conversion function. -func Convert_v1_HTTPGetAction_To_api_HTTPGetAction(in *v1.HTTPGetAction, out *api.HTTPGetAction, s conversion.Scope) error { - return autoConvert_v1_HTTPGetAction_To_api_HTTPGetAction(in, out, s) -} - -func autoConvert_api_HTTPGetAction_To_v1_HTTPGetAction(in *api.HTTPGetAction, out *v1.HTTPGetAction, s conversion.Scope) error { - out.Path = in.Path - out.Port = in.Port - out.Host = in.Host - out.Scheme = v1.URIScheme(in.Scheme) - out.HTTPHeaders = *(*[]v1.HTTPHeader)(unsafe.Pointer(&in.HTTPHeaders)) - return nil -} - -// Convert_api_HTTPGetAction_To_v1_HTTPGetAction is an autogenerated conversion function. -func Convert_api_HTTPGetAction_To_v1_HTTPGetAction(in *api.HTTPGetAction, out *v1.HTTPGetAction, s conversion.Scope) error { - return autoConvert_api_HTTPGetAction_To_v1_HTTPGetAction(in, out, s) -} - -func autoConvert_v1_HTTPHeader_To_api_HTTPHeader(in *v1.HTTPHeader, out *api.HTTPHeader, s conversion.Scope) error { - out.Name = in.Name - out.Value = in.Value - return nil -} - -// Convert_v1_HTTPHeader_To_api_HTTPHeader is an autogenerated conversion function. -func Convert_v1_HTTPHeader_To_api_HTTPHeader(in *v1.HTTPHeader, out *api.HTTPHeader, s conversion.Scope) error { - return autoConvert_v1_HTTPHeader_To_api_HTTPHeader(in, out, s) -} - -func autoConvert_api_HTTPHeader_To_v1_HTTPHeader(in *api.HTTPHeader, out *v1.HTTPHeader, s conversion.Scope) error { - out.Name = in.Name - out.Value = in.Value - return nil -} - -// Convert_api_HTTPHeader_To_v1_HTTPHeader is an autogenerated conversion function. -func Convert_api_HTTPHeader_To_v1_HTTPHeader(in *api.HTTPHeader, out *v1.HTTPHeader, s conversion.Scope) error { - return autoConvert_api_HTTPHeader_To_v1_HTTPHeader(in, out, s) -} - -func autoConvert_v1_Handler_To_api_Handler(in *v1.Handler, out *api.Handler, s conversion.Scope) error { - out.Exec = (*api.ExecAction)(unsafe.Pointer(in.Exec)) - out.HTTPGet = (*api.HTTPGetAction)(unsafe.Pointer(in.HTTPGet)) - out.TCPSocket = (*api.TCPSocketAction)(unsafe.Pointer(in.TCPSocket)) - return nil -} - -// Convert_v1_Handler_To_api_Handler is an autogenerated conversion function. -func Convert_v1_Handler_To_api_Handler(in *v1.Handler, out *api.Handler, s conversion.Scope) error { - return autoConvert_v1_Handler_To_api_Handler(in, out, s) -} - -func autoConvert_api_Handler_To_v1_Handler(in *api.Handler, out *v1.Handler, s conversion.Scope) error { - out.Exec = (*v1.ExecAction)(unsafe.Pointer(in.Exec)) - out.HTTPGet = (*v1.HTTPGetAction)(unsafe.Pointer(in.HTTPGet)) - out.TCPSocket = (*v1.TCPSocketAction)(unsafe.Pointer(in.TCPSocket)) - return nil -} - -// Convert_api_Handler_To_v1_Handler is an autogenerated conversion function. 
-func Convert_api_Handler_To_v1_Handler(in *api.Handler, out *v1.Handler, s conversion.Scope) error { - return autoConvert_api_Handler_To_v1_Handler(in, out, s) -} - -func autoConvert_v1_HostAlias_To_api_HostAlias(in *v1.HostAlias, out *api.HostAlias, s conversion.Scope) error { - out.IP = in.IP - out.Hostnames = *(*[]string)(unsafe.Pointer(&in.Hostnames)) - return nil -} - -// Convert_v1_HostAlias_To_api_HostAlias is an autogenerated conversion function. -func Convert_v1_HostAlias_To_api_HostAlias(in *v1.HostAlias, out *api.HostAlias, s conversion.Scope) error { - return autoConvert_v1_HostAlias_To_api_HostAlias(in, out, s) -} - -func autoConvert_api_HostAlias_To_v1_HostAlias(in *api.HostAlias, out *v1.HostAlias, s conversion.Scope) error { - out.IP = in.IP - out.Hostnames = *(*[]string)(unsafe.Pointer(&in.Hostnames)) - return nil -} - -// Convert_api_HostAlias_To_v1_HostAlias is an autogenerated conversion function. -func Convert_api_HostAlias_To_v1_HostAlias(in *api.HostAlias, out *v1.HostAlias, s conversion.Scope) error { - return autoConvert_api_HostAlias_To_v1_HostAlias(in, out, s) -} - -func autoConvert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(in *v1.HostPathVolumeSource, out *api.HostPathVolumeSource, s conversion.Scope) error { - out.Path = in.Path - out.Type = (*api.HostPathType)(unsafe.Pointer(in.Type)) - return nil -} - -// Convert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource is an autogenerated conversion function. -func Convert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(in *v1.HostPathVolumeSource, out *api.HostPathVolumeSource, s conversion.Scope) error { - return autoConvert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(in, out, s) -} - -func autoConvert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(in *api.HostPathVolumeSource, out *v1.HostPathVolumeSource, s conversion.Scope) error { - out.Path = in.Path - out.Type = (*v1.HostPathType)(unsafe.Pointer(in.Type)) - return nil -} - -// Convert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource is an autogenerated conversion function. -func Convert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(in *api.HostPathVolumeSource, out *v1.HostPathVolumeSource, s conversion.Scope) error { - return autoConvert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(in, out, s) -} - -func autoConvert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in *v1.ISCSIVolumeSource, out *api.ISCSIVolumeSource, s conversion.Scope) error { - out.TargetPortal = in.TargetPortal - out.IQN = in.IQN - out.Lun = in.Lun - out.ISCSIInterface = in.ISCSIInterface - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - out.Portals = *(*[]string)(unsafe.Pointer(&in.Portals)) - out.DiscoveryCHAPAuth = in.DiscoveryCHAPAuth - out.SessionCHAPAuth = in.SessionCHAPAuth - out.SecretRef = (*api.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) - out.InitiatorName = (*string)(unsafe.Pointer(in.InitiatorName)) - return nil -} - -// Convert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource is an autogenerated conversion function. 
-func Convert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in *v1.ISCSIVolumeSource, out *api.ISCSIVolumeSource, s conversion.Scope) error { - return autoConvert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in, out, s) -} - -func autoConvert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in *api.ISCSIVolumeSource, out *v1.ISCSIVolumeSource, s conversion.Scope) error { - out.TargetPortal = in.TargetPortal - out.IQN = in.IQN - out.Lun = in.Lun - out.ISCSIInterface = in.ISCSIInterface - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - out.Portals = *(*[]string)(unsafe.Pointer(&in.Portals)) - out.DiscoveryCHAPAuth = in.DiscoveryCHAPAuth - out.SessionCHAPAuth = in.SessionCHAPAuth - out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) - out.InitiatorName = (*string)(unsafe.Pointer(in.InitiatorName)) - return nil -} - -// Convert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource is an autogenerated conversion function. -func Convert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in *api.ISCSIVolumeSource, out *v1.ISCSIVolumeSource, s conversion.Scope) error { - return autoConvert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in, out, s) -} - -func autoConvert_v1_KeyToPath_To_api_KeyToPath(in *v1.KeyToPath, out *api.KeyToPath, s conversion.Scope) error { - out.Key = in.Key - out.Path = in.Path - out.Mode = (*int32)(unsafe.Pointer(in.Mode)) - return nil -} - -// Convert_v1_KeyToPath_To_api_KeyToPath is an autogenerated conversion function. -func Convert_v1_KeyToPath_To_api_KeyToPath(in *v1.KeyToPath, out *api.KeyToPath, s conversion.Scope) error { - return autoConvert_v1_KeyToPath_To_api_KeyToPath(in, out, s) -} - -func autoConvert_api_KeyToPath_To_v1_KeyToPath(in *api.KeyToPath, out *v1.KeyToPath, s conversion.Scope) error { - out.Key = in.Key - out.Path = in.Path - out.Mode = (*int32)(unsafe.Pointer(in.Mode)) - return nil -} - -// Convert_api_KeyToPath_To_v1_KeyToPath is an autogenerated conversion function. -func Convert_api_KeyToPath_To_v1_KeyToPath(in *api.KeyToPath, out *v1.KeyToPath, s conversion.Scope) error { - return autoConvert_api_KeyToPath_To_v1_KeyToPath(in, out, s) -} - -func autoConvert_v1_Lifecycle_To_api_Lifecycle(in *v1.Lifecycle, out *api.Lifecycle, s conversion.Scope) error { - out.PostStart = (*api.Handler)(unsafe.Pointer(in.PostStart)) - out.PreStop = (*api.Handler)(unsafe.Pointer(in.PreStop)) - return nil -} - -// Convert_v1_Lifecycle_To_api_Lifecycle is an autogenerated conversion function. -func Convert_v1_Lifecycle_To_api_Lifecycle(in *v1.Lifecycle, out *api.Lifecycle, s conversion.Scope) error { - return autoConvert_v1_Lifecycle_To_api_Lifecycle(in, out, s) -} - -func autoConvert_api_Lifecycle_To_v1_Lifecycle(in *api.Lifecycle, out *v1.Lifecycle, s conversion.Scope) error { - out.PostStart = (*v1.Handler)(unsafe.Pointer(in.PostStart)) - out.PreStop = (*v1.Handler)(unsafe.Pointer(in.PreStop)) - return nil -} - -// Convert_api_Lifecycle_To_v1_Lifecycle is an autogenerated conversion function. -func Convert_api_Lifecycle_To_v1_Lifecycle(in *api.Lifecycle, out *v1.Lifecycle, s conversion.Scope) error { - return autoConvert_api_Lifecycle_To_v1_Lifecycle(in, out, s) -} - -func autoConvert_v1_LimitRange_To_api_LimitRange(in *v1.LimitRange, out *api.LimitRange, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_LimitRangeSpec_To_api_LimitRangeSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -// Convert_v1_LimitRange_To_api_LimitRange is an autogenerated conversion function. 
-func Convert_v1_LimitRange_To_api_LimitRange(in *v1.LimitRange, out *api.LimitRange, s conversion.Scope) error { - return autoConvert_v1_LimitRange_To_api_LimitRange(in, out, s) -} - -func autoConvert_api_LimitRange_To_v1_LimitRange(in *api.LimitRange, out *v1.LimitRange, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_LimitRangeSpec_To_v1_LimitRangeSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -// Convert_api_LimitRange_To_v1_LimitRange is an autogenerated conversion function. -func Convert_api_LimitRange_To_v1_LimitRange(in *api.LimitRange, out *v1.LimitRange, s conversion.Scope) error { - return autoConvert_api_LimitRange_To_v1_LimitRange(in, out, s) -} - -func autoConvert_v1_LimitRangeItem_To_api_LimitRangeItem(in *v1.LimitRangeItem, out *api.LimitRangeItem, s conversion.Scope) error { - out.Type = api.LimitType(in.Type) - out.Max = *(*api.ResourceList)(unsafe.Pointer(&in.Max)) - out.Min = *(*api.ResourceList)(unsafe.Pointer(&in.Min)) - out.Default = *(*api.ResourceList)(unsafe.Pointer(&in.Default)) - out.DefaultRequest = *(*api.ResourceList)(unsafe.Pointer(&in.DefaultRequest)) - out.MaxLimitRequestRatio = *(*api.ResourceList)(unsafe.Pointer(&in.MaxLimitRequestRatio)) - return nil -} - -// Convert_v1_LimitRangeItem_To_api_LimitRangeItem is an autogenerated conversion function. -func Convert_v1_LimitRangeItem_To_api_LimitRangeItem(in *v1.LimitRangeItem, out *api.LimitRangeItem, s conversion.Scope) error { - return autoConvert_v1_LimitRangeItem_To_api_LimitRangeItem(in, out, s) -} - -func autoConvert_api_LimitRangeItem_To_v1_LimitRangeItem(in *api.LimitRangeItem, out *v1.LimitRangeItem, s conversion.Scope) error { - out.Type = v1.LimitType(in.Type) - out.Max = *(*v1.ResourceList)(unsafe.Pointer(&in.Max)) - out.Min = *(*v1.ResourceList)(unsafe.Pointer(&in.Min)) - out.Default = *(*v1.ResourceList)(unsafe.Pointer(&in.Default)) - out.DefaultRequest = *(*v1.ResourceList)(unsafe.Pointer(&in.DefaultRequest)) - out.MaxLimitRequestRatio = *(*v1.ResourceList)(unsafe.Pointer(&in.MaxLimitRequestRatio)) - return nil -} - -// Convert_api_LimitRangeItem_To_v1_LimitRangeItem is an autogenerated conversion function. -func Convert_api_LimitRangeItem_To_v1_LimitRangeItem(in *api.LimitRangeItem, out *v1.LimitRangeItem, s conversion.Scope) error { - return autoConvert_api_LimitRangeItem_To_v1_LimitRangeItem(in, out, s) -} - -func autoConvert_v1_LimitRangeList_To_api_LimitRangeList(in *v1.LimitRangeList, out *api.LimitRangeList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]api.LimitRange)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1_LimitRangeList_To_api_LimitRangeList is an autogenerated conversion function. -func Convert_v1_LimitRangeList_To_api_LimitRangeList(in *v1.LimitRangeList, out *api.LimitRangeList, s conversion.Scope) error { - return autoConvert_v1_LimitRangeList_To_api_LimitRangeList(in, out, s) -} - -func autoConvert_api_LimitRangeList_To_v1_LimitRangeList(in *api.LimitRangeList, out *v1.LimitRangeList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1.LimitRange)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_api_LimitRangeList_To_v1_LimitRangeList is an autogenerated conversion function. 
-func Convert_api_LimitRangeList_To_v1_LimitRangeList(in *api.LimitRangeList, out *v1.LimitRangeList, s conversion.Scope) error { - return autoConvert_api_LimitRangeList_To_v1_LimitRangeList(in, out, s) -} - -func autoConvert_v1_LimitRangeSpec_To_api_LimitRangeSpec(in *v1.LimitRangeSpec, out *api.LimitRangeSpec, s conversion.Scope) error { - out.Limits = *(*[]api.LimitRangeItem)(unsafe.Pointer(&in.Limits)) - return nil -} - -// Convert_v1_LimitRangeSpec_To_api_LimitRangeSpec is an autogenerated conversion function. -func Convert_v1_LimitRangeSpec_To_api_LimitRangeSpec(in *v1.LimitRangeSpec, out *api.LimitRangeSpec, s conversion.Scope) error { - return autoConvert_v1_LimitRangeSpec_To_api_LimitRangeSpec(in, out, s) -} - -func autoConvert_api_LimitRangeSpec_To_v1_LimitRangeSpec(in *api.LimitRangeSpec, out *v1.LimitRangeSpec, s conversion.Scope) error { - out.Limits = *(*[]v1.LimitRangeItem)(unsafe.Pointer(&in.Limits)) - return nil -} - -// Convert_api_LimitRangeSpec_To_v1_LimitRangeSpec is an autogenerated conversion function. -func Convert_api_LimitRangeSpec_To_v1_LimitRangeSpec(in *api.LimitRangeSpec, out *v1.LimitRangeSpec, s conversion.Scope) error { - return autoConvert_api_LimitRangeSpec_To_v1_LimitRangeSpec(in, out, s) -} - -func autoConvert_v1_List_To_api_List(in *v1.List, out *api.List, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]runtime.Object, len(*in)) - for i := range *in { - if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_v1_List_To_api_List is an autogenerated conversion function. -func Convert_v1_List_To_api_List(in *v1.List, out *api.List, s conversion.Scope) error { - return autoConvert_v1_List_To_api_List(in, out, s) -} - -func autoConvert_api_List_To_v1_List(in *api.List, out *v1.List, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]runtime.RawExtension, len(*in)) - for i := range *in { - if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_api_List_To_v1_List is an autogenerated conversion function. -func Convert_api_List_To_v1_List(in *api.List, out *v1.List, s conversion.Scope) error { - return autoConvert_api_List_To_v1_List(in, out, s) -} - -func autoConvert_v1_ListOptions_To_api_ListOptions(in *v1.ListOptions, out *api.ListOptions, s conversion.Scope) error { - if err := meta_v1.Convert_string_To_labels_Selector(&in.LabelSelector, &out.LabelSelector, s); err != nil { - return err - } - if err := meta_v1.Convert_string_To_fields_Selector(&in.FieldSelector, &out.FieldSelector, s); err != nil { - return err - } - out.IncludeUninitialized = in.IncludeUninitialized - out.Watch = in.Watch - out.ResourceVersion = in.ResourceVersion - out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) - return nil -} - -// Convert_v1_ListOptions_To_api_ListOptions is an autogenerated conversion function. 
-func Convert_v1_ListOptions_To_api_ListOptions(in *v1.ListOptions, out *api.ListOptions, s conversion.Scope) error { - return autoConvert_v1_ListOptions_To_api_ListOptions(in, out, s) -} - -func autoConvert_api_ListOptions_To_v1_ListOptions(in *api.ListOptions, out *v1.ListOptions, s conversion.Scope) error { - if err := meta_v1.Convert_labels_Selector_To_string(&in.LabelSelector, &out.LabelSelector, s); err != nil { - return err - } - if err := meta_v1.Convert_fields_Selector_To_string(&in.FieldSelector, &out.FieldSelector, s); err != nil { - return err - } - out.IncludeUninitialized = in.IncludeUninitialized - out.Watch = in.Watch - out.ResourceVersion = in.ResourceVersion - out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) - return nil -} - -// Convert_api_ListOptions_To_v1_ListOptions is an autogenerated conversion function. -func Convert_api_ListOptions_To_v1_ListOptions(in *api.ListOptions, out *v1.ListOptions, s conversion.Scope) error { - return autoConvert_api_ListOptions_To_v1_ListOptions(in, out, s) -} - -func autoConvert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress(in *v1.LoadBalancerIngress, out *api.LoadBalancerIngress, s conversion.Scope) error { - out.IP = in.IP - out.Hostname = in.Hostname - return nil -} - -// Convert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress is an autogenerated conversion function. -func Convert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress(in *v1.LoadBalancerIngress, out *api.LoadBalancerIngress, s conversion.Scope) error { - return autoConvert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress(in, out, s) -} - -func autoConvert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress(in *api.LoadBalancerIngress, out *v1.LoadBalancerIngress, s conversion.Scope) error { - out.IP = in.IP - out.Hostname = in.Hostname - return nil -} - -// Convert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress is an autogenerated conversion function. -func Convert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress(in *api.LoadBalancerIngress, out *v1.LoadBalancerIngress, s conversion.Scope) error { - return autoConvert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress(in, out, s) -} - -func autoConvert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(in *v1.LoadBalancerStatus, out *api.LoadBalancerStatus, s conversion.Scope) error { - out.Ingress = *(*[]api.LoadBalancerIngress)(unsafe.Pointer(&in.Ingress)) - return nil -} - -// Convert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus is an autogenerated conversion function. -func Convert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(in *v1.LoadBalancerStatus, out *api.LoadBalancerStatus, s conversion.Scope) error { - return autoConvert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(in, out, s) -} - -func autoConvert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(in *api.LoadBalancerStatus, out *v1.LoadBalancerStatus, s conversion.Scope) error { - out.Ingress = *(*[]v1.LoadBalancerIngress)(unsafe.Pointer(&in.Ingress)) - return nil -} - -// Convert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus is an autogenerated conversion function. 
-func Convert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(in *api.LoadBalancerStatus, out *v1.LoadBalancerStatus, s conversion.Scope) error { - return autoConvert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(in, out, s) -} - -func autoConvert_v1_LocalObjectReference_To_api_LocalObjectReference(in *v1.LocalObjectReference, out *api.LocalObjectReference, s conversion.Scope) error { - out.Name = in.Name - return nil -} - -// Convert_v1_LocalObjectReference_To_api_LocalObjectReference is an autogenerated conversion function. -func Convert_v1_LocalObjectReference_To_api_LocalObjectReference(in *v1.LocalObjectReference, out *api.LocalObjectReference, s conversion.Scope) error { - return autoConvert_v1_LocalObjectReference_To_api_LocalObjectReference(in, out, s) -} - -func autoConvert_api_LocalObjectReference_To_v1_LocalObjectReference(in *api.LocalObjectReference, out *v1.LocalObjectReference, s conversion.Scope) error { - out.Name = in.Name - return nil -} - -// Convert_api_LocalObjectReference_To_v1_LocalObjectReference is an autogenerated conversion function. -func Convert_api_LocalObjectReference_To_v1_LocalObjectReference(in *api.LocalObjectReference, out *v1.LocalObjectReference, s conversion.Scope) error { - return autoConvert_api_LocalObjectReference_To_v1_LocalObjectReference(in, out, s) -} - -func autoConvert_v1_LocalVolumeSource_To_api_LocalVolumeSource(in *v1.LocalVolumeSource, out *api.LocalVolumeSource, s conversion.Scope) error { - out.Path = in.Path - return nil -} - -// Convert_v1_LocalVolumeSource_To_api_LocalVolumeSource is an autogenerated conversion function. -func Convert_v1_LocalVolumeSource_To_api_LocalVolumeSource(in *v1.LocalVolumeSource, out *api.LocalVolumeSource, s conversion.Scope) error { - return autoConvert_v1_LocalVolumeSource_To_api_LocalVolumeSource(in, out, s) -} - -func autoConvert_api_LocalVolumeSource_To_v1_LocalVolumeSource(in *api.LocalVolumeSource, out *v1.LocalVolumeSource, s conversion.Scope) error { - out.Path = in.Path - return nil -} - -// Convert_api_LocalVolumeSource_To_v1_LocalVolumeSource is an autogenerated conversion function. -func Convert_api_LocalVolumeSource_To_v1_LocalVolumeSource(in *api.LocalVolumeSource, out *v1.LocalVolumeSource, s conversion.Scope) error { - return autoConvert_api_LocalVolumeSource_To_v1_LocalVolumeSource(in, out, s) -} - -func autoConvert_v1_NFSVolumeSource_To_api_NFSVolumeSource(in *v1.NFSVolumeSource, out *api.NFSVolumeSource, s conversion.Scope) error { - out.Server = in.Server - out.Path = in.Path - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_v1_NFSVolumeSource_To_api_NFSVolumeSource is an autogenerated conversion function. -func Convert_v1_NFSVolumeSource_To_api_NFSVolumeSource(in *v1.NFSVolumeSource, out *api.NFSVolumeSource, s conversion.Scope) error { - return autoConvert_v1_NFSVolumeSource_To_api_NFSVolumeSource(in, out, s) -} - -func autoConvert_api_NFSVolumeSource_To_v1_NFSVolumeSource(in *api.NFSVolumeSource, out *v1.NFSVolumeSource, s conversion.Scope) error { - out.Server = in.Server - out.Path = in.Path - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_api_NFSVolumeSource_To_v1_NFSVolumeSource is an autogenerated conversion function. 
-func Convert_api_NFSVolumeSource_To_v1_NFSVolumeSource(in *api.NFSVolumeSource, out *v1.NFSVolumeSource, s conversion.Scope) error { - return autoConvert_api_NFSVolumeSource_To_v1_NFSVolumeSource(in, out, s) -} - -func autoConvert_v1_Namespace_To_api_Namespace(in *v1.Namespace, out *api.Namespace, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_NamespaceSpec_To_api_NamespaceSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_NamespaceStatus_To_api_NamespaceStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1_Namespace_To_api_Namespace is an autogenerated conversion function. -func Convert_v1_Namespace_To_api_Namespace(in *v1.Namespace, out *api.Namespace, s conversion.Scope) error { - return autoConvert_v1_Namespace_To_api_Namespace(in, out, s) -} - -func autoConvert_api_Namespace_To_v1_Namespace(in *api.Namespace, out *v1.Namespace, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_NamespaceSpec_To_v1_NamespaceSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_api_NamespaceStatus_To_v1_NamespaceStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_api_Namespace_To_v1_Namespace is an autogenerated conversion function. -func Convert_api_Namespace_To_v1_Namespace(in *api.Namespace, out *v1.Namespace, s conversion.Scope) error { - return autoConvert_api_Namespace_To_v1_Namespace(in, out, s) -} - -func autoConvert_v1_NamespaceList_To_api_NamespaceList(in *v1.NamespaceList, out *api.NamespaceList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]api.Namespace)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1_NamespaceList_To_api_NamespaceList is an autogenerated conversion function. -func Convert_v1_NamespaceList_To_api_NamespaceList(in *v1.NamespaceList, out *api.NamespaceList, s conversion.Scope) error { - return autoConvert_v1_NamespaceList_To_api_NamespaceList(in, out, s) -} - -func autoConvert_api_NamespaceList_To_v1_NamespaceList(in *api.NamespaceList, out *v1.NamespaceList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1.Namespace)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_api_NamespaceList_To_v1_NamespaceList is an autogenerated conversion function. -func Convert_api_NamespaceList_To_v1_NamespaceList(in *api.NamespaceList, out *v1.NamespaceList, s conversion.Scope) error { - return autoConvert_api_NamespaceList_To_v1_NamespaceList(in, out, s) -} - -func autoConvert_v1_NamespaceSpec_To_api_NamespaceSpec(in *v1.NamespaceSpec, out *api.NamespaceSpec, s conversion.Scope) error { - out.Finalizers = *(*[]api.FinalizerName)(unsafe.Pointer(&in.Finalizers)) - return nil -} - -// Convert_v1_NamespaceSpec_To_api_NamespaceSpec is an autogenerated conversion function. -func Convert_v1_NamespaceSpec_To_api_NamespaceSpec(in *v1.NamespaceSpec, out *api.NamespaceSpec, s conversion.Scope) error { - return autoConvert_v1_NamespaceSpec_To_api_NamespaceSpec(in, out, s) -} - -func autoConvert_api_NamespaceSpec_To_v1_NamespaceSpec(in *api.NamespaceSpec, out *v1.NamespaceSpec, s conversion.Scope) error { - out.Finalizers = *(*[]v1.FinalizerName)(unsafe.Pointer(&in.Finalizers)) - return nil -} - -// Convert_api_NamespaceSpec_To_v1_NamespaceSpec is an autogenerated conversion function. 
-func Convert_api_NamespaceSpec_To_v1_NamespaceSpec(in *api.NamespaceSpec, out *v1.NamespaceSpec, s conversion.Scope) error { - return autoConvert_api_NamespaceSpec_To_v1_NamespaceSpec(in, out, s) -} - -func autoConvert_v1_NamespaceStatus_To_api_NamespaceStatus(in *v1.NamespaceStatus, out *api.NamespaceStatus, s conversion.Scope) error { - out.Phase = api.NamespacePhase(in.Phase) - return nil -} - -// Convert_v1_NamespaceStatus_To_api_NamespaceStatus is an autogenerated conversion function. -func Convert_v1_NamespaceStatus_To_api_NamespaceStatus(in *v1.NamespaceStatus, out *api.NamespaceStatus, s conversion.Scope) error { - return autoConvert_v1_NamespaceStatus_To_api_NamespaceStatus(in, out, s) -} - -func autoConvert_api_NamespaceStatus_To_v1_NamespaceStatus(in *api.NamespaceStatus, out *v1.NamespaceStatus, s conversion.Scope) error { - out.Phase = v1.NamespacePhase(in.Phase) - return nil -} - -// Convert_api_NamespaceStatus_To_v1_NamespaceStatus is an autogenerated conversion function. -func Convert_api_NamespaceStatus_To_v1_NamespaceStatus(in *api.NamespaceStatus, out *v1.NamespaceStatus, s conversion.Scope) error { - return autoConvert_api_NamespaceStatus_To_v1_NamespaceStatus(in, out, s) -} - -func autoConvert_v1_Node_To_api_Node(in *v1.Node, out *api.Node, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_NodeSpec_To_api_NodeSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_NodeStatus_To_api_NodeStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1_Node_To_api_Node is an autogenerated conversion function. -func Convert_v1_Node_To_api_Node(in *v1.Node, out *api.Node, s conversion.Scope) error { - return autoConvert_v1_Node_To_api_Node(in, out, s) -} - -func autoConvert_api_Node_To_v1_Node(in *api.Node, out *v1.Node, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_NodeSpec_To_v1_NodeSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_api_NodeStatus_To_v1_NodeStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_api_Node_To_v1_Node is an autogenerated conversion function. -func Convert_api_Node_To_v1_Node(in *api.Node, out *v1.Node, s conversion.Scope) error { - return autoConvert_api_Node_To_v1_Node(in, out, s) -} - -func autoConvert_v1_NodeAddress_To_api_NodeAddress(in *v1.NodeAddress, out *api.NodeAddress, s conversion.Scope) error { - out.Type = api.NodeAddressType(in.Type) - out.Address = in.Address - return nil -} - -// Convert_v1_NodeAddress_To_api_NodeAddress is an autogenerated conversion function. -func Convert_v1_NodeAddress_To_api_NodeAddress(in *v1.NodeAddress, out *api.NodeAddress, s conversion.Scope) error { - return autoConvert_v1_NodeAddress_To_api_NodeAddress(in, out, s) -} - -func autoConvert_api_NodeAddress_To_v1_NodeAddress(in *api.NodeAddress, out *v1.NodeAddress, s conversion.Scope) error { - out.Type = v1.NodeAddressType(in.Type) - out.Address = in.Address - return nil -} - -// Convert_api_NodeAddress_To_v1_NodeAddress is an autogenerated conversion function. 
-func Convert_api_NodeAddress_To_v1_NodeAddress(in *api.NodeAddress, out *v1.NodeAddress, s conversion.Scope) error { - return autoConvert_api_NodeAddress_To_v1_NodeAddress(in, out, s) -} - -func autoConvert_v1_NodeAffinity_To_api_NodeAffinity(in *v1.NodeAffinity, out *api.NodeAffinity, s conversion.Scope) error { - out.RequiredDuringSchedulingIgnoredDuringExecution = (*api.NodeSelector)(unsafe.Pointer(in.RequiredDuringSchedulingIgnoredDuringExecution)) - out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]api.PreferredSchedulingTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) - return nil -} - -// Convert_v1_NodeAffinity_To_api_NodeAffinity is an autogenerated conversion function. -func Convert_v1_NodeAffinity_To_api_NodeAffinity(in *v1.NodeAffinity, out *api.NodeAffinity, s conversion.Scope) error { - return autoConvert_v1_NodeAffinity_To_api_NodeAffinity(in, out, s) -} - -func autoConvert_api_NodeAffinity_To_v1_NodeAffinity(in *api.NodeAffinity, out *v1.NodeAffinity, s conversion.Scope) error { - out.RequiredDuringSchedulingIgnoredDuringExecution = (*v1.NodeSelector)(unsafe.Pointer(in.RequiredDuringSchedulingIgnoredDuringExecution)) - out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]v1.PreferredSchedulingTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) - return nil -} - -// Convert_api_NodeAffinity_To_v1_NodeAffinity is an autogenerated conversion function. -func Convert_api_NodeAffinity_To_v1_NodeAffinity(in *api.NodeAffinity, out *v1.NodeAffinity, s conversion.Scope) error { - return autoConvert_api_NodeAffinity_To_v1_NodeAffinity(in, out, s) -} - -func autoConvert_v1_NodeCondition_To_api_NodeCondition(in *v1.NodeCondition, out *api.NodeCondition, s conversion.Scope) error { - out.Type = api.NodeConditionType(in.Type) - out.Status = api.ConditionStatus(in.Status) - out.LastHeartbeatTime = in.LastHeartbeatTime - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -// Convert_v1_NodeCondition_To_api_NodeCondition is an autogenerated conversion function. -func Convert_v1_NodeCondition_To_api_NodeCondition(in *v1.NodeCondition, out *api.NodeCondition, s conversion.Scope) error { - return autoConvert_v1_NodeCondition_To_api_NodeCondition(in, out, s) -} - -func autoConvert_api_NodeCondition_To_v1_NodeCondition(in *api.NodeCondition, out *v1.NodeCondition, s conversion.Scope) error { - out.Type = v1.NodeConditionType(in.Type) - out.Status = v1.ConditionStatus(in.Status) - out.LastHeartbeatTime = in.LastHeartbeatTime - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -// Convert_api_NodeCondition_To_v1_NodeCondition is an autogenerated conversion function. -func Convert_api_NodeCondition_To_v1_NodeCondition(in *api.NodeCondition, out *v1.NodeCondition, s conversion.Scope) error { - return autoConvert_api_NodeCondition_To_v1_NodeCondition(in, out, s) -} - -func autoConvert_v1_NodeConfigSource_To_api_NodeConfigSource(in *v1.NodeConfigSource, out *api.NodeConfigSource, s conversion.Scope) error { - out.ConfigMapRef = (*api.ObjectReference)(unsafe.Pointer(in.ConfigMapRef)) - return nil -} - -// Convert_v1_NodeConfigSource_To_api_NodeConfigSource is an autogenerated conversion function. 
-func Convert_v1_NodeConfigSource_To_api_NodeConfigSource(in *v1.NodeConfigSource, out *api.NodeConfigSource, s conversion.Scope) error { - return autoConvert_v1_NodeConfigSource_To_api_NodeConfigSource(in, out, s) -} - -func autoConvert_api_NodeConfigSource_To_v1_NodeConfigSource(in *api.NodeConfigSource, out *v1.NodeConfigSource, s conversion.Scope) error { - out.ConfigMapRef = (*v1.ObjectReference)(unsafe.Pointer(in.ConfigMapRef)) - return nil -} - -// Convert_api_NodeConfigSource_To_v1_NodeConfigSource is an autogenerated conversion function. -func Convert_api_NodeConfigSource_To_v1_NodeConfigSource(in *api.NodeConfigSource, out *v1.NodeConfigSource, s conversion.Scope) error { - return autoConvert_api_NodeConfigSource_To_v1_NodeConfigSource(in, out, s) -} - -func autoConvert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints(in *v1.NodeDaemonEndpoints, out *api.NodeDaemonEndpoints, s conversion.Scope) error { - if err := Convert_v1_DaemonEndpoint_To_api_DaemonEndpoint(&in.KubeletEndpoint, &out.KubeletEndpoint, s); err != nil { - return err - } - return nil -} - -// Convert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints is an autogenerated conversion function. -func Convert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints(in *v1.NodeDaemonEndpoints, out *api.NodeDaemonEndpoints, s conversion.Scope) error { - return autoConvert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints(in, out, s) -} - -func autoConvert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in *api.NodeDaemonEndpoints, out *v1.NodeDaemonEndpoints, s conversion.Scope) error { - if err := Convert_api_DaemonEndpoint_To_v1_DaemonEndpoint(&in.KubeletEndpoint, &out.KubeletEndpoint, s); err != nil { - return err - } - return nil -} - -// Convert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints is an autogenerated conversion function. -func Convert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in *api.NodeDaemonEndpoints, out *v1.NodeDaemonEndpoints, s conversion.Scope) error { - return autoConvert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in, out, s) -} - -func autoConvert_v1_NodeList_To_api_NodeList(in *v1.NodeList, out *api.NodeList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]api.Node)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1_NodeList_To_api_NodeList is an autogenerated conversion function. -func Convert_v1_NodeList_To_api_NodeList(in *v1.NodeList, out *api.NodeList, s conversion.Scope) error { - return autoConvert_v1_NodeList_To_api_NodeList(in, out, s) -} - -func autoConvert_api_NodeList_To_v1_NodeList(in *api.NodeList, out *v1.NodeList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1.Node)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_api_NodeList_To_v1_NodeList is an autogenerated conversion function. -func Convert_api_NodeList_To_v1_NodeList(in *api.NodeList, out *v1.NodeList, s conversion.Scope) error { - return autoConvert_api_NodeList_To_v1_NodeList(in, out, s) -} - -func autoConvert_v1_NodeProxyOptions_To_api_NodeProxyOptions(in *v1.NodeProxyOptions, out *api.NodeProxyOptions, s conversion.Scope) error { - out.Path = in.Path - return nil -} - -// Convert_v1_NodeProxyOptions_To_api_NodeProxyOptions is an autogenerated conversion function. 
-func Convert_v1_NodeProxyOptions_To_api_NodeProxyOptions(in *v1.NodeProxyOptions, out *api.NodeProxyOptions, s conversion.Scope) error { - return autoConvert_v1_NodeProxyOptions_To_api_NodeProxyOptions(in, out, s) -} - -func autoConvert_api_NodeProxyOptions_To_v1_NodeProxyOptions(in *api.NodeProxyOptions, out *v1.NodeProxyOptions, s conversion.Scope) error { - out.Path = in.Path - return nil -} - -// Convert_api_NodeProxyOptions_To_v1_NodeProxyOptions is an autogenerated conversion function. -func Convert_api_NodeProxyOptions_To_v1_NodeProxyOptions(in *api.NodeProxyOptions, out *v1.NodeProxyOptions, s conversion.Scope) error { - return autoConvert_api_NodeProxyOptions_To_v1_NodeProxyOptions(in, out, s) -} - -func autoConvert_v1_NodeResources_To_api_NodeResources(in *v1.NodeResources, out *api.NodeResources, s conversion.Scope) error { - out.Capacity = *(*api.ResourceList)(unsafe.Pointer(&in.Capacity)) - return nil -} - -// Convert_v1_NodeResources_To_api_NodeResources is an autogenerated conversion function. -func Convert_v1_NodeResources_To_api_NodeResources(in *v1.NodeResources, out *api.NodeResources, s conversion.Scope) error { - return autoConvert_v1_NodeResources_To_api_NodeResources(in, out, s) -} - -func autoConvert_api_NodeResources_To_v1_NodeResources(in *api.NodeResources, out *v1.NodeResources, s conversion.Scope) error { - out.Capacity = *(*v1.ResourceList)(unsafe.Pointer(&in.Capacity)) - return nil -} - -// Convert_api_NodeResources_To_v1_NodeResources is an autogenerated conversion function. -func Convert_api_NodeResources_To_v1_NodeResources(in *api.NodeResources, out *v1.NodeResources, s conversion.Scope) error { - return autoConvert_api_NodeResources_To_v1_NodeResources(in, out, s) -} - -func autoConvert_v1_NodeSelector_To_api_NodeSelector(in *v1.NodeSelector, out *api.NodeSelector, s conversion.Scope) error { - out.NodeSelectorTerms = *(*[]api.NodeSelectorTerm)(unsafe.Pointer(&in.NodeSelectorTerms)) - return nil -} - -// Convert_v1_NodeSelector_To_api_NodeSelector is an autogenerated conversion function. -func Convert_v1_NodeSelector_To_api_NodeSelector(in *v1.NodeSelector, out *api.NodeSelector, s conversion.Scope) error { - return autoConvert_v1_NodeSelector_To_api_NodeSelector(in, out, s) -} - -func autoConvert_api_NodeSelector_To_v1_NodeSelector(in *api.NodeSelector, out *v1.NodeSelector, s conversion.Scope) error { - out.NodeSelectorTerms = *(*[]v1.NodeSelectorTerm)(unsafe.Pointer(&in.NodeSelectorTerms)) - return nil -} - -// Convert_api_NodeSelector_To_v1_NodeSelector is an autogenerated conversion function. -func Convert_api_NodeSelector_To_v1_NodeSelector(in *api.NodeSelector, out *v1.NodeSelector, s conversion.Scope) error { - return autoConvert_api_NodeSelector_To_v1_NodeSelector(in, out, s) -} - -func autoConvert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement(in *v1.NodeSelectorRequirement, out *api.NodeSelectorRequirement, s conversion.Scope) error { - out.Key = in.Key - out.Operator = api.NodeSelectorOperator(in.Operator) - out.Values = *(*[]string)(unsafe.Pointer(&in.Values)) - return nil -} - -// Convert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement is an autogenerated conversion function. 
-func Convert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement(in *v1.NodeSelectorRequirement, out *api.NodeSelectorRequirement, s conversion.Scope) error { - return autoConvert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement(in, out, s) -} - -func autoConvert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in *api.NodeSelectorRequirement, out *v1.NodeSelectorRequirement, s conversion.Scope) error { - out.Key = in.Key - out.Operator = v1.NodeSelectorOperator(in.Operator) - out.Values = *(*[]string)(unsafe.Pointer(&in.Values)) - return nil -} - -// Convert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement is an autogenerated conversion function. -func Convert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in *api.NodeSelectorRequirement, out *v1.NodeSelectorRequirement, s conversion.Scope) error { - return autoConvert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in, out, s) -} - -func autoConvert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm(in *v1.NodeSelectorTerm, out *api.NodeSelectorTerm, s conversion.Scope) error { - out.MatchExpressions = *(*[]api.NodeSelectorRequirement)(unsafe.Pointer(&in.MatchExpressions)) - return nil -} - -// Convert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm is an autogenerated conversion function. -func Convert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm(in *v1.NodeSelectorTerm, out *api.NodeSelectorTerm, s conversion.Scope) error { - return autoConvert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm(in, out, s) -} - -func autoConvert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm(in *api.NodeSelectorTerm, out *v1.NodeSelectorTerm, s conversion.Scope) error { - out.MatchExpressions = *(*[]v1.NodeSelectorRequirement)(unsafe.Pointer(&in.MatchExpressions)) - return nil -} - -// Convert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm is an autogenerated conversion function. -func Convert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm(in *api.NodeSelectorTerm, out *v1.NodeSelectorTerm, s conversion.Scope) error { - return autoConvert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm(in, out, s) -} - -func autoConvert_v1_NodeSpec_To_api_NodeSpec(in *v1.NodeSpec, out *api.NodeSpec, s conversion.Scope) error { - out.PodCIDR = in.PodCIDR - out.ExternalID = in.ExternalID - out.ProviderID = in.ProviderID - out.Unschedulable = in.Unschedulable - out.Taints = *(*[]api.Taint)(unsafe.Pointer(&in.Taints)) - out.ConfigSource = (*api.NodeConfigSource)(unsafe.Pointer(in.ConfigSource)) - return nil -} - -// Convert_v1_NodeSpec_To_api_NodeSpec is an autogenerated conversion function. -func Convert_v1_NodeSpec_To_api_NodeSpec(in *v1.NodeSpec, out *api.NodeSpec, s conversion.Scope) error { - return autoConvert_v1_NodeSpec_To_api_NodeSpec(in, out, s) -} - -func autoConvert_api_NodeSpec_To_v1_NodeSpec(in *api.NodeSpec, out *v1.NodeSpec, s conversion.Scope) error { - out.PodCIDR = in.PodCIDR - out.ExternalID = in.ExternalID - out.ProviderID = in.ProviderID - out.Unschedulable = in.Unschedulable - out.Taints = *(*[]v1.Taint)(unsafe.Pointer(&in.Taints)) - out.ConfigSource = (*v1.NodeConfigSource)(unsafe.Pointer(in.ConfigSource)) - return nil -} - -// Convert_api_NodeSpec_To_v1_NodeSpec is an autogenerated conversion function. 
-func Convert_api_NodeSpec_To_v1_NodeSpec(in *api.NodeSpec, out *v1.NodeSpec, s conversion.Scope) error { - return autoConvert_api_NodeSpec_To_v1_NodeSpec(in, out, s) -} - -func autoConvert_v1_NodeStatus_To_api_NodeStatus(in *v1.NodeStatus, out *api.NodeStatus, s conversion.Scope) error { - out.Capacity = *(*api.ResourceList)(unsafe.Pointer(&in.Capacity)) - out.Allocatable = *(*api.ResourceList)(unsafe.Pointer(&in.Allocatable)) - out.Phase = api.NodePhase(in.Phase) - out.Conditions = *(*[]api.NodeCondition)(unsafe.Pointer(&in.Conditions)) - out.Addresses = *(*[]api.NodeAddress)(unsafe.Pointer(&in.Addresses)) - if err := Convert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints(&in.DaemonEndpoints, &out.DaemonEndpoints, s); err != nil { - return err - } - if err := Convert_v1_NodeSystemInfo_To_api_NodeSystemInfo(&in.NodeInfo, &out.NodeInfo, s); err != nil { - return err - } - out.Images = *(*[]api.ContainerImage)(unsafe.Pointer(&in.Images)) - out.VolumesInUse = *(*[]api.UniqueVolumeName)(unsafe.Pointer(&in.VolumesInUse)) - out.VolumesAttached = *(*[]api.AttachedVolume)(unsafe.Pointer(&in.VolumesAttached)) - return nil -} - -// Convert_v1_NodeStatus_To_api_NodeStatus is an autogenerated conversion function. -func Convert_v1_NodeStatus_To_api_NodeStatus(in *v1.NodeStatus, out *api.NodeStatus, s conversion.Scope) error { - return autoConvert_v1_NodeStatus_To_api_NodeStatus(in, out, s) -} - -func autoConvert_api_NodeStatus_To_v1_NodeStatus(in *api.NodeStatus, out *v1.NodeStatus, s conversion.Scope) error { - out.Capacity = *(*v1.ResourceList)(unsafe.Pointer(&in.Capacity)) - out.Allocatable = *(*v1.ResourceList)(unsafe.Pointer(&in.Allocatable)) - out.Phase = v1.NodePhase(in.Phase) - out.Conditions = *(*[]v1.NodeCondition)(unsafe.Pointer(&in.Conditions)) - out.Addresses = *(*[]v1.NodeAddress)(unsafe.Pointer(&in.Addresses)) - if err := Convert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(&in.DaemonEndpoints, &out.DaemonEndpoints, s); err != nil { - return err - } - if err := Convert_api_NodeSystemInfo_To_v1_NodeSystemInfo(&in.NodeInfo, &out.NodeInfo, s); err != nil { - return err - } - out.Images = *(*[]v1.ContainerImage)(unsafe.Pointer(&in.Images)) - out.VolumesInUse = *(*[]v1.UniqueVolumeName)(unsafe.Pointer(&in.VolumesInUse)) - out.VolumesAttached = *(*[]v1.AttachedVolume)(unsafe.Pointer(&in.VolumesAttached)) - return nil -} - -// Convert_api_NodeStatus_To_v1_NodeStatus is an autogenerated conversion function. -func Convert_api_NodeStatus_To_v1_NodeStatus(in *api.NodeStatus, out *v1.NodeStatus, s conversion.Scope) error { - return autoConvert_api_NodeStatus_To_v1_NodeStatus(in, out, s) -} - -func autoConvert_v1_NodeSystemInfo_To_api_NodeSystemInfo(in *v1.NodeSystemInfo, out *api.NodeSystemInfo, s conversion.Scope) error { - out.MachineID = in.MachineID - out.SystemUUID = in.SystemUUID - out.BootID = in.BootID - out.KernelVersion = in.KernelVersion - out.OSImage = in.OSImage - out.ContainerRuntimeVersion = in.ContainerRuntimeVersion - out.KubeletVersion = in.KubeletVersion - out.KubeProxyVersion = in.KubeProxyVersion - out.OperatingSystem = in.OperatingSystem - out.Architecture = in.Architecture - return nil -} - -// Convert_v1_NodeSystemInfo_To_api_NodeSystemInfo is an autogenerated conversion function. 
-func Convert_v1_NodeSystemInfo_To_api_NodeSystemInfo(in *v1.NodeSystemInfo, out *api.NodeSystemInfo, s conversion.Scope) error { - return autoConvert_v1_NodeSystemInfo_To_api_NodeSystemInfo(in, out, s) -} - -func autoConvert_api_NodeSystemInfo_To_v1_NodeSystemInfo(in *api.NodeSystemInfo, out *v1.NodeSystemInfo, s conversion.Scope) error { - out.MachineID = in.MachineID - out.SystemUUID = in.SystemUUID - out.BootID = in.BootID - out.KernelVersion = in.KernelVersion - out.OSImage = in.OSImage - out.ContainerRuntimeVersion = in.ContainerRuntimeVersion - out.KubeletVersion = in.KubeletVersion - out.KubeProxyVersion = in.KubeProxyVersion - out.OperatingSystem = in.OperatingSystem - out.Architecture = in.Architecture - return nil -} - -// Convert_api_NodeSystemInfo_To_v1_NodeSystemInfo is an autogenerated conversion function. -func Convert_api_NodeSystemInfo_To_v1_NodeSystemInfo(in *api.NodeSystemInfo, out *v1.NodeSystemInfo, s conversion.Scope) error { - return autoConvert_api_NodeSystemInfo_To_v1_NodeSystemInfo(in, out, s) -} - -func autoConvert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(in *v1.ObjectFieldSelector, out *api.ObjectFieldSelector, s conversion.Scope) error { - out.APIVersion = in.APIVersion - out.FieldPath = in.FieldPath - return nil -} - -// Convert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector is an autogenerated conversion function. -func Convert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(in *v1.ObjectFieldSelector, out *api.ObjectFieldSelector, s conversion.Scope) error { - return autoConvert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(in, out, s) -} - -func autoConvert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(in *api.ObjectFieldSelector, out *v1.ObjectFieldSelector, s conversion.Scope) error { - out.APIVersion = in.APIVersion - out.FieldPath = in.FieldPath - return nil -} - -// Convert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector is an autogenerated conversion function. -func Convert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(in *api.ObjectFieldSelector, out *v1.ObjectFieldSelector, s conversion.Scope) error { - return autoConvert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(in, out, s) -} - -func autoConvert_v1_ObjectMeta_To_api_ObjectMeta(in *v1.ObjectMeta, out *api.ObjectMeta, s conversion.Scope) error { - out.Name = in.Name - out.GenerateName = in.GenerateName - out.Namespace = in.Namespace - out.SelfLink = in.SelfLink - out.UID = types.UID(in.UID) - out.ResourceVersion = in.ResourceVersion - out.Generation = in.Generation - out.CreationTimestamp = in.CreationTimestamp - out.DeletionTimestamp = (*meta_v1.Time)(unsafe.Pointer(in.DeletionTimestamp)) - out.DeletionGracePeriodSeconds = (*int64)(unsafe.Pointer(in.DeletionGracePeriodSeconds)) - out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels)) - out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations)) - out.OwnerReferences = *(*[]meta_v1.OwnerReference)(unsafe.Pointer(&in.OwnerReferences)) - out.Initializers = (*meta_v1.Initializers)(unsafe.Pointer(in.Initializers)) - out.Finalizers = *(*[]string)(unsafe.Pointer(&in.Finalizers)) - out.ClusterName = in.ClusterName - return nil -} - -// Convert_v1_ObjectMeta_To_api_ObjectMeta is an autogenerated conversion function. 
-func Convert_v1_ObjectMeta_To_api_ObjectMeta(in *v1.ObjectMeta, out *api.ObjectMeta, s conversion.Scope) error { - return autoConvert_v1_ObjectMeta_To_api_ObjectMeta(in, out, s) -} - -func autoConvert_api_ObjectMeta_To_v1_ObjectMeta(in *api.ObjectMeta, out *v1.ObjectMeta, s conversion.Scope) error { - out.Name = in.Name - out.GenerateName = in.GenerateName - out.Namespace = in.Namespace - out.SelfLink = in.SelfLink - out.UID = types.UID(in.UID) - out.ResourceVersion = in.ResourceVersion - out.Generation = in.Generation - out.CreationTimestamp = in.CreationTimestamp - out.DeletionTimestamp = (*meta_v1.Time)(unsafe.Pointer(in.DeletionTimestamp)) - out.DeletionGracePeriodSeconds = (*int64)(unsafe.Pointer(in.DeletionGracePeriodSeconds)) - out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels)) - out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations)) - out.OwnerReferences = *(*[]meta_v1.OwnerReference)(unsafe.Pointer(&in.OwnerReferences)) - out.Initializers = (*meta_v1.Initializers)(unsafe.Pointer(in.Initializers)) - out.Finalizers = *(*[]string)(unsafe.Pointer(&in.Finalizers)) - out.ClusterName = in.ClusterName - return nil -} - -// Convert_api_ObjectMeta_To_v1_ObjectMeta is an autogenerated conversion function. -func Convert_api_ObjectMeta_To_v1_ObjectMeta(in *api.ObjectMeta, out *v1.ObjectMeta, s conversion.Scope) error { - return autoConvert_api_ObjectMeta_To_v1_ObjectMeta(in, out, s) -} - -func autoConvert_v1_ObjectReference_To_api_ObjectReference(in *v1.ObjectReference, out *api.ObjectReference, s conversion.Scope) error { - out.Kind = in.Kind - out.Namespace = in.Namespace - out.Name = in.Name - out.UID = types.UID(in.UID) - out.APIVersion = in.APIVersion - out.ResourceVersion = in.ResourceVersion - out.FieldPath = in.FieldPath - return nil -} - -// Convert_v1_ObjectReference_To_api_ObjectReference is an autogenerated conversion function. -func Convert_v1_ObjectReference_To_api_ObjectReference(in *v1.ObjectReference, out *api.ObjectReference, s conversion.Scope) error { - return autoConvert_v1_ObjectReference_To_api_ObjectReference(in, out, s) -} - -func autoConvert_api_ObjectReference_To_v1_ObjectReference(in *api.ObjectReference, out *v1.ObjectReference, s conversion.Scope) error { - out.Kind = in.Kind - out.Namespace = in.Namespace - out.Name = in.Name - out.UID = types.UID(in.UID) - out.APIVersion = in.APIVersion - out.ResourceVersion = in.ResourceVersion - out.FieldPath = in.FieldPath - return nil -} - -// Convert_api_ObjectReference_To_v1_ObjectReference is an autogenerated conversion function. -func Convert_api_ObjectReference_To_v1_ObjectReference(in *api.ObjectReference, out *v1.ObjectReference, s conversion.Scope) error { - return autoConvert_api_ObjectReference_To_v1_ObjectReference(in, out, s) -} - -func autoConvert_v1_PersistentVolume_To_api_PersistentVolume(in *v1.PersistentVolume, out *api.PersistentVolume, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1_PersistentVolume_To_api_PersistentVolume is an autogenerated conversion function. 
-func Convert_v1_PersistentVolume_To_api_PersistentVolume(in *v1.PersistentVolume, out *api.PersistentVolume, s conversion.Scope) error { - return autoConvert_v1_PersistentVolume_To_api_PersistentVolume(in, out, s) -} - -func autoConvert_api_PersistentVolume_To_v1_PersistentVolume(in *api.PersistentVolume, out *v1.PersistentVolume, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_api_PersistentVolume_To_v1_PersistentVolume is an autogenerated conversion function. -func Convert_api_PersistentVolume_To_v1_PersistentVolume(in *api.PersistentVolume, out *v1.PersistentVolume, s conversion.Scope) error { - return autoConvert_api_PersistentVolume_To_v1_PersistentVolume(in, out, s) -} - -func autoConvert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim(in *v1.PersistentVolumeClaim, out *api.PersistentVolumeClaim, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim is an autogenerated conversion function. -func Convert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim(in *v1.PersistentVolumeClaim, out *api.PersistentVolumeClaim, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim(in, out, s) -} - -func autoConvert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in *api.PersistentVolumeClaim, out *v1.PersistentVolumeClaim, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim is an autogenerated conversion function. -func Convert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in *api.PersistentVolumeClaim, out *v1.PersistentVolumeClaim, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in, out, s) -} - -func autoConvert_v1_PersistentVolumeClaimCondition_To_api_PersistentVolumeClaimCondition(in *v1.PersistentVolumeClaimCondition, out *api.PersistentVolumeClaimCondition, s conversion.Scope) error { - out.Type = api.PersistentVolumeClaimConditionType(in.Type) - out.Status = api.ConditionStatus(in.Status) - out.LastProbeTime = in.LastProbeTime - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -// Convert_v1_PersistentVolumeClaimCondition_To_api_PersistentVolumeClaimCondition is an autogenerated conversion function. 
-func Convert_v1_PersistentVolumeClaimCondition_To_api_PersistentVolumeClaimCondition(in *v1.PersistentVolumeClaimCondition, out *api.PersistentVolumeClaimCondition, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeClaimCondition_To_api_PersistentVolumeClaimCondition(in, out, s) -} - -func autoConvert_api_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition(in *api.PersistentVolumeClaimCondition, out *v1.PersistentVolumeClaimCondition, s conversion.Scope) error { - out.Type = v1.PersistentVolumeClaimConditionType(in.Type) - out.Status = v1.ConditionStatus(in.Status) - out.LastProbeTime = in.LastProbeTime - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -// Convert_api_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition is an autogenerated conversion function. -func Convert_api_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition(in *api.PersistentVolumeClaimCondition, out *v1.PersistentVolumeClaimCondition, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition(in, out, s) -} - -func autoConvert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList(in *v1.PersistentVolumeClaimList, out *api.PersistentVolumeClaimList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]api.PersistentVolumeClaim)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList is an autogenerated conversion function. -func Convert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList(in *v1.PersistentVolumeClaimList, out *api.PersistentVolumeClaimList, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList(in, out, s) -} - -func autoConvert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in *api.PersistentVolumeClaimList, out *v1.PersistentVolumeClaimList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1.PersistentVolumeClaim)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList is an autogenerated conversion function. -func Convert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in *api.PersistentVolumeClaimList, out *v1.PersistentVolumeClaimList, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in, out, s) -} - -func autoConvert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec(in *v1.PersistentVolumeClaimSpec, out *api.PersistentVolumeClaimSpec, s conversion.Scope) error { - out.AccessModes = *(*[]api.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) - out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) - if err := Convert_v1_ResourceRequirements_To_api_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { - return err - } - out.VolumeName = in.VolumeName - out.StorageClassName = (*string)(unsafe.Pointer(in.StorageClassName)) - return nil -} - -// Convert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec is an autogenerated conversion function. 
-func Convert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec(in *v1.PersistentVolumeClaimSpec, out *api.PersistentVolumeClaimSpec, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec(in, out, s) -} - -func autoConvert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in *api.PersistentVolumeClaimSpec, out *v1.PersistentVolumeClaimSpec, s conversion.Scope) error { - out.AccessModes = *(*[]v1.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) - out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) - if err := Convert_api_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { - return err - } - out.VolumeName = in.VolumeName - out.StorageClassName = (*string)(unsafe.Pointer(in.StorageClassName)) - return nil -} - -// Convert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec is an autogenerated conversion function. -func Convert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in *api.PersistentVolumeClaimSpec, out *v1.PersistentVolumeClaimSpec, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in, out, s) -} - -func autoConvert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus(in *v1.PersistentVolumeClaimStatus, out *api.PersistentVolumeClaimStatus, s conversion.Scope) error { - out.Phase = api.PersistentVolumeClaimPhase(in.Phase) - out.AccessModes = *(*[]api.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) - out.Capacity = *(*api.ResourceList)(unsafe.Pointer(&in.Capacity)) - out.Conditions = *(*[]api.PersistentVolumeClaimCondition)(unsafe.Pointer(&in.Conditions)) - return nil -} - -// Convert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus is an autogenerated conversion function. -func Convert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus(in *v1.PersistentVolumeClaimStatus, out *api.PersistentVolumeClaimStatus, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus(in, out, s) -} - -func autoConvert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in *api.PersistentVolumeClaimStatus, out *v1.PersistentVolumeClaimStatus, s conversion.Scope) error { - out.Phase = v1.PersistentVolumeClaimPhase(in.Phase) - out.AccessModes = *(*[]v1.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) - out.Capacity = *(*v1.ResourceList)(unsafe.Pointer(&in.Capacity)) - out.Conditions = *(*[]v1.PersistentVolumeClaimCondition)(unsafe.Pointer(&in.Conditions)) - return nil -} - -// Convert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus is an autogenerated conversion function. -func Convert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in *api.PersistentVolumeClaimStatus, out *v1.PersistentVolumeClaimStatus, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in, out, s) -} - -func autoConvert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource(in *v1.PersistentVolumeClaimVolumeSource, out *api.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { - out.ClaimName = in.ClaimName - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource is an autogenerated conversion function. 
-func Convert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource(in *v1.PersistentVolumeClaimVolumeSource, out *api.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource(in, out, s) -} - -func autoConvert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in *api.PersistentVolumeClaimVolumeSource, out *v1.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { - out.ClaimName = in.ClaimName - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource is an autogenerated conversion function. -func Convert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in *api.PersistentVolumeClaimVolumeSource, out *v1.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in, out, s) -} - -func autoConvert_v1_PersistentVolumeList_To_api_PersistentVolumeList(in *v1.PersistentVolumeList, out *api.PersistentVolumeList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]api.PersistentVolume, len(*in)) - for i := range *in { - if err := Convert_v1_PersistentVolume_To_api_PersistentVolume(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_v1_PersistentVolumeList_To_api_PersistentVolumeList is an autogenerated conversion function. -func Convert_v1_PersistentVolumeList_To_api_PersistentVolumeList(in *v1.PersistentVolumeList, out *api.PersistentVolumeList, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeList_To_api_PersistentVolumeList(in, out, s) -} - -func autoConvert_api_PersistentVolumeList_To_v1_PersistentVolumeList(in *api.PersistentVolumeList, out *v1.PersistentVolumeList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]v1.PersistentVolume, len(*in)) - for i := range *in { - if err := Convert_api_PersistentVolume_To_v1_PersistentVolume(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_api_PersistentVolumeList_To_v1_PersistentVolumeList is an autogenerated conversion function. 
-func Convert_api_PersistentVolumeList_To_v1_PersistentVolumeList(in *api.PersistentVolumeList, out *v1.PersistentVolumeList, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeList_To_v1_PersistentVolumeList(in, out, s) -} - -func autoConvert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(in *v1.PersistentVolumeSource, out *api.PersistentVolumeSource, s conversion.Scope) error { - out.GCEPersistentDisk = (*api.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) - out.AWSElasticBlockStore = (*api.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) - out.HostPath = (*api.HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) - out.Glusterfs = (*api.GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) - out.NFS = (*api.NFSVolumeSource)(unsafe.Pointer(in.NFS)) - out.RBD = (*api.RBDVolumeSource)(unsafe.Pointer(in.RBD)) - out.ISCSI = (*api.ISCSIVolumeSource)(unsafe.Pointer(in.ISCSI)) - out.Cinder = (*api.CinderVolumeSource)(unsafe.Pointer(in.Cinder)) - out.CephFS = (*api.CephFSPersistentVolumeSource)(unsafe.Pointer(in.CephFS)) - out.FC = (*api.FCVolumeSource)(unsafe.Pointer(in.FC)) - out.Flocker = (*api.FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) - out.FlexVolume = (*api.FlexVolumeSource)(unsafe.Pointer(in.FlexVolume)) - out.AzureFile = (*api.AzureFilePersistentVolumeSource)(unsafe.Pointer(in.AzureFile)) - out.VsphereVolume = (*api.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) - out.Quobyte = (*api.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) - out.AzureDisk = (*api.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) - out.PhotonPersistentDisk = (*api.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) - out.PortworxVolume = (*api.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) - out.ScaleIO = (*api.ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO)) - out.Local = (*api.LocalVolumeSource)(unsafe.Pointer(in.Local)) - out.StorageOS = (*api.StorageOSPersistentVolumeSource)(unsafe.Pointer(in.StorageOS)) - return nil -} - -// Convert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource is an autogenerated conversion function. 
-func Convert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(in *v1.PersistentVolumeSource, out *api.PersistentVolumeSource, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(in, out, s) -} - -func autoConvert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *api.PersistentVolumeSource, out *v1.PersistentVolumeSource, s conversion.Scope) error { - out.GCEPersistentDisk = (*v1.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) - out.AWSElasticBlockStore = (*v1.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) - out.HostPath = (*v1.HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) - out.Glusterfs = (*v1.GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) - out.NFS = (*v1.NFSVolumeSource)(unsafe.Pointer(in.NFS)) - out.RBD = (*v1.RBDVolumeSource)(unsafe.Pointer(in.RBD)) - out.Quobyte = (*v1.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) - out.ISCSI = (*v1.ISCSIVolumeSource)(unsafe.Pointer(in.ISCSI)) - out.FlexVolume = (*v1.FlexVolumeSource)(unsafe.Pointer(in.FlexVolume)) - out.Cinder = (*v1.CinderVolumeSource)(unsafe.Pointer(in.Cinder)) - out.CephFS = (*v1.CephFSPersistentVolumeSource)(unsafe.Pointer(in.CephFS)) - out.FC = (*v1.FCVolumeSource)(unsafe.Pointer(in.FC)) - out.Flocker = (*v1.FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) - out.AzureFile = (*v1.AzureFilePersistentVolumeSource)(unsafe.Pointer(in.AzureFile)) - out.VsphereVolume = (*v1.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) - out.AzureDisk = (*v1.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) - out.PhotonPersistentDisk = (*v1.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) - out.PortworxVolume = (*v1.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) - out.ScaleIO = (*v1.ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO)) - out.Local = (*v1.LocalVolumeSource)(unsafe.Pointer(in.Local)) - out.StorageOS = (*v1.StorageOSPersistentVolumeSource)(unsafe.Pointer(in.StorageOS)) - return nil -} - -// Convert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource is an autogenerated conversion function. -func Convert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *api.PersistentVolumeSource, out *v1.PersistentVolumeSource, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(in, out, s) -} - -func autoConvert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(in *v1.PersistentVolumeSpec, out *api.PersistentVolumeSpec, s conversion.Scope) error { - out.Capacity = *(*api.ResourceList)(unsafe.Pointer(&in.Capacity)) - if err := Convert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(&in.PersistentVolumeSource, &out.PersistentVolumeSource, s); err != nil { - return err - } - out.AccessModes = *(*[]api.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) - out.ClaimRef = (*api.ObjectReference)(unsafe.Pointer(in.ClaimRef)) - out.PersistentVolumeReclaimPolicy = api.PersistentVolumeReclaimPolicy(in.PersistentVolumeReclaimPolicy) - out.StorageClassName = in.StorageClassName - out.MountOptions = *(*[]string)(unsafe.Pointer(&in.MountOptions)) - return nil -} - -// Convert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec is an autogenerated conversion function. 
-func Convert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(in *v1.PersistentVolumeSpec, out *api.PersistentVolumeSpec, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(in, out, s) -} - -func autoConvert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *api.PersistentVolumeSpec, out *v1.PersistentVolumeSpec, s conversion.Scope) error { - out.Capacity = *(*v1.ResourceList)(unsafe.Pointer(&in.Capacity)) - if err := Convert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(&in.PersistentVolumeSource, &out.PersistentVolumeSource, s); err != nil { - return err - } - out.AccessModes = *(*[]v1.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) - out.ClaimRef = (*v1.ObjectReference)(unsafe.Pointer(in.ClaimRef)) - out.PersistentVolumeReclaimPolicy = v1.PersistentVolumeReclaimPolicy(in.PersistentVolumeReclaimPolicy) - out.StorageClassName = in.StorageClassName - out.MountOptions = *(*[]string)(unsafe.Pointer(&in.MountOptions)) - return nil -} - -// Convert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec is an autogenerated conversion function. -func Convert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *api.PersistentVolumeSpec, out *v1.PersistentVolumeSpec, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in, out, s) -} - -func autoConvert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus(in *v1.PersistentVolumeStatus, out *api.PersistentVolumeStatus, s conversion.Scope) error { - out.Phase = api.PersistentVolumePhase(in.Phase) - out.Message = in.Message - out.Reason = in.Reason - return nil -} - -// Convert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus is an autogenerated conversion function. -func Convert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus(in *v1.PersistentVolumeStatus, out *api.PersistentVolumeStatus, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus(in, out, s) -} - -func autoConvert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in *api.PersistentVolumeStatus, out *v1.PersistentVolumeStatus, s conversion.Scope) error { - out.Phase = v1.PersistentVolumePhase(in.Phase) - out.Message = in.Message - out.Reason = in.Reason - return nil -} - -// Convert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus is an autogenerated conversion function. -func Convert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in *api.PersistentVolumeStatus, out *v1.PersistentVolumeStatus, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in, out, s) -} - -func autoConvert_v1_PhotonPersistentDiskVolumeSource_To_api_PhotonPersistentDiskVolumeSource(in *v1.PhotonPersistentDiskVolumeSource, out *api.PhotonPersistentDiskVolumeSource, s conversion.Scope) error { - out.PdID = in.PdID - out.FSType = in.FSType - return nil -} - -// Convert_v1_PhotonPersistentDiskVolumeSource_To_api_PhotonPersistentDiskVolumeSource is an autogenerated conversion function. 
-func Convert_v1_PhotonPersistentDiskVolumeSource_To_api_PhotonPersistentDiskVolumeSource(in *v1.PhotonPersistentDiskVolumeSource, out *api.PhotonPersistentDiskVolumeSource, s conversion.Scope) error { - return autoConvert_v1_PhotonPersistentDiskVolumeSource_To_api_PhotonPersistentDiskVolumeSource(in, out, s) -} - -func autoConvert_api_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(in *api.PhotonPersistentDiskVolumeSource, out *v1.PhotonPersistentDiskVolumeSource, s conversion.Scope) error { - out.PdID = in.PdID - out.FSType = in.FSType - return nil -} - -// Convert_api_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource is an autogenerated conversion function. -func Convert_api_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(in *api.PhotonPersistentDiskVolumeSource, out *v1.PhotonPersistentDiskVolumeSource, s conversion.Scope) error { - return autoConvert_api_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(in, out, s) -} - -func autoConvert_v1_Pod_To_api_Pod(in *v1.Pod, out *api.Pod, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_PodSpec_To_api_PodSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_PodStatus_To_api_PodStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1_Pod_To_api_Pod is an autogenerated conversion function. -func Convert_v1_Pod_To_api_Pod(in *v1.Pod, out *api.Pod, s conversion.Scope) error { - return autoConvert_v1_Pod_To_api_Pod(in, out, s) -} - -func autoConvert_api_Pod_To_v1_Pod(in *api.Pod, out *v1.Pod, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_PodSpec_To_v1_PodSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_api_PodStatus_To_v1_PodStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func autoConvert_v1_PodAffinity_To_api_PodAffinity(in *v1.PodAffinity, out *api.PodAffinity, s conversion.Scope) error { - out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]api.PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) - out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]api.WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) - return nil -} - -// Convert_v1_PodAffinity_To_api_PodAffinity is an autogenerated conversion function. -func Convert_v1_PodAffinity_To_api_PodAffinity(in *v1.PodAffinity, out *api.PodAffinity, s conversion.Scope) error { - return autoConvert_v1_PodAffinity_To_api_PodAffinity(in, out, s) -} - -func autoConvert_api_PodAffinity_To_v1_PodAffinity(in *api.PodAffinity, out *v1.PodAffinity, s conversion.Scope) error { - out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]v1.PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) - out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]v1.WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) - return nil -} - -// Convert_api_PodAffinity_To_v1_PodAffinity is an autogenerated conversion function. 
-func Convert_api_PodAffinity_To_v1_PodAffinity(in *api.PodAffinity, out *v1.PodAffinity, s conversion.Scope) error { - return autoConvert_api_PodAffinity_To_v1_PodAffinity(in, out, s) -} - -func autoConvert_v1_PodAffinityTerm_To_api_PodAffinityTerm(in *v1.PodAffinityTerm, out *api.PodAffinityTerm, s conversion.Scope) error { - out.LabelSelector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.LabelSelector)) - out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces)) - out.TopologyKey = in.TopologyKey - return nil -} - -// Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm is an autogenerated conversion function. -func Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm(in *v1.PodAffinityTerm, out *api.PodAffinityTerm, s conversion.Scope) error { - return autoConvert_v1_PodAffinityTerm_To_api_PodAffinityTerm(in, out, s) -} - -func autoConvert_api_PodAffinityTerm_To_v1_PodAffinityTerm(in *api.PodAffinityTerm, out *v1.PodAffinityTerm, s conversion.Scope) error { - out.LabelSelector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.LabelSelector)) - out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces)) - out.TopologyKey = in.TopologyKey - return nil -} - -// Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm is an autogenerated conversion function. -func Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm(in *api.PodAffinityTerm, out *v1.PodAffinityTerm, s conversion.Scope) error { - return autoConvert_api_PodAffinityTerm_To_v1_PodAffinityTerm(in, out, s) -} - -func autoConvert_v1_PodAntiAffinity_To_api_PodAntiAffinity(in *v1.PodAntiAffinity, out *api.PodAntiAffinity, s conversion.Scope) error { - out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]api.PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) - out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]api.WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) - return nil -} - -// Convert_v1_PodAntiAffinity_To_api_PodAntiAffinity is an autogenerated conversion function. -func Convert_v1_PodAntiAffinity_To_api_PodAntiAffinity(in *v1.PodAntiAffinity, out *api.PodAntiAffinity, s conversion.Scope) error { - return autoConvert_v1_PodAntiAffinity_To_api_PodAntiAffinity(in, out, s) -} - -func autoConvert_api_PodAntiAffinity_To_v1_PodAntiAffinity(in *api.PodAntiAffinity, out *v1.PodAntiAffinity, s conversion.Scope) error { - out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]v1.PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) - out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]v1.WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) - return nil -} - -// Convert_api_PodAntiAffinity_To_v1_PodAntiAffinity is an autogenerated conversion function. -func Convert_api_PodAntiAffinity_To_v1_PodAntiAffinity(in *api.PodAntiAffinity, out *v1.PodAntiAffinity, s conversion.Scope) error { - return autoConvert_api_PodAntiAffinity_To_v1_PodAntiAffinity(in, out, s) -} - -func autoConvert_v1_PodAttachOptions_To_api_PodAttachOptions(in *v1.PodAttachOptions, out *api.PodAttachOptions, s conversion.Scope) error { - out.Stdin = in.Stdin - out.Stdout = in.Stdout - out.Stderr = in.Stderr - out.TTY = in.TTY - out.Container = in.Container - return nil -} - -// Convert_v1_PodAttachOptions_To_api_PodAttachOptions is an autogenerated conversion function. 
-func Convert_v1_PodAttachOptions_To_api_PodAttachOptions(in *v1.PodAttachOptions, out *api.PodAttachOptions, s conversion.Scope) error { - return autoConvert_v1_PodAttachOptions_To_api_PodAttachOptions(in, out, s) -} - -func autoConvert_api_PodAttachOptions_To_v1_PodAttachOptions(in *api.PodAttachOptions, out *v1.PodAttachOptions, s conversion.Scope) error { - out.Stdin = in.Stdin - out.Stdout = in.Stdout - out.Stderr = in.Stderr - out.TTY = in.TTY - out.Container = in.Container - return nil -} - -// Convert_api_PodAttachOptions_To_v1_PodAttachOptions is an autogenerated conversion function. -func Convert_api_PodAttachOptions_To_v1_PodAttachOptions(in *api.PodAttachOptions, out *v1.PodAttachOptions, s conversion.Scope) error { - return autoConvert_api_PodAttachOptions_To_v1_PodAttachOptions(in, out, s) -} - -func autoConvert_v1_PodCondition_To_api_PodCondition(in *v1.PodCondition, out *api.PodCondition, s conversion.Scope) error { - out.Type = api.PodConditionType(in.Type) - out.Status = api.ConditionStatus(in.Status) - out.LastProbeTime = in.LastProbeTime - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -// Convert_v1_PodCondition_To_api_PodCondition is an autogenerated conversion function. -func Convert_v1_PodCondition_To_api_PodCondition(in *v1.PodCondition, out *api.PodCondition, s conversion.Scope) error { - return autoConvert_v1_PodCondition_To_api_PodCondition(in, out, s) -} - -func autoConvert_api_PodCondition_To_v1_PodCondition(in *api.PodCondition, out *v1.PodCondition, s conversion.Scope) error { - out.Type = v1.PodConditionType(in.Type) - out.Status = v1.ConditionStatus(in.Status) - out.LastProbeTime = in.LastProbeTime - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -// Convert_api_PodCondition_To_v1_PodCondition is an autogenerated conversion function. -func Convert_api_PodCondition_To_v1_PodCondition(in *api.PodCondition, out *v1.PodCondition, s conversion.Scope) error { - return autoConvert_api_PodCondition_To_v1_PodCondition(in, out, s) -} - -func autoConvert_v1_PodExecOptions_To_api_PodExecOptions(in *v1.PodExecOptions, out *api.PodExecOptions, s conversion.Scope) error { - out.Stdin = in.Stdin - out.Stdout = in.Stdout - out.Stderr = in.Stderr - out.TTY = in.TTY - out.Container = in.Container - out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) - return nil -} - -// Convert_v1_PodExecOptions_To_api_PodExecOptions is an autogenerated conversion function. -func Convert_v1_PodExecOptions_To_api_PodExecOptions(in *v1.PodExecOptions, out *api.PodExecOptions, s conversion.Scope) error { - return autoConvert_v1_PodExecOptions_To_api_PodExecOptions(in, out, s) -} - -func autoConvert_api_PodExecOptions_To_v1_PodExecOptions(in *api.PodExecOptions, out *v1.PodExecOptions, s conversion.Scope) error { - out.Stdin = in.Stdin - out.Stdout = in.Stdout - out.Stderr = in.Stderr - out.TTY = in.TTY - out.Container = in.Container - out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) - return nil -} - -// Convert_api_PodExecOptions_To_v1_PodExecOptions is an autogenerated conversion function. 
-func Convert_api_PodExecOptions_To_v1_PodExecOptions(in *api.PodExecOptions, out *v1.PodExecOptions, s conversion.Scope) error { - return autoConvert_api_PodExecOptions_To_v1_PodExecOptions(in, out, s) -} - -func autoConvert_v1_PodList_To_api_PodList(in *v1.PodList, out *api.PodList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]api.Pod, len(*in)) - for i := range *in { - if err := Convert_v1_Pod_To_api_Pod(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_v1_PodList_To_api_PodList is an autogenerated conversion function. -func Convert_v1_PodList_To_api_PodList(in *v1.PodList, out *api.PodList, s conversion.Scope) error { - return autoConvert_v1_PodList_To_api_PodList(in, out, s) -} - -func autoConvert_api_PodList_To_v1_PodList(in *api.PodList, out *v1.PodList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]v1.Pod, len(*in)) - for i := range *in { - if err := Convert_api_Pod_To_v1_Pod(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_api_PodList_To_v1_PodList is an autogenerated conversion function. -func Convert_api_PodList_To_v1_PodList(in *api.PodList, out *v1.PodList, s conversion.Scope) error { - return autoConvert_api_PodList_To_v1_PodList(in, out, s) -} - -func autoConvert_v1_PodLogOptions_To_api_PodLogOptions(in *v1.PodLogOptions, out *api.PodLogOptions, s conversion.Scope) error { - out.Container = in.Container - out.Follow = in.Follow - out.Previous = in.Previous - out.SinceSeconds = (*int64)(unsafe.Pointer(in.SinceSeconds)) - out.SinceTime = (*meta_v1.Time)(unsafe.Pointer(in.SinceTime)) - out.Timestamps = in.Timestamps - out.TailLines = (*int64)(unsafe.Pointer(in.TailLines)) - out.LimitBytes = (*int64)(unsafe.Pointer(in.LimitBytes)) - return nil -} - -// Convert_v1_PodLogOptions_To_api_PodLogOptions is an autogenerated conversion function. -func Convert_v1_PodLogOptions_To_api_PodLogOptions(in *v1.PodLogOptions, out *api.PodLogOptions, s conversion.Scope) error { - return autoConvert_v1_PodLogOptions_To_api_PodLogOptions(in, out, s) -} - -func autoConvert_api_PodLogOptions_To_v1_PodLogOptions(in *api.PodLogOptions, out *v1.PodLogOptions, s conversion.Scope) error { - out.Container = in.Container - out.Follow = in.Follow - out.Previous = in.Previous - out.SinceSeconds = (*int64)(unsafe.Pointer(in.SinceSeconds)) - out.SinceTime = (*meta_v1.Time)(unsafe.Pointer(in.SinceTime)) - out.Timestamps = in.Timestamps - out.TailLines = (*int64)(unsafe.Pointer(in.TailLines)) - out.LimitBytes = (*int64)(unsafe.Pointer(in.LimitBytes)) - return nil -} - -// Convert_api_PodLogOptions_To_v1_PodLogOptions is an autogenerated conversion function. -func Convert_api_PodLogOptions_To_v1_PodLogOptions(in *api.PodLogOptions, out *v1.PodLogOptions, s conversion.Scope) error { - return autoConvert_api_PodLogOptions_To_v1_PodLogOptions(in, out, s) -} - -func autoConvert_v1_PodPortForwardOptions_To_api_PodPortForwardOptions(in *v1.PodPortForwardOptions, out *api.PodPortForwardOptions, s conversion.Scope) error { - out.Ports = *(*[]int32)(unsafe.Pointer(&in.Ports)) - return nil -} - -// Convert_v1_PodPortForwardOptions_To_api_PodPortForwardOptions is an autogenerated conversion function. 
-func Convert_v1_PodPortForwardOptions_To_api_PodPortForwardOptions(in *v1.PodPortForwardOptions, out *api.PodPortForwardOptions, s conversion.Scope) error { - return autoConvert_v1_PodPortForwardOptions_To_api_PodPortForwardOptions(in, out, s) -} - -func autoConvert_api_PodPortForwardOptions_To_v1_PodPortForwardOptions(in *api.PodPortForwardOptions, out *v1.PodPortForwardOptions, s conversion.Scope) error { - out.Ports = *(*[]int32)(unsafe.Pointer(&in.Ports)) - return nil -} - -// Convert_api_PodPortForwardOptions_To_v1_PodPortForwardOptions is an autogenerated conversion function. -func Convert_api_PodPortForwardOptions_To_v1_PodPortForwardOptions(in *api.PodPortForwardOptions, out *v1.PodPortForwardOptions, s conversion.Scope) error { - return autoConvert_api_PodPortForwardOptions_To_v1_PodPortForwardOptions(in, out, s) -} - -func autoConvert_v1_PodProxyOptions_To_api_PodProxyOptions(in *v1.PodProxyOptions, out *api.PodProxyOptions, s conversion.Scope) error { - out.Path = in.Path - return nil -} - -// Convert_v1_PodProxyOptions_To_api_PodProxyOptions is an autogenerated conversion function. -func Convert_v1_PodProxyOptions_To_api_PodProxyOptions(in *v1.PodProxyOptions, out *api.PodProxyOptions, s conversion.Scope) error { - return autoConvert_v1_PodProxyOptions_To_api_PodProxyOptions(in, out, s) -} - -func autoConvert_api_PodProxyOptions_To_v1_PodProxyOptions(in *api.PodProxyOptions, out *v1.PodProxyOptions, s conversion.Scope) error { - out.Path = in.Path - return nil -} - -// Convert_api_PodProxyOptions_To_v1_PodProxyOptions is an autogenerated conversion function. -func Convert_api_PodProxyOptions_To_v1_PodProxyOptions(in *api.PodProxyOptions, out *v1.PodProxyOptions, s conversion.Scope) error { - return autoConvert_api_PodProxyOptions_To_v1_PodProxyOptions(in, out, s) -} - -func autoConvert_v1_PodSecurityContext_To_api_PodSecurityContext(in *v1.PodSecurityContext, out *api.PodSecurityContext, s conversion.Scope) error { - out.SELinuxOptions = (*api.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) - out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser)) - out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot)) - out.SupplementalGroups = *(*[]int64)(unsafe.Pointer(&in.SupplementalGroups)) - out.FSGroup = (*int64)(unsafe.Pointer(in.FSGroup)) - return nil -} - -func autoConvert_api_PodSecurityContext_To_v1_PodSecurityContext(in *api.PodSecurityContext, out *v1.PodSecurityContext, s conversion.Scope) error { - // INFO: in.HostNetwork opted out of conversion generation - // INFO: in.HostPID opted out of conversion generation - // INFO: in.HostIPC opted out of conversion generation - out.SELinuxOptions = (*v1.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) - out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser)) - out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot)) - out.SupplementalGroups = *(*[]int64)(unsafe.Pointer(&in.SupplementalGroups)) - out.FSGroup = (*int64)(unsafe.Pointer(in.FSGroup)) - return nil -} - -func autoConvert_v1_PodSignature_To_api_PodSignature(in *v1.PodSignature, out *api.PodSignature, s conversion.Scope) error { - out.PodController = (*meta_v1.OwnerReference)(unsafe.Pointer(in.PodController)) - return nil -} - -// Convert_v1_PodSignature_To_api_PodSignature is an autogenerated conversion function. 
-func Convert_v1_PodSignature_To_api_PodSignature(in *v1.PodSignature, out *api.PodSignature, s conversion.Scope) error {
-	return autoConvert_v1_PodSignature_To_api_PodSignature(in, out, s)
-}
-
-func autoConvert_api_PodSignature_To_v1_PodSignature(in *api.PodSignature, out *v1.PodSignature, s conversion.Scope) error {
-	out.PodController = (*meta_v1.OwnerReference)(unsafe.Pointer(in.PodController))
-	return nil
-}
-
-// Convert_api_PodSignature_To_v1_PodSignature is an autogenerated conversion function.
-func Convert_api_PodSignature_To_v1_PodSignature(in *api.PodSignature, out *v1.PodSignature, s conversion.Scope) error {
-	return autoConvert_api_PodSignature_To_v1_PodSignature(in, out, s)
-}
-
-func autoConvert_v1_PodSpec_To_api_PodSpec(in *v1.PodSpec, out *api.PodSpec, s conversion.Scope) error {
-	if in.Volumes != nil {
-		in, out := &in.Volumes, &out.Volumes
-		*out = make([]api.Volume, len(*in))
-		for i := range *in {
-			if err := Convert_v1_Volume_To_api_Volume(&(*in)[i], &(*out)[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Volumes = nil
-	}
-	if in.InitContainers != nil {
-		in, out := &in.InitContainers, &out.InitContainers
-		*out = make([]api.Container, len(*in))
-		for i := range *in {
-			if err := Convert_v1_Container_To_api_Container(&(*in)[i], &(*out)[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.InitContainers = nil
-	}
-	if in.Containers != nil {
-		in, out := &in.Containers, &out.Containers
-		*out = make([]api.Container, len(*in))
-		for i := range *in {
-			if err := Convert_v1_Container_To_api_Container(&(*in)[i], &(*out)[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Containers = nil
-	}
-	out.RestartPolicy = api.RestartPolicy(in.RestartPolicy)
-	out.TerminationGracePeriodSeconds = (*int64)(unsafe.Pointer(in.TerminationGracePeriodSeconds))
-	out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds))
-	out.DNSPolicy = api.DNSPolicy(in.DNSPolicy)
-	out.NodeSelector = *(*map[string]string)(unsafe.Pointer(&in.NodeSelector))
-	out.ServiceAccountName = in.ServiceAccountName
-	// INFO: in.DeprecatedServiceAccount opted out of conversion generation
-	out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken))
-	out.NodeName = in.NodeName
-	// INFO: in.HostNetwork opted out of conversion generation
-	// INFO: in.HostPID opted out of conversion generation
-	// INFO: in.HostIPC opted out of conversion generation
-	if in.SecurityContext != nil {
-		in, out := &in.SecurityContext, &out.SecurityContext
-		*out = new(api.PodSecurityContext)
-		if err := Convert_v1_PodSecurityContext_To_api_PodSecurityContext(*in, *out, s); err != nil {
-			return err
-		}
-	} else {
-		out.SecurityContext = nil
-	}
-	out.ImagePullSecrets = *(*[]api.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets))
-	out.Hostname = in.Hostname
-	out.Subdomain = in.Subdomain
-	out.Affinity = (*api.Affinity)(unsafe.Pointer(in.Affinity))
-	out.SchedulerName = in.SchedulerName
-	out.Tolerations = *(*[]api.Toleration)(unsafe.Pointer(&in.Tolerations))
-	out.HostAliases = *(*[]api.HostAlias)(unsafe.Pointer(&in.HostAliases))
-	out.PriorityClassName = in.PriorityClassName
-	out.Priority = (*int32)(unsafe.Pointer(in.Priority))
-	return nil
-}
-
-func autoConvert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *v1.PodSpec, s conversion.Scope) error {
-	if in.Volumes != nil {
-		in, out := &in.Volumes, &out.Volumes
-		*out = make([]v1.Volume, len(*in))
-		for i := range *in {
-			if err := Convert_api_Volume_To_v1_Volume(&(*in)[i], &(*out)[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Volumes = nil
-	}
-	if in.InitContainers != nil {
-		in, out := &in.InitContainers, &out.InitContainers
-		*out = make([]v1.Container, len(*in))
-		for i := range *in {
-			if err := Convert_api_Container_To_v1_Container(&(*in)[i], &(*out)[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.InitContainers = nil
-	}
-	if in.Containers != nil {
-		in, out := &in.Containers, &out.Containers
-		*out = make([]v1.Container, len(*in))
-		for i := range *in {
-			if err := Convert_api_Container_To_v1_Container(&(*in)[i], &(*out)[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Containers = nil
-	}
-	out.RestartPolicy = v1.RestartPolicy(in.RestartPolicy)
-	out.TerminationGracePeriodSeconds = (*int64)(unsafe.Pointer(in.TerminationGracePeriodSeconds))
-	out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds))
-	out.DNSPolicy = v1.DNSPolicy(in.DNSPolicy)
-	out.NodeSelector = *(*map[string]string)(unsafe.Pointer(&in.NodeSelector))
-	out.ServiceAccountName = in.ServiceAccountName
-	out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken))
-	out.NodeName = in.NodeName
-	if in.SecurityContext != nil {
-		in, out := &in.SecurityContext, &out.SecurityContext
-		*out = new(v1.PodSecurityContext)
-		if err := Convert_api_PodSecurityContext_To_v1_PodSecurityContext(*in, *out, s); err != nil {
-			return err
-		}
-	} else {
-		out.SecurityContext = nil
-	}
-	out.ImagePullSecrets = *(*[]v1.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets))
-	out.Hostname = in.Hostname
-	out.Subdomain = in.Subdomain
-	out.Affinity = (*v1.Affinity)(unsafe.Pointer(in.Affinity))
-	out.SchedulerName = in.SchedulerName
-	out.Tolerations = *(*[]v1.Toleration)(unsafe.Pointer(&in.Tolerations))
-	out.HostAliases = *(*[]v1.HostAlias)(unsafe.Pointer(&in.HostAliases))
-	out.PriorityClassName = in.PriorityClassName
-	out.Priority = (*int32)(unsafe.Pointer(in.Priority))
-	return nil
-}
-
-func autoConvert_v1_PodStatus_To_api_PodStatus(in *v1.PodStatus, out *api.PodStatus, s conversion.Scope) error {
-	out.Phase = api.PodPhase(in.Phase)
-	out.Conditions = *(*[]api.PodCondition)(unsafe.Pointer(&in.Conditions))
-	out.Message = in.Message
-	out.Reason = in.Reason
-	out.HostIP = in.HostIP
-	out.PodIP = in.PodIP
-	out.StartTime = (*meta_v1.Time)(unsafe.Pointer(in.StartTime))
-	out.InitContainerStatuses = *(*[]api.ContainerStatus)(unsafe.Pointer(&in.InitContainerStatuses))
-	out.ContainerStatuses = *(*[]api.ContainerStatus)(unsafe.Pointer(&in.ContainerStatuses))
-	out.QOSClass = api.PodQOSClass(in.QOSClass)
-	return nil
-}
-
-// Convert_v1_PodStatus_To_api_PodStatus is an autogenerated conversion function.
-func Convert_v1_PodStatus_To_api_PodStatus(in *v1.PodStatus, out *api.PodStatus, s conversion.Scope) error { - return autoConvert_v1_PodStatus_To_api_PodStatus(in, out, s) -} - -func autoConvert_api_PodStatus_To_v1_PodStatus(in *api.PodStatus, out *v1.PodStatus, s conversion.Scope) error { - out.Phase = v1.PodPhase(in.Phase) - out.Conditions = *(*[]v1.PodCondition)(unsafe.Pointer(&in.Conditions)) - out.Message = in.Message - out.Reason = in.Reason - out.HostIP = in.HostIP - out.PodIP = in.PodIP - out.StartTime = (*meta_v1.Time)(unsafe.Pointer(in.StartTime)) - out.QOSClass = v1.PodQOSClass(in.QOSClass) - out.InitContainerStatuses = *(*[]v1.ContainerStatus)(unsafe.Pointer(&in.InitContainerStatuses)) - out.ContainerStatuses = *(*[]v1.ContainerStatus)(unsafe.Pointer(&in.ContainerStatuses)) - return nil -} - -// Convert_api_PodStatus_To_v1_PodStatus is an autogenerated conversion function. -func Convert_api_PodStatus_To_v1_PodStatus(in *api.PodStatus, out *v1.PodStatus, s conversion.Scope) error { - return autoConvert_api_PodStatus_To_v1_PodStatus(in, out, s) -} - -func autoConvert_v1_PodStatusResult_To_api_PodStatusResult(in *v1.PodStatusResult, out *api.PodStatusResult, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_PodStatus_To_api_PodStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1_PodStatusResult_To_api_PodStatusResult is an autogenerated conversion function. -func Convert_v1_PodStatusResult_To_api_PodStatusResult(in *v1.PodStatusResult, out *api.PodStatusResult, s conversion.Scope) error { - return autoConvert_v1_PodStatusResult_To_api_PodStatusResult(in, out, s) -} - -func autoConvert_api_PodStatusResult_To_v1_PodStatusResult(in *api.PodStatusResult, out *v1.PodStatusResult, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_PodStatus_To_v1_PodStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_api_PodStatusResult_To_v1_PodStatusResult is an autogenerated conversion function. -func Convert_api_PodStatusResult_To_v1_PodStatusResult(in *api.PodStatusResult, out *v1.PodStatusResult, s conversion.Scope) error { - return autoConvert_api_PodStatusResult_To_v1_PodStatusResult(in, out, s) -} - -func autoConvert_v1_PodTemplate_To_api_PodTemplate(in *v1.PodTemplate, out *api.PodTemplate, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} - -// Convert_v1_PodTemplate_To_api_PodTemplate is an autogenerated conversion function. -func Convert_v1_PodTemplate_To_api_PodTemplate(in *v1.PodTemplate, out *api.PodTemplate, s conversion.Scope) error { - return autoConvert_v1_PodTemplate_To_api_PodTemplate(in, out, s) -} - -func autoConvert_api_PodTemplate_To_v1_PodTemplate(in *api.PodTemplate, out *v1.PodTemplate, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} - -// Convert_api_PodTemplate_To_v1_PodTemplate is an autogenerated conversion function. 
-func Convert_api_PodTemplate_To_v1_PodTemplate(in *api.PodTemplate, out *v1.PodTemplate, s conversion.Scope) error { - return autoConvert_api_PodTemplate_To_v1_PodTemplate(in, out, s) -} - -func autoConvert_v1_PodTemplateList_To_api_PodTemplateList(in *v1.PodTemplateList, out *api.PodTemplateList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]api.PodTemplate, len(*in)) - for i := range *in { - if err := Convert_v1_PodTemplate_To_api_PodTemplate(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_v1_PodTemplateList_To_api_PodTemplateList is an autogenerated conversion function. -func Convert_v1_PodTemplateList_To_api_PodTemplateList(in *v1.PodTemplateList, out *api.PodTemplateList, s conversion.Scope) error { - return autoConvert_v1_PodTemplateList_To_api_PodTemplateList(in, out, s) -} - -func autoConvert_api_PodTemplateList_To_v1_PodTemplateList(in *api.PodTemplateList, out *v1.PodTemplateList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]v1.PodTemplate, len(*in)) - for i := range *in { - if err := Convert_api_PodTemplate_To_v1_PodTemplate(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_api_PodTemplateList_To_v1_PodTemplateList is an autogenerated conversion function. -func Convert_api_PodTemplateList_To_v1_PodTemplateList(in *api.PodTemplateList, out *v1.PodTemplateList, s conversion.Scope) error { - return autoConvert_api_PodTemplateList_To_v1_PodTemplateList(in, out, s) -} - -func autoConvert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in *v1.PodTemplateSpec, out *api.PodTemplateSpec, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_PodSpec_To_api_PodSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -func autoConvert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in *api.PodTemplateSpec, out *v1.PodTemplateSpec, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_PodSpec_To_v1_PodSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -func autoConvert_v1_PortworxVolumeSource_To_api_PortworxVolumeSource(in *v1.PortworxVolumeSource, out *api.PortworxVolumeSource, s conversion.Scope) error { - out.VolumeID = in.VolumeID - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_v1_PortworxVolumeSource_To_api_PortworxVolumeSource is an autogenerated conversion function. -func Convert_v1_PortworxVolumeSource_To_api_PortworxVolumeSource(in *v1.PortworxVolumeSource, out *api.PortworxVolumeSource, s conversion.Scope) error { - return autoConvert_v1_PortworxVolumeSource_To_api_PortworxVolumeSource(in, out, s) -} - -func autoConvert_api_PortworxVolumeSource_To_v1_PortworxVolumeSource(in *api.PortworxVolumeSource, out *v1.PortworxVolumeSource, s conversion.Scope) error { - out.VolumeID = in.VolumeID - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_api_PortworxVolumeSource_To_v1_PortworxVolumeSource is an autogenerated conversion function. 
-func Convert_api_PortworxVolumeSource_To_v1_PortworxVolumeSource(in *api.PortworxVolumeSource, out *v1.PortworxVolumeSource, s conversion.Scope) error { - return autoConvert_api_PortworxVolumeSource_To_v1_PortworxVolumeSource(in, out, s) -} - -func autoConvert_v1_Preconditions_To_api_Preconditions(in *v1.Preconditions, out *api.Preconditions, s conversion.Scope) error { - out.UID = (*types.UID)(unsafe.Pointer(in.UID)) - return nil -} - -// Convert_v1_Preconditions_To_api_Preconditions is an autogenerated conversion function. -func Convert_v1_Preconditions_To_api_Preconditions(in *v1.Preconditions, out *api.Preconditions, s conversion.Scope) error { - return autoConvert_v1_Preconditions_To_api_Preconditions(in, out, s) -} - -func autoConvert_api_Preconditions_To_v1_Preconditions(in *api.Preconditions, out *v1.Preconditions, s conversion.Scope) error { - out.UID = (*types.UID)(unsafe.Pointer(in.UID)) - return nil -} - -// Convert_api_Preconditions_To_v1_Preconditions is an autogenerated conversion function. -func Convert_api_Preconditions_To_v1_Preconditions(in *api.Preconditions, out *v1.Preconditions, s conversion.Scope) error { - return autoConvert_api_Preconditions_To_v1_Preconditions(in, out, s) -} - -func autoConvert_v1_PreferAvoidPodsEntry_To_api_PreferAvoidPodsEntry(in *v1.PreferAvoidPodsEntry, out *api.PreferAvoidPodsEntry, s conversion.Scope) error { - if err := Convert_v1_PodSignature_To_api_PodSignature(&in.PodSignature, &out.PodSignature, s); err != nil { - return err - } - out.EvictionTime = in.EvictionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -// Convert_v1_PreferAvoidPodsEntry_To_api_PreferAvoidPodsEntry is an autogenerated conversion function. -func Convert_v1_PreferAvoidPodsEntry_To_api_PreferAvoidPodsEntry(in *v1.PreferAvoidPodsEntry, out *api.PreferAvoidPodsEntry, s conversion.Scope) error { - return autoConvert_v1_PreferAvoidPodsEntry_To_api_PreferAvoidPodsEntry(in, out, s) -} - -func autoConvert_api_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry(in *api.PreferAvoidPodsEntry, out *v1.PreferAvoidPodsEntry, s conversion.Scope) error { - if err := Convert_api_PodSignature_To_v1_PodSignature(&in.PodSignature, &out.PodSignature, s); err != nil { - return err - } - out.EvictionTime = in.EvictionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -// Convert_api_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry is an autogenerated conversion function. -func Convert_api_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry(in *api.PreferAvoidPodsEntry, out *v1.PreferAvoidPodsEntry, s conversion.Scope) error { - return autoConvert_api_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry(in, out, s) -} - -func autoConvert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm(in *v1.PreferredSchedulingTerm, out *api.PreferredSchedulingTerm, s conversion.Scope) error { - out.Weight = in.Weight - if err := Convert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm(&in.Preference, &out.Preference, s); err != nil { - return err - } - return nil -} - -// Convert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm is an autogenerated conversion function. 
-func Convert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm(in *v1.PreferredSchedulingTerm, out *api.PreferredSchedulingTerm, s conversion.Scope) error { - return autoConvert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm(in, out, s) -} - -func autoConvert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in *api.PreferredSchedulingTerm, out *v1.PreferredSchedulingTerm, s conversion.Scope) error { - out.Weight = in.Weight - if err := Convert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm(&in.Preference, &out.Preference, s); err != nil { - return err - } - return nil -} - -// Convert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm is an autogenerated conversion function. -func Convert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in *api.PreferredSchedulingTerm, out *v1.PreferredSchedulingTerm, s conversion.Scope) error { - return autoConvert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in, out, s) -} - -func autoConvert_v1_Probe_To_api_Probe(in *v1.Probe, out *api.Probe, s conversion.Scope) error { - if err := Convert_v1_Handler_To_api_Handler(&in.Handler, &out.Handler, s); err != nil { - return err - } - out.InitialDelaySeconds = in.InitialDelaySeconds - out.TimeoutSeconds = in.TimeoutSeconds - out.PeriodSeconds = in.PeriodSeconds - out.SuccessThreshold = in.SuccessThreshold - out.FailureThreshold = in.FailureThreshold - return nil -} - -// Convert_v1_Probe_To_api_Probe is an autogenerated conversion function. -func Convert_v1_Probe_To_api_Probe(in *v1.Probe, out *api.Probe, s conversion.Scope) error { - return autoConvert_v1_Probe_To_api_Probe(in, out, s) -} - -func autoConvert_api_Probe_To_v1_Probe(in *api.Probe, out *v1.Probe, s conversion.Scope) error { - if err := Convert_api_Handler_To_v1_Handler(&in.Handler, &out.Handler, s); err != nil { - return err - } - out.InitialDelaySeconds = in.InitialDelaySeconds - out.TimeoutSeconds = in.TimeoutSeconds - out.PeriodSeconds = in.PeriodSeconds - out.SuccessThreshold = in.SuccessThreshold - out.FailureThreshold = in.FailureThreshold - return nil -} - -// Convert_api_Probe_To_v1_Probe is an autogenerated conversion function. -func Convert_api_Probe_To_v1_Probe(in *api.Probe, out *v1.Probe, s conversion.Scope) error { - return autoConvert_api_Probe_To_v1_Probe(in, out, s) -} - -func autoConvert_v1_ProjectedVolumeSource_To_api_ProjectedVolumeSource(in *v1.ProjectedVolumeSource, out *api.ProjectedVolumeSource, s conversion.Scope) error { - out.Sources = *(*[]api.VolumeProjection)(unsafe.Pointer(&in.Sources)) - out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) - return nil -} - -// Convert_v1_ProjectedVolumeSource_To_api_ProjectedVolumeSource is an autogenerated conversion function. -func Convert_v1_ProjectedVolumeSource_To_api_ProjectedVolumeSource(in *v1.ProjectedVolumeSource, out *api.ProjectedVolumeSource, s conversion.Scope) error { - return autoConvert_v1_ProjectedVolumeSource_To_api_ProjectedVolumeSource(in, out, s) -} - -func autoConvert_api_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in *api.ProjectedVolumeSource, out *v1.ProjectedVolumeSource, s conversion.Scope) error { - out.Sources = *(*[]v1.VolumeProjection)(unsafe.Pointer(&in.Sources)) - out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) - return nil -} - -// Convert_api_ProjectedVolumeSource_To_v1_ProjectedVolumeSource is an autogenerated conversion function. 
-func Convert_api_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in *api.ProjectedVolumeSource, out *v1.ProjectedVolumeSource, s conversion.Scope) error { - return autoConvert_api_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in, out, s) -} - -func autoConvert_v1_QuobyteVolumeSource_To_api_QuobyteVolumeSource(in *v1.QuobyteVolumeSource, out *api.QuobyteVolumeSource, s conversion.Scope) error { - out.Registry = in.Registry - out.Volume = in.Volume - out.ReadOnly = in.ReadOnly - out.User = in.User - out.Group = in.Group - return nil -} - -// Convert_v1_QuobyteVolumeSource_To_api_QuobyteVolumeSource is an autogenerated conversion function. -func Convert_v1_QuobyteVolumeSource_To_api_QuobyteVolumeSource(in *v1.QuobyteVolumeSource, out *api.QuobyteVolumeSource, s conversion.Scope) error { - return autoConvert_v1_QuobyteVolumeSource_To_api_QuobyteVolumeSource(in, out, s) -} - -func autoConvert_api_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(in *api.QuobyteVolumeSource, out *v1.QuobyteVolumeSource, s conversion.Scope) error { - out.Registry = in.Registry - out.Volume = in.Volume - out.ReadOnly = in.ReadOnly - out.User = in.User - out.Group = in.Group - return nil -} - -// Convert_api_QuobyteVolumeSource_To_v1_QuobyteVolumeSource is an autogenerated conversion function. -func Convert_api_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(in *api.QuobyteVolumeSource, out *v1.QuobyteVolumeSource, s conversion.Scope) error { - return autoConvert_api_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(in, out, s) -} - -func autoConvert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in *v1.RBDVolumeSource, out *api.RBDVolumeSource, s conversion.Scope) error { - out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors)) - out.RBDImage = in.RBDImage - out.FSType = in.FSType - out.RBDPool = in.RBDPool - out.RadosUser = in.RadosUser - out.Keyring = in.Keyring - out.SecretRef = (*api.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_v1_RBDVolumeSource_To_api_RBDVolumeSource is an autogenerated conversion function. -func Convert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in *v1.RBDVolumeSource, out *api.RBDVolumeSource, s conversion.Scope) error { - return autoConvert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in, out, s) -} - -func autoConvert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in *api.RBDVolumeSource, out *v1.RBDVolumeSource, s conversion.Scope) error { - out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors)) - out.RBDImage = in.RBDImage - out.FSType = in.FSType - out.RBDPool = in.RBDPool - out.RadosUser = in.RadosUser - out.Keyring = in.Keyring - out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_api_RBDVolumeSource_To_v1_RBDVolumeSource is an autogenerated conversion function. -func Convert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in *api.RBDVolumeSource, out *v1.RBDVolumeSource, s conversion.Scope) error { - return autoConvert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in, out, s) -} - -func autoConvert_v1_RangeAllocation_To_api_RangeAllocation(in *v1.RangeAllocation, out *api.RangeAllocation, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Range = in.Range - out.Data = *(*[]byte)(unsafe.Pointer(&in.Data)) - return nil -} - -// Convert_v1_RangeAllocation_To_api_RangeAllocation is an autogenerated conversion function. 
-func Convert_v1_RangeAllocation_To_api_RangeAllocation(in *v1.RangeAllocation, out *api.RangeAllocation, s conversion.Scope) error { - return autoConvert_v1_RangeAllocation_To_api_RangeAllocation(in, out, s) -} - -func autoConvert_api_RangeAllocation_To_v1_RangeAllocation(in *api.RangeAllocation, out *v1.RangeAllocation, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Range = in.Range - out.Data = *(*[]byte)(unsafe.Pointer(&in.Data)) - return nil -} - -// Convert_api_RangeAllocation_To_v1_RangeAllocation is an autogenerated conversion function. -func Convert_api_RangeAllocation_To_v1_RangeAllocation(in *api.RangeAllocation, out *v1.RangeAllocation, s conversion.Scope) error { - return autoConvert_api_RangeAllocation_To_v1_RangeAllocation(in, out, s) -} - -func autoConvert_v1_ReplicationController_To_api_ReplicationController(in *v1.ReplicationController, out *api.ReplicationController, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1_ReplicationController_To_api_ReplicationController is an autogenerated conversion function. -func Convert_v1_ReplicationController_To_api_ReplicationController(in *v1.ReplicationController, out *api.ReplicationController, s conversion.Scope) error { - return autoConvert_v1_ReplicationController_To_api_ReplicationController(in, out, s) -} - -func autoConvert_api_ReplicationController_To_v1_ReplicationController(in *api.ReplicationController, out *v1.ReplicationController, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_api_ReplicationController_To_v1_ReplicationController is an autogenerated conversion function. -func Convert_api_ReplicationController_To_v1_ReplicationController(in *api.ReplicationController, out *v1.ReplicationController, s conversion.Scope) error { - return autoConvert_api_ReplicationController_To_v1_ReplicationController(in, out, s) -} - -func autoConvert_v1_ReplicationControllerCondition_To_api_ReplicationControllerCondition(in *v1.ReplicationControllerCondition, out *api.ReplicationControllerCondition, s conversion.Scope) error { - out.Type = api.ReplicationControllerConditionType(in.Type) - out.Status = api.ConditionStatus(in.Status) - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -// Convert_v1_ReplicationControllerCondition_To_api_ReplicationControllerCondition is an autogenerated conversion function. 
-func Convert_v1_ReplicationControllerCondition_To_api_ReplicationControllerCondition(in *v1.ReplicationControllerCondition, out *api.ReplicationControllerCondition, s conversion.Scope) error { - return autoConvert_v1_ReplicationControllerCondition_To_api_ReplicationControllerCondition(in, out, s) -} - -func autoConvert_api_ReplicationControllerCondition_To_v1_ReplicationControllerCondition(in *api.ReplicationControllerCondition, out *v1.ReplicationControllerCondition, s conversion.Scope) error { - out.Type = v1.ReplicationControllerConditionType(in.Type) - out.Status = v1.ConditionStatus(in.Status) - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -// Convert_api_ReplicationControllerCondition_To_v1_ReplicationControllerCondition is an autogenerated conversion function. -func Convert_api_ReplicationControllerCondition_To_v1_ReplicationControllerCondition(in *api.ReplicationControllerCondition, out *v1.ReplicationControllerCondition, s conversion.Scope) error { - return autoConvert_api_ReplicationControllerCondition_To_v1_ReplicationControllerCondition(in, out, s) -} - -func autoConvert_v1_ReplicationControllerList_To_api_ReplicationControllerList(in *v1.ReplicationControllerList, out *api.ReplicationControllerList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]api.ReplicationController, len(*in)) - for i := range *in { - if err := Convert_v1_ReplicationController_To_api_ReplicationController(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_v1_ReplicationControllerList_To_api_ReplicationControllerList is an autogenerated conversion function. -func Convert_v1_ReplicationControllerList_To_api_ReplicationControllerList(in *v1.ReplicationControllerList, out *api.ReplicationControllerList, s conversion.Scope) error { - return autoConvert_v1_ReplicationControllerList_To_api_ReplicationControllerList(in, out, s) -} - -func autoConvert_api_ReplicationControllerList_To_v1_ReplicationControllerList(in *api.ReplicationControllerList, out *v1.ReplicationControllerList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]v1.ReplicationController, len(*in)) - for i := range *in { - if err := Convert_api_ReplicationController_To_v1_ReplicationController(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_api_ReplicationControllerList_To_v1_ReplicationControllerList is an autogenerated conversion function. 
-func Convert_api_ReplicationControllerList_To_v1_ReplicationControllerList(in *api.ReplicationControllerList, out *v1.ReplicationControllerList, s conversion.Scope) error { - return autoConvert_api_ReplicationControllerList_To_v1_ReplicationControllerList(in, out, s) -} - -func autoConvert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec(in *v1.ReplicationControllerSpec, out *api.ReplicationControllerSpec, s conversion.Scope) error { - if err := meta_v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { - return err - } - out.MinReadySeconds = in.MinReadySeconds - out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) - if in.Template != nil { - in, out := &in.Template, &out.Template - *out = new(api.PodTemplateSpec) - if err := Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(*in, *out, s); err != nil { - return err - } - } else { - out.Template = nil - } - return nil -} - -func autoConvert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(in *api.ReplicationControllerSpec, out *v1.ReplicationControllerSpec, s conversion.Scope) error { - if err := meta_v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { - return err - } - out.MinReadySeconds = in.MinReadySeconds - out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) - if in.Template != nil { - in, out := &in.Template, &out.Template - *out = new(v1.PodTemplateSpec) - if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(*in, *out, s); err != nil { - return err - } - } else { - out.Template = nil - } - return nil -} - -func autoConvert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus(in *v1.ReplicationControllerStatus, out *api.ReplicationControllerStatus, s conversion.Scope) error { - out.Replicas = in.Replicas - out.FullyLabeledReplicas = in.FullyLabeledReplicas - out.ReadyReplicas = in.ReadyReplicas - out.AvailableReplicas = in.AvailableReplicas - out.ObservedGeneration = in.ObservedGeneration - out.Conditions = *(*[]api.ReplicationControllerCondition)(unsafe.Pointer(&in.Conditions)) - return nil -} - -// Convert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus is an autogenerated conversion function. -func Convert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus(in *v1.ReplicationControllerStatus, out *api.ReplicationControllerStatus, s conversion.Scope) error { - return autoConvert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus(in, out, s) -} - -func autoConvert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in *api.ReplicationControllerStatus, out *v1.ReplicationControllerStatus, s conversion.Scope) error { - out.Replicas = in.Replicas - out.FullyLabeledReplicas = in.FullyLabeledReplicas - out.ReadyReplicas = in.ReadyReplicas - out.AvailableReplicas = in.AvailableReplicas - out.ObservedGeneration = in.ObservedGeneration - out.Conditions = *(*[]v1.ReplicationControllerCondition)(unsafe.Pointer(&in.Conditions)) - return nil -} - -// Convert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus is an autogenerated conversion function. 
-func Convert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in *api.ReplicationControllerStatus, out *v1.ReplicationControllerStatus, s conversion.Scope) error { - return autoConvert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in, out, s) -} - -func autoConvert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector(in *v1.ResourceFieldSelector, out *api.ResourceFieldSelector, s conversion.Scope) error { - out.ContainerName = in.ContainerName - out.Resource = in.Resource - out.Divisor = in.Divisor - return nil -} - -// Convert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector is an autogenerated conversion function. -func Convert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector(in *v1.ResourceFieldSelector, out *api.ResourceFieldSelector, s conversion.Scope) error { - return autoConvert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector(in, out, s) -} - -func autoConvert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector(in *api.ResourceFieldSelector, out *v1.ResourceFieldSelector, s conversion.Scope) error { - out.ContainerName = in.ContainerName - out.Resource = in.Resource - out.Divisor = in.Divisor - return nil -} - -// Convert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector is an autogenerated conversion function. -func Convert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector(in *api.ResourceFieldSelector, out *v1.ResourceFieldSelector, s conversion.Scope) error { - return autoConvert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector(in, out, s) -} - -func autoConvert_v1_ResourceQuota_To_api_ResourceQuota(in *v1.ResourceQuota, out *api.ResourceQuota, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1_ResourceQuota_To_api_ResourceQuota is an autogenerated conversion function. -func Convert_v1_ResourceQuota_To_api_ResourceQuota(in *v1.ResourceQuota, out *api.ResourceQuota, s conversion.Scope) error { - return autoConvert_v1_ResourceQuota_To_api_ResourceQuota(in, out, s) -} - -func autoConvert_api_ResourceQuota_To_v1_ResourceQuota(in *api.ResourceQuota, out *v1.ResourceQuota, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_api_ResourceQuota_To_v1_ResourceQuota is an autogenerated conversion function. -func Convert_api_ResourceQuota_To_v1_ResourceQuota(in *api.ResourceQuota, out *v1.ResourceQuota, s conversion.Scope) error { - return autoConvert_api_ResourceQuota_To_v1_ResourceQuota(in, out, s) -} - -func autoConvert_v1_ResourceQuotaList_To_api_ResourceQuotaList(in *v1.ResourceQuotaList, out *api.ResourceQuotaList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]api.ResourceQuota)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1_ResourceQuotaList_To_api_ResourceQuotaList is an autogenerated conversion function. 
-func Convert_v1_ResourceQuotaList_To_api_ResourceQuotaList(in *v1.ResourceQuotaList, out *api.ResourceQuotaList, s conversion.Scope) error { - return autoConvert_v1_ResourceQuotaList_To_api_ResourceQuotaList(in, out, s) -} - -func autoConvert_api_ResourceQuotaList_To_v1_ResourceQuotaList(in *api.ResourceQuotaList, out *v1.ResourceQuotaList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1.ResourceQuota)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_api_ResourceQuotaList_To_v1_ResourceQuotaList is an autogenerated conversion function. -func Convert_api_ResourceQuotaList_To_v1_ResourceQuotaList(in *api.ResourceQuotaList, out *v1.ResourceQuotaList, s conversion.Scope) error { - return autoConvert_api_ResourceQuotaList_To_v1_ResourceQuotaList(in, out, s) -} - -func autoConvert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(in *v1.ResourceQuotaSpec, out *api.ResourceQuotaSpec, s conversion.Scope) error { - out.Hard = *(*api.ResourceList)(unsafe.Pointer(&in.Hard)) - out.Scopes = *(*[]api.ResourceQuotaScope)(unsafe.Pointer(&in.Scopes)) - return nil -} - -// Convert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec is an autogenerated conversion function. -func Convert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(in *v1.ResourceQuotaSpec, out *api.ResourceQuotaSpec, s conversion.Scope) error { - return autoConvert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(in, out, s) -} - -func autoConvert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in *api.ResourceQuotaSpec, out *v1.ResourceQuotaSpec, s conversion.Scope) error { - out.Hard = *(*v1.ResourceList)(unsafe.Pointer(&in.Hard)) - out.Scopes = *(*[]v1.ResourceQuotaScope)(unsafe.Pointer(&in.Scopes)) - return nil -} - -// Convert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec is an autogenerated conversion function. -func Convert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in *api.ResourceQuotaSpec, out *v1.ResourceQuotaSpec, s conversion.Scope) error { - return autoConvert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in, out, s) -} - -func autoConvert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(in *v1.ResourceQuotaStatus, out *api.ResourceQuotaStatus, s conversion.Scope) error { - out.Hard = *(*api.ResourceList)(unsafe.Pointer(&in.Hard)) - out.Used = *(*api.ResourceList)(unsafe.Pointer(&in.Used)) - return nil -} - -// Convert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus is an autogenerated conversion function. -func Convert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(in *v1.ResourceQuotaStatus, out *api.ResourceQuotaStatus, s conversion.Scope) error { - return autoConvert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(in, out, s) -} - -func autoConvert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *api.ResourceQuotaStatus, out *v1.ResourceQuotaStatus, s conversion.Scope) error { - out.Hard = *(*v1.ResourceList)(unsafe.Pointer(&in.Hard)) - out.Used = *(*v1.ResourceList)(unsafe.Pointer(&in.Used)) - return nil -} - -// Convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus is an autogenerated conversion function. 
-func Convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *api.ResourceQuotaStatus, out *v1.ResourceQuotaStatus, s conversion.Scope) error { - return autoConvert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in, out, s) -} - -func autoConvert_v1_ResourceRequirements_To_api_ResourceRequirements(in *v1.ResourceRequirements, out *api.ResourceRequirements, s conversion.Scope) error { - out.Limits = *(*api.ResourceList)(unsafe.Pointer(&in.Limits)) - out.Requests = *(*api.ResourceList)(unsafe.Pointer(&in.Requests)) - return nil -} - -// Convert_v1_ResourceRequirements_To_api_ResourceRequirements is an autogenerated conversion function. -func Convert_v1_ResourceRequirements_To_api_ResourceRequirements(in *v1.ResourceRequirements, out *api.ResourceRequirements, s conversion.Scope) error { - return autoConvert_v1_ResourceRequirements_To_api_ResourceRequirements(in, out, s) -} - -func autoConvert_api_ResourceRequirements_To_v1_ResourceRequirements(in *api.ResourceRequirements, out *v1.ResourceRequirements, s conversion.Scope) error { - out.Limits = *(*v1.ResourceList)(unsafe.Pointer(&in.Limits)) - out.Requests = *(*v1.ResourceList)(unsafe.Pointer(&in.Requests)) - return nil -} - -// Convert_api_ResourceRequirements_To_v1_ResourceRequirements is an autogenerated conversion function. -func Convert_api_ResourceRequirements_To_v1_ResourceRequirements(in *api.ResourceRequirements, out *v1.ResourceRequirements, s conversion.Scope) error { - return autoConvert_api_ResourceRequirements_To_v1_ResourceRequirements(in, out, s) -} - -func autoConvert_v1_SELinuxOptions_To_api_SELinuxOptions(in *v1.SELinuxOptions, out *api.SELinuxOptions, s conversion.Scope) error { - out.User = in.User - out.Role = in.Role - out.Type = in.Type - out.Level = in.Level - return nil -} - -// Convert_v1_SELinuxOptions_To_api_SELinuxOptions is an autogenerated conversion function. -func Convert_v1_SELinuxOptions_To_api_SELinuxOptions(in *v1.SELinuxOptions, out *api.SELinuxOptions, s conversion.Scope) error { - return autoConvert_v1_SELinuxOptions_To_api_SELinuxOptions(in, out, s) -} - -func autoConvert_api_SELinuxOptions_To_v1_SELinuxOptions(in *api.SELinuxOptions, out *v1.SELinuxOptions, s conversion.Scope) error { - out.User = in.User - out.Role = in.Role - out.Type = in.Type - out.Level = in.Level - return nil -} - -// Convert_api_SELinuxOptions_To_v1_SELinuxOptions is an autogenerated conversion function. -func Convert_api_SELinuxOptions_To_v1_SELinuxOptions(in *api.SELinuxOptions, out *v1.SELinuxOptions, s conversion.Scope) error { - return autoConvert_api_SELinuxOptions_To_v1_SELinuxOptions(in, out, s) -} - -func autoConvert_v1_ScaleIOVolumeSource_To_api_ScaleIOVolumeSource(in *v1.ScaleIOVolumeSource, out *api.ScaleIOVolumeSource, s conversion.Scope) error { - out.Gateway = in.Gateway - out.System = in.System - out.SecretRef = (*api.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) - out.SSLEnabled = in.SSLEnabled - out.ProtectionDomain = in.ProtectionDomain - out.StoragePool = in.StoragePool - out.StorageMode = in.StorageMode - out.VolumeName = in.VolumeName - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_v1_ScaleIOVolumeSource_To_api_ScaleIOVolumeSource is an autogenerated conversion function. 
-func Convert_v1_ScaleIOVolumeSource_To_api_ScaleIOVolumeSource(in *v1.ScaleIOVolumeSource, out *api.ScaleIOVolumeSource, s conversion.Scope) error { - return autoConvert_v1_ScaleIOVolumeSource_To_api_ScaleIOVolumeSource(in, out, s) -} - -func autoConvert_api_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in *api.ScaleIOVolumeSource, out *v1.ScaleIOVolumeSource, s conversion.Scope) error { - out.Gateway = in.Gateway - out.System = in.System - out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) - out.SSLEnabled = in.SSLEnabled - out.ProtectionDomain = in.ProtectionDomain - out.StoragePool = in.StoragePool - out.StorageMode = in.StorageMode - out.VolumeName = in.VolumeName - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_api_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource is an autogenerated conversion function. -func Convert_api_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in *api.ScaleIOVolumeSource, out *v1.ScaleIOVolumeSource, s conversion.Scope) error { - return autoConvert_api_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in, out, s) -} - -func autoConvert_v1_Secret_To_api_Secret(in *v1.Secret, out *api.Secret, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Data = *(*map[string][]byte)(unsafe.Pointer(&in.Data)) - // INFO: in.StringData opted out of conversion generation - out.Type = api.SecretType(in.Type) - return nil -} - -func autoConvert_api_Secret_To_v1_Secret(in *api.Secret, out *v1.Secret, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Data = *(*map[string][]byte)(unsafe.Pointer(&in.Data)) - out.Type = v1.SecretType(in.Type) - return nil -} - -// Convert_api_Secret_To_v1_Secret is an autogenerated conversion function. -func Convert_api_Secret_To_v1_Secret(in *api.Secret, out *v1.Secret, s conversion.Scope) error { - return autoConvert_api_Secret_To_v1_Secret(in, out, s) -} - -func autoConvert_v1_SecretEnvSource_To_api_SecretEnvSource(in *v1.SecretEnvSource, out *api.SecretEnvSource, s conversion.Scope) error { - if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_v1_SecretEnvSource_To_api_SecretEnvSource is an autogenerated conversion function. -func Convert_v1_SecretEnvSource_To_api_SecretEnvSource(in *v1.SecretEnvSource, out *api.SecretEnvSource, s conversion.Scope) error { - return autoConvert_v1_SecretEnvSource_To_api_SecretEnvSource(in, out, s) -} - -func autoConvert_api_SecretEnvSource_To_v1_SecretEnvSource(in *api.SecretEnvSource, out *v1.SecretEnvSource, s conversion.Scope) error { - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_api_SecretEnvSource_To_v1_SecretEnvSource is an autogenerated conversion function. 
-func Convert_api_SecretEnvSource_To_v1_SecretEnvSource(in *api.SecretEnvSource, out *v1.SecretEnvSource, s conversion.Scope) error { - return autoConvert_api_SecretEnvSource_To_v1_SecretEnvSource(in, out, s) -} - -func autoConvert_v1_SecretKeySelector_To_api_SecretKeySelector(in *v1.SecretKeySelector, out *api.SecretKeySelector, s conversion.Scope) error { - if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Key = in.Key - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_v1_SecretKeySelector_To_api_SecretKeySelector is an autogenerated conversion function. -func Convert_v1_SecretKeySelector_To_api_SecretKeySelector(in *v1.SecretKeySelector, out *api.SecretKeySelector, s conversion.Scope) error { - return autoConvert_v1_SecretKeySelector_To_api_SecretKeySelector(in, out, s) -} - -func autoConvert_api_SecretKeySelector_To_v1_SecretKeySelector(in *api.SecretKeySelector, out *v1.SecretKeySelector, s conversion.Scope) error { - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Key = in.Key - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_api_SecretKeySelector_To_v1_SecretKeySelector is an autogenerated conversion function. -func Convert_api_SecretKeySelector_To_v1_SecretKeySelector(in *api.SecretKeySelector, out *v1.SecretKeySelector, s conversion.Scope) error { - return autoConvert_api_SecretKeySelector_To_v1_SecretKeySelector(in, out, s) -} - -func autoConvert_v1_SecretList_To_api_SecretList(in *v1.SecretList, out *api.SecretList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]api.Secret, len(*in)) - for i := range *in { - if err := Convert_v1_Secret_To_api_Secret(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_v1_SecretList_To_api_SecretList is an autogenerated conversion function. -func Convert_v1_SecretList_To_api_SecretList(in *v1.SecretList, out *api.SecretList, s conversion.Scope) error { - return autoConvert_v1_SecretList_To_api_SecretList(in, out, s) -} - -func autoConvert_api_SecretList_To_v1_SecretList(in *api.SecretList, out *v1.SecretList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]v1.Secret, len(*in)) - for i := range *in { - if err := Convert_api_Secret_To_v1_Secret(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_api_SecretList_To_v1_SecretList is an autogenerated conversion function. 
-func Convert_api_SecretList_To_v1_SecretList(in *api.SecretList, out *v1.SecretList, s conversion.Scope) error { - return autoConvert_api_SecretList_To_v1_SecretList(in, out, s) -} - -func autoConvert_v1_SecretProjection_To_api_SecretProjection(in *v1.SecretProjection, out *api.SecretProjection, s conversion.Scope) error { - if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Items = *(*[]api.KeyToPath)(unsafe.Pointer(&in.Items)) - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_v1_SecretProjection_To_api_SecretProjection is an autogenerated conversion function. -func Convert_v1_SecretProjection_To_api_SecretProjection(in *v1.SecretProjection, out *api.SecretProjection, s conversion.Scope) error { - return autoConvert_v1_SecretProjection_To_api_SecretProjection(in, out, s) -} - -func autoConvert_api_SecretProjection_To_v1_SecretProjection(in *api.SecretProjection, out *v1.SecretProjection, s conversion.Scope) error { - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Items = *(*[]v1.KeyToPath)(unsafe.Pointer(&in.Items)) - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_api_SecretProjection_To_v1_SecretProjection is an autogenerated conversion function. -func Convert_api_SecretProjection_To_v1_SecretProjection(in *api.SecretProjection, out *v1.SecretProjection, s conversion.Scope) error { - return autoConvert_api_SecretProjection_To_v1_SecretProjection(in, out, s) -} - -func autoConvert_v1_SecretReference_To_api_SecretReference(in *v1.SecretReference, out *api.SecretReference, s conversion.Scope) error { - out.Name = in.Name - out.Namespace = in.Namespace - return nil -} - -// Convert_v1_SecretReference_To_api_SecretReference is an autogenerated conversion function. -func Convert_v1_SecretReference_To_api_SecretReference(in *v1.SecretReference, out *api.SecretReference, s conversion.Scope) error { - return autoConvert_v1_SecretReference_To_api_SecretReference(in, out, s) -} - -func autoConvert_api_SecretReference_To_v1_SecretReference(in *api.SecretReference, out *v1.SecretReference, s conversion.Scope) error { - out.Name = in.Name - out.Namespace = in.Namespace - return nil -} - -// Convert_api_SecretReference_To_v1_SecretReference is an autogenerated conversion function. -func Convert_api_SecretReference_To_v1_SecretReference(in *api.SecretReference, out *v1.SecretReference, s conversion.Scope) error { - return autoConvert_api_SecretReference_To_v1_SecretReference(in, out, s) -} - -func autoConvert_v1_SecretVolumeSource_To_api_SecretVolumeSource(in *v1.SecretVolumeSource, out *api.SecretVolumeSource, s conversion.Scope) error { - out.SecretName = in.SecretName - out.Items = *(*[]api.KeyToPath)(unsafe.Pointer(&in.Items)) - out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_v1_SecretVolumeSource_To_api_SecretVolumeSource is an autogenerated conversion function. 
-func Convert_v1_SecretVolumeSource_To_api_SecretVolumeSource(in *v1.SecretVolumeSource, out *api.SecretVolumeSource, s conversion.Scope) error { - return autoConvert_v1_SecretVolumeSource_To_api_SecretVolumeSource(in, out, s) -} - -func autoConvert_api_SecretVolumeSource_To_v1_SecretVolumeSource(in *api.SecretVolumeSource, out *v1.SecretVolumeSource, s conversion.Scope) error { - out.SecretName = in.SecretName - out.Items = *(*[]v1.KeyToPath)(unsafe.Pointer(&in.Items)) - out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_api_SecretVolumeSource_To_v1_SecretVolumeSource is an autogenerated conversion function. -func Convert_api_SecretVolumeSource_To_v1_SecretVolumeSource(in *api.SecretVolumeSource, out *v1.SecretVolumeSource, s conversion.Scope) error { - return autoConvert_api_SecretVolumeSource_To_v1_SecretVolumeSource(in, out, s) -} - -func autoConvert_v1_SecurityContext_To_api_SecurityContext(in *v1.SecurityContext, out *api.SecurityContext, s conversion.Scope) error { - out.Capabilities = (*api.Capabilities)(unsafe.Pointer(in.Capabilities)) - out.Privileged = (*bool)(unsafe.Pointer(in.Privileged)) - out.SELinuxOptions = (*api.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) - out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser)) - out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot)) - out.ReadOnlyRootFilesystem = (*bool)(unsafe.Pointer(in.ReadOnlyRootFilesystem)) - out.AllowPrivilegeEscalation = (*bool)(unsafe.Pointer(in.AllowPrivilegeEscalation)) - return nil -} - -// Convert_v1_SecurityContext_To_api_SecurityContext is an autogenerated conversion function. -func Convert_v1_SecurityContext_To_api_SecurityContext(in *v1.SecurityContext, out *api.SecurityContext, s conversion.Scope) error { - return autoConvert_v1_SecurityContext_To_api_SecurityContext(in, out, s) -} - -func autoConvert_api_SecurityContext_To_v1_SecurityContext(in *api.SecurityContext, out *v1.SecurityContext, s conversion.Scope) error { - out.Capabilities = (*v1.Capabilities)(unsafe.Pointer(in.Capabilities)) - out.Privileged = (*bool)(unsafe.Pointer(in.Privileged)) - out.SELinuxOptions = (*v1.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) - out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser)) - out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot)) - out.ReadOnlyRootFilesystem = (*bool)(unsafe.Pointer(in.ReadOnlyRootFilesystem)) - out.AllowPrivilegeEscalation = (*bool)(unsafe.Pointer(in.AllowPrivilegeEscalation)) - return nil -} - -func autoConvert_v1_SerializedReference_To_api_SerializedReference(in *v1.SerializedReference, out *api.SerializedReference, s conversion.Scope) error { - if err := Convert_v1_ObjectReference_To_api_ObjectReference(&in.Reference, &out.Reference, s); err != nil { - return err - } - return nil -} - -// Convert_v1_SerializedReference_To_api_SerializedReference is an autogenerated conversion function. 
-func Convert_v1_SerializedReference_To_api_SerializedReference(in *v1.SerializedReference, out *api.SerializedReference, s conversion.Scope) error { - return autoConvert_v1_SerializedReference_To_api_SerializedReference(in, out, s) -} - -func autoConvert_api_SerializedReference_To_v1_SerializedReference(in *api.SerializedReference, out *v1.SerializedReference, s conversion.Scope) error { - if err := Convert_api_ObjectReference_To_v1_ObjectReference(&in.Reference, &out.Reference, s); err != nil { - return err - } - return nil -} - -// Convert_api_SerializedReference_To_v1_SerializedReference is an autogenerated conversion function. -func Convert_api_SerializedReference_To_v1_SerializedReference(in *api.SerializedReference, out *v1.SerializedReference, s conversion.Scope) error { - return autoConvert_api_SerializedReference_To_v1_SerializedReference(in, out, s) -} - -func autoConvert_v1_Service_To_api_Service(in *v1.Service, out *api.Service, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_ServiceSpec_To_api_ServiceSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_ServiceStatus_To_api_ServiceStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1_Service_To_api_Service is an autogenerated conversion function. -func Convert_v1_Service_To_api_Service(in *v1.Service, out *api.Service, s conversion.Scope) error { - return autoConvert_v1_Service_To_api_Service(in, out, s) -} - -func autoConvert_api_Service_To_v1_Service(in *api.Service, out *v1.Service, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_ServiceSpec_To_v1_ServiceSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_api_ServiceStatus_To_v1_ServiceStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_api_Service_To_v1_Service is an autogenerated conversion function. -func Convert_api_Service_To_v1_Service(in *api.Service, out *v1.Service, s conversion.Scope) error { - return autoConvert_api_Service_To_v1_Service(in, out, s) -} - -func autoConvert_v1_ServiceAccount_To_api_ServiceAccount(in *v1.ServiceAccount, out *api.ServiceAccount, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Secrets = *(*[]api.ObjectReference)(unsafe.Pointer(&in.Secrets)) - out.ImagePullSecrets = *(*[]api.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) - out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken)) - return nil -} - -// Convert_v1_ServiceAccount_To_api_ServiceAccount is an autogenerated conversion function. -func Convert_v1_ServiceAccount_To_api_ServiceAccount(in *v1.ServiceAccount, out *api.ServiceAccount, s conversion.Scope) error { - return autoConvert_v1_ServiceAccount_To_api_ServiceAccount(in, out, s) -} - -func autoConvert_api_ServiceAccount_To_v1_ServiceAccount(in *api.ServiceAccount, out *v1.ServiceAccount, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Secrets = *(*[]v1.ObjectReference)(unsafe.Pointer(&in.Secrets)) - out.ImagePullSecrets = *(*[]v1.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) - out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken)) - return nil -} - -// Convert_api_ServiceAccount_To_v1_ServiceAccount is an autogenerated conversion function. 
-func Convert_api_ServiceAccount_To_v1_ServiceAccount(in *api.ServiceAccount, out *v1.ServiceAccount, s conversion.Scope) error { - return autoConvert_api_ServiceAccount_To_v1_ServiceAccount(in, out, s) -} - -func autoConvert_v1_ServiceAccountList_To_api_ServiceAccountList(in *v1.ServiceAccountList, out *api.ServiceAccountList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]api.ServiceAccount)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1_ServiceAccountList_To_api_ServiceAccountList is an autogenerated conversion function. -func Convert_v1_ServiceAccountList_To_api_ServiceAccountList(in *v1.ServiceAccountList, out *api.ServiceAccountList, s conversion.Scope) error { - return autoConvert_v1_ServiceAccountList_To_api_ServiceAccountList(in, out, s) -} - -func autoConvert_api_ServiceAccountList_To_v1_ServiceAccountList(in *api.ServiceAccountList, out *v1.ServiceAccountList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1.ServiceAccount)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_api_ServiceAccountList_To_v1_ServiceAccountList is an autogenerated conversion function. -func Convert_api_ServiceAccountList_To_v1_ServiceAccountList(in *api.ServiceAccountList, out *v1.ServiceAccountList, s conversion.Scope) error { - return autoConvert_api_ServiceAccountList_To_v1_ServiceAccountList(in, out, s) -} - -func autoConvert_v1_ServiceList_To_api_ServiceList(in *v1.ServiceList, out *api.ServiceList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]api.Service, len(*in)) - for i := range *in { - if err := Convert_v1_Service_To_api_Service(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_v1_ServiceList_To_api_ServiceList is an autogenerated conversion function. -func Convert_v1_ServiceList_To_api_ServiceList(in *v1.ServiceList, out *api.ServiceList, s conversion.Scope) error { - return autoConvert_v1_ServiceList_To_api_ServiceList(in, out, s) -} - -func autoConvert_api_ServiceList_To_v1_ServiceList(in *api.ServiceList, out *v1.ServiceList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]v1.Service, len(*in)) - for i := range *in { - if err := Convert_api_Service_To_v1_Service(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_api_ServiceList_To_v1_ServiceList is an autogenerated conversion function. -func Convert_api_ServiceList_To_v1_ServiceList(in *api.ServiceList, out *v1.ServiceList, s conversion.Scope) error { - return autoConvert_api_ServiceList_To_v1_ServiceList(in, out, s) -} - -func autoConvert_v1_ServicePort_To_api_ServicePort(in *v1.ServicePort, out *api.ServicePort, s conversion.Scope) error { - out.Name = in.Name - out.Protocol = api.Protocol(in.Protocol) - out.Port = in.Port - out.TargetPort = in.TargetPort - out.NodePort = in.NodePort - return nil -} - -// Convert_v1_ServicePort_To_api_ServicePort is an autogenerated conversion function. 
-func Convert_v1_ServicePort_To_api_ServicePort(in *v1.ServicePort, out *api.ServicePort, s conversion.Scope) error { - return autoConvert_v1_ServicePort_To_api_ServicePort(in, out, s) -} - -func autoConvert_api_ServicePort_To_v1_ServicePort(in *api.ServicePort, out *v1.ServicePort, s conversion.Scope) error { - out.Name = in.Name - out.Protocol = v1.Protocol(in.Protocol) - out.Port = in.Port - out.TargetPort = in.TargetPort - out.NodePort = in.NodePort - return nil -} - -// Convert_api_ServicePort_To_v1_ServicePort is an autogenerated conversion function. -func Convert_api_ServicePort_To_v1_ServicePort(in *api.ServicePort, out *v1.ServicePort, s conversion.Scope) error { - return autoConvert_api_ServicePort_To_v1_ServicePort(in, out, s) -} - -func autoConvert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions(in *v1.ServiceProxyOptions, out *api.ServiceProxyOptions, s conversion.Scope) error { - out.Path = in.Path - return nil -} - -// Convert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions is an autogenerated conversion function. -func Convert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions(in *v1.ServiceProxyOptions, out *api.ServiceProxyOptions, s conversion.Scope) error { - return autoConvert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions(in, out, s) -} - -func autoConvert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions(in *api.ServiceProxyOptions, out *v1.ServiceProxyOptions, s conversion.Scope) error { - out.Path = in.Path - return nil -} - -// Convert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions is an autogenerated conversion function. -func Convert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions(in *api.ServiceProxyOptions, out *v1.ServiceProxyOptions, s conversion.Scope) error { - return autoConvert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions(in, out, s) -} - -func autoConvert_v1_ServiceSpec_To_api_ServiceSpec(in *v1.ServiceSpec, out *api.ServiceSpec, s conversion.Scope) error { - out.Ports = *(*[]api.ServicePort)(unsafe.Pointer(&in.Ports)) - out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) - out.ClusterIP = in.ClusterIP - out.Type = api.ServiceType(in.Type) - out.ExternalIPs = *(*[]string)(unsafe.Pointer(&in.ExternalIPs)) - out.SessionAffinity = api.ServiceAffinity(in.SessionAffinity) - out.LoadBalancerIP = in.LoadBalancerIP - out.LoadBalancerSourceRanges = *(*[]string)(unsafe.Pointer(&in.LoadBalancerSourceRanges)) - out.ExternalName = in.ExternalName - out.ExternalTrafficPolicy = api.ServiceExternalTrafficPolicyType(in.ExternalTrafficPolicy) - out.HealthCheckNodePort = in.HealthCheckNodePort - out.PublishNotReadyAddresses = in.PublishNotReadyAddresses - out.SessionAffinityConfig = (*api.SessionAffinityConfig)(unsafe.Pointer(in.SessionAffinityConfig)) - return nil -} - -// Convert_v1_ServiceSpec_To_api_ServiceSpec is an autogenerated conversion function. 
-func Convert_v1_ServiceSpec_To_api_ServiceSpec(in *v1.ServiceSpec, out *api.ServiceSpec, s conversion.Scope) error { - return autoConvert_v1_ServiceSpec_To_api_ServiceSpec(in, out, s) -} - -func autoConvert_api_ServiceSpec_To_v1_ServiceSpec(in *api.ServiceSpec, out *v1.ServiceSpec, s conversion.Scope) error { - out.Type = v1.ServiceType(in.Type) - out.Ports = *(*[]v1.ServicePort)(unsafe.Pointer(&in.Ports)) - out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) - out.ClusterIP = in.ClusterIP - out.ExternalName = in.ExternalName - out.ExternalIPs = *(*[]string)(unsafe.Pointer(&in.ExternalIPs)) - out.LoadBalancerIP = in.LoadBalancerIP - out.SessionAffinity = v1.ServiceAffinity(in.SessionAffinity) - out.SessionAffinityConfig = (*v1.SessionAffinityConfig)(unsafe.Pointer(in.SessionAffinityConfig)) - out.LoadBalancerSourceRanges = *(*[]string)(unsafe.Pointer(&in.LoadBalancerSourceRanges)) - out.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyType(in.ExternalTrafficPolicy) - out.HealthCheckNodePort = in.HealthCheckNodePort - out.PublishNotReadyAddresses = in.PublishNotReadyAddresses - return nil -} - -// Convert_api_ServiceSpec_To_v1_ServiceSpec is an autogenerated conversion function. -func Convert_api_ServiceSpec_To_v1_ServiceSpec(in *api.ServiceSpec, out *v1.ServiceSpec, s conversion.Scope) error { - return autoConvert_api_ServiceSpec_To_v1_ServiceSpec(in, out, s) -} - -func autoConvert_v1_ServiceStatus_To_api_ServiceStatus(in *v1.ServiceStatus, out *api.ServiceStatus, s conversion.Scope) error { - if err := Convert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil { - return err - } - return nil -} - -// Convert_v1_ServiceStatus_To_api_ServiceStatus is an autogenerated conversion function. -func Convert_v1_ServiceStatus_To_api_ServiceStatus(in *v1.ServiceStatus, out *api.ServiceStatus, s conversion.Scope) error { - return autoConvert_v1_ServiceStatus_To_api_ServiceStatus(in, out, s) -} - -func autoConvert_api_ServiceStatus_To_v1_ServiceStatus(in *api.ServiceStatus, out *v1.ServiceStatus, s conversion.Scope) error { - if err := Convert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil { - return err - } - return nil -} - -// Convert_api_ServiceStatus_To_v1_ServiceStatus is an autogenerated conversion function. -func Convert_api_ServiceStatus_To_v1_ServiceStatus(in *api.ServiceStatus, out *v1.ServiceStatus, s conversion.Scope) error { - return autoConvert_api_ServiceStatus_To_v1_ServiceStatus(in, out, s) -} - -func autoConvert_v1_SessionAffinityConfig_To_api_SessionAffinityConfig(in *v1.SessionAffinityConfig, out *api.SessionAffinityConfig, s conversion.Scope) error { - out.ClientIP = (*api.ClientIPConfig)(unsafe.Pointer(in.ClientIP)) - return nil -} - -// Convert_v1_SessionAffinityConfig_To_api_SessionAffinityConfig is an autogenerated conversion function. -func Convert_v1_SessionAffinityConfig_To_api_SessionAffinityConfig(in *v1.SessionAffinityConfig, out *api.SessionAffinityConfig, s conversion.Scope) error { - return autoConvert_v1_SessionAffinityConfig_To_api_SessionAffinityConfig(in, out, s) -} - -func autoConvert_api_SessionAffinityConfig_To_v1_SessionAffinityConfig(in *api.SessionAffinityConfig, out *v1.SessionAffinityConfig, s conversion.Scope) error { - out.ClientIP = (*v1.ClientIPConfig)(unsafe.Pointer(in.ClientIP)) - return nil -} - -// Convert_api_SessionAffinityConfig_To_v1_SessionAffinityConfig is an autogenerated conversion function. 
-func Convert_api_SessionAffinityConfig_To_v1_SessionAffinityConfig(in *api.SessionAffinityConfig, out *v1.SessionAffinityConfig, s conversion.Scope) error { - return autoConvert_api_SessionAffinityConfig_To_v1_SessionAffinityConfig(in, out, s) -} - -func autoConvert_v1_StorageOSPersistentVolumeSource_To_api_StorageOSPersistentVolumeSource(in *v1.StorageOSPersistentVolumeSource, out *api.StorageOSPersistentVolumeSource, s conversion.Scope) error { - out.VolumeName = in.VolumeName - out.VolumeNamespace = in.VolumeNamespace - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - out.SecretRef = (*api.ObjectReference)(unsafe.Pointer(in.SecretRef)) - return nil -} - -// Convert_v1_StorageOSPersistentVolumeSource_To_api_StorageOSPersistentVolumeSource is an autogenerated conversion function. -func Convert_v1_StorageOSPersistentVolumeSource_To_api_StorageOSPersistentVolumeSource(in *v1.StorageOSPersistentVolumeSource, out *api.StorageOSPersistentVolumeSource, s conversion.Scope) error { - return autoConvert_v1_StorageOSPersistentVolumeSource_To_api_StorageOSPersistentVolumeSource(in, out, s) -} - -func autoConvert_api_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource(in *api.StorageOSPersistentVolumeSource, out *v1.StorageOSPersistentVolumeSource, s conversion.Scope) error { - out.VolumeName = in.VolumeName - out.VolumeNamespace = in.VolumeNamespace - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - out.SecretRef = (*v1.ObjectReference)(unsafe.Pointer(in.SecretRef)) - return nil -} - -// Convert_api_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource is an autogenerated conversion function. -func Convert_api_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource(in *api.StorageOSPersistentVolumeSource, out *v1.StorageOSPersistentVolumeSource, s conversion.Scope) error { - return autoConvert_api_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource(in, out, s) -} - -func autoConvert_v1_StorageOSVolumeSource_To_api_StorageOSVolumeSource(in *v1.StorageOSVolumeSource, out *api.StorageOSVolumeSource, s conversion.Scope) error { - out.VolumeName = in.VolumeName - out.VolumeNamespace = in.VolumeNamespace - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - out.SecretRef = (*api.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) - return nil -} - -// Convert_v1_StorageOSVolumeSource_To_api_StorageOSVolumeSource is an autogenerated conversion function. -func Convert_v1_StorageOSVolumeSource_To_api_StorageOSVolumeSource(in *v1.StorageOSVolumeSource, out *api.StorageOSVolumeSource, s conversion.Scope) error { - return autoConvert_v1_StorageOSVolumeSource_To_api_StorageOSVolumeSource(in, out, s) -} - -func autoConvert_api_StorageOSVolumeSource_To_v1_StorageOSVolumeSource(in *api.StorageOSVolumeSource, out *v1.StorageOSVolumeSource, s conversion.Scope) error { - out.VolumeName = in.VolumeName - out.VolumeNamespace = in.VolumeNamespace - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) - return nil -} - -// Convert_api_StorageOSVolumeSource_To_v1_StorageOSVolumeSource is an autogenerated conversion function. 
-func Convert_api_StorageOSVolumeSource_To_v1_StorageOSVolumeSource(in *api.StorageOSVolumeSource, out *v1.StorageOSVolumeSource, s conversion.Scope) error { - return autoConvert_api_StorageOSVolumeSource_To_v1_StorageOSVolumeSource(in, out, s) -} - -func autoConvert_v1_Sysctl_To_api_Sysctl(in *v1.Sysctl, out *api.Sysctl, s conversion.Scope) error { - out.Name = in.Name - out.Value = in.Value - return nil -} - -// Convert_v1_Sysctl_To_api_Sysctl is an autogenerated conversion function. -func Convert_v1_Sysctl_To_api_Sysctl(in *v1.Sysctl, out *api.Sysctl, s conversion.Scope) error { - return autoConvert_v1_Sysctl_To_api_Sysctl(in, out, s) -} - -func autoConvert_api_Sysctl_To_v1_Sysctl(in *api.Sysctl, out *v1.Sysctl, s conversion.Scope) error { - out.Name = in.Name - out.Value = in.Value - return nil -} - -// Convert_api_Sysctl_To_v1_Sysctl is an autogenerated conversion function. -func Convert_api_Sysctl_To_v1_Sysctl(in *api.Sysctl, out *v1.Sysctl, s conversion.Scope) error { - return autoConvert_api_Sysctl_To_v1_Sysctl(in, out, s) -} - -func autoConvert_v1_TCPSocketAction_To_api_TCPSocketAction(in *v1.TCPSocketAction, out *api.TCPSocketAction, s conversion.Scope) error { - out.Port = in.Port - out.Host = in.Host - return nil -} - -// Convert_v1_TCPSocketAction_To_api_TCPSocketAction is an autogenerated conversion function. -func Convert_v1_TCPSocketAction_To_api_TCPSocketAction(in *v1.TCPSocketAction, out *api.TCPSocketAction, s conversion.Scope) error { - return autoConvert_v1_TCPSocketAction_To_api_TCPSocketAction(in, out, s) -} - -func autoConvert_api_TCPSocketAction_To_v1_TCPSocketAction(in *api.TCPSocketAction, out *v1.TCPSocketAction, s conversion.Scope) error { - out.Port = in.Port - out.Host = in.Host - return nil -} - -// Convert_api_TCPSocketAction_To_v1_TCPSocketAction is an autogenerated conversion function. -func Convert_api_TCPSocketAction_To_v1_TCPSocketAction(in *api.TCPSocketAction, out *v1.TCPSocketAction, s conversion.Scope) error { - return autoConvert_api_TCPSocketAction_To_v1_TCPSocketAction(in, out, s) -} - -func autoConvert_v1_Taint_To_api_Taint(in *v1.Taint, out *api.Taint, s conversion.Scope) error { - out.Key = in.Key - out.Value = in.Value - out.Effect = api.TaintEffect(in.Effect) - out.TimeAdded = in.TimeAdded - return nil -} - -// Convert_v1_Taint_To_api_Taint is an autogenerated conversion function. -func Convert_v1_Taint_To_api_Taint(in *v1.Taint, out *api.Taint, s conversion.Scope) error { - return autoConvert_v1_Taint_To_api_Taint(in, out, s) -} - -func autoConvert_api_Taint_To_v1_Taint(in *api.Taint, out *v1.Taint, s conversion.Scope) error { - out.Key = in.Key - out.Value = in.Value - out.Effect = v1.TaintEffect(in.Effect) - out.TimeAdded = in.TimeAdded - return nil -} - -// Convert_api_Taint_To_v1_Taint is an autogenerated conversion function. -func Convert_api_Taint_To_v1_Taint(in *api.Taint, out *v1.Taint, s conversion.Scope) error { - return autoConvert_api_Taint_To_v1_Taint(in, out, s) -} - -func autoConvert_v1_Toleration_To_api_Toleration(in *v1.Toleration, out *api.Toleration, s conversion.Scope) error { - out.Key = in.Key - out.Operator = api.TolerationOperator(in.Operator) - out.Value = in.Value - out.Effect = api.TaintEffect(in.Effect) - out.TolerationSeconds = (*int64)(unsafe.Pointer(in.TolerationSeconds)) - return nil -} - -// Convert_v1_Toleration_To_api_Toleration is an autogenerated conversion function. 
-func Convert_v1_Toleration_To_api_Toleration(in *v1.Toleration, out *api.Toleration, s conversion.Scope) error { - return autoConvert_v1_Toleration_To_api_Toleration(in, out, s) -} - -func autoConvert_api_Toleration_To_v1_Toleration(in *api.Toleration, out *v1.Toleration, s conversion.Scope) error { - out.Key = in.Key - out.Operator = v1.TolerationOperator(in.Operator) - out.Value = in.Value - out.Effect = v1.TaintEffect(in.Effect) - out.TolerationSeconds = (*int64)(unsafe.Pointer(in.TolerationSeconds)) - return nil -} - -// Convert_api_Toleration_To_v1_Toleration is an autogenerated conversion function. -func Convert_api_Toleration_To_v1_Toleration(in *api.Toleration, out *v1.Toleration, s conversion.Scope) error { - return autoConvert_api_Toleration_To_v1_Toleration(in, out, s) -} - -func autoConvert_v1_Volume_To_api_Volume(in *v1.Volume, out *api.Volume, s conversion.Scope) error { - out.Name = in.Name - if err := Convert_v1_VolumeSource_To_api_VolumeSource(&in.VolumeSource, &out.VolumeSource, s); err != nil { - return err - } - return nil -} - -// Convert_v1_Volume_To_api_Volume is an autogenerated conversion function. -func Convert_v1_Volume_To_api_Volume(in *v1.Volume, out *api.Volume, s conversion.Scope) error { - return autoConvert_v1_Volume_To_api_Volume(in, out, s) -} - -func autoConvert_api_Volume_To_v1_Volume(in *api.Volume, out *v1.Volume, s conversion.Scope) error { - out.Name = in.Name - if err := Convert_api_VolumeSource_To_v1_VolumeSource(&in.VolumeSource, &out.VolumeSource, s); err != nil { - return err - } - return nil -} - -// Convert_api_Volume_To_v1_Volume is an autogenerated conversion function. -func Convert_api_Volume_To_v1_Volume(in *api.Volume, out *v1.Volume, s conversion.Scope) error { - return autoConvert_api_Volume_To_v1_Volume(in, out, s) -} - -func autoConvert_v1_VolumeMount_To_api_VolumeMount(in *v1.VolumeMount, out *api.VolumeMount, s conversion.Scope) error { - out.Name = in.Name - out.ReadOnly = in.ReadOnly - out.MountPath = in.MountPath - out.SubPath = in.SubPath - out.MountPropagation = (*api.MountPropagationMode)(unsafe.Pointer(in.MountPropagation)) - return nil -} - -// Convert_v1_VolumeMount_To_api_VolumeMount is an autogenerated conversion function. -func Convert_v1_VolumeMount_To_api_VolumeMount(in *v1.VolumeMount, out *api.VolumeMount, s conversion.Scope) error { - return autoConvert_v1_VolumeMount_To_api_VolumeMount(in, out, s) -} - -func autoConvert_api_VolumeMount_To_v1_VolumeMount(in *api.VolumeMount, out *v1.VolumeMount, s conversion.Scope) error { - out.Name = in.Name - out.ReadOnly = in.ReadOnly - out.MountPath = in.MountPath - out.SubPath = in.SubPath - out.MountPropagation = (*v1.MountPropagationMode)(unsafe.Pointer(in.MountPropagation)) - return nil -} - -// Convert_api_VolumeMount_To_v1_VolumeMount is an autogenerated conversion function. -func Convert_api_VolumeMount_To_v1_VolumeMount(in *api.VolumeMount, out *v1.VolumeMount, s conversion.Scope) error { - return autoConvert_api_VolumeMount_To_v1_VolumeMount(in, out, s) -} - -func autoConvert_v1_VolumeProjection_To_api_VolumeProjection(in *v1.VolumeProjection, out *api.VolumeProjection, s conversion.Scope) error { - out.Secret = (*api.SecretProjection)(unsafe.Pointer(in.Secret)) - out.DownwardAPI = (*api.DownwardAPIProjection)(unsafe.Pointer(in.DownwardAPI)) - out.ConfigMap = (*api.ConfigMapProjection)(unsafe.Pointer(in.ConfigMap)) - return nil -} - -// Convert_v1_VolumeProjection_To_api_VolumeProjection is an autogenerated conversion function. 
-func Convert_v1_VolumeProjection_To_api_VolumeProjection(in *v1.VolumeProjection, out *api.VolumeProjection, s conversion.Scope) error { - return autoConvert_v1_VolumeProjection_To_api_VolumeProjection(in, out, s) -} - -func autoConvert_api_VolumeProjection_To_v1_VolumeProjection(in *api.VolumeProjection, out *v1.VolumeProjection, s conversion.Scope) error { - out.Secret = (*v1.SecretProjection)(unsafe.Pointer(in.Secret)) - out.DownwardAPI = (*v1.DownwardAPIProjection)(unsafe.Pointer(in.DownwardAPI)) - out.ConfigMap = (*v1.ConfigMapProjection)(unsafe.Pointer(in.ConfigMap)) - return nil -} - -// Convert_api_VolumeProjection_To_v1_VolumeProjection is an autogenerated conversion function. -func Convert_api_VolumeProjection_To_v1_VolumeProjection(in *api.VolumeProjection, out *v1.VolumeProjection, s conversion.Scope) error { - return autoConvert_api_VolumeProjection_To_v1_VolumeProjection(in, out, s) -} - -func autoConvert_v1_VolumeSource_To_api_VolumeSource(in *v1.VolumeSource, out *api.VolumeSource, s conversion.Scope) error { - out.HostPath = (*api.HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) - out.EmptyDir = (*api.EmptyDirVolumeSource)(unsafe.Pointer(in.EmptyDir)) - out.GCEPersistentDisk = (*api.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) - out.AWSElasticBlockStore = (*api.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) - out.GitRepo = (*api.GitRepoVolumeSource)(unsafe.Pointer(in.GitRepo)) - out.Secret = (*api.SecretVolumeSource)(unsafe.Pointer(in.Secret)) - out.NFS = (*api.NFSVolumeSource)(unsafe.Pointer(in.NFS)) - out.ISCSI = (*api.ISCSIVolumeSource)(unsafe.Pointer(in.ISCSI)) - out.Glusterfs = (*api.GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) - out.PersistentVolumeClaim = (*api.PersistentVolumeClaimVolumeSource)(unsafe.Pointer(in.PersistentVolumeClaim)) - out.RBD = (*api.RBDVolumeSource)(unsafe.Pointer(in.RBD)) - out.FlexVolume = (*api.FlexVolumeSource)(unsafe.Pointer(in.FlexVolume)) - out.Cinder = (*api.CinderVolumeSource)(unsafe.Pointer(in.Cinder)) - out.CephFS = (*api.CephFSVolumeSource)(unsafe.Pointer(in.CephFS)) - out.Flocker = (*api.FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) - out.DownwardAPI = (*api.DownwardAPIVolumeSource)(unsafe.Pointer(in.DownwardAPI)) - out.FC = (*api.FCVolumeSource)(unsafe.Pointer(in.FC)) - out.AzureFile = (*api.AzureFileVolumeSource)(unsafe.Pointer(in.AzureFile)) - out.ConfigMap = (*api.ConfigMapVolumeSource)(unsafe.Pointer(in.ConfigMap)) - out.VsphereVolume = (*api.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) - out.Quobyte = (*api.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) - out.AzureDisk = (*api.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) - out.PhotonPersistentDisk = (*api.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) - out.Projected = (*api.ProjectedVolumeSource)(unsafe.Pointer(in.Projected)) - out.PortworxVolume = (*api.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) - out.ScaleIO = (*api.ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO)) - out.StorageOS = (*api.StorageOSVolumeSource)(unsafe.Pointer(in.StorageOS)) - return nil -} - -// Convert_v1_VolumeSource_To_api_VolumeSource is an autogenerated conversion function. 
-func Convert_v1_VolumeSource_To_api_VolumeSource(in *v1.VolumeSource, out *api.VolumeSource, s conversion.Scope) error { - return autoConvert_v1_VolumeSource_To_api_VolumeSource(in, out, s) -} - -func autoConvert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out *v1.VolumeSource, s conversion.Scope) error { - out.HostPath = (*v1.HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) - out.EmptyDir = (*v1.EmptyDirVolumeSource)(unsafe.Pointer(in.EmptyDir)) - out.GCEPersistentDisk = (*v1.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) - out.AWSElasticBlockStore = (*v1.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) - out.GitRepo = (*v1.GitRepoVolumeSource)(unsafe.Pointer(in.GitRepo)) - out.Secret = (*v1.SecretVolumeSource)(unsafe.Pointer(in.Secret)) - out.NFS = (*v1.NFSVolumeSource)(unsafe.Pointer(in.NFS)) - out.ISCSI = (*v1.ISCSIVolumeSource)(unsafe.Pointer(in.ISCSI)) - out.Glusterfs = (*v1.GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) - out.PersistentVolumeClaim = (*v1.PersistentVolumeClaimVolumeSource)(unsafe.Pointer(in.PersistentVolumeClaim)) - out.RBD = (*v1.RBDVolumeSource)(unsafe.Pointer(in.RBD)) - out.Quobyte = (*v1.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) - out.FlexVolume = (*v1.FlexVolumeSource)(unsafe.Pointer(in.FlexVolume)) - out.Cinder = (*v1.CinderVolumeSource)(unsafe.Pointer(in.Cinder)) - out.CephFS = (*v1.CephFSVolumeSource)(unsafe.Pointer(in.CephFS)) - out.Flocker = (*v1.FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) - out.DownwardAPI = (*v1.DownwardAPIVolumeSource)(unsafe.Pointer(in.DownwardAPI)) - out.FC = (*v1.FCVolumeSource)(unsafe.Pointer(in.FC)) - out.AzureFile = (*v1.AzureFileVolumeSource)(unsafe.Pointer(in.AzureFile)) - out.ConfigMap = (*v1.ConfigMapVolumeSource)(unsafe.Pointer(in.ConfigMap)) - out.VsphereVolume = (*v1.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) - out.AzureDisk = (*v1.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) - out.PhotonPersistentDisk = (*v1.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) - out.Projected = (*v1.ProjectedVolumeSource)(unsafe.Pointer(in.Projected)) - out.PortworxVolume = (*v1.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) - out.ScaleIO = (*v1.ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO)) - out.StorageOS = (*v1.StorageOSVolumeSource)(unsafe.Pointer(in.StorageOS)) - return nil -} - -// Convert_api_VolumeSource_To_v1_VolumeSource is an autogenerated conversion function. -func Convert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out *v1.VolumeSource, s conversion.Scope) error { - return autoConvert_api_VolumeSource_To_v1_VolumeSource(in, out, s) -} - -func autoConvert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource(in *v1.VsphereVirtualDiskVolumeSource, out *api.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { - out.VolumePath = in.VolumePath - out.FSType = in.FSType - out.StoragePolicyName = in.StoragePolicyName - out.StoragePolicyID = in.StoragePolicyID - return nil -} - -// Convert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource is an autogenerated conversion function. 
-func Convert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource(in *v1.VsphereVirtualDiskVolumeSource, out *api.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { - return autoConvert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource(in, out, s) -} - -func autoConvert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in *api.VsphereVirtualDiskVolumeSource, out *v1.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { - out.VolumePath = in.VolumePath - out.FSType = in.FSType - out.StoragePolicyName = in.StoragePolicyName - out.StoragePolicyID = in.StoragePolicyID - return nil -} - -// Convert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource is an autogenerated conversion function. -func Convert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in *api.VsphereVirtualDiskVolumeSource, out *v1.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { - return autoConvert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in, out, s) -} - -func autoConvert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(in *v1.WeightedPodAffinityTerm, out *api.WeightedPodAffinityTerm, s conversion.Scope) error { - out.Weight = in.Weight - if err := Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, s); err != nil { - return err - } - return nil -} - -// Convert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm is an autogenerated conversion function. -func Convert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(in *v1.WeightedPodAffinityTerm, out *api.WeightedPodAffinityTerm, s conversion.Scope) error { - return autoConvert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(in, out, s) -} - -func autoConvert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in *api.WeightedPodAffinityTerm, out *v1.WeightedPodAffinityTerm, s conversion.Scope) error { - out.Weight = in.Weight - if err := Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, s); err != nil { - return err - } - return nil -} - -// Convert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm is an autogenerated conversion function. -func Convert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in *api.WeightedPodAffinityTerm, out *v1.WeightedPodAffinityTerm, s conversion.Scope) error { - return autoConvert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in, out, s) -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.defaults.go deleted file mode 100644 index c41d6ae3e37b..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.defaults.go +++ /dev/null @@ -1,648 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by defaulter-gen. Do not edit it manually! 
- -package v1 - -import ( - v1 "k8s.io/api/core/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// RegisterDefaults adds defaulters functions to the given scheme. -// Public to allow building arbitrary schemes. -// All generated defaulters are covering - they call all nested defaulters. -func RegisterDefaults(scheme *runtime.Scheme) error { - scheme.AddTypeDefaultingFunc(&v1.ConfigMap{}, func(obj interface{}) { SetObjectDefaults_ConfigMap(obj.(*v1.ConfigMap)) }) - scheme.AddTypeDefaultingFunc(&v1.ConfigMapList{}, func(obj interface{}) { SetObjectDefaults_ConfigMapList(obj.(*v1.ConfigMapList)) }) - scheme.AddTypeDefaultingFunc(&v1.Endpoints{}, func(obj interface{}) { SetObjectDefaults_Endpoints(obj.(*v1.Endpoints)) }) - scheme.AddTypeDefaultingFunc(&v1.EndpointsList{}, func(obj interface{}) { SetObjectDefaults_EndpointsList(obj.(*v1.EndpointsList)) }) - scheme.AddTypeDefaultingFunc(&v1.LimitRange{}, func(obj interface{}) { SetObjectDefaults_LimitRange(obj.(*v1.LimitRange)) }) - scheme.AddTypeDefaultingFunc(&v1.LimitRangeList{}, func(obj interface{}) { SetObjectDefaults_LimitRangeList(obj.(*v1.LimitRangeList)) }) - scheme.AddTypeDefaultingFunc(&v1.Namespace{}, func(obj interface{}) { SetObjectDefaults_Namespace(obj.(*v1.Namespace)) }) - scheme.AddTypeDefaultingFunc(&v1.NamespaceList{}, func(obj interface{}) { SetObjectDefaults_NamespaceList(obj.(*v1.NamespaceList)) }) - scheme.AddTypeDefaultingFunc(&v1.Node{}, func(obj interface{}) { SetObjectDefaults_Node(obj.(*v1.Node)) }) - scheme.AddTypeDefaultingFunc(&v1.NodeList{}, func(obj interface{}) { SetObjectDefaults_NodeList(obj.(*v1.NodeList)) }) - scheme.AddTypeDefaultingFunc(&v1.PersistentVolume{}, func(obj interface{}) { SetObjectDefaults_PersistentVolume(obj.(*v1.PersistentVolume)) }) - scheme.AddTypeDefaultingFunc(&v1.PersistentVolumeClaim{}, func(obj interface{}) { SetObjectDefaults_PersistentVolumeClaim(obj.(*v1.PersistentVolumeClaim)) }) - scheme.AddTypeDefaultingFunc(&v1.PersistentVolumeClaimList{}, func(obj interface{}) { - SetObjectDefaults_PersistentVolumeClaimList(obj.(*v1.PersistentVolumeClaimList)) - }) - scheme.AddTypeDefaultingFunc(&v1.PersistentVolumeList{}, func(obj interface{}) { SetObjectDefaults_PersistentVolumeList(obj.(*v1.PersistentVolumeList)) }) - scheme.AddTypeDefaultingFunc(&v1.Pod{}, func(obj interface{}) { SetObjectDefaults_Pod(obj.(*v1.Pod)) }) - scheme.AddTypeDefaultingFunc(&v1.PodAttachOptions{}, func(obj interface{}) { SetObjectDefaults_PodAttachOptions(obj.(*v1.PodAttachOptions)) }) - scheme.AddTypeDefaultingFunc(&v1.PodExecOptions{}, func(obj interface{}) { SetObjectDefaults_PodExecOptions(obj.(*v1.PodExecOptions)) }) - scheme.AddTypeDefaultingFunc(&v1.PodList{}, func(obj interface{}) { SetObjectDefaults_PodList(obj.(*v1.PodList)) }) - scheme.AddTypeDefaultingFunc(&v1.PodTemplate{}, func(obj interface{}) { SetObjectDefaults_PodTemplate(obj.(*v1.PodTemplate)) }) - scheme.AddTypeDefaultingFunc(&v1.PodTemplateList{}, func(obj interface{}) { SetObjectDefaults_PodTemplateList(obj.(*v1.PodTemplateList)) }) - scheme.AddTypeDefaultingFunc(&v1.ReplicationController{}, func(obj interface{}) { SetObjectDefaults_ReplicationController(obj.(*v1.ReplicationController)) }) - scheme.AddTypeDefaultingFunc(&v1.ReplicationControllerList{}, func(obj interface{}) { - SetObjectDefaults_ReplicationControllerList(obj.(*v1.ReplicationControllerList)) - }) - scheme.AddTypeDefaultingFunc(&v1.ResourceQuota{}, func(obj interface{}) { SetObjectDefaults_ResourceQuota(obj.(*v1.ResourceQuota)) }) - 
scheme.AddTypeDefaultingFunc(&v1.ResourceQuotaList{}, func(obj interface{}) { SetObjectDefaults_ResourceQuotaList(obj.(*v1.ResourceQuotaList)) }) - scheme.AddTypeDefaultingFunc(&v1.Secret{}, func(obj interface{}) { SetObjectDefaults_Secret(obj.(*v1.Secret)) }) - scheme.AddTypeDefaultingFunc(&v1.SecretList{}, func(obj interface{}) { SetObjectDefaults_SecretList(obj.(*v1.SecretList)) }) - scheme.AddTypeDefaultingFunc(&v1.Service{}, func(obj interface{}) { SetObjectDefaults_Service(obj.(*v1.Service)) }) - scheme.AddTypeDefaultingFunc(&v1.ServiceList{}, func(obj interface{}) { SetObjectDefaults_ServiceList(obj.(*v1.ServiceList)) }) - return nil -} - -func SetObjectDefaults_ConfigMap(in *v1.ConfigMap) { - SetDefaults_ConfigMap(in) -} - -func SetObjectDefaults_ConfigMapList(in *v1.ConfigMapList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_ConfigMap(a) - } -} - -func SetObjectDefaults_Endpoints(in *v1.Endpoints) { - SetDefaults_Endpoints(in) -} - -func SetObjectDefaults_EndpointsList(in *v1.EndpointsList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_Endpoints(a) - } -} - -func SetObjectDefaults_LimitRange(in *v1.LimitRange) { - for i := range in.Spec.Limits { - a := &in.Spec.Limits[i] - SetDefaults_LimitRangeItem(a) - SetDefaults_ResourceList(&a.Max) - SetDefaults_ResourceList(&a.Min) - SetDefaults_ResourceList(&a.Default) - SetDefaults_ResourceList(&a.DefaultRequest) - SetDefaults_ResourceList(&a.MaxLimitRequestRatio) - } -} - -func SetObjectDefaults_LimitRangeList(in *v1.LimitRangeList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_LimitRange(a) - } -} - -func SetObjectDefaults_Namespace(in *v1.Namespace) { - SetDefaults_NamespaceStatus(&in.Status) -} - -func SetObjectDefaults_NamespaceList(in *v1.NamespaceList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_Namespace(a) - } -} - -func SetObjectDefaults_Node(in *v1.Node) { - SetDefaults_Node(in) - SetDefaults_NodeStatus(&in.Status) - SetDefaults_ResourceList(&in.Status.Capacity) - SetDefaults_ResourceList(&in.Status.Allocatable) -} - -func SetObjectDefaults_NodeList(in *v1.NodeList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_Node(a) - } -} - -func SetObjectDefaults_PersistentVolume(in *v1.PersistentVolume) { - SetDefaults_PersistentVolume(in) - SetDefaults_ResourceList(&in.Spec.Capacity) - if in.Spec.PersistentVolumeSource.HostPath != nil { - SetDefaults_HostPathVolumeSource(in.Spec.PersistentVolumeSource.HostPath) - } - if in.Spec.PersistentVolumeSource.RBD != nil { - SetDefaults_RBDVolumeSource(in.Spec.PersistentVolumeSource.RBD) - } - if in.Spec.PersistentVolumeSource.ISCSI != nil { - SetDefaults_ISCSIVolumeSource(in.Spec.PersistentVolumeSource.ISCSI) - } - if in.Spec.PersistentVolumeSource.AzureDisk != nil { - SetDefaults_AzureDiskVolumeSource(in.Spec.PersistentVolumeSource.AzureDisk) - } - if in.Spec.PersistentVolumeSource.ScaleIO != nil { - SetDefaults_ScaleIOVolumeSource(in.Spec.PersistentVolumeSource.ScaleIO) - } -} - -func SetObjectDefaults_PersistentVolumeClaim(in *v1.PersistentVolumeClaim) { - SetDefaults_PersistentVolumeClaim(in) - SetDefaults_ResourceList(&in.Spec.Resources.Limits) - SetDefaults_ResourceList(&in.Spec.Resources.Requests) - SetDefaults_ResourceList(&in.Status.Capacity) -} - -func SetObjectDefaults_PersistentVolumeClaimList(in *v1.PersistentVolumeClaimList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_PersistentVolumeClaim(a) - } -} - -func 
SetObjectDefaults_PersistentVolumeList(in *v1.PersistentVolumeList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_PersistentVolume(a) - } -} - -func SetObjectDefaults_Pod(in *v1.Pod) { - SetDefaults_Pod(in) - SetDefaults_PodSpec(&in.Spec) - for i := range in.Spec.Volumes { - a := &in.Spec.Volumes[i] - SetDefaults_Volume(a) - if a.VolumeSource.HostPath != nil { - SetDefaults_HostPathVolumeSource(a.VolumeSource.HostPath) - } - if a.VolumeSource.Secret != nil { - SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) - } - if a.VolumeSource.ISCSI != nil { - SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) - } - if a.VolumeSource.RBD != nil { - SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) - } - if a.VolumeSource.DownwardAPI != nil { - SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) - for j := range a.VolumeSource.DownwardAPI.Items { - b := &a.VolumeSource.DownwardAPI.Items[j] - if b.FieldRef != nil { - SetDefaults_ObjectFieldSelector(b.FieldRef) - } - } - } - if a.VolumeSource.ConfigMap != nil { - SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) - } - if a.VolumeSource.AzureDisk != nil { - SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) - } - if a.VolumeSource.Projected != nil { - SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) - for j := range a.VolumeSource.Projected.Sources { - b := &a.VolumeSource.Projected.Sources[j] - if b.DownwardAPI != nil { - for k := range b.DownwardAPI.Items { - c := &b.DownwardAPI.Items[k] - if c.FieldRef != nil { - SetDefaults_ObjectFieldSelector(c.FieldRef) - } - } - } - } - } - if a.VolumeSource.ScaleIO != nil { - SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) - } - } - for i := range in.Spec.InitContainers { - a := &in.Spec.InitContainers[i] - SetDefaults_Container(a) - for j := range a.Ports { - b := &a.Ports[j] - SetDefaults_ContainerPort(b) - } - for j := range a.Env { - b := &a.Env[j] - if b.ValueFrom != nil { - if b.ValueFrom.FieldRef != nil { - SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) - } - } - } - SetDefaults_ResourceList(&a.Resources.Limits) - SetDefaults_ResourceList(&a.Resources.Requests) - if a.LivenessProbe != nil { - SetDefaults_Probe(a.LivenessProbe) - if a.LivenessProbe.Handler.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) - } - } - if a.ReadinessProbe != nil { - SetDefaults_Probe(a.ReadinessProbe) - if a.ReadinessProbe.Handler.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) - } - } - if a.Lifecycle != nil { - if a.Lifecycle.PostStart != nil { - if a.Lifecycle.PostStart.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) - } - } - if a.Lifecycle.PreStop != nil { - if a.Lifecycle.PreStop.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) - } - } - } - } - for i := range in.Spec.Containers { - a := &in.Spec.Containers[i] - SetDefaults_Container(a) - for j := range a.Ports { - b := &a.Ports[j] - SetDefaults_ContainerPort(b) - } - for j := range a.Env { - b := &a.Env[j] - if b.ValueFrom != nil { - if b.ValueFrom.FieldRef != nil { - SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) - } - } - } - SetDefaults_ResourceList(&a.Resources.Limits) - SetDefaults_ResourceList(&a.Resources.Requests) - if a.LivenessProbe != nil { - SetDefaults_Probe(a.LivenessProbe) - if a.LivenessProbe.Handler.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) - } - } - if a.ReadinessProbe != nil { - SetDefaults_Probe(a.ReadinessProbe) - 
if a.ReadinessProbe.Handler.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) - } - } - if a.Lifecycle != nil { - if a.Lifecycle.PostStart != nil { - if a.Lifecycle.PostStart.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) - } - } - if a.Lifecycle.PreStop != nil { - if a.Lifecycle.PreStop.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) - } - } - } - } -} - -func SetObjectDefaults_PodAttachOptions(in *v1.PodAttachOptions) { - SetDefaults_PodAttachOptions(in) -} - -func SetObjectDefaults_PodExecOptions(in *v1.PodExecOptions) { - SetDefaults_PodExecOptions(in) -} - -func SetObjectDefaults_PodList(in *v1.PodList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_Pod(a) - } -} - -func SetObjectDefaults_PodTemplate(in *v1.PodTemplate) { - SetDefaults_PodSpec(&in.Template.Spec) - for i := range in.Template.Spec.Volumes { - a := &in.Template.Spec.Volumes[i] - SetDefaults_Volume(a) - if a.VolumeSource.HostPath != nil { - SetDefaults_HostPathVolumeSource(a.VolumeSource.HostPath) - } - if a.VolumeSource.Secret != nil { - SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) - } - if a.VolumeSource.ISCSI != nil { - SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) - } - if a.VolumeSource.RBD != nil { - SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) - } - if a.VolumeSource.DownwardAPI != nil { - SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) - for j := range a.VolumeSource.DownwardAPI.Items { - b := &a.VolumeSource.DownwardAPI.Items[j] - if b.FieldRef != nil { - SetDefaults_ObjectFieldSelector(b.FieldRef) - } - } - } - if a.VolumeSource.ConfigMap != nil { - SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) - } - if a.VolumeSource.AzureDisk != nil { - SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) - } - if a.VolumeSource.Projected != nil { - SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) - for j := range a.VolumeSource.Projected.Sources { - b := &a.VolumeSource.Projected.Sources[j] - if b.DownwardAPI != nil { - for k := range b.DownwardAPI.Items { - c := &b.DownwardAPI.Items[k] - if c.FieldRef != nil { - SetDefaults_ObjectFieldSelector(c.FieldRef) - } - } - } - } - } - if a.VolumeSource.ScaleIO != nil { - SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) - } - } - for i := range in.Template.Spec.InitContainers { - a := &in.Template.Spec.InitContainers[i] - SetDefaults_Container(a) - for j := range a.Ports { - b := &a.Ports[j] - SetDefaults_ContainerPort(b) - } - for j := range a.Env { - b := &a.Env[j] - if b.ValueFrom != nil { - if b.ValueFrom.FieldRef != nil { - SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) - } - } - } - SetDefaults_ResourceList(&a.Resources.Limits) - SetDefaults_ResourceList(&a.Resources.Requests) - if a.LivenessProbe != nil { - SetDefaults_Probe(a.LivenessProbe) - if a.LivenessProbe.Handler.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) - } - } - if a.ReadinessProbe != nil { - SetDefaults_Probe(a.ReadinessProbe) - if a.ReadinessProbe.Handler.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) - } - } - if a.Lifecycle != nil { - if a.Lifecycle.PostStart != nil { - if a.Lifecycle.PostStart.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) - } - } - if a.Lifecycle.PreStop != nil { - if a.Lifecycle.PreStop.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) - } - } - } - } - for i := range 
in.Template.Spec.Containers { - a := &in.Template.Spec.Containers[i] - SetDefaults_Container(a) - for j := range a.Ports { - b := &a.Ports[j] - SetDefaults_ContainerPort(b) - } - for j := range a.Env { - b := &a.Env[j] - if b.ValueFrom != nil { - if b.ValueFrom.FieldRef != nil { - SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) - } - } - } - SetDefaults_ResourceList(&a.Resources.Limits) - SetDefaults_ResourceList(&a.Resources.Requests) - if a.LivenessProbe != nil { - SetDefaults_Probe(a.LivenessProbe) - if a.LivenessProbe.Handler.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) - } - } - if a.ReadinessProbe != nil { - SetDefaults_Probe(a.ReadinessProbe) - if a.ReadinessProbe.Handler.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) - } - } - if a.Lifecycle != nil { - if a.Lifecycle.PostStart != nil { - if a.Lifecycle.PostStart.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) - } - } - if a.Lifecycle.PreStop != nil { - if a.Lifecycle.PreStop.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) - } - } - } - } -} - -func SetObjectDefaults_PodTemplateList(in *v1.PodTemplateList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_PodTemplate(a) - } -} - -func SetObjectDefaults_ReplicationController(in *v1.ReplicationController) { - SetDefaults_ReplicationController(in) - if in.Spec.Template != nil { - SetDefaults_PodSpec(&in.Spec.Template.Spec) - for i := range in.Spec.Template.Spec.Volumes { - a := &in.Spec.Template.Spec.Volumes[i] - SetDefaults_Volume(a) - if a.VolumeSource.HostPath != nil { - SetDefaults_HostPathVolumeSource(a.VolumeSource.HostPath) - } - if a.VolumeSource.Secret != nil { - SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) - } - if a.VolumeSource.ISCSI != nil { - SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) - } - if a.VolumeSource.RBD != nil { - SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) - } - if a.VolumeSource.DownwardAPI != nil { - SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) - for j := range a.VolumeSource.DownwardAPI.Items { - b := &a.VolumeSource.DownwardAPI.Items[j] - if b.FieldRef != nil { - SetDefaults_ObjectFieldSelector(b.FieldRef) - } - } - } - if a.VolumeSource.ConfigMap != nil { - SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) - } - if a.VolumeSource.AzureDisk != nil { - SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) - } - if a.VolumeSource.Projected != nil { - SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) - for j := range a.VolumeSource.Projected.Sources { - b := &a.VolumeSource.Projected.Sources[j] - if b.DownwardAPI != nil { - for k := range b.DownwardAPI.Items { - c := &b.DownwardAPI.Items[k] - if c.FieldRef != nil { - SetDefaults_ObjectFieldSelector(c.FieldRef) - } - } - } - } - } - if a.VolumeSource.ScaleIO != nil { - SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) - } - } - for i := range in.Spec.Template.Spec.InitContainers { - a := &in.Spec.Template.Spec.InitContainers[i] - SetDefaults_Container(a) - for j := range a.Ports { - b := &a.Ports[j] - SetDefaults_ContainerPort(b) - } - for j := range a.Env { - b := &a.Env[j] - if b.ValueFrom != nil { - if b.ValueFrom.FieldRef != nil { - SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) - } - } - } - SetDefaults_ResourceList(&a.Resources.Limits) - SetDefaults_ResourceList(&a.Resources.Requests) - if a.LivenessProbe != nil { - SetDefaults_Probe(a.LivenessProbe) - if 
a.LivenessProbe.Handler.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) - } - } - if a.ReadinessProbe != nil { - SetDefaults_Probe(a.ReadinessProbe) - if a.ReadinessProbe.Handler.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) - } - } - if a.Lifecycle != nil { - if a.Lifecycle.PostStart != nil { - if a.Lifecycle.PostStart.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) - } - } - if a.Lifecycle.PreStop != nil { - if a.Lifecycle.PreStop.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) - } - } - } - } - for i := range in.Spec.Template.Spec.Containers { - a := &in.Spec.Template.Spec.Containers[i] - SetDefaults_Container(a) - for j := range a.Ports { - b := &a.Ports[j] - SetDefaults_ContainerPort(b) - } - for j := range a.Env { - b := &a.Env[j] - if b.ValueFrom != nil { - if b.ValueFrom.FieldRef != nil { - SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) - } - } - } - SetDefaults_ResourceList(&a.Resources.Limits) - SetDefaults_ResourceList(&a.Resources.Requests) - if a.LivenessProbe != nil { - SetDefaults_Probe(a.LivenessProbe) - if a.LivenessProbe.Handler.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) - } - } - if a.ReadinessProbe != nil { - SetDefaults_Probe(a.ReadinessProbe) - if a.ReadinessProbe.Handler.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) - } - } - if a.Lifecycle != nil { - if a.Lifecycle.PostStart != nil { - if a.Lifecycle.PostStart.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) - } - } - if a.Lifecycle.PreStop != nil { - if a.Lifecycle.PreStop.HTTPGet != nil { - SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) - } - } - } - } - } -} - -func SetObjectDefaults_ReplicationControllerList(in *v1.ReplicationControllerList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_ReplicationController(a) - } -} - -func SetObjectDefaults_ResourceQuota(in *v1.ResourceQuota) { - SetDefaults_ResourceList(&in.Spec.Hard) - SetDefaults_ResourceList(&in.Status.Hard) - SetDefaults_ResourceList(&in.Status.Used) -} - -func SetObjectDefaults_ResourceQuotaList(in *v1.ResourceQuotaList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_ResourceQuota(a) - } -} - -func SetObjectDefaults_Secret(in *v1.Secret) { - SetDefaults_Secret(in) -} - -func SetObjectDefaults_SecretList(in *v1.SecretList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_Secret(a) - } -} - -func SetObjectDefaults_Service(in *v1.Service) { - SetDefaults_Service(in) -} - -func SetObjectDefaults_ServiceList(in *v1.ServiceList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_Service(a) - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/validation/BUILD b/vendor/k8s.io/kubernetes/pkg/api/validation/BUILD deleted file mode 100644 index 8107e7f45d18..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/validation/BUILD +++ /dev/null @@ -1,79 +0,0 @@ -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "events.go", - "validation.go", - ], - visibility = ["//visibility:public"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/helper:go_default_library", - "//pkg/api/service:go_default_library", - "//pkg/api/util:go_default_library", - "//pkg/api/v1:go_default_library", - "//pkg/api/v1/helper:go_default_library", - 
"//pkg/capabilities:go_default_library", - "//pkg/features:go_default_library", - "//pkg/security/apparmor:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/validation:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", - "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "events_test.go", - "validation_test.go", - ], - library = ":go_default_library", - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/helper:go_default_library", - "//pkg/api/testapi:go_default_library", - "//pkg/capabilities:go_default_library", - "//pkg/security/apparmor:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", - "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/api/validation/events.go b/vendor/k8s.io/kubernetes/pkg/api/validation/events.go deleted file mode 100644 index a255f58e2b95..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/validation/events.go +++ /dev/null @@ -1,80 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package validation - -import ( - "fmt" - - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/validation" - "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/kubernetes/pkg/api" - apiutil "k8s.io/kubernetes/pkg/api/util" -) - -// ValidateEvent makes sure that the event makes sense. -func ValidateEvent(event *api.Event) field.ErrorList { - allErrs := field.ErrorList{} - - // Make sure event.Namespace and the involvedObject.Namespace agree - if len(event.InvolvedObject.Namespace) == 0 { - // event.Namespace must also be empty (or "default", for compatibility with old clients) - if event.Namespace != metav1.NamespaceNone && event.Namespace != metav1.NamespaceDefault { - allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "does not match event.namespace")) - } - } else { - // event namespace must match - if event.Namespace != event.InvolvedObject.Namespace { - allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "does not match event.namespace")) - } - } - - // For kinds we recognize, make sure involvedObject.Namespace is set for namespaced kinds - if namespaced, err := isNamespacedKind(event.InvolvedObject.Kind, event.InvolvedObject.APIVersion); err == nil { - if namespaced && len(event.InvolvedObject.Namespace) == 0 { - allErrs = append(allErrs, field.Required(field.NewPath("involvedObject", "namespace"), fmt.Sprintf("required for kind %s", event.InvolvedObject.Kind))) - } - if !namespaced && len(event.InvolvedObject.Namespace) > 0 { - allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, fmt.Sprintf("not allowed for kind %s", event.InvolvedObject.Kind))) - } - } - - for _, msg := range validation.IsDNS1123Subdomain(event.Namespace) { - allErrs = append(allErrs, field.Invalid(field.NewPath("namespace"), event.Namespace, msg)) - } - return allErrs -} - -// Check whether the kind in groupVersion is scoped at the root of the api hierarchy -func isNamespacedKind(kind, groupVersion string) (bool, error) { - group := apiutil.GetGroup(groupVersion) - g, err := api.Registry.Group(group) - if err != nil { - return false, err - } - restMapping, err := g.RESTMapper.RESTMapping(schema.GroupKind{Group: group, Kind: kind}, apiutil.GetVersion(groupVersion)) - if err != nil { - return false, err - } - scopeName := restMapping.Scope.Name() - if scopeName == meta.RESTScopeNameNamespace { - return true, nil - } - return false, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/validation/validation.go b/vendor/k8s.io/kubernetes/pkg/api/validation/validation.go deleted file mode 100644 index 3d08bbe51ee4..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/validation/validation.go +++ /dev/null @@ -1,4491 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package validation - -import ( - "encoding/json" - "fmt" - "math" - "net" - "path" - "path/filepath" - "reflect" - "regexp" - "strings" - - "github.com/golang/glog" - - "k8s.io/api/core/v1" - apiequality "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/api/resource" - apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation" - genericvalidation "k8s.io/apimachinery/pkg/api/validation" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/util/diff" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/validation" - "k8s.io/apimachinery/pkg/util/validation/field" - utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/helper" - apiservice "k8s.io/kubernetes/pkg/api/service" - k8s_api_v1 "k8s.io/kubernetes/pkg/api/v1" - v1helper "k8s.io/kubernetes/pkg/api/v1/helper" - "k8s.io/kubernetes/pkg/capabilities" - "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/security/apparmor" -) - -// TODO: delete this global variable when we enable the validation of common -// fields by default. -var RepairMalformedUpdates bool = genericvalidation.RepairMalformedUpdates - -const isNegativeErrorMsg string = apimachineryvalidation.IsNegativeErrorMsg -const isInvalidQuotaResource string = `must be a standard resource for quota` -const fieldImmutableErrorMsg string = genericvalidation.FieldImmutableErrorMsg -const isNotIntegerErrorMsg string = `must be an integer` - -var pdPartitionErrorMsg string = validation.InclusiveRangeError(1, 255) -var volumeModeErrorMsg string = "must be a number between 0 and 0777 (octal), both inclusive" - -// BannedOwners is a black list of object that are not allowed to be owners. -var BannedOwners = genericvalidation.BannedOwners - -var iscsiInitiatorIqnRegex = regexp.MustCompile(`iqn\.\d{4}-\d{2}\.([[:alnum:]-.]+)(:[^,;*&$|\s]+)$`) -var iscsiInitiatorEuiRegex = regexp.MustCompile(`^eui.[[:alnum:]]{16}$`) -var iscsiInitiatorNaaRegex = regexp.MustCompile(`^naa.[[:alnum:]]{32}$`) - -// ValidateHasLabel requires that metav1.ObjectMeta has a Label with key and expectedValue -func ValidateHasLabel(meta metav1.ObjectMeta, fldPath *field.Path, key, expectedValue string) field.ErrorList { - allErrs := field.ErrorList{} - actualValue, found := meta.Labels[key] - if !found { - allErrs = append(allErrs, field.Required(fldPath.Child("labels").Key(key), - fmt.Sprintf("must be '%s'", expectedValue))) - return allErrs - } - if actualValue != expectedValue { - allErrs = append(allErrs, field.Invalid(fldPath.Child("labels").Key(key), meta.Labels, - fmt.Sprintf("must be '%s'", expectedValue))) - } - return allErrs -} - -// ValidateAnnotations validates that a set of annotations are correctly defined. -func ValidateAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { - return genericvalidation.ValidateAnnotations(annotations, fldPath) -} - -func ValidateDNS1123Label(value string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - for _, msg := range validation.IsDNS1123Label(value) { - allErrs = append(allErrs, field.Invalid(fldPath, value, msg)) - } - return allErrs -} - -// ValidateDNS1123Subdomain validates that a name is a proper DNS subdomain. 
-func ValidateDNS1123Subdomain(value string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - for _, msg := range validation.IsDNS1123Subdomain(value) { - allErrs = append(allErrs, field.Invalid(fldPath, value, msg)) - } - return allErrs -} - -func ValidatePodSpecificAnnotations(annotations map[string]string, spec *api.PodSpec, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - if value, isMirror := annotations[api.MirrorPodAnnotationKey]; isMirror { - if len(spec.NodeName) == 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Key(api.MirrorPodAnnotationKey), value, "must set spec.nodeName if mirror pod annotation is set")) - } - } - - if annotations[api.TolerationsAnnotationKey] != "" { - allErrs = append(allErrs, ValidateTolerationsInPodAnnotations(annotations, fldPath)...) - } - - allErrs = append(allErrs, ValidateSeccompPodAnnotations(annotations, fldPath)...) - allErrs = append(allErrs, ValidateAppArmorPodAnnotations(annotations, spec, fldPath)...) - - sysctls, err := helper.SysctlsFromPodAnnotation(annotations[api.SysctlsPodAnnotationKey]) - if err != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Key(api.SysctlsPodAnnotationKey), annotations[api.SysctlsPodAnnotationKey], err.Error())) - } else { - allErrs = append(allErrs, validateSysctls(sysctls, fldPath.Key(api.SysctlsPodAnnotationKey))...) - } - unsafeSysctls, err := helper.SysctlsFromPodAnnotation(annotations[api.UnsafeSysctlsPodAnnotationKey]) - if err != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Key(api.UnsafeSysctlsPodAnnotationKey), annotations[api.UnsafeSysctlsPodAnnotationKey], err.Error())) - } else { - allErrs = append(allErrs, validateSysctls(unsafeSysctls, fldPath.Key(api.UnsafeSysctlsPodAnnotationKey))...) - } - inBoth := sysctlIntersection(sysctls, unsafeSysctls) - if len(inBoth) > 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Key(api.UnsafeSysctlsPodAnnotationKey), strings.Join(inBoth, ", "), "can not be safe and unsafe")) - } - - return allErrs -} - -// ValidateTolerationsInPodAnnotations tests that the serialized tolerations in Pod.Annotations has valid data -func ValidateTolerationsInPodAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - tolerations, err := helper.GetTolerationsFromPodAnnotations(annotations) - if err != nil { - allErrs = append(allErrs, field.Invalid(fldPath, api.TolerationsAnnotationKey, err.Error())) - return allErrs - } - - if len(tolerations) > 0 { - allErrs = append(allErrs, ValidateTolerations(tolerations, fldPath.Child(api.TolerationsAnnotationKey))...) - } - - return allErrs -} - -func ValidatePodSpecificAnnotationUpdates(newPod, oldPod *api.Pod, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - newAnnotations := newPod.Annotations - oldAnnotations := oldPod.Annotations - for k, oldVal := range oldAnnotations { - if newVal, exists := newAnnotations[k]; exists && newVal == oldVal { - continue // No change. - } - if strings.HasPrefix(k, apparmor.ContainerAnnotationKeyPrefix) { - allErrs = append(allErrs, field.Forbidden(fldPath.Key(k), "may not remove or update AppArmor annotations")) - } - if k == api.MirrorPodAnnotationKey { - allErrs = append(allErrs, field.Forbidden(fldPath.Key(k), "may not remove or update mirror pod annotation")) - } - } - // Check for additions - for k := range newAnnotations { - if _, ok := oldAnnotations[k]; ok { - continue // No change. 
- } - if strings.HasPrefix(k, apparmor.ContainerAnnotationKeyPrefix) { - allErrs = append(allErrs, field.Forbidden(fldPath.Key(k), "may not add AppArmor annotations")) - } - if k == api.MirrorPodAnnotationKey { - allErrs = append(allErrs, field.Forbidden(fldPath.Key(k), "may not add mirror pod annotation")) - } - } - allErrs = append(allErrs, ValidatePodSpecificAnnotations(newAnnotations, &newPod.Spec, fldPath)...) - return allErrs -} - -func ValidateEndpointsSpecificAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - return allErrs -} - -func ValidateOwnerReferences(ownerReferences []metav1.OwnerReference, fldPath *field.Path) field.ErrorList { - return genericvalidation.ValidateOwnerReferences(ownerReferences, fldPath) -} - -// ValidateNameFunc validates that the provided name is valid for a given resource type. -// Not all resources have the same validation rules for names. Prefix is true -// if the name will have a value appended to it. If the name is not valid, -// this returns a list of descriptions of individual characteristics of the -// value that were not valid. Otherwise this returns an empty list or nil. -type ValidateNameFunc apimachineryvalidation.ValidateNameFunc - -// maskTrailingDash replaces the final character of a string with a subdomain safe -// value if is a dash. -func maskTrailingDash(name string) string { - if strings.HasSuffix(name, "-") { - return name[:len(name)-2] + "a" - } - return name -} - -// ValidatePodName can be used to check whether the given pod name is valid. -// Prefix indicates this name will be used as part of generation, in which case -// trailing dashes are allowed. -var ValidatePodName = NameIsDNSSubdomain - -// ValidateReplicationControllerName can be used to check whether the given replication -// controller name is valid. -// Prefix indicates this name will be used as part of generation, in which case -// trailing dashes are allowed. -var ValidateReplicationControllerName = NameIsDNSSubdomain - -// ValidateServiceName can be used to check whether the given service name is valid. -// Prefix indicates this name will be used as part of generation, in which case -// trailing dashes are allowed. -var ValidateServiceName = NameIsDNS1035Label - -// ValidateNodeName can be used to check whether the given node name is valid. -// Prefix indicates this name will be used as part of generation, in which case -// trailing dashes are allowed. -var ValidateNodeName = NameIsDNSSubdomain - -// ValidateNamespaceName can be used to check whether the given namespace name is valid. -// Prefix indicates this name will be used as part of generation, in which case -// trailing dashes are allowed. -var ValidateNamespaceName = apimachineryvalidation.ValidateNamespaceName - -// ValidateLimitRangeName can be used to check whether the given limit range name is valid. -// Prefix indicates this name will be used as part of generation, in which case -// trailing dashes are allowed. -var ValidateLimitRangeName = NameIsDNSSubdomain - -// ValidateResourceQuotaName can be used to check whether the given -// resource quota name is valid. -// Prefix indicates this name will be used as part of generation, in which case -// trailing dashes are allowed. -var ValidateResourceQuotaName = NameIsDNSSubdomain - -// ValidateSecretName can be used to check whether the given secret name is valid. -// Prefix indicates this name will be used as part of generation, in which case -// trailing dashes are allowed. 
-var ValidateSecretName = NameIsDNSSubdomain - -// ValidateServiceAccountName can be used to check whether the given service account name is valid. -// Prefix indicates this name will be used as part of generation, in which case -// trailing dashes are allowed. -var ValidateServiceAccountName = apimachineryvalidation.ValidateServiceAccountName - -// ValidateEndpointsName can be used to check whether the given endpoints name is valid. -// Prefix indicates this name will be used as part of generation, in which case -// trailing dashes are allowed. -var ValidateEndpointsName = NameIsDNSSubdomain - -// ValidateClusterName can be used to check whether the given cluster name is valid. -var ValidateClusterName = genericvalidation.ValidateClusterName - -// ValidateClassName can be used to check whether the given class name is valid. -// It is defined here to avoid import cycle between pkg/apis/storage/validation -// (where it should be) and this file. -var ValidateClassName = NameIsDNSSubdomain - -// ValidatePiorityClassName can be used to check whether the given priority -// class name is valid. -var ValidatePriorityClassName = NameIsDNSSubdomain - -// TODO update all references to these functions to point to the genericvalidation ones -// NameIsDNSSubdomain is a ValidateNameFunc for names that must be a DNS subdomain. -func NameIsDNSSubdomain(name string, prefix bool) []string { - return apimachineryvalidation.NameIsDNSSubdomain(name, prefix) -} - -// NameIsDNSLabel is a ValidateNameFunc for names that must be a DNS 1123 label. -func NameIsDNSLabel(name string, prefix bool) []string { - return apimachineryvalidation.NameIsDNSLabel(name, prefix) -} - -// NameIsDNS1035Label is a ValidateNameFunc for names that must be a DNS 952 label. -func NameIsDNS1035Label(name string, prefix bool) []string { - return apimachineryvalidation.NameIsDNS1035Label(name, prefix) -} - -// Validates that given value is not negative. -func ValidateNonnegativeField(value int64, fldPath *field.Path) field.ErrorList { - return apimachineryvalidation.ValidateNonnegativeField(value, fldPath) -} - -// Validates that a Quantity is not negative -func ValidateNonnegativeQuantity(value resource.Quantity, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if value.Cmp(resource.Quantity{}) < 0 { - allErrs = append(allErrs, field.Invalid(fldPath, value.String(), isNegativeErrorMsg)) - } - return allErrs -} - -func ValidateImmutableField(newVal, oldVal interface{}, fldPath *field.Path) field.ErrorList { - return genericvalidation.ValidateImmutableField(newVal, oldVal, fldPath) -} - -func ValidateImmutableAnnotation(newVal string, oldVal string, annotation string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - if oldVal != newVal { - allErrs = append(allErrs, field.Invalid(fldPath.Child("annotations", annotation), newVal, fieldImmutableErrorMsg)) - } - return allErrs -} - -// ValidateObjectMeta validates an object's metadata on creation. It expects that name generation has already -// been performed. -// It doesn't return an error for rootscoped resources with namespace, because namespace should already be cleared before. -// TODO: Remove calls to this method scattered in validations of specific resources, e.g., ValidatePodUpdate. 
-func ValidateObjectMeta(meta *metav1.ObjectMeta, requiresNamespace bool, nameFn ValidateNameFunc, fldPath *field.Path) field.ErrorList { - allErrs := genericvalidation.ValidateObjectMeta(meta, requiresNamespace, apimachineryvalidation.ValidateNameFunc(nameFn), fldPath) - // run additional checks for the finalizer name - for i := range meta.Finalizers { - allErrs = append(allErrs, validateKubeFinalizerName(string(meta.Finalizers[i]), fldPath.Child("finalizers").Index(i))...) - } - return allErrs -} - -// ValidateObjectMetaUpdate validates an object's metadata when updated -func ValidateObjectMetaUpdate(newMeta, oldMeta *metav1.ObjectMeta, fldPath *field.Path) field.ErrorList { - allErrs := genericvalidation.ValidateObjectMetaUpdate(newMeta, oldMeta, fldPath) - // run additional checks for the finalizer name - for i := range newMeta.Finalizers { - allErrs = append(allErrs, validateKubeFinalizerName(string(newMeta.Finalizers[i]), fldPath.Child("finalizers").Index(i))...) - } - - return allErrs -} - -func ValidateNoNewFinalizers(newFinalizers []string, oldFinalizers []string, fldPath *field.Path) field.ErrorList { - return genericvalidation.ValidateNoNewFinalizers(newFinalizers, oldFinalizers, fldPath) -} - -func ValidateVolumes(volumes []api.Volume, fldPath *field.Path) (sets.String, field.ErrorList) { - allErrs := field.ErrorList{} - - allNames := sets.String{} - for i, vol := range volumes { - idxPath := fldPath.Index(i) - namePath := idxPath.Child("name") - el := validateVolumeSource(&vol.VolumeSource, idxPath, vol.Name) - if len(vol.Name) == 0 { - el = append(el, field.Required(namePath, "")) - } else { - el = append(el, ValidateDNS1123Label(vol.Name, namePath)...) - } - if allNames.Has(vol.Name) { - el = append(el, field.Duplicate(namePath, vol.Name)) - } - if len(el) == 0 { - allNames.Insert(vol.Name) - } else { - allErrs = append(allErrs, el...) - } - - } - return allNames, allErrs -} - -func validateVolumeSource(source *api.VolumeSource, fldPath *field.Path, volName string) field.ErrorList { - numVolumes := 0 - allErrs := field.ErrorList{} - if source.EmptyDir != nil { - numVolumes++ - if !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) { - if source.EmptyDir.SizeLimit != nil && source.EmptyDir.SizeLimit.Cmp(resource.Quantity{}) != 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("emptyDir").Child("sizeLimit"), "SizeLimit field disabled by feature-gate for EmptyDir volumes")) - } - } else { - if source.EmptyDir.SizeLimit != nil && source.EmptyDir.SizeLimit.Cmp(resource.Quantity{}) < 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("emptyDir").Child("sizeLimit"), "SizeLimit field must be a valid resource quantity")) - } - } - if !utilfeature.DefaultFeatureGate.Enabled(features.HugePages) && source.EmptyDir.Medium == api.StorageMediumHugePages { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("emptyDir").Child("medium"), "HugePages medium is disabled by feature-gate for EmptyDir volumes")) - } - } - if source.HostPath != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("hostPath"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateHostPathVolumeSource(source.HostPath, fldPath.Child("hostPath"))...) 
- } - } - if source.GitRepo != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("gitRepo"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateGitRepoVolumeSource(source.GitRepo, fldPath.Child("gitRepo"))...) - } - } - if source.GCEPersistentDisk != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("gcePersistentDisk"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateGCEPersistentDiskVolumeSource(source.GCEPersistentDisk, fldPath.Child("persistentDisk"))...) - } - } - if source.AWSElasticBlockStore != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("awsElasticBlockStore"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateAWSElasticBlockStoreVolumeSource(source.AWSElasticBlockStore, fldPath.Child("awsElasticBlockStore"))...) - } - } - if source.Secret != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("secret"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateSecretVolumeSource(source.Secret, fldPath.Child("secret"))...) - } - } - if source.NFS != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("nfs"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateNFSVolumeSource(source.NFS, fldPath.Child("nfs"))...) - } - } - if source.ISCSI != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("iscsi"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateISCSIVolumeSource(source.ISCSI, fldPath.Child("iscsi"))...) - } - if source.ISCSI.InitiatorName != nil && len(volName+":"+source.ISCSI.TargetPortal) > 64 { - tooLongErr := "Total length of : must be under 64 characters if iscsi.initiatorName is specified." - allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), volName, tooLongErr)) - } - } - if source.Glusterfs != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("glusterfs"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateGlusterfsVolumeSource(source.Glusterfs, fldPath.Child("glusterfs"))...) - } - } - if source.Flocker != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("flocker"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateFlockerVolumeSource(source.Flocker, fldPath.Child("flocker"))...) - } - } - if source.PersistentVolumeClaim != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("persistentVolumeClaim"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validatePersistentClaimVolumeSource(source.PersistentVolumeClaim, fldPath.Child("persistentVolumeClaim"))...) - } - } - if source.RBD != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("rbd"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateRBDVolumeSource(source.RBD, fldPath.Child("rbd"))...) 
- } - } - if source.Cinder != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("cinder"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateCinderVolumeSource(source.Cinder, fldPath.Child("cinder"))...) - } - } - if source.CephFS != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("cephFS"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateCephFSVolumeSource(source.CephFS, fldPath.Child("cephfs"))...) - } - } - if source.Quobyte != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("quobyte"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateQuobyteVolumeSource(source.Quobyte, fldPath.Child("quobyte"))...) - } - } - if source.DownwardAPI != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("downwarAPI"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateDownwardAPIVolumeSource(source.DownwardAPI, fldPath.Child("downwardAPI"))...) - } - } - if source.FC != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("fc"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateFCVolumeSource(source.FC, fldPath.Child("fc"))...) - } - } - if source.FlexVolume != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("flexVolume"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateFlexVolumeSource(source.FlexVolume, fldPath.Child("flexVolume"))...) - } - } - if source.ConfigMap != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("configMap"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateConfigMapVolumeSource(source.ConfigMap, fldPath.Child("configMap"))...) - } - } - - if source.AzureFile != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("azureFile"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateAzureFile(source.AzureFile, fldPath.Child("azureFile"))...) - } - } - - if source.VsphereVolume != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("vsphereVolume"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateVsphereVolumeSource(source.VsphereVolume, fldPath.Child("vsphereVolume"))...) - } - } - if source.PhotonPersistentDisk != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("photonPersistentDisk"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validatePhotonPersistentDiskVolumeSource(source.PhotonPersistentDisk, fldPath.Child("photonPersistentDisk"))...) - } - } - if source.PortworxVolume != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("portworxVolume"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validatePortworxVolumeSource(source.PortworxVolume, fldPath.Child("portworxVolume"))...) 
- } - } - if source.AzureDisk != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("azureDisk"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateAzureDisk(source.AzureDisk, fldPath.Child("azureDisk"))...) - } - } - if source.StorageOS != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("storageos"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateStorageOSVolumeSource(source.StorageOS, fldPath.Child("storageos"))...) - } - } - if source.Projected != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("projected"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateProjectedVolumeSource(source.Projected, fldPath.Child("projected"))...) - } - } - if source.ScaleIO != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("scaleIO"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateScaleIOVolumeSource(source.ScaleIO, fldPath.Child("scaleIO"))...) - } - } - - if numVolumes == 0 { - allErrs = append(allErrs, field.Required(fldPath, "must specify a volume type")) - } - - return allErrs -} - -func validateHostPathVolumeSource(hostPath *api.HostPathVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(hostPath.Path) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("path"), "")) - return allErrs - } - - allErrs = append(allErrs, validatePathNoBacksteps(hostPath.Path, fldPath.Child("path"))...) - allErrs = append(allErrs, validateHostPathType(hostPath.Type, fldPath.Child("type"))...) - return allErrs -} - -func validateGitRepoVolumeSource(gitRepo *api.GitRepoVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(gitRepo.Repository) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("repository"), "")) - } - - pathErrs := validateLocalDescendingPath(gitRepo.Directory, fldPath.Child("directory")) - allErrs = append(allErrs, pathErrs...) 
- return allErrs -} - -func validateISCSIVolumeSource(iscsi *api.ISCSIVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(iscsi.TargetPortal) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("targetPortal"), "")) - } - if len(iscsi.IQN) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("iqn"), "")) - } else { - if !strings.HasPrefix(iscsi.IQN, "iqn") && !strings.HasPrefix(iscsi.IQN, "eui") && !strings.HasPrefix(iscsi.IQN, "naa") { - allErrs = append(allErrs, field.Invalid(fldPath.Child("iqn"), iscsi.IQN, "must be valid format")) - } else if strings.HasPrefix(iscsi.IQN, "iqn") && !iscsiInitiatorIqnRegex.MatchString(iscsi.IQN) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("iqn"), iscsi.IQN, "must be valid format")) - } else if strings.HasPrefix(iscsi.IQN, "eui") && !iscsiInitiatorEuiRegex.MatchString(iscsi.IQN) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("iqn"), iscsi.IQN, "must be valid format")) - } else if strings.HasPrefix(iscsi.IQN, "naa") && !iscsiInitiatorNaaRegex.MatchString(iscsi.IQN) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("iqn"), iscsi.IQN, "must be valid format")) - } - } - if iscsi.Lun < 0 || iscsi.Lun > 255 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("lun"), iscsi.Lun, validation.InclusiveRangeError(0, 255))) - } - if (iscsi.DiscoveryCHAPAuth || iscsi.SessionCHAPAuth) && iscsi.SecretRef == nil { - allErrs = append(allErrs, field.Required(fldPath.Child("secretRef"), "")) - } - if iscsi.InitiatorName != nil { - initiator := *iscsi.InitiatorName - if !strings.HasPrefix(initiator, "iqn") && !strings.HasPrefix(initiator, "eui") && !strings.HasPrefix(initiator, "naa") { - allErrs = append(allErrs, field.Invalid(fldPath.Child("initiatorname"), initiator, "must be valid format")) - } - if strings.HasPrefix(initiator, "iqn") && !iscsiInitiatorIqnRegex.MatchString(initiator) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("initiatorname"), initiator, "must be valid format")) - } else if strings.HasPrefix(initiator, "eui") && !iscsiInitiatorEuiRegex.MatchString(initiator) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("initiatorname"), initiator, "must be valid format")) - } else if strings.HasPrefix(initiator, "naa") && !iscsiInitiatorNaaRegex.MatchString(initiator) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("initiatorname"), initiator, "must be valid format")) - } - } - return allErrs -} - -func validateFCVolumeSource(fc *api.FCVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(fc.TargetWWNs) < 1 && len(fc.WWIDs) < 1 { - allErrs = append(allErrs, field.Required(fldPath.Child("targetWWNs"), "must specify either targetWWNs or wwids, but not both")) - } - - if len(fc.TargetWWNs) != 0 && len(fc.WWIDs) != 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("targetWWNs"), fc.TargetWWNs, "targetWWNs and wwids can not be specified simultaneously")) - } - - if len(fc.TargetWWNs) != 0 { - if fc.Lun == nil { - allErrs = append(allErrs, field.Required(fldPath.Child("lun"), "lun is required if targetWWNs is specified")) - } else { - if *fc.Lun < 0 || *fc.Lun > 255 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("lun"), fc.Lun, validation.InclusiveRangeError(0, 255))) - } - } - } - return allErrs -} - -func validateGCEPersistentDiskVolumeSource(pd *api.GCEPersistentDiskVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(pd.PDName) == 0 { - 
allErrs = append(allErrs, field.Required(fldPath.Child("pdName"), "")) - } - if pd.Partition < 0 || pd.Partition > 255 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("partition"), pd.Partition, pdPartitionErrorMsg)) - } - return allErrs -} - -func validateAWSElasticBlockStoreVolumeSource(PD *api.AWSElasticBlockStoreVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(PD.VolumeID) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("volumeID"), "")) - } - if PD.Partition < 0 || PD.Partition > 255 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("partition"), PD.Partition, pdPartitionErrorMsg)) - } - return allErrs -} - -func validateSecretVolumeSource(secretSource *api.SecretVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(secretSource.SecretName) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("secretName"), "")) - } - - secretMode := secretSource.DefaultMode - if secretMode != nil && (*secretMode > 0777 || *secretMode < 0) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *secretMode, volumeModeErrorMsg)) - } - - itemsPath := fldPath.Child("items") - for i, kp := range secretSource.Items { - itemPath := itemsPath.Index(i) - allErrs = append(allErrs, validateKeyToPath(&kp, itemPath)...) - } - return allErrs -} - -func validateConfigMapVolumeSource(configMapSource *api.ConfigMapVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(configMapSource.Name) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) - } - - configMapMode := configMapSource.DefaultMode - if configMapMode != nil && (*configMapMode > 0777 || *configMapMode < 0) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *configMapMode, volumeModeErrorMsg)) - } - - itemsPath := fldPath.Child("items") - for i, kp := range configMapSource.Items { - itemPath := itemsPath.Index(i) - allErrs = append(allErrs, validateKeyToPath(&kp, itemPath)...) - } - return allErrs -} - -func validateKeyToPath(kp *api.KeyToPath, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(kp.Key) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("key"), "")) - } - if len(kp.Path) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("path"), "")) - } - allErrs = append(allErrs, validateLocalNonReservedPath(kp.Path, fldPath.Child("path"))...) 
- if kp.Mode != nil && (*kp.Mode > 0777 || *kp.Mode < 0) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("mode"), *kp.Mode, volumeModeErrorMsg)) - } - - return allErrs -} - -func validatePersistentClaimVolumeSource(claim *api.PersistentVolumeClaimVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(claim.ClaimName) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("claimName"), "")) - } - return allErrs -} - -func validateNFSVolumeSource(nfs *api.NFSVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(nfs.Server) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("server"), "")) - } - if len(nfs.Path) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("path"), "")) - } - if !path.IsAbs(nfs.Path) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("path"), nfs.Path, "must be an absolute path")) - } - return allErrs -} - -func validateQuobyteVolumeSource(quobyte *api.QuobyteVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(quobyte.Registry) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("registry"), "must be a host:port pair or multiple pairs separated by commas")) - } else { - for _, hostPortPair := range strings.Split(quobyte.Registry, ",") { - if _, _, err := net.SplitHostPort(hostPortPair); err != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Child("registry"), quobyte.Registry, "must be a host:port pair or multiple pairs separated by commas")) - } - } - } - - if len(quobyte.Volume) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("volume"), "")) - } - return allErrs -} - -func validateGlusterfsVolumeSource(glusterfs *api.GlusterfsVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(glusterfs.EndpointsName) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("endpoints"), "")) - } - if len(glusterfs.Path) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("path"), "")) - } - return allErrs -} - -func validateFlockerVolumeSource(flocker *api.FlockerVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(flocker.DatasetName) == 0 && len(flocker.DatasetUUID) == 0 { - //TODO: consider adding a RequiredOneOf() error for this and similar cases - allErrs = append(allErrs, field.Required(fldPath, "one of datasetName and datasetUUID is required")) - } - if len(flocker.DatasetName) != 0 && len(flocker.DatasetUUID) != 0 { - allErrs = append(allErrs, field.Invalid(fldPath, "resource", "datasetName and datasetUUID can not be specified simultaneously")) - } - if strings.Contains(flocker.DatasetName, "/") { - allErrs = append(allErrs, field.Invalid(fldPath.Child("datasetName"), flocker.DatasetName, "must not contain '/'")) - } - return allErrs -} - -var validDownwardAPIFieldPathExpressions = sets.NewString( - "metadata.name", - "metadata.namespace", - "metadata.labels", - "metadata.annotations", - "metadata.uid") - -func validateDownwardAPIVolumeFile(file *api.DownwardAPIVolumeFile, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - if len(file.Path) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("path"), "")) - } - allErrs = append(allErrs, validateLocalNonReservedPath(file.Path, fldPath.Child("path"))...) 
- if file.FieldRef != nil { - allErrs = append(allErrs, validateObjectFieldSelector(file.FieldRef, &validDownwardAPIFieldPathExpressions, fldPath.Child("fieldRef"))...) - if file.ResourceFieldRef != nil { - allErrs = append(allErrs, field.Invalid(fldPath, "resource", "fieldRef and resourceFieldRef can not be specified simultaneously")) - } - } else if file.ResourceFieldRef != nil { - allErrs = append(allErrs, validateContainerResourceFieldSelector(file.ResourceFieldRef, &validContainerResourceFieldPathExpressions, fldPath.Child("resourceFieldRef"), true)...) - } else { - allErrs = append(allErrs, field.Required(fldPath, "one of fieldRef and resourceFieldRef is required")) - } - if file.Mode != nil && (*file.Mode > 0777 || *file.Mode < 0) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("mode"), *file.Mode, volumeModeErrorMsg)) - } - - return allErrs -} - -func validateDownwardAPIVolumeSource(downwardAPIVolume *api.DownwardAPIVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - downwardAPIMode := downwardAPIVolume.DefaultMode - if downwardAPIMode != nil && (*downwardAPIMode > 0777 || *downwardAPIMode < 0) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *downwardAPIMode, volumeModeErrorMsg)) - } - - for _, file := range downwardAPIVolume.Items { - allErrs = append(allErrs, validateDownwardAPIVolumeFile(&file, fldPath)...) - } - return allErrs -} - -func validateProjectionSources(projection *api.ProjectedVolumeSource, projectionMode *int32, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - allPaths := sets.String{} - - for _, source := range projection.Sources { - numSources := 0 - if source.Secret != nil { - if numSources > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("secret"), "may not specify more than 1 volume type")) - } else { - numSources++ - if len(source.Secret.Name) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) - } - itemsPath := fldPath.Child("items") - for i, kp := range source.Secret.Items { - itemPath := itemsPath.Index(i) - allErrs = append(allErrs, validateKeyToPath(&kp, itemPath)...) - if len(kp.Path) > 0 { - curPath := kp.Path - if !allPaths.Has(curPath) { - allPaths.Insert(curPath) - } else { - allErrs = append(allErrs, field.Invalid(fldPath, source.Secret.Name, "conflicting duplicate paths")) - } - } - } - } - } - if source.ConfigMap != nil { - if numSources > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("configMap"), "may not specify more than 1 volume type")) - } else { - numSources++ - if len(source.ConfigMap.Name) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) - } - itemsPath := fldPath.Child("items") - for i, kp := range source.ConfigMap.Items { - itemPath := itemsPath.Index(i) - allErrs = append(allErrs, validateKeyToPath(&kp, itemPath)...) - if len(kp.Path) > 0 { - curPath := kp.Path - if !allPaths.Has(curPath) { - allPaths.Insert(curPath) - } else { - allErrs = append(allErrs, field.Invalid(fldPath, source.ConfigMap.Name, "conflicting duplicate paths")) - } - - } - } - } - } - if source.DownwardAPI != nil { - if numSources > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("downwardAPI"), "may not specify more than 1 volume type")) - } else { - numSources++ - for _, file := range source.DownwardAPI.Items { - allErrs = append(allErrs, validateDownwardAPIVolumeFile(&file, fldPath.Child("downwardAPI"))...) 
- if len(file.Path) > 0 { - curPath := file.Path - if !allPaths.Has(curPath) { - allPaths.Insert(curPath) - } else { - allErrs = append(allErrs, field.Invalid(fldPath, curPath, "conflicting duplicate paths")) - } - - } - } - } - } - } - return allErrs -} - -func validateProjectedVolumeSource(projection *api.ProjectedVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - projectionMode := projection.DefaultMode - if projectionMode != nil && (*projectionMode > 0777 || *projectionMode < 0) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *projectionMode, volumeModeErrorMsg)) - } - - allErrs = append(allErrs, validateProjectionSources(projection, projectionMode, fldPath)...) - return allErrs -} - -var supportedHostPathTypes = sets.NewString( - string(api.HostPathUnset), - string(api.HostPathDirectoryOrCreate), - string(api.HostPathDirectory), - string(api.HostPathFileOrCreate), - string(api.HostPathFile), - string(api.HostPathSocket), - string(api.HostPathCharDev), - string(api.HostPathBlockDev)) - -func validateHostPathType(hostPathType *api.HostPathType, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - if hostPathType != nil && !supportedHostPathTypes.Has(string(*hostPathType)) { - allErrs = append(allErrs, field.NotSupported(fldPath, hostPathType, supportedHostPathTypes.List())) - } - - return allErrs -} - -// This validate will make sure targetPath: -// 1. is not abs path -// 2. does not have any element which is ".." -func validateLocalDescendingPath(targetPath string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if path.IsAbs(targetPath) { - allErrs = append(allErrs, field.Invalid(fldPath, targetPath, "must be a relative path")) - } - - allErrs = append(allErrs, validatePathNoBacksteps(targetPath, fldPath)...) - - return allErrs -} - -// validatePathNoBacksteps makes sure the targetPath does not have any `..` path elements when split -// -// This assumes the OS of the apiserver and the nodes are the same. The same check should be done -// on the node to ensure there are no backsteps. -func validatePathNoBacksteps(targetPath string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - parts := strings.Split(filepath.ToSlash(targetPath), "/") - for _, item := range parts { - if item == ".." { - allErrs = append(allErrs, field.Invalid(fldPath, targetPath, "must not contain '..'")) - break // even for `../../..`, one error is sufficient to make the point - } - } - return allErrs -} - -// validateMountPropagation verifies that MountPropagation field is valid and -// allowed for given container. -func validateMountPropagation(mountPropagation *api.MountPropagationMode, container *api.Container, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - if mountPropagation == nil { - return allErrs - } - if !utilfeature.DefaultFeatureGate.Enabled(features.MountPropagation) { - allErrs = append(allErrs, field.Forbidden(fldPath, "mount propagation is disabled by feature-gate")) - return allErrs - } - - supportedMountPropagations := sets.NewString(string(api.MountPropagationBidirectional), string(api.MountPropagationHostToContainer)) - if !supportedMountPropagations.Has(string(*mountPropagation)) { - allErrs = append(allErrs, field.NotSupported(fldPath, *mountPropagation, supportedMountPropagations.List())) - } - - if container == nil { - // The container is not available yet, e.g. during validation of - // PodPreset. 
Stop validation now, Pod validation will refuse final - // Pods with Bidirectional propagation in non-privileged containers. - return allErrs - } - - privileged := container.SecurityContext != nil && container.SecurityContext.Privileged != nil && *container.SecurityContext.Privileged - if *mountPropagation == api.MountPropagationBidirectional && !privileged { - allErrs = append(allErrs, field.Forbidden(fldPath, "Bidirectional mount propagation is available only to privileged containers")) - } - return allErrs -} - -// This validate will make sure targetPath: -// 1. is not abs path -// 2. does not contain any '..' elements -// 3. does not start with '..' -func validateLocalNonReservedPath(targetPath string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, validateLocalDescendingPath(targetPath, fldPath)...) - // Don't report this error if the check for .. elements already caught it. - if strings.HasPrefix(targetPath, "..") && !strings.HasPrefix(targetPath, "../") { - allErrs = append(allErrs, field.Invalid(fldPath, targetPath, "must not start with '..'")) - } - return allErrs -} - -func validateRBDVolumeSource(rbd *api.RBDVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(rbd.CephMonitors) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("monitors"), "")) - } - if len(rbd.RBDImage) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("image"), "")) - } - return allErrs -} - -func validateCinderVolumeSource(cd *api.CinderVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(cd.VolumeID) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("volumeID"), "")) - } - return allErrs -} - -func validateCephFSVolumeSource(cephfs *api.CephFSVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(cephfs.Monitors) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("monitors"), "")) - } - return allErrs -} - -func validateCephFSPersistentVolumeSource(cephfs *api.CephFSPersistentVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(cephfs.Monitors) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("monitors"), "")) - } - return allErrs -} - -func validateFlexVolumeSource(fv *api.FlexVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(fv.Driver) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("driver"), "")) - } - - // Make sure user-specified options don't use kubernetes namespaces - for k := range fv.Options { - namespace := k - if parts := strings.SplitN(k, "/", 2); len(parts) == 2 { - namespace = parts[0] - } - normalized := "." 
+ strings.ToLower(namespace) - if strings.HasSuffix(normalized, ".kubernetes.io") || strings.HasSuffix(normalized, ".k8s.io") { - allErrs = append(allErrs, field.Invalid(fldPath.Child("options").Key(k), k, "kubernetes.io and k8s.io namespaces are reserved")) - } - } - - return allErrs -} - -func validateAzureFile(azure *api.AzureFileVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if azure.SecretName == "" { - allErrs = append(allErrs, field.Required(fldPath.Child("secretName"), "")) - } - if azure.ShareName == "" { - allErrs = append(allErrs, field.Required(fldPath.Child("shareName"), "")) - } - return allErrs -} - -func validateAzureFilePV(azure *api.AzureFilePersistentVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if azure.SecretName == "" { - allErrs = append(allErrs, field.Required(fldPath.Child("secretName"), "")) - } - if azure.ShareName == "" { - allErrs = append(allErrs, field.Required(fldPath.Child("shareName"), "")) - } - if azure.SecretNamespace != nil { - if len(*azure.SecretNamespace) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("secretNamespace"), "")) - } - } - return allErrs -} - -func validateAzureDisk(azure *api.AzureDiskVolumeSource, fldPath *field.Path) field.ErrorList { - var supportedCachingModes = sets.NewString(string(api.AzureDataDiskCachingNone), string(api.AzureDataDiskCachingReadOnly), string(api.AzureDataDiskCachingReadWrite)) - var supportedDiskKinds = sets.NewString(string(api.AzureSharedBlobDisk), string(api.AzureDedicatedBlobDisk), string(api.AzureManagedDisk)) - - diskUriSupportedManaged := []string{"/subscriptions/{sub-id}/resourcegroups/{group-name}/providers/microsoft.compute/disks/{disk-id}"} - diskUriSupportedblob := []string{"https://{account-name}.blob.core.windows.net/{container-name}/{disk-name}.vhd"} - - allErrs := field.ErrorList{} - if azure.DiskName == "" { - allErrs = append(allErrs, field.Required(fldPath.Child("diskName"), "")) - } - - if azure.DataDiskURI == "" { - allErrs = append(allErrs, field.Required(fldPath.Child("diskURI"), "")) - } - - if azure.CachingMode != nil && !supportedCachingModes.Has(string(*azure.CachingMode)) { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("cachingMode"), *azure.CachingMode, supportedCachingModes.List())) - } - - if azure.Kind != nil && !supportedDiskKinds.Has(string(*azure.Kind)) { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("kind"), *azure.Kind, supportedDiskKinds.List())) - } - - // validate that DiskUri is the correct format - if azure.Kind != nil && *azure.Kind == api.AzureManagedDisk && strings.Index(azure.DataDiskURI, "/subscriptions/") != 0 { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("diskURI"), azure.DataDiskURI, diskUriSupportedManaged)) - } - - if azure.Kind != nil && *azure.Kind != api.AzureManagedDisk && strings.Index(azure.DataDiskURI, "https://") != 0 { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("diskURI"), azure.DataDiskURI, diskUriSupportedblob)) - } - - return allErrs -} - -func validateVsphereVolumeSource(cd *api.VsphereVirtualDiskVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(cd.VolumePath) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("volumePath"), "")) - } - return allErrs -} - -func validatePhotonPersistentDiskVolumeSource(cd *api.PhotonPersistentDiskVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(cd.PdID) == 0 
{ - allErrs = append(allErrs, field.Required(fldPath.Child("pdID"), "")) - } - return allErrs -} - -func validatePortworxVolumeSource(pwx *api.PortworxVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(pwx.VolumeID) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("volumeID"), "")) - } - return allErrs -} - -func validateScaleIOVolumeSource(sio *api.ScaleIOVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if sio.Gateway == "" { - allErrs = append(allErrs, field.Required(fldPath.Child("gateway"), "")) - } - if sio.System == "" { - allErrs = append(allErrs, field.Required(fldPath.Child("system"), "")) - } - if sio.VolumeName == "" { - allErrs = append(allErrs, field.Required(fldPath.Child("volumeName"), "")) - } - return allErrs -} - -func validateLocalVolumeSource(ls *api.LocalVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if ls.Path == "" { - allErrs = append(allErrs, field.Required(fldPath.Child("path"), "")) - return allErrs - } - - allErrs = append(allErrs, validatePathNoBacksteps(ls.Path, fldPath.Child("path"))...) - return allErrs -} - -func validateStorageOSVolumeSource(storageos *api.StorageOSVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(storageos.VolumeName) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("volumeName"), "")) - } else { - allErrs = append(allErrs, ValidateDNS1123Label(storageos.VolumeName, fldPath.Child("volumeName"))...) - } - if len(storageos.VolumeNamespace) > 0 { - allErrs = append(allErrs, ValidateDNS1123Label(storageos.VolumeNamespace, fldPath.Child("volumeNamespace"))...) - } - if storageos.SecretRef != nil { - if len(storageos.SecretRef.Name) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("secretRef", "name"), "")) - } - } - return allErrs -} - -func validateStorageOSPersistentVolumeSource(storageos *api.StorageOSPersistentVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(storageos.VolumeName) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("volumeName"), "")) - } else { - allErrs = append(allErrs, ValidateDNS1123Label(storageos.VolumeName, fldPath.Child("volumeName"))...) - } - if len(storageos.VolumeNamespace) > 0 { - allErrs = append(allErrs, ValidateDNS1123Label(storageos.VolumeNamespace, fldPath.Child("volumeNamespace"))...) - } - if storageos.SecretRef != nil { - if len(storageos.SecretRef.Name) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("secretRef", "name"), "")) - } - if len(storageos.SecretRef.Namespace) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("secretRef", "namespace"), "")) - } - } - return allErrs -} - -// ValidatePersistentVolumeName checks that a name is appropriate for a -// PersistentVolumeName object. 
-var ValidatePersistentVolumeName = NameIsDNSSubdomain - -var supportedAccessModes = sets.NewString(string(api.ReadWriteOnce), string(api.ReadOnlyMany), string(api.ReadWriteMany)) - -var supportedReclaimPolicy = sets.NewString(string(api.PersistentVolumeReclaimDelete), string(api.PersistentVolumeReclaimRecycle), string(api.PersistentVolumeReclaimRetain)) - -func ValidatePersistentVolume(pv *api.PersistentVolume) field.ErrorList { - metaPath := field.NewPath("metadata") - allErrs := ValidateObjectMeta(&pv.ObjectMeta, false, ValidatePersistentVolumeName, metaPath) - - specPath := field.NewPath("spec") - if len(pv.Spec.AccessModes) == 0 { - allErrs = append(allErrs, field.Required(specPath.Child("accessModes"), "")) - } - for _, mode := range pv.Spec.AccessModes { - if !supportedAccessModes.Has(string(mode)) { - allErrs = append(allErrs, field.NotSupported(specPath.Child("accessModes"), mode, supportedAccessModes.List())) - } - } - - if len(pv.Spec.Capacity) == 0 { - allErrs = append(allErrs, field.Required(specPath.Child("capacity"), "")) - } - - if _, ok := pv.Spec.Capacity[api.ResourceStorage]; !ok || len(pv.Spec.Capacity) > 1 { - allErrs = append(allErrs, field.NotSupported(specPath.Child("capacity"), pv.Spec.Capacity, []string{string(api.ResourceStorage)})) - } - capPath := specPath.Child("capacity") - for r, qty := range pv.Spec.Capacity { - allErrs = append(allErrs, validateBasicResource(qty, capPath.Key(string(r)))...) - } - if len(string(pv.Spec.PersistentVolumeReclaimPolicy)) > 0 { - if !supportedReclaimPolicy.Has(string(pv.Spec.PersistentVolumeReclaimPolicy)) { - allErrs = append(allErrs, field.NotSupported(specPath.Child("persistentVolumeReclaimPolicy"), pv.Spec.PersistentVolumeReclaimPolicy, supportedReclaimPolicy.List())) - } - } - - nodeAffinitySpecified, errs := validateStorageNodeAffinityAnnotation(pv.ObjectMeta.Annotations, metaPath.Child("annotations")) - allErrs = append(allErrs, errs...) - - numVolumes := 0 - if pv.Spec.HostPath != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(specPath.Child("hostPath"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateHostPathVolumeSource(pv.Spec.HostPath, specPath.Child("hostPath"))...) - } - } - if pv.Spec.GCEPersistentDisk != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(specPath.Child("gcePersistentDisk"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateGCEPersistentDiskVolumeSource(pv.Spec.GCEPersistentDisk, specPath.Child("persistentDisk"))...) - } - } - if pv.Spec.AWSElasticBlockStore != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(specPath.Child("awsElasticBlockStore"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateAWSElasticBlockStoreVolumeSource(pv.Spec.AWSElasticBlockStore, specPath.Child("awsElasticBlockStore"))...) - } - } - if pv.Spec.Glusterfs != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(specPath.Child("glusterfs"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateGlusterfsVolumeSource(pv.Spec.Glusterfs, specPath.Child("glusterfs"))...) 
- } - } - if pv.Spec.Flocker != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(specPath.Child("flocker"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateFlockerVolumeSource(pv.Spec.Flocker, specPath.Child("flocker"))...) - } - } - if pv.Spec.NFS != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(specPath.Child("nfs"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateNFSVolumeSource(pv.Spec.NFS, specPath.Child("nfs"))...) - } - } - if pv.Spec.RBD != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(specPath.Child("rbd"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateRBDVolumeSource(pv.Spec.RBD, specPath.Child("rbd"))...) - } - } - if pv.Spec.Quobyte != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(specPath.Child("quobyte"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateQuobyteVolumeSource(pv.Spec.Quobyte, specPath.Child("quobyte"))...) - } - } - if pv.Spec.CephFS != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(specPath.Child("cephFS"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateCephFSPersistentVolumeSource(pv.Spec.CephFS, specPath.Child("cephfs"))...) - } - } - if pv.Spec.ISCSI != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(specPath.Child("iscsi"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateISCSIVolumeSource(pv.Spec.ISCSI, specPath.Child("iscsi"))...) - } - if pv.Spec.ISCSI.InitiatorName != nil && len(pv.ObjectMeta.Name+":"+pv.Spec.ISCSI.TargetPortal) > 64 { - tooLongErr := "Total length of : must be under 64 characters if iscsi.initiatorName is specified." - allErrs = append(allErrs, field.Invalid(metaPath.Child("name"), pv.ObjectMeta.Name, tooLongErr)) - } - } - if pv.Spec.Cinder != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(specPath.Child("cinder"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateCinderVolumeSource(pv.Spec.Cinder, specPath.Child("cinder"))...) - } - } - if pv.Spec.FC != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(specPath.Child("fc"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateFCVolumeSource(pv.Spec.FC, specPath.Child("fc"))...) - } - } - if pv.Spec.FlexVolume != nil { - numVolumes++ - allErrs = append(allErrs, validateFlexVolumeSource(pv.Spec.FlexVolume, specPath.Child("flexVolume"))...) - } - if pv.Spec.AzureFile != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(specPath.Child("azureFile"), "may not specify more than 1 volume type")) - - } else { - numVolumes++ - allErrs = append(allErrs, validateAzureFilePV(pv.Spec.AzureFile, specPath.Child("azureFile"))...) - } - } - - if pv.Spec.VsphereVolume != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(specPath.Child("vsphereVolume"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateVsphereVolumeSource(pv.Spec.VsphereVolume, specPath.Child("vsphereVolume"))...) 
- } - } - if pv.Spec.PhotonPersistentDisk != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(specPath.Child("photonPersistentDisk"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validatePhotonPersistentDiskVolumeSource(pv.Spec.PhotonPersistentDisk, specPath.Child("photonPersistentDisk"))...) - } - } - if pv.Spec.PortworxVolume != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(specPath.Child("portworxVolume"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validatePortworxVolumeSource(pv.Spec.PortworxVolume, specPath.Child("portworxVolume"))...) - } - } - if pv.Spec.AzureDisk != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(specPath.Child("azureDisk"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateAzureDisk(pv.Spec.AzureDisk, specPath.Child("azureDisk"))...) - } - } - if pv.Spec.ScaleIO != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(specPath.Child("scaleIO"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateScaleIOVolumeSource(pv.Spec.ScaleIO, specPath.Child("scaleIO"))...) - } - } - if pv.Spec.Local != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(specPath.Child("local"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - if !utilfeature.DefaultFeatureGate.Enabled(features.PersistentLocalVolumes) { - allErrs = append(allErrs, field.Forbidden(specPath.Child("local"), "Local volumes are disabled by feature-gate")) - } - allErrs = append(allErrs, validateLocalVolumeSource(pv.Spec.Local, specPath.Child("local"))...) - - // NodeAffinity is required - if !nodeAffinitySpecified { - allErrs = append(allErrs, field.Required(metaPath.Child("annotations"), "Local volume requires node affinity")) - } - } - } - if pv.Spec.StorageOS != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(specPath.Child("storageos"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateStorageOSPersistentVolumeSource(pv.Spec.StorageOS, specPath.Child("storageos"))...) - } - } - - if numVolumes == 0 { - allErrs = append(allErrs, field.Required(specPath, "must specify a volume type")) - } - - // do not allow hostPath mounts of '/' to have a 'recycle' reclaim policy - if pv.Spec.HostPath != nil && path.Clean(pv.Spec.HostPath.Path) == "/" && pv.Spec.PersistentVolumeReclaimPolicy == api.PersistentVolumeReclaimRecycle { - allErrs = append(allErrs, field.Forbidden(specPath.Child("persistentVolumeReclaimPolicy"), "may not be 'recycle' for a hostPath mount of '/'")) - } - - if len(pv.Spec.StorageClassName) > 0 { - for _, msg := range ValidateClassName(pv.Spec.StorageClassName, false) { - allErrs = append(allErrs, field.Invalid(specPath.Child("storageClassName"), pv.Spec.StorageClassName, msg)) - } - } - - return allErrs -} - -// ValidatePersistentVolumeUpdate tests to see if the update is legal for an end user to make. -// newPv is updated with fields that cannot be changed. -func ValidatePersistentVolumeUpdate(newPv, oldPv *api.PersistentVolume) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = ValidatePersistentVolume(newPv) - newPv.Status = oldPv.Status - return allErrs -} - -// ValidatePersistentVolumeStatusUpdate tests to see if the status update is legal for an end user to make. 
-// newPv is updated with fields that cannot be changed. -func ValidatePersistentVolumeStatusUpdate(newPv, oldPv *api.PersistentVolume) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&newPv.ObjectMeta, &oldPv.ObjectMeta, field.NewPath("metadata")) - if len(newPv.ResourceVersion) == 0 { - allErrs = append(allErrs, field.Required(field.NewPath("resourceVersion"), "")) - } - newPv.Spec = oldPv.Spec - return allErrs -} - -// ValidatePersistentVolumeClaim validates a PersistentVolumeClaim -func ValidatePersistentVolumeClaim(pvc *api.PersistentVolumeClaim) field.ErrorList { - allErrs := ValidateObjectMeta(&pvc.ObjectMeta, true, ValidatePersistentVolumeName, field.NewPath("metadata")) - allErrs = append(allErrs, ValidatePersistentVolumeClaimSpec(&pvc.Spec, field.NewPath("spec"))...) - return allErrs -} - -// ValidatePersistentVolumeClaimSpec validates a PersistentVolumeClaimSpec -func ValidatePersistentVolumeClaimSpec(spec *api.PersistentVolumeClaimSpec, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(spec.AccessModes) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("accessModes"), "at least 1 access mode is required")) - } - if spec.Selector != nil { - allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child("selector"))...) - } - for _, mode := range spec.AccessModes { - if mode != api.ReadWriteOnce && mode != api.ReadOnlyMany && mode != api.ReadWriteMany { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("accessModes"), mode, supportedAccessModes.List())) - } - } - storageValue, ok := spec.Resources.Requests[api.ResourceStorage] - if !ok { - allErrs = append(allErrs, field.Required(fldPath.Child("resources").Key(string(api.ResourceStorage)), "")) - } else { - allErrs = append(allErrs, ValidateResourceQuantityValue(string(api.ResourceStorage), storageValue, fldPath.Child("resources").Key(string(api.ResourceStorage)))...) - } - - if spec.StorageClassName != nil && len(*spec.StorageClassName) > 0 { - for _, msg := range ValidateClassName(*spec.StorageClassName, false) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("storageClassName"), *spec.StorageClassName, msg)) - } - } - return allErrs -} - -// ValidatePersistentVolumeClaimUpdate validates an update to a PersistentVolumeClaim -func ValidatePersistentVolumeClaimUpdate(newPvc, oldPvc *api.PersistentVolumeClaim) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&newPvc.ObjectMeta, &oldPvc.ObjectMeta, field.NewPath("metadata")) - allErrs = append(allErrs, ValidatePersistentVolumeClaim(newPvc)...) - // PVController needs to update PVC.Spec w/ VolumeName. - // Claims are immutable in order to enforce quota, range limits, etc. without gaming the system. - if len(oldPvc.Spec.VolumeName) == 0 { - // volumeName changes are allowed once. - // Reset back to empty string after equality check - oldPvc.Spec.VolumeName = newPvc.Spec.VolumeName - defer func() { oldPvc.Spec.VolumeName = "" }() - } - - if utilfeature.DefaultFeatureGate.Enabled(features.ExpandPersistentVolumes) { - newPVCSpecCopy := newPvc.Spec.DeepCopy() - - // lets make sure storage values are same. 
- if newPvc.Status.Phase == api.ClaimBound && newPVCSpecCopy.Resources.Requests != nil { - newPVCSpecCopy.Resources.Requests["storage"] = oldPvc.Spec.Resources.Requests["storage"] - } - - oldSize := oldPvc.Spec.Resources.Requests["storage"] - newSize := newPvc.Spec.Resources.Requests["storage"] - - if !apiequality.Semantic.DeepEqual(*newPVCSpecCopy, oldPvc.Spec) { - allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), "is immutable after creation except resources.requests for bound claims")) - } - if newSize.Cmp(oldSize) < 0 { - allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "resources", "requests", "storage"), "field can not be less than previous value")) - } - - } else { - // changes to Spec are not allowed, but updates to label/and some annotations are OK. - // no-op updates pass validation. - if !apiequality.Semantic.DeepEqual(newPvc.Spec, oldPvc.Spec) { - allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), "field is immutable after creation")) - } - } - - // storageclass annotation should be immutable after creation - // TODO: remove Beta when no longer needed - allErrs = append(allErrs, ValidateImmutableAnnotation(newPvc.ObjectMeta.Annotations[v1.BetaStorageClassAnnotation], oldPvc.ObjectMeta.Annotations[v1.BetaStorageClassAnnotation], v1.BetaStorageClassAnnotation, field.NewPath("metadata"))...) - - newPvc.Status = oldPvc.Status - return allErrs -} - -// ValidatePersistentVolumeClaimStatusUpdate validates an update to status of a PersistentVolumeClaim -func ValidatePersistentVolumeClaimStatusUpdate(newPvc, oldPvc *api.PersistentVolumeClaim) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&newPvc.ObjectMeta, &oldPvc.ObjectMeta, field.NewPath("metadata")) - if len(newPvc.ResourceVersion) == 0 { - allErrs = append(allErrs, field.Required(field.NewPath("resourceVersion"), "")) - } - if len(newPvc.Spec.AccessModes) == 0 { - allErrs = append(allErrs, field.Required(field.NewPath("Spec", "accessModes"), "")) - } - if !utilfeature.DefaultFeatureGate.Enabled(features.ExpandPersistentVolumes) && len(newPvc.Status.Conditions) > 0 { - conditionPath := field.NewPath("status", "conditions") - allErrs = append(allErrs, field.Forbidden(conditionPath, "invalid field")) - } - capPath := field.NewPath("status", "capacity") - for r, qty := range newPvc.Status.Capacity { - allErrs = append(allErrs, validateBasicResource(qty, capPath.Key(string(r)))...) 
- } - newPvc.Spec = oldPvc.Spec - return allErrs -} - -var supportedPortProtocols = sets.NewString(string(api.ProtocolTCP), string(api.ProtocolUDP)) - -func validateContainerPorts(ports []api.ContainerPort, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - allNames := sets.String{} - for i, port := range ports { - idxPath := fldPath.Index(i) - if len(port.Name) > 0 { - if msgs := validation.IsValidPortName(port.Name); len(msgs) != 0 { - for i = range msgs { - allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), port.Name, msgs[i])) - } - } else if allNames.Has(port.Name) { - allErrs = append(allErrs, field.Duplicate(idxPath.Child("name"), port.Name)) - } else { - allNames.Insert(port.Name) - } - } - if port.ContainerPort == 0 { - allErrs = append(allErrs, field.Required(idxPath.Child("containerPort"), "")) - } else { - for _, msg := range validation.IsValidPortNum(int(port.ContainerPort)) { - allErrs = append(allErrs, field.Invalid(idxPath.Child("containerPort"), port.ContainerPort, msg)) - } - } - if port.HostPort != 0 { - for _, msg := range validation.IsValidPortNum(int(port.HostPort)) { - allErrs = append(allErrs, field.Invalid(idxPath.Child("hostPort"), port.HostPort, msg)) - } - } - if len(port.Protocol) == 0 { - allErrs = append(allErrs, field.Required(idxPath.Child("protocol"), "")) - } else if !supportedPortProtocols.Has(string(port.Protocol)) { - allErrs = append(allErrs, field.NotSupported(idxPath.Child("protocol"), port.Protocol, supportedPortProtocols.List())) - } - } - return allErrs -} - -// ValidateEnv validates env vars -func ValidateEnv(vars []api.EnvVar, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - for i, ev := range vars { - idxPath := fldPath.Index(i) - if len(ev.Name) == 0 { - allErrs = append(allErrs, field.Required(idxPath.Child("name"), "")) - } else { - for _, msg := range validation.IsEnvVarName(ev.Name) { - allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), ev.Name, msg)) - } - } - allErrs = append(allErrs, validateEnvVarValueFrom(ev, idxPath.Child("valueFrom"))...) - } - return allErrs -} - -var validFieldPathExpressionsEnv = sets.NewString("metadata.name", "metadata.namespace", "metadata.uid", "spec.nodeName", "spec.serviceAccountName", "status.hostIP", "status.podIP") -var validContainerResourceFieldPathExpressions = sets.NewString("limits.cpu", "limits.memory", "limits.ephemeral-storage", "requests.cpu", "requests.memory", "requests.ephemeral-storage") - -func validateEnvVarValueFrom(ev api.EnvVar, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - if ev.ValueFrom == nil { - return allErrs - } - - numSources := 0 - - if ev.ValueFrom.FieldRef != nil { - numSources++ - allErrs = append(allErrs, validateObjectFieldSelector(ev.ValueFrom.FieldRef, &validFieldPathExpressionsEnv, fldPath.Child("fieldRef"))...) - } - if ev.ValueFrom.ResourceFieldRef != nil { - numSources++ - allErrs = append(allErrs, validateContainerResourceFieldSelector(ev.ValueFrom.ResourceFieldRef, &validContainerResourceFieldPathExpressions, fldPath.Child("resourceFieldRef"), false)...) - } - if ev.ValueFrom.ConfigMapKeyRef != nil { - numSources++ - allErrs = append(allErrs, validateConfigMapKeySelector(ev.ValueFrom.ConfigMapKeyRef, fldPath.Child("configMapKeyRef"))...) - } - if ev.ValueFrom.SecretKeyRef != nil { - numSources++ - allErrs = append(allErrs, validateSecretKeySelector(ev.ValueFrom.SecretKeyRef, fldPath.Child("secretKeyRef"))...) 
- } - - if numSources == 0 { - allErrs = append(allErrs, field.Invalid(fldPath, "", "must specify one of: `fieldRef`, `resourceFieldRef`, `configMapKeyRef` or `secretKeyRef`")) - } else if len(ev.Value) != 0 { - if numSources != 0 { - allErrs = append(allErrs, field.Invalid(fldPath, "", "may not be specified when `value` is not empty")) - } - } else if numSources > 1 { - allErrs = append(allErrs, field.Invalid(fldPath, "", "may not have more than one field specified at a time")) - } - - return allErrs -} - -func validateObjectFieldSelector(fs *api.ObjectFieldSelector, expressions *sets.String, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - if len(fs.APIVersion) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("apiVersion"), "")) - } else if len(fs.FieldPath) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("fieldPath"), "")) - } else { - internalFieldPath, _, err := api.Scheme.ConvertFieldLabel(fs.APIVersion, "Pod", fs.FieldPath, "") - if err != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Child("fieldPath"), fs.FieldPath, fmt.Sprintf("error converting fieldPath: %v", err))) - } else if !expressions.Has(internalFieldPath) { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("fieldPath"), internalFieldPath, expressions.List())) - } - } - - return allErrs -} - -func fsResourceIsEphemeralStorage(resource string) bool { - if resource == "limits.ephemeral-storage" || resource == "requests.ephemeral-storage" { - return true - } - return false -} - -func validateContainerResourceFieldSelector(fs *api.ResourceFieldSelector, expressions *sets.String, fldPath *field.Path, volume bool) field.ErrorList { - allErrs := field.ErrorList{} - - if volume && len(fs.ContainerName) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("containerName"), "")) - } else if len(fs.Resource) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("resource"), "")) - } else if !expressions.Has(fs.Resource) { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("resource"), fs.Resource, expressions.List())) - } else if fsResourceIsEphemeralStorage(fs.Resource) && !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) { - allErrs = append(allErrs, field.Forbidden(fldPath, "Containers' ephemeral storage requests/limits disabled by feature-gate for Downward API")) - } - allErrs = append(allErrs, validateContainerResourceDivisor(fs.Resource, fs.Divisor, fldPath)...) - return allErrs -} - -func ValidateEnvFrom(vars []api.EnvFromSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - for i, ev := range vars { - idxPath := fldPath.Index(i) - if len(ev.Prefix) > 0 { - for _, msg := range validation.IsEnvVarName(ev.Prefix) { - allErrs = append(allErrs, field.Invalid(idxPath.Child("prefix"), ev.Prefix, msg)) - } - } - - numSources := 0 - if ev.ConfigMapRef != nil { - numSources++ - allErrs = append(allErrs, validateConfigMapEnvSource(ev.ConfigMapRef, idxPath.Child("configMapRef"))...) - } - if ev.SecretRef != nil { - numSources++ - allErrs = append(allErrs, validateSecretEnvSource(ev.SecretRef, idxPath.Child("secretRef"))...) 
- } - - if numSources == 0 { - allErrs = append(allErrs, field.Invalid(fldPath, "", "must specify one of: `configMapRef` or `secretRef`")) - } else if numSources > 1 { - allErrs = append(allErrs, field.Invalid(fldPath, "", "may not have more than one field specified at a time")) - } - } - return allErrs -} - -func validateConfigMapEnvSource(configMapSource *api.ConfigMapEnvSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(configMapSource.Name) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) - } else { - for _, msg := range ValidateConfigMapName(configMapSource.Name, true) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), configMapSource.Name, msg)) - } - } - return allErrs -} - -func validateSecretEnvSource(secretSource *api.SecretEnvSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(secretSource.Name) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) - } else { - for _, msg := range ValidateSecretName(secretSource.Name, true) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), secretSource.Name, msg)) - } - } - return allErrs -} - -var validContainerResourceDivisorForCPU = sets.NewString("1m", "1") -var validContainerResourceDivisorForMemory = sets.NewString("1", "1k", "1M", "1G", "1T", "1P", "1E", "1Ki", "1Mi", "1Gi", "1Ti", "1Pi", "1Ei") -var validContainerResourceDivisorForEphemeralStorage = sets.NewString("1", "1k", "1M", "1G", "1T", "1P", "1E", "1Ki", "1Mi", "1Gi", "1Ti", "1Pi", "1Ei") - -func validateContainerResourceDivisor(rName string, divisor resource.Quantity, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - unsetDivisor := resource.Quantity{} - if unsetDivisor.Cmp(divisor) == 0 { - return allErrs - } - switch rName { - case "limits.cpu", "requests.cpu": - if !validContainerResourceDivisorForCPU.Has(divisor.String()) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("divisor"), rName, "only divisor's values 1m and 1 are supported with the cpu resource")) - } - case "limits.memory", "requests.memory": - if !validContainerResourceDivisorForMemory.Has(divisor.String()) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("divisor"), rName, "only divisor's values 1, 1k, 1M, 1G, 1T, 1P, 1E, 1Ki, 1Mi, 1Gi, 1Ti, 1Pi, 1Ei are supported with the memory resource")) - } - case "limits.ephemeral-storage", "requests.ephemeral-storage": - if !validContainerResourceDivisorForEphemeralStorage.Has(divisor.String()) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("divisor"), rName, "only divisor's values 1, 1k, 1M, 1G, 1T, 1P, 1E, 1Ki, 1Mi, 1Gi, 1Ti, 1Pi, 1Ei are supported with the local ephemeral storage resource")) - } - } - return allErrs -} - -func validateConfigMapKeySelector(s *api.ConfigMapKeySelector, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - nameFn := ValidateNameFunc(ValidateSecretName) - for _, msg := range nameFn(s.Name, false) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), s.Name, msg)) - } - if len(s.Key) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("key"), "")) - } else { - for _, msg := range validation.IsConfigMapKey(s.Key) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("key"), s.Key, msg)) - } - } - - return allErrs -} - -func validateSecretKeySelector(s *api.SecretKeySelector, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - nameFn := 
ValidateNameFunc(ValidateSecretName) - for _, msg := range nameFn(s.Name, false) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), s.Name, msg)) - } - if len(s.Key) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("key"), "")) - } else { - for _, msg := range validation.IsConfigMapKey(s.Key) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("key"), s.Key, msg)) - } - } - - return allErrs -} - -func ValidateVolumeMounts(mounts []api.VolumeMount, volumes sets.String, container *api.Container, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - mountpoints := sets.NewString() - - for i, mnt := range mounts { - idxPath := fldPath.Index(i) - if len(mnt.Name) == 0 { - allErrs = append(allErrs, field.Required(idxPath.Child("name"), "")) - } else if !volumes.Has(mnt.Name) { - allErrs = append(allErrs, field.NotFound(idxPath.Child("name"), mnt.Name)) - } - if len(mnt.MountPath) == 0 { - allErrs = append(allErrs, field.Required(idxPath.Child("mountPath"), "")) - } - if mountpoints.Has(mnt.MountPath) { - allErrs = append(allErrs, field.Invalid(idxPath.Child("mountPath"), mnt.MountPath, "must be unique")) - } - if !path.IsAbs(mnt.MountPath) { - allErrs = append(allErrs, field.Invalid(idxPath.Child("mountPath"), mnt.MountPath, "must be an absolute path")) - } - mountpoints.Insert(mnt.MountPath) - if len(mnt.SubPath) > 0 { - allErrs = append(allErrs, validateLocalDescendingPath(mnt.SubPath, fldPath.Child("subPath"))...) - } - - if mnt.MountPropagation != nil { - allErrs = append(allErrs, validateMountPropagation(mnt.MountPropagation, container, fldPath.Child("mountPropagation"))...) - } - } - return allErrs -} - -func validateProbe(probe *api.Probe, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - if probe == nil { - return allErrs - } - allErrs = append(allErrs, validateHandler(&probe.Handler, fldPath)...) - - allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.InitialDelaySeconds), fldPath.Child("initialDelaySeconds"))...) - allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.TimeoutSeconds), fldPath.Child("timeoutSeconds"))...) - allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.PeriodSeconds), fldPath.Child("periodSeconds"))...) - allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.SuccessThreshold), fldPath.Child("successThreshold"))...) - allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.FailureThreshold), fldPath.Child("failureThreshold"))...) - return allErrs -} - -func validateClientIPAffinityConfig(config *api.SessionAffinityConfig, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if config == nil { - allErrs = append(allErrs, field.Required(fldPath, fmt.Sprintf("when session affinity type is %s", api.ServiceAffinityClientIP))) - return allErrs - } - if config.ClientIP == nil { - allErrs = append(allErrs, field.Required(fldPath.Child("clientIP"), fmt.Sprintf("when session affinity type is %s", api.ServiceAffinityClientIP))) - return allErrs - } - if config.ClientIP.TimeoutSeconds == nil { - allErrs = append(allErrs, field.Required(fldPath.Child("clientIP").Child("timeoutSeconds"), fmt.Sprintf("when session affinity type is %s", api.ServiceAffinityClientIP))) - return allErrs - } - allErrs = append(allErrs, validateAffinityTimeout(config.ClientIP.TimeoutSeconds, fldPath.Child("clientIP").Child("timeoutSeconds"))...) 
- - return allErrs -} - -func validateAffinityTimeout(timeout *int32, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if *timeout <= 0 || *timeout > api.MaxClientIPServiceAffinitySeconds { - allErrs = append(allErrs, field.Invalid(fldPath, timeout, fmt.Sprintf("must be greater than 0 and less than %d", api.MaxClientIPServiceAffinitySeconds))) - } - return allErrs -} - -// AccumulateUniqueHostPorts extracts each HostPort of each Container, -// accumulating the results and returning an error if any ports conflict. -func AccumulateUniqueHostPorts(containers []api.Container, accumulator *sets.String, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - for ci, ctr := range containers { - idxPath := fldPath.Index(ci) - portsPath := idxPath.Child("ports") - for pi := range ctr.Ports { - idxPath := portsPath.Index(pi) - port := ctr.Ports[pi].HostPort - if port == 0 { - continue - } - str := fmt.Sprintf("%s/%s/%d", ctr.Ports[pi].Protocol, ctr.Ports[pi].HostIP, port) - if accumulator.Has(str) { - allErrs = append(allErrs, field.Duplicate(idxPath.Child("hostPort"), str)) - } else { - accumulator.Insert(str) - } - } - } - return allErrs -} - -// checkHostPortConflicts checks for colliding Port.HostPort values across -// a slice of containers. -func checkHostPortConflicts(containers []api.Container, fldPath *field.Path) field.ErrorList { - allPorts := sets.String{} - return AccumulateUniqueHostPorts(containers, &allPorts, fldPath) -} - -func validateExecAction(exec *api.ExecAction, fldPath *field.Path) field.ErrorList { - allErrors := field.ErrorList{} - if len(exec.Command) == 0 { - allErrors = append(allErrors, field.Required(fldPath.Child("command"), "")) - } - return allErrors -} - -var supportedHTTPSchemes = sets.NewString(string(api.URISchemeHTTP), string(api.URISchemeHTTPS)) - -func validateHTTPGetAction(http *api.HTTPGetAction, fldPath *field.Path) field.ErrorList { - allErrors := field.ErrorList{} - if len(http.Path) == 0 { - allErrors = append(allErrors, field.Required(fldPath.Child("path"), "")) - } - allErrors = append(allErrors, ValidatePortNumOrName(http.Port, fldPath.Child("port"))...) 
- if !supportedHTTPSchemes.Has(string(http.Scheme)) { - allErrors = append(allErrors, field.NotSupported(fldPath.Child("scheme"), http.Scheme, supportedHTTPSchemes.List())) - } - for _, header := range http.HTTPHeaders { - for _, msg := range validation.IsHTTPHeaderName(header.Name) { - allErrors = append(allErrors, field.Invalid(fldPath.Child("httpHeaders"), header.Name, msg)) - } - } - return allErrors -} - -func ValidatePortNumOrName(port intstr.IntOrString, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if port.Type == intstr.Int { - for _, msg := range validation.IsValidPortNum(port.IntValue()) { - allErrs = append(allErrs, field.Invalid(fldPath, port.IntValue(), msg)) - } - } else if port.Type == intstr.String { - for _, msg := range validation.IsValidPortName(port.StrVal) { - allErrs = append(allErrs, field.Invalid(fldPath, port.StrVal, msg)) - } - } else { - allErrs = append(allErrs, field.InternalError(fldPath, fmt.Errorf("unknown type: %v", port.Type))) - } - return allErrs -} - -func validateTCPSocketAction(tcp *api.TCPSocketAction, fldPath *field.Path) field.ErrorList { - return ValidatePortNumOrName(tcp.Port, fldPath.Child("port")) -} - -func validateHandler(handler *api.Handler, fldPath *field.Path) field.ErrorList { - numHandlers := 0 - allErrors := field.ErrorList{} - if handler.Exec != nil { - if numHandlers > 0 { - allErrors = append(allErrors, field.Forbidden(fldPath.Child("exec"), "may not specify more than 1 handler type")) - } else { - numHandlers++ - allErrors = append(allErrors, validateExecAction(handler.Exec, fldPath.Child("exec"))...) - } - } - if handler.HTTPGet != nil { - if numHandlers > 0 { - allErrors = append(allErrors, field.Forbidden(fldPath.Child("httpGet"), "may not specify more than 1 handler type")) - } else { - numHandlers++ - allErrors = append(allErrors, validateHTTPGetAction(handler.HTTPGet, fldPath.Child("httpGet"))...) - } - } - if handler.TCPSocket != nil { - if numHandlers > 0 { - allErrors = append(allErrors, field.Forbidden(fldPath.Child("tcpSocket"), "may not specify more than 1 handler type")) - } else { - numHandlers++ - allErrors = append(allErrors, validateTCPSocketAction(handler.TCPSocket, fldPath.Child("tcpSocket"))...) - } - } - if numHandlers == 0 { - allErrors = append(allErrors, field.Required(fldPath, "must specify a handler type")) - } - return allErrors -} - -func validateLifecycle(lifecycle *api.Lifecycle, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if lifecycle.PostStart != nil { - allErrs = append(allErrs, validateHandler(lifecycle.PostStart, fldPath.Child("postStart"))...) - } - if lifecycle.PreStop != nil { - allErrs = append(allErrs, validateHandler(lifecycle.PreStop, fldPath.Child("preStop"))...) 
- } - return allErrs -} - -var supportedPullPolicies = sets.NewString(string(api.PullAlways), string(api.PullIfNotPresent), string(api.PullNever)) - -func validatePullPolicy(policy api.PullPolicy, fldPath *field.Path) field.ErrorList { - allErrors := field.ErrorList{} - - switch policy { - case api.PullAlways, api.PullIfNotPresent, api.PullNever: - break - case "": - allErrors = append(allErrors, field.Required(fldPath, "")) - default: - allErrors = append(allErrors, field.NotSupported(fldPath, policy, supportedPullPolicies.List())) - } - - return allErrors -} - -func validateInitContainers(containers, otherContainers []api.Container, volumes sets.String, fldPath *field.Path) field.ErrorList { - var allErrs field.ErrorList - if len(containers) > 0 { - allErrs = append(allErrs, validateContainers(containers, volumes, fldPath)...) - } - - allNames := sets.String{} - for _, ctr := range otherContainers { - allNames.Insert(ctr.Name) - } - for i, ctr := range containers { - idxPath := fldPath.Index(i) - if allNames.Has(ctr.Name) { - allErrs = append(allErrs, field.Duplicate(idxPath.Child("name"), ctr.Name)) - } - if len(ctr.Name) > 0 { - allNames.Insert(ctr.Name) - } - if ctr.Lifecycle != nil { - allErrs = append(allErrs, field.Invalid(idxPath.Child("lifecycle"), ctr.Lifecycle, "must not be set for init containers")) - } - if ctr.LivenessProbe != nil { - allErrs = append(allErrs, field.Invalid(idxPath.Child("livenessProbe"), ctr.LivenessProbe, "must not be set for init containers")) - } - if ctr.ReadinessProbe != nil { - allErrs = append(allErrs, field.Invalid(idxPath.Child("readinessProbe"), ctr.ReadinessProbe, "must not be set for init containers")) - } - } - return allErrs -} - -func validateContainers(containers []api.Container, volumes sets.String, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - if len(containers) == 0 { - return append(allErrs, field.Required(fldPath, "")) - } - - allNames := sets.String{} - for i, ctr := range containers { - idxPath := fldPath.Index(i) - namePath := idxPath.Child("name") - if len(ctr.Name) == 0 { - allErrs = append(allErrs, field.Required(namePath, "")) - } else { - allErrs = append(allErrs, ValidateDNS1123Label(ctr.Name, namePath)...) - } - if allNames.Has(ctr.Name) { - allErrs = append(allErrs, field.Duplicate(namePath, ctr.Name)) - } else { - allNames.Insert(ctr.Name) - } - // TODO: do not validate leading and trailing whitespace to preserve backward compatibility. - // for example: https://github.com/openshift/origin/issues/14659 image = " " is special token in pod template - // others may have done similar - if len(ctr.Image) == 0 { - allErrs = append(allErrs, field.Required(idxPath.Child("image"), "")) - } - if ctr.Lifecycle != nil { - allErrs = append(allErrs, validateLifecycle(ctr.Lifecycle, idxPath.Child("lifecycle"))...) - } - allErrs = append(allErrs, validateProbe(ctr.LivenessProbe, idxPath.Child("livenessProbe"))...) 
- // Liveness-specific validation - if ctr.LivenessProbe != nil && ctr.LivenessProbe.SuccessThreshold != 1 { - allErrs = append(allErrs, field.Invalid(idxPath.Child("livenessProbe", "successThreshold"), ctr.LivenessProbe.SuccessThreshold, "must be 1")) - } - - switch ctr.TerminationMessagePolicy { - case api.TerminationMessageReadFile, api.TerminationMessageFallbackToLogsOnError: - case "": - allErrs = append(allErrs, field.Required(idxPath.Child("terminationMessagePolicy"), "must be 'File' or 'FallbackToLogsOnError'")) - default: - allErrs = append(allErrs, field.Invalid(idxPath.Child("terminationMessagePolicy"), ctr.TerminationMessagePolicy, "must be 'File' or 'FallbackToLogsOnError'")) - } - - allErrs = append(allErrs, validateProbe(ctr.ReadinessProbe, idxPath.Child("readinessProbe"))...) - allErrs = append(allErrs, validateContainerPorts(ctr.Ports, idxPath.Child("ports"))...) - allErrs = append(allErrs, ValidateEnv(ctr.Env, idxPath.Child("env"))...) - allErrs = append(allErrs, ValidateEnvFrom(ctr.EnvFrom, idxPath.Child("envFrom"))...) - allErrs = append(allErrs, ValidateVolumeMounts(ctr.VolumeMounts, volumes, &ctr, idxPath.Child("volumeMounts"))...) - allErrs = append(allErrs, validatePullPolicy(ctr.ImagePullPolicy, idxPath.Child("imagePullPolicy"))...) - allErrs = append(allErrs, ValidateResourceRequirements(&ctr.Resources, idxPath.Child("resources"))...) - allErrs = append(allErrs, ValidateSecurityContext(ctr.SecurityContext, idxPath.Child("securityContext"))...) - } - // Check for colliding ports across all containers. - allErrs = append(allErrs, checkHostPortConflicts(containers, fldPath)...) - - return allErrs -} - -func validateRestartPolicy(restartPolicy *api.RestartPolicy, fldPath *field.Path) field.ErrorList { - allErrors := field.ErrorList{} - switch *restartPolicy { - case api.RestartPolicyAlways, api.RestartPolicyOnFailure, api.RestartPolicyNever: - break - case "": - allErrors = append(allErrors, field.Required(fldPath, "")) - default: - validValues := []string{string(api.RestartPolicyAlways), string(api.RestartPolicyOnFailure), string(api.RestartPolicyNever)} - allErrors = append(allErrors, field.NotSupported(fldPath, *restartPolicy, validValues)) - } - - return allErrors -} - -func validateDNSPolicy(dnsPolicy *api.DNSPolicy, fldPath *field.Path) field.ErrorList { - allErrors := field.ErrorList{} - switch *dnsPolicy { - case api.DNSClusterFirstWithHostNet, api.DNSClusterFirst, api.DNSDefault: - break - case "": - allErrors = append(allErrors, field.Required(fldPath, "")) - default: - validValues := []string{string(api.DNSClusterFirstWithHostNet), string(api.DNSClusterFirst), string(api.DNSDefault)} - allErrors = append(allErrors, field.NotSupported(fldPath, dnsPolicy, validValues)) - } - return allErrors -} - -func validateHostNetwork(hostNetwork bool, containers []api.Container, fldPath *field.Path) field.ErrorList { - allErrors := field.ErrorList{} - if hostNetwork { - for i, container := range containers { - portsPath := fldPath.Index(i).Child("ports") - for i, port := range container.Ports { - idxPath := portsPath.Index(i) - if port.HostPort != port.ContainerPort { - allErrors = append(allErrors, field.Invalid(idxPath.Child("containerPort"), port.ContainerPort, "must match `hostPort` when `hostNetwork` is true")) - } - } - } - } - return allErrors -} - -// validateImagePullSecrets checks to make sure the pull secrets are well -// formed. Right now, we only expect name to be set (it's the only field). 
If -// this ever changes and someone decides to set those fields, we'd like to -// know. -func validateImagePullSecrets(imagePullSecrets []api.LocalObjectReference, fldPath *field.Path) field.ErrorList { - allErrors := field.ErrorList{} - for i, currPullSecret := range imagePullSecrets { - idxPath := fldPath.Index(i) - strippedRef := api.LocalObjectReference{Name: currPullSecret.Name} - if !reflect.DeepEqual(strippedRef, currPullSecret) { - allErrors = append(allErrors, field.Invalid(idxPath, currPullSecret, "only name may be set")) - } - } - return allErrors -} - -// validateAffinity checks if given affinities are valid -func validateAffinity(affinity *api.Affinity, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - if affinity != nil { - if na := affinity.NodeAffinity; na != nil { - // TODO: Uncomment the next three lines once RequiredDuringSchedulingRequiredDuringExecution is implemented. - // if na.RequiredDuringSchedulingRequiredDuringExecution != nil { - // allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingRequiredDuringExecution, fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...) - // } - - if na.RequiredDuringSchedulingIgnoredDuringExecution != nil { - allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingIgnoredDuringExecution, fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...) - } - - if len(na.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { - allErrs = append(allErrs, ValidatePreferredSchedulingTerms(na.PreferredDuringSchedulingIgnoredDuringExecution, fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...) - } - } - if affinity.PodAffinity != nil { - allErrs = append(allErrs, validatePodAffinity(affinity.PodAffinity, fldPath.Child("podAffinity"))...) - } - if affinity.PodAntiAffinity != nil { - allErrs = append(allErrs, validatePodAntiAffinity(affinity.PodAntiAffinity, fldPath.Child("podAntiAffinity"))...) - } - } - - return allErrs -} - -func validateTaintEffect(effect *api.TaintEffect, allowEmpty bool, fldPath *field.Path) field.ErrorList { - if !allowEmpty && len(*effect) == 0 { - return field.ErrorList{field.Required(fldPath, "")} - } - - allErrors := field.ErrorList{} - switch *effect { - // TODO: Replace next line with subsequent commented-out line when implement TaintEffectNoScheduleNoAdmit. - case api.TaintEffectNoSchedule, api.TaintEffectPreferNoSchedule, api.TaintEffectNoExecute: - // case api.TaintEffectNoSchedule, api.TaintEffectPreferNoSchedule, api.TaintEffectNoScheduleNoAdmit, api.TaintEffectNoExecute: - default: - validValues := []string{ - string(api.TaintEffectNoSchedule), - string(api.TaintEffectPreferNoSchedule), - string(api.TaintEffectNoExecute), - // TODO: Uncomment this block when implement TaintEffectNoScheduleNoAdmit. - // string(api.TaintEffectNoScheduleNoAdmit), - } - allErrors = append(allErrors, field.NotSupported(fldPath, effect, validValues)) - } - return allErrors -} - -// validateOnlyAddedTolerations validates updated pod tolerations. 
-func validateOnlyAddedTolerations(newTolerations []api.Toleration, oldTolerations []api.Toleration, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - for _, old := range oldTolerations { - found := false - old.TolerationSeconds = nil - for _, new := range newTolerations { - new.TolerationSeconds = nil - if reflect.DeepEqual(old, new) { - found = true - break - } - } - if !found { - allErrs = append(allErrs, field.Forbidden(fldPath, "existing toleration can not be modified except its tolerationSeconds")) - return allErrs - } - } - - allErrs = append(allErrs, ValidateTolerations(newTolerations, fldPath)...) - return allErrs -} - -func ValidateHostAliases(hostAliases []api.HostAlias, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - for _, hostAlias := range hostAliases { - if ip := net.ParseIP(hostAlias.IP); ip == nil { - allErrs = append(allErrs, field.Invalid(fldPath.Child("ip"), hostAlias.IP, "must be valid IP address")) - } - for _, hostname := range hostAlias.Hostnames { - allErrs = append(allErrs, ValidateDNS1123Subdomain(hostname, fldPath.Child("hostnames"))...) - } - } - return allErrs -} - -// ValidateTolerations tests if given tolerations have valid data. -func ValidateTolerations(tolerations []api.Toleration, fldPath *field.Path) field.ErrorList { - allErrors := field.ErrorList{} - for i, toleration := range tolerations { - idxPath := fldPath.Index(i) - // validate the toleration key - if len(toleration.Key) > 0 { - allErrors = append(allErrors, unversionedvalidation.ValidateLabelName(toleration.Key, idxPath.Child("key"))...) - } - - // empty toleration key with Exists operator and empty value means match all taints - if len(toleration.Key) == 0 && toleration.Operator != api.TolerationOpExists { - allErrors = append(allErrors, field.Invalid(idxPath.Child("operator"), toleration.Operator, - "operator must be Exists when `key` is empty, which means \"match all values and all keys\"")) - } - - if toleration.TolerationSeconds != nil && toleration.Effect != api.TaintEffectNoExecute { - allErrors = append(allErrors, field.Invalid(idxPath.Child("effect"), toleration.Effect, - "effect must be 'NoExecute' when `tolerationSeconds` is set")) - } - - // validate toleration operator and value - switch toleration.Operator { - // empty operator means Equal - case api.TolerationOpEqual, "": - if errs := validation.IsValidLabelValue(toleration.Value); len(errs) != 0 { - allErrors = append(allErrors, field.Invalid(idxPath.Child("operator"), toleration.Value, strings.Join(errs, ";"))) - } - case api.TolerationOpExists: - if len(toleration.Value) > 0 { - allErrors = append(allErrors, field.Invalid(idxPath.Child("operator"), toleration, "value must be empty when `operator` is 'Exists'")) - } - default: - validValues := []string{string(api.TolerationOpEqual), string(api.TolerationOpExists)} - allErrors = append(allErrors, field.NotSupported(idxPath.Child("operator"), toleration.Operator, validValues)) - } - - // validate toleration effect, empty toleration effect means match all taint effects - if len(toleration.Effect) > 0 { - allErrors = append(allErrors, validateTaintEffect(&toleration.Effect, true, idxPath.Child("effect"))...) 
- } - } - return allErrors -} - -func toResourceNames(resources api.ResourceList) []api.ResourceName { - result := []api.ResourceName{} - for resourceName := range resources { - result = append(result, resourceName) - } - return result -} - -func toSet(resourceNames []api.ResourceName) sets.String { - result := sets.NewString() - for _, resourceName := range resourceNames { - result.Insert(string(resourceName)) - } - return result -} - -func toContainerResourcesSet(ctr *api.Container) sets.String { - resourceNames := toResourceNames(ctr.Resources.Requests) - resourceNames = append(resourceNames, toResourceNames(ctr.Resources.Limits)...) - return toSet(resourceNames) -} - -// validateContainersOnlyForPod does additional validation for containers on a pod versus a pod template -// it only does additive validation of fields not covered in validateContainers -func validateContainersOnlyForPod(containers []api.Container, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - for i, ctr := range containers { - idxPath := fldPath.Index(i) - if len(ctr.Image) != len(strings.TrimSpace(ctr.Image)) { - allErrs = append(allErrs, field.Invalid(idxPath.Child("image"), ctr.Image, "must not have leading or trailing whitespace")) - } - } - return allErrs -} - -// ValidatePod tests if required fields in the pod are set. -func ValidatePod(pod *api.Pod) field.ErrorList { - fldPath := field.NewPath("metadata") - allErrs := ValidateObjectMeta(&pod.ObjectMeta, true, ValidatePodName, fldPath) - allErrs = append(allErrs, ValidatePodSpecificAnnotations(pod.ObjectMeta.Annotations, &pod.Spec, fldPath.Child("annotations"))...) - allErrs = append(allErrs, ValidatePodSpec(&pod.Spec, field.NewPath("spec"))...) - - // we do additional validation only pertinent for pods and not pod templates - // this was done to preserve backwards compatibility - specPath := field.NewPath("spec") - - allErrs = append(allErrs, validateContainersOnlyForPod(pod.Spec.Containers, specPath.Child("containers"))...) - allErrs = append(allErrs, validateContainersOnlyForPod(pod.Spec.InitContainers, specPath.Child("initContainers"))...) - - if utilfeature.DefaultFeatureGate.Enabled(features.HugePages) { - hugePageResources := sets.NewString() - for i := range pod.Spec.Containers { - resourceSet := toContainerResourcesSet(&pod.Spec.Containers[i]) - for resourceStr := range resourceSet { - if v1helper.IsHugePageResourceName(v1.ResourceName(resourceStr)) { - hugePageResources.Insert(resourceStr) - } - } - } - if len(hugePageResources) > 1 { - allErrs = append(allErrs, field.Invalid(specPath, hugePageResources, "must use a single hugepage size in a pod spec")) - } - } - - return allErrs -} - -// ValidatePodSpec tests that the specified PodSpec has valid data. -// This includes checking formatting and uniqueness. It also canonicalizes the -// structure by setting default values and implementing any backwards-compatibility -// tricks. -func ValidatePodSpec(spec *api.PodSpec, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - allVolumes, vErrs := ValidateVolumes(spec.Volumes, fldPath.Child("volumes")) - allErrs = append(allErrs, vErrs...) - allErrs = append(allErrs, validateContainers(spec.Containers, allVolumes, fldPath.Child("containers"))...) - allErrs = append(allErrs, validateInitContainers(spec.InitContainers, spec.Containers, allVolumes, fldPath.Child("initContainers"))...) - allErrs = append(allErrs, validateRestartPolicy(&spec.RestartPolicy, fldPath.Child("restartPolicy"))...) 
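// Editor's note: standalone sketch (not part of the vendored patch) of the "single hugepage
// size per pod" rule checked in ValidatePod above: hugepage resource names are collected from
// every container's requests and limits, and more than one distinct size is rejected. Hugepage
// resources are identified here by the "hugepages-" name prefix, mirroring what
// v1helper.IsHugePageResourceName checks; container is a simplified stand-in type.
package main

import (
	"fmt"
	"strings"
)

type container struct {
	Requests map[string]string
	Limits   map[string]string
}

func hugePageSizes(containers []container) map[string]bool {
	sizes := map[string]bool{}
	for _, c := range containers {
		for name := range c.Requests {
			if strings.HasPrefix(name, "hugepages-") {
				sizes[name] = true
			}
		}
		for name := range c.Limits {
			if strings.HasPrefix(name, "hugepages-") {
				sizes[name] = true
			}
		}
	}
	return sizes
}

func main() {
	cs := []container{
		{Limits: map[string]string{"hugepages-2Mi": "100Mi"}},
		{Limits: map[string]string{"hugepages-1Gi": "2Gi"}},
	}
	if len(hugePageSizes(cs)) > 1 {
		fmt.Println("invalid: must use a single hugepage size in a pod spec")
	}
}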
- allErrs = append(allErrs, validateDNSPolicy(&spec.DNSPolicy, fldPath.Child("dnsPolicy"))...) - allErrs = append(allErrs, unversionedvalidation.ValidateLabels(spec.NodeSelector, fldPath.Child("nodeSelector"))...) - allErrs = append(allErrs, ValidatePodSecurityContext(spec.SecurityContext, spec, fldPath, fldPath.Child("securityContext"))...) - allErrs = append(allErrs, validateImagePullSecrets(spec.ImagePullSecrets, fldPath.Child("imagePullSecrets"))...) - allErrs = append(allErrs, validateAffinity(spec.Affinity, fldPath.Child("affinity"))...) - if len(spec.ServiceAccountName) > 0 { - for _, msg := range ValidateServiceAccountName(spec.ServiceAccountName, false) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("serviceAccountName"), spec.ServiceAccountName, msg)) - } - } - - if len(spec.NodeName) > 0 { - for _, msg := range ValidateNodeName(spec.NodeName, false) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("nodeName"), spec.NodeName, msg)) - } - } - - if spec.ActiveDeadlineSeconds != nil { - value := *spec.ActiveDeadlineSeconds - if value < 1 || value > math.MaxInt32 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("activeDeadlineSeconds"), value, validation.InclusiveRangeError(1, math.MaxInt32))) - } - } - - if len(spec.Hostname) > 0 { - allErrs = append(allErrs, ValidateDNS1123Label(spec.Hostname, fldPath.Child("hostname"))...) - } - - if len(spec.Subdomain) > 0 { - allErrs = append(allErrs, ValidateDNS1123Label(spec.Subdomain, fldPath.Child("subdomain"))...) - } - - if len(spec.Tolerations) > 0 { - allErrs = append(allErrs, ValidateTolerations(spec.Tolerations, fldPath.Child("tolerations"))...) - } - - if len(spec.HostAliases) > 0 { - allErrs = append(allErrs, ValidateHostAliases(spec.HostAliases, fldPath.Child("hostAliases"))...) 
- } - - if len(spec.PriorityClassName) > 0 { - if !utilfeature.DefaultFeatureGate.Enabled(features.PodPriority) { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("priorityClassName"), "Pod priority is disabled by feature-gate")) - } else { - for _, msg := range ValidatePriorityClassName(spec.PriorityClassName, false) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("priorityClassName"), spec.PriorityClassName, msg)) - } - } - } - - if spec.Priority != nil && !utilfeature.DefaultFeatureGate.Enabled(features.PodPriority) { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("priority"), "Pod priority is disabled by feature-gate")) - } - - return allErrs -} - -// ValidateNodeSelectorRequirement tests that the specified NodeSelectorRequirement fields has valid data -func ValidateNodeSelectorRequirement(rq api.NodeSelectorRequirement, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - switch rq.Operator { - case api.NodeSelectorOpIn, api.NodeSelectorOpNotIn: - if len(rq.Values) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified when `operator` is 'In' or 'NotIn'")) - } - case api.NodeSelectorOpExists, api.NodeSelectorOpDoesNotExist: - if len(rq.Values) > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("values"), "may not be specified when `operator` is 'Exists' or 'DoesNotExist'")) - } - - case api.NodeSelectorOpGt, api.NodeSelectorOpLt: - if len(rq.Values) != 1 { - allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified single value when `operator` is 'Lt' or 'Gt'")) - } - default: - allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), rq.Operator, "not a valid selector operator")) - } - allErrs = append(allErrs, unversionedvalidation.ValidateLabelName(rq.Key, fldPath.Child("key"))...) - return allErrs -} - -// ValidateNodeSelectorTerm tests that the specified node selector term has valid data -func ValidateNodeSelectorTerm(term api.NodeSelectorTerm, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - if len(term.MatchExpressions) == 0 { - return append(allErrs, field.Required(fldPath.Child("matchExpressions"), "must have at least one node selector requirement")) - } - for j, req := range term.MatchExpressions { - allErrs = append(allErrs, ValidateNodeSelectorRequirement(req, fldPath.Child("matchExpressions").Index(j))...) - } - return allErrs -} - -// ValidateNodeSelector tests that the specified nodeSelector fields has valid data -func ValidateNodeSelector(nodeSelector *api.NodeSelector, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - termFldPath := fldPath.Child("nodeSelectorTerms") - if len(nodeSelector.NodeSelectorTerms) == 0 { - return append(allErrs, field.Required(termFldPath, "must have at least one node selector term")) - } - - for i, term := range nodeSelector.NodeSelectorTerms { - allErrs = append(allErrs, ValidateNodeSelectorTerm(term, termFldPath.Index(i))...) 
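// Editor's note: standalone sketch (not part of the vendored patch) of the operator/values
// rules from ValidateNodeSelectorRequirement above, with plain strings standing in for the
// api.NodeSelectorOp* constants: In/NotIn need at least one value, Exists/DoesNotExist forbid
// values, and Gt/Lt require exactly one value.
package main

import "fmt"

func checkNodeSelectorRequirement(operator string, values []string) error {
	switch operator {
	case "In", "NotIn":
		if len(values) == 0 {
			return fmt.Errorf("values must be specified when operator is In or NotIn")
		}
	case "Exists", "DoesNotExist":
		if len(values) > 0 {
			return fmt.Errorf("values may not be specified when operator is Exists or DoesNotExist")
		}
	case "Gt", "Lt":
		if len(values) != 1 {
			return fmt.Errorf("exactly one value is required when operator is Gt or Lt")
		}
	default:
		return fmt.Errorf("%q is not a valid selector operator", operator)
	}
	return nil
}

func main() {
	fmt.Println(checkNodeSelectorRequirement("Gt", []string{"4"}))     // <nil>
	fmt.Println(checkNodeSelectorRequirement("Exists", []string{"x"})) // error: values not allowed
}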
- } - - return allErrs -} - -// ValidateAvoidPodsInNodeAnnotations tests that the serialized AvoidPods in Node.Annotations has valid data -func ValidateAvoidPodsInNodeAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - v1Avoids, err := v1helper.GetAvoidPodsFromNodeAnnotations(annotations) - if err != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Child("AvoidPods"), api.PreferAvoidPodsAnnotationKey, err.Error())) - return allErrs - } - var avoids api.AvoidPods - if err := k8s_api_v1.Convert_v1_AvoidPods_To_api_AvoidPods(&v1Avoids, &avoids, nil); err != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Child("AvoidPods"), api.PreferAvoidPodsAnnotationKey, err.Error())) - return allErrs - } - - if len(avoids.PreferAvoidPods) != 0 { - for i, pa := range avoids.PreferAvoidPods { - idxPath := fldPath.Child(api.PreferAvoidPodsAnnotationKey).Index(i) - allErrs = append(allErrs, validatePreferAvoidPodsEntry(pa, idxPath)...) - } - } - - return allErrs -} - -// validatePreferAvoidPodsEntry tests if given PreferAvoidPodsEntry has valid data. -func validatePreferAvoidPodsEntry(avoidPodEntry api.PreferAvoidPodsEntry, fldPath *field.Path) field.ErrorList { - allErrors := field.ErrorList{} - if avoidPodEntry.PodSignature.PodController == nil { - allErrors = append(allErrors, field.Required(fldPath.Child("PodSignature"), "")) - } else { - if *(avoidPodEntry.PodSignature.PodController.Controller) != true { - allErrors = append(allErrors, - field.Invalid(fldPath.Child("PodSignature").Child("PodController").Child("Controller"), - *(avoidPodEntry.PodSignature.PodController.Controller), "must point to a controller")) - } - } - return allErrors -} - -// ValidatePreferredSchedulingTerms tests that the specified SoftNodeAffinity fields has valid data -func ValidatePreferredSchedulingTerms(terms []api.PreferredSchedulingTerm, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - for i, term := range terms { - if term.Weight <= 0 || term.Weight > 100 { - allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("weight"), term.Weight, "must be in the range 1-100")) - } - - allErrs = append(allErrs, ValidateNodeSelectorTerm(term.Preference, fldPath.Index(i).Child("preference"))...) - } - return allErrs -} - -// validatePodAffinityTerm tests that the specified podAffinityTerm fields have valid data -func validatePodAffinityTerm(podAffinityTerm api.PodAffinityTerm, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(podAffinityTerm.LabelSelector, fldPath.Child("matchExpressions"))...) - for _, name := range podAffinityTerm.Namespaces { - for _, msg := range ValidateNamespaceName(name, false) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("namespace"), name, msg)) - } - } - if len(podAffinityTerm.TopologyKey) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("topologyKey"), "can not be empty")) - } - return append(allErrs, unversionedvalidation.ValidateLabelName(podAffinityTerm.TopologyKey, fldPath.Child("topologyKey"))...) -} - -// validatePodAffinityTerms tests that the specified podAffinityTerms fields have valid data -func validatePodAffinityTerms(podAffinityTerms []api.PodAffinityTerm, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - for i, podAffinityTerm := range podAffinityTerms { - allErrs = append(allErrs, validatePodAffinityTerm(podAffinityTerm, fldPath.Index(i))...) 
- } - return allErrs -} - -// validateWeightedPodAffinityTerms tests that the specified weightedPodAffinityTerms fields have valid data -func validateWeightedPodAffinityTerms(weightedPodAffinityTerms []api.WeightedPodAffinityTerm, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - for j, weightedTerm := range weightedPodAffinityTerms { - if weightedTerm.Weight <= 0 || weightedTerm.Weight > 100 { - allErrs = append(allErrs, field.Invalid(fldPath.Index(j).Child("weight"), weightedTerm.Weight, "must be in the range 1-100")) - } - allErrs = append(allErrs, validatePodAffinityTerm(weightedTerm.PodAffinityTerm, fldPath.Index(j).Child("podAffinityTerm"))...) - } - return allErrs -} - -// validatePodAntiAffinity tests that the specified podAntiAffinity fields have valid data -func validatePodAntiAffinity(podAntiAffinity *api.PodAntiAffinity, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - // TODO:Uncomment below code once RequiredDuringSchedulingRequiredDuringExecution is implemented. - // if podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution != nil { - // allErrs = append(allErrs, validatePodAffinityTerms(podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution, false, - // fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...) - //} - if podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil { - allErrs = append(allErrs, validatePodAffinityTerms(podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution, - fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...) - } - if podAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution != nil { - allErrs = append(allErrs, validateWeightedPodAffinityTerms(podAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution, - fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...) - } - return allErrs -} - -// validatePodAffinity tests that the specified podAffinity fields have valid data -func validatePodAffinity(podAffinity *api.PodAffinity, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - // TODO:Uncomment below code once RequiredDuringSchedulingRequiredDuringExecution is implemented. - // if podAffinity.RequiredDuringSchedulingRequiredDuringExecution != nil { - // allErrs = append(allErrs, validatePodAffinityTerms(podAffinity.RequiredDuringSchedulingRequiredDuringExecution, false, - // fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...) - //} - if podAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil { - allErrs = append(allErrs, validatePodAffinityTerms(podAffinity.RequiredDuringSchedulingIgnoredDuringExecution, - fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...) - } - if podAffinity.PreferredDuringSchedulingIgnoredDuringExecution != nil { - allErrs = append(allErrs, validateWeightedPodAffinityTerms(podAffinity.PreferredDuringSchedulingIgnoredDuringExecution, - fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...) 
- } - return allErrs -} - -func ValidateSeccompProfile(p string, fldPath *field.Path) field.ErrorList { - if p == "docker/default" { - return nil - } - if p == "unconfined" { - return nil - } - if strings.HasPrefix(p, "localhost/") { - return validateLocalDescendingPath(strings.TrimPrefix(p, "localhost/"), fldPath) - } - return field.ErrorList{field.Invalid(fldPath, p, "must be a valid seccomp profile")} -} - -func ValidateSeccompPodAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if p, exists := annotations[api.SeccompPodAnnotationKey]; exists { - allErrs = append(allErrs, ValidateSeccompProfile(p, fldPath.Child(api.SeccompPodAnnotationKey))...) - } - for k, p := range annotations { - if strings.HasPrefix(k, api.SeccompContainerAnnotationKeyPrefix) { - allErrs = append(allErrs, ValidateSeccompProfile(p, fldPath.Child(k))...) - } - } - - return allErrs -} - -func ValidateAppArmorPodAnnotations(annotations map[string]string, spec *api.PodSpec, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - for k, p := range annotations { - if !strings.HasPrefix(k, apparmor.ContainerAnnotationKeyPrefix) { - continue - } - // TODO: this belongs to admission, not general pod validation: - if !utilfeature.DefaultFeatureGate.Enabled(features.AppArmor) { - allErrs = append(allErrs, field.Forbidden(fldPath.Key(k), "AppArmor is disabled by feature-gate")) - continue - } - containerName := strings.TrimPrefix(k, apparmor.ContainerAnnotationKeyPrefix) - if !podSpecHasContainer(spec, containerName) { - allErrs = append(allErrs, field.Invalid(fldPath.Key(k), containerName, "container not found")) - } - - if err := apparmor.ValidateProfileFormat(p); err != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Key(k), p, err.Error())) - } - } - - return allErrs -} - -func podSpecHasContainer(spec *api.PodSpec, containerName string) bool { - for _, c := range spec.InitContainers { - if c.Name == containerName { - return true - } - } - for _, c := range spec.Containers { - if c.Name == containerName { - return true - } - } - return false -} - -const ( - // a sysctl segment regex, concatenated with dots to form a sysctl name - SysctlSegmentFmt string = "[a-z0-9]([-_a-z0-9]*[a-z0-9])?" - - // a sysctl name regex - SysctlFmt string = "(" + SysctlSegmentFmt + "\\.)*" + SysctlSegmentFmt - - // the maximal length of a sysctl name - SysctlMaxLength int = 253 -) - -var sysctlRegexp = regexp.MustCompile("^" + SysctlFmt + "$") - -// IsValidSysctlName checks that the given string is a valid sysctl name, -// i.e. matches SysctlFmt. -func IsValidSysctlName(name string) bool { - if len(name) > SysctlMaxLength { - return false - } - return sysctlRegexp.MatchString(name) -} - -func validateSysctls(sysctls []api.Sysctl, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - for i, s := range sysctls { - if len(s.Name) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Index(i).Child("name"), "")) - } else if !IsValidSysctlName(s.Name) { - allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("name"), s.Name, fmt.Sprintf("must have at most %d characters and match regex %s", SysctlMaxLength, SysctlFmt))) - } - } - return allErrs -} - -// ValidatePodSecurityContext test that the specified PodSecurityContext has valid data. 
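// Editor's note: standalone sketch (not part of the vendored patch) of the sysctl-name check
// defined above, reusing the same segment pattern and the 253-character limit from
// SysctlSegmentFmt / SysctlFmt / SysctlMaxLength.
package main

import (
	"fmt"
	"regexp"
)

const (
	sysctlSegmentFmt = "[a-z0-9]([-_a-z0-9]*[a-z0-9])?"
	sysctlFmt        = "(" + sysctlSegmentFmt + "\\.)*" + sysctlSegmentFmt
	sysctlMaxLength  = 253
)

var sysctlRegexp = regexp.MustCompile("^" + sysctlFmt + "$")

// isValidSysctlName mirrors IsValidSysctlName above: length-limited,
// dot-separated lowercase segments.
func isValidSysctlName(name string) bool {
	return len(name) <= sysctlMaxLength && sysctlRegexp.MatchString(name)
}

func main() {
	fmt.Println(isValidSysctlName("net.ipv4.ip_forward")) // true
	fmt.Println(isValidSysctlName("Net.IPv4"))            // false: uppercase not allowed
}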
-func ValidatePodSecurityContext(securityContext *api.PodSecurityContext, spec *api.PodSpec, specPath, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - if securityContext != nil { - allErrs = append(allErrs, validateHostNetwork(securityContext.HostNetwork, spec.Containers, specPath.Child("containers"))...) - if securityContext.FSGroup != nil { - for _, msg := range validation.IsValidGroupID(*securityContext.FSGroup) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("fsGroup"), *(securityContext.FSGroup), msg)) - } - } - if securityContext.RunAsUser != nil { - for _, msg := range validation.IsValidUserID(*securityContext.RunAsUser) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("runAsUser"), *(securityContext.RunAsUser), msg)) - } - } - for g, gid := range securityContext.SupplementalGroups { - for _, msg := range validation.IsValidGroupID(gid) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("supplementalGroups").Index(g), gid, msg)) - } - } - } - - return allErrs -} - -func ValidateContainerUpdates(newContainers, oldContainers []api.Container, fldPath *field.Path) (allErrs field.ErrorList, stop bool) { - allErrs = field.ErrorList{} - if len(newContainers) != len(oldContainers) { - //TODO: Pinpoint the specific container that causes the invalid error after we have strategic merge diff - allErrs = append(allErrs, field.Forbidden(fldPath, "pod updates may not add or remove containers")) - return allErrs, true - } - - // validate updated container images - for i, ctr := range newContainers { - if len(ctr.Image) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Index(i).Child("image"), "")) - } - // this is only called from ValidatePodUpdate so its safe to check leading/trailing whitespace. - if len(strings.TrimSpace(ctr.Image)) != len(ctr.Image) { - allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("image"), ctr.Image, "must not have leading or trailing whitespace")) - } - } - return allErrs, false -} - -// ValidatePodUpdate tests to see if the update is legal for an end user to make. newPod is updated with fields -// that cannot be changed. -func ValidatePodUpdate(newPod, oldPod *api.Pod) field.ErrorList { - fldPath := field.NewPath("metadata") - allErrs := ValidateObjectMetaUpdate(&newPod.ObjectMeta, &oldPod.ObjectMeta, fldPath) - allErrs = append(allErrs, ValidatePodSpecificAnnotationUpdates(newPod, oldPod, fldPath.Child("annotations"))...) - specPath := field.NewPath("spec") - - // validate updateable fields: - // 1. spec.containers[*].image - // 2. spec.initContainers[*].image - // 3. spec.activeDeadlineSeconds - - containerErrs, stop := ValidateContainerUpdates(newPod.Spec.Containers, oldPod.Spec.Containers, specPath.Child("containers")) - allErrs = append(allErrs, containerErrs...) - if stop { - return allErrs - } - containerErrs, stop = ValidateContainerUpdates(newPod.Spec.InitContainers, oldPod.Spec.InitContainers, specPath.Child("initContainers")) - allErrs = append(allErrs, containerErrs...) - if stop { - return allErrs - } - - // validate updated spec.activeDeadlineSeconds. two types of updates are allowed: - // 1. from nil to a positive value - // 2. 
from a positive value to a lesser, non-negative value - if newPod.Spec.ActiveDeadlineSeconds != nil { - newActiveDeadlineSeconds := *newPod.Spec.ActiveDeadlineSeconds - if newActiveDeadlineSeconds < 0 || newActiveDeadlineSeconds > math.MaxInt32 { - allErrs = append(allErrs, field.Invalid(specPath.Child("activeDeadlineSeconds"), newActiveDeadlineSeconds, validation.InclusiveRangeError(0, math.MaxInt32))) - return allErrs - } - if oldPod.Spec.ActiveDeadlineSeconds != nil { - oldActiveDeadlineSeconds := *oldPod.Spec.ActiveDeadlineSeconds - if oldActiveDeadlineSeconds < newActiveDeadlineSeconds { - allErrs = append(allErrs, field.Invalid(specPath.Child("activeDeadlineSeconds"), newActiveDeadlineSeconds, "must be less than or equal to previous value")) - return allErrs - } - } - } else if oldPod.Spec.ActiveDeadlineSeconds != nil { - allErrs = append(allErrs, field.Invalid(specPath.Child("activeDeadlineSeconds"), newPod.Spec.ActiveDeadlineSeconds, "must not update from a positive integer to nil value")) - } - - // handle updateable fields by munging those fields prior to deep equal comparison. - mungedPod := *newPod - // munge spec.containers[*].image - var newContainers []api.Container - for ix, container := range mungedPod.Spec.Containers { - container.Image = oldPod.Spec.Containers[ix].Image - newContainers = append(newContainers, container) - } - mungedPod.Spec.Containers = newContainers - // munge spec.initContainers[*].image - var newInitContainers []api.Container - for ix, container := range mungedPod.Spec.InitContainers { - container.Image = oldPod.Spec.InitContainers[ix].Image - newInitContainers = append(newInitContainers, container) - } - mungedPod.Spec.InitContainers = newInitContainers - // munge spec.activeDeadlineSeconds - mungedPod.Spec.ActiveDeadlineSeconds = nil - if oldPod.Spec.ActiveDeadlineSeconds != nil { - activeDeadlineSeconds := *oldPod.Spec.ActiveDeadlineSeconds - mungedPod.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds - } - - // Allow only additions to tolerations updates. - mungedPod.Spec.Tolerations = oldPod.Spec.Tolerations - allErrs = append(allErrs, validateOnlyAddedTolerations(newPod.Spec.Tolerations, oldPod.Spec.Tolerations, specPath.Child("tolerations"))...) - - if !apiequality.Semantic.DeepEqual(mungedPod.Spec, oldPod.Spec) { - // This diff isn't perfect, but it's a helluva lot better an "I'm not going to tell you what the difference is". - //TODO: Pinpoint the specific field that causes the invalid error after we have strategic merge diff - specDiff := diff.ObjectDiff(mungedPod.Spec, oldPod.Spec) - allErrs = append(allErrs, field.Forbidden(specPath, fmt.Sprintf("pod updates may not change fields other than `spec.containers[*].image`, `spec.initContainers[*].image`, `spec.activeDeadlineSeconds` or `spec.tolerations` (only additions to existing tolerations)\n%v", specDiff))) - } - - return allErrs -} - -// ValidatePodStatusUpdate tests to see if the update is legal for an end user to make. newPod is updated with fields -// that cannot be changed. -func ValidatePodStatusUpdate(newPod, oldPod *api.Pod) field.ErrorList { - fldPath := field.NewPath("metadata") - allErrs := ValidateObjectMetaUpdate(&newPod.ObjectMeta, &oldPod.ObjectMeta, fldPath) - allErrs = append(allErrs, ValidatePodSpecificAnnotationUpdates(newPod, oldPod, fldPath.Child("annotations"))...) 
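// Editor's note: standalone sketch (not part of the vendored patch) of the
// spec.activeDeadlineSeconds update rules applied in ValidatePodUpdate above: setting it for
// the first time (nil -> positive) is allowed, an existing value may only stay the same or
// decrease, and it may not be cleared back to nil.
package main

import (
	"fmt"
	"math"
)

func checkActiveDeadlineUpdate(oldSecs, newSecs *int64) error {
	switch {
	case newSecs == nil && oldSecs == nil:
		return nil
	case newSecs == nil:
		return fmt.Errorf("must not update from a positive integer to nil value")
	case *newSecs < 0 || *newSecs > math.MaxInt32:
		return fmt.Errorf("must be between 0 and %d", math.MaxInt32)
	case oldSecs != nil && *newSecs > *oldSecs:
		return fmt.Errorf("must be less than or equal to previous value")
	}
	return nil
}

func main() {
	old, shorter := int64(600), int64(300)
	fmt.Println(checkActiveDeadlineUpdate(nil, &old))      // <nil>: setting for the first time
	fmt.Println(checkActiveDeadlineUpdate(&old, &shorter)) // <nil>: shortening is allowed
	fmt.Println(checkActiveDeadlineUpdate(&old, nil))      // error: cannot clear the deadline
}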
- - if newPod.Spec.NodeName != oldPod.Spec.NodeName { - allErrs = append(allErrs, field.Forbidden(field.NewPath("status", "nodeName"), "may not be changed directly")) - } - - // For status update we ignore changes to pod spec. - newPod.Spec = oldPod.Spec - - return allErrs -} - -// ValidatePodBinding tests if required fields in the pod binding are legal. -func ValidatePodBinding(binding *api.Binding) field.ErrorList { - allErrs := field.ErrorList{} - - if len(binding.Target.Kind) != 0 && binding.Target.Kind != "Node" { - // TODO: When validation becomes versioned, this gets more complicated. - allErrs = append(allErrs, field.NotSupported(field.NewPath("target", "kind"), binding.Target.Kind, []string{"Node", ""})) - } - if len(binding.Target.Name) == 0 { - // TODO: When validation becomes versioned, this gets more complicated. - allErrs = append(allErrs, field.Required(field.NewPath("target", "name"), "")) - } - - return allErrs -} - -// ValidatePodTemplate tests if required fields in the pod template are set. -func ValidatePodTemplate(pod *api.PodTemplate) field.ErrorList { - allErrs := ValidateObjectMeta(&pod.ObjectMeta, true, ValidatePodName, field.NewPath("metadata")) - allErrs = append(allErrs, ValidatePodTemplateSpec(&pod.Template, field.NewPath("template"))...) - return allErrs -} - -// ValidatePodTemplateUpdate tests to see if the update is legal for an end user to make. newPod is updated with fields -// that cannot be changed. -func ValidatePodTemplateUpdate(newPod, oldPod *api.PodTemplate) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&oldPod.ObjectMeta, &newPod.ObjectMeta, field.NewPath("metadata")) - allErrs = append(allErrs, ValidatePodTemplateSpec(&newPod.Template, field.NewPath("template"))...) - return allErrs -} - -var supportedSessionAffinityType = sets.NewString(string(api.ServiceAffinityClientIP), string(api.ServiceAffinityNone)) -var supportedServiceType = sets.NewString(string(api.ServiceTypeClusterIP), string(api.ServiceTypeNodePort), - string(api.ServiceTypeLoadBalancer), string(api.ServiceTypeExternalName)) - -// ValidateService tests if required fields/annotations of a Service are valid. -func ValidateService(service *api.Service) field.ErrorList { - allErrs := ValidateObjectMeta(&service.ObjectMeta, true, ValidateServiceName, field.NewPath("metadata")) - - specPath := field.NewPath("spec") - isHeadlessService := service.Spec.ClusterIP == api.ClusterIPNone - if len(service.Spec.Ports) == 0 && !isHeadlessService && service.Spec.Type != api.ServiceTypeExternalName { - allErrs = append(allErrs, field.Required(specPath.Child("ports"), "")) - } - switch service.Spec.Type { - case api.ServiceTypeLoadBalancer: - for ix := range service.Spec.Ports { - port := &service.Spec.Ports[ix] - // This is a workaround for broken cloud environments that - // over-open firewalls. Hopefully it can go away when more clouds - // understand containers better. 
- if port.Port == 10250 { - portPath := specPath.Child("ports").Index(ix) - allErrs = append(allErrs, field.Invalid(portPath, port.Port, "may not expose port 10250 externally since it is used by kubelet")) - } - } - if service.Spec.ClusterIP == "None" { - allErrs = append(allErrs, field.Invalid(specPath.Child("clusterIP"), service.Spec.ClusterIP, "may not be set to 'None' for LoadBalancer services")) - } - case api.ServiceTypeNodePort: - if service.Spec.ClusterIP == "None" { - allErrs = append(allErrs, field.Invalid(specPath.Child("clusterIP"), service.Spec.ClusterIP, "may not be set to 'None' for NodePort services")) - } - case api.ServiceTypeExternalName: - if service.Spec.ClusterIP != "" { - allErrs = append(allErrs, field.Invalid(specPath.Child("clusterIP"), service.Spec.ClusterIP, "must be empty for ExternalName services")) - } - if len(service.Spec.ExternalName) > 0 { - allErrs = append(allErrs, ValidateDNS1123Subdomain(service.Spec.ExternalName, specPath.Child("externalName"))...) - } else { - allErrs = append(allErrs, field.Required(specPath.Child("externalName"), "")) - } - } - - allPortNames := sets.String{} - portsPath := specPath.Child("ports") - for i := range service.Spec.Ports { - portPath := portsPath.Index(i) - allErrs = append(allErrs, validateServicePort(&service.Spec.Ports[i], len(service.Spec.Ports) > 1, isHeadlessService, &allPortNames, portPath)...) - } - - if service.Spec.Selector != nil { - allErrs = append(allErrs, unversionedvalidation.ValidateLabels(service.Spec.Selector, specPath.Child("selector"))...) - } - - if len(service.Spec.SessionAffinity) == 0 { - allErrs = append(allErrs, field.Required(specPath.Child("sessionAffinity"), "")) - } else if !supportedSessionAffinityType.Has(string(service.Spec.SessionAffinity)) { - allErrs = append(allErrs, field.NotSupported(specPath.Child("sessionAffinity"), service.Spec.SessionAffinity, supportedSessionAffinityType.List())) - } - - if service.Spec.SessionAffinity == api.ServiceAffinityClientIP { - allErrs = append(allErrs, validateClientIPAffinityConfig(service.Spec.SessionAffinityConfig, specPath.Child("sessionAffinityConfig"))...) - } else if service.Spec.SessionAffinity == api.ServiceAffinityNone { - if service.Spec.SessionAffinityConfig != nil { - allErrs = append(allErrs, field.Forbidden(specPath.Child("sessionAffinityConfig"), fmt.Sprintf("must not be set when session affinity is %s", string(api.ServiceAffinityNone)))) - } - } - - if helper.IsServiceIPSet(service) { - if ip := net.ParseIP(service.Spec.ClusterIP); ip == nil { - allErrs = append(allErrs, field.Invalid(specPath.Child("clusterIP"), service.Spec.ClusterIP, "must be empty, 'None', or a valid IP address")) - } - } - - ipPath := specPath.Child("externalIPs") - for i, ip := range service.Spec.ExternalIPs { - idxPath := ipPath.Index(i) - if msgs := validation.IsValidIP(ip); len(msgs) != 0 { - for i := range msgs { - allErrs = append(allErrs, field.Invalid(idxPath, ip, msgs[i])) - } - } else { - allErrs = append(allErrs, validateNonSpecialIP(ip, idxPath)...) 
- } - } - - if len(service.Spec.Type) == 0 { - allErrs = append(allErrs, field.Required(specPath.Child("type"), "")) - } else if !supportedServiceType.Has(string(service.Spec.Type)) { - allErrs = append(allErrs, field.NotSupported(specPath.Child("type"), service.Spec.Type, supportedServiceType.List())) - } - - if service.Spec.Type == api.ServiceTypeLoadBalancer { - portsPath := specPath.Child("ports") - includeProtocols := sets.NewString() - for i := range service.Spec.Ports { - portPath := portsPath.Index(i) - if !supportedPortProtocols.Has(string(service.Spec.Ports[i].Protocol)) { - allErrs = append(allErrs, field.Invalid(portPath.Child("protocol"), service.Spec.Ports[i].Protocol, "cannot create an external load balancer with non-TCP/UDP ports")) - } else { - includeProtocols.Insert(string(service.Spec.Ports[i].Protocol)) - } - } - if includeProtocols.Len() > 1 { - allErrs = append(allErrs, field.Invalid(portsPath, service.Spec.Ports, "cannot create an external load balancer with mix protocols")) - } - } - - if service.Spec.Type == api.ServiceTypeClusterIP { - portsPath := specPath.Child("ports") - for i := range service.Spec.Ports { - portPath := portsPath.Index(i) - if service.Spec.Ports[i].NodePort != 0 { - allErrs = append(allErrs, field.Invalid(portPath.Child("nodePort"), service.Spec.Ports[i].NodePort, "may not be used when `type` is 'ClusterIP'")) - } - } - } - - // Check for duplicate NodePorts, considering (protocol,port) pairs - portsPath = specPath.Child("ports") - nodePorts := make(map[api.ServicePort]bool) - for i := range service.Spec.Ports { - port := &service.Spec.Ports[i] - if port.NodePort == 0 { - continue - } - portPath := portsPath.Index(i) - var key api.ServicePort - key.Protocol = port.Protocol - key.NodePort = port.NodePort - _, found := nodePorts[key] - if found { - allErrs = append(allErrs, field.Duplicate(portPath.Child("nodePort"), port.NodePort)) - } - nodePorts[key] = true - } - - // Check for duplicate Ports, considering (protocol,port) pairs - portsPath = specPath.Child("ports") - ports := make(map[api.ServicePort]bool) - for i, port := range service.Spec.Ports { - portPath := portsPath.Index(i) - key := api.ServicePort{Protocol: port.Protocol, Port: port.Port} - _, found := ports[key] - if found { - allErrs = append(allErrs, field.Duplicate(portPath, key)) - } - ports[key] = true - } - - // Check for duplicate TargetPort - portsPath = specPath.Child("ports") - targetPorts := make(map[api.ServicePort]bool) - for i, port := range service.Spec.Ports { - if (port.TargetPort.Type == intstr.Int && port.TargetPort.IntVal == 0) || (port.TargetPort.Type == intstr.String && port.TargetPort.StrVal == "") { - continue - } - portPath := portsPath.Index(i) - key := api.ServicePort{Protocol: port.Protocol, TargetPort: port.TargetPort} - _, found := targetPorts[key] - if found { - allErrs = append(allErrs, field.Duplicate(portPath.Child("targetPort"), port.TargetPort)) - } - targetPorts[key] = true - } - - // Validate SourceRange field and annotation - _, ok := service.Annotations[api.AnnotationLoadBalancerSourceRangesKey] - if len(service.Spec.LoadBalancerSourceRanges) > 0 || ok { - var fieldPath *field.Path - var val string - if len(service.Spec.LoadBalancerSourceRanges) > 0 { - fieldPath = specPath.Child("LoadBalancerSourceRanges") - val = fmt.Sprintf("%v", service.Spec.LoadBalancerSourceRanges) - } else { - fieldPath = field.NewPath("metadata", "annotations").Key(api.AnnotationLoadBalancerSourceRangesKey) - val = 
service.Annotations[api.AnnotationLoadBalancerSourceRangesKey] - } - if service.Spec.Type != api.ServiceTypeLoadBalancer { - allErrs = append(allErrs, field.Invalid(fieldPath, "", "may only be used when `type` is 'LoadBalancer'")) - } - _, err := apiservice.GetLoadBalancerSourceRanges(service) - if err != nil { - allErrs = append(allErrs, field.Invalid(fieldPath, val, "must be a list of IP ranges. For example, 10.240.0.0/24,10.250.0.0/24 ")) - } - } - - allErrs = append(allErrs, validateServiceExternalTrafficFieldsValue(service)...) - - return allErrs -} - -func validateServicePort(sp *api.ServicePort, requireName, isHeadlessService bool, allNames *sets.String, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - if requireName && len(sp.Name) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) - } else if len(sp.Name) != 0 { - allErrs = append(allErrs, ValidateDNS1123Label(sp.Name, fldPath.Child("name"))...) - if allNames.Has(sp.Name) { - allErrs = append(allErrs, field.Duplicate(fldPath.Child("name"), sp.Name)) - } else { - allNames.Insert(sp.Name) - } - } - - for _, msg := range validation.IsValidPortNum(int(sp.Port)) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("port"), sp.Port, msg)) - } - - if len(sp.Protocol) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("protocol"), "")) - } else if !supportedPortProtocols.Has(string(sp.Protocol)) { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("protocol"), sp.Protocol, supportedPortProtocols.List())) - } - - allErrs = append(allErrs, ValidatePortNumOrName(sp.TargetPort, fldPath.Child("targetPort"))...) - - // in the v1 API, targetPorts on headless services were tolerated. - // once we have version-specific validation, we can reject this on newer API versions, but until then, we have to tolerate it for compatibility. - // - // if isHeadlessService { - // if sp.TargetPort.Type == intstr.String || (sp.TargetPort.Type == intstr.Int && sp.Port != sp.TargetPort.IntValue()) { - // allErrs = append(allErrs, field.Invalid(fldPath.Child("targetPort"), sp.TargetPort, "must be equal to the value of 'port' when clusterIP = None")) - // } - // } - - return allErrs -} - -// validateServiceExternalTrafficFieldsValue validates ExternalTraffic related annotations -// have legal value. -func validateServiceExternalTrafficFieldsValue(service *api.Service) field.ErrorList { - allErrs := field.ErrorList{} - - // Check first class fields. - if service.Spec.ExternalTrafficPolicy != "" && - service.Spec.ExternalTrafficPolicy != api.ServiceExternalTrafficPolicyTypeCluster && - service.Spec.ExternalTrafficPolicy != api.ServiceExternalTrafficPolicyTypeLocal { - allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("externalTrafficPolicy"), service.Spec.ExternalTrafficPolicy, - fmt.Sprintf("ExternalTrafficPolicy must be empty, %v or %v", api.ServiceExternalTrafficPolicyTypeCluster, api.ServiceExternalTrafficPolicyTypeLocal))) - } - if service.Spec.HealthCheckNodePort < 0 { - allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("healthCheckNodePort"), service.Spec.HealthCheckNodePort, - "HealthCheckNodePort must be not less than 0")) - } - - return allErrs -} - -// ValidateServiceExternalTrafficFieldsCombination validates if ExternalTrafficPolicy, -// HealthCheckNodePort and Type combination are legal. For update, it should be called -// after clearing externalTraffic related fields for the ease of transitioning between -// different service types. 
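// Editor's note: standalone sketch (not part of the vendored patch) of the duplicate-NodePort
// check in ValidateService above: a node port counts as a duplicate only when both the
// protocol and the port number repeat, and unallocated (zero) ports are skipped. servicePort
// here is a simplified stand-in for api.ServicePort.
package main

import "fmt"

type servicePort struct {
	Protocol string
	NodePort int32
}

func duplicateNodePorts(ports []servicePort) []servicePort {
	seen := map[servicePort]bool{}
	var dups []servicePort
	for _, p := range ports {
		if p.NodePort == 0 {
			continue // unallocated node ports are ignored
		}
		key := servicePort{Protocol: p.Protocol, NodePort: p.NodePort}
		if seen[key] {
			dups = append(dups, key)
		}
		seen[key] = true
	}
	return dups
}

func main() {
	ports := []servicePort{
		{Protocol: "TCP", NodePort: 30080},
		{Protocol: "UDP", NodePort: 30080}, // same number, different protocol: allowed
		{Protocol: "TCP", NodePort: 30080}, // true duplicate
	}
	fmt.Println(duplicateNodePorts(ports)) // [{TCP 30080}]
}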
-func ValidateServiceExternalTrafficFieldsCombination(service *api.Service) field.ErrorList { - allErrs := field.ErrorList{} - - if service.Spec.Type != api.ServiceTypeLoadBalancer && - service.Spec.Type != api.ServiceTypeNodePort && - service.Spec.ExternalTrafficPolicy != "" { - allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "externalTrafficPolicy"), service.Spec.ExternalTrafficPolicy, - "ExternalTrafficPolicy can only be set on NodePort and LoadBalancer service")) - } - - if !apiservice.NeedsHealthCheck(service) && - service.Spec.HealthCheckNodePort != 0 { - allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "healthCheckNodePort"), service.Spec.HealthCheckNodePort, - "HealthCheckNodePort can only be set on LoadBalancer service with ExternalTrafficPolicy=Local")) - } - - return allErrs -} - -// ValidateServiceUpdate tests if required fields in the service are set during an update -func ValidateServiceUpdate(service, oldService *api.Service) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&service.ObjectMeta, &oldService.ObjectMeta, field.NewPath("metadata")) - - // ClusterIP should be immutable for services using it (every type other than ExternalName) - // which do not have ClusterIP assigned yet (empty string value) - if service.Spec.Type != api.ServiceTypeExternalName { - if oldService.Spec.Type != api.ServiceTypeExternalName && oldService.Spec.ClusterIP != "" { - allErrs = append(allErrs, ValidateImmutableField(service.Spec.ClusterIP, oldService.Spec.ClusterIP, field.NewPath("spec", "clusterIP"))...) - } - } - - allErrs = append(allErrs, ValidateService(service)...) - return allErrs -} - -// ValidateServiceStatusUpdate tests if required fields in the Service are set when updating status. -func ValidateServiceStatusUpdate(service, oldService *api.Service) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&service.ObjectMeta, &oldService.ObjectMeta, field.NewPath("metadata")) - allErrs = append(allErrs, ValidateLoadBalancerStatus(&service.Status.LoadBalancer, field.NewPath("status", "loadBalancer"))...) - return allErrs -} - -// ValidateReplicationController tests if required fields in the replication controller are set. -func ValidateReplicationController(controller *api.ReplicationController) field.ErrorList { - allErrs := ValidateObjectMeta(&controller.ObjectMeta, true, ValidateReplicationControllerName, field.NewPath("metadata")) - allErrs = append(allErrs, ValidateReplicationControllerSpec(&controller.Spec, field.NewPath("spec"))...) - return allErrs -} - -// ValidateReplicationControllerUpdate tests if required fields in the replication controller are set. -func ValidateReplicationControllerUpdate(controller, oldController *api.ReplicationController) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&controller.ObjectMeta, &oldController.ObjectMeta, field.NewPath("metadata")) - allErrs = append(allErrs, ValidateReplicationControllerSpec(&controller.Spec, field.NewPath("spec"))...) - return allErrs -} - -// ValidateReplicationControllerStatusUpdate tests if required fields in the replication controller are set. -func ValidateReplicationControllerStatusUpdate(controller, oldController *api.ReplicationController) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&controller.ObjectMeta, &oldController.ObjectMeta, field.NewPath("metadata")) - allErrs = append(allErrs, ValidateReplicationControllerStatus(controller.Status, field.NewPath("status"))...) 
- return allErrs -} - -func ValidateReplicationControllerStatus(status api.ReplicationControllerStatus, statusPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, ValidateNonnegativeField(int64(status.Replicas), statusPath.Child("replicas"))...) - allErrs = append(allErrs, ValidateNonnegativeField(int64(status.FullyLabeledReplicas), statusPath.Child("fullyLabeledReplicas"))...) - allErrs = append(allErrs, ValidateNonnegativeField(int64(status.ReadyReplicas), statusPath.Child("readyReplicas"))...) - allErrs = append(allErrs, ValidateNonnegativeField(int64(status.AvailableReplicas), statusPath.Child("availableReplicas"))...) - allErrs = append(allErrs, ValidateNonnegativeField(int64(status.ObservedGeneration), statusPath.Child("observedGeneration"))...) - msg := "cannot be greater than status.replicas" - if status.FullyLabeledReplicas > status.Replicas { - allErrs = append(allErrs, field.Invalid(statusPath.Child("fullyLabeledReplicas"), status.FullyLabeledReplicas, msg)) - } - if status.ReadyReplicas > status.Replicas { - allErrs = append(allErrs, field.Invalid(statusPath.Child("readyReplicas"), status.ReadyReplicas, msg)) - } - if status.AvailableReplicas > status.Replicas { - allErrs = append(allErrs, field.Invalid(statusPath.Child("availableReplicas"), status.AvailableReplicas, msg)) - } - if status.AvailableReplicas > status.ReadyReplicas { - allErrs = append(allErrs, field.Invalid(statusPath.Child("availableReplicas"), status.AvailableReplicas, "cannot be greater than readyReplicas")) - } - return allErrs -} - -// Validates that the given selector is non-empty. -func ValidateNonEmptySelector(selectorMap map[string]string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - selector := labels.Set(selectorMap).AsSelector() - if selector.Empty() { - allErrs = append(allErrs, field.Required(fldPath, "")) - } - return allErrs -} - -// Validates the given template and ensures that it is in accordance with the desired selector and replicas. -func ValidatePodTemplateSpecForRC(template *api.PodTemplateSpec, selectorMap map[string]string, replicas int32, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if template == nil { - allErrs = append(allErrs, field.Required(fldPath, "")) - } else { - selector := labels.Set(selectorMap).AsSelector() - if !selector.Empty() { - // Verify that the RC selector matches the labels in template. - labels := labels.Set(template.Labels) - if !selector.Matches(labels) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("metadata", "labels"), template.Labels, "`selector` does not match template `labels`")) - } - } - allErrs = append(allErrs, ValidatePodTemplateSpec(template, fldPath)...) - if replicas > 1 { - allErrs = append(allErrs, ValidateReadOnlyPersistentDisks(template.Spec.Volumes, fldPath.Child("spec", "volumes"))...) - } - // RestartPolicy has already been first-order validated as per ValidatePodTemplateSpec(). - if template.Spec.RestartPolicy != api.RestartPolicyAlways { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("spec", "restartPolicy"), template.Spec.RestartPolicy, []string{string(api.RestartPolicyAlways)})) - } - if template.Spec.ActiveDeadlineSeconds != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Child("spec", "activeDeadlineSeconds"), template.Spec.ActiveDeadlineSeconds, "must not be specified")) - } - } - return allErrs -} - -// ValidateReplicationControllerSpec tests if required fields in the replication controller spec are set. 
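// Editor's note: standalone sketch (not part of the vendored patch) of the consistency rules
// checked by ValidateReplicationControllerStatus above: labeled, ready, and available replica
// counts may not exceed status.replicas, and available may not exceed ready.
package main

import "fmt"

type rcStatus struct {
	Replicas             int32
	FullyLabeledReplicas int32
	ReadyReplicas        int32
	AvailableReplicas    int32
}

func checkRCStatus(s rcStatus) []string {
	var errs []string
	if s.FullyLabeledReplicas > s.Replicas {
		errs = append(errs, "fullyLabeledReplicas cannot be greater than status.replicas")
	}
	if s.ReadyReplicas > s.Replicas {
		errs = append(errs, "readyReplicas cannot be greater than status.replicas")
	}
	if s.AvailableReplicas > s.Replicas {
		errs = append(errs, "availableReplicas cannot be greater than status.replicas")
	}
	if s.AvailableReplicas > s.ReadyReplicas {
		errs = append(errs, "availableReplicas cannot be greater than readyReplicas")
	}
	return errs
}

func main() {
	fmt.Println(checkRCStatus(rcStatus{Replicas: 3, ReadyReplicas: 3, AvailableReplicas: 4}))
}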
-func ValidateReplicationControllerSpec(spec *api.ReplicationControllerSpec, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, ValidateNonnegativeField(int64(spec.MinReadySeconds), fldPath.Child("minReadySeconds"))...) - allErrs = append(allErrs, ValidateNonEmptySelector(spec.Selector, fldPath.Child("selector"))...) - allErrs = append(allErrs, ValidateNonnegativeField(int64(spec.Replicas), fldPath.Child("replicas"))...) - allErrs = append(allErrs, ValidatePodTemplateSpecForRC(spec.Template, spec.Selector, spec.Replicas, fldPath.Child("template"))...) - return allErrs -} - -// ValidatePodTemplateSpec validates the spec of a pod template -func ValidatePodTemplateSpec(spec *api.PodTemplateSpec, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, unversionedvalidation.ValidateLabels(spec.Labels, fldPath.Child("labels"))...) - allErrs = append(allErrs, ValidateAnnotations(spec.Annotations, fldPath.Child("annotations"))...) - allErrs = append(allErrs, ValidatePodSpecificAnnotations(spec.Annotations, &spec.Spec, fldPath.Child("annotations"))...) - allErrs = append(allErrs, ValidatePodSpec(&spec.Spec, fldPath.Child("spec"))...) - return allErrs -} - -func ValidateReadOnlyPersistentDisks(volumes []api.Volume, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - for i := range volumes { - vol := &volumes[i] - idxPath := fldPath.Index(i) - if vol.GCEPersistentDisk != nil { - if vol.GCEPersistentDisk.ReadOnly == false { - allErrs = append(allErrs, field.Invalid(idxPath.Child("gcePersistentDisk", "readOnly"), false, "must be true for replicated pods > 1; GCE PD can only be mounted on multiple machines if it is read-only")) - } - } - // TODO: What to do for AWS? It doesn't support replicas - } - return allErrs -} - -// ValidateTaintsInNodeAnnotations tests that the serialized taints in Node.Annotations has valid data -func ValidateTaintsInNodeAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - taints, err := helper.GetTaintsFromNodeAnnotations(annotations) - if err != nil { - allErrs = append(allErrs, field.Invalid(fldPath, api.TaintsAnnotationKey, err.Error())) - return allErrs - } - - if len(taints) > 0 { - allErrs = append(allErrs, validateNodeTaints(taints, fldPath.Child(api.TaintsAnnotationKey))...) - } - - return allErrs -} - -// validateNodeTaints tests if given taints have valid data. -func validateNodeTaints(taints []api.Taint, fldPath *field.Path) field.ErrorList { - allErrors := field.ErrorList{} - - uniqueTaints := map[api.TaintEffect]sets.String{} - - for i, currTaint := range taints { - idxPath := fldPath.Index(i) - // validate the taint key - allErrors = append(allErrors, unversionedvalidation.ValidateLabelName(currTaint.Key, idxPath.Child("key"))...) - // validate the taint value - if errs := validation.IsValidLabelValue(currTaint.Value); len(errs) != 0 { - allErrors = append(allErrors, field.Invalid(idxPath.Child("value"), currTaint.Value, strings.Join(errs, ";"))) - } - // validate the taint effect - allErrors = append(allErrors, validateTaintEffect(&currTaint.Effect, false, idxPath.Child("effect"))...) 
- - // validate if taint is unique by - if len(uniqueTaints[currTaint.Effect]) > 0 && uniqueTaints[currTaint.Effect].Has(currTaint.Key) { - duplicatedError := field.Duplicate(idxPath, currTaint) - duplicatedError.Detail = "taints must be unique by key and effect pair" - allErrors = append(allErrors, duplicatedError) - continue - } - - // add taint to existingTaints for uniqueness check - if len(uniqueTaints[currTaint.Effect]) == 0 { - uniqueTaints[currTaint.Effect] = sets.String{} - } - uniqueTaints[currTaint.Effect].Insert(currTaint.Key) - } - return allErrors -} - -func ValidateNodeSpecificAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - if annotations[api.TaintsAnnotationKey] != "" { - allErrs = append(allErrs, ValidateTaintsInNodeAnnotations(annotations, fldPath)...) - } - - if annotations[api.PreferAvoidPodsAnnotationKey] != "" { - allErrs = append(allErrs, ValidateAvoidPodsInNodeAnnotations(annotations, fldPath)...) - } - return allErrs -} - -// ValidateNode tests if required fields in the node are set. -func ValidateNode(node *api.Node) field.ErrorList { - fldPath := field.NewPath("metadata") - allErrs := ValidateObjectMeta(&node.ObjectMeta, false, ValidateNodeName, fldPath) - allErrs = append(allErrs, ValidateNodeSpecificAnnotations(node.ObjectMeta.Annotations, fldPath.Child("annotations"))...) - if len(node.Spec.Taints) > 0 { - allErrs = append(allErrs, validateNodeTaints(node.Spec.Taints, fldPath.Child("taints"))...) - } - - // Only validate spec. - // All status fields are optional and can be updated later. - // That said, if specified, we need to ensure they are valid. - allErrs = append(allErrs, ValidateNodeResources(node)...) - - // external ID is required. - if len(node.Spec.ExternalID) == 0 { - allErrs = append(allErrs, field.Required(field.NewPath("spec", "externalID"), "")) - } - - // Only allow Node.Spec.ConfigSource to be set if the DynamicKubeletConfig feature gate is enabled - if node.Spec.ConfigSource != nil && !utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) { - allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "configSource"), "configSource may only be set if the DynamicKubeletConfig feature gate is enabled)")) - } - - // TODO(rjnagal): Ignore PodCIDR till its completely implemented. - return allErrs -} - -// ValidateNodeResources is used to make sure a node has valid capacity and allocatable values. -func ValidateNodeResources(node *api.Node) field.ErrorList { - allErrs := field.ErrorList{} - // Validate resource quantities in capacity. - hugePageSizes := sets.NewString() - for k, v := range node.Status.Capacity { - resPath := field.NewPath("status", "capacity", string(k)) - allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...) - // track any huge page size that has a positive value - if helper.IsHugePageResourceName(k) && v.Value() > int64(0) { - hugePageSizes.Insert(string(k)) - } - if len(hugePageSizes) > 1 { - allErrs = append(allErrs, field.Invalid(resPath, v, "may not have pre-allocated hugepages for multiple page sizes")) - } - } - // Validate resource quantities in allocatable. - hugePageSizes = sets.NewString() - for k, v := range node.Status.Allocatable { - resPath := field.NewPath("status", "allocatable", string(k)) - allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...) 
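// Editor's note: standalone sketch (not part of the vendored patch) of the uniqueness rule in
// validateNodeTaints above: taints must be unique by key and effect pair, so the same key may
// appear again only under a different effect. taint here is a simplified stand-in for api.Taint.
package main

import "fmt"

type taint struct {
	Key    string
	Value  string
	Effect string
}

func duplicateTaints(taints []taint) []taint {
	seen := map[string]map[string]bool{} // effect -> set of keys already used
	var dups []taint
	for _, t := range taints {
		if seen[t.Effect] == nil {
			seen[t.Effect] = map[string]bool{}
		}
		if seen[t.Effect][t.Key] {
			dups = append(dups, t)
			continue
		}
		seen[t.Effect][t.Key] = true
	}
	return dups
}

func main() {
	ts := []taint{
		{Key: "dedicated", Value: "gpu", Effect: "NoSchedule"},
		{Key: "dedicated", Value: "infra", Effect: "NoSchedule"}, // duplicate key+effect pair
		{Key: "dedicated", Value: "gpu", Effect: "NoExecute"},    // different effect: allowed
	}
	fmt.Println(duplicateTaints(ts))
}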
- // track any huge page size that has a positive value - if helper.IsHugePageResourceName(k) && v.Value() > int64(0) { - hugePageSizes.Insert(string(k)) - } - if len(hugePageSizes) > 1 { - allErrs = append(allErrs, field.Invalid(resPath, v, "may not have pre-allocated hugepages for multiple page sizes")) - } - } - return allErrs -} - -// ValidateNodeUpdate tests to make sure a node update can be applied. Modifies oldNode. -func ValidateNodeUpdate(node, oldNode *api.Node) field.ErrorList { - fldPath := field.NewPath("metadata") - allErrs := ValidateObjectMetaUpdate(&node.ObjectMeta, &oldNode.ObjectMeta, fldPath) - allErrs = append(allErrs, ValidateNodeSpecificAnnotations(node.ObjectMeta.Annotations, fldPath.Child("annotations"))...) - - // TODO: Enable the code once we have better api object.status update model. Currently, - // anyone can update node status. - // if !apiequality.Semantic.DeepEqual(node.Status, api.NodeStatus{}) { - // allErrs = append(allErrs, field.Invalid("status", node.Status, "must be empty")) - // } - - allErrs = append(allErrs, ValidateNodeResources(node)...) - - // Validate no duplicate addresses in node status. - addresses := make(map[api.NodeAddress]bool) - for i, address := range node.Status.Addresses { - if _, ok := addresses[address]; ok { - allErrs = append(allErrs, field.Duplicate(field.NewPath("status", "addresses").Index(i), address)) - } - addresses[address] = true - } - - if len(oldNode.Spec.PodCIDR) == 0 { - // Allow the controller manager to assign a CIDR to a node if it doesn't have one. - oldNode.Spec.PodCIDR = node.Spec.PodCIDR - } else { - if oldNode.Spec.PodCIDR != node.Spec.PodCIDR { - allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "podCIDR"), "node updates may not change podCIDR except from \"\" to valid")) - } - } - - // Allow controller manager updating provider ID when not set - if len(oldNode.Spec.ProviderID) == 0 { - oldNode.Spec.ProviderID = node.Spec.ProviderID - } else { - if oldNode.Spec.ProviderID != node.Spec.ProviderID { - allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "providerID"), "node updates may not change providerID except from \"\" to valid")) - } - } - - // TODO: move reset function to its own location - // Ignore metadata changes now that they have been tested - oldNode.ObjectMeta = node.ObjectMeta - // Allow users to update capacity - oldNode.Status.Capacity = node.Status.Capacity - // Allow users to unschedule node - oldNode.Spec.Unschedulable = node.Spec.Unschedulable - // Clear status - oldNode.Status = node.Status - - // update taints - if len(node.Spec.Taints) > 0 { - allErrs = append(allErrs, validateNodeTaints(node.Spec.Taints, fldPath.Child("taints"))...) - } - oldNode.Spec.Taints = node.Spec.Taints - - // Allow updates to Node.Spec.ConfigSource if DynamicKubeletConfig feature gate is enabled - if utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) { - oldNode.Spec.ConfigSource = node.Spec.ConfigSource - } - - // We made allowed changes to oldNode, and now we compare oldNode to node. Any remaining differences indicate changes to protected fields. - // TODO: Add a 'real' error type for this error and provide print actual diffs. 
- if !apiequality.Semantic.DeepEqual(oldNode, node) { - glog.V(4).Infof("Update failed validation %#v vs %#v", oldNode, node) - allErrs = append(allErrs, field.Forbidden(field.NewPath(""), "node updates may only change labels, taints, or capacity (or configSource, if the DynamicKubeletConfig feature gate is enabled)")) - } - - return allErrs -} - -// Validate compute resource typename. -// Refer to docs/design/resources.md for more details. -func validateResourceName(value string, fldPath *field.Path) field.ErrorList { - // Opaque integer resources (OIR) deprecation began in v1.8 - // TODO: Remove warning after OIR deprecation cycle. - if helper.IsOpaqueIntResourceName(api.ResourceName(value)) { - glog.Errorf("DEPRECATION WARNING! Opaque integer resources are deprecated starting with v1.8: %s", value) - } - - allErrs := field.ErrorList{} - for _, msg := range validation.IsQualifiedName(value) { - allErrs = append(allErrs, field.Invalid(fldPath, value, msg)) - } - if len(allErrs) != 0 { - return allErrs - } - - if len(strings.Split(value, "/")) == 1 { - if !helper.IsStandardResourceName(value) { - return append(allErrs, field.Invalid(fldPath, value, "must be a standard resource type or fully qualified")) - } - } - - return allErrs -} - -// Validate container resource name -// Refer to docs/design/resources.md for more details. -func validateContainerResourceName(value string, fldPath *field.Path) field.ErrorList { - allErrs := validateResourceName(value, fldPath) - - if len(strings.Split(value, "/")) == 1 { - if !helper.IsStandardContainerResourceName(value) { - return append(allErrs, field.Invalid(fldPath, value, "must be a standard resource for containers")) - } - } - return allErrs -} - -// isLocalStorageResource checks whether the resource is local ephemeral storage -func isLocalStorageResource(name string) bool { - if name == string(api.ResourceEphemeralStorage) || name == string(api.ResourceRequestsEphemeralStorage) || - name == string(api.ResourceLimitsEphemeralStorage) { - return true - } else { - return false - } -} - -// Validate resource names that can go in a resource quota -// Refer to docs/design/resources.md for more details. 
-func ValidateResourceQuotaResourceName(value string, fldPath *field.Path) field.ErrorList { - allErrs := validateResourceName(value, fldPath) - if isLocalStorageResource(value) && !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) { - return append(allErrs, field.Forbidden(fldPath, "ResourceEphemeralStorage field disabled by feature-gate for ResourceQuota")) - } - if len(strings.Split(value, "/")) == 1 { - if !helper.IsStandardQuotaResourceName(value) { - return append(allErrs, field.Invalid(fldPath, value, isInvalidQuotaResource)) - } - } - return allErrs -} - -// Validate limit range types -func validateLimitRangeTypeName(value string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - for _, msg := range validation.IsQualifiedName(value) { - allErrs = append(allErrs, field.Invalid(fldPath, value, msg)) - } - if len(allErrs) != 0 { - return allErrs - } - - if len(strings.Split(value, "/")) == 1 { - if !helper.IsStandardLimitRangeType(value) { - return append(allErrs, field.Invalid(fldPath, value, "must be a standard limit type or fully qualified")) - } - } - - return allErrs -} - -// Validate limit range resource name -// limit types (other than Pod/Container) could contain storage not just cpu or memory -func validateLimitRangeResourceName(limitType api.LimitType, value string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if value == string(api.ResourceEphemeralStorage) && !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) { - return append(allErrs, field.Forbidden(fldPath, "ResourceEphemeralStorage field disabled by feature-gate for Resource LimitRange")) - } - switch limitType { - case api.LimitTypePod, api.LimitTypeContainer: - return validateContainerResourceName(value, fldPath) - default: - return validateResourceName(value, fldPath) - } -} - -// ValidateLimitRange tests if required fields in the LimitRange are set. -func ValidateLimitRange(limitRange *api.LimitRange) field.ErrorList { - allErrs := ValidateObjectMeta(&limitRange.ObjectMeta, true, ValidateLimitRangeName, field.NewPath("metadata")) - - // ensure resource names are properly qualified per docs/design/resources.md - limitTypeSet := map[api.LimitType]bool{} - fldPath := field.NewPath("spec", "limits") - for i := range limitRange.Spec.Limits { - idxPath := fldPath.Index(i) - limit := &limitRange.Spec.Limits[i] - allErrs = append(allErrs, validateLimitRangeTypeName(string(limit.Type), idxPath.Child("type"))...) - - _, found := limitTypeSet[limit.Type] - if found { - allErrs = append(allErrs, field.Duplicate(idxPath.Child("type"), limit.Type)) - } - limitTypeSet[limit.Type] = true - - keys := sets.String{} - min := map[string]resource.Quantity{} - max := map[string]resource.Quantity{} - defaults := map[string]resource.Quantity{} - defaultRequests := map[string]resource.Quantity{} - maxLimitRequestRatios := map[string]resource.Quantity{} - - for k, q := range limit.Max { - allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("max").Key(string(k)))...) - keys.Insert(string(k)) - max[string(k)] = q - } - for k, q := range limit.Min { - allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("min").Key(string(k)))...) 
- keys.Insert(string(k)) - min[string(k)] = q - } - - if limit.Type == api.LimitTypePod { - if len(limit.Default) > 0 { - allErrs = append(allErrs, field.Forbidden(idxPath.Child("default"), "may not be specified when `type` is 'Pod'")) - } - if len(limit.DefaultRequest) > 0 { - allErrs = append(allErrs, field.Forbidden(idxPath.Child("defaultRequest"), "may not be specified when `type` is 'Pod'")) - } - } else { - for k, q := range limit.Default { - allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("default").Key(string(k)))...) - keys.Insert(string(k)) - defaults[string(k)] = q - } - for k, q := range limit.DefaultRequest { - allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("defaultRequest").Key(string(k)))...) - keys.Insert(string(k)) - defaultRequests[string(k)] = q - } - } - - if limit.Type == api.LimitTypePersistentVolumeClaim { - _, minQuantityFound := limit.Min[api.ResourceStorage] - _, maxQuantityFound := limit.Max[api.ResourceStorage] - if !minQuantityFound && !maxQuantityFound { - allErrs = append(allErrs, field.Required(idxPath.Child("limits"), "either minimum or maximum storage value is required, but neither was provided")) - } - } - - for k, q := range limit.MaxLimitRequestRatio { - allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("maxLimitRequestRatio").Key(string(k)))...) - keys.Insert(string(k)) - maxLimitRequestRatios[string(k)] = q - } - - for k := range keys { - minQuantity, minQuantityFound := min[k] - maxQuantity, maxQuantityFound := max[k] - defaultQuantity, defaultQuantityFound := defaults[k] - defaultRequestQuantity, defaultRequestQuantityFound := defaultRequests[k] - maxRatio, maxRatioFound := maxLimitRequestRatios[k] - - if minQuantityFound && maxQuantityFound && minQuantity.Cmp(maxQuantity) > 0 { - allErrs = append(allErrs, field.Invalid(idxPath.Child("min").Key(string(k)), minQuantity, fmt.Sprintf("min value %s is greater than max value %s", minQuantity.String(), maxQuantity.String()))) - } - - if defaultRequestQuantityFound && minQuantityFound && minQuantity.Cmp(defaultRequestQuantity) > 0 { - allErrs = append(allErrs, field.Invalid(idxPath.Child("defaultRequest").Key(string(k)), defaultRequestQuantity, fmt.Sprintf("min value %s is greater than default request value %s", minQuantity.String(), defaultRequestQuantity.String()))) - } - - if defaultRequestQuantityFound && maxQuantityFound && defaultRequestQuantity.Cmp(maxQuantity) > 0 { - allErrs = append(allErrs, field.Invalid(idxPath.Child("defaultRequest").Key(string(k)), defaultRequestQuantity, fmt.Sprintf("default request value %s is greater than max value %s", defaultRequestQuantity.String(), maxQuantity.String()))) - } - - if defaultRequestQuantityFound && defaultQuantityFound && defaultRequestQuantity.Cmp(defaultQuantity) > 0 { - allErrs = append(allErrs, field.Invalid(idxPath.Child("defaultRequest").Key(string(k)), defaultRequestQuantity, fmt.Sprintf("default request value %s is greater than default limit value %s", defaultRequestQuantity.String(), defaultQuantity.String()))) - } - - if defaultQuantityFound && minQuantityFound && minQuantity.Cmp(defaultQuantity) > 0 { - allErrs = append(allErrs, field.Invalid(idxPath.Child("default").Key(string(k)), minQuantity, fmt.Sprintf("min value %s is greater than default value %s", minQuantity.String(), defaultQuantity.String()))) - } - - if defaultQuantityFound && maxQuantityFound && defaultQuantity.Cmp(maxQuantity) > 0 { - allErrs = 
append(allErrs, field.Invalid(idxPath.Child("default").Key(string(k)), maxQuantity, fmt.Sprintf("default value %s is greater than max value %s", defaultQuantity.String(), maxQuantity.String()))) - } - if maxRatioFound && maxRatio.Cmp(*resource.NewQuantity(1, resource.DecimalSI)) < 0 { - allErrs = append(allErrs, field.Invalid(idxPath.Child("maxLimitRequestRatio").Key(string(k)), maxRatio, fmt.Sprintf("ratio %s is less than 1", maxRatio.String()))) - } - if maxRatioFound && minQuantityFound && maxQuantityFound { - maxRatioValue := float64(maxRatio.Value()) - minQuantityValue := minQuantity.Value() - maxQuantityValue := maxQuantity.Value() - if maxRatio.Value() < resource.MaxMilliValue && minQuantityValue < resource.MaxMilliValue && maxQuantityValue < resource.MaxMilliValue { - maxRatioValue = float64(maxRatio.MilliValue()) / 1000 - minQuantityValue = minQuantity.MilliValue() - maxQuantityValue = maxQuantity.MilliValue() - } - maxRatioLimit := float64(maxQuantityValue) / float64(minQuantityValue) - if maxRatioValue > maxRatioLimit { - allErrs = append(allErrs, field.Invalid(idxPath.Child("maxLimitRequestRatio").Key(string(k)), maxRatio, fmt.Sprintf("ratio %s is greater than max/min = %f", maxRatio.String(), maxRatioLimit))) - } - } - } - } - - return allErrs -} - -// ValidateServiceAccount tests if required fields in the ServiceAccount are set. -func ValidateServiceAccount(serviceAccount *api.ServiceAccount) field.ErrorList { - allErrs := ValidateObjectMeta(&serviceAccount.ObjectMeta, true, ValidateServiceAccountName, field.NewPath("metadata")) - return allErrs -} - -// ValidateServiceAccountUpdate tests if required fields in the ServiceAccount are set. -func ValidateServiceAccountUpdate(newServiceAccount, oldServiceAccount *api.ServiceAccount) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&newServiceAccount.ObjectMeta, &oldServiceAccount.ObjectMeta, field.NewPath("metadata")) - allErrs = append(allErrs, ValidateServiceAccount(newServiceAccount)...) - return allErrs -} - -// ValidateSecret tests if required fields in the Secret are set. -func ValidateSecret(secret *api.Secret) field.ErrorList { - allErrs := ValidateObjectMeta(&secret.ObjectMeta, true, ValidateSecretName, field.NewPath("metadata")) - - dataPath := field.NewPath("data") - totalSize := 0 - for key, value := range secret.Data { - for _, msg := range validation.IsConfigMapKey(key) { - allErrs = append(allErrs, field.Invalid(dataPath.Key(key), key, msg)) - } - totalSize += len(value) - } - if totalSize > api.MaxSecretSize { - allErrs = append(allErrs, field.TooLong(dataPath, "", api.MaxSecretSize)) - } - - switch secret.Type { - case api.SecretTypeServiceAccountToken: - // Only require Annotations[kubernetes.io/service-account.name] - // Additional fields (like Annotations[kubernetes.io/service-account.uid] and Data[token]) might be contributed later by a controller loop - if value := secret.Annotations[api.ServiceAccountNameKey]; len(value) == 0 { - allErrs = append(allErrs, field.Required(field.NewPath("metadata", "annotations").Key(api.ServiceAccountNameKey), "")) - } - case api.SecretTypeOpaque, "": - // no-op - case api.SecretTypeDockercfg: - dockercfgBytes, exists := secret.Data[api.DockerConfigKey] - if !exists { - allErrs = append(allErrs, field.Required(dataPath.Key(api.DockerConfigKey), "")) - break - } - - // make sure that the content is well-formed json. 
- if err := json.Unmarshal(dockercfgBytes, &map[string]interface{}{}); err != nil { - allErrs = append(allErrs, field.Invalid(dataPath.Key(api.DockerConfigKey), "", err.Error())) - } - case api.SecretTypeDockerConfigJson: - dockerConfigJsonBytes, exists := secret.Data[api.DockerConfigJsonKey] - if !exists { - allErrs = append(allErrs, field.Required(dataPath.Key(api.DockerConfigJsonKey), "")) - break - } - - // make sure that the content is well-formed json. - if err := json.Unmarshal(dockerConfigJsonBytes, &map[string]interface{}{}); err != nil { - allErrs = append(allErrs, field.Invalid(dataPath.Key(api.DockerConfigJsonKey), "", err.Error())) - } - case api.SecretTypeBasicAuth: - _, usernameFieldExists := secret.Data[api.BasicAuthUsernameKey] - _, passwordFieldExists := secret.Data[api.BasicAuthPasswordKey] - - // username or password might be empty, but the field must be present - if !usernameFieldExists && !passwordFieldExists { - allErrs = append(allErrs, field.Required(field.NewPath("data[%s]").Key(api.BasicAuthUsernameKey), "")) - allErrs = append(allErrs, field.Required(field.NewPath("data[%s]").Key(api.BasicAuthPasswordKey), "")) - break - } - case api.SecretTypeSSHAuth: - if len(secret.Data[api.SSHAuthPrivateKey]) == 0 { - allErrs = append(allErrs, field.Required(field.NewPath("data[%s]").Key(api.SSHAuthPrivateKey), "")) - break - } - - case api.SecretTypeTLS: - if _, exists := secret.Data[api.TLSCertKey]; !exists { - allErrs = append(allErrs, field.Required(dataPath.Key(api.TLSCertKey), "")) - } - if _, exists := secret.Data[api.TLSPrivateKeyKey]; !exists { - allErrs = append(allErrs, field.Required(dataPath.Key(api.TLSPrivateKeyKey), "")) - } - // TODO: Verify that the key matches the cert. - default: - // no-op - } - - return allErrs -} - -// ValidateSecretUpdate tests if required fields in the Secret are set. -func ValidateSecretUpdate(newSecret, oldSecret *api.Secret) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&newSecret.ObjectMeta, &oldSecret.ObjectMeta, field.NewPath("metadata")) - - if len(newSecret.Type) == 0 { - newSecret.Type = oldSecret.Type - } - - allErrs = append(allErrs, ValidateImmutableField(newSecret.Type, oldSecret.Type, field.NewPath("type"))...) - - allErrs = append(allErrs, ValidateSecret(newSecret)...) - return allErrs -} - -// ValidateConfigMapName can be used to check whether the given ConfigMap name is valid. -// Prefix indicates this name will be used as part of generation, in which case -// trailing dashes are allowed. -var ValidateConfigMapName = NameIsDNSSubdomain - -// ValidateConfigMap tests whether required fields in the ConfigMap are set. -func ValidateConfigMap(cfg *api.ConfigMap) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, ValidateObjectMeta(&cfg.ObjectMeta, true, ValidateConfigMapName, field.NewPath("metadata"))...) - - totalSize := 0 - - for key, value := range cfg.Data { - for _, msg := range validation.IsConfigMapKey(key) { - allErrs = append(allErrs, field.Invalid(field.NewPath("data").Key(key), key, msg)) - } - totalSize += len(value) - } - if totalSize > api.MaxSecretSize { - allErrs = append(allErrs, field.TooLong(field.NewPath("data"), "", api.MaxSecretSize)) - } - - return allErrs -} - -// ValidateConfigMapUpdate tests if required fields in the ConfigMap are set. 
-func ValidateConfigMapUpdate(newCfg, oldCfg *api.ConfigMap) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, ValidateObjectMetaUpdate(&newCfg.ObjectMeta, &oldCfg.ObjectMeta, field.NewPath("metadata"))...) - allErrs = append(allErrs, ValidateConfigMap(newCfg)...) - - return allErrs -} - -func validateBasicResource(quantity resource.Quantity, fldPath *field.Path) field.ErrorList { - if quantity.Value() < 0 { - return field.ErrorList{field.Invalid(fldPath, quantity.Value(), "must be a valid resource quantity")} - } - return field.ErrorList{} -} - -// Validates resource requirement spec. -func ValidateResourceRequirements(requirements *api.ResourceRequirements, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - limPath := fldPath.Child("limits") - reqPath := fldPath.Child("requests") - for resourceName, quantity := range requirements.Limits { - fldPath := limPath.Key(string(resourceName)) - // Validate resource name. - allErrs = append(allErrs, validateContainerResourceName(string(resourceName), fldPath)...) - - // Validate resource quantity. - allErrs = append(allErrs, ValidateResourceQuantityValue(string(resourceName), quantity, fldPath)...) - - if resourceName == api.ResourceEphemeralStorage && !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) { - allErrs = append(allErrs, field.Forbidden(limPath, "ResourceEphemeralStorage field disabled by feature-gate for ResourceRequirements")) - } - if helper.IsHugePageResourceName(resourceName) && !utilfeature.DefaultFeatureGate.Enabled(features.HugePages) { - allErrs = append(allErrs, field.Forbidden(limPath, fmt.Sprintf("%s field disabled by feature-gate for ResourceRequirements", resourceName))) - } - - } - for resourceName, quantity := range requirements.Requests { - fldPath := reqPath.Key(string(resourceName)) - // Validate resource name. - allErrs = append(allErrs, validateContainerResourceName(string(resourceName), fldPath)...) - // Validate resource quantity. - allErrs = append(allErrs, ValidateResourceQuantityValue(string(resourceName), quantity, fldPath)...) - - // Check that request <= limit. - limitQuantity, exists := requirements.Limits[resourceName] - if exists { - // For GPUs, not only requests can't exceed limits, they also can't be lower, i.e. must be equal. 
- if quantity.Cmp(limitQuantity) != 0 && !helper.IsOvercommitAllowed(resourceName) { - allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be equal to %s limit", api.ResourceNvidiaGPU))) - } else if quantity.Cmp(limitQuantity) > 0 { - allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be less than or equal to %s limit", resourceName))) - } - } else if resourceName == api.ResourceNvidiaGPU { - allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be equal to %s request", api.ResourceNvidiaGPU))) - } - } - - return allErrs -} - -// validateResourceQuotaScopes ensures that each enumerated hard resource constraint is valid for set of scopes -func validateResourceQuotaScopes(resourceQuotaSpec *api.ResourceQuotaSpec, fld *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(resourceQuotaSpec.Scopes) == 0 { - return allErrs - } - hardLimits := sets.NewString() - for k := range resourceQuotaSpec.Hard { - hardLimits.Insert(string(k)) - } - fldPath := fld.Child("scopes") - scopeSet := sets.NewString() - for _, scope := range resourceQuotaSpec.Scopes { - if !helper.IsStandardResourceQuotaScope(string(scope)) { - allErrs = append(allErrs, field.Invalid(fldPath, resourceQuotaSpec.Scopes, "unsupported scope")) - } - for _, k := range hardLimits.List() { - if helper.IsStandardQuotaResourceName(k) && !helper.IsResourceQuotaScopeValidForResource(scope, k) { - allErrs = append(allErrs, field.Invalid(fldPath, resourceQuotaSpec.Scopes, "unsupported scope applied to resource")) - } - } - scopeSet.Insert(string(scope)) - } - invalidScopePairs := []sets.String{ - sets.NewString(string(api.ResourceQuotaScopeBestEffort), string(api.ResourceQuotaScopeNotBestEffort)), - sets.NewString(string(api.ResourceQuotaScopeTerminating), string(api.ResourceQuotaScopeNotTerminating)), - } - for _, invalidScopePair := range invalidScopePairs { - if scopeSet.HasAll(invalidScopePair.List()...) { - allErrs = append(allErrs, field.Invalid(fldPath, resourceQuotaSpec.Scopes, "conflicting scopes")) - } - } - return allErrs -} - -// ValidateResourceQuota tests if required fields in the ResourceQuota are set. -func ValidateResourceQuota(resourceQuota *api.ResourceQuota) field.ErrorList { - allErrs := ValidateObjectMeta(&resourceQuota.ObjectMeta, true, ValidateResourceQuotaName, field.NewPath("metadata")) - - allErrs = append(allErrs, ValidateResourceQuotaSpec(&resourceQuota.Spec, field.NewPath("spec"))...) - allErrs = append(allErrs, ValidateResourceQuotaStatus(&resourceQuota.Status, field.NewPath("status"))...) - - return allErrs -} - -func ValidateResourceQuotaStatus(status *api.ResourceQuotaStatus, fld *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - fldPath := fld.Child("hard") - for k, v := range status.Hard { - resPath := fldPath.Key(string(k)) - allErrs = append(allErrs, ValidateResourceQuotaResourceName(string(k), resPath)...) - allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...) - } - fldPath = fld.Child("used") - for k, v := range status.Used { - resPath := fldPath.Key(string(k)) - allErrs = append(allErrs, ValidateResourceQuotaResourceName(string(k), resPath)...) - allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...) 
- } - - return allErrs -} - -func ValidateResourceQuotaSpec(resourceQuotaSpec *api.ResourceQuotaSpec, fld *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - fldPath := fld.Child("hard") - for k, v := range resourceQuotaSpec.Hard { - resPath := fldPath.Key(string(k)) - allErrs = append(allErrs, ValidateResourceQuotaResourceName(string(k), resPath)...) - allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...) - } - allErrs = append(allErrs, validateResourceQuotaScopes(resourceQuotaSpec, fld)...) - - return allErrs -} - -// ValidateResourceQuantityValue enforces that specified quantity is valid for specified resource -func ValidateResourceQuantityValue(resource string, value resource.Quantity, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, ValidateNonnegativeQuantity(value, fldPath)...) - if helper.IsIntegerResourceName(resource) { - if value.MilliValue()%int64(1000) != int64(0) { - allErrs = append(allErrs, field.Invalid(fldPath, value, isNotIntegerErrorMsg)) - } - } - return allErrs -} - -// ValidateResourceQuotaUpdate tests to see if the update is legal for an end user to make. -// newResourceQuota is updated with fields that cannot be changed. -func ValidateResourceQuotaUpdate(newResourceQuota, oldResourceQuota *api.ResourceQuota) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&newResourceQuota.ObjectMeta, &oldResourceQuota.ObjectMeta, field.NewPath("metadata")) - allErrs = append(allErrs, ValidateResourceQuotaSpec(&newResourceQuota.Spec, field.NewPath("spec"))...) - - // ensure scopes cannot change, and that resources are still valid for scope - fldPath := field.NewPath("spec", "scopes") - oldScopes := sets.NewString() - newScopes := sets.NewString() - for _, scope := range newResourceQuota.Spec.Scopes { - newScopes.Insert(string(scope)) - } - for _, scope := range oldResourceQuota.Spec.Scopes { - oldScopes.Insert(string(scope)) - } - if !oldScopes.Equal(newScopes) { - allErrs = append(allErrs, field.Invalid(fldPath, newResourceQuota.Spec.Scopes, fieldImmutableErrorMsg)) - } - - newResourceQuota.Status = oldResourceQuota.Status - return allErrs -} - -// ValidateResourceQuotaStatusUpdate tests to see if the status update is legal for an end user to make. -// newResourceQuota is updated with fields that cannot be changed. -func ValidateResourceQuotaStatusUpdate(newResourceQuota, oldResourceQuota *api.ResourceQuota) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&newResourceQuota.ObjectMeta, &oldResourceQuota.ObjectMeta, field.NewPath("metadata")) - if len(newResourceQuota.ResourceVersion) == 0 { - allErrs = append(allErrs, field.Required(field.NewPath("resourceVersion"), "")) - } - fldPath := field.NewPath("status", "hard") - for k, v := range newResourceQuota.Status.Hard { - resPath := fldPath.Key(string(k)) - allErrs = append(allErrs, ValidateResourceQuotaResourceName(string(k), resPath)...) - allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...) - } - fldPath = field.NewPath("status", "used") - for k, v := range newResourceQuota.Status.Used { - resPath := fldPath.Key(string(k)) - allErrs = append(allErrs, ValidateResourceQuotaResourceName(string(k), resPath)...) - allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...) - } - newResourceQuota.Spec = oldResourceQuota.Spec - return allErrs -} - -// ValidateNamespace tests if required fields are set. 
-func ValidateNamespace(namespace *api.Namespace) field.ErrorList { - allErrs := ValidateObjectMeta(&namespace.ObjectMeta, false, ValidateNamespaceName, field.NewPath("metadata")) - for i := range namespace.Spec.Finalizers { - allErrs = append(allErrs, validateFinalizerName(string(namespace.Spec.Finalizers[i]), field.NewPath("spec", "finalizers"))...) - } - return allErrs -} - -// Validate finalizer names -func validateFinalizerName(stringValue string, fldPath *field.Path) field.ErrorList { - allErrs := genericvalidation.ValidateFinalizerName(stringValue, fldPath) - for _, err := range validateKubeFinalizerName(stringValue, fldPath) { - allErrs = append(allErrs, err) - } - - return allErrs -} - -// validateKubeFinalizerName checks for "standard" names of legacy finalizer -func validateKubeFinalizerName(stringValue string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(strings.Split(stringValue, "/")) == 1 { - if !helper.IsStandardFinalizerName(stringValue) { - return append(allErrs, field.Invalid(fldPath, stringValue, "name is neither a standard finalizer name nor is it fully qualified")) - } - } - - return allErrs -} - -// ValidateNamespaceUpdate tests to make sure a namespace update can be applied. -// newNamespace is updated with fields that cannot be changed -func ValidateNamespaceUpdate(newNamespace *api.Namespace, oldNamespace *api.Namespace) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&newNamespace.ObjectMeta, &oldNamespace.ObjectMeta, field.NewPath("metadata")) - newNamespace.Spec.Finalizers = oldNamespace.Spec.Finalizers - newNamespace.Status = oldNamespace.Status - return allErrs -} - -// ValidateNamespaceStatusUpdate tests to see if the update is legal for an end user to make. newNamespace is updated with fields -// that cannot be changed. -func ValidateNamespaceStatusUpdate(newNamespace, oldNamespace *api.Namespace) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&newNamespace.ObjectMeta, &oldNamespace.ObjectMeta, field.NewPath("metadata")) - newNamespace.Spec = oldNamespace.Spec - if newNamespace.DeletionTimestamp.IsZero() { - if newNamespace.Status.Phase != api.NamespaceActive { - allErrs = append(allErrs, field.Invalid(field.NewPath("status", "Phase"), newNamespace.Status.Phase, "may only be 'Active' if `deletionTimestamp` is empty")) - } - } else { - if newNamespace.Status.Phase != api.NamespaceTerminating { - allErrs = append(allErrs, field.Invalid(field.NewPath("status", "Phase"), newNamespace.Status.Phase, "may only be 'Terminating' if `deletionTimestamp` is not empty")) - } - } - return allErrs -} - -// ValidateNamespaceFinalizeUpdate tests to see if the update is legal for an end user to make. -// newNamespace is updated with fields that cannot be changed. -func ValidateNamespaceFinalizeUpdate(newNamespace, oldNamespace *api.Namespace) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&newNamespace.ObjectMeta, &oldNamespace.ObjectMeta, field.NewPath("metadata")) - - fldPath := field.NewPath("spec", "finalizers") - for i := range newNamespace.Spec.Finalizers { - idxPath := fldPath.Index(i) - allErrs = append(allErrs, validateFinalizerName(string(newNamespace.Spec.Finalizers[i]), idxPath)...) - } - newNamespace.Status = oldNamespace.Status - return allErrs -} - -// Construct lookup map of old subset IPs to NodeNames. 
-func updateEpAddrToNodeNameMap(ipToNodeName map[string]string, addresses []api.EndpointAddress) { - for n := range addresses { - if addresses[n].NodeName == nil { - continue - } - ipToNodeName[addresses[n].IP] = *addresses[n].NodeName - } -} - -// Build a map across all subsets of IP -> NodeName -func buildEndpointAddressNodeNameMap(subsets []api.EndpointSubset) map[string]string { - ipToNodeName := make(map[string]string) - for i := range subsets { - updateEpAddrToNodeNameMap(ipToNodeName, subsets[i].Addresses) - updateEpAddrToNodeNameMap(ipToNodeName, subsets[i].NotReadyAddresses) - } - return ipToNodeName -} - -func validateEpAddrNodeNameTransition(addr *api.EndpointAddress, ipToNodeName map[string]string, fldPath *field.Path) field.ErrorList { - errList := field.ErrorList{} - existingNodeName, found := ipToNodeName[addr.IP] - if !found { - return errList - } - if addr.NodeName == nil || *addr.NodeName == existingNodeName { - return errList - } - // NodeName entry found for this endpoint IP, but user is attempting to change NodeName - return append(errList, field.Forbidden(fldPath, fmt.Sprintf("Cannot change NodeName for %s to %s", addr.IP, *addr.NodeName))) -} - -// ValidateEndpoints tests if required fields are set. -func ValidateEndpoints(endpoints *api.Endpoints) field.ErrorList { - allErrs := ValidateObjectMeta(&endpoints.ObjectMeta, true, ValidateEndpointsName, field.NewPath("metadata")) - allErrs = append(allErrs, ValidateEndpointsSpecificAnnotations(endpoints.Annotations, field.NewPath("annotations"))...) - allErrs = append(allErrs, validateEndpointSubsets(endpoints.Subsets, []api.EndpointSubset{}, field.NewPath("subsets"))...) - return allErrs -} - -func validateEndpointSubsets(subsets []api.EndpointSubset, oldSubsets []api.EndpointSubset, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - ipToNodeName := buildEndpointAddressNodeNameMap(oldSubsets) - for i := range subsets { - ss := &subsets[i] - idxPath := fldPath.Index(i) - - // EndpointSubsets must include endpoint address. For headless service, we allow its endpoints not to have ports. - if len(ss.Addresses) == 0 && len(ss.NotReadyAddresses) == 0 { - //TODO: consider adding a RequiredOneOf() error for this and similar cases - allErrs = append(allErrs, field.Required(idxPath, "must specify `addresses` or `notReadyAddresses`")) - } - for addr := range ss.Addresses { - allErrs = append(allErrs, validateEndpointAddress(&ss.Addresses[addr], idxPath.Child("addresses").Index(addr), ipToNodeName)...) - } - for addr := range ss.NotReadyAddresses { - allErrs = append(allErrs, validateEndpointAddress(&ss.NotReadyAddresses[addr], idxPath.Child("notReadyAddresses").Index(addr), ipToNodeName)...) - } - for port := range ss.Ports { - allErrs = append(allErrs, validateEndpointPort(&ss.Ports[port], len(ss.Ports) > 1, idxPath.Child("ports").Index(port))...) - } - } - - return allErrs -} - -func validateEndpointAddress(address *api.EndpointAddress, fldPath *field.Path, ipToNodeName map[string]string) field.ErrorList { - allErrs := field.ErrorList{} - for _, msg := range validation.IsValidIP(address.IP) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("ip"), address.IP, msg)) - } - if len(address.Hostname) > 0 { - allErrs = append(allErrs, ValidateDNS1123Label(address.Hostname, fldPath.Child("hostname"))...) 
- } - // During endpoint update, verify that NodeName is a DNS subdomain and transition rules allow the update - if address.NodeName != nil { - for _, msg := range ValidateNodeName(*address.NodeName, false) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("nodeName"), *address.NodeName, msg)) - } - } - allErrs = append(allErrs, validateEpAddrNodeNameTransition(address, ipToNodeName, fldPath.Child("nodeName"))...) - if len(allErrs) > 0 { - return allErrs - } - allErrs = append(allErrs, validateNonSpecialIP(address.IP, fldPath.Child("ip"))...) - return allErrs -} - -func validateNonSpecialIP(ipAddress string, fldPath *field.Path) field.ErrorList { - // We disallow some IPs as endpoints or external-ips. Specifically, - // unspecified and loopback addresses are nonsensical and link-local - // addresses tend to be used for node-centric purposes (e.g. metadata - // service). - allErrs := field.ErrorList{} - ip := net.ParseIP(ipAddress) - if ip == nil { - allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "must be a valid IP address")) - return allErrs - } - if ip.IsUnspecified() { - allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "may not be unspecified (0.0.0.0)")) - } - if ip.IsLoopback() { - allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "may not be in the loopback range (127.0.0.0/8)")) - } - if ip.IsLinkLocalUnicast() { - allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "may not be in the link-local range (169.254.0.0/16)")) - } - if ip.IsLinkLocalMulticast() { - allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "may not be in the link-local multicast range (224.0.0.0/24)")) - } - return allErrs -} - -func validateEndpointPort(port *api.EndpointPort, requireName bool, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if requireName && len(port.Name) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) - } else if len(port.Name) != 0 { - allErrs = append(allErrs, ValidateDNS1123Label(port.Name, fldPath.Child("name"))...) - } - for _, msg := range validation.IsValidPortNum(int(port.Port)) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("port"), port.Port, msg)) - } - if len(port.Protocol) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("protocol"), "")) - } else if !supportedPortProtocols.Has(string(port.Protocol)) { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("protocol"), port.Protocol, supportedPortProtocols.List())) - } - return allErrs -} - -// ValidateEndpointsUpdate tests to make sure an endpoints update can be applied. -func ValidateEndpointsUpdate(newEndpoints, oldEndpoints *api.Endpoints) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&newEndpoints.ObjectMeta, &oldEndpoints.ObjectMeta, field.NewPath("metadata")) - allErrs = append(allErrs, validateEndpointSubsets(newEndpoints.Subsets, oldEndpoints.Subsets, field.NewPath("subsets"))...) - allErrs = append(allErrs, ValidateEndpointsSpecificAnnotations(newEndpoints.Annotations, field.NewPath("annotations"))...) 
- return allErrs -} - -// ValidateSecurityContext ensure the security context contains valid settings -func ValidateSecurityContext(sc *api.SecurityContext, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - //this should only be true for testing since SecurityContext is defaulted by the api - if sc == nil { - return allErrs - } - - if sc.Privileged != nil { - if *sc.Privileged && !capabilities.Get().AllowPrivileged { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("privileged"), "disallowed by cluster policy")) - } - } - - if sc.RunAsUser != nil { - if *sc.RunAsUser < 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("runAsUser"), *sc.RunAsUser, isNegativeErrorMsg)) - } - } - - if sc.AllowPrivilegeEscalation != nil && !*sc.AllowPrivilegeEscalation { - if sc.Privileged != nil && *sc.Privileged { - allErrs = append(allErrs, field.Invalid(fldPath, sc, "cannot set `allowPrivilegeEscalation` to false and `privileged` to true")) - } - - if sc.Capabilities != nil { - for _, cap := range sc.Capabilities.Add { - if string(cap) == "CAP_SYS_ADMIN" { - allErrs = append(allErrs, field.Invalid(fldPath, sc, "cannot set `allowPrivilegeEscalation` to false and `capabilities.Add` CAP_SYS_ADMIN")) - } - } - } - } - - return allErrs -} - -func ValidatePodLogOptions(opts *api.PodLogOptions) field.ErrorList { - allErrs := field.ErrorList{} - if opts.TailLines != nil && *opts.TailLines < 0 { - allErrs = append(allErrs, field.Invalid(field.NewPath("tailLines"), *opts.TailLines, isNegativeErrorMsg)) - } - if opts.LimitBytes != nil && *opts.LimitBytes < 1 { - allErrs = append(allErrs, field.Invalid(field.NewPath("limitBytes"), *opts.LimitBytes, "must be greater than 0")) - } - switch { - case opts.SinceSeconds != nil && opts.SinceTime != nil: - allErrs = append(allErrs, field.Forbidden(field.NewPath(""), "at most one of `sinceTime` or `sinceSeconds` may be specified")) - case opts.SinceSeconds != nil: - if *opts.SinceSeconds < 1 { - allErrs = append(allErrs, field.Invalid(field.NewPath("sinceSeconds"), *opts.SinceSeconds, "must be greater than 0")) - } - } - return allErrs -} - -// ValidateLoadBalancerStatus validates required fields on a LoadBalancerStatus -func ValidateLoadBalancerStatus(status *api.LoadBalancerStatus, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - for i, ingress := range status.Ingress { - idxPath := fldPath.Child("ingress").Index(i) - if len(ingress.IP) > 0 { - if isIP := (net.ParseIP(ingress.IP) != nil); !isIP { - allErrs = append(allErrs, field.Invalid(idxPath.Child("ip"), ingress.IP, "must be a valid IP address")) - } - } - if len(ingress.Hostname) > 0 { - for _, msg := range validation.IsDNS1123Subdomain(ingress.Hostname) { - allErrs = append(allErrs, field.Invalid(idxPath.Child("hostname"), ingress.Hostname, msg)) - } - if isIP := (net.ParseIP(ingress.Hostname) != nil); isIP { - allErrs = append(allErrs, field.Invalid(idxPath.Child("hostname"), ingress.Hostname, "must be a DNS name, not an IP address")) - } - } - } - return allErrs -} - -func sysctlIntersection(a []api.Sysctl, b []api.Sysctl) []string { - lookup := make(map[string]struct{}, len(a)) - result := []string{} - for i := range a { - lookup[a[i].Name] = struct{}{} - } - for i := range b { - if _, found := lookup[b[i].Name]; found { - result = append(result, b[i].Name) - } - } - return result -} - -// validateStorageNodeAffinityAnnotation tests that the serialized TopologyConstraints in PersistentVolume.Annotations has valid data -func 
validateStorageNodeAffinityAnnotation(annotations map[string]string, fldPath *field.Path) (bool, field.ErrorList) { - allErrs := field.ErrorList{} - - na, err := helper.GetStorageNodeAffinityFromAnnotation(annotations) - if err != nil { - allErrs = append(allErrs, field.Invalid(fldPath, api.AlphaStorageNodeAffinityAnnotation, err.Error())) - return false, allErrs - } - if na == nil { - return false, allErrs - } - - if !utilfeature.DefaultFeatureGate.Enabled(features.PersistentLocalVolumes) { - allErrs = append(allErrs, field.Forbidden(fldPath, "Storage node affinity is disabled by feature-gate")) - } - - policySpecified := false - if na.RequiredDuringSchedulingIgnoredDuringExecution != nil { - allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingIgnoredDuringExecution, fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...) - policySpecified = true - } - - if len(na.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("preferredDuringSchedulingIgnoredDuringExection"), "Storage node affinity does not support preferredDuringSchedulingIgnoredDuringExecution")) - } - return policySpecified, allErrs -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/api/zz_generated.deepcopy.go deleted file mode 100644 index 938b7316a05f..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/zz_generated.deepcopy.go +++ /dev/null @@ -1,6320 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package api - -import ( - resource "k8s.io/apimachinery/pkg/api/resource" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - types "k8s.io/apimachinery/pkg/types" - reflect "reflect" -) - -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AWSElasticBlockStoreVolumeSource).DeepCopyInto(out.(*AWSElasticBlockStoreVolumeSource)) - return nil - }, InType: reflect.TypeOf(&AWSElasticBlockStoreVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Affinity).DeepCopyInto(out.(*Affinity)) - return nil - }, InType: reflect.TypeOf(&Affinity{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AttachedVolume).DeepCopyInto(out.(*AttachedVolume)) - return nil - }, InType: reflect.TypeOf(&AttachedVolume{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AvoidPods).DeepCopyInto(out.(*AvoidPods)) - return nil - }, InType: reflect.TypeOf(&AvoidPods{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AzureDiskVolumeSource).DeepCopyInto(out.(*AzureDiskVolumeSource)) - return nil - }, InType: reflect.TypeOf(&AzureDiskVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AzureFilePersistentVolumeSource).DeepCopyInto(out.(*AzureFilePersistentVolumeSource)) - return nil - }, InType: reflect.TypeOf(&AzureFilePersistentVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AzureFileVolumeSource).DeepCopyInto(out.(*AzureFileVolumeSource)) - return nil - }, InType: reflect.TypeOf(&AzureFileVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Binding).DeepCopyInto(out.(*Binding)) - return nil - }, InType: reflect.TypeOf(&Binding{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Capabilities).DeepCopyInto(out.(*Capabilities)) - return nil - }, InType: reflect.TypeOf(&Capabilities{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CephFSPersistentVolumeSource).DeepCopyInto(out.(*CephFSPersistentVolumeSource)) - return nil - }, InType: reflect.TypeOf(&CephFSPersistentVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CephFSVolumeSource).DeepCopyInto(out.(*CephFSVolumeSource)) - return nil - }, InType: reflect.TypeOf(&CephFSVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CinderVolumeSource).DeepCopyInto(out.(*CinderVolumeSource)) - return nil - }, InType: reflect.TypeOf(&CinderVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClientIPConfig).DeepCopyInto(out.(*ClientIPConfig)) - return nil - }, InType: reflect.TypeOf(&ClientIPConfig{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ComponentCondition).DeepCopyInto(out.(*ComponentCondition)) - return nil - }, InType: reflect.TypeOf(&ComponentCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c 
*conversion.Cloner) error { - in.(*ComponentStatus).DeepCopyInto(out.(*ComponentStatus)) - return nil - }, InType: reflect.TypeOf(&ComponentStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ComponentStatusList).DeepCopyInto(out.(*ComponentStatusList)) - return nil - }, InType: reflect.TypeOf(&ComponentStatusList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMap).DeepCopyInto(out.(*ConfigMap)) - return nil - }, InType: reflect.TypeOf(&ConfigMap{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMapEnvSource).DeepCopyInto(out.(*ConfigMapEnvSource)) - return nil - }, InType: reflect.TypeOf(&ConfigMapEnvSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMapKeySelector).DeepCopyInto(out.(*ConfigMapKeySelector)) - return nil - }, InType: reflect.TypeOf(&ConfigMapKeySelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMapList).DeepCopyInto(out.(*ConfigMapList)) - return nil - }, InType: reflect.TypeOf(&ConfigMapList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMapProjection).DeepCopyInto(out.(*ConfigMapProjection)) - return nil - }, InType: reflect.TypeOf(&ConfigMapProjection{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMapVolumeSource).DeepCopyInto(out.(*ConfigMapVolumeSource)) - return nil - }, InType: reflect.TypeOf(&ConfigMapVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Container).DeepCopyInto(out.(*Container)) - return nil - }, InType: reflect.TypeOf(&Container{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerImage).DeepCopyInto(out.(*ContainerImage)) - return nil - }, InType: reflect.TypeOf(&ContainerImage{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerPort).DeepCopyInto(out.(*ContainerPort)) - return nil - }, InType: reflect.TypeOf(&ContainerPort{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerState).DeepCopyInto(out.(*ContainerState)) - return nil - }, InType: reflect.TypeOf(&ContainerState{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerStateRunning).DeepCopyInto(out.(*ContainerStateRunning)) - return nil - }, InType: reflect.TypeOf(&ContainerStateRunning{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerStateTerminated).DeepCopyInto(out.(*ContainerStateTerminated)) - return nil - }, InType: reflect.TypeOf(&ContainerStateTerminated{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerStateWaiting).DeepCopyInto(out.(*ContainerStateWaiting)) - return nil - }, InType: reflect.TypeOf(&ContainerStateWaiting{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c 
*conversion.Cloner) error { - in.(*ContainerStatus).DeepCopyInto(out.(*ContainerStatus)) - return nil - }, InType: reflect.TypeOf(&ContainerStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonEndpoint).DeepCopyInto(out.(*DaemonEndpoint)) - return nil - }, InType: reflect.TypeOf(&DaemonEndpoint{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeleteOptions).DeepCopyInto(out.(*DeleteOptions)) - return nil - }, InType: reflect.TypeOf(&DeleteOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DownwardAPIProjection).DeepCopyInto(out.(*DownwardAPIProjection)) - return nil - }, InType: reflect.TypeOf(&DownwardAPIProjection{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DownwardAPIVolumeFile).DeepCopyInto(out.(*DownwardAPIVolumeFile)) - return nil - }, InType: reflect.TypeOf(&DownwardAPIVolumeFile{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DownwardAPIVolumeSource).DeepCopyInto(out.(*DownwardAPIVolumeSource)) - return nil - }, InType: reflect.TypeOf(&DownwardAPIVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EmptyDirVolumeSource).DeepCopyInto(out.(*EmptyDirVolumeSource)) - return nil - }, InType: reflect.TypeOf(&EmptyDirVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EndpointAddress).DeepCopyInto(out.(*EndpointAddress)) - return nil - }, InType: reflect.TypeOf(&EndpointAddress{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EndpointPort).DeepCopyInto(out.(*EndpointPort)) - return nil - }, InType: reflect.TypeOf(&EndpointPort{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EndpointSubset).DeepCopyInto(out.(*EndpointSubset)) - return nil - }, InType: reflect.TypeOf(&EndpointSubset{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Endpoints).DeepCopyInto(out.(*Endpoints)) - return nil - }, InType: reflect.TypeOf(&Endpoints{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EndpointsList).DeepCopyInto(out.(*EndpointsList)) - return nil - }, InType: reflect.TypeOf(&EndpointsList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EnvFromSource).DeepCopyInto(out.(*EnvFromSource)) - return nil - }, InType: reflect.TypeOf(&EnvFromSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EnvVar).DeepCopyInto(out.(*EnvVar)) - return nil - }, InType: reflect.TypeOf(&EnvVar{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EnvVarSource).DeepCopyInto(out.(*EnvVarSource)) - return nil - }, InType: reflect.TypeOf(&EnvVarSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Event).DeepCopyInto(out.(*Event)) - return nil - }, InType: 
reflect.TypeOf(&Event{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EventList).DeepCopyInto(out.(*EventList)) - return nil - }, InType: reflect.TypeOf(&EventList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EventSource).DeepCopyInto(out.(*EventSource)) - return nil - }, InType: reflect.TypeOf(&EventSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ExecAction).DeepCopyInto(out.(*ExecAction)) - return nil - }, InType: reflect.TypeOf(&ExecAction{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*FCVolumeSource).DeepCopyInto(out.(*FCVolumeSource)) - return nil - }, InType: reflect.TypeOf(&FCVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*FlexVolumeSource).DeepCopyInto(out.(*FlexVolumeSource)) - return nil - }, InType: reflect.TypeOf(&FlexVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*FlockerVolumeSource).DeepCopyInto(out.(*FlockerVolumeSource)) - return nil - }, InType: reflect.TypeOf(&FlockerVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GCEPersistentDiskVolumeSource).DeepCopyInto(out.(*GCEPersistentDiskVolumeSource)) - return nil - }, InType: reflect.TypeOf(&GCEPersistentDiskVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GitRepoVolumeSource).DeepCopyInto(out.(*GitRepoVolumeSource)) - return nil - }, InType: reflect.TypeOf(&GitRepoVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GlusterfsVolumeSource).DeepCopyInto(out.(*GlusterfsVolumeSource)) - return nil - }, InType: reflect.TypeOf(&GlusterfsVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HTTPGetAction).DeepCopyInto(out.(*HTTPGetAction)) - return nil - }, InType: reflect.TypeOf(&HTTPGetAction{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HTTPHeader).DeepCopyInto(out.(*HTTPHeader)) - return nil - }, InType: reflect.TypeOf(&HTTPHeader{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Handler).DeepCopyInto(out.(*Handler)) - return nil - }, InType: reflect.TypeOf(&Handler{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HostAlias).DeepCopyInto(out.(*HostAlias)) - return nil - }, InType: reflect.TypeOf(&HostAlias{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HostPathVolumeSource).DeepCopyInto(out.(*HostPathVolumeSource)) - return nil - }, InType: reflect.TypeOf(&HostPathVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ISCSIVolumeSource).DeepCopyInto(out.(*ISCSIVolumeSource)) - return nil - }, InType: reflect.TypeOf(&ISCSIVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out 
interface{}, c *conversion.Cloner) error { - in.(*KeyToPath).DeepCopyInto(out.(*KeyToPath)) - return nil - }, InType: reflect.TypeOf(&KeyToPath{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Lifecycle).DeepCopyInto(out.(*Lifecycle)) - return nil - }, InType: reflect.TypeOf(&Lifecycle{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LimitRange).DeepCopyInto(out.(*LimitRange)) - return nil - }, InType: reflect.TypeOf(&LimitRange{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LimitRangeItem).DeepCopyInto(out.(*LimitRangeItem)) - return nil - }, InType: reflect.TypeOf(&LimitRangeItem{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LimitRangeList).DeepCopyInto(out.(*LimitRangeList)) - return nil - }, InType: reflect.TypeOf(&LimitRangeList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LimitRangeSpec).DeepCopyInto(out.(*LimitRangeSpec)) - return nil - }, InType: reflect.TypeOf(&LimitRangeSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*List).DeepCopyInto(out.(*List)) - return nil - }, InType: reflect.TypeOf(&List{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ListOptions).DeepCopyInto(out.(*ListOptions)) - return nil - }, InType: reflect.TypeOf(&ListOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LoadBalancerIngress).DeepCopyInto(out.(*LoadBalancerIngress)) - return nil - }, InType: reflect.TypeOf(&LoadBalancerIngress{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LoadBalancerStatus).DeepCopyInto(out.(*LoadBalancerStatus)) - return nil - }, InType: reflect.TypeOf(&LoadBalancerStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LocalObjectReference).DeepCopyInto(out.(*LocalObjectReference)) - return nil - }, InType: reflect.TypeOf(&LocalObjectReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LocalVolumeSource).DeepCopyInto(out.(*LocalVolumeSource)) - return nil - }, InType: reflect.TypeOf(&LocalVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NFSVolumeSource).DeepCopyInto(out.(*NFSVolumeSource)) - return nil - }, InType: reflect.TypeOf(&NFSVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Namespace).DeepCopyInto(out.(*Namespace)) - return nil - }, InType: reflect.TypeOf(&Namespace{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NamespaceList).DeepCopyInto(out.(*NamespaceList)) - return nil - }, InType: reflect.TypeOf(&NamespaceList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NamespaceSpec).DeepCopyInto(out.(*NamespaceSpec)) - return nil - }, InType: reflect.TypeOf(&NamespaceSpec{})}, - 
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NamespaceStatus).DeepCopyInto(out.(*NamespaceStatus)) - return nil - }, InType: reflect.TypeOf(&NamespaceStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Node).DeepCopyInto(out.(*Node)) - return nil - }, InType: reflect.TypeOf(&Node{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeAddress).DeepCopyInto(out.(*NodeAddress)) - return nil - }, InType: reflect.TypeOf(&NodeAddress{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeAffinity).DeepCopyInto(out.(*NodeAffinity)) - return nil - }, InType: reflect.TypeOf(&NodeAffinity{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeCondition).DeepCopyInto(out.(*NodeCondition)) - return nil - }, InType: reflect.TypeOf(&NodeCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeConfigSource).DeepCopyInto(out.(*NodeConfigSource)) - return nil - }, InType: reflect.TypeOf(&NodeConfigSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeDaemonEndpoints).DeepCopyInto(out.(*NodeDaemonEndpoints)) - return nil - }, InType: reflect.TypeOf(&NodeDaemonEndpoints{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeList).DeepCopyInto(out.(*NodeList)) - return nil - }, InType: reflect.TypeOf(&NodeList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeProxyOptions).DeepCopyInto(out.(*NodeProxyOptions)) - return nil - }, InType: reflect.TypeOf(&NodeProxyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeResources).DeepCopyInto(out.(*NodeResources)) - return nil - }, InType: reflect.TypeOf(&NodeResources{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeSelector).DeepCopyInto(out.(*NodeSelector)) - return nil - }, InType: reflect.TypeOf(&NodeSelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeSelectorRequirement).DeepCopyInto(out.(*NodeSelectorRequirement)) - return nil - }, InType: reflect.TypeOf(&NodeSelectorRequirement{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeSelectorTerm).DeepCopyInto(out.(*NodeSelectorTerm)) - return nil - }, InType: reflect.TypeOf(&NodeSelectorTerm{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeSpec).DeepCopyInto(out.(*NodeSpec)) - return nil - }, InType: reflect.TypeOf(&NodeSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeStatus).DeepCopyInto(out.(*NodeStatus)) - return nil - }, InType: reflect.TypeOf(&NodeStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeSystemInfo).DeepCopyInto(out.(*NodeSystemInfo)) - return nil 
- }, InType: reflect.TypeOf(&NodeSystemInfo{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ObjectFieldSelector).DeepCopyInto(out.(*ObjectFieldSelector)) - return nil - }, InType: reflect.TypeOf(&ObjectFieldSelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ObjectMeta).DeepCopyInto(out.(*ObjectMeta)) - return nil - }, InType: reflect.TypeOf(&ObjectMeta{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ObjectReference).DeepCopyInto(out.(*ObjectReference)) - return nil - }, InType: reflect.TypeOf(&ObjectReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolume).DeepCopyInto(out.(*PersistentVolume)) - return nil - }, InType: reflect.TypeOf(&PersistentVolume{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaim).DeepCopyInto(out.(*PersistentVolumeClaim)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaim{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaimCondition).DeepCopyInto(out.(*PersistentVolumeClaimCondition)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaimCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaimList).DeepCopyInto(out.(*PersistentVolumeClaimList)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaimList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaimSpec).DeepCopyInto(out.(*PersistentVolumeClaimSpec)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaimSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaimStatus).DeepCopyInto(out.(*PersistentVolumeClaimStatus)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaimStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaimVolumeSource).DeepCopyInto(out.(*PersistentVolumeClaimVolumeSource)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaimVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeList).DeepCopyInto(out.(*PersistentVolumeList)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeSource).DeepCopyInto(out.(*PersistentVolumeSource)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeSpec).DeepCopyInto(out.(*PersistentVolumeSpec)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeStatus).DeepCopyInto(out.(*PersistentVolumeStatus)) - return nil - }, InType: 
reflect.TypeOf(&PersistentVolumeStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PhotonPersistentDiskVolumeSource).DeepCopyInto(out.(*PhotonPersistentDiskVolumeSource)) - return nil - }, InType: reflect.TypeOf(&PhotonPersistentDiskVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Pod).DeepCopyInto(out.(*Pod)) - return nil - }, InType: reflect.TypeOf(&Pod{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodAffinity).DeepCopyInto(out.(*PodAffinity)) - return nil - }, InType: reflect.TypeOf(&PodAffinity{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodAffinityTerm).DeepCopyInto(out.(*PodAffinityTerm)) - return nil - }, InType: reflect.TypeOf(&PodAffinityTerm{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodAntiAffinity).DeepCopyInto(out.(*PodAntiAffinity)) - return nil - }, InType: reflect.TypeOf(&PodAntiAffinity{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodAttachOptions).DeepCopyInto(out.(*PodAttachOptions)) - return nil - }, InType: reflect.TypeOf(&PodAttachOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodCondition).DeepCopyInto(out.(*PodCondition)) - return nil - }, InType: reflect.TypeOf(&PodCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodExecOptions).DeepCopyInto(out.(*PodExecOptions)) - return nil - }, InType: reflect.TypeOf(&PodExecOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodList).DeepCopyInto(out.(*PodList)) - return nil - }, InType: reflect.TypeOf(&PodList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodLogOptions).DeepCopyInto(out.(*PodLogOptions)) - return nil - }, InType: reflect.TypeOf(&PodLogOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodPortForwardOptions).DeepCopyInto(out.(*PodPortForwardOptions)) - return nil - }, InType: reflect.TypeOf(&PodPortForwardOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodProxyOptions).DeepCopyInto(out.(*PodProxyOptions)) - return nil - }, InType: reflect.TypeOf(&PodProxyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodSecurityContext).DeepCopyInto(out.(*PodSecurityContext)) - return nil - }, InType: reflect.TypeOf(&PodSecurityContext{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodSignature).DeepCopyInto(out.(*PodSignature)) - return nil - }, InType: reflect.TypeOf(&PodSignature{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodSpec).DeepCopyInto(out.(*PodSpec)) - return nil - }, InType: reflect.TypeOf(&PodSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c 
*conversion.Cloner) error { - in.(*PodStatus).DeepCopyInto(out.(*PodStatus)) - return nil - }, InType: reflect.TypeOf(&PodStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodStatusResult).DeepCopyInto(out.(*PodStatusResult)) - return nil - }, InType: reflect.TypeOf(&PodStatusResult{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodTemplate).DeepCopyInto(out.(*PodTemplate)) - return nil - }, InType: reflect.TypeOf(&PodTemplate{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodTemplateList).DeepCopyInto(out.(*PodTemplateList)) - return nil - }, InType: reflect.TypeOf(&PodTemplateList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodTemplateSpec).DeepCopyInto(out.(*PodTemplateSpec)) - return nil - }, InType: reflect.TypeOf(&PodTemplateSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PortworxVolumeSource).DeepCopyInto(out.(*PortworxVolumeSource)) - return nil - }, InType: reflect.TypeOf(&PortworxVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Preconditions).DeepCopyInto(out.(*Preconditions)) - return nil - }, InType: reflect.TypeOf(&Preconditions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PreferAvoidPodsEntry).DeepCopyInto(out.(*PreferAvoidPodsEntry)) - return nil - }, InType: reflect.TypeOf(&PreferAvoidPodsEntry{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PreferredSchedulingTerm).DeepCopyInto(out.(*PreferredSchedulingTerm)) - return nil - }, InType: reflect.TypeOf(&PreferredSchedulingTerm{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Probe).DeepCopyInto(out.(*Probe)) - return nil - }, InType: reflect.TypeOf(&Probe{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ProjectedVolumeSource).DeepCopyInto(out.(*ProjectedVolumeSource)) - return nil - }, InType: reflect.TypeOf(&ProjectedVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*QuobyteVolumeSource).DeepCopyInto(out.(*QuobyteVolumeSource)) - return nil - }, InType: reflect.TypeOf(&QuobyteVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RBDVolumeSource).DeepCopyInto(out.(*RBDVolumeSource)) - return nil - }, InType: reflect.TypeOf(&RBDVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RangeAllocation).DeepCopyInto(out.(*RangeAllocation)) - return nil - }, InType: reflect.TypeOf(&RangeAllocation{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicationController).DeepCopyInto(out.(*ReplicationController)) - return nil - }, InType: reflect.TypeOf(&ReplicationController{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - 
in.(*ReplicationControllerCondition).DeepCopyInto(out.(*ReplicationControllerCondition)) - return nil - }, InType: reflect.TypeOf(&ReplicationControllerCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicationControllerList).DeepCopyInto(out.(*ReplicationControllerList)) - return nil - }, InType: reflect.TypeOf(&ReplicationControllerList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicationControllerSpec).DeepCopyInto(out.(*ReplicationControllerSpec)) - return nil - }, InType: reflect.TypeOf(&ReplicationControllerSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicationControllerStatus).DeepCopyInto(out.(*ReplicationControllerStatus)) - return nil - }, InType: reflect.TypeOf(&ReplicationControllerStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceFieldSelector).DeepCopyInto(out.(*ResourceFieldSelector)) - return nil - }, InType: reflect.TypeOf(&ResourceFieldSelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceQuota).DeepCopyInto(out.(*ResourceQuota)) - return nil - }, InType: reflect.TypeOf(&ResourceQuota{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceQuotaList).DeepCopyInto(out.(*ResourceQuotaList)) - return nil - }, InType: reflect.TypeOf(&ResourceQuotaList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceQuotaSpec).DeepCopyInto(out.(*ResourceQuotaSpec)) - return nil - }, InType: reflect.TypeOf(&ResourceQuotaSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceQuotaStatus).DeepCopyInto(out.(*ResourceQuotaStatus)) - return nil - }, InType: reflect.TypeOf(&ResourceQuotaStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceRequirements).DeepCopyInto(out.(*ResourceRequirements)) - return nil - }, InType: reflect.TypeOf(&ResourceRequirements{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SELinuxOptions).DeepCopyInto(out.(*SELinuxOptions)) - return nil - }, InType: reflect.TypeOf(&SELinuxOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ScaleIOVolumeSource).DeepCopyInto(out.(*ScaleIOVolumeSource)) - return nil - }, InType: reflect.TypeOf(&ScaleIOVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Secret).DeepCopyInto(out.(*Secret)) - return nil - }, InType: reflect.TypeOf(&Secret{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretEnvSource).DeepCopyInto(out.(*SecretEnvSource)) - return nil - }, InType: reflect.TypeOf(&SecretEnvSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretKeySelector).DeepCopyInto(out.(*SecretKeySelector)) - return nil - }, InType: reflect.TypeOf(&SecretKeySelector{})}, - 
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretList).DeepCopyInto(out.(*SecretList)) - return nil - }, InType: reflect.TypeOf(&SecretList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretProjection).DeepCopyInto(out.(*SecretProjection)) - return nil - }, InType: reflect.TypeOf(&SecretProjection{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretReference).DeepCopyInto(out.(*SecretReference)) - return nil - }, InType: reflect.TypeOf(&SecretReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretVolumeSource).DeepCopyInto(out.(*SecretVolumeSource)) - return nil - }, InType: reflect.TypeOf(&SecretVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecurityContext).DeepCopyInto(out.(*SecurityContext)) - return nil - }, InType: reflect.TypeOf(&SecurityContext{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SerializedReference).DeepCopyInto(out.(*SerializedReference)) - return nil - }, InType: reflect.TypeOf(&SerializedReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Service).DeepCopyInto(out.(*Service)) - return nil - }, InType: reflect.TypeOf(&Service{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceAccount).DeepCopyInto(out.(*ServiceAccount)) - return nil - }, InType: reflect.TypeOf(&ServiceAccount{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceAccountList).DeepCopyInto(out.(*ServiceAccountList)) - return nil - }, InType: reflect.TypeOf(&ServiceAccountList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceList).DeepCopyInto(out.(*ServiceList)) - return nil - }, InType: reflect.TypeOf(&ServiceList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServicePort).DeepCopyInto(out.(*ServicePort)) - return nil - }, InType: reflect.TypeOf(&ServicePort{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceProxyOptions).DeepCopyInto(out.(*ServiceProxyOptions)) - return nil - }, InType: reflect.TypeOf(&ServiceProxyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceSpec).DeepCopyInto(out.(*ServiceSpec)) - return nil - }, InType: reflect.TypeOf(&ServiceSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceStatus).DeepCopyInto(out.(*ServiceStatus)) - return nil - }, InType: reflect.TypeOf(&ServiceStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SessionAffinityConfig).DeepCopyInto(out.(*SessionAffinityConfig)) - return nil - }, InType: reflect.TypeOf(&SessionAffinityConfig{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - 
in.(*StorageOSPersistentVolumeSource).DeepCopyInto(out.(*StorageOSPersistentVolumeSource)) - return nil - }, InType: reflect.TypeOf(&StorageOSPersistentVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StorageOSVolumeSource).DeepCopyInto(out.(*StorageOSVolumeSource)) - return nil - }, InType: reflect.TypeOf(&StorageOSVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Sysctl).DeepCopyInto(out.(*Sysctl)) - return nil - }, InType: reflect.TypeOf(&Sysctl{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*TCPSocketAction).DeepCopyInto(out.(*TCPSocketAction)) - return nil - }, InType: reflect.TypeOf(&TCPSocketAction{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Taint).DeepCopyInto(out.(*Taint)) - return nil - }, InType: reflect.TypeOf(&Taint{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Toleration).DeepCopyInto(out.(*Toleration)) - return nil - }, InType: reflect.TypeOf(&Toleration{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Volume).DeepCopyInto(out.(*Volume)) - return nil - }, InType: reflect.TypeOf(&Volume{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*VolumeMount).DeepCopyInto(out.(*VolumeMount)) - return nil - }, InType: reflect.TypeOf(&VolumeMount{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*VolumeProjection).DeepCopyInto(out.(*VolumeProjection)) - return nil - }, InType: reflect.TypeOf(&VolumeProjection{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*VolumeSource).DeepCopyInto(out.(*VolumeSource)) - return nil - }, InType: reflect.TypeOf(&VolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*VsphereVirtualDiskVolumeSource).DeepCopyInto(out.(*VsphereVirtualDiskVolumeSource)) - return nil - }, InType: reflect.TypeOf(&VsphereVirtualDiskVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*WeightedPodAffinityTerm).DeepCopyInto(out.(*WeightedPodAffinityTerm)) - return nil - }, InType: reflect.TypeOf(&WeightedPodAffinityTerm{})}, - ) -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSElasticBlockStoreVolumeSource) DeepCopyInto(out *AWSElasticBlockStoreVolumeSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSElasticBlockStoreVolumeSource. -func (in *AWSElasticBlockStoreVolumeSource) DeepCopy() *AWSElasticBlockStoreVolumeSource { - if in == nil { - return nil - } - out := new(AWSElasticBlockStoreVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Affinity) DeepCopyInto(out *Affinity) { - *out = *in - if in.NodeAffinity != nil { - in, out := &in.NodeAffinity, &out.NodeAffinity - if *in == nil { - *out = nil - } else { - *out = new(NodeAffinity) - (*in).DeepCopyInto(*out) - } - } - if in.PodAffinity != nil { - in, out := &in.PodAffinity, &out.PodAffinity - if *in == nil { - *out = nil - } else { - *out = new(PodAffinity) - (*in).DeepCopyInto(*out) - } - } - if in.PodAntiAffinity != nil { - in, out := &in.PodAntiAffinity, &out.PodAntiAffinity - if *in == nil { - *out = nil - } else { - *out = new(PodAntiAffinity) - (*in).DeepCopyInto(*out) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Affinity. -func (in *Affinity) DeepCopy() *Affinity { - if in == nil { - return nil - } - out := new(Affinity) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AttachedVolume) DeepCopyInto(out *AttachedVolume) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttachedVolume. -func (in *AttachedVolume) DeepCopy() *AttachedVolume { - if in == nil { - return nil - } - out := new(AttachedVolume) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AvoidPods) DeepCopyInto(out *AvoidPods) { - *out = *in - if in.PreferAvoidPods != nil { - in, out := &in.PreferAvoidPods, &out.PreferAvoidPods - *out = make([]PreferAvoidPodsEntry, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AvoidPods. -func (in *AvoidPods) DeepCopy() *AvoidPods { - if in == nil { - return nil - } - out := new(AvoidPods) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureDiskVolumeSource) DeepCopyInto(out *AzureDiskVolumeSource) { - *out = *in - if in.CachingMode != nil { - in, out := &in.CachingMode, &out.CachingMode - if *in == nil { - *out = nil - } else { - *out = new(AzureDataDiskCachingMode) - **out = **in - } - } - if in.FSType != nil { - in, out := &in.FSType, &out.FSType - if *in == nil { - *out = nil - } else { - *out = new(string) - **out = **in - } - } - if in.ReadOnly != nil { - in, out := &in.ReadOnly, &out.ReadOnly - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } - } - if in.Kind != nil { - in, out := &in.Kind, &out.Kind - if *in == nil { - *out = nil - } else { - *out = new(AzureDataDiskKind) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureDiskVolumeSource. -func (in *AzureDiskVolumeSource) DeepCopy() *AzureDiskVolumeSource { - if in == nil { - return nil - } - out := new(AzureDiskVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *AzureFilePersistentVolumeSource) DeepCopyInto(out *AzureFilePersistentVolumeSource) { - *out = *in - if in.SecretNamespace != nil { - in, out := &in.SecretNamespace, &out.SecretNamespace - if *in == nil { - *out = nil - } else { - *out = new(string) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureFilePersistentVolumeSource. -func (in *AzureFilePersistentVolumeSource) DeepCopy() *AzureFilePersistentVolumeSource { - if in == nil { - return nil - } - out := new(AzureFilePersistentVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureFileVolumeSource) DeepCopyInto(out *AzureFileVolumeSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureFileVolumeSource. -func (in *AzureFileVolumeSource) DeepCopy() *AzureFileVolumeSource { - if in == nil { - return nil - } - out := new(AzureFileVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Binding) DeepCopyInto(out *Binding) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Target = in.Target - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Binding. -func (in *Binding) DeepCopy() *Binding { - if in == nil { - return nil - } - out := new(Binding) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Binding) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Capabilities) DeepCopyInto(out *Capabilities) { - *out = *in - if in.Add != nil { - in, out := &in.Add, &out.Add - *out = make([]Capability, len(*in)) - copy(*out, *in) - } - if in.Drop != nil { - in, out := &in.Drop, &out.Drop - *out = make([]Capability, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Capabilities. -func (in *Capabilities) DeepCopy() *Capabilities { - if in == nil { - return nil - } - out := new(Capabilities) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CephFSPersistentVolumeSource) DeepCopyInto(out *CephFSPersistentVolumeSource) { - *out = *in - if in.Monitors != nil { - in, out := &in.Monitors, &out.Monitors - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - if *in == nil { - *out = nil - } else { - *out = new(SecretReference) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFSPersistentVolumeSource. -func (in *CephFSPersistentVolumeSource) DeepCopy() *CephFSPersistentVolumeSource { - if in == nil { - return nil - } - out := new(CephFSPersistentVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. -func (in *CephFSVolumeSource) DeepCopyInto(out *CephFSVolumeSource) { - *out = *in - if in.Monitors != nil { - in, out := &in.Monitors, &out.Monitors - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - if *in == nil { - *out = nil - } else { - *out = new(LocalObjectReference) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFSVolumeSource. -func (in *CephFSVolumeSource) DeepCopy() *CephFSVolumeSource { - if in == nil { - return nil - } - out := new(CephFSVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CinderVolumeSource) DeepCopyInto(out *CinderVolumeSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CinderVolumeSource. -func (in *CinderVolumeSource) DeepCopy() *CinderVolumeSource { - if in == nil { - return nil - } - out := new(CinderVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClientIPConfig) DeepCopyInto(out *ClientIPConfig) { - *out = *in - if in.TimeoutSeconds != nil { - in, out := &in.TimeoutSeconds, &out.TimeoutSeconds - if *in == nil { - *out = nil - } else { - *out = new(int32) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientIPConfig. -func (in *ClientIPConfig) DeepCopy() *ClientIPConfig { - if in == nil { - return nil - } - out := new(ClientIPConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ComponentCondition) DeepCopyInto(out *ComponentCondition) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentCondition. -func (in *ComponentCondition) DeepCopy() *ComponentCondition { - if in == nil { - return nil - } - out := new(ComponentCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ComponentStatus) DeepCopyInto(out *ComponentStatus) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]ComponentCondition, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentStatus. -func (in *ComponentStatus) DeepCopy() *ComponentStatus { - if in == nil { - return nil - } - out := new(ComponentStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ComponentStatus) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ComponentStatusList) DeepCopyInto(out *ComponentStatusList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ComponentStatus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentStatusList. -func (in *ComponentStatusList) DeepCopy() *ComponentStatusList { - if in == nil { - return nil - } - out := new(ComponentStatusList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ComponentStatusList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConfigMap) DeepCopyInto(out *ConfigMap) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Data != nil { - in, out := &in.Data, &out.Data - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMap. -func (in *ConfigMap) DeepCopy() *ConfigMap { - if in == nil { - return nil - } - out := new(ConfigMap) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ConfigMap) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConfigMapEnvSource) DeepCopyInto(out *ConfigMapEnvSource) { - *out = *in - out.LocalObjectReference = in.LocalObjectReference - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapEnvSource. -func (in *ConfigMapEnvSource) DeepCopy() *ConfigMapEnvSource { - if in == nil { - return nil - } - out := new(ConfigMapEnvSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConfigMapKeySelector) DeepCopyInto(out *ConfigMapKeySelector) { - *out = *in - out.LocalObjectReference = in.LocalObjectReference - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapKeySelector. -func (in *ConfigMapKeySelector) DeepCopy() *ConfigMapKeySelector { - if in == nil { - return nil - } - out := new(ConfigMapKeySelector) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ConfigMapList) DeepCopyInto(out *ConfigMapList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ConfigMap, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapList. -func (in *ConfigMapList) DeepCopy() *ConfigMapList { - if in == nil { - return nil - } - out := new(ConfigMapList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ConfigMapList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConfigMapProjection) DeepCopyInto(out *ConfigMapProjection) { - *out = *in - out.LocalObjectReference = in.LocalObjectReference - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]KeyToPath, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapProjection. -func (in *ConfigMapProjection) DeepCopy() *ConfigMapProjection { - if in == nil { - return nil - } - out := new(ConfigMapProjection) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConfigMapVolumeSource) DeepCopyInto(out *ConfigMapVolumeSource) { - *out = *in - out.LocalObjectReference = in.LocalObjectReference - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]KeyToPath, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.DefaultMode != nil { - in, out := &in.DefaultMode, &out.DefaultMode - if *in == nil { - *out = nil - } else { - *out = new(int32) - **out = **in - } - } - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapVolumeSource. -func (in *ConfigMapVolumeSource) DeepCopy() *ConfigMapVolumeSource { - if in == nil { - return nil - } - out := new(ConfigMapVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Container) DeepCopyInto(out *Container) { - *out = *in - if in.Command != nil { - in, out := &in.Command, &out.Command - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Args != nil { - in, out := &in.Args, &out.Args - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Ports != nil { - in, out := &in.Ports, &out.Ports - *out = make([]ContainerPort, len(*in)) - copy(*out, *in) - } - if in.EnvFrom != nil { - in, out := &in.EnvFrom, &out.EnvFrom - *out = make([]EnvFromSource, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]EnvVar, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - in.Resources.DeepCopyInto(&out.Resources) - if in.VolumeMounts != nil { - in, out := &in.VolumeMounts, &out.VolumeMounts - *out = make([]VolumeMount, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.LivenessProbe != nil { - in, out := &in.LivenessProbe, &out.LivenessProbe - if *in == nil { - *out = nil - } else { - *out = new(Probe) - (*in).DeepCopyInto(*out) - } - } - if in.ReadinessProbe != nil { - in, out := &in.ReadinessProbe, &out.ReadinessProbe - if *in == nil { - *out = nil - } else { - *out = new(Probe) - (*in).DeepCopyInto(*out) - } - } - if in.Lifecycle != nil { - in, out := &in.Lifecycle, &out.Lifecycle - if *in == nil { - *out = nil - } else { - *out = new(Lifecycle) - (*in).DeepCopyInto(*out) - } - } - if in.SecurityContext != nil { - in, out := &in.SecurityContext, &out.SecurityContext - if *in == nil { - *out = nil - } else { - *out = new(SecurityContext) - (*in).DeepCopyInto(*out) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Container. -func (in *Container) DeepCopy() *Container { - if in == nil { - return nil - } - out := new(Container) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ContainerImage) DeepCopyInto(out *ContainerImage) { - *out = *in - if in.Names != nil { - in, out := &in.Names, &out.Names - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerImage. -func (in *ContainerImage) DeepCopy() *ContainerImage { - if in == nil { - return nil - } - out := new(ContainerImage) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ContainerPort) DeepCopyInto(out *ContainerPort) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerPort. -func (in *ContainerPort) DeepCopy() *ContainerPort { - if in == nil { - return nil - } - out := new(ContainerPort) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ContainerState) DeepCopyInto(out *ContainerState) { - *out = *in - if in.Waiting != nil { - in, out := &in.Waiting, &out.Waiting - if *in == nil { - *out = nil - } else { - *out = new(ContainerStateWaiting) - **out = **in - } - } - if in.Running != nil { - in, out := &in.Running, &out.Running - if *in == nil { - *out = nil - } else { - *out = new(ContainerStateRunning) - (*in).DeepCopyInto(*out) - } - } - if in.Terminated != nil { - in, out := &in.Terminated, &out.Terminated - if *in == nil { - *out = nil - } else { - *out = new(ContainerStateTerminated) - (*in).DeepCopyInto(*out) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerState. -func (in *ContainerState) DeepCopy() *ContainerState { - if in == nil { - return nil - } - out := new(ContainerState) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ContainerStateRunning) DeepCopyInto(out *ContainerStateRunning) { - *out = *in - in.StartedAt.DeepCopyInto(&out.StartedAt) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStateRunning. -func (in *ContainerStateRunning) DeepCopy() *ContainerStateRunning { - if in == nil { - return nil - } - out := new(ContainerStateRunning) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ContainerStateTerminated) DeepCopyInto(out *ContainerStateTerminated) { - *out = *in - in.StartedAt.DeepCopyInto(&out.StartedAt) - in.FinishedAt.DeepCopyInto(&out.FinishedAt) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStateTerminated. -func (in *ContainerStateTerminated) DeepCopy() *ContainerStateTerminated { - if in == nil { - return nil - } - out := new(ContainerStateTerminated) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ContainerStateWaiting) DeepCopyInto(out *ContainerStateWaiting) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStateWaiting. -func (in *ContainerStateWaiting) DeepCopy() *ContainerStateWaiting { - if in == nil { - return nil - } - out := new(ContainerStateWaiting) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ContainerStatus) DeepCopyInto(out *ContainerStatus) { - *out = *in - in.State.DeepCopyInto(&out.State) - in.LastTerminationState.DeepCopyInto(&out.LastTerminationState) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStatus. -func (in *ContainerStatus) DeepCopy() *ContainerStatus { - if in == nil { - return nil - } - out := new(ContainerStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DaemonEndpoint) DeepCopyInto(out *DaemonEndpoint) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonEndpoint. 
-func (in *DaemonEndpoint) DeepCopy() *DaemonEndpoint { - if in == nil { - return nil - } - out := new(DaemonEndpoint) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DeleteOptions) DeepCopyInto(out *DeleteOptions) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.GracePeriodSeconds != nil { - in, out := &in.GracePeriodSeconds, &out.GracePeriodSeconds - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } - } - if in.Preconditions != nil { - in, out := &in.Preconditions, &out.Preconditions - if *in == nil { - *out = nil - } else { - *out = new(Preconditions) - (*in).DeepCopyInto(*out) - } - } - if in.OrphanDependents != nil { - in, out := &in.OrphanDependents, &out.OrphanDependents - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } - } - if in.PropagationPolicy != nil { - in, out := &in.PropagationPolicy, &out.PropagationPolicy - if *in == nil { - *out = nil - } else { - *out = new(DeletionPropagation) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeleteOptions. -func (in *DeleteOptions) DeepCopy() *DeleteOptions { - if in == nil { - return nil - } - out := new(DeleteOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *DeleteOptions) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DownwardAPIProjection) DeepCopyInto(out *DownwardAPIProjection) { - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]DownwardAPIVolumeFile, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownwardAPIProjection. -func (in *DownwardAPIProjection) DeepCopy() *DownwardAPIProjection { - if in == nil { - return nil - } - out := new(DownwardAPIProjection) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DownwardAPIVolumeFile) DeepCopyInto(out *DownwardAPIVolumeFile) { - *out = *in - if in.FieldRef != nil { - in, out := &in.FieldRef, &out.FieldRef - if *in == nil { - *out = nil - } else { - *out = new(ObjectFieldSelector) - **out = **in - } - } - if in.ResourceFieldRef != nil { - in, out := &in.ResourceFieldRef, &out.ResourceFieldRef - if *in == nil { - *out = nil - } else { - *out = new(ResourceFieldSelector) - (*in).DeepCopyInto(*out) - } - } - if in.Mode != nil { - in, out := &in.Mode, &out.Mode - if *in == nil { - *out = nil - } else { - *out = new(int32) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownwardAPIVolumeFile. -func (in *DownwardAPIVolumeFile) DeepCopy() *DownwardAPIVolumeFile { - if in == nil { - return nil - } - out := new(DownwardAPIVolumeFile) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *DownwardAPIVolumeSource) DeepCopyInto(out *DownwardAPIVolumeSource) { - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]DownwardAPIVolumeFile, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.DefaultMode != nil { - in, out := &in.DefaultMode, &out.DefaultMode - if *in == nil { - *out = nil - } else { - *out = new(int32) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownwardAPIVolumeSource. -func (in *DownwardAPIVolumeSource) DeepCopy() *DownwardAPIVolumeSource { - if in == nil { - return nil - } - out := new(DownwardAPIVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EmptyDirVolumeSource) DeepCopyInto(out *EmptyDirVolumeSource) { - *out = *in - if in.SizeLimit != nil { - in, out := &in.SizeLimit, &out.SizeLimit - if *in == nil { - *out = nil - } else { - *out = new(resource.Quantity) - **out = (*in).DeepCopy() - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmptyDirVolumeSource. -func (in *EmptyDirVolumeSource) DeepCopy() *EmptyDirVolumeSource { - if in == nil { - return nil - } - out := new(EmptyDirVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EndpointAddress) DeepCopyInto(out *EndpointAddress) { - *out = *in - if in.NodeName != nil { - in, out := &in.NodeName, &out.NodeName - if *in == nil { - *out = nil - } else { - *out = new(string) - **out = **in - } - } - if in.TargetRef != nil { - in, out := &in.TargetRef, &out.TargetRef - if *in == nil { - *out = nil - } else { - *out = new(ObjectReference) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointAddress. -func (in *EndpointAddress) DeepCopy() *EndpointAddress { - if in == nil { - return nil - } - out := new(EndpointAddress) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EndpointPort) DeepCopyInto(out *EndpointPort) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPort. -func (in *EndpointPort) DeepCopy() *EndpointPort { - if in == nil { - return nil - } - out := new(EndpointPort) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EndpointSubset) DeepCopyInto(out *EndpointSubset) { - *out = *in - if in.Addresses != nil { - in, out := &in.Addresses, &out.Addresses - *out = make([]EndpointAddress, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.NotReadyAddresses != nil { - in, out := &in.NotReadyAddresses, &out.NotReadyAddresses - *out = make([]EndpointAddress, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Ports != nil { - in, out := &in.Ports, &out.Ports - *out = make([]EndpointPort, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSubset. 
-func (in *EndpointSubset) DeepCopy() *EndpointSubset { - if in == nil { - return nil - } - out := new(EndpointSubset) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Endpoints) DeepCopyInto(out *Endpoints) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Subsets != nil { - in, out := &in.Subsets, &out.Subsets - *out = make([]EndpointSubset, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoints. -func (in *Endpoints) DeepCopy() *Endpoints { - if in == nil { - return nil - } - out := new(Endpoints) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Endpoints) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EndpointsList) DeepCopyInto(out *EndpointsList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Endpoints, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointsList. -func (in *EndpointsList) DeepCopy() *EndpointsList { - if in == nil { - return nil - } - out := new(EndpointsList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *EndpointsList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EnvFromSource) DeepCopyInto(out *EnvFromSource) { - *out = *in - if in.ConfigMapRef != nil { - in, out := &in.ConfigMapRef, &out.ConfigMapRef - if *in == nil { - *out = nil - } else { - *out = new(ConfigMapEnvSource) - (*in).DeepCopyInto(*out) - } - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - if *in == nil { - *out = nil - } else { - *out = new(SecretEnvSource) - (*in).DeepCopyInto(*out) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvFromSource. -func (in *EnvFromSource) DeepCopy() *EnvFromSource { - if in == nil { - return nil - } - out := new(EnvFromSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EnvVar) DeepCopyInto(out *EnvVar) { - *out = *in - if in.ValueFrom != nil { - in, out := &in.ValueFrom, &out.ValueFrom - if *in == nil { - *out = nil - } else { - *out = new(EnvVarSource) - (*in).DeepCopyInto(*out) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVar. 
-func (in *EnvVar) DeepCopy() *EnvVar { - if in == nil { - return nil - } - out := new(EnvVar) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EnvVarSource) DeepCopyInto(out *EnvVarSource) { - *out = *in - if in.FieldRef != nil { - in, out := &in.FieldRef, &out.FieldRef - if *in == nil { - *out = nil - } else { - *out = new(ObjectFieldSelector) - **out = **in - } - } - if in.ResourceFieldRef != nil { - in, out := &in.ResourceFieldRef, &out.ResourceFieldRef - if *in == nil { - *out = nil - } else { - *out = new(ResourceFieldSelector) - (*in).DeepCopyInto(*out) - } - } - if in.ConfigMapKeyRef != nil { - in, out := &in.ConfigMapKeyRef, &out.ConfigMapKeyRef - if *in == nil { - *out = nil - } else { - *out = new(ConfigMapKeySelector) - (*in).DeepCopyInto(*out) - } - } - if in.SecretKeyRef != nil { - in, out := &in.SecretKeyRef, &out.SecretKeyRef - if *in == nil { - *out = nil - } else { - *out = new(SecretKeySelector) - (*in).DeepCopyInto(*out) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVarSource. -func (in *EnvVarSource) DeepCopy() *EnvVarSource { - if in == nil { - return nil - } - out := new(EnvVarSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Event) DeepCopyInto(out *Event) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.InvolvedObject = in.InvolvedObject - out.Source = in.Source - in.FirstTimestamp.DeepCopyInto(&out.FirstTimestamp) - in.LastTimestamp.DeepCopyInto(&out.LastTimestamp) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Event. -func (in *Event) DeepCopy() *Event { - if in == nil { - return nil - } - out := new(Event) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Event) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EventList) DeepCopyInto(out *EventList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Event, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventList. -func (in *EventList) DeepCopy() *EventList { - if in == nil { - return nil - } - out := new(EventList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *EventList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EventSource) DeepCopyInto(out *EventSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSource. 
-func (in *EventSource) DeepCopy() *EventSource { - if in == nil { - return nil - } - out := new(EventSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExecAction) DeepCopyInto(out *ExecAction) { - *out = *in - if in.Command != nil { - in, out := &in.Command, &out.Command - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecAction. -func (in *ExecAction) DeepCopy() *ExecAction { - if in == nil { - return nil - } - out := new(ExecAction) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FCVolumeSource) DeepCopyInto(out *FCVolumeSource) { - *out = *in - if in.TargetWWNs != nil { - in, out := &in.TargetWWNs, &out.TargetWWNs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Lun != nil { - in, out := &in.Lun, &out.Lun - if *in == nil { - *out = nil - } else { - *out = new(int32) - **out = **in - } - } - if in.WWIDs != nil { - in, out := &in.WWIDs, &out.WWIDs - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FCVolumeSource. -func (in *FCVolumeSource) DeepCopy() *FCVolumeSource { - if in == nil { - return nil - } - out := new(FCVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FlexVolumeSource) DeepCopyInto(out *FlexVolumeSource) { - *out = *in - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - if *in == nil { - *out = nil - } else { - *out = new(LocalObjectReference) - **out = **in - } - } - if in.Options != nil { - in, out := &in.Options, &out.Options - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlexVolumeSource. -func (in *FlexVolumeSource) DeepCopy() *FlexVolumeSource { - if in == nil { - return nil - } - out := new(FlexVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FlockerVolumeSource) DeepCopyInto(out *FlockerVolumeSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlockerVolumeSource. -func (in *FlockerVolumeSource) DeepCopy() *FlockerVolumeSource { - if in == nil { - return nil - } - out := new(FlockerVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GCEPersistentDiskVolumeSource) DeepCopyInto(out *GCEPersistentDiskVolumeSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCEPersistentDiskVolumeSource. -func (in *GCEPersistentDiskVolumeSource) DeepCopy() *GCEPersistentDiskVolumeSource { - if in == nil { - return nil - } - out := new(GCEPersistentDiskVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. -func (in *GitRepoVolumeSource) DeepCopyInto(out *GitRepoVolumeSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepoVolumeSource. -func (in *GitRepoVolumeSource) DeepCopy() *GitRepoVolumeSource { - if in == nil { - return nil - } - out := new(GitRepoVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GlusterfsVolumeSource) DeepCopyInto(out *GlusterfsVolumeSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlusterfsVolumeSource. -func (in *GlusterfsVolumeSource) DeepCopy() *GlusterfsVolumeSource { - if in == nil { - return nil - } - out := new(GlusterfsVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HTTPGetAction) DeepCopyInto(out *HTTPGetAction) { - *out = *in - out.Port = in.Port - if in.HTTPHeaders != nil { - in, out := &in.HTTPHeaders, &out.HTTPHeaders - *out = make([]HTTPHeader, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPGetAction. -func (in *HTTPGetAction) DeepCopy() *HTTPGetAction { - if in == nil { - return nil - } - out := new(HTTPGetAction) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HTTPHeader) DeepCopyInto(out *HTTPHeader) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHeader. -func (in *HTTPHeader) DeepCopy() *HTTPHeader { - if in == nil { - return nil - } - out := new(HTTPHeader) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Handler) DeepCopyInto(out *Handler) { - *out = *in - if in.Exec != nil { - in, out := &in.Exec, &out.Exec - if *in == nil { - *out = nil - } else { - *out = new(ExecAction) - (*in).DeepCopyInto(*out) - } - } - if in.HTTPGet != nil { - in, out := &in.HTTPGet, &out.HTTPGet - if *in == nil { - *out = nil - } else { - *out = new(HTTPGetAction) - (*in).DeepCopyInto(*out) - } - } - if in.TCPSocket != nil { - in, out := &in.TCPSocket, &out.TCPSocket - if *in == nil { - *out = nil - } else { - *out = new(TCPSocketAction) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Handler. -func (in *Handler) DeepCopy() *Handler { - if in == nil { - return nil - } - out := new(Handler) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HostAlias) DeepCopyInto(out *HostAlias) { - *out = *in - if in.Hostnames != nil { - in, out := &in.Hostnames, &out.Hostnames - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostAlias. 
-func (in *HostAlias) DeepCopy() *HostAlias { - if in == nil { - return nil - } - out := new(HostAlias) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HostPathVolumeSource) DeepCopyInto(out *HostPathVolumeSource) { - *out = *in - if in.Type != nil { - in, out := &in.Type, &out.Type - if *in == nil { - *out = nil - } else { - *out = new(HostPathType) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPathVolumeSource. -func (in *HostPathVolumeSource) DeepCopy() *HostPathVolumeSource { - if in == nil { - return nil - } - out := new(HostPathVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ISCSIVolumeSource) DeepCopyInto(out *ISCSIVolumeSource) { - *out = *in - if in.Portals != nil { - in, out := &in.Portals, &out.Portals - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - if *in == nil { - *out = nil - } else { - *out = new(LocalObjectReference) - **out = **in - } - } - if in.InitiatorName != nil { - in, out := &in.InitiatorName, &out.InitiatorName - if *in == nil { - *out = nil - } else { - *out = new(string) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ISCSIVolumeSource. -func (in *ISCSIVolumeSource) DeepCopy() *ISCSIVolumeSource { - if in == nil { - return nil - } - out := new(ISCSIVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KeyToPath) DeepCopyInto(out *KeyToPath) { - *out = *in - if in.Mode != nil { - in, out := &in.Mode, &out.Mode - if *in == nil { - *out = nil - } else { - *out = new(int32) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyToPath. -func (in *KeyToPath) DeepCopy() *KeyToPath { - if in == nil { - return nil - } - out := new(KeyToPath) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Lifecycle) DeepCopyInto(out *Lifecycle) { - *out = *in - if in.PostStart != nil { - in, out := &in.PostStart, &out.PostStart - if *in == nil { - *out = nil - } else { - *out = new(Handler) - (*in).DeepCopyInto(*out) - } - } - if in.PreStop != nil { - in, out := &in.PreStop, &out.PreStop - if *in == nil { - *out = nil - } else { - *out = new(Handler) - (*in).DeepCopyInto(*out) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Lifecycle. -func (in *Lifecycle) DeepCopy() *Lifecycle { - if in == nil { - return nil - } - out := new(Lifecycle) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LimitRange) DeepCopyInto(out *LimitRange) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRange. 
-func (in *LimitRange) DeepCopy() *LimitRange { - if in == nil { - return nil - } - out := new(LimitRange) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *LimitRange) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LimitRangeItem) DeepCopyInto(out *LimitRangeItem) { - *out = *in - if in.Max != nil { - in, out := &in.Max, &out.Max - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.Min != nil { - in, out := &in.Min, &out.Min - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.Default != nil { - in, out := &in.Default, &out.Default - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.DefaultRequest != nil { - in, out := &in.DefaultRequest, &out.DefaultRequest - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.MaxLimitRequestRatio != nil { - in, out := &in.MaxLimitRequestRatio, &out.MaxLimitRequestRatio - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRangeItem. -func (in *LimitRangeItem) DeepCopy() *LimitRangeItem { - if in == nil { - return nil - } - out := new(LimitRangeItem) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LimitRangeList) DeepCopyInto(out *LimitRangeList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]LimitRange, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRangeList. -func (in *LimitRangeList) DeepCopy() *LimitRangeList { - if in == nil { - return nil - } - out := new(LimitRangeList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *LimitRangeList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LimitRangeSpec) DeepCopyInto(out *LimitRangeSpec) { - *out = *in - if in.Limits != nil { - in, out := &in.Limits, &out.Limits - *out = make([]LimitRangeItem, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRangeSpec. -func (in *LimitRangeSpec) DeepCopy() *LimitRangeSpec { - if in == nil { - return nil - } - out := new(LimitRangeSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *List) DeepCopyInto(out *List) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]runtime.Object, len(*in)) - for i := range *in { - if (*in)[i] == nil { - (*out)[i] = nil - } else { - (*out)[i] = (*in)[i].DeepCopyObject() - } - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new List. -func (in *List) DeepCopy() *List { - if in == nil { - return nil - } - out := new(List) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *List) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ListOptions) DeepCopyInto(out *ListOptions) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.LabelSelector == nil { - out.LabelSelector = nil - } else { - out.LabelSelector = in.LabelSelector.DeepCopySelector() - } - if in.FieldSelector == nil { - out.FieldSelector = nil - } else { - out.FieldSelector = in.FieldSelector.DeepCopySelector() - } - if in.TimeoutSeconds != nil { - in, out := &in.TimeoutSeconds, &out.TimeoutSeconds - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListOptions. -func (in *ListOptions) DeepCopy() *ListOptions { - if in == nil { - return nil - } - out := new(ListOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ListOptions) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LoadBalancerIngress) DeepCopyInto(out *LoadBalancerIngress) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerIngress. -func (in *LoadBalancerIngress) DeepCopy() *LoadBalancerIngress { - if in == nil { - return nil - } - out := new(LoadBalancerIngress) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LoadBalancerStatus) DeepCopyInto(out *LoadBalancerStatus) { - *out = *in - if in.Ingress != nil { - in, out := &in.Ingress, &out.Ingress - *out = make([]LoadBalancerIngress, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerStatus. -func (in *LoadBalancerStatus) DeepCopy() *LoadBalancerStatus { - if in == nil { - return nil - } - out := new(LoadBalancerStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LocalObjectReference) DeepCopyInto(out *LocalObjectReference) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalObjectReference. 
-func (in *LocalObjectReference) DeepCopy() *LocalObjectReference { - if in == nil { - return nil - } - out := new(LocalObjectReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LocalVolumeSource) DeepCopyInto(out *LocalVolumeSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalVolumeSource. -func (in *LocalVolumeSource) DeepCopy() *LocalVolumeSource { - if in == nil { - return nil - } - out := new(LocalVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NFSVolumeSource) DeepCopyInto(out *NFSVolumeSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NFSVolumeSource. -func (in *NFSVolumeSource) DeepCopy() *NFSVolumeSource { - if in == nil { - return nil - } - out := new(NFSVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Namespace) DeepCopyInto(out *Namespace) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Namespace. -func (in *Namespace) DeepCopy() *Namespace { - if in == nil { - return nil - } - out := new(Namespace) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Namespace) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NamespaceList) DeepCopyInto(out *NamespaceList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Namespace, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceList. -func (in *NamespaceList) DeepCopy() *NamespaceList { - if in == nil { - return nil - } - out := new(NamespaceList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NamespaceList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NamespaceSpec) DeepCopyInto(out *NamespaceSpec) { - *out = *in - if in.Finalizers != nil { - in, out := &in.Finalizers, &out.Finalizers - *out = make([]FinalizerName, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceSpec. 
-func (in *NamespaceSpec) DeepCopy() *NamespaceSpec { - if in == nil { - return nil - } - out := new(NamespaceSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NamespaceStatus) DeepCopyInto(out *NamespaceStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceStatus. -func (in *NamespaceStatus) DeepCopy() *NamespaceStatus { - if in == nil { - return nil - } - out := new(NamespaceStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Node) DeepCopyInto(out *Node) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Node. -func (in *Node) DeepCopy() *Node { - if in == nil { - return nil - } - out := new(Node) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Node) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeAddress) DeepCopyInto(out *NodeAddress) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAddress. -func (in *NodeAddress) DeepCopy() *NodeAddress { - if in == nil { - return nil - } - out := new(NodeAddress) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeAffinity) DeepCopyInto(out *NodeAffinity) { - *out = *in - if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { - in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution - if *in == nil { - *out = nil - } else { - *out = new(NodeSelector) - (*in).DeepCopyInto(*out) - } - } - if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { - in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution - *out = make([]PreferredSchedulingTerm, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAffinity. -func (in *NodeAffinity) DeepCopy() *NodeAffinity { - if in == nil { - return nil - } - out := new(NodeAffinity) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeCondition) DeepCopyInto(out *NodeCondition) { - *out = *in - in.LastHeartbeatTime.DeepCopyInto(&out.LastHeartbeatTime) - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeCondition. 
-func (in *NodeCondition) DeepCopy() *NodeCondition { - if in == nil { - return nil - } - out := new(NodeCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeConfigSource) DeepCopyInto(out *NodeConfigSource) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.ConfigMapRef != nil { - in, out := &in.ConfigMapRef, &out.ConfigMapRef - if *in == nil { - *out = nil - } else { - *out = new(ObjectReference) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeConfigSource. -func (in *NodeConfigSource) DeepCopy() *NodeConfigSource { - if in == nil { - return nil - } - out := new(NodeConfigSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NodeConfigSource) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeDaemonEndpoints) DeepCopyInto(out *NodeDaemonEndpoints) { - *out = *in - out.KubeletEndpoint = in.KubeletEndpoint - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeDaemonEndpoints. -func (in *NodeDaemonEndpoints) DeepCopy() *NodeDaemonEndpoints { - if in == nil { - return nil - } - out := new(NodeDaemonEndpoints) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeList) DeepCopyInto(out *NodeList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Node, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeList. -func (in *NodeList) DeepCopy() *NodeList { - if in == nil { - return nil - } - out := new(NodeList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NodeList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeProxyOptions) DeepCopyInto(out *NodeProxyOptions) { - *out = *in - out.TypeMeta = in.TypeMeta - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeProxyOptions. -func (in *NodeProxyOptions) DeepCopy() *NodeProxyOptions { - if in == nil { - return nil - } - out := new(NodeProxyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NodeProxyOptions) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NodeResources) DeepCopyInto(out *NodeResources) { - *out = *in - if in.Capacity != nil { - in, out := &in.Capacity, &out.Capacity - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResources. -func (in *NodeResources) DeepCopy() *NodeResources { - if in == nil { - return nil - } - out := new(NodeResources) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeSelector) DeepCopyInto(out *NodeSelector) { - *out = *in - if in.NodeSelectorTerms != nil { - in, out := &in.NodeSelectorTerms, &out.NodeSelectorTerms - *out = make([]NodeSelectorTerm, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSelector. -func (in *NodeSelector) DeepCopy() *NodeSelector { - if in == nil { - return nil - } - out := new(NodeSelector) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeSelectorRequirement) DeepCopyInto(out *NodeSelectorRequirement) { - *out = *in - if in.Values != nil { - in, out := &in.Values, &out.Values - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSelectorRequirement. -func (in *NodeSelectorRequirement) DeepCopy() *NodeSelectorRequirement { - if in == nil { - return nil - } - out := new(NodeSelectorRequirement) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeSelectorTerm) DeepCopyInto(out *NodeSelectorTerm) { - *out = *in - if in.MatchExpressions != nil { - in, out := &in.MatchExpressions, &out.MatchExpressions - *out = make([]NodeSelectorRequirement, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSelectorTerm. -func (in *NodeSelectorTerm) DeepCopy() *NodeSelectorTerm { - if in == nil { - return nil - } - out := new(NodeSelectorTerm) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeSpec) DeepCopyInto(out *NodeSpec) { - *out = *in - if in.Taints != nil { - in, out := &in.Taints, &out.Taints - *out = make([]Taint, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.ConfigSource != nil { - in, out := &in.ConfigSource, &out.ConfigSource - if *in == nil { - *out = nil - } else { - *out = new(NodeConfigSource) - (*in).DeepCopyInto(*out) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSpec. -func (in *NodeSpec) DeepCopy() *NodeSpec { - if in == nil { - return nil - } - out := new(NodeSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NodeStatus) DeepCopyInto(out *NodeStatus) { - *out = *in - if in.Capacity != nil { - in, out := &in.Capacity, &out.Capacity - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.Allocatable != nil { - in, out := &in.Allocatable, &out.Allocatable - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]NodeCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Addresses != nil { - in, out := &in.Addresses, &out.Addresses - *out = make([]NodeAddress, len(*in)) - copy(*out, *in) - } - out.DaemonEndpoints = in.DaemonEndpoints - out.NodeInfo = in.NodeInfo - if in.Images != nil { - in, out := &in.Images, &out.Images - *out = make([]ContainerImage, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.VolumesInUse != nil { - in, out := &in.VolumesInUse, &out.VolumesInUse - *out = make([]UniqueVolumeName, len(*in)) - copy(*out, *in) - } - if in.VolumesAttached != nil { - in, out := &in.VolumesAttached, &out.VolumesAttached - *out = make([]AttachedVolume, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeStatus. -func (in *NodeStatus) DeepCopy() *NodeStatus { - if in == nil { - return nil - } - out := new(NodeStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeSystemInfo) DeepCopyInto(out *NodeSystemInfo) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSystemInfo. -func (in *NodeSystemInfo) DeepCopy() *NodeSystemInfo { - if in == nil { - return nil - } - out := new(NodeSystemInfo) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ObjectFieldSelector) DeepCopyInto(out *ObjectFieldSelector) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectFieldSelector. -func (in *ObjectFieldSelector) DeepCopy() *ObjectFieldSelector { - if in == nil { - return nil - } - out := new(ObjectFieldSelector) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ObjectMeta) DeepCopyInto(out *ObjectMeta) { - *out = *in - in.CreationTimestamp.DeepCopyInto(&out.CreationTimestamp) - if in.DeletionTimestamp != nil { - in, out := &in.DeletionTimestamp, &out.DeletionTimestamp - if *in == nil { - *out = nil - } else { - *out = new(v1.Time) - (*in).DeepCopyInto(*out) - } - } - if in.DeletionGracePeriodSeconds != nil { - in, out := &in.DeletionGracePeriodSeconds, &out.DeletionGracePeriodSeconds - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } - } - if in.Labels != nil { - in, out := &in.Labels, &out.Labels - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Annotations != nil { - in, out := &in.Annotations, &out.Annotations - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.OwnerReferences != nil { - in, out := &in.OwnerReferences, &out.OwnerReferences - *out = make([]v1.OwnerReference, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Initializers != nil { - in, out := &in.Initializers, &out.Initializers - if *in == nil { - *out = nil - } else { - *out = new(v1.Initializers) - (*in).DeepCopyInto(*out) - } - } - if in.Finalizers != nil { - in, out := &in.Finalizers, &out.Finalizers - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMeta. -func (in *ObjectMeta) DeepCopy() *ObjectMeta { - if in == nil { - return nil - } - out := new(ObjectMeta) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ObjectReference) DeepCopyInto(out *ObjectReference) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference. -func (in *ObjectReference) DeepCopy() *ObjectReference { - if in == nil { - return nil - } - out := new(ObjectReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ObjectReference) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PersistentVolume) DeepCopyInto(out *PersistentVolume) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolume. -func (in *PersistentVolume) DeepCopy() *PersistentVolume { - if in == nil { - return nil - } - out := new(PersistentVolume) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PersistentVolume) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PersistentVolumeClaim) DeepCopyInto(out *PersistentVolumeClaim) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaim. -func (in *PersistentVolumeClaim) DeepCopy() *PersistentVolumeClaim { - if in == nil { - return nil - } - out := new(PersistentVolumeClaim) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PersistentVolumeClaim) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PersistentVolumeClaimCondition) DeepCopyInto(out *PersistentVolumeClaimCondition) { - *out = *in - in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimCondition. -func (in *PersistentVolumeClaimCondition) DeepCopy() *PersistentVolumeClaimCondition { - if in == nil { - return nil - } - out := new(PersistentVolumeClaimCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PersistentVolumeClaimList) DeepCopyInto(out *PersistentVolumeClaimList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PersistentVolumeClaim, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimList. -func (in *PersistentVolumeClaimList) DeepCopy() *PersistentVolumeClaimList { - if in == nil { - return nil - } - out := new(PersistentVolumeClaimList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PersistentVolumeClaimList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PersistentVolumeClaimSpec) DeepCopyInto(out *PersistentVolumeClaimSpec) { - *out = *in - if in.AccessModes != nil { - in, out := &in.AccessModes, &out.AccessModes - *out = make([]PersistentVolumeAccessMode, len(*in)) - copy(*out, *in) - } - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - if *in == nil { - *out = nil - } else { - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - } - in.Resources.DeepCopyInto(&out.Resources) - if in.StorageClassName != nil { - in, out := &in.StorageClassName, &out.StorageClassName - if *in == nil { - *out = nil - } else { - *out = new(string) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimSpec. 
-func (in *PersistentVolumeClaimSpec) DeepCopy() *PersistentVolumeClaimSpec { - if in == nil { - return nil - } - out := new(PersistentVolumeClaimSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PersistentVolumeClaimStatus) DeepCopyInto(out *PersistentVolumeClaimStatus) { - *out = *in - if in.AccessModes != nil { - in, out := &in.AccessModes, &out.AccessModes - *out = make([]PersistentVolumeAccessMode, len(*in)) - copy(*out, *in) - } - if in.Capacity != nil { - in, out := &in.Capacity, &out.Capacity - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]PersistentVolumeClaimCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimStatus. -func (in *PersistentVolumeClaimStatus) DeepCopy() *PersistentVolumeClaimStatus { - if in == nil { - return nil - } - out := new(PersistentVolumeClaimStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PersistentVolumeClaimVolumeSource) DeepCopyInto(out *PersistentVolumeClaimVolumeSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimVolumeSource. -func (in *PersistentVolumeClaimVolumeSource) DeepCopy() *PersistentVolumeClaimVolumeSource { - if in == nil { - return nil - } - out := new(PersistentVolumeClaimVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PersistentVolumeList) DeepCopyInto(out *PersistentVolumeList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PersistentVolume, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeList. -func (in *PersistentVolumeList) DeepCopy() *PersistentVolumeList { - if in == nil { - return nil - } - out := new(PersistentVolumeList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PersistentVolumeList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) { - *out = *in - if in.GCEPersistentDisk != nil { - in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk - if *in == nil { - *out = nil - } else { - *out = new(GCEPersistentDiskVolumeSource) - **out = **in - } - } - if in.AWSElasticBlockStore != nil { - in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore - if *in == nil { - *out = nil - } else { - *out = new(AWSElasticBlockStoreVolumeSource) - **out = **in - } - } - if in.HostPath != nil { - in, out := &in.HostPath, &out.HostPath - if *in == nil { - *out = nil - } else { - *out = new(HostPathVolumeSource) - (*in).DeepCopyInto(*out) - } - } - if in.Glusterfs != nil { - in, out := &in.Glusterfs, &out.Glusterfs - if *in == nil { - *out = nil - } else { - *out = new(GlusterfsVolumeSource) - **out = **in - } - } - if in.NFS != nil { - in, out := &in.NFS, &out.NFS - if *in == nil { - *out = nil - } else { - *out = new(NFSVolumeSource) - **out = **in - } - } - if in.RBD != nil { - in, out := &in.RBD, &out.RBD - if *in == nil { - *out = nil - } else { - *out = new(RBDVolumeSource) - (*in).DeepCopyInto(*out) - } - } - if in.Quobyte != nil { - in, out := &in.Quobyte, &out.Quobyte - if *in == nil { - *out = nil - } else { - *out = new(QuobyteVolumeSource) - **out = **in - } - } - if in.ISCSI != nil { - in, out := &in.ISCSI, &out.ISCSI - if *in == nil { - *out = nil - } else { - *out = new(ISCSIVolumeSource) - (*in).DeepCopyInto(*out) - } - } - if in.FlexVolume != nil { - in, out := &in.FlexVolume, &out.FlexVolume - if *in == nil { - *out = nil - } else { - *out = new(FlexVolumeSource) - (*in).DeepCopyInto(*out) - } - } - if in.Cinder != nil { - in, out := &in.Cinder, &out.Cinder - if *in == nil { - *out = nil - } else { - *out = new(CinderVolumeSource) - **out = **in - } - } - if in.CephFS != nil { - in, out := &in.CephFS, &out.CephFS - if *in == nil { - *out = nil - } else { - *out = new(CephFSPersistentVolumeSource) - (*in).DeepCopyInto(*out) - } - } - if in.FC != nil { - in, out := &in.FC, &out.FC - if *in == nil { - *out = nil - } else { - *out = new(FCVolumeSource) - (*in).DeepCopyInto(*out) - } - } - if in.Flocker != nil { - in, out := &in.Flocker, &out.Flocker - if *in == nil { - *out = nil - } else { - *out = new(FlockerVolumeSource) - **out = **in - } - } - if in.AzureFile != nil { - in, out := &in.AzureFile, &out.AzureFile - if *in == nil { - *out = nil - } else { - *out = new(AzureFilePersistentVolumeSource) - (*in).DeepCopyInto(*out) - } - } - if in.VsphereVolume != nil { - in, out := &in.VsphereVolume, &out.VsphereVolume - if *in == nil { - *out = nil - } else { - *out = new(VsphereVirtualDiskVolumeSource) - **out = **in - } - } - if in.AzureDisk != nil { - in, out := &in.AzureDisk, &out.AzureDisk - if *in == nil { - *out = nil - } else { - *out = new(AzureDiskVolumeSource) - (*in).DeepCopyInto(*out) - } - } - if in.PhotonPersistentDisk != nil { - in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk - if *in == nil { - *out = nil - } else { - *out = new(PhotonPersistentDiskVolumeSource) - **out = **in - } - } - if in.PortworxVolume != nil { - in, out := &in.PortworxVolume, &out.PortworxVolume - if *in == nil { - *out = nil - } else { - *out = new(PortworxVolumeSource) - **out = **in - } - } - if in.ScaleIO != nil { - in, out := &in.ScaleIO, &out.ScaleIO - if *in == nil { - *out = nil - } else { - *out = new(ScaleIOVolumeSource) - (*in).DeepCopyInto(*out) - } - } - if in.Local != nil { - in, out := &in.Local, &out.Local - if *in == nil { 
- *out = nil - } else { - *out = new(LocalVolumeSource) - **out = **in - } - } - if in.StorageOS != nil { - in, out := &in.StorageOS, &out.StorageOS - if *in == nil { - *out = nil - } else { - *out = new(StorageOSPersistentVolumeSource) - (*in).DeepCopyInto(*out) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeSource. -func (in *PersistentVolumeSource) DeepCopy() *PersistentVolumeSource { - if in == nil { - return nil - } - out := new(PersistentVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PersistentVolumeSpec) DeepCopyInto(out *PersistentVolumeSpec) { - *out = *in - if in.Capacity != nil { - in, out := &in.Capacity, &out.Capacity - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - in.PersistentVolumeSource.DeepCopyInto(&out.PersistentVolumeSource) - if in.AccessModes != nil { - in, out := &in.AccessModes, &out.AccessModes - *out = make([]PersistentVolumeAccessMode, len(*in)) - copy(*out, *in) - } - if in.ClaimRef != nil { - in, out := &in.ClaimRef, &out.ClaimRef - if *in == nil { - *out = nil - } else { - *out = new(ObjectReference) - **out = **in - } - } - if in.MountOptions != nil { - in, out := &in.MountOptions, &out.MountOptions - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeSpec. -func (in *PersistentVolumeSpec) DeepCopy() *PersistentVolumeSpec { - if in == nil { - return nil - } - out := new(PersistentVolumeSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PersistentVolumeStatus) DeepCopyInto(out *PersistentVolumeStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeStatus. -func (in *PersistentVolumeStatus) DeepCopy() *PersistentVolumeStatus { - if in == nil { - return nil - } - out := new(PersistentVolumeStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PhotonPersistentDiskVolumeSource) DeepCopyInto(out *PhotonPersistentDiskVolumeSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PhotonPersistentDiskVolumeSource. -func (in *PhotonPersistentDiskVolumeSource) DeepCopy() *PhotonPersistentDiskVolumeSource { - if in == nil { - return nil - } - out := new(PhotonPersistentDiskVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Pod) DeepCopyInto(out *Pod) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Pod. -func (in *Pod) DeepCopy() *Pod { - if in == nil { - return nil - } - out := new(Pod) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *Pod) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodAffinity) DeepCopyInto(out *PodAffinity) { - *out = *in - if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { - in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution - *out = make([]PodAffinityTerm, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { - in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution - *out = make([]WeightedPodAffinityTerm, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAffinity. -func (in *PodAffinity) DeepCopy() *PodAffinity { - if in == nil { - return nil - } - out := new(PodAffinity) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodAffinityTerm) DeepCopyInto(out *PodAffinityTerm) { - *out = *in - if in.LabelSelector != nil { - in, out := &in.LabelSelector, &out.LabelSelector - if *in == nil { - *out = nil - } else { - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - } - if in.Namespaces != nil { - in, out := &in.Namespaces, &out.Namespaces - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAffinityTerm. -func (in *PodAffinityTerm) DeepCopy() *PodAffinityTerm { - if in == nil { - return nil - } - out := new(PodAffinityTerm) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodAntiAffinity) DeepCopyInto(out *PodAntiAffinity) { - *out = *in - if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { - in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution - *out = make([]PodAffinityTerm, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { - in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution - *out = make([]WeightedPodAffinityTerm, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAntiAffinity. -func (in *PodAntiAffinity) DeepCopy() *PodAntiAffinity { - if in == nil { - return nil - } - out := new(PodAntiAffinity) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodAttachOptions) DeepCopyInto(out *PodAttachOptions) { - *out = *in - out.TypeMeta = in.TypeMeta - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAttachOptions. 
-func (in *PodAttachOptions) DeepCopy() *PodAttachOptions { - if in == nil { - return nil - } - out := new(PodAttachOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodAttachOptions) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodCondition) DeepCopyInto(out *PodCondition) { - *out = *in - in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCondition. -func (in *PodCondition) DeepCopy() *PodCondition { - if in == nil { - return nil - } - out := new(PodCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodExecOptions) DeepCopyInto(out *PodExecOptions) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.Command != nil { - in, out := &in.Command, &out.Command - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodExecOptions. -func (in *PodExecOptions) DeepCopy() *PodExecOptions { - if in == nil { - return nil - } - out := new(PodExecOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodExecOptions) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodList) DeepCopyInto(out *PodList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Pod, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodList. -func (in *PodList) DeepCopy() *PodList { - if in == nil { - return nil - } - out := new(PodList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PodLogOptions) DeepCopyInto(out *PodLogOptions) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.SinceSeconds != nil { - in, out := &in.SinceSeconds, &out.SinceSeconds - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } - } - if in.SinceTime != nil { - in, out := &in.SinceTime, &out.SinceTime - if *in == nil { - *out = nil - } else { - *out = new(v1.Time) - (*in).DeepCopyInto(*out) - } - } - if in.TailLines != nil { - in, out := &in.TailLines, &out.TailLines - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } - } - if in.LimitBytes != nil { - in, out := &in.LimitBytes, &out.LimitBytes - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodLogOptions. -func (in *PodLogOptions) DeepCopy() *PodLogOptions { - if in == nil { - return nil - } - out := new(PodLogOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodLogOptions) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodPortForwardOptions) DeepCopyInto(out *PodPortForwardOptions) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.Ports != nil { - in, out := &in.Ports, &out.Ports - *out = make([]int32, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodPortForwardOptions. -func (in *PodPortForwardOptions) DeepCopy() *PodPortForwardOptions { - if in == nil { - return nil - } - out := new(PodPortForwardOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodPortForwardOptions) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodProxyOptions) DeepCopyInto(out *PodProxyOptions) { - *out = *in - out.TypeMeta = in.TypeMeta - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodProxyOptions. -func (in *PodProxyOptions) DeepCopy() *PodProxyOptions { - if in == nil { - return nil - } - out := new(PodProxyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodProxyOptions) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) { - *out = *in - if in.SELinuxOptions != nil { - in, out := &in.SELinuxOptions, &out.SELinuxOptions - if *in == nil { - *out = nil - } else { - *out = new(SELinuxOptions) - **out = **in - } - } - if in.RunAsUser != nil { - in, out := &in.RunAsUser, &out.RunAsUser - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } - } - if in.RunAsNonRoot != nil { - in, out := &in.RunAsNonRoot, &out.RunAsNonRoot - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } - } - if in.SupplementalGroups != nil { - in, out := &in.SupplementalGroups, &out.SupplementalGroups - *out = make([]int64, len(*in)) - copy(*out, *in) - } - if in.FSGroup != nil { - in, out := &in.FSGroup, &out.FSGroup - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityContext. -func (in *PodSecurityContext) DeepCopy() *PodSecurityContext { - if in == nil { - return nil - } - out := new(PodSecurityContext) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSignature) DeepCopyInto(out *PodSignature) { - *out = *in - if in.PodController != nil { - in, out := &in.PodController, &out.PodController - if *in == nil { - *out = nil - } else { - *out = new(v1.OwnerReference) - (*in).DeepCopyInto(*out) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSignature. -func (in *PodSignature) DeepCopy() *PodSignature { - if in == nil { - return nil - } - out := new(PodSignature) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PodSpec) DeepCopyInto(out *PodSpec) { - *out = *in - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make([]Volume, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.InitContainers != nil { - in, out := &in.InitContainers, &out.InitContainers - *out = make([]Container, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Containers != nil { - in, out := &in.Containers, &out.Containers - *out = make([]Container, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.TerminationGracePeriodSeconds != nil { - in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } - } - if in.ActiveDeadlineSeconds != nil { - in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } - } - if in.NodeSelector != nil { - in, out := &in.NodeSelector, &out.NodeSelector - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.AutomountServiceAccountToken != nil { - in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } - } - if in.SecurityContext != nil { - in, out := &in.SecurityContext, &out.SecurityContext - if *in == nil { - *out = nil - } else { - *out = new(PodSecurityContext) - (*in).DeepCopyInto(*out) - } - } - if in.ImagePullSecrets != nil { - in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]LocalObjectReference, len(*in)) - copy(*out, *in) - } - if in.Affinity != nil { - in, out := &in.Affinity, &out.Affinity - if *in == nil { - *out = nil - } else { - *out = new(Affinity) - (*in).DeepCopyInto(*out) - } - } - if in.Tolerations != nil { - in, out := &in.Tolerations, &out.Tolerations - *out = make([]Toleration, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.HostAliases != nil { - in, out := &in.HostAliases, &out.HostAliases - *out = make([]HostAlias, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Priority != nil { - in, out := &in.Priority, &out.Priority - if *in == nil { - *out = nil - } else { - *out = new(int32) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSpec. -func (in *PodSpec) DeepCopy() *PodSpec { - if in == nil { - return nil - } - out := new(PodSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PodStatus) DeepCopyInto(out *PodStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]PodCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.StartTime != nil { - in, out := &in.StartTime, &out.StartTime - if *in == nil { - *out = nil - } else { - *out = new(v1.Time) - (*in).DeepCopyInto(*out) - } - } - if in.InitContainerStatuses != nil { - in, out := &in.InitContainerStatuses, &out.InitContainerStatuses - *out = make([]ContainerStatus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.ContainerStatuses != nil { - in, out := &in.ContainerStatuses, &out.ContainerStatuses - *out = make([]ContainerStatus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodStatus. -func (in *PodStatus) DeepCopy() *PodStatus { - if in == nil { - return nil - } - out := new(PodStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodStatusResult) DeepCopyInto(out *PodStatusResult) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodStatusResult. -func (in *PodStatusResult) DeepCopy() *PodStatusResult { - if in == nil { - return nil - } - out := new(PodStatusResult) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodStatusResult) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodTemplate) DeepCopyInto(out *PodTemplate) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Template.DeepCopyInto(&out.Template) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplate. -func (in *PodTemplate) DeepCopy() *PodTemplate { - if in == nil { - return nil - } - out := new(PodTemplate) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodTemplate) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodTemplateList) DeepCopyInto(out *PodTemplateList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PodTemplate, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplateList. 
-func (in *PodTemplateList) DeepCopy() *PodTemplateList { - if in == nil { - return nil - } - out := new(PodTemplateList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodTemplateList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodTemplateSpec) DeepCopyInto(out *PodTemplateSpec) { - *out = *in - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplateSpec. -func (in *PodTemplateSpec) DeepCopy() *PodTemplateSpec { - if in == nil { - return nil - } - out := new(PodTemplateSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PortworxVolumeSource) DeepCopyInto(out *PortworxVolumeSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortworxVolumeSource. -func (in *PortworxVolumeSource) DeepCopy() *PortworxVolumeSource { - if in == nil { - return nil - } - out := new(PortworxVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Preconditions) DeepCopyInto(out *Preconditions) { - *out = *in - if in.UID != nil { - in, out := &in.UID, &out.UID - if *in == nil { - *out = nil - } else { - *out = new(types.UID) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Preconditions. -func (in *Preconditions) DeepCopy() *Preconditions { - if in == nil { - return nil - } - out := new(Preconditions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PreferAvoidPodsEntry) DeepCopyInto(out *PreferAvoidPodsEntry) { - *out = *in - in.PodSignature.DeepCopyInto(&out.PodSignature) - in.EvictionTime.DeepCopyInto(&out.EvictionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreferAvoidPodsEntry. -func (in *PreferAvoidPodsEntry) DeepCopy() *PreferAvoidPodsEntry { - if in == nil { - return nil - } - out := new(PreferAvoidPodsEntry) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PreferredSchedulingTerm) DeepCopyInto(out *PreferredSchedulingTerm) { - *out = *in - in.Preference.DeepCopyInto(&out.Preference) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreferredSchedulingTerm. -func (in *PreferredSchedulingTerm) DeepCopy() *PreferredSchedulingTerm { - if in == nil { - return nil - } - out := new(PreferredSchedulingTerm) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Probe) DeepCopyInto(out *Probe) { - *out = *in - in.Handler.DeepCopyInto(&out.Handler) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Probe. -func (in *Probe) DeepCopy() *Probe { - if in == nil { - return nil - } - out := new(Probe) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ProjectedVolumeSource) DeepCopyInto(out *ProjectedVolumeSource) { - *out = *in - if in.Sources != nil { - in, out := &in.Sources, &out.Sources - *out = make([]VolumeProjection, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.DefaultMode != nil { - in, out := &in.DefaultMode, &out.DefaultMode - if *in == nil { - *out = nil - } else { - *out = new(int32) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectedVolumeSource. -func (in *ProjectedVolumeSource) DeepCopy() *ProjectedVolumeSource { - if in == nil { - return nil - } - out := new(ProjectedVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *QuobyteVolumeSource) DeepCopyInto(out *QuobyteVolumeSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuobyteVolumeSource. -func (in *QuobyteVolumeSource) DeepCopy() *QuobyteVolumeSource { - if in == nil { - return nil - } - out := new(QuobyteVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RBDVolumeSource) DeepCopyInto(out *RBDVolumeSource) { - *out = *in - if in.CephMonitors != nil { - in, out := &in.CephMonitors, &out.CephMonitors - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - if *in == nil { - *out = nil - } else { - *out = new(LocalObjectReference) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RBDVolumeSource. -func (in *RBDVolumeSource) DeepCopy() *RBDVolumeSource { - if in == nil { - return nil - } - out := new(RBDVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RangeAllocation) DeepCopyInto(out *RangeAllocation) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Data != nil { - in, out := &in.Data, &out.Data - *out = make([]byte, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RangeAllocation. -func (in *RangeAllocation) DeepCopy() *RangeAllocation { - if in == nil { - return nil - } - out := new(RangeAllocation) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *RangeAllocation) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ReplicationController) DeepCopyInto(out *ReplicationController) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationController. -func (in *ReplicationController) DeepCopy() *ReplicationController { - if in == nil { - return nil - } - out := new(ReplicationController) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ReplicationController) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicationControllerCondition) DeepCopyInto(out *ReplicationControllerCondition) { - *out = *in - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerCondition. -func (in *ReplicationControllerCondition) DeepCopy() *ReplicationControllerCondition { - if in == nil { - return nil - } - out := new(ReplicationControllerCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicationControllerList) DeepCopyInto(out *ReplicationControllerList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ReplicationController, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerList. -func (in *ReplicationControllerList) DeepCopy() *ReplicationControllerList { - if in == nil { - return nil - } - out := new(ReplicationControllerList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ReplicationControllerList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicationControllerSpec) DeepCopyInto(out *ReplicationControllerSpec) { - *out = *in - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Template != nil { - in, out := &in.Template, &out.Template - if *in == nil { - *out = nil - } else { - *out = new(PodTemplateSpec) - (*in).DeepCopyInto(*out) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerSpec. -func (in *ReplicationControllerSpec) DeepCopy() *ReplicationControllerSpec { - if in == nil { - return nil - } - out := new(ReplicationControllerSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ReplicationControllerStatus) DeepCopyInto(out *ReplicationControllerStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]ReplicationControllerCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerStatus. -func (in *ReplicationControllerStatus) DeepCopy() *ReplicationControllerStatus { - if in == nil { - return nil - } - out := new(ReplicationControllerStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceFieldSelector) DeepCopyInto(out *ResourceFieldSelector) { - *out = *in - out.Divisor = in.Divisor.DeepCopy() - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceFieldSelector. -func (in *ResourceFieldSelector) DeepCopy() *ResourceFieldSelector { - if in == nil { - return nil - } - out := new(ResourceFieldSelector) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceQuota) DeepCopyInto(out *ResourceQuota) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuota. -func (in *ResourceQuota) DeepCopy() *ResourceQuota { - if in == nil { - return nil - } - out := new(ResourceQuota) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ResourceQuota) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceQuotaList) DeepCopyInto(out *ResourceQuotaList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ResourceQuota, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaList. -func (in *ResourceQuotaList) DeepCopy() *ResourceQuotaList { - if in == nil { - return nil - } - out := new(ResourceQuotaList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ResourceQuotaList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ResourceQuotaSpec) DeepCopyInto(out *ResourceQuotaSpec) { - *out = *in - if in.Hard != nil { - in, out := &in.Hard, &out.Hard - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.Scopes != nil { - in, out := &in.Scopes, &out.Scopes - *out = make([]ResourceQuotaScope, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaSpec. -func (in *ResourceQuotaSpec) DeepCopy() *ResourceQuotaSpec { - if in == nil { - return nil - } - out := new(ResourceQuotaSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceQuotaStatus) DeepCopyInto(out *ResourceQuotaStatus) { - *out = *in - if in.Hard != nil { - in, out := &in.Hard, &out.Hard - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.Used != nil { - in, out := &in.Used, &out.Used - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaStatus. -func (in *ResourceQuotaStatus) DeepCopy() *ResourceQuotaStatus { - if in == nil { - return nil - } - out := new(ResourceQuotaStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceRequirements) DeepCopyInto(out *ResourceRequirements) { - *out = *in - if in.Limits != nil { - in, out := &in.Limits, &out.Limits - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.Requests != nil { - in, out := &in.Requests, &out.Requests - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRequirements. -func (in *ResourceRequirements) DeepCopy() *ResourceRequirements { - if in == nil { - return nil - } - out := new(ResourceRequirements) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SELinuxOptions) DeepCopyInto(out *SELinuxOptions) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SELinuxOptions. -func (in *SELinuxOptions) DeepCopy() *SELinuxOptions { - if in == nil { - return nil - } - out := new(SELinuxOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ScaleIOVolumeSource) DeepCopyInto(out *ScaleIOVolumeSource) { - *out = *in - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - if *in == nil { - *out = nil - } else { - *out = new(LocalObjectReference) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleIOVolumeSource. 
-func (in *ScaleIOVolumeSource) DeepCopy() *ScaleIOVolumeSource { - if in == nil { - return nil - } - out := new(ScaleIOVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Secret) DeepCopyInto(out *Secret) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Data != nil { - in, out := &in.Data, &out.Data - *out = make(map[string][]byte, len(*in)) - for key, val := range *in { - if val == nil { - (*out)[key] = nil - } else { - (*out)[key] = make([]byte, len(val)) - copy((*out)[key], val) - } - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Secret. -func (in *Secret) DeepCopy() *Secret { - if in == nil { - return nil - } - out := new(Secret) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Secret) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SecretEnvSource) DeepCopyInto(out *SecretEnvSource) { - *out = *in - out.LocalObjectReference = in.LocalObjectReference - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretEnvSource. -func (in *SecretEnvSource) DeepCopy() *SecretEnvSource { - if in == nil { - return nil - } - out := new(SecretEnvSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SecretKeySelector) DeepCopyInto(out *SecretKeySelector) { - *out = *in - out.LocalObjectReference = in.LocalObjectReference - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretKeySelector. -func (in *SecretKeySelector) DeepCopy() *SecretKeySelector { - if in == nil { - return nil - } - out := new(SecretKeySelector) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SecretList) DeepCopyInto(out *SecretList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Secret, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretList. -func (in *SecretList) DeepCopy() *SecretList { - if in == nil { - return nil - } - out := new(SecretList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *SecretList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. -func (in *SecretProjection) DeepCopyInto(out *SecretProjection) { - *out = *in - out.LocalObjectReference = in.LocalObjectReference - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]KeyToPath, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretProjection. -func (in *SecretProjection) DeepCopy() *SecretProjection { - if in == nil { - return nil - } - out := new(SecretProjection) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SecretReference) DeepCopyInto(out *SecretReference) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretReference. -func (in *SecretReference) DeepCopy() *SecretReference { - if in == nil { - return nil - } - out := new(SecretReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SecretVolumeSource) DeepCopyInto(out *SecretVolumeSource) { - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]KeyToPath, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.DefaultMode != nil { - in, out := &in.DefaultMode, &out.DefaultMode - if *in == nil { - *out = nil - } else { - *out = new(int32) - **out = **in - } - } - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretVolumeSource. -func (in *SecretVolumeSource) DeepCopy() *SecretVolumeSource { - if in == nil { - return nil - } - out := new(SecretVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *SecurityContext) DeepCopyInto(out *SecurityContext) { - *out = *in - if in.Capabilities != nil { - in, out := &in.Capabilities, &out.Capabilities - if *in == nil { - *out = nil - } else { - *out = new(Capabilities) - (*in).DeepCopyInto(*out) - } - } - if in.Privileged != nil { - in, out := &in.Privileged, &out.Privileged - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } - } - if in.SELinuxOptions != nil { - in, out := &in.SELinuxOptions, &out.SELinuxOptions - if *in == nil { - *out = nil - } else { - *out = new(SELinuxOptions) - **out = **in - } - } - if in.RunAsUser != nil { - in, out := &in.RunAsUser, &out.RunAsUser - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } - } - if in.RunAsNonRoot != nil { - in, out := &in.RunAsNonRoot, &out.RunAsNonRoot - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } - } - if in.ReadOnlyRootFilesystem != nil { - in, out := &in.ReadOnlyRootFilesystem, &out.ReadOnlyRootFilesystem - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } - } - if in.AllowPrivilegeEscalation != nil { - in, out := &in.AllowPrivilegeEscalation, &out.AllowPrivilegeEscalation - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityContext. -func (in *SecurityContext) DeepCopy() *SecurityContext { - if in == nil { - return nil - } - out := new(SecurityContext) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SerializedReference) DeepCopyInto(out *SerializedReference) { - *out = *in - out.TypeMeta = in.TypeMeta - out.Reference = in.Reference - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializedReference. -func (in *SerializedReference) DeepCopy() *SerializedReference { - if in == nil { - return nil - } - out := new(SerializedReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *SerializedReference) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Service) DeepCopyInto(out *Service) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Service. -func (in *Service) DeepCopy() *Service { - if in == nil { - return nil - } - out := new(Service) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Service) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ServiceAccount) DeepCopyInto(out *ServiceAccount) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Secrets != nil { - in, out := &in.Secrets, &out.Secrets - *out = make([]ObjectReference, len(*in)) - copy(*out, *in) - } - if in.ImagePullSecrets != nil { - in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]LocalObjectReference, len(*in)) - copy(*out, *in) - } - if in.AutomountServiceAccountToken != nil { - in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccount. -func (in *ServiceAccount) DeepCopy() *ServiceAccount { - if in == nil { - return nil - } - out := new(ServiceAccount) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ServiceAccount) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceAccountList) DeepCopyInto(out *ServiceAccountList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ServiceAccount, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountList. -func (in *ServiceAccountList) DeepCopy() *ServiceAccountList { - if in == nil { - return nil - } - out := new(ServiceAccountList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ServiceAccountList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceList) DeepCopyInto(out *ServiceList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Service, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceList. -func (in *ServiceList) DeepCopy() *ServiceList { - if in == nil { - return nil - } - out := new(ServiceList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ServiceList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServicePort) DeepCopyInto(out *ServicePort) { - *out = *in - out.TargetPort = in.TargetPort - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicePort. 
-func (in *ServicePort) DeepCopy() *ServicePort { - if in == nil { - return nil - } - out := new(ServicePort) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceProxyOptions) DeepCopyInto(out *ServiceProxyOptions) { - *out = *in - out.TypeMeta = in.TypeMeta - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceProxyOptions. -func (in *ServiceProxyOptions) DeepCopy() *ServiceProxyOptions { - if in == nil { - return nil - } - out := new(ServiceProxyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ServiceProxyOptions) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { - *out = *in - if in.Ports != nil { - in, out := &in.Ports, &out.Ports - *out = make([]ServicePort, len(*in)) - copy(*out, *in) - } - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.ExternalIPs != nil { - in, out := &in.ExternalIPs, &out.ExternalIPs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.SessionAffinityConfig != nil { - in, out := &in.SessionAffinityConfig, &out.SessionAffinityConfig - if *in == nil { - *out = nil - } else { - *out = new(SessionAffinityConfig) - (*in).DeepCopyInto(*out) - } - } - if in.LoadBalancerSourceRanges != nil { - in, out := &in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceSpec. -func (in *ServiceSpec) DeepCopy() *ServiceSpec { - if in == nil { - return nil - } - out := new(ServiceSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceStatus) DeepCopyInto(out *ServiceStatus) { - *out = *in - in.LoadBalancer.DeepCopyInto(&out.LoadBalancer) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceStatus. -func (in *ServiceStatus) DeepCopy() *ServiceStatus { - if in == nil { - return nil - } - out := new(ServiceStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SessionAffinityConfig) DeepCopyInto(out *SessionAffinityConfig) { - *out = *in - if in.ClientIP != nil { - in, out := &in.ClientIP, &out.ClientIP - if *in == nil { - *out = nil - } else { - *out = new(ClientIPConfig) - (*in).DeepCopyInto(*out) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SessionAffinityConfig. -func (in *SessionAffinityConfig) DeepCopy() *SessionAffinityConfig { - if in == nil { - return nil - } - out := new(SessionAffinityConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *StorageOSPersistentVolumeSource) DeepCopyInto(out *StorageOSPersistentVolumeSource) { - *out = *in - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - if *in == nil { - *out = nil - } else { - *out = new(ObjectReference) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageOSPersistentVolumeSource. -func (in *StorageOSPersistentVolumeSource) DeepCopy() *StorageOSPersistentVolumeSource { - if in == nil { - return nil - } - out := new(StorageOSPersistentVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StorageOSVolumeSource) DeepCopyInto(out *StorageOSVolumeSource) { - *out = *in - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - if *in == nil { - *out = nil - } else { - *out = new(LocalObjectReference) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageOSVolumeSource. -func (in *StorageOSVolumeSource) DeepCopy() *StorageOSVolumeSource { - if in == nil { - return nil - } - out := new(StorageOSVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Sysctl) DeepCopyInto(out *Sysctl) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Sysctl. -func (in *Sysctl) DeepCopy() *Sysctl { - if in == nil { - return nil - } - out := new(Sysctl) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TCPSocketAction) DeepCopyInto(out *TCPSocketAction) { - *out = *in - out.Port = in.Port - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPSocketAction. -func (in *TCPSocketAction) DeepCopy() *TCPSocketAction { - if in == nil { - return nil - } - out := new(TCPSocketAction) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Taint) DeepCopyInto(out *Taint) { - *out = *in - in.TimeAdded.DeepCopyInto(&out.TimeAdded) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Taint. -func (in *Taint) DeepCopy() *Taint { - if in == nil { - return nil - } - out := new(Taint) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Toleration) DeepCopyInto(out *Toleration) { - *out = *in - if in.TolerationSeconds != nil { - in, out := &in.TolerationSeconds, &out.TolerationSeconds - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Toleration. -func (in *Toleration) DeepCopy() *Toleration { - if in == nil { - return nil - } - out := new(Toleration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Volume) DeepCopyInto(out *Volume) { - *out = *in - in.VolumeSource.DeepCopyInto(&out.VolumeSource) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Volume. -func (in *Volume) DeepCopy() *Volume { - if in == nil { - return nil - } - out := new(Volume) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeMount) DeepCopyInto(out *VolumeMount) { - *out = *in - if in.MountPropagation != nil { - in, out := &in.MountPropagation, &out.MountPropagation - if *in == nil { - *out = nil - } else { - *out = new(MountPropagationMode) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeMount. -func (in *VolumeMount) DeepCopy() *VolumeMount { - if in == nil { - return nil - } - out := new(VolumeMount) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeProjection) DeepCopyInto(out *VolumeProjection) { - *out = *in - if in.Secret != nil { - in, out := &in.Secret, &out.Secret - if *in == nil { - *out = nil - } else { - *out = new(SecretProjection) - (*in).DeepCopyInto(*out) - } - } - if in.DownwardAPI != nil { - in, out := &in.DownwardAPI, &out.DownwardAPI - if *in == nil { - *out = nil - } else { - *out = new(DownwardAPIProjection) - (*in).DeepCopyInto(*out) - } - } - if in.ConfigMap != nil { - in, out := &in.ConfigMap, &out.ConfigMap - if *in == nil { - *out = nil - } else { - *out = new(ConfigMapProjection) - (*in).DeepCopyInto(*out) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeProjection. -func (in *VolumeProjection) DeepCopy() *VolumeProjection { - if in == nil { - return nil - } - out := new(VolumeProjection) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *VolumeSource) DeepCopyInto(out *VolumeSource) { - *out = *in - if in.HostPath != nil { - in, out := &in.HostPath, &out.HostPath - if *in == nil { - *out = nil - } else { - *out = new(HostPathVolumeSource) - (*in).DeepCopyInto(*out) - } - } - if in.EmptyDir != nil { - in, out := &in.EmptyDir, &out.EmptyDir - if *in == nil { - *out = nil - } else { - *out = new(EmptyDirVolumeSource) - (*in).DeepCopyInto(*out) - } - } - if in.GCEPersistentDisk != nil { - in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk - if *in == nil { - *out = nil - } else { - *out = new(GCEPersistentDiskVolumeSource) - **out = **in - } - } - if in.AWSElasticBlockStore != nil { - in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore - if *in == nil { - *out = nil - } else { - *out = new(AWSElasticBlockStoreVolumeSource) - **out = **in - } - } - if in.GitRepo != nil { - in, out := &in.GitRepo, &out.GitRepo - if *in == nil { - *out = nil - } else { - *out = new(GitRepoVolumeSource) - **out = **in - } - } - if in.Secret != nil { - in, out := &in.Secret, &out.Secret - if *in == nil { - *out = nil - } else { - *out = new(SecretVolumeSource) - (*in).DeepCopyInto(*out) - } - } - if in.NFS != nil { - in, out := &in.NFS, &out.NFS - if *in == nil { - *out = nil - } else { - *out = new(NFSVolumeSource) - **out = **in - } - } - if in.ISCSI != nil { - in, out := &in.ISCSI, &out.ISCSI - if *in == nil { - *out = nil - } else { - *out = new(ISCSIVolumeSource) - (*in).DeepCopyInto(*out) - } - } - if in.Glusterfs != nil { - in, out := &in.Glusterfs, &out.Glusterfs - if *in == nil { - *out = nil - } else { - *out = new(GlusterfsVolumeSource) - **out = **in - } - } - if in.PersistentVolumeClaim != nil { - in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim - if *in == nil { - *out = nil - } else { - *out = new(PersistentVolumeClaimVolumeSource) - **out = **in - } - } - if in.RBD != nil { - in, out := &in.RBD, &out.RBD - if *in == nil { - *out = nil - } else { - *out = new(RBDVolumeSource) - (*in).DeepCopyInto(*out) - } - } - if in.Quobyte != nil { - in, out := &in.Quobyte, &out.Quobyte - if *in == nil { - *out = nil - } else { - *out = new(QuobyteVolumeSource) - **out = **in - } - } - if in.FlexVolume != nil { - in, out := &in.FlexVolume, &out.FlexVolume - if *in == nil { - *out = nil - } else { - *out = new(FlexVolumeSource) - (*in).DeepCopyInto(*out) - } - } - if in.Cinder != nil { - in, out := &in.Cinder, &out.Cinder - if *in == nil { - *out = nil - } else { - *out = new(CinderVolumeSource) - **out = **in - } - } - if in.CephFS != nil { - in, out := &in.CephFS, &out.CephFS - if *in == nil { - *out = nil - } else { - *out = new(CephFSVolumeSource) - (*in).DeepCopyInto(*out) - } - } - if in.Flocker != nil { - in, out := &in.Flocker, &out.Flocker - if *in == nil { - *out = nil - } else { - *out = new(FlockerVolumeSource) - **out = **in - } - } - if in.DownwardAPI != nil { - in, out := &in.DownwardAPI, &out.DownwardAPI - if *in == nil { - *out = nil - } else { - *out = new(DownwardAPIVolumeSource) - (*in).DeepCopyInto(*out) - } - } - if in.FC != nil { - in, out := &in.FC, &out.FC - if *in == nil { - *out = nil - } else { - *out = new(FCVolumeSource) - (*in).DeepCopyInto(*out) - } - } - if in.AzureFile != nil { - in, out := &in.AzureFile, &out.AzureFile - if *in == nil { - *out = nil - } else { - *out = new(AzureFileVolumeSource) - **out = **in - } - } - if in.ConfigMap != nil { - in, out := &in.ConfigMap, &out.ConfigMap - if *in == nil { - *out = nil - } else { - *out = new(ConfigMapVolumeSource) - 
(*in).DeepCopyInto(*out) - } - } - if in.VsphereVolume != nil { - in, out := &in.VsphereVolume, &out.VsphereVolume - if *in == nil { - *out = nil - } else { - *out = new(VsphereVirtualDiskVolumeSource) - **out = **in - } - } - if in.AzureDisk != nil { - in, out := &in.AzureDisk, &out.AzureDisk - if *in == nil { - *out = nil - } else { - *out = new(AzureDiskVolumeSource) - (*in).DeepCopyInto(*out) - } - } - if in.PhotonPersistentDisk != nil { - in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk - if *in == nil { - *out = nil - } else { - *out = new(PhotonPersistentDiskVolumeSource) - **out = **in - } - } - if in.Projected != nil { - in, out := &in.Projected, &out.Projected - if *in == nil { - *out = nil - } else { - *out = new(ProjectedVolumeSource) - (*in).DeepCopyInto(*out) - } - } - if in.PortworxVolume != nil { - in, out := &in.PortworxVolume, &out.PortworxVolume - if *in == nil { - *out = nil - } else { - *out = new(PortworxVolumeSource) - **out = **in - } - } - if in.ScaleIO != nil { - in, out := &in.ScaleIO, &out.ScaleIO - if *in == nil { - *out = nil - } else { - *out = new(ScaleIOVolumeSource) - (*in).DeepCopyInto(*out) - } - } - if in.StorageOS != nil { - in, out := &in.StorageOS, &out.StorageOS - if *in == nil { - *out = nil - } else { - *out = new(StorageOSVolumeSource) - (*in).DeepCopyInto(*out) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSource. -func (in *VolumeSource) DeepCopy() *VolumeSource { - if in == nil { - return nil - } - out := new(VolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VsphereVirtualDiskVolumeSource) DeepCopyInto(out *VsphereVirtualDiskVolumeSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VsphereVirtualDiskVolumeSource. -func (in *VsphereVirtualDiskVolumeSource) DeepCopy() *VsphereVirtualDiskVolumeSource { - if in == nil { - return nil - } - out := new(VsphereVirtualDiskVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WeightedPodAffinityTerm) DeepCopyInto(out *WeightedPodAffinityTerm) { - *out = *in - in.PodAffinityTerm.DeepCopyInto(&out.PodAffinityTerm) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeightedPodAffinityTerm. 
-func (in *WeightedPodAffinityTerm) DeepCopy() *WeightedPodAffinityTerm { - if in == nil { - return nil - } - out := new(WeightedPodAffinityTerm) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/abac/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/abac/BUILD index 51caaac29546..c2f5e8198551 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/abac/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/abac/BUILD @@ -13,9 +13,9 @@ go_library( "types.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/kubernetes/pkg/apis/abac", deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/abac/latest/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/abac/latest/BUILD index b8d85b7b6d9a..1caca7263267 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/abac/latest/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/abac/latest/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["latest.go"], + importpath = "k8s.io/kubernetes/pkg/apis/abac/latest", deps = [ "//pkg/apis/abac:go_default_library", "//pkg/apis/abac/v0:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/abac/v0/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/abac/v0/BUILD index 3115c85f9289..ca06165b8736 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/abac/v0/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/abac/v0/BUILD @@ -15,6 +15,7 @@ go_library( "types.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/kubernetes/pkg/apis/abac/v0", deps = [ "//pkg/apis/abac:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -27,6 +28,7 @@ go_library( go_test( name = "go_default_xtest", srcs = ["conversion_test.go"], + importpath = "k8s.io/kubernetes/pkg/apis/abac/v0_test", deps = [ ":go_default_library", "//pkg/apis/abac:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/abac/v0/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/abac/v0/doc.go index a3c17c09a431..9088c157afab 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/abac/v0/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/abac/v0/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +groupName=abac.authorization.kubernetes.io package v0 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/abac/v0/register.go b/vendor/k8s.io/kubernetes/pkg/apis/abac/v0/register.go index 4efcc0929619..9a5aa984549c 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/abac/v0/register.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/abac/v0/register.go @@ -19,7 +19,7 @@ package v0 import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - api "k8s.io/kubernetes/pkg/apis/abac" + "k8s.io/kubernetes/pkg/apis/abac" ) const GroupName = "abac.authorization.kubernetes.io" @@ -29,11 +29,11 @@ var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v0"} func init() { // TODO: Delete this init function, abac should not have its own scheme. - if err := addKnownTypes(api.Scheme); err != nil { + if err := addKnownTypes(abac.Scheme); err != nil { // Programmer error. 
panic(err) } - if err := addConversionFuncs(api.Scheme); err != nil { + if err := addConversionFuncs(abac.Scheme); err != nil { // Programmer error. panic(err) } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/abac/v0/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/abac/v0/zz_generated.deepcopy.go index f2b9974b00bc..1c0fe0c8b823 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/abac/v0/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/abac/v0/zz_generated.deepcopy.go @@ -21,28 +21,9 @@ limitations under the License. package v0 import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Policy).DeepCopyInto(out.(*Policy)) - return nil - }, InType: reflect.TypeOf(&Policy{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Policy) DeepCopyInto(out *Policy) { *out = *in diff --git a/vendor/k8s.io/kubernetes/pkg/apis/abac/v1beta1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/abac/v1beta1/BUILD index 56db2e65bd5c..6fa5b960a721 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/abac/v1beta1/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/abac/v1beta1/BUILD @@ -17,6 +17,7 @@ go_library( "zz_generated.deepcopy.go", "zz_generated.defaults.go", ], + importpath = "k8s.io/kubernetes/pkg/apis/abac/v1beta1", deps = [ "//pkg/apis/abac:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -29,6 +30,7 @@ go_library( go_test( name = "go_default_xtest", srcs = ["conversion_test.go"], + importpath = "k8s.io/kubernetes/pkg/apis/abac/v1beta1_test", deps = [ ":go_default_library", "//pkg/apis/abac:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/abac/v1beta1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/abac/v1beta1/doc.go index 17ae6b53e3cb..46ec997c423e 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/abac/v1beta1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/abac/v1beta1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/abac // +k8s:openapi-gen=true // +k8s:defaulter-gen=TypeMeta diff --git a/vendor/k8s.io/kubernetes/pkg/apis/abac/v1beta1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/abac/v1beta1/register.go index e157a69aac6e..a7fc1158ea0b 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/abac/v1beta1/register.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/abac/v1beta1/register.go @@ -19,7 +19,7 @@ package v1beta1 import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - api "k8s.io/kubernetes/pkg/apis/abac" + "k8s.io/kubernetes/pkg/apis/abac" ) const GroupName = "abac.authorization.kubernetes.io" @@ -29,11 +29,11 @@ var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1 func init() { // TODO: delete this, abac should not have its own scheme. 
- if err := addKnownTypes(api.Scheme); err != nil { + if err := addKnownTypes(abac.Scheme); err != nil { // Programmer error. panic(err) } - if err := addConversionFuncs(api.Scheme); err != nil { + if err := addConversionFuncs(abac.Scheme); err != nil { // Programmer error. panic(err) } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/abac/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/abac/v1beta1/zz_generated.deepcopy.go index 84b36e013ee4..05fbab899e91 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/abac/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/abac/v1beta1/zz_generated.deepcopy.go @@ -21,32 +21,9 @@ limitations under the License. package v1beta1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Policy).DeepCopyInto(out.(*Policy)) - return nil - }, InType: reflect.TypeOf(&Policy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PolicySpec).DeepCopyInto(out.(*PolicySpec)) - return nil - }, InType: reflect.TypeOf(&PolicySpec{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Policy) DeepCopyInto(out *Policy) { *out = *in diff --git a/vendor/k8s.io/kubernetes/pkg/apis/abac/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/abac/zz_generated.deepcopy.go index 799c02723205..dc41bc7cebb1 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/abac/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/abac/zz_generated.deepcopy.go @@ -21,27 +21,9 @@ limitations under the License. package abac import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { - return []conversion.GeneratedDeepCopyFunc{ - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Policy).DeepCopyInto(out.(*Policy)) - return nil - }, InType: reflect.TypeOf(&Policy{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PolicySpec).DeepCopyInto(out.(*PolicySpec)) - return nil - }, InType: reflect.TypeOf(&PolicySpec{})}, - } -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Policy) DeepCopyInto(out *Policy) { *out = *in diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admission/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/admission/BUILD index 2756df077cda..1192fb1fa019 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/admission/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/admission/BUILD @@ -13,12 +13,13 @@ go_library( "types.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/kubernetes/pkg/apis/admission", deps = [ "//pkg/apis/authentication:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", ], ) @@ -35,7 +36,7 @@ filegroup( ":package-srcs", "//pkg/apis/admission/fuzzer:all-srcs", "//pkg/apis/admission/install:all-srcs", - "//pkg/apis/admission/v1alpha1:all-srcs", + "//pkg/apis/admission/v1beta1:all-srcs", ], tags = ["automanaged"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admission/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/admission/doc.go index ca1150bc2756..b5a203700221 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/admission/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/admission/doc.go @@ -14,6 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +groupName=admission.k8s.io package admission diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admission/install/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/admission/install/BUILD index ec8e8cd88a74..a7f52efeebcf 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/admission/install/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/admission/install/BUILD @@ -8,10 +8,11 @@ load( go_library( name = "go_default_library", srcs = ["install.go"], + importpath = "k8s.io/kubernetes/pkg/apis/admission/install", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/admission:go_default_library", - "//pkg/apis/admission/v1alpha1:go_default_library", + "//pkg/apis/admission/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admission/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/admission/install/install.go index f0b0dcf79bdb..404f549c3836 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/admission/install/install.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/admission/install/install.go @@ -23,13 +23,13 @@ import ( "k8s.io/apimachinery/pkg/apimachinery/registered" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/admission" - "k8s.io/kubernetes/pkg/apis/admission/v1alpha1" + "k8s.io/kubernetes/pkg/apis/admission/v1beta1" ) func init() { - Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) + Install(legacyscheme.GroupFactoryRegistry, legacyscheme.Registry, legacyscheme.Scheme) } // Install registers the API group and adds types to a scheme @@ -37,12 +37,12 @@ func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *r if err := 
announced.NewGroupMetaFactory( &announced.GroupMetaFactoryArgs{ GroupName: admission.GroupName, - VersionPreferenceOrder: []string{v1alpha1.SchemeGroupVersion.Version}, + VersionPreferenceOrder: []string{v1beta1.SchemeGroupVersion.Version}, RootScopedKinds: sets.NewString("AdmissionReview"), AddInternalObjectsToScheme: admission.AddToScheme, }, announced.VersionToSchemeFunc{ - v1alpha1.SchemeGroupVersion.Version: v1alpha1.AddToScheme, + v1beta1.SchemeGroupVersion.Version: v1beta1.AddToScheme, }, ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { panic(err) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admission/types.go b/vendor/k8s.io/kubernetes/pkg/apis/admission/types.go index a816fa8d3faa..37822220e9f6 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/admission/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/admission/types.go @@ -19,38 +19,34 @@ package admission import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/pkg/apis/authentication" ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// AdmissionReview describes an admission request. +// AdmissionReview describes an admission review request/response. type AdmissionReview struct { metav1.TypeMeta - // Spec describes the attributes for the admission request. - // Since this admission controller is non-mutating the webhook should avoid setting this in its response to avoid the - // cost of deserializing it. - Spec AdmissionReviewSpec - // Status is filled in by the webhook and indicates whether the admission request should be permitted. - Status AdmissionReviewStatus + // Request describes the attributes for the admission request. + // +optional + Request *AdmissionRequest + + // Response describes the attributes for the admission response. + // +optional + Response *AdmissionResponse } -// AdmissionReviewSpec describes the admission.Attributes for the admission request. -type AdmissionReviewSpec struct { +// AdmissionRequest describes the admission.Attributes for the admission request. +type AdmissionRequest struct { + // UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are + // otherwise identical (parallel requests, requests when earlier requests did not modify etc) + // The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. + // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging. + UID types.UID // Kind is the type of object being manipulated. For example: Pod Kind metav1.GroupVersionKind - // Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and - // rely on the server to generate the name. If that is the case, this method will return the empty string. - Name string - // Namespace is the namespace associated with the request (if any). - Namespace string - // Object is the object from the incoming request prior to default values being applied - Object runtime.Object - // OldObject is the existing object. Only populated for UPDATE requests. - OldObject runtime.Object - // Operation is the operation being performed - Operation Operation // Resource is the name of the resource being requested. This is not the kind. For example: pods Resource metav1.GroupVersionResource // SubResource is the name of the subresource being requested. 
This is a different resource, scoped to the parent @@ -58,21 +54,54 @@ type AdmissionReviewSpec struct { // /pods/foo/status has the resource "pods", the sub resource "status", and the kind "Pod" (because status operates on // pods). The binding resource for a pod though may be /pods/foo/binding, which has resource "pods", subresource // "binding", and kind "Binding". + // +optional SubResource string + // Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and + // rely on the server to generate the name. If that is the case, this method will return the empty string. + // +optional + Name string + // Namespace is the namespace associated with the request (if any). + // +optional + Namespace string + // Operation is the operation being performed + Operation Operation // UserInfo is information about the requesting user UserInfo authentication.UserInfo + // Object is the object from the incoming request prior to default values being applied + // +optional + Object runtime.Object + // OldObject is the existing object. Only populated for UPDATE requests. + // +optional + OldObject runtime.Object } -// AdmissionReviewStatus describes the status of the admission request. -type AdmissionReviewStatus struct { +// AdmissionResponse describes an admission response. +type AdmissionResponse struct { + // UID is an identifier for the individual request/response. + // This should be copied over from the corresponding AdmissionRequest. + UID types.UID // Allowed indicates whether or not the admission request was permitted. Allowed bool // Result contains extra details into why an admission request was denied. // This field IS NOT consulted in any way if "Allowed" is "true". // +optional Result *metav1.Status + // Patch contains the actual patch. Currently we only support a response in the form of JSONPatch, RFC 6902. + // +optional + Patch []byte + // PatchType indicates the form the Patch will take. Currently we only support "JSONPatch". + // +optional + PatchType *PatchType } +// PatchType is the type of patch being used to represent the mutated object +type PatchType string + +// PatchType constants. 
+const ( + PatchTypeJSONPatch PatchType = "JSONPatch" +) + // Operation is the type of resource operation being checked for admission control type Operation string diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admission/v1alpha1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/admission/v1alpha1/BUILD deleted file mode 100644 index 984b3f6cb790..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/admission/v1alpha1/BUILD +++ /dev/null @@ -1,40 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "helpers.go", - "register.go", - "zz_generated.conversion.go", - "zz_generated.defaults.go", - ], - deps = [ - "//pkg/apis/admission:go_default_library", - "//vendor/k8s.io/api/admission/v1alpha1:go_default_library", - "//vendor/k8s.io/api/authentication/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admission/v1alpha1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/admission/v1alpha1/doc.go deleted file mode 100644 index 0e2260fb2def..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/admission/v1alpha1/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/admission -// +k8s:conversion-gen-external-types=../../../../vendor/k8s.io/api/admission/v1alpha1 -// +k8s:defaulter-gen=TypeMeta -// +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/admission/v1alpha1 - -// +groupName=admission.k8s.io -package v1alpha1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admission/v1alpha1/helpers.go b/vendor/k8s.io/kubernetes/pkg/apis/admission/v1alpha1/helpers.go deleted file mode 100644 index ad1e543744cd..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/admission/v1alpha1/helpers.go +++ /dev/null @@ -1,69 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - admissionv1alpha1 "k8s.io/api/admission/v1alpha1" - authenticationv1 "k8s.io/api/authentication/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apiserver/pkg/admission" -) - -// NewAdmissionReview returns an AdmissionReview for the provided admission.Attributes -func NewAdmissionReview(attr admission.Attributes) admissionv1alpha1.AdmissionReview { - gvk := attr.GetKind() - gvr := attr.GetResource() - aUserInfo := attr.GetUserInfo() - userInfo := authenticationv1.UserInfo{ - Extra: make(map[string]authenticationv1.ExtraValue), - Groups: aUserInfo.GetGroups(), - UID: aUserInfo.GetUID(), - Username: aUserInfo.GetName(), - } - - // Convert the extra information in the user object - for key, val := range aUserInfo.GetExtra() { - userInfo.Extra[key] = authenticationv1.ExtraValue(val) - } - - return admissionv1alpha1.AdmissionReview{ - Spec: admissionv1alpha1.AdmissionReviewSpec{ - Name: attr.GetName(), - Namespace: attr.GetNamespace(), - Resource: metav1.GroupVersionResource{ - Group: gvr.Group, - Resource: gvr.Resource, - Version: gvr.Version, - }, - SubResource: attr.GetSubresource(), - Operation: admissionv1alpha1.Operation(attr.GetOperation()), - Object: runtime.RawExtension{ - Object: attr.GetObject(), - }, - OldObject: runtime.RawExtension{ - Object: attr.GetOldObject(), - }, - Kind: metav1.GroupVersionKind{ - Group: gvk.Group, - Kind: gvk.Kind, - Version: gvk.Version, - }, - UserInfo: userInfo, - }, - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admission/v1alpha1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/admission/v1alpha1/register.go deleted file mode 100644 index 8e12bcea04b7..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/admission/v1alpha1/register.go +++ /dev/null @@ -1,45 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - admissionv1alpha1 "k8s.io/api/admission/v1alpha1" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name for this API. -const GroupName = "admission.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - localSchemeBuilder = &admissionv1alpha1.SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -func init() { - // We only register manually written functions here. The registration of the - // generated functions takes place in the generated files. The separation - // makes the code compile even when the generated files are missing. 
- localSchemeBuilder.Register(RegisterDefaults) -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admission/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/admission/v1alpha1/zz_generated.conversion.go deleted file mode 100644 index c96eb913882c..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/admission/v1alpha1/zz_generated.conversion.go +++ /dev/null @@ -1,149 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by conversion-gen. Do not edit it manually! - -package v1alpha1 - -import ( - v1alpha1 "k8s.io/api/admission/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - admission "k8s.io/kubernetes/pkg/apis/admission" - unsafe "unsafe" -) - -func init() { - localSchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. -func RegisterConversions(scheme *runtime.Scheme) error { - return scheme.AddGeneratedConversionFuncs( - Convert_v1alpha1_AdmissionReview_To_admission_AdmissionReview, - Convert_admission_AdmissionReview_To_v1alpha1_AdmissionReview, - Convert_v1alpha1_AdmissionReviewSpec_To_admission_AdmissionReviewSpec, - Convert_admission_AdmissionReviewSpec_To_v1alpha1_AdmissionReviewSpec, - Convert_v1alpha1_AdmissionReviewStatus_To_admission_AdmissionReviewStatus, - Convert_admission_AdmissionReviewStatus_To_v1alpha1_AdmissionReviewStatus, - ) -} - -func autoConvert_v1alpha1_AdmissionReview_To_admission_AdmissionReview(in *v1alpha1.AdmissionReview, out *admission.AdmissionReview, s conversion.Scope) error { - if err := Convert_v1alpha1_AdmissionReviewSpec_To_admission_AdmissionReviewSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1alpha1_AdmissionReviewStatus_To_admission_AdmissionReviewStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_AdmissionReview_To_admission_AdmissionReview is an autogenerated conversion function. -func Convert_v1alpha1_AdmissionReview_To_admission_AdmissionReview(in *v1alpha1.AdmissionReview, out *admission.AdmissionReview, s conversion.Scope) error { - return autoConvert_v1alpha1_AdmissionReview_To_admission_AdmissionReview(in, out, s) -} - -func autoConvert_admission_AdmissionReview_To_v1alpha1_AdmissionReview(in *admission.AdmissionReview, out *v1alpha1.AdmissionReview, s conversion.Scope) error { - if err := Convert_admission_AdmissionReviewSpec_To_v1alpha1_AdmissionReviewSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_admission_AdmissionReviewStatus_To_v1alpha1_AdmissionReviewStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_admission_AdmissionReview_To_v1alpha1_AdmissionReview is an autogenerated conversion function. 
-func Convert_admission_AdmissionReview_To_v1alpha1_AdmissionReview(in *admission.AdmissionReview, out *v1alpha1.AdmissionReview, s conversion.Scope) error { - return autoConvert_admission_AdmissionReview_To_v1alpha1_AdmissionReview(in, out, s) -} - -func autoConvert_v1alpha1_AdmissionReviewSpec_To_admission_AdmissionReviewSpec(in *v1alpha1.AdmissionReviewSpec, out *admission.AdmissionReviewSpec, s conversion.Scope) error { - out.Kind = in.Kind - if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.Object, &out.Object, s); err != nil { - return err - } - if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.OldObject, &out.OldObject, s); err != nil { - return err - } - out.Operation = admission.Operation(in.Operation) - out.Name = in.Name - out.Namespace = in.Namespace - out.Resource = in.Resource - out.SubResource = in.SubResource - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.UserInfo, &out.UserInfo, 0); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_AdmissionReviewSpec_To_admission_AdmissionReviewSpec is an autogenerated conversion function. -func Convert_v1alpha1_AdmissionReviewSpec_To_admission_AdmissionReviewSpec(in *v1alpha1.AdmissionReviewSpec, out *admission.AdmissionReviewSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_AdmissionReviewSpec_To_admission_AdmissionReviewSpec(in, out, s) -} - -func autoConvert_admission_AdmissionReviewSpec_To_v1alpha1_AdmissionReviewSpec(in *admission.AdmissionReviewSpec, out *v1alpha1.AdmissionReviewSpec, s conversion.Scope) error { - out.Kind = in.Kind - out.Name = in.Name - out.Namespace = in.Namespace - if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&in.Object, &out.Object, s); err != nil { - return err - } - if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&in.OldObject, &out.OldObject, s); err != nil { - return err - } - out.Operation = v1alpha1.Operation(in.Operation) - out.Resource = in.Resource - out.SubResource = in.SubResource - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.UserInfo, &out.UserInfo, 0); err != nil { - return err - } - return nil -} - -// Convert_admission_AdmissionReviewSpec_To_v1alpha1_AdmissionReviewSpec is an autogenerated conversion function. -func Convert_admission_AdmissionReviewSpec_To_v1alpha1_AdmissionReviewSpec(in *admission.AdmissionReviewSpec, out *v1alpha1.AdmissionReviewSpec, s conversion.Scope) error { - return autoConvert_admission_AdmissionReviewSpec_To_v1alpha1_AdmissionReviewSpec(in, out, s) -} - -func autoConvert_v1alpha1_AdmissionReviewStatus_To_admission_AdmissionReviewStatus(in *v1alpha1.AdmissionReviewStatus, out *admission.AdmissionReviewStatus, s conversion.Scope) error { - out.Allowed = in.Allowed - out.Result = (*v1.Status)(unsafe.Pointer(in.Result)) - return nil -} - -// Convert_v1alpha1_AdmissionReviewStatus_To_admission_AdmissionReviewStatus is an autogenerated conversion function. 
-func Convert_v1alpha1_AdmissionReviewStatus_To_admission_AdmissionReviewStatus(in *v1alpha1.AdmissionReviewStatus, out *admission.AdmissionReviewStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_AdmissionReviewStatus_To_admission_AdmissionReviewStatus(in, out, s) -} - -func autoConvert_admission_AdmissionReviewStatus_To_v1alpha1_AdmissionReviewStatus(in *admission.AdmissionReviewStatus, out *v1alpha1.AdmissionReviewStatus, s conversion.Scope) error { - out.Allowed = in.Allowed - out.Result = (*v1.Status)(unsafe.Pointer(in.Result)) - return nil -} - -// Convert_admission_AdmissionReviewStatus_To_v1alpha1_AdmissionReviewStatus is an autogenerated conversion function. -func Convert_admission_AdmissionReviewStatus_To_v1alpha1_AdmissionReviewStatus(in *admission.AdmissionReviewStatus, out *v1alpha1.AdmissionReviewStatus, s conversion.Scope) error { - return autoConvert_admission_AdmissionReviewStatus_To_v1alpha1_AdmissionReviewStatus(in, out, s) -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admission/v1beta1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/admission/v1beta1/BUILD new file mode 100644 index 000000000000..cbe9e893b1d0 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/admission/v1beta1/BUILD @@ -0,0 +1,39 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "register.go", + "zz_generated.conversion.go", + "zz_generated.defaults.go", + ], + importpath = "k8s.io/kubernetes/pkg/apis/admission/v1beta1", + deps = [ + "//pkg/apis/admission:go_default_library", + "//vendor/k8s.io/api/admission/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admission/v1beta1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/admission/v1beta1/doc.go new file mode 100644 index 000000000000..d127afc3bc20 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/admission/v1beta1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/admission +// +k8s:conversion-gen-external-types=k8s.io/api/admission/v1beta1 +// +k8s:defaulter-gen=TypeMeta +// +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/admission/v1beta1 + +// +groupName=admission.k8s.io +package v1beta1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admission/v1beta1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/admission/v1beta1/register.go new file mode 100644 index 000000000000..8b3eea5d517c --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/admission/v1beta1/register.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + admissionv1beta1 "k8s.io/api/admission/v1beta1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name for this API. +const GroupName = "admission.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + localSchemeBuilder = &admissionv1beta1.SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(RegisterDefaults) +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admission/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/admission/v1beta1/zz_generated.conversion.go new file mode 100644 index 000000000000..55dec435d60d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/admission/v1beta1/zz_generated.conversion.go @@ -0,0 +1,166 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! 
+ +package v1beta1 + +import ( + v1beta1 "k8s.io/api/admission/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" + admission "k8s.io/kubernetes/pkg/apis/admission" + unsafe "unsafe" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1beta1_AdmissionRequest_To_admission_AdmissionRequest, + Convert_admission_AdmissionRequest_To_v1beta1_AdmissionRequest, + Convert_v1beta1_AdmissionResponse_To_admission_AdmissionResponse, + Convert_admission_AdmissionResponse_To_v1beta1_AdmissionResponse, + Convert_v1beta1_AdmissionReview_To_admission_AdmissionReview, + Convert_admission_AdmissionReview_To_v1beta1_AdmissionReview, + ) +} + +func autoConvert_v1beta1_AdmissionRequest_To_admission_AdmissionRequest(in *v1beta1.AdmissionRequest, out *admission.AdmissionRequest, s conversion.Scope) error { + out.UID = types.UID(in.UID) + out.Kind = in.Kind + out.Resource = in.Resource + out.SubResource = in.SubResource + out.Name = in.Name + out.Namespace = in.Namespace + out.Operation = admission.Operation(in.Operation) + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.UserInfo, &out.UserInfo, 0); err != nil { + return err + } + if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.Object, &out.Object, s); err != nil { + return err + } + if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.OldObject, &out.OldObject, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_AdmissionRequest_To_admission_AdmissionRequest is an autogenerated conversion function. +func Convert_v1beta1_AdmissionRequest_To_admission_AdmissionRequest(in *v1beta1.AdmissionRequest, out *admission.AdmissionRequest, s conversion.Scope) error { + return autoConvert_v1beta1_AdmissionRequest_To_admission_AdmissionRequest(in, out, s) +} + +func autoConvert_admission_AdmissionRequest_To_v1beta1_AdmissionRequest(in *admission.AdmissionRequest, out *v1beta1.AdmissionRequest, s conversion.Scope) error { + out.UID = types.UID(in.UID) + out.Kind = in.Kind + out.Resource = in.Resource + out.SubResource = in.SubResource + out.Name = in.Name + out.Namespace = in.Namespace + out.Operation = v1beta1.Operation(in.Operation) + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.UserInfo, &out.UserInfo, 0); err != nil { + return err + } + if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&in.Object, &out.Object, s); err != nil { + return err + } + if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&in.OldObject, &out.OldObject, s); err != nil { + return err + } + return nil +} + +// Convert_admission_AdmissionRequest_To_v1beta1_AdmissionRequest is an autogenerated conversion function. 
+func Convert_admission_AdmissionRequest_To_v1beta1_AdmissionRequest(in *admission.AdmissionRequest, out *v1beta1.AdmissionRequest, s conversion.Scope) error { + return autoConvert_admission_AdmissionRequest_To_v1beta1_AdmissionRequest(in, out, s) +} + +func autoConvert_v1beta1_AdmissionResponse_To_admission_AdmissionResponse(in *v1beta1.AdmissionResponse, out *admission.AdmissionResponse, s conversion.Scope) error { + out.UID = types.UID(in.UID) + out.Allowed = in.Allowed + out.Result = (*v1.Status)(unsafe.Pointer(in.Result)) + out.Patch = *(*[]byte)(unsafe.Pointer(&in.Patch)) + out.PatchType = (*admission.PatchType)(unsafe.Pointer(in.PatchType)) + return nil +} + +// Convert_v1beta1_AdmissionResponse_To_admission_AdmissionResponse is an autogenerated conversion function. +func Convert_v1beta1_AdmissionResponse_To_admission_AdmissionResponse(in *v1beta1.AdmissionResponse, out *admission.AdmissionResponse, s conversion.Scope) error { + return autoConvert_v1beta1_AdmissionResponse_To_admission_AdmissionResponse(in, out, s) +} + +func autoConvert_admission_AdmissionResponse_To_v1beta1_AdmissionResponse(in *admission.AdmissionResponse, out *v1beta1.AdmissionResponse, s conversion.Scope) error { + out.UID = types.UID(in.UID) + out.Allowed = in.Allowed + out.Result = (*v1.Status)(unsafe.Pointer(in.Result)) + out.Patch = *(*[]byte)(unsafe.Pointer(&in.Patch)) + out.PatchType = (*v1beta1.PatchType)(unsafe.Pointer(in.PatchType)) + return nil +} + +// Convert_admission_AdmissionResponse_To_v1beta1_AdmissionResponse is an autogenerated conversion function. +func Convert_admission_AdmissionResponse_To_v1beta1_AdmissionResponse(in *admission.AdmissionResponse, out *v1beta1.AdmissionResponse, s conversion.Scope) error { + return autoConvert_admission_AdmissionResponse_To_v1beta1_AdmissionResponse(in, out, s) +} + +func autoConvert_v1beta1_AdmissionReview_To_admission_AdmissionReview(in *v1beta1.AdmissionReview, out *admission.AdmissionReview, s conversion.Scope) error { + if in.Request != nil { + in, out := &in.Request, &out.Request + *out = new(admission.AdmissionRequest) + if err := Convert_v1beta1_AdmissionRequest_To_admission_AdmissionRequest(*in, *out, s); err != nil { + return err + } + } else { + out.Request = nil + } + out.Response = (*admission.AdmissionResponse)(unsafe.Pointer(in.Response)) + return nil +} + +// Convert_v1beta1_AdmissionReview_To_admission_AdmissionReview is an autogenerated conversion function. +func Convert_v1beta1_AdmissionReview_To_admission_AdmissionReview(in *v1beta1.AdmissionReview, out *admission.AdmissionReview, s conversion.Scope) error { + return autoConvert_v1beta1_AdmissionReview_To_admission_AdmissionReview(in, out, s) +} + +func autoConvert_admission_AdmissionReview_To_v1beta1_AdmissionReview(in *admission.AdmissionReview, out *v1beta1.AdmissionReview, s conversion.Scope) error { + if in.Request != nil { + in, out := &in.Request, &out.Request + *out = new(v1beta1.AdmissionRequest) + if err := Convert_admission_AdmissionRequest_To_v1beta1_AdmissionRequest(*in, *out, s); err != nil { + return err + } + } else { + out.Request = nil + } + out.Response = (*v1beta1.AdmissionResponse)(unsafe.Pointer(in.Response)) + return nil +} + +// Convert_admission_AdmissionReview_To_v1beta1_AdmissionReview is an autogenerated conversion function. 
+func Convert_admission_AdmissionReview_To_v1beta1_AdmissionReview(in *admission.AdmissionReview, out *v1beta1.AdmissionReview, s conversion.Scope) error { + return autoConvert_admission_AdmissionReview_To_v1beta1_AdmissionReview(in, out, s) +} diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/admission/v1beta1/zz_generated.defaults.go similarity index 100% rename from vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/zz_generated.defaults.go rename to vendor/k8s.io/kubernetes/pkg/apis/admission/v1beta1/zz_generated.defaults.go diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admission/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/admission/zz_generated.deepcopy.go index 22cd49d5e478..3814ccc29205 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/admission/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/admission/zz_generated.deepcopy.go @@ -22,68 +22,15 @@ package admission import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AdmissionReview).DeepCopyInto(out.(*AdmissionReview)) - return nil - }, InType: reflect.TypeOf(&AdmissionReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AdmissionReviewSpec).DeepCopyInto(out.(*AdmissionReviewSpec)) - return nil - }, InType: reflect.TypeOf(&AdmissionReviewSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AdmissionReviewStatus).DeepCopyInto(out.(*AdmissionReviewStatus)) - return nil - }, InType: reflect.TypeOf(&AdmissionReviewStatus{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AdmissionReview) DeepCopyInto(out *AdmissionReview) { - *out = *in - out.TypeMeta = in.TypeMeta - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionReview. -func (in *AdmissionReview) DeepCopy() *AdmissionReview { - if in == nil { - return nil - } - out := new(AdmissionReview) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *AdmissionReview) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *AdmissionReviewSpec) DeepCopyInto(out *AdmissionReviewSpec) { +func (in *AdmissionRequest) DeepCopyInto(out *AdmissionRequest) { *out = *in out.Kind = in.Kind + out.Resource = in.Resource + in.UserInfo.DeepCopyInto(&out.UserInfo) if in.Object == nil { out.Object = nil } else { @@ -94,23 +41,21 @@ func (in *AdmissionReviewSpec) DeepCopyInto(out *AdmissionReviewSpec) { } else { out.OldObject = in.OldObject.DeepCopyObject() } - out.Resource = in.Resource - in.UserInfo.DeepCopyInto(&out.UserInfo) return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionReviewSpec. -func (in *AdmissionReviewSpec) DeepCopy() *AdmissionReviewSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionRequest. +func (in *AdmissionRequest) DeepCopy() *AdmissionRequest { if in == nil { return nil } - out := new(AdmissionReviewSpec) + out := new(AdmissionRequest) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AdmissionReviewStatus) DeepCopyInto(out *AdmissionReviewStatus) { +func (in *AdmissionResponse) DeepCopyInto(out *AdmissionResponse) { *out = *in if in.Result != nil { in, out := &in.Result, &out.Result @@ -121,15 +66,73 @@ func (in *AdmissionReviewStatus) DeepCopyInto(out *AdmissionReviewStatus) { (*in).DeepCopyInto(*out) } } + if in.Patch != nil { + in, out := &in.Patch, &out.Patch + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.PatchType != nil { + in, out := &in.PatchType, &out.PatchType + if *in == nil { + *out = nil + } else { + *out = new(PatchType) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionResponse. +func (in *AdmissionResponse) DeepCopy() *AdmissionResponse { + if in == nil { + return nil + } + out := new(AdmissionResponse) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionReview) DeepCopyInto(out *AdmissionReview) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Request != nil { + in, out := &in.Request, &out.Request + if *in == nil { + *out = nil + } else { + *out = new(AdmissionRequest) + (*in).DeepCopyInto(*out) + } + } + if in.Response != nil { + in, out := &in.Response, &out.Response + if *in == nil { + *out = nil + } else { + *out = new(AdmissionResponse) + (*in).DeepCopyInto(*out) + } + } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionReviewStatus. -func (in *AdmissionReviewStatus) DeepCopy() *AdmissionReviewStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionReview. +func (in *AdmissionReview) DeepCopy() *AdmissionReview { if in == nil { return nil } - out := new(AdmissionReviewStatus) + out := new(AdmissionReview) in.DeepCopyInto(out) return out } + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *AdmissionReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/BUILD index 5d7b1813f8c5..f223ec0a80f4 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/BUILD @@ -13,9 +13,9 @@ go_library( "types.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/kubernetes/pkg/apis/admissionregistration", deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], @@ -35,6 +35,7 @@ filegroup( "//pkg/apis/admissionregistration/fuzzer:all-srcs", "//pkg/apis/admissionregistration/install:all-srcs", "//pkg/apis/admissionregistration/v1alpha1:all-srcs", + "//pkg/apis/admissionregistration/v1beta1:all-srcs", "//pkg/apis/admissionregistration/validation:all-srcs", ], tags = ["automanaged"], diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/doc.go index 2b32705a2aa4..5c0cb5b988b6 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/doc.go @@ -14,11 +14,11 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // Package admissionregistration is the internal version of the API. // AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration -// InitializerConfiguration and ExternalAdmissionHookConfiguration is for the +// InitializerConfiguration, ValidatingWebhookConfiguration, and MutatingWebhookConfiguration are for the // new dynamic admission controller configuration. 
// +groupName=admissionregistration.k8s.io package admissionregistration diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/install/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/install/BUILD index 49d881d0ed0b..04bb06074b41 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/install/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/install/BUILD @@ -8,10 +8,12 @@ load( go_library( name = "go_default_library", srcs = ["install.go"], + importpath = "k8s.io/kubernetes/pkg/apis/admissionregistration/install", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/admissionregistration:go_default_library", "//pkg/apis/admissionregistration/v1alpha1:go_default_library", + "//pkg/apis/admissionregistration/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/install/install.go index b2ff84b3e316..81f4ff2ffc28 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/install/install.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/install/install.go @@ -21,13 +21,14 @@ import ( "k8s.io/apimachinery/pkg/apimachinery/registered" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/admissionregistration" "k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1" + "k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1" ) func init() { - Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) + Install(legacyscheme.GroupFactoryRegistry, legacyscheme.Registry, legacyscheme.Scheme) } // Install registers the API group and adds types to a scheme @@ -35,12 +36,13 @@ func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *r if err := announced.NewGroupMetaFactory( &announced.GroupMetaFactoryArgs{ GroupName: admissionregistration.GroupName, - RootScopedKinds: sets.NewString("InitializerConfiguration", "ExternalAdmissionHookConfiguration"), - VersionPreferenceOrder: []string{v1alpha1.SchemeGroupVersion.Version}, + RootScopedKinds: sets.NewString("InitializerConfiguration", "ValidatingWebhookConfiguration", "MutatingWebhookConfiguration"), + VersionPreferenceOrder: []string{v1beta1.SchemeGroupVersion.Version, v1alpha1.SchemeGroupVersion.Version}, AddInternalObjectsToScheme: admissionregistration.AddToScheme, }, announced.VersionToSchemeFunc{ v1alpha1.SchemeGroupVersion.Version: v1alpha1.AddToScheme, + v1beta1.SchemeGroupVersion.Version: v1beta1.AddToScheme, }, ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { panic(err) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/register.go b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/register.go index 1fe291a8f23f..941259ce6283 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/register.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/register.go @@ -46,8 +46,10 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &InitializerConfiguration{}, &InitializerConfigurationList{}, - 
&ExternalAdmissionHookConfiguration{}, - &ExternalAdmissionHookConfigurationList{}, + &ValidatingWebhookConfiguration{}, + &ValidatingWebhookConfigurationList{}, + &MutatingWebhookConfiguration{}, + &MutatingWebhookConfigurationList{}, ) return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/types.go b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/types.go index b46765600370..091782b81a05 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/types.go @@ -106,7 +106,7 @@ type Rule struct { type FailurePolicyType string const ( - // Ignore means the initilizer is removed from the initializers list of an + // Ignore means the initializer is removed from the initializers list of an // object if the initializer is timed out. Ignore FailurePolicyType = "Ignore" // For 1.7, only "Ignore" is allowed. "Fail" will be allowed when the @@ -118,35 +118,61 @@ const ( // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// ExternalAdmissionHookConfiguration describes the configuration of initializers. -type ExternalAdmissionHookConfiguration struct { +// ValidatingWebhookConfiguration describes the configuration of an admission webhook that accepts or rejects and object without changing it. +type ValidatingWebhookConfiguration struct { metav1.TypeMeta // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. // +optional metav1.ObjectMeta - // ExternalAdmissionHooks is a list of external admission webhooks and the - // affected resources and operations. + // Webhooks is a list of webhooks and the affected resources and operations. // +optional - ExternalAdmissionHooks []ExternalAdmissionHook + Webhooks []Webhook } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// ExternalAdmissionHookConfigurationList is a list of ExternalAdmissionHookConfiguration. -type ExternalAdmissionHookConfigurationList struct { +// ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration. +type ValidatingWebhookConfigurationList struct { metav1.TypeMeta // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional metav1.ListMeta - // List of ExternalAdmissionHookConfiguration. - Items []ExternalAdmissionHookConfiguration + // List of ValidatingWebhookConfigurations. + Items []ValidatingWebhookConfiguration } -// ExternalAdmissionHook describes an external admission webhook and the -// resources and operations it applies to. -type ExternalAdmissionHook struct { - // The name of the external admission webhook. +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. +type MutatingWebhookConfiguration struct { + metav1.TypeMeta + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta + // Webhooks is a list of webhooks and the affected resources and operations. + // +optional + Webhooks []Webhook +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration. 
+type MutatingWebhookConfigurationList struct { + metav1.TypeMeta + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta + // List of MutatingWebhookConfiguration. + Items []MutatingWebhookConfiguration +} + +// Webhook describes an admission webhook and the resources and operations it applies to. +type Webhook struct { + // The name of the admission webhook. // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where // "imagepolicy" is the name of the webhook, and kubernetes.io is the name // of the organization. @@ -155,7 +181,7 @@ type ExternalAdmissionHook struct { // ClientConfig defines how to communicate with the hook. // Required - ClientConfig AdmissionHookClientConfig + ClientConfig WebhookClientConfig // Rules describes what operations on what resources/subresources the webhook cares about. // The webhook cares about an operation if it matches _any_ Rule. @@ -165,6 +191,52 @@ type ExternalAdmissionHook struct { // allowed values are Ignore or Fail. Defaults to Ignore. // +optional FailurePolicy *FailurePolicyType + + // NamespaceSelector decides whether to run the webhook on an object based + // on whether the namespace for that object matches the selector. If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. If the object is other cluster scoped resource, + // it is not subjected to the webhook. + // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the webhook on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. + // +optional + NamespaceSelector *metav1.LabelSelector } // RuleWithOperations is a tuple of Operations and Resources. It is recommended to make @@ -191,25 +263,67 @@ const ( Connect OperationType = "CONNECT" ) -// AdmissionHookClientConfig contains the information to make a TLS +// WebhookClientConfig contains the information to make a TLS // connection with the webhook -type AdmissionHookClientConfig struct { - // Service is a reference to the service for this webhook. If there is only - // one port open for the service, that port will be used. If there are multiple - // ports open, port 443 will be used if it is open, otherwise it is an error. - // Required - Service ServiceReference - // CABundle is a PEM encoded CA bundle which will be used to validate webhook's server certificate. - // Required +type WebhookClientConfig struct { + // `url` gives the location of the webhook, in standard URL form + // (`[scheme://]host:port/path`). Exactly one of `url` or `service` + // must be specified. + // + // The `host` should not refer to a service running in the cluster; use + // the `service` field instead. 
The host might be resolved via external + // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve + // in-cluster DNS as that would be a layering violation). `host` may + // also be an IP address. + // + // Please note that using `localhost` or `127.0.0.1` as a `host` is + // risky unless you take great care to run this webhook on all hosts + // which run an apiserver which might need to make calls to this + // webhook. Such installs are likely to be non-portable, i.e., not easy + // to turn up in a new cluster. + // + // The scheme must be "https"; the URL must begin with "https://". + // + // A path is optional, and if present may be any string permissible in + // a URL. You may use the path to pass an arbitrary string to the + // webhook, for example, a cluster identifier. + // + // Attempting to use a user or basic auth e.g. "user:password@" is not + // allowed. Fragments ("#...") and query parameters ("?...") are not + // allowed, either. + // + // +optional + URL *string + + // `service` is a reference to the service for this webhook. Either + // `service` or `url` must be specified. + // + // If the webhook is running within the cluster, then you should use `service`. + // + // If there is only one port open for the service, that port will be + // used. If there are multiple ports open, port 443 will be used if it + // is open, otherwise it is an error. + // + // +optional + Service *ServiceReference + + // `caBundle` is a PEM encoded CA bundle which will be used to validate + // the webhook's server certificate. + // Required. CABundle []byte } // ServiceReference holds a reference to Service.legacy.k8s.io type ServiceReference struct { - // Namespace is the namespace of the service + // `namespace` is the namespace of the service. // Required Namespace string - // Name is the name of the service + // `name` is the name of the service. // Required Name string + + // `path` is an optional URL path which will be sent in any request to + // this service. + // +optional + Path *string } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1/BUILD index 6126c72af70d..81773c8a9cda 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1/BUILD @@ -14,6 +14,7 @@ go_library( "zz_generated.conversion.go", "zz_generated.defaults.go", ], + importpath = "k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1", deps = [ "//pkg/apis/admissionregistration:go_default_library", "//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1/defaults.go index 92a37ad1ccd7..8949bb87cd71 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1/defaults.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1/defaults.go @@ -17,17 +17,9 @@ limitations under the License. 
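For illustration only, here is a minimal sketch of how the Webhook, WebhookClientConfig, and ServiceReference shapes introduced above might be populated, once with an in-cluster service reference and once with an external URL. It is not taken from the vendored code; the names, namespace, endpoint, and CA placeholder are hypothetical.

// Hypothetical sketch: populating the internal admissionregistration types shown above.
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/apis/admissionregistration"
)

func exampleWebhooks() []admissionregistration.Webhook {
	path := "/validate"                              // optional path appended to the service address
	url := "https://hooks.example.com:8443/validate" // external endpoint; the scheme must be https
	policy := admissionregistration.Fail

	return []admissionregistration.Webhook{
		{
			// In-cluster webhook: exactly one of Service or URL may be set.
			Name: "pods.example.com",
			ClientConfig: admissionregistration.WebhookClientConfig{
				Service:  &admissionregistration.ServiceReference{Namespace: "default", Name: "webhook-svc", Path: &path},
				CABundle: []byte("<PEM encoded CA bundle>"),
			},
			FailurePolicy:     &policy,
			NamespaceSelector: &metav1.LabelSelector{}, // empty selector matches every namespace
		},
		{
			// Out-of-cluster webhook addressed by URL instead of a ServiceReference.
			Name: "images.example.com",
			ClientConfig: admissionregistration.WebhookClientConfig{
				URL:      &url,
				CABundle: []byte("<PEM encoded CA bundle>"),
			},
		},
	}
}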
package v1alpha1 import ( - admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" "k8s.io/apimachinery/pkg/runtime" ) func addDefaultingFuncs(scheme *runtime.Scheme) error { return RegisterDefaults(scheme) } - -func SetDefaults_ExternalAdmissionHook(obj *admissionregistrationv1alpha1.ExternalAdmissionHook) { - if obj.FailurePolicy == nil { - policy := admissionregistrationv1alpha1.Ignore - obj.FailurePolicy = &policy - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1/doc.go index 367688e8ae52..8c05598dc4ca 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1/doc.go @@ -15,13 +15,13 @@ limitations under the License. */ // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/admissionregistration -// +k8s:conversion-gen-external-types=../../../../vendor/k8s.io/api/admissionregistration/v1alpha1 +// +k8s:conversion-gen-external-types=k8s.io/api/admissionregistration/v1alpha1 // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/admissionregistration/v1alpha1 // Package v1alpha1 is the v1alpha1 version of the API. // AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration -// admissionregistrationv1alpha1.InitializerConfiguration and admissionregistrationv1alpha1.ExternalAdmissionHookConfiguration is for the +// InitializerConfiguration, ValidatingWebhookConfiguration, and MutatingWebhookConfiguration are for the // new dynamic admission controller configuration. // +groupName=admissionregistration.k8s.io package v1alpha1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1/zz_generated.conversion.go index 86ef143f0ae7..ef1d0fee81ff 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1/zz_generated.conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1/zz_generated.conversion.go @@ -36,14 +36,6 @@ func init() { // Public to allow building arbitrary schemes. 
func RegisterConversions(scheme *runtime.Scheme) error { return scheme.AddGeneratedConversionFuncs( - Convert_v1alpha1_AdmissionHookClientConfig_To_admissionregistration_AdmissionHookClientConfig, - Convert_admissionregistration_AdmissionHookClientConfig_To_v1alpha1_AdmissionHookClientConfig, - Convert_v1alpha1_ExternalAdmissionHook_To_admissionregistration_ExternalAdmissionHook, - Convert_admissionregistration_ExternalAdmissionHook_To_v1alpha1_ExternalAdmissionHook, - Convert_v1alpha1_ExternalAdmissionHookConfiguration_To_admissionregistration_ExternalAdmissionHookConfiguration, - Convert_admissionregistration_ExternalAdmissionHookConfiguration_To_v1alpha1_ExternalAdmissionHookConfiguration, - Convert_v1alpha1_ExternalAdmissionHookConfigurationList_To_admissionregistration_ExternalAdmissionHookConfigurationList, - Convert_admissionregistration_ExternalAdmissionHookConfigurationList_To_v1alpha1_ExternalAdmissionHookConfigurationList, Convert_v1alpha1_Initializer_To_admissionregistration_Initializer, Convert_admissionregistration_Initializer_To_v1alpha1_Initializer, Convert_v1alpha1_InitializerConfiguration_To_admissionregistration_InitializerConfiguration, @@ -52,113 +44,9 @@ func RegisterConversions(scheme *runtime.Scheme) error { Convert_admissionregistration_InitializerConfigurationList_To_v1alpha1_InitializerConfigurationList, Convert_v1alpha1_Rule_To_admissionregistration_Rule, Convert_admissionregistration_Rule_To_v1alpha1_Rule, - Convert_v1alpha1_RuleWithOperations_To_admissionregistration_RuleWithOperations, - Convert_admissionregistration_RuleWithOperations_To_v1alpha1_RuleWithOperations, - Convert_v1alpha1_ServiceReference_To_admissionregistration_ServiceReference, - Convert_admissionregistration_ServiceReference_To_v1alpha1_ServiceReference, ) } -func autoConvert_v1alpha1_AdmissionHookClientConfig_To_admissionregistration_AdmissionHookClientConfig(in *v1alpha1.AdmissionHookClientConfig, out *admissionregistration.AdmissionHookClientConfig, s conversion.Scope) error { - if err := Convert_v1alpha1_ServiceReference_To_admissionregistration_ServiceReference(&in.Service, &out.Service, s); err != nil { - return err - } - out.CABundle = *(*[]byte)(unsafe.Pointer(&in.CABundle)) - return nil -} - -// Convert_v1alpha1_AdmissionHookClientConfig_To_admissionregistration_AdmissionHookClientConfig is an autogenerated conversion function. -func Convert_v1alpha1_AdmissionHookClientConfig_To_admissionregistration_AdmissionHookClientConfig(in *v1alpha1.AdmissionHookClientConfig, out *admissionregistration.AdmissionHookClientConfig, s conversion.Scope) error { - return autoConvert_v1alpha1_AdmissionHookClientConfig_To_admissionregistration_AdmissionHookClientConfig(in, out, s) -} - -func autoConvert_admissionregistration_AdmissionHookClientConfig_To_v1alpha1_AdmissionHookClientConfig(in *admissionregistration.AdmissionHookClientConfig, out *v1alpha1.AdmissionHookClientConfig, s conversion.Scope) error { - if err := Convert_admissionregistration_ServiceReference_To_v1alpha1_ServiceReference(&in.Service, &out.Service, s); err != nil { - return err - } - out.CABundle = *(*[]byte)(unsafe.Pointer(&in.CABundle)) - return nil -} - -// Convert_admissionregistration_AdmissionHookClientConfig_To_v1alpha1_AdmissionHookClientConfig is an autogenerated conversion function. 
-func Convert_admissionregistration_AdmissionHookClientConfig_To_v1alpha1_AdmissionHookClientConfig(in *admissionregistration.AdmissionHookClientConfig, out *v1alpha1.AdmissionHookClientConfig, s conversion.Scope) error { - return autoConvert_admissionregistration_AdmissionHookClientConfig_To_v1alpha1_AdmissionHookClientConfig(in, out, s) -} - -func autoConvert_v1alpha1_ExternalAdmissionHook_To_admissionregistration_ExternalAdmissionHook(in *v1alpha1.ExternalAdmissionHook, out *admissionregistration.ExternalAdmissionHook, s conversion.Scope) error { - out.Name = in.Name - if err := Convert_v1alpha1_AdmissionHookClientConfig_To_admissionregistration_AdmissionHookClientConfig(&in.ClientConfig, &out.ClientConfig, s); err != nil { - return err - } - out.Rules = *(*[]admissionregistration.RuleWithOperations)(unsafe.Pointer(&in.Rules)) - out.FailurePolicy = (*admissionregistration.FailurePolicyType)(unsafe.Pointer(in.FailurePolicy)) - return nil -} - -// Convert_v1alpha1_ExternalAdmissionHook_To_admissionregistration_ExternalAdmissionHook is an autogenerated conversion function. -func Convert_v1alpha1_ExternalAdmissionHook_To_admissionregistration_ExternalAdmissionHook(in *v1alpha1.ExternalAdmissionHook, out *admissionregistration.ExternalAdmissionHook, s conversion.Scope) error { - return autoConvert_v1alpha1_ExternalAdmissionHook_To_admissionregistration_ExternalAdmissionHook(in, out, s) -} - -func autoConvert_admissionregistration_ExternalAdmissionHook_To_v1alpha1_ExternalAdmissionHook(in *admissionregistration.ExternalAdmissionHook, out *v1alpha1.ExternalAdmissionHook, s conversion.Scope) error { - out.Name = in.Name - if err := Convert_admissionregistration_AdmissionHookClientConfig_To_v1alpha1_AdmissionHookClientConfig(&in.ClientConfig, &out.ClientConfig, s); err != nil { - return err - } - out.Rules = *(*[]v1alpha1.RuleWithOperations)(unsafe.Pointer(&in.Rules)) - out.FailurePolicy = (*v1alpha1.FailurePolicyType)(unsafe.Pointer(in.FailurePolicy)) - return nil -} - -// Convert_admissionregistration_ExternalAdmissionHook_To_v1alpha1_ExternalAdmissionHook is an autogenerated conversion function. -func Convert_admissionregistration_ExternalAdmissionHook_To_v1alpha1_ExternalAdmissionHook(in *admissionregistration.ExternalAdmissionHook, out *v1alpha1.ExternalAdmissionHook, s conversion.Scope) error { - return autoConvert_admissionregistration_ExternalAdmissionHook_To_v1alpha1_ExternalAdmissionHook(in, out, s) -} - -func autoConvert_v1alpha1_ExternalAdmissionHookConfiguration_To_admissionregistration_ExternalAdmissionHookConfiguration(in *v1alpha1.ExternalAdmissionHookConfiguration, out *admissionregistration.ExternalAdmissionHookConfiguration, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.ExternalAdmissionHooks = *(*[]admissionregistration.ExternalAdmissionHook)(unsafe.Pointer(&in.ExternalAdmissionHooks)) - return nil -} - -// Convert_v1alpha1_ExternalAdmissionHookConfiguration_To_admissionregistration_ExternalAdmissionHookConfiguration is an autogenerated conversion function. 
-func Convert_v1alpha1_ExternalAdmissionHookConfiguration_To_admissionregistration_ExternalAdmissionHookConfiguration(in *v1alpha1.ExternalAdmissionHookConfiguration, out *admissionregistration.ExternalAdmissionHookConfiguration, s conversion.Scope) error { - return autoConvert_v1alpha1_ExternalAdmissionHookConfiguration_To_admissionregistration_ExternalAdmissionHookConfiguration(in, out, s) -} - -func autoConvert_admissionregistration_ExternalAdmissionHookConfiguration_To_v1alpha1_ExternalAdmissionHookConfiguration(in *admissionregistration.ExternalAdmissionHookConfiguration, out *v1alpha1.ExternalAdmissionHookConfiguration, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.ExternalAdmissionHooks = *(*[]v1alpha1.ExternalAdmissionHook)(unsafe.Pointer(&in.ExternalAdmissionHooks)) - return nil -} - -// Convert_admissionregistration_ExternalAdmissionHookConfiguration_To_v1alpha1_ExternalAdmissionHookConfiguration is an autogenerated conversion function. -func Convert_admissionregistration_ExternalAdmissionHookConfiguration_To_v1alpha1_ExternalAdmissionHookConfiguration(in *admissionregistration.ExternalAdmissionHookConfiguration, out *v1alpha1.ExternalAdmissionHookConfiguration, s conversion.Scope) error { - return autoConvert_admissionregistration_ExternalAdmissionHookConfiguration_To_v1alpha1_ExternalAdmissionHookConfiguration(in, out, s) -} - -func autoConvert_v1alpha1_ExternalAdmissionHookConfigurationList_To_admissionregistration_ExternalAdmissionHookConfigurationList(in *v1alpha1.ExternalAdmissionHookConfigurationList, out *admissionregistration.ExternalAdmissionHookConfigurationList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]admissionregistration.ExternalAdmissionHookConfiguration)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1alpha1_ExternalAdmissionHookConfigurationList_To_admissionregistration_ExternalAdmissionHookConfigurationList is an autogenerated conversion function. -func Convert_v1alpha1_ExternalAdmissionHookConfigurationList_To_admissionregistration_ExternalAdmissionHookConfigurationList(in *v1alpha1.ExternalAdmissionHookConfigurationList, out *admissionregistration.ExternalAdmissionHookConfigurationList, s conversion.Scope) error { - return autoConvert_v1alpha1_ExternalAdmissionHookConfigurationList_To_admissionregistration_ExternalAdmissionHookConfigurationList(in, out, s) -} - -func autoConvert_admissionregistration_ExternalAdmissionHookConfigurationList_To_v1alpha1_ExternalAdmissionHookConfigurationList(in *admissionregistration.ExternalAdmissionHookConfigurationList, out *v1alpha1.ExternalAdmissionHookConfigurationList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha1.ExternalAdmissionHookConfiguration)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_admissionregistration_ExternalAdmissionHookConfigurationList_To_v1alpha1_ExternalAdmissionHookConfigurationList is an autogenerated conversion function. 
-func Convert_admissionregistration_ExternalAdmissionHookConfigurationList_To_v1alpha1_ExternalAdmissionHookConfigurationList(in *admissionregistration.ExternalAdmissionHookConfigurationList, out *v1alpha1.ExternalAdmissionHookConfigurationList, s conversion.Scope) error { - return autoConvert_admissionregistration_ExternalAdmissionHookConfigurationList_To_v1alpha1_ExternalAdmissionHookConfigurationList(in, out, s) -} - func autoConvert_v1alpha1_Initializer_To_admissionregistration_Initializer(in *v1alpha1.Initializer, out *admissionregistration.Initializer, s conversion.Scope) error { out.Name = in.Name out.Rules = *(*[]admissionregistration.Rule)(unsafe.Pointer(&in.Rules)) @@ -248,51 +136,3 @@ func autoConvert_admissionregistration_Rule_To_v1alpha1_Rule(in *admissionregist func Convert_admissionregistration_Rule_To_v1alpha1_Rule(in *admissionregistration.Rule, out *v1alpha1.Rule, s conversion.Scope) error { return autoConvert_admissionregistration_Rule_To_v1alpha1_Rule(in, out, s) } - -func autoConvert_v1alpha1_RuleWithOperations_To_admissionregistration_RuleWithOperations(in *v1alpha1.RuleWithOperations, out *admissionregistration.RuleWithOperations, s conversion.Scope) error { - out.Operations = *(*[]admissionregistration.OperationType)(unsafe.Pointer(&in.Operations)) - if err := Convert_v1alpha1_Rule_To_admissionregistration_Rule(&in.Rule, &out.Rule, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_RuleWithOperations_To_admissionregistration_RuleWithOperations is an autogenerated conversion function. -func Convert_v1alpha1_RuleWithOperations_To_admissionregistration_RuleWithOperations(in *v1alpha1.RuleWithOperations, out *admissionregistration.RuleWithOperations, s conversion.Scope) error { - return autoConvert_v1alpha1_RuleWithOperations_To_admissionregistration_RuleWithOperations(in, out, s) -} - -func autoConvert_admissionregistration_RuleWithOperations_To_v1alpha1_RuleWithOperations(in *admissionregistration.RuleWithOperations, out *v1alpha1.RuleWithOperations, s conversion.Scope) error { - out.Operations = *(*[]v1alpha1.OperationType)(unsafe.Pointer(&in.Operations)) - if err := Convert_admissionregistration_Rule_To_v1alpha1_Rule(&in.Rule, &out.Rule, s); err != nil { - return err - } - return nil -} - -// Convert_admissionregistration_RuleWithOperations_To_v1alpha1_RuleWithOperations is an autogenerated conversion function. -func Convert_admissionregistration_RuleWithOperations_To_v1alpha1_RuleWithOperations(in *admissionregistration.RuleWithOperations, out *v1alpha1.RuleWithOperations, s conversion.Scope) error { - return autoConvert_admissionregistration_RuleWithOperations_To_v1alpha1_RuleWithOperations(in, out, s) -} - -func autoConvert_v1alpha1_ServiceReference_To_admissionregistration_ServiceReference(in *v1alpha1.ServiceReference, out *admissionregistration.ServiceReference, s conversion.Scope) error { - out.Namespace = in.Namespace - out.Name = in.Name - return nil -} - -// Convert_v1alpha1_ServiceReference_To_admissionregistration_ServiceReference is an autogenerated conversion function. 
-func Convert_v1alpha1_ServiceReference_To_admissionregistration_ServiceReference(in *v1alpha1.ServiceReference, out *admissionregistration.ServiceReference, s conversion.Scope) error { - return autoConvert_v1alpha1_ServiceReference_To_admissionregistration_ServiceReference(in, out, s) -} - -func autoConvert_admissionregistration_ServiceReference_To_v1alpha1_ServiceReference(in *admissionregistration.ServiceReference, out *v1alpha1.ServiceReference, s conversion.Scope) error { - out.Namespace = in.Namespace - out.Name = in.Name - return nil -} - -// Convert_admissionregistration_ServiceReference_To_v1alpha1_ServiceReference is an autogenerated conversion function. -func Convert_admissionregistration_ServiceReference_To_v1alpha1_ServiceReference(in *admissionregistration.ServiceReference, out *v1alpha1.ServiceReference, s conversion.Scope) error { - return autoConvert_admissionregistration_ServiceReference_To_v1alpha1_ServiceReference(in, out, s) -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1/zz_generated.defaults.go index da214c5d95f3..7e6df29d4ae5 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1/zz_generated.defaults.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1/zz_generated.defaults.go @@ -21,7 +21,6 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -29,25 +28,5 @@ import ( // Public to allow building arbitrary schemes. // All generated defaulters are covering - they call all nested defaulters. func RegisterDefaults(scheme *runtime.Scheme) error { - scheme.AddTypeDefaultingFunc(&v1alpha1.ExternalAdmissionHookConfiguration{}, func(obj interface{}) { - SetObjectDefaults_ExternalAdmissionHookConfiguration(obj.(*v1alpha1.ExternalAdmissionHookConfiguration)) - }) - scheme.AddTypeDefaultingFunc(&v1alpha1.ExternalAdmissionHookConfigurationList{}, func(obj interface{}) { - SetObjectDefaults_ExternalAdmissionHookConfigurationList(obj.(*v1alpha1.ExternalAdmissionHookConfigurationList)) - }) return nil } - -func SetObjectDefaults_ExternalAdmissionHookConfiguration(in *v1alpha1.ExternalAdmissionHookConfiguration) { - for i := range in.ExternalAdmissionHooks { - a := &in.ExternalAdmissionHooks[i] - SetDefaults_ExternalAdmissionHook(a) - } -} - -func SetObjectDefaults_ExternalAdmissionHookConfigurationList(in *v1alpha1.ExternalAdmissionHookConfigurationList) { - for i := range in.Items { - a := &in.Items[i] - SetObjectDefaults_ExternalAdmissionHookConfiguration(a) - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1/BUILD new file mode 100644 index 000000000000..414f877488fb --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1/BUILD @@ -0,0 +1,39 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "defaults.go", + "doc.go", + "register.go", + "zz_generated.conversion.go", + "zz_generated.defaults.go", + ], + importpath = "k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1", + deps = [ + "//pkg/apis/admissionregistration:go_default_library", + "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", + 
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1/defaults.go new file mode 100644 index 000000000000..907f7d9f31df --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1/defaults.go @@ -0,0 +1,38 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + return RegisterDefaults(scheme) +} + +func SetDefaults_Webhook(obj *admissionregistrationv1beta1.Webhook) { + if obj.FailurePolicy == nil { + policy := admissionregistrationv1beta1.Ignore + obj.FailurePolicy = &policy + } + if obj.NamespaceSelector == nil { + selector := metav1.LabelSelector{} + obj.NamespaceSelector = &selector + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1/doc.go new file mode 100644 index 000000000000..9b918a625861 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1/doc.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/admissionregistration +// +k8s:conversion-gen-external-types=k8s.io/api/admissionregistration/v1beta1 +// +k8s:defaulter-gen=TypeMeta +// +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/admissionregistration/v1beta1 + +// Package v1beta1 is the v1beta1 version of the API. +// AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration +// InitializerConfiguration, ValidatingWebhookConfiguration, and MutatingWebhookConfiguration are for the +// new dynamic admission controller configuration. 
+// +groupName=admissionregistration.k8s.io +package v1beta1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1/register.go new file mode 100644 index 000000000000..ff505a577e14 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1/register.go @@ -0,0 +1,44 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const GroupName = "admissionregistration.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + localSchemeBuilder = &admissionregistrationv1beta1.SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addDefaultingFuncs) +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1/zz_generated.conversion.go new file mode 100644 index 000000000000..ec0300f27a7e --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1/zz_generated.conversion.go @@ -0,0 +1,277 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/admissionregistration/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + admissionregistration "k8s.io/kubernetes/pkg/apis/admissionregistration" + unsafe "unsafe" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. 
+// Public to allow building arbitrary schemes. +func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1beta1_MutatingWebhookConfiguration_To_admissionregistration_MutatingWebhookConfiguration, + Convert_admissionregistration_MutatingWebhookConfiguration_To_v1beta1_MutatingWebhookConfiguration, + Convert_v1beta1_MutatingWebhookConfigurationList_To_admissionregistration_MutatingWebhookConfigurationList, + Convert_admissionregistration_MutatingWebhookConfigurationList_To_v1beta1_MutatingWebhookConfigurationList, + Convert_v1beta1_Rule_To_admissionregistration_Rule, + Convert_admissionregistration_Rule_To_v1beta1_Rule, + Convert_v1beta1_RuleWithOperations_To_admissionregistration_RuleWithOperations, + Convert_admissionregistration_RuleWithOperations_To_v1beta1_RuleWithOperations, + Convert_v1beta1_ServiceReference_To_admissionregistration_ServiceReference, + Convert_admissionregistration_ServiceReference_To_v1beta1_ServiceReference, + Convert_v1beta1_ValidatingWebhookConfiguration_To_admissionregistration_ValidatingWebhookConfiguration, + Convert_admissionregistration_ValidatingWebhookConfiguration_To_v1beta1_ValidatingWebhookConfiguration, + Convert_v1beta1_ValidatingWebhookConfigurationList_To_admissionregistration_ValidatingWebhookConfigurationList, + Convert_admissionregistration_ValidatingWebhookConfigurationList_To_v1beta1_ValidatingWebhookConfigurationList, + Convert_v1beta1_Webhook_To_admissionregistration_Webhook, + Convert_admissionregistration_Webhook_To_v1beta1_Webhook, + Convert_v1beta1_WebhookClientConfig_To_admissionregistration_WebhookClientConfig, + Convert_admissionregistration_WebhookClientConfig_To_v1beta1_WebhookClientConfig, + ) +} + +func autoConvert_v1beta1_MutatingWebhookConfiguration_To_admissionregistration_MutatingWebhookConfiguration(in *v1beta1.MutatingWebhookConfiguration, out *admissionregistration.MutatingWebhookConfiguration, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Webhooks = *(*[]admissionregistration.Webhook)(unsafe.Pointer(&in.Webhooks)) + return nil +} + +// Convert_v1beta1_MutatingWebhookConfiguration_To_admissionregistration_MutatingWebhookConfiguration is an autogenerated conversion function. +func Convert_v1beta1_MutatingWebhookConfiguration_To_admissionregistration_MutatingWebhookConfiguration(in *v1beta1.MutatingWebhookConfiguration, out *admissionregistration.MutatingWebhookConfiguration, s conversion.Scope) error { + return autoConvert_v1beta1_MutatingWebhookConfiguration_To_admissionregistration_MutatingWebhookConfiguration(in, out, s) +} + +func autoConvert_admissionregistration_MutatingWebhookConfiguration_To_v1beta1_MutatingWebhookConfiguration(in *admissionregistration.MutatingWebhookConfiguration, out *v1beta1.MutatingWebhookConfiguration, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Webhooks = *(*[]v1beta1.Webhook)(unsafe.Pointer(&in.Webhooks)) + return nil +} + +// Convert_admissionregistration_MutatingWebhookConfiguration_To_v1beta1_MutatingWebhookConfiguration is an autogenerated conversion function. 
+func Convert_admissionregistration_MutatingWebhookConfiguration_To_v1beta1_MutatingWebhookConfiguration(in *admissionregistration.MutatingWebhookConfiguration, out *v1beta1.MutatingWebhookConfiguration, s conversion.Scope) error { + return autoConvert_admissionregistration_MutatingWebhookConfiguration_To_v1beta1_MutatingWebhookConfiguration(in, out, s) +} + +func autoConvert_v1beta1_MutatingWebhookConfigurationList_To_admissionregistration_MutatingWebhookConfigurationList(in *v1beta1.MutatingWebhookConfigurationList, out *admissionregistration.MutatingWebhookConfigurationList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]admissionregistration.MutatingWebhookConfiguration)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1beta1_MutatingWebhookConfigurationList_To_admissionregistration_MutatingWebhookConfigurationList is an autogenerated conversion function. +func Convert_v1beta1_MutatingWebhookConfigurationList_To_admissionregistration_MutatingWebhookConfigurationList(in *v1beta1.MutatingWebhookConfigurationList, out *admissionregistration.MutatingWebhookConfigurationList, s conversion.Scope) error { + return autoConvert_v1beta1_MutatingWebhookConfigurationList_To_admissionregistration_MutatingWebhookConfigurationList(in, out, s) +} + +func autoConvert_admissionregistration_MutatingWebhookConfigurationList_To_v1beta1_MutatingWebhookConfigurationList(in *admissionregistration.MutatingWebhookConfigurationList, out *v1beta1.MutatingWebhookConfigurationList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1beta1.MutatingWebhookConfiguration)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_admissionregistration_MutatingWebhookConfigurationList_To_v1beta1_MutatingWebhookConfigurationList is an autogenerated conversion function. +func Convert_admissionregistration_MutatingWebhookConfigurationList_To_v1beta1_MutatingWebhookConfigurationList(in *admissionregistration.MutatingWebhookConfigurationList, out *v1beta1.MutatingWebhookConfigurationList, s conversion.Scope) error { + return autoConvert_admissionregistration_MutatingWebhookConfigurationList_To_v1beta1_MutatingWebhookConfigurationList(in, out, s) +} + +func autoConvert_v1beta1_Rule_To_admissionregistration_Rule(in *v1beta1.Rule, out *admissionregistration.Rule, s conversion.Scope) error { + out.APIGroups = *(*[]string)(unsafe.Pointer(&in.APIGroups)) + out.APIVersions = *(*[]string)(unsafe.Pointer(&in.APIVersions)) + out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources)) + return nil +} + +// Convert_v1beta1_Rule_To_admissionregistration_Rule is an autogenerated conversion function. +func Convert_v1beta1_Rule_To_admissionregistration_Rule(in *v1beta1.Rule, out *admissionregistration.Rule, s conversion.Scope) error { + return autoConvert_v1beta1_Rule_To_admissionregistration_Rule(in, out, s) +} + +func autoConvert_admissionregistration_Rule_To_v1beta1_Rule(in *admissionregistration.Rule, out *v1beta1.Rule, s conversion.Scope) error { + out.APIGroups = *(*[]string)(unsafe.Pointer(&in.APIGroups)) + out.APIVersions = *(*[]string)(unsafe.Pointer(&in.APIVersions)) + out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources)) + return nil +} + +// Convert_admissionregistration_Rule_To_v1beta1_Rule is an autogenerated conversion function. 
+func Convert_admissionregistration_Rule_To_v1beta1_Rule(in *admissionregistration.Rule, out *v1beta1.Rule, s conversion.Scope) error { + return autoConvert_admissionregistration_Rule_To_v1beta1_Rule(in, out, s) +} + +func autoConvert_v1beta1_RuleWithOperations_To_admissionregistration_RuleWithOperations(in *v1beta1.RuleWithOperations, out *admissionregistration.RuleWithOperations, s conversion.Scope) error { + out.Operations = *(*[]admissionregistration.OperationType)(unsafe.Pointer(&in.Operations)) + if err := Convert_v1beta1_Rule_To_admissionregistration_Rule(&in.Rule, &out.Rule, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_RuleWithOperations_To_admissionregistration_RuleWithOperations is an autogenerated conversion function. +func Convert_v1beta1_RuleWithOperations_To_admissionregistration_RuleWithOperations(in *v1beta1.RuleWithOperations, out *admissionregistration.RuleWithOperations, s conversion.Scope) error { + return autoConvert_v1beta1_RuleWithOperations_To_admissionregistration_RuleWithOperations(in, out, s) +} + +func autoConvert_admissionregistration_RuleWithOperations_To_v1beta1_RuleWithOperations(in *admissionregistration.RuleWithOperations, out *v1beta1.RuleWithOperations, s conversion.Scope) error { + out.Operations = *(*[]v1beta1.OperationType)(unsafe.Pointer(&in.Operations)) + if err := Convert_admissionregistration_Rule_To_v1beta1_Rule(&in.Rule, &out.Rule, s); err != nil { + return err + } + return nil +} + +// Convert_admissionregistration_RuleWithOperations_To_v1beta1_RuleWithOperations is an autogenerated conversion function. +func Convert_admissionregistration_RuleWithOperations_To_v1beta1_RuleWithOperations(in *admissionregistration.RuleWithOperations, out *v1beta1.RuleWithOperations, s conversion.Scope) error { + return autoConvert_admissionregistration_RuleWithOperations_To_v1beta1_RuleWithOperations(in, out, s) +} + +func autoConvert_v1beta1_ServiceReference_To_admissionregistration_ServiceReference(in *v1beta1.ServiceReference, out *admissionregistration.ServiceReference, s conversion.Scope) error { + out.Namespace = in.Namespace + out.Name = in.Name + out.Path = (*string)(unsafe.Pointer(in.Path)) + return nil +} + +// Convert_v1beta1_ServiceReference_To_admissionregistration_ServiceReference is an autogenerated conversion function. +func Convert_v1beta1_ServiceReference_To_admissionregistration_ServiceReference(in *v1beta1.ServiceReference, out *admissionregistration.ServiceReference, s conversion.Scope) error { + return autoConvert_v1beta1_ServiceReference_To_admissionregistration_ServiceReference(in, out, s) +} + +func autoConvert_admissionregistration_ServiceReference_To_v1beta1_ServiceReference(in *admissionregistration.ServiceReference, out *v1beta1.ServiceReference, s conversion.Scope) error { + out.Namespace = in.Namespace + out.Name = in.Name + out.Path = (*string)(unsafe.Pointer(in.Path)) + return nil +} + +// Convert_admissionregistration_ServiceReference_To_v1beta1_ServiceReference is an autogenerated conversion function. 
+func Convert_admissionregistration_ServiceReference_To_v1beta1_ServiceReference(in *admissionregistration.ServiceReference, out *v1beta1.ServiceReference, s conversion.Scope) error { + return autoConvert_admissionregistration_ServiceReference_To_v1beta1_ServiceReference(in, out, s) +} + +func autoConvert_v1beta1_ValidatingWebhookConfiguration_To_admissionregistration_ValidatingWebhookConfiguration(in *v1beta1.ValidatingWebhookConfiguration, out *admissionregistration.ValidatingWebhookConfiguration, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Webhooks = *(*[]admissionregistration.Webhook)(unsafe.Pointer(&in.Webhooks)) + return nil +} + +// Convert_v1beta1_ValidatingWebhookConfiguration_To_admissionregistration_ValidatingWebhookConfiguration is an autogenerated conversion function. +func Convert_v1beta1_ValidatingWebhookConfiguration_To_admissionregistration_ValidatingWebhookConfiguration(in *v1beta1.ValidatingWebhookConfiguration, out *admissionregistration.ValidatingWebhookConfiguration, s conversion.Scope) error { + return autoConvert_v1beta1_ValidatingWebhookConfiguration_To_admissionregistration_ValidatingWebhookConfiguration(in, out, s) +} + +func autoConvert_admissionregistration_ValidatingWebhookConfiguration_To_v1beta1_ValidatingWebhookConfiguration(in *admissionregistration.ValidatingWebhookConfiguration, out *v1beta1.ValidatingWebhookConfiguration, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Webhooks = *(*[]v1beta1.Webhook)(unsafe.Pointer(&in.Webhooks)) + return nil +} + +// Convert_admissionregistration_ValidatingWebhookConfiguration_To_v1beta1_ValidatingWebhookConfiguration is an autogenerated conversion function. +func Convert_admissionregistration_ValidatingWebhookConfiguration_To_v1beta1_ValidatingWebhookConfiguration(in *admissionregistration.ValidatingWebhookConfiguration, out *v1beta1.ValidatingWebhookConfiguration, s conversion.Scope) error { + return autoConvert_admissionregistration_ValidatingWebhookConfiguration_To_v1beta1_ValidatingWebhookConfiguration(in, out, s) +} + +func autoConvert_v1beta1_ValidatingWebhookConfigurationList_To_admissionregistration_ValidatingWebhookConfigurationList(in *v1beta1.ValidatingWebhookConfigurationList, out *admissionregistration.ValidatingWebhookConfigurationList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]admissionregistration.ValidatingWebhookConfiguration)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1beta1_ValidatingWebhookConfigurationList_To_admissionregistration_ValidatingWebhookConfigurationList is an autogenerated conversion function. 
+func Convert_v1beta1_ValidatingWebhookConfigurationList_To_admissionregistration_ValidatingWebhookConfigurationList(in *v1beta1.ValidatingWebhookConfigurationList, out *admissionregistration.ValidatingWebhookConfigurationList, s conversion.Scope) error { + return autoConvert_v1beta1_ValidatingWebhookConfigurationList_To_admissionregistration_ValidatingWebhookConfigurationList(in, out, s) +} + +func autoConvert_admissionregistration_ValidatingWebhookConfigurationList_To_v1beta1_ValidatingWebhookConfigurationList(in *admissionregistration.ValidatingWebhookConfigurationList, out *v1beta1.ValidatingWebhookConfigurationList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1beta1.ValidatingWebhookConfiguration)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_admissionregistration_ValidatingWebhookConfigurationList_To_v1beta1_ValidatingWebhookConfigurationList is an autogenerated conversion function. +func Convert_admissionregistration_ValidatingWebhookConfigurationList_To_v1beta1_ValidatingWebhookConfigurationList(in *admissionregistration.ValidatingWebhookConfigurationList, out *v1beta1.ValidatingWebhookConfigurationList, s conversion.Scope) error { + return autoConvert_admissionregistration_ValidatingWebhookConfigurationList_To_v1beta1_ValidatingWebhookConfigurationList(in, out, s) +} + +func autoConvert_v1beta1_Webhook_To_admissionregistration_Webhook(in *v1beta1.Webhook, out *admissionregistration.Webhook, s conversion.Scope) error { + out.Name = in.Name + if err := Convert_v1beta1_WebhookClientConfig_To_admissionregistration_WebhookClientConfig(&in.ClientConfig, &out.ClientConfig, s); err != nil { + return err + } + out.Rules = *(*[]admissionregistration.RuleWithOperations)(unsafe.Pointer(&in.Rules)) + out.FailurePolicy = (*admissionregistration.FailurePolicyType)(unsafe.Pointer(in.FailurePolicy)) + out.NamespaceSelector = (*v1.LabelSelector)(unsafe.Pointer(in.NamespaceSelector)) + return nil +} + +// Convert_v1beta1_Webhook_To_admissionregistration_Webhook is an autogenerated conversion function. +func Convert_v1beta1_Webhook_To_admissionregistration_Webhook(in *v1beta1.Webhook, out *admissionregistration.Webhook, s conversion.Scope) error { + return autoConvert_v1beta1_Webhook_To_admissionregistration_Webhook(in, out, s) +} + +func autoConvert_admissionregistration_Webhook_To_v1beta1_Webhook(in *admissionregistration.Webhook, out *v1beta1.Webhook, s conversion.Scope) error { + out.Name = in.Name + if err := Convert_admissionregistration_WebhookClientConfig_To_v1beta1_WebhookClientConfig(&in.ClientConfig, &out.ClientConfig, s); err != nil { + return err + } + out.Rules = *(*[]v1beta1.RuleWithOperations)(unsafe.Pointer(&in.Rules)) + out.FailurePolicy = (*v1beta1.FailurePolicyType)(unsafe.Pointer(in.FailurePolicy)) + out.NamespaceSelector = (*v1.LabelSelector)(unsafe.Pointer(in.NamespaceSelector)) + return nil +} + +// Convert_admissionregistration_Webhook_To_v1beta1_Webhook is an autogenerated conversion function. 
+func Convert_admissionregistration_Webhook_To_v1beta1_Webhook(in *admissionregistration.Webhook, out *v1beta1.Webhook, s conversion.Scope) error { + return autoConvert_admissionregistration_Webhook_To_v1beta1_Webhook(in, out, s) +} + +func autoConvert_v1beta1_WebhookClientConfig_To_admissionregistration_WebhookClientConfig(in *v1beta1.WebhookClientConfig, out *admissionregistration.WebhookClientConfig, s conversion.Scope) error { + out.URL = (*string)(unsafe.Pointer(in.URL)) + out.Service = (*admissionregistration.ServiceReference)(unsafe.Pointer(in.Service)) + out.CABundle = *(*[]byte)(unsafe.Pointer(&in.CABundle)) + return nil +} + +// Convert_v1beta1_WebhookClientConfig_To_admissionregistration_WebhookClientConfig is an autogenerated conversion function. +func Convert_v1beta1_WebhookClientConfig_To_admissionregistration_WebhookClientConfig(in *v1beta1.WebhookClientConfig, out *admissionregistration.WebhookClientConfig, s conversion.Scope) error { + return autoConvert_v1beta1_WebhookClientConfig_To_admissionregistration_WebhookClientConfig(in, out, s) +} + +func autoConvert_admissionregistration_WebhookClientConfig_To_v1beta1_WebhookClientConfig(in *admissionregistration.WebhookClientConfig, out *v1beta1.WebhookClientConfig, s conversion.Scope) error { + out.URL = (*string)(unsafe.Pointer(in.URL)) + out.Service = (*v1beta1.ServiceReference)(unsafe.Pointer(in.Service)) + out.CABundle = *(*[]byte)(unsafe.Pointer(&in.CABundle)) + return nil +} + +// Convert_admissionregistration_WebhookClientConfig_To_v1beta1_WebhookClientConfig is an autogenerated conversion function. +func Convert_admissionregistration_WebhookClientConfig_To_v1beta1_WebhookClientConfig(in *admissionregistration.WebhookClientConfig, out *v1beta1.WebhookClientConfig, s conversion.Scope) error { + return autoConvert_admissionregistration_WebhookClientConfig_To_v1beta1_WebhookClientConfig(in, out, s) +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1/zz_generated.defaults.go new file mode 100644 index 000000000000..a88a86e6bf99 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1/zz_generated.defaults.go @@ -0,0 +1,73 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/admissionregistration/v1beta1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. 
+func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&v1beta1.MutatingWebhookConfiguration{}, func(obj interface{}) { + SetObjectDefaults_MutatingWebhookConfiguration(obj.(*v1beta1.MutatingWebhookConfiguration)) + }) + scheme.AddTypeDefaultingFunc(&v1beta1.MutatingWebhookConfigurationList{}, func(obj interface{}) { + SetObjectDefaults_MutatingWebhookConfigurationList(obj.(*v1beta1.MutatingWebhookConfigurationList)) + }) + scheme.AddTypeDefaultingFunc(&v1beta1.ValidatingWebhookConfiguration{}, func(obj interface{}) { + SetObjectDefaults_ValidatingWebhookConfiguration(obj.(*v1beta1.ValidatingWebhookConfiguration)) + }) + scheme.AddTypeDefaultingFunc(&v1beta1.ValidatingWebhookConfigurationList{}, func(obj interface{}) { + SetObjectDefaults_ValidatingWebhookConfigurationList(obj.(*v1beta1.ValidatingWebhookConfigurationList)) + }) + return nil +} + +func SetObjectDefaults_MutatingWebhookConfiguration(in *v1beta1.MutatingWebhookConfiguration) { + for i := range in.Webhooks { + a := &in.Webhooks[i] + SetDefaults_Webhook(a) + } +} + +func SetObjectDefaults_MutatingWebhookConfigurationList(in *v1beta1.MutatingWebhookConfigurationList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_MutatingWebhookConfiguration(a) + } +} + +func SetObjectDefaults_ValidatingWebhookConfiguration(in *v1beta1.ValidatingWebhookConfiguration) { + for i := range in.Webhooks { + a := &in.Webhooks[i] + SetDefaults_Webhook(a) + } +} + +func SetObjectDefaults_ValidatingWebhookConfigurationList(in *v1beta1.ValidatingWebhookConfigurationList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_ValidatingWebhookConfiguration(a) + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/validation/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/validation/BUILD index 00268be3f1e6..fa9e0a0fe95a 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/validation/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/validation/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["validation_test.go"], + importpath = "k8s.io/kubernetes/pkg/apis/admissionregistration/validation", library = ":go_default_library", deps = [ "//pkg/apis/admissionregistration:go_default_library", @@ -19,9 +20,11 @@ go_test( go_library( name = "go_default_library", srcs = ["validation.go"], + importpath = "k8s.io/kubernetes/pkg/apis/admissionregistration/validation", deps = [ "//pkg/apis/admissionregistration:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/validation:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/validation/validation.go b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/validation/validation.go index 07c0bb1b198b..958ebf440227 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/validation/validation.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/validation/validation.go @@ -18,9 +18,11 @@ package validation import ( "fmt" + "net/url" "strings" genericvalidation "k8s.io/apimachinery/pkg/api/validation" + metav1validation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" "k8s.io/apimachinery/pkg/util/sets" 
"k8s.io/apimachinery/pkg/util/validation" "k8s.io/apimachinery/pkg/util/validation/field" @@ -163,15 +165,23 @@ func ValidateInitializerConfigurationUpdate(newIC, oldIC *admissionregistration. return ValidateInitializerConfiguration(newIC) } -func ValidateExternalAdmissionHookConfiguration(e *admissionregistration.ExternalAdmissionHookConfiguration) field.ErrorList { +func ValidateValidatingWebhookConfiguration(e *admissionregistration.ValidatingWebhookConfiguration) field.ErrorList { allErrors := genericvalidation.ValidateObjectMeta(&e.ObjectMeta, false, genericvalidation.NameIsDNSSubdomain, field.NewPath("metadata")) - for i, hook := range e.ExternalAdmissionHooks { - allErrors = append(allErrors, validateExternalAdmissionHook(&hook, field.NewPath("externalAdmissionHooks").Index(i))...) + for i, hook := range e.Webhooks { + allErrors = append(allErrors, validateWebhook(&hook, field.NewPath("webhooks").Index(i))...) } return allErrors } -func validateExternalAdmissionHook(hook *admissionregistration.ExternalAdmissionHook, fldPath *field.Path) field.ErrorList { +func ValidateMutatingWebhookConfiguration(e *admissionregistration.MutatingWebhookConfiguration) field.ErrorList { + allErrors := genericvalidation.ValidateObjectMeta(&e.ObjectMeta, false, genericvalidation.NameIsDNSSubdomain, field.NewPath("metadata")) + for i, hook := range e.Webhooks { + allErrors = append(allErrors, validateWebhook(&hook, field.NewPath("webhooks").Index(i))...) + } + return allErrors +} + +func validateWebhook(hook *admissionregistration.Webhook, fldPath *field.Path) field.ErrorList { var allErrors field.ErrorList // hook.Name must be fully qualified allErrors = append(allErrors, validation.IsFullyQualifiedName(fldPath.Child("name"), hook.Name)...) @@ -179,13 +189,108 @@ func validateExternalAdmissionHook(hook *admissionregistration.ExternalAdmission for i, rule := range hook.Rules { allErrors = append(allErrors, validateRuleWithOperations(&rule, fldPath.Child("rules").Index(i))...) } - // TODO: relax the validation rule when admissionregistration is beta. - if hook.FailurePolicy != nil && *hook.FailurePolicy != admissionregistration.Ignore { - allErrors = append(allErrors, field.NotSupported(fldPath.Child("failurePolicy"), *hook.FailurePolicy, []string{string(admissionregistration.Ignore)})) + if hook.FailurePolicy != nil && !supportedFailurePolicies.Has(string(*hook.FailurePolicy)) { + allErrors = append(allErrors, field.NotSupported(fldPath.Child("failurePolicy"), *hook.FailurePolicy, supportedFailurePolicies.List())) + } + + if hook.NamespaceSelector != nil { + allErrors = append(allErrors, metav1validation.ValidateLabelSelector(hook.NamespaceSelector, fldPath.Child("namespaceSelector"))...) + } + + allErrors = append(allErrors, validateWebhookClientConfig(fldPath.Child("clientConfig"), &hook.ClientConfig)...) 
+ + return allErrors +} + +func validateWebhookClientConfig(fldPath *field.Path, cc *admissionregistration.WebhookClientConfig) field.ErrorList { + var allErrors field.ErrorList + if (cc.URL == nil) == (cc.Service == nil) { + allErrors = append(allErrors, field.Required(fldPath.Child("url"), "exactly one of url or service is required")) + } + + if cc.URL != nil { + const form = "; desired format: https://host[/path]" + if u, err := url.Parse(*cc.URL); err != nil { + allErrors = append(allErrors, field.Required(fldPath.Child("url"), "url must be a valid URL: "+err.Error()+form)) + } else { + if u.Scheme != "https" { + allErrors = append(allErrors, field.Invalid(fldPath.Child("url"), u.Scheme, "'https' is the only allowed URL scheme"+form)) + } + if len(u.Host) == 0 { + allErrors = append(allErrors, field.Invalid(fldPath.Child("url"), u.Host, "host must be provided"+form)) + } + if u.User != nil { + allErrors = append(allErrors, field.Invalid(fldPath.Child("url"), u.User.String(), "user information is not permitted in the URL")) + } + if len(u.Fragment) != 0 { + allErrors = append(allErrors, field.Invalid(fldPath.Child("url"), u.Fragment, "fragments are not permitted in the URL")) + } + if len(u.RawQuery) != 0 { + allErrors = append(allErrors, field.Invalid(fldPath.Child("url"), u.RawQuery, "query parameters are not permitted in the URL")) + } + } + } + + if cc.Service != nil { + allErrors = append(allErrors, validateWebhookService(fldPath.Child("service"), cc.Service)...) + } + return allErrors +} + +func validateWebhookService(fldPath *field.Path, svc *admissionregistration.ServiceReference) field.ErrorList { + var allErrors field.ErrorList + + if len(svc.Name) == 0 { + allErrors = append(allErrors, field.Required(fldPath.Child("name"), "service name is required")) + } + + if len(svc.Namespace) == 0 { + allErrors = append(allErrors, field.Required(fldPath.Child("namespace"), "service namespace is required")) + } + + if svc.Path == nil { + return allErrors + } + + // TODO: replace below with url.Parse + verifying that host is empty? 
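A small sketch of how the client-config checks above surface through the exported validator; the configuration below is hypothetical, and a plain-HTTP URL carrying a query string fails both the scheme and query restrictions.

// Hypothetical sketch exercising the webhook validation rules above.
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/apis/admissionregistration"
	"k8s.io/kubernetes/pkg/apis/admissionregistration/validation"
)

func isValidExample() bool {
	badURL := "http://hooks.example.com/validate?x=1" // wrong scheme and a query string
	cfg := &admissionregistration.ValidatingWebhookConfiguration{
		ObjectMeta: metav1.ObjectMeta{Name: "example-config"},
		Webhooks: []admissionregistration.Webhook{{
			Name:         "pods.example.com",
			ClientConfig: admissionregistration.WebhookClientConfig{URL: &badURL},
		}},
	}
	errs := validation.ValidateValidatingWebhookConfiguration(cfg)
	return len(errs) == 0 // false for this configuration
}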
+ + urlPath := *svc.Path + if urlPath == "/" || len(urlPath) == 0 { + return allErrors + } + if urlPath == "//" { + allErrors = append(allErrors, field.Invalid(fldPath.Child("path"), urlPath, "segment[0] may not be empty")) + return allErrors + } + + if !strings.HasPrefix(urlPath, "/") { + allErrors = append(allErrors, field.Invalid(fldPath.Child("path"), urlPath, "must start with a '/'")) } + + urlPathToCheck := urlPath[1:] + if strings.HasSuffix(urlPathToCheck, "/") { + urlPathToCheck = urlPathToCheck[:len(urlPathToCheck)-1] + } + steps := strings.Split(urlPathToCheck, "/") + for i, step := range steps { + if len(step) == 0 { + allErrors = append(allErrors, field.Invalid(fldPath.Child("path"), urlPath, fmt.Sprintf("segment[%d] may not be empty", i))) + continue + } + failures := validation.IsDNS1123Subdomain(step) + for _, failure := range failures { + allErrors = append(allErrors, field.Invalid(fldPath.Child("path"), urlPath, fmt.Sprintf("segment[%d]: %v", i, failure))) + } + } + return allErrors } +var supportedFailurePolicies = sets.NewString( + string(admissionregistration.Ignore), + string(admissionregistration.Fail), +) + var supportedOperations = sets.NewString( string(admissionregistration.OperationAll), string(admissionregistration.Create), @@ -221,6 +326,10 @@ func validateRuleWithOperations(ruleWithOperations *admissionregistration.RuleWi return allErrors } -func ValidateExternalAdmissionHookConfigurationUpdate(newC, oldC *admissionregistration.ExternalAdmissionHookConfiguration) field.ErrorList { - return ValidateExternalAdmissionHookConfiguration(newC) +func ValidateValidatingWebhookConfigurationUpdate(newC, oldC *admissionregistration.ValidatingWebhookConfiguration) field.ErrorList { + return ValidateValidatingWebhookConfiguration(newC) +} + +func ValidateMutatingWebhookConfigurationUpdate(newC, oldC *admissionregistration.MutatingWebhookConfiguration) field.ErrorList { + return ValidateMutatingWebhookConfiguration(newC) } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/zz_generated.deepcopy.go index 4952a75f7915..8bb1ab3fb6aa 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/zz_generated.deepcopy.go @@ -21,127 +21,41 @@ limitations under the License. package admissionregistration import ( - conversion "k8s.io/apimachinery/pkg/conversion" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AdmissionHookClientConfig).DeepCopyInto(out.(*AdmissionHookClientConfig)) - return nil - }, InType: reflect.TypeOf(&AdmissionHookClientConfig{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ExternalAdmissionHook).DeepCopyInto(out.(*ExternalAdmissionHook)) - return nil - }, InType: reflect.TypeOf(&ExternalAdmissionHook{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ExternalAdmissionHookConfiguration).DeepCopyInto(out.(*ExternalAdmissionHookConfiguration)) - return nil - }, InType: reflect.TypeOf(&ExternalAdmissionHookConfiguration{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ExternalAdmissionHookConfigurationList).DeepCopyInto(out.(*ExternalAdmissionHookConfigurationList)) - return nil - }, InType: reflect.TypeOf(&ExternalAdmissionHookConfigurationList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Initializer).DeepCopyInto(out.(*Initializer)) - return nil - }, InType: reflect.TypeOf(&Initializer{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*InitializerConfiguration).DeepCopyInto(out.(*InitializerConfiguration)) - return nil - }, InType: reflect.TypeOf(&InitializerConfiguration{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*InitializerConfigurationList).DeepCopyInto(out.(*InitializerConfigurationList)) - return nil - }, InType: reflect.TypeOf(&InitializerConfigurationList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Rule).DeepCopyInto(out.(*Rule)) - return nil - }, InType: reflect.TypeOf(&Rule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RuleWithOperations).DeepCopyInto(out.(*RuleWithOperations)) - return nil - }, InType: reflect.TypeOf(&RuleWithOperations{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceReference).DeepCopyInto(out.(*ServiceReference)) - return nil - }, InType: reflect.TypeOf(&ServiceReference{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AdmissionHookClientConfig) DeepCopyInto(out *AdmissionHookClientConfig) { - *out = *in - out.Service = in.Service - if in.CABundle != nil { - in, out := &in.CABundle, &out.CABundle - *out = make([]byte, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionHookClientConfig. -func (in *AdmissionHookClientConfig) DeepCopy() *AdmissionHookClientConfig { - if in == nil { - return nil - } - out := new(AdmissionHookClientConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ExternalAdmissionHook) DeepCopyInto(out *ExternalAdmissionHook) { +func (in *Initializer) DeepCopyInto(out *Initializer) { *out = *in - in.ClientConfig.DeepCopyInto(&out.ClientConfig) if in.Rules != nil { in, out := &in.Rules, &out.Rules - *out = make([]RuleWithOperations, len(*in)) + *out = make([]Rule, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.FailurePolicy != nil { - in, out := &in.FailurePolicy, &out.FailurePolicy - if *in == nil { - *out = nil - } else { - *out = new(FailurePolicyType) - **out = **in - } - } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalAdmissionHook. -func (in *ExternalAdmissionHook) DeepCopy() *ExternalAdmissionHook { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Initializer. +func (in *Initializer) DeepCopy() *Initializer { if in == nil { return nil } - out := new(ExternalAdmissionHook) + out := new(Initializer) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExternalAdmissionHookConfiguration) DeepCopyInto(out *ExternalAdmissionHookConfiguration) { +func (in *InitializerConfiguration) DeepCopyInto(out *InitializerConfiguration) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.ExternalAdmissionHooks != nil { - in, out := &in.ExternalAdmissionHooks, &out.ExternalAdmissionHooks - *out = make([]ExternalAdmissionHook, len(*in)) + if in.Initializers != nil { + in, out := &in.Initializers, &out.Initializers + *out = make([]Initializer, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -149,18 +63,18 @@ func (in *ExternalAdmissionHookConfiguration) DeepCopyInto(out *ExternalAdmissio return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalAdmissionHookConfiguration. -func (in *ExternalAdmissionHookConfiguration) DeepCopy() *ExternalAdmissionHookConfiguration { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitializerConfiguration. +func (in *InitializerConfiguration) DeepCopy() *InitializerConfiguration { if in == nil { return nil } - out := new(ExternalAdmissionHookConfiguration) + out := new(InitializerConfiguration) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ExternalAdmissionHookConfiguration) DeepCopyObject() runtime.Object { +func (in *InitializerConfiguration) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } else { @@ -169,13 +83,13 @@ func (in *ExternalAdmissionHookConfiguration) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ExternalAdmissionHookConfigurationList) DeepCopyInto(out *ExternalAdmissionHookConfigurationList) { +func (in *InitializerConfigurationList) DeepCopyInto(out *InitializerConfigurationList) { *out = *in out.TypeMeta = in.TypeMeta out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]ExternalAdmissionHookConfiguration, len(*in)) + *out = make([]InitializerConfiguration, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -183,18 +97,18 @@ func (in *ExternalAdmissionHookConfigurationList) DeepCopyInto(out *ExternalAdmi return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalAdmissionHookConfigurationList. -func (in *ExternalAdmissionHookConfigurationList) DeepCopy() *ExternalAdmissionHookConfigurationList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitializerConfigurationList. +func (in *InitializerConfigurationList) DeepCopy() *InitializerConfigurationList { if in == nil { return nil } - out := new(ExternalAdmissionHookConfigurationList) + out := new(InitializerConfigurationList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ExternalAdmissionHookConfigurationList) DeepCopyObject() runtime.Object { +func (in *InitializerConfigurationList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } else { @@ -203,36 +117,13 @@ func (in *ExternalAdmissionHookConfigurationList) DeepCopyObject() runtime.Objec } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Initializer) DeepCopyInto(out *Initializer) { - *out = *in - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]Rule, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Initializer. -func (in *Initializer) DeepCopy() *Initializer { - if in == nil { - return nil - } - out := new(Initializer) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *InitializerConfiguration) DeepCopyInto(out *InitializerConfiguration) { +func (in *MutatingWebhookConfiguration) DeepCopyInto(out *MutatingWebhookConfiguration) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Initializers != nil { - in, out := &in.Initializers, &out.Initializers - *out = make([]Initializer, len(*in)) + if in.Webhooks != nil { + in, out := &in.Webhooks, &out.Webhooks + *out = make([]Webhook, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -240,18 +131,18 @@ func (in *InitializerConfiguration) DeepCopyInto(out *InitializerConfiguration) return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitializerConfiguration. -func (in *InitializerConfiguration) DeepCopy() *InitializerConfiguration { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingWebhookConfiguration. 
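+// The copy shares no mutable state with the receiver, so callers may modify
+// the returned object (for example its Webhooks slice) without affecting the
+// original; a nil receiver yields a nil result.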
+func (in *MutatingWebhookConfiguration) DeepCopy() *MutatingWebhookConfiguration { if in == nil { return nil } - out := new(InitializerConfiguration) + out := new(MutatingWebhookConfiguration) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *InitializerConfiguration) DeepCopyObject() runtime.Object { +func (in *MutatingWebhookConfiguration) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } else { @@ -260,13 +151,13 @@ func (in *InitializerConfiguration) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *InitializerConfigurationList) DeepCopyInto(out *InitializerConfigurationList) { +func (in *MutatingWebhookConfigurationList) DeepCopyInto(out *MutatingWebhookConfigurationList) { *out = *in out.TypeMeta = in.TypeMeta out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]InitializerConfiguration, len(*in)) + *out = make([]MutatingWebhookConfiguration, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -274,18 +165,18 @@ func (in *InitializerConfigurationList) DeepCopyInto(out *InitializerConfigurati return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitializerConfigurationList. -func (in *InitializerConfigurationList) DeepCopy() *InitializerConfigurationList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingWebhookConfigurationList. +func (in *MutatingWebhookConfigurationList) DeepCopy() *MutatingWebhookConfigurationList { if in == nil { return nil } - out := new(InitializerConfigurationList) + out := new(MutatingWebhookConfigurationList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *InitializerConfigurationList) DeepCopyObject() runtime.Object { +func (in *MutatingWebhookConfigurationList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } else { @@ -349,6 +240,15 @@ func (in *RuleWithOperations) DeepCopy() *RuleWithOperations { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } return } @@ -361,3 +261,152 @@ func (in *ServiceReference) DeepCopy() *ServiceReference { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingWebhookConfiguration) DeepCopyInto(out *ValidatingWebhookConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Webhooks != nil { + in, out := &in.Webhooks, &out.Webhooks + *out = make([]Webhook, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingWebhookConfiguration. 
+func (in *ValidatingWebhookConfiguration) DeepCopy() *ValidatingWebhookConfiguration { + if in == nil { + return nil + } + out := new(ValidatingWebhookConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ValidatingWebhookConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingWebhookConfigurationList) DeepCopyInto(out *ValidatingWebhookConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ValidatingWebhookConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingWebhookConfigurationList. +func (in *ValidatingWebhookConfigurationList) DeepCopy() *ValidatingWebhookConfigurationList { + if in == nil { + return nil + } + out := new(ValidatingWebhookConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ValidatingWebhookConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Webhook) DeepCopyInto(out *Webhook) { + *out = *in + in.ClientConfig.DeepCopyInto(&out.ClientConfig) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]RuleWithOperations, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FailurePolicy != nil { + in, out := &in.FailurePolicy, &out.FailurePolicy + if *in == nil { + *out = nil + } else { + *out = new(FailurePolicyType) + **out = **in + } + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + if *in == nil { + *out = nil + } else { + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Webhook. +func (in *Webhook) DeepCopy() *Webhook { + if in == nil { + return nil + } + out := new(Webhook) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookClientConfig) DeepCopyInto(out *WebhookClientConfig) { + *out = *in + if in.URL != nil { + in, out := &in.URL, &out.URL + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + if in.Service != nil { + in, out := &in.Service, &out.Service + if *in == nil { + *out = nil + } else { + *out = new(ServiceReference) + (*in).DeepCopyInto(*out) + } + } + if in.CABundle != nil { + in, out := &in.CABundle, &out.CABundle + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookClientConfig. 
+func (in *WebhookClientConfig) DeepCopy() *WebhookClientConfig { + if in == nil { + return nil + } + out := new(WebhookClientConfig) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/apps/BUILD index 0b1a8701f08e..8cb47574a194 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/BUILD @@ -13,11 +13,12 @@ go_library( "types.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/kubernetes/pkg/apis/apps", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/autoscaling:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], @@ -36,6 +37,7 @@ filegroup( ":package-srcs", "//pkg/apis/apps/fuzzer:all-srcs", "//pkg/apis/apps/install:all-srcs", + "//pkg/apis/apps/v1:all-srcs", "//pkg/apis/apps/v1beta1:all-srcs", "//pkg/apis/apps/v1beta2:all-srcs", "//pkg/apis/apps/validation:all-srcs", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/doc.go index bca1ff4ef013..91d117598299 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/doc.go @@ -14,6 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package package apps diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/install/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/apps/install/BUILD index 55b72470796a..e33b4cb24220 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/install/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/install/BUILD @@ -8,9 +8,11 @@ load( go_library( name = "go_default_library", srcs = ["install.go"], + importpath = "k8s.io/kubernetes/pkg/apis/apps/install", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/apps:go_default_library", + "//pkg/apis/apps/v1:go_default_library", "//pkg/apis/apps/v1beta1:go_default_library", "//pkg/apis/apps/v1beta2:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/install/install.go index 790e280641e8..73117127693d 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/install/install.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/install/install.go @@ -22,14 +22,15 @@ import ( "k8s.io/apimachinery/pkg/apimachinery/announced" "k8s.io/apimachinery/pkg/apimachinery/registered" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/apps" + "k8s.io/kubernetes/pkg/apis/apps/v1" "k8s.io/kubernetes/pkg/apis/apps/v1beta1" "k8s.io/kubernetes/pkg/apis/apps/v1beta2" ) func init() { - Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) + Install(legacyscheme.GroupFactoryRegistry, legacyscheme.Registry, legacyscheme.Scheme) } // Install registers the API group and adds types to a scheme @@ -37,12 +38,13 @@ func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *r if err := announced.NewGroupMetaFactory( 
&announced.GroupMetaFactoryArgs{ GroupName: apps.GroupName, - VersionPreferenceOrder: []string{v1beta1.SchemeGroupVersion.Version, v1beta2.SchemeGroupVersion.Version}, + VersionPreferenceOrder: []string{v1beta1.SchemeGroupVersion.Version, v1beta2.SchemeGroupVersion.Version, v1.SchemeGroupVersion.Version}, AddInternalObjectsToScheme: apps.AddToScheme, }, announced.VersionToSchemeFunc{ v1beta1.SchemeGroupVersion.Version: v1beta1.AddToScheme, v1beta2.SchemeGroupVersion.Version: v1beta2.AddToScheme, + v1.SchemeGroupVersion.Version: v1.AddToScheme, }, ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { panic(err) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/register.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/register.go index 77d8445ae141..8b7591807cbe 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/register.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/register.go @@ -19,6 +19,7 @@ package apps import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kubernetes/pkg/apis/autoscaling" "k8s.io/kubernetes/pkg/apis/extensions" ) @@ -43,7 +44,7 @@ func Resource(resource string) schema.GroupResource { return SchemeGroupVersion.WithResource(resource).GroupResource() } -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { // TODO this will get cleaned up with the scheme types are fixed scheme.AddKnownTypes(SchemeGroupVersion, @@ -52,7 +53,7 @@ func addKnownTypes(scheme *runtime.Scheme) error { &extensions.Deployment{}, &extensions.DeploymentList{}, &extensions.DeploymentRollback{}, - &extensions.Scale{}, + &autoscaling.Scale{}, &StatefulSet{}, &StatefulSetList{}, &ControllerRevision{}, diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/types.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/types.go index 7654b681ab1e..850694d7fc23 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/types.go @@ -19,12 +19,12 @@ package apps import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) // +genclient -// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/kubernetes/pkg/apis/extensions.Scale -// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/kubernetes/pkg/apis/extensions.Scale,result=k8s.io/kubernetes/pkg/apis/extensions.Scale +// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/kubernetes/pkg/apis/autoscaling.Scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // StatefulSet represents a set of pods with consistent identities. @@ -195,6 +195,27 @@ type StatefulSetStatus struct { // newest ControllerRevision. // +optional CollisionCount *int32 + + // Represents the latest available observations of a statefulset's current state. + Conditions []StatefulSetCondition +} + +type StatefulSetConditionType string + +// TODO: Add valid condition types for Statefulsets. + +// StatefulSetCondition describes the state of a statefulset at a certain point. +type StatefulSetCondition struct { + // Type of statefulset condition. + Type StatefulSetConditionType + // Status of the condition, one of True, False, Unknown. 
+ Status api.ConditionStatus + // The last time this condition was updated. + LastTransitionTime metav1.Time + // The reason for the condition's last transition. + Reason string + // A human readable message indicating details about the transition. + Message string } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/BUILD new file mode 100644 index 000000000000..3ffdef39f604 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/BUILD @@ -0,0 +1,67 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "conversion.go", + "defaults.go", + "doc.go", + "register.go", + "zz_generated.conversion.go", + "zz_generated.defaults.go", + ], + importpath = "k8s.io/kubernetes/pkg/apis/apps/v1", + visibility = ["//visibility:public"], + deps = [ + "//pkg/apis/apps:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/v1:go_default_library", + "//pkg/apis/extensions:go_default_library", + "//vendor/k8s.io/api/apps/v1:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", + ], +) + +go_test( + name = "go_default_xtest", + srcs = [ + "conversion_test.go", + "defaults_test.go", + ], + importpath = "k8s.io/kubernetes/pkg/apis/apps/v1_test", + deps = [ + ":go_default_library", + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/apps:go_default_library", + "//pkg/apis/apps/install:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/install:go_default_library", + "//pkg/apis/extensions:go_default_library", + "//vendor/k8s.io/api/apps/v1:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/conversion.go new file mode 100644 index 000000000000..0908f7fc2bbd --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/conversion.go @@ -0,0 +1,498 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + "strconv" + + appsv1 "k8s.io/api/apps/v1" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/kubernetes/pkg/apis/apps" + api "k8s.io/kubernetes/pkg/apis/core" + k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1" + "k8s.io/kubernetes/pkg/apis/extensions" +) + +func addConversionFuncs(scheme *runtime.Scheme) error { + // Add non-generated conversion functions to handle the *int32 -> int32 + // conversion. A pointer is useful in the versioned type so we can default + // it, but a plain int32 is more convenient in the internal type. These + // functions are the same as the autogenerated ones in every other way. + err := scheme.AddConversionFuncs( + Convert_v1_StatefulSetSpec_To_apps_StatefulSetSpec, + Convert_apps_StatefulSetSpec_To_v1_StatefulSetSpec, + Convert_v1_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy, + Convert_apps_StatefulSetUpdateStrategy_To_v1_StatefulSetUpdateStrategy, + Convert_extensions_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet, + Convert_v1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet, + Convert_v1_StatefulSetStatus_To_apps_StatefulSetStatus, + Convert_apps_StatefulSetStatus_To_v1_StatefulSetStatus, + Convert_v1_Deployment_To_extensions_Deployment, + Convert_extensions_Deployment_To_v1_Deployment, + Convert_extensions_DaemonSet_To_v1_DaemonSet, + Convert_v1_DaemonSet_To_extensions_DaemonSet, + Convert_extensions_DaemonSetSpec_To_v1_DaemonSetSpec, + Convert_v1_DaemonSetSpec_To_extensions_DaemonSetSpec, + Convert_extensions_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy, + Convert_v1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy, + // extensions + // TODO: below conversions should be dropped in favor of auto-generated + // ones, see https://github.com/kubernetes/kubernetes/issues/39865 + Convert_v1_DeploymentSpec_To_extensions_DeploymentSpec, + Convert_extensions_DeploymentSpec_To_v1_DeploymentSpec, + Convert_v1_DeploymentStrategy_To_extensions_DeploymentStrategy, + Convert_extensions_DeploymentStrategy_To_v1_DeploymentStrategy, + Convert_v1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment, + Convert_extensions_RollingUpdateDeployment_To_v1_RollingUpdateDeployment, + Convert_extensions_ReplicaSetSpec_To_v1_ReplicaSetSpec, + Convert_v1_ReplicaSetSpec_To_extensions_ReplicaSetSpec, + ) + if err != nil { + return err + } + return nil +} + +func Convert_v1_DeploymentSpec_To_extensions_DeploymentSpec(in *appsv1.DeploymentSpec, out *extensions.DeploymentSpec, s conversion.Scope) error { + if in.Replicas != nil { + out.Replicas = *in.Replicas + } + out.Selector = in.Selector + if err := k8s_api_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + if err := Convert_v1_DeploymentStrategy_To_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + return err + } + out.RevisionHistoryLimit = in.RevisionHistoryLimit + out.MinReadySeconds = in.MinReadySeconds + out.Paused = in.Paused + if in.ProgressDeadlineSeconds != nil { + out.ProgressDeadlineSeconds = new(int32) + *out.ProgressDeadlineSeconds = *in.ProgressDeadlineSeconds + } + return nil +} + +func Convert_extensions_DeploymentSpec_To_v1_DeploymentSpec(in *extensions.DeploymentSpec, out 
*appsv1.DeploymentSpec, s conversion.Scope) error { + out.Replicas = &in.Replicas + out.Selector = in.Selector + if err := k8s_api_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + if err := Convert_extensions_DeploymentStrategy_To_v1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + return err + } + if in.RevisionHistoryLimit != nil { + out.RevisionHistoryLimit = new(int32) + *out.RevisionHistoryLimit = int32(*in.RevisionHistoryLimit) + } + out.MinReadySeconds = int32(in.MinReadySeconds) + out.Paused = in.Paused + if in.ProgressDeadlineSeconds != nil { + out.ProgressDeadlineSeconds = new(int32) + *out.ProgressDeadlineSeconds = *in.ProgressDeadlineSeconds + } + return nil +} + +func Convert_extensions_DeploymentStrategy_To_v1_DeploymentStrategy(in *extensions.DeploymentStrategy, out *appsv1.DeploymentStrategy, s conversion.Scope) error { + out.Type = appsv1.DeploymentStrategyType(in.Type) + if in.RollingUpdate != nil { + out.RollingUpdate = new(appsv1.RollingUpdateDeployment) + if err := Convert_extensions_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { + return err + } + } else { + out.RollingUpdate = nil + } + return nil +} + +func Convert_v1_DeploymentStrategy_To_extensions_DeploymentStrategy(in *appsv1.DeploymentStrategy, out *extensions.DeploymentStrategy, s conversion.Scope) error { + out.Type = extensions.DeploymentStrategyType(in.Type) + if in.RollingUpdate != nil { + out.RollingUpdate = new(extensions.RollingUpdateDeployment) + if err := Convert_v1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { + return err + } + } else { + out.RollingUpdate = nil + } + return nil +} + +func Convert_v1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in *appsv1.RollingUpdateDeployment, out *extensions.RollingUpdateDeployment, s conversion.Scope) error { + if err := s.Convert(in.MaxUnavailable, &out.MaxUnavailable, 0); err != nil { + return err + } + if err := s.Convert(in.MaxSurge, &out.MaxSurge, 0); err != nil { + return err + } + return nil +} + +func Convert_extensions_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(in *extensions.RollingUpdateDeployment, out *appsv1.RollingUpdateDeployment, s conversion.Scope) error { + if out.MaxUnavailable == nil { + out.MaxUnavailable = &intstr.IntOrString{} + } + if err := s.Convert(&in.MaxUnavailable, out.MaxUnavailable, 0); err != nil { + return err + } + if out.MaxSurge == nil { + out.MaxSurge = &intstr.IntOrString{} + } + if err := s.Convert(&in.MaxSurge, out.MaxSurge, 0); err != nil { + return err + } + return nil +} + +func Convert_v1_Deployment_To_extensions_Deployment(in *appsv1.Deployment, out *extensions.Deployment, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_DeploymentSpec_To_extensions_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + + // Copy annotation to deprecated rollbackTo field for roundtrip + // TODO: remove this conversion after we delete extensions/v1beta1 and apps/v1beta1 Deployment + if revision, _ := in.Annotations[appsv1.DeprecatedRollbackTo]; revision != "" { + if revision64, err := strconv.ParseInt(revision, 10, 64); err != nil { + return fmt.Errorf("failed to parse annotation[%s]=%s as int64: %v", appsv1.DeprecatedRollbackTo, revision, err) + } else { + out.Spec.RollbackTo = new(extensions.RollbackConfig) + out.Spec.RollbackTo.Revision 
= revision64 + } + delete(out.Annotations, appsv1.DeprecatedRollbackTo) + } else { + out.Spec.RollbackTo = nil + } + + if err := Convert_v1_DeploymentStatus_To_extensions_DeploymentStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_extensions_Deployment_To_v1_Deployment(in *extensions.Deployment, out *appsv1.Deployment, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_extensions_DeploymentSpec_To_v1_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + + // Copy deprecated rollbackTo field to annotation for roundtrip + // TODO: remove this conversion after we delete extensions/v1beta1 and apps/v1beta1 Deployment + if in.Spec.RollbackTo != nil { + if out.Annotations == nil { + out.Annotations = make(map[string]string) + } + out.Annotations[appsv1.DeprecatedRollbackTo] = strconv.FormatInt(in.Spec.RollbackTo.Revision, 10) + } else { + delete(out.Annotations, appsv1.DeprecatedRollbackTo) + } + + if err := Convert_extensions_DeploymentStatus_To_v1_DeploymentStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_extensions_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet(in *extensions.RollingUpdateDaemonSet, out *appsv1.RollingUpdateDaemonSet, s conversion.Scope) error { + if out.MaxUnavailable == nil { + out.MaxUnavailable = &intstr.IntOrString{} + } + if err := s.Convert(&in.MaxUnavailable, out.MaxUnavailable, 0); err != nil { + return err + } + return nil +} + +func Convert_v1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(in *appsv1.RollingUpdateDaemonSet, out *extensions.RollingUpdateDaemonSet, s conversion.Scope) error { + if err := s.Convert(in.MaxUnavailable, &out.MaxUnavailable, 0); err != nil { + return err + } + return nil +} + +func Convert_extensions_DaemonSet_To_v1_DaemonSet(in *extensions.DaemonSet, out *appsv1.DaemonSet, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if out.Annotations == nil { + out.Annotations = make(map[string]string) + } + out.Annotations[appsv1.DeprecatedTemplateGeneration] = strconv.FormatInt(in.Spec.TemplateGeneration, 10) + if err := Convert_extensions_DaemonSetSpec_To_v1_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := s.Convert(&in.Status, &out.Status, 0); err != nil { + return err + } + return nil +} + +func Convert_extensions_DaemonSetSpec_To_v1_DaemonSetSpec(in *extensions.DaemonSetSpec, out *appsv1.DaemonSetSpec, s conversion.Scope) error { + out.Selector = in.Selector + if err := k8s_api_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + if err := Convert_extensions_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { + return err + } + out.MinReadySeconds = int32(in.MinReadySeconds) + if in.RevisionHistoryLimit != nil { + out.RevisionHistoryLimit = new(int32) + *out.RevisionHistoryLimit = *in.RevisionHistoryLimit + } else { + out.RevisionHistoryLimit = nil + } + return nil +} + +func Convert_extensions_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy(in *extensions.DaemonSetUpdateStrategy, out *appsv1.DaemonSetUpdateStrategy, s conversion.Scope) error { + out.Type = appsv1.DaemonSetUpdateStrategyType(in.Type) + if in.RollingUpdate != nil { + out.RollingUpdate = &appsv1.RollingUpdateDaemonSet{} + if err := Convert_extensions_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet(in.RollingUpdate, out.RollingUpdate, s); 
err != nil { + return err + } + } + return nil +} + +func Convert_v1_DaemonSet_To_extensions_DaemonSet(in *appsv1.DaemonSet, out *extensions.DaemonSet, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_DaemonSetSpec_To_extensions_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if value, ok := in.Annotations[appsv1.DeprecatedTemplateGeneration]; ok { + if value64, err := strconv.ParseInt(value, 10, 64); err != nil { + return err + } else { + out.Spec.TemplateGeneration = value64 + delete(out.Annotations, appsv1.DeprecatedTemplateGeneration) + } + } + if err := s.Convert(&in.Status, &out.Status, 0); err != nil { + return err + } + return nil +} + +func Convert_v1_DaemonSetSpec_To_extensions_DaemonSetSpec(in *appsv1.DaemonSetSpec, out *extensions.DaemonSetSpec, s conversion.Scope) error { + out.Selector = in.Selector + if err := k8s_api_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + if err := Convert_v1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { + return err + } + if in.RevisionHistoryLimit != nil { + out.RevisionHistoryLimit = new(int32) + *out.RevisionHistoryLimit = *in.RevisionHistoryLimit + } else { + out.RevisionHistoryLimit = nil + } + out.MinReadySeconds = in.MinReadySeconds + return nil +} + +func Convert_v1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(in *appsv1.DaemonSetUpdateStrategy, out *extensions.DaemonSetUpdateStrategy, s conversion.Scope) error { + out.Type = extensions.DaemonSetUpdateStrategyType(in.Type) + if in.RollingUpdate != nil { + out.RollingUpdate = &extensions.RollingUpdateDaemonSet{} + if err := Convert_v1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(in.RollingUpdate, out.RollingUpdate, s); err != nil { + return err + } + } + return nil +} + +func Convert_extensions_ReplicaSetSpec_To_v1_ReplicaSetSpec(in *extensions.ReplicaSetSpec, out *appsv1.ReplicaSetSpec, s conversion.Scope) error { + out.Replicas = new(int32) + *out.Replicas = int32(in.Replicas) + out.MinReadySeconds = in.MinReadySeconds + out.Selector = in.Selector + if err := k8s_api_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +func Convert_v1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *appsv1.ReplicaSetSpec, out *extensions.ReplicaSetSpec, s conversion.Scope) error { + if in.Replicas != nil { + out.Replicas = *in.Replicas + } + out.MinReadySeconds = in.MinReadySeconds + out.Selector = in.Selector + if err := k8s_api_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +func Convert_v1_StatefulSetSpec_To_apps_StatefulSetSpec(in *appsv1.StatefulSetSpec, out *apps.StatefulSetSpec, s conversion.Scope) error { + if in.Replicas != nil { + out.Replicas = *in.Replicas + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(metav1.LabelSelector) + if err := s.Convert(*in, *out, 0); err != nil { + return err + } + } else { + out.Selector = nil + } + if err := k8s_api_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + if in.VolumeClaimTemplates != nil { + in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates + *out = make([]api.PersistentVolumeClaim, len(*in)) + for i := range *in { + if err := 
s.Convert(&(*in)[i], &(*out)[i], 0); err != nil { + return err + } + } + } else { + out.VolumeClaimTemplates = nil + } + if err := Convert_v1_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { + return err + } + if in.RevisionHistoryLimit != nil { + out.RevisionHistoryLimit = new(int32) + *out.RevisionHistoryLimit = *in.RevisionHistoryLimit + } else { + out.RevisionHistoryLimit = nil + } + out.ServiceName = in.ServiceName + out.PodManagementPolicy = apps.PodManagementPolicyType(in.PodManagementPolicy) + return nil +} + +func Convert_apps_StatefulSetSpec_To_v1_StatefulSetSpec(in *apps.StatefulSetSpec, out *appsv1.StatefulSetSpec, s conversion.Scope) error { + out.Replicas = new(int32) + *out.Replicas = in.Replicas + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(metav1.LabelSelector) + if err := s.Convert(*in, *out, 0); err != nil { + return err + } + } else { + out.Selector = nil + } + if err := k8s_api_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + if in.VolumeClaimTemplates != nil { + in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates + *out = make([]v1.PersistentVolumeClaim, len(*in)) + for i := range *in { + if err := s.Convert(&(*in)[i], &(*out)[i], 0); err != nil { + return err + } + } + } else { + out.VolumeClaimTemplates = nil + } + if in.RevisionHistoryLimit != nil { + out.RevisionHistoryLimit = new(int32) + *out.RevisionHistoryLimit = *in.RevisionHistoryLimit + } else { + out.RevisionHistoryLimit = nil + } + out.ServiceName = in.ServiceName + out.PodManagementPolicy = appsv1.PodManagementPolicyType(in.PodManagementPolicy) + if err := Convert_apps_StatefulSetUpdateStrategy_To_v1_StatefulSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { + return err + } + return nil +} + +func Convert_v1_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy(in *appsv1.StatefulSetUpdateStrategy, out *apps.StatefulSetUpdateStrategy, s conversion.Scope) error { + out.Type = apps.StatefulSetUpdateStrategyType(in.Type) + if in.RollingUpdate != nil { + out.RollingUpdate = new(apps.RollingUpdateStatefulSetStrategy) + out.RollingUpdate.Partition = *in.RollingUpdate.Partition + } else { + out.RollingUpdate = nil + } + return nil +} + +func Convert_apps_StatefulSetUpdateStrategy_To_v1_StatefulSetUpdateStrategy(in *apps.StatefulSetUpdateStrategy, out *appsv1.StatefulSetUpdateStrategy, s conversion.Scope) error { + out.Type = appsv1.StatefulSetUpdateStrategyType(in.Type) + if in.RollingUpdate != nil { + out.RollingUpdate = new(appsv1.RollingUpdateStatefulSetStrategy) + out.RollingUpdate.Partition = new(int32) + *out.RollingUpdate.Partition = in.RollingUpdate.Partition + } else { + out.RollingUpdate = nil + } + return nil +} + +func Convert_v1_StatefulSetStatus_To_apps_StatefulSetStatus(in *appsv1.StatefulSetStatus, out *apps.StatefulSetStatus, s conversion.Scope) error { + out.ObservedGeneration = new(int64) + *out.ObservedGeneration = in.ObservedGeneration + out.Replicas = in.Replicas + out.ReadyReplicas = in.ReadyReplicas + out.CurrentReplicas = in.CurrentReplicas + out.UpdatedReplicas = in.UpdatedReplicas + out.CurrentRevision = in.CurrentRevision + out.UpdateRevision = in.UpdateRevision + if in.CollisionCount != nil { + out.CollisionCount = new(int32) + *out.CollisionCount = *in.CollisionCount + } + out.Conditions = make([]apps.StatefulSetCondition, len(in.Conditions)) + for i := range 
in.Conditions { + if err := Convert_v1_StatefulSetCondition_To_apps_StatefulSetCondition(&in.Conditions[i], &out.Conditions[i], s); err != nil { + return err + } + } + return nil +} + +func Convert_apps_StatefulSetStatus_To_v1_StatefulSetStatus(in *apps.StatefulSetStatus, out *appsv1.StatefulSetStatus, s conversion.Scope) error { + if in.ObservedGeneration != nil { + out.ObservedGeneration = *in.ObservedGeneration + } + out.Replicas = in.Replicas + out.ReadyReplicas = in.ReadyReplicas + out.CurrentReplicas = in.CurrentReplicas + out.UpdatedReplicas = in.UpdatedReplicas + out.CurrentRevision = in.CurrentRevision + out.UpdateRevision = in.UpdateRevision + if in.CollisionCount != nil { + out.CollisionCount = new(int32) + *out.CollisionCount = *in.CollisionCount + } + out.Conditions = make([]appsv1.StatefulSetCondition, len(in.Conditions)) + for i := range in.Conditions { + if err := Convert_apps_StatefulSetCondition_To_v1_StatefulSetCondition(&in.Conditions[i], &out.Conditions[i], s); err != nil { + return err + } + } + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/defaults.go new file mode 100644 index 000000000000..941a0c8e8d89 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/defaults.go @@ -0,0 +1,127 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + appsv1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + return RegisterDefaults(scheme) +} + +// SetDefaults_Deployment sets additional defaults compared to its counterpart +// in extensions. These addons are: +// - MaxUnavailable during rolling update set to 25% (1 in extensions) +// - MaxSurge value during rolling update set to 25% (1 in extensions) +// - RevisionHistoryLimit set to 10 (not set in extensions) +// - ProgressDeadlineSeconds set to 600s (not set in extensions) +func SetDefaults_Deployment(obj *appsv1.Deployment) { + // Set DeploymentSpec.Replicas to 1 if it is not set. + if obj.Spec.Replicas == nil { + obj.Spec.Replicas = new(int32) + *obj.Spec.Replicas = 1 + } + strategy := &obj.Spec.Strategy + // Set default DeploymentStrategyType as RollingUpdate. + if strategy.Type == "" { + strategy.Type = appsv1.RollingUpdateDeploymentStrategyType + } + if strategy.Type == appsv1.RollingUpdateDeploymentStrategyType { + if strategy.RollingUpdate == nil { + rollingUpdate := appsv1.RollingUpdateDeployment{} + strategy.RollingUpdate = &rollingUpdate + } + if strategy.RollingUpdate.MaxUnavailable == nil { + // Set default MaxUnavailable as 25% by default. + maxUnavailable := intstr.FromString("25%") + strategy.RollingUpdate.MaxUnavailable = &maxUnavailable + } + if strategy.RollingUpdate.MaxSurge == nil { + // Set default MaxSurge as 25% by default. 
+ maxSurge := intstr.FromString("25%") + strategy.RollingUpdate.MaxSurge = &maxSurge + } + } + if obj.Spec.RevisionHistoryLimit == nil { + obj.Spec.RevisionHistoryLimit = new(int32) + *obj.Spec.RevisionHistoryLimit = 10 + } + if obj.Spec.ProgressDeadlineSeconds == nil { + obj.Spec.ProgressDeadlineSeconds = new(int32) + *obj.Spec.ProgressDeadlineSeconds = 600 + } +} + +func SetDefaults_DaemonSet(obj *appsv1.DaemonSet) { + updateStrategy := &obj.Spec.UpdateStrategy + if updateStrategy.Type == "" { + updateStrategy.Type = appsv1.RollingUpdateDaemonSetStrategyType + } + if updateStrategy.Type == appsv1.RollingUpdateDaemonSetStrategyType { + if updateStrategy.RollingUpdate == nil { + rollingUpdate := appsv1.RollingUpdateDaemonSet{} + updateStrategy.RollingUpdate = &rollingUpdate + } + if updateStrategy.RollingUpdate.MaxUnavailable == nil { + // Set default MaxUnavailable as 1 by default. + maxUnavailable := intstr.FromInt(1) + updateStrategy.RollingUpdate.MaxUnavailable = &maxUnavailable + } + } + if obj.Spec.RevisionHistoryLimit == nil { + obj.Spec.RevisionHistoryLimit = new(int32) + *obj.Spec.RevisionHistoryLimit = 10 + } +} + +func SetDefaults_StatefulSet(obj *appsv1.StatefulSet) { + if len(obj.Spec.PodManagementPolicy) == 0 { + obj.Spec.PodManagementPolicy = appsv1.OrderedReadyPodManagement + } + + if obj.Spec.UpdateStrategy.Type == "" { + obj.Spec.UpdateStrategy.Type = appsv1.RollingUpdateStatefulSetStrategyType + + // UpdateStrategy.RollingUpdate will take default values below. + obj.Spec.UpdateStrategy.RollingUpdate = &appsv1.RollingUpdateStatefulSetStrategy{} + } + + if obj.Spec.UpdateStrategy.Type == appsv1.RollingUpdateStatefulSetStrategyType && + obj.Spec.UpdateStrategy.RollingUpdate != nil && + obj.Spec.UpdateStrategy.RollingUpdate.Partition == nil { + obj.Spec.UpdateStrategy.RollingUpdate.Partition = new(int32) + *obj.Spec.UpdateStrategy.RollingUpdate.Partition = 0 + } + + if obj.Spec.Replicas == nil { + obj.Spec.Replicas = new(int32) + *obj.Spec.Replicas = 1 + } + if obj.Spec.RevisionHistoryLimit == nil { + obj.Spec.RevisionHistoryLimit = new(int32) + *obj.Spec.RevisionHistoryLimit = 10 + } +} +func SetDefaults_ReplicaSet(obj *appsv1.ReplicaSet) { + if obj.Spec.Replicas == nil { + obj.Spec.Replicas = new(int32) + *obj.Spec.Replicas = 1 + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/doc.go new file mode 100644 index 000000000000..9a1682faa3de --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/apps +// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/extensions +// +k8s:conversion-gen-external-types=k8s.io/api/apps/v1 +// +k8s:defaulter-gen=TypeMeta +// +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/apps/v1 + +package v1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/register.go new file mode 100644 index 000000000000..e5567276c113 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/register.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + appsv1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "apps" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + localSchemeBuilder = &appsv1.SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addDefaultingFuncs, addConversionFuncs) +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/zz_generated.conversion.go new file mode 100644 index 000000000000..e7503b947875 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/zz_generated.conversion.go @@ -0,0 +1,943 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! 
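+//
+// The conversions below translate between the external k8s.io/api/apps/v1
+// types and the internal k8s.io/kubernetes/pkg/apis/apps and
+// k8s.io/kubernetes/pkg/apis/extensions types. They are registered with the
+// scheme through RegisterConversions (wired up in init below) and are
+// normally reached via scheme.Convert rather than being called directly.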
+ +package v1 + +import ( + v1 "k8s.io/api/apps/v1" + core_v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + apps "k8s.io/kubernetes/pkg/apis/apps" + core "k8s.io/kubernetes/pkg/apis/core" + apis_core_v1 "k8s.io/kubernetes/pkg/apis/core/v1" + extensions "k8s.io/kubernetes/pkg/apis/extensions" + unsafe "unsafe" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1_ControllerRevision_To_apps_ControllerRevision, + Convert_apps_ControllerRevision_To_v1_ControllerRevision, + Convert_v1_ControllerRevisionList_To_apps_ControllerRevisionList, + Convert_apps_ControllerRevisionList_To_v1_ControllerRevisionList, + Convert_v1_DaemonSet_To_extensions_DaemonSet, + Convert_extensions_DaemonSet_To_v1_DaemonSet, + Convert_v1_DaemonSetCondition_To_extensions_DaemonSetCondition, + Convert_extensions_DaemonSetCondition_To_v1_DaemonSetCondition, + Convert_v1_DaemonSetList_To_extensions_DaemonSetList, + Convert_extensions_DaemonSetList_To_v1_DaemonSetList, + Convert_v1_DaemonSetSpec_To_extensions_DaemonSetSpec, + Convert_extensions_DaemonSetSpec_To_v1_DaemonSetSpec, + Convert_v1_DaemonSetStatus_To_extensions_DaemonSetStatus, + Convert_extensions_DaemonSetStatus_To_v1_DaemonSetStatus, + Convert_v1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy, + Convert_extensions_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy, + Convert_v1_Deployment_To_extensions_Deployment, + Convert_extensions_Deployment_To_v1_Deployment, + Convert_v1_DeploymentCondition_To_extensions_DeploymentCondition, + Convert_extensions_DeploymentCondition_To_v1_DeploymentCondition, + Convert_v1_DeploymentList_To_extensions_DeploymentList, + Convert_extensions_DeploymentList_To_v1_DeploymentList, + Convert_v1_DeploymentSpec_To_extensions_DeploymentSpec, + Convert_extensions_DeploymentSpec_To_v1_DeploymentSpec, + Convert_v1_DeploymentStatus_To_extensions_DeploymentStatus, + Convert_extensions_DeploymentStatus_To_v1_DeploymentStatus, + Convert_v1_DeploymentStrategy_To_extensions_DeploymentStrategy, + Convert_extensions_DeploymentStrategy_To_v1_DeploymentStrategy, + Convert_v1_ReplicaSet_To_extensions_ReplicaSet, + Convert_extensions_ReplicaSet_To_v1_ReplicaSet, + Convert_v1_ReplicaSetCondition_To_extensions_ReplicaSetCondition, + Convert_extensions_ReplicaSetCondition_To_v1_ReplicaSetCondition, + Convert_v1_ReplicaSetList_To_extensions_ReplicaSetList, + Convert_extensions_ReplicaSetList_To_v1_ReplicaSetList, + Convert_v1_ReplicaSetSpec_To_extensions_ReplicaSetSpec, + Convert_extensions_ReplicaSetSpec_To_v1_ReplicaSetSpec, + Convert_v1_ReplicaSetStatus_To_extensions_ReplicaSetStatus, + Convert_extensions_ReplicaSetStatus_To_v1_ReplicaSetStatus, + Convert_v1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet, + Convert_extensions_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet, + Convert_v1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment, + Convert_extensions_RollingUpdateDeployment_To_v1_RollingUpdateDeployment, + Convert_v1_RollingUpdateStatefulSetStrategy_To_apps_RollingUpdateStatefulSetStrategy, + Convert_apps_RollingUpdateStatefulSetStrategy_To_v1_RollingUpdateStatefulSetStrategy, + Convert_v1_StatefulSet_To_apps_StatefulSet, + 
Convert_apps_StatefulSet_To_v1_StatefulSet, + Convert_v1_StatefulSetCondition_To_apps_StatefulSetCondition, + Convert_apps_StatefulSetCondition_To_v1_StatefulSetCondition, + Convert_v1_StatefulSetList_To_apps_StatefulSetList, + Convert_apps_StatefulSetList_To_v1_StatefulSetList, + Convert_v1_StatefulSetSpec_To_apps_StatefulSetSpec, + Convert_apps_StatefulSetSpec_To_v1_StatefulSetSpec, + Convert_v1_StatefulSetStatus_To_apps_StatefulSetStatus, + Convert_apps_StatefulSetStatus_To_v1_StatefulSetStatus, + Convert_v1_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy, + Convert_apps_StatefulSetUpdateStrategy_To_v1_StatefulSetUpdateStrategy, + ) +} + +func autoConvert_v1_ControllerRevision_To_apps_ControllerRevision(in *v1.ControllerRevision, out *apps.ControllerRevision, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.Data, &out.Data, s); err != nil { + return err + } + out.Revision = in.Revision + return nil +} + +// Convert_v1_ControllerRevision_To_apps_ControllerRevision is an autogenerated conversion function. +func Convert_v1_ControllerRevision_To_apps_ControllerRevision(in *v1.ControllerRevision, out *apps.ControllerRevision, s conversion.Scope) error { + return autoConvert_v1_ControllerRevision_To_apps_ControllerRevision(in, out, s) +} + +func autoConvert_apps_ControllerRevision_To_v1_ControllerRevision(in *apps.ControllerRevision, out *v1.ControllerRevision, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&in.Data, &out.Data, s); err != nil { + return err + } + out.Revision = in.Revision + return nil +} + +// Convert_apps_ControllerRevision_To_v1_ControllerRevision is an autogenerated conversion function. +func Convert_apps_ControllerRevision_To_v1_ControllerRevision(in *apps.ControllerRevision, out *v1.ControllerRevision, s conversion.Scope) error { + return autoConvert_apps_ControllerRevision_To_v1_ControllerRevision(in, out, s) +} + +func autoConvert_v1_ControllerRevisionList_To_apps_ControllerRevisionList(in *v1.ControllerRevisionList, out *apps.ControllerRevisionList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]apps.ControllerRevision, len(*in)) + for i := range *in { + if err := Convert_v1_ControllerRevision_To_apps_ControllerRevision(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_ControllerRevisionList_To_apps_ControllerRevisionList is an autogenerated conversion function. +func Convert_v1_ControllerRevisionList_To_apps_ControllerRevisionList(in *v1.ControllerRevisionList, out *apps.ControllerRevisionList, s conversion.Scope) error { + return autoConvert_v1_ControllerRevisionList_To_apps_ControllerRevisionList(in, out, s) +} + +func autoConvert_apps_ControllerRevisionList_To_v1_ControllerRevisionList(in *apps.ControllerRevisionList, out *v1.ControllerRevisionList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1.ControllerRevision, len(*in)) + for i := range *in { + if err := Convert_apps_ControllerRevision_To_v1_ControllerRevision(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_apps_ControllerRevisionList_To_v1_ControllerRevisionList is an autogenerated conversion function. 
+func Convert_apps_ControllerRevisionList_To_v1_ControllerRevisionList(in *apps.ControllerRevisionList, out *v1.ControllerRevisionList, s conversion.Scope) error { + return autoConvert_apps_ControllerRevisionList_To_v1_ControllerRevisionList(in, out, s) +} + +func autoConvert_v1_DaemonSet_To_extensions_DaemonSet(in *v1.DaemonSet, out *extensions.DaemonSet, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_DaemonSetSpec_To_extensions_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_DaemonSetStatus_To_extensions_DaemonSetStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func autoConvert_extensions_DaemonSet_To_v1_DaemonSet(in *extensions.DaemonSet, out *v1.DaemonSet, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_extensions_DaemonSetSpec_To_v1_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_extensions_DaemonSetStatus_To_v1_DaemonSetStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func autoConvert_v1_DaemonSetCondition_To_extensions_DaemonSetCondition(in *v1.DaemonSetCondition, out *extensions.DaemonSetCondition, s conversion.Scope) error { + out.Type = extensions.DaemonSetConditionType(in.Type) + out.Status = core.ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1_DaemonSetCondition_To_extensions_DaemonSetCondition is an autogenerated conversion function. +func Convert_v1_DaemonSetCondition_To_extensions_DaemonSetCondition(in *v1.DaemonSetCondition, out *extensions.DaemonSetCondition, s conversion.Scope) error { + return autoConvert_v1_DaemonSetCondition_To_extensions_DaemonSetCondition(in, out, s) +} + +func autoConvert_extensions_DaemonSetCondition_To_v1_DaemonSetCondition(in *extensions.DaemonSetCondition, out *v1.DaemonSetCondition, s conversion.Scope) error { + out.Type = v1.DaemonSetConditionType(in.Type) + out.Status = core_v1.ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_extensions_DaemonSetCondition_To_v1_DaemonSetCondition is an autogenerated conversion function. +func Convert_extensions_DaemonSetCondition_To_v1_DaemonSetCondition(in *extensions.DaemonSetCondition, out *v1.DaemonSetCondition, s conversion.Scope) error { + return autoConvert_extensions_DaemonSetCondition_To_v1_DaemonSetCondition(in, out, s) +} + +func autoConvert_v1_DaemonSetList_To_extensions_DaemonSetList(in *v1.DaemonSetList, out *extensions.DaemonSetList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]extensions.DaemonSet, len(*in)) + for i := range *in { + if err := Convert_v1_DaemonSet_To_extensions_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_DaemonSetList_To_extensions_DaemonSetList is an autogenerated conversion function. 
+func Convert_v1_DaemonSetList_To_extensions_DaemonSetList(in *v1.DaemonSetList, out *extensions.DaemonSetList, s conversion.Scope) error { + return autoConvert_v1_DaemonSetList_To_extensions_DaemonSetList(in, out, s) +} + +func autoConvert_extensions_DaemonSetList_To_v1_DaemonSetList(in *extensions.DaemonSetList, out *v1.DaemonSetList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1.DaemonSet, len(*in)) + for i := range *in { + if err := Convert_extensions_DaemonSet_To_v1_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_extensions_DaemonSetList_To_v1_DaemonSetList is an autogenerated conversion function. +func Convert_extensions_DaemonSetList_To_v1_DaemonSetList(in *extensions.DaemonSetList, out *v1.DaemonSetList, s conversion.Scope) error { + return autoConvert_extensions_DaemonSetList_To_v1_DaemonSetList(in, out, s) +} + +func autoConvert_v1_DaemonSetSpec_To_extensions_DaemonSetSpec(in *v1.DaemonSetSpec, out *extensions.DaemonSetSpec, s conversion.Scope) error { + out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := apis_core_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + if err := Convert_v1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { + return err + } + out.MinReadySeconds = in.MinReadySeconds + out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit)) + return nil +} + +func autoConvert_extensions_DaemonSetSpec_To_v1_DaemonSetSpec(in *extensions.DaemonSetSpec, out *v1.DaemonSetSpec, s conversion.Scope) error { + out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := apis_core_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + if err := Convert_extensions_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { + return err + } + out.MinReadySeconds = in.MinReadySeconds + // WARNING: in.TemplateGeneration requires manual conversion: does not exist in peer-type + out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit)) + return nil +} + +func autoConvert_v1_DaemonSetStatus_To_extensions_DaemonSetStatus(in *v1.DaemonSetStatus, out *extensions.DaemonSetStatus, s conversion.Scope) error { + out.CurrentNumberScheduled = in.CurrentNumberScheduled + out.NumberMisscheduled = in.NumberMisscheduled + out.DesiredNumberScheduled = in.DesiredNumberScheduled + out.NumberReady = in.NumberReady + out.ObservedGeneration = in.ObservedGeneration + out.UpdatedNumberScheduled = in.UpdatedNumberScheduled + out.NumberAvailable = in.NumberAvailable + out.NumberUnavailable = in.NumberUnavailable + out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount)) + out.Conditions = *(*[]extensions.DaemonSetCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +// Convert_v1_DaemonSetStatus_To_extensions_DaemonSetStatus is an autogenerated conversion function. 
+func Convert_v1_DaemonSetStatus_To_extensions_DaemonSetStatus(in *v1.DaemonSetStatus, out *extensions.DaemonSetStatus, s conversion.Scope) error { + return autoConvert_v1_DaemonSetStatus_To_extensions_DaemonSetStatus(in, out, s) +} + +func autoConvert_extensions_DaemonSetStatus_To_v1_DaemonSetStatus(in *extensions.DaemonSetStatus, out *v1.DaemonSetStatus, s conversion.Scope) error { + out.CurrentNumberScheduled = in.CurrentNumberScheduled + out.NumberMisscheduled = in.NumberMisscheduled + out.DesiredNumberScheduled = in.DesiredNumberScheduled + out.NumberReady = in.NumberReady + out.ObservedGeneration = in.ObservedGeneration + out.UpdatedNumberScheduled = in.UpdatedNumberScheduled + out.NumberAvailable = in.NumberAvailable + out.NumberUnavailable = in.NumberUnavailable + out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount)) + out.Conditions = *(*[]v1.DaemonSetCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +// Convert_extensions_DaemonSetStatus_To_v1_DaemonSetStatus is an autogenerated conversion function. +func Convert_extensions_DaemonSetStatus_To_v1_DaemonSetStatus(in *extensions.DaemonSetStatus, out *v1.DaemonSetStatus, s conversion.Scope) error { + return autoConvert_extensions_DaemonSetStatus_To_v1_DaemonSetStatus(in, out, s) +} + +func autoConvert_v1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(in *v1.DaemonSetUpdateStrategy, out *extensions.DaemonSetUpdateStrategy, s conversion.Scope) error { + out.Type = extensions.DaemonSetUpdateStrategyType(in.Type) + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(extensions.RollingUpdateDaemonSet) + if err := Convert_v1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(*in, *out, s); err != nil { + return err + } + } else { + out.RollingUpdate = nil + } + return nil +} + +func autoConvert_extensions_DaemonSetUpdateStrategy_To_v1_DaemonSetUpdateStrategy(in *extensions.DaemonSetUpdateStrategy, out *v1.DaemonSetUpdateStrategy, s conversion.Scope) error { + out.Type = v1.DaemonSetUpdateStrategyType(in.Type) + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(v1.RollingUpdateDaemonSet) + if err := Convert_extensions_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet(*in, *out, s); err != nil { + return err + } + } else { + out.RollingUpdate = nil + } + return nil +} + +func autoConvert_v1_Deployment_To_extensions_Deployment(in *v1.Deployment, out *extensions.Deployment, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_DeploymentSpec_To_extensions_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_DeploymentStatus_To_extensions_DeploymentStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func autoConvert_extensions_Deployment_To_v1_Deployment(in *extensions.Deployment, out *v1.Deployment, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_extensions_DeploymentSpec_To_v1_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_extensions_DeploymentStatus_To_v1_DeploymentStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func autoConvert_v1_DeploymentCondition_To_extensions_DeploymentCondition(in *v1.DeploymentCondition, out *extensions.DeploymentCondition, s conversion.Scope) error { + out.Type = extensions.DeploymentConditionType(in.Type) + out.Status = core.ConditionStatus(in.Status) + out.LastUpdateTime = 
in.LastUpdateTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1_DeploymentCondition_To_extensions_DeploymentCondition is an autogenerated conversion function. +func Convert_v1_DeploymentCondition_To_extensions_DeploymentCondition(in *v1.DeploymentCondition, out *extensions.DeploymentCondition, s conversion.Scope) error { + return autoConvert_v1_DeploymentCondition_To_extensions_DeploymentCondition(in, out, s) +} + +func autoConvert_extensions_DeploymentCondition_To_v1_DeploymentCondition(in *extensions.DeploymentCondition, out *v1.DeploymentCondition, s conversion.Scope) error { + out.Type = v1.DeploymentConditionType(in.Type) + out.Status = core_v1.ConditionStatus(in.Status) + out.LastUpdateTime = in.LastUpdateTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_extensions_DeploymentCondition_To_v1_DeploymentCondition is an autogenerated conversion function. +func Convert_extensions_DeploymentCondition_To_v1_DeploymentCondition(in *extensions.DeploymentCondition, out *v1.DeploymentCondition, s conversion.Scope) error { + return autoConvert_extensions_DeploymentCondition_To_v1_DeploymentCondition(in, out, s) +} + +func autoConvert_v1_DeploymentList_To_extensions_DeploymentList(in *v1.DeploymentList, out *extensions.DeploymentList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]extensions.Deployment, len(*in)) + for i := range *in { + if err := Convert_v1_Deployment_To_extensions_Deployment(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_DeploymentList_To_extensions_DeploymentList is an autogenerated conversion function. +func Convert_v1_DeploymentList_To_extensions_DeploymentList(in *v1.DeploymentList, out *extensions.DeploymentList, s conversion.Scope) error { + return autoConvert_v1_DeploymentList_To_extensions_DeploymentList(in, out, s) +} + +func autoConvert_extensions_DeploymentList_To_v1_DeploymentList(in *extensions.DeploymentList, out *v1.DeploymentList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1.Deployment, len(*in)) + for i := range *in { + if err := Convert_extensions_Deployment_To_v1_Deployment(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_extensions_DeploymentList_To_v1_DeploymentList is an autogenerated conversion function. 
+func Convert_extensions_DeploymentList_To_v1_DeploymentList(in *extensions.DeploymentList, out *v1.DeploymentList, s conversion.Scope) error { + return autoConvert_extensions_DeploymentList_To_v1_DeploymentList(in, out, s) +} + +func autoConvert_v1_DeploymentSpec_To_extensions_DeploymentSpec(in *v1.DeploymentSpec, out *extensions.DeploymentSpec, s conversion.Scope) error { + if err := meta_v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { + return err + } + out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := apis_core_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + if err := Convert_v1_DeploymentStrategy_To_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + return err + } + out.MinReadySeconds = in.MinReadySeconds + out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit)) + out.Paused = in.Paused + out.ProgressDeadlineSeconds = (*int32)(unsafe.Pointer(in.ProgressDeadlineSeconds)) + return nil +} + +func autoConvert_extensions_DeploymentSpec_To_v1_DeploymentSpec(in *extensions.DeploymentSpec, out *v1.DeploymentSpec, s conversion.Scope) error { + if err := meta_v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { + return err + } + out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := apis_core_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + if err := Convert_extensions_DeploymentStrategy_To_v1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + return err + } + out.MinReadySeconds = in.MinReadySeconds + out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit)) + out.Paused = in.Paused + // WARNING: in.RollbackTo requires manual conversion: does not exist in peer-type + out.ProgressDeadlineSeconds = (*int32)(unsafe.Pointer(in.ProgressDeadlineSeconds)) + return nil +} + +func autoConvert_v1_DeploymentStatus_To_extensions_DeploymentStatus(in *v1.DeploymentStatus, out *extensions.DeploymentStatus, s conversion.Scope) error { + out.ObservedGeneration = in.ObservedGeneration + out.Replicas = in.Replicas + out.UpdatedReplicas = in.UpdatedReplicas + out.ReadyReplicas = in.ReadyReplicas + out.AvailableReplicas = in.AvailableReplicas + out.UnavailableReplicas = in.UnavailableReplicas + out.Conditions = *(*[]extensions.DeploymentCondition)(unsafe.Pointer(&in.Conditions)) + out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount)) + return nil +} + +// Convert_v1_DeploymentStatus_To_extensions_DeploymentStatus is an autogenerated conversion function. 
+func Convert_v1_DeploymentStatus_To_extensions_DeploymentStatus(in *v1.DeploymentStatus, out *extensions.DeploymentStatus, s conversion.Scope) error { + return autoConvert_v1_DeploymentStatus_To_extensions_DeploymentStatus(in, out, s) +} + +func autoConvert_extensions_DeploymentStatus_To_v1_DeploymentStatus(in *extensions.DeploymentStatus, out *v1.DeploymentStatus, s conversion.Scope) error { + out.ObservedGeneration = in.ObservedGeneration + out.Replicas = in.Replicas + out.UpdatedReplicas = in.UpdatedReplicas + out.ReadyReplicas = in.ReadyReplicas + out.AvailableReplicas = in.AvailableReplicas + out.UnavailableReplicas = in.UnavailableReplicas + out.Conditions = *(*[]v1.DeploymentCondition)(unsafe.Pointer(&in.Conditions)) + out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount)) + return nil +} + +// Convert_extensions_DeploymentStatus_To_v1_DeploymentStatus is an autogenerated conversion function. +func Convert_extensions_DeploymentStatus_To_v1_DeploymentStatus(in *extensions.DeploymentStatus, out *v1.DeploymentStatus, s conversion.Scope) error { + return autoConvert_extensions_DeploymentStatus_To_v1_DeploymentStatus(in, out, s) +} + +func autoConvert_v1_DeploymentStrategy_To_extensions_DeploymentStrategy(in *v1.DeploymentStrategy, out *extensions.DeploymentStrategy, s conversion.Scope) error { + out.Type = extensions.DeploymentStrategyType(in.Type) + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(extensions.RollingUpdateDeployment) + if err := Convert_v1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(*in, *out, s); err != nil { + return err + } + } else { + out.RollingUpdate = nil + } + return nil +} + +func autoConvert_extensions_DeploymentStrategy_To_v1_DeploymentStrategy(in *extensions.DeploymentStrategy, out *v1.DeploymentStrategy, s conversion.Scope) error { + out.Type = v1.DeploymentStrategyType(in.Type) + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(v1.RollingUpdateDeployment) + if err := Convert_extensions_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(*in, *out, s); err != nil { + return err + } + } else { + out.RollingUpdate = nil + } + return nil +} + +func autoConvert_v1_ReplicaSet_To_extensions_ReplicaSet(in *v1.ReplicaSet, out *extensions.ReplicaSet, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_ReplicaSet_To_extensions_ReplicaSet is an autogenerated conversion function. +func Convert_v1_ReplicaSet_To_extensions_ReplicaSet(in *v1.ReplicaSet, out *extensions.ReplicaSet, s conversion.Scope) error { + return autoConvert_v1_ReplicaSet_To_extensions_ReplicaSet(in, out, s) +} + +func autoConvert_extensions_ReplicaSet_To_v1_ReplicaSet(in *extensions.ReplicaSet, out *v1.ReplicaSet, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_extensions_ReplicaSetSpec_To_v1_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_extensions_ReplicaSetStatus_To_v1_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_extensions_ReplicaSet_To_v1_ReplicaSet is an autogenerated conversion function. 
+func Convert_extensions_ReplicaSet_To_v1_ReplicaSet(in *extensions.ReplicaSet, out *v1.ReplicaSet, s conversion.Scope) error { + return autoConvert_extensions_ReplicaSet_To_v1_ReplicaSet(in, out, s) +} + +func autoConvert_v1_ReplicaSetCondition_To_extensions_ReplicaSetCondition(in *v1.ReplicaSetCondition, out *extensions.ReplicaSetCondition, s conversion.Scope) error { + out.Type = extensions.ReplicaSetConditionType(in.Type) + out.Status = core.ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1_ReplicaSetCondition_To_extensions_ReplicaSetCondition is an autogenerated conversion function. +func Convert_v1_ReplicaSetCondition_To_extensions_ReplicaSetCondition(in *v1.ReplicaSetCondition, out *extensions.ReplicaSetCondition, s conversion.Scope) error { + return autoConvert_v1_ReplicaSetCondition_To_extensions_ReplicaSetCondition(in, out, s) +} + +func autoConvert_extensions_ReplicaSetCondition_To_v1_ReplicaSetCondition(in *extensions.ReplicaSetCondition, out *v1.ReplicaSetCondition, s conversion.Scope) error { + out.Type = v1.ReplicaSetConditionType(in.Type) + out.Status = core_v1.ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_extensions_ReplicaSetCondition_To_v1_ReplicaSetCondition is an autogenerated conversion function. +func Convert_extensions_ReplicaSetCondition_To_v1_ReplicaSetCondition(in *extensions.ReplicaSetCondition, out *v1.ReplicaSetCondition, s conversion.Scope) error { + return autoConvert_extensions_ReplicaSetCondition_To_v1_ReplicaSetCondition(in, out, s) +} + +func autoConvert_v1_ReplicaSetList_To_extensions_ReplicaSetList(in *v1.ReplicaSetList, out *extensions.ReplicaSetList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]extensions.ReplicaSet, len(*in)) + for i := range *in { + if err := Convert_v1_ReplicaSet_To_extensions_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_ReplicaSetList_To_extensions_ReplicaSetList is an autogenerated conversion function. +func Convert_v1_ReplicaSetList_To_extensions_ReplicaSetList(in *v1.ReplicaSetList, out *extensions.ReplicaSetList, s conversion.Scope) error { + return autoConvert_v1_ReplicaSetList_To_extensions_ReplicaSetList(in, out, s) +} + +func autoConvert_extensions_ReplicaSetList_To_v1_ReplicaSetList(in *extensions.ReplicaSetList, out *v1.ReplicaSetList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1.ReplicaSet, len(*in)) + for i := range *in { + if err := Convert_extensions_ReplicaSet_To_v1_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_extensions_ReplicaSetList_To_v1_ReplicaSetList is an autogenerated conversion function. 
+func Convert_extensions_ReplicaSetList_To_v1_ReplicaSetList(in *extensions.ReplicaSetList, out *v1.ReplicaSetList, s conversion.Scope) error { + return autoConvert_extensions_ReplicaSetList_To_v1_ReplicaSetList(in, out, s) +} + +func autoConvert_v1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *v1.ReplicaSetSpec, out *extensions.ReplicaSetSpec, s conversion.Scope) error { + if err := meta_v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { + return err + } + out.MinReadySeconds = in.MinReadySeconds + out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := apis_core_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +func autoConvert_extensions_ReplicaSetSpec_To_v1_ReplicaSetSpec(in *extensions.ReplicaSetSpec, out *v1.ReplicaSetSpec, s conversion.Scope) error { + if err := meta_v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { + return err + } + out.MinReadySeconds = in.MinReadySeconds + out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := apis_core_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +func autoConvert_v1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(in *v1.ReplicaSetStatus, out *extensions.ReplicaSetStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.FullyLabeledReplicas = in.FullyLabeledReplicas + out.ReadyReplicas = in.ReadyReplicas + out.AvailableReplicas = in.AvailableReplicas + out.ObservedGeneration = in.ObservedGeneration + out.Conditions = *(*[]extensions.ReplicaSetCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +// Convert_v1_ReplicaSetStatus_To_extensions_ReplicaSetStatus is an autogenerated conversion function. +func Convert_v1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(in *v1.ReplicaSetStatus, out *extensions.ReplicaSetStatus, s conversion.Scope) error { + return autoConvert_v1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(in, out, s) +} + +func autoConvert_extensions_ReplicaSetStatus_To_v1_ReplicaSetStatus(in *extensions.ReplicaSetStatus, out *v1.ReplicaSetStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.FullyLabeledReplicas = in.FullyLabeledReplicas + out.ReadyReplicas = in.ReadyReplicas + out.AvailableReplicas = in.AvailableReplicas + out.ObservedGeneration = in.ObservedGeneration + out.Conditions = *(*[]v1.ReplicaSetCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +// Convert_extensions_ReplicaSetStatus_To_v1_ReplicaSetStatus is an autogenerated conversion function. 
+func Convert_extensions_ReplicaSetStatus_To_v1_ReplicaSetStatus(in *extensions.ReplicaSetStatus, out *v1.ReplicaSetStatus, s conversion.Scope) error { + return autoConvert_extensions_ReplicaSetStatus_To_v1_ReplicaSetStatus(in, out, s) +} + +func autoConvert_v1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(in *v1.RollingUpdateDaemonSet, out *extensions.RollingUpdateDaemonSet, s conversion.Scope) error { + // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString) + return nil +} + +func autoConvert_extensions_RollingUpdateDaemonSet_To_v1_RollingUpdateDaemonSet(in *extensions.RollingUpdateDaemonSet, out *v1.RollingUpdateDaemonSet, s conversion.Scope) error { + // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString) + return nil +} + +func autoConvert_v1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in *v1.RollingUpdateDeployment, out *extensions.RollingUpdateDeployment, s conversion.Scope) error { + // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString) + // WARNING: in.MaxSurge requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString) + return nil +} + +func autoConvert_extensions_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(in *extensions.RollingUpdateDeployment, out *v1.RollingUpdateDeployment, s conversion.Scope) error { + // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString) + // WARNING: in.MaxSurge requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString) + return nil +} + +func autoConvert_v1_RollingUpdateStatefulSetStrategy_To_apps_RollingUpdateStatefulSetStrategy(in *v1.RollingUpdateStatefulSetStrategy, out *apps.RollingUpdateStatefulSetStrategy, s conversion.Scope) error { + if err := meta_v1.Convert_Pointer_int32_To_int32(&in.Partition, &out.Partition, s); err != nil { + return err + } + return nil +} + +// Convert_v1_RollingUpdateStatefulSetStrategy_To_apps_RollingUpdateStatefulSetStrategy is an autogenerated conversion function. +func Convert_v1_RollingUpdateStatefulSetStrategy_To_apps_RollingUpdateStatefulSetStrategy(in *v1.RollingUpdateStatefulSetStrategy, out *apps.RollingUpdateStatefulSetStrategy, s conversion.Scope) error { + return autoConvert_v1_RollingUpdateStatefulSetStrategy_To_apps_RollingUpdateStatefulSetStrategy(in, out, s) +} + +func autoConvert_apps_RollingUpdateStatefulSetStrategy_To_v1_RollingUpdateStatefulSetStrategy(in *apps.RollingUpdateStatefulSetStrategy, out *v1.RollingUpdateStatefulSetStrategy, s conversion.Scope) error { + if err := meta_v1.Convert_int32_To_Pointer_int32(&in.Partition, &out.Partition, s); err != nil { + return err + } + return nil +} + +// Convert_apps_RollingUpdateStatefulSetStrategy_To_v1_RollingUpdateStatefulSetStrategy is an autogenerated conversion function. 
+func Convert_apps_RollingUpdateStatefulSetStrategy_To_v1_RollingUpdateStatefulSetStrategy(in *apps.RollingUpdateStatefulSetStrategy, out *v1.RollingUpdateStatefulSetStrategy, s conversion.Scope) error { + return autoConvert_apps_RollingUpdateStatefulSetStrategy_To_v1_RollingUpdateStatefulSetStrategy(in, out, s) +} + +func autoConvert_v1_StatefulSet_To_apps_StatefulSet(in *v1.StatefulSet, out *apps.StatefulSet, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_StatefulSetSpec_To_apps_StatefulSetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_StatefulSetStatus_To_apps_StatefulSetStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_StatefulSet_To_apps_StatefulSet is an autogenerated conversion function. +func Convert_v1_StatefulSet_To_apps_StatefulSet(in *v1.StatefulSet, out *apps.StatefulSet, s conversion.Scope) error { + return autoConvert_v1_StatefulSet_To_apps_StatefulSet(in, out, s) +} + +func autoConvert_apps_StatefulSet_To_v1_StatefulSet(in *apps.StatefulSet, out *v1.StatefulSet, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_apps_StatefulSetSpec_To_v1_StatefulSetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_apps_StatefulSetStatus_To_v1_StatefulSetStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_apps_StatefulSet_To_v1_StatefulSet is an autogenerated conversion function. +func Convert_apps_StatefulSet_To_v1_StatefulSet(in *apps.StatefulSet, out *v1.StatefulSet, s conversion.Scope) error { + return autoConvert_apps_StatefulSet_To_v1_StatefulSet(in, out, s) +} + +func autoConvert_v1_StatefulSetCondition_To_apps_StatefulSetCondition(in *v1.StatefulSetCondition, out *apps.StatefulSetCondition, s conversion.Scope) error { + out.Type = apps.StatefulSetConditionType(in.Type) + out.Status = core.ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1_StatefulSetCondition_To_apps_StatefulSetCondition is an autogenerated conversion function. +func Convert_v1_StatefulSetCondition_To_apps_StatefulSetCondition(in *v1.StatefulSetCondition, out *apps.StatefulSetCondition, s conversion.Scope) error { + return autoConvert_v1_StatefulSetCondition_To_apps_StatefulSetCondition(in, out, s) +} + +func autoConvert_apps_StatefulSetCondition_To_v1_StatefulSetCondition(in *apps.StatefulSetCondition, out *v1.StatefulSetCondition, s conversion.Scope) error { + out.Type = v1.StatefulSetConditionType(in.Type) + out.Status = core_v1.ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_apps_StatefulSetCondition_To_v1_StatefulSetCondition is an autogenerated conversion function. 
+func Convert_apps_StatefulSetCondition_To_v1_StatefulSetCondition(in *apps.StatefulSetCondition, out *v1.StatefulSetCondition, s conversion.Scope) error { + return autoConvert_apps_StatefulSetCondition_To_v1_StatefulSetCondition(in, out, s) +} + +func autoConvert_v1_StatefulSetList_To_apps_StatefulSetList(in *v1.StatefulSetList, out *apps.StatefulSetList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]apps.StatefulSet, len(*in)) + for i := range *in { + if err := Convert_v1_StatefulSet_To_apps_StatefulSet(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_StatefulSetList_To_apps_StatefulSetList is an autogenerated conversion function. +func Convert_v1_StatefulSetList_To_apps_StatefulSetList(in *v1.StatefulSetList, out *apps.StatefulSetList, s conversion.Scope) error { + return autoConvert_v1_StatefulSetList_To_apps_StatefulSetList(in, out, s) +} + +func autoConvert_apps_StatefulSetList_To_v1_StatefulSetList(in *apps.StatefulSetList, out *v1.StatefulSetList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1.StatefulSet, len(*in)) + for i := range *in { + if err := Convert_apps_StatefulSet_To_v1_StatefulSet(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_apps_StatefulSetList_To_v1_StatefulSetList is an autogenerated conversion function. +func Convert_apps_StatefulSetList_To_v1_StatefulSetList(in *apps.StatefulSetList, out *v1.StatefulSetList, s conversion.Scope) error { + return autoConvert_apps_StatefulSetList_To_v1_StatefulSetList(in, out, s) +} + +func autoConvert_v1_StatefulSetSpec_To_apps_StatefulSetSpec(in *v1.StatefulSetSpec, out *apps.StatefulSetSpec, s conversion.Scope) error { + if err := meta_v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { + return err + } + out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := apis_core_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + out.VolumeClaimTemplates = *(*[]core.PersistentVolumeClaim)(unsafe.Pointer(&in.VolumeClaimTemplates)) + out.ServiceName = in.ServiceName + out.PodManagementPolicy = apps.PodManagementPolicyType(in.PodManagementPolicy) + if err := Convert_v1_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { + return err + } + out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit)) + return nil +} + +func autoConvert_apps_StatefulSetSpec_To_v1_StatefulSetSpec(in *apps.StatefulSetSpec, out *v1.StatefulSetSpec, s conversion.Scope) error { + if err := meta_v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { + return err + } + out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := apis_core_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + out.VolumeClaimTemplates = *(*[]core_v1.PersistentVolumeClaim)(unsafe.Pointer(&in.VolumeClaimTemplates)) + out.ServiceName = in.ServiceName + out.PodManagementPolicy = v1.PodManagementPolicyType(in.PodManagementPolicy) + if err := Convert_apps_StatefulSetUpdateStrategy_To_v1_StatefulSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != 
nil { + return err + } + out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit)) + return nil +} + +func autoConvert_v1_StatefulSetStatus_To_apps_StatefulSetStatus(in *v1.StatefulSetStatus, out *apps.StatefulSetStatus, s conversion.Scope) error { + // WARNING: in.ObservedGeneration requires manual conversion: inconvertible types (int64 vs *int64) + out.Replicas = in.Replicas + out.ReadyReplicas = in.ReadyReplicas + out.CurrentReplicas = in.CurrentReplicas + out.UpdatedReplicas = in.UpdatedReplicas + out.CurrentRevision = in.CurrentRevision + out.UpdateRevision = in.UpdateRevision + out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount)) + out.Conditions = *(*[]apps.StatefulSetCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +func autoConvert_apps_StatefulSetStatus_To_v1_StatefulSetStatus(in *apps.StatefulSetStatus, out *v1.StatefulSetStatus, s conversion.Scope) error { + // WARNING: in.ObservedGeneration requires manual conversion: inconvertible types (*int64 vs int64) + out.Replicas = in.Replicas + out.ReadyReplicas = in.ReadyReplicas + out.CurrentReplicas = in.CurrentReplicas + out.UpdatedReplicas = in.UpdatedReplicas + out.CurrentRevision = in.CurrentRevision + out.UpdateRevision = in.UpdateRevision + out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount)) + out.Conditions = *(*[]v1.StatefulSetCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +func autoConvert_v1_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy(in *v1.StatefulSetUpdateStrategy, out *apps.StatefulSetUpdateStrategy, s conversion.Scope) error { + out.Type = apps.StatefulSetUpdateStrategyType(in.Type) + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(apps.RollingUpdateStatefulSetStrategy) + if err := Convert_v1_RollingUpdateStatefulSetStrategy_To_apps_RollingUpdateStatefulSetStrategy(*in, *out, s); err != nil { + return err + } + } else { + out.RollingUpdate = nil + } + return nil +} + +func autoConvert_apps_StatefulSetUpdateStrategy_To_v1_StatefulSetUpdateStrategy(in *apps.StatefulSetUpdateStrategy, out *v1.StatefulSetUpdateStrategy, s conversion.Scope) error { + out.Type = v1.StatefulSetUpdateStrategyType(in.Type) + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(v1.RollingUpdateStatefulSetStrategy) + if err := Convert_apps_RollingUpdateStatefulSetStrategy_To_v1_RollingUpdateStatefulSetStrategy(*in, *out, s); err != nil { + return err + } + } else { + out.RollingUpdate = nil + } + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/zz_generated.defaults.go new file mode 100644 index 000000000000..d05eff3c5d6d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1/zz_generated.defaults.go @@ -0,0 +1,625 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. 
Do not edit it manually! + +package v1 + +import ( + v1 "k8s.io/api/apps/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + core_v1 "k8s.io/kubernetes/pkg/apis/core/v1" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&v1.DaemonSet{}, func(obj interface{}) { SetObjectDefaults_DaemonSet(obj.(*v1.DaemonSet)) }) + scheme.AddTypeDefaultingFunc(&v1.DaemonSetList{}, func(obj interface{}) { SetObjectDefaults_DaemonSetList(obj.(*v1.DaemonSetList)) }) + scheme.AddTypeDefaultingFunc(&v1.Deployment{}, func(obj interface{}) { SetObjectDefaults_Deployment(obj.(*v1.Deployment)) }) + scheme.AddTypeDefaultingFunc(&v1.DeploymentList{}, func(obj interface{}) { SetObjectDefaults_DeploymentList(obj.(*v1.DeploymentList)) }) + scheme.AddTypeDefaultingFunc(&v1.ReplicaSet{}, func(obj interface{}) { SetObjectDefaults_ReplicaSet(obj.(*v1.ReplicaSet)) }) + scheme.AddTypeDefaultingFunc(&v1.ReplicaSetList{}, func(obj interface{}) { SetObjectDefaults_ReplicaSetList(obj.(*v1.ReplicaSetList)) }) + scheme.AddTypeDefaultingFunc(&v1.StatefulSet{}, func(obj interface{}) { SetObjectDefaults_StatefulSet(obj.(*v1.StatefulSet)) }) + scheme.AddTypeDefaultingFunc(&v1.StatefulSetList{}, func(obj interface{}) { SetObjectDefaults_StatefulSetList(obj.(*v1.StatefulSetList)) }) + return nil +} + +func SetObjectDefaults_DaemonSet(in *v1.DaemonSet) { + SetDefaults_DaemonSet(in) + core_v1.SetDefaults_PodSpec(&in.Spec.Template.Spec) + for i := range in.Spec.Template.Spec.Volumes { + a := &in.Spec.Template.Spec.Volumes[i] + core_v1.SetDefaults_Volume(a) + if a.VolumeSource.HostPath != nil { + core_v1.SetDefaults_HostPathVolumeSource(a.VolumeSource.HostPath) + } + if a.VolumeSource.Secret != nil { + core_v1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) + } + if a.VolumeSource.ISCSI != nil { + core_v1.SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) + } + if a.VolumeSource.RBD != nil { + core_v1.SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) + } + if a.VolumeSource.DownwardAPI != nil { + core_v1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) + for j := range a.VolumeSource.DownwardAPI.Items { + b := &a.VolumeSource.DownwardAPI.Items[j] + if b.FieldRef != nil { + core_v1.SetDefaults_ObjectFieldSelector(b.FieldRef) + } + } + } + if a.VolumeSource.ConfigMap != nil { + core_v1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) + } + if a.VolumeSource.AzureDisk != nil { + core_v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) + } + if a.VolumeSource.Projected != nil { + core_v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + core_v1.SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + } + } + if a.VolumeSource.ScaleIO != nil { + core_v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } + } + for i := range in.Spec.Template.Spec.InitContainers { + a := &in.Spec.Template.Spec.InitContainers[i] + core_v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + core_v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + 
core_v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + core_v1.SetDefaults_ResourceList(&a.Resources.Limits) + core_v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + core_v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + core_v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } + for i := range in.Spec.Template.Spec.Containers { + a := &in.Spec.Template.Spec.Containers[i] + core_v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + core_v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + core_v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + core_v1.SetDefaults_ResourceList(&a.Resources.Limits) + core_v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + core_v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + core_v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } +} + +func SetObjectDefaults_DaemonSetList(in *v1.DaemonSetList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_DaemonSet(a) + } +} + +func SetObjectDefaults_Deployment(in *v1.Deployment) { + SetDefaults_Deployment(in) + core_v1.SetDefaults_PodSpec(&in.Spec.Template.Spec) + for i := range in.Spec.Template.Spec.Volumes { + a := &in.Spec.Template.Spec.Volumes[i] + core_v1.SetDefaults_Volume(a) + if a.VolumeSource.HostPath != nil { + core_v1.SetDefaults_HostPathVolumeSource(a.VolumeSource.HostPath) + } + if a.VolumeSource.Secret != nil { + core_v1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) + } + if a.VolumeSource.ISCSI != nil { + core_v1.SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) + } + if a.VolumeSource.RBD != nil { + core_v1.SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) + } + if a.VolumeSource.DownwardAPI != nil { + core_v1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) + for j := range a.VolumeSource.DownwardAPI.Items { + b := &a.VolumeSource.DownwardAPI.Items[j] + if b.FieldRef != nil { + core_v1.SetDefaults_ObjectFieldSelector(b.FieldRef) + } + } + } + if a.VolumeSource.ConfigMap != nil { + core_v1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) + } + if a.VolumeSource.AzureDisk != nil { + core_v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) + } + if 
a.VolumeSource.Projected != nil { + core_v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + core_v1.SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + } + } + if a.VolumeSource.ScaleIO != nil { + core_v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } + } + for i := range in.Spec.Template.Spec.InitContainers { + a := &in.Spec.Template.Spec.InitContainers[i] + core_v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + core_v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + core_v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + core_v1.SetDefaults_ResourceList(&a.Resources.Limits) + core_v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + core_v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + core_v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } + for i := range in.Spec.Template.Spec.Containers { + a := &in.Spec.Template.Spec.Containers[i] + core_v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + core_v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + core_v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + core_v1.SetDefaults_ResourceList(&a.Resources.Limits) + core_v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + core_v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + core_v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } +} + +func SetObjectDefaults_DeploymentList(in *v1.DeploymentList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_Deployment(a) + } +} + +func SetObjectDefaults_ReplicaSet(in *v1.ReplicaSet) { + SetDefaults_ReplicaSet(in) + core_v1.SetDefaults_PodSpec(&in.Spec.Template.Spec) + for i := range in.Spec.Template.Spec.Volumes { + a := &in.Spec.Template.Spec.Volumes[i] + core_v1.SetDefaults_Volume(a) + if a.VolumeSource.HostPath != nil { + core_v1.SetDefaults_HostPathVolumeSource(a.VolumeSource.HostPath) + } + if 
a.VolumeSource.Secret != nil { + core_v1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) + } + if a.VolumeSource.ISCSI != nil { + core_v1.SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) + } + if a.VolumeSource.RBD != nil { + core_v1.SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) + } + if a.VolumeSource.DownwardAPI != nil { + core_v1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) + for j := range a.VolumeSource.DownwardAPI.Items { + b := &a.VolumeSource.DownwardAPI.Items[j] + if b.FieldRef != nil { + core_v1.SetDefaults_ObjectFieldSelector(b.FieldRef) + } + } + } + if a.VolumeSource.ConfigMap != nil { + core_v1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) + } + if a.VolumeSource.AzureDisk != nil { + core_v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) + } + if a.VolumeSource.Projected != nil { + core_v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + core_v1.SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + } + } + if a.VolumeSource.ScaleIO != nil { + core_v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } + } + for i := range in.Spec.Template.Spec.InitContainers { + a := &in.Spec.Template.Spec.InitContainers[i] + core_v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + core_v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + core_v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + core_v1.SetDefaults_ResourceList(&a.Resources.Limits) + core_v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + core_v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + core_v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } + for i := range in.Spec.Template.Spec.Containers { + a := &in.Spec.Template.Spec.Containers[i] + core_v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + core_v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + core_v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + core_v1.SetDefaults_ResourceList(&a.Resources.Limits) + core_v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + core_v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + core_v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if 
a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } +} + +func SetObjectDefaults_ReplicaSetList(in *v1.ReplicaSetList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_ReplicaSet(a) + } +} + +func SetObjectDefaults_StatefulSet(in *v1.StatefulSet) { + SetDefaults_StatefulSet(in) + core_v1.SetDefaults_PodSpec(&in.Spec.Template.Spec) + for i := range in.Spec.Template.Spec.Volumes { + a := &in.Spec.Template.Spec.Volumes[i] + core_v1.SetDefaults_Volume(a) + if a.VolumeSource.HostPath != nil { + core_v1.SetDefaults_HostPathVolumeSource(a.VolumeSource.HostPath) + } + if a.VolumeSource.Secret != nil { + core_v1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) + } + if a.VolumeSource.ISCSI != nil { + core_v1.SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) + } + if a.VolumeSource.RBD != nil { + core_v1.SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) + } + if a.VolumeSource.DownwardAPI != nil { + core_v1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) + for j := range a.VolumeSource.DownwardAPI.Items { + b := &a.VolumeSource.DownwardAPI.Items[j] + if b.FieldRef != nil { + core_v1.SetDefaults_ObjectFieldSelector(b.FieldRef) + } + } + } + if a.VolumeSource.ConfigMap != nil { + core_v1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) + } + if a.VolumeSource.AzureDisk != nil { + core_v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) + } + if a.VolumeSource.Projected != nil { + core_v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + core_v1.SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + } + } + if a.VolumeSource.ScaleIO != nil { + core_v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } + } + for i := range in.Spec.Template.Spec.InitContainers { + a := &in.Spec.Template.Spec.InitContainers[i] + core_v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + core_v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + core_v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + core_v1.SetDefaults_ResourceList(&a.Resources.Limits) + core_v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + core_v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + core_v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } + for i := range in.Spec.Template.Spec.Containers { + a := &in.Spec.Template.Spec.Containers[i] + 
core_v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + core_v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + core_v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + core_v1.SetDefaults_ResourceList(&a.Resources.Limits) + core_v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + core_v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + core_v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + core_v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } + for i := range in.Spec.VolumeClaimTemplates { + a := &in.Spec.VolumeClaimTemplates[i] + core_v1.SetDefaults_PersistentVolumeClaim(a) + core_v1.SetDefaults_ResourceList(&a.Spec.Resources.Limits) + core_v1.SetDefaults_ResourceList(&a.Spec.Resources.Requests) + core_v1.SetDefaults_ResourceList(&a.Status.Capacity) + } +} + +func SetObjectDefaults_StatefulSetList(in *v1.StatefulSetList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_StatefulSet(a) + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/BUILD index b97be7b7dc87..920dcdc49a70 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/BUILD @@ -16,15 +16,18 @@ go_library( "zz_generated.conversion.go", "zz_generated.defaults.go", ], + importpath = "k8s.io/kubernetes/pkg/apis/apps/v1beta1", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/v1:go_default_library", "//pkg/apis/apps:go_default_library", + "//pkg/apis/autoscaling:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/v1:go_default_library", "//pkg/apis/extensions:go_default_library", "//vendor/k8s.io/api/apps/v1beta1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", @@ -47,11 +50,13 @@ filegroup( go_test( name = "go_default_xtest", srcs = ["defaults_test.go"], + importpath = "k8s.io/kubernetes/pkg/apis/apps/v1beta1_test", deps = [ ":go_default_library", - "//pkg/api:go_default_library", - "//pkg/api/install:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/apps/install:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/install:go_default_library", "//vendor/k8s.io/api/apps/v1beta1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/conversion.go 
b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/conversion.go index 42d26b86d315..6f4d27cf9051 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/conversion.go @@ -23,11 +23,13 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/kubernetes/pkg/api" - k8s_api_v1 "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/apis/apps" + "k8s.io/kubernetes/pkg/apis/autoscaling" + api "k8s.io/kubernetes/pkg/apis/core" + k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1" "k8s.io/kubernetes/pkg/apis/extensions" ) @@ -44,8 +46,8 @@ func addConversionFuncs(scheme *runtime.Scheme) error { // extensions // TODO: below conversions should be dropped in favor of auto-generated // ones, see https://github.com/kubernetes/kubernetes/issues/39865 - Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus, - Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus, + Convert_v1beta1_ScaleStatus_To_autoscaling_ScaleStatus, + Convert_autoscaling_ScaleStatus_To_v1beta1_ScaleStatus, Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec, Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec, Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy, @@ -87,7 +89,7 @@ func Convert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec(in *appsv1beta1.Sta } else { out.Selector = nil } - if err := k8s_api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + if err := k8s_api_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } if in.VolumeClaimTemplates != nil { @@ -127,7 +129,7 @@ func Convert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec(in *apps.StatefulSe } else { out.Selector = nil } - if err := k8s_api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + if err := k8s_api_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } if in.VolumeClaimTemplates != nil { @@ -178,48 +180,35 @@ func Convert_apps_StatefulSetUpdateStrategy_To_v1beta1_StatefulSetUpdateStrategy return nil } -func Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus(in *extensions.ScaleStatus, out *appsv1beta1.ScaleStatus, s conversion.Scope) error { +func Convert_autoscaling_ScaleStatus_To_v1beta1_ScaleStatus(in *autoscaling.ScaleStatus, out *appsv1beta1.ScaleStatus, s conversion.Scope) error { out.Replicas = int32(in.Replicas) + out.TargetSelector = in.Selector out.Selector = nil - out.TargetSelector = "" - if in.Selector != nil { - if in.Selector.MatchExpressions == nil || len(in.Selector.MatchExpressions) == 0 { - out.Selector = in.Selector.MatchLabels - } - - selector, err := metav1.LabelSelectorAsSelector(in.Selector) - if err != nil { - return fmt.Errorf("invalid label selector: %v", err) - } - out.TargetSelector = selector.String() + selector, err := metav1.ParseToLabelSelector(in.Selector) + if err != nil { + return fmt.Errorf("failed to parse selector: %v", err) } + if len(selector.MatchExpressions) == 0 { + out.Selector = selector.MatchLabels + } + return nil } -func Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus(in *appsv1beta1.ScaleStatus, out *extensions.ScaleStatus, s conversion.Scope) error { +func Convert_v1beta1_ScaleStatus_To_autoscaling_ScaleStatus(in 
*appsv1beta1.ScaleStatus, out *autoscaling.ScaleStatus, s conversion.Scope) error { out.Replicas = in.Replicas - // Normally when 2 fields map to the same internal value we favor the old field, since - // old clients can't be expected to know about new fields but clients that know about the - // new field can be expected to know about the old field (though that's not quite true, due - // to kubectl apply). However, these fields are readonly, so any non-nil value should work. if in.TargetSelector != "" { - labelSelector, err := metav1.ParseToLabelSelector(in.TargetSelector) - if err != nil { - out.Selector = nil - return fmt.Errorf("failed to parse target selector: %v", err) - } - out.Selector = labelSelector + out.Selector = in.TargetSelector } else if in.Selector != nil { - out.Selector = new(metav1.LabelSelector) - selector := make(map[string]string) + set := labels.Set{} for key, val := range in.Selector { - selector[key] = val + set[key] = val } - out.Selector.MatchLabels = selector + out.Selector = labels.SelectorFromSet(set).String() } else { - out.Selector = nil + out.Selector = "" } return nil } @@ -229,7 +218,7 @@ func Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *appsv1beta1 out.Replicas = *in.Replicas } out.Selector = in.Selector - if err := k8s_api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + if err := k8s_api_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } if err := Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { @@ -254,7 +243,7 @@ func Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *appsv1beta1 func Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensions.DeploymentSpec, out *appsv1beta1.DeploymentSpec, s conversion.Scope) error { out.Replicas = &in.Replicas out.Selector = in.Selector - if err := k8s_api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + if err := k8s_api_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } if err := Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/doc.go index 3e91b5225181..8492ada1c103 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/doc.go @@ -15,8 +15,9 @@ limitations under the License. 
*/ // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/apps +// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/autoscaling // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/extensions -// +k8s:conversion-gen-external-types=../../../../vendor/k8s.io/api/apps/v1beta1 +// +k8s:conversion-gen-external-types=k8s.io/api/apps/v1beta1 // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/apps/v1beta1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/zz_generated.conversion.go index d3ddaac0333f..0bfb1e3f2097 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/zz_generated.conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/zz_generated.conversion.go @@ -26,9 +26,10 @@ import ( meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - api "k8s.io/kubernetes/pkg/api" - api_v1 "k8s.io/kubernetes/pkg/api/v1" apps "k8s.io/kubernetes/pkg/apis/apps" + autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" + core "k8s.io/kubernetes/pkg/apis/core" + core_v1 "k8s.io/kubernetes/pkg/apis/core/v1" extensions "k8s.io/kubernetes/pkg/apis/extensions" unsafe "unsafe" ) @@ -65,14 +66,16 @@ func RegisterConversions(scheme *runtime.Scheme) error { Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment, Convert_v1beta1_RollingUpdateStatefulSetStrategy_To_apps_RollingUpdateStatefulSetStrategy, Convert_apps_RollingUpdateStatefulSetStrategy_To_v1beta1_RollingUpdateStatefulSetStrategy, - Convert_v1beta1_Scale_To_extensions_Scale, - Convert_extensions_Scale_To_v1beta1_Scale, - Convert_v1beta1_ScaleSpec_To_extensions_ScaleSpec, - Convert_extensions_ScaleSpec_To_v1beta1_ScaleSpec, - Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus, - Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus, + Convert_v1beta1_Scale_To_autoscaling_Scale, + Convert_autoscaling_Scale_To_v1beta1_Scale, + Convert_v1beta1_ScaleSpec_To_autoscaling_ScaleSpec, + Convert_autoscaling_ScaleSpec_To_v1beta1_ScaleSpec, + Convert_v1beta1_ScaleStatus_To_autoscaling_ScaleStatus, + Convert_autoscaling_ScaleStatus_To_v1beta1_ScaleStatus, Convert_v1beta1_StatefulSet_To_apps_StatefulSet, Convert_apps_StatefulSet_To_v1beta1_StatefulSet, + Convert_v1beta1_StatefulSetCondition_To_apps_StatefulSetCondition, + Convert_apps_StatefulSetCondition_To_v1beta1_StatefulSetCondition, Convert_v1beta1_StatefulSetList_To_apps_StatefulSetList, Convert_apps_StatefulSetList_To_v1beta1_StatefulSetList, Convert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec, @@ -188,7 +191,7 @@ func Convert_extensions_Deployment_To_v1beta1_Deployment(in *extensions.Deployme func autoConvert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition(in *v1beta1.DeploymentCondition, out *extensions.DeploymentCondition, s conversion.Scope) error { out.Type = extensions.DeploymentConditionType(in.Type) - out.Status = api.ConditionStatus(in.Status) + out.Status = core.ConditionStatus(in.Status) out.LastUpdateTime = in.LastUpdateTime out.LastTransitionTime = in.LastTransitionTime out.Reason = in.Reason @@ -291,7 +294,7 @@ func autoConvert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *v1beta1 return err } out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) - if err := api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + if err := 
core_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } if err := Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { @@ -310,7 +313,7 @@ func autoConvert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensi return err } out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) - if err := api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + if err := core_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } if err := Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { @@ -442,68 +445,68 @@ func Convert_apps_RollingUpdateStatefulSetStrategy_To_v1beta1_RollingUpdateState return autoConvert_apps_RollingUpdateStatefulSetStrategy_To_v1beta1_RollingUpdateStatefulSetStrategy(in, out, s) } -func autoConvert_v1beta1_Scale_To_extensions_Scale(in *v1beta1.Scale, out *extensions.Scale, s conversion.Scope) error { +func autoConvert_v1beta1_Scale_To_autoscaling_Scale(in *v1beta1.Scale, out *autoscaling.Scale, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1beta1_ScaleSpec_To_extensions_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1beta1_ScaleSpec_To_autoscaling_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1beta1_ScaleStatus_To_autoscaling_ScaleStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1beta1_Scale_To_extensions_Scale is an autogenerated conversion function. -func Convert_v1beta1_Scale_To_extensions_Scale(in *v1beta1.Scale, out *extensions.Scale, s conversion.Scope) error { - return autoConvert_v1beta1_Scale_To_extensions_Scale(in, out, s) +// Convert_v1beta1_Scale_To_autoscaling_Scale is an autogenerated conversion function. +func Convert_v1beta1_Scale_To_autoscaling_Scale(in *v1beta1.Scale, out *autoscaling.Scale, s conversion.Scope) error { + return autoConvert_v1beta1_Scale_To_autoscaling_Scale(in, out, s) } -func autoConvert_extensions_Scale_To_v1beta1_Scale(in *extensions.Scale, out *v1beta1.Scale, s conversion.Scope) error { +func autoConvert_autoscaling_Scale_To_v1beta1_Scale(in *autoscaling.Scale, out *v1beta1.Scale, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_autoscaling_ScaleSpec_To_v1beta1_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_autoscaling_ScaleStatus_To_v1beta1_ScaleStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_extensions_Scale_To_v1beta1_Scale is an autogenerated conversion function. -func Convert_extensions_Scale_To_v1beta1_Scale(in *extensions.Scale, out *v1beta1.Scale, s conversion.Scope) error { - return autoConvert_extensions_Scale_To_v1beta1_Scale(in, out, s) +// Convert_autoscaling_Scale_To_v1beta1_Scale is an autogenerated conversion function. 
+func Convert_autoscaling_Scale_To_v1beta1_Scale(in *autoscaling.Scale, out *v1beta1.Scale, s conversion.Scope) error { + return autoConvert_autoscaling_Scale_To_v1beta1_Scale(in, out, s) } -func autoConvert_v1beta1_ScaleSpec_To_extensions_ScaleSpec(in *v1beta1.ScaleSpec, out *extensions.ScaleSpec, s conversion.Scope) error { +func autoConvert_v1beta1_ScaleSpec_To_autoscaling_ScaleSpec(in *v1beta1.ScaleSpec, out *autoscaling.ScaleSpec, s conversion.Scope) error { out.Replicas = in.Replicas return nil } -// Convert_v1beta1_ScaleSpec_To_extensions_ScaleSpec is an autogenerated conversion function. -func Convert_v1beta1_ScaleSpec_To_extensions_ScaleSpec(in *v1beta1.ScaleSpec, out *extensions.ScaleSpec, s conversion.Scope) error { - return autoConvert_v1beta1_ScaleSpec_To_extensions_ScaleSpec(in, out, s) +// Convert_v1beta1_ScaleSpec_To_autoscaling_ScaleSpec is an autogenerated conversion function. +func Convert_v1beta1_ScaleSpec_To_autoscaling_ScaleSpec(in *v1beta1.ScaleSpec, out *autoscaling.ScaleSpec, s conversion.Scope) error { + return autoConvert_v1beta1_ScaleSpec_To_autoscaling_ScaleSpec(in, out, s) } -func autoConvert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(in *extensions.ScaleSpec, out *v1beta1.ScaleSpec, s conversion.Scope) error { +func autoConvert_autoscaling_ScaleSpec_To_v1beta1_ScaleSpec(in *autoscaling.ScaleSpec, out *v1beta1.ScaleSpec, s conversion.Scope) error { out.Replicas = in.Replicas return nil } -// Convert_extensions_ScaleSpec_To_v1beta1_ScaleSpec is an autogenerated conversion function. -func Convert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(in *extensions.ScaleSpec, out *v1beta1.ScaleSpec, s conversion.Scope) error { - return autoConvert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(in, out, s) +// Convert_autoscaling_ScaleSpec_To_v1beta1_ScaleSpec is an autogenerated conversion function. 
+func Convert_autoscaling_ScaleSpec_To_v1beta1_ScaleSpec(in *autoscaling.ScaleSpec, out *v1beta1.ScaleSpec, s conversion.Scope) error { + return autoConvert_autoscaling_ScaleSpec_To_v1beta1_ScaleSpec(in, out, s) } -func autoConvert_v1beta1_ScaleStatus_To_extensions_ScaleStatus(in *v1beta1.ScaleStatus, out *extensions.ScaleStatus, s conversion.Scope) error { +func autoConvert_v1beta1_ScaleStatus_To_autoscaling_ScaleStatus(in *v1beta1.ScaleStatus, out *autoscaling.ScaleStatus, s conversion.Scope) error { out.Replicas = in.Replicas - // WARNING: in.Selector requires manual conversion: inconvertible types (map[string]string vs *k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector) + // WARNING: in.Selector requires manual conversion: inconvertible types (map[string]string vs string) // WARNING: in.TargetSelector requires manual conversion: does not exist in peer-type return nil } -func autoConvert_extensions_ScaleStatus_To_v1beta1_ScaleStatus(in *extensions.ScaleStatus, out *v1beta1.ScaleStatus, s conversion.Scope) error { +func autoConvert_autoscaling_ScaleStatus_To_v1beta1_ScaleStatus(in *autoscaling.ScaleStatus, out *v1beta1.ScaleStatus, s conversion.Scope) error { out.Replicas = in.Replicas - // WARNING: in.Selector requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector vs map[string]string) + // WARNING: in.Selector requires manual conversion: inconvertible types (string vs map[string]string) return nil } @@ -539,6 +542,34 @@ func Convert_apps_StatefulSet_To_v1beta1_StatefulSet(in *apps.StatefulSet, out * return autoConvert_apps_StatefulSet_To_v1beta1_StatefulSet(in, out, s) } +func autoConvert_v1beta1_StatefulSetCondition_To_apps_StatefulSetCondition(in *v1beta1.StatefulSetCondition, out *apps.StatefulSetCondition, s conversion.Scope) error { + out.Type = apps.StatefulSetConditionType(in.Type) + out.Status = core.ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1beta1_StatefulSetCondition_To_apps_StatefulSetCondition is an autogenerated conversion function. +func Convert_v1beta1_StatefulSetCondition_To_apps_StatefulSetCondition(in *v1beta1.StatefulSetCondition, out *apps.StatefulSetCondition, s conversion.Scope) error { + return autoConvert_v1beta1_StatefulSetCondition_To_apps_StatefulSetCondition(in, out, s) +} + +func autoConvert_apps_StatefulSetCondition_To_v1beta1_StatefulSetCondition(in *apps.StatefulSetCondition, out *v1beta1.StatefulSetCondition, s conversion.Scope) error { + out.Type = v1beta1.StatefulSetConditionType(in.Type) + out.Status = v1.ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_apps_StatefulSetCondition_To_v1beta1_StatefulSetCondition is an autogenerated conversion function. 
+func Convert_apps_StatefulSetCondition_To_v1beta1_StatefulSetCondition(in *apps.StatefulSetCondition, out *v1beta1.StatefulSetCondition, s conversion.Scope) error { + return autoConvert_apps_StatefulSetCondition_To_v1beta1_StatefulSetCondition(in, out, s) +} + func autoConvert_v1beta1_StatefulSetList_To_apps_StatefulSetList(in *v1beta1.StatefulSetList, out *apps.StatefulSetList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { @@ -586,10 +617,10 @@ func autoConvert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec(in *v1beta1.Sta return err } out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) - if err := api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + if err := core_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - out.VolumeClaimTemplates = *(*[]api.PersistentVolumeClaim)(unsafe.Pointer(&in.VolumeClaimTemplates)) + out.VolumeClaimTemplates = *(*[]core.PersistentVolumeClaim)(unsafe.Pointer(&in.VolumeClaimTemplates)) out.ServiceName = in.ServiceName out.PodManagementPolicy = apps.PodManagementPolicyType(in.PodManagementPolicy) if err := Convert_v1beta1_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { @@ -604,7 +635,7 @@ func autoConvert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec(in *apps.Statef return err } out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) - if err := api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + if err := core_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } out.VolumeClaimTemplates = *(*[]v1.PersistentVolumeClaim)(unsafe.Pointer(&in.VolumeClaimTemplates)) @@ -626,6 +657,7 @@ func autoConvert_v1beta1_StatefulSetStatus_To_apps_StatefulSetStatus(in *v1beta1 out.CurrentRevision = in.CurrentRevision out.UpdateRevision = in.UpdateRevision out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount)) + out.Conditions = *(*[]apps.StatefulSetCondition)(unsafe.Pointer(&in.Conditions)) return nil } @@ -643,6 +675,7 @@ func autoConvert_apps_StatefulSetStatus_To_v1beta1_StatefulSetStatus(in *apps.St out.CurrentRevision = in.CurrentRevision out.UpdateRevision = in.UpdateRevision out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount)) + out.Conditions = *(*[]v1beta1.StatefulSetCondition)(unsafe.Pointer(&in.Conditions)) return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/zz_generated.defaults.go index 26fde73fe2d0..47c74ab2c4ae 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/zz_generated.defaults.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/zz_generated.defaults.go @@ -23,7 +23,7 @@ package v1beta1 import ( v1beta1 "k8s.io/api/apps/v1beta1" runtime "k8s.io/apimachinery/pkg/runtime" - v1 "k8s.io/kubernetes/pkg/api/v1" + v1 "k8s.io/kubernetes/pkg/apis/core/v1" ) // RegisterDefaults adds defaulters functions to the given scheme. 
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/BUILD index 6c2c3d993617..b27e6f362ce7 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/BUILD @@ -16,15 +16,18 @@ go_library( "zz_generated.conversion.go", "zz_generated.defaults.go", ], + importpath = "k8s.io/kubernetes/pkg/apis/apps/v1beta2", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/v1:go_default_library", "//pkg/apis/apps:go_default_library", + "//pkg/apis/autoscaling:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/v1:go_default_library", "//pkg/apis/extensions:go_default_library", "//vendor/k8s.io/api/apps/v1beta2:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", @@ -50,12 +53,15 @@ go_test( "conversion_test.go", "defaults_test.go", ], + importpath = "k8s.io/kubernetes/pkg/apis/apps/v1beta2_test", deps = [ ":go_default_library", - "//pkg/api:go_default_library", - "//pkg/api/install:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/apps:go_default_library", "//pkg/apis/apps/install:go_default_library", + "//pkg/apis/autoscaling:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/install:go_default_library", "//pkg/apis/extensions:go_default_library", "//vendor/k8s.io/api/apps/v1beta2:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/conversion.go index 191c2eb56b42..f71de4f28e7a 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/conversion.go @@ -24,11 +24,13 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/kubernetes/pkg/api" - k8s_api_v1 "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/apis/apps" + autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" + api "k8s.io/kubernetes/pkg/apis/core" + k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1" "k8s.io/kubernetes/pkg/apis/extensions" ) @@ -57,8 +59,8 @@ func addConversionFuncs(scheme *runtime.Scheme) error { // extensions // TODO: below conversions should be dropped in favor of auto-generated // ones, see https://github.com/kubernetes/kubernetes/issues/39865 - Convert_v1beta2_ScaleStatus_To_extensions_ScaleStatus, - Convert_extensions_ScaleStatus_To_v1beta2_ScaleStatus, + Convert_v1beta2_ScaleStatus_To_autoscaling_ScaleStatus, + Convert_autoscaling_ScaleStatus_To_v1beta2_ScaleStatus, Convert_v1beta2_DeploymentSpec_To_extensions_DeploymentSpec, Convert_extensions_DeploymentSpec_To_v1beta2_DeploymentSpec, Convert_v1beta2_DeploymentStrategy_To_extensions_DeploymentStrategy, @@ -119,7 +121,7 @@ func Convert_v1beta2_StatefulSetSpec_To_apps_StatefulSetSpec(in *appsv1beta2.Sta } else { out.Selector = nil } - if err := 
k8s_api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + if err := k8s_api_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } if in.VolumeClaimTemplates != nil { @@ -159,7 +161,7 @@ func Convert_apps_StatefulSetSpec_To_v1beta2_StatefulSetSpec(in *apps.StatefulSe } else { out.Selector = nil } - if err := k8s_api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + if err := k8s_api_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } if in.VolumeClaimTemplates != nil { @@ -223,6 +225,12 @@ func Convert_v1beta2_StatefulSetStatus_To_apps_StatefulSetStatus(in *appsv1beta2 out.CollisionCount = new(int32) *out.CollisionCount = *in.CollisionCount } + out.Conditions = make([]apps.StatefulSetCondition, len(in.Conditions)) + for i := range in.Conditions { + if err := Convert_v1beta2_StatefulSetCondition_To_apps_StatefulSetCondition(&in.Conditions[i], &out.Conditions[i], s); err != nil { + return err + } + } return nil } @@ -240,29 +248,32 @@ func Convert_apps_StatefulSetStatus_To_v1beta2_StatefulSetStatus(in *apps.Statef out.CollisionCount = new(int32) *out.CollisionCount = *in.CollisionCount } + out.Conditions = make([]appsv1beta2.StatefulSetCondition, len(in.Conditions)) + for i := range in.Conditions { + if err := Convert_apps_StatefulSetCondition_To_v1beta2_StatefulSetCondition(&in.Conditions[i], &out.Conditions[i], s); err != nil { + return err + } + } return nil } -func Convert_extensions_ScaleStatus_To_v1beta2_ScaleStatus(in *extensions.ScaleStatus, out *appsv1beta2.ScaleStatus, s conversion.Scope) error { +func Convert_autoscaling_ScaleStatus_To_v1beta2_ScaleStatus(in *autoscaling.ScaleStatus, out *appsv1beta2.ScaleStatus, s conversion.Scope) error { out.Replicas = int32(in.Replicas) + out.TargetSelector = in.Selector out.Selector = nil - out.TargetSelector = "" - if in.Selector != nil { - if in.Selector.MatchExpressions == nil || len(in.Selector.MatchExpressions) == 0 { - out.Selector = in.Selector.MatchLabels - } - - selector, err := metav1.LabelSelectorAsSelector(in.Selector) - if err != nil { - return fmt.Errorf("invalid label selector: %v", err) - } - out.TargetSelector = selector.String() + selector, err := metav1.ParseToLabelSelector(in.Selector) + if err != nil { + return fmt.Errorf("failed to parse selector: %v", err) + } + if len(selector.MatchExpressions) == 0 { + out.Selector = selector.MatchLabels } + return nil } -func Convert_v1beta2_ScaleStatus_To_extensions_ScaleStatus(in *appsv1beta2.ScaleStatus, out *extensions.ScaleStatus, s conversion.Scope) error { +func Convert_v1beta2_ScaleStatus_To_autoscaling_ScaleStatus(in *appsv1beta2.ScaleStatus, out *autoscaling.ScaleStatus, s conversion.Scope) error { out.Replicas = in.Replicas // Normally when 2 fields map to the same internal value we favor the old field, since @@ -270,21 +281,15 @@ func Convert_v1beta2_ScaleStatus_To_extensions_ScaleStatus(in *appsv1beta2.Scale // new field can be expected to know about the old field (though that's not quite true, due // to kubectl apply). However, these fields are readonly, so any non-nil value should work. 
if in.TargetSelector != "" { - labelSelector, err := metav1.ParseToLabelSelector(in.TargetSelector) - if err != nil { - out.Selector = nil - return fmt.Errorf("failed to parse target selector: %v", err) - } - out.Selector = labelSelector + out.Selector = in.TargetSelector } else if in.Selector != nil { - out.Selector = new(metav1.LabelSelector) - selector := make(map[string]string) + set := labels.Set{} for key, val := range in.Selector { - selector[key] = val + set[key] = val } - out.Selector.MatchLabels = selector + out.Selector = labels.SelectorFromSet(set).String() } else { - out.Selector = nil + out.Selector = "" } return nil } @@ -294,7 +299,7 @@ func Convert_v1beta2_DeploymentSpec_To_extensions_DeploymentSpec(in *appsv1beta2 out.Replicas = *in.Replicas } out.Selector = in.Selector - if err := k8s_api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + if err := k8s_api_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } if err := Convert_v1beta2_DeploymentStrategy_To_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { @@ -313,7 +318,7 @@ func Convert_v1beta2_DeploymentSpec_To_extensions_DeploymentSpec(in *appsv1beta2 func Convert_extensions_DeploymentSpec_To_v1beta2_DeploymentSpec(in *extensions.DeploymentSpec, out *appsv1beta2.DeploymentSpec, s conversion.Scope) error { out.Replicas = &in.Replicas out.Selector = in.Selector - if err := k8s_api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + if err := k8s_api_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } if err := Convert_extensions_DeploymentStrategy_To_v1beta2_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { @@ -389,7 +394,7 @@ func Convert_extensions_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec(in *extensions. 
*out.Replicas = int32(in.Replicas) out.MinReadySeconds = in.MinReadySeconds out.Selector = in.Selector - if err := k8s_api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + if err := k8s_api_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } return nil @@ -427,7 +432,7 @@ func Convert_v1beta2_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *appsv1beta2 } out.MinReadySeconds = in.MinReadySeconds out.Selector = in.Selector - if err := k8s_api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + if err := k8s_api_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } return nil @@ -473,7 +478,7 @@ func Convert_extensions_DaemonSet_To_v1beta2_DaemonSet(in *extensions.DaemonSet, func Convert_extensions_DaemonSetSpec_To_v1beta2_DaemonSetSpec(in *extensions.DaemonSetSpec, out *appsv1beta2.DaemonSetSpec, s conversion.Scope) error { out.Selector = in.Selector - if err := k8s_api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + if err := k8s_api_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } if err := Convert_extensions_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { @@ -521,7 +526,7 @@ func Convert_v1beta2_DaemonSet_To_extensions_DaemonSet(in *appsv1beta2.DaemonSet func Convert_v1beta2_DaemonSetSpec_To_extensions_DaemonSetSpec(in *appsv1beta2.DaemonSetSpec, out *extensions.DaemonSetSpec, s conversion.Scope) error { out.Selector = in.Selector - if err := k8s_api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + if err := k8s_api_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } if err := Convert_v1beta2_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/doc.go index 9077d6eeafe6..4221fe419a7b 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/doc.go @@ -15,8 +15,9 @@ limitations under the License. 
*/ // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/apps +// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/autoscaling // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/extensions -// +k8s:conversion-gen-external-types=../../../../vendor/k8s.io/api/apps/v1beta2 +// +k8s:conversion-gen-external-types=k8s.io/api/apps/v1beta2 // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/apps/v1beta2 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/zz_generated.conversion.go index 4624fa42bf09..cbe3ccaa91b5 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/zz_generated.conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/zz_generated.conversion.go @@ -22,13 +22,14 @@ package v1beta2 import ( v1beta2 "k8s.io/api/apps/v1beta2" - core_v1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - api "k8s.io/kubernetes/pkg/api" - api_v1 "k8s.io/kubernetes/pkg/api/v1" apps "k8s.io/kubernetes/pkg/apis/apps" + autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" + core "k8s.io/kubernetes/pkg/apis/core" + core_v1 "k8s.io/kubernetes/pkg/apis/core/v1" extensions "k8s.io/kubernetes/pkg/apis/extensions" unsafe "unsafe" ) @@ -47,6 +48,8 @@ func RegisterConversions(scheme *runtime.Scheme) error { Convert_apps_ControllerRevisionList_To_v1beta2_ControllerRevisionList, Convert_v1beta2_DaemonSet_To_extensions_DaemonSet, Convert_extensions_DaemonSet_To_v1beta2_DaemonSet, + Convert_v1beta2_DaemonSetCondition_To_extensions_DaemonSetCondition, + Convert_extensions_DaemonSetCondition_To_v1beta2_DaemonSetCondition, Convert_v1beta2_DaemonSetList_To_extensions_DaemonSetList, Convert_extensions_DaemonSetList_To_v1beta2_DaemonSetList, Convert_v1beta2_DaemonSetSpec_To_extensions_DaemonSetSpec, @@ -83,14 +86,16 @@ func RegisterConversions(scheme *runtime.Scheme) error { Convert_extensions_RollingUpdateDeployment_To_v1beta2_RollingUpdateDeployment, Convert_v1beta2_RollingUpdateStatefulSetStrategy_To_apps_RollingUpdateStatefulSetStrategy, Convert_apps_RollingUpdateStatefulSetStrategy_To_v1beta2_RollingUpdateStatefulSetStrategy, - Convert_v1beta2_Scale_To_extensions_Scale, - Convert_extensions_Scale_To_v1beta2_Scale, - Convert_v1beta2_ScaleSpec_To_extensions_ScaleSpec, - Convert_extensions_ScaleSpec_To_v1beta2_ScaleSpec, - Convert_v1beta2_ScaleStatus_To_extensions_ScaleStatus, - Convert_extensions_ScaleStatus_To_v1beta2_ScaleStatus, + Convert_v1beta2_Scale_To_autoscaling_Scale, + Convert_autoscaling_Scale_To_v1beta2_Scale, + Convert_v1beta2_ScaleSpec_To_autoscaling_ScaleSpec, + Convert_autoscaling_ScaleSpec_To_v1beta2_ScaleSpec, + Convert_v1beta2_ScaleStatus_To_autoscaling_ScaleStatus, + Convert_autoscaling_ScaleStatus_To_v1beta2_ScaleStatus, Convert_v1beta2_StatefulSet_To_apps_StatefulSet, Convert_apps_StatefulSet_To_v1beta2_StatefulSet, + Convert_v1beta2_StatefulSetCondition_To_apps_StatefulSetCondition, + Convert_apps_StatefulSetCondition_To_v1beta2_StatefulSetCondition, Convert_v1beta2_StatefulSetList_To_apps_StatefulSetList, Convert_apps_StatefulSetList_To_v1beta2_StatefulSetList, Convert_v1beta2_StatefulSetSpec_To_apps_StatefulSetSpec, @@ -194,6 +199,34 @@ func autoConvert_extensions_DaemonSet_To_v1beta2_DaemonSet(in *extensions.Daemon return nil } +func 
autoConvert_v1beta2_DaemonSetCondition_To_extensions_DaemonSetCondition(in *v1beta2.DaemonSetCondition, out *extensions.DaemonSetCondition, s conversion.Scope) error { + out.Type = extensions.DaemonSetConditionType(in.Type) + out.Status = core.ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1beta2_DaemonSetCondition_To_extensions_DaemonSetCondition is an autogenerated conversion function. +func Convert_v1beta2_DaemonSetCondition_To_extensions_DaemonSetCondition(in *v1beta2.DaemonSetCondition, out *extensions.DaemonSetCondition, s conversion.Scope) error { + return autoConvert_v1beta2_DaemonSetCondition_To_extensions_DaemonSetCondition(in, out, s) +} + +func autoConvert_extensions_DaemonSetCondition_To_v1beta2_DaemonSetCondition(in *extensions.DaemonSetCondition, out *v1beta2.DaemonSetCondition, s conversion.Scope) error { + out.Type = v1beta2.DaemonSetConditionType(in.Type) + out.Status = v1.ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_extensions_DaemonSetCondition_To_v1beta2_DaemonSetCondition is an autogenerated conversion function. +func Convert_extensions_DaemonSetCondition_To_v1beta2_DaemonSetCondition(in *extensions.DaemonSetCondition, out *v1beta2.DaemonSetCondition, s conversion.Scope) error { + return autoConvert_extensions_DaemonSetCondition_To_v1beta2_DaemonSetCondition(in, out, s) +} + func autoConvert_v1beta2_DaemonSetList_To_extensions_DaemonSetList(in *v1beta2.DaemonSetList, out *extensions.DaemonSetList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { @@ -237,8 +270,8 @@ func Convert_extensions_DaemonSetList_To_v1beta2_DaemonSetList(in *extensions.Da } func autoConvert_v1beta2_DaemonSetSpec_To_extensions_DaemonSetSpec(in *v1beta2.DaemonSetSpec, out *extensions.DaemonSetSpec, s conversion.Scope) error { - out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) - if err := api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := core_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } if err := Convert_v1beta2_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { @@ -250,8 +283,8 @@ func autoConvert_v1beta2_DaemonSetSpec_To_extensions_DaemonSetSpec(in *v1beta2.D } func autoConvert_extensions_DaemonSetSpec_To_v1beta2_DaemonSetSpec(in *extensions.DaemonSetSpec, out *v1beta2.DaemonSetSpec, s conversion.Scope) error { - out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) - if err := api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := core_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } if err := Convert_extensions_DaemonSetUpdateStrategy_To_v1beta2_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { @@ -273,6 +306,7 @@ func autoConvert_v1beta2_DaemonSetStatus_To_extensions_DaemonSetStatus(in *v1bet out.NumberAvailable = in.NumberAvailable out.NumberUnavailable = in.NumberUnavailable out.CollisionCount = 
(*int32)(unsafe.Pointer(in.CollisionCount)) + out.Conditions = *(*[]extensions.DaemonSetCondition)(unsafe.Pointer(&in.Conditions)) return nil } @@ -291,6 +325,7 @@ func autoConvert_extensions_DaemonSetStatus_To_v1beta2_DaemonSetStatus(in *exten out.NumberAvailable = in.NumberAvailable out.NumberUnavailable = in.NumberUnavailable out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount)) + out.Conditions = *(*[]v1beta2.DaemonSetCondition)(unsafe.Pointer(&in.Conditions)) return nil } @@ -351,7 +386,7 @@ func autoConvert_extensions_Deployment_To_v1beta2_Deployment(in *extensions.Depl func autoConvert_v1beta2_DeploymentCondition_To_extensions_DeploymentCondition(in *v1beta2.DeploymentCondition, out *extensions.DeploymentCondition, s conversion.Scope) error { out.Type = extensions.DeploymentConditionType(in.Type) - out.Status = api.ConditionStatus(in.Status) + out.Status = core.ConditionStatus(in.Status) out.LastUpdateTime = in.LastUpdateTime out.LastTransitionTime = in.LastTransitionTime out.Reason = in.Reason @@ -366,7 +401,7 @@ func Convert_v1beta2_DeploymentCondition_To_extensions_DeploymentCondition(in *v func autoConvert_extensions_DeploymentCondition_To_v1beta2_DeploymentCondition(in *extensions.DeploymentCondition, out *v1beta2.DeploymentCondition, s conversion.Scope) error { out.Type = v1beta2.DeploymentConditionType(in.Type) - out.Status = core_v1.ConditionStatus(in.Status) + out.Status = v1.ConditionStatus(in.Status) out.LastUpdateTime = in.LastUpdateTime out.LastTransitionTime = in.LastTransitionTime out.Reason = in.Reason @@ -422,11 +457,11 @@ func Convert_extensions_DeploymentList_To_v1beta2_DeploymentList(in *extensions. } func autoConvert_v1beta2_DeploymentSpec_To_extensions_DeploymentSpec(in *v1beta2.DeploymentSpec, out *extensions.DeploymentSpec, s conversion.Scope) error { - if err := v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { + if err := meta_v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } - out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) - if err := api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := core_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } if err := Convert_v1beta2_DeploymentStrategy_To_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { @@ -440,11 +475,11 @@ func autoConvert_v1beta2_DeploymentSpec_To_extensions_DeploymentSpec(in *v1beta2 } func autoConvert_extensions_DeploymentSpec_To_v1beta2_DeploymentSpec(in *extensions.DeploymentSpec, out *v1beta2.DeploymentSpec, s conversion.Scope) error { - if err := v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { + if err := meta_v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } - out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) - if err := api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := core_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } if err := Convert_extensions_DeploymentStrategy_To_v1beta2_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { @@ -554,7 +589,7 @@ func 
Convert_extensions_ReplicaSet_To_v1beta2_ReplicaSet(in *extensions.ReplicaS func autoConvert_v1beta2_ReplicaSetCondition_To_extensions_ReplicaSetCondition(in *v1beta2.ReplicaSetCondition, out *extensions.ReplicaSetCondition, s conversion.Scope) error { out.Type = extensions.ReplicaSetConditionType(in.Type) - out.Status = api.ConditionStatus(in.Status) + out.Status = core.ConditionStatus(in.Status) out.LastTransitionTime = in.LastTransitionTime out.Reason = in.Reason out.Message = in.Message @@ -568,7 +603,7 @@ func Convert_v1beta2_ReplicaSetCondition_To_extensions_ReplicaSetCondition(in *v func autoConvert_extensions_ReplicaSetCondition_To_v1beta2_ReplicaSetCondition(in *extensions.ReplicaSetCondition, out *v1beta2.ReplicaSetCondition, s conversion.Scope) error { out.Type = v1beta2.ReplicaSetConditionType(in.Type) - out.Status = core_v1.ConditionStatus(in.Status) + out.Status = v1.ConditionStatus(in.Status) out.LastTransitionTime = in.LastTransitionTime out.Reason = in.Reason out.Message = in.Message @@ -623,24 +658,24 @@ func Convert_extensions_ReplicaSetList_To_v1beta2_ReplicaSetList(in *extensions. } func autoConvert_v1beta2_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *v1beta2.ReplicaSetSpec, out *extensions.ReplicaSetSpec, s conversion.Scope) error { - if err := v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { + if err := meta_v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } out.MinReadySeconds = in.MinReadySeconds - out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) - if err := api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := core_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } return nil } func autoConvert_extensions_ReplicaSetSpec_To_v1beta2_ReplicaSetSpec(in *extensions.ReplicaSetSpec, out *v1beta2.ReplicaSetSpec, s conversion.Scope) error { - if err := v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { + if err := meta_v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } out.MinReadySeconds = in.MinReadySeconds - out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) - if err := api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := core_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } return nil @@ -699,7 +734,7 @@ func autoConvert_extensions_RollingUpdateDeployment_To_v1beta2_RollingUpdateDepl } func autoConvert_v1beta2_RollingUpdateStatefulSetStrategy_To_apps_RollingUpdateStatefulSetStrategy(in *v1beta2.RollingUpdateStatefulSetStrategy, out *apps.RollingUpdateStatefulSetStrategy, s conversion.Scope) error { - if err := v1.Convert_Pointer_int32_To_int32(&in.Partition, &out.Partition, s); err != nil { + if err := meta_v1.Convert_Pointer_int32_To_int32(&in.Partition, &out.Partition, s); err != nil { return err } return nil @@ -711,7 +746,7 @@ func Convert_v1beta2_RollingUpdateStatefulSetStrategy_To_apps_RollingUpdateState } func autoConvert_apps_RollingUpdateStatefulSetStrategy_To_v1beta2_RollingUpdateStatefulSetStrategy(in *apps.RollingUpdateStatefulSetStrategy, out *v1beta2.RollingUpdateStatefulSetStrategy, s 
conversion.Scope) error { - if err := v1.Convert_int32_To_Pointer_int32(&in.Partition, &out.Partition, s); err != nil { + if err := meta_v1.Convert_int32_To_Pointer_int32(&in.Partition, &out.Partition, s); err != nil { return err } return nil @@ -722,68 +757,68 @@ func Convert_apps_RollingUpdateStatefulSetStrategy_To_v1beta2_RollingUpdateState return autoConvert_apps_RollingUpdateStatefulSetStrategy_To_v1beta2_RollingUpdateStatefulSetStrategy(in, out, s) } -func autoConvert_v1beta2_Scale_To_extensions_Scale(in *v1beta2.Scale, out *extensions.Scale, s conversion.Scope) error { +func autoConvert_v1beta2_Scale_To_autoscaling_Scale(in *v1beta2.Scale, out *autoscaling.Scale, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1beta2_ScaleSpec_To_extensions_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1beta2_ScaleSpec_To_autoscaling_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1beta2_ScaleStatus_To_extensions_ScaleStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1beta2_ScaleStatus_To_autoscaling_ScaleStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1beta2_Scale_To_extensions_Scale is an autogenerated conversion function. -func Convert_v1beta2_Scale_To_extensions_Scale(in *v1beta2.Scale, out *extensions.Scale, s conversion.Scope) error { - return autoConvert_v1beta2_Scale_To_extensions_Scale(in, out, s) +// Convert_v1beta2_Scale_To_autoscaling_Scale is an autogenerated conversion function. +func Convert_v1beta2_Scale_To_autoscaling_Scale(in *v1beta2.Scale, out *autoscaling.Scale, s conversion.Scope) error { + return autoConvert_v1beta2_Scale_To_autoscaling_Scale(in, out, s) } -func autoConvert_extensions_Scale_To_v1beta2_Scale(in *extensions.Scale, out *v1beta2.Scale, s conversion.Scope) error { +func autoConvert_autoscaling_Scale_To_v1beta2_Scale(in *autoscaling.Scale, out *v1beta2.Scale, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_extensions_ScaleSpec_To_v1beta2_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_autoscaling_ScaleSpec_To_v1beta2_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_extensions_ScaleStatus_To_v1beta2_ScaleStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_autoscaling_ScaleStatus_To_v1beta2_ScaleStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_extensions_Scale_To_v1beta2_Scale is an autogenerated conversion function. -func Convert_extensions_Scale_To_v1beta2_Scale(in *extensions.Scale, out *v1beta2.Scale, s conversion.Scope) error { - return autoConvert_extensions_Scale_To_v1beta2_Scale(in, out, s) +// Convert_autoscaling_Scale_To_v1beta2_Scale is an autogenerated conversion function. +func Convert_autoscaling_Scale_To_v1beta2_Scale(in *autoscaling.Scale, out *v1beta2.Scale, s conversion.Scope) error { + return autoConvert_autoscaling_Scale_To_v1beta2_Scale(in, out, s) } -func autoConvert_v1beta2_ScaleSpec_To_extensions_ScaleSpec(in *v1beta2.ScaleSpec, out *extensions.ScaleSpec, s conversion.Scope) error { +func autoConvert_v1beta2_ScaleSpec_To_autoscaling_ScaleSpec(in *v1beta2.ScaleSpec, out *autoscaling.ScaleSpec, s conversion.Scope) error { out.Replicas = in.Replicas return nil } -// Convert_v1beta2_ScaleSpec_To_extensions_ScaleSpec is an autogenerated conversion function. 
-func Convert_v1beta2_ScaleSpec_To_extensions_ScaleSpec(in *v1beta2.ScaleSpec, out *extensions.ScaleSpec, s conversion.Scope) error { - return autoConvert_v1beta2_ScaleSpec_To_extensions_ScaleSpec(in, out, s) +// Convert_v1beta2_ScaleSpec_To_autoscaling_ScaleSpec is an autogenerated conversion function. +func Convert_v1beta2_ScaleSpec_To_autoscaling_ScaleSpec(in *v1beta2.ScaleSpec, out *autoscaling.ScaleSpec, s conversion.Scope) error { + return autoConvert_v1beta2_ScaleSpec_To_autoscaling_ScaleSpec(in, out, s) } -func autoConvert_extensions_ScaleSpec_To_v1beta2_ScaleSpec(in *extensions.ScaleSpec, out *v1beta2.ScaleSpec, s conversion.Scope) error { +func autoConvert_autoscaling_ScaleSpec_To_v1beta2_ScaleSpec(in *autoscaling.ScaleSpec, out *v1beta2.ScaleSpec, s conversion.Scope) error { out.Replicas = in.Replicas return nil } -// Convert_extensions_ScaleSpec_To_v1beta2_ScaleSpec is an autogenerated conversion function. -func Convert_extensions_ScaleSpec_To_v1beta2_ScaleSpec(in *extensions.ScaleSpec, out *v1beta2.ScaleSpec, s conversion.Scope) error { - return autoConvert_extensions_ScaleSpec_To_v1beta2_ScaleSpec(in, out, s) +// Convert_autoscaling_ScaleSpec_To_v1beta2_ScaleSpec is an autogenerated conversion function. +func Convert_autoscaling_ScaleSpec_To_v1beta2_ScaleSpec(in *autoscaling.ScaleSpec, out *v1beta2.ScaleSpec, s conversion.Scope) error { + return autoConvert_autoscaling_ScaleSpec_To_v1beta2_ScaleSpec(in, out, s) } -func autoConvert_v1beta2_ScaleStatus_To_extensions_ScaleStatus(in *v1beta2.ScaleStatus, out *extensions.ScaleStatus, s conversion.Scope) error { +func autoConvert_v1beta2_ScaleStatus_To_autoscaling_ScaleStatus(in *v1beta2.ScaleStatus, out *autoscaling.ScaleStatus, s conversion.Scope) error { out.Replicas = in.Replicas - // WARNING: in.Selector requires manual conversion: inconvertible types (map[string]string vs *k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector) + // WARNING: in.Selector requires manual conversion: inconvertible types (map[string]string vs string) // WARNING: in.TargetSelector requires manual conversion: does not exist in peer-type return nil } -func autoConvert_extensions_ScaleStatus_To_v1beta2_ScaleStatus(in *extensions.ScaleStatus, out *v1beta2.ScaleStatus, s conversion.Scope) error { +func autoConvert_autoscaling_ScaleStatus_To_v1beta2_ScaleStatus(in *autoscaling.ScaleStatus, out *v1beta2.ScaleStatus, s conversion.Scope) error { out.Replicas = in.Replicas - // WARNING: in.Selector requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector vs map[string]string) + // WARNING: in.Selector requires manual conversion: inconvertible types (string vs map[string]string) return nil } @@ -819,6 +854,34 @@ func Convert_apps_StatefulSet_To_v1beta2_StatefulSet(in *apps.StatefulSet, out * return autoConvert_apps_StatefulSet_To_v1beta2_StatefulSet(in, out, s) } +func autoConvert_v1beta2_StatefulSetCondition_To_apps_StatefulSetCondition(in *v1beta2.StatefulSetCondition, out *apps.StatefulSetCondition, s conversion.Scope) error { + out.Type = apps.StatefulSetConditionType(in.Type) + out.Status = core.ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1beta2_StatefulSetCondition_To_apps_StatefulSetCondition is an autogenerated conversion function. 
+func Convert_v1beta2_StatefulSetCondition_To_apps_StatefulSetCondition(in *v1beta2.StatefulSetCondition, out *apps.StatefulSetCondition, s conversion.Scope) error { + return autoConvert_v1beta2_StatefulSetCondition_To_apps_StatefulSetCondition(in, out, s) +} + +func autoConvert_apps_StatefulSetCondition_To_v1beta2_StatefulSetCondition(in *apps.StatefulSetCondition, out *v1beta2.StatefulSetCondition, s conversion.Scope) error { + out.Type = v1beta2.StatefulSetConditionType(in.Type) + out.Status = v1.ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_apps_StatefulSetCondition_To_v1beta2_StatefulSetCondition is an autogenerated conversion function. +func Convert_apps_StatefulSetCondition_To_v1beta2_StatefulSetCondition(in *apps.StatefulSetCondition, out *v1beta2.StatefulSetCondition, s conversion.Scope) error { + return autoConvert_apps_StatefulSetCondition_To_v1beta2_StatefulSetCondition(in, out, s) +} + func autoConvert_v1beta2_StatefulSetList_To_apps_StatefulSetList(in *v1beta2.StatefulSetList, out *apps.StatefulSetList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { @@ -862,14 +925,14 @@ func Convert_apps_StatefulSetList_To_v1beta2_StatefulSetList(in *apps.StatefulSe } func autoConvert_v1beta2_StatefulSetSpec_To_apps_StatefulSetSpec(in *v1beta2.StatefulSetSpec, out *apps.StatefulSetSpec, s conversion.Scope) error { - if err := v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { + if err := meta_v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } - out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) - if err := api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := core_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - out.VolumeClaimTemplates = *(*[]api.PersistentVolumeClaim)(unsafe.Pointer(&in.VolumeClaimTemplates)) + out.VolumeClaimTemplates = *(*[]core.PersistentVolumeClaim)(unsafe.Pointer(&in.VolumeClaimTemplates)) out.ServiceName = in.ServiceName out.PodManagementPolicy = apps.PodManagementPolicyType(in.PodManagementPolicy) if err := Convert_v1beta2_StatefulSetUpdateStrategy_To_apps_StatefulSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { @@ -880,14 +943,14 @@ func autoConvert_v1beta2_StatefulSetSpec_To_apps_StatefulSetSpec(in *v1beta2.Sta } func autoConvert_apps_StatefulSetSpec_To_v1beta2_StatefulSetSpec(in *apps.StatefulSetSpec, out *v1beta2.StatefulSetSpec, s conversion.Scope) error { - if err := v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { + if err := meta_v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } - out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) - if err := api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := core_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } - out.VolumeClaimTemplates = *(*[]core_v1.PersistentVolumeClaim)(unsafe.Pointer(&in.VolumeClaimTemplates)) + out.VolumeClaimTemplates = 
*(*[]v1.PersistentVolumeClaim)(unsafe.Pointer(&in.VolumeClaimTemplates)) out.ServiceName = in.ServiceName out.PodManagementPolicy = v1beta2.PodManagementPolicyType(in.PodManagementPolicy) if err := Convert_apps_StatefulSetUpdateStrategy_To_v1beta2_StatefulSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { @@ -906,6 +969,7 @@ func autoConvert_v1beta2_StatefulSetStatus_To_apps_StatefulSetStatus(in *v1beta2 out.CurrentRevision = in.CurrentRevision out.UpdateRevision = in.UpdateRevision out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount)) + out.Conditions = *(*[]apps.StatefulSetCondition)(unsafe.Pointer(&in.Conditions)) return nil } @@ -918,6 +982,7 @@ func autoConvert_apps_StatefulSetStatus_To_v1beta2_StatefulSetStatus(in *apps.St out.CurrentRevision = in.CurrentRevision out.UpdateRevision = in.UpdateRevision out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount)) + out.Conditions = *(*[]v1beta2.StatefulSetCondition)(unsafe.Pointer(&in.Conditions)) return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/zz_generated.defaults.go index 29eabcaf89b2..ddf7f6a169e8 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/zz_generated.defaults.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/zz_generated.defaults.go @@ -23,7 +23,7 @@ package v1beta2 import ( v1beta2 "k8s.io/api/apps/v1beta2" runtime "k8s.io/apimachinery/pkg/runtime" - v1 "k8s.io/kubernetes/pkg/api/v1" + v1 "k8s.io/kubernetes/pkg/apis/core/v1" ) // RegisterDefaults adds defaulters functions to the given scheme. diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/validation/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/apps/validation/BUILD index fbb4a08ac771..3d80e8e2bb46 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/validation/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/validation/BUILD @@ -9,10 +9,11 @@ load( go_library( name = "go_default_library", srcs = ["validation.go"], + importpath = "k8s.io/kubernetes/pkg/apis/apps/validation", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/validation:go_default_library", "//pkg/apis/apps:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", @@ -23,10 +24,11 @@ go_library( go_test( name = "go_default_test", srcs = ["validation_test.go"], + importpath = "k8s.io/kubernetes/pkg/apis/apps/validation", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", "//pkg/apis/apps:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/validation/validation.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/validation/validation.go index 85bc564ff468..fc7c029ca7ac 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/validation/validation.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/validation/validation.go @@ -24,9 +24,9 @@ import ( unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/validation/field" - 
"k8s.io/kubernetes/pkg/api" - apivalidation "k8s.io/kubernetes/pkg/api/validation" "k8s.io/kubernetes/pkg/apis/apps" + api "k8s.io/kubernetes/pkg/apis/core" + apivalidation "k8s.io/kubernetes/pkg/apis/core/validation" ) // ValidateStatefulSetName can be used to check whether the given StatefulSet name is valid. diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/zz_generated.deepcopy.go index 983dabf46af7..5e8a74746853 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/zz_generated.deepcopy.go @@ -22,57 +22,10 @@ package apps import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - api "k8s.io/kubernetes/pkg/api" - reflect "reflect" + core "k8s.io/kubernetes/pkg/apis/core" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ControllerRevision).DeepCopyInto(out.(*ControllerRevision)) - return nil - }, InType: reflect.TypeOf(&ControllerRevision{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ControllerRevisionList).DeepCopyInto(out.(*ControllerRevisionList)) - return nil - }, InType: reflect.TypeOf(&ControllerRevisionList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RollingUpdateStatefulSetStrategy).DeepCopyInto(out.(*RollingUpdateStatefulSetStrategy)) - return nil - }, InType: reflect.TypeOf(&RollingUpdateStatefulSetStrategy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StatefulSet).DeepCopyInto(out.(*StatefulSet)) - return nil - }, InType: reflect.TypeOf(&StatefulSet{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StatefulSetList).DeepCopyInto(out.(*StatefulSetList)) - return nil - }, InType: reflect.TypeOf(&StatefulSetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StatefulSetSpec).DeepCopyInto(out.(*StatefulSetSpec)) - return nil - }, InType: reflect.TypeOf(&StatefulSetSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StatefulSetStatus).DeepCopyInto(out.(*StatefulSetStatus)) - return nil - }, InType: reflect.TypeOf(&StatefulSetStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StatefulSetUpdateStrategy).DeepCopyInto(out.(*StatefulSetUpdateStrategy)) - return nil - }, InType: reflect.TypeOf(&StatefulSetUpdateStrategy{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ControllerRevision) DeepCopyInto(out *ControllerRevision) { *out = *in @@ -184,6 +137,23 @@ func (in *StatefulSet) DeepCopyObject() runtime.Object { } } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetCondition) DeepCopyInto(out *StatefulSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetCondition. +func (in *StatefulSetCondition) DeepCopy() *StatefulSetCondition { + if in == nil { + return nil + } + out := new(StatefulSetCondition) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) { *out = *in @@ -233,7 +203,7 @@ func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) { in.Template.DeepCopyInto(&out.Template) if in.VolumeClaimTemplates != nil { in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates - *out = make([]api.PersistentVolumeClaim, len(*in)) + *out = make([]core.PersistentVolumeClaim, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -282,6 +252,13 @@ func (in *StatefulSetStatus) DeepCopyInto(out *StatefulSetStatus) { **out = **in } } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]StatefulSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/authentication/BUILD index 18fbe12e1168..839995873a24 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/BUILD @@ -13,9 +13,9 @@ go_library( "types.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/kubernetes/pkg/apis/authentication", deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication/doc.go index 7a8a65b77ffe..67a7fe426666 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/doc.go @@ -14,6 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +groupName=authentication.k8s.io package authentication diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/install/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/authentication/install/BUILD index 7ec2b74077b3..ec4744b942fb 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication/install/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/install/BUILD @@ -8,8 +8,9 @@ load( go_library( name = "go_default_library", srcs = ["install.go"], + importpath = "k8s.io/kubernetes/pkg/apis/authentication/install", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/authentication:go_default_library", "//pkg/apis/authentication/v1:go_default_library", "//pkg/apis/authentication/v1beta1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication/install/install.go index 9f1c4d647910..3e1271dcbcb1 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication/install/install.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/install/install.go @@ -23,14 +23,14 @@ import ( "k8s.io/apimachinery/pkg/apimachinery/registered" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/authentication" "k8s.io/kubernetes/pkg/apis/authentication/v1" "k8s.io/kubernetes/pkg/apis/authentication/v1beta1" ) func init() { - Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) + Install(legacyscheme.GroupFactoryRegistry, legacyscheme.Registry, legacyscheme.Scheme) } // Install registers the API group and adds types to a scheme diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/BUILD index ff7e40f0a079..44ec34c7696b 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/BUILD @@ -15,6 +15,7 @@ go_library( "zz_generated.conversion.go", "zz_generated.defaults.go", ], + importpath = "k8s.io/kubernetes/pkg/apis/authentication/v1", deps = [ "//pkg/apis/authentication:go_default_library", "//vendor/k8s.io/api/authentication/v1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/doc.go index 9c5fa123aee8..06d609c635fc 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/doc.go @@ -15,7 +15,7 @@ limitations under the License. 
*/ // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/authentication -// +k8s:conversion-gen-external-types=../../../../vendor/k8s.io/api/authentication/v1 +// +k8s:conversion-gen-external-types=k8s.io/api/authentication/v1 // +groupName=authentication.k8s.io // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/authentication/v1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/BUILD index 25cc2abae8c8..5903b91b385e 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/BUILD @@ -15,6 +15,7 @@ go_library( "zz_generated.conversion.go", "zz_generated.defaults.go", ], + importpath = "k8s.io/kubernetes/pkg/apis/authentication/v1beta1", deps = [ "//pkg/apis/authentication:go_default_library", "//vendor/k8s.io/api/authentication/v1beta1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/doc.go index c08af211350c..68fcb83f3b92 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/doc.go @@ -15,7 +15,7 @@ limitations under the License. */ // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/authentication -// +k8s:conversion-gen-external-types=../../../../vendor/k8s.io/api/authentication/v1beta1 +// +k8s:conversion-gen-external-types=k8s.io/api/authentication/v1beta1 // +groupName=authentication.k8s.io // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/authentication/v1beta1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication/zz_generated.deepcopy.go index 0efa1497af56..8f3ca0afe71e 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authentication/zz_generated.deepcopy.go @@ -21,40 +21,9 @@ limitations under the License. package authentication import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*TokenReview).DeepCopyInto(out.(*TokenReview)) - return nil - }, InType: reflect.TypeOf(&TokenReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*TokenReviewSpec).DeepCopyInto(out.(*TokenReviewSpec)) - return nil - }, InType: reflect.TypeOf(&TokenReviewSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*TokenReviewStatus).DeepCopyInto(out.(*TokenReviewStatus)) - return nil - }, InType: reflect.TypeOf(&TokenReviewStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*UserInfo).DeepCopyInto(out.(*UserInfo)) - return nil - }, InType: reflect.TypeOf(&UserInfo{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TokenReview) DeepCopyInto(out *TokenReview) { *out = *in diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/authorization/BUILD index d64dc5b2c966..30e71c49666b 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/BUILD @@ -13,9 +13,9 @@ go_library( "types.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/kubernetes/pkg/apis/authorization", deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/doc.go index df9046f03cb9..b66c384b1ed4 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/doc.go @@ -14,6 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +groupName=authorization.k8s.io package authorization diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/install/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/authorization/install/BUILD index 8f36c7f5a459..9825a5afde38 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/install/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/install/BUILD @@ -8,8 +8,9 @@ load( go_library( name = "go_default_library", srcs = ["install.go"], + importpath = "k8s.io/kubernetes/pkg/apis/authorization/install", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/authorization:go_default_library", "//pkg/apis/authorization/v1:go_default_library", "//pkg/apis/authorization/v1beta1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/install/install.go index 47b78fe5ec73..2a9cbf6bd2b1 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/install/install.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/install/install.go @@ -23,14 +23,14 @@ import ( "k8s.io/apimachinery/pkg/apimachinery/registered" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/authorization" "k8s.io/kubernetes/pkg/apis/authorization/v1" "k8s.io/kubernetes/pkg/apis/authorization/v1beta1" ) func init() { - Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) + Install(legacyscheme.GroupFactoryRegistry, legacyscheme.Registry, legacyscheme.Scheme) } // Install registers the API group and adds types to a scheme diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/types.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/types.go index 4920913c59eb..e798ec2123e6 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/types.go @@ -140,8 +140,13 @@ type SelfSubjectAccessReviewSpec struct { // SubjectAccessReviewStatus type SubjectAccessReviewStatus struct { - // Allowed is required. True if the action would be allowed, false otherwise. + // Allowed is required. True if the action would be allowed, false otherwise. Allowed bool + // Denied is optional. True if the action would be denied, otherwise + // false. If both allowed is false and denied is false, then the + // authorizer has no opinion on whether to authorize the action. Denied + // may not be true if Allowed is true. + Denied bool // Reason is optional. It indicates why a request was allowed or denied. Reason string // EvaluationError is an indication that some error occurred during the authorization check. @@ -205,7 +210,8 @@ type ResourceRule struct { // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of // the enumerated resources in any API group will be allowed. "*" means all. APIGroups []string - // Resources is a list of resources this rule applies to. ResourceAll represents all resources. "*" means all. + // Resources is a list of resources this rule applies to. "*" means all in the specified apiGroups. + // "*/foo" represents the subresource 'foo' for all resources in the specified apiGroups. Resources []string // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. 
"*" means all. ResourceNames []string diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/BUILD index f7d262bfee47..e017792e8419 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/BUILD @@ -15,6 +15,7 @@ go_library( "zz_generated.conversion.go", "zz_generated.defaults.go", ], + importpath = "k8s.io/kubernetes/pkg/apis/authorization/v1", deps = [ "//pkg/apis/authorization:go_default_library", "//vendor/k8s.io/api/authorization/v1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/doc.go index a02d11c9c76e..b47e67c03637 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/doc.go @@ -15,7 +15,7 @@ limitations under the License. */ // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/authorization -// +k8s:conversion-gen-external-types=../../../../vendor/k8s.io/api/authorization/v1 +// +k8s:conversion-gen-external-types=k8s.io/api/authorization/v1 // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/authorization/v1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/zz_generated.conversion.go index 1c06171c6fc1..00caf7872a04 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/zz_generated.conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/zz_generated.conversion.go @@ -369,6 +369,7 @@ func Convert_authorization_SubjectAccessReviewSpec_To_v1_SubjectAccessReviewSpec func autoConvert_v1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(in *v1.SubjectAccessReviewStatus, out *authorization.SubjectAccessReviewStatus, s conversion.Scope) error { out.Allowed = in.Allowed + out.Denied = in.Denied out.Reason = in.Reason out.EvaluationError = in.EvaluationError return nil @@ -381,6 +382,7 @@ func Convert_v1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewSt func autoConvert_authorization_SubjectAccessReviewStatus_To_v1_SubjectAccessReviewStatus(in *authorization.SubjectAccessReviewStatus, out *v1.SubjectAccessReviewStatus, s conversion.Scope) error { out.Allowed = in.Allowed + out.Denied = in.Denied out.Reason = in.Reason out.EvaluationError = in.EvaluationError return nil diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/BUILD index 2f5a0fbfc8a1..5ab1a06d5030 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/BUILD @@ -15,6 +15,7 @@ go_library( "zz_generated.conversion.go", "zz_generated.defaults.go", ], + importpath = "k8s.io/kubernetes/pkg/apis/authorization/v1beta1", deps = [ "//pkg/apis/authorization:go_default_library", "//vendor/k8s.io/api/authorization/v1beta1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/doc.go index 9f4624486a40..54ceb2ee6271 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/doc.go @@ -15,7 +15,7 @@ limitations under the License. 
*/ // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/authorization -// +k8s:conversion-gen-external-types=../../../../vendor/k8s.io/api/authorization/v1beta1 +// +k8s:conversion-gen-external-types=k8s.io/api/authorization/v1beta1 // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/authorization/v1beta1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/zz_generated.conversion.go index 4ba9417a61f5..234b4ff02619 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/zz_generated.conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/zz_generated.conversion.go @@ -369,6 +369,7 @@ func Convert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessRevie func autoConvert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(in *v1beta1.SubjectAccessReviewStatus, out *authorization.SubjectAccessReviewStatus, s conversion.Scope) error { out.Allowed = in.Allowed + out.Denied = in.Denied out.Reason = in.Reason out.EvaluationError = in.EvaluationError return nil @@ -381,6 +382,7 @@ func Convert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessRev func autoConvert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(in *authorization.SubjectAccessReviewStatus, out *v1beta1.SubjectAccessReviewStatus, s conversion.Scope) error { out.Allowed = in.Allowed + out.Denied = in.Denied out.Reason = in.Reason out.EvaluationError = in.EvaluationError return nil diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/validation/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/authorization/validation/BUILD index d9fd992ec0f7..58559f83da94 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/validation/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/validation/BUILD @@ -9,6 +9,7 @@ load( go_library( name = "go_default_library", srcs = ["validation.go"], + importpath = "k8s.io/kubernetes/pkg/apis/authorization/validation", deps = [ "//pkg/apis/authorization:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", @@ -20,6 +21,7 @@ go_library( go_test( name = "go_default_test", srcs = ["validation_test.go"], + importpath = "k8s.io/kubernetes/pkg/apis/authorization/validation", library = ":go_default_library", deps = [ "//pkg/apis/authorization:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/zz_generated.deepcopy.go index eb121f30885f..cdc23a2097c5 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/zz_generated.deepcopy.go @@ -21,76 +21,9 @@ limitations under the License. package authorization import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LocalSubjectAccessReview).DeepCopyInto(out.(*LocalSubjectAccessReview)) - return nil - }, InType: reflect.TypeOf(&LocalSubjectAccessReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NonResourceAttributes).DeepCopyInto(out.(*NonResourceAttributes)) - return nil - }, InType: reflect.TypeOf(&NonResourceAttributes{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NonResourceRule).DeepCopyInto(out.(*NonResourceRule)) - return nil - }, InType: reflect.TypeOf(&NonResourceRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceAttributes).DeepCopyInto(out.(*ResourceAttributes)) - return nil - }, InType: reflect.TypeOf(&ResourceAttributes{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceRule).DeepCopyInto(out.(*ResourceRule)) - return nil - }, InType: reflect.TypeOf(&ResourceRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SelfSubjectAccessReview).DeepCopyInto(out.(*SelfSubjectAccessReview)) - return nil - }, InType: reflect.TypeOf(&SelfSubjectAccessReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SelfSubjectAccessReviewSpec).DeepCopyInto(out.(*SelfSubjectAccessReviewSpec)) - return nil - }, InType: reflect.TypeOf(&SelfSubjectAccessReviewSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SelfSubjectRulesReview).DeepCopyInto(out.(*SelfSubjectRulesReview)) - return nil - }, InType: reflect.TypeOf(&SelfSubjectRulesReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SelfSubjectRulesReviewSpec).DeepCopyInto(out.(*SelfSubjectRulesReviewSpec)) - return nil - }, InType: reflect.TypeOf(&SelfSubjectRulesReviewSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SubjectAccessReview).DeepCopyInto(out.(*SubjectAccessReview)) - return nil - }, InType: reflect.TypeOf(&SubjectAccessReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SubjectAccessReviewSpec).DeepCopyInto(out.(*SubjectAccessReviewSpec)) - return nil - }, InType: reflect.TypeOf(&SubjectAccessReviewSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SubjectAccessReviewStatus).DeepCopyInto(out.(*SubjectAccessReviewStatus)) - return nil - }, InType: reflect.TypeOf(&SubjectAccessReviewStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SubjectRulesReviewStatus).DeepCopyInto(out.(*SubjectRulesReviewStatus)) - return nil - }, InType: reflect.TypeOf(&SubjectRulesReviewStatus{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *LocalSubjectAccessReview) DeepCopyInto(out *LocalSubjectAccessReview) { *out = *in diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/BUILD index fb99965cbdd4..dcca30c00d50 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/BUILD @@ -14,11 +14,11 @@ go_library( "types.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/kubernetes/pkg/apis/autoscaling", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/doc.go index 2c770186ba76..047eedde4954 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/doc.go @@ -14,6 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package package autoscaling diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/install/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/install/BUILD index 0e7512164b9c..d1cb4d5dcbf5 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/install/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/install/BUILD @@ -8,8 +8,9 @@ load( go_library( name = "go_default_library", srcs = ["install.go"], + importpath = "k8s.io/kubernetes/pkg/apis/autoscaling/install", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/autoscaling:go_default_library", "//pkg/apis/autoscaling/v1:go_default_library", "//pkg/apis/autoscaling/v2beta1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/install/install.go index e7a75f1e5b14..507f2b17f528 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/install/install.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/install/install.go @@ -22,14 +22,14 @@ import ( "k8s.io/apimachinery/pkg/apimachinery/announced" "k8s.io/apimachinery/pkg/apimachinery/registered" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/autoscaling" "k8s.io/kubernetes/pkg/apis/autoscaling/v1" "k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1" ) func init() { - Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) + Install(legacyscheme.GroupFactoryRegistry, legacyscheme.Registry, legacyscheme.Scheme) } // Install registers the API group and adds types to a scheme diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/register.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/register.go index 2bcea84b9bfb..6c321a3ababc 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/register.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/register.go @@ -42,7 +42,7 @@ var ( AddToScheme = SchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. 
func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Scale{}, diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.go index 289b2275ad97..0e9d669ad78d 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.go @@ -19,7 +19,7 @@ package autoscaling import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -231,7 +231,7 @@ var ( // ScalingActive indicates that the HPA controller is able to scale if necessary: // it's correctly configured, can fetch the desired metrics, and isn't disabled. ScalingActive HorizontalPodAutoscalerConditionType = "ScalingActive" - // AbleToScale indicates a lack of transient issues which prevent scaling from occuring, + // AbleToScale indicates a lack of transient issues which prevent scaling from occurring, // such as being in a backoff window, or being unable to access/update the target scale. AbleToScale HorizontalPodAutoscalerConditionType = "AbleToScale" // ScalingLimited indicates that the calculated scale based on metrics would be above or diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/BUILD index 8774793d048d..4f2490f7ddae 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/BUILD @@ -16,9 +16,10 @@ go_library( "zz_generated.conversion.go", "zz_generated.defaults.go", ], + importpath = "k8s.io/kubernetes/pkg/apis/autoscaling/v1", deps = [ - "//pkg/api:go_default_library", "//pkg/apis/autoscaling:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/api/autoscaling/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", @@ -32,11 +33,12 @@ go_library( go_test( name = "go_default_xtest", srcs = ["defaults_test.go"], + importpath = "k8s.io/kubernetes/pkg/apis/autoscaling/v1_test", deps = [ ":go_default_library", - "//pkg/api:go_default_library", - "//pkg/api/install:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/autoscaling/install:go_default_library", + "//pkg/apis/core/install:go_default_library", "//vendor/k8s.io/api/autoscaling/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/conversion.go index 04dc4a2804b9..599fa7622d59 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/conversion.go @@ -23,8 +23,8 @@ import ( "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/autoscaling" + api "k8s.io/kubernetes/pkg/apis/core" ) func addConversionFuncs(scheme *runtime.Scheme) error { diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/doc.go index 7c9c9f9f90bd..366692a6289e 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/doc.go @@ -15,7 +15,7 @@ limitations under the 
License. */ // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/autoscaling -// +k8s:conversion-gen-external-types=../../../../vendor/k8s.io/api/autoscaling/v1 +// +k8s:conversion-gen-external-types=k8s.io/api/autoscaling/v1 // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/autoscaling/v1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/zz_generated.conversion.go index e1f48bdce535..bb1bedd52088 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/zz_generated.conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/zz_generated.conversion.go @@ -27,8 +27,8 @@ import ( meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - api "k8s.io/kubernetes/pkg/api" autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" + core "k8s.io/kubernetes/pkg/apis/core" unsafe "unsafe" ) @@ -385,7 +385,7 @@ func Convert_autoscaling_PodsMetricStatus_To_v1_PodsMetricStatus(in *autoscaling } func autoConvert_v1_ResourceMetricSource_To_autoscaling_ResourceMetricSource(in *v1.ResourceMetricSource, out *autoscaling.ResourceMetricSource, s conversion.Scope) error { - out.Name = api.ResourceName(in.Name) + out.Name = core.ResourceName(in.Name) out.TargetAverageUtilization = (*int32)(unsafe.Pointer(in.TargetAverageUtilization)) out.TargetAverageValue = (*resource.Quantity)(unsafe.Pointer(in.TargetAverageValue)) return nil @@ -409,7 +409,7 @@ func Convert_autoscaling_ResourceMetricSource_To_v1_ResourceMetricSource(in *aut } func autoConvert_v1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(in *v1.ResourceMetricStatus, out *autoscaling.ResourceMetricStatus, s conversion.Scope) error { - out.Name = api.ResourceName(in.Name) + out.Name = core.ResourceName(in.Name) out.CurrentAverageUtilization = (*int32)(unsafe.Pointer(in.CurrentAverageUtilization)) out.CurrentAverageValue = in.CurrentAverageValue return nil diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/BUILD index 1325a6f6986d..720df24df8a1 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/BUILD @@ -15,9 +15,10 @@ go_library( "zz_generated.conversion.go", "zz_generated.defaults.go", ], + importpath = "k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1", deps = [ - "//pkg/api:go_default_library", "//pkg/apis/autoscaling:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", @@ -44,12 +45,13 @@ filegroup( go_test( name = "go_default_xtest", srcs = ["defaults_test.go"], + importpath = "k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1_test", deps = [ ":go_default_library", - "//pkg/api:go_default_library", - "//pkg/api/install:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/autoscaling:go_default_library", "//pkg/apis/autoscaling/install:go_default_library", + "//pkg/apis/core/install:go_default_library", "//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/doc.go 
b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/doc.go index 67b6f3b41fee..449555362d31 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/doc.go @@ -15,7 +15,7 @@ limitations under the License. */ // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/autoscaling -// +k8s:conversion-gen-external-types=../../../../vendor/k8s.io/api/autoscaling/v2beta1 +// +k8s:conversion-gen-external-types=k8s.io/api/autoscaling/v2beta1 // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/autoscaling/v2beta1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/zz_generated.conversion.go index af3bb3d81501..7b1f977df95e 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/zz_generated.conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/zz_generated.conversion.go @@ -27,8 +27,8 @@ import ( meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - api "k8s.io/kubernetes/pkg/api" autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" + core "k8s.io/kubernetes/pkg/apis/core" unsafe "unsafe" ) @@ -390,7 +390,7 @@ func Convert_autoscaling_PodsMetricStatus_To_v2beta1_PodsMetricStatus(in *autosc } func autoConvert_v2beta1_ResourceMetricSource_To_autoscaling_ResourceMetricSource(in *v2beta1.ResourceMetricSource, out *autoscaling.ResourceMetricSource, s conversion.Scope) error { - out.Name = api.ResourceName(in.Name) + out.Name = core.ResourceName(in.Name) out.TargetAverageUtilization = (*int32)(unsafe.Pointer(in.TargetAverageUtilization)) out.TargetAverageValue = (*resource.Quantity)(unsafe.Pointer(in.TargetAverageValue)) return nil @@ -414,7 +414,7 @@ func Convert_autoscaling_ResourceMetricSource_To_v2beta1_ResourceMetricSource(in } func autoConvert_v2beta1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(in *v2beta1.ResourceMetricStatus, out *autoscaling.ResourceMetricStatus, s conversion.Scope) error { - out.Name = api.ResourceName(in.Name) + out.Name = core.ResourceName(in.Name) out.CurrentAverageUtilization = (*int32)(unsafe.Pointer(in.CurrentAverageUtilization)) out.CurrentAverageValue = in.CurrentAverageValue return nil diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/validation/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/validation/BUILD index 5355d0e553c4..c6be1eff0259 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/validation/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/validation/BUILD @@ -9,9 +9,10 @@ load( go_library( name = "go_default_library", srcs = ["validation.go"], + importpath = "k8s.io/kubernetes/pkg/apis/autoscaling/validation", deps = [ - "//pkg/api/validation:go_default_library", "//pkg/apis/autoscaling:go_default_library", + "//pkg/apis/core/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/validation/path:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", @@ -21,10 +22,11 @@ go_library( go_test( name = "go_default_test", srcs = ["validation_test.go"], + importpath = "k8s.io/kubernetes/pkg/apis/autoscaling/validation", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", "//pkg/apis/autoscaling:go_default_library", + "//pkg/apis/core:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/validation/validation.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/validation/validation.go index e317fed2a59a..6338b2a744e0 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/validation/validation.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/validation/validation.go @@ -22,8 +22,8 @@ import ( pathvalidation "k8s.io/apimachinery/pkg/api/validation/path" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation/field" - apivalidation "k8s.io/kubernetes/pkg/api/validation" "k8s.io/kubernetes/pkg/apis/autoscaling" + apivalidation "k8s.io/kubernetes/pkg/apis/core/validation" ) func ValidateScale(scale *autoscaling.Scale) field.ErrorList { diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/zz_generated.deepcopy.go index ec833882e6ba..c0bccf3b90dc 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/zz_generated.deepcopy.go @@ -23,92 +23,9 @@ package autoscaling import ( resource "k8s.io/apimachinery/pkg/api/resource" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CrossVersionObjectReference).DeepCopyInto(out.(*CrossVersionObjectReference)) - return nil - }, InType: reflect.TypeOf(&CrossVersionObjectReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HorizontalPodAutoscaler).DeepCopyInto(out.(*HorizontalPodAutoscaler)) - return nil - }, InType: reflect.TypeOf(&HorizontalPodAutoscaler{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HorizontalPodAutoscalerCondition).DeepCopyInto(out.(*HorizontalPodAutoscalerCondition)) - return nil - }, InType: reflect.TypeOf(&HorizontalPodAutoscalerCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HorizontalPodAutoscalerList).DeepCopyInto(out.(*HorizontalPodAutoscalerList)) - return nil - }, InType: reflect.TypeOf(&HorizontalPodAutoscalerList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HorizontalPodAutoscalerSpec).DeepCopyInto(out.(*HorizontalPodAutoscalerSpec)) - return nil - }, InType: reflect.TypeOf(&HorizontalPodAutoscalerSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HorizontalPodAutoscalerStatus).DeepCopyInto(out.(*HorizontalPodAutoscalerStatus)) - return nil - }, InType: reflect.TypeOf(&HorizontalPodAutoscalerStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, 
c *conversion.Cloner) error { - in.(*MetricSpec).DeepCopyInto(out.(*MetricSpec)) - return nil - }, InType: reflect.TypeOf(&MetricSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*MetricStatus).DeepCopyInto(out.(*MetricStatus)) - return nil - }, InType: reflect.TypeOf(&MetricStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ObjectMetricSource).DeepCopyInto(out.(*ObjectMetricSource)) - return nil - }, InType: reflect.TypeOf(&ObjectMetricSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ObjectMetricStatus).DeepCopyInto(out.(*ObjectMetricStatus)) - return nil - }, InType: reflect.TypeOf(&ObjectMetricStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodsMetricSource).DeepCopyInto(out.(*PodsMetricSource)) - return nil - }, InType: reflect.TypeOf(&PodsMetricSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodsMetricStatus).DeepCopyInto(out.(*PodsMetricStatus)) - return nil - }, InType: reflect.TypeOf(&PodsMetricStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceMetricSource).DeepCopyInto(out.(*ResourceMetricSource)) - return nil - }, InType: reflect.TypeOf(&ResourceMetricSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceMetricStatus).DeepCopyInto(out.(*ResourceMetricStatus)) - return nil - }, InType: reflect.TypeOf(&ResourceMetricStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Scale).DeepCopyInto(out.(*Scale)) - return nil - }, InType: reflect.TypeOf(&Scale{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ScaleSpec).DeepCopyInto(out.(*ScaleSpec)) - return nil - }, InType: reflect.TypeOf(&ScaleSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ScaleStatus).DeepCopyInto(out.(*ScaleStatus)) - return nil - }, InType: reflect.TypeOf(&ScaleStatus{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *CrossVersionObjectReference) DeepCopyInto(out *CrossVersionObjectReference) { *out = *in diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/batch/BUILD index e6836c6c9e4b..d8c7452d199f 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/BUILD @@ -13,10 +13,10 @@ go_library( "types.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/kubernetes/pkg/apis/batch", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/doc.go index c6b203cd8ebe..65fa657f8f7b 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/doc.go @@ -14,6 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package package batch diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/install/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/batch/install/BUILD index 7478fd57a97d..3445e3f35eab 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/install/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/install/BUILD @@ -8,8 +8,9 @@ load( go_library( name = "go_default_library", srcs = ["install.go"], + importpath = "k8s.io/kubernetes/pkg/apis/batch/install", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/batch:go_default_library", "//pkg/apis/batch/v1:go_default_library", "//pkg/apis/batch/v1beta1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/install/install.go index 7df8db201e0e..e04f926057a7 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/install/install.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/install/install.go @@ -22,7 +22,7 @@ import ( "k8s.io/apimachinery/pkg/apimachinery/announced" "k8s.io/apimachinery/pkg/apimachinery/registered" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/pkg/apis/batch/v1" "k8s.io/kubernetes/pkg/apis/batch/v1beta1" @@ -30,7 +30,7 @@ import ( ) func init() { - Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) + Install(legacyscheme.GroupFactoryRegistry, legacyscheme.Registry, legacyscheme.Scheme) } // Install registers the API group and adds types to a scheme diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/register.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/register.go index 49414bd3fcef..3b4a6d406785 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/register.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/register.go @@ -42,7 +42,7 @@ var ( AddToScheme = SchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. 
func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Job{}, diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/types.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/types.go index be2b692f296d..32d921fb6afa 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/types.go @@ -18,7 +18,7 @@ package batch import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) // +genclient @@ -250,7 +250,10 @@ type CronJobSpec struct { StartingDeadlineSeconds *int64 // Specifies how to treat concurrent executions of a Job. - // Defaults to Allow. + // Valid values are: + // - "Allow" (default): allows CronJobs to run concurrently; + // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; + // - "Replace": cancels currently running job and replaces it with a new one // +optional ConcurrencyPolicy ConcurrencyPolicy diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/BUILD index 1f766c3c9a57..2413a15ab257 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/BUILD @@ -16,10 +16,11 @@ go_library( "zz_generated.conversion.go", "zz_generated.defaults.go", ], + importpath = "k8s.io/kubernetes/pkg/apis/batch/v1", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/v1:go_default_library", "//pkg/apis/batch:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/v1:go_default_library", "//vendor/k8s.io/api/batch/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -32,11 +33,12 @@ go_library( go_test( name = "go_default_xtest", srcs = ["defaults_test.go"], + importpath = "k8s.io/kubernetes/pkg/apis/batch/v1_test", deps = [ ":go_default_library", - "//pkg/api:go_default_library", - "//pkg/api/install:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/batch/install:go_default_library", + "//pkg/apis/core/install:go_default_library", "//vendor/k8s.io/api/batch/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/conversion.go index 941b61d4a707..98aae5c276bf 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/conversion.go @@ -23,8 +23,8 @@ import ( "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/runtime" - k8s_api_v1 "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/apis/batch" + k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1" ) func addConversionFuncs(scheme *runtime.Scheme) error { @@ -62,7 +62,7 @@ func Convert_batch_JobSpec_To_v1_JobSpec(in *batch.JobSpec, out *batchv1.JobSpec out.ManualSelector = nil } - if err := k8s_api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + if err := k8s_api_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } return nil @@ -81,7 +81,7 @@ func Convert_v1_JobSpec_To_batch_JobSpec(in *batchv1.JobSpec, out *batch.JobSpec out.ManualSelector = nil } - if err := k8s_api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, 
&out.Template, s); err != nil { + if err := k8s_api_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } return nil diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/doc.go index 69eadacd1372..67c2902198ce 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/doc.go @@ -15,7 +15,7 @@ limitations under the License. */ // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/batch -// +k8s:conversion-gen-external-types=../../../../vendor/k8s.io/api/batch/v1 +// +k8s:conversion-gen-external-types=k8s.io/api/batch/v1 // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/batch/v1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/zz_generated.conversion.go index 487b9c63ab82..dc346c5274de 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/zz_generated.conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/zz_generated.conversion.go @@ -26,9 +26,9 @@ import ( meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - api "k8s.io/kubernetes/pkg/api" - api_v1 "k8s.io/kubernetes/pkg/api/v1" batch "k8s.io/kubernetes/pkg/apis/batch" + core "k8s.io/kubernetes/pkg/apis/core" + apis_core_v1 "k8s.io/kubernetes/pkg/apis/core/v1" unsafe "unsafe" ) @@ -87,7 +87,7 @@ func Convert_batch_Job_To_v1_Job(in *batch.Job, out *v1.Job, s conversion.Scope) func autoConvert_v1_JobCondition_To_batch_JobCondition(in *v1.JobCondition, out *batch.JobCondition, s conversion.Scope) error { out.Type = batch.JobConditionType(in.Type) - out.Status = api.ConditionStatus(in.Status) + out.Status = core.ConditionStatus(in.Status) out.LastProbeTime = in.LastProbeTime out.LastTransitionTime = in.LastTransitionTime out.Reason = in.Reason @@ -164,7 +164,7 @@ func autoConvert_v1_JobSpec_To_batch_JobSpec(in *v1.JobSpec, out *batch.JobSpec, out.BackoffLimit = (*int32)(unsafe.Pointer(in.BackoffLimit)) out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) out.ManualSelector = (*bool)(unsafe.Pointer(in.ManualSelector)) - if err := api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + if err := apis_core_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } return nil @@ -177,7 +177,7 @@ func autoConvert_batch_JobSpec_To_v1_JobSpec(in *batch.JobSpec, out *v1.JobSpec, out.BackoffLimit = (*int32)(unsafe.Pointer(in.BackoffLimit)) out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) out.ManualSelector = (*bool)(unsafe.Pointer(in.ManualSelector)) - if err := api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + if err := apis_core_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } return nil diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/zz_generated.defaults.go index 18c0cc6d82ae..976735a1a4ef 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/zz_generated.defaults.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/zz_generated.defaults.go @@ -23,7 +23,7 @@ package v1 import ( v1 "k8s.io/api/batch/v1" runtime "k8s.io/apimachinery/pkg/runtime" - 
api_v1 "k8s.io/kubernetes/pkg/api/v1" + core_v1 "k8s.io/kubernetes/pkg/apis/core/v1" ) // RegisterDefaults adds defaulters functions to the given scheme. @@ -37,135 +37,135 @@ func RegisterDefaults(scheme *runtime.Scheme) error { func SetObjectDefaults_Job(in *v1.Job) { SetDefaults_Job(in) - api_v1.SetDefaults_PodSpec(&in.Spec.Template.Spec) + core_v1.SetDefaults_PodSpec(&in.Spec.Template.Spec) for i := range in.Spec.Template.Spec.Volumes { a := &in.Spec.Template.Spec.Volumes[i] - api_v1.SetDefaults_Volume(a) + core_v1.SetDefaults_Volume(a) if a.VolumeSource.HostPath != nil { - api_v1.SetDefaults_HostPathVolumeSource(a.VolumeSource.HostPath) + core_v1.SetDefaults_HostPathVolumeSource(a.VolumeSource.HostPath) } if a.VolumeSource.Secret != nil { - api_v1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) + core_v1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) } if a.VolumeSource.ISCSI != nil { - api_v1.SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) + core_v1.SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) } if a.VolumeSource.RBD != nil { - api_v1.SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) + core_v1.SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) } if a.VolumeSource.DownwardAPI != nil { - api_v1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) + core_v1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) for j := range a.VolumeSource.DownwardAPI.Items { b := &a.VolumeSource.DownwardAPI.Items[j] if b.FieldRef != nil { - api_v1.SetDefaults_ObjectFieldSelector(b.FieldRef) + core_v1.SetDefaults_ObjectFieldSelector(b.FieldRef) } } } if a.VolumeSource.ConfigMap != nil { - api_v1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) + core_v1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) } if a.VolumeSource.AzureDisk != nil { - api_v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) + core_v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) } if a.VolumeSource.Projected != nil { - api_v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + core_v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) for j := range a.VolumeSource.Projected.Sources { b := &a.VolumeSource.Projected.Sources[j] if b.DownwardAPI != nil { for k := range b.DownwardAPI.Items { c := &b.DownwardAPI.Items[k] if c.FieldRef != nil { - api_v1.SetDefaults_ObjectFieldSelector(c.FieldRef) + core_v1.SetDefaults_ObjectFieldSelector(c.FieldRef) } } } } } if a.VolumeSource.ScaleIO != nil { - api_v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + core_v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) } } for i := range in.Spec.Template.Spec.InitContainers { a := &in.Spec.Template.Spec.InitContainers[i] - api_v1.SetDefaults_Container(a) + core_v1.SetDefaults_Container(a) for j := range a.Ports { b := &a.Ports[j] - api_v1.SetDefaults_ContainerPort(b) + core_v1.SetDefaults_ContainerPort(b) } for j := range a.Env { b := &a.Env[j] if b.ValueFrom != nil { if b.ValueFrom.FieldRef != nil { - api_v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + core_v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) } } } - api_v1.SetDefaults_ResourceList(&a.Resources.Limits) - api_v1.SetDefaults_ResourceList(&a.Resources.Requests) + core_v1.SetDefaults_ResourceList(&a.Resources.Limits) + core_v1.SetDefaults_ResourceList(&a.Resources.Requests) if a.LivenessProbe != nil { - api_v1.SetDefaults_Probe(a.LivenessProbe) + core_v1.SetDefaults_Probe(a.LivenessProbe) if a.LivenessProbe.Handler.HTTPGet != nil { - 
api_v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + core_v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) } } if a.ReadinessProbe != nil { - api_v1.SetDefaults_Probe(a.ReadinessProbe) + core_v1.SetDefaults_Probe(a.ReadinessProbe) if a.ReadinessProbe.Handler.HTTPGet != nil { - api_v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + core_v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) } } if a.Lifecycle != nil { if a.Lifecycle.PostStart != nil { if a.Lifecycle.PostStart.HTTPGet != nil { - api_v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + core_v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) } } if a.Lifecycle.PreStop != nil { if a.Lifecycle.PreStop.HTTPGet != nil { - api_v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + core_v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) } } } } for i := range in.Spec.Template.Spec.Containers { a := &in.Spec.Template.Spec.Containers[i] - api_v1.SetDefaults_Container(a) + core_v1.SetDefaults_Container(a) for j := range a.Ports { b := &a.Ports[j] - api_v1.SetDefaults_ContainerPort(b) + core_v1.SetDefaults_ContainerPort(b) } for j := range a.Env { b := &a.Env[j] if b.ValueFrom != nil { if b.ValueFrom.FieldRef != nil { - api_v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + core_v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) } } } - api_v1.SetDefaults_ResourceList(&a.Resources.Limits) - api_v1.SetDefaults_ResourceList(&a.Resources.Requests) + core_v1.SetDefaults_ResourceList(&a.Resources.Limits) + core_v1.SetDefaults_ResourceList(&a.Resources.Requests) if a.LivenessProbe != nil { - api_v1.SetDefaults_Probe(a.LivenessProbe) + core_v1.SetDefaults_Probe(a.LivenessProbe) if a.LivenessProbe.Handler.HTTPGet != nil { - api_v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + core_v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) } } if a.ReadinessProbe != nil { - api_v1.SetDefaults_Probe(a.ReadinessProbe) + core_v1.SetDefaults_Probe(a.ReadinessProbe) if a.ReadinessProbe.Handler.HTTPGet != nil { - api_v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + core_v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) } } if a.Lifecycle != nil { if a.Lifecycle.PostStart != nil { if a.Lifecycle.PostStart.HTTPGet != nil { - api_v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + core_v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) } } if a.Lifecycle.PreStop != nil { if a.Lifecycle.PreStop.HTTPGet != nil { - api_v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + core_v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) } } } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1beta1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1beta1/BUILD index 7e9649f8efd5..2985d4e9d07d 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1beta1/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1beta1/BUILD @@ -1,7 +1,5 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", @@ -18,12 +16,12 @@ go_library( "zz_generated.conversion.go", "zz_generated.defaults.go", ], - tags = ["automanaged"], + importpath = "k8s.io/kubernetes/pkg/apis/batch/v1beta1", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/v1:go_default_library", "//pkg/apis/batch:go_default_library", "//pkg/apis/batch/v1:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/v1:go_default_library", 
"//vendor/k8s.io/api/batch/v1beta1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -36,12 +34,12 @@ go_library( go_test( name = "go_default_xtest", srcs = ["defaults_test.go"], - tags = ["automanaged"], + importpath = "k8s.io/kubernetes/pkg/apis/batch/v1beta1_test", deps = [ ":go_default_library", - "//pkg/api:go_default_library", - "//pkg/api/install:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/batch/install:go_default_library", + "//pkg/apis/core/install:go_default_library", "//vendor/k8s.io/api/batch/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1beta1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1beta1/doc.go index de192bc2e90e..56e1c24e4866 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1beta1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1beta1/doc.go @@ -15,7 +15,7 @@ limitations under the License. */ // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/batch -// +k8s:conversion-gen-external-types=../../../../vendor/k8s.io/api/batch/v1beta1 +// +k8s:conversion-gen-external-types=k8s.io/api/batch/v1beta1 // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/batch/v1beta1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1beta1/zz_generated.conversion.go index f6ae62bd0391..b977a5e74197 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1beta1/zz_generated.conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1beta1/zz_generated.conversion.go @@ -26,9 +26,9 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - api "k8s.io/kubernetes/pkg/api" batch "k8s.io/kubernetes/pkg/apis/batch" batch_v1 "k8s.io/kubernetes/pkg/apis/batch/v1" + core "k8s.io/kubernetes/pkg/apis/core" unsafe "unsafe" ) @@ -166,7 +166,7 @@ func Convert_batch_CronJobSpec_To_v1beta1_CronJobSpec(in *batch.CronJobSpec, out } func autoConvert_v1beta1_CronJobStatus_To_batch_CronJobStatus(in *v1beta1.CronJobStatus, out *batch.CronJobStatus, s conversion.Scope) error { - out.Active = *(*[]api.ObjectReference)(unsafe.Pointer(&in.Active)) + out.Active = *(*[]core.ObjectReference)(unsafe.Pointer(&in.Active)) out.LastScheduleTime = (*v1.Time)(unsafe.Pointer(in.LastScheduleTime)) return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1beta1/zz_generated.defaults.go index e9256b91d078..e809e80967c6 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1beta1/zz_generated.defaults.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1beta1/zz_generated.defaults.go @@ -23,7 +23,7 @@ package v1beta1 import ( v1beta1 "k8s.io/api/batch/v1beta1" runtime "k8s.io/apimachinery/pkg/runtime" - v1 "k8s.io/kubernetes/pkg/api/v1" + v1 "k8s.io/kubernetes/pkg/apis/core/v1" ) // RegisterDefaults adds defaulters functions to the given scheme. 
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/BUILD index 09776447e94a..79103911e03b 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/BUILD @@ -16,11 +16,12 @@ go_library( "zz_generated.conversion.go", "zz_generated.defaults.go", ], + importpath = "k8s.io/kubernetes/pkg/apis/batch/v2alpha1", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/v1:go_default_library", "//pkg/apis/batch:go_default_library", "//pkg/apis/batch/v1:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/v1:go_default_library", "//vendor/k8s.io/api/batch/v2alpha1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -33,11 +34,12 @@ go_library( go_test( name = "go_default_xtest", srcs = ["defaults_test.go"], + importpath = "k8s.io/kubernetes/pkg/apis/batch/v2alpha1_test", deps = [ ":go_default_library", - "//pkg/api:go_default_library", - "//pkg/api/install:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/batch/install:go_default_library", + "//pkg/apis/core/install:go_default_library", "//vendor/k8s.io/api/batch/v2alpha1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/doc.go index 09033253f00a..19abe6568682 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/doc.go @@ -15,7 +15,7 @@ limitations under the License. */ // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/batch -// +k8s:conversion-gen-external-types=../../../../vendor/k8s.io/api/batch/v2alpha1 +// +k8s:conversion-gen-external-types=k8s.io/api/batch/v2alpha1 // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/batch/v2alpha1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/zz_generated.conversion.go index 92c8c8ceb6aa..5f6896debc9f 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/zz_generated.conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/zz_generated.conversion.go @@ -26,9 +26,9 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - api "k8s.io/kubernetes/pkg/api" batch "k8s.io/kubernetes/pkg/apis/batch" batch_v1 "k8s.io/kubernetes/pkg/apis/batch/v1" + core "k8s.io/kubernetes/pkg/apis/core" unsafe "unsafe" ) @@ -166,7 +166,7 @@ func Convert_batch_CronJobSpec_To_v2alpha1_CronJobSpec(in *batch.CronJobSpec, ou } func autoConvert_v2alpha1_CronJobStatus_To_batch_CronJobStatus(in *v2alpha1.CronJobStatus, out *batch.CronJobStatus, s conversion.Scope) error { - out.Active = *(*[]api.ObjectReference)(unsafe.Pointer(&in.Active)) + out.Active = *(*[]core.ObjectReference)(unsafe.Pointer(&in.Active)) out.LastScheduleTime = (*v1.Time)(unsafe.Pointer(in.LastScheduleTime)) return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/zz_generated.defaults.go index 46e8ca6121d0..8983fcddadec 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/zz_generated.defaults.go +++ 
b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/zz_generated.defaults.go @@ -23,7 +23,7 @@ package v2alpha1 import ( v2alpha1 "k8s.io/api/batch/v2alpha1" runtime "k8s.io/apimachinery/pkg/runtime" - v1 "k8s.io/kubernetes/pkg/api/v1" + v1 "k8s.io/kubernetes/pkg/apis/core/v1" ) // RegisterDefaults adds defaulters functions to the given scheme. diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/validation/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/batch/validation/BUILD index 5ea444ddf9e1..318426b359be 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/validation/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/validation/BUILD @@ -9,14 +9,16 @@ load( go_library( name = "go_default_library", srcs = ["validation.go"], + importpath = "k8s.io/kubernetes/pkg/apis/batch/validation", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/validation:go_default_library", "//pkg/apis/batch:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/validation:go_default_library", "//vendor/github.com/robfig/cron:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", ], ) @@ -24,10 +26,11 @@ go_library( go_test( name = "go_default_test", srcs = ["validation_test.go"], + importpath = "k8s.io/kubernetes/pkg/apis/batch/validation", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", "//pkg/apis/batch:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/validation/validation.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/validation/validation.go index c736c10fbd46..a31bf6416e45 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/validation/validation.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/validation/validation.go @@ -22,10 +22,11 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" "k8s.io/apimachinery/pkg/labels" + apimachineryvalidation "k8s.io/apimachinery/pkg/util/validation" "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/kubernetes/pkg/api" - apivalidation "k8s.io/kubernetes/pkg/api/validation" "k8s.io/kubernetes/pkg/apis/batch" + api "k8s.io/kubernetes/pkg/apis/core" + apivalidation "k8s.io/kubernetes/pkg/apis/core/validation" ) // TODO: generalize for other controller objects that will follow the same pattern, such as ReplicaSet and DaemonSet, and @@ -165,6 +166,21 @@ func ValidateCronJob(scheduledJob *batch.CronJob) field.ErrorList { // CronJobs and rcs have the same name validation allErrs := apivalidation.ValidateObjectMeta(&scheduledJob.ObjectMeta, true, apivalidation.ValidateReplicationControllerName, field.NewPath("metadata")) allErrs = append(allErrs, ValidateCronJobSpec(&scheduledJob.Spec, field.NewPath("spec"))...) + if len(scheduledJob.ObjectMeta.Name) > apimachineryvalidation.DNS1035LabelMaxLength-11 { + // The cronjob controller appends a 11-character suffix to the cronjob (`-$TIMESTAMP`) when + // creating a job. The job name length limit is 63 characters. + // Therefore cronjob names must have length <= 63-11=52. 
If we don't validate this here, + // then job creation will fail later. + allErrs = append(allErrs, field.Invalid(field.NewPath("metadata").Child("name"), scheduledJob.ObjectMeta.Name, "must be no more than 52 characters")) + } + return allErrs +} + +func ValidateCronJobUpdate(job, oldJob *batch.CronJob) field.ErrorList { + allErrs := apivalidation.ValidateObjectMetaUpdate(&job.ObjectMeta, &oldJob.ObjectMeta, field.NewPath("metadata")) + allErrs = append(allErrs, ValidateCronJobSpec(&job.Spec, field.NewPath("spec"))...) + // skip the 52-character name validation limit on update validation + // to allow old cronjobs with names > 52 chars to be updated/deleted return allErrs } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/zz_generated.deepcopy.go index 483452c09e0f..4e68dbd6d6d6 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/zz_generated.deepcopy.go @@ -22,69 +22,10 @@ package batch import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - api "k8s.io/kubernetes/pkg/api" - reflect "reflect" + core "k8s.io/kubernetes/pkg/apis/core" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CronJob).DeepCopyInto(out.(*CronJob)) - return nil - }, InType: reflect.TypeOf(&CronJob{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CronJobList).DeepCopyInto(out.(*CronJobList)) - return nil - }, InType: reflect.TypeOf(&CronJobList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CronJobSpec).DeepCopyInto(out.(*CronJobSpec)) - return nil - }, InType: reflect.TypeOf(&CronJobSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CronJobStatus).DeepCopyInto(out.(*CronJobStatus)) - return nil - }, InType: reflect.TypeOf(&CronJobStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Job).DeepCopyInto(out.(*Job)) - return nil - }, InType: reflect.TypeOf(&Job{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*JobCondition).DeepCopyInto(out.(*JobCondition)) - return nil - }, InType: reflect.TypeOf(&JobCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*JobList).DeepCopyInto(out.(*JobList)) - return nil - }, InType: reflect.TypeOf(&JobList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*JobSpec).DeepCopyInto(out.(*JobSpec)) - return nil - }, InType: reflect.TypeOf(&JobSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*JobStatus).DeepCopyInto(out.(*JobStatus)) - return nil - }, InType: 
reflect.TypeOf(&JobStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*JobTemplate).DeepCopyInto(out.(*JobTemplate)) - return nil - }, InType: reflect.TypeOf(&JobTemplate{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*JobTemplateSpec).DeepCopyInto(out.(*JobTemplateSpec)) - return nil - }, InType: reflect.TypeOf(&JobTemplateSpec{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CronJob) DeepCopyInto(out *CronJob) { *out = *in @@ -206,7 +147,7 @@ func (in *CronJobStatus) DeepCopyInto(out *CronJobStatus) { *out = *in if in.Active != nil { in, out := &in.Active, &out.Active - *out = make([]api.ObjectReference, len(*in)) + *out = make([]core.ObjectReference, len(*in)) copy(*out, *in) } if in.LastScheduleTime != nil { diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/certificates/BUILD index 1c99c0cbfa8b..cebbfcdea67d 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/certificates/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/certificates/BUILD @@ -14,9 +14,9 @@ go_library( "types.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/kubernetes/pkg/apis/certificates", deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/certificates/doc.go index 5bd58b490fbf..6d5b784c4921 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/certificates/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/certificates/doc.go @@ -14,6 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +groupName=certificates.k8s.io package certificates diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/install/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/certificates/install/BUILD index 276c112c66ca..e8962d64056d 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/certificates/install/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/certificates/install/BUILD @@ -8,8 +8,9 @@ load( go_library( name = "go_default_library", srcs = ["install.go"], + importpath = "k8s.io/kubernetes/pkg/apis/certificates/install", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/certificates:go_default_library", "//pkg/apis/certificates/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/certificates/install/install.go index 34da4b93f3b0..a4e9bb39db97 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/certificates/install/install.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/certificates/install/install.go @@ -23,13 +23,13 @@ import ( "k8s.io/apimachinery/pkg/apimachinery/registered" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/certificates" "k8s.io/kubernetes/pkg/apis/certificates/v1beta1" ) func init() { - Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) + Install(legacyscheme.GroupFactoryRegistry, legacyscheme.Registry, legacyscheme.Scheme) } // Install registers the API group and adds types to a scheme diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/register.go b/vendor/k8s.io/kubernetes/pkg/apis/certificates/register.go index 085737583fcc..a876251ca433 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/certificates/register.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/certificates/register.go @@ -42,7 +42,7 @@ func Resource(resource string) schema.GroupResource { return SchemeGroupVersion.WithResource(resource).GroupResource() } -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &CertificateSigningRequest{}, diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/BUILD index 879a2bc71827..334d968b4180 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/BUILD @@ -15,6 +15,7 @@ go_library( "zz_generated.conversion.go", "zz_generated.defaults.go", ], + importpath = "k8s.io/kubernetes/pkg/apis/certificates/v1beta1", deps = [ "//pkg/apis/certificates:go_default_library", "//vendor/k8s.io/api/certificates/v1beta1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/doc.go index 2c6d8b8f59af..5c600b763010 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/doc.go @@ -15,7 +15,7 @@ limitations under the License. 
*/ // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/certificates -// +k8s:conversion-gen-external-types=../../../../vendor/k8s.io/api/certificates/v1beta1 +// +k8s:conversion-gen-external-types=k8s.io/api/certificates/v1beta1 // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/certificates/v1beta1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/validation/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/certificates/validation/BUILD index efab0be82d67..788809206c0b 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/certificates/validation/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/certificates/validation/BUILD @@ -8,9 +8,10 @@ load( go_library( name = "go_default_library", srcs = ["validation.go"], + importpath = "k8s.io/kubernetes/pkg/apis/certificates/validation", deps = [ - "//pkg/api/validation:go_default_library", "//pkg/apis/certificates:go_default_library", + "//pkg/apis/core/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/validation/validation.go b/vendor/k8s.io/kubernetes/pkg/apis/certificates/validation/validation.go index 4077e6b0cc5e..3b61074bc6be 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/certificates/validation/validation.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/certificates/validation/validation.go @@ -20,8 +20,8 @@ import ( "fmt" "k8s.io/apimachinery/pkg/util/validation/field" - apivalidation "k8s.io/kubernetes/pkg/api/validation" "k8s.io/kubernetes/pkg/apis/certificates" + apivalidation "k8s.io/kubernetes/pkg/apis/core/validation" ) // validateCSR validates the signature and formatting of a base64-wrapped, diff --git a/vendor/k8s.io/kubernetes/pkg/apis/certificates/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/certificates/zz_generated.deepcopy.go index c1f76f53fae3..5704a4e6c62a 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/certificates/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/certificates/zz_generated.deepcopy.go @@ -21,44 +21,9 @@ limitations under the License. package certificates import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CertificateSigningRequest).DeepCopyInto(out.(*CertificateSigningRequest)) - return nil - }, InType: reflect.TypeOf(&CertificateSigningRequest{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CertificateSigningRequestCondition).DeepCopyInto(out.(*CertificateSigningRequestCondition)) - return nil - }, InType: reflect.TypeOf(&CertificateSigningRequestCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CertificateSigningRequestList).DeepCopyInto(out.(*CertificateSigningRequestList)) - return nil - }, InType: reflect.TypeOf(&CertificateSigningRequestList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CertificateSigningRequestSpec).DeepCopyInto(out.(*CertificateSigningRequestSpec)) - return nil - }, InType: reflect.TypeOf(&CertificateSigningRequestSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CertificateSigningRequestStatus).DeepCopyInto(out.(*CertificateSigningRequestStatus)) - return nil - }, InType: reflect.TypeOf(&CertificateSigningRequestStatus{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CertificateSigningRequest) DeepCopyInto(out *CertificateSigningRequest) { *out = *in diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/BUILD index 87287710ede5..19d17029cb8e 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/BUILD @@ -15,10 +15,10 @@ go_library( "types.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/kubernetes/pkg/apis/componentconfig", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", @@ -28,6 +28,7 @@ go_library( go_test( name = "go_default_test", srcs = ["helpers_test.go"], + importpath = "k8s.io/kubernetes/pkg/apis/componentconfig", library = ":go_default_library", deps = ["//vendor/github.com/spf13/pflag:go_default_library"], ) @@ -46,7 +47,6 @@ filegroup( "//pkg/apis/componentconfig/fuzzer:all-srcs", "//pkg/apis/componentconfig/install:all-srcs", "//pkg/apis/componentconfig/v1alpha1:all-srcs", - "//pkg/apis/componentconfig/validation:all-srcs", ], tags = ["automanaged"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/doc.go index d044b16db391..47b4d14b5795 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/doc.go @@ -14,6 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package package componentconfig diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/helpers.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/helpers.go index 6f94f8516f36..a39ec138c81e 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/helpers.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/helpers.go @@ -35,6 +35,10 @@ type IPVar struct { } func (v IPVar) Set(s string) error { + if len(s) == 0 { + v.Val = nil + return nil + } if net.ParseIP(s) == nil { return fmt.Errorf("%q is not a valid IP address", s) } @@ -57,22 +61,6 @@ func (v IPVar) Type() string { return "ip" } -func (m *ProxyMode) Set(s string) error { - *m = ProxyMode(s) - return nil -} - -func (m *ProxyMode) String() string { - if m != nil { - return string(*m) - } - return "" -} - -func (m *ProxyMode) Type() string { - return "ProxyMode" -} - type PortRangeVar struct { Val *string } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/install/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/install/BUILD index a6292c0d1677..fb58da06e405 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/install/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/install/BUILD @@ -8,8 +8,9 @@ load( go_library( name = "go_default_library", srcs = ["install.go"], + importpath = "k8s.io/kubernetes/pkg/apis/componentconfig/install", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/componentconfig:go_default_library", "//pkg/apis/componentconfig/v1alpha1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/install/install.go index 53b95f6a5315..d5e48363b477 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/install/install.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/install/install.go @@ -22,13 +22,13 @@ import ( "k8s.io/apimachinery/pkg/apimachinery/announced" "k8s.io/apimachinery/pkg/apimachinery/registered" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/componentconfig" "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1" ) func init() { - Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) + Install(legacyscheme.GroupFactoryRegistry, legacyscheme.Registry, legacyscheme.Scheme) } // Install registers the API group and adds types to a scheme diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/register.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/register.go index 536757413522..7ba52b1f68ea 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/register.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/register.go @@ -45,7 +45,6 @@ func Resource(resource string) schema.GroupResource { func addKnownTypes(scheme *runtime.Scheme) error { // TODO this will get cleaned up with the scheme types are fixed scheme.AddKnownTypes(SchemeGroupVersion, - &KubeProxyConfiguration{}, &KubeSchedulerConfiguration{}, ) return nil diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/types.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/types.go index 078fa11d006e..4a57eec48c20 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/types.go @@ -30,188 
+30,97 @@ type ClientConnectionConfiguration struct { AcceptContentTypes string // contentType is the content type used when sending data to the server from this client. ContentType string - // qps controls the number of queries per second allowed for this connection. + // cps controls the number of queries per second allowed for this connection. QPS float32 // burst allows extra queries to accumulate when a client is exceeding its rate. - Burst int + Burst int32 } -// KubeProxyIPTablesConfiguration contains iptables-related configuration -// details for the Kubernetes proxy server. -type KubeProxyIPTablesConfiguration struct { - // masqueradeBit is the bit of the iptables fwmark space to use for SNAT if using - // the pure iptables proxy mode. Values must be within the range [0, 31]. - MasqueradeBit *int32 - // masqueradeAll tells kube-proxy to SNAT everything if using the pure iptables proxy mode. - MasqueradeAll bool - // syncPeriod is the period that iptables rules are refreshed (e.g. '5s', '1m', - // '2h22m'). Must be greater than 0. - SyncPeriod metav1.Duration - // minSyncPeriod is the minimum period that iptables rules are refreshed (e.g. '5s', '1m', - // '2h22m'). - MinSyncPeriod metav1.Duration -} +// SchedulerPolicyConfigMapKey defines the key of the element in the +// scheduler's policy ConfigMap that contains scheduler's policy config. +const SchedulerPolicyConfigMapKey string = "policy.cfg" -// KubeProxyIPVSConfiguration contains ipvs-related configuration -// details for the Kubernetes proxy server. -type KubeProxyIPVSConfiguration struct { - // syncPeriod is the period that ipvs rules are refreshed (e.g. '5s', '1m', - // '2h22m'). Must be greater than 0. - SyncPeriod metav1.Duration - // minSyncPeriod is the minimum period that ipvs rules are refreshed (e.g. '5s', '1m', - // '2h22m'). - MinSyncPeriod metav1.Duration - // ipvs scheduler - Scheduler string +// SchedulerPolicySource configures a means to obtain a scheduler Policy. One +// source field must be specified, and source fields are mutually exclusive. +type SchedulerPolicySource struct { + // File is a file policy source. + File *SchedulerPolicyFileSource + // ConfigMap is a config map policy source. + ConfigMap *SchedulerPolicyConfigMapSource } -// KubeProxyConntrackConfiguration contains conntrack settings for -// the Kubernetes proxy server. -type KubeProxyConntrackConfiguration struct { - // max is the maximum number of NAT connections to track (0 to - // leave as-is). This takes precedence over conntrackMaxPerCore and conntrackMin. - Max int32 - // maxPerCore is the maximum number of NAT connections to track - // per CPU core (0 to leave the limit as-is and ignore conntrackMin). - MaxPerCore int32 - // min is the minimum value of connect-tracking records to allocate, - // regardless of conntrackMaxPerCore (set conntrackMaxPerCore=0 to leave the limit as-is). - Min int32 - // tcpEstablishedTimeout is how long an idle TCP connection will be kept open - // (e.g. '2s'). Must be greater than 0. - TCPEstablishedTimeout metav1.Duration - // tcpCloseWaitTimeout is how long an idle conntrack entry - // in CLOSE_WAIT state will remain in the conntrack - // table. (e.g. '60s'). Must be greater than 0 to set. - TCPCloseWaitTimeout metav1.Duration +// SchedulerPolicyFileSource is a policy serialized to disk and accessed via +// path. +type SchedulerPolicyFileSource struct { + // Path is the location of a serialized policy. 
+ Path string } -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// KubeProxyConfiguration contains everything necessary to configure the -// Kubernetes proxy server. -type KubeProxyConfiguration struct { - metav1.TypeMeta - - // featureGates is a comma-separated list of key=value pairs that control - // which alpha/beta features are enabled. - // - // TODO this really should be a map but that requires refactoring all - // components to use config files because local-up-cluster.sh only supports - // the --feature-gates flag right now, which is comma-separated key=value - // pairs. - FeatureGates string - - // bindAddress is the IP address for the proxy server to serve on (set to 0.0.0.0 - // for all interfaces) - BindAddress string - // healthzBindAddress is the IP address and port for the health check server to serve on, - // defaulting to 0.0.0.0:10256 - HealthzBindAddress string - // metricsBindAddress is the IP address and port for the metrics server to serve on, - // defaulting to 127.0.0.1:10249 (set to 0.0.0.0 for all interfaces) - MetricsBindAddress string - // enableProfiling enables profiling via web interface on /debug/pprof handler. - // Profiling handlers will be handled by metrics server. - EnableProfiling bool - // clusterCIDR is the CIDR range of the pods in the cluster. It is used to - // bridge traffic coming from outside of the cluster. If not provided, - // no off-cluster bridging will be performed. - ClusterCIDR string - // hostnameOverride, if non-empty, will be used as the identity instead of the actual hostname. - HostnameOverride string - // clientConnection specifies the kubeconfig file and client connection settings for the proxy - // server to use when communicating with the apiserver. - ClientConnection ClientConnectionConfiguration - // iptables contains iptables-related configuration options. - IPTables KubeProxyIPTablesConfiguration - // ipvs contains ipvs-related configuration options. - IPVS KubeProxyIPVSConfiguration - // oomScoreAdj is the oom-score-adj value for kube-proxy process. Values must be within - // the range [-1000, 1000] - OOMScoreAdj *int32 - // mode specifies which proxy mode to use. - Mode ProxyMode - // portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed - // in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen. - PortRange string - // resourceContainer is the absolute name of the resource-only container to create and run - // the Kube-proxy in (Default: /kube-proxy). - ResourceContainer string - // udpIdleTimeout is how long an idle UDP connection will be kept open (e.g. '250ms', '2s'). - // Must be greater than 0. Only applicable for proxyMode=userspace. - UDPIdleTimeout metav1.Duration - // conntrack contains conntrack-related configuration options. - Conntrack KubeProxyConntrackConfiguration - // configSyncPeriod is how often configuration from the apiserver is refreshed. Must be greater - // than 0. - ConfigSyncPeriod metav1.Duration +// SchedulerPolicyConfigMapSource is a policy serialized into a config map value +// under the SchedulerPolicyConfigMapKey key. +type SchedulerPolicyConfigMapSource struct { + // Namespace is the namespace of the policy config map. + Namespace string + // Name is the name of hte policy config map. + Name string } -// Currently two modes of proxying are available: 'userspace' (older, stable) or 'iptables' -// (newer, faster). 
If blank, use the best-available proxy (currently iptables, but may -// change in future versions). If the iptables proxy is selected, regardless of how, but -// the system's kernel or iptables versions are insufficient, this always falls back to the -// userspace proxy. -type ProxyMode string - -const ( - ProxyModeUserspace ProxyMode = "userspace" - ProxyModeIPTables ProxyMode = "iptables" -) +// SchedulerAlgorithmSource is the source of a scheduler algorithm. One source +// field must be specified, and source fields are mutually exclusive. +type SchedulerAlgorithmSource struct { + // Policy is a policy based algorithm source. + Policy *SchedulerPolicySource + // Provider is the name of a scheduling algorithm provider to use. + Provider *string +} // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type KubeSchedulerConfiguration struct { metav1.TypeMeta - // port is the port that the scheduler's http service runs on. - Port int32 - // address is the IP address to serve on. - Address string - // algorithmProvider is the scheduling algorithm provider to use. - AlgorithmProvider string - // policyConfigFile is the filepath to the scheduler policy configuration. - PolicyConfigFile string - // enableProfiling enables profiling via web interface. - EnableProfiling bool - // enableContentionProfiling enables lock contention profiling, if enableProfiling is true. - EnableContentionProfiling bool - // contentType is contentType of requests sent to apiserver. - ContentType string - // kubeAPIQPS is the QPS to use while talking with kubernetes apiserver. - KubeAPIQPS float32 - // kubeAPIBurst is the QPS burst to use while talking with kubernetes apiserver. - KubeAPIBurst int32 // schedulerName is name of the scheduler, used to select which pods // will be processed by this scheduler, based on pod's "spec.SchedulerName". SchedulerName string + // AlgorithmSource specifies the scheduler algorithm source. + AlgorithmSource SchedulerAlgorithmSource // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule // corresponding to every RequiredDuringScheduling affinity rule. // HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 0-100. - HardPodAffinitySymmetricWeight int + HardPodAffinitySymmetricWeight int32 + + // LeaderElection defines the configuration of leader election client. + LeaderElection KubeSchedulerLeaderElectionConfiguration + + // ClientConnection specifies the kubeconfig file and client connection + // settings for the proxy server to use when communicating with the apiserver. + ClientConnection ClientConnectionConfiguration + // HealthzBindAddress is the IP address and port for the health check server to serve on, + // defaulting to 0.0.0.0:10251 + HealthzBindAddress string + // MetricsBindAddress is the IP address and port for the metrics server to + // serve on, defaulting to 0.0.0.0:10251. + MetricsBindAddress string + // EnableProfiling enables profiling via web interface on /debug/pprof + // handler. Profiling handlers will be handled by metrics server. + EnableProfiling bool + // EnableContentionProfiling enables lock contention profiling, if + // EnableProfiling is true. + EnableContentionProfiling bool + // Indicate the "all topologies" set for empty topologyKey when it's used for PreferredDuringScheduling pod anti-affinity. // DEPRECATED: This is no longer used. 
FailureDomains string - // leaderElection defines the configuration of leader election client. - LeaderElection LeaderElectionConfiguration +} + +// KubeSchedulerLeaderElectionConfiguration expands LeaderElectionConfiguration +// to include scheduler specific configuration. +type KubeSchedulerLeaderElectionConfiguration struct { + LeaderElectionConfiguration // LockObjectNamespace defines the namespace of the lock object LockObjectNamespace string // LockObjectName defines the lock object name LockObjectName string - // PolicyConfigMapName is the name of the ConfigMap object that specifies - // the scheduler's policy config. If UseLegacyPolicyConfig is true, scheduler - // uses PolicyConfigFile. If UseLegacyPolicyConfig is false and - // PolicyConfigMapName is not empty, the ConfigMap object with this name must - // exist in PolicyConfigMapNamespace before scheduler initialization. - PolicyConfigMapName string - // PolicyConfigMapNamespace is the namespace where the above policy config map - // is located. If none is provided default system namespace ("kube-system") - // will be used. - PolicyConfigMapNamespace string - // UseLegacyPolicyConfig tells the scheduler to ignore Policy ConfigMap and - // to use PolicyConfigFile if available. - UseLegacyPolicyConfig bool } // LeaderElectionConfiguration defines the configuration of leader election @@ -346,6 +255,9 @@ type KubeControllerManagerConfiguration struct { HorizontalPodAutoscalerUpscaleForbiddenWindow metav1.Duration // horizontalPodAutoscalerDownscaleForbiddenWindow is a period after which next downscale allowed. HorizontalPodAutoscalerDownscaleForbiddenWindow metav1.Duration + // horizontalPodAutoscalerTolerance is the tolerance for when + // resource usage suggests upscaling/downscaling + HorizontalPodAutoscalerTolerance float64 // deploymentControllerSyncPeriod is the period for syncing the deployments. DeploymentControllerSyncPeriod metav1.Duration // podEvictionTimeout is the grace period for deleting pods on failed nodes. diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/BUILD index dc8a497ed350..9726d08a84a6 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/BUILD @@ -17,11 +17,11 @@ go_library( "zz_generated.deepcopy.go", "zz_generated.defaults.go", ], + importpath = "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1", deps = [ - "//pkg/api:go_default_library", "//pkg/apis/componentconfig:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/kubelet/apis:go_default_library", - "//pkg/kubelet/qos:go_default_library", "//pkg/master/ports:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", @@ -46,6 +46,7 @@ filegroup( go_test( name = "go_default_test", srcs = ["defaults_test.go"], + importpath = "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1", library = ":go_default_library", deps = ["//pkg/apis/componentconfig:go_default_library"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/defaults.go index 921ab959e19c..0528b6cb6d5f 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/defaults.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/defaults.go @@ -17,157 +17,80 @@ limitations under the License. 
package v1alpha1 import ( - "fmt" - "strings" + "net" + "strconv" "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kruntime "k8s.io/apimachinery/pkg/runtime" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" - "k8s.io/kubernetes/pkg/kubelet/qos" "k8s.io/kubernetes/pkg/master/ports" ) -const ( - DefaultRootDir = "/var/lib/kubelet" - - AutoDetectCloudProvider = "auto-detect" - - defaultIPTablesMasqueradeBit = 14 - defaultIPTablesDropBit = 15 -) - -var ( - zeroDuration = metav1.Duration{} - // Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node-allocatable.md) doc for more information. - defaultNodeAllocatableEnforcement = []string{"pods"} -) - func addDefaultingFuncs(scheme *kruntime.Scheme) error { return RegisterDefaults(scheme) } -func SetDefaults_KubeProxyConfiguration(obj *KubeProxyConfiguration) { - if len(obj.BindAddress) == 0 { - obj.BindAddress = "0.0.0.0" - } - if obj.HealthzBindAddress == "" { - obj.HealthzBindAddress = fmt.Sprintf("0.0.0.0:%v", ports.ProxyHealthzPort) - } else if !strings.Contains(obj.HealthzBindAddress, ":") { - obj.HealthzBindAddress += fmt.Sprintf(":%v", ports.ProxyHealthzPort) - } - if obj.MetricsBindAddress == "" { - obj.MetricsBindAddress = fmt.Sprintf("127.0.0.1:%v", ports.ProxyStatusPort) - } else if !strings.Contains(obj.MetricsBindAddress, ":") { - obj.MetricsBindAddress += fmt.Sprintf(":%v", ports.ProxyStatusPort) - } - if obj.OOMScoreAdj == nil { - temp := int32(qos.KubeProxyOOMScoreAdj) - obj.OOMScoreAdj = &temp +func SetDefaults_KubeSchedulerConfiguration(obj *KubeSchedulerConfiguration) { + if len(obj.SchedulerName) == 0 { + obj.SchedulerName = api.DefaultSchedulerName } - if obj.ResourceContainer == "" { - obj.ResourceContainer = "/kube-proxy" + + if obj.HardPodAffinitySymmetricWeight == 0 { + obj.HardPodAffinitySymmetricWeight = api.DefaultHardPodAffinitySymmetricWeight } - if obj.IPTables.SyncPeriod.Duration == 0 { - obj.IPTables.SyncPeriod = metav1.Duration{Duration: 30 * time.Second} + + if obj.AlgorithmSource.Policy == nil && + (obj.AlgorithmSource.Provider == nil || len(*obj.AlgorithmSource.Provider) == 0) { + val := SchedulerDefaultProviderName + obj.AlgorithmSource.Provider = &val } - zero := metav1.Duration{} - if obj.UDPIdleTimeout == zero { - obj.UDPIdleTimeout = metav1.Duration{Duration: 250 * time.Millisecond} - } - // If ConntrackMax is set, respect it. - if obj.Conntrack.Max == 0 { - // If ConntrackMax is *not* set, use per-core scaling. 
- if obj.Conntrack.MaxPerCore == 0 { - obj.Conntrack.MaxPerCore = 32 * 1024 + + if policy := obj.AlgorithmSource.Policy; policy != nil { + if policy.ConfigMap != nil && len(policy.ConfigMap.Namespace) == 0 { + obj.AlgorithmSource.Policy.ConfigMap.Namespace = api.NamespaceSystem } - if obj.Conntrack.Min == 0 { - obj.Conntrack.Min = 128 * 1024 + } + + if host, port, err := net.SplitHostPort(obj.HealthzBindAddress); err == nil { + if len(host) == 0 { + host = "0.0.0.0" } + obj.HealthzBindAddress = net.JoinHostPort(host, port) + } else { + obj.HealthzBindAddress = net.JoinHostPort("0.0.0.0", strconv.Itoa(ports.SchedulerPort)) } - if obj.IPTables.MasqueradeBit == nil { - temp := int32(14) - obj.IPTables.MasqueradeBit = &temp - } - if obj.Conntrack.TCPEstablishedTimeout == zero { - obj.Conntrack.TCPEstablishedTimeout = metav1.Duration{Duration: 24 * time.Hour} // 1 day (1/5 default) - } - if obj.Conntrack.TCPCloseWaitTimeout == zero { - // See https://github.com/kubernetes/kubernetes/issues/32551. - // - // CLOSE_WAIT conntrack state occurs when the the Linux kernel - // sees a FIN from the remote server. Note: this is a half-close - // condition that persists as long as the local side keeps the - // socket open. The condition is rare as it is typical in most - // protocols for both sides to issue a close; this typically - // occurs when the local socket is lazily garbage collected. - // - // If the CLOSE_WAIT conntrack entry expires, then FINs from the - // local socket will not be properly SNAT'd and will not reach the - // remote server (if the connection was subject to SNAT). If the - // remote timeouts for FIN_WAIT* states exceed the CLOSE_WAIT - // timeout, then there will be an inconsistency in the state of - // the connection and a new connection reusing the SNAT (src, - // port) pair may be rejected by the remote side with RST. This - // can cause new calls to connect(2) to return with ECONNREFUSED. - // - // We set CLOSE_WAIT to one hour by default to better match - // typical server timeouts. 
- obj.Conntrack.TCPCloseWaitTimeout = metav1.Duration{Duration: 1 * time.Hour} - } - if obj.ConfigSyncPeriod.Duration == 0 { - obj.ConfigSyncPeriod.Duration = 15 * time.Minute + + if host, port, err := net.SplitHostPort(obj.MetricsBindAddress); err == nil { + if len(host) == 0 { + host = "0.0.0.0" + } + obj.MetricsBindAddress = net.JoinHostPort(host, port) + } else { + obj.MetricsBindAddress = net.JoinHostPort("0.0.0.0", strconv.Itoa(ports.SchedulerPort)) } if len(obj.ClientConnection.ContentType) == 0 { obj.ClientConnection.ContentType = "application/vnd.kubernetes.protobuf" } if obj.ClientConnection.QPS == 0.0 { - obj.ClientConnection.QPS = 5.0 + obj.ClientConnection.QPS = 50.0 } if obj.ClientConnection.Burst == 0 { - obj.ClientConnection.Burst = 10 + obj.ClientConnection.Burst = 100 } -} -func SetDefaults_KubeSchedulerConfiguration(obj *KubeSchedulerConfiguration) { - if obj.Port == 0 { - obj.Port = ports.SchedulerPort - } - if obj.Address == "" { - obj.Address = "0.0.0.0" + if len(obj.LeaderElection.LockObjectNamespace) == 0 { + obj.LeaderElection.LockObjectNamespace = SchedulerDefaultLockObjectNamespace } - if obj.AlgorithmProvider == "" { - obj.AlgorithmProvider = "DefaultProvider" - } - if obj.ContentType == "" { - obj.ContentType = "application/vnd.kubernetes.protobuf" - } - if obj.KubeAPIQPS == 0 { - obj.KubeAPIQPS = 50.0 - } - if obj.KubeAPIBurst == 0 { - obj.KubeAPIBurst = 100 - } - if obj.SchedulerName == "" { - obj.SchedulerName = api.DefaultSchedulerName + if len(obj.LeaderElection.LockObjectName) == 0 { + obj.LeaderElection.LockObjectName = SchedulerDefaultLockObjectName } - if obj.HardPodAffinitySymmetricWeight == 0 { - obj.HardPodAffinitySymmetricWeight = api.DefaultHardPodAffinitySymmetricWeight - } - if obj.FailureDomains == "" { + + if len(obj.FailureDomains) == 0 { obj.FailureDomains = kubeletapis.DefaultFailureDomains } - if obj.LockObjectNamespace == "" { - obj.LockObjectNamespace = SchedulerDefaultLockObjectNamespace - } - if obj.LockObjectName == "" { - obj.LockObjectName = SchedulerDefaultLockObjectName - } - if obj.PolicyConfigMapNamespace == "" { - obj.PolicyConfigMapNamespace = api.NamespaceSystem - } } func SetDefaults_LeaderElectionConfiguration(obj *LeaderElectionConfiguration) { @@ -186,7 +109,3 @@ func SetDefaults_LeaderElectionConfiguration(obj *LeaderElectionConfiguration) { obj.ResourceLock = "endpoints" } } - -func boolVar(b bool) *bool { - return &b -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/doc.go index 0ae15b67c860..f02229f140e2 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/componentconfig // +k8s:openapi-gen=true // +k8s:defaulter-gen=TypeMeta diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/register.go index a75096f20fb4..693ff3a35111 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/register.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/register.go @@ -44,7 +44,6 @@ func init() { func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, - &KubeProxyConfiguration{}, &KubeSchedulerConfiguration{}, ) return nil diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/types.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/types.go index 70654ca8c9e4..55e99fb42909 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/types.go @@ -33,204 +33,81 @@ type ClientConnectionConfiguration struct { // cps controls the number of queries per second allowed for this connection. QPS float32 `json:"qps"` // burst allows extra queries to accumulate when a client is exceeding its rate. - Burst int `json:"burst"` + Burst int32 `json:"burst"` } -// KubeProxyIPTablesConfiguration contains iptables-related configuration -// details for the Kubernetes proxy server. -type KubeProxyIPTablesConfiguration struct { - // masqueradeBit is the bit of the iptables fwmark space to use for SNAT if using - // the pure iptables proxy mode. Values must be within the range [0, 31]. - MasqueradeBit *int32 `json:"masqueradeBit"` - // masqueradeAll tells kube-proxy to SNAT everything if using the pure iptables proxy mode. - MasqueradeAll bool `json:"masqueradeAll"` - // syncPeriod is the period that iptables rules are refreshed (e.g. '5s', '1m', - // '2h22m'). Must be greater than 0. - SyncPeriod metav1.Duration `json:"syncPeriod"` - // minSyncPeriod is the minimum period that iptables rules are refreshed (e.g. '5s', '1m', - // '2h22m'). - MinSyncPeriod metav1.Duration `json:"minSyncPeriod"` +// SchedulerPolicySource configures a means to obtain a scheduler Policy. One +// source field must be specified, and source fields are mutually exclusive. +type SchedulerPolicySource struct { + // File is a file policy source. + File *SchedulerPolicyFileSource `json:"file,omitempty"` + // ConfigMap is a config map policy source. + ConfigMap *SchedulerPolicyConfigMapSource `json:"configMap,omitempty"` } -// KubeProxyIPVSConfiguration contains ipvs-related configuration -// details for the Kubernetes proxy server. -type KubeProxyIPVSConfiguration struct { - // syncPeriod is the period that ipvs rules are refreshed (e.g. '5s', '1m', - // '2h22m'). Must be greater than 0. - SyncPeriod metav1.Duration `json:"syncPeriod"` - // minSyncPeriod is the minimum period that ipvs rules are refreshed (e.g. '5s', '1m', - // '2h22m'). - MinSyncPeriod metav1.Duration `json:"minSyncPeriod"` - // ipvs scheduler - Scheduler string `json:"scheduler"` +// SchedulerPolicyFileSource is a policy serialized to disk and accessed via +// path. +type SchedulerPolicyFileSource struct { + // Path is the location of a serialized policy. + Path string `json:"path"` } -// KubeProxyConntrackConfiguration contains conntrack settings for -// the Kubernetes proxy server. 
-type KubeProxyConntrackConfiguration struct { - // max is the maximum number of NAT connections to track (0 to - // leave as-is). This takes precedence over conntrackMaxPerCore and conntrackMin. - Max int32 `json:"max"` - // maxPerCore is the maximum number of NAT connections to track - // per CPU core (0 to leave the limit as-is and ignore conntrackMin). - MaxPerCore int32 `json:"maxPerCore"` - // min is the minimum value of connect-tracking records to allocate, - // regardless of conntrackMaxPerCore (set conntrackMaxPerCore=0 to leave the limit as-is). - Min int32 `json:"min"` - // tcpEstablishedTimeout is how long an idle TCP connection will be kept open - // (e.g. '2s'). Must be greater than 0. - TCPEstablishedTimeout metav1.Duration `json:"tcpEstablishedTimeout"` - // tcpCloseWaitTimeout is how long an idle conntrack entry - // in CLOSE_WAIT state will remain in the conntrack - // table. (e.g. '60s'). Must be greater than 0 to set. - TCPCloseWaitTimeout metav1.Duration `json:"tcpCloseWaitTimeout"` +// SchedulerPolicyConfigMapSource is a policy serialized into a config map value +// under the SchedulerPolicyConfigMapKey key. +type SchedulerPolicyConfigMapSource struct { + // Namespace is the namespace of the policy config map. + Namespace string `json:"namespace"` + // Name is the name of hte policy config map. + Name string `json:"name"` } -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// KubeProxyConfiguration contains everything necessary to configure the -// Kubernetes proxy server. -type KubeProxyConfiguration struct { - metav1.TypeMeta `json:",inline"` - - // featureGates is a comma-separated list of key=value pairs that control - // which alpha/beta features are enabled. - // - // TODO this really should be a map but that requires refactoring all - // components to use config files because local-up-cluster.sh only supports - // the --feature-gates flag right now, which is comma-separated key=value - // pairs. - FeatureGates string `json:"featureGates"` - - // bindAddress is the IP address for the proxy server to serve on (set to 0.0.0.0 - // for all interfaces) - BindAddress string `json:"bindAddress"` - // healthzBindAddress is the IP address and port for the health check server to serve on, - // defaulting to 0.0.0.0:10256 - HealthzBindAddress string `json:"healthzBindAddress"` - // metricsBindAddress is the IP address and port for the metrics server to serve on, - // defaulting to 127.0.0.1:10249 (set to 0.0.0.0 for all interfaces) - MetricsBindAddress string `json:"metricsBindAddress"` - // enableProfiling enables profiling via web interface on /debug/pprof handler. - // Profiling handlers will be handled by metrics server. - EnableProfiling bool `json:"enableProfiling"` - // clusterCIDR is the CIDR range of the pods in the cluster. It is used to - // bridge traffic coming from outside of the cluster. If not provided, - // no off-cluster bridging will be performed. - ClusterCIDR string `json:"clusterCIDR"` - // hostnameOverride, if non-empty, will be used as the identity instead of the actual hostname. - HostnameOverride string `json:"hostnameOverride"` - // clientConnection specifies the kubeconfig file and client connection settings for the proxy - // server to use when communicating with the apiserver. - ClientConnection ClientConnectionConfiguration `json:"clientConnection"` - // iptables contains iptables-related configuration options. 
- IPTables KubeProxyIPTablesConfiguration `json:"iptables"` - // ipvs contains ipvs-related configuration options. - IPVS KubeProxyIPVSConfiguration `json:"ipvs"` - // oomScoreAdj is the oom-score-adj value for kube-proxy process. Values must be within - // the range [-1000, 1000] - OOMScoreAdj *int32 `json:"oomScoreAdj"` - // mode specifies which proxy mode to use. - Mode ProxyMode `json:"mode"` - // portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed - // in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen. - PortRange string `json:"portRange"` - // resourceContainer is the bsolute name of the resource-only container to create and run - // the Kube-proxy in (Default: /kube-proxy). - ResourceContainer string `json:"resourceContainer"` - // udpIdleTimeout is how long an idle UDP connection will be kept open (e.g. '250ms', '2s'). - // Must be greater than 0. Only applicable for proxyMode=userspace. - UDPIdleTimeout metav1.Duration `json:"udpTimeoutMilliseconds"` - // conntrack contains conntrack-related configuration options. - Conntrack KubeProxyConntrackConfiguration `json:"conntrack"` - // configSyncPeriod is how often configuration from the apiserver is refreshed. Must be greater - // than 0. - ConfigSyncPeriod metav1.Duration `json:"configSyncPeriod"` +// SchedulerAlgorithmSource is the source of a scheduler algorithm. One source +// field must be specified, and source fields are mutually exclusive. +type SchedulerAlgorithmSource struct { + // Policy is a policy based algorithm source. + Policy *SchedulerPolicySource `json:"policy,omitempty"` + // Provider is the name of a scheduling algorithm provider to use. + Provider *string `json:"provider,omitempty"` } -// Currently two modes of proxying are available: 'userspace' (older, stable) or 'iptables' -// (newer, faster). If blank, use the best-available proxy (currently iptables, but may -// change in future versions). If the iptables proxy is selected, regardless of how, but -// the system's kernel or iptables versions are insufficient, this always falls back to the -// userspace proxy. -type ProxyMode string - -const ( - ProxyModeUserspace ProxyMode = "userspace" - ProxyModeIPTables ProxyMode = "iptables" -) - // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type KubeSchedulerConfiguration struct { metav1.TypeMeta `json:",inline"` - // port is the port that the scheduler's http service runs on. - Port int `json:"port"` - // address is the IP address to serve on. - Address string `json:"address"` - // algorithmProvider is the scheduling algorithm provider to use. - AlgorithmProvider string `json:"algorithmProvider"` - // policyConfigFile is the filepath to the scheduler policy configuration. - PolicyConfigFile string `json:"policyConfigFile"` - // enableProfiling enables profiling via web interface. - EnableProfiling *bool `json:"enableProfiling"` - // enableContentionProfiling enables lock contention profiling, if enableProfiling is true. - EnableContentionProfiling bool `json:"enableContentionProfiling"` - // contentType is contentType of requests sent to apiserver. - ContentType string `json:"contentType"` - // kubeAPIQPS is the QPS to use while talking with kubernetes apiserver. - KubeAPIQPS float32 `json:"kubeAPIQPS"` - // kubeAPIBurst is the QPS burst to use while talking with kubernetes apiserver. 
- KubeAPIBurst int `json:"kubeAPIBurst"` - // schedulerName is name of the scheduler, used to select which pods + // SchedulerName is name of the scheduler, used to select which pods // will be processed by this scheduler, based on pod's "spec.SchedulerName". SchedulerName string `json:"schedulerName"` + // AlgorithmSource specifies the scheduler algorithm source. + AlgorithmSource SchedulerAlgorithmSource `json:"algorithmSource"` // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule // corresponding to every RequiredDuringScheduling affinity rule. // HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 0-100. - HardPodAffinitySymmetricWeight int `json:"hardPodAffinitySymmetricWeight"` + HardPodAffinitySymmetricWeight int32 `json:"hardPodAffinitySymmetricWeight"` + + // LeaderElection defines the configuration of leader election client. + LeaderElection KubeSchedulerLeaderElectionConfiguration `json:"leaderElection"` + + // ClientConnection specifies the kubeconfig file and client connection + // settings for the proxy server to use when communicating with the apiserver. + ClientConnection ClientConnectionConfiguration `json:"clientConnection"` + // HealthzBindAddress is the IP address and port for the health check server to serve on, + // defaulting to 0.0.0.0:10251 + HealthzBindAddress string `json:"healthzBindAddress"` + // MetricsBindAddress is the IP address and port for the metrics server to + // serve on, defaulting to 0.0.0.0:10251. + MetricsBindAddress string `json:"metricsBindAddress"` + // EnableProfiling enables profiling via web interface on /debug/pprof + // handler. Profiling handlers will be handled by metrics server. + EnableProfiling bool `json:"enableProfiling"` + // EnableContentionProfiling enables lock contention profiling, if + // EnableProfiling is true. + EnableContentionProfiling bool `json:"enableContentionProfiling"` + // Indicate the "all topologies" set for empty topologyKey when it's used for PreferredDuringScheduling pod anti-affinity. FailureDomains string `json:"failureDomains"` - // leaderElection defines the configuration of leader election client. - LeaderElection LeaderElectionConfiguration `json:"leaderElection"` - // LockObjectNamespace defines the namespace of the lock object - LockObjectNamespace string `json:"lockObjectNamespace"` - // LockObjectName defines the lock object name - LockObjectName string `json:"lockObjectName"` - // PolicyConfigMapName is the name of the ConfigMap object that specifies - // the scheduler's policy config. If UseLegacyPolicyConfig is true, scheduler - // uses PolicyConfigFile. If UseLegacyPolicyConfig is false and - // PolicyConfigMapName is not empty, the ConfigMap object with this name must - // exist in PolicyConfigMapNamespace before scheduler initialization. - PolicyConfigMapName string `json:"policyConfigMapName"` - // PolicyConfigMapNamespace is the namespace where the above policy config map - // is located. If none is provided default system namespace ("kube-system") - // will be used. - PolicyConfigMapNamespace string `json:"policyConfigMapNamespace"` - // UseLegacyPolicyConfig tells the scheduler to ignore Policy ConfigMap and - // to use PolicyConfigFile if available. - UseLegacyPolicyConfig bool `json:"useLegacyPolicyConfig"` } -// HairpinMode denotes how the kubelet should configure networking to handle -// hairpin packets. 
-type HairpinMode string - -// Enum settings for different ways to handle hairpin packets. -const ( - // Set the hairpin flag on the veth of containers in the respective - // container runtime. - HairpinVeth = "hairpin-veth" - // Make the container bridge promiscuous. This will force it to accept - // hairpin packets, even if the flag isn't set on ports of the bridge. - PromiscuousBridge = "promiscuous-bridge" - // Neither of the above. If the kubelet is started in this hairpin mode - // and kube-proxy is running in iptables mode, hairpin packets will be - // dropped by the container bridge. - HairpinNone = "none" -) - // LeaderElectionConfiguration defines the configuration of leader election // clients for components that can run with leader election enabled. type LeaderElectionConfiguration struct { @@ -259,10 +136,22 @@ type LeaderElectionConfiguration struct { ResourceLock string `json:"resourceLock"` } +// KubeSchedulerLeaderElectionConfiguration expands LeaderElectionConfiguration +// to include scheduler specific configuration. +type KubeSchedulerLeaderElectionConfiguration struct { + LeaderElectionConfiguration `json:",inline"` + // LockObjectNamespace defines the namespace of the lock object + LockObjectNamespace string `json:"lockObjectNamespace"` + // LockObjectName defines the lock object name + LockObjectName string `json:"lockObjectName"` +} + const ( // "kube-system" is the default scheduler lock object namespace SchedulerDefaultLockObjectNamespace string = "kube-system" // "kube-scheduler" is the default scheduler lock object name SchedulerDefaultLockObjectName = "kube-scheduler" + + SchedulerDefaultProviderName = "DefaultProvider" ) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/zz_generated.conversion.go index 70290c39abd0..296fa21d042e 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/zz_generated.conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/zz_generated.conversion.go @@ -38,18 +38,20 @@ func RegisterConversions(scheme *runtime.Scheme) error { return scheme.AddGeneratedConversionFuncs( Convert_v1alpha1_ClientConnectionConfiguration_To_componentconfig_ClientConnectionConfiguration, Convert_componentconfig_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration, - Convert_v1alpha1_KubeProxyConfiguration_To_componentconfig_KubeProxyConfiguration, - Convert_componentconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration, - Convert_v1alpha1_KubeProxyConntrackConfiguration_To_componentconfig_KubeProxyConntrackConfiguration, - Convert_componentconfig_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration, - Convert_v1alpha1_KubeProxyIPTablesConfiguration_To_componentconfig_KubeProxyIPTablesConfiguration, - Convert_componentconfig_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration, - Convert_v1alpha1_KubeProxyIPVSConfiguration_To_componentconfig_KubeProxyIPVSConfiguration, - Convert_componentconfig_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration, Convert_v1alpha1_KubeSchedulerConfiguration_To_componentconfig_KubeSchedulerConfiguration, Convert_componentconfig_KubeSchedulerConfiguration_To_v1alpha1_KubeSchedulerConfiguration, + Convert_v1alpha1_KubeSchedulerLeaderElectionConfiguration_To_componentconfig_KubeSchedulerLeaderElectionConfiguration, + 
Convert_componentconfig_KubeSchedulerLeaderElectionConfiguration_To_v1alpha1_KubeSchedulerLeaderElectionConfiguration, Convert_v1alpha1_LeaderElectionConfiguration_To_componentconfig_LeaderElectionConfiguration, Convert_componentconfig_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration, + Convert_v1alpha1_SchedulerAlgorithmSource_To_componentconfig_SchedulerAlgorithmSource, + Convert_componentconfig_SchedulerAlgorithmSource_To_v1alpha1_SchedulerAlgorithmSource, + Convert_v1alpha1_SchedulerPolicyConfigMapSource_To_componentconfig_SchedulerPolicyConfigMapSource, + Convert_componentconfig_SchedulerPolicyConfigMapSource_To_v1alpha1_SchedulerPolicyConfigMapSource, + Convert_v1alpha1_SchedulerPolicyFileSource_To_componentconfig_SchedulerPolicyFileSource, + Convert_componentconfig_SchedulerPolicyFileSource_To_v1alpha1_SchedulerPolicyFileSource, + Convert_v1alpha1_SchedulerPolicySource_To_componentconfig_SchedulerPolicySource, + Convert_componentconfig_SchedulerPolicySource_To_v1alpha1_SchedulerPolicySource, ) } @@ -81,242 +83,198 @@ func Convert_componentconfig_ClientConnectionConfiguration_To_v1alpha1_ClientCon return autoConvert_componentconfig_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration(in, out, s) } -func autoConvert_v1alpha1_KubeProxyConfiguration_To_componentconfig_KubeProxyConfiguration(in *KubeProxyConfiguration, out *componentconfig.KubeProxyConfiguration, s conversion.Scope) error { - out.FeatureGates = in.FeatureGates - out.BindAddress = in.BindAddress - out.HealthzBindAddress = in.HealthzBindAddress - out.MetricsBindAddress = in.MetricsBindAddress - out.EnableProfiling = in.EnableProfiling - out.ClusterCIDR = in.ClusterCIDR - out.HostnameOverride = in.HostnameOverride - if err := Convert_v1alpha1_ClientConnectionConfiguration_To_componentconfig_ClientConnectionConfiguration(&in.ClientConnection, &out.ClientConnection, s); err != nil { - return err - } - if err := Convert_v1alpha1_KubeProxyIPTablesConfiguration_To_componentconfig_KubeProxyIPTablesConfiguration(&in.IPTables, &out.IPTables, s); err != nil { +func autoConvert_v1alpha1_KubeSchedulerConfiguration_To_componentconfig_KubeSchedulerConfiguration(in *KubeSchedulerConfiguration, out *componentconfig.KubeSchedulerConfiguration, s conversion.Scope) error { + out.SchedulerName = in.SchedulerName + if err := Convert_v1alpha1_SchedulerAlgorithmSource_To_componentconfig_SchedulerAlgorithmSource(&in.AlgorithmSource, &out.AlgorithmSource, s); err != nil { return err } - if err := Convert_v1alpha1_KubeProxyIPVSConfiguration_To_componentconfig_KubeProxyIPVSConfiguration(&in.IPVS, &out.IPVS, s); err != nil { + out.HardPodAffinitySymmetricWeight = in.HardPodAffinitySymmetricWeight + if err := Convert_v1alpha1_KubeSchedulerLeaderElectionConfiguration_To_componentconfig_KubeSchedulerLeaderElectionConfiguration(&in.LeaderElection, &out.LeaderElection, s); err != nil { return err } - out.OOMScoreAdj = (*int32)(unsafe.Pointer(in.OOMScoreAdj)) - out.Mode = componentconfig.ProxyMode(in.Mode) - out.PortRange = in.PortRange - out.ResourceContainer = in.ResourceContainer - out.UDPIdleTimeout = in.UDPIdleTimeout - if err := Convert_v1alpha1_KubeProxyConntrackConfiguration_To_componentconfig_KubeProxyConntrackConfiguration(&in.Conntrack, &out.Conntrack, s); err != nil { + if err := Convert_v1alpha1_ClientConnectionConfiguration_To_componentconfig_ClientConnectionConfiguration(&in.ClientConnection, &out.ClientConnection, s); err != nil { return err } - out.ConfigSyncPeriod = in.ConfigSyncPeriod + 
out.HealthzBindAddress = in.HealthzBindAddress + out.MetricsBindAddress = in.MetricsBindAddress + out.EnableProfiling = in.EnableProfiling + out.EnableContentionProfiling = in.EnableContentionProfiling + out.FailureDomains = in.FailureDomains return nil } -// Convert_v1alpha1_KubeProxyConfiguration_To_componentconfig_KubeProxyConfiguration is an autogenerated conversion function. -func Convert_v1alpha1_KubeProxyConfiguration_To_componentconfig_KubeProxyConfiguration(in *KubeProxyConfiguration, out *componentconfig.KubeProxyConfiguration, s conversion.Scope) error { - return autoConvert_v1alpha1_KubeProxyConfiguration_To_componentconfig_KubeProxyConfiguration(in, out, s) +// Convert_v1alpha1_KubeSchedulerConfiguration_To_componentconfig_KubeSchedulerConfiguration is an autogenerated conversion function. +func Convert_v1alpha1_KubeSchedulerConfiguration_To_componentconfig_KubeSchedulerConfiguration(in *KubeSchedulerConfiguration, out *componentconfig.KubeSchedulerConfiguration, s conversion.Scope) error { + return autoConvert_v1alpha1_KubeSchedulerConfiguration_To_componentconfig_KubeSchedulerConfiguration(in, out, s) } -func autoConvert_componentconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(in *componentconfig.KubeProxyConfiguration, out *KubeProxyConfiguration, s conversion.Scope) error { - out.FeatureGates = in.FeatureGates - out.BindAddress = in.BindAddress - out.HealthzBindAddress = in.HealthzBindAddress - out.MetricsBindAddress = in.MetricsBindAddress - out.EnableProfiling = in.EnableProfiling - out.ClusterCIDR = in.ClusterCIDR - out.HostnameOverride = in.HostnameOverride - if err := Convert_componentconfig_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration(&in.ClientConnection, &out.ClientConnection, s); err != nil { +func autoConvert_componentconfig_KubeSchedulerConfiguration_To_v1alpha1_KubeSchedulerConfiguration(in *componentconfig.KubeSchedulerConfiguration, out *KubeSchedulerConfiguration, s conversion.Scope) error { + out.SchedulerName = in.SchedulerName + if err := Convert_componentconfig_SchedulerAlgorithmSource_To_v1alpha1_SchedulerAlgorithmSource(&in.AlgorithmSource, &out.AlgorithmSource, s); err != nil { return err } - if err := Convert_componentconfig_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration(&in.IPTables, &out.IPTables, s); err != nil { + out.HardPodAffinitySymmetricWeight = in.HardPodAffinitySymmetricWeight + if err := Convert_componentconfig_KubeSchedulerLeaderElectionConfiguration_To_v1alpha1_KubeSchedulerLeaderElectionConfiguration(&in.LeaderElection, &out.LeaderElection, s); err != nil { + return err + } + if err := Convert_componentconfig_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration(&in.ClientConnection, &out.ClientConnection, s); err != nil { return err } - if err := Convert_componentconfig_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration(&in.IPVS, &out.IPVS, s); err != nil { + out.HealthzBindAddress = in.HealthzBindAddress + out.MetricsBindAddress = in.MetricsBindAddress + out.EnableProfiling = in.EnableProfiling + out.EnableContentionProfiling = in.EnableContentionProfiling + out.FailureDomains = in.FailureDomains + return nil +} + +// Convert_componentconfig_KubeSchedulerConfiguration_To_v1alpha1_KubeSchedulerConfiguration is an autogenerated conversion function. 
+func Convert_componentconfig_KubeSchedulerConfiguration_To_v1alpha1_KubeSchedulerConfiguration(in *componentconfig.KubeSchedulerConfiguration, out *KubeSchedulerConfiguration, s conversion.Scope) error { + return autoConvert_componentconfig_KubeSchedulerConfiguration_To_v1alpha1_KubeSchedulerConfiguration(in, out, s) +} + +func autoConvert_v1alpha1_KubeSchedulerLeaderElectionConfiguration_To_componentconfig_KubeSchedulerLeaderElectionConfiguration(in *KubeSchedulerLeaderElectionConfiguration, out *componentconfig.KubeSchedulerLeaderElectionConfiguration, s conversion.Scope) error { + if err := Convert_v1alpha1_LeaderElectionConfiguration_To_componentconfig_LeaderElectionConfiguration(&in.LeaderElectionConfiguration, &out.LeaderElectionConfiguration, s); err != nil { return err } - out.OOMScoreAdj = (*int32)(unsafe.Pointer(in.OOMScoreAdj)) - out.Mode = ProxyMode(in.Mode) - out.PortRange = in.PortRange - out.ResourceContainer = in.ResourceContainer - out.UDPIdleTimeout = in.UDPIdleTimeout - if err := Convert_componentconfig_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration(&in.Conntrack, &out.Conntrack, s); err != nil { + out.LockObjectNamespace = in.LockObjectNamespace + out.LockObjectName = in.LockObjectName + return nil +} + +// Convert_v1alpha1_KubeSchedulerLeaderElectionConfiguration_To_componentconfig_KubeSchedulerLeaderElectionConfiguration is an autogenerated conversion function. +func Convert_v1alpha1_KubeSchedulerLeaderElectionConfiguration_To_componentconfig_KubeSchedulerLeaderElectionConfiguration(in *KubeSchedulerLeaderElectionConfiguration, out *componentconfig.KubeSchedulerLeaderElectionConfiguration, s conversion.Scope) error { + return autoConvert_v1alpha1_KubeSchedulerLeaderElectionConfiguration_To_componentconfig_KubeSchedulerLeaderElectionConfiguration(in, out, s) +} + +func autoConvert_componentconfig_KubeSchedulerLeaderElectionConfiguration_To_v1alpha1_KubeSchedulerLeaderElectionConfiguration(in *componentconfig.KubeSchedulerLeaderElectionConfiguration, out *KubeSchedulerLeaderElectionConfiguration, s conversion.Scope) error { + if err := Convert_componentconfig_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(&in.LeaderElectionConfiguration, &out.LeaderElectionConfiguration, s); err != nil { return err } - out.ConfigSyncPeriod = in.ConfigSyncPeriod + out.LockObjectNamespace = in.LockObjectNamespace + out.LockObjectName = in.LockObjectName return nil } -// Convert_componentconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration is an autogenerated conversion function. -func Convert_componentconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(in *componentconfig.KubeProxyConfiguration, out *KubeProxyConfiguration, s conversion.Scope) error { - return autoConvert_componentconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(in, out, s) +// Convert_componentconfig_KubeSchedulerLeaderElectionConfiguration_To_v1alpha1_KubeSchedulerLeaderElectionConfiguration is an autogenerated conversion function. 
+func Convert_componentconfig_KubeSchedulerLeaderElectionConfiguration_To_v1alpha1_KubeSchedulerLeaderElectionConfiguration(in *componentconfig.KubeSchedulerLeaderElectionConfiguration, out *KubeSchedulerLeaderElectionConfiguration, s conversion.Scope) error { + return autoConvert_componentconfig_KubeSchedulerLeaderElectionConfiguration_To_v1alpha1_KubeSchedulerLeaderElectionConfiguration(in, out, s) } -func autoConvert_v1alpha1_KubeProxyConntrackConfiguration_To_componentconfig_KubeProxyConntrackConfiguration(in *KubeProxyConntrackConfiguration, out *componentconfig.KubeProxyConntrackConfiguration, s conversion.Scope) error { - out.Max = in.Max - out.MaxPerCore = in.MaxPerCore - out.Min = in.Min - out.TCPEstablishedTimeout = in.TCPEstablishedTimeout - out.TCPCloseWaitTimeout = in.TCPCloseWaitTimeout +func autoConvert_v1alpha1_LeaderElectionConfiguration_To_componentconfig_LeaderElectionConfiguration(in *LeaderElectionConfiguration, out *componentconfig.LeaderElectionConfiguration, s conversion.Scope) error { + if err := v1.Convert_Pointer_bool_To_bool(&in.LeaderElect, &out.LeaderElect, s); err != nil { + return err + } + out.LeaseDuration = in.LeaseDuration + out.RenewDeadline = in.RenewDeadline + out.RetryPeriod = in.RetryPeriod + out.ResourceLock = in.ResourceLock return nil } -// Convert_v1alpha1_KubeProxyConntrackConfiguration_To_componentconfig_KubeProxyConntrackConfiguration is an autogenerated conversion function. -func Convert_v1alpha1_KubeProxyConntrackConfiguration_To_componentconfig_KubeProxyConntrackConfiguration(in *KubeProxyConntrackConfiguration, out *componentconfig.KubeProxyConntrackConfiguration, s conversion.Scope) error { - return autoConvert_v1alpha1_KubeProxyConntrackConfiguration_To_componentconfig_KubeProxyConntrackConfiguration(in, out, s) +// Convert_v1alpha1_LeaderElectionConfiguration_To_componentconfig_LeaderElectionConfiguration is an autogenerated conversion function. +func Convert_v1alpha1_LeaderElectionConfiguration_To_componentconfig_LeaderElectionConfiguration(in *LeaderElectionConfiguration, out *componentconfig.LeaderElectionConfiguration, s conversion.Scope) error { + return autoConvert_v1alpha1_LeaderElectionConfiguration_To_componentconfig_LeaderElectionConfiguration(in, out, s) } -func autoConvert_componentconfig_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration(in *componentconfig.KubeProxyConntrackConfiguration, out *KubeProxyConntrackConfiguration, s conversion.Scope) error { - out.Max = in.Max - out.MaxPerCore = in.MaxPerCore - out.Min = in.Min - out.TCPEstablishedTimeout = in.TCPEstablishedTimeout - out.TCPCloseWaitTimeout = in.TCPCloseWaitTimeout +func autoConvert_componentconfig_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(in *componentconfig.LeaderElectionConfiguration, out *LeaderElectionConfiguration, s conversion.Scope) error { + if err := v1.Convert_bool_To_Pointer_bool(&in.LeaderElect, &out.LeaderElect, s); err != nil { + return err + } + out.LeaseDuration = in.LeaseDuration + out.RenewDeadline = in.RenewDeadline + out.RetryPeriod = in.RetryPeriod + out.ResourceLock = in.ResourceLock return nil } -// Convert_componentconfig_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration is an autogenerated conversion function. 
-func Convert_componentconfig_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration(in *componentconfig.KubeProxyConntrackConfiguration, out *KubeProxyConntrackConfiguration, s conversion.Scope) error { - return autoConvert_componentconfig_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration(in, out, s) +// Convert_componentconfig_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration is an autogenerated conversion function. +func Convert_componentconfig_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(in *componentconfig.LeaderElectionConfiguration, out *LeaderElectionConfiguration, s conversion.Scope) error { + return autoConvert_componentconfig_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(in, out, s) } -func autoConvert_v1alpha1_KubeProxyIPTablesConfiguration_To_componentconfig_KubeProxyIPTablesConfiguration(in *KubeProxyIPTablesConfiguration, out *componentconfig.KubeProxyIPTablesConfiguration, s conversion.Scope) error { - out.MasqueradeBit = (*int32)(unsafe.Pointer(in.MasqueradeBit)) - out.MasqueradeAll = in.MasqueradeAll - out.SyncPeriod = in.SyncPeriod - out.MinSyncPeriod = in.MinSyncPeriod +func autoConvert_v1alpha1_SchedulerAlgorithmSource_To_componentconfig_SchedulerAlgorithmSource(in *SchedulerAlgorithmSource, out *componentconfig.SchedulerAlgorithmSource, s conversion.Scope) error { + out.Policy = (*componentconfig.SchedulerPolicySource)(unsafe.Pointer(in.Policy)) + out.Provider = (*string)(unsafe.Pointer(in.Provider)) return nil } -// Convert_v1alpha1_KubeProxyIPTablesConfiguration_To_componentconfig_KubeProxyIPTablesConfiguration is an autogenerated conversion function. -func Convert_v1alpha1_KubeProxyIPTablesConfiguration_To_componentconfig_KubeProxyIPTablesConfiguration(in *KubeProxyIPTablesConfiguration, out *componentconfig.KubeProxyIPTablesConfiguration, s conversion.Scope) error { - return autoConvert_v1alpha1_KubeProxyIPTablesConfiguration_To_componentconfig_KubeProxyIPTablesConfiguration(in, out, s) +// Convert_v1alpha1_SchedulerAlgorithmSource_To_componentconfig_SchedulerAlgorithmSource is an autogenerated conversion function. +func Convert_v1alpha1_SchedulerAlgorithmSource_To_componentconfig_SchedulerAlgorithmSource(in *SchedulerAlgorithmSource, out *componentconfig.SchedulerAlgorithmSource, s conversion.Scope) error { + return autoConvert_v1alpha1_SchedulerAlgorithmSource_To_componentconfig_SchedulerAlgorithmSource(in, out, s) } -func autoConvert_componentconfig_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration(in *componentconfig.KubeProxyIPTablesConfiguration, out *KubeProxyIPTablesConfiguration, s conversion.Scope) error { - out.MasqueradeBit = (*int32)(unsafe.Pointer(in.MasqueradeBit)) - out.MasqueradeAll = in.MasqueradeAll - out.SyncPeriod = in.SyncPeriod - out.MinSyncPeriod = in.MinSyncPeriod +func autoConvert_componentconfig_SchedulerAlgorithmSource_To_v1alpha1_SchedulerAlgorithmSource(in *componentconfig.SchedulerAlgorithmSource, out *SchedulerAlgorithmSource, s conversion.Scope) error { + out.Policy = (*SchedulerPolicySource)(unsafe.Pointer(in.Policy)) + out.Provider = (*string)(unsafe.Pointer(in.Provider)) return nil } -// Convert_componentconfig_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration is an autogenerated conversion function. 
-func Convert_componentconfig_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration(in *componentconfig.KubeProxyIPTablesConfiguration, out *KubeProxyIPTablesConfiguration, s conversion.Scope) error { - return autoConvert_componentconfig_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration(in, out, s) +// Convert_componentconfig_SchedulerAlgorithmSource_To_v1alpha1_SchedulerAlgorithmSource is an autogenerated conversion function. +func Convert_componentconfig_SchedulerAlgorithmSource_To_v1alpha1_SchedulerAlgorithmSource(in *componentconfig.SchedulerAlgorithmSource, out *SchedulerAlgorithmSource, s conversion.Scope) error { + return autoConvert_componentconfig_SchedulerAlgorithmSource_To_v1alpha1_SchedulerAlgorithmSource(in, out, s) } -func autoConvert_v1alpha1_KubeProxyIPVSConfiguration_To_componentconfig_KubeProxyIPVSConfiguration(in *KubeProxyIPVSConfiguration, out *componentconfig.KubeProxyIPVSConfiguration, s conversion.Scope) error { - out.SyncPeriod = in.SyncPeriod - out.MinSyncPeriod = in.MinSyncPeriod - out.Scheduler = in.Scheduler +func autoConvert_v1alpha1_SchedulerPolicyConfigMapSource_To_componentconfig_SchedulerPolicyConfigMapSource(in *SchedulerPolicyConfigMapSource, out *componentconfig.SchedulerPolicyConfigMapSource, s conversion.Scope) error { + out.Namespace = in.Namespace + out.Name = in.Name return nil } -// Convert_v1alpha1_KubeProxyIPVSConfiguration_To_componentconfig_KubeProxyIPVSConfiguration is an autogenerated conversion function. -func Convert_v1alpha1_KubeProxyIPVSConfiguration_To_componentconfig_KubeProxyIPVSConfiguration(in *KubeProxyIPVSConfiguration, out *componentconfig.KubeProxyIPVSConfiguration, s conversion.Scope) error { - return autoConvert_v1alpha1_KubeProxyIPVSConfiguration_To_componentconfig_KubeProxyIPVSConfiguration(in, out, s) +// Convert_v1alpha1_SchedulerPolicyConfigMapSource_To_componentconfig_SchedulerPolicyConfigMapSource is an autogenerated conversion function. +func Convert_v1alpha1_SchedulerPolicyConfigMapSource_To_componentconfig_SchedulerPolicyConfigMapSource(in *SchedulerPolicyConfigMapSource, out *componentconfig.SchedulerPolicyConfigMapSource, s conversion.Scope) error { + return autoConvert_v1alpha1_SchedulerPolicyConfigMapSource_To_componentconfig_SchedulerPolicyConfigMapSource(in, out, s) } -func autoConvert_componentconfig_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration(in *componentconfig.KubeProxyIPVSConfiguration, out *KubeProxyIPVSConfiguration, s conversion.Scope) error { - out.SyncPeriod = in.SyncPeriod - out.MinSyncPeriod = in.MinSyncPeriod - out.Scheduler = in.Scheduler +func autoConvert_componentconfig_SchedulerPolicyConfigMapSource_To_v1alpha1_SchedulerPolicyConfigMapSource(in *componentconfig.SchedulerPolicyConfigMapSource, out *SchedulerPolicyConfigMapSource, s conversion.Scope) error { + out.Namespace = in.Namespace + out.Name = in.Name return nil } -// Convert_componentconfig_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration is an autogenerated conversion function. -func Convert_componentconfig_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration(in *componentconfig.KubeProxyIPVSConfiguration, out *KubeProxyIPVSConfiguration, s conversion.Scope) error { - return autoConvert_componentconfig_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration(in, out, s) +// Convert_componentconfig_SchedulerPolicyConfigMapSource_To_v1alpha1_SchedulerPolicyConfigMapSource is an autogenerated conversion function. 
+func Convert_componentconfig_SchedulerPolicyConfigMapSource_To_v1alpha1_SchedulerPolicyConfigMapSource(in *componentconfig.SchedulerPolicyConfigMapSource, out *SchedulerPolicyConfigMapSource, s conversion.Scope) error { + return autoConvert_componentconfig_SchedulerPolicyConfigMapSource_To_v1alpha1_SchedulerPolicyConfigMapSource(in, out, s) } -func autoConvert_v1alpha1_KubeSchedulerConfiguration_To_componentconfig_KubeSchedulerConfiguration(in *KubeSchedulerConfiguration, out *componentconfig.KubeSchedulerConfiguration, s conversion.Scope) error { - out.Port = int32(in.Port) - out.Address = in.Address - out.AlgorithmProvider = in.AlgorithmProvider - out.PolicyConfigFile = in.PolicyConfigFile - if err := v1.Convert_Pointer_bool_To_bool(&in.EnableProfiling, &out.EnableProfiling, s); err != nil { - return err - } - out.EnableContentionProfiling = in.EnableContentionProfiling - out.ContentType = in.ContentType - out.KubeAPIQPS = in.KubeAPIQPS - out.KubeAPIBurst = int32(in.KubeAPIBurst) - out.SchedulerName = in.SchedulerName - out.HardPodAffinitySymmetricWeight = in.HardPodAffinitySymmetricWeight - out.FailureDomains = in.FailureDomains - if err := Convert_v1alpha1_LeaderElectionConfiguration_To_componentconfig_LeaderElectionConfiguration(&in.LeaderElection, &out.LeaderElection, s); err != nil { - return err - } - out.LockObjectNamespace = in.LockObjectNamespace - out.LockObjectName = in.LockObjectName - out.PolicyConfigMapName = in.PolicyConfigMapName - out.PolicyConfigMapNamespace = in.PolicyConfigMapNamespace - out.UseLegacyPolicyConfig = in.UseLegacyPolicyConfig +func autoConvert_v1alpha1_SchedulerPolicyFileSource_To_componentconfig_SchedulerPolicyFileSource(in *SchedulerPolicyFileSource, out *componentconfig.SchedulerPolicyFileSource, s conversion.Scope) error { + out.Path = in.Path return nil } -// Convert_v1alpha1_KubeSchedulerConfiguration_To_componentconfig_KubeSchedulerConfiguration is an autogenerated conversion function. -func Convert_v1alpha1_KubeSchedulerConfiguration_To_componentconfig_KubeSchedulerConfiguration(in *KubeSchedulerConfiguration, out *componentconfig.KubeSchedulerConfiguration, s conversion.Scope) error { - return autoConvert_v1alpha1_KubeSchedulerConfiguration_To_componentconfig_KubeSchedulerConfiguration(in, out, s) +// Convert_v1alpha1_SchedulerPolicyFileSource_To_componentconfig_SchedulerPolicyFileSource is an autogenerated conversion function. 
+func Convert_v1alpha1_SchedulerPolicyFileSource_To_componentconfig_SchedulerPolicyFileSource(in *SchedulerPolicyFileSource, out *componentconfig.SchedulerPolicyFileSource, s conversion.Scope) error { + return autoConvert_v1alpha1_SchedulerPolicyFileSource_To_componentconfig_SchedulerPolicyFileSource(in, out, s) } -func autoConvert_componentconfig_KubeSchedulerConfiguration_To_v1alpha1_KubeSchedulerConfiguration(in *componentconfig.KubeSchedulerConfiguration, out *KubeSchedulerConfiguration, s conversion.Scope) error { - out.Port = int(in.Port) - out.Address = in.Address - out.AlgorithmProvider = in.AlgorithmProvider - out.PolicyConfigFile = in.PolicyConfigFile - if err := v1.Convert_bool_To_Pointer_bool(&in.EnableProfiling, &out.EnableProfiling, s); err != nil { - return err - } - out.EnableContentionProfiling = in.EnableContentionProfiling - out.ContentType = in.ContentType - out.KubeAPIQPS = in.KubeAPIQPS - out.KubeAPIBurst = int(in.KubeAPIBurst) - out.SchedulerName = in.SchedulerName - out.HardPodAffinitySymmetricWeight = in.HardPodAffinitySymmetricWeight - out.FailureDomains = in.FailureDomains - if err := Convert_componentconfig_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(&in.LeaderElection, &out.LeaderElection, s); err != nil { - return err - } - out.LockObjectNamespace = in.LockObjectNamespace - out.LockObjectName = in.LockObjectName - out.PolicyConfigMapName = in.PolicyConfigMapName - out.PolicyConfigMapNamespace = in.PolicyConfigMapNamespace - out.UseLegacyPolicyConfig = in.UseLegacyPolicyConfig +func autoConvert_componentconfig_SchedulerPolicyFileSource_To_v1alpha1_SchedulerPolicyFileSource(in *componentconfig.SchedulerPolicyFileSource, out *SchedulerPolicyFileSource, s conversion.Scope) error { + out.Path = in.Path return nil } -// Convert_componentconfig_KubeSchedulerConfiguration_To_v1alpha1_KubeSchedulerConfiguration is an autogenerated conversion function. -func Convert_componentconfig_KubeSchedulerConfiguration_To_v1alpha1_KubeSchedulerConfiguration(in *componentconfig.KubeSchedulerConfiguration, out *KubeSchedulerConfiguration, s conversion.Scope) error { - return autoConvert_componentconfig_KubeSchedulerConfiguration_To_v1alpha1_KubeSchedulerConfiguration(in, out, s) +// Convert_componentconfig_SchedulerPolicyFileSource_To_v1alpha1_SchedulerPolicyFileSource is an autogenerated conversion function. 
+func Convert_componentconfig_SchedulerPolicyFileSource_To_v1alpha1_SchedulerPolicyFileSource(in *componentconfig.SchedulerPolicyFileSource, out *SchedulerPolicyFileSource, s conversion.Scope) error { + return autoConvert_componentconfig_SchedulerPolicyFileSource_To_v1alpha1_SchedulerPolicyFileSource(in, out, s) } -func autoConvert_v1alpha1_LeaderElectionConfiguration_To_componentconfig_LeaderElectionConfiguration(in *LeaderElectionConfiguration, out *componentconfig.LeaderElectionConfiguration, s conversion.Scope) error { - if err := v1.Convert_Pointer_bool_To_bool(&in.LeaderElect, &out.LeaderElect, s); err != nil { - return err - } - out.LeaseDuration = in.LeaseDuration - out.RenewDeadline = in.RenewDeadline - out.RetryPeriod = in.RetryPeriod - out.ResourceLock = in.ResourceLock +func autoConvert_v1alpha1_SchedulerPolicySource_To_componentconfig_SchedulerPolicySource(in *SchedulerPolicySource, out *componentconfig.SchedulerPolicySource, s conversion.Scope) error { + out.File = (*componentconfig.SchedulerPolicyFileSource)(unsafe.Pointer(in.File)) + out.ConfigMap = (*componentconfig.SchedulerPolicyConfigMapSource)(unsafe.Pointer(in.ConfigMap)) return nil } -// Convert_v1alpha1_LeaderElectionConfiguration_To_componentconfig_LeaderElectionConfiguration is an autogenerated conversion function. -func Convert_v1alpha1_LeaderElectionConfiguration_To_componentconfig_LeaderElectionConfiguration(in *LeaderElectionConfiguration, out *componentconfig.LeaderElectionConfiguration, s conversion.Scope) error { - return autoConvert_v1alpha1_LeaderElectionConfiguration_To_componentconfig_LeaderElectionConfiguration(in, out, s) +// Convert_v1alpha1_SchedulerPolicySource_To_componentconfig_SchedulerPolicySource is an autogenerated conversion function. +func Convert_v1alpha1_SchedulerPolicySource_To_componentconfig_SchedulerPolicySource(in *SchedulerPolicySource, out *componentconfig.SchedulerPolicySource, s conversion.Scope) error { + return autoConvert_v1alpha1_SchedulerPolicySource_To_componentconfig_SchedulerPolicySource(in, out, s) } -func autoConvert_componentconfig_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(in *componentconfig.LeaderElectionConfiguration, out *LeaderElectionConfiguration, s conversion.Scope) error { - if err := v1.Convert_bool_To_Pointer_bool(&in.LeaderElect, &out.LeaderElect, s); err != nil { - return err - } - out.LeaseDuration = in.LeaseDuration - out.RenewDeadline = in.RenewDeadline - out.RetryPeriod = in.RetryPeriod - out.ResourceLock = in.ResourceLock +func autoConvert_componentconfig_SchedulerPolicySource_To_v1alpha1_SchedulerPolicySource(in *componentconfig.SchedulerPolicySource, out *SchedulerPolicySource, s conversion.Scope) error { + out.File = (*SchedulerPolicyFileSource)(unsafe.Pointer(in.File)) + out.ConfigMap = (*SchedulerPolicyConfigMapSource)(unsafe.Pointer(in.ConfigMap)) return nil } -// Convert_componentconfig_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration is an autogenerated conversion function. -func Convert_componentconfig_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(in *componentconfig.LeaderElectionConfiguration, out *LeaderElectionConfiguration, s conversion.Scope) error { - return autoConvert_componentconfig_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(in, out, s) +// Convert_componentconfig_SchedulerPolicySource_To_v1alpha1_SchedulerPolicySource is an autogenerated conversion function. 
+func Convert_componentconfig_SchedulerPolicySource_To_v1alpha1_SchedulerPolicySource(in *componentconfig.SchedulerPolicySource, out *SchedulerPolicySource, s conversion.Scope) error { + return autoConvert_componentconfig_SchedulerPolicySource_To_v1alpha1_SchedulerPolicySource(in, out, s) } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/zz_generated.deepcopy.go index 51344da9e6a7..9e6a3b36d19e 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/zz_generated.deepcopy.go @@ -21,52 +21,9 @@ limitations under the License. package v1alpha1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClientConnectionConfiguration).DeepCopyInto(out.(*ClientConnectionConfiguration)) - return nil - }, InType: reflect.TypeOf(&ClientConnectionConfiguration{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*KubeProxyConfiguration).DeepCopyInto(out.(*KubeProxyConfiguration)) - return nil - }, InType: reflect.TypeOf(&KubeProxyConfiguration{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*KubeProxyConntrackConfiguration).DeepCopyInto(out.(*KubeProxyConntrackConfiguration)) - return nil - }, InType: reflect.TypeOf(&KubeProxyConntrackConfiguration{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*KubeProxyIPTablesConfiguration).DeepCopyInto(out.(*KubeProxyIPTablesConfiguration)) - return nil - }, InType: reflect.TypeOf(&KubeProxyIPTablesConfiguration{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*KubeProxyIPVSConfiguration).DeepCopyInto(out.(*KubeProxyIPVSConfiguration)) - return nil - }, InType: reflect.TypeOf(&KubeProxyIPVSConfiguration{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*KubeSchedulerConfiguration).DeepCopyInto(out.(*KubeSchedulerConfiguration)) - return nil - }, InType: reflect.TypeOf(&KubeSchedulerConfiguration{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LeaderElectionConfiguration).DeepCopyInto(out.(*LeaderElectionConfiguration)) - return nil - }, InType: reflect.TypeOf(&LeaderElectionConfiguration{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ClientConnectionConfiguration) DeepCopyInto(out *ClientConnectionConfiguration) { *out = *in @@ -84,39 +41,27 @@ func (in *ClientConnectionConfiguration) DeepCopy() *ClientConnectionConfigurati } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeProxyConfiguration) DeepCopyInto(out *KubeProxyConfiguration) { +func (in *KubeSchedulerConfiguration) DeepCopyInto(out *KubeSchedulerConfiguration) { *out = *in out.TypeMeta = in.TypeMeta + in.AlgorithmSource.DeepCopyInto(&out.AlgorithmSource) + in.LeaderElection.DeepCopyInto(&out.LeaderElection) out.ClientConnection = in.ClientConnection - in.IPTables.DeepCopyInto(&out.IPTables) - out.IPVS = in.IPVS - if in.OOMScoreAdj != nil { - in, out := &in.OOMScoreAdj, &out.OOMScoreAdj - if *in == nil { - *out = nil - } else { - *out = new(int32) - **out = **in - } - } - out.UDPIdleTimeout = in.UDPIdleTimeout - out.Conntrack = in.Conntrack - out.ConfigSyncPeriod = in.ConfigSyncPeriod return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyConfiguration. -func (in *KubeProxyConfiguration) DeepCopy() *KubeProxyConfiguration { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerConfiguration. +func (in *KubeSchedulerConfiguration) DeepCopy() *KubeSchedulerConfiguration { if in == nil { return nil } - out := new(KubeProxyConfiguration) + out := new(KubeSchedulerConfiguration) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *KubeProxyConfiguration) DeepCopyObject() runtime.Object { +func (in *KubeSchedulerConfiguration) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } else { @@ -125,128 +70,146 @@ func (in *KubeProxyConfiguration) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeProxyConntrackConfiguration) DeepCopyInto(out *KubeProxyConntrackConfiguration) { +func (in *KubeSchedulerLeaderElectionConfiguration) DeepCopyInto(out *KubeSchedulerLeaderElectionConfiguration) { *out = *in - out.TCPEstablishedTimeout = in.TCPEstablishedTimeout - out.TCPCloseWaitTimeout = in.TCPCloseWaitTimeout + in.LeaderElectionConfiguration.DeepCopyInto(&out.LeaderElectionConfiguration) return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyConntrackConfiguration. -func (in *KubeProxyConntrackConfiguration) DeepCopy() *KubeProxyConntrackConfiguration { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerLeaderElectionConfiguration. +func (in *KubeSchedulerLeaderElectionConfiguration) DeepCopy() *KubeSchedulerLeaderElectionConfiguration { if in == nil { return nil } - out := new(KubeProxyConntrackConfiguration) + out := new(KubeSchedulerLeaderElectionConfiguration) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *KubeProxyIPTablesConfiguration) DeepCopyInto(out *KubeProxyIPTablesConfiguration) { +func (in *LeaderElectionConfiguration) DeepCopyInto(out *LeaderElectionConfiguration) { *out = *in - if in.MasqueradeBit != nil { - in, out := &in.MasqueradeBit, &out.MasqueradeBit + if in.LeaderElect != nil { + in, out := &in.LeaderElect, &out.LeaderElect if *in == nil { *out = nil } else { - *out = new(int32) + *out = new(bool) **out = **in } } - out.SyncPeriod = in.SyncPeriod - out.MinSyncPeriod = in.MinSyncPeriod + out.LeaseDuration = in.LeaseDuration + out.RenewDeadline = in.RenewDeadline + out.RetryPeriod = in.RetryPeriod return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyIPTablesConfiguration. -func (in *KubeProxyIPTablesConfiguration) DeepCopy() *KubeProxyIPTablesConfiguration { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaderElectionConfiguration. +func (in *LeaderElectionConfiguration) DeepCopy() *LeaderElectionConfiguration { if in == nil { return nil } - out := new(KubeProxyIPTablesConfiguration) + out := new(LeaderElectionConfiguration) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeProxyIPVSConfiguration) DeepCopyInto(out *KubeProxyIPVSConfiguration) { +func (in *SchedulerAlgorithmSource) DeepCopyInto(out *SchedulerAlgorithmSource) { *out = *in - out.SyncPeriod = in.SyncPeriod - out.MinSyncPeriod = in.MinSyncPeriod + if in.Policy != nil { + in, out := &in.Policy, &out.Policy + if *in == nil { + *out = nil + } else { + *out = new(SchedulerPolicySource) + (*in).DeepCopyInto(*out) + } + } + if in.Provider != nil { + in, out := &in.Provider, &out.Provider + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyIPVSConfiguration. -func (in *KubeProxyIPVSConfiguration) DeepCopy() *KubeProxyIPVSConfiguration { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerAlgorithmSource. +func (in *SchedulerAlgorithmSource) DeepCopy() *SchedulerAlgorithmSource { if in == nil { return nil } - out := new(KubeProxyIPVSConfiguration) + out := new(SchedulerAlgorithmSource) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeSchedulerConfiguration) DeepCopyInto(out *KubeSchedulerConfiguration) { +func (in *SchedulerPolicyConfigMapSource) DeepCopyInto(out *SchedulerPolicyConfigMapSource) { *out = *in - out.TypeMeta = in.TypeMeta - if in.EnableProfiling != nil { - in, out := &in.EnableProfiling, &out.EnableProfiling - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } - } - in.LeaderElection.DeepCopyInto(&out.LeaderElection) return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerConfiguration. -func (in *KubeSchedulerConfiguration) DeepCopy() *KubeSchedulerConfiguration { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerPolicyConfigMapSource. 
+func (in *SchedulerPolicyConfigMapSource) DeepCopy() *SchedulerPolicyConfigMapSource { if in == nil { return nil } - out := new(KubeSchedulerConfiguration) + out := new(SchedulerPolicyConfigMapSource) in.DeepCopyInto(out) return out } -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *KubeSchedulerConfiguration) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchedulerPolicyFileSource) DeepCopyInto(out *SchedulerPolicyFileSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerPolicyFileSource. +func (in *SchedulerPolicyFileSource) DeepCopy() *SchedulerPolicyFileSource { + if in == nil { return nil } + out := new(SchedulerPolicyFileSource) + in.DeepCopyInto(out) + return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LeaderElectionConfiguration) DeepCopyInto(out *LeaderElectionConfiguration) { +func (in *SchedulerPolicySource) DeepCopyInto(out *SchedulerPolicySource) { *out = *in - if in.LeaderElect != nil { - in, out := &in.LeaderElect, &out.LeaderElect + if in.File != nil { + in, out := &in.File, &out.File if *in == nil { *out = nil } else { - *out = new(bool) + *out = new(SchedulerPolicyFileSource) + **out = **in + } + } + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + if *in == nil { + *out = nil + } else { + *out = new(SchedulerPolicyConfigMapSource) **out = **in } } - out.LeaseDuration = in.LeaseDuration - out.RenewDeadline = in.RenewDeadline - out.RetryPeriod = in.RetryPeriod return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaderElectionConfiguration. -func (in *LeaderElectionConfiguration) DeepCopy() *LeaderElectionConfiguration { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerPolicySource. +func (in *SchedulerPolicySource) DeepCopy() *SchedulerPolicySource { if in == nil { return nil } - out := new(LeaderElectionConfiguration) + out := new(SchedulerPolicySource) in.DeepCopyInto(out) return out } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/zz_generated.defaults.go index 72f514af67dd..746d4d98026b 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/zz_generated.defaults.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/zz_generated.defaults.go @@ -28,16 +28,11 @@ import ( // Public to allow building arbitrary schemes. // All generated defaulters are covering - they call all nested defaulters. 
func RegisterDefaults(scheme *runtime.Scheme) error { - scheme.AddTypeDefaultingFunc(&KubeProxyConfiguration{}, func(obj interface{}) { SetObjectDefaults_KubeProxyConfiguration(obj.(*KubeProxyConfiguration)) }) scheme.AddTypeDefaultingFunc(&KubeSchedulerConfiguration{}, func(obj interface{}) { SetObjectDefaults_KubeSchedulerConfiguration(obj.(*KubeSchedulerConfiguration)) }) return nil } -func SetObjectDefaults_KubeProxyConfiguration(in *KubeProxyConfiguration) { - SetDefaults_KubeProxyConfiguration(in) -} - func SetObjectDefaults_KubeSchedulerConfiguration(in *KubeSchedulerConfiguration) { SetDefaults_KubeSchedulerConfiguration(in) - SetDefaults_LeaderElectionConfiguration(&in.LeaderElection) + SetDefaults_LeaderElectionConfiguration(&in.LeaderElection.LeaderElectionConfiguration) } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/zz_generated.deepcopy.go index d9ee091e54c0..3722020a3278 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/zz_generated.deepcopy.go @@ -21,76 +21,9 @@ limitations under the License. package componentconfig import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClientConnectionConfiguration).DeepCopyInto(out.(*ClientConnectionConfiguration)) - return nil - }, InType: reflect.TypeOf(&ClientConnectionConfiguration{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GroupResource).DeepCopyInto(out.(*GroupResource)) - return nil - }, InType: reflect.TypeOf(&GroupResource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IPVar).DeepCopyInto(out.(*IPVar)) - return nil - }, InType: reflect.TypeOf(&IPVar{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*KubeControllerManagerConfiguration).DeepCopyInto(out.(*KubeControllerManagerConfiguration)) - return nil - }, InType: reflect.TypeOf(&KubeControllerManagerConfiguration{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*KubeProxyConfiguration).DeepCopyInto(out.(*KubeProxyConfiguration)) - return nil - }, InType: reflect.TypeOf(&KubeProxyConfiguration{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*KubeProxyConntrackConfiguration).DeepCopyInto(out.(*KubeProxyConntrackConfiguration)) - return nil - }, InType: reflect.TypeOf(&KubeProxyConntrackConfiguration{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*KubeProxyIPTablesConfiguration).DeepCopyInto(out.(*KubeProxyIPTablesConfiguration)) - return nil - }, InType: reflect.TypeOf(&KubeProxyIPTablesConfiguration{})}, - 
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*KubeProxyIPVSConfiguration).DeepCopyInto(out.(*KubeProxyIPVSConfiguration)) - return nil - }, InType: reflect.TypeOf(&KubeProxyIPVSConfiguration{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*KubeSchedulerConfiguration).DeepCopyInto(out.(*KubeSchedulerConfiguration)) - return nil - }, InType: reflect.TypeOf(&KubeSchedulerConfiguration{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LeaderElectionConfiguration).DeepCopyInto(out.(*LeaderElectionConfiguration)) - return nil - }, InType: reflect.TypeOf(&LeaderElectionConfiguration{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeRecyclerConfiguration).DeepCopyInto(out.(*PersistentVolumeRecyclerConfiguration)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeRecyclerConfiguration{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PortRangeVar).DeepCopyInto(out.(*PortRangeVar)) - return nil - }, InType: reflect.TypeOf(&PortRangeVar{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*VolumeConfiguration).DeepCopyInto(out.(*VolumeConfiguration)) - return nil - }, InType: reflect.TypeOf(&VolumeConfiguration{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClientConnectionConfiguration) DeepCopyInto(out *ClientConnectionConfiguration) { *out = *in @@ -205,39 +138,27 @@ func (in *KubeControllerManagerConfiguration) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeProxyConfiguration) DeepCopyInto(out *KubeProxyConfiguration) { +func (in *KubeSchedulerConfiguration) DeepCopyInto(out *KubeSchedulerConfiguration) { *out = *in out.TypeMeta = in.TypeMeta + in.AlgorithmSource.DeepCopyInto(&out.AlgorithmSource) + out.LeaderElection = in.LeaderElection out.ClientConnection = in.ClientConnection - in.IPTables.DeepCopyInto(&out.IPTables) - out.IPVS = in.IPVS - if in.OOMScoreAdj != nil { - in, out := &in.OOMScoreAdj, &out.OOMScoreAdj - if *in == nil { - *out = nil - } else { - *out = new(int32) - **out = **in - } - } - out.UDPIdleTimeout = in.UDPIdleTimeout - out.Conntrack = in.Conntrack - out.ConfigSyncPeriod = in.ConfigSyncPeriod return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyConfiguration. -func (in *KubeProxyConfiguration) DeepCopy() *KubeProxyConfiguration { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerConfiguration. +func (in *KubeSchedulerConfiguration) DeepCopy() *KubeSchedulerConfiguration { if in == nil { return nil } - out := new(KubeProxyConfiguration) + out := new(KubeSchedulerConfiguration) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
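// ---- Editor's illustrative sketch (not part of this vendored diff) ----
// Background for the SetObjectDefaults_KubeSchedulerConfiguration change above: the
// scheduler's leader-election type now embeds the shared LeaderElectionConfiguration,
// so the shared defaulter is handed the embedded struct's address. This is a
// self-contained toy version with hypothetical fields and default values.
package main

import "fmt"

type LeaderElectionConfiguration struct {
	LeaseDurationSeconds int
}

type KubeSchedulerLeaderElectionConfiguration struct {
	LeaderElectionConfiguration // embedded; its fields are promoted onto the outer type
	LockObjectName              string
}

func SetDefaults_LeaderElectionConfiguration(in *LeaderElectionConfiguration) {
	if in.LeaseDurationSeconds == 0 {
		in.LeaseDurationSeconds = 15 // hypothetical default, for illustration only
	}
}

func main() {
	cfg := KubeSchedulerLeaderElectionConfiguration{LockObjectName: "kube-scheduler"}
	SetDefaults_LeaderElectionConfiguration(&cfg.LeaderElectionConfiguration)
	fmt.Println(cfg.LeaseDurationSeconds, cfg.LockObjectName) // 15 kube-scheduler
}
// ---- end of editor's sketch ----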
-func (in *KubeProxyConfiguration) DeepCopyObject() runtime.Object { +func (in *KubeSchedulerConfiguration) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } else { @@ -246,151 +167,178 @@ func (in *KubeProxyConfiguration) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeProxyConntrackConfiguration) DeepCopyInto(out *KubeProxyConntrackConfiguration) { +func (in *KubeSchedulerLeaderElectionConfiguration) DeepCopyInto(out *KubeSchedulerLeaderElectionConfiguration) { *out = *in - out.TCPEstablishedTimeout = in.TCPEstablishedTimeout - out.TCPCloseWaitTimeout = in.TCPCloseWaitTimeout + out.LeaderElectionConfiguration = in.LeaderElectionConfiguration return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyConntrackConfiguration. -func (in *KubeProxyConntrackConfiguration) DeepCopy() *KubeProxyConntrackConfiguration { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerLeaderElectionConfiguration. +func (in *KubeSchedulerLeaderElectionConfiguration) DeepCopy() *KubeSchedulerLeaderElectionConfiguration { if in == nil { return nil } - out := new(KubeProxyConntrackConfiguration) + out := new(KubeSchedulerLeaderElectionConfiguration) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeProxyIPTablesConfiguration) DeepCopyInto(out *KubeProxyIPTablesConfiguration) { +func (in *LeaderElectionConfiguration) DeepCopyInto(out *LeaderElectionConfiguration) { *out = *in - if in.MasqueradeBit != nil { - in, out := &in.MasqueradeBit, &out.MasqueradeBit - if *in == nil { - *out = nil - } else { - *out = new(int32) - **out = **in - } - } - out.SyncPeriod = in.SyncPeriod - out.MinSyncPeriod = in.MinSyncPeriod + out.LeaseDuration = in.LeaseDuration + out.RenewDeadline = in.RenewDeadline + out.RetryPeriod = in.RetryPeriod return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyIPTablesConfiguration. -func (in *KubeProxyIPTablesConfiguration) DeepCopy() *KubeProxyIPTablesConfiguration { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaderElectionConfiguration. +func (in *LeaderElectionConfiguration) DeepCopy() *LeaderElectionConfiguration { if in == nil { return nil } - out := new(KubeProxyIPTablesConfiguration) + out := new(LeaderElectionConfiguration) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeProxyIPVSConfiguration) DeepCopyInto(out *KubeProxyIPVSConfiguration) { +func (in *PersistentVolumeRecyclerConfiguration) DeepCopyInto(out *PersistentVolumeRecyclerConfiguration) { *out = *in - out.SyncPeriod = in.SyncPeriod - out.MinSyncPeriod = in.MinSyncPeriod return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyIPVSConfiguration. -func (in *KubeProxyIPVSConfiguration) DeepCopy() *KubeProxyIPVSConfiguration { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeRecyclerConfiguration. 
+func (in *PersistentVolumeRecyclerConfiguration) DeepCopy() *PersistentVolumeRecyclerConfiguration { if in == nil { return nil } - out := new(KubeProxyIPVSConfiguration) + out := new(PersistentVolumeRecyclerConfiguration) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeSchedulerConfiguration) DeepCopyInto(out *KubeSchedulerConfiguration) { +func (in *PortRangeVar) DeepCopyInto(out *PortRangeVar) { *out = *in - out.TypeMeta = in.TypeMeta - out.LeaderElection = in.LeaderElection + if in.Val != nil { + in, out := &in.Val, &out.Val + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerConfiguration. -func (in *KubeSchedulerConfiguration) DeepCopy() *KubeSchedulerConfiguration { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortRangeVar. +func (in *PortRangeVar) DeepCopy() *PortRangeVar { if in == nil { return nil } - out := new(KubeSchedulerConfiguration) + out := new(PortRangeVar) in.DeepCopyInto(out) return out } -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *KubeSchedulerConfiguration) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchedulerAlgorithmSource) DeepCopyInto(out *SchedulerAlgorithmSource) { + *out = *in + if in.Policy != nil { + in, out := &in.Policy, &out.Policy + if *in == nil { + *out = nil + } else { + *out = new(SchedulerPolicySource) + (*in).DeepCopyInto(*out) + } + } + if in.Provider != nil { + in, out := &in.Provider, &out.Provider + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerAlgorithmSource. +func (in *SchedulerAlgorithmSource) DeepCopy() *SchedulerAlgorithmSource { + if in == nil { return nil } + out := new(SchedulerAlgorithmSource) + in.DeepCopyInto(out) + return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LeaderElectionConfiguration) DeepCopyInto(out *LeaderElectionConfiguration) { +func (in *SchedulerPolicyConfigMapSource) DeepCopyInto(out *SchedulerPolicyConfigMapSource) { *out = *in - out.LeaseDuration = in.LeaseDuration - out.RenewDeadline = in.RenewDeadline - out.RetryPeriod = in.RetryPeriod return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaderElectionConfiguration. -func (in *LeaderElectionConfiguration) DeepCopy() *LeaderElectionConfiguration { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerPolicyConfigMapSource. +func (in *SchedulerPolicyConfigMapSource) DeepCopy() *SchedulerPolicyConfigMapSource { if in == nil { return nil } - out := new(LeaderElectionConfiguration) + out := new(SchedulerPolicyConfigMapSource) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PersistentVolumeRecyclerConfiguration) DeepCopyInto(out *PersistentVolumeRecyclerConfiguration) { +func (in *SchedulerPolicyFileSource) DeepCopyInto(out *SchedulerPolicyFileSource) { *out = *in return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeRecyclerConfiguration. -func (in *PersistentVolumeRecyclerConfiguration) DeepCopy() *PersistentVolumeRecyclerConfiguration { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerPolicyFileSource. +func (in *SchedulerPolicyFileSource) DeepCopy() *SchedulerPolicyFileSource { if in == nil { return nil } - out := new(PersistentVolumeRecyclerConfiguration) + out := new(SchedulerPolicyFileSource) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PortRangeVar) DeepCopyInto(out *PortRangeVar) { +func (in *SchedulerPolicySource) DeepCopyInto(out *SchedulerPolicySource) { *out = *in - if in.Val != nil { - in, out := &in.Val, &out.Val + if in.File != nil { + in, out := &in.File, &out.File if *in == nil { *out = nil } else { - *out = new(string) + *out = new(SchedulerPolicyFileSource) + **out = **in + } + } + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + if *in == nil { + *out = nil + } else { + *out = new(SchedulerPolicyConfigMapSource) **out = **in } } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortRangeVar. -func (in *PortRangeVar) DeepCopy() *PortRangeVar { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerPolicySource. +func (in *SchedulerPolicySource) DeepCopy() *SchedulerPolicySource { if in == nil { return nil } - out := new(PortRangeVar) + out := new(SchedulerPolicySource) in.DeepCopyInto(out) return out } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/core/BUILD new file mode 100644 index 000000000000..a4bdda2a8889 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/BUILD @@ -0,0 +1,60 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "annotation_key_constants.go", + "doc.go", + "field_constants.go", + "json.go", + "objectreference.go", + "register.go", + "resource.go", + "taint.go", + "toleration.go", + "types.go", + "zz_generated.deepcopy.go", + ], + importpath = "k8s.io/kubernetes/pkg/apis/core", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["taint_test.go"], + importpath = "k8s.io/kubernetes/pkg/apis/core", + library = ":go_default_library", +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = 
"all-srcs", + srcs = [ + ":package-srcs", + "//pkg/apis/core/fuzzer:all-srcs", + "//pkg/apis/core/helper:all-srcs", + "//pkg/apis/core/install:all-srcs", + "//pkg/apis/core/pods:all-srcs", + "//pkg/apis/core/v1:all-srcs", + "//pkg/apis/core/validation:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/api/OWNERS b/vendor/k8s.io/kubernetes/pkg/apis/core/OWNERS similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/api/OWNERS rename to vendor/k8s.io/kubernetes/pkg/apis/core/OWNERS diff --git a/vendor/k8s.io/kubernetes/pkg/api/annotation_key_constants.go b/vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go similarity index 94% rename from vendor/k8s.io/kubernetes/pkg/api/annotation_key_constants.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go index e935424462b5..131fdd9905ec 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/annotation_key_constants.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go @@ -16,7 +16,7 @@ limitations under the License. // This file should be consistent with pkg/api/v1/annotation_key_constants.go. -package api +package core const ( // ImagePolicyFailedOpenKey is added to pods created by failing open when the image policy @@ -45,12 +45,6 @@ const ( // to one container of a pod. SeccompContainerAnnotationKeyPrefix string = "container.seccomp.security.alpha.kubernetes.io/" - // CreatedByAnnotation represents the key used to store the spec(json) - // used to create the resource. - // This field is deprecated in favor of ControllerRef (see #44407). - // TODO(#50720): Remove this field in v1.9. - CreatedByAnnotation = "kubernetes.io/created-by" - // PreferAvoidPodsAnnotationKey represents the key of preferAvoidPods data (json serialized) // in the Annotations of a Node. PreferAvoidPodsAnnotationKey string = "scheduler.alpha.kubernetes.io/preferAvoidPods" @@ -74,6 +68,10 @@ const ( // This annotation can be attached to node. ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl" + // BootstrapCheckpointAnnotationKey represents a Resource (Pod) that should be checkpointed by + // the kubelet prior to running + BootstrapCheckpointAnnotationKey string = "node.kubernetes.io/bootstrap-checkpoint" + // annotation key prefix used to identify non-convertible json paths. NonConvertibleAnnotationPrefix = "non-convertible.kubernetes.io" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/core/doc.go new file mode 100644 index 000000000000..8836eda4e863 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package + +// Package api contains the latest (or "internal") version of the +// Kubernetes API objects. This is the API objects as represented in memory. 
+// The contract presented to clients is located in the versioned packages, +// which are sub-directories. The first one is "v1". Those packages +// describe how a particular version is serialized to storage/network. +package core diff --git a/vendor/k8s.io/kubernetes/pkg/api/field_constants.go b/vendor/k8s.io/kubernetes/pkg/apis/core/field_constants.go similarity index 92% rename from vendor/k8s.io/kubernetes/pkg/api/field_constants.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/field_constants.go index 5ead0f13feb8..a26f80568c0b 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/field_constants.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/field_constants.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package api +package core // Field path constants that are specific to the internal API // representation. @@ -25,8 +25,8 @@ const ( PodStatusField = "status.phase" SecretTypeField = "type" - EventReasonField = "reason" - EventSourceField = "source" + EventReasonField = "action" + EventSourceField = "reportingComponent" EventTypeField = "type" EventInvolvedKindField = "involvedObject.kind" EventInvolvedNamespaceField = "involvedObject.namespace" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/helper/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/core/helper/BUILD new file mode 100644 index 000000000000..81b0a2fbffa2 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/helper/BUILD @@ -0,0 +1,51 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = ["helpers_test.go"], + importpath = "k8s.io/kubernetes/pkg/apis/core/helper", + library = ":go_default_library", + deps = [ + "//pkg/apis/core:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + ], +) + +go_library( + name = "go_default_library", + srcs = ["helpers.go"], + importpath = "k8s.io/kubernetes/pkg/apis/core/helper", + deps = [ + "//pkg/apis/core:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/selection:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/apis/core/helper/qos:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go b/vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go new file mode 100644 index 000000000000..ca397c8eb7ff --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go @@ -0,0 +1,644 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package helper + +import ( + "encoding/json" + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/kubernetes/pkg/apis/core" +) + +// IsHugePageResourceName returns true if the resource name has the huge page +// resource prefix. +func IsHugePageResourceName(name core.ResourceName) bool { + return strings.HasPrefix(string(name), core.ResourceHugePagesPrefix) +} + +// IsQuotaHugePageResourceName returns true if the resource name has the quota +// related huge page resource prefix. +func IsQuotaHugePageResourceName(name core.ResourceName) bool { + return strings.HasPrefix(string(name), core.ResourceHugePagesPrefix) || strings.HasPrefix(string(name), core.ResourceRequestsHugePagesPrefix) +} + +// HugePageResourceName returns a ResourceName with the canonical hugepage +// prefix prepended for the specified page size. The page size is converted +// to its canonical representation. +func HugePageResourceName(pageSize resource.Quantity) core.ResourceName { + return core.ResourceName(fmt.Sprintf("%s%s", core.ResourceHugePagesPrefix, pageSize.String())) +} + +// HugePageSizeFromResourceName returns the page size for the specified huge page +// resource name. If the specified input is not a valid huge page resource name +// an error is returned. +func HugePageSizeFromResourceName(name core.ResourceName) (resource.Quantity, error) { + if !IsHugePageResourceName(name) { + return resource.Quantity{}, fmt.Errorf("resource name: %s is not valid hugepage name", name) + } + pageSize := strings.TrimPrefix(string(name), core.ResourceHugePagesPrefix) + return resource.ParseQuantity(pageSize) +} + +// NonConvertibleFields iterates over the provided map and filters out all but +// any keys with the "non-convertible.kubernetes.io" prefix. +func NonConvertibleFields(annotations map[string]string) map[string]string { + nonConvertibleKeys := map[string]string{} + for key, value := range annotations { + if strings.HasPrefix(key, core.NonConvertibleAnnotationPrefix) { + nonConvertibleKeys[key] = value + } + } + return nonConvertibleKeys +} + +// Semantic can do semantic deep equality checks for core objects. +// Example: apiequality.Semantic.DeepEqual(aPod, aPodWithNonNilButEmptyMaps) == true +var Semantic = conversion.EqualitiesOrDie( + func(a, b resource.Quantity) bool { + // Ignore formatting, only care that numeric value stayed the same. + // TODO: if we decide it's important, it should be safe to start comparing the format. + // + // Uninitialized quantities are equivalent to 0 quantities. 
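// ---- Editor's illustrative sketch (not part of this vendored diff) ----
// Usage of the hugepage helpers added just above, assuming the vendored k8s.io
// packages in this PR are on the import path. HugePageResourceName and
// HugePageSizeFromResourceName round-trip between a page size and the
// "hugepages-<size>" resource name.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/kubernetes/pkg/apis/core/helper"
)

func main() {
	name := helper.HugePageResourceName(resource.MustParse("2Mi"))
	fmt.Println(name) // hugepages-2Mi

	size, err := helper.HugePageSizeFromResourceName(name)
	if err != nil {
		panic(err)
	}
	fmt.Println(size.String()) // 2Mi
}
// ---- end of editor's sketch ----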
+ return a.Cmp(b) == 0 + }, + func(a, b metav1.MicroTime) bool { + return a.UTC() == b.UTC() + }, + func(a, b metav1.Time) bool { + return a.UTC() == b.UTC() + }, + func(a, b labels.Selector) bool { + return a.String() == b.String() + }, + func(a, b fields.Selector) bool { + return a.String() == b.String() + }, +) + +var standardResourceQuotaScopes = sets.NewString( + string(core.ResourceQuotaScopeTerminating), + string(core.ResourceQuotaScopeNotTerminating), + string(core.ResourceQuotaScopeBestEffort), + string(core.ResourceQuotaScopeNotBestEffort), +) + +// IsStandardResourceQuotaScope returns true if the scope is a standard value +func IsStandardResourceQuotaScope(str string) bool { + return standardResourceQuotaScopes.Has(str) +} + +var podObjectCountQuotaResources = sets.NewString( + string(core.ResourcePods), +) + +var podComputeQuotaResources = sets.NewString( + string(core.ResourceCPU), + string(core.ResourceMemory), + string(core.ResourceLimitsCPU), + string(core.ResourceLimitsMemory), + string(core.ResourceRequestsCPU), + string(core.ResourceRequestsMemory), +) + +// IsResourceQuotaScopeValidForResource returns true if the resource applies to the specified scope +func IsResourceQuotaScopeValidForResource(scope core.ResourceQuotaScope, resource string) bool { + switch scope { + case core.ResourceQuotaScopeTerminating, core.ResourceQuotaScopeNotTerminating, core.ResourceQuotaScopeNotBestEffort: + return podObjectCountQuotaResources.Has(resource) || podComputeQuotaResources.Has(resource) + case core.ResourceQuotaScopeBestEffort: + return podObjectCountQuotaResources.Has(resource) + default: + return true + } +} + +var standardContainerResources = sets.NewString( + string(core.ResourceCPU), + string(core.ResourceMemory), + string(core.ResourceEphemeralStorage), +) + +// IsStandardContainerResourceName returns true if the container can make a resource request +// for the specified resource +func IsStandardContainerResourceName(str string) bool { + return standardContainerResources.Has(str) || IsHugePageResourceName(core.ResourceName(str)) +} + +// IsExtendedResourceName returns true if the resource name is not in the +// default namespace. +func IsExtendedResourceName(name core.ResourceName) bool { + return !IsDefaultNamespaceResource(name) +} + +// IsDefaultNamespaceResource returns true if the resource name is in the +// *kubernetes.io/ namespace. Partially-qualified (unprefixed) names are +// implicitly in the kubernetes.io/ namespace. +func IsDefaultNamespaceResource(name core.ResourceName) bool { + return !strings.Contains(string(name), "/") || + strings.Contains(string(name), core.ResourceDefaultNamespacePrefix) +} + +var overcommitBlacklist = sets.NewString(string(core.ResourceNvidiaGPU)) + +// IsOvercommitAllowed returns true if the resource is in the default +// namespace and not blacklisted. 
+func IsOvercommitAllowed(name core.ResourceName) bool { + return IsDefaultNamespaceResource(name) && + !IsHugePageResourceName(name) && + !overcommitBlacklist.Has(string(name)) +} + +var standardLimitRangeTypes = sets.NewString( + string(core.LimitTypePod), + string(core.LimitTypeContainer), + string(core.LimitTypePersistentVolumeClaim), +) + +// IsStandardLimitRangeType returns true if the type is Pod or Container +func IsStandardLimitRangeType(str string) bool { + return standardLimitRangeTypes.Has(str) +} + +var standardQuotaResources = sets.NewString( + string(core.ResourceCPU), + string(core.ResourceMemory), + string(core.ResourceEphemeralStorage), + string(core.ResourceRequestsCPU), + string(core.ResourceRequestsMemory), + string(core.ResourceRequestsStorage), + string(core.ResourceRequestsEphemeralStorage), + string(core.ResourceLimitsCPU), + string(core.ResourceLimitsMemory), + string(core.ResourceLimitsEphemeralStorage), + string(core.ResourcePods), + string(core.ResourceQuotas), + string(core.ResourceServices), + string(core.ResourceReplicationControllers), + string(core.ResourceSecrets), + string(core.ResourcePersistentVolumeClaims), + string(core.ResourceConfigMaps), + string(core.ResourceServicesNodePorts), + string(core.ResourceServicesLoadBalancers), +) + +// IsStandardQuotaResourceName returns true if the resource is known to +// the quota tracking system +func IsStandardQuotaResourceName(str string) bool { + return standardQuotaResources.Has(str) || IsQuotaHugePageResourceName(core.ResourceName(str)) +} + +var standardResources = sets.NewString( + string(core.ResourceCPU), + string(core.ResourceMemory), + string(core.ResourceEphemeralStorage), + string(core.ResourceRequestsCPU), + string(core.ResourceRequestsMemory), + string(core.ResourceRequestsEphemeralStorage), + string(core.ResourceLimitsCPU), + string(core.ResourceLimitsMemory), + string(core.ResourceLimitsEphemeralStorage), + string(core.ResourcePods), + string(core.ResourceQuotas), + string(core.ResourceServices), + string(core.ResourceReplicationControllers), + string(core.ResourceSecrets), + string(core.ResourceConfigMaps), + string(core.ResourcePersistentVolumeClaims), + string(core.ResourceStorage), + string(core.ResourceRequestsStorage), + string(core.ResourceServicesNodePorts), + string(core.ResourceServicesLoadBalancers), +) + +// IsStandardResourceName returns true if the resource is known to the system +func IsStandardResourceName(str string) bool { + return standardResources.Has(str) || IsQuotaHugePageResourceName(core.ResourceName(str)) +} + +var integerResources = sets.NewString( + string(core.ResourcePods), + string(core.ResourceQuotas), + string(core.ResourceServices), + string(core.ResourceReplicationControllers), + string(core.ResourceSecrets), + string(core.ResourceConfigMaps), + string(core.ResourcePersistentVolumeClaims), + string(core.ResourceServicesNodePorts), + string(core.ResourceServicesLoadBalancers), +) + +// IsIntegerResourceName returns true if the resource is measured in integer values +func IsIntegerResourceName(str string) bool { + return integerResources.Has(str) || IsExtendedResourceName(core.ResourceName(str)) +} + +// Extended and HugePages resources +func IsScalarResourceName(name core.ResourceName) bool { + return IsExtendedResourceName(name) || IsHugePageResourceName(name) +} + +// this function aims to check if the service's ClusterIP is set or not +// the objective is not to perform validation here +func IsServiceIPSet(service *core.Service) bool { + return 
service.Spec.ClusterIP != core.ClusterIPNone && service.Spec.ClusterIP != "" +} + +// this function aims to check if the service's cluster IP is requested or not +func IsServiceIPRequested(service *core.Service) bool { + // ExternalName services are CNAME aliases to external ones. Ignore the IP. + if service.Spec.Type == core.ServiceTypeExternalName { + return false + } + return service.Spec.ClusterIP == "" +} + +var standardFinalizers = sets.NewString( + string(core.FinalizerKubernetes), + metav1.FinalizerOrphanDependents, + metav1.FinalizerDeleteDependents, +) + +// HasAnnotation returns a bool if passed in annotation exists +func HasAnnotation(obj core.ObjectMeta, ann string) bool { + _, found := obj.Annotations[ann] + return found +} + +// SetMetaDataAnnotation sets the annotation and value +func SetMetaDataAnnotation(obj *core.ObjectMeta, ann string, value string) { + if obj.Annotations == nil { + obj.Annotations = make(map[string]string) + } + obj.Annotations[ann] = value +} + +func IsStandardFinalizerName(str string) bool { + return standardFinalizers.Has(str) +} + +// AddToNodeAddresses appends the NodeAddresses to the passed-by-pointer slice, +// only if they do not already exist +func AddToNodeAddresses(addresses *[]core.NodeAddress, addAddresses ...core.NodeAddress) { + for _, add := range addAddresses { + exists := false + for _, existing := range *addresses { + if existing.Address == add.Address && existing.Type == add.Type { + exists = true + break + } + } + if !exists { + *addresses = append(*addresses, add) + } + } +} + +// TODO: make method on LoadBalancerStatus? +func LoadBalancerStatusEqual(l, r *core.LoadBalancerStatus) bool { + return ingressSliceEqual(l.Ingress, r.Ingress) +} + +func ingressSliceEqual(lhs, rhs []core.LoadBalancerIngress) bool { + if len(lhs) != len(rhs) { + return false + } + for i := range lhs { + if !ingressEqual(&lhs[i], &rhs[i]) { + return false + } + } + return true +} + +func ingressEqual(lhs, rhs *core.LoadBalancerIngress) bool { + if lhs.IP != rhs.IP { + return false + } + if lhs.Hostname != rhs.Hostname { + return false + } + return true +} + +// TODO: make method on LoadBalancerStatus? +func LoadBalancerStatusDeepCopy(lb *core.LoadBalancerStatus) *core.LoadBalancerStatus { + c := &core.LoadBalancerStatus{} + c.Ingress = make([]core.LoadBalancerIngress, len(lb.Ingress)) + for i := range lb.Ingress { + c.Ingress[i] = lb.Ingress[i] + } + return c +} + +// GetAccessModesAsString returns a string representation of an array of access modes. +// modes, when present, are always in the same order: RWO,ROX,RWX. 
+func GetAccessModesAsString(modes []core.PersistentVolumeAccessMode) string { + modes = removeDuplicateAccessModes(modes) + modesStr := []string{} + if containsAccessMode(modes, core.ReadWriteOnce) { + modesStr = append(modesStr, "RWO") + } + if containsAccessMode(modes, core.ReadOnlyMany) { + modesStr = append(modesStr, "ROX") + } + if containsAccessMode(modes, core.ReadWriteMany) { + modesStr = append(modesStr, "RWX") + } + return strings.Join(modesStr, ",") +} + +// GetAccessModesAsString returns an array of AccessModes from a string created by GetAccessModesAsString +func GetAccessModesFromString(modes string) []core.PersistentVolumeAccessMode { + strmodes := strings.Split(modes, ",") + accessModes := []core.PersistentVolumeAccessMode{} + for _, s := range strmodes { + s = strings.Trim(s, " ") + switch { + case s == "RWO": + accessModes = append(accessModes, core.ReadWriteOnce) + case s == "ROX": + accessModes = append(accessModes, core.ReadOnlyMany) + case s == "RWX": + accessModes = append(accessModes, core.ReadWriteMany) + } + } + return accessModes +} + +// removeDuplicateAccessModes returns an array of access modes without any duplicates +func removeDuplicateAccessModes(modes []core.PersistentVolumeAccessMode) []core.PersistentVolumeAccessMode { + accessModes := []core.PersistentVolumeAccessMode{} + for _, m := range modes { + if !containsAccessMode(accessModes, m) { + accessModes = append(accessModes, m) + } + } + return accessModes +} + +func containsAccessMode(modes []core.PersistentVolumeAccessMode, mode core.PersistentVolumeAccessMode) bool { + for _, m := range modes { + if m == mode { + return true + } + } + return false +} + +// NodeSelectorRequirementsAsSelector converts the []NodeSelectorRequirement core type into a struct that implements +// labels.Selector. +func NodeSelectorRequirementsAsSelector(nsm []core.NodeSelectorRequirement) (labels.Selector, error) { + if len(nsm) == 0 { + return labels.Nothing(), nil + } + selector := labels.NewSelector() + for _, expr := range nsm { + var op selection.Operator + switch expr.Operator { + case core.NodeSelectorOpIn: + op = selection.In + case core.NodeSelectorOpNotIn: + op = selection.NotIn + case core.NodeSelectorOpExists: + op = selection.Exists + case core.NodeSelectorOpDoesNotExist: + op = selection.DoesNotExist + case core.NodeSelectorOpGt: + op = selection.GreaterThan + case core.NodeSelectorOpLt: + op = selection.LessThan + default: + return nil, fmt.Errorf("%q is not a valid node selector operator", expr.Operator) + } + r, err := labels.NewRequirement(expr.Key, op, expr.Values) + if err != nil { + return nil, err + } + selector = selector.Add(*r) + } + return selector, nil +} + +// GetTolerationsFromPodAnnotations gets the json serialized tolerations data from Pod.Annotations +// and converts it to the []Toleration type in core. +func GetTolerationsFromPodAnnotations(annotations map[string]string) ([]core.Toleration, error) { + var tolerations []core.Toleration + if len(annotations) > 0 && annotations[core.TolerationsAnnotationKey] != "" { + err := json.Unmarshal([]byte(annotations[core.TolerationsAnnotationKey]), &tolerations) + if err != nil { + return tolerations, err + } + } + return tolerations, nil +} + +// AddOrUpdateTolerationInPod tries to add a toleration to the pod's toleration list. +// Returns true if something was updated, false otherwise. 
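// ---- Editor's illustrative sketch (not part of this vendored diff) ----
// Usage of the access-mode helpers above, assuming the vendored packages in this PR
// are importable. GetAccessModesAsString deduplicates and emits the canonical
// RWO,ROX,RWX order; GetAccessModesFromString parses that form back.
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/apis/core/helper"
)

func main() {
	modes := []core.PersistentVolumeAccessMode{
		core.ReadWriteMany,
		core.ReadWriteOnce,
		core.ReadWriteOnce, // duplicate; dropped by the helper
	}
	s := helper.GetAccessModesAsString(modes)
	fmt.Println(s) // RWO,RWX

	back := helper.GetAccessModesFromString(s)
	fmt.Println(len(back)) // 2
}
// ---- end of editor's sketch ----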
+func AddOrUpdateTolerationInPod(pod *core.Pod, toleration *core.Toleration) bool { + podTolerations := pod.Spec.Tolerations + + var newTolerations []core.Toleration + updated := false + for i := range podTolerations { + if toleration.MatchToleration(&podTolerations[i]) { + if Semantic.DeepEqual(toleration, podTolerations[i]) { + return false + } + newTolerations = append(newTolerations, *toleration) + updated = true + continue + } + + newTolerations = append(newTolerations, podTolerations[i]) + } + + if !updated { + newTolerations = append(newTolerations, *toleration) + } + + pod.Spec.Tolerations = newTolerations + return true +} + +// TolerationToleratesTaint checks if the toleration tolerates the taint. +func TolerationToleratesTaint(toleration *core.Toleration, taint *core.Taint) bool { + if len(toleration.Effect) != 0 && toleration.Effect != taint.Effect { + return false + } + + if toleration.Key != taint.Key { + return false + } + // TODO: Use proper defaulting when Toleration becomes a field of PodSpec + if (len(toleration.Operator) == 0 || toleration.Operator == core.TolerationOpEqual) && toleration.Value == taint.Value { + return true + } + if toleration.Operator == core.TolerationOpExists { + return true + } + return false +} + +// TaintToleratedByTolerations checks if taint is tolerated by any of the tolerations. +func TaintToleratedByTolerations(taint *core.Taint, tolerations []core.Toleration) bool { + tolerated := false + for i := range tolerations { + if TolerationToleratesTaint(&tolerations[i], taint) { + tolerated = true + break + } + } + return tolerated +} + +// GetTaintsFromNodeAnnotations gets the json serialized taints data from Pod.Annotations +// and converts it to the []Taint type in core. +func GetTaintsFromNodeAnnotations(annotations map[string]string) ([]core.Taint, error) { + var taints []core.Taint + if len(annotations) > 0 && annotations[core.TaintsAnnotationKey] != "" { + err := json.Unmarshal([]byte(annotations[core.TaintsAnnotationKey]), &taints) + if err != nil { + return []core.Taint{}, err + } + } + return taints, nil +} + +// SysctlsFromPodAnnotations parses the sysctl annotations into a slice of safe Sysctls +// and a slice of unsafe Sysctls. This is only a convenience wrapper around +// SysctlsFromPodAnnotation. +func SysctlsFromPodAnnotations(a map[string]string) ([]core.Sysctl, []core.Sysctl, error) { + safe, err := SysctlsFromPodAnnotation(a[core.SysctlsPodAnnotationKey]) + if err != nil { + return nil, nil, err + } + unsafe, err := SysctlsFromPodAnnotation(a[core.UnsafeSysctlsPodAnnotationKey]) + if err != nil { + return nil, nil, err + } + + return safe, unsafe, nil +} + +// SysctlsFromPodAnnotation parses an annotation value into a slice of Sysctls. +func SysctlsFromPodAnnotation(annotation string) ([]core.Sysctl, error) { + if len(annotation) == 0 { + return nil, nil + } + + kvs := strings.Split(annotation, ",") + sysctls := make([]core.Sysctl, len(kvs)) + for i, kv := range kvs { + cs := strings.Split(kv, "=") + if len(cs) != 2 || len(cs[0]) == 0 { + return nil, fmt.Errorf("sysctl %q not of the format sysctl_name=value", kv) + } + sysctls[i].Name = cs[0] + sysctls[i].Value = cs[1] + } + return sysctls, nil +} + +// PodAnnotationsFromSysctls creates an annotation value for a slice of Sysctls. 
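// ---- Editor's illustrative sketch (not part of this vendored diff) ----
// Usage of SysctlsFromPodAnnotation above and its inverse PodAnnotationsFromSysctls
// just below, assuming the vendored packages in this PR are importable. The annotation
// value is a comma-separated list of name=value pairs.
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/core/helper"
)

func main() {
	sysctls, err := helper.SysctlsFromPodAnnotation("kernel.shm_rmid_forced=1,kernel.msgmax=65536")
	if err != nil {
		panic(err)
	}
	for _, s := range sysctls {
		fmt.Println(s.Name, s.Value)
	}

	// Round-trip back to the annotation form.
	fmt.Println(helper.PodAnnotationsFromSysctls(sysctls)) // kernel.shm_rmid_forced=1,kernel.msgmax=65536
}
// ---- end of editor's sketch ----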
+func PodAnnotationsFromSysctls(sysctls []core.Sysctl) string { + if len(sysctls) == 0 { + return "" + } + + kvs := make([]string, len(sysctls)) + for i := range sysctls { + kvs[i] = fmt.Sprintf("%s=%s", sysctls[i].Name, sysctls[i].Value) + } + return strings.Join(kvs, ",") +} + +// GetPersistentVolumeClass returns StorageClassName. +func GetPersistentVolumeClass(volume *core.PersistentVolume) string { + // Use beta annotation first + if class, found := volume.Annotations[core.BetaStorageClassAnnotation]; found { + return class + } + + return volume.Spec.StorageClassName +} + +// GetPersistentVolumeClaimClass returns StorageClassName. If no storage class was +// requested, it returns "". +func GetPersistentVolumeClaimClass(claim *core.PersistentVolumeClaim) string { + // Use beta annotation first + if class, found := claim.Annotations[core.BetaStorageClassAnnotation]; found { + return class + } + + if claim.Spec.StorageClassName != nil { + return *claim.Spec.StorageClassName + } + + return "" +} + +// PersistentVolumeClaimHasClass returns true if given claim has set StorageClassName field. +func PersistentVolumeClaimHasClass(claim *core.PersistentVolumeClaim) bool { + // Use beta annotation first + if _, found := claim.Annotations[core.BetaStorageClassAnnotation]; found { + return true + } + + if claim.Spec.StorageClassName != nil { + return true + } + + return false +} + +// GetStorageNodeAffinityFromAnnotation gets the json serialized data from PersistentVolume.Annotations +// and converts it to the NodeAffinity type in core. +// TODO: update when storage node affinity graduates to beta +func GetStorageNodeAffinityFromAnnotation(annotations map[string]string) (*core.NodeAffinity, error) { + if len(annotations) > 0 && annotations[core.AlphaStorageNodeAffinityAnnotation] != "" { + var affinity core.NodeAffinity + err := json.Unmarshal([]byte(annotations[core.AlphaStorageNodeAffinityAnnotation]), &affinity) + if err != nil { + return nil, err + } + return &affinity, nil + } + return nil, nil +} + +// Converts NodeAffinity type to Alpha annotation for use in PersistentVolumes +// TODO: update when storage node affinity graduates to beta +func StorageNodeAffinityToAlphaAnnotation(annotations map[string]string, affinity *core.NodeAffinity) error { + if affinity == nil { + return nil + } + + json, err := json.Marshal(*affinity) + if err != nil { + return err + } + annotations[core.AlphaStorageNodeAffinityAnnotation] = string(json) + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/helper/qos/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/core/helper/qos/BUILD new file mode 100644 index 000000000000..a029fdb5b8f9 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/helper/qos/BUILD @@ -0,0 +1,31 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = ["qos.go"], + importpath = "k8s.io/kubernetes/pkg/apis/core/helper/qos", + deps = [ + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/helper:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/helper/qos/qos.go 
b/vendor/k8s.io/kubernetes/pkg/apis/core/helper/qos/qos.go new file mode 100644 index 000000000000..18414322c820 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/helper/qos/qos.go @@ -0,0 +1,97 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// NOTE: DO NOT use those helper functions through client-go, the +// package path will be changed in the future. +package qos + +import ( + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/helper" +) + +func isSupportedQoSComputeResource(name core.ResourceName) bool { + supportedQoSComputeResources := sets.NewString(string(core.ResourceCPU), string(core.ResourceMemory)) + return supportedQoSComputeResources.Has(string(name)) || helper.IsHugePageResourceName(name) +} + +// GetPodQOS returns the QoS class of a pod. +// A pod is besteffort if none of its containers have specified any requests or limits. +// A pod is guaranteed only when requests and limits are specified for all the containers and they are equal. +// A pod is burstable if limits and requests do not match across all containers. +func GetPodQOS(pod *core.Pod) core.PodQOSClass { + requests := core.ResourceList{} + limits := core.ResourceList{} + zeroQuantity := resource.MustParse("0") + isGuaranteed := true + for _, container := range pod.Spec.Containers { + // process requests + for name, quantity := range container.Resources.Requests { + if !isSupportedQoSComputeResource(name) { + continue + } + if quantity.Cmp(zeroQuantity) == 1 { + delta := quantity.Copy() + if _, exists := requests[name]; !exists { + requests[name] = *delta + } else { + delta.Add(requests[name]) + requests[name] = *delta + } + } + } + // process limits + qosLimitsFound := sets.NewString() + for name, quantity := range container.Resources.Limits { + if !isSupportedQoSComputeResource(name) { + continue + } + if quantity.Cmp(zeroQuantity) == 1 { + qosLimitsFound.Insert(string(name)) + delta := quantity.Copy() + if _, exists := limits[name]; !exists { + limits[name] = *delta + } else { + delta.Add(limits[name]) + limits[name] = *delta + } + } + } + + if !qosLimitsFound.HasAll(string(core.ResourceMemory), string(core.ResourceCPU)) { + isGuaranteed = false + } + } + if len(requests) == 0 && len(limits) == 0 { + return core.PodQOSBestEffort + } + // Check is requests match limits for all resources. 
+ if isGuaranteed { + for name, req := range requests { + if lim, exists := limits[name]; !exists || lim.Cmp(req) != 0 { + isGuaranteed = false + break + } + } + } + if isGuaranteed && + len(requests) == len(limits) { + return core.PodQOSGuaranteed + } + return core.PodQOSBurstable +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/install/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/core/install/BUILD new file mode 100644 index 000000000000..9ed882df9943 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/install/BUILD @@ -0,0 +1,50 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_library( + name = "go_default_library", + srcs = ["install.go"], + importpath = "k8s.io/kubernetes/pkg/apis/core/install", + deps = [ + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["install_test.go"], + importpath = "k8s.io/kubernetes/pkg/apis/core/install", + library = ":go_default_library", + deps = [ + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/core:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/api/install/OWNERS b/vendor/k8s.io/kubernetes/pkg/apis/core/install/OWNERS similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/api/install/OWNERS rename to vendor/k8s.io/kubernetes/pkg/apis/core/install/OWNERS diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/core/install/install.go new file mode 100644 index 000000000000..cae514ec7b08 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/install/install.go @@ -0,0 +1,67 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package install installs the v1 monolithic api, making it available as an +// option to all of the API encoding/decoding machinery. 
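// ---- Editor's illustrative sketch (not part of this vendored diff) ----
// Usage of qos.GetPodQOS above, assuming the vendored packages in this PR are
// importable. With requests equal to limits for cpu and memory on every container,
// the pod is classified as Guaranteed; this is a simplified example, not an
// exhaustive tour of the classification rules.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/apis/core/helper/qos"
)

func main() {
	rl := core.ResourceList{
		core.ResourceCPU:    resource.MustParse("100m"),
		core.ResourceMemory: resource.MustParse("128Mi"),
	}
	pod := &core.Pod{
		Spec: core.PodSpec{
			Containers: []core.Container{
				{Name: "app", Resources: core.ResourceRequirements{Requests: rl, Limits: rl}},
			},
		},
	}
	fmt.Println(qos.GetPodQOS(pod)) // Guaranteed
}
// ---- end of editor's sketch ----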
+package install + +import ( + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/kubernetes/pkg/api/legacyscheme" + "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/v1" +) + +func init() { + Install(legacyscheme.GroupFactoryRegistry, legacyscheme.Registry, legacyscheme.Scheme) +} + +// Install registers the API group and adds types to a scheme +func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { + if err := announced.NewGroupMetaFactory( + &announced.GroupMetaFactoryArgs{ + GroupName: core.GroupName, + VersionPreferenceOrder: []string{v1.SchemeGroupVersion.Version}, + AddInternalObjectsToScheme: core.AddToScheme, + RootScopedKinds: sets.NewString( + "Node", + "Namespace", + "PersistentVolume", + "ComponentStatus", + ), + IgnoredKinds: sets.NewString( + "ListOptions", + "DeleteOptions", + "Status", + "PodLogOptions", + "PodExecOptions", + "PodAttachOptions", + "PodPortForwardOptions", + "PodProxyOptions", + "NodeProxyOptions", + "ServiceProxyOptions", + ), + }, + announced.VersionToSchemeFunc{ + v1.SchemeGroupVersion.Version: v1.AddToScheme, + }, + ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { + panic(err) + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/json.go b/vendor/k8s.io/kubernetes/pkg/apis/core/json.go similarity index 98% rename from vendor/k8s.io/kubernetes/pkg/api/json.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/json.go index 3a6e04c182fd..937cd056c010 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/json.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/json.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package api +package core import "encoding/json" diff --git a/vendor/k8s.io/kubernetes/pkg/api/objectreference.go b/vendor/k8s.io/kubernetes/pkg/apis/core/objectreference.go similarity index 98% rename from vendor/k8s.io/kubernetes/pkg/api/objectreference.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/objectreference.go index b36ecab27ff6..55b27f30b6fe 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/objectreference.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/objectreference.go @@ -17,7 +17,7 @@ limitations under the License. //TODO: consider making these methods functions, because we don't want helper //functions in the k8s.io/api repo. 
-package api +package core import ( "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/pods/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/core/pods/BUILD new file mode 100644 index 000000000000..b78e256e7c14 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/pods/BUILD @@ -0,0 +1,30 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["helpers.go"], + importpath = "k8s.io/kubernetes/pkg/apis/core/pods", + visibility = ["//visibility:public"], + deps = ["//pkg/fieldpath:go_default_library"], +) + +go_test( + name = "go_default_test", + srcs = ["helpers_test.go"], + importpath = "k8s.io/kubernetes/pkg/apis/core/pods", + library = ":go_default_library", +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/pods/helpers.go b/vendor/k8s.io/kubernetes/pkg/apis/core/pods/helpers.go new file mode 100644 index 000000000000..cf199cee7375 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/pods/helpers.go @@ -0,0 +1,63 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pods + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/fieldpath" +) + +// ConvertDownwardAPIFieldLabel converts the specified downward API field label +// and its value in the pod of the specified version to the internal version, +// and returns the converted label and value. This function returns an error if +// the conversion fails. +func ConvertDownwardAPIFieldLabel(version, label, value string) (string, string, error) { + if version != "v1" { + return "", "", fmt.Errorf("unsupported pod version: %s", version) + } + + if path, _, ok := fieldpath.SplitMaybeSubscriptedPath(label); ok { + switch path { + case "metadata.annotations", "metadata.labels": + return label, value, nil + default: + return "", "", fmt.Errorf("field label does not support subscript: %s", label) + } + } + + switch label { + case "metadata.annotations", + "metadata.labels", + "metadata.name", + "metadata.namespace", + "metadata.uid", + "spec.nodeName", + "spec.restartPolicy", + "spec.serviceAccountName", + "spec.schedulerName", + "status.phase", + "status.hostIP", + "status.podIP": + return label, value, nil + // This is for backwards compatibility with old v1 clients which send spec.host + case "spec.host": + return "spec.nodeName", value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/register.go b/vendor/k8s.io/kubernetes/pkg/apis/core/register.go new file mode 100644 index 000000000000..2784cbe157e7 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/register.go @@ -0,0 +1,99 @@ +/* +Copyright 2014 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package core + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + if err := scheme.AddIgnoredConversionType(&metav1.TypeMeta{}, &metav1.TypeMeta{}); err != nil { + return err + } + scheme.AddKnownTypes(SchemeGroupVersion, + &Pod{}, + &PodList{}, + &PodStatusResult{}, + &PodTemplate{}, + &PodTemplateList{}, + &ReplicationControllerList{}, + &ReplicationController{}, + &ServiceList{}, + &Service{}, + &ServiceProxyOptions{}, + &NodeList{}, + &Node{}, + &NodeConfigSource{}, + &NodeProxyOptions{}, + &Endpoints{}, + &EndpointsList{}, + &Binding{}, + &Event{}, + &EventList{}, + &List{}, + &LimitRange{}, + &LimitRangeList{}, + &ResourceQuota{}, + &ResourceQuotaList{}, + &Namespace{}, + &NamespaceList{}, + &ServiceAccount{}, + &ServiceAccountList{}, + &Secret{}, + &SecretList{}, + &PersistentVolume{}, + &PersistentVolumeList{}, + &PersistentVolumeClaim{}, + &PersistentVolumeClaimList{}, + &PodAttachOptions{}, + &PodLogOptions{}, + &PodExecOptions{}, + &PodPortForwardOptions{}, + &PodProxyOptions{}, + &ComponentStatus{}, + &ComponentStatusList{}, + &SerializedReference{}, + &RangeAllocation{}, + &ConfigMap{}, + &ConfigMapList{}, + ) + + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/resource.go b/vendor/k8s.io/kubernetes/pkg/apis/core/resource.go similarity index 99% rename from vendor/k8s.io/kubernetes/pkg/api/resource.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/resource.go index ce1747d8e6be..1910cd921d9a 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/resource.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/resource.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package api +package core import ( "k8s.io/apimachinery/pkg/api/resource" diff --git a/vendor/k8s.io/kubernetes/pkg/api/taint.go b/vendor/k8s.io/kubernetes/pkg/apis/core/taint.go similarity index 98% rename from vendor/k8s.io/kubernetes/pkg/api/taint.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/taint.go index 173e4e601614..ae1feb74d7e4 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/taint.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/taint.go @@ -17,7 +17,7 @@ limitations under the License. //TODO: consider making these methods functions, because we don't want helper //functions in the k8s.io/api repo. -package api +package core import "fmt" diff --git a/vendor/k8s.io/kubernetes/pkg/api/toleration.go b/vendor/k8s.io/kubernetes/pkg/apis/core/toleration.go similarity index 98% rename from vendor/k8s.io/kubernetes/pkg/api/toleration.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/toleration.go index edb73b74e1a6..1dfbc9f1bb2a 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/toleration.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/toleration.go @@ -17,7 +17,7 @@ limitations under the License. //TODO: consider making these methods functions, because we don't want helper //functions in the k8s.io/api repo. -package api +package core // MatchToleration checks if the toleration matches tolerationToMatch. Tolerations are unique by , // if the two tolerations have same combination, regard as they match. diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/types.go b/vendor/k8s.io/kubernetes/pkg/apis/core/types.go new file mode 100644 index 000000000000..b6b570b06e4c --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/types.go @@ -0,0 +1,4551 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package core + +import ( + "k8s.io/apimachinery/pkg/api/resource" + metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// Common string formats +// --------------------- +// Many fields in this API have formatting requirements. The commonly used +// formats are defined here. +// +// C_IDENTIFIER: This is a string that conforms to the definition of an "identifier" +// in the C language. This is captured by the following regex: +// [A-Za-z_][A-Za-z0-9_]* +// This defines the format, but not the length restriction, which should be +// specified at the definition of any field of this type. +// +// DNS_LABEL: This is a string, no more than 63 characters long, that conforms +// to the definition of a "label" in RFCs 1035 and 1123. This is captured +// by the following regex: +// [a-z0-9]([-a-z0-9]*[a-z0-9])? +// +// DNS_SUBDOMAIN: This is a string, no more than 253 characters long, that conforms +// to the definition of a "subdomain" in RFCs 1035 and 1123. 
This is captured +// by the following regex: +// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)* +// or more simply: +// DNS_LABEL(\.DNS_LABEL)* +// +// IANA_SVC_NAME: This is a string, no more than 15 characters long, that +// conforms to the definition of IANA service name in RFC 6335. +// It must contains at least one letter [a-z] and it must contains only [a-z0-9-]. +// Hypens ('-') cannot be leading or trailing character of the string +// and cannot be adjacent to other hyphens. + +// ObjectMeta is metadata that all persisted resources must have, which includes all objects +// users must create. +// DEPRECATED: Use k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta instead - this type will be removed soon. +type ObjectMeta struct { + // Name is unique within a namespace. Name is required when creating resources, although + // some resources may allow a client to request the generation of an appropriate name + // automatically. Name is primarily intended for creation idempotence and configuration + // definition. + // +optional + Name string + + // GenerateName indicates that the name should be made unique by the server prior to persisting + // it. A non-empty value for the field indicates the name will be made unique (and the name + // returned to the client will be different than the name passed). The value of this field will + // be combined with a unique suffix on the server if the Name field has not been provided. + // The provided value must be valid within the rules for Name, and may be truncated by the length + // of the suffix required to make the value unique on the server. + // + // If this field is specified, and Name is not present, the server will NOT return a 409 if the + // generated name exists - instead, it will either return 201 Created or 500 with Reason + // ServerTimeout indicating a unique name could not be found in the time allotted, and the client + // should retry (optionally after the time indicated in the Retry-After header). + // +optional + GenerateName string + + // Namespace defines the space within which name must be unique. An empty namespace is + // equivalent to the "default" namespace, but "default" is the canonical representation. + // Not all objects are required to be scoped to a namespace - the value of this field for + // those objects will be empty. + // +optional + Namespace string + + // SelfLink is a URL representing this object. + // +optional + SelfLink string + + // UID is the unique in time and space value for this object. It is typically generated by + // the server on successful creation of a resource and is not allowed to change on PUT + // operations. + // +optional + UID types.UID + + // An opaque value that represents the version of this resource. May be used for optimistic + // concurrency, change detection, and the watch operation on a resource or set of resources. + // Clients must treat these values as opaque and values may only be valid for a particular + // resource or set of resources. Only servers will generate resource versions. + // +optional + ResourceVersion string + + // A sequence number representing a specific generation of the desired state. + // Populated by the system. Read-only. + // +optional + Generation int64 + + // CreationTimestamp is a timestamp representing the server time when this object was + // created. It is not guaranteed to be set in happens-before order across separate operations. + // Clients may not set this value. It is represented in RFC3339 form and is in UTC. 
+ // +optional + CreationTimestamp metav1.Time + + // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This + // field is set by the server when a graceful deletion is requested by the user, and is not + // directly settable by a client. The resource is expected to be deleted (no longer visible + // from resource lists, and not reachable by name) after the time in this field. Once set, + // this value may not be unset or be set further into the future, although it may be shortened + // or the resource may be deleted prior to this time. For example, a user may request that + // a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination + // signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard + // termination signal (SIGKILL) to the container and after cleanup, remove the pod from the + // API. In the presence of network partitions, this object may still exist after this + // timestamp, until an administrator or automated process can determine the resource is + // fully terminated. + // If not set, graceful deletion of the object has not been requested. + // + // Populated by the system when a graceful deletion is requested. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + DeletionTimestamp *metav1.Time + + // DeletionGracePeriodSeconds records the graceful deletion value set when graceful deletion + // was requested. Represents the most recent grace period, and may only be shortened once set. + // +optional + DeletionGracePeriodSeconds *int64 + + // Labels are key value pairs that may be used to scope and select individual resources. + // Label keys are of the form: + // label-key ::= prefixed-name | name + // prefixed-name ::= prefix '/' name + // prefix ::= DNS_SUBDOMAIN + // name ::= DNS_LABEL + // The prefix is optional. If the prefix is not specified, the key is assumed to be private + // to the user. Other system components that wish to use labels must specify a prefix. The + // "kubernetes.io/" prefix is reserved for use by kubernetes components. + // +optional + Labels map[string]string + + // Annotations are unstructured key value data stored with a resource that may be set by + // external tooling. They are not queryable and should be preserved when modifying + // objects. Annotation keys have the same formatting restrictions as Label keys. See the + // comments on Labels for details. + // +optional + Annotations map[string]string + + // List of objects depended by this object. If ALL objects in the list have + // been deleted, this object will be garbage collected. If this object is managed by a controller, + // then an entry in this list will point to this controller, with the controller field set to true. + // There cannot be more than one managing controller. + // +optional + OwnerReferences []metav1.OwnerReference + + // An initializer is a controller which enforces some system invariant at object creation time. + // This field is a list of initializers that have not yet acted on this object. If nil or empty, + // this object has been completely initialized. Otherwise, the object is considered uninitialized + // and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to + // observe uninitialized objects. + // + // When an object is created, the system will populate this list with the current set of initializers. + // Only privileged users may set or modify this list. 
Once it is empty, it may not be modified further + // by any user. + Initializers *metav1.Initializers + + // Must be empty before the object is deleted from the registry. Each entry + // is an identifier for the responsible component that will remove the entry + // from the list. If the deletionTimestamp of the object is non-nil, entries + // in this list can only be removed. + // +optional + Finalizers []string + + // The name of the cluster which the object belongs to. + // This is used to distinguish resources with same name and namespace in different clusters. + // This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. + // +optional + ClusterName string +} + +const ( + // NamespaceDefault means the object is in the default namespace which is applied when not specified by clients + NamespaceDefault string = "default" + // NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces + NamespaceAll string = "" + // NamespaceNone is the argument for a context when there is no namespace. + NamespaceNone string = "" + // NamespaceSystem is the system namespace where we place system components. + NamespaceSystem string = "kube-system" + // NamespacePublic is the namespace where we place public info (ConfigMaps) + NamespacePublic string = "kube-public" + // TerminationMessagePathDefault means the default path to capture the application termination message running in a container + TerminationMessagePathDefault string = "/dev/termination-log" +) + +// Volume represents a named volume in a pod that may be accessed by any containers in the pod. +type Volume struct { + // Required: This must be a DNS_LABEL. Each volume in a pod must have + // a unique name. + Name string + // The VolumeSource represents the location and type of a volume to mount. + // This is optional for now. If not specified, the Volume is implied to be an EmptyDir. + // This implied behavior is deprecated and will be removed in a future version. + // +optional + VolumeSource +} + +// VolumeSource represents the source location of a volume to mount. +// Only one of its members may be specified. +type VolumeSource struct { + // HostPath represents file or directory on the host machine that is + // directly exposed to the container. This is generally used for system + // agents or other privileged things that are allowed to see the host + // machine. Most containers will NOT need this. + // --- + // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not + // mount host directories as read/write. + // +optional + HostPath *HostPathVolumeSource + // EmptyDir represents a temporary directory that shares a pod's lifetime. + // +optional + EmptyDir *EmptyDirVolumeSource + // GCEPersistentDisk represents a GCE Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + GCEPersistentDisk *GCEPersistentDiskVolumeSource + // AWSElasticBlockStore represents an AWS EBS disk that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource + // GitRepo represents a git repository at a particular revision. + // +optional + GitRepo *GitRepoVolumeSource + // Secret represents a secret that should populate this volume. 
+ // +optional + Secret *SecretVolumeSource + // NFS represents an NFS mount on the host that shares a pod's lifetime + // +optional + NFS *NFSVolumeSource + // ISCSIVolumeSource represents an ISCSI Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + ISCSI *ISCSIVolumeSource + // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime + // +optional + Glusterfs *GlusterfsVolumeSource + // PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace + // +optional + PersistentVolumeClaim *PersistentVolumeClaimVolumeSource + // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime + // +optional + RBD *RBDVolumeSource + + // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // +optional + Quobyte *QuobyteVolumeSource + + // FlexVolume represents a generic volume resource that is + // provisioned/attached using an exec based plugin. + // +optional + FlexVolume *FlexVolumeSource + + // Cinder represents a cinder volume attached and mounted on kubelets host machine + // +optional + Cinder *CinderVolumeSource + + // CephFS represents a Cephfs mount on the host that shares a pod's lifetime + // +optional + CephFS *CephFSVolumeSource + + // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running + // +optional + Flocker *FlockerVolumeSource + + // DownwardAPI represents metadata about the pod that should populate this volume + // +optional + DownwardAPI *DownwardAPIVolumeSource + // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + // +optional + FC *FCVolumeSource + // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. + // +optional + AzureFile *AzureFileVolumeSource + // ConfigMap represents a configMap that should populate this volume + // +optional + ConfigMap *ConfigMapVolumeSource + // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + // +optional + VsphereVolume *VsphereVirtualDiskVolumeSource + // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + // +optional + AzureDisk *AzureDiskVolumeSource + // PhotonPersistentDisk represents a Photon Controller persistent disk attached and mounted on kubelets host machine + PhotonPersistentDisk *PhotonPersistentDiskVolumeSource + // Items for all in one resources secrets, configmaps, and downward API + Projected *ProjectedVolumeSource + // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine + // +optional + PortworxVolume *PortworxVolumeSource + // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // +optional + ScaleIO *ScaleIOVolumeSource + // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod + // +optional + StorageOS *StorageOSVolumeSource +} + +// Similar to VolumeSource but meant for the administrator who creates PVs. +// Exactly one of its members must be set. +type PersistentVolumeSource struct { + // GCEPersistentDisk represents a GCE Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. 
+ // +optional + GCEPersistentDisk *GCEPersistentDiskVolumeSource + // AWSElasticBlockStore represents an AWS EBS disk that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource + // HostPath represents a directory on the host. + // Provisioned by a developer or tester. + // This is useful for single-node development and testing only! + // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. + // +optional + HostPath *HostPathVolumeSource + // Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod + // +optional + Glusterfs *GlusterfsVolumeSource + // NFS represents an NFS mount on the host that shares a pod's lifetime + // +optional + NFS *NFSVolumeSource + // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime + // +optional + RBD *RBDPersistentVolumeSource + // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // +optional + Quobyte *QuobyteVolumeSource + // ISCSIPersistentVolumeSource represents an ISCSI resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + ISCSI *ISCSIPersistentVolumeSource + // FlexVolume represents a generic volume resource that is + // provisioned/attached using an exec based plugin. + // +optional + FlexVolume *FlexVolumeSource + // Cinder represents a cinder volume attached and mounted on kubelets host machine + // +optional + Cinder *CinderVolumeSource + // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime + // +optional + CephFS *CephFSPersistentVolumeSource + // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + // +optional + FC *FCVolumeSource + // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running + // +optional + Flocker *FlockerVolumeSource + // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. + // +optional + AzureFile *AzureFilePersistentVolumeSource + // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + // +optional + VsphereVolume *VsphereVirtualDiskVolumeSource + // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + // +optional + AzureDisk *AzureDiskVolumeSource + // PhotonPersistentDisk represents a Photon Controller persistent disk attached and mounted on kubelets host machine + PhotonPersistentDisk *PhotonPersistentDiskVolumeSource + // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine + // +optional + PortworxVolume *PortworxVolumeSource + // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. 
+ // +optional + ScaleIO *ScaleIOPersistentVolumeSource + // Local represents directly-attached storage with node affinity + // +optional + Local *LocalVolumeSource + // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod + // More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md + // +optional + StorageOS *StorageOSPersistentVolumeSource + // CSI (Container Storage Interface) represents storage that handled by an external CSI driver + // +optional + CSI *CSIPersistentVolumeSource +} + +type PersistentVolumeClaimVolumeSource struct { + // ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume + ClaimName string + // Optional: Defaults to false (read/write). ReadOnly here + // will force the ReadOnly setting in VolumeMounts + // +optional + ReadOnly bool +} + +const ( + // BetaStorageClassAnnotation represents the beta/previous StorageClass annotation. + // It's deprecated and will be removed in a future release. (#51440) + BetaStorageClassAnnotation = "volume.beta.kubernetes.io/storage-class" + + // MountOptionAnnotation defines mount option annotation used in PVs + MountOptionAnnotation = "volume.beta.kubernetes.io/mount-options" + + // AlphaStorageNodeAffinityAnnotation defines node affinity policies for a PersistentVolume. + // Value is a string of the json representation of type NodeAffinity + AlphaStorageNodeAffinityAnnotation = "volume.alpha.kubernetes.io/node-affinity" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type PersistentVolume struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + //Spec defines a persistent volume owned by the cluster + // +optional + Spec PersistentVolumeSpec + + // Status represents the current information about persistent volume. + // +optional + Status PersistentVolumeStatus +} + +type PersistentVolumeSpec struct { + // Resources represents the actual resources of the volume + Capacity ResourceList + // Source represents the location and type of a volume to mount. + PersistentVolumeSource + // AccessModes contains all ways the volume can be mounted + // +optional + AccessModes []PersistentVolumeAccessMode + // ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. + // ClaimRef is expected to be non-nil when bound. + // claim.VolumeName is the authoritative bind between PV and PVC. + // When set to non-nil value, PVC.Spec.Selector of the referenced PVC is + // ignored, i.e. labels of this PV do not need to match PVC selector. + // +optional + ClaimRef *ObjectReference + // Optional: what happens to a persistent volume when released from its claim. + // +optional + PersistentVolumeReclaimPolicy PersistentVolumeReclaimPolicy + // Name of StorageClass to which this persistent volume belongs. Empty value + // means that this volume does not belong to any StorageClass. + // +optional + StorageClassName string + // A list of mount options, e.g. ["ro", "soft"]. Not validated - mount will + // simply fail if one is invalid. + // +optional + MountOptions []string + // volumeMode defines if a volume is intended to be used with a formatted filesystem + // or to remain in raw block state. Value of Filesystem is implied when not included in spec. + // This is an alpha feature and may change in the future. 
+ // +optional + VolumeMode *PersistentVolumeMode +} + +// PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes +type PersistentVolumeReclaimPolicy string + +const ( + // PersistentVolumeReclaimRecycle means the volume will be recycled back into the pool of unbound persistent volumes on release from its claim. + // The volume plugin must support Recycling. + PersistentVolumeReclaimRecycle PersistentVolumeReclaimPolicy = "Recycle" + // PersistentVolumeReclaimDelete means the volume will be deleted from Kubernetes on release from its claim. + // The volume plugin must support Deletion. + PersistentVolumeReclaimDelete PersistentVolumeReclaimPolicy = "Delete" + // PersistentVolumeReclaimRetain means the volume will be left in its current phase (Released) for manual reclamation by the administrator. + // The default policy is Retain. + PersistentVolumeReclaimRetain PersistentVolumeReclaimPolicy = "Retain" +) + +// PersistentVolumeMode describes how a volume is intended to be consumed, either Block or Filesystem. +type PersistentVolumeMode string + +const ( + // PersistentVolumeBlock means the volume will not be formatted with a filesystem and will remain a raw block device. + PersistentVolumeBlock PersistentVolumeMode = "Block" + // PersistentVolumeFilesystem means the volume will be or is formatted with a filesystem. + PersistentVolumeFilesystem PersistentVolumeMode = "Filesystem" +) + +type PersistentVolumeStatus struct { + // Phase indicates if a volume is available, bound to a claim, or released by a claim + // +optional + Phase PersistentVolumePhase + // A human-readable message indicating details about why the volume is in this state. + // +optional + Message string + // Reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI + // +optional + Reason string +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type PersistentVolumeList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + Items []PersistentVolume +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PersistentVolumeClaim is a user's request for and claim to a persistent volume +type PersistentVolumeClaim struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the volume requested by a pod author + // +optional + Spec PersistentVolumeClaimSpec + + // Status represents the current information about a claim + // +optional + Status PersistentVolumeClaimStatus +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type PersistentVolumeClaimList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + Items []PersistentVolumeClaim +} + +// PersistentVolumeClaimSpec describes the common attributes of storage devices +// and allows a Source for provider-specific attributes +type PersistentVolumeClaimSpec struct { + // Contains the types of access modes required + // +optional + AccessModes []PersistentVolumeAccessMode + // A label query over volumes to consider for binding. This selector is + // ignored when VolumeName is set + // +optional + Selector *metav1.LabelSelector + // Resources represents the minimum resources required + // +optional + Resources ResourceRequirements + // VolumeName is the binding reference to the PersistentVolume backing this + // claim. 
When set to non-empty value Selector is not evaluated + // +optional + VolumeName string + // Name of the StorageClass required by the claim. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#class-1 + // +optional + StorageClassName *string + // volumeMode defines what type of volume is required by the claim. + // Value of Filesystem is implied when not included in claim spec. + // This is an alpha feature and may change in the future. + // +optional + VolumeMode *PersistentVolumeMode +} + +type PersistentVolumeClaimConditionType string + +// These are valid conditions of Pvc +const ( + // An user trigger resize of pvc has been started + PersistentVolumeClaimResizing PersistentVolumeClaimConditionType = "Resizing" +) + +type PersistentVolumeClaimCondition struct { + Type PersistentVolumeClaimConditionType + Status ConditionStatus + // +optional + LastProbeTime metav1.Time + // +optional + LastTransitionTime metav1.Time + // +optional + Reason string + // +optional + Message string +} + +type PersistentVolumeClaimStatus struct { + // Phase represents the current phase of PersistentVolumeClaim + // +optional + Phase PersistentVolumeClaimPhase + // AccessModes contains all ways the volume backing the PVC can be mounted + // +optional + AccessModes []PersistentVolumeAccessMode + // Represents the actual resources of the underlying volume + // +optional + Capacity ResourceList + // +optional + Conditions []PersistentVolumeClaimCondition +} + +type PersistentVolumeAccessMode string + +const ( + // can be mounted read/write mode to exactly 1 host + ReadWriteOnce PersistentVolumeAccessMode = "ReadWriteOnce" + // can be mounted in read-only mode to many hosts + ReadOnlyMany PersistentVolumeAccessMode = "ReadOnlyMany" + // can be mounted in read/write mode to many hosts + ReadWriteMany PersistentVolumeAccessMode = "ReadWriteMany" +) + +type PersistentVolumePhase string + +const ( + // used for PersistentVolumes that are not available + VolumePending PersistentVolumePhase = "Pending" + // used for PersistentVolumes that are not yet bound + // Available volumes are held by the binder and matched to PersistentVolumeClaims + VolumeAvailable PersistentVolumePhase = "Available" + // used for PersistentVolumes that are bound + VolumeBound PersistentVolumePhase = "Bound" + // used for PersistentVolumes where the bound PersistentVolumeClaim was deleted + // released volumes must be recycled before becoming available again + // this phase is used by the persistent volume claim binder to signal to another process to reclaim the resource + VolumeReleased PersistentVolumePhase = "Released" + // used for PersistentVolumes that failed to be correctly recycled or deleted after being released from a claim + VolumeFailed PersistentVolumePhase = "Failed" +) + +type PersistentVolumeClaimPhase string + +const ( + // used for PersistentVolumeClaims that are not yet bound + ClaimPending PersistentVolumeClaimPhase = "Pending" + // used for PersistentVolumeClaims that are bound + ClaimBound PersistentVolumeClaimPhase = "Bound" + // used for PersistentVolumeClaims that lost their underlying + // PersistentVolume. The claim was bound to a PersistentVolume and this + // volume does not exist any longer and all data on it was lost. 
+ ClaimLost PersistentVolumeClaimPhase = "Lost" +) + +type HostPathType string + +const ( + // For backwards compatible, leave it empty if unset + HostPathUnset HostPathType = "" + // If nothing exists at the given path, an empty directory will be created there + // as needed with file mode 0755, having the same group and ownership with Kubelet. + HostPathDirectoryOrCreate HostPathType = "DirectoryOrCreate" + // A directory must exist at the given path + HostPathDirectory HostPathType = "Directory" + // If nothing exists at the given path, an empty file will be created there + // as needed with file mode 0644, having the same group and ownership with Kubelet. + HostPathFileOrCreate HostPathType = "FileOrCreate" + // A file must exist at the given path + HostPathFile HostPathType = "File" + // A UNIX socket must exist at the given path + HostPathSocket HostPathType = "Socket" + // A character device must exist at the given path + HostPathCharDev HostPathType = "CharDevice" + // A block device must exist at the given path + HostPathBlockDev HostPathType = "BlockDevice" +) + +// Represents a host path mapped into a pod. +// Host path volumes do not support ownership management or SELinux relabeling. +type HostPathVolumeSource struct { + // If the path is a symlink, it will follow the link to the real path. + Path string + // Defaults to "" + Type *HostPathType +} + +// Represents an empty directory for a pod. +// Empty directory volumes support ownership management and SELinux relabeling. +type EmptyDirVolumeSource struct { + // TODO: Longer term we want to represent the selection of underlying + // media more like a scheduling problem - user says what traits they + // need, we give them a backing store that satisfies that. For now + // this will cover the most common needs. + // Optional: what type of storage medium should back this directory. + // The default is "" which means to use the node's default medium. + // +optional + Medium StorageMedium + // Total amount of local storage required for this EmptyDir volume. + // The size limit is also applicable for memory medium. + // The maximum usage on memory medium EmptyDir would be the minimum value between + // the SizeLimit specified here and the sum of memory limits of all containers in a pod. + // The default is nil which means that the limit is undefined. + // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir + // +optional + SizeLimit *resource.Quantity +} + +// StorageMedium defines ways that storage can be allocated to a volume. +type StorageMedium string + +const ( + StorageMediumDefault StorageMedium = "" // use whatever the default is for the node + StorageMediumMemory StorageMedium = "Memory" // use memory (tmpfs) + StorageMediumHugePages StorageMedium = "HugePages" // use hugepages +) + +// Protocol defines network protocols supported for things like container ports. +type Protocol string + +const ( + // ProtocolTCP is the TCP protocol. + ProtocolTCP Protocol = "TCP" + // ProtocolUDP is the UDP protocol. + ProtocolUDP Protocol = "UDP" +) + +// Represents a Persistent Disk resource in Google Compute Engine. +// +// A GCE PD must exist before mounting to a container. The disk must +// also be in the same GCE project and zone as the kubelet. A GCE PD +// can only be mounted as read/write once or read-only many times. GCE +// PDs support ownership management and SELinux relabeling. +type GCEPersistentDiskVolumeSource struct { + // Unique name of the PD resource. 
Used to identify the disk in GCE + PDName string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string + // Optional: Partition on the disk to mount. + // If omitted, kubelet will attempt to mount the device name. + // Ex. For /dev/sda1, this field is "1", for /dev/sda, this field is 0 or empty. + // +optional + Partition int32 + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// Represents an ISCSI disk. +// ISCSI volumes can only be mounted as read/write once. +// ISCSI volumes support ownership management and SELinux relabeling. +type ISCSIVolumeSource struct { + // Required: iSCSI target portal + // the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260) + // +optional + TargetPortal string + // Required: target iSCSI Qualified Name + // +optional + IQN string + // Required: iSCSI target lun number + // +optional + Lun int32 + // Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport. + // +optional + ISCSIInterface string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool + // Optional: list of iSCSI target portal ips for high availability. + // the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260) + // +optional + Portals []string + // Optional: whether support iSCSI Discovery CHAP authentication + // +optional + DiscoveryCHAPAuth bool + // Optional: whether support iSCSI Session CHAP authentication + // +optional + SessionCHAPAuth bool + // Optional: CHAP secret for iSCSI target and initiator authentication. + // The secret is used if either DiscoveryCHAPAuth or SessionCHAPAuth is true + // +optional + SecretRef *LocalObjectReference + // Optional: Custom initiator name per volume. + // If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + // : will be created for the connection. + // +optional + InitiatorName *string +} + +// ISCSIPersistentVolumeSource represents an ISCSI disk. +// ISCSI volumes can only be mounted as read/write once. +// ISCSI volumes support ownership management and SELinux relabeling. +type ISCSIPersistentVolumeSource struct { + // Required: iSCSI target portal + // the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260) + // +optional + TargetPortal string + // Required: target iSCSI Qualified Name + // +optional + IQN string + // Required: iSCSI target lun number + // +optional + Lun int32 + // Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport. + // +optional + ISCSIInterface string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
+ // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool + // Optional: list of iSCSI target portal ips for high availability. + // the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260) + // +optional + Portals []string + // Optional: whether support iSCSI Discovery CHAP authentication + // +optional + DiscoveryCHAPAuth bool + // Optional: whether support iSCSI Session CHAP authentication + // +optional + SessionCHAPAuth bool + // Optional: CHAP secret for iSCSI target and initiator authentication. + // The secret is used if either DiscoveryCHAPAuth or SessionCHAPAuth is true + // +optional + SecretRef *SecretReference + // Optional: Custom initiator name per volume. + // If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + // : will be created for the connection. + // +optional + InitiatorName *string +} + +// Represents a Fibre Channel volume. +// Fibre Channel volumes can only be mounted as read/write once. +// Fibre Channel volumes support ownership management and SELinux relabeling. +type FCVolumeSource struct { + // Optional: FC target worldwide names (WWNs) + // +optional + TargetWWNs []string + // Optional: FC target lun number + // +optional + Lun *int32 + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool + // Optional: FC volume World Wide Identifiers (WWIDs) + // Either WWIDs or TargetWWNs and Lun must be set, but not both simultaneously. + // +optional + WWIDs []string +} + +// FlexVolume represents a generic volume resource that is +// provisioned/attached using an exec based plugin. +type FlexVolumeSource struct { + // Driver is the name of the driver to use for this volume. + Driver string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + // +optional + FSType string + // Optional: SecretRef is reference to the secret object containing + // sensitive information to pass to the plugin scripts. This may be + // empty if no secret object is specified. If the secret object + // contains more than one secret, all secrets are passed to the plugin + // scripts. + // +optional + SecretRef *LocalObjectReference + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool + // Optional: Extra driver options if any. + // +optional + Options map[string]string +} + +// Represents a Persistent Disk resource in AWS. +// +// An AWS EBS disk must exist before mounting to a container. The disk +// must also be in the same AWS zone as the kubelet. An AWS EBS disk +// can only be mounted as read/write once. AWS EBS volumes support +// ownership management and SELinux relabeling. +type AWSElasticBlockStoreVolumeSource struct { + // Unique id of the persistent disk resource. 
Used to identify the disk in AWS + VolumeID string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string + // Optional: Partition on the disk to mount. + // If omitted, kubelet will attempt to mount the device name. + // Ex. For /dev/sda1, this field is "1", for /dev/sda, this field is 0 or empty. + // +optional + Partition int32 + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// Represents a volume that is populated with the contents of a git repository. +// Git repo volumes do not support ownership management. +// Git repo volumes support SELinux relabeling. +type GitRepoVolumeSource struct { + // Repository URL + Repository string + // Commit hash, this is optional + // +optional + Revision string + // Clone target, this is optional + // Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + // git repository. Otherwise, if specified, the volume will contain the git repository in + // the subdirectory with the given name. + // +optional + Directory string + // TODO: Consider credentials here. +} + +// Adapts a Secret into a volume. +// +// The contents of the target Secret's Data field will be presented in a volume +// as files using the keys in the Data field as the file names. +// Secret volumes support ownership management and SELinux relabeling. +type SecretVolumeSource struct { + // Name of the secret in the pod's namespace to use. + // +optional + SecretName string + // If unspecified, each key-value pair in the Data field of the referenced + // Secret will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the Secret, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath + // Mode bits to use on created files by default. Must be a value between + // 0 and 0777. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + DefaultMode *int32 + // Specify whether the Secret or its key must be defined + // +optional + Optional *bool +} + +// Adapts a secret into a projected volume. +// +// The contents of the target Secret's Data field will be presented in a +// projected volume as files using the keys in the Data field as the file names. +// Note that this is identical to a secret volume source without the default +// mode. +type SecretProjection struct { + LocalObjectReference + // If unspecified, each key-value pair in the Data field of the referenced + // Secret will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the Secret, + // the volume setup will error unless it is marked optional. 
Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath + // Specify whether the Secret or its key must be defined + // +optional + Optional *bool +} + +// Represents an NFS mount that lasts the lifetime of a pod. +// NFS volumes do not support ownership management or SELinux relabeling. +type NFSVolumeSource struct { + // Server is the hostname or IP address of the NFS server + Server string + + // Path is the exported NFS share + Path string + + // Optional: Defaults to false (read/write). ReadOnly here will force + // the NFS export to be mounted with read-only permissions + // +optional + ReadOnly bool +} + +// Represents a Quobyte mount that lasts the lifetime of a pod. +// Quobyte volumes do not support ownership management or SELinux relabeling. +type QuobyteVolumeSource struct { + // Registry represents a single or multiple Quobyte Registry services + // specified as a string as host:port pair (multiple entries are separated with commas) + // which acts as the central registry for volumes + Registry string + + // Volume is a string that references an already created Quobyte volume by name. + Volume string + + // Defaults to false (read/write). ReadOnly here will force + // the Quobyte to be mounted with read-only permissions + // +optional + ReadOnly bool + + // User to map volume access to + // Defaults to the root user + // +optional + User string + + // Group to map volume access to + // Default is no group + // +optional + Group string +} + +// Represents a Glusterfs mount that lasts the lifetime of a pod. +// Glusterfs volumes do not support ownership management or SELinux relabeling. +type GlusterfsVolumeSource struct { + // Required: EndpointsName is the endpoint name that details Glusterfs topology + EndpointsName string + + // Required: Path is the Glusterfs volume path + Path string + + // Optional: Defaults to false (read/write). ReadOnly here will force + // the Glusterfs to be mounted with read-only permissions + // +optional + ReadOnly bool +} + +// Represents a Rados Block Device mount that lasts the lifetime of a pod. +// RBD volumes support ownership management and SELinux relabeling. +type RBDVolumeSource struct { + // Required: CephMonitors is a collection of Ceph monitors + CephMonitors []string + // Required: RBDImage is the rados image name + RBDImage string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string + // Optional: RadosPool is the rados pool name,default is rbd + // +optional + RBDPool string + // Optional: RBDUser is the rados user name, default is admin + // +optional + RadosUser string + // Optional: Keyring is the path to key ring for RBDUser, default is /etc/ceph/keyring + // +optional + Keyring string + // Optional: SecretRef is name of the authentication secret for RBDUser, default is nil. + // +optional + SecretRef *LocalObjectReference + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// Represents a Rados Block Device mount that lasts the lifetime of a pod. +// RBD volumes support ownership management and SELinux relabeling. 
+type RBDPersistentVolumeSource struct { + // Required: CephMonitors is a collection of Ceph monitors + CephMonitors []string + // Required: RBDImage is the rados image name + RBDImage string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string + // Optional: RadosPool is the rados pool name,default is rbd + // +optional + RBDPool string + // Optional: RBDUser is the rados user name, default is admin + // +optional + RadosUser string + // Optional: Keyring is the path to key ring for RBDUser, default is /etc/ceph/keyring + // +optional + Keyring string + // Optional: SecretRef is reference to the authentication secret for User, default is empty. + // +optional + SecretRef *SecretReference + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// Represents a cinder volume resource in Openstack. A Cinder volume +// must exist before mounting to a container. The volume must also be +// in the same region as the kubelet. Cinder volumes support ownership +// management and SELinux relabeling. +type CinderVolumeSource struct { + // Unique id of the volume used to identify the cinder volume + VolumeID string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// Represents a Ceph Filesystem mount that lasts the lifetime of a pod +// Cephfs volumes do not support ownership management or SELinux relabeling. +type CephFSVolumeSource struct { + // Required: Monitors is a collection of Ceph monitors + Monitors []string + // Optional: Used as the mounted root, rather than the full Ceph tree, default is / + // +optional + Path string + // Optional: User is the rados user name, default is admin + // +optional + User string + // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + // +optional + SecretFile string + // Optional: SecretRef is reference to the authentication secret for User, default is empty. + // +optional + SecretRef *LocalObjectReference + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// SecretReference represents a Secret Reference. It has enough information to retrieve secret +// in any namespace +type SecretReference struct { + // Name is unique within a namespace to reference a secret resource. + // +optional + Name string + // Namespace defines the space within which the secret name must be unique. + // +optional + Namespace string +} + +// Represents a Ceph Filesystem mount that lasts the lifetime of a pod +// Cephfs volumes do not support ownership management or SELinux relabeling. 
+type CephFSPersistentVolumeSource struct { + // Required: Monitors is a collection of Ceph monitors + Monitors []string + // Optional: Used as the mounted root, rather than the full Ceph tree, default is / + // +optional + Path string + // Optional: User is the rados user name, default is admin + // +optional + User string + // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + // +optional + SecretFile string + // Optional: SecretRef is reference to the authentication secret for User, default is empty. + // +optional + SecretRef *SecretReference + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// Represents a Flocker volume mounted by the Flocker agent. +// One and only one of datasetName and datasetUUID should be set. +// Flocker volumes do not support ownership management or SELinux relabeling. +type FlockerVolumeSource struct { + // Name of the dataset stored as metadata -> name on the dataset for Flocker + // should be considered as deprecated + // +optional + DatasetName string + // UUID of the dataset. This is unique identifier of a Flocker dataset + // +optional + DatasetUUID string +} + +// Represents a volume containing downward API info. +// Downward API volumes support ownership management and SELinux relabeling. +type DownwardAPIVolumeSource struct { + // Items is a list of DownwardAPIVolume file + // +optional + Items []DownwardAPIVolumeFile + // Mode bits to use on created files by default. Must be a value between + // 0 and 0777. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + DefaultMode *int32 +} + +// Represents a single file containing information from the downward API +type DownwardAPIVolumeFile struct { + // Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' + Path string + // Required: Selects a field of the pod: only annotations, labels, name, namespace and uid are supported. + // +optional + FieldRef *ObjectFieldSelector + // Selects a resource of the container: only resources limits and requests + // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + // +optional + ResourceFieldRef *ResourceFieldSelector + // Optional: mode bits to use on this file, must be a value between 0 + // and 0777. If not specified, the volume defaultMode will be used. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + Mode *int32 +} + +// Represents downward API info for projecting into a projected volume. +// Note that this is identical to a downwardAPI volume source without the default +// mode. +type DownwardAPIProjection struct { + // Items is a list of DownwardAPIVolume file + // +optional + Items []DownwardAPIVolumeFile +} + +// AzureFile represents an Azure File Service mount on the host and bind mount to the pod. +type AzureFileVolumeSource struct { + // the name of secret that contains Azure Storage Account Name and Key + SecretName string + // Share Name + ShareName string + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. 
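As a sketch of how the downward API types above fit together (illustrative only, not part of the vendored file; ObjectFieldSelector and ResourceFieldSelector are defined further down in this same file, and the container name is hypothetical), a volume could project the pod's labels and a container's CPU limit as files:

// Illustrative only: project pod labels and a CPU limit with a restrictive default mode.
var downwardAPIMode int32 = 0444

var exampleDownwardAPI = DownwardAPIVolumeSource{
	DefaultMode: &downwardAPIMode,
	Items: []DownwardAPIVolumeFile{
		{
			Path:     "labels",
			FieldRef: &ObjectFieldSelector{FieldPath: "metadata.labels"},
		},
		{
			Path: "cpu_limit",
			ResourceFieldRef: &ResourceFieldSelector{
				ContainerName: "main", // hypothetical container name
				Resource:      "limits.cpu",
			},
		},
	},
}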
+ // +optional + ReadOnly bool +} + +// AzureFile represents an Azure File Service mount on the host and bind mount to the pod. +type AzureFilePersistentVolumeSource struct { + // the name of secret that contains Azure Storage Account Name and Key + SecretName string + // Share Name + ShareName string + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool + // the namespace of the secret that contains Azure Storage Account Name and Key + // default is the same as the Pod + // +optional + SecretNamespace *string +} + +// Represents a vSphere volume resource. +type VsphereVirtualDiskVolumeSource struct { + // Path that identifies vSphere volume vmdk + VolumePath string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string + // Storage Policy Based Management (SPBM) profile name. + // +optional + StoragePolicyName string + // Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName. + // +optional + StoragePolicyID string +} + +// Represents a Photon Controller persistent disk resource. +type PhotonPersistentDiskVolumeSource struct { + // ID that identifies Photon Controller persistent disk + PdID string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + FSType string +} + +// PortworxVolumeSource represents a Portworx volume resource. +type PortworxVolumeSource struct { + // VolumeID uniquely identifies a Portworx volume + VolumeID string + // FSType represents the filesystem type to mount + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +type AzureDataDiskCachingMode string +type AzureDataDiskKind string + +const ( + AzureDataDiskCachingNone AzureDataDiskCachingMode = "None" + AzureDataDiskCachingReadOnly AzureDataDiskCachingMode = "ReadOnly" + AzureDataDiskCachingReadWrite AzureDataDiskCachingMode = "ReadWrite" + + AzureSharedBlobDisk AzureDataDiskKind = "Shared" + AzureDedicatedBlobDisk AzureDataDiskKind = "Dedicated" + AzureManagedDisk AzureDataDiskKind = "Managed" +) + +// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. +type AzureDiskVolumeSource struct { + // The Name of the data disk in the blob storage + DiskName string + // The URI of the data disk in the blob storage + DataDiskURI string + // Host Caching mode: None, Read Only, Read Write. + // +optional + CachingMode *AzureDataDiskCachingMode + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType *string + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly *bool + // Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). 
defaults to shared + Kind *AzureDataDiskKind +} + +// ScaleIOVolumeSource represents a persistent ScaleIO volume +type ScaleIOVolumeSource struct { + // The host address of the ScaleIO API Gateway. + Gateway string + // The name of the storage system as configured in ScaleIO. + System string + // SecretRef references to the secret for ScaleIO user and other + // sensitive information. If this is not provided, Login operation will fail. + SecretRef *LocalObjectReference + // Flag to enable/disable SSL communication with Gateway, default false + // +optional + SSLEnabled bool + // The name of the ScaleIO Protection Domain for the configured storage. + // +optional + ProtectionDomain string + // The ScaleIO Storage Pool associated with the protection domain. + // +optional + StoragePool string + // Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + // +optional + StorageMode string + // The name of a volume already created in the ScaleIO system + // that is associated with this volume source. + VolumeName string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume that can be defined +// by a an admin via a storage class, for instance. +type ScaleIOPersistentVolumeSource struct { + // The host address of the ScaleIO API Gateway. + Gateway string + // The name of the storage system as configured in ScaleIO. + System string + // SecretRef references to the secret for ScaleIO user and other + // sensitive information. If this is not provided, Login operation will fail. + SecretRef *SecretReference + // Flag to enable/disable SSL communication with Gateway, default false + // +optional + SSLEnabled bool + // The name of the ScaleIO Protection Domain for the configured storage. + // +optional + ProtectionDomain string + // The ScaleIO Storage Pool associated with the protection domain. + // +optional + StoragePool string + // Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + // +optional + StorageMode string + // The name of a volume created in the ScaleIO system + // that is associated with this volume source. + VolumeName string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// Represents a StorageOS persistent volume resource. +type StorageOSVolumeSource struct { + // VolumeName is the human-readable name of the StorageOS volume. Volume + // names are only unique within a namespace. + VolumeName string + // VolumeNamespace specifies the scope of the volume within StorageOS. If no + // namespace is specified then the Pod's namespace will be used. This allows the + // Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + // Set VolumeName to any name to override the default behaviour. + // Set to "default" if you are not using namespaces within StorageOS. 
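A hedged example of the Azure disk source defined above (illustrative only, not part of the vendored file; the disk name and URI are hypothetical), showing how the pointer-typed caching mode, kind and filesystem fields are set via intermediate variables:

// Illustrative only: a managed Azure data disk with read-only host caching.
var (
	azureCachingMode = AzureDataDiskCachingReadOnly
	azureDiskKind    = AzureManagedDisk
	azureDiskFSType  = "ext4"
)

var exampleAzureDisk = AzureDiskVolumeSource{
	DiskName:    "example-data-disk", // hypothetical disk name
	DataDiskURI: "https://examplestorage.blob.core.windows.net/vhds/example-data-disk.vhd", // hypothetical URI
	CachingMode: &azureCachingMode,
	Kind:        &azureDiskKind,
	FSType:      &azureDiskFSType,
}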
+ // Namespaces that do not pre-exist within StorageOS will be created. + // +optional + VolumeNamespace string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool + // SecretRef specifies the secret to use for obtaining the StorageOS API + // credentials. If not specified, default values will be attempted. + // +optional + SecretRef *LocalObjectReference +} + +// Represents a StorageOS persistent volume resource. +type StorageOSPersistentVolumeSource struct { + // VolumeName is the human-readable name of the StorageOS volume. Volume + // names are only unique within a namespace. + VolumeName string + // VolumeNamespace specifies the scope of the volume within StorageOS. If no + // namespace is specified then the Pod's namespace will be used. This allows the + // Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + // Set VolumeName to any name to override the default behaviour. + // Set to "default" if you are not using namespaces within StorageOS. + // Namespaces that do not pre-exist within StorageOS will be created. + // +optional + VolumeNamespace string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool + // SecretRef specifies the secret to use for obtaining the StorageOS API + // credentials. If not specified, default values will be attempted. + // +optional + SecretRef *ObjectReference +} + +// Adapts a ConfigMap into a volume. +// +// The contents of the target ConfigMap's Data field will be presented in a +// volume as files using the keys in the Data field as the file names, unless +// the items element is populated with specific mappings of keys to paths. +// ConfigMap volumes support ownership management and SELinux relabeling. +type ConfigMapVolumeSource struct { + LocalObjectReference + // If unspecified, each key-value pair in the Data field of the referenced + // ConfigMap will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the ConfigMap, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath + // Mode bits to use on created files by default. Must be a value between + // 0 and 0777. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + DefaultMode *int32 + // Specify whether the ConfigMap or it's keys must be defined + // +optional + Optional *bool +} + +// Adapts a ConfigMap into a projected volume. 
+// +// The contents of the target ConfigMap's Data field will be presented in a +// projected volume as files using the keys in the Data field as the file names, +// unless the items element is populated with specific mappings of keys to paths. +// Note that this is identical to a configmap volume source without the default +// mode. +type ConfigMapProjection struct { + LocalObjectReference + // If unspecified, each key-value pair in the Data field of the referenced + // ConfigMap will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the ConfigMap, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath + // Specify whether the ConfigMap or it's keys must be defined + // +optional + Optional *bool +} + +// Represents a projected volume source +type ProjectedVolumeSource struct { + // list of volume projections + Sources []VolumeProjection + // Mode bits to use on created files by default. Must be a value between + // 0 and 0777. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + DefaultMode *int32 +} + +// Projection that may be projected along with other supported volume types +type VolumeProjection struct { + // all types below are the supported types for projection into the same volume + + // information about the secret data to project + Secret *SecretProjection + // information about the downwardAPI data to project + DownwardAPI *DownwardAPIProjection + // information about the configMap data to project + ConfigMap *ConfigMapProjection +} + +// Maps a string key to a path within a volume. +type KeyToPath struct { + // The key to project. + Key string + + // The relative path of the file to map the key to. + // May not be an absolute path. + // May not contain the path element '..'. + // May not start with the string '..'. + Path string + // Optional: mode bits to use on this file, should be a value between 0 + // and 0777. If not specified, the volume defaultMode will be used. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + Mode *int32 +} + +// Local represents directly-attached storage with node affinity +type LocalVolumeSource struct { + // The full path to the volume on the node + // For alpha, this path must be a directory + // Once block as a source is supported, then this path can point to a block device + Path string +} + +// Represents storage that is managed by an external CSI volume driver +type CSIPersistentVolumeSource struct { + // Driver is the name of the driver to use for this volume. + // Required. + Driver string + + // VolumeHandle is the unique volume name returned by the CSI volume + // plugin’s CreateVolume to refer to the volume on all subsequent calls. + // Required. + VolumeHandle string + + // Optional: The value to pass to ControllerPublishVolumeRequest. + // Defaults to false (read/write). 
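The following sketch (illustrative only, not part of the vendored file) shows one way the ConfigMap volume source above might be populated; it assumes LocalObjectReference, defined elsewhere in this file, carries a Name field, and the ConfigMap and key names are hypothetical:

// Illustrative only: mount a single key of a ConfigMap under a custom path,
// tolerating the ConfigMap being absent.
var (
	configMapOptional          = true
	configMapDefaultMode int32 = 0644
)

var exampleConfigMapVolume = ConfigMapVolumeSource{
	LocalObjectReference: LocalObjectReference{Name: "app-config"}, // Name field assumed on LocalObjectReference
	Items: []KeyToPath{
		{Key: "config.yaml", Path: "app/config.yaml"},
	},
	DefaultMode: &configMapDefaultMode,
	Optional:    &configMapOptional,
}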
+ // +optional + ReadOnly bool +} + +// ContainerPort represents a network port in a single container +type ContainerPort struct { + // Optional: If specified, this must be an IANA_SVC_NAME Each named port + // in a pod must have a unique name. + // +optional + Name string + // Optional: If specified, this must be a valid port number, 0 < x < 65536. + // If HostNetwork is specified, this must match ContainerPort. + // +optional + HostPort int32 + // Required: This must be a valid port number, 0 < x < 65536. + ContainerPort int32 + // Required: Supports "TCP" and "UDP". + // +optional + Protocol Protocol + // Optional: What host IP to bind the external port to. + // +optional + HostIP string +} + +// VolumeMount describes a mounting of a Volume within a container. +type VolumeMount struct { + // Required: This must match the Name of a Volume [above]. + Name string + // Optional: Defaults to false (read-write). + // +optional + ReadOnly bool + // Required. If the path is not an absolute path (e.g. some/path) it + // will be prepended with the appropriate root prefix for the operating + // system. On Linux this is '/', on Windows this is 'C:\'. + MountPath string + // Path within the volume from which the container's volume should be mounted. + // Defaults to "" (volume's root). + // +optional + SubPath string + // mountPropagation determines how mounts are propagated from the host + // to container and the other way around. + // When not set, MountPropagationHostToContainer is used. + // This field is alpha in 1.8 and can be reworked or removed in a future + // release. + // +optional + MountPropagation *MountPropagationMode +} + +// MountPropagationMode describes mount propagation. +type MountPropagationMode string + +const ( + // MountPropagationHostToContainer means that the volume in a container will + // receive new mounts from the host or other containers, but filesystems + // mounted inside the container won't be propagated to the host or other + // containers. + // Note that this mode is recursively applied to all mounts in the volume + // ("rslave" in Linux terminology). + MountPropagationHostToContainer MountPropagationMode = "HostToContainer" + // MountPropagationBidirectional means that the volume in a container will + // receive new mounts from the host or other containers, and its own mounts + // will be propagated from the container to the host or other containers. + // Note that this mode is recursively applied to all mounts in the volume + // ("rshared" in Linux terminology). + MountPropagationBidirectional MountPropagationMode = "Bidirectional" +) + +// VolumeDevice describes a mapping of a raw block device within a container. +type VolumeDevice struct { + // name must match the name of a persistentVolumeClaim in the pod + Name string + // devicePath is the path inside of the container that the device will be mapped to. + DevicePath string +} + +// EnvVar represents an environment variable present in a Container. +type EnvVar struct { + // Required: This must be a C_IDENTIFIER. + Name string + // Optional: no more than one of the following may be specified. + // Optional: Defaults to ""; variable references $(VAR_NAME) are expanded + // using the previous defined environment variables in the container and + // any service environment variables. If a variable cannot be resolved, + // the reference in the input string will be unchanged. The $(VAR_NAME) + // syntax can be escaped with a double $$, ie: $$(VAR_NAME). 
Escaped + // references will never be expanded, regardless of whether the variable + // exists or not. + // +optional + Value string + // Optional: Specifies a source the value of this var should come from. + // +optional + ValueFrom *EnvVarSource +} + +// EnvVarSource represents a source for the value of an EnvVar. +// Only one of its fields may be set. +type EnvVarSource struct { + // Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, + // metadata.uid, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP. + // +optional + FieldRef *ObjectFieldSelector + // Selects a resource of the container: only resources limits and requests + // (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + // +optional + ResourceFieldRef *ResourceFieldSelector + // Selects a key of a ConfigMap. + // +optional + ConfigMapKeyRef *ConfigMapKeySelector + // Selects a key of a secret in the pod's namespace. + // +optional + SecretKeyRef *SecretKeySelector +} + +// ObjectFieldSelector selects an APIVersioned field of an object. +type ObjectFieldSelector struct { + // Required: Version of the schema the FieldPath is written in terms of. + // If no value is specified, it will be defaulted to the APIVersion of the + // enclosing object. + APIVersion string + // Required: Path of the field to select in the specified API version + FieldPath string +} + +// ResourceFieldSelector represents container resources (cpu, memory) and their output format +type ResourceFieldSelector struct { + // Container name: required for volumes, optional for env vars + // +optional + ContainerName string + // Required: resource to select + Resource string + // Specifies the output format of the exposed resources, defaults to "1" + // +optional + Divisor resource.Quantity +} + +// Selects a key from a ConfigMap. +type ConfigMapKeySelector struct { + // The ConfigMap to select from. + LocalObjectReference + // The key to select. + Key string + // Specify whether the ConfigMap or it's key must be defined + // +optional + Optional *bool +} + +// SecretKeySelector selects a key of a Secret. +type SecretKeySelector struct { + // The name of the secret in the pod's namespace to select from. + LocalObjectReference + // The key of the secret to select from. Must be a valid secret key. + Key string + // Specify whether the Secret or it's key must be defined + // +optional + Optional *bool +} + +// EnvFromSource represents the source of a set of ConfigMaps +type EnvFromSource struct { + // An optional identifier to prepend to each key in the ConfigMap. + // +optional + Prefix string + // The ConfigMap to select from. + //+optional + ConfigMapRef *ConfigMapEnvSource + // The Secret to select from. + //+optional + SecretRef *SecretEnvSource +} + +// ConfigMapEnvSource selects a ConfigMap to populate the environment +// variables with. +// +// The contents of the target ConfigMap's Data field will represent the +// key-value pairs as environment variables. +type ConfigMapEnvSource struct { + // The ConfigMap to select from. + LocalObjectReference + // Specify whether the ConfigMap must be defined + // +optional + Optional *bool +} + +// SecretEnvSource selects a Secret to populate the environment +// variables with. +// +// The contents of the target Secret's Data field will represent the +// key-value pairs as environment variables. 
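As an illustration of EnvVar and EnvVarSource (not part of the vendored file; it again assumes LocalObjectReference exposes a Name field, and the variable, secret and key names are hypothetical), one literal variable plus one sourced from a Secret key could look like this:

// Illustrative only: a literal value using $(VAR_NAME) expansion, plus a value drawn from a Secret key.
var dbSecretOptional = false

var exampleEnv = []EnvVar{
	{
		Name:  "LOG_DIR",
		Value: "/var/log/$(POD_NAME)", // expanded from a previously defined variable (hypothetical)
	},
	{
		Name: "DB_PASSWORD",
		ValueFrom: &EnvVarSource{
			SecretKeyRef: &SecretKeySelector{
				LocalObjectReference: LocalObjectReference{Name: "db-credentials"}, // Name field assumed
				Key:                  "password",
				Optional:             &dbSecretOptional,
			},
		},
	},
}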
+type SecretEnvSource struct { + // The Secret to select from. + LocalObjectReference + // Specify whether the Secret must be defined + // +optional + Optional *bool +} + +// HTTPHeader describes a custom header to be used in HTTP probes +type HTTPHeader struct { + // The header field name + Name string + // The header field value + Value string +} + +// HTTPGetAction describes an action based on HTTP Get requests. +type HTTPGetAction struct { + // Optional: Path to access on the HTTP server. + // +optional + Path string + // Required: Name or number of the port to access on the container. + // +optional + Port intstr.IntOrString + // Optional: Host name to connect to, defaults to the pod IP. You + // probably want to set "Host" in httpHeaders instead. + // +optional + Host string + // Optional: Scheme to use for connecting to the host, defaults to HTTP. + // +optional + Scheme URIScheme + // Optional: Custom headers to set in the request. HTTP allows repeated headers. + // +optional + HTTPHeaders []HTTPHeader +} + +// URIScheme identifies the scheme used for connection to a host for Get actions +type URIScheme string + +const ( + // URISchemeHTTP means that the scheme used will be http:// + URISchemeHTTP URIScheme = "HTTP" + // URISchemeHTTPS means that the scheme used will be https:// + URISchemeHTTPS URIScheme = "HTTPS" +) + +// TCPSocketAction describes an action based on opening a socket +type TCPSocketAction struct { + // Required: Port to connect to. + // +optional + Port intstr.IntOrString + // Optional: Host name to connect to, defaults to the pod IP. + // +optional + Host string +} + +// ExecAction describes a "run in container" action. +type ExecAction struct { + // Command is the command line to execute inside the container, the working directory for the + // command is root ('/') in the container's filesystem. The command is simply exec'd, it is + // not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + // a shell, you need to explicitly call out to that shell. + // +optional + Command []string +} + +// Probe describes a health check to be performed against a container to determine whether it is +// alive or ready to receive traffic. +type Probe struct { + // The action taken to determine the health of a container + Handler + // Length of time before health checking is activated. In seconds. + // +optional + InitialDelaySeconds int32 + // Length of time before health checking times out. In seconds. + // +optional + TimeoutSeconds int32 + // How often (in seconds) to perform the probe. + // +optional + PeriodSeconds int32 + // Minimum consecutive successes for the probe to be considered successful after having failed. + // Must be 1 for liveness. + // +optional + SuccessThreshold int32 + // Minimum consecutive failures for the probe to be considered failed after having succeeded. + // +optional + FailureThreshold int32 +} + +// PullPolicy describes a policy for if/when to pull a container image +type PullPolicy string + +const ( + // PullAlways means that kubelet always attempts to pull the latest image. Container will fail If the pull fails. + PullAlways PullPolicy = "Always" + // PullNever means that kubelet never pulls an image, but only uses a local image. Container will fail if the image isn't present + PullNever PullPolicy = "Never" + // PullIfNotPresent means that kubelet pulls if the image isn't present on disk. Container will fail if the image isn't present and the pull fails. 
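A sketch of a readiness probe built from the types above (illustrative only, not part of the vendored file); Handler is defined further down in this file, and intstr.FromInt from the intstr package already imported by this file is assumed for the port value:

// Illustrative only: an HTTP readiness probe against /healthz on port 8080.
var exampleReadinessProbe = Probe{
	Handler: Handler{
		HTTPGet: &HTTPGetAction{
			Path:   "/healthz",
			Port:   intstr.FromInt(8080),
			Scheme: URISchemeHTTP,
		},
	},
	InitialDelaySeconds: 5,
	TimeoutSeconds:      2,
	PeriodSeconds:       10,
	FailureThreshold:    3,
}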
+ PullIfNotPresent PullPolicy = "IfNotPresent" +) + +// TerminationMessagePolicy describes how termination messages are retrieved from a container. +type TerminationMessagePolicy string + +const ( + // TerminationMessageReadFile is the default behavior and will set the container status message to + // the contents of the container's terminationMessagePath when the container exits. + TerminationMessageReadFile TerminationMessagePolicy = "File" + // TerminationMessageFallbackToLogsOnError will read the most recent contents of the container logs + // for the container status message when the container exits with an error and the + // terminationMessagePath has no contents. + TerminationMessageFallbackToLogsOnError TerminationMessagePolicy = "FallbackToLogsOnError" +) + +// Capability represent POSIX capabilities type +type Capability string + +// Capabilities represent POSIX capabilities that can be added or removed to a running container. +type Capabilities struct { + // Added capabilities + // +optional + Add []Capability + // Removed capabilities + // +optional + Drop []Capability +} + +// ResourceRequirements describes the compute resource requirements. +type ResourceRequirements struct { + // Limits describes the maximum amount of compute resources allowed. + // +optional + Limits ResourceList + // Requests describes the minimum amount of compute resources required. + // If Request is omitted for a container, it defaults to Limits if that is explicitly specified, + // otherwise to an implementation-defined value + // +optional + Requests ResourceList +} + +// Container represents a single container that is expected to be run on the host. +type Container struct { + // Required: This must be a DNS_LABEL. Each container in a pod must + // have a unique name. + Name string + // Required. + Image string + // Optional: The docker image's entrypoint is used if this is not provided; cannot be updated. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // +optional + Command []string + // Optional: The docker image's cmd is used if this is not provided; cannot be updated. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // +optional + Args []string + // Optional: Defaults to Docker's default. + // +optional + WorkingDir string + // +optional + Ports []ContainerPort + // List of sources to populate environment variables in the container. + // The keys defined within a source must be a C_IDENTIFIER. All invalid keys + // will be reported as an event when the container is starting. When a key exists in multiple + // sources, the value associated with the last source will take precedence. + // Values defined by an Env with a duplicate key will take precedence. + // Cannot be updated. + // +optional + EnvFrom []EnvFromSource + // +optional + Env []EnvVar + // Compute resource requirements. 
+ // +optional + Resources ResourceRequirements + // +optional + VolumeMounts []VolumeMount + // volumeDevices is the list of block devices to be used by the container. + // This is an alpha feature and may change in the future. + // +optional + VolumeDevices []VolumeDevice + // +optional + LivenessProbe *Probe + // +optional + ReadinessProbe *Probe + // +optional + Lifecycle *Lifecycle + // Required. + // +optional + TerminationMessagePath string + // +optional + TerminationMessagePolicy TerminationMessagePolicy + // Required: Policy for pulling images for this container + ImagePullPolicy PullPolicy + // Optional: SecurityContext defines the security options the container should be run with. + // If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + // +optional + SecurityContext *SecurityContext + + // Variables for interactive containers, these have very specialized use-cases (e.g. debugging) + // and shouldn't be used for general purpose containers. + // +optional + Stdin bool + // +optional + StdinOnce bool + // +optional + TTY bool +} + +// Handler defines a specific action that should be taken +// TODO: pass structured data to these actions, and document that data here. +type Handler struct { + // One and only one of the following should be specified. + // Exec specifies the action to take. + // +optional + Exec *ExecAction + // HTTPGet specifies the http request to perform. + // +optional + HTTPGet *HTTPGetAction + // TCPSocket specifies an action involving a TCP port. + // TODO: implement a realistic TCP lifecycle hook + // +optional + TCPSocket *TCPSocketAction +} + +// Lifecycle describes actions that the management system should take in response to container lifecycle +// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks +// until the action is complete, unless the container process fails, in which case the handler is aborted. +type Lifecycle struct { + // PostStart is called immediately after a container is created. If the handler fails, the container + // is terminated and restarted. + // +optional + PostStart *Handler + // PreStop is called immediately before a container is terminated. The reason for termination is + // passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated. + // +optional + PreStop *Handler +} + +// The below types are used by kube_client and api_server. + +type ConditionStatus string + +// These are valid condition statuses. "ConditionTrue" means a resource is in the condition; +// "ConditionFalse" means a resource is not in the condition; "ConditionUnknown" means kubernetes +// can't decide if a resource is in the condition or not. In the future, we could add other +// intermediate conditions, e.g. ConditionDegraded. +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) + +type ContainerStateWaiting struct { + // A brief CamelCase string indicating details about why the container is in waiting state. + // +optional + Reason string + // A human-readable message indicating details about why the container is in waiting state. 
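A minimal Container example using the fields defined above (illustrative only, not part of the vendored file); the names and image are hypothetical, and assigning "TCP" assumes Protocol, defined elsewhere in this file, is a string-based type:

// Illustrative only: a minimal container with one port and an explicit pull policy.
var exampleContainer = Container{
	Name:       "web",        // hypothetical container name
	Image:      "nginx:1.13", // hypothetical image
	Command:    []string{"nginx"},
	Args:       []string{"-g", "daemon off;"},
	WorkingDir: "/usr/share/nginx/html",
	Ports: []ContainerPort{
		{Name: "http", ContainerPort: 80, Protocol: "TCP"},
	},
	ImagePullPolicy:          PullIfNotPresent,
	TerminationMessagePolicy: TerminationMessageReadFile,
}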
+ // +optional + Message string +} + +type ContainerStateRunning struct { + // +optional + StartedAt metav1.Time +} + +type ContainerStateTerminated struct { + ExitCode int32 + // +optional + Signal int32 + // +optional + Reason string + // +optional + Message string + // +optional + StartedAt metav1.Time + // +optional + FinishedAt metav1.Time + // +optional + ContainerID string +} + +// ContainerState holds a possible state of container. +// Only one of its members may be specified. +// If none of them is specified, the default one is ContainerStateWaiting. +type ContainerState struct { + // +optional + Waiting *ContainerStateWaiting + // +optional + Running *ContainerStateRunning + // +optional + Terminated *ContainerStateTerminated +} + +type ContainerStatus struct { + // Each container in a pod must have a unique name. + Name string + // +optional + State ContainerState + // +optional + LastTerminationState ContainerState + // Ready specifies whether the container has passed its readiness check. + Ready bool + // Note that this is calculated from dead containers. But those containers are subject to + // garbage collection. This value will get capped at 5 by GC. + RestartCount int32 + Image string + ImageID string + // +optional + ContainerID string +} + +// PodPhase is a label for the condition of a pod at the current time. +type PodPhase string + +// These are the valid statuses of pods. +const ( + // PodPending means the pod has been accepted by the system, but one or more of the containers + // has not been started. This includes time before being bound to a node, as well as time spent + // pulling images onto the host. + PodPending PodPhase = "Pending" + // PodRunning means the pod has been bound to a node and all of the containers have been started. + // At least one container is still running or is in the process of being restarted. + PodRunning PodPhase = "Running" + // PodSucceeded means that all containers in the pod have voluntarily terminated + // with a container exit code of 0, and the system is not going to restart any of these containers. + PodSucceeded PodPhase = "Succeeded" + // PodFailed means that all containers in the pod have terminated, and at least one container has + // terminated in a failure (exited with a non-zero exit code or was stopped by the system). + PodFailed PodPhase = "Failed" + // PodUnknown means that for some reason the state of the pod could not be obtained, typically due + // to an error in communicating with the host of the pod. + PodUnknown PodPhase = "Unknown" +) + +type PodConditionType string + +// These are valid conditions of pod. +const ( + // PodScheduled represents status of the scheduling process for this pod. + PodScheduled PodConditionType = "PodScheduled" + // PodReady means the pod is able to service requests and should be added to the + // load balancing pools of all matching services. + PodReady PodConditionType = "Ready" + // PodInitialized means that all init containers in the pod have started successfully. + PodInitialized PodConditionType = "Initialized" + // PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler + // can't schedule the pod right now, for example due to insufficient resources in the cluster. 
+ PodReasonUnschedulable = "Unschedulable" +) + +type PodCondition struct { + Type PodConditionType + Status ConditionStatus + // +optional + LastProbeTime metav1.Time + // +optional + LastTransitionTime metav1.Time + // +optional + Reason string + // +optional + Message string +} + +// RestartPolicy describes how the container should be restarted. +// Only one of the following restart policies may be specified. +// If none of the following policies is specified, the default one +// is RestartPolicyAlways. +type RestartPolicy string + +const ( + RestartPolicyAlways RestartPolicy = "Always" + RestartPolicyOnFailure RestartPolicy = "OnFailure" + RestartPolicyNever RestartPolicy = "Never" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodList is a list of Pods. +type PodList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Pod +} + +// DNSPolicy defines how a pod's DNS will be configured. +type DNSPolicy string + +const ( + // DNSClusterFirstWithHostNet indicates that the pod should use cluster DNS + // first, if it is available, then fall back on the default + // (as determined by kubelet) DNS settings. + DNSClusterFirstWithHostNet DNSPolicy = "ClusterFirstWithHostNet" + + // DNSClusterFirst indicates that the pod should use cluster DNS + // first unless hostNetwork is true, if it is available, then + // fall back on the default (as determined by kubelet) DNS settings. + DNSClusterFirst DNSPolicy = "ClusterFirst" + + // DNSDefault indicates that the pod should use the default (as + // determined by kubelet) DNS settings. + DNSDefault DNSPolicy = "Default" + + // DNSNone indicates that the pod should use empty DNS settings. DNS + // parameters such as nameservers and search paths should be defined via + // DNSConfig. + DNSNone DNSPolicy = "None" +) + +// A node selector represents the union of the results of one or more label queries +// over a set of nodes; that is, it represents the OR of the selectors represented +// by the node selector terms. +type NodeSelector struct { + //Required. A list of node selector terms. The terms are ORed. + NodeSelectorTerms []NodeSelectorTerm +} + +// A null or empty node selector term matches no objects. +type NodeSelectorTerm struct { + //Required. A list of node selector requirements. The requirements are ANDed. + MatchExpressions []NodeSelectorRequirement +} + +// A node selector requirement is a selector that contains values, a key, and an operator +// that relates the key and values. +type NodeSelectorRequirement struct { + // The label key that the selector applies to. + Key string + // Represents a key's relationship to a set of values. + // Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + Operator NodeSelectorOperator + // An array of string values. If the operator is In or NotIn, + // the values array must be non-empty. If the operator is Exists or DoesNotExist, + // the values array must be empty. If the operator is Gt or Lt, the values + // array must have a single element, which will be interpreted as an integer. + // This array is replaced during a strategic merge patch. + // +optional + Values []string +} + +// A node selector operator is the set of operators that can be used in +// a node selector requirement. 
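A sketch of a node selector built from the types above (illustrative only, not part of the vendored file; the label key and values are hypothetical, and NodeSelectorOpIn is defined immediately below):

// Illustrative only: require nodes whose "disktype" label is "ssd".
var exampleNodeSelector = NodeSelector{
	NodeSelectorTerms: []NodeSelectorTerm{
		{
			MatchExpressions: []NodeSelectorRequirement{
				{Key: "disktype", Operator: NodeSelectorOpIn, Values: []string{"ssd"}},
			},
		},
	},
}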
+type NodeSelectorOperator string + +const ( + NodeSelectorOpIn NodeSelectorOperator = "In" + NodeSelectorOpNotIn NodeSelectorOperator = "NotIn" + NodeSelectorOpExists NodeSelectorOperator = "Exists" + NodeSelectorOpDoesNotExist NodeSelectorOperator = "DoesNotExist" + NodeSelectorOpGt NodeSelectorOperator = "Gt" + NodeSelectorOpLt NodeSelectorOperator = "Lt" +) + +// Affinity is a group of affinity scheduling rules. +type Affinity struct { + // Describes node affinity scheduling rules for the pod. + // +optional + NodeAffinity *NodeAffinity + // Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + // +optional + PodAffinity *PodAffinity + // Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + // +optional + PodAntiAffinity *PodAntiAffinity +} + +// Pod affinity is a group of inter pod affinity scheduling rules. +type PodAffinity struct { + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system will try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // +optional + // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm + + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system may or may not try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // +optional + RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm + // The scheduler will prefer to schedule pods to nodes that satisfy + // the affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e. + // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + // node(s) with the highest sum are the most preferred. + // +optional + PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm +} + +// Pod anti affinity is a group of inter pod anti affinity scheduling rules. +type PodAntiAffinity struct { + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // If the anti-affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the anti-affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. 
due to a pod label update), the + // system will try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // +optional + // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm + + // If the anti-affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the anti-affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system may or may not try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // +optional + RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm + // The scheduler will prefer to schedule pods to nodes that satisfy + // the anti-affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e. + // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling anti-affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + // node(s) with the highest sum are the most preferred. + // +optional + PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm +} + +// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) +type WeightedPodAffinityTerm struct { + // weight associated with matching the corresponding podAffinityTerm, + // in the range 1-100. + Weight int32 + // Required. A pod affinity term, associated with the corresponding weight. + PodAffinityTerm PodAffinityTerm +} + +// Defines a set of pods (namely those matching the labelSelector +// relative to the given namespace(s)) that this pod should be +// co-located (affinity) or not co-located (anti-affinity) with, +// where co-located is defined as running on a node whose value of +// the label with key matches that of any node on which +// a pod of the set of pods is running. +type PodAffinityTerm struct { + // A label query over a set of resources, in this case pods. + // +optional + LabelSelector *metav1.LabelSelector + // namespaces specifies which namespaces the labelSelector applies to (matches against); + // null or empty list means "this pod's namespace" + Namespaces []string + // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + // the labelSelector in the specified namespaces, where co-located is defined as running on a node + // whose value of the label with key topologyKey matches that of any node on which any of the + // selected pods is running. + // Empty topologyKey is not allowed. + // +optional + TopologyKey string +} + +// Node affinity is a group of node affinity scheduling rules. +type NodeAffinity struct { + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. 
+ // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to an update), the system + // will try to eventually evict the pod from its node. + // +optional + // RequiredDuringSchedulingRequiredDuringExecution *NodeSelector + + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to an update), the system + // may or may not try to eventually evict the pod from its node. + // +optional + RequiredDuringSchedulingIgnoredDuringExecution *NodeSelector + // The scheduler will prefer to schedule pods to nodes that satisfy + // the affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e. + // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node matches the corresponding matchExpressions; the + // node(s) with the highest sum are the most preferred. + // +optional + PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTerm +} + +// An empty preferred scheduling term matches all objects with implicit weight 0 +// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). +type PreferredSchedulingTerm struct { + // Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + Weight int32 + // A node selector term, associated with the corresponding weight. + Preference NodeSelectorTerm +} + +// The node this Taint is attached to has the "effect" on +// any pod that does not tolerate the Taint. +type Taint struct { + // Required. The taint key to be applied to a node. + Key string + // Required. The taint value corresponding to the taint key. + // +optional + Value string + // Required. The effect of the taint on pods + // that do not tolerate the taint. + // Valid effects are NoSchedule, PreferNoSchedule and NoExecute. + Effect TaintEffect + // TimeAdded represents the time at which the taint was added. + // It is only written for NoExecute taints. + // +optional + TimeAdded *metav1.Time +} + +type TaintEffect string + +const ( + // Do not allow new pods to schedule onto the node unless they tolerate the taint, + // but allow all pods submitted to Kubelet without going through the scheduler + // to start, and allow all already-running pods to continue running. + // Enforced by the scheduler. + TaintEffectNoSchedule TaintEffect = "NoSchedule" + // Like TaintEffectNoSchedule, but the scheduler tries not to schedule + // new pods onto the node, rather than prohibiting new pods from scheduling + // onto the node entirely. Enforced by the scheduler. + TaintEffectPreferNoSchedule TaintEffect = "PreferNoSchedule" + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // Like TaintEffectNoSchedule, but additionally do not allow pods submitted to + // Kubelet without going through the scheduler to start. + // Enforced by Kubelet and the scheduler. + // TaintEffectNoScheduleNoAdmit TaintEffect = "NoScheduleNoAdmit" + + // Evict any already-running pods that do not tolerate the taint. 
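As an illustration of the anti-affinity types above (not part of the vendored file; it uses the metav1.LabelSelector type already imported by this file, and the app label is hypothetical), a preferred rule that spreads matching pods across hostnames could be expressed as:

// Illustrative only: prefer spreading pods labelled app=web across distinct hostnames.
var examplePodAntiAffinity = PodAntiAffinity{
	PreferredDuringSchedulingIgnoredDuringExecution: []WeightedPodAffinityTerm{
		{
			Weight: 100,
			PodAffinityTerm: PodAffinityTerm{
				LabelSelector: &metav1.LabelSelector{
					MatchLabels: map[string]string{"app": "web"},
				},
				TopologyKey: "kubernetes.io/hostname",
			},
		},
	},
}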
+ // Currently enforced by NodeController. + TaintEffectNoExecute TaintEffect = "NoExecute" +) + +// The pod this Toleration is attached to tolerates any taint that matches +// the triple using the matching operator . +type Toleration struct { + // Key is the taint key that the toleration applies to. Empty means match all taint keys. + // If the key is empty, operator must be Exists; this combination means to match all values and all keys. + // +optional + Key string + // Operator represents a key's relationship to the value. + // Valid operators are Exists and Equal. Defaults to Equal. + // Exists is equivalent to wildcard for value, so that a pod can + // tolerate all taints of a particular category. + // +optional + Operator TolerationOperator + // Value is the taint value the toleration matches to. + // If the operator is Exists, the value should be empty, otherwise just a regular string. + // +optional + Value string + // Effect indicates the taint effect to match. Empty means match all taint effects. + // When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + // +optional + Effect TaintEffect + // TolerationSeconds represents the period of time the toleration (which must be + // of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + // it is not set, which means tolerate the taint forever (do not evict). Zero and + // negative values will be treated as 0 (evict immediately) by the system. + // +optional + TolerationSeconds *int64 +} + +// A toleration operator is the set of operators that can be used in a toleration. +type TolerationOperator string + +const ( + TolerationOpExists TolerationOperator = "Exists" + TolerationOpEqual TolerationOperator = "Equal" +) + +// PodSpec is a description of a pod +type PodSpec struct { + Volumes []Volume + // List of initialization containers belonging to the pod. + InitContainers []Container + // List of containers belonging to the pod. + Containers []Container + // +optional + RestartPolicy RestartPolicy + // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. + // Value must be non-negative integer. The value zero indicates delete immediately. + // If this value is nil, the default grace period will be used instead. + // The grace period is the duration in seconds after the processes running in the pod are sent + // a termination signal and the time when the processes are forcibly halted with a kill signal. + // Set this value longer than the expected cleanup time for your process. + // +optional + TerminationGracePeriodSeconds *int64 + // Optional duration in seconds relative to the StartTime that the pod may be active on a node + // before the system actively tries to terminate the pod; value must be positive integer + // +optional + ActiveDeadlineSeconds *int64 + // Set DNS policy for the pod. + // Defaults to "ClusterFirst". + // Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + // DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + // To have DNS options set along with hostNetwork, you have to specify DNS policy + // explicitly to 'ClusterFirstWithHostNet'. 
+ // +optional + DNSPolicy DNSPolicy + // NodeSelector is a selector which must be true for the pod to fit on a node + // +optional + NodeSelector map[string]string + + // ServiceAccountName is the name of the ServiceAccount to use to run this pod + // The pod will be allowed to use secrets referenced by the ServiceAccount + ServiceAccountName string + // AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. + // +optional + AutomountServiceAccountToken *bool + + // NodeName is a request to schedule this pod onto a specific node. If it is non-empty, + // the scheduler simply schedules this pod onto that node, assuming that it fits resource + // requirements. + // +optional + NodeName string + // SecurityContext holds pod-level security attributes and common container settings. + // Optional: Defaults to empty. See type description for default values of each field. + // +optional + SecurityContext *PodSecurityContext + // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. + // If specified, these secrets will be passed to individual puller implementations for them to use. For example, + // in the case of docker, only DockerConfig type secrets are honored. + // +optional + ImagePullSecrets []LocalObjectReference + // Specifies the hostname of the Pod. + // If not specified, the pod's hostname will be set to a system-defined value. + // +optional + Hostname string + // If specified, the fully qualified Pod hostname will be "...svc.". + // If not specified, the pod will not have a domainname at all. + // +optional + Subdomain string + // If specified, the pod's scheduling constraints + // +optional + Affinity *Affinity + // If specified, the pod will be dispatched by specified scheduler. + // If not specified, the pod will be dispatched by default scheduler. + // +optional + SchedulerName string + // If specified, the pod's tolerations. + // +optional + Tolerations []Toleration + // HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts + // file if specified. This is only valid for non-hostNetwork pods. + // +optional + HostAliases []HostAlias + // If specified, indicates the pod's priority. "SYSTEM" is a special keyword + // which indicates the highest priority. Any other name must be defined by + // creating a PriorityClass object with that name. + // If not specified, the pod priority will be default or zero if there is no + // default. + // +optional + PriorityClassName string + // The priority value. Various system components use this field to find the + // priority of the pod. When Priority Admission Controller is enabled, it + // prevents users from setting this field. The admission controller populates + // this field from PriorityClassName. + // The higher the value, the higher the priority. + // +optional + Priority *int32 + // Specifies the DNS parameters of a pod. + // Parameters specified here will be merged to the generated DNS + // configuration based on DNSPolicy. + // +optional + DNSConfig *PodDNSConfig +} + +// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the +// pod's hosts file. 
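An example Toleration using the fields and constants above (illustrative only, not part of the vendored file; the taint key is hypothetical): tolerate a NoExecute taint for five minutes before being evicted.

// Illustrative only: tolerate a NoExecute taint for 300 seconds.
var notReadyTolerationSeconds int64 = 300

var exampleToleration = Toleration{
	Key:               "node.kubernetes.io/not-ready", // hypothetical taint key
	Operator:          TolerationOpExists,
	Effect:            TaintEffectNoExecute,
	TolerationSeconds: &notReadyTolerationSeconds,
}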
+type HostAlias struct { + IP string + Hostnames []string +} + +// Sysctl defines a kernel parameter to be set +type Sysctl struct { + // Name of a property to set + Name string + // Value of a property to set + Value string +} + +// PodSecurityContext holds pod-level security attributes and common container settings. +// Some fields are also present in container.securityContext. Field values of +// container.securityContext take precedence over field values of PodSecurityContext. +type PodSecurityContext struct { + // Use the host's network namespace. If this option is set, the ports that will be + // used must be specified. + // Optional: Default to false + // +k8s:conversion-gen=false + // +optional + HostNetwork bool + // Use the host's pid namespace. + // Optional: Default to false. + // +k8s:conversion-gen=false + // +optional + HostPID bool + // Use the host's ipc namespace. + // Optional: Default to false. + // +k8s:conversion-gen=false + // +optional + HostIPC bool + // The SELinux context to be applied to all containers. + // If unspecified, the container runtime will allocate a random SELinux context for each + // container. May also be set in SecurityContext. If set in + // both SecurityContext and PodSecurityContext, the value specified in SecurityContext + // takes precedence for that container. + // +optional + SELinuxOptions *SELinuxOptions + // The UID to run the entrypoint of the container process. + // Defaults to user specified in image metadata if unspecified. + // May also be set in SecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence + // for that container. + // +optional + RunAsUser *int64 + // Indicates that the container must run as a non-root user. + // If true, the Kubelet will validate the image at runtime to ensure that it + // does not run as UID 0 (root) and fail to start the container if it does. + // If unset or false, no such validation will be performed. + // May also be set in SecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + RunAsNonRoot *bool + // A list of groups applied to the first process run in each container, in addition + // to the container's primary GID. If unspecified, no groups will be added to + // any container. + // +optional + SupplementalGroups []int64 + // A special supplemental group that applies to all containers in a pod. + // Some volume types allow the Kubelet to change the ownership of that volume + // to be owned by the pod: + // + // 1. The owning GID will be the FSGroup + // 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + // 3. The permission bits are OR'd with rw-rw---- + // + // If unset, the Kubelet will not modify the ownership and permissions of any volume. + // +optional + FSGroup *int64 +} + +// PodQOSClass defines the supported qos classes of Pods. +type PodQOSClass string + +const ( + // PodQOSGuaranteed is the Guaranteed qos class. + PodQOSGuaranteed PodQOSClass = "Guaranteed" + // PodQOSBurstable is the Burstable qos class. + PodQOSBurstable PodQOSClass = "Burstable" + // PodQOSBestEffort is the BestEffort qos class. + PodQOSBestEffort PodQOSClass = "BestEffort" +) + +// PodDNSConfig defines the DNS parameters of a pod in addition to +// those generated from DNSPolicy. +type PodDNSConfig struct { + // A list of DNS name server IP addresses. 
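// Illustrative sketch, not part of the vendored file above: the precedence rule
// documented on PodSecurityContext (a value set on the container's SecurityContext wins
// over the pod-level value), reduced to the two *int64 RunAsUser fields.
// effectiveRunAsUser is a made-up helper.
func effectiveRunAsUser(podLevel, containerLevel *int64) *int64 {
	if containerLevel != nil {
		return containerLevel
	}
	return podLevel // may be nil, meaning "use the user from the image metadata"
}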
+ // This will be appended to the base nameservers generated from DNSPolicy. + // Duplicated nameservers will be removed. + // +optional + Nameservers []string + // A list of DNS search domains for host-name lookup. + // This will be appended to the base search paths generated from DNSPolicy. + // Duplicated search paths will be removed. + // +optional + Searches []string + // A list of DNS resolver options. + // This will be merged with the base options generated from DNSPolicy. + // Duplicated entries will be removed. Resolution options given in Options + // will override those that appear in the base DNSPolicy. + // +optional + Options []PodDNSConfigOption +} + +// PodDNSConfigOption defines DNS resolver options of a pod. +type PodDNSConfigOption struct { + // Required. + Name string + // +optional + Value *string +} + +// PodStatus represents information about the status of a pod. Status may trail the actual +// state of a system. +type PodStatus struct { + // +optional + Phase PodPhase + // +optional + Conditions []PodCondition + // A human readable message indicating details about why the pod is in this state. + // +optional + Message string + // A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted' + // +optional + Reason string + + // +optional + HostIP string + // +optional + PodIP string + + // Date and time at which the object was acknowledged by the Kubelet. + // This is before the Kubelet pulled the container image(s) for the pod. + // +optional + StartTime *metav1.Time + // +optional + QOSClass PodQOSClass + + // The list has one entry per init container in the manifest. The most recent successful + // init container will have ready = true, the most recently started container will have + // startTime set. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-and-container-status + InitContainerStatuses []ContainerStatus + // The list has one entry per container in the manifest. Each entry is + // currently the output of `docker inspect`. This output format is *not* + // final and should not be relied upon. + // TODO: Make real decisions about what our info should look like. Re-enable fuzz test + // when we have done this. + // +optional + ContainerStatuses []ContainerStatus +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded +type PodStatusResult struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + // Status represents the current information about a pod. This data may not be up + // to date. + // +optional + Status PodStatus +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Pod is a collection of containers, used as either input (create, update) or as output (list, get). +type Pod struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the behavior of a pod. + // +optional + Spec PodSpec + + // Status represents the current information about a pod. This data may not be up + // to date. + // +optional + Status PodStatus +} + +// PodTemplateSpec describes the data a pod should have when created from a template +type PodTemplateSpec struct { + // Metadata of the pods created from this template. + // +optional + metav1.ObjectMeta + + // Spec defines the behavior of a pod. 
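// Illustrative sketch, not part of the vendored file above: the merge rule documented
// on PodDNSConfig.Nameservers (entries are appended to the base list generated from
// DNSPolicy and duplicates are dropped). mergeNameservers is a made-up helper, not the
// kubelet's implementation.
func mergeNameservers(base []string, cfg *PodDNSConfig) []string {
	seen := map[string]bool{}
	merged := []string{}
	add := func(servers []string) {
		for _, s := range servers {
			if !seen[s] {
				seen[s] = true
				merged = append(merged, s)
			}
		}
	}
	add(base)
	if cfg != nil {
		add(cfg.Nameservers)
	}
	return merged
}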
+ // +optional + Spec PodSpec +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodTemplate describes a template for creating copies of a predefined pod. +type PodTemplate struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Template defines the pods that will be created from this pod template + // +optional + Template PodTemplateSpec +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodTemplateList is a list of PodTemplates. +type PodTemplateList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []PodTemplate +} + +// ReplicationControllerSpec is the specification of a replication controller. +// As the internal representation of a replication controller, it may have either +// a TemplateRef or a Template set. +type ReplicationControllerSpec struct { + // Replicas is the number of desired replicas. + Replicas int32 + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 + + // Selector is a label query over pods that should match the Replicas count. + Selector map[string]string + + // TemplateRef is a reference to an object that describes the pod that will be created if + // insufficient replicas are detected. This reference is ignored if a Template is set. + // Must be set before converting to a versioned API object + // +optional + //TemplateRef *ObjectReference + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. Internally, this takes precedence over a + // TemplateRef. + // +optional + Template *PodTemplateSpec +} + +// ReplicationControllerStatus represents the current status of a replication +// controller. +type ReplicationControllerStatus struct { + // Replicas is the number of actual replicas. + Replicas int32 + + // The number of pods that have labels matching the labels of the pod template of the replication controller. + // +optional + FullyLabeledReplicas int32 + + // The number of ready replicas for this replication controller. + // +optional + ReadyReplicas int32 + + // The number of available replicas (ready for at least minReadySeconds) for this replication controller. + // +optional + AvailableReplicas int32 + + // ObservedGeneration is the most recent generation observed by the controller. + // +optional + ObservedGeneration int64 + + // Represents the latest available observations of a replication controller's current state. + // +optional + Conditions []ReplicationControllerCondition +} + +type ReplicationControllerConditionType string + +// These are valid conditions of a replication controller. +const ( + // ReplicationControllerReplicaFailure is added in a replication controller when one of its pods + // fails to be created due to insufficient quota, limit ranges, pod security policy, node selectors, + // etc. or deleted due to kubelet being down or finalizers are failing. + ReplicationControllerReplicaFailure ReplicationControllerConditionType = "ReplicaFailure" +) + +// ReplicationControllerCondition describes the state of a replication controller at a certain point. +type ReplicationControllerCondition struct { + // Type of replication controller condition. + Type ReplicationControllerConditionType + // Status of the condition, one of True, False, Unknown. 
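// Illustrative sketch, not part of the vendored file above: relating
// ReplicationControllerSpec.Replicas to ReplicationControllerStatus.AvailableReplicas
// as documented (a replica only counts as available once it has been ready for
// MinReadySeconds). rcFullyAvailable is a made-up helper, not controller logic.
func rcFullyAvailable(spec ReplicationControllerSpec, status ReplicationControllerStatus) bool {
	return status.AvailableReplicas >= spec.Replicas
}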
+ Status ConditionStatus + // The last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time + // The reason for the condition's last transition. + // +optional + Reason string + // A human readable message indicating details about the transition. + // +optional + Message string +} + +// +genclient +// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/kubernetes/pkg/apis/autoscaling.Scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ReplicationController represents the configuration of a replication controller. +type ReplicationController struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the desired behavior of this replication controller. + // +optional + Spec ReplicationControllerSpec + + // Status is the current status of this replication controller. This data may be + // out of date by some window of time. + // +optional + Status ReplicationControllerStatus +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ReplicationControllerList is a collection of replication controllers. +type ReplicationControllerList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []ReplicationController +} + +const ( + // ClusterIPNone - do not assign a cluster IP + // no proxying required and no environment variables should be created for pods + ClusterIPNone = "None" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceList holds a list of services. +type ServiceList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Service +} + +// Session Affinity Type string +type ServiceAffinity string + +const ( + // ServiceAffinityClientIP is the Client IP based. + ServiceAffinityClientIP ServiceAffinity = "ClientIP" + + // ServiceAffinityNone - no session affinity. + ServiceAffinityNone ServiceAffinity = "None" +) + +const ( + // DefaultClientIPServiceAffinitySeconds is the default timeout seconds + // of Client IP based session affinity - 3 hours. + DefaultClientIPServiceAffinitySeconds int32 = 10800 + // MaxClientIPServiceAffinitySeconds is the max timeout seconds + // of Client IP based session affinity - 1 day. + MaxClientIPServiceAffinitySeconds int32 = 86400 +) + +// SessionAffinityConfig represents the configurations of session affinity. +type SessionAffinityConfig struct { + // clientIP contains the configurations of Client IP based session affinity. + // +optional + ClientIP *ClientIPConfig +} + +// ClientIPConfig represents the configurations of Client IP based session affinity. +type ClientIPConfig struct { + // timeoutSeconds specifies the seconds of ClientIP type session sticky time. + // The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + // Default value is 10800(for 3 hours). + // +optional + TimeoutSeconds *int32 +} + +// Service Type string describes ingress methods for a service +type ServiceType string + +const ( + // ServiceTypeClusterIP means a service will only be accessible inside the + // cluster, via the ClusterIP. + ServiceTypeClusterIP ServiceType = "ClusterIP" + + // ServiceTypeNodePort means a service will be exposed on one port of + // every node, in addition to 'ClusterIP' type. 
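// Illustrative sketch, not part of the vendored file above: the bounds documented for
// ClientIPConfig.TimeoutSeconds (default 10800, must be >0 and <=86400), using the
// constants declared above. clientIPTimeout is a made-up helper.
func clientIPTimeout(cfg *SessionAffinityConfig) (int32, bool) {
	if cfg == nil || cfg.ClientIP == nil || cfg.ClientIP.TimeoutSeconds == nil {
		return DefaultClientIPServiceAffinitySeconds, true
	}
	t := *cfg.ClientIP.TimeoutSeconds
	if t <= 0 || t > MaxClientIPServiceAffinitySeconds {
		return 0, false // outside the documented range
	}
	return t, true
}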
+ ServiceTypeNodePort ServiceType = "NodePort" + + // ServiceTypeLoadBalancer means a service will be exposed via an + // external load balancer (if the cloud provider supports it), in addition + // to 'NodePort' type. + ServiceTypeLoadBalancer ServiceType = "LoadBalancer" + + // ServiceTypeExternalName means a service consists of only a reference to + // an external name that kubedns or equivalent will return as a CNAME + // record, with no exposing or proxying of any pods involved. + ServiceTypeExternalName ServiceType = "ExternalName" +) + +// Service External Traffic Policy Type string +type ServiceExternalTrafficPolicyType string + +const ( + // ServiceExternalTrafficPolicyTypeLocal specifies node-local endpoints behavior. + ServiceExternalTrafficPolicyTypeLocal ServiceExternalTrafficPolicyType = "Local" + // ServiceExternalTrafficPolicyTypeCluster specifies cluster-wide (legacy) behavior. + ServiceExternalTrafficPolicyTypeCluster ServiceExternalTrafficPolicyType = "Cluster" +) + +// ServiceStatus represents the current status of a service +type ServiceStatus struct { + // LoadBalancer contains the current status of the load-balancer, + // if one is present. + // +optional + LoadBalancer LoadBalancerStatus +} + +// LoadBalancerStatus represents the status of a load-balancer +type LoadBalancerStatus struct { + // Ingress is a list containing ingress points for the load-balancer; + // traffic intended for the service should be sent to these ingress points. + // +optional + Ingress []LoadBalancerIngress +} + +// LoadBalancerIngress represents the status of a load-balancer ingress point: +// traffic intended for the service should be sent to an ingress point. +type LoadBalancerIngress struct { + // IP is set for load-balancer ingress points that are IP based + // (typically GCE or OpenStack load-balancers) + // +optional + IP string + + // Hostname is set for load-balancer ingress points that are DNS based + // (typically AWS load-balancers) + // +optional + Hostname string +} + +// ServiceSpec describes the attributes that a user creates on a service +type ServiceSpec struct { + // Type determines how the Service is exposed. Defaults to ClusterIP. Valid + // options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + // "ExternalName" maps to the specified externalName. + // "ClusterIP" allocates a cluster-internal IP address for load-balancing to + // endpoints. Endpoints are determined by the selector or if that is not + // specified, by manual construction of an Endpoints object. If clusterIP is + // "None", no virtual IP is allocated and the endpoints are published as a + // set of endpoints rather than a stable IP. + // "NodePort" builds on ClusterIP and allocates a port on every node which + // routes to the clusterIP. + // "LoadBalancer" builds on NodePort and creates an + // external load-balancer (if supported in the current cloud) which routes + // to the clusterIP. + // More info: https://kubernetes.io/docs/concepts/services-networking/service/ + // +optional + Type ServiceType + + // Required: The list of ports that are exposed by this service. + Ports []ServicePort + + // Route service traffic to pods with label keys and values matching this + // selector. If empty or not present, the service is assumed to have an + // external process managing its endpoints, which Kubernetes will not + // modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + // Ignored if type is ExternalName. 
+ // More info: https://kubernetes.io/docs/concepts/services-networking/service/ + Selector map[string]string + + // ClusterIP is the IP address of the service and is usually assigned + // randomly by the master. If an address is specified manually and is not in + // use by others, it will be allocated to the service; otherwise, creation + // of the service will fail. This field can not be changed through updates. + // Valid values are "None", empty string (""), or a valid IP address. "None" + // can be specified for headless services when proxying is not required. + // Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if + // type is ExternalName. + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + // +optional + ClusterIP string + + // ExternalName is the external reference that kubedns or equivalent will + // return as a CNAME record for this service. No proxying will be involved. + // Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) + // and requires Type to be ExternalName. + ExternalName string + + // ExternalIPs are used by external load balancers, or can be set by + // users to handle external traffic that arrives at a node. + // +optional + ExternalIPs []string + + // Only applies to Service Type: LoadBalancer + // LoadBalancer will get created with the IP specified in this field. + // This feature depends on whether the underlying cloud-provider supports specifying + // the loadBalancerIP when a load balancer is created. + // This field will be ignored if the cloud-provider does not support the feature. + // +optional + LoadBalancerIP string + + // Optional: Supports "ClientIP" and "None". Used to maintain session affinity. + // +optional + SessionAffinity ServiceAffinity + + // sessionAffinityConfig contains the configurations of session affinity. + // +optional + SessionAffinityConfig *SessionAffinityConfig + + // Optional: If specified and supported by the platform, this will restrict traffic through the cloud-provider + // load-balancer will be restricted to the specified client IPs. This field will be ignored if the + // cloud-provider does not support the feature." + // +optional + LoadBalancerSourceRanges []string + + // externalTrafficPolicy denotes if this Service desires to route external + // traffic to node-local or cluster-wide endpoints. "Local" preserves the + // client source IP and avoids a second hop for LoadBalancer and Nodeport + // type services, but risks potentially imbalanced traffic spreading. + // "Cluster" obscures the client source IP and may cause a second hop to + // another node, but should have good overall load-spreading. + // +optional + ExternalTrafficPolicy ServiceExternalTrafficPolicyType + + // healthCheckNodePort specifies the healthcheck nodePort for the service. + // If not specified, HealthCheckNodePort is created by the service api + // backend with the allocated nodePort. Will use user-specified nodePort value + // if specified by the client. Only effects when Type is set to LoadBalancer + // and ExternalTrafficPolicy is set to Local. + // +optional + HealthCheckNodePort int32 + + // publishNotReadyAddresses, when set to true, indicates that DNS implementations + // must publish the notReadyAddresses of subsets for the Endpoints associated with + // the Service. The default value is false. 
+ // The primary use case for setting this field is to use a StatefulSet's Headless Service + // to propagate SRV records for its Pods without respect to their readiness for purpose + // of peer discovery. + // This field will replace the service.alpha.kubernetes.io/tolerate-unready-endpoints + // when that annotation is deprecated and all clients have been converted to use this + // field. + // +optional + PublishNotReadyAddresses bool +} + +type ServicePort struct { + // Optional if only one ServicePort is defined on this service: The + // name of this port within the service. This must be a DNS_LABEL. + // All ports within a ServiceSpec must have unique names. This maps to + // the 'Name' field in EndpointPort objects. + Name string + + // The IP protocol for this port. Supports "TCP" and "UDP". + Protocol Protocol + + // The port that will be exposed on the service. + Port int32 + + // Optional: The target port on pods selected by this service. If this + // is a string, it will be looked up as a named port in the target + // Pod's container ports. If this is not specified, the value + // of the 'port' field is used (an identity map). + // This field is ignored for services with clusterIP=None, and should be + // omitted or set equal to the 'port' field. + TargetPort intstr.IntOrString + + // The port on each node on which this service is exposed. + // Default is to auto-allocate a port if the ServiceType of this Service requires one. + NodePort int32 +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Service is a named abstraction of software service (for example, mysql) consisting of local port +// (for example 3306) that the proxy listens on, and the selector that determines which pods +// will answer requests sent through the proxy. +type Service struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the behavior of a service. + // +optional + Spec ServiceSpec + + // Status represents the current status of a service. + // +optional + Status ServiceStatus +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceAccount binds together: +// * a name, understood by users, and perhaps by peripheral systems, for an identity +// * a principal that can be authenticated and authorized +// * a set of secrets +type ServiceAccount struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount + Secrets []ObjectReference + + // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images + // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets + // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. + // +optional + ImagePullSecrets []LocalObjectReference + + // AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. + // Can be overridden at the pod level. 
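// Illustrative sketch, not part of the vendored file above: a Service wiring together
// ServiceSpec, ServicePort and the selector documented above. It assumes the same
// package, with the metav1 and intstr packages already imported by this file and the
// Protocol type declared earlier in it; names and port numbers are made up.
func exampleService() Service {
	return Service{
		ObjectMeta: metav1.ObjectMeta{Name: "mysvc", Namespace: "default"},
		Spec: ServiceSpec{
			Type:     ServiceTypeNodePort,
			Selector: map[string]string{"app": "mysvc"},
			Ports: []ServicePort{{
				Name:       "http",
				Protocol:   "TCP",
				Port:       80,
				TargetPort: intstr.FromInt(8080),
			}},
			SessionAffinity:       ServiceAffinityClientIP,
			ExternalTrafficPolicy: ServiceExternalTrafficPolicyTypeLocal,
		},
	}
}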
+ // +optional + AutomountServiceAccountToken *bool +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceAccountList is a list of ServiceAccount objects +type ServiceAccountList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []ServiceAccount +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Endpoints is a collection of endpoints that implement the actual service. Example: +// Name: "mysvc", +// Subsets: [ +// { +// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], +// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] +// }, +// { +// Addresses: [{"ip": "10.10.3.3"}], +// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}] +// }, +// ] +type Endpoints struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // The set of all endpoints is the union of all subsets. + Subsets []EndpointSubset +} + +// EndpointSubset is a group of addresses with a common set of ports. The +// expanded set of endpoints is the Cartesian product of Addresses x Ports. +// For example, given: +// { +// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], +// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] +// } +// The resulting set of endpoints can be viewed as: +// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ], +// b: [ 10.10.1.1:309, 10.10.2.2:309 ] +type EndpointSubset struct { + Addresses []EndpointAddress + NotReadyAddresses []EndpointAddress + Ports []EndpointPort +} + +// EndpointAddress is a tuple that describes single IP address. +type EndpointAddress struct { + // The IP of this endpoint. + // IPv6 is also accepted but not fully supported on all platforms. Also, certain + // kubernetes components, like kube-proxy, are not IPv6 ready. + // TODO: This should allow hostname or IP, see #4447. + IP string + // Optional: Hostname of this endpoint + // Meant to be used by DNS servers etc. + // +optional + Hostname string + // Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node. + // +optional + NodeName *string + // Optional: The kubernetes object related to the entry point. + TargetRef *ObjectReference +} + +// EndpointPort is a tuple that describes a single port. +type EndpointPort struct { + // The name of this port (corresponds to ServicePort.Name). Optional + // if only one port is defined. Must be a DNS_LABEL. + Name string + + // The port number. + Port int32 + + // The IP protocol for this port. + Protocol Protocol +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EndpointsList is a list of endpoints. +type EndpointsList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Endpoints +} + +// NodeSpec describes the attributes that a node is created with. +type NodeSpec struct { + // PodCIDR represents the pod IP range assigned to the node + // Note: assigning IP ranges to nodes might need to be revisited when we support migratable IPs. + // +optional + PodCIDR string + + // External ID of the node assigned by some machine database (e.g. a cloud provider) + // +optional + ExternalID string + + // ID of the node assigned by the cloud provider + // Note: format is "<ProviderName>://<ProviderSpecificNodeID>" + // +optional + ProviderID string + + // Unschedulable controls node schedulability of new pods. By default node is schedulable. + // +optional + Unschedulable bool + + // If specified, the node's taints.
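// Illustrative sketch, not part of the vendored file above: the Cartesian-product
// expansion documented on EndpointSubset, where every ready address is combined with
// every port. expandSubset is a made-up helper that renders the result as
// "name -> ip:port" strings and assumes the standard fmt package is available.
func expandSubset(s EndpointSubset) []string {
	var out []string
	for _, p := range s.Ports {
		for _, a := range s.Addresses {
			out = append(out, fmt.Sprintf("%s -> %s:%d", p.Name, a.IP, p.Port))
		}
	}
	return out
}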
+ // +optional + Taints []Taint + + // If specified, the source to get node configuration from + // The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field + // +optional + ConfigSource *NodeConfigSource +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NodeConfigSource specifies a source of node configuration. Exactly one subfield must be non-nil. +type NodeConfigSource struct { + metav1.TypeMeta + ConfigMapRef *ObjectReference +} + +// DaemonEndpoint contains information about a single Daemon endpoint. +type DaemonEndpoint struct { + /* + The port tag was not properly in quotes in earlier releases, so it must be + uppercase for backwards compatibility (since it was falling back to var name of + 'Port'). + */ + + // Port number of the given endpoint. + Port int32 +} + +// NodeDaemonEndpoints lists ports opened by daemons running on the Node. +type NodeDaemonEndpoints struct { + // Endpoint on which Kubelet is listening. + // +optional + KubeletEndpoint DaemonEndpoint +} + +// NodeSystemInfo is a set of ids/uuids to uniquely identify the node. +type NodeSystemInfo struct { + // MachineID reported by the node. For unique machine identification + // in the cluster this field is preferred. Learn more from man(5) + // machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html + MachineID string + // SystemUUID reported by the node. For unique machine identification + // MachineID is preferred. This field is specific to Red Hat hosts + // https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html + SystemUUID string + // Boot ID reported by the node. + BootID string + // Kernel Version reported by the node. + KernelVersion string + // OS Image reported by the node. + OSImage string + // ContainerRuntime Version reported by the node. + ContainerRuntimeVersion string + // Kubelet Version reported by the node. + KubeletVersion string + // KubeProxy Version reported by the node. + KubeProxyVersion string + // The Operating System reported by the node + OperatingSystem string + // The Architecture reported by the node + Architecture string +} + +// NodeStatus is information about the current status of a node. +type NodeStatus struct { + // Capacity represents the total resources of a node. + // +optional + Capacity ResourceList + // Allocatable represents the resources of a node that are available for scheduling. + // +optional + Allocatable ResourceList + // NodePhase is the current lifecycle phase of the node. + // +optional + Phase NodePhase + // Conditions is an array of current node conditions. + // +optional + Conditions []NodeCondition + // Queried from cloud provider, if available. + // +optional + Addresses []NodeAddress + // Endpoints of daemons running on the Node. + // +optional + DaemonEndpoints NodeDaemonEndpoints + // Set of ids/uuids to uniquely identify the node. + // +optional + NodeInfo NodeSystemInfo + // List of container images on this node + // +optional + Images []ContainerImage + // List of attachable volumes in use (mounted) by the node. + // +optional + VolumesInUse []UniqueVolumeName + // List of volumes that are attached to the node. 
+ // +optional + VolumesAttached []AttachedVolume +} + +type UniqueVolumeName string + +// AttachedVolume describes a volume attached to a node +type AttachedVolume struct { + // Name of the attached volume + Name UniqueVolumeName + + // DevicePath represents the device path where the volume should be available + DevicePath string +} + +// AvoidPods describes pods that should avoid this node. This is the value for a +// Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and +// will eventually become a field of NodeStatus. +type AvoidPods struct { + // Bounded-sized list of signatures of pods that should avoid this node, sorted + // in timestamp order from oldest to newest. Size of the slice is unspecified. + // +optional + PreferAvoidPods []PreferAvoidPodsEntry +} + +// Describes a class of pods that should avoid this node. +type PreferAvoidPodsEntry struct { + // The class of pods. + PodSignature PodSignature + // Time at which this entry was added to the list. + // +optional + EvictionTime metav1.Time + // (brief) reason why this entry was added to the list. + // +optional + Reason string + // Human readable message indicating why this entry was added to the list. + // +optional + Message string +} + +// Describes the class of pods that should avoid this node. +// Exactly one field should be set. +type PodSignature struct { + // Reference to controller whose pods should avoid this node. + // +optional + PodController *metav1.OwnerReference +} + +// Describe a container image +type ContainerImage struct { + // Names by which this image is known. + Names []string + // The size of the image in bytes. + // +optional + SizeBytes int64 +} + +type NodePhase string + +// These are the valid phases of node. +const ( + // NodePending means the node has been created/added by the system, but not configured. + NodePending NodePhase = "Pending" + // NodeRunning means the node has been configured and has Kubernetes components running. + NodeRunning NodePhase = "Running" + // NodeTerminated means the node has been removed from the cluster. + NodeTerminated NodePhase = "Terminated" +) + +type NodeConditionType string + +// These are valid conditions of node. Currently, we don't have enough information to decide +// node condition. In the future, we will add more. The proposed set of conditions are: +// NodeReady, NodeReachable +const ( + // NodeReady means kubelet is healthy and ready to accept pods. + NodeReady NodeConditionType = "Ready" + // NodeOutOfDisk means the kubelet will not accept new pods due to insufficient free disk + // space on the node. + NodeOutOfDisk NodeConditionType = "OutOfDisk" + // NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory. + NodeMemoryPressure NodeConditionType = "MemoryPressure" + // NodeDiskPressure means the kubelet is under pressure due to insufficient available disk. + NodeDiskPressure NodeConditionType = "DiskPressure" + // NodeNetworkUnavailable means that network for the node is not correctly configured. 
+ NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable" + // NodeConfigOK indicates whether the kubelet is correctly configured + NodeConfigOK NodeConditionType = "ConfigOK" +) + +type NodeCondition struct { + Type NodeConditionType + Status ConditionStatus + // +optional + LastHeartbeatTime metav1.Time + // +optional + LastTransitionTime metav1.Time + // +optional + Reason string + // +optional + Message string +} + +type NodeAddressType string + +const ( + NodeHostName NodeAddressType = "Hostname" + NodeExternalIP NodeAddressType = "ExternalIP" + NodeInternalIP NodeAddressType = "InternalIP" + NodeExternalDNS NodeAddressType = "ExternalDNS" + NodeInternalDNS NodeAddressType = "InternalDNS" +) + +type NodeAddress struct { + Type NodeAddressType + Address string +} + +// NodeResources is an object for conveying resource information about a node. +// see http://releases.k8s.io/HEAD/docs/design/resources.md for more details. +type NodeResources struct { + // Capacity represents the available resources of a node + // +optional + Capacity ResourceList +} + +// ResourceName is the name identifying various resources in a ResourceList. +type ResourceName string + +// Resource names must be not more than 63 characters, consisting of upper- or lower-case alphanumeric characters, +// with the -, _, and . characters allowed anywhere, except the first or last character. +// The default convention, matching that for annotations, is to use lower-case names, with dashes, rather than +// camel case, separating compound words. +// Fully-qualified resource typenames are constructed from a DNS-style subdomain, followed by a slash `/` and a name. +const ( + // CPU, in cores. (500m = .5 cores) + ResourceCPU ResourceName = "cpu" + // Memory, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + ResourceMemory ResourceName = "memory" + // Volume size, in bytes (e,g. 5Gi = 5GiB = 5 * 1024 * 1024 * 1024) + ResourceStorage ResourceName = "storage" + // Local ephemeral storage, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + // The resource name for ResourceEphemeralStorage is alpha and it can change across releases. + ResourceEphemeralStorage ResourceName = "ephemeral-storage" + // NVIDIA GPU, in devices. Alpha, might change: although fractional and allowing values >1, only one whole device per node is assigned. + ResourceNvidiaGPU ResourceName = "alpha.kubernetes.io/nvidia-gpu" +) + +const ( + // Default namespace prefix. + ResourceDefaultNamespacePrefix = "kubernetes.io/" + // Name prefix for huge page resources (alpha). + ResourceHugePagesPrefix = "hugepages-" +) + +// ResourceList is a set of (resource name, quantity) pairs. +type ResourceList map[ResourceName]resource.Quantity + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Node is a worker node in Kubernetes +// The name of the node according to etcd is in ObjectMeta.Name. +type Node struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the behavior of a node. + // +optional + Spec NodeSpec + + // Status describes the current status of a Node + // +optional + Status NodeStatus +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NodeList is a list of nodes. 
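// Illustrative sketch, not part of the vendored file above: the common pattern for
// reading NodeStatus.Conditions, scanning for the NodeReady condition and comparing its
// status. It assumes the ConditionStatus constants (e.g. ConditionTrue) declared earlier
// in this file; nodeIsReady is a made-up helper.
func nodeIsReady(status NodeStatus) bool {
	for _, c := range status.Conditions {
		if c.Type == NodeReady {
			return c.Status == ConditionTrue
		}
	}
	return false // no Ready condition reported yet
}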
+type NodeList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Node +} + +// NamespaceSpec describes the attributes on a Namespace +type NamespaceSpec struct { + // Finalizers is an opaque list of values that must be empty to permanently remove object from storage + Finalizers []FinalizerName +} + +// FinalizerName is the name identifying a finalizer during namespace lifecycle. +type FinalizerName string + +// These are internal finalizer values to Kubernetes, must be qualified name unless defined here or +// in metav1. +const ( + FinalizerKubernetes FinalizerName = "kubernetes" +) + +// NamespaceStatus is information about the current status of a Namespace. +type NamespaceStatus struct { + // Phase is the current lifecycle phase of the namespace. + // +optional + Phase NamespacePhase +} + +type NamespacePhase string + +// These are the valid phases of a namespace. +const ( + // NamespaceActive means the namespace is available for use in the system + NamespaceActive NamespacePhase = "Active" + // NamespaceTerminating means the namespace is undergoing graceful termination + NamespaceTerminating NamespacePhase = "Terminating" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// A namespace provides a scope for Names. +// Use of multiple namespaces is optional +type Namespace struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the behavior of the Namespace. + // +optional + Spec NamespaceSpec + + // Status describes the current status of a Namespace + // +optional + Status NamespaceStatus +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NamespaceList is a list of Namespaces. +type NamespaceList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Namespace +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Binding ties one object to another; for example, a pod is bound to a node by a scheduler. +// Deprecated in 1.7, please use the bindings subresource of pods instead. +type Binding struct { + metav1.TypeMeta + // ObjectMeta describes the object that is being bound. + // +optional + metav1.ObjectMeta + + // Target is the object to bind to. + Target ObjectReference +} + +// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. +type Preconditions struct { + // Specifies the target UID. + // +optional + UID *types.UID +} + +// DeletionPropagation decides whether and how garbage collection will be performed. +type DeletionPropagation string + +const ( + // Orphans the dependents. + DeletePropagationOrphan DeletionPropagation = "Orphan" + // Deletes the object from the key-value store, the garbage collector will delete the dependents in the background. + DeletePropagationBackground DeletionPropagation = "Background" + // The object exists in the key-value store until the garbage collector deletes all the dependents whose ownerReference.blockOwnerDeletion=true from the key-value store. + // API sever will put the "DeletingDependents" finalizer on the object, and sets its deletionTimestamp. + // This policy is cascading, i.e., the dependents will be deleted with Foreground. + DeletePropagationForeground DeletionPropagation = "Foreground" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DeleteOptions may be provided when deleting an API object +// DEPRECATED: This type has been moved to meta/v1 and will be removed soon. 
+type DeleteOptions struct { + metav1.TypeMeta + + // Optional duration in seconds before the object should be deleted. Value must be non-negative integer. + // The value zero indicates delete immediately. If this value is nil, the default grace period for the + // specified type will be used. + // +optional + GracePeriodSeconds *int64 + + // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be + // returned. + // +optional + Preconditions *Preconditions + + // Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. + // Should the dependent objects be orphaned. If true/false, the "orphan" + // finalizer will be added to/removed from the object's finalizers list. + // Either this field or PropagationPolicy may be set, but not both. + // +optional + OrphanDependents *bool + + // Whether and how garbage collection will be performed. + // Either this field or OrphanDependents may be set, but not both. + // The default policy is decided by the existing finalizer set in the + // metadata.finalizers and the resource-specific default policy. + // Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - + // allow the garbage collector to delete the dependents in the background; + // 'Foreground' - a cascading policy that deletes all dependents in the + // foreground. + // +optional + PropagationPolicy *DeletionPropagation +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ListOptions is the query options to a standard REST list call, and has future support for +// watch calls. +// DEPRECATED: This type has been moved to meta/v1 and will be removed soon. +type ListOptions struct { + metav1.TypeMeta + + // A selector based on labels + LabelSelector labels.Selector + // A selector based on fields + FieldSelector fields.Selector + + // If true, partially initialized resources are included in the response. + IncludeUninitialized bool + + // If true, watch for changes to this list + Watch bool + // When specified with a watch call, shows changes that occur after that particular version of a resource. + // Defaults to changes from the beginning of history. + // When specified for list: + // - if unset, then the result is returned from remote storage based on quorum-read flag; + // - if it's 0, then we simply return what we currently have in cache, no guarantee; + // - if set to non zero, then the result is at least as fresh as given rv. + ResourceVersion string + // Timeout for the list/watch call. + TimeoutSeconds *int64 +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodLogOptions is the query options for a Pod's logs REST call +type PodLogOptions struct { + metav1.TypeMeta + + // Container for which to return logs + Container string + // If true, follow the logs for the pod + Follow bool + // If true, return previous terminated container logs + Previous bool + // A relative time in seconds before the current time from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + SinceSeconds *int64 + // An RFC3339 timestamp from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. 
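// Illustrative sketch, not part of the vendored file above: a DeleteOptions literal.
// GracePeriodSeconds and PropagationPolicy are pointers, and only one of
// OrphanDependents or PropagationPolicy may be set, as documented above. Values and the
// function name are made up.
func exampleDeleteOptions() DeleteOptions {
	grace := int64(0) // delete immediately
	policy := DeletePropagationForeground
	return DeleteOptions{
		GracePeriodSeconds: &grace,
		PropagationPolicy:  &policy,
	}
}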
+ // Only one of sinceSeconds or sinceTime may be specified. + SinceTime *metav1.Time + // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line + // of log output. + Timestamps bool + // If set, the number of lines from the end of the logs to show. If not specified, + // logs are shown from the creation of the container or sinceSeconds or sinceTime + TailLines *int64 + // If set, the number of bytes to read from the server before terminating the + // log output. This may not display a complete final line of logging, and may return + // slightly more or slightly less than the specified limit. + LimitBytes *int64 +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodAttachOptions is the query options to a Pod's remote attach call +// TODO: merge w/ PodExecOptions below for stdin, stdout, etc +type PodAttachOptions struct { + metav1.TypeMeta + + // Stdin if true indicates that stdin is to be redirected for the attach call + // +optional + Stdin bool + + // Stdout if true indicates that stdout is to be redirected for the attach call + // +optional + Stdout bool + + // Stderr if true indicates that stderr is to be redirected for the attach call + // +optional + Stderr bool + + // TTY if true indicates that a tty will be allocated for the attach call + // +optional + TTY bool + + // Container to attach to. + // +optional + Container string +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodExecOptions is the query options to a Pod's remote exec call +type PodExecOptions struct { + metav1.TypeMeta + + // Stdin if true indicates that stdin is to be redirected for the exec call + Stdin bool + + // Stdout if true indicates that stdout is to be redirected for the exec call + Stdout bool + + // Stderr if true indicates that stderr is to be redirected for the exec call + Stderr bool + + // TTY if true indicates that a tty will be allocated for the exec call + TTY bool + + // Container in which to execute the command. + Container string + + // Command is the remote command to execute; argv array; not executed within a shell. + Command []string +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodPortForwardOptions is the query options to a Pod's port forward call +type PodPortForwardOptions struct { + metav1.TypeMeta + + // The list of ports to forward + // +optional + Ports []int32 +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodProxyOptions is the query options to a Pod's proxy call +type PodProxyOptions struct { + metav1.TypeMeta + + // Path is the URL path to use for the current proxy request + Path string +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NodeProxyOptions is the query options to a Node's proxy call +type NodeProxyOptions struct { + metav1.TypeMeta + + // Path is the URL path to use for the current proxy request + Path string +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceProxyOptions is the query options to a Service's proxy call. +type ServiceProxyOptions struct { + metav1.TypeMeta + + // Path is the part of URLs that include service endpoints, suffixes, + // and parameters to use for the current proxy request to service. + // For example, the whole request URL is + // http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. + // Path is _search?q=user:kimchy. 
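// Illustrative sketch, not part of the vendored file above: PodLogOptions for
// "follow the last 100 lines with timestamps", showing the pointer-typed TailLines
// field. The function name is made up.
func exampleLogOptions() PodLogOptions {
	tail := int64(100)
	return PodLogOptions{
		Follow:     true,
		Timestamps: true,
		TailLines:  &tail,
	}
}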
+ Path string +} + +// ObjectReference contains enough information to let you inspect or modify the referred object. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type ObjectReference struct { + // +optional + Kind string + // +optional + Namespace string + // +optional + Name string + // +optional + UID types.UID + // +optional + APIVersion string + // +optional + ResourceVersion string + + // Optional. If referring to a piece of an object instead of an entire object, this string + // should contain information to identify the sub-object. For example, if the object + // reference is to a container within a pod, this would take on a value like: + // "spec.containers{name}" (where "name" refers to the name of the container that triggered + // the event) or if no container name is specified "spec.containers[2]" (container with + // index 2 in this pod). This syntax is chosen only to have some well-defined way of + // referencing a part of an object. + // TODO: this design is not final and this field is subject to change in the future. + // +optional + FieldPath string +} + +// LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. +type LocalObjectReference struct { + //TODO: Add other useful fields. apiVersion, kind, uid? + Name string +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type SerializedReference struct { + metav1.TypeMeta + // +optional + Reference ObjectReference +} + +type EventSource struct { + // Component from which the event is generated. + // +optional + Component string + // Node name on which the event is generated. + // +optional + Host string +} + +// Valid values for event types (new types could be added in future) +const ( + // Information only and will not cause any problems + EventTypeNormal string = "Normal" + // These events are to warn that something might go wrong + EventTypeWarning string = "Warning" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Event is a report of an event somewhere in the cluster. +// TODO: Decide whether to store these separately or with the object they apply to. +type Event struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Required. The object that this event is about. Mapped to events.Event.regarding + // +optional + InvolvedObject ObjectReference + + // Optional; this should be a short, machine understandable string that gives the reason + // for this event being generated. For example, if the event is reporting that a container + // can't start, the Reason might be "ImageNotFound". + // TODO: provide exact specification for format. + // +optional + Reason string + + // Optional. A human-readable description of the status of this operation. + // TODO: decide on maximum length. Mapped to events.Event.note + // +optional + Message string + + // Optional. The component reporting this event. Should be a short machine understandable string. + // +optional + Source EventSource + + // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.) + // +optional + FirstTimestamp metav1.Time + + // The time at which the most recent occurrence of this event was recorded. + // +optional + LastTimestamp metav1.Time + + // The number of times this event has occurred. + // +optional + Count int32 + + // Type of this event (Normal, Warning), new types could be added in the future. 
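// Illustrative sketch, not part of the vendored file above: an ObjectReference pointing
// at a single container inside a pod, using the FieldPath convention documented above
// ("spec.containers{name}"). All values are made up.
func exampleContainerRef() ObjectReference {
	return ObjectReference{
		Kind:      "Pod",
		Namespace: "default",
		Name:      "web-0",
		FieldPath: "spec.containers{nginx}",
	}
}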
+ // +optional + Type string + + // Time when this Event was first observed. + // +optional + EventTime metav1.MicroTime + + // Data about the Event series this event represents or nil if it's a singleton Event. + // +optional + Series *EventSeries + + // What action was taken/failed regarding to the Regarding object. + // +optional + Action string + + // Optional secondary object for more complex actions. + // +optional + Related *ObjectReference + + // Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. + // +optional + ReportingController string + + // ID of the controller instance, e.g. `kubelet-xyzf`. + // +optional + ReportingInstance string +} + +type EventSeries struct { + // Number of occurrences in this series up to the last heartbeat time + Count int32 + // Time of the last occurence observed + LastObservedTime metav1.MicroTime + // State of this Series: Ongoing or Finished + State EventSeriesState +} + +type EventSeriesState string + +const ( + EventSeriesStateOngoing EventSeriesState = "Ongoing" + EventSeriesStateFinished EventSeriesState = "Finished" + EventSeriesStateUnknown EventSeriesState = "Unknown" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EventList is a list of events. +type EventList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Event +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// List holds a list of objects, which may not be known by the server. +type List metainternalversion.List + +// A type of object that is limited +type LimitType string + +const ( + // Limit that applies to all pods in a namespace + LimitTypePod LimitType = "Pod" + // Limit that applies to all containers in a namespace + LimitTypeContainer LimitType = "Container" + // Limit that applies to all persistent volume claims in a namespace + LimitTypePersistentVolumeClaim LimitType = "PersistentVolumeClaim" +) + +// LimitRangeItem defines a min/max usage limit for any resource that matches on kind +type LimitRangeItem struct { + // Type of resource that this limit applies to + // +optional + Type LimitType + // Max usage constraints on this kind by resource name + // +optional + Max ResourceList + // Min usage constraints on this kind by resource name + // +optional + Min ResourceList + // Default resource requirement limit value by resource name. + // +optional + Default ResourceList + // DefaultRequest resource requirement request value by resource name. + // +optional + DefaultRequest ResourceList + // MaxLimitRequestRatio represents the max burst value for the named resource + // +optional + MaxLimitRequestRatio ResourceList +} + +// LimitRangeSpec defines a min/max usage limit for resources that match on kind +type LimitRangeSpec struct { + // Limits is the list of LimitRangeItem objects that are enforced + Limits []LimitRangeItem +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// LimitRange sets resource usage limits for each kind of resource in a Namespace +type LimitRange struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the limits enforced + // +optional + Spec LimitRangeSpec +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// LimitRangeList is a list of LimitRange items. 
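// Illustrative sketch, not part of the vendored file above: a container-scoped
// LimitRangeItem populating Min/Max/Default/DefaultRequest with made-up quantities,
// using the resource package this file already imports.
func exampleLimitRangeItem() LimitRangeItem {
	return LimitRangeItem{
		Type:           LimitTypeContainer,
		Min:            ResourceList{ResourceCPU: resource.MustParse("100m")},
		Max:            ResourceList{ResourceCPU: resource.MustParse("2")},
		Default:        ResourceList{ResourceCPU: resource.MustParse("500m")},
		DefaultRequest: ResourceList{ResourceCPU: resource.MustParse("200m")},
	}
}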
+type LimitRangeList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + // Items is a list of LimitRange objects + Items []LimitRange +} + +// The following identify resource constants for Kubernetes object types +const ( + // Pods, number + ResourcePods ResourceName = "pods" + // Services, number + ResourceServices ResourceName = "services" + // ReplicationControllers, number + ResourceReplicationControllers ResourceName = "replicationcontrollers" + // ResourceQuotas, number + ResourceQuotas ResourceName = "resourcequotas" + // ResourceSecrets, number + ResourceSecrets ResourceName = "secrets" + // ResourceConfigMaps, number + ResourceConfigMaps ResourceName = "configmaps" + // ResourcePersistentVolumeClaims, number + ResourcePersistentVolumeClaims ResourceName = "persistentvolumeclaims" + // ResourceServicesNodePorts, number + ResourceServicesNodePorts ResourceName = "services.nodeports" + // ResourceServicesLoadBalancers, number + ResourceServicesLoadBalancers ResourceName = "services.loadbalancers" + // CPU request, in cores. (500m = .5 cores) + ResourceRequestsCPU ResourceName = "requests.cpu" + // Memory request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + ResourceRequestsMemory ResourceName = "requests.memory" + // Storage request, in bytes + ResourceRequestsStorage ResourceName = "requests.storage" + // Local ephemeral storage request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + ResourceRequestsEphemeralStorage ResourceName = "requests.ephemeral-storage" + // CPU limit, in cores. (500m = .5 cores) + ResourceLimitsCPU ResourceName = "limits.cpu" + // Memory limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + ResourceLimitsMemory ResourceName = "limits.memory" + // Local ephemeral storage limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage" +) + +// The following identify resource prefix for Kubernetes object types +const ( + // HugePages request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + // As burst is not supported for HugePages, we would only quota its request, and ignore the limit. + ResourceRequestsHugePagesPrefix = "requests.hugepages-" +) + +// A ResourceQuotaScope defines a filter that must match each object tracked by a quota +type ResourceQuotaScope string + +const ( + // Match all pod objects where spec.activeDeadlineSeconds + ResourceQuotaScopeTerminating ResourceQuotaScope = "Terminating" + // Match all pod objects where !spec.activeDeadlineSeconds + ResourceQuotaScopeNotTerminating ResourceQuotaScope = "NotTerminating" + // Match all pod objects that have best effort quality of service + ResourceQuotaScopeBestEffort ResourceQuotaScope = "BestEffort" + // Match all pod objects that do not have best effort quality of service + ResourceQuotaScopeNotBestEffort ResourceQuotaScope = "NotBestEffort" +) + +// ResourceQuotaSpec defines the desired hard limits to enforce for Quota +type ResourceQuotaSpec struct { + // Hard is the set of desired hard limits for each named resource + // +optional + Hard ResourceList + // A collection of filters that must match each object tracked by a quota. + // If not specified, the quota matches all objects. 
+ // +optional + Scopes []ResourceQuotaScope +} + +// ResourceQuotaStatus defines the enforced hard limits and observed use +type ResourceQuotaStatus struct { + // Hard is the set of enforced hard limits for each named resource + // +optional + Hard ResourceList + // Used is the current observed total usage of the resource in the namespace + // +optional + Used ResourceList +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ResourceQuota sets aggregate quota restrictions enforced per namespace +type ResourceQuota struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the desired quota + // +optional + Spec ResourceQuotaSpec + + // Status defines the actual enforced quota and its current usage + // +optional + Status ResourceQuotaStatus +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ResourceQuotaList is a list of ResourceQuota items +type ResourceQuotaList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + // Items is a list of ResourceQuota objects + Items []ResourceQuota +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Secret holds secret data of a certain type. The total bytes of the values in +// the Data field must be less than MaxSecretSize bytes. +type Secret struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Data contains the secret data. Each key must consist of alphanumeric + // characters, '-', '_' or '.'. The serialized form of the secret data is a + // base64 encoded string, representing the arbitrary (possibly non-string) + // data value here. + // +optional + Data map[string][]byte + + // Used to facilitate programmatic handling of secret data. + // +optional + Type SecretType +} + +const MaxSecretSize = 1 * 1024 * 1024 + +type SecretType string + +const ( + // SecretTypeOpaque is the default; arbitrary user-defined data + SecretTypeOpaque SecretType = "Opaque" + + // SecretTypeServiceAccountToken contains a token that identifies a service account to the API + // + // Required fields: + // - Secret.Annotations["kubernetes.io/service-account.name"] - the name of the ServiceAccount the token identifies + // - Secret.Annotations["kubernetes.io/service-account.uid"] - the UID of the ServiceAccount the token identifies + // - Secret.Data["token"] - a token that identifies the service account to the API + SecretTypeServiceAccountToken SecretType = "kubernetes.io/service-account-token" + + // ServiceAccountNameKey is the key of the required annotation for SecretTypeServiceAccountToken secrets + ServiceAccountNameKey = "kubernetes.io/service-account.name" + // ServiceAccountUIDKey is the key of the required annotation for SecretTypeServiceAccountToken secrets + ServiceAccountUIDKey = "kubernetes.io/service-account.uid" + // ServiceAccountTokenKey is the key of the required data for SecretTypeServiceAccountToken secrets + ServiceAccountTokenKey = "token" + // ServiceAccountKubeconfigKey is the key of the optional kubeconfig data for SecretTypeServiceAccountToken secrets + ServiceAccountKubeconfigKey = "kubernetes.kubeconfig" + // ServiceAccountRootCAKey is the key of the optional root certificate authority for SecretTypeServiceAccountToken secrets + ServiceAccountRootCAKey = "ca.crt" + // ServiceAccountNamespaceKey is the key of the optional namespace to use as the default for namespaced API calls + ServiceAccountNamespaceKey = "namespace" + + // SecretTypeDockercfg contains a dockercfg file that 
follows the same format rules as ~/.dockercfg + // + // Required fields: + // - Secret.Data[".dockercfg"] - a serialized ~/.dockercfg file + SecretTypeDockercfg SecretType = "kubernetes.io/dockercfg" + + // DockerConfigKey is the key of the required data for SecretTypeDockercfg secrets + DockerConfigKey = ".dockercfg" + + // SecretTypeDockerConfigJson contains a dockercfg file that follows the same format rules as ~/.docker/config.json + // + // Required fields: + // - Secret.Data[".dockerconfigjson"] - a serialized ~/.docker/config.json file + SecretTypeDockerConfigJson SecretType = "kubernetes.io/dockerconfigjson" + + // DockerConfigJsonKey is the key of the required data for SecretTypeDockerConfigJson secrets + DockerConfigJsonKey = ".dockerconfigjson" + + // SecretTypeBasicAuth contains data needed for basic authentication. + // + // Required at least one of fields: + // - Secret.Data["username"] - username used for authentication + // - Secret.Data["password"] - password or token needed for authentication + SecretTypeBasicAuth SecretType = "kubernetes.io/basic-auth" + + // BasicAuthUsernameKey is the key of the username for SecretTypeBasicAuth secrets + BasicAuthUsernameKey = "username" + // BasicAuthPasswordKey is the key of the password or token for SecretTypeBasicAuth secrets + BasicAuthPasswordKey = "password" + + // SecretTypeSSHAuth contains data needed for SSH authentication. + // + // Required field: + // - Secret.Data["ssh-privatekey"] - private SSH key needed for authentication + SecretTypeSSHAuth SecretType = "kubernetes.io/ssh-auth" + + // SSHAuthPrivateKey is the key of the required SSH private key for SecretTypeSSHAuth secrets + SSHAuthPrivateKey = "ssh-privatekey" + + // SecretTypeTLS contains information about a TLS client or server secret. It + // is primarily used with TLS termination of the Ingress resource, but may be + // used in other types. + // + // Required fields: + // - Secret.Data["tls.key"] - TLS private key. + // Secret.Data["tls.crt"] - TLS certificate. + // TODO: Consider supporting different formats, specifying CA/destinationCA. + SecretTypeTLS SecretType = "kubernetes.io/tls" + + // TLSCertKey is the key for tls certificates in a TLS secret. + TLSCertKey = "tls.crt" + // TLSPrivateKeyKey is the key for the private key field in a TLS secret. + TLSPrivateKeyKey = "tls.key" + // SecretTypeBootstrapToken is used during the automated bootstrap process (first + // implemented by kubeadm). It stores tokens that are used to sign well known + // ConfigMaps. They are used for authn. + SecretTypeBootstrapToken SecretType = "bootstrap.kubernetes.io/token" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type SecretList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Secret +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ConfigMap holds configuration data for components or applications to consume. +type ConfigMap struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Data contains the configuration data. + // Each key must consist of alphanumeric characters, '-', '_' or '.'. + // +optional + Data map[string]string +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ConfigMapList is a resource containing a list of ConfigMap objects. +type ConfigMapList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + // Items is the list of ConfigMaps. 
+ Items []ConfigMap +} + +// These constants are for remote command execution and port forwarding and are +// used by both the client side and server side components. +// +// This is probably not the ideal place for them, but it didn't seem worth it +// to create pkg/exec and pkg/portforward just to contain a single file with +// constants in it. Suggestions for more appropriate alternatives are +// definitely welcome! +const ( + // Enable stdin for remote command execution + ExecStdinParam = "input" + // Enable stdout for remote command execution + ExecStdoutParam = "output" + // Enable stderr for remote command execution + ExecStderrParam = "error" + // Enable TTY for remote command execution + ExecTTYParam = "tty" + // Command to run for remote command execution + ExecCommandParam = "command" + + // Name of header that specifies stream type + StreamType = "streamType" + // Value for streamType header for stdin stream + StreamTypeStdin = "stdin" + // Value for streamType header for stdout stream + StreamTypeStdout = "stdout" + // Value for streamType header for stderr stream + StreamTypeStderr = "stderr" + // Value for streamType header for data stream + StreamTypeData = "data" + // Value for streamType header for error stream + StreamTypeError = "error" + // Value for streamType header for terminal resize stream + StreamTypeResize = "resize" + + // Name of header that specifies the port being forwarded + PortHeader = "port" + // Name of header that specifies a request ID used to associate the error + // and data streams for a single forwarded connection + PortForwardRequestIDHeader = "requestID" +) + +// Type and constants for component health validation. +type ComponentConditionType string + +// These are the valid conditions for the component. +const ( + ComponentHealthy ComponentConditionType = "Healthy" +) + +type ComponentCondition struct { + Type ComponentConditionType + Status ConditionStatus + // +optional + Message string + // +optional + Error string +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ComponentStatus (and ComponentStatusList) holds the cluster validation info. +type ComponentStatus struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // +optional + Conditions []ComponentCondition +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type ComponentStatusList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []ComponentStatus +} + +// SecurityContext holds security configuration that will be applied to a container. +// Some fields are present in both SecurityContext and PodSecurityContext. When both +// are set, the values in SecurityContext take precedence. +type SecurityContext struct { + // The capabilities to add/drop when running containers. + // Defaults to the default set of capabilities granted by the container runtime. + // +optional + Capabilities *Capabilities + // Run container in privileged mode. + // Processes in privileged containers are essentially equivalent to root on the host. + // Defaults to false. + // +optional + Privileged *bool + // The SELinux context to be applied to the container. + // If unspecified, the container runtime will allocate a random SELinux context for each + // container. May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. 
+ // +optional + SELinuxOptions *SELinuxOptions + // The UID to run the entrypoint of the container process. + // Defaults to user specified in image metadata if unspecified. + // May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + RunAsUser *int64 + // Indicates that the container must run as a non-root user. + // If true, the Kubelet will validate the image at runtime to ensure that it + // does not run as UID 0 (root) and fail to start the container if it does. + // If unset or false, no such validation will be performed. + // May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + RunAsNonRoot *bool + // The read-only root filesystem allows you to restrict the locations that an application can write + // files to, ensuring the persistent data can only be written to mounts. + // +optional + ReadOnlyRootFilesystem *bool + // AllowPrivilegeEscalation controls whether a process can gain more + // privileges than its parent process. This bool directly controls if + // the no_new_privs flag will be set on the container process. + // +optional + AllowPrivilegeEscalation *bool +} + +// SELinuxOptions are the labels to be applied to the container. +type SELinuxOptions struct { + // SELinux user label + // +optional + User string + // SELinux role label + // +optional + Role string + // SELinux type label + // +optional + Type string + // SELinux level label. + // +optional + Level string +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RangeAllocation is an opaque API object (not exposed to end users) that can be persisted to record +// the global allocation state of the cluster. The schema of Range and Data generic, in that Range +// should be a string representation of the inputs to a range (for instance, for IP allocation it +// might be a CIDR) and Data is an opaque blob understood by an allocator which is typically a +// binary range. Consumers should use annotations to record additional information (schema version, +// data encoding hints). A range allocation should *ALWAYS* be recreatable at any time by observation +// of the cluster, thus the object is less strongly typed than most. +type RangeAllocation struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + // A string representing a unique label for a range of resources, such as a CIDR "10.0.0.0/8" or + // port range "10000-30000". Range is not strongly schema'd here. The Range is expected to define + // a start and end unless there is an implicit end. + Range string + // A byte array representing the serialized state of a range allocation. Additional clarifiers on + // the type or format of data should be represented with annotations. For IP allocations, this is + // represented as a bit array starting at the base IP of the CIDR in Range, with each bit representing + // a single allocated address (the fifth bit on CIDR 10.0.0.0/8 is 10.0.0.4). + Data []byte +} + +const ( + // "default-scheduler" is the name of default scheduler. + DefaultSchedulerName = "default-scheduler" + + // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule + // corresponding to every RequiredDuringScheduling affinity rule. 
+ // When the --hard-pod-affinity-weight scheduler flag is not specified, + // DefaultHardPodAffinityWeight defines the weight of the implicit PreferredDuringScheduling affinity rule. + DefaultHardPodAffinitySymmetricWeight int32 = 1 +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/BUILD new file mode 100644 index 000000000000..5796c6140760 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/BUILD @@ -0,0 +1,79 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "conversion.go", + "defaults.go", + "doc.go", + "register.go", + "zz_generated.conversion.go", + "zz_generated.defaults.go", + ], + importpath = "k8s.io/kubernetes/pkg/apis/core/v1", + visibility = ["//visibility:public"], + deps = [ + "//pkg/apis/core:go_default_library", + "//pkg/apis/extensions:go_default_library", + "//pkg/features:go_default_library", + "//pkg/util/parsers:go_default_library", + "//pkg/util/pointer:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", + ], +) + +go_test( + name = "go_default_xtest", + srcs = [ + "conversion_test.go", + "defaults_test.go", + ], + importpath = "k8s.io/kubernetes/pkg/apis/core/v1_test", + deps = [ + ":go_default_library", + "//pkg/api/legacyscheme:go_default_library", + "//pkg/api/testapi:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/fuzzer:go_default_library", + "//pkg/apis/extensions:go_default_library", + "//pkg/util/pointer:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/testing/fuzzer:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/fuzzer:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/apis/core/v1/helper:all-srcs", + "//pkg/apis/core/v1/validation:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/OWNERS b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/OWNERS similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/api/v1/OWNERS rename to vendor/k8s.io/kubernetes/pkg/apis/core/v1/OWNERS diff 
--git a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/conversion.go new file mode 100644 index 000000000000..8f6e09b673ed --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/conversion.go @@ -0,0 +1,571 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + "reflect" + + "k8s.io/api/core/v1" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/extensions" +) + +// This is a "fast-path" that avoids reflection for common types. It focuses on the objects that are +// converted the most in the cluster. +// TODO: generate one of these for every external API group - this is to prove the impact +func addFastPathConversionFuncs(scheme *runtime.Scheme) error { + scheme.AddGenericConversionFunc(func(objA, objB interface{}, s conversion.Scope) (bool, error) { + switch a := objA.(type) { + case *v1.Pod: + switch b := objB.(type) { + case *core.Pod: + return true, Convert_v1_Pod_To_core_Pod(a, b, s) + } + case *core.Pod: + switch b := objB.(type) { + case *v1.Pod: + return true, Convert_core_Pod_To_v1_Pod(a, b, s) + } + + case *v1.Event: + switch b := objB.(type) { + case *core.Event: + return true, Convert_v1_Event_To_core_Event(a, b, s) + } + case *core.Event: + switch b := objB.(type) { + case *v1.Event: + return true, Convert_core_Event_To_v1_Event(a, b, s) + } + + case *v1.ReplicationController: + switch b := objB.(type) { + case *core.ReplicationController: + return true, Convert_v1_ReplicationController_To_core_ReplicationController(a, b, s) + } + case *core.ReplicationController: + switch b := objB.(type) { + case *v1.ReplicationController: + return true, Convert_core_ReplicationController_To_v1_ReplicationController(a, b, s) + } + + case *v1.Node: + switch b := objB.(type) { + case *core.Node: + return true, Convert_v1_Node_To_core_Node(a, b, s) + } + case *core.Node: + switch b := objB.(type) { + case *v1.Node: + return true, Convert_core_Node_To_v1_Node(a, b, s) + } + + case *v1.Namespace: + switch b := objB.(type) { + case *core.Namespace: + return true, Convert_v1_Namespace_To_core_Namespace(a, b, s) + } + case *core.Namespace: + switch b := objB.(type) { + case *v1.Namespace: + return true, Convert_core_Namespace_To_v1_Namespace(a, b, s) + } + + case *v1.Service: + switch b := objB.(type) { + case *core.Service: + return true, Convert_v1_Service_To_core_Service(a, b, s) + } + case *core.Service: + switch b := objB.(type) { + case *v1.Service: + return true, Convert_core_Service_To_v1_Service(a, b, s) + } + + case *v1.Endpoints: + switch b := objB.(type) { + case *core.Endpoints: + return true, Convert_v1_Endpoints_To_core_Endpoints(a, b, s) + } + case *core.Endpoints: + switch b := objB.(type) { + case *v1.Endpoints: + return true, Convert_core_Endpoints_To_v1_Endpoints(a, b, s) + } + + 
case *metav1.WatchEvent: + switch b := objB.(type) { + case *metav1.InternalEvent: + return true, metav1.Convert_versioned_Event_to_versioned_InternalEvent(a, b, s) + } + case *metav1.InternalEvent: + switch b := objB.(type) { + case *metav1.WatchEvent: + return true, metav1.Convert_versioned_InternalEvent_to_versioned_Event(a, b, s) + } + } + return false, nil + }) + return nil +} + +func addConversionFuncs(scheme *runtime.Scheme) error { + // Add non-generated conversion functions + err := scheme.AddConversionFuncs( + Convert_core_Pod_To_v1_Pod, + Convert_core_PodSpec_To_v1_PodSpec, + Convert_core_ReplicationControllerSpec_To_v1_ReplicationControllerSpec, + Convert_core_ServiceSpec_To_v1_ServiceSpec, + Convert_v1_Pod_To_core_Pod, + Convert_v1_PodSpec_To_core_PodSpec, + Convert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec, + Convert_v1_Secret_To_core_Secret, + Convert_v1_ServiceSpec_To_core_ServiceSpec, + Convert_v1_ResourceList_To_core_ResourceList, + Convert_v1_ReplicationController_to_extensions_ReplicaSet, + Convert_v1_ReplicationControllerSpec_to_extensions_ReplicaSetSpec, + Convert_v1_ReplicationControllerStatus_to_extensions_ReplicaSetStatus, + Convert_extensions_ReplicaSet_to_v1_ReplicationController, + Convert_extensions_ReplicaSetSpec_to_v1_ReplicationControllerSpec, + Convert_extensions_ReplicaSetStatus_to_v1_ReplicationControllerStatus, + ) + if err != nil { + return err + } + + // Add field conversion funcs. + err = scheme.AddFieldLabelConversionFunc("v1", "Pod", + func(label, value string) (string, string, error) { + switch label { + case "metadata.name", + "metadata.namespace", + "spec.nodeName", + "spec.restartPolicy", + "spec.schedulerName", + "status.phase", + "status.podIP": + return label, value, nil + // This is for backwards compatibility with old v1 clients which send spec.host + case "spec.host": + return "spec.nodeName", value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }, + ) + if err != nil { + return err + } + err = scheme.AddFieldLabelConversionFunc("v1", "Node", + func(label, value string) (string, string, error) { + switch label { + case "metadata.name": + return label, value, nil + case "spec.unschedulable": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }, + ) + if err != nil { + return err + } + err = scheme.AddFieldLabelConversionFunc("v1", "ReplicationController", + func(label, value string) (string, string, error) { + switch label { + case "metadata.name", + "metadata.namespace", + "status.replicas": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }) + if err != nil { + return err + } + if err := AddFieldLabelConversionsForEvent(scheme); err != nil { + return err + } + if err := AddFieldLabelConversionsForNamespace(scheme); err != nil { + return err + } + if err := AddFieldLabelConversionsForSecret(scheme); err != nil { + return err + } + return nil +} + +func Convert_v1_ReplicationController_to_extensions_ReplicaSet(in *v1.ReplicationController, out *extensions.ReplicaSet, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_ReplicationControllerSpec_to_extensions_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_ReplicationControllerStatus_to_extensions_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func 
Convert_v1_ReplicationControllerSpec_to_extensions_ReplicaSetSpec(in *v1.ReplicationControllerSpec, out *extensions.ReplicaSetSpec, s conversion.Scope) error { + out.Replicas = *in.Replicas + out.MinReadySeconds = in.MinReadySeconds + if in.Selector != nil { + out.Selector = new(metav1.LabelSelector) + metav1.Convert_map_to_unversioned_LabelSelector(&in.Selector, out.Selector, s) + } + if in.Template != nil { + if err := Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(in.Template, &out.Template, s); err != nil { + return err + } + } + return nil +} + +func Convert_v1_ReplicationControllerStatus_to_extensions_ReplicaSetStatus(in *v1.ReplicationControllerStatus, out *extensions.ReplicaSetStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.FullyLabeledReplicas = in.FullyLabeledReplicas + out.ReadyReplicas = in.ReadyReplicas + out.AvailableReplicas = in.AvailableReplicas + out.ObservedGeneration = in.ObservedGeneration + for _, cond := range in.Conditions { + out.Conditions = append(out.Conditions, extensions.ReplicaSetCondition{ + Type: extensions.ReplicaSetConditionType(cond.Type), + Status: core.ConditionStatus(cond.Status), + LastTransitionTime: cond.LastTransitionTime, + Reason: cond.Reason, + Message: cond.Message, + }) + } + return nil +} + +func Convert_extensions_ReplicaSet_to_v1_ReplicationController(in *extensions.ReplicaSet, out *v1.ReplicationController, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_extensions_ReplicaSetSpec_to_v1_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil { + fieldErr, ok := err.(*field.Error) + if !ok { + return err + } + if out.Annotations == nil { + out.Annotations = make(map[string]string) + } + out.Annotations[v1.NonConvertibleAnnotationPrefix+"/"+fieldErr.Field] = reflect.ValueOf(fieldErr.BadValue).String() + } + if err := Convert_extensions_ReplicaSetStatus_to_v1_ReplicationControllerStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_extensions_ReplicaSetSpec_to_v1_ReplicationControllerSpec(in *extensions.ReplicaSetSpec, out *v1.ReplicationControllerSpec, s conversion.Scope) error { + out.Replicas = new(int32) + *out.Replicas = in.Replicas + out.MinReadySeconds = in.MinReadySeconds + var invalidErr error + if in.Selector != nil { + invalidErr = metav1.Convert_unversioned_LabelSelector_to_map(in.Selector, &out.Selector, s) + } + out.Template = new(v1.PodTemplateSpec) + if err := Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, out.Template, s); err != nil { + return err + } + return invalidErr +} + +func Convert_extensions_ReplicaSetStatus_to_v1_ReplicationControllerStatus(in *extensions.ReplicaSetStatus, out *v1.ReplicationControllerStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.FullyLabeledReplicas = in.FullyLabeledReplicas + out.ReadyReplicas = in.ReadyReplicas + out.AvailableReplicas = in.AvailableReplicas + out.ObservedGeneration = in.ObservedGeneration + for _, cond := range in.Conditions { + out.Conditions = append(out.Conditions, v1.ReplicationControllerCondition{ + Type: v1.ReplicationControllerConditionType(cond.Type), + Status: v1.ConditionStatus(cond.Status), + LastTransitionTime: cond.LastTransitionTime, + Reason: cond.Reason, + Message: cond.Message, + }) + } + return nil +} + +func Convert_core_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(in *core.ReplicationControllerSpec, out *v1.ReplicationControllerSpec, s conversion.Scope) error { + out.Replicas = &in.Replicas + 
out.MinReadySeconds = in.MinReadySeconds + out.Selector = in.Selector + if in.Template != nil { + out.Template = new(v1.PodTemplateSpec) + if err := Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(in.Template, out.Template, s); err != nil { + return err + } + } else { + out.Template = nil + } + return nil +} + +func Convert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec(in *v1.ReplicationControllerSpec, out *core.ReplicationControllerSpec, s conversion.Scope) error { + if in.Replicas != nil { + out.Replicas = *in.Replicas + } + out.MinReadySeconds = in.MinReadySeconds + out.Selector = in.Selector + if in.Template != nil { + out.Template = new(core.PodTemplateSpec) + if err := Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(in.Template, out.Template, s); err != nil { + return err + } + } else { + out.Template = nil + } + return nil +} + +func Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(in *core.PodTemplateSpec, out *v1.PodTemplateSpec, s conversion.Scope) error { + if err := autoConvert_core_PodTemplateSpec_To_v1_PodTemplateSpec(in, out, s); err != nil { + return err + } + + return nil +} + +func Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(in *v1.PodTemplateSpec, out *core.PodTemplateSpec, s conversion.Scope) error { + if err := autoConvert_v1_PodTemplateSpec_To_core_PodTemplateSpec(in, out, s); err != nil { + return err + } + + return nil +} + +// The following two v1.PodSpec conversions are done here to support v1.ServiceAccount +// as an alias for ServiceAccountName. +func Convert_core_PodSpec_To_v1_PodSpec(in *core.PodSpec, out *v1.PodSpec, s conversion.Scope) error { + if err := autoConvert_core_PodSpec_To_v1_PodSpec(in, out, s); err != nil { + return err + } + + // DeprecatedServiceAccount is an alias for ServiceAccountName. + out.DeprecatedServiceAccount = in.ServiceAccountName + + if in.SecurityContext != nil { + // the host namespace fields have to be handled here for backward compatibility + // with v1.0.0 + out.HostPID = in.SecurityContext.HostPID + out.HostNetwork = in.SecurityContext.HostNetwork + out.HostIPC = in.SecurityContext.HostIPC + } + + return nil +} + +func Convert_v1_PodSpec_To_core_PodSpec(in *v1.PodSpec, out *core.PodSpec, s conversion.Scope) error { + if err := autoConvert_v1_PodSpec_To_core_PodSpec(in, out, s); err != nil { + return err + } + + // We support DeprecatedServiceAccount as an alias for ServiceAccountName. + // If both are specified, ServiceAccountName (the new field) wins. + if in.ServiceAccountName == "" { + out.ServiceAccountName = in.DeprecatedServiceAccount + } + + // the host namespace fields have to be handled specially for backward compatibility + // with v1.0.0 + if out.SecurityContext == nil { + out.SecurityContext = new(core.PodSecurityContext) + } + out.SecurityContext.HostNetwork = in.HostNetwork + out.SecurityContext.HostPID = in.HostPID + out.SecurityContext.HostIPC = in.HostIPC + + return nil +} + +func Convert_core_Pod_To_v1_Pod(in *core.Pod, out *v1.Pod, s conversion.Scope) error { + if err := autoConvert_core_Pod_To_v1_Pod(in, out, s); err != nil { + return err + } + + // drop init container annotations so they don't take effect on legacy kubelets. + // remove this once the oldest supported kubelet no longer honors the annotations over the field. 
+ if len(out.Annotations) > 0 { + old := out.Annotations + out.Annotations = make(map[string]string, len(old)) + for k, v := range old { + out.Annotations[k] = v + } + delete(out.Annotations, "pod.beta.kubernetes.io/init-containers") + delete(out.Annotations, "pod.alpha.kubernetes.io/init-containers") + delete(out.Annotations, "pod.beta.kubernetes.io/init-container-statuses") + delete(out.Annotations, "pod.alpha.kubernetes.io/init-container-statuses") + } + + return nil +} + +func Convert_v1_Secret_To_core_Secret(in *v1.Secret, out *core.Secret, s conversion.Scope) error { + if err := autoConvert_v1_Secret_To_core_Secret(in, out, s); err != nil { + return err + } + + // StringData overwrites Data + if len(in.StringData) > 0 { + if out.Data == nil { + out.Data = map[string][]byte{} + } + for k, v := range in.StringData { + out.Data[k] = []byte(v) + } + } + + return nil +} +func Convert_core_SecurityContext_To_v1_SecurityContext(in *core.SecurityContext, out *v1.SecurityContext, s conversion.Scope) error { + if in.Capabilities != nil { + out.Capabilities = new(v1.Capabilities) + if err := Convert_core_Capabilities_To_v1_Capabilities(in.Capabilities, out.Capabilities, s); err != nil { + return err + } + } else { + out.Capabilities = nil + } + out.Privileged = in.Privileged + if in.SELinuxOptions != nil { + out.SELinuxOptions = new(v1.SELinuxOptions) + if err := Convert_core_SELinuxOptions_To_v1_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil { + return err + } + } else { + out.SELinuxOptions = nil + } + out.RunAsUser = in.RunAsUser + out.RunAsNonRoot = in.RunAsNonRoot + out.ReadOnlyRootFilesystem = in.ReadOnlyRootFilesystem + out.AllowPrivilegeEscalation = in.AllowPrivilegeEscalation + return nil +} + +func Convert_core_PodSecurityContext_To_v1_PodSecurityContext(in *core.PodSecurityContext, out *v1.PodSecurityContext, s conversion.Scope) error { + out.SupplementalGroups = in.SupplementalGroups + if in.SELinuxOptions != nil { + out.SELinuxOptions = new(v1.SELinuxOptions) + if err := Convert_core_SELinuxOptions_To_v1_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil { + return err + } + } else { + out.SELinuxOptions = nil + } + out.RunAsUser = in.RunAsUser + out.RunAsNonRoot = in.RunAsNonRoot + out.FSGroup = in.FSGroup + return nil +} + +func Convert_v1_PodSecurityContext_To_core_PodSecurityContext(in *v1.PodSecurityContext, out *core.PodSecurityContext, s conversion.Scope) error { + out.SupplementalGroups = in.SupplementalGroups + if in.SELinuxOptions != nil { + out.SELinuxOptions = new(core.SELinuxOptions) + if err := Convert_v1_SELinuxOptions_To_core_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil { + return err + } + } else { + out.SELinuxOptions = nil + } + out.RunAsUser = in.RunAsUser + out.RunAsNonRoot = in.RunAsNonRoot + out.FSGroup = in.FSGroup + return nil +} + +// +k8s:conversion-fn=copy-only +func Convert_v1_ResourceList_To_core_ResourceList(in *v1.ResourceList, out *core.ResourceList, s conversion.Scope) error { + if *in == nil { + return nil + } + if *out == nil { + *out = make(core.ResourceList, len(*in)) + } + for key, val := range *in { + // Moved to defaults + // TODO(#18538): We round up resource values to milli scale to maintain API compatibility. + // In the future, we should instead reject values that need rounding. 
+ // const milliScale = -3 + // val.RoundUp(milliScale) + + (*out)[core.ResourceName(key)] = val + } + return nil +} + +func AddFieldLabelConversionsForEvent(scheme *runtime.Scheme) error { + return scheme.AddFieldLabelConversionFunc("v1", "Event", + func(label, value string) (string, string, error) { + switch label { + case "involvedObject.kind", + "involvedObject.namespace", + "involvedObject.name", + "involvedObject.uid", + "involvedObject.apiVersion", + "involvedObject.resourceVersion", + "involvedObject.fieldPath", + "reason", + "source", + "type", + "metadata.namespace", + "metadata.name": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }) +} + +func AddFieldLabelConversionsForNamespace(scheme *runtime.Scheme) error { + return scheme.AddFieldLabelConversionFunc("v1", "Namespace", + func(label, value string) (string, string, error) { + switch label { + case "status.phase", + "metadata.name": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }) +} + +func AddFieldLabelConversionsForSecret(scheme *runtime.Scheme) error { + return scheme.AddFieldLabelConversionFunc("v1", "Secret", + func(label, value string) (string, string, error) { + switch label { + case "type", + "metadata.namespace", + "metadata.name": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }) +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/defaults.go new file mode 100644 index 000000000000..c2aeafc3d2fd --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/defaults.go @@ -0,0 +1,413 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/kubernetes/pkg/features" + "k8s.io/kubernetes/pkg/util/parsers" + utilpointer "k8s.io/kubernetes/pkg/util/pointer" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + return RegisterDefaults(scheme) +} + +func SetDefaults_ResourceList(obj *v1.ResourceList) { + for key, val := range *obj { + // TODO(#18538): We round up resource values to milli scale to maintain API compatibility. + // In the future, we should instead reject values that need rounding. 
+ const milliScale = -3 + val.RoundUp(milliScale) + + (*obj)[v1.ResourceName(key)] = val + } +} + +func SetDefaults_ReplicationController(obj *v1.ReplicationController) { + var labels map[string]string + if obj.Spec.Template != nil { + labels = obj.Spec.Template.Labels + } + // TODO: support templates defined elsewhere when we support them in the API + if labels != nil { + if len(obj.Spec.Selector) == 0 { + obj.Spec.Selector = labels + } + if len(obj.Labels) == 0 { + obj.Labels = labels + } + } + if obj.Spec.Replicas == nil { + obj.Spec.Replicas = new(int32) + *obj.Spec.Replicas = 1 + } +} +func SetDefaults_Volume(obj *v1.Volume) { + if utilpointer.AllPtrFieldsNil(&obj.VolumeSource) { + obj.VolumeSource = v1.VolumeSource{ + EmptyDir: &v1.EmptyDirVolumeSource{}, + } + } +} +func SetDefaults_ContainerPort(obj *v1.ContainerPort) { + if obj.Protocol == "" { + obj.Protocol = v1.ProtocolTCP + } +} +func SetDefaults_Container(obj *v1.Container) { + if obj.ImagePullPolicy == "" { + // Ignore error and assume it has been validated elsewhere + _, tag, _, _ := parsers.ParseImageName(obj.Image) + + // Check image tag + if tag == "latest" { + obj.ImagePullPolicy = v1.PullAlways + } else { + obj.ImagePullPolicy = v1.PullIfNotPresent + } + } + if obj.TerminationMessagePath == "" { + obj.TerminationMessagePath = v1.TerminationMessagePathDefault + } + if obj.TerminationMessagePolicy == "" { + obj.TerminationMessagePolicy = v1.TerminationMessageReadFile + } +} +func SetDefaults_Service(obj *v1.Service) { + if obj.Spec.SessionAffinity == "" { + obj.Spec.SessionAffinity = v1.ServiceAffinityNone + } + if obj.Spec.SessionAffinity == v1.ServiceAffinityNone { + obj.Spec.SessionAffinityConfig = nil + } + if obj.Spec.SessionAffinity == v1.ServiceAffinityClientIP { + if obj.Spec.SessionAffinityConfig == nil || obj.Spec.SessionAffinityConfig.ClientIP == nil || obj.Spec.SessionAffinityConfig.ClientIP.TimeoutSeconds == nil { + timeoutSeconds := v1.DefaultClientIPServiceAffinitySeconds + obj.Spec.SessionAffinityConfig = &v1.SessionAffinityConfig{ + ClientIP: &v1.ClientIPConfig{ + TimeoutSeconds: &timeoutSeconds, + }, + } + } + } + if obj.Spec.Type == "" { + obj.Spec.Type = v1.ServiceTypeClusterIP + } + for i := range obj.Spec.Ports { + sp := &obj.Spec.Ports[i] + if sp.Protocol == "" { + sp.Protocol = v1.ProtocolTCP + } + if sp.TargetPort == intstr.FromInt(0) || sp.TargetPort == intstr.FromString("") { + sp.TargetPort = intstr.FromInt(int(sp.Port)) + } + } + // Defaults ExternalTrafficPolicy field for NodePort / LoadBalancer service + // to Global for consistency. 
+ if (obj.Spec.Type == v1.ServiceTypeNodePort || + obj.Spec.Type == v1.ServiceTypeLoadBalancer) && + obj.Spec.ExternalTrafficPolicy == "" { + obj.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeCluster + } +} +func SetDefaults_Pod(obj *v1.Pod) { + // If limits are specified, but requests are not, default requests to limits + // This is done here rather than a more specific defaulting pass on v1.ResourceRequirements + // because we only want this defaulting semantic to take place on a v1.Pod and not a v1.PodTemplate + for i := range obj.Spec.Containers { + // set requests to limits if requests are not specified, but limits are + if obj.Spec.Containers[i].Resources.Limits != nil { + if obj.Spec.Containers[i].Resources.Requests == nil { + obj.Spec.Containers[i].Resources.Requests = make(v1.ResourceList) + } + for key, value := range obj.Spec.Containers[i].Resources.Limits { + if _, exists := obj.Spec.Containers[i].Resources.Requests[key]; !exists { + obj.Spec.Containers[i].Resources.Requests[key] = *(value.Copy()) + } + } + } + } + for i := range obj.Spec.InitContainers { + if obj.Spec.InitContainers[i].Resources.Limits != nil { + if obj.Spec.InitContainers[i].Resources.Requests == nil { + obj.Spec.InitContainers[i].Resources.Requests = make(v1.ResourceList) + } + for key, value := range obj.Spec.InitContainers[i].Resources.Limits { + if _, exists := obj.Spec.InitContainers[i].Resources.Requests[key]; !exists { + obj.Spec.InitContainers[i].Resources.Requests[key] = *(value.Copy()) + } + } + } + } +} +func SetDefaults_PodSpec(obj *v1.PodSpec) { + if obj.DNSPolicy == "" { + obj.DNSPolicy = v1.DNSClusterFirst + } + if obj.RestartPolicy == "" { + obj.RestartPolicy = v1.RestartPolicyAlways + } + if obj.HostNetwork { + defaultHostNetworkPorts(&obj.Containers) + defaultHostNetworkPorts(&obj.InitContainers) + } + if obj.SecurityContext == nil { + obj.SecurityContext = &v1.PodSecurityContext{} + } + if obj.TerminationGracePeriodSeconds == nil { + period := int64(v1.DefaultTerminationGracePeriodSeconds) + obj.TerminationGracePeriodSeconds = &period + } + if obj.SchedulerName == "" { + obj.SchedulerName = v1.DefaultSchedulerName + } +} +func SetDefaults_Probe(obj *v1.Probe) { + if obj.TimeoutSeconds == 0 { + obj.TimeoutSeconds = 1 + } + if obj.PeriodSeconds == 0 { + obj.PeriodSeconds = 10 + } + if obj.SuccessThreshold == 0 { + obj.SuccessThreshold = 1 + } + if obj.FailureThreshold == 0 { + obj.FailureThreshold = 3 + } +} +func SetDefaults_SecretVolumeSource(obj *v1.SecretVolumeSource) { + if obj.DefaultMode == nil { + perm := int32(v1.SecretVolumeSourceDefaultMode) + obj.DefaultMode = &perm + } +} +func SetDefaults_ConfigMapVolumeSource(obj *v1.ConfigMapVolumeSource) { + if obj.DefaultMode == nil { + perm := int32(v1.ConfigMapVolumeSourceDefaultMode) + obj.DefaultMode = &perm + } +} +func SetDefaults_DownwardAPIVolumeSource(obj *v1.DownwardAPIVolumeSource) { + if obj.DefaultMode == nil { + perm := int32(v1.DownwardAPIVolumeSourceDefaultMode) + obj.DefaultMode = &perm + } +} +func SetDefaults_Secret(obj *v1.Secret) { + if obj.Type == "" { + obj.Type = v1.SecretTypeOpaque + } +} +func SetDefaults_ProjectedVolumeSource(obj *v1.ProjectedVolumeSource) { + if obj.DefaultMode == nil { + perm := int32(v1.ProjectedVolumeSourceDefaultMode) + obj.DefaultMode = &perm + } +} +func SetDefaults_PersistentVolume(obj *v1.PersistentVolume) { + if obj.Status.Phase == "" { + obj.Status.Phase = v1.VolumePending + } + if obj.Spec.PersistentVolumeReclaimPolicy == "" { + obj.Spec.PersistentVolumeReclaimPolicy = 
v1.PersistentVolumeReclaimRetain + } + if obj.Spec.VolumeMode == nil && utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + obj.Spec.VolumeMode = new(v1.PersistentVolumeMode) + *obj.Spec.VolumeMode = v1.PersistentVolumeFilesystem + } +} +func SetDefaults_PersistentVolumeClaim(obj *v1.PersistentVolumeClaim) { + if obj.Status.Phase == "" { + obj.Status.Phase = v1.ClaimPending + } + if obj.Spec.VolumeMode == nil && utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + obj.Spec.VolumeMode = new(v1.PersistentVolumeMode) + *obj.Spec.VolumeMode = v1.PersistentVolumeFilesystem + } +} +func SetDefaults_ISCSIVolumeSource(obj *v1.ISCSIVolumeSource) { + if obj.ISCSIInterface == "" { + obj.ISCSIInterface = "default" + } +} +func SetDefaults_ISCSIPersistentVolumeSource(obj *v1.ISCSIPersistentVolumeSource) { + if obj.ISCSIInterface == "" { + obj.ISCSIInterface = "default" + } +} +func SetDefaults_AzureDiskVolumeSource(obj *v1.AzureDiskVolumeSource) { + if obj.CachingMode == nil { + obj.CachingMode = new(v1.AzureDataDiskCachingMode) + *obj.CachingMode = v1.AzureDataDiskCachingReadWrite + } + if obj.Kind == nil { + obj.Kind = new(v1.AzureDataDiskKind) + *obj.Kind = v1.AzureSharedBlobDisk + } + if obj.FSType == nil { + obj.FSType = new(string) + *obj.FSType = "ext4" + } + if obj.ReadOnly == nil { + obj.ReadOnly = new(bool) + *obj.ReadOnly = false + } +} +func SetDefaults_Endpoints(obj *v1.Endpoints) { + for i := range obj.Subsets { + ss := &obj.Subsets[i] + for i := range ss.Ports { + ep := &ss.Ports[i] + if ep.Protocol == "" { + ep.Protocol = v1.ProtocolTCP + } + } + } +} +func SetDefaults_HTTPGetAction(obj *v1.HTTPGetAction) { + if obj.Path == "" { + obj.Path = "/" + } + if obj.Scheme == "" { + obj.Scheme = v1.URISchemeHTTP + } +} +func SetDefaults_NamespaceStatus(obj *v1.NamespaceStatus) { + if obj.Phase == "" { + obj.Phase = v1.NamespaceActive + } +} +func SetDefaults_Node(obj *v1.Node) { + if obj.Spec.ExternalID == "" { + obj.Spec.ExternalID = obj.Name + } +} +func SetDefaults_NodeStatus(obj *v1.NodeStatus) { + if obj.Allocatable == nil && obj.Capacity != nil { + obj.Allocatable = make(v1.ResourceList, len(obj.Capacity)) + for key, value := range obj.Capacity { + obj.Allocatable[key] = *(value.Copy()) + } + obj.Allocatable = obj.Capacity + } +} +func SetDefaults_ObjectFieldSelector(obj *v1.ObjectFieldSelector) { + if obj.APIVersion == "" { + obj.APIVersion = "v1" + } +} +func SetDefaults_LimitRangeItem(obj *v1.LimitRangeItem) { + // for container limits, we apply default values + if obj.Type == v1.LimitTypeContainer { + + if obj.Default == nil { + obj.Default = make(v1.ResourceList) + } + if obj.DefaultRequest == nil { + obj.DefaultRequest = make(v1.ResourceList) + } + + // If a default limit is unspecified, but the max is specified, default the limit to the max + for key, value := range obj.Max { + if _, exists := obj.Default[key]; !exists { + obj.Default[key] = *(value.Copy()) + } + } + // If a default limit is specified, but the default request is not, default request to limit + for key, value := range obj.Default { + if _, exists := obj.DefaultRequest[key]; !exists { + obj.DefaultRequest[key] = *(value.Copy()) + } + } + // If a default request is not specified, but the min is provided, default request to the min + for key, value := range obj.Min { + if _, exists := obj.DefaultRequest[key]; !exists { + obj.DefaultRequest[key] = *(value.Copy()) + } + } + } +} +func SetDefaults_ConfigMap(obj *v1.ConfigMap) { + if obj.Data == nil { + obj.Data = make(map[string]string) + } +} + 
+// With host networking default all container ports to host ports. +func defaultHostNetworkPorts(containers *[]v1.Container) { + for i := range *containers { + for j := range (*containers)[i].Ports { + if (*containers)[i].Ports[j].HostPort == 0 { + (*containers)[i].Ports[j].HostPort = (*containers)[i].Ports[j].ContainerPort + } + } + } +} + +func SetDefaults_RBDVolumeSource(obj *v1.RBDVolumeSource) { + if obj.RBDPool == "" { + obj.RBDPool = "rbd" + } + if obj.RadosUser == "" { + obj.RadosUser = "admin" + } + if obj.Keyring == "" { + obj.Keyring = "/etc/ceph/keyring" + } +} + +func SetDefaults_RBDPersistentVolumeSource(obj *v1.RBDPersistentVolumeSource) { + if obj.RBDPool == "" { + obj.RBDPool = "rbd" + } + if obj.RadosUser == "" { + obj.RadosUser = "admin" + } + if obj.Keyring == "" { + obj.Keyring = "/etc/ceph/keyring" + } +} + +func SetDefaults_ScaleIOVolumeSource(obj *v1.ScaleIOVolumeSource) { + if obj.StorageMode == "" { + obj.StorageMode = "ThinProvisioned" + } + if obj.FSType == "" { + obj.FSType = "xfs" + } +} + +func SetDefaults_ScaleIOPersistentVolumeSource(obj *v1.ScaleIOPersistentVolumeSource) { + if obj.StorageMode == "" { + obj.StorageMode = "ThinProvisioned" + } + if obj.FSType == "" { + obj.FSType = "xfs" + } +} + +func SetDefaults_HostPathVolumeSource(obj *v1.HostPathVolumeSource) { + typeVol := v1.HostPathUnset + if obj.Type == nil { + obj.Type = &typeVol + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/doc.go new file mode 100644 index 000000000000..6fc636976fcb --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/core +// +k8s:conversion-gen-external-types=k8s.io/api/core/v1 +// +k8s:defaulter-gen=TypeMeta +// +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/core/v1 + +// Package v1 is the v1 version of the API. 
+package v1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/BUILD new file mode 100644 index 000000000000..ea7e2ae974a2 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/BUILD @@ -0,0 +1,51 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = ["helpers_test.go"], + importpath = "k8s.io/kubernetes/pkg/apis/core/v1/helper", + library = ":go_default_library", + deps = [ + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + ], +) + +go_library( + name = "go_default_library", + srcs = ["helpers.go"], + importpath = "k8s.io/kubernetes/pkg/apis/core/v1/helper", + deps = [ + "//pkg/apis/core/helper:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/selection:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/apis/core/v1/helper/qos:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/helpers.go b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/helpers.go new file mode 100644 index 000000000000..4b21aefcfced --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/helpers.go @@ -0,0 +1,461 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package helper + +import ( + "encoding/json" + "fmt" + "strings" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/kubernetes/pkg/apis/core/helper" +) + +// IsExtendedResourceName returns true if the resource name is not in the +// default namespace. +func IsExtendedResourceName(name v1.ResourceName) bool { + return !IsDefaultNamespaceResource(name) +} + +// IsDefaultNamespaceResource returns true if the resource name is in the +// *kubernetes.io/ namespace. Partially-qualified (unprefixed) names are +// implicitly in the kubernetes.io/ namespace. 
+func IsDefaultNamespaceResource(name v1.ResourceName) bool { + return !strings.Contains(string(name), "/") || + strings.Contains(string(name), v1.ResourceDefaultNamespacePrefix) + +} + +// IsHugePageResourceName returns true if the resource name has the huge page +// resource prefix. +func IsHugePageResourceName(name v1.ResourceName) bool { + return strings.HasPrefix(string(name), v1.ResourceHugePagesPrefix) +} + +// HugePageResourceName returns a ResourceName with the canonical hugepage +// prefix prepended for the specified page size. The page size is converted +// to its canonical representation. +func HugePageResourceName(pageSize resource.Quantity) v1.ResourceName { + return v1.ResourceName(fmt.Sprintf("%s%s", v1.ResourceHugePagesPrefix, pageSize.String())) +} + +// HugePageSizeFromResourceName returns the page size for the specified huge page +// resource name. If the specified input is not a valid huge page resource name +// an error is returned. +func HugePageSizeFromResourceName(name v1.ResourceName) (resource.Quantity, error) { + if !IsHugePageResourceName(name) { + return resource.Quantity{}, fmt.Errorf("resource name: %s is not valid hugepage name", name) + } + pageSize := strings.TrimPrefix(string(name), v1.ResourceHugePagesPrefix) + return resource.ParseQuantity(pageSize) +} + +var overcommitBlacklist = sets.NewString(string(v1.ResourceNvidiaGPU)) + +// IsOvercommitAllowed returns true if the resource is in the default +// namespace and not blacklisted and is not hugepages. +func IsOvercommitAllowed(name v1.ResourceName) bool { + return IsDefaultNamespaceResource(name) && + !IsHugePageResourceName(name) && + !overcommitBlacklist.Has(string(name)) +} + +// Extended and Hugepages resources +func IsScalarResourceName(name v1.ResourceName) bool { + return IsExtendedResourceName(name) || IsHugePageResourceName(name) +} + +// this function aims to check if the service's ClusterIP is set or not +// the objective is not to perform validation here +func IsServiceIPSet(service *v1.Service) bool { + return service.Spec.ClusterIP != v1.ClusterIPNone && service.Spec.ClusterIP != "" +} + +// this function aims to check if the service's cluster IP is requested or not +func IsServiceIPRequested(service *v1.Service) bool { + // ExternalName services are CNAME aliases to external ones. Ignore the IP. + if service.Spec.Type == v1.ServiceTypeExternalName { + return false + } + return service.Spec.ClusterIP == "" +} + +// AddToNodeAddresses appends the NodeAddresses to the passed-by-pointer slice, +// only if they do not already exist +func AddToNodeAddresses(addresses *[]v1.NodeAddress, addAddresses ...v1.NodeAddress) { + for _, add := range addAddresses { + exists := false + for _, existing := range *addresses { + if existing.Address == add.Address && existing.Type == add.Type { + exists = true + break + } + } + if !exists { + *addresses = append(*addresses, add) + } + } +} + +// TODO: make method on LoadBalancerStatus? +func LoadBalancerStatusEqual(l, r *v1.LoadBalancerStatus) bool { + return ingressSliceEqual(l.Ingress, r.Ingress) +} + +func ingressSliceEqual(lhs, rhs []v1.LoadBalancerIngress) bool { + if len(lhs) != len(rhs) { + return false + } + for i := range lhs { + if !ingressEqual(&lhs[i], &rhs[i]) { + return false + } + } + return true +} + +func ingressEqual(lhs, rhs *v1.LoadBalancerIngress) bool { + if lhs.IP != rhs.IP { + return false + } + if lhs.Hostname != rhs.Hostname { + return false + } + return true +} + +// TODO: make method on LoadBalancerStatus? 
+func LoadBalancerStatusDeepCopy(lb *v1.LoadBalancerStatus) *v1.LoadBalancerStatus { + c := &v1.LoadBalancerStatus{} + c.Ingress = make([]v1.LoadBalancerIngress, len(lb.Ingress)) + for i := range lb.Ingress { + c.Ingress[i] = lb.Ingress[i] + } + return c +} + +// GetAccessModesAsString returns a string representation of an array of access modes. +// modes, when present, are always in the same order: RWO,ROX,RWX. +func GetAccessModesAsString(modes []v1.PersistentVolumeAccessMode) string { + modes = removeDuplicateAccessModes(modes) + modesStr := []string{} + if containsAccessMode(modes, v1.ReadWriteOnce) { + modesStr = append(modesStr, "RWO") + } + if containsAccessMode(modes, v1.ReadOnlyMany) { + modesStr = append(modesStr, "ROX") + } + if containsAccessMode(modes, v1.ReadWriteMany) { + modesStr = append(modesStr, "RWX") + } + return strings.Join(modesStr, ",") +} + +// GetAccessModesAsString returns an array of AccessModes from a string created by GetAccessModesAsString +func GetAccessModesFromString(modes string) []v1.PersistentVolumeAccessMode { + strmodes := strings.Split(modes, ",") + accessModes := []v1.PersistentVolumeAccessMode{} + for _, s := range strmodes { + s = strings.Trim(s, " ") + switch { + case s == "RWO": + accessModes = append(accessModes, v1.ReadWriteOnce) + case s == "ROX": + accessModes = append(accessModes, v1.ReadOnlyMany) + case s == "RWX": + accessModes = append(accessModes, v1.ReadWriteMany) + } + } + return accessModes +} + +// removeDuplicateAccessModes returns an array of access modes without any duplicates +func removeDuplicateAccessModes(modes []v1.PersistentVolumeAccessMode) []v1.PersistentVolumeAccessMode { + accessModes := []v1.PersistentVolumeAccessMode{} + for _, m := range modes { + if !containsAccessMode(accessModes, m) { + accessModes = append(accessModes, m) + } + } + return accessModes +} + +func containsAccessMode(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool { + for _, m := range modes { + if m == mode { + return true + } + } + return false +} + +// NodeSelectorRequirementsAsSelector converts the []NodeSelectorRequirement api type into a struct that implements +// labels.Selector. +func NodeSelectorRequirementsAsSelector(nsm []v1.NodeSelectorRequirement) (labels.Selector, error) { + if len(nsm) == 0 { + return labels.Nothing(), nil + } + selector := labels.NewSelector() + for _, expr := range nsm { + var op selection.Operator + switch expr.Operator { + case v1.NodeSelectorOpIn: + op = selection.In + case v1.NodeSelectorOpNotIn: + op = selection.NotIn + case v1.NodeSelectorOpExists: + op = selection.Exists + case v1.NodeSelectorOpDoesNotExist: + op = selection.DoesNotExist + case v1.NodeSelectorOpGt: + op = selection.GreaterThan + case v1.NodeSelectorOpLt: + op = selection.LessThan + default: + return nil, fmt.Errorf("%q is not a valid node selector operator", expr.Operator) + } + r, err := labels.NewRequirement(expr.Key, op, expr.Values) + if err != nil { + return nil, err + } + selector = selector.Add(*r) + } + return selector, nil +} + +// AddOrUpdateTolerationInPodSpec tries to add a toleration to the toleration list in PodSpec. +// Returns true if something was updated, false otherwise. 
+func AddOrUpdateTolerationInPodSpec(spec *v1.PodSpec, toleration *v1.Toleration) bool { + podTolerations := spec.Tolerations + + var newTolerations []v1.Toleration + updated := false + for i := range podTolerations { + if toleration.MatchToleration(&podTolerations[i]) { + if helper.Semantic.DeepEqual(toleration, podTolerations[i]) { + return false + } + newTolerations = append(newTolerations, *toleration) + updated = true + continue + } + + newTolerations = append(newTolerations, podTolerations[i]) + } + + if !updated { + newTolerations = append(newTolerations, *toleration) + } + + spec.Tolerations = newTolerations + return true +} + +// AddOrUpdateTolerationInPod tries to add a toleration to the pod's toleration list. +// Returns true if something was updated, false otherwise. +func AddOrUpdateTolerationInPod(pod *v1.Pod, toleration *v1.Toleration) bool { + return AddOrUpdateTolerationInPodSpec(&pod.Spec, toleration) +} + +// TolerationsTolerateTaint checks if taint is tolerated by any of the tolerations. +func TolerationsTolerateTaint(tolerations []v1.Toleration, taint *v1.Taint) bool { + for i := range tolerations { + if tolerations[i].ToleratesTaint(taint) { + return true + } + } + return false +} + +type taintsFilterFunc func(*v1.Taint) bool + +// TolerationsTolerateTaintsWithFilter checks if given tolerations tolerates +// all the taints that apply to the filter in given taint list. +func TolerationsTolerateTaintsWithFilter(tolerations []v1.Toleration, taints []v1.Taint, applyFilter taintsFilterFunc) bool { + if len(taints) == 0 { + return true + } + + for i := range taints { + if applyFilter != nil && !applyFilter(&taints[i]) { + continue + } + + if !TolerationsTolerateTaint(tolerations, &taints[i]) { + return false + } + } + + return true +} + +// Returns true and list of Tolerations matching all Taints if all are tolerated, or false otherwise. +func GetMatchingTolerations(taints []v1.Taint, tolerations []v1.Toleration) (bool, []v1.Toleration) { + if len(taints) == 0 { + return true, []v1.Toleration{} + } + if len(tolerations) == 0 && len(taints) > 0 { + return false, []v1.Toleration{} + } + result := []v1.Toleration{} + for i := range taints { + tolerated := false + for j := range tolerations { + if tolerations[j].ToleratesTaint(&taints[i]) { + result = append(result, tolerations[j]) + tolerated = true + break + } + } + if !tolerated { + return false, []v1.Toleration{} + } + } + return true, result +} + +func GetAvoidPodsFromNodeAnnotations(annotations map[string]string) (v1.AvoidPods, error) { + var avoidPods v1.AvoidPods + if len(annotations) > 0 && annotations[v1.PreferAvoidPodsAnnotationKey] != "" { + err := json.Unmarshal([]byte(annotations[v1.PreferAvoidPodsAnnotationKey]), &avoidPods) + if err != nil { + return avoidPods, err + } + } + return avoidPods, nil +} + +// SysctlsFromPodAnnotations parses the sysctl annotations into a slice of safe Sysctls +// and a slice of unsafe Sysctls. This is only a convenience wrapper around +// SysctlsFromPodAnnotation. +func SysctlsFromPodAnnotations(a map[string]string) ([]v1.Sysctl, []v1.Sysctl, error) { + safe, err := SysctlsFromPodAnnotation(a[v1.SysctlsPodAnnotationKey]) + if err != nil { + return nil, nil, err + } + unsafe, err := SysctlsFromPodAnnotation(a[v1.UnsafeSysctlsPodAnnotationKey]) + if err != nil { + return nil, nil, err + } + + return safe, unsafe, nil +} + +// SysctlsFromPodAnnotation parses an annotation value into a slice of Sysctls. 
+func SysctlsFromPodAnnotation(annotation string) ([]v1.Sysctl, error) { + if len(annotation) == 0 { + return nil, nil + } + + kvs := strings.Split(annotation, ",") + sysctls := make([]v1.Sysctl, len(kvs)) + for i, kv := range kvs { + cs := strings.Split(kv, "=") + if len(cs) != 2 || len(cs[0]) == 0 { + return nil, fmt.Errorf("sysctl %q not of the format sysctl_name=value", kv) + } + sysctls[i].Name = cs[0] + sysctls[i].Value = cs[1] + } + return sysctls, nil +} + +// PodAnnotationsFromSysctls creates an annotation value for a slice of Sysctls. +func PodAnnotationsFromSysctls(sysctls []v1.Sysctl) string { + if len(sysctls) == 0 { + return "" + } + + kvs := make([]string, len(sysctls)) + for i := range sysctls { + kvs[i] = fmt.Sprintf("%s=%s", sysctls[i].Name, sysctls[i].Value) + } + return strings.Join(kvs, ",") +} + +// GetPersistentVolumeClass returns StorageClassName. +func GetPersistentVolumeClass(volume *v1.PersistentVolume) string { + // Use beta annotation first + if class, found := volume.Annotations[v1.BetaStorageClassAnnotation]; found { + return class + } + + return volume.Spec.StorageClassName +} + +// GetPersistentVolumeClaimClass returns StorageClassName. If no storage class was +// requested, it returns "". +func GetPersistentVolumeClaimClass(claim *v1.PersistentVolumeClaim) string { + // Use beta annotation first + if class, found := claim.Annotations[v1.BetaStorageClassAnnotation]; found { + return class + } + + if claim.Spec.StorageClassName != nil { + return *claim.Spec.StorageClassName + } + + return "" +} + +// PersistentVolumeClaimHasClass returns true if given claim has set StorageClassName field. +func PersistentVolumeClaimHasClass(claim *v1.PersistentVolumeClaim) bool { + // Use beta annotation first + if _, found := claim.Annotations[v1.BetaStorageClassAnnotation]; found { + return true + } + + if claim.Spec.StorageClassName != nil { + return true + } + + return false +} + +// GetStorageNodeAffinityFromAnnotation gets the json serialized data from PersistentVolume.Annotations +// and converts it to the NodeAffinity type in api. 
+// TODO: update when storage node affinity graduates to beta +func GetStorageNodeAffinityFromAnnotation(annotations map[string]string) (*v1.NodeAffinity, error) { + if len(annotations) > 0 && annotations[v1.AlphaStorageNodeAffinityAnnotation] != "" { + var affinity v1.NodeAffinity + err := json.Unmarshal([]byte(annotations[v1.AlphaStorageNodeAffinityAnnotation]), &affinity) + if err != nil { + return nil, err + } + return &affinity, nil + } + return nil, nil +} + +// Converts NodeAffinity type to Alpha annotation for use in PersistentVolumes +// TODO: update when storage node affinity graduates to beta +func StorageNodeAffinityToAlphaAnnotation(annotations map[string]string, affinity *v1.NodeAffinity) error { + if affinity == nil { + return nil + } + + json, err := json.Marshal(*affinity) + if err != nil { + return err + } + annotations[v1.AlphaStorageNodeAffinityAnnotation] = string(json) + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/qos/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/qos/BUILD new file mode 100644 index 000000000000..d29fcf09e936 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/qos/BUILD @@ -0,0 +1,47 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = ["qos_test.go"], + importpath = "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos", + library = ":go_default_library", + deps = [ + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/helper/qos:go_default_library", + "//pkg/apis/core/v1:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + ], +) + +go_library( + name = "go_default_library", + srcs = ["qos.go"], + importpath = "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos", + deps = [ + "//pkg/apis/core/v1/helper:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/qos/qos.go b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/qos/qos.go new file mode 100644 index 000000000000..5e9dbdd7462a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/qos/qos.go @@ -0,0 +1,98 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package qos + +import ( + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/util/sets" + v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" +) + +// QOSList is a set of (resource name, QoS class) pairs. +type QOSList map[v1.ResourceName]v1.PodQOSClass + +func isSupportedQoSComputeResource(name v1.ResourceName) bool { + supportedQoSComputeResources := sets.NewString(string(v1.ResourceCPU), string(v1.ResourceMemory)) + return supportedQoSComputeResources.Has(string(name)) || v1helper.IsHugePageResourceName(name) +} + +// GetPodQOS returns the QoS class of a pod. +// A pod is besteffort if none of its containers have specified any requests or limits. +// A pod is guaranteed only when requests and limits are specified for all the containers and they are equal. +// A pod is burstable if limits and requests do not match across all containers. +func GetPodQOS(pod *v1.Pod) v1.PodQOSClass { + requests := v1.ResourceList{} + limits := v1.ResourceList{} + zeroQuantity := resource.MustParse("0") + isGuaranteed := true + for _, container := range pod.Spec.Containers { + // process requests + for name, quantity := range container.Resources.Requests { + if !isSupportedQoSComputeResource(name) { + continue + } + if quantity.Cmp(zeroQuantity) == 1 { + delta := quantity.Copy() + if _, exists := requests[name]; !exists { + requests[name] = *delta + } else { + delta.Add(requests[name]) + requests[name] = *delta + } + } + } + // process limits + qosLimitsFound := sets.NewString() + for name, quantity := range container.Resources.Limits { + if !isSupportedQoSComputeResource(name) { + continue + } + if quantity.Cmp(zeroQuantity) == 1 { + qosLimitsFound.Insert(string(name)) + delta := quantity.Copy() + if _, exists := limits[name]; !exists { + limits[name] = *delta + } else { + delta.Add(limits[name]) + limits[name] = *delta + } + } + } + + if !qosLimitsFound.HasAll(string(v1.ResourceMemory), string(v1.ResourceCPU)) { + isGuaranteed = false + } + } + if len(requests) == 0 && len(limits) == 0 { + return v1.PodQOSBestEffort + } + // Check is requests match limits for all resources. 
+ if isGuaranteed { + for name, req := range requests { + if lim, exists := limits[name]; !exists || lim.Cmp(req) != 0 { + isGuaranteed = false + break + } + } + } + if isGuaranteed && + len(requests) == len(limits) { + return v1.PodQOSGuaranteed + } + return v1.PodQOSBurstable +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/register.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/api/v1/register.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/v1/register.go diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/validation/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/validation/BUILD new file mode 100644 index 000000000000..c5b694af7bd9 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/validation/BUILD @@ -0,0 +1,43 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["validation.go"], + importpath = "k8s.io/kubernetes/pkg/apis/core/v1/validation", + visibility = ["//visibility:public"], + deps = [ + "//pkg/apis/core/helper:go_default_library", + "//pkg/apis/core/v1/helper:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["validation_test.go"], + importpath = "k8s.io/kubernetes/pkg/apis/core/v1/validation", + library = ":go_default_library", + deps = [ + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/validation/validation.go b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/validation/validation.go new file mode 100644 index 000000000000..8e41b6c96327 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/validation/validation.go @@ -0,0 +1,163 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package validation + +import ( + "fmt" + "strings" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/kubernetes/pkg/apis/core/helper" + v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" +) + +const isNegativeErrorMsg string = `must be greater than or equal to 0` +const isNotIntegerErrorMsg string = `must be an integer` + +func ValidateResourceRequirements(requirements *v1.ResourceRequirements, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + limPath := fldPath.Child("limits") + reqPath := fldPath.Child("requests") + for resourceName, quantity := range requirements.Limits { + fldPath := limPath.Key(string(resourceName)) + // Validate resource name. + allErrs = append(allErrs, validateContainerResourceName(string(resourceName), fldPath)...) + + // Validate resource quantity. + allErrs = append(allErrs, ValidateResourceQuantityValue(string(resourceName), quantity, fldPath)...) + + } + for resourceName, quantity := range requirements.Requests { + fldPath := reqPath.Key(string(resourceName)) + // Validate resource name. + allErrs = append(allErrs, validateContainerResourceName(string(resourceName), fldPath)...) + // Validate resource quantity. + allErrs = append(allErrs, ValidateResourceQuantityValue(string(resourceName), quantity, fldPath)...) + + // Check that request <= limit. + limitQuantity, exists := requirements.Limits[resourceName] + if exists { + // For GPUs, not only requests can't exceed limits, they also can't be lower, i.e. must be equal. + if quantity.Cmp(limitQuantity) != 0 && !v1helper.IsOvercommitAllowed(resourceName) { + allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be equal to %s limit", resourceName))) + } else if quantity.Cmp(limitQuantity) > 0 { + allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be less than or equal to %s limit", resourceName))) + } + } else if resourceName == v1.ResourceNvidiaGPU { + allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be equal to %s request", v1.ResourceNvidiaGPU))) + } + } + + return allErrs +} + +func validateContainerResourceName(value string, fldPath *field.Path) field.ErrorList { + allErrs := validateResourceName(value, fldPath) + if len(strings.Split(value, "/")) == 1 { + if !helper.IsStandardContainerResourceName(value) { + return append(allErrs, field.Invalid(fldPath, value, "must be a standard resource for containers")) + } + } + return allErrs +} + +// ValidateResourceQuantityValue enforces that specified quantity is valid for specified resource +func ValidateResourceQuantityValue(resource string, value resource.Quantity, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, ValidateNonnegativeQuantity(value, fldPath)...) + if helper.IsIntegerResourceName(resource) { + if value.MilliValue()%int64(1000) != int64(0) { + allErrs = append(allErrs, field.Invalid(fldPath, value, isNotIntegerErrorMsg)) + } + } + return allErrs +} + +// Validates that a Quantity is not negative +func ValidateNonnegativeQuantity(value resource.Quantity, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if value.Cmp(resource.Quantity{}) < 0 { + allErrs = append(allErrs, field.Invalid(fldPath, value.String(), isNegativeErrorMsg)) + } + return allErrs +} + +// Validate compute resource typename. 
+// Refer to docs/design/resources.md for more details. +func validateResourceName(value string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for _, msg := range validation.IsQualifiedName(value) { + allErrs = append(allErrs, field.Invalid(fldPath, value, msg)) + } + if len(allErrs) != 0 { + return allErrs + } + + if len(strings.Split(value, "/")) == 1 { + if !helper.IsStandardResourceName(value) { + return append(allErrs, field.Invalid(fldPath, value, "must be a standard resource type or fully qualified")) + } + } + + return allErrs +} + +func ValidatePodLogOptions(opts *v1.PodLogOptions) field.ErrorList { + allErrs := field.ErrorList{} + if opts.TailLines != nil && *opts.TailLines < 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath("tailLines"), *opts.TailLines, isNegativeErrorMsg)) + } + if opts.LimitBytes != nil && *opts.LimitBytes < 1 { + allErrs = append(allErrs, field.Invalid(field.NewPath("limitBytes"), *opts.LimitBytes, "must be greater than 0")) + } + switch { + case opts.SinceSeconds != nil && opts.SinceTime != nil: + allErrs = append(allErrs, field.Forbidden(field.NewPath(""), "at most one of `sinceTime` or `sinceSeconds` may be specified")) + case opts.SinceSeconds != nil: + if *opts.SinceSeconds < 1 { + allErrs = append(allErrs, field.Invalid(field.NewPath("sinceSeconds"), *opts.SinceSeconds, "must be greater than 0")) + } + } + return allErrs +} + +func AccumulateUniqueHostPorts(containers []v1.Container, accumulator *sets.String, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + for ci, ctr := range containers { + idxPath := fldPath.Index(ci) + portsPath := idxPath.Child("ports") + for pi := range ctr.Ports { + idxPath := portsPath.Index(pi) + port := ctr.Ports[pi].HostPort + if port == 0 { + continue + } + str := fmt.Sprintf("%d/%s", port, ctr.Ports[pi].Protocol) + if accumulator.Has(str) { + allErrs = append(allErrs, field.Duplicate(idxPath.Child("hostPort"), str)) + } else { + accumulator.Insert(str) + } + } + } + return allErrs +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go new file mode 100644 index 000000000000..2910297937ff --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go @@ -0,0 +1,5620 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + resource "k8s.io/apimachinery/pkg/api/resource" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" + core "k8s.io/kubernetes/pkg/apis/core" + unsafe "unsafe" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. 
+// Public to allow building arbitrary schemes. +func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1_AWSElasticBlockStoreVolumeSource_To_core_AWSElasticBlockStoreVolumeSource, + Convert_core_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource, + Convert_v1_Affinity_To_core_Affinity, + Convert_core_Affinity_To_v1_Affinity, + Convert_v1_AttachedVolume_To_core_AttachedVolume, + Convert_core_AttachedVolume_To_v1_AttachedVolume, + Convert_v1_AvoidPods_To_core_AvoidPods, + Convert_core_AvoidPods_To_v1_AvoidPods, + Convert_v1_AzureDiskVolumeSource_To_core_AzureDiskVolumeSource, + Convert_core_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource, + Convert_v1_AzureFilePersistentVolumeSource_To_core_AzureFilePersistentVolumeSource, + Convert_core_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource, + Convert_v1_AzureFileVolumeSource_To_core_AzureFileVolumeSource, + Convert_core_AzureFileVolumeSource_To_v1_AzureFileVolumeSource, + Convert_v1_Binding_To_core_Binding, + Convert_core_Binding_To_v1_Binding, + Convert_v1_CSIPersistentVolumeSource_To_core_CSIPersistentVolumeSource, + Convert_core_CSIPersistentVolumeSource_To_v1_CSIPersistentVolumeSource, + Convert_v1_Capabilities_To_core_Capabilities, + Convert_core_Capabilities_To_v1_Capabilities, + Convert_v1_CephFSPersistentVolumeSource_To_core_CephFSPersistentVolumeSource, + Convert_core_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource, + Convert_v1_CephFSVolumeSource_To_core_CephFSVolumeSource, + Convert_core_CephFSVolumeSource_To_v1_CephFSVolumeSource, + Convert_v1_CinderVolumeSource_To_core_CinderVolumeSource, + Convert_core_CinderVolumeSource_To_v1_CinderVolumeSource, + Convert_v1_ClientIPConfig_To_core_ClientIPConfig, + Convert_core_ClientIPConfig_To_v1_ClientIPConfig, + Convert_v1_ComponentCondition_To_core_ComponentCondition, + Convert_core_ComponentCondition_To_v1_ComponentCondition, + Convert_v1_ComponentStatus_To_core_ComponentStatus, + Convert_core_ComponentStatus_To_v1_ComponentStatus, + Convert_v1_ComponentStatusList_To_core_ComponentStatusList, + Convert_core_ComponentStatusList_To_v1_ComponentStatusList, + Convert_v1_ConfigMap_To_core_ConfigMap, + Convert_core_ConfigMap_To_v1_ConfigMap, + Convert_v1_ConfigMapEnvSource_To_core_ConfigMapEnvSource, + Convert_core_ConfigMapEnvSource_To_v1_ConfigMapEnvSource, + Convert_v1_ConfigMapKeySelector_To_core_ConfigMapKeySelector, + Convert_core_ConfigMapKeySelector_To_v1_ConfigMapKeySelector, + Convert_v1_ConfigMapList_To_core_ConfigMapList, + Convert_core_ConfigMapList_To_v1_ConfigMapList, + Convert_v1_ConfigMapProjection_To_core_ConfigMapProjection, + Convert_core_ConfigMapProjection_To_v1_ConfigMapProjection, + Convert_v1_ConfigMapVolumeSource_To_core_ConfigMapVolumeSource, + Convert_core_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource, + Convert_v1_Container_To_core_Container, + Convert_core_Container_To_v1_Container, + Convert_v1_ContainerImage_To_core_ContainerImage, + Convert_core_ContainerImage_To_v1_ContainerImage, + Convert_v1_ContainerPort_To_core_ContainerPort, + Convert_core_ContainerPort_To_v1_ContainerPort, + Convert_v1_ContainerState_To_core_ContainerState, + Convert_core_ContainerState_To_v1_ContainerState, + Convert_v1_ContainerStateRunning_To_core_ContainerStateRunning, + Convert_core_ContainerStateRunning_To_v1_ContainerStateRunning, + Convert_v1_ContainerStateTerminated_To_core_ContainerStateTerminated, + 
Convert_core_ContainerStateTerminated_To_v1_ContainerStateTerminated, + Convert_v1_ContainerStateWaiting_To_core_ContainerStateWaiting, + Convert_core_ContainerStateWaiting_To_v1_ContainerStateWaiting, + Convert_v1_ContainerStatus_To_core_ContainerStatus, + Convert_core_ContainerStatus_To_v1_ContainerStatus, + Convert_v1_DaemonEndpoint_To_core_DaemonEndpoint, + Convert_core_DaemonEndpoint_To_v1_DaemonEndpoint, + Convert_v1_DeleteOptions_To_core_DeleteOptions, + Convert_core_DeleteOptions_To_v1_DeleteOptions, + Convert_v1_DownwardAPIProjection_To_core_DownwardAPIProjection, + Convert_core_DownwardAPIProjection_To_v1_DownwardAPIProjection, + Convert_v1_DownwardAPIVolumeFile_To_core_DownwardAPIVolumeFile, + Convert_core_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile, + Convert_v1_DownwardAPIVolumeSource_To_core_DownwardAPIVolumeSource, + Convert_core_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource, + Convert_v1_EmptyDirVolumeSource_To_core_EmptyDirVolumeSource, + Convert_core_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource, + Convert_v1_EndpointAddress_To_core_EndpointAddress, + Convert_core_EndpointAddress_To_v1_EndpointAddress, + Convert_v1_EndpointPort_To_core_EndpointPort, + Convert_core_EndpointPort_To_v1_EndpointPort, + Convert_v1_EndpointSubset_To_core_EndpointSubset, + Convert_core_EndpointSubset_To_v1_EndpointSubset, + Convert_v1_Endpoints_To_core_Endpoints, + Convert_core_Endpoints_To_v1_Endpoints, + Convert_v1_EndpointsList_To_core_EndpointsList, + Convert_core_EndpointsList_To_v1_EndpointsList, + Convert_v1_EnvFromSource_To_core_EnvFromSource, + Convert_core_EnvFromSource_To_v1_EnvFromSource, + Convert_v1_EnvVar_To_core_EnvVar, + Convert_core_EnvVar_To_v1_EnvVar, + Convert_v1_EnvVarSource_To_core_EnvVarSource, + Convert_core_EnvVarSource_To_v1_EnvVarSource, + Convert_v1_Event_To_core_Event, + Convert_core_Event_To_v1_Event, + Convert_v1_EventList_To_core_EventList, + Convert_core_EventList_To_v1_EventList, + Convert_v1_EventSeries_To_core_EventSeries, + Convert_core_EventSeries_To_v1_EventSeries, + Convert_v1_EventSource_To_core_EventSource, + Convert_core_EventSource_To_v1_EventSource, + Convert_v1_ExecAction_To_core_ExecAction, + Convert_core_ExecAction_To_v1_ExecAction, + Convert_v1_FCVolumeSource_To_core_FCVolumeSource, + Convert_core_FCVolumeSource_To_v1_FCVolumeSource, + Convert_v1_FlexVolumeSource_To_core_FlexVolumeSource, + Convert_core_FlexVolumeSource_To_v1_FlexVolumeSource, + Convert_v1_FlockerVolumeSource_To_core_FlockerVolumeSource, + Convert_core_FlockerVolumeSource_To_v1_FlockerVolumeSource, + Convert_v1_GCEPersistentDiskVolumeSource_To_core_GCEPersistentDiskVolumeSource, + Convert_core_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource, + Convert_v1_GitRepoVolumeSource_To_core_GitRepoVolumeSource, + Convert_core_GitRepoVolumeSource_To_v1_GitRepoVolumeSource, + Convert_v1_GlusterfsVolumeSource_To_core_GlusterfsVolumeSource, + Convert_core_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource, + Convert_v1_HTTPGetAction_To_core_HTTPGetAction, + Convert_core_HTTPGetAction_To_v1_HTTPGetAction, + Convert_v1_HTTPHeader_To_core_HTTPHeader, + Convert_core_HTTPHeader_To_v1_HTTPHeader, + Convert_v1_Handler_To_core_Handler, + Convert_core_Handler_To_v1_Handler, + Convert_v1_HostAlias_To_core_HostAlias, + Convert_core_HostAlias_To_v1_HostAlias, + Convert_v1_HostPathVolumeSource_To_core_HostPathVolumeSource, + Convert_core_HostPathVolumeSource_To_v1_HostPathVolumeSource, + Convert_v1_ISCSIPersistentVolumeSource_To_core_ISCSIPersistentVolumeSource, + 
Convert_core_ISCSIPersistentVolumeSource_To_v1_ISCSIPersistentVolumeSource, + Convert_v1_ISCSIVolumeSource_To_core_ISCSIVolumeSource, + Convert_core_ISCSIVolumeSource_To_v1_ISCSIVolumeSource, + Convert_v1_KeyToPath_To_core_KeyToPath, + Convert_core_KeyToPath_To_v1_KeyToPath, + Convert_v1_Lifecycle_To_core_Lifecycle, + Convert_core_Lifecycle_To_v1_Lifecycle, + Convert_v1_LimitRange_To_core_LimitRange, + Convert_core_LimitRange_To_v1_LimitRange, + Convert_v1_LimitRangeItem_To_core_LimitRangeItem, + Convert_core_LimitRangeItem_To_v1_LimitRangeItem, + Convert_v1_LimitRangeList_To_core_LimitRangeList, + Convert_core_LimitRangeList_To_v1_LimitRangeList, + Convert_v1_LimitRangeSpec_To_core_LimitRangeSpec, + Convert_core_LimitRangeSpec_To_v1_LimitRangeSpec, + Convert_v1_List_To_core_List, + Convert_core_List_To_v1_List, + Convert_v1_ListOptions_To_core_ListOptions, + Convert_core_ListOptions_To_v1_ListOptions, + Convert_v1_LoadBalancerIngress_To_core_LoadBalancerIngress, + Convert_core_LoadBalancerIngress_To_v1_LoadBalancerIngress, + Convert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus, + Convert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus, + Convert_v1_LocalObjectReference_To_core_LocalObjectReference, + Convert_core_LocalObjectReference_To_v1_LocalObjectReference, + Convert_v1_LocalVolumeSource_To_core_LocalVolumeSource, + Convert_core_LocalVolumeSource_To_v1_LocalVolumeSource, + Convert_v1_NFSVolumeSource_To_core_NFSVolumeSource, + Convert_core_NFSVolumeSource_To_v1_NFSVolumeSource, + Convert_v1_Namespace_To_core_Namespace, + Convert_core_Namespace_To_v1_Namespace, + Convert_v1_NamespaceList_To_core_NamespaceList, + Convert_core_NamespaceList_To_v1_NamespaceList, + Convert_v1_NamespaceSpec_To_core_NamespaceSpec, + Convert_core_NamespaceSpec_To_v1_NamespaceSpec, + Convert_v1_NamespaceStatus_To_core_NamespaceStatus, + Convert_core_NamespaceStatus_To_v1_NamespaceStatus, + Convert_v1_Node_To_core_Node, + Convert_core_Node_To_v1_Node, + Convert_v1_NodeAddress_To_core_NodeAddress, + Convert_core_NodeAddress_To_v1_NodeAddress, + Convert_v1_NodeAffinity_To_core_NodeAffinity, + Convert_core_NodeAffinity_To_v1_NodeAffinity, + Convert_v1_NodeCondition_To_core_NodeCondition, + Convert_core_NodeCondition_To_v1_NodeCondition, + Convert_v1_NodeConfigSource_To_core_NodeConfigSource, + Convert_core_NodeConfigSource_To_v1_NodeConfigSource, + Convert_v1_NodeDaemonEndpoints_To_core_NodeDaemonEndpoints, + Convert_core_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints, + Convert_v1_NodeList_To_core_NodeList, + Convert_core_NodeList_To_v1_NodeList, + Convert_v1_NodeProxyOptions_To_core_NodeProxyOptions, + Convert_core_NodeProxyOptions_To_v1_NodeProxyOptions, + Convert_v1_NodeResources_To_core_NodeResources, + Convert_core_NodeResources_To_v1_NodeResources, + Convert_v1_NodeSelector_To_core_NodeSelector, + Convert_core_NodeSelector_To_v1_NodeSelector, + Convert_v1_NodeSelectorRequirement_To_core_NodeSelectorRequirement, + Convert_core_NodeSelectorRequirement_To_v1_NodeSelectorRequirement, + Convert_v1_NodeSelectorTerm_To_core_NodeSelectorTerm, + Convert_core_NodeSelectorTerm_To_v1_NodeSelectorTerm, + Convert_v1_NodeSpec_To_core_NodeSpec, + Convert_core_NodeSpec_To_v1_NodeSpec, + Convert_v1_NodeStatus_To_core_NodeStatus, + Convert_core_NodeStatus_To_v1_NodeStatus, + Convert_v1_NodeSystemInfo_To_core_NodeSystemInfo, + Convert_core_NodeSystemInfo_To_v1_NodeSystemInfo, + Convert_v1_ObjectFieldSelector_To_core_ObjectFieldSelector, + Convert_core_ObjectFieldSelector_To_v1_ObjectFieldSelector, + 
Convert_v1_ObjectMeta_To_core_ObjectMeta, + Convert_core_ObjectMeta_To_v1_ObjectMeta, + Convert_v1_ObjectReference_To_core_ObjectReference, + Convert_core_ObjectReference_To_v1_ObjectReference, + Convert_v1_PersistentVolume_To_core_PersistentVolume, + Convert_core_PersistentVolume_To_v1_PersistentVolume, + Convert_v1_PersistentVolumeClaim_To_core_PersistentVolumeClaim, + Convert_core_PersistentVolumeClaim_To_v1_PersistentVolumeClaim, + Convert_v1_PersistentVolumeClaimCondition_To_core_PersistentVolumeClaimCondition, + Convert_core_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition, + Convert_v1_PersistentVolumeClaimList_To_core_PersistentVolumeClaimList, + Convert_core_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList, + Convert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec, + Convert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec, + Convert_v1_PersistentVolumeClaimStatus_To_core_PersistentVolumeClaimStatus, + Convert_core_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus, + Convert_v1_PersistentVolumeClaimVolumeSource_To_core_PersistentVolumeClaimVolumeSource, + Convert_core_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource, + Convert_v1_PersistentVolumeList_To_core_PersistentVolumeList, + Convert_core_PersistentVolumeList_To_v1_PersistentVolumeList, + Convert_v1_PersistentVolumeSource_To_core_PersistentVolumeSource, + Convert_core_PersistentVolumeSource_To_v1_PersistentVolumeSource, + Convert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec, + Convert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec, + Convert_v1_PersistentVolumeStatus_To_core_PersistentVolumeStatus, + Convert_core_PersistentVolumeStatus_To_v1_PersistentVolumeStatus, + Convert_v1_PhotonPersistentDiskVolumeSource_To_core_PhotonPersistentDiskVolumeSource, + Convert_core_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource, + Convert_v1_Pod_To_core_Pod, + Convert_core_Pod_To_v1_Pod, + Convert_v1_PodAffinity_To_core_PodAffinity, + Convert_core_PodAffinity_To_v1_PodAffinity, + Convert_v1_PodAffinityTerm_To_core_PodAffinityTerm, + Convert_core_PodAffinityTerm_To_v1_PodAffinityTerm, + Convert_v1_PodAntiAffinity_To_core_PodAntiAffinity, + Convert_core_PodAntiAffinity_To_v1_PodAntiAffinity, + Convert_v1_PodAttachOptions_To_core_PodAttachOptions, + Convert_core_PodAttachOptions_To_v1_PodAttachOptions, + Convert_v1_PodCondition_To_core_PodCondition, + Convert_core_PodCondition_To_v1_PodCondition, + Convert_v1_PodDNSConfig_To_core_PodDNSConfig, + Convert_core_PodDNSConfig_To_v1_PodDNSConfig, + Convert_v1_PodDNSConfigOption_To_core_PodDNSConfigOption, + Convert_core_PodDNSConfigOption_To_v1_PodDNSConfigOption, + Convert_v1_PodExecOptions_To_core_PodExecOptions, + Convert_core_PodExecOptions_To_v1_PodExecOptions, + Convert_v1_PodList_To_core_PodList, + Convert_core_PodList_To_v1_PodList, + Convert_v1_PodLogOptions_To_core_PodLogOptions, + Convert_core_PodLogOptions_To_v1_PodLogOptions, + Convert_v1_PodPortForwardOptions_To_core_PodPortForwardOptions, + Convert_core_PodPortForwardOptions_To_v1_PodPortForwardOptions, + Convert_v1_PodProxyOptions_To_core_PodProxyOptions, + Convert_core_PodProxyOptions_To_v1_PodProxyOptions, + Convert_v1_PodSecurityContext_To_core_PodSecurityContext, + Convert_core_PodSecurityContext_To_v1_PodSecurityContext, + Convert_v1_PodSignature_To_core_PodSignature, + Convert_core_PodSignature_To_v1_PodSignature, + Convert_v1_PodSpec_To_core_PodSpec, + Convert_core_PodSpec_To_v1_PodSpec, + 
Convert_v1_PodStatus_To_core_PodStatus, + Convert_core_PodStatus_To_v1_PodStatus, + Convert_v1_PodStatusResult_To_core_PodStatusResult, + Convert_core_PodStatusResult_To_v1_PodStatusResult, + Convert_v1_PodTemplate_To_core_PodTemplate, + Convert_core_PodTemplate_To_v1_PodTemplate, + Convert_v1_PodTemplateList_To_core_PodTemplateList, + Convert_core_PodTemplateList_To_v1_PodTemplateList, + Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec, + Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec, + Convert_v1_PortworxVolumeSource_To_core_PortworxVolumeSource, + Convert_core_PortworxVolumeSource_To_v1_PortworxVolumeSource, + Convert_v1_Preconditions_To_core_Preconditions, + Convert_core_Preconditions_To_v1_Preconditions, + Convert_v1_PreferAvoidPodsEntry_To_core_PreferAvoidPodsEntry, + Convert_core_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry, + Convert_v1_PreferredSchedulingTerm_To_core_PreferredSchedulingTerm, + Convert_core_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm, + Convert_v1_Probe_To_core_Probe, + Convert_core_Probe_To_v1_Probe, + Convert_v1_ProjectedVolumeSource_To_core_ProjectedVolumeSource, + Convert_core_ProjectedVolumeSource_To_v1_ProjectedVolumeSource, + Convert_v1_QuobyteVolumeSource_To_core_QuobyteVolumeSource, + Convert_core_QuobyteVolumeSource_To_v1_QuobyteVolumeSource, + Convert_v1_RBDPersistentVolumeSource_To_core_RBDPersistentVolumeSource, + Convert_core_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource, + Convert_v1_RBDVolumeSource_To_core_RBDVolumeSource, + Convert_core_RBDVolumeSource_To_v1_RBDVolumeSource, + Convert_v1_RangeAllocation_To_core_RangeAllocation, + Convert_core_RangeAllocation_To_v1_RangeAllocation, + Convert_v1_ReplicationController_To_core_ReplicationController, + Convert_core_ReplicationController_To_v1_ReplicationController, + Convert_v1_ReplicationControllerCondition_To_core_ReplicationControllerCondition, + Convert_core_ReplicationControllerCondition_To_v1_ReplicationControllerCondition, + Convert_v1_ReplicationControllerList_To_core_ReplicationControllerList, + Convert_core_ReplicationControllerList_To_v1_ReplicationControllerList, + Convert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec, + Convert_core_ReplicationControllerSpec_To_v1_ReplicationControllerSpec, + Convert_v1_ReplicationControllerStatus_To_core_ReplicationControllerStatus, + Convert_core_ReplicationControllerStatus_To_v1_ReplicationControllerStatus, + Convert_v1_ResourceFieldSelector_To_core_ResourceFieldSelector, + Convert_core_ResourceFieldSelector_To_v1_ResourceFieldSelector, + Convert_v1_ResourceQuota_To_core_ResourceQuota, + Convert_core_ResourceQuota_To_v1_ResourceQuota, + Convert_v1_ResourceQuotaList_To_core_ResourceQuotaList, + Convert_core_ResourceQuotaList_To_v1_ResourceQuotaList, + Convert_v1_ResourceQuotaSpec_To_core_ResourceQuotaSpec, + Convert_core_ResourceQuotaSpec_To_v1_ResourceQuotaSpec, + Convert_v1_ResourceQuotaStatus_To_core_ResourceQuotaStatus, + Convert_core_ResourceQuotaStatus_To_v1_ResourceQuotaStatus, + Convert_v1_ResourceRequirements_To_core_ResourceRequirements, + Convert_core_ResourceRequirements_To_v1_ResourceRequirements, + Convert_v1_SELinuxOptions_To_core_SELinuxOptions, + Convert_core_SELinuxOptions_To_v1_SELinuxOptions, + Convert_v1_ScaleIOPersistentVolumeSource_To_core_ScaleIOPersistentVolumeSource, + Convert_core_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource, + Convert_v1_ScaleIOVolumeSource_To_core_ScaleIOVolumeSource, + Convert_core_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource, + 
Convert_v1_Secret_To_core_Secret, + Convert_core_Secret_To_v1_Secret, + Convert_v1_SecretEnvSource_To_core_SecretEnvSource, + Convert_core_SecretEnvSource_To_v1_SecretEnvSource, + Convert_v1_SecretKeySelector_To_core_SecretKeySelector, + Convert_core_SecretKeySelector_To_v1_SecretKeySelector, + Convert_v1_SecretList_To_core_SecretList, + Convert_core_SecretList_To_v1_SecretList, + Convert_v1_SecretProjection_To_core_SecretProjection, + Convert_core_SecretProjection_To_v1_SecretProjection, + Convert_v1_SecretReference_To_core_SecretReference, + Convert_core_SecretReference_To_v1_SecretReference, + Convert_v1_SecretVolumeSource_To_core_SecretVolumeSource, + Convert_core_SecretVolumeSource_To_v1_SecretVolumeSource, + Convert_v1_SecurityContext_To_core_SecurityContext, + Convert_core_SecurityContext_To_v1_SecurityContext, + Convert_v1_SerializedReference_To_core_SerializedReference, + Convert_core_SerializedReference_To_v1_SerializedReference, + Convert_v1_Service_To_core_Service, + Convert_core_Service_To_v1_Service, + Convert_v1_ServiceAccount_To_core_ServiceAccount, + Convert_core_ServiceAccount_To_v1_ServiceAccount, + Convert_v1_ServiceAccountList_To_core_ServiceAccountList, + Convert_core_ServiceAccountList_To_v1_ServiceAccountList, + Convert_v1_ServiceList_To_core_ServiceList, + Convert_core_ServiceList_To_v1_ServiceList, + Convert_v1_ServicePort_To_core_ServicePort, + Convert_core_ServicePort_To_v1_ServicePort, + Convert_v1_ServiceProxyOptions_To_core_ServiceProxyOptions, + Convert_core_ServiceProxyOptions_To_v1_ServiceProxyOptions, + Convert_v1_ServiceSpec_To_core_ServiceSpec, + Convert_core_ServiceSpec_To_v1_ServiceSpec, + Convert_v1_ServiceStatus_To_core_ServiceStatus, + Convert_core_ServiceStatus_To_v1_ServiceStatus, + Convert_v1_SessionAffinityConfig_To_core_SessionAffinityConfig, + Convert_core_SessionAffinityConfig_To_v1_SessionAffinityConfig, + Convert_v1_StorageOSPersistentVolumeSource_To_core_StorageOSPersistentVolumeSource, + Convert_core_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource, + Convert_v1_StorageOSVolumeSource_To_core_StorageOSVolumeSource, + Convert_core_StorageOSVolumeSource_To_v1_StorageOSVolumeSource, + Convert_v1_Sysctl_To_core_Sysctl, + Convert_core_Sysctl_To_v1_Sysctl, + Convert_v1_TCPSocketAction_To_core_TCPSocketAction, + Convert_core_TCPSocketAction_To_v1_TCPSocketAction, + Convert_v1_Taint_To_core_Taint, + Convert_core_Taint_To_v1_Taint, + Convert_v1_Toleration_To_core_Toleration, + Convert_core_Toleration_To_v1_Toleration, + Convert_v1_Volume_To_core_Volume, + Convert_core_Volume_To_v1_Volume, + Convert_v1_VolumeDevice_To_core_VolumeDevice, + Convert_core_VolumeDevice_To_v1_VolumeDevice, + Convert_v1_VolumeMount_To_core_VolumeMount, + Convert_core_VolumeMount_To_v1_VolumeMount, + Convert_v1_VolumeProjection_To_core_VolumeProjection, + Convert_core_VolumeProjection_To_v1_VolumeProjection, + Convert_v1_VolumeSource_To_core_VolumeSource, + Convert_core_VolumeSource_To_v1_VolumeSource, + Convert_v1_VsphereVirtualDiskVolumeSource_To_core_VsphereVirtualDiskVolumeSource, + Convert_core_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource, + Convert_v1_WeightedPodAffinityTerm_To_core_WeightedPodAffinityTerm, + Convert_core_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm, + ) +} + +func autoConvert_v1_AWSElasticBlockStoreVolumeSource_To_core_AWSElasticBlockStoreVolumeSource(in *v1.AWSElasticBlockStoreVolumeSource, out *core.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { + out.VolumeID = in.VolumeID + 
out.FSType = in.FSType + out.Partition = in.Partition + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_AWSElasticBlockStoreVolumeSource_To_core_AWSElasticBlockStoreVolumeSource is an autogenerated conversion function. +func Convert_v1_AWSElasticBlockStoreVolumeSource_To_core_AWSElasticBlockStoreVolumeSource(in *v1.AWSElasticBlockStoreVolumeSource, out *core.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { + return autoConvert_v1_AWSElasticBlockStoreVolumeSource_To_core_AWSElasticBlockStoreVolumeSource(in, out, s) +} + +func autoConvert_core_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in *core.AWSElasticBlockStoreVolumeSource, out *v1.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { + out.VolumeID = in.VolumeID + out.FSType = in.FSType + out.Partition = in.Partition + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource is an autogenerated conversion function. +func Convert_core_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in *core.AWSElasticBlockStoreVolumeSource, out *v1.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { + return autoConvert_core_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in, out, s) +} + +func autoConvert_v1_Affinity_To_core_Affinity(in *v1.Affinity, out *core.Affinity, s conversion.Scope) error { + out.NodeAffinity = (*core.NodeAffinity)(unsafe.Pointer(in.NodeAffinity)) + out.PodAffinity = (*core.PodAffinity)(unsafe.Pointer(in.PodAffinity)) + out.PodAntiAffinity = (*core.PodAntiAffinity)(unsafe.Pointer(in.PodAntiAffinity)) + return nil +} + +// Convert_v1_Affinity_To_core_Affinity is an autogenerated conversion function. +func Convert_v1_Affinity_To_core_Affinity(in *v1.Affinity, out *core.Affinity, s conversion.Scope) error { + return autoConvert_v1_Affinity_To_core_Affinity(in, out, s) +} + +func autoConvert_core_Affinity_To_v1_Affinity(in *core.Affinity, out *v1.Affinity, s conversion.Scope) error { + out.NodeAffinity = (*v1.NodeAffinity)(unsafe.Pointer(in.NodeAffinity)) + out.PodAffinity = (*v1.PodAffinity)(unsafe.Pointer(in.PodAffinity)) + out.PodAntiAffinity = (*v1.PodAntiAffinity)(unsafe.Pointer(in.PodAntiAffinity)) + return nil +} + +// Convert_core_Affinity_To_v1_Affinity is an autogenerated conversion function. +func Convert_core_Affinity_To_v1_Affinity(in *core.Affinity, out *v1.Affinity, s conversion.Scope) error { + return autoConvert_core_Affinity_To_v1_Affinity(in, out, s) +} + +func autoConvert_v1_AttachedVolume_To_core_AttachedVolume(in *v1.AttachedVolume, out *core.AttachedVolume, s conversion.Scope) error { + out.Name = core.UniqueVolumeName(in.Name) + out.DevicePath = in.DevicePath + return nil +} + +// Convert_v1_AttachedVolume_To_core_AttachedVolume is an autogenerated conversion function. +func Convert_v1_AttachedVolume_To_core_AttachedVolume(in *v1.AttachedVolume, out *core.AttachedVolume, s conversion.Scope) error { + return autoConvert_v1_AttachedVolume_To_core_AttachedVolume(in, out, s) +} + +func autoConvert_core_AttachedVolume_To_v1_AttachedVolume(in *core.AttachedVolume, out *v1.AttachedVolume, s conversion.Scope) error { + out.Name = v1.UniqueVolumeName(in.Name) + out.DevicePath = in.DevicePath + return nil +} + +// Convert_core_AttachedVolume_To_v1_AttachedVolume is an autogenerated conversion function. 
+func Convert_core_AttachedVolume_To_v1_AttachedVolume(in *core.AttachedVolume, out *v1.AttachedVolume, s conversion.Scope) error { + return autoConvert_core_AttachedVolume_To_v1_AttachedVolume(in, out, s) +} + +func autoConvert_v1_AvoidPods_To_core_AvoidPods(in *v1.AvoidPods, out *core.AvoidPods, s conversion.Scope) error { + out.PreferAvoidPods = *(*[]core.PreferAvoidPodsEntry)(unsafe.Pointer(&in.PreferAvoidPods)) + return nil +} + +// Convert_v1_AvoidPods_To_core_AvoidPods is an autogenerated conversion function. +func Convert_v1_AvoidPods_To_core_AvoidPods(in *v1.AvoidPods, out *core.AvoidPods, s conversion.Scope) error { + return autoConvert_v1_AvoidPods_To_core_AvoidPods(in, out, s) +} + +func autoConvert_core_AvoidPods_To_v1_AvoidPods(in *core.AvoidPods, out *v1.AvoidPods, s conversion.Scope) error { + out.PreferAvoidPods = *(*[]v1.PreferAvoidPodsEntry)(unsafe.Pointer(&in.PreferAvoidPods)) + return nil +} + +// Convert_core_AvoidPods_To_v1_AvoidPods is an autogenerated conversion function. +func Convert_core_AvoidPods_To_v1_AvoidPods(in *core.AvoidPods, out *v1.AvoidPods, s conversion.Scope) error { + return autoConvert_core_AvoidPods_To_v1_AvoidPods(in, out, s) +} + +func autoConvert_v1_AzureDiskVolumeSource_To_core_AzureDiskVolumeSource(in *v1.AzureDiskVolumeSource, out *core.AzureDiskVolumeSource, s conversion.Scope) error { + out.DiskName = in.DiskName + out.DataDiskURI = in.DataDiskURI + out.CachingMode = (*core.AzureDataDiskCachingMode)(unsafe.Pointer(in.CachingMode)) + out.FSType = (*string)(unsafe.Pointer(in.FSType)) + out.ReadOnly = (*bool)(unsafe.Pointer(in.ReadOnly)) + out.Kind = (*core.AzureDataDiskKind)(unsafe.Pointer(in.Kind)) + return nil +} + +// Convert_v1_AzureDiskVolumeSource_To_core_AzureDiskVolumeSource is an autogenerated conversion function. +func Convert_v1_AzureDiskVolumeSource_To_core_AzureDiskVolumeSource(in *v1.AzureDiskVolumeSource, out *core.AzureDiskVolumeSource, s conversion.Scope) error { + return autoConvert_v1_AzureDiskVolumeSource_To_core_AzureDiskVolumeSource(in, out, s) +} + +func autoConvert_core_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource(in *core.AzureDiskVolumeSource, out *v1.AzureDiskVolumeSource, s conversion.Scope) error { + out.DiskName = in.DiskName + out.DataDiskURI = in.DataDiskURI + out.CachingMode = (*v1.AzureDataDiskCachingMode)(unsafe.Pointer(in.CachingMode)) + out.FSType = (*string)(unsafe.Pointer(in.FSType)) + out.ReadOnly = (*bool)(unsafe.Pointer(in.ReadOnly)) + out.Kind = (*v1.AzureDataDiskKind)(unsafe.Pointer(in.Kind)) + return nil +} + +// Convert_core_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource is an autogenerated conversion function. +func Convert_core_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource(in *core.AzureDiskVolumeSource, out *v1.AzureDiskVolumeSource, s conversion.Scope) error { + return autoConvert_core_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource(in, out, s) +} + +func autoConvert_v1_AzureFilePersistentVolumeSource_To_core_AzureFilePersistentVolumeSource(in *v1.AzureFilePersistentVolumeSource, out *core.AzureFilePersistentVolumeSource, s conversion.Scope) error { + out.SecretName = in.SecretName + out.ShareName = in.ShareName + out.ReadOnly = in.ReadOnly + out.SecretNamespace = (*string)(unsafe.Pointer(in.SecretNamespace)) + return nil +} + +// Convert_v1_AzureFilePersistentVolumeSource_To_core_AzureFilePersistentVolumeSource is an autogenerated conversion function. 
+func Convert_v1_AzureFilePersistentVolumeSource_To_core_AzureFilePersistentVolumeSource(in *v1.AzureFilePersistentVolumeSource, out *core.AzureFilePersistentVolumeSource, s conversion.Scope) error { + return autoConvert_v1_AzureFilePersistentVolumeSource_To_core_AzureFilePersistentVolumeSource(in, out, s) +} + +func autoConvert_core_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource(in *core.AzureFilePersistentVolumeSource, out *v1.AzureFilePersistentVolumeSource, s conversion.Scope) error { + out.SecretName = in.SecretName + out.ShareName = in.ShareName + out.ReadOnly = in.ReadOnly + out.SecretNamespace = (*string)(unsafe.Pointer(in.SecretNamespace)) + return nil +} + +// Convert_core_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource is an autogenerated conversion function. +func Convert_core_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource(in *core.AzureFilePersistentVolumeSource, out *v1.AzureFilePersistentVolumeSource, s conversion.Scope) error { + return autoConvert_core_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource(in, out, s) +} + +func autoConvert_v1_AzureFileVolumeSource_To_core_AzureFileVolumeSource(in *v1.AzureFileVolumeSource, out *core.AzureFileVolumeSource, s conversion.Scope) error { + out.SecretName = in.SecretName + out.ShareName = in.ShareName + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_AzureFileVolumeSource_To_core_AzureFileVolumeSource is an autogenerated conversion function. +func Convert_v1_AzureFileVolumeSource_To_core_AzureFileVolumeSource(in *v1.AzureFileVolumeSource, out *core.AzureFileVolumeSource, s conversion.Scope) error { + return autoConvert_v1_AzureFileVolumeSource_To_core_AzureFileVolumeSource(in, out, s) +} + +func autoConvert_core_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in *core.AzureFileVolumeSource, out *v1.AzureFileVolumeSource, s conversion.Scope) error { + out.SecretName = in.SecretName + out.ShareName = in.ShareName + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_AzureFileVolumeSource_To_v1_AzureFileVolumeSource is an autogenerated conversion function. +func Convert_core_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in *core.AzureFileVolumeSource, out *v1.AzureFileVolumeSource, s conversion.Scope) error { + return autoConvert_core_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in, out, s) +} + +func autoConvert_v1_Binding_To_core_Binding(in *v1.Binding, out *core.Binding, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_ObjectReference_To_core_ObjectReference(&in.Target, &out.Target, s); err != nil { + return err + } + return nil +} + +// Convert_v1_Binding_To_core_Binding is an autogenerated conversion function. +func Convert_v1_Binding_To_core_Binding(in *v1.Binding, out *core.Binding, s conversion.Scope) error { + return autoConvert_v1_Binding_To_core_Binding(in, out, s) +} + +func autoConvert_core_Binding_To_v1_Binding(in *core.Binding, out *v1.Binding, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_ObjectReference_To_v1_ObjectReference(&in.Target, &out.Target, s); err != nil { + return err + } + return nil +} + +// Convert_core_Binding_To_v1_Binding is an autogenerated conversion function. 
+func Convert_core_Binding_To_v1_Binding(in *core.Binding, out *v1.Binding, s conversion.Scope) error { + return autoConvert_core_Binding_To_v1_Binding(in, out, s) +} + +func autoConvert_v1_CSIPersistentVolumeSource_To_core_CSIPersistentVolumeSource(in *v1.CSIPersistentVolumeSource, out *core.CSIPersistentVolumeSource, s conversion.Scope) error { + out.Driver = in.Driver + out.VolumeHandle = in.VolumeHandle + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_CSIPersistentVolumeSource_To_core_CSIPersistentVolumeSource is an autogenerated conversion function. +func Convert_v1_CSIPersistentVolumeSource_To_core_CSIPersistentVolumeSource(in *v1.CSIPersistentVolumeSource, out *core.CSIPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_v1_CSIPersistentVolumeSource_To_core_CSIPersistentVolumeSource(in, out, s) +} + +func autoConvert_core_CSIPersistentVolumeSource_To_v1_CSIPersistentVolumeSource(in *core.CSIPersistentVolumeSource, out *v1.CSIPersistentVolumeSource, s conversion.Scope) error { + out.Driver = in.Driver + out.VolumeHandle = in.VolumeHandle + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_CSIPersistentVolumeSource_To_v1_CSIPersistentVolumeSource is an autogenerated conversion function. +func Convert_core_CSIPersistentVolumeSource_To_v1_CSIPersistentVolumeSource(in *core.CSIPersistentVolumeSource, out *v1.CSIPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_core_CSIPersistentVolumeSource_To_v1_CSIPersistentVolumeSource(in, out, s) +} + +func autoConvert_v1_Capabilities_To_core_Capabilities(in *v1.Capabilities, out *core.Capabilities, s conversion.Scope) error { + out.Add = *(*[]core.Capability)(unsafe.Pointer(&in.Add)) + out.Drop = *(*[]core.Capability)(unsafe.Pointer(&in.Drop)) + return nil +} + +// Convert_v1_Capabilities_To_core_Capabilities is an autogenerated conversion function. +func Convert_v1_Capabilities_To_core_Capabilities(in *v1.Capabilities, out *core.Capabilities, s conversion.Scope) error { + return autoConvert_v1_Capabilities_To_core_Capabilities(in, out, s) +} + +func autoConvert_core_Capabilities_To_v1_Capabilities(in *core.Capabilities, out *v1.Capabilities, s conversion.Scope) error { + out.Add = *(*[]v1.Capability)(unsafe.Pointer(&in.Add)) + out.Drop = *(*[]v1.Capability)(unsafe.Pointer(&in.Drop)) + return nil +} + +// Convert_core_Capabilities_To_v1_Capabilities is an autogenerated conversion function. +func Convert_core_Capabilities_To_v1_Capabilities(in *core.Capabilities, out *v1.Capabilities, s conversion.Scope) error { + return autoConvert_core_Capabilities_To_v1_Capabilities(in, out, s) +} + +func autoConvert_v1_CephFSPersistentVolumeSource_To_core_CephFSPersistentVolumeSource(in *v1.CephFSPersistentVolumeSource, out *core.CephFSPersistentVolumeSource, s conversion.Scope) error { + out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors)) + out.Path = in.Path + out.User = in.User + out.SecretFile = in.SecretFile + out.SecretRef = (*core.SecretReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_CephFSPersistentVolumeSource_To_core_CephFSPersistentVolumeSource is an autogenerated conversion function. 
+func Convert_v1_CephFSPersistentVolumeSource_To_core_CephFSPersistentVolumeSource(in *v1.CephFSPersistentVolumeSource, out *core.CephFSPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_v1_CephFSPersistentVolumeSource_To_core_CephFSPersistentVolumeSource(in, out, s) +} + +func autoConvert_core_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource(in *core.CephFSPersistentVolumeSource, out *v1.CephFSPersistentVolumeSource, s conversion.Scope) error { + out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors)) + out.Path = in.Path + out.User = in.User + out.SecretFile = in.SecretFile + out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource is an autogenerated conversion function. +func Convert_core_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource(in *core.CephFSPersistentVolumeSource, out *v1.CephFSPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_core_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource(in, out, s) +} + +func autoConvert_v1_CephFSVolumeSource_To_core_CephFSVolumeSource(in *v1.CephFSVolumeSource, out *core.CephFSVolumeSource, s conversion.Scope) error { + out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors)) + out.Path = in.Path + out.User = in.User + out.SecretFile = in.SecretFile + out.SecretRef = (*core.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_CephFSVolumeSource_To_core_CephFSVolumeSource is an autogenerated conversion function. +func Convert_v1_CephFSVolumeSource_To_core_CephFSVolumeSource(in *v1.CephFSVolumeSource, out *core.CephFSVolumeSource, s conversion.Scope) error { + return autoConvert_v1_CephFSVolumeSource_To_core_CephFSVolumeSource(in, out, s) +} + +func autoConvert_core_CephFSVolumeSource_To_v1_CephFSVolumeSource(in *core.CephFSVolumeSource, out *v1.CephFSVolumeSource, s conversion.Scope) error { + out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors)) + out.Path = in.Path + out.User = in.User + out.SecretFile = in.SecretFile + out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_CephFSVolumeSource_To_v1_CephFSVolumeSource is an autogenerated conversion function. +func Convert_core_CephFSVolumeSource_To_v1_CephFSVolumeSource(in *core.CephFSVolumeSource, out *v1.CephFSVolumeSource, s conversion.Scope) error { + return autoConvert_core_CephFSVolumeSource_To_v1_CephFSVolumeSource(in, out, s) +} + +func autoConvert_v1_CinderVolumeSource_To_core_CinderVolumeSource(in *v1.CinderVolumeSource, out *core.CinderVolumeSource, s conversion.Scope) error { + out.VolumeID = in.VolumeID + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_CinderVolumeSource_To_core_CinderVolumeSource is an autogenerated conversion function. 
+func Convert_v1_CinderVolumeSource_To_core_CinderVolumeSource(in *v1.CinderVolumeSource, out *core.CinderVolumeSource, s conversion.Scope) error { + return autoConvert_v1_CinderVolumeSource_To_core_CinderVolumeSource(in, out, s) +} + +func autoConvert_core_CinderVolumeSource_To_v1_CinderVolumeSource(in *core.CinderVolumeSource, out *v1.CinderVolumeSource, s conversion.Scope) error { + out.VolumeID = in.VolumeID + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_CinderVolumeSource_To_v1_CinderVolumeSource is an autogenerated conversion function. +func Convert_core_CinderVolumeSource_To_v1_CinderVolumeSource(in *core.CinderVolumeSource, out *v1.CinderVolumeSource, s conversion.Scope) error { + return autoConvert_core_CinderVolumeSource_To_v1_CinderVolumeSource(in, out, s) +} + +func autoConvert_v1_ClientIPConfig_To_core_ClientIPConfig(in *v1.ClientIPConfig, out *core.ClientIPConfig, s conversion.Scope) error { + out.TimeoutSeconds = (*int32)(unsafe.Pointer(in.TimeoutSeconds)) + return nil +} + +// Convert_v1_ClientIPConfig_To_core_ClientIPConfig is an autogenerated conversion function. +func Convert_v1_ClientIPConfig_To_core_ClientIPConfig(in *v1.ClientIPConfig, out *core.ClientIPConfig, s conversion.Scope) error { + return autoConvert_v1_ClientIPConfig_To_core_ClientIPConfig(in, out, s) +} + +func autoConvert_core_ClientIPConfig_To_v1_ClientIPConfig(in *core.ClientIPConfig, out *v1.ClientIPConfig, s conversion.Scope) error { + out.TimeoutSeconds = (*int32)(unsafe.Pointer(in.TimeoutSeconds)) + return nil +} + +// Convert_core_ClientIPConfig_To_v1_ClientIPConfig is an autogenerated conversion function. +func Convert_core_ClientIPConfig_To_v1_ClientIPConfig(in *core.ClientIPConfig, out *v1.ClientIPConfig, s conversion.Scope) error { + return autoConvert_core_ClientIPConfig_To_v1_ClientIPConfig(in, out, s) +} + +func autoConvert_v1_ComponentCondition_To_core_ComponentCondition(in *v1.ComponentCondition, out *core.ComponentCondition, s conversion.Scope) error { + out.Type = core.ComponentConditionType(in.Type) + out.Status = core.ConditionStatus(in.Status) + out.Message = in.Message + out.Error = in.Error + return nil +} + +// Convert_v1_ComponentCondition_To_core_ComponentCondition is an autogenerated conversion function. +func Convert_v1_ComponentCondition_To_core_ComponentCondition(in *v1.ComponentCondition, out *core.ComponentCondition, s conversion.Scope) error { + return autoConvert_v1_ComponentCondition_To_core_ComponentCondition(in, out, s) +} + +func autoConvert_core_ComponentCondition_To_v1_ComponentCondition(in *core.ComponentCondition, out *v1.ComponentCondition, s conversion.Scope) error { + out.Type = v1.ComponentConditionType(in.Type) + out.Status = v1.ConditionStatus(in.Status) + out.Message = in.Message + out.Error = in.Error + return nil +} + +// Convert_core_ComponentCondition_To_v1_ComponentCondition is an autogenerated conversion function. +func Convert_core_ComponentCondition_To_v1_ComponentCondition(in *core.ComponentCondition, out *v1.ComponentCondition, s conversion.Scope) error { + return autoConvert_core_ComponentCondition_To_v1_ComponentCondition(in, out, s) +} + +func autoConvert_v1_ComponentStatus_To_core_ComponentStatus(in *v1.ComponentStatus, out *core.ComponentStatus, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Conditions = *(*[]core.ComponentCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +// Convert_v1_ComponentStatus_To_core_ComponentStatus is an autogenerated conversion function. 
+func Convert_v1_ComponentStatus_To_core_ComponentStatus(in *v1.ComponentStatus, out *core.ComponentStatus, s conversion.Scope) error { + return autoConvert_v1_ComponentStatus_To_core_ComponentStatus(in, out, s) +} + +func autoConvert_core_ComponentStatus_To_v1_ComponentStatus(in *core.ComponentStatus, out *v1.ComponentStatus, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Conditions = *(*[]v1.ComponentCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +// Convert_core_ComponentStatus_To_v1_ComponentStatus is an autogenerated conversion function. +func Convert_core_ComponentStatus_To_v1_ComponentStatus(in *core.ComponentStatus, out *v1.ComponentStatus, s conversion.Scope) error { + return autoConvert_core_ComponentStatus_To_v1_ComponentStatus(in, out, s) +} + +func autoConvert_v1_ComponentStatusList_To_core_ComponentStatusList(in *v1.ComponentStatusList, out *core.ComponentStatusList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.ComponentStatus)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_ComponentStatusList_To_core_ComponentStatusList is an autogenerated conversion function. +func Convert_v1_ComponentStatusList_To_core_ComponentStatusList(in *v1.ComponentStatusList, out *core.ComponentStatusList, s conversion.Scope) error { + return autoConvert_v1_ComponentStatusList_To_core_ComponentStatusList(in, out, s) +} + +func autoConvert_core_ComponentStatusList_To_v1_ComponentStatusList(in *core.ComponentStatusList, out *v1.ComponentStatusList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1.ComponentStatus)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_ComponentStatusList_To_v1_ComponentStatusList is an autogenerated conversion function. +func Convert_core_ComponentStatusList_To_v1_ComponentStatusList(in *core.ComponentStatusList, out *v1.ComponentStatusList, s conversion.Scope) error { + return autoConvert_core_ComponentStatusList_To_v1_ComponentStatusList(in, out, s) +} + +func autoConvert_v1_ConfigMap_To_core_ConfigMap(in *v1.ConfigMap, out *core.ConfigMap, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Data = *(*map[string]string)(unsafe.Pointer(&in.Data)) + return nil +} + +// Convert_v1_ConfigMap_To_core_ConfigMap is an autogenerated conversion function. +func Convert_v1_ConfigMap_To_core_ConfigMap(in *v1.ConfigMap, out *core.ConfigMap, s conversion.Scope) error { + return autoConvert_v1_ConfigMap_To_core_ConfigMap(in, out, s) +} + +func autoConvert_core_ConfigMap_To_v1_ConfigMap(in *core.ConfigMap, out *v1.ConfigMap, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Data = *(*map[string]string)(unsafe.Pointer(&in.Data)) + return nil +} + +// Convert_core_ConfigMap_To_v1_ConfigMap is an autogenerated conversion function. +func Convert_core_ConfigMap_To_v1_ConfigMap(in *core.ConfigMap, out *v1.ConfigMap, s conversion.Scope) error { + return autoConvert_core_ConfigMap_To_v1_ConfigMap(in, out, s) +} + +func autoConvert_v1_ConfigMapEnvSource_To_core_ConfigMapEnvSource(in *v1.ConfigMapEnvSource, out *core.ConfigMapEnvSource, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_core_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_v1_ConfigMapEnvSource_To_core_ConfigMapEnvSource is an autogenerated conversion function. 
+func Convert_v1_ConfigMapEnvSource_To_core_ConfigMapEnvSource(in *v1.ConfigMapEnvSource, out *core.ConfigMapEnvSource, s conversion.Scope) error { + return autoConvert_v1_ConfigMapEnvSource_To_core_ConfigMapEnvSource(in, out, s) +} + +func autoConvert_core_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(in *core.ConfigMapEnvSource, out *v1.ConfigMapEnvSource, s conversion.Scope) error { + if err := Convert_core_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_core_ConfigMapEnvSource_To_v1_ConfigMapEnvSource is an autogenerated conversion function. +func Convert_core_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(in *core.ConfigMapEnvSource, out *v1.ConfigMapEnvSource, s conversion.Scope) error { + return autoConvert_core_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(in, out, s) +} + +func autoConvert_v1_ConfigMapKeySelector_To_core_ConfigMapKeySelector(in *v1.ConfigMapKeySelector, out *core.ConfigMapKeySelector, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_core_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Key = in.Key + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_v1_ConfigMapKeySelector_To_core_ConfigMapKeySelector is an autogenerated conversion function. +func Convert_v1_ConfigMapKeySelector_To_core_ConfigMapKeySelector(in *v1.ConfigMapKeySelector, out *core.ConfigMapKeySelector, s conversion.Scope) error { + return autoConvert_v1_ConfigMapKeySelector_To_core_ConfigMapKeySelector(in, out, s) +} + +func autoConvert_core_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in *core.ConfigMapKeySelector, out *v1.ConfigMapKeySelector, s conversion.Scope) error { + if err := Convert_core_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Key = in.Key + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_core_ConfigMapKeySelector_To_v1_ConfigMapKeySelector is an autogenerated conversion function. +func Convert_core_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in *core.ConfigMapKeySelector, out *v1.ConfigMapKeySelector, s conversion.Scope) error { + return autoConvert_core_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in, out, s) +} + +func autoConvert_v1_ConfigMapList_To_core_ConfigMapList(in *v1.ConfigMapList, out *core.ConfigMapList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.ConfigMap)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_ConfigMapList_To_core_ConfigMapList is an autogenerated conversion function. +func Convert_v1_ConfigMapList_To_core_ConfigMapList(in *v1.ConfigMapList, out *core.ConfigMapList, s conversion.Scope) error { + return autoConvert_v1_ConfigMapList_To_core_ConfigMapList(in, out, s) +} + +func autoConvert_core_ConfigMapList_To_v1_ConfigMapList(in *core.ConfigMapList, out *v1.ConfigMapList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1.ConfigMap)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_ConfigMapList_To_v1_ConfigMapList is an autogenerated conversion function. 
+func Convert_core_ConfigMapList_To_v1_ConfigMapList(in *core.ConfigMapList, out *v1.ConfigMapList, s conversion.Scope) error { + return autoConvert_core_ConfigMapList_To_v1_ConfigMapList(in, out, s) +} + +func autoConvert_v1_ConfigMapProjection_To_core_ConfigMapProjection(in *v1.ConfigMapProjection, out *core.ConfigMapProjection, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_core_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Items = *(*[]core.KeyToPath)(unsafe.Pointer(&in.Items)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_v1_ConfigMapProjection_To_core_ConfigMapProjection is an autogenerated conversion function. +func Convert_v1_ConfigMapProjection_To_core_ConfigMapProjection(in *v1.ConfigMapProjection, out *core.ConfigMapProjection, s conversion.Scope) error { + return autoConvert_v1_ConfigMapProjection_To_core_ConfigMapProjection(in, out, s) +} + +func autoConvert_core_ConfigMapProjection_To_v1_ConfigMapProjection(in *core.ConfigMapProjection, out *v1.ConfigMapProjection, s conversion.Scope) error { + if err := Convert_core_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Items = *(*[]v1.KeyToPath)(unsafe.Pointer(&in.Items)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_core_ConfigMapProjection_To_v1_ConfigMapProjection is an autogenerated conversion function. +func Convert_core_ConfigMapProjection_To_v1_ConfigMapProjection(in *core.ConfigMapProjection, out *v1.ConfigMapProjection, s conversion.Scope) error { + return autoConvert_core_ConfigMapProjection_To_v1_ConfigMapProjection(in, out, s) +} + +func autoConvert_v1_ConfigMapVolumeSource_To_core_ConfigMapVolumeSource(in *v1.ConfigMapVolumeSource, out *core.ConfigMapVolumeSource, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_core_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Items = *(*[]core.KeyToPath)(unsafe.Pointer(&in.Items)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_v1_ConfigMapVolumeSource_To_core_ConfigMapVolumeSource is an autogenerated conversion function. +func Convert_v1_ConfigMapVolumeSource_To_core_ConfigMapVolumeSource(in *v1.ConfigMapVolumeSource, out *core.ConfigMapVolumeSource, s conversion.Scope) error { + return autoConvert_v1_ConfigMapVolumeSource_To_core_ConfigMapVolumeSource(in, out, s) +} + +func autoConvert_core_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in *core.ConfigMapVolumeSource, out *v1.ConfigMapVolumeSource, s conversion.Scope) error { + if err := Convert_core_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Items = *(*[]v1.KeyToPath)(unsafe.Pointer(&in.Items)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_core_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource is an autogenerated conversion function. 
+func Convert_core_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in *core.ConfigMapVolumeSource, out *v1.ConfigMapVolumeSource, s conversion.Scope) error { + return autoConvert_core_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in, out, s) +} + +func autoConvert_v1_Container_To_core_Container(in *v1.Container, out *core.Container, s conversion.Scope) error { + out.Name = in.Name + out.Image = in.Image + out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) + out.Args = *(*[]string)(unsafe.Pointer(&in.Args)) + out.WorkingDir = in.WorkingDir + out.Ports = *(*[]core.ContainerPort)(unsafe.Pointer(&in.Ports)) + out.EnvFrom = *(*[]core.EnvFromSource)(unsafe.Pointer(&in.EnvFrom)) + out.Env = *(*[]core.EnvVar)(unsafe.Pointer(&in.Env)) + if err := Convert_v1_ResourceRequirements_To_core_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { + return err + } + out.VolumeMounts = *(*[]core.VolumeMount)(unsafe.Pointer(&in.VolumeMounts)) + out.VolumeDevices = *(*[]core.VolumeDevice)(unsafe.Pointer(&in.VolumeDevices)) + out.LivenessProbe = (*core.Probe)(unsafe.Pointer(in.LivenessProbe)) + out.ReadinessProbe = (*core.Probe)(unsafe.Pointer(in.ReadinessProbe)) + out.Lifecycle = (*core.Lifecycle)(unsafe.Pointer(in.Lifecycle)) + out.TerminationMessagePath = in.TerminationMessagePath + out.TerminationMessagePolicy = core.TerminationMessagePolicy(in.TerminationMessagePolicy) + out.ImagePullPolicy = core.PullPolicy(in.ImagePullPolicy) + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(core.SecurityContext) + if err := Convert_v1_SecurityContext_To_core_SecurityContext(*in, *out, s); err != nil { + return err + } + } else { + out.SecurityContext = nil + } + out.Stdin = in.Stdin + out.StdinOnce = in.StdinOnce + out.TTY = in.TTY + return nil +} + +// Convert_v1_Container_To_core_Container is an autogenerated conversion function. 
+func Convert_v1_Container_To_core_Container(in *v1.Container, out *core.Container, s conversion.Scope) error { + return autoConvert_v1_Container_To_core_Container(in, out, s) +} + +func autoConvert_core_Container_To_v1_Container(in *core.Container, out *v1.Container, s conversion.Scope) error { + out.Name = in.Name + out.Image = in.Image + out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) + out.Args = *(*[]string)(unsafe.Pointer(&in.Args)) + out.WorkingDir = in.WorkingDir + out.Ports = *(*[]v1.ContainerPort)(unsafe.Pointer(&in.Ports)) + out.EnvFrom = *(*[]v1.EnvFromSource)(unsafe.Pointer(&in.EnvFrom)) + out.Env = *(*[]v1.EnvVar)(unsafe.Pointer(&in.Env)) + if err := Convert_core_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { + return err + } + out.VolumeMounts = *(*[]v1.VolumeMount)(unsafe.Pointer(&in.VolumeMounts)) + out.VolumeDevices = *(*[]v1.VolumeDevice)(unsafe.Pointer(&in.VolumeDevices)) + out.LivenessProbe = (*v1.Probe)(unsafe.Pointer(in.LivenessProbe)) + out.ReadinessProbe = (*v1.Probe)(unsafe.Pointer(in.ReadinessProbe)) + out.Lifecycle = (*v1.Lifecycle)(unsafe.Pointer(in.Lifecycle)) + out.TerminationMessagePath = in.TerminationMessagePath + out.TerminationMessagePolicy = v1.TerminationMessagePolicy(in.TerminationMessagePolicy) + out.ImagePullPolicy = v1.PullPolicy(in.ImagePullPolicy) + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(v1.SecurityContext) + if err := Convert_core_SecurityContext_To_v1_SecurityContext(*in, *out, s); err != nil { + return err + } + } else { + out.SecurityContext = nil + } + out.Stdin = in.Stdin + out.StdinOnce = in.StdinOnce + out.TTY = in.TTY + return nil +} + +// Convert_core_Container_To_v1_Container is an autogenerated conversion function. +func Convert_core_Container_To_v1_Container(in *core.Container, out *v1.Container, s conversion.Scope) error { + return autoConvert_core_Container_To_v1_Container(in, out, s) +} + +func autoConvert_v1_ContainerImage_To_core_ContainerImage(in *v1.ContainerImage, out *core.ContainerImage, s conversion.Scope) error { + out.Names = *(*[]string)(unsafe.Pointer(&in.Names)) + out.SizeBytes = in.SizeBytes + return nil +} + +// Convert_v1_ContainerImage_To_core_ContainerImage is an autogenerated conversion function. +func Convert_v1_ContainerImage_To_core_ContainerImage(in *v1.ContainerImage, out *core.ContainerImage, s conversion.Scope) error { + return autoConvert_v1_ContainerImage_To_core_ContainerImage(in, out, s) +} + +func autoConvert_core_ContainerImage_To_v1_ContainerImage(in *core.ContainerImage, out *v1.ContainerImage, s conversion.Scope) error { + out.Names = *(*[]string)(unsafe.Pointer(&in.Names)) + out.SizeBytes = in.SizeBytes + return nil +} + +// Convert_core_ContainerImage_To_v1_ContainerImage is an autogenerated conversion function. +func Convert_core_ContainerImage_To_v1_ContainerImage(in *core.ContainerImage, out *v1.ContainerImage, s conversion.Scope) error { + return autoConvert_core_ContainerImage_To_v1_ContainerImage(in, out, s) +} + +func autoConvert_v1_ContainerPort_To_core_ContainerPort(in *v1.ContainerPort, out *core.ContainerPort, s conversion.Scope) error { + out.Name = in.Name + out.HostPort = in.HostPort + out.ContainerPort = in.ContainerPort + out.Protocol = core.Protocol(in.Protocol) + out.HostIP = in.HostIP + return nil +} + +// Convert_v1_ContainerPort_To_core_ContainerPort is an autogenerated conversion function. 
+func Convert_v1_ContainerPort_To_core_ContainerPort(in *v1.ContainerPort, out *core.ContainerPort, s conversion.Scope) error { + return autoConvert_v1_ContainerPort_To_core_ContainerPort(in, out, s) +} + +func autoConvert_core_ContainerPort_To_v1_ContainerPort(in *core.ContainerPort, out *v1.ContainerPort, s conversion.Scope) error { + out.Name = in.Name + out.HostPort = in.HostPort + out.ContainerPort = in.ContainerPort + out.Protocol = v1.Protocol(in.Protocol) + out.HostIP = in.HostIP + return nil +} + +// Convert_core_ContainerPort_To_v1_ContainerPort is an autogenerated conversion function. +func Convert_core_ContainerPort_To_v1_ContainerPort(in *core.ContainerPort, out *v1.ContainerPort, s conversion.Scope) error { + return autoConvert_core_ContainerPort_To_v1_ContainerPort(in, out, s) +} + +func autoConvert_v1_ContainerState_To_core_ContainerState(in *v1.ContainerState, out *core.ContainerState, s conversion.Scope) error { + out.Waiting = (*core.ContainerStateWaiting)(unsafe.Pointer(in.Waiting)) + out.Running = (*core.ContainerStateRunning)(unsafe.Pointer(in.Running)) + out.Terminated = (*core.ContainerStateTerminated)(unsafe.Pointer(in.Terminated)) + return nil +} + +// Convert_v1_ContainerState_To_core_ContainerState is an autogenerated conversion function. +func Convert_v1_ContainerState_To_core_ContainerState(in *v1.ContainerState, out *core.ContainerState, s conversion.Scope) error { + return autoConvert_v1_ContainerState_To_core_ContainerState(in, out, s) +} + +func autoConvert_core_ContainerState_To_v1_ContainerState(in *core.ContainerState, out *v1.ContainerState, s conversion.Scope) error { + out.Waiting = (*v1.ContainerStateWaiting)(unsafe.Pointer(in.Waiting)) + out.Running = (*v1.ContainerStateRunning)(unsafe.Pointer(in.Running)) + out.Terminated = (*v1.ContainerStateTerminated)(unsafe.Pointer(in.Terminated)) + return nil +} + +// Convert_core_ContainerState_To_v1_ContainerState is an autogenerated conversion function. +func Convert_core_ContainerState_To_v1_ContainerState(in *core.ContainerState, out *v1.ContainerState, s conversion.Scope) error { + return autoConvert_core_ContainerState_To_v1_ContainerState(in, out, s) +} + +func autoConvert_v1_ContainerStateRunning_To_core_ContainerStateRunning(in *v1.ContainerStateRunning, out *core.ContainerStateRunning, s conversion.Scope) error { + out.StartedAt = in.StartedAt + return nil +} + +// Convert_v1_ContainerStateRunning_To_core_ContainerStateRunning is an autogenerated conversion function. +func Convert_v1_ContainerStateRunning_To_core_ContainerStateRunning(in *v1.ContainerStateRunning, out *core.ContainerStateRunning, s conversion.Scope) error { + return autoConvert_v1_ContainerStateRunning_To_core_ContainerStateRunning(in, out, s) +} + +func autoConvert_core_ContainerStateRunning_To_v1_ContainerStateRunning(in *core.ContainerStateRunning, out *v1.ContainerStateRunning, s conversion.Scope) error { + out.StartedAt = in.StartedAt + return nil +} + +// Convert_core_ContainerStateRunning_To_v1_ContainerStateRunning is an autogenerated conversion function. 
+func Convert_core_ContainerStateRunning_To_v1_ContainerStateRunning(in *core.ContainerStateRunning, out *v1.ContainerStateRunning, s conversion.Scope) error { + return autoConvert_core_ContainerStateRunning_To_v1_ContainerStateRunning(in, out, s) +} + +func autoConvert_v1_ContainerStateTerminated_To_core_ContainerStateTerminated(in *v1.ContainerStateTerminated, out *core.ContainerStateTerminated, s conversion.Scope) error { + out.ExitCode = in.ExitCode + out.Signal = in.Signal + out.Reason = in.Reason + out.Message = in.Message + out.StartedAt = in.StartedAt + out.FinishedAt = in.FinishedAt + out.ContainerID = in.ContainerID + return nil +} + +// Convert_v1_ContainerStateTerminated_To_core_ContainerStateTerminated is an autogenerated conversion function. +func Convert_v1_ContainerStateTerminated_To_core_ContainerStateTerminated(in *v1.ContainerStateTerminated, out *core.ContainerStateTerminated, s conversion.Scope) error { + return autoConvert_v1_ContainerStateTerminated_To_core_ContainerStateTerminated(in, out, s) +} + +func autoConvert_core_ContainerStateTerminated_To_v1_ContainerStateTerminated(in *core.ContainerStateTerminated, out *v1.ContainerStateTerminated, s conversion.Scope) error { + out.ExitCode = in.ExitCode + out.Signal = in.Signal + out.Reason = in.Reason + out.Message = in.Message + out.StartedAt = in.StartedAt + out.FinishedAt = in.FinishedAt + out.ContainerID = in.ContainerID + return nil +} + +// Convert_core_ContainerStateTerminated_To_v1_ContainerStateTerminated is an autogenerated conversion function. +func Convert_core_ContainerStateTerminated_To_v1_ContainerStateTerminated(in *core.ContainerStateTerminated, out *v1.ContainerStateTerminated, s conversion.Scope) error { + return autoConvert_core_ContainerStateTerminated_To_v1_ContainerStateTerminated(in, out, s) +} + +func autoConvert_v1_ContainerStateWaiting_To_core_ContainerStateWaiting(in *v1.ContainerStateWaiting, out *core.ContainerStateWaiting, s conversion.Scope) error { + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1_ContainerStateWaiting_To_core_ContainerStateWaiting is an autogenerated conversion function. +func Convert_v1_ContainerStateWaiting_To_core_ContainerStateWaiting(in *v1.ContainerStateWaiting, out *core.ContainerStateWaiting, s conversion.Scope) error { + return autoConvert_v1_ContainerStateWaiting_To_core_ContainerStateWaiting(in, out, s) +} + +func autoConvert_core_ContainerStateWaiting_To_v1_ContainerStateWaiting(in *core.ContainerStateWaiting, out *v1.ContainerStateWaiting, s conversion.Scope) error { + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_core_ContainerStateWaiting_To_v1_ContainerStateWaiting is an autogenerated conversion function. 
+func Convert_core_ContainerStateWaiting_To_v1_ContainerStateWaiting(in *core.ContainerStateWaiting, out *v1.ContainerStateWaiting, s conversion.Scope) error { + return autoConvert_core_ContainerStateWaiting_To_v1_ContainerStateWaiting(in, out, s) +} + +func autoConvert_v1_ContainerStatus_To_core_ContainerStatus(in *v1.ContainerStatus, out *core.ContainerStatus, s conversion.Scope) error { + out.Name = in.Name + if err := Convert_v1_ContainerState_To_core_ContainerState(&in.State, &out.State, s); err != nil { + return err + } + if err := Convert_v1_ContainerState_To_core_ContainerState(&in.LastTerminationState, &out.LastTerminationState, s); err != nil { + return err + } + out.Ready = in.Ready + out.RestartCount = in.RestartCount + out.Image = in.Image + out.ImageID = in.ImageID + out.ContainerID = in.ContainerID + return nil +} + +// Convert_v1_ContainerStatus_To_core_ContainerStatus is an autogenerated conversion function. +func Convert_v1_ContainerStatus_To_core_ContainerStatus(in *v1.ContainerStatus, out *core.ContainerStatus, s conversion.Scope) error { + return autoConvert_v1_ContainerStatus_To_core_ContainerStatus(in, out, s) +} + +func autoConvert_core_ContainerStatus_To_v1_ContainerStatus(in *core.ContainerStatus, out *v1.ContainerStatus, s conversion.Scope) error { + out.Name = in.Name + if err := Convert_core_ContainerState_To_v1_ContainerState(&in.State, &out.State, s); err != nil { + return err + } + if err := Convert_core_ContainerState_To_v1_ContainerState(&in.LastTerminationState, &out.LastTerminationState, s); err != nil { + return err + } + out.Ready = in.Ready + out.RestartCount = in.RestartCount + out.Image = in.Image + out.ImageID = in.ImageID + out.ContainerID = in.ContainerID + return nil +} + +// Convert_core_ContainerStatus_To_v1_ContainerStatus is an autogenerated conversion function. +func Convert_core_ContainerStatus_To_v1_ContainerStatus(in *core.ContainerStatus, out *v1.ContainerStatus, s conversion.Scope) error { + return autoConvert_core_ContainerStatus_To_v1_ContainerStatus(in, out, s) +} + +func autoConvert_v1_DaemonEndpoint_To_core_DaemonEndpoint(in *v1.DaemonEndpoint, out *core.DaemonEndpoint, s conversion.Scope) error { + out.Port = in.Port + return nil +} + +// Convert_v1_DaemonEndpoint_To_core_DaemonEndpoint is an autogenerated conversion function. +func Convert_v1_DaemonEndpoint_To_core_DaemonEndpoint(in *v1.DaemonEndpoint, out *core.DaemonEndpoint, s conversion.Scope) error { + return autoConvert_v1_DaemonEndpoint_To_core_DaemonEndpoint(in, out, s) +} + +func autoConvert_core_DaemonEndpoint_To_v1_DaemonEndpoint(in *core.DaemonEndpoint, out *v1.DaemonEndpoint, s conversion.Scope) error { + out.Port = in.Port + return nil +} + +// Convert_core_DaemonEndpoint_To_v1_DaemonEndpoint is an autogenerated conversion function. 
+func Convert_core_DaemonEndpoint_To_v1_DaemonEndpoint(in *core.DaemonEndpoint, out *v1.DaemonEndpoint, s conversion.Scope) error { + return autoConvert_core_DaemonEndpoint_To_v1_DaemonEndpoint(in, out, s) +} + +func autoConvert_v1_DeleteOptions_To_core_DeleteOptions(in *v1.DeleteOptions, out *core.DeleteOptions, s conversion.Scope) error { + out.GracePeriodSeconds = (*int64)(unsafe.Pointer(in.GracePeriodSeconds)) + out.Preconditions = (*core.Preconditions)(unsafe.Pointer(in.Preconditions)) + out.OrphanDependents = (*bool)(unsafe.Pointer(in.OrphanDependents)) + out.PropagationPolicy = (*core.DeletionPropagation)(unsafe.Pointer(in.PropagationPolicy)) + return nil +} + +// Convert_v1_DeleteOptions_To_core_DeleteOptions is an autogenerated conversion function. +func Convert_v1_DeleteOptions_To_core_DeleteOptions(in *v1.DeleteOptions, out *core.DeleteOptions, s conversion.Scope) error { + return autoConvert_v1_DeleteOptions_To_core_DeleteOptions(in, out, s) +} + +func autoConvert_core_DeleteOptions_To_v1_DeleteOptions(in *core.DeleteOptions, out *v1.DeleteOptions, s conversion.Scope) error { + out.GracePeriodSeconds = (*int64)(unsafe.Pointer(in.GracePeriodSeconds)) + out.Preconditions = (*v1.Preconditions)(unsafe.Pointer(in.Preconditions)) + out.OrphanDependents = (*bool)(unsafe.Pointer(in.OrphanDependents)) + out.PropagationPolicy = (*v1.DeletionPropagation)(unsafe.Pointer(in.PropagationPolicy)) + return nil +} + +// Convert_core_DeleteOptions_To_v1_DeleteOptions is an autogenerated conversion function. +func Convert_core_DeleteOptions_To_v1_DeleteOptions(in *core.DeleteOptions, out *v1.DeleteOptions, s conversion.Scope) error { + return autoConvert_core_DeleteOptions_To_v1_DeleteOptions(in, out, s) +} + +func autoConvert_v1_DownwardAPIProjection_To_core_DownwardAPIProjection(in *v1.DownwardAPIProjection, out *core.DownwardAPIProjection, s conversion.Scope) error { + out.Items = *(*[]core.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_DownwardAPIProjection_To_core_DownwardAPIProjection is an autogenerated conversion function. +func Convert_v1_DownwardAPIProjection_To_core_DownwardAPIProjection(in *v1.DownwardAPIProjection, out *core.DownwardAPIProjection, s conversion.Scope) error { + return autoConvert_v1_DownwardAPIProjection_To_core_DownwardAPIProjection(in, out, s) +} + +func autoConvert_core_DownwardAPIProjection_To_v1_DownwardAPIProjection(in *core.DownwardAPIProjection, out *v1.DownwardAPIProjection, s conversion.Scope) error { + out.Items = *(*[]v1.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_DownwardAPIProjection_To_v1_DownwardAPIProjection is an autogenerated conversion function. +func Convert_core_DownwardAPIProjection_To_v1_DownwardAPIProjection(in *core.DownwardAPIProjection, out *v1.DownwardAPIProjection, s conversion.Scope) error { + return autoConvert_core_DownwardAPIProjection_To_v1_DownwardAPIProjection(in, out, s) +} + +func autoConvert_v1_DownwardAPIVolumeFile_To_core_DownwardAPIVolumeFile(in *v1.DownwardAPIVolumeFile, out *core.DownwardAPIVolumeFile, s conversion.Scope) error { + out.Path = in.Path + out.FieldRef = (*core.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) + out.ResourceFieldRef = (*core.ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) + out.Mode = (*int32)(unsafe.Pointer(in.Mode)) + return nil +} + +// Convert_v1_DownwardAPIVolumeFile_To_core_DownwardAPIVolumeFile is an autogenerated conversion function. 
+func Convert_v1_DownwardAPIVolumeFile_To_core_DownwardAPIVolumeFile(in *v1.DownwardAPIVolumeFile, out *core.DownwardAPIVolumeFile, s conversion.Scope) error { + return autoConvert_v1_DownwardAPIVolumeFile_To_core_DownwardAPIVolumeFile(in, out, s) +} + +func autoConvert_core_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in *core.DownwardAPIVolumeFile, out *v1.DownwardAPIVolumeFile, s conversion.Scope) error { + out.Path = in.Path + out.FieldRef = (*v1.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) + out.ResourceFieldRef = (*v1.ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) + out.Mode = (*int32)(unsafe.Pointer(in.Mode)) + return nil +} + +// Convert_core_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile is an autogenerated conversion function. +func Convert_core_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in *core.DownwardAPIVolumeFile, out *v1.DownwardAPIVolumeFile, s conversion.Scope) error { + return autoConvert_core_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in, out, s) +} + +func autoConvert_v1_DownwardAPIVolumeSource_To_core_DownwardAPIVolumeSource(in *v1.DownwardAPIVolumeSource, out *core.DownwardAPIVolumeSource, s conversion.Scope) error { + out.Items = *(*[]core.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + return nil +} + +// Convert_v1_DownwardAPIVolumeSource_To_core_DownwardAPIVolumeSource is an autogenerated conversion function. +func Convert_v1_DownwardAPIVolumeSource_To_core_DownwardAPIVolumeSource(in *v1.DownwardAPIVolumeSource, out *core.DownwardAPIVolumeSource, s conversion.Scope) error { + return autoConvert_v1_DownwardAPIVolumeSource_To_core_DownwardAPIVolumeSource(in, out, s) +} + +func autoConvert_core_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in *core.DownwardAPIVolumeSource, out *v1.DownwardAPIVolumeSource, s conversion.Scope) error { + out.Items = *(*[]v1.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + return nil +} + +// Convert_core_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource is an autogenerated conversion function. +func Convert_core_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in *core.DownwardAPIVolumeSource, out *v1.DownwardAPIVolumeSource, s conversion.Scope) error { + return autoConvert_core_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in, out, s) +} + +func autoConvert_v1_EmptyDirVolumeSource_To_core_EmptyDirVolumeSource(in *v1.EmptyDirVolumeSource, out *core.EmptyDirVolumeSource, s conversion.Scope) error { + out.Medium = core.StorageMedium(in.Medium) + out.SizeLimit = (*resource.Quantity)(unsafe.Pointer(in.SizeLimit)) + return nil +} + +// Convert_v1_EmptyDirVolumeSource_To_core_EmptyDirVolumeSource is an autogenerated conversion function. +func Convert_v1_EmptyDirVolumeSource_To_core_EmptyDirVolumeSource(in *v1.EmptyDirVolumeSource, out *core.EmptyDirVolumeSource, s conversion.Scope) error { + return autoConvert_v1_EmptyDirVolumeSource_To_core_EmptyDirVolumeSource(in, out, s) +} + +func autoConvert_core_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in *core.EmptyDirVolumeSource, out *v1.EmptyDirVolumeSource, s conversion.Scope) error { + out.Medium = v1.StorageMedium(in.Medium) + out.SizeLimit = (*resource.Quantity)(unsafe.Pointer(in.SizeLimit)) + return nil +} + +// Convert_core_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource is an autogenerated conversion function. 
+func Convert_core_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in *core.EmptyDirVolumeSource, out *v1.EmptyDirVolumeSource, s conversion.Scope) error { + return autoConvert_core_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in, out, s) +} + +func autoConvert_v1_EndpointAddress_To_core_EndpointAddress(in *v1.EndpointAddress, out *core.EndpointAddress, s conversion.Scope) error { + out.IP = in.IP + out.Hostname = in.Hostname + out.NodeName = (*string)(unsafe.Pointer(in.NodeName)) + out.TargetRef = (*core.ObjectReference)(unsafe.Pointer(in.TargetRef)) + return nil +} + +// Convert_v1_EndpointAddress_To_core_EndpointAddress is an autogenerated conversion function. +func Convert_v1_EndpointAddress_To_core_EndpointAddress(in *v1.EndpointAddress, out *core.EndpointAddress, s conversion.Scope) error { + return autoConvert_v1_EndpointAddress_To_core_EndpointAddress(in, out, s) +} + +func autoConvert_core_EndpointAddress_To_v1_EndpointAddress(in *core.EndpointAddress, out *v1.EndpointAddress, s conversion.Scope) error { + out.IP = in.IP + out.Hostname = in.Hostname + out.NodeName = (*string)(unsafe.Pointer(in.NodeName)) + out.TargetRef = (*v1.ObjectReference)(unsafe.Pointer(in.TargetRef)) + return nil +} + +// Convert_core_EndpointAddress_To_v1_EndpointAddress is an autogenerated conversion function. +func Convert_core_EndpointAddress_To_v1_EndpointAddress(in *core.EndpointAddress, out *v1.EndpointAddress, s conversion.Scope) error { + return autoConvert_core_EndpointAddress_To_v1_EndpointAddress(in, out, s) +} + +func autoConvert_v1_EndpointPort_To_core_EndpointPort(in *v1.EndpointPort, out *core.EndpointPort, s conversion.Scope) error { + out.Name = in.Name + out.Port = in.Port + out.Protocol = core.Protocol(in.Protocol) + return nil +} + +// Convert_v1_EndpointPort_To_core_EndpointPort is an autogenerated conversion function. +func Convert_v1_EndpointPort_To_core_EndpointPort(in *v1.EndpointPort, out *core.EndpointPort, s conversion.Scope) error { + return autoConvert_v1_EndpointPort_To_core_EndpointPort(in, out, s) +} + +func autoConvert_core_EndpointPort_To_v1_EndpointPort(in *core.EndpointPort, out *v1.EndpointPort, s conversion.Scope) error { + out.Name = in.Name + out.Port = in.Port + out.Protocol = v1.Protocol(in.Protocol) + return nil +} + +// Convert_core_EndpointPort_To_v1_EndpointPort is an autogenerated conversion function. +func Convert_core_EndpointPort_To_v1_EndpointPort(in *core.EndpointPort, out *v1.EndpointPort, s conversion.Scope) error { + return autoConvert_core_EndpointPort_To_v1_EndpointPort(in, out, s) +} + +func autoConvert_v1_EndpointSubset_To_core_EndpointSubset(in *v1.EndpointSubset, out *core.EndpointSubset, s conversion.Scope) error { + out.Addresses = *(*[]core.EndpointAddress)(unsafe.Pointer(&in.Addresses)) + out.NotReadyAddresses = *(*[]core.EndpointAddress)(unsafe.Pointer(&in.NotReadyAddresses)) + out.Ports = *(*[]core.EndpointPort)(unsafe.Pointer(&in.Ports)) + return nil +} + +// Convert_v1_EndpointSubset_To_core_EndpointSubset is an autogenerated conversion function. 
+func Convert_v1_EndpointSubset_To_core_EndpointSubset(in *v1.EndpointSubset, out *core.EndpointSubset, s conversion.Scope) error { + return autoConvert_v1_EndpointSubset_To_core_EndpointSubset(in, out, s) +} + +func autoConvert_core_EndpointSubset_To_v1_EndpointSubset(in *core.EndpointSubset, out *v1.EndpointSubset, s conversion.Scope) error { + out.Addresses = *(*[]v1.EndpointAddress)(unsafe.Pointer(&in.Addresses)) + out.NotReadyAddresses = *(*[]v1.EndpointAddress)(unsafe.Pointer(&in.NotReadyAddresses)) + out.Ports = *(*[]v1.EndpointPort)(unsafe.Pointer(&in.Ports)) + return nil +} + +// Convert_core_EndpointSubset_To_v1_EndpointSubset is an autogenerated conversion function. +func Convert_core_EndpointSubset_To_v1_EndpointSubset(in *core.EndpointSubset, out *v1.EndpointSubset, s conversion.Scope) error { + return autoConvert_core_EndpointSubset_To_v1_EndpointSubset(in, out, s) +} + +func autoConvert_v1_Endpoints_To_core_Endpoints(in *v1.Endpoints, out *core.Endpoints, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Subsets = *(*[]core.EndpointSubset)(unsafe.Pointer(&in.Subsets)) + return nil +} + +// Convert_v1_Endpoints_To_core_Endpoints is an autogenerated conversion function. +func Convert_v1_Endpoints_To_core_Endpoints(in *v1.Endpoints, out *core.Endpoints, s conversion.Scope) error { + return autoConvert_v1_Endpoints_To_core_Endpoints(in, out, s) +} + +func autoConvert_core_Endpoints_To_v1_Endpoints(in *core.Endpoints, out *v1.Endpoints, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Subsets = *(*[]v1.EndpointSubset)(unsafe.Pointer(&in.Subsets)) + return nil +} + +// Convert_core_Endpoints_To_v1_Endpoints is an autogenerated conversion function. +func Convert_core_Endpoints_To_v1_Endpoints(in *core.Endpoints, out *v1.Endpoints, s conversion.Scope) error { + return autoConvert_core_Endpoints_To_v1_Endpoints(in, out, s) +} + +func autoConvert_v1_EndpointsList_To_core_EndpointsList(in *v1.EndpointsList, out *core.EndpointsList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.Endpoints)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_EndpointsList_To_core_EndpointsList is an autogenerated conversion function. +func Convert_v1_EndpointsList_To_core_EndpointsList(in *v1.EndpointsList, out *core.EndpointsList, s conversion.Scope) error { + return autoConvert_v1_EndpointsList_To_core_EndpointsList(in, out, s) +} + +func autoConvert_core_EndpointsList_To_v1_EndpointsList(in *core.EndpointsList, out *v1.EndpointsList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1.Endpoints)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_EndpointsList_To_v1_EndpointsList is an autogenerated conversion function. +func Convert_core_EndpointsList_To_v1_EndpointsList(in *core.EndpointsList, out *v1.EndpointsList, s conversion.Scope) error { + return autoConvert_core_EndpointsList_To_v1_EndpointsList(in, out, s) +} + +func autoConvert_v1_EnvFromSource_To_core_EnvFromSource(in *v1.EnvFromSource, out *core.EnvFromSource, s conversion.Scope) error { + out.Prefix = in.Prefix + out.ConfigMapRef = (*core.ConfigMapEnvSource)(unsafe.Pointer(in.ConfigMapRef)) + out.SecretRef = (*core.SecretEnvSource)(unsafe.Pointer(in.SecretRef)) + return nil +} + +// Convert_v1_EnvFromSource_To_core_EnvFromSource is an autogenerated conversion function. 
+func Convert_v1_EnvFromSource_To_core_EnvFromSource(in *v1.EnvFromSource, out *core.EnvFromSource, s conversion.Scope) error { + return autoConvert_v1_EnvFromSource_To_core_EnvFromSource(in, out, s) +} + +func autoConvert_core_EnvFromSource_To_v1_EnvFromSource(in *core.EnvFromSource, out *v1.EnvFromSource, s conversion.Scope) error { + out.Prefix = in.Prefix + out.ConfigMapRef = (*v1.ConfigMapEnvSource)(unsafe.Pointer(in.ConfigMapRef)) + out.SecretRef = (*v1.SecretEnvSource)(unsafe.Pointer(in.SecretRef)) + return nil +} + +// Convert_core_EnvFromSource_To_v1_EnvFromSource is an autogenerated conversion function. +func Convert_core_EnvFromSource_To_v1_EnvFromSource(in *core.EnvFromSource, out *v1.EnvFromSource, s conversion.Scope) error { + return autoConvert_core_EnvFromSource_To_v1_EnvFromSource(in, out, s) +} + +func autoConvert_v1_EnvVar_To_core_EnvVar(in *v1.EnvVar, out *core.EnvVar, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + out.ValueFrom = (*core.EnvVarSource)(unsafe.Pointer(in.ValueFrom)) + return nil +} + +// Convert_v1_EnvVar_To_core_EnvVar is an autogenerated conversion function. +func Convert_v1_EnvVar_To_core_EnvVar(in *v1.EnvVar, out *core.EnvVar, s conversion.Scope) error { + return autoConvert_v1_EnvVar_To_core_EnvVar(in, out, s) +} + +func autoConvert_core_EnvVar_To_v1_EnvVar(in *core.EnvVar, out *v1.EnvVar, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + out.ValueFrom = (*v1.EnvVarSource)(unsafe.Pointer(in.ValueFrom)) + return nil +} + +// Convert_core_EnvVar_To_v1_EnvVar is an autogenerated conversion function. +func Convert_core_EnvVar_To_v1_EnvVar(in *core.EnvVar, out *v1.EnvVar, s conversion.Scope) error { + return autoConvert_core_EnvVar_To_v1_EnvVar(in, out, s) +} + +func autoConvert_v1_EnvVarSource_To_core_EnvVarSource(in *v1.EnvVarSource, out *core.EnvVarSource, s conversion.Scope) error { + out.FieldRef = (*core.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) + out.ResourceFieldRef = (*core.ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) + out.ConfigMapKeyRef = (*core.ConfigMapKeySelector)(unsafe.Pointer(in.ConfigMapKeyRef)) + out.SecretKeyRef = (*core.SecretKeySelector)(unsafe.Pointer(in.SecretKeyRef)) + return nil +} + +// Convert_v1_EnvVarSource_To_core_EnvVarSource is an autogenerated conversion function. +func Convert_v1_EnvVarSource_To_core_EnvVarSource(in *v1.EnvVarSource, out *core.EnvVarSource, s conversion.Scope) error { + return autoConvert_v1_EnvVarSource_To_core_EnvVarSource(in, out, s) +} + +func autoConvert_core_EnvVarSource_To_v1_EnvVarSource(in *core.EnvVarSource, out *v1.EnvVarSource, s conversion.Scope) error { + out.FieldRef = (*v1.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) + out.ResourceFieldRef = (*v1.ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) + out.ConfigMapKeyRef = (*v1.ConfigMapKeySelector)(unsafe.Pointer(in.ConfigMapKeyRef)) + out.SecretKeyRef = (*v1.SecretKeySelector)(unsafe.Pointer(in.SecretKeyRef)) + return nil +} + +// Convert_core_EnvVarSource_To_v1_EnvVarSource is an autogenerated conversion function. 
+func Convert_core_EnvVarSource_To_v1_EnvVarSource(in *core.EnvVarSource, out *v1.EnvVarSource, s conversion.Scope) error { + return autoConvert_core_EnvVarSource_To_v1_EnvVarSource(in, out, s) +} + +func autoConvert_v1_Event_To_core_Event(in *v1.Event, out *core.Event, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_ObjectReference_To_core_ObjectReference(&in.InvolvedObject, &out.InvolvedObject, s); err != nil { + return err + } + out.Reason = in.Reason + out.Message = in.Message + if err := Convert_v1_EventSource_To_core_EventSource(&in.Source, &out.Source, s); err != nil { + return err + } + out.FirstTimestamp = in.FirstTimestamp + out.LastTimestamp = in.LastTimestamp + out.Count = in.Count + out.Type = in.Type + out.EventTime = in.EventTime + out.Series = (*core.EventSeries)(unsafe.Pointer(in.Series)) + out.Action = in.Action + out.Related = (*core.ObjectReference)(unsafe.Pointer(in.Related)) + out.ReportingController = in.ReportingController + out.ReportingInstance = in.ReportingInstance + return nil +} + +// Convert_v1_Event_To_core_Event is an autogenerated conversion function. +func Convert_v1_Event_To_core_Event(in *v1.Event, out *core.Event, s conversion.Scope) error { + return autoConvert_v1_Event_To_core_Event(in, out, s) +} + +func autoConvert_core_Event_To_v1_Event(in *core.Event, out *v1.Event, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_ObjectReference_To_v1_ObjectReference(&in.InvolvedObject, &out.InvolvedObject, s); err != nil { + return err + } + out.Reason = in.Reason + out.Message = in.Message + if err := Convert_core_EventSource_To_v1_EventSource(&in.Source, &out.Source, s); err != nil { + return err + } + out.FirstTimestamp = in.FirstTimestamp + out.LastTimestamp = in.LastTimestamp + out.Count = in.Count + out.Type = in.Type + out.EventTime = in.EventTime + out.Series = (*v1.EventSeries)(unsafe.Pointer(in.Series)) + out.Action = in.Action + out.Related = (*v1.ObjectReference)(unsafe.Pointer(in.Related)) + out.ReportingController = in.ReportingController + out.ReportingInstance = in.ReportingInstance + return nil +} + +// Convert_core_Event_To_v1_Event is an autogenerated conversion function. +func Convert_core_Event_To_v1_Event(in *core.Event, out *v1.Event, s conversion.Scope) error { + return autoConvert_core_Event_To_v1_Event(in, out, s) +} + +func autoConvert_v1_EventList_To_core_EventList(in *v1.EventList, out *core.EventList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.Event)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_EventList_To_core_EventList is an autogenerated conversion function. +func Convert_v1_EventList_To_core_EventList(in *v1.EventList, out *core.EventList, s conversion.Scope) error { + return autoConvert_v1_EventList_To_core_EventList(in, out, s) +} + +func autoConvert_core_EventList_To_v1_EventList(in *core.EventList, out *v1.EventList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1.Event)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_EventList_To_v1_EventList is an autogenerated conversion function. 
+func Convert_core_EventList_To_v1_EventList(in *core.EventList, out *v1.EventList, s conversion.Scope) error { + return autoConvert_core_EventList_To_v1_EventList(in, out, s) +} + +func autoConvert_v1_EventSeries_To_core_EventSeries(in *v1.EventSeries, out *core.EventSeries, s conversion.Scope) error { + out.Count = in.Count + out.LastObservedTime = in.LastObservedTime + out.State = core.EventSeriesState(in.State) + return nil +} + +// Convert_v1_EventSeries_To_core_EventSeries is an autogenerated conversion function. +func Convert_v1_EventSeries_To_core_EventSeries(in *v1.EventSeries, out *core.EventSeries, s conversion.Scope) error { + return autoConvert_v1_EventSeries_To_core_EventSeries(in, out, s) +} + +func autoConvert_core_EventSeries_To_v1_EventSeries(in *core.EventSeries, out *v1.EventSeries, s conversion.Scope) error { + out.Count = in.Count + out.LastObservedTime = in.LastObservedTime + out.State = v1.EventSeriesState(in.State) + return nil +} + +// Convert_core_EventSeries_To_v1_EventSeries is an autogenerated conversion function. +func Convert_core_EventSeries_To_v1_EventSeries(in *core.EventSeries, out *v1.EventSeries, s conversion.Scope) error { + return autoConvert_core_EventSeries_To_v1_EventSeries(in, out, s) +} + +func autoConvert_v1_EventSource_To_core_EventSource(in *v1.EventSource, out *core.EventSource, s conversion.Scope) error { + out.Component = in.Component + out.Host = in.Host + return nil +} + +// Convert_v1_EventSource_To_core_EventSource is an autogenerated conversion function. +func Convert_v1_EventSource_To_core_EventSource(in *v1.EventSource, out *core.EventSource, s conversion.Scope) error { + return autoConvert_v1_EventSource_To_core_EventSource(in, out, s) +} + +func autoConvert_core_EventSource_To_v1_EventSource(in *core.EventSource, out *v1.EventSource, s conversion.Scope) error { + out.Component = in.Component + out.Host = in.Host + return nil +} + +// Convert_core_EventSource_To_v1_EventSource is an autogenerated conversion function. +func Convert_core_EventSource_To_v1_EventSource(in *core.EventSource, out *v1.EventSource, s conversion.Scope) error { + return autoConvert_core_EventSource_To_v1_EventSource(in, out, s) +} + +func autoConvert_v1_ExecAction_To_core_ExecAction(in *v1.ExecAction, out *core.ExecAction, s conversion.Scope) error { + out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) + return nil +} + +// Convert_v1_ExecAction_To_core_ExecAction is an autogenerated conversion function. +func Convert_v1_ExecAction_To_core_ExecAction(in *v1.ExecAction, out *core.ExecAction, s conversion.Scope) error { + return autoConvert_v1_ExecAction_To_core_ExecAction(in, out, s) +} + +func autoConvert_core_ExecAction_To_v1_ExecAction(in *core.ExecAction, out *v1.ExecAction, s conversion.Scope) error { + out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) + return nil +} + +// Convert_core_ExecAction_To_v1_ExecAction is an autogenerated conversion function. 
+func Convert_core_ExecAction_To_v1_ExecAction(in *core.ExecAction, out *v1.ExecAction, s conversion.Scope) error { + return autoConvert_core_ExecAction_To_v1_ExecAction(in, out, s) +} + +func autoConvert_v1_FCVolumeSource_To_core_FCVolumeSource(in *v1.FCVolumeSource, out *core.FCVolumeSource, s conversion.Scope) error { + out.TargetWWNs = *(*[]string)(unsafe.Pointer(&in.TargetWWNs)) + out.Lun = (*int32)(unsafe.Pointer(in.Lun)) + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.WWIDs = *(*[]string)(unsafe.Pointer(&in.WWIDs)) + return nil +} + +// Convert_v1_FCVolumeSource_To_core_FCVolumeSource is an autogenerated conversion function. +func Convert_v1_FCVolumeSource_To_core_FCVolumeSource(in *v1.FCVolumeSource, out *core.FCVolumeSource, s conversion.Scope) error { + return autoConvert_v1_FCVolumeSource_To_core_FCVolumeSource(in, out, s) +} + +func autoConvert_core_FCVolumeSource_To_v1_FCVolumeSource(in *core.FCVolumeSource, out *v1.FCVolumeSource, s conversion.Scope) error { + out.TargetWWNs = *(*[]string)(unsafe.Pointer(&in.TargetWWNs)) + out.Lun = (*int32)(unsafe.Pointer(in.Lun)) + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.WWIDs = *(*[]string)(unsafe.Pointer(&in.WWIDs)) + return nil +} + +// Convert_core_FCVolumeSource_To_v1_FCVolumeSource is an autogenerated conversion function. +func Convert_core_FCVolumeSource_To_v1_FCVolumeSource(in *core.FCVolumeSource, out *v1.FCVolumeSource, s conversion.Scope) error { + return autoConvert_core_FCVolumeSource_To_v1_FCVolumeSource(in, out, s) +} + +func autoConvert_v1_FlexVolumeSource_To_core_FlexVolumeSource(in *v1.FlexVolumeSource, out *core.FlexVolumeSource, s conversion.Scope) error { + out.Driver = in.Driver + out.FSType = in.FSType + out.SecretRef = (*core.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + out.Options = *(*map[string]string)(unsafe.Pointer(&in.Options)) + return nil +} + +// Convert_v1_FlexVolumeSource_To_core_FlexVolumeSource is an autogenerated conversion function. +func Convert_v1_FlexVolumeSource_To_core_FlexVolumeSource(in *v1.FlexVolumeSource, out *core.FlexVolumeSource, s conversion.Scope) error { + return autoConvert_v1_FlexVolumeSource_To_core_FlexVolumeSource(in, out, s) +} + +func autoConvert_core_FlexVolumeSource_To_v1_FlexVolumeSource(in *core.FlexVolumeSource, out *v1.FlexVolumeSource, s conversion.Scope) error { + out.Driver = in.Driver + out.FSType = in.FSType + out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + out.Options = *(*map[string]string)(unsafe.Pointer(&in.Options)) + return nil +} + +// Convert_core_FlexVolumeSource_To_v1_FlexVolumeSource is an autogenerated conversion function. +func Convert_core_FlexVolumeSource_To_v1_FlexVolumeSource(in *core.FlexVolumeSource, out *v1.FlexVolumeSource, s conversion.Scope) error { + return autoConvert_core_FlexVolumeSource_To_v1_FlexVolumeSource(in, out, s) +} + +func autoConvert_v1_FlockerVolumeSource_To_core_FlockerVolumeSource(in *v1.FlockerVolumeSource, out *core.FlockerVolumeSource, s conversion.Scope) error { + out.DatasetName = in.DatasetName + out.DatasetUUID = in.DatasetUUID + return nil +} + +// Convert_v1_FlockerVolumeSource_To_core_FlockerVolumeSource is an autogenerated conversion function. 
+func Convert_v1_FlockerVolumeSource_To_core_FlockerVolumeSource(in *v1.FlockerVolumeSource, out *core.FlockerVolumeSource, s conversion.Scope) error { + return autoConvert_v1_FlockerVolumeSource_To_core_FlockerVolumeSource(in, out, s) +} + +func autoConvert_core_FlockerVolumeSource_To_v1_FlockerVolumeSource(in *core.FlockerVolumeSource, out *v1.FlockerVolumeSource, s conversion.Scope) error { + out.DatasetName = in.DatasetName + out.DatasetUUID = in.DatasetUUID + return nil +} + +// Convert_core_FlockerVolumeSource_To_v1_FlockerVolumeSource is an autogenerated conversion function. +func Convert_core_FlockerVolumeSource_To_v1_FlockerVolumeSource(in *core.FlockerVolumeSource, out *v1.FlockerVolumeSource, s conversion.Scope) error { + return autoConvert_core_FlockerVolumeSource_To_v1_FlockerVolumeSource(in, out, s) +} + +func autoConvert_v1_GCEPersistentDiskVolumeSource_To_core_GCEPersistentDiskVolumeSource(in *v1.GCEPersistentDiskVolumeSource, out *core.GCEPersistentDiskVolumeSource, s conversion.Scope) error { + out.PDName = in.PDName + out.FSType = in.FSType + out.Partition = in.Partition + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_GCEPersistentDiskVolumeSource_To_core_GCEPersistentDiskVolumeSource is an autogenerated conversion function. +func Convert_v1_GCEPersistentDiskVolumeSource_To_core_GCEPersistentDiskVolumeSource(in *v1.GCEPersistentDiskVolumeSource, out *core.GCEPersistentDiskVolumeSource, s conversion.Scope) error { + return autoConvert_v1_GCEPersistentDiskVolumeSource_To_core_GCEPersistentDiskVolumeSource(in, out, s) +} + +func autoConvert_core_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in *core.GCEPersistentDiskVolumeSource, out *v1.GCEPersistentDiskVolumeSource, s conversion.Scope) error { + out.PDName = in.PDName + out.FSType = in.FSType + out.Partition = in.Partition + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource is an autogenerated conversion function. +func Convert_core_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in *core.GCEPersistentDiskVolumeSource, out *v1.GCEPersistentDiskVolumeSource, s conversion.Scope) error { + return autoConvert_core_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in, out, s) +} + +func autoConvert_v1_GitRepoVolumeSource_To_core_GitRepoVolumeSource(in *v1.GitRepoVolumeSource, out *core.GitRepoVolumeSource, s conversion.Scope) error { + out.Repository = in.Repository + out.Revision = in.Revision + out.Directory = in.Directory + return nil +} + +// Convert_v1_GitRepoVolumeSource_To_core_GitRepoVolumeSource is an autogenerated conversion function. +func Convert_v1_GitRepoVolumeSource_To_core_GitRepoVolumeSource(in *v1.GitRepoVolumeSource, out *core.GitRepoVolumeSource, s conversion.Scope) error { + return autoConvert_v1_GitRepoVolumeSource_To_core_GitRepoVolumeSource(in, out, s) +} + +func autoConvert_core_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in *core.GitRepoVolumeSource, out *v1.GitRepoVolumeSource, s conversion.Scope) error { + out.Repository = in.Repository + out.Revision = in.Revision + out.Directory = in.Directory + return nil +} + +// Convert_core_GitRepoVolumeSource_To_v1_GitRepoVolumeSource is an autogenerated conversion function. 
+func Convert_core_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in *core.GitRepoVolumeSource, out *v1.GitRepoVolumeSource, s conversion.Scope) error { + return autoConvert_core_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in, out, s) +} + +func autoConvert_v1_GlusterfsVolumeSource_To_core_GlusterfsVolumeSource(in *v1.GlusterfsVolumeSource, out *core.GlusterfsVolumeSource, s conversion.Scope) error { + out.EndpointsName = in.EndpointsName + out.Path = in.Path + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_GlusterfsVolumeSource_To_core_GlusterfsVolumeSource is an autogenerated conversion function. +func Convert_v1_GlusterfsVolumeSource_To_core_GlusterfsVolumeSource(in *v1.GlusterfsVolumeSource, out *core.GlusterfsVolumeSource, s conversion.Scope) error { + return autoConvert_v1_GlusterfsVolumeSource_To_core_GlusterfsVolumeSource(in, out, s) +} + +func autoConvert_core_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in *core.GlusterfsVolumeSource, out *v1.GlusterfsVolumeSource, s conversion.Scope) error { + out.EndpointsName = in.EndpointsName + out.Path = in.Path + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource is an autogenerated conversion function. +func Convert_core_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in *core.GlusterfsVolumeSource, out *v1.GlusterfsVolumeSource, s conversion.Scope) error { + return autoConvert_core_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in, out, s) +} + +func autoConvert_v1_HTTPGetAction_To_core_HTTPGetAction(in *v1.HTTPGetAction, out *core.HTTPGetAction, s conversion.Scope) error { + out.Path = in.Path + out.Port = in.Port + out.Host = in.Host + out.Scheme = core.URIScheme(in.Scheme) + out.HTTPHeaders = *(*[]core.HTTPHeader)(unsafe.Pointer(&in.HTTPHeaders)) + return nil +} + +// Convert_v1_HTTPGetAction_To_core_HTTPGetAction is an autogenerated conversion function. +func Convert_v1_HTTPGetAction_To_core_HTTPGetAction(in *v1.HTTPGetAction, out *core.HTTPGetAction, s conversion.Scope) error { + return autoConvert_v1_HTTPGetAction_To_core_HTTPGetAction(in, out, s) +} + +func autoConvert_core_HTTPGetAction_To_v1_HTTPGetAction(in *core.HTTPGetAction, out *v1.HTTPGetAction, s conversion.Scope) error { + out.Path = in.Path + out.Port = in.Port + out.Host = in.Host + out.Scheme = v1.URIScheme(in.Scheme) + out.HTTPHeaders = *(*[]v1.HTTPHeader)(unsafe.Pointer(&in.HTTPHeaders)) + return nil +} + +// Convert_core_HTTPGetAction_To_v1_HTTPGetAction is an autogenerated conversion function. +func Convert_core_HTTPGetAction_To_v1_HTTPGetAction(in *core.HTTPGetAction, out *v1.HTTPGetAction, s conversion.Scope) error { + return autoConvert_core_HTTPGetAction_To_v1_HTTPGetAction(in, out, s) +} + +func autoConvert_v1_HTTPHeader_To_core_HTTPHeader(in *v1.HTTPHeader, out *core.HTTPHeader, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + return nil +} + +// Convert_v1_HTTPHeader_To_core_HTTPHeader is an autogenerated conversion function. +func Convert_v1_HTTPHeader_To_core_HTTPHeader(in *v1.HTTPHeader, out *core.HTTPHeader, s conversion.Scope) error { + return autoConvert_v1_HTTPHeader_To_core_HTTPHeader(in, out, s) +} + +func autoConvert_core_HTTPHeader_To_v1_HTTPHeader(in *core.HTTPHeader, out *v1.HTTPHeader, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + return nil +} + +// Convert_core_HTTPHeader_To_v1_HTTPHeader is an autogenerated conversion function. 
+func Convert_core_HTTPHeader_To_v1_HTTPHeader(in *core.HTTPHeader, out *v1.HTTPHeader, s conversion.Scope) error { + return autoConvert_core_HTTPHeader_To_v1_HTTPHeader(in, out, s) +} + +func autoConvert_v1_Handler_To_core_Handler(in *v1.Handler, out *core.Handler, s conversion.Scope) error { + out.Exec = (*core.ExecAction)(unsafe.Pointer(in.Exec)) + out.HTTPGet = (*core.HTTPGetAction)(unsafe.Pointer(in.HTTPGet)) + out.TCPSocket = (*core.TCPSocketAction)(unsafe.Pointer(in.TCPSocket)) + return nil +} + +// Convert_v1_Handler_To_core_Handler is an autogenerated conversion function. +func Convert_v1_Handler_To_core_Handler(in *v1.Handler, out *core.Handler, s conversion.Scope) error { + return autoConvert_v1_Handler_To_core_Handler(in, out, s) +} + +func autoConvert_core_Handler_To_v1_Handler(in *core.Handler, out *v1.Handler, s conversion.Scope) error { + out.Exec = (*v1.ExecAction)(unsafe.Pointer(in.Exec)) + out.HTTPGet = (*v1.HTTPGetAction)(unsafe.Pointer(in.HTTPGet)) + out.TCPSocket = (*v1.TCPSocketAction)(unsafe.Pointer(in.TCPSocket)) + return nil +} + +// Convert_core_Handler_To_v1_Handler is an autogenerated conversion function. +func Convert_core_Handler_To_v1_Handler(in *core.Handler, out *v1.Handler, s conversion.Scope) error { + return autoConvert_core_Handler_To_v1_Handler(in, out, s) +} + +func autoConvert_v1_HostAlias_To_core_HostAlias(in *v1.HostAlias, out *core.HostAlias, s conversion.Scope) error { + out.IP = in.IP + out.Hostnames = *(*[]string)(unsafe.Pointer(&in.Hostnames)) + return nil +} + +// Convert_v1_HostAlias_To_core_HostAlias is an autogenerated conversion function. +func Convert_v1_HostAlias_To_core_HostAlias(in *v1.HostAlias, out *core.HostAlias, s conversion.Scope) error { + return autoConvert_v1_HostAlias_To_core_HostAlias(in, out, s) +} + +func autoConvert_core_HostAlias_To_v1_HostAlias(in *core.HostAlias, out *v1.HostAlias, s conversion.Scope) error { + out.IP = in.IP + out.Hostnames = *(*[]string)(unsafe.Pointer(&in.Hostnames)) + return nil +} + +// Convert_core_HostAlias_To_v1_HostAlias is an autogenerated conversion function. +func Convert_core_HostAlias_To_v1_HostAlias(in *core.HostAlias, out *v1.HostAlias, s conversion.Scope) error { + return autoConvert_core_HostAlias_To_v1_HostAlias(in, out, s) +} + +func autoConvert_v1_HostPathVolumeSource_To_core_HostPathVolumeSource(in *v1.HostPathVolumeSource, out *core.HostPathVolumeSource, s conversion.Scope) error { + out.Path = in.Path + out.Type = (*core.HostPathType)(unsafe.Pointer(in.Type)) + return nil +} + +// Convert_v1_HostPathVolumeSource_To_core_HostPathVolumeSource is an autogenerated conversion function. +func Convert_v1_HostPathVolumeSource_To_core_HostPathVolumeSource(in *v1.HostPathVolumeSource, out *core.HostPathVolumeSource, s conversion.Scope) error { + return autoConvert_v1_HostPathVolumeSource_To_core_HostPathVolumeSource(in, out, s) +} + +func autoConvert_core_HostPathVolumeSource_To_v1_HostPathVolumeSource(in *core.HostPathVolumeSource, out *v1.HostPathVolumeSource, s conversion.Scope) error { + out.Path = in.Path + out.Type = (*v1.HostPathType)(unsafe.Pointer(in.Type)) + return nil +} + +// Convert_core_HostPathVolumeSource_To_v1_HostPathVolumeSource is an autogenerated conversion function. 
+func Convert_core_HostPathVolumeSource_To_v1_HostPathVolumeSource(in *core.HostPathVolumeSource, out *v1.HostPathVolumeSource, s conversion.Scope) error { + return autoConvert_core_HostPathVolumeSource_To_v1_HostPathVolumeSource(in, out, s) +} + +func autoConvert_v1_ISCSIPersistentVolumeSource_To_core_ISCSIPersistentVolumeSource(in *v1.ISCSIPersistentVolumeSource, out *core.ISCSIPersistentVolumeSource, s conversion.Scope) error { + out.TargetPortal = in.TargetPortal + out.IQN = in.IQN + out.Lun = in.Lun + out.ISCSIInterface = in.ISCSIInterface + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.Portals = *(*[]string)(unsafe.Pointer(&in.Portals)) + out.DiscoveryCHAPAuth = in.DiscoveryCHAPAuth + out.SessionCHAPAuth = in.SessionCHAPAuth + out.SecretRef = (*core.SecretReference)(unsafe.Pointer(in.SecretRef)) + out.InitiatorName = (*string)(unsafe.Pointer(in.InitiatorName)) + return nil +} + +// Convert_v1_ISCSIPersistentVolumeSource_To_core_ISCSIPersistentVolumeSource is an autogenerated conversion function. +func Convert_v1_ISCSIPersistentVolumeSource_To_core_ISCSIPersistentVolumeSource(in *v1.ISCSIPersistentVolumeSource, out *core.ISCSIPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_v1_ISCSIPersistentVolumeSource_To_core_ISCSIPersistentVolumeSource(in, out, s) +} + +func autoConvert_core_ISCSIPersistentVolumeSource_To_v1_ISCSIPersistentVolumeSource(in *core.ISCSIPersistentVolumeSource, out *v1.ISCSIPersistentVolumeSource, s conversion.Scope) error { + out.TargetPortal = in.TargetPortal + out.IQN = in.IQN + out.Lun = in.Lun + out.ISCSIInterface = in.ISCSIInterface + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.Portals = *(*[]string)(unsafe.Pointer(&in.Portals)) + out.DiscoveryCHAPAuth = in.DiscoveryCHAPAuth + out.SessionCHAPAuth = in.SessionCHAPAuth + out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) + out.InitiatorName = (*string)(unsafe.Pointer(in.InitiatorName)) + return nil +} + +// Convert_core_ISCSIPersistentVolumeSource_To_v1_ISCSIPersistentVolumeSource is an autogenerated conversion function. +func Convert_core_ISCSIPersistentVolumeSource_To_v1_ISCSIPersistentVolumeSource(in *core.ISCSIPersistentVolumeSource, out *v1.ISCSIPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_core_ISCSIPersistentVolumeSource_To_v1_ISCSIPersistentVolumeSource(in, out, s) +} + +func autoConvert_v1_ISCSIVolumeSource_To_core_ISCSIVolumeSource(in *v1.ISCSIVolumeSource, out *core.ISCSIVolumeSource, s conversion.Scope) error { + out.TargetPortal = in.TargetPortal + out.IQN = in.IQN + out.Lun = in.Lun + out.ISCSIInterface = in.ISCSIInterface + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.Portals = *(*[]string)(unsafe.Pointer(&in.Portals)) + out.DiscoveryCHAPAuth = in.DiscoveryCHAPAuth + out.SessionCHAPAuth = in.SessionCHAPAuth + out.SecretRef = (*core.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.InitiatorName = (*string)(unsafe.Pointer(in.InitiatorName)) + return nil +} + +// Convert_v1_ISCSIVolumeSource_To_core_ISCSIVolumeSource is an autogenerated conversion function. 
+func Convert_v1_ISCSIVolumeSource_To_core_ISCSIVolumeSource(in *v1.ISCSIVolumeSource, out *core.ISCSIVolumeSource, s conversion.Scope) error { + return autoConvert_v1_ISCSIVolumeSource_To_core_ISCSIVolumeSource(in, out, s) +} + +func autoConvert_core_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in *core.ISCSIVolumeSource, out *v1.ISCSIVolumeSource, s conversion.Scope) error { + out.TargetPortal = in.TargetPortal + out.IQN = in.IQN + out.Lun = in.Lun + out.ISCSIInterface = in.ISCSIInterface + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.Portals = *(*[]string)(unsafe.Pointer(&in.Portals)) + out.DiscoveryCHAPAuth = in.DiscoveryCHAPAuth + out.SessionCHAPAuth = in.SessionCHAPAuth + out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.InitiatorName = (*string)(unsafe.Pointer(in.InitiatorName)) + return nil +} + +// Convert_core_ISCSIVolumeSource_To_v1_ISCSIVolumeSource is an autogenerated conversion function. +func Convert_core_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in *core.ISCSIVolumeSource, out *v1.ISCSIVolumeSource, s conversion.Scope) error { + return autoConvert_core_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in, out, s) +} + +func autoConvert_v1_KeyToPath_To_core_KeyToPath(in *v1.KeyToPath, out *core.KeyToPath, s conversion.Scope) error { + out.Key = in.Key + out.Path = in.Path + out.Mode = (*int32)(unsafe.Pointer(in.Mode)) + return nil +} + +// Convert_v1_KeyToPath_To_core_KeyToPath is an autogenerated conversion function. +func Convert_v1_KeyToPath_To_core_KeyToPath(in *v1.KeyToPath, out *core.KeyToPath, s conversion.Scope) error { + return autoConvert_v1_KeyToPath_To_core_KeyToPath(in, out, s) +} + +func autoConvert_core_KeyToPath_To_v1_KeyToPath(in *core.KeyToPath, out *v1.KeyToPath, s conversion.Scope) error { + out.Key = in.Key + out.Path = in.Path + out.Mode = (*int32)(unsafe.Pointer(in.Mode)) + return nil +} + +// Convert_core_KeyToPath_To_v1_KeyToPath is an autogenerated conversion function. +func Convert_core_KeyToPath_To_v1_KeyToPath(in *core.KeyToPath, out *v1.KeyToPath, s conversion.Scope) error { + return autoConvert_core_KeyToPath_To_v1_KeyToPath(in, out, s) +} + +func autoConvert_v1_Lifecycle_To_core_Lifecycle(in *v1.Lifecycle, out *core.Lifecycle, s conversion.Scope) error { + out.PostStart = (*core.Handler)(unsafe.Pointer(in.PostStart)) + out.PreStop = (*core.Handler)(unsafe.Pointer(in.PreStop)) + return nil +} + +// Convert_v1_Lifecycle_To_core_Lifecycle is an autogenerated conversion function. +func Convert_v1_Lifecycle_To_core_Lifecycle(in *v1.Lifecycle, out *core.Lifecycle, s conversion.Scope) error { + return autoConvert_v1_Lifecycle_To_core_Lifecycle(in, out, s) +} + +func autoConvert_core_Lifecycle_To_v1_Lifecycle(in *core.Lifecycle, out *v1.Lifecycle, s conversion.Scope) error { + out.PostStart = (*v1.Handler)(unsafe.Pointer(in.PostStart)) + out.PreStop = (*v1.Handler)(unsafe.Pointer(in.PreStop)) + return nil +} + +// Convert_core_Lifecycle_To_v1_Lifecycle is an autogenerated conversion function. 
+func Convert_core_Lifecycle_To_v1_Lifecycle(in *core.Lifecycle, out *v1.Lifecycle, s conversion.Scope) error { + return autoConvert_core_Lifecycle_To_v1_Lifecycle(in, out, s) +} + +func autoConvert_v1_LimitRange_To_core_LimitRange(in *v1.LimitRange, out *core.LimitRange, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_LimitRangeSpec_To_core_LimitRangeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +// Convert_v1_LimitRange_To_core_LimitRange is an autogenerated conversion function. +func Convert_v1_LimitRange_To_core_LimitRange(in *v1.LimitRange, out *core.LimitRange, s conversion.Scope) error { + return autoConvert_v1_LimitRange_To_core_LimitRange(in, out, s) +} + +func autoConvert_core_LimitRange_To_v1_LimitRange(in *core.LimitRange, out *v1.LimitRange, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_LimitRangeSpec_To_v1_LimitRangeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +// Convert_core_LimitRange_To_v1_LimitRange is an autogenerated conversion function. +func Convert_core_LimitRange_To_v1_LimitRange(in *core.LimitRange, out *v1.LimitRange, s conversion.Scope) error { + return autoConvert_core_LimitRange_To_v1_LimitRange(in, out, s) +} + +func autoConvert_v1_LimitRangeItem_To_core_LimitRangeItem(in *v1.LimitRangeItem, out *core.LimitRangeItem, s conversion.Scope) error { + out.Type = core.LimitType(in.Type) + out.Max = *(*core.ResourceList)(unsafe.Pointer(&in.Max)) + out.Min = *(*core.ResourceList)(unsafe.Pointer(&in.Min)) + out.Default = *(*core.ResourceList)(unsafe.Pointer(&in.Default)) + out.DefaultRequest = *(*core.ResourceList)(unsafe.Pointer(&in.DefaultRequest)) + out.MaxLimitRequestRatio = *(*core.ResourceList)(unsafe.Pointer(&in.MaxLimitRequestRatio)) + return nil +} + +// Convert_v1_LimitRangeItem_To_core_LimitRangeItem is an autogenerated conversion function. +func Convert_v1_LimitRangeItem_To_core_LimitRangeItem(in *v1.LimitRangeItem, out *core.LimitRangeItem, s conversion.Scope) error { + return autoConvert_v1_LimitRangeItem_To_core_LimitRangeItem(in, out, s) +} + +func autoConvert_core_LimitRangeItem_To_v1_LimitRangeItem(in *core.LimitRangeItem, out *v1.LimitRangeItem, s conversion.Scope) error { + out.Type = v1.LimitType(in.Type) + out.Max = *(*v1.ResourceList)(unsafe.Pointer(&in.Max)) + out.Min = *(*v1.ResourceList)(unsafe.Pointer(&in.Min)) + out.Default = *(*v1.ResourceList)(unsafe.Pointer(&in.Default)) + out.DefaultRequest = *(*v1.ResourceList)(unsafe.Pointer(&in.DefaultRequest)) + out.MaxLimitRequestRatio = *(*v1.ResourceList)(unsafe.Pointer(&in.MaxLimitRequestRatio)) + return nil +} + +// Convert_core_LimitRangeItem_To_v1_LimitRangeItem is an autogenerated conversion function. +func Convert_core_LimitRangeItem_To_v1_LimitRangeItem(in *core.LimitRangeItem, out *v1.LimitRangeItem, s conversion.Scope) error { + return autoConvert_core_LimitRangeItem_To_v1_LimitRangeItem(in, out, s) +} + +func autoConvert_v1_LimitRangeList_To_core_LimitRangeList(in *v1.LimitRangeList, out *core.LimitRangeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.LimitRange)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_LimitRangeList_To_core_LimitRangeList is an autogenerated conversion function. 
+func Convert_v1_LimitRangeList_To_core_LimitRangeList(in *v1.LimitRangeList, out *core.LimitRangeList, s conversion.Scope) error { + return autoConvert_v1_LimitRangeList_To_core_LimitRangeList(in, out, s) +} + +func autoConvert_core_LimitRangeList_To_v1_LimitRangeList(in *core.LimitRangeList, out *v1.LimitRangeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1.LimitRange)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_LimitRangeList_To_v1_LimitRangeList is an autogenerated conversion function. +func Convert_core_LimitRangeList_To_v1_LimitRangeList(in *core.LimitRangeList, out *v1.LimitRangeList, s conversion.Scope) error { + return autoConvert_core_LimitRangeList_To_v1_LimitRangeList(in, out, s) +} + +func autoConvert_v1_LimitRangeSpec_To_core_LimitRangeSpec(in *v1.LimitRangeSpec, out *core.LimitRangeSpec, s conversion.Scope) error { + out.Limits = *(*[]core.LimitRangeItem)(unsafe.Pointer(&in.Limits)) + return nil +} + +// Convert_v1_LimitRangeSpec_To_core_LimitRangeSpec is an autogenerated conversion function. +func Convert_v1_LimitRangeSpec_To_core_LimitRangeSpec(in *v1.LimitRangeSpec, out *core.LimitRangeSpec, s conversion.Scope) error { + return autoConvert_v1_LimitRangeSpec_To_core_LimitRangeSpec(in, out, s) +} + +func autoConvert_core_LimitRangeSpec_To_v1_LimitRangeSpec(in *core.LimitRangeSpec, out *v1.LimitRangeSpec, s conversion.Scope) error { + out.Limits = *(*[]v1.LimitRangeItem)(unsafe.Pointer(&in.Limits)) + return nil +} + +// Convert_core_LimitRangeSpec_To_v1_LimitRangeSpec is an autogenerated conversion function. +func Convert_core_LimitRangeSpec_To_v1_LimitRangeSpec(in *core.LimitRangeSpec, out *v1.LimitRangeSpec, s conversion.Scope) error { + return autoConvert_core_LimitRangeSpec_To_v1_LimitRangeSpec(in, out, s) +} + +func autoConvert_v1_List_To_core_List(in *v1.List, out *core.List, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.Object, len(*in)) + for i := range *in { + if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_List_To_core_List is an autogenerated conversion function. +func Convert_v1_List_To_core_List(in *v1.List, out *core.List, s conversion.Scope) error { + return autoConvert_v1_List_To_core_List(in, out, s) +} + +func autoConvert_core_List_To_v1_List(in *core.List, out *v1.List, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.RawExtension, len(*in)) + for i := range *in { + if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_core_List_To_v1_List is an autogenerated conversion function. 
+func Convert_core_List_To_v1_List(in *core.List, out *v1.List, s conversion.Scope) error { + return autoConvert_core_List_To_v1_List(in, out, s) +} + +func autoConvert_v1_ListOptions_To_core_ListOptions(in *v1.ListOptions, out *core.ListOptions, s conversion.Scope) error { + if err := meta_v1.Convert_string_To_labels_Selector(&in.LabelSelector, &out.LabelSelector, s); err != nil { + return err + } + if err := meta_v1.Convert_string_To_fields_Selector(&in.FieldSelector, &out.FieldSelector, s); err != nil { + return err + } + out.IncludeUninitialized = in.IncludeUninitialized + out.Watch = in.Watch + out.ResourceVersion = in.ResourceVersion + out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) + return nil +} + +// Convert_v1_ListOptions_To_core_ListOptions is an autogenerated conversion function. +func Convert_v1_ListOptions_To_core_ListOptions(in *v1.ListOptions, out *core.ListOptions, s conversion.Scope) error { + return autoConvert_v1_ListOptions_To_core_ListOptions(in, out, s) +} + +func autoConvert_core_ListOptions_To_v1_ListOptions(in *core.ListOptions, out *v1.ListOptions, s conversion.Scope) error { + if err := meta_v1.Convert_labels_Selector_To_string(&in.LabelSelector, &out.LabelSelector, s); err != nil { + return err + } + if err := meta_v1.Convert_fields_Selector_To_string(&in.FieldSelector, &out.FieldSelector, s); err != nil { + return err + } + out.IncludeUninitialized = in.IncludeUninitialized + out.Watch = in.Watch + out.ResourceVersion = in.ResourceVersion + out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) + return nil +} + +// Convert_core_ListOptions_To_v1_ListOptions is an autogenerated conversion function. +func Convert_core_ListOptions_To_v1_ListOptions(in *core.ListOptions, out *v1.ListOptions, s conversion.Scope) error { + return autoConvert_core_ListOptions_To_v1_ListOptions(in, out, s) +} + +func autoConvert_v1_LoadBalancerIngress_To_core_LoadBalancerIngress(in *v1.LoadBalancerIngress, out *core.LoadBalancerIngress, s conversion.Scope) error { + out.IP = in.IP + out.Hostname = in.Hostname + return nil +} + +// Convert_v1_LoadBalancerIngress_To_core_LoadBalancerIngress is an autogenerated conversion function. +func Convert_v1_LoadBalancerIngress_To_core_LoadBalancerIngress(in *v1.LoadBalancerIngress, out *core.LoadBalancerIngress, s conversion.Scope) error { + return autoConvert_v1_LoadBalancerIngress_To_core_LoadBalancerIngress(in, out, s) +} + +func autoConvert_core_LoadBalancerIngress_To_v1_LoadBalancerIngress(in *core.LoadBalancerIngress, out *v1.LoadBalancerIngress, s conversion.Scope) error { + out.IP = in.IP + out.Hostname = in.Hostname + return nil +} + +// Convert_core_LoadBalancerIngress_To_v1_LoadBalancerIngress is an autogenerated conversion function. +func Convert_core_LoadBalancerIngress_To_v1_LoadBalancerIngress(in *core.LoadBalancerIngress, out *v1.LoadBalancerIngress, s conversion.Scope) error { + return autoConvert_core_LoadBalancerIngress_To_v1_LoadBalancerIngress(in, out, s) +} + +func autoConvert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus(in *v1.LoadBalancerStatus, out *core.LoadBalancerStatus, s conversion.Scope) error { + out.Ingress = *(*[]core.LoadBalancerIngress)(unsafe.Pointer(&in.Ingress)) + return nil +} + +// Convert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus is an autogenerated conversion function. 
+func Convert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus(in *v1.LoadBalancerStatus, out *core.LoadBalancerStatus, s conversion.Scope) error { + return autoConvert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus(in, out, s) +} + +func autoConvert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus(in *core.LoadBalancerStatus, out *v1.LoadBalancerStatus, s conversion.Scope) error { + out.Ingress = *(*[]v1.LoadBalancerIngress)(unsafe.Pointer(&in.Ingress)) + return nil +} + +// Convert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus is an autogenerated conversion function. +func Convert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus(in *core.LoadBalancerStatus, out *v1.LoadBalancerStatus, s conversion.Scope) error { + return autoConvert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus(in, out, s) +} + +func autoConvert_v1_LocalObjectReference_To_core_LocalObjectReference(in *v1.LocalObjectReference, out *core.LocalObjectReference, s conversion.Scope) error { + out.Name = in.Name + return nil +} + +// Convert_v1_LocalObjectReference_To_core_LocalObjectReference is an autogenerated conversion function. +func Convert_v1_LocalObjectReference_To_core_LocalObjectReference(in *v1.LocalObjectReference, out *core.LocalObjectReference, s conversion.Scope) error { + return autoConvert_v1_LocalObjectReference_To_core_LocalObjectReference(in, out, s) +} + +func autoConvert_core_LocalObjectReference_To_v1_LocalObjectReference(in *core.LocalObjectReference, out *v1.LocalObjectReference, s conversion.Scope) error { + out.Name = in.Name + return nil +} + +// Convert_core_LocalObjectReference_To_v1_LocalObjectReference is an autogenerated conversion function. +func Convert_core_LocalObjectReference_To_v1_LocalObjectReference(in *core.LocalObjectReference, out *v1.LocalObjectReference, s conversion.Scope) error { + return autoConvert_core_LocalObjectReference_To_v1_LocalObjectReference(in, out, s) +} + +func autoConvert_v1_LocalVolumeSource_To_core_LocalVolumeSource(in *v1.LocalVolumeSource, out *core.LocalVolumeSource, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_v1_LocalVolumeSource_To_core_LocalVolumeSource is an autogenerated conversion function. +func Convert_v1_LocalVolumeSource_To_core_LocalVolumeSource(in *v1.LocalVolumeSource, out *core.LocalVolumeSource, s conversion.Scope) error { + return autoConvert_v1_LocalVolumeSource_To_core_LocalVolumeSource(in, out, s) +} + +func autoConvert_core_LocalVolumeSource_To_v1_LocalVolumeSource(in *core.LocalVolumeSource, out *v1.LocalVolumeSource, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_core_LocalVolumeSource_To_v1_LocalVolumeSource is an autogenerated conversion function. +func Convert_core_LocalVolumeSource_To_v1_LocalVolumeSource(in *core.LocalVolumeSource, out *v1.LocalVolumeSource, s conversion.Scope) error { + return autoConvert_core_LocalVolumeSource_To_v1_LocalVolumeSource(in, out, s) +} + +func autoConvert_v1_NFSVolumeSource_To_core_NFSVolumeSource(in *v1.NFSVolumeSource, out *core.NFSVolumeSource, s conversion.Scope) error { + out.Server = in.Server + out.Path = in.Path + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_NFSVolumeSource_To_core_NFSVolumeSource is an autogenerated conversion function. 
+func Convert_v1_NFSVolumeSource_To_core_NFSVolumeSource(in *v1.NFSVolumeSource, out *core.NFSVolumeSource, s conversion.Scope) error { + return autoConvert_v1_NFSVolumeSource_To_core_NFSVolumeSource(in, out, s) +} + +func autoConvert_core_NFSVolumeSource_To_v1_NFSVolumeSource(in *core.NFSVolumeSource, out *v1.NFSVolumeSource, s conversion.Scope) error { + out.Server = in.Server + out.Path = in.Path + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_NFSVolumeSource_To_v1_NFSVolumeSource is an autogenerated conversion function. +func Convert_core_NFSVolumeSource_To_v1_NFSVolumeSource(in *core.NFSVolumeSource, out *v1.NFSVolumeSource, s conversion.Scope) error { + return autoConvert_core_NFSVolumeSource_To_v1_NFSVolumeSource(in, out, s) +} + +func autoConvert_v1_Namespace_To_core_Namespace(in *v1.Namespace, out *core.Namespace, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_NamespaceSpec_To_core_NamespaceSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_NamespaceStatus_To_core_NamespaceStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_Namespace_To_core_Namespace is an autogenerated conversion function. +func Convert_v1_Namespace_To_core_Namespace(in *v1.Namespace, out *core.Namespace, s conversion.Scope) error { + return autoConvert_v1_Namespace_To_core_Namespace(in, out, s) +} + +func autoConvert_core_Namespace_To_v1_Namespace(in *core.Namespace, out *v1.Namespace, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_NamespaceSpec_To_v1_NamespaceSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_core_NamespaceStatus_To_v1_NamespaceStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_core_Namespace_To_v1_Namespace is an autogenerated conversion function. +func Convert_core_Namespace_To_v1_Namespace(in *core.Namespace, out *v1.Namespace, s conversion.Scope) error { + return autoConvert_core_Namespace_To_v1_Namespace(in, out, s) +} + +func autoConvert_v1_NamespaceList_To_core_NamespaceList(in *v1.NamespaceList, out *core.NamespaceList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.Namespace)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_NamespaceList_To_core_NamespaceList is an autogenerated conversion function. +func Convert_v1_NamespaceList_To_core_NamespaceList(in *v1.NamespaceList, out *core.NamespaceList, s conversion.Scope) error { + return autoConvert_v1_NamespaceList_To_core_NamespaceList(in, out, s) +} + +func autoConvert_core_NamespaceList_To_v1_NamespaceList(in *core.NamespaceList, out *v1.NamespaceList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1.Namespace)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_NamespaceList_To_v1_NamespaceList is an autogenerated conversion function. +func Convert_core_NamespaceList_To_v1_NamespaceList(in *core.NamespaceList, out *v1.NamespaceList, s conversion.Scope) error { + return autoConvert_core_NamespaceList_To_v1_NamespaceList(in, out, s) +} + +func autoConvert_v1_NamespaceSpec_To_core_NamespaceSpec(in *v1.NamespaceSpec, out *core.NamespaceSpec, s conversion.Scope) error { + out.Finalizers = *(*[]core.FinalizerName)(unsafe.Pointer(&in.Finalizers)) + return nil +} + +// Convert_v1_NamespaceSpec_To_core_NamespaceSpec is an autogenerated conversion function. 
+func Convert_v1_NamespaceSpec_To_core_NamespaceSpec(in *v1.NamespaceSpec, out *core.NamespaceSpec, s conversion.Scope) error { + return autoConvert_v1_NamespaceSpec_To_core_NamespaceSpec(in, out, s) +} + +func autoConvert_core_NamespaceSpec_To_v1_NamespaceSpec(in *core.NamespaceSpec, out *v1.NamespaceSpec, s conversion.Scope) error { + out.Finalizers = *(*[]v1.FinalizerName)(unsafe.Pointer(&in.Finalizers)) + return nil +} + +// Convert_core_NamespaceSpec_To_v1_NamespaceSpec is an autogenerated conversion function. +func Convert_core_NamespaceSpec_To_v1_NamespaceSpec(in *core.NamespaceSpec, out *v1.NamespaceSpec, s conversion.Scope) error { + return autoConvert_core_NamespaceSpec_To_v1_NamespaceSpec(in, out, s) +} + +func autoConvert_v1_NamespaceStatus_To_core_NamespaceStatus(in *v1.NamespaceStatus, out *core.NamespaceStatus, s conversion.Scope) error { + out.Phase = core.NamespacePhase(in.Phase) + return nil +} + +// Convert_v1_NamespaceStatus_To_core_NamespaceStatus is an autogenerated conversion function. +func Convert_v1_NamespaceStatus_To_core_NamespaceStatus(in *v1.NamespaceStatus, out *core.NamespaceStatus, s conversion.Scope) error { + return autoConvert_v1_NamespaceStatus_To_core_NamespaceStatus(in, out, s) +} + +func autoConvert_core_NamespaceStatus_To_v1_NamespaceStatus(in *core.NamespaceStatus, out *v1.NamespaceStatus, s conversion.Scope) error { + out.Phase = v1.NamespacePhase(in.Phase) + return nil +} + +// Convert_core_NamespaceStatus_To_v1_NamespaceStatus is an autogenerated conversion function. +func Convert_core_NamespaceStatus_To_v1_NamespaceStatus(in *core.NamespaceStatus, out *v1.NamespaceStatus, s conversion.Scope) error { + return autoConvert_core_NamespaceStatus_To_v1_NamespaceStatus(in, out, s) +} + +func autoConvert_v1_Node_To_core_Node(in *v1.Node, out *core.Node, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_NodeSpec_To_core_NodeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_NodeStatus_To_core_NodeStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_Node_To_core_Node is an autogenerated conversion function. +func Convert_v1_Node_To_core_Node(in *v1.Node, out *core.Node, s conversion.Scope) error { + return autoConvert_v1_Node_To_core_Node(in, out, s) +} + +func autoConvert_core_Node_To_v1_Node(in *core.Node, out *v1.Node, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_NodeSpec_To_v1_NodeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_core_NodeStatus_To_v1_NodeStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_core_Node_To_v1_Node is an autogenerated conversion function. +func Convert_core_Node_To_v1_Node(in *core.Node, out *v1.Node, s conversion.Scope) error { + return autoConvert_core_Node_To_v1_Node(in, out, s) +} + +func autoConvert_v1_NodeAddress_To_core_NodeAddress(in *v1.NodeAddress, out *core.NodeAddress, s conversion.Scope) error { + out.Type = core.NodeAddressType(in.Type) + out.Address = in.Address + return nil +} + +// Convert_v1_NodeAddress_To_core_NodeAddress is an autogenerated conversion function. 
+func Convert_v1_NodeAddress_To_core_NodeAddress(in *v1.NodeAddress, out *core.NodeAddress, s conversion.Scope) error { + return autoConvert_v1_NodeAddress_To_core_NodeAddress(in, out, s) +} + +func autoConvert_core_NodeAddress_To_v1_NodeAddress(in *core.NodeAddress, out *v1.NodeAddress, s conversion.Scope) error { + out.Type = v1.NodeAddressType(in.Type) + out.Address = in.Address + return nil +} + +// Convert_core_NodeAddress_To_v1_NodeAddress is an autogenerated conversion function. +func Convert_core_NodeAddress_To_v1_NodeAddress(in *core.NodeAddress, out *v1.NodeAddress, s conversion.Scope) error { + return autoConvert_core_NodeAddress_To_v1_NodeAddress(in, out, s) +} + +func autoConvert_v1_NodeAffinity_To_core_NodeAffinity(in *v1.NodeAffinity, out *core.NodeAffinity, s conversion.Scope) error { + out.RequiredDuringSchedulingIgnoredDuringExecution = (*core.NodeSelector)(unsafe.Pointer(in.RequiredDuringSchedulingIgnoredDuringExecution)) + out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]core.PreferredSchedulingTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) + return nil +} + +// Convert_v1_NodeAffinity_To_core_NodeAffinity is an autogenerated conversion function. +func Convert_v1_NodeAffinity_To_core_NodeAffinity(in *v1.NodeAffinity, out *core.NodeAffinity, s conversion.Scope) error { + return autoConvert_v1_NodeAffinity_To_core_NodeAffinity(in, out, s) +} + +func autoConvert_core_NodeAffinity_To_v1_NodeAffinity(in *core.NodeAffinity, out *v1.NodeAffinity, s conversion.Scope) error { + out.RequiredDuringSchedulingIgnoredDuringExecution = (*v1.NodeSelector)(unsafe.Pointer(in.RequiredDuringSchedulingIgnoredDuringExecution)) + out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]v1.PreferredSchedulingTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) + return nil +} + +// Convert_core_NodeAffinity_To_v1_NodeAffinity is an autogenerated conversion function. +func Convert_core_NodeAffinity_To_v1_NodeAffinity(in *core.NodeAffinity, out *v1.NodeAffinity, s conversion.Scope) error { + return autoConvert_core_NodeAffinity_To_v1_NodeAffinity(in, out, s) +} + +func autoConvert_v1_NodeCondition_To_core_NodeCondition(in *v1.NodeCondition, out *core.NodeCondition, s conversion.Scope) error { + out.Type = core.NodeConditionType(in.Type) + out.Status = core.ConditionStatus(in.Status) + out.LastHeartbeatTime = in.LastHeartbeatTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1_NodeCondition_To_core_NodeCondition is an autogenerated conversion function. +func Convert_v1_NodeCondition_To_core_NodeCondition(in *v1.NodeCondition, out *core.NodeCondition, s conversion.Scope) error { + return autoConvert_v1_NodeCondition_To_core_NodeCondition(in, out, s) +} + +func autoConvert_core_NodeCondition_To_v1_NodeCondition(in *core.NodeCondition, out *v1.NodeCondition, s conversion.Scope) error { + out.Type = v1.NodeConditionType(in.Type) + out.Status = v1.ConditionStatus(in.Status) + out.LastHeartbeatTime = in.LastHeartbeatTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_core_NodeCondition_To_v1_NodeCondition is an autogenerated conversion function. 
+func Convert_core_NodeCondition_To_v1_NodeCondition(in *core.NodeCondition, out *v1.NodeCondition, s conversion.Scope) error { + return autoConvert_core_NodeCondition_To_v1_NodeCondition(in, out, s) +} + +func autoConvert_v1_NodeConfigSource_To_core_NodeConfigSource(in *v1.NodeConfigSource, out *core.NodeConfigSource, s conversion.Scope) error { + out.ConfigMapRef = (*core.ObjectReference)(unsafe.Pointer(in.ConfigMapRef)) + return nil +} + +// Convert_v1_NodeConfigSource_To_core_NodeConfigSource is an autogenerated conversion function. +func Convert_v1_NodeConfigSource_To_core_NodeConfigSource(in *v1.NodeConfigSource, out *core.NodeConfigSource, s conversion.Scope) error { + return autoConvert_v1_NodeConfigSource_To_core_NodeConfigSource(in, out, s) +} + +func autoConvert_core_NodeConfigSource_To_v1_NodeConfigSource(in *core.NodeConfigSource, out *v1.NodeConfigSource, s conversion.Scope) error { + out.ConfigMapRef = (*v1.ObjectReference)(unsafe.Pointer(in.ConfigMapRef)) + return nil +} + +// Convert_core_NodeConfigSource_To_v1_NodeConfigSource is an autogenerated conversion function. +func Convert_core_NodeConfigSource_To_v1_NodeConfigSource(in *core.NodeConfigSource, out *v1.NodeConfigSource, s conversion.Scope) error { + return autoConvert_core_NodeConfigSource_To_v1_NodeConfigSource(in, out, s) +} + +func autoConvert_v1_NodeDaemonEndpoints_To_core_NodeDaemonEndpoints(in *v1.NodeDaemonEndpoints, out *core.NodeDaemonEndpoints, s conversion.Scope) error { + if err := Convert_v1_DaemonEndpoint_To_core_DaemonEndpoint(&in.KubeletEndpoint, &out.KubeletEndpoint, s); err != nil { + return err + } + return nil +} + +// Convert_v1_NodeDaemonEndpoints_To_core_NodeDaemonEndpoints is an autogenerated conversion function. +func Convert_v1_NodeDaemonEndpoints_To_core_NodeDaemonEndpoints(in *v1.NodeDaemonEndpoints, out *core.NodeDaemonEndpoints, s conversion.Scope) error { + return autoConvert_v1_NodeDaemonEndpoints_To_core_NodeDaemonEndpoints(in, out, s) +} + +func autoConvert_core_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in *core.NodeDaemonEndpoints, out *v1.NodeDaemonEndpoints, s conversion.Scope) error { + if err := Convert_core_DaemonEndpoint_To_v1_DaemonEndpoint(&in.KubeletEndpoint, &out.KubeletEndpoint, s); err != nil { + return err + } + return nil +} + +// Convert_core_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints is an autogenerated conversion function. +func Convert_core_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in *core.NodeDaemonEndpoints, out *v1.NodeDaemonEndpoints, s conversion.Scope) error { + return autoConvert_core_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in, out, s) +} + +func autoConvert_v1_NodeList_To_core_NodeList(in *v1.NodeList, out *core.NodeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.Node)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_NodeList_To_core_NodeList is an autogenerated conversion function. +func Convert_v1_NodeList_To_core_NodeList(in *v1.NodeList, out *core.NodeList, s conversion.Scope) error { + return autoConvert_v1_NodeList_To_core_NodeList(in, out, s) +} + +func autoConvert_core_NodeList_To_v1_NodeList(in *core.NodeList, out *v1.NodeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1.Node)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_NodeList_To_v1_NodeList is an autogenerated conversion function. 
+func Convert_core_NodeList_To_v1_NodeList(in *core.NodeList, out *v1.NodeList, s conversion.Scope) error { + return autoConvert_core_NodeList_To_v1_NodeList(in, out, s) +} + +func autoConvert_v1_NodeProxyOptions_To_core_NodeProxyOptions(in *v1.NodeProxyOptions, out *core.NodeProxyOptions, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_v1_NodeProxyOptions_To_core_NodeProxyOptions is an autogenerated conversion function. +func Convert_v1_NodeProxyOptions_To_core_NodeProxyOptions(in *v1.NodeProxyOptions, out *core.NodeProxyOptions, s conversion.Scope) error { + return autoConvert_v1_NodeProxyOptions_To_core_NodeProxyOptions(in, out, s) +} + +func autoConvert_core_NodeProxyOptions_To_v1_NodeProxyOptions(in *core.NodeProxyOptions, out *v1.NodeProxyOptions, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_core_NodeProxyOptions_To_v1_NodeProxyOptions is an autogenerated conversion function. +func Convert_core_NodeProxyOptions_To_v1_NodeProxyOptions(in *core.NodeProxyOptions, out *v1.NodeProxyOptions, s conversion.Scope) error { + return autoConvert_core_NodeProxyOptions_To_v1_NodeProxyOptions(in, out, s) +} + +func autoConvert_v1_NodeResources_To_core_NodeResources(in *v1.NodeResources, out *core.NodeResources, s conversion.Scope) error { + out.Capacity = *(*core.ResourceList)(unsafe.Pointer(&in.Capacity)) + return nil +} + +// Convert_v1_NodeResources_To_core_NodeResources is an autogenerated conversion function. +func Convert_v1_NodeResources_To_core_NodeResources(in *v1.NodeResources, out *core.NodeResources, s conversion.Scope) error { + return autoConvert_v1_NodeResources_To_core_NodeResources(in, out, s) +} + +func autoConvert_core_NodeResources_To_v1_NodeResources(in *core.NodeResources, out *v1.NodeResources, s conversion.Scope) error { + out.Capacity = *(*v1.ResourceList)(unsafe.Pointer(&in.Capacity)) + return nil +} + +// Convert_core_NodeResources_To_v1_NodeResources is an autogenerated conversion function. +func Convert_core_NodeResources_To_v1_NodeResources(in *core.NodeResources, out *v1.NodeResources, s conversion.Scope) error { + return autoConvert_core_NodeResources_To_v1_NodeResources(in, out, s) +} + +func autoConvert_v1_NodeSelector_To_core_NodeSelector(in *v1.NodeSelector, out *core.NodeSelector, s conversion.Scope) error { + out.NodeSelectorTerms = *(*[]core.NodeSelectorTerm)(unsafe.Pointer(&in.NodeSelectorTerms)) + return nil +} + +// Convert_v1_NodeSelector_To_core_NodeSelector is an autogenerated conversion function. +func Convert_v1_NodeSelector_To_core_NodeSelector(in *v1.NodeSelector, out *core.NodeSelector, s conversion.Scope) error { + return autoConvert_v1_NodeSelector_To_core_NodeSelector(in, out, s) +} + +func autoConvert_core_NodeSelector_To_v1_NodeSelector(in *core.NodeSelector, out *v1.NodeSelector, s conversion.Scope) error { + out.NodeSelectorTerms = *(*[]v1.NodeSelectorTerm)(unsafe.Pointer(&in.NodeSelectorTerms)) + return nil +} + +// Convert_core_NodeSelector_To_v1_NodeSelector is an autogenerated conversion function. 
+func Convert_core_NodeSelector_To_v1_NodeSelector(in *core.NodeSelector, out *v1.NodeSelector, s conversion.Scope) error { + return autoConvert_core_NodeSelector_To_v1_NodeSelector(in, out, s) +} + +func autoConvert_v1_NodeSelectorRequirement_To_core_NodeSelectorRequirement(in *v1.NodeSelectorRequirement, out *core.NodeSelectorRequirement, s conversion.Scope) error { + out.Key = in.Key + out.Operator = core.NodeSelectorOperator(in.Operator) + out.Values = *(*[]string)(unsafe.Pointer(&in.Values)) + return nil +} + +// Convert_v1_NodeSelectorRequirement_To_core_NodeSelectorRequirement is an autogenerated conversion function. +func Convert_v1_NodeSelectorRequirement_To_core_NodeSelectorRequirement(in *v1.NodeSelectorRequirement, out *core.NodeSelectorRequirement, s conversion.Scope) error { + return autoConvert_v1_NodeSelectorRequirement_To_core_NodeSelectorRequirement(in, out, s) +} + +func autoConvert_core_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in *core.NodeSelectorRequirement, out *v1.NodeSelectorRequirement, s conversion.Scope) error { + out.Key = in.Key + out.Operator = v1.NodeSelectorOperator(in.Operator) + out.Values = *(*[]string)(unsafe.Pointer(&in.Values)) + return nil +} + +// Convert_core_NodeSelectorRequirement_To_v1_NodeSelectorRequirement is an autogenerated conversion function. +func Convert_core_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in *core.NodeSelectorRequirement, out *v1.NodeSelectorRequirement, s conversion.Scope) error { + return autoConvert_core_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in, out, s) +} + +func autoConvert_v1_NodeSelectorTerm_To_core_NodeSelectorTerm(in *v1.NodeSelectorTerm, out *core.NodeSelectorTerm, s conversion.Scope) error { + out.MatchExpressions = *(*[]core.NodeSelectorRequirement)(unsafe.Pointer(&in.MatchExpressions)) + return nil +} + +// Convert_v1_NodeSelectorTerm_To_core_NodeSelectorTerm is an autogenerated conversion function. +func Convert_v1_NodeSelectorTerm_To_core_NodeSelectorTerm(in *v1.NodeSelectorTerm, out *core.NodeSelectorTerm, s conversion.Scope) error { + return autoConvert_v1_NodeSelectorTerm_To_core_NodeSelectorTerm(in, out, s) +} + +func autoConvert_core_NodeSelectorTerm_To_v1_NodeSelectorTerm(in *core.NodeSelectorTerm, out *v1.NodeSelectorTerm, s conversion.Scope) error { + out.MatchExpressions = *(*[]v1.NodeSelectorRequirement)(unsafe.Pointer(&in.MatchExpressions)) + return nil +} + +// Convert_core_NodeSelectorTerm_To_v1_NodeSelectorTerm is an autogenerated conversion function. +func Convert_core_NodeSelectorTerm_To_v1_NodeSelectorTerm(in *core.NodeSelectorTerm, out *v1.NodeSelectorTerm, s conversion.Scope) error { + return autoConvert_core_NodeSelectorTerm_To_v1_NodeSelectorTerm(in, out, s) +} + +func autoConvert_v1_NodeSpec_To_core_NodeSpec(in *v1.NodeSpec, out *core.NodeSpec, s conversion.Scope) error { + out.PodCIDR = in.PodCIDR + out.ExternalID = in.ExternalID + out.ProviderID = in.ProviderID + out.Unschedulable = in.Unschedulable + out.Taints = *(*[]core.Taint)(unsafe.Pointer(&in.Taints)) + out.ConfigSource = (*core.NodeConfigSource)(unsafe.Pointer(in.ConfigSource)) + return nil +} + +// Convert_v1_NodeSpec_To_core_NodeSpec is an autogenerated conversion function. 
+func Convert_v1_NodeSpec_To_core_NodeSpec(in *v1.NodeSpec, out *core.NodeSpec, s conversion.Scope) error { + return autoConvert_v1_NodeSpec_To_core_NodeSpec(in, out, s) +} + +func autoConvert_core_NodeSpec_To_v1_NodeSpec(in *core.NodeSpec, out *v1.NodeSpec, s conversion.Scope) error { + out.PodCIDR = in.PodCIDR + out.ExternalID = in.ExternalID + out.ProviderID = in.ProviderID + out.Unschedulable = in.Unschedulable + out.Taints = *(*[]v1.Taint)(unsafe.Pointer(&in.Taints)) + out.ConfigSource = (*v1.NodeConfigSource)(unsafe.Pointer(in.ConfigSource)) + return nil +} + +// Convert_core_NodeSpec_To_v1_NodeSpec is an autogenerated conversion function. +func Convert_core_NodeSpec_To_v1_NodeSpec(in *core.NodeSpec, out *v1.NodeSpec, s conversion.Scope) error { + return autoConvert_core_NodeSpec_To_v1_NodeSpec(in, out, s) +} + +func autoConvert_v1_NodeStatus_To_core_NodeStatus(in *v1.NodeStatus, out *core.NodeStatus, s conversion.Scope) error { + out.Capacity = *(*core.ResourceList)(unsafe.Pointer(&in.Capacity)) + out.Allocatable = *(*core.ResourceList)(unsafe.Pointer(&in.Allocatable)) + out.Phase = core.NodePhase(in.Phase) + out.Conditions = *(*[]core.NodeCondition)(unsafe.Pointer(&in.Conditions)) + out.Addresses = *(*[]core.NodeAddress)(unsafe.Pointer(&in.Addresses)) + if err := Convert_v1_NodeDaemonEndpoints_To_core_NodeDaemonEndpoints(&in.DaemonEndpoints, &out.DaemonEndpoints, s); err != nil { + return err + } + if err := Convert_v1_NodeSystemInfo_To_core_NodeSystemInfo(&in.NodeInfo, &out.NodeInfo, s); err != nil { + return err + } + out.Images = *(*[]core.ContainerImage)(unsafe.Pointer(&in.Images)) + out.VolumesInUse = *(*[]core.UniqueVolumeName)(unsafe.Pointer(&in.VolumesInUse)) + out.VolumesAttached = *(*[]core.AttachedVolume)(unsafe.Pointer(&in.VolumesAttached)) + return nil +} + +// Convert_v1_NodeStatus_To_core_NodeStatus is an autogenerated conversion function. +func Convert_v1_NodeStatus_To_core_NodeStatus(in *v1.NodeStatus, out *core.NodeStatus, s conversion.Scope) error { + return autoConvert_v1_NodeStatus_To_core_NodeStatus(in, out, s) +} + +func autoConvert_core_NodeStatus_To_v1_NodeStatus(in *core.NodeStatus, out *v1.NodeStatus, s conversion.Scope) error { + out.Capacity = *(*v1.ResourceList)(unsafe.Pointer(&in.Capacity)) + out.Allocatable = *(*v1.ResourceList)(unsafe.Pointer(&in.Allocatable)) + out.Phase = v1.NodePhase(in.Phase) + out.Conditions = *(*[]v1.NodeCondition)(unsafe.Pointer(&in.Conditions)) + out.Addresses = *(*[]v1.NodeAddress)(unsafe.Pointer(&in.Addresses)) + if err := Convert_core_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(&in.DaemonEndpoints, &out.DaemonEndpoints, s); err != nil { + return err + } + if err := Convert_core_NodeSystemInfo_To_v1_NodeSystemInfo(&in.NodeInfo, &out.NodeInfo, s); err != nil { + return err + } + out.Images = *(*[]v1.ContainerImage)(unsafe.Pointer(&in.Images)) + out.VolumesInUse = *(*[]v1.UniqueVolumeName)(unsafe.Pointer(&in.VolumesInUse)) + out.VolumesAttached = *(*[]v1.AttachedVolume)(unsafe.Pointer(&in.VolumesAttached)) + return nil +} + +// Convert_core_NodeStatus_To_v1_NodeStatus is an autogenerated conversion function. 
+func Convert_core_NodeStatus_To_v1_NodeStatus(in *core.NodeStatus, out *v1.NodeStatus, s conversion.Scope) error { + return autoConvert_core_NodeStatus_To_v1_NodeStatus(in, out, s) +} + +func autoConvert_v1_NodeSystemInfo_To_core_NodeSystemInfo(in *v1.NodeSystemInfo, out *core.NodeSystemInfo, s conversion.Scope) error { + out.MachineID = in.MachineID + out.SystemUUID = in.SystemUUID + out.BootID = in.BootID + out.KernelVersion = in.KernelVersion + out.OSImage = in.OSImage + out.ContainerRuntimeVersion = in.ContainerRuntimeVersion + out.KubeletVersion = in.KubeletVersion + out.KubeProxyVersion = in.KubeProxyVersion + out.OperatingSystem = in.OperatingSystem + out.Architecture = in.Architecture + return nil +} + +// Convert_v1_NodeSystemInfo_To_core_NodeSystemInfo is an autogenerated conversion function. +func Convert_v1_NodeSystemInfo_To_core_NodeSystemInfo(in *v1.NodeSystemInfo, out *core.NodeSystemInfo, s conversion.Scope) error { + return autoConvert_v1_NodeSystemInfo_To_core_NodeSystemInfo(in, out, s) +} + +func autoConvert_core_NodeSystemInfo_To_v1_NodeSystemInfo(in *core.NodeSystemInfo, out *v1.NodeSystemInfo, s conversion.Scope) error { + out.MachineID = in.MachineID + out.SystemUUID = in.SystemUUID + out.BootID = in.BootID + out.KernelVersion = in.KernelVersion + out.OSImage = in.OSImage + out.ContainerRuntimeVersion = in.ContainerRuntimeVersion + out.KubeletVersion = in.KubeletVersion + out.KubeProxyVersion = in.KubeProxyVersion + out.OperatingSystem = in.OperatingSystem + out.Architecture = in.Architecture + return nil +} + +// Convert_core_NodeSystemInfo_To_v1_NodeSystemInfo is an autogenerated conversion function. +func Convert_core_NodeSystemInfo_To_v1_NodeSystemInfo(in *core.NodeSystemInfo, out *v1.NodeSystemInfo, s conversion.Scope) error { + return autoConvert_core_NodeSystemInfo_To_v1_NodeSystemInfo(in, out, s) +} + +func autoConvert_v1_ObjectFieldSelector_To_core_ObjectFieldSelector(in *v1.ObjectFieldSelector, out *core.ObjectFieldSelector, s conversion.Scope) error { + out.APIVersion = in.APIVersion + out.FieldPath = in.FieldPath + return nil +} + +// Convert_v1_ObjectFieldSelector_To_core_ObjectFieldSelector is an autogenerated conversion function. +func Convert_v1_ObjectFieldSelector_To_core_ObjectFieldSelector(in *v1.ObjectFieldSelector, out *core.ObjectFieldSelector, s conversion.Scope) error { + return autoConvert_v1_ObjectFieldSelector_To_core_ObjectFieldSelector(in, out, s) +} + +func autoConvert_core_ObjectFieldSelector_To_v1_ObjectFieldSelector(in *core.ObjectFieldSelector, out *v1.ObjectFieldSelector, s conversion.Scope) error { + out.APIVersion = in.APIVersion + out.FieldPath = in.FieldPath + return nil +} + +// Convert_core_ObjectFieldSelector_To_v1_ObjectFieldSelector is an autogenerated conversion function. 
+func Convert_core_ObjectFieldSelector_To_v1_ObjectFieldSelector(in *core.ObjectFieldSelector, out *v1.ObjectFieldSelector, s conversion.Scope) error { + return autoConvert_core_ObjectFieldSelector_To_v1_ObjectFieldSelector(in, out, s) +} + +func autoConvert_v1_ObjectMeta_To_core_ObjectMeta(in *v1.ObjectMeta, out *core.ObjectMeta, s conversion.Scope) error { + out.Name = in.Name + out.GenerateName = in.GenerateName + out.Namespace = in.Namespace + out.SelfLink = in.SelfLink + out.UID = types.UID(in.UID) + out.ResourceVersion = in.ResourceVersion + out.Generation = in.Generation + out.CreationTimestamp = in.CreationTimestamp + out.DeletionTimestamp = (*meta_v1.Time)(unsafe.Pointer(in.DeletionTimestamp)) + out.DeletionGracePeriodSeconds = (*int64)(unsafe.Pointer(in.DeletionGracePeriodSeconds)) + out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels)) + out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations)) + out.OwnerReferences = *(*[]meta_v1.OwnerReference)(unsafe.Pointer(&in.OwnerReferences)) + out.Initializers = (*meta_v1.Initializers)(unsafe.Pointer(in.Initializers)) + out.Finalizers = *(*[]string)(unsafe.Pointer(&in.Finalizers)) + out.ClusterName = in.ClusterName + return nil +} + +// Convert_v1_ObjectMeta_To_core_ObjectMeta is an autogenerated conversion function. +func Convert_v1_ObjectMeta_To_core_ObjectMeta(in *v1.ObjectMeta, out *core.ObjectMeta, s conversion.Scope) error { + return autoConvert_v1_ObjectMeta_To_core_ObjectMeta(in, out, s) +} + +func autoConvert_core_ObjectMeta_To_v1_ObjectMeta(in *core.ObjectMeta, out *v1.ObjectMeta, s conversion.Scope) error { + out.Name = in.Name + out.GenerateName = in.GenerateName + out.Namespace = in.Namespace + out.SelfLink = in.SelfLink + out.UID = types.UID(in.UID) + out.ResourceVersion = in.ResourceVersion + out.Generation = in.Generation + out.CreationTimestamp = in.CreationTimestamp + out.DeletionTimestamp = (*meta_v1.Time)(unsafe.Pointer(in.DeletionTimestamp)) + out.DeletionGracePeriodSeconds = (*int64)(unsafe.Pointer(in.DeletionGracePeriodSeconds)) + out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels)) + out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations)) + out.OwnerReferences = *(*[]meta_v1.OwnerReference)(unsafe.Pointer(&in.OwnerReferences)) + out.Initializers = (*meta_v1.Initializers)(unsafe.Pointer(in.Initializers)) + out.Finalizers = *(*[]string)(unsafe.Pointer(&in.Finalizers)) + out.ClusterName = in.ClusterName + return nil +} + +// Convert_core_ObjectMeta_To_v1_ObjectMeta is an autogenerated conversion function. +func Convert_core_ObjectMeta_To_v1_ObjectMeta(in *core.ObjectMeta, out *v1.ObjectMeta, s conversion.Scope) error { + return autoConvert_core_ObjectMeta_To_v1_ObjectMeta(in, out, s) +} + +func autoConvert_v1_ObjectReference_To_core_ObjectReference(in *v1.ObjectReference, out *core.ObjectReference, s conversion.Scope) error { + out.Kind = in.Kind + out.Namespace = in.Namespace + out.Name = in.Name + out.UID = types.UID(in.UID) + out.APIVersion = in.APIVersion + out.ResourceVersion = in.ResourceVersion + out.FieldPath = in.FieldPath + return nil +} + +// Convert_v1_ObjectReference_To_core_ObjectReference is an autogenerated conversion function. 
+func Convert_v1_ObjectReference_To_core_ObjectReference(in *v1.ObjectReference, out *core.ObjectReference, s conversion.Scope) error { + return autoConvert_v1_ObjectReference_To_core_ObjectReference(in, out, s) +} + +func autoConvert_core_ObjectReference_To_v1_ObjectReference(in *core.ObjectReference, out *v1.ObjectReference, s conversion.Scope) error { + out.Kind = in.Kind + out.Namespace = in.Namespace + out.Name = in.Name + out.UID = types.UID(in.UID) + out.APIVersion = in.APIVersion + out.ResourceVersion = in.ResourceVersion + out.FieldPath = in.FieldPath + return nil +} + +// Convert_core_ObjectReference_To_v1_ObjectReference is an autogenerated conversion function. +func Convert_core_ObjectReference_To_v1_ObjectReference(in *core.ObjectReference, out *v1.ObjectReference, s conversion.Scope) error { + return autoConvert_core_ObjectReference_To_v1_ObjectReference(in, out, s) +} + +func autoConvert_v1_PersistentVolume_To_core_PersistentVolume(in *v1.PersistentVolume, out *core.PersistentVolume, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_PersistentVolumeStatus_To_core_PersistentVolumeStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_PersistentVolume_To_core_PersistentVolume is an autogenerated conversion function. +func Convert_v1_PersistentVolume_To_core_PersistentVolume(in *v1.PersistentVolume, out *core.PersistentVolume, s conversion.Scope) error { + return autoConvert_v1_PersistentVolume_To_core_PersistentVolume(in, out, s) +} + +func autoConvert_core_PersistentVolume_To_v1_PersistentVolume(in *core.PersistentVolume, out *v1.PersistentVolume, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_core_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_core_PersistentVolume_To_v1_PersistentVolume is an autogenerated conversion function. +func Convert_core_PersistentVolume_To_v1_PersistentVolume(in *core.PersistentVolume, out *v1.PersistentVolume, s conversion.Scope) error { + return autoConvert_core_PersistentVolume_To_v1_PersistentVolume(in, out, s) +} + +func autoConvert_v1_PersistentVolumeClaim_To_core_PersistentVolumeClaim(in *v1.PersistentVolumeClaim, out *core.PersistentVolumeClaim, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_PersistentVolumeClaimStatus_To_core_PersistentVolumeClaimStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_PersistentVolumeClaim_To_core_PersistentVolumeClaim is an autogenerated conversion function. 
+func Convert_v1_PersistentVolumeClaim_To_core_PersistentVolumeClaim(in *v1.PersistentVolumeClaim, out *core.PersistentVolumeClaim, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeClaim_To_core_PersistentVolumeClaim(in, out, s) +} + +func autoConvert_core_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in *core.PersistentVolumeClaim, out *v1.PersistentVolumeClaim, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_core_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_core_PersistentVolumeClaim_To_v1_PersistentVolumeClaim is an autogenerated conversion function. +func Convert_core_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in *core.PersistentVolumeClaim, out *v1.PersistentVolumeClaim, s conversion.Scope) error { + return autoConvert_core_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in, out, s) +} + +func autoConvert_v1_PersistentVolumeClaimCondition_To_core_PersistentVolumeClaimCondition(in *v1.PersistentVolumeClaimCondition, out *core.PersistentVolumeClaimCondition, s conversion.Scope) error { + out.Type = core.PersistentVolumeClaimConditionType(in.Type) + out.Status = core.ConditionStatus(in.Status) + out.LastProbeTime = in.LastProbeTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1_PersistentVolumeClaimCondition_To_core_PersistentVolumeClaimCondition is an autogenerated conversion function. +func Convert_v1_PersistentVolumeClaimCondition_To_core_PersistentVolumeClaimCondition(in *v1.PersistentVolumeClaimCondition, out *core.PersistentVolumeClaimCondition, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeClaimCondition_To_core_PersistentVolumeClaimCondition(in, out, s) +} + +func autoConvert_core_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition(in *core.PersistentVolumeClaimCondition, out *v1.PersistentVolumeClaimCondition, s conversion.Scope) error { + out.Type = v1.PersistentVolumeClaimConditionType(in.Type) + out.Status = v1.ConditionStatus(in.Status) + out.LastProbeTime = in.LastProbeTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_core_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition is an autogenerated conversion function. +func Convert_core_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition(in *core.PersistentVolumeClaimCondition, out *v1.PersistentVolumeClaimCondition, s conversion.Scope) error { + return autoConvert_core_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition(in, out, s) +} + +func autoConvert_v1_PersistentVolumeClaimList_To_core_PersistentVolumeClaimList(in *v1.PersistentVolumeClaimList, out *core.PersistentVolumeClaimList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.PersistentVolumeClaim)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_PersistentVolumeClaimList_To_core_PersistentVolumeClaimList is an autogenerated conversion function. 
+func Convert_v1_PersistentVolumeClaimList_To_core_PersistentVolumeClaimList(in *v1.PersistentVolumeClaimList, out *core.PersistentVolumeClaimList, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeClaimList_To_core_PersistentVolumeClaimList(in, out, s) +} + +func autoConvert_core_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in *core.PersistentVolumeClaimList, out *v1.PersistentVolumeClaimList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1.PersistentVolumeClaim)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList is an autogenerated conversion function. +func Convert_core_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in *core.PersistentVolumeClaimList, out *v1.PersistentVolumeClaimList, s conversion.Scope) error { + return autoConvert_core_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in, out, s) +} + +func autoConvert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec(in *v1.PersistentVolumeClaimSpec, out *core.PersistentVolumeClaimSpec, s conversion.Scope) error { + out.AccessModes = *(*[]core.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) + out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := Convert_v1_ResourceRequirements_To_core_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { + return err + } + out.VolumeName = in.VolumeName + out.StorageClassName = (*string)(unsafe.Pointer(in.StorageClassName)) + out.VolumeMode = (*core.PersistentVolumeMode)(unsafe.Pointer(in.VolumeMode)) + return nil +} + +// Convert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec is an autogenerated conversion function. +func Convert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec(in *v1.PersistentVolumeClaimSpec, out *core.PersistentVolumeClaimSpec, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec(in, out, s) +} + +func autoConvert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in *core.PersistentVolumeClaimSpec, out *v1.PersistentVolumeClaimSpec, s conversion.Scope) error { + out.AccessModes = *(*[]v1.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) + out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := Convert_core_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { + return err + } + out.VolumeName = in.VolumeName + out.StorageClassName = (*string)(unsafe.Pointer(in.StorageClassName)) + out.VolumeMode = (*v1.PersistentVolumeMode)(unsafe.Pointer(in.VolumeMode)) + return nil +} + +// Convert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec is an autogenerated conversion function. 
+func Convert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in *core.PersistentVolumeClaimSpec, out *v1.PersistentVolumeClaimSpec, s conversion.Scope) error { + return autoConvert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in, out, s) +} + +func autoConvert_v1_PersistentVolumeClaimStatus_To_core_PersistentVolumeClaimStatus(in *v1.PersistentVolumeClaimStatus, out *core.PersistentVolumeClaimStatus, s conversion.Scope) error { + out.Phase = core.PersistentVolumeClaimPhase(in.Phase) + out.AccessModes = *(*[]core.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) + out.Capacity = *(*core.ResourceList)(unsafe.Pointer(&in.Capacity)) + out.Conditions = *(*[]core.PersistentVolumeClaimCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +// Convert_v1_PersistentVolumeClaimStatus_To_core_PersistentVolumeClaimStatus is an autogenerated conversion function. +func Convert_v1_PersistentVolumeClaimStatus_To_core_PersistentVolumeClaimStatus(in *v1.PersistentVolumeClaimStatus, out *core.PersistentVolumeClaimStatus, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeClaimStatus_To_core_PersistentVolumeClaimStatus(in, out, s) +} + +func autoConvert_core_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in *core.PersistentVolumeClaimStatus, out *v1.PersistentVolumeClaimStatus, s conversion.Scope) error { + out.Phase = v1.PersistentVolumeClaimPhase(in.Phase) + out.AccessModes = *(*[]v1.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) + out.Capacity = *(*v1.ResourceList)(unsafe.Pointer(&in.Capacity)) + out.Conditions = *(*[]v1.PersistentVolumeClaimCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +// Convert_core_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus is an autogenerated conversion function. +func Convert_core_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in *core.PersistentVolumeClaimStatus, out *v1.PersistentVolumeClaimStatus, s conversion.Scope) error { + return autoConvert_core_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in, out, s) +} + +func autoConvert_v1_PersistentVolumeClaimVolumeSource_To_core_PersistentVolumeClaimVolumeSource(in *v1.PersistentVolumeClaimVolumeSource, out *core.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { + out.ClaimName = in.ClaimName + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_PersistentVolumeClaimVolumeSource_To_core_PersistentVolumeClaimVolumeSource is an autogenerated conversion function. +func Convert_v1_PersistentVolumeClaimVolumeSource_To_core_PersistentVolumeClaimVolumeSource(in *v1.PersistentVolumeClaimVolumeSource, out *core.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeClaimVolumeSource_To_core_PersistentVolumeClaimVolumeSource(in, out, s) +} + +func autoConvert_core_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in *core.PersistentVolumeClaimVolumeSource, out *v1.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { + out.ClaimName = in.ClaimName + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource is an autogenerated conversion function. 
+func Convert_core_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in *core.PersistentVolumeClaimVolumeSource, out *v1.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { + return autoConvert_core_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in, out, s) +} + +func autoConvert_v1_PersistentVolumeList_To_core_PersistentVolumeList(in *v1.PersistentVolumeList, out *core.PersistentVolumeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]core.PersistentVolume, len(*in)) + for i := range *in { + if err := Convert_v1_PersistentVolume_To_core_PersistentVolume(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_PersistentVolumeList_To_core_PersistentVolumeList is an autogenerated conversion function. +func Convert_v1_PersistentVolumeList_To_core_PersistentVolumeList(in *v1.PersistentVolumeList, out *core.PersistentVolumeList, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeList_To_core_PersistentVolumeList(in, out, s) +} + +func autoConvert_core_PersistentVolumeList_To_v1_PersistentVolumeList(in *core.PersistentVolumeList, out *v1.PersistentVolumeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1.PersistentVolume, len(*in)) + for i := range *in { + if err := Convert_core_PersistentVolume_To_v1_PersistentVolume(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_core_PersistentVolumeList_To_v1_PersistentVolumeList is an autogenerated conversion function. +func Convert_core_PersistentVolumeList_To_v1_PersistentVolumeList(in *core.PersistentVolumeList, out *v1.PersistentVolumeList, s conversion.Scope) error { + return autoConvert_core_PersistentVolumeList_To_v1_PersistentVolumeList(in, out, s) +} + +func autoConvert_v1_PersistentVolumeSource_To_core_PersistentVolumeSource(in *v1.PersistentVolumeSource, out *core.PersistentVolumeSource, s conversion.Scope) error { + out.GCEPersistentDisk = (*core.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) + out.AWSElasticBlockStore = (*core.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) + out.HostPath = (*core.HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) + out.Glusterfs = (*core.GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) + out.NFS = (*core.NFSVolumeSource)(unsafe.Pointer(in.NFS)) + out.RBD = (*core.RBDPersistentVolumeSource)(unsafe.Pointer(in.RBD)) + out.ISCSI = (*core.ISCSIPersistentVolumeSource)(unsafe.Pointer(in.ISCSI)) + out.Cinder = (*core.CinderVolumeSource)(unsafe.Pointer(in.Cinder)) + out.CephFS = (*core.CephFSPersistentVolumeSource)(unsafe.Pointer(in.CephFS)) + out.FC = (*core.FCVolumeSource)(unsafe.Pointer(in.FC)) + out.Flocker = (*core.FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) + out.FlexVolume = (*core.FlexVolumeSource)(unsafe.Pointer(in.FlexVolume)) + out.AzureFile = (*core.AzureFilePersistentVolumeSource)(unsafe.Pointer(in.AzureFile)) + out.VsphereVolume = (*core.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) + out.Quobyte = (*core.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) + out.AzureDisk = (*core.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) + out.PhotonPersistentDisk = 
(*core.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) + out.PortworxVolume = (*core.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) + out.ScaleIO = (*core.ScaleIOPersistentVolumeSource)(unsafe.Pointer(in.ScaleIO)) + out.Local = (*core.LocalVolumeSource)(unsafe.Pointer(in.Local)) + out.StorageOS = (*core.StorageOSPersistentVolumeSource)(unsafe.Pointer(in.StorageOS)) + out.CSI = (*core.CSIPersistentVolumeSource)(unsafe.Pointer(in.CSI)) + return nil +} + +// Convert_v1_PersistentVolumeSource_To_core_PersistentVolumeSource is an autogenerated conversion function. +func Convert_v1_PersistentVolumeSource_To_core_PersistentVolumeSource(in *v1.PersistentVolumeSource, out *core.PersistentVolumeSource, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeSource_To_core_PersistentVolumeSource(in, out, s) +} + +func autoConvert_core_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *core.PersistentVolumeSource, out *v1.PersistentVolumeSource, s conversion.Scope) error { + out.GCEPersistentDisk = (*v1.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) + out.AWSElasticBlockStore = (*v1.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) + out.HostPath = (*v1.HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) + out.Glusterfs = (*v1.GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) + out.NFS = (*v1.NFSVolumeSource)(unsafe.Pointer(in.NFS)) + out.RBD = (*v1.RBDPersistentVolumeSource)(unsafe.Pointer(in.RBD)) + out.Quobyte = (*v1.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) + out.ISCSI = (*v1.ISCSIPersistentVolumeSource)(unsafe.Pointer(in.ISCSI)) + out.FlexVolume = (*v1.FlexVolumeSource)(unsafe.Pointer(in.FlexVolume)) + out.Cinder = (*v1.CinderVolumeSource)(unsafe.Pointer(in.Cinder)) + out.CephFS = (*v1.CephFSPersistentVolumeSource)(unsafe.Pointer(in.CephFS)) + out.FC = (*v1.FCVolumeSource)(unsafe.Pointer(in.FC)) + out.Flocker = (*v1.FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) + out.AzureFile = (*v1.AzureFilePersistentVolumeSource)(unsafe.Pointer(in.AzureFile)) + out.VsphereVolume = (*v1.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) + out.AzureDisk = (*v1.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) + out.PhotonPersistentDisk = (*v1.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) + out.PortworxVolume = (*v1.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) + out.ScaleIO = (*v1.ScaleIOPersistentVolumeSource)(unsafe.Pointer(in.ScaleIO)) + out.Local = (*v1.LocalVolumeSource)(unsafe.Pointer(in.Local)) + out.StorageOS = (*v1.StorageOSPersistentVolumeSource)(unsafe.Pointer(in.StorageOS)) + out.CSI = (*v1.CSIPersistentVolumeSource)(unsafe.Pointer(in.CSI)) + return nil +} + +// Convert_core_PersistentVolumeSource_To_v1_PersistentVolumeSource is an autogenerated conversion function. 
+func Convert_core_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *core.PersistentVolumeSource, out *v1.PersistentVolumeSource, s conversion.Scope) error { + return autoConvert_core_PersistentVolumeSource_To_v1_PersistentVolumeSource(in, out, s) +} + +func autoConvert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec(in *v1.PersistentVolumeSpec, out *core.PersistentVolumeSpec, s conversion.Scope) error { + out.Capacity = *(*core.ResourceList)(unsafe.Pointer(&in.Capacity)) + if err := Convert_v1_PersistentVolumeSource_To_core_PersistentVolumeSource(&in.PersistentVolumeSource, &out.PersistentVolumeSource, s); err != nil { + return err + } + out.AccessModes = *(*[]core.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) + out.ClaimRef = (*core.ObjectReference)(unsafe.Pointer(in.ClaimRef)) + out.PersistentVolumeReclaimPolicy = core.PersistentVolumeReclaimPolicy(in.PersistentVolumeReclaimPolicy) + out.StorageClassName = in.StorageClassName + out.MountOptions = *(*[]string)(unsafe.Pointer(&in.MountOptions)) + out.VolumeMode = (*core.PersistentVolumeMode)(unsafe.Pointer(in.VolumeMode)) + return nil +} + +// Convert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec is an autogenerated conversion function. +func Convert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec(in *v1.PersistentVolumeSpec, out *core.PersistentVolumeSpec, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec(in, out, s) +} + +func autoConvert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *core.PersistentVolumeSpec, out *v1.PersistentVolumeSpec, s conversion.Scope) error { + out.Capacity = *(*v1.ResourceList)(unsafe.Pointer(&in.Capacity)) + if err := Convert_core_PersistentVolumeSource_To_v1_PersistentVolumeSource(&in.PersistentVolumeSource, &out.PersistentVolumeSource, s); err != nil { + return err + } + out.AccessModes = *(*[]v1.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) + out.ClaimRef = (*v1.ObjectReference)(unsafe.Pointer(in.ClaimRef)) + out.PersistentVolumeReclaimPolicy = v1.PersistentVolumeReclaimPolicy(in.PersistentVolumeReclaimPolicy) + out.StorageClassName = in.StorageClassName + out.MountOptions = *(*[]string)(unsafe.Pointer(&in.MountOptions)) + out.VolumeMode = (*v1.PersistentVolumeMode)(unsafe.Pointer(in.VolumeMode)) + return nil +} + +// Convert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec is an autogenerated conversion function. +func Convert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *core.PersistentVolumeSpec, out *v1.PersistentVolumeSpec, s conversion.Scope) error { + return autoConvert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in, out, s) +} + +func autoConvert_v1_PersistentVolumeStatus_To_core_PersistentVolumeStatus(in *v1.PersistentVolumeStatus, out *core.PersistentVolumeStatus, s conversion.Scope) error { + out.Phase = core.PersistentVolumePhase(in.Phase) + out.Message = in.Message + out.Reason = in.Reason + return nil +} + +// Convert_v1_PersistentVolumeStatus_To_core_PersistentVolumeStatus is an autogenerated conversion function. 
+func Convert_v1_PersistentVolumeStatus_To_core_PersistentVolumeStatus(in *v1.PersistentVolumeStatus, out *core.PersistentVolumeStatus, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeStatus_To_core_PersistentVolumeStatus(in, out, s) +} + +func autoConvert_core_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in *core.PersistentVolumeStatus, out *v1.PersistentVolumeStatus, s conversion.Scope) error { + out.Phase = v1.PersistentVolumePhase(in.Phase) + out.Message = in.Message + out.Reason = in.Reason + return nil +} + +// Convert_core_PersistentVolumeStatus_To_v1_PersistentVolumeStatus is an autogenerated conversion function. +func Convert_core_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in *core.PersistentVolumeStatus, out *v1.PersistentVolumeStatus, s conversion.Scope) error { + return autoConvert_core_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in, out, s) +} + +func autoConvert_v1_PhotonPersistentDiskVolumeSource_To_core_PhotonPersistentDiskVolumeSource(in *v1.PhotonPersistentDiskVolumeSource, out *core.PhotonPersistentDiskVolumeSource, s conversion.Scope) error { + out.PdID = in.PdID + out.FSType = in.FSType + return nil +} + +// Convert_v1_PhotonPersistentDiskVolumeSource_To_core_PhotonPersistentDiskVolumeSource is an autogenerated conversion function. +func Convert_v1_PhotonPersistentDiskVolumeSource_To_core_PhotonPersistentDiskVolumeSource(in *v1.PhotonPersistentDiskVolumeSource, out *core.PhotonPersistentDiskVolumeSource, s conversion.Scope) error { + return autoConvert_v1_PhotonPersistentDiskVolumeSource_To_core_PhotonPersistentDiskVolumeSource(in, out, s) +} + +func autoConvert_core_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(in *core.PhotonPersistentDiskVolumeSource, out *v1.PhotonPersistentDiskVolumeSource, s conversion.Scope) error { + out.PdID = in.PdID + out.FSType = in.FSType + return nil +} + +// Convert_core_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource is an autogenerated conversion function. +func Convert_core_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(in *core.PhotonPersistentDiskVolumeSource, out *v1.PhotonPersistentDiskVolumeSource, s conversion.Scope) error { + return autoConvert_core_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(in, out, s) +} + +func autoConvert_v1_Pod_To_core_Pod(in *v1.Pod, out *core.Pod, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_PodSpec_To_core_PodSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_PodStatus_To_core_PodStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_Pod_To_core_Pod is an autogenerated conversion function. 
+func Convert_v1_Pod_To_core_Pod(in *v1.Pod, out *core.Pod, s conversion.Scope) error { + return autoConvert_v1_Pod_To_core_Pod(in, out, s) +} + +func autoConvert_core_Pod_To_v1_Pod(in *core.Pod, out *v1.Pod, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_PodSpec_To_v1_PodSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_core_PodStatus_To_v1_PodStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func autoConvert_v1_PodAffinity_To_core_PodAffinity(in *v1.PodAffinity, out *core.PodAffinity, s conversion.Scope) error { + out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]core.PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) + out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]core.WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) + return nil +} + +// Convert_v1_PodAffinity_To_core_PodAffinity is an autogenerated conversion function. +func Convert_v1_PodAffinity_To_core_PodAffinity(in *v1.PodAffinity, out *core.PodAffinity, s conversion.Scope) error { + return autoConvert_v1_PodAffinity_To_core_PodAffinity(in, out, s) +} + +func autoConvert_core_PodAffinity_To_v1_PodAffinity(in *core.PodAffinity, out *v1.PodAffinity, s conversion.Scope) error { + out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]v1.PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) + out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]v1.WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) + return nil +} + +// Convert_core_PodAffinity_To_v1_PodAffinity is an autogenerated conversion function. +func Convert_core_PodAffinity_To_v1_PodAffinity(in *core.PodAffinity, out *v1.PodAffinity, s conversion.Scope) error { + return autoConvert_core_PodAffinity_To_v1_PodAffinity(in, out, s) +} + +func autoConvert_v1_PodAffinityTerm_To_core_PodAffinityTerm(in *v1.PodAffinityTerm, out *core.PodAffinityTerm, s conversion.Scope) error { + out.LabelSelector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.LabelSelector)) + out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces)) + out.TopologyKey = in.TopologyKey + return nil +} + +// Convert_v1_PodAffinityTerm_To_core_PodAffinityTerm is an autogenerated conversion function. +func Convert_v1_PodAffinityTerm_To_core_PodAffinityTerm(in *v1.PodAffinityTerm, out *core.PodAffinityTerm, s conversion.Scope) error { + return autoConvert_v1_PodAffinityTerm_To_core_PodAffinityTerm(in, out, s) +} + +func autoConvert_core_PodAffinityTerm_To_v1_PodAffinityTerm(in *core.PodAffinityTerm, out *v1.PodAffinityTerm, s conversion.Scope) error { + out.LabelSelector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.LabelSelector)) + out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces)) + out.TopologyKey = in.TopologyKey + return nil +} + +// Convert_core_PodAffinityTerm_To_v1_PodAffinityTerm is an autogenerated conversion function. 
+func Convert_core_PodAffinityTerm_To_v1_PodAffinityTerm(in *core.PodAffinityTerm, out *v1.PodAffinityTerm, s conversion.Scope) error { + return autoConvert_core_PodAffinityTerm_To_v1_PodAffinityTerm(in, out, s) +} + +func autoConvert_v1_PodAntiAffinity_To_core_PodAntiAffinity(in *v1.PodAntiAffinity, out *core.PodAntiAffinity, s conversion.Scope) error { + out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]core.PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) + out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]core.WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) + return nil +} + +// Convert_v1_PodAntiAffinity_To_core_PodAntiAffinity is an autogenerated conversion function. +func Convert_v1_PodAntiAffinity_To_core_PodAntiAffinity(in *v1.PodAntiAffinity, out *core.PodAntiAffinity, s conversion.Scope) error { + return autoConvert_v1_PodAntiAffinity_To_core_PodAntiAffinity(in, out, s) +} + +func autoConvert_core_PodAntiAffinity_To_v1_PodAntiAffinity(in *core.PodAntiAffinity, out *v1.PodAntiAffinity, s conversion.Scope) error { + out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]v1.PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) + out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]v1.WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) + return nil +} + +// Convert_core_PodAntiAffinity_To_v1_PodAntiAffinity is an autogenerated conversion function. +func Convert_core_PodAntiAffinity_To_v1_PodAntiAffinity(in *core.PodAntiAffinity, out *v1.PodAntiAffinity, s conversion.Scope) error { + return autoConvert_core_PodAntiAffinity_To_v1_PodAntiAffinity(in, out, s) +} + +func autoConvert_v1_PodAttachOptions_To_core_PodAttachOptions(in *v1.PodAttachOptions, out *core.PodAttachOptions, s conversion.Scope) error { + out.Stdin = in.Stdin + out.Stdout = in.Stdout + out.Stderr = in.Stderr + out.TTY = in.TTY + out.Container = in.Container + return nil +} + +// Convert_v1_PodAttachOptions_To_core_PodAttachOptions is an autogenerated conversion function. +func Convert_v1_PodAttachOptions_To_core_PodAttachOptions(in *v1.PodAttachOptions, out *core.PodAttachOptions, s conversion.Scope) error { + return autoConvert_v1_PodAttachOptions_To_core_PodAttachOptions(in, out, s) +} + +func autoConvert_core_PodAttachOptions_To_v1_PodAttachOptions(in *core.PodAttachOptions, out *v1.PodAttachOptions, s conversion.Scope) error { + out.Stdin = in.Stdin + out.Stdout = in.Stdout + out.Stderr = in.Stderr + out.TTY = in.TTY + out.Container = in.Container + return nil +} + +// Convert_core_PodAttachOptions_To_v1_PodAttachOptions is an autogenerated conversion function. +func Convert_core_PodAttachOptions_To_v1_PodAttachOptions(in *core.PodAttachOptions, out *v1.PodAttachOptions, s conversion.Scope) error { + return autoConvert_core_PodAttachOptions_To_v1_PodAttachOptions(in, out, s) +} + +func autoConvert_v1_PodCondition_To_core_PodCondition(in *v1.PodCondition, out *core.PodCondition, s conversion.Scope) error { + out.Type = core.PodConditionType(in.Type) + out.Status = core.ConditionStatus(in.Status) + out.LastProbeTime = in.LastProbeTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1_PodCondition_To_core_PodCondition is an autogenerated conversion function. 
+func Convert_v1_PodCondition_To_core_PodCondition(in *v1.PodCondition, out *core.PodCondition, s conversion.Scope) error { + return autoConvert_v1_PodCondition_To_core_PodCondition(in, out, s) +} + +func autoConvert_core_PodCondition_To_v1_PodCondition(in *core.PodCondition, out *v1.PodCondition, s conversion.Scope) error { + out.Type = v1.PodConditionType(in.Type) + out.Status = v1.ConditionStatus(in.Status) + out.LastProbeTime = in.LastProbeTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_core_PodCondition_To_v1_PodCondition is an autogenerated conversion function. +func Convert_core_PodCondition_To_v1_PodCondition(in *core.PodCondition, out *v1.PodCondition, s conversion.Scope) error { + return autoConvert_core_PodCondition_To_v1_PodCondition(in, out, s) +} + +func autoConvert_v1_PodDNSConfig_To_core_PodDNSConfig(in *v1.PodDNSConfig, out *core.PodDNSConfig, s conversion.Scope) error { + out.Nameservers = *(*[]string)(unsafe.Pointer(&in.Nameservers)) + out.Searches = *(*[]string)(unsafe.Pointer(&in.Searches)) + out.Options = *(*[]core.PodDNSConfigOption)(unsafe.Pointer(&in.Options)) + return nil +} + +// Convert_v1_PodDNSConfig_To_core_PodDNSConfig is an autogenerated conversion function. +func Convert_v1_PodDNSConfig_To_core_PodDNSConfig(in *v1.PodDNSConfig, out *core.PodDNSConfig, s conversion.Scope) error { + return autoConvert_v1_PodDNSConfig_To_core_PodDNSConfig(in, out, s) +} + +func autoConvert_core_PodDNSConfig_To_v1_PodDNSConfig(in *core.PodDNSConfig, out *v1.PodDNSConfig, s conversion.Scope) error { + out.Nameservers = *(*[]string)(unsafe.Pointer(&in.Nameservers)) + out.Searches = *(*[]string)(unsafe.Pointer(&in.Searches)) + out.Options = *(*[]v1.PodDNSConfigOption)(unsafe.Pointer(&in.Options)) + return nil +} + +// Convert_core_PodDNSConfig_To_v1_PodDNSConfig is an autogenerated conversion function. +func Convert_core_PodDNSConfig_To_v1_PodDNSConfig(in *core.PodDNSConfig, out *v1.PodDNSConfig, s conversion.Scope) error { + return autoConvert_core_PodDNSConfig_To_v1_PodDNSConfig(in, out, s) +} + +func autoConvert_v1_PodDNSConfigOption_To_core_PodDNSConfigOption(in *v1.PodDNSConfigOption, out *core.PodDNSConfigOption, s conversion.Scope) error { + out.Name = in.Name + out.Value = (*string)(unsafe.Pointer(in.Value)) + return nil +} + +// Convert_v1_PodDNSConfigOption_To_core_PodDNSConfigOption is an autogenerated conversion function. +func Convert_v1_PodDNSConfigOption_To_core_PodDNSConfigOption(in *v1.PodDNSConfigOption, out *core.PodDNSConfigOption, s conversion.Scope) error { + return autoConvert_v1_PodDNSConfigOption_To_core_PodDNSConfigOption(in, out, s) +} + +func autoConvert_core_PodDNSConfigOption_To_v1_PodDNSConfigOption(in *core.PodDNSConfigOption, out *v1.PodDNSConfigOption, s conversion.Scope) error { + out.Name = in.Name + out.Value = (*string)(unsafe.Pointer(in.Value)) + return nil +} + +// Convert_core_PodDNSConfigOption_To_v1_PodDNSConfigOption is an autogenerated conversion function. 
+func Convert_core_PodDNSConfigOption_To_v1_PodDNSConfigOption(in *core.PodDNSConfigOption, out *v1.PodDNSConfigOption, s conversion.Scope) error { + return autoConvert_core_PodDNSConfigOption_To_v1_PodDNSConfigOption(in, out, s) +} + +func autoConvert_v1_PodExecOptions_To_core_PodExecOptions(in *v1.PodExecOptions, out *core.PodExecOptions, s conversion.Scope) error { + out.Stdin = in.Stdin + out.Stdout = in.Stdout + out.Stderr = in.Stderr + out.TTY = in.TTY + out.Container = in.Container + out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) + return nil +} + +// Convert_v1_PodExecOptions_To_core_PodExecOptions is an autogenerated conversion function. +func Convert_v1_PodExecOptions_To_core_PodExecOptions(in *v1.PodExecOptions, out *core.PodExecOptions, s conversion.Scope) error { + return autoConvert_v1_PodExecOptions_To_core_PodExecOptions(in, out, s) +} + +func autoConvert_core_PodExecOptions_To_v1_PodExecOptions(in *core.PodExecOptions, out *v1.PodExecOptions, s conversion.Scope) error { + out.Stdin = in.Stdin + out.Stdout = in.Stdout + out.Stderr = in.Stderr + out.TTY = in.TTY + out.Container = in.Container + out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) + return nil +} + +// Convert_core_PodExecOptions_To_v1_PodExecOptions is an autogenerated conversion function. +func Convert_core_PodExecOptions_To_v1_PodExecOptions(in *core.PodExecOptions, out *v1.PodExecOptions, s conversion.Scope) error { + return autoConvert_core_PodExecOptions_To_v1_PodExecOptions(in, out, s) +} + +func autoConvert_v1_PodList_To_core_PodList(in *v1.PodList, out *core.PodList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]core.Pod, len(*in)) + for i := range *in { + if err := Convert_v1_Pod_To_core_Pod(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_PodList_To_core_PodList is an autogenerated conversion function. +func Convert_v1_PodList_To_core_PodList(in *v1.PodList, out *core.PodList, s conversion.Scope) error { + return autoConvert_v1_PodList_To_core_PodList(in, out, s) +} + +func autoConvert_core_PodList_To_v1_PodList(in *core.PodList, out *v1.PodList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1.Pod, len(*in)) + for i := range *in { + if err := Convert_core_Pod_To_v1_Pod(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_core_PodList_To_v1_PodList is an autogenerated conversion function. +func Convert_core_PodList_To_v1_PodList(in *core.PodList, out *v1.PodList, s conversion.Scope) error { + return autoConvert_core_PodList_To_v1_PodList(in, out, s) +} + +func autoConvert_v1_PodLogOptions_To_core_PodLogOptions(in *v1.PodLogOptions, out *core.PodLogOptions, s conversion.Scope) error { + out.Container = in.Container + out.Follow = in.Follow + out.Previous = in.Previous + out.SinceSeconds = (*int64)(unsafe.Pointer(in.SinceSeconds)) + out.SinceTime = (*meta_v1.Time)(unsafe.Pointer(in.SinceTime)) + out.Timestamps = in.Timestamps + out.TailLines = (*int64)(unsafe.Pointer(in.TailLines)) + out.LimitBytes = (*int64)(unsafe.Pointer(in.LimitBytes)) + return nil +} + +// Convert_v1_PodLogOptions_To_core_PodLogOptions is an autogenerated conversion function. 
+func Convert_v1_PodLogOptions_To_core_PodLogOptions(in *v1.PodLogOptions, out *core.PodLogOptions, s conversion.Scope) error { + return autoConvert_v1_PodLogOptions_To_core_PodLogOptions(in, out, s) +} + +func autoConvert_core_PodLogOptions_To_v1_PodLogOptions(in *core.PodLogOptions, out *v1.PodLogOptions, s conversion.Scope) error { + out.Container = in.Container + out.Follow = in.Follow + out.Previous = in.Previous + out.SinceSeconds = (*int64)(unsafe.Pointer(in.SinceSeconds)) + out.SinceTime = (*meta_v1.Time)(unsafe.Pointer(in.SinceTime)) + out.Timestamps = in.Timestamps + out.TailLines = (*int64)(unsafe.Pointer(in.TailLines)) + out.LimitBytes = (*int64)(unsafe.Pointer(in.LimitBytes)) + return nil +} + +// Convert_core_PodLogOptions_To_v1_PodLogOptions is an autogenerated conversion function. +func Convert_core_PodLogOptions_To_v1_PodLogOptions(in *core.PodLogOptions, out *v1.PodLogOptions, s conversion.Scope) error { + return autoConvert_core_PodLogOptions_To_v1_PodLogOptions(in, out, s) +} + +func autoConvert_v1_PodPortForwardOptions_To_core_PodPortForwardOptions(in *v1.PodPortForwardOptions, out *core.PodPortForwardOptions, s conversion.Scope) error { + out.Ports = *(*[]int32)(unsafe.Pointer(&in.Ports)) + return nil +} + +// Convert_v1_PodPortForwardOptions_To_core_PodPortForwardOptions is an autogenerated conversion function. +func Convert_v1_PodPortForwardOptions_To_core_PodPortForwardOptions(in *v1.PodPortForwardOptions, out *core.PodPortForwardOptions, s conversion.Scope) error { + return autoConvert_v1_PodPortForwardOptions_To_core_PodPortForwardOptions(in, out, s) +} + +func autoConvert_core_PodPortForwardOptions_To_v1_PodPortForwardOptions(in *core.PodPortForwardOptions, out *v1.PodPortForwardOptions, s conversion.Scope) error { + out.Ports = *(*[]int32)(unsafe.Pointer(&in.Ports)) + return nil +} + +// Convert_core_PodPortForwardOptions_To_v1_PodPortForwardOptions is an autogenerated conversion function. +func Convert_core_PodPortForwardOptions_To_v1_PodPortForwardOptions(in *core.PodPortForwardOptions, out *v1.PodPortForwardOptions, s conversion.Scope) error { + return autoConvert_core_PodPortForwardOptions_To_v1_PodPortForwardOptions(in, out, s) +} + +func autoConvert_v1_PodProxyOptions_To_core_PodProxyOptions(in *v1.PodProxyOptions, out *core.PodProxyOptions, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_v1_PodProxyOptions_To_core_PodProxyOptions is an autogenerated conversion function. +func Convert_v1_PodProxyOptions_To_core_PodProxyOptions(in *v1.PodProxyOptions, out *core.PodProxyOptions, s conversion.Scope) error { + return autoConvert_v1_PodProxyOptions_To_core_PodProxyOptions(in, out, s) +} + +func autoConvert_core_PodProxyOptions_To_v1_PodProxyOptions(in *core.PodProxyOptions, out *v1.PodProxyOptions, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_core_PodProxyOptions_To_v1_PodProxyOptions is an autogenerated conversion function. 
+func Convert_core_PodProxyOptions_To_v1_PodProxyOptions(in *core.PodProxyOptions, out *v1.PodProxyOptions, s conversion.Scope) error { + return autoConvert_core_PodProxyOptions_To_v1_PodProxyOptions(in, out, s) +} + +func autoConvert_v1_PodSecurityContext_To_core_PodSecurityContext(in *v1.PodSecurityContext, out *core.PodSecurityContext, s conversion.Scope) error { + out.SELinuxOptions = (*core.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) + out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser)) + out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot)) + out.SupplementalGroups = *(*[]int64)(unsafe.Pointer(&in.SupplementalGroups)) + out.FSGroup = (*int64)(unsafe.Pointer(in.FSGroup)) + return nil +} + +func autoConvert_core_PodSecurityContext_To_v1_PodSecurityContext(in *core.PodSecurityContext, out *v1.PodSecurityContext, s conversion.Scope) error { + // INFO: in.HostNetwork opted out of conversion generation + // INFO: in.HostPID opted out of conversion generation + // INFO: in.HostIPC opted out of conversion generation + out.SELinuxOptions = (*v1.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) + out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser)) + out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot)) + out.SupplementalGroups = *(*[]int64)(unsafe.Pointer(&in.SupplementalGroups)) + out.FSGroup = (*int64)(unsafe.Pointer(in.FSGroup)) + return nil +} + +func autoConvert_v1_PodSignature_To_core_PodSignature(in *v1.PodSignature, out *core.PodSignature, s conversion.Scope) error { + out.PodController = (*meta_v1.OwnerReference)(unsafe.Pointer(in.PodController)) + return nil +} + +// Convert_v1_PodSignature_To_core_PodSignature is an autogenerated conversion function. +func Convert_v1_PodSignature_To_core_PodSignature(in *v1.PodSignature, out *core.PodSignature, s conversion.Scope) error { + return autoConvert_v1_PodSignature_To_core_PodSignature(in, out, s) +} + +func autoConvert_core_PodSignature_To_v1_PodSignature(in *core.PodSignature, out *v1.PodSignature, s conversion.Scope) error { + out.PodController = (*meta_v1.OwnerReference)(unsafe.Pointer(in.PodController)) + return nil +} + +// Convert_core_PodSignature_To_v1_PodSignature is an autogenerated conversion function. 
+func Convert_core_PodSignature_To_v1_PodSignature(in *core.PodSignature, out *v1.PodSignature, s conversion.Scope) error { + return autoConvert_core_PodSignature_To_v1_PodSignature(in, out, s) +} + +func autoConvert_v1_PodSpec_To_core_PodSpec(in *v1.PodSpec, out *core.PodSpec, s conversion.Scope) error { + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]core.Volume, len(*in)) + for i := range *in { + if err := Convert_v1_Volume_To_core_Volume(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Volumes = nil + } + if in.InitContainers != nil { + in, out := &in.InitContainers, &out.InitContainers + *out = make([]core.Container, len(*in)) + for i := range *in { + if err := Convert_v1_Container_To_core_Container(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.InitContainers = nil + } + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]core.Container, len(*in)) + for i := range *in { + if err := Convert_v1_Container_To_core_Container(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Containers = nil + } + out.RestartPolicy = core.RestartPolicy(in.RestartPolicy) + out.TerminationGracePeriodSeconds = (*int64)(unsafe.Pointer(in.TerminationGracePeriodSeconds)) + out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds)) + out.DNSPolicy = core.DNSPolicy(in.DNSPolicy) + out.NodeSelector = *(*map[string]string)(unsafe.Pointer(&in.NodeSelector)) + out.ServiceAccountName = in.ServiceAccountName + // INFO: in.DeprecatedServiceAccount opted out of conversion generation + out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken)) + out.NodeName = in.NodeName + // INFO: in.HostNetwork opted out of conversion generation + // INFO: in.HostPID opted out of conversion generation + // INFO: in.HostIPC opted out of conversion generation + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(core.PodSecurityContext) + if err := Convert_v1_PodSecurityContext_To_core_PodSecurityContext(*in, *out, s); err != nil { + return err + } + } else { + out.SecurityContext = nil + } + out.ImagePullSecrets = *(*[]core.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) + out.Hostname = in.Hostname + out.Subdomain = in.Subdomain + out.Affinity = (*core.Affinity)(unsafe.Pointer(in.Affinity)) + out.SchedulerName = in.SchedulerName + out.Tolerations = *(*[]core.Toleration)(unsafe.Pointer(&in.Tolerations)) + out.HostAliases = *(*[]core.HostAlias)(unsafe.Pointer(&in.HostAliases)) + out.PriorityClassName = in.PriorityClassName + out.Priority = (*int32)(unsafe.Pointer(in.Priority)) + out.DNSConfig = (*core.PodDNSConfig)(unsafe.Pointer(in.DNSConfig)) + return nil +} + +func autoConvert_core_PodSpec_To_v1_PodSpec(in *core.PodSpec, out *v1.PodSpec, s conversion.Scope) error { + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]v1.Volume, len(*in)) + for i := range *in { + if err := Convert_core_Volume_To_v1_Volume(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Volumes = nil + } + if in.InitContainers != nil { + in, out := &in.InitContainers, &out.InitContainers + *out = make([]v1.Container, len(*in)) + for i := range *in { + if err := Convert_core_Container_To_v1_Container(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.InitContainers = nil + } + if in.Containers != nil { + in, out := 
&in.Containers, &out.Containers + *out = make([]v1.Container, len(*in)) + for i := range *in { + if err := Convert_core_Container_To_v1_Container(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Containers = nil + } + out.RestartPolicy = v1.RestartPolicy(in.RestartPolicy) + out.TerminationGracePeriodSeconds = (*int64)(unsafe.Pointer(in.TerminationGracePeriodSeconds)) + out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds)) + out.DNSPolicy = v1.DNSPolicy(in.DNSPolicy) + out.NodeSelector = *(*map[string]string)(unsafe.Pointer(&in.NodeSelector)) + out.ServiceAccountName = in.ServiceAccountName + out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken)) + out.NodeName = in.NodeName + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(v1.PodSecurityContext) + if err := Convert_core_PodSecurityContext_To_v1_PodSecurityContext(*in, *out, s); err != nil { + return err + } + } else { + out.SecurityContext = nil + } + out.ImagePullSecrets = *(*[]v1.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) + out.Hostname = in.Hostname + out.Subdomain = in.Subdomain + out.Affinity = (*v1.Affinity)(unsafe.Pointer(in.Affinity)) + out.SchedulerName = in.SchedulerName + out.Tolerations = *(*[]v1.Toleration)(unsafe.Pointer(&in.Tolerations)) + out.HostAliases = *(*[]v1.HostAlias)(unsafe.Pointer(&in.HostAliases)) + out.PriorityClassName = in.PriorityClassName + out.Priority = (*int32)(unsafe.Pointer(in.Priority)) + out.DNSConfig = (*v1.PodDNSConfig)(unsafe.Pointer(in.DNSConfig)) + return nil +} + +func autoConvert_v1_PodStatus_To_core_PodStatus(in *v1.PodStatus, out *core.PodStatus, s conversion.Scope) error { + out.Phase = core.PodPhase(in.Phase) + out.Conditions = *(*[]core.PodCondition)(unsafe.Pointer(&in.Conditions)) + out.Message = in.Message + out.Reason = in.Reason + out.HostIP = in.HostIP + out.PodIP = in.PodIP + out.StartTime = (*meta_v1.Time)(unsafe.Pointer(in.StartTime)) + out.InitContainerStatuses = *(*[]core.ContainerStatus)(unsafe.Pointer(&in.InitContainerStatuses)) + out.ContainerStatuses = *(*[]core.ContainerStatus)(unsafe.Pointer(&in.ContainerStatuses)) + out.QOSClass = core.PodQOSClass(in.QOSClass) + return nil +} + +// Convert_v1_PodStatus_To_core_PodStatus is an autogenerated conversion function. +func Convert_v1_PodStatus_To_core_PodStatus(in *v1.PodStatus, out *core.PodStatus, s conversion.Scope) error { + return autoConvert_v1_PodStatus_To_core_PodStatus(in, out, s) +} + +func autoConvert_core_PodStatus_To_v1_PodStatus(in *core.PodStatus, out *v1.PodStatus, s conversion.Scope) error { + out.Phase = v1.PodPhase(in.Phase) + out.Conditions = *(*[]v1.PodCondition)(unsafe.Pointer(&in.Conditions)) + out.Message = in.Message + out.Reason = in.Reason + out.HostIP = in.HostIP + out.PodIP = in.PodIP + out.StartTime = (*meta_v1.Time)(unsafe.Pointer(in.StartTime)) + out.QOSClass = v1.PodQOSClass(in.QOSClass) + out.InitContainerStatuses = *(*[]v1.ContainerStatus)(unsafe.Pointer(&in.InitContainerStatuses)) + out.ContainerStatuses = *(*[]v1.ContainerStatus)(unsafe.Pointer(&in.ContainerStatuses)) + return nil +} + +// Convert_core_PodStatus_To_v1_PodStatus is an autogenerated conversion function. 
+func Convert_core_PodStatus_To_v1_PodStatus(in *core.PodStatus, out *v1.PodStatus, s conversion.Scope) error { + return autoConvert_core_PodStatus_To_v1_PodStatus(in, out, s) +} + +func autoConvert_v1_PodStatusResult_To_core_PodStatusResult(in *v1.PodStatusResult, out *core.PodStatusResult, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_PodStatus_To_core_PodStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_PodStatusResult_To_core_PodStatusResult is an autogenerated conversion function. +func Convert_v1_PodStatusResult_To_core_PodStatusResult(in *v1.PodStatusResult, out *core.PodStatusResult, s conversion.Scope) error { + return autoConvert_v1_PodStatusResult_To_core_PodStatusResult(in, out, s) +} + +func autoConvert_core_PodStatusResult_To_v1_PodStatusResult(in *core.PodStatusResult, out *v1.PodStatusResult, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_PodStatus_To_v1_PodStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_core_PodStatusResult_To_v1_PodStatusResult is an autogenerated conversion function. +func Convert_core_PodStatusResult_To_v1_PodStatusResult(in *core.PodStatusResult, out *v1.PodStatusResult, s conversion.Scope) error { + return autoConvert_core_PodStatusResult_To_v1_PodStatusResult(in, out, s) +} + +func autoConvert_v1_PodTemplate_To_core_PodTemplate(in *v1.PodTemplate, out *core.PodTemplate, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +// Convert_v1_PodTemplate_To_core_PodTemplate is an autogenerated conversion function. +func Convert_v1_PodTemplate_To_core_PodTemplate(in *v1.PodTemplate, out *core.PodTemplate, s conversion.Scope) error { + return autoConvert_v1_PodTemplate_To_core_PodTemplate(in, out, s) +} + +func autoConvert_core_PodTemplate_To_v1_PodTemplate(in *core.PodTemplate, out *v1.PodTemplate, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +// Convert_core_PodTemplate_To_v1_PodTemplate is an autogenerated conversion function. +func Convert_core_PodTemplate_To_v1_PodTemplate(in *core.PodTemplate, out *v1.PodTemplate, s conversion.Scope) error { + return autoConvert_core_PodTemplate_To_v1_PodTemplate(in, out, s) +} + +func autoConvert_v1_PodTemplateList_To_core_PodTemplateList(in *v1.PodTemplateList, out *core.PodTemplateList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]core.PodTemplate, len(*in)) + for i := range *in { + if err := Convert_v1_PodTemplate_To_core_PodTemplate(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_PodTemplateList_To_core_PodTemplateList is an autogenerated conversion function. 
+func Convert_v1_PodTemplateList_To_core_PodTemplateList(in *v1.PodTemplateList, out *core.PodTemplateList, s conversion.Scope) error { + return autoConvert_v1_PodTemplateList_To_core_PodTemplateList(in, out, s) +} + +func autoConvert_core_PodTemplateList_To_v1_PodTemplateList(in *core.PodTemplateList, out *v1.PodTemplateList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1.PodTemplate, len(*in)) + for i := range *in { + if err := Convert_core_PodTemplate_To_v1_PodTemplate(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_core_PodTemplateList_To_v1_PodTemplateList is an autogenerated conversion function. +func Convert_core_PodTemplateList_To_v1_PodTemplateList(in *core.PodTemplateList, out *v1.PodTemplateList, s conversion.Scope) error { + return autoConvert_core_PodTemplateList_To_v1_PodTemplateList(in, out, s) +} + +func autoConvert_v1_PodTemplateSpec_To_core_PodTemplateSpec(in *v1.PodTemplateSpec, out *core.PodTemplateSpec, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_PodSpec_To_core_PodSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +func autoConvert_core_PodTemplateSpec_To_v1_PodTemplateSpec(in *core.PodTemplateSpec, out *v1.PodTemplateSpec, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_PodSpec_To_v1_PodSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +func autoConvert_v1_PortworxVolumeSource_To_core_PortworxVolumeSource(in *v1.PortworxVolumeSource, out *core.PortworxVolumeSource, s conversion.Scope) error { + out.VolumeID = in.VolumeID + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_PortworxVolumeSource_To_core_PortworxVolumeSource is an autogenerated conversion function. +func Convert_v1_PortworxVolumeSource_To_core_PortworxVolumeSource(in *v1.PortworxVolumeSource, out *core.PortworxVolumeSource, s conversion.Scope) error { + return autoConvert_v1_PortworxVolumeSource_To_core_PortworxVolumeSource(in, out, s) +} + +func autoConvert_core_PortworxVolumeSource_To_v1_PortworxVolumeSource(in *core.PortworxVolumeSource, out *v1.PortworxVolumeSource, s conversion.Scope) error { + out.VolumeID = in.VolumeID + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_PortworxVolumeSource_To_v1_PortworxVolumeSource is an autogenerated conversion function. +func Convert_core_PortworxVolumeSource_To_v1_PortworxVolumeSource(in *core.PortworxVolumeSource, out *v1.PortworxVolumeSource, s conversion.Scope) error { + return autoConvert_core_PortworxVolumeSource_To_v1_PortworxVolumeSource(in, out, s) +} + +func autoConvert_v1_Preconditions_To_core_Preconditions(in *v1.Preconditions, out *core.Preconditions, s conversion.Scope) error { + out.UID = (*types.UID)(unsafe.Pointer(in.UID)) + return nil +} + +// Convert_v1_Preconditions_To_core_Preconditions is an autogenerated conversion function. 
+func Convert_v1_Preconditions_To_core_Preconditions(in *v1.Preconditions, out *core.Preconditions, s conversion.Scope) error { + return autoConvert_v1_Preconditions_To_core_Preconditions(in, out, s) +} + +func autoConvert_core_Preconditions_To_v1_Preconditions(in *core.Preconditions, out *v1.Preconditions, s conversion.Scope) error { + out.UID = (*types.UID)(unsafe.Pointer(in.UID)) + return nil +} + +// Convert_core_Preconditions_To_v1_Preconditions is an autogenerated conversion function. +func Convert_core_Preconditions_To_v1_Preconditions(in *core.Preconditions, out *v1.Preconditions, s conversion.Scope) error { + return autoConvert_core_Preconditions_To_v1_Preconditions(in, out, s) +} + +func autoConvert_v1_PreferAvoidPodsEntry_To_core_PreferAvoidPodsEntry(in *v1.PreferAvoidPodsEntry, out *core.PreferAvoidPodsEntry, s conversion.Scope) error { + if err := Convert_v1_PodSignature_To_core_PodSignature(&in.PodSignature, &out.PodSignature, s); err != nil { + return err + } + out.EvictionTime = in.EvictionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1_PreferAvoidPodsEntry_To_core_PreferAvoidPodsEntry is an autogenerated conversion function. +func Convert_v1_PreferAvoidPodsEntry_To_core_PreferAvoidPodsEntry(in *v1.PreferAvoidPodsEntry, out *core.PreferAvoidPodsEntry, s conversion.Scope) error { + return autoConvert_v1_PreferAvoidPodsEntry_To_core_PreferAvoidPodsEntry(in, out, s) +} + +func autoConvert_core_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry(in *core.PreferAvoidPodsEntry, out *v1.PreferAvoidPodsEntry, s conversion.Scope) error { + if err := Convert_core_PodSignature_To_v1_PodSignature(&in.PodSignature, &out.PodSignature, s); err != nil { + return err + } + out.EvictionTime = in.EvictionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_core_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry is an autogenerated conversion function. +func Convert_core_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry(in *core.PreferAvoidPodsEntry, out *v1.PreferAvoidPodsEntry, s conversion.Scope) error { + return autoConvert_core_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry(in, out, s) +} + +func autoConvert_v1_PreferredSchedulingTerm_To_core_PreferredSchedulingTerm(in *v1.PreferredSchedulingTerm, out *core.PreferredSchedulingTerm, s conversion.Scope) error { + out.Weight = in.Weight + if err := Convert_v1_NodeSelectorTerm_To_core_NodeSelectorTerm(&in.Preference, &out.Preference, s); err != nil { + return err + } + return nil +} + +// Convert_v1_PreferredSchedulingTerm_To_core_PreferredSchedulingTerm is an autogenerated conversion function. +func Convert_v1_PreferredSchedulingTerm_To_core_PreferredSchedulingTerm(in *v1.PreferredSchedulingTerm, out *core.PreferredSchedulingTerm, s conversion.Scope) error { + return autoConvert_v1_PreferredSchedulingTerm_To_core_PreferredSchedulingTerm(in, out, s) +} + +func autoConvert_core_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in *core.PreferredSchedulingTerm, out *v1.PreferredSchedulingTerm, s conversion.Scope) error { + out.Weight = in.Weight + if err := Convert_core_NodeSelectorTerm_To_v1_NodeSelectorTerm(&in.Preference, &out.Preference, s); err != nil { + return err + } + return nil +} + +// Convert_core_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm is an autogenerated conversion function. 
+func Convert_core_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in *core.PreferredSchedulingTerm, out *v1.PreferredSchedulingTerm, s conversion.Scope) error { + return autoConvert_core_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in, out, s) +} + +func autoConvert_v1_Probe_To_core_Probe(in *v1.Probe, out *core.Probe, s conversion.Scope) error { + if err := Convert_v1_Handler_To_core_Handler(&in.Handler, &out.Handler, s); err != nil { + return err + } + out.InitialDelaySeconds = in.InitialDelaySeconds + out.TimeoutSeconds = in.TimeoutSeconds + out.PeriodSeconds = in.PeriodSeconds + out.SuccessThreshold = in.SuccessThreshold + out.FailureThreshold = in.FailureThreshold + return nil +} + +// Convert_v1_Probe_To_core_Probe is an autogenerated conversion function. +func Convert_v1_Probe_To_core_Probe(in *v1.Probe, out *core.Probe, s conversion.Scope) error { + return autoConvert_v1_Probe_To_core_Probe(in, out, s) +} + +func autoConvert_core_Probe_To_v1_Probe(in *core.Probe, out *v1.Probe, s conversion.Scope) error { + if err := Convert_core_Handler_To_v1_Handler(&in.Handler, &out.Handler, s); err != nil { + return err + } + out.InitialDelaySeconds = in.InitialDelaySeconds + out.TimeoutSeconds = in.TimeoutSeconds + out.PeriodSeconds = in.PeriodSeconds + out.SuccessThreshold = in.SuccessThreshold + out.FailureThreshold = in.FailureThreshold + return nil +} + +// Convert_core_Probe_To_v1_Probe is an autogenerated conversion function. +func Convert_core_Probe_To_v1_Probe(in *core.Probe, out *v1.Probe, s conversion.Scope) error { + return autoConvert_core_Probe_To_v1_Probe(in, out, s) +} + +func autoConvert_v1_ProjectedVolumeSource_To_core_ProjectedVolumeSource(in *v1.ProjectedVolumeSource, out *core.ProjectedVolumeSource, s conversion.Scope) error { + out.Sources = *(*[]core.VolumeProjection)(unsafe.Pointer(&in.Sources)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + return nil +} + +// Convert_v1_ProjectedVolumeSource_To_core_ProjectedVolumeSource is an autogenerated conversion function. +func Convert_v1_ProjectedVolumeSource_To_core_ProjectedVolumeSource(in *v1.ProjectedVolumeSource, out *core.ProjectedVolumeSource, s conversion.Scope) error { + return autoConvert_v1_ProjectedVolumeSource_To_core_ProjectedVolumeSource(in, out, s) +} + +func autoConvert_core_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in *core.ProjectedVolumeSource, out *v1.ProjectedVolumeSource, s conversion.Scope) error { + out.Sources = *(*[]v1.VolumeProjection)(unsafe.Pointer(&in.Sources)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + return nil +} + +// Convert_core_ProjectedVolumeSource_To_v1_ProjectedVolumeSource is an autogenerated conversion function. +func Convert_core_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in *core.ProjectedVolumeSource, out *v1.ProjectedVolumeSource, s conversion.Scope) error { + return autoConvert_core_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in, out, s) +} + +func autoConvert_v1_QuobyteVolumeSource_To_core_QuobyteVolumeSource(in *v1.QuobyteVolumeSource, out *core.QuobyteVolumeSource, s conversion.Scope) error { + out.Registry = in.Registry + out.Volume = in.Volume + out.ReadOnly = in.ReadOnly + out.User = in.User + out.Group = in.Group + return nil +} + +// Convert_v1_QuobyteVolumeSource_To_core_QuobyteVolumeSource is an autogenerated conversion function. 
+func Convert_v1_QuobyteVolumeSource_To_core_QuobyteVolumeSource(in *v1.QuobyteVolumeSource, out *core.QuobyteVolumeSource, s conversion.Scope) error { + return autoConvert_v1_QuobyteVolumeSource_To_core_QuobyteVolumeSource(in, out, s) +} + +func autoConvert_core_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(in *core.QuobyteVolumeSource, out *v1.QuobyteVolumeSource, s conversion.Scope) error { + out.Registry = in.Registry + out.Volume = in.Volume + out.ReadOnly = in.ReadOnly + out.User = in.User + out.Group = in.Group + return nil +} + +// Convert_core_QuobyteVolumeSource_To_v1_QuobyteVolumeSource is an autogenerated conversion function. +func Convert_core_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(in *core.QuobyteVolumeSource, out *v1.QuobyteVolumeSource, s conversion.Scope) error { + return autoConvert_core_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(in, out, s) +} + +func autoConvert_v1_RBDPersistentVolumeSource_To_core_RBDPersistentVolumeSource(in *v1.RBDPersistentVolumeSource, out *core.RBDPersistentVolumeSource, s conversion.Scope) error { + out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors)) + out.RBDImage = in.RBDImage + out.FSType = in.FSType + out.RBDPool = in.RBDPool + out.RadosUser = in.RadosUser + out.Keyring = in.Keyring + out.SecretRef = (*core.SecretReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_RBDPersistentVolumeSource_To_core_RBDPersistentVolumeSource is an autogenerated conversion function. +func Convert_v1_RBDPersistentVolumeSource_To_core_RBDPersistentVolumeSource(in *v1.RBDPersistentVolumeSource, out *core.RBDPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_v1_RBDPersistentVolumeSource_To_core_RBDPersistentVolumeSource(in, out, s) +} + +func autoConvert_core_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource(in *core.RBDPersistentVolumeSource, out *v1.RBDPersistentVolumeSource, s conversion.Scope) error { + out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors)) + out.RBDImage = in.RBDImage + out.FSType = in.FSType + out.RBDPool = in.RBDPool + out.RadosUser = in.RadosUser + out.Keyring = in.Keyring + out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource is an autogenerated conversion function. +func Convert_core_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource(in *core.RBDPersistentVolumeSource, out *v1.RBDPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_core_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource(in, out, s) +} + +func autoConvert_v1_RBDVolumeSource_To_core_RBDVolumeSource(in *v1.RBDVolumeSource, out *core.RBDVolumeSource, s conversion.Scope) error { + out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors)) + out.RBDImage = in.RBDImage + out.FSType = in.FSType + out.RBDPool = in.RBDPool + out.RadosUser = in.RadosUser + out.Keyring = in.Keyring + out.SecretRef = (*core.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_RBDVolumeSource_To_core_RBDVolumeSource is an autogenerated conversion function. 
+func Convert_v1_RBDVolumeSource_To_core_RBDVolumeSource(in *v1.RBDVolumeSource, out *core.RBDVolumeSource, s conversion.Scope) error { + return autoConvert_v1_RBDVolumeSource_To_core_RBDVolumeSource(in, out, s) +} + +func autoConvert_core_RBDVolumeSource_To_v1_RBDVolumeSource(in *core.RBDVolumeSource, out *v1.RBDVolumeSource, s conversion.Scope) error { + out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors)) + out.RBDImage = in.RBDImage + out.FSType = in.FSType + out.RBDPool = in.RBDPool + out.RadosUser = in.RadosUser + out.Keyring = in.Keyring + out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_RBDVolumeSource_To_v1_RBDVolumeSource is an autogenerated conversion function. +func Convert_core_RBDVolumeSource_To_v1_RBDVolumeSource(in *core.RBDVolumeSource, out *v1.RBDVolumeSource, s conversion.Scope) error { + return autoConvert_core_RBDVolumeSource_To_v1_RBDVolumeSource(in, out, s) +} + +func autoConvert_v1_RangeAllocation_To_core_RangeAllocation(in *v1.RangeAllocation, out *core.RangeAllocation, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Range = in.Range + out.Data = *(*[]byte)(unsafe.Pointer(&in.Data)) + return nil +} + +// Convert_v1_RangeAllocation_To_core_RangeAllocation is an autogenerated conversion function. +func Convert_v1_RangeAllocation_To_core_RangeAllocation(in *v1.RangeAllocation, out *core.RangeAllocation, s conversion.Scope) error { + return autoConvert_v1_RangeAllocation_To_core_RangeAllocation(in, out, s) +} + +func autoConvert_core_RangeAllocation_To_v1_RangeAllocation(in *core.RangeAllocation, out *v1.RangeAllocation, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Range = in.Range + out.Data = *(*[]byte)(unsafe.Pointer(&in.Data)) + return nil +} + +// Convert_core_RangeAllocation_To_v1_RangeAllocation is an autogenerated conversion function. +func Convert_core_RangeAllocation_To_v1_RangeAllocation(in *core.RangeAllocation, out *v1.RangeAllocation, s conversion.Scope) error { + return autoConvert_core_RangeAllocation_To_v1_RangeAllocation(in, out, s) +} + +func autoConvert_v1_ReplicationController_To_core_ReplicationController(in *v1.ReplicationController, out *core.ReplicationController, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_ReplicationControllerStatus_To_core_ReplicationControllerStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_ReplicationController_To_core_ReplicationController is an autogenerated conversion function. 
+func Convert_v1_ReplicationController_To_core_ReplicationController(in *v1.ReplicationController, out *core.ReplicationController, s conversion.Scope) error { + return autoConvert_v1_ReplicationController_To_core_ReplicationController(in, out, s) +} + +func autoConvert_core_ReplicationController_To_v1_ReplicationController(in *core.ReplicationController, out *v1.ReplicationController, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_core_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_core_ReplicationController_To_v1_ReplicationController is an autogenerated conversion function. +func Convert_core_ReplicationController_To_v1_ReplicationController(in *core.ReplicationController, out *v1.ReplicationController, s conversion.Scope) error { + return autoConvert_core_ReplicationController_To_v1_ReplicationController(in, out, s) +} + +func autoConvert_v1_ReplicationControllerCondition_To_core_ReplicationControllerCondition(in *v1.ReplicationControllerCondition, out *core.ReplicationControllerCondition, s conversion.Scope) error { + out.Type = core.ReplicationControllerConditionType(in.Type) + out.Status = core.ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1_ReplicationControllerCondition_To_core_ReplicationControllerCondition is an autogenerated conversion function. +func Convert_v1_ReplicationControllerCondition_To_core_ReplicationControllerCondition(in *v1.ReplicationControllerCondition, out *core.ReplicationControllerCondition, s conversion.Scope) error { + return autoConvert_v1_ReplicationControllerCondition_To_core_ReplicationControllerCondition(in, out, s) +} + +func autoConvert_core_ReplicationControllerCondition_To_v1_ReplicationControllerCondition(in *core.ReplicationControllerCondition, out *v1.ReplicationControllerCondition, s conversion.Scope) error { + out.Type = v1.ReplicationControllerConditionType(in.Type) + out.Status = v1.ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_core_ReplicationControllerCondition_To_v1_ReplicationControllerCondition is an autogenerated conversion function. +func Convert_core_ReplicationControllerCondition_To_v1_ReplicationControllerCondition(in *core.ReplicationControllerCondition, out *v1.ReplicationControllerCondition, s conversion.Scope) error { + return autoConvert_core_ReplicationControllerCondition_To_v1_ReplicationControllerCondition(in, out, s) +} + +func autoConvert_v1_ReplicationControllerList_To_core_ReplicationControllerList(in *v1.ReplicationControllerList, out *core.ReplicationControllerList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]core.ReplicationController, len(*in)) + for i := range *in { + if err := Convert_v1_ReplicationController_To_core_ReplicationController(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_ReplicationControllerList_To_core_ReplicationControllerList is an autogenerated conversion function. 
+func Convert_v1_ReplicationControllerList_To_core_ReplicationControllerList(in *v1.ReplicationControllerList, out *core.ReplicationControllerList, s conversion.Scope) error { + return autoConvert_v1_ReplicationControllerList_To_core_ReplicationControllerList(in, out, s) +} + +func autoConvert_core_ReplicationControllerList_To_v1_ReplicationControllerList(in *core.ReplicationControllerList, out *v1.ReplicationControllerList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1.ReplicationController, len(*in)) + for i := range *in { + if err := Convert_core_ReplicationController_To_v1_ReplicationController(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_core_ReplicationControllerList_To_v1_ReplicationControllerList is an autogenerated conversion function. +func Convert_core_ReplicationControllerList_To_v1_ReplicationControllerList(in *core.ReplicationControllerList, out *v1.ReplicationControllerList, s conversion.Scope) error { + return autoConvert_core_ReplicationControllerList_To_v1_ReplicationControllerList(in, out, s) +} + +func autoConvert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec(in *v1.ReplicationControllerSpec, out *core.ReplicationControllerSpec, s conversion.Scope) error { + if err := meta_v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { + return err + } + out.MinReadySeconds = in.MinReadySeconds + out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) + if in.Template != nil { + in, out := &in.Template, &out.Template + *out = new(core.PodTemplateSpec) + if err := Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(*in, *out, s); err != nil { + return err + } + } else { + out.Template = nil + } + return nil +} + +func autoConvert_core_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(in *core.ReplicationControllerSpec, out *v1.ReplicationControllerSpec, s conversion.Scope) error { + if err := meta_v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { + return err + } + out.MinReadySeconds = in.MinReadySeconds + out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) + if in.Template != nil { + in, out := &in.Template, &out.Template + *out = new(v1.PodTemplateSpec) + if err := Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(*in, *out, s); err != nil { + return err + } + } else { + out.Template = nil + } + return nil +} + +func autoConvert_v1_ReplicationControllerStatus_To_core_ReplicationControllerStatus(in *v1.ReplicationControllerStatus, out *core.ReplicationControllerStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.FullyLabeledReplicas = in.FullyLabeledReplicas + out.ReadyReplicas = in.ReadyReplicas + out.AvailableReplicas = in.AvailableReplicas + out.ObservedGeneration = in.ObservedGeneration + out.Conditions = *(*[]core.ReplicationControllerCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +// Convert_v1_ReplicationControllerStatus_To_core_ReplicationControllerStatus is an autogenerated conversion function. 
+func Convert_v1_ReplicationControllerStatus_To_core_ReplicationControllerStatus(in *v1.ReplicationControllerStatus, out *core.ReplicationControllerStatus, s conversion.Scope) error { + return autoConvert_v1_ReplicationControllerStatus_To_core_ReplicationControllerStatus(in, out, s) +} + +func autoConvert_core_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in *core.ReplicationControllerStatus, out *v1.ReplicationControllerStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.FullyLabeledReplicas = in.FullyLabeledReplicas + out.ReadyReplicas = in.ReadyReplicas + out.AvailableReplicas = in.AvailableReplicas + out.ObservedGeneration = in.ObservedGeneration + out.Conditions = *(*[]v1.ReplicationControllerCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +// Convert_core_ReplicationControllerStatus_To_v1_ReplicationControllerStatus is an autogenerated conversion function. +func Convert_core_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in *core.ReplicationControllerStatus, out *v1.ReplicationControllerStatus, s conversion.Scope) error { + return autoConvert_core_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in, out, s) +} + +func autoConvert_v1_ResourceFieldSelector_To_core_ResourceFieldSelector(in *v1.ResourceFieldSelector, out *core.ResourceFieldSelector, s conversion.Scope) error { + out.ContainerName = in.ContainerName + out.Resource = in.Resource + out.Divisor = in.Divisor + return nil +} + +// Convert_v1_ResourceFieldSelector_To_core_ResourceFieldSelector is an autogenerated conversion function. +func Convert_v1_ResourceFieldSelector_To_core_ResourceFieldSelector(in *v1.ResourceFieldSelector, out *core.ResourceFieldSelector, s conversion.Scope) error { + return autoConvert_v1_ResourceFieldSelector_To_core_ResourceFieldSelector(in, out, s) +} + +func autoConvert_core_ResourceFieldSelector_To_v1_ResourceFieldSelector(in *core.ResourceFieldSelector, out *v1.ResourceFieldSelector, s conversion.Scope) error { + out.ContainerName = in.ContainerName + out.Resource = in.Resource + out.Divisor = in.Divisor + return nil +} + +// Convert_core_ResourceFieldSelector_To_v1_ResourceFieldSelector is an autogenerated conversion function. +func Convert_core_ResourceFieldSelector_To_v1_ResourceFieldSelector(in *core.ResourceFieldSelector, out *v1.ResourceFieldSelector, s conversion.Scope) error { + return autoConvert_core_ResourceFieldSelector_To_v1_ResourceFieldSelector(in, out, s) +} + +func autoConvert_v1_ResourceQuota_To_core_ResourceQuota(in *v1.ResourceQuota, out *core.ResourceQuota, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_ResourceQuotaSpec_To_core_ResourceQuotaSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_ResourceQuotaStatus_To_core_ResourceQuotaStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_ResourceQuota_To_core_ResourceQuota is an autogenerated conversion function. 
+func Convert_v1_ResourceQuota_To_core_ResourceQuota(in *v1.ResourceQuota, out *core.ResourceQuota, s conversion.Scope) error { + return autoConvert_v1_ResourceQuota_To_core_ResourceQuota(in, out, s) +} + +func autoConvert_core_ResourceQuota_To_v1_ResourceQuota(in *core.ResourceQuota, out *v1.ResourceQuota, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_core_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_core_ResourceQuota_To_v1_ResourceQuota is an autogenerated conversion function. +func Convert_core_ResourceQuota_To_v1_ResourceQuota(in *core.ResourceQuota, out *v1.ResourceQuota, s conversion.Scope) error { + return autoConvert_core_ResourceQuota_To_v1_ResourceQuota(in, out, s) +} + +func autoConvert_v1_ResourceQuotaList_To_core_ResourceQuotaList(in *v1.ResourceQuotaList, out *core.ResourceQuotaList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.ResourceQuota)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_ResourceQuotaList_To_core_ResourceQuotaList is an autogenerated conversion function. +func Convert_v1_ResourceQuotaList_To_core_ResourceQuotaList(in *v1.ResourceQuotaList, out *core.ResourceQuotaList, s conversion.Scope) error { + return autoConvert_v1_ResourceQuotaList_To_core_ResourceQuotaList(in, out, s) +} + +func autoConvert_core_ResourceQuotaList_To_v1_ResourceQuotaList(in *core.ResourceQuotaList, out *v1.ResourceQuotaList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1.ResourceQuota)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_ResourceQuotaList_To_v1_ResourceQuotaList is an autogenerated conversion function. +func Convert_core_ResourceQuotaList_To_v1_ResourceQuotaList(in *core.ResourceQuotaList, out *v1.ResourceQuotaList, s conversion.Scope) error { + return autoConvert_core_ResourceQuotaList_To_v1_ResourceQuotaList(in, out, s) +} + +func autoConvert_v1_ResourceQuotaSpec_To_core_ResourceQuotaSpec(in *v1.ResourceQuotaSpec, out *core.ResourceQuotaSpec, s conversion.Scope) error { + out.Hard = *(*core.ResourceList)(unsafe.Pointer(&in.Hard)) + out.Scopes = *(*[]core.ResourceQuotaScope)(unsafe.Pointer(&in.Scopes)) + return nil +} + +// Convert_v1_ResourceQuotaSpec_To_core_ResourceQuotaSpec is an autogenerated conversion function. +func Convert_v1_ResourceQuotaSpec_To_core_ResourceQuotaSpec(in *v1.ResourceQuotaSpec, out *core.ResourceQuotaSpec, s conversion.Scope) error { + return autoConvert_v1_ResourceQuotaSpec_To_core_ResourceQuotaSpec(in, out, s) +} + +func autoConvert_core_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in *core.ResourceQuotaSpec, out *v1.ResourceQuotaSpec, s conversion.Scope) error { + out.Hard = *(*v1.ResourceList)(unsafe.Pointer(&in.Hard)) + out.Scopes = *(*[]v1.ResourceQuotaScope)(unsafe.Pointer(&in.Scopes)) + return nil +} + +// Convert_core_ResourceQuotaSpec_To_v1_ResourceQuotaSpec is an autogenerated conversion function. 
+func Convert_core_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in *core.ResourceQuotaSpec, out *v1.ResourceQuotaSpec, s conversion.Scope) error { + return autoConvert_core_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in, out, s) +} + +func autoConvert_v1_ResourceQuotaStatus_To_core_ResourceQuotaStatus(in *v1.ResourceQuotaStatus, out *core.ResourceQuotaStatus, s conversion.Scope) error { + out.Hard = *(*core.ResourceList)(unsafe.Pointer(&in.Hard)) + out.Used = *(*core.ResourceList)(unsafe.Pointer(&in.Used)) + return nil +} + +// Convert_v1_ResourceQuotaStatus_To_core_ResourceQuotaStatus is an autogenerated conversion function. +func Convert_v1_ResourceQuotaStatus_To_core_ResourceQuotaStatus(in *v1.ResourceQuotaStatus, out *core.ResourceQuotaStatus, s conversion.Scope) error { + return autoConvert_v1_ResourceQuotaStatus_To_core_ResourceQuotaStatus(in, out, s) +} + +func autoConvert_core_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *core.ResourceQuotaStatus, out *v1.ResourceQuotaStatus, s conversion.Scope) error { + out.Hard = *(*v1.ResourceList)(unsafe.Pointer(&in.Hard)) + out.Used = *(*v1.ResourceList)(unsafe.Pointer(&in.Used)) + return nil +} + +// Convert_core_ResourceQuotaStatus_To_v1_ResourceQuotaStatus is an autogenerated conversion function. +func Convert_core_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *core.ResourceQuotaStatus, out *v1.ResourceQuotaStatus, s conversion.Scope) error { + return autoConvert_core_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in, out, s) +} + +func autoConvert_v1_ResourceRequirements_To_core_ResourceRequirements(in *v1.ResourceRequirements, out *core.ResourceRequirements, s conversion.Scope) error { + out.Limits = *(*core.ResourceList)(unsafe.Pointer(&in.Limits)) + out.Requests = *(*core.ResourceList)(unsafe.Pointer(&in.Requests)) + return nil +} + +// Convert_v1_ResourceRequirements_To_core_ResourceRequirements is an autogenerated conversion function. +func Convert_v1_ResourceRequirements_To_core_ResourceRequirements(in *v1.ResourceRequirements, out *core.ResourceRequirements, s conversion.Scope) error { + return autoConvert_v1_ResourceRequirements_To_core_ResourceRequirements(in, out, s) +} + +func autoConvert_core_ResourceRequirements_To_v1_ResourceRequirements(in *core.ResourceRequirements, out *v1.ResourceRequirements, s conversion.Scope) error { + out.Limits = *(*v1.ResourceList)(unsafe.Pointer(&in.Limits)) + out.Requests = *(*v1.ResourceList)(unsafe.Pointer(&in.Requests)) + return nil +} + +// Convert_core_ResourceRequirements_To_v1_ResourceRequirements is an autogenerated conversion function. +func Convert_core_ResourceRequirements_To_v1_ResourceRequirements(in *core.ResourceRequirements, out *v1.ResourceRequirements, s conversion.Scope) error { + return autoConvert_core_ResourceRequirements_To_v1_ResourceRequirements(in, out, s) +} + +func autoConvert_v1_SELinuxOptions_To_core_SELinuxOptions(in *v1.SELinuxOptions, out *core.SELinuxOptions, s conversion.Scope) error { + out.User = in.User + out.Role = in.Role + out.Type = in.Type + out.Level = in.Level + return nil +} + +// Convert_v1_SELinuxOptions_To_core_SELinuxOptions is an autogenerated conversion function. 
+func Convert_v1_SELinuxOptions_To_core_SELinuxOptions(in *v1.SELinuxOptions, out *core.SELinuxOptions, s conversion.Scope) error { + return autoConvert_v1_SELinuxOptions_To_core_SELinuxOptions(in, out, s) +} + +func autoConvert_core_SELinuxOptions_To_v1_SELinuxOptions(in *core.SELinuxOptions, out *v1.SELinuxOptions, s conversion.Scope) error { + out.User = in.User + out.Role = in.Role + out.Type = in.Type + out.Level = in.Level + return nil +} + +// Convert_core_SELinuxOptions_To_v1_SELinuxOptions is an autogenerated conversion function. +func Convert_core_SELinuxOptions_To_v1_SELinuxOptions(in *core.SELinuxOptions, out *v1.SELinuxOptions, s conversion.Scope) error { + return autoConvert_core_SELinuxOptions_To_v1_SELinuxOptions(in, out, s) +} + +func autoConvert_v1_ScaleIOPersistentVolumeSource_To_core_ScaleIOPersistentVolumeSource(in *v1.ScaleIOPersistentVolumeSource, out *core.ScaleIOPersistentVolumeSource, s conversion.Scope) error { + out.Gateway = in.Gateway + out.System = in.System + out.SecretRef = (*core.SecretReference)(unsafe.Pointer(in.SecretRef)) + out.SSLEnabled = in.SSLEnabled + out.ProtectionDomain = in.ProtectionDomain + out.StoragePool = in.StoragePool + out.StorageMode = in.StorageMode + out.VolumeName = in.VolumeName + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_ScaleIOPersistentVolumeSource_To_core_ScaleIOPersistentVolumeSource is an autogenerated conversion function. +func Convert_v1_ScaleIOPersistentVolumeSource_To_core_ScaleIOPersistentVolumeSource(in *v1.ScaleIOPersistentVolumeSource, out *core.ScaleIOPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_v1_ScaleIOPersistentVolumeSource_To_core_ScaleIOPersistentVolumeSource(in, out, s) +} + +func autoConvert_core_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource(in *core.ScaleIOPersistentVolumeSource, out *v1.ScaleIOPersistentVolumeSource, s conversion.Scope) error { + out.Gateway = in.Gateway + out.System = in.System + out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) + out.SSLEnabled = in.SSLEnabled + out.ProtectionDomain = in.ProtectionDomain + out.StoragePool = in.StoragePool + out.StorageMode = in.StorageMode + out.VolumeName = in.VolumeName + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource is an autogenerated conversion function. +func Convert_core_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource(in *core.ScaleIOPersistentVolumeSource, out *v1.ScaleIOPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_core_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource(in, out, s) +} + +func autoConvert_v1_ScaleIOVolumeSource_To_core_ScaleIOVolumeSource(in *v1.ScaleIOVolumeSource, out *core.ScaleIOVolumeSource, s conversion.Scope) error { + out.Gateway = in.Gateway + out.System = in.System + out.SecretRef = (*core.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.SSLEnabled = in.SSLEnabled + out.ProtectionDomain = in.ProtectionDomain + out.StoragePool = in.StoragePool + out.StorageMode = in.StorageMode + out.VolumeName = in.VolumeName + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_ScaleIOVolumeSource_To_core_ScaleIOVolumeSource is an autogenerated conversion function. 
+func Convert_v1_ScaleIOVolumeSource_To_core_ScaleIOVolumeSource(in *v1.ScaleIOVolumeSource, out *core.ScaleIOVolumeSource, s conversion.Scope) error { + return autoConvert_v1_ScaleIOVolumeSource_To_core_ScaleIOVolumeSource(in, out, s) +} + +func autoConvert_core_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in *core.ScaleIOVolumeSource, out *v1.ScaleIOVolumeSource, s conversion.Scope) error { + out.Gateway = in.Gateway + out.System = in.System + out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.SSLEnabled = in.SSLEnabled + out.ProtectionDomain = in.ProtectionDomain + out.StoragePool = in.StoragePool + out.StorageMode = in.StorageMode + out.VolumeName = in.VolumeName + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource is an autogenerated conversion function. +func Convert_core_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in *core.ScaleIOVolumeSource, out *v1.ScaleIOVolumeSource, s conversion.Scope) error { + return autoConvert_core_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in, out, s) +} + +func autoConvert_v1_Secret_To_core_Secret(in *v1.Secret, out *core.Secret, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Data = *(*map[string][]byte)(unsafe.Pointer(&in.Data)) + // INFO: in.StringData opted out of conversion generation + out.Type = core.SecretType(in.Type) + return nil +} + +func autoConvert_core_Secret_To_v1_Secret(in *core.Secret, out *v1.Secret, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Data = *(*map[string][]byte)(unsafe.Pointer(&in.Data)) + out.Type = v1.SecretType(in.Type) + return nil +} + +// Convert_core_Secret_To_v1_Secret is an autogenerated conversion function. +func Convert_core_Secret_To_v1_Secret(in *core.Secret, out *v1.Secret, s conversion.Scope) error { + return autoConvert_core_Secret_To_v1_Secret(in, out, s) +} + +func autoConvert_v1_SecretEnvSource_To_core_SecretEnvSource(in *v1.SecretEnvSource, out *core.SecretEnvSource, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_core_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_v1_SecretEnvSource_To_core_SecretEnvSource is an autogenerated conversion function. +func Convert_v1_SecretEnvSource_To_core_SecretEnvSource(in *v1.SecretEnvSource, out *core.SecretEnvSource, s conversion.Scope) error { + return autoConvert_v1_SecretEnvSource_To_core_SecretEnvSource(in, out, s) +} + +func autoConvert_core_SecretEnvSource_To_v1_SecretEnvSource(in *core.SecretEnvSource, out *v1.SecretEnvSource, s conversion.Scope) error { + if err := Convert_core_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_core_SecretEnvSource_To_v1_SecretEnvSource is an autogenerated conversion function. 
+func Convert_core_SecretEnvSource_To_v1_SecretEnvSource(in *core.SecretEnvSource, out *v1.SecretEnvSource, s conversion.Scope) error { + return autoConvert_core_SecretEnvSource_To_v1_SecretEnvSource(in, out, s) +} + +func autoConvert_v1_SecretKeySelector_To_core_SecretKeySelector(in *v1.SecretKeySelector, out *core.SecretKeySelector, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_core_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Key = in.Key + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_v1_SecretKeySelector_To_core_SecretKeySelector is an autogenerated conversion function. +func Convert_v1_SecretKeySelector_To_core_SecretKeySelector(in *v1.SecretKeySelector, out *core.SecretKeySelector, s conversion.Scope) error { + return autoConvert_v1_SecretKeySelector_To_core_SecretKeySelector(in, out, s) +} + +func autoConvert_core_SecretKeySelector_To_v1_SecretKeySelector(in *core.SecretKeySelector, out *v1.SecretKeySelector, s conversion.Scope) error { + if err := Convert_core_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Key = in.Key + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_core_SecretKeySelector_To_v1_SecretKeySelector is an autogenerated conversion function. +func Convert_core_SecretKeySelector_To_v1_SecretKeySelector(in *core.SecretKeySelector, out *v1.SecretKeySelector, s conversion.Scope) error { + return autoConvert_core_SecretKeySelector_To_v1_SecretKeySelector(in, out, s) +} + +func autoConvert_v1_SecretList_To_core_SecretList(in *v1.SecretList, out *core.SecretList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]core.Secret, len(*in)) + for i := range *in { + if err := Convert_v1_Secret_To_core_Secret(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_SecretList_To_core_SecretList is an autogenerated conversion function. +func Convert_v1_SecretList_To_core_SecretList(in *v1.SecretList, out *core.SecretList, s conversion.Scope) error { + return autoConvert_v1_SecretList_To_core_SecretList(in, out, s) +} + +func autoConvert_core_SecretList_To_v1_SecretList(in *core.SecretList, out *v1.SecretList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1.Secret, len(*in)) + for i := range *in { + if err := Convert_core_Secret_To_v1_Secret(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_core_SecretList_To_v1_SecretList is an autogenerated conversion function. 
+func Convert_core_SecretList_To_v1_SecretList(in *core.SecretList, out *v1.SecretList, s conversion.Scope) error { + return autoConvert_core_SecretList_To_v1_SecretList(in, out, s) +} + +func autoConvert_v1_SecretProjection_To_core_SecretProjection(in *v1.SecretProjection, out *core.SecretProjection, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_core_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Items = *(*[]core.KeyToPath)(unsafe.Pointer(&in.Items)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_v1_SecretProjection_To_core_SecretProjection is an autogenerated conversion function. +func Convert_v1_SecretProjection_To_core_SecretProjection(in *v1.SecretProjection, out *core.SecretProjection, s conversion.Scope) error { + return autoConvert_v1_SecretProjection_To_core_SecretProjection(in, out, s) +} + +func autoConvert_core_SecretProjection_To_v1_SecretProjection(in *core.SecretProjection, out *v1.SecretProjection, s conversion.Scope) error { + if err := Convert_core_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Items = *(*[]v1.KeyToPath)(unsafe.Pointer(&in.Items)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_core_SecretProjection_To_v1_SecretProjection is an autogenerated conversion function. +func Convert_core_SecretProjection_To_v1_SecretProjection(in *core.SecretProjection, out *v1.SecretProjection, s conversion.Scope) error { + return autoConvert_core_SecretProjection_To_v1_SecretProjection(in, out, s) +} + +func autoConvert_v1_SecretReference_To_core_SecretReference(in *v1.SecretReference, out *core.SecretReference, s conversion.Scope) error { + out.Name = in.Name + out.Namespace = in.Namespace + return nil +} + +// Convert_v1_SecretReference_To_core_SecretReference is an autogenerated conversion function. +func Convert_v1_SecretReference_To_core_SecretReference(in *v1.SecretReference, out *core.SecretReference, s conversion.Scope) error { + return autoConvert_v1_SecretReference_To_core_SecretReference(in, out, s) +} + +func autoConvert_core_SecretReference_To_v1_SecretReference(in *core.SecretReference, out *v1.SecretReference, s conversion.Scope) error { + out.Name = in.Name + out.Namespace = in.Namespace + return nil +} + +// Convert_core_SecretReference_To_v1_SecretReference is an autogenerated conversion function. +func Convert_core_SecretReference_To_v1_SecretReference(in *core.SecretReference, out *v1.SecretReference, s conversion.Scope) error { + return autoConvert_core_SecretReference_To_v1_SecretReference(in, out, s) +} + +func autoConvert_v1_SecretVolumeSource_To_core_SecretVolumeSource(in *v1.SecretVolumeSource, out *core.SecretVolumeSource, s conversion.Scope) error { + out.SecretName = in.SecretName + out.Items = *(*[]core.KeyToPath)(unsafe.Pointer(&in.Items)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_v1_SecretVolumeSource_To_core_SecretVolumeSource is an autogenerated conversion function. 
+func Convert_v1_SecretVolumeSource_To_core_SecretVolumeSource(in *v1.SecretVolumeSource, out *core.SecretVolumeSource, s conversion.Scope) error { + return autoConvert_v1_SecretVolumeSource_To_core_SecretVolumeSource(in, out, s) +} + +func autoConvert_core_SecretVolumeSource_To_v1_SecretVolumeSource(in *core.SecretVolumeSource, out *v1.SecretVolumeSource, s conversion.Scope) error { + out.SecretName = in.SecretName + out.Items = *(*[]v1.KeyToPath)(unsafe.Pointer(&in.Items)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_core_SecretVolumeSource_To_v1_SecretVolumeSource is an autogenerated conversion function. +func Convert_core_SecretVolumeSource_To_v1_SecretVolumeSource(in *core.SecretVolumeSource, out *v1.SecretVolumeSource, s conversion.Scope) error { + return autoConvert_core_SecretVolumeSource_To_v1_SecretVolumeSource(in, out, s) +} + +func autoConvert_v1_SecurityContext_To_core_SecurityContext(in *v1.SecurityContext, out *core.SecurityContext, s conversion.Scope) error { + out.Capabilities = (*core.Capabilities)(unsafe.Pointer(in.Capabilities)) + out.Privileged = (*bool)(unsafe.Pointer(in.Privileged)) + out.SELinuxOptions = (*core.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) + out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser)) + out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot)) + out.ReadOnlyRootFilesystem = (*bool)(unsafe.Pointer(in.ReadOnlyRootFilesystem)) + out.AllowPrivilegeEscalation = (*bool)(unsafe.Pointer(in.AllowPrivilegeEscalation)) + return nil +} + +// Convert_v1_SecurityContext_To_core_SecurityContext is an autogenerated conversion function. +func Convert_v1_SecurityContext_To_core_SecurityContext(in *v1.SecurityContext, out *core.SecurityContext, s conversion.Scope) error { + return autoConvert_v1_SecurityContext_To_core_SecurityContext(in, out, s) +} + +func autoConvert_core_SecurityContext_To_v1_SecurityContext(in *core.SecurityContext, out *v1.SecurityContext, s conversion.Scope) error { + out.Capabilities = (*v1.Capabilities)(unsafe.Pointer(in.Capabilities)) + out.Privileged = (*bool)(unsafe.Pointer(in.Privileged)) + out.SELinuxOptions = (*v1.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) + out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser)) + out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot)) + out.ReadOnlyRootFilesystem = (*bool)(unsafe.Pointer(in.ReadOnlyRootFilesystem)) + out.AllowPrivilegeEscalation = (*bool)(unsafe.Pointer(in.AllowPrivilegeEscalation)) + return nil +} + +func autoConvert_v1_SerializedReference_To_core_SerializedReference(in *v1.SerializedReference, out *core.SerializedReference, s conversion.Scope) error { + if err := Convert_v1_ObjectReference_To_core_ObjectReference(&in.Reference, &out.Reference, s); err != nil { + return err + } + return nil +} + +// Convert_v1_SerializedReference_To_core_SerializedReference is an autogenerated conversion function. 
+func Convert_v1_SerializedReference_To_core_SerializedReference(in *v1.SerializedReference, out *core.SerializedReference, s conversion.Scope) error { + return autoConvert_v1_SerializedReference_To_core_SerializedReference(in, out, s) +} + +func autoConvert_core_SerializedReference_To_v1_SerializedReference(in *core.SerializedReference, out *v1.SerializedReference, s conversion.Scope) error { + if err := Convert_core_ObjectReference_To_v1_ObjectReference(&in.Reference, &out.Reference, s); err != nil { + return err + } + return nil +} + +// Convert_core_SerializedReference_To_v1_SerializedReference is an autogenerated conversion function. +func Convert_core_SerializedReference_To_v1_SerializedReference(in *core.SerializedReference, out *v1.SerializedReference, s conversion.Scope) error { + return autoConvert_core_SerializedReference_To_v1_SerializedReference(in, out, s) +} + +func autoConvert_v1_Service_To_core_Service(in *v1.Service, out *core.Service, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_ServiceSpec_To_core_ServiceSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_ServiceStatus_To_core_ServiceStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_Service_To_core_Service is an autogenerated conversion function. +func Convert_v1_Service_To_core_Service(in *v1.Service, out *core.Service, s conversion.Scope) error { + return autoConvert_v1_Service_To_core_Service(in, out, s) +} + +func autoConvert_core_Service_To_v1_Service(in *core.Service, out *v1.Service, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_ServiceSpec_To_v1_ServiceSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_core_ServiceStatus_To_v1_ServiceStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_core_Service_To_v1_Service is an autogenerated conversion function. +func Convert_core_Service_To_v1_Service(in *core.Service, out *v1.Service, s conversion.Scope) error { + return autoConvert_core_Service_To_v1_Service(in, out, s) +} + +func autoConvert_v1_ServiceAccount_To_core_ServiceAccount(in *v1.ServiceAccount, out *core.ServiceAccount, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Secrets = *(*[]core.ObjectReference)(unsafe.Pointer(&in.Secrets)) + out.ImagePullSecrets = *(*[]core.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) + out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken)) + return nil +} + +// Convert_v1_ServiceAccount_To_core_ServiceAccount is an autogenerated conversion function. +func Convert_v1_ServiceAccount_To_core_ServiceAccount(in *v1.ServiceAccount, out *core.ServiceAccount, s conversion.Scope) error { + return autoConvert_v1_ServiceAccount_To_core_ServiceAccount(in, out, s) +} + +func autoConvert_core_ServiceAccount_To_v1_ServiceAccount(in *core.ServiceAccount, out *v1.ServiceAccount, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Secrets = *(*[]v1.ObjectReference)(unsafe.Pointer(&in.Secrets)) + out.ImagePullSecrets = *(*[]v1.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) + out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken)) + return nil +} + +// Convert_core_ServiceAccount_To_v1_ServiceAccount is an autogenerated conversion function. 
+func Convert_core_ServiceAccount_To_v1_ServiceAccount(in *core.ServiceAccount, out *v1.ServiceAccount, s conversion.Scope) error { + return autoConvert_core_ServiceAccount_To_v1_ServiceAccount(in, out, s) +} + +func autoConvert_v1_ServiceAccountList_To_core_ServiceAccountList(in *v1.ServiceAccountList, out *core.ServiceAccountList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.ServiceAccount)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_ServiceAccountList_To_core_ServiceAccountList is an autogenerated conversion function. +func Convert_v1_ServiceAccountList_To_core_ServiceAccountList(in *v1.ServiceAccountList, out *core.ServiceAccountList, s conversion.Scope) error { + return autoConvert_v1_ServiceAccountList_To_core_ServiceAccountList(in, out, s) +} + +func autoConvert_core_ServiceAccountList_To_v1_ServiceAccountList(in *core.ServiceAccountList, out *v1.ServiceAccountList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1.ServiceAccount)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_ServiceAccountList_To_v1_ServiceAccountList is an autogenerated conversion function. +func Convert_core_ServiceAccountList_To_v1_ServiceAccountList(in *core.ServiceAccountList, out *v1.ServiceAccountList, s conversion.Scope) error { + return autoConvert_core_ServiceAccountList_To_v1_ServiceAccountList(in, out, s) +} + +func autoConvert_v1_ServiceList_To_core_ServiceList(in *v1.ServiceList, out *core.ServiceList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]core.Service, len(*in)) + for i := range *in { + if err := Convert_v1_Service_To_core_Service(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_ServiceList_To_core_ServiceList is an autogenerated conversion function. +func Convert_v1_ServiceList_To_core_ServiceList(in *v1.ServiceList, out *core.ServiceList, s conversion.Scope) error { + return autoConvert_v1_ServiceList_To_core_ServiceList(in, out, s) +} + +func autoConvert_core_ServiceList_To_v1_ServiceList(in *core.ServiceList, out *v1.ServiceList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1.Service, len(*in)) + for i := range *in { + if err := Convert_core_Service_To_v1_Service(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_core_ServiceList_To_v1_ServiceList is an autogenerated conversion function. +func Convert_core_ServiceList_To_v1_ServiceList(in *core.ServiceList, out *v1.ServiceList, s conversion.Scope) error { + return autoConvert_core_ServiceList_To_v1_ServiceList(in, out, s) +} + +func autoConvert_v1_ServicePort_To_core_ServicePort(in *v1.ServicePort, out *core.ServicePort, s conversion.Scope) error { + out.Name = in.Name + out.Protocol = core.Protocol(in.Protocol) + out.Port = in.Port + out.TargetPort = in.TargetPort + out.NodePort = in.NodePort + return nil +} + +// Convert_v1_ServicePort_To_core_ServicePort is an autogenerated conversion function. 
+func Convert_v1_ServicePort_To_core_ServicePort(in *v1.ServicePort, out *core.ServicePort, s conversion.Scope) error { + return autoConvert_v1_ServicePort_To_core_ServicePort(in, out, s) +} + +func autoConvert_core_ServicePort_To_v1_ServicePort(in *core.ServicePort, out *v1.ServicePort, s conversion.Scope) error { + out.Name = in.Name + out.Protocol = v1.Protocol(in.Protocol) + out.Port = in.Port + out.TargetPort = in.TargetPort + out.NodePort = in.NodePort + return nil +} + +// Convert_core_ServicePort_To_v1_ServicePort is an autogenerated conversion function. +func Convert_core_ServicePort_To_v1_ServicePort(in *core.ServicePort, out *v1.ServicePort, s conversion.Scope) error { + return autoConvert_core_ServicePort_To_v1_ServicePort(in, out, s) +} + +func autoConvert_v1_ServiceProxyOptions_To_core_ServiceProxyOptions(in *v1.ServiceProxyOptions, out *core.ServiceProxyOptions, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_v1_ServiceProxyOptions_To_core_ServiceProxyOptions is an autogenerated conversion function. +func Convert_v1_ServiceProxyOptions_To_core_ServiceProxyOptions(in *v1.ServiceProxyOptions, out *core.ServiceProxyOptions, s conversion.Scope) error { + return autoConvert_v1_ServiceProxyOptions_To_core_ServiceProxyOptions(in, out, s) +} + +func autoConvert_core_ServiceProxyOptions_To_v1_ServiceProxyOptions(in *core.ServiceProxyOptions, out *v1.ServiceProxyOptions, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_core_ServiceProxyOptions_To_v1_ServiceProxyOptions is an autogenerated conversion function. +func Convert_core_ServiceProxyOptions_To_v1_ServiceProxyOptions(in *core.ServiceProxyOptions, out *v1.ServiceProxyOptions, s conversion.Scope) error { + return autoConvert_core_ServiceProxyOptions_To_v1_ServiceProxyOptions(in, out, s) +} + +func autoConvert_v1_ServiceSpec_To_core_ServiceSpec(in *v1.ServiceSpec, out *core.ServiceSpec, s conversion.Scope) error { + out.Ports = *(*[]core.ServicePort)(unsafe.Pointer(&in.Ports)) + out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) + out.ClusterIP = in.ClusterIP + out.Type = core.ServiceType(in.Type) + out.ExternalIPs = *(*[]string)(unsafe.Pointer(&in.ExternalIPs)) + out.SessionAffinity = core.ServiceAffinity(in.SessionAffinity) + out.LoadBalancerIP = in.LoadBalancerIP + out.LoadBalancerSourceRanges = *(*[]string)(unsafe.Pointer(&in.LoadBalancerSourceRanges)) + out.ExternalName = in.ExternalName + out.ExternalTrafficPolicy = core.ServiceExternalTrafficPolicyType(in.ExternalTrafficPolicy) + out.HealthCheckNodePort = in.HealthCheckNodePort + out.PublishNotReadyAddresses = in.PublishNotReadyAddresses + out.SessionAffinityConfig = (*core.SessionAffinityConfig)(unsafe.Pointer(in.SessionAffinityConfig)) + return nil +} + +// Convert_v1_ServiceSpec_To_core_ServiceSpec is an autogenerated conversion function. 
+func Convert_v1_ServiceSpec_To_core_ServiceSpec(in *v1.ServiceSpec, out *core.ServiceSpec, s conversion.Scope) error { + return autoConvert_v1_ServiceSpec_To_core_ServiceSpec(in, out, s) +} + +func autoConvert_core_ServiceSpec_To_v1_ServiceSpec(in *core.ServiceSpec, out *v1.ServiceSpec, s conversion.Scope) error { + out.Type = v1.ServiceType(in.Type) + out.Ports = *(*[]v1.ServicePort)(unsafe.Pointer(&in.Ports)) + out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) + out.ClusterIP = in.ClusterIP + out.ExternalName = in.ExternalName + out.ExternalIPs = *(*[]string)(unsafe.Pointer(&in.ExternalIPs)) + out.LoadBalancerIP = in.LoadBalancerIP + out.SessionAffinity = v1.ServiceAffinity(in.SessionAffinity) + out.SessionAffinityConfig = (*v1.SessionAffinityConfig)(unsafe.Pointer(in.SessionAffinityConfig)) + out.LoadBalancerSourceRanges = *(*[]string)(unsafe.Pointer(&in.LoadBalancerSourceRanges)) + out.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyType(in.ExternalTrafficPolicy) + out.HealthCheckNodePort = in.HealthCheckNodePort + out.PublishNotReadyAddresses = in.PublishNotReadyAddresses + return nil +} + +// Convert_core_ServiceSpec_To_v1_ServiceSpec is an autogenerated conversion function. +func Convert_core_ServiceSpec_To_v1_ServiceSpec(in *core.ServiceSpec, out *v1.ServiceSpec, s conversion.Scope) error { + return autoConvert_core_ServiceSpec_To_v1_ServiceSpec(in, out, s) +} + +func autoConvert_v1_ServiceStatus_To_core_ServiceStatus(in *v1.ServiceStatus, out *core.ServiceStatus, s conversion.Scope) error { + if err := Convert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil { + return err + } + return nil +} + +// Convert_v1_ServiceStatus_To_core_ServiceStatus is an autogenerated conversion function. +func Convert_v1_ServiceStatus_To_core_ServiceStatus(in *v1.ServiceStatus, out *core.ServiceStatus, s conversion.Scope) error { + return autoConvert_v1_ServiceStatus_To_core_ServiceStatus(in, out, s) +} + +func autoConvert_core_ServiceStatus_To_v1_ServiceStatus(in *core.ServiceStatus, out *v1.ServiceStatus, s conversion.Scope) error { + if err := Convert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil { + return err + } + return nil +} + +// Convert_core_ServiceStatus_To_v1_ServiceStatus is an autogenerated conversion function. +func Convert_core_ServiceStatus_To_v1_ServiceStatus(in *core.ServiceStatus, out *v1.ServiceStatus, s conversion.Scope) error { + return autoConvert_core_ServiceStatus_To_v1_ServiceStatus(in, out, s) +} + +func autoConvert_v1_SessionAffinityConfig_To_core_SessionAffinityConfig(in *v1.SessionAffinityConfig, out *core.SessionAffinityConfig, s conversion.Scope) error { + out.ClientIP = (*core.ClientIPConfig)(unsafe.Pointer(in.ClientIP)) + return nil +} + +// Convert_v1_SessionAffinityConfig_To_core_SessionAffinityConfig is an autogenerated conversion function. 
+func Convert_v1_SessionAffinityConfig_To_core_SessionAffinityConfig(in *v1.SessionAffinityConfig, out *core.SessionAffinityConfig, s conversion.Scope) error { + return autoConvert_v1_SessionAffinityConfig_To_core_SessionAffinityConfig(in, out, s) +} + +func autoConvert_core_SessionAffinityConfig_To_v1_SessionAffinityConfig(in *core.SessionAffinityConfig, out *v1.SessionAffinityConfig, s conversion.Scope) error { + out.ClientIP = (*v1.ClientIPConfig)(unsafe.Pointer(in.ClientIP)) + return nil +} + +// Convert_core_SessionAffinityConfig_To_v1_SessionAffinityConfig is an autogenerated conversion function. +func Convert_core_SessionAffinityConfig_To_v1_SessionAffinityConfig(in *core.SessionAffinityConfig, out *v1.SessionAffinityConfig, s conversion.Scope) error { + return autoConvert_core_SessionAffinityConfig_To_v1_SessionAffinityConfig(in, out, s) +} + +func autoConvert_v1_StorageOSPersistentVolumeSource_To_core_StorageOSPersistentVolumeSource(in *v1.StorageOSPersistentVolumeSource, out *core.StorageOSPersistentVolumeSource, s conversion.Scope) error { + out.VolumeName = in.VolumeName + out.VolumeNamespace = in.VolumeNamespace + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.SecretRef = (*core.ObjectReference)(unsafe.Pointer(in.SecretRef)) + return nil +} + +// Convert_v1_StorageOSPersistentVolumeSource_To_core_StorageOSPersistentVolumeSource is an autogenerated conversion function. +func Convert_v1_StorageOSPersistentVolumeSource_To_core_StorageOSPersistentVolumeSource(in *v1.StorageOSPersistentVolumeSource, out *core.StorageOSPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_v1_StorageOSPersistentVolumeSource_To_core_StorageOSPersistentVolumeSource(in, out, s) +} + +func autoConvert_core_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource(in *core.StorageOSPersistentVolumeSource, out *v1.StorageOSPersistentVolumeSource, s conversion.Scope) error { + out.VolumeName = in.VolumeName + out.VolumeNamespace = in.VolumeNamespace + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.SecretRef = (*v1.ObjectReference)(unsafe.Pointer(in.SecretRef)) + return nil +} + +// Convert_core_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource is an autogenerated conversion function. +func Convert_core_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource(in *core.StorageOSPersistentVolumeSource, out *v1.StorageOSPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_core_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource(in, out, s) +} + +func autoConvert_v1_StorageOSVolumeSource_To_core_StorageOSVolumeSource(in *v1.StorageOSVolumeSource, out *core.StorageOSVolumeSource, s conversion.Scope) error { + out.VolumeName = in.VolumeName + out.VolumeNamespace = in.VolumeNamespace + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.SecretRef = (*core.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + return nil +} + +// Convert_v1_StorageOSVolumeSource_To_core_StorageOSVolumeSource is an autogenerated conversion function. 
+func Convert_v1_StorageOSVolumeSource_To_core_StorageOSVolumeSource(in *v1.StorageOSVolumeSource, out *core.StorageOSVolumeSource, s conversion.Scope) error { + return autoConvert_v1_StorageOSVolumeSource_To_core_StorageOSVolumeSource(in, out, s) +} + +func autoConvert_core_StorageOSVolumeSource_To_v1_StorageOSVolumeSource(in *core.StorageOSVolumeSource, out *v1.StorageOSVolumeSource, s conversion.Scope) error { + out.VolumeName = in.VolumeName + out.VolumeNamespace = in.VolumeNamespace + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + return nil +} + +// Convert_core_StorageOSVolumeSource_To_v1_StorageOSVolumeSource is an autogenerated conversion function. +func Convert_core_StorageOSVolumeSource_To_v1_StorageOSVolumeSource(in *core.StorageOSVolumeSource, out *v1.StorageOSVolumeSource, s conversion.Scope) error { + return autoConvert_core_StorageOSVolumeSource_To_v1_StorageOSVolumeSource(in, out, s) +} + +func autoConvert_v1_Sysctl_To_core_Sysctl(in *v1.Sysctl, out *core.Sysctl, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + return nil +} + +// Convert_v1_Sysctl_To_core_Sysctl is an autogenerated conversion function. +func Convert_v1_Sysctl_To_core_Sysctl(in *v1.Sysctl, out *core.Sysctl, s conversion.Scope) error { + return autoConvert_v1_Sysctl_To_core_Sysctl(in, out, s) +} + +func autoConvert_core_Sysctl_To_v1_Sysctl(in *core.Sysctl, out *v1.Sysctl, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + return nil +} + +// Convert_core_Sysctl_To_v1_Sysctl is an autogenerated conversion function. +func Convert_core_Sysctl_To_v1_Sysctl(in *core.Sysctl, out *v1.Sysctl, s conversion.Scope) error { + return autoConvert_core_Sysctl_To_v1_Sysctl(in, out, s) +} + +func autoConvert_v1_TCPSocketAction_To_core_TCPSocketAction(in *v1.TCPSocketAction, out *core.TCPSocketAction, s conversion.Scope) error { + out.Port = in.Port + out.Host = in.Host + return nil +} + +// Convert_v1_TCPSocketAction_To_core_TCPSocketAction is an autogenerated conversion function. +func Convert_v1_TCPSocketAction_To_core_TCPSocketAction(in *v1.TCPSocketAction, out *core.TCPSocketAction, s conversion.Scope) error { + return autoConvert_v1_TCPSocketAction_To_core_TCPSocketAction(in, out, s) +} + +func autoConvert_core_TCPSocketAction_To_v1_TCPSocketAction(in *core.TCPSocketAction, out *v1.TCPSocketAction, s conversion.Scope) error { + out.Port = in.Port + out.Host = in.Host + return nil +} + +// Convert_core_TCPSocketAction_To_v1_TCPSocketAction is an autogenerated conversion function. +func Convert_core_TCPSocketAction_To_v1_TCPSocketAction(in *core.TCPSocketAction, out *v1.TCPSocketAction, s conversion.Scope) error { + return autoConvert_core_TCPSocketAction_To_v1_TCPSocketAction(in, out, s) +} + +func autoConvert_v1_Taint_To_core_Taint(in *v1.Taint, out *core.Taint, s conversion.Scope) error { + out.Key = in.Key + out.Value = in.Value + out.Effect = core.TaintEffect(in.Effect) + out.TimeAdded = (*meta_v1.Time)(unsafe.Pointer(in.TimeAdded)) + return nil +} + +// Convert_v1_Taint_To_core_Taint is an autogenerated conversion function. 
+func Convert_v1_Taint_To_core_Taint(in *v1.Taint, out *core.Taint, s conversion.Scope) error { + return autoConvert_v1_Taint_To_core_Taint(in, out, s) +} + +func autoConvert_core_Taint_To_v1_Taint(in *core.Taint, out *v1.Taint, s conversion.Scope) error { + out.Key = in.Key + out.Value = in.Value + out.Effect = v1.TaintEffect(in.Effect) + out.TimeAdded = (*meta_v1.Time)(unsafe.Pointer(in.TimeAdded)) + return nil +} + +// Convert_core_Taint_To_v1_Taint is an autogenerated conversion function. +func Convert_core_Taint_To_v1_Taint(in *core.Taint, out *v1.Taint, s conversion.Scope) error { + return autoConvert_core_Taint_To_v1_Taint(in, out, s) +} + +func autoConvert_v1_Toleration_To_core_Toleration(in *v1.Toleration, out *core.Toleration, s conversion.Scope) error { + out.Key = in.Key + out.Operator = core.TolerationOperator(in.Operator) + out.Value = in.Value + out.Effect = core.TaintEffect(in.Effect) + out.TolerationSeconds = (*int64)(unsafe.Pointer(in.TolerationSeconds)) + return nil +} + +// Convert_v1_Toleration_To_core_Toleration is an autogenerated conversion function. +func Convert_v1_Toleration_To_core_Toleration(in *v1.Toleration, out *core.Toleration, s conversion.Scope) error { + return autoConvert_v1_Toleration_To_core_Toleration(in, out, s) +} + +func autoConvert_core_Toleration_To_v1_Toleration(in *core.Toleration, out *v1.Toleration, s conversion.Scope) error { + out.Key = in.Key + out.Operator = v1.TolerationOperator(in.Operator) + out.Value = in.Value + out.Effect = v1.TaintEffect(in.Effect) + out.TolerationSeconds = (*int64)(unsafe.Pointer(in.TolerationSeconds)) + return nil +} + +// Convert_core_Toleration_To_v1_Toleration is an autogenerated conversion function. +func Convert_core_Toleration_To_v1_Toleration(in *core.Toleration, out *v1.Toleration, s conversion.Scope) error { + return autoConvert_core_Toleration_To_v1_Toleration(in, out, s) +} + +func autoConvert_v1_Volume_To_core_Volume(in *v1.Volume, out *core.Volume, s conversion.Scope) error { + out.Name = in.Name + if err := Convert_v1_VolumeSource_To_core_VolumeSource(&in.VolumeSource, &out.VolumeSource, s); err != nil { + return err + } + return nil +} + +// Convert_v1_Volume_To_core_Volume is an autogenerated conversion function. +func Convert_v1_Volume_To_core_Volume(in *v1.Volume, out *core.Volume, s conversion.Scope) error { + return autoConvert_v1_Volume_To_core_Volume(in, out, s) +} + +func autoConvert_core_Volume_To_v1_Volume(in *core.Volume, out *v1.Volume, s conversion.Scope) error { + out.Name = in.Name + if err := Convert_core_VolumeSource_To_v1_VolumeSource(&in.VolumeSource, &out.VolumeSource, s); err != nil { + return err + } + return nil +} + +// Convert_core_Volume_To_v1_Volume is an autogenerated conversion function. +func Convert_core_Volume_To_v1_Volume(in *core.Volume, out *v1.Volume, s conversion.Scope) error { + return autoConvert_core_Volume_To_v1_Volume(in, out, s) +} + +func autoConvert_v1_VolumeDevice_To_core_VolumeDevice(in *v1.VolumeDevice, out *core.VolumeDevice, s conversion.Scope) error { + out.Name = in.Name + out.DevicePath = in.DevicePath + return nil +} + +// Convert_v1_VolumeDevice_To_core_VolumeDevice is an autogenerated conversion function. 
+func Convert_v1_VolumeDevice_To_core_VolumeDevice(in *v1.VolumeDevice, out *core.VolumeDevice, s conversion.Scope) error { + return autoConvert_v1_VolumeDevice_To_core_VolumeDevice(in, out, s) +} + +func autoConvert_core_VolumeDevice_To_v1_VolumeDevice(in *core.VolumeDevice, out *v1.VolumeDevice, s conversion.Scope) error { + out.Name = in.Name + out.DevicePath = in.DevicePath + return nil +} + +// Convert_core_VolumeDevice_To_v1_VolumeDevice is an autogenerated conversion function. +func Convert_core_VolumeDevice_To_v1_VolumeDevice(in *core.VolumeDevice, out *v1.VolumeDevice, s conversion.Scope) error { + return autoConvert_core_VolumeDevice_To_v1_VolumeDevice(in, out, s) +} + +func autoConvert_v1_VolumeMount_To_core_VolumeMount(in *v1.VolumeMount, out *core.VolumeMount, s conversion.Scope) error { + out.Name = in.Name + out.ReadOnly = in.ReadOnly + out.MountPath = in.MountPath + out.SubPath = in.SubPath + out.MountPropagation = (*core.MountPropagationMode)(unsafe.Pointer(in.MountPropagation)) + return nil +} + +// Convert_v1_VolumeMount_To_core_VolumeMount is an autogenerated conversion function. +func Convert_v1_VolumeMount_To_core_VolumeMount(in *v1.VolumeMount, out *core.VolumeMount, s conversion.Scope) error { + return autoConvert_v1_VolumeMount_To_core_VolumeMount(in, out, s) +} + +func autoConvert_core_VolumeMount_To_v1_VolumeMount(in *core.VolumeMount, out *v1.VolumeMount, s conversion.Scope) error { + out.Name = in.Name + out.ReadOnly = in.ReadOnly + out.MountPath = in.MountPath + out.SubPath = in.SubPath + out.MountPropagation = (*v1.MountPropagationMode)(unsafe.Pointer(in.MountPropagation)) + return nil +} + +// Convert_core_VolumeMount_To_v1_VolumeMount is an autogenerated conversion function. +func Convert_core_VolumeMount_To_v1_VolumeMount(in *core.VolumeMount, out *v1.VolumeMount, s conversion.Scope) error { + return autoConvert_core_VolumeMount_To_v1_VolumeMount(in, out, s) +} + +func autoConvert_v1_VolumeProjection_To_core_VolumeProjection(in *v1.VolumeProjection, out *core.VolumeProjection, s conversion.Scope) error { + out.Secret = (*core.SecretProjection)(unsafe.Pointer(in.Secret)) + out.DownwardAPI = (*core.DownwardAPIProjection)(unsafe.Pointer(in.DownwardAPI)) + out.ConfigMap = (*core.ConfigMapProjection)(unsafe.Pointer(in.ConfigMap)) + return nil +} + +// Convert_v1_VolumeProjection_To_core_VolumeProjection is an autogenerated conversion function. +func Convert_v1_VolumeProjection_To_core_VolumeProjection(in *v1.VolumeProjection, out *core.VolumeProjection, s conversion.Scope) error { + return autoConvert_v1_VolumeProjection_To_core_VolumeProjection(in, out, s) +} + +func autoConvert_core_VolumeProjection_To_v1_VolumeProjection(in *core.VolumeProjection, out *v1.VolumeProjection, s conversion.Scope) error { + out.Secret = (*v1.SecretProjection)(unsafe.Pointer(in.Secret)) + out.DownwardAPI = (*v1.DownwardAPIProjection)(unsafe.Pointer(in.DownwardAPI)) + out.ConfigMap = (*v1.ConfigMapProjection)(unsafe.Pointer(in.ConfigMap)) + return nil +} + +// Convert_core_VolumeProjection_To_v1_VolumeProjection is an autogenerated conversion function. 
+func Convert_core_VolumeProjection_To_v1_VolumeProjection(in *core.VolumeProjection, out *v1.VolumeProjection, s conversion.Scope) error { + return autoConvert_core_VolumeProjection_To_v1_VolumeProjection(in, out, s) +} + +func autoConvert_v1_VolumeSource_To_core_VolumeSource(in *v1.VolumeSource, out *core.VolumeSource, s conversion.Scope) error { + out.HostPath = (*core.HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) + out.EmptyDir = (*core.EmptyDirVolumeSource)(unsafe.Pointer(in.EmptyDir)) + out.GCEPersistentDisk = (*core.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) + out.AWSElasticBlockStore = (*core.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) + out.GitRepo = (*core.GitRepoVolumeSource)(unsafe.Pointer(in.GitRepo)) + out.Secret = (*core.SecretVolumeSource)(unsafe.Pointer(in.Secret)) + out.NFS = (*core.NFSVolumeSource)(unsafe.Pointer(in.NFS)) + out.ISCSI = (*core.ISCSIVolumeSource)(unsafe.Pointer(in.ISCSI)) + out.Glusterfs = (*core.GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) + out.PersistentVolumeClaim = (*core.PersistentVolumeClaimVolumeSource)(unsafe.Pointer(in.PersistentVolumeClaim)) + out.RBD = (*core.RBDVolumeSource)(unsafe.Pointer(in.RBD)) + out.FlexVolume = (*core.FlexVolumeSource)(unsafe.Pointer(in.FlexVolume)) + out.Cinder = (*core.CinderVolumeSource)(unsafe.Pointer(in.Cinder)) + out.CephFS = (*core.CephFSVolumeSource)(unsafe.Pointer(in.CephFS)) + out.Flocker = (*core.FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) + out.DownwardAPI = (*core.DownwardAPIVolumeSource)(unsafe.Pointer(in.DownwardAPI)) + out.FC = (*core.FCVolumeSource)(unsafe.Pointer(in.FC)) + out.AzureFile = (*core.AzureFileVolumeSource)(unsafe.Pointer(in.AzureFile)) + out.ConfigMap = (*core.ConfigMapVolumeSource)(unsafe.Pointer(in.ConfigMap)) + out.VsphereVolume = (*core.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) + out.Quobyte = (*core.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) + out.AzureDisk = (*core.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) + out.PhotonPersistentDisk = (*core.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) + out.Projected = (*core.ProjectedVolumeSource)(unsafe.Pointer(in.Projected)) + out.PortworxVolume = (*core.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) + out.ScaleIO = (*core.ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO)) + out.StorageOS = (*core.StorageOSVolumeSource)(unsafe.Pointer(in.StorageOS)) + return nil +} + +// Convert_v1_VolumeSource_To_core_VolumeSource is an autogenerated conversion function. 
+func Convert_v1_VolumeSource_To_core_VolumeSource(in *v1.VolumeSource, out *core.VolumeSource, s conversion.Scope) error { + return autoConvert_v1_VolumeSource_To_core_VolumeSource(in, out, s) +} + +func autoConvert_core_VolumeSource_To_v1_VolumeSource(in *core.VolumeSource, out *v1.VolumeSource, s conversion.Scope) error { + out.HostPath = (*v1.HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) + out.EmptyDir = (*v1.EmptyDirVolumeSource)(unsafe.Pointer(in.EmptyDir)) + out.GCEPersistentDisk = (*v1.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) + out.AWSElasticBlockStore = (*v1.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) + out.GitRepo = (*v1.GitRepoVolumeSource)(unsafe.Pointer(in.GitRepo)) + out.Secret = (*v1.SecretVolumeSource)(unsafe.Pointer(in.Secret)) + out.NFS = (*v1.NFSVolumeSource)(unsafe.Pointer(in.NFS)) + out.ISCSI = (*v1.ISCSIVolumeSource)(unsafe.Pointer(in.ISCSI)) + out.Glusterfs = (*v1.GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) + out.PersistentVolumeClaim = (*v1.PersistentVolumeClaimVolumeSource)(unsafe.Pointer(in.PersistentVolumeClaim)) + out.RBD = (*v1.RBDVolumeSource)(unsafe.Pointer(in.RBD)) + out.Quobyte = (*v1.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) + out.FlexVolume = (*v1.FlexVolumeSource)(unsafe.Pointer(in.FlexVolume)) + out.Cinder = (*v1.CinderVolumeSource)(unsafe.Pointer(in.Cinder)) + out.CephFS = (*v1.CephFSVolumeSource)(unsafe.Pointer(in.CephFS)) + out.Flocker = (*v1.FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) + out.DownwardAPI = (*v1.DownwardAPIVolumeSource)(unsafe.Pointer(in.DownwardAPI)) + out.FC = (*v1.FCVolumeSource)(unsafe.Pointer(in.FC)) + out.AzureFile = (*v1.AzureFileVolumeSource)(unsafe.Pointer(in.AzureFile)) + out.ConfigMap = (*v1.ConfigMapVolumeSource)(unsafe.Pointer(in.ConfigMap)) + out.VsphereVolume = (*v1.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) + out.AzureDisk = (*v1.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) + out.PhotonPersistentDisk = (*v1.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) + out.Projected = (*v1.ProjectedVolumeSource)(unsafe.Pointer(in.Projected)) + out.PortworxVolume = (*v1.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) + out.ScaleIO = (*v1.ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO)) + out.StorageOS = (*v1.StorageOSVolumeSource)(unsafe.Pointer(in.StorageOS)) + return nil +} + +// Convert_core_VolumeSource_To_v1_VolumeSource is an autogenerated conversion function. +func Convert_core_VolumeSource_To_v1_VolumeSource(in *core.VolumeSource, out *v1.VolumeSource, s conversion.Scope) error { + return autoConvert_core_VolumeSource_To_v1_VolumeSource(in, out, s) +} + +func autoConvert_v1_VsphereVirtualDiskVolumeSource_To_core_VsphereVirtualDiskVolumeSource(in *v1.VsphereVirtualDiskVolumeSource, out *core.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { + out.VolumePath = in.VolumePath + out.FSType = in.FSType + out.StoragePolicyName = in.StoragePolicyName + out.StoragePolicyID = in.StoragePolicyID + return nil +} + +// Convert_v1_VsphereVirtualDiskVolumeSource_To_core_VsphereVirtualDiskVolumeSource is an autogenerated conversion function. 
+func Convert_v1_VsphereVirtualDiskVolumeSource_To_core_VsphereVirtualDiskVolumeSource(in *v1.VsphereVirtualDiskVolumeSource, out *core.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { + return autoConvert_v1_VsphereVirtualDiskVolumeSource_To_core_VsphereVirtualDiskVolumeSource(in, out, s) +} + +func autoConvert_core_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in *core.VsphereVirtualDiskVolumeSource, out *v1.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { + out.VolumePath = in.VolumePath + out.FSType = in.FSType + out.StoragePolicyName = in.StoragePolicyName + out.StoragePolicyID = in.StoragePolicyID + return nil +} + +// Convert_core_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource is an autogenerated conversion function. +func Convert_core_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in *core.VsphereVirtualDiskVolumeSource, out *v1.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { + return autoConvert_core_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in, out, s) +} + +func autoConvert_v1_WeightedPodAffinityTerm_To_core_WeightedPodAffinityTerm(in *v1.WeightedPodAffinityTerm, out *core.WeightedPodAffinityTerm, s conversion.Scope) error { + out.Weight = in.Weight + if err := Convert_v1_PodAffinityTerm_To_core_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, s); err != nil { + return err + } + return nil +} + +// Convert_v1_WeightedPodAffinityTerm_To_core_WeightedPodAffinityTerm is an autogenerated conversion function. +func Convert_v1_WeightedPodAffinityTerm_To_core_WeightedPodAffinityTerm(in *v1.WeightedPodAffinityTerm, out *core.WeightedPodAffinityTerm, s conversion.Scope) error { + return autoConvert_v1_WeightedPodAffinityTerm_To_core_WeightedPodAffinityTerm(in, out, s) +} + +func autoConvert_core_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in *core.WeightedPodAffinityTerm, out *v1.WeightedPodAffinityTerm, s conversion.Scope) error { + out.Weight = in.Weight + if err := Convert_core_PodAffinityTerm_To_v1_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, s); err != nil { + return err + } + return nil +} + +// Convert_core_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm is an autogenerated conversion function. +func Convert_core_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in *core.WeightedPodAffinityTerm, out *v1.WeightedPodAffinityTerm, s conversion.Scope) error { + return autoConvert_core_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in, out, s) +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.defaults.go new file mode 100644 index 000000000000..c1ad46cd38a6 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.defaults.go @@ -0,0 +1,638 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. 
Do not edit it manually! + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&v1.ConfigMap{}, func(obj interface{}) { SetObjectDefaults_ConfigMap(obj.(*v1.ConfigMap)) }) + scheme.AddTypeDefaultingFunc(&v1.ConfigMapList{}, func(obj interface{}) { SetObjectDefaults_ConfigMapList(obj.(*v1.ConfigMapList)) }) + scheme.AddTypeDefaultingFunc(&v1.Endpoints{}, func(obj interface{}) { SetObjectDefaults_Endpoints(obj.(*v1.Endpoints)) }) + scheme.AddTypeDefaultingFunc(&v1.EndpointsList{}, func(obj interface{}) { SetObjectDefaults_EndpointsList(obj.(*v1.EndpointsList)) }) + scheme.AddTypeDefaultingFunc(&v1.LimitRange{}, func(obj interface{}) { SetObjectDefaults_LimitRange(obj.(*v1.LimitRange)) }) + scheme.AddTypeDefaultingFunc(&v1.LimitRangeList{}, func(obj interface{}) { SetObjectDefaults_LimitRangeList(obj.(*v1.LimitRangeList)) }) + scheme.AddTypeDefaultingFunc(&v1.Namespace{}, func(obj interface{}) { SetObjectDefaults_Namespace(obj.(*v1.Namespace)) }) + scheme.AddTypeDefaultingFunc(&v1.NamespaceList{}, func(obj interface{}) { SetObjectDefaults_NamespaceList(obj.(*v1.NamespaceList)) }) + scheme.AddTypeDefaultingFunc(&v1.Node{}, func(obj interface{}) { SetObjectDefaults_Node(obj.(*v1.Node)) }) + scheme.AddTypeDefaultingFunc(&v1.NodeList{}, func(obj interface{}) { SetObjectDefaults_NodeList(obj.(*v1.NodeList)) }) + scheme.AddTypeDefaultingFunc(&v1.PersistentVolume{}, func(obj interface{}) { SetObjectDefaults_PersistentVolume(obj.(*v1.PersistentVolume)) }) + scheme.AddTypeDefaultingFunc(&v1.PersistentVolumeClaim{}, func(obj interface{}) { SetObjectDefaults_PersistentVolumeClaim(obj.(*v1.PersistentVolumeClaim)) }) + scheme.AddTypeDefaultingFunc(&v1.PersistentVolumeClaimList{}, func(obj interface{}) { + SetObjectDefaults_PersistentVolumeClaimList(obj.(*v1.PersistentVolumeClaimList)) + }) + scheme.AddTypeDefaultingFunc(&v1.PersistentVolumeList{}, func(obj interface{}) { SetObjectDefaults_PersistentVolumeList(obj.(*v1.PersistentVolumeList)) }) + scheme.AddTypeDefaultingFunc(&v1.Pod{}, func(obj interface{}) { SetObjectDefaults_Pod(obj.(*v1.Pod)) }) + scheme.AddTypeDefaultingFunc(&v1.PodList{}, func(obj interface{}) { SetObjectDefaults_PodList(obj.(*v1.PodList)) }) + scheme.AddTypeDefaultingFunc(&v1.PodTemplate{}, func(obj interface{}) { SetObjectDefaults_PodTemplate(obj.(*v1.PodTemplate)) }) + scheme.AddTypeDefaultingFunc(&v1.PodTemplateList{}, func(obj interface{}) { SetObjectDefaults_PodTemplateList(obj.(*v1.PodTemplateList)) }) + scheme.AddTypeDefaultingFunc(&v1.ReplicationController{}, func(obj interface{}) { SetObjectDefaults_ReplicationController(obj.(*v1.ReplicationController)) }) + scheme.AddTypeDefaultingFunc(&v1.ReplicationControllerList{}, func(obj interface{}) { + SetObjectDefaults_ReplicationControllerList(obj.(*v1.ReplicationControllerList)) + }) + scheme.AddTypeDefaultingFunc(&v1.ResourceQuota{}, func(obj interface{}) { SetObjectDefaults_ResourceQuota(obj.(*v1.ResourceQuota)) }) + scheme.AddTypeDefaultingFunc(&v1.ResourceQuotaList{}, func(obj interface{}) { SetObjectDefaults_ResourceQuotaList(obj.(*v1.ResourceQuotaList)) }) + scheme.AddTypeDefaultingFunc(&v1.Secret{}, func(obj interface{}) { SetObjectDefaults_Secret(obj.(*v1.Secret)) }) + 
scheme.AddTypeDefaultingFunc(&v1.SecretList{}, func(obj interface{}) { SetObjectDefaults_SecretList(obj.(*v1.SecretList)) }) + scheme.AddTypeDefaultingFunc(&v1.Service{}, func(obj interface{}) { SetObjectDefaults_Service(obj.(*v1.Service)) }) + scheme.AddTypeDefaultingFunc(&v1.ServiceList{}, func(obj interface{}) { SetObjectDefaults_ServiceList(obj.(*v1.ServiceList)) }) + return nil +} + +func SetObjectDefaults_ConfigMap(in *v1.ConfigMap) { + SetDefaults_ConfigMap(in) +} + +func SetObjectDefaults_ConfigMapList(in *v1.ConfigMapList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_ConfigMap(a) + } +} + +func SetObjectDefaults_Endpoints(in *v1.Endpoints) { + SetDefaults_Endpoints(in) +} + +func SetObjectDefaults_EndpointsList(in *v1.EndpointsList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_Endpoints(a) + } +} + +func SetObjectDefaults_LimitRange(in *v1.LimitRange) { + for i := range in.Spec.Limits { + a := &in.Spec.Limits[i] + SetDefaults_LimitRangeItem(a) + SetDefaults_ResourceList(&a.Max) + SetDefaults_ResourceList(&a.Min) + SetDefaults_ResourceList(&a.Default) + SetDefaults_ResourceList(&a.DefaultRequest) + SetDefaults_ResourceList(&a.MaxLimitRequestRatio) + } +} + +func SetObjectDefaults_LimitRangeList(in *v1.LimitRangeList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_LimitRange(a) + } +} + +func SetObjectDefaults_Namespace(in *v1.Namespace) { + SetDefaults_NamespaceStatus(&in.Status) +} + +func SetObjectDefaults_NamespaceList(in *v1.NamespaceList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_Namespace(a) + } +} + +func SetObjectDefaults_Node(in *v1.Node) { + SetDefaults_Node(in) + SetDefaults_NodeStatus(&in.Status) + SetDefaults_ResourceList(&in.Status.Capacity) + SetDefaults_ResourceList(&in.Status.Allocatable) +} + +func SetObjectDefaults_NodeList(in *v1.NodeList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_Node(a) + } +} + +func SetObjectDefaults_PersistentVolume(in *v1.PersistentVolume) { + SetDefaults_PersistentVolume(in) + SetDefaults_ResourceList(&in.Spec.Capacity) + if in.Spec.PersistentVolumeSource.HostPath != nil { + SetDefaults_HostPathVolumeSource(in.Spec.PersistentVolumeSource.HostPath) + } + if in.Spec.PersistentVolumeSource.RBD != nil { + SetDefaults_RBDPersistentVolumeSource(in.Spec.PersistentVolumeSource.RBD) + } + if in.Spec.PersistentVolumeSource.ISCSI != nil { + SetDefaults_ISCSIPersistentVolumeSource(in.Spec.PersistentVolumeSource.ISCSI) + } + if in.Spec.PersistentVolumeSource.AzureDisk != nil { + SetDefaults_AzureDiskVolumeSource(in.Spec.PersistentVolumeSource.AzureDisk) + } + if in.Spec.PersistentVolumeSource.ScaleIO != nil { + SetDefaults_ScaleIOPersistentVolumeSource(in.Spec.PersistentVolumeSource.ScaleIO) + } +} + +func SetObjectDefaults_PersistentVolumeClaim(in *v1.PersistentVolumeClaim) { + SetDefaults_PersistentVolumeClaim(in) + SetDefaults_ResourceList(&in.Spec.Resources.Limits) + SetDefaults_ResourceList(&in.Spec.Resources.Requests) + SetDefaults_ResourceList(&in.Status.Capacity) +} + +func SetObjectDefaults_PersistentVolumeClaimList(in *v1.PersistentVolumeClaimList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_PersistentVolumeClaim(a) + } +} + +func SetObjectDefaults_PersistentVolumeList(in *v1.PersistentVolumeList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_PersistentVolume(a) + } +} + +func SetObjectDefaults_Pod(in *v1.Pod) { + SetDefaults_Pod(in) + SetDefaults_PodSpec(&in.Spec) 
+ for i := range in.Spec.Volumes { + a := &in.Spec.Volumes[i] + SetDefaults_Volume(a) + if a.VolumeSource.HostPath != nil { + SetDefaults_HostPathVolumeSource(a.VolumeSource.HostPath) + } + if a.VolumeSource.Secret != nil { + SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) + } + if a.VolumeSource.ISCSI != nil { + SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) + } + if a.VolumeSource.RBD != nil { + SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) + } + if a.VolumeSource.DownwardAPI != nil { + SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) + for j := range a.VolumeSource.DownwardAPI.Items { + b := &a.VolumeSource.DownwardAPI.Items[j] + if b.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.FieldRef) + } + } + } + if a.VolumeSource.ConfigMap != nil { + SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) + } + if a.VolumeSource.AzureDisk != nil { + SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) + } + if a.VolumeSource.Projected != nil { + SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + } + } + if a.VolumeSource.ScaleIO != nil { + SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } + } + for i := range in.Spec.InitContainers { + a := &in.Spec.InitContainers[i] + SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + SetDefaults_ResourceList(&a.Resources.Limits) + SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } + for i := range in.Spec.Containers { + a := &in.Spec.Containers[i] + SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + SetDefaults_ResourceList(&a.Resources.Limits) + SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + 
SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } +} + +func SetObjectDefaults_PodList(in *v1.PodList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_Pod(a) + } +} + +func SetObjectDefaults_PodTemplate(in *v1.PodTemplate) { + SetDefaults_PodSpec(&in.Template.Spec) + for i := range in.Template.Spec.Volumes { + a := &in.Template.Spec.Volumes[i] + SetDefaults_Volume(a) + if a.VolumeSource.HostPath != nil { + SetDefaults_HostPathVolumeSource(a.VolumeSource.HostPath) + } + if a.VolumeSource.Secret != nil { + SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) + } + if a.VolumeSource.ISCSI != nil { + SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) + } + if a.VolumeSource.RBD != nil { + SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) + } + if a.VolumeSource.DownwardAPI != nil { + SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) + for j := range a.VolumeSource.DownwardAPI.Items { + b := &a.VolumeSource.DownwardAPI.Items[j] + if b.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.FieldRef) + } + } + } + if a.VolumeSource.ConfigMap != nil { + SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) + } + if a.VolumeSource.AzureDisk != nil { + SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) + } + if a.VolumeSource.Projected != nil { + SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + } + } + if a.VolumeSource.ScaleIO != nil { + SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } + } + for i := range in.Template.Spec.InitContainers { + a := &in.Template.Spec.InitContainers[i] + SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + SetDefaults_ResourceList(&a.Resources.Limits) + SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } + for i := range in.Template.Spec.Containers { + a := &in.Template.Spec.Containers[i] + SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + SetDefaults_ResourceList(&a.Resources.Limits) + SetDefaults_ResourceList(&a.Resources.Requests) + if 
a.LivenessProbe != nil { + SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } +} + +func SetObjectDefaults_PodTemplateList(in *v1.PodTemplateList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_PodTemplate(a) + } +} + +func SetObjectDefaults_ReplicationController(in *v1.ReplicationController) { + SetDefaults_ReplicationController(in) + if in.Spec.Template != nil { + SetDefaults_PodSpec(&in.Spec.Template.Spec) + for i := range in.Spec.Template.Spec.Volumes { + a := &in.Spec.Template.Spec.Volumes[i] + SetDefaults_Volume(a) + if a.VolumeSource.HostPath != nil { + SetDefaults_HostPathVolumeSource(a.VolumeSource.HostPath) + } + if a.VolumeSource.Secret != nil { + SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) + } + if a.VolumeSource.ISCSI != nil { + SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) + } + if a.VolumeSource.RBD != nil { + SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) + } + if a.VolumeSource.DownwardAPI != nil { + SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) + for j := range a.VolumeSource.DownwardAPI.Items { + b := &a.VolumeSource.DownwardAPI.Items[j] + if b.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.FieldRef) + } + } + } + if a.VolumeSource.ConfigMap != nil { + SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) + } + if a.VolumeSource.AzureDisk != nil { + SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) + } + if a.VolumeSource.Projected != nil { + SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + } + } + if a.VolumeSource.ScaleIO != nil { + SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } + } + for i := range in.Spec.Template.Spec.InitContainers { + a := &in.Spec.Template.Spec.InitContainers[i] + SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + SetDefaults_ResourceList(&a.Resources.Limits) + SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if 
a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } + for i := range in.Spec.Template.Spec.Containers { + a := &in.Spec.Template.Spec.Containers[i] + SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + SetDefaults_ResourceList(&a.Resources.Limits) + SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } + } +} + +func SetObjectDefaults_ReplicationControllerList(in *v1.ReplicationControllerList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_ReplicationController(a) + } +} + +func SetObjectDefaults_ResourceQuota(in *v1.ResourceQuota) { + SetDefaults_ResourceList(&in.Spec.Hard) + SetDefaults_ResourceList(&in.Status.Hard) + SetDefaults_ResourceList(&in.Status.Used) +} + +func SetObjectDefaults_ResourceQuotaList(in *v1.ResourceQuotaList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_ResourceQuota(a) + } +} + +func SetObjectDefaults_Secret(in *v1.Secret) { + SetDefaults_Secret(in) +} + +func SetObjectDefaults_SecretList(in *v1.SecretList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_Secret(a) + } +} + +func SetObjectDefaults_Service(in *v1.Service) { + SetDefaults_Service(in) +} + +func SetObjectDefaults_ServiceList(in *v1.ServiceList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_Service(a) + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/validation/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/BUILD new file mode 100644 index 000000000000..e2d08109daaf --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/BUILD @@ -0,0 +1,83 @@ +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "events.go", + "validation.go", + ], + importpath = "k8s.io/kubernetes/pkg/apis/core/validation", + visibility = ["//visibility:public"], + deps = [ + "//pkg/api/legacyscheme:go_default_library", + "//pkg/api/service:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/helper:go_default_library", + "//pkg/apis/core/pods:go_default_library", + "//pkg/apis/core/v1:go_default_library", + "//pkg/apis/core/v1/helper:go_default_library", + "//pkg/capabilities:go_default_library", + "//pkg/features:go_default_library", + "//pkg/fieldpath:go_default_library", + "//pkg/security/apparmor:go_default_library", + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", + 
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/validation:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "events_test.go", + "validation_test.go", + ], + importpath = "k8s.io/kubernetes/pkg/apis/core/validation", + library = ":go_default_library", + deps = [ + "//pkg/api/legacyscheme:go_default_library", + "//pkg/api/testapi:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/helper:go_default_library", + "//pkg/capabilities:go_default_library", + "//pkg/security/apparmor:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/api/validation/OWNERS b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/OWNERS similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/api/validation/OWNERS rename to vendor/k8s.io/kubernetes/pkg/apis/core/validation/OWNERS diff --git a/vendor/k8s.io/kubernetes/pkg/api/validation/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/doc.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/api/validation/doc.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/validation/doc.go diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/validation/events.go b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/events.go new file mode 100644 index 000000000000..ab265bd76448 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/events.go @@ -0,0 +1,129 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package validation + +import ( + "fmt" + "time" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/kubernetes/pkg/api/legacyscheme" + "k8s.io/kubernetes/pkg/apis/core" +) + +const ( + ReportingInstanceLengthLimit = 128 + ActionLengthLimit = 128 + ReasonLengthLimit = 128 + NoteLengthLimit = 1024 +) + +// ValidateEvent makes sure that the event makes sense. +func ValidateEvent(event *core.Event) field.ErrorList { + allErrs := field.ErrorList{} + // Because go + zeroTime := time.Time{} + + // "New" Events need to have EventTime set, so it's validating old object. + if event.EventTime.Time == zeroTime { + // Make sure event.Namespace and the involvedInvolvedObject.Namespace agree + if len(event.InvolvedObject.Namespace) == 0 { + // event.Namespace must also be empty (or "default", for compatibility with old clients) + if event.Namespace != metav1.NamespaceNone && event.Namespace != metav1.NamespaceDefault { + allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "does not match event.namespace")) + } + } else { + // event namespace must match + if event.Namespace != event.InvolvedObject.Namespace { + allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "does not match event.namespace")) + } + } + + } else { + if len(event.InvolvedObject.Namespace) == 0 && event.Namespace != metav1.NamespaceSystem { + allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "does not match event.namespace")) + } + if len(event.ReportingController) == 0 { + allErrs = append(allErrs, field.Required(field.NewPath("reportingController"), "")) + } + for _, msg := range validation.IsQualifiedName(event.ReportingController) { + allErrs = append(allErrs, field.Invalid(field.NewPath("reportingController"), event.ReportingController, msg)) + } + if len(event.ReportingInstance) == 0 { + allErrs = append(allErrs, field.Required(field.NewPath("reportingInstance"), "")) + } + if len(event.ReportingInstance) > ReportingInstanceLengthLimit { + allErrs = append(allErrs, field.Invalid(field.NewPath("repotingIntance"), "", fmt.Sprintf("can have at most %v characters", ReportingInstanceLengthLimit))) + } + if len(event.Action) == 0 { + allErrs = append(allErrs, field.Required(field.NewPath("action"), "")) + } + if len(event.Action) > ActionLengthLimit { + allErrs = append(allErrs, field.Invalid(field.NewPath("action"), "", fmt.Sprintf("can have at most %v characters", ActionLengthLimit))) + } + if len(event.Reason) == 0 { + allErrs = append(allErrs, field.Required(field.NewPath("reason"), "")) + } + if len(event.Reason) > ReasonLengthLimit { + allErrs = append(allErrs, field.Invalid(field.NewPath("reason"), "", fmt.Sprintf("can have at most %v characters", ReasonLengthLimit))) + } + if len(event.Message) > NoteLengthLimit { + allErrs = append(allErrs, field.Invalid(field.NewPath("message"), "", fmt.Sprintf("can have at most %v characters", NoteLengthLimit))) + } + } + + // For kinds we recognize, make sure InvolvedObject.Namespace is set for namespaced kinds + if namespaced, err := isNamespacedKind(event.InvolvedObject.Kind, event.InvolvedObject.APIVersion); err == nil { + if namespaced && len(event.InvolvedObject.Namespace) == 0 { + allErrs = 
append(allErrs, field.Required(field.NewPath("involvedObject", "namespace"), fmt.Sprintf("required for kind %s", event.InvolvedObject.Kind))) + } + if !namespaced && len(event.InvolvedObject.Namespace) > 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, fmt.Sprintf("not allowed for kind %s", event.InvolvedObject.Kind))) + } + } + + for _, msg := range validation.IsDNS1123Subdomain(event.Namespace) { + allErrs = append(allErrs, field.Invalid(field.NewPath("namespace"), event.Namespace, msg)) + } + return allErrs +} + +// Check whether the kind in groupVersion is scoped at the root of the api hierarchy +func isNamespacedKind(kind, groupVersion string) (bool, error) { + gv, err := schema.ParseGroupVersion(groupVersion) + if err != nil { + return false, err + } + g, err := legacyscheme.Registry.Group(gv.Group) + if err != nil { + return false, err + } + + restMapping, err := g.RESTMapper.RESTMapping(schema.GroupKind{Group: gv.Group, Kind: kind}, gv.Version) + if err != nil { + return false, err + } + scopeName := restMapping.Scope.Name() + if scopeName == meta.RESTScopeNameNamespace { + return true, nil + } + return false, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go new file mode 100644 index 000000000000..655d0bf21635 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go @@ -0,0 +1,4860 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "encoding/json" + "fmt" + "math" + "net" + "path" + "path/filepath" + "reflect" + "regexp" + "strings" + + "github.com/golang/glog" + + "k8s.io/api/core/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/resource" + apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/diff" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" + utilfeature "k8s.io/apiserver/pkg/util/feature" + apiservice "k8s.io/kubernetes/pkg/api/service" + "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/helper" + podshelper "k8s.io/kubernetes/pkg/apis/core/pods" + corev1 "k8s.io/kubernetes/pkg/apis/core/v1" + v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" + "k8s.io/kubernetes/pkg/capabilities" + "k8s.io/kubernetes/pkg/features" + "k8s.io/kubernetes/pkg/fieldpath" + "k8s.io/kubernetes/pkg/security/apparmor" +) + +// TODO: delete this global variable when we enable the validation of common +// fields by default. 
+var RepairMalformedUpdates bool = apimachineryvalidation.RepairMalformedUpdates + +const isNegativeErrorMsg string = apimachineryvalidation.IsNegativeErrorMsg +const isInvalidQuotaResource string = `must be a standard resource for quota` +const fieldImmutableErrorMsg string = apimachineryvalidation.FieldImmutableErrorMsg +const isNotIntegerErrorMsg string = `must be an integer` +const isNotPositiveErrorMsg string = `must be greater than zero` + +var pdPartitionErrorMsg string = validation.InclusiveRangeError(1, 255) +var fileModeErrorMsg string = "must be a number between 0 and 0777 (octal), both inclusive" + +// BannedOwners is a black list of object that are not allowed to be owners. +var BannedOwners = apimachineryvalidation.BannedOwners + +var iscsiInitiatorIqnRegex = regexp.MustCompile(`iqn\.\d{4}-\d{2}\.([[:alnum:]-.]+)(:[^,;*&$|\s]+)$`) +var iscsiInitiatorEuiRegex = regexp.MustCompile(`^eui.[[:alnum:]]{16}$`) +var iscsiInitiatorNaaRegex = regexp.MustCompile(`^naa.[[:alnum:]]{32}$`) + +// ValidateHasLabel requires that metav1.ObjectMeta has a Label with key and expectedValue +func ValidateHasLabel(meta metav1.ObjectMeta, fldPath *field.Path, key, expectedValue string) field.ErrorList { + allErrs := field.ErrorList{} + actualValue, found := meta.Labels[key] + if !found { + allErrs = append(allErrs, field.Required(fldPath.Child("labels").Key(key), + fmt.Sprintf("must be '%s'", expectedValue))) + return allErrs + } + if actualValue != expectedValue { + allErrs = append(allErrs, field.Invalid(fldPath.Child("labels").Key(key), meta.Labels, + fmt.Sprintf("must be '%s'", expectedValue))) + } + return allErrs +} + +// ValidateAnnotations validates that a set of annotations are correctly defined. +func ValidateAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { + return apimachineryvalidation.ValidateAnnotations(annotations, fldPath) +} + +func ValidateDNS1123Label(value string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for _, msg := range validation.IsDNS1123Label(value) { + allErrs = append(allErrs, field.Invalid(fldPath, value, msg)) + } + return allErrs +} + +// ValidateDNS1123Subdomain validates that a name is a proper DNS subdomain. +func ValidateDNS1123Subdomain(value string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for _, msg := range validation.IsDNS1123Subdomain(value) { + allErrs = append(allErrs, field.Invalid(fldPath, value, msg)) + } + return allErrs +} + +func ValidatePodSpecificAnnotations(annotations map[string]string, spec *core.PodSpec, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if value, isMirror := annotations[core.MirrorPodAnnotationKey]; isMirror { + if len(spec.NodeName) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Key(core.MirrorPodAnnotationKey), value, "must set spec.nodeName if mirror pod annotation is set")) + } + } + + if annotations[core.TolerationsAnnotationKey] != "" { + allErrs = append(allErrs, ValidateTolerationsInPodAnnotations(annotations, fldPath)...) + } + + allErrs = append(allErrs, ValidateSeccompPodAnnotations(annotations, fldPath)...) + allErrs = append(allErrs, ValidateAppArmorPodAnnotations(annotations, spec, fldPath)...) 
+ + sysctls, err := helper.SysctlsFromPodAnnotation(annotations[core.SysctlsPodAnnotationKey]) + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Key(core.SysctlsPodAnnotationKey), annotations[core.SysctlsPodAnnotationKey], err.Error())) + } else { + allErrs = append(allErrs, validateSysctls(sysctls, fldPath.Key(core.SysctlsPodAnnotationKey))...) + } + unsafeSysctls, err := helper.SysctlsFromPodAnnotation(annotations[core.UnsafeSysctlsPodAnnotationKey]) + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Key(core.UnsafeSysctlsPodAnnotationKey), annotations[core.UnsafeSysctlsPodAnnotationKey], err.Error())) + } else { + allErrs = append(allErrs, validateSysctls(unsafeSysctls, fldPath.Key(core.UnsafeSysctlsPodAnnotationKey))...) + } + inBoth := sysctlIntersection(sysctls, unsafeSysctls) + if len(inBoth) > 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Key(core.UnsafeSysctlsPodAnnotationKey), strings.Join(inBoth, ", "), "can not be safe and unsafe")) + } + + return allErrs +} + +// ValidateTolerationsInPodAnnotations tests that the serialized tolerations in Pod.Annotations has valid data +func ValidateTolerationsInPodAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + tolerations, err := helper.GetTolerationsFromPodAnnotations(annotations) + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath, core.TolerationsAnnotationKey, err.Error())) + return allErrs + } + + if len(tolerations) > 0 { + allErrs = append(allErrs, ValidateTolerations(tolerations, fldPath.Child(core.TolerationsAnnotationKey))...) + } + + return allErrs +} + +func ValidatePodSpecificAnnotationUpdates(newPod, oldPod *core.Pod, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + newAnnotations := newPod.Annotations + oldAnnotations := oldPod.Annotations + for k, oldVal := range oldAnnotations { + if newVal, exists := newAnnotations[k]; exists && newVal == oldVal { + continue // No change. + } + if strings.HasPrefix(k, apparmor.ContainerAnnotationKeyPrefix) { + allErrs = append(allErrs, field.Forbidden(fldPath.Key(k), "may not remove or update AppArmor annotations")) + } + if k == core.MirrorPodAnnotationKey { + allErrs = append(allErrs, field.Forbidden(fldPath.Key(k), "may not remove or update mirror pod annotation")) + } + } + // Check for additions + for k := range newAnnotations { + if _, ok := oldAnnotations[k]; ok { + continue // No change. + } + if strings.HasPrefix(k, apparmor.ContainerAnnotationKeyPrefix) { + allErrs = append(allErrs, field.Forbidden(fldPath.Key(k), "may not add AppArmor annotations")) + } + if k == core.MirrorPodAnnotationKey { + allErrs = append(allErrs, field.Forbidden(fldPath.Key(k), "may not add mirror pod annotation")) + } + } + allErrs = append(allErrs, ValidatePodSpecificAnnotations(newAnnotations, &newPod.Spec, fldPath)...) + return allErrs +} + +func ValidateEndpointsSpecificAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + return allErrs +} + +func ValidateOwnerReferences(ownerReferences []metav1.OwnerReference, fldPath *field.Path) field.ErrorList { + return apimachineryvalidation.ValidateOwnerReferences(ownerReferences, fldPath) +} + +// ValidateNameFunc validates that the provided name is valid for a given resource type. +// Not all resources have the same validation rules for names. Prefix is true +// if the name will have a value appended to it. 
If the name is not valid, +// this returns a list of descriptions of individual characteristics of the +// value that were not valid. Otherwise this returns an empty list or nil. +type ValidateNameFunc apimachineryvalidation.ValidateNameFunc + +// maskTrailingDash replaces the final character of a string with a subdomain safe +// value if is a dash. +func maskTrailingDash(name string) string { + if strings.HasSuffix(name, "-") { + return name[:len(name)-2] + "a" + } + return name +} + +// ValidatePodName can be used to check whether the given pod name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidatePodName = NameIsDNSSubdomain + +// ValidateReplicationControllerName can be used to check whether the given replication +// controller name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidateReplicationControllerName = NameIsDNSSubdomain + +// ValidateServiceName can be used to check whether the given service name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidateServiceName = NameIsDNS1035Label + +// ValidateNodeName can be used to check whether the given node name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidateNodeName = NameIsDNSSubdomain + +// ValidateNamespaceName can be used to check whether the given namespace name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidateNamespaceName = apimachineryvalidation.ValidateNamespaceName + +// ValidateLimitRangeName can be used to check whether the given limit range name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidateLimitRangeName = NameIsDNSSubdomain + +// ValidateResourceQuotaName can be used to check whether the given +// resource quota name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidateResourceQuotaName = NameIsDNSSubdomain + +// ValidateSecretName can be used to check whether the given secret name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidateSecretName = NameIsDNSSubdomain + +// ValidateServiceAccountName can be used to check whether the given service account name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidateServiceAccountName = apimachineryvalidation.ValidateServiceAccountName + +// ValidateEndpointsName can be used to check whether the given endpoints name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidateEndpointsName = NameIsDNSSubdomain + +// ValidateClusterName can be used to check whether the given cluster name is valid. +var ValidateClusterName = apimachineryvalidation.ValidateClusterName + +// ValidateClassName can be used to check whether the given class name is valid. +// It is defined here to avoid import cycle between pkg/apis/storage/validation +// (where it should be) and this file. 
+var ValidateClassName = NameIsDNSSubdomain + +// ValidatePiorityClassName can be used to check whether the given priority +// class name is valid. +var ValidatePriorityClassName = NameIsDNSSubdomain + +// TODO update all references to these functions to point to the apimachineryvalidation ones +// NameIsDNSSubdomain is a ValidateNameFunc for names that must be a DNS subdomain. +func NameIsDNSSubdomain(name string, prefix bool) []string { + return apimachineryvalidation.NameIsDNSSubdomain(name, prefix) +} + +// NameIsDNSLabel is a ValidateNameFunc for names that must be a DNS 1123 label. +func NameIsDNSLabel(name string, prefix bool) []string { + return apimachineryvalidation.NameIsDNSLabel(name, prefix) +} + +// NameIsDNS1035Label is a ValidateNameFunc for names that must be a DNS 952 label. +func NameIsDNS1035Label(name string, prefix bool) []string { + return apimachineryvalidation.NameIsDNS1035Label(name, prefix) +} + +// Validates that given value is not negative. +func ValidateNonnegativeField(value int64, fldPath *field.Path) field.ErrorList { + return apimachineryvalidation.ValidateNonnegativeField(value, fldPath) +} + +// Validates that a Quantity is not negative +func ValidateNonnegativeQuantity(value resource.Quantity, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if value.Cmp(resource.Quantity{}) < 0 { + allErrs = append(allErrs, field.Invalid(fldPath, value.String(), isNegativeErrorMsg)) + } + return allErrs +} + +// Validates that a Quantity is positive +func ValidatePositiveQuantityValue(value resource.Quantity, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if value.Cmp(resource.Quantity{}) <= 0 { + allErrs = append(allErrs, field.Invalid(fldPath, value.String(), isNotPositiveErrorMsg)) + } + return allErrs +} + +func ValidateImmutableField(newVal, oldVal interface{}, fldPath *field.Path) field.ErrorList { + return apimachineryvalidation.ValidateImmutableField(newVal, oldVal, fldPath) +} + +func ValidateImmutableAnnotation(newVal string, oldVal string, annotation string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if oldVal != newVal { + allErrs = append(allErrs, field.Invalid(fldPath.Child("annotations", annotation), newVal, fieldImmutableErrorMsg)) + } + return allErrs +} + +// ValidateObjectMeta validates an object's metadata on creation. It expects that name generation has already +// been performed. +// It doesn't return an error for rootscoped resources with namespace, because namespace should already be cleared before. +// TODO: Remove calls to this method scattered in validations of specific resources, e.g., ValidatePodUpdate. +func ValidateObjectMeta(meta *metav1.ObjectMeta, requiresNamespace bool, nameFn ValidateNameFunc, fldPath *field.Path) field.ErrorList { + allErrs := apimachineryvalidation.ValidateObjectMeta(meta, requiresNamespace, apimachineryvalidation.ValidateNameFunc(nameFn), fldPath) + // run additional checks for the finalizer name + for i := range meta.Finalizers { + allErrs = append(allErrs, validateKubeFinalizerName(string(meta.Finalizers[i]), fldPath.Child("finalizers").Index(i))...) 
+ } + return allErrs +} + +// ValidateObjectMetaUpdate validates an object's metadata when updated +func ValidateObjectMetaUpdate(newMeta, oldMeta *metav1.ObjectMeta, fldPath *field.Path) field.ErrorList { + allErrs := apimachineryvalidation.ValidateObjectMetaUpdate(newMeta, oldMeta, fldPath) + // run additional checks for the finalizer name + for i := range newMeta.Finalizers { + allErrs = append(allErrs, validateKubeFinalizerName(string(newMeta.Finalizers[i]), fldPath.Child("finalizers").Index(i))...) + } + + return allErrs +} + +func ValidateNoNewFinalizers(newFinalizers []string, oldFinalizers []string, fldPath *field.Path) field.ErrorList { + return apimachineryvalidation.ValidateNoNewFinalizers(newFinalizers, oldFinalizers, fldPath) +} + +func ValidateVolumes(volumes []core.Volume, fldPath *field.Path) (map[string]core.VolumeSource, field.ErrorList) { + allErrs := field.ErrorList{} + + allNames := sets.String{} + vols := make(map[string]core.VolumeSource) + for i, vol := range volumes { + idxPath := fldPath.Index(i) + namePath := idxPath.Child("name") + el := validateVolumeSource(&vol.VolumeSource, idxPath, vol.Name) + if len(vol.Name) == 0 { + el = append(el, field.Required(namePath, "")) + } else { + el = append(el, ValidateDNS1123Label(vol.Name, namePath)...) + } + if allNames.Has(vol.Name) { + el = append(el, field.Duplicate(namePath, vol.Name)) + } + if len(el) == 0 { + allNames.Insert(vol.Name) + vols[vol.Name] = vol.VolumeSource + } else { + allErrs = append(allErrs, el...) + } + + } + return vols, allErrs +} + +func IsMatchedVolume(name string, volumes map[string]core.VolumeSource) bool { + if _, ok := volumes[name]; ok { + return true + } else { + return false + } +} + +func isMatchedDevice(name string, volumes map[string]core.VolumeSource) (bool, bool) { + if source, ok := volumes[name]; ok { + if source.PersistentVolumeClaim != nil { + return true, true + } else { + return true, false + } + } else { + return false, false + } +} + +func mountNameAlreadyExists(name string, devices map[string]string) bool { + if _, ok := devices[name]; ok { + return true + } else { + return false + } +} + +func mountPathAlreadyExists(mountPath string, devices map[string]string) bool { + for _, devPath := range devices { + if mountPath == devPath { + return true + } + } + + return false +} + +func deviceNameAlreadyExists(name string, mounts map[string]string) bool { + if _, ok := mounts[name]; ok { + return true + } else { + return false + } +} + +func devicePathAlreadyExists(devicePath string, mounts map[string]string) bool { + for _, mountPath := range mounts { + if mountPath == devicePath { + return true + } + } + + return false +} + +func validateVolumeSource(source *core.VolumeSource, fldPath *field.Path, volName string) field.ErrorList { + numVolumes := 0 + allErrs := field.ErrorList{} + if source.EmptyDir != nil { + numVolumes++ + if !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) { + if source.EmptyDir.SizeLimit != nil && source.EmptyDir.SizeLimit.Cmp(resource.Quantity{}) != 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("emptyDir").Child("sizeLimit"), "SizeLimit field disabled by feature-gate for EmptyDir volumes")) + } + } else { + if source.EmptyDir.SizeLimit != nil && source.EmptyDir.SizeLimit.Cmp(resource.Quantity{}) < 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("emptyDir").Child("sizeLimit"), "SizeLimit field must be a valid resource quantity")) + } + } + if 
!utilfeature.DefaultFeatureGate.Enabled(features.HugePages) && source.EmptyDir.Medium == core.StorageMediumHugePages { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("emptyDir").Child("medium"), "HugePages medium is disabled by feature-gate for EmptyDir volumes")) + } + } + if source.HostPath != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("hostPath"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateHostPathVolumeSource(source.HostPath, fldPath.Child("hostPath"))...) + } + } + if source.GitRepo != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("gitRepo"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateGitRepoVolumeSource(source.GitRepo, fldPath.Child("gitRepo"))...) + } + } + if source.GCEPersistentDisk != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("gcePersistentDisk"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateGCEPersistentDiskVolumeSource(source.GCEPersistentDisk, fldPath.Child("persistentDisk"))...) + } + } + if source.AWSElasticBlockStore != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("awsElasticBlockStore"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateAWSElasticBlockStoreVolumeSource(source.AWSElasticBlockStore, fldPath.Child("awsElasticBlockStore"))...) + } + } + if source.Secret != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("secret"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateSecretVolumeSource(source.Secret, fldPath.Child("secret"))...) + } + } + if source.NFS != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("nfs"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateNFSVolumeSource(source.NFS, fldPath.Child("nfs"))...) + } + } + if source.ISCSI != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("iscsi"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateISCSIVolumeSource(source.ISCSI, fldPath.Child("iscsi"))...) + } + if source.ISCSI.InitiatorName != nil && len(volName+":"+source.ISCSI.TargetPortal) > 64 { + tooLongErr := "Total length of : must be under 64 characters if iscsi.initiatorName is specified." + allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), volName, tooLongErr)) + } + } + if source.Glusterfs != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("glusterfs"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateGlusterfsVolumeSource(source.Glusterfs, fldPath.Child("glusterfs"))...) + } + } + if source.Flocker != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("flocker"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateFlockerVolumeSource(source.Flocker, fldPath.Child("flocker"))...) 
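// Illustrative sketch (editor's note, not part of the vendored diff): the long
// if-chain in validateVolumeSource implements a "specify exactly one volume type"
// rule by counting the non-nil members of the VolumeSource union. A condensed,
// self-contained version of that pattern, with hypothetical field names, looks
// like this.
package main

import "fmt"

type fakeVolumeSource struct {
	EmptyDir *struct{}
	HostPath *struct{ Path string }
	Secret   *struct{ SecretName string }
}

func validateFakeVolumeSource(src fakeVolumeSource) []string {
	var errs []string
	numVolumes := 0
	if src.EmptyDir != nil {
		numVolumes++
	}
	if src.HostPath != nil {
		if numVolumes > 0 {
			errs = append(errs, "hostPath: may not specify more than 1 volume type")
		} else {
			numVolumes++
		}
	}
	if src.Secret != nil {
		if numVolumes > 0 {
			errs = append(errs, "secret: may not specify more than 1 volume type")
		} else {
			numVolumes++
		}
	}
	if numVolumes == 0 {
		errs = append(errs, "must specify a volume type")
	}
	return errs
}

func main() {
	fmt.Println(validateFakeVolumeSource(fakeVolumeSource{})) // [must specify a volume type]
}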
+ } + } + if source.PersistentVolumeClaim != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("persistentVolumeClaim"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validatePersistentClaimVolumeSource(source.PersistentVolumeClaim, fldPath.Child("persistentVolumeClaim"))...) + } + } + if source.RBD != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("rbd"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateRBDVolumeSource(source.RBD, fldPath.Child("rbd"))...) + } + } + if source.Cinder != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("cinder"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateCinderVolumeSource(source.Cinder, fldPath.Child("cinder"))...) + } + } + if source.CephFS != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("cephFS"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateCephFSVolumeSource(source.CephFS, fldPath.Child("cephfs"))...) + } + } + if source.Quobyte != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("quobyte"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateQuobyteVolumeSource(source.Quobyte, fldPath.Child("quobyte"))...) + } + } + if source.DownwardAPI != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("downwarAPI"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateDownwardAPIVolumeSource(source.DownwardAPI, fldPath.Child("downwardAPI"))...) + } + } + if source.FC != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("fc"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateFCVolumeSource(source.FC, fldPath.Child("fc"))...) + } + } + if source.FlexVolume != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("flexVolume"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateFlexVolumeSource(source.FlexVolume, fldPath.Child("flexVolume"))...) + } + } + if source.ConfigMap != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("configMap"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateConfigMapVolumeSource(source.ConfigMap, fldPath.Child("configMap"))...) + } + } + + if source.AzureFile != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("azureFile"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateAzureFile(source.AzureFile, fldPath.Child("azureFile"))...) + } + } + + if source.VsphereVolume != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("vsphereVolume"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateVsphereVolumeSource(source.VsphereVolume, fldPath.Child("vsphereVolume"))...) 
+ } + } + if source.PhotonPersistentDisk != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("photonPersistentDisk"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validatePhotonPersistentDiskVolumeSource(source.PhotonPersistentDisk, fldPath.Child("photonPersistentDisk"))...) + } + } + if source.PortworxVolume != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("portworxVolume"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validatePortworxVolumeSource(source.PortworxVolume, fldPath.Child("portworxVolume"))...) + } + } + if source.AzureDisk != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("azureDisk"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateAzureDisk(source.AzureDisk, fldPath.Child("azureDisk"))...) + } + } + if source.StorageOS != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("storageos"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateStorageOSVolumeSource(source.StorageOS, fldPath.Child("storageos"))...) + } + } + if source.Projected != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("projected"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateProjectedVolumeSource(source.Projected, fldPath.Child("projected"))...) + } + } + if source.ScaleIO != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("scaleIO"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateScaleIOVolumeSource(source.ScaleIO, fldPath.Child("scaleIO"))...) + } + } + + if numVolumes == 0 { + allErrs = append(allErrs, field.Required(fldPath, "must specify a volume type")) + } + + return allErrs +} + +func validateHostPathVolumeSource(hostPath *core.HostPathVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(hostPath.Path) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("path"), "")) + return allErrs + } + + allErrs = append(allErrs, validatePathNoBacksteps(hostPath.Path, fldPath.Child("path"))...) + allErrs = append(allErrs, validateHostPathType(hostPath.Type, fldPath.Child("type"))...) + return allErrs +} + +func validateGitRepoVolumeSource(gitRepo *core.GitRepoVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(gitRepo.Repository) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("repository"), "")) + } + + pathErrs := validateLocalDescendingPath(gitRepo.Directory, fldPath.Child("directory")) + allErrs = append(allErrs, pathErrs...) 
+ return allErrs +} + +func validateISCSIVolumeSource(iscsi *core.ISCSIVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(iscsi.TargetPortal) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("targetPortal"), "")) + } + if len(iscsi.IQN) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("iqn"), "")) + } else { + if !strings.HasPrefix(iscsi.IQN, "iqn") && !strings.HasPrefix(iscsi.IQN, "eui") && !strings.HasPrefix(iscsi.IQN, "naa") { + allErrs = append(allErrs, field.Invalid(fldPath.Child("iqn"), iscsi.IQN, "must be valid format starting with iqn, eui, or naa")) + } else if strings.HasPrefix(iscsi.IQN, "iqn") && !iscsiInitiatorIqnRegex.MatchString(iscsi.IQN) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("iqn"), iscsi.IQN, "must be valid format")) + } else if strings.HasPrefix(iscsi.IQN, "eui") && !iscsiInitiatorEuiRegex.MatchString(iscsi.IQN) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("iqn"), iscsi.IQN, "must be valid format")) + } else if strings.HasPrefix(iscsi.IQN, "naa") && !iscsiInitiatorNaaRegex.MatchString(iscsi.IQN) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("iqn"), iscsi.IQN, "must be valid format")) + } + } + if iscsi.Lun < 0 || iscsi.Lun > 255 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("lun"), iscsi.Lun, validation.InclusiveRangeError(0, 255))) + } + if (iscsi.DiscoveryCHAPAuth || iscsi.SessionCHAPAuth) && iscsi.SecretRef == nil { + allErrs = append(allErrs, field.Required(fldPath.Child("secretRef"), "")) + } + if iscsi.InitiatorName != nil { + initiator := *iscsi.InitiatorName + if !strings.HasPrefix(initiator, "iqn") && !strings.HasPrefix(initiator, "eui") && !strings.HasPrefix(initiator, "naa") { + allErrs = append(allErrs, field.Invalid(fldPath.Child("initiatorname"), initiator, "must be valid format starting with iqn, eui, or naa")) + } + if strings.HasPrefix(initiator, "iqn") && !iscsiInitiatorIqnRegex.MatchString(initiator) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("initiatorname"), initiator, "must be valid format")) + } else if strings.HasPrefix(initiator, "eui") && !iscsiInitiatorEuiRegex.MatchString(initiator) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("initiatorname"), initiator, "must be valid format")) + } else if strings.HasPrefix(initiator, "naa") && !iscsiInitiatorNaaRegex.MatchString(initiator) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("initiatorname"), initiator, "must be valid format")) + } + } + return allErrs +} + +func validateISCSIPersistentVolumeSource(iscsi *core.ISCSIPersistentVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(iscsi.TargetPortal) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("targetPortal"), "")) + } + if len(iscsi.IQN) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("iqn"), "")) + } else { + if !strings.HasPrefix(iscsi.IQN, "iqn") && !strings.HasPrefix(iscsi.IQN, "eui") && !strings.HasPrefix(iscsi.IQN, "naa") { + allErrs = append(allErrs, field.Invalid(fldPath.Child("iqn"), iscsi.IQN, "must be valid format")) + } else if strings.HasPrefix(iscsi.IQN, "iqn") && !iscsiInitiatorIqnRegex.MatchString(iscsi.IQN) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("iqn"), iscsi.IQN, "must be valid format")) + } else if strings.HasPrefix(iscsi.IQN, "eui") && !iscsiInitiatorEuiRegex.MatchString(iscsi.IQN) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("iqn"), iscsi.IQN, "must be valid 
format")) + } else if strings.HasPrefix(iscsi.IQN, "naa") && !iscsiInitiatorNaaRegex.MatchString(iscsi.IQN) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("iqn"), iscsi.IQN, "must be valid format")) + } + } + if iscsi.Lun < 0 || iscsi.Lun > 255 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("lun"), iscsi.Lun, validation.InclusiveRangeError(0, 255))) + } + if (iscsi.DiscoveryCHAPAuth || iscsi.SessionCHAPAuth) && iscsi.SecretRef == nil { + allErrs = append(allErrs, field.Required(fldPath.Child("secretRef"), "")) + } + if iscsi.SecretRef != nil { + if len(iscsi.SecretRef.Name) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("secretRef", "name"), "")) + } + } + if iscsi.InitiatorName != nil { + initiator := *iscsi.InitiatorName + if !strings.HasPrefix(initiator, "iqn") && !strings.HasPrefix(initiator, "eui") && !strings.HasPrefix(initiator, "naa") { + allErrs = append(allErrs, field.Invalid(fldPath.Child("initiatorname"), initiator, "must be valid format")) + } + if strings.HasPrefix(initiator, "iqn") && !iscsiInitiatorIqnRegex.MatchString(initiator) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("initiatorname"), initiator, "must be valid format")) + } else if strings.HasPrefix(initiator, "eui") && !iscsiInitiatorEuiRegex.MatchString(initiator) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("initiatorname"), initiator, "must be valid format")) + } else if strings.HasPrefix(initiator, "naa") && !iscsiInitiatorNaaRegex.MatchString(initiator) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("initiatorname"), initiator, "must be valid format")) + } + } + return allErrs +} + +func validateFCVolumeSource(fc *core.FCVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(fc.TargetWWNs) < 1 && len(fc.WWIDs) < 1 { + allErrs = append(allErrs, field.Required(fldPath.Child("targetWWNs"), "must specify either targetWWNs or wwids, but not both")) + } + + if len(fc.TargetWWNs) != 0 && len(fc.WWIDs) != 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("targetWWNs"), fc.TargetWWNs, "targetWWNs and wwids can not be specified simultaneously")) + } + + if len(fc.TargetWWNs) != 0 { + if fc.Lun == nil { + allErrs = append(allErrs, field.Required(fldPath.Child("lun"), "lun is required if targetWWNs is specified")) + } else { + if *fc.Lun < 0 || *fc.Lun > 255 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("lun"), fc.Lun, validation.InclusiveRangeError(0, 255))) + } + } + } + return allErrs +} + +func validateGCEPersistentDiskVolumeSource(pd *core.GCEPersistentDiskVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(pd.PDName) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("pdName"), "")) + } + if pd.Partition < 0 || pd.Partition > 255 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("partition"), pd.Partition, pdPartitionErrorMsg)) + } + return allErrs +} + +func validateAWSElasticBlockStoreVolumeSource(PD *core.AWSElasticBlockStoreVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(PD.VolumeID) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("volumeID"), "")) + } + if PD.Partition < 0 || PD.Partition > 255 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("partition"), PD.Partition, pdPartitionErrorMsg)) + } + return allErrs +} + +func validateSecretVolumeSource(secretSource *core.SecretVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := 
field.ErrorList{} + if len(secretSource.SecretName) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("secretName"), "")) + } + + secretMode := secretSource.DefaultMode + if secretMode != nil && (*secretMode > 0777 || *secretMode < 0) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *secretMode, fileModeErrorMsg)) + } + + itemsPath := fldPath.Child("items") + for i, kp := range secretSource.Items { + itemPath := itemsPath.Index(i) + allErrs = append(allErrs, validateKeyToPath(&kp, itemPath)...) + } + return allErrs +} + +func validateConfigMapVolumeSource(configMapSource *core.ConfigMapVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(configMapSource.Name) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) + } + + configMapMode := configMapSource.DefaultMode + if configMapMode != nil && (*configMapMode > 0777 || *configMapMode < 0) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *configMapMode, fileModeErrorMsg)) + } + + itemsPath := fldPath.Child("items") + for i, kp := range configMapSource.Items { + itemPath := itemsPath.Index(i) + allErrs = append(allErrs, validateKeyToPath(&kp, itemPath)...) + } + return allErrs +} + +func validateKeyToPath(kp *core.KeyToPath, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(kp.Key) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("key"), "")) + } + if len(kp.Path) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("path"), "")) + } + allErrs = append(allErrs, validateLocalNonReservedPath(kp.Path, fldPath.Child("path"))...) + if kp.Mode != nil && (*kp.Mode > 0777 || *kp.Mode < 0) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("mode"), *kp.Mode, fileModeErrorMsg)) + } + + return allErrs +} + +func validatePersistentClaimVolumeSource(claim *core.PersistentVolumeClaimVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(claim.ClaimName) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("claimName"), "")) + } + return allErrs +} + +func validateNFSVolumeSource(nfs *core.NFSVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(nfs.Server) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("server"), "")) + } + if len(nfs.Path) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("path"), "")) + } + if !path.IsAbs(nfs.Path) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("path"), nfs.Path, "must be an absolute path")) + } + return allErrs +} + +func validateQuobyteVolumeSource(quobyte *core.QuobyteVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(quobyte.Registry) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("registry"), "must be a host:port pair or multiple pairs separated by commas")) + } else { + for _, hostPortPair := range strings.Split(quobyte.Registry, ",") { + if _, _, err := net.SplitHostPort(hostPortPair); err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("registry"), quobyte.Registry, "must be a host:port pair or multiple pairs separated by commas")) + } + } + } + + if len(quobyte.Volume) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("volume"), "")) + } + return allErrs +} + +func validateGlusterfsVolumeSource(glusterfs *core.GlusterfsVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if 
len(glusterfs.EndpointsName) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("endpoints"), "")) + } + if len(glusterfs.Path) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("path"), "")) + } + return allErrs +} + +func validateFlockerVolumeSource(flocker *core.FlockerVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(flocker.DatasetName) == 0 && len(flocker.DatasetUUID) == 0 { + //TODO: consider adding a RequiredOneOf() error for this and similar cases + allErrs = append(allErrs, field.Required(fldPath, "one of datasetName and datasetUUID is required")) + } + if len(flocker.DatasetName) != 0 && len(flocker.DatasetUUID) != 0 { + allErrs = append(allErrs, field.Invalid(fldPath, "resource", "datasetName and datasetUUID can not be specified simultaneously")) + } + if strings.Contains(flocker.DatasetName, "/") { + allErrs = append(allErrs, field.Invalid(fldPath.Child("datasetName"), flocker.DatasetName, "must not contain '/'")) + } + return allErrs +} + +var validVolumeDownwardAPIFieldPathExpressions = sets.NewString( + "metadata.name", + "metadata.namespace", + "metadata.labels", + "metadata.annotations", + "metadata.uid") + +func validateDownwardAPIVolumeFile(file *core.DownwardAPIVolumeFile, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if len(file.Path) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("path"), "")) + } + allErrs = append(allErrs, validateLocalNonReservedPath(file.Path, fldPath.Child("path"))...) + if file.FieldRef != nil { + allErrs = append(allErrs, validateObjectFieldSelector(file.FieldRef, &validVolumeDownwardAPIFieldPathExpressions, fldPath.Child("fieldRef"))...) + if file.ResourceFieldRef != nil { + allErrs = append(allErrs, field.Invalid(fldPath, "resource", "fieldRef and resourceFieldRef can not be specified simultaneously")) + } + } else if file.ResourceFieldRef != nil { + allErrs = append(allErrs, validateContainerResourceFieldSelector(file.ResourceFieldRef, &validContainerResourceFieldPathExpressions, fldPath.Child("resourceFieldRef"), true)...) + } else { + allErrs = append(allErrs, field.Required(fldPath, "one of fieldRef and resourceFieldRef is required")) + } + if file.Mode != nil && (*file.Mode > 0777 || *file.Mode < 0) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("mode"), *file.Mode, fileModeErrorMsg)) + } + + return allErrs +} + +func validateDownwardAPIVolumeSource(downwardAPIVolume *core.DownwardAPIVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + downwardAPIMode := downwardAPIVolume.DefaultMode + if downwardAPIMode != nil && (*downwardAPIMode > 0777 || *downwardAPIMode < 0) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *downwardAPIMode, fileModeErrorMsg)) + } + + for _, file := range downwardAPIVolume.Items { + allErrs = append(allErrs, validateDownwardAPIVolumeFile(&file, fldPath)...) 
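// Illustrative sketch (editor's note, not part of the vendored diff): the secret,
// configMap, keyToPath and downwardAPI validators above all bound file modes to
// the inclusive octal range 0..0777. A minimal standalone version of that check,
// with an assumed helper name, is shown below.
package main

import "fmt"

// validMode reports whether an optional file mode is unset or within 0..0777.
func validMode(mode *int32) bool {
	return mode == nil || (*mode >= 0 && *mode <= 0777)
}

func main() {
	bad := int32(01000) // one past the largest permission-only mode
	good := int32(0644)
	fmt.Println(validMode(&bad), validMode(&good), validMode(nil)) // false true true
}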
+ } + return allErrs +} + +func validateProjectionSources(projection *core.ProjectedVolumeSource, projectionMode *int32, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + allPaths := sets.String{} + + for _, source := range projection.Sources { + numSources := 0 + if source.Secret != nil { + if numSources > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("secret"), "may not specify more than 1 volume type")) + } else { + numSources++ + if len(source.Secret.Name) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) + } + itemsPath := fldPath.Child("items") + for i, kp := range source.Secret.Items { + itemPath := itemsPath.Index(i) + allErrs = append(allErrs, validateKeyToPath(&kp, itemPath)...) + if len(kp.Path) > 0 { + curPath := kp.Path + if !allPaths.Has(curPath) { + allPaths.Insert(curPath) + } else { + allErrs = append(allErrs, field.Invalid(fldPath, source.Secret.Name, "conflicting duplicate paths")) + } + } + } + } + } + if source.ConfigMap != nil { + if numSources > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("configMap"), "may not specify more than 1 volume type")) + } else { + numSources++ + if len(source.ConfigMap.Name) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) + } + itemsPath := fldPath.Child("items") + for i, kp := range source.ConfigMap.Items { + itemPath := itemsPath.Index(i) + allErrs = append(allErrs, validateKeyToPath(&kp, itemPath)...) + if len(kp.Path) > 0 { + curPath := kp.Path + if !allPaths.Has(curPath) { + allPaths.Insert(curPath) + } else { + allErrs = append(allErrs, field.Invalid(fldPath, source.ConfigMap.Name, "conflicting duplicate paths")) + } + + } + } + } + } + if source.DownwardAPI != nil { + if numSources > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("downwardAPI"), "may not specify more than 1 volume type")) + } else { + numSources++ + for _, file := range source.DownwardAPI.Items { + allErrs = append(allErrs, validateDownwardAPIVolumeFile(&file, fldPath.Child("downwardAPI"))...) + if len(file.Path) > 0 { + curPath := file.Path + if !allPaths.Has(curPath) { + allPaths.Insert(curPath) + } else { + allErrs = append(allErrs, field.Invalid(fldPath, curPath, "conflicting duplicate paths")) + } + + } + } + } + } + } + return allErrs +} + +func validateProjectedVolumeSource(projection *core.ProjectedVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + projectionMode := projection.DefaultMode + if projectionMode != nil && (*projectionMode > 0777 || *projectionMode < 0) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *projectionMode, fileModeErrorMsg)) + } + + allErrs = append(allErrs, validateProjectionSources(projection, projectionMode, fldPath)...) + return allErrs +} + +var supportedHostPathTypes = sets.NewString( + string(core.HostPathUnset), + string(core.HostPathDirectoryOrCreate), + string(core.HostPathDirectory), + string(core.HostPathFileOrCreate), + string(core.HostPathFile), + string(core.HostPathSocket), + string(core.HostPathCharDev), + string(core.HostPathBlockDev)) + +func validateHostPathType(hostPathType *core.HostPathType, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if hostPathType != nil && !supportedHostPathTypes.Has(string(*hostPathType)) { + allErrs = append(allErrs, field.NotSupported(fldPath, hostPathType, supportedHostPathTypes.List())) + } + + return allErrs +} + +// This validate will make sure targetPath: +// 1. 
is not abs path +// 2. does not have any element which is ".." +func validateLocalDescendingPath(targetPath string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if path.IsAbs(targetPath) { + allErrs = append(allErrs, field.Invalid(fldPath, targetPath, "must be a relative path")) + } + + allErrs = append(allErrs, validatePathNoBacksteps(targetPath, fldPath)...) + + return allErrs +} + +// validatePathNoBacksteps makes sure the targetPath does not have any `..` path elements when split +// +// This assumes the OS of the apiserver and the nodes are the same. The same check should be done +// on the node to ensure there are no backsteps. +func validatePathNoBacksteps(targetPath string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + parts := strings.Split(filepath.ToSlash(targetPath), "/") + for _, item := range parts { + if item == ".." { + allErrs = append(allErrs, field.Invalid(fldPath, targetPath, "must not contain '..'")) + break // even for `../../..`, one error is sufficient to make the point + } + } + return allErrs +} + +// validateMountPropagation verifies that MountPropagation field is valid and +// allowed for given container. +func validateMountPropagation(mountPropagation *core.MountPropagationMode, container *core.Container, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if mountPropagation == nil { + return allErrs + } + if !utilfeature.DefaultFeatureGate.Enabled(features.MountPropagation) { + allErrs = append(allErrs, field.Forbidden(fldPath, "mount propagation is disabled by feature-gate")) + return allErrs + } + + supportedMountPropagations := sets.NewString(string(core.MountPropagationBidirectional), string(core.MountPropagationHostToContainer)) + if !supportedMountPropagations.Has(string(*mountPropagation)) { + allErrs = append(allErrs, field.NotSupported(fldPath, *mountPropagation, supportedMountPropagations.List())) + } + + if container == nil { + // The container is not available yet, e.g. during validation of + // PodPreset. Stop validation now, Pod validation will refuse final + // Pods with Bidirectional propagation in non-privileged containers. + return allErrs + } + + privileged := container.SecurityContext != nil && container.SecurityContext.Privileged != nil && *container.SecurityContext.Privileged + if *mountPropagation == core.MountPropagationBidirectional && !privileged { + allErrs = append(allErrs, field.Forbidden(fldPath, "Bidirectional mount propagation is available only to privileged containers")) + } + return allErrs +} + +// This validate will make sure targetPath: +// 1. is not abs path +// 2. does not contain any '..' elements +// 3. does not start with '..' +func validateLocalNonReservedPath(targetPath string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, validateLocalDescendingPath(targetPath, fldPath)...) + // Don't report this error if the check for .. elements already caught it. 
+ if strings.HasPrefix(targetPath, "..") && !strings.HasPrefix(targetPath, "../") { + allErrs = append(allErrs, field.Invalid(fldPath, targetPath, "must not start with '..'")) + } + return allErrs +} + +func validateRBDVolumeSource(rbd *core.RBDVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(rbd.CephMonitors) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("monitors"), "")) + } + if len(rbd.RBDImage) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("image"), "")) + } + return allErrs +} + +func validateRBDPersistentVolumeSource(rbd *core.RBDPersistentVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(rbd.CephMonitors) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("monitors"), "")) + } + if len(rbd.RBDImage) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("image"), "")) + } + return allErrs +} + +func validateCinderVolumeSource(cd *core.CinderVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(cd.VolumeID) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("volumeID"), "")) + } + return allErrs +} + +func validateCephFSVolumeSource(cephfs *core.CephFSVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(cephfs.Monitors) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("monitors"), "")) + } + return allErrs +} + +func validateCephFSPersistentVolumeSource(cephfs *core.CephFSPersistentVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(cephfs.Monitors) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("monitors"), "")) + } + return allErrs +} + +func validateFlexVolumeSource(fv *core.FlexVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(fv.Driver) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("driver"), "")) + } + + // Make sure user-specified options don't use kubernetes namespaces + for k := range fv.Options { + namespace := k + if parts := strings.SplitN(k, "/", 2); len(parts) == 2 { + namespace = parts[0] + } + normalized := "." 
+ strings.ToLower(namespace) + if strings.HasSuffix(normalized, ".kubernetes.io") || strings.HasSuffix(normalized, ".k8s.io") { + allErrs = append(allErrs, field.Invalid(fldPath.Child("options").Key(k), k, "kubernetes.io and k8s.io namespaces are reserved")) + } + } + + return allErrs +} + +func validateAzureFile(azure *core.AzureFileVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if azure.SecretName == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("secretName"), "")) + } + if azure.ShareName == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("shareName"), "")) + } + return allErrs +} + +func validateAzureFilePV(azure *core.AzureFilePersistentVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if azure.SecretName == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("secretName"), "")) + } + if azure.ShareName == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("shareName"), "")) + } + if azure.SecretNamespace != nil { + if len(*azure.SecretNamespace) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("secretNamespace"), "")) + } + } + return allErrs +} + +func validateAzureDisk(azure *core.AzureDiskVolumeSource, fldPath *field.Path) field.ErrorList { + var supportedCachingModes = sets.NewString(string(core.AzureDataDiskCachingNone), string(core.AzureDataDiskCachingReadOnly), string(core.AzureDataDiskCachingReadWrite)) + var supportedDiskKinds = sets.NewString(string(core.AzureSharedBlobDisk), string(core.AzureDedicatedBlobDisk), string(core.AzureManagedDisk)) + + diskUriSupportedManaged := []string{"/subscriptions/{sub-id}/resourcegroups/{group-name}/providers/microsoft.compute/disks/{disk-id}"} + diskUriSupportedblob := []string{"https://{account-name}.blob.core.windows.net/{container-name}/{disk-name}.vhd"} + + allErrs := field.ErrorList{} + if azure.DiskName == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("diskName"), "")) + } + + if azure.DataDiskURI == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("diskURI"), "")) + } + + if azure.CachingMode != nil && !supportedCachingModes.Has(string(*azure.CachingMode)) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("cachingMode"), *azure.CachingMode, supportedCachingModes.List())) + } + + if azure.Kind != nil && !supportedDiskKinds.Has(string(*azure.Kind)) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("kind"), *azure.Kind, supportedDiskKinds.List())) + } + + // validate that DiskUri is the correct format + if azure.Kind != nil && *azure.Kind == core.AzureManagedDisk && strings.Index(azure.DataDiskURI, "/subscriptions/") != 0 { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("diskURI"), azure.DataDiskURI, diskUriSupportedManaged)) + } + + if azure.Kind != nil && *azure.Kind != core.AzureManagedDisk && strings.Index(azure.DataDiskURI, "https://") != 0 { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("diskURI"), azure.DataDiskURI, diskUriSupportedblob)) + } + + return allErrs +} + +func validateVsphereVolumeSource(cd *core.VsphereVirtualDiskVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(cd.VolumePath) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("volumePath"), "")) + } + return allErrs +} + +func validatePhotonPersistentDiskVolumeSource(cd *core.PhotonPersistentDiskVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if 
len(cd.PdID) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("pdID"), "")) + } + return allErrs +} + +func validatePortworxVolumeSource(pwx *core.PortworxVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(pwx.VolumeID) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("volumeID"), "")) + } + return allErrs +} + +func validateScaleIOVolumeSource(sio *core.ScaleIOVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if sio.Gateway == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("gateway"), "")) + } + if sio.System == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("system"), "")) + } + if sio.VolumeName == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("volumeName"), "")) + } + return allErrs +} + +func validateScaleIOPersistentVolumeSource(sio *core.ScaleIOPersistentVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if sio.Gateway == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("gateway"), "")) + } + if sio.System == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("system"), "")) + } + if sio.VolumeName == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("volumeName"), "")) + } + return allErrs +} + +func validateLocalVolumeSource(ls *core.LocalVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if ls.Path == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("path"), "")) + return allErrs + } + + allErrs = append(allErrs, validatePathNoBacksteps(ls.Path, fldPath.Child("path"))...) + return allErrs +} + +func validateStorageOSVolumeSource(storageos *core.StorageOSVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(storageos.VolumeName) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("volumeName"), "")) + } else { + allErrs = append(allErrs, ValidateDNS1123Label(storageos.VolumeName, fldPath.Child("volumeName"))...) + } + if len(storageos.VolumeNamespace) > 0 { + allErrs = append(allErrs, ValidateDNS1123Label(storageos.VolumeNamespace, fldPath.Child("volumeNamespace"))...) + } + if storageos.SecretRef != nil { + if len(storageos.SecretRef.Name) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("secretRef", "name"), "")) + } + } + return allErrs +} + +func validateStorageOSPersistentVolumeSource(storageos *core.StorageOSPersistentVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(storageos.VolumeName) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("volumeName"), "")) + } else { + allErrs = append(allErrs, ValidateDNS1123Label(storageos.VolumeName, fldPath.Child("volumeName"))...) + } + if len(storageos.VolumeNamespace) > 0 { + allErrs = append(allErrs, ValidateDNS1123Label(storageos.VolumeNamespace, fldPath.Child("volumeNamespace"))...) 
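// Illustrative sketch (editor's note, not part of the vendored diff): hostPath and
// local volume paths are rejected when any path element is "..". The check splits
// on forward slashes after normalizing OS separators, so "../.." and "a/../b" both
// fail. A standalone equivalent, with an assumed function name, is shown below.
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func hasBacksteps(targetPath string) bool {
	for _, item := range strings.Split(filepath.ToSlash(targetPath), "/") {
		if item == ".." {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(hasBacksteps("/var/lib/data"))  // false
	fmt.Println(hasBacksteps("data/../../etc")) // true
}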
+ } + if storageos.SecretRef != nil { + if len(storageos.SecretRef.Name) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("secretRef", "name"), "")) + } + if len(storageos.SecretRef.Namespace) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("secretRef", "namespace"), "")) + } + } + return allErrs +} + +func validateCSIPersistentVolumeSource(csi *core.CSIPersistentVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if !utilfeature.DefaultFeatureGate.Enabled(features.CSIPersistentVolume) { + allErrs = append(allErrs, field.Forbidden(fldPath, "CSIPersistentVolume disabled by feature-gate")) + } + + if len(csi.Driver) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("driver"), "")) + } + + if len(csi.VolumeHandle) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("volumeHandle"), "")) + } + + return allErrs +} + +// ValidatePersistentVolumeName checks that a name is appropriate for a +// PersistentVolumeName object. +var ValidatePersistentVolumeName = NameIsDNSSubdomain + +var supportedAccessModes = sets.NewString(string(core.ReadWriteOnce), string(core.ReadOnlyMany), string(core.ReadWriteMany)) + +var supportedReclaimPolicy = sets.NewString(string(core.PersistentVolumeReclaimDelete), string(core.PersistentVolumeReclaimRecycle), string(core.PersistentVolumeReclaimRetain)) + +var supportedVolumeModes = sets.NewString(string(core.PersistentVolumeBlock), string(core.PersistentVolumeFilesystem)) + +func ValidatePersistentVolume(pv *core.PersistentVolume) field.ErrorList { + metaPath := field.NewPath("metadata") + allErrs := ValidateObjectMeta(&pv.ObjectMeta, false, ValidatePersistentVolumeName, metaPath) + + specPath := field.NewPath("spec") + if len(pv.Spec.AccessModes) == 0 { + allErrs = append(allErrs, field.Required(specPath.Child("accessModes"), "")) + } + for _, mode := range pv.Spec.AccessModes { + if !supportedAccessModes.Has(string(mode)) { + allErrs = append(allErrs, field.NotSupported(specPath.Child("accessModes"), mode, supportedAccessModes.List())) + } + } + + if len(pv.Spec.Capacity) == 0 { + allErrs = append(allErrs, field.Required(specPath.Child("capacity"), "")) + } + + if _, ok := pv.Spec.Capacity[core.ResourceStorage]; !ok || len(pv.Spec.Capacity) > 1 { + allErrs = append(allErrs, field.NotSupported(specPath.Child("capacity"), pv.Spec.Capacity, []string{string(core.ResourceStorage)})) + } + capPath := specPath.Child("capacity") + for r, qty := range pv.Spec.Capacity { + allErrs = append(allErrs, validateBasicResource(qty, capPath.Key(string(r)))...) + allErrs = append(allErrs, ValidatePositiveQuantityValue(qty, capPath.Key(string(r)))...) + } + if len(string(pv.Spec.PersistentVolumeReclaimPolicy)) > 0 { + if !supportedReclaimPolicy.Has(string(pv.Spec.PersistentVolumeReclaimPolicy)) { + allErrs = append(allErrs, field.NotSupported(specPath.Child("persistentVolumeReclaimPolicy"), pv.Spec.PersistentVolumeReclaimPolicy, supportedReclaimPolicy.List())) + } + } + + nodeAffinitySpecified, errs := validateStorageNodeAffinityAnnotation(pv.ObjectMeta.Annotations, metaPath.Child("annotations")) + allErrs = append(allErrs, errs...) + + numVolumes := 0 + if pv.Spec.HostPath != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(specPath.Child("hostPath"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateHostPathVolumeSource(pv.Spec.HostPath, specPath.Child("hostPath"))...) 
+ } + } + if pv.Spec.GCEPersistentDisk != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(specPath.Child("gcePersistentDisk"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateGCEPersistentDiskVolumeSource(pv.Spec.GCEPersistentDisk, specPath.Child("persistentDisk"))...) + } + } + if pv.Spec.AWSElasticBlockStore != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(specPath.Child("awsElasticBlockStore"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateAWSElasticBlockStoreVolumeSource(pv.Spec.AWSElasticBlockStore, specPath.Child("awsElasticBlockStore"))...) + } + } + if pv.Spec.Glusterfs != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(specPath.Child("glusterfs"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateGlusterfsVolumeSource(pv.Spec.Glusterfs, specPath.Child("glusterfs"))...) + } + } + if pv.Spec.Flocker != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(specPath.Child("flocker"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateFlockerVolumeSource(pv.Spec.Flocker, specPath.Child("flocker"))...) + } + } + if pv.Spec.NFS != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(specPath.Child("nfs"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateNFSVolumeSource(pv.Spec.NFS, specPath.Child("nfs"))...) + } + } + if pv.Spec.RBD != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(specPath.Child("rbd"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateRBDPersistentVolumeSource(pv.Spec.RBD, specPath.Child("rbd"))...) + } + } + if pv.Spec.Quobyte != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(specPath.Child("quobyte"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateQuobyteVolumeSource(pv.Spec.Quobyte, specPath.Child("quobyte"))...) + } + } + if pv.Spec.CephFS != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(specPath.Child("cephFS"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateCephFSPersistentVolumeSource(pv.Spec.CephFS, specPath.Child("cephfs"))...) + } + } + if pv.Spec.ISCSI != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(specPath.Child("iscsi"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateISCSIPersistentVolumeSource(pv.Spec.ISCSI, specPath.Child("iscsi"))...) + } + if pv.Spec.ISCSI.InitiatorName != nil && len(pv.ObjectMeta.Name+":"+pv.Spec.ISCSI.TargetPortal) > 64 { + tooLongErr := "Total length of : must be under 64 characters if iscsi.initiatorName is specified." + allErrs = append(allErrs, field.Invalid(metaPath.Child("name"), pv.ObjectMeta.Name, tooLongErr)) + } + } + if pv.Spec.Cinder != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(specPath.Child("cinder"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateCinderVolumeSource(pv.Spec.Cinder, specPath.Child("cinder"))...) 
+ } + } + if pv.Spec.FC != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(specPath.Child("fc"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateFCVolumeSource(pv.Spec.FC, specPath.Child("fc"))...) + } + } + if pv.Spec.FlexVolume != nil { + numVolumes++ + allErrs = append(allErrs, validateFlexVolumeSource(pv.Spec.FlexVolume, specPath.Child("flexVolume"))...) + } + if pv.Spec.AzureFile != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(specPath.Child("azureFile"), "may not specify more than 1 volume type")) + + } else { + numVolumes++ + allErrs = append(allErrs, validateAzureFilePV(pv.Spec.AzureFile, specPath.Child("azureFile"))...) + } + } + + if pv.Spec.VsphereVolume != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(specPath.Child("vsphereVolume"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateVsphereVolumeSource(pv.Spec.VsphereVolume, specPath.Child("vsphereVolume"))...) + } + } + if pv.Spec.PhotonPersistentDisk != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(specPath.Child("photonPersistentDisk"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validatePhotonPersistentDiskVolumeSource(pv.Spec.PhotonPersistentDisk, specPath.Child("photonPersistentDisk"))...) + } + } + if pv.Spec.PortworxVolume != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(specPath.Child("portworxVolume"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validatePortworxVolumeSource(pv.Spec.PortworxVolume, specPath.Child("portworxVolume"))...) + } + } + if pv.Spec.AzureDisk != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(specPath.Child("azureDisk"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateAzureDisk(pv.Spec.AzureDisk, specPath.Child("azureDisk"))...) + } + } + if pv.Spec.ScaleIO != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(specPath.Child("scaleIO"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateScaleIOPersistentVolumeSource(pv.Spec.ScaleIO, specPath.Child("scaleIO"))...) + } + } + if pv.Spec.Local != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(specPath.Child("local"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + if !utilfeature.DefaultFeatureGate.Enabled(features.PersistentLocalVolumes) { + allErrs = append(allErrs, field.Forbidden(specPath.Child("local"), "Local volumes are disabled by feature-gate")) + } + allErrs = append(allErrs, validateLocalVolumeSource(pv.Spec.Local, specPath.Child("local"))...) + + // NodeAffinity is required + if !nodeAffinitySpecified { + allErrs = append(allErrs, field.Required(metaPath.Child("annotations"), "Local volume requires node affinity")) + } + } + } + if pv.Spec.StorageOS != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(specPath.Child("storageos"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateStorageOSPersistentVolumeSource(pv.Spec.StorageOS, specPath.Child("storageos"))...) 
+ } + } + + if pv.Spec.CSI != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(specPath.Child("csi"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateCSIPersistentVolumeSource(pv.Spec.CSI, specPath.Child("csi"))...) + } + } + + if numVolumes == 0 { + allErrs = append(allErrs, field.Required(specPath, "must specify a volume type")) + } + + // do not allow hostPath mounts of '/' to have a 'recycle' reclaim policy + if pv.Spec.HostPath != nil && path.Clean(pv.Spec.HostPath.Path) == "/" && pv.Spec.PersistentVolumeReclaimPolicy == core.PersistentVolumeReclaimRecycle { + allErrs = append(allErrs, field.Forbidden(specPath.Child("persistentVolumeReclaimPolicy"), "may not be 'recycle' for a hostPath mount of '/'")) + } + + if len(pv.Spec.StorageClassName) > 0 { + for _, msg := range ValidateClassName(pv.Spec.StorageClassName, false) { + allErrs = append(allErrs, field.Invalid(specPath.Child("storageClassName"), pv.Spec.StorageClassName, msg)) + } + } + if pv.Spec.VolumeMode != nil && !utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + allErrs = append(allErrs, field.Forbidden(specPath.Child("volumeMode"), "PersistentVolume volumeMode is disabled by feature-gate")) + } else if pv.Spec.VolumeMode != nil && !supportedVolumeModes.Has(string(*pv.Spec.VolumeMode)) { + allErrs = append(allErrs, field.NotSupported(specPath.Child("volumeMode"), *pv.Spec.VolumeMode, supportedVolumeModes.List())) + } + return allErrs +} + +// ValidatePersistentVolumeUpdate tests to see if the update is legal for an end user to make. +// newPv is updated with fields that cannot be changed. +func ValidatePersistentVolumeUpdate(newPv, oldPv *core.PersistentVolume) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = ValidatePersistentVolume(newPv) + + // PersistentVolumeSource should be immutable after creation. + if !apiequality.Semantic.DeepEqual(newPv.Spec.PersistentVolumeSource, oldPv.Spec.PersistentVolumeSource) { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "persistentvolumesource"), "is immutable after creation")) + } + + newPv.Status = oldPv.Status + + if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + allErrs = append(allErrs, ValidateImmutableField(newPv.Spec.VolumeMode, oldPv.Spec.VolumeMode, field.NewPath("volumeMode"))...) + } + + return allErrs +} + +// ValidatePersistentVolumeStatusUpdate tests to see if the status update is legal for an end user to make. +// newPv is updated with fields that cannot be changed. +func ValidatePersistentVolumeStatusUpdate(newPv, oldPv *core.PersistentVolume) field.ErrorList { + allErrs := ValidateObjectMetaUpdate(&newPv.ObjectMeta, &oldPv.ObjectMeta, field.NewPath("metadata")) + if len(newPv.ResourceVersion) == 0 { + allErrs = append(allErrs, field.Required(field.NewPath("resourceVersion"), "")) + } + newPv.Spec = oldPv.Spec + return allErrs +} + +// ValidatePersistentVolumeClaim validates a PersistentVolumeClaim +func ValidatePersistentVolumeClaim(pvc *core.PersistentVolumeClaim) field.ErrorList { + allErrs := ValidateObjectMeta(&pvc.ObjectMeta, true, ValidatePersistentVolumeName, field.NewPath("metadata")) + allErrs = append(allErrs, ValidatePersistentVolumeClaimSpec(&pvc.Spec, field.NewPath("spec"))...) 
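// Illustrative sketch (editor's note, not part of the vendored diff): a
// PersistentVolume's capacity map must contain the storage resource and nothing
// else. The fragment below mirrors that rule with plain Go types; the map keys
// and helper name are assumptions, not the vendored API types.
package main

import "fmt"

func validCapacity(capacity map[string]string) bool {
	_, hasStorage := capacity["storage"]
	return hasStorage && len(capacity) == 1
}

func main() {
	fmt.Println(validCapacity(map[string]string{"storage": "10Gi"}))             // true
	fmt.Println(validCapacity(map[string]string{"storage": "10Gi", "cpu": "1"})) // false
	fmt.Println(validCapacity(map[string]string{}))                              // false
}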
+ return allErrs +} + +// ValidatePersistentVolumeClaimSpec validates a PersistentVolumeClaimSpec +func ValidatePersistentVolumeClaimSpec(spec *core.PersistentVolumeClaimSpec, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(spec.AccessModes) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("accessModes"), "at least 1 access mode is required")) + } + if spec.Selector != nil { + allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child("selector"))...) + } + for _, mode := range spec.AccessModes { + if mode != core.ReadWriteOnce && mode != core.ReadOnlyMany && mode != core.ReadWriteMany { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("accessModes"), mode, supportedAccessModes.List())) + } + } + storageValue, ok := spec.Resources.Requests[core.ResourceStorage] + if !ok { + allErrs = append(allErrs, field.Required(fldPath.Child("resources").Key(string(core.ResourceStorage)), "")) + } else { + allErrs = append(allErrs, ValidateResourceQuantityValue(string(core.ResourceStorage), storageValue, fldPath.Child("resources").Key(string(core.ResourceStorage)))...) + allErrs = append(allErrs, ValidatePositiveQuantityValue(storageValue, fldPath.Child("resources").Key(string(core.ResourceStorage)))...) + } + + if spec.StorageClassName != nil && len(*spec.StorageClassName) > 0 { + for _, msg := range ValidateClassName(*spec.StorageClassName, false) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("storageClassName"), *spec.StorageClassName, msg)) + } + } + if spec.VolumeMode != nil && !utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("volumeMode"), "PersistentVolumeClaim volumeMode is disabled by feature-gate")) + } else if spec.VolumeMode != nil && !supportedVolumeModes.Has(string(*spec.VolumeMode)) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("volumeMode"), *spec.VolumeMode, supportedVolumeModes.List())) + } + return allErrs +} + +// ValidatePersistentVolumeClaimUpdate validates an update to a PersistentVolumeClaim +func ValidatePersistentVolumeClaimUpdate(newPvc, oldPvc *core.PersistentVolumeClaim) field.ErrorList { + allErrs := ValidateObjectMetaUpdate(&newPvc.ObjectMeta, &oldPvc.ObjectMeta, field.NewPath("metadata")) + allErrs = append(allErrs, ValidatePersistentVolumeClaim(newPvc)...) + // PVController needs to update PVC.Spec w/ VolumeName. + // Claims are immutable in order to enforce quota, range limits, etc. without gaming the system. + if len(oldPvc.Spec.VolumeName) == 0 { + // volumeName changes are allowed once. + // Reset back to empty string after equality check + oldPvc.Spec.VolumeName = newPvc.Spec.VolumeName + defer func() { oldPvc.Spec.VolumeName = "" }() + } + + if utilfeature.DefaultFeatureGate.Enabled(features.ExpandPersistentVolumes) { + newPVCSpecCopy := newPvc.Spec.DeepCopy() + + // lets make sure storage values are same. 
+ if newPvc.Status.Phase == core.ClaimBound && newPVCSpecCopy.Resources.Requests != nil { + newPVCSpecCopy.Resources.Requests["storage"] = oldPvc.Spec.Resources.Requests["storage"] + } + + oldSize := oldPvc.Spec.Resources.Requests["storage"] + newSize := newPvc.Spec.Resources.Requests["storage"] + + if !apiequality.Semantic.DeepEqual(*newPVCSpecCopy, oldPvc.Spec) { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), "is immutable after creation except resources.requests for bound claims")) + } + if newSize.Cmp(oldSize) < 0 { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "resources", "requests", "storage"), "field can not be less than previous value")) + } + + } else { + // changes to Spec are not allowed, but updates to label/and some annotations are OK. + // no-op updates pass validation. + if !apiequality.Semantic.DeepEqual(newPvc.Spec, oldPvc.Spec) { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), "field is immutable after creation")) + } + } + + // storageclass annotation should be immutable after creation + // TODO: remove Beta when no longer needed + allErrs = append(allErrs, ValidateImmutableAnnotation(newPvc.ObjectMeta.Annotations[v1.BetaStorageClassAnnotation], oldPvc.ObjectMeta.Annotations[v1.BetaStorageClassAnnotation], v1.BetaStorageClassAnnotation, field.NewPath("metadata"))...) + + if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + allErrs = append(allErrs, ValidateImmutableField(newPvc.Spec.VolumeMode, oldPvc.Spec.VolumeMode, field.NewPath("volumeMode"))...) + } + + newPvc.Status = oldPvc.Status + return allErrs +} + +// ValidatePersistentVolumeClaimStatusUpdate validates an update to status of a PersistentVolumeClaim +func ValidatePersistentVolumeClaimStatusUpdate(newPvc, oldPvc *core.PersistentVolumeClaim) field.ErrorList { + allErrs := ValidateObjectMetaUpdate(&newPvc.ObjectMeta, &oldPvc.ObjectMeta, field.NewPath("metadata")) + if len(newPvc.ResourceVersion) == 0 { + allErrs = append(allErrs, field.Required(field.NewPath("resourceVersion"), "")) + } + if len(newPvc.Spec.AccessModes) == 0 { + allErrs = append(allErrs, field.Required(field.NewPath("Spec", "accessModes"), "")) + } + if !utilfeature.DefaultFeatureGate.Enabled(features.ExpandPersistentVolumes) && len(newPvc.Status.Conditions) > 0 { + conditionPath := field.NewPath("status", "conditions") + allErrs = append(allErrs, field.Forbidden(conditionPath, "invalid field")) + } + capPath := field.NewPath("status", "capacity") + for r, qty := range newPvc.Status.Capacity { + allErrs = append(allErrs, validateBasicResource(qty, capPath.Key(string(r)))...) 
+ } + newPvc.Spec = oldPvc.Spec + return allErrs +} + +var supportedPortProtocols = sets.NewString(string(core.ProtocolTCP), string(core.ProtocolUDP)) + +func validateContainerPorts(ports []core.ContainerPort, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + allNames := sets.String{} + for i, port := range ports { + idxPath := fldPath.Index(i) + if len(port.Name) > 0 { + if msgs := validation.IsValidPortName(port.Name); len(msgs) != 0 { + for i = range msgs { + allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), port.Name, msgs[i])) + } + } else if allNames.Has(port.Name) { + allErrs = append(allErrs, field.Duplicate(idxPath.Child("name"), port.Name)) + } else { + allNames.Insert(port.Name) + } + } + if port.ContainerPort == 0 { + allErrs = append(allErrs, field.Required(idxPath.Child("containerPort"), "")) + } else { + for _, msg := range validation.IsValidPortNum(int(port.ContainerPort)) { + allErrs = append(allErrs, field.Invalid(idxPath.Child("containerPort"), port.ContainerPort, msg)) + } + } + if port.HostPort != 0 { + for _, msg := range validation.IsValidPortNum(int(port.HostPort)) { + allErrs = append(allErrs, field.Invalid(idxPath.Child("hostPort"), port.HostPort, msg)) + } + } + if len(port.Protocol) == 0 { + allErrs = append(allErrs, field.Required(idxPath.Child("protocol"), "")) + } else if !supportedPortProtocols.Has(string(port.Protocol)) { + allErrs = append(allErrs, field.NotSupported(idxPath.Child("protocol"), port.Protocol, supportedPortProtocols.List())) + } + } + return allErrs +} + +// ValidateEnv validates env vars +func ValidateEnv(vars []core.EnvVar, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + for i, ev := range vars { + idxPath := fldPath.Index(i) + if len(ev.Name) == 0 { + allErrs = append(allErrs, field.Required(idxPath.Child("name"), "")) + } else { + for _, msg := range validation.IsEnvVarName(ev.Name) { + allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), ev.Name, msg)) + } + } + allErrs = append(allErrs, validateEnvVarValueFrom(ev, idxPath.Child("valueFrom"))...) + } + return allErrs +} + +var validEnvDownwardAPIFieldPathExpressions = sets.NewString( + "metadata.name", + "metadata.namespace", + "metadata.uid", + "spec.nodeName", + "spec.serviceAccountName", + "status.hostIP", + "status.podIP") +var validContainerResourceFieldPathExpressions = sets.NewString("limits.cpu", "limits.memory", "limits.ephemeral-storage", "requests.cpu", "requests.memory", "requests.ephemeral-storage") + +func validateEnvVarValueFrom(ev core.EnvVar, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if ev.ValueFrom == nil { + return allErrs + } + + numSources := 0 + + if ev.ValueFrom.FieldRef != nil { + numSources++ + allErrs = append(allErrs, validateObjectFieldSelector(ev.ValueFrom.FieldRef, &validEnvDownwardAPIFieldPathExpressions, fldPath.Child("fieldRef"))...) + } + if ev.ValueFrom.ResourceFieldRef != nil { + numSources++ + allErrs = append(allErrs, validateContainerResourceFieldSelector(ev.ValueFrom.ResourceFieldRef, &validContainerResourceFieldPathExpressions, fldPath.Child("resourceFieldRef"), false)...) + } + if ev.ValueFrom.ConfigMapKeyRef != nil { + numSources++ + allErrs = append(allErrs, validateConfigMapKeySelector(ev.ValueFrom.ConfigMapKeyRef, fldPath.Child("configMapKeyRef"))...) + } + if ev.ValueFrom.SecretKeyRef != nil { + numSources++ + allErrs = append(allErrs, validateSecretKeySelector(ev.ValueFrom.SecretKeyRef, fldPath.Child("secretKeyRef"))...) 
+ } + + if numSources == 0 { + allErrs = append(allErrs, field.Invalid(fldPath, "", "must specify one of: `fieldRef`, `resourceFieldRef`, `configMapKeyRef` or `secretKeyRef`")) + } else if len(ev.Value) != 0 { + if numSources != 0 { + allErrs = append(allErrs, field.Invalid(fldPath, "", "may not be specified when `value` is not empty")) + } + } else if numSources > 1 { + allErrs = append(allErrs, field.Invalid(fldPath, "", "may not have more than one field specified at a time")) + } + + return allErrs +} + +func validateObjectFieldSelector(fs *core.ObjectFieldSelector, expressions *sets.String, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if len(fs.APIVersion) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("apiVersion"), "")) + return allErrs + } + if len(fs.FieldPath) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("fieldPath"), "")) + return allErrs + } + + internalFieldPath, _, err := podshelper.ConvertDownwardAPIFieldLabel(fs.APIVersion, fs.FieldPath, "") + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("fieldPath"), fs.FieldPath, fmt.Sprintf("error converting fieldPath: %v", err))) + return allErrs + } + + if path, subscript, ok := fieldpath.SplitMaybeSubscriptedPath(internalFieldPath); ok { + switch path { + case "metadata.annotations": + for _, msg := range validation.IsQualifiedName(strings.ToLower(subscript)) { + allErrs = append(allErrs, field.Invalid(fldPath, subscript, msg)) + } + case "metadata.labels": + for _, msg := range validation.IsQualifiedName(subscript) { + allErrs = append(allErrs, field.Invalid(fldPath, subscript, msg)) + } + default: + allErrs = append(allErrs, field.Invalid(fldPath, path, "does not support subscript")) + } + } else if !expressions.Has(path) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("fieldPath"), path, expressions.List())) + return allErrs + } + + return allErrs +} + +func fsResourceIsEphemeralStorage(resource string) bool { + if resource == "limits.ephemeral-storage" || resource == "requests.ephemeral-storage" { + return true + } + return false +} + +func validateContainerResourceFieldSelector(fs *core.ResourceFieldSelector, expressions *sets.String, fldPath *field.Path, volume bool) field.ErrorList { + allErrs := field.ErrorList{} + + if volume && len(fs.ContainerName) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("containerName"), "")) + } else if len(fs.Resource) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("resource"), "")) + } else if !expressions.Has(fs.Resource) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("resource"), fs.Resource, expressions.List())) + } else if fsResourceIsEphemeralStorage(fs.Resource) && !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) { + allErrs = append(allErrs, field.Forbidden(fldPath, "Containers' ephemeral storage requests/limits disabled by feature-gate for Downward API")) + } + allErrs = append(allErrs, validateContainerResourceDivisor(fs.Resource, fs.Divisor, fldPath)...) 
+ return allErrs +} + +func ValidateEnvFrom(vars []core.EnvFromSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for i, ev := range vars { + idxPath := fldPath.Index(i) + if len(ev.Prefix) > 0 { + for _, msg := range validation.IsEnvVarName(ev.Prefix) { + allErrs = append(allErrs, field.Invalid(idxPath.Child("prefix"), ev.Prefix, msg)) + } + } + + numSources := 0 + if ev.ConfigMapRef != nil { + numSources++ + allErrs = append(allErrs, validateConfigMapEnvSource(ev.ConfigMapRef, idxPath.Child("configMapRef"))...) + } + if ev.SecretRef != nil { + numSources++ + allErrs = append(allErrs, validateSecretEnvSource(ev.SecretRef, idxPath.Child("secretRef"))...) + } + + if numSources == 0 { + allErrs = append(allErrs, field.Invalid(fldPath, "", "must specify one of: `configMapRef` or `secretRef`")) + } else if numSources > 1 { + allErrs = append(allErrs, field.Invalid(fldPath, "", "may not have more than one field specified at a time")) + } + } + return allErrs +} + +func validateConfigMapEnvSource(configMapSource *core.ConfigMapEnvSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(configMapSource.Name) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) + } else { + for _, msg := range ValidateConfigMapName(configMapSource.Name, true) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), configMapSource.Name, msg)) + } + } + return allErrs +} + +func validateSecretEnvSource(secretSource *core.SecretEnvSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(secretSource.Name) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) + } else { + for _, msg := range ValidateSecretName(secretSource.Name, true) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), secretSource.Name, msg)) + } + } + return allErrs +} + +var validContainerResourceDivisorForCPU = sets.NewString("1m", "1") +var validContainerResourceDivisorForMemory = sets.NewString("1", "1k", "1M", "1G", "1T", "1P", "1E", "1Ki", "1Mi", "1Gi", "1Ti", "1Pi", "1Ei") +var validContainerResourceDivisorForEphemeralStorage = sets.NewString("1", "1k", "1M", "1G", "1T", "1P", "1E", "1Ki", "1Mi", "1Gi", "1Ti", "1Pi", "1Ei") + +func validateContainerResourceDivisor(rName string, divisor resource.Quantity, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + unsetDivisor := resource.Quantity{} + if unsetDivisor.Cmp(divisor) == 0 { + return allErrs + } + switch rName { + case "limits.cpu", "requests.cpu": + if !validContainerResourceDivisorForCPU.Has(divisor.String()) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("divisor"), rName, "only divisor's values 1m and 1 are supported with the cpu resource")) + } + case "limits.memory", "requests.memory": + if !validContainerResourceDivisorForMemory.Has(divisor.String()) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("divisor"), rName, "only divisor's values 1, 1k, 1M, 1G, 1T, 1P, 1E, 1Ki, 1Mi, 1Gi, 1Ti, 1Pi, 1Ei are supported with the memory resource")) + } + case "limits.ephemeral-storage", "requests.ephemeral-storage": + if !validContainerResourceDivisorForEphemeralStorage.Has(divisor.String()) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("divisor"), rName, "only divisor's values 1, 1k, 1M, 1G, 1T, 1P, 1E, 1Ki, 1Mi, 1Gi, 1Ti, 1Pi, 1Ei are supported with the local ephemeral storage resource")) + } + } + return allErrs +} + +func validateConfigMapKeySelector(s *core.ConfigMapKeySelector, 
fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + nameFn := ValidateNameFunc(ValidateSecretName) + for _, msg := range nameFn(s.Name, false) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), s.Name, msg)) + } + if len(s.Key) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("key"), "")) + } else { + for _, msg := range validation.IsConfigMapKey(s.Key) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("key"), s.Key, msg)) + } + } + + return allErrs +} + +func validateSecretKeySelector(s *core.SecretKeySelector, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + nameFn := ValidateNameFunc(ValidateSecretName) + for _, msg := range nameFn(s.Name, false) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), s.Name, msg)) + } + if len(s.Key) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("key"), "")) + } else { + for _, msg := range validation.IsConfigMapKey(s.Key) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("key"), s.Key, msg)) + } + } + + return allErrs +} + +func GetVolumeMountMap(mounts []core.VolumeMount) map[string]string { + volmounts := make(map[string]string) + + for _, mnt := range mounts { + volmounts[mnt.Name] = mnt.MountPath + } + + return volmounts +} + +func GetVolumeDeviceMap(devices []core.VolumeDevice) map[string]string { + voldevices := make(map[string]string) + + for _, dev := range devices { + voldevices[dev.Name] = dev.DevicePath + } + + return voldevices +} + +func ValidateVolumeMounts(mounts []core.VolumeMount, voldevices map[string]string, volumes map[string]core.VolumeSource, container *core.Container, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + mountpoints := sets.NewString() + + for i, mnt := range mounts { + idxPath := fldPath.Index(i) + if len(mnt.Name) == 0 { + allErrs = append(allErrs, field.Required(idxPath.Child("name"), "")) + } + if !IsMatchedVolume(mnt.Name, volumes) { + allErrs = append(allErrs, field.NotFound(idxPath.Child("name"), mnt.Name)) + } + if len(mnt.MountPath) == 0 { + allErrs = append(allErrs, field.Required(idxPath.Child("mountPath"), "")) + } + if mountpoints.Has(mnt.MountPath) { + allErrs = append(allErrs, field.Invalid(idxPath.Child("mountPath"), mnt.MountPath, "must be unique")) + } + mountpoints.Insert(mnt.MountPath) + + // check for overlap with VolumeDevice + if mountNameAlreadyExists(mnt.Name, voldevices) { + allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), mnt.Name, "must not already exist in volumeDevices")) + } + if mountPathAlreadyExists(mnt.MountPath, voldevices) { + allErrs = append(allErrs, field.Invalid(idxPath.Child("mountPath"), mnt.MountPath, "must not already exist as a path in volumeDevices")) + } + + if len(mnt.SubPath) > 0 { + allErrs = append(allErrs, validateLocalDescendingPath(mnt.SubPath, fldPath.Child("subPath"))...) + } + + if mnt.MountPropagation != nil { + allErrs = append(allErrs, validateMountPropagation(mnt.MountPropagation, container, fldPath.Child("mountPropagation"))...) 
+ } + } + return allErrs +} + +func ValidateVolumeDevices(devices []core.VolumeDevice, volmounts map[string]string, volumes map[string]core.VolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + devicepath := sets.NewString() + devicename := sets.NewString() + + if devices != nil && !utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("volumeDevices"), "Container volumeDevices is disabled by feature-gate")) + return allErrs + } + if devices != nil { + for i, dev := range devices { + idxPath := fldPath.Index(i) + devName := dev.Name + devPath := dev.DevicePath + didMatch, isPVC := isMatchedDevice(devName, volumes) + if len(devName) == 0 { + allErrs = append(allErrs, field.Required(idxPath.Child("name"), "")) + } + if devicename.Has(devName) { + allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), devName, "must be unique")) + } + // Must be PersistentVolumeClaim volume source + if didMatch && !isPVC { + allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), devName, "can only use volume source type of PersistentVolumeClaim for block mode")) + } + if !didMatch { + allErrs = append(allErrs, field.NotFound(idxPath.Child("name"), devName)) + } + if len(devPath) == 0 { + allErrs = append(allErrs, field.Required(idxPath.Child("devicePath"), "")) + } + if devicepath.Has(devPath) { + allErrs = append(allErrs, field.Invalid(idxPath.Child("devicePath"), devPath, "must be unique")) + } + if len(devPath) > 0 && len(validatePathNoBacksteps(devPath, fldPath.Child("devicePath"))) > 0 { + allErrs = append(allErrs, field.Invalid(idxPath.Child("devicePath"), devPath, "can not contain backsteps ('..')")) + } else { + devicepath.Insert(devPath) + } + // check for overlap with VolumeMount + if deviceNameAlreadyExists(devName, volmounts) { + allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), devName, "must not already exist in volumeMounts")) + } + if devicePathAlreadyExists(devPath, volmounts) { + allErrs = append(allErrs, field.Invalid(idxPath.Child("devicePath"), devPath, "must not already exist as a path in volumeMounts")) + } + if len(devName) > 0 { + devicename.Insert(devName) + } + } + } + return allErrs +} + +func validateProbe(probe *core.Probe, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if probe == nil { + return allErrs + } + allErrs = append(allErrs, validateHandler(&probe.Handler, fldPath)...) + + allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.InitialDelaySeconds), fldPath.Child("initialDelaySeconds"))...) + allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.TimeoutSeconds), fldPath.Child("timeoutSeconds"))...) + allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.PeriodSeconds), fldPath.Child("periodSeconds"))...) + allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.SuccessThreshold), fldPath.Child("successThreshold"))...) + allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.FailureThreshold), fldPath.Child("failureThreshold"))...) 
+ return allErrs +} + +func validateClientIPAffinityConfig(config *core.SessionAffinityConfig, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if config == nil { + allErrs = append(allErrs, field.Required(fldPath, fmt.Sprintf("when session affinity type is %s", core.ServiceAffinityClientIP))) + return allErrs + } + if config.ClientIP == nil { + allErrs = append(allErrs, field.Required(fldPath.Child("clientIP"), fmt.Sprintf("when session affinity type is %s", core.ServiceAffinityClientIP))) + return allErrs + } + if config.ClientIP.TimeoutSeconds == nil { + allErrs = append(allErrs, field.Required(fldPath.Child("clientIP").Child("timeoutSeconds"), fmt.Sprintf("when session affinity type is %s", core.ServiceAffinityClientIP))) + return allErrs + } + allErrs = append(allErrs, validateAffinityTimeout(config.ClientIP.TimeoutSeconds, fldPath.Child("clientIP").Child("timeoutSeconds"))...) + + return allErrs +} + +func validateAffinityTimeout(timeout *int32, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if *timeout <= 0 || *timeout > core.MaxClientIPServiceAffinitySeconds { + allErrs = append(allErrs, field.Invalid(fldPath, timeout, fmt.Sprintf("must be greater than 0 and less than %d", core.MaxClientIPServiceAffinitySeconds))) + } + return allErrs +} + +// AccumulateUniqueHostPorts extracts each HostPort of each Container, +// accumulating the results and returning an error if any ports conflict. +func AccumulateUniqueHostPorts(containers []core.Container, accumulator *sets.String, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + for ci, ctr := range containers { + idxPath := fldPath.Index(ci) + portsPath := idxPath.Child("ports") + for pi := range ctr.Ports { + idxPath := portsPath.Index(pi) + port := ctr.Ports[pi].HostPort + if port == 0 { + continue + } + str := fmt.Sprintf("%s/%s/%d", ctr.Ports[pi].Protocol, ctr.Ports[pi].HostIP, port) + if accumulator.Has(str) { + allErrs = append(allErrs, field.Duplicate(idxPath.Child("hostPort"), str)) + } else { + accumulator.Insert(str) + } + } + } + return allErrs +} + +// checkHostPortConflicts checks for colliding Port.HostPort values across +// a slice of containers. +func checkHostPortConflicts(containers []core.Container, fldPath *field.Path) field.ErrorList { + allPorts := sets.String{} + return AccumulateUniqueHostPorts(containers, &allPorts, fldPath) +} + +func validateExecAction(exec *core.ExecAction, fldPath *field.Path) field.ErrorList { + allErrors := field.ErrorList{} + if len(exec.Command) == 0 { + allErrors = append(allErrors, field.Required(fldPath.Child("command"), "")) + } + return allErrors +} + +var supportedHTTPSchemes = sets.NewString(string(core.URISchemeHTTP), string(core.URISchemeHTTPS)) + +func validateHTTPGetAction(http *core.HTTPGetAction, fldPath *field.Path) field.ErrorList { + allErrors := field.ErrorList{} + if len(http.Path) == 0 { + allErrors = append(allErrors, field.Required(fldPath.Child("path"), "")) + } + allErrors = append(allErrors, ValidatePortNumOrName(http.Port, fldPath.Child("port"))...) 
+ if !supportedHTTPSchemes.Has(string(http.Scheme)) { + allErrors = append(allErrors, field.NotSupported(fldPath.Child("scheme"), http.Scheme, supportedHTTPSchemes.List())) + } + for _, header := range http.HTTPHeaders { + for _, msg := range validation.IsHTTPHeaderName(header.Name) { + allErrors = append(allErrors, field.Invalid(fldPath.Child("httpHeaders"), header.Name, msg)) + } + } + return allErrors +} + +func ValidatePortNumOrName(port intstr.IntOrString, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if port.Type == intstr.Int { + for _, msg := range validation.IsValidPortNum(port.IntValue()) { + allErrs = append(allErrs, field.Invalid(fldPath, port.IntValue(), msg)) + } + } else if port.Type == intstr.String { + for _, msg := range validation.IsValidPortName(port.StrVal) { + allErrs = append(allErrs, field.Invalid(fldPath, port.StrVal, msg)) + } + } else { + allErrs = append(allErrs, field.InternalError(fldPath, fmt.Errorf("unknown type: %v", port.Type))) + } + return allErrs +} + +func validateTCPSocketAction(tcp *core.TCPSocketAction, fldPath *field.Path) field.ErrorList { + return ValidatePortNumOrName(tcp.Port, fldPath.Child("port")) +} + +func validateHandler(handler *core.Handler, fldPath *field.Path) field.ErrorList { + numHandlers := 0 + allErrors := field.ErrorList{} + if handler.Exec != nil { + if numHandlers > 0 { + allErrors = append(allErrors, field.Forbidden(fldPath.Child("exec"), "may not specify more than 1 handler type")) + } else { + numHandlers++ + allErrors = append(allErrors, validateExecAction(handler.Exec, fldPath.Child("exec"))...) + } + } + if handler.HTTPGet != nil { + if numHandlers > 0 { + allErrors = append(allErrors, field.Forbidden(fldPath.Child("httpGet"), "may not specify more than 1 handler type")) + } else { + numHandlers++ + allErrors = append(allErrors, validateHTTPGetAction(handler.HTTPGet, fldPath.Child("httpGet"))...) + } + } + if handler.TCPSocket != nil { + if numHandlers > 0 { + allErrors = append(allErrors, field.Forbidden(fldPath.Child("tcpSocket"), "may not specify more than 1 handler type")) + } else { + numHandlers++ + allErrors = append(allErrors, validateTCPSocketAction(handler.TCPSocket, fldPath.Child("tcpSocket"))...) + } + } + if numHandlers == 0 { + allErrors = append(allErrors, field.Required(fldPath, "must specify a handler type")) + } + return allErrors +} + +func validateLifecycle(lifecycle *core.Lifecycle, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if lifecycle.PostStart != nil { + allErrs = append(allErrs, validateHandler(lifecycle.PostStart, fldPath.Child("postStart"))...) + } + if lifecycle.PreStop != nil { + allErrs = append(allErrs, validateHandler(lifecycle.PreStop, fldPath.Child("preStop"))...) 
+ } + return allErrs +} + +var supportedPullPolicies = sets.NewString(string(core.PullAlways), string(core.PullIfNotPresent), string(core.PullNever)) + +func validatePullPolicy(policy core.PullPolicy, fldPath *field.Path) field.ErrorList { + allErrors := field.ErrorList{} + + switch policy { + case core.PullAlways, core.PullIfNotPresent, core.PullNever: + break + case "": + allErrors = append(allErrors, field.Required(fldPath, "")) + default: + allErrors = append(allErrors, field.NotSupported(fldPath, policy, supportedPullPolicies.List())) + } + + return allErrors +} + +func validateInitContainers(containers, otherContainers []core.Container, deviceVolumes map[string]core.VolumeSource, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + if len(containers) > 0 { + allErrs = append(allErrs, validateContainers(containers, deviceVolumes, fldPath)...) + } + + allNames := sets.String{} + for _, ctr := range otherContainers { + allNames.Insert(ctr.Name) + } + for i, ctr := range containers { + idxPath := fldPath.Index(i) + if allNames.Has(ctr.Name) { + allErrs = append(allErrs, field.Duplicate(idxPath.Child("name"), ctr.Name)) + } + if len(ctr.Name) > 0 { + allNames.Insert(ctr.Name) + } + if ctr.Lifecycle != nil { + allErrs = append(allErrs, field.Invalid(idxPath.Child("lifecycle"), ctr.Lifecycle, "must not be set for init containers")) + } + if ctr.LivenessProbe != nil { + allErrs = append(allErrs, field.Invalid(idxPath.Child("livenessProbe"), ctr.LivenessProbe, "must not be set for init containers")) + } + if ctr.ReadinessProbe != nil { + allErrs = append(allErrs, field.Invalid(idxPath.Child("readinessProbe"), ctr.ReadinessProbe, "must not be set for init containers")) + } + } + return allErrs +} + +func validateContainers(containers []core.Container, volumes map[string]core.VolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if len(containers) == 0 { + return append(allErrs, field.Required(fldPath, "")) + } + + allNames := sets.String{} + for i, ctr := range containers { + idxPath := fldPath.Index(i) + namePath := idxPath.Child("name") + volMounts := GetVolumeMountMap(ctr.VolumeMounts) + volDevices := GetVolumeDeviceMap(ctr.VolumeDevices) + + if len(ctr.Name) == 0 { + allErrs = append(allErrs, field.Required(namePath, "")) + } else { + allErrs = append(allErrs, ValidateDNS1123Label(ctr.Name, namePath)...) + } + if allNames.Has(ctr.Name) { + allErrs = append(allErrs, field.Duplicate(namePath, ctr.Name)) + } else { + allNames.Insert(ctr.Name) + } + // TODO: do not validate leading and trailing whitespace to preserve backward compatibility. + // for example: https://github.com/openshift/origin/issues/14659 image = " " is special token in pod template + // others may have done similar + if len(ctr.Image) == 0 { + allErrs = append(allErrs, field.Required(idxPath.Child("image"), "")) + } + if ctr.Lifecycle != nil { + allErrs = append(allErrs, validateLifecycle(ctr.Lifecycle, idxPath.Child("lifecycle"))...) + } + allErrs = append(allErrs, validateProbe(ctr.LivenessProbe, idxPath.Child("livenessProbe"))...) 
+ // Liveness-specific validation + if ctr.LivenessProbe != nil && ctr.LivenessProbe.SuccessThreshold != 1 { + allErrs = append(allErrs, field.Invalid(idxPath.Child("livenessProbe", "successThreshold"), ctr.LivenessProbe.SuccessThreshold, "must be 1")) + } + + switch ctr.TerminationMessagePolicy { + case core.TerminationMessageReadFile, core.TerminationMessageFallbackToLogsOnError: + case "": + allErrs = append(allErrs, field.Required(idxPath.Child("terminationMessagePolicy"), "must be 'File' or 'FallbackToLogsOnError'")) + default: + allErrs = append(allErrs, field.Invalid(idxPath.Child("terminationMessagePolicy"), ctr.TerminationMessagePolicy, "must be 'File' or 'FallbackToLogsOnError'")) + } + + allErrs = append(allErrs, validateProbe(ctr.ReadinessProbe, idxPath.Child("readinessProbe"))...) + allErrs = append(allErrs, validateContainerPorts(ctr.Ports, idxPath.Child("ports"))...) + allErrs = append(allErrs, ValidateEnv(ctr.Env, idxPath.Child("env"))...) + allErrs = append(allErrs, ValidateEnvFrom(ctr.EnvFrom, idxPath.Child("envFrom"))...) + allErrs = append(allErrs, ValidateVolumeMounts(ctr.VolumeMounts, volDevices, volumes, &ctr, idxPath.Child("volumeMounts"))...) + allErrs = append(allErrs, ValidateVolumeDevices(ctr.VolumeDevices, volMounts, volumes, idxPath.Child("volumeDevices"))...) + allErrs = append(allErrs, validatePullPolicy(ctr.ImagePullPolicy, idxPath.Child("imagePullPolicy"))...) + allErrs = append(allErrs, ValidateResourceRequirements(&ctr.Resources, idxPath.Child("resources"))...) + allErrs = append(allErrs, ValidateSecurityContext(ctr.SecurityContext, idxPath.Child("securityContext"))...) + } + // Check for colliding ports across all containers. + allErrs = append(allErrs, checkHostPortConflicts(containers, fldPath)...) + + return allErrs +} + +func validateRestartPolicy(restartPolicy *core.RestartPolicy, fldPath *field.Path) field.ErrorList { + allErrors := field.ErrorList{} + switch *restartPolicy { + case core.RestartPolicyAlways, core.RestartPolicyOnFailure, core.RestartPolicyNever: + break + case "": + allErrors = append(allErrors, field.Required(fldPath, "")) + default: + validValues := []string{string(core.RestartPolicyAlways), string(core.RestartPolicyOnFailure), string(core.RestartPolicyNever)} + allErrors = append(allErrors, field.NotSupported(fldPath, *restartPolicy, validValues)) + } + + return allErrors +} + +func validateDNSPolicy(dnsPolicy *core.DNSPolicy, fldPath *field.Path) field.ErrorList { + allErrors := field.ErrorList{} + switch *dnsPolicy { + case core.DNSClusterFirstWithHostNet, core.DNSClusterFirst, core.DNSDefault: + case core.DNSNone: + if !utilfeature.DefaultFeatureGate.Enabled(features.CustomPodDNS) { + allErrors = append(allErrors, field.Invalid(fldPath, dnsPolicy, "DNSPolicy: can not use 'None', custom pod DNS is disabled by feature gate")) + } + case "": + allErrors = append(allErrors, field.Required(fldPath, "")) + default: + validValues := []string{string(core.DNSClusterFirstWithHostNet), string(core.DNSClusterFirst), string(core.DNSDefault)} + if utilfeature.DefaultFeatureGate.Enabled(features.CustomPodDNS) { + validValues = append(validValues, string(core.DNSNone)) + } + allErrors = append(allErrors, field.NotSupported(fldPath, dnsPolicy, validValues)) + } + return allErrors +} + +const ( + // Limits on various DNS parameters. These are derived from + // restrictions in Linux libc name resolution handling. + // Max number of DNS name servers. + MaxDNSNameservers = 3 + // Max number of domains in search path. 
+ MaxDNSSearchPaths = 6 + // Max number of characters in search path. + MaxDNSSearchListChars = 256 +) + +func validatePodDNSConfig(dnsConfig *core.PodDNSConfig, dnsPolicy *core.DNSPolicy, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + // Validate DNSNone case. Must provide at least one DNS name server. + if utilfeature.DefaultFeatureGate.Enabled(features.CustomPodDNS) && dnsPolicy != nil && *dnsPolicy == core.DNSNone { + if dnsConfig == nil { + return append(allErrs, field.Required(fldPath, fmt.Sprintf("must provide `dnsConfig` when `dnsPolicy` is %s", core.DNSNone))) + } + if len(dnsConfig.Nameservers) == 0 { + return append(allErrs, field.Required(fldPath.Child("nameservers"), fmt.Sprintf("must provide at least one DNS nameserver when `dnsPolicy` is %s", core.DNSNone))) + } + } + + if dnsConfig != nil { + if !utilfeature.DefaultFeatureGate.Enabled(features.CustomPodDNS) { + return append(allErrs, field.Forbidden(fldPath, "DNSConfig: custom pod DNS is disabled by feature gate")) + } + + // Validate nameservers. + if len(dnsConfig.Nameservers) > MaxDNSNameservers { + allErrs = append(allErrs, field.Invalid(fldPath.Child("nameservers"), dnsConfig.Nameservers, fmt.Sprintf("must not have more than %v nameservers", MaxDNSNameservers))) + } + for i, ns := range dnsConfig.Nameservers { + if ip := net.ParseIP(ns); ip == nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("nameservers").Index(i), ns, "must be valid IP address")) + } + } + // Validate searches. + if len(dnsConfig.Searches) > MaxDNSSearchPaths { + allErrs = append(allErrs, field.Invalid(fldPath.Child("searches"), dnsConfig.Searches, fmt.Sprintf("must not have more than %v search paths", MaxDNSSearchPaths))) + } + // Include the space between search paths. + if len(strings.Join(dnsConfig.Searches, " ")) > MaxDNSSearchListChars { + allErrs = append(allErrs, field.Invalid(fldPath.Child("searches"), dnsConfig.Searches, "must not have more than 256 characters (including spaces) in the search list")) + } + for i, search := range dnsConfig.Searches { + allErrs = append(allErrs, ValidateDNS1123Subdomain(search, fldPath.Child("searches").Index(i))...) + } + // Validate options. + for i, option := range dnsConfig.Options { + if len(option.Name) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("options").Index(i), "must not be empty")) + } + } + } + return allErrs +} + +func validateHostNetwork(hostNetwork bool, containers []core.Container, fldPath *field.Path) field.ErrorList { + allErrors := field.ErrorList{} + if hostNetwork { + for i, container := range containers { + portsPath := fldPath.Index(i).Child("ports") + for i, port := range container.Ports { + idxPath := portsPath.Index(i) + if port.HostPort != port.ContainerPort { + allErrors = append(allErrors, field.Invalid(idxPath.Child("containerPort"), port.ContainerPort, "must match `hostPort` when `hostNetwork` is true")) + } + } + } + } + return allErrors +} + +// validateImagePullSecrets checks to make sure the pull secrets are well +// formed. Right now, we only expect name to be set (it's the only field). If +// this ever changes and someone decides to set those fields, we'd like to +// know. 
+func validateImagePullSecrets(imagePullSecrets []core.LocalObjectReference, fldPath *field.Path) field.ErrorList { + allErrors := field.ErrorList{} + for i, currPullSecret := range imagePullSecrets { + idxPath := fldPath.Index(i) + strippedRef := core.LocalObjectReference{Name: currPullSecret.Name} + if !reflect.DeepEqual(strippedRef, currPullSecret) { + allErrors = append(allErrors, field.Invalid(idxPath, currPullSecret, "only name may be set")) + } + } + return allErrors +} + +// validateAffinity checks if given affinities are valid +func validateAffinity(affinity *core.Affinity, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if affinity != nil { + if na := affinity.NodeAffinity; na != nil { + // TODO: Uncomment the next three lines once RequiredDuringSchedulingRequiredDuringExecution is implemented. + // if na.RequiredDuringSchedulingRequiredDuringExecution != nil { + // allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingRequiredDuringExecution, fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...) + // } + + if na.RequiredDuringSchedulingIgnoredDuringExecution != nil { + allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingIgnoredDuringExecution, fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...) + } + + if len(na.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + allErrs = append(allErrs, ValidatePreferredSchedulingTerms(na.PreferredDuringSchedulingIgnoredDuringExecution, fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...) + } + } + if affinity.PodAffinity != nil { + allErrs = append(allErrs, validatePodAffinity(affinity.PodAffinity, fldPath.Child("podAffinity"))...) + } + if affinity.PodAntiAffinity != nil { + allErrs = append(allErrs, validatePodAntiAffinity(affinity.PodAntiAffinity, fldPath.Child("podAntiAffinity"))...) + } + } + + return allErrs +} + +func validateTaintEffect(effect *core.TaintEffect, allowEmpty bool, fldPath *field.Path) field.ErrorList { + if !allowEmpty && len(*effect) == 0 { + return field.ErrorList{field.Required(fldPath, "")} + } + + allErrors := field.ErrorList{} + switch *effect { + // TODO: Replace next line with subsequent commented-out line when implement TaintEffectNoScheduleNoAdmit. + case core.TaintEffectNoSchedule, core.TaintEffectPreferNoSchedule, core.TaintEffectNoExecute: + // case core.TaintEffectNoSchedule, core.TaintEffectPreferNoSchedule, core.TaintEffectNoScheduleNoAdmit, core.TaintEffectNoExecute: + default: + validValues := []string{ + string(core.TaintEffectNoSchedule), + string(core.TaintEffectPreferNoSchedule), + string(core.TaintEffectNoExecute), + // TODO: Uncomment this block when implement TaintEffectNoScheduleNoAdmit. + // string(core.TaintEffectNoScheduleNoAdmit), + } + allErrors = append(allErrors, field.NotSupported(fldPath, effect, validValues)) + } + return allErrors +} + +// validateOnlyAddedTolerations validates updated pod tolerations. 
+func validateOnlyAddedTolerations(newTolerations []core.Toleration, oldTolerations []core.Toleration, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for _, old := range oldTolerations { + found := false + old.TolerationSeconds = nil + for _, new := range newTolerations { + new.TolerationSeconds = nil + if reflect.DeepEqual(old, new) { + found = true + break + } + } + if !found { + allErrs = append(allErrs, field.Forbidden(fldPath, "existing toleration can not be modified except its tolerationSeconds")) + return allErrs + } + } + + allErrs = append(allErrs, ValidateTolerations(newTolerations, fldPath)...) + return allErrs +} + +func ValidateHostAliases(hostAliases []core.HostAlias, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for _, hostAlias := range hostAliases { + if ip := net.ParseIP(hostAlias.IP); ip == nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("ip"), hostAlias.IP, "must be valid IP address")) + } + for _, hostname := range hostAlias.Hostnames { + allErrs = append(allErrs, ValidateDNS1123Subdomain(hostname, fldPath.Child("hostnames"))...) + } + } + return allErrs +} + +// ValidateTolerations tests if given tolerations have valid data. +func ValidateTolerations(tolerations []core.Toleration, fldPath *field.Path) field.ErrorList { + allErrors := field.ErrorList{} + for i, toleration := range tolerations { + idxPath := fldPath.Index(i) + // validate the toleration key + if len(toleration.Key) > 0 { + allErrors = append(allErrors, unversionedvalidation.ValidateLabelName(toleration.Key, idxPath.Child("key"))...) + } + + // empty toleration key with Exists operator and empty value means match all taints + if len(toleration.Key) == 0 && toleration.Operator != core.TolerationOpExists { + allErrors = append(allErrors, field.Invalid(idxPath.Child("operator"), toleration.Operator, + "operator must be Exists when `key` is empty, which means \"match all values and all keys\"")) + } + + if toleration.TolerationSeconds != nil && toleration.Effect != core.TaintEffectNoExecute { + allErrors = append(allErrors, field.Invalid(idxPath.Child("effect"), toleration.Effect, + "effect must be 'NoExecute' when `tolerationSeconds` is set")) + } + + // validate toleration operator and value + switch toleration.Operator { + // empty operator means Equal + case core.TolerationOpEqual, "": + if errs := validation.IsValidLabelValue(toleration.Value); len(errs) != 0 { + allErrors = append(allErrors, field.Invalid(idxPath.Child("operator"), toleration.Value, strings.Join(errs, ";"))) + } + case core.TolerationOpExists: + if len(toleration.Value) > 0 { + allErrors = append(allErrors, field.Invalid(idxPath.Child("operator"), toleration, "value must be empty when `operator` is 'Exists'")) + } + default: + validValues := []string{string(core.TolerationOpEqual), string(core.TolerationOpExists)} + allErrors = append(allErrors, field.NotSupported(idxPath.Child("operator"), toleration.Operator, validValues)) + } + + // validate toleration effect, empty toleration effect means match all taint effects + if len(toleration.Effect) > 0 { + allErrors = append(allErrors, validateTaintEffect(&toleration.Effect, true, idxPath.Child("effect"))...) 
+ } + } + return allErrors +} + +func toResourceNames(resources core.ResourceList) []core.ResourceName { + result := []core.ResourceName{} + for resourceName := range resources { + result = append(result, resourceName) + } + return result +} + +func toSet(resourceNames []core.ResourceName) sets.String { + result := sets.NewString() + for _, resourceName := range resourceNames { + result.Insert(string(resourceName)) + } + return result +} + +func toContainerResourcesSet(ctr *core.Container) sets.String { + resourceNames := toResourceNames(ctr.Resources.Requests) + resourceNames = append(resourceNames, toResourceNames(ctr.Resources.Limits)...) + return toSet(resourceNames) +} + +// validateContainersOnlyForPod does additional validation for containers on a pod versus a pod template +// it only does additive validation of fields not covered in validateContainers +func validateContainersOnlyForPod(containers []core.Container, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for i, ctr := range containers { + idxPath := fldPath.Index(i) + if len(ctr.Image) != len(strings.TrimSpace(ctr.Image)) { + allErrs = append(allErrs, field.Invalid(idxPath.Child("image"), ctr.Image, "must not have leading or trailing whitespace")) + } + } + return allErrs +} + +// ValidatePod tests if required fields in the pod are set. +func ValidatePod(pod *core.Pod) field.ErrorList { + fldPath := field.NewPath("metadata") + allErrs := ValidateObjectMeta(&pod.ObjectMeta, true, ValidatePodName, fldPath) + allErrs = append(allErrs, ValidatePodSpecificAnnotations(pod.ObjectMeta.Annotations, &pod.Spec, fldPath.Child("annotations"))...) + allErrs = append(allErrs, ValidatePodSpec(&pod.Spec, field.NewPath("spec"))...) + + // we do additional validation only pertinent for pods and not pod templates + // this was done to preserve backwards compatibility + specPath := field.NewPath("spec") + + allErrs = append(allErrs, validateContainersOnlyForPod(pod.Spec.Containers, specPath.Child("containers"))...) + allErrs = append(allErrs, validateContainersOnlyForPod(pod.Spec.InitContainers, specPath.Child("initContainers"))...) + + if utilfeature.DefaultFeatureGate.Enabled(features.HugePages) { + hugePageResources := sets.NewString() + for i := range pod.Spec.Containers { + resourceSet := toContainerResourcesSet(&pod.Spec.Containers[i]) + for resourceStr := range resourceSet { + if v1helper.IsHugePageResourceName(v1.ResourceName(resourceStr)) { + hugePageResources.Insert(resourceStr) + } + } + } + if len(hugePageResources) > 1 { + allErrs = append(allErrs, field.Invalid(specPath, hugePageResources, "must use a single hugepage size in a pod spec")) + } + } + + return allErrs +} + +// ValidatePodSpec tests that the specified PodSpec has valid data. +// This includes checking formatting and uniqueness. It also canonicalizes the +// structure by setting default values and implementing any backwards-compatibility +// tricks. +func ValidatePodSpec(spec *core.PodSpec, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + vols, vErrs := ValidateVolumes(spec.Volumes, fldPath.Child("volumes")) + allErrs = append(allErrs, vErrs...) + allErrs = append(allErrs, validateContainers(spec.Containers, vols, fldPath.Child("containers"))...) + allErrs = append(allErrs, validateInitContainers(spec.InitContainers, spec.Containers, vols, fldPath.Child("initContainers"))...) + allErrs = append(allErrs, validateRestartPolicy(&spec.RestartPolicy, fldPath.Child("restartPolicy"))...) 
+ allErrs = append(allErrs, validateDNSPolicy(&spec.DNSPolicy, fldPath.Child("dnsPolicy"))...) + allErrs = append(allErrs, unversionedvalidation.ValidateLabels(spec.NodeSelector, fldPath.Child("nodeSelector"))...) + allErrs = append(allErrs, ValidatePodSecurityContext(spec.SecurityContext, spec, fldPath, fldPath.Child("securityContext"))...) + allErrs = append(allErrs, validateImagePullSecrets(spec.ImagePullSecrets, fldPath.Child("imagePullSecrets"))...) + allErrs = append(allErrs, validateAffinity(spec.Affinity, fldPath.Child("affinity"))...) + allErrs = append(allErrs, validatePodDNSConfig(spec.DNSConfig, &spec.DNSPolicy, fldPath.Child("dnsConfig"))...) + if len(spec.ServiceAccountName) > 0 { + for _, msg := range ValidateServiceAccountName(spec.ServiceAccountName, false) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("serviceAccountName"), spec.ServiceAccountName, msg)) + } + } + + if len(spec.NodeName) > 0 { + for _, msg := range ValidateNodeName(spec.NodeName, false) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("nodeName"), spec.NodeName, msg)) + } + } + + if spec.ActiveDeadlineSeconds != nil { + value := *spec.ActiveDeadlineSeconds + if value < 1 || value > math.MaxInt32 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("activeDeadlineSeconds"), value, validation.InclusiveRangeError(1, math.MaxInt32))) + } + } + + if len(spec.Hostname) > 0 { + allErrs = append(allErrs, ValidateDNS1123Label(spec.Hostname, fldPath.Child("hostname"))...) + } + + if len(spec.Subdomain) > 0 { + allErrs = append(allErrs, ValidateDNS1123Label(spec.Subdomain, fldPath.Child("subdomain"))...) + } + + if len(spec.Tolerations) > 0 { + allErrs = append(allErrs, ValidateTolerations(spec.Tolerations, fldPath.Child("tolerations"))...) + } + + if len(spec.HostAliases) > 0 { + allErrs = append(allErrs, ValidateHostAliases(spec.HostAliases, fldPath.Child("hostAliases"))...) 
+ } + + if len(spec.PriorityClassName) > 0 { + if !utilfeature.DefaultFeatureGate.Enabled(features.PodPriority) { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("priorityClassName"), "Pod priority is disabled by feature-gate")) + } else { + for _, msg := range ValidatePriorityClassName(spec.PriorityClassName, false) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("priorityClassName"), spec.PriorityClassName, msg)) + } + } + } + + if spec.Priority != nil && !utilfeature.DefaultFeatureGate.Enabled(features.PodPriority) { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("priority"), "Pod priority is disabled by feature-gate")) + } + + return allErrs +} + +// ValidateNodeSelectorRequirement tests that the specified NodeSelectorRequirement fields has valid data +func ValidateNodeSelectorRequirement(rq core.NodeSelectorRequirement, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + switch rq.Operator { + case core.NodeSelectorOpIn, core.NodeSelectorOpNotIn: + if len(rq.Values) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified when `operator` is 'In' or 'NotIn'")) + } + case core.NodeSelectorOpExists, core.NodeSelectorOpDoesNotExist: + if len(rq.Values) > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("values"), "may not be specified when `operator` is 'Exists' or 'DoesNotExist'")) + } + + case core.NodeSelectorOpGt, core.NodeSelectorOpLt: + if len(rq.Values) != 1 { + allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified single value when `operator` is 'Lt' or 'Gt'")) + } + default: + allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), rq.Operator, "not a valid selector operator")) + } + allErrs = append(allErrs, unversionedvalidation.ValidateLabelName(rq.Key, fldPath.Child("key"))...) + return allErrs +} + +// ValidateNodeSelectorTerm tests that the specified node selector term has valid data +func ValidateNodeSelectorTerm(term core.NodeSelectorTerm, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if len(term.MatchExpressions) == 0 { + return append(allErrs, field.Required(fldPath.Child("matchExpressions"), "must have at least one node selector requirement")) + } + for j, req := range term.MatchExpressions { + allErrs = append(allErrs, ValidateNodeSelectorRequirement(req, fldPath.Child("matchExpressions").Index(j))...) + } + return allErrs +} + +// ValidateNodeSelector tests that the specified nodeSelector fields has valid data +func ValidateNodeSelector(nodeSelector *core.NodeSelector, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + termFldPath := fldPath.Child("nodeSelectorTerms") + if len(nodeSelector.NodeSelectorTerms) == 0 { + return append(allErrs, field.Required(termFldPath, "must have at least one node selector term")) + } + + for i, term := range nodeSelector.NodeSelectorTerms { + allErrs = append(allErrs, ValidateNodeSelectorTerm(term, termFldPath.Index(i))...) 
+ } + + return allErrs +} + +// ValidateAvoidPodsInNodeAnnotations tests that the serialized AvoidPods in Node.Annotations has valid data +func ValidateAvoidPodsInNodeAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + v1Avoids, err := v1helper.GetAvoidPodsFromNodeAnnotations(annotations) + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("AvoidPods"), core.PreferAvoidPodsAnnotationKey, err.Error())) + return allErrs + } + var avoids core.AvoidPods + if err := corev1.Convert_v1_AvoidPods_To_core_AvoidPods(&v1Avoids, &avoids, nil); err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("AvoidPods"), core.PreferAvoidPodsAnnotationKey, err.Error())) + return allErrs + } + + if len(avoids.PreferAvoidPods) != 0 { + for i, pa := range avoids.PreferAvoidPods { + idxPath := fldPath.Child(core.PreferAvoidPodsAnnotationKey).Index(i) + allErrs = append(allErrs, validatePreferAvoidPodsEntry(pa, idxPath)...) + } + } + + return allErrs +} + +// validatePreferAvoidPodsEntry tests if given PreferAvoidPodsEntry has valid data. +func validatePreferAvoidPodsEntry(avoidPodEntry core.PreferAvoidPodsEntry, fldPath *field.Path) field.ErrorList { + allErrors := field.ErrorList{} + if avoidPodEntry.PodSignature.PodController == nil { + allErrors = append(allErrors, field.Required(fldPath.Child("PodSignature"), "")) + } else { + if *(avoidPodEntry.PodSignature.PodController.Controller) != true { + allErrors = append(allErrors, + field.Invalid(fldPath.Child("PodSignature").Child("PodController").Child("Controller"), + *(avoidPodEntry.PodSignature.PodController.Controller), "must point to a controller")) + } + } + return allErrors +} + +// ValidatePreferredSchedulingTerms tests that the specified SoftNodeAffinity fields has valid data +func ValidatePreferredSchedulingTerms(terms []core.PreferredSchedulingTerm, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + for i, term := range terms { + if term.Weight <= 0 || term.Weight > 100 { + allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("weight"), term.Weight, "must be in the range 1-100")) + } + + allErrs = append(allErrs, ValidateNodeSelectorTerm(term.Preference, fldPath.Index(i).Child("preference"))...) + } + return allErrs +} + +// validatePodAffinityTerm tests that the specified podAffinityTerm fields have valid data +func validatePodAffinityTerm(podAffinityTerm core.PodAffinityTerm, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(podAffinityTerm.LabelSelector, fldPath.Child("matchExpressions"))...) + for _, name := range podAffinityTerm.Namespaces { + for _, msg := range ValidateNamespaceName(name, false) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("namespace"), name, msg)) + } + } + if len(podAffinityTerm.TopologyKey) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("topologyKey"), "can not be empty")) + } + return append(allErrs, unversionedvalidation.ValidateLabelName(podAffinityTerm.TopologyKey, fldPath.Child("topologyKey"))...) 
+} + +// validatePodAffinityTerms tests that the specified podAffinityTerms fields have valid data +func validatePodAffinityTerms(podAffinityTerms []core.PodAffinityTerm, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for i, podAffinityTerm := range podAffinityTerms { + allErrs = append(allErrs, validatePodAffinityTerm(podAffinityTerm, fldPath.Index(i))...) + } + return allErrs +} + +// validateWeightedPodAffinityTerms tests that the specified weightedPodAffinityTerms fields have valid data +func validateWeightedPodAffinityTerms(weightedPodAffinityTerms []core.WeightedPodAffinityTerm, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for j, weightedTerm := range weightedPodAffinityTerms { + if weightedTerm.Weight <= 0 || weightedTerm.Weight > 100 { + allErrs = append(allErrs, field.Invalid(fldPath.Index(j).Child("weight"), weightedTerm.Weight, "must be in the range 1-100")) + } + allErrs = append(allErrs, validatePodAffinityTerm(weightedTerm.PodAffinityTerm, fldPath.Index(j).Child("podAffinityTerm"))...) + } + return allErrs +} + +// validatePodAntiAffinity tests that the specified podAntiAffinity fields have valid data +func validatePodAntiAffinity(podAntiAffinity *core.PodAntiAffinity, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + // TODO:Uncomment below code once RequiredDuringSchedulingRequiredDuringExecution is implemented. + // if podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution != nil { + // allErrs = append(allErrs, validatePodAffinityTerms(podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution, false, + // fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...) + //} + if podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil { + allErrs = append(allErrs, validatePodAffinityTerms(podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution, + fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...) + } + if podAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution != nil { + allErrs = append(allErrs, validateWeightedPodAffinityTerms(podAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution, + fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...) + } + return allErrs +} + +// validatePodAffinity tests that the specified podAffinity fields have valid data +func validatePodAffinity(podAffinity *core.PodAffinity, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + // TODO:Uncomment below code once RequiredDuringSchedulingRequiredDuringExecution is implemented. + // if podAffinity.RequiredDuringSchedulingRequiredDuringExecution != nil { + // allErrs = append(allErrs, validatePodAffinityTerms(podAffinity.RequiredDuringSchedulingRequiredDuringExecution, false, + // fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...) + //} + if podAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil { + allErrs = append(allErrs, validatePodAffinityTerms(podAffinity.RequiredDuringSchedulingIgnoredDuringExecution, + fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...) + } + if podAffinity.PreferredDuringSchedulingIgnoredDuringExecution != nil { + allErrs = append(allErrs, validateWeightedPodAffinityTerms(podAffinity.PreferredDuringSchedulingIgnoredDuringExecution, + fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...) 
+ } + return allErrs +} + +func ValidateSeccompProfile(p string, fldPath *field.Path) field.ErrorList { + if p == "docker/default" { + return nil + } + if p == "unconfined" { + return nil + } + if strings.HasPrefix(p, "localhost/") { + return validateLocalDescendingPath(strings.TrimPrefix(p, "localhost/"), fldPath) + } + return field.ErrorList{field.Invalid(fldPath, p, "must be a valid seccomp profile")} +} + +func ValidateSeccompPodAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if p, exists := annotations[core.SeccompPodAnnotationKey]; exists { + allErrs = append(allErrs, ValidateSeccompProfile(p, fldPath.Child(core.SeccompPodAnnotationKey))...) + } + for k, p := range annotations { + if strings.HasPrefix(k, core.SeccompContainerAnnotationKeyPrefix) { + allErrs = append(allErrs, ValidateSeccompProfile(p, fldPath.Child(k))...) + } + } + + return allErrs +} + +func ValidateAppArmorPodAnnotations(annotations map[string]string, spec *core.PodSpec, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for k, p := range annotations { + if !strings.HasPrefix(k, apparmor.ContainerAnnotationKeyPrefix) { + continue + } + // TODO: this belongs to admission, not general pod validation: + if !utilfeature.DefaultFeatureGate.Enabled(features.AppArmor) { + allErrs = append(allErrs, field.Forbidden(fldPath.Key(k), "AppArmor is disabled by feature-gate")) + continue + } + containerName := strings.TrimPrefix(k, apparmor.ContainerAnnotationKeyPrefix) + if !podSpecHasContainer(spec, containerName) { + allErrs = append(allErrs, field.Invalid(fldPath.Key(k), containerName, "container not found")) + } + + if err := apparmor.ValidateProfileFormat(p); err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Key(k), p, err.Error())) + } + } + + return allErrs +} + +func podSpecHasContainer(spec *core.PodSpec, containerName string) bool { + for _, c := range spec.InitContainers { + if c.Name == containerName { + return true + } + } + for _, c := range spec.Containers { + if c.Name == containerName { + return true + } + } + return false +} + +const ( + // a sysctl segment regex, concatenated with dots to form a sysctl name + SysctlSegmentFmt string = "[a-z0-9]([-_a-z0-9]*[a-z0-9])?" + + // a sysctl name regex + SysctlFmt string = "(" + SysctlSegmentFmt + "\\.)*" + SysctlSegmentFmt + + // the maximal length of a sysctl name + SysctlMaxLength int = 253 +) + +var sysctlRegexp = regexp.MustCompile("^" + SysctlFmt + "$") + +// IsValidSysctlName checks that the given string is a valid sysctl name, +// i.e. matches SysctlFmt. +func IsValidSysctlName(name string) bool { + if len(name) > SysctlMaxLength { + return false + } + return sysctlRegexp.MatchString(name) +} + +func validateSysctls(sysctls []core.Sysctl, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for i, s := range sysctls { + if len(s.Name) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Index(i).Child("name"), "")) + } else if !IsValidSysctlName(s.Name) { + allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("name"), s.Name, fmt.Sprintf("must have at most %d characters and match regex %s", SysctlMaxLength, SysctlFmt))) + } + } + return allErrs +} + +// ValidatePodSecurityContext test that the specified PodSecurityContext has valid data. 
+func ValidatePodSecurityContext(securityContext *core.PodSecurityContext, spec *core.PodSpec, specPath, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if securityContext != nil { + allErrs = append(allErrs, validateHostNetwork(securityContext.HostNetwork, spec.Containers, specPath.Child("containers"))...) + if securityContext.FSGroup != nil { + for _, msg := range validation.IsValidGroupID(*securityContext.FSGroup) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("fsGroup"), *(securityContext.FSGroup), msg)) + } + } + if securityContext.RunAsUser != nil { + for _, msg := range validation.IsValidUserID(*securityContext.RunAsUser) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("runAsUser"), *(securityContext.RunAsUser), msg)) + } + } + for g, gid := range securityContext.SupplementalGroups { + for _, msg := range validation.IsValidGroupID(gid) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("supplementalGroups").Index(g), gid, msg)) + } + } + } + + return allErrs +} + +func ValidateContainerUpdates(newContainers, oldContainers []core.Container, fldPath *field.Path) (allErrs field.ErrorList, stop bool) { + allErrs = field.ErrorList{} + if len(newContainers) != len(oldContainers) { + //TODO: Pinpoint the specific container that causes the invalid error after we have strategic merge diff + allErrs = append(allErrs, field.Forbidden(fldPath, "pod updates may not add or remove containers")) + return allErrs, true + } + + // validate updated container images + for i, ctr := range newContainers { + if len(ctr.Image) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Index(i).Child("image"), "")) + } + // this is only called from ValidatePodUpdate so its safe to check leading/trailing whitespace. + if len(strings.TrimSpace(ctr.Image)) != len(ctr.Image) { + allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("image"), ctr.Image, "must not have leading or trailing whitespace")) + } + } + return allErrs, false +} + +// ValidatePodUpdate tests to see if the update is legal for an end user to make. newPod is updated with fields +// that cannot be changed. +func ValidatePodUpdate(newPod, oldPod *core.Pod) field.ErrorList { + fldPath := field.NewPath("metadata") + allErrs := ValidateObjectMetaUpdate(&newPod.ObjectMeta, &oldPod.ObjectMeta, fldPath) + allErrs = append(allErrs, ValidatePodSpecificAnnotationUpdates(newPod, oldPod, fldPath.Child("annotations"))...) + specPath := field.NewPath("spec") + + // validate updateable fields: + // 1. spec.containers[*].image + // 2. spec.initContainers[*].image + // 3. spec.activeDeadlineSeconds + + containerErrs, stop := ValidateContainerUpdates(newPod.Spec.Containers, oldPod.Spec.Containers, specPath.Child("containers")) + allErrs = append(allErrs, containerErrs...) + if stop { + return allErrs + } + containerErrs, stop = ValidateContainerUpdates(newPod.Spec.InitContainers, oldPod.Spec.InitContainers, specPath.Child("initContainers")) + allErrs = append(allErrs, containerErrs...) + if stop { + return allErrs + } + + // validate updated spec.activeDeadlineSeconds. two types of updates are allowed: + // 1. from nil to a positive value + // 2. 
from a positive value to a lesser, non-negative value + if newPod.Spec.ActiveDeadlineSeconds != nil { + newActiveDeadlineSeconds := *newPod.Spec.ActiveDeadlineSeconds + if newActiveDeadlineSeconds < 0 || newActiveDeadlineSeconds > math.MaxInt32 { + allErrs = append(allErrs, field.Invalid(specPath.Child("activeDeadlineSeconds"), newActiveDeadlineSeconds, validation.InclusiveRangeError(0, math.MaxInt32))) + return allErrs + } + if oldPod.Spec.ActiveDeadlineSeconds != nil { + oldActiveDeadlineSeconds := *oldPod.Spec.ActiveDeadlineSeconds + if oldActiveDeadlineSeconds < newActiveDeadlineSeconds { + allErrs = append(allErrs, field.Invalid(specPath.Child("activeDeadlineSeconds"), newActiveDeadlineSeconds, "must be less than or equal to previous value")) + return allErrs + } + } + } else if oldPod.Spec.ActiveDeadlineSeconds != nil { + allErrs = append(allErrs, field.Invalid(specPath.Child("activeDeadlineSeconds"), newPod.Spec.ActiveDeadlineSeconds, "must not update from a positive integer to nil value")) + } + + // handle updateable fields by munging those fields prior to deep equal comparison. + mungedPod := *newPod + // munge spec.containers[*].image + var newContainers []core.Container + for ix, container := range mungedPod.Spec.Containers { + container.Image = oldPod.Spec.Containers[ix].Image + newContainers = append(newContainers, container) + } + mungedPod.Spec.Containers = newContainers + // munge spec.initContainers[*].image + var newInitContainers []core.Container + for ix, container := range mungedPod.Spec.InitContainers { + container.Image = oldPod.Spec.InitContainers[ix].Image + newInitContainers = append(newInitContainers, container) + } + mungedPod.Spec.InitContainers = newInitContainers + // munge spec.activeDeadlineSeconds + mungedPod.Spec.ActiveDeadlineSeconds = nil + if oldPod.Spec.ActiveDeadlineSeconds != nil { + activeDeadlineSeconds := *oldPod.Spec.ActiveDeadlineSeconds + mungedPod.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds + } + + // Allow only additions to tolerations updates. + mungedPod.Spec.Tolerations = oldPod.Spec.Tolerations + allErrs = append(allErrs, validateOnlyAddedTolerations(newPod.Spec.Tolerations, oldPod.Spec.Tolerations, specPath.Child("tolerations"))...) + + if !apiequality.Semantic.DeepEqual(mungedPod.Spec, oldPod.Spec) { + // This diff isn't perfect, but it's a helluva lot better an "I'm not going to tell you what the difference is". + //TODO: Pinpoint the specific field that causes the invalid error after we have strategic merge diff + specDiff := diff.ObjectDiff(mungedPod.Spec, oldPod.Spec) + allErrs = append(allErrs, field.Forbidden(specPath, fmt.Sprintf("pod updates may not change fields other than `spec.containers[*].image`, `spec.initContainers[*].image`, `spec.activeDeadlineSeconds` or `spec.tolerations` (only additions to existing tolerations)\n%v", specDiff))) + } + + return allErrs +} + +// ValidatePodStatusUpdate tests to see if the update is legal for an end user to make. newPod is updated with fields +// that cannot be changed. +func ValidatePodStatusUpdate(newPod, oldPod *core.Pod) field.ErrorList { + fldPath := field.NewPath("metadata") + allErrs := ValidateObjectMetaUpdate(&newPod.ObjectMeta, &oldPod.ObjectMeta, fldPath) + allErrs = append(allErrs, ValidatePodSpecificAnnotationUpdates(newPod, oldPod, fldPath.Child("annotations"))...) 
+ + if newPod.Spec.NodeName != oldPod.Spec.NodeName { + allErrs = append(allErrs, field.Forbidden(field.NewPath("status", "nodeName"), "may not be changed directly")) + } + + // For status update we ignore changes to pod spec. + newPod.Spec = oldPod.Spec + + return allErrs +} + +// ValidatePodBinding tests if required fields in the pod binding are legal. +func ValidatePodBinding(binding *core.Binding) field.ErrorList { + allErrs := field.ErrorList{} + + if len(binding.Target.Kind) != 0 && binding.Target.Kind != "Node" { + // TODO: When validation becomes versioned, this gets more complicated. + allErrs = append(allErrs, field.NotSupported(field.NewPath("target", "kind"), binding.Target.Kind, []string{"Node", ""})) + } + if len(binding.Target.Name) == 0 { + // TODO: When validation becomes versioned, this gets more complicated. + allErrs = append(allErrs, field.Required(field.NewPath("target", "name"), "")) + } + + return allErrs +} + +// ValidatePodTemplate tests if required fields in the pod template are set. +func ValidatePodTemplate(pod *core.PodTemplate) field.ErrorList { + allErrs := ValidateObjectMeta(&pod.ObjectMeta, true, ValidatePodName, field.NewPath("metadata")) + allErrs = append(allErrs, ValidatePodTemplateSpec(&pod.Template, field.NewPath("template"))...) + return allErrs +} + +// ValidatePodTemplateUpdate tests to see if the update is legal for an end user to make. newPod is updated with fields +// that cannot be changed. +func ValidatePodTemplateUpdate(newPod, oldPod *core.PodTemplate) field.ErrorList { + allErrs := ValidateObjectMetaUpdate(&oldPod.ObjectMeta, &newPod.ObjectMeta, field.NewPath("metadata")) + allErrs = append(allErrs, ValidatePodTemplateSpec(&newPod.Template, field.NewPath("template"))...) + return allErrs +} + +var supportedSessionAffinityType = sets.NewString(string(core.ServiceAffinityClientIP), string(core.ServiceAffinityNone)) +var supportedServiceType = sets.NewString(string(core.ServiceTypeClusterIP), string(core.ServiceTypeNodePort), + string(core.ServiceTypeLoadBalancer), string(core.ServiceTypeExternalName)) + +// ValidateService tests if required fields/annotations of a Service are valid. +func ValidateService(service *core.Service) field.ErrorList { + allErrs := ValidateObjectMeta(&service.ObjectMeta, true, ValidateServiceName, field.NewPath("metadata")) + + specPath := field.NewPath("spec") + isHeadlessService := service.Spec.ClusterIP == core.ClusterIPNone + if len(service.Spec.Ports) == 0 && !isHeadlessService && service.Spec.Type != core.ServiceTypeExternalName { + allErrs = append(allErrs, field.Required(specPath.Child("ports"), "")) + } + switch service.Spec.Type { + case core.ServiceTypeLoadBalancer: + for ix := range service.Spec.Ports { + port := &service.Spec.Ports[ix] + // This is a workaround for broken cloud environments that + // over-open firewalls. Hopefully it can go away when more clouds + // understand containers better. 
+ if port.Port == 10250 { + portPath := specPath.Child("ports").Index(ix) + allErrs = append(allErrs, field.Invalid(portPath, port.Port, "may not expose port 10250 externally since it is used by kubelet")) + } + } + if service.Spec.ClusterIP == "None" { + allErrs = append(allErrs, field.Invalid(specPath.Child("clusterIP"), service.Spec.ClusterIP, "may not be set to 'None' for LoadBalancer services")) + } + case core.ServiceTypeNodePort: + if service.Spec.ClusterIP == "None" { + allErrs = append(allErrs, field.Invalid(specPath.Child("clusterIP"), service.Spec.ClusterIP, "may not be set to 'None' for NodePort services")) + } + case core.ServiceTypeExternalName: + if service.Spec.ClusterIP != "" { + allErrs = append(allErrs, field.Invalid(specPath.Child("clusterIP"), service.Spec.ClusterIP, "must be empty for ExternalName services")) + } + if len(service.Spec.ExternalName) > 0 { + allErrs = append(allErrs, ValidateDNS1123Subdomain(service.Spec.ExternalName, specPath.Child("externalName"))...) + } else { + allErrs = append(allErrs, field.Required(specPath.Child("externalName"), "")) + } + } + + allPortNames := sets.String{} + portsPath := specPath.Child("ports") + for i := range service.Spec.Ports { + portPath := portsPath.Index(i) + allErrs = append(allErrs, validateServicePort(&service.Spec.Ports[i], len(service.Spec.Ports) > 1, isHeadlessService, &allPortNames, portPath)...) + } + + if service.Spec.Selector != nil { + allErrs = append(allErrs, unversionedvalidation.ValidateLabels(service.Spec.Selector, specPath.Child("selector"))...) + } + + if len(service.Spec.SessionAffinity) == 0 { + allErrs = append(allErrs, field.Required(specPath.Child("sessionAffinity"), "")) + } else if !supportedSessionAffinityType.Has(string(service.Spec.SessionAffinity)) { + allErrs = append(allErrs, field.NotSupported(specPath.Child("sessionAffinity"), service.Spec.SessionAffinity, supportedSessionAffinityType.List())) + } + + if service.Spec.SessionAffinity == core.ServiceAffinityClientIP { + allErrs = append(allErrs, validateClientIPAffinityConfig(service.Spec.SessionAffinityConfig, specPath.Child("sessionAffinityConfig"))...) + } else if service.Spec.SessionAffinity == core.ServiceAffinityNone { + if service.Spec.SessionAffinityConfig != nil { + allErrs = append(allErrs, field.Forbidden(specPath.Child("sessionAffinityConfig"), fmt.Sprintf("must not be set when session affinity is %s", string(core.ServiceAffinityNone)))) + } + } + + if helper.IsServiceIPSet(service) { + if ip := net.ParseIP(service.Spec.ClusterIP); ip == nil { + allErrs = append(allErrs, field.Invalid(specPath.Child("clusterIP"), service.Spec.ClusterIP, "must be empty, 'None', or a valid IP address")) + } + } + + ipPath := specPath.Child("externalIPs") + for i, ip := range service.Spec.ExternalIPs { + idxPath := ipPath.Index(i) + if msgs := validation.IsValidIP(ip); len(msgs) != 0 { + for i := range msgs { + allErrs = append(allErrs, field.Invalid(idxPath, ip, msgs[i])) + } + } else { + allErrs = append(allErrs, validateNonSpecialIP(ip, idxPath)...) 
+ } + } + + if len(service.Spec.Type) == 0 { + allErrs = append(allErrs, field.Required(specPath.Child("type"), "")) + } else if !supportedServiceType.Has(string(service.Spec.Type)) { + allErrs = append(allErrs, field.NotSupported(specPath.Child("type"), service.Spec.Type, supportedServiceType.List())) + } + + if service.Spec.Type == core.ServiceTypeLoadBalancer { + portsPath := specPath.Child("ports") + includeProtocols := sets.NewString() + for i := range service.Spec.Ports { + portPath := portsPath.Index(i) + if !supportedPortProtocols.Has(string(service.Spec.Ports[i].Protocol)) { + allErrs = append(allErrs, field.Invalid(portPath.Child("protocol"), service.Spec.Ports[i].Protocol, "cannot create an external load balancer with non-TCP/UDP ports")) + } else { + includeProtocols.Insert(string(service.Spec.Ports[i].Protocol)) + } + } + if includeProtocols.Len() > 1 { + allErrs = append(allErrs, field.Invalid(portsPath, service.Spec.Ports, "cannot create an external load balancer with mix protocols")) + } + } + + if service.Spec.Type == core.ServiceTypeClusterIP { + portsPath := specPath.Child("ports") + for i := range service.Spec.Ports { + portPath := portsPath.Index(i) + if service.Spec.Ports[i].NodePort != 0 { + allErrs = append(allErrs, field.Invalid(portPath.Child("nodePort"), service.Spec.Ports[i].NodePort, "may not be used when `type` is 'ClusterIP'")) + } + } + } + + // Check for duplicate NodePorts, considering (protocol,port) pairs + portsPath = specPath.Child("ports") + nodePorts := make(map[core.ServicePort]bool) + for i := range service.Spec.Ports { + port := &service.Spec.Ports[i] + if port.NodePort == 0 { + continue + } + portPath := portsPath.Index(i) + var key core.ServicePort + key.Protocol = port.Protocol + key.NodePort = port.NodePort + _, found := nodePorts[key] + if found { + allErrs = append(allErrs, field.Duplicate(portPath.Child("nodePort"), port.NodePort)) + } + nodePorts[key] = true + } + + // Check for duplicate Ports, considering (protocol,port) pairs + portsPath = specPath.Child("ports") + ports := make(map[core.ServicePort]bool) + for i, port := range service.Spec.Ports { + portPath := portsPath.Index(i) + key := core.ServicePort{Protocol: port.Protocol, Port: port.Port} + _, found := ports[key] + if found { + allErrs = append(allErrs, field.Duplicate(portPath, key)) + } + ports[key] = true + } + + // Validate SourceRange field and annotation + _, ok := service.Annotations[core.AnnotationLoadBalancerSourceRangesKey] + if len(service.Spec.LoadBalancerSourceRanges) > 0 || ok { + var fieldPath *field.Path + var val string + if len(service.Spec.LoadBalancerSourceRanges) > 0 { + fieldPath = specPath.Child("LoadBalancerSourceRanges") + val = fmt.Sprintf("%v", service.Spec.LoadBalancerSourceRanges) + } else { + fieldPath = field.NewPath("metadata", "annotations").Key(core.AnnotationLoadBalancerSourceRangesKey) + val = service.Annotations[core.AnnotationLoadBalancerSourceRangesKey] + } + if service.Spec.Type != core.ServiceTypeLoadBalancer { + allErrs = append(allErrs, field.Invalid(fieldPath, "", "may only be used when `type` is 'LoadBalancer'")) + } + _, err := apiservice.GetLoadBalancerSourceRanges(service) + if err != nil { + allErrs = append(allErrs, field.Invalid(fieldPath, val, "must be a list of IP ranges. For example, 10.240.0.0/24,10.250.0.0/24 ")) + } + } + + allErrs = append(allErrs, validateServiceExternalTrafficFieldsValue(service)...) 
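Reviewer note (editor's addition): the duplicate-NodePort check above keys uniqueness on the (protocol, nodePort) pair, so 30080/TCP and 30080/UDP may coexist while a second 30080/TCP is flagged. A standalone sketch of that keying (portKey is a stand-in for the stripped-down core.ServicePort the validator uses as its map key):

    package main

    import "fmt"

    // portKey mirrors the only fields the validator sets on its map key.
    type portKey struct {
        Protocol string
        NodePort int32
    }

    func main() {
        ports := []portKey{{"TCP", 30080}, {"UDP", 30080}, {"TCP", 30080}}
        seen := map[portKey]bool{}
        for i, p := range ports {
            if seen[p] {
                fmt.Printf("spec.ports[%d].nodePort: Duplicate value: %d (%s)\n", i, p.NodePort, p.Protocol)
            }
            seen[p] = true
        }
        // Only index 2 is reported; the UDP port at index 1 is allowed.
    }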
+ + return allErrs +} + +func validateServicePort(sp *core.ServicePort, requireName, isHeadlessService bool, allNames *sets.String, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if requireName && len(sp.Name) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) + } else if len(sp.Name) != 0 { + allErrs = append(allErrs, ValidateDNS1123Label(sp.Name, fldPath.Child("name"))...) + if allNames.Has(sp.Name) { + allErrs = append(allErrs, field.Duplicate(fldPath.Child("name"), sp.Name)) + } else { + allNames.Insert(sp.Name) + } + } + + for _, msg := range validation.IsValidPortNum(int(sp.Port)) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("port"), sp.Port, msg)) + } + + if len(sp.Protocol) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("protocol"), "")) + } else if !supportedPortProtocols.Has(string(sp.Protocol)) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("protocol"), sp.Protocol, supportedPortProtocols.List())) + } + + allErrs = append(allErrs, ValidatePortNumOrName(sp.TargetPort, fldPath.Child("targetPort"))...) + + // in the v1 API, targetPorts on headless services were tolerated. + // once we have version-specific validation, we can reject this on newer API versions, but until then, we have to tolerate it for compatibility. + // + // if isHeadlessService { + // if sp.TargetPort.Type == intstr.String || (sp.TargetPort.Type == intstr.Int && sp.Port != sp.TargetPort.IntValue()) { + // allErrs = append(allErrs, field.Invalid(fldPath.Child("targetPort"), sp.TargetPort, "must be equal to the value of 'port' when clusterIP = None")) + // } + // } + + return allErrs +} + +// validateServiceExternalTrafficFieldsValue validates ExternalTraffic related annotations +// have legal value. +func validateServiceExternalTrafficFieldsValue(service *core.Service) field.ErrorList { + allErrs := field.ErrorList{} + + // Check first class fields. + if service.Spec.ExternalTrafficPolicy != "" && + service.Spec.ExternalTrafficPolicy != core.ServiceExternalTrafficPolicyTypeCluster && + service.Spec.ExternalTrafficPolicy != core.ServiceExternalTrafficPolicyTypeLocal { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("externalTrafficPolicy"), service.Spec.ExternalTrafficPolicy, + fmt.Sprintf("ExternalTrafficPolicy must be empty, %v or %v", core.ServiceExternalTrafficPolicyTypeCluster, core.ServiceExternalTrafficPolicyTypeLocal))) + } + if service.Spec.HealthCheckNodePort < 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("healthCheckNodePort"), service.Spec.HealthCheckNodePort, + "HealthCheckNodePort must be not less than 0")) + } + + return allErrs +} + +// ValidateServiceExternalTrafficFieldsCombination validates if ExternalTrafficPolicy, +// HealthCheckNodePort and Type combination are legal. For update, it should be called +// after clearing externalTraffic related fields for the ease of transitioning between +// different service types. 
+func ValidateServiceExternalTrafficFieldsCombination(service *core.Service) field.ErrorList { + allErrs := field.ErrorList{} + + if service.Spec.Type != core.ServiceTypeLoadBalancer && + service.Spec.Type != core.ServiceTypeNodePort && + service.Spec.ExternalTrafficPolicy != "" { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "externalTrafficPolicy"), service.Spec.ExternalTrafficPolicy, + "ExternalTrafficPolicy can only be set on NodePort and LoadBalancer service")) + } + + if !apiservice.NeedsHealthCheck(service) && + service.Spec.HealthCheckNodePort != 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "healthCheckNodePort"), service.Spec.HealthCheckNodePort, + "HealthCheckNodePort can only be set on LoadBalancer service with ExternalTrafficPolicy=Local")) + } + + return allErrs +} + +// ValidateServiceUpdate tests if required fields in the service are set during an update +func ValidateServiceUpdate(service, oldService *core.Service) field.ErrorList { + allErrs := ValidateObjectMetaUpdate(&service.ObjectMeta, &oldService.ObjectMeta, field.NewPath("metadata")) + + // ClusterIP should be immutable for services using it (every type other than ExternalName) + // which do not have ClusterIP assigned yet (empty string value) + if service.Spec.Type != core.ServiceTypeExternalName { + if oldService.Spec.Type != core.ServiceTypeExternalName && oldService.Spec.ClusterIP != "" { + allErrs = append(allErrs, ValidateImmutableField(service.Spec.ClusterIP, oldService.Spec.ClusterIP, field.NewPath("spec", "clusterIP"))...) + } + } + + allErrs = append(allErrs, ValidateService(service)...) + return allErrs +} + +// ValidateServiceStatusUpdate tests if required fields in the Service are set when updating status. +func ValidateServiceStatusUpdate(service, oldService *core.Service) field.ErrorList { + allErrs := ValidateObjectMetaUpdate(&service.ObjectMeta, &oldService.ObjectMeta, field.NewPath("metadata")) + allErrs = append(allErrs, ValidateLoadBalancerStatus(&service.Status.LoadBalancer, field.NewPath("status", "loadBalancer"))...) + return allErrs +} + +// ValidateReplicationController tests if required fields in the replication controller are set. +func ValidateReplicationController(controller *core.ReplicationController) field.ErrorList { + allErrs := ValidateObjectMeta(&controller.ObjectMeta, true, ValidateReplicationControllerName, field.NewPath("metadata")) + allErrs = append(allErrs, ValidateReplicationControllerSpec(&controller.Spec, field.NewPath("spec"))...) + return allErrs +} + +// ValidateReplicationControllerUpdate tests if required fields in the replication controller are set. +func ValidateReplicationControllerUpdate(controller, oldController *core.ReplicationController) field.ErrorList { + allErrs := ValidateObjectMetaUpdate(&controller.ObjectMeta, &oldController.ObjectMeta, field.NewPath("metadata")) + allErrs = append(allErrs, ValidateReplicationControllerSpec(&controller.Spec, field.NewPath("spec"))...) + return allErrs +} + +// ValidateReplicationControllerStatusUpdate tests if required fields in the replication controller are set. +func ValidateReplicationControllerStatusUpdate(controller, oldController *core.ReplicationController) field.ErrorList { + allErrs := ValidateObjectMetaUpdate(&controller.ObjectMeta, &oldController.ObjectMeta, field.NewPath("metadata")) + allErrs = append(allErrs, ValidateReplicationControllerStatus(controller.Status, field.NewPath("status"))...) 
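Reviewer note (editor's addition): ValidateServiceExternalTrafficFieldsCombination only permits externalTrafficPolicy on NodePort and LoadBalancer services, and healthCheckNodePort only where a health check is actually needed. A hedged sketch of the ClusterIP rejection case, assuming the same import paths as above:

    package main

    import (
        "fmt"

        "k8s.io/kubernetes/pkg/apis/core"
        "k8s.io/kubernetes/pkg/apis/core/validation"
    )

    func main() {
        svc := &core.Service{Spec: core.ServiceSpec{
            Type:                  core.ServiceTypeClusterIP,
            ExternalTrafficPolicy: core.ServiceExternalTrafficPolicyTypeLocal,
        }}
        for _, err := range validation.ValidateServiceExternalTrafficFieldsCombination(svc) {
            fmt.Println(err) // spec.externalTrafficPolicy: Invalid value: ... only on NodePort and LoadBalancer service
        }
    }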
+ return allErrs +} + +func ValidateReplicationControllerStatus(status core.ReplicationControllerStatus, statusPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, ValidateNonnegativeField(int64(status.Replicas), statusPath.Child("replicas"))...) + allErrs = append(allErrs, ValidateNonnegativeField(int64(status.FullyLabeledReplicas), statusPath.Child("fullyLabeledReplicas"))...) + allErrs = append(allErrs, ValidateNonnegativeField(int64(status.ReadyReplicas), statusPath.Child("readyReplicas"))...) + allErrs = append(allErrs, ValidateNonnegativeField(int64(status.AvailableReplicas), statusPath.Child("availableReplicas"))...) + allErrs = append(allErrs, ValidateNonnegativeField(int64(status.ObservedGeneration), statusPath.Child("observedGeneration"))...) + msg := "cannot be greater than status.replicas" + if status.FullyLabeledReplicas > status.Replicas { + allErrs = append(allErrs, field.Invalid(statusPath.Child("fullyLabeledReplicas"), status.FullyLabeledReplicas, msg)) + } + if status.ReadyReplicas > status.Replicas { + allErrs = append(allErrs, field.Invalid(statusPath.Child("readyReplicas"), status.ReadyReplicas, msg)) + } + if status.AvailableReplicas > status.Replicas { + allErrs = append(allErrs, field.Invalid(statusPath.Child("availableReplicas"), status.AvailableReplicas, msg)) + } + if status.AvailableReplicas > status.ReadyReplicas { + allErrs = append(allErrs, field.Invalid(statusPath.Child("availableReplicas"), status.AvailableReplicas, "cannot be greater than readyReplicas")) + } + return allErrs +} + +// Validates that the given selector is non-empty. +func ValidateNonEmptySelector(selectorMap map[string]string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + selector := labels.Set(selectorMap).AsSelector() + if selector.Empty() { + allErrs = append(allErrs, field.Required(fldPath, "")) + } + return allErrs +} + +// Validates the given template and ensures that it is in accordance with the desired selector and replicas. +func ValidatePodTemplateSpecForRC(template *core.PodTemplateSpec, selectorMap map[string]string, replicas int32, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if template == nil { + allErrs = append(allErrs, field.Required(fldPath, "")) + } else { + selector := labels.Set(selectorMap).AsSelector() + if !selector.Empty() { + // Verify that the RC selector matches the labels in template. + labels := labels.Set(template.Labels) + if !selector.Matches(labels) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("metadata", "labels"), template.Labels, "`selector` does not match template `labels`")) + } + } + allErrs = append(allErrs, ValidatePodTemplateSpec(template, fldPath)...) + if replicas > 1 { + allErrs = append(allErrs, ValidateReadOnlyPersistentDisks(template.Spec.Volumes, fldPath.Child("spec", "volumes"))...) + } + // RestartPolicy has already been first-order validated as per ValidatePodTemplateSpec(). + if template.Spec.RestartPolicy != core.RestartPolicyAlways { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("spec", "restartPolicy"), template.Spec.RestartPolicy, []string{string(core.RestartPolicyAlways)})) + } + if template.Spec.ActiveDeadlineSeconds != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("spec", "activeDeadlineSeconds"), template.Spec.ActiveDeadlineSeconds, "must not be specified")) + } + } + return allErrs +} + +// ValidateReplicationControllerSpec tests if required fields in the replication controller spec are set. 
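Reviewer note (editor's addition): ValidateReplicationControllerStatus enforces that fullyLabeledReplicas, readyReplicas and availableReplicas never exceed replicas, and that availableReplicas never exceeds readyReplicas. A small sketch of that last rule, under the same import-path assumption as above:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/util/validation/field"
        "k8s.io/kubernetes/pkg/apis/core"
        "k8s.io/kubernetes/pkg/apis/core/validation"
    )

    func main() {
        status := core.ReplicationControllerStatus{
            Replicas:          3,
            ReadyReplicas:     2,
            AvailableReplicas: 3, // more available than ready: rejected
        }
        errs := validation.ValidateReplicationControllerStatus(status, field.NewPath("status"))
        fmt.Println(len(errs)) // 1: status.availableReplicas cannot be greater than readyReplicas
    }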
+func ValidateReplicationControllerSpec(spec *core.ReplicationControllerSpec, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, ValidateNonnegativeField(int64(spec.MinReadySeconds), fldPath.Child("minReadySeconds"))...) + allErrs = append(allErrs, ValidateNonEmptySelector(spec.Selector, fldPath.Child("selector"))...) + allErrs = append(allErrs, ValidateNonnegativeField(int64(spec.Replicas), fldPath.Child("replicas"))...) + allErrs = append(allErrs, ValidatePodTemplateSpecForRC(spec.Template, spec.Selector, spec.Replicas, fldPath.Child("template"))...) + return allErrs +} + +// ValidatePodTemplateSpec validates the spec of a pod template +func ValidatePodTemplateSpec(spec *core.PodTemplateSpec, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, unversionedvalidation.ValidateLabels(spec.Labels, fldPath.Child("labels"))...) + allErrs = append(allErrs, ValidateAnnotations(spec.Annotations, fldPath.Child("annotations"))...) + allErrs = append(allErrs, ValidatePodSpecificAnnotations(spec.Annotations, &spec.Spec, fldPath.Child("annotations"))...) + allErrs = append(allErrs, ValidatePodSpec(&spec.Spec, fldPath.Child("spec"))...) + return allErrs +} + +func ValidateReadOnlyPersistentDisks(volumes []core.Volume, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for i := range volumes { + vol := &volumes[i] + idxPath := fldPath.Index(i) + if vol.GCEPersistentDisk != nil { + if vol.GCEPersistentDisk.ReadOnly == false { + allErrs = append(allErrs, field.Invalid(idxPath.Child("gcePersistentDisk", "readOnly"), false, "must be true for replicated pods > 1; GCE PD can only be mounted on multiple machines if it is read-only")) + } + } + // TODO: What to do for AWS? It doesn't support replicas + } + return allErrs +} + +// ValidateTaintsInNodeAnnotations tests that the serialized taints in Node.Annotations has valid data +func ValidateTaintsInNodeAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + taints, err := helper.GetTaintsFromNodeAnnotations(annotations) + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath, core.TaintsAnnotationKey, err.Error())) + return allErrs + } + + if len(taints) > 0 { + allErrs = append(allErrs, validateNodeTaints(taints, fldPath.Child(core.TaintsAnnotationKey))...) + } + + return allErrs +} + +// validateNodeTaints tests if given taints have valid data. +func validateNodeTaints(taints []core.Taint, fldPath *field.Path) field.ErrorList { + allErrors := field.ErrorList{} + + uniqueTaints := map[core.TaintEffect]sets.String{} + + for i, currTaint := range taints { + idxPath := fldPath.Index(i) + // validate the taint key + allErrors = append(allErrors, unversionedvalidation.ValidateLabelName(currTaint.Key, idxPath.Child("key"))...) + // validate the taint value + if errs := validation.IsValidLabelValue(currTaint.Value); len(errs) != 0 { + allErrors = append(allErrors, field.Invalid(idxPath.Child("value"), currTaint.Value, strings.Join(errs, ";"))) + } + // validate the taint effect + allErrors = append(allErrors, validateTaintEffect(&currTaint.Effect, false, idxPath.Child("effect"))...) 
+ + // validate if taint is unique by + if len(uniqueTaints[currTaint.Effect]) > 0 && uniqueTaints[currTaint.Effect].Has(currTaint.Key) { + duplicatedError := field.Duplicate(idxPath, currTaint) + duplicatedError.Detail = "taints must be unique by key and effect pair" + allErrors = append(allErrors, duplicatedError) + continue + } + + // add taint to existingTaints for uniqueness check + if len(uniqueTaints[currTaint.Effect]) == 0 { + uniqueTaints[currTaint.Effect] = sets.String{} + } + uniqueTaints[currTaint.Effect].Insert(currTaint.Key) + } + return allErrors +} + +func ValidateNodeSpecificAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if annotations[core.TaintsAnnotationKey] != "" { + allErrs = append(allErrs, ValidateTaintsInNodeAnnotations(annotations, fldPath)...) + } + + if annotations[core.PreferAvoidPodsAnnotationKey] != "" { + allErrs = append(allErrs, ValidateAvoidPodsInNodeAnnotations(annotations, fldPath)...) + } + return allErrs +} + +// ValidateNode tests if required fields in the node are set. +func ValidateNode(node *core.Node) field.ErrorList { + fldPath := field.NewPath("metadata") + allErrs := ValidateObjectMeta(&node.ObjectMeta, false, ValidateNodeName, fldPath) + allErrs = append(allErrs, ValidateNodeSpecificAnnotations(node.ObjectMeta.Annotations, fldPath.Child("annotations"))...) + if len(node.Spec.Taints) > 0 { + allErrs = append(allErrs, validateNodeTaints(node.Spec.Taints, fldPath.Child("taints"))...) + } + + // Only validate spec. + // All status fields are optional and can be updated later. + // That said, if specified, we need to ensure they are valid. + allErrs = append(allErrs, ValidateNodeResources(node)...) + + // external ID is required. + if len(node.Spec.ExternalID) == 0 { + allErrs = append(allErrs, field.Required(field.NewPath("spec", "externalID"), "")) + } + + // Only allow Node.Spec.ConfigSource to be set if the DynamicKubeletConfig feature gate is enabled + if node.Spec.ConfigSource != nil && !utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "configSource"), "configSource may only be set if the DynamicKubeletConfig feature gate is enabled)")) + } + + if len(node.Spec.PodCIDR) != 0 { + _, err := ValidateCIDR(node.Spec.PodCIDR) + if err != nil { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "podCIDR"), node.Spec.PodCIDR, "not a valid CIDR")) + } + } + return allErrs +} + +// ValidateNodeResources is used to make sure a node has valid capacity and allocatable values. +func ValidateNodeResources(node *core.Node) field.ErrorList { + allErrs := field.ErrorList{} + // Validate resource quantities in capacity. + hugePageSizes := sets.NewString() + for k, v := range node.Status.Capacity { + resPath := field.NewPath("status", "capacity", string(k)) + allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...) + // track any huge page size that has a positive value + if helper.IsHugePageResourceName(k) && v.Value() > int64(0) { + hugePageSizes.Insert(string(k)) + } + if len(hugePageSizes) > 1 { + allErrs = append(allErrs, field.Invalid(resPath, v, "may not have pre-allocated hugepages for multiple page sizes")) + } + } + // Validate resource quantities in allocatable. 
+ hugePageSizes = sets.NewString() + for k, v := range node.Status.Allocatable { + resPath := field.NewPath("status", "allocatable", string(k)) + allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...) + // track any huge page size that has a positive value + if helper.IsHugePageResourceName(k) && v.Value() > int64(0) { + hugePageSizes.Insert(string(k)) + } + if len(hugePageSizes) > 1 { + allErrs = append(allErrs, field.Invalid(resPath, v, "may not have pre-allocated hugepages for multiple page sizes")) + } + } + return allErrs +} + +// ValidateNodeUpdate tests to make sure a node update can be applied. Modifies oldNode. +func ValidateNodeUpdate(node, oldNode *core.Node) field.ErrorList { + fldPath := field.NewPath("metadata") + allErrs := ValidateObjectMetaUpdate(&node.ObjectMeta, &oldNode.ObjectMeta, fldPath) + allErrs = append(allErrs, ValidateNodeSpecificAnnotations(node.ObjectMeta.Annotations, fldPath.Child("annotations"))...) + + // TODO: Enable the code once we have better core object.status update model. Currently, + // anyone can update node status. + // if !apiequality.Semantic.DeepEqual(node.Status, core.NodeStatus{}) { + // allErrs = append(allErrs, field.Invalid("status", node.Status, "must be empty")) + // } + + allErrs = append(allErrs, ValidateNodeResources(node)...) + + // Validate no duplicate addresses in node status. + addresses := make(map[core.NodeAddress]bool) + for i, address := range node.Status.Addresses { + if _, ok := addresses[address]; ok { + allErrs = append(allErrs, field.Duplicate(field.NewPath("status", "addresses").Index(i), address)) + } + addresses[address] = true + } + + if len(oldNode.Spec.PodCIDR) == 0 { + // Allow the controller manager to assign a CIDR to a node if it doesn't have one. + oldNode.Spec.PodCIDR = node.Spec.PodCIDR + } else { + if oldNode.Spec.PodCIDR != node.Spec.PodCIDR { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "podCIDR"), "node updates may not change podCIDR except from \"\" to valid")) + } + } + + // Allow controller manager updating provider ID when not set + if len(oldNode.Spec.ProviderID) == 0 { + oldNode.Spec.ProviderID = node.Spec.ProviderID + } else { + if oldNode.Spec.ProviderID != node.Spec.ProviderID { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "providerID"), "node updates may not change providerID except from \"\" to valid")) + } + } + + // TODO: move reset function to its own location + // Ignore metadata changes now that they have been tested + oldNode.ObjectMeta = node.ObjectMeta + // Allow users to update capacity + oldNode.Status.Capacity = node.Status.Capacity + // Allow users to unschedule node + oldNode.Spec.Unschedulable = node.Spec.Unschedulable + // Clear status + oldNode.Status = node.Status + + // update taints + if len(node.Spec.Taints) > 0 { + allErrs = append(allErrs, validateNodeTaints(node.Spec.Taints, fldPath.Child("taints"))...) + } + oldNode.Spec.Taints = node.Spec.Taints + + // Allow updates to Node.Spec.ConfigSource if DynamicKubeletConfig feature gate is enabled + if utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) { + oldNode.Spec.ConfigSource = node.Spec.ConfigSource + } + + // We made allowed changes to oldNode, and now we compare oldNode to node. Any remaining differences indicate changes to protected fields. + // TODO: Add a 'real' error type for this error and provide print actual diffs. 
+ if !apiequality.Semantic.DeepEqual(oldNode, node) { + glog.V(4).Infof("Update failed validation %#v vs %#v", oldNode, node) + allErrs = append(allErrs, field.Forbidden(field.NewPath(""), "node updates may only change labels, taints, or capacity (or configSource, if the DynamicKubeletConfig feature gate is enabled)")) + } + + return allErrs +} + +// Validate compute resource typename. +// Refer to docs/design/resources.md for more details. +func validateResourceName(value string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for _, msg := range validation.IsQualifiedName(value) { + allErrs = append(allErrs, field.Invalid(fldPath, value, msg)) + } + if len(allErrs) != 0 { + return allErrs + } + + if len(strings.Split(value, "/")) == 1 { + if !helper.IsStandardResourceName(value) { + return append(allErrs, field.Invalid(fldPath, value, "must be a standard resource type or fully qualified")) + } + } + + return allErrs +} + +// Validate container resource name +// Refer to docs/design/resources.md for more details. +func validateContainerResourceName(value string, fldPath *field.Path) field.ErrorList { + allErrs := validateResourceName(value, fldPath) + + if len(strings.Split(value, "/")) == 1 { + if !helper.IsStandardContainerResourceName(value) { + return append(allErrs, field.Invalid(fldPath, value, "must be a standard resource for containers")) + } + } + return allErrs +} + +// isLocalStorageResource checks whether the resource is local ephemeral storage +func isLocalStorageResource(name string) bool { + if name == string(core.ResourceEphemeralStorage) || name == string(core.ResourceRequestsEphemeralStorage) || + name == string(core.ResourceLimitsEphemeralStorage) { + return true + } else { + return false + } +} + +// Validate resource names that can go in a resource quota +// Refer to docs/design/resources.md for more details. 
+func ValidateResourceQuotaResourceName(value string, fldPath *field.Path) field.ErrorList { + allErrs := validateResourceName(value, fldPath) + if isLocalStorageResource(value) && !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) { + return append(allErrs, field.Forbidden(fldPath, "ResourceEphemeralStorage field disabled by feature-gate for ResourceQuota")) + } + if len(strings.Split(value, "/")) == 1 { + if !helper.IsStandardQuotaResourceName(value) { + return append(allErrs, field.Invalid(fldPath, value, isInvalidQuotaResource)) + } + } + return allErrs +} + +// Validate limit range types +func validateLimitRangeTypeName(value string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for _, msg := range validation.IsQualifiedName(value) { + allErrs = append(allErrs, field.Invalid(fldPath, value, msg)) + } + if len(allErrs) != 0 { + return allErrs + } + + if len(strings.Split(value, "/")) == 1 { + if !helper.IsStandardLimitRangeType(value) { + return append(allErrs, field.Invalid(fldPath, value, "must be a standard limit type or fully qualified")) + } + } + + return allErrs +} + +// Validate limit range resource name +// limit types (other than Pod/Container) could contain storage not just cpu or memory +func validateLimitRangeResourceName(limitType core.LimitType, value string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if value == string(core.ResourceEphemeralStorage) && !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) { + return append(allErrs, field.Forbidden(fldPath, "ResourceEphemeralStorage field disabled by feature-gate for Resource LimitRange")) + } + switch limitType { + case core.LimitTypePod, core.LimitTypeContainer: + return validateContainerResourceName(value, fldPath) + default: + return validateResourceName(value, fldPath) + } +} + +// ValidateLimitRange tests if required fields in the LimitRange are set. +func ValidateLimitRange(limitRange *core.LimitRange) field.ErrorList { + allErrs := ValidateObjectMeta(&limitRange.ObjectMeta, true, ValidateLimitRangeName, field.NewPath("metadata")) + + // ensure resource names are properly qualified per docs/design/resources.md + limitTypeSet := map[core.LimitType]bool{} + fldPath := field.NewPath("spec", "limits") + for i := range limitRange.Spec.Limits { + idxPath := fldPath.Index(i) + limit := &limitRange.Spec.Limits[i] + allErrs = append(allErrs, validateLimitRangeTypeName(string(limit.Type), idxPath.Child("type"))...) + + _, found := limitTypeSet[limit.Type] + if found { + allErrs = append(allErrs, field.Duplicate(idxPath.Child("type"), limit.Type)) + } + limitTypeSet[limit.Type] = true + + keys := sets.String{} + min := map[string]resource.Quantity{} + max := map[string]resource.Quantity{} + defaults := map[string]resource.Quantity{} + defaultRequests := map[string]resource.Quantity{} + maxLimitRequestRatios := map[string]resource.Quantity{} + + for k, q := range limit.Max { + allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("max").Key(string(k)))...) + keys.Insert(string(k)) + max[string(k)] = q + } + for k, q := range limit.Min { + allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("min").Key(string(k)))...) 
+ keys.Insert(string(k)) + min[string(k)] = q + } + + if limit.Type == core.LimitTypePod { + if len(limit.Default) > 0 { + allErrs = append(allErrs, field.Forbidden(idxPath.Child("default"), "may not be specified when `type` is 'Pod'")) + } + if len(limit.DefaultRequest) > 0 { + allErrs = append(allErrs, field.Forbidden(idxPath.Child("defaultRequest"), "may not be specified when `type` is 'Pod'")) + } + } else { + for k, q := range limit.Default { + allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("default").Key(string(k)))...) + keys.Insert(string(k)) + defaults[string(k)] = q + } + for k, q := range limit.DefaultRequest { + allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("defaultRequest").Key(string(k)))...) + keys.Insert(string(k)) + defaultRequests[string(k)] = q + } + } + + if limit.Type == core.LimitTypePersistentVolumeClaim { + _, minQuantityFound := limit.Min[core.ResourceStorage] + _, maxQuantityFound := limit.Max[core.ResourceStorage] + if !minQuantityFound && !maxQuantityFound { + allErrs = append(allErrs, field.Required(idxPath.Child("limits"), "either minimum or maximum storage value is required, but neither was provided")) + } + } + + for k, q := range limit.MaxLimitRequestRatio { + allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("maxLimitRequestRatio").Key(string(k)))...) + keys.Insert(string(k)) + maxLimitRequestRatios[string(k)] = q + } + + for k := range keys { + minQuantity, minQuantityFound := min[k] + maxQuantity, maxQuantityFound := max[k] + defaultQuantity, defaultQuantityFound := defaults[k] + defaultRequestQuantity, defaultRequestQuantityFound := defaultRequests[k] + maxRatio, maxRatioFound := maxLimitRequestRatios[k] + + if minQuantityFound && maxQuantityFound && minQuantity.Cmp(maxQuantity) > 0 { + allErrs = append(allErrs, field.Invalid(idxPath.Child("min").Key(string(k)), minQuantity, fmt.Sprintf("min value %s is greater than max value %s", minQuantity.String(), maxQuantity.String()))) + } + + if defaultRequestQuantityFound && minQuantityFound && minQuantity.Cmp(defaultRequestQuantity) > 0 { + allErrs = append(allErrs, field.Invalid(idxPath.Child("defaultRequest").Key(string(k)), defaultRequestQuantity, fmt.Sprintf("min value %s is greater than default request value %s", minQuantity.String(), defaultRequestQuantity.String()))) + } + + if defaultRequestQuantityFound && maxQuantityFound && defaultRequestQuantity.Cmp(maxQuantity) > 0 { + allErrs = append(allErrs, field.Invalid(idxPath.Child("defaultRequest").Key(string(k)), defaultRequestQuantity, fmt.Sprintf("default request value %s is greater than max value %s", defaultRequestQuantity.String(), maxQuantity.String()))) + } + + if defaultRequestQuantityFound && defaultQuantityFound && defaultRequestQuantity.Cmp(defaultQuantity) > 0 { + allErrs = append(allErrs, field.Invalid(idxPath.Child("defaultRequest").Key(string(k)), defaultRequestQuantity, fmt.Sprintf("default request value %s is greater than default limit value %s", defaultRequestQuantity.String(), defaultQuantity.String()))) + } + + if defaultQuantityFound && minQuantityFound && minQuantity.Cmp(defaultQuantity) > 0 { + allErrs = append(allErrs, field.Invalid(idxPath.Child("default").Key(string(k)), minQuantity, fmt.Sprintf("min value %s is greater than default value %s", minQuantity.String(), defaultQuantity.String()))) + } + + if defaultQuantityFound && maxQuantityFound && defaultQuantity.Cmp(maxQuantity) > 0 { + 
allErrs = append(allErrs, field.Invalid(idxPath.Child("default").Key(string(k)), maxQuantity, fmt.Sprintf("default value %s is greater than max value %s", defaultQuantity.String(), maxQuantity.String()))) + } + if maxRatioFound && maxRatio.Cmp(*resource.NewQuantity(1, resource.DecimalSI)) < 0 { + allErrs = append(allErrs, field.Invalid(idxPath.Child("maxLimitRequestRatio").Key(string(k)), maxRatio, fmt.Sprintf("ratio %s is less than 1", maxRatio.String()))) + } + if maxRatioFound && minQuantityFound && maxQuantityFound { + maxRatioValue := float64(maxRatio.Value()) + minQuantityValue := minQuantity.Value() + maxQuantityValue := maxQuantity.Value() + if maxRatio.Value() < resource.MaxMilliValue && minQuantityValue < resource.MaxMilliValue && maxQuantityValue < resource.MaxMilliValue { + maxRatioValue = float64(maxRatio.MilliValue()) / 1000 + minQuantityValue = minQuantity.MilliValue() + maxQuantityValue = maxQuantity.MilliValue() + } + maxRatioLimit := float64(maxQuantityValue) / float64(minQuantityValue) + if maxRatioValue > maxRatioLimit { + allErrs = append(allErrs, field.Invalid(idxPath.Child("maxLimitRequestRatio").Key(string(k)), maxRatio, fmt.Sprintf("ratio %s is greater than max/min = %f", maxRatio.String(), maxRatioLimit))) + } + } + + // for GPU and hugepages, the default value and defaultRequest value must match if both are specified + if !helper.IsOvercommitAllowed(core.ResourceName(k)) && defaultQuantityFound && defaultRequestQuantityFound && defaultQuantity.Cmp(defaultRequestQuantity) != 0 { + allErrs = append(allErrs, field.Invalid(idxPath.Child("defaultRequest").Key(string(k)), defaultRequestQuantity, fmt.Sprintf("default value %s must equal to defaultRequest value %s in %s", defaultQuantity.String(), defaultRequestQuantity.String(), k))) + } + } + } + + return allErrs +} + +// ValidateServiceAccount tests if required fields in the ServiceAccount are set. +func ValidateServiceAccount(serviceAccount *core.ServiceAccount) field.ErrorList { + allErrs := ValidateObjectMeta(&serviceAccount.ObjectMeta, true, ValidateServiceAccountName, field.NewPath("metadata")) + return allErrs +} + +// ValidateServiceAccountUpdate tests if required fields in the ServiceAccount are set. +func ValidateServiceAccountUpdate(newServiceAccount, oldServiceAccount *core.ServiceAccount) field.ErrorList { + allErrs := ValidateObjectMetaUpdate(&newServiceAccount.ObjectMeta, &oldServiceAccount.ObjectMeta, field.NewPath("metadata")) + allErrs = append(allErrs, ValidateServiceAccount(newServiceAccount)...) + return allErrs +} + +// ValidateSecret tests if required fields in the Secret are set. 
+func ValidateSecret(secret *core.Secret) field.ErrorList { + allErrs := ValidateObjectMeta(&secret.ObjectMeta, true, ValidateSecretName, field.NewPath("metadata")) + + dataPath := field.NewPath("data") + totalSize := 0 + for key, value := range secret.Data { + for _, msg := range validation.IsConfigMapKey(key) { + allErrs = append(allErrs, field.Invalid(dataPath.Key(key), key, msg)) + } + totalSize += len(value) + } + if totalSize > core.MaxSecretSize { + allErrs = append(allErrs, field.TooLong(dataPath, "", core.MaxSecretSize)) + } + + switch secret.Type { + case core.SecretTypeServiceAccountToken: + // Only require Annotations[kubernetes.io/service-account.name] + // Additional fields (like Annotations[kubernetes.io/service-account.uid] and Data[token]) might be contributed later by a controller loop + if value := secret.Annotations[core.ServiceAccountNameKey]; len(value) == 0 { + allErrs = append(allErrs, field.Required(field.NewPath("metadata", "annotations").Key(core.ServiceAccountNameKey), "")) + } + case core.SecretTypeOpaque, "": + // no-op + case core.SecretTypeDockercfg: + dockercfgBytes, exists := secret.Data[core.DockerConfigKey] + if !exists { + allErrs = append(allErrs, field.Required(dataPath.Key(core.DockerConfigKey), "")) + break + } + + // make sure that the content is well-formed json. + if err := json.Unmarshal(dockercfgBytes, &map[string]interface{}{}); err != nil { + allErrs = append(allErrs, field.Invalid(dataPath.Key(core.DockerConfigKey), "", err.Error())) + } + case core.SecretTypeDockerConfigJson: + dockerConfigJsonBytes, exists := secret.Data[core.DockerConfigJsonKey] + if !exists { + allErrs = append(allErrs, field.Required(dataPath.Key(core.DockerConfigJsonKey), "")) + break + } + + // make sure that the content is well-formed json. + if err := json.Unmarshal(dockerConfigJsonBytes, &map[string]interface{}{}); err != nil { + allErrs = append(allErrs, field.Invalid(dataPath.Key(core.DockerConfigJsonKey), "", err.Error())) + } + case core.SecretTypeBasicAuth: + _, usernameFieldExists := secret.Data[core.BasicAuthUsernameKey] + _, passwordFieldExists := secret.Data[core.BasicAuthPasswordKey] + + // username or password might be empty, but the field must be present + if !usernameFieldExists && !passwordFieldExists { + allErrs = append(allErrs, field.Required(field.NewPath("data[%s]").Key(core.BasicAuthUsernameKey), "")) + allErrs = append(allErrs, field.Required(field.NewPath("data[%s]").Key(core.BasicAuthPasswordKey), "")) + break + } + case core.SecretTypeSSHAuth: + if len(secret.Data[core.SSHAuthPrivateKey]) == 0 { + allErrs = append(allErrs, field.Required(field.NewPath("data[%s]").Key(core.SSHAuthPrivateKey), "")) + break + } + + case core.SecretTypeTLS: + if _, exists := secret.Data[core.TLSCertKey]; !exists { + allErrs = append(allErrs, field.Required(dataPath.Key(core.TLSCertKey), "")) + } + if _, exists := secret.Data[core.TLSPrivateKeyKey]; !exists { + allErrs = append(allErrs, field.Required(dataPath.Key(core.TLSPrivateKeyKey), "")) + } + // TODO: Verify that the key matches the cert. + default: + // no-op + } + + return allErrs +} + +// ValidateSecretUpdate tests if required fields in the Secret are set. 
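Reviewer note (editor's addition): the type switch above makes the required data keys type-specific; for kubernetes.io/dockercfg the .dockercfg payload must also parse as JSON. A hedged sketch of that case (import paths assumed as above):

    package main

    import (
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/kubernetes/pkg/apis/core"
        "k8s.io/kubernetes/pkg/apis/core/validation"
    )

    func main() {
        secret := &core.Secret{
            ObjectMeta: metav1.ObjectMeta{Name: "regcred", Namespace: "default"},
            Type:       core.SecretTypeDockercfg,
            Data:       map[string][]byte{core.DockerConfigKey: []byte("{not json")},
        }
        for _, err := range validation.ValidateSecret(secret) {
            fmt.Println(err) // data[.dockercfg]: Invalid value: "": invalid character ...
        }
    }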
+func ValidateSecretUpdate(newSecret, oldSecret *core.Secret) field.ErrorList { + allErrs := ValidateObjectMetaUpdate(&newSecret.ObjectMeta, &oldSecret.ObjectMeta, field.NewPath("metadata")) + + if len(newSecret.Type) == 0 { + newSecret.Type = oldSecret.Type + } + + allErrs = append(allErrs, ValidateImmutableField(newSecret.Type, oldSecret.Type, field.NewPath("type"))...) + + allErrs = append(allErrs, ValidateSecret(newSecret)...) + return allErrs +} + +// ValidateConfigMapName can be used to check whether the given ConfigMap name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidateConfigMapName = NameIsDNSSubdomain + +// ValidateConfigMap tests whether required fields in the ConfigMap are set. +func ValidateConfigMap(cfg *core.ConfigMap) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, ValidateObjectMeta(&cfg.ObjectMeta, true, ValidateConfigMapName, field.NewPath("metadata"))...) + + totalSize := 0 + + for key, value := range cfg.Data { + for _, msg := range validation.IsConfigMapKey(key) { + allErrs = append(allErrs, field.Invalid(field.NewPath("data").Key(key), key, msg)) + } + totalSize += len(value) + } + if totalSize > core.MaxSecretSize { + allErrs = append(allErrs, field.TooLong(field.NewPath("data"), "", core.MaxSecretSize)) + } + + return allErrs +} + +// ValidateConfigMapUpdate tests if required fields in the ConfigMap are set. +func ValidateConfigMapUpdate(newCfg, oldCfg *core.ConfigMap) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, ValidateObjectMetaUpdate(&newCfg.ObjectMeta, &oldCfg.ObjectMeta, field.NewPath("metadata"))...) + allErrs = append(allErrs, ValidateConfigMap(newCfg)...) + + return allErrs +} + +func validateBasicResource(quantity resource.Quantity, fldPath *field.Path) field.ErrorList { + if quantity.Value() < 0 { + return field.ErrorList{field.Invalid(fldPath, quantity.Value(), "must be a valid resource quantity")} + } + return field.ErrorList{} +} + +// Validates resource requirement spec. +func ValidateResourceRequirements(requirements *core.ResourceRequirements, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + limPath := fldPath.Child("limits") + reqPath := fldPath.Child("requests") + for resourceName, quantity := range requirements.Limits { + fldPath := limPath.Key(string(resourceName)) + // Validate resource name. + allErrs = append(allErrs, validateContainerResourceName(string(resourceName), fldPath)...) + + // Validate resource quantity. + allErrs = append(allErrs, ValidateResourceQuantityValue(string(resourceName), quantity, fldPath)...) + + if resourceName == core.ResourceEphemeralStorage && !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) { + allErrs = append(allErrs, field.Forbidden(limPath, "ResourceEphemeralStorage field disabled by feature-gate for ResourceRequirements")) + } + if helper.IsHugePageResourceName(resourceName) && !utilfeature.DefaultFeatureGate.Enabled(features.HugePages) { + allErrs = append(allErrs, field.Forbidden(limPath, fmt.Sprintf("%s field disabled by feature-gate for ResourceRequirements", resourceName))) + } + + } + for resourceName, quantity := range requirements.Requests { + fldPath := reqPath.Key(string(resourceName)) + // Validate resource name. + allErrs = append(allErrs, validateContainerResourceName(string(resourceName), fldPath)...) + // Validate resource quantity. 
+ allErrs = append(allErrs, ValidateResourceQuantityValue(string(resourceName), quantity, fldPath)...) + + // Check that request <= limit. + limitQuantity, exists := requirements.Limits[resourceName] + if exists { + // For GPUs, not only requests can't exceed limits, they also can't be lower, i.e. must be equal. + if quantity.Cmp(limitQuantity) != 0 && !helper.IsOvercommitAllowed(resourceName) { + allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be equal to %s limit", resourceName))) + } else if quantity.Cmp(limitQuantity) > 0 { + allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be less than or equal to %s limit", resourceName))) + } + } else if resourceName == core.ResourceNvidiaGPU { + allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be equal to %s request", core.ResourceNvidiaGPU))) + } + } + + return allErrs +} + +// validateResourceQuotaScopes ensures that each enumerated hard resource constraint is valid for set of scopes +func validateResourceQuotaScopes(resourceQuotaSpec *core.ResourceQuotaSpec, fld *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(resourceQuotaSpec.Scopes) == 0 { + return allErrs + } + hardLimits := sets.NewString() + for k := range resourceQuotaSpec.Hard { + hardLimits.Insert(string(k)) + } + fldPath := fld.Child("scopes") + scopeSet := sets.NewString() + for _, scope := range resourceQuotaSpec.Scopes { + if !helper.IsStandardResourceQuotaScope(string(scope)) { + allErrs = append(allErrs, field.Invalid(fldPath, resourceQuotaSpec.Scopes, "unsupported scope")) + } + for _, k := range hardLimits.List() { + if helper.IsStandardQuotaResourceName(k) && !helper.IsResourceQuotaScopeValidForResource(scope, k) { + allErrs = append(allErrs, field.Invalid(fldPath, resourceQuotaSpec.Scopes, "unsupported scope applied to resource")) + } + } + scopeSet.Insert(string(scope)) + } + invalidScopePairs := []sets.String{ + sets.NewString(string(core.ResourceQuotaScopeBestEffort), string(core.ResourceQuotaScopeNotBestEffort)), + sets.NewString(string(core.ResourceQuotaScopeTerminating), string(core.ResourceQuotaScopeNotTerminating)), + } + for _, invalidScopePair := range invalidScopePairs { + if scopeSet.HasAll(invalidScopePair.List()...) { + allErrs = append(allErrs, field.Invalid(fldPath, resourceQuotaSpec.Scopes, "conflicting scopes")) + } + } + return allErrs +} + +// ValidateResourceQuota tests if required fields in the ResourceQuota are set. +func ValidateResourceQuota(resourceQuota *core.ResourceQuota) field.ErrorList { + allErrs := ValidateObjectMeta(&resourceQuota.ObjectMeta, true, ValidateResourceQuotaName, field.NewPath("metadata")) + + allErrs = append(allErrs, ValidateResourceQuotaSpec(&resourceQuota.Spec, field.NewPath("spec"))...) + allErrs = append(allErrs, ValidateResourceQuotaStatus(&resourceQuota.Status, field.NewPath("status"))...) + + return allErrs +} + +func ValidateResourceQuotaStatus(status *core.ResourceQuotaStatus, fld *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + fldPath := fld.Child("hard") + for k, v := range status.Hard { + resPath := fldPath.Key(string(k)) + allErrs = append(allErrs, ValidateResourceQuotaResourceName(string(k), resPath)...) + allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...) 
+ } + fldPath = fld.Child("used") + for k, v := range status.Used { + resPath := fldPath.Key(string(k)) + allErrs = append(allErrs, ValidateResourceQuotaResourceName(string(k), resPath)...) + allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...) + } + + return allErrs +} + +func ValidateResourceQuotaSpec(resourceQuotaSpec *core.ResourceQuotaSpec, fld *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + fldPath := fld.Child("hard") + for k, v := range resourceQuotaSpec.Hard { + resPath := fldPath.Key(string(k)) + allErrs = append(allErrs, ValidateResourceQuotaResourceName(string(k), resPath)...) + allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...) + } + allErrs = append(allErrs, validateResourceQuotaScopes(resourceQuotaSpec, fld)...) + + return allErrs +} + +// ValidateResourceQuantityValue enforces that specified quantity is valid for specified resource +func ValidateResourceQuantityValue(resource string, value resource.Quantity, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, ValidateNonnegativeQuantity(value, fldPath)...) + if helper.IsIntegerResourceName(resource) { + if value.MilliValue()%int64(1000) != int64(0) { + allErrs = append(allErrs, field.Invalid(fldPath, value, isNotIntegerErrorMsg)) + } + } + return allErrs +} + +// ValidateResourceQuotaUpdate tests to see if the update is legal for an end user to make. +// newResourceQuota is updated with fields that cannot be changed. +func ValidateResourceQuotaUpdate(newResourceQuota, oldResourceQuota *core.ResourceQuota) field.ErrorList { + allErrs := ValidateObjectMetaUpdate(&newResourceQuota.ObjectMeta, &oldResourceQuota.ObjectMeta, field.NewPath("metadata")) + allErrs = append(allErrs, ValidateResourceQuotaSpec(&newResourceQuota.Spec, field.NewPath("spec"))...) + + // ensure scopes cannot change, and that resources are still valid for scope + fldPath := field.NewPath("spec", "scopes") + oldScopes := sets.NewString() + newScopes := sets.NewString() + for _, scope := range newResourceQuota.Spec.Scopes { + newScopes.Insert(string(scope)) + } + for _, scope := range oldResourceQuota.Spec.Scopes { + oldScopes.Insert(string(scope)) + } + if !oldScopes.Equal(newScopes) { + allErrs = append(allErrs, field.Invalid(fldPath, newResourceQuota.Spec.Scopes, fieldImmutableErrorMsg)) + } + + newResourceQuota.Status = oldResourceQuota.Status + return allErrs +} + +// ValidateResourceQuotaStatusUpdate tests to see if the status update is legal for an end user to make. +// newResourceQuota is updated with fields that cannot be changed. +func ValidateResourceQuotaStatusUpdate(newResourceQuota, oldResourceQuota *core.ResourceQuota) field.ErrorList { + allErrs := ValidateObjectMetaUpdate(&newResourceQuota.ObjectMeta, &oldResourceQuota.ObjectMeta, field.NewPath("metadata")) + if len(newResourceQuota.ResourceVersion) == 0 { + allErrs = append(allErrs, field.Required(field.NewPath("resourceVersion"), "")) + } + fldPath := field.NewPath("status", "hard") + for k, v := range newResourceQuota.Status.Hard { + resPath := fldPath.Key(string(k)) + allErrs = append(allErrs, ValidateResourceQuotaResourceName(string(k), resPath)...) + allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...) + } + fldPath = field.NewPath("status", "used") + for k, v := range newResourceQuota.Status.Used { + resPath := fldPath.Key(string(k)) + allErrs = append(allErrs, ValidateResourceQuotaResourceName(string(k), resPath)...) 
+ allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...) + } + newResourceQuota.Spec = oldResourceQuota.Spec + return allErrs +} + +// ValidateNamespace tests if required fields are set. +func ValidateNamespace(namespace *core.Namespace) field.ErrorList { + allErrs := ValidateObjectMeta(&namespace.ObjectMeta, false, ValidateNamespaceName, field.NewPath("metadata")) + for i := range namespace.Spec.Finalizers { + allErrs = append(allErrs, validateFinalizerName(string(namespace.Spec.Finalizers[i]), field.NewPath("spec", "finalizers"))...) + } + return allErrs +} + +// Validate finalizer names +func validateFinalizerName(stringValue string, fldPath *field.Path) field.ErrorList { + allErrs := apimachineryvalidation.ValidateFinalizerName(stringValue, fldPath) + for _, err := range validateKubeFinalizerName(stringValue, fldPath) { + allErrs = append(allErrs, err) + } + + return allErrs +} + +// validateKubeFinalizerName checks for "standard" names of legacy finalizer +func validateKubeFinalizerName(stringValue string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(strings.Split(stringValue, "/")) == 1 { + if !helper.IsStandardFinalizerName(stringValue) { + return append(allErrs, field.Invalid(fldPath, stringValue, "name is neither a standard finalizer name nor is it fully qualified")) + } + } + + return allErrs +} + +// ValidateNamespaceUpdate tests to make sure a namespace update can be applied. +// newNamespace is updated with fields that cannot be changed +func ValidateNamespaceUpdate(newNamespace *core.Namespace, oldNamespace *core.Namespace) field.ErrorList { + allErrs := ValidateObjectMetaUpdate(&newNamespace.ObjectMeta, &oldNamespace.ObjectMeta, field.NewPath("metadata")) + newNamespace.Spec.Finalizers = oldNamespace.Spec.Finalizers + newNamespace.Status = oldNamespace.Status + return allErrs +} + +// ValidateNamespaceStatusUpdate tests to see if the update is legal for an end user to make. newNamespace is updated with fields +// that cannot be changed. +func ValidateNamespaceStatusUpdate(newNamespace, oldNamespace *core.Namespace) field.ErrorList { + allErrs := ValidateObjectMetaUpdate(&newNamespace.ObjectMeta, &oldNamespace.ObjectMeta, field.NewPath("metadata")) + newNamespace.Spec = oldNamespace.Spec + if newNamespace.DeletionTimestamp.IsZero() { + if newNamespace.Status.Phase != core.NamespaceActive { + allErrs = append(allErrs, field.Invalid(field.NewPath("status", "Phase"), newNamespace.Status.Phase, "may only be 'Active' if `deletionTimestamp` is empty")) + } + } else { + if newNamespace.Status.Phase != core.NamespaceTerminating { + allErrs = append(allErrs, field.Invalid(field.NewPath("status", "Phase"), newNamespace.Status.Phase, "may only be 'Terminating' if `deletionTimestamp` is not empty")) + } + } + return allErrs +} + +// ValidateNamespaceFinalizeUpdate tests to see if the update is legal for an end user to make. +// newNamespace is updated with fields that cannot be changed. +func ValidateNamespaceFinalizeUpdate(newNamespace, oldNamespace *core.Namespace) field.ErrorList { + allErrs := ValidateObjectMetaUpdate(&newNamespace.ObjectMeta, &oldNamespace.ObjectMeta, field.NewPath("metadata")) + + fldPath := field.NewPath("spec", "finalizers") + for i := range newNamespace.Spec.Finalizers { + idxPath := fldPath.Index(i) + allErrs = append(allErrs, validateFinalizerName(string(newNamespace.Spec.Finalizers[i]), idxPath)...) 
+ } + newNamespace.Status = oldNamespace.Status + return allErrs +} + +// Construct lookup map of old subset IPs to NodeNames. +func updateEpAddrToNodeNameMap(ipToNodeName map[string]string, addresses []core.EndpointAddress) { + for n := range addresses { + if addresses[n].NodeName == nil { + continue + } + ipToNodeName[addresses[n].IP] = *addresses[n].NodeName + } +} + +// Build a map across all subsets of IP -> NodeName +func buildEndpointAddressNodeNameMap(subsets []core.EndpointSubset) map[string]string { + ipToNodeName := make(map[string]string) + for i := range subsets { + updateEpAddrToNodeNameMap(ipToNodeName, subsets[i].Addresses) + updateEpAddrToNodeNameMap(ipToNodeName, subsets[i].NotReadyAddresses) + } + return ipToNodeName +} + +func validateEpAddrNodeNameTransition(addr *core.EndpointAddress, ipToNodeName map[string]string, fldPath *field.Path) field.ErrorList { + errList := field.ErrorList{} + existingNodeName, found := ipToNodeName[addr.IP] + if !found { + return errList + } + if addr.NodeName == nil || *addr.NodeName == existingNodeName { + return errList + } + // NodeName entry found for this endpoint IP, but user is attempting to change NodeName + return append(errList, field.Forbidden(fldPath, fmt.Sprintf("Cannot change NodeName for %s to %s", addr.IP, *addr.NodeName))) +} + +// ValidateEndpoints tests if required fields are set. +func ValidateEndpoints(endpoints *core.Endpoints) field.ErrorList { + allErrs := ValidateObjectMeta(&endpoints.ObjectMeta, true, ValidateEndpointsName, field.NewPath("metadata")) + allErrs = append(allErrs, ValidateEndpointsSpecificAnnotations(endpoints.Annotations, field.NewPath("annotations"))...) + allErrs = append(allErrs, validateEndpointSubsets(endpoints.Subsets, []core.EndpointSubset{}, field.NewPath("subsets"))...) + return allErrs +} + +func validateEndpointSubsets(subsets []core.EndpointSubset, oldSubsets []core.EndpointSubset, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + ipToNodeName := buildEndpointAddressNodeNameMap(oldSubsets) + for i := range subsets { + ss := &subsets[i] + idxPath := fldPath.Index(i) + + // EndpointSubsets must include endpoint address. For headless service, we allow its endpoints not to have ports. + if len(ss.Addresses) == 0 && len(ss.NotReadyAddresses) == 0 { + //TODO: consider adding a RequiredOneOf() error for this and similar cases + allErrs = append(allErrs, field.Required(idxPath, "must specify `addresses` or `notReadyAddresses`")) + } + for addr := range ss.Addresses { + allErrs = append(allErrs, validateEndpointAddress(&ss.Addresses[addr], idxPath.Child("addresses").Index(addr), ipToNodeName)...) + } + for addr := range ss.NotReadyAddresses { + allErrs = append(allErrs, validateEndpointAddress(&ss.NotReadyAddresses[addr], idxPath.Child("notReadyAddresses").Index(addr), ipToNodeName)...) + } + for port := range ss.Ports { + allErrs = append(allErrs, validateEndpointPort(&ss.Ports[port], len(ss.Ports) > 1, idxPath.Child("ports").Index(port))...) + } + } + + return allErrs +} + +func validateEndpointAddress(address *core.EndpointAddress, fldPath *field.Path, ipToNodeName map[string]string) field.ErrorList { + allErrs := field.ErrorList{} + for _, msg := range validation.IsValidIP(address.IP) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("ip"), address.IP, msg)) + } + if len(address.Hostname) > 0 { + allErrs = append(allErrs, ValidateDNS1123Label(address.Hostname, fldPath.Child("hostname"))...) 
+ } + // During endpoint update, verify that NodeName is a DNS subdomain and transition rules allow the update + if address.NodeName != nil { + for _, msg := range ValidateNodeName(*address.NodeName, false) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("nodeName"), *address.NodeName, msg)) + } + } + allErrs = append(allErrs, validateEpAddrNodeNameTransition(address, ipToNodeName, fldPath.Child("nodeName"))...) + if len(allErrs) > 0 { + return allErrs + } + allErrs = append(allErrs, validateNonSpecialIP(address.IP, fldPath.Child("ip"))...) + return allErrs +} + +func validateNonSpecialIP(ipAddress string, fldPath *field.Path) field.ErrorList { + // We disallow some IPs as endpoints or external-ips. Specifically, + // unspecified and loopback addresses are nonsensical and link-local + // addresses tend to be used for node-centric purposes (e.g. metadata + // service). + allErrs := field.ErrorList{} + ip := net.ParseIP(ipAddress) + if ip == nil { + allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "must be a valid IP address")) + return allErrs + } + if ip.IsUnspecified() { + allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "may not be unspecified (0.0.0.0)")) + } + if ip.IsLoopback() { + allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "may not be in the loopback range (127.0.0.0/8)")) + } + if ip.IsLinkLocalUnicast() { + allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "may not be in the link-local range (169.254.0.0/16)")) + } + if ip.IsLinkLocalMulticast() { + allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "may not be in the link-local multicast range (224.0.0.0/24)")) + } + return allErrs +} + +func validateEndpointPort(port *core.EndpointPort, requireName bool, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if requireName && len(port.Name) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) + } else if len(port.Name) != 0 { + allErrs = append(allErrs, ValidateDNS1123Label(port.Name, fldPath.Child("name"))...) + } + for _, msg := range validation.IsValidPortNum(int(port.Port)) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("port"), port.Port, msg)) + } + if len(port.Protocol) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("protocol"), "")) + } else if !supportedPortProtocols.Has(string(port.Protocol)) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("protocol"), port.Protocol, supportedPortProtocols.List())) + } + return allErrs +} + +// ValidateEndpointsUpdate tests to make sure an endpoints update can be applied. +func ValidateEndpointsUpdate(newEndpoints, oldEndpoints *core.Endpoints) field.ErrorList { + allErrs := ValidateObjectMetaUpdate(&newEndpoints.ObjectMeta, &oldEndpoints.ObjectMeta, field.NewPath("metadata")) + allErrs = append(allErrs, validateEndpointSubsets(newEndpoints.Subsets, oldEndpoints.Subsets, field.NewPath("subsets"))...) + allErrs = append(allErrs, ValidateEndpointsSpecificAnnotations(newEndpoints.Annotations, field.NewPath("annotations"))...) 
+ return allErrs +} + +// ValidateSecurityContext ensure the security context contains valid settings +func ValidateSecurityContext(sc *core.SecurityContext, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + //this should only be true for testing since SecurityContext is defaulted by the core + if sc == nil { + return allErrs + } + + if sc.Privileged != nil { + if *sc.Privileged && !capabilities.Get().AllowPrivileged { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("privileged"), "disallowed by cluster policy")) + } + } + + if sc.RunAsUser != nil { + if *sc.RunAsUser < 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("runAsUser"), *sc.RunAsUser, isNegativeErrorMsg)) + } + } + + if sc.AllowPrivilegeEscalation != nil && !*sc.AllowPrivilegeEscalation { + if sc.Privileged != nil && *sc.Privileged { + allErrs = append(allErrs, field.Invalid(fldPath, sc, "cannot set `allowPrivilegeEscalation` to false and `privileged` to true")) + } + + if sc.Capabilities != nil { + for _, cap := range sc.Capabilities.Add { + if string(cap) == "CAP_SYS_ADMIN" { + allErrs = append(allErrs, field.Invalid(fldPath, sc, "cannot set `allowPrivilegeEscalation` to false and `capabilities.Add` CAP_SYS_ADMIN")) + } + } + } + } + + return allErrs +} + +func ValidatePodLogOptions(opts *core.PodLogOptions) field.ErrorList { + allErrs := field.ErrorList{} + if opts.TailLines != nil && *opts.TailLines < 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath("tailLines"), *opts.TailLines, isNegativeErrorMsg)) + } + if opts.LimitBytes != nil && *opts.LimitBytes < 1 { + allErrs = append(allErrs, field.Invalid(field.NewPath("limitBytes"), *opts.LimitBytes, "must be greater than 0")) + } + switch { + case opts.SinceSeconds != nil && opts.SinceTime != nil: + allErrs = append(allErrs, field.Forbidden(field.NewPath(""), "at most one of `sinceTime` or `sinceSeconds` may be specified")) + case opts.SinceSeconds != nil: + if *opts.SinceSeconds < 1 { + allErrs = append(allErrs, field.Invalid(field.NewPath("sinceSeconds"), *opts.SinceSeconds, "must be greater than 0")) + } + } + return allErrs +} + +// ValidateLoadBalancerStatus validates required fields on a LoadBalancerStatus +func ValidateLoadBalancerStatus(status *core.LoadBalancerStatus, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for i, ingress := range status.Ingress { + idxPath := fldPath.Child("ingress").Index(i) + if len(ingress.IP) > 0 { + if isIP := (net.ParseIP(ingress.IP) != nil); !isIP { + allErrs = append(allErrs, field.Invalid(idxPath.Child("ip"), ingress.IP, "must be a valid IP address")) + } + } + if len(ingress.Hostname) > 0 { + for _, msg := range validation.IsDNS1123Subdomain(ingress.Hostname) { + allErrs = append(allErrs, field.Invalid(idxPath.Child("hostname"), ingress.Hostname, msg)) + } + if isIP := (net.ParseIP(ingress.Hostname) != nil); isIP { + allErrs = append(allErrs, field.Invalid(idxPath.Child("hostname"), ingress.Hostname, "must be a DNS name, not an IP address")) + } + } + } + return allErrs +} + +func sysctlIntersection(a []core.Sysctl, b []core.Sysctl) []string { + lookup := make(map[string]struct{}, len(a)) + result := []string{} + for i := range a { + lookup[a[i].Name] = struct{}{} + } + for i := range b { + if _, found := lookup[b[i].Name]; found { + result = append(result, b[i].Name) + } + } + return result +} + +// validateStorageNodeAffinityAnnotation tests that the serialized TopologyConstraints in PersistentVolume.Annotations has valid data +func 
validateStorageNodeAffinityAnnotation(annotations map[string]string, fldPath *field.Path) (bool, field.ErrorList) { + allErrs := field.ErrorList{} + + na, err := helper.GetStorageNodeAffinityFromAnnotation(annotations) + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath, core.AlphaStorageNodeAffinityAnnotation, err.Error())) + return false, allErrs + } + if na == nil { + return false, allErrs + } + + if !utilfeature.DefaultFeatureGate.Enabled(features.PersistentLocalVolumes) { + allErrs = append(allErrs, field.Forbidden(fldPath, "Storage node affinity is disabled by feature-gate")) + } + + policySpecified := false + if na.RequiredDuringSchedulingIgnoredDuringExecution != nil { + allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingIgnoredDuringExecution, fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...) + policySpecified = true + } + + if len(na.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("preferredDuringSchedulingIgnoredDuringExection"), "Storage node affinity does not support preferredDuringSchedulingIgnoredDuringExecution")) + } + return policySpecified, allErrs +} + +// ValidateCIDR validates whether a CIDR matches the conventions expected by net.ParseCIDR +func ValidateCIDR(cidr string) (*net.IPNet, error) { + _, net, err := net.ParseCIDR(cidr) + if err != nil { + return nil, err + } + return net, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go new file mode 100644 index 000000000000..186760a19223 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go @@ -0,0 +1,5862 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package core + +import ( + resource "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSElasticBlockStoreVolumeSource) DeepCopyInto(out *AWSElasticBlockStoreVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSElasticBlockStoreVolumeSource. +func (in *AWSElasticBlockStoreVolumeSource) DeepCopy() *AWSElasticBlockStoreVolumeSource { + if in == nil { + return nil + } + out := new(AWSElasticBlockStoreVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Affinity) DeepCopyInto(out *Affinity) { + *out = *in + if in.NodeAffinity != nil { + in, out := &in.NodeAffinity, &out.NodeAffinity + if *in == nil { + *out = nil + } else { + *out = new(NodeAffinity) + (*in).DeepCopyInto(*out) + } + } + if in.PodAffinity != nil { + in, out := &in.PodAffinity, &out.PodAffinity + if *in == nil { + *out = nil + } else { + *out = new(PodAffinity) + (*in).DeepCopyInto(*out) + } + } + if in.PodAntiAffinity != nil { + in, out := &in.PodAntiAffinity, &out.PodAntiAffinity + if *in == nil { + *out = nil + } else { + *out = new(PodAntiAffinity) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Affinity. +func (in *Affinity) DeepCopy() *Affinity { + if in == nil { + return nil + } + out := new(Affinity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AttachedVolume) DeepCopyInto(out *AttachedVolume) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttachedVolume. +func (in *AttachedVolume) DeepCopy() *AttachedVolume { + if in == nil { + return nil + } + out := new(AttachedVolume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AvoidPods) DeepCopyInto(out *AvoidPods) { + *out = *in + if in.PreferAvoidPods != nil { + in, out := &in.PreferAvoidPods, &out.PreferAvoidPods + *out = make([]PreferAvoidPodsEntry, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AvoidPods. +func (in *AvoidPods) DeepCopy() *AvoidPods { + if in == nil { + return nil + } + out := new(AvoidPods) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureDiskVolumeSource) DeepCopyInto(out *AzureDiskVolumeSource) { + *out = *in + if in.CachingMode != nil { + in, out := &in.CachingMode, &out.CachingMode + if *in == nil { + *out = nil + } else { + *out = new(AzureDataDiskCachingMode) + **out = **in + } + } + if in.FSType != nil { + in, out := &in.FSType, &out.FSType + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + if *in == nil { + *out = nil + } else { + *out = new(AzureDataDiskKind) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureDiskVolumeSource. +func (in *AzureDiskVolumeSource) DeepCopy() *AzureDiskVolumeSource { + if in == nil { + return nil + } + out := new(AzureDiskVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzureFilePersistentVolumeSource) DeepCopyInto(out *AzureFilePersistentVolumeSource) { + *out = *in + if in.SecretNamespace != nil { + in, out := &in.SecretNamespace, &out.SecretNamespace + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureFilePersistentVolumeSource. +func (in *AzureFilePersistentVolumeSource) DeepCopy() *AzureFilePersistentVolumeSource { + if in == nil { + return nil + } + out := new(AzureFilePersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureFileVolumeSource) DeepCopyInto(out *AzureFileVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureFileVolumeSource. +func (in *AzureFileVolumeSource) DeepCopy() *AzureFileVolumeSource { + if in == nil { + return nil + } + out := new(AzureFileVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Binding) DeepCopyInto(out *Binding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Target = in.Target + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Binding. +func (in *Binding) DeepCopy() *Binding { + if in == nil { + return nil + } + out := new(Binding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Binding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSIPersistentVolumeSource) DeepCopyInto(out *CSIPersistentVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIPersistentVolumeSource. +func (in *CSIPersistentVolumeSource) DeepCopy() *CSIPersistentVolumeSource { + if in == nil { + return nil + } + out := new(CSIPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Capabilities) DeepCopyInto(out *Capabilities) { + *out = *in + if in.Add != nil { + in, out := &in.Add, &out.Add + *out = make([]Capability, len(*in)) + copy(*out, *in) + } + if in.Drop != nil { + in, out := &in.Drop, &out.Drop + *out = make([]Capability, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Capabilities. +func (in *Capabilities) DeepCopy() *Capabilities { + if in == nil { + return nil + } + out := new(Capabilities) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CephFSPersistentVolumeSource) DeepCopyInto(out *CephFSPersistentVolumeSource) { + *out = *in + if in.Monitors != nil { + in, out := &in.Monitors, &out.Monitors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(SecretReference) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFSPersistentVolumeSource. +func (in *CephFSPersistentVolumeSource) DeepCopy() *CephFSPersistentVolumeSource { + if in == nil { + return nil + } + out := new(CephFSPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephFSVolumeSource) DeepCopyInto(out *CephFSVolumeSource) { + *out = *in + if in.Monitors != nil { + in, out := &in.Monitors, &out.Monitors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(LocalObjectReference) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFSVolumeSource. +func (in *CephFSVolumeSource) DeepCopy() *CephFSVolumeSource { + if in == nil { + return nil + } + out := new(CephFSVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CinderVolumeSource) DeepCopyInto(out *CinderVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CinderVolumeSource. +func (in *CinderVolumeSource) DeepCopy() *CinderVolumeSource { + if in == nil { + return nil + } + out := new(CinderVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientIPConfig) DeepCopyInto(out *ClientIPConfig) { + *out = *in + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientIPConfig. +func (in *ClientIPConfig) DeepCopy() *ClientIPConfig { + if in == nil { + return nil + } + out := new(ClientIPConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComponentCondition) DeepCopyInto(out *ComponentCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentCondition. +func (in *ComponentCondition) DeepCopy() *ComponentCondition { + if in == nil { + return nil + } + out := new(ComponentCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ComponentStatus) DeepCopyInto(out *ComponentStatus) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ComponentCondition, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentStatus. +func (in *ComponentStatus) DeepCopy() *ComponentStatus { + if in == nil { + return nil + } + out := new(ComponentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ComponentStatus) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComponentStatusList) DeepCopyInto(out *ComponentStatusList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ComponentStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentStatusList. +func (in *ComponentStatusList) DeepCopy() *ComponentStatusList { + if in == nil { + return nil + } + out := new(ComponentStatusList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ComponentStatusList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMap) DeepCopyInto(out *ConfigMap) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMap. +func (in *ConfigMap) DeepCopy() *ConfigMap { + if in == nil { + return nil + } + out := new(ConfigMap) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConfigMap) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapEnvSource) DeepCopyInto(out *ConfigMapEnvSource) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapEnvSource. +func (in *ConfigMapEnvSource) DeepCopy() *ConfigMapEnvSource { + if in == nil { + return nil + } + out := new(ConfigMapEnvSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigMapKeySelector) DeepCopyInto(out *ConfigMapKeySelector) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapKeySelector. +func (in *ConfigMapKeySelector) DeepCopy() *ConfigMapKeySelector { + if in == nil { + return nil + } + out := new(ConfigMapKeySelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapList) DeepCopyInto(out *ConfigMapList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ConfigMap, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapList. +func (in *ConfigMapList) DeepCopy() *ConfigMapList { + if in == nil { + return nil + } + out := new(ConfigMapList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConfigMapList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapProjection) DeepCopyInto(out *ConfigMapProjection) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapProjection. +func (in *ConfigMapProjection) DeepCopy() *ConfigMapProjection { + if in == nil { + return nil + } + out := new(ConfigMapProjection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapVolumeSource) DeepCopyInto(out *ConfigMapVolumeSource) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapVolumeSource. +func (in *ConfigMapVolumeSource) DeepCopy() *ConfigMapVolumeSource { + if in == nil { + return nil + } + out := new(ConfigMapVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Container) DeepCopyInto(out *Container) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]ContainerPort, len(*in)) + copy(*out, *in) + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = make([]EnvFromSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Resources.DeepCopyInto(&out.Resources) + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeDevices != nil { + in, out := &in.VolumeDevices, &out.VolumeDevices + *out = make([]VolumeDevice, len(*in)) + copy(*out, *in) + } + if in.LivenessProbe != nil { + in, out := &in.LivenessProbe, &out.LivenessProbe + if *in == nil { + *out = nil + } else { + *out = new(Probe) + (*in).DeepCopyInto(*out) + } + } + if in.ReadinessProbe != nil { + in, out := &in.ReadinessProbe, &out.ReadinessProbe + if *in == nil { + *out = nil + } else { + *out = new(Probe) + (*in).DeepCopyInto(*out) + } + } + if in.Lifecycle != nil { + in, out := &in.Lifecycle, &out.Lifecycle + if *in == nil { + *out = nil + } else { + *out = new(Lifecycle) + (*in).DeepCopyInto(*out) + } + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + if *in == nil { + *out = nil + } else { + *out = new(SecurityContext) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Container. +func (in *Container) DeepCopy() *Container { + if in == nil { + return nil + } + out := new(Container) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerImage) DeepCopyInto(out *ContainerImage) { + *out = *in + if in.Names != nil { + in, out := &in.Names, &out.Names + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerImage. +func (in *ContainerImage) DeepCopy() *ContainerImage { + if in == nil { + return nil + } + out := new(ContainerImage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerPort) DeepCopyInto(out *ContainerPort) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerPort. +func (in *ContainerPort) DeepCopy() *ContainerPort { + if in == nil { + return nil + } + out := new(ContainerPort) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContainerState) DeepCopyInto(out *ContainerState) { + *out = *in + if in.Waiting != nil { + in, out := &in.Waiting, &out.Waiting + if *in == nil { + *out = nil + } else { + *out = new(ContainerStateWaiting) + **out = **in + } + } + if in.Running != nil { + in, out := &in.Running, &out.Running + if *in == nil { + *out = nil + } else { + *out = new(ContainerStateRunning) + (*in).DeepCopyInto(*out) + } + } + if in.Terminated != nil { + in, out := &in.Terminated, &out.Terminated + if *in == nil { + *out = nil + } else { + *out = new(ContainerStateTerminated) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerState. +func (in *ContainerState) DeepCopy() *ContainerState { + if in == nil { + return nil + } + out := new(ContainerState) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerStateRunning) DeepCopyInto(out *ContainerStateRunning) { + *out = *in + in.StartedAt.DeepCopyInto(&out.StartedAt) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStateRunning. +func (in *ContainerStateRunning) DeepCopy() *ContainerStateRunning { + if in == nil { + return nil + } + out := new(ContainerStateRunning) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerStateTerminated) DeepCopyInto(out *ContainerStateTerminated) { + *out = *in + in.StartedAt.DeepCopyInto(&out.StartedAt) + in.FinishedAt.DeepCopyInto(&out.FinishedAt) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStateTerminated. +func (in *ContainerStateTerminated) DeepCopy() *ContainerStateTerminated { + if in == nil { + return nil + } + out := new(ContainerStateTerminated) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerStateWaiting) DeepCopyInto(out *ContainerStateWaiting) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStateWaiting. +func (in *ContainerStateWaiting) DeepCopy() *ContainerStateWaiting { + if in == nil { + return nil + } + out := new(ContainerStateWaiting) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerStatus) DeepCopyInto(out *ContainerStatus) { + *out = *in + in.State.DeepCopyInto(&out.State) + in.LastTerminationState.DeepCopyInto(&out.LastTerminationState) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStatus. +func (in *ContainerStatus) DeepCopy() *ContainerStatus { + if in == nil { + return nil + } + out := new(ContainerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonEndpoint) DeepCopyInto(out *DaemonEndpoint) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonEndpoint. 
+func (in *DaemonEndpoint) DeepCopy() *DaemonEndpoint { + if in == nil { + return nil + } + out := new(DaemonEndpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeleteOptions) DeepCopyInto(out *DeleteOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.GracePeriodSeconds != nil { + in, out := &in.GracePeriodSeconds, &out.GracePeriodSeconds + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.Preconditions != nil { + in, out := &in.Preconditions, &out.Preconditions + if *in == nil { + *out = nil + } else { + *out = new(Preconditions) + (*in).DeepCopyInto(*out) + } + } + if in.OrphanDependents != nil { + in, out := &in.OrphanDependents, &out.OrphanDependents + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + if in.PropagationPolicy != nil { + in, out := &in.PropagationPolicy, &out.PropagationPolicy + if *in == nil { + *out = nil + } else { + *out = new(DeletionPropagation) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeleteOptions. +func (in *DeleteOptions) DeepCopy() *DeleteOptions { + if in == nil { + return nil + } + out := new(DeleteOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeleteOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DownwardAPIProjection) DeepCopyInto(out *DownwardAPIProjection) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DownwardAPIVolumeFile, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownwardAPIProjection. +func (in *DownwardAPIProjection) DeepCopy() *DownwardAPIProjection { + if in == nil { + return nil + } + out := new(DownwardAPIProjection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DownwardAPIVolumeFile) DeepCopyInto(out *DownwardAPIVolumeFile) { + *out = *in + if in.FieldRef != nil { + in, out := &in.FieldRef, &out.FieldRef + if *in == nil { + *out = nil + } else { + *out = new(ObjectFieldSelector) + **out = **in + } + } + if in.ResourceFieldRef != nil { + in, out := &in.ResourceFieldRef, &out.ResourceFieldRef + if *in == nil { + *out = nil + } else { + *out = new(ResourceFieldSelector) + (*in).DeepCopyInto(*out) + } + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownwardAPIVolumeFile. +func (in *DownwardAPIVolumeFile) DeepCopy() *DownwardAPIVolumeFile { + if in == nil { + return nil + } + out := new(DownwardAPIVolumeFile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DownwardAPIVolumeSource) DeepCopyInto(out *DownwardAPIVolumeSource) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DownwardAPIVolumeFile, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownwardAPIVolumeSource. +func (in *DownwardAPIVolumeSource) DeepCopy() *DownwardAPIVolumeSource { + if in == nil { + return nil + } + out := new(DownwardAPIVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EmptyDirVolumeSource) DeepCopyInto(out *EmptyDirVolumeSource) { + *out = *in + if in.SizeLimit != nil { + in, out := &in.SizeLimit, &out.SizeLimit + if *in == nil { + *out = nil + } else { + *out = new(resource.Quantity) + **out = (*in).DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmptyDirVolumeSource. +func (in *EmptyDirVolumeSource) DeepCopy() *EmptyDirVolumeSource { + if in == nil { + return nil + } + out := new(EmptyDirVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointAddress) DeepCopyInto(out *EndpointAddress) { + *out = *in + if in.NodeName != nil { + in, out := &in.NodeName, &out.NodeName + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + if in.TargetRef != nil { + in, out := &in.TargetRef, &out.TargetRef + if *in == nil { + *out = nil + } else { + *out = new(ObjectReference) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointAddress. +func (in *EndpointAddress) DeepCopy() *EndpointAddress { + if in == nil { + return nil + } + out := new(EndpointAddress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointPort) DeepCopyInto(out *EndpointPort) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPort. +func (in *EndpointPort) DeepCopy() *EndpointPort { + if in == nil { + return nil + } + out := new(EndpointPort) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointSubset) DeepCopyInto(out *EndpointSubset) { + *out = *in + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]EndpointAddress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NotReadyAddresses != nil { + in, out := &in.NotReadyAddresses, &out.NotReadyAddresses + *out = make([]EndpointAddress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]EndpointPort, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSubset. 
+func (in *EndpointSubset) DeepCopy() *EndpointSubset { + if in == nil { + return nil + } + out := new(EndpointSubset) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Endpoints) DeepCopyInto(out *Endpoints) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Subsets != nil { + in, out := &in.Subsets, &out.Subsets + *out = make([]EndpointSubset, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoints. +func (in *Endpoints) DeepCopy() *Endpoints { + if in == nil { + return nil + } + out := new(Endpoints) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Endpoints) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointsList) DeepCopyInto(out *EndpointsList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Endpoints, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointsList. +func (in *EndpointsList) DeepCopy() *EndpointsList { + if in == nil { + return nil + } + out := new(EndpointsList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EndpointsList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvFromSource) DeepCopyInto(out *EnvFromSource) { + *out = *in + if in.ConfigMapRef != nil { + in, out := &in.ConfigMapRef, &out.ConfigMapRef + if *in == nil { + *out = nil + } else { + *out = new(ConfigMapEnvSource) + (*in).DeepCopyInto(*out) + } + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(SecretEnvSource) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvFromSource. +func (in *EnvFromSource) DeepCopy() *EnvFromSource { + if in == nil { + return nil + } + out := new(EnvFromSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvVar) DeepCopyInto(out *EnvVar) { + *out = *in + if in.ValueFrom != nil { + in, out := &in.ValueFrom, &out.ValueFrom + if *in == nil { + *out = nil + } else { + *out = new(EnvVarSource) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVar. 
+func (in *EnvVar) DeepCopy() *EnvVar { + if in == nil { + return nil + } + out := new(EnvVar) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvVarSource) DeepCopyInto(out *EnvVarSource) { + *out = *in + if in.FieldRef != nil { + in, out := &in.FieldRef, &out.FieldRef + if *in == nil { + *out = nil + } else { + *out = new(ObjectFieldSelector) + **out = **in + } + } + if in.ResourceFieldRef != nil { + in, out := &in.ResourceFieldRef, &out.ResourceFieldRef + if *in == nil { + *out = nil + } else { + *out = new(ResourceFieldSelector) + (*in).DeepCopyInto(*out) + } + } + if in.ConfigMapKeyRef != nil { + in, out := &in.ConfigMapKeyRef, &out.ConfigMapKeyRef + if *in == nil { + *out = nil + } else { + *out = new(ConfigMapKeySelector) + (*in).DeepCopyInto(*out) + } + } + if in.SecretKeyRef != nil { + in, out := &in.SecretKeyRef, &out.SecretKeyRef + if *in == nil { + *out = nil + } else { + *out = new(SecretKeySelector) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVarSource. +func (in *EnvVarSource) DeepCopy() *EnvVarSource { + if in == nil { + return nil + } + out := new(EnvVarSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Event) DeepCopyInto(out *Event) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.InvolvedObject = in.InvolvedObject + out.Source = in.Source + in.FirstTimestamp.DeepCopyInto(&out.FirstTimestamp) + in.LastTimestamp.DeepCopyInto(&out.LastTimestamp) + in.EventTime.DeepCopyInto(&out.EventTime) + if in.Series != nil { + in, out := &in.Series, &out.Series + if *in == nil { + *out = nil + } else { + *out = new(EventSeries) + (*in).DeepCopyInto(*out) + } + } + if in.Related != nil { + in, out := &in.Related, &out.Related + if *in == nil { + *out = nil + } else { + *out = new(ObjectReference) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Event. +func (in *Event) DeepCopy() *Event { + if in == nil { + return nil + } + out := new(Event) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Event) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventList) DeepCopyInto(out *EventList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Event, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventList. +func (in *EventList) DeepCopy() *EventList { + if in == nil { + return nil + } + out := new(EventList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *EventList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventSeries) DeepCopyInto(out *EventSeries) { + *out = *in + in.LastObservedTime.DeepCopyInto(&out.LastObservedTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSeries. +func (in *EventSeries) DeepCopy() *EventSeries { + if in == nil { + return nil + } + out := new(EventSeries) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventSource) DeepCopyInto(out *EventSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSource. +func (in *EventSource) DeepCopy() *EventSource { + if in == nil { + return nil + } + out := new(EventSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecAction) DeepCopyInto(out *ExecAction) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecAction. +func (in *ExecAction) DeepCopy() *ExecAction { + if in == nil { + return nil + } + out := new(ExecAction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FCVolumeSource) DeepCopyInto(out *FCVolumeSource) { + *out = *in + if in.TargetWWNs != nil { + in, out := &in.TargetWWNs, &out.TargetWWNs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Lun != nil { + in, out := &in.Lun, &out.Lun + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.WWIDs != nil { + in, out := &in.WWIDs, &out.WWIDs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FCVolumeSource. +func (in *FCVolumeSource) DeepCopy() *FCVolumeSource { + if in == nil { + return nil + } + out := new(FCVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlexVolumeSource) DeepCopyInto(out *FlexVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(LocalObjectReference) + **out = **in + } + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlexVolumeSource. +func (in *FlexVolumeSource) DeepCopy() *FlexVolumeSource { + if in == nil { + return nil + } + out := new(FlexVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FlockerVolumeSource) DeepCopyInto(out *FlockerVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlockerVolumeSource. +func (in *FlockerVolumeSource) DeepCopy() *FlockerVolumeSource { + if in == nil { + return nil + } + out := new(FlockerVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCEPersistentDiskVolumeSource) DeepCopyInto(out *GCEPersistentDiskVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCEPersistentDiskVolumeSource. +func (in *GCEPersistentDiskVolumeSource) DeepCopy() *GCEPersistentDiskVolumeSource { + if in == nil { + return nil + } + out := new(GCEPersistentDiskVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitRepoVolumeSource) DeepCopyInto(out *GitRepoVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepoVolumeSource. +func (in *GitRepoVolumeSource) DeepCopy() *GitRepoVolumeSource { + if in == nil { + return nil + } + out := new(GitRepoVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlusterfsVolumeSource) DeepCopyInto(out *GlusterfsVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlusterfsVolumeSource. +func (in *GlusterfsVolumeSource) DeepCopy() *GlusterfsVolumeSource { + if in == nil { + return nil + } + out := new(GlusterfsVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPGetAction) DeepCopyInto(out *HTTPGetAction) { + *out = *in + out.Port = in.Port + if in.HTTPHeaders != nil { + in, out := &in.HTTPHeaders, &out.HTTPHeaders + *out = make([]HTTPHeader, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPGetAction. +func (in *HTTPGetAction) DeepCopy() *HTTPGetAction { + if in == nil { + return nil + } + out := new(HTTPGetAction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPHeader) DeepCopyInto(out *HTTPHeader) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHeader. +func (in *HTTPHeader) DeepCopy() *HTTPHeader { + if in == nil { + return nil + } + out := new(HTTPHeader) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Handler) DeepCopyInto(out *Handler) { + *out = *in + if in.Exec != nil { + in, out := &in.Exec, &out.Exec + if *in == nil { + *out = nil + } else { + *out = new(ExecAction) + (*in).DeepCopyInto(*out) + } + } + if in.HTTPGet != nil { + in, out := &in.HTTPGet, &out.HTTPGet + if *in == nil { + *out = nil + } else { + *out = new(HTTPGetAction) + (*in).DeepCopyInto(*out) + } + } + if in.TCPSocket != nil { + in, out := &in.TCPSocket, &out.TCPSocket + if *in == nil { + *out = nil + } else { + *out = new(TCPSocketAction) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Handler. +func (in *Handler) DeepCopy() *Handler { + if in == nil { + return nil + } + out := new(Handler) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostAlias) DeepCopyInto(out *HostAlias) { + *out = *in + if in.Hostnames != nil { + in, out := &in.Hostnames, &out.Hostnames + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostAlias. +func (in *HostAlias) DeepCopy() *HostAlias { + if in == nil { + return nil + } + out := new(HostAlias) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostPathVolumeSource) DeepCopyInto(out *HostPathVolumeSource) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + if *in == nil { + *out = nil + } else { + *out = new(HostPathType) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPathVolumeSource. +func (in *HostPathVolumeSource) DeepCopy() *HostPathVolumeSource { + if in == nil { + return nil + } + out := new(HostPathVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ISCSIPersistentVolumeSource) DeepCopyInto(out *ISCSIPersistentVolumeSource) { + *out = *in + if in.Portals != nil { + in, out := &in.Portals, &out.Portals + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(SecretReference) + **out = **in + } + } + if in.InitiatorName != nil { + in, out := &in.InitiatorName, &out.InitiatorName + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ISCSIPersistentVolumeSource. +func (in *ISCSIPersistentVolumeSource) DeepCopy() *ISCSIPersistentVolumeSource { + if in == nil { + return nil + } + out := new(ISCSIPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ISCSIVolumeSource) DeepCopyInto(out *ISCSIVolumeSource) { + *out = *in + if in.Portals != nil { + in, out := &in.Portals, &out.Portals + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(LocalObjectReference) + **out = **in + } + } + if in.InitiatorName != nil { + in, out := &in.InitiatorName, &out.InitiatorName + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ISCSIVolumeSource. +func (in *ISCSIVolumeSource) DeepCopy() *ISCSIVolumeSource { + if in == nil { + return nil + } + out := new(ISCSIVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeyToPath) DeepCopyInto(out *KeyToPath) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyToPath. +func (in *KeyToPath) DeepCopy() *KeyToPath { + if in == nil { + return nil + } + out := new(KeyToPath) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Lifecycle) DeepCopyInto(out *Lifecycle) { + *out = *in + if in.PostStart != nil { + in, out := &in.PostStart, &out.PostStart + if *in == nil { + *out = nil + } else { + *out = new(Handler) + (*in).DeepCopyInto(*out) + } + } + if in.PreStop != nil { + in, out := &in.PreStop, &out.PreStop + if *in == nil { + *out = nil + } else { + *out = new(Handler) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Lifecycle. +func (in *Lifecycle) DeepCopy() *Lifecycle { + if in == nil { + return nil + } + out := new(Lifecycle) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LimitRange) DeepCopyInto(out *LimitRange) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRange. +func (in *LimitRange) DeepCopy() *LimitRange { + if in == nil { + return nil + } + out := new(LimitRange) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LimitRange) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LimitRangeItem) DeepCopyInto(out *LimitRangeItem) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Default != nil { + in, out := &in.Default, &out.Default + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.DefaultRequest != nil { + in, out := &in.DefaultRequest, &out.DefaultRequest + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.MaxLimitRequestRatio != nil { + in, out := &in.MaxLimitRequestRatio, &out.MaxLimitRequestRatio + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRangeItem. +func (in *LimitRangeItem) DeepCopy() *LimitRangeItem { + if in == nil { + return nil + } + out := new(LimitRangeItem) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LimitRangeList) DeepCopyInto(out *LimitRangeList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LimitRange, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRangeList. +func (in *LimitRangeList) DeepCopy() *LimitRangeList { + if in == nil { + return nil + } + out := new(LimitRangeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LimitRangeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LimitRangeSpec) DeepCopyInto(out *LimitRangeSpec) { + *out = *in + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = make([]LimitRangeItem, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRangeSpec. +func (in *LimitRangeSpec) DeepCopy() *LimitRangeSpec { + if in == nil { + return nil + } + out := new(LimitRangeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *List) DeepCopyInto(out *List) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.Object, len(*in)) + for i := range *in { + if (*in)[i] == nil { + (*out)[i] = nil + } else { + (*out)[i] = (*in)[i].DeepCopyObject() + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new List. 
+func (in *List) DeepCopy() *List { + if in == nil { + return nil + } + out := new(List) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *List) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListOptions) DeepCopyInto(out *ListOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.LabelSelector == nil { + out.LabelSelector = nil + } else { + out.LabelSelector = in.LabelSelector.DeepCopySelector() + } + if in.FieldSelector == nil { + out.FieldSelector = nil + } else { + out.FieldSelector = in.FieldSelector.DeepCopySelector() + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListOptions. +func (in *ListOptions) DeepCopy() *ListOptions { + if in == nil { + return nil + } + out := new(ListOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ListOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoadBalancerIngress) DeepCopyInto(out *LoadBalancerIngress) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerIngress. +func (in *LoadBalancerIngress) DeepCopy() *LoadBalancerIngress { + if in == nil { + return nil + } + out := new(LoadBalancerIngress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoadBalancerStatus) DeepCopyInto(out *LoadBalancerStatus) { + *out = *in + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = make([]LoadBalancerIngress, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerStatus. +func (in *LoadBalancerStatus) DeepCopy() *LoadBalancerStatus { + if in == nil { + return nil + } + out := new(LoadBalancerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalObjectReference) DeepCopyInto(out *LocalObjectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalObjectReference. +func (in *LocalObjectReference) DeepCopy() *LocalObjectReference { + if in == nil { + return nil + } + out := new(LocalObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalVolumeSource) DeepCopyInto(out *LocalVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalVolumeSource. 
+func (in *LocalVolumeSource) DeepCopy() *LocalVolumeSource { + if in == nil { + return nil + } + out := new(LocalVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NFSVolumeSource) DeepCopyInto(out *NFSVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NFSVolumeSource. +func (in *NFSVolumeSource) DeepCopy() *NFSVolumeSource { + if in == nil { + return nil + } + out := new(NFSVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Namespace) DeepCopyInto(out *Namespace) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Namespace. +func (in *Namespace) DeepCopy() *Namespace { + if in == nil { + return nil + } + out := new(Namespace) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Namespace) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamespaceList) DeepCopyInto(out *NamespaceList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Namespace, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceList. +func (in *NamespaceList) DeepCopy() *NamespaceList { + if in == nil { + return nil + } + out := new(NamespaceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NamespaceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamespaceSpec) DeepCopyInto(out *NamespaceSpec) { + *out = *in + if in.Finalizers != nil { + in, out := &in.Finalizers, &out.Finalizers + *out = make([]FinalizerName, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceSpec. +func (in *NamespaceSpec) DeepCopy() *NamespaceSpec { + if in == nil { + return nil + } + out := new(NamespaceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamespaceStatus) DeepCopyInto(out *NamespaceStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceStatus. 
+func (in *NamespaceStatus) DeepCopy() *NamespaceStatus { + if in == nil { + return nil + } + out := new(NamespaceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Node) DeepCopyInto(out *Node) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Node. +func (in *Node) DeepCopy() *Node { + if in == nil { + return nil + } + out := new(Node) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Node) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeAddress) DeepCopyInto(out *NodeAddress) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAddress. +func (in *NodeAddress) DeepCopy() *NodeAddress { + if in == nil { + return nil + } + out := new(NodeAddress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeAffinity) DeepCopyInto(out *NodeAffinity) { + *out = *in + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + if *in == nil { + *out = nil + } else { + *out = new(NodeSelector) + (*in).DeepCopyInto(*out) + } + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]PreferredSchedulingTerm, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAffinity. +func (in *NodeAffinity) DeepCopy() *NodeAffinity { + if in == nil { + return nil + } + out := new(NodeAffinity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeCondition) DeepCopyInto(out *NodeCondition) { + *out = *in + in.LastHeartbeatTime.DeepCopyInto(&out.LastHeartbeatTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeCondition. +func (in *NodeCondition) DeepCopy() *NodeCondition { + if in == nil { + return nil + } + out := new(NodeCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeConfigSource) DeepCopyInto(out *NodeConfigSource) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.ConfigMapRef != nil { + in, out := &in.ConfigMapRef, &out.ConfigMapRef + if *in == nil { + *out = nil + } else { + *out = new(ObjectReference) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeConfigSource. +func (in *NodeConfigSource) DeepCopy() *NodeConfigSource { + if in == nil { + return nil + } + out := new(NodeConfigSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeConfigSource) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeDaemonEndpoints) DeepCopyInto(out *NodeDaemonEndpoints) { + *out = *in + out.KubeletEndpoint = in.KubeletEndpoint + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeDaemonEndpoints. +func (in *NodeDaemonEndpoints) DeepCopy() *NodeDaemonEndpoints { + if in == nil { + return nil + } + out := new(NodeDaemonEndpoints) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeList) DeepCopyInto(out *NodeList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Node, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeList. +func (in *NodeList) DeepCopy() *NodeList { + if in == nil { + return nil + } + out := new(NodeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeProxyOptions) DeepCopyInto(out *NodeProxyOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeProxyOptions. +func (in *NodeProxyOptions) DeepCopy() *NodeProxyOptions { + if in == nil { + return nil + } + out := new(NodeProxyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeProxyOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeResources) DeepCopyInto(out *NodeResources) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResources. 
+func (in *NodeResources) DeepCopy() *NodeResources { + if in == nil { + return nil + } + out := new(NodeResources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeSelector) DeepCopyInto(out *NodeSelector) { + *out = *in + if in.NodeSelectorTerms != nil { + in, out := &in.NodeSelectorTerms, &out.NodeSelectorTerms + *out = make([]NodeSelectorTerm, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSelector. +func (in *NodeSelector) DeepCopy() *NodeSelector { + if in == nil { + return nil + } + out := new(NodeSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeSelectorRequirement) DeepCopyInto(out *NodeSelectorRequirement) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSelectorRequirement. +func (in *NodeSelectorRequirement) DeepCopy() *NodeSelectorRequirement { + if in == nil { + return nil + } + out := new(NodeSelectorRequirement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeSelectorTerm) DeepCopyInto(out *NodeSelectorTerm) { + *out = *in + if in.MatchExpressions != nil { + in, out := &in.MatchExpressions, &out.MatchExpressions + *out = make([]NodeSelectorRequirement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSelectorTerm. +func (in *NodeSelectorTerm) DeepCopy() *NodeSelectorTerm { + if in == nil { + return nil + } + out := new(NodeSelectorTerm) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeSpec) DeepCopyInto(out *NodeSpec) { + *out = *in + if in.Taints != nil { + in, out := &in.Taints, &out.Taints + *out = make([]Taint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ConfigSource != nil { + in, out := &in.ConfigSource, &out.ConfigSource + if *in == nil { + *out = nil + } else { + *out = new(NodeConfigSource) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSpec. +func (in *NodeSpec) DeepCopy() *NodeSpec { + if in == nil { + return nil + } + out := new(NodeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeStatus) DeepCopyInto(out *NodeStatus) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Allocatable != nil { + in, out := &in.Allocatable, &out.Allocatable + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]NodeCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]NodeAddress, len(*in)) + copy(*out, *in) + } + out.DaemonEndpoints = in.DaemonEndpoints + out.NodeInfo = in.NodeInfo + if in.Images != nil { + in, out := &in.Images, &out.Images + *out = make([]ContainerImage, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumesInUse != nil { + in, out := &in.VolumesInUse, &out.VolumesInUse + *out = make([]UniqueVolumeName, len(*in)) + copy(*out, *in) + } + if in.VolumesAttached != nil { + in, out := &in.VolumesAttached, &out.VolumesAttached + *out = make([]AttachedVolume, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeStatus. +func (in *NodeStatus) DeepCopy() *NodeStatus { + if in == nil { + return nil + } + out := new(NodeStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeSystemInfo) DeepCopyInto(out *NodeSystemInfo) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSystemInfo. +func (in *NodeSystemInfo) DeepCopy() *NodeSystemInfo { + if in == nil { + return nil + } + out := new(NodeSystemInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectFieldSelector) DeepCopyInto(out *ObjectFieldSelector) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectFieldSelector. +func (in *ObjectFieldSelector) DeepCopy() *ObjectFieldSelector { + if in == nil { + return nil + } + out := new(ObjectFieldSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ObjectMeta) DeepCopyInto(out *ObjectMeta) { + *out = *in + in.CreationTimestamp.DeepCopyInto(&out.CreationTimestamp) + if in.DeletionTimestamp != nil { + in, out := &in.DeletionTimestamp, &out.DeletionTimestamp + if *in == nil { + *out = nil + } else { + *out = new(v1.Time) + (*in).DeepCopyInto(*out) + } + } + if in.DeletionGracePeriodSeconds != nil { + in, out := &in.DeletionGracePeriodSeconds, &out.DeletionGracePeriodSeconds + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.OwnerReferences != nil { + in, out := &in.OwnerReferences, &out.OwnerReferences + *out = make([]v1.OwnerReference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Initializers != nil { + in, out := &in.Initializers, &out.Initializers + if *in == nil { + *out = nil + } else { + *out = new(v1.Initializers) + (*in).DeepCopyInto(*out) + } + } + if in.Finalizers != nil { + in, out := &in.Finalizers, &out.Finalizers + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMeta. +func (in *ObjectMeta) DeepCopy() *ObjectMeta { + if in == nil { + return nil + } + out := new(ObjectMeta) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectReference) DeepCopyInto(out *ObjectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference. +func (in *ObjectReference) DeepCopy() *ObjectReference { + if in == nil { + return nil + } + out := new(ObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ObjectReference) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolume) DeepCopyInto(out *PersistentVolume) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolume. +func (in *PersistentVolume) DeepCopy() *PersistentVolume { + if in == nil { + return nil + } + out := new(PersistentVolume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PersistentVolume) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PersistentVolumeClaim) DeepCopyInto(out *PersistentVolumeClaim) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaim. +func (in *PersistentVolumeClaim) DeepCopy() *PersistentVolumeClaim { + if in == nil { + return nil + } + out := new(PersistentVolumeClaim) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PersistentVolumeClaim) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolumeClaimCondition) DeepCopyInto(out *PersistentVolumeClaimCondition) { + *out = *in + in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimCondition. +func (in *PersistentVolumeClaimCondition) DeepCopy() *PersistentVolumeClaimCondition { + if in == nil { + return nil + } + out := new(PersistentVolumeClaimCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolumeClaimList) DeepCopyInto(out *PersistentVolumeClaimList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PersistentVolumeClaim, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimList. +func (in *PersistentVolumeClaimList) DeepCopy() *PersistentVolumeClaimList { + if in == nil { + return nil + } + out := new(PersistentVolumeClaimList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PersistentVolumeClaimList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PersistentVolumeClaimSpec) DeepCopyInto(out *PersistentVolumeClaimSpec) { + *out = *in + if in.AccessModes != nil { + in, out := &in.AccessModes, &out.AccessModes + *out = make([]PersistentVolumeAccessMode, len(*in)) + copy(*out, *in) + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if *in == nil { + *out = nil + } else { + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + in.Resources.DeepCopyInto(&out.Resources) + if in.StorageClassName != nil { + in, out := &in.StorageClassName, &out.StorageClassName + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + if in.VolumeMode != nil { + in, out := &in.VolumeMode, &out.VolumeMode + if *in == nil { + *out = nil + } else { + *out = new(PersistentVolumeMode) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimSpec. +func (in *PersistentVolumeClaimSpec) DeepCopy() *PersistentVolumeClaimSpec { + if in == nil { + return nil + } + out := new(PersistentVolumeClaimSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolumeClaimStatus) DeepCopyInto(out *PersistentVolumeClaimStatus) { + *out = *in + if in.AccessModes != nil { + in, out := &in.AccessModes, &out.AccessModes + *out = make([]PersistentVolumeAccessMode, len(*in)) + copy(*out, *in) + } + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]PersistentVolumeClaimCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimStatus. +func (in *PersistentVolumeClaimStatus) DeepCopy() *PersistentVolumeClaimStatus { + if in == nil { + return nil + } + out := new(PersistentVolumeClaimStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolumeClaimVolumeSource) DeepCopyInto(out *PersistentVolumeClaimVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimVolumeSource. +func (in *PersistentVolumeClaimVolumeSource) DeepCopy() *PersistentVolumeClaimVolumeSource { + if in == nil { + return nil + } + out := new(PersistentVolumeClaimVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolumeList) DeepCopyInto(out *PersistentVolumeList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PersistentVolume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeList. 
+func (in *PersistentVolumeList) DeepCopy() *PersistentVolumeList { + if in == nil { + return nil + } + out := new(PersistentVolumeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PersistentVolumeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) { + *out = *in + if in.GCEPersistentDisk != nil { + in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk + if *in == nil { + *out = nil + } else { + *out = new(GCEPersistentDiskVolumeSource) + **out = **in + } + } + if in.AWSElasticBlockStore != nil { + in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore + if *in == nil { + *out = nil + } else { + *out = new(AWSElasticBlockStoreVolumeSource) + **out = **in + } + } + if in.HostPath != nil { + in, out := &in.HostPath, &out.HostPath + if *in == nil { + *out = nil + } else { + *out = new(HostPathVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.Glusterfs != nil { + in, out := &in.Glusterfs, &out.Glusterfs + if *in == nil { + *out = nil + } else { + *out = new(GlusterfsVolumeSource) + **out = **in + } + } + if in.NFS != nil { + in, out := &in.NFS, &out.NFS + if *in == nil { + *out = nil + } else { + *out = new(NFSVolumeSource) + **out = **in + } + } + if in.RBD != nil { + in, out := &in.RBD, &out.RBD + if *in == nil { + *out = nil + } else { + *out = new(RBDPersistentVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.Quobyte != nil { + in, out := &in.Quobyte, &out.Quobyte + if *in == nil { + *out = nil + } else { + *out = new(QuobyteVolumeSource) + **out = **in + } + } + if in.ISCSI != nil { + in, out := &in.ISCSI, &out.ISCSI + if *in == nil { + *out = nil + } else { + *out = new(ISCSIPersistentVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.FlexVolume != nil { + in, out := &in.FlexVolume, &out.FlexVolume + if *in == nil { + *out = nil + } else { + *out = new(FlexVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.Cinder != nil { + in, out := &in.Cinder, &out.Cinder + if *in == nil { + *out = nil + } else { + *out = new(CinderVolumeSource) + **out = **in + } + } + if in.CephFS != nil { + in, out := &in.CephFS, &out.CephFS + if *in == nil { + *out = nil + } else { + *out = new(CephFSPersistentVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.FC != nil { + in, out := &in.FC, &out.FC + if *in == nil { + *out = nil + } else { + *out = new(FCVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.Flocker != nil { + in, out := &in.Flocker, &out.Flocker + if *in == nil { + *out = nil + } else { + *out = new(FlockerVolumeSource) + **out = **in + } + } + if in.AzureFile != nil { + in, out := &in.AzureFile, &out.AzureFile + if *in == nil { + *out = nil + } else { + *out = new(AzureFilePersistentVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.VsphereVolume != nil { + in, out := &in.VsphereVolume, &out.VsphereVolume + if *in == nil { + *out = nil + } else { + *out = new(VsphereVirtualDiskVolumeSource) + **out = **in + } + } + if in.AzureDisk != nil { + in, out := &in.AzureDisk, &out.AzureDisk + if *in == nil { + *out = nil + } else { + *out = new(AzureDiskVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.PhotonPersistentDisk != nil { + in, out := &in.PhotonPersistentDisk, 
&out.PhotonPersistentDisk + if *in == nil { + *out = nil + } else { + *out = new(PhotonPersistentDiskVolumeSource) + **out = **in + } + } + if in.PortworxVolume != nil { + in, out := &in.PortworxVolume, &out.PortworxVolume + if *in == nil { + *out = nil + } else { + *out = new(PortworxVolumeSource) + **out = **in + } + } + if in.ScaleIO != nil { + in, out := &in.ScaleIO, &out.ScaleIO + if *in == nil { + *out = nil + } else { + *out = new(ScaleIOPersistentVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.Local != nil { + in, out := &in.Local, &out.Local + if *in == nil { + *out = nil + } else { + *out = new(LocalVolumeSource) + **out = **in + } + } + if in.StorageOS != nil { + in, out := &in.StorageOS, &out.StorageOS + if *in == nil { + *out = nil + } else { + *out = new(StorageOSPersistentVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.CSI != nil { + in, out := &in.CSI, &out.CSI + if *in == nil { + *out = nil + } else { + *out = new(CSIPersistentVolumeSource) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeSource. +func (in *PersistentVolumeSource) DeepCopy() *PersistentVolumeSource { + if in == nil { + return nil + } + out := new(PersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolumeSpec) DeepCopyInto(out *PersistentVolumeSpec) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + in.PersistentVolumeSource.DeepCopyInto(&out.PersistentVolumeSource) + if in.AccessModes != nil { + in, out := &in.AccessModes, &out.AccessModes + *out = make([]PersistentVolumeAccessMode, len(*in)) + copy(*out, *in) + } + if in.ClaimRef != nil { + in, out := &in.ClaimRef, &out.ClaimRef + if *in == nil { + *out = nil + } else { + *out = new(ObjectReference) + **out = **in + } + } + if in.MountOptions != nil { + in, out := &in.MountOptions, &out.MountOptions + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.VolumeMode != nil { + in, out := &in.VolumeMode, &out.VolumeMode + if *in == nil { + *out = nil + } else { + *out = new(PersistentVolumeMode) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeSpec. +func (in *PersistentVolumeSpec) DeepCopy() *PersistentVolumeSpec { + if in == nil { + return nil + } + out := new(PersistentVolumeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolumeStatus) DeepCopyInto(out *PersistentVolumeStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeStatus. +func (in *PersistentVolumeStatus) DeepCopy() *PersistentVolumeStatus { + if in == nil { + return nil + } + out := new(PersistentVolumeStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PhotonPersistentDiskVolumeSource) DeepCopyInto(out *PhotonPersistentDiskVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PhotonPersistentDiskVolumeSource. +func (in *PhotonPersistentDiskVolumeSource) DeepCopy() *PhotonPersistentDiskVolumeSource { + if in == nil { + return nil + } + out := new(PhotonPersistentDiskVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Pod) DeepCopyInto(out *Pod) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Pod. +func (in *Pod) DeepCopy() *Pod { + if in == nil { + return nil + } + out := new(Pod) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Pod) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodAffinity) DeepCopyInto(out *PodAffinity) { + *out = *in + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = make([]PodAffinityTerm, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]WeightedPodAffinityTerm, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAffinity. +func (in *PodAffinity) DeepCopy() *PodAffinity { + if in == nil { + return nil + } + out := new(PodAffinity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodAffinityTerm) DeepCopyInto(out *PodAffinityTerm) { + *out = *in + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + if *in == nil { + *out = nil + } else { + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAffinityTerm. +func (in *PodAffinityTerm) DeepCopy() *PodAffinityTerm { + if in == nil { + return nil + } + out := new(PodAffinityTerm) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PodAntiAffinity) DeepCopyInto(out *PodAntiAffinity) { + *out = *in + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = make([]PodAffinityTerm, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]WeightedPodAffinityTerm, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAntiAffinity. +func (in *PodAntiAffinity) DeepCopy() *PodAntiAffinity { + if in == nil { + return nil + } + out := new(PodAntiAffinity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodAttachOptions) DeepCopyInto(out *PodAttachOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAttachOptions. +func (in *PodAttachOptions) DeepCopy() *PodAttachOptions { + if in == nil { + return nil + } + out := new(PodAttachOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodAttachOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodCondition) DeepCopyInto(out *PodCondition) { + *out = *in + in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCondition. +func (in *PodCondition) DeepCopy() *PodCondition { + if in == nil { + return nil + } + out := new(PodCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodDNSConfig) DeepCopyInto(out *PodDNSConfig) { + *out = *in + if in.Nameservers != nil { + in, out := &in.Nameservers, &out.Nameservers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Searches != nil { + in, out := &in.Searches, &out.Searches + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make([]PodDNSConfigOption, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDNSConfig. +func (in *PodDNSConfig) DeepCopy() *PodDNSConfig { + if in == nil { + return nil + } + out := new(PodDNSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PodDNSConfigOption) DeepCopyInto(out *PodDNSConfigOption) { + *out = *in + if in.Value != nil { + in, out := &in.Value, &out.Value + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDNSConfigOption. +func (in *PodDNSConfigOption) DeepCopy() *PodDNSConfigOption { + if in == nil { + return nil + } + out := new(PodDNSConfigOption) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodExecOptions) DeepCopyInto(out *PodExecOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodExecOptions. +func (in *PodExecOptions) DeepCopy() *PodExecOptions { + if in == nil { + return nil + } + out := new(PodExecOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodExecOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodList) DeepCopyInto(out *PodList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Pod, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodList. +func (in *PodList) DeepCopy() *PodList { + if in == nil { + return nil + } + out := new(PodList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodLogOptions) DeepCopyInto(out *PodLogOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.SinceSeconds != nil { + in, out := &in.SinceSeconds, &out.SinceSeconds + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.SinceTime != nil { + in, out := &in.SinceTime, &out.SinceTime + if *in == nil { + *out = nil + } else { + *out = new(v1.Time) + (*in).DeepCopyInto(*out) + } + } + if in.TailLines != nil { + in, out := &in.TailLines, &out.TailLines + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.LimitBytes != nil { + in, out := &in.LimitBytes, &out.LimitBytes + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodLogOptions. +func (in *PodLogOptions) DeepCopy() *PodLogOptions { + if in == nil { + return nil + } + out := new(PodLogOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *PodLogOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodPortForwardOptions) DeepCopyInto(out *PodPortForwardOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]int32, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodPortForwardOptions. +func (in *PodPortForwardOptions) DeepCopy() *PodPortForwardOptions { + if in == nil { + return nil + } + out := new(PodPortForwardOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodPortForwardOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodProxyOptions) DeepCopyInto(out *PodProxyOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodProxyOptions. +func (in *PodProxyOptions) DeepCopy() *PodProxyOptions { + if in == nil { + return nil + } + out := new(PodProxyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodProxyOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) { + *out = *in + if in.SELinuxOptions != nil { + in, out := &in.SELinuxOptions, &out.SELinuxOptions + if *in == nil { + *out = nil + } else { + *out = new(SELinuxOptions) + **out = **in + } + } + if in.RunAsUser != nil { + in, out := &in.RunAsUser, &out.RunAsUser + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.RunAsNonRoot != nil { + in, out := &in.RunAsNonRoot, &out.RunAsNonRoot + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + if in.SupplementalGroups != nil { + in, out := &in.SupplementalGroups, &out.SupplementalGroups + *out = make([]int64, len(*in)) + copy(*out, *in) + } + if in.FSGroup != nil { + in, out := &in.FSGroup, &out.FSGroup + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityContext. +func (in *PodSecurityContext) DeepCopy() *PodSecurityContext { + if in == nil { + return nil + } + out := new(PodSecurityContext) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PodSignature) DeepCopyInto(out *PodSignature) { + *out = *in + if in.PodController != nil { + in, out := &in.PodController, &out.PodController + if *in == nil { + *out = nil + } else { + *out = new(v1.OwnerReference) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSignature. +func (in *PodSignature) DeepCopy() *PodSignature { + if in == nil { + return nil + } + out := new(PodSignature) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSpec) DeepCopyInto(out *PodSpec) { + *out = *in + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InitContainers != nil { + in, out := &in.InitContainers, &out.InitContainers + *out = make([]Container, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]Container, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TerminationGracePeriodSeconds != nil { + in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.ActiveDeadlineSeconds != nil { + in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.AutomountServiceAccountToken != nil { + in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + if *in == nil { + *out = nil + } else { + *out = new(PodSecurityContext) + (*in).DeepCopyInto(*out) + } + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + if *in == nil { + *out = nil + } else { + *out = new(Affinity) + (*in).DeepCopyInto(*out) + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HostAliases != nil { + in, out := &in.HostAliases, &out.HostAliases + *out = make([]HostAlias, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.DNSConfig != nil { + in, out := &in.DNSConfig, &out.DNSConfig + if *in == nil { + *out = nil + } else { + *out = new(PodDNSConfig) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSpec. 
+func (in *PodSpec) DeepCopy() *PodSpec { + if in == nil { + return nil + } + out := new(PodSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodStatus) DeepCopyInto(out *PodStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]PodCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + if *in == nil { + *out = nil + } else { + *out = new(v1.Time) + (*in).DeepCopyInto(*out) + } + } + if in.InitContainerStatuses != nil { + in, out := &in.InitContainerStatuses, &out.InitContainerStatuses + *out = make([]ContainerStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ContainerStatuses != nil { + in, out := &in.ContainerStatuses, &out.ContainerStatuses + *out = make([]ContainerStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodStatus. +func (in *PodStatus) DeepCopy() *PodStatus { + if in == nil { + return nil + } + out := new(PodStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodStatusResult) DeepCopyInto(out *PodStatusResult) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodStatusResult. +func (in *PodStatusResult) DeepCopy() *PodStatusResult { + if in == nil { + return nil + } + out := new(PodStatusResult) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodStatusResult) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodTemplate) DeepCopyInto(out *PodTemplate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Template.DeepCopyInto(&out.Template) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplate. +func (in *PodTemplate) DeepCopy() *PodTemplate { + if in == nil { + return nil + } + out := new(PodTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodTemplate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodTemplateList) DeepCopyInto(out *PodTemplateList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodTemplate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplateList. 
+func (in *PodTemplateList) DeepCopy() *PodTemplateList { + if in == nil { + return nil + } + out := new(PodTemplateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodTemplateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodTemplateSpec) DeepCopyInto(out *PodTemplateSpec) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplateSpec. +func (in *PodTemplateSpec) DeepCopy() *PodTemplateSpec { + if in == nil { + return nil + } + out := new(PodTemplateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PortworxVolumeSource) DeepCopyInto(out *PortworxVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortworxVolumeSource. +func (in *PortworxVolumeSource) DeepCopy() *PortworxVolumeSource { + if in == nil { + return nil + } + out := new(PortworxVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Preconditions) DeepCopyInto(out *Preconditions) { + *out = *in + if in.UID != nil { + in, out := &in.UID, &out.UID + if *in == nil { + *out = nil + } else { + *out = new(types.UID) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Preconditions. +func (in *Preconditions) DeepCopy() *Preconditions { + if in == nil { + return nil + } + out := new(Preconditions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PreferAvoidPodsEntry) DeepCopyInto(out *PreferAvoidPodsEntry) { + *out = *in + in.PodSignature.DeepCopyInto(&out.PodSignature) + in.EvictionTime.DeepCopyInto(&out.EvictionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreferAvoidPodsEntry. +func (in *PreferAvoidPodsEntry) DeepCopy() *PreferAvoidPodsEntry { + if in == nil { + return nil + } + out := new(PreferAvoidPodsEntry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PreferredSchedulingTerm) DeepCopyInto(out *PreferredSchedulingTerm) { + *out = *in + in.Preference.DeepCopyInto(&out.Preference) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreferredSchedulingTerm. +func (in *PreferredSchedulingTerm) DeepCopy() *PreferredSchedulingTerm { + if in == nil { + return nil + } + out := new(PreferredSchedulingTerm) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Probe) DeepCopyInto(out *Probe) { + *out = *in + in.Handler.DeepCopyInto(&out.Handler) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Probe. +func (in *Probe) DeepCopy() *Probe { + if in == nil { + return nil + } + out := new(Probe) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectedVolumeSource) DeepCopyInto(out *ProjectedVolumeSource) { + *out = *in + if in.Sources != nil { + in, out := &in.Sources, &out.Sources + *out = make([]VolumeProjection, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectedVolumeSource. +func (in *ProjectedVolumeSource) DeepCopy() *ProjectedVolumeSource { + if in == nil { + return nil + } + out := new(ProjectedVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QuobyteVolumeSource) DeepCopyInto(out *QuobyteVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuobyteVolumeSource. +func (in *QuobyteVolumeSource) DeepCopy() *QuobyteVolumeSource { + if in == nil { + return nil + } + out := new(QuobyteVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RBDPersistentVolumeSource) DeepCopyInto(out *RBDPersistentVolumeSource) { + *out = *in + if in.CephMonitors != nil { + in, out := &in.CephMonitors, &out.CephMonitors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(SecretReference) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RBDPersistentVolumeSource. +func (in *RBDPersistentVolumeSource) DeepCopy() *RBDPersistentVolumeSource { + if in == nil { + return nil + } + out := new(RBDPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RBDVolumeSource) DeepCopyInto(out *RBDVolumeSource) { + *out = *in + if in.CephMonitors != nil { + in, out := &in.CephMonitors, &out.CephMonitors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(LocalObjectReference) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RBDVolumeSource. +func (in *RBDVolumeSource) DeepCopy() *RBDVolumeSource { + if in == nil { + return nil + } + out := new(RBDVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RangeAllocation) DeepCopyInto(out *RangeAllocation) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RangeAllocation. +func (in *RangeAllocation) DeepCopy() *RangeAllocation { + if in == nil { + return nil + } + out := new(RangeAllocation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RangeAllocation) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicationController) DeepCopyInto(out *ReplicationController) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationController. +func (in *ReplicationController) DeepCopy() *ReplicationController { + if in == nil { + return nil + } + out := new(ReplicationController) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReplicationController) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicationControllerCondition) DeepCopyInto(out *ReplicationControllerCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerCondition. +func (in *ReplicationControllerCondition) DeepCopy() *ReplicationControllerCondition { + if in == nil { + return nil + } + out := new(ReplicationControllerCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicationControllerList) DeepCopyInto(out *ReplicationControllerList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ReplicationController, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerList. +func (in *ReplicationControllerList) DeepCopy() *ReplicationControllerList { + if in == nil { + return nil + } + out := new(ReplicationControllerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReplicationControllerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReplicationControllerSpec) DeepCopyInto(out *ReplicationControllerSpec) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Template != nil { + in, out := &in.Template, &out.Template + if *in == nil { + *out = nil + } else { + *out = new(PodTemplateSpec) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerSpec. +func (in *ReplicationControllerSpec) DeepCopy() *ReplicationControllerSpec { + if in == nil { + return nil + } + out := new(ReplicationControllerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicationControllerStatus) DeepCopyInto(out *ReplicationControllerStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ReplicationControllerCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerStatus. +func (in *ReplicationControllerStatus) DeepCopy() *ReplicationControllerStatus { + if in == nil { + return nil + } + out := new(ReplicationControllerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceFieldSelector) DeepCopyInto(out *ResourceFieldSelector) { + *out = *in + out.Divisor = in.Divisor.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceFieldSelector. +func (in *ResourceFieldSelector) DeepCopy() *ResourceFieldSelector { + if in == nil { + return nil + } + out := new(ResourceFieldSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceQuota) DeepCopyInto(out *ResourceQuota) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuota. +func (in *ResourceQuota) DeepCopy() *ResourceQuota { + if in == nil { + return nil + } + out := new(ResourceQuota) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceQuota) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceQuotaList) DeepCopyInto(out *ResourceQuotaList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceQuota, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaList. 
+func (in *ResourceQuotaList) DeepCopy() *ResourceQuotaList { + if in == nil { + return nil + } + out := new(ResourceQuotaList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceQuotaList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceQuotaSpec) DeepCopyInto(out *ResourceQuotaSpec) { + *out = *in + if in.Hard != nil { + in, out := &in.Hard, &out.Hard + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]ResourceQuotaScope, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaSpec. +func (in *ResourceQuotaSpec) DeepCopy() *ResourceQuotaSpec { + if in == nil { + return nil + } + out := new(ResourceQuotaSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceQuotaStatus) DeepCopyInto(out *ResourceQuotaStatus) { + *out = *in + if in.Hard != nil { + in, out := &in.Hard, &out.Hard + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Used != nil { + in, out := &in.Used, &out.Used + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaStatus. +func (in *ResourceQuotaStatus) DeepCopy() *ResourceQuotaStatus { + if in == nil { + return nil + } + out := new(ResourceQuotaStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceRequirements) DeepCopyInto(out *ResourceRequirements) { + *out = *in + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRequirements. +func (in *ResourceRequirements) DeepCopy() *ResourceRequirements { + if in == nil { + return nil + } + out := new(ResourceRequirements) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SELinuxOptions) DeepCopyInto(out *SELinuxOptions) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SELinuxOptions. +func (in *SELinuxOptions) DeepCopy() *SELinuxOptions { + if in == nil { + return nil + } + out := new(SELinuxOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScaleIOPersistentVolumeSource) DeepCopyInto(out *ScaleIOPersistentVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(SecretReference) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleIOPersistentVolumeSource. +func (in *ScaleIOPersistentVolumeSource) DeepCopy() *ScaleIOPersistentVolumeSource { + if in == nil { + return nil + } + out := new(ScaleIOPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleIOVolumeSource) DeepCopyInto(out *ScaleIOVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(LocalObjectReference) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleIOVolumeSource. +func (in *ScaleIOVolumeSource) DeepCopy() *ScaleIOVolumeSource { + if in == nil { + return nil + } + out := new(ScaleIOVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Secret) DeepCopyInto(out *Secret) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make(map[string][]byte, len(*in)) + for key, val := range *in { + if val == nil { + (*out)[key] = nil + } else { + (*out)[key] = make([]byte, len(val)) + copy((*out)[key], val) + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Secret. +func (in *Secret) DeepCopy() *Secret { + if in == nil { + return nil + } + out := new(Secret) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Secret) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretEnvSource) DeepCopyInto(out *SecretEnvSource) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretEnvSource. +func (in *SecretEnvSource) DeepCopy() *SecretEnvSource { + if in == nil { + return nil + } + out := new(SecretEnvSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretKeySelector) DeepCopyInto(out *SecretKeySelector) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretKeySelector. 
+func (in *SecretKeySelector) DeepCopy() *SecretKeySelector { + if in == nil { + return nil + } + out := new(SecretKeySelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretList) DeepCopyInto(out *SecretList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Secret, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretList. +func (in *SecretList) DeepCopy() *SecretList { + if in == nil { + return nil + } + out := new(SecretList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SecretList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretProjection) DeepCopyInto(out *SecretProjection) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretProjection. +func (in *SecretProjection) DeepCopy() *SecretProjection { + if in == nil { + return nil + } + out := new(SecretProjection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretReference) DeepCopyInto(out *SecretReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretReference. +func (in *SecretReference) DeepCopy() *SecretReference { + if in == nil { + return nil + } + out := new(SecretReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretVolumeSource) DeepCopyInto(out *SecretVolumeSource) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretVolumeSource. +func (in *SecretVolumeSource) DeepCopy() *SecretVolumeSource { + if in == nil { + return nil + } + out := new(SecretVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecurityContext) DeepCopyInto(out *SecurityContext) { + *out = *in + if in.Capabilities != nil { + in, out := &in.Capabilities, &out.Capabilities + if *in == nil { + *out = nil + } else { + *out = new(Capabilities) + (*in).DeepCopyInto(*out) + } + } + if in.Privileged != nil { + in, out := &in.Privileged, &out.Privileged + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + if in.SELinuxOptions != nil { + in, out := &in.SELinuxOptions, &out.SELinuxOptions + if *in == nil { + *out = nil + } else { + *out = new(SELinuxOptions) + **out = **in + } + } + if in.RunAsUser != nil { + in, out := &in.RunAsUser, &out.RunAsUser + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.RunAsNonRoot != nil { + in, out := &in.RunAsNonRoot, &out.RunAsNonRoot + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + if in.ReadOnlyRootFilesystem != nil { + in, out := &in.ReadOnlyRootFilesystem, &out.ReadOnlyRootFilesystem + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + if in.AllowPrivilegeEscalation != nil { + in, out := &in.AllowPrivilegeEscalation, &out.AllowPrivilegeEscalation + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityContext. +func (in *SecurityContext) DeepCopy() *SecurityContext { + if in == nil { + return nil + } + out := new(SecurityContext) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SerializedReference) DeepCopyInto(out *SerializedReference) { + *out = *in + out.TypeMeta = in.TypeMeta + out.Reference = in.Reference + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializedReference. +func (in *SerializedReference) DeepCopy() *SerializedReference { + if in == nil { + return nil + } + out := new(SerializedReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SerializedReference) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Service) DeepCopyInto(out *Service) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Service. +func (in *Service) DeepCopy() *Service { + if in == nil { + return nil + } + out := new(Service) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Service) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceAccount) DeepCopyInto(out *ServiceAccount) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]ObjectReference, len(*in)) + copy(*out, *in) + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.AutomountServiceAccountToken != nil { + in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccount. +func (in *ServiceAccount) DeepCopy() *ServiceAccount { + if in == nil { + return nil + } + out := new(ServiceAccount) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceAccount) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountList) DeepCopyInto(out *ServiceAccountList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceAccount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountList. +func (in *ServiceAccountList) DeepCopy() *ServiceAccountList { + if in == nil { + return nil + } + out := new(ServiceAccountList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceAccountList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceList) DeepCopyInto(out *ServiceList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Service, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceList. +func (in *ServiceList) DeepCopy() *ServiceList { + if in == nil { + return nil + } + out := new(ServiceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServicePort) DeepCopyInto(out *ServicePort) { + *out = *in + out.TargetPort = in.TargetPort + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicePort. 
+func (in *ServicePort) DeepCopy() *ServicePort { + if in == nil { + return nil + } + out := new(ServicePort) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceProxyOptions) DeepCopyInto(out *ServiceProxyOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceProxyOptions. +func (in *ServiceProxyOptions) DeepCopy() *ServiceProxyOptions { + if in == nil { + return nil + } + out := new(ServiceProxyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceProxyOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]ServicePort, len(*in)) + copy(*out, *in) + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ExternalIPs != nil { + in, out := &in.ExternalIPs, &out.ExternalIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SessionAffinityConfig != nil { + in, out := &in.SessionAffinityConfig, &out.SessionAffinityConfig + if *in == nil { + *out = nil + } else { + *out = new(SessionAffinityConfig) + (*in).DeepCopyInto(*out) + } + } + if in.LoadBalancerSourceRanges != nil { + in, out := &in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceSpec. +func (in *ServiceSpec) DeepCopy() *ServiceSpec { + if in == nil { + return nil + } + out := new(ServiceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceStatus) DeepCopyInto(out *ServiceStatus) { + *out = *in + in.LoadBalancer.DeepCopyInto(&out.LoadBalancer) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceStatus. +func (in *ServiceStatus) DeepCopy() *ServiceStatus { + if in == nil { + return nil + } + out := new(ServiceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SessionAffinityConfig) DeepCopyInto(out *SessionAffinityConfig) { + *out = *in + if in.ClientIP != nil { + in, out := &in.ClientIP, &out.ClientIP + if *in == nil { + *out = nil + } else { + *out = new(ClientIPConfig) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SessionAffinityConfig. +func (in *SessionAffinityConfig) DeepCopy() *SessionAffinityConfig { + if in == nil { + return nil + } + out := new(SessionAffinityConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageOSPersistentVolumeSource) DeepCopyInto(out *StorageOSPersistentVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(ObjectReference) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageOSPersistentVolumeSource. +func (in *StorageOSPersistentVolumeSource) DeepCopy() *StorageOSPersistentVolumeSource { + if in == nil { + return nil + } + out := new(StorageOSPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageOSVolumeSource) DeepCopyInto(out *StorageOSVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(LocalObjectReference) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageOSVolumeSource. +func (in *StorageOSVolumeSource) DeepCopy() *StorageOSVolumeSource { + if in == nil { + return nil + } + out := new(StorageOSVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Sysctl) DeepCopyInto(out *Sysctl) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Sysctl. +func (in *Sysctl) DeepCopy() *Sysctl { + if in == nil { + return nil + } + out := new(Sysctl) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPSocketAction) DeepCopyInto(out *TCPSocketAction) { + *out = *in + out.Port = in.Port + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPSocketAction. +func (in *TCPSocketAction) DeepCopy() *TCPSocketAction { + if in == nil { + return nil + } + out := new(TCPSocketAction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Taint) DeepCopyInto(out *Taint) { + *out = *in + if in.TimeAdded != nil { + in, out := &in.TimeAdded, &out.TimeAdded + if *in == nil { + *out = nil + } else { + *out = new(v1.Time) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Taint. +func (in *Taint) DeepCopy() *Taint { + if in == nil { + return nil + } + out := new(Taint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Toleration) DeepCopyInto(out *Toleration) { + *out = *in + if in.TolerationSeconds != nil { + in, out := &in.TolerationSeconds, &out.TolerationSeconds + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Toleration. +func (in *Toleration) DeepCopy() *Toleration { + if in == nil { + return nil + } + out := new(Toleration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Volume) DeepCopyInto(out *Volume) { + *out = *in + in.VolumeSource.DeepCopyInto(&out.VolumeSource) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Volume. +func (in *Volume) DeepCopy() *Volume { + if in == nil { + return nil + } + out := new(Volume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeDevice) DeepCopyInto(out *VolumeDevice) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeDevice. +func (in *VolumeDevice) DeepCopy() *VolumeDevice { + if in == nil { + return nil + } + out := new(VolumeDevice) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeMount) DeepCopyInto(out *VolumeMount) { + *out = *in + if in.MountPropagation != nil { + in, out := &in.MountPropagation, &out.MountPropagation + if *in == nil { + *out = nil + } else { + *out = new(MountPropagationMode) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeMount. +func (in *VolumeMount) DeepCopy() *VolumeMount { + if in == nil { + return nil + } + out := new(VolumeMount) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeProjection) DeepCopyInto(out *VolumeProjection) { + *out = *in + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + if *in == nil { + *out = nil + } else { + *out = new(SecretProjection) + (*in).DeepCopyInto(*out) + } + } + if in.DownwardAPI != nil { + in, out := &in.DownwardAPI, &out.DownwardAPI + if *in == nil { + *out = nil + } else { + *out = new(DownwardAPIProjection) + (*in).DeepCopyInto(*out) + } + } + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + if *in == nil { + *out = nil + } else { + *out = new(ConfigMapProjection) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeProjection. +func (in *VolumeProjection) DeepCopy() *VolumeProjection { + if in == nil { + return nil + } + out := new(VolumeProjection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VolumeSource) DeepCopyInto(out *VolumeSource) { + *out = *in + if in.HostPath != nil { + in, out := &in.HostPath, &out.HostPath + if *in == nil { + *out = nil + } else { + *out = new(HostPathVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.EmptyDir != nil { + in, out := &in.EmptyDir, &out.EmptyDir + if *in == nil { + *out = nil + } else { + *out = new(EmptyDirVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.GCEPersistentDisk != nil { + in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk + if *in == nil { + *out = nil + } else { + *out = new(GCEPersistentDiskVolumeSource) + **out = **in + } + } + if in.AWSElasticBlockStore != nil { + in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore + if *in == nil { + *out = nil + } else { + *out = new(AWSElasticBlockStoreVolumeSource) + **out = **in + } + } + if in.GitRepo != nil { + in, out := &in.GitRepo, &out.GitRepo + if *in == nil { + *out = nil + } else { + *out = new(GitRepoVolumeSource) + **out = **in + } + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + if *in == nil { + *out = nil + } else { + *out = new(SecretVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.NFS != nil { + in, out := &in.NFS, &out.NFS + if *in == nil { + *out = nil + } else { + *out = new(NFSVolumeSource) + **out = **in + } + } + if in.ISCSI != nil { + in, out := &in.ISCSI, &out.ISCSI + if *in == nil { + *out = nil + } else { + *out = new(ISCSIVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.Glusterfs != nil { + in, out := &in.Glusterfs, &out.Glusterfs + if *in == nil { + *out = nil + } else { + *out = new(GlusterfsVolumeSource) + **out = **in + } + } + if in.PersistentVolumeClaim != nil { + in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim + if *in == nil { + *out = nil + } else { + *out = new(PersistentVolumeClaimVolumeSource) + **out = **in + } + } + if in.RBD != nil { + in, out := &in.RBD, &out.RBD + if *in == nil { + *out = nil + } else { + *out = new(RBDVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.Quobyte != nil { + in, out := &in.Quobyte, &out.Quobyte + if *in == nil { + *out = nil + } else { + *out = new(QuobyteVolumeSource) + **out = **in + } + } + if in.FlexVolume != nil { + in, out := &in.FlexVolume, &out.FlexVolume + if *in == nil { + *out = nil + } else { + *out = new(FlexVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.Cinder != nil { + in, out := &in.Cinder, &out.Cinder + if *in == nil { + *out = nil + } else { + *out = new(CinderVolumeSource) + **out = **in + } + } + if in.CephFS != nil { + in, out := &in.CephFS, &out.CephFS + if *in == nil { + *out = nil + } else { + *out = new(CephFSVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.Flocker != nil { + in, out := &in.Flocker, &out.Flocker + if *in == nil { + *out = nil + } else { + *out = new(FlockerVolumeSource) + **out = **in + } + } + if in.DownwardAPI != nil { + in, out := &in.DownwardAPI, &out.DownwardAPI + if *in == nil { + *out = nil + } else { + *out = new(DownwardAPIVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.FC != nil { + in, out := &in.FC, &out.FC + if *in == nil { + *out = nil + } else { + *out = new(FCVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.AzureFile != nil { + in, out := &in.AzureFile, &out.AzureFile + if *in == nil { + *out = nil + } else { + *out = new(AzureFileVolumeSource) + **out = **in + } + } + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + if *in == nil { + *out = nil + } else { + *out = new(ConfigMapVolumeSource) + 
(*in).DeepCopyInto(*out) + } + } + if in.VsphereVolume != nil { + in, out := &in.VsphereVolume, &out.VsphereVolume + if *in == nil { + *out = nil + } else { + *out = new(VsphereVirtualDiskVolumeSource) + **out = **in + } + } + if in.AzureDisk != nil { + in, out := &in.AzureDisk, &out.AzureDisk + if *in == nil { + *out = nil + } else { + *out = new(AzureDiskVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.PhotonPersistentDisk != nil { + in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk + if *in == nil { + *out = nil + } else { + *out = new(PhotonPersistentDiskVolumeSource) + **out = **in + } + } + if in.Projected != nil { + in, out := &in.Projected, &out.Projected + if *in == nil { + *out = nil + } else { + *out = new(ProjectedVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.PortworxVolume != nil { + in, out := &in.PortworxVolume, &out.PortworxVolume + if *in == nil { + *out = nil + } else { + *out = new(PortworxVolumeSource) + **out = **in + } + } + if in.ScaleIO != nil { + in, out := &in.ScaleIO, &out.ScaleIO + if *in == nil { + *out = nil + } else { + *out = new(ScaleIOVolumeSource) + (*in).DeepCopyInto(*out) + } + } + if in.StorageOS != nil { + in, out := &in.StorageOS, &out.StorageOS + if *in == nil { + *out = nil + } else { + *out = new(StorageOSVolumeSource) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSource. +func (in *VolumeSource) DeepCopy() *VolumeSource { + if in == nil { + return nil + } + out := new(VolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VsphereVirtualDiskVolumeSource) DeepCopyInto(out *VsphereVirtualDiskVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VsphereVirtualDiskVolumeSource. +func (in *VsphereVirtualDiskVolumeSource) DeepCopy() *VsphereVirtualDiskVolumeSource { + if in == nil { + return nil + } + out := new(VsphereVirtualDiskVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WeightedPodAffinityTerm) DeepCopyInto(out *WeightedPodAffinityTerm) { + *out = *in + in.PodAffinityTerm.DeepCopyInto(&out.PodAffinityTerm) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeightedPodAffinityTerm. 
+func (in *WeightedPodAffinityTerm) DeepCopy() *WeightedPodAffinityTerm { + if in == nil { + return nil + } + out := new(WeightedPodAffinityTerm) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/events/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/events/BUILD new file mode 100644 index 000000000000..13bf1079d832 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/events/BUILD @@ -0,0 +1,34 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "register.go", + ], + importpath = "k8s.io/kubernetes/pkg/apis/events", + visibility = ["//visibility:public"], + deps = [ + "//pkg/apis/core:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/apis/events/install:all-srcs", + "//pkg/apis/events/v1beta1:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/events/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/events/doc.go new file mode 100644 index 000000000000..187271ac933f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/events/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +groupName=events.k8s.io +package events diff --git a/vendor/k8s.io/kubernetes/pkg/apis/events/install/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/events/install/BUILD new file mode 100644 index 000000000000..01c67e2ed5a8 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/events/install/BUILD @@ -0,0 +1,30 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["install.go"], + importpath = "k8s.io/kubernetes/pkg/apis/events/install", + visibility = ["//visibility:public"], + deps = [ + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/events:go_default_library", + "//pkg/apis/events/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/events/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/events/install/install.go new file mode 100644 index 000000000000..9dbc1b4d47dc --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/events/install/install.go @@ -0,0 +1,48 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package install installs the events API group, making it available as +// an option to all of the API encoding/decoding machinery. 
+package install + +import ( + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/api/legacyscheme" + "k8s.io/kubernetes/pkg/apis/events" + "k8s.io/kubernetes/pkg/apis/events/v1beta1" +) + +func init() { + Install(legacyscheme.GroupFactoryRegistry, legacyscheme.Registry, legacyscheme.Scheme) +} + +// Install registers the API group and adds types to a scheme +func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { + if err := announced.NewGroupMetaFactory( + &announced.GroupMetaFactoryArgs{ + GroupName: events.GroupName, + VersionPreferenceOrder: []string{v1beta1.SchemeGroupVersion.Version}, + AddInternalObjectsToScheme: events.AddToScheme, + }, + announced.VersionToSchemeFunc{ + v1beta1.SchemeGroupVersion.Version: v1beta1.AddToScheme, + }, + ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { + panic(err) + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/events/register.go b/vendor/k8s.io/kubernetes/pkg/apis/events/register.go new file mode 100644 index 000000000000..6156e800688d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/events/register.go @@ -0,0 +1,52 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package events + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kubernetes/pkg/apis/core" +) + +// GroupName is the group name use in this package +const GroupName = "events.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &core.Event{}, + &core.EventList{}, + ) + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/events/v1beta1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/events/v1beta1/BUILD new file mode 100644 index 000000000000..bef76b25529b --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/events/v1beta1/BUILD @@ -0,0 +1,37 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "conversion.go", + "doc.go", + "register.go", + "zz_generated.conversion.go", + "zz_generated.defaults.go", + ], + importpath = "k8s.io/kubernetes/pkg/apis/events/v1beta1", + visibility = ["//visibility:public"], + deps = [ + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/v1:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/api/events/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/events/v1beta1/conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/events/v1beta1/conversion.go new file mode 100644 index 000000000000..3a2be8e31684 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/events/v1beta1/conversion.go @@ -0,0 +1,58 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/events/v1beta1" + conversion "k8s.io/apimachinery/pkg/conversion" + k8s_api "k8s.io/kubernetes/pkg/apis/core" + k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1" +) + +func Convert_v1beta1_Event_To_core_Event(in *v1beta1.Event, out *k8s_api.Event, s conversion.Scope) error { + if err := autoConvert_v1beta1_Event_To_core_Event(in, out, s); err != nil { + return err + } + if err := k8s_api_v1.Convert_v1_ObjectReference_To_core_ObjectReference(&in.Regarding, &out.InvolvedObject, s); err != nil { + return err + } + if err := k8s_api_v1.Convert_v1_EventSource_To_core_EventSource(&in.DeprecatedSource, &out.Source, s); err != nil { + return err + } + out.Message = in.Note + out.FirstTimestamp = in.DeprecatedFirstTimestamp + out.LastTimestamp = in.DeprecatedLastTimestamp + out.Count = in.DeprecatedCount + return nil +} + +func Convert_core_Event_To_v1beta1_Event(in *k8s_api.Event, out *v1beta1.Event, s conversion.Scope) error { + if err := autoConvert_core_Event_To_v1beta1_Event(in, out, s); err != nil { + return err + } + if err := k8s_api_v1.Convert_core_ObjectReference_To_v1_ObjectReference(&in.InvolvedObject, &out.Regarding, s); err != nil { + return err + } + if err := k8s_api_v1.Convert_core_EventSource_To_v1_EventSource(&in.Source, &out.DeprecatedSource, s); err != nil { + return err + } + out.Note = in.Message + out.DeprecatedFirstTimestamp = in.FirstTimestamp + out.DeprecatedLastTimestamp = in.LastTimestamp + out.DeprecatedCount = in.Count + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/events/v1beta1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/events/v1beta1/doc.go new file mode 100644 index 000000000000..8144152cefd3 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/events/v1beta1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/events +// +k8s:conversion-gen-external-types=k8s.io/api/events/v1beta1 +// +k8s:defaulter-gen=TypeMeta +// +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/events/v1beta1 + +// +groupName=events.k8s.io +package v1beta1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/events/v1beta1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/events/v1beta1/register.go new file mode 100644 index 000000000000..7ff9379602fc --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/events/v1beta1/register.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + eventsv1beta1 "k8s.io/api/events/v1beta1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "events.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + localSchemeBuilder = &eventsv1beta1.SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(RegisterDefaults) +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/events/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/events/v1beta1/zz_generated.conversion.go new file mode 100644 index 000000000000..345e3e96041a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/events/v1beta1/zz_generated.conversion.go @@ -0,0 +1,151 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1beta1 + +import ( + v1 "k8s.io/api/core/v1" + v1beta1 "k8s.io/api/events/v1beta1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + core "k8s.io/kubernetes/pkg/apis/core" + unsafe "unsafe" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1beta1_Event_To_core_Event, + Convert_core_Event_To_v1beta1_Event, + Convert_v1beta1_EventList_To_core_EventList, + Convert_core_EventList_To_v1beta1_EventList, + Convert_v1beta1_EventSeries_To_core_EventSeries, + Convert_core_EventSeries_To_v1beta1_EventSeries, + ) +} + +func autoConvert_v1beta1_Event_To_core_Event(in *v1beta1.Event, out *core.Event, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.EventTime = in.EventTime + out.Series = (*core.EventSeries)(unsafe.Pointer(in.Series)) + out.ReportingController = in.ReportingController + out.ReportingInstance = in.ReportingInstance + out.Action = in.Action + out.Reason = in.Reason + // WARNING: in.Regarding requires manual conversion: does not exist in peer-type + out.Related = (*core.ObjectReference)(unsafe.Pointer(in.Related)) + // WARNING: in.Note requires manual conversion: does not exist in peer-type + out.Type = in.Type + // WARNING: in.DeprecatedSource requires manual conversion: does not exist in peer-type + // WARNING: in.DeprecatedFirstTimestamp requires manual conversion: does not exist in peer-type + // WARNING: in.DeprecatedLastTimestamp requires manual conversion: does not exist in peer-type + // WARNING: in.DeprecatedCount requires manual conversion: does not exist in peer-type + return nil +} + +func autoConvert_core_Event_To_v1beta1_Event(in *core.Event, out *v1beta1.Event, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + // WARNING: in.InvolvedObject requires manual conversion: does not exist in peer-type + out.Reason = in.Reason + // WARNING: in.Message requires manual conversion: does not exist in peer-type + // WARNING: in.Source requires manual conversion: does not exist in peer-type + // WARNING: in.FirstTimestamp requires manual conversion: does not exist in peer-type + // WARNING: in.LastTimestamp requires manual conversion: does not exist in peer-type + // WARNING: in.Count requires manual conversion: does not exist in peer-type + out.Type = in.Type + out.EventTime = in.EventTime + out.Series = (*v1beta1.EventSeries)(unsafe.Pointer(in.Series)) + out.Action = in.Action + out.Related = (*v1.ObjectReference)(unsafe.Pointer(in.Related)) + out.ReportingController = in.ReportingController + out.ReportingInstance = in.ReportingInstance + return nil +} + +func autoConvert_v1beta1_EventList_To_core_EventList(in *v1beta1.EventList, out *core.EventList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]core.Event, len(*in)) + for i := range *in { + if err := Convert_v1beta1_Event_To_core_Event(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1beta1_EventList_To_core_EventList is an autogenerated conversion function. 
+func Convert_v1beta1_EventList_To_core_EventList(in *v1beta1.EventList, out *core.EventList, s conversion.Scope) error { + return autoConvert_v1beta1_EventList_To_core_EventList(in, out, s) +} + +func autoConvert_core_EventList_To_v1beta1_EventList(in *core.EventList, out *v1beta1.EventList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1beta1.Event, len(*in)) + for i := range *in { + if err := Convert_core_Event_To_v1beta1_Event(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_core_EventList_To_v1beta1_EventList is an autogenerated conversion function. +func Convert_core_EventList_To_v1beta1_EventList(in *core.EventList, out *v1beta1.EventList, s conversion.Scope) error { + return autoConvert_core_EventList_To_v1beta1_EventList(in, out, s) +} + +func autoConvert_v1beta1_EventSeries_To_core_EventSeries(in *v1beta1.EventSeries, out *core.EventSeries, s conversion.Scope) error { + out.Count = in.Count + out.LastObservedTime = in.LastObservedTime + out.State = core.EventSeriesState(in.State) + return nil +} + +// Convert_v1beta1_EventSeries_To_core_EventSeries is an autogenerated conversion function. +func Convert_v1beta1_EventSeries_To_core_EventSeries(in *v1beta1.EventSeries, out *core.EventSeries, s conversion.Scope) error { + return autoConvert_v1beta1_EventSeries_To_core_EventSeries(in, out, s) +} + +func autoConvert_core_EventSeries_To_v1beta1_EventSeries(in *core.EventSeries, out *v1beta1.EventSeries, s conversion.Scope) error { + out.Count = in.Count + out.LastObservedTime = in.LastObservedTime + out.State = v1beta1.EventSeriesState(in.State) + return nil +} + +// Convert_core_EventSeries_To_v1beta1_EventSeries is an autogenerated conversion function. +func Convert_core_EventSeries_To_v1beta1_EventSeries(in *core.EventSeries, out *v1beta1.EventSeries, s conversion.Scope) error { + return autoConvert_core_EventSeries_To_v1beta1_EventSeries(in, out, s) +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/events/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/events/v1beta1/zz_generated.defaults.go new file mode 100644 index 000000000000..e24e70be38b4 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/events/v1beta1/zz_generated.defaults.go @@ -0,0 +1,32 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. 
+func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/extensions/BUILD index 95f6365612c7..c52d2a9ac9ec 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["helpers_test.go"], + importpath = "k8s.io/kubernetes/pkg/apis/extensions", library = ":go_default_library", ) @@ -21,12 +22,13 @@ go_library( "types.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/kubernetes/pkg/apis/extensions", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/autoscaling:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/networking:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/doc.go index 2bbb71d0571d..a48d5d279299 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/doc.go @@ -14,6 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package package extensions diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/install/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/extensions/install/BUILD index f223d80385df..886ce927a26e 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/install/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/install/BUILD @@ -8,8 +8,9 @@ load( go_library( name = "go_default_library", srcs = ["install.go"], + importpath = "k8s.io/kubernetes/pkg/apis/extensions/install", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/extensions:go_default_library", "//pkg/apis/extensions/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/install/install.go index 7e8cc9c21692..dfa89910120c 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/install/install.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/install/install.go @@ -23,13 +23,13 @@ import ( "k8s.io/apimachinery/pkg/apimachinery/registered" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" ) func init() { - Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) + Install(legacyscheme.GroupFactoryRegistry, legacyscheme.Registry, legacyscheme.Scheme) } // Install registers the API group and adds types to a scheme @@ -38,7 +38,7 @@ func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *r &announced.GroupMetaFactoryArgs{ GroupName: extensions.GroupName, VersionPreferenceOrder: []string{v1beta1.SchemeGroupVersion.Version}, - RootScopedKinds: 
sets.NewString("PodSecurityPolicy", "ThirdPartyResource"), + RootScopedKinds: sets.NewString("PodSecurityPolicy"), AddInternalObjectsToScheme: extensions.AddToScheme, }, announced.VersionToSchemeFunc{ diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/register.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/register.go index 780f58dc7479..48137fc6962d 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/register.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/register.go @@ -19,6 +19,7 @@ package extensions import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kubernetes/pkg/apis/autoscaling" "k8s.io/kubernetes/pkg/apis/networking" ) @@ -43,7 +44,7 @@ var ( AddToScheme = SchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { // TODO this gets cleaned up when the types are fixed scheme.AddKnownTypes(SchemeGroupVersion, @@ -51,19 +52,15 @@ func addKnownTypes(scheme *runtime.Scheme) error { &DeploymentList{}, &DeploymentRollback{}, &ReplicationControllerDummy{}, - &Scale{}, - &ThirdPartyResource{}, - &ThirdPartyResourceList{}, &DaemonSetList{}, &DaemonSet{}, - &ThirdPartyResourceData{}, - &ThirdPartyResourceDataList{}, &Ingress{}, &IngressList{}, &ReplicaSet{}, &ReplicaSetList{}, &PodSecurityPolicy{}, &PodSecurityPolicyList{}, + &autoscaling.Scale{}, &networking.NetworkPolicy{}, &networking.NetworkPolicyList{}, ) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/types.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/types.go index 028160be456c..e36972846bcf 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/types.go @@ -32,7 +32,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) const ( @@ -42,44 +42,6 @@ const ( SysctlsPodSecurityPolicyAnnotationKey string = "security.alpha.kubernetes.io/sysctls" ) -// describes the attributes of a scale subresource -type ScaleSpec struct { - // desired number of instances for the scaled object. - // +optional - Replicas int32 -} - -// represents the current status of a scale subresource. -type ScaleStatus struct { - // actual number of observed instances of the scaled object. - Replicas int32 - - // label query over pods that should match the replicas count. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - // +optional - Selector *metav1.LabelSelector -} - -// +genclient -// +genclient:noVerbs -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// represents a scaling request for a resource. -type Scale struct { - metav1.TypeMeta - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. - // +optional - metav1.ObjectMeta - - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. - // +optional - Spec ScaleSpec - - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. 
- // +optional - Status ScaleStatus -} - // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Dummy definition @@ -111,63 +73,8 @@ type CustomMetricCurrentStatusList struct { } // +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// A ThirdPartyResource is a generic representation of a resource, it is used by add-ons and plugins to add new resource -// types to the API. It consists of one or more Versions of the api. -type ThirdPartyResource struct { - metav1.TypeMeta - - // Standard object metadata - // +optional - metav1.ObjectMeta - - // Description is the description of this object. - // +optional - Description string - - // Versions are versions for this third party object - Versions []APIVersion -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type ThirdPartyResourceList struct { - metav1.TypeMeta - - // Standard list metadata. - // +optional - metav1.ListMeta - - // Items is the list of horizontal pod autoscalers. - Items []ThirdPartyResource -} - -// An APIVersion represents a single concrete version of an object model. -// TODO: we should consider merge this struct with GroupVersion in metav1.go -type APIVersion struct { - // Name of this version (e.g. 'v1'). - Name string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// An internal object, used for versioned storage in etcd. Not exposed to the end user. -type ThirdPartyResourceData struct { - metav1.TypeMeta - // Standard object metadata. - // +optional - metav1.ObjectMeta - - // Data is the raw JSON data for this data. - // +optional - Data []byte -} - -// +genclient -// +genclient:method=GetScale,verb=get,subresource=scale,result=Scale -// +genclient:method=UpdateScale,verb=update,subresource=scale,input=Scale,result=Scale +// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/kubernetes/pkg/apis/autoscaling.Scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type Deployment struct { @@ -524,6 +431,27 @@ type DaemonSetStatus struct { // create the name for the newest ControllerRevision. // +optional CollisionCount *int32 + + // Represents the latest available observations of a DaemonSet's current state. + Conditions []DaemonSetCondition +} + +type DaemonSetConditionType string + +// TODO: Add valid condition types of a DaemonSet. + +// DaemonSetCondition describes the state of a DaemonSet at a certain point. +type DaemonSetCondition struct { + // Type of DaemonSet condition. + Type DaemonSetConditionType + // Status of the condition, one of True, False, Unknown. + Status api.ConditionStatus + // Last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time + // The reason for the condition's last transition. + Reason string + // A human readable message indicating details about the transition. 
+ Message string } // +genclient @@ -573,18 +501,6 @@ type DaemonSetList struct { Items []DaemonSet } -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type ThirdPartyResourceDataList struct { - metav1.TypeMeta - // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta - // Items is a list of third party objects - Items []ThirdPartyResourceData -} - // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -755,11 +671,11 @@ type IngressBackend struct { } // +genclient -// +genclient:method=GetScale,verb=get,subresource=scale,result=Scale -// +genclient:method=UpdateScale,verb=update,subresource=scale,input=Scale,result=Scale +// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/kubernetes/pkg/apis/autoscaling.Scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// ReplicaSet represents the configuration of a replica set. +// ReplicaSet ensures that a specified number of pod replicas are running at any given time. type ReplicaSet struct { metav1.TypeMeta // +optional @@ -888,7 +804,8 @@ type PodSecurityPolicySpec struct { Privileged bool // DefaultAddCapabilities is the default set of capabilities that will be added to the container // unless the pod spec specifically drops the capability. You may not list a capability in both - // DefaultAddCapabilities and RequiredDropCapabilities. + // DefaultAddCapabilities and RequiredDropCapabilities. Capabilities added here are implicitly + // allowed, and need not be included in the AllowedCapabilities list. // +optional DefaultAddCapabilities []api.Capability // RequiredDropCapabilities are the capabilities that will be dropped from the container. These @@ -937,12 +854,17 @@ type PodSecurityPolicySpec struct { // +optional DefaultAllowPrivilegeEscalation *bool // AllowPrivilegeEscalation determines if a pod can request to allow - // privilege escalation. + // privilege escalation. If unspecified, defaults to true. // +optional AllowPrivilegeEscalation bool // AllowedHostPaths is a white list of allowed host paths. Empty indicates that all host paths may be used. // +optional AllowedHostPaths []AllowedHostPath + // AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all + // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes + // is allowed in the "Volumes" field. + // +optional + AllowedFlexVolumes []AllowedFlexVolume } // AllowedHostPath defines the host volume conditions that will be enabled by a policy @@ -962,9 +884,9 @@ type AllowedHostPath struct { // for pods to use. It requires both the start and end to be defined. type HostPortRange struct { // Min is the start of the range, inclusive. - Min int + Min int32 // Max is the end of the range, inclusive. - Max int + Max int32 } // AllowAllCapabilities can be used as a value for the PodSecurityPolicy.AllowAllCapabilities @@ -1002,15 +924,22 @@ var ( Projected FSType = "projected" PortworxVolume FSType = "portworxVolume" ScaleIO FSType = "scaleIO" + CSI FSType = "csi" All FSType = "*" ) +// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. +type AllowedFlexVolume struct { + // Driver is the name of the Flexvolume driver. 
+ Driver string +} + // SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. type SELinuxStrategyOptions struct { // Rule is the strategy that will dictate the allowable labels that may be set. Rule SELinuxStrategy // seLinuxOptions required to run as; required for MustRunAs - // More info: https://git.k8s.io/community/contributors/design-proposals/security_context.md + // More info: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#selinux // +optional SELinuxOptions *api.SELinuxOptions } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/BUILD index 4fe6e2d19fb1..51d6bf1c77a6 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/BUILD @@ -16,15 +16,18 @@ go_library( "zz_generated.conversion.go", "zz_generated.defaults.go", ], + importpath = "k8s.io/kubernetes/pkg/apis/extensions/v1beta1", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/v1:go_default_library", + "//pkg/apis/autoscaling:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/v1:go_default_library", "//pkg/apis/extensions:go_default_library", "//pkg/apis/networking:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", @@ -34,10 +37,12 @@ go_library( go_test( name = "go_default_xtest", srcs = ["defaults_test.go"], + importpath = "k8s.io/kubernetes/pkg/apis/extensions/v1beta1_test", deps = [ ":go_default_library", - "//pkg/api:go_default_library", - "//pkg/api/install:go_default_library", + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/install:go_default_library", "//pkg/apis/extensions/install:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion.go index 04aa3a316e0f..d236a415e13e 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion.go @@ -24,10 +24,12 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/kubernetes/pkg/api" - k8s_api_v1 "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/apis/autoscaling" + api "k8s.io/kubernetes/pkg/apis/core" + k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1" "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/apis/networking" ) @@ -35,8 +37,8 @@ import ( func addConversionFuncs(scheme *runtime.Scheme) error { // Add non-generated conversion functions err := scheme.AddConversionFuncs( - Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus, - Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus, + 
Convert_autoscaling_ScaleStatus_To_v1beta1_ScaleStatus, + Convert_v1beta1_ScaleStatus_To_autoscaling_ScaleStatus, Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec, Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec, Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy, @@ -72,48 +74,35 @@ func addConversionFuncs(scheme *runtime.Scheme) error { return nil } -func Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus(in *extensions.ScaleStatus, out *extensionsv1beta1.ScaleStatus, s conversion.Scope) error { +func Convert_autoscaling_ScaleStatus_To_v1beta1_ScaleStatus(in *autoscaling.ScaleStatus, out *extensionsv1beta1.ScaleStatus, s conversion.Scope) error { out.Replicas = int32(in.Replicas) + out.TargetSelector = in.Selector out.Selector = nil - out.TargetSelector = "" - if in.Selector != nil { - if in.Selector.MatchExpressions == nil || len(in.Selector.MatchExpressions) == 0 { - out.Selector = in.Selector.MatchLabels - } - - selector, err := metav1.LabelSelectorAsSelector(in.Selector) - if err != nil { - return fmt.Errorf("invalid label selector: %v", err) - } - out.TargetSelector = selector.String() + selector, err := metav1.ParseToLabelSelector(in.Selector) + if err != nil { + return fmt.Errorf("failed to parse selector: %v", err) } + if len(selector.MatchExpressions) == 0 { + out.Selector = selector.MatchLabels + } + return nil } -func Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus(in *extensionsv1beta1.ScaleStatus, out *extensions.ScaleStatus, s conversion.Scope) error { +func Convert_v1beta1_ScaleStatus_To_autoscaling_ScaleStatus(in *extensionsv1beta1.ScaleStatus, out *autoscaling.ScaleStatus, s conversion.Scope) error { out.Replicas = in.Replicas - // Normally when 2 fields map to the same internal value we favor the old field, since - // old clients can't be expected to know about new fields but clients that know about the - // new field can be expected to know about the old field (though that's not quite true, due - // to kubectl apply). However, these fields are readonly, so any non-nil value should work. 
if in.TargetSelector != "" { - labelSelector, err := metav1.ParseToLabelSelector(in.TargetSelector) - if err != nil { - out.Selector = nil - return fmt.Errorf("failed to parse target selector: %v", err) - } - out.Selector = labelSelector + out.Selector = in.TargetSelector } else if in.Selector != nil { - out.Selector = new(metav1.LabelSelector) - selector := make(map[string]string) + set := labels.Set{} for key, val := range in.Selector { - selector[key] = val + set[key] = val } - out.Selector.MatchLabels = selector + out.Selector = labels.SelectorFromSet(set).String() } else { - out.Selector = nil + out.Selector = "" } return nil } @@ -121,7 +110,7 @@ func Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus(in *extensionsv1beta1 func Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensions.DeploymentSpec, out *extensionsv1beta1.DeploymentSpec, s conversion.Scope) error { out.Replicas = &in.Replicas out.Selector = in.Selector - if err := k8s_api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + if err := k8s_api_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } if err := Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { @@ -151,7 +140,7 @@ func Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *extensionsv out.Replicas = *in.Replicas } out.Selector = in.Selector - if err := k8s_api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + if err := k8s_api_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } if err := Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { @@ -247,7 +236,7 @@ func Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(in *extensions. 
*out.Replicas = int32(in.Replicas) out.MinReadySeconds = in.MinReadySeconds out.Selector = in.Selector - if err := k8s_api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + if err := k8s_api_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } return nil @@ -259,7 +248,7 @@ func Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *extensionsv } out.MinReadySeconds = in.MinReadySeconds out.Selector = in.Selector - if err := k8s_api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + if err := k8s_api_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } return nil @@ -338,10 +327,12 @@ func Convert_v1beta1_NetworkPolicyIngressRule_To_networking_NetworkPolicyIngress return err } } - out.From = make([]networking.NetworkPolicyPeer, len(in.From)) - for i := range in.From { - if err := Convert_v1beta1_NetworkPolicyPeer_To_networking_NetworkPolicyPeer(&in.From[i], &out.From[i], s); err != nil { - return err + if in.From != nil { + out.From = make([]networking.NetworkPolicyPeer, len(in.From)) + for i := range in.From { + if err := Convert_v1beta1_NetworkPolicyPeer_To_networking_NetworkPolicyPeer(&in.From[i], &out.From[i], s); err != nil { + return err + } } } return nil @@ -354,10 +345,12 @@ func Convert_networking_NetworkPolicyIngressRule_To_v1beta1_NetworkPolicyIngress return err } } - out.From = make([]extensionsv1beta1.NetworkPolicyPeer, len(in.From)) - for i := range in.From { - if err := Convert_networking_NetworkPolicyPeer_To_v1beta1_NetworkPolicyPeer(&in.From[i], &out.From[i], s); err != nil { - return err + if in.From != nil { + out.From = make([]extensionsv1beta1.NetworkPolicyPeer, len(in.From)) + for i := range in.From { + if err := Convert_networking_NetworkPolicyPeer_To_v1beta1_NetworkPolicyPeer(&in.From[i], &out.From[i], s); err != nil { + return err + } } } return nil diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/defaults.go index c653e812becc..d817c2f0f444 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/defaults.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/defaults.go @@ -63,6 +63,15 @@ func SetDefaults_DaemonSet(obj *extensionsv1beta1.DaemonSet) { } } +func SetDefaults_PodSecurityPolicySpec(obj *extensionsv1beta1.PodSecurityPolicySpec) { + // This field was added after PodSecurityPolicy was released. + // Policies that do not include this field must remain as permissive as they were prior to the introduction of this field. + if obj.AllowPrivilegeEscalation == nil { + t := true + obj.AllowPrivilegeEscalation = &t + } +} + func SetDefaults_Deployment(obj *extensionsv1beta1.Deployment) { // Default labels and selector to labels from pod template spec. labels := obj.Spec.Template.Labels diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/doc.go index 91959b55f308..86e50e9b78a2 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/doc.go @@ -15,7 +15,8 @@ limitations under the License. 
*/ // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/extensions -// +k8s:conversion-gen-external-types=../../../../vendor/k8s.io/api/extensions/v1beta1 +// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/autoscaling +// +k8s:conversion-gen-external-types=k8s.io/api/extensions/v1beta1 // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/extensions/v1beta1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/zz_generated.conversion.go index 7e663678310d..320ad17fb634 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/zz_generated.conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/zz_generated.conversion.go @@ -21,13 +21,14 @@ limitations under the License. package v1beta1 import ( - core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" v1beta1 "k8s.io/api/extensions/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - api "k8s.io/kubernetes/pkg/api" - api_v1 "k8s.io/kubernetes/pkg/api/v1" + autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" + core "k8s.io/kubernetes/pkg/apis/core" + core_v1 "k8s.io/kubernetes/pkg/apis/core/v1" extensions "k8s.io/kubernetes/pkg/apis/extensions" unsafe "unsafe" ) @@ -40,8 +41,8 @@ func init() { // Public to allow building arbitrary schemes. func RegisterConversions(scheme *runtime.Scheme) error { return scheme.AddGeneratedConversionFuncs( - Convert_v1beta1_APIVersion_To_extensions_APIVersion, - Convert_extensions_APIVersion_To_v1beta1_APIVersion, + Convert_v1beta1_AllowedFlexVolume_To_extensions_AllowedFlexVolume, + Convert_extensions_AllowedFlexVolume_To_v1beta1_AllowedFlexVolume, Convert_v1beta1_AllowedHostPath_To_extensions_AllowedHostPath, Convert_extensions_AllowedHostPath_To_v1beta1_AllowedHostPath, Convert_v1beta1_CustomMetricCurrentStatus_To_extensions_CustomMetricCurrentStatus, @@ -54,6 +55,8 @@ func RegisterConversions(scheme *runtime.Scheme) error { Convert_extensions_CustomMetricTargetList_To_v1beta1_CustomMetricTargetList, Convert_v1beta1_DaemonSet_To_extensions_DaemonSet, Convert_extensions_DaemonSet_To_v1beta1_DaemonSet, + Convert_v1beta1_DaemonSetCondition_To_extensions_DaemonSetCondition, + Convert_extensions_DaemonSetCondition_To_v1beta1_DaemonSetCondition, Convert_v1beta1_DaemonSetList_To_extensions_DaemonSetList, Convert_extensions_DaemonSetList_To_v1beta1_DaemonSetList, Convert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec, @@ -128,43 +131,35 @@ func RegisterConversions(scheme *runtime.Scheme) error { Convert_extensions_RunAsUserStrategyOptions_To_v1beta1_RunAsUserStrategyOptions, Convert_v1beta1_SELinuxStrategyOptions_To_extensions_SELinuxStrategyOptions, Convert_extensions_SELinuxStrategyOptions_To_v1beta1_SELinuxStrategyOptions, - Convert_v1beta1_Scale_To_extensions_Scale, - Convert_extensions_Scale_To_v1beta1_Scale, - Convert_v1beta1_ScaleSpec_To_extensions_ScaleSpec, - Convert_extensions_ScaleSpec_To_v1beta1_ScaleSpec, - Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus, - Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus, + Convert_v1beta1_Scale_To_autoscaling_Scale, + Convert_autoscaling_Scale_To_v1beta1_Scale, + Convert_v1beta1_ScaleSpec_To_autoscaling_ScaleSpec, + Convert_autoscaling_ScaleSpec_To_v1beta1_ScaleSpec, + Convert_v1beta1_ScaleStatus_To_autoscaling_ScaleStatus, + 
Convert_autoscaling_ScaleStatus_To_v1beta1_ScaleStatus, Convert_v1beta1_SupplementalGroupsStrategyOptions_To_extensions_SupplementalGroupsStrategyOptions, Convert_extensions_SupplementalGroupsStrategyOptions_To_v1beta1_SupplementalGroupsStrategyOptions, - Convert_v1beta1_ThirdPartyResource_To_extensions_ThirdPartyResource, - Convert_extensions_ThirdPartyResource_To_v1beta1_ThirdPartyResource, - Convert_v1beta1_ThirdPartyResourceData_To_extensions_ThirdPartyResourceData, - Convert_extensions_ThirdPartyResourceData_To_v1beta1_ThirdPartyResourceData, - Convert_v1beta1_ThirdPartyResourceDataList_To_extensions_ThirdPartyResourceDataList, - Convert_extensions_ThirdPartyResourceDataList_To_v1beta1_ThirdPartyResourceDataList, - Convert_v1beta1_ThirdPartyResourceList_To_extensions_ThirdPartyResourceList, - Convert_extensions_ThirdPartyResourceList_To_v1beta1_ThirdPartyResourceList, ) } -func autoConvert_v1beta1_APIVersion_To_extensions_APIVersion(in *v1beta1.APIVersion, out *extensions.APIVersion, s conversion.Scope) error { - out.Name = in.Name +func autoConvert_v1beta1_AllowedFlexVolume_To_extensions_AllowedFlexVolume(in *v1beta1.AllowedFlexVolume, out *extensions.AllowedFlexVolume, s conversion.Scope) error { + out.Driver = in.Driver return nil } -// Convert_v1beta1_APIVersion_To_extensions_APIVersion is an autogenerated conversion function. -func Convert_v1beta1_APIVersion_To_extensions_APIVersion(in *v1beta1.APIVersion, out *extensions.APIVersion, s conversion.Scope) error { - return autoConvert_v1beta1_APIVersion_To_extensions_APIVersion(in, out, s) +// Convert_v1beta1_AllowedFlexVolume_To_extensions_AllowedFlexVolume is an autogenerated conversion function. +func Convert_v1beta1_AllowedFlexVolume_To_extensions_AllowedFlexVolume(in *v1beta1.AllowedFlexVolume, out *extensions.AllowedFlexVolume, s conversion.Scope) error { + return autoConvert_v1beta1_AllowedFlexVolume_To_extensions_AllowedFlexVolume(in, out, s) } -func autoConvert_extensions_APIVersion_To_v1beta1_APIVersion(in *extensions.APIVersion, out *v1beta1.APIVersion, s conversion.Scope) error { - out.Name = in.Name +func autoConvert_extensions_AllowedFlexVolume_To_v1beta1_AllowedFlexVolume(in *extensions.AllowedFlexVolume, out *v1beta1.AllowedFlexVolume, s conversion.Scope) error { + out.Driver = in.Driver return nil } -// Convert_extensions_APIVersion_To_v1beta1_APIVersion is an autogenerated conversion function. -func Convert_extensions_APIVersion_To_v1beta1_APIVersion(in *extensions.APIVersion, out *v1beta1.APIVersion, s conversion.Scope) error { - return autoConvert_extensions_APIVersion_To_v1beta1_APIVersion(in, out, s) +// Convert_extensions_AllowedFlexVolume_To_v1beta1_AllowedFlexVolume is an autogenerated conversion function. 
+func Convert_extensions_AllowedFlexVolume_To_v1beta1_AllowedFlexVolume(in *extensions.AllowedFlexVolume, out *v1beta1.AllowedFlexVolume, s conversion.Scope) error { + return autoConvert_extensions_AllowedFlexVolume_To_v1beta1_AllowedFlexVolume(in, out, s) } func autoConvert_v1beta1_AllowedHostPath_To_extensions_AllowedHostPath(in *v1beta1.AllowedHostPath, out *extensions.AllowedHostPath, s conversion.Scope) error { @@ -303,6 +298,34 @@ func Convert_extensions_DaemonSet_To_v1beta1_DaemonSet(in *extensions.DaemonSet, return autoConvert_extensions_DaemonSet_To_v1beta1_DaemonSet(in, out, s) } +func autoConvert_v1beta1_DaemonSetCondition_To_extensions_DaemonSetCondition(in *v1beta1.DaemonSetCondition, out *extensions.DaemonSetCondition, s conversion.Scope) error { + out.Type = extensions.DaemonSetConditionType(in.Type) + out.Status = core.ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1beta1_DaemonSetCondition_To_extensions_DaemonSetCondition is an autogenerated conversion function. +func Convert_v1beta1_DaemonSetCondition_To_extensions_DaemonSetCondition(in *v1beta1.DaemonSetCondition, out *extensions.DaemonSetCondition, s conversion.Scope) error { + return autoConvert_v1beta1_DaemonSetCondition_To_extensions_DaemonSetCondition(in, out, s) +} + +func autoConvert_extensions_DaemonSetCondition_To_v1beta1_DaemonSetCondition(in *extensions.DaemonSetCondition, out *v1beta1.DaemonSetCondition, s conversion.Scope) error { + out.Type = v1beta1.DaemonSetConditionType(in.Type) + out.Status = v1.ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_extensions_DaemonSetCondition_To_v1beta1_DaemonSetCondition is an autogenerated conversion function. 
+func Convert_extensions_DaemonSetCondition_To_v1beta1_DaemonSetCondition(in *extensions.DaemonSetCondition, out *v1beta1.DaemonSetCondition, s conversion.Scope) error { + return autoConvert_extensions_DaemonSetCondition_To_v1beta1_DaemonSetCondition(in, out, s) +} + func autoConvert_v1beta1_DaemonSetList_To_extensions_DaemonSetList(in *v1beta1.DaemonSetList, out *extensions.DaemonSetList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { @@ -346,8 +369,8 @@ func Convert_extensions_DaemonSetList_To_v1beta1_DaemonSetList(in *extensions.Da } func autoConvert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(in *v1beta1.DaemonSetSpec, out *extensions.DaemonSetSpec, s conversion.Scope) error { - out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) - if err := api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := core_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } if err := Convert_v1beta1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { @@ -365,8 +388,8 @@ func Convert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(in *v1beta1.Daemo } func autoConvert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(in *extensions.DaemonSetSpec, out *v1beta1.DaemonSetSpec, s conversion.Scope) error { - out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) - if err := api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := core_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } if err := Convert_extensions_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { @@ -393,6 +416,7 @@ func autoConvert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus(in *v1bet out.NumberAvailable = in.NumberAvailable out.NumberUnavailable = in.NumberUnavailable out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount)) + out.Conditions = *(*[]extensions.DaemonSetCondition)(unsafe.Pointer(&in.Conditions)) return nil } @@ -411,6 +435,7 @@ func autoConvert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in *exten out.NumberAvailable = in.NumberAvailable out.NumberUnavailable = in.NumberUnavailable out.CollisionCount = (*int32)(unsafe.Pointer(in.CollisionCount)) + out.Conditions = *(*[]v1beta1.DaemonSetCondition)(unsafe.Pointer(&in.Conditions)) return nil } @@ -491,7 +516,7 @@ func Convert_extensions_Deployment_To_v1beta1_Deployment(in *extensions.Deployme func autoConvert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition(in *v1beta1.DeploymentCondition, out *extensions.DeploymentCondition, s conversion.Scope) error { out.Type = extensions.DeploymentConditionType(in.Type) - out.Status = api.ConditionStatus(in.Status) + out.Status = core.ConditionStatus(in.Status) out.LastUpdateTime = in.LastUpdateTime out.LastTransitionTime = in.LastTransitionTime out.Reason = in.Reason @@ -506,7 +531,7 @@ func Convert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition(in *v func autoConvert_extensions_DeploymentCondition_To_v1beta1_DeploymentCondition(in *extensions.DeploymentCondition, out *v1beta1.DeploymentCondition, s conversion.Scope) error { 
out.Type = v1beta1.DeploymentConditionType(in.Type) - out.Status = core_v1.ConditionStatus(in.Status) + out.Status = v1.ConditionStatus(in.Status) out.LastUpdateTime = in.LastUpdateTime out.LastTransitionTime = in.LastTransitionTime out.Reason = in.Reason @@ -590,11 +615,11 @@ func Convert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback(in *ext } func autoConvert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *v1beta1.DeploymentSpec, out *extensions.DeploymentSpec, s conversion.Scope) error { - if err := v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { + if err := meta_v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } - out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) - if err := api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := core_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } if err := Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { @@ -609,11 +634,11 @@ func autoConvert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *v1beta1 } func autoConvert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensions.DeploymentSpec, out *v1beta1.DeploymentSpec, s conversion.Scope) error { - if err := v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { + if err := meta_v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } - out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) - if err := api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := core_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } if err := Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { @@ -758,8 +783,8 @@ func Convert_extensions_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue(in } func autoConvert_v1beta1_HostPortRange_To_extensions_HostPortRange(in *v1beta1.HostPortRange, out *extensions.HostPortRange, s conversion.Scope) error { - out.Min = int(in.Min) - out.Max = int(in.Max) + out.Min = in.Min + out.Max = in.Max return nil } @@ -769,8 +794,8 @@ func Convert_v1beta1_HostPortRange_To_extensions_HostPortRange(in *v1beta1.HostP } func autoConvert_extensions_HostPortRange_To_v1beta1_HostPortRange(in *extensions.HostPortRange, out *v1beta1.HostPortRange, s conversion.Scope) error { - out.Min = int32(in.Min) - out.Max = int32(in.Max) + out.Min = in.Min + out.Max = in.Max return nil } @@ -1043,22 +1068,12 @@ func Convert_extensions_PodSecurityPolicyList_To_v1beta1_PodSecurityPolicyList(i func autoConvert_v1beta1_PodSecurityPolicySpec_To_extensions_PodSecurityPolicySpec(in *v1beta1.PodSecurityPolicySpec, out *extensions.PodSecurityPolicySpec, s conversion.Scope) error { out.Privileged = in.Privileged - out.DefaultAddCapabilities = *(*[]api.Capability)(unsafe.Pointer(&in.DefaultAddCapabilities)) - out.RequiredDropCapabilities = *(*[]api.Capability)(unsafe.Pointer(&in.RequiredDropCapabilities)) - out.AllowedCapabilities = *(*[]api.Capability)(unsafe.Pointer(&in.AllowedCapabilities)) + out.DefaultAddCapabilities = 
*(*[]core.Capability)(unsafe.Pointer(&in.DefaultAddCapabilities)) + out.RequiredDropCapabilities = *(*[]core.Capability)(unsafe.Pointer(&in.RequiredDropCapabilities)) + out.AllowedCapabilities = *(*[]core.Capability)(unsafe.Pointer(&in.AllowedCapabilities)) out.Volumes = *(*[]extensions.FSType)(unsafe.Pointer(&in.Volumes)) out.HostNetwork = in.HostNetwork - if in.HostPorts != nil { - in, out := &in.HostPorts, &out.HostPorts - *out = make([]extensions.HostPortRange, len(*in)) - for i := range *in { - if err := Convert_v1beta1_HostPortRange_To_extensions_HostPortRange(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.HostPorts = nil - } + out.HostPorts = *(*[]extensions.HostPortRange)(unsafe.Pointer(&in.HostPorts)) out.HostPID = in.HostPID out.HostIPC = in.HostIPC if err := Convert_v1beta1_SELinuxStrategyOptions_To_extensions_SELinuxStrategyOptions(&in.SELinux, &out.SELinux, s); err != nil { @@ -1075,8 +1090,11 @@ func autoConvert_v1beta1_PodSecurityPolicySpec_To_extensions_PodSecurityPolicySp } out.ReadOnlyRootFilesystem = in.ReadOnlyRootFilesystem out.DefaultAllowPrivilegeEscalation = (*bool)(unsafe.Pointer(in.DefaultAllowPrivilegeEscalation)) - out.AllowPrivilegeEscalation = in.AllowPrivilegeEscalation + if err := meta_v1.Convert_Pointer_bool_To_bool(&in.AllowPrivilegeEscalation, &out.AllowPrivilegeEscalation, s); err != nil { + return err + } out.AllowedHostPaths = *(*[]extensions.AllowedHostPath)(unsafe.Pointer(&in.AllowedHostPaths)) + out.AllowedFlexVolumes = *(*[]extensions.AllowedFlexVolume)(unsafe.Pointer(&in.AllowedFlexVolumes)) return nil } @@ -1087,22 +1105,12 @@ func Convert_v1beta1_PodSecurityPolicySpec_To_extensions_PodSecurityPolicySpec(i func autoConvert_extensions_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec(in *extensions.PodSecurityPolicySpec, out *v1beta1.PodSecurityPolicySpec, s conversion.Scope) error { out.Privileged = in.Privileged - out.DefaultAddCapabilities = *(*[]core_v1.Capability)(unsafe.Pointer(&in.DefaultAddCapabilities)) - out.RequiredDropCapabilities = *(*[]core_v1.Capability)(unsafe.Pointer(&in.RequiredDropCapabilities)) - out.AllowedCapabilities = *(*[]core_v1.Capability)(unsafe.Pointer(&in.AllowedCapabilities)) + out.DefaultAddCapabilities = *(*[]v1.Capability)(unsafe.Pointer(&in.DefaultAddCapabilities)) + out.RequiredDropCapabilities = *(*[]v1.Capability)(unsafe.Pointer(&in.RequiredDropCapabilities)) + out.AllowedCapabilities = *(*[]v1.Capability)(unsafe.Pointer(&in.AllowedCapabilities)) out.Volumes = *(*[]v1beta1.FSType)(unsafe.Pointer(&in.Volumes)) out.HostNetwork = in.HostNetwork - if in.HostPorts != nil { - in, out := &in.HostPorts, &out.HostPorts - *out = make([]v1beta1.HostPortRange, len(*in)) - for i := range *in { - if err := Convert_extensions_HostPortRange_To_v1beta1_HostPortRange(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.HostPorts = nil - } + out.HostPorts = *(*[]v1beta1.HostPortRange)(unsafe.Pointer(&in.HostPorts)) out.HostPID = in.HostPID out.HostIPC = in.HostIPC if err := Convert_extensions_SELinuxStrategyOptions_To_v1beta1_SELinuxStrategyOptions(&in.SELinux, &out.SELinux, s); err != nil { @@ -1119,8 +1127,11 @@ func autoConvert_extensions_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySp } out.ReadOnlyRootFilesystem = in.ReadOnlyRootFilesystem out.DefaultAllowPrivilegeEscalation = (*bool)(unsafe.Pointer(in.DefaultAllowPrivilegeEscalation)) - out.AllowPrivilegeEscalation = in.AllowPrivilegeEscalation + if err := 
meta_v1.Convert_bool_To_Pointer_bool(&in.AllowPrivilegeEscalation, &out.AllowPrivilegeEscalation, s); err != nil { + return err + } out.AllowedHostPaths = *(*[]v1beta1.AllowedHostPath)(unsafe.Pointer(&in.AllowedHostPaths)) + out.AllowedFlexVolumes = *(*[]v1beta1.AllowedFlexVolume)(unsafe.Pointer(&in.AllowedFlexVolumes)) return nil } @@ -1158,7 +1169,7 @@ func Convert_extensions_ReplicaSet_To_v1beta1_ReplicaSet(in *extensions.ReplicaS func autoConvert_v1beta1_ReplicaSetCondition_To_extensions_ReplicaSetCondition(in *v1beta1.ReplicaSetCondition, out *extensions.ReplicaSetCondition, s conversion.Scope) error { out.Type = extensions.ReplicaSetConditionType(in.Type) - out.Status = api.ConditionStatus(in.Status) + out.Status = core.ConditionStatus(in.Status) out.LastTransitionTime = in.LastTransitionTime out.Reason = in.Reason out.Message = in.Message @@ -1172,7 +1183,7 @@ func Convert_v1beta1_ReplicaSetCondition_To_extensions_ReplicaSetCondition(in *v func autoConvert_extensions_ReplicaSetCondition_To_v1beta1_ReplicaSetCondition(in *extensions.ReplicaSetCondition, out *v1beta1.ReplicaSetCondition, s conversion.Scope) error { out.Type = v1beta1.ReplicaSetConditionType(in.Type) - out.Status = core_v1.ConditionStatus(in.Status) + out.Status = v1.ConditionStatus(in.Status) out.LastTransitionTime = in.LastTransitionTime out.Reason = in.Reason out.Message = in.Message @@ -1227,24 +1238,24 @@ func Convert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList(in *extensions. } func autoConvert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *v1beta1.ReplicaSetSpec, out *extensions.ReplicaSetSpec, s conversion.Scope) error { - if err := v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { + if err := meta_v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } out.MinReadySeconds = in.MinReadySeconds - out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) - if err := api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := core_v1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } return nil } func autoConvert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(in *extensions.ReplicaSetSpec, out *v1beta1.ReplicaSetSpec, s conversion.Scope) error { - if err := v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { + if err := meta_v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { return err } out.MinReadySeconds = in.MinReadySeconds - out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) - if err := api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := core_v1.Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { return err } return nil @@ -1364,7 +1375,7 @@ func Convert_extensions_RunAsUserStrategyOptions_To_v1beta1_RunAsUserStrategyOpt func autoConvert_v1beta1_SELinuxStrategyOptions_To_extensions_SELinuxStrategyOptions(in *v1beta1.SELinuxStrategyOptions, out *extensions.SELinuxStrategyOptions, s conversion.Scope) error { out.Rule = extensions.SELinuxStrategy(in.Rule) - out.SELinuxOptions = (*api.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) + out.SELinuxOptions = 
(*core.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) return nil } @@ -1375,7 +1386,7 @@ func Convert_v1beta1_SELinuxStrategyOptions_To_extensions_SELinuxStrategyOptions func autoConvert_extensions_SELinuxStrategyOptions_To_v1beta1_SELinuxStrategyOptions(in *extensions.SELinuxStrategyOptions, out *v1beta1.SELinuxStrategyOptions, s conversion.Scope) error { out.Rule = v1beta1.SELinuxStrategy(in.Rule) - out.SELinuxOptions = (*core_v1.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) + out.SELinuxOptions = (*v1.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) return nil } @@ -1384,68 +1395,68 @@ func Convert_extensions_SELinuxStrategyOptions_To_v1beta1_SELinuxStrategyOptions return autoConvert_extensions_SELinuxStrategyOptions_To_v1beta1_SELinuxStrategyOptions(in, out, s) } -func autoConvert_v1beta1_Scale_To_extensions_Scale(in *v1beta1.Scale, out *extensions.Scale, s conversion.Scope) error { +func autoConvert_v1beta1_Scale_To_autoscaling_Scale(in *v1beta1.Scale, out *autoscaling.Scale, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_v1beta1_ScaleSpec_To_extensions_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_v1beta1_ScaleSpec_To_autoscaling_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_v1beta1_ScaleStatus_To_autoscaling_ScaleStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_v1beta1_Scale_To_extensions_Scale is an autogenerated conversion function. -func Convert_v1beta1_Scale_To_extensions_Scale(in *v1beta1.Scale, out *extensions.Scale, s conversion.Scope) error { - return autoConvert_v1beta1_Scale_To_extensions_Scale(in, out, s) +// Convert_v1beta1_Scale_To_autoscaling_Scale is an autogenerated conversion function. +func Convert_v1beta1_Scale_To_autoscaling_Scale(in *v1beta1.Scale, out *autoscaling.Scale, s conversion.Scope) error { + return autoConvert_v1beta1_Scale_To_autoscaling_Scale(in, out, s) } -func autoConvert_extensions_Scale_To_v1beta1_Scale(in *extensions.Scale, out *v1beta1.Scale, s conversion.Scope) error { +func autoConvert_autoscaling_Scale_To_v1beta1_Scale(in *autoscaling.Scale, out *v1beta1.Scale, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta - if err := Convert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { + if err := Convert_autoscaling_ScaleSpec_To_v1beta1_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus(&in.Status, &out.Status, s); err != nil { + if err := Convert_autoscaling_ScaleStatus_To_v1beta1_ScaleStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -// Convert_extensions_Scale_To_v1beta1_Scale is an autogenerated conversion function. -func Convert_extensions_Scale_To_v1beta1_Scale(in *extensions.Scale, out *v1beta1.Scale, s conversion.Scope) error { - return autoConvert_extensions_Scale_To_v1beta1_Scale(in, out, s) +// Convert_autoscaling_Scale_To_v1beta1_Scale is an autogenerated conversion function. 
+func Convert_autoscaling_Scale_To_v1beta1_Scale(in *autoscaling.Scale, out *v1beta1.Scale, s conversion.Scope) error { + return autoConvert_autoscaling_Scale_To_v1beta1_Scale(in, out, s) } -func autoConvert_v1beta1_ScaleSpec_To_extensions_ScaleSpec(in *v1beta1.ScaleSpec, out *extensions.ScaleSpec, s conversion.Scope) error { +func autoConvert_v1beta1_ScaleSpec_To_autoscaling_ScaleSpec(in *v1beta1.ScaleSpec, out *autoscaling.ScaleSpec, s conversion.Scope) error { out.Replicas = in.Replicas return nil } -// Convert_v1beta1_ScaleSpec_To_extensions_ScaleSpec is an autogenerated conversion function. -func Convert_v1beta1_ScaleSpec_To_extensions_ScaleSpec(in *v1beta1.ScaleSpec, out *extensions.ScaleSpec, s conversion.Scope) error { - return autoConvert_v1beta1_ScaleSpec_To_extensions_ScaleSpec(in, out, s) +// Convert_v1beta1_ScaleSpec_To_autoscaling_ScaleSpec is an autogenerated conversion function. +func Convert_v1beta1_ScaleSpec_To_autoscaling_ScaleSpec(in *v1beta1.ScaleSpec, out *autoscaling.ScaleSpec, s conversion.Scope) error { + return autoConvert_v1beta1_ScaleSpec_To_autoscaling_ScaleSpec(in, out, s) } -func autoConvert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(in *extensions.ScaleSpec, out *v1beta1.ScaleSpec, s conversion.Scope) error { +func autoConvert_autoscaling_ScaleSpec_To_v1beta1_ScaleSpec(in *autoscaling.ScaleSpec, out *v1beta1.ScaleSpec, s conversion.Scope) error { out.Replicas = in.Replicas return nil } -// Convert_extensions_ScaleSpec_To_v1beta1_ScaleSpec is an autogenerated conversion function. -func Convert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(in *extensions.ScaleSpec, out *v1beta1.ScaleSpec, s conversion.Scope) error { - return autoConvert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(in, out, s) +// Convert_autoscaling_ScaleSpec_To_v1beta1_ScaleSpec is an autogenerated conversion function. 
+func Convert_autoscaling_ScaleSpec_To_v1beta1_ScaleSpec(in *autoscaling.ScaleSpec, out *v1beta1.ScaleSpec, s conversion.Scope) error { + return autoConvert_autoscaling_ScaleSpec_To_v1beta1_ScaleSpec(in, out, s) } -func autoConvert_v1beta1_ScaleStatus_To_extensions_ScaleStatus(in *v1beta1.ScaleStatus, out *extensions.ScaleStatus, s conversion.Scope) error { +func autoConvert_v1beta1_ScaleStatus_To_autoscaling_ScaleStatus(in *v1beta1.ScaleStatus, out *autoscaling.ScaleStatus, s conversion.Scope) error { out.Replicas = in.Replicas - // WARNING: in.Selector requires manual conversion: inconvertible types (map[string]string vs *k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector) + // WARNING: in.Selector requires manual conversion: inconvertible types (map[string]string vs string) // WARNING: in.TargetSelector requires manual conversion: does not exist in peer-type return nil } -func autoConvert_extensions_ScaleStatus_To_v1beta1_ScaleStatus(in *extensions.ScaleStatus, out *v1beta1.ScaleStatus, s conversion.Scope) error { +func autoConvert_autoscaling_ScaleStatus_To_v1beta1_ScaleStatus(in *autoscaling.ScaleStatus, out *v1beta1.ScaleStatus, s conversion.Scope) error { out.Replicas = in.Replicas - // WARNING: in.Selector requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector vs map[string]string) + // WARNING: in.Selector requires manual conversion: inconvertible types (string vs map[string]string) return nil } @@ -1470,93 +1481,3 @@ func autoConvert_extensions_SupplementalGroupsStrategyOptions_To_v1beta1_Supplem func Convert_extensions_SupplementalGroupsStrategyOptions_To_v1beta1_SupplementalGroupsStrategyOptions(in *extensions.SupplementalGroupsStrategyOptions, out *v1beta1.SupplementalGroupsStrategyOptions, s conversion.Scope) error { return autoConvert_extensions_SupplementalGroupsStrategyOptions_To_v1beta1_SupplementalGroupsStrategyOptions(in, out, s) } - -func autoConvert_v1beta1_ThirdPartyResource_To_extensions_ThirdPartyResource(in *v1beta1.ThirdPartyResource, out *extensions.ThirdPartyResource, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Description = in.Description - out.Versions = *(*[]extensions.APIVersion)(unsafe.Pointer(&in.Versions)) - return nil -} - -// Convert_v1beta1_ThirdPartyResource_To_extensions_ThirdPartyResource is an autogenerated conversion function. -func Convert_v1beta1_ThirdPartyResource_To_extensions_ThirdPartyResource(in *v1beta1.ThirdPartyResource, out *extensions.ThirdPartyResource, s conversion.Scope) error { - return autoConvert_v1beta1_ThirdPartyResource_To_extensions_ThirdPartyResource(in, out, s) -} - -func autoConvert_extensions_ThirdPartyResource_To_v1beta1_ThirdPartyResource(in *extensions.ThirdPartyResource, out *v1beta1.ThirdPartyResource, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Description = in.Description - out.Versions = *(*[]v1beta1.APIVersion)(unsafe.Pointer(&in.Versions)) - return nil -} - -// Convert_extensions_ThirdPartyResource_To_v1beta1_ThirdPartyResource is an autogenerated conversion function. 
-func Convert_extensions_ThirdPartyResource_To_v1beta1_ThirdPartyResource(in *extensions.ThirdPartyResource, out *v1beta1.ThirdPartyResource, s conversion.Scope) error { - return autoConvert_extensions_ThirdPartyResource_To_v1beta1_ThirdPartyResource(in, out, s) -} - -func autoConvert_v1beta1_ThirdPartyResourceData_To_extensions_ThirdPartyResourceData(in *v1beta1.ThirdPartyResourceData, out *extensions.ThirdPartyResourceData, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Data = *(*[]byte)(unsafe.Pointer(&in.Data)) - return nil -} - -// Convert_v1beta1_ThirdPartyResourceData_To_extensions_ThirdPartyResourceData is an autogenerated conversion function. -func Convert_v1beta1_ThirdPartyResourceData_To_extensions_ThirdPartyResourceData(in *v1beta1.ThirdPartyResourceData, out *extensions.ThirdPartyResourceData, s conversion.Scope) error { - return autoConvert_v1beta1_ThirdPartyResourceData_To_extensions_ThirdPartyResourceData(in, out, s) -} - -func autoConvert_extensions_ThirdPartyResourceData_To_v1beta1_ThirdPartyResourceData(in *extensions.ThirdPartyResourceData, out *v1beta1.ThirdPartyResourceData, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Data = *(*[]byte)(unsafe.Pointer(&in.Data)) - return nil -} - -// Convert_extensions_ThirdPartyResourceData_To_v1beta1_ThirdPartyResourceData is an autogenerated conversion function. -func Convert_extensions_ThirdPartyResourceData_To_v1beta1_ThirdPartyResourceData(in *extensions.ThirdPartyResourceData, out *v1beta1.ThirdPartyResourceData, s conversion.Scope) error { - return autoConvert_extensions_ThirdPartyResourceData_To_v1beta1_ThirdPartyResourceData(in, out, s) -} - -func autoConvert_v1beta1_ThirdPartyResourceDataList_To_extensions_ThirdPartyResourceDataList(in *v1beta1.ThirdPartyResourceDataList, out *extensions.ThirdPartyResourceDataList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]extensions.ThirdPartyResourceData)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1beta1_ThirdPartyResourceDataList_To_extensions_ThirdPartyResourceDataList is an autogenerated conversion function. -func Convert_v1beta1_ThirdPartyResourceDataList_To_extensions_ThirdPartyResourceDataList(in *v1beta1.ThirdPartyResourceDataList, out *extensions.ThirdPartyResourceDataList, s conversion.Scope) error { - return autoConvert_v1beta1_ThirdPartyResourceDataList_To_extensions_ThirdPartyResourceDataList(in, out, s) -} - -func autoConvert_extensions_ThirdPartyResourceDataList_To_v1beta1_ThirdPartyResourceDataList(in *extensions.ThirdPartyResourceDataList, out *v1beta1.ThirdPartyResourceDataList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1beta1.ThirdPartyResourceData)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_extensions_ThirdPartyResourceDataList_To_v1beta1_ThirdPartyResourceDataList is an autogenerated conversion function. 
-func Convert_extensions_ThirdPartyResourceDataList_To_v1beta1_ThirdPartyResourceDataList(in *extensions.ThirdPartyResourceDataList, out *v1beta1.ThirdPartyResourceDataList, s conversion.Scope) error { - return autoConvert_extensions_ThirdPartyResourceDataList_To_v1beta1_ThirdPartyResourceDataList(in, out, s) -} - -func autoConvert_v1beta1_ThirdPartyResourceList_To_extensions_ThirdPartyResourceList(in *v1beta1.ThirdPartyResourceList, out *extensions.ThirdPartyResourceList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]extensions.ThirdPartyResource)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1beta1_ThirdPartyResourceList_To_extensions_ThirdPartyResourceList is an autogenerated conversion function. -func Convert_v1beta1_ThirdPartyResourceList_To_extensions_ThirdPartyResourceList(in *v1beta1.ThirdPartyResourceList, out *extensions.ThirdPartyResourceList, s conversion.Scope) error { - return autoConvert_v1beta1_ThirdPartyResourceList_To_extensions_ThirdPartyResourceList(in, out, s) -} - -func autoConvert_extensions_ThirdPartyResourceList_To_v1beta1_ThirdPartyResourceList(in *extensions.ThirdPartyResourceList, out *v1beta1.ThirdPartyResourceList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1beta1.ThirdPartyResource)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_extensions_ThirdPartyResourceList_To_v1beta1_ThirdPartyResourceList is an autogenerated conversion function. -func Convert_extensions_ThirdPartyResourceList_To_v1beta1_ThirdPartyResourceList(in *extensions.ThirdPartyResourceList, out *v1beta1.ThirdPartyResourceList, s conversion.Scope) error { - return autoConvert_extensions_ThirdPartyResourceList_To_v1beta1_ThirdPartyResourceList(in, out, s) -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/zz_generated.defaults.go index ba0c3716a235..f22282f85f8e 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/zz_generated.defaults.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/zz_generated.defaults.go @@ -23,7 +23,7 @@ package v1beta1 import ( v1beta1 "k8s.io/api/extensions/v1beta1" runtime "k8s.io/apimachinery/pkg/runtime" - v1 "k8s.io/kubernetes/pkg/api/v1" + v1 "k8s.io/kubernetes/pkg/apis/core/v1" ) // RegisterDefaults adds defaulters functions to the given scheme. 
@@ -36,6 +36,8 @@ func RegisterDefaults(scheme *runtime.Scheme) error { scheme.AddTypeDefaultingFunc(&v1beta1.DeploymentList{}, func(obj interface{}) { SetObjectDefaults_DeploymentList(obj.(*v1beta1.DeploymentList)) }) scheme.AddTypeDefaultingFunc(&v1beta1.NetworkPolicy{}, func(obj interface{}) { SetObjectDefaults_NetworkPolicy(obj.(*v1beta1.NetworkPolicy)) }) scheme.AddTypeDefaultingFunc(&v1beta1.NetworkPolicyList{}, func(obj interface{}) { SetObjectDefaults_NetworkPolicyList(obj.(*v1beta1.NetworkPolicyList)) }) + scheme.AddTypeDefaultingFunc(&v1beta1.PodSecurityPolicy{}, func(obj interface{}) { SetObjectDefaults_PodSecurityPolicy(obj.(*v1beta1.PodSecurityPolicy)) }) + scheme.AddTypeDefaultingFunc(&v1beta1.PodSecurityPolicyList{}, func(obj interface{}) { SetObjectDefaults_PodSecurityPolicyList(obj.(*v1beta1.PodSecurityPolicyList)) }) scheme.AddTypeDefaultingFunc(&v1beta1.ReplicaSet{}, func(obj interface{}) { SetObjectDefaults_ReplicaSet(obj.(*v1beta1.ReplicaSet)) }) scheme.AddTypeDefaultingFunc(&v1beta1.ReplicaSetList{}, func(obj interface{}) { SetObjectDefaults_ReplicaSetList(obj.(*v1beta1.ReplicaSetList)) }) return nil @@ -340,6 +342,17 @@ func SetObjectDefaults_NetworkPolicyList(in *v1beta1.NetworkPolicyList) { } } +func SetObjectDefaults_PodSecurityPolicy(in *v1beta1.PodSecurityPolicy) { + SetDefaults_PodSecurityPolicySpec(&in.Spec) +} + +func SetObjectDefaults_PodSecurityPolicyList(in *v1beta1.PodSecurityPolicyList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_PodSecurityPolicy(a) + } +} + func SetObjectDefaults_ReplicaSet(in *v1beta1.ReplicaSet) { SetDefaults_ReplicaSet(in) v1.SetDefaults_PodSpec(&in.Spec.Template.Spec) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/validation/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/extensions/validation/BUILD index 3cc6e3ace0e2..d7ea42609a0a 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/validation/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/validation/BUILD @@ -9,9 +9,10 @@ load( go_library( name = "go_default_library", srcs = ["validation.go"], + importpath = "k8s.io/kubernetes/pkg/apis/extensions/validation", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/validation:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/validation:go_default_library", "//pkg/apis/extensions:go_default_library", "//pkg/security/apparmor:go_default_library", "//pkg/security/podsecuritypolicy/seccomp:go_default_library", @@ -30,9 +31,10 @@ go_library( go_test( name = "go_default_test", srcs = ["validation_test.go"], + importpath = "k8s.io/kubernetes/pkg/apis/extensions/validation", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", "//pkg/security/apparmor:go_default_library", "//pkg/security/podsecuritypolicy/seccomp:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/validation/validation.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/validation/validation.go index 1206feedb7bb..20cc279e8fd4 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/validation/validation.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/validation/validation.go @@ -33,8 +33,8 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation" "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/kubernetes/pkg/api" - apivalidation "k8s.io/kubernetes/pkg/api/validation" + api "k8s.io/kubernetes/pkg/apis/core" + 
apivalidation "k8s.io/kubernetes/pkg/apis/core/validation" "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/security/apparmor" "k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp" @@ -325,9 +325,7 @@ func ValidateDeploymentStatus(status *extensions.DeploymentStatus, fldPath *fiel if status.AvailableReplicas > status.Replicas { allErrs = append(allErrs, field.Invalid(fldPath.Child("availableReplicas"), status.AvailableReplicas, msg)) } - // TODO: ReadyReplicas is introduced in 1.6 and this check breaks the Deployment controller when pre-1.6 clusters get upgraded. - // Remove the comparison to zero once we stop supporting upgrades from 1.5. - if status.ReadyReplicas > 0 && status.AvailableReplicas > status.ReadyReplicas { + if status.AvailableReplicas > status.ReadyReplicas { allErrs = append(allErrs, field.Invalid(fldPath.Child("availableReplicas"), status.AvailableReplicas, "cannot be greater than readyReplicas")) } return allErrs @@ -353,7 +351,7 @@ func ValidateDeploymentStatusUpdate(update, old *extensions.Deployment) field.Er return allErrs } -// TODO: Move in "k8s.io/kubernetes/pkg/api/validation" +// TODO: Move in "k8s.io/kubernetes/pkg/apis/core/validation" func isDecremented(update, old *int32) bool { if update == nil && old != nil { return true @@ -522,17 +520,6 @@ func validateIngressBackend(backend *extensions.IngressBackend, fldPath *field.P return allErrs } -func ValidateScale(scale *extensions.Scale) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, apivalidation.ValidateObjectMeta(&scale.ObjectMeta, true, apivalidation.NameIsDNSSubdomain, field.NewPath("metadata"))...) - - if scale.Spec.Replicas < 0 { - allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "replicas"), scale.Spec.Replicas, "must be greater than or equal to 0")) - } - - return allErrs -} - // ValidateReplicaSetName can be used to check whether the given ReplicaSet // name is valid. // Prefix indicates this name will be used as part of generation, in which case @@ -668,6 +655,7 @@ func ValidatePodSecurityPolicySpec(spec *extensions.PodSecurityPolicySpec, fldPa allErrs = append(allErrs, validatePSPCapsAgainstDrops(spec.RequiredDropCapabilities, spec.AllowedCapabilities, field.NewPath("allowedCapabilities"))...) allErrs = append(allErrs, validatePSPDefaultAllowPrivilegeEscalation(fldPath.Child("defaultAllowPrivilegeEscalation"), spec.DefaultAllowPrivilegeEscalation, spec.AllowPrivilegeEscalation)...) allErrs = append(allErrs, validatePSPAllowedHostPaths(fldPath.Child("allowedHostPaths"), spec.AllowedHostPaths)...) + allErrs = append(allErrs, validatePSPAllowedFlexVolumes(fldPath.Child("allowedFlexVolumes"), spec.AllowedFlexVolumes)...) return allErrs } @@ -734,6 +722,20 @@ func validatePSPAllowedHostPaths(fldPath *field.Path, allowedHostPaths []extensi return allErrs } +// validatePSPAllowedFlexVolumes +func validatePSPAllowedFlexVolumes(fldPath *field.Path, flexVolumes []extensions.AllowedFlexVolume) field.ErrorList { + allErrs := field.ErrorList{} + if len(flexVolumes) > 0 { + for idx, fv := range flexVolumes { + if len(fv.Driver) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("allowedFlexVolumes").Index(idx).Child("driver"), + "must specify a driver")) + } + } + } + return allErrs +} + // validatePSPSELinux validates the SELinux fields of PodSecurityPolicy. 
func validatePSPSELinux(fldPath *field.Path, seLinux *extensions.SELinuxStrategyOptions) field.ErrorList { allErrs := field.ErrorList{} @@ -815,7 +817,6 @@ func validatePodSecurityPolicyVolumes(fldPath *field.Path, volumes []extensions. allErrs = append(allErrs, field.NotSupported(fldPath.Child("volumes"), v, allowed.List())) } } - return allErrs } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/zz_generated.deepcopy.go index d41048a5a88f..ea801b3e8f57 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/zz_generated.deepcopy.go @@ -22,253 +22,22 @@ package extensions import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - api "k8s.io/kubernetes/pkg/api" - reflect "reflect" + core "k8s.io/kubernetes/pkg/apis/core" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*APIVersion).DeepCopyInto(out.(*APIVersion)) - return nil - }, InType: reflect.TypeOf(&APIVersion{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AllowedHostPath).DeepCopyInto(out.(*AllowedHostPath)) - return nil - }, InType: reflect.TypeOf(&AllowedHostPath{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CustomMetricCurrentStatus).DeepCopyInto(out.(*CustomMetricCurrentStatus)) - return nil - }, InType: reflect.TypeOf(&CustomMetricCurrentStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CustomMetricCurrentStatusList).DeepCopyInto(out.(*CustomMetricCurrentStatusList)) - return nil - }, InType: reflect.TypeOf(&CustomMetricCurrentStatusList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CustomMetricTarget).DeepCopyInto(out.(*CustomMetricTarget)) - return nil - }, InType: reflect.TypeOf(&CustomMetricTarget{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CustomMetricTargetList).DeepCopyInto(out.(*CustomMetricTargetList)) - return nil - }, InType: reflect.TypeOf(&CustomMetricTargetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSet).DeepCopyInto(out.(*DaemonSet)) - return nil - }, InType: reflect.TypeOf(&DaemonSet{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSetList).DeepCopyInto(out.(*DaemonSetList)) - return nil - }, InType: reflect.TypeOf(&DaemonSetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSetSpec).DeepCopyInto(out.(*DaemonSetSpec)) - return nil - }, InType: reflect.TypeOf(&DaemonSetSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, 
out interface{}, c *conversion.Cloner) error { - in.(*DaemonSetStatus).DeepCopyInto(out.(*DaemonSetStatus)) - return nil - }, InType: reflect.TypeOf(&DaemonSetStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSetUpdateStrategy).DeepCopyInto(out.(*DaemonSetUpdateStrategy)) - return nil - }, InType: reflect.TypeOf(&DaemonSetUpdateStrategy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Deployment).DeepCopyInto(out.(*Deployment)) - return nil - }, InType: reflect.TypeOf(&Deployment{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentCondition).DeepCopyInto(out.(*DeploymentCondition)) - return nil - }, InType: reflect.TypeOf(&DeploymentCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentList).DeepCopyInto(out.(*DeploymentList)) - return nil - }, InType: reflect.TypeOf(&DeploymentList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentRollback).DeepCopyInto(out.(*DeploymentRollback)) - return nil - }, InType: reflect.TypeOf(&DeploymentRollback{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentSpec).DeepCopyInto(out.(*DeploymentSpec)) - return nil - }, InType: reflect.TypeOf(&DeploymentSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentStatus).DeepCopyInto(out.(*DeploymentStatus)) - return nil - }, InType: reflect.TypeOf(&DeploymentStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentStrategy).DeepCopyInto(out.(*DeploymentStrategy)) - return nil - }, InType: reflect.TypeOf(&DeploymentStrategy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*FSGroupStrategyOptions).DeepCopyInto(out.(*FSGroupStrategyOptions)) - return nil - }, InType: reflect.TypeOf(&FSGroupStrategyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GroupIDRange).DeepCopyInto(out.(*GroupIDRange)) - return nil - }, InType: reflect.TypeOf(&GroupIDRange{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HTTPIngressPath).DeepCopyInto(out.(*HTTPIngressPath)) - return nil - }, InType: reflect.TypeOf(&HTTPIngressPath{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HTTPIngressRuleValue).DeepCopyInto(out.(*HTTPIngressRuleValue)) - return nil - }, InType: reflect.TypeOf(&HTTPIngressRuleValue{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HostPortRange).DeepCopyInto(out.(*HostPortRange)) - return nil - }, InType: reflect.TypeOf(&HostPortRange{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Ingress).DeepCopyInto(out.(*Ingress)) - return nil - }, InType: reflect.TypeOf(&Ingress{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - 
in.(*IngressBackend).DeepCopyInto(out.(*IngressBackend)) - return nil - }, InType: reflect.TypeOf(&IngressBackend{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IngressList).DeepCopyInto(out.(*IngressList)) - return nil - }, InType: reflect.TypeOf(&IngressList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IngressRule).DeepCopyInto(out.(*IngressRule)) - return nil - }, InType: reflect.TypeOf(&IngressRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IngressRuleValue).DeepCopyInto(out.(*IngressRuleValue)) - return nil - }, InType: reflect.TypeOf(&IngressRuleValue{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IngressSpec).DeepCopyInto(out.(*IngressSpec)) - return nil - }, InType: reflect.TypeOf(&IngressSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IngressStatus).DeepCopyInto(out.(*IngressStatus)) - return nil - }, InType: reflect.TypeOf(&IngressStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IngressTLS).DeepCopyInto(out.(*IngressTLS)) - return nil - }, InType: reflect.TypeOf(&IngressTLS{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodSecurityPolicy).DeepCopyInto(out.(*PodSecurityPolicy)) - return nil - }, InType: reflect.TypeOf(&PodSecurityPolicy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodSecurityPolicyList).DeepCopyInto(out.(*PodSecurityPolicyList)) - return nil - }, InType: reflect.TypeOf(&PodSecurityPolicyList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodSecurityPolicySpec).DeepCopyInto(out.(*PodSecurityPolicySpec)) - return nil - }, InType: reflect.TypeOf(&PodSecurityPolicySpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicaSet).DeepCopyInto(out.(*ReplicaSet)) - return nil - }, InType: reflect.TypeOf(&ReplicaSet{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicaSetCondition).DeepCopyInto(out.(*ReplicaSetCondition)) - return nil - }, InType: reflect.TypeOf(&ReplicaSetCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicaSetList).DeepCopyInto(out.(*ReplicaSetList)) - return nil - }, InType: reflect.TypeOf(&ReplicaSetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicaSetSpec).DeepCopyInto(out.(*ReplicaSetSpec)) - return nil - }, InType: reflect.TypeOf(&ReplicaSetSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicaSetStatus).DeepCopyInto(out.(*ReplicaSetStatus)) - return nil - }, InType: reflect.TypeOf(&ReplicaSetStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicationControllerDummy).DeepCopyInto(out.(*ReplicationControllerDummy)) - return nil - }, InType: 
reflect.TypeOf(&ReplicationControllerDummy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RollbackConfig).DeepCopyInto(out.(*RollbackConfig)) - return nil - }, InType: reflect.TypeOf(&RollbackConfig{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RollingUpdateDaemonSet).DeepCopyInto(out.(*RollingUpdateDaemonSet)) - return nil - }, InType: reflect.TypeOf(&RollingUpdateDaemonSet{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RollingUpdateDeployment).DeepCopyInto(out.(*RollingUpdateDeployment)) - return nil - }, InType: reflect.TypeOf(&RollingUpdateDeployment{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RunAsUserStrategyOptions).DeepCopyInto(out.(*RunAsUserStrategyOptions)) - return nil - }, InType: reflect.TypeOf(&RunAsUserStrategyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SELinuxStrategyOptions).DeepCopyInto(out.(*SELinuxStrategyOptions)) - return nil - }, InType: reflect.TypeOf(&SELinuxStrategyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Scale).DeepCopyInto(out.(*Scale)) - return nil - }, InType: reflect.TypeOf(&Scale{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ScaleSpec).DeepCopyInto(out.(*ScaleSpec)) - return nil - }, InType: reflect.TypeOf(&ScaleSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ScaleStatus).DeepCopyInto(out.(*ScaleStatus)) - return nil - }, InType: reflect.TypeOf(&ScaleStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SupplementalGroupsStrategyOptions).DeepCopyInto(out.(*SupplementalGroupsStrategyOptions)) - return nil - }, InType: reflect.TypeOf(&SupplementalGroupsStrategyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ThirdPartyResource).DeepCopyInto(out.(*ThirdPartyResource)) - return nil - }, InType: reflect.TypeOf(&ThirdPartyResource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ThirdPartyResourceData).DeepCopyInto(out.(*ThirdPartyResourceData)) - return nil - }, InType: reflect.TypeOf(&ThirdPartyResourceData{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ThirdPartyResourceDataList).DeepCopyInto(out.(*ThirdPartyResourceDataList)) - return nil - }, InType: reflect.TypeOf(&ThirdPartyResourceDataList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ThirdPartyResourceList).DeepCopyInto(out.(*ThirdPartyResourceList)) - return nil - }, InType: reflect.TypeOf(&ThirdPartyResourceList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*UserIDRange).DeepCopyInto(out.(*UserIDRange)) - return nil - }, InType: reflect.TypeOf(&UserIDRange{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. -func (in *APIVersion) DeepCopyInto(out *APIVersion) { +func (in *AllowedFlexVolume) DeepCopyInto(out *AllowedFlexVolume) { *out = *in return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIVersion. -func (in *APIVersion) DeepCopy() *APIVersion { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedFlexVolume. +func (in *AllowedFlexVolume) DeepCopy() *AllowedFlexVolume { if in == nil { return nil } - out := new(APIVersion) + out := new(AllowedFlexVolume) in.DeepCopyInto(out) return out } @@ -398,6 +167,23 @@ func (in *DaemonSet) DeepCopyObject() runtime.Object { } } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetCondition) DeepCopyInto(out *DaemonSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetCondition. +func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition { + if in == nil { + return nil + } + out := new(DaemonSetCondition) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) { *out = *in @@ -480,6 +266,13 @@ func (in *DaemonSetStatus) DeepCopyInto(out *DaemonSetStatus) { **out = **in } } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DaemonSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -1101,17 +894,17 @@ func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { *out = *in if in.DefaultAddCapabilities != nil { in, out := &in.DefaultAddCapabilities, &out.DefaultAddCapabilities - *out = make([]api.Capability, len(*in)) + *out = make([]core.Capability, len(*in)) copy(*out, *in) } if in.RequiredDropCapabilities != nil { in, out := &in.RequiredDropCapabilities, &out.RequiredDropCapabilities - *out = make([]api.Capability, len(*in)) + *out = make([]core.Capability, len(*in)) copy(*out, *in) } if in.AllowedCapabilities != nil { in, out := &in.AllowedCapabilities, &out.AllowedCapabilities - *out = make([]api.Capability, len(*in)) + *out = make([]core.Capability, len(*in)) copy(*out, *in) } if in.Volumes != nil { @@ -1142,6 +935,11 @@ func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { *out = make([]AllowedHostPath, len(*in)) copy(*out, *in) } + if in.AllowedFlexVolumes != nil { + in, out := &in.AllowedFlexVolumes, &out.AllowedFlexVolumes + *out = make([]AllowedFlexVolume, len(*in)) + copy(*out, *in) + } return } @@ -1390,7 +1188,7 @@ func (in *SELinuxStrategyOptions) DeepCopyInto(out *SELinuxStrategyOptions) { if *in == nil { *out = nil } else { - *out = new(api.SELinuxOptions) + *out = new(core.SELinuxOptions) **out = **in } } @@ -1407,76 +1205,6 @@ func (in *SELinuxStrategyOptions) DeepCopy() *SELinuxStrategyOptions { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Scale) DeepCopyInto(out *Scale) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scale. 
-func (in *Scale) DeepCopy() *Scale { - if in == nil { - return nil - } - out := new(Scale) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Scale) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ScaleSpec) DeepCopyInto(out *ScaleSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleSpec. -func (in *ScaleSpec) DeepCopy() *ScaleSpec { - if in == nil { - return nil - } - out := new(ScaleSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ScaleStatus) DeepCopyInto(out *ScaleStatus) { - *out = *in - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - if *in == nil { - *out = nil - } else { - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleStatus. -func (in *ScaleStatus) DeepCopy() *ScaleStatus { - if in == nil { - return nil - } - out := new(ScaleStatus) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SupplementalGroupsStrategyOptions) DeepCopyInto(out *SupplementalGroupsStrategyOptions) { *out = *in @@ -1498,138 +1226,6 @@ func (in *SupplementalGroupsStrategyOptions) DeepCopy() *SupplementalGroupsStrat return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ThirdPartyResource) DeepCopyInto(out *ThirdPartyResource) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Versions != nil { - in, out := &in.Versions, &out.Versions - *out = make([]APIVersion, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThirdPartyResource. -func (in *ThirdPartyResource) DeepCopy() *ThirdPartyResource { - if in == nil { - return nil - } - out := new(ThirdPartyResource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ThirdPartyResource) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ThirdPartyResourceData) DeepCopyInto(out *ThirdPartyResourceData) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Data != nil { - in, out := &in.Data, &out.Data - *out = make([]byte, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThirdPartyResourceData. 
-func (in *ThirdPartyResourceData) DeepCopy() *ThirdPartyResourceData { - if in == nil { - return nil - } - out := new(ThirdPartyResourceData) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ThirdPartyResourceData) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ThirdPartyResourceDataList) DeepCopyInto(out *ThirdPartyResourceDataList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ThirdPartyResourceData, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThirdPartyResourceDataList. -func (in *ThirdPartyResourceDataList) DeepCopy() *ThirdPartyResourceDataList { - if in == nil { - return nil - } - out := new(ThirdPartyResourceDataList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ThirdPartyResourceDataList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ThirdPartyResourceList) DeepCopyInto(out *ThirdPartyResourceList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ThirdPartyResource, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThirdPartyResourceList. -func (in *ThirdPartyResourceList) DeepCopy() *ThirdPartyResourceList { - if in == nil { - return nil - } - out := new(ThirdPartyResourceList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ThirdPartyResourceList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *UserIDRange) DeepCopyInto(out *UserIDRange) { *out = *in diff --git a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/BUILD index c72a0f3a4cac..9dd53f7d33cd 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/BUILD @@ -13,9 +13,9 @@ go_library( "types.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/kubernetes/pkg/apis/imagepolicy", deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/doc.go index 306f51b005bc..a39220ff7c09 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/doc.go @@ -14,6 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +groupName=imagepolicy.k8s.io package imagepolicy diff --git a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/install/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/install/BUILD index 89706b15f251..77b3a196e84c 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/install/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/install/BUILD @@ -8,8 +8,9 @@ load( go_library( name = "go_default_library", srcs = ["install.go"], + importpath = "k8s.io/kubernetes/pkg/apis/imagepolicy/install", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/imagepolicy:go_default_library", "//pkg/apis/imagepolicy/v1alpha1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/install/install.go index cf03fd483c25..75f5ba1f44ea 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/install/install.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/install/install.go @@ -23,13 +23,13 @@ import ( "k8s.io/apimachinery/pkg/apimachinery/registered" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/imagepolicy" "k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1" ) func init() { - Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) + Install(legacyscheme.GroupFactoryRegistry, legacyscheme.Registry, legacyscheme.Scheme) } // Install registers the API group and adds types to a scheme diff --git a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1/BUILD index f1a3ffaac565..017af8de5d46 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1/BUILD @@ -13,6 +13,7 @@ go_library( "zz_generated.conversion.go", "zz_generated.defaults.go", ], + importpath = "k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1", deps = [ "//pkg/apis/imagepolicy:go_default_library", "//vendor/k8s.io/api/imagepolicy/v1alpha1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1/doc.go 
b/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1/doc.go index b88ad319a457..b619e953c766 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1/doc.go @@ -15,7 +15,7 @@ limitations under the License. */ // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/imagepolicy -// +k8s:conversion-gen-external-types=../../../../vendor/k8s.io/api/imagepolicy/v1alpha1 +// +k8s:conversion-gen-external-types=k8s.io/api/imagepolicy/v1alpha1 // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/imagepolicy/v1alpha1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/zz_generated.deepcopy.go index aa752870e5a3..985d67753724 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/imagepolicy/zz_generated.deepcopy.go @@ -21,40 +21,9 @@ limitations under the License. package imagepolicy import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ImageReview).DeepCopyInto(out.(*ImageReview)) - return nil - }, InType: reflect.TypeOf(&ImageReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ImageReviewContainerSpec).DeepCopyInto(out.(*ImageReviewContainerSpec)) - return nil - }, InType: reflect.TypeOf(&ImageReviewContainerSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ImageReviewSpec).DeepCopyInto(out.(*ImageReviewSpec)) - return nil - }, InType: reflect.TypeOf(&ImageReviewSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ImageReviewStatus).DeepCopyInto(out.(*ImageReviewStatus)) - return nil - }, InType: reflect.TypeOf(&ImageReviewStatus{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ImageReview) DeepCopyInto(out *ImageReview) { *out = *in diff --git a/vendor/k8s.io/kubernetes/pkg/apis/networking/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/networking/BUILD index 0918167e4366..4dff82d1ba37 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/networking/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/networking/BUILD @@ -13,10 +13,10 @@ go_library( "types.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/kubernetes/pkg/apis/networking", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/networking/OWNERS b/vendor/k8s.io/kubernetes/pkg/apis/networking/OWNERS index 97bde97282c9..0e696c0fbb47 100755 --- a/vendor/k8s.io/kubernetes/pkg/apis/networking/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/apis/networking/OWNERS @@ -1,4 +1,5 @@ reviewers: - caseydavenport +- cmluciano - danwinship - thockin diff --git a/vendor/k8s.io/kubernetes/pkg/apis/networking/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/networking/doc.go index 83c0401bf948..3c44f4d285a1 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/networking/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/networking/doc.go @@ -14,6 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +groupName=networking.k8s.io package networking diff --git a/vendor/k8s.io/kubernetes/pkg/apis/networking/install/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/networking/install/BUILD index e1a348568b1e..a5ce49004d46 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/networking/install/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/networking/install/BUILD @@ -8,8 +8,9 @@ load( go_library( name = "go_default_library", srcs = ["install.go"], + importpath = "k8s.io/kubernetes/pkg/apis/networking/install", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/networking:go_default_library", "//pkg/apis/networking/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/networking/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/networking/install/install.go index 85c7991bcf00..59a3d043c573 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/networking/install/install.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/networking/install/install.go @@ -22,13 +22,13 @@ import ( "k8s.io/apimachinery/pkg/apimachinery/announced" "k8s.io/apimachinery/pkg/apimachinery/registered" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/networking" "k8s.io/kubernetes/pkg/apis/networking/v1" ) func init() { - Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) + Install(legacyscheme.GroupFactoryRegistry, legacyscheme.Registry, legacyscheme.Scheme) } // Install registers the API group and adds types to a scheme diff --git a/vendor/k8s.io/kubernetes/pkg/apis/networking/types.go b/vendor/k8s.io/kubernetes/pkg/apis/networking/types.go index 2109c3745268..ae37fcd5e261 100644 --- 
a/vendor/k8s.io/kubernetes/pkg/apis/networking/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/networking/types.go @@ -19,7 +19,7 @@ package networking import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) // +genclient diff --git a/vendor/k8s.io/kubernetes/pkg/apis/networking/v1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/networking/v1/BUILD index 50fae6587d27..a6237c850cf1 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/networking/v1/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/networking/v1/BUILD @@ -15,8 +15,9 @@ go_library( "zz_generated.conversion.go", "zz_generated.defaults.go", ], + importpath = "k8s.io/kubernetes/pkg/apis/networking/v1", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/networking:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/networking/v1:go_default_library", @@ -44,10 +45,11 @@ filegroup( go_test( name = "go_default_xtest", srcs = ["defaults_test.go"], + importpath = "k8s.io/kubernetes/pkg/apis/networking/v1_test", deps = [ ":go_default_library", - "//pkg/api:go_default_library", - "//pkg/api/install:go_default_library", + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/core/install:go_default_library", "//pkg/apis/networking/install:go_default_library", "//vendor/k8s.io/api/networking/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/networking/v1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/networking/v1/doc.go index 9464c0114895..8f088130e415 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/networking/v1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/networking/v1/doc.go @@ -15,7 +15,7 @@ limitations under the License. 
*/ // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/networking -// +k8s:conversion-gen-external-types=../../../../vendor/k8s.io/api/networking/v1 +// +k8s:conversion-gen-external-types=k8s.io/api/networking/v1 // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/extensions // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/networking/v1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/networking/v1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/networking/v1/zz_generated.conversion.go index 67132df2c115..0b1c29ddbf18 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/networking/v1/zz_generated.conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/networking/v1/zz_generated.conversion.go @@ -27,7 +27,7 @@ import ( conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" intstr "k8s.io/apimachinery/pkg/util/intstr" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" networking "k8s.io/kubernetes/pkg/apis/networking" unsafe "unsafe" ) @@ -198,7 +198,7 @@ func Convert_networking_NetworkPolicyPeer_To_v1_NetworkPolicyPeer(in *networking } func autoConvert_v1_NetworkPolicyPort_To_networking_NetworkPolicyPort(in *v1.NetworkPolicyPort, out *networking.NetworkPolicyPort, s conversion.Scope) error { - out.Protocol = (*api.Protocol)(unsafe.Pointer(in.Protocol)) + out.Protocol = (*core.Protocol)(unsafe.Pointer(in.Protocol)) out.Port = (*intstr.IntOrString)(unsafe.Pointer(in.Port)) return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/networking/validation/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/networking/validation/BUILD index 486af9391ae6..5f1ccf041744 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/networking/validation/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/networking/validation/BUILD @@ -9,9 +9,10 @@ load( go_test( name = "go_default_test", srcs = ["validation_test.go"], + importpath = "k8s.io/kubernetes/pkg/apis/networking/validation", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/networking:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", @@ -21,9 +22,10 @@ go_test( go_library( name = "go_default_library", srcs = ["validation.go"], + importpath = "k8s.io/kubernetes/pkg/apis/networking/validation", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/validation:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/validation:go_default_library", "//pkg/apis/networking:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/networking/validation/validation.go b/vendor/k8s.io/kubernetes/pkg/apis/networking/validation/validation.go index 49f638f91f07..9f0a04c81b2d 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/networking/validation/validation.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/networking/validation/validation.go @@ -17,15 +17,13 @@ limitations under the License. 
package validation import ( - "net" - unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation" "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/kubernetes/pkg/api" - apivalidation "k8s.io/kubernetes/pkg/api/validation" + api "k8s.io/kubernetes/pkg/apis/core" + apivalidation "k8s.io/kubernetes/pkg/apis/core/validation" "k8s.io/kubernetes/pkg/apis/networking" ) @@ -163,7 +161,7 @@ func ValidateIPBlock(ipb *networking.IPBlock, fldPath *field.Path) field.ErrorLi allErrs = append(allErrs, field.Required(fldPath.Child("cidr"), "")) return allErrs } - cidrIPNet, err := validateCIDR(ipb.CIDR) + cidrIPNet, err := apivalidation.ValidateCIDR(ipb.CIDR) if err != nil { allErrs = append(allErrs, field.Invalid(fldPath.Child("cidr"), ipb.CIDR, "not a valid CIDR")) return allErrs @@ -171,7 +169,7 @@ func ValidateIPBlock(ipb *networking.IPBlock, fldPath *field.Path) field.ErrorLi exceptCIDR := ipb.Except for i, exceptIP := range exceptCIDR { exceptPath := fldPath.Child("except").Index(i) - exceptCIDR, err := validateCIDR(exceptIP) + exceptCIDR, err := apivalidation.ValidateCIDR(exceptIP) if err != nil { allErrs = append(allErrs, field.Invalid(exceptPath, exceptIP, "not a valid CIDR")) return allErrs @@ -182,12 +180,3 @@ func ValidateIPBlock(ipb *networking.IPBlock, fldPath *field.Path) field.ErrorLi } return allErrs } - -// validateCIDR validates whether a CIDR matches the conventions expected by net.ParseCIDR -func validateCIDR(cidr string) (*net.IPNet, error) { - _, net, err := net.ParseCIDR(cidr) - if err != nil { - return nil, err - } - return net, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/networking/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/networking/zz_generated.deepcopy.go index da38fb5e8dbb..aa5958efb1db 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/networking/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/networking/zz_generated.deepcopy.go @@ -22,58 +22,11 @@ package networking import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" intstr "k8s.io/apimachinery/pkg/util/intstr" - api "k8s.io/kubernetes/pkg/api" - reflect "reflect" + core "k8s.io/kubernetes/pkg/apis/core" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
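// Editor's illustrative sketch (not part of the vendored diff): with the
// package-local validateCIDR helper removed above, ValidateIPBlock leans on the
// shared apivalidation.ValidateCIDR from pkg/apis/core/validation. The CIDR
// values below are arbitrary examples.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation/field"
	"k8s.io/kubernetes/pkg/apis/networking"
	"k8s.io/kubernetes/pkg/apis/networking/validation"
)

func main() {
	ipb := &networking.IPBlock{
		CIDR:   "10.0.0.0/16",
		Except: []string{"10.0.1.0/24"}, // expected to parse as a CIDR inside cidr
	}
	errs := validation.ValidateIPBlock(ipb, field.NewPath("ipBlock"))
	fmt.Println(len(errs)) // 0 for a well-formed block
}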
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IPBlock).DeepCopyInto(out.(*IPBlock)) - return nil - }, InType: reflect.TypeOf(&IPBlock{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicy).DeepCopyInto(out.(*NetworkPolicy)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicyEgressRule).DeepCopyInto(out.(*NetworkPolicyEgressRule)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicyEgressRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicyIngressRule).DeepCopyInto(out.(*NetworkPolicyIngressRule)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicyIngressRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicyList).DeepCopyInto(out.(*NetworkPolicyList)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicyList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicyPeer).DeepCopyInto(out.(*NetworkPolicyPeer)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicyPeer{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicyPort).DeepCopyInto(out.(*NetworkPolicyPort)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicyPort{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicySpec).DeepCopyInto(out.(*NetworkPolicySpec)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicySpec{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
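// Editor's illustrative sketch (not part of the vendored diff): with
// RegisterDeepCopies gone, callers use the statically generated methods
// directly; nothing needs to be registered on the scheme anymore.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/kubernetes/pkg/apis/networking"
)

func main() {
	policy := &networking.NetworkPolicy{}
	policy.Name = "allow-dns" // example name only

	clone := policy.DeepCopy() // typed deep copy
	clone.Name = "copy"

	var obj runtime.Object = policy
	_ = obj.DeepCopyObject() // copy through the runtime.Object interface

	fmt.Println(policy.Name, clone.Name) // allow-dns copy
}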
func (in *IPBlock) DeepCopyInto(out *IPBlock) { *out = *in @@ -268,7 +221,7 @@ func (in *NetworkPolicyPort) DeepCopyInto(out *NetworkPolicyPort) { if *in == nil { *out = nil } else { - *out = new(api.Protocol) + *out = new(core.Protocol) **out = **in } } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/policy/BUILD index 38f32cc79d68..86f104cff8d7 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/policy/BUILD @@ -13,9 +13,9 @@ go_library( "types.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/kubernetes/pkg/apis/policy", deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", @@ -35,7 +35,6 @@ filegroup( ":package-srcs", "//pkg/apis/policy/fuzzer:all-srcs", "//pkg/apis/policy/install:all-srcs", - "//pkg/apis/policy/v1alpha1:all-srcs", "//pkg/apis/policy/v1beta1:all-srcs", "//pkg/apis/policy/validation:all-srcs", ], diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/doc.go index 876858cd9a76..eb1342d30b6e 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/policy/doc.go @@ -14,6 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package package policy diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/install/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/policy/install/BUILD index d37deb82a8ab..6cecb39790cc 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/install/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/policy/install/BUILD @@ -8,8 +8,9 @@ load( go_library( name = "go_default_library", srcs = ["install.go"], + importpath = "k8s.io/kubernetes/pkg/apis/policy/install", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/policy:go_default_library", "//pkg/apis/policy/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/install/install.go index cf133df6b6ba..6c5703195b90 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/install/install.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/policy/install/install.go @@ -22,13 +22,13 @@ import ( "k8s.io/apimachinery/pkg/apimachinery/announced" "k8s.io/apimachinery/pkg/apimachinery/registered" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/policy" "k8s.io/kubernetes/pkg/apis/policy/v1beta1" ) func init() { - Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) + Install(legacyscheme.GroupFactoryRegistry, legacyscheme.Registry, legacyscheme.Scheme) } // Install registers the API group and adds types to a scheme diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/register.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/register.go index 5aadc3f1b8e1..80e834b385d4 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/register.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/policy/register.go @@ -42,7 +42,7 @@ var ( AddToScheme = 
SchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { // TODO this gets cleaned up when the types are fixed scheme.AddKnownTypes(SchemeGroupVersion, diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/BUILD index 24f9971dc52c..91f28a022675 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/BUILD @@ -13,6 +13,7 @@ go_library( "zz_generated.conversion.go", "zz_generated.defaults.go", ], + importpath = "k8s.io/kubernetes/pkg/apis/policy/v1beta1", deps = [ "//pkg/apis/policy:go_default_library", "//vendor/k8s.io/api/policy/v1beta1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/doc.go index afacd052773d..c3df0ab23581 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/doc.go @@ -15,7 +15,7 @@ limitations under the License. */ // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/policy -// +k8s:conversion-gen-external-types=../../../../vendor/k8s.io/api/policy/v1beta1 +// +k8s:conversion-gen-external-types=k8s.io/api/policy/v1beta1 // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/policy/v1beta1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/validation/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/policy/validation/BUILD index f61e12c25d4e..1f372c738c1e 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/validation/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/policy/validation/BUILD @@ -9,8 +9,9 @@ load( go_library( name = "go_default_library", srcs = ["validation.go"], + importpath = "k8s.io/kubernetes/pkg/apis/policy/validation", deps = [ - "//pkg/api/validation:go_default_library", + "//pkg/apis/core/validation:go_default_library", "//pkg/apis/extensions/validation:go_default_library", "//pkg/apis/policy:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation:go_default_library", @@ -21,6 +22,7 @@ go_library( go_test( name = "go_default_test", srcs = ["validation_test.go"], + importpath = "k8s.io/kubernetes/pkg/apis/policy/validation", library = ":go_default_library", deps = [ "//pkg/apis/policy:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/validation/validation.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/validation/validation.go index cb0e0506d742..cae1609de6e6 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/validation/validation.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/policy/validation/validation.go @@ -21,7 +21,7 @@ import ( unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" "k8s.io/apimachinery/pkg/util/validation/field" - apivalidation "k8s.io/kubernetes/pkg/api/validation" + apivalidation "k8s.io/kubernetes/pkg/apis/core/validation" extensionsvalidation "k8s.io/kubernetes/pkg/apis/extensions/validation" "k8s.io/kubernetes/pkg/apis/policy" ) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/zz_generated.deepcopy.go index 46dfdd8f4f0f..564329c60191 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/policy/zz_generated.deepcopy.go @@ -22,45 +22,10 @@ package policy import ( v1 
"k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" intstr "k8s.io/apimachinery/pkg/util/intstr" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Eviction).DeepCopyInto(out.(*Eviction)) - return nil - }, InType: reflect.TypeOf(&Eviction{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodDisruptionBudget).DeepCopyInto(out.(*PodDisruptionBudget)) - return nil - }, InType: reflect.TypeOf(&PodDisruptionBudget{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodDisruptionBudgetList).DeepCopyInto(out.(*PodDisruptionBudgetList)) - return nil - }, InType: reflect.TypeOf(&PodDisruptionBudgetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodDisruptionBudgetSpec).DeepCopyInto(out.(*PodDisruptionBudgetSpec)) - return nil - }, InType: reflect.TypeOf(&PodDisruptionBudgetSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodDisruptionBudgetStatus).DeepCopyInto(out.(*PodDisruptionBudgetStatus)) - return nil - }, InType: reflect.TypeOf(&PodDisruptionBudgetStatus{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Eviction) DeepCopyInto(out *Eviction) { *out = *in diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/rbac/BUILD index ce8db5f6b5fa..ef634f03a344 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/BUILD @@ -3,6 +3,7 @@ package(default_visibility = ["//visibility:public"]) load( "@io_bazel_rules_go//go:def.bzl", "go_library", + "go_test", ) go_library( @@ -14,9 +15,9 @@ go_library( "types.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/kubernetes/pkg/apis/rbac", deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", @@ -43,3 +44,17 @@ filegroup( ], tags = ["automanaged"], ) + +go_test( + name = "go_default_xtest", + srcs = ["helpers_test.go"], + importpath = "k8s.io/kubernetes/pkg/apis/rbac_test", + deps = [ + ":go_default_library", + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/rbac/install:go_default_library", + "//pkg/apis/rbac/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/doc.go index af9ffe20679b..058103f2c95d 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/doc.go @@ -14,6 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +groupName=rbac.authorization.k8s.io package rbac diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/helpers.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/helpers.go index 549aedc98fc7..4b1a1d3a0c71 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/helpers.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/helpers.go @@ -55,14 +55,29 @@ func APIGroupMatches(rule *PolicyRule, requestedGroup string) bool { return false } -func ResourceMatches(rule *PolicyRule, requestedResource string) bool { +func ResourceMatches(rule *PolicyRule, combinedRequestedResource, requestedSubresource string) bool { for _, ruleResource := range rule.Resources { + // if everything is allowed, we match if ruleResource == ResourceAll { return true } - if ruleResource == requestedResource { + // if we have an exact match, we match + if ruleResource == combinedRequestedResource { return true } + + // We can also match a */subresource. + // if there isn't a subresource, then continue + if len(requestedSubresource) == 0 { + continue + } + // if the rule isn't in the format */subresource, then we don't match, continue + if len(ruleResource) == len(requestedSubresource)+2 && + strings.HasPrefix(ruleResource, "*/") && + strings.HasSuffix(ruleResource, requestedSubresource) { + return true + + } } return false @@ -348,7 +363,7 @@ func NewRoleBindingForClusterRole(roleName, namespace string) *RoleBindingBuilde // Groups adds the specified groups as the subjects of the RoleBinding. 
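// Editor's illustrative sketch (not part of the vendored diff): behaviour of the
// new ResourceMatches signature above, which takes the combined resource string
// and the subresource separately so that "*/subresource" rules can match.
// "deployments/status" is just an example request.
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/rbac"
)

func main() {
	rule := &rbac.PolicyRule{Resources: []string{"*/status"}}

	// A "*/status" rule matches the status subresource of any resource.
	fmt.Println(rbac.ResourceMatches(rule, "deployments/status", "status")) // true

	// It does not match the parent resource itself.
	fmt.Println(rbac.ResourceMatches(rule, "deployments", "")) // false
}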
func (r *RoleBindingBuilder) Groups(groups ...string) *RoleBindingBuilder { for _, group := range groups { - r.RoleBinding.Subjects = append(r.RoleBinding.Subjects, Subject{Kind: GroupKind, Name: group}) + r.RoleBinding.Subjects = append(r.RoleBinding.Subjects, Subject{Kind: GroupKind, APIGroup: GroupName, Name: group}) } return r } @@ -356,7 +371,7 @@ func (r *RoleBindingBuilder) Groups(groups ...string) *RoleBindingBuilder { // Users adds the specified users as the subjects of the RoleBinding. func (r *RoleBindingBuilder) Users(users ...string) *RoleBindingBuilder { for _, user := range users { - r.RoleBinding.Subjects = append(r.RoleBinding.Subjects, Subject{Kind: UserKind, Name: user}) + r.RoleBinding.Subjects = append(r.RoleBinding.Subjects, Subject{Kind: UserKind, APIGroup: GroupName, Name: user}) } return r } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/install/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/rbac/install/BUILD index 8a1d8c5844c8..5c699a006c68 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/install/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/install/BUILD @@ -8,8 +8,9 @@ load( go_library( name = "go_default_library", srcs = ["install.go"], + importpath = "k8s.io/kubernetes/pkg/apis/rbac/install", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/rbac:go_default_library", "//pkg/apis/rbac/v1:go_default_library", "//pkg/apis/rbac/v1alpha1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/install/install.go index 6e9f47ac7e72..b03d8475cd4d 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/install/install.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/install/install.go @@ -23,7 +23,7 @@ import ( "k8s.io/apimachinery/pkg/apimachinery/registered" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/rbac" "k8s.io/kubernetes/pkg/apis/rbac/v1" "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1" @@ -31,7 +31,7 @@ import ( ) func init() { - Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) + Install(legacyscheme.GroupFactoryRegistry, legacyscheme.Registry, legacyscheme.Scheme) } // Install registers the API group and adds types to a scheme @@ -40,16 +40,9 @@ func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *r &announced.GroupMetaFactoryArgs{ GroupName: rbac.GroupName, // Rollout plan: - // 1.8: - // * announce deprecation of v1alpha1 (people should use v1beta1 or v1) - // 1.9 (once all version-skewed API servers in an HA cluster are capable of reading/writing v1 to etcd): - // * move v1 to the beginning - // * add RBAC objects to update-storage-objects.sh - // * update TestEtcdStoragePath to expect objects to be stored in v1 - // * document that RBAC storage objects should be migrated to ensure storage is a v1-level (via update-storage-objects.sh or otherwise) // 1.10 (once all stored objects are at v1): - // * remove v1alpha1 - VersionPreferenceOrder: []string{v1beta1.SchemeGroupVersion.Version, v1.SchemeGroupVersion.Version, v1alpha1.SchemeGroupVersion.Version}, + // * remove v1alpha1 (announced deprecated in 1.8) + VersionPreferenceOrder: []string{v1.SchemeGroupVersion.Version, v1beta1.SchemeGroupVersion.Version, v1alpha1.SchemeGroupVersion.Version}, RootScopedKinds: sets.NewString("ClusterRole", "ClusterRoleBinding"), AddInternalObjectsToScheme: rbac.AddToScheme, }, diff --git 
a/vendor/k8s.io/kubernetes/pkg/apis/rbac/register.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/register.go index f4a838bd89a0..4f232951512d 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/register.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = SchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Role{}, diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/types.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/types.go index 6e5ce60fb882..6fdd486d2423 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/types.go @@ -48,7 +48,8 @@ type PolicyRule struct { // APIGroups is the name of the APIGroup that contains the resources. // If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. APIGroups []string - // Resources is a list of resources this rule applies to. ResourceAll represents all resources. + // Resources is a list of resources this rule applies to. '*' represents all resources in the specified apiGroups. + // '*/foo' represents the subresource 'foo' for all resources in the specified apiGroups. Resources []string // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. ResourceNames []string @@ -154,6 +155,18 @@ type ClusterRole struct { // Rules holds all the PolicyRules for this ClusterRole Rules []PolicyRule + + // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. + // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be + // stomped by the controller. + AggregationRule *AggregationRule +} + +// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole +type AggregationRule struct { + // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. + // If any of the selectors match, then the ClusterRole's permissions will be added + ClusterRoleSelectors []metav1.LabelSelector } // +genclient diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1/BUILD index c6f661b3b975..5aa5b5fc768d 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1/BUILD @@ -15,6 +15,7 @@ go_library( "zz_generated.conversion.go", "zz_generated.defaults.go", ], + importpath = "k8s.io/kubernetes/pkg/apis/rbac/v1", deps = [ "//pkg/apis/rbac:go_default_library", "//vendor/k8s.io/api/rbac/v1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1/doc.go index 2d346aef9af2..f9ff145b67ec 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1/doc.go @@ -15,7 +15,7 @@ limitations under the License. 
*/ // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/rbac -// +k8s:conversion-gen-external-types=../../../../vendor/k8s.io/api/rbac/v1 +// +k8s:conversion-gen-external-types=k8s.io/api/rbac/v1 // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/rbac/v1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1/zz_generated.conversion.go index e5e668811e21..c46056a92633 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1/zz_generated.conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1/zz_generated.conversion.go @@ -22,6 +22,7 @@ package v1 import ( v1 "k8s.io/api/rbac/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" rbac "k8s.io/kubernetes/pkg/apis/rbac" @@ -36,6 +37,8 @@ func init() { // Public to allow building arbitrary schemes. func RegisterConversions(scheme *runtime.Scheme) error { return scheme.AddGeneratedConversionFuncs( + Convert_v1_AggregationRule_To_rbac_AggregationRule, + Convert_rbac_AggregationRule_To_v1_AggregationRule, Convert_v1_ClusterRole_To_rbac_ClusterRole, Convert_rbac_ClusterRole_To_v1_ClusterRole, Convert_v1_ClusterRoleBinding_To_rbac_ClusterRoleBinding, @@ -61,9 +64,30 @@ func RegisterConversions(scheme *runtime.Scheme) error { ) } +func autoConvert_v1_AggregationRule_To_rbac_AggregationRule(in *v1.AggregationRule, out *rbac.AggregationRule, s conversion.Scope) error { + out.ClusterRoleSelectors = *(*[]meta_v1.LabelSelector)(unsafe.Pointer(&in.ClusterRoleSelectors)) + return nil +} + +// Convert_v1_AggregationRule_To_rbac_AggregationRule is an autogenerated conversion function. +func Convert_v1_AggregationRule_To_rbac_AggregationRule(in *v1.AggregationRule, out *rbac.AggregationRule, s conversion.Scope) error { + return autoConvert_v1_AggregationRule_To_rbac_AggregationRule(in, out, s) +} + +func autoConvert_rbac_AggregationRule_To_v1_AggregationRule(in *rbac.AggregationRule, out *v1.AggregationRule, s conversion.Scope) error { + out.ClusterRoleSelectors = *(*[]meta_v1.LabelSelector)(unsafe.Pointer(&in.ClusterRoleSelectors)) + return nil +} + +// Convert_rbac_AggregationRule_To_v1_AggregationRule is an autogenerated conversion function. 
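// Editor's illustrative sketch (not part of the vendored diff): constructing an
// internal ClusterRole that uses the new AggregationRule field converted above.
// The role name and label key are hypothetical.
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/apis/rbac"
)

func main() {
	role := &rbac.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{Name: "monitoring"},
		// Rules are controller-managed when AggregationRule is set; direct
		// edits to Rules will be stomped by the controller.
		AggregationRule: &rbac.AggregationRule{
			ClusterRoleSelectors: []metav1.LabelSelector{
				{MatchLabels: map[string]string{"example.com/aggregate-to-monitoring": "true"}},
			},
		},
	}
	fmt.Println(role.Name, len(role.AggregationRule.ClusterRoleSelectors))
}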
+func Convert_rbac_AggregationRule_To_v1_AggregationRule(in *rbac.AggregationRule, out *v1.AggregationRule, s conversion.Scope) error { + return autoConvert_rbac_AggregationRule_To_v1_AggregationRule(in, out, s) +} + func autoConvert_v1_ClusterRole_To_rbac_ClusterRole(in *v1.ClusterRole, out *rbac.ClusterRole, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta out.Rules = *(*[]rbac.PolicyRule)(unsafe.Pointer(&in.Rules)) + out.AggregationRule = (*rbac.AggregationRule)(unsafe.Pointer(in.AggregationRule)) return nil } @@ -75,6 +99,7 @@ func Convert_v1_ClusterRole_To_rbac_ClusterRole(in *v1.ClusterRole, out *rbac.Cl func autoConvert_rbac_ClusterRole_To_v1_ClusterRole(in *rbac.ClusterRole, out *v1.ClusterRole, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta out.Rules = *(*[]v1.PolicyRule)(unsafe.Pointer(&in.Rules)) + out.AggregationRule = (*v1.AggregationRule)(unsafe.Pointer(in.AggregationRule)) return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/BUILD index a0bb58f106c8..fed81967eb73 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/BUILD @@ -17,6 +17,7 @@ go_library( "zz_generated.conversion.go", "zz_generated.defaults.go", ], + importpath = "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1", deps = [ "//pkg/apis/rbac:go_default_library", "//vendor/k8s.io/api/rbac/v1alpha1:go_default_library", @@ -30,8 +31,9 @@ go_library( go_test( name = "go_default_xtest", srcs = ["conversion_test.go"], + importpath = "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1_test", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/rbac:go_default_library", "//pkg/apis/rbac/install:go_default_library", "//vendor/k8s.io/api/rbac/v1alpha1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/doc.go index 9c95eb924c27..f952e27b7246 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/doc.go @@ -15,7 +15,7 @@ limitations under the License. */ // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/rbac -// +k8s:conversion-gen-external-types=../../../../vendor/k8s.io/api/rbac/v1alpha1 +// +k8s:conversion-gen-external-types=k8s.io/api/rbac/v1alpha1 // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/rbac/v1alpha1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/zz_generated.conversion.go index 4c52c5a78391..6076e0d3da2a 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/zz_generated.conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/zz_generated.conversion.go @@ -22,6 +22,7 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/rbac/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" rbac "k8s.io/kubernetes/pkg/apis/rbac" @@ -36,6 +37,8 @@ func init() { // Public to allow building arbitrary schemes. 
func RegisterConversions(scheme *runtime.Scheme) error { return scheme.AddGeneratedConversionFuncs( + Convert_v1alpha1_AggregationRule_To_rbac_AggregationRule, + Convert_rbac_AggregationRule_To_v1alpha1_AggregationRule, Convert_v1alpha1_ClusterRole_To_rbac_ClusterRole, Convert_rbac_ClusterRole_To_v1alpha1_ClusterRole, Convert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding, @@ -61,9 +64,30 @@ func RegisterConversions(scheme *runtime.Scheme) error { ) } +func autoConvert_v1alpha1_AggregationRule_To_rbac_AggregationRule(in *v1alpha1.AggregationRule, out *rbac.AggregationRule, s conversion.Scope) error { + out.ClusterRoleSelectors = *(*[]v1.LabelSelector)(unsafe.Pointer(&in.ClusterRoleSelectors)) + return nil +} + +// Convert_v1alpha1_AggregationRule_To_rbac_AggregationRule is an autogenerated conversion function. +func Convert_v1alpha1_AggregationRule_To_rbac_AggregationRule(in *v1alpha1.AggregationRule, out *rbac.AggregationRule, s conversion.Scope) error { + return autoConvert_v1alpha1_AggregationRule_To_rbac_AggregationRule(in, out, s) +} + +func autoConvert_rbac_AggregationRule_To_v1alpha1_AggregationRule(in *rbac.AggregationRule, out *v1alpha1.AggregationRule, s conversion.Scope) error { + out.ClusterRoleSelectors = *(*[]v1.LabelSelector)(unsafe.Pointer(&in.ClusterRoleSelectors)) + return nil +} + +// Convert_rbac_AggregationRule_To_v1alpha1_AggregationRule is an autogenerated conversion function. +func Convert_rbac_AggregationRule_To_v1alpha1_AggregationRule(in *rbac.AggregationRule, out *v1alpha1.AggregationRule, s conversion.Scope) error { + return autoConvert_rbac_AggregationRule_To_v1alpha1_AggregationRule(in, out, s) +} + func autoConvert_v1alpha1_ClusterRole_To_rbac_ClusterRole(in *v1alpha1.ClusterRole, out *rbac.ClusterRole, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta out.Rules = *(*[]rbac.PolicyRule)(unsafe.Pointer(&in.Rules)) + out.AggregationRule = (*rbac.AggregationRule)(unsafe.Pointer(in.AggregationRule)) return nil } @@ -75,6 +99,7 @@ func Convert_v1alpha1_ClusterRole_To_rbac_ClusterRole(in *v1alpha1.ClusterRole, func autoConvert_rbac_ClusterRole_To_v1alpha1_ClusterRole(in *rbac.ClusterRole, out *v1alpha1.ClusterRole, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta out.Rules = *(*[]v1alpha1.PolicyRule)(unsafe.Pointer(&in.Rules)) + out.AggregationRule = (*v1alpha1.AggregationRule)(unsafe.Pointer(in.AggregationRule)) return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/BUILD index 5e7c7656d1d9..2bd8a7d9af17 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/BUILD @@ -15,6 +15,7 @@ go_library( "zz_generated.conversion.go", "zz_generated.defaults.go", ], + importpath = "k8s.io/kubernetes/pkg/apis/rbac/v1beta1", deps = [ "//pkg/apis/rbac:go_default_library", "//vendor/k8s.io/api/rbac/v1beta1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/doc.go index 7aa581d58e82..dbe68ec2ed1f 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/doc.go @@ -15,7 +15,7 @@ limitations under the License. 
*/ // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/rbac -// +k8s:conversion-gen-external-types=../../../../vendor/k8s.io/api/rbac/v1beta1 +// +k8s:conversion-gen-external-types=k8s.io/api/rbac/v1beta1 // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/rbac/v1beta1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/zz_generated.conversion.go index d06b9265aa1a..d031b15fa34f 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/zz_generated.conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/zz_generated.conversion.go @@ -22,6 +22,7 @@ package v1beta1 import ( v1beta1 "k8s.io/api/rbac/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" rbac "k8s.io/kubernetes/pkg/apis/rbac" @@ -36,6 +37,8 @@ func init() { // Public to allow building arbitrary schemes. func RegisterConversions(scheme *runtime.Scheme) error { return scheme.AddGeneratedConversionFuncs( + Convert_v1beta1_AggregationRule_To_rbac_AggregationRule, + Convert_rbac_AggregationRule_To_v1beta1_AggregationRule, Convert_v1beta1_ClusterRole_To_rbac_ClusterRole, Convert_rbac_ClusterRole_To_v1beta1_ClusterRole, Convert_v1beta1_ClusterRoleBinding_To_rbac_ClusterRoleBinding, @@ -61,9 +64,30 @@ func RegisterConversions(scheme *runtime.Scheme) error { ) } +func autoConvert_v1beta1_AggregationRule_To_rbac_AggregationRule(in *v1beta1.AggregationRule, out *rbac.AggregationRule, s conversion.Scope) error { + out.ClusterRoleSelectors = *(*[]v1.LabelSelector)(unsafe.Pointer(&in.ClusterRoleSelectors)) + return nil +} + +// Convert_v1beta1_AggregationRule_To_rbac_AggregationRule is an autogenerated conversion function. +func Convert_v1beta1_AggregationRule_To_rbac_AggregationRule(in *v1beta1.AggregationRule, out *rbac.AggregationRule, s conversion.Scope) error { + return autoConvert_v1beta1_AggregationRule_To_rbac_AggregationRule(in, out, s) +} + +func autoConvert_rbac_AggregationRule_To_v1beta1_AggregationRule(in *rbac.AggregationRule, out *v1beta1.AggregationRule, s conversion.Scope) error { + out.ClusterRoleSelectors = *(*[]v1.LabelSelector)(unsafe.Pointer(&in.ClusterRoleSelectors)) + return nil +} + +// Convert_rbac_AggregationRule_To_v1beta1_AggregationRule is an autogenerated conversion function. 
+func Convert_rbac_AggregationRule_To_v1beta1_AggregationRule(in *rbac.AggregationRule, out *v1beta1.AggregationRule, s conversion.Scope) error { + return autoConvert_rbac_AggregationRule_To_v1beta1_AggregationRule(in, out, s) +} + func autoConvert_v1beta1_ClusterRole_To_rbac_ClusterRole(in *v1beta1.ClusterRole, out *rbac.ClusterRole, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta out.Rules = *(*[]rbac.PolicyRule)(unsafe.Pointer(&in.Rules)) + out.AggregationRule = (*rbac.AggregationRule)(unsafe.Pointer(in.AggregationRule)) return nil } @@ -75,6 +99,7 @@ func Convert_v1beta1_ClusterRole_To_rbac_ClusterRole(in *v1beta1.ClusterRole, ou func autoConvert_rbac_ClusterRole_To_v1beta1_ClusterRole(in *rbac.ClusterRole, out *v1beta1.ClusterRole, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta out.Rules = *(*[]v1beta1.PolicyRule)(unsafe.Pointer(&in.Rules)) + out.AggregationRule = (*v1beta1.AggregationRule)(unsafe.Pointer(in.AggregationRule)) return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/validation/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/rbac/validation/BUILD index 53d572ab31ff..3c34083071e4 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/validation/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/validation/BUILD @@ -9,10 +9,13 @@ load( go_library( name = "go_default_library", srcs = ["validation.go"], + importpath = "k8s.io/kubernetes/pkg/apis/rbac/validation", deps = [ - "//pkg/api/validation:go_default_library", + "//pkg/apis/core/validation:go_default_library", "//pkg/apis/rbac:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/validation/path:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", ], ) @@ -20,6 +23,7 @@ go_library( go_test( name = "go_default_test", srcs = ["validation_test.go"], + importpath = "k8s.io/kubernetes/pkg/apis/rbac/validation", library = ":go_default_library", deps = [ "//pkg/apis/rbac:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/validation/validation.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/validation/validation.go index 0fc3cb1fbc39..92fc5bdb15bc 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/validation/validation.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/validation/validation.go @@ -18,8 +18,10 @@ package validation import ( "k8s.io/apimachinery/pkg/api/validation/path" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/apis/core/validation" "k8s.io/kubernetes/pkg/apis/rbac" ) @@ -61,6 +63,22 @@ func ValidateClusterRole(role *rbac.ClusterRole) field.ErrorList { allErrs = append(allErrs, err...) } } + + if role.AggregationRule != nil { + if len(role.AggregationRule.ClusterRoleSelectors) == 0 { + allErrs = append(allErrs, field.Required(field.NewPath("aggregationRule", "clusterRoleSelectors"), "at least one clusterRoleSelector required if aggregationRule is non-nil")) + } + for i, selector := range role.AggregationRule.ClusterRoleSelectors { + fieldPath := field.NewPath("aggregationRule", "clusterRoleSelectors").Index(i) + allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(&selector, fieldPath)...) 
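// (editor's note) Beyond the structural ValidateLabelSelector check above, each
// selector must also convert cleanly via metav1.LabelSelectorAsSelector; a
// failure surfaces as a field.Invalid error on the same clusterRoleSelectors index.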
+ + selector, err := metav1.LabelSelectorAsSelector(&selector) + if err != nil { + allErrs = append(allErrs, field.Invalid(fieldPath, selector, "invalid label selector.")) + } + } + } + if len(allErrs) != 0 { return allErrs } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/zz_generated.deepcopy.go index ce11037427a0..8454f23086db 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/rbac/zz_generated.deepcopy.go @@ -21,66 +21,31 @@ limitations under the License. package rbac import ( - conversion "k8s.io/apimachinery/pkg/conversion" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterRole).DeepCopyInto(out.(*ClusterRole)) - return nil - }, InType: reflect.TypeOf(&ClusterRole{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterRoleBinding).DeepCopyInto(out.(*ClusterRoleBinding)) - return nil - }, InType: reflect.TypeOf(&ClusterRoleBinding{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterRoleBindingList).DeepCopyInto(out.(*ClusterRoleBindingList)) - return nil - }, InType: reflect.TypeOf(&ClusterRoleBindingList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterRoleList).DeepCopyInto(out.(*ClusterRoleList)) - return nil - }, InType: reflect.TypeOf(&ClusterRoleList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PolicyRule).DeepCopyInto(out.(*PolicyRule)) - return nil - }, InType: reflect.TypeOf(&PolicyRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Role).DeepCopyInto(out.(*Role)) - return nil - }, InType: reflect.TypeOf(&Role{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RoleBinding).DeepCopyInto(out.(*RoleBinding)) - return nil - }, InType: reflect.TypeOf(&RoleBinding{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RoleBindingList).DeepCopyInto(out.(*RoleBindingList)) - return nil - }, InType: reflect.TypeOf(&RoleBindingList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RoleList).DeepCopyInto(out.(*RoleList)) - return nil - }, InType: reflect.TypeOf(&RoleList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RoleRef).DeepCopyInto(out.(*RoleRef)) - return nil - }, InType: reflect.TypeOf(&RoleRef{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Subject).DeepCopyInto(out.(*Subject)) - return nil - }, 
InType: reflect.TypeOf(&Subject{})}, - ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AggregationRule) DeepCopyInto(out *AggregationRule) { + *out = *in + if in.ClusterRoleSelectors != nil { + in, out := &in.ClusterRoleSelectors, &out.ClusterRoleSelectors + *out = make([]v1.LabelSelector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AggregationRule. +func (in *AggregationRule) DeepCopy() *AggregationRule { + if in == nil { + return nil + } + out := new(AggregationRule) + in.DeepCopyInto(out) + return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -95,6 +60,15 @@ func (in *ClusterRole) DeepCopyInto(out *ClusterRole) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.AggregationRule != nil { + in, out := &in.AggregationRule, &out.AggregationRule + if *in == nil { + *out = nil + } else { + *out = new(AggregationRule) + (*in).DeepCopyInto(*out) + } + } return } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/scheduling/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/scheduling/BUILD index c32f8b22d054..58811a51216f 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/scheduling/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/scheduling/BUILD @@ -13,9 +13,9 @@ go_library( "types.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/kubernetes/pkg/apis/scheduling", deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/pkg/apis/scheduling/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/scheduling/doc.go index 72e6b77a09d0..81bb7a6d6d32 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/scheduling/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/scheduling/doc.go @@ -14,6 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +groupName=scheduling.k8s.io package scheduling diff --git a/vendor/k8s.io/kubernetes/pkg/apis/scheduling/install/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/scheduling/install/BUILD index 8d5557ef363e..b151648488f1 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/scheduling/install/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/scheduling/install/BUILD @@ -8,8 +8,9 @@ load( go_library( name = "go_default_library", srcs = ["install.go"], + importpath = "k8s.io/kubernetes/pkg/apis/scheduling/install", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/scheduling:go_default_library", "//pkg/apis/scheduling/v1alpha1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/scheduling/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/scheduling/install/install.go index d35d47058436..0a47a8815ce7 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/scheduling/install/install.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/scheduling/install/install.go @@ -23,13 +23,13 @@ import ( "k8s.io/apimachinery/pkg/apimachinery/registered" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/scheduling" "k8s.io/kubernetes/pkg/apis/scheduling/v1alpha1" ) func init() { - Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) + Install(legacyscheme.GroupFactoryRegistry, legacyscheme.Registry, legacyscheme.Scheme) } // Install registers the API group and adds types to a scheme diff --git a/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1alpha1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1alpha1/BUILD index b9ed61b3d112..04dcd69cb20e 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1alpha1/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1alpha1/BUILD @@ -13,6 +13,7 @@ go_library( "zz_generated.conversion.go", "zz_generated.defaults.go", ], + importpath = "k8s.io/kubernetes/pkg/apis/scheduling/v1alpha1", deps = [ "//pkg/apis/scheduling:go_default_library", "//vendor/k8s.io/api/scheduling/v1alpha1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1alpha1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1alpha1/doc.go index 2fa5a8272cb4..12b9ac62f016 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1alpha1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/scheduling/v1alpha1/doc.go @@ -15,7 +15,7 @@ limitations under the License. 
*/ // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/scheduling -// +k8s:conversion-gen-external-types=../../../../vendor/k8s.io/api/scheduling/v1alpha1 +// +k8s:conversion-gen-external-types=k8s.io/api/scheduling/v1alpha1 // +groupName=scheduling.k8s.io // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/scheduling/v1alpha1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/scheduling/validation/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/scheduling/validation/BUILD index 388caf9afa75..fc4680377eeb 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/scheduling/validation/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/scheduling/validation/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["validation_test.go"], + importpath = "k8s.io/kubernetes/pkg/apis/scheduling/validation", library = ":go_default_library", deps = [ "//pkg/apis/scheduling:go_default_library", @@ -20,8 +21,9 @@ go_test( go_library( name = "go_default_library", srcs = ["validation.go"], + importpath = "k8s.io/kubernetes/pkg/apis/scheduling/validation", deps = [ - "//pkg/api/validation:go_default_library", + "//pkg/apis/core/validation:go_default_library", "//pkg/apis/scheduling:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/pkg/apis/scheduling/validation/validation.go b/vendor/k8s.io/kubernetes/pkg/apis/scheduling/validation/validation.go index 0c32fe2513df..f4fac9d9ceac 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/scheduling/validation/validation.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/scheduling/validation/validation.go @@ -18,7 +18,7 @@ package validation import ( "k8s.io/apimachinery/pkg/util/validation/field" - apivalidation "k8s.io/kubernetes/pkg/api/validation" + apivalidation "k8s.io/kubernetes/pkg/apis/core/validation" "k8s.io/kubernetes/pkg/apis/scheduling" ) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/scheduling/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/scheduling/zz_generated.deepcopy.go index 9eb362fad5f1..7eb3fea06728 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/scheduling/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/scheduling/zz_generated.deepcopy.go @@ -21,32 +21,9 @@ limitations under the License. package scheduling import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PriorityClass).DeepCopyInto(out.(*PriorityClass)) - return nil - }, InType: reflect.TypeOf(&PriorityClass{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PriorityClassList).DeepCopyInto(out.(*PriorityClassList)) - return nil - }, InType: reflect.TypeOf(&PriorityClassList{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PriorityClass) DeepCopyInto(out *PriorityClass) { *out = *in diff --git a/vendor/k8s.io/kubernetes/pkg/apis/settings/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/settings/BUILD index e328e7d02dfa..2e9f40d277aa 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/settings/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/settings/BUILD @@ -13,10 +13,10 @@ go_library( "types.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/kubernetes/pkg/apis/settings", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/pkg/apis/settings/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/settings/doc.go index 7ff0cd047ae4..1c690e460d91 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/settings/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/settings/doc.go @@ -14,6 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +groupName=settings.k8s.io package settings diff --git a/vendor/k8s.io/kubernetes/pkg/apis/settings/install/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/settings/install/BUILD index 1a91d59d3325..9a6ffe726acf 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/settings/install/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/settings/install/BUILD @@ -8,8 +8,9 @@ load( go_library( name = "go_default_library", srcs = ["install.go"], + importpath = "k8s.io/kubernetes/pkg/apis/settings/install", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/settings:go_default_library", "//pkg/apis/settings/v1alpha1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/settings/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/settings/install/install.go index fdaf7d2c37e5..bf4ae5c57963 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/settings/install/install.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/settings/install/install.go @@ -22,13 +22,13 @@ import ( "k8s.io/apimachinery/pkg/apimachinery/announced" "k8s.io/apimachinery/pkg/apimachinery/registered" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/settings" "k8s.io/kubernetes/pkg/apis/settings/v1alpha1" ) func init() { - Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) + Install(legacyscheme.GroupFactoryRegistry, legacyscheme.Registry, legacyscheme.Scheme) } // Install registers the API group and adds types to a scheme diff --git a/vendor/k8s.io/kubernetes/pkg/apis/settings/register.go b/vendor/k8s.io/kubernetes/pkg/apis/settings/register.go index 858470127931..7b144b7dfc8d 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/settings/register.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/settings/register.go @@ -42,7 +42,7 @@ func Resource(resource string) schema.GroupResource { return SchemeGroupVersion.WithResource(resource).GroupResource() } -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. 
func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &PodPreset{}, diff --git a/vendor/k8s.io/kubernetes/pkg/apis/settings/types.go b/vendor/k8s.io/kubernetes/pkg/apis/settings/types.go index 1087f00f805d..876c8025532e 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/settings/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/settings/types.go @@ -18,7 +18,7 @@ package settings import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) // +genclient diff --git a/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/BUILD index f3b1c5b333ea..7869c4fad8e7 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/BUILD @@ -13,9 +13,10 @@ go_library( "zz_generated.conversion.go", "zz_generated.defaults.go", ], + importpath = "k8s.io/kubernetes/pkg/apis/settings/v1alpha1", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/v1:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/v1:go_default_library", "//pkg/apis/settings:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/settings/v1alpha1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/doc.go index 989ac772b8e8..b8fea41340b2 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/doc.go @@ -15,7 +15,7 @@ limitations under the License. */ // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/settings -// +k8s:conversion-gen-external-types=../../../../vendor/k8s.io/api/settings/v1alpha1 +// +k8s:conversion-gen-external-types=k8s.io/api/settings/v1alpha1 // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/settings/v1alpha1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/zz_generated.conversion.go index 396102ecd843..de7bb8672ffb 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/zz_generated.conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/zz_generated.conversion.go @@ -25,7 +25,7 @@ import ( v1alpha1 "k8s.io/api/settings/v1alpha1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" settings "k8s.io/kubernetes/pkg/apis/settings" unsafe "unsafe" ) @@ -117,11 +117,11 @@ func Convert_settings_PodPresetList_To_v1alpha1_PodPresetList(in *settings.PodPr func autoConvert_v1alpha1_PodPresetSpec_To_settings_PodPresetSpec(in *v1alpha1.PodPresetSpec, out *settings.PodPresetSpec, s conversion.Scope) error { out.Selector = in.Selector - out.Env = *(*[]api.EnvVar)(unsafe.Pointer(&in.Env)) - out.EnvFrom = *(*[]api.EnvFromSource)(unsafe.Pointer(&in.EnvFrom)) + out.Env = *(*[]core.EnvVar)(unsafe.Pointer(&in.Env)) + out.EnvFrom = *(*[]core.EnvFromSource)(unsafe.Pointer(&in.EnvFrom)) if in.Volumes != nil { in, out := &in.Volumes, &out.Volumes - *out = make([]api.Volume, len(*in)) + *out = make([]core.Volume, len(*in)) for i := range *in { // TODO: Inefficient conversion - can we improve it? 
if err := s.Convert(&(*in)[i], &(*out)[i], 0); err != nil { @@ -131,7 +131,7 @@ func autoConvert_v1alpha1_PodPresetSpec_To_settings_PodPresetSpec(in *v1alpha1.P } else { out.Volumes = nil } - out.VolumeMounts = *(*[]api.VolumeMount)(unsafe.Pointer(&in.VolumeMounts)) + out.VolumeMounts = *(*[]core.VolumeMount)(unsafe.Pointer(&in.VolumeMounts)) return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/zz_generated.defaults.go index b2fa4af7d3e8..af5867ce3a4c 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/zz_generated.defaults.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/zz_generated.defaults.go @@ -23,7 +23,7 @@ package v1alpha1 import ( v1alpha1 "k8s.io/api/settings/v1alpha1" runtime "k8s.io/apimachinery/pkg/runtime" - v1 "k8s.io/kubernetes/pkg/api/v1" + v1 "k8s.io/kubernetes/pkg/apis/core/v1" ) // RegisterDefaults adds defaulters functions to the given scheme. diff --git a/vendor/k8s.io/kubernetes/pkg/apis/settings/validation/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/settings/validation/BUILD index e825fca8492e..f0e6d67f00c7 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/settings/validation/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/settings/validation/BUILD @@ -9,9 +9,10 @@ load( go_test( name = "go_default_test", srcs = ["validation_test.go"], + importpath = "k8s.io/kubernetes/pkg/apis/settings/validation", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/settings:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", ], @@ -20,8 +21,9 @@ go_test( go_library( name = "go_default_library", srcs = ["validation.go"], + importpath = "k8s.io/kubernetes/pkg/apis/settings/validation", deps = [ - "//pkg/api/validation:go_default_library", + "//pkg/apis/core/validation:go_default_library", "//pkg/apis/settings:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/settings/validation/validation.go b/vendor/k8s.io/kubernetes/pkg/apis/settings/validation/validation.go index 6f02781ff497..50ac75aab444 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/settings/validation/validation.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/settings/validation/validation.go @@ -19,7 +19,7 @@ package validation import ( unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" "k8s.io/apimachinery/pkg/util/validation/field" - apivalidation "k8s.io/kubernetes/pkg/api/validation" + apivalidation "k8s.io/kubernetes/pkg/apis/core/validation" "k8s.io/kubernetes/pkg/apis/settings" ) @@ -43,11 +43,11 @@ func ValidatePodPresetSpec(spec *settings.PodPresetSpec, fldPath *field.Path) fi allErrs = append(allErrs, field.Required(fldPath.Child("volumes", "env", "envFrom", "volumeMounts"), "must specify at least one")) } - volumes, vErrs := apivalidation.ValidateVolumes(spec.Volumes, fldPath.Child("volumes")) + vols, vErrs := apivalidation.ValidateVolumes(spec.Volumes, fldPath.Child("volumes")) allErrs = append(allErrs, vErrs...) allErrs = append(allErrs, apivalidation.ValidateEnv(spec.Env, fldPath.Child("env"))...) allErrs = append(allErrs, apivalidation.ValidateEnvFrom(spec.EnvFrom, fldPath.Child("envFrom"))...) 
- allErrs = append(allErrs, apivalidation.ValidateVolumeMounts(spec.VolumeMounts, volumes, nil, fldPath.Child("volumeMounts"))...) + allErrs = append(allErrs, apivalidation.ValidateVolumeMounts(spec.VolumeMounts, nil, vols, nil, fldPath.Child("volumeMounts"))...) return allErrs } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/settings/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/settings/zz_generated.deepcopy.go index a6d188a3986b..742ebb506328 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/settings/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/settings/zz_generated.deepcopy.go @@ -21,37 +21,10 @@ limitations under the License. package settings import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - api "k8s.io/kubernetes/pkg/api" - reflect "reflect" + core "k8s.io/kubernetes/pkg/apis/core" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodPreset).DeepCopyInto(out.(*PodPreset)) - return nil - }, InType: reflect.TypeOf(&PodPreset{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodPresetList).DeepCopyInto(out.(*PodPresetList)) - return nil - }, InType: reflect.TypeOf(&PodPresetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodPresetSpec).DeepCopyInto(out.(*PodPresetSpec)) - return nil - }, InType: reflect.TypeOf(&PodPresetSpec{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PodPreset) DeepCopyInto(out *PodPreset) { *out = *in @@ -120,28 +93,28 @@ func (in *PodPresetSpec) DeepCopyInto(out *PodPresetSpec) { in.Selector.DeepCopyInto(&out.Selector) if in.Env != nil { in, out := &in.Env, &out.Env - *out = make([]api.EnvVar, len(*in)) + *out = make([]core.EnvVar, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.EnvFrom != nil { in, out := &in.EnvFrom, &out.EnvFrom - *out = make([]api.EnvFromSource, len(*in)) + *out = make([]core.EnvFromSource, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.Volumes != nil { in, out := &in.Volumes, &out.Volumes - *out = make([]api.Volume, len(*in)) + *out = make([]core.Volume, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.VolumeMounts != nil { in, out := &in.VolumeMounts, &out.VolumeMounts - *out = make([]api.VolumeMount, len(*in)) + *out = make([]core.VolumeMount, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/storage/BUILD index cf4b841ba874..b8d3d38ef27c 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/storage/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/BUILD @@ -13,10 +13,10 @@ go_library( "types.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/kubernetes/pkg/apis/storage", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], @@ -37,6 +37,7 @@ filegroup( "//pkg/apis/storage/install:all-srcs", "//pkg/apis/storage/util:all-srcs", "//pkg/apis/storage/v1:all-srcs", + "//pkg/apis/storage/v1alpha1:all-srcs", "//pkg/apis/storage/v1beta1:all-srcs", "//pkg/apis/storage/validation:all-srcs", ], diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/doc.go index a7eb30b643bd..1f3c23f60633 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/storage/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/doc.go @@ -14,6 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +groupName=storage.k8s.io package storage diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/install/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/storage/install/BUILD index 6bb4fbb9ecd5..798341862f4c 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/storage/install/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/install/BUILD @@ -8,10 +8,12 @@ load( go_library( name = "go_default_library", srcs = ["install.go"], + importpath = "k8s.io/kubernetes/pkg/apis/storage/install", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/storage:go_default_library", "//pkg/apis/storage/v1:go_default_library", + "//pkg/apis/storage/v1alpha1:go_default_library", "//pkg/apis/storage/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/install/install.go index 6c645feb634b..28fa78659682 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/storage/install/install.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/install/install.go @@ -23,28 +23,33 @@ import ( "k8s.io/apimachinery/pkg/apimachinery/registered" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/storage" "k8s.io/kubernetes/pkg/apis/storage/v1" + "k8s.io/kubernetes/pkg/apis/storage/v1alpha1" "k8s.io/kubernetes/pkg/apis/storage/v1beta1" ) func init() { - Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) + Install(legacyscheme.GroupFactoryRegistry, legacyscheme.Registry, legacyscheme.Scheme) } // Install registers the API group and adds types to a scheme func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { if err := announced.NewGroupMetaFactory( &announced.GroupMetaFactoryArgs{ - GroupName: storage.GroupName, - VersionPreferenceOrder: []string{v1.SchemeGroupVersion.Version, v1beta1.SchemeGroupVersion.Version}, - RootScopedKinds: sets.NewString("StorageClass"), + GroupName: storage.GroupName, + VersionPreferenceOrder: []string{v1.SchemeGroupVersion.Version, v1beta1.SchemeGroupVersion.Version, v1alpha1.SchemeGroupVersion.Version}, + RootScopedKinds: sets.NewString( + "StorageClass", + "VolumeAttachment", + ), AddInternalObjectsToScheme: storage.AddToScheme, }, announced.VersionToSchemeFunc{ - v1.SchemeGroupVersion.Version: v1.AddToScheme, - v1beta1.SchemeGroupVersion.Version: v1beta1.AddToScheme, + v1.SchemeGroupVersion.Version: v1.AddToScheme, + v1beta1.SchemeGroupVersion.Version: v1beta1.AddToScheme, + v1alpha1.SchemeGroupVersion.Version: v1alpha1.AddToScheme, }, ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { panic(err) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/register.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/register.go index aaa619b4d97d..7ae2f3efe184 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/storage/register.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/register.go @@ -46,6 +46,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &StorageClass{}, &StorageClassList{}, + &VolumeAttachment{}, + &VolumeAttachmentList{}, ) return nil } diff --git 
a/vendor/k8s.io/kubernetes/pkg/apis/storage/types.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/types.go index 1af94b2d745b..e44b327d381a 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/storage/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/types.go @@ -18,7 +18,7 @@ package storage import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) // +genclient @@ -65,6 +65,13 @@ type StorageClass struct { // for all PVs created from this storageclass. // +optional AllowVolumeExpansion *bool + + // VolumeBindingMode indicates how PersistentVolumeClaims should be + // provisioned and bound. When unset, VolumeBindingImmediate is used. + // This field is alpha-level and is only honored by servers that enable + // the VolumeScheduling feature. + // +optional + VolumeBindingMode *VolumeBindingMode } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -80,3 +87,125 @@ type StorageClassList struct { // Items is the list of StorageClasses Items []StorageClass } + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Captures the intent to attach or detach the specified volume to/from +// the specified node. +// +// VolumeAttachment objects are non-namespaced. +type VolumeAttachment struct { + metav1.TypeMeta + + // Standard object metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta + + // Specification of the desired attach/detach volume behavior. + // Populated by the Kubernetes system. + Spec VolumeAttachmentSpec + + // Status of the VolumeAttachment request. + // Populated by the entity completing the attach or detach + // operation, i.e. the external-attacher. + // +optional + Status VolumeAttachmentStatus +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeAttachmentList is a collection of VolumeAttachment objects. +type VolumeAttachmentList struct { + metav1.TypeMeta + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta + + // Items is the list of VolumeAttachments + Items []VolumeAttachment +} + +// The specification of a VolumeAttachment request. +type VolumeAttachmentSpec struct { + // Attacher indicates the name of the volume driver that MUST handle this + // request. This is the name returned by GetPluginName(). + Attacher string + + // Source represents the volume that should be attached. + Source VolumeAttachmentSource + + // The node that the volume should be attached to. + NodeName string +} + +// VolumeAttachmentSource represents a volume that should be attached. +// Right now only PersistentVolumes can be attached via an external attacher; +// in the future we may also allow inline volumes in pods. +// Exactly one member can be set. +type VolumeAttachmentSource struct { + // Name of the persistent volume to attach. + // +optional + PersistentVolumeName *string + + // Placeholder for *VolumeSource to accommodate inline volumes in pods. +} + +// The status of a VolumeAttachment request. +type VolumeAttachmentStatus struct { + // Indicates the volume is successfully attached. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher.
+ Attached bool + + // Upon successful attach, this field is populated with any + // information returned by the attach operation that must be passed + // into subsequent WaitForAttach or Mount calls. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + // +optional + AttachmentMetadata map[string]string + + // The last error encountered during attach operation, if any. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + // +optional + AttachError *VolumeError + + // The last error encountered during detach operation, if any. + // This field must only be set by the entity completing the detach + // operation, i.e. the external-attacher. + // +optional + DetachError *VolumeError +} + +// Captures an error encountered during a volume operation. +type VolumeError struct { + // Time the error was encountered. + // +optional + Time metav1.Time + + // String detailing the error encountered during Attach or Detach operation. + // This string may be logged, so it should not contain sensitive + // information. + // +optional + Message string +} + +// VolumeBindingMode indicates how PersistentVolumeClaims should be bound. +type VolumeBindingMode string + +const ( + // VolumeBindingImmediate indicates that PersistentVolumeClaims should be + // immediately provisioned and bound. + VolumeBindingImmediate VolumeBindingMode = "Immediate" + + // VolumeBindingWaitForFirstConsumer indicates that PersistentVolumeClaims + // should not be provisioned and bound until the first Pod is created that + // references the PersistentVolumeClaim. The volume provisioning and + // binding will occur during Pod scheduling. + VolumeBindingWaitForFirstConsumer VolumeBindingMode = "WaitForFirstConsumer" +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/util/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/storage/util/BUILD index 451f7221a1f8..490e83721491 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/storage/util/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/util/BUILD @@ -3,12 +3,22 @@ package(default_visibility = ["//visibility:public"]) load( "@io_bazel_rules_go//go:def.bzl", "go_library", + "go_test", ) go_library( name = "go_default_library", - srcs = ["helpers.go"], - deps = ["//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library"], + srcs = [ + "helpers.go", + "util.go", + ], + importpath = "k8s.io/kubernetes/pkg/apis/storage/util", + deps = [ + "//pkg/apis/storage:go_default_library", + "//pkg/features:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", + ], ) filegroup( @@ -23,3 +33,14 @@ filegroup( srcs = [":package-srcs"], tags = ["automanaged"], ) + +go_test( + name = "go_default_test", + srcs = ["util_test.go"], + importpath = "k8s.io/kubernetes/pkg/apis/storage/util", + library = ":go_default_library", + deps = [ + "//pkg/apis/storage:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/util/util.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/util/util.go new file mode 100644 index 000000000000..9d1340435635 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/util/util.go @@ -0,0 +1,30 @@ +/* +Copyright 2017 The Kubernetes Authors.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/kubernetes/pkg/apis/storage" + "k8s.io/kubernetes/pkg/features" +) + +// DropDisabledAlphaFields removes disabled fields from the StorageClass object. +func DropDisabledAlphaFields(class *storage.StorageClass) { + if !utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) { + class.VolumeBindingMode = nil + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/BUILD index fc6190e1a6e4..aeb7e17d2a77 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/BUILD @@ -3,6 +3,7 @@ package(default_visibility = ["//visibility:public"]) load( "@io_bazel_rules_go//go:def.bzl", "go_library", + "go_test", ) go_library( @@ -14,14 +15,17 @@ go_library( "zz_generated.conversion.go", "zz_generated.defaults.go", ], + importpath = "k8s.io/kubernetes/pkg/apis/storage/v1", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/storage:go_default_library", + "//pkg/features:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/storage/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", ], ) @@ -40,3 +44,16 @@ filegroup( ], tags = ["automanaged"], ) + +go_test( + name = "go_default_xtest", + srcs = ["defaults_test.go"], + importpath = "k8s.io/kubernetes/pkg/apis/storage/v1_test", + deps = [ + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/storage/install:go_default_library", + "//vendor/k8s.io/api/storage/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/defaults.go index 2e7c51c632e2..6f574f4867cb 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/defaults.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/defaults.go @@ -20,6 +20,8 @@ import ( "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/runtime" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/kubernetes/pkg/features" ) func addDefaultingFuncs(scheme *runtime.Scheme) error { @@ -31,4 +33,9 @@ func SetDefaults_StorageClass(obj *storagev1.StorageClass) { obj.ReclaimPolicy = new(v1.PersistentVolumeReclaimPolicy) *obj.ReclaimPolicy = v1.PersistentVolumeReclaimDelete } + + if obj.VolumeBindingMode == nil && utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) { + obj.VolumeBindingMode = new(storagev1.VolumeBindingMode) + *obj.VolumeBindingMode = storagev1.VolumeBindingImmediate + } } diff --git 
a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/doc.go index 3a5b7ac2dd16..617aa14c1aa0 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/doc.go @@ -15,7 +15,7 @@ limitations under the License. */ // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/storage -// +k8s:conversion-gen-external-types=../../../../vendor/k8s.io/api/storage/v1 +// +k8s:conversion-gen-external-types=k8s.io/api/storage/v1 // +groupName=storage.k8s.io // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/storage/v1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/zz_generated.conversion.go index 59594d265194..1e63af36e67a 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/zz_generated.conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/zz_generated.conversion.go @@ -25,7 +25,7 @@ import ( v1 "k8s.io/api/storage/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" storage "k8s.io/kubernetes/pkg/apis/storage" unsafe "unsafe" ) @@ -49,9 +49,10 @@ func autoConvert_v1_StorageClass_To_storage_StorageClass(in *v1.StorageClass, ou out.ObjectMeta = in.ObjectMeta out.Provisioner = in.Provisioner out.Parameters = *(*map[string]string)(unsafe.Pointer(&in.Parameters)) - out.ReclaimPolicy = (*api.PersistentVolumeReclaimPolicy)(unsafe.Pointer(in.ReclaimPolicy)) + out.ReclaimPolicy = (*core.PersistentVolumeReclaimPolicy)(unsafe.Pointer(in.ReclaimPolicy)) out.MountOptions = *(*[]string)(unsafe.Pointer(&in.MountOptions)) out.AllowVolumeExpansion = (*bool)(unsafe.Pointer(in.AllowVolumeExpansion)) + out.VolumeBindingMode = (*storage.VolumeBindingMode)(unsafe.Pointer(in.VolumeBindingMode)) return nil } @@ -67,6 +68,7 @@ func autoConvert_storage_StorageClass_To_v1_StorageClass(in *storage.StorageClas out.ReclaimPolicy = (*core_v1.PersistentVolumeReclaimPolicy)(unsafe.Pointer(in.ReclaimPolicy)) out.MountOptions = *(*[]string)(unsafe.Pointer(&in.MountOptions)) out.AllowVolumeExpansion = (*bool)(unsafe.Pointer(in.AllowVolumeExpansion)) + out.VolumeBindingMode = (*v1.VolumeBindingMode)(unsafe.Pointer(in.VolumeBindingMode)) return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1alpha1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1alpha1/BUILD new file mode 100644 index 000000000000..6617d6a97ea9 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1alpha1/BUILD @@ -0,0 +1,34 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "register.go", + "zz_generated.conversion.go", + "zz_generated.defaults.go", + ], + importpath = "k8s.io/kubernetes/pkg/apis/storage/v1alpha1", + visibility = ["//visibility:public"], + deps = [ + "//pkg/apis/storage:go_default_library", + "//vendor/k8s.io/api/storage/v1alpha1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = 
["//visibility:public"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1alpha1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1alpha1/doc.go new file mode 100644 index 000000000000..47670b19883d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1alpha1/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/storage +// +k8s:conversion-gen-external-types=k8s.io/api/storage/v1alpha1 +// +groupName=storage.k8s.io +// +k8s:defaulter-gen=TypeMeta +// +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/storage/v1alpha1 +package v1alpha1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1alpha1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1alpha1/register.go new file mode 100644 index 000000000000..699fab12d5be --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1alpha1/register.go @@ -0,0 +1,38 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + storagev1alpha1 "k8s.io/api/storage/v1alpha1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name used in this package +const GroupName = "storage.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + localSchemeBuilder = &storagev1alpha1.SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1alpha1/zz_generated.conversion.go new file mode 100644 index 000000000000..9bea7b3ad91f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1alpha1/zz_generated.conversion.go @@ -0,0 +1,202 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/storage/v1alpha1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + storage "k8s.io/kubernetes/pkg/apis/storage" + unsafe "unsafe" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1alpha1_VolumeAttachment_To_storage_VolumeAttachment, + Convert_storage_VolumeAttachment_To_v1alpha1_VolumeAttachment, + Convert_v1alpha1_VolumeAttachmentList_To_storage_VolumeAttachmentList, + Convert_storage_VolumeAttachmentList_To_v1alpha1_VolumeAttachmentList, + Convert_v1alpha1_VolumeAttachmentSource_To_storage_VolumeAttachmentSource, + Convert_storage_VolumeAttachmentSource_To_v1alpha1_VolumeAttachmentSource, + Convert_v1alpha1_VolumeAttachmentSpec_To_storage_VolumeAttachmentSpec, + Convert_storage_VolumeAttachmentSpec_To_v1alpha1_VolumeAttachmentSpec, + Convert_v1alpha1_VolumeAttachmentStatus_To_storage_VolumeAttachmentStatus, + Convert_storage_VolumeAttachmentStatus_To_v1alpha1_VolumeAttachmentStatus, + Convert_v1alpha1_VolumeError_To_storage_VolumeError, + Convert_storage_VolumeError_To_v1alpha1_VolumeError, + ) +} + +func autoConvert_v1alpha1_VolumeAttachment_To_storage_VolumeAttachment(in *v1alpha1.VolumeAttachment, out *storage.VolumeAttachment, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha1_VolumeAttachmentSpec_To_storage_VolumeAttachmentSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1alpha1_VolumeAttachmentStatus_To_storage_VolumeAttachmentStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_VolumeAttachment_To_storage_VolumeAttachment is an autogenerated conversion function. +func Convert_v1alpha1_VolumeAttachment_To_storage_VolumeAttachment(in *v1alpha1.VolumeAttachment, out *storage.VolumeAttachment, s conversion.Scope) error { + return autoConvert_v1alpha1_VolumeAttachment_To_storage_VolumeAttachment(in, out, s) +} + +func autoConvert_storage_VolumeAttachment_To_v1alpha1_VolumeAttachment(in *storage.VolumeAttachment, out *v1alpha1.VolumeAttachment, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_storage_VolumeAttachmentSpec_To_v1alpha1_VolumeAttachmentSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_storage_VolumeAttachmentStatus_To_v1alpha1_VolumeAttachmentStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_storage_VolumeAttachment_To_v1alpha1_VolumeAttachment is an autogenerated conversion function. 
+func Convert_storage_VolumeAttachment_To_v1alpha1_VolumeAttachment(in *storage.VolumeAttachment, out *v1alpha1.VolumeAttachment, s conversion.Scope) error { + return autoConvert_storage_VolumeAttachment_To_v1alpha1_VolumeAttachment(in, out, s) +} + +func autoConvert_v1alpha1_VolumeAttachmentList_To_storage_VolumeAttachmentList(in *v1alpha1.VolumeAttachmentList, out *storage.VolumeAttachmentList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]storage.VolumeAttachment)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha1_VolumeAttachmentList_To_storage_VolumeAttachmentList is an autogenerated conversion function. +func Convert_v1alpha1_VolumeAttachmentList_To_storage_VolumeAttachmentList(in *v1alpha1.VolumeAttachmentList, out *storage.VolumeAttachmentList, s conversion.Scope) error { + return autoConvert_v1alpha1_VolumeAttachmentList_To_storage_VolumeAttachmentList(in, out, s) +} + +func autoConvert_storage_VolumeAttachmentList_To_v1alpha1_VolumeAttachmentList(in *storage.VolumeAttachmentList, out *v1alpha1.VolumeAttachmentList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1alpha1.VolumeAttachment)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_storage_VolumeAttachmentList_To_v1alpha1_VolumeAttachmentList is an autogenerated conversion function. +func Convert_storage_VolumeAttachmentList_To_v1alpha1_VolumeAttachmentList(in *storage.VolumeAttachmentList, out *v1alpha1.VolumeAttachmentList, s conversion.Scope) error { + return autoConvert_storage_VolumeAttachmentList_To_v1alpha1_VolumeAttachmentList(in, out, s) +} + +func autoConvert_v1alpha1_VolumeAttachmentSource_To_storage_VolumeAttachmentSource(in *v1alpha1.VolumeAttachmentSource, out *storage.VolumeAttachmentSource, s conversion.Scope) error { + out.PersistentVolumeName = (*string)(unsafe.Pointer(in.PersistentVolumeName)) + return nil +} + +// Convert_v1alpha1_VolumeAttachmentSource_To_storage_VolumeAttachmentSource is an autogenerated conversion function. +func Convert_v1alpha1_VolumeAttachmentSource_To_storage_VolumeAttachmentSource(in *v1alpha1.VolumeAttachmentSource, out *storage.VolumeAttachmentSource, s conversion.Scope) error { + return autoConvert_v1alpha1_VolumeAttachmentSource_To_storage_VolumeAttachmentSource(in, out, s) +} + +func autoConvert_storage_VolumeAttachmentSource_To_v1alpha1_VolumeAttachmentSource(in *storage.VolumeAttachmentSource, out *v1alpha1.VolumeAttachmentSource, s conversion.Scope) error { + out.PersistentVolumeName = (*string)(unsafe.Pointer(in.PersistentVolumeName)) + return nil +} + +// Convert_storage_VolumeAttachmentSource_To_v1alpha1_VolumeAttachmentSource is an autogenerated conversion function. +func Convert_storage_VolumeAttachmentSource_To_v1alpha1_VolumeAttachmentSource(in *storage.VolumeAttachmentSource, out *v1alpha1.VolumeAttachmentSource, s conversion.Scope) error { + return autoConvert_storage_VolumeAttachmentSource_To_v1alpha1_VolumeAttachmentSource(in, out, s) +} + +func autoConvert_v1alpha1_VolumeAttachmentSpec_To_storage_VolumeAttachmentSpec(in *v1alpha1.VolumeAttachmentSpec, out *storage.VolumeAttachmentSpec, s conversion.Scope) error { + out.Attacher = in.Attacher + if err := Convert_v1alpha1_VolumeAttachmentSource_To_storage_VolumeAttachmentSource(&in.Source, &out.Source, s); err != nil { + return err + } + out.NodeName = in.NodeName + return nil +} + +// Convert_v1alpha1_VolumeAttachmentSpec_To_storage_VolumeAttachmentSpec is an autogenerated conversion function. 
+func Convert_v1alpha1_VolumeAttachmentSpec_To_storage_VolumeAttachmentSpec(in *v1alpha1.VolumeAttachmentSpec, out *storage.VolumeAttachmentSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_VolumeAttachmentSpec_To_storage_VolumeAttachmentSpec(in, out, s) +} + +func autoConvert_storage_VolumeAttachmentSpec_To_v1alpha1_VolumeAttachmentSpec(in *storage.VolumeAttachmentSpec, out *v1alpha1.VolumeAttachmentSpec, s conversion.Scope) error { + out.Attacher = in.Attacher + if err := Convert_storage_VolumeAttachmentSource_To_v1alpha1_VolumeAttachmentSource(&in.Source, &out.Source, s); err != nil { + return err + } + out.NodeName = in.NodeName + return nil +} + +// Convert_storage_VolumeAttachmentSpec_To_v1alpha1_VolumeAttachmentSpec is an autogenerated conversion function. +func Convert_storage_VolumeAttachmentSpec_To_v1alpha1_VolumeAttachmentSpec(in *storage.VolumeAttachmentSpec, out *v1alpha1.VolumeAttachmentSpec, s conversion.Scope) error { + return autoConvert_storage_VolumeAttachmentSpec_To_v1alpha1_VolumeAttachmentSpec(in, out, s) +} + +func autoConvert_v1alpha1_VolumeAttachmentStatus_To_storage_VolumeAttachmentStatus(in *v1alpha1.VolumeAttachmentStatus, out *storage.VolumeAttachmentStatus, s conversion.Scope) error { + out.Attached = in.Attached + out.AttachmentMetadata = *(*map[string]string)(unsafe.Pointer(&in.AttachmentMetadata)) + out.AttachError = (*storage.VolumeError)(unsafe.Pointer(in.AttachError)) + out.DetachError = (*storage.VolumeError)(unsafe.Pointer(in.DetachError)) + return nil +} + +// Convert_v1alpha1_VolumeAttachmentStatus_To_storage_VolumeAttachmentStatus is an autogenerated conversion function. +func Convert_v1alpha1_VolumeAttachmentStatus_To_storage_VolumeAttachmentStatus(in *v1alpha1.VolumeAttachmentStatus, out *storage.VolumeAttachmentStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_VolumeAttachmentStatus_To_storage_VolumeAttachmentStatus(in, out, s) +} + +func autoConvert_storage_VolumeAttachmentStatus_To_v1alpha1_VolumeAttachmentStatus(in *storage.VolumeAttachmentStatus, out *v1alpha1.VolumeAttachmentStatus, s conversion.Scope) error { + out.Attached = in.Attached + out.AttachmentMetadata = *(*map[string]string)(unsafe.Pointer(&in.AttachmentMetadata)) + out.AttachError = (*v1alpha1.VolumeError)(unsafe.Pointer(in.AttachError)) + out.DetachError = (*v1alpha1.VolumeError)(unsafe.Pointer(in.DetachError)) + return nil +} + +// Convert_storage_VolumeAttachmentStatus_To_v1alpha1_VolumeAttachmentStatus is an autogenerated conversion function. +func Convert_storage_VolumeAttachmentStatus_To_v1alpha1_VolumeAttachmentStatus(in *storage.VolumeAttachmentStatus, out *v1alpha1.VolumeAttachmentStatus, s conversion.Scope) error { + return autoConvert_storage_VolumeAttachmentStatus_To_v1alpha1_VolumeAttachmentStatus(in, out, s) +} + +func autoConvert_v1alpha1_VolumeError_To_storage_VolumeError(in *v1alpha1.VolumeError, out *storage.VolumeError, s conversion.Scope) error { + out.Time = in.Time + out.Message = in.Message + return nil +} + +// Convert_v1alpha1_VolumeError_To_storage_VolumeError is an autogenerated conversion function. 
+func Convert_v1alpha1_VolumeError_To_storage_VolumeError(in *v1alpha1.VolumeError, out *storage.VolumeError, s conversion.Scope) error { + return autoConvert_v1alpha1_VolumeError_To_storage_VolumeError(in, out, s) +} + +func autoConvert_storage_VolumeError_To_v1alpha1_VolumeError(in *storage.VolumeError, out *v1alpha1.VolumeError, s conversion.Scope) error { + out.Time = in.Time + out.Message = in.Message + return nil +} + +// Convert_storage_VolumeError_To_v1alpha1_VolumeError is an autogenerated conversion function. +func Convert_storage_VolumeError_To_v1alpha1_VolumeError(in *storage.VolumeError, out *v1alpha1.VolumeError, s conversion.Scope) error { + return autoConvert_storage_VolumeError_To_v1alpha1_VolumeError(in, out, s) +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1alpha1/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1alpha1/zz_generated.defaults.go new file mode 100644 index 000000000000..7e6df29d4ae5 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1alpha1/zz_generated.defaults.go @@ -0,0 +1,32 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. 
+func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/BUILD index f0ad28c7ada2..368302812eb4 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/BUILD @@ -3,6 +3,7 @@ package(default_visibility = ["//visibility:public"]) load( "@io_bazel_rules_go//go:def.bzl", "go_library", + "go_test", ) go_library( @@ -14,14 +15,17 @@ go_library( "zz_generated.conversion.go", "zz_generated.defaults.go", ], + importpath = "k8s.io/kubernetes/pkg/apis/storage/v1beta1", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/storage:go_default_library", + "//pkg/features:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/storage/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", ], ) @@ -40,3 +44,16 @@ filegroup( ], tags = ["automanaged"], ) + +go_test( + name = "go_default_xtest", + srcs = ["defaults_test.go"], + importpath = "k8s.io/kubernetes/pkg/apis/storage/v1beta1_test", + deps = [ + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/storage/install:go_default_library", + "//vendor/k8s.io/api/storage/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/defaults.go index e50599bf273d..97dbff2f3786 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/defaults.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/defaults.go @@ -20,6 +20,8 @@ import ( "k8s.io/api/core/v1" storagev1beta1 "k8s.io/api/storage/v1beta1" "k8s.io/apimachinery/pkg/runtime" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/kubernetes/pkg/features" ) func addDefaultingFuncs(scheme *runtime.Scheme) error { @@ -31,4 +33,9 @@ func SetDefaults_StorageClass(obj *storagev1beta1.StorageClass) { obj.ReclaimPolicy = new(v1.PersistentVolumeReclaimPolicy) *obj.ReclaimPolicy = v1.PersistentVolumeReclaimDelete } + + if obj.VolumeBindingMode == nil && utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) { + obj.VolumeBindingMode = new(storagev1beta1.VolumeBindingMode) + *obj.VolumeBindingMode = storagev1beta1.VolumeBindingImmediate + } } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/doc.go index be39d6eb2871..0301f751664d 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/doc.go @@ -15,7 +15,7 @@ limitations under the License. 
*/ // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/storage -// +k8s:conversion-gen-external-types=../../../../vendor/k8s.io/api/storage/v1beta1 +// +k8s:conversion-gen-external-types=k8s.io/api/storage/v1beta1 // +groupName=storage.k8s.io // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/storage/v1beta1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/zz_generated.conversion.go index d340530abbc5..9637b2ea494b 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/zz_generated.conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/zz_generated.conversion.go @@ -25,7 +25,7 @@ import ( v1beta1 "k8s.io/api/storage/v1beta1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" storage "k8s.io/kubernetes/pkg/apis/storage" unsafe "unsafe" ) @@ -49,9 +49,10 @@ func autoConvert_v1beta1_StorageClass_To_storage_StorageClass(in *v1beta1.Storag out.ObjectMeta = in.ObjectMeta out.Provisioner = in.Provisioner out.Parameters = *(*map[string]string)(unsafe.Pointer(&in.Parameters)) - out.ReclaimPolicy = (*api.PersistentVolumeReclaimPolicy)(unsafe.Pointer(in.ReclaimPolicy)) + out.ReclaimPolicy = (*core.PersistentVolumeReclaimPolicy)(unsafe.Pointer(in.ReclaimPolicy)) out.MountOptions = *(*[]string)(unsafe.Pointer(&in.MountOptions)) out.AllowVolumeExpansion = (*bool)(unsafe.Pointer(in.AllowVolumeExpansion)) + out.VolumeBindingMode = (*storage.VolumeBindingMode)(unsafe.Pointer(in.VolumeBindingMode)) return nil } @@ -67,6 +68,7 @@ func autoConvert_storage_StorageClass_To_v1beta1_StorageClass(in *storage.Storag out.ReclaimPolicy = (*v1.PersistentVolumeReclaimPolicy)(unsafe.Pointer(in.ReclaimPolicy)) out.MountOptions = *(*[]string)(unsafe.Pointer(&in.MountOptions)) out.AllowVolumeExpansion = (*bool)(unsafe.Pointer(in.AllowVolumeExpansion)) + out.VolumeBindingMode = (*v1beta1.VolumeBindingMode)(unsafe.Pointer(in.VolumeBindingMode)) return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/validation/BUILD b/vendor/k8s.io/kubernetes/pkg/apis/storage/validation/BUILD index cdb70b259add..e80f7838d6bf 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/storage/validation/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/validation/BUILD @@ -9,11 +9,13 @@ load( go_library( name = "go_default_library", srcs = ["validation.go"], + importpath = "k8s.io/kubernetes/pkg/apis/storage/validation", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/validation:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/validation:go_default_library", "//pkg/apis/storage:go_default_library", "//pkg/features:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", @@ -24,9 +26,10 @@ go_library( go_test( name = "go_default_test", srcs = ["validation_test.go"], + importpath = "k8s.io/kubernetes/pkg/apis/storage/validation", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/storage:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", 
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/validation/validation.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/validation/validation.go index 38b7915167e5..e9f66e938e8f 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/storage/validation/validation.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/validation/validation.go @@ -20,16 +20,25 @@ import ( "reflect" "strings" + apiequality "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation" "k8s.io/apimachinery/pkg/util/validation/field" utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/kubernetes/pkg/api" - apivalidation "k8s.io/kubernetes/pkg/api/validation" + api "k8s.io/kubernetes/pkg/apis/core" + apivalidation "k8s.io/kubernetes/pkg/apis/core/validation" "k8s.io/kubernetes/pkg/apis/storage" "k8s.io/kubernetes/pkg/features" ) +const ( + maxProvisionerParameterSize = 256 * (1 << 10) // 256 kB + maxProvisionerParameterLen = 512 + + maxAttachedVolumeMetadataSize = 256 * (1 << 10) // 256 kB + maxVolumeErrorMessageSize = 1024 +) + // ValidateStorageClass validates a StorageClass. func ValidateStorageClass(storageClass *storage.StorageClass) field.ErrorList { allErrs := apivalidation.ValidateObjectMeta(&storageClass.ObjectMeta, false, apivalidation.ValidateClassName, field.NewPath("metadata")) @@ -37,6 +46,7 @@ func ValidateStorageClass(storageClass *storage.StorageClass) field.ErrorList { allErrs = append(allErrs, validateParameters(storageClass.Parameters, field.NewPath("parameters"))...) allErrs = append(allErrs, validateReclaimPolicy(storageClass.ReclaimPolicy, field.NewPath("reclaimPolicy"))...) allErrs = append(allErrs, validateAllowVolumeExpansion(storageClass.AllowVolumeExpansion, field.NewPath("allowVolumeExpansion"))...) + allErrs = append(allErrs, validateVolumeBindingMode(storageClass.VolumeBindingMode, field.NewPath("volumeBindingMode"))...) return allErrs } @@ -55,6 +65,8 @@ func ValidateStorageClassUpdate(storageClass, oldStorageClass *storage.StorageCl if *storageClass.ReclaimPolicy != *oldStorageClass.ReclaimPolicy { allErrs = append(allErrs, field.Forbidden(field.NewPath("reclaimPolicy"), "updates to reclaimPolicy are forbidden.")) } + + allErrs = append(allErrs, apivalidation.ValidateImmutableField(storageClass.VolumeBindingMode, oldStorageClass.VolumeBindingMode, field.NewPath("volumeBindingMode"))...) return allErrs } @@ -72,9 +84,6 @@ func validateProvisioner(provisioner string, fldPath *field.Path) field.ErrorLis return allErrs } -const maxProvisionerParameterSize = 256 * (1 << 10) // 256 kB -const maxProvisionerParameterLen = 512 - // validateParameters tests that keys are qualified names and that provisionerParameter are < 256kB. func validateParameters(params map[string]string, fldPath *field.Path) field.ErrorList { var totalSize int64 @@ -121,3 +130,112 @@ func validateAllowVolumeExpansion(allowExpand *bool, fldPath *field.Path) field. } return allErrs } + +// ValidateVolumeAttachment validates a VolumeAttachment. +func ValidateVolumeAttachment(volumeAttachment *storage.VolumeAttachment) field.ErrorList { + allErrs := apivalidation.ValidateObjectMeta(&volumeAttachment.ObjectMeta, false, apivalidation.ValidateClassName, field.NewPath("metadata")) + allErrs = append(allErrs, validateVolumeAttachmentSpec(&volumeAttachment.Spec, field.NewPath("spec"))...) + allErrs = append(allErrs, validateVolumeAttachmentStatus(&volumeAttachment.Status, field.NewPath("status"))...) 
+ return allErrs +} + +// validateVolumeAttachmentSpec tests that the specified VolumeAttachmentSpec +// has valid data. +func validateVolumeAttachmentSpec( + spec *storage.VolumeAttachmentSpec, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, validateAttacher(spec.Attacher, fldPath.Child("attacher"))...) + allErrs = append(allErrs, validateVolumeAttachmentSource(&spec.Source, fldPath.Child("source"))...) + allErrs = append(allErrs, validateNodeName(spec.NodeName, fldPath.Child("nodeName"))...) + return allErrs +} + +// validateAttacher tests if attacher is a valid qualified name. +func validateAttacher(attacher string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(attacher) == 0 { + allErrs = append(allErrs, field.Required(fldPath, attacher)) + } + return allErrs +} + +// validateVolumeAttachmentSource tests if the source is valid for VolumeAttachment. +func validateVolumeAttachmentSource(source *storage.VolumeAttachmentSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if source.PersistentVolumeName == nil || len(*source.PersistentVolumeName) == 0 { + allErrs = append(allErrs, field.Required(fldPath, "")) + } + return allErrs +} + +// validateNodeName tests if the nodeName is valid for VolumeAttachment. +func validateNodeName(nodeName string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for _, msg := range apivalidation.ValidateNodeName(nodeName, false /* prefix */) { + allErrs = append(allErrs, field.Invalid(fldPath, nodeName, msg)) + } + return allErrs +} + +// validateVolumeAttachmentStatus tests if volumeAttachmentStatus is valid. +func validateVolumeAttachmentStatus(status *storage.VolumeAttachmentStatus, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, validateAttachmentMetadata(status.AttachmentMetadata, fldPath.Child("attachmentMetadata"))...) + allErrs = append(allErrs, validateVolumeError(status.AttachError, fldPath.Child("attachError"))...) + allErrs = append(allErrs, validateVolumeError(status.DetachError, fldPath.Child("detachError"))...) + return allErrs +} + +func validateAttachmentMetadata(metadata map[string]string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + var size int64 + for k, v := range metadata { + size += (int64)(len(k)) + (int64)(len(v)) + } + if size > maxAttachedVolumeMetadataSize { + allErrs = append(allErrs, field.TooLong(fldPath, metadata, maxAttachedVolumeMetadataSize)) + } + return allErrs +} + +func validateVolumeError(e *storage.VolumeError, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if e == nil { + return allErrs + } + if len(e.Message) > maxVolumeErrorMessageSize { + allErrs = append(allErrs, field.TooLong(fldPath.Child("message"), e.Message, maxVolumeErrorMessageSize)) + } + return allErrs +} + +// ValidateVolumeAttachmentUpdate validates an update to a VolumeAttachment. +func ValidateVolumeAttachmentUpdate(new, old *storage.VolumeAttachment) field.ErrorList { + allErrs := ValidateVolumeAttachment(new) + + // Spec is read-only + if !apiequality.Semantic.DeepEqual(old.Spec, new.Spec) { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec"), new.Spec, "field is immutable")) + } + return allErrs +} + +var supportedVolumeBindingModes = sets.NewString(string(storage.VolumeBindingImmediate), string(storage.VolumeBindingWaitForFirstConsumer)) + +// validateVolumeBindingMode tests that VolumeBindingMode specifies valid values.
+func validateVolumeBindingMode(mode *storage.VolumeBindingMode, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) { + if mode == nil { + allErrs = append(allErrs, field.Required(fldPath, "")) + } else if !supportedVolumeBindingModes.Has(string(*mode)) { + allErrs = append(allErrs, field.NotSupported(fldPath, mode, supportedVolumeBindingModes.List())) + } + } else if mode != nil { + allErrs = append(allErrs, field.Forbidden(fldPath, "field is disabled by feature-gate VolumeScheduling")) + } + + return allErrs +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/storage/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/storage/zz_generated.deepcopy.go index b6564ce03ad5..287623fd4260 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/storage/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/storage/zz_generated.deepcopy.go @@ -21,33 +21,10 @@ limitations under the License. package storage import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - api "k8s.io/kubernetes/pkg/api" - reflect "reflect" + core "k8s.io/kubernetes/pkg/apis/core" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StorageClass).DeepCopyInto(out.(*StorageClass)) - return nil - }, InType: reflect.TypeOf(&StorageClass{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StorageClassList).DeepCopyInto(out.(*StorageClassList)) - return nil - }, InType: reflect.TypeOf(&StorageClassList{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StorageClass) DeepCopyInto(out *StorageClass) { *out = *in @@ -65,7 +42,7 @@ func (in *StorageClass) DeepCopyInto(out *StorageClass) { if *in == nil { *out = nil } else { - *out = new(api.PersistentVolumeReclaimPolicy) + *out = new(core.PersistentVolumeReclaimPolicy) **out = **in } } @@ -83,6 +60,15 @@ func (in *StorageClass) DeepCopyInto(out *StorageClass) { **out = **in } } + if in.VolumeBindingMode != nil { + in, out := &in.VolumeBindingMode, &out.VolumeBindingMode + if *in == nil { + *out = nil + } else { + *out = new(VolumeBindingMode) + **out = **in + } + } return } @@ -138,3 +124,166 @@ func (in *StorageClassList) DeepCopyObject() runtime.Object { return nil } } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachment) DeepCopyInto(out *VolumeAttachment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachment. 
+func (in *VolumeAttachment) DeepCopy() *VolumeAttachment { + if in == nil { + return nil + } + out := new(VolumeAttachment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeAttachment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachmentList) DeepCopyInto(out *VolumeAttachmentList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VolumeAttachment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentList. +func (in *VolumeAttachmentList) DeepCopy() *VolumeAttachmentList { + if in == nil { + return nil + } + out := new(VolumeAttachmentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeAttachmentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachmentSource) DeepCopyInto(out *VolumeAttachmentSource) { + *out = *in + if in.PersistentVolumeName != nil { + in, out := &in.PersistentVolumeName, &out.PersistentVolumeName + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentSource. +func (in *VolumeAttachmentSource) DeepCopy() *VolumeAttachmentSource { + if in == nil { + return nil + } + out := new(VolumeAttachmentSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachmentSpec) DeepCopyInto(out *VolumeAttachmentSpec) { + *out = *in + in.Source.DeepCopyInto(&out.Source) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentSpec. +func (in *VolumeAttachmentSpec) DeepCopy() *VolumeAttachmentSpec { + if in == nil { + return nil + } + out := new(VolumeAttachmentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VolumeAttachmentStatus) DeepCopyInto(out *VolumeAttachmentStatus) { + *out = *in + if in.AttachmentMetadata != nil { + in, out := &in.AttachmentMetadata, &out.AttachmentMetadata + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.AttachError != nil { + in, out := &in.AttachError, &out.AttachError + if *in == nil { + *out = nil + } else { + *out = new(VolumeError) + (*in).DeepCopyInto(*out) + } + } + if in.DetachError != nil { + in, out := &in.DetachError, &out.DetachError + if *in == nil { + *out = nil + } else { + *out = new(VolumeError) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentStatus. +func (in *VolumeAttachmentStatus) DeepCopy() *VolumeAttachmentStatus { + if in == nil { + return nil + } + out := new(VolumeAttachmentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeError) DeepCopyInto(out *VolumeError) { + *out = *in + in.Time.DeepCopyInto(&out.Time) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeError. +func (in *VolumeError) DeepCopy() *VolumeError { + if in == nil { + return nil + } + out := new(VolumeError) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/kubernetes/pkg/auth/authorizer/abac/BUILD b/vendor/k8s.io/kubernetes/pkg/auth/authorizer/abac/BUILD index 78bbcb369705..efbea8e25685 100644 --- a/vendor/k8s.io/kubernetes/pkg/auth/authorizer/abac/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/auth/authorizer/abac/BUILD @@ -9,6 +9,7 @@ load( go_library( name = "go_default_library", srcs = ["abac.go"], + importpath = "k8s.io/kubernetes/pkg/auth/authorizer/abac", deps = [ "//pkg/apis/abac:go_default_library", "//pkg/apis/abac/latest:go_default_library", @@ -34,6 +35,7 @@ go_test( data = [ ":example_policy", ], + importpath = "k8s.io/kubernetes/pkg/auth/authorizer/abac", library = ":go_default_library", deps = [ "//pkg/apis/abac:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/auth/authorizer/abac/abac.go b/vendor/k8s.io/kubernetes/pkg/auth/authorizer/abac/abac.go index 5e56c19ba9c7..f3132740d2d3 100644 --- a/vendor/k8s.io/kubernetes/pkg/auth/authorizer/abac/abac.go +++ b/vendor/k8s.io/kubernetes/pkg/auth/authorizer/abac/abac.go @@ -30,7 +30,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/authorization/authorizer" - api "k8s.io/kubernetes/pkg/apis/abac" + "k8s.io/kubernetes/pkg/apis/abac" _ "k8s.io/kubernetes/pkg/apis/abac/latest" "k8s.io/kubernetes/pkg/apis/abac/v0" ) @@ -49,7 +49,7 @@ func (p policyLoadError) Error() string { return fmt.Sprintf("error reading policy file %s: %v", p.path, p.err) } -type policyList []*api.Policy +type policyList []*abac.Policy // TODO: Have policies be created via an API call and stored in REST storage. 
func NewFromFile(path string) (policyList, error) { @@ -64,13 +64,13 @@ func NewFromFile(path string) (policyList, error) { scanner := bufio.NewScanner(file) pl := make(policyList, 0) - decoder := api.Codecs.UniversalDecoder() + decoder := abac.Codecs.UniversalDecoder() i := 0 unversionedLines := 0 for scanner.Scan() { i++ - p := &api.Policy{} + p := &abac.Policy{} b := scanner.Bytes() // skip comment lines and blank lines @@ -90,14 +90,14 @@ func NewFromFile(path string) (policyList, error) { if err := runtime.DecodeInto(decoder, b, oldPolicy); err != nil { return nil, policyLoadError{path, i, b, err} } - if err := api.Scheme.Convert(oldPolicy, p, nil); err != nil { + if err := abac.Scheme.Convert(oldPolicy, p, nil); err != nil { return nil, policyLoadError{path, i, b, err} } pl = append(pl, p) continue } - decodedPolicy, ok := decodedObj.(*api.Policy) + decodedPolicy, ok := decodedObj.(*abac.Policy) if !ok { return nil, policyLoadError{path, i, b, fmt.Errorf("unrecognized object: %#v", decodedObj)} } @@ -114,7 +114,7 @@ func NewFromFile(path string) (policyList, error) { return pl, nil } -func matches(p api.Policy, a authorizer.Attributes) bool { +func matches(p abac.Policy, a authorizer.Attributes) bool { if subjectMatches(p, a.GetUser()) { if verbMatches(p, a) { // Resource and non-resource requests are mutually exclusive, at most one will match a policy @@ -130,7 +130,7 @@ func matches(p api.Policy, a authorizer.Attributes) bool { } // subjectMatches returns true if specified user and group properties in the policy match the attributes -func subjectMatches(p api.Policy, user user.Info) bool { +func subjectMatches(p abac.Policy, user user.Info) bool { matched := false if user == nil { @@ -171,7 +171,7 @@ func subjectMatches(p api.Policy, user user.Info) bool { return matched } -func verbMatches(p api.Policy, a authorizer.Attributes) bool { +func verbMatches(p abac.Policy, a authorizer.Attributes) bool { // TODO: match on verb // All policies allow read only requests @@ -187,7 +187,7 @@ func verbMatches(p api.Policy, a authorizer.Attributes) bool { return false } -func nonResourceMatches(p api.Policy, a authorizer.Attributes) bool { +func nonResourceMatches(p abac.Policy, a authorizer.Attributes) bool { // A non-resource policy cannot match a resource request if !a.IsResourceRequest() { // Allow wildcard match @@ -206,7 +206,7 @@ func nonResourceMatches(p api.Policy, a authorizer.Attributes) bool { return false } -func resourceMatches(p api.Policy, a authorizer.Attributes) bool { +func resourceMatches(p abac.Policy, a authorizer.Attributes) bool { // A resource policy cannot match a non-resource request if a.IsResourceRequest() { if p.Spec.Namespace == "*" || p.Spec.Namespace == a.GetNamespace() { @@ -221,13 +221,13 @@ func resourceMatches(p api.Policy, a authorizer.Attributes) bool { } // Authorizer implements authorizer.Authorize -func (pl policyList) Authorize(a authorizer.Attributes) (bool, string, error) { +func (pl policyList) Authorize(a authorizer.Attributes) (authorizer.Decision, string, error) { for _, p := range pl { if matches(*p, a) { - return true, "", nil + return authorizer.DecisionAllow, "", nil } } - return false, "No policy matched.", nil + return authorizer.DecisionNoOpinion, "No policy matched.", nil // TODO: Benchmark how much time policy matching takes with a medium size // policy file, compared to other steps such as encoding/decoding. // Then, add Caching only if needed. 
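For context on the Authorize signature change above: the ABAC authorizer now returns the three-valued authorizer.Decision instead of a bool, reporting an unmatched request as DecisionNoOpinion so a later authorizer in the chain can still decide. The sketch below is illustrative only (not part of the diff); it assumes the vendored k8s.io/apiserver and k8s.io/kubernetes/pkg/auth/authorizer/abac packages shown in this change and a hypothetical policy file path.

package main

import (
	"fmt"

	"k8s.io/apiserver/pkg/authentication/user"
	"k8s.io/apiserver/pkg/authorization/authorizer"
	"k8s.io/kubernetes/pkg/auth/authorizer/abac"
)

func main() {
	// Hypothetical ABAC policy file (one JSON policy object per line).
	pl, err := abac.NewFromFile("/etc/kubernetes/abac-policy.jsonl")
	if err != nil {
		panic(err)
	}

	attrs := authorizer.AttributesRecord{
		User:            &user.DefaultInfo{Name: "alice"},
		Verb:            "get",
		Namespace:       "default",
		Resource:        "pods",
		ResourceRequest: true,
	}

	// Authorize now returns an authorizer.Decision rather than a bool;
	// an unmatched request yields DecisionNoOpinion, which a chained
	// caller can treat as "ask the next authorizer" instead of a deny.
	decision, reason, err := pl.Authorize(attrs)
	if err != nil {
		panic(err)
	}
	switch decision {
	case authorizer.DecisionAllow:
		fmt.Println("allowed by an ABAC policy")
	case authorizer.DecisionNoOpinion:
		fmt.Println("no ABAC policy matched:", reason)
	default:
		fmt.Println("denied:", reason)
	}
}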
diff --git a/vendor/k8s.io/kubernetes/pkg/auth/nodeidentifier/BUILD b/vendor/k8s.io/kubernetes/pkg/auth/nodeidentifier/BUILD index 84e7f09fd5a5..f51a1dc61196 100644 --- a/vendor/k8s.io/kubernetes/pkg/auth/nodeidentifier/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/auth/nodeidentifier/BUILD @@ -12,12 +12,14 @@ go_library( srcs = [ "default.go", "interfaces.go", ], + importpath = "k8s.io/kubernetes/pkg/auth/nodeidentifier", deps = ["//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library"], ) go_test( name = "go_default_test", srcs = ["default_test.go"], + importpath = "k8s.io/kubernetes/pkg/auth/nodeidentifier", library = ":go_default_library", deps = ["//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/auth/nodeidentifier/interfaces.go b/vendor/k8s.io/kubernetes/pkg/auth/nodeidentifier/interfaces.go index 917bebaf9d9e..df10a88a8408 100644 --- a/vendor/k8s.io/kubernetes/pkg/auth/nodeidentifier/interfaces.go +++ b/vendor/k8s.io/kubernetes/pkg/auth/nodeidentifier/interfaces.go @@ -22,7 +22,7 @@ import ( // NodeIdentifier determines node information from a given user type NodeIdentifier interface { - // IdentifyNode determines node information from the given user.Info. + // NodeIdentity determines node information from the given user.Info. // nodeName is the name of the Node API object associated with the user.Info, // and may be empty if a specific node cannot be determined. // isNode is true if the user.Info represents an identity issued to a node. diff --git a/vendor/k8s.io/kubernetes/pkg/bootstrap/api/BUILD b/vendor/k8s.io/kubernetes/pkg/bootstrap/api/BUILD index db7be057bf6c..0beb8e733317 100644 --- a/vendor/k8s.io/kubernetes/pkg/bootstrap/api/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/bootstrap/api/BUILD @@ -13,7 +13,11 @@ go_library( srcs = [ "helpers.go", "types.go", ], - deps = ["//vendor/k8s.io/api/core/v1:go_default_library"], + importpath = "k8s.io/kubernetes/pkg/bootstrap/api", + deps = [ + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + ], ) filegroup( @@ -32,5 +36,6 @@ filegroup( go_test( name = "go_default_test", srcs = ["helpers_test.go"], + importpath = "k8s.io/kubernetes/pkg/bootstrap/api", library = ":go_default_library", ) diff --git a/vendor/k8s.io/kubernetes/pkg/bootstrap/api/helpers.go b/vendor/k8s.io/kubernetes/pkg/bootstrap/api/helpers.go index 639d61a33c3f..01859bc37fe7 100644 --- a/vendor/k8s.io/kubernetes/pkg/bootstrap/api/helpers.go +++ b/vendor/k8s.io/kubernetes/pkg/bootstrap/api/helpers.go @@ -18,7 +18,9 @@ package api import ( "fmt" + "k8s.io/apimachinery/pkg/util/sets" "regexp" + "strings" ) var bootstrapGroupRegexp = regexp.MustCompile(`\A` + BootstrapGroupPattern + `\z`) @@ -32,3 +34,19 @@ func ValidateBootstrapGroupName(name string) error { } return fmt.Errorf("bootstrap group %q is invalid (must match %s)", name, BootstrapGroupPattern) } + +// ValidateUsages validates that the passed in strings are valid usage strings for bootstrap tokens.
+func ValidateUsages(usages []string) error { + usageAuthentication := strings.TrimPrefix(BootstrapTokenUsageAuthentication, BootstrapTokenUsagePrefix) + usageSigning := strings.TrimPrefix(BootstrapTokenUsageSigningKey, BootstrapTokenUsagePrefix) + invalidUsages := sets.NewString() + for _, usage := range usages { + if usage != usageAuthentication && usage != usageSigning { + invalidUsages.Insert(usage) + } + } + if len(invalidUsages) > 0 { + return fmt.Errorf("invalid bootstrap token usage string: %s, valid usage options: %s, %s", strings.Join(invalidUsages.List(), ","), usageAuthentication, usageSigning) + } + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/capabilities/BUILD b/vendor/k8s.io/kubernetes/pkg/capabilities/BUILD index 33b71a82397d..6ab539698cfe 100644 --- a/vendor/k8s.io/kubernetes/pkg/capabilities/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/capabilities/BUILD @@ -12,13 +12,14 @@ go_library( srcs = [ "capabilities.go", "doc.go", ], + importpath = "k8s.io/kubernetes/pkg/capabilities", ) go_test( name = "go_default_test", srcs = ["capabilities_test.go"], + importpath = "k8s.io/kubernetes/pkg/capabilities", library = ":go_default_library", - tags = ["automanaged"], ) filegroup( diff --git a/vendor/k8s.io/kubernetes/pkg/client/chaosclient/BUILD b/vendor/k8s.io/kubernetes/pkg/client/chaosclient/BUILD index 9c46e1269063..8ba5b300d302 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/chaosclient/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/chaosclient/BUILD @@ -9,12 +9,14 @@ load( go_library( name = "go_default_library", srcs = ["chaosclient.go"], + importpath = "k8s.io/kubernetes/pkg/client/chaosclient", deps = ["//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library"], ) go_test( name = "go_default_test", srcs = ["chaosclient_test.go"], + importpath = "k8s.io/kubernetes/pkg/client/chaosclient", library = ":go_default_library", ) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/BUILD index 09f1e0d3adf5..219323e9e5e5 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/BUILD @@ -11,6 +11,7 @@ go_library( srcs = [ "clientset.go", "doc.go", ], + importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset", deps = [ "//pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion:go_default_library", "//pkg/client/clientset_generated/internalclientset/typed/apps/internalversion:go_default_library", @@ -20,6 +21,7 @@ go_library( "//pkg/client/clientset_generated/internalclientset/typed/batch/internalversion:go_default_library", "//pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion:go_default_library", "//pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library", + "//pkg/client/clientset_generated/internalclientset/typed/events/internalversion:go_default_library", "//pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion:go_default_library", "//pkg/client/clientset_generated/internalclientset/typed/networking/internalversion:go_default_library", "//pkg/client/clientset_generated/internalclientset/typed/policy/internalversion:go_default_library", @@ -55,6 +57,7 @@ filegroup( "//pkg/client/clientset_generated/internalclientset/typed/batch/internalversion:all-srcs",
"//pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion:all-srcs", "//pkg/client/clientset_generated/internalclientset/typed/core/internalversion:all-srcs", + "//pkg/client/clientset_generated/internalclientset/typed/events/internalversion:all-srcs", "//pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion:all-srcs", "//pkg/client/clientset_generated/internalclientset/typed/networking/internalversion:all-srcs", "//pkg/client/clientset_generated/internalclientset/typed/policy/internalversion:all-srcs", diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/clientset.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/clientset.go index c7eb9d437caa..8c3b8a3cb621 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/clientset.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/clientset.go @@ -29,6 +29,7 @@ import ( batchinternalversion "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion" certificatesinternalversion "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion" coreinternalversion "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" + eventsinternalversion "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/events/internalversion" extensionsinternalversion "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion" networkinginternalversion "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion" policyinternalversion "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion" @@ -48,6 +49,7 @@ type Interface interface { Autoscaling() autoscalinginternalversion.AutoscalingInterface Batch() batchinternalversion.BatchInterface Certificates() certificatesinternalversion.CertificatesInterface + Events() eventsinternalversion.EventsInterface Extensions() extensionsinternalversion.ExtensionsInterface Networking() networkinginternalversion.NetworkingInterface Policy() policyinternalversion.PolicyInterface @@ -69,6 +71,7 @@ type Clientset struct { autoscaling *autoscalinginternalversion.AutoscalingClient batch *batchinternalversion.BatchClient certificates *certificatesinternalversion.CertificatesClient + events *eventsinternalversion.EventsClient extensions *extensionsinternalversion.ExtensionsClient networking *networkinginternalversion.NetworkingClient policy *policyinternalversion.PolicyClient @@ -118,6 +121,11 @@ func (c *Clientset) Certificates() certificatesinternalversion.CertificatesInter return c.certificates } +// Events retrieves the EventsClient +func (c *Clientset) Events() eventsinternalversion.EventsInterface { + return c.events +} + // Extensions retrieves the ExtensionsClient func (c *Clientset) Extensions() extensionsinternalversion.ExtensionsInterface { return c.extensions @@ -201,6 +209,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + cs.events, err = eventsinternalversion.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.extensions, err = extensionsinternalversion.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -250,6 +262,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { cs.autoscaling = 
autoscalinginternalversion.NewForConfigOrDie(c) cs.batch = batchinternalversion.NewForConfigOrDie(c) cs.certificates = certificatesinternalversion.NewForConfigOrDie(c) + cs.events = eventsinternalversion.NewForConfigOrDie(c) cs.extensions = extensionsinternalversion.NewForConfigOrDie(c) cs.networking = networkinginternalversion.NewForConfigOrDie(c) cs.policy = policyinternalversion.NewForConfigOrDie(c) @@ -273,6 +286,7 @@ func New(c rest.Interface) *Clientset { cs.autoscaling = autoscalinginternalversion.New(c) cs.batch = batchinternalversion.New(c) cs.certificates = certificatesinternalversion.New(c) + cs.events = eventsinternalversion.New(c) cs.extensions = extensionsinternalversion.New(c) cs.networking = networkinginternalversion.New(c) cs.policy = policyinternalversion.New(c) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/doc.go index 6d5a0d8cb464..b667dd5157a5 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with the default arguments. - // This package has the automatically generated clientset. package internalclientset diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme/BUILD index 0c3683d8a3fb..97e20c749e70 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme/BUILD @@ -12,8 +12,8 @@ go_library( "register.go", "register_custom.go", ], + importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme", deps = [ - "//pkg/api/install:go_default_library", "//pkg/apis/admissionregistration/install:go_default_library", "//pkg/apis/apps/install:go_default_library", "//pkg/apis/authentication/install:go_default_library", @@ -22,6 +22,8 @@ go_library( "//pkg/apis/batch/install:go_default_library", "//pkg/apis/certificates/install:go_default_library", "//pkg/apis/componentconfig/install:go_default_library", + "//pkg/apis/core/install:go_default_library", + "//pkg/apis/events/install:go_default_library", "//pkg/apis/extensions/install:go_default_library", "//pkg/apis/networking/install:go_default_library", "//pkg/apis/policy/install:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme/doc.go index 99b0eae18888..3ec2200d0990 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with the default arguments. - // This package contains the scheme of the automatically generated clientset. 
package scheme diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme/register.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme/register.go index 500d8da9fa23..e9ef85e38b3c 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme/register.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme/register.go @@ -23,7 +23,6 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" serializer "k8s.io/apimachinery/pkg/runtime/serializer" - core "k8s.io/kubernetes/pkg/api/install" admissionregistration "k8s.io/kubernetes/pkg/apis/admissionregistration/install" apps "k8s.io/kubernetes/pkg/apis/apps/install" authentication "k8s.io/kubernetes/pkg/apis/authentication/install" @@ -31,6 +30,8 @@ import ( autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling/install" batch "k8s.io/kubernetes/pkg/apis/batch/install" certificates "k8s.io/kubernetes/pkg/apis/certificates/install" + core "k8s.io/kubernetes/pkg/apis/core/install" + events "k8s.io/kubernetes/pkg/apis/events/install" extensions "k8s.io/kubernetes/pkg/apis/extensions/install" networking "k8s.io/kubernetes/pkg/apis/networking/install" policy "k8s.io/kubernetes/pkg/apis/policy/install" @@ -63,6 +64,7 @@ func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *r autoscaling.Install(groupFactoryRegistry, registry, scheme) batch.Install(groupFactoryRegistry, registry, scheme) certificates.Install(groupFactoryRegistry, registry, scheme) + events.Install(groupFactoryRegistry, registry, scheme) extensions.Install(groupFactoryRegistry, registry, scheme) networking.Install(groupFactoryRegistry, registry, scheme) policy.Install(groupFactoryRegistry, registry, scheme) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/BUILD index d7894dfa43fc..74333876b72f 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/BUILD @@ -10,10 +10,12 @@ go_library( srcs = [ "admissionregistration_client.go", "doc.go", - "externaladmissionhookconfiguration.go", "generated_expansion.go", "initializerconfiguration.go", + "mutatingwebhookconfiguration.go", + "validatingwebhookconfiguration.go", ], + importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion", deps = [ "//pkg/apis/admissionregistration:go_default_library", "//pkg/client/clientset_generated/internalclientset/scheme:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/admissionregistration_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/admissionregistration_client.go index e64ad0d1375d..319d5ebd9944 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/admissionregistration_client.go +++ 
b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/admissionregistration_client.go @@ -23,8 +23,9 @@ import ( type AdmissionregistrationInterface interface { RESTClient() rest.Interface - ExternalAdmissionHookConfigurationsGetter InitializerConfigurationsGetter + MutatingWebhookConfigurationsGetter + ValidatingWebhookConfigurationsGetter } // AdmissionregistrationClient is used to interact with features provided by the admissionregistration.k8s.io group. @@ -32,14 +33,18 @@ type AdmissionregistrationClient struct { restClient rest.Interface } -func (c *AdmissionregistrationClient) ExternalAdmissionHookConfigurations() ExternalAdmissionHookConfigurationInterface { - return newExternalAdmissionHookConfigurations(c) -} - func (c *AdmissionregistrationClient) InitializerConfigurations() InitializerConfigurationInterface { return newInitializerConfigurations(c) } +func (c *AdmissionregistrationClient) MutatingWebhookConfigurations() MutatingWebhookConfigurationInterface { + return newMutatingWebhookConfigurations(c) +} + +func (c *AdmissionregistrationClient) ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInterface { + return newValidatingWebhookConfigurations(c) +} + // NewForConfig creates a new AdmissionregistrationClient for the given config. func NewForConfig(c *rest.Config) (*AdmissionregistrationClient, error) { config := *c diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/doc.go index 0a27970fbb3b..3adf06d8934f 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with the default arguments. - // This package has the automatically generated typed clients. package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/externaladmissionhookconfiguration.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/externaladmissionhookconfiguration.go deleted file mode 100644 index 09b2b383aa7b..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/externaladmissionhookconfiguration.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - admissionregistration "k8s.io/kubernetes/pkg/apis/admissionregistration" - scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -// ExternalAdmissionHookConfigurationsGetter has a method to return a ExternalAdmissionHookConfigurationInterface. -// A group's client should implement this interface. -type ExternalAdmissionHookConfigurationsGetter interface { - ExternalAdmissionHookConfigurations() ExternalAdmissionHookConfigurationInterface -} - -// ExternalAdmissionHookConfigurationInterface has methods to work with ExternalAdmissionHookConfiguration resources. -type ExternalAdmissionHookConfigurationInterface interface { - Create(*admissionregistration.ExternalAdmissionHookConfiguration) (*admissionregistration.ExternalAdmissionHookConfiguration, error) - Update(*admissionregistration.ExternalAdmissionHookConfiguration) (*admissionregistration.ExternalAdmissionHookConfiguration, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*admissionregistration.ExternalAdmissionHookConfiguration, error) - List(opts v1.ListOptions) (*admissionregistration.ExternalAdmissionHookConfigurationList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *admissionregistration.ExternalAdmissionHookConfiguration, err error) - ExternalAdmissionHookConfigurationExpansion -} - -// externalAdmissionHookConfigurations implements ExternalAdmissionHookConfigurationInterface -type externalAdmissionHookConfigurations struct { - client rest.Interface -} - -// newExternalAdmissionHookConfigurations returns a ExternalAdmissionHookConfigurations -func newExternalAdmissionHookConfigurations(c *AdmissionregistrationClient) *externalAdmissionHookConfigurations { - return &externalAdmissionHookConfigurations{ - client: c.RESTClient(), - } -} - -// Get takes name of the externalAdmissionHookConfiguration, and returns the corresponding externalAdmissionHookConfiguration object, and an error if there is any. -func (c *externalAdmissionHookConfigurations) Get(name string, options v1.GetOptions) (result *admissionregistration.ExternalAdmissionHookConfiguration, err error) { - result = &admissionregistration.ExternalAdmissionHookConfiguration{} - err = c.client.Get(). - Resource("externaladmissionhookconfigurations"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ExternalAdmissionHookConfigurations that match those selectors. -func (c *externalAdmissionHookConfigurations) List(opts v1.ListOptions) (result *admissionregistration.ExternalAdmissionHookConfigurationList, err error) { - result = &admissionregistration.ExternalAdmissionHookConfigurationList{} - err = c.client.Get(). - Resource("externaladmissionhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested externalAdmissionHookConfigurations. -func (c *externalAdmissionHookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). 
- Resource("externaladmissionhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a externalAdmissionHookConfiguration and creates it. Returns the server's representation of the externalAdmissionHookConfiguration, and an error, if there is any. -func (c *externalAdmissionHookConfigurations) Create(externalAdmissionHookConfiguration *admissionregistration.ExternalAdmissionHookConfiguration) (result *admissionregistration.ExternalAdmissionHookConfiguration, err error) { - result = &admissionregistration.ExternalAdmissionHookConfiguration{} - err = c.client.Post(). - Resource("externaladmissionhookconfigurations"). - Body(externalAdmissionHookConfiguration). - Do(). - Into(result) - return -} - -// Update takes the representation of a externalAdmissionHookConfiguration and updates it. Returns the server's representation of the externalAdmissionHookConfiguration, and an error, if there is any. -func (c *externalAdmissionHookConfigurations) Update(externalAdmissionHookConfiguration *admissionregistration.ExternalAdmissionHookConfiguration) (result *admissionregistration.ExternalAdmissionHookConfiguration, err error) { - result = &admissionregistration.ExternalAdmissionHookConfiguration{} - err = c.client.Put(). - Resource("externaladmissionhookconfigurations"). - Name(externalAdmissionHookConfiguration.Name). - Body(externalAdmissionHookConfiguration). - Do(). - Into(result) - return -} - -// Delete takes name of the externalAdmissionHookConfiguration and deletes it. Returns an error if one occurs. -func (c *externalAdmissionHookConfigurations) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("externaladmissionhookconfigurations"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *externalAdmissionHookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Resource("externaladmissionhookconfigurations"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched externalAdmissionHookConfiguration. -func (c *externalAdmissionHookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *admissionregistration.ExternalAdmissionHookConfiguration, err error) { - result = &admissionregistration.ExternalAdmissionHookConfiguration{} - err = c.client.Patch(pt). - Resource("externaladmissionhookconfigurations"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/generated_expansion.go index 631817dea3f7..d41780a7a668 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/generated_expansion.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/generated_expansion.go @@ -16,6 +16,8 @@ limitations under the License. 
package internalversion -type ExternalAdmissionHookConfigurationExpansion interface{} - type InitializerConfigurationExpansion interface{} + +type MutatingWebhookConfigurationExpansion interface{} + +type ValidatingWebhookConfigurationExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/mutatingwebhookconfiguration.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/mutatingwebhookconfiguration.go new file mode 100644 index 000000000000..a58d0fd1cddc --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/mutatingwebhookconfiguration.go @@ -0,0 +1,145 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internalversion + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + admissionregistration "k8s.io/kubernetes/pkg/apis/admissionregistration" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" +) + +// MutatingWebhookConfigurationsGetter has a method to return a MutatingWebhookConfigurationInterface. +// A group's client should implement this interface. +type MutatingWebhookConfigurationsGetter interface { + MutatingWebhookConfigurations() MutatingWebhookConfigurationInterface +} + +// MutatingWebhookConfigurationInterface has methods to work with MutatingWebhookConfiguration resources. 
+type MutatingWebhookConfigurationInterface interface { + Create(*admissionregistration.MutatingWebhookConfiguration) (*admissionregistration.MutatingWebhookConfiguration, error) + Update(*admissionregistration.MutatingWebhookConfiguration) (*admissionregistration.MutatingWebhookConfiguration, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*admissionregistration.MutatingWebhookConfiguration, error) + List(opts v1.ListOptions) (*admissionregistration.MutatingWebhookConfigurationList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *admissionregistration.MutatingWebhookConfiguration, err error) + MutatingWebhookConfigurationExpansion +} + +// mutatingWebhookConfigurations implements MutatingWebhookConfigurationInterface +type mutatingWebhookConfigurations struct { + client rest.Interface +} + +// newMutatingWebhookConfigurations returns a MutatingWebhookConfigurations +func newMutatingWebhookConfigurations(c *AdmissionregistrationClient) *mutatingWebhookConfigurations { + return &mutatingWebhookConfigurations{ + client: c.RESTClient(), + } +} + +// Get takes name of the mutatingWebhookConfiguration, and returns the corresponding mutatingWebhookConfiguration object, and an error if there is any. +func (c *mutatingWebhookConfigurations) Get(name string, options v1.GetOptions) (result *admissionregistration.MutatingWebhookConfiguration, err error) { + result = &admissionregistration.MutatingWebhookConfiguration{} + err = c.client.Get(). + Resource("mutatingwebhookconfigurations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors. +func (c *mutatingWebhookConfigurations) List(opts v1.ListOptions) (result *admissionregistration.MutatingWebhookConfigurationList, err error) { + result = &admissionregistration.MutatingWebhookConfigurationList{} + err = c.client.Get(). + Resource("mutatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations. +func (c *mutatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("mutatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a mutatingWebhookConfiguration and creates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. +func (c *mutatingWebhookConfigurations) Create(mutatingWebhookConfiguration *admissionregistration.MutatingWebhookConfiguration) (result *admissionregistration.MutatingWebhookConfiguration, err error) { + result = &admissionregistration.MutatingWebhookConfiguration{} + err = c.client.Post(). + Resource("mutatingwebhookconfigurations"). + Body(mutatingWebhookConfiguration). + Do(). + Into(result) + return +} + +// Update takes the representation of a mutatingWebhookConfiguration and updates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. 
+func (c *mutatingWebhookConfigurations) Update(mutatingWebhookConfiguration *admissionregistration.MutatingWebhookConfiguration) (result *admissionregistration.MutatingWebhookConfiguration, err error) { + result = &admissionregistration.MutatingWebhookConfiguration{} + err = c.client.Put(). + Resource("mutatingwebhookconfigurations"). + Name(mutatingWebhookConfiguration.Name). + Body(mutatingWebhookConfiguration). + Do(). + Into(result) + return +} + +// Delete takes name of the mutatingWebhookConfiguration and deletes it. Returns an error if one occurs. +func (c *mutatingWebhookConfigurations) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("mutatingwebhookconfigurations"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *mutatingWebhookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("mutatingwebhookconfigurations"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched mutatingWebhookConfiguration. +func (c *mutatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *admissionregistration.MutatingWebhookConfiguration, err error) { + result = &admissionregistration.MutatingWebhookConfiguration{} + err = c.client.Patch(pt). + Resource("mutatingwebhookconfigurations"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/validatingwebhookconfiguration.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/validatingwebhookconfiguration.go new file mode 100644 index 000000000000..090cd6f67461 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/validatingwebhookconfiguration.go @@ -0,0 +1,145 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internalversion + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + admissionregistration "k8s.io/kubernetes/pkg/apis/admissionregistration" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" +) + +// ValidatingWebhookConfigurationsGetter has a method to return a ValidatingWebhookConfigurationInterface. +// A group's client should implement this interface. +type ValidatingWebhookConfigurationsGetter interface { + ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInterface +} + +// ValidatingWebhookConfigurationInterface has methods to work with ValidatingWebhookConfiguration resources. 
+type ValidatingWebhookConfigurationInterface interface { + Create(*admissionregistration.ValidatingWebhookConfiguration) (*admissionregistration.ValidatingWebhookConfiguration, error) + Update(*admissionregistration.ValidatingWebhookConfiguration) (*admissionregistration.ValidatingWebhookConfiguration, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*admissionregistration.ValidatingWebhookConfiguration, error) + List(opts v1.ListOptions) (*admissionregistration.ValidatingWebhookConfigurationList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *admissionregistration.ValidatingWebhookConfiguration, err error) + ValidatingWebhookConfigurationExpansion +} + +// validatingWebhookConfigurations implements ValidatingWebhookConfigurationInterface +type validatingWebhookConfigurations struct { + client rest.Interface +} + +// newValidatingWebhookConfigurations returns a ValidatingWebhookConfigurations +func newValidatingWebhookConfigurations(c *AdmissionregistrationClient) *validatingWebhookConfigurations { + return &validatingWebhookConfigurations{ + client: c.RESTClient(), + } +} + +// Get takes name of the validatingWebhookConfiguration, and returns the corresponding validatingWebhookConfiguration object, and an error if there is any. +func (c *validatingWebhookConfigurations) Get(name string, options v1.GetOptions) (result *admissionregistration.ValidatingWebhookConfiguration, err error) { + result = &admissionregistration.ValidatingWebhookConfiguration{} + err = c.client.Get(). + Resource("validatingwebhookconfigurations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors. +func (c *validatingWebhookConfigurations) List(opts v1.ListOptions) (result *admissionregistration.ValidatingWebhookConfigurationList, err error) { + result = &admissionregistration.ValidatingWebhookConfigurationList{} + err = c.client.Get(). + Resource("validatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations. +func (c *validatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("validatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a validatingWebhookConfiguration and creates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. +func (c *validatingWebhookConfigurations) Create(validatingWebhookConfiguration *admissionregistration.ValidatingWebhookConfiguration) (result *admissionregistration.ValidatingWebhookConfiguration, err error) { + result = &admissionregistration.ValidatingWebhookConfiguration{} + err = c.client.Post(). + Resource("validatingwebhookconfigurations"). + Body(validatingWebhookConfiguration). + Do(). + Into(result) + return +} + +// Update takes the representation of a validatingWebhookConfiguration and updates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. 
+func (c *validatingWebhookConfigurations) Update(validatingWebhookConfiguration *admissionregistration.ValidatingWebhookConfiguration) (result *admissionregistration.ValidatingWebhookConfiguration, err error) { + result = &admissionregistration.ValidatingWebhookConfiguration{} + err = c.client.Put(). + Resource("validatingwebhookconfigurations"). + Name(validatingWebhookConfiguration.Name). + Body(validatingWebhookConfiguration). + Do(). + Into(result) + return +} + +// Delete takes name of the validatingWebhookConfiguration and deletes it. Returns an error if one occurs. +func (c *validatingWebhookConfigurations) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("validatingwebhookconfigurations"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *validatingWebhookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("validatingwebhookconfigurations"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched validatingWebhookConfiguration. +func (c *validatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *admissionregistration.ValidatingWebhookConfiguration, err error) { + result = &admissionregistration.ValidatingWebhookConfiguration{} + err = c.client.Patch(pt). + Resource("validatingwebhookconfigurations"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/BUILD index e25717cdd5ff..e3fad87bee99 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/BUILD @@ -14,9 +14,10 @@ go_library( "generated_expansion.go", "statefulset.go", ], + importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion", deps = [ "//pkg/apis/apps:go_default_library", - "//pkg/apis/extensions:go_default_library", + "//pkg/apis/autoscaling:go_default_library", "//pkg/client/clientset_generated/internalclientset/scheme:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/doc.go index 0a27970fbb3b..3adf06d8934f 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with the default arguments. - // This package has the automatically generated typed clients. 
package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/statefulset.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/statefulset.go index 85ee4bd86aae..ae64cc008811 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/statefulset.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/statefulset.go @@ -22,7 +22,7 @@ import ( watch "k8s.io/apimachinery/pkg/watch" rest "k8s.io/client-go/rest" apps "k8s.io/kubernetes/pkg/apis/apps" - extensions "k8s.io/kubernetes/pkg/apis/extensions" + autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" ) @@ -43,8 +43,8 @@ type StatefulSetInterface interface { List(opts v1.ListOptions) (*apps.StatefulSetList, error) Watch(opts v1.ListOptions) (watch.Interface, error) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *apps.StatefulSet, err error) - GetScale(statefulSetName string, options v1.GetOptions) (*extensions.Scale, error) - UpdateScale(statefulSetName string, scale *extensions.Scale) (*extensions.Scale, error) + GetScale(statefulSetName string, options v1.GetOptions) (*autoscaling.Scale, error) + UpdateScale(statefulSetName string, scale *autoscaling.Scale) (*autoscaling.Scale, error) StatefulSetExpansion } @@ -175,9 +175,9 @@ func (c *statefulSets) Patch(name string, pt types.PatchType, data []byte, subre return } -// GetScale takes name of the statefulSet, and returns the corresponding extensions.Scale object, and an error if there is any. -func (c *statefulSets) GetScale(statefulSetName string, options v1.GetOptions) (result *extensions.Scale, err error) { - result = &extensions.Scale{} +// GetScale takes name of the statefulSet, and returns the corresponding autoscaling.Scale object, and an error if there is any. +func (c *statefulSets) GetScale(statefulSetName string, options v1.GetOptions) (result *autoscaling.Scale, err error) { + result = &autoscaling.Scale{} err = c.client.Get(). Namespace(c.ns). Resource("statefulsets"). @@ -190,8 +190,8 @@ func (c *statefulSets) GetScale(statefulSetName string, options v1.GetOptions) ( } // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *statefulSets) UpdateScale(statefulSetName string, scale *extensions.Scale) (result *extensions.Scale, err error) { - result = &extensions.Scale{} +func (c *statefulSets) UpdateScale(statefulSetName string, scale *autoscaling.Scale) (result *autoscaling.Scale, err error) { + result = &autoscaling.Scale{} err = c.client.Put(). Namespace(c.ns). Resource("statefulsets"). 
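The statefulset.go hunk above moves the scale subresource of the internal apps client from extensions.Scale to autoscaling.Scale. The following is a minimal sketch of how a caller might exercise GetScale/UpdateScale after that change; the clientset constructor (NewForConfig), the Apps() accessor, the kubeconfig path, the "default" namespace, and the "web" StatefulSet name are assumptions for illustration and do not appear in this diff.

// Sketch only: reading and bumping replicas through the scale subresource,
// which now uses autoscaling.Scale instead of extensions.Scale.
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
	internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
)

func main() {
	// Assumed: kubeconfig-based construction; any *rest.Config would work here.
	config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	client, err := internalclientset.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// Assumed accessor chain for the internal apps group client.
	statefulSets := client.Apps().StatefulSets("default")

	// GetScale now returns *autoscaling.Scale (previously *extensions.Scale).
	scale, err := statefulSets.GetScale("web", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("current replicas: %d\n", scale.Spec.Replicas)

	// Increase the replica count via the scale subresource.
	scale.Spec.Replicas++
	if _, err := statefulSets.UpdateScale("web", scale); err != nil {
		panic(err)
	}
}

Aside from the Scale type, the call pattern is unchanged, so callers only need to swap the imported scale type when picking up this vendor bump.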
diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/BUILD index cf58a8906d37..625149228f0a 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/BUILD @@ -14,6 +14,7 @@ go_library( "tokenreview.go", "tokenreview_expansion.go", ], + importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion", deps = [ "//pkg/apis/authentication:go_default_library", "//pkg/client/clientset_generated/internalclientset/scheme:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/doc.go index 0a27970fbb3b..3adf06d8934f 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with the default arguments. - // This package has the automatically generated typed clients. package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/BUILD index 53deb0f43cfa..ff1562ea7664 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/BUILD @@ -20,6 +20,7 @@ go_library( "subjectaccessreview.go", "subjectaccessreview_expansion.go", ], + importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion", deps = [ "//pkg/apis/authorization:go_default_library", "//pkg/client/clientset_generated/internalclientset/scheme:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/doc.go index 0a27970fbb3b..3adf06d8934f 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with the default arguments. - // This package has the automatically generated typed clients. 
package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/BUILD index 2d8baf418bb5..bed64c61992f 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/BUILD @@ -13,6 +13,7 @@ go_library( "generated_expansion.go", "horizontalpodautoscaler.go", ], + importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion", deps = [ "//pkg/apis/autoscaling:go_default_library", "//pkg/client/clientset_generated/internalclientset/scheme:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/doc.go index 0a27970fbb3b..3adf06d8934f 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with the default arguments. - // This package has the automatically generated typed clients. package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/BUILD index 761a54f89df4..9f870c9ccb60 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/BUILD @@ -14,6 +14,7 @@ go_library( "generated_expansion.go", "job.go", ], + importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion", deps = [ "//pkg/apis/batch:go_default_library", "//pkg/client/clientset_generated/internalclientset/scheme:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/doc.go index 0a27970fbb3b..3adf06d8934f 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with the default arguments. - // This package has the automatically generated typed clients. 
package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/BUILD index 3e39ccdf0485..c55caeb2d882 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/BUILD @@ -14,6 +14,7 @@ go_library( "doc.go", "generated_expansion.go", ], + importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion", deps = [ "//pkg/apis/certificates:go_default_library", "//pkg/client/clientset_generated/internalclientset/scheme:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/doc.go index 0a27970fbb3b..3adf06d8934f 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with the default arguments. - // This package has the automatically generated typed clients. package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/BUILD index e818124ec9ea..ad0e9695a541 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/BUILD @@ -33,11 +33,13 @@ go_library( "service_expansion.go", "serviceaccount.go", ], + importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/api/ref:go_default_library", - "//pkg/api/v1:go_default_library", - "//pkg/apis/extensions:go_default_library", + "//pkg/apis/autoscaling:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/v1:go_default_library", "//pkg/client/clientset_generated/internalclientset/scheme:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/componentstatus.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/componentstatus.go index dfd6969a91fd..66b4b464ffef 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/componentstatus.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/componentstatus.go @@ -21,7 +21,7 @@ import ( types "k8s.io/apimachinery/pkg/types" watch 
"k8s.io/apimachinery/pkg/watch" rest "k8s.io/client-go/rest" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" ) @@ -33,14 +33,14 @@ type ComponentStatusesGetter interface { // ComponentStatusInterface has methods to work with ComponentStatus resources. type ComponentStatusInterface interface { - Create(*api.ComponentStatus) (*api.ComponentStatus, error) - Update(*api.ComponentStatus) (*api.ComponentStatus, error) + Create(*core.ComponentStatus) (*core.ComponentStatus, error) + Update(*core.ComponentStatus) (*core.ComponentStatus, error) Delete(name string, options *v1.DeleteOptions) error DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*api.ComponentStatus, error) - List(opts v1.ListOptions) (*api.ComponentStatusList, error) + Get(name string, options v1.GetOptions) (*core.ComponentStatus, error) + List(opts v1.ListOptions) (*core.ComponentStatusList, error) Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *api.ComponentStatus, err error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.ComponentStatus, err error) ComponentStatusExpansion } @@ -57,8 +57,8 @@ func newComponentStatuses(c *CoreClient) *componentStatuses { } // Get takes name of the componentStatus, and returns the corresponding componentStatus object, and an error if there is any. -func (c *componentStatuses) Get(name string, options v1.GetOptions) (result *api.ComponentStatus, err error) { - result = &api.ComponentStatus{} +func (c *componentStatuses) Get(name string, options v1.GetOptions) (result *core.ComponentStatus, err error) { + result = &core.ComponentStatus{} err = c.client.Get(). Resource("componentstatuses"). Name(name). @@ -69,8 +69,8 @@ func (c *componentStatuses) Get(name string, options v1.GetOptions) (result *api } // List takes label and field selectors, and returns the list of ComponentStatuses that match those selectors. -func (c *componentStatuses) List(opts v1.ListOptions) (result *api.ComponentStatusList, err error) { - result = &api.ComponentStatusList{} +func (c *componentStatuses) List(opts v1.ListOptions) (result *core.ComponentStatusList, err error) { + result = &core.ComponentStatusList{} err = c.client.Get(). Resource("componentstatuses"). VersionedParams(&opts, scheme.ParameterCodec). @@ -89,8 +89,8 @@ func (c *componentStatuses) Watch(opts v1.ListOptions) (watch.Interface, error) } // Create takes the representation of a componentStatus and creates it. Returns the server's representation of the componentStatus, and an error, if there is any. -func (c *componentStatuses) Create(componentStatus *api.ComponentStatus) (result *api.ComponentStatus, err error) { - result = &api.ComponentStatus{} +func (c *componentStatuses) Create(componentStatus *core.ComponentStatus) (result *core.ComponentStatus, err error) { + result = &core.ComponentStatus{} err = c.client.Post(). Resource("componentstatuses"). Body(componentStatus). @@ -100,8 +100,8 @@ func (c *componentStatuses) Create(componentStatus *api.ComponentStatus) (result } // Update takes the representation of a componentStatus and updates it. Returns the server's representation of the componentStatus, and an error, if there is any. 
-func (c *componentStatuses) Update(componentStatus *api.ComponentStatus) (result *api.ComponentStatus, err error) { - result = &api.ComponentStatus{} +func (c *componentStatuses) Update(componentStatus *core.ComponentStatus) (result *core.ComponentStatus, err error) { + result = &core.ComponentStatus{} err = c.client.Put(). Resource("componentstatuses"). Name(componentStatus.Name). @@ -132,8 +132,8 @@ func (c *componentStatuses) DeleteCollection(options *v1.DeleteOptions, listOpti } // Patch applies the patch and returns the patched componentStatus. -func (c *componentStatuses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *api.ComponentStatus, err error) { - result = &api.ComponentStatus{} +func (c *componentStatuses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.ComponentStatus, err error) { + result = &core.ComponentStatus{} err = c.client.Patch(pt). Resource("componentstatuses"). SubResource(subresources...). diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/configmap.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/configmap.go index 53ed9fed80d8..7f5adbc4498a 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/configmap.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/configmap.go @@ -21,7 +21,7 @@ import ( types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rest "k8s.io/client-go/rest" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" ) @@ -33,14 +33,14 @@ type ConfigMapsGetter interface { // ConfigMapInterface has methods to work with ConfigMap resources. type ConfigMapInterface interface { - Create(*api.ConfigMap) (*api.ConfigMap, error) - Update(*api.ConfigMap) (*api.ConfigMap, error) + Create(*core.ConfigMap) (*core.ConfigMap, error) + Update(*core.ConfigMap) (*core.ConfigMap, error) Delete(name string, options *v1.DeleteOptions) error DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*api.ConfigMap, error) - List(opts v1.ListOptions) (*api.ConfigMapList, error) + Get(name string, options v1.GetOptions) (*core.ConfigMap, error) + List(opts v1.ListOptions) (*core.ConfigMapList, error) Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *api.ConfigMap, err error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.ConfigMap, err error) ConfigMapExpansion } @@ -59,8 +59,8 @@ func newConfigMaps(c *CoreClient, namespace string) *configMaps { } // Get takes name of the configMap, and returns the corresponding configMap object, and an error if there is any. -func (c *configMaps) Get(name string, options v1.GetOptions) (result *api.ConfigMap, err error) { - result = &api.ConfigMap{} +func (c *configMaps) Get(name string, options v1.GetOptions) (result *core.ConfigMap, err error) { + result = &core.ConfigMap{} err = c.client.Get(). Namespace(c.ns). Resource("configmaps"). 
@@ -72,8 +72,8 @@ func (c *configMaps) Get(name string, options v1.GetOptions) (result *api.Config } // List takes label and field selectors, and returns the list of ConfigMaps that match those selectors. -func (c *configMaps) List(opts v1.ListOptions) (result *api.ConfigMapList, err error) { - result = &api.ConfigMapList{} +func (c *configMaps) List(opts v1.ListOptions) (result *core.ConfigMapList, err error) { + result = &core.ConfigMapList{} err = c.client.Get(). Namespace(c.ns). Resource("configmaps"). @@ -94,8 +94,8 @@ func (c *configMaps) Watch(opts v1.ListOptions) (watch.Interface, error) { } // Create takes the representation of a configMap and creates it. Returns the server's representation of the configMap, and an error, if there is any. -func (c *configMaps) Create(configMap *api.ConfigMap) (result *api.ConfigMap, err error) { - result = &api.ConfigMap{} +func (c *configMaps) Create(configMap *core.ConfigMap) (result *core.ConfigMap, err error) { + result = &core.ConfigMap{} err = c.client.Post(). Namespace(c.ns). Resource("configmaps"). @@ -106,8 +106,8 @@ func (c *configMaps) Create(configMap *api.ConfigMap) (result *api.ConfigMap, er } // Update takes the representation of a configMap and updates it. Returns the server's representation of the configMap, and an error, if there is any. -func (c *configMaps) Update(configMap *api.ConfigMap) (result *api.ConfigMap, err error) { - result = &api.ConfigMap{} +func (c *configMaps) Update(configMap *core.ConfigMap) (result *core.ConfigMap, err error) { + result = &core.ConfigMap{} err = c.client.Put(). Namespace(c.ns). Resource("configmaps"). @@ -141,8 +141,8 @@ func (c *configMaps) DeleteCollection(options *v1.DeleteOptions, listOptions v1. } // Patch applies the patch and returns the patched configMap. -func (c *configMaps) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *api.ConfigMap, err error) { - result = &api.ConfigMap{} +func (c *configMaps) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.ConfigMap, err error) { + result = &core.ConfigMap{} err = c.client.Patch(pt). Namespace(c.ns). Resource("configmaps"). diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/doc.go index 0a27970fbb3b..3adf06d8934f 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with the default arguments. - // This package has the automatically generated typed clients. 
package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/endpoints.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/endpoints.go index 39fe3d44c1a5..8a35c807a066 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/endpoints.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/endpoints.go @@ -21,7 +21,7 @@ import ( types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rest "k8s.io/client-go/rest" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" ) @@ -33,14 +33,14 @@ type EndpointsGetter interface { // EndpointsInterface has methods to work with Endpoints resources. type EndpointsInterface interface { - Create(*api.Endpoints) (*api.Endpoints, error) - Update(*api.Endpoints) (*api.Endpoints, error) + Create(*core.Endpoints) (*core.Endpoints, error) + Update(*core.Endpoints) (*core.Endpoints, error) Delete(name string, options *v1.DeleteOptions) error DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*api.Endpoints, error) - List(opts v1.ListOptions) (*api.EndpointsList, error) + Get(name string, options v1.GetOptions) (*core.Endpoints, error) + List(opts v1.ListOptions) (*core.EndpointsList, error) Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *api.Endpoints, err error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.Endpoints, err error) EndpointsExpansion } @@ -59,8 +59,8 @@ func newEndpoints(c *CoreClient, namespace string) *endpoints { } // Get takes name of the endpoints, and returns the corresponding endpoints object, and an error if there is any. -func (c *endpoints) Get(name string, options v1.GetOptions) (result *api.Endpoints, err error) { - result = &api.Endpoints{} +func (c *endpoints) Get(name string, options v1.GetOptions) (result *core.Endpoints, err error) { + result = &core.Endpoints{} err = c.client.Get(). Namespace(c.ns). Resource("endpoints"). @@ -72,8 +72,8 @@ func (c *endpoints) Get(name string, options v1.GetOptions) (result *api.Endpoin } // List takes label and field selectors, and returns the list of Endpoints that match those selectors. -func (c *endpoints) List(opts v1.ListOptions) (result *api.EndpointsList, err error) { - result = &api.EndpointsList{} +func (c *endpoints) List(opts v1.ListOptions) (result *core.EndpointsList, err error) { + result = &core.EndpointsList{} err = c.client.Get(). Namespace(c.ns). Resource("endpoints"). @@ -94,8 +94,8 @@ func (c *endpoints) Watch(opts v1.ListOptions) (watch.Interface, error) { } // Create takes the representation of a endpoints and creates it. Returns the server's representation of the endpoints, and an error, if there is any. -func (c *endpoints) Create(endpoints *api.Endpoints) (result *api.Endpoints, err error) { - result = &api.Endpoints{} +func (c *endpoints) Create(endpoints *core.Endpoints) (result *core.Endpoints, err error) { + result = &core.Endpoints{} err = c.client.Post(). Namespace(c.ns). Resource("endpoints"). 
@@ -106,8 +106,8 @@ func (c *endpoints) Create(endpoints *api.Endpoints) (result *api.Endpoints, err } // Update takes the representation of a endpoints and updates it. Returns the server's representation of the endpoints, and an error, if there is any. -func (c *endpoints) Update(endpoints *api.Endpoints) (result *api.Endpoints, err error) { - result = &api.Endpoints{} +func (c *endpoints) Update(endpoints *core.Endpoints) (result *core.Endpoints, err error) { + result = &core.Endpoints{} err = c.client.Put(). Namespace(c.ns). Resource("endpoints"). @@ -141,8 +141,8 @@ func (c *endpoints) DeleteCollection(options *v1.DeleteOptions, listOptions v1.L } // Patch applies the patch and returns the patched endpoints. -func (c *endpoints) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *api.Endpoints, err error) { - result = &api.Endpoints{} +func (c *endpoints) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.Endpoints, err error) { + result = &core.Endpoints{} err = c.client.Patch(pt). Namespace(c.ns). Resource("endpoints"). diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/event.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/event.go index 143099b67060..cad5b84a3c90 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/event.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/event.go @@ -21,7 +21,7 @@ import ( types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rest "k8s.io/client-go/rest" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" ) @@ -33,14 +33,14 @@ type EventsGetter interface { // EventInterface has methods to work with Event resources. type EventInterface interface { - Create(*api.Event) (*api.Event, error) - Update(*api.Event) (*api.Event, error) + Create(*core.Event) (*core.Event, error) + Update(*core.Event) (*core.Event, error) Delete(name string, options *v1.DeleteOptions) error DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*api.Event, error) - List(opts v1.ListOptions) (*api.EventList, error) + Get(name string, options v1.GetOptions) (*core.Event, error) + List(opts v1.ListOptions) (*core.EventList, error) Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *api.Event, err error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.Event, err error) EventExpansion } @@ -59,8 +59,8 @@ func newEvents(c *CoreClient, namespace string) *events { } // Get takes name of the event, and returns the corresponding event object, and an error if there is any. -func (c *events) Get(name string, options v1.GetOptions) (result *api.Event, err error) { - result = &api.Event{} +func (c *events) Get(name string, options v1.GetOptions) (result *core.Event, err error) { + result = &core.Event{} err = c.client.Get(). Namespace(c.ns). Resource("events"). @@ -72,8 +72,8 @@ func (c *events) Get(name string, options v1.GetOptions) (result *api.Event, err } // List takes label and field selectors, and returns the list of Events that match those selectors. 
-func (c *events) List(opts v1.ListOptions) (result *api.EventList, err error) { - result = &api.EventList{} +func (c *events) List(opts v1.ListOptions) (result *core.EventList, err error) { + result = &core.EventList{} err = c.client.Get(). Namespace(c.ns). Resource("events"). @@ -94,8 +94,8 @@ func (c *events) Watch(opts v1.ListOptions) (watch.Interface, error) { } // Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. -func (c *events) Create(event *api.Event) (result *api.Event, err error) { - result = &api.Event{} +func (c *events) Create(event *core.Event) (result *core.Event, err error) { + result = &core.Event{} err = c.client.Post(). Namespace(c.ns). Resource("events"). @@ -106,8 +106,8 @@ func (c *events) Create(event *api.Event) (result *api.Event, err error) { } // Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. -func (c *events) Update(event *api.Event) (result *api.Event, err error) { - result = &api.Event{} +func (c *events) Update(event *core.Event) (result *core.Event, err error) { + result = &core.Event{} err = c.client.Put(). Namespace(c.ns). Resource("events"). @@ -141,8 +141,8 @@ func (c *events) DeleteCollection(options *v1.DeleteOptions, listOptions v1.List } // Patch applies the patch and returns the patched event. -func (c *events) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *api.Event, err error) { - result = &api.Event{} +func (c *events) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.Event, err error) { + result = &core.Event{} err = c.client.Patch(pt). Namespace(c.ns). Resource("events"). diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/event_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/event_expansion.go index d9ac64fdb632..5f1ebb89cc9f 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/event_expansion.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/event_expansion.go @@ -24,9 +24,9 @@ import ( "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/ref" - k8s_api_v1 "k8s.io/kubernetes/pkg/api/v1" + api "k8s.io/kubernetes/pkg/apis/core" + k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1" ) // The EventExpansion interface allows manually adding extra methods to the EventInterface. 
@@ -155,7 +155,7 @@ type EventSinkImpl struct { func (e *EventSinkImpl) Create(event *v1.Event) (*v1.Event, error) { internalEvent := &api.Event{} - err := k8s_api_v1.Convert_v1_Event_To_api_Event(event, internalEvent, nil) + err := k8s_api_v1.Convert_v1_Event_To_core_Event(event, internalEvent, nil) if err != nil { return nil, err } @@ -168,7 +168,7 @@ func (e *EventSinkImpl) Create(event *v1.Event) (*v1.Event, error) { func (e *EventSinkImpl) Update(event *v1.Event) (*v1.Event, error) { internalEvent := &api.Event{} - err := k8s_api_v1.Convert_v1_Event_To_api_Event(event, internalEvent, nil) + err := k8s_api_v1.Convert_v1_Event_To_core_Event(event, internalEvent, nil) if err != nil { return nil, err } @@ -181,7 +181,7 @@ func (e *EventSinkImpl) Update(event *v1.Event) (*v1.Event, error) { func (e *EventSinkImpl) Patch(event *v1.Event, data []byte) (*v1.Event, error) { internalEvent := &api.Event{} - err := k8s_api_v1.Convert_v1_Event_To_api_Event(event, internalEvent, nil) + err := k8s_api_v1.Convert_v1_Event_To_core_Event(event, internalEvent, nil) if err != nil { return nil, err } @@ -190,7 +190,7 @@ func (e *EventSinkImpl) Patch(event *v1.Event, data []byte) (*v1.Event, error) { return nil, err } externalEvent := &v1.Event{} - err = k8s_api_v1.Convert_api_Event_To_v1_Event(internalEvent, externalEvent, nil) + err = k8s_api_v1.Convert_core_Event_To_v1_Event(internalEvent, externalEvent, nil) if err != nil { // Patch succeeded, no need to report the failed conversion return event, nil diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/limitrange.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/limitrange.go index f809d04cb0ff..de59dda99819 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/limitrange.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/limitrange.go @@ -21,7 +21,7 @@ import ( types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rest "k8s.io/client-go/rest" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" ) @@ -33,14 +33,14 @@ type LimitRangesGetter interface { // LimitRangeInterface has methods to work with LimitRange resources. 
type LimitRangeInterface interface { - Create(*api.LimitRange) (*api.LimitRange, error) - Update(*api.LimitRange) (*api.LimitRange, error) + Create(*core.LimitRange) (*core.LimitRange, error) + Update(*core.LimitRange) (*core.LimitRange, error) Delete(name string, options *v1.DeleteOptions) error DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*api.LimitRange, error) - List(opts v1.ListOptions) (*api.LimitRangeList, error) + Get(name string, options v1.GetOptions) (*core.LimitRange, error) + List(opts v1.ListOptions) (*core.LimitRangeList, error) Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *api.LimitRange, err error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.LimitRange, err error) LimitRangeExpansion } @@ -59,8 +59,8 @@ func newLimitRanges(c *CoreClient, namespace string) *limitRanges { } // Get takes name of the limitRange, and returns the corresponding limitRange object, and an error if there is any. -func (c *limitRanges) Get(name string, options v1.GetOptions) (result *api.LimitRange, err error) { - result = &api.LimitRange{} +func (c *limitRanges) Get(name string, options v1.GetOptions) (result *core.LimitRange, err error) { + result = &core.LimitRange{} err = c.client.Get(). Namespace(c.ns). Resource("limitranges"). @@ -72,8 +72,8 @@ func (c *limitRanges) Get(name string, options v1.GetOptions) (result *api.Limit } // List takes label and field selectors, and returns the list of LimitRanges that match those selectors. -func (c *limitRanges) List(opts v1.ListOptions) (result *api.LimitRangeList, err error) { - result = &api.LimitRangeList{} +func (c *limitRanges) List(opts v1.ListOptions) (result *core.LimitRangeList, err error) { + result = &core.LimitRangeList{} err = c.client.Get(). Namespace(c.ns). Resource("limitranges"). @@ -94,8 +94,8 @@ func (c *limitRanges) Watch(opts v1.ListOptions) (watch.Interface, error) { } // Create takes the representation of a limitRange and creates it. Returns the server's representation of the limitRange, and an error, if there is any. -func (c *limitRanges) Create(limitRange *api.LimitRange) (result *api.LimitRange, err error) { - result = &api.LimitRange{} +func (c *limitRanges) Create(limitRange *core.LimitRange) (result *core.LimitRange, err error) { + result = &core.LimitRange{} err = c.client.Post(). Namespace(c.ns). Resource("limitranges"). @@ -106,8 +106,8 @@ func (c *limitRanges) Create(limitRange *api.LimitRange) (result *api.LimitRange } // Update takes the representation of a limitRange and updates it. Returns the server's representation of the limitRange, and an error, if there is any. -func (c *limitRanges) Update(limitRange *api.LimitRange) (result *api.LimitRange, err error) { - result = &api.LimitRange{} +func (c *limitRanges) Update(limitRange *core.LimitRange) (result *core.LimitRange, err error) { + result = &core.LimitRange{} err = c.client.Put(). Namespace(c.ns). Resource("limitranges"). @@ -141,8 +141,8 @@ func (c *limitRanges) DeleteCollection(options *v1.DeleteOptions, listOptions v1 } // Patch applies the patch and returns the patched limitRange. 
-func (c *limitRanges) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *api.LimitRange, err error) { - result = &api.LimitRange{} +func (c *limitRanges) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.LimitRange, err error) { + result = &core.LimitRange{} err = c.client.Patch(pt). Namespace(c.ns). Resource("limitranges"). diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/namespace.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/namespace.go index fff41e420613..f4ae5388303e 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/namespace.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/namespace.go @@ -21,7 +21,7 @@ import ( types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rest "k8s.io/client-go/rest" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" ) @@ -33,15 +33,15 @@ type NamespacesGetter interface { // NamespaceInterface has methods to work with Namespace resources. type NamespaceInterface interface { - Create(*api.Namespace) (*api.Namespace, error) - Update(*api.Namespace) (*api.Namespace, error) - UpdateStatus(*api.Namespace) (*api.Namespace, error) + Create(*core.Namespace) (*core.Namespace, error) + Update(*core.Namespace) (*core.Namespace, error) + UpdateStatus(*core.Namespace) (*core.Namespace, error) Delete(name string, options *v1.DeleteOptions) error DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*api.Namespace, error) - List(opts v1.ListOptions) (*api.NamespaceList, error) + Get(name string, options v1.GetOptions) (*core.Namespace, error) + List(opts v1.ListOptions) (*core.NamespaceList, error) Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *api.Namespace, err error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.Namespace, err error) NamespaceExpansion } @@ -58,8 +58,8 @@ func newNamespaces(c *CoreClient) *namespaces { } // Get takes name of the namespace, and returns the corresponding namespace object, and an error if there is any. -func (c *namespaces) Get(name string, options v1.GetOptions) (result *api.Namespace, err error) { - result = &api.Namespace{} +func (c *namespaces) Get(name string, options v1.GetOptions) (result *core.Namespace, err error) { + result = &core.Namespace{} err = c.client.Get(). Resource("namespaces"). Name(name). @@ -70,8 +70,8 @@ func (c *namespaces) Get(name string, options v1.GetOptions) (result *api.Namesp } // List takes label and field selectors, and returns the list of Namespaces that match those selectors. -func (c *namespaces) List(opts v1.ListOptions) (result *api.NamespaceList, err error) { - result = &api.NamespaceList{} +func (c *namespaces) List(opts v1.ListOptions) (result *core.NamespaceList, err error) { + result = &core.NamespaceList{} err = c.client.Get(). Resource("namespaces"). VersionedParams(&opts, scheme.ParameterCodec). 
@@ -90,8 +90,8 @@ func (c *namespaces) Watch(opts v1.ListOptions) (watch.Interface, error) { } // Create takes the representation of a namespace and creates it. Returns the server's representation of the namespace, and an error, if there is any. -func (c *namespaces) Create(namespace *api.Namespace) (result *api.Namespace, err error) { - result = &api.Namespace{} +func (c *namespaces) Create(namespace *core.Namespace) (result *core.Namespace, err error) { + result = &core.Namespace{} err = c.client.Post(). Resource("namespaces"). Body(namespace). @@ -101,8 +101,8 @@ func (c *namespaces) Create(namespace *api.Namespace) (result *api.Namespace, er } // Update takes the representation of a namespace and updates it. Returns the server's representation of the namespace, and an error, if there is any. -func (c *namespaces) Update(namespace *api.Namespace) (result *api.Namespace, err error) { - result = &api.Namespace{} +func (c *namespaces) Update(namespace *core.Namespace) (result *core.Namespace, err error) { + result = &core.Namespace{} err = c.client.Put(). Resource("namespaces"). Name(namespace.Name). @@ -115,8 +115,8 @@ func (c *namespaces) Update(namespace *api.Namespace) (result *api.Namespace, er // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *namespaces) UpdateStatus(namespace *api.Namespace) (result *api.Namespace, err error) { - result = &api.Namespace{} +func (c *namespaces) UpdateStatus(namespace *core.Namespace) (result *core.Namespace, err error) { + result = &core.Namespace{} err = c.client.Put(). Resource("namespaces"). Name(namespace.Name). @@ -148,8 +148,8 @@ func (c *namespaces) DeleteCollection(options *v1.DeleteOptions, listOptions v1. } // Patch applies the patch and returns the patched namespace. -func (c *namespaces) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *api.Namespace, err error) { - result = &api.Namespace{} +func (c *namespaces) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.Namespace, err error) { + result = &core.Namespace{} err = c.client.Patch(pt). Resource("namespaces"). SubResource(subresources...). diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/namespace_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/namespace_expansion.go index 456de1cdfcd5..29c7bcf2e24b 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/namespace_expansion.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/namespace_expansion.go @@ -16,7 +16,9 @@ limitations under the License. package internalversion -import "k8s.io/kubernetes/pkg/api" +import ( + api "k8s.io/kubernetes/pkg/apis/core" +) // The NamespaceExpansion interface allows manually adding extra methods to the NamespaceInterface. 
type NamespaceExpansion interface { diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/node.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/node.go index ee11691b8a71..b5c92087bf9a 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/node.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/node.go @@ -21,7 +21,7 @@ import ( types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rest "k8s.io/client-go/rest" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" ) @@ -33,15 +33,15 @@ type NodesGetter interface { // NodeInterface has methods to work with Node resources. type NodeInterface interface { - Create(*api.Node) (*api.Node, error) - Update(*api.Node) (*api.Node, error) - UpdateStatus(*api.Node) (*api.Node, error) + Create(*core.Node) (*core.Node, error) + Update(*core.Node) (*core.Node, error) + UpdateStatus(*core.Node) (*core.Node, error) Delete(name string, options *v1.DeleteOptions) error DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*api.Node, error) - List(opts v1.ListOptions) (*api.NodeList, error) + Get(name string, options v1.GetOptions) (*core.Node, error) + List(opts v1.ListOptions) (*core.NodeList, error) Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *api.Node, err error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.Node, err error) NodeExpansion } @@ -58,8 +58,8 @@ func newNodes(c *CoreClient) *nodes { } // Get takes name of the node, and returns the corresponding node object, and an error if there is any. -func (c *nodes) Get(name string, options v1.GetOptions) (result *api.Node, err error) { - result = &api.Node{} +func (c *nodes) Get(name string, options v1.GetOptions) (result *core.Node, err error) { + result = &core.Node{} err = c.client.Get(). Resource("nodes"). Name(name). @@ -70,8 +70,8 @@ func (c *nodes) Get(name string, options v1.GetOptions) (result *api.Node, err e } // List takes label and field selectors, and returns the list of Nodes that match those selectors. -func (c *nodes) List(opts v1.ListOptions) (result *api.NodeList, err error) { - result = &api.NodeList{} +func (c *nodes) List(opts v1.ListOptions) (result *core.NodeList, err error) { + result = &core.NodeList{} err = c.client.Get(). Resource("nodes"). VersionedParams(&opts, scheme.ParameterCodec). @@ -90,8 +90,8 @@ func (c *nodes) Watch(opts v1.ListOptions) (watch.Interface, error) { } // Create takes the representation of a node and creates it. Returns the server's representation of the node, and an error, if there is any. -func (c *nodes) Create(node *api.Node) (result *api.Node, err error) { - result = &api.Node{} +func (c *nodes) Create(node *core.Node) (result *core.Node, err error) { + result = &core.Node{} err = c.client.Post(). Resource("nodes"). Body(node). @@ -101,8 +101,8 @@ func (c *nodes) Create(node *api.Node) (result *api.Node, err error) { } // Update takes the representation of a node and updates it. Returns the server's representation of the node, and an error, if there is any. 
-func (c *nodes) Update(node *api.Node) (result *api.Node, err error) { - result = &api.Node{} +func (c *nodes) Update(node *core.Node) (result *core.Node, err error) { + result = &core.Node{} err = c.client.Put(). Resource("nodes"). Name(node.Name). @@ -115,8 +115,8 @@ func (c *nodes) Update(node *api.Node) (result *api.Node, err error) { // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *nodes) UpdateStatus(node *api.Node) (result *api.Node, err error) { - result = &api.Node{} +func (c *nodes) UpdateStatus(node *core.Node) (result *core.Node, err error) { + result = &core.Node{} err = c.client.Put(). Resource("nodes"). Name(node.Name). @@ -148,8 +148,8 @@ func (c *nodes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListO } // Patch applies the patch and returns the patched node. -func (c *nodes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *api.Node, err error) { - result = &api.Node{} +func (c *nodes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.Node, err error) { + result = &core.Node{} err = c.client.Patch(pt). Resource("nodes"). SubResource(subresources...). diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/node_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/node_expansion.go index b02fa083ba6b..8e29d5f1ef0b 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/node_expansion.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/node_expansion.go @@ -18,7 +18,7 @@ package internalversion import ( "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) // The NodeExpansion interface allows manually adding extra methods to the NodeInterface. diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/persistentvolume.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/persistentvolume.go index 92a2657d4511..9f8921c4811e 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/persistentvolume.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/persistentvolume.go @@ -21,7 +21,7 @@ import ( types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rest "k8s.io/client-go/rest" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" ) @@ -33,15 +33,15 @@ type PersistentVolumesGetter interface { // PersistentVolumeInterface has methods to work with PersistentVolume resources. 
type PersistentVolumeInterface interface { - Create(*api.PersistentVolume) (*api.PersistentVolume, error) - Update(*api.PersistentVolume) (*api.PersistentVolume, error) - UpdateStatus(*api.PersistentVolume) (*api.PersistentVolume, error) + Create(*core.PersistentVolume) (*core.PersistentVolume, error) + Update(*core.PersistentVolume) (*core.PersistentVolume, error) + UpdateStatus(*core.PersistentVolume) (*core.PersistentVolume, error) Delete(name string, options *v1.DeleteOptions) error DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*api.PersistentVolume, error) - List(opts v1.ListOptions) (*api.PersistentVolumeList, error) + Get(name string, options v1.GetOptions) (*core.PersistentVolume, error) + List(opts v1.ListOptions) (*core.PersistentVolumeList, error) Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *api.PersistentVolume, err error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.PersistentVolume, err error) PersistentVolumeExpansion } @@ -58,8 +58,8 @@ func newPersistentVolumes(c *CoreClient) *persistentVolumes { } // Get takes name of the persistentVolume, and returns the corresponding persistentVolume object, and an error if there is any. -func (c *persistentVolumes) Get(name string, options v1.GetOptions) (result *api.PersistentVolume, err error) { - result = &api.PersistentVolume{} +func (c *persistentVolumes) Get(name string, options v1.GetOptions) (result *core.PersistentVolume, err error) { + result = &core.PersistentVolume{} err = c.client.Get(). Resource("persistentvolumes"). Name(name). @@ -70,8 +70,8 @@ func (c *persistentVolumes) Get(name string, options v1.GetOptions) (result *api } // List takes label and field selectors, and returns the list of PersistentVolumes that match those selectors. -func (c *persistentVolumes) List(opts v1.ListOptions) (result *api.PersistentVolumeList, err error) { - result = &api.PersistentVolumeList{} +func (c *persistentVolumes) List(opts v1.ListOptions) (result *core.PersistentVolumeList, err error) { + result = &core.PersistentVolumeList{} err = c.client.Get(). Resource("persistentvolumes"). VersionedParams(&opts, scheme.ParameterCodec). @@ -90,8 +90,8 @@ func (c *persistentVolumes) Watch(opts v1.ListOptions) (watch.Interface, error) } // Create takes the representation of a persistentVolume and creates it. Returns the server's representation of the persistentVolume, and an error, if there is any. -func (c *persistentVolumes) Create(persistentVolume *api.PersistentVolume) (result *api.PersistentVolume, err error) { - result = &api.PersistentVolume{} +func (c *persistentVolumes) Create(persistentVolume *core.PersistentVolume) (result *core.PersistentVolume, err error) { + result = &core.PersistentVolume{} err = c.client.Post(). Resource("persistentvolumes"). Body(persistentVolume). @@ -101,8 +101,8 @@ func (c *persistentVolumes) Create(persistentVolume *api.PersistentVolume) (resu } // Update takes the representation of a persistentVolume and updates it. Returns the server's representation of the persistentVolume, and an error, if there is any. 
-func (c *persistentVolumes) Update(persistentVolume *api.PersistentVolume) (result *api.PersistentVolume, err error) { - result = &api.PersistentVolume{} +func (c *persistentVolumes) Update(persistentVolume *core.PersistentVolume) (result *core.PersistentVolume, err error) { + result = &core.PersistentVolume{} err = c.client.Put(). Resource("persistentvolumes"). Name(persistentVolume.Name). @@ -115,8 +115,8 @@ func (c *persistentVolumes) Update(persistentVolume *api.PersistentVolume) (resu // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *persistentVolumes) UpdateStatus(persistentVolume *api.PersistentVolume) (result *api.PersistentVolume, err error) { - result = &api.PersistentVolume{} +func (c *persistentVolumes) UpdateStatus(persistentVolume *core.PersistentVolume) (result *core.PersistentVolume, err error) { + result = &core.PersistentVolume{} err = c.client.Put(). Resource("persistentvolumes"). Name(persistentVolume.Name). @@ -148,8 +148,8 @@ func (c *persistentVolumes) DeleteCollection(options *v1.DeleteOptions, listOpti } // Patch applies the patch and returns the patched persistentVolume. -func (c *persistentVolumes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *api.PersistentVolume, err error) { - result = &api.PersistentVolume{} +func (c *persistentVolumes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.PersistentVolume, err error) { + result = &core.PersistentVolume{} err = c.client.Patch(pt). Resource("persistentvolumes"). SubResource(subresources...). diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/persistentvolumeclaim.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/persistentvolumeclaim.go index d0db131d77f3..d8e2e5a3e9b9 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/persistentvolumeclaim.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/persistentvolumeclaim.go @@ -21,7 +21,7 @@ import ( types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rest "k8s.io/client-go/rest" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" ) @@ -33,15 +33,15 @@ type PersistentVolumeClaimsGetter interface { // PersistentVolumeClaimInterface has methods to work with PersistentVolumeClaim resources. 
type PersistentVolumeClaimInterface interface { - Create(*api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) - Update(*api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) - UpdateStatus(*api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) + Create(*core.PersistentVolumeClaim) (*core.PersistentVolumeClaim, error) + Update(*core.PersistentVolumeClaim) (*core.PersistentVolumeClaim, error) + UpdateStatus(*core.PersistentVolumeClaim) (*core.PersistentVolumeClaim, error) Delete(name string, options *v1.DeleteOptions) error DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*api.PersistentVolumeClaim, error) - List(opts v1.ListOptions) (*api.PersistentVolumeClaimList, error) + Get(name string, options v1.GetOptions) (*core.PersistentVolumeClaim, error) + List(opts v1.ListOptions) (*core.PersistentVolumeClaimList, error) Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *api.PersistentVolumeClaim, err error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.PersistentVolumeClaim, err error) PersistentVolumeClaimExpansion } @@ -60,8 +60,8 @@ func newPersistentVolumeClaims(c *CoreClient, namespace string) *persistentVolum } // Get takes name of the persistentVolumeClaim, and returns the corresponding persistentVolumeClaim object, and an error if there is any. -func (c *persistentVolumeClaims) Get(name string, options v1.GetOptions) (result *api.PersistentVolumeClaim, err error) { - result = &api.PersistentVolumeClaim{} +func (c *persistentVolumeClaims) Get(name string, options v1.GetOptions) (result *core.PersistentVolumeClaim, err error) { + result = &core.PersistentVolumeClaim{} err = c.client.Get(). Namespace(c.ns). Resource("persistentvolumeclaims"). @@ -73,8 +73,8 @@ func (c *persistentVolumeClaims) Get(name string, options v1.GetOptions) (result } // List takes label and field selectors, and returns the list of PersistentVolumeClaims that match those selectors. -func (c *persistentVolumeClaims) List(opts v1.ListOptions) (result *api.PersistentVolumeClaimList, err error) { - result = &api.PersistentVolumeClaimList{} +func (c *persistentVolumeClaims) List(opts v1.ListOptions) (result *core.PersistentVolumeClaimList, err error) { + result = &core.PersistentVolumeClaimList{} err = c.client.Get(). Namespace(c.ns). Resource("persistentvolumeclaims"). @@ -95,8 +95,8 @@ func (c *persistentVolumeClaims) Watch(opts v1.ListOptions) (watch.Interface, er } // Create takes the representation of a persistentVolumeClaim and creates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. -func (c *persistentVolumeClaims) Create(persistentVolumeClaim *api.PersistentVolumeClaim) (result *api.PersistentVolumeClaim, err error) { - result = &api.PersistentVolumeClaim{} +func (c *persistentVolumeClaims) Create(persistentVolumeClaim *core.PersistentVolumeClaim) (result *core.PersistentVolumeClaim, err error) { + result = &core.PersistentVolumeClaim{} err = c.client.Post(). Namespace(c.ns). Resource("persistentvolumeclaims"). @@ -107,8 +107,8 @@ func (c *persistentVolumeClaims) Create(persistentVolumeClaim *api.PersistentVol } // Update takes the representation of a persistentVolumeClaim and updates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. 
-func (c *persistentVolumeClaims) Update(persistentVolumeClaim *api.PersistentVolumeClaim) (result *api.PersistentVolumeClaim, err error) { - result = &api.PersistentVolumeClaim{} +func (c *persistentVolumeClaims) Update(persistentVolumeClaim *core.PersistentVolumeClaim) (result *core.PersistentVolumeClaim, err error) { + result = &core.PersistentVolumeClaim{} err = c.client.Put(). Namespace(c.ns). Resource("persistentvolumeclaims"). @@ -122,8 +122,8 @@ func (c *persistentVolumeClaims) Update(persistentVolumeClaim *api.PersistentVol // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *persistentVolumeClaims) UpdateStatus(persistentVolumeClaim *api.PersistentVolumeClaim) (result *api.PersistentVolumeClaim, err error) { - result = &api.PersistentVolumeClaim{} +func (c *persistentVolumeClaims) UpdateStatus(persistentVolumeClaim *core.PersistentVolumeClaim) (result *core.PersistentVolumeClaim, err error) { + result = &core.PersistentVolumeClaim{} err = c.client.Put(). Namespace(c.ns). Resource("persistentvolumeclaims"). @@ -158,8 +158,8 @@ func (c *persistentVolumeClaims) DeleteCollection(options *v1.DeleteOptions, lis } // Patch applies the patch and returns the patched persistentVolumeClaim. -func (c *persistentVolumeClaims) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *api.PersistentVolumeClaim, err error) { - result = &api.PersistentVolumeClaim{} +func (c *persistentVolumeClaims) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.PersistentVolumeClaim, err error) { + result = &core.PersistentVolumeClaim{} err = c.client.Patch(pt). Namespace(c.ns). Resource("persistentvolumeclaims"). diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/pod.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/pod.go index b662689cf508..7c8555329092 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/pod.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/pod.go @@ -21,7 +21,7 @@ import ( types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rest "k8s.io/client-go/rest" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" ) @@ -33,15 +33,15 @@ type PodsGetter interface { // PodInterface has methods to work with Pod resources. 
type PodInterface interface { - Create(*api.Pod) (*api.Pod, error) - Update(*api.Pod) (*api.Pod, error) - UpdateStatus(*api.Pod) (*api.Pod, error) + Create(*core.Pod) (*core.Pod, error) + Update(*core.Pod) (*core.Pod, error) + UpdateStatus(*core.Pod) (*core.Pod, error) Delete(name string, options *v1.DeleteOptions) error DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*api.Pod, error) - List(opts v1.ListOptions) (*api.PodList, error) + Get(name string, options v1.GetOptions) (*core.Pod, error) + List(opts v1.ListOptions) (*core.PodList, error) Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *api.Pod, err error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.Pod, err error) PodExpansion } @@ -60,8 +60,8 @@ func newPods(c *CoreClient, namespace string) *pods { } // Get takes name of the pod, and returns the corresponding pod object, and an error if there is any. -func (c *pods) Get(name string, options v1.GetOptions) (result *api.Pod, err error) { - result = &api.Pod{} +func (c *pods) Get(name string, options v1.GetOptions) (result *core.Pod, err error) { + result = &core.Pod{} err = c.client.Get(). Namespace(c.ns). Resource("pods"). @@ -73,8 +73,8 @@ func (c *pods) Get(name string, options v1.GetOptions) (result *api.Pod, err err } // List takes label and field selectors, and returns the list of Pods that match those selectors. -func (c *pods) List(opts v1.ListOptions) (result *api.PodList, err error) { - result = &api.PodList{} +func (c *pods) List(opts v1.ListOptions) (result *core.PodList, err error) { + result = &core.PodList{} err = c.client.Get(). Namespace(c.ns). Resource("pods"). @@ -95,8 +95,8 @@ func (c *pods) Watch(opts v1.ListOptions) (watch.Interface, error) { } // Create takes the representation of a pod and creates it. Returns the server's representation of the pod, and an error, if there is any. -func (c *pods) Create(pod *api.Pod) (result *api.Pod, err error) { - result = &api.Pod{} +func (c *pods) Create(pod *core.Pod) (result *core.Pod, err error) { + result = &core.Pod{} err = c.client.Post(). Namespace(c.ns). Resource("pods"). @@ -107,8 +107,8 @@ func (c *pods) Create(pod *api.Pod) (result *api.Pod, err error) { } // Update takes the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any. -func (c *pods) Update(pod *api.Pod) (result *api.Pod, err error) { - result = &api.Pod{} +func (c *pods) Update(pod *core.Pod) (result *core.Pod, err error) { + result = &core.Pod{} err = c.client.Put(). Namespace(c.ns). Resource("pods"). @@ -122,8 +122,8 @@ func (c *pods) Update(pod *api.Pod) (result *api.Pod, err error) { // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *pods) UpdateStatus(pod *api.Pod) (result *api.Pod, err error) { - result = &api.Pod{} +func (c *pods) UpdateStatus(pod *core.Pod) (result *core.Pod, err error) { + result = &core.Pod{} err = c.client.Put(). Namespace(c.ns). Resource("pods"). @@ -158,8 +158,8 @@ func (c *pods) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOp } // Patch applies the patch and returns the patched pod. 
-func (c *pods) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *api.Pod, err error) { - result = &api.Pod{} +func (c *pods) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.Pod, err error) { + result = &core.Pod{} err = c.client.Patch(pt). Namespace(c.ns). Resource("pods"). diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/pod_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/pod_expansion.go index d6ea139a46be..86fead7cced7 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/pod_expansion.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/pod_expansion.go @@ -18,7 +18,8 @@ package internalversion import ( restclient "k8s.io/client-go/rest" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" + api "k8s.io/kubernetes/pkg/apis/core" ) // The PodExpansion interface allows manually adding extra methods to the PodInterface. @@ -34,5 +35,5 @@ func (c *pods) Bind(binding *api.Binding) error { // Get constructs a request for getting the logs for a pod func (c *pods) GetLogs(name string, opts *api.PodLogOptions) *restclient.Request { - return c.client.Get().Namespace(c.ns).Name(name).Resource("pods").SubResource("log").VersionedParams(opts, api.ParameterCodec) + return c.client.Get().Namespace(c.ns).Name(name).Resource("pods").SubResource("log").VersionedParams(opts, legacyscheme.ParameterCodec) } diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/podtemplate.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/podtemplate.go index 8a940bf6b728..49723c120708 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/podtemplate.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/podtemplate.go @@ -21,7 +21,7 @@ import ( types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rest "k8s.io/client-go/rest" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" ) @@ -33,14 +33,14 @@ type PodTemplatesGetter interface { // PodTemplateInterface has methods to work with PodTemplate resources. 
type PodTemplateInterface interface { - Create(*api.PodTemplate) (*api.PodTemplate, error) - Update(*api.PodTemplate) (*api.PodTemplate, error) + Create(*core.PodTemplate) (*core.PodTemplate, error) + Update(*core.PodTemplate) (*core.PodTemplate, error) Delete(name string, options *v1.DeleteOptions) error DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*api.PodTemplate, error) - List(opts v1.ListOptions) (*api.PodTemplateList, error) + Get(name string, options v1.GetOptions) (*core.PodTemplate, error) + List(opts v1.ListOptions) (*core.PodTemplateList, error) Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *api.PodTemplate, err error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.PodTemplate, err error) PodTemplateExpansion } @@ -59,8 +59,8 @@ func newPodTemplates(c *CoreClient, namespace string) *podTemplates { } // Get takes name of the podTemplate, and returns the corresponding podTemplate object, and an error if there is any. -func (c *podTemplates) Get(name string, options v1.GetOptions) (result *api.PodTemplate, err error) { - result = &api.PodTemplate{} +func (c *podTemplates) Get(name string, options v1.GetOptions) (result *core.PodTemplate, err error) { + result = &core.PodTemplate{} err = c.client.Get(). Namespace(c.ns). Resource("podtemplates"). @@ -72,8 +72,8 @@ func (c *podTemplates) Get(name string, options v1.GetOptions) (result *api.PodT } // List takes label and field selectors, and returns the list of PodTemplates that match those selectors. -func (c *podTemplates) List(opts v1.ListOptions) (result *api.PodTemplateList, err error) { - result = &api.PodTemplateList{} +func (c *podTemplates) List(opts v1.ListOptions) (result *core.PodTemplateList, err error) { + result = &core.PodTemplateList{} err = c.client.Get(). Namespace(c.ns). Resource("podtemplates"). @@ -94,8 +94,8 @@ func (c *podTemplates) Watch(opts v1.ListOptions) (watch.Interface, error) { } // Create takes the representation of a podTemplate and creates it. Returns the server's representation of the podTemplate, and an error, if there is any. -func (c *podTemplates) Create(podTemplate *api.PodTemplate) (result *api.PodTemplate, err error) { - result = &api.PodTemplate{} +func (c *podTemplates) Create(podTemplate *core.PodTemplate) (result *core.PodTemplate, err error) { + result = &core.PodTemplate{} err = c.client.Post(). Namespace(c.ns). Resource("podtemplates"). @@ -106,8 +106,8 @@ func (c *podTemplates) Create(podTemplate *api.PodTemplate) (result *api.PodTemp } // Update takes the representation of a podTemplate and updates it. Returns the server's representation of the podTemplate, and an error, if there is any. -func (c *podTemplates) Update(podTemplate *api.PodTemplate) (result *api.PodTemplate, err error) { - result = &api.PodTemplate{} +func (c *podTemplates) Update(podTemplate *core.PodTemplate) (result *core.PodTemplate, err error) { + result = &core.PodTemplate{} err = c.client.Put(). Namespace(c.ns). Resource("podtemplates"). @@ -141,8 +141,8 @@ func (c *podTemplates) DeleteCollection(options *v1.DeleteOptions, listOptions v } // Patch applies the patch and returns the patched podTemplate. 
-func (c *podTemplates) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *api.PodTemplate, err error) { - result = &api.PodTemplate{} +func (c *podTemplates) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.PodTemplate, err error) { + result = &core.PodTemplate{} err = c.client.Patch(pt). Namespace(c.ns). Resource("podtemplates"). diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/replicationcontroller.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/replicationcontroller.go index 3be51c316fe7..5c787f7764cf 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/replicationcontroller.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/replicationcontroller.go @@ -21,8 +21,8 @@ import ( types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rest "k8s.io/client-go/rest" - api "k8s.io/kubernetes/pkg/api" - extensions "k8s.io/kubernetes/pkg/apis/extensions" + autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" + core "k8s.io/kubernetes/pkg/apis/core" scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" ) @@ -34,17 +34,17 @@ type ReplicationControllersGetter interface { // ReplicationControllerInterface has methods to work with ReplicationController resources. type ReplicationControllerInterface interface { - Create(*api.ReplicationController) (*api.ReplicationController, error) - Update(*api.ReplicationController) (*api.ReplicationController, error) - UpdateStatus(*api.ReplicationController) (*api.ReplicationController, error) + Create(*core.ReplicationController) (*core.ReplicationController, error) + Update(*core.ReplicationController) (*core.ReplicationController, error) + UpdateStatus(*core.ReplicationController) (*core.ReplicationController, error) Delete(name string, options *v1.DeleteOptions) error DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*api.ReplicationController, error) - List(opts v1.ListOptions) (*api.ReplicationControllerList, error) + Get(name string, options v1.GetOptions) (*core.ReplicationController, error) + List(opts v1.ListOptions) (*core.ReplicationControllerList, error) Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *api.ReplicationController, err error) - GetScale(replicationControllerName string, options v1.GetOptions) (*extensions.Scale, error) - UpdateScale(replicationControllerName string, scale *extensions.Scale) (*extensions.Scale, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.ReplicationController, err error) + GetScale(replicationControllerName string, options v1.GetOptions) (*autoscaling.Scale, error) + UpdateScale(replicationControllerName string, scale *autoscaling.Scale) (*autoscaling.Scale, error) ReplicationControllerExpansion } @@ -64,8 +64,8 @@ func newReplicationControllers(c *CoreClient, namespace string) *replicationCont } // Get takes name of the replicationController, and returns the corresponding replicationController object, and an error if there is any. 
-func (c *replicationControllers) Get(name string, options v1.GetOptions) (result *api.ReplicationController, err error) { - result = &api.ReplicationController{} +func (c *replicationControllers) Get(name string, options v1.GetOptions) (result *core.ReplicationController, err error) { + result = &core.ReplicationController{} err = c.client.Get(). Namespace(c.ns). Resource("replicationcontrollers"). @@ -77,8 +77,8 @@ func (c *replicationControllers) Get(name string, options v1.GetOptions) (result } // List takes label and field selectors, and returns the list of ReplicationControllers that match those selectors. -func (c *replicationControllers) List(opts v1.ListOptions) (result *api.ReplicationControllerList, err error) { - result = &api.ReplicationControllerList{} +func (c *replicationControllers) List(opts v1.ListOptions) (result *core.ReplicationControllerList, err error) { + result = &core.ReplicationControllerList{} err = c.client.Get(). Namespace(c.ns). Resource("replicationcontrollers"). @@ -99,8 +99,8 @@ func (c *replicationControllers) Watch(opts v1.ListOptions) (watch.Interface, er } // Create takes the representation of a replicationController and creates it. Returns the server's representation of the replicationController, and an error, if there is any. -func (c *replicationControllers) Create(replicationController *api.ReplicationController) (result *api.ReplicationController, err error) { - result = &api.ReplicationController{} +func (c *replicationControllers) Create(replicationController *core.ReplicationController) (result *core.ReplicationController, err error) { + result = &core.ReplicationController{} err = c.client.Post(). Namespace(c.ns). Resource("replicationcontrollers"). @@ -111,8 +111,8 @@ func (c *replicationControllers) Create(replicationController *api.ReplicationCo } // Update takes the representation of a replicationController and updates it. Returns the server's representation of the replicationController, and an error, if there is any. -func (c *replicationControllers) Update(replicationController *api.ReplicationController) (result *api.ReplicationController, err error) { - result = &api.ReplicationController{} +func (c *replicationControllers) Update(replicationController *core.ReplicationController) (result *core.ReplicationController, err error) { + result = &core.ReplicationController{} err = c.client.Put(). Namespace(c.ns). Resource("replicationcontrollers"). @@ -126,8 +126,8 @@ func (c *replicationControllers) Update(replicationController *api.ReplicationCo // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *replicationControllers) UpdateStatus(replicationController *api.ReplicationController) (result *api.ReplicationController, err error) { - result = &api.ReplicationController{} +func (c *replicationControllers) UpdateStatus(replicationController *core.ReplicationController) (result *core.ReplicationController, err error) { + result = &core.ReplicationController{} err = c.client.Put(). Namespace(c.ns). Resource("replicationcontrollers"). @@ -162,8 +162,8 @@ func (c *replicationControllers) DeleteCollection(options *v1.DeleteOptions, lis } // Patch applies the patch and returns the patched replicationController. 
-func (c *replicationControllers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *api.ReplicationController, err error) { - result = &api.ReplicationController{} +func (c *replicationControllers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.ReplicationController, err error) { + result = &core.ReplicationController{} err = c.client.Patch(pt). Namespace(c.ns). Resource("replicationcontrollers"). @@ -175,9 +175,9 @@ func (c *replicationControllers) Patch(name string, pt types.PatchType, data []b return } -// GetScale takes name of the replicationController, and returns the corresponding extensions.Scale object, and an error if there is any. -func (c *replicationControllers) GetScale(replicationControllerName string, options v1.GetOptions) (result *extensions.Scale, err error) { - result = &extensions.Scale{} +// GetScale takes name of the replicationController, and returns the corresponding autoscaling.Scale object, and an error if there is any. +func (c *replicationControllers) GetScale(replicationControllerName string, options v1.GetOptions) (result *autoscaling.Scale, err error) { + result = &autoscaling.Scale{} err = c.client.Get(). Namespace(c.ns). Resource("replicationcontrollers"). @@ -190,8 +190,8 @@ func (c *replicationControllers) GetScale(replicationControllerName string, opti } // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *replicationControllers) UpdateScale(replicationControllerName string, scale *extensions.Scale) (result *extensions.Scale, err error) { - result = &extensions.Scale{} +func (c *replicationControllers) UpdateScale(replicationControllerName string, scale *autoscaling.Scale) (result *autoscaling.Scale, err error) { + result = &autoscaling.Scale{} err = c.client.Put(). Namespace(c.ns). Resource("replicationcontrollers"). diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/resourcequota.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/resourcequota.go index 169f3219afc7..5c5c3f136461 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/resourcequota.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/resourcequota.go @@ -21,7 +21,7 @@ import ( types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rest "k8s.io/client-go/rest" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" ) @@ -33,15 +33,15 @@ type ResourceQuotasGetter interface { // ResourceQuotaInterface has methods to work with ResourceQuota resources. 
type ResourceQuotaInterface interface { - Create(*api.ResourceQuota) (*api.ResourceQuota, error) - Update(*api.ResourceQuota) (*api.ResourceQuota, error) - UpdateStatus(*api.ResourceQuota) (*api.ResourceQuota, error) + Create(*core.ResourceQuota) (*core.ResourceQuota, error) + Update(*core.ResourceQuota) (*core.ResourceQuota, error) + UpdateStatus(*core.ResourceQuota) (*core.ResourceQuota, error) Delete(name string, options *v1.DeleteOptions) error DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*api.ResourceQuota, error) - List(opts v1.ListOptions) (*api.ResourceQuotaList, error) + Get(name string, options v1.GetOptions) (*core.ResourceQuota, error) + List(opts v1.ListOptions) (*core.ResourceQuotaList, error) Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *api.ResourceQuota, err error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.ResourceQuota, err error) ResourceQuotaExpansion } @@ -60,8 +60,8 @@ func newResourceQuotas(c *CoreClient, namespace string) *resourceQuotas { } // Get takes name of the resourceQuota, and returns the corresponding resourceQuota object, and an error if there is any. -func (c *resourceQuotas) Get(name string, options v1.GetOptions) (result *api.ResourceQuota, err error) { - result = &api.ResourceQuota{} +func (c *resourceQuotas) Get(name string, options v1.GetOptions) (result *core.ResourceQuota, err error) { + result = &core.ResourceQuota{} err = c.client.Get(). Namespace(c.ns). Resource("resourcequotas"). @@ -73,8 +73,8 @@ func (c *resourceQuotas) Get(name string, options v1.GetOptions) (result *api.Re } // List takes label and field selectors, and returns the list of ResourceQuotas that match those selectors. -func (c *resourceQuotas) List(opts v1.ListOptions) (result *api.ResourceQuotaList, err error) { - result = &api.ResourceQuotaList{} +func (c *resourceQuotas) List(opts v1.ListOptions) (result *core.ResourceQuotaList, err error) { + result = &core.ResourceQuotaList{} err = c.client.Get(). Namespace(c.ns). Resource("resourcequotas"). @@ -95,8 +95,8 @@ func (c *resourceQuotas) Watch(opts v1.ListOptions) (watch.Interface, error) { } // Create takes the representation of a resourceQuota and creates it. Returns the server's representation of the resourceQuota, and an error, if there is any. -func (c *resourceQuotas) Create(resourceQuota *api.ResourceQuota) (result *api.ResourceQuota, err error) { - result = &api.ResourceQuota{} +func (c *resourceQuotas) Create(resourceQuota *core.ResourceQuota) (result *core.ResourceQuota, err error) { + result = &core.ResourceQuota{} err = c.client.Post(). Namespace(c.ns). Resource("resourcequotas"). @@ -107,8 +107,8 @@ func (c *resourceQuotas) Create(resourceQuota *api.ResourceQuota) (result *api.R } // Update takes the representation of a resourceQuota and updates it. Returns the server's representation of the resourceQuota, and an error, if there is any. -func (c *resourceQuotas) Update(resourceQuota *api.ResourceQuota) (result *api.ResourceQuota, err error) { - result = &api.ResourceQuota{} +func (c *resourceQuotas) Update(resourceQuota *core.ResourceQuota) (result *core.ResourceQuota, err error) { + result = &core.ResourceQuota{} err = c.client.Put(). Namespace(c.ns). Resource("resourcequotas"). 
@@ -122,8 +122,8 @@ func (c *resourceQuotas) Update(resourceQuota *api.ResourceQuota) (result *api.R // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *resourceQuotas) UpdateStatus(resourceQuota *api.ResourceQuota) (result *api.ResourceQuota, err error) { - result = &api.ResourceQuota{} +func (c *resourceQuotas) UpdateStatus(resourceQuota *core.ResourceQuota) (result *core.ResourceQuota, err error) { + result = &core.ResourceQuota{} err = c.client.Put(). Namespace(c.ns). Resource("resourcequotas"). @@ -158,8 +158,8 @@ func (c *resourceQuotas) DeleteCollection(options *v1.DeleteOptions, listOptions } // Patch applies the patch and returns the patched resourceQuota. -func (c *resourceQuotas) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *api.ResourceQuota, err error) { - result = &api.ResourceQuota{} +func (c *resourceQuotas) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.ResourceQuota, err error) { + result = &core.ResourceQuota{} err = c.client.Patch(pt). Namespace(c.ns). Resource("resourcequotas"). diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/secret.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/secret.go index 628fa1471711..36a1a16e0a98 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/secret.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/secret.go @@ -21,7 +21,7 @@ import ( types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rest "k8s.io/client-go/rest" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" ) @@ -33,14 +33,14 @@ type SecretsGetter interface { // SecretInterface has methods to work with Secret resources. type SecretInterface interface { - Create(*api.Secret) (*api.Secret, error) - Update(*api.Secret) (*api.Secret, error) + Create(*core.Secret) (*core.Secret, error) + Update(*core.Secret) (*core.Secret, error) Delete(name string, options *v1.DeleteOptions) error DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*api.Secret, error) - List(opts v1.ListOptions) (*api.SecretList, error) + Get(name string, options v1.GetOptions) (*core.Secret, error) + List(opts v1.ListOptions) (*core.SecretList, error) Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *api.Secret, err error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.Secret, err error) SecretExpansion } @@ -59,8 +59,8 @@ func newSecrets(c *CoreClient, namespace string) *secrets { } // Get takes name of the secret, and returns the corresponding secret object, and an error if there is any. -func (c *secrets) Get(name string, options v1.GetOptions) (result *api.Secret, err error) { - result = &api.Secret{} +func (c *secrets) Get(name string, options v1.GetOptions) (result *core.Secret, err error) { + result = &core.Secret{} err = c.client.Get(). Namespace(c.ns). Resource("secrets"). 
@@ -72,8 +72,8 @@ func (c *secrets) Get(name string, options v1.GetOptions) (result *api.Secret, e } // List takes label and field selectors, and returns the list of Secrets that match those selectors. -func (c *secrets) List(opts v1.ListOptions) (result *api.SecretList, err error) { - result = &api.SecretList{} +func (c *secrets) List(opts v1.ListOptions) (result *core.SecretList, err error) { + result = &core.SecretList{} err = c.client.Get(). Namespace(c.ns). Resource("secrets"). @@ -94,8 +94,8 @@ func (c *secrets) Watch(opts v1.ListOptions) (watch.Interface, error) { } // Create takes the representation of a secret and creates it. Returns the server's representation of the secret, and an error, if there is any. -func (c *secrets) Create(secret *api.Secret) (result *api.Secret, err error) { - result = &api.Secret{} +func (c *secrets) Create(secret *core.Secret) (result *core.Secret, err error) { + result = &core.Secret{} err = c.client.Post(). Namespace(c.ns). Resource("secrets"). @@ -106,8 +106,8 @@ func (c *secrets) Create(secret *api.Secret) (result *api.Secret, err error) { } // Update takes the representation of a secret and updates it. Returns the server's representation of the secret, and an error, if there is any. -func (c *secrets) Update(secret *api.Secret) (result *api.Secret, err error) { - result = &api.Secret{} +func (c *secrets) Update(secret *core.Secret) (result *core.Secret, err error) { + result = &core.Secret{} err = c.client.Put(). Namespace(c.ns). Resource("secrets"). @@ -141,8 +141,8 @@ func (c *secrets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.Lis } // Patch applies the patch and returns the patched secret. -func (c *secrets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *api.Secret, err error) { - result = &api.Secret{} +func (c *secrets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.Secret, err error) { + result = &core.Secret{} err = c.client.Patch(pt). Namespace(c.ns). Resource("secrets"). diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/service.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/service.go index 05bd7855dcfa..6a9cfc0cb673 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/service.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/service.go @@ -21,7 +21,7 @@ import ( types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rest "k8s.io/client-go/rest" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" ) @@ -33,15 +33,15 @@ type ServicesGetter interface { // ServiceInterface has methods to work with Service resources. 
type ServiceInterface interface { - Create(*api.Service) (*api.Service, error) - Update(*api.Service) (*api.Service, error) - UpdateStatus(*api.Service) (*api.Service, error) + Create(*core.Service) (*core.Service, error) + Update(*core.Service) (*core.Service, error) + UpdateStatus(*core.Service) (*core.Service, error) Delete(name string, options *v1.DeleteOptions) error DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*api.Service, error) - List(opts v1.ListOptions) (*api.ServiceList, error) + Get(name string, options v1.GetOptions) (*core.Service, error) + List(opts v1.ListOptions) (*core.ServiceList, error) Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *api.Service, err error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.Service, err error) ServiceExpansion } @@ -60,8 +60,8 @@ func newServices(c *CoreClient, namespace string) *services { } // Get takes name of the service, and returns the corresponding service object, and an error if there is any. -func (c *services) Get(name string, options v1.GetOptions) (result *api.Service, err error) { - result = &api.Service{} +func (c *services) Get(name string, options v1.GetOptions) (result *core.Service, err error) { + result = &core.Service{} err = c.client.Get(). Namespace(c.ns). Resource("services"). @@ -73,8 +73,8 @@ func (c *services) Get(name string, options v1.GetOptions) (result *api.Service, } // List takes label and field selectors, and returns the list of Services that match those selectors. -func (c *services) List(opts v1.ListOptions) (result *api.ServiceList, err error) { - result = &api.ServiceList{} +func (c *services) List(opts v1.ListOptions) (result *core.ServiceList, err error) { + result = &core.ServiceList{} err = c.client.Get(). Namespace(c.ns). Resource("services"). @@ -95,8 +95,8 @@ func (c *services) Watch(opts v1.ListOptions) (watch.Interface, error) { } // Create takes the representation of a service and creates it. Returns the server's representation of the service, and an error, if there is any. -func (c *services) Create(service *api.Service) (result *api.Service, err error) { - result = &api.Service{} +func (c *services) Create(service *core.Service) (result *core.Service, err error) { + result = &core.Service{} err = c.client.Post(). Namespace(c.ns). Resource("services"). @@ -107,8 +107,8 @@ func (c *services) Create(service *api.Service) (result *api.Service, err error) } // Update takes the representation of a service and updates it. Returns the server's representation of the service, and an error, if there is any. -func (c *services) Update(service *api.Service) (result *api.Service, err error) { - result = &api.Service{} +func (c *services) Update(service *core.Service) (result *core.Service, err error) { + result = &core.Service{} err = c.client.Put(). Namespace(c.ns). Resource("services"). @@ -122,8 +122,8 @@ func (c *services) Update(service *api.Service) (result *api.Service, err error) // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *services) UpdateStatus(service *api.Service) (result *api.Service, err error) { - result = &api.Service{} +func (c *services) UpdateStatus(service *core.Service) (result *core.Service, err error) { + result = &core.Service{} err = c.client.Put(). Namespace(c.ns). Resource("services"). @@ -158,8 +158,8 @@ func (c *services) DeleteCollection(options *v1.DeleteOptions, listOptions v1.Li } // Patch applies the patch and returns the patched service. -func (c *services) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *api.Service, err error) { - result = &api.Service{} +func (c *services) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.Service, err error) { + result = &core.Service{} err = c.client.Patch(pt). Namespace(c.ns). Resource("services"). diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/serviceaccount.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/serviceaccount.go index 3a7bf04aaf31..75f0191f6fa8 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/serviceaccount.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/serviceaccount.go @@ -21,7 +21,7 @@ import ( types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rest "k8s.io/client-go/rest" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" ) @@ -33,14 +33,14 @@ type ServiceAccountsGetter interface { // ServiceAccountInterface has methods to work with ServiceAccount resources. type ServiceAccountInterface interface { - Create(*api.ServiceAccount) (*api.ServiceAccount, error) - Update(*api.ServiceAccount) (*api.ServiceAccount, error) + Create(*core.ServiceAccount) (*core.ServiceAccount, error) + Update(*core.ServiceAccount) (*core.ServiceAccount, error) Delete(name string, options *v1.DeleteOptions) error DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*api.ServiceAccount, error) - List(opts v1.ListOptions) (*api.ServiceAccountList, error) + Get(name string, options v1.GetOptions) (*core.ServiceAccount, error) + List(opts v1.ListOptions) (*core.ServiceAccountList, error) Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *api.ServiceAccount, err error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.ServiceAccount, err error) ServiceAccountExpansion } @@ -59,8 +59,8 @@ func newServiceAccounts(c *CoreClient, namespace string) *serviceAccounts { } // Get takes name of the serviceAccount, and returns the corresponding serviceAccount object, and an error if there is any. -func (c *serviceAccounts) Get(name string, options v1.GetOptions) (result *api.ServiceAccount, err error) { - result = &api.ServiceAccount{} +func (c *serviceAccounts) Get(name string, options v1.GetOptions) (result *core.ServiceAccount, err error) { + result = &core.ServiceAccount{} err = c.client.Get(). Namespace(c.ns). Resource("serviceaccounts"). 
@@ -72,8 +72,8 @@ func (c *serviceAccounts) Get(name string, options v1.GetOptions) (result *api.S } // List takes label and field selectors, and returns the list of ServiceAccounts that match those selectors. -func (c *serviceAccounts) List(opts v1.ListOptions) (result *api.ServiceAccountList, err error) { - result = &api.ServiceAccountList{} +func (c *serviceAccounts) List(opts v1.ListOptions) (result *core.ServiceAccountList, err error) { + result = &core.ServiceAccountList{} err = c.client.Get(). Namespace(c.ns). Resource("serviceaccounts"). @@ -94,8 +94,8 @@ func (c *serviceAccounts) Watch(opts v1.ListOptions) (watch.Interface, error) { } // Create takes the representation of a serviceAccount and creates it. Returns the server's representation of the serviceAccount, and an error, if there is any. -func (c *serviceAccounts) Create(serviceAccount *api.ServiceAccount) (result *api.ServiceAccount, err error) { - result = &api.ServiceAccount{} +func (c *serviceAccounts) Create(serviceAccount *core.ServiceAccount) (result *core.ServiceAccount, err error) { + result = &core.ServiceAccount{} err = c.client.Post(). Namespace(c.ns). Resource("serviceaccounts"). @@ -106,8 +106,8 @@ func (c *serviceAccounts) Create(serviceAccount *api.ServiceAccount) (result *ap } // Update takes the representation of a serviceAccount and updates it. Returns the server's representation of the serviceAccount, and an error, if there is any. -func (c *serviceAccounts) Update(serviceAccount *api.ServiceAccount) (result *api.ServiceAccount, err error) { - result = &api.ServiceAccount{} +func (c *serviceAccounts) Update(serviceAccount *core.ServiceAccount) (result *core.ServiceAccount, err error) { + result = &core.ServiceAccount{} err = c.client.Put(). Namespace(c.ns). Resource("serviceaccounts"). @@ -141,8 +141,8 @@ func (c *serviceAccounts) DeleteCollection(options *v1.DeleteOptions, listOption } // Patch applies the patch and returns the patched serviceAccount. -func (c *serviceAccounts) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *api.ServiceAccount, err error) { - result = &api.ServiceAccount{} +func (c *serviceAccounts) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *core.ServiceAccount, err error) { + result = &core.ServiceAccount{} err = c.client.Patch(pt). Namespace(c.ns). Resource("serviceaccounts"). 
diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/events/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/events/internalversion/BUILD new file mode 100644 index 000000000000..cca3da1e72a3 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/events/internalversion/BUILD @@ -0,0 +1,33 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "events_client.go", + "generated_expansion.go", + ], + importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/events/internalversion", + visibility = ["//visibility:public"], + deps = [ + "//pkg/client/clientset_generated/internalclientset/scheme:go_default_library", + "//vendor/k8s.io/client-go/rest:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/client/clientset_generated/internalclientset/typed/events/internalversion/fake:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/events/internalversion/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/events/internalversion/doc.go new file mode 100644 index 000000000000..3adf06d8934f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/events/internalversion/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package has the automatically generated typed clients. +package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/events/internalversion/events_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/events/internalversion/events_client.go new file mode 100644 index 000000000000..e2181c74f48c --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/events/internalversion/events_client.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package internalversion + +import ( + rest "k8s.io/client-go/rest" + "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" +) + +type EventsInterface interface { + RESTClient() rest.Interface +} + +// EventsClient is used to interact with features provided by the events.k8s.io group. +type EventsClient struct { + restClient rest.Interface +} + +// NewForConfig creates a new EventsClient for the given config. +func NewForConfig(c *rest.Config) (*EventsClient, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &EventsClient{client}, nil +} + +// NewForConfigOrDie creates a new EventsClient for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *EventsClient { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new EventsClient for the given RESTClient. +func New(c rest.Interface) *EventsClient { + return &EventsClient{c} +} + +func setConfigDefaults(config *rest.Config) error { + g, err := scheme.Registry.Group("events.k8s.io") + if err != nil { + return err + } + + config.APIPath = "/apis" + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + if config.GroupVersion == nil || config.GroupVersion.Group != g.GroupVersion.Group { + gv := g.GroupVersion + config.GroupVersion = &gv + } + config.NegotiatedSerializer = scheme.Codecs + + if config.QPS == 0 { + config.QPS = 5 + } + if config.Burst == 0 { + config.Burst = 10 + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *EventsClient) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/events/internalversion/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/events/internalversion/generated_expansion.go new file mode 100644 index 000000000000..b0f76eeede53 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/events/internalversion/generated_expansion.go @@ -0,0 +1,17 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/BUILD index e6e9496be35e..85d27b16516b 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/BUILD @@ -17,16 +17,13 @@ go_library( "ingress.go", "podsecuritypolicy.go", "replicaset.go", - "scale.go", - "scale_expansion.go", - "thirdpartyresource.go", ], + importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion", deps = [ + "//pkg/apis/autoscaling:go_default_library", "//pkg/apis/extensions:go_default_library", "//pkg/client/clientset_generated/internalclientset/scheme:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/deployment.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/deployment.go index 32213751782c..919ab32e2aeb 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/deployment.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/deployment.go @@ -21,6 +21,7 @@ import ( types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rest "k8s.io/client-go/rest" + autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" extensions "k8s.io/kubernetes/pkg/apis/extensions" scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" ) @@ -42,8 +43,8 @@ type DeploymentInterface interface { List(opts v1.ListOptions) (*extensions.DeploymentList, error) Watch(opts v1.ListOptions) (watch.Interface, error) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *extensions.Deployment, err error) - GetScale(deploymentName string, options v1.GetOptions) (*extensions.Scale, error) - UpdateScale(deploymentName string, scale *extensions.Scale) (*extensions.Scale, error) + GetScale(deploymentName string, options v1.GetOptions) (*autoscaling.Scale, error) + UpdateScale(deploymentName string, scale *autoscaling.Scale) (*autoscaling.Scale, error) DeploymentExpansion } @@ -174,9 +175,9 @@ func (c *deployments) Patch(name string, pt types.PatchType, data []byte, subres return } -// GetScale takes name of the deployment, and returns the corresponding extensions.Scale object, and an error if there is any. -func (c *deployments) GetScale(deploymentName string, options v1.GetOptions) (result *extensions.Scale, err error) { - result = &extensions.Scale{} +// GetScale takes name of the deployment, and returns the corresponding autoscaling.Scale object, and an error if there is any. 
+func (c *deployments) GetScale(deploymentName string, options v1.GetOptions) (result *autoscaling.Scale, err error) { + result = &autoscaling.Scale{} err = c.client.Get(). Namespace(c.ns). Resource("deployments"). @@ -189,8 +190,8 @@ func (c *deployments) GetScale(deploymentName string, options v1.GetOptions) (re } // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *deployments) UpdateScale(deploymentName string, scale *extensions.Scale) (result *extensions.Scale, err error) { - result = &extensions.Scale{} +func (c *deployments) UpdateScale(deploymentName string, scale *autoscaling.Scale) (result *autoscaling.Scale, err error) { + result = &autoscaling.Scale{} err = c.client.Put(). Namespace(c.ns). Resource("deployments"). diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/doc.go index 0a27970fbb3b..3adf06d8934f 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with the default arguments. - // This package has the automatically generated typed clients. package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/extensions_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/extensions_client.go index a89a83719137..1a26d5d3028f 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/extensions_client.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/extensions_client.go @@ -28,8 +28,6 @@ type ExtensionsInterface interface { IngressesGetter PodSecurityPoliciesGetter ReplicaSetsGetter - ScalesGetter - ThirdPartyResourcesGetter } // ExtensionsClient is used to interact with features provided by the extensions group. @@ -57,14 +55,6 @@ func (c *ExtensionsClient) ReplicaSets(namespace string) ReplicaSetInterface { return newReplicaSets(c, namespace) } -func (c *ExtensionsClient) Scales(namespace string) ScaleInterface { - return newScales(c, namespace) -} - -func (c *ExtensionsClient) ThirdPartyResources() ThirdPartyResourceInterface { - return newThirdPartyResources(c) -} - // NewForConfig creates a new ExtensionsClient for the given config. 
func NewForConfig(c *rest.Config) (*ExtensionsClient, error) { config := *c diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/generated_expansion.go index d59199eb50c9..cf6ee02f0339 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/generated_expansion.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/generated_expansion.go @@ -23,5 +23,3 @@ type IngressExpansion interface{} type PodSecurityPolicyExpansion interface{} type ReplicaSetExpansion interface{} - -type ThirdPartyResourceExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/replicaset.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/replicaset.go index d6debe65fc23..5da973b71685 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/replicaset.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/replicaset.go @@ -21,6 +21,7 @@ import ( types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rest "k8s.io/client-go/rest" + autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" extensions "k8s.io/kubernetes/pkg/apis/extensions" scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" ) @@ -42,8 +43,8 @@ type ReplicaSetInterface interface { List(opts v1.ListOptions) (*extensions.ReplicaSetList, error) Watch(opts v1.ListOptions) (watch.Interface, error) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *extensions.ReplicaSet, err error) - GetScale(replicaSetName string, options v1.GetOptions) (*extensions.Scale, error) - UpdateScale(replicaSetName string, scale *extensions.Scale) (*extensions.Scale, error) + GetScale(replicaSetName string, options v1.GetOptions) (*autoscaling.Scale, error) + UpdateScale(replicaSetName string, scale *autoscaling.Scale) (*autoscaling.Scale, error) ReplicaSetExpansion } @@ -174,9 +175,9 @@ func (c *replicaSets) Patch(name string, pt types.PatchType, data []byte, subres return } -// GetScale takes name of the replicaSet, and returns the corresponding extensions.Scale object, and an error if there is any. -func (c *replicaSets) GetScale(replicaSetName string, options v1.GetOptions) (result *extensions.Scale, err error) { - result = &extensions.Scale{} +// GetScale takes name of the replicaSet, and returns the corresponding autoscaling.Scale object, and an error if there is any. +func (c *replicaSets) GetScale(replicaSetName string, options v1.GetOptions) (result *autoscaling.Scale, err error) { + result = &autoscaling.Scale{} err = c.client.Get(). Namespace(c.ns). Resource("replicasets"). @@ -189,8 +190,8 @@ func (c *replicaSets) GetScale(replicaSetName string, options v1.GetOptions) (re } // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. 
-func (c *replicaSets) UpdateScale(replicaSetName string, scale *extensions.Scale) (result *extensions.Scale, err error) { - result = &extensions.Scale{} +func (c *replicaSets) UpdateScale(replicaSetName string, scale *autoscaling.Scale) (result *autoscaling.Scale, err error) { + result = &autoscaling.Scale{} err = c.client.Put(). Namespace(c.ns). Resource("replicasets"). diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/scale.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/scale.go deleted file mode 100644 index ad1d91b015b3..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/scale.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - rest "k8s.io/client-go/rest" -) - -// ScalesGetter has a method to return a ScaleInterface. -// A group's client should implement this interface. -type ScalesGetter interface { - Scales(namespace string) ScaleInterface -} - -// ScaleInterface has methods to work with Scale resources. -type ScaleInterface interface { - ScaleExpansion -} - -// scales implements ScaleInterface -type scales struct { - client rest.Interface - ns string -} - -// newScales returns a Scales -func newScales(c *ExtensionsClient, namespace string) *scales { - return &scales{ - client: c.RESTClient(), - ns: namespace, - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/scale_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/scale_expansion.go deleted file mode 100644 index e5f2242a8d57..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/scale_expansion.go +++ /dev/null @@ -1,65 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/kubernetes/pkg/apis/extensions" -) - -// The ScaleExpansion interface allows manually adding extra methods to the ScaleInterface. 
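Illustrative note, not part of the vendored patch: with the regenerated extensions client above, the scale subresource methods on deployments and replica sets now round-trip autoscaling.Scale instead of the removed extensions.Scale (the standalone Scales client is gone). A minimal sketch of a caller resizing a ReplicaSet follows; the clientset variable cs, the Extensions().ReplicaSets(ns) getter chain (mirroring the client.Apps()/client.Admissionregistration() pattern used elsewhere in this patch), and the Scale.Spec.Replicas field are assumptions.

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
)

// resizeReplicaSet is a hypothetical caller of the regenerated client; it is
// not taken from this patch. It reads the scale subresource, edits the
// desired replica count, and writes it back.
func resizeReplicaSet(cs internalclientset.Interface, namespace, name string, replicas int32) error {
	rs := cs.Extensions().ReplicaSets(namespace)
	// GetScale now returns *autoscaling.Scale rather than *extensions.Scale.
	scale, err := rs.GetScale(name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	// Spec.Replicas is assumed to be the int32 desired-count field of the
	// internal autoscaling.Scale type.
	scale.Spec.Replicas = replicas
	// UpdateScale likewise accepts and returns *autoscaling.Scale.
	_, err = rs.UpdateScale(name, scale)
	return err
}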
-type ScaleExpansion interface { - Get(kind string, name string) (*extensions.Scale, error) - Update(kind string, scale *extensions.Scale) (*extensions.Scale, error) -} - -// Get takes the reference to scale subresource and returns the subresource or error, if one occurs. -func (c *scales) Get(kind string, name string) (result *extensions.Scale, err error) { - result = &extensions.Scale{} - - // TODO this method needs to take a proper unambiguous kind - fullyQualifiedKind := schema.GroupVersionKind{Kind: kind} - resource, _ := meta.UnsafeGuessKindToResource(fullyQualifiedKind) - - err = c.client.Get(). - Namespace(c.ns). - Resource(resource.Resource). - Name(name). - SubResource("scale"). - Do(). - Into(result) - return -} - -func (c *scales) Update(kind string, scale *extensions.Scale) (result *extensions.Scale, err error) { - result = &extensions.Scale{} - - // TODO this method needs to take a proper unambiguous kind - fullyQualifiedKind := schema.GroupVersionKind{Kind: kind} - resource, _ := meta.UnsafeGuessKindToResource(fullyQualifiedKind) - - err = c.client.Put(). - Namespace(scale.Namespace). - Resource(resource.Resource). - Name(scale.Name). - SubResource("scale"). - Body(scale). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/thirdpartyresource.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/thirdpartyresource.go deleted file mode 100644 index cd814b6e578f..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/thirdpartyresource.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - extensions "k8s.io/kubernetes/pkg/apis/extensions" - scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" -) - -// ThirdPartyResourcesGetter has a method to return a ThirdPartyResourceInterface. -// A group's client should implement this interface. -type ThirdPartyResourcesGetter interface { - ThirdPartyResources() ThirdPartyResourceInterface -} - -// ThirdPartyResourceInterface has methods to work with ThirdPartyResource resources. 
-type ThirdPartyResourceInterface interface { - Create(*extensions.ThirdPartyResource) (*extensions.ThirdPartyResource, error) - Update(*extensions.ThirdPartyResource) (*extensions.ThirdPartyResource, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*extensions.ThirdPartyResource, error) - List(opts v1.ListOptions) (*extensions.ThirdPartyResourceList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *extensions.ThirdPartyResource, err error) - ThirdPartyResourceExpansion -} - -// thirdPartyResources implements ThirdPartyResourceInterface -type thirdPartyResources struct { - client rest.Interface -} - -// newThirdPartyResources returns a ThirdPartyResources -func newThirdPartyResources(c *ExtensionsClient) *thirdPartyResources { - return &thirdPartyResources{ - client: c.RESTClient(), - } -} - -// Get takes name of the thirdPartyResource, and returns the corresponding thirdPartyResource object, and an error if there is any. -func (c *thirdPartyResources) Get(name string, options v1.GetOptions) (result *extensions.ThirdPartyResource, err error) { - result = &extensions.ThirdPartyResource{} - err = c.client.Get(). - Resource("thirdpartyresources"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ThirdPartyResources that match those selectors. -func (c *thirdPartyResources) List(opts v1.ListOptions) (result *extensions.ThirdPartyResourceList, err error) { - result = &extensions.ThirdPartyResourceList{} - err = c.client.Get(). - Resource("thirdpartyresources"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested thirdPartyResources. -func (c *thirdPartyResources) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("thirdpartyresources"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a thirdPartyResource and creates it. Returns the server's representation of the thirdPartyResource, and an error, if there is any. -func (c *thirdPartyResources) Create(thirdPartyResource *extensions.ThirdPartyResource) (result *extensions.ThirdPartyResource, err error) { - result = &extensions.ThirdPartyResource{} - err = c.client.Post(). - Resource("thirdpartyresources"). - Body(thirdPartyResource). - Do(). - Into(result) - return -} - -// Update takes the representation of a thirdPartyResource and updates it. Returns the server's representation of the thirdPartyResource, and an error, if there is any. -func (c *thirdPartyResources) Update(thirdPartyResource *extensions.ThirdPartyResource) (result *extensions.ThirdPartyResource, err error) { - result = &extensions.ThirdPartyResource{} - err = c.client.Put(). - Resource("thirdpartyresources"). - Name(thirdPartyResource.Name). - Body(thirdPartyResource). - Do(). - Into(result) - return -} - -// Delete takes name of the thirdPartyResource and deletes it. Returns an error if one occurs. -func (c *thirdPartyResources) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("thirdpartyresources"). - Name(name). - Body(options). - Do(). 
- Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *thirdPartyResources) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Resource("thirdpartyresources"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched thirdPartyResource. -func (c *thirdPartyResources) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *extensions.ThirdPartyResource, err error) { - result = &extensions.ThirdPartyResource{} - err = c.client.Patch(pt). - Resource("thirdpartyresources"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/BUILD index f7a14f9fe707..15bfb8a3e2e2 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/BUILD @@ -13,6 +13,7 @@ go_library( "networking_client.go", "networkpolicy.go", ], + importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion", deps = [ "//pkg/apis/networking:go_default_library", "//pkg/client/clientset_generated/internalclientset/scheme:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/doc.go index 0a27970fbb3b..3adf06d8934f 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with the default arguments. - // This package has the automatically generated typed clients. 
package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/BUILD index 2288c94c6250..517a1c0bbb16 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/BUILD @@ -15,6 +15,7 @@ go_library( "poddisruptionbudget.go", "policy_client.go", ], + importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion", deps = [ "//pkg/apis/policy:go_default_library", "//pkg/client/clientset_generated/internalclientset/scheme:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/doc.go index 0a27970fbb3b..3adf06d8934f 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with the default arguments. - // This package has the automatically generated typed clients. package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/BUILD index fdc305f8ea2c..e51a6d632138 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/BUILD @@ -16,6 +16,7 @@ go_library( "role.go", "rolebinding.go", ], + importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion", deps = [ "//pkg/apis/rbac:go_default_library", "//pkg/client/clientset_generated/internalclientset/scheme:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/doc.go index 0a27970fbb3b..3adf06d8934f 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with the default arguments. - // This package has the automatically generated typed clients. 
package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/BUILD index c6ecdbea0170..4ba8901bb3cb 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/BUILD @@ -13,6 +13,7 @@ go_library( "priorityclass.go", "scheduling_client.go", ], + importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion", deps = [ "//pkg/apis/scheduling:go_default_library", "//pkg/client/clientset_generated/internalclientset/scheme:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/doc.go index 0a27970fbb3b..3adf06d8934f 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with the default arguments. - // This package has the automatically generated typed clients. package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/BUILD index 45f1886ae0cc..d6d8e0b02f7e 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/BUILD @@ -13,6 +13,7 @@ go_library( "podpreset.go", "settings_client.go", ], + importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion", deps = [ "//pkg/apis/settings:go_default_library", "//pkg/client/clientset_generated/internalclientset/scheme:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/doc.go index 0a27970fbb3b..3adf06d8934f 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with the default arguments. - // This package has the automatically generated typed clients. 
package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/BUILD index 1a4b8f4d0346..cd8e7a8121ad 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/BUILD @@ -12,7 +12,9 @@ go_library( "generated_expansion.go", "storage_client.go", "storageclass.go", + "volumeattachment.go", ], + importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion", deps = [ "//pkg/apis/storage:go_default_library", "//pkg/client/clientset_generated/internalclientset/scheme:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/doc.go index 0a27970fbb3b..3adf06d8934f 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with the default arguments. - // This package has the automatically generated typed clients. package internalversion diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/generated_expansion.go index 2fe8e8d7edd1..a1b2c0f5b84a 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/generated_expansion.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/generated_expansion.go @@ -17,3 +17,5 @@ limitations under the License. package internalversion type StorageClassExpansion interface{} + +type VolumeAttachmentExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/storage_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/storage_client.go index 9eebff01a4c0..2eca72b8f530 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/storage_client.go +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/storage_client.go @@ -24,6 +24,7 @@ import ( type StorageInterface interface { RESTClient() rest.Interface StorageClassesGetter + VolumeAttachmentsGetter } // StorageClient is used to interact with features provided by the storage.k8s.io group. @@ -35,6 +36,10 @@ func (c *StorageClient) StorageClasses() StorageClassInterface { return newStorageClasses(c) } +func (c *StorageClient) VolumeAttachments() VolumeAttachmentInterface { + return newVolumeAttachments(c) +} + // NewForConfig creates a new StorageClient for the given config. 
func NewForConfig(c *rest.Config) (*StorageClient, error) { config := *c diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/volumeattachment.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/volumeattachment.go new file mode 100644 index 000000000000..da7fb9e246a7 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/volumeattachment.go @@ -0,0 +1,161 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internalversion + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + storage "k8s.io/kubernetes/pkg/apis/storage" + scheme "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme" +) + +// VolumeAttachmentsGetter has a method to return a VolumeAttachmentInterface. +// A group's client should implement this interface. +type VolumeAttachmentsGetter interface { + VolumeAttachments() VolumeAttachmentInterface +} + +// VolumeAttachmentInterface has methods to work with VolumeAttachment resources. +type VolumeAttachmentInterface interface { + Create(*storage.VolumeAttachment) (*storage.VolumeAttachment, error) + Update(*storage.VolumeAttachment) (*storage.VolumeAttachment, error) + UpdateStatus(*storage.VolumeAttachment) (*storage.VolumeAttachment, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*storage.VolumeAttachment, error) + List(opts v1.ListOptions) (*storage.VolumeAttachmentList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *storage.VolumeAttachment, err error) + VolumeAttachmentExpansion +} + +// volumeAttachments implements VolumeAttachmentInterface +type volumeAttachments struct { + client rest.Interface +} + +// newVolumeAttachments returns a VolumeAttachments +func newVolumeAttachments(c *StorageClient) *volumeAttachments { + return &volumeAttachments{ + client: c.RESTClient(), + } +} + +// Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. +func (c *volumeAttachments) Get(name string, options v1.GetOptions) (result *storage.VolumeAttachment, err error) { + result = &storage.VolumeAttachment{} + err = c.client.Get(). + Resource("volumeattachments"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. 
+func (c *volumeAttachments) List(opts v1.ListOptions) (result *storage.VolumeAttachmentList, err error) { + result = &storage.VolumeAttachmentList{} + err = c.client.Get(). + Resource("volumeattachments"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested volumeAttachments. +func (c *volumeAttachments) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("volumeattachments"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. +func (c *volumeAttachments) Create(volumeAttachment *storage.VolumeAttachment) (result *storage.VolumeAttachment, err error) { + result = &storage.VolumeAttachment{} + err = c.client.Post(). + Resource("volumeattachments"). + Body(volumeAttachment). + Do(). + Into(result) + return +} + +// Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. +func (c *volumeAttachments) Update(volumeAttachment *storage.VolumeAttachment) (result *storage.VolumeAttachment, err error) { + result = &storage.VolumeAttachment{} + err = c.client.Put(). + Resource("volumeattachments"). + Name(volumeAttachment.Name). + Body(volumeAttachment). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *volumeAttachments) UpdateStatus(volumeAttachment *storage.VolumeAttachment) (result *storage.VolumeAttachment, err error) { + result = &storage.VolumeAttachment{} + err = c.client.Put(). + Resource("volumeattachments"). + Name(volumeAttachment.Name). + SubResource("status"). + Body(volumeAttachment). + Do(). + Into(result) + return +} + +// Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. +func (c *volumeAttachments) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("volumeattachments"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *volumeAttachments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("volumeattachments"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched volumeAttachment. +func (c *volumeAttachments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *storage.VolumeAttachment, err error) { + result = &storage.VolumeAttachment{} + err = c.client.Patch(pt). + Resource("volumeattachments"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/BUILD index f9383819f1f6..91495dc5d9d3 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/BUILD @@ -11,13 +11,14 @@ go_library( "factory.go", "generic.go", ], + importpath = "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion", deps = [ - "//pkg/api:go_default_library", "//pkg/apis/admissionregistration:go_default_library", "//pkg/apis/apps:go_default_library", "//pkg/apis/autoscaling:go_default_library", "//pkg/apis/batch:go_default_library", "//pkg/apis/certificates:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", "//pkg/apis/networking:go_default_library", "//pkg/apis/policy:go_default_library", @@ -40,6 +41,7 @@ go_library( "//pkg/client/informers/informers_generated/internalversion/scheduling:go_default_library", "//pkg/client/informers/informers_generated/internalversion/settings:go_default_library", "//pkg/client/informers/informers_generated/internalversion/storage:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/admissionregistration/BUILD b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/admissionregistration/BUILD index 4bd8bd02e1d9..0c993a743c01 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/admissionregistration/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/admissionregistration/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["interface.go"], + importpath = "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/admissionregistration", deps = [ "//pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion:go_default_library", "//pkg/client/informers/informers_generated/internalversion/internalinterfaces:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/admissionregistration/interface.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/admissionregistration/interface.go index 036ae9754b60..30943e156ee0 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/admissionregistration/interface.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/admissionregistration/interface.go @@ -30,15 +30,17 @@ type Interface interface { } type group struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. 
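Illustrative note, not part of the vendored patch: the storage internal client above gains a cluster-scoped VolumeAttachments resource. A minimal sketch of listing attachments follows; the Storage() getter on the internal clientset, the cs variable, and the Spec.NodeName / Status.Attached field names are assumptions, as they are not shown in this patch.

package example

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
)

// printVolumeAttachments is a hypothetical helper; VolumeAttachments is
// cluster-scoped, so no namespace argument is needed, matching the generated
// client above.
func printVolumeAttachments(cs internalclientset.Interface) error {
	list, err := cs.Storage().VolumeAttachments().List(metav1.ListOptions{})
	if err != nil {
		return err
	}
	for i := range list.Items {
		va := &list.Items[i]
		// Field names here are assumed from the internal storage types.
		fmt.Printf("%s -> node %s (attached=%t)\n", va.Name, va.Spec.NodeName, va.Status.Attached)
	}
	return nil
}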
-func New(f internalinterfaces.SharedInformerFactory) Interface { - return &group{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // InternalVersion returns a new internalversion.Interface. func (g *group) InternalVersion() internalversion.Interface { - return internalversion.New(g.SharedInformerFactory) + return internalversion.New(g.factory, g.namespace, g.tweakListOptions) } diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/BUILD index 2604d64f6222..ca1c234c0131 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/BUILD @@ -8,10 +8,12 @@ load( go_library( name = "go_default_library", srcs = [ - "externaladmissionhookconfiguration.go", "initializerconfiguration.go", "interface.go", + "mutatingwebhookconfiguration.go", + "validatingwebhookconfiguration.go", ], + importpath = "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion", deps = [ "//pkg/apis/admissionregistration:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/externaladmissionhookconfiguration.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/externaladmissionhookconfiguration.go deleted file mode 100644 index 6972004733d7..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/externaladmissionhookconfiguration.go +++ /dev/null @@ -1,73 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// This file was automatically generated by informer-gen - -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" - admissionregistration "k8s.io/kubernetes/pkg/apis/admissionregistration" - internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" - internalversion "k8s.io/kubernetes/pkg/client/listers/admissionregistration/internalversion" - time "time" -) - -// ExternalAdmissionHookConfigurationInformer provides access to a shared informer and lister for -// ExternalAdmissionHookConfigurations. -type ExternalAdmissionHookConfigurationInformer interface { - Informer() cache.SharedIndexInformer - Lister() internalversion.ExternalAdmissionHookConfigurationLister -} - -type externalAdmissionHookConfigurationInformer struct { - factory internalinterfaces.SharedInformerFactory -} - -// NewExternalAdmissionHookConfigurationInformer constructs a new informer for ExternalAdmissionHookConfiguration type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewExternalAdmissionHookConfigurationInformer(client internalclientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - return client.Admissionregistration().ExternalAdmissionHookConfigurations().List(options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - return client.Admissionregistration().ExternalAdmissionHookConfigurations().Watch(options) - }, - }, - &admissionregistration.ExternalAdmissionHookConfiguration{}, - resyncPeriod, - indexers, - ) -} - -func defaultExternalAdmissionHookConfigurationInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewExternalAdmissionHookConfigurationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) -} - -func (f *externalAdmissionHookConfigurationInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&admissionregistration.ExternalAdmissionHookConfiguration{}, defaultExternalAdmissionHookConfigurationInformer) -} - -func (f *externalAdmissionHookConfigurationInformer) Lister() internalversion.ExternalAdmissionHookConfigurationLister { - return internalversion.NewExternalAdmissionHookConfigurationLister(f.Informer().GetIndexer()) -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/initializerconfiguration.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/initializerconfiguration.go index 70f869291d5d..bc31ce10214c 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/initializerconfiguration.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/initializerconfiguration.go @@ -38,19 +38,33 @@ type InitializerConfigurationInformer interface { } 
type initializerConfigurationInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc } // NewInitializerConfigurationInformer constructs a new informer for InitializerConfiguration type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewInitializerConfigurationInformer(client internalclientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredInitializerConfigurationInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredInitializerConfigurationInformer constructs a new informer for InitializerConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredInitializerConfigurationInformer(client internalclientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Admissionregistration().InitializerConfigurations().List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Admissionregistration().InitializerConfigurations().Watch(options) }, }, @@ -60,12 +74,12 @@ func NewInitializerConfigurationInformer(client internalclientset.Interface, res ) } -func defaultInitializerConfigurationInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewInitializerConfigurationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *initializerConfigurationInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredInitializerConfigurationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *initializerConfigurationInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&admissionregistration.InitializerConfiguration{}, defaultInitializerConfigurationInformer) + return f.factory.InformerFor(&admissionregistration.InitializerConfiguration{}, f.defaultInformer) } func (f *initializerConfigurationInformer) Lister() internalversion.InitializerConfigurationLister { diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/interface.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/interface.go index bd0803e2759f..0d7bec0173c4 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/interface.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/interface.go @@ -24,27 +24,36 @@ import ( // Interface provides access to all the informers in this group version. 
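Illustrative note, not part of the vendored patch: the regenerated informers above add NewFiltered*Informer constructors that thread a tweakListOptions hook into every List and Watch call, so a caller can scope an informer to, for example, a label selector. The sketch below assumes TweakListOptionsFunc has the func(*metav1.ListOptions) shape implied by the tweakListOptions(&options) calls in the generated code, and the label value is invented.

package example

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
	internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	admissionreginformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion"
)

// newManagedInitializerConfigInformer builds a hypothetical standalone
// informer restricted to objects carrying an assumed label; the tweak hook
// mutates the ListOptions before each List/Watch issued by the informer.
func newManagedInitializerConfigInformer(cs internalclientset.Interface) cache.SharedIndexInformer {
	tweak := func(opts *metav1.ListOptions) {
		opts.LabelSelector = "example.com/managed=true" // assumed label
	}
	return admissionreginformers.NewFilteredInitializerConfigurationInformer(
		cs,
		30*time.Second, // resync period
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
		tweak,
	)
}

The same hook is what the factory-level New(f, namespace, tweakListOptions) constructors shown in this patch pass down, which is how a shared informer factory can now be scoped to a namespace or selector without bypassing the factory.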
type Interface interface { - // ExternalAdmissionHookConfigurations returns a ExternalAdmissionHookConfigurationInformer. - ExternalAdmissionHookConfigurations() ExternalAdmissionHookConfigurationInformer // InitializerConfigurations returns a InitializerConfigurationInformer. InitializerConfigurations() InitializerConfigurationInformer + // MutatingWebhookConfigurations returns a MutatingWebhookConfigurationInformer. + MutatingWebhookConfigurations() MutatingWebhookConfigurationInformer + // ValidatingWebhookConfigurations returns a ValidatingWebhookConfigurationInformer. + ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInformer } type version struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &version{f} -} - -// ExternalAdmissionHookConfigurations returns a ExternalAdmissionHookConfigurationInformer. -func (v *version) ExternalAdmissionHookConfigurations() ExternalAdmissionHookConfigurationInformer { - return &externalAdmissionHookConfigurationInformer{factory: v.SharedInformerFactory} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // InitializerConfigurations returns a InitializerConfigurationInformer. func (v *version) InitializerConfigurations() InitializerConfigurationInformer { - return &initializerConfigurationInformer{factory: v.SharedInformerFactory} + return &initializerConfigurationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// MutatingWebhookConfigurations returns a MutatingWebhookConfigurationInformer. +func (v *version) MutatingWebhookConfigurations() MutatingWebhookConfigurationInformer { + return &mutatingWebhookConfigurationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// ValidatingWebhookConfigurations returns a ValidatingWebhookConfigurationInformer. +func (v *version) ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInformer { + return &validatingWebhookConfigurationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/mutatingwebhookconfiguration.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/mutatingwebhookconfiguration.go new file mode 100644 index 000000000000..409f36b965d8 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/mutatingwebhookconfiguration.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by informer-gen + +package internalversion + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + admissionregistration "k8s.io/kubernetes/pkg/apis/admissionregistration" + internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" + internalversion "k8s.io/kubernetes/pkg/client/listers/admissionregistration/internalversion" + time "time" +) + +// MutatingWebhookConfigurationInformer provides access to a shared informer and lister for +// MutatingWebhookConfigurations. +type MutatingWebhookConfigurationInformer interface { + Informer() cache.SharedIndexInformer + Lister() internalversion.MutatingWebhookConfigurationLister +} + +type mutatingWebhookConfigurationInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewMutatingWebhookConfigurationInformer constructs a new informer for MutatingWebhookConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewMutatingWebhookConfigurationInformer(client internalclientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredMutatingWebhookConfigurationInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredMutatingWebhookConfigurationInformer constructs a new informer for MutatingWebhookConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredMutatingWebhookConfigurationInformer(client internalclientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.Admissionregistration().MutatingWebhookConfigurations().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.Admissionregistration().MutatingWebhookConfigurations().Watch(options) + }, + }, + &admissionregistration.MutatingWebhookConfiguration{}, + resyncPeriod, + indexers, + ) +} + +func (f *mutatingWebhookConfigurationInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredMutatingWebhookConfigurationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *mutatingWebhookConfigurationInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&admissionregistration.MutatingWebhookConfiguration{}, f.defaultInformer) +} + +func (f *mutatingWebhookConfigurationInformer) Lister() internalversion.MutatingWebhookConfigurationLister { + return internalversion.NewMutatingWebhookConfigurationLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/validatingwebhookconfiguration.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/validatingwebhookconfiguration.go new file mode 100644 index 000000000000..3242e317f500 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion/validatingwebhookconfiguration.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package internalversion + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + admissionregistration "k8s.io/kubernetes/pkg/apis/admissionregistration" + internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" + internalversion "k8s.io/kubernetes/pkg/client/listers/admissionregistration/internalversion" + time "time" +) + +// ValidatingWebhookConfigurationInformer provides access to a shared informer and lister for +// ValidatingWebhookConfigurations. 
+type ValidatingWebhookConfigurationInformer interface { + Informer() cache.SharedIndexInformer + Lister() internalversion.ValidatingWebhookConfigurationLister +} + +type validatingWebhookConfigurationInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewValidatingWebhookConfigurationInformer constructs a new informer for ValidatingWebhookConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewValidatingWebhookConfigurationInformer(client internalclientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredValidatingWebhookConfigurationInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredValidatingWebhookConfigurationInformer constructs a new informer for ValidatingWebhookConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredValidatingWebhookConfigurationInformer(client internalclientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.Admissionregistration().ValidatingWebhookConfigurations().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.Admissionregistration().ValidatingWebhookConfigurations().Watch(options) + }, + }, + &admissionregistration.ValidatingWebhookConfiguration{}, + resyncPeriod, + indexers, + ) +} + +func (f *validatingWebhookConfigurationInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredValidatingWebhookConfigurationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *validatingWebhookConfigurationInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&admissionregistration.ValidatingWebhookConfiguration{}, f.defaultInformer) +} + +func (f *validatingWebhookConfigurationInformer) Lister() internalversion.ValidatingWebhookConfigurationLister { + return internalversion.NewValidatingWebhookConfigurationLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/apps/BUILD b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/apps/BUILD index 535cefd539f7..be3d8f6f5aab 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/apps/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/apps/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["interface.go"], + importpath = "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/apps", deps = [ "//pkg/client/informers/informers_generated/internalversion/apps/internalversion:go_default_library", 
"//pkg/client/informers/informers_generated/internalversion/internalinterfaces:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/apps/interface.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/apps/interface.go index 720ab8c11e25..7467b3eec096 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/apps/interface.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/apps/interface.go @@ -30,15 +30,17 @@ type Interface interface { } type group struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &group{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // InternalVersion returns a new internalversion.Interface. func (g *group) InternalVersion() internalversion.Interface { - return internalversion.New(g.SharedInformerFactory) + return internalversion.New(g.factory, g.namespace, g.tweakListOptions) } diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/apps/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/apps/internalversion/BUILD index 58739286a0d9..d3b51204f6ed 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/apps/internalversion/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/apps/internalversion/BUILD @@ -12,6 +12,7 @@ go_library( "interface.go", "statefulset.go", ], + importpath = "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/apps/internalversion", deps = [ "//pkg/apis/apps:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/apps/internalversion/controllerrevision.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/apps/internalversion/controllerrevision.go index 11e381b537bb..27407a12d6b5 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/apps/internalversion/controllerrevision.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/apps/internalversion/controllerrevision.go @@ -38,19 +38,34 @@ type ControllerRevisionInformer interface { } type controllerRevisionInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewControllerRevisionInformer constructs a new informer for ControllerRevision type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. 
func NewControllerRevisionInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredControllerRevisionInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredControllerRevisionInformer constructs a new informer for ControllerRevision type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredControllerRevisionInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Apps().ControllerRevisions(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Apps().ControllerRevisions(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewControllerRevisionInformer(client internalclientset.Interface, namespace ) } -func defaultControllerRevisionInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewControllerRevisionInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *controllerRevisionInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredControllerRevisionInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *controllerRevisionInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&apps.ControllerRevision{}, defaultControllerRevisionInformer) + return f.factory.InformerFor(&apps.ControllerRevision{}, f.defaultInformer) } func (f *controllerRevisionInformer) Lister() internalversion.ControllerRevisionLister { diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/apps/internalversion/interface.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/apps/internalversion/interface.go index bd5ec23b1522..3ceeae009b13 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/apps/internalversion/interface.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/apps/internalversion/interface.go @@ -31,20 +31,22 @@ type Interface interface { } type version struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &version{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // ControllerRevisions returns a ControllerRevisionInformer. 
func (v *version) ControllerRevisions() ControllerRevisionInformer { - return &controllerRevisionInformer{factory: v.SharedInformerFactory} + return &controllerRevisionInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // StatefulSets returns a StatefulSetInformer. func (v *version) StatefulSets() StatefulSetInformer { - return &statefulSetInformer{factory: v.SharedInformerFactory} + return &statefulSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/apps/internalversion/statefulset.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/apps/internalversion/statefulset.go index e52b64661bd7..055612f8d520 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/apps/internalversion/statefulset.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/apps/internalversion/statefulset.go @@ -38,19 +38,34 @@ type StatefulSetInformer interface { } type statefulSetInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewStatefulSetInformer constructs a new informer for StatefulSet type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewStatefulSetInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredStatefulSetInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredStatefulSetInformer constructs a new informer for StatefulSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredStatefulSetInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Apps().StatefulSets(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Apps().StatefulSets(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewStatefulSetInformer(client internalclientset.Interface, namespace string ) } -func defaultStatefulSetInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewStatefulSetInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *statefulSetInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredStatefulSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *statefulSetInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&apps.StatefulSet{}, defaultStatefulSetInformer) + return f.factory.InformerFor(&apps.StatefulSet{}, f.defaultInformer) } func (f *statefulSetInformer) Lister() internalversion.StatefulSetLister { diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/autoscaling/BUILD b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/autoscaling/BUILD index b436b210fdf9..7f48220955fb 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/autoscaling/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/autoscaling/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["interface.go"], + importpath = "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/autoscaling", deps = [ "//pkg/client/informers/informers_generated/internalversion/autoscaling/internalversion:go_default_library", "//pkg/client/informers/informers_generated/internalversion/internalinterfaces:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/autoscaling/interface.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/autoscaling/interface.go index 52a595766d9c..00e9e7f0c9ae 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/autoscaling/interface.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/autoscaling/interface.go @@ -30,15 +30,17 @@ type Interface interface { } type group struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. 
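For illustration, here is how the filtered StatefulSet constructor above could be used standalone, with an event handler and a label-selector tweak. This is only a sketch against the signature shown in this hunk; as the generated comments note, a factory-provided shared informer is usually preferable to an independent one like this, and the helper name, label, and resync period are placeholders:

package informerexample

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/kubernetes/pkg/apis/apps"
	internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	appsinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/apps/internalversion"
)

// watchStatefulSets wires up a standalone filtered StatefulSet informer for a
// single namespace and blocks running it until stopCh is closed.
func watchStatefulSets(client internalclientset.Interface, namespace string, stopCh <-chan struct{}) {
	informer := appsinformers.NewFilteredStatefulSetInformer(
		client, namespace, 30*time.Second,
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
		func(options *metav1.ListOptions) {
			// Illustrative tweak: only StatefulSets labelled tier=db.
			options.LabelSelector = "tier=db"
		},
	)
	informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			if ss, ok := obj.(*apps.StatefulSet); ok {
				_ = ss // e.g. enqueue ss.Name for processing
			}
		},
	})
	informer.Run(stopCh)
}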
-func New(f internalinterfaces.SharedInformerFactory) Interface { - return &group{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // InternalVersion returns a new internalversion.Interface. func (g *group) InternalVersion() internalversion.Interface { - return internalversion.New(g.SharedInformerFactory) + return internalversion.New(g.factory, g.namespace, g.tweakListOptions) } diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/autoscaling/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/autoscaling/internalversion/BUILD index c08c0fcad197..a83f095145f0 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/autoscaling/internalversion/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/autoscaling/internalversion/BUILD @@ -11,6 +11,7 @@ go_library( "horizontalpodautoscaler.go", "interface.go", ], + importpath = "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/autoscaling/internalversion", deps = [ "//pkg/apis/autoscaling:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/autoscaling/internalversion/horizontalpodautoscaler.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/autoscaling/internalversion/horizontalpodautoscaler.go index cc8e7997bc9e..4ba40676aae5 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/autoscaling/internalversion/horizontalpodautoscaler.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/autoscaling/internalversion/horizontalpodautoscaler.go @@ -38,19 +38,34 @@ type HorizontalPodAutoscalerInformer interface { } type horizontalPodAutoscalerInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewHorizontalPodAutoscalerInformer constructs a new informer for HorizontalPodAutoscaler type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewHorizontalPodAutoscalerInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredHorizontalPodAutoscalerInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredHorizontalPodAutoscalerInformer constructs a new informer for HorizontalPodAutoscaler type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredHorizontalPodAutoscalerInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Autoscaling().HorizontalPodAutoscalers(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Autoscaling().HorizontalPodAutoscalers(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewHorizontalPodAutoscalerInformer(client internalclientset.Interface, name ) } -func defaultHorizontalPodAutoscalerInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewHorizontalPodAutoscalerInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *horizontalPodAutoscalerInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredHorizontalPodAutoscalerInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *horizontalPodAutoscalerInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&autoscaling.HorizontalPodAutoscaler{}, defaultHorizontalPodAutoscalerInformer) + return f.factory.InformerFor(&autoscaling.HorizontalPodAutoscaler{}, f.defaultInformer) } func (f *horizontalPodAutoscalerInformer) Lister() internalversion.HorizontalPodAutoscalerLister { diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/autoscaling/internalversion/interface.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/autoscaling/internalversion/interface.go index e442cfb1b4d3..3ac44fa63041 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/autoscaling/internalversion/interface.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/autoscaling/internalversion/interface.go @@ -29,15 +29,17 @@ type Interface interface { } type version struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &version{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // HorizontalPodAutoscalers returns a HorizontalPodAutoscalerInformer. 
func (v *version) HorizontalPodAutoscalers() HorizontalPodAutoscalerInformer { - return &horizontalPodAutoscalerInformer{factory: v.SharedInformerFactory} + return &horizontalPodAutoscalerInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/batch/BUILD b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/batch/BUILD index 5e4cb6c270ca..536b976f4cbb 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/batch/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/batch/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["interface.go"], + importpath = "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/batch", deps = [ "//pkg/client/informers/informers_generated/internalversion/batch/internalversion:go_default_library", "//pkg/client/informers/informers_generated/internalversion/internalinterfaces:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/batch/interface.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/batch/interface.go index 72bb5e729122..ec3bef48d404 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/batch/interface.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/batch/interface.go @@ -30,15 +30,17 @@ type Interface interface { } type group struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &group{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // InternalVersion returns a new internalversion.Interface. 
func (g *group) InternalVersion() internalversion.Interface { - return internalversion.New(g.SharedInformerFactory) + return internalversion.New(g.factory, g.namespace, g.tweakListOptions) } diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/batch/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/batch/internalversion/BUILD index 595ce620461c..776728e59ad5 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/batch/internalversion/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/batch/internalversion/BUILD @@ -12,6 +12,7 @@ go_library( "interface.go", "job.go", ], + importpath = "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/batch/internalversion", deps = [ "//pkg/apis/batch:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/batch/internalversion/cronjob.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/batch/internalversion/cronjob.go index d0362f436a7b..eeff85dadc06 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/batch/internalversion/cronjob.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/batch/internalversion/cronjob.go @@ -38,19 +38,34 @@ type CronJobInformer interface { } type cronJobInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewCronJobInformer constructs a new informer for CronJob type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewCronJobInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredCronJobInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredCronJobInformer constructs a new informer for CronJob type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredCronJobInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Batch().CronJobs(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Batch().CronJobs(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewCronJobInformer(client internalclientset.Interface, namespace string, re ) } -func defaultCronJobInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewCronJobInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *cronJobInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredCronJobInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *cronJobInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&batch.CronJob{}, defaultCronJobInformer) + return f.factory.InformerFor(&batch.CronJob{}, f.defaultInformer) } func (f *cronJobInformer) Lister() internalversion.CronJobLister { diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/batch/internalversion/interface.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/batch/internalversion/interface.go index e02c25aafb0d..ca29cc526b18 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/batch/internalversion/interface.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/batch/internalversion/interface.go @@ -31,20 +31,22 @@ type Interface interface { } type version struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &version{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // CronJobs returns a CronJobInformer. func (v *version) CronJobs() CronJobInformer { - return &cronJobInformer{factory: v.SharedInformerFactory} + return &cronJobInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // Jobs returns a JobInformer. 
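Reading from the informer's cache is done through the generated listers, as the Lister() methods in these files show. A sketch using the CronJob constructor above: it assumes batchlisters.NewCronJobLister and the CronJobs(namespace).List(...) shape follow the standard generated-lister pattern, and the helper name and resync period are illustrative:

package informerexample

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/tools/cache"
	internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	batchinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/batch/internalversion"
	batchlisters "k8s.io/kubernetes/pkg/client/listers/batch/internalversion"
)

// listCachedCronJobs builds a CronJob informer for one namespace, waits for
// its cache to sync, and then reads from the cache through the lister rather
// than hitting the API server again.
func listCachedCronJobs(client internalclientset.Interface, namespace string, stopCh <-chan struct{}) error {
	informer := batchinformers.NewCronJobInformer(
		client, namespace, 30*time.Second,
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
	)
	go informer.Run(stopCh)
	if !cache.WaitForCacheSync(stopCh, informer.HasSynced) {
		return fmt.Errorf("timed out waiting for CronJob cache to sync")
	}
	lister := batchlisters.NewCronJobLister(informer.GetIndexer())
	cronJobs, err := lister.CronJobs(namespace).List(labels.Everything())
	if err != nil {
		return err
	}
	fmt.Printf("cached CronJobs in %s: %d\n", namespace, len(cronJobs))
	return nil
}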
func (v *version) Jobs() JobInformer { - return &jobInformer{factory: v.SharedInformerFactory} + return &jobInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/batch/internalversion/job.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/batch/internalversion/job.go index 56039f740a1f..1430f5b128c9 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/batch/internalversion/job.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/batch/internalversion/job.go @@ -38,19 +38,34 @@ type JobInformer interface { } type jobInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewJobInformer constructs a new informer for Job type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewJobInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredJobInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredJobInformer constructs a new informer for Job type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredJobInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Batch().Jobs(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Batch().Jobs(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewJobInformer(client internalclientset.Interface, namespace string, resync ) } -func defaultJobInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewJobInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *jobInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredJobInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *jobInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&batch.Job{}, defaultJobInformer) + return f.factory.InformerFor(&batch.Job{}, f.defaultInformer) } func (f *jobInformer) Lister() internalversion.JobLister { diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/certificates/BUILD b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/certificates/BUILD index efb814b17d29..013c84a91d03 100644 --- 
a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/certificates/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/certificates/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["interface.go"], + importpath = "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/certificates", deps = [ "//pkg/client/informers/informers_generated/internalversion/certificates/internalversion:go_default_library", "//pkg/client/informers/informers_generated/internalversion/internalinterfaces:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/certificates/interface.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/certificates/interface.go index eda8f815382f..881fc209e75f 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/certificates/interface.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/certificates/interface.go @@ -30,15 +30,17 @@ type Interface interface { } type group struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &group{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // InternalVersion returns a new internalversion.Interface. func (g *group) InternalVersion() internalversion.Interface { - return internalversion.New(g.SharedInformerFactory) + return internalversion.New(g.factory, g.namespace, g.tweakListOptions) } diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/certificates/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/certificates/internalversion/BUILD index 5af82636abdf..788efbb1d196 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/certificates/internalversion/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/certificates/internalversion/BUILD @@ -11,6 +11,7 @@ go_library( "certificatesigningrequest.go", "interface.go", ], + importpath = "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/certificates/internalversion", deps = [ "//pkg/apis/certificates:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/certificates/internalversion/certificatesigningrequest.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/certificates/internalversion/certificatesigningrequest.go index 88bda731beb0..d4608c9e3a12 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/certificates/internalversion/certificatesigningrequest.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/certificates/internalversion/certificatesigningrequest.go @@ -38,19 +38,33 @@ type CertificateSigningRequestInformer interface { } type 
certificateSigningRequestInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc } // NewCertificateSigningRequestInformer constructs a new informer for CertificateSigningRequest type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewCertificateSigningRequestInformer(client internalclientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredCertificateSigningRequestInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredCertificateSigningRequestInformer constructs a new informer for CertificateSigningRequest type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredCertificateSigningRequestInformer(client internalclientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Certificates().CertificateSigningRequests().List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Certificates().CertificateSigningRequests().Watch(options) }, }, @@ -60,12 +74,12 @@ func NewCertificateSigningRequestInformer(client internalclientset.Interface, re ) } -func defaultCertificateSigningRequestInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewCertificateSigningRequestInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *certificateSigningRequestInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredCertificateSigningRequestInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *certificateSigningRequestInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&certificates.CertificateSigningRequest{}, defaultCertificateSigningRequestInformer) + return f.factory.InformerFor(&certificates.CertificateSigningRequest{}, f.defaultInformer) } func (f *certificateSigningRequestInformer) Lister() internalversion.CertificateSigningRequestLister { diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/certificates/internalversion/interface.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/certificates/internalversion/interface.go index 6dcbb4503006..d017cfceae16 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/certificates/internalversion/interface.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/certificates/internalversion/interface.go @@ -29,15 +29,17 @@ type Interface interface { } type version struct { - internalinterfaces.SharedInformerFactory + factory 
internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &version{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // CertificateSigningRequests returns a CertificateSigningRequestInformer. func (v *version) CertificateSigningRequests() CertificateSigningRequestInformer { - return &certificateSigningRequestInformer{factory: v.SharedInformerFactory} + return &certificateSigningRequestInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/BUILD b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/BUILD index 944f76bac954..ef8ca96e8d53 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["interface.go"], + importpath = "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core", deps = [ "//pkg/client/informers/informers_generated/internalversion/core/internalversion:go_default_library", "//pkg/client/informers/informers_generated/internalversion/internalinterfaces:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/interface.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/interface.go index 329d6b529e15..12dab3466f34 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/interface.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/interface.go @@ -30,15 +30,17 @@ type Interface interface { } type group struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &group{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // InternalVersion returns a new internalversion.Interface. 
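Cluster-scoped types such as CertificateSigningRequest keep a constructor without a namespace argument; only the tweak hook is plumbed through, which matches the struct above having no namespace field. A sketch of filtering such an informer to a single object via the universally supported metadata.name field selector; the helper name and CSR name are illustrative:

package informerexample

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/tools/cache"
	internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	certinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/certificates/internalversion"
)

// watchOneCSR builds a cluster-scoped CertificateSigningRequest informer that
// only sees a single named object; no namespace argument exists for this type.
func watchOneCSR(client internalclientset.Interface, stopCh <-chan struct{}) cache.SharedIndexInformer {
	informer := certinformers.NewFilteredCertificateSigningRequestInformer(
		client, 30*time.Second,
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
		func(options *metav1.ListOptions) {
			// metadata.name is a field selector every resource supports.
			options.FieldSelector = fields.OneTermEqualSelector("metadata.name", "node-csr-example").String()
		},
	)
	go informer.Run(stopCh)
	return informer
}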
func (g *group) InternalVersion() internalversion.Interface { - return internalversion.New(g.SharedInformerFactory) + return internalversion.New(g.factory, g.namespace, g.tweakListOptions) } diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/BUILD index 2c4ac26d23fb..6724faa6a868 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/BUILD @@ -26,8 +26,9 @@ go_library( "service.go", "serviceaccount.go", ], + importpath = "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/client/informers/informers_generated/internalversion/internalinterfaces:go_default_library", "//pkg/client/listers/core/internalversion:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/componentstatus.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/componentstatus.go index 4044e1abad37..805e45bb53d3 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/componentstatus.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/componentstatus.go @@ -23,7 +23,7 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" cache "k8s.io/client-go/tools/cache" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/core/internalversion" @@ -38,34 +38,48 @@ type ComponentStatusInformer interface { } type componentStatusInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc } // NewComponentStatusInformer constructs a new informer for ComponentStatus type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewComponentStatusInformer(client internalclientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredComponentStatusInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredComponentStatusInformer constructs a new informer for ComponentStatus type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredComponentStatusInformer(client internalclientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Core().ComponentStatuses().List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Core().ComponentStatuses().Watch(options) }, }, - &api.ComponentStatus{}, + &core.ComponentStatus{}, resyncPeriod, indexers, ) } -func defaultComponentStatusInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewComponentStatusInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *componentStatusInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredComponentStatusInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *componentStatusInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&api.ComponentStatus{}, defaultComponentStatusInformer) + return f.factory.InformerFor(&core.ComponentStatus{}, f.defaultInformer) } func (f *componentStatusInformer) Lister() internalversion.ComponentStatusLister { diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/configmap.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/configmap.go index bfd2af64b273..f96cdf8d9d39 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/configmap.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/configmap.go @@ -23,7 +23,7 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" cache "k8s.io/client-go/tools/cache" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/core/internalversion" @@ -38,34 +38,49 @@ type ConfigMapInformer interface { } type configMapInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewConfigMapInformer constructs a new informer for ConfigMap type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewConfigMapInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredConfigMapInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredConfigMapInformer constructs a new informer for ConfigMap type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredConfigMapInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Core().ConfigMaps(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Core().ConfigMaps(namespace).Watch(options) }, }, - &api.ConfigMap{}, + &core.ConfigMap{}, resyncPeriod, indexers, ) } -func defaultConfigMapInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewConfigMapInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *configMapInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredConfigMapInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *configMapInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&api.ConfigMap{}, defaultConfigMapInformer) + return f.factory.InformerFor(&core.ConfigMap{}, f.defaultInformer) } func (f *configMapInformer) Lister() internalversion.ConfigMapLister { diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/endpoints.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/endpoints.go index bb9991256866..f45d518174ef 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/endpoints.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/endpoints.go @@ -23,7 +23,7 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" cache "k8s.io/client-go/tools/cache" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/core/internalversion" @@ -38,34 +38,49 @@ type EndpointsInformer interface { } type endpointsInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewEndpointsInformer constructs a new informer for Endpoints type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. 
func NewEndpointsInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredEndpointsInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredEndpointsInformer constructs a new informer for Endpoints type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredEndpointsInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Core().Endpoints(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Core().Endpoints(namespace).Watch(options) }, }, - &api.Endpoints{}, + &core.Endpoints{}, resyncPeriod, indexers, ) } -func defaultEndpointsInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewEndpointsInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *endpointsInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredEndpointsInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *endpointsInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&api.Endpoints{}, defaultEndpointsInformer) + return f.factory.InformerFor(&core.Endpoints{}, f.defaultInformer) } func (f *endpointsInformer) Lister() internalversion.EndpointsLister { diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/event.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/event.go index 7b7108b61da5..936b60186640 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/event.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/event.go @@ -23,7 +23,7 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" cache "k8s.io/client-go/tools/cache" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/core/internalversion" @@ -38,34 +38,49 @@ type EventInformer interface { } type eventInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewEventInformer constructs a new informer for Event type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. 
This reduces memory footprint and number of connections to the server. func NewEventInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredEventInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredEventInformer constructs a new informer for Event type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredEventInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Core().Events(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Core().Events(namespace).Watch(options) }, }, - &api.Event{}, + &core.Event{}, resyncPeriod, indexers, ) } -func defaultEventInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewEventInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *eventInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredEventInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *eventInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&api.Event{}, defaultEventInformer) + return f.factory.InformerFor(&core.Event{}, f.defaultInformer) } func (f *eventInformer) Lister() internalversion.EventLister { diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/interface.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/interface.go index a9f5f16335ed..295600cb5a98 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/interface.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/interface.go @@ -59,90 +59,92 @@ type Interface interface { } type version struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &version{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // ComponentStatuses returns a ComponentStatusInformer. func (v *version) ComponentStatuses() ComponentStatusInformer { - return &componentStatusInformer{factory: v.SharedInformerFactory} + return &componentStatusInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } // ConfigMaps returns a ConfigMapInformer. 
func (v *version) ConfigMaps() ConfigMapInformer { - return &configMapInformer{factory: v.SharedInformerFactory} + return &configMapInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // Endpoints returns a EndpointsInformer. func (v *version) Endpoints() EndpointsInformer { - return &endpointsInformer{factory: v.SharedInformerFactory} + return &endpointsInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // Events returns a EventInformer. func (v *version) Events() EventInformer { - return &eventInformer{factory: v.SharedInformerFactory} + return &eventInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // LimitRanges returns a LimitRangeInformer. func (v *version) LimitRanges() LimitRangeInformer { - return &limitRangeInformer{factory: v.SharedInformerFactory} + return &limitRangeInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // Namespaces returns a NamespaceInformer. func (v *version) Namespaces() NamespaceInformer { - return &namespaceInformer{factory: v.SharedInformerFactory} + return &namespaceInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } // Nodes returns a NodeInformer. func (v *version) Nodes() NodeInformer { - return &nodeInformer{factory: v.SharedInformerFactory} + return &nodeInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } // PersistentVolumes returns a PersistentVolumeInformer. func (v *version) PersistentVolumes() PersistentVolumeInformer { - return &persistentVolumeInformer{factory: v.SharedInformerFactory} + return &persistentVolumeInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } // PersistentVolumeClaims returns a PersistentVolumeClaimInformer. func (v *version) PersistentVolumeClaims() PersistentVolumeClaimInformer { - return &persistentVolumeClaimInformer{factory: v.SharedInformerFactory} + return &persistentVolumeClaimInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // Pods returns a PodInformer. func (v *version) Pods() PodInformer { - return &podInformer{factory: v.SharedInformerFactory} + return &podInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // PodTemplates returns a PodTemplateInformer. func (v *version) PodTemplates() PodTemplateInformer { - return &podTemplateInformer{factory: v.SharedInformerFactory} + return &podTemplateInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // ReplicationControllers returns a ReplicationControllerInformer. func (v *version) ReplicationControllers() ReplicationControllerInformer { - return &replicationControllerInformer{factory: v.SharedInformerFactory} + return &replicationControllerInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // ResourceQuotas returns a ResourceQuotaInformer. func (v *version) ResourceQuotas() ResourceQuotaInformer { - return &resourceQuotaInformer{factory: v.SharedInformerFactory} + return &resourceQuotaInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // Secrets returns a SecretInformer. func (v *version) Secrets() SecretInformer { - return &secretInformer{factory: v.SharedInformerFactory} + return &secretInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // Services returns a ServiceInformer. 
func (v *version) Services() ServiceInformer { - return &serviceInformer{factory: v.SharedInformerFactory} + return &serviceInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // ServiceAccounts returns a ServiceAccountInformer. func (v *version) ServiceAccounts() ServiceAccountInformer { - return &serviceAccountInformer{factory: v.SharedInformerFactory} + return &serviceAccountInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/limitrange.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/limitrange.go index 8eea5642fc76..e167da6e37b5 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/limitrange.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/limitrange.go @@ -23,7 +23,7 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" cache "k8s.io/client-go/tools/cache" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/core/internalversion" @@ -38,34 +38,49 @@ type LimitRangeInformer interface { } type limitRangeInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewLimitRangeInformer constructs a new informer for LimitRange type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewLimitRangeInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredLimitRangeInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredLimitRangeInformer constructs a new informer for LimitRange type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredLimitRangeInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Core().LimitRanges(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Core().LimitRanges(namespace).Watch(options) }, }, - &api.LimitRange{}, + &core.LimitRange{}, resyncPeriod, indexers, ) } -func defaultLimitRangeInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewLimitRangeInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *limitRangeInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredLimitRangeInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *limitRangeInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&api.LimitRange{}, defaultLimitRangeInformer) + return f.factory.InformerFor(&core.LimitRange{}, f.defaultInformer) } func (f *limitRangeInformer) Lister() internalversion.LimitRangeLister { diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/namespace.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/namespace.go index 8610b66c7877..da9f88eaa8c1 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/namespace.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/namespace.go @@ -23,7 +23,7 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" cache "k8s.io/client-go/tools/cache" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/core/internalversion" @@ -38,34 +38,48 @@ type NamespaceInformer interface { } type namespaceInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc } // NewNamespaceInformer constructs a new informer for Namespace type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewNamespaceInformer(client internalclientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredNamespaceInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredNamespaceInformer constructs a new informer for Namespace type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. 
This reduces memory footprint and number of connections to the server. +func NewFilteredNamespaceInformer(client internalclientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Core().Namespaces().List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Core().Namespaces().Watch(options) }, }, - &api.Namespace{}, + &core.Namespace{}, resyncPeriod, indexers, ) } -func defaultNamespaceInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewNamespaceInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *namespaceInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredNamespaceInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *namespaceInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&api.Namespace{}, defaultNamespaceInformer) + return f.factory.InformerFor(&core.Namespace{}, f.defaultInformer) } func (f *namespaceInformer) Lister() internalversion.NamespaceLister { diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/node.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/node.go index bc8432c3a7a9..5a112bf89864 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/node.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/node.go @@ -23,7 +23,7 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" cache "k8s.io/client-go/tools/cache" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/core/internalversion" @@ -38,34 +38,48 @@ type NodeInformer interface { } type nodeInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc } // NewNodeInformer constructs a new informer for Node type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewNodeInformer(client internalclientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredNodeInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredNodeInformer constructs a new informer for Node type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredNodeInformer(client internalclientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Core().Nodes().List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Core().Nodes().Watch(options) }, }, - &api.Node{}, + &core.Node{}, resyncPeriod, indexers, ) } -func defaultNodeInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewNodeInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *nodeInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredNodeInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *nodeInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&api.Node{}, defaultNodeInformer) + return f.factory.InformerFor(&core.Node{}, f.defaultInformer) } func (f *nodeInformer) Lister() internalversion.NodeLister { diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/persistentvolume.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/persistentvolume.go index 7f344f1fd8cc..ecccd769ff7e 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/persistentvolume.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/persistentvolume.go @@ -23,7 +23,7 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" cache "k8s.io/client-go/tools/cache" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/core/internalversion" @@ -38,34 +38,48 @@ type PersistentVolumeInformer interface { } type persistentVolumeInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc } // NewPersistentVolumeInformer constructs a new informer for PersistentVolume type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewPersistentVolumeInformer(client internalclientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPersistentVolumeInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredPersistentVolumeInformer constructs a new informer for PersistentVolume type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredPersistentVolumeInformer(client internalclientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Core().PersistentVolumes().List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Core().PersistentVolumes().Watch(options) }, }, - &api.PersistentVolume{}, + &core.PersistentVolume{}, resyncPeriod, indexers, ) } -func defaultPersistentVolumeInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewPersistentVolumeInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *persistentVolumeInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPersistentVolumeInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *persistentVolumeInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&api.PersistentVolume{}, defaultPersistentVolumeInformer) + return f.factory.InformerFor(&core.PersistentVolume{}, f.defaultInformer) } func (f *persistentVolumeInformer) Lister() internalversion.PersistentVolumeLister { diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/persistentvolumeclaim.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/persistentvolumeclaim.go index e32130ba3b88..bc0c55a12c25 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/persistentvolumeclaim.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/persistentvolumeclaim.go @@ -23,7 +23,7 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" cache "k8s.io/client-go/tools/cache" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/core/internalversion" @@ -38,34 +38,49 @@ type PersistentVolumeClaimInformer interface { } type persistentVolumeClaimInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewPersistentVolumeClaimInformer constructs a new informer for PersistentVolumeClaim type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. 
func NewPersistentVolumeClaimInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPersistentVolumeClaimInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredPersistentVolumeClaimInformer constructs a new informer for PersistentVolumeClaim type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredPersistentVolumeClaimInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Core().PersistentVolumeClaims(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Core().PersistentVolumeClaims(namespace).Watch(options) }, }, - &api.PersistentVolumeClaim{}, + &core.PersistentVolumeClaim{}, resyncPeriod, indexers, ) } -func defaultPersistentVolumeClaimInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewPersistentVolumeClaimInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *persistentVolumeClaimInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPersistentVolumeClaimInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *persistentVolumeClaimInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&api.PersistentVolumeClaim{}, defaultPersistentVolumeClaimInformer) + return f.factory.InformerFor(&core.PersistentVolumeClaim{}, f.defaultInformer) } func (f *persistentVolumeClaimInformer) Lister() internalversion.PersistentVolumeClaimLister { diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/pod.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/pod.go index a5bf7be529e4..cf72a57fcd4b 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/pod.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/pod.go @@ -23,7 +23,7 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" cache "k8s.io/client-go/tools/cache" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/core/internalversion" @@ -38,34 +38,49 @@ type PodInformer interface { } type podInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions 
internalinterfaces.TweakListOptionsFunc + namespace string } // NewPodInformer constructs a new informer for Pod type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewPodInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPodInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredPodInformer constructs a new informer for Pod type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredPodInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Core().Pods(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Core().Pods(namespace).Watch(options) }, }, - &api.Pod{}, + &core.Pod{}, resyncPeriod, indexers, ) } -func defaultPodInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewPodInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *podInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPodInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *podInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&api.Pod{}, defaultPodInformer) + return f.factory.InformerFor(&core.Pod{}, f.defaultInformer) } func (f *podInformer) Lister() internalversion.PodLister { diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/podtemplate.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/podtemplate.go index 295fdc174c83..536fafd63f8b 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/podtemplate.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/podtemplate.go @@ -23,7 +23,7 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" cache "k8s.io/client-go/tools/cache" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/core/internalversion" @@ -38,34 +38,49 @@ type PodTemplateInformer interface { } type podTemplateInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions 
internalinterfaces.TweakListOptionsFunc + namespace string } // NewPodTemplateInformer constructs a new informer for PodTemplate type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewPodTemplateInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPodTemplateInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredPodTemplateInformer constructs a new informer for PodTemplate type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredPodTemplateInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Core().PodTemplates(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Core().PodTemplates(namespace).Watch(options) }, }, - &api.PodTemplate{}, + &core.PodTemplate{}, resyncPeriod, indexers, ) } -func defaultPodTemplateInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewPodTemplateInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *podTemplateInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPodTemplateInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *podTemplateInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&api.PodTemplate{}, defaultPodTemplateInformer) + return f.factory.InformerFor(&core.PodTemplate{}, f.defaultInformer) } func (f *podTemplateInformer) Lister() internalversion.PodTemplateLister { diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/replicationcontroller.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/replicationcontroller.go index 592d6bcc4728..4741b32b41cd 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/replicationcontroller.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/replicationcontroller.go @@ -23,7 +23,7 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" cache "k8s.io/client-go/tools/cache" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/core/internalversion" @@ -38,34 +38,49 @@ type 
ReplicationControllerInformer interface { } type replicationControllerInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewReplicationControllerInformer constructs a new informer for ReplicationController type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewReplicationControllerInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredReplicationControllerInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredReplicationControllerInformer constructs a new informer for ReplicationController type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredReplicationControllerInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Core().ReplicationControllers(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Core().ReplicationControllers(namespace).Watch(options) }, }, - &api.ReplicationController{}, + &core.ReplicationController{}, resyncPeriod, indexers, ) } -func defaultReplicationControllerInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewReplicationControllerInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *replicationControllerInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredReplicationControllerInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *replicationControllerInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&api.ReplicationController{}, defaultReplicationControllerInformer) + return f.factory.InformerFor(&core.ReplicationController{}, f.defaultInformer) } func (f *replicationControllerInformer) Lister() internalversion.ReplicationControllerLister { diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/resourcequota.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/resourcequota.go index 04327b479ed5..2a50f2c498b2 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/resourcequota.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/resourcequota.go @@ -23,7 +23,7 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" cache 
"k8s.io/client-go/tools/cache" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/core/internalversion" @@ -38,34 +38,49 @@ type ResourceQuotaInformer interface { } type resourceQuotaInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewResourceQuotaInformer constructs a new informer for ResourceQuota type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewResourceQuotaInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredResourceQuotaInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredResourceQuotaInformer constructs a new informer for ResourceQuota type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredResourceQuotaInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Core().ResourceQuotas(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Core().ResourceQuotas(namespace).Watch(options) }, }, - &api.ResourceQuota{}, + &core.ResourceQuota{}, resyncPeriod, indexers, ) } -func defaultResourceQuotaInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewResourceQuotaInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *resourceQuotaInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredResourceQuotaInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *resourceQuotaInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&api.ResourceQuota{}, defaultResourceQuotaInformer) + return f.factory.InformerFor(&core.ResourceQuota{}, f.defaultInformer) } func (f *resourceQuotaInformer) Lister() internalversion.ResourceQuotaLister { diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/secret.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/secret.go index 1dae56770901..35c4d2067d17 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/secret.go +++ 
b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/secret.go @@ -23,7 +23,7 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" cache "k8s.io/client-go/tools/cache" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/core/internalversion" @@ -38,34 +38,49 @@ type SecretInformer interface { } type secretInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewSecretInformer constructs a new informer for Secret type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewSecretInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredSecretInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredSecretInformer constructs a new informer for Secret type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredSecretInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Core().Secrets(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Core().Secrets(namespace).Watch(options) }, }, - &api.Secret{}, + &core.Secret{}, resyncPeriod, indexers, ) } -func defaultSecretInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewSecretInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *secretInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredSecretInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *secretInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&api.Secret{}, defaultSecretInformer) + return f.factory.InformerFor(&core.Secret{}, f.defaultInformer) } func (f *secretInformer) Lister() internalversion.SecretLister { diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/service.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/service.go index 0e61757540e4..086529166d68 100644 --- 
a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/service.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/service.go @@ -23,7 +23,7 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" cache "k8s.io/client-go/tools/cache" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/core/internalversion" @@ -38,34 +38,49 @@ type ServiceInformer interface { } type serviceInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewServiceInformer constructs a new informer for Service type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewServiceInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredServiceInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredServiceInformer constructs a new informer for Service type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredServiceInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Core().Services(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Core().Services(namespace).Watch(options) }, }, - &api.Service{}, + &core.Service{}, resyncPeriod, indexers, ) } -func defaultServiceInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewServiceInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *serviceInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredServiceInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *serviceInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&api.Service{}, defaultServiceInformer) + return f.factory.InformerFor(&core.Service{}, f.defaultInformer) } func (f *serviceInformer) Lister() internalversion.ServiceLister { diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/serviceaccount.go 
b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/serviceaccount.go index dce710de768b..54d4509c33b8 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/serviceaccount.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion/serviceaccount.go @@ -23,7 +23,7 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" cache "k8s.io/client-go/tools/cache" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/core/internalversion" @@ -38,34 +38,49 @@ type ServiceAccountInformer interface { } type serviceAccountInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewServiceAccountInformer constructs a new informer for ServiceAccount type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewServiceAccountInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredServiceAccountInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredServiceAccountInformer constructs a new informer for ServiceAccount type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredServiceAccountInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Core().ServiceAccounts(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Core().ServiceAccounts(namespace).Watch(options) }, }, - &api.ServiceAccount{}, + &core.ServiceAccount{}, resyncPeriod, indexers, ) } -func defaultServiceAccountInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewServiceAccountInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *serviceAccountInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredServiceAccountInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *serviceAccountInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&api.ServiceAccount{}, defaultServiceAccountInformer) + return f.factory.InformerFor(&core.ServiceAccount{}, f.defaultInformer) } func (f *serviceAccountInformer) Lister() internalversion.ServiceAccountLister { diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions/BUILD b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions/BUILD index 7ecba36be6d0..df9dfcf4b0fa 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["interface.go"], + importpath = "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions", deps = [ "//pkg/client/informers/informers_generated/internalversion/extensions/internalversion:go_default_library", "//pkg/client/informers/informers_generated/internalversion/internalinterfaces:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions/interface.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions/interface.go index acc422e79899..b909a563ff39 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions/interface.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions/interface.go @@ -30,15 +30,17 @@ type Interface interface { } type group struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. 
-func New(f internalinterfaces.SharedInformerFactory) Interface { - return &group{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // InternalVersion returns a new internalversion.Interface. func (g *group) InternalVersion() internalversion.Interface { - return internalversion.New(g.SharedInformerFactory) + return internalversion.New(g.factory, g.namespace, g.tweakListOptions) } diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/BUILD index 58793ed066b7..4b31f38ce9bf 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/BUILD @@ -14,8 +14,8 @@ go_library( "interface.go", "podsecuritypolicy.go", "replicaset.go", - "thirdpartyresource.go", ], + importpath = "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions/internalversion", deps = [ "//pkg/apis/extensions:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/daemonset.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/daemonset.go index 401a08551f55..21479d96ee99 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/daemonset.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/daemonset.go @@ -38,19 +38,34 @@ type DaemonSetInformer interface { } type daemonSetInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewDaemonSetInformer constructs a new informer for DaemonSet type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewDaemonSetInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredDaemonSetInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredDaemonSetInformer constructs a new informer for DaemonSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredDaemonSetInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Extensions().DaemonSets(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Extensions().DaemonSets(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewDaemonSetInformer(client internalclientset.Interface, namespace string, ) } -func defaultDaemonSetInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewDaemonSetInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *daemonSetInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredDaemonSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *daemonSetInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&extensions.DaemonSet{}, defaultDaemonSetInformer) + return f.factory.InformerFor(&extensions.DaemonSet{}, f.defaultInformer) } func (f *daemonSetInformer) Lister() internalversion.DaemonSetLister { diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/deployment.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/deployment.go index 872c211e00a8..6b1044a465d4 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/deployment.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/deployment.go @@ -38,19 +38,34 @@ type DeploymentInformer interface { } type deploymentInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewDeploymentInformer constructs a new informer for Deployment type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewDeploymentInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredDeploymentInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredDeploymentInformer constructs a new informer for Deployment type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredDeploymentInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Extensions().Deployments(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Extensions().Deployments(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewDeploymentInformer(client internalclientset.Interface, namespace string, ) } -func defaultDeploymentInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewDeploymentInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *deploymentInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredDeploymentInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *deploymentInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&extensions.Deployment{}, defaultDeploymentInformer) + return f.factory.InformerFor(&extensions.Deployment{}, f.defaultInformer) } func (f *deploymentInformer) Lister() internalversion.DeploymentLister { diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/ingress.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/ingress.go index 8c1e22d71cf7..f4024449441d 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/ingress.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/ingress.go @@ -38,19 +38,34 @@ type IngressInformer interface { } type ingressInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewIngressInformer constructs a new informer for Ingress type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewIngressInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredIngressInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredIngressInformer constructs a new informer for Ingress type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredIngressInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Extensions().Ingresses(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Extensions().Ingresses(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewIngressInformer(client internalclientset.Interface, namespace string, re ) } -func defaultIngressInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewIngressInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *ingressInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredIngressInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *ingressInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&extensions.Ingress{}, defaultIngressInformer) + return f.factory.InformerFor(&extensions.Ingress{}, f.defaultInformer) } func (f *ingressInformer) Lister() internalversion.IngressLister { diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/interface.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/interface.go index 04e131c807b7..2c5f3212d42e 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/interface.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/interface.go @@ -34,45 +34,40 @@ type Interface interface { PodSecurityPolicies() PodSecurityPolicyInformer // ReplicaSets returns a ReplicaSetInformer. ReplicaSets() ReplicaSetInformer - // ThirdPartyResources returns a ThirdPartyResourceInformer. - ThirdPartyResources() ThirdPartyResourceInformer } type version struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &version{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // DaemonSets returns a DaemonSetInformer. func (v *version) DaemonSets() DaemonSetInformer { - return &daemonSetInformer{factory: v.SharedInformerFactory} + return &daemonSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // Deployments returns a DeploymentInformer. 
func (v *version) Deployments() DeploymentInformer { - return &deploymentInformer{factory: v.SharedInformerFactory} + return &deploymentInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // Ingresses returns a IngressInformer. func (v *version) Ingresses() IngressInformer { - return &ingressInformer{factory: v.SharedInformerFactory} + return &ingressInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // PodSecurityPolicies returns a PodSecurityPolicyInformer. func (v *version) PodSecurityPolicies() PodSecurityPolicyInformer { - return &podSecurityPolicyInformer{factory: v.SharedInformerFactory} + return &podSecurityPolicyInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } // ReplicaSets returns a ReplicaSetInformer. func (v *version) ReplicaSets() ReplicaSetInformer { - return &replicaSetInformer{factory: v.SharedInformerFactory} -} - -// ThirdPartyResources returns a ThirdPartyResourceInformer. -func (v *version) ThirdPartyResources() ThirdPartyResourceInformer { - return &thirdPartyResourceInformer{factory: v.SharedInformerFactory} + return &replicaSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/podsecuritypolicy.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/podsecuritypolicy.go index 07d30d0dd280..c1015ae1018f 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/podsecuritypolicy.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/podsecuritypolicy.go @@ -38,19 +38,33 @@ type PodSecurityPolicyInformer interface { } type podSecurityPolicyInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc } // NewPodSecurityPolicyInformer constructs a new informer for PodSecurityPolicy type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewPodSecurityPolicyInformer(client internalclientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPodSecurityPolicyInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredPodSecurityPolicyInformer constructs a new informer for PodSecurityPolicy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredPodSecurityPolicyInformer(client internalclientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Extensions().PodSecurityPolicies().List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Extensions().PodSecurityPolicies().Watch(options) }, }, @@ -60,12 +74,12 @@ func NewPodSecurityPolicyInformer(client internalclientset.Interface, resyncPeri ) } -func defaultPodSecurityPolicyInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewPodSecurityPolicyInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *podSecurityPolicyInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPodSecurityPolicyInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *podSecurityPolicyInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&extensions.PodSecurityPolicy{}, defaultPodSecurityPolicyInformer) + return f.factory.InformerFor(&extensions.PodSecurityPolicy{}, f.defaultInformer) } func (f *podSecurityPolicyInformer) Lister() internalversion.PodSecurityPolicyLister { diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/replicaset.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/replicaset.go index c5deda6ce612..7a4b889f6d0d 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/replicaset.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/replicaset.go @@ -38,19 +38,34 @@ type ReplicaSetInformer interface { } type replicaSetInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewReplicaSetInformer constructs a new informer for ReplicaSet type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewReplicaSetInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredReplicaSetInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredReplicaSetInformer constructs a new informer for ReplicaSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredReplicaSetInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Extensions().ReplicaSets(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Extensions().ReplicaSets(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewReplicaSetInformer(client internalclientset.Interface, namespace string, ) } -func defaultReplicaSetInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewReplicaSetInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *replicaSetInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredReplicaSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *replicaSetInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&extensions.ReplicaSet{}, defaultReplicaSetInformer) + return f.factory.InformerFor(&extensions.ReplicaSet{}, f.defaultInformer) } func (f *replicaSetInformer) Lister() internalversion.ReplicaSetLister { diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/thirdpartyresource.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/thirdpartyresource.go deleted file mode 100644 index 7b121bd4c505..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions/internalversion/thirdpartyresource.go +++ /dev/null @@ -1,73 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was automatically generated by informer-gen - -package internalversion - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" - extensions "k8s.io/kubernetes/pkg/apis/extensions" - internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" - internalversion "k8s.io/kubernetes/pkg/client/listers/extensions/internalversion" - time "time" -) - -// ThirdPartyResourceInformer provides access to a shared informer and lister for -// ThirdPartyResources. 
-type ThirdPartyResourceInformer interface { - Informer() cache.SharedIndexInformer - Lister() internalversion.ThirdPartyResourceLister -} - -type thirdPartyResourceInformer struct { - factory internalinterfaces.SharedInformerFactory -} - -// NewThirdPartyResourceInformer constructs a new informer for ThirdPartyResource type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewThirdPartyResourceInformer(client internalclientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - return client.Extensions().ThirdPartyResources().List(options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - return client.Extensions().ThirdPartyResources().Watch(options) - }, - }, - &extensions.ThirdPartyResource{}, - resyncPeriod, - indexers, - ) -} - -func defaultThirdPartyResourceInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewThirdPartyResourceInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) -} - -func (f *thirdPartyResourceInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&extensions.ThirdPartyResource{}, defaultThirdPartyResourceInformer) -} - -func (f *thirdPartyResourceInformer) Lister() internalversion.ThirdPartyResourceLister { - return internalversion.NewThirdPartyResourceLister(f.Informer().GetIndexer()) -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/factory.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/factory.go index c5dba5cc025f..e66407f88cca 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/factory.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/factory.go @@ -19,6 +19,7 @@ limitations under the License. package internalversion import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" cache "k8s.io/client-go/tools/cache" @@ -43,9 +44,11 @@ import ( ) type sharedInformerFactory struct { - client internalclientset.Interface - lock sync.Mutex - defaultResync time.Duration + client internalclientset.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration informers map[reflect.Type]cache.SharedIndexInformer // startedInformers is used for tracking which informers have been started. @@ -55,8 +58,17 @@ type sharedInformerFactory struct { // NewSharedInformerFactory constructs a new instance of sharedInformerFactory func NewSharedInformerFactory(client internalclientset.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewFilteredSharedInformerFactory(client, defaultResync, v1.NamespaceAll, nil) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. 
+func NewFilteredSharedInformerFactory(client internalclientset.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { return &sharedInformerFactory{ client: client, + namespace: namespace, + tweakListOptions: tweakListOptions, defaultResync: defaultResync, informers: make(map[reflect.Type]cache.SharedIndexInformer), startedInformers: make(map[reflect.Type]bool), @@ -138,53 +150,53 @@ type SharedInformerFactory interface { } func (f *sharedInformerFactory) Admissionregistration() admissionregistration.Interface { - return admissionregistration.New(f) + return admissionregistration.New(f, f.namespace, f.tweakListOptions) } func (f *sharedInformerFactory) Apps() apps.Interface { - return apps.New(f) + return apps.New(f, f.namespace, f.tweakListOptions) } func (f *sharedInformerFactory) Autoscaling() autoscaling.Interface { - return autoscaling.New(f) + return autoscaling.New(f, f.namespace, f.tweakListOptions) } func (f *sharedInformerFactory) Batch() batch.Interface { - return batch.New(f) + return batch.New(f, f.namespace, f.tweakListOptions) } func (f *sharedInformerFactory) Certificates() certificates.Interface { - return certificates.New(f) + return certificates.New(f, f.namespace, f.tweakListOptions) } func (f *sharedInformerFactory) Core() core.Interface { - return core.New(f) + return core.New(f, f.namespace, f.tweakListOptions) } func (f *sharedInformerFactory) Extensions() extensions.Interface { - return extensions.New(f) + return extensions.New(f, f.namespace, f.tweakListOptions) } func (f *sharedInformerFactory) Networking() networking.Interface { - return networking.New(f) + return networking.New(f, f.namespace, f.tweakListOptions) } func (f *sharedInformerFactory) Policy() policy.Interface { - return policy.New(f) + return policy.New(f, f.namespace, f.tweakListOptions) } func (f *sharedInformerFactory) Rbac() rbac.Interface { - return rbac.New(f) + return rbac.New(f, f.namespace, f.tweakListOptions) } func (f *sharedInformerFactory) Scheduling() scheduling.Interface { - return scheduling.New(f) + return scheduling.New(f, f.namespace, f.tweakListOptions) } func (f *sharedInformerFactory) Settings() settings.Interface { - return settings.New(f) + return settings.New(f, f.namespace, f.tweakListOptions) } func (f *sharedInformerFactory) Storage() storage.Interface { - return storage.New(f) + return storage.New(f, f.namespace, f.tweakListOptions) } diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/generic.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/generic.go index 8fa88584b7fa..1de5711673fe 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/generic.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/generic.go @@ -22,12 +22,12 @@ import ( "fmt" schema "k8s.io/apimachinery/pkg/runtime/schema" cache "k8s.io/client-go/tools/cache" - api "k8s.io/kubernetes/pkg/api" admissionregistration "k8s.io/kubernetes/pkg/apis/admissionregistration" apps "k8s.io/kubernetes/pkg/apis/apps" autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" batch "k8s.io/kubernetes/pkg/apis/batch" certificates "k8s.io/kubernetes/pkg/apis/certificates" + core "k8s.io/kubernetes/pkg/apis/core" extensions "k8s.io/kubernetes/pkg/apis/extensions" networking "k8s.io/kubernetes/pkg/apis/networking" policy "k8s.io/kubernetes/pkg/apis/policy" @@ -63,67 +63,69 @@ func (f 
*genericInformer) Lister() cache.GenericLister { // TODO extend this to unknown resources with a client pool func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { switch resource { - // Group=Admissionregistration, Version=InternalVersion - case admissionregistration.SchemeGroupVersion.WithResource("externaladmissionhookconfigurations"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().InternalVersion().ExternalAdmissionHookConfigurations().Informer()}, nil + // Group=admissionregistration.k8s.io, Version=internalVersion case admissionregistration.SchemeGroupVersion.WithResource("initializerconfigurations"): return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().InternalVersion().InitializerConfigurations().Informer()}, nil + case admissionregistration.SchemeGroupVersion.WithResource("mutatingwebhookconfigurations"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().InternalVersion().MutatingWebhookConfigurations().Informer()}, nil + case admissionregistration.SchemeGroupVersion.WithResource("validatingwebhookconfigurations"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().InternalVersion().ValidatingWebhookConfigurations().Informer()}, nil - // Group=Apps, Version=InternalVersion + // Group=apps, Version=internalVersion case apps.SchemeGroupVersion.WithResource("controllerrevisions"): return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().InternalVersion().ControllerRevisions().Informer()}, nil case apps.SchemeGroupVersion.WithResource("statefulsets"): return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().InternalVersion().StatefulSets().Informer()}, nil - // Group=Autoscaling, Version=InternalVersion + // Group=autoscaling, Version=internalVersion case autoscaling.SchemeGroupVersion.WithResource("horizontalpodautoscalers"): return &genericInformer{resource: resource.GroupResource(), informer: f.Autoscaling().InternalVersion().HorizontalPodAutoscalers().Informer()}, nil - // Group=Batch, Version=InternalVersion + // Group=batch, Version=internalVersion case batch.SchemeGroupVersion.WithResource("cronjobs"): return &genericInformer{resource: resource.GroupResource(), informer: f.Batch().InternalVersion().CronJobs().Informer()}, nil case batch.SchemeGroupVersion.WithResource("jobs"): return &genericInformer{resource: resource.GroupResource(), informer: f.Batch().InternalVersion().Jobs().Informer()}, nil - // Group=Certificates, Version=InternalVersion + // Group=certificates.k8s.io, Version=internalVersion case certificates.SchemeGroupVersion.WithResource("certificatesigningrequests"): return &genericInformer{resource: resource.GroupResource(), informer: f.Certificates().InternalVersion().CertificateSigningRequests().Informer()}, nil - // Group=Core, Version=InternalVersion - case api.SchemeGroupVersion.WithResource("componentstatuses"): + // Group=core, Version=internalVersion + case core.SchemeGroupVersion.WithResource("componentstatuses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Core().InternalVersion().ComponentStatuses().Informer()}, nil - case api.SchemeGroupVersion.WithResource("configmaps"): + case core.SchemeGroupVersion.WithResource("configmaps"): return &genericInformer{resource: resource.GroupResource(), informer: 
f.Core().InternalVersion().ConfigMaps().Informer()}, nil - case api.SchemeGroupVersion.WithResource("endpoints"): + case core.SchemeGroupVersion.WithResource("endpoints"): return &genericInformer{resource: resource.GroupResource(), informer: f.Core().InternalVersion().Endpoints().Informer()}, nil - case api.SchemeGroupVersion.WithResource("events"): + case core.SchemeGroupVersion.WithResource("events"): return &genericInformer{resource: resource.GroupResource(), informer: f.Core().InternalVersion().Events().Informer()}, nil - case api.SchemeGroupVersion.WithResource("limitranges"): + case core.SchemeGroupVersion.WithResource("limitranges"): return &genericInformer{resource: resource.GroupResource(), informer: f.Core().InternalVersion().LimitRanges().Informer()}, nil - case api.SchemeGroupVersion.WithResource("namespaces"): + case core.SchemeGroupVersion.WithResource("namespaces"): return &genericInformer{resource: resource.GroupResource(), informer: f.Core().InternalVersion().Namespaces().Informer()}, nil - case api.SchemeGroupVersion.WithResource("nodes"): + case core.SchemeGroupVersion.WithResource("nodes"): return &genericInformer{resource: resource.GroupResource(), informer: f.Core().InternalVersion().Nodes().Informer()}, nil - case api.SchemeGroupVersion.WithResource("persistentvolumes"): + case core.SchemeGroupVersion.WithResource("persistentvolumes"): return &genericInformer{resource: resource.GroupResource(), informer: f.Core().InternalVersion().PersistentVolumes().Informer()}, nil - case api.SchemeGroupVersion.WithResource("persistentvolumeclaims"): + case core.SchemeGroupVersion.WithResource("persistentvolumeclaims"): return &genericInformer{resource: resource.GroupResource(), informer: f.Core().InternalVersion().PersistentVolumeClaims().Informer()}, nil - case api.SchemeGroupVersion.WithResource("pods"): + case core.SchemeGroupVersion.WithResource("pods"): return &genericInformer{resource: resource.GroupResource(), informer: f.Core().InternalVersion().Pods().Informer()}, nil - case api.SchemeGroupVersion.WithResource("podtemplates"): + case core.SchemeGroupVersion.WithResource("podtemplates"): return &genericInformer{resource: resource.GroupResource(), informer: f.Core().InternalVersion().PodTemplates().Informer()}, nil - case api.SchemeGroupVersion.WithResource("replicationcontrollers"): + case core.SchemeGroupVersion.WithResource("replicationcontrollers"): return &genericInformer{resource: resource.GroupResource(), informer: f.Core().InternalVersion().ReplicationControllers().Informer()}, nil - case api.SchemeGroupVersion.WithResource("resourcequotas"): + case core.SchemeGroupVersion.WithResource("resourcequotas"): return &genericInformer{resource: resource.GroupResource(), informer: f.Core().InternalVersion().ResourceQuotas().Informer()}, nil - case api.SchemeGroupVersion.WithResource("secrets"): + case core.SchemeGroupVersion.WithResource("secrets"): return &genericInformer{resource: resource.GroupResource(), informer: f.Core().InternalVersion().Secrets().Informer()}, nil - case api.SchemeGroupVersion.WithResource("services"): + case core.SchemeGroupVersion.WithResource("services"): return &genericInformer{resource: resource.GroupResource(), informer: f.Core().InternalVersion().Services().Informer()}, nil - case api.SchemeGroupVersion.WithResource("serviceaccounts"): + case core.SchemeGroupVersion.WithResource("serviceaccounts"): return &genericInformer{resource: resource.GroupResource(), informer: f.Core().InternalVersion().ServiceAccounts().Informer()}, nil - // 
Group=Extensions, Version=InternalVersion + // Group=extensions, Version=internalVersion case extensions.SchemeGroupVersion.WithResource("daemonsets"): return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().InternalVersion().DaemonSets().Informer()}, nil case extensions.SchemeGroupVersion.WithResource("deployments"): @@ -134,18 +136,16 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().InternalVersion().PodSecurityPolicies().Informer()}, nil case extensions.SchemeGroupVersion.WithResource("replicasets"): return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().InternalVersion().ReplicaSets().Informer()}, nil - case extensions.SchemeGroupVersion.WithResource("thirdpartyresources"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().InternalVersion().ThirdPartyResources().Informer()}, nil - // Group=Networking, Version=InternalVersion + // Group=networking.k8s.io, Version=internalVersion case networking.SchemeGroupVersion.WithResource("networkpolicies"): return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().InternalVersion().NetworkPolicies().Informer()}, nil - // Group=Policy, Version=InternalVersion + // Group=policy, Version=internalVersion case policy.SchemeGroupVersion.WithResource("poddisruptionbudgets"): return &genericInformer{resource: resource.GroupResource(), informer: f.Policy().InternalVersion().PodDisruptionBudgets().Informer()}, nil - // Group=Rbac, Version=InternalVersion + // Group=rbac.authorization.k8s.io, Version=internalVersion case rbac.SchemeGroupVersion.WithResource("clusterroles"): return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().InternalVersion().ClusterRoles().Informer()}, nil case rbac.SchemeGroupVersion.WithResource("clusterrolebindings"): @@ -155,17 +155,19 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case rbac.SchemeGroupVersion.WithResource("rolebindings"): return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().InternalVersion().RoleBindings().Informer()}, nil - // Group=Scheduling, Version=InternalVersion + // Group=scheduling.k8s.io, Version=internalVersion case scheduling.SchemeGroupVersion.WithResource("priorityclasses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Scheduling().InternalVersion().PriorityClasses().Informer()}, nil - // Group=Settings, Version=InternalVersion + // Group=settings.k8s.io, Version=internalVersion case settings.SchemeGroupVersion.WithResource("podpresets"): return &genericInformer{resource: resource.GroupResource(), informer: f.Settings().InternalVersion().PodPresets().Informer()}, nil - // Group=Storage, Version=InternalVersion + // Group=storage.k8s.io, Version=internalVersion case storage.SchemeGroupVersion.WithResource("storageclasses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().InternalVersion().StorageClasses().Informer()}, nil + case storage.SchemeGroupVersion.WithResource("volumeattachments"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().InternalVersion().VolumeAttachments().Informer()}, nil } diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces/BUILD 
b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces/BUILD index b283332b28d6..a87900962d20 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces/BUILD @@ -8,8 +8,10 @@ load( go_library( name = "go_default_library", srcs = ["factory_interfaces.go"], + importpath = "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces", deps = [ "//pkg/client/clientset_generated/internalclientset:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces/factory_interfaces.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces/factory_interfaces.go index 5b168b5919e6..9c7a9190588f 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces/factory_interfaces.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces/factory_interfaces.go @@ -19,6 +19,7 @@ limitations under the License. package internalinterfaces import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" cache "k8s.io/client-go/tools/cache" internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" @@ -32,3 +33,5 @@ type SharedInformerFactory interface { Start(stopCh <-chan struct{}) InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer } + +type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/networking/BUILD b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/networking/BUILD index dfc51cb9a133..20fb8535447e 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/networking/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/networking/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["interface.go"], + importpath = "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/networking", deps = [ "//pkg/client/informers/informers_generated/internalversion/internalinterfaces:go_default_library", "//pkg/client/informers/informers_generated/internalversion/networking/internalversion:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/networking/interface.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/networking/interface.go index bae69a504141..e6b77621297a 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/networking/interface.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/networking/interface.go @@ -30,15 +30,17 @@ type Interface interface { } type group struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New 
returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &group{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // InternalVersion returns a new internalversion.Interface. func (g *group) InternalVersion() internalversion.Interface { - return internalversion.New(g.SharedInformerFactory) + return internalversion.New(g.factory, g.namespace, g.tweakListOptions) } diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/networking/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/networking/internalversion/BUILD index 56a2a65706e3..36e976ecccb4 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/networking/internalversion/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/networking/internalversion/BUILD @@ -11,6 +11,7 @@ go_library( "interface.go", "networkpolicy.go", ], + importpath = "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/networking/internalversion", deps = [ "//pkg/apis/networking:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/networking/internalversion/interface.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/networking/internalversion/interface.go index e3eb5fa8181a..8a87627750b6 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/networking/internalversion/interface.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/networking/internalversion/interface.go @@ -29,15 +29,17 @@ type Interface interface { } type version struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &version{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // NetworkPolicies returns a NetworkPolicyInformer. 
func (v *version) NetworkPolicies() NetworkPolicyInformer { - return &networkPolicyInformer{factory: v.SharedInformerFactory} + return &networkPolicyInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/networking/internalversion/networkpolicy.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/networking/internalversion/networkpolicy.go index aea32f716b5b..0b094a7ce0fb 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/networking/internalversion/networkpolicy.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/networking/internalversion/networkpolicy.go @@ -38,19 +38,34 @@ type NetworkPolicyInformer interface { } type networkPolicyInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewNetworkPolicyInformer constructs a new informer for NetworkPolicy type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewNetworkPolicyInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredNetworkPolicyInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredNetworkPolicyInformer constructs a new informer for NetworkPolicy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredNetworkPolicyInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Networking().NetworkPolicies(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Networking().NetworkPolicies(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewNetworkPolicyInformer(client internalclientset.Interface, namespace stri ) } -func defaultNetworkPolicyInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewNetworkPolicyInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *networkPolicyInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredNetworkPolicyInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *networkPolicyInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&networking.NetworkPolicy{}, defaultNetworkPolicyInformer) + return f.factory.InformerFor(&networking.NetworkPolicy{}, f.defaultInformer) } func (f *networkPolicyInformer) Lister() internalversion.NetworkPolicyLister { diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/policy/BUILD b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/policy/BUILD index 470c4c220be5..d7b4f7468b5f 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/policy/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/policy/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["interface.go"], + importpath = "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/policy", deps = [ "//pkg/client/informers/informers_generated/internalversion/internalinterfaces:go_default_library", "//pkg/client/informers/informers_generated/internalversion/policy/internalversion:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/policy/interface.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/policy/interface.go index 82977c9ce9f9..2860283749bf 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/policy/interface.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/policy/interface.go @@ -30,15 +30,17 @@ type Interface interface { } type group struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. 
-func New(f internalinterfaces.SharedInformerFactory) Interface { - return &group{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // InternalVersion returns a new internalversion.Interface. func (g *group) InternalVersion() internalversion.Interface { - return internalversion.New(g.SharedInformerFactory) + return internalversion.New(g.factory, g.namespace, g.tweakListOptions) } diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/policy/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/policy/internalversion/BUILD index f43798ea92e1..c01088bc4d5f 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/policy/internalversion/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/policy/internalversion/BUILD @@ -11,6 +11,7 @@ go_library( "interface.go", "poddisruptionbudget.go", ], + importpath = "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/policy/internalversion", deps = [ "//pkg/apis/policy:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/policy/internalversion/interface.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/policy/internalversion/interface.go index 258654df2ec7..74773e72e391 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/policy/internalversion/interface.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/policy/internalversion/interface.go @@ -29,15 +29,17 @@ type Interface interface { } type version struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &version{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // PodDisruptionBudgets returns a PodDisruptionBudgetInformer. 
func (v *version) PodDisruptionBudgets() PodDisruptionBudgetInformer { - return &podDisruptionBudgetInformer{factory: v.SharedInformerFactory} + return &podDisruptionBudgetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/policy/internalversion/poddisruptionbudget.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/policy/internalversion/poddisruptionbudget.go index 6be99a7120b2..431d5898b4a7 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/policy/internalversion/poddisruptionbudget.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/policy/internalversion/poddisruptionbudget.go @@ -38,19 +38,34 @@ type PodDisruptionBudgetInformer interface { } type podDisruptionBudgetInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewPodDisruptionBudgetInformer constructs a new informer for PodDisruptionBudget type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewPodDisruptionBudgetInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPodDisruptionBudgetInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredPodDisruptionBudgetInformer constructs a new informer for PodDisruptionBudget type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredPodDisruptionBudgetInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Policy().PodDisruptionBudgets(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Policy().PodDisruptionBudgets(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewPodDisruptionBudgetInformer(client internalclientset.Interface, namespac ) } -func defaultPodDisruptionBudgetInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewPodDisruptionBudgetInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *podDisruptionBudgetInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPodDisruptionBudgetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *podDisruptionBudgetInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&policy.PodDisruptionBudget{}, defaultPodDisruptionBudgetInformer) + return f.factory.InformerFor(&policy.PodDisruptionBudget{}, f.defaultInformer) } func (f *podDisruptionBudgetInformer) Lister() internalversion.PodDisruptionBudgetLister { diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/rbac/BUILD b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/rbac/BUILD index 44e9a641f14b..4ee415c2803c 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/rbac/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/rbac/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["interface.go"], + importpath = "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/rbac", deps = [ "//pkg/client/informers/informers_generated/internalversion/internalinterfaces:go_default_library", "//pkg/client/informers/informers_generated/internalversion/rbac/internalversion:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/rbac/interface.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/rbac/interface.go index 47dad0522d39..bcd516701444 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/rbac/interface.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/rbac/interface.go @@ -30,15 +30,17 @@ type Interface interface { } type group struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. 
-func New(f internalinterfaces.SharedInformerFactory) Interface { - return &group{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // InternalVersion returns a new internalversion.Interface. func (g *group) InternalVersion() internalversion.Interface { - return internalversion.New(g.SharedInformerFactory) + return internalversion.New(g.factory, g.namespace, g.tweakListOptions) } diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/BUILD index 3352580d4a94..96845f749212 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/BUILD @@ -14,6 +14,7 @@ go_library( "role.go", "rolebinding.go", ], + importpath = "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/rbac/internalversion", deps = [ "//pkg/apis/rbac:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/clusterrole.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/clusterrole.go index 6f3d575ce025..d723beeeb4f6 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/clusterrole.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/clusterrole.go @@ -38,19 +38,33 @@ type ClusterRoleInformer interface { } type clusterRoleInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc } // NewClusterRoleInformer constructs a new informer for ClusterRole type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewClusterRoleInformer(client internalclientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterRoleInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterRoleInformer constructs a new informer for ClusterRole type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredClusterRoleInformer(client internalclientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Rbac().ClusterRoles().List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Rbac().ClusterRoles().Watch(options) }, }, @@ -60,12 +74,12 @@ func NewClusterRoleInformer(client internalclientset.Interface, resyncPeriod tim ) } -func defaultClusterRoleInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewClusterRoleInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *clusterRoleInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterRoleInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *clusterRoleInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&rbac.ClusterRole{}, defaultClusterRoleInformer) + return f.factory.InformerFor(&rbac.ClusterRole{}, f.defaultInformer) } func (f *clusterRoleInformer) Lister() internalversion.ClusterRoleLister { diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/clusterrolebinding.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/clusterrolebinding.go index e0c5160a7320..07b15deaadff 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/clusterrolebinding.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/clusterrolebinding.go @@ -38,19 +38,33 @@ type ClusterRoleBindingInformer interface { } type clusterRoleBindingInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc } // NewClusterRoleBindingInformer constructs a new informer for ClusterRoleBinding type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewClusterRoleBindingInformer(client internalclientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterRoleBindingInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterRoleBindingInformer constructs a new informer for ClusterRoleBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredClusterRoleBindingInformer(client internalclientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Rbac().ClusterRoleBindings().List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Rbac().ClusterRoleBindings().Watch(options) }, }, @@ -60,12 +74,12 @@ func NewClusterRoleBindingInformer(client internalclientset.Interface, resyncPer ) } -func defaultClusterRoleBindingInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewClusterRoleBindingInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *clusterRoleBindingInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterRoleBindingInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *clusterRoleBindingInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&rbac.ClusterRoleBinding{}, defaultClusterRoleBindingInformer) + return f.factory.InformerFor(&rbac.ClusterRoleBinding{}, f.defaultInformer) } func (f *clusterRoleBindingInformer) Lister() internalversion.ClusterRoleBindingLister { diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/interface.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/interface.go index fc85041c4a47..ac8b2040c0f1 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/interface.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/interface.go @@ -35,30 +35,32 @@ type Interface interface { } type version struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &version{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // ClusterRoles returns a ClusterRoleInformer. func (v *version) ClusterRoles() ClusterRoleInformer { - return &clusterRoleInformer{factory: v.SharedInformerFactory} + return &clusterRoleInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } // ClusterRoleBindings returns a ClusterRoleBindingInformer. func (v *version) ClusterRoleBindings() ClusterRoleBindingInformer { - return &clusterRoleBindingInformer{factory: v.SharedInformerFactory} + return &clusterRoleBindingInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } // Roles returns a RoleInformer. 
func (v *version) Roles() RoleInformer { - return &roleInformer{factory: v.SharedInformerFactory} + return &roleInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } // RoleBindings returns a RoleBindingInformer. func (v *version) RoleBindings() RoleBindingInformer { - return &roleBindingInformer{factory: v.SharedInformerFactory} + return &roleBindingInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/role.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/role.go index 5d61c9c28e7f..7a736633d06c 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/role.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/role.go @@ -38,19 +38,34 @@ type RoleInformer interface { } type roleInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewRoleInformer constructs a new informer for Role type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewRoleInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredRoleInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredRoleInformer constructs a new informer for Role type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredRoleInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Rbac().Roles(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Rbac().Roles(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewRoleInformer(client internalclientset.Interface, namespace string, resyn ) } -func defaultRoleInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewRoleInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *roleInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredRoleInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *roleInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&rbac.Role{}, defaultRoleInformer) + return f.factory.InformerFor(&rbac.Role{}, f.defaultInformer) } func (f *roleInformer) Lister() internalversion.RoleLister { diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/rolebinding.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/rolebinding.go index 752fba074e3c..6b0560953e75 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/rolebinding.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/rbac/internalversion/rolebinding.go @@ -38,19 +38,34 @@ type RoleBindingInformer interface { } type roleBindingInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewRoleBindingInformer constructs a new informer for RoleBinding type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewRoleBindingInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredRoleBindingInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredRoleBindingInformer constructs a new informer for RoleBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
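// Editor's note: illustrative sketch only, not part of the vendored diff. It shows how
// the NewFiltered*Informer constructors added above are intended to be used: a
// TweakListOptionsFunc mutates the ListOptions before every List and Watch call, for
// example to restrict the informer's cache to a label selector. The client, namespace,
// resync period, and selector below are hypothetical.
package example

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
	internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	rbacinternal "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/rbac/internalversion"
)

func newManagedRoleInformer(client internalclientset.Interface) cache.SharedIndexInformer {
	tweak := func(options *metav1.ListOptions) {
		// Only list/watch Roles carrying this (hypothetical) label.
		options.LabelSelector = "example.com/managed=true"
	}
	return rbacinternal.NewFilteredRoleInformer(
		client,
		metav1.NamespaceAll, // "" selects all namespaces
		30*time.Second,      // resync period
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
		tweak,
	)
}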
+func NewFilteredRoleBindingInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Rbac().RoleBindings(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Rbac().RoleBindings(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewRoleBindingInformer(client internalclientset.Interface, namespace string ) } -func defaultRoleBindingInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewRoleBindingInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *roleBindingInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredRoleBindingInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *roleBindingInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&rbac.RoleBinding{}, defaultRoleBindingInformer) + return f.factory.InformerFor(&rbac.RoleBinding{}, f.defaultInformer) } func (f *roleBindingInformer) Lister() internalversion.RoleBindingLister { diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/scheduling/BUILD b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/scheduling/BUILD index 4f48fc8ba0ff..e7fd05be0fcd 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/scheduling/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/scheduling/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["interface.go"], + importpath = "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/scheduling", deps = [ "//pkg/client/informers/informers_generated/internalversion/internalinterfaces:go_default_library", "//pkg/client/informers/informers_generated/internalversion/scheduling/internalversion:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/scheduling/interface.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/scheduling/interface.go index 3fb2db10b108..54b493e2372e 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/scheduling/interface.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/scheduling/interface.go @@ -30,15 +30,17 @@ type Interface interface { } type group struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. 
-func New(f internalinterfaces.SharedInformerFactory) Interface { - return &group{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // InternalVersion returns a new internalversion.Interface. func (g *group) InternalVersion() internalversion.Interface { - return internalversion.New(g.SharedInformerFactory) + return internalversion.New(g.factory, g.namespace, g.tweakListOptions) } diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/scheduling/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/scheduling/internalversion/BUILD index bc4073fa3bca..7568d130d86b 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/scheduling/internalversion/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/scheduling/internalversion/BUILD @@ -11,6 +11,7 @@ go_library( "interface.go", "priorityclass.go", ], + importpath = "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/scheduling/internalversion", deps = [ "//pkg/apis/scheduling:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/scheduling/internalversion/interface.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/scheduling/internalversion/interface.go index fe670796c4a5..37dd2d17d949 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/scheduling/internalversion/interface.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/scheduling/internalversion/interface.go @@ -29,15 +29,17 @@ type Interface interface { } type version struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &version{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // PriorityClasses returns a PriorityClassInformer. 
func (v *version) PriorityClasses() PriorityClassInformer { - return &priorityClassInformer{factory: v.SharedInformerFactory} + return &priorityClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/scheduling/internalversion/priorityclass.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/scheduling/internalversion/priorityclass.go index 551ba23b2e21..68600f23b5bf 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/scheduling/internalversion/priorityclass.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/scheduling/internalversion/priorityclass.go @@ -38,19 +38,33 @@ type PriorityClassInformer interface { } type priorityClassInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc } // NewPriorityClassInformer constructs a new informer for PriorityClass type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewPriorityClassInformer(client internalclientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPriorityClassInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredPriorityClassInformer constructs a new informer for PriorityClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredPriorityClassInformer(client internalclientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Scheduling().PriorityClasses().List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Scheduling().PriorityClasses().Watch(options) }, }, @@ -60,12 +74,12 @@ func NewPriorityClassInformer(client internalclientset.Interface, resyncPeriod t ) } -func defaultPriorityClassInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewPriorityClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *priorityClassInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPriorityClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *priorityClassInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&scheduling.PriorityClass{}, defaultPriorityClassInformer) + return f.factory.InformerFor(&scheduling.PriorityClass{}, f.defaultInformer) } func (f *priorityClassInformer) Lister() internalversion.PriorityClassLister { diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/settings/BUILD b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/settings/BUILD index 16bf99bdbdde..bae632755fd0 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/settings/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/settings/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["interface.go"], + importpath = "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/settings", deps = [ "//pkg/client/informers/informers_generated/internalversion/internalinterfaces:go_default_library", "//pkg/client/informers/informers_generated/internalversion/settings/internalversion:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/settings/interface.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/settings/interface.go index 2c4321439a2b..6683fbdf54bf 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/settings/interface.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/settings/interface.go @@ -30,15 +30,17 @@ type Interface interface { } type group struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. 
-func New(f internalinterfaces.SharedInformerFactory) Interface { - return &group{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // InternalVersion returns a new internalversion.Interface. func (g *group) InternalVersion() internalversion.Interface { - return internalversion.New(g.SharedInformerFactory) + return internalversion.New(g.factory, g.namespace, g.tweakListOptions) } diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/settings/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/settings/internalversion/BUILD index 31f3a7e674a4..8dda4daab6d5 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/settings/internalversion/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/settings/internalversion/BUILD @@ -11,6 +11,7 @@ go_library( "interface.go", "podpreset.go", ], + importpath = "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/settings/internalversion", deps = [ "//pkg/apis/settings:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/settings/internalversion/interface.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/settings/internalversion/interface.go index d8f7c341867c..679c8a2d5636 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/settings/internalversion/interface.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/settings/internalversion/interface.go @@ -29,15 +29,17 @@ type Interface interface { } type version struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &version{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // PodPresets returns a PodPresetInformer. 
func (v *version) PodPresets() PodPresetInformer { - return &podPresetInformer{factory: v.SharedInformerFactory} + return &podPresetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/settings/internalversion/podpreset.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/settings/internalversion/podpreset.go index df812b4219c4..5ecfe1e6d0bf 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/settings/internalversion/podpreset.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/settings/internalversion/podpreset.go @@ -38,19 +38,34 @@ type PodPresetInformer interface { } type podPresetInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string } // NewPodPresetInformer constructs a new informer for PodPreset type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewPodPresetInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPodPresetInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredPodPresetInformer constructs a new informer for PodPreset type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredPodPresetInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Settings().PodPresets(namespace).List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Settings().PodPresets(namespace).Watch(options) }, }, @@ -60,12 +75,12 @@ func NewPodPresetInformer(client internalclientset.Interface, namespace string, ) } -func defaultPodPresetInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewPodPresetInformer(client, v1.NamespaceAll, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *podPresetInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPodPresetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *podPresetInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&settings.PodPreset{}, defaultPodPresetInformer) + return f.factory.InformerFor(&settings.PodPreset{}, f.defaultInformer) } func (f *podPresetInformer) Lister() internalversion.PodPresetLister { diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/storage/BUILD index 8ab3e991aa73..b7af74c6a71f 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/storage/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/storage/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["interface.go"], + importpath = "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/storage", deps = [ "//pkg/client/informers/informers_generated/internalversion/internalinterfaces:go_default_library", "//pkg/client/informers/informers_generated/internalversion/storage/internalversion:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/storage/interface.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/storage/interface.go index d4f366fed74a..01c55b1b6af9 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/storage/interface.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/storage/interface.go @@ -30,15 +30,17 @@ type Interface interface { } type group struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. 
-func New(f internalinterfaces.SharedInformerFactory) Interface { - return &group{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // InternalVersion returns a new internalversion.Interface. func (g *group) InternalVersion() internalversion.Interface { - return internalversion.New(g.SharedInformerFactory) + return internalversion.New(g.factory, g.namespace, g.tweakListOptions) } diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/storage/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/storage/internalversion/BUILD index 2b1800c3848f..f4ffc7da14de 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/storage/internalversion/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/storage/internalversion/BUILD @@ -10,7 +10,9 @@ go_library( srcs = [ "interface.go", "storageclass.go", + "volumeattachment.go", ], + importpath = "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/storage/internalversion", deps = [ "//pkg/apis/storage:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/storage/internalversion/interface.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/storage/internalversion/interface.go index 86cf36763561..cea995f8cdcc 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/storage/internalversion/interface.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/storage/internalversion/interface.go @@ -26,18 +26,27 @@ import ( type Interface interface { // StorageClasses returns a StorageClassInformer. StorageClasses() StorageClassInformer + // VolumeAttachments returns a VolumeAttachmentInformer. + VolumeAttachments() VolumeAttachmentInformer } type version struct { - internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc } // New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory) Interface { - return &version{f} +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } // StorageClasses returns a StorageClassInformer. func (v *version) StorageClasses() StorageClassInformer { - return &storageClassInformer{factory: v.SharedInformerFactory} + return &storageClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// VolumeAttachments returns a VolumeAttachmentInformer. 
+func (v *version) VolumeAttachments() VolumeAttachmentInformer { + return &volumeAttachmentInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/storage/internalversion/storageclass.go b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/storage/internalversion/storageclass.go index eb7761a80a6a..01e932627898 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/storage/internalversion/storageclass.go +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/storage/internalversion/storageclass.go @@ -38,19 +38,33 @@ type StorageClassInformer interface { } type storageClassInformer struct { - factory internalinterfaces.SharedInformerFactory + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc } // NewStorageClassInformer constructs a new informer for StorageClass type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. func NewStorageClassInformer(client internalclientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredStorageClassInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredStorageClassInformer constructs a new informer for StorageClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredStorageClassInformer(client internalclientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Storage().StorageClasses().List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } return client.Storage().StorageClasses().Watch(options) }, }, @@ -60,12 +74,12 @@ func NewStorageClassInformer(client internalclientset.Interface, resyncPeriod ti ) } -func defaultStorageClassInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewStorageClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) +func (f *storageClassInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredStorageClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *storageClassInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&storage.StorageClass{}, defaultStorageClassInformer) + return f.factory.InformerFor(&storage.StorageClass{}, f.defaultInformer) } func (f *storageClassInformer) Lister() internalversion.StorageClassLister { diff --git a/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/storage/internalversion/volumeattachment.go 
b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/storage/internalversion/volumeattachment.go new file mode 100644 index 000000000000..54d411f43222 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/storage/internalversion/volumeattachment.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package internalversion + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + storage "k8s.io/kubernetes/pkg/apis/storage" + internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces" + internalversion "k8s.io/kubernetes/pkg/client/listers/storage/internalversion" + time "time" +) + +// VolumeAttachmentInformer provides access to a shared informer and lister for +// VolumeAttachments. +type VolumeAttachmentInformer interface { + Informer() cache.SharedIndexInformer + Lister() internalversion.VolumeAttachmentLister +} + +type volumeAttachmentInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewVolumeAttachmentInformer constructs a new informer for VolumeAttachment type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewVolumeAttachmentInformer(client internalclientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredVolumeAttachmentInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredVolumeAttachmentInformer constructs a new informer for VolumeAttachment type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredVolumeAttachmentInformer(client internalclientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.Storage().VolumeAttachments().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.Storage().VolumeAttachments().Watch(options) + }, + }, + &storage.VolumeAttachment{}, + resyncPeriod, + indexers, + ) +} + +func (f *volumeAttachmentInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredVolumeAttachmentInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *volumeAttachmentInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&storage.VolumeAttachment{}, f.defaultInformer) +} + +func (f *volumeAttachmentInformer) Lister() internalversion.VolumeAttachmentLister { + return internalversion.NewVolumeAttachmentLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/leaderelectionconfig/BUILD b/vendor/k8s.io/kubernetes/pkg/client/leaderelectionconfig/BUILD index 1461769f26e1..5814cbd6f21f 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/leaderelectionconfig/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/leaderelectionconfig/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["config.go"], + importpath = "k8s.io/kubernetes/pkg/client/leaderelectionconfig", deps = [ "//pkg/apis/componentconfig:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/leaderelectionconfig/config.go b/vendor/k8s.io/kubernetes/pkg/client/leaderelectionconfig/config.go index f8f18648fcb7..ffa0bfd19a08 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/leaderelectionconfig/config.go +++ b/vendor/k8s.io/kubernetes/pkg/client/leaderelectionconfig/config.go @@ -62,5 +62,5 @@ func BindFlags(l *componentconfig.LeaderElectionConfiguration, fs *pflag.FlagSet "of a leadership. This is only applicable if leader election is enabled.") fs.StringVar(&l.ResourceLock, "leader-elect-resource-lock", l.ResourceLock, ""+ "The type of resource object that is used for locking during "+ - "leader election. Supported options are `endpoints` (default) and `configmap`.") + "leader election. 
Supported options are `endpoints` (default) and `configmaps`.") } diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/admissionregistration/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/listers/admissionregistration/internalversion/BUILD index 71e185607f75..1afdec477e31 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/admissionregistration/internalversion/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/admissionregistration/internalversion/BUILD @@ -9,13 +9,14 @@ go_library( name = "go_default_library", srcs = [ "expansion_generated.go", - "externaladmissionhookconfiguration.go", "initializerconfiguration.go", + "mutatingwebhookconfiguration.go", + "validatingwebhookconfiguration.go", ], + importpath = "k8s.io/kubernetes/pkg/client/listers/admissionregistration/internalversion", deps = [ "//pkg/apis/admissionregistration:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/admissionregistration/internalversion/expansion_generated.go b/vendor/k8s.io/kubernetes/pkg/client/listers/admissionregistration/internalversion/expansion_generated.go index 278e929fb4f6..366db3d77457 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/admissionregistration/internalversion/expansion_generated.go +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/admissionregistration/internalversion/expansion_generated.go @@ -18,10 +18,14 @@ limitations under the License. package internalversion -// ExternalAdmissionHookConfigurationListerExpansion allows custom methods to be added to -// ExternalAdmissionHookConfigurationLister. -type ExternalAdmissionHookConfigurationListerExpansion interface{} - // InitializerConfigurationListerExpansion allows custom methods to be added to // InitializerConfigurationLister. type InitializerConfigurationListerExpansion interface{} + +// MutatingWebhookConfigurationListerExpansion allows custom methods to be added to +// MutatingWebhookConfigurationLister. +type MutatingWebhookConfigurationListerExpansion interface{} + +// ValidatingWebhookConfigurationListerExpansion allows custom methods to be added to +// ValidatingWebhookConfigurationLister. +type ValidatingWebhookConfigurationListerExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/admissionregistration/internalversion/externaladmissionhookconfiguration.go b/vendor/k8s.io/kubernetes/pkg/client/listers/admissionregistration/internalversion/externaladmissionhookconfiguration.go deleted file mode 100644 index ac200d552917..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/admissionregistration/internalversion/externaladmissionhookconfiguration.go +++ /dev/null @@ -1,67 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// This file was automatically generated by lister-gen - -package internalversion - -import ( - "k8s.io/apimachinery/pkg/api/errors" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" - admissionregistration "k8s.io/kubernetes/pkg/apis/admissionregistration" -) - -// ExternalAdmissionHookConfigurationLister helps list ExternalAdmissionHookConfigurations. -type ExternalAdmissionHookConfigurationLister interface { - // List lists all ExternalAdmissionHookConfigurations in the indexer. - List(selector labels.Selector) (ret []*admissionregistration.ExternalAdmissionHookConfiguration, err error) - // Get retrieves the ExternalAdmissionHookConfiguration from the index for a given name. - Get(name string) (*admissionregistration.ExternalAdmissionHookConfiguration, error) - ExternalAdmissionHookConfigurationListerExpansion -} - -// externalAdmissionHookConfigurationLister implements the ExternalAdmissionHookConfigurationLister interface. -type externalAdmissionHookConfigurationLister struct { - indexer cache.Indexer -} - -// NewExternalAdmissionHookConfigurationLister returns a new ExternalAdmissionHookConfigurationLister. -func NewExternalAdmissionHookConfigurationLister(indexer cache.Indexer) ExternalAdmissionHookConfigurationLister { - return &externalAdmissionHookConfigurationLister{indexer: indexer} -} - -// List lists all ExternalAdmissionHookConfigurations in the indexer. -func (s *externalAdmissionHookConfigurationLister) List(selector labels.Selector) (ret []*admissionregistration.ExternalAdmissionHookConfiguration, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*admissionregistration.ExternalAdmissionHookConfiguration)) - }) - return ret, err -} - -// Get retrieves the ExternalAdmissionHookConfiguration from the index for a given name. -func (s *externalAdmissionHookConfigurationLister) Get(name string) (*admissionregistration.ExternalAdmissionHookConfiguration, error) { - key := &admissionregistration.ExternalAdmissionHookConfiguration{ObjectMeta: v1.ObjectMeta{Name: name}} - obj, exists, err := s.indexer.Get(key) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(admissionregistration.Resource("externaladmissionhookconfiguration"), name) - } - return obj.(*admissionregistration.ExternalAdmissionHookConfiguration), nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/admissionregistration/internalversion/initializerconfiguration.go b/vendor/k8s.io/kubernetes/pkg/client/listers/admissionregistration/internalversion/initializerconfiguration.go index 8fe394f0eaa3..0a67f7d8e8b2 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/admissionregistration/internalversion/initializerconfiguration.go +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/admissionregistration/internalversion/initializerconfiguration.go @@ -20,7 +20,6 @@ package internalversion import ( "k8s.io/apimachinery/pkg/api/errors" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" admissionregistration "k8s.io/kubernetes/pkg/apis/admissionregistration" @@ -55,8 +54,7 @@ func (s *initializerConfigurationLister) List(selector labels.Selector) (ret []* // Get retrieves the InitializerConfiguration from the index for a given name. 
func (s *initializerConfigurationLister) Get(name string) (*admissionregistration.InitializerConfiguration, error) { - key := &admissionregistration.InitializerConfiguration{ObjectMeta: v1.ObjectMeta{Name: name}} - obj, exists, err := s.indexer.Get(key) + obj, exists, err := s.indexer.GetByKey(name) if err != nil { return nil, err } diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/admissionregistration/internalversion/mutatingwebhookconfiguration.go b/vendor/k8s.io/kubernetes/pkg/client/listers/admissionregistration/internalversion/mutatingwebhookconfiguration.go new file mode 100644 index 000000000000..31fcbdba72db --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/admissionregistration/internalversion/mutatingwebhookconfiguration.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package internalversion + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" + admissionregistration "k8s.io/kubernetes/pkg/apis/admissionregistration" +) + +// MutatingWebhookConfigurationLister helps list MutatingWebhookConfigurations. +type MutatingWebhookConfigurationLister interface { + // List lists all MutatingWebhookConfigurations in the indexer. + List(selector labels.Selector) (ret []*admissionregistration.MutatingWebhookConfiguration, err error) + // Get retrieves the MutatingWebhookConfiguration from the index for a given name. + Get(name string) (*admissionregistration.MutatingWebhookConfiguration, error) + MutatingWebhookConfigurationListerExpansion +} + +// mutatingWebhookConfigurationLister implements the MutatingWebhookConfigurationLister interface. +type mutatingWebhookConfigurationLister struct { + indexer cache.Indexer +} + +// NewMutatingWebhookConfigurationLister returns a new MutatingWebhookConfigurationLister. +func NewMutatingWebhookConfigurationLister(indexer cache.Indexer) MutatingWebhookConfigurationLister { + return &mutatingWebhookConfigurationLister{indexer: indexer} +} + +// List lists all MutatingWebhookConfigurations in the indexer. +func (s *mutatingWebhookConfigurationLister) List(selector labels.Selector) (ret []*admissionregistration.MutatingWebhookConfiguration, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*admissionregistration.MutatingWebhookConfiguration)) + }) + return ret, err +} + +// Get retrieves the MutatingWebhookConfiguration from the index for a given name. 
+func (s *mutatingWebhookConfigurationLister) Get(name string) (*admissionregistration.MutatingWebhookConfiguration, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(admissionregistration.Resource("mutatingwebhookconfiguration"), name) + } + return obj.(*admissionregistration.MutatingWebhookConfiguration), nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/admissionregistration/internalversion/validatingwebhookconfiguration.go b/vendor/k8s.io/kubernetes/pkg/client/listers/admissionregistration/internalversion/validatingwebhookconfiguration.go new file mode 100644 index 000000000000..9b4693584f85 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/admissionregistration/internalversion/validatingwebhookconfiguration.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package internalversion + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" + admissionregistration "k8s.io/kubernetes/pkg/apis/admissionregistration" +) + +// ValidatingWebhookConfigurationLister helps list ValidatingWebhookConfigurations. +type ValidatingWebhookConfigurationLister interface { + // List lists all ValidatingWebhookConfigurations in the indexer. + List(selector labels.Selector) (ret []*admissionregistration.ValidatingWebhookConfiguration, err error) + // Get retrieves the ValidatingWebhookConfiguration from the index for a given name. + Get(name string) (*admissionregistration.ValidatingWebhookConfiguration, error) + ValidatingWebhookConfigurationListerExpansion +} + +// validatingWebhookConfigurationLister implements the ValidatingWebhookConfigurationLister interface. +type validatingWebhookConfigurationLister struct { + indexer cache.Indexer +} + +// NewValidatingWebhookConfigurationLister returns a new ValidatingWebhookConfigurationLister. +func NewValidatingWebhookConfigurationLister(indexer cache.Indexer) ValidatingWebhookConfigurationLister { + return &validatingWebhookConfigurationLister{indexer: indexer} +} + +// List lists all ValidatingWebhookConfigurations in the indexer. +func (s *validatingWebhookConfigurationLister) List(selector labels.Selector) (ret []*admissionregistration.ValidatingWebhookConfiguration, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*admissionregistration.ValidatingWebhookConfiguration)) + }) + return ret, err +} + +// Get retrieves the ValidatingWebhookConfiguration from the index for a given name. 
+func (s *validatingWebhookConfigurationLister) Get(name string) (*admissionregistration.ValidatingWebhookConfiguration, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(admissionregistration.Resource("validatingwebhookconfiguration"), name) + } + return obj.(*admissionregistration.ValidatingWebhookConfiguration), nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/apps/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/listers/apps/internalversion/BUILD index 42b873d308fb..73334dd2e19a 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/apps/internalversion/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/apps/internalversion/BUILD @@ -13,9 +13,10 @@ go_library( "statefulset.go", "statefulset_expansion.go", ], + importpath = "k8s.io/kubernetes/pkg/client/listers/apps/internalversion", deps = [ - "//pkg/api:go_default_library", "//pkg/apis/apps:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/apps/internalversion/statefulset_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/listers/apps/internalversion/statefulset_expansion.go index c98c700b43f7..8798f7249bc9 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/apps/internalversion/statefulset_expansion.go +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/apps/internalversion/statefulset_expansion.go @@ -21,8 +21,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/apps" + api "k8s.io/kubernetes/pkg/apis/core" ) // StatefulSetListerExpansion allows custom methods to be added to diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/autoscaling/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/listers/autoscaling/internalversion/BUILD index a3e269335b06..0553786590f6 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/autoscaling/internalversion/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/autoscaling/internalversion/BUILD @@ -11,6 +11,7 @@ go_library( "expansion_generated.go", "horizontalpodautoscaler.go", ], + importpath = "k8s.io/kubernetes/pkg/client/listers/autoscaling/internalversion", deps = [ "//pkg/apis/autoscaling:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/batch/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/listers/batch/internalversion/BUILD index 059ae41bb76a..1f0b2079958c 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/batch/internalversion/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/batch/internalversion/BUILD @@ -14,9 +14,10 @@ go_library( "job.go", "job_expansion.go", ], + importpath = "k8s.io/kubernetes/pkg/client/listers/batch/internalversion", deps = [ - "//pkg/api:go_default_library", "//pkg/apis/batch:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", @@ -27,10 +28,11 @@ go_library( go_test( name = "go_default_test", srcs = ["job_test.go"], + importpath = 
"k8s.io/kubernetes/pkg/client/listers/batch/internalversion", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", "//pkg/apis/batch:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/batch/internalversion/job_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/listers/batch/internalversion/job_expansion.go index 1687f88198f6..083453d90f1c 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/batch/internalversion/job_expansion.go +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/batch/internalversion/job_expansion.go @@ -21,8 +21,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/batch" + api "k8s.io/kubernetes/pkg/apis/core" ) // JobListerExpansion allows custom methods to be added to diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/certificates/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/listers/certificates/internalversion/BUILD index 55e71d45b27b..afc655b9eeef 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/certificates/internalversion/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/certificates/internalversion/BUILD @@ -11,10 +11,10 @@ go_library( "certificatesigningrequest.go", "expansion_generated.go", ], + importpath = "k8s.io/kubernetes/pkg/client/listers/certificates/internalversion", deps = [ "//pkg/apis/certificates:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/certificates/internalversion/certificatesigningrequest.go b/vendor/k8s.io/kubernetes/pkg/client/listers/certificates/internalversion/certificatesigningrequest.go index 3ba28efa5d62..4ded13d7a608 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/certificates/internalversion/certificatesigningrequest.go +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/certificates/internalversion/certificatesigningrequest.go @@ -20,7 +20,6 @@ package internalversion import ( "k8s.io/apimachinery/pkg/api/errors" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" certificates "k8s.io/kubernetes/pkg/apis/certificates" @@ -55,8 +54,7 @@ func (s *certificateSigningRequestLister) List(selector labels.Selector) (ret [] // Get retrieves the CertificateSigningRequest from the index for a given name. 
func (s *certificateSigningRequestLister) Get(name string) (*certificates.CertificateSigningRequest, error) { - key := &certificates.CertificateSigningRequest{ObjectMeta: v1.ObjectMeta{Name: name}} - obj, exists, err := s.indexer.Get(key) + obj, exists, err := s.indexer.GetByKey(name) if err != nil { return nil, err } diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/BUILD index 3561a1f42f16..8daff542f4c0 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/BUILD @@ -29,10 +29,10 @@ go_library( "service_expansion.go", "serviceaccount.go", ], + importpath = "k8s.io/kubernetes/pkg/client/listers/core/internalversion", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/componentstatus.go b/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/componentstatus.go index 8f0a0a71f098..0be8bc557ded 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/componentstatus.go +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/componentstatus.go @@ -20,18 +20,17 @@ package internalversion import ( "k8s.io/apimachinery/pkg/api/errors" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" ) // ComponentStatusLister helps list ComponentStatuses. type ComponentStatusLister interface { // List lists all ComponentStatuses in the indexer. - List(selector labels.Selector) (ret []*api.ComponentStatus, err error) + List(selector labels.Selector) (ret []*core.ComponentStatus, err error) // Get retrieves the ComponentStatus from the index for a given name. - Get(name string) (*api.ComponentStatus, error) + Get(name string) (*core.ComponentStatus, error) ComponentStatusListerExpansion } @@ -46,22 +45,21 @@ func NewComponentStatusLister(indexer cache.Indexer) ComponentStatusLister { } // List lists all ComponentStatuses in the indexer. -func (s *componentStatusLister) List(selector labels.Selector) (ret []*api.ComponentStatus, err error) { +func (s *componentStatusLister) List(selector labels.Selector) (ret []*core.ComponentStatus, err error) { err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*api.ComponentStatus)) + ret = append(ret, m.(*core.ComponentStatus)) }) return ret, err } // Get retrieves the ComponentStatus from the index for a given name. 
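// Editor's note: illustrative sketch, not part of the vendored diff. The lister changes
// above swap indexer.Get(&Type{ObjectMeta: v1.ObjectMeta{Name: name}}) for
// indexer.GetByKey(name). That works because cache.MetaNamespaceKeyFunc keys
// cluster-scoped objects by bare name and namespaced objects by "namespace/name",
// so a plain key lookup finds the same cache entry without allocating a placeholder
// object. The object names below are hypothetical.
package example

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func showCacheKeys() {
	clusterScoped := &metav1.ObjectMeta{Name: "example-csr"}
	namespaced := &metav1.ObjectMeta{Name: "example-cm", Namespace: "kube-system"}

	k1, _ := cache.MetaNamespaceKeyFunc(clusterScoped) // "example-csr"
	k2, _ := cache.MetaNamespaceKeyFunc(namespaced)    // "kube-system/example-cm"
	fmt.Println(k1, k2)                                // keys usable with indexer.GetByKey
}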
-func (s *componentStatusLister) Get(name string) (*api.ComponentStatus, error) { - key := &api.ComponentStatus{ObjectMeta: v1.ObjectMeta{Name: name}} - obj, exists, err := s.indexer.Get(key) +func (s *componentStatusLister) Get(name string) (*core.ComponentStatus, error) { + obj, exists, err := s.indexer.GetByKey(name) if err != nil { return nil, err } if !exists { - return nil, errors.NewNotFound(api.Resource("componentstatus"), name) + return nil, errors.NewNotFound(core.Resource("componentstatus"), name) } - return obj.(*api.ComponentStatus), nil + return obj.(*core.ComponentStatus), nil } diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/configmap.go b/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/configmap.go index 373447625c1b..539a9751d401 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/configmap.go +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/configmap.go @@ -22,13 +22,13 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" ) // ConfigMapLister helps list ConfigMaps. type ConfigMapLister interface { // List lists all ConfigMaps in the indexer. - List(selector labels.Selector) (ret []*api.ConfigMap, err error) + List(selector labels.Selector) (ret []*core.ConfigMap, err error) // ConfigMaps returns an object that can list and get ConfigMaps. ConfigMaps(namespace string) ConfigMapNamespaceLister ConfigMapListerExpansion @@ -45,9 +45,9 @@ func NewConfigMapLister(indexer cache.Indexer) ConfigMapLister { } // List lists all ConfigMaps in the indexer. -func (s *configMapLister) List(selector labels.Selector) (ret []*api.ConfigMap, err error) { +func (s *configMapLister) List(selector labels.Selector) (ret []*core.ConfigMap, err error) { err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*api.ConfigMap)) + ret = append(ret, m.(*core.ConfigMap)) }) return ret, err } @@ -60,9 +60,9 @@ func (s *configMapLister) ConfigMaps(namespace string) ConfigMapNamespaceLister // ConfigMapNamespaceLister helps list and get ConfigMaps. type ConfigMapNamespaceLister interface { // List lists all ConfigMaps in the indexer for a given namespace. - List(selector labels.Selector) (ret []*api.ConfigMap, err error) + List(selector labels.Selector) (ret []*core.ConfigMap, err error) // Get retrieves the ConfigMap from the indexer for a given namespace and name. - Get(name string) (*api.ConfigMap, error) + Get(name string) (*core.ConfigMap, error) ConfigMapNamespaceListerExpansion } @@ -74,21 +74,21 @@ type configMapNamespaceLister struct { } // List lists all ConfigMaps in the indexer for a given namespace. -func (s configMapNamespaceLister) List(selector labels.Selector) (ret []*api.ConfigMap, err error) { +func (s configMapNamespaceLister) List(selector labels.Selector) (ret []*core.ConfigMap, err error) { err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*api.ConfigMap)) + ret = append(ret, m.(*core.ConfigMap)) }) return ret, err } // Get retrieves the ConfigMap from the indexer for a given namespace and name. 
-func (s configMapNamespaceLister) Get(name string) (*api.ConfigMap, error) { +func (s configMapNamespaceLister) Get(name string) (*core.ConfigMap, error) { obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) if err != nil { return nil, err } if !exists { - return nil, errors.NewNotFound(api.Resource("configmap"), name) + return nil, errors.NewNotFound(core.Resource("configmap"), name) } - return obj.(*api.ConfigMap), nil + return obj.(*core.ConfigMap), nil } diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/endpoints.go b/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/endpoints.go index efea139faa50..a65c98b0bb07 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/endpoints.go +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/endpoints.go @@ -22,13 +22,13 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" ) // EndpointsLister helps list Endpoints. type EndpointsLister interface { // List lists all Endpoints in the indexer. - List(selector labels.Selector) (ret []*api.Endpoints, err error) + List(selector labels.Selector) (ret []*core.Endpoints, err error) // Endpoints returns an object that can list and get Endpoints. Endpoints(namespace string) EndpointsNamespaceLister EndpointsListerExpansion @@ -45,9 +45,9 @@ func NewEndpointsLister(indexer cache.Indexer) EndpointsLister { } // List lists all Endpoints in the indexer. -func (s *endpointsLister) List(selector labels.Selector) (ret []*api.Endpoints, err error) { +func (s *endpointsLister) List(selector labels.Selector) (ret []*core.Endpoints, err error) { err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*api.Endpoints)) + ret = append(ret, m.(*core.Endpoints)) }) return ret, err } @@ -60,9 +60,9 @@ func (s *endpointsLister) Endpoints(namespace string) EndpointsNamespaceLister { // EndpointsNamespaceLister helps list and get Endpoints. type EndpointsNamespaceLister interface { // List lists all Endpoints in the indexer for a given namespace. - List(selector labels.Selector) (ret []*api.Endpoints, err error) + List(selector labels.Selector) (ret []*core.Endpoints, err error) // Get retrieves the Endpoints from the indexer for a given namespace and name. - Get(name string) (*api.Endpoints, error) + Get(name string) (*core.Endpoints, error) EndpointsNamespaceListerExpansion } @@ -74,21 +74,21 @@ type endpointsNamespaceLister struct { } // List lists all Endpoints in the indexer for a given namespace. -func (s endpointsNamespaceLister) List(selector labels.Selector) (ret []*api.Endpoints, err error) { +func (s endpointsNamespaceLister) List(selector labels.Selector) (ret []*core.Endpoints, err error) { err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*api.Endpoints)) + ret = append(ret, m.(*core.Endpoints)) }) return ret, err } // Get retrieves the Endpoints from the indexer for a given namespace and name. 
-func (s endpointsNamespaceLister) Get(name string) (*api.Endpoints, error) { +func (s endpointsNamespaceLister) Get(name string) (*core.Endpoints, error) { obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) if err != nil { return nil, err } if !exists { - return nil, errors.NewNotFound(api.Resource("endpoints"), name) + return nil, errors.NewNotFound(core.Resource("endpoints"), name) } - return obj.(*api.Endpoints), nil + return obj.(*core.Endpoints), nil } diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/event.go b/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/event.go index e66ec202a292..2c04cd1ddd05 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/event.go +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/event.go @@ -22,13 +22,13 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" ) // EventLister helps list Events. type EventLister interface { // List lists all Events in the indexer. - List(selector labels.Selector) (ret []*api.Event, err error) + List(selector labels.Selector) (ret []*core.Event, err error) // Events returns an object that can list and get Events. Events(namespace string) EventNamespaceLister EventListerExpansion @@ -45,9 +45,9 @@ func NewEventLister(indexer cache.Indexer) EventLister { } // List lists all Events in the indexer. -func (s *eventLister) List(selector labels.Selector) (ret []*api.Event, err error) { +func (s *eventLister) List(selector labels.Selector) (ret []*core.Event, err error) { err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*api.Event)) + ret = append(ret, m.(*core.Event)) }) return ret, err } @@ -60,9 +60,9 @@ func (s *eventLister) Events(namespace string) EventNamespaceLister { // EventNamespaceLister helps list and get Events. type EventNamespaceLister interface { // List lists all Events in the indexer for a given namespace. - List(selector labels.Selector) (ret []*api.Event, err error) + List(selector labels.Selector) (ret []*core.Event, err error) // Get retrieves the Event from the indexer for a given namespace and name. - Get(name string) (*api.Event, error) + Get(name string) (*core.Event, error) EventNamespaceListerExpansion } @@ -74,21 +74,21 @@ type eventNamespaceLister struct { } // List lists all Events in the indexer for a given namespace. -func (s eventNamespaceLister) List(selector labels.Selector) (ret []*api.Event, err error) { +func (s eventNamespaceLister) List(selector labels.Selector) (ret []*core.Event, err error) { err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*api.Event)) + ret = append(ret, m.(*core.Event)) }) return ret, err } // Get retrieves the Event from the indexer for a given namespace and name. 
-func (s eventNamespaceLister) Get(name string) (*api.Event, error) { +func (s eventNamespaceLister) Get(name string) (*core.Event, error) { obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) if err != nil { return nil, err } if !exists { - return nil, errors.NewNotFound(api.Resource("event"), name) + return nil, errors.NewNotFound(core.Resource("event"), name) } - return obj.(*api.Event), nil + return obj.(*core.Event), nil } diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/limitrange.go b/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/limitrange.go index 1fa3bea66b83..92bcf032e0de 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/limitrange.go +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/limitrange.go @@ -22,13 +22,13 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" ) // LimitRangeLister helps list LimitRanges. type LimitRangeLister interface { // List lists all LimitRanges in the indexer. - List(selector labels.Selector) (ret []*api.LimitRange, err error) + List(selector labels.Selector) (ret []*core.LimitRange, err error) // LimitRanges returns an object that can list and get LimitRanges. LimitRanges(namespace string) LimitRangeNamespaceLister LimitRangeListerExpansion @@ -45,9 +45,9 @@ func NewLimitRangeLister(indexer cache.Indexer) LimitRangeLister { } // List lists all LimitRanges in the indexer. -func (s *limitRangeLister) List(selector labels.Selector) (ret []*api.LimitRange, err error) { +func (s *limitRangeLister) List(selector labels.Selector) (ret []*core.LimitRange, err error) { err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*api.LimitRange)) + ret = append(ret, m.(*core.LimitRange)) }) return ret, err } @@ -60,9 +60,9 @@ func (s *limitRangeLister) LimitRanges(namespace string) LimitRangeNamespaceList // LimitRangeNamespaceLister helps list and get LimitRanges. type LimitRangeNamespaceLister interface { // List lists all LimitRanges in the indexer for a given namespace. - List(selector labels.Selector) (ret []*api.LimitRange, err error) + List(selector labels.Selector) (ret []*core.LimitRange, err error) // Get retrieves the LimitRange from the indexer for a given namespace and name. - Get(name string) (*api.LimitRange, error) + Get(name string) (*core.LimitRange, error) LimitRangeNamespaceListerExpansion } @@ -74,21 +74,21 @@ type limitRangeNamespaceLister struct { } // List lists all LimitRanges in the indexer for a given namespace. -func (s limitRangeNamespaceLister) List(selector labels.Selector) (ret []*api.LimitRange, err error) { +func (s limitRangeNamespaceLister) List(selector labels.Selector) (ret []*core.LimitRange, err error) { err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*api.LimitRange)) + ret = append(ret, m.(*core.LimitRange)) }) return ret, err } // Get retrieves the LimitRange from the indexer for a given namespace and name. 
-func (s limitRangeNamespaceLister) Get(name string) (*api.LimitRange, error) { +func (s limitRangeNamespaceLister) Get(name string) (*core.LimitRange, error) { obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) if err != nil { return nil, err } if !exists { - return nil, errors.NewNotFound(api.Resource("limitrange"), name) + return nil, errors.NewNotFound(core.Resource("limitrange"), name) } - return obj.(*api.LimitRange), nil + return obj.(*core.LimitRange), nil } diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/namespace.go b/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/namespace.go index c925f652373c..a429b48a6167 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/namespace.go +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/namespace.go @@ -20,18 +20,17 @@ package internalversion import ( "k8s.io/apimachinery/pkg/api/errors" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" ) // NamespaceLister helps list Namespaces. type NamespaceLister interface { // List lists all Namespaces in the indexer. - List(selector labels.Selector) (ret []*api.Namespace, err error) + List(selector labels.Selector) (ret []*core.Namespace, err error) // Get retrieves the Namespace from the index for a given name. - Get(name string) (*api.Namespace, error) + Get(name string) (*core.Namespace, error) NamespaceListerExpansion } @@ -46,22 +45,21 @@ func NewNamespaceLister(indexer cache.Indexer) NamespaceLister { } // List lists all Namespaces in the indexer. -func (s *namespaceLister) List(selector labels.Selector) (ret []*api.Namespace, err error) { +func (s *namespaceLister) List(selector labels.Selector) (ret []*core.Namespace, err error) { err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*api.Namespace)) + ret = append(ret, m.(*core.Namespace)) }) return ret, err } // Get retrieves the Namespace from the index for a given name. -func (s *namespaceLister) Get(name string) (*api.Namespace, error) { - key := &api.Namespace{ObjectMeta: v1.ObjectMeta{Name: name}} - obj, exists, err := s.indexer.Get(key) +func (s *namespaceLister) Get(name string) (*core.Namespace, error) { + obj, exists, err := s.indexer.GetByKey(name) if err != nil { return nil, err } if !exists { - return nil, errors.NewNotFound(api.Resource("namespace"), name) + return nil, errors.NewNotFound(core.Resource("namespace"), name) } - return obj.(*api.Namespace), nil + return obj.(*core.Namespace), nil } diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/node.go b/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/node.go index ba33d64896d6..e722b0fe65b4 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/node.go +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/node.go @@ -20,18 +20,17 @@ package internalversion import ( "k8s.io/apimachinery/pkg/api/errors" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" ) // NodeLister helps list Nodes. type NodeLister interface { // List lists all Nodes in the indexer. 
- List(selector labels.Selector) (ret []*api.Node, err error) + List(selector labels.Selector) (ret []*core.Node, err error) // Get retrieves the Node from the index for a given name. - Get(name string) (*api.Node, error) + Get(name string) (*core.Node, error) NodeListerExpansion } @@ -46,22 +45,21 @@ func NewNodeLister(indexer cache.Indexer) NodeLister { } // List lists all Nodes in the indexer. -func (s *nodeLister) List(selector labels.Selector) (ret []*api.Node, err error) { +func (s *nodeLister) List(selector labels.Selector) (ret []*core.Node, err error) { err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*api.Node)) + ret = append(ret, m.(*core.Node)) }) return ret, err } // Get retrieves the Node from the index for a given name. -func (s *nodeLister) Get(name string) (*api.Node, error) { - key := &api.Node{ObjectMeta: v1.ObjectMeta{Name: name}} - obj, exists, err := s.indexer.Get(key) +func (s *nodeLister) Get(name string) (*core.Node, error) { + obj, exists, err := s.indexer.GetByKey(name) if err != nil { return nil, err } if !exists { - return nil, errors.NewNotFound(api.Resource("node"), name) + return nil, errors.NewNotFound(core.Resource("node"), name) } - return obj.(*api.Node), nil + return obj.(*core.Node), nil } diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/node_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/node_expansion.go index 465c59e3ba29..3abca2d58edd 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/node_expansion.go +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/node_expansion.go @@ -18,7 +18,7 @@ package internalversion import ( "k8s.io/apimachinery/pkg/labels" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) // NodeConditionPredicate is a function that indicates whether the given node's conditions meet diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/persistentvolume.go b/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/persistentvolume.go index 040058df20fd..e5b0f7fff072 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/persistentvolume.go +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/persistentvolume.go @@ -20,18 +20,17 @@ package internalversion import ( "k8s.io/apimachinery/pkg/api/errors" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" ) // PersistentVolumeLister helps list PersistentVolumes. type PersistentVolumeLister interface { // List lists all PersistentVolumes in the indexer. - List(selector labels.Selector) (ret []*api.PersistentVolume, err error) + List(selector labels.Selector) (ret []*core.PersistentVolume, err error) // Get retrieves the PersistentVolume from the index for a given name. - Get(name string) (*api.PersistentVolume, error) + Get(name string) (*core.PersistentVolume, error) PersistentVolumeListerExpansion } @@ -46,22 +45,21 @@ func NewPersistentVolumeLister(indexer cache.Indexer) PersistentVolumeLister { } // List lists all PersistentVolumes in the indexer. 
-func (s *persistentVolumeLister) List(selector labels.Selector) (ret []*api.PersistentVolume, err error) { +func (s *persistentVolumeLister) List(selector labels.Selector) (ret []*core.PersistentVolume, err error) { err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*api.PersistentVolume)) + ret = append(ret, m.(*core.PersistentVolume)) }) return ret, err } // Get retrieves the PersistentVolume from the index for a given name. -func (s *persistentVolumeLister) Get(name string) (*api.PersistentVolume, error) { - key := &api.PersistentVolume{ObjectMeta: v1.ObjectMeta{Name: name}} - obj, exists, err := s.indexer.Get(key) +func (s *persistentVolumeLister) Get(name string) (*core.PersistentVolume, error) { + obj, exists, err := s.indexer.GetByKey(name) if err != nil { return nil, err } if !exists { - return nil, errors.NewNotFound(api.Resource("persistentvolume"), name) + return nil, errors.NewNotFound(core.Resource("persistentvolume"), name) } - return obj.(*api.PersistentVolume), nil + return obj.(*core.PersistentVolume), nil } diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/persistentvolumeclaim.go b/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/persistentvolumeclaim.go index f2e482269e38..bfd54a63f82b 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/persistentvolumeclaim.go +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/persistentvolumeclaim.go @@ -22,13 +22,13 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" ) // PersistentVolumeClaimLister helps list PersistentVolumeClaims. type PersistentVolumeClaimLister interface { // List lists all PersistentVolumeClaims in the indexer. - List(selector labels.Selector) (ret []*api.PersistentVolumeClaim, err error) + List(selector labels.Selector) (ret []*core.PersistentVolumeClaim, err error) // PersistentVolumeClaims returns an object that can list and get PersistentVolumeClaims. PersistentVolumeClaims(namespace string) PersistentVolumeClaimNamespaceLister PersistentVolumeClaimListerExpansion @@ -45,9 +45,9 @@ func NewPersistentVolumeClaimLister(indexer cache.Indexer) PersistentVolumeClaim } // List lists all PersistentVolumeClaims in the indexer. -func (s *persistentVolumeClaimLister) List(selector labels.Selector) (ret []*api.PersistentVolumeClaim, err error) { +func (s *persistentVolumeClaimLister) List(selector labels.Selector) (ret []*core.PersistentVolumeClaim, err error) { err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*api.PersistentVolumeClaim)) + ret = append(ret, m.(*core.PersistentVolumeClaim)) }) return ret, err } @@ -60,9 +60,9 @@ func (s *persistentVolumeClaimLister) PersistentVolumeClaims(namespace string) P // PersistentVolumeClaimNamespaceLister helps list and get PersistentVolumeClaims. type PersistentVolumeClaimNamespaceLister interface { // List lists all PersistentVolumeClaims in the indexer for a given namespace. - List(selector labels.Selector) (ret []*api.PersistentVolumeClaim, err error) + List(selector labels.Selector) (ret []*core.PersistentVolumeClaim, err error) // Get retrieves the PersistentVolumeClaim from the indexer for a given namespace and name. 
- Get(name string) (*api.PersistentVolumeClaim, error) + Get(name string) (*core.PersistentVolumeClaim, error) PersistentVolumeClaimNamespaceListerExpansion } @@ -74,21 +74,21 @@ type persistentVolumeClaimNamespaceLister struct { } // List lists all PersistentVolumeClaims in the indexer for a given namespace. -func (s persistentVolumeClaimNamespaceLister) List(selector labels.Selector) (ret []*api.PersistentVolumeClaim, err error) { +func (s persistentVolumeClaimNamespaceLister) List(selector labels.Selector) (ret []*core.PersistentVolumeClaim, err error) { err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*api.PersistentVolumeClaim)) + ret = append(ret, m.(*core.PersistentVolumeClaim)) }) return ret, err } // Get retrieves the PersistentVolumeClaim from the indexer for a given namespace and name. -func (s persistentVolumeClaimNamespaceLister) Get(name string) (*api.PersistentVolumeClaim, error) { +func (s persistentVolumeClaimNamespaceLister) Get(name string) (*core.PersistentVolumeClaim, error) { obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) if err != nil { return nil, err } if !exists { - return nil, errors.NewNotFound(api.Resource("persistentvolumeclaim"), name) + return nil, errors.NewNotFound(core.Resource("persistentvolumeclaim"), name) } - return obj.(*api.PersistentVolumeClaim), nil + return obj.(*core.PersistentVolumeClaim), nil } diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/pod.go b/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/pod.go index 2785db1c55d2..c89ae6f2084b 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/pod.go +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/pod.go @@ -22,13 +22,13 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" ) // PodLister helps list Pods. type PodLister interface { // List lists all Pods in the indexer. - List(selector labels.Selector) (ret []*api.Pod, err error) + List(selector labels.Selector) (ret []*core.Pod, err error) // Pods returns an object that can list and get Pods. Pods(namespace string) PodNamespaceLister PodListerExpansion @@ -45,9 +45,9 @@ func NewPodLister(indexer cache.Indexer) PodLister { } // List lists all Pods in the indexer. -func (s *podLister) List(selector labels.Selector) (ret []*api.Pod, err error) { +func (s *podLister) List(selector labels.Selector) (ret []*core.Pod, err error) { err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*api.Pod)) + ret = append(ret, m.(*core.Pod)) }) return ret, err } @@ -60,9 +60,9 @@ func (s *podLister) Pods(namespace string) PodNamespaceLister { // PodNamespaceLister helps list and get Pods. type PodNamespaceLister interface { // List lists all Pods in the indexer for a given namespace. - List(selector labels.Selector) (ret []*api.Pod, err error) + List(selector labels.Selector) (ret []*core.Pod, err error) // Get retrieves the Pod from the indexer for a given namespace and name. - Get(name string) (*api.Pod, error) + Get(name string) (*core.Pod, error) PodNamespaceListerExpansion } @@ -74,21 +74,21 @@ type podNamespaceLister struct { } // List lists all Pods in the indexer for a given namespace. 
-func (s podNamespaceLister) List(selector labels.Selector) (ret []*api.Pod, err error) { +func (s podNamespaceLister) List(selector labels.Selector) (ret []*core.Pod, err error) { err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*api.Pod)) + ret = append(ret, m.(*core.Pod)) }) return ret, err } // Get retrieves the Pod from the indexer for a given namespace and name. -func (s podNamespaceLister) Get(name string) (*api.Pod, error) { +func (s podNamespaceLister) Get(name string) (*core.Pod, error) { obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) if err != nil { return nil, err } if !exists { - return nil, errors.NewNotFound(api.Resource("pod"), name) + return nil, errors.NewNotFound(core.Resource("pod"), name) } - return obj.(*api.Pod), nil + return obj.(*core.Pod), nil } diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/podtemplate.go b/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/podtemplate.go index becc58485525..ec8343314350 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/podtemplate.go +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/podtemplate.go @@ -22,13 +22,13 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" ) // PodTemplateLister helps list PodTemplates. type PodTemplateLister interface { // List lists all PodTemplates in the indexer. - List(selector labels.Selector) (ret []*api.PodTemplate, err error) + List(selector labels.Selector) (ret []*core.PodTemplate, err error) // PodTemplates returns an object that can list and get PodTemplates. PodTemplates(namespace string) PodTemplateNamespaceLister PodTemplateListerExpansion @@ -45,9 +45,9 @@ func NewPodTemplateLister(indexer cache.Indexer) PodTemplateLister { } // List lists all PodTemplates in the indexer. -func (s *podTemplateLister) List(selector labels.Selector) (ret []*api.PodTemplate, err error) { +func (s *podTemplateLister) List(selector labels.Selector) (ret []*core.PodTemplate, err error) { err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*api.PodTemplate)) + ret = append(ret, m.(*core.PodTemplate)) }) return ret, err } @@ -60,9 +60,9 @@ func (s *podTemplateLister) PodTemplates(namespace string) PodTemplateNamespaceL // PodTemplateNamespaceLister helps list and get PodTemplates. type PodTemplateNamespaceLister interface { // List lists all PodTemplates in the indexer for a given namespace. - List(selector labels.Selector) (ret []*api.PodTemplate, err error) + List(selector labels.Selector) (ret []*core.PodTemplate, err error) // Get retrieves the PodTemplate from the indexer for a given namespace and name. - Get(name string) (*api.PodTemplate, error) + Get(name string) (*core.PodTemplate, error) PodTemplateNamespaceListerExpansion } @@ -74,21 +74,21 @@ type podTemplateNamespaceLister struct { } // List lists all PodTemplates in the indexer for a given namespace. 
-func (s podTemplateNamespaceLister) List(selector labels.Selector) (ret []*api.PodTemplate, err error) { +func (s podTemplateNamespaceLister) List(selector labels.Selector) (ret []*core.PodTemplate, err error) { err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*api.PodTemplate)) + ret = append(ret, m.(*core.PodTemplate)) }) return ret, err } // Get retrieves the PodTemplate from the indexer for a given namespace and name. -func (s podTemplateNamespaceLister) Get(name string) (*api.PodTemplate, error) { +func (s podTemplateNamespaceLister) Get(name string) (*core.PodTemplate, error) { obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) if err != nil { return nil, err } if !exists { - return nil, errors.NewNotFound(api.Resource("podtemplate"), name) + return nil, errors.NewNotFound(core.Resource("podtemplate"), name) } - return obj.(*api.PodTemplate), nil + return obj.(*core.PodTemplate), nil } diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/replicationcontroller.go b/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/replicationcontroller.go index b9728c249e62..77cf24677f00 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/replicationcontroller.go +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/replicationcontroller.go @@ -22,13 +22,13 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" ) // ReplicationControllerLister helps list ReplicationControllers. type ReplicationControllerLister interface { // List lists all ReplicationControllers in the indexer. - List(selector labels.Selector) (ret []*api.ReplicationController, err error) + List(selector labels.Selector) (ret []*core.ReplicationController, err error) // ReplicationControllers returns an object that can list and get ReplicationControllers. ReplicationControllers(namespace string) ReplicationControllerNamespaceLister ReplicationControllerListerExpansion @@ -45,9 +45,9 @@ func NewReplicationControllerLister(indexer cache.Indexer) ReplicationController } // List lists all ReplicationControllers in the indexer. -func (s *replicationControllerLister) List(selector labels.Selector) (ret []*api.ReplicationController, err error) { +func (s *replicationControllerLister) List(selector labels.Selector) (ret []*core.ReplicationController, err error) { err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*api.ReplicationController)) + ret = append(ret, m.(*core.ReplicationController)) }) return ret, err } @@ -60,9 +60,9 @@ func (s *replicationControllerLister) ReplicationControllers(namespace string) R // ReplicationControllerNamespaceLister helps list and get ReplicationControllers. type ReplicationControllerNamespaceLister interface { // List lists all ReplicationControllers in the indexer for a given namespace. - List(selector labels.Selector) (ret []*api.ReplicationController, err error) + List(selector labels.Selector) (ret []*core.ReplicationController, err error) // Get retrieves the ReplicationController from the indexer for a given namespace and name. 
- Get(name string) (*api.ReplicationController, error) + Get(name string) (*core.ReplicationController, error) ReplicationControllerNamespaceListerExpansion } @@ -74,21 +74,21 @@ type replicationControllerNamespaceLister struct { } // List lists all ReplicationControllers in the indexer for a given namespace. -func (s replicationControllerNamespaceLister) List(selector labels.Selector) (ret []*api.ReplicationController, err error) { +func (s replicationControllerNamespaceLister) List(selector labels.Selector) (ret []*core.ReplicationController, err error) { err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*api.ReplicationController)) + ret = append(ret, m.(*core.ReplicationController)) }) return ret, err } // Get retrieves the ReplicationController from the indexer for a given namespace and name. -func (s replicationControllerNamespaceLister) Get(name string) (*api.ReplicationController, error) { +func (s replicationControllerNamespaceLister) Get(name string) (*core.ReplicationController, error) { obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) if err != nil { return nil, err } if !exists { - return nil, errors.NewNotFound(api.Resource("replicationcontroller"), name) + return nil, errors.NewNotFound(core.Resource("replicationcontroller"), name) } - return obj.(*api.ReplicationController), nil + return obj.(*core.ReplicationController), nil } diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/replicationcontroller_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/replicationcontroller_expansion.go index c08d38d2d828..bd9029160dad 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/replicationcontroller_expansion.go +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/replicationcontroller_expansion.go @@ -20,7 +20,7 @@ import ( "fmt" "k8s.io/apimachinery/pkg/labels" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) // ReplicationControllerListerExpansion allows custom methods to be added to diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/resourcequota.go b/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/resourcequota.go index d0015df76ebd..2b695099bed9 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/resourcequota.go +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/resourcequota.go @@ -22,13 +22,13 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" ) // ResourceQuotaLister helps list ResourceQuotas. type ResourceQuotaLister interface { // List lists all ResourceQuotas in the indexer. - List(selector labels.Selector) (ret []*api.ResourceQuota, err error) + List(selector labels.Selector) (ret []*core.ResourceQuota, err error) // ResourceQuotas returns an object that can list and get ResourceQuotas. ResourceQuotas(namespace string) ResourceQuotaNamespaceLister ResourceQuotaListerExpansion @@ -45,9 +45,9 @@ func NewResourceQuotaLister(indexer cache.Indexer) ResourceQuotaLister { } // List lists all ResourceQuotas in the indexer. 
-func (s *resourceQuotaLister) List(selector labels.Selector) (ret []*api.ResourceQuota, err error) { +func (s *resourceQuotaLister) List(selector labels.Selector) (ret []*core.ResourceQuota, err error) { err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*api.ResourceQuota)) + ret = append(ret, m.(*core.ResourceQuota)) }) return ret, err } @@ -60,9 +60,9 @@ func (s *resourceQuotaLister) ResourceQuotas(namespace string) ResourceQuotaName // ResourceQuotaNamespaceLister helps list and get ResourceQuotas. type ResourceQuotaNamespaceLister interface { // List lists all ResourceQuotas in the indexer for a given namespace. - List(selector labels.Selector) (ret []*api.ResourceQuota, err error) + List(selector labels.Selector) (ret []*core.ResourceQuota, err error) // Get retrieves the ResourceQuota from the indexer for a given namespace and name. - Get(name string) (*api.ResourceQuota, error) + Get(name string) (*core.ResourceQuota, error) ResourceQuotaNamespaceListerExpansion } @@ -74,21 +74,21 @@ type resourceQuotaNamespaceLister struct { } // List lists all ResourceQuotas in the indexer for a given namespace. -func (s resourceQuotaNamespaceLister) List(selector labels.Selector) (ret []*api.ResourceQuota, err error) { +func (s resourceQuotaNamespaceLister) List(selector labels.Selector) (ret []*core.ResourceQuota, err error) { err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*api.ResourceQuota)) + ret = append(ret, m.(*core.ResourceQuota)) }) return ret, err } // Get retrieves the ResourceQuota from the indexer for a given namespace and name. -func (s resourceQuotaNamespaceLister) Get(name string) (*api.ResourceQuota, error) { +func (s resourceQuotaNamespaceLister) Get(name string) (*core.ResourceQuota, error) { obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) if err != nil { return nil, err } if !exists { - return nil, errors.NewNotFound(api.Resource("resourcequota"), name) + return nil, errors.NewNotFound(core.Resource("resourcequota"), name) } - return obj.(*api.ResourceQuota), nil + return obj.(*core.ResourceQuota), nil } diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/secret.go b/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/secret.go index 820fc4219707..e5585d9e0d21 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/secret.go +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/secret.go @@ -22,13 +22,13 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" ) // SecretLister helps list Secrets. type SecretLister interface { // List lists all Secrets in the indexer. - List(selector labels.Selector) (ret []*api.Secret, err error) + List(selector labels.Selector) (ret []*core.Secret, err error) // Secrets returns an object that can list and get Secrets. Secrets(namespace string) SecretNamespaceLister SecretListerExpansion @@ -45,9 +45,9 @@ func NewSecretLister(indexer cache.Indexer) SecretLister { } // List lists all Secrets in the indexer. 
-func (s *secretLister) List(selector labels.Selector) (ret []*api.Secret, err error) { +func (s *secretLister) List(selector labels.Selector) (ret []*core.Secret, err error) { err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*api.Secret)) + ret = append(ret, m.(*core.Secret)) }) return ret, err } @@ -60,9 +60,9 @@ func (s *secretLister) Secrets(namespace string) SecretNamespaceLister { // SecretNamespaceLister helps list and get Secrets. type SecretNamespaceLister interface { // List lists all Secrets in the indexer for a given namespace. - List(selector labels.Selector) (ret []*api.Secret, err error) + List(selector labels.Selector) (ret []*core.Secret, err error) // Get retrieves the Secret from the indexer for a given namespace and name. - Get(name string) (*api.Secret, error) + Get(name string) (*core.Secret, error) SecretNamespaceListerExpansion } @@ -74,21 +74,21 @@ type secretNamespaceLister struct { } // List lists all Secrets in the indexer for a given namespace. -func (s secretNamespaceLister) List(selector labels.Selector) (ret []*api.Secret, err error) { +func (s secretNamespaceLister) List(selector labels.Selector) (ret []*core.Secret, err error) { err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*api.Secret)) + ret = append(ret, m.(*core.Secret)) }) return ret, err } // Get retrieves the Secret from the indexer for a given namespace and name. -func (s secretNamespaceLister) Get(name string) (*api.Secret, error) { +func (s secretNamespaceLister) Get(name string) (*core.Secret, error) { obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) if err != nil { return nil, err } if !exists { - return nil, errors.NewNotFound(api.Resource("secret"), name) + return nil, errors.NewNotFound(core.Resource("secret"), name) } - return obj.(*api.Secret), nil + return obj.(*core.Secret), nil } diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/service.go b/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/service.go index 06afb1b504da..34f3d7747ff7 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/service.go +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/service.go @@ -22,13 +22,13 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" ) // ServiceLister helps list Services. type ServiceLister interface { // List lists all Services in the indexer. - List(selector labels.Selector) (ret []*api.Service, err error) + List(selector labels.Selector) (ret []*core.Service, err error) // Services returns an object that can list and get Services. Services(namespace string) ServiceNamespaceLister ServiceListerExpansion @@ -45,9 +45,9 @@ func NewServiceLister(indexer cache.Indexer) ServiceLister { } // List lists all Services in the indexer. -func (s *serviceLister) List(selector labels.Selector) (ret []*api.Service, err error) { +func (s *serviceLister) List(selector labels.Selector) (ret []*core.Service, err error) { err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*api.Service)) + ret = append(ret, m.(*core.Service)) }) return ret, err } @@ -60,9 +60,9 @@ func (s *serviceLister) Services(namespace string) ServiceNamespaceLister { // ServiceNamespaceLister helps list and get Services. 
type ServiceNamespaceLister interface { // List lists all Services in the indexer for a given namespace. - List(selector labels.Selector) (ret []*api.Service, err error) + List(selector labels.Selector) (ret []*core.Service, err error) // Get retrieves the Service from the indexer for a given namespace and name. - Get(name string) (*api.Service, error) + Get(name string) (*core.Service, error) ServiceNamespaceListerExpansion } @@ -74,21 +74,21 @@ type serviceNamespaceLister struct { } // List lists all Services in the indexer for a given namespace. -func (s serviceNamespaceLister) List(selector labels.Selector) (ret []*api.Service, err error) { +func (s serviceNamespaceLister) List(selector labels.Selector) (ret []*core.Service, err error) { err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*api.Service)) + ret = append(ret, m.(*core.Service)) }) return ret, err } // Get retrieves the Service from the indexer for a given namespace and name. -func (s serviceNamespaceLister) Get(name string) (*api.Service, error) { +func (s serviceNamespaceLister) Get(name string) (*core.Service, error) { obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) if err != nil { return nil, err } if !exists { - return nil, errors.NewNotFound(api.Resource("service"), name) + return nil, errors.NewNotFound(core.Resource("service"), name) } - return obj.(*api.Service), nil + return obj.(*core.Service), nil } diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/service_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/service_expansion.go index 1bbc89c5c7d9..6a951cabfd31 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/service_expansion.go +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/service_expansion.go @@ -18,7 +18,7 @@ package internalversion import ( "k8s.io/apimachinery/pkg/labels" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) // ServiceListerExpansion allows custom methods to be added to diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/serviceaccount.go b/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/serviceaccount.go index 5b07ee039fd4..59a0e8aef7bc 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/serviceaccount.go +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/core/internalversion/serviceaccount.go @@ -22,13 +22,13 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" ) // ServiceAccountLister helps list ServiceAccounts. type ServiceAccountLister interface { // List lists all ServiceAccounts in the indexer. - List(selector labels.Selector) (ret []*api.ServiceAccount, err error) + List(selector labels.Selector) (ret []*core.ServiceAccount, err error) // ServiceAccounts returns an object that can list and get ServiceAccounts. ServiceAccounts(namespace string) ServiceAccountNamespaceLister ServiceAccountListerExpansion @@ -45,9 +45,9 @@ func NewServiceAccountLister(indexer cache.Indexer) ServiceAccountLister { } // List lists all ServiceAccounts in the indexer. 
-func (s *serviceAccountLister) List(selector labels.Selector) (ret []*api.ServiceAccount, err error) { +func (s *serviceAccountLister) List(selector labels.Selector) (ret []*core.ServiceAccount, err error) { err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*api.ServiceAccount)) + ret = append(ret, m.(*core.ServiceAccount)) }) return ret, err } @@ -60,9 +60,9 @@ func (s *serviceAccountLister) ServiceAccounts(namespace string) ServiceAccountN // ServiceAccountNamespaceLister helps list and get ServiceAccounts. type ServiceAccountNamespaceLister interface { // List lists all ServiceAccounts in the indexer for a given namespace. - List(selector labels.Selector) (ret []*api.ServiceAccount, err error) + List(selector labels.Selector) (ret []*core.ServiceAccount, err error) // Get retrieves the ServiceAccount from the indexer for a given namespace and name. - Get(name string) (*api.ServiceAccount, error) + Get(name string) (*core.ServiceAccount, error) ServiceAccountNamespaceListerExpansion } @@ -74,21 +74,21 @@ type serviceAccountNamespaceLister struct { } // List lists all ServiceAccounts in the indexer for a given namespace. -func (s serviceAccountNamespaceLister) List(selector labels.Selector) (ret []*api.ServiceAccount, err error) { +func (s serviceAccountNamespaceLister) List(selector labels.Selector) (ret []*core.ServiceAccount, err error) { err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*api.ServiceAccount)) + ret = append(ret, m.(*core.ServiceAccount)) }) return ret, err } // Get retrieves the ServiceAccount from the indexer for a given namespace and name. -func (s serviceAccountNamespaceLister) Get(name string) (*api.ServiceAccount, error) { +func (s serviceAccountNamespaceLister) Get(name string) (*core.ServiceAccount, error) { obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) if err != nil { return nil, err } if !exists { - return nil, errors.NewNotFound(api.Resource("serviceaccount"), name) + return nil, errors.NewNotFound(core.Resource("serviceaccount"), name) } - return obj.(*api.ServiceAccount), nil + return obj.(*core.ServiceAccount), nil } diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/extensions/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/listers/extensions/internalversion/BUILD index 6178d48767a4..2e76730a7c3c 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/extensions/internalversion/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/extensions/internalversion/BUILD @@ -18,11 +18,10 @@ go_library( "podsecuritypolicy.go", "replicaset.go", "replicaset_expansion.go", - "scale.go", - "thirdpartyresource.go", ], + importpath = "k8s.io/kubernetes/pkg/client/listers/extensions/internalversion", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -47,9 +46,10 @@ filegroup( go_test( name = "go_default_test", srcs = ["daemonset_expansion_test.go"], + importpath = "k8s.io/kubernetes/pkg/client/listers/extensions/internalversion", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", diff --git 
a/vendor/k8s.io/kubernetes/pkg/client/listers/extensions/internalversion/daemonset_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/listers/extensions/internalversion/daemonset_expansion.go index d6c55ec49a58..dd9eee743112 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/extensions/internalversion/daemonset_expansion.go +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/extensions/internalversion/daemonset_expansion.go @@ -21,7 +21,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/extensions" ) diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/extensions/internalversion/expansion_generated.go b/vendor/k8s.io/kubernetes/pkg/client/listers/extensions/internalversion/expansion_generated.go index 1faa9b74946c..1872c99f0487 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/extensions/internalversion/expansion_generated.go +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/extensions/internalversion/expansion_generated.go @@ -29,15 +29,3 @@ type IngressNamespaceListerExpansion interface{} // PodSecurityPolicyListerExpansion allows custom methods to be added to // PodSecurityPolicyLister. type PodSecurityPolicyListerExpansion interface{} - -// ScaleListerExpansion allows custom methods to be added to -// ScaleLister. -type ScaleListerExpansion interface{} - -// ScaleNamespaceListerExpansion allows custom methods to be added to -// ScaleNamespaceLister. -type ScaleNamespaceListerExpansion interface{} - -// ThirdPartyResourceListerExpansion allows custom methods to be added to -// ThirdPartyResourceLister. -type ThirdPartyResourceListerExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/extensions/internalversion/podsecuritypolicy.go b/vendor/k8s.io/kubernetes/pkg/client/listers/extensions/internalversion/podsecuritypolicy.go index 03a976bad28a..f57b903989e3 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/extensions/internalversion/podsecuritypolicy.go +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/extensions/internalversion/podsecuritypolicy.go @@ -20,7 +20,6 @@ package internalversion import ( "k8s.io/apimachinery/pkg/api/errors" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" extensions "k8s.io/kubernetes/pkg/apis/extensions" @@ -55,8 +54,7 @@ func (s *podSecurityPolicyLister) List(selector labels.Selector) (ret []*extensi // Get retrieves the PodSecurityPolicy from the index for a given name. 
func (s *podSecurityPolicyLister) Get(name string) (*extensions.PodSecurityPolicy, error) { - key := &extensions.PodSecurityPolicy{ObjectMeta: v1.ObjectMeta{Name: name}} - obj, exists, err := s.indexer.Get(key) + obj, exists, err := s.indexer.GetByKey(name) if err != nil { return nil, err } diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/extensions/internalversion/replicaset_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/listers/extensions/internalversion/replicaset_expansion.go index 44808306b4d3..3b3cfc361f48 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/extensions/internalversion/replicaset_expansion.go +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/extensions/internalversion/replicaset_expansion.go @@ -21,7 +21,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/extensions" ) diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/extensions/internalversion/scale.go b/vendor/k8s.io/kubernetes/pkg/client/listers/extensions/internalversion/scale.go deleted file mode 100644 index b33c2a156f0f..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/extensions/internalversion/scale.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was automatically generated by lister-gen - -package internalversion - -import ( - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" - extensions "k8s.io/kubernetes/pkg/apis/extensions" -) - -// ScaleLister helps list Scales. -type ScaleLister interface { - // List lists all Scales in the indexer. - List(selector labels.Selector) (ret []*extensions.Scale, err error) - // Scales returns an object that can list and get Scales. - Scales(namespace string) ScaleNamespaceLister - ScaleListerExpansion -} - -// scaleLister implements the ScaleLister interface. -type scaleLister struct { - indexer cache.Indexer -} - -// NewScaleLister returns a new ScaleLister. -func NewScaleLister(indexer cache.Indexer) ScaleLister { - return &scaleLister{indexer: indexer} -} - -// List lists all Scales in the indexer. -func (s *scaleLister) List(selector labels.Selector) (ret []*extensions.Scale, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*extensions.Scale)) - }) - return ret, err -} - -// Scales returns an object that can list and get Scales. -func (s *scaleLister) Scales(namespace string) ScaleNamespaceLister { - return scaleNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// ScaleNamespaceLister helps list and get Scales. -type ScaleNamespaceLister interface { - // List lists all Scales in the indexer for a given namespace. - List(selector labels.Selector) (ret []*extensions.Scale, err error) - // Get retrieves the Scale from the indexer for a given namespace and name. 
- Get(name string) (*extensions.Scale, error) - ScaleNamespaceListerExpansion -} - -// scaleNamespaceLister implements the ScaleNamespaceLister -// interface. -type scaleNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Scales in the indexer for a given namespace. -func (s scaleNamespaceLister) List(selector labels.Selector) (ret []*extensions.Scale, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*extensions.Scale)) - }) - return ret, err -} - -// Get retrieves the Scale from the indexer for a given namespace and name. -func (s scaleNamespaceLister) Get(name string) (*extensions.Scale, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(extensions.Resource("scale"), name) - } - return obj.(*extensions.Scale), nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/extensions/internalversion/thirdpartyresource.go b/vendor/k8s.io/kubernetes/pkg/client/listers/extensions/internalversion/thirdpartyresource.go deleted file mode 100644 index 6177bf5f55ac..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/extensions/internalversion/thirdpartyresource.go +++ /dev/null @@ -1,67 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was automatically generated by lister-gen - -package internalversion - -import ( - "k8s.io/apimachinery/pkg/api/errors" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" - extensions "k8s.io/kubernetes/pkg/apis/extensions" -) - -// ThirdPartyResourceLister helps list ThirdPartyResources. -type ThirdPartyResourceLister interface { - // List lists all ThirdPartyResources in the indexer. - List(selector labels.Selector) (ret []*extensions.ThirdPartyResource, err error) - // Get retrieves the ThirdPartyResource from the index for a given name. - Get(name string) (*extensions.ThirdPartyResource, error) - ThirdPartyResourceListerExpansion -} - -// thirdPartyResourceLister implements the ThirdPartyResourceLister interface. -type thirdPartyResourceLister struct { - indexer cache.Indexer -} - -// NewThirdPartyResourceLister returns a new ThirdPartyResourceLister. -func NewThirdPartyResourceLister(indexer cache.Indexer) ThirdPartyResourceLister { - return &thirdPartyResourceLister{indexer: indexer} -} - -// List lists all ThirdPartyResources in the indexer. -func (s *thirdPartyResourceLister) List(selector labels.Selector) (ret []*extensions.ThirdPartyResource, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*extensions.ThirdPartyResource)) - }) - return ret, err -} - -// Get retrieves the ThirdPartyResource from the index for a given name. 
-func (s *thirdPartyResourceLister) Get(name string) (*extensions.ThirdPartyResource, error) { - key := &extensions.ThirdPartyResource{ObjectMeta: v1.ObjectMeta{Name: name}} - obj, exists, err := s.indexer.Get(key) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(extensions.Resource("thirdpartyresource"), name) - } - return obj.(*extensions.ThirdPartyResource), nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/networking/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/listers/networking/internalversion/BUILD index e6b71d04af1b..98d873578858 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/networking/internalversion/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/networking/internalversion/BUILD @@ -11,6 +11,7 @@ go_library( "expansion_generated.go", "networkpolicy.go", ], + importpath = "k8s.io/kubernetes/pkg/client/listers/networking/internalversion", deps = [ "//pkg/apis/networking:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/policy/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/listers/policy/internalversion/BUILD index d3b7b56e1b24..c120deb3b46c 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/policy/internalversion/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/policy/internalversion/BUILD @@ -13,8 +13,9 @@ go_library( "poddisruptionbudget.go", "poddisruptionbudget_expansion.go", ], + importpath = "k8s.io/kubernetes/pkg/client/listers/policy/internalversion", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/policy:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/policy/internalversion/poddisruptionbudget_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/listers/policy/internalversion/poddisruptionbudget_expansion.go index 025358920f49..fcbc88bc8d07 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/policy/internalversion/poddisruptionbudget_expansion.go +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/policy/internalversion/poddisruptionbudget_expansion.go @@ -22,7 +22,7 @@ import ( "github.com/golang/glog" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/policy" ) diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/rbac/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/listers/rbac/internalversion/BUILD index eaed9273b355..1530995394ed 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/rbac/internalversion/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/rbac/internalversion/BUILD @@ -14,10 +14,10 @@ go_library( "role.go", "rolebinding.go", ], + importpath = "k8s.io/kubernetes/pkg/client/listers/rbac/internalversion", deps = [ "//pkg/apis/rbac:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/rbac/internalversion/clusterrole.go b/vendor/k8s.io/kubernetes/pkg/client/listers/rbac/internalversion/clusterrole.go index eda1cc3acfe5..ae4089520d3d 
100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/rbac/internalversion/clusterrole.go +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/rbac/internalversion/clusterrole.go @@ -20,7 +20,6 @@ package internalversion import ( "k8s.io/apimachinery/pkg/api/errors" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" rbac "k8s.io/kubernetes/pkg/apis/rbac" @@ -55,8 +54,7 @@ func (s *clusterRoleLister) List(selector labels.Selector) (ret []*rbac.ClusterR // Get retrieves the ClusterRole from the index for a given name. func (s *clusterRoleLister) Get(name string) (*rbac.ClusterRole, error) { - key := &rbac.ClusterRole{ObjectMeta: v1.ObjectMeta{Name: name}} - obj, exists, err := s.indexer.Get(key) + obj, exists, err := s.indexer.GetByKey(name) if err != nil { return nil, err } diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/rbac/internalversion/clusterrolebinding.go b/vendor/k8s.io/kubernetes/pkg/client/listers/rbac/internalversion/clusterrolebinding.go index fed2b2e9f368..258c41768e1a 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/rbac/internalversion/clusterrolebinding.go +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/rbac/internalversion/clusterrolebinding.go @@ -20,7 +20,6 @@ package internalversion import ( "k8s.io/apimachinery/pkg/api/errors" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" rbac "k8s.io/kubernetes/pkg/apis/rbac" @@ -55,8 +54,7 @@ func (s *clusterRoleBindingLister) List(selector labels.Selector) (ret []*rbac.C // Get retrieves the ClusterRoleBinding from the index for a given name. func (s *clusterRoleBindingLister) Get(name string) (*rbac.ClusterRoleBinding, error) { - key := &rbac.ClusterRoleBinding{ObjectMeta: v1.ObjectMeta{Name: name}} - obj, exists, err := s.indexer.Get(key) + obj, exists, err := s.indexer.GetByKey(name) if err != nil { return nil, err } diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/scheduling/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/listers/scheduling/internalversion/BUILD index 0933b02331be..5c2f02bafe7c 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/scheduling/internalversion/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/scheduling/internalversion/BUILD @@ -11,10 +11,10 @@ go_library( "expansion_generated.go", "priorityclass.go", ], + importpath = "k8s.io/kubernetes/pkg/client/listers/scheduling/internalversion", deps = [ "//pkg/apis/scheduling:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/scheduling/internalversion/priorityclass.go b/vendor/k8s.io/kubernetes/pkg/client/listers/scheduling/internalversion/priorityclass.go index b219677dd2f5..1692ba758dd2 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/scheduling/internalversion/priorityclass.go +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/scheduling/internalversion/priorityclass.go @@ -20,7 +20,6 @@ package internalversion import ( "k8s.io/apimachinery/pkg/api/errors" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" scheduling "k8s.io/kubernetes/pkg/apis/scheduling" @@ -55,8 +54,7 @@ func (s *priorityClassLister) List(selector labels.Selector) (ret 
[]*scheduling. // Get retrieves the PriorityClass from the index for a given name. func (s *priorityClassLister) Get(name string) (*scheduling.PriorityClass, error) { - key := &scheduling.PriorityClass{ObjectMeta: v1.ObjectMeta{Name: name}} - obj, exists, err := s.indexer.Get(key) + obj, exists, err := s.indexer.GetByKey(name) if err != nil { return nil, err } diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/settings/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/listers/settings/internalversion/BUILD index 1d33428cc317..5e9f774e2418 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/settings/internalversion/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/settings/internalversion/BUILD @@ -11,6 +11,7 @@ go_library( "expansion_generated.go", "podpreset.go", ], + importpath = "k8s.io/kubernetes/pkg/client/listers/settings/internalversion", deps = [ "//pkg/apis/settings:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/storage/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/client/listers/storage/internalversion/BUILD index c5f61c2762af..ff223c8b8cd8 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/storage/internalversion/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/storage/internalversion/BUILD @@ -10,11 +10,12 @@ go_library( srcs = [ "expansion_generated.go", "storageclass.go", + "volumeattachment.go", ], + importpath = "k8s.io/kubernetes/pkg/client/listers/storage/internalversion", deps = [ "//pkg/apis/storage:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/storage/internalversion/expansion_generated.go b/vendor/k8s.io/kubernetes/pkg/client/listers/storage/internalversion/expansion_generated.go index 858ece21cbdd..e5a64c5d022c 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/storage/internalversion/expansion_generated.go +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/storage/internalversion/expansion_generated.go @@ -21,3 +21,7 @@ package internalversion // StorageClassListerExpansion allows custom methods to be added to // StorageClassLister. type StorageClassListerExpansion interface{} + +// VolumeAttachmentListerExpansion allows custom methods to be added to +// VolumeAttachmentLister. +type VolumeAttachmentListerExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/storage/internalversion/storageclass.go b/vendor/k8s.io/kubernetes/pkg/client/listers/storage/internalversion/storageclass.go index f1898a98f7fe..b14973fe250d 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/listers/storage/internalversion/storageclass.go +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/storage/internalversion/storageclass.go @@ -20,7 +20,6 @@ package internalversion import ( "k8s.io/apimachinery/pkg/api/errors" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" storage "k8s.io/kubernetes/pkg/apis/storage" @@ -55,8 +54,7 @@ func (s *storageClassLister) List(selector labels.Selector) (ret []*storage.Stor // Get retrieves the StorageClass from the index for a given name. 
func (s *storageClassLister) Get(name string) (*storage.StorageClass, error) { - key := &storage.StorageClass{ObjectMeta: v1.ObjectMeta{Name: name}} - obj, exists, err := s.indexer.Get(key) + obj, exists, err := s.indexer.GetByKey(name) if err != nil { return nil, err } diff --git a/vendor/k8s.io/kubernetes/pkg/client/listers/storage/internalversion/volumeattachment.go b/vendor/k8s.io/kubernetes/pkg/client/listers/storage/internalversion/volumeattachment.go new file mode 100644 index 000000000000..16fd281d0326 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/client/listers/storage/internalversion/volumeattachment.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package internalversion + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" + storage "k8s.io/kubernetes/pkg/apis/storage" +) + +// VolumeAttachmentLister helps list VolumeAttachments. +type VolumeAttachmentLister interface { + // List lists all VolumeAttachments in the indexer. + List(selector labels.Selector) (ret []*storage.VolumeAttachment, err error) + // Get retrieves the VolumeAttachment from the index for a given name. + Get(name string) (*storage.VolumeAttachment, error) + VolumeAttachmentListerExpansion +} + +// volumeAttachmentLister implements the VolumeAttachmentLister interface. +type volumeAttachmentLister struct { + indexer cache.Indexer +} + +// NewVolumeAttachmentLister returns a new VolumeAttachmentLister. +func NewVolumeAttachmentLister(indexer cache.Indexer) VolumeAttachmentLister { + return &volumeAttachmentLister{indexer: indexer} +} + +// List lists all VolumeAttachments in the indexer. +func (s *volumeAttachmentLister) List(selector labels.Selector) (ret []*storage.VolumeAttachment, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*storage.VolumeAttachment)) + }) + return ret, err +} + +// Get retrieves the VolumeAttachment from the index for a given name. 
+func (s *volumeAttachmentLister) Get(name string) (*storage.VolumeAttachment, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(storage.Resource("volumeattachment"), name) + } + return obj.(*storage.VolumeAttachment), nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/BUILD b/vendor/k8s.io/kubernetes/pkg/client/unversioned/BUILD index 7255f4e0882c..4ed4a551f17b 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/BUILD @@ -12,11 +12,13 @@ go_library( "conditions.go", "helper.go", ], + importpath = "k8s.io/kubernetes/pkg/client/unversioned", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/api/pod:go_default_library", "//pkg/apis/apps:go_default_library", "//pkg/apis/batch:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", "//pkg/client/clientset_generated/internalclientset/typed/apps/internalversion:go_default_library", "//pkg/client/clientset_generated/internalclientset/typed/batch/internalversion:go_default_library", @@ -35,10 +37,12 @@ go_library( go_test( name = "go_default_test", srcs = ["helper_test.go"], + importpath = "k8s.io/kubernetes/pkg/client/unversioned", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/conditions.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/conditions.go index a65c84750854..042d13a6fa8c 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/conditions.go +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/conditions.go @@ -24,10 +24,10 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/pod" "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/apis/batch" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/extensions" appsclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion" batchclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion" diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/helper.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/helper.go index 74baf75d0d5e..ca0fc2c70d4e 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/helper.go +++ b/vendor/k8s.io/kubernetes/pkg/client/unversioned/helper.go @@ -19,7 +19,7 @@ package unversioned import ( "k8s.io/apimachinery/pkg/runtime/schema" restclient "k8s.io/client-go/rest" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" // Import solely to initialize client auth plugins. 
_ "k8s.io/client-go/plugin/pkg/client/auth" ) @@ -41,7 +41,7 @@ func SetKubernetesDefaults(config *restclient.Config) error { config.GroupVersion = &schema.GroupVersion{} } if config.NegotiatedSerializer == nil { - config.NegotiatedSerializer = api.Codecs + config.NegotiatedSerializer = legacyscheme.Codecs } return restclient.SetKubernetesDefaults(config) } @@ -52,7 +52,7 @@ func setGroupDefaults(groupName string, config *restclient.Config) error { config.UserAgent = restclient.DefaultKubernetesUserAgent() } if config.GroupVersion == nil || config.GroupVersion.Group != groupName { - g, err := api.Registry.Group(groupName) + g, err := legacyscheme.Registry.Group(groupName) if err != nil { return err } @@ -60,7 +60,7 @@ func setGroupDefaults(groupName string, config *restclient.Config) error { config.GroupVersion = ©GroupVersion } if config.NegotiatedSerializer == nil { - config.NegotiatedSerializer = api.Codecs + config.NegotiatedSerializer = legacyscheme.Codecs } return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/BUILD b/vendor/k8s.io/kubernetes/pkg/cloudprovider/BUILD index ee4c9298fac8..7c38feae3fe6 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/BUILD @@ -12,11 +12,13 @@ go_library( "doc.go", "plugins.go", ], + importpath = "k8s.io/kubernetes/pkg/cloudprovider", deps = [ "//pkg/controller:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/client-go/informers:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/OWNERS b/vendor/k8s.io/kubernetes/pkg/cloudprovider/OWNERS index 249a4a546b17..176769b7dba0 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/OWNERS @@ -1,5 +1,7 @@ approvers: - mikedanese +- dims +- wlan0 reviewers: - thockin - lavalamp @@ -34,8 +36,8 @@ reviewers: - jszczepkowski - markturansky - girishkalele -- satnam6502 - jdef - freehan - jingxu97 - wlan0 +- cheftako diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/README.md b/vendor/k8s.io/kubernetes/pkg/cloudprovider/README.md index ada366620612..b59eba409f1e 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/README.md +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/README.md @@ -4,7 +4,7 @@ The mechanism for supporting cloud providers is currently in transition: the original method of implementing cloud provider-specific functionality within the main kubernetes tree (here) is no longer advised; however, the proposed solution is still in development. #### Guidance for potential cloud providers: -* Support for cloud providers is currently in a state of flux. Background information on motivation and the proposal for improving is in the github [proposal](https://git.k8s.io/community/contributors/design-proposals/cloud-provider-refactoring.md). +* Support for cloud providers is currently in a state of flux. Background information on motivation and the proposal for improving is in the github [proposal](https://git.k8s.io/community/contributors/design-proposals/cloud-provider/cloud-provider-refactoring.md). * In support of this plan, a new cloud-controller-manager binary was added in 1.6. This was the first of several steps (see the proposal for more information). 
* Attempts to contribute new cloud providers or (to a lesser extent) persistent volumes to the core repo will likely meet with some pushback from reviewers/approvers. * It is understood that this is an unfortunate situation in which 'the old way is no longer supported but the new way is not ready yet', but the initial path is unsustainable, and contributors are encouraged to participate in the implementation of the proposed long-term solution, as there is risk that PRs for new cloud providers here will not be approved. @@ -13,4 +13,4 @@ The mechanism for supporting cloud providers is currently in transition: the or #### Some additional context on status / direction: * 1.6 added a new cloud-controller-manager binary that may be used for testing the new out-of-core cloudprovider flow. * Setting cloud-provider=external allows for creation of a separate controller-manager binary -* 1.7 adds [extensible admission control](https://git.k8s.io/community/contributors/design-proposals/admission_control_extension.md), further enabling topology customization. +* 1.7 adds [extensible admission control](https://git.k8s.io/community/contributors/design-proposals/api-machinery/admission_control_extension.md), further enabling topology customization. diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/cloud.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/cloud.go index 9a60e5490e62..2e46d47d339d 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/cloud.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/cloud.go @@ -23,6 +23,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/informers" "k8s.io/kubernetes/pkg/controller" ) @@ -49,6 +50,11 @@ type Interface interface { HasClusterID() bool } +type InformerUser interface { + // SetInformers sets the informer on the cloud object. + SetInformers(informerFactory informers.SharedInformerFactory) +} + // Clusters is an abstract, pluggable interface for clusters of containers. type Clusters interface { // ListClusters lists the names of the available clusters. @@ -176,6 +182,7 @@ type Routes interface { var ( InstanceNotFound = errors.New("instance not found") DiskNotFound = errors.New("disk is not found") + NotImplemented = errors.New("unimplemented") ) // Zone represents the location of a particular machine. 
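The cloud.go hunk above adds an optional InformerUser interface next to the main cloudprovider Interface, plus a shared NotImplemented error. As a minimal sketch (not part of the vendored patch), this is how a provider implementation might satisfy the new interface; the exampleCloud type and its field are illustrative assumptions, and only SetInformers and informers.SharedInformerFactory come from the patch itself:

```go
// Illustrative sketch only; exampleCloud and its field are assumptions,
// not part of the vendored code above.
package example

import (
	"k8s.io/client-go/informers"
	"k8s.io/client-go/tools/cache"
)

type exampleCloud struct {
	// Records whether the shared node informer has synced, so provider
	// methods can wait for the cache before consulting Node objects.
	nodeInformerSynced cache.InformerSynced
}

// SetInformers satisfies the InformerUser interface: a controller manager
// that detects the interface can hand the provider its shared informer
// factory before the informers are started.
func (c *exampleCloud) SetInformers(informerFactory informers.SharedInformerFactory) {
	nodeInformer := informerFactory.Core().V1().Nodes().Informer()
	c.nodeInformerSynced = nodeInformer.HasSynced
}
```

A provider would typically capture a HasSynced handle like this so that later lookups can wait for the node cache instead of reading a half-populated informer.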
diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/BUILD b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/BUILD index 63c7d12f6662..aeccfa1e5b63 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["providers.go"], + importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers", deps = [ "//pkg/cloudprovider/providers/aws:go_default_library", "//pkg/cloudprovider/providers/azure:go_default_library", @@ -16,7 +17,6 @@ go_library( "//pkg/cloudprovider/providers/openstack:go_default_library", "//pkg/cloudprovider/providers/ovirt:go_default_library", "//pkg/cloudprovider/providers/photon:go_default_library", - "//pkg/cloudprovider/providers/rackspace:go_default_library", "//pkg/cloudprovider/providers/vsphere:go_default_library", ], ) @@ -40,7 +40,6 @@ filegroup( "//pkg/cloudprovider/providers/openstack:all-srcs", "//pkg/cloudprovider/providers/ovirt:all-srcs", "//pkg/cloudprovider/providers/photon:all-srcs", - "//pkg/cloudprovider/providers/rackspace:all-srcs", "//pkg/cloudprovider/providers/vsphere:all-srcs", ], tags = ["automanaged"], diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/BUILD b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/BUILD index eb41175c317c..9a4a6a76539f 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/BUILD @@ -10,6 +10,7 @@ go_library( name = "go_default_library", srcs = [ "aws.go", + "aws_fakes.go", "aws_instancegroups.go", "aws_loadbalancer.go", "aws_metrics.go", @@ -24,6 +25,7 @@ go_library( "tags.go", "volumes.go", ], + importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/aws", deps = [ "//pkg/api/v1/service:go_default_library", "//pkg/cloudprovider:go_default_library", @@ -42,14 +44,21 @@ go_library( "//vendor/github.com/aws/aws-sdk-go/service/autoscaling:go_default_library", "//vendor/github.com/aws/aws-sdk-go/service/ec2:go_default_library", "//vendor/github.com/aws/aws-sdk-go/service/elb:go_default_library", + "//vendor/github.com/aws/aws-sdk-go/service/elbv2:go_default_library", "//vendor/github.com/aws/aws-sdk-go/service/kms:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/gopkg.in/gcfg.v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", + "//vendor/k8s.io/client-go/tools/record:go_default_library", ], ) @@ -64,17 +73,16 @@ go_test( "retry_handler_test.go", "tags_test.go", ], + importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/aws", library = ":go_default_library", deps = [ "//pkg/kubelet/apis:go_default_library", "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/service/autoscaling:go_default_library", 
"//vendor/github.com/aws/aws-sdk-go/service/ec2:go_default_library", "//vendor/github.com/aws/aws-sdk-go/service/elb:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/service/kms:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/github.com/stretchr/testify/mock:go_default_library", + "//vendor/github.com/stretchr/testify/require:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws.go index 0e000be68a63..ad55f5383d77 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws.go @@ -38,16 +38,23 @@ import ( "github.com/aws/aws-sdk-go/service/autoscaling" "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/elb" + "github.com/aws/aws-sdk-go/service/elbv2" "github.com/aws/aws-sdk-go/service/kms" "github.com/golang/glog" "github.com/prometheus/client_golang/prometheus" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/record" "path" "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes/scheme" + v1core "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/kubernetes/pkg/api/v1/service" "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/controller" @@ -56,6 +63,18 @@ import ( volumeutil "k8s.io/kubernetes/pkg/volume/util" ) +// NLBHealthCheckRuleDescription is the comment used on a security group rule to +// indicate that it is used for health checks +const NLBHealthCheckRuleDescription = "kubernetes.io/rule/nlb/health" + +// NLBClientRuleDescription is the comment used on a security group rule to +// indicate that it is used for client traffic +const NLBClientRuleDescription = "kubernetes.io/rule/nlb/client" + +// NLBMtuDiscoveryRuleDescription is the comment used on a security group rule +// to indicate that it is used for mtu discovery +const NLBMtuDiscoveryRuleDescription = "kubernetes.io/rule/nlb/mtu" + // ProviderName is the name of this cloud provider. const ProviderName = "aws" @@ -71,6 +90,11 @@ const TagNameSubnetInternalELB = "kubernetes.io/role/internal-elb" // it should be used for internet ELBs const TagNameSubnetPublicELB = "kubernetes.io/role/elb" +// ServiceAnnotationLoadBalancerType is the annotation used on the service +// to indicate what type of Load Balancer we want. Right now, the only accepted +// value is "nlb" +const ServiceAnnotationLoadBalancerType = "service.beta.kubernetes.io/aws-load-balancer-type" + // ServiceAnnotationLoadBalancerInternal is the annotation used on the service // to indicate that we want an internal ELB. const ServiceAnnotationLoadBalancerInternal = "service.beta.kubernetes.io/aws-load-balancer-internal" @@ -129,6 +153,11 @@ const ServiceAnnotationLoadBalancerCertificate = "service.beta.kubernetes.io/aws // listeners. Defaults to '*' (all). 
const ServiceAnnotationLoadBalancerSSLPorts = "service.beta.kubernetes.io/aws-load-balancer-ssl-ports" +// ServiceAnnotationLoadBalancerSSLNegotiationPolicy is the annotation used on +// the service to specify a SSL negotiation settings for the HTTPS/SSL listeners +// of your load balancer. Defaults to AWS's default +const ServiceAnnotationLoadBalancerSSLNegotiationPolicy = "service.beta.kubernetes.io/aws-load-balancer-ssl-negotiation-policy" + // ServiceAnnotationLoadBalancerBEProtocol is the annotation used on the service // to specify the protocol spoken by the backend (pod) behind a listener. // If `http` (default) or `https`, an HTTPS listener that terminates the @@ -144,6 +173,31 @@ const ServiceAnnotationLoadBalancerBEProtocol = "service.beta.kubernetes.io/aws- // For example: "Key1=Val1,Key2=Val2,KeyNoVal1=,KeyNoVal2" const ServiceAnnotationLoadBalancerAdditionalTags = "service.beta.kubernetes.io/aws-load-balancer-additional-resource-tags" +// ServiceAnnotationLoadBalancerHCHealthyThreshold is the annotation used on +// the service to specify the number of successive successful health checks +// required for a backend to be considered healthy for traffic. +const ServiceAnnotationLoadBalancerHCHealthyThreshold = "service.beta.kubernetes.io/aws-load-balancer-healthcheck-healthy-threshold" + +// ServiceAnnotationLoadBalancerHCUnhealthyThreshold is the annotation used +// on the service to specify the number of unsuccessful health checks +// required for a backend to be considered unhealthy for traffic +const ServiceAnnotationLoadBalancerHCUnhealthyThreshold = "service.beta.kubernetes.io/aws-load-balancer-healthcheck-unhealthy-threshold" + +// ServiceAnnotationLoadBalancerHCTimeout is the annotation used on the +// service to specify, in seconds, how long to wait before marking a health +// check as failed. +const ServiceAnnotationLoadBalancerHCTimeout = "service.beta.kubernetes.io/aws-load-balancer-healthcheck-timeout" + +// ServiceAnnotationLoadBalancerHCInterval is the annotation used on the +// service to specify, in seconds, the interval between health checks. 
+const ServiceAnnotationLoadBalancerHCInterval = "service.beta.kubernetes.io/aws-load-balancer-healthcheck-interval" + +// Event key when a volume is stuck on attaching state when being attached to a volume +const volumeAttachmentStuck = "VolumeAttachmentStuck" + +// Indicates that a node has volumes stuck in attaching state and hence it is not fit for scheduling more pods +const nodeWithImpairedVolumes = "NodeWithImpairedVolumes" + const ( // volumeAttachmentConsecutiveErrorLimit is the number of consecutive errors we will ignore when waiting for a volume to attach/detach volumeAttachmentStatusConsecutiveErrorLimit = 10 @@ -208,6 +262,7 @@ var _ cloudprovider.PVLabeler = (*Cloud)(nil) type Services interface { Compute(region string) (EC2, error) LoadBalancing(region string) (ELB, error) + LoadBalancingV2(region string) (ELBV2, error) Autoscaling(region string) (ASG, error) Metadata() (EC2Metadata, error) KeyManagement(region string) (KMS, error) @@ -231,6 +286,10 @@ type EC2 interface { // Delete an EBS volume DeleteVolume(*ec2.DeleteVolumeInput) (*ec2.DeleteVolumeOutput, error) + ModifyVolume(*ec2.ModifyVolumeInput) (*ec2.ModifyVolumeOutput, error) + + DescribeVolumeModifications(*ec2.DescribeVolumesModificationsInput) ([]*ec2.VolumeModification, error) + DescribeSecurityGroups(request *ec2.DescribeSecurityGroupsInput) ([]*ec2.SecurityGroup, error) CreateSecurityGroup(*ec2.CreateSecurityGroupInput) (*ec2.CreateSecurityGroupOutput, error) @@ -248,6 +307,8 @@ type EC2 interface { DeleteRoute(request *ec2.DeleteRouteInput) (*ec2.DeleteRouteOutput, error) ModifyInstanceAttribute(request *ec2.ModifyInstanceAttributeInput) (*ec2.ModifyInstanceAttributeOutput, error) + + DescribeVpcs(input *ec2.DescribeVpcsInput) (*ec2.DescribeVpcsOutput, error) } // ELB is a simple pass-through of AWS' ELB client interface, which allows for testing @@ -255,10 +316,13 @@ type ELB interface { CreateLoadBalancer(*elb.CreateLoadBalancerInput) (*elb.CreateLoadBalancerOutput, error) DeleteLoadBalancer(*elb.DeleteLoadBalancerInput) (*elb.DeleteLoadBalancerOutput, error) DescribeLoadBalancers(*elb.DescribeLoadBalancersInput) (*elb.DescribeLoadBalancersOutput, error) + AddTags(*elb.AddTagsInput) (*elb.AddTagsOutput, error) RegisterInstancesWithLoadBalancer(*elb.RegisterInstancesWithLoadBalancerInput) (*elb.RegisterInstancesWithLoadBalancerOutput, error) DeregisterInstancesFromLoadBalancer(*elb.DeregisterInstancesFromLoadBalancerInput) (*elb.DeregisterInstancesFromLoadBalancerOutput, error) CreateLoadBalancerPolicy(*elb.CreateLoadBalancerPolicyInput) (*elb.CreateLoadBalancerPolicyOutput, error) SetLoadBalancerPoliciesForBackendServer(*elb.SetLoadBalancerPoliciesForBackendServerInput) (*elb.SetLoadBalancerPoliciesForBackendServerOutput, error) + SetLoadBalancerPoliciesOfListener(input *elb.SetLoadBalancerPoliciesOfListenerInput) (*elb.SetLoadBalancerPoliciesOfListenerOutput, error) + DescribeLoadBalancerPolicies(input *elb.DescribeLoadBalancerPoliciesInput) (*elb.DescribeLoadBalancerPoliciesOutput, error) DetachLoadBalancerFromSubnets(*elb.DetachLoadBalancerFromSubnetsInput) (*elb.DetachLoadBalancerFromSubnetsOutput, error) AttachLoadBalancerToSubnets(*elb.AttachLoadBalancerToSubnetsInput) (*elb.AttachLoadBalancerToSubnetsOutput, error) @@ -274,6 +338,38 @@ type ELB interface { ModifyLoadBalancerAttributes(*elb.ModifyLoadBalancerAttributesInput) (*elb.ModifyLoadBalancerAttributesOutput, error) } +// ELBV2 is a simple pass-through of AWS' ELBV2 client interface, which allows for testing +type ELBV2 interface { + 
AddTags(input *elbv2.AddTagsInput) (*elbv2.AddTagsOutput, error) + + CreateLoadBalancer(*elbv2.CreateLoadBalancerInput) (*elbv2.CreateLoadBalancerOutput, error) + DescribeLoadBalancers(*elbv2.DescribeLoadBalancersInput) (*elbv2.DescribeLoadBalancersOutput, error) + DeleteLoadBalancer(*elbv2.DeleteLoadBalancerInput) (*elbv2.DeleteLoadBalancerOutput, error) + + ModifyLoadBalancerAttributes(*elbv2.ModifyLoadBalancerAttributesInput) (*elbv2.ModifyLoadBalancerAttributesOutput, error) + DescribeLoadBalancerAttributes(*elbv2.DescribeLoadBalancerAttributesInput) (*elbv2.DescribeLoadBalancerAttributesOutput, error) + + CreateTargetGroup(*elbv2.CreateTargetGroupInput) (*elbv2.CreateTargetGroupOutput, error) + DescribeTargetGroups(*elbv2.DescribeTargetGroupsInput) (*elbv2.DescribeTargetGroupsOutput, error) + ModifyTargetGroup(*elbv2.ModifyTargetGroupInput) (*elbv2.ModifyTargetGroupOutput, error) + DeleteTargetGroup(*elbv2.DeleteTargetGroupInput) (*elbv2.DeleteTargetGroupOutput, error) + + DescribeTargetHealth(input *elbv2.DescribeTargetHealthInput) (*elbv2.DescribeTargetHealthOutput, error) + + DescribeTargetGroupAttributes(*elbv2.DescribeTargetGroupAttributesInput) (*elbv2.DescribeTargetGroupAttributesOutput, error) + ModifyTargetGroupAttributes(*elbv2.ModifyTargetGroupAttributesInput) (*elbv2.ModifyTargetGroupAttributesOutput, error) + + RegisterTargets(*elbv2.RegisterTargetsInput) (*elbv2.RegisterTargetsOutput, error) + DeregisterTargets(*elbv2.DeregisterTargetsInput) (*elbv2.DeregisterTargetsOutput, error) + + CreateListener(*elbv2.CreateListenerInput) (*elbv2.CreateListenerOutput, error) + DescribeListeners(*elbv2.DescribeListenersInput) (*elbv2.DescribeListenersOutput, error) + DeleteListener(*elbv2.DeleteListenerInput) (*elbv2.DeleteListenerOutput, error) + ModifyListener(*elbv2.ModifyListenerInput) (*elbv2.ModifyListenerOutput, error) + + WaitUntilLoadBalancersDeleted(*elbv2.DescribeLoadBalancersInput) error +} + // ASG is a simple pass-through of the Autoscaling client interface, which // allows for testing. type ASG interface { @@ -362,6 +458,9 @@ type Volumes interface { // Check if disks specified in argument map are still attached to their respective nodes. 
DisksAreAttached(map[types.NodeName][]KubernetesVolumeID) (map[types.NodeName]map[KubernetesVolumeID]bool, error) + + // Expand the disk to new size + ResizeDisk(diskName KubernetesVolumeID, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error) } // InstanceGroups is an interface for managing cloud-managed instance groups / autoscaling instance groups @@ -383,6 +482,7 @@ type InstanceGroupInfo interface { type Cloud struct { ec2 EC2 elb ELB + elbv2 ELBV2 asg ASG kms KMS metadata EC2Metadata @@ -398,6 +498,11 @@ type Cloud struct { instanceCache instanceCache + clientBuilder controller.ControllerClientBuilder + kubeClient clientset.Interface + eventBroadcaster record.EventBroadcaster + eventRecorder record.EventRecorder + // We keep an active list of devices we have assigned but not yet // attached, to avoid a race condition where we assign a device mapping // and then get a second request before we attach the volume @@ -563,6 +668,20 @@ func (p *awsSDKProvider) LoadBalancing(regionName string) (ELB, error) { return elbClient, nil } +func (p *awsSDKProvider) LoadBalancingV2(regionName string) (ELBV2, error) { + awsConfig := &aws.Config{ + Region: ®ionName, + Credentials: p.creds, + } + awsConfig = awsConfig.WithCredentialsChainVerboseErrors(true) + + elbClient := elbv2.New(session.New(awsConfig)) + + p.addHandlers(regionName, &elbClient.Handlers) + + return elbClient, nil +} + func (p *awsSDKProvider) Autoscaling(regionName string) (ASG, error) { awsConfig := &aws.Config{ Region: ®ionName, @@ -618,7 +737,7 @@ func newEc2Filter(name string, values ...string) *ec2.Filter { // AddSSHKeyToAllInstances is currently not implemented. func (c *Cloud) AddSSHKeyToAllInstances(user string, keyData []byte) error { - return errors.New("unimplemented") + return cloudprovider.NotImplemented } // CurrentNodeName returns the name of the current node @@ -722,6 +841,36 @@ func (s *awsSdkEC2) DeleteVolume(request *ec2.DeleteVolumeInput) (*ec2.DeleteVol return resp, err } +func (s *awsSdkEC2) ModifyVolume(request *ec2.ModifyVolumeInput) (*ec2.ModifyVolumeOutput, error) { + requestTime := time.Now() + resp, err := s.ec2.ModifyVolume(request) + timeTaken := time.Since(requestTime).Seconds() + recordAwsMetric("modify_volume", timeTaken, err) + return resp, err +} + +func (s *awsSdkEC2) DescribeVolumeModifications(request *ec2.DescribeVolumesModificationsInput) ([]*ec2.VolumeModification, error) { + requestTime := time.Now() + results := []*ec2.VolumeModification{} + var nextToken *string + for { + resp, err := s.ec2.DescribeVolumesModifications(request) + if err != nil { + recordAwsMetric("describe_volume_modification", 0, err) + return nil, fmt.Errorf("error listing volume modifictions : %v", err) + } + results = append(results, resp.VolumesModifications...) 
+ nextToken = resp.NextToken + if aws.StringValue(nextToken) == "" { + break + } + request.NextToken = nextToken + } + timeTaken := time.Since(requestTime).Seconds() + recordAwsMetric("describe_volume_modification", timeTaken, nil) + return results, nil +} + func (s *awsSdkEC2) DescribeSubnets(request *ec2.DescribeSubnetsInput) ([]*ec2.Subnet, error) { // Subnets are not paged response, err := s.ec2.DescribeSubnets(request) @@ -776,6 +925,10 @@ func (s *awsSdkEC2) ModifyInstanceAttribute(request *ec2.ModifyInstanceAttribute return s.ec2.ModifyInstanceAttribute(request) } +func (s *awsSdkEC2) DescribeVpcs(request *ec2.DescribeVpcsInput) (*ec2.DescribeVpcsOutput, error) { + return s.ec2.DescribeVpcs(request) +} + func init() { registerMetrics() cloudprovider.RegisterCloudProvider(ProviderName, func(config io.Reader) (cloudprovider.Interface, error) { @@ -889,6 +1042,11 @@ func newAWSCloud(config io.Reader, awsServices Services) (*Cloud, error) { return nil, fmt.Errorf("error creating AWS ELB client: %v", err) } + elbv2, err := awsServices.LoadBalancingV2(regionName) + if err != nil { + return nil, fmt.Errorf("error creating AWS ELBV2 client: %v", err) + } + asg, err := awsServices.Autoscaling(regionName) if err != nil { return nil, fmt.Errorf("error creating AWS autoscaling client: %v", err) @@ -902,6 +1060,7 @@ func newAWSCloud(config io.Reader, awsServices Services) (*Cloud, error) { awsCloud := &Cloud{ ec2: ec2, elb: elb, + elbv2: elbv2, asg: asg, metadata: metadata, kms: kms, @@ -957,7 +1116,14 @@ func newAWSCloud(config io.Reader, awsServices Services) (*Cloud, error) { } // Initialize passes a Kubernetes clientBuilder interface to the cloud provider -func (c *Cloud) Initialize(clientBuilder controller.ControllerClientBuilder) {} +func (c *Cloud) Initialize(clientBuilder controller.ControllerClientBuilder) { + c.clientBuilder = clientBuilder + c.kubeClient = clientBuilder.ClientOrDie("aws-cloud-provider") + c.eventBroadcaster = record.NewBroadcaster() + c.eventBroadcaster.StartLogging(glog.Infof) + c.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(c.kubeClient.CoreV1().RESTClient()).Events("")}) + c.eventRecorder = c.eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "aws-cloud-provider"}) +} // Clusters returns the list of clusters. func (c *Cloud) Clusters() (cloudprovider.Clusters, bool) { @@ -1039,7 +1205,7 @@ func (c *Cloud) NodeAddresses(name types.NodeName) ([]v1.NodeAddress, error) { if err != nil || len(internalDNS) == 0 { //TODO: It would be nice to be able to determine the reason for the failure, // but the AWS client masks all failures with the same error description. - glog.V(2).Info("Could not determine private DNS from AWS metadata.") + glog.V(4).Info("Could not determine private DNS from AWS metadata.") } else { addresses = append(addresses, v1.NodeAddress{Type: v1.NodeInternalDNS, Address: internalDNS}) } @@ -1048,7 +1214,7 @@ func (c *Cloud) NodeAddresses(name types.NodeName) ([]v1.NodeAddress, error) { if err != nil || len(externalDNS) == 0 { //TODO: It would be nice to be able to determine the reason for the failure, // but the AWS client masks all failures with the same error description. 
- glog.V(2).Info("Could not determine public DNS from AWS metadata.") + glog.V(4).Info("Could not determine public DNS from AWS metadata.") } else { addresses = append(addresses, v1.NodeAddress{Type: v1.NodeExternalDNS, Address: externalDNS}) } @@ -1152,7 +1318,33 @@ func (c *Cloud) ExternalID(nodeName types.NodeName) (string, error) { // InstanceExistsByProviderID returns true if the instance with the given provider id still exists and is running. // If false is returned with no error, the instance will be immediately deleted by the cloud controller manager. func (c *Cloud) InstanceExistsByProviderID(providerID string) (bool, error) { - return false, errors.New("unimplemented") + instanceID, err := kubernetesInstanceID(providerID).mapToAWSInstanceID() + if err != nil { + return false, err + } + + request := &ec2.DescribeInstancesInput{ + InstanceIds: []*string{instanceID.awsString()}, + } + + instances, err := c.ec2.DescribeInstances(request) + if err != nil { + return false, err + } + if len(instances) == 0 { + return false, nil + } + if len(instances) > 1 { + return false, fmt.Errorf("multiple instances found for instance: %s", instanceID) + } + + state := instances[0].State.Name + if *state != "running" { + glog.Warningf("the instance %s is not running", instanceID) + return false, nil + } + + return true, nil } // InstanceID returns the cloud provider ID of the node with the specified nodeName. @@ -1470,7 +1662,7 @@ type awsDisk struct { } func newAWSDisk(aws *Cloud, name KubernetesVolumeID) (*awsDisk, error) { - awsID, err := name.mapToAWSVolumeID() + awsID, err := name.MapToAWSVolumeID() if err != nil { return nil, err } @@ -1499,6 +1691,87 @@ func (d *awsDisk) describeVolume() (*ec2.Volume, error) { return volumes[0], nil } +func (d *awsDisk) describeVolumeModification() (*ec2.VolumeModification, error) { + volumeID := d.awsID + request := &ec2.DescribeVolumesModificationsInput{ + VolumeIds: []*string{volumeID.awsString()}, + } + volumeMods, err := d.ec2.DescribeVolumeModifications(request) + + if err != nil { + return nil, fmt.Errorf("error describing volume modification %s with %v", volumeID, err) + } + + if len(volumeMods) == 0 { + return nil, fmt.Errorf("no volume modifications found for %s", volumeID) + } + lastIndex := len(volumeMods) - 1 + return volumeMods[lastIndex], nil +} + +func (d *awsDisk) modifyVolume(requestGiB int64) (int64, error) { + volumeID := d.awsID + + request := &ec2.ModifyVolumeInput{ + VolumeId: volumeID.awsString(), + Size: aws.Int64(requestGiB), + } + output, err := d.ec2.ModifyVolume(request) + if err != nil { + modifyError := fmt.Errorf("AWS modifyVolume failed for %s with %v", volumeID, err) + return requestGiB, modifyError + } + + volumeModification := output.VolumeModification + + if aws.StringValue(volumeModification.ModificationState) == ec2.VolumeModificationStateCompleted { + return aws.Int64Value(volumeModification.TargetSize), nil + } + + backoff := wait.Backoff{ + Duration: 1 * time.Second, + Factor: 2, + Steps: 10, + } + + checkForResize := func() (bool, error) { + volumeModification, err := d.describeVolumeModification() + + if err != nil { + return false, err + } + + if aws.StringValue(volumeModification.ModificationState) == ec2.VolumeModificationStateCompleted { + return true, nil + } + return false, nil + } + waitWithErr := wait.ExponentialBackoff(backoff, checkForResize) + return requestGiB, waitWithErr +} + +// applyUnSchedulableTaint applies a unschedulable taint to a node after verifying +// if node has become unusable because of 
volumes getting stuck in attaching state. +func (c *Cloud) applyUnSchedulableTaint(nodeName types.NodeName, reason string) { + node, fetchErr := c.kubeClient.CoreV1().Nodes().Get(string(nodeName), metav1.GetOptions{}) + if fetchErr != nil { + glog.Errorf("Error fetching node %s with %v", nodeName, fetchErr) + return + } + + taint := &v1.Taint{ + Key: nodeWithImpairedVolumes, + Value: "true", + Effect: v1.TaintEffectNoSchedule, + } + err := controller.AddOrUpdateTaintOnNode(c.kubeClient, string(nodeName), taint) + if err != nil { + glog.Errorf("Error applying taint to node %s with error %v", nodeName, err) + return + } + c.eventRecorder.Eventf(node, v1.EventTypeWarning, volumeAttachmentStuck, reason) +} + // waitForAttachmentStatus polls until the attachment status is the expected value // On success, it returns the last attachment state. func (d *awsDisk) waitForAttachmentStatus(status string) (*ec2.VolumeAttachment, error) { @@ -1690,6 +1963,15 @@ func (c *Cloud) AttachDisk(diskName KubernetesVolumeID, nodeName types.NodeName, ec2Device := "/dev/xvd" + string(mountDevice) if !alreadyAttached { + available, err := c.checkIfAvailable(disk, "attaching", awsInstance.awsID) + if err != nil { + glog.Error(err) + } + + if !available { + attachEnded = true + return "", err + } request := &ec2.AttachVolumeInput{ Device: aws.String(ec2Device), InstanceId: aws.String(awsInstance.awsID), @@ -1709,7 +1991,11 @@ func (c *Cloud) AttachDisk(diskName KubernetesVolumeID, nodeName types.NodeName, } attachment, err := disk.waitForAttachmentStatus("attached") + if err != nil { + if err == wait.ErrWaitTimeout { + c.applyUnSchedulableTaint(nodeName, "Volume stuck in attaching state - node needs reboot to fix impaired state.") + } return "", err } @@ -1735,26 +2021,23 @@ func (c *Cloud) AttachDisk(diskName KubernetesVolumeID, nodeName types.NodeName, // DetachDisk implements Volumes.DetachDisk func (c *Cloud) DetachDisk(diskName KubernetesVolumeID, nodeName types.NodeName) (string, error) { - disk, err := newAWSDisk(c, diskName) - if err != nil { + diskInfo, attached, err := c.checkIfAttachedToNode(diskName, nodeName) + if diskInfo == nil { return "", err } - awsInstance, info, err := c.getFullInstance(nodeName) - if err != nil { - if err == cloudprovider.InstanceNotFound { - // If instance no longer exists, safe to assume volume is not attached. - glog.Warningf( - "Instance %q does not exist. 
DetachDisk will assume disk %q is not attached to it.", - nodeName, - diskName) - return "", nil - } + if !attached && diskInfo.ec2Instance != nil { + glog.Warningf("DetachDisk %s called for node %s but volume is attached to node %s", diskName, nodeName, diskInfo.nodeName) + return "", nil + } - return "", err + if !attached { + return "", nil } - mountDevice, alreadyAttached, err := c.getMountDevice(awsInstance, info, disk.awsID, false) + awsInstance := newAWSInstance(c.ec2, diskInfo.ec2Instance) + + mountDevice, alreadyAttached, err := c.getMountDevice(awsInstance, diskInfo.ec2Instance, diskInfo.disk.awsID, false) if err != nil { return "", err } @@ -1766,18 +2049,19 @@ func (c *Cloud) DetachDisk(diskName KubernetesVolumeID, nodeName types.NodeName) request := ec2.DetachVolumeInput{ InstanceId: &awsInstance.awsID, - VolumeId: disk.awsID.awsString(), + VolumeId: diskInfo.disk.awsID.awsString(), } response, err := c.ec2.DetachVolume(&request) if err != nil { - return "", fmt.Errorf("error detaching EBS volume %q from %q: %q", disk.awsID, awsInstance.awsID, err) + return "", fmt.Errorf("error detaching EBS volume %q from %q: %q", diskInfo.disk.awsID, awsInstance.awsID, err) } + if response == nil { return "", errors.New("no response from DetachVolume") } - attachment, err := disk.waitForAttachmentStatus("detached") + attachment, err := diskInfo.disk.waitForAttachmentStatus("detached") if err != nil { return "", err } @@ -1790,7 +2074,7 @@ func (c *Cloud) DetachDisk(diskName KubernetesVolumeID, nodeName types.NodeName) } if mountDevice != "" { - c.endAttaching(awsInstance, disk.awsID, mountDevice) + c.endAttaching(awsInstance, diskInfo.disk.awsID, mountDevice) // We don't check the return value - we don't really expect the attachment to have been // in progress, though it might have been } @@ -1922,9 +2206,59 @@ func (c *Cloud) DeleteDisk(volumeName KubernetesVolumeID) (bool, error) { if err != nil { return false, err } + available, err := c.checkIfAvailable(awsDisk, "deleting", "") + if err != nil { + glog.Error(err) + } + + if !available { + return false, err + } + return awsDisk.deleteVolume() } +func (c *Cloud) checkIfAvailable(disk *awsDisk, opName string, instance string) (bool, error) { + info, err := disk.describeVolume() + + if err != nil { + glog.Errorf("Error describing volume %q: %q", disk.awsID, err) + // if for some reason we can not describe volume we will return error + return false, err + } + + volumeState := aws.StringValue(info.State) + opError := fmt.Sprintf("Error %s EBS volume %q", opName, disk.awsID) + if len(instance) != 0 { + opError = fmt.Sprintf("%q to instance %q", opError, instance) + } + + // Only available volumes can be attached or deleted + if volumeState != "available" { + // Volume is attached somewhere else and we can not attach it here + if len(info.Attachments) > 0 { + attachment := info.Attachments[0] + instanceId := aws.StringValue(attachment.InstanceId) + attachedInstance, ierr := c.getInstanceByID(instanceId) + attachErr := fmt.Sprintf("%s since volume is currently attached to %q", opError, instanceId) + if ierr != nil { + glog.Error(attachErr) + return false, errors.New(attachErr) + } + devicePath := aws.StringValue(attachment.Device) + nodeName := mapInstanceToNodeName(attachedInstance) + + danglingErr := volumeutil.NewDanglingError(attachErr, nodeName, devicePath) + return false, danglingErr + } + + attachErr := fmt.Errorf("%s since volume is in %q state", opError, volumeState) + return false, attachErr + } + + return true, nil +} + func (c 
*Cloud) GetLabelsForVolume(pv *v1.PersistentVolume) (map[string]string, error) { // Ignore any volumes that are being provisioned if pv.Spec.AWSElasticBlockStore.VolumeID == volume.ProvisionedVolumeName { @@ -1984,32 +2318,12 @@ func (c *Cloud) GetDiskPath(volumeName KubernetesVolumeID) (string, error) { // DiskIsAttached implements Volumes.DiskIsAttached func (c *Cloud) DiskIsAttached(diskName KubernetesVolumeID, nodeName types.NodeName) (bool, error) { - _, instance, err := c.getFullInstance(nodeName) - if err != nil { - if err == cloudprovider.InstanceNotFound { - // If instance no longer exists, safe to assume volume is not attached. - glog.Warningf( - "Instance %q does not exist. DiskIsAttached will assume disk %q is not attached to it.", - nodeName, - diskName) - return false, nil - } - - return false, err + diskInfo, attached, err := c.checkIfAttachedToNode(diskName, nodeName) + if diskInfo == nil { + return true, err } - diskID, err := diskName.mapToAWSVolumeID() - if err != nil { - return false, fmt.Errorf("error mapping volume spec %q to aws id: %v", diskName, err) - } - - for _, blockDevice := range instance.BlockDeviceMappings { - id := awsVolumeID(aws.StringValue(blockDevice.Ebs.VolumeId)) - if id == diskID { - return true, nil - } - } - return false, nil + return attached, nil } func (c *Cloud) DisksAreAttached(nodeDisks map[types.NodeName][]KubernetesVolumeID) (map[types.NodeName]map[KubernetesVolumeID]bool, error) { @@ -2062,7 +2376,7 @@ func (c *Cloud) DisksAreAttached(nodeDisks map[types.NodeName][]KubernetesVolume idToDiskName := make(map[awsVolumeID]KubernetesVolumeID) for _, diskName := range diskNames { - volumeID, err := diskName.mapToAWSVolumeID() + volumeID, err := diskName.MapToAWSVolumeID() if err != nil { return nil, fmt.Errorf("error mapping volume spec %q to aws id: %v", diskName, err) } @@ -2082,6 +2396,37 @@ func (c *Cloud) DisksAreAttached(nodeDisks map[types.NodeName][]KubernetesVolume return attached, nil } +func (c *Cloud) ResizeDisk( + diskName KubernetesVolumeID, + oldSize resource.Quantity, + newSize resource.Quantity) (resource.Quantity, error) { + awsDisk, err := newAWSDisk(c, diskName) + if err != nil { + return oldSize, err + } + + volumeInfo, err := awsDisk.describeVolume() + if err != nil { + descErr := fmt.Errorf("AWS.ResizeDisk Error describing volume %s with %v", diskName, err) + return oldSize, descErr + } + requestBytes := newSize.Value() + // AWS resizes in chunks of GiB (not GB) + requestGiB := volume.RoundUpSize(requestBytes, 1024*1024*1024) + newSizeQuant := resource.MustParse(fmt.Sprintf("%dGi", requestGiB)) + + // If disk already if of greater or equal size than requested we return + if aws.Int64Value(volumeInfo.Size) >= requestGiB { + return newSizeQuant, nil + } + _, err = awsDisk.modifyVolume(requestGiB) + + if err != nil { + return oldSize, err + } + return newSizeQuant, nil +} + // Gets the current load balancer state func (c *Cloud) describeLoadBalancer(name string) (*elb.LoadBalancerDescription, error) { request := &elb.DescribeLoadBalancersInput{} @@ -2107,6 +2452,53 @@ func (c *Cloud) describeLoadBalancer(name string) (*elb.LoadBalancerDescription, return ret, nil } +func (c *Cloud) addLoadBalancerTags(loadBalancerName string, requested map[string]string) error { + var tags []*elb.Tag + for k, v := range requested { + tag := &elb.Tag{ + Key: aws.String(k), + Value: aws.String(v), + } + tags = append(tags, tag) + } + + request := &elb.AddTagsInput{} + request.LoadBalancerNames = []*string{&loadBalancerName} + request.Tags = 
tags + + _, err := c.elb.AddTags(request) + if err != nil { + return fmt.Errorf("error adding tags to load balancer: %v", err) + } + return nil +} + +// Gets the current load balancer state +func (c *Cloud) describeLoadBalancerv2(name string) (*elbv2.LoadBalancer, error) { + request := &elbv2.DescribeLoadBalancersInput{ + Names: []*string{aws.String(name)}, + } + + response, err := c.elbv2.DescribeLoadBalancers(request) + if err != nil { + if awsError, ok := err.(awserr.Error); ok { + if awsError.Code() == elbv2.ErrCodeLoadBalancerNotFoundException { + return nil, nil + } + } + return nil, fmt.Errorf("Error describing load balancer: %q", err) + } + + // AWS will not return 2 load balancers with the same name _and_ type. + for i := range response.LoadBalancers { + if aws.StringValue(response.LoadBalancers[i].Type) == elbv2.LoadBalancerTypeEnumNetwork { + return response.LoadBalancers[i], nil + } + } + + return nil, fmt.Errorf("NLB '%s' could not be found", name) +} + // Retrieves instance's vpc id from metadata func (c *Cloud) findVPCID() (string, error) { macs, err := c.metadata.GetMetadata("network/interfaces/macs/") @@ -2782,8 +3174,8 @@ func buildListener(port v1.ServicePort, annotations map[string]string, sslPorts // EnsureLoadBalancer implements LoadBalancer.EnsureLoadBalancer func (c *Cloud) EnsureLoadBalancer(clusterName string, apiService *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) { annotations := apiService.Annotations - glog.V(2).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v, %v, %v)", - clusterName, apiService.Namespace, apiService.Name, c.region, apiService.Spec.LoadBalancerIP, apiService.Spec.Ports, nodes, annotations) + glog.V(2).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v, %v)", + clusterName, apiService.Namespace, apiService.Name, c.region, apiService.Spec.LoadBalancerIP, apiService.Spec.Ports, annotations) if apiService.Spec.SessionAffinity != v1.ServiceAffinityNone { // ELB supports sticky sessions, but only when configured for HTTP/HTTPS @@ -2796,6 +3188,8 @@ func (c *Cloud) EnsureLoadBalancer(clusterName string, apiService *v1.Service, n // Figure out what mappings we want on the load balancer listeners := []*elb.Listener{} + v2Mappings := []nlbPortMapping{} + portList := getPortSets(annotations[ServiceAnnotationLoadBalancerSSLPorts]) for _, port := range apiService.Spec.Ports { if port.Protocol != v1.ProtocolTCP { @@ -2805,6 +3199,17 @@ func (c *Cloud) EnsureLoadBalancer(clusterName string, apiService *v1.Service, n glog.Errorf("Ignoring port without NodePort defined: %v", port) continue } + + if isNLB(annotations) { + v2Mappings = append(v2Mappings, nlbPortMapping{ + FrontendPort: int64(port.Port), + TrafficPort: int64(port.NodePort), + // if externalTrafficPolicy == "Local", we'll override the + // health check later + HealthCheckPort: int64(port.NodePort), + HealthCheckProtocol: elbv2.ProtocolEnumTcp, + }) + } listener, err := buildListener(port, annotations, portList) if err != nil { return nil, err @@ -2833,6 +3238,69 @@ func (c *Cloud) EnsureLoadBalancer(clusterName string, apiService *v1.Service, n internalELB = true } + if isNLB(annotations) { + + if path, healthCheckNodePort := service.GetServiceHealthCheckPathPort(apiService); path != "" { + for i := range v2Mappings { + v2Mappings[i].HealthCheckPort = int64(healthCheckNodePort) + v2Mappings[i].HealthCheckPath = path + v2Mappings[i].HealthCheckProtocol = elbv2.ProtocolEnumHttp + } + } + + // Find the subnets that the ELB will live in + subnetIDs, err := 
c.findELBSubnets(internalELB) + if err != nil { + glog.Errorf("Error listing subnets in VPC: %q", err) + return nil, err + } + // Bail out early if there are no subnets + if len(subnetIDs) == 0 { + return nil, fmt.Errorf("could not find any suitable subnets for creating the ELB") + } + + loadBalancerName := cloudprovider.GetLoadBalancerName(apiService) + serviceName := types.NamespacedName{Namespace: apiService.Namespace, Name: apiService.Name} + + instanceIDs := []string{} + for id := range instances { + instanceIDs = append(instanceIDs, string(id)) + } + + v2LoadBalancer, err := c.ensureLoadBalancerv2( + serviceName, + loadBalancerName, + v2Mappings, + instanceIDs, + subnetIDs, + internalELB, + annotations, + ) + if err != nil { + return nil, err + } + + sourceRangeCidrs := []string{} + for cidr := range sourceRanges { + sourceRangeCidrs = append(sourceRangeCidrs, cidr) + } + if len(sourceRangeCidrs) == 0 { + sourceRangeCidrs = append(sourceRangeCidrs, "0.0.0.0/0") + } + + err = c.updateInstanceSecurityGroupsForNLB(v2Mappings, instances, loadBalancerName, sourceRangeCidrs) + if err != nil { + glog.Warningf("Error opening ingress rules for the load balancer to the instances: %q", err) + return nil, err + } + + // We don't have an `ensureLoadBalancerInstances()` function for elbv2 + // because `ensureLoadBalancerv2()` requires instance Ids + + // TODO: Wait for creation? + return v2toStatus(v2LoadBalancer), nil + } + // Determine if we need to set the Proxy protocol policy proxyProtocol := false proxyProtocolAnnotation := apiService.Annotations[ServiceAnnotationLoadBalancerProxyProtocol] @@ -3016,9 +3484,23 @@ func (c *Cloud) EnsureLoadBalancer(clusterName string, apiService *v1.Service, n return nil, err } + if sslPolicyName, ok := annotations[ServiceAnnotationLoadBalancerSSLNegotiationPolicy]; ok { + err := c.ensureSSLNegotiationPolicy(loadBalancer, sslPolicyName) + if err != nil { + return nil, err + } + + for _, port := range c.getLoadBalancerTLSPorts(loadBalancer) { + err := c.setSSLNegotiationPolicy(loadBalancerName, sslPolicyName, port) + if err != nil { + return nil, err + } + } + } + if path, healthCheckNodePort := service.GetServiceHealthCheckPathPort(apiService); path != "" { glog.V(4).Infof("service %v (%v) needs health checks on :%d%s)", apiService.Name, loadBalancerName, healthCheckNodePort, path) - err = c.ensureLoadBalancerHealthCheck(loadBalancer, "HTTP", healthCheckNodePort, path) + err = c.ensureLoadBalancerHealthCheck(loadBalancer, "HTTP", healthCheckNodePort, path, annotations) if err != nil { return nil, fmt.Errorf("Failed to ensure health check for localized service %v on node port %v: %q", loadBalancerName, healthCheckNodePort, err) } @@ -3034,7 +3516,7 @@ func (c *Cloud) EnsureLoadBalancer(clusterName string, apiService *v1.Service, n break } // there must be no path on TCP health check - err = c.ensureLoadBalancerHealthCheck(loadBalancer, "TCP", tcpHealthCheckPort, "") + err = c.ensureLoadBalancerHealthCheck(loadBalancer, "TCP", tcpHealthCheckPort, "", annotations) if err != nil { return nil, err } @@ -3063,6 +3545,18 @@ func (c *Cloud) EnsureLoadBalancer(clusterName string, apiService *v1.Service, n // GetLoadBalancer is an implementation of LoadBalancer.GetLoadBalancer func (c *Cloud) GetLoadBalancer(clusterName string, service *v1.Service) (*v1.LoadBalancerStatus, bool, error) { loadBalancerName := cloudprovider.GetLoadBalancerName(service) + + if isNLB(service.Annotations) { + lb, err := c.describeLoadBalancerv2(loadBalancerName) + if err != nil { + return 
nil, false, err + } + if lb == nil { + return nil, false, nil + } + return v2toStatus(lb), true, nil + } + lb, err := c.describeLoadBalancer(loadBalancerName) if err != nil { return nil, false, err @@ -3088,6 +3582,24 @@ func toStatus(lb *elb.LoadBalancerDescription) *v1.LoadBalancerStatus { return status } +func v2toStatus(lb *elbv2.LoadBalancer) *v1.LoadBalancerStatus { + status := &v1.LoadBalancerStatus{} + if lb == nil { + glog.Error("[BUG] v2toStatus got nil input, this is a Kubernetes bug, please report") + return status + } + + // We check for Active or Provisioning, the only successful statuses + if aws.StringValue(lb.DNSName) != "" && (aws.StringValue(lb.State.Code) == elbv2.LoadBalancerStateEnumActive || + aws.StringValue(lb.State.Code) == elbv2.LoadBalancerStateEnumProvisioning) { + var ingress v1.LoadBalancerIngress + ingress.Hostname = aws.StringValue(lb.DNSName) + status.Ingress = []v1.LoadBalancerIngress{ingress} + } + + return status +} + // Returns the first security group for an instance, or nil // We only create instances with one security group, so we don't expect multiple security groups. // However, if there are multiple security groups, we will choose the one tagged with our cluster filter. @@ -3293,6 +3805,139 @@ func (c *Cloud) updateInstanceSecurityGroupsForLoadBalancer(lb *elb.LoadBalancer // EnsureLoadBalancerDeleted implements LoadBalancer.EnsureLoadBalancerDeleted. func (c *Cloud) EnsureLoadBalancerDeleted(clusterName string, service *v1.Service) error { loadBalancerName := cloudprovider.GetLoadBalancerName(service) + + if isNLB(service.Annotations) { + lb, err := c.describeLoadBalancerv2(loadBalancerName) + if err != nil { + return err + } + if lb == nil { + glog.Info("Load balancer already deleted: ", loadBalancerName) + return nil + } + + // Delete the LoadBalancer and target groups + // + // Deleting a target group while associated with a load balancer will + // fail. We delete the loadbalancer first. 
This does leave the + // possibility of zombie target groups if DeleteLoadBalancer() fails + // + // * Get target groups for NLB + // * Delete Load Balancer + // * Delete target groups + // * Clean up SecurityGroupRules + { + + targetGroups, err := c.elbv2.DescribeTargetGroups( + &elbv2.DescribeTargetGroupsInput{LoadBalancerArn: lb.LoadBalancerArn}, + ) + if err != nil { + return fmt.Errorf("Error listing target groups before deleting load balancer: %q", err) + } + + _, err = c.elbv2.DeleteLoadBalancer( + &elbv2.DeleteLoadBalancerInput{LoadBalancerArn: lb.LoadBalancerArn}, + ) + if err != nil { + return fmt.Errorf("Error deleting load balancer %q: %v", loadBalancerName, err) + } + + for _, group := range targetGroups.TargetGroups { + _, err := c.elbv2.DeleteTargetGroup( + &elbv2.DeleteTargetGroupInput{TargetGroupArn: group.TargetGroupArn}, + ) + if err != nil { + return fmt.Errorf("Error deleting target groups after deleting load balancer: %q", err) + } + } + } + + { + var matchingGroups []*ec2.SecurityGroup + { + // Server side filter + describeRequest := &ec2.DescribeSecurityGroupsInput{} + filters := []*ec2.Filter{ + newEc2Filter("ip-permission.protocol", "tcp"), + } + describeRequest.Filters = c.tagging.addFilters(filters) + response, err := c.ec2.DescribeSecurityGroups(describeRequest) + if err != nil { + return fmt.Errorf("Error querying security groups for NLB: %q", err) + } + for _, sg := range response { + if !c.tagging.hasClusterTag(sg.Tags) { + continue + } + matchingGroups = append(matchingGroups, sg) + } + + // client-side filter out groups that don't have IP Rules we've + // annotated for this service + matchingGroups = filterForIPRangeDescription(matchingGroups, loadBalancerName) + } + + { + clientRule := fmt.Sprintf("%s=%s", NLBClientRuleDescription, loadBalancerName) + mtuRule := fmt.Sprintf("%s=%s", NLBMtuDiscoveryRuleDescription, loadBalancerName) + healthRule := fmt.Sprintf("%s=%s", NLBHealthCheckRuleDescription, loadBalancerName) + + for i := range matchingGroups { + removes := []*ec2.IpPermission{} + for j := range matchingGroups[i].IpPermissions { + + v4rangesToRemove := []*ec2.IpRange{} + v6rangesToRemove := []*ec2.Ipv6Range{} + + // Find IpPermission that contains k8s description + // If we removed the whole IpPermission, it could contain other non-k8s specified ranges + for k := range matchingGroups[i].IpPermissions[j].IpRanges { + description := aws.StringValue(matchingGroups[i].IpPermissions[j].IpRanges[k].Description) + if description == clientRule || description == mtuRule || description == healthRule { + v4rangesToRemove = append(v4rangesToRemove, matchingGroups[i].IpPermissions[j].IpRanges[k]) + } + } + + // Find IpPermission that contains k8s description + // If we removed the whole IpPermission, it could contain other non-k8s specified rangesk + for k := range matchingGroups[i].IpPermissions[j].Ipv6Ranges { + description := aws.StringValue(matchingGroups[i].IpPermissions[j].Ipv6Ranges[k].Description) + if description == clientRule || description == mtuRule || description == healthRule { + v6rangesToRemove = append(v6rangesToRemove, matchingGroups[i].IpPermissions[j].Ipv6Ranges[k]) + } + } + + if len(v4rangesToRemove) > 0 || len(v6rangesToRemove) > 0 { + // create a new *IpPermission to not accidentally remove UserIdGroupPairs + removedPermission := &ec2.IpPermission{ + FromPort: matchingGroups[i].IpPermissions[j].FromPort, + IpProtocol: matchingGroups[i].IpPermissions[j].IpProtocol, + IpRanges: v4rangesToRemove, + Ipv6Ranges: v6rangesToRemove, + ToPort: 
matchingGroups[i].IpPermissions[j].ToPort, + } + removes = append(removes, removedPermission) + } + + } + if len(removes) > 0 { + changed, err := c.removeSecurityGroupIngress(aws.StringValue(matchingGroups[i].GroupId), removes) + if err != nil { + return err + } + if !changed { + glog.Warning("Revoking ingress was not needed; concurrent change? groupId=", *matchingGroups[i].GroupId) + } + } + + } + + } + + } + return nil + } + lb, err := c.describeLoadBalancer(loadBalancerName) if err != nil { return err @@ -3398,6 +4043,17 @@ func (c *Cloud) UpdateLoadBalancer(clusterName string, service *v1.Service, node } loadBalancerName := cloudprovider.GetLoadBalancerName(service) + if isNLB(service.Annotations) { + lb, err := c.describeLoadBalancerv2(loadBalancerName) + if err != nil { + return err + } + if lb == nil { + return fmt.Errorf("Load balancer not found") + } + _, err = c.EnsureLoadBalancer(clusterName, service, nodes) + return err + } lb, err := c.describeLoadBalancer(loadBalancerName) if err != nil { return err @@ -3407,6 +4063,19 @@ func (c *Cloud) UpdateLoadBalancer(clusterName string, service *v1.Service, node return fmt.Errorf("Load balancer not found") } + if sslPolicyName, ok := service.Annotations[ServiceAnnotationLoadBalancerSSLNegotiationPolicy]; ok { + err := c.ensureSSLNegotiationPolicy(lb, sslPolicyName) + if err != nil { + return err + } + for _, port := range c.getLoadBalancerTLSPorts(lb) { + err := c.setSSLNegotiationPolicy(loadBalancerName, sslPolicyName, port) + if err != nil { + return err + } + } + } + err = c.ensureLoadBalancerInstances(aws.StringValue(lb.LoadBalancerName), lb.Instances, instances) if err != nil { return nil diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_fakes.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_fakes.go new file mode 100644 index 000000000000..774363272304 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_fakes.go @@ -0,0 +1,534 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package aws + +import ( + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/autoscaling" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/elb" + "github.com/aws/aws-sdk-go/service/elbv2" + "github.com/aws/aws-sdk-go/service/kms" + + "github.com/golang/glog" +) + +type FakeAWSServices struct { + region string + instances []*ec2.Instance + selfInstance *ec2.Instance + networkInterfacesMacs []string + networkInterfacesPrivateIPs [][]string + networkInterfacesVpcIDs []string + + ec2 FakeEC2 + elb ELB + elbv2 ELBV2 + asg *FakeASG + metadata *FakeMetadata + kms *FakeKMS +} + +func NewFakeAWSServices(clusterId string) *FakeAWSServices { + s := &FakeAWSServices{} + s.region = "us-east-1" + s.ec2 = &FakeEC2Impl{aws: s} + s.elb = &FakeELB{aws: s} + s.elbv2 = &FakeELBV2{aws: s} + s.asg = &FakeASG{aws: s} + s.metadata = &FakeMetadata{aws: s} + s.kms = &FakeKMS{aws: s} + + s.networkInterfacesMacs = []string{"aa:bb:cc:dd:ee:00", "aa:bb:cc:dd:ee:01"} + s.networkInterfacesVpcIDs = []string{"vpc-mac0", "vpc-mac1"} + + selfInstance := &ec2.Instance{} + selfInstance.InstanceId = aws.String("i-self") + selfInstance.Placement = &ec2.Placement{ + AvailabilityZone: aws.String("us-east-1a"), + } + selfInstance.PrivateDnsName = aws.String("ip-172-20-0-100.ec2.internal") + selfInstance.PrivateIpAddress = aws.String("192.168.0.1") + selfInstance.PublicIpAddress = aws.String("1.2.3.4") + s.selfInstance = selfInstance + s.instances = []*ec2.Instance{selfInstance} + + var tag ec2.Tag + tag.Key = aws.String(TagNameKubernetesClusterLegacy) + tag.Value = aws.String(clusterId) + selfInstance.Tags = []*ec2.Tag{&tag} + + return s +} + +func (s *FakeAWSServices) WithAz(az string) *FakeAWSServices { + if s.selfInstance.Placement == nil { + s.selfInstance.Placement = &ec2.Placement{} + } + s.selfInstance.Placement.AvailabilityZone = aws.String(az) + return s +} + +func (s *FakeAWSServices) Compute(region string) (EC2, error) { + return s.ec2, nil +} + +func (s *FakeAWSServices) LoadBalancing(region string) (ELB, error) { + return s.elb, nil +} + +func (s *FakeAWSServices) LoadBalancingV2(region string) (ELBV2, error) { + return s.elbv2, nil +} + +func (s *FakeAWSServices) Autoscaling(region string) (ASG, error) { + return s.asg, nil +} + +func (s *FakeAWSServices) Metadata() (EC2Metadata, error) { + return s.metadata, nil +} + +func (s *FakeAWSServices) KeyManagement(region string) (KMS, error) { + return s.kms, nil +} + +type FakeEC2 interface { + EC2 + CreateSubnet(*ec2.Subnet) (*ec2.CreateSubnetOutput, error) + RemoveSubnets() + CreateRouteTable(*ec2.RouteTable) (*ec2.CreateRouteTableOutput, error) + RemoveRouteTables() +} + +type FakeEC2Impl struct { + aws *FakeAWSServices + Subnets []*ec2.Subnet + DescribeSubnetsInput *ec2.DescribeSubnetsInput + RouteTables []*ec2.RouteTable + DescribeRouteTablesInput *ec2.DescribeRouteTablesInput +} + +func (ec2i *FakeEC2Impl) DescribeInstances(request *ec2.DescribeInstancesInput) ([]*ec2.Instance, error) { + matches := []*ec2.Instance{} + for _, instance := range ec2i.aws.instances { + if request.InstanceIds != nil { + if instance.InstanceId == nil { + glog.Warning("Instance with no instance id: ", instance) + continue + } + + found := false + for _, instanceID := range request.InstanceIds { + if *instanceID == *instance.InstanceId { + found = true + break + } + } + if !found { + continue + } + } + if request.Filters != nil { + allMatch := true + for _, filter := range request.Filters { + if 
!instanceMatchesFilter(instance, filter) { + allMatch = false + break + } + } + if !allMatch { + continue + } + } + matches = append(matches, instance) + } + + return matches, nil +} + +func (ec2i *FakeEC2Impl) AttachVolume(request *ec2.AttachVolumeInput) (resp *ec2.VolumeAttachment, err error) { + panic("Not implemented") +} + +func (ec2i *FakeEC2Impl) DetachVolume(request *ec2.DetachVolumeInput) (resp *ec2.VolumeAttachment, err error) { + panic("Not implemented") +} + +func (ec2i *FakeEC2Impl) DescribeVolumes(request *ec2.DescribeVolumesInput) ([]*ec2.Volume, error) { + panic("Not implemented") +} + +func (ec2i *FakeEC2Impl) CreateVolume(request *ec2.CreateVolumeInput) (resp *ec2.Volume, err error) { + panic("Not implemented") +} + +func (ec2i *FakeEC2Impl) DeleteVolume(request *ec2.DeleteVolumeInput) (resp *ec2.DeleteVolumeOutput, err error) { + panic("Not implemented") +} + +func (ec2i *FakeEC2Impl) DescribeSecurityGroups(request *ec2.DescribeSecurityGroupsInput) ([]*ec2.SecurityGroup, error) { + panic("Not implemented") +} + +func (ec2i *FakeEC2Impl) CreateSecurityGroup(*ec2.CreateSecurityGroupInput) (*ec2.CreateSecurityGroupOutput, error) { + panic("Not implemented") +} + +func (ec2i *FakeEC2Impl) DeleteSecurityGroup(*ec2.DeleteSecurityGroupInput) (*ec2.DeleteSecurityGroupOutput, error) { + panic("Not implemented") +} + +func (ec2i *FakeEC2Impl) AuthorizeSecurityGroupIngress(*ec2.AuthorizeSecurityGroupIngressInput) (*ec2.AuthorizeSecurityGroupIngressOutput, error) { + panic("Not implemented") +} + +func (ec2i *FakeEC2Impl) RevokeSecurityGroupIngress(*ec2.RevokeSecurityGroupIngressInput) (*ec2.RevokeSecurityGroupIngressOutput, error) { + panic("Not implemented") +} + +func (ec2i *FakeEC2Impl) DescribeVolumeModifications(*ec2.DescribeVolumesModificationsInput) ([]*ec2.VolumeModification, error) { + panic("Not implemented") +} + +func (ec2i *FakeEC2Impl) ModifyVolume(*ec2.ModifyVolumeInput) (*ec2.ModifyVolumeOutput, error) { + panic("Not implemented") +} + +func (ec2i *FakeEC2Impl) CreateSubnet(request *ec2.Subnet) (*ec2.CreateSubnetOutput, error) { + ec2i.Subnets = append(ec2i.Subnets, request) + response := &ec2.CreateSubnetOutput{ + Subnet: request, + } + return response, nil +} + +func (ec2i *FakeEC2Impl) DescribeSubnets(request *ec2.DescribeSubnetsInput) ([]*ec2.Subnet, error) { + ec2i.DescribeSubnetsInput = request + return ec2i.Subnets, nil +} + +func (ec2i *FakeEC2Impl) RemoveSubnets() { + ec2i.Subnets = ec2i.Subnets[:0] +} + +func (ec2i *FakeEC2Impl) CreateTags(*ec2.CreateTagsInput) (*ec2.CreateTagsOutput, error) { + panic("Not implemented") +} + +func (ec2i *FakeEC2Impl) DescribeRouteTables(request *ec2.DescribeRouteTablesInput) ([]*ec2.RouteTable, error) { + ec2i.DescribeRouteTablesInput = request + return ec2i.RouteTables, nil +} + +func (ec2i *FakeEC2Impl) CreateRouteTable(request *ec2.RouteTable) (*ec2.CreateRouteTableOutput, error) { + ec2i.RouteTables = append(ec2i.RouteTables, request) + response := &ec2.CreateRouteTableOutput{ + RouteTable: request, + } + return response, nil +} + +func (ec2i *FakeEC2Impl) RemoveRouteTables() { + ec2i.RouteTables = ec2i.RouteTables[:0] +} + +func (ec2i *FakeEC2Impl) CreateRoute(request *ec2.CreateRouteInput) (*ec2.CreateRouteOutput, error) { + panic("Not implemented") +} + +func (ec2i *FakeEC2Impl) DeleteRoute(request *ec2.DeleteRouteInput) (*ec2.DeleteRouteOutput, error) { + panic("Not implemented") +} + +func (ec2i *FakeEC2Impl) ModifyInstanceAttribute(request *ec2.ModifyInstanceAttributeInput) (*ec2.ModifyInstanceAttributeOutput, 
error) { + panic("Not implemented") +} + +func (ec2i *FakeEC2Impl) DescribeVpcs(request *ec2.DescribeVpcsInput) (*ec2.DescribeVpcsOutput, error) { + return &ec2.DescribeVpcsOutput{Vpcs: []*ec2.Vpc{{CidrBlock: aws.String("172.20.0.0/16")}}}, nil +} + +type FakeMetadata struct { + aws *FakeAWSServices +} + +func (m *FakeMetadata) GetMetadata(key string) (string, error) { + networkInterfacesPrefix := "network/interfaces/macs/" + i := m.aws.selfInstance + if key == "placement/availability-zone" { + az := "" + if i.Placement != nil { + az = aws.StringValue(i.Placement.AvailabilityZone) + } + return az, nil + } else if key == "instance-id" { + return aws.StringValue(i.InstanceId), nil + } else if key == "local-hostname" { + return aws.StringValue(i.PrivateDnsName), nil + } else if key == "public-hostname" { + return aws.StringValue(i.PublicDnsName), nil + } else if key == "local-ipv4" { + return aws.StringValue(i.PrivateIpAddress), nil + } else if key == "public-ipv4" { + return aws.StringValue(i.PublicIpAddress), nil + } else if strings.HasPrefix(key, networkInterfacesPrefix) { + if key == networkInterfacesPrefix { + return strings.Join(m.aws.networkInterfacesMacs, "/\n") + "/\n", nil + } else { + keySplit := strings.Split(key, "/") + macParam := keySplit[3] + if len(keySplit) == 5 && keySplit[4] == "vpc-id" { + for i, macElem := range m.aws.networkInterfacesMacs { + if macParam == macElem { + return m.aws.networkInterfacesVpcIDs[i], nil + } + } + } + if len(keySplit) == 5 && keySplit[4] == "local-ipv4s" { + for i, macElem := range m.aws.networkInterfacesMacs { + if macParam == macElem { + return strings.Join(m.aws.networkInterfacesPrivateIPs[i], "/\n"), nil + } + } + } + return "", nil + } + } else { + return "", nil + } +} + +type FakeELB struct { + aws *FakeAWSServices +} + +func (elb *FakeELB) CreateLoadBalancer(*elb.CreateLoadBalancerInput) (*elb.CreateLoadBalancerOutput, error) { + panic("Not implemented") +} + +func (elb *FakeELB) DeleteLoadBalancer(input *elb.DeleteLoadBalancerInput) (*elb.DeleteLoadBalancerOutput, error) { + panic("Not implemented") +} + +func (elb *FakeELB) DescribeLoadBalancers(input *elb.DescribeLoadBalancersInput) (*elb.DescribeLoadBalancersOutput, error) { + panic("Not implemented") +} + +func (elb *FakeELB) AddTags(input *elb.AddTagsInput) (*elb.AddTagsOutput, error) { + panic("Not implemented") +} + +func (elb *FakeELB) RegisterInstancesWithLoadBalancer(*elb.RegisterInstancesWithLoadBalancerInput) (*elb.RegisterInstancesWithLoadBalancerOutput, error) { + panic("Not implemented") +} + +func (elb *FakeELB) DeregisterInstancesFromLoadBalancer(*elb.DeregisterInstancesFromLoadBalancerInput) (*elb.DeregisterInstancesFromLoadBalancerOutput, error) { + panic("Not implemented") +} + +func (elb *FakeELB) DetachLoadBalancerFromSubnets(*elb.DetachLoadBalancerFromSubnetsInput) (*elb.DetachLoadBalancerFromSubnetsOutput, error) { + panic("Not implemented") +} + +func (elb *FakeELB) AttachLoadBalancerToSubnets(*elb.AttachLoadBalancerToSubnetsInput) (*elb.AttachLoadBalancerToSubnetsOutput, error) { + panic("Not implemented") +} + +func (elb *FakeELB) CreateLoadBalancerListeners(*elb.CreateLoadBalancerListenersInput) (*elb.CreateLoadBalancerListenersOutput, error) { + panic("Not implemented") +} + +func (elb *FakeELB) DeleteLoadBalancerListeners(*elb.DeleteLoadBalancerListenersInput) (*elb.DeleteLoadBalancerListenersOutput, error) { + panic("Not implemented") +} + +func (elb *FakeELB) ApplySecurityGroupsToLoadBalancer(*elb.ApplySecurityGroupsToLoadBalancerInput) 
(*elb.ApplySecurityGroupsToLoadBalancerOutput, error) { + panic("Not implemented") +} + +func (elb *FakeELB) ConfigureHealthCheck(*elb.ConfigureHealthCheckInput) (*elb.ConfigureHealthCheckOutput, error) { + panic("Not implemented") +} + +func (elb *FakeELB) CreateLoadBalancerPolicy(*elb.CreateLoadBalancerPolicyInput) (*elb.CreateLoadBalancerPolicyOutput, error) { + panic("Not implemented") +} + +func (elb *FakeELB) SetLoadBalancerPoliciesForBackendServer(*elb.SetLoadBalancerPoliciesForBackendServerInput) (*elb.SetLoadBalancerPoliciesForBackendServerOutput, error) { + panic("Not implemented") +} + +func (elb *FakeELB) SetLoadBalancerPoliciesOfListener(input *elb.SetLoadBalancerPoliciesOfListenerInput) (*elb.SetLoadBalancerPoliciesOfListenerOutput, error) { + panic("Not implemented") +} + +func (elb *FakeELB) DescribeLoadBalancerPolicies(input *elb.DescribeLoadBalancerPoliciesInput) (*elb.DescribeLoadBalancerPoliciesOutput, error) { + panic("Not implemented") +} + +func (elb *FakeELB) DescribeLoadBalancerAttributes(*elb.DescribeLoadBalancerAttributesInput) (*elb.DescribeLoadBalancerAttributesOutput, error) { + panic("Not implemented") +} + +func (elb *FakeELB) ModifyLoadBalancerAttributes(*elb.ModifyLoadBalancerAttributesInput) (*elb.ModifyLoadBalancerAttributesOutput, error) { + panic("Not implemented") +} + +func (self *FakeELB) expectDescribeLoadBalancers(loadBalancerName string) { + panic("Not implemented") +} + +type FakeELBV2 struct { + aws *FakeAWSServices +} + +func (self *FakeELBV2) AddTags(input *elbv2.AddTagsInput) (*elbv2.AddTagsOutput, error) { + panic("Not implemented") +} + +func (self *FakeELBV2) CreateLoadBalancer(*elbv2.CreateLoadBalancerInput) (*elbv2.CreateLoadBalancerOutput, error) { + panic("Not implemented") +} +func (self *FakeELBV2) DescribeLoadBalancers(*elbv2.DescribeLoadBalancersInput) (*elbv2.DescribeLoadBalancersOutput, error) { + panic("Not implemented") +} +func (self *FakeELBV2) DeleteLoadBalancer(*elbv2.DeleteLoadBalancerInput) (*elbv2.DeleteLoadBalancerOutput, error) { + panic("Not implemented") +} + +func (self *FakeELBV2) ModifyLoadBalancerAttributes(*elbv2.ModifyLoadBalancerAttributesInput) (*elbv2.ModifyLoadBalancerAttributesOutput, error) { + panic("Not implemented") +} +func (self *FakeELBV2) DescribeLoadBalancerAttributes(*elbv2.DescribeLoadBalancerAttributesInput) (*elbv2.DescribeLoadBalancerAttributesOutput, error) { + panic("Not implemented") +} + +func (self *FakeELBV2) CreateTargetGroup(*elbv2.CreateTargetGroupInput) (*elbv2.CreateTargetGroupOutput, error) { + panic("Not implemented") +} +func (self *FakeELBV2) DescribeTargetGroups(*elbv2.DescribeTargetGroupsInput) (*elbv2.DescribeTargetGroupsOutput, error) { + panic("Not implemented") +} +func (self *FakeELBV2) ModifyTargetGroup(*elbv2.ModifyTargetGroupInput) (*elbv2.ModifyTargetGroupOutput, error) { + panic("Not implemented") +} +func (self *FakeELBV2) DeleteTargetGroup(*elbv2.DeleteTargetGroupInput) (*elbv2.DeleteTargetGroupOutput, error) { + panic("Not implemented") +} + +func (self *FakeELBV2) DescribeTargetHealth(input *elbv2.DescribeTargetHealthInput) (*elbv2.DescribeTargetHealthOutput, error) { + panic("Not implemented") +} + +func (self *FakeELBV2) DescribeTargetGroupAttributes(*elbv2.DescribeTargetGroupAttributesInput) (*elbv2.DescribeTargetGroupAttributesOutput, error) { + panic("Not implemented") +} +func (self *FakeELBV2) ModifyTargetGroupAttributes(*elbv2.ModifyTargetGroupAttributesInput) (*elbv2.ModifyTargetGroupAttributesOutput, error) { + panic("Not implemented") +} + +func 
(self *FakeELBV2) RegisterTargets(*elbv2.RegisterTargetsInput) (*elbv2.RegisterTargetsOutput, error) { + panic("Not implemented") +} +func (self *FakeELBV2) DeregisterTargets(*elbv2.DeregisterTargetsInput) (*elbv2.DeregisterTargetsOutput, error) { + panic("Not implemented") +} + +func (self *FakeELBV2) CreateListener(*elbv2.CreateListenerInput) (*elbv2.CreateListenerOutput, error) { + panic("Not implemented") +} +func (self *FakeELBV2) DescribeListeners(*elbv2.DescribeListenersInput) (*elbv2.DescribeListenersOutput, error) { + panic("Not implemented") +} +func (self *FakeELBV2) DeleteListener(*elbv2.DeleteListenerInput) (*elbv2.DeleteListenerOutput, error) { + panic("Not implemented") +} +func (self *FakeELBV2) ModifyListener(*elbv2.ModifyListenerInput) (*elbv2.ModifyListenerOutput, error) { + panic("Not implemented") +} + +func (self *FakeELBV2) WaitUntilLoadBalancersDeleted(*elbv2.DescribeLoadBalancersInput) error { + panic("Not implemented") +} + +type FakeASG struct { + aws *FakeAWSServices +} + +func (a *FakeASG) UpdateAutoScalingGroup(*autoscaling.UpdateAutoScalingGroupInput) (*autoscaling.UpdateAutoScalingGroupOutput, error) { + panic("Not implemented") +} + +func (a *FakeASG) DescribeAutoScalingGroups(*autoscaling.DescribeAutoScalingGroupsInput) (*autoscaling.DescribeAutoScalingGroupsOutput, error) { + panic("Not implemented") +} + +type FakeKMS struct { + aws *FakeAWSServices +} + +func (kms *FakeKMS) DescribeKey(*kms.DescribeKeyInput) (*kms.DescribeKeyOutput, error) { + panic("Not implemented") +} + +func instanceMatchesFilter(instance *ec2.Instance, filter *ec2.Filter) bool { + name := *filter.Name + if name == "private-dns-name" { + if instance.PrivateDnsName == nil { + return false + } + return contains(filter.Values, *instance.PrivateDnsName) + } + + if name == "instance-state-name" { + return contains(filter.Values, *instance.State.Name) + } + + if name == "tag-key" { + for _, instanceTag := range instance.Tags { + if contains(filter.Values, aws.StringValue(instanceTag.Key)) { + return true + } + } + return false + } + + if strings.HasPrefix(name, "tag:") { + tagName := name[4:] + for _, instanceTag := range instance.Tags { + if aws.StringValue(instanceTag.Key) == tagName && contains(filter.Values, aws.StringValue(instanceTag.Value)) { + return true + } + } + return false + } + + panic("Unknown filter name: " + name) +} + +func contains(haystack []*string, needle string) bool { + for _, s := range haystack { + // (deliberately panic if s == nil) + if needle == *s { + return true + } + } + return false +} diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_loadbalancer.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_loadbalancer.go index 03eefb296e09..6baaca2ac6c0 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_loadbalancer.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws_loadbalancer.go @@ -17,21 +17,53 @@ limitations under the License. 
package aws import ( + "crypto/sha1" "fmt" "reflect" "strconv" "strings" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/elb" + "github.com/aws/aws-sdk-go/service/elbv2" "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" ) -const ProxyProtocolPolicyName = "k8s-proxyprotocol-enabled" +const ( + ProxyProtocolPolicyName = "k8s-proxyprotocol-enabled" + + SSLNegotiationPolicyNameFormat = "k8s-SSLNegotiationPolicy-%s" +) + +var ( + // Defaults for ELB Healthcheck + defaultHCHealthyThreshold = int64(2) + defaultHCUnhealthyThreshold = int64(6) + defaultHCTimeout = int64(5) + defaultHCInterval = int64(10) +) + +func isNLB(annotations map[string]string) bool { + if annotations[ServiceAnnotationLoadBalancerType] == "nlb" { + return true + } + return false +} + +type nlbPortMapping struct { + FrontendPort int64 + TrafficPort int64 + ClientCIDR string + + HealthCheckPort int64 + HealthCheckPath string + HealthCheckProtocol string +} // getLoadBalancerAdditionalTags converts the comma separated list of key-value // pairs in the ServiceAnnotationLoadBalancerAdditionalTags annotation and returns @@ -62,6 +94,760 @@ func getLoadBalancerAdditionalTags(annotations map[string]string) map[string]str return additionalTags } +// ensureLoadBalancerv2 ensures a v2 load balancer is created +func (c *Cloud) ensureLoadBalancerv2(namespacedName types.NamespacedName, loadBalancerName string, mappings []nlbPortMapping, instanceIDs, subnetIDs []string, internalELB bool, annotations map[string]string) (*elbv2.LoadBalancer, error) { + loadBalancer, err := c.describeLoadBalancerv2(loadBalancerName) + if err != nil { + return nil, err + } + + dirty := false + + // Get additional tags set by the user + tags := getLoadBalancerAdditionalTags(annotations) + // Add default tags + tags[TagNameKubernetesService] = namespacedName.String() + tags = c.tagging.buildTags(ResourceLifecycleOwned, tags) + + if loadBalancer == nil { + // Create the LB + createRequest := &elbv2.CreateLoadBalancerInput{ + Type: aws.String(elbv2.LoadBalancerTypeEnumNetwork), + Name: aws.String(loadBalancerName), + } + if internalELB { + createRequest.Scheme = aws.String("internal") + } + + // We are supposed to specify one subnet per AZ. + // TODO: What happens if we have more than one subnet per AZ? 
+ createRequest.SubnetMappings = createSubnetMappings(subnetIDs) + + for k, v := range tags { + createRequest.Tags = append(createRequest.Tags, &elbv2.Tag{ + Key: aws.String(k), Value: aws.String(v), + }) + } + + glog.Infof("Creating load balancer for %v with name: %s", namespacedName, loadBalancerName) + createResponse, err := c.elbv2.CreateLoadBalancer(createRequest) + if err != nil { + return nil, fmt.Errorf("Error creating load balancer: %q", err) + } + + loadBalancer = createResponse.LoadBalancers[0] + + // Create Target Groups + addTagsInput := &elbv2.AddTagsInput{ + ResourceArns: []*string{}, + Tags: []*elbv2.Tag{}, + } + + for i := range mappings { + // It is easier to keep track of updates by having possibly + // duplicate target groups where the backend port is the same + _, targetGroupArn, err := c.createListenerV2(createResponse.LoadBalancers[0].LoadBalancerArn, mappings[i], namespacedName, instanceIDs, *createResponse.LoadBalancers[0].VpcId) + if err != nil { + return nil, fmt.Errorf("Error creating listener: %q", err) + } + addTagsInput.ResourceArns = append(addTagsInput.ResourceArns, targetGroupArn) + + } + + // Add tags to targets + for k, v := range tags { + addTagsInput.Tags = append(addTagsInput.Tags, &elbv2.Tag{ + Key: aws.String(k), Value: aws.String(v), + }) + } + if len(addTagsInput.ResourceArns) > 0 && len(addTagsInput.Tags) > 0 { + _, err = c.elbv2.AddTags(addTagsInput) + if err != nil { + return nil, fmt.Errorf("Error adding tags after creating Load Balancer: %q", err) + } + } + } else { + // TODO: Sync internal vs non-internal + + // sync mappings + { + listenerDescriptions, err := c.elbv2.DescribeListeners( + &elbv2.DescribeListenersInput{ + LoadBalancerArn: loadBalancer.LoadBalancerArn, + }, + ) + if err != nil { + return nil, fmt.Errorf("Error describing listeners: %q", err) + } + + // actual maps FrontendPort to an elbv2.Listener + actual := map[int64]*elbv2.Listener{} + for _, listener := range listenerDescriptions.Listeners { + actual[*listener.Port] = listener + } + + actualTargetGroups, err := c.elbv2.DescribeTargetGroups( + &elbv2.DescribeTargetGroupsInput{ + LoadBalancerArn: loadBalancer.LoadBalancerArn, + }, + ) + if err != nil { + return nil, fmt.Errorf("Error listing target groups: %q", err) + } + + nodePortTargetGroup := map[int64]*elbv2.TargetGroup{} + for _, targetGroup := range actualTargetGroups.TargetGroups { + nodePortTargetGroup[*targetGroup.Port] = targetGroup + } + + // Create Target Groups + addTagsInput := &elbv2.AddTagsInput{ + ResourceArns: []*string{}, + Tags: []*elbv2.Tag{}, + } + + // Handle additions/modifications + for _, mapping := range mappings { + frontendPort := mapping.FrontendPort + nodePort := mapping.TrafficPort + + // modifications + if listener, ok := actual[frontendPort]; ok { + // nodePort must have changed, we'll need to delete old TG + // and recreate + if targetGroup, ok := nodePortTargetGroup[nodePort]; !ok { + // Create new Target group + targetName := createTargetName(namespacedName, frontendPort, nodePort) + targetGroup, err = c.ensureTargetGroup( + nil, + mapping, + instanceIDs, + targetName, + *loadBalancer.VpcId, + ) + if err != nil { + return nil, err + } + + // Associate new target group to LB + _, err := c.elbv2.ModifyListener(&elbv2.ModifyListenerInput{ + ListenerArn: listener.ListenerArn, + Port: aws.Int64(frontendPort), + Protocol: aws.String("TCP"), + DefaultActions: []*elbv2.Action{{ + TargetGroupArn: targetGroup.TargetGroupArn, + Type: aws.String("forward"), + }}, + }) + if err != nil { + return nil, 
fmt.Errorf("Error updating load balancer listener: %q", err) + } + + // Delete old target group + _, err = c.elbv2.DeleteTargetGroup(&elbv2.DeleteTargetGroupInput{ + TargetGroupArn: listener.DefaultActions[0].TargetGroupArn, + }) + if err != nil { + return nil, fmt.Errorf("Error deleting old target group: %q", err) + } + + } else { + // Run ensureTargetGroup to make sure instances in service are up-to-date + targetName := createTargetName(namespacedName, frontendPort, nodePort) + _, err = c.ensureTargetGroup( + targetGroup, + mapping, + instanceIDs, + targetName, + *loadBalancer.VpcId, + ) + if err != nil { + return nil, err + } + } + dirty = true + continue + } + + // Additions + _, targetGroupArn, err := c.createListenerV2(loadBalancer.LoadBalancerArn, mapping, namespacedName, instanceIDs, *loadBalancer.VpcId) + if err != nil { + return nil, err + } + addTagsInput.ResourceArns = append(addTagsInput.ResourceArns, targetGroupArn) + dirty = true + } + + frontEndPorts := map[int64]bool{} + for i := range mappings { + frontEndPorts[mappings[i].FrontendPort] = true + } + + // handle deletions + for port, listener := range actual { + if _, ok := frontEndPorts[port]; !ok { + err := c.deleteListenerV2(listener) + if err != nil { + return nil, err + } + dirty = true + } + } + + // Add tags to new targets + for k, v := range tags { + addTagsInput.Tags = append(addTagsInput.Tags, &elbv2.Tag{ + Key: aws.String(k), Value: aws.String(v), + }) + } + if len(addTagsInput.ResourceArns) > 0 && len(addTagsInput.Tags) > 0 { + _, err = c.elbv2.AddTags(addTagsInput) + if err != nil { + return nil, fmt.Errorf("Error adding tags after modifying load balancer targets: %q", err) + } + } + } + + // Subnets cannot be modified on NLBs + if dirty { + loadBalancers, err := c.elbv2.DescribeLoadBalancers( + &elbv2.DescribeLoadBalancersInput{ + LoadBalancerArns: []*string{ + loadBalancer.LoadBalancerArn, + }, + }, + ) + if err != nil { + return nil, fmt.Errorf("Error retrieving load balancer after update: %q", err) + } + loadBalancer = loadBalancers.LoadBalancers[0] + } + } + return loadBalancer, nil +} + +// create a valid target group name - ensure name is not over 32 characters +func createTargetName(namespacedName types.NamespacedName, frontendPort, nodePort int64) string { + sha := fmt.Sprintf("%x", sha1.Sum([]byte(namespacedName.String())))[:13] + return fmt.Sprintf("k8s-tg-%s-%d-%d", sha, frontendPort, nodePort) +} + +func (c *Cloud) createListenerV2(loadBalancerArn *string, mapping nlbPortMapping, namespacedName types.NamespacedName, instanceIDs []string, vpcID string) (listener *elbv2.Listener, targetGroupArn *string, err error) { + targetName := createTargetName(namespacedName, mapping.FrontendPort, mapping.TrafficPort) + + glog.Infof("Creating load balancer target group for %v with name: %s", namespacedName, targetName) + target, err := c.ensureTargetGroup( + nil, + mapping, + instanceIDs, + targetName, + vpcID, + ) + if err != nil { + return nil, aws.String(""), err + } + + createListernerInput := &elbv2.CreateListenerInput{ + LoadBalancerArn: loadBalancerArn, + Port: aws.Int64(mapping.FrontendPort), + Protocol: aws.String("TCP"), + DefaultActions: []*elbv2.Action{{ + TargetGroupArn: target.TargetGroupArn, + Type: aws.String(elbv2.ActionTypeEnumForward), + }}, + } + glog.Infof("Creating load balancer listener for %v", namespacedName) + createListenerOutput, err := c.elbv2.CreateListener(createListernerInput) + if err != nil { + return nil, aws.String(""), fmt.Errorf("Error creating load balancer listener: %q", 
err) + } + return createListenerOutput.Listeners[0], target.TargetGroupArn, nil +} + +// cleans up listener and corresponding target group +func (c *Cloud) deleteListenerV2(listener *elbv2.Listener) error { + _, err := c.elbv2.DeleteListener(&elbv2.DeleteListenerInput{ListenerArn: listener.ListenerArn}) + if err != nil { + return fmt.Errorf("Error deleting load balancer listener: %q", err) + } + _, err = c.elbv2.DeleteTargetGroup(&elbv2.DeleteTargetGroupInput{TargetGroupArn: listener.DefaultActions[0].TargetGroupArn}) + if err != nil { + return fmt.Errorf("Error deleting load balancer target group: %q", err) + } + return nil +} + +// ensureTargetGroup creates a target group with a set of instances +func (c *Cloud) ensureTargetGroup(targetGroup *elbv2.TargetGroup, mapping nlbPortMapping, instances []string, name string, vpcID string) (*elbv2.TargetGroup, error) { + dirty := false + if targetGroup == nil { + + input := &elbv2.CreateTargetGroupInput{ + VpcId: aws.String(vpcID), + Name: aws.String(name), + Port: aws.Int64(mapping.TrafficPort), + Protocol: aws.String("TCP"), + TargetType: aws.String("instance"), + HealthCheckIntervalSeconds: aws.Int64(30), + HealthCheckPort: aws.String("traffic-port"), + HealthCheckProtocol: aws.String("TCP"), + HealthyThresholdCount: aws.Int64(3), + UnhealthyThresholdCount: aws.Int64(3), + } + + input.HealthCheckProtocol = aws.String(mapping.HealthCheckProtocol) + if mapping.HealthCheckProtocol != elbv2.ProtocolEnumTcp { + input.HealthCheckPath = aws.String(mapping.HealthCheckPath) + } + + // Account for externalTrafficPolicy = "Local" + if mapping.HealthCheckPort != mapping.TrafficPort { + input.HealthCheckPort = aws.String(strconv.Itoa(int(mapping.HealthCheckPort))) + } + + result, err := c.elbv2.CreateTargetGroup(input) + if err != nil { + return nil, fmt.Errorf("Error creating load balancer target group: %q", err) + } + if len(result.TargetGroups) != 1 { + return nil, fmt.Errorf("Expected only one target group on CreateTargetGroup, got %d groups", len(result.TargetGroups)) + } + + registerInput := &elbv2.RegisterTargetsInput{ + TargetGroupArn: result.TargetGroups[0].TargetGroupArn, + Targets: []*elbv2.TargetDescription{}, + } + for _, instanceID := range instances { + registerInput.Targets = append(registerInput.Targets, &elbv2.TargetDescription{ + Id: aws.String(string(instanceID)), + Port: aws.Int64(mapping.TrafficPort), + }) + } + + _, err = c.elbv2.RegisterTargets(registerInput) + if err != nil { + return nil, fmt.Errorf("Error registering targets for load balancer: %q", err) + } + + return result.TargetGroups[0], nil + } + + // handle instances in service + { + healthResponse, err := c.elbv2.DescribeTargetHealth(&elbv2.DescribeTargetHealthInput{TargetGroupArn: targetGroup.TargetGroupArn}) + if err != nil { + return nil, fmt.Errorf("Error describing target group health: %q", err) + } + actualIDs := []string{} + for _, healthDescription := range healthResponse.TargetHealthDescriptions { + if healthDescription.TargetHealth.Reason != nil { + switch aws.StringValue(healthDescription.TargetHealth.Reason) { + case elbv2.TargetHealthReasonEnumTargetDeregistrationInProgress: + // We don't need to count this instance in service if it is + // on its way out + default: + actualIDs = append(actualIDs, *healthDescription.Target.Id) + } + } + } + + actual := sets.NewString(actualIDs...) + expected := sets.NewString(instances...) 
+ + additions := expected.Difference(actual) + removals := actual.Difference(expected) + + if len(additions) > 0 { + registerInput := &elbv2.RegisterTargetsInput{ + TargetGroupArn: targetGroup.TargetGroupArn, + Targets: []*elbv2.TargetDescription{}, + } + for instanceID := range additions { + registerInput.Targets = append(registerInput.Targets, &elbv2.TargetDescription{ + Id: aws.String(instanceID), + Port: aws.Int64(mapping.TrafficPort), + }) + } + _, err := c.elbv2.RegisterTargets(registerInput) + if err != nil { + return nil, fmt.Errorf("Error registering new targets in target group: %q", err) + } + dirty = true + } + + if len(removals) > 0 { + deregisterInput := &elbv2.DeregisterTargetsInput{ + TargetGroupArn: targetGroup.TargetGroupArn, + Targets: []*elbv2.TargetDescription{}, + } + for instanceID := range removals { + deregisterInput.Targets = append(deregisterInput.Targets, &elbv2.TargetDescription{ + Id: aws.String(instanceID), + Port: aws.Int64(mapping.TrafficPort), + }) + } + _, err := c.elbv2.DeregisterTargets(deregisterInput) + if err != nil { + return nil, fmt.Errorf("Error trying to deregister targets in target group: %q", err) + } + dirty = true + } + } + + // ensure the health check is correct + { + dirtyHealthCheck := false + + input := &elbv2.ModifyTargetGroupInput{ + TargetGroupArn: targetGroup.TargetGroupArn, + } + + if aws.StringValue(targetGroup.HealthCheckProtocol) != mapping.HealthCheckProtocol { + input.HealthCheckProtocol = aws.String(mapping.HealthCheckProtocol) + dirtyHealthCheck = true + } + if aws.StringValue(targetGroup.HealthCheckPort) != strconv.Itoa(int(mapping.HealthCheckPort)) { + input.HealthCheckPort = aws.String(strconv.Itoa(int(mapping.HealthCheckPort))) + dirtyHealthCheck = true + } + if mapping.HealthCheckPath != "" && mapping.HealthCheckProtocol != elbv2.ProtocolEnumTcp { + input.HealthCheckPath = aws.String(mapping.HealthCheckPath) + dirtyHealthCheck = true + } + + if dirtyHealthCheck { + _, err := c.elbv2.ModifyTargetGroup(input) + if err != nil { + return nil, fmt.Errorf("Error modifying target group health check: %q", err) + } + + dirty = true + } + } + + if dirty { + result, err := c.elbv2.DescribeTargetGroups(&elbv2.DescribeTargetGroupsInput{ + Names: []*string{aws.String(name)}, + }) + if err != nil { + return nil, fmt.Errorf("Error retrieving target group after creation/update: %q", err) + } + targetGroup = result.TargetGroups[0] + } + + return targetGroup, nil +} + +func portsForNLB(lbName string, sg *ec2.SecurityGroup, clientTraffic bool) sets.Int64 { + response := sets.NewInt64() + var annotation string + if clientTraffic { + annotation = fmt.Sprintf("%s=%s", NLBClientRuleDescription, lbName) + } else { + annotation = fmt.Sprintf("%s=%s", NLBHealthCheckRuleDescription, lbName) + } + + for i := range sg.IpPermissions { + for j := range sg.IpPermissions[i].IpRanges { + description := aws.StringValue(sg.IpPermissions[i].IpRanges[j].Description) + if description == annotation { + // TODO should probably check FromPort == ToPort + response.Insert(aws.Int64Value(sg.IpPermissions[i].FromPort)) + } + } + } + return response +} + +// filterForIPRangeDescription filters in security groups that have IpRange Descriptions that match a loadBalancerName +func filterForIPRangeDescription(securityGroups []*ec2.SecurityGroup, lbName string) []*ec2.SecurityGroup { + response := []*ec2.SecurityGroup{} + clientRule := fmt.Sprintf("%s=%s", NLBClientRuleDescription, lbName) + healthRule := fmt.Sprintf("%s=%s", NLBHealthCheckRuleDescription, lbName) + for i 
:= range securityGroups { + for j := range securityGroups[i].IpPermissions { + for k := range securityGroups[i].IpPermissions[j].IpRanges { + description := aws.StringValue(securityGroups[i].IpPermissions[j].IpRanges[k].Description) + if description == clientRule || description == healthRule { + response = append(response, securityGroups[i]) + } + } + } + } + return response +} + +func (c *Cloud) getVpcCidrBlock() (*string, error) { + vpcs, err := c.ec2.DescribeVpcs(&ec2.DescribeVpcsInput{ + VpcIds: []*string{aws.String(c.vpcID)}, + }) + if err != nil { + return nil, fmt.Errorf("Error querying VPC for ELB: %q", err) + } + if len(vpcs.Vpcs) != 1 { + return nil, fmt.Errorf("Error querying VPC for ELB, got %d vpcs for %s", len(vpcs.Vpcs), c.vpcID) + } + return vpcs.Vpcs[0].CidrBlock, nil +} + +// abstraction for updating SG rules +// if clientTraffic is false, then only update HealthCheck rules +func (c *Cloud) updateInstanceSecurityGroupsForNLBTraffic(actualGroups []*ec2.SecurityGroup, desiredSgIds []string, ports []int64, lbName string, clientCidrs []string, clientTraffic bool) error { + + // Map containing the groups we want to make changes on; the ports to make + // changes on; and whether to add or remove it. true to add, false to remove + portChanges := map[string]map[int64]bool{} + + for _, id := range desiredSgIds { + // consider everything an addition for now + if _, ok := portChanges[id]; !ok { + portChanges[id] = make(map[int64]bool) + } + for _, port := range ports { + portChanges[id][port] = true + } + } + + // Compare to actual groups + for _, actualGroup := range actualGroups { + actualGroupID := aws.StringValue(actualGroup.GroupId) + if actualGroupID == "" { + glog.Warning("Ignoring group without ID: ", actualGroup) + continue + } + + addingMap, ok := portChanges[actualGroupID] + if ok { + desiredSet := sets.NewInt64() + for port := range addingMap { + desiredSet.Insert(port) + } + existingSet := portsForNLB(lbName, actualGroup, clientTraffic) + + // remove from portChanges ports that are already allowed + if intersection := desiredSet.Intersection(existingSet); intersection.Len() > 0 { + for p := range intersection { + delete(portChanges[actualGroupID], p) + } + } + + // allowed ports that need to be removed + if difference := existingSet.Difference(desiredSet); difference.Len() > 0 { + for p := range difference { + portChanges[actualGroupID][p] = false + } + } + } + } + + // Make changes we've planned on + for instanceSecurityGroupID, portMap := range portChanges { + adds := []*ec2.IpPermission{} + removes := []*ec2.IpPermission{} + for port, add := range portMap { + if add { + if clientTraffic { + glog.V(2).Infof("Adding rule for client MTU discovery from the network load balancer (%s) to instances (%s)", clientCidrs, instanceSecurityGroupID) + glog.V(2).Infof("Adding rule for client traffic from the network load balancer (%s) to instances (%s)", clientCidrs, instanceSecurityGroupID) + } else { + glog.V(2).Infof("Adding rule for health check traffic from the network load balancer (%s) to instances (%s)", clientCidrs, instanceSecurityGroupID) + } + } else { + if clientTraffic { + glog.V(2).Infof("Removing rule for client MTU discovery from the network load balancer (%s) to instances (%s)", clientCidrs, instanceSecurityGroupID) + glog.V(2).Infof("Removing rule for client traffic from the network load balancer (%s) to instance (%s)", clientCidrs, instanceSecurityGroupID) + } + glog.V(2).Infof("Removing rule for health check traffic from the network load balancer (%s) to 
instance (%s)", clientCidrs, instanceSecurityGroupID) + } + + if clientTraffic { + clientRuleAnnotation := fmt.Sprintf("%s=%s", NLBClientRuleDescription, lbName) + mtuRuleAnnotation := fmt.Sprintf("%s=%s", NLBMtuDiscoveryRuleDescription, lbName) + // Client Traffic + permission := &ec2.IpPermission{ + FromPort: aws.Int64(port), + ToPort: aws.Int64(port), + IpProtocol: aws.String("tcp"), + } + ranges := []*ec2.IpRange{} + for _, cidr := range clientCidrs { + ranges = append(ranges, &ec2.IpRange{ + CidrIp: aws.String(cidr), + Description: aws.String(clientRuleAnnotation), + }) + } + permission.IpRanges = ranges + if add { + adds = append(adds, permission) + } else { + removes = append(removes, permission) + } + + // MTU discovery + permission = &ec2.IpPermission{ + IpProtocol: aws.String("icmp"), + FromPort: aws.Int64(3), + ToPort: aws.Int64(4), + } + ranges = []*ec2.IpRange{} + for _, cidr := range clientCidrs { + ranges = append(ranges, &ec2.IpRange{ + CidrIp: aws.String(cidr), + Description: aws.String(mtuRuleAnnotation), + }) + } + permission.IpRanges = ranges + if add { + adds = append(adds, permission) + } else { + removes = append(removes, permission) + } + } else { + healthRuleAnnotation := fmt.Sprintf("%s=%s", NLBHealthCheckRuleDescription, lbName) + + // NLB HealthCheck + permission := &ec2.IpPermission{ + FromPort: aws.Int64(port), + ToPort: aws.Int64(port), + IpProtocol: aws.String("tcp"), + } + ranges := []*ec2.IpRange{} + for _, cidr := range clientCidrs { + ranges = append(ranges, &ec2.IpRange{ + CidrIp: aws.String(cidr), + Description: aws.String(healthRuleAnnotation), + }) + } + permission.IpRanges = ranges + if add { + adds = append(adds, permission) + } else { + removes = append(removes, permission) + } + } + + } + if len(adds) > 0 { + changed, err := c.addSecurityGroupIngress(instanceSecurityGroupID, adds) + if err != nil { + return err + } + if !changed { + glog.Warning("Allowing ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID) + } + } + if len(removes) > 0 { + changed, err := c.removeSecurityGroupIngress(instanceSecurityGroupID, removes) + if err != nil { + return err + } + if !changed { + glog.Warning("Revoking ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID) + } + } + } + return nil +} + +// Add SG rules for a given NLB +func (c *Cloud) updateInstanceSecurityGroupsForNLB(mappings []nlbPortMapping, instances map[awsInstanceID]*ec2.Instance, lbName string, clientCidrs []string) error { + if c.cfg.Global.DisableSecurityGroupIngress { + return nil + } + + vpcCidr, err := c.getVpcCidrBlock() + if err != nil { + return err + } + + // Unlike the classic ELB, NLB does not have a security group that we can + // filter against all existing groups to see if they allow access. 
Instead + // we use the IpRange.Description field to annotate NLB health check and + // client traffic rules + + // Get the actual list of groups that allow ingress for the load-balancer + var actualGroups []*ec2.SecurityGroup + { + // Server side filter + describeRequest := &ec2.DescribeSecurityGroupsInput{} + filters := []*ec2.Filter{ + newEc2Filter("ip-permission.protocol", "tcp"), + } + describeRequest.Filters = c.tagging.addFilters(filters) + response, err := c.ec2.DescribeSecurityGroups(describeRequest) + if err != nil { + return fmt.Errorf("Error querying security groups for NLB: %q", err) + } + for _, sg := range response { + if !c.tagging.hasClusterTag(sg.Tags) { + continue + } + actualGroups = append(actualGroups, sg) + } + + // client-side filter + // Filter out groups that don't have IP Rules we've annotated for this service + actualGroups = filterForIPRangeDescription(actualGroups, lbName) + } + + taggedSecurityGroups, err := c.getTaggedSecurityGroups() + if err != nil { + return fmt.Errorf("Error querying for tagged security groups: %q", err) + } + + externalTrafficPolicyIsLocal := false + trafficPorts := []int64{} + for i := range mappings { + trafficPorts = append(trafficPorts, mappings[i].TrafficPort) + if mappings[i].TrafficPort != mappings[i].HealthCheckPort { + externalTrafficPolicyIsLocal = true + } + } + + healthCheckPorts := trafficPorts + // if externalTrafficPolicy is Local, all listeners use the same health + // check port + if externalTrafficPolicyIsLocal && len(mappings) > 0 { + healthCheckPorts = []int64{mappings[0].HealthCheckPort} + } + + desiredGroupIds := []string{} + // Scan instances for groups we want open + for _, instance := range instances { + securityGroup, err := findSecurityGroupForInstance(instance, taggedSecurityGroups) + if err != nil { + return err + } + + if securityGroup == nil { + glog.Warningf("Ignoring instance without security group: %s", aws.StringValue(instance.InstanceId)) + continue + } + + id := aws.StringValue(securityGroup.GroupId) + if id == "" { + glog.Warningf("found security group without id: %v", securityGroup) + continue + } + + desiredGroupIds = append(desiredGroupIds, id) + } + + // Run once for Client traffic + err = c.updateInstanceSecurityGroupsForNLBTraffic(actualGroups, desiredGroupIds, trafficPorts, lbName, clientCidrs, true) + if err != nil { + return err + } + + // Run once for health check traffic + err = c.updateInstanceSecurityGroupsForNLBTraffic(actualGroups, desiredGroupIds, healthCheckPorts, lbName, []string{aws.StringValue(vpcCidr)}, false) + if err != nil { + return err + } + + return nil +} + func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBalancerName string, listeners []*elb.Listener, subnetIDs []string, securityGroupIDs []string, internalELB, proxyProtocol bool, loadBalancerAttributes *elb.LoadBalancerAttributes, annotations map[string]string) (*elb.LoadBalancerDescription, error) { loadBalancer, err := c.describeLoadBalancer(loadBalancerName) if err != nil { @@ -314,6 +1100,18 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala } } } + + { + // Add additional tags + glog.V(2).Infof("Creating additional load balancer tags for %s", loadBalancerName) + tags := getLoadBalancerAdditionalTags(annotations) + if len(tags) > 0 { + err := c.addLoadBalancerTags(loadBalancerName, tags) + if err != nil { + return nil, fmt.Errorf("unable to create additional load balancer tags: %v", err) + } + } + } } // Whether the ELB was new or existing, sync attributes 
regardless. This accounts for things @@ -356,6 +1154,17 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala return loadBalancer, nil } +func createSubnetMappings(subnetIDs []string) []*elbv2.SubnetMapping { + response := []*elbv2.SubnetMapping{} + + for _, id := range subnetIDs { + // Ignore AllocationId for now + response = append(response, &elbv2.SubnetMapping{SubnetId: aws.String(id)}) + } + + return response +} + // elbProtocolsAreEqual checks if two ELB protocol strings are considered the same // Comparison is case insensitive func elbProtocolsAreEqual(l, r *string) bool { @@ -374,44 +1183,72 @@ func awsArnEquals(l, r *string) bool { return strings.EqualFold(aws.StringValue(l), aws.StringValue(r)) } +// getExpectedHealthCheck returns an elb.Healthcheck for the provided target +// and using either sensible defaults or overrides via Service annotations +func (c *Cloud) getExpectedHealthCheck(target string, annotations map[string]string) (*elb.HealthCheck, error) { + healthcheck := &elb.HealthCheck{Target: &target} + getOrDefault := func(annotation string, defaultValue int64) (*int64, error) { + i64 := defaultValue + var err error + if s, ok := annotations[annotation]; ok { + i64, err = strconv.ParseInt(s, 10, 0) + if err != nil { + return nil, fmt.Errorf("failed parsing health check annotation value: %v", err) + } + } + return &i64, nil + } + var err error + healthcheck.HealthyThreshold, err = getOrDefault(ServiceAnnotationLoadBalancerHCHealthyThreshold, defaultHCHealthyThreshold) + if err != nil { + return nil, err + } + healthcheck.UnhealthyThreshold, err = getOrDefault(ServiceAnnotationLoadBalancerHCUnhealthyThreshold, defaultHCUnhealthyThreshold) + if err != nil { + return nil, err + } + healthcheck.Timeout, err = getOrDefault(ServiceAnnotationLoadBalancerHCTimeout, defaultHCTimeout) + if err != nil { + return nil, err + } + healthcheck.Interval, err = getOrDefault(ServiceAnnotationLoadBalancerHCInterval, defaultHCInterval) + if err != nil { + return nil, err + } + if err = healthcheck.Validate(); err != nil { + return nil, fmt.Errorf("some of the load balancer health check parameters are invalid: %v", err) + } + return healthcheck, nil +} + // Makes sure that the health check for an ELB matches the configured health check node port -func (c *Cloud) ensureLoadBalancerHealthCheck(loadBalancer *elb.LoadBalancerDescription, protocol string, port int32, path string) error { +func (c *Cloud) ensureLoadBalancerHealthCheck(loadBalancer *elb.LoadBalancerDescription, protocol string, port int32, path string, annotations map[string]string) error { name := aws.StringValue(loadBalancer.LoadBalancerName) actual := loadBalancer.HealthCheck - - // Default AWS settings - expectedHealthyThreshold := int64(2) - expectedUnhealthyThreshold := int64(6) - expectedTimeout := int64(5) - expectedInterval := int64(10) - expectedTarget := protocol + ":" + strconv.FormatInt(int64(port), 10) + path + expected, err := c.getExpectedHealthCheck(expectedTarget, annotations) + if err != nil { + return fmt.Errorf("cannot update health check for load balancer %q: %q", name, err) + } - if expectedTarget == aws.StringValue(actual.Target) && - expectedHealthyThreshold == orZero(actual.HealthyThreshold) && - expectedUnhealthyThreshold == orZero(actual.UnhealthyThreshold) && - expectedTimeout == orZero(actual.Timeout) && - expectedInterval == orZero(actual.Interval) { + // comparing attributes 1 by 1 to avoid breakage in case a new field is + // added to the HC which breaks the equality + if 
aws.StringValue(expected.Target) == aws.StringValue(actual.Target) && + aws.Int64Value(expected.HealthyThreshold) == aws.Int64Value(actual.HealthyThreshold) && + aws.Int64Value(expected.UnhealthyThreshold) == aws.Int64Value(actual.UnhealthyThreshold) && + aws.Int64Value(expected.Interval) == aws.Int64Value(actual.Interval) && + aws.Int64Value(expected.Timeout) == aws.Int64Value(actual.Timeout) { return nil } - glog.V(2).Infof("Updating load-balancer health-check for %q", name) - - healthCheck := &elb.HealthCheck{} - healthCheck.HealthyThreshold = &expectedHealthyThreshold - healthCheck.UnhealthyThreshold = &expectedUnhealthyThreshold - healthCheck.Timeout = &expectedTimeout - healthCheck.Interval = &expectedInterval - healthCheck.Target = &expectedTarget - request := &elb.ConfigureHealthCheckInput{} - request.HealthCheck = healthCheck + request.HealthCheck = expected request.LoadBalancerName = loadBalancer.LoadBalancerName - _, err := c.elb.ConfigureHealthCheck(request) + _, err = c.elb.ConfigureHealthCheck(request) if err != nil { - return fmt.Errorf("error configuring load-balancer health-check for %q: %q", name, err) + return fmt.Errorf("error configuring load balancer health check for %q: %q", name, err) } return nil @@ -471,6 +1308,78 @@ func (c *Cloud) ensureLoadBalancerInstances(loadBalancerName string, lbInstances return nil } +func (c *Cloud) getLoadBalancerTLSPorts(loadBalancer *elb.LoadBalancerDescription) []int64 { + ports := []int64{} + + for _, listenerDescription := range loadBalancer.ListenerDescriptions { + protocol := aws.StringValue(listenerDescription.Listener.Protocol) + if protocol == "SSL" || protocol == "HTTPS" { + ports = append(ports, aws.Int64Value(listenerDescription.Listener.LoadBalancerPort)) + } + } + return ports +} + +func (c *Cloud) ensureSSLNegotiationPolicy(loadBalancer *elb.LoadBalancerDescription, policyName string) error { + glog.V(2).Info("Describing load balancer policies on load balancer") + result, err := c.elb.DescribeLoadBalancerPolicies(&elb.DescribeLoadBalancerPoliciesInput{ + LoadBalancerName: loadBalancer.LoadBalancerName, + PolicyNames: []*string{ + aws.String(fmt.Sprintf(SSLNegotiationPolicyNameFormat, policyName)), + }, + }) + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + switch aerr.Code() { + case "PolicyNotFound": + // TODO change from string to `elb.ErrCodePolicyNotFoundException` once the AWS SDK is updated + default: + return fmt.Errorf("error describing security policies on load balancer: %q", err) + } + } + } + + if len(result.PolicyDescriptions) > 0 { + return nil + } + + glog.V(2).Infof("Creating SSL negotiation policy '%s' on load balancer", fmt.Sprintf(SSLNegotiationPolicyNameFormat, policyName)) + // there is an upper limit of 98 policies on an ELB, we're pretty safe from + // running into it + _, err = c.elb.CreateLoadBalancerPolicy(&elb.CreateLoadBalancerPolicyInput{ + LoadBalancerName: loadBalancer.LoadBalancerName, + PolicyName: aws.String(fmt.Sprintf(SSLNegotiationPolicyNameFormat, policyName)), + PolicyTypeName: aws.String("SSLNegotiationPolicyType"), + PolicyAttributes: []*elb.PolicyAttribute{ + { + AttributeName: aws.String("Reference-Security-Policy"), + AttributeValue: aws.String(policyName), + }, + }, + }) + if err != nil { + return fmt.Errorf("error creating security policy on load balancer: %q", err) + } + return nil +} + +func (c *Cloud) setSSLNegotiationPolicy(loadBalancerName, sslPolicyName string, port int64) error { + policyName := fmt.Sprintf(SSLNegotiationPolicyNameFormat, sslPolicyName) + 
request := &elb.SetLoadBalancerPoliciesOfListenerInput{ + LoadBalancerName: aws.String(loadBalancerName), + LoadBalancerPort: aws.Int64(port), + PolicyNames: []*string{ + aws.String(policyName), + }, + } + glog.V(2).Infof("Setting SSL negotiation policy '%s' on load balancer", policyName) + _, err := c.elb.SetLoadBalancerPoliciesOfListener(request) + if err != nil { + return fmt.Errorf("error setting SSL negotiation policy '%s' on load balancer: %q", policyName, err) + } + return nil +} + func (c *Cloud) createProxyProtocolPolicy(loadBalancerName string) error { request := &elb.CreateLoadBalancerPolicyInput{ LoadBalancerName: aws.String(loadBalancerName), diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/instances.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/instances.go index 3fa778e2e623..a42834415dd0 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/instances.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/instances.go @@ -25,10 +25,14 @@ import ( "github.com/aws/aws-sdk-go/service/ec2" "github.com/golang/glog" "k8s.io/api/core/v1" + "regexp" "sync" "time" ) +// awsInstanceRegMatch represents Regex Match for AWS instance. +var awsInstanceRegMatch = regexp.MustCompile("^i-[^/]*$") + // awsInstanceID represents the ID of the instance in the AWS API, e.g. i-12345678 // The "traditional" format is "i-12345678" // A new longer format is also being introduced: "i-12345678abcdef01" @@ -76,8 +80,7 @@ func (name kubernetesInstanceID) mapToAWSInstanceID() (awsInstanceID, error) { // We sanity check the resulting volume; the two known formats are // i-12345678 and i-12345678abcdef01 - // TODO: Regex match? - if awsID == "" || strings.Contains(awsID, "/") || !strings.HasPrefix(awsID, "i-") { + if awsID == "" || !awsInstanceRegMatch.MatchString(awsID) { return "", fmt.Errorf("Invalid format for AWS instance (%s)", name) } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/retry_handler.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/retry_handler.go index 49a50ef5c501..8c063f8e8a41 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/retry_handler.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/retry_handler.go @@ -21,6 +21,7 @@ import ( "sync" "time" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" "github.com/golang/glog" @@ -53,7 +54,15 @@ func (c *CrossRequestRetryDelay) BeforeSign(r *request.Request) { if delay > 0 { glog.Warningf("Inserting delay before AWS request (%s) to avoid RequestLimitExceeded: %s", describeRequest(r), delay.String()) - r.Config.SleepDelay(delay) + + if sleepFn := r.Config.SleepDelay; sleepFn != nil { + // Support SleepDelay for backwards compatibility + sleepFn(delay) + } else if err := aws.SleepWithContext(r.Context(), delay); err != nil { + r.Error = awserr.New(request.CanceledErrorCode, "request context canceled", err) + r.Retryable = aws.Bool(false) + return + } // Avoid clock skew problems r.Time = now diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/volumes.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/volumes.go index 8ff342199d8b..2ac932d43d72 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/volumes.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/volumes.go @@ -19,11 +19,18 @@ package aws import ( "fmt" "net/url" + "regexp" "strings" "github.com/aws/aws-sdk-go/aws" + 
"github.com/aws/aws-sdk-go/service/ec2" + "github.com/golang/glog" + "k8s.io/apimachinery/pkg/types" ) +// awsVolumeRegMatch represents Regex Match for AWS volume. +var awsVolumeRegMatch = regexp.MustCompile("^vol-[^/]*$") + // awsVolumeID represents the ID of the volume in the AWS API, e.g. vol-12345678 // The "traditional" format is "vol-12345678" // A new longer format is also being introduced: "vol-12345678abcdef01" @@ -42,8 +49,18 @@ func (i awsVolumeID) awsString() *string { // * type KubernetesVolumeID string -// mapToAWSVolumeID extracts the awsVolumeID from the KubernetesVolumeID -func (name KubernetesVolumeID) mapToAWSVolumeID() (awsVolumeID, error) { +// DiskInfo returns aws disk information in easy to use manner +type diskInfo struct { + ec2Instance *ec2.Instance + nodeName types.NodeName + volumeState string + attachmentState string + hasAttachment bool + disk *awsDisk +} + +// MapToAWSVolumeID extracts the awsVolumeID from the KubernetesVolumeID +func (name KubernetesVolumeID) MapToAWSVolumeID() (awsVolumeID, error) { // name looks like aws://availability-zone/awsVolumeId // The original idea of the URL-style name was to put the AZ into the @@ -75,10 +92,61 @@ func (name KubernetesVolumeID) mapToAWSVolumeID() (awsVolumeID, error) { // We sanity check the resulting volume; the two known formats are // vol-12345678 and vol-12345678abcdef01 - // TODO: Regex match? - if strings.Contains(awsID, "/") || !strings.HasPrefix(awsID, "vol-") { + if !awsVolumeRegMatch.MatchString(awsID) { return "", fmt.Errorf("Invalid format for AWS volume (%s)", name) } return awsVolumeID(awsID), nil } + +func GetAWSVolumeID(kubeVolumeID string) (string, error) { + kid := KubernetesVolumeID(kubeVolumeID) + awsID, err := kid.MapToAWSVolumeID() + return string(awsID), err +} + +func (c *Cloud) checkIfAttachedToNode(diskName KubernetesVolumeID, nodeName types.NodeName) (*diskInfo, bool, error) { + disk, err := newAWSDisk(c, diskName) + + if err != nil { + return nil, true, err + } + + awsDiskInfo := &diskInfo{ + disk: disk, + } + + info, err := disk.describeVolume() + + if err != nil { + describeError := fmt.Errorf("Error describing volume %s with %v", diskName, err) + glog.Warning(describeError) + awsDiskInfo.volumeState = "unknown" + return awsDiskInfo, false, describeError + } + + awsDiskInfo.volumeState = aws.StringValue(info.State) + + if len(info.Attachments) > 0 { + attachment := info.Attachments[0] + awsDiskInfo.attachmentState = aws.StringValue(attachment.State) + instanceID := aws.StringValue(attachment.InstanceId) + instanceInfo, err := c.getInstanceByID(instanceID) + + // This should never happen but if it does it could mean there was a race and instance + // has been deleted + if err != nil { + fetchErr := fmt.Errorf("Error fetching instance %s for volume %s", instanceID, diskName) + glog.Warning(fetchErr) + return awsDiskInfo, false, fetchErr + } + + awsDiskInfo.ec2Instance = instanceInfo + awsDiskInfo.nodeName = mapInstanceToNodeName(instanceInfo) + awsDiskInfo.hasAttachment = true + if awsDiskInfo.nodeName == nodeName { + return awsDiskInfo, true, nil + } + } + return awsDiskInfo, false, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/BUILD b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/BUILD index 2a494f4ff714..a72de9f2a6de 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/BUILD @@ -13,6 +13,7 @@ go_library( "azure_backoff.go", 
"azure_blobDiskController.go", "azure_controllerCommon.go", + "azure_fakes.go", "azure_file.go", "azure_instance_metadata.go", "azure_instances.go", @@ -22,9 +23,11 @@ go_library( "azure_storage.go", "azure_storageaccount.go", "azure_util.go", + "azure_util_vmss.go", "azure_wrap.go", "azure_zones.go", ], + importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/azure", deps = [ "//pkg/api/v1/service:go_default_library", "//pkg/cloudprovider:go_default_library", @@ -47,6 +50,7 @@ go_library( "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/client-go/util/flowcontrol:go_default_library", ], @@ -54,14 +58,24 @@ go_library( go_test( name = "go_default_test", - srcs = ["azure_test.go"], + srcs = [ + "azure_loadbalancer_test.go", + "azure_test.go", + "azure_util_test.go", + ], + importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/azure", library = ":go_default_library", deps = [ "//pkg/api/v1/service:go_default_library", + "//pkg/kubelet/apis:go_default_library", + "//vendor/github.com/Azure/azure-sdk-for-go/arm/compute:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/arm/network:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library", + "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/client-go/util/flowcontrol:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/OWNERS b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/OWNERS index 9e1337493fe6..1109bcea3468 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/OWNERS @@ -2,3 +2,5 @@ approvers: - brendandburns - colemickens - jdumars +reviewers: +- andyzhangx diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure.go index be0e2df44647..ffc2030a8faf 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure.go @@ -22,6 +22,7 @@ import ( "fmt" "io" "io/ioutil" + "strings" "time" "k8s.io/client-go/util/flowcontrol" @@ -44,13 +45,17 @@ import ( const ( // CloudProviderName is the value used for the --cloud-provider flag - CloudProviderName = "azure" - rateLimitQPSDefault = 1.0 - rateLimitBucketDefault = 5 - backoffRetriesDefault = 6 - backoffExponentDefault = 1.5 - backoffDurationDefault = 5 // in seconds - backoffJitterDefault = 1.0 + CloudProviderName = "azure" + rateLimitQPSDefault = 1.0 + rateLimitBucketDefault = 5 + backoffRetriesDefault = 6 + backoffExponentDefault = 1.5 + backoffDurationDefault = 5 // in seconds + backoffJitterDefault = 1.0 + maximumLoadBalancerRuleCount = 148 // According to Azure LB rule default limit + + vmTypeVMSS = "vmss" + vmTypeStandard = "standard" ) // Config holds the configuration parsed from the --cloud-config flag @@ -82,6 +87,15 @@ type Config struct { // the cloudprovider will try to add all nodes to a single backend pool which is 
forbidden. // In other words, if you use multiple agent pools (availability sets), you MUST set this field. PrimaryAvailabilitySetName string `json:"primaryAvailabilitySetName" yaml:"primaryAvailabilitySetName"` + // The type of azure nodes. Candidate valudes are: vmss and standard. + // If not set, it will be default to standard. + VMType string `json:"vmType" yaml:"vmType"` + // The name of the scale set that should be used as the load balancer backend. + // If this is set, the Azure cloudprovider will only add nodes from that scale set to the load + // balancer backend pool. If this is not set, and multiple agent pools (scale sets) are used, then + // the cloudprovider will try to add all nodes to a single backend pool which is forbidden. + // In other words, if you use multiple agent pools (scale sets), you MUST set this field. + PrimaryScaleSetName string `json:"primaryScaleSetName" yaml:"primaryScaleSetName"` // The ClientID for an AAD application with RBAC access to talk to Azure RM APIs AADClientID string `json:"aadClientId" yaml:"aadClientId"` @@ -113,6 +127,58 @@ type Config struct { // Use managed service identity for the virtual machine to access Azure ARM APIs UseManagedIdentityExtension bool `json:"useManagedIdentityExtension"` + + // Maximum allowed LoadBalancer Rule Count is the limit enforced by Azure Load balancer + MaximumLoadBalancerRuleCount int `json:"maximumLoadBalancerRuleCount"` +} + +// VirtualMachinesClient defines needed functions for azure network.VirtualMachinesClient +type VirtualMachinesClient interface { + CreateOrUpdate(resourceGroupName string, VMName string, parameters compute.VirtualMachine, cancel <-chan struct{}) (<-chan compute.VirtualMachine, <-chan error) + Get(resourceGroupName string, VMName string, expand compute.InstanceViewTypes) (result compute.VirtualMachine, err error) + List(resourceGroupName string) (result compute.VirtualMachineListResult, err error) + ListNextResults(lastResults compute.VirtualMachineListResult) (result compute.VirtualMachineListResult, err error) +} + +// InterfacesClient defines needed functions for azure network.InterfacesClient +type InterfacesClient interface { + CreateOrUpdate(resourceGroupName string, networkInterfaceName string, parameters network.Interface, cancel <-chan struct{}) (<-chan network.Interface, <-chan error) + Get(resourceGroupName string, networkInterfaceName string, expand string) (result network.Interface, err error) + GetVirtualMachineScaleSetNetworkInterface(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (result network.Interface, err error) +} + +// LoadBalancersClient defines needed functions for azure network.LoadBalancersClient +type LoadBalancersClient interface { + CreateOrUpdate(resourceGroupName string, loadBalancerName string, parameters network.LoadBalancer, cancel <-chan struct{}) (<-chan network.LoadBalancer, <-chan error) + Delete(resourceGroupName string, loadBalancerName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) + Get(resourceGroupName string, loadBalancerName string, expand string) (result network.LoadBalancer, err error) + List(resourceGroupName string) (result network.LoadBalancerListResult, err error) + ListNextResults(lastResult network.LoadBalancerListResult) (result network.LoadBalancerListResult, err error) +} + +// PublicIPAddressesClient defines needed functions for azure network.PublicIPAddressesClient +type PublicIPAddressesClient interface { + 
CreateOrUpdate(resourceGroupName string, publicIPAddressName string, parameters network.PublicIPAddress, cancel <-chan struct{}) (<-chan network.PublicIPAddress, <-chan error) + Delete(resourceGroupName string, publicIPAddressName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) + Get(resourceGroupName string, publicIPAddressName string, expand string) (result network.PublicIPAddress, err error) + List(resourceGroupName string) (result network.PublicIPAddressListResult, err error) + ListNextResults(lastResults network.PublicIPAddressListResult) (result network.PublicIPAddressListResult, err error) +} + +// SubnetsClient defines needed functions for azure network.SubnetsClient +type SubnetsClient interface { + CreateOrUpdate(resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters network.Subnet, cancel <-chan struct{}) (<-chan network.Subnet, <-chan error) + Delete(resourceGroupName string, virtualNetworkName string, subnetName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) + Get(resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (result network.Subnet, err error) + List(resourceGroupName string, virtualNetworkName string) (result network.SubnetListResult, err error) +} + +// SecurityGroupsClient defines needed functions for azure network.SecurityGroupsClient +type SecurityGroupsClient interface { + CreateOrUpdate(resourceGroupName string, networkSecurityGroupName string, parameters network.SecurityGroup, cancel <-chan struct{}) (<-chan network.SecurityGroup, <-chan error) + Delete(resourceGroupName string, networkSecurityGroupName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) + Get(resourceGroupName string, networkSecurityGroupName string, expand string) (result network.SecurityGroup, err error) + List(resourceGroupName string) (result network.SecurityGroupListResult, err error) } // Cloud holds the config and clients @@ -120,19 +186,23 @@ type Cloud struct { Config Environment azure.Environment RoutesClient network.RoutesClient - SubnetsClient network.SubnetsClient - InterfacesClient network.InterfacesClient + SubnetsClient SubnetsClient + InterfacesClient InterfacesClient RouteTablesClient network.RouteTablesClient - LoadBalancerClient network.LoadBalancersClient - PublicIPAddressesClient network.PublicIPAddressesClient - SecurityGroupsClient network.SecurityGroupsClient - VirtualMachinesClient compute.VirtualMachinesClient + LoadBalancerClient LoadBalancersClient + PublicIPAddressesClient PublicIPAddressesClient + SecurityGroupsClient SecurityGroupsClient + VirtualMachinesClient VirtualMachinesClient StorageAccountClient storage.AccountsClient DisksClient disk.DisksClient operationPollRateLimiter flowcontrol.RateLimiter resourceRequestBackoff wait.Backoff metadata *InstanceMetadata + // Clients for vmss. 
+ VirtualMachineScaleSetsClient compute.VirtualMachineScaleSetsClient + VirtualMachineScaleSetVMsClient compute.VirtualMachineScaleSetVMsClient + *BlobDiskController *ManagedDiskController *controllerCommon @@ -166,8 +236,12 @@ func GetServicePrincipalToken(config *Config, env *azure.Environment) (*adal.Ser if config.UseManagedIdentityExtension { glog.V(2).Infoln("azure: using managed identity extension to retrieve access token") + msiEndpoint, err := adal.GetMSIVMEndpoint() + if err != nil { + return nil, fmt.Errorf("Getting the managed service identity endpoint: %v", err) + } return adal.NewServicePrincipalTokenFromMSI( - *oauthConfig, + msiEndpoint, env.ServiceManagementEndpoint) } @@ -217,11 +291,12 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) { return nil, err } - az.SubnetsClient = network.NewSubnetsClient(az.SubscriptionID) - az.SubnetsClient.BaseURI = az.Environment.ResourceManagerEndpoint - az.SubnetsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) - az.SubnetsClient.PollingDelay = 5 * time.Second - configureUserAgent(&az.SubnetsClient.Client) + subnetsClient := network.NewSubnetsClient(az.SubscriptionID) + subnetsClient.BaseURI = az.Environment.ResourceManagerEndpoint + subnetsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) + subnetsClient.PollingDelay = 5 * time.Second + configureUserAgent(&subnetsClient.Client) + az.SubnetsClient = subnetsClient az.RouteTablesClient = network.NewRouteTablesClient(az.SubscriptionID) az.RouteTablesClient.BaseURI = az.Environment.ResourceManagerEndpoint @@ -235,35 +310,54 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) { az.RoutesClient.PollingDelay = 5 * time.Second configureUserAgent(&az.RoutesClient.Client) - az.InterfacesClient = network.NewInterfacesClient(az.SubscriptionID) - az.InterfacesClient.BaseURI = az.Environment.ResourceManagerEndpoint - az.InterfacesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) - az.InterfacesClient.PollingDelay = 5 * time.Second - configureUserAgent(&az.InterfacesClient.Client) - - az.LoadBalancerClient = network.NewLoadBalancersClient(az.SubscriptionID) - az.LoadBalancerClient.BaseURI = az.Environment.ResourceManagerEndpoint - az.LoadBalancerClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) - az.LoadBalancerClient.PollingDelay = 5 * time.Second - configureUserAgent(&az.LoadBalancerClient.Client) - - az.VirtualMachinesClient = compute.NewVirtualMachinesClient(az.SubscriptionID) - az.VirtualMachinesClient.BaseURI = az.Environment.ResourceManagerEndpoint - az.VirtualMachinesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) - az.VirtualMachinesClient.PollingDelay = 5 * time.Second - configureUserAgent(&az.VirtualMachinesClient.Client) - - az.PublicIPAddressesClient = network.NewPublicIPAddressesClient(az.SubscriptionID) - az.PublicIPAddressesClient.BaseURI = az.Environment.ResourceManagerEndpoint - az.PublicIPAddressesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) - az.PublicIPAddressesClient.PollingDelay = 5 * time.Second - configureUserAgent(&az.PublicIPAddressesClient.Client) - - az.SecurityGroupsClient = network.NewSecurityGroupsClient(az.SubscriptionID) - az.SecurityGroupsClient.BaseURI = az.Environment.ResourceManagerEndpoint - az.SecurityGroupsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) - az.SecurityGroupsClient.PollingDelay = 5 * time.Second - 
configureUserAgent(&az.SecurityGroupsClient.Client) + interfacesClient := network.NewInterfacesClient(az.SubscriptionID) + interfacesClient.BaseURI = az.Environment.ResourceManagerEndpoint + interfacesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) + interfacesClient.PollingDelay = 5 * time.Second + configureUserAgent(&interfacesClient.Client) + az.InterfacesClient = interfacesClient + + loadBalancerClient := network.NewLoadBalancersClient(az.SubscriptionID) + loadBalancerClient.BaseURI = az.Environment.ResourceManagerEndpoint + loadBalancerClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) + loadBalancerClient.PollingDelay = 5 * time.Second + configureUserAgent(&loadBalancerClient.Client) + az.LoadBalancerClient = loadBalancerClient + + virtualMachinesClient := compute.NewVirtualMachinesClient(az.SubscriptionID) + virtualMachinesClient.BaseURI = az.Environment.ResourceManagerEndpoint + virtualMachinesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) + virtualMachinesClient.PollingDelay = 5 * time.Second + configureUserAgent(&virtualMachinesClient.Client) + az.VirtualMachinesClient = virtualMachinesClient + + publicIPAddressClient := network.NewPublicIPAddressesClient(az.SubscriptionID) + publicIPAddressClient.BaseURI = az.Environment.ResourceManagerEndpoint + publicIPAddressClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) + publicIPAddressClient.PollingDelay = 5 * time.Second + configureUserAgent(&publicIPAddressClient.Client) + az.PublicIPAddressesClient = publicIPAddressClient + + securityGroupsClient := network.NewSecurityGroupsClient(az.SubscriptionID) + securityGroupsClient.BaseURI = az.Environment.ResourceManagerEndpoint + securityGroupsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) + securityGroupsClient.PollingDelay = 5 * time.Second + configureUserAgent(&securityGroupsClient.Client) + az.SecurityGroupsClient = securityGroupsClient + + virtualMachineScaleSetVMsClient := compute.NewVirtualMachineScaleSetVMsClient(az.SubscriptionID) + az.VirtualMachineScaleSetVMsClient.BaseURI = az.Environment.ResourceManagerEndpoint + az.VirtualMachineScaleSetVMsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) + az.VirtualMachineScaleSetVMsClient.PollingDelay = 5 * time.Second + configureUserAgent(&virtualMachineScaleSetVMsClient.Client) + az.VirtualMachineScaleSetVMsClient = virtualMachineScaleSetVMsClient + + virtualMachineScaleSetsClient := compute.NewVirtualMachineScaleSetsClient(az.SubscriptionID) + az.VirtualMachineScaleSetsClient.BaseURI = az.Environment.ResourceManagerEndpoint + az.VirtualMachineScaleSetsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) + az.VirtualMachineScaleSetsClient.PollingDelay = 5 * time.Second + configureUserAgent(&virtualMachineScaleSetsClient.Client) + az.VirtualMachineScaleSetsClient = virtualMachineScaleSetsClient az.StorageAccountClient = storage.NewAccountsClientWithBaseURI(az.Environment.ResourceManagerEndpoint, az.SubscriptionID) az.StorageAccountClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) @@ -285,7 +379,7 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) { az.operationPollRateLimiter = flowcontrol.NewTokenBucketRateLimiter( az.CloudProviderRateLimitQPS, az.CloudProviderRateLimitBucket) - glog.V(2).Infof("Azure cloudprovider using rate limit config: QPS=%d, bucket=%d", + glog.V(2).Infof("Azure cloudprovider using rate limit config: QPS=%g, 
bucket=%d", az.CloudProviderRateLimitQPS, az.CloudProviderRateLimitBucket) } else { @@ -323,6 +417,10 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) { az.metadata = NewInstanceMetadata() + if az.MaximumLoadBalancerRuleCount == 0 { + az.MaximumLoadBalancerRuleCount = maximumLoadBalancerRuleCount + } + if err := initDiskControllers(&az); err != nil { return nil, err } @@ -355,6 +453,11 @@ func ParseConfig(configReader io.Reader) (*Config, *azure.Environment, error) { return nil, nil, err } } + + if config.VMType != "" { + config.VMType = strings.ToLower(config.VMType) + } + return &config, &env, nil } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_backoff.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_backoff.go index 0683413c66c7..6f5e41349db7 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_backoff.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_backoff.go @@ -26,11 +26,26 @@ import ( "k8s.io/apimachinery/pkg/types" ) +// requestBackoff if backoff is disabled in cloud provider it +// returns a new Backoff object steps = 1 +// This is to make sure that the requested command executes +// at least once +func (az *Cloud) requestBackoff() (resourceRequestBackoff wait.Backoff) { + if az.CloudProviderBackoff { + return az.resourceRequestBackoff + } + resourceRequestBackoff = wait.Backoff{ + Steps: 1, + } + + return resourceRequestBackoff +} + // GetVirtualMachineWithRetry invokes az.getVirtualMachine with exponential backoff retry func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName) (compute.VirtualMachine, bool, error) { var machine compute.VirtualMachine var exists bool - err := wait.ExponentialBackoff(az.resourceRequestBackoff, func() (bool, error) { + err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { var retryErr error machine, exists, retryErr = az.getVirtualMachine(name) if retryErr != nil { @@ -43,11 +58,29 @@ func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName) (compute.Virtua return machine, exists, err } +// GetScaleSetsVMWithRetry invokes az.getScaleSetsVM with exponential backoff retry +func (az *Cloud) GetScaleSetsVMWithRetry(name types.NodeName) (compute.VirtualMachineScaleSetVM, bool, error) { + var machine compute.VirtualMachineScaleSetVM + var exists bool + err := wait.ExponentialBackoff(az.resourceRequestBackoff, func() (bool, error) { + var retryErr error + machine, exists, retryErr = az.getVmssVirtualMachine(name) + if retryErr != nil { + glog.Errorf("GetScaleSetsVMWithRetry backoff: failure, will retry,err=%v", retryErr) + return false, nil + } + glog.V(10).Infof("GetScaleSetsVMWithRetry backoff: success") + return true, nil + }) + return machine, exists, err +} + // VirtualMachineClientGetWithRetry invokes az.VirtualMachinesClient.Get with exponential backoff retry func (az *Cloud) VirtualMachineClientGetWithRetry(resourceGroup, vmName string, types compute.InstanceViewTypes) (compute.VirtualMachine, error) { var machine compute.VirtualMachine - err := wait.ExponentialBackoff(az.resourceRequestBackoff, func() (bool, error) { + err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { var retryErr error + az.operationPollRateLimiter.Accept() machine, retryErr = az.VirtualMachinesClient.Get(resourceGroup, vmName, types) if retryErr != nil { glog.Errorf("backoff: failure, will retry,err=%v", retryErr) @@ -59,10 +92,63 @@ func (az *Cloud) 
VirtualMachineClientGetWithRetry(resourceGroup, vmName string, return machine, err } +// VirtualMachineClientListWithRetry invokes az.VirtualMachinesClient.List with exponential backoff retry +func (az *Cloud) VirtualMachineClientListWithRetry() ([]compute.VirtualMachine, error) { + allNodes := []compute.VirtualMachine{} + var result compute.VirtualMachineListResult + err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { + var retryErr error + az.operationPollRateLimiter.Accept() + glog.V(10).Infof("VirtualMachinesClient.List(%v): start", az.ResourceGroup) + result, retryErr = az.VirtualMachinesClient.List(az.ResourceGroup) + glog.V(10).Infof("VirtualMachinesClient.List(%v): end", az.ResourceGroup) + if retryErr != nil { + glog.Errorf("VirtualMachinesClient.List(%v) - backoff: failure, will retry,err=%v", + az.ResourceGroup, + retryErr) + return false, retryErr + } + glog.V(2).Infof("VirtualMachinesClient.List(%v) - backoff: success", az.ResourceGroup) + return true, nil + }) + if err != nil { + return nil, err + } + + appendResults := (result.Value != nil && len(*result.Value) > 0) + for appendResults { + allNodes = append(allNodes, *result.Value...) + appendResults = false + // follow the next link to get all the vms for resource group + if result.NextLink != nil { + err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { + var retryErr error + az.operationPollRateLimiter.Accept() + glog.V(10).Infof("VirtualMachinesClient.ListNextResults(%v): start", az.ResourceGroup) + result, retryErr = az.VirtualMachinesClient.ListNextResults(result) + glog.V(10).Infof("VirtualMachinesClient.ListNextResults(%v): end", az.ResourceGroup) + if retryErr != nil { + glog.Errorf("VirtualMachinesClient.ListNextResults(%v) - backoff: failure, will retry,err=%v", + az.ResourceGroup, retryErr) + return false, retryErr + } + glog.V(2).Infof("VirtualMachinesClient.ListNextResults(%v): success", az.ResourceGroup) + return true, nil + }) + if err != nil { + return allNodes, err + } + appendResults = (result.Value != nil && len(*result.Value) > 0) + } + } + + return allNodes, err +} + // GetIPForMachineWithRetry invokes az.getIPForMachine with exponential backoff retry func (az *Cloud) GetIPForMachineWithRetry(name types.NodeName) (string, error) { var ip string - err := wait.ExponentialBackoff(az.resourceRequestBackoff, func() (bool, error) { + err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { var retryErr error ip, retryErr = az.getIPForMachine(name) if retryErr != nil { @@ -77,7 +163,7 @@ func (az *Cloud) GetIPForMachineWithRetry(name types.NodeName) (string, error) { // CreateOrUpdateSGWithRetry invokes az.SecurityGroupsClient.CreateOrUpdate with exponential backoff retry func (az *Cloud) CreateOrUpdateSGWithRetry(sg network.SecurityGroup) error { - return wait.ExponentialBackoff(az.resourceRequestBackoff, func() (bool, error) { + return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { az.operationPollRateLimiter.Accept() glog.V(10).Infof("SecurityGroupsClient.CreateOrUpdate(%s): start", *sg.Name) respChan, errChan := az.SecurityGroupsClient.CreateOrUpdate(az.ResourceGroup, *sg.Name, sg, nil) @@ -90,7 +176,7 @@ func (az *Cloud) CreateOrUpdateSGWithRetry(sg network.SecurityGroup) error { // CreateOrUpdateLBWithRetry invokes az.LoadBalancerClient.CreateOrUpdate with exponential backoff retry func (az *Cloud) CreateOrUpdateLBWithRetry(lb network.LoadBalancer) error { - return wait.ExponentialBackoff(az.resourceRequestBackoff, func() 
(bool, error) { + return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { az.operationPollRateLimiter.Accept() glog.V(10).Infof("LoadBalancerClient.CreateOrUpdate(%s): start", *lb.Name) respChan, errChan := az.LoadBalancerClient.CreateOrUpdate(az.ResourceGroup, *lb.Name, lb, nil) @@ -101,9 +187,120 @@ func (az *Cloud) CreateOrUpdateLBWithRetry(lb network.LoadBalancer) error { }) } +// ListLBWithRetry invokes az.LoadBalancerClient.List with exponential backoff retry +func (az *Cloud) ListLBWithRetry() ([]network.LoadBalancer, error) { + allLBs := []network.LoadBalancer{} + var result network.LoadBalancerListResult + + err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { + var retryErr error + az.operationPollRateLimiter.Accept() + glog.V(10).Infof("LoadBalancerClient.List(%v): start", az.ResourceGroup) + result, retryErr = az.LoadBalancerClient.List(az.ResourceGroup) + glog.V(10).Infof("LoadBalancerClient.List(%v): end", az.ResourceGroup) + if retryErr != nil { + glog.Errorf("LoadBalancerClient.List(%v) - backoff: failure, will retry,err=%v", + az.ResourceGroup, + retryErr) + return false, retryErr + } + glog.V(2).Infof("LoadBalancerClient.List(%v) - backoff: success", az.ResourceGroup) + return true, nil + }) + if err != nil { + return nil, err + } + + appendResults := (result.Value != nil && len(*result.Value) > 0) + for appendResults { + allLBs = append(allLBs, *result.Value...) + appendResults = false + + // follow the next link to get all the vms for resource group + if result.NextLink != nil { + err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { + var retryErr error + az.operationPollRateLimiter.Accept() + glog.V(10).Infof("LoadBalancerClient.ListNextResults(%v): start", az.ResourceGroup) + result, retryErr = az.LoadBalancerClient.ListNextResults(result) + glog.V(10).Infof("LoadBalancerClient.ListNextResults(%v): end", az.ResourceGroup) + if retryErr != nil { + glog.Errorf("LoadBalancerClient.ListNextResults(%v) - backoff: failure, will retry,err=%v", + az.ResourceGroup, + retryErr) + return false, retryErr + } + glog.V(2).Infof("LoadBalancerClient.ListNextResults(%v) - backoff: success", az.ResourceGroup) + return true, nil + }) + if err != nil { + return allLBs, err + } + appendResults = (result.Value != nil && len(*result.Value) > 0) + } + } + + return allLBs, nil +} + +// ListPIPWithRetry list the PIP resources in az.ResourceGroup +func (az *Cloud) ListPIPWithRetry() ([]network.PublicIPAddress, error) { + allPIPs := []network.PublicIPAddress{} + var result network.PublicIPAddressListResult + err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { + var retryErr error + az.operationPollRateLimiter.Accept() + glog.V(10).Infof("PublicIPAddressesClient.List(%v): start", az.ResourceGroup) + result, retryErr = az.PublicIPAddressesClient.List(az.ResourceGroup) + glog.V(10).Infof("PublicIPAddressesClient.List(%v): end", az.ResourceGroup) + if retryErr != nil { + glog.Errorf("PublicIPAddressesClient.List(%v) - backoff: failure, will retry,err=%v", + az.ResourceGroup, + retryErr) + return false, retryErr + } + glog.V(2).Infof("PublicIPAddressesClient.List(%v) - backoff: success", az.ResourceGroup) + return true, nil + }) + if err != nil { + return nil, err + } + + appendResults := (result.Value != nil && len(*result.Value) > 0) + for appendResults { + allPIPs = append(allPIPs, *result.Value...) 
+ appendResults = false + + // follow the next link to get all the vms for resource group + if result.NextLink != nil { + err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { + var retryErr error + az.operationPollRateLimiter.Accept() + glog.V(10).Infof("PublicIPAddressesClient.ListNextResults(%v): start", az.ResourceGroup) + result, retryErr = az.PublicIPAddressesClient.ListNextResults(result) + glog.V(10).Infof("PublicIPAddressesClient.ListNextResults(%v): end", az.ResourceGroup) + if retryErr != nil { + glog.Errorf("PublicIPAddressesClient.ListNextResults(%v) - backoff: failure, will retry,err=%v", + az.ResourceGroup, + retryErr) + return false, retryErr + } + glog.V(2).Infof("PublicIPAddressesClient.ListNextResults(%v) - backoff: success", az.ResourceGroup) + return true, nil + }) + if err != nil { + return allPIPs, err + } + appendResults = (result.Value != nil && len(*result.Value) > 0) + } + } + + return allPIPs, nil +} + // CreateOrUpdatePIPWithRetry invokes az.PublicIPAddressesClient.CreateOrUpdate with exponential backoff retry func (az *Cloud) CreateOrUpdatePIPWithRetry(pip network.PublicIPAddress) error { - return wait.ExponentialBackoff(az.resourceRequestBackoff, func() (bool, error) { + return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { az.operationPollRateLimiter.Accept() glog.V(10).Infof("PublicIPAddressesClient.CreateOrUpdate(%s): start", *pip.Name) respChan, errChan := az.PublicIPAddressesClient.CreateOrUpdate(az.ResourceGroup, *pip.Name, pip, nil) @@ -116,7 +313,7 @@ func (az *Cloud) CreateOrUpdatePIPWithRetry(pip network.PublicIPAddress) error { // CreateOrUpdateInterfaceWithRetry invokes az.PublicIPAddressesClient.CreateOrUpdate with exponential backoff retry func (az *Cloud) CreateOrUpdateInterfaceWithRetry(nic network.Interface) error { - return wait.ExponentialBackoff(az.resourceRequestBackoff, func() (bool, error) { + return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { az.operationPollRateLimiter.Accept() glog.V(10).Infof("InterfacesClient.CreateOrUpdate(%s): start", *nic.Name) respChan, errChan := az.InterfacesClient.CreateOrUpdate(az.ResourceGroup, *nic.Name, nic, nil) @@ -129,7 +326,7 @@ func (az *Cloud) CreateOrUpdateInterfaceWithRetry(nic network.Interface) error { // DeletePublicIPWithRetry invokes az.PublicIPAddressesClient.Delete with exponential backoff retry func (az *Cloud) DeletePublicIPWithRetry(pipName string) error { - return wait.ExponentialBackoff(az.resourceRequestBackoff, func() (bool, error) { + return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { az.operationPollRateLimiter.Accept() glog.V(10).Infof("PublicIPAddressesClient.Delete(%s): start", pipName) respChan, errChan := az.PublicIPAddressesClient.Delete(az.ResourceGroup, pipName, nil) @@ -142,7 +339,7 @@ func (az *Cloud) DeletePublicIPWithRetry(pipName string) error { // DeleteLBWithRetry invokes az.LoadBalancerClient.Delete with exponential backoff retry func (az *Cloud) DeleteLBWithRetry(lbName string) error { - return wait.ExponentialBackoff(az.resourceRequestBackoff, func() (bool, error) { + return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { az.operationPollRateLimiter.Accept() glog.V(10).Infof("LoadBalancerClient.Delete(%s): start", lbName) respChan, errChan := az.LoadBalancerClient.Delete(az.ResourceGroup, lbName, nil) @@ -155,20 +352,20 @@ func (az *Cloud) DeleteLBWithRetry(lbName string) error { // CreateOrUpdateRouteTableWithRetry invokes 
az.RouteTablesClient.CreateOrUpdate with exponential backoff retry func (az *Cloud) CreateOrUpdateRouteTableWithRetry(routeTable network.RouteTable) error { - return wait.ExponentialBackoff(az.resourceRequestBackoff, func() (bool, error) { + return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { az.operationPollRateLimiter.Accept() - glog.V(10).Infof("RouteTablesClient.CreateOrUpdate(%s): start", routeTable) + glog.V(10).Infof("RouteTablesClient.CreateOrUpdate(%s): start", *routeTable.Name) respChan, errChan := az.RouteTablesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, routeTable, nil) resp := <-respChan err := <-errChan - glog.V(10).Infof("RouteTablesClient.CreateOrUpdate(%s): end", routeTable) + glog.V(10).Infof("RouteTablesClient.CreateOrUpdate(%s): end", *routeTable.Name) return processRetryResponse(resp.Response, err) }) } // CreateOrUpdateRouteWithRetry invokes az.RoutesClient.CreateOrUpdate with exponential backoff retry func (az *Cloud) CreateOrUpdateRouteWithRetry(route network.Route) error { - return wait.ExponentialBackoff(az.resourceRequestBackoff, func() (bool, error) { + return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { az.operationPollRateLimiter.Accept() glog.V(10).Infof("RoutesClient.CreateOrUpdate(%s): start", *route.Name) respChan, errChan := az.RoutesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, *route.Name, route, nil) @@ -181,7 +378,7 @@ func (az *Cloud) CreateOrUpdateRouteWithRetry(route network.Route) error { // DeleteRouteWithRetry invokes az.RoutesClient.Delete with exponential backoff retry func (az *Cloud) DeleteRouteWithRetry(routeName string) error { - return wait.ExponentialBackoff(az.resourceRequestBackoff, func() (bool, error) { + return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { az.operationPollRateLimiter.Accept() glog.V(10).Infof("RoutesClient.Delete(%s): start", az.RouteTableName) respChan, errChan := az.RoutesClient.Delete(az.ResourceGroup, az.RouteTableName, routeName, nil) @@ -194,7 +391,7 @@ func (az *Cloud) DeleteRouteWithRetry(routeName string) error { // CreateOrUpdateVMWithRetry invokes az.VirtualMachinesClient.CreateOrUpdate with exponential backoff retry func (az *Cloud) CreateOrUpdateVMWithRetry(vmName string, newVM compute.VirtualMachine) error { - return wait.ExponentialBackoff(az.resourceRequestBackoff, func() (bool, error) { + return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { az.operationPollRateLimiter.Accept() glog.V(10).Infof("VirtualMachinesClient.CreateOrUpdate(%s): start", vmName) respChan, errChan := az.VirtualMachinesClient.CreateOrUpdate(az.ResourceGroup, vmName, newVM, nil) diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_blobDiskController.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_blobDiskController.go index 037c4941ef2a..a6eda963ef7c 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_blobDiskController.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_blobDiskController.go @@ -20,9 +20,7 @@ import ( "bytes" "encoding/binary" "fmt" - "math" "net/url" - "os" "regexp" "sync" @@ -61,70 +59,71 @@ type BlobDiskController struct { accounts map[string]*storageAccountState } -var defaultContainerName = "" -var storageAccountNamePrefix = "" -var storageAccountNameMatch = "" -var initFlag int64 - -var accountsLock = &sync.Mutex{} +var ( + defaultContainerName = "" + storageAccountNamePrefix = "" + 
storageAccountNameMatch = "" + accountsLock = &sync.Mutex{} +) func newBlobDiskController(common *controllerCommon) (*BlobDiskController, error) { c := BlobDiskController{common: common} - err := c.init() + c.setUniqueStrings() + // get accounts + accounts, err := c.getAllStorageAccounts() if err != nil { - return nil, err + glog.Errorf("azureDisk - getAllStorageAccounts error: %v", err) + c.accounts = make(map[string]*storageAccountState) + return &c, nil } - + c.accounts = accounts return &c, nil } -// CreateVolume creates a VHD blob in a given storage account, will create the given storage account if it does not exist in current resource group -func (c *BlobDiskController) CreateVolume(name, storageAccount string, storageAccountType storage.SkuName, location string, requestGB int) (string, string, int, error) { - key, err := c.common.cloud.getStorageAccesskey(storageAccount) - if err != nil { - glog.V(2).Infof("azureDisk - no key found for storage account %s in resource group %s, begin to create a new storage account", storageAccount, c.common.resourceGroup) - - cp := storage.AccountCreateParameters{ - Sku: &storage.Sku{Name: storageAccountType}, - Tags: &map[string]*string{"created-by": to.StringPtr("azure-dd")}, - Location: &location} - cancel := make(chan struct{}) - - _, errchan := c.common.cloud.StorageAccountClient.Create(c.common.resourceGroup, storageAccount, cp, cancel) - err = <-errchan - if err != nil { - return "", "", 0, fmt.Errorf(fmt.Sprintf("Create Storage Account %s, error: %s", storageAccount, err)) - } - - key, err = c.common.cloud.getStorageAccesskey(storageAccount) +// CreateVolume creates a VHD blob in a storage account that has storageType and location using the given storage account. +// If no storage account is given, search all the storage accounts associated with the resource group and pick one that +// fits storage type and location. 
+func (c *BlobDiskController) CreateVolume(name, storageAccount, storageAccountType, location string, requestGB int) (string, string, int, error) { + var err error + accounts := []accountWithLocation{} + if len(storageAccount) > 0 { + accounts = append(accounts, accountWithLocation{Name: storageAccount}) + } else { + // find a storage account + accounts, err = c.common.cloud.getStorageAccounts() if err != nil { - return "", "", 0, fmt.Errorf("no key found for storage account %s even after creating a new storage account", storageAccount) + // TODO: create a storage account and container + return "", "", 0, err } - - glog.Errorf("no key found for storage account %s in resource group %s", storageAccount, c.common.resourceGroup) - return "", "", 0, err } + for _, account := range accounts { + glog.V(4).Infof("account %s type %s location %s", account.Name, account.StorageType, account.Location) + if (storageAccountType == "" || account.StorageType == storageAccountType) && (location == "" || account.Location == location) || len(storageAccount) > 0 { + // find the access key with this account + key, err := c.common.cloud.getStorageAccesskey(account.Name) + if err != nil { + glog.V(2).Infof("no key found for storage account %s", account.Name) + continue + } - client, err := azstorage.NewBasicClient(storageAccount, key) - if err != nil { - return "", "", 0, err - } - blobClient := client.GetBlobService() + client, err := azstorage.NewBasicClientOnSovereignCloud(account.Name, key, c.common.cloud.Environment) + if err != nil { + return "", "", 0, err + } + blobClient := client.GetBlobService() - container := blobClient.GetContainerReference(vhdContainerName) - _, err = container.CreateIfNotExists(&azstorage.CreateContainerOptions{Access: azstorage.ContainerAccessTypePrivate}) - if err != nil { - return "", "", 0, err - } + // create a page blob in this account's vhd container + diskName, diskURI, err := c.createVHDBlobDisk(blobClient, account.Name, name, vhdContainerName, int64(requestGB)) + if err != nil { + return "", "", 0, err + } - diskName, diskURI, err := c.createVHDBlobDisk(blobClient, storageAccount, name, vhdContainerName, int64(requestGB)) - if err != nil { - return "", "", 0, err + glog.V(4).Infof("azureDisk - created vhd blob uri: %s", diskURI) + return diskName, diskURI, requestGB, err + } } - - glog.V(4).Infof("azureDisk - created vhd blob uri: %s", diskURI) - return diskName, diskURI, requestGB, err + return "", "", 0, fmt.Errorf("failed to find a matching storage account") } // DeleteVolume deletes a VHD blob @@ -172,11 +171,6 @@ func (c *BlobDiskController) getBlobNameAndAccountFromURI(diskURI string) (strin func (c *BlobDiskController) createVHDBlobDisk(blobClient azstorage.BlobStorageClient, accountName, vhdName, containerName string, sizeGB int64) (string, string, error) { container := blobClient.GetContainerReference(containerName) - _, err := container.CreateIfNotExists(&azstorage.CreateContainerOptions{Access: azstorage.ContainerAccessTypePrivate}) - if err != nil { - return "", "", err - } - size := 1024 * 1024 * 1024 * sizeGB vhdSize := size + vhd.VHD_HEADER_SIZE /* header size */ // Blob name in URL must end with '.vhd' extension. 
@@ -184,12 +178,22 @@ func (c *BlobDiskController) createVHDBlobDisk(blobClient azstorage.BlobStorageC tags := make(map[string]string) tags["createdby"] = "k8sAzureDataDisk" - glog.V(4).Infof("azureDisk - creating page blob %name in container %s account %s", vhdName, containerName, accountName) + glog.V(4).Infof("azureDisk - creating page blob %s in container %s account %s", vhdName, containerName, accountName) blob := container.GetBlobReference(vhdName) blob.Properties.ContentLength = vhdSize blob.Metadata = tags - err = blob.PutPageBlob(nil) + err := blob.PutPageBlob(nil) + if err != nil { + // if container doesn't exist, create one and retry PutPageBlob + detail := err.Error() + if strings.Contains(detail, errContainerNotFound) { + err = container.Create(&azstorage.CreateContainerOptions{Access: azstorage.ContainerAccessTypePrivate}) + if err == nil { + err = blob.PutPageBlob(nil) + } + } + } if err != nil { return "", "", fmt.Errorf("failed to put page blob %s in container %s: %v", vhdName, containerName, err) } @@ -223,7 +227,7 @@ func (c *BlobDiskController) createVHDBlobDisk(blobClient azstorage.BlobStorageC // delete a vhd blob func (c *BlobDiskController) deleteVhdBlob(accountName, accountKey, blobName string) error { - client, err := azstorage.NewBasicClient(accountName, accountKey) + client, err := azstorage.NewBasicClientOnSovereignCloud(accountName, accountKey, c.common.cloud.Environment) if err != nil { return err } @@ -235,24 +239,12 @@ func (c *BlobDiskController) deleteVhdBlob(accountName, accountKey, blobName str } //CreateBlobDisk : create a blob disk in a node -func (c *BlobDiskController) CreateBlobDisk(dataDiskName string, storageAccountType storage.SkuName, sizeGB int, forceStandAlone bool) (string, error) { - glog.V(4).Infof("azureDisk - creating blob data disk named:%s on StorageAccountType:%s StandAlone:%v", dataDiskName, storageAccountType, forceStandAlone) +func (c *BlobDiskController) CreateBlobDisk(dataDiskName string, storageAccountType storage.SkuName, sizeGB int) (string, error) { + glog.V(4).Infof("azureDisk - creating blob data disk named:%s on StorageAccountType:%s", dataDiskName, storageAccountType) - var storageAccountName = "" - var err error - - if forceStandAlone { - // we have to wait until the storage account is is created - storageAccountName = "p" + MakeCRC32(c.common.subscriptionID+c.common.resourceGroup+dataDiskName) - err = c.createStorageAccount(storageAccountName, storageAccountType, c.common.location, false) - if err != nil { - return "", err - } - } else { - storageAccountName, err = c.findSANameForDisk(storageAccountType) - if err != nil { - return "", err - } + storageAccountName, err := c.findSANameForDisk(storageAccountType) + if err != nil { + return "", err } blobClient, err := c.getBlobSvcClient(storageAccountName) @@ -265,15 +257,13 @@ func (c *BlobDiskController) CreateBlobDisk(dataDiskName string, storageAccountT return "", err } - if !forceStandAlone { - atomic.AddInt32(&c.accounts[storageAccountName].diskCount, 1) - } + atomic.AddInt32(&c.accounts[storageAccountName].diskCount, 1) return diskURI, nil } //DeleteBlobDisk : delete a blob disk from a node -func (c *BlobDiskController) DeleteBlobDisk(diskURI string, wasForced bool) error { +func (c *BlobDiskController) DeleteBlobDisk(diskURI string) error { storageAccountName, vhdName, err := diskNameandSANameFromURI(diskURI) if err != nil { return err @@ -285,11 +275,6 @@ func (c *BlobDiskController) DeleteBlobDisk(diskURI string, wasForced bool) erro glog.V(4).Infof("azureDisk - 
deleting volume %s", diskURI) return c.DeleteVolume(diskURI) } - // if forced (as in one disk = one storage account) - // delete the account completely - if wasForced { - return c.deleteStorageAccount(storageAccountName) - } blobSvc, err := c.getBlobSvcClient(storageAccountName) if err != nil { @@ -314,60 +299,6 @@ func (c *BlobDiskController) DeleteBlobDisk(diskURI string, wasForced bool) erro return err } -// Init tries best effort to ensure that 2 accounts standard/premium were created -// to be used by shared blob disks. This to increase the speed pvc provisioning (in most of cases) -func (c *BlobDiskController) init() error { - if !c.shouldInit() { - return nil - } - - c.setUniqueStrings() - - // get accounts - accounts, err := c.getAllStorageAccounts() - if err != nil { - return err - } - c.accounts = accounts - - if len(c.accounts) == 0 { - counter := 1 - for counter <= storageAccountsCountInit { - - accountType := storage.PremiumLRS - if n := math.Mod(float64(counter), 2); n == 0 { - accountType = storage.StandardLRS - } - - // We don't really care if these calls failed - // at this stage, we are trying to ensure 2 accounts (Standard/Premium) - // are there ready for PVC creation - - // if we failed here, the accounts will be created in the process - // of creating PVC - - // nor do we care if they were partially created, as the entire - // account creation process is idempotent - go func(thisNext int) { - newAccountName := getAccountNameForNum(thisNext) - - glog.Infof("azureDisk - BlobDiskController init process will create new storageAccount:%s type:%s", newAccountName, accountType) - err := c.createStorageAccount(newAccountName, accountType, c.common.location, true) - // TODO return created and error from - if err != nil { - glog.Infof("azureDisk - BlobDiskController init: create account %s with error:%s", newAccountName, err.Error()) - - } else { - glog.Infof("azureDisk - BlobDiskController init: created account %s", newAccountName) - } - }(counter) - counter = counter + 1 - } - } - - return nil -} - //Sets unique strings to be used as accountnames && || blob containers names func (c *BlobDiskController) setUniqueStrings() { uniqueString := c.common.resourceGroup + c.common.location + c.common.subscriptionID @@ -416,7 +347,7 @@ func (c *BlobDiskController) getBlobSvcClient(SAName string) (azstorage.BlobStor return blobSvc, err } - if client, err = azstorage.NewBasicClient(SAName, key); err != nil { + if client, err = azstorage.NewBasicClientOnSovereignCloud(SAName, key, c.common.cloud.Environment); err != nil { return blobSvc, err } @@ -471,20 +402,22 @@ func (c *BlobDiskController) ensureDefaultContainer(storageAccountName string) e if err != nil { glog.V(4).Infof("azureDisk - GetStorageAccount:%s err %s", storageAccountName, err.Error()) - return false, err + return false, nil // error performing the query - retryable } if provisionState == storage.Succeeded { return true, nil } - glog.V(4).Infof("azureDisk - GetStorageAccount:%s not ready yet", storageAccountName) - // leave it for next loop/sync loop - return false, fmt.Errorf("azureDisk - Account %s has not been flagged Succeeded by ARM", storageAccountName) + glog.V(4).Infof("azureDisk - GetStorageAccount:%s not ready yet (not flagged Succeeded by ARM)", storageAccountName) + return false, nil // back off and see if the account becomes ready on next retry }) // we have failed to ensure that account is ready for us to create // the default vhd container if err != nil { + if err == kwait.ErrWaitTimeout { + return 
fmt.Errorf("azureDisk - timed out waiting for storage account %s to become ready", storageAccountName) + } return err } } @@ -537,19 +470,6 @@ func (c *BlobDiskController) getDiskCount(SAName string) (int, error) { return int(c.accounts[SAName].diskCount), nil } -// shouldInit ensures that we only init the plugin once -// and we only do that in the controller - -func (c *BlobDiskController) shouldInit() bool { - if os.Args[0] == "kube-controller-manager" || (os.Args[0] == "/hyperkube" && os.Args[1] == "controller-manager") { - swapped := atomic.CompareAndSwapInt64(&initFlag, 0, 1) - if swapped { - return true - } - } - return false -} - func (c *BlobDiskController) getAllStorageAccounts() (map[string]*storageAccountState, error) { accountListResult, err := c.common.cloud.StorageAccountClient.List() if err != nil { @@ -622,14 +542,6 @@ func (c *BlobDiskController) createStorageAccount(storageAccountName string, sto c.addAccountState(storageAccountName, newAccountState) } - if !bExist { - // SA Accounts takes time to be provisioned - // so if this account was just created allow it sometime - // before polling - glog.V(2).Infof("azureDisk - storage account %s was just created, allowing time before polling status") - time.Sleep(25 * time.Second) // as observed 25 is the average time for SA to be provisioned - } - // finally, make sure that we default container is created // before handing it back over return c.ensureDefaultContainer(storageAccountName) diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_fakes.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_fakes.go new file mode 100644 index 000000000000..1bc0d0c811d5 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_fakes.go @@ -0,0 +1,627 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package azure + +import ( + "fmt" + "math/rand" + "net/http" + "strings" + "sync" + "time" + + "github.com/Azure/go-autorest/autorest/to" + + "github.com/Azure/azure-sdk-for-go/arm/compute" + "github.com/Azure/azure-sdk-for-go/arm/network" + "github.com/Azure/go-autorest/autorest" +) + +type fakeAzureLBClient struct { + mutex *sync.Mutex + FakeStore map[string]map[string]network.LoadBalancer +} + +func newFakeAzureLBClient() fakeAzureLBClient { + fLBC := fakeAzureLBClient{} + fLBC.FakeStore = make(map[string]map[string]network.LoadBalancer) + fLBC.mutex = &sync.Mutex{} + return fLBC +} + +func (fLBC fakeAzureLBClient) CreateOrUpdate(resourceGroupName string, loadBalancerName string, parameters network.LoadBalancer, cancel <-chan struct{}) (<-chan network.LoadBalancer, <-chan error) { + fLBC.mutex.Lock() + defer fLBC.mutex.Unlock() + resultChan := make(chan network.LoadBalancer, 1) + errChan := make(chan error, 1) + var result network.LoadBalancer + var err error + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + if _, ok := fLBC.FakeStore[resourceGroupName]; !ok { + fLBC.FakeStore[resourceGroupName] = make(map[string]network.LoadBalancer) + } + + // For dynamic ip allocation, just fill in the PrivateIPAddress + if parameters.FrontendIPConfigurations != nil { + for idx, config := range *parameters.FrontendIPConfigurations { + if config.PrivateIPAllocationMethod == network.Dynamic { + // Here we randomly assign an ip as private ip + // It dosen't smart enough to know whether it is in the subnet's range + (*parameters.FrontendIPConfigurations)[idx].PrivateIPAddress = getRandomIPPtr() + } + } + } + fLBC.FakeStore[resourceGroupName][loadBalancerName] = parameters + result = fLBC.FakeStore[resourceGroupName][loadBalancerName] + result.Response.Response = &http.Response{ + StatusCode: http.StatusOK, + } + err = nil + return resultChan, errChan +} + +func (fLBC fakeAzureLBClient) Delete(resourceGroupName string, loadBalancerName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { + fLBC.mutex.Lock() + defer fLBC.mutex.Unlock() + respChan := make(chan autorest.Response, 1) + errChan := make(chan error, 1) + var resp autorest.Response + var err error + defer func() { + respChan <- resp + errChan <- err + close(respChan) + close(errChan) + }() + if rgLBs, ok := fLBC.FakeStore[resourceGroupName]; ok { + if _, ok := rgLBs[loadBalancerName]; ok { + delete(rgLBs, loadBalancerName) + resp.Response = &http.Response{ + StatusCode: http.StatusAccepted, + } + err = nil + return respChan, errChan + } + } + resp.Response = &http.Response{ + StatusCode: http.StatusNotFound, + } + err = autorest.DetailedError{ + StatusCode: http.StatusNotFound, + Message: "Not such LB", + } + return respChan, errChan +} + +func (fLBC fakeAzureLBClient) Get(resourceGroupName string, loadBalancerName string, expand string) (result network.LoadBalancer, err error) { + fLBC.mutex.Lock() + defer fLBC.mutex.Unlock() + if _, ok := fLBC.FakeStore[resourceGroupName]; ok { + if entity, ok := fLBC.FakeStore[resourceGroupName][loadBalancerName]; ok { + return entity, nil + } + } + return result, autorest.DetailedError{ + StatusCode: http.StatusNotFound, + Message: "Not such LB", + } +} + +func (fLBC fakeAzureLBClient) List(resourceGroupName string) (result network.LoadBalancerListResult, err error) { + fLBC.mutex.Lock() + defer fLBC.mutex.Unlock() + var value []network.LoadBalancer + if _, ok := fLBC.FakeStore[resourceGroupName]; ok { + for _, v := range 
fLBC.FakeStore[resourceGroupName] { + value = append(value, v) + } + } + result.Response.Response = &http.Response{ + StatusCode: http.StatusOK, + } + result.NextLink = nil + result.Value = &value + return result, nil +} + +func (fLBC fakeAzureLBClient) ListNextResults(lastResult network.LoadBalancerListResult) (result network.LoadBalancerListResult, err error) { + fLBC.mutex.Lock() + defer fLBC.mutex.Unlock() + result.Response.Response = &http.Response{ + StatusCode: http.StatusOK, + } + result.NextLink = nil + result.Value = nil + return result, nil +} + +type fakeAzurePIPClient struct { + mutex *sync.Mutex + FakeStore map[string]map[string]network.PublicIPAddress + SubscriptionID string +} + +const publicIPAddressIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/publicIPAddresses/%s" + +// returns the full identifier of a publicIPAddress. +func getpublicIPAddressID(subscriptionID string, resourceGroupName, pipName string) string { + return fmt.Sprintf( + publicIPAddressIDTemplate, + subscriptionID, + resourceGroupName, + pipName) +} + +func newFakeAzurePIPClient(subscriptionID string) fakeAzurePIPClient { + fAPC := fakeAzurePIPClient{} + fAPC.FakeStore = make(map[string]map[string]network.PublicIPAddress) + fAPC.SubscriptionID = subscriptionID + fAPC.mutex = &sync.Mutex{} + return fAPC +} + +func (fAPC fakeAzurePIPClient) CreateOrUpdate(resourceGroupName string, publicIPAddressName string, parameters network.PublicIPAddress, cancel <-chan struct{}) (<-chan network.PublicIPAddress, <-chan error) { + fAPC.mutex.Lock() + defer fAPC.mutex.Unlock() + resultChan := make(chan network.PublicIPAddress, 1) + errChan := make(chan error, 1) + var result network.PublicIPAddress + var err error + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + if _, ok := fAPC.FakeStore[resourceGroupName]; !ok { + fAPC.FakeStore[resourceGroupName] = make(map[string]network.PublicIPAddress) + } + + // assign id + pipID := getpublicIPAddressID(fAPC.SubscriptionID, resourceGroupName, publicIPAddressName) + parameters.ID = &pipID + + // only create in the case user has not provided + if parameters.PublicIPAddressPropertiesFormat != nil && + parameters.PublicIPAddressPropertiesFormat.PublicIPAllocationMethod == network.Static { + // assign ip + parameters.IPAddress = getRandomIPPtr() + } + + fAPC.FakeStore[resourceGroupName][publicIPAddressName] = parameters + result = fAPC.FakeStore[resourceGroupName][publicIPAddressName] + result.Response.Response = &http.Response{ + StatusCode: http.StatusOK, + } + err = nil + return resultChan, errChan +} + +func (fAPC fakeAzurePIPClient) Delete(resourceGroupName string, publicIPAddressName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { + fAPC.mutex.Lock() + defer fAPC.mutex.Unlock() + respChan := make(chan autorest.Response, 1) + errChan := make(chan error, 1) + var resp autorest.Response + var err error + defer func() { + respChan <- resp + errChan <- err + close(respChan) + close(errChan) + }() + if rgPIPs, ok := fAPC.FakeStore[resourceGroupName]; ok { + if _, ok := rgPIPs[publicIPAddressName]; ok { + delete(rgPIPs, publicIPAddressName) + resp.Response = &http.Response{ + StatusCode: http.StatusAccepted, + } + err = nil + return respChan, errChan + } + } + resp.Response = &http.Response{ + StatusCode: http.StatusNotFound, + } + err = autorest.DetailedError{ + StatusCode: http.StatusNotFound, + Message: "Not such PIP", + } + return respChan, errChan +} + +func (fAPC 
fakeAzurePIPClient) Get(resourceGroupName string, publicIPAddressName string, expand string) (result network.PublicIPAddress, err error) { + fAPC.mutex.Lock() + defer fAPC.mutex.Unlock() + if _, ok := fAPC.FakeStore[resourceGroupName]; ok { + if entity, ok := fAPC.FakeStore[resourceGroupName][publicIPAddressName]; ok { + return entity, nil + } + } + return result, autorest.DetailedError{ + StatusCode: http.StatusNotFound, + Message: "Not such PIP", + } +} + +func (fAPC fakeAzurePIPClient) ListNextResults(lastResults network.PublicIPAddressListResult) (result network.PublicIPAddressListResult, err error) { + fAPC.mutex.Lock() + defer fAPC.mutex.Unlock() + return network.PublicIPAddressListResult{}, nil +} + +func (fAPC fakeAzurePIPClient) List(resourceGroupName string) (result network.PublicIPAddressListResult, err error) { + fAPC.mutex.Lock() + defer fAPC.mutex.Unlock() + var value []network.PublicIPAddress + if _, ok := fAPC.FakeStore[resourceGroupName]; ok { + for _, v := range fAPC.FakeStore[resourceGroupName] { + value = append(value, v) + } + } + result.Response.Response = &http.Response{ + StatusCode: http.StatusOK, + } + result.NextLink = nil + result.Value = &value + return result, nil +} + +type fakeAzureInterfacesClient struct { + mutex *sync.Mutex + FakeStore map[string]map[string]network.Interface +} + +func newFakeAzureInterfacesClient() fakeAzureInterfacesClient { + fIC := fakeAzureInterfacesClient{} + fIC.FakeStore = make(map[string]map[string]network.Interface) + fIC.mutex = &sync.Mutex{} + + return fIC +} + +func (fIC fakeAzureInterfacesClient) CreateOrUpdate(resourceGroupName string, networkInterfaceName string, parameters network.Interface, cancel <-chan struct{}) (<-chan network.Interface, <-chan error) { + fIC.mutex.Lock() + defer fIC.mutex.Unlock() + resultChan := make(chan network.Interface, 1) + errChan := make(chan error, 1) + var result network.Interface + var err error + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + if _, ok := fIC.FakeStore[resourceGroupName]; !ok { + fIC.FakeStore[resourceGroupName] = make(map[string]network.Interface) + } + fIC.FakeStore[resourceGroupName][networkInterfaceName] = parameters + result = fIC.FakeStore[resourceGroupName][networkInterfaceName] + result.Response.Response = &http.Response{ + StatusCode: http.StatusOK, + } + err = nil + + return resultChan, errChan +} + +func (fIC fakeAzureInterfacesClient) Get(resourceGroupName string, networkInterfaceName string, expand string) (result network.Interface, err error) { + fIC.mutex.Lock() + defer fIC.mutex.Unlock() + if _, ok := fIC.FakeStore[resourceGroupName]; ok { + if entity, ok := fIC.FakeStore[resourceGroupName][networkInterfaceName]; ok { + return entity, nil + } + } + return result, autorest.DetailedError{ + StatusCode: http.StatusNotFound, + Message: "Not such Interface", + } +} + +func (fIC fakeAzureInterfacesClient) GetVirtualMachineScaleSetNetworkInterface(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (result network.Interface, err error) { + return result, nil +} + +type fakeAzureVirtualMachinesClient struct { + mutex *sync.Mutex + FakeStore map[string]map[string]compute.VirtualMachine +} + +func newFakeAzureVirtualMachinesClient() fakeAzureVirtualMachinesClient { + fVMC := fakeAzureVirtualMachinesClient{} + fVMC.FakeStore = make(map[string]map[string]compute.VirtualMachine) + fVMC.mutex = &sync.Mutex{} + return fVMC +} + +func (fVMC 
fakeAzureVirtualMachinesClient) CreateOrUpdate(resourceGroupName string, VMName string, parameters compute.VirtualMachine, cancel <-chan struct{}) (<-chan compute.VirtualMachine, <-chan error) { + fVMC.mutex.Lock() + defer fVMC.mutex.Unlock() + resultChan := make(chan compute.VirtualMachine, 1) + errChan := make(chan error, 1) + var result compute.VirtualMachine + var err error + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + if _, ok := fVMC.FakeStore[resourceGroupName]; !ok { + fVMC.FakeStore[resourceGroupName] = make(map[string]compute.VirtualMachine) + } + fVMC.FakeStore[resourceGroupName][VMName] = parameters + result = fVMC.FakeStore[resourceGroupName][VMName] + result.Response.Response = &http.Response{ + StatusCode: http.StatusOK, + } + err = nil + return resultChan, errChan +} + +func (fVMC fakeAzureVirtualMachinesClient) Get(resourceGroupName string, VMName string, expand compute.InstanceViewTypes) (result compute.VirtualMachine, err error) { + fVMC.mutex.Lock() + defer fVMC.mutex.Unlock() + if _, ok := fVMC.FakeStore[resourceGroupName]; ok { + if entity, ok := fVMC.FakeStore[resourceGroupName][VMName]; ok { + return entity, nil + } + } + return result, autorest.DetailedError{ + StatusCode: http.StatusNotFound, + Message: "Not such VM", + } +} + +func (fVMC fakeAzureVirtualMachinesClient) List(resourceGroupName string) (result compute.VirtualMachineListResult, err error) { + fVMC.mutex.Lock() + defer fVMC.mutex.Unlock() + var value []compute.VirtualMachine + if _, ok := fVMC.FakeStore[resourceGroupName]; ok { + for _, v := range fVMC.FakeStore[resourceGroupName] { + value = append(value, v) + } + } + result.Response.Response = &http.Response{ + StatusCode: http.StatusOK, + } + result.NextLink = nil + result.Value = &value + return result, nil +} +func (fVMC fakeAzureVirtualMachinesClient) ListNextResults(lastResults compute.VirtualMachineListResult) (result compute.VirtualMachineListResult, err error) { + fVMC.mutex.Lock() + defer fVMC.mutex.Unlock() + return compute.VirtualMachineListResult{}, nil +} + +type fakeAzureSubnetsClient struct { + mutex *sync.Mutex + FakeStore map[string]map[string]network.Subnet +} + +func newFakeAzureSubnetsClient() fakeAzureSubnetsClient { + fASC := fakeAzureSubnetsClient{} + fASC.FakeStore = make(map[string]map[string]network.Subnet) + fASC.mutex = &sync.Mutex{} + return fASC +} + +func (fASC fakeAzureSubnetsClient) CreateOrUpdate(resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters network.Subnet, cancel <-chan struct{}) (<-chan network.Subnet, <-chan error) { + fASC.mutex.Lock() + defer fASC.mutex.Unlock() + resultChan := make(chan network.Subnet, 1) + errChan := make(chan error, 1) + var result network.Subnet + var err error + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + rgVnet := strings.Join([]string{resourceGroupName, virtualNetworkName}, "AND") + if _, ok := fASC.FakeStore[rgVnet]; !ok { + fASC.FakeStore[rgVnet] = make(map[string]network.Subnet) + } + fASC.FakeStore[rgVnet][subnetName] = subnetParameters + result = fASC.FakeStore[rgVnet][subnetName] + result.Response.Response = &http.Response{ + StatusCode: http.StatusOK, + } + err = nil + return resultChan, errChan +} + +func (fASC fakeAzureSubnetsClient) Delete(resourceGroupName string, virtualNetworkName string, subnetName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { + fASC.mutex.Lock() + defer fASC.mutex.Unlock() + 
respChan := make(chan autorest.Response, 1) + errChan := make(chan error, 1) + var resp autorest.Response + var err error + defer func() { + respChan <- resp + errChan <- err + close(respChan) + close(errChan) + }() + + rgVnet := strings.Join([]string{resourceGroupName, virtualNetworkName}, "AND") + if rgSubnets, ok := fASC.FakeStore[rgVnet]; ok { + if _, ok := rgSubnets[subnetName]; ok { + delete(rgSubnets, subnetName) + resp.Response = &http.Response{ + StatusCode: http.StatusAccepted, + } + err = nil + return respChan, errChan + } + } + resp.Response = &http.Response{ + StatusCode: http.StatusNotFound, + } + err = autorest.DetailedError{ + StatusCode: http.StatusNotFound, + Message: "Not such Subnet", + } + return respChan, errChan +} +func (fASC fakeAzureSubnetsClient) Get(resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (result network.Subnet, err error) { + fASC.mutex.Lock() + defer fASC.mutex.Unlock() + rgVnet := strings.Join([]string{resourceGroupName, virtualNetworkName}, "AND") + if _, ok := fASC.FakeStore[rgVnet]; ok { + if entity, ok := fASC.FakeStore[rgVnet][subnetName]; ok { + return entity, nil + } + } + return result, autorest.DetailedError{ + StatusCode: http.StatusNotFound, + Message: "Not such Subnet", + } +} +func (fASC fakeAzureSubnetsClient) List(resourceGroupName string, virtualNetworkName string) (result network.SubnetListResult, err error) { + fASC.mutex.Lock() + defer fASC.mutex.Unlock() + rgVnet := strings.Join([]string{resourceGroupName, virtualNetworkName}, "AND") + var value []network.Subnet + if _, ok := fASC.FakeStore[rgVnet]; ok { + for _, v := range fASC.FakeStore[rgVnet] { + value = append(value, v) + } + } + result.Response.Response = &http.Response{ + StatusCode: http.StatusOK, + } + result.NextLink = nil + result.Value = &value + return result, nil +} + +type fakeAzureNSGClient struct { + mutex *sync.Mutex + FakeStore map[string]map[string]network.SecurityGroup +} + +func newFakeAzureNSGClient() fakeAzureNSGClient { + fNSG := fakeAzureNSGClient{} + fNSG.FakeStore = make(map[string]map[string]network.SecurityGroup) + fNSG.mutex = &sync.Mutex{} + return fNSG +} + +func (fNSG fakeAzureNSGClient) CreateOrUpdate(resourceGroupName string, networkSecurityGroupName string, parameters network.SecurityGroup, cancel <-chan struct{}) (<-chan network.SecurityGroup, <-chan error) { + fNSG.mutex.Lock() + defer fNSG.mutex.Unlock() + resultChan := make(chan network.SecurityGroup, 1) + errChan := make(chan error, 1) + var result network.SecurityGroup + var err error + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + if _, ok := fNSG.FakeStore[resourceGroupName]; !ok { + fNSG.FakeStore[resourceGroupName] = make(map[string]network.SecurityGroup) + } + fNSG.FakeStore[resourceGroupName][networkSecurityGroupName] = parameters + result = fNSG.FakeStore[resourceGroupName][networkSecurityGroupName] + result.Response.Response = &http.Response{ + StatusCode: http.StatusOK, + } + err = nil + return resultChan, errChan +} + +func (fNSG fakeAzureNSGClient) Delete(resourceGroupName string, networkSecurityGroupName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { + fNSG.mutex.Lock() + defer fNSG.mutex.Unlock() + respChan := make(chan autorest.Response, 1) + errChan := make(chan error, 1) + var resp autorest.Response + var err error + defer func() { + respChan <- resp + errChan <- err + close(respChan) + close(errChan) + }() + if rgSGs, ok := 
fNSG.FakeStore[resourceGroupName]; ok { + if _, ok := rgSGs[networkSecurityGroupName]; ok { + delete(rgSGs, networkSecurityGroupName) + resp.Response = &http.Response{ + StatusCode: http.StatusAccepted, + } + err = nil + return respChan, errChan + } + } + resp.Response = &http.Response{ + StatusCode: http.StatusNotFound, + } + err = autorest.DetailedError{ + StatusCode: http.StatusNotFound, + Message: "Not such NSG", + } + return respChan, errChan +} + +func (fNSG fakeAzureNSGClient) Get(resourceGroupName string, networkSecurityGroupName string, expand string) (result network.SecurityGroup, err error) { + fNSG.mutex.Lock() + defer fNSG.mutex.Unlock() + if _, ok := fNSG.FakeStore[resourceGroupName]; ok { + if entity, ok := fNSG.FakeStore[resourceGroupName][networkSecurityGroupName]; ok { + return entity, nil + } + } + return result, autorest.DetailedError{ + StatusCode: http.StatusNotFound, + Message: "Not such NSG", + } +} + +func (fNSG fakeAzureNSGClient) List(resourceGroupName string) (result network.SecurityGroupListResult, err error) { + fNSG.mutex.Lock() + defer fNSG.mutex.Unlock() + var value []network.SecurityGroup + if _, ok := fNSG.FakeStore[resourceGroupName]; ok { + for _, v := range fNSG.FakeStore[resourceGroupName] { + value = append(value, v) + } + } + result.Response.Response = &http.Response{ + StatusCode: http.StatusOK, + } + result.NextLink = nil + result.Value = &value + return result, nil +} + +func getRandomIPPtr() *string { + rand.Seed(time.Now().UnixNano()) + return to.StringPtr(fmt.Sprintf("%d.%d.%d.%d", rand.Intn(256), rand.Intn(256), rand.Intn(256), rand.Intn(256))) +} diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_instances.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_instances.go index 3d9d64475501..8378e596a9d1 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_instances.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_instances.go @@ -17,7 +17,6 @@ limitations under the License. package azure import ( - "errors" "fmt" "k8s.io/api/core/v1" @@ -49,19 +48,10 @@ func (az *Cloud) NodeAddresses(name types.NodeName) ([]v1.NodeAddress, error) { } return addresses, nil } - ip, err := az.getIPForMachine(name) + ip, err := az.GetIPForMachineWithRetry(name) if err != nil { - if az.CloudProviderBackoff { - glog.V(2).Infof("NodeAddresses(%s) backing off", name) - ip, err = az.GetIPForMachineWithRetry(name) - if err != nil { - glog.V(2).Infof("NodeAddresses(%s) abort backoff", name) - return nil, err - } - } else { - glog.Errorf("error: az.NodeAddresses, az.getIPForMachine(%s), err=%v", name, err) - return nil, err - } + glog.V(2).Infof("NodeAddresses(%s) abort backoff", name) + return nil, err } return []v1.NodeAddress{ @@ -90,7 +80,20 @@ func (az *Cloud) ExternalID(name types.NodeName) (string, error) { // InstanceExistsByProviderID returns true if the instance with the given provider id still exists and is running. // If false is returned with no error, the instance will be immediately deleted by the cloud controller manager. 
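
The contract spelled out in the comment above (a nil error together with false means the backing VM is gone and the Node can be deleted) is easy to get wrong from the caller's side. A minimal sketch under stated assumptions: an initialized *Cloud named az and a *v1.Node named node are in scope, and the helper name handleNodeExistence is illustrative only, not part of the vendored code.

func handleNodeExistence(az *Cloud, node *v1.Node) error {
    exists, err := az.InstanceExistsByProviderID(node.Spec.ProviderID)
    if err != nil {
        // API or transport failure: keep the Node object and retry later.
        return err
    }
    if !exists {
        // (false, nil) is the signal that the backing VM no longer exists,
        // so the cloud controller manager will delete the Node object.
        glog.V(2).Infof("instance for node %s no longer exists", node.Name)
    }
    return nil
}
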
func (az *Cloud) InstanceExistsByProviderID(providerID string) (bool, error) { - return false, errors.New("unimplemented") + name, err := splitProviderID(providerID) + if err != nil { + return false, err + } + + _, err = az.InstanceID(name) + if err != nil { + if err == cloudprovider.InstanceNotFound { + return false, nil + } + return false, err + } + + return true, nil } func (az *Cloud) isCurrentInstance(name types.NodeName) (bool, error) { @@ -114,6 +117,44 @@ func (az *Cloud) InstanceID(name types.NodeName) (string, error) { } } } + + if az.Config.VMType == vmTypeVMSS { + id, err := az.getVmssInstanceID(name) + if err == cloudprovider.InstanceNotFound || err == ErrorNotVmssInstance { + // Retry with standard type because master nodes may not belong to any vmss. + return az.getStandardInstanceID(name) + } + + return id, err + } + + return az.getStandardInstanceID(name) +} + +func (az *Cloud) getVmssInstanceID(name types.NodeName) (string, error) { + var machine compute.VirtualMachineScaleSetVM + var exists bool + var err error + az.operationPollRateLimiter.Accept() + machine, exists, err = az.getVmssVirtualMachine(name) + if err != nil { + if az.CloudProviderBackoff { + glog.V(2).Infof("InstanceID(%s) backing off", name) + machine, exists, err = az.GetScaleSetsVMWithRetry(name) + if err != nil { + glog.V(2).Infof("InstanceID(%s) abort backoff", name) + return "", err + } + } else { + return "", err + } + } else if !exists { + return "", cloudprovider.InstanceNotFound + } + return *machine.ID, nil +} + +func (az *Cloud) getStandardInstanceID(name types.NodeName) (string, error) { var machine compute.VirtualMachine var exists bool var err error @@ -165,6 +206,39 @@ func (az *Cloud) InstanceType(name types.NodeName) (string, error) { } } } + + if az.Config.VMType == vmTypeVMSS { + machineType, err := az.getVmssInstanceType(name) + if err == cloudprovider.InstanceNotFound || err == ErrorNotVmssInstance { + // Retry with standard type because master nodes may not belong to any vmss. + return az.getStandardInstanceType(name) + } + + return machineType, err + } + + return az.getStandardInstanceType(name) +} + +// getVmssInstanceType gets instance with type vmss. +func (az *Cloud) getVmssInstanceType(name types.NodeName) (string, error) { + machine, exists, err := az.getVmssVirtualMachine(name) + if err != nil { + glog.Errorf("error: az.InstanceType(%s), az.getVmssVirtualMachine(%s) err=%v", name, name, err) + return "", err + } else if !exists { + return "", cloudprovider.InstanceNotFound + } + + if machine.Sku.Name != nil { + return *machine.Sku.Name, nil + } + + return "", fmt.Errorf("instance type is not set") +} + +// getStandardInstanceType gets instance with standard type. 
+func (az *Cloud) getStandardInstanceType(name types.NodeName) (string, error) { machine, exists, err := az.getVirtualMachine(name) if err != nil { glog.Errorf("error: az.InstanceType(%s), az.getVirtualMachine(%s) err=%v", name, name, err) @@ -187,39 +261,6 @@ func (az *Cloud) CurrentNodeName(hostname string) (types.NodeName, error) { return types.NodeName(hostname), nil } -func (az *Cloud) listAllNodesInResourceGroup() ([]compute.VirtualMachine, error) { - allNodes := []compute.VirtualMachine{} - - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("VirtualMachinesClient.List(%s): start", az.ResourceGroup) - result, err := az.VirtualMachinesClient.List(az.ResourceGroup) - glog.V(10).Infof("VirtualMachinesClient.List(%s): end", az.ResourceGroup) - if err != nil { - glog.Errorf("error: az.listAllNodesInResourceGroup(), az.VirtualMachinesClient.List(%s), err=%v", az.ResourceGroup, err) - return nil, err - } - - morePages := (result.Value != nil && len(*result.Value) > 1) - - for morePages { - allNodes = append(allNodes, *result.Value...) - - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("VirtualMachinesClient.ListAllNextResults(%v): start", az.ResourceGroup) - result, err = az.VirtualMachinesClient.ListAllNextResults(result) - glog.V(10).Infof("VirtualMachinesClient.ListAllNextResults(%v): end", az.ResourceGroup) - if err != nil { - glog.Errorf("error: az.listAllNodesInResourceGroup(), az.VirtualMachinesClient.ListAllNextResults(%v), err=%v", result, err) - return nil, err - } - - morePages = (result.Value != nil && len(*result.Value) > 1) - } - - return allNodes, nil - -} - // mapNodeNameToVMName maps a k8s NodeName to an Azure VM Name // This is a simple string cast. func mapNodeNameToVMName(nodeName types.NodeName) string { diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer.go index bd3fcf28ac20..c1bc5bf972f1 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer.go @@ -18,11 +18,13 @@ package azure import ( "fmt" + "math" "strconv" "strings" "k8s.io/api/core/v1" utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" serviceapi "k8s.io/kubernetes/pkg/api/v1/service" "github.com/Azure/azure-sdk-for-go/arm/compute" @@ -35,257 +37,94 @@ import ( // ServiceAnnotationLoadBalancerInternal is the annotation used on the service const ServiceAnnotationLoadBalancerInternal = "service.beta.kubernetes.io/azure-load-balancer-internal" +// ServiceAnnotationLoadBalancerInternalSubnet is the annotation used on the service +// to specify what subnet it is exposed on +const ServiceAnnotationLoadBalancerInternalSubnet = "service.beta.kubernetes.io/azure-load-balancer-internal-subnet" + +// ServiceAnnotationLoadBalancerMode is the annotation used on the service to specify the +// Azure load balancer selection based on availability sets +// There are currently three possible load balancer selection modes : +// 1. Default mode - service has no annotation ("service.beta.kubernetes.io/azure-load-balancer-mode") +// In this case the Loadbalancer of the primary Availability set is selected +// 2. "__auto__" mode - service is annotated with __auto__ value, this when loadbalancer from any availability set +// is selected which has the miinimum rules associated with it. +// 3. 
"as1,as2" mode - this is when the laod balancer from the specified availability sets is selected that has the +// miinimum rules associated with it. +const ServiceAnnotationLoadBalancerMode = "service.beta.kubernetes.io/azure-load-balancer-mode" + +// ServiceAnnotationLoadBalancerAutoModeValue the annotation used on the service to specify the +// Azure load balancer auto selection from the availability sets +const ServiceAnnotationLoadBalancerAutoModeValue = "__auto__" + +// ServiceAnnotationDNSLabelName annotation speficying the DNS label name for the service. +const ServiceAnnotationDNSLabelName = "service.beta.kubernetes.io/azure-dns-label-name" + +// ServiceAnnotationSharedSecurityRule is the annotation used on the service +// to specify that the service should be exposed using an Azure security rule +// that may be shared with other service, trading specificity of rules for an +// increase in the number of services that can be exposed. This relies on the +// Azure "augmented security rules" feature which at the time of writing is in +// preview and available only in certain regions. +const ServiceAnnotationSharedSecurityRule = "service.beta.kubernetes.io/azure-shared-securityrule" + // GetLoadBalancer returns whether the specified load balancer exists, and // if so, what its status is. func (az *Cloud) GetLoadBalancer(clusterName string, service *v1.Service) (status *v1.LoadBalancerStatus, exists bool, err error) { - isInternal := requiresInternalLoadBalancer(service) - lbName := getLoadBalancerName(clusterName, isInternal) - serviceName := getServiceName(service) - - lb, existsLb, err := az.getAzureLoadBalancer(lbName) + _, status, exists, err = az.getServiceLoadBalancer(service, clusterName, nil, false) if err != nil { return nil, false, err } - if !existsLb { - glog.V(5).Infof("get(%s): lb(%s) - doesn't exist", serviceName, lbName) - return nil, false, nil + if exists == false { + serviceName := getServiceName(service) + glog.V(5).Infof("getloadbalancer (cluster:%s) (service:%s)- IP doesn't exist in any of the lbs", clusterName, serviceName) + return nil, false, fmt.Errorf("Service(%s) - Loadbalancer not found", serviceName) } - - var lbIP *string - - if isInternal { - lbFrontendIPConfigName := getFrontendIPConfigName(service) - for _, ipConfiguration := range *lb.FrontendIPConfigurations { - if lbFrontendIPConfigName == *ipConfiguration.Name { - lbIP = ipConfiguration.PrivateIPAddress - break - } - } - } else { - // TODO: Consider also read address from lb's FrontendIPConfigurations - pipName, err := az.determinePublicIPName(clusterName, service) - if err != nil { - return nil, false, err - } - pip, existsPip, err := az.getPublicIPAddress(pipName) - if err != nil { - return nil, false, err - } - if existsPip { - lbIP = pip.IPAddress - } - } - - if lbIP == nil { - glog.V(5).Infof("get(%s): lb(%s) - IP doesn't exist", serviceName, lbName) - return nil, false, nil - } - - return &v1.LoadBalancerStatus{ - Ingress: []v1.LoadBalancerIngress{{IP: *lbIP}}, - }, true, nil + return status, true, nil } -func (az *Cloud) determinePublicIPName(clusterName string, service *v1.Service) (string, error) { - loadBalancerIP := service.Spec.LoadBalancerIP - if len(loadBalancerIP) == 0 { - return getPublicIPName(clusterName, service), nil +func getPublicIPLabel(service *v1.Service) string { + if labelName, found := service.Annotations[ServiceAnnotationDNSLabelName]; found { + return labelName } - - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("PublicIPAddressesClient.List(%v): start", 
az.ResourceGroup) - list, err := az.PublicIPAddressesClient.List(az.ResourceGroup) - glog.V(10).Infof("PublicIPAddressesClient.List(%v): end", az.ResourceGroup) - if err != nil { - return "", err - } - - if list.Value != nil { - for ix := range *list.Value { - ip := &(*list.Value)[ix] - if ip.PublicIPAddressPropertiesFormat.IPAddress != nil && - *ip.PublicIPAddressPropertiesFormat.IPAddress == loadBalancerIP { - return *ip.Name, nil - } - } - } - // TODO: follow next link here? Will there really ever be that many public IPs? - - return "", fmt.Errorf("user supplied IP Address %s was not found", loadBalancerIP) + return "" } // EnsureLoadBalancer creates a new load balancer 'name', or updates the existing one. Returns the status of the balancer func (az *Cloud) EnsureLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) { - isInternal := requiresInternalLoadBalancer(service) - lbName := getLoadBalancerName(clusterName, isInternal) - // When a client updates the internal load balancer annotation, // the service may be switched from an internal LB to a public one, or vise versa. // Here we'll firstly ensure service do not lie in the opposite LB. - err := az.cleanupLoadBalancer(clusterName, service, !isInternal) - if err != nil { - return nil, err - } - serviceName := getServiceName(service) - glog.V(5).Infof("ensure(%s): START clusterName=%q lbName=%q", serviceName, clusterName, lbName) - - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("SecurityGroupsClient.Get(%q): start", az.SecurityGroupName) - sg, err := az.SecurityGroupsClient.Get(az.ResourceGroup, az.SecurityGroupName, "") - glog.V(10).Infof("SecurityGroupsClient.Get(%q): end", az.SecurityGroupName) - if err != nil { + glog.V(5).Infof("ensureloadbalancer(%s): START clusterName=%q", serviceName, clusterName) + flippedService := flipServiceInternalAnnotation(service) + if _, err := az.reconcileLoadBalancer(clusterName, flippedService, nil, false /* wantLb */); err != nil { return nil, err } - sg, sgNeedsUpdate, err := az.reconcileSecurityGroup(sg, clusterName, service, true /* wantLb */) - if err != nil { + + if _, err := az.reconcilePublicIP(clusterName, service, true /* wantLb */); err != nil { return nil, err } - if sgNeedsUpdate { - glog.V(3).Infof("ensure(%s): sg(%s) - updating", serviceName, *sg.Name) - // azure-sdk-for-go introduced contraint validation which breaks the updating here if we don't set these - // to nil. 
This is a workaround until https://github.com/Azure/go-autorest/issues/112 is fixed - sg.SecurityGroupPropertiesFormat.NetworkInterfaces = nil - sg.SecurityGroupPropertiesFormat.Subnets = nil - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("SecurityGroupsClient.CreateOrUpdate(%q): start", *sg.Name) - respChan, errChan := az.SecurityGroupsClient.CreateOrUpdate(az.ResourceGroup, *sg.Name, sg, nil) - resp := <-respChan - err := <-errChan - glog.V(10).Infof("SecurityGroupsClient.CreateOrUpdate(%q): end", *sg.Name) - if az.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) { - glog.V(2).Infof("ensure(%s) backing off: sg(%s) - updating", serviceName, *sg.Name) - retryErr := az.CreateOrUpdateSGWithRetry(sg) - if retryErr != nil { - glog.V(2).Infof("ensure(%s) abort backoff: sg(%s) - updating", serviceName, *sg.Name) - return nil, retryErr - } - } - if err != nil { - return nil, err - } - } - lb, existsLb, err := az.getAzureLoadBalancer(lbName) + lb, err := az.reconcileLoadBalancer(clusterName, service, nodes, true /* wantLb */) if err != nil { return nil, err } - if !existsLb { - lb = network.LoadBalancer{ - Name: &lbName, - Location: &az.Location, - LoadBalancerPropertiesFormat: &network.LoadBalancerPropertiesFormat{}, - } - } - - var lbIP *string - var fipConfigurationProperties *network.FrontendIPConfigurationPropertiesFormat - - if isInternal { - subnet, existsSubnet, err := az.getSubnet(az.VnetName, az.SubnetName) - if err != nil { - return nil, err - } - - if !existsSubnet { - return nil, fmt.Errorf("ensure(%s): lb(%s) - failed to get subnet: %s/%s", serviceName, lbName, az.VnetName, az.SubnetName) - } - - configProperties := network.FrontendIPConfigurationPropertiesFormat{ - Subnet: &network.Subnet{ - ID: subnet.ID, - }, - } - - loadBalancerIP := service.Spec.LoadBalancerIP - if loadBalancerIP != "" { - configProperties.PrivateIPAllocationMethod = network.Static - configProperties.PrivateIPAddress = &loadBalancerIP - lbIP = &loadBalancerIP - } else { - // We'll need to call GetLoadBalancer later to retrieve allocated IP. 
- configProperties.PrivateIPAllocationMethod = network.Dynamic - } - - fipConfigurationProperties = &configProperties - } else { - pipName, err := az.determinePublicIPName(clusterName, service) - if err != nil { - return nil, err - } - pip, err := az.ensurePublicIPExists(serviceName, pipName) - if err != nil { - return nil, err - } - - lbIP = pip.IPAddress - fipConfigurationProperties = &network.FrontendIPConfigurationPropertiesFormat{ - PublicIPAddress: &network.PublicIPAddress{ID: pip.ID}, - } - } - lb, lbNeedsUpdate, err := az.reconcileLoadBalancer(lb, fipConfigurationProperties, clusterName, service, nodes) + lbStatus, err := az.getServiceLoadBalancerStatus(service, lb) if err != nil { return nil, err } - if !existsLb || lbNeedsUpdate { - glog.V(3).Infof("ensure(%s): lb(%s) - updating", serviceName, lbName) - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("LoadBalancerClient.CreateOrUpdate(%q): start", *lb.Name) - respChan, errChan := az.LoadBalancerClient.CreateOrUpdate(az.ResourceGroup, *lb.Name, lb, nil) - resp := <-respChan - err := <-errChan - glog.V(10).Infof("LoadBalancerClient.CreateOrUpdate(%q): end", *lb.Name) - if az.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) { - glog.V(2).Infof("ensure(%s) backing off: lb(%s) - updating", serviceName, lbName) - retryErr := az.CreateOrUpdateLBWithRetry(lb) - if retryErr != nil { - glog.V(2).Infof("ensure(%s) abort backoff: lb(%s) - updating", serviceName, lbName) - return nil, retryErr - } - } - if err != nil { - return nil, err - } - } - - // Add the machines to the backend pool if they're not already - lbBackendName := getBackendPoolName(clusterName) - lbBackendPoolID := az.getBackendPoolID(lbName, lbBackendName) - hostUpdates := make([]func() error, len(nodes)) - for i, node := range nodes { - localNodeName := node.Name - f := func() error { - err := az.ensureHostInPool(serviceName, types.NodeName(localNodeName), lbBackendPoolID) - if err != nil { - return fmt.Errorf("ensure(%s): lb(%s) - failed to ensure host in pool: %q", serviceName, lbName, err) - } - return nil - } - hostUpdates[i] = f - } - errs := utilerrors.AggregateGoroutines(hostUpdates...) - if errs != nil { - return nil, utilerrors.Flatten(errs) + var serviceIP *string + if lbStatus != nil && len(lbStatus.Ingress) > 0 { + serviceIP = &lbStatus.Ingress[0].IP } - - glog.V(2).Infof("ensure(%s): lb(%s) finished", serviceName, lbName) - - if lbIP == nil { - lbStatus, exists, err := az.GetLoadBalancer(clusterName, service) - if err != nil { - return nil, err - } - if !exists { - return nil, fmt.Errorf("ensure(%s): lb(%s) - failed to get back load balancer", serviceName, lbName) - } - return lbStatus, nil + glog.V(10).Infof("Calling reconcileSecurityGroup from EnsureLoadBalancer for %s with IP %s, wantLb = true", service.Name, logSafe(serviceIP)) + if _, err := az.reconcileSecurityGroup(clusterName, service, serviceIP, true /* wantLb */); err != nil { + return nil, err } - return &v1.LoadBalancerStatus{ - Ingress: []v1.LoadBalancerIngress{{IP: *lbIP}}, - }, nil + return lbStatus, nil } // UpdateLoadBalancer updates hosts under the specified load balancer. @@ -302,152 +141,250 @@ func (az *Cloud) UpdateLoadBalancer(clusterName string, service *v1.Service, nod // doesn't exist even if some part of it is still laying around. 
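
The reconcile path above is driven by the service annotations declared near the top of azure_loadbalancer.go. A minimal sketch of a Service that opts into an internal load balancer on a specific subnet with automatic load balancer selection; the names "frontend" and "apps-subnet" are placeholders, the function is illustrative only, and a metav1 import ("k8s.io/apimachinery/pkg/apis/meta/v1") is assumed in addition to the imports shown in this file.

func exampleAnnotatedService() *v1.Service {
    return &v1.Service{
        ObjectMeta: metav1.ObjectMeta{
            Name: "frontend",
            Annotations: map[string]string{
                // Expose the service on an internal load balancer, in a named subnet.
                ServiceAnnotationLoadBalancerInternal:       "true",
                ServiceAnnotationLoadBalancerInternalSubnet: "apps-subnet",
                // Let the provider pick the availability-set LB with the fewest rules.
                ServiceAnnotationLoadBalancerMode: ServiceAnnotationLoadBalancerAutoModeValue,
            },
        },
        Spec: v1.ServiceSpec{
            Type:  v1.ServiceTypeLoadBalancer,
            Ports: []v1.ServicePort{{Port: 443, Protocol: v1.ProtocolTCP}},
        },
    }
}

For a public service, ServiceAnnotationDNSLabelName instead sets the DNS label on the allocated public IP via ensurePublicIPExists.
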
func (az *Cloud) EnsureLoadBalancerDeleted(clusterName string, service *v1.Service) error { isInternal := requiresInternalLoadBalancer(service) - lbName := getLoadBalancerName(clusterName, isInternal) serviceName := getServiceName(service) + glog.V(5).Infof("delete(%s): START clusterName=%q", serviceName, clusterName) - glog.V(5).Infof("delete(%s): START clusterName=%q lbName=%q", serviceName, clusterName, lbName) - - err := az.cleanupLoadBalancer(clusterName, service, isInternal) + serviceIPToCleanup, err := az.findServiceIPAddress(clusterName, service, isInternal) if err != nil { return err } - sg, existsSg, err := az.getSecurityGroup() - if err != nil { + glog.V(10).Infof("Calling reconcileSecurityGroup from EnsureLoadBalancerDeleted for %s with IP %s, wantLb = false", service.Name, serviceIPToCleanup) + if _, err := az.reconcileSecurityGroup(clusterName, service, &serviceIPToCleanup, false /* wantLb */); err != nil { return err } - if existsSg { - reconciledSg, sgNeedsUpdate, reconcileErr := az.reconcileSecurityGroup(sg, clusterName, service, false /* wantLb */) - if reconcileErr != nil { - return reconcileErr - } - if sgNeedsUpdate { - glog.V(3).Infof("delete(%s): sg(%s) - updating", serviceName, az.SecurityGroupName) - // azure-sdk-for-go introduced contraint validation which breaks the updating here if we don't set these - // to nil. This is a workaround until https://github.com/Azure/go-autorest/issues/112 is fixed - sg.SecurityGroupPropertiesFormat.NetworkInterfaces = nil - sg.SecurityGroupPropertiesFormat.Subnets = nil - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("SecurityGroupsClient.CreateOrUpdate(%q): start", *reconciledSg.Name) - respChan, errChan := az.SecurityGroupsClient.CreateOrUpdate(az.ResourceGroup, *reconciledSg.Name, reconciledSg, nil) - resp := <-respChan - err := <-errChan - glog.V(10).Infof("SecurityGroupsClient.CreateOrUpdate(%q): end", *reconciledSg.Name) - if az.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) { - glog.V(2).Infof("delete(%s) backing off: sg(%s) - updating", serviceName, az.SecurityGroupName) - retryErr := az.CreateOrUpdateSGWithRetry(reconciledSg) - if retryErr != nil { - err = retryErr - glog.V(2).Infof("delete(%s) abort backoff: sg(%s) - updating", serviceName, az.SecurityGroupName) - } + + if _, err := az.reconcileLoadBalancer(clusterName, service, nil, false /* wantLb */); err != nil { + return err + } + + if _, err := az.reconcilePublicIP(clusterName, service, false /* wantLb */); err != nil { + return err + } + + glog.V(2).Infof("delete(%s): FINISH", serviceName) + return nil +} + +// getServiceLoadBalancer gets the loadbalancer for the service if it already exists +// If wantLb is TRUE then -it selects a new load balancer +// In case the selected load balancer does not exists it returns network.LoadBalancer struct +// with added metadata (such as name, location) and existsLB set to FALSE +// By default - cluster default LB is returned +func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string, nodes []*v1.Node, wantLb bool) (lb *network.LoadBalancer, status *v1.LoadBalancerStatus, exists bool, err error) { + isInternal := requiresInternalLoadBalancer(service) + var defaultLB *network.LoadBalancer + defaultLBName := az.getLoadBalancerName(clusterName, az.Config.PrimaryAvailabilitySetName, isInternal) + + existingLBs, err := az.ListLBWithRetry() + if err != nil { + return nil, nil, false, err + } + + // check if the service already has a load balancer + if existingLBs != nil { + for _, 
existingLB := range existingLBs { + if strings.EqualFold(*existingLB.Name, defaultLBName) { + defaultLB = &existingLB } + if isInternalLoadBalancer(&existingLB) != isInternal { + continue + } + status, err = az.getServiceLoadBalancerStatus(service, &existingLB) if err != nil { - return err + return nil, nil, false, err + } + if status == nil { + // service is not om this load balancer + continue } + + return &existingLB, status, true, nil } } - glog.V(2).Infof("delete(%s): FINISH", serviceName) - return nil -} + // service does not have a load balancer, select one + if wantLb { + // select new load balancer for service + selectedLB, exists, err := az.selectLoadBalancer(clusterName, service, &existingLBs, nodes) + if err != nil { + return nil, nil, false, err + } -func (az *Cloud) cleanupLoadBalancer(clusterName string, service *v1.Service, isInternalLb bool) error { - lbName := getLoadBalancerName(clusterName, isInternalLb) - serviceName := getServiceName(service) + return selectedLB, nil, exists, err + } - glog.V(10).Infof("ensure lb deleted: clusterName=%q, serviceName=%s, lbName=%q", clusterName, serviceName, lbName) + // create a default LB with meta data if not present + if defaultLB == nil { + defaultLB = &network.LoadBalancer{ + Name: &defaultLBName, + Location: &az.Location, + LoadBalancerPropertiesFormat: &network.LoadBalancerPropertiesFormat{}, + } + } + + return defaultLB, nil, false, nil +} - lb, existsLb, err := az.getAzureLoadBalancer(lbName) +// select load balancer for the service in the cluster +// the selection algorithm selectes the the load balancer with currently has +// the minimum lb rules, there there are multiple LB's with same number of rules +// it selects the first one (sorted based on name) +func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, existingLBs *[]network.LoadBalancer, nodes []*v1.Node) (selectedLB *network.LoadBalancer, existsLb bool, err error) { + isInternal := requiresInternalLoadBalancer(service) + serviceName := getServiceName(service) + glog.V(3).Infof("selectLoadBalancer(%s): isInternal(%s) - start", serviceName, isInternal) + availabilitySetNames, err := az.getLoadBalancerAvailabilitySetNames(service, nodes) if err != nil { - return err + glog.Errorf("az.selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - az.getLoadBalancerAvailabilitySetNames failed, err=(%v)", clusterName, serviceName, isInternal, err) + return nil, false, err } - if existsLb { - var publicIPToCleanup *string - - if !isInternalLb { - // Find public ip resource to clean up from IP configuration - lbFrontendIPConfigName := getFrontendIPConfigName(service) - for _, config := range *lb.FrontendIPConfigurations { - if strings.EqualFold(*config.Name, lbFrontendIPConfigName) { - if config.PublicIPAddress != nil { - // Only ID property is available - publicIPToCleanup = config.PublicIPAddress.ID - } - break - } + glog.Infof("selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - availabilitysetsnames %v", clusterName, serviceName, isInternal, *availabilitySetNames) + mapExistingLBs := map[string]network.LoadBalancer{} + for _, lb := range *existingLBs { + mapExistingLBs[*lb.Name] = lb + } + selectedLBRuleCount := math.MaxInt32 + for _, currASName := range *availabilitySetNames { + currLBName := az.getLoadBalancerName(clusterName, currASName, isInternal) + lb, exists := mapExistingLBs[currLBName] + if !exists { + // select this LB as this is a new LB and will have minimum rules + // create tmp lb struct to hold metadata for the new 
load-balancer + selectedLB = &network.LoadBalancer{ + Name: &currLBName, + Location: &az.Location, + LoadBalancerPropertiesFormat: &network.LoadBalancerPropertiesFormat{}, } + + return selectedLB, false, nil } - lb, lbNeedsUpdate, reconcileErr := az.reconcileLoadBalancer(lb, nil, clusterName, service, []*v1.Node{}) - if reconcileErr != nil { - return reconcileErr + lbRules := *lb.LoadBalancingRules + currLBRuleCount := 0 + if lbRules != nil { + currLBRuleCount = len(lbRules) } - if lbNeedsUpdate { - if len(*lb.FrontendIPConfigurations) > 0 { - glog.V(3).Infof("delete(%s): lb(%s) - updating", serviceName, lbName) - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("LoadBalancerClient.CreateOrUpdate(%q): start", *lb.Name) - respChan, errChan := az.LoadBalancerClient.CreateOrUpdate(az.ResourceGroup, *lb.Name, lb, nil) - resp := <-respChan - err := <-errChan - glog.V(10).Infof("LoadBalancerClient.CreateOrUpdate(%q): end", *lb.Name) - if az.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) { - glog.V(2).Infof("delete(%s) backing off: sg(%s) - updating", serviceName, az.SecurityGroupName) - retryErr := az.CreateOrUpdateLBWithRetry(lb) - if retryErr != nil { - err = retryErr - glog.V(2).Infof("delete(%s) abort backoff: sg(%s) - updating", serviceName, az.SecurityGroupName) - } + if currLBRuleCount < selectedLBRuleCount { + selectedLBRuleCount = currLBRuleCount + selectedLB = &lb + } + } + + if selectedLB == nil { + err = fmt.Errorf("selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - unable to find load balancer for selected availability sets %v", clusterName, serviceName, isInternal, *availabilitySetNames) + glog.Error(err) + return nil, false, err + } + // validate if the selected LB has not exceeded the MaximumLoadBalancerRuleCount + if az.Config.MaximumLoadBalancerRuleCount != 0 && selectedLBRuleCount >= az.Config.MaximumLoadBalancerRuleCount { + err = fmt.Errorf("selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - all available load balancers have exceeded maximum rule limit %d, availabilitysetnames (%v)", clusterName, serviceName, isInternal, selectedLBRuleCount, *availabilitySetNames) + glog.Error(err) + return selectedLB, existsLb, err + } + + return selectedLB, existsLb, nil +} + +func (az *Cloud) getServiceLoadBalancerStatus(service *v1.Service, lb *network.LoadBalancer) (status *v1.LoadBalancerStatus, err error) { + if lb == nil { + glog.V(10).Infof("getServiceLoadBalancerStatus lb is nil") + return nil, nil + } + if lb.FrontendIPConfigurations == nil || *lb.FrontendIPConfigurations == nil { + return nil, nil + } + isInternal := requiresInternalLoadBalancer(service) + lbFrontendIPConfigName := getFrontendIPConfigName(service, subnet(service)) + serviceName := getServiceName(service) + for _, ipConfiguration := range *lb.FrontendIPConfigurations { + if lbFrontendIPConfigName == *ipConfiguration.Name { + var lbIP *string + if isInternal { + lbIP = ipConfiguration.PrivateIPAddress + } else { + if ipConfiguration.PublicIPAddress == nil { + return nil, fmt.Errorf("get(%s): lb(%s) - failed to get LB PublicIPAddress is Nil", serviceName, *lb.Name) } - if err != nil { - return err + pipID := ipConfiguration.PublicIPAddress.ID + if pipID == nil { + return nil, fmt.Errorf("get(%s): lb(%s) - failed to get LB PublicIPAddress ID is Nil", serviceName, *lb.Name) } - } else { - glog.V(3).Infof("delete(%s): lb(%s) - deleting; no remaining frontendipconfigs", serviceName, lbName) - - az.operationPollRateLimiter.Accept() - 
glog.V(10).Infof("LoadBalancerClient.Delete(%q): start", lbName) - respChan, errChan := az.LoadBalancerClient.Delete(az.ResourceGroup, lbName, nil) - resp := <-respChan - err := <-errChan - glog.V(10).Infof("LoadBalancerClient.Delete(%q): end", lbName) - if az.CloudProviderBackoff && shouldRetryAPIRequest(resp, err) { - glog.V(2).Infof("delete(%s) backing off: lb(%s) - deleting; no remaining frontendipconfigs", serviceName, lbName) - retryErr := az.DeleteLBWithRetry(lbName) - if retryErr != nil { - err = retryErr - glog.V(2).Infof("delete(%s) abort backoff: lb(%s) - deleting; no remaining frontendipconfigs", serviceName, lbName) - } + pipName, err := getLastSegment(*pipID) + if err != nil { + return nil, fmt.Errorf("get(%s): lb(%s) - failed to get LB PublicIPAddress Name from ID(%s)", serviceName, *lb.Name, *pipID) } + pip, existsPip, err := az.getPublicIPAddress(pipName) if err != nil { - return err + return nil, err + } + if existsPip { + lbIP = pip.IPAddress } } + + return &v1.LoadBalancerStatus{Ingress: []v1.LoadBalancerIngress{{IP: *lbIP}}}, nil } + } - // Public IP can be deleted after frontend ip configuration rule deleted. - if publicIPToCleanup != nil { - // Only delete an IP address if we created it, deducing by name. - if index := strings.LastIndex(*publicIPToCleanup, "/"); index != -1 { - managedPipName := getPublicIPName(clusterName, service) - pipName := (*publicIPToCleanup)[index+1:] - if strings.EqualFold(managedPipName, pipName) { - glog.V(5).Infof("Deleting public IP resource %q.", pipName) - err = az.ensurePublicIPDeleted(serviceName, pipName) - if err != nil { - return err - } - } else { - glog.V(5).Infof("Public IP resource %q found, but it does not match managed name %q, skip deleting.", pipName, managedPipName) - } - } + return nil, nil +} + +func (az *Cloud) determinePublicIPName(clusterName string, service *v1.Service) (string, error) { + loadBalancerIP := service.Spec.LoadBalancerIP + if len(loadBalancerIP) == 0 { + return getPublicIPName(clusterName, service), nil + } + + pips, err := az.ListPIPWithRetry() + if err != nil { + return "", err + } + + for _, pip := range pips { + if pip.PublicIPAddressPropertiesFormat.IPAddress != nil && + *pip.PublicIPAddressPropertiesFormat.IPAddress == loadBalancerIP { + return *pip.Name, nil } } + return "", fmt.Errorf("user supplied IP Address %s was not found", loadBalancerIP) +} + +func flipServiceInternalAnnotation(service *v1.Service) *v1.Service { + copyService := service.DeepCopy() + if copyService.Annotations == nil { + copyService.Annotations = map[string]string{} + } + if v, ok := copyService.Annotations[ServiceAnnotationLoadBalancerInternal]; ok && v == "true" { + // If it is internal now, we make it external by remove the annotation + delete(copyService.Annotations, ServiceAnnotationLoadBalancerInternal) + } else { + // If it is external now, we make it internal + copyService.Annotations[ServiceAnnotationLoadBalancerInternal] = "true" + } + return copyService +} - return nil +func (az *Cloud) findServiceIPAddress(clusterName string, service *v1.Service, isInternalLb bool) (string, error) { + if len(service.Spec.LoadBalancerIP) > 0 { + return service.Spec.LoadBalancerIP, nil + } + + lbStatus, existsLb, err := az.GetLoadBalancer(clusterName, service) + if err != nil { + return "", err + } + if !existsLb { + return "", fmt.Errorf("Expected to find an IP address for service %s but did not", service.Name) + } + if len(lbStatus.Ingress) < 1 { + return "", fmt.Errorf("Expected to find an IP address for service %s but it had 
no ingresses", service.Name) + } + + return lbStatus.Ingress[0].IP, nil } -func (az *Cloud) ensurePublicIPExists(serviceName, pipName string) (*network.PublicIPAddress, error) { +func (az *Cloud) ensurePublicIPExists(serviceName, pipName, domainNameLabel string) (*network.PublicIPAddress, error) { pip, existsPip, err := az.getPublicIPAddress(pipName) if err != nil { return nil, err @@ -461,26 +398,21 @@ func (az *Cloud) ensurePublicIPExists(serviceName, pipName string) (*network.Pub pip.PublicIPAddressPropertiesFormat = &network.PublicIPAddressPropertiesFormat{ PublicIPAllocationMethod: network.Static, } + if len(domainNameLabel) > 0 { + pip.PublicIPAddressPropertiesFormat.DNSSettings = &network.PublicIPAddressDNSSettings{ + DomainNameLabel: &domainNameLabel, + } + } pip.Tags = &map[string]*string{"service": &serviceName} - glog.V(3).Infof("ensure(%s): pip(%s) - creating", serviceName, *pip.Name) az.operationPollRateLimiter.Accept() - glog.V(10).Infof("PublicIPAddressesClient.CreateOrUpdate(%q): start", *pip.Name) - respChan, errChan := az.PublicIPAddressesClient.CreateOrUpdate(az.ResourceGroup, *pip.Name, pip, nil) - resp := <-respChan - err = <-errChan - glog.V(10).Infof("PublicIPAddressesClient.CreateOrUpdate(%q): end", *pip.Name) - if az.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) { - glog.V(2).Infof("ensure(%s) backing off: pip(%s) - creating", serviceName, *pip.Name) - retryErr := az.CreateOrUpdatePIPWithRetry(pip) - if retryErr != nil { - glog.V(2).Infof("ensure(%s) abort backoff: pip(%s) - creating", serviceName, *pip.Name) - err = retryErr - } - } + glog.V(10).Infof("CreateOrUpdatePIPWithRetry(%q): start", *pip.Name) + err = az.CreateOrUpdatePIPWithRetry(pip) if err != nil { + glog.V(2).Infof("ensure(%s) abort backoff: pip(%s) - creating", serviceName, *pip.Name) return nil, err } + glog.V(10).Infof("CreateOrUpdatePIPWithRetry(%q): end", *pip.Name) az.operationPollRateLimiter.Accept() glog.V(10).Infof("PublicIPAddressesClient.Get(%q): start", *pip.Name) @@ -491,44 +423,27 @@ func (az *Cloud) ensurePublicIPExists(serviceName, pipName string) (*network.Pub } return &pip, nil - -} - -func (az *Cloud) ensurePublicIPDeleted(serviceName, pipName string) error { - glog.V(2).Infof("ensure(%s): pip(%s) - deleting", serviceName, pipName) - az.operationPollRateLimiter.Accept() - glog.V(10).Infof("PublicIPAddressesClient.Delete(%q): start", pipName) - resp, deleteErrChan := az.PublicIPAddressesClient.Delete(az.ResourceGroup, pipName, nil) - deleteErr := <-deleteErrChan - glog.V(10).Infof("PublicIPAddressesClient.Delete(%q): end", pipName) // response not read yet... - if az.CloudProviderBackoff && shouldRetryAPIRequest(<-resp, deleteErr) { - glog.V(2).Infof("ensure(%s) backing off: pip(%s) - deleting", serviceName, pipName) - retryErr := az.DeletePublicIPWithRetry(pipName) - if retryErr != nil { - glog.V(2).Infof("ensure(%s) abort backoff: pip(%s) - deleting", serviceName, pipName) - return retryErr - } - } - _, realErr := checkResourceExistsFromError(deleteErr) - if realErr != nil { - return nil - } - return nil } // This ensures load balancer exists and the frontend ip config is setup. // This also reconciles the Service's Ports with the LoadBalancer config. // This entails adding rules/probes for expected Ports and removing stale rules/ports. 
-func (az *Cloud) reconcileLoadBalancer(lb network.LoadBalancer, fipConfigurationProperties *network.FrontendIPConfigurationPropertiesFormat, clusterName string, service *v1.Service, nodes []*v1.Node) (network.LoadBalancer, bool, error) { +// nodes only used if wantLb is true +func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node, wantLb bool) (*network.LoadBalancer, error) { isInternal := requiresInternalLoadBalancer(service) - lbName := getLoadBalancerName(clusterName, isInternal) serviceName := getServiceName(service) - lbFrontendIPConfigName := getFrontendIPConfigName(service) + glog.V(2).Infof("reconcileLoadBalancer(%s) - wantLb(%t): started", serviceName, wantLb) + lb, _, _, err := az.getServiceLoadBalancer(service, clusterName, nodes, wantLb) + if err != nil { + return nil, err + } + lbName := *lb.Name + glog.V(2).Infof("reconcileLoadBalancer(%s): lb(%s) wantLb(%t) resolved load balancer name", serviceName, lbName, wantLb) + lbFrontendIPConfigName := getFrontendIPConfigName(service, subnet(service)) lbFrontendIPConfigID := az.getFrontendIPConfigID(lbName, lbFrontendIPConfigName) lbBackendPoolName := getBackendPoolName(clusterName) lbBackendPoolID := az.getBackendPoolID(lbName, lbBackendPoolName) - wantLb := fipConfigurationProperties != nil dirtyLb := false // Ensure LoadBalancer's Backend Pool Configuration @@ -565,16 +480,27 @@ func (az *Cloud) reconcileLoadBalancer(lb network.LoadBalancer, fipConfiguration if lb.FrontendIPConfigurations != nil { newConfigs = *lb.FrontendIPConfigurations } + if !wantLb { for i := len(newConfigs) - 1; i >= 0; i-- { config := newConfigs[i] - if strings.EqualFold(*config.Name, lbFrontendIPConfigName) { + if serviceOwnsFrontendIP(config, service) { glog.V(3).Infof("reconcile(%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, lbFrontendIPConfigName) newConfigs = append(newConfigs[:i], newConfigs[i+1:]...) dirtyConfigs = true } } } else { + if isInternal { + for i := len(newConfigs) - 1; i >= 0; i-- { + config := newConfigs[i] + if serviceOwnsFrontendIP(config, service) && !strings.EqualFold(*config.Name, lbFrontendIPConfigName) { + glog.V(3).Infof("reconcile(%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, *config.Name) + newConfigs = append(newConfigs[:i], newConfigs[i+1:]...) + dirtyConfigs = true + } + } + } foundConfig := false for _, config := range newConfigs { if strings.EqualFold(*config.Name, lbFrontendIPConfigName) { @@ -583,6 +509,51 @@ func (az *Cloud) reconcileLoadBalancer(lb network.LoadBalancer, fipConfiguration } } if !foundConfig { + // construct FrontendIPConfigurationPropertiesFormat + var fipConfigurationProperties *network.FrontendIPConfigurationPropertiesFormat + if isInternal { + subnetName := subnet(service) + if subnetName == nil { + subnetName = &az.SubnetName + } + subnet, existsSubnet, err := az.getSubnet(az.VnetName, *subnetName) + if err != nil { + return nil, err + } + + if !existsSubnet { + return nil, fmt.Errorf("ensure(%s): lb(%s) - failed to get subnet: %s/%s", serviceName, lbName, az.VnetName, az.SubnetName) + } + + configProperties := network.FrontendIPConfigurationPropertiesFormat{ + Subnet: &subnet, + } + + loadBalancerIP := service.Spec.LoadBalancerIP + if loadBalancerIP != "" { + configProperties.PrivateIPAllocationMethod = network.Static + configProperties.PrivateIPAddress = &loadBalancerIP + } else { + // We'll need to call GetLoadBalancer later to retrieve allocated IP. 
+ configProperties.PrivateIPAllocationMethod = network.Dynamic + } + + fipConfigurationProperties = &configProperties + } else { + pipName, err := az.determinePublicIPName(clusterName, service) + if err != nil { + return nil, err + } + domainNameLabel := getPublicIPLabel(service) + pip, err := az.ensurePublicIPExists(serviceName, pipName, domainNameLabel) + if err != nil { + return nil, err + } + fipConfigurationProperties = &network.FrontendIPConfigurationPropertiesFormat{ + PublicIPAddress: &network.PublicIPAddress{ID: pip.ID}, + } + } + newConfigs = append(newConfigs, network.FrontendIPConfiguration{ Name: to.StringPtr(lbFrontendIPConfigName), @@ -608,11 +579,11 @@ func (az *Cloud) reconcileLoadBalancer(lb network.LoadBalancer, fipConfiguration var expectedProbes []network.Probe var expectedRules []network.LoadBalancingRule for _, port := range ports { - lbRuleName := getLoadBalancerRuleName(service, port) + lbRuleName := getLoadBalancerRuleName(service, port, subnet(service)) transportProto, _, probeProto, err := getProtocolsFromKubernetesProtocol(port.Protocol) if err != nil { - return lb, false, err + return nil, err } if serviceapi.NeedsHealthCheck(service) { @@ -620,7 +591,7 @@ func (az *Cloud) reconcileLoadBalancer(lb network.LoadBalancer, fipConfiguration // ERROR: this isn't supported // health check (aka source ip preservation) is not // compatible with UDP (it uses an HTTP check) - return lb, false, fmt.Errorf("services requiring health checks are incompatible with UDP ports") + return nil, fmt.Errorf("services requiring health checks are incompatible with UDP ports") } podPresencePath, podPresencePort := serviceapi.GetServiceHealthCheckPathPort(service) @@ -652,6 +623,7 @@ func (az *Cloud) reconcileLoadBalancer(lb network.LoadBalancer, fipConfiguration if service.Spec.SessionAffinity == v1.ServiceAffinityClientIP { loadDistribution = network.SourceIP } + expectedRule := network.LoadBalancingRule{ Name: &lbRuleName, LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{ @@ -760,23 +732,96 @@ func (az *Cloud) reconcileLoadBalancer(lb network.LoadBalancer, fipConfiguration lb.LoadBalancingRules = &updatedRules } - return lb, dirtyLb, nil + // We don't care if the LB exists or not + // We only care about if there is any change in the LB, which means dirtyLB + // If it is not exist, and no change to that, we don't CreateOrUpdate LB + if dirtyLb { + if lb.FrontendIPConfigurations == nil || len(*lb.FrontendIPConfigurations) == 0 { + // When FrontendIPConfigurations is empty, we need to delete the Azure load balancer resource itself, + // because an Azure load balancer cannot have an empty FrontendIPConfigurations collection + glog.V(3).Infof("delete(%s): lb(%s) - deleting; no remaining frontendipconfigs", serviceName, lbName) + + az.operationPollRateLimiter.Accept() + glog.V(10).Infof("LoadBalancerClient.Delete(%q): start", lbName) + err := az.DeleteLBWithRetry(lbName) + if err != nil { + glog.V(2).Infof("delete(%s) abort backoff: lb(%s) - deleting; no remaining frontendipconfigs", serviceName, lbName) + return nil, err + } + glog.V(10).Infof("LoadBalancerClient.Delete(%q): end", lbName) + } else { + glog.V(3).Infof("ensure(%s): lb(%s) - updating", serviceName, lbName) + err := az.CreateOrUpdateLBWithRetry(*lb) + if err != nil { + glog.V(2).Infof("ensure(%s) abort backoff: lb(%s) - updating", serviceName, lbName) + return nil, err + } + } + } + + if wantLb && nodes != nil { + // Add the machines to the backend pool if they're not already + availabilitySetName 
:= az.mapLoadBalancerNameToAvailabilitySet(lbName, clusterName) + hostUpdates := make([]func() error, len(nodes)) + for i, node := range nodes { + localNodeName := node.Name + f := func() error { + err := az.ensureHostInPool(serviceName, types.NodeName(localNodeName), lbBackendPoolID, availabilitySetName) + if err != nil { + return fmt.Errorf("ensure(%s): lb(%s) - failed to ensure host in pool: %q", serviceName, lbName, err) + } + return nil + } + hostUpdates[i] = f + } + + errs := utilerrors.AggregateGoroutines(hostUpdates...) + if errs != nil { + return nil, utilerrors.Flatten(errs) + } + } + + glog.V(2).Infof("ensure(%s): lb(%s) finished", serviceName, lbName) + return lb, nil } // This reconciles the Network Security Group similar to how the LB is reconciled. // This entails adding required, missing SecurityRules and removing stale rules. -func (az *Cloud) reconcileSecurityGroup(sg network.SecurityGroup, clusterName string, service *v1.Service, wantLb bool) (network.SecurityGroup, bool, error) { +func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, lbIP *string, wantLb bool) (*network.SecurityGroup, error) { serviceName := getServiceName(service) - var ports []v1.ServicePort - if wantLb { - ports = service.Spec.Ports - } else { + glog.V(5).Infof("reconcileSecurityGroup(%s): START clusterName=%q lbName=%q", serviceName, clusterName) + + ports := service.Spec.Ports + if ports == nil { + if useSharedSecurityRule(service) { + glog.V(2).Infof("Attempting to reconcile security group for service %s, but service uses shared rule and we don't know which port it's for", service.Name) + return nil, fmt.Errorf("No port info for reconciling shared rule for service %s", service.Name) + } ports = []v1.ServicePort{} } + az.operationPollRateLimiter.Accept() + glog.V(10).Infof("SecurityGroupsClient.Get(%q): start", az.SecurityGroupName) + sg, err := az.SecurityGroupsClient.Get(az.ResourceGroup, az.SecurityGroupName, "") + glog.V(10).Infof("SecurityGroupsClient.Get(%q): end", az.SecurityGroupName) + if err != nil { + return nil, err + } + + destinationIPAddress := "" + if wantLb && lbIP == nil { + return nil, fmt.Errorf("No load balancer IP for setting up security rules for service %s", service.Name) + } + if lbIP != nil { + destinationIPAddress = *lbIP + } + if destinationIPAddress == "" { + destinationIPAddress = "*" + } + sourceRanges, err := serviceapi.GetLoadBalancerSourceRanges(service) if err != nil { - return sg, false, err + return nil, err } var sourceAddressPrefixes []string if sourceRanges == nil || serviceapi.IsAllowAll(sourceRanges) { @@ -788,38 +833,52 @@ func (az *Cloud) reconcileSecurityGroup(sg network.SecurityGroup, clusterName st sourceAddressPrefixes = append(sourceAddressPrefixes, ip.String()) } } - expectedSecurityRules := make([]network.SecurityRule, len(ports)*len(sourceAddressPrefixes)) + expectedSecurityRules := []network.SecurityRule{} - for i, port := range ports { - _, securityProto, _, err := getProtocolsFromKubernetesProtocol(port.Protocol) - if err != nil { - return sg, false, err - } - for j := range sourceAddressPrefixes { - ix := i*len(sourceAddressPrefixes) + j - securityRuleName := getSecurityRuleName(service, port, sourceAddressPrefixes[j]) - expectedSecurityRules[ix] = network.SecurityRule{ - Name: to.StringPtr(securityRuleName), - SecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{ - Protocol: *securityProto, - SourcePortRange: to.StringPtr("*"), - DestinationPortRange: to.StringPtr(strconv.Itoa(int(port.Port))), - 
SourceAddressPrefix: to.StringPtr(sourceAddressPrefixes[j]), - DestinationAddressPrefix: to.StringPtr("*"), - Access: network.SecurityRuleAccessAllow, - Direction: network.SecurityRuleDirectionInbound, - }, + if wantLb { + expectedSecurityRules = make([]network.SecurityRule, len(ports)*len(sourceAddressPrefixes)) + + for i, port := range ports { + _, securityProto, _, err := getProtocolsFromKubernetesProtocol(port.Protocol) + if err != nil { + return nil, err + } + for j := range sourceAddressPrefixes { + ix := i*len(sourceAddressPrefixes) + j + securityRuleName := getSecurityRuleName(service, port, sourceAddressPrefixes[j]) + expectedSecurityRules[ix] = network.SecurityRule{ + Name: to.StringPtr(securityRuleName), + SecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{ + Protocol: *securityProto, + SourcePortRange: to.StringPtr("*"), + DestinationPortRange: to.StringPtr(strconv.Itoa(int(port.Port))), + SourceAddressPrefix: to.StringPtr(sourceAddressPrefixes[j]), + DestinationAddressPrefix: to.StringPtr(destinationIPAddress), + Access: network.SecurityRuleAccessAllow, + Direction: network.SecurityRuleDirectionInbound, + }, + } } } } + for _, r := range expectedSecurityRules { + glog.V(10).Infof("Expecting security rule for %s: %s:%s -> %s:%s", service.Name, *r.SourceAddressPrefix, *r.SourcePortRange, *r.DestinationAddressPrefix, *r.DestinationPortRange) + } + // update security rules dirtySg := false var updatedRules []network.SecurityRule if sg.SecurityRules != nil { updatedRules = *sg.SecurityRules } - // update security rules: remove unwanted + + for _, r := range updatedRules { + glog.V(10).Infof("Existing security rule while processing %s: %s:%s -> %s:%s", service.Name, logSafe(r.SourceAddressPrefix), logSafe(r.SourcePortRange), logSafeCollection(r.DestinationAddressPrefix, r.DestinationAddressPrefixes), logSafe(r.DestinationPortRange)) + } + + // update security rules: remove unwanted rules that belong privately + // to this service for i := len(updatedRules) - 1; i >= 0; i-- { existingRule := updatedRules[i] if serviceOwnsRule(service, *existingRule.Name) { @@ -836,6 +895,50 @@ func (az *Cloud) reconcileSecurityGroup(sg network.SecurityGroup, clusterName st } } } + // update security rules: if the service uses a shared rule and is being deleted, + // then remove it from the shared rule + if useSharedSecurityRule(service) && !wantLb { + for _, port := range ports { + for _, sourceAddressPrefix := range sourceAddressPrefixes { + sharedRuleName := getSecurityRuleName(service, port, sourceAddressPrefix) + sharedIndex, sharedRule, sharedRuleFound := findSecurityRuleByName(updatedRules, sharedRuleName) + if !sharedRuleFound { + glog.V(4).Infof("Expected to find shared rule %s for service %s being deleted, but did not", sharedRuleName, service.Name) + return nil, fmt.Errorf("Expected to find shared rule %s for service %s being deleted, but did not", sharedRuleName, service.Name) + } + if sharedRule.DestinationAddressPrefixes == nil { + glog.V(4).Infof("Expected to have array of destinations in shared rule for service %s being deleted, but did not", service.Name) + return nil, fmt.Errorf("Expected to have array of destinations in shared rule for service %s being deleted, but did not", service.Name) + } + existingPrefixes := *sharedRule.DestinationAddressPrefixes + addressIndex, found := findIndex(existingPrefixes, destinationIPAddress) + if !found { + glog.V(4).Infof("Expected to find destination address %s in shared rule %s for service %s being deleted, but did not", 
destinationIPAddress, sharedRuleName, service.Name) + return nil, fmt.Errorf("Expected to find destination address %s in shared rule %s for service %s being deleted, but did not", destinationIPAddress, sharedRuleName, service.Name) + } + if len(existingPrefixes) == 1 { + updatedRules = append(updatedRules[:sharedIndex], updatedRules[sharedIndex+1:]...) + } else { + newDestinations := append(existingPrefixes[:addressIndex], existingPrefixes[addressIndex+1:]...) + sharedRule.DestinationAddressPrefixes = &newDestinations + updatedRules[sharedIndex] = sharedRule + } + dirtySg = true + } + } + } + + // update security rules: prepare rules for consolidation + for index, rule := range updatedRules { + if allowsConsolidation(rule) { + updatedRules[index] = makeConsolidatable(rule) + } + } + for index, rule := range expectedSecurityRules { + if allowsConsolidation(rule) { + expectedSecurityRules[index] = makeConsolidatable(rule) + } + } // update security rules: add needed for _, expectedRule := range expectedSecurityRules { foundRule := false @@ -843,12 +946,17 @@ func (az *Cloud) reconcileSecurityGroup(sg network.SecurityGroup, clusterName st glog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - already exists", serviceName, wantLb, *expectedRule.Name) foundRule = true } + if foundRule && allowsConsolidation(expectedRule) { + index, _ := findConsolidationCandidate(updatedRules, expectedRule) + updatedRules[index] = consolidate(updatedRules[index], expectedRule) + dirtySg = true + } if !foundRule { glog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - adding", serviceName, wantLb, *expectedRule.Name) nextAvailablePriority, err := getNextAvailablePriority(updatedRules) if err != nil { - return sg, false, err + return nil, err } expectedRule.Priority = to.Int32Ptr(nextAvailablePriority) @@ -856,15 +964,236 @@ func (az *Cloud) reconcileSecurityGroup(sg network.SecurityGroup, clusterName st dirtySg = true } } + + for _, r := range updatedRules { + glog.V(10).Infof("Updated security rule while processing %s: %s:%s -> %s:%s", service.Name, logSafe(r.SourceAddressPrefix), logSafe(r.SourcePortRange), logSafeCollection(r.DestinationAddressPrefix, r.DestinationAddressPrefixes), logSafe(r.DestinationPortRange)) + } + if dirtySg { sg.SecurityRules = &updatedRules + glog.V(3).Infof("ensure(%s): sg(%s) - updating", serviceName, *sg.Name) + az.operationPollRateLimiter.Accept() + glog.V(10).Infof("CreateOrUpdateSGWithRetry(%q): start", *sg.Name) + err := az.CreateOrUpdateSGWithRetry(sg) + if err != nil { + glog.V(2).Infof("ensure(%s) abort backoff: sg(%s) - updating", serviceName, *sg.Name) + // TODO (Nov 2017): remove when augmented security rules are out of preview + // we could try to parse the response but it's not worth it for bridging a preview + errorDescription := err.Error() + if strings.Contains(errorDescription, "SubscriptionNotRegisteredForFeature") && strings.Contains(errorDescription, "Microsoft.Network/AllowAccessRuleExtendedProperties") { + sharedRuleError := fmt.Errorf("Shared security rules are not available in this Azure region. 
Details: %v", errorDescription) + return nil, sharedRuleError + } + // END TODO + return nil, err + } + glog.V(10).Infof("CreateOrUpdateSGWithRetry(%q): end", *sg.Name) } - return sg, dirtySg, nil + return &sg, nil +} + +func logSafe(s *string) string { + if s == nil { + return "(nil)" + } + return *s +} + +func logSafeCollection(s *string, strs *[]string) string { + if s == nil { + if strs == nil { + return "(nil)" + } + return "[" + strings.Join(*strs, ",") + "]" + } + return *s +} + +func findSecurityRuleByName(rules []network.SecurityRule, ruleName string) (int, network.SecurityRule, bool) { + for index, rule := range rules { + if rule.Name != nil && strings.EqualFold(*rule.Name, ruleName) { + return index, rule, true + } + } + return 0, network.SecurityRule{}, false +} + +func findIndex(strs []string, s string) (int, bool) { + for index, str := range strs { + if strings.EqualFold(str, s) { + return index, true + } + } + return 0, false +} + +func allowsConsolidation(rule network.SecurityRule) bool { + return strings.HasPrefix(*rule.Name, "shared") +} + +func findConsolidationCandidate(rules []network.SecurityRule, rule network.SecurityRule) (int, bool) { + for index, r := range rules { + if allowsConsolidation(r) { + if strings.EqualFold(*r.Name, *rule.Name) { + return index, true + } + } + } + + return 0, false +} + +func makeConsolidatable(rule network.SecurityRule) network.SecurityRule { + return network.SecurityRule{ + Name: rule.Name, + SecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{ + Priority: rule.Priority, + Protocol: rule.Protocol, + SourcePortRange: rule.SourcePortRange, + SourcePortRanges: rule.SourcePortRanges, + DestinationPortRange: rule.DestinationPortRange, + DestinationPortRanges: rule.DestinationPortRanges, + SourceAddressPrefix: rule.SourceAddressPrefix, + SourceAddressPrefixes: rule.SourceAddressPrefixes, + DestinationAddressPrefixes: collectionOrSingle(rule.DestinationAddressPrefixes, rule.DestinationAddressPrefix), + Access: rule.Access, + Direction: rule.Direction, + }, + } +} + +func consolidate(existingRule network.SecurityRule, newRule network.SecurityRule) network.SecurityRule { + destinations := appendElements(existingRule.SecurityRulePropertiesFormat.DestinationAddressPrefixes, newRule.DestinationAddressPrefix, newRule.DestinationAddressPrefixes) + destinations = deduplicate(destinations) // there are transient conditions during controller startup where it tries to add a service that is already added + + return network.SecurityRule{ + Name: existingRule.Name, + SecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{ + Priority: existingRule.Priority, + Protocol: existingRule.Protocol, + SourcePortRange: existingRule.SourcePortRange, + SourcePortRanges: existingRule.SourcePortRanges, + DestinationPortRange: existingRule.DestinationPortRange, + DestinationPortRanges: existingRule.DestinationPortRanges, + SourceAddressPrefix: existingRule.SourceAddressPrefix, + SourceAddressPrefixes: existingRule.SourceAddressPrefixes, + DestinationAddressPrefixes: destinations, + Access: existingRule.Access, + Direction: existingRule.Direction, + }, + } +} + +func collectionOrSingle(collection *[]string, s *string) *[]string { + if collection != nil && len(*collection) > 0 { + return collection + } + if s == nil { + return &[]string{} + } + return &[]string{*s} +} + +func appendElements(collection *[]string, appendString *string, appendStrings *[]string) *[]string { + newCollection := []string{} + + if collection != nil { + newCollection = 
append(newCollection, *collection...) + } + if appendString != nil { + newCollection = append(newCollection, *appendString) + } + if appendStrings != nil { + newCollection = append(newCollection, *appendStrings...) + } + + return &newCollection +} + +func deduplicate(collection *[]string) *[]string { + if collection == nil { + return nil + } + + seen := map[string]bool{} + result := make([]string, 0, len(*collection)) + + for _, v := range *collection { + if seen[v] == true { + // skip this element + } else { + seen[v] = true + result = append(result, v) + } + } + + return &result +} + +// This reconciles the PublicIP resources similar to how the LB is reconciled. +func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, wantLb bool) (*network.PublicIPAddress, error) { + isInternal := requiresInternalLoadBalancer(service) + serviceName := getServiceName(service) + var desiredPipName string + var err error + if !isInternal && wantLb { + desiredPipName, err = az.determinePublicIPName(clusterName, service) + if err != nil { + return nil, err + } + } + + pips, err := az.ListPIPWithRetry() + if err != nil { + return nil, err + } + + for _, pip := range pips { + if pip.Tags != nil && + (*pip.Tags)["service"] != nil && + *(*pip.Tags)["service"] == serviceName { + // We need to process for pips belong to this service + pipName := *pip.Name + if wantLb && !isInternal && pipName == desiredPipName { + // This is the only case we should preserve the + // Public ip resource with match service tag + } else { + glog.V(2).Infof("ensure(%s): pip(%s) - deleting", serviceName, pipName) + az.operationPollRateLimiter.Accept() + glog.V(10).Infof("DeletePublicIPWithRetry(%q): start", pipName) + err = az.DeletePublicIPWithRetry(pipName) + if err != nil { + glog.V(2).Infof("ensure(%s) abort backoff: pip(%s) - deleting", serviceName, pipName) + // We let err to pass through + // It may be ignorable + } + glog.V(10).Infof("DeletePublicIPWithRetry(%q): end", pipName) // response not read yet... + + err = ignoreStatusNotFoundFromError(err) + if err != nil { + return nil, err + } + glog.V(2).Infof("ensure(%s): pip(%s) - finished", serviceName, pipName) + } + } + + } + + if !isInternal && wantLb { + // Confirm desired public ip resource exists + var pip *network.PublicIPAddress + domainNameLabel := getPublicIPLabel(service) + if pip, err = az.ensurePublicIPExists(serviceName, desiredPipName, domainNameLabel); err != nil { + return nil, err + } + return pip, nil + } + return nil, nil } func findProbe(probes []network.Probe, probe network.Probe) bool { for _, existingProbe := range probes { - if strings.EqualFold(*existingProbe.Name, *probe.Name) { + if strings.EqualFold(*existingProbe.Name, *probe.Name) && *existingProbe.Port == *probe.Port { return true } } @@ -880,36 +1209,56 @@ func findRule(rules []network.LoadBalancingRule, rule network.LoadBalancingRule) return false } +// This compares rule's Name, Protocol, SourcePortRange, DestinationPortRange, SourceAddressPrefix, Access, and Direction. +// Note that it compares rule's DestinationAddressPrefix only when it's not consolidated rule as such rule does not have DestinationAddressPrefix defined. +// We intentionally do not compare DestinationAddressPrefixes in consolidated case because reconcileSecurityRule has to consider the two rules equal, +// despite different DestinationAddressPrefixes, in order to give it a chance to consolidate the two rules. 
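Before the findSecurityRule implementation that follows, here is a minimal, self-contained sketch (illustrative names and IP values only, not the vendored helpers) of what the consolidation path above boils down to: a shared rule carries a list of destination prefixes, and folding another service into it is an append followed by de-duplication, mirroring appendElements and deduplicate.

```go
package main

import "fmt"

// mergeDestinations mirrors the appendElements + deduplicate pattern used for
// shared security rules: combine the rule's existing destination prefixes with
// the new service's destination, then drop exact duplicates.
func mergeDestinations(existing []string, newPrefix string) []string {
	merged := append([]string{}, existing...)
	merged = append(merged, newPrefix)

	seen := map[string]bool{}
	result := make([]string, 0, len(merged))
	for _, p := range merged {
		if !seen[p] {
			seen[p] = true
			result = append(result, p)
		}
	}
	return result
}

func main() {
	// A shared rule already covers two service IPs; a third service joins, and
	// re-adding an existing IP (e.g. during a controller restart) is a no-op.
	shared := []string{"40.112.1.10", "40.112.1.11"}
	shared = mergeDestinations(shared, "40.112.1.12")
	shared = mergeDestinations(shared, "40.112.1.11")
	fmt.Println(shared) // [40.112.1.10 40.112.1.11 40.112.1.12]
}
```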
func findSecurityRule(rules []network.SecurityRule, rule network.SecurityRule) bool { for _, existingRule := range rules { - if strings.EqualFold(*existingRule.Name, *rule.Name) { - return true + if !strings.EqualFold(*existingRule.Name, *rule.Name) { + continue + } + if existingRule.Protocol != rule.Protocol { + continue + } + if !strings.EqualFold(*existingRule.SourcePortRange, *rule.SourcePortRange) { + continue + } + if !strings.EqualFold(*existingRule.DestinationPortRange, *rule.DestinationPortRange) { + continue + } + if !strings.EqualFold(*existingRule.SourceAddressPrefix, *rule.SourceAddressPrefix) { + continue + } + if !allowsConsolidation(existingRule) && !allowsConsolidation(rule) { + if !strings.EqualFold(*existingRule.DestinationAddressPrefix, *rule.DestinationAddressPrefix) { + continue + } } + if existingRule.Access != rule.Access { + continue + } + if existingRule.Direction != rule.Direction { + continue + } + return true } return false } // This ensures the given VM's Primary NIC's Primary IP Configuration is // participating in the specified LoadBalancer Backend Pool. -func (az *Cloud) ensureHostInPool(serviceName string, nodeName types.NodeName, backendPoolID string) error { +func (az *Cloud) ensureHostInPool(serviceName string, nodeName types.NodeName, backendPoolID string, availabilitySetName string) error { var machine compute.VirtualMachine vmName := mapNodeNameToVMName(nodeName) az.operationPollRateLimiter.Accept() glog.V(10).Infof("VirtualMachinesClient.Get(%q): start", vmName) - machine, err := az.VirtualMachinesClient.Get(az.ResourceGroup, vmName, "") - glog.V(10).Infof("VirtualMachinesClient.Get(%q): end", vmName) + machine, err := az.VirtualMachineClientGetWithRetry(az.ResourceGroup, vmName, "") if err != nil { - if az.CloudProviderBackoff { - glog.V(2).Infof("ensureHostInPool(%s, %s, %s) backing off", serviceName, nodeName, backendPoolID) - machine, err = az.VirtualMachineClientGetWithRetry(az.ResourceGroup, vmName, "") - if err != nil { - glog.V(2).Infof("ensureHostInPool(%s, %s, %s) abort backoff", serviceName, nodeName, backendPoolID) - return err - } - } else { - return err - } + glog.V(2).Infof("ensureHostInPool(%s, %s, %s) abort backoff", serviceName, nodeName, backendPoolID) + return err } + glog.V(10).Infof("VirtualMachinesClient.Get(%q): end", vmName) primaryNicID, err := getPrimaryInterfaceID(machine) if err != nil { @@ -921,12 +1270,12 @@ func (az *Cloud) ensureHostInPool(serviceName string, nodeName types.NodeName, b } // Check availability set - if az.PrimaryAvailabilitySetName != "" { - expectedAvailabilitySetName := az.getAvailabilitySetID(az.PrimaryAvailabilitySetName) - if !strings.EqualFold(*machine.AvailabilitySet.ID, expectedAvailabilitySetName) { + if availabilitySetName != "" { + expectedAvailabilitySetName := az.getAvailabilitySetID(availabilitySetName) + if machine.AvailabilitySet == nil || !strings.EqualFold(*machine.AvailabilitySet.ID, expectedAvailabilitySetName) { glog.V(3).Infof( - "nicupdate(%s): skipping nic (%s) since it is not in the primaryAvailabilitSet(%s)", - serviceName, nicName, az.PrimaryAvailabilitySetName) + "nicupdate(%s): skipping nic (%s) since it is not in the availabilitySet(%s)", + serviceName, nicName, availabilitySetName) return nil } } @@ -994,3 +1343,45 @@ func requiresInternalLoadBalancer(service *v1.Service) bool { return false } + +func subnet(service *v1.Service) *string { + if requiresInternalLoadBalancer(service) { + if l, ok := service.Annotations[ServiceAnnotationLoadBalancerInternalSubnet]; ok { + 
return &l
+		}
+	}
+
+	return nil
+}
+
+// getServiceLoadBalancerMode parses the load balancer mode annotation.
+// If the value is __auto__ it returns isAuto = TRUE;
+// otherwise it returns the unique availability set names after trimming spaces.
+func getServiceLoadBalancerMode(service *v1.Service) (hasMode bool, isAuto bool, availabilitySetNames []string) {
+	mode, hasMode := service.Annotations[ServiceAnnotationLoadBalancerMode]
+	mode = strings.TrimSpace(mode)
+	isAuto = strings.EqualFold(mode, ServiceAnnotationLoadBalancerAutoModeValue)
+	if !isAuto {
+		// Break up list of "AS1,AS2"
+		availabilitySetParsedList := strings.Split(mode, ",")
+
+		// Trim the availability set names and remove duplicates
+		//  e.g. {"AS1"," AS2", "AS3", "AS3"} => {"AS1", "AS2", "AS3"}
+		availabilitySetNameSet := sets.NewString()
+		for _, v := range availabilitySetParsedList {
+			availabilitySetNameSet.Insert(strings.TrimSpace(v))
+		}
+
+		availabilitySetNames = availabilitySetNameSet.List()
+	}
+
+	return hasMode, isAuto, availabilitySetNames
+}
+
+func useSharedSecurityRule(service *v1.Service) bool {
+	if l, ok := service.Annotations[ServiceAnnotationSharedSecurityRule]; ok {
+		return l == "true"
+	}
+
+	return false
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer.md b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer.md
new file mode 100644
index 000000000000..141d066cf159
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer.md
@@ -0,0 +1,77 @@
+# Azure LoadBalancer
+
+Azure defines load balancers differently from GCE and AWS. An Azure LB can hold multiple frontend IP references, while GCE and AWS allow only one per LB; if you need more, you create another LB. As a consequence, the Public IP is not part of the LB in Azure, and neither is the NSG. They still cannot be deleted in parallel, however: a Public IP can only be deleted after the LB's frontend IP reference to it has been removed.
+
+LB, Public IP, and NSG are peer Azure resources of the same tier. We need to make sure there is no coupling between their individual ensure loops; in other words, each of them should eventually be reconciled regardless of the state of the other resources and should depend only on the service state.
+
+Despite that ideal, we have to face reality: the NSG depends on the LB's frontend IP to adjust its rules, so when we reconcile the NSG the LB must already contain the corresponding frontend IP configuration.
+
+Also, for Azure we cannot afford more than one service_controller worker. Different services can operate on the same LB, and concurrent execution could produce conflicts or unexpected results. AWS and GCE do not have this problem because they use one LB per service, so there is no such contention.
+
+There are two load balancers per availability set: internal and external. There is a limit on the number of services that can be associated with a single load balancer.
+By default the primary load balancer is selected. Services can be annotated to allow automatic selection of an available load balancer, and service annotations can also name the specific availability sets that host the load balancers. Note that with automatic selection or a specific availability set selection, if the availability set is lost due to downtime or a cluster scale-down, the services are currently not automatically reassigned to an available load balancer.
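As a rough illustration of the annotation handling described above, here is a simplified, standalone sketch of how the mode value is interpreted (it mirrors getServiceLoadBalancerMode but skips the sets.NewString de-duplication and sorting; the literal annotation key is spelled out for readability):

```go
package main

import (
	"fmt"
	"strings"
)

// parseLBMode is illustrative only: "__auto__" means "pick any eligible load
// balancer"; any other value is a comma-separated list of availability sets.
func parseLBMode(annotations map[string]string) (hasMode bool, isAuto bool, names []string) {
	mode, hasMode := annotations["service.beta.kubernetes.io/azure-load-balancer-mode"]
	mode = strings.TrimSpace(mode)
	isAuto = strings.EqualFold(mode, "__auto__")
	if hasMode && !isAuto {
		for _, as := range strings.Split(mode, ",") {
			names = append(names, strings.TrimSpace(as))
		}
	}
	return hasMode, isAuto, names
}

func main() {
	fmt.Println(parseLBMode(map[string]string{
		"service.beta.kubernetes.io/azure-load-balancer-mode": "as1, as2",
	})) // true false [as1 as2]
	fmt.Println(parseLBMode(map[string]string{
		"service.beta.kubernetes.io/azure-load-balancer-mode": "__auto__",
	})) // true true []
}
```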
+
+Service annotation for auto and specific load balancer mode:
+
+- service.beta.kubernetes.io/azure-load-balancer-mode (__auto__|as1,as2...)
+
+## Introduced Functions
+
+- reconcileLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node, wantLb bool) (*network.LoadBalancer, error)
+  - Go through the LB's properties and update them based on wantLb
+  - If there is any change to the LB, no matter whether the LB exists or not
+    - Call the az cloud to CreateOrUpdate this LB, or Delete it if nothing is left
+  - return lb, err
+
+- reconcileSecurityGroup(clusterName string, service *v1.Service, lbIP *string, wantLb bool) (*network.SecurityGroup, error)
+  - Go through the NSG's properties and update them based on wantLb
+    - Use destinationIPAddress as the target address if possible
+    - Consolidate NSG rules if possible
+  - If there is any change to the NSG (the NSG should always exist)
+    - Call the az cloud to CreateOrUpdate this NSG
+  - return sg, err
+
+- reconcilePublicIP(clusterName string, service *v1.Service, wantLb bool) (*network.PublicIPAddress, error)
+  - List all the public IPs in the resource group
+  - Only touch Public IP resources that have tags[service] = "namespace/serviceName"
+    - skip for wantLb && !isInternal && pipName == desiredPipName
+    - delete any other public IP resources
+  - if !isInternal && wantLb
+    - ensure a Public IP with desiredPipName exists
+
+- getServiceLoadBalancer(service *v1.Service, clusterName string, nodes []*v1.Node, wantLb bool) (lb, status, exists, error)
+  - gets the load balancer for the service if it already exists
+  - If wantLb is TRUE it selects a new load balancer; the selection helps distribute services across load balancers
+  - If the selected load balancer does not exist it returns a network.LoadBalancer struct with added metadata (such as name and location) and existsLB set to FALSE
+  - By default the cluster's default LB is returned
+
+## Define interface behaviors
+
+### GetLoadBalancer
+
+- Get the LoadBalancer status; return status, error
+  - return the load balancer status for this service
+  - it will not create, update, or delete any resource
+
+### EnsureLoadBalancer
+
+- Reconcile the LB for the flipped service
+  - Call reconcileLoadBalancer(clusterName, flipedService, nil, false/* wantLb */)
+- Reconcile the Public IP
+  - Call reconcilePublicIP(cluster, service, true)
+- Reconcile the LB's related and owned resources, such as FrontEndIPConfig, Rules, and Probes
+  - Call reconcileLoadBalancer(clusterName, service, nodes, true /* wantLb */)
+- Reconcile NSG rules; this needs to be called after reconciling the LB
+  - Call reconcileSecurityGroup(clusterName, service, lbStatus, true /* wantLb */)
+
+### UpdateLoadBalancer
+
+- Same as EnsureLoadBalancer
+
+### EnsureLoadBalancerDeleted
+
+- Reconcile the NSG first, before reconciling the LB, because the NSG reconciliation needs the LB to still be there
+  - Call reconcileSecurityGroup(clusterName, service, nil, false /* wantLb */)
+- Reconcile the LB's related and owned resources, such as FrontEndIPConfig, Rules, and Probes
+ - Call reconcileLoadBalancer(clusterName, service, nodes, false) +- Reconcile Public IP, public IP needs related LB reconciled first + - Call reconcilePublicIP(cluster, service, false) \ No newline at end of file diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_util.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_util.go index 211e22f0dfbf..958275ca66bb 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_util.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_util.go @@ -21,6 +21,7 @@ import ( "fmt" "hash/crc32" "regexp" + "sort" "strconv" "strings" @@ -31,6 +32,7 @@ import ( "github.com/Azure/azure-sdk-for-go/arm/network" "github.com/golang/glog" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" ) const ( @@ -44,6 +46,12 @@ const ( loadBalancerRuleIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/loadBalancingRules/%s" loadBalancerProbeIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/probes/%s" securityRuleIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/networkSecurityGroups/%s/securityRules/%s" + + // InternalLoadBalancerNameSuffix is load balancer posfix + InternalLoadBalancerNameSuffix = "-internal" + + // nodeLabelRole specifies the role of a node + nodeLabelRole = "kubernetes.io/role" ) var providerIDRE = regexp.MustCompile(`^` + CloudProviderName + `://(?:.*)/Microsoft.Compute/virtualMachines/(.+)$`) @@ -116,6 +124,143 @@ func (az *Cloud) getSecurityRuleID(securityRuleName string) string { securityRuleName) } +// returns the full identifier of a publicIPAddress. +func (az *Cloud) getpublicIPAddressID(pipName string) string { + return fmt.Sprintf( + publicIPAddressIDTemplate, + az.SubscriptionID, + az.ResourceGroup, + pipName) +} + +// getLoadBalancerAvailabilitySetNames selects all possible availability sets for +// service load balancer, if the service has no loadbalancer mode annotaion returns the +// primary availability set if service annotation for loadbalancer availability set +// exists then return the eligible a availability set +func (az *Cloud) getLoadBalancerAvailabilitySetNames(service *v1.Service, nodes []*v1.Node) (availabilitySetNames *[]string, err error) { + hasMode, isAuto, serviceAvailabilitySetNames := getServiceLoadBalancerMode(service) + if !hasMode { + // no mode specified in service annotation default to PrimaryAvailabilitySetName + availabilitySetNames = &[]string{az.Config.PrimaryAvailabilitySetName} + return availabilitySetNames, nil + } + availabilitySetNames, err = az.getAgentPoolAvailabiliySets(nodes) + if err != nil { + glog.Errorf("az.getLoadBalancerAvailabilitySetNames - getAgentPoolAvailabiliySets failed err=(%v)", err) + return nil, err + } + if len(*availabilitySetNames) == 0 { + glog.Errorf("az.getLoadBalancerAvailabilitySetNames - No availability sets found for nodes in the cluster, node count(%d)", len(nodes)) + return nil, fmt.Errorf("No availability sets found for nodes, node count(%d)", len(nodes)) + } + // sort the list to have deterministic selection + sort.Strings(*availabilitySetNames) + if !isAuto { + if serviceAvailabilitySetNames == nil || len(serviceAvailabilitySetNames) == 0 { + return nil, fmt.Errorf("service annotation for LoadBalancerMode is empty, it should have __auto__ or availability sets value") + } + // validate availability set exists + var found bool + for sasx := range 
serviceAvailabilitySetNames { + for asx := range *availabilitySetNames { + if strings.EqualFold((*availabilitySetNames)[asx], serviceAvailabilitySetNames[sasx]) { + found = true + serviceAvailabilitySetNames[sasx] = (*availabilitySetNames)[asx] + break + } + } + if !found { + glog.Errorf("az.getLoadBalancerAvailabilitySetNames - Availability set (%s) in service annotation not found", serviceAvailabilitySetNames[sasx]) + return nil, fmt.Errorf("availability set (%s) - not found", serviceAvailabilitySetNames[sasx]) + } + } + availabilitySetNames = &serviceAvailabilitySetNames + } + + return availabilitySetNames, nil +} + +// lists the virtual machines for for the resource group and then builds +// a list of availability sets that match the nodes available to k8s +func (az *Cloud) getAgentPoolAvailabiliySets(nodes []*v1.Node) (agentPoolAvailabilitySets *[]string, err error) { + vms, err := az.VirtualMachineClientListWithRetry() + if err != nil { + glog.Errorf("az.getNodeAvailabilitySet - VirtualMachineClientListWithRetry failed, err=%v", err) + return nil, err + } + vmNameToAvailabilitySetID := make(map[string]string, len(vms)) + for vmx := range vms { + vm := vms[vmx] + if vm.AvailabilitySet != nil { + vmNameToAvailabilitySetID[*vm.Name] = *vm.AvailabilitySet.ID + } + } + availabilitySetIDs := sets.NewString() + agentPoolAvailabilitySets = &[]string{} + for nx := range nodes { + nodeName := (*nodes[nx]).Name + if isMasterNode(nodes[nx]) { + continue + } + asID, ok := vmNameToAvailabilitySetID[nodeName] + if !ok { + glog.Errorf("az.getNodeAvailabilitySet - Node(%s) has no availability sets", nodeName) + return nil, fmt.Errorf("Node (%s) - has no availability sets", nodeName) + } + if availabilitySetIDs.Has(asID) { + // already added in the list + continue + } + asName, err := getLastSegment(asID) + if err != nil { + glog.Errorf("az.getNodeAvailabilitySet - Node (%s)- getLastSegment(%s), err=%v", nodeName, asID, err) + return nil, err + } + // AvailabilitySet ID is currently upper cased in a indeterministic way + // We want to keep it lower case, before the ID get fixed + asName = strings.ToLower(asName) + + *agentPoolAvailabilitySets = append(*agentPoolAvailabilitySets, asName) + } + + return agentPoolAvailabilitySets, nil +} + +func (az *Cloud) mapLoadBalancerNameToAvailabilitySet(lbName string, clusterName string) (availabilitySetName string) { + availabilitySetName = strings.TrimSuffix(lbName, InternalLoadBalancerNameSuffix) + if strings.EqualFold(clusterName, lbName) { + availabilitySetName = az.Config.PrimaryAvailabilitySetName + } + + return availabilitySetName +} + +// For a load balancer, all frontend ip should reference either a subnet or publicIpAddress. +// Thus Azure do not allow mixed type (public and internal) load balancer. +// So we'd have a separate name for internal load balancer. +// This would be the name for Azure LoadBalancer resource. +func (az *Cloud) getLoadBalancerName(clusterName string, availabilitySetName string, isInternal bool) string { + lbNamePrefix := availabilitySetName + if strings.EqualFold(availabilitySetName, az.Config.PrimaryAvailabilitySetName) { + lbNamePrefix = clusterName + } + if isInternal { + return fmt.Sprintf("%s%s", lbNamePrefix, InternalLoadBalancerNameSuffix) + } + return lbNamePrefix +} + +// isMasterNode returns returns true is the node has a master role label. 
+// The master role is determined by looking for: +// * a kubernetes.io/role="master" label +func isMasterNode(node *v1.Node) bool { + if val, ok := node.Labels[nodeLabelRole]; ok && val == "master" { + return true + } + + return false +} + // returns the deepest child's identifier from a full identifier string. func getLastSegment(ID string) (string, error) { parts := strings.Split(ID, "/") @@ -179,27 +324,26 @@ func getPrimaryIPConfig(nic network.Interface) (*network.InterfaceIPConfiguratio return nil, fmt.Errorf("failed to determine the determine primary ipconfig. nicname=%q", *nic.Name) } -// For a load balancer, all frontend ip should reference either a subnet or publicIpAddress. -// Thus Azure do not allow mixed type (public and internal) load balancer. -// So we'd have a separate name for internal load balancer. -// This would be the name for Azure LoadBalancer resource. -func getLoadBalancerName(clusterName string, isInternal bool) string { - if isInternal { - return fmt.Sprintf("%s-internal", clusterName) - } - - return clusterName +func isInternalLoadBalancer(lb *network.LoadBalancer) bool { + return strings.HasSuffix(*lb.Name, InternalLoadBalancerNameSuffix) } func getBackendPoolName(clusterName string) string { return clusterName } -func getLoadBalancerRuleName(service *v1.Service, port v1.ServicePort) string { - return fmt.Sprintf("%s-%s-%d", getRulePrefix(service), port.Protocol, port.Port) +func getLoadBalancerRuleName(service *v1.Service, port v1.ServicePort, subnetName *string) string { + if subnetName == nil { + return fmt.Sprintf("%s-%s-%d", getRulePrefix(service), port.Protocol, port.Port) + } + return fmt.Sprintf("%s-%s-%s-%d", getRulePrefix(service), *subnetName, port.Protocol, port.Port) } func getSecurityRuleName(service *v1.Service, port v1.ServicePort, sourceAddrPrefix string) string { + if useSharedSecurityRule(service) { + safePrefix := strings.Replace(sourceAddrPrefix, "/", "_", -1) + return fmt.Sprintf("shared-%s-%d-%s", port.Protocol, port.Port, safePrefix) + } safePrefix := strings.Replace(sourceAddrPrefix, "/", "_", -1) return fmt.Sprintf("%s-%s-%d-%s", getRulePrefix(service), port.Protocol, port.Port, safePrefix) } @@ -224,8 +368,17 @@ func serviceOwnsRule(service *v1.Service, rule string) bool { return strings.HasPrefix(strings.ToUpper(rule), strings.ToUpper(prefix)) } -func getFrontendIPConfigName(service *v1.Service) string { - return cloudprovider.GetLoadBalancerName(service) +func serviceOwnsFrontendIP(fip network.FrontendIPConfiguration, service *v1.Service) bool { + baseName := cloudprovider.GetLoadBalancerName(service) + return strings.HasPrefix(*fip.Name, baseName) +} + +func getFrontendIPConfigName(service *v1.Service, subnetName *string) string { + baseName := cloudprovider.GetLoadBalancerName(service) + if subnetName != nil { + return fmt.Sprintf("%s-%s", baseName, *subnetName) + } + return baseName } // This returns the next available rule priority level for a given set of security rules. 
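To make the new naming scheme concrete, here is a small standalone sketch that only replays the format strings from getLoadBalancerRuleName and getSecurityRuleName above; the aservice123 prefix and the subnet/source values are hypothetical, chosen for illustration:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical values: "aservice123" stands in for the per-service rule
	// prefix derived from cloudprovider.GetLoadBalancerName(service).
	rulePrefix := "aservice123"
	protocol, port := "TCP", 80
	subnetName, sourcePrefix := "subnet-a", "10.0.0.0/8"

	// Load-balancing rule names: the subnet is included only when an internal
	// LB pins its frontend to a specific subnet.
	fmt.Printf("%s-%s-%d\n", rulePrefix, protocol, port)                // aservice123-TCP-80
	fmt.Printf("%s-%s-%s-%d\n", rulePrefix, subnetName, protocol, port) // aservice123-subnet-a-TCP-80

	// Security rule names: shared rules drop the per-service prefix so that
	// several services can consolidate into a single rule.
	safePrefix := strings.Replace(sourcePrefix, "/", "_", -1)
	fmt.Printf("%s-%s-%d-%s\n", rulePrefix, protocol, port, safePrefix) // aservice123-TCP-80-10.0.0.0_8
	fmt.Printf("shared-%s-%d-%s\n", protocol, port, safePrefix)         // shared-TCP-80-10.0.0.0_8
}
```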
@@ -249,6 +402,19 @@ outer: } func (az *Cloud) getIPForMachine(nodeName types.NodeName) (string, error) { + if az.Config.VMType == vmTypeVMSS { + ip, err := az.getIPForVmssMachine(nodeName) + if err == cloudprovider.InstanceNotFound || err == ErrorNotVmssInstance { + return az.getIPForStandardMachine(nodeName) + } + + return ip, err + } + + return az.getIPForStandardMachine(nodeName) +} + +func (az *Cloud) getIPForStandardMachine(nodeName types.NodeName) (string, error) { az.operationPollRateLimiter.Accept() machine, exists, err := az.getVirtualMachine(nodeName) if !exists { diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_util_vmss.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_util_vmss.go new file mode 100644 index 000000000000..e16e31735526 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_util_vmss.go @@ -0,0 +1,102 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package azure + +import ( + "fmt" + "strconv" + + "github.com/Azure/azure-sdk-for-go/arm/compute" + "github.com/golang/glog" + + "k8s.io/apimachinery/pkg/types" + "k8s.io/kubernetes/pkg/cloudprovider" +) + +func (az *Cloud) getIPForVmssMachine(nodeName types.NodeName) (string, error) { + az.operationPollRateLimiter.Accept() + machine, exists, err := az.getVmssVirtualMachine(nodeName) + if !exists { + return "", cloudprovider.InstanceNotFound + } + if err != nil { + glog.Errorf("error: az.getIPForVmssMachine(%s), az.getVmssVirtualMachine(%s), err=%v", nodeName, nodeName, err) + return "", err + } + + nicID, err := getPrimaryInterfaceIDForVmssMachine(machine) + if err != nil { + glog.Errorf("error: az.getIPForVmssMachine(%s), getPrimaryInterfaceID(%v), err=%v", nodeName, machine, err) + return "", err + } + + nicName, err := getLastSegment(nicID) + if err != nil { + glog.Errorf("error: az.getIPForVmssMachine(%s), getLastSegment(%s), err=%v", nodeName, nicID, err) + return "", err + } + + az.operationPollRateLimiter.Accept() + glog.V(10).Infof("InterfacesClient.Get(%q): start", nicName) + nic, err := az.InterfacesClient.GetVirtualMachineScaleSetNetworkInterface(az.ResourceGroup, az.Config.PrimaryScaleSetName, *machine.InstanceID, nicName, "") + glog.V(10).Infof("InterfacesClient.Get(%q): end", nicName) + if err != nil { + glog.Errorf("error: az.getIPForVmssMachine(%s), az.GetVirtualMachineScaleSetNetworkInterface.Get(%s, %s, %s), err=%v", nodeName, az.ResourceGroup, nicName, "", err) + return "", err + } + + ipConfig, err := getPrimaryIPConfig(nic) + if err != nil { + glog.Errorf("error: az.getIPForVmssMachine(%s), getPrimaryIPConfig(%v), err=%v", nodeName, nic, err) + return "", err + } + + targetIP := *ipConfig.PrivateIPAddress + return targetIP, nil +} + +// This returns the full identifier of the primary NIC for the given VM. 
+func getPrimaryInterfaceIDForVmssMachine(machine compute.VirtualMachineScaleSetVM) (string, error) { + if len(*machine.NetworkProfile.NetworkInterfaces) == 1 { + return *(*machine.NetworkProfile.NetworkInterfaces)[0].ID, nil + } + + for _, ref := range *machine.NetworkProfile.NetworkInterfaces { + if *ref.Primary { + return *ref.ID, nil + } + } + + return "", fmt.Errorf("failed to find a primary nic for the vm. vmname=%q", *machine.Name) +} + +// machineName is composed of computerNamePrefix and 36-based instanceID. +// And instanceID part if in fixed length of 6 characters. +// Refer https://msftstack.wordpress.com/2017/05/10/figuring-out-azure-vm-scale-set-machine-names/. +func getVmssInstanceID(machineName string) (string, error) { + nameLength := len(machineName) + if nameLength < 6 { + return "", ErrorNotVmssInstance + } + + instanceID, err := strconv.ParseUint(machineName[nameLength-6:], 36, 64) + if err != nil { + return "", ErrorNotVmssInstance + } + + return fmt.Sprintf("%d", instanceID), nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_wrap.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_wrap.go index e9c06dc6fc22..52d033b9294f 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_wrap.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_wrap.go @@ -17,6 +17,7 @@ limitations under the License. package azure import ( + "errors" "net/http" "github.com/Azure/azure-sdk-for-go/arm/compute" @@ -26,6 +27,11 @@ import ( "k8s.io/apimachinery/pkg/types" ) +var ( + // ErrorNotVmssInstance indicates an instance is not belongint to any vmss. + ErrorNotVmssInstance = errors.New("not a vmss instance") +) + // checkExistsFromError inspects an error and returns a true if err is nil, // false if error is an autorest.Error with StatusCode=404 and will return the // error back if error is another status code or another type of error. 
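A quick worked example of the VMSS machine-name convention described in azure_util_vmss.go above: the last six characters of the machine name are a base-36 encoded instance ID. The names below are hypothetical; only the suffix parsing is shown.

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Hypothetical VMSS machine names; the six-character suffix is parsed as a
	// base-36 number, as getVmssInstanceID does.
	for _, name := range []string{"k8s-vmss-00000a", "k8s-vmss-000010"} {
		suffix := name[len(name)-6:]
		id, err := strconv.ParseUint(suffix, 36, 64)
		if err != nil {
			fmt.Printf("%s is not a VMSS instance name\n", name)
			continue
		}
		fmt.Printf("%s -> instanceID %d\n", name, id) // 00000a -> 10, 000010 -> 36
	}
}
```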
@@ -40,6 +46,19 @@ func checkResourceExistsFromError(err error) (bool, error) { return false, v } +// If it is StatusNotFound return nil, +// Otherwise, return what it is +func ignoreStatusNotFoundFromError(err error) error { + if err == nil { + return nil + } + v, ok := err.(autorest.DetailedError) + if ok && v.StatusCode == http.StatusNotFound { + return nil + } + return err +} + func (az *Cloud) getVirtualMachine(nodeName types.NodeName) (vm compute.VirtualMachine, exists bool, err error) { var realErr error @@ -61,6 +80,32 @@ func (az *Cloud) getVirtualMachine(nodeName types.NodeName) (vm compute.VirtualM return vm, exists, err } +func (az *Cloud) getVmssVirtualMachine(nodeName types.NodeName) (vm compute.VirtualMachineScaleSetVM, exists bool, err error) { + var realErr error + + vmName := string(nodeName) + instanceID, err := getVmssInstanceID(vmName) + if err != nil { + return vm, false, err + } + + az.operationPollRateLimiter.Accept() + glog.V(10).Infof("VirtualMachineScaleSetVMsClient.Get(%s): start", vmName) + vm, err = az.VirtualMachineScaleSetVMsClient.Get(az.ResourceGroup, az.PrimaryScaleSetName, instanceID) + glog.V(10).Infof("VirtualMachineScaleSetVMsClient.Get(%s): end", vmName) + + exists, realErr = checkResourceExistsFromError(err) + if realErr != nil { + return vm, false, realErr + } + + if !exists { + return vm, false, nil + } + + return vm, exists, err +} + func (az *Cloud) getRouteTable() (routeTable network.RouteTable, exists bool, err error) { var realErr error @@ -103,7 +148,6 @@ func (az *Cloud) getSecurityGroup() (sg network.SecurityGroup, exists bool, err func (az *Cloud) getAzureLoadBalancer(name string) (lb network.LoadBalancer, exists bool, err error) { var realErr error - az.operationPollRateLimiter.Accept() glog.V(10).Infof("LoadBalancerClient.Get(%s): start", name) lb, err = az.LoadBalancerClient.Get(az.ResourceGroup, name, "") @@ -121,6 +165,25 @@ func (az *Cloud) getAzureLoadBalancer(name string) (lb network.LoadBalancer, exi return lb, exists, err } +func (az *Cloud) listLoadBalancers() (lbListResult network.LoadBalancerListResult, exists bool, err error) { + var realErr error + + az.operationPollRateLimiter.Accept() + glog.V(10).Infof("LoadBalancerClient.List(%s): start", az.ResourceGroup) + lbListResult, err = az.LoadBalancerClient.List(az.ResourceGroup) + glog.V(10).Infof("LoadBalancerClient.List(%s): end", az.ResourceGroup) + exists, realErr = checkResourceExistsFromError(err) + if realErr != nil { + return lbListResult, false, realErr + } + + if !exists { + return lbListResult, false, nil + } + + return lbListResult, exists, err +} + func (az *Cloud) getPublicIPAddress(name string) (pip network.PublicIPAddress, exists bool, err error) { var realErr error diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_zones.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_zones.go index 2f9d58decf69..192456f43a8b 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_zones.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_zones.go @@ -44,6 +44,7 @@ type instanceInfo struct { // GetZone returns the Zone containing the current failure zone and locality region that the program is running in func (az *Cloud) GetZone() (cloudprovider.Zone, error) { faultMutex.Lock() + defer faultMutex.Unlock() if faultDomain == nil { var err error faultDomain, err = fetchFaultDomain() @@ -55,7 +56,6 @@ func (az *Cloud) GetZone() (cloudprovider.Zone, error) { FailureDomain: *faultDomain, 
Region: az.Location, } - faultMutex.Unlock() return zone, nil } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/BUILD b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/BUILD index 47a548ad7882..9798a63fb13c 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/BUILD @@ -20,6 +20,7 @@ go_library( ], "//conditions:default": [], }), + importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack", deps = [ "//pkg/cloudprovider:go_default_library", "//pkg/controller:go_default_library", @@ -37,6 +38,7 @@ go_library( go_test( name = "go_default_test", srcs = ["cloudstack_test.go"], + importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack", library = ":go_default_library", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/metadata.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/metadata.go index 0ec991d19803..1d76b609a329 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/metadata.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack/metadata.go @@ -91,7 +91,7 @@ func (m *metadata) InstanceID(name types.NodeName) (string, error) { // InstanceType returns the type of the specified instance. func (m *metadata) InstanceType(name types.NodeName) (string, error) { instanceType, err := m.get(metadataTypeInstanceType) - if err == nil { + if err != nil { return "", fmt.Errorf("could not get instance type: %v", err) } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/BUILD b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/BUILD index 401f5934da51..9636a85c9e3e 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/BUILD @@ -32,6 +32,7 @@ go_library( "gce_loadbalancer_external.go", "gce_loadbalancer_internal.go", "gce_loadbalancer_naming.go", + "gce_networkendpointgroup.go", "gce_op.go", "gce_routes.go", "gce_targetpool.go", @@ -39,10 +40,10 @@ go_library( "gce_urlmap.go", "gce_util.go", "gce_zones.go", - "kms.go", "metrics.go", "token_source.go", ], + importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/gce", deps = [ "//pkg/api/v1/service:go_default_library", "//pkg/cloudprovider:go_default_library", @@ -51,6 +52,7 @@ go_library( "//pkg/master/ports:go_default_library", "//pkg/util/net/sets:go_default_library", "//pkg/util/version:go_default_library", + "//pkg/version:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", "//vendor/cloud.google.com/go/compute/metadata:go_default_library", @@ -58,7 +60,6 @@ go_library( "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/golang.org/x/oauth2:go_default_library", "//vendor/golang.org/x/oauth2/google:go_default_library", - "//vendor/google.golang.org/api/cloudkms/v1:go_default_library", "//vendor/google.golang.org/api/compute/v0.alpha:go_default_library", "//vendor/google.golang.org/api/compute/v0.beta:go_default_library", "//vendor/google.golang.org/api/compute/v1:go_default_library", @@ -66,6 +67,7 @@ go_library( "//vendor/google.golang.org/api/googleapi:go_default_library", "//vendor/gopkg.in/gcfg.v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", @@ -74,8 +76,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig:go_default_library", - "//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope:go_default_library", + "//vendor/k8s.io/client-go/informers:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", @@ -96,6 +97,7 @@ go_test( "gce_test.go", "metrics_test.go", ], + importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/gce", library = ":go_default_library", deps = [ "//pkg/cloudprovider:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce.go index 7117929152ef..8447092f1647 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce.go @@ -21,6 +21,7 @@ import ( "io" "net/http" "regexp" + "runtime" "strconv" "strings" "sync" @@ -33,20 +34,21 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/apiserver/pkg/server/options/encryptionconfig" - "k8s.io/apiserver/pkg/storage/value/encrypt/envelope" + "k8s.io/client-go/informers" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" v1core "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/flowcontrol" "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/controller" + kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" + "k8s.io/kubernetes/pkg/version" "github.com/golang/glog" "golang.org/x/oauth2" "golang.org/x/oauth2/google" - cloudkms "google.golang.org/api/cloudkms/v1" computealpha "google.golang.org/api/compute/v0.alpha" computebeta "google.golang.org/api/compute/v0.beta" compute "google.golang.org/api/compute/v1" @@ -100,20 +102,23 @@ type GCECloud struct { // for the cloudprovider to start watching the configmap. 
ClusterID ClusterID - service *compute.Service - serviceBeta *computebeta.Service - serviceAlpha *computealpha.Service - containerService *container.Service - cloudkmsService *cloudkms.Service - client clientset.Interface - clientBuilder controller.ControllerClientBuilder - eventBroadcaster record.EventBroadcaster - eventRecorder record.EventRecorder - projectID string - region string - localZone string // The zone in which we are running - managedZones []string // List of zones we are spanning (for multi-AZ clusters, primarily when running on master) + service *compute.Service + serviceBeta *computebeta.Service + serviceAlpha *computealpha.Service + containerService *container.Service + client clientset.Interface + clientBuilder controller.ControllerClientBuilder + eventBroadcaster record.EventBroadcaster + eventRecorder record.EventRecorder + projectID string + region string + localZone string // The zone in which we are running + // managedZones will be set to the 1 zone if running a single zone cluster + // it will be set to ALL zones in region for any multi-zone cluster + // Use GetAllCurrentZones to get only zones that contain nodes + managedZones []string networkURL string + isLegacyNetwork bool subnetworkURL string secondaryRangeName string networkProjectID string @@ -125,7 +130,13 @@ type GCECloud struct { nodeInstancePrefix string // If non-"", an advisory prefix for all nodes in the cluster useMetadataServer bool operationPollRateLimiter flowcontrol.RateLimiter - manager ServiceManager + manager diskServiceManager + // Lock for access to nodeZones + nodeZonesLock sync.Mutex + // nodeZones is a mapping from Zone to a sets.String of Node's names in the Zone + // it is updated by the nodeInformer + nodeZones map[string]sets.String + nodeInformerSynced cache.InformerSynced // sharedResourceLock is used to serialize GCE operations that may mutate shared state to // prevent inconsistencies. For example, load balancers manipulation methods will take the // lock to prevent shared resources from being prematurely deleted while the operation is @@ -138,59 +149,7 @@ type GCECloud struct { AlphaFeatureGate *AlphaFeatureGate } -type ServiceManager interface { - // Creates a new persistent disk on GCE with the given disk spec. - CreateDisk( - name string, - sizeGb int64, - tagsStr string, - diskType string, - zone string) (gceObject, error) - - // Creates a new regional persistent disk on GCE with the given disk spec. - CreateRegionalDisk( - name string, - sizeGb int64, - tagsStr string, - diskType string, - zones sets.String) (gceObject, error) - - // Deletes the persistent disk from GCE with the given diskName. - DeleteDisk(zone string, disk string) (gceObject, error) - - // Deletes the regional persistent disk from GCE with the given diskName. - DeleteRegionalDisk(diskName string) (gceObject, error) - - // Attach a persistent disk on GCE with the given disk spec to the specified instance. - AttachDisk( - disk *GCEDisk, - readWrite string, - instanceZone string, - instanceName string) (gceObject, error) - - // Detach a persistent disk on GCE with the given disk spec from the specified instance. - DetachDisk( - instanceZone string, - instanceName string, - devicePath string) (gceObject, error) - - // Gets the persistent disk from GCE with the given diskName. - GetDisk(zone string, diskName string) (*GCEDisk, error) - - // Gets the regional persistent disk from GCE with the given diskName. 
- GetRegionalDisk(diskName string) (*GCEDisk, error) - - // Waits until GCE reports the given operation in the given zone as done. - WaitForZoneOp(op gceObject, zone string, mc *metricContext) error - - // Waits until GCE reports the given operation in the given region is done. - WaitForRegionalOp(op gceObject, mc *metricContext) error -} - -type GCEServiceManager struct { - gce *GCECloud -} - +// TODO: replace gcfg with json type ConfigGlobal struct { TokenURL string `gcfg:"token-url"` TokenBody string `gcfg:"token-body"` @@ -215,7 +174,7 @@ type ConfigGlobal struct { // located in (i.e. where the controller will be running). If this is // blank, then the local zone will be discovered via the metadata server. LocalZone string `gcfg:"local-zone"` - // Possible values: List of api names separated by comma. Default to none. + // Default to none. // For example: MyFeatureFlag AlphaFeatures []string `gcfg:"alpha-features"` } @@ -245,10 +204,6 @@ type CloudConfig struct { AlphaFeatureGate *AlphaFeatureGate } -// kmsPluginRegisterOnce prevents the cloudprovider from registering its KMS plugin -// more than once in the KMS plugin registry. -var kmsPluginRegisterOnce sync.Once - func init() { cloudprovider.RegisterCloudProvider( ProviderName, @@ -262,11 +217,6 @@ func (g *GCECloud) GetComputeService() *compute.Service { return g.service } -// Raw access to the cloudkmsService of GCE cloud. Required for encryption of etcd using Google KMS. -func (g *GCECloud) GetKMSService() *cloudkms.Service { - return g.cloudkmsService -} - // newGCECloud creates a new instance of GCECloud. func newGCECloud(config io.Reader) (gceCloud *GCECloud, err error) { var cloudConfig *CloudConfig @@ -303,6 +253,8 @@ func generateCloudConfig(configFile *ConfigFile) (cloudConfig *CloudConfig, err cloudConfig.TokenSource = google.ComputeTokenSource("") cloudConfig.UseMetadataServer = true + featureMap := make(map[string]bool) + cloudConfig.AlphaFeatureGate = &AlphaFeatureGate{featureMap} if configFile != nil { if configFile.Global.ApiEndpoint != "" { cloudConfig.ApiEndpoint = configFile.Global.ApiEndpoint @@ -326,6 +278,13 @@ func generateCloudConfig(configFile *ConfigFile) (cloudConfig *CloudConfig, err glog.Errorf("Encountered error for creating alpha feature gate: %v", err) } cloudConfig.AlphaFeatureGate = alphaFeatureGate + } else { + // initialize AlphaFeatureGate when no AlphaFeatures are configured. + alphaFeatureGate, err := NewAlphaFeatureGate([]string{}) + if err != nil { + glog.Errorf("Encountered error for initializing alpha feature gate: %v", err) + } + cloudConfig.AlphaFeatureGate = alphaFeatureGate } // retrieve projectID and zone @@ -396,6 +355,13 @@ func generateCloudConfig(configFile *ConfigFile) (cloudConfig *CloudConfig, err // If no tokenSource is specified, uses oauth2.DefaultTokenSource. // If managedZones is nil / empty all zones in the region will be managed. func CreateGCECloud(config *CloudConfig) (*GCECloud, error) { + // Remove any pre-release version and build metadata from the semver, leaving only the MAJOR.MINOR.PATCH portion. + // See http://semver.org/. + version := strings.TrimLeft(strings.Split(strings.Split(version.Get().GitVersion, "-")[0], "+")[0], "v") + + // Create a user-agent header append string to supply to the Google API clients, to identify Kubernetes as the origin of the GCP API calls. + userAgent := fmt.Sprintf("Kubernetes/%s (%s %s)", version, runtime.GOOS, runtime.GOARCH) + // Use ProjectID for NetworkProjectID, if it wasn't explicitly set. 
if config.NetworkProjectID == "" { config.NetworkProjectID = config.ProjectID @@ -409,6 +375,7 @@ func CreateGCECloud(config *CloudConfig) (*GCECloud, error) { if err != nil { return nil, err } + service.UserAgent = userAgent client, err = newOauthClient(config.TokenSource) if err != nil { @@ -418,6 +385,7 @@ func CreateGCECloud(config *CloudConfig) (*GCECloud, error) { if err != nil { return nil, err } + serviceBeta.UserAgent = userAgent client, err = newOauthClient(config.TokenSource) if err != nil { @@ -427,6 +395,7 @@ func CreateGCECloud(config *CloudConfig) (*GCECloud, error) { if err != nil { return nil, err } + serviceAlpha.UserAgent = userAgent // Expect override api endpoint to always be v1 api and follows the same pattern as prod. // Generate alpha and beta api endpoints based on override v1 api endpoint. @@ -442,39 +411,53 @@ func CreateGCECloud(config *CloudConfig) (*GCECloud, error) { if err != nil { return nil, err } - - cloudkmsService, err := cloudkms.New(client) - if err != nil { - return nil, err - } + containerService.UserAgent = userAgent // ProjectID and.NetworkProjectID may be project number or name. projID, netProjID := tryConvertToProjectNames(config.ProjectID, config.NetworkProjectID, service) - onXPN := projID != netProjID var networkURL string var subnetURL string + var isLegacyNetwork bool - if config.NetworkName == "" && config.NetworkURL == "" { - // TODO: Stop using this call and return an error. - // This function returns the first network in a list of networks for a project. The project - // should be set via configuration instead of randomly taking the first. - networkName, err := getNetworkNameViaAPICall(service, config.NetworkProjectID) - if err != nil { - return nil, err - } - networkURL = gceNetworkURL(config.ApiEndpoint, netProjID, networkName) - } else if config.NetworkURL != "" { + if config.NetworkURL != "" { networkURL = config.NetworkURL - } else { + } else if config.NetworkName != "" { networkURL = gceNetworkURL(config.ApiEndpoint, netProjID, config.NetworkName) + } else { + // Other consumers may use the cloudprovider without utilizing the wrapped GCE API functions + // or functions requiring network/subnetwork URLs (e.g. Kubelet). + glog.Warningf("No network name or URL specified.") } if config.SubnetworkURL != "" { subnetURL = config.SubnetworkURL } else if config.SubnetworkName != "" { subnetURL = gceSubnetworkURL(config.ApiEndpoint, netProjID, config.Region, config.SubnetworkName) + } else { + // Attempt to determine the subnetwork in case it's an automatic network. + // Legacy networks will not have a subnetwork, so subnetworkURL should remain empty. + if networkName := lastComponent(networkURL); networkName != "" { + if n, err := getNetwork(service, netProjID, networkName); err != nil { + // Gracefully fail because kubelet calls CreateGCECloud without any config, and API calls will fail coming from minions. 
+ glog.Warningf("Could not retrieve network %q in attempt to determine if legacy network or see list of subnets, err %v", networkURL, err) + } else { + // Legacy networks have a non-empty IPv4Range + if len(n.IPv4Range) > 0 { + glog.Infof("Determined network %q is type legacy", networkURL) + isLegacyNetwork = true + } else { + // Try to find the subnet in the list of subnets + subnetURL = findSubnetForRegion(n.Subnetworks, config.Region) + if len(subnetURL) > 0 { + glog.Infof("Using subnet %q within network %q & region %q because none was specified.", subnetURL, n.Name, config.Region) + } else { + glog.Warningf("Could not find any subnet in region %q within list %v.", config.Region, n.Subnetworks) + } + } + } + } } if len(config.ManagedZones) == 0 { @@ -483,7 +466,7 @@ func CreateGCECloud(config *CloudConfig) (*GCECloud, error) { return nil, err } } - if len(config.ManagedZones) != 1 { + if len(config.ManagedZones) > 1 { glog.Infof("managing multiple zones: %v", config.ManagedZones) } @@ -494,7 +477,6 @@ func CreateGCECloud(config *CloudConfig) (*GCECloud, error) { serviceAlpha: serviceAlpha, serviceBeta: serviceBeta, containerService: containerService, - cloudkmsService: cloudkmsService, projectID: projID, networkProjectID: netProjID, onXPN: onXPN, @@ -502,6 +484,7 @@ func CreateGCECloud(config *CloudConfig) (*GCECloud, error) { localZone: config.Zone, managedZones: config.ManagedZones, networkURL: networkURL, + isLegacyNetwork: isLegacyNetwork, subnetworkURL: subnetURL, secondaryRangeName: config.SecondaryRangeName, nodeTags: config.NodeTags, @@ -509,17 +492,10 @@ func CreateGCECloud(config *CloudConfig) (*GCECloud, error) { useMetadataServer: config.UseMetadataServer, operationPollRateLimiter: operationPollRateLimiter, AlphaFeatureGate: config.AlphaFeatureGate, + nodeZones: map[string]sets.String{}, } - gce.manager = &GCEServiceManager{gce} - - // Registering the KMS plugin only the first time. - kmsPluginRegisterOnce.Do(func() { - // Register the Google Cloud KMS based service in the KMS plugin registry. 
- encryptionconfig.KMSPluginRegistry.Register(KMSServiceName, func(config io.Reader) (envelope.Service, error) { - return gce.getGCPCloudKMSService(config) - }) - }) + gce.manager = &gceServiceManager{gce} return gce, nil } @@ -559,7 +535,7 @@ func (gce *GCECloud) Initialize(clientBuilder controller.ControllerClientBuilder if gce.OnXPN() { gce.eventBroadcaster = record.NewBroadcaster() - gce.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(gce.client.Core().RESTClient()).Events("")}) + gce.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(gce.client.CoreV1().RESTClient()).Events("")}) gce.eventRecorder = gce.eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "gce-cloudprovider"}) } @@ -625,6 +601,72 @@ func (gce *GCECloud) SubnetworkURL() string { return gce.subnetworkURL } +func (gce *GCECloud) IsLegacyNetwork() bool { + return gce.isLegacyNetwork +} + +func (gce *GCECloud) SetInformers(informerFactory informers.SharedInformerFactory) { + glog.Infof("Setting up informers for GCECloud") + nodeInformer := informerFactory.Core().V1().Nodes().Informer() + nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + node := obj.(*v1.Node) + gce.updateNodeZones(nil, node) + }, + UpdateFunc: func(prev, obj interface{}) { + prevNode := prev.(*v1.Node) + newNode := obj.(*v1.Node) + if newNode.Labels[kubeletapis.LabelZoneFailureDomain] == + prevNode.Labels[kubeletapis.LabelZoneFailureDomain] { + return + } + gce.updateNodeZones(prevNode, newNode) + }, + DeleteFunc: func(obj interface{}) { + node, isNode := obj.(*v1.Node) + // We can get DeletedFinalStateUnknown instead of *v1.Node here + // and we need to handle that correctly. + if !isNode { + deletedState, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + glog.Errorf("Received unexpected object: %v", obj) + return + } + node, ok = deletedState.Obj.(*v1.Node) + if !ok { + glog.Errorf("DeletedFinalStateUnknown contained non-Node object: %v", deletedState.Obj) + return + } + } + gce.updateNodeZones(node, nil) + }, + }) + gce.nodeInformerSynced = nodeInformer.HasSynced +} + +func (gce *GCECloud) updateNodeZones(prevNode, newNode *v1.Node) { + gce.nodeZonesLock.Lock() + defer gce.nodeZonesLock.Unlock() + if prevNode != nil { + prevZone, ok := prevNode.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain] + if ok { + gce.nodeZones[prevZone].Delete(prevNode.ObjectMeta.Name) + if gce.nodeZones[prevZone].Len() == 0 { + gce.nodeZones[prevZone] = nil + } + } + } + if newNode != nil { + newZone, ok := newNode.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain] + if ok { + if gce.nodeZones[newZone] == nil { + gce.nodeZones[newZone] = sets.NewString() + } + gce.nodeZones[newZone].Insert(newNode.ObjectMeta.Name) + } + } +} + // Known-useless DNS search path. 
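The informer-driven bookkeeping above boils down to a zone-to-node-set map keyed by the failure-domain label. A simplified sketch with plain maps follows; the provider itself stores sets.String values and sets an emptied zone entry to nil rather than deleting it.

package main

import "fmt"

// nodeZones maps a failure-domain zone to the set of node names in it.
type nodeZones map[string]map[string]bool

func (nz nodeZones) add(zone, node string) {
	if nz[zone] == nil {
		nz[zone] = map[string]bool{}
	}
	nz[zone][node] = true
}

func (nz nodeZones) remove(zone, node string) {
	delete(nz[zone], node) // no-op if the zone was never populated
	if len(nz[zone]) == 0 {
		delete(nz, zone) // simplified; the provider nils the entry instead
	}
}

func main() {
	nz := nodeZones{}
	nz.add("us-central1-b", "node-1")
	nz.add("us-central1-c", "node-2")
	nz.remove("us-central1-c", "node-2") // e.g. node deleted or relabeled
	fmt.Println(nz)                      // only us-central1-b remains populated
}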
var uselessDNSSearchRE = regexp.MustCompile(`^[0-9]+.google.internal.$`) @@ -668,7 +710,7 @@ func gceSubnetworkURL(apiEndpoint, project, region, subnetwork string) string { return apiEndpoint + strings.Join([]string{"projects", project, "regions", region, "subnetworks", subnetwork}, "/") } -// getProjectIDInURL parses typical full resource URLS and shorter URLS +// getProjectIDInURL parses full resource URLS and shorter URLS // https://www.googleapis.com/compute/v1/projects/myproject/global/networks/mycustom // projects/myproject/global/networks/mycustom // All return "myproject" @@ -682,6 +724,20 @@ func getProjectIDInURL(urlStr string) (string, error) { return "", fmt.Errorf("could not find project field in url: %v", urlStr) } +// getRegionInURL parses full resource URLS and shorter URLS +// https://www.googleapis.com/compute/v1/projects/myproject/regions/us-central1/subnetworks/a +// projects/myproject/regions/us-central1/subnetworks/a +// All return "us-central1" +func getRegionInURL(urlStr string) string { + fields := strings.Split(urlStr, "/") + for i, v := range fields { + if v == "regions" && i < len(fields)-1 { + return fields[i+1] + } + } + return "" +} + func getNetworkNameViaMetadata() (string, error) { result, err := metadata.Get("instance/network-interfaces/0/network") if err != nil { @@ -694,18 +750,9 @@ func getNetworkNameViaMetadata() (string, error) { return parts[3], nil } -func getNetworkNameViaAPICall(svc *compute.Service, projectID string) (string, error) { - // TODO: use PageToken to list all not just the first 500 - networkList, err := svc.Networks.List(projectID).Do() - if err != nil { - return "", err - } - - if networkList == nil || len(networkList.Items) <= 0 { - return "", fmt.Errorf("GCE Network List call returned no networks for project %q", projectID) - } - - return networkList.Items[0].Name, nil +// getNetwork returns a GCP network +func getNetwork(svc *compute.Service, networkProjectID, networkID string) (*compute.Network, error) { + return svc.Networks.Get(networkProjectID, networkID).Do() } // getProjectID returns the project's string ID given a project number or string @@ -740,6 +787,15 @@ func getZonesForRegion(svc *compute.Service, projectID, region string) ([]string return zones, nil } +func findSubnetForRegion(subnetURLs []string, region string) string { + for _, url := range subnetURLs { + if thisRegion := getRegionInURL(url); thisRegion == region { + return url + } + } + return "" +} + func newOauthClient(tokenSource oauth2.TokenSource) (*http.Client, error) { if tokenSource == nil { var err error @@ -768,335 +824,7 @@ func newOauthClient(tokenSource oauth2.TokenSource) (*http.Client, error) { return oauth2.NewClient(oauth2.NoContext, tokenSource), nil } -func (manager *GCEServiceManager) CreateDisk( - name string, - sizeGb int64, - tagsStr string, - diskType string, - zone string) (gceObject, error) { - diskTypeURI, err := manager.getDiskTypeURI( - manager.gce.region /* diskRegion */, singleZone{zone}, diskType) - if err != nil { - return nil, err - } - - if manager.gce.AlphaFeatureGate.Enabled(GCEDiskAlphaFeatureGate) { - diskToCreateAlpha := &computealpha.Disk{ - Name: name, - SizeGb: sizeGb, - Description: tagsStr, - Type: diskTypeURI, - } - - return manager.gce.serviceAlpha.Disks.Insert( - manager.gce.projectID, zone, diskToCreateAlpha).Do() - } - - diskToCreateV1 := &compute.Disk{ - Name: name, - SizeGb: sizeGb, - Description: tagsStr, - Type: diskTypeURI, - } - return manager.gce.service.Disks.Insert( - manager.gce.projectID, zone, 
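getRegionInURL and findSubnetForRegion above are self-contained string helpers, so they can be exercised directly; a small sketch with sample URLs:

package main

import (
	"fmt"
	"strings"
)

// getRegionInURL returns the path component following "regions" in a GCE
// resource URL, or "" if none is present (same logic as above).
func getRegionInURL(urlStr string) string {
	fields := strings.Split(urlStr, "/")
	for i, v := range fields {
		if v == "regions" && i < len(fields)-1 {
			return fields[i+1]
		}
	}
	return ""
}

// findSubnetForRegion returns the first subnetwork URL in the given region.
func findSubnetForRegion(subnetURLs []string, region string) string {
	for _, url := range subnetURLs {
		if getRegionInURL(url) == region {
			return url
		}
	}
	return ""
}

func main() {
	subnets := []string{
		"https://www.googleapis.com/compute/v1/projects/myproject/regions/europe-west1/subnetworks/a",
		"projects/myproject/regions/us-central1/subnetworks/b",
	}
	fmt.Println(findSubnetForRegion(subnets, "us-central1")) // prints the second URL
}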
diskToCreateV1).Do() -} - -func (manager *GCEServiceManager) CreateRegionalDisk( - name string, - sizeGb int64, - tagsStr string, - diskType string, - replicaZones sets.String) (gceObject, error) { - diskTypeURI, err := manager.getDiskTypeURI( - manager.gce.region /* diskRegion */, multiZone{replicaZones}, diskType) - if err != nil { - return nil, err - } - fullyQualifiedReplicaZones := []string{} - for _, replicaZone := range replicaZones.UnsortedList() { - fullyQualifiedReplicaZones = append( - fullyQualifiedReplicaZones, manager.getReplicaZoneURI(replicaZone)) - } - if manager.gce.AlphaFeatureGate.Enabled(GCEDiskAlphaFeatureGate) { - diskToCreateAlpha := &computealpha.Disk{ - Name: name, - SizeGb: sizeGb, - Description: tagsStr, - Type: diskTypeURI, - ReplicaZones: fullyQualifiedReplicaZones, - } - return manager.gce.serviceAlpha.RegionDisks.Insert( - manager.gce.projectID, manager.gce.region, diskToCreateAlpha).Do() - } - - return nil, fmt.Errorf("The regional PD feature is only available via the GCE Alpha API. Enable \"GCEDiskAlphaAPI\" in the list of \"alpha-features\" in \"gce.conf\" to use the feature.") -} - -func (manager *GCEServiceManager) AttachDisk( - disk *GCEDisk, - readWrite string, - instanceZone string, - instanceName string) (gceObject, error) { - source, err := manager.getDiskSourceURI(disk) - if err != nil { - return nil, err - } - - if manager.gce.AlphaFeatureGate.Enabled(GCEDiskAlphaFeatureGate) { - attachedDiskAlpha := &computealpha.AttachedDisk{ - DeviceName: disk.Name, - Kind: disk.Kind, - Mode: readWrite, - Source: source, - Type: diskTypePersistent, - } - return manager.gce.serviceAlpha.Instances.AttachDisk( - manager.gce.projectID, instanceZone, instanceName, attachedDiskAlpha).Do() - } - - attachedDiskV1 := &compute.AttachedDisk{ - DeviceName: disk.Name, - Kind: disk.Kind, - Mode: readWrite, - Source: source, - Type: disk.Type, - } - return manager.gce.service.Instances.AttachDisk( - manager.gce.projectID, instanceZone, instanceName, attachedDiskV1).Do() -} - -func (manager *GCEServiceManager) DetachDisk( - instanceZone string, - instanceName string, - devicePath string) (gceObject, error) { - if manager.gce.AlphaFeatureGate.Enabled(GCEDiskAlphaFeatureGate) { - manager.gce.serviceAlpha.Instances.DetachDisk( - manager.gce.projectID, instanceZone, instanceName, devicePath).Do() - } - - return manager.gce.service.Instances.DetachDisk( - manager.gce.projectID, instanceZone, instanceName, devicePath).Do() -} - -func (manager *GCEServiceManager) GetDisk( - zone string, - diskName string) (*GCEDisk, error) { - if zone == "" { - return nil, fmt.Errorf("Can not fetch disk %q. Zone is empty.", diskName) - } - - if diskName == "" { - return nil, fmt.Errorf("Can not fetch disk. Zone is specified (%q). 
But disk name is empty.", zone) - } - - if manager.gce.AlphaFeatureGate.Enabled(GCEDiskAlphaFeatureGate) { - diskAlpha, err := manager.gce.serviceAlpha.Disks.Get( - manager.gce.projectID, zone, diskName).Do() - if err != nil { - return nil, err - } - - var zoneInfo zoneType - if len(diskAlpha.ReplicaZones) > 1 { - zones := sets.NewString() - for _, zoneURI := range diskAlpha.ReplicaZones { - zones.Insert(lastComponent(zoneURI)) - } - zoneInfo = multiZone{zones} - } else { - zoneInfo = singleZone{lastComponent(diskAlpha.Zone)} - if diskAlpha.Zone == "" { - zoneInfo = singleZone{lastComponent(zone)} - } - } - - region := strings.TrimSpace(lastComponent(diskAlpha.Region)) - if region == "" { - region, err = manager.getRegionFromZone(zoneInfo) - if err != nil { - return nil, fmt.Errorf("failed to extract region from zone for %q/%q err=%v", zone, diskName, err) - } - } - - return &GCEDisk{ - ZoneInfo: zoneInfo, - Region: region, - Name: diskAlpha.Name, - Kind: diskAlpha.Kind, - Type: diskAlpha.Type, - }, nil - } - - diskStable, err := manager.gce.service.Disks.Get( - manager.gce.projectID, zone, diskName).Do() - if err != nil { - return nil, err - } - - zoneInfo := singleZone{strings.TrimSpace(lastComponent(diskStable.Zone))} - if zoneInfo.zone == "" { - zoneInfo = singleZone{zone} - } - - region, err := manager.getRegionFromZone(zoneInfo) - if err != nil { - return nil, fmt.Errorf("failed to extract region from zone for %q/%q err=%v", zone, diskName, err) - } - - return &GCEDisk{ - ZoneInfo: zoneInfo, - Region: region, - Name: diskStable.Name, - Kind: diskStable.Kind, - Type: diskStable.Type, - }, nil -} - -func (manager *GCEServiceManager) GetRegionalDisk( - diskName string) (*GCEDisk, error) { - - if manager.gce.AlphaFeatureGate.Enabled(GCEDiskAlphaFeatureGate) { - diskAlpha, err := manager.gce.serviceAlpha.RegionDisks.Get( - manager.gce.projectID, manager.gce.region, diskName).Do() - if err != nil { - return nil, err - } - - zones := sets.NewString() - for _, zoneURI := range diskAlpha.ReplicaZones { - zones.Insert(lastComponent(zoneURI)) - } - - return &GCEDisk{ - ZoneInfo: multiZone{zones}, - Region: lastComponent(diskAlpha.Region), - Name: diskAlpha.Name, - Kind: diskAlpha.Kind, - Type: diskAlpha.Type, - }, nil - } - - return nil, fmt.Errorf("The regional PD feature is only available via the GCE Alpha API. Enable \"GCEDiskAlphaAPI\" in the list of \"alpha-features\" in \"gce.conf\" to use the feature.") -} - -func (manager *GCEServiceManager) DeleteDisk( - zone string, - diskName string) (gceObject, error) { - - if manager.gce.AlphaFeatureGate.Enabled(GCEDiskAlphaFeatureGate) { - return manager.gce.serviceAlpha.Disks.Delete( - manager.gce.projectID, zone, diskName).Do() - } - - return manager.gce.service.Disks.Delete( - manager.gce.projectID, zone, diskName).Do() -} - -func (manager *GCEServiceManager) DeleteRegionalDisk( - diskName string) (gceObject, error) { - if manager.gce.AlphaFeatureGate.Enabled(GCEDiskAlphaFeatureGate) { - return manager.gce.serviceAlpha.RegionDisks.Delete( - manager.gce.projectID, manager.gce.region, diskName).Do() - } - - return nil, fmt.Errorf("DeleteRegionalDisk is a regional PD feature and is only available via the GCE Alpha API. 
Enable \"GCEDiskAlphaAPI\" in the list of \"alpha-features\" in \"gce.conf\" to use the feature.") -} - -func (manager *GCEServiceManager) WaitForZoneOp( - op gceObject, zone string, mc *metricContext) error { - return manager.gce.waitForZoneOp(op, zone, mc) -} - -func (manager *GCEServiceManager) WaitForRegionalOp( - op gceObject, mc *metricContext) error { - return manager.gce.waitForRegionOp(op, manager.gce.region, mc) -} - -func (manager *GCEServiceManager) getDiskSourceURI(disk *GCEDisk) (string, error) { - getProjectsAPIEndpoint := manager.getProjectsAPIEndpoint() - if manager.gce.AlphaFeatureGate.Enabled(GCEDiskAlphaFeatureGate) { - getProjectsAPIEndpoint = manager.getProjectsAPIEndpointAlpha() - } - - switch zoneInfo := disk.ZoneInfo.(type) { - case singleZone: - if zoneInfo.zone == "" || disk.Region == "" { - // Unexpected, but sanity-check - return "", fmt.Errorf("PD does not have zone/region information: %#v", disk) - } - - return getProjectsAPIEndpoint + fmt.Sprintf( - diskSourceURITemplateSingleZone, - manager.gce.projectID, - zoneInfo.zone, - disk.Name), nil - case multiZone: - if zoneInfo.replicaZones == nil || zoneInfo.replicaZones.Len() <= 0 { - // Unexpected, but sanity-check - return "", fmt.Errorf("PD is regional but does not have any replicaZones specified: %v", disk) - } - return getProjectsAPIEndpoint + fmt.Sprintf( - diskSourceURITemplateRegional, - manager.gce.projectID, - disk.Region, - disk.Name), nil - case nil: - // Unexpected, but sanity-check - return "", fmt.Errorf("PD did not have ZoneInfo: %v", disk) - default: - // Unexpected, but sanity-check - return "", fmt.Errorf("disk.ZoneInfo has unexpected type %T", zoneInfo) - } -} - -func (manager *GCEServiceManager) getDiskTypeURI( - diskRegion string, diskZoneInfo zoneType, diskType string) (string, error) { - getProjectsAPIEndpoint := manager.getProjectsAPIEndpoint() - if manager.gce.AlphaFeatureGate.Enabled(GCEDiskAlphaFeatureGate) { - getProjectsAPIEndpoint = manager.getProjectsAPIEndpointAlpha() - } - - switch zoneInfo := diskZoneInfo.(type) { - case singleZone: - if zoneInfo.zone == "" { - return "", fmt.Errorf("zone is empty: %v", zoneInfo) - } - - return getProjectsAPIEndpoint + fmt.Sprintf( - diskTypeURITemplateSingleZone, - manager.gce.projectID, - zoneInfo.zone, - diskType), nil - case multiZone: - if zoneInfo.replicaZones == nil || zoneInfo.replicaZones.Len() <= 0 { - return "", fmt.Errorf("zoneInfo is regional but does not have any replicaZones specified: %v", zoneInfo) - } - return getProjectsAPIEndpoint + fmt.Sprintf( - diskTypeURITemplateRegional, - manager.gce.projectID, - diskRegion, - diskType), nil - case nil: - return "", fmt.Errorf("zoneInfo nil") - default: - return "", fmt.Errorf("zoneInfo has unexpected type %T", zoneInfo) - } -} - -func (manager *GCEServiceManager) getReplicaZoneURI(zone string) string { - getProjectsAPIEndpoint := manager.getProjectsAPIEndpoint() - if manager.gce.AlphaFeatureGate.Enabled(GCEDiskAlphaFeatureGate) { - getProjectsAPIEndpoint = manager.getProjectsAPIEndpointAlpha() - } - - return getProjectsAPIEndpoint + fmt.Sprintf( - replicaZoneURITemplateSingleZone, - manager.gce.projectID, - zone) -} - -func (manager *GCEServiceManager) getProjectsAPIEndpoint() string { +func (manager *gceServiceManager) getProjectsAPIEndpoint() string { projectsApiEndpoint := gceComputeAPIEndpoint + "projects/" if manager.gce.service != nil { projectsApiEndpoint = manager.gce.service.BasePath @@ -1105,7 +833,7 @@ func (manager *GCEServiceManager) getProjectsAPIEndpoint() string { 
return projectsApiEndpoint } -func (manager *GCEServiceManager) getProjectsAPIEndpointAlpha() string { +func (manager *gceServiceManager) getProjectsAPIEndpointAlpha() string { projectsApiEndpoint := gceComputeAPIEndpointAlpha + "projects/" if manager.gce.service != nil { projectsApiEndpoint = manager.gce.serviceAlpha.BasePath @@ -1113,37 +841,3 @@ func (manager *GCEServiceManager) getProjectsAPIEndpointAlpha() string { return projectsApiEndpoint } - -func (manager *GCEServiceManager) getRegionFromZone(zoneInfo zoneType) (string, error) { - var zone string - switch zoneInfo := zoneInfo.(type) { - case singleZone: - if zoneInfo.zone == "" { - // Unexpected, but sanity-check - return "", fmt.Errorf("PD is single zone, but zone is not specified: %#v", zoneInfo) - } - - zone = zoneInfo.zone - case multiZone: - if zoneInfo.replicaZones == nil || zoneInfo.replicaZones.Len() <= 0 { - // Unexpected, but sanity-check - return "", fmt.Errorf("PD is regional but does not have any replicaZones specified: %v", zoneInfo) - } - - zone = zoneInfo.replicaZones.UnsortedList()[0] - case nil: - // Unexpected, but sanity-check - return "", fmt.Errorf("zoneInfo is nil") - default: - // Unexpected, but sanity-check - return "", fmt.Errorf("zoneInfo has unexpected type %T", zoneInfo) - } - - region, err := GetGCERegion(zone) - if err != nil { - glog.Warningf("failed to parse GCE region from zone %q: %v", zone, err) - region = manager.gce.region - } - - return region, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_alpha.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_alpha.go index 292504f7d6ee..8b9c98f5816a 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_alpha.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_alpha.go @@ -29,13 +29,16 @@ const ( // tier to use. Currently supports "Standard" and "Premium" (default). 
AlphaFeatureNetworkTiers = "NetworkTiers" - GCEDiskAlphaFeatureGate = "DiskAlphaAPI" + AlphaFeatureGCEDisk = "DiskAlphaAPI" + + AlphaFeatureNetworkEndpointGroup = "NetworkEndpointGroup" ) // All known alpha features var knownAlphaFeatures = map[string]bool{ - AlphaFeatureNetworkTiers: true, - GCEDiskAlphaFeatureGate: true, + AlphaFeatureNetworkTiers: true, + AlphaFeatureGCEDisk: true, + AlphaFeatureNetworkEndpointGroup: true, } type AlphaFeatureGate struct { @@ -58,3 +61,10 @@ func NewAlphaFeatureGate(features []string) (*AlphaFeatureGate, error) { } return &AlphaFeatureGate{featureMap}, utilerrors.NewAggregate(errList) } + +func (gce *GCECloud) alphaFeatureEnabled(feature string) error { + if !gce.AlphaFeatureGate.Enabled(feature) { + return fmt.Errorf("alpha feature %q is not enabled.", feature) + } + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_backendservice.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_backendservice.go index 7bf3d73e0d88..cbbc5db7d79b 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_backendservice.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_backendservice.go @@ -19,11 +19,16 @@ package gce import ( "net/http" + computealpha "google.golang.org/api/compute/v0.alpha" compute "google.golang.org/api/compute/v1" ) func newBackendServiceMetricContext(request, region string) *metricContext { - return newGenericMetricContext("backendservice", request, region, unusedMetricLabel, computeV1Version) + return newBackendServiceMetricContextWithVersion(request, region, computeV1Version) +} + +func newBackendServiceMetricContextWithVersion(request, region, version string) *metricContext { + return newGenericMetricContext("backendservice", request, region, unusedMetricLabel, version) } // GetGlobalBackendService retrieves a backend by name. @@ -33,6 +38,13 @@ func (gce *GCECloud) GetGlobalBackendService(name string) (*compute.BackendServi return v, mc.Observe(err) } +// GetAlphaGlobalBackendService retrieves alpha backend by name. +func (gce *GCECloud) GetAlphaGlobalBackendService(name string) (*computealpha.BackendService, error) { + mc := newBackendServiceMetricContextWithVersion("get", "", computeAlphaVersion) + v, err := gce.serviceAlpha.BackendServices.Get(gce.projectID, name).Do() + return v, mc.Observe(err) +} + // UpdateGlobalBackendService applies the given BackendService as an update to an existing service. func (gce *GCECloud) UpdateGlobalBackendService(bg *compute.BackendService) error { mc := newBackendServiceMetricContext("update", "") @@ -44,6 +56,17 @@ func (gce *GCECloud) UpdateGlobalBackendService(bg *compute.BackendService) erro return gce.waitForGlobalOp(op, mc) } +// UpdateAlphaGlobalBackendService applies the given alpha BackendService as an update to an existing service. +func (gce *GCECloud) UpdateAlphaGlobalBackendService(bg *computealpha.BackendService) error { + mc := newBackendServiceMetricContextWithVersion("update", "", computeAlphaVersion) + op, err := gce.serviceAlpha.BackendServices.Update(gce.projectID, bg.Name, bg).Do() + if err != nil { + return mc.Observe(err) + } + + return gce.waitForGlobalOp(op, mc) +} + // DeleteGlobalBackendService deletes the given BackendService by name. 
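The alpha wrappers above all follow one pattern: consult the feature gate, then route the call to the v0.alpha or v1 compute client and record a versioned metric context. A rough sketch of the gating half, using a simplified local gate type since AlphaFeatureGate's fields are not shown here:

package main

import "fmt"

// featureGate is a simplified stand-in for AlphaFeatureGate: a set of
// enabled alpha feature names consulted before using alpha GCE APIs.
type featureGate struct {
	features map[string]bool
}

func (g *featureGate) Enabled(name string) bool {
	return g.features[name]
}

func main() {
	gate := &featureGate{features: map[string]bool{"DiskAlphaAPI": true}}

	// Callers branch between the alpha and v1 compute services,
	// mirroring the checks sprinkled through the disk and LB code.
	if gate.Enabled("DiskAlphaAPI") {
		fmt.Println("use compute v0.alpha Disks / RegionDisks")
	} else {
		fmt.Println("use compute v1 Disks")
	}
	fmt.Println("NetworkTiers enabled:", gate.Enabled("NetworkTiers"))
}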
func (gce *GCECloud) DeleteGlobalBackendService(name string) error { mc := newBackendServiceMetricContext("delete", "") @@ -69,6 +92,17 @@ func (gce *GCECloud) CreateGlobalBackendService(bg *compute.BackendService) erro return gce.waitForGlobalOp(op, mc) } +// CreateAlphaGlobalBackendService creates the given alpha BackendService. +func (gce *GCECloud) CreateAlphaGlobalBackendService(bg *computealpha.BackendService) error { + mc := newBackendServiceMetricContextWithVersion("create", "", computeAlphaVersion) + op, err := gce.serviceAlpha.BackendServices.Insert(gce.projectID, bg).Do() + if err != nil { + return mc.Observe(err) + } + + return gce.waitForGlobalOp(op, mc) +} + // ListGlobalBackendServices lists all backend services in the project. func (gce *GCECloud) ListGlobalBackendServices() (*compute.BackendServiceList, error) { mc := newBackendServiceMetricContext("list", "") diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_clusterid.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_clusterid.go index 6f1b667c8efc..46b4ff4f6a0c 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_clusterid.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_clusterid.go @@ -101,7 +101,7 @@ func (gce *GCECloud) watchClusterID() { }, } - listerWatcher := cache.NewListWatchFromClient(gce.ClusterID.client.Core().RESTClient(), "configmaps", UIDNamespace, fields.Everything()) + listerWatcher := cache.NewListWatchFromClient(gce.ClusterID.client.CoreV1().RESTClient(), "configmaps", UIDNamespace, fields.Everything()) var controller cache.Controller gce.ClusterID.store, controller = cache.NewInformer(newSingleObjectListerWatcher(listerWatcher, UIDConfigMapName), &v1.ConfigMap{}, updateFuncFrequency, mapEventHandler) @@ -189,7 +189,7 @@ func (ci *ClusterID) getOrInitialize() error { UIDProvider: newId, } - if _, err := ci.client.Core().ConfigMaps(UIDNamespace).Create(cfg); err != nil { + if _, err := ci.client.CoreV1().ConfigMaps(UIDNamespace).Create(cfg); err != nil { glog.Errorf("GCE cloud provider failed to create %v config map to store cluster id: %v", ci.cfgMapKey, err) return err } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_disks.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_disks.go index ac7f6ee394dc..6918c57c0df7 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_disks.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_disks.go @@ -24,6 +24,7 @@ import ( "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/kubernetes/pkg/cloudprovider" @@ -32,6 +33,8 @@ import ( volumeutil "k8s.io/kubernetes/pkg/volume/util" "github.com/golang/glog" + computealpha "google.golang.org/api/compute/v0.alpha" + compute "google.golang.org/api/compute/v1" "google.golang.org/api/googleapi" ) @@ -52,6 +55,451 @@ const ( replicaZoneURITemplateSingleZone = "%s/zones/%s" // {gce.projectID}/zones/{disk.Zone} ) +type diskServiceManager interface { + // Creates a new persistent disk on GCE with the given disk spec. + CreateDiskOnCloudProvider( + name string, + sizeGb int64, + tagsStr string, + diskType string, + zone string) (gceObject, error) + + // Creates a new regional persistent disk on GCE with the given disk spec. 
+ CreateRegionalDiskOnCloudProvider( + name string, + sizeGb int64, + tagsStr string, + diskType string, + zones sets.String) (gceObject, error) + + // Deletes the persistent disk from GCE with the given diskName. + DeleteDiskOnCloudProvider(zone string, disk string) (gceObject, error) + + // Deletes the regional persistent disk from GCE with the given diskName. + DeleteRegionalDiskOnCloudProvider(diskName string) (gceObject, error) + + // Attach a persistent disk on GCE with the given disk spec to the specified instance. + AttachDiskOnCloudProvider( + disk *GCEDisk, + readWrite string, + instanceZone string, + instanceName string) (gceObject, error) + + // Detach a persistent disk on GCE with the given disk spec from the specified instance. + DetachDiskOnCloudProvider( + instanceZone string, + instanceName string, + devicePath string) (gceObject, error) + + ResizeDiskOnCloudProvider(disk *GCEDisk, sizeGb int64, zone string) (gceObject, error) + RegionalResizeDiskOnCloudProvider(disk *GCEDisk, sizeGb int64) (gceObject, error) + + // Gets the persistent disk from GCE with the given diskName. + GetDiskFromCloudProvider(zone string, diskName string) (*GCEDisk, error) + + // Gets the regional persistent disk from GCE with the given diskName. + GetRegionalDiskFromCloudProvider(diskName string) (*GCEDisk, error) + + // Waits until GCE reports the given operation in the given zone as done. + WaitForZoneOp(op gceObject, zone string, mc *metricContext) error + + // Waits until GCE reports the given operation in the given region is done. + WaitForRegionalOp(op gceObject, mc *metricContext) error +} + +type gceServiceManager struct { + gce *GCECloud +} + +func (manager *gceServiceManager) CreateDiskOnCloudProvider( + name string, + sizeGb int64, + tagsStr string, + diskType string, + zone string) (gceObject, error) { + diskTypeURI, err := manager.getDiskTypeURI( + manager.gce.region /* diskRegion */, singleZone{zone}, diskType) + if err != nil { + return nil, err + } + + if manager.gce.AlphaFeatureGate.Enabled(AlphaFeatureGCEDisk) { + diskToCreateAlpha := &computealpha.Disk{ + Name: name, + SizeGb: sizeGb, + Description: tagsStr, + Type: diskTypeURI, + } + + return manager.gce.serviceAlpha.Disks.Insert( + manager.gce.projectID, zone, diskToCreateAlpha).Do() + } + + diskToCreateV1 := &compute.Disk{ + Name: name, + SizeGb: sizeGb, + Description: tagsStr, + Type: diskTypeURI, + } + return manager.gce.service.Disks.Insert( + manager.gce.projectID, zone, diskToCreateV1).Do() +} + +func (manager *gceServiceManager) CreateRegionalDiskOnCloudProvider( + name string, + sizeGb int64, + tagsStr string, + diskType string, + replicaZones sets.String) (gceObject, error) { + diskTypeURI, err := manager.getDiskTypeURI( + manager.gce.region /* diskRegion */, multiZone{replicaZones}, diskType) + if err != nil { + return nil, err + } + fullyQualifiedReplicaZones := []string{} + for _, replicaZone := range replicaZones.UnsortedList() { + fullyQualifiedReplicaZones = append( + fullyQualifiedReplicaZones, manager.getReplicaZoneURI(replicaZone)) + } + if manager.gce.AlphaFeatureGate.Enabled(AlphaFeatureGCEDisk) { + diskToCreateAlpha := &computealpha.Disk{ + Name: name, + SizeGb: sizeGb, + Description: tagsStr, + Type: diskTypeURI, + ReplicaZones: fullyQualifiedReplicaZones, + } + return manager.gce.serviceAlpha.RegionDisks.Insert( + manager.gce.projectID, manager.gce.region, diskToCreateAlpha).Do() + } + + return nil, fmt.Errorf("The regional PD feature is only available via the GCE Alpha API. 
Enable \"GCEDiskAlphaAPI\" in the list of \"alpha-features\" in \"gce.conf\" to use the feature.") +} + +func (manager *gceServiceManager) AttachDiskOnCloudProvider( + disk *GCEDisk, + readWrite string, + instanceZone string, + instanceName string) (gceObject, error) { + source, err := manager.getDiskSourceURI(disk) + if err != nil { + return nil, err + } + + if manager.gce.AlphaFeatureGate.Enabled(AlphaFeatureGCEDisk) { + attachedDiskAlpha := &computealpha.AttachedDisk{ + DeviceName: disk.Name, + Kind: disk.Kind, + Mode: readWrite, + Source: source, + Type: diskTypePersistent, + } + return manager.gce.serviceAlpha.Instances.AttachDisk( + manager.gce.projectID, instanceZone, instanceName, attachedDiskAlpha).Do() + } + + attachedDiskV1 := &compute.AttachedDisk{ + DeviceName: disk.Name, + Kind: disk.Kind, + Mode: readWrite, + Source: source, + Type: disk.Type, + } + return manager.gce.service.Instances.AttachDisk( + manager.gce.projectID, instanceZone, instanceName, attachedDiskV1).Do() +} + +func (manager *gceServiceManager) DetachDiskOnCloudProvider( + instanceZone string, + instanceName string, + devicePath string) (gceObject, error) { + if manager.gce.AlphaFeatureGate.Enabled(AlphaFeatureGCEDisk) { + manager.gce.serviceAlpha.Instances.DetachDisk( + manager.gce.projectID, instanceZone, instanceName, devicePath).Do() + } + + return manager.gce.service.Instances.DetachDisk( + manager.gce.projectID, instanceZone, instanceName, devicePath).Do() +} + +func (manager *gceServiceManager) GetDiskFromCloudProvider( + zone string, + diskName string) (*GCEDisk, error) { + if zone == "" { + return nil, fmt.Errorf("Can not fetch disk %q. Zone is empty.", diskName) + } + + if diskName == "" { + return nil, fmt.Errorf("Can not fetch disk. Zone is specified (%q). But disk name is empty.", zone) + } + + if manager.gce.AlphaFeatureGate.Enabled(AlphaFeatureGCEDisk) { + diskAlpha, err := manager.gce.serviceAlpha.Disks.Get( + manager.gce.projectID, zone, diskName).Do() + if err != nil { + return nil, err + } + + var zoneInfo zoneType + if len(diskAlpha.ReplicaZones) > 1 { + zones := sets.NewString() + for _, zoneURI := range diskAlpha.ReplicaZones { + zones.Insert(lastComponent(zoneURI)) + } + zoneInfo = multiZone{zones} + } else { + zoneInfo = singleZone{lastComponent(diskAlpha.Zone)} + if diskAlpha.Zone == "" { + zoneInfo = singleZone{lastComponent(zone)} + } + } + + region := strings.TrimSpace(lastComponent(diskAlpha.Region)) + if region == "" { + region, err = manager.getRegionFromZone(zoneInfo) + if err != nil { + return nil, fmt.Errorf("failed to extract region from zone for %q/%q err=%v", zone, diskName, err) + } + } + + return &GCEDisk{ + ZoneInfo: zoneInfo, + Region: region, + Name: diskAlpha.Name, + Kind: diskAlpha.Kind, + Type: diskAlpha.Type, + SizeGb: diskAlpha.SizeGb, + }, nil + } + + diskStable, err := manager.gce.service.Disks.Get( + manager.gce.projectID, zone, diskName).Do() + if err != nil { + return nil, err + } + + zoneInfo := singleZone{strings.TrimSpace(lastComponent(diskStable.Zone))} + if zoneInfo.zone == "" { + zoneInfo = singleZone{zone} + } + + region, err := manager.getRegionFromZone(zoneInfo) + if err != nil { + return nil, fmt.Errorf("failed to extract region from zone for %q/%q err=%v", zone, diskName, err) + } + + return &GCEDisk{ + ZoneInfo: zoneInfo, + Region: region, + Name: diskStable.Name, + Kind: diskStable.Kind, + Type: diskStable.Type, + SizeGb: diskStable.SizeGb, + }, nil +} + +func (manager *gceServiceManager) GetRegionalDiskFromCloudProvider( + diskName string) 
(*GCEDisk, error) { + + if manager.gce.AlphaFeatureGate.Enabled(AlphaFeatureGCEDisk) { + diskAlpha, err := manager.gce.serviceAlpha.RegionDisks.Get( + manager.gce.projectID, manager.gce.region, diskName).Do() + if err != nil { + return nil, err + } + + zones := sets.NewString() + for _, zoneURI := range diskAlpha.ReplicaZones { + zones.Insert(lastComponent(zoneURI)) + } + + return &GCEDisk{ + ZoneInfo: multiZone{zones}, + Region: lastComponent(diskAlpha.Region), + Name: diskAlpha.Name, + Kind: diskAlpha.Kind, + Type: diskAlpha.Type, + SizeGb: diskAlpha.SizeGb, + }, nil + } + + return nil, fmt.Errorf("The regional PD feature is only available via the GCE Alpha API. Enable \"GCEDiskAlphaAPI\" in the list of \"alpha-features\" in \"gce.conf\" to use the feature.") +} + +func (manager *gceServiceManager) DeleteDiskOnCloudProvider( + zone string, + diskName string) (gceObject, error) { + + if manager.gce.AlphaFeatureGate.Enabled(AlphaFeatureGCEDisk) { + return manager.gce.serviceAlpha.Disks.Delete( + manager.gce.projectID, zone, diskName).Do() + } + + return manager.gce.service.Disks.Delete( + manager.gce.projectID, zone, diskName).Do() +} + +func (manager *gceServiceManager) DeleteRegionalDiskOnCloudProvider( + diskName string) (gceObject, error) { + if manager.gce.AlphaFeatureGate.Enabled(AlphaFeatureGCEDisk) { + return manager.gce.serviceAlpha.RegionDisks.Delete( + manager.gce.projectID, manager.gce.region, diskName).Do() + } + + return nil, fmt.Errorf("DeleteRegionalDiskOnCloudProvider is a regional PD feature and is only available via the GCE Alpha API. Enable \"GCEDiskAlphaAPI\" in the list of \"alpha-features\" in \"gce.conf\" to use the feature.") +} + +func (manager *gceServiceManager) WaitForZoneOp( + op gceObject, zone string, mc *metricContext) error { + return manager.gce.waitForZoneOp(op, zone, mc) +} + +func (manager *gceServiceManager) WaitForRegionalOp( + op gceObject, mc *metricContext) error { + return manager.gce.waitForRegionOp(op, manager.gce.region, mc) +} + +func (manager *gceServiceManager) getDiskSourceURI(disk *GCEDisk) (string, error) { + getProjectsAPIEndpoint := manager.getProjectsAPIEndpoint() + if manager.gce.AlphaFeatureGate.Enabled(AlphaFeatureGCEDisk) { + getProjectsAPIEndpoint = manager.getProjectsAPIEndpointAlpha() + } + + switch zoneInfo := disk.ZoneInfo.(type) { + case singleZone: + if zoneInfo.zone == "" || disk.Region == "" { + // Unexpected, but sanity-check + return "", fmt.Errorf("PD does not have zone/region information: %#v", disk) + } + + return getProjectsAPIEndpoint + fmt.Sprintf( + diskSourceURITemplateSingleZone, + manager.gce.projectID, + zoneInfo.zone, + disk.Name), nil + case multiZone: + if zoneInfo.replicaZones == nil || zoneInfo.replicaZones.Len() <= 0 { + // Unexpected, but sanity-check + return "", fmt.Errorf("PD is regional but does not have any replicaZones specified: %v", disk) + } + return getProjectsAPIEndpoint + fmt.Sprintf( + diskSourceURITemplateRegional, + manager.gce.projectID, + disk.Region, + disk.Name), nil + case nil: + // Unexpected, but sanity-check + return "", fmt.Errorf("PD did not have ZoneInfo: %v", disk) + default: + // Unexpected, but sanity-check + return "", fmt.Errorf("disk.ZoneInfo has unexpected type %T", zoneInfo) + } +} + +func (manager *gceServiceManager) getDiskTypeURI( + diskRegion string, diskZoneInfo zoneType, diskType string) (string, error) { + getProjectsAPIEndpoint := manager.getProjectsAPIEndpoint() + if manager.gce.AlphaFeatureGate.Enabled(AlphaFeatureGCEDisk) { + getProjectsAPIEndpoint = 
manager.getProjectsAPIEndpointAlpha() + } + + switch zoneInfo := diskZoneInfo.(type) { + case singleZone: + if zoneInfo.zone == "" { + return "", fmt.Errorf("zone is empty: %v", zoneInfo) + } + + return getProjectsAPIEndpoint + fmt.Sprintf( + diskTypeURITemplateSingleZone, + manager.gce.projectID, + zoneInfo.zone, + diskType), nil + case multiZone: + if zoneInfo.replicaZones == nil || zoneInfo.replicaZones.Len() <= 0 { + return "", fmt.Errorf("zoneInfo is regional but does not have any replicaZones specified: %v", zoneInfo) + } + return getProjectsAPIEndpoint + fmt.Sprintf( + diskTypeURITemplateRegional, + manager.gce.projectID, + diskRegion, + diskType), nil + case nil: + return "", fmt.Errorf("zoneInfo nil") + default: + return "", fmt.Errorf("zoneInfo has unexpected type %T", zoneInfo) + } +} + +func (manager *gceServiceManager) getReplicaZoneURI(zone string) string { + getProjectsAPIEndpoint := manager.getProjectsAPIEndpoint() + if manager.gce.AlphaFeatureGate.Enabled(AlphaFeatureGCEDisk) { + getProjectsAPIEndpoint = manager.getProjectsAPIEndpointAlpha() + } + + return getProjectsAPIEndpoint + fmt.Sprintf( + replicaZoneURITemplateSingleZone, + manager.gce.projectID, + zone) +} + +func (manager *gceServiceManager) getRegionFromZone(zoneInfo zoneType) (string, error) { + var zone string + switch zoneInfo := zoneInfo.(type) { + case singleZone: + if zoneInfo.zone == "" { + // Unexpected, but sanity-check + return "", fmt.Errorf("PD is single zone, but zone is not specified: %#v", zoneInfo) + } + + zone = zoneInfo.zone + case multiZone: + if zoneInfo.replicaZones == nil || zoneInfo.replicaZones.Len() <= 0 { + // Unexpected, but sanity-check + return "", fmt.Errorf("PD is regional but does not have any replicaZones specified: %v", zoneInfo) + } + + zone = zoneInfo.replicaZones.UnsortedList()[0] + case nil: + // Unexpected, but sanity-check + return "", fmt.Errorf("zoneInfo is nil") + default: + // Unexpected, but sanity-check + return "", fmt.Errorf("zoneInfo has unexpected type %T", zoneInfo) + } + + region, err := GetGCERegion(zone) + if err != nil { + glog.Warningf("failed to parse GCE region from zone %q: %v", zone, err) + region = manager.gce.region + } + + return region, nil +} + +func (manager *gceServiceManager) ResizeDiskOnCloudProvider(disk *GCEDisk, sizeGb int64, zone string) (gceObject, error) { + if manager.gce.AlphaFeatureGate.Enabled(AlphaFeatureGCEDisk) { + resizeServiceRequest := &computealpha.DisksResizeRequest{ + SizeGb: sizeGb, + } + return manager.gce.serviceAlpha.Disks.Resize(manager.gce.projectID, zone, disk.Name, resizeServiceRequest).Do() + + } + resizeServiceRequest := &compute.DisksResizeRequest{ + SizeGb: sizeGb, + } + return manager.gce.service.Disks.Resize(manager.gce.projectID, zone, disk.Name, resizeServiceRequest).Do() +} + +func (manager *gceServiceManager) RegionalResizeDiskOnCloudProvider(disk *GCEDisk, sizeGb int64) (gceObject, error) { + if manager.gce.AlphaFeatureGate.Enabled(AlphaFeatureGCEDisk) { + resizeServiceRequest := &computealpha.RegionDisksResizeRequest{ + SizeGb: sizeGb, + } + return manager.gce.serviceAlpha.RegionDisks.Resize(manager.gce.projectID, disk.Region, disk.Name, resizeServiceRequest).Do() + } + return nil, fmt.Errorf("RegionalResizeDiskOnCloudProvider is a regional PD feature and is only available via the GCE Alpha API. Enable \"GCEDiskAlphaAPI\" in the list of \"alpha-features\" in \"gce.conf\" to use the feature.") +} + // Disks is interface for manipulation with GCE PDs. 
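Zonal versus regional behavior throughout the manager hinges on the zoneType variants (singleZone and multiZone) carried in GCEDisk. A compact sketch of that dispatch, using simplified local types rather than the provider's own definitions:

package main

import "fmt"

// Simplified stand-ins for the provider's zoneType, singleZone and multiZone.
type zoneType interface{ isZoneType() }

type singleZone struct{ zone string }
type multiZone struct{ replicaZones []string }

func (singleZone) isZoneType() {}
func (multiZone) isZoneType()  {}

// describe dispatches on the concrete zone info, the same way the disk
// manager chooses between zonal and regional GCE API calls.
func describe(zi zoneType) string {
	switch z := zi.(type) {
	case singleZone:
		return "zonal PD in " + z.zone
	case multiZone:
		return fmt.Sprintf("regional PD replicated across %v", z.replicaZones)
	default:
		return "unexpected zone info"
	}
}

func main() {
	fmt.Println(describe(singleZone{zone: "us-central1-b"}))
	fmt.Println(describe(multiZone{replicaZones: []string{"us-central1-b", "us-central1-c"}}))
}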
type Disks interface { // AttachDisk attaches given disk to the node with the specified NodeName. @@ -81,6 +529,9 @@ type Disks interface { // DeleteDisk deletes PD. DeleteDisk(diskToDelete string) error + // ResizeDisk resizes PD and returns new disk size + ResizeDisk(diskToResize string, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error) + // GetAutoLabelsForPD returns labels to apply to PersistentVolume // representing this PD, namely failure domain and zone. // zone can be provided to specify the zone for the PD, @@ -100,6 +551,7 @@ type GCEDisk struct { Name string Kind string Type string + SizeGb int64 } type zoneType interface { @@ -152,7 +604,7 @@ func (gce *GCECloud) AttachDisk(diskName string, nodeName types.NodeName, readOn // Try fetching as regional PD var disk *GCEDisk var mc *metricContext - if gce.AlphaFeatureGate.Enabled(GCEDiskAlphaFeatureGate) { + if gce.AlphaFeatureGate.Enabled(AlphaFeatureGCEDisk) { disk, err = gce.getRegionalDiskByName(diskName) if err != nil { glog.V(5).Infof("Could not find regional PD named %q to Attach. Will look for a zonal PD", diskName) @@ -175,7 +627,7 @@ func (gce *GCECloud) AttachDisk(diskName string, nodeName types.NodeName, readOn readWrite = "READ_ONLY" } - attachOp, err := gce.manager.AttachDisk( + attachOp, err := gce.manager.AttachDiskOnCloudProvider( disk, readWrite, instance.Zone, instance.Name) if err != nil { @@ -202,7 +654,7 @@ func (gce *GCECloud) DetachDisk(devicePath string, nodeName types.NodeName) erro } mc := newDiskMetricContextZonal("detach", gce.region, inst.Zone) - detachOp, err := gce.manager.DetachDisk(inst.Zone, inst.Name, devicePath) + detachOp, err := gce.manager.DetachDiskOnCloudProvider(inst.Zone, inst.Name, devicePath) if err != nil { return mc.Observe(err) } @@ -273,11 +725,14 @@ func (gce *GCECloud) DisksAreAttached(diskNames []string, nodeName types.NodeNam // JSON in Description field. func (gce *GCECloud) CreateDisk( name string, diskType string, zone string, sizeGb int64, tags map[string]string) error { - - // Do not allow creation of PDs in zones that are not managed. Such PDs - // then cannot be deleted by DeleteDisk. - if isManaged := gce.verifyZoneIsManaged(zone); !isManaged { - return fmt.Errorf("kubernetes does not manage zone %q", zone) + // Do not allow creation of PDs in zones that are do not have nodes. Such PDs + // are not currently usable. + curZones, err := gce.GetAllCurrentZones() + if err != nil { + return err + } + if !curZones.Has(zone) { + return fmt.Errorf("kubernetes does not have a node in zone %q", zone) } tagsStr, err := gce.encodeDiskTags(tags) @@ -292,7 +747,7 @@ func (gce *GCECloud) CreateDisk( mc := newDiskMetricContextZonal("create", gce.region, zone) - createOp, err := gce.manager.CreateDisk( + createOp, err := gce.manager.CreateDiskOnCloudProvider( name, sizeGb, tagsStr, diskType, zone) if isGCEError(err, "alreadyExists") { @@ -316,17 +771,16 @@ func (gce *GCECloud) CreateDisk( func (gce *GCECloud) CreateRegionalDisk( name string, diskType string, replicaZones sets.String, sizeGb int64, tags map[string]string) error { - // Do not allow creation of PDs in zones that are not managed. Such PDs - // then cannot be deleted by DeleteDisk. - unmanagedZones := []string{} - for _, zone := range replicaZones.UnsortedList() { - if isManaged := gce.verifyZoneIsManaged(zone); !isManaged { - unmanagedZones = append(unmanagedZones, zone) - } + // Do not allow creation of PDs in zones that are do not have nodes. Such PDs + // are not currently usable. 
This functionality should be reverted to checking + // against managed zones if we want users to be able to create RegionalDisks + // in zones where there are no nodes + curZones, err := gce.GetAllCurrentZones() + if err != nil { + return err } - - if len(unmanagedZones) > 0 { - return fmt.Errorf("kubernetes does not manage specified zones: %q. Managed Zones: %q", unmanagedZones, gce.managedZones) + if !curZones.IsSuperset(replicaZones) { + return fmt.Errorf("kubernetes does not have nodes in specified zones: %q. Zones that contain nodes: %q", replicaZones.Difference(curZones), curZones) } tagsStr, err := gce.encodeDiskTags(tags) @@ -341,7 +795,7 @@ func (gce *GCECloud) CreateRegionalDisk( mc := newDiskMetricContextRegional("create", gce.region) - createOp, err := gce.manager.CreateRegionalDisk( + createOp, err := gce.manager.CreateRegionalDiskOnCloudProvider( name, sizeGb, tagsStr, diskType, replicaZones) if isGCEError(err, "alreadyExists") { @@ -359,16 +813,6 @@ func (gce *GCECloud) CreateRegionalDisk( return err } -func (gce *GCECloud) verifyZoneIsManaged(zone string) bool { - for _, managedZone := range gce.managedZones { - if zone == managedZone { - return true - } - } - - return false -} - func getDiskType(diskType string) (string, error) { switch diskType { case DiskTypeSSD, DiskTypeStandard: @@ -392,6 +836,57 @@ func (gce *GCECloud) DeleteDisk(diskToDelete string) error { return err } +// ResizeDisk expands given disk and returns new disk size +func (gce *GCECloud) ResizeDisk(diskToResize string, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error) { + disk, err := gce.GetDiskByNameUnknownZone(diskToResize) + if err != nil { + return oldSize, err + } + + requestBytes := newSize.Value() + // GCE resizes in chunks of GBs (not GiB) + requestGB := volume.RoundUpSize(requestBytes, 1000*1000*1000) + newSizeQuant := resource.MustParse(fmt.Sprintf("%dG", requestGB)) + + // If disk is already of size equal or greater than requested size, we simply return + if disk.SizeGb >= requestGB { + return newSizeQuant, nil + } + + var mc *metricContext + + switch zoneInfo := disk.ZoneInfo.(type) { + case singleZone: + mc = newDiskMetricContextZonal("resize", disk.Region, zoneInfo.zone) + resizeOp, err := gce.manager.ResizeDiskOnCloudProvider(disk, requestGB, zoneInfo.zone) + + if err != nil { + return oldSize, mc.Observe(err) + } + waitErr := gce.manager.WaitForZoneOp(resizeOp, zoneInfo.zone, mc) + if waitErr != nil { + return oldSize, waitErr + } + return newSizeQuant, nil + case multiZone: + mc = newDiskMetricContextRegional("resize", disk.Region) + resizeOp, err := gce.manager.RegionalResizeDiskOnCloudProvider(disk, requestGB) + + if err != nil { + return oldSize, mc.Observe(err) + } + waitErr := gce.manager.WaitForRegionalOp(resizeOp, mc) + if waitErr != nil { + return oldSize, waitErr + } + return newSizeQuant, nil + case nil: + return oldSize, fmt.Errorf("PD has nil ZoneInfo: %v", disk) + default: + return oldSize, fmt.Errorf("disk.ZoneInfo has unexpected type %T", zoneInfo) + } +} + // Builds the labels that should be automatically added to a PersistentVolume backed by a GCE PD // Specifically, this builds FailureDomain (zone) and Region labels. // The PersistentVolumeLabel admission controller calls this and adds the labels when a PV is created. 
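The resize path above rounds the requested size up to whole GB (10^9 bytes, not GiB) before calling the GCE API. A small sketch of that rounding, with the ceiling division that volume.RoundUpSize performs written out locally:

package main

import "fmt"

// roundUpSize returns the number of allocation units needed to hold
// volumeSizeBytes, rounding up (the behavior relied on above).
func roundUpSize(volumeSizeBytes, allocationUnitBytes int64) int64 {
	return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes
}

func main() {
	// Hypothetical request: 10GiB expressed in bytes.
	requestBytes := int64(10) * 1024 * 1024 * 1024

	// GCE sizes disks in GB, so a 10GiB request becomes an 11GB disk.
	requestGB := roundUpSize(requestBytes, 1000*1000*1000)
	fmt.Printf("%dG\n", requestGB)
}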
@@ -469,7 +964,7 @@ func (gce *GCECloud) GetAutoLabelsForPD(name string, zone string) (map[string]st // If not found, returns (nil, nil) func (gce *GCECloud) findDiskByName(diskName string, zone string) (*GCEDisk, error) { mc := newDiskMetricContextZonal("get", gce.region, zone) - disk, err := gce.manager.GetDisk(zone, diskName) + disk, err := gce.manager.GetDiskFromCloudProvider(zone, diskName) if err == nil { return disk, mc.Observe(nil) } @@ -492,7 +987,7 @@ func (gce *GCECloud) getDiskByName(diskName string, zone string) (*GCEDisk, erro // If not found, returns (nil, nil) func (gce *GCECloud) findRegionalDiskByName(diskName string) (*GCEDisk, error) { mc := newDiskMetricContextRegional("get", gce.region) - disk, err := gce.manager.GetRegionalDisk(diskName) + disk, err := gce.manager.GetRegionalDiskFromCloudProvider(diskName) if err == nil { return disk, mc.Observe(nil) } @@ -515,7 +1010,7 @@ func (gce *GCECloud) getRegionalDiskByName(diskName string) (*GCEDisk, error) { // Prefer getDiskByName, if the zone can be established // Return cloudprovider.DiskNotFound if the given disk cannot be found in any zone func (gce *GCECloud) GetDiskByNameUnknownZone(diskName string) (*GCEDisk, error) { - if gce.AlphaFeatureGate.Enabled(GCEDiskAlphaFeatureGate) { + if gce.AlphaFeatureGate.Enabled(AlphaFeatureGCEDisk) { regionalDisk, err := gce.getRegionalDiskByName(diskName) if err == nil { return regionalDisk, err @@ -593,14 +1088,14 @@ func (gce *GCECloud) doDeleteDisk(diskToDelete string) error { switch zoneInfo := disk.ZoneInfo.(type) { case singleZone: mc = newDiskMetricContextZonal("delete", disk.Region, zoneInfo.zone) - deleteOp, err := gce.manager.DeleteDisk(zoneInfo.zone, disk.Name) + deleteOp, err := gce.manager.DeleteDiskOnCloudProvider(zoneInfo.zone, disk.Name) if err != nil { return mc.Observe(err) } return gce.manager.WaitForZoneOp(deleteOp, zoneInfo.zone, mc) case multiZone: mc = newDiskMetricContextRegional("delete", disk.Region) - deleteOp, err := gce.manager.DeleteRegionalDisk(disk.Name) + deleteOp, err := gce.manager.DeleteRegionalDiskOnCloudProvider(disk.Name) if err != nil { return mc.Observe(err) } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_healthchecks.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_healthchecks.go index 85f077f0007c..7c71f5ae3919 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_healthchecks.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_healthchecks.go @@ -22,6 +22,7 @@ import ( utilversion "k8s.io/kubernetes/pkg/util/version" "github.com/golang/glog" + computealpha "google.golang.org/api/compute/v0.alpha" compute "google.golang.org/api/compute/v1" ) @@ -43,7 +44,11 @@ func init() { } func newHealthcheckMetricContext(request string) *metricContext { - return newGenericMetricContext("healthcheck", request, unusedMetricLabel, unusedMetricLabel, computeV1Version) + return newHealthcheckMetricContextWithVersion(request, computeV1Version) +} + +func newHealthcheckMetricContextWithVersion(request, version string) *metricContext { + return newGenericMetricContext("healthcheck", request, unusedMetricLabel, unusedMetricLabel, version) } // GetHttpHealthCheck returns the given HttpHealthCheck by name. @@ -155,6 +160,13 @@ func (gce *GCECloud) GetHealthCheck(name string) (*compute.HealthCheck, error) { return v, mc.Observe(err) } +// GetAlphaHealthCheck returns the given alpha HealthCheck by name. 
+func (gce *GCECloud) GetAlphaHealthCheck(name string) (*computealpha.HealthCheck, error) { + mc := newHealthcheckMetricContextWithVersion("get", computeAlphaVersion) + v, err := gce.serviceAlpha.HealthChecks.Get(gce.projectID, name).Do() + return v, mc.Observe(err) +} + // UpdateHealthCheck applies the given HealthCheck as an update. func (gce *GCECloud) UpdateHealthCheck(hc *compute.HealthCheck) error { mc := newHealthcheckMetricContext("update") @@ -166,6 +178,17 @@ func (gce *GCECloud) UpdateHealthCheck(hc *compute.HealthCheck) error { return gce.waitForGlobalOp(op, mc) } +// UpdateAlphaHealthCheck applies the given alpha HealthCheck as an update. +func (gce *GCECloud) UpdateAlphaHealthCheck(hc *computealpha.HealthCheck) error { + mc := newHealthcheckMetricContextWithVersion("update", computeAlphaVersion) + op, err := gce.serviceAlpha.HealthChecks.Update(gce.projectID, hc.Name, hc).Do() + if err != nil { + return mc.Observe(err) + } + + return gce.waitForGlobalOp(op, mc) +} + // DeleteHealthCheck deletes the given HealthCheck by name. func (gce *GCECloud) DeleteHealthCheck(name string) error { mc := newHealthcheckMetricContext("delete") @@ -188,6 +211,17 @@ func (gce *GCECloud) CreateHealthCheck(hc *compute.HealthCheck) error { return gce.waitForGlobalOp(op, mc) } +// CreateAlphaHealthCheck creates the given alpha HealthCheck. +func (gce *GCECloud) CreateAlphaHealthCheck(hc *computealpha.HealthCheck) error { + mc := newHealthcheckMetricContextWithVersion("create", computeAlphaVersion) + op, err := gce.serviceAlpha.HealthChecks.Insert(gce.projectID, hc).Do() + if err != nil { + return mc.Observe(err) + } + + return gce.waitForGlobalOp(op, mc) +} + // ListHealthChecks lists all HealthCheck in the project. func (gce *GCECloud) ListHealthChecks() (*compute.HealthCheckList, error) { mc := newHealthcheckMetricContext("list") diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_instances.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_instances.go index c6e31dee61e8..2a17d4429994 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_instances.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_instances.go @@ -17,7 +17,6 @@ limitations under the License. package gce import ( - "errors" "fmt" "net" "net/http" @@ -117,19 +116,35 @@ func (gce *GCECloud) NodeAddressesByProviderID(providerID string) ([]v1.NodeAddr return nodeAddresses, nil } +// instanceByProviderID returns the cloudprovider instance of the node +// with the specified unique providerID +func (gce *GCECloud) instanceByProviderID(providerID string) (*gceInstance, error) { + project, zone, name, err := splitProviderID(providerID) + if err != nil { + return nil, err + } + + instance, err := gce.getInstanceFromProjectInZoneByName(project, zone, name) + if err != nil { + if isHTTPErrorCode(err, http.StatusNotFound) { + return nil, cloudprovider.InstanceNotFound + } + return nil, err + } + + return instance, nil +} + // InstanceTypeByProviderID returns the cloudprovider instance type of the node // with the specified unique providerID This method will not be called from the // node that is requesting this ID. i.e. 
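instanceByProviderID and InstanceExistsByProviderID above rely on splitProviderID to recover the project, zone and instance name. A rough sketch of that parsing, assuming the usual gce://<project>/<zone>/<instance> form; the helper here is a simplified stand-in, not the provider's implementation:

package main

import (
	"fmt"
	"strings"
)

// splitProviderID is a simplified stand-in, assuming the usual
// "gce://<project>/<zone>/<instance>" provider ID form.
func splitProviderID(providerID string) (project, zone, name string, err error) {
	rest := strings.TrimPrefix(providerID, "gce://")
	parts := strings.Split(rest, "/")
	if rest == providerID || len(parts) != 3 {
		return "", "", "", fmt.Errorf("unexpected providerID format: %q", providerID)
	}
	return parts[0], parts[1], parts[2], nil
}

func main() {
	project, zone, name, err := splitProviderID("gce://my-project/us-central1-b/node-1")
	if err != nil {
		panic(err)
	}
	// A "not found" lookup maps to (false, nil) in InstanceExistsByProviderID,
	// which lets the cloud controller manager delete the stale Node object.
	fmt.Println(project, zone, name)
}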
metadata service and other local // methods cannot be used here func (gce *GCECloud) InstanceTypeByProviderID(providerID string) (string, error) { - project, zone, name, err := splitProviderID(providerID) - if err != nil { - return "", err - } - instance, err := gce.getInstanceFromProjectInZoneByName(project, zone, name) + instance, err := gce.instanceByProviderID(providerID) if err != nil { return "", err } + return instance.Type, nil } @@ -157,7 +172,15 @@ func (gce *GCECloud) ExternalID(nodeName types.NodeName) (string, error) { // InstanceExistsByProviderID returns true if the instance with the given provider id still exists and is running. // If false is returned with no error, the instance will be immediately deleted by the cloud controller manager. func (gce *GCECloud) InstanceExistsByProviderID(providerID string) (bool, error) { - return false, errors.New("unimplemented") + _, err := gce.instanceByProviderID(providerID) + if err != nil { + if err == cloudprovider.InstanceNotFound { + return false, nil + } + return false, err + } + + return true, nil } // InstanceID returns the cloud provider ID of the node with the specified NodeName. @@ -250,35 +273,39 @@ func (gce *GCECloud) AddSSHKeyToAllInstances(user string, keyData []byte) error }) } -// GetAllZones returns all the zones in which nodes are running -func (gce *GCECloud) GetAllZones() (sets.String, error) { - // Fast-path for non-multizone - if len(gce.managedZones) == 1 { - return sets.NewString(gce.managedZones...), nil +// GetAllCurrentZones returns all the zones in which k8s nodes are currently running +func (gce *GCECloud) GetAllCurrentZones() (sets.String, error) { + if gce.nodeInformerSynced == nil { + glog.Warningf("GCECloud object does not have informers set, should only happen in E2E binary.") + return gce.GetAllZonesFromCloudProvider() + } + gce.nodeZonesLock.Lock() + defer gce.nodeZonesLock.Unlock() + if !gce.nodeInformerSynced() { + return nil, fmt.Errorf("Node informer is not synced when trying to GetAllCurrentZones") } + zones := sets.NewString() + for zone, nodes := range gce.nodeZones { + if len(nodes) > 0 { + zones.Insert(zone) + } + } + return zones, nil +} - // TODO: Caching, but this is currently only called when we are creating a volume, - // which is a relatively infrequent operation, and this is only # zones API calls +// GetAllZonesFromCloudProvider returns all the zones in which nodes are running +// Only use this in E2E tests to get zones, on real clusters this will +// get all zones with compute instances in them even if not k8s instances!!! +// ex. I have k8s nodes in us-central1-c and us-central1-b. I also have +// a non-k8s compute in us-central1-a. This func will return a,b, and c. +func (gce *GCECloud) GetAllZonesFromCloudProvider() (sets.String, error) { zones := sets.NewString() - // TODO: Parallelize, although O(zones) so not too bad (N <= 3 typically) for _, zone := range gce.managedZones { mc := newInstancesMetricContext("list", zone) // We only retrieve one page in each zone - we only care about existence listCall := gce.service.Instances.List(gce.projectID, zone) - // No filter: We assume that a zone is either used or unused - // We could only consider running nodes (like we do in List above), - // but probably if instances are starting we still want to consider them. 
- // I think we should wait until we have a reason to make the - // call one way or the other; we generally can't guarantee correct - // volume spreading if the set of zones is changing - // (and volume spreading is currently only a heuristic). - // Long term we want to replace GetAllZones (which primarily supports volume - // spreading) with a scheduler policy that is able to see the global state of - // volumes and the health of zones. - - // Just a minimal set of fields - we only care about existence listCall = listCall.Fields("items(name)") res, err := listCall.Do() if err != nil { @@ -294,6 +321,34 @@ func (gce *GCECloud) GetAllZones() (sets.String, error) { return zones, nil } +// InsertInstance creates a new instance on GCP +func (gce *GCECloud) InsertInstance(project string, zone string, rb *compute.Instance) error { + mc := newInstancesMetricContext("create", zone) + op, err := gce.service.Instances.Insert(project, zone, rb).Do() + if err != nil { + return mc.Observe(err) + } + return gce.waitForZoneOp(op, zone, mc) +} + +// ListInstanceNames returns a string of instance names seperated by spaces. +func (gce *GCECloud) ListInstanceNames(project, zone string) (string, error) { + res, err := gce.service.Instances.List(project, zone).Fields("items(name)").Do() + if err != nil { + return "", err + } + var output string + for _, item := range res.Items { + output += item.Name + " " + } + return output, nil +} + +// DeleteInstance deletes an instance specified by project, zone, and name +func (gce *GCECloud) DeleteInstance(project, zone, name string) (*compute.Operation, error) { + return gce.service.Instances.Delete(project, zone, name).Do() +} + // Implementation of Instances.CurrentNodeName func (gce *GCECloud) CurrentNodeName(hostname string) (types.NodeName, error) { return types.NodeName(hostname), nil @@ -354,7 +409,7 @@ func (gce *GCECloud) AddAliasToInstance(nodeName types.NodeName, alias *net.IPNe mc := newInstancesMetricContext("addalias", v1instance.Zone) op, err := gce.serviceAlpha.Instances.UpdateNetworkInterface( - gce.projectID, instance.Zone, instance.Name, iface.Name, iface).Do() + gce.projectID, lastComponent(instance.Zone), instance.Name, iface.Name, iface).Do() if err != nil { return mc.Observe(err) } @@ -447,6 +502,7 @@ func (gce *GCECloud) getInstanceByName(name string) (*gceInstance, error) { if isHTTPErrorCode(err, http.StatusNotFound) { continue } + glog.Errorf("getInstanceByName: failed to get instance %s in zone %s; err: %v", name, zone, err) return nil, err } return instance, nil @@ -461,7 +517,6 @@ func (gce *GCECloud) getInstanceFromProjectInZoneByName(project, zone, name stri res, err := gce.service.Instances.Get(project, zone, name).Do() mc.Observe(err) if err != nil { - glog.Errorf("getInstanceFromProjectInZoneByName: failed to get instance %s; err: %v", name, err) return nil, err } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer.go index 1811c7fec293..6b8995b2f501 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer.go @@ -57,7 +57,7 @@ func init() { panic("Incorrect default GCE L7 source ranges") } - flag.Var(&lbSrcRngsFlag, "cloud-provider-gce-lb-src-cidrs", "CIDRS opened in GCE firewall for LB traffic proxy & health checks") + flag.Var(&lbSrcRngsFlag, "cloud-provider-gce-lb-src-cidrs", "CIDRs opened in GCE firewall for 
LB traffic proxy & health checks") } // String is the method to format the flag's value, part of the flag.Value interface. @@ -72,7 +72,7 @@ func (c *cidrs) Set(value string) error { c.isSet = true c.ipn = make(netsets.IPNet) } else { - return fmt.Errorf("GCE LB CIDRS have already been set") + return fmt.Errorf("GCE LB CIDRs have already been set") } for _, cidr := range strings.Split(value, ",") { diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_external.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_external.go index 0d07647c7a00..aabef25feb79 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_external.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_external.go @@ -66,18 +66,17 @@ func (gce *GCECloud) ensureExternalLoadBalancer(clusterName, clusterID string, a affinityType := apiService.Spec.SessionAffinity serviceName := types.NamespacedName{Namespace: apiService.Namespace, Name: apiService.Name} - glog.V(2).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v, %v)", - loadBalancerName, gce.region, requestedIP, portStr, hostNames, serviceName, apiService.Annotations) - lbRefStr := fmt.Sprintf("%v(%v)", loadBalancerName, serviceName) + glog.V(2).Infof("ensureExternalLoadBalancer(%s, %v, %v, %v, %v, %v)", lbRefStr, gce.region, requestedIP, portStr, hostNames, apiService.Annotations) + // Check the current and the desired network tiers. If they do not match, // tear down the existing resources with the wrong tier. netTier, err := gce.getServiceNetworkTier(apiService) if err != nil { - glog.Errorf("EnsureLoadBalancer(%s): failed to get the desired network tier: %v", lbRefStr, err) + glog.Errorf("ensureExternalLoadBalancer(%s): Failed to get the desired network tier: %v.", lbRefStr, err) return nil, err } - glog.V(4).Infof("EnsureLoadBalancer(%s): desired network tier %q ", lbRefStr, netTier) + glog.V(4).Infof("ensureExternalLoadBalancer(%s): Desired network tier %q.", lbRefStr, netTier) if gce.AlphaFeatureGate.Enabled(AlphaFeatureNetworkTiers) { gce.deleteWrongNetworkTieredResources(loadBalancerName, lbRefStr, netTier) } @@ -88,8 +87,7 @@ func (gce *GCECloud) ensureExternalLoadBalancer(clusterName, clusterID string, a return nil, err } if !fwdRuleExists { - glog.V(2).Infof("Forwarding rule %v for Service %v/%v doesn't exist", - loadBalancerName, apiService.Namespace, apiService.Name) + glog.V(2).Infof("ensureExternalLoadBalancer(%s): Forwarding rule %v doesn't exist.", lbRefStr, loadBalancerName) } // Make sure we know which IP address will be used and have properly reserved @@ -124,14 +122,14 @@ func (gce *GCECloud) ensureExternalLoadBalancer(clusterName, clusterID string, a } if isSafeToReleaseIP { if err := gce.DeleteRegionAddress(loadBalancerName, gce.region); err != nil && !isNotFound(err) { - glog.Errorf("Failed to release static IP %s for load balancer (%v(%v), %v): %v", ipAddressToUse, loadBalancerName, serviceName, gce.region, err) + glog.Errorf("ensureExternalLoadBalancer(%s): Failed to release static IP %s in region %v: %v.", lbRefStr, ipAddressToUse, gce.region, err) } else if isNotFound(err) { - glog.V(2).Infof("EnsureLoadBalancer(%v(%v)): address %s is not reserved.", loadBalancerName, serviceName, ipAddressToUse) + glog.V(2).Infof("ensureExternalLoadBalancer(%s): IP address %s is not reserved.", lbRefStr, ipAddressToUse) } else { - glog.V(2).Infof("EnsureLoadBalancer(%v(%v)): released static IP %s", loadBalancerName, serviceName, 
ipAddressToUse) + glog.Infof("ensureExternalLoadBalancer(%s): Released static IP %s.", lbRefStr, ipAddressToUse) } } else { - glog.Warningf("orphaning static IP %s during update of load balancer (%v(%v), %v): %v", ipAddressToUse, loadBalancerName, serviceName, gce.region, err) + glog.Warningf("ensureExternalLoadBalancer(%s): Orphaning static IP %s in region %v: %v.", lbRefStr, ipAddressToUse, gce.region, err) } }() @@ -150,9 +148,9 @@ func (gce *GCECloud) ensureExternalLoadBalancer(clusterName, clusterID string, a // emphemeral IP used by the fwd rule, or create a new static IP. ipAddr, existed, err := ensureStaticIP(gce, loadBalancerName, serviceName.String(), gce.region, fwdRuleIP, netTier) if err != nil { - return nil, fmt.Errorf("failed to ensure a static IP for the LB: %v", err) + return nil, fmt.Errorf("failed to ensure a static IP for load balancer (%s): %v", lbRefStr, err) } - glog.V(4).Infof("EnsureLoadBalancer(%s): ensured IP address %s (tier: %s)", lbRefStr, ipAddr, netTier) + glog.Infof("ensureExternalLoadBalancer(%s): Ensured IP address %s (tier: %s).", lbRefStr, ipAddr, netTier) // If the IP was not owned by the user, but it already existed, it // could indicate that the previous update cycle failed. We can use // this IP and try to run through the process again, but we should @@ -180,17 +178,17 @@ func (gce *GCECloud) ensureExternalLoadBalancer(clusterName, clusterID string, a // Unlike forwarding rules and target pools, firewalls can be updated // without needing to be deleted and recreated. if firewallExists { - glog.Infof("EnsureLoadBalancer(%v(%v)): updating firewall", loadBalancerName, serviceName) - if err := gce.updateFirewall(apiService, makeFirewallName(loadBalancerName), gce.region, desc, sourceRanges, ports, hosts); err != nil { + glog.Infof("ensureExternalLoadBalancer(%s): Updating firewall.", lbRefStr) + if err := gce.updateFirewall(apiService, MakeFirewallName(loadBalancerName), gce.region, desc, sourceRanges, ports, hosts); err != nil { return nil, err } - glog.Infof("EnsureLoadBalancer(%v(%v)): updated firewall", loadBalancerName, serviceName) + glog.Infof("ensureExternalLoadBalancer(%s): Updated firewall.", lbRefStr) } else { - glog.Infof("EnsureLoadBalancer(%v(%v)): creating firewall", loadBalancerName, serviceName) - if err := gce.createFirewall(apiService, makeFirewallName(loadBalancerName), gce.region, desc, sourceRanges, ports, hosts); err != nil { + glog.Infof("ensureExternalLoadBalancer(%s): Creating firewall.", lbRefStr) + if err := gce.createFirewall(apiService, MakeFirewallName(loadBalancerName), gce.region, desc, sourceRanges, ports, hosts); err != nil { return nil, err } - glog.Infof("EnsureLoadBalancer(%v(%v)): created firewall", loadBalancerName, serviceName) + glog.Infof("ensureExternalLoadBalancer(%s): Created firewall.", lbRefStr) } } @@ -199,7 +197,7 @@ func (gce *GCECloud) ensureExternalLoadBalancer(clusterName, clusterID string, a return nil, err } if !tpExists { - glog.Infof("Target pool %v for Service %v/%v doesn't exist", loadBalancerName, apiService.Namespace, apiService.Name) + glog.Infof("ensureExternalLoadBalancer(%s): Target pool for service doesn't exist.", lbRefStr) } // Check which health check needs to create and which health check needs to delete. 
@@ -207,33 +205,33 @@ func (gce *GCECloud) ensureExternalLoadBalancer(clusterName, clusterID string, a var hcToCreate, hcToDelete *compute.HttpHealthCheck hcLocalTrafficExisting, err := gce.GetHttpHealthCheck(loadBalancerName) if err != nil && !isHTTPErrorCode(err, http.StatusNotFound) { - return nil, fmt.Errorf("error checking HTTP health check %s: %v", loadBalancerName, err) + return nil, fmt.Errorf("error checking HTTP health check for load balancer (%s): %v", lbRefStr, err) } if path, healthCheckNodePort := apiservice.GetServiceHealthCheckPathPort(apiService); path != "" { - glog.V(4).Infof("service %v (%v) needs local traffic health checks on: %d%s)", apiService.Name, loadBalancerName, healthCheckNodePort, path) + glog.V(4).Infof("ensureExternalLoadBalancer(%s): Service needs local traffic health checks on: %d%s.", lbRefStr, healthCheckNodePort, path) if hcLocalTrafficExisting == nil { // This logic exists to detect a transition for non-OnlyLocal to OnlyLocal service // turn on the tpNeedsUpdate flag to delete/recreate fwdrule/tpool updating the // target pool to use local traffic health check. - glog.V(2).Infof("Updating from nodes health checks to local traffic health checks for service %v LB %v", apiService.Name, loadBalancerName) + glog.V(2).Infof("ensureExternalLoadBalancer(%s): Updating from nodes health checks to local traffic health checks.", lbRefStr) if supportsNodesHealthCheck { - hcToDelete = makeHttpHealthCheck(makeNodesHealthCheckName(clusterID), GetNodesHealthCheckPath(), GetNodesHealthCheckPort()) + hcToDelete = makeHttpHealthCheck(MakeNodesHealthCheckName(clusterID), GetNodesHealthCheckPath(), GetNodesHealthCheckPort()) } tpNeedsUpdate = true } hcToCreate = makeHttpHealthCheck(loadBalancerName, path, healthCheckNodePort) } else { - glog.V(4).Infof("Service %v needs nodes health checks.", apiService.Name) + glog.V(4).Infof("ensureExternalLoadBalancer(%s): Service needs nodes health checks.", lbRefStr) if hcLocalTrafficExisting != nil { // This logic exists to detect a transition from OnlyLocal to non-OnlyLocal service // and turn on the tpNeedsUpdate flag to delete/recreate fwdrule/tpool updating the // target pool to use nodes health check. - glog.V(2).Infof("Updating from local traffic health checks to nodes health checks for service %v LB %v", apiService.Name, loadBalancerName) + glog.V(2).Infof("ensureExternalLoadBalancer(%s): Updating from local traffic health checks to nodes health checks.", lbRefStr) hcToDelete = hcLocalTrafficExisting tpNeedsUpdate = true } if supportsNodesHealthCheck { - hcToCreate = makeHttpHealthCheck(makeNodesHealthCheckName(clusterID), GetNodesHealthCheckPath(), GetNodesHealthCheckPort()) + hcToCreate = makeHttpHealthCheck(MakeNodesHealthCheckName(clusterID), GetNodesHealthCheckPath(), GetNodesHealthCheckPort()) } } // Now we get to some slightly more interesting logic. @@ -249,9 +247,9 @@ func (gce *GCECloud) ensureExternalLoadBalancer(clusterName, clusterID string, a // IP. That way we can come back to it later. 
isSafeToReleaseIP = false if err := gce.DeleteRegionForwardingRule(loadBalancerName, gce.region); err != nil && !isNotFound(err) { - return nil, fmt.Errorf("failed to delete existing forwarding rule %s for load balancer update: %v", loadBalancerName, err) + return nil, fmt.Errorf("failed to delete existing forwarding rule for load balancer (%s) update: %v", lbRefStr, err) } - glog.Infof("EnsureLoadBalancer(%v(%v)): deleted forwarding rule", loadBalancerName, serviceName) + glog.Infof("ensureExternalLoadBalancer(%s): Deleted forwarding rule.", lbRefStr) } if tpExists && tpNeedsUpdate { // Pass healthchecks to DeleteExternalTargetPoolAndChecks to cleanup health checks after cleaning up the target pool itself. @@ -260,9 +258,9 @@ func (gce *GCECloud) ensureExternalLoadBalancer(clusterName, clusterID string, a hcNames = append(hcNames, hcToDelete.Name) } if err := gce.DeleteExternalTargetPoolAndChecks(apiService, loadBalancerName, gce.region, clusterID, hcNames...); err != nil { - return nil, fmt.Errorf("failed to delete existing target pool %s for load balancer update: %v", loadBalancerName, err) + return nil, fmt.Errorf("failed to delete existing target pool for load balancer (%s) update: %v", lbRefStr, err) } - glog.Infof("EnsureLoadBalancer(%v(%v)): deleted target pool", loadBalancerName, serviceName) + glog.Infof("ensureExternalLoadBalancer(%s): Deleted target pool.", lbRefStr) } // Once we've deleted the resources (if necessary), build them back up (or for @@ -274,37 +272,37 @@ func (gce *GCECloud) ensureExternalLoadBalancer(clusterName, clusterID string, a } // Pass healthchecks to createTargetPool which needs them as health check links in the target pool if err := gce.createTargetPool(apiService, loadBalancerName, serviceName.String(), ipAddressToUse, gce.region, clusterID, createInstances, affinityType, hcToCreate); err != nil { - return nil, fmt.Errorf("failed to create target pool %s: %v", loadBalancerName, err) + return nil, fmt.Errorf("failed to create target pool for load balancer (%s): %v", lbRefStr, err) } if hcToCreate != nil { - glog.Infof("EnsureLoadBalancer(%v(%v)): created health checks %v for target pool", loadBalancerName, serviceName, hcToCreate.Name) + glog.Infof("ensureExternalLoadBalancer(%s): Created health checks %v.", lbRefStr, hcToCreate.Name) } if len(hosts) <= maxTargetPoolCreateInstances { - glog.Infof("EnsureLoadBalancer(%v(%v)): created target pool", loadBalancerName, serviceName) + glog.Infof("ensureExternalLoadBalancer(%s): Created target pool.", lbRefStr) } else { - glog.Infof("EnsureLoadBalancer(%v(%v)): created initial target pool (now updating with %d hosts)", loadBalancerName, serviceName, len(hosts)-maxTargetPoolCreateInstances) + glog.Infof("ensureExternalLoadBalancer(%s): Created initial target pool (now updating with %d hosts).", lbRefStr, len(hosts)-maxTargetPoolCreateInstances) created := sets.NewString() for _, host := range createInstances { created.Insert(host.makeComparableHostPath()) } if err := gce.updateTargetPool(loadBalancerName, created, hosts); err != nil { - return nil, fmt.Errorf("failed to update target pool %s: %v", loadBalancerName, err) + return nil, fmt.Errorf("failed to update target pool for load balancer (%s): %v", lbRefStr, err) } - glog.Infof("EnsureLoadBalancer(%v(%v)): updated target pool (with %d hosts)", loadBalancerName, serviceName, len(hosts)-maxTargetPoolCreateInstances) + glog.Infof("ensureExternalLoadBalancer(%s): Updated target pool (with %d hosts).", lbRefStr, len(hosts)-maxTargetPoolCreateInstances) } } if 
tpNeedsUpdate || fwdRuleNeedsUpdate { - glog.Infof("EnsureLoadBalancer(%v(%v)): creating forwarding rule, IP %s (tier: %s)", loadBalancerName, serviceName, ipAddressToUse, netTier) + glog.Infof("ensureExternalLoadBalancer(%s): Creating forwarding rule, IP %s (tier: %s).", lbRefStr, ipAddressToUse, netTier) if err := createForwardingRule(gce, loadBalancerName, serviceName.String(), gce.region, ipAddressToUse, gce.targetPoolURL(loadBalancerName), ports, netTier); err != nil { - return nil, fmt.Errorf("failed to create forwarding rule %s: %v", loadBalancerName, err) + return nil, fmt.Errorf("failed to create forwarding rule for load balancer (%s): %v", lbRefStr, err) } // End critical section. It is safe to release the static IP (which // just demotes it to ephemeral) now that it is attached. In the case // of a user-requested IP, the "is user-owned" flag will be set, // preventing it from actually being released. isSafeToReleaseIP = true - glog.Infof("EnsureLoadBalancer(%v(%v)): created forwarding rule, IP %s", loadBalancerName, serviceName, ipAddressToUse) + glog.Infof("ensureExternalLoadBalancer(%s): Created forwarding rule, IP %s.", lbRefStr, ipAddressToUse) } status := &v1.LoadBalancerStatus{} @@ -336,30 +334,36 @@ func (gce *GCECloud) updateExternalLoadBalancer(clusterName string, service *v1. // ensureExternalLoadBalancerDeleted is the external implementation of LoadBalancer.EnsureLoadBalancerDeleted func (gce *GCECloud) ensureExternalLoadBalancerDeleted(clusterName, clusterID string, service *v1.Service) error { loadBalancerName := cloudprovider.GetLoadBalancerName(service) + serviceName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name} + lbRefStr := fmt.Sprintf("%v(%v)", loadBalancerName, serviceName) var hcNames []string if path, _ := apiservice.GetServiceHealthCheckPathPort(service); path != "" { hcToDelete, err := gce.GetHttpHealthCheck(loadBalancerName) if err != nil && !isHTTPErrorCode(err, http.StatusNotFound) { - glog.Infof("Failed to retrieve health check %v:%v", loadBalancerName, err) + glog.Infof("ensureExternalLoadBalancerDeleted(%s): Failed to retrieve health check:%v.", lbRefStr, err) return err } - hcNames = append(hcNames, hcToDelete.Name) + // If we got 'StatusNotFound' LB was already deleted and it's safe to ignore. + if err == nil { + hcNames = append(hcNames, hcToDelete.Name) + } } else { // EnsureLoadBalancerDeleted() could be triggered by changing service from // LoadBalancer type to others. In this case we have no idea whether it was // using local traffic health check or nodes health check. Attempt to delete // both to prevent leaking. hcNames = append(hcNames, loadBalancerName) - hcNames = append(hcNames, makeNodesHealthCheckName(clusterID)) + hcNames = append(hcNames, MakeNodesHealthCheckName(clusterID)) } errs := utilerrors.AggregateGoroutines( func() error { - fwName := makeFirewallName(loadBalancerName) + glog.Infof("ensureExternalLoadBalancerDeleted(%s): Deleting firewall rule.", lbRefStr) + fwName := MakeFirewallName(loadBalancerName) err := ignoreNotFound(gce.DeleteFirewall(fwName)) if isForbidden(err) && gce.OnXPN() { - glog.V(4).Infof("ensureExternalLoadBalancerDeleted(%v): do not have permission to delete firewall rule (on XPN). Raising event.", loadBalancerName) + glog.V(4).Infof("ensureExternalLoadBalancerDeleted(%s): Do not have permission to delete firewall rule %v (on XPN). 
Raising event.", lbRefStr, fwName) gce.raiseFirewallChangeNeededEvent(service, FirewallToGCloudDeleteCmd(fwName, gce.NetworkProjectID())) return nil } @@ -368,13 +372,18 @@ func (gce *GCECloud) ensureExternalLoadBalancerDeleted(clusterName, clusterID st // Even though we don't hold on to static IPs for load balancers, it's // possible that EnsureLoadBalancer left one around in a failed // creation/update attempt, so make sure we clean it up here just in case. - func() error { return ignoreNotFound(gce.DeleteRegionAddress(loadBalancerName, gce.region)) }, func() error { + glog.Infof("ensureExternalLoadBalancerDeleted(%s): Deleting IP address.", lbRefStr) + return ignoreNotFound(gce.DeleteRegionAddress(loadBalancerName, gce.region)) + }, + func() error { + glog.Infof("ensureExternalLoadBalancerDeleted(%s): Deleting forwarding rule.", lbRefStr) // The forwarding rule must be deleted before either the target pool can, // unfortunately, so we have to do these two serially. if err := ignoreNotFound(gce.DeleteRegionForwardingRule(loadBalancerName, gce.region)); err != nil { return err } + glog.Infof("ensureExternalLoadBalancerDeleted(%s): Deleting target pool.", lbRefStr) if err := gce.DeleteExternalTargetPoolAndChecks(service, loadBalancerName, gce.region, clusterID, hcNames...); err != nil { return err } @@ -388,10 +397,13 @@ func (gce *GCECloud) ensureExternalLoadBalancerDeleted(clusterName, clusterID st } func (gce *GCECloud) DeleteExternalTargetPoolAndChecks(service *v1.Service, name, region, clusterID string, hcNames ...string) error { + serviceName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name} + lbRefStr := fmt.Sprintf("%v(%v)", name, serviceName) + if err := gce.DeleteTargetPool(name, region); err != nil && isHTTPErrorCode(err, http.StatusNotFound) { - glog.Infof("Target pool %s already deleted. Continuing to delete other resources.", name) + glog.Infof("DeleteExternalTargetPoolAndChecks(%v): Target pool already deleted. Continuing to delete other resources.", lbRefStr) } else if err != nil { - glog.Warningf("Failed to delete target pool %s, got error %s.", name, err.Error()) + glog.Warningf("DeleteExternalTargetPoolAndChecks(%v): Failed to delete target pool, got error %s.", lbRefStr, err.Error()) return err } @@ -406,14 +418,14 @@ func (gce *GCECloud) DeleteExternalTargetPoolAndChecks(service *v1.Service, name gce.sharedResourceLock.Lock() defer gce.sharedResourceLock.Unlock() } - glog.Infof("Deleting health check %v", hcName) + glog.Infof("DeleteExternalTargetPoolAndChecks(%v): Deleting health check %v.", lbRefStr, hcName) if err := gce.DeleteHttpHealthCheck(hcName); err != nil { // Delete nodes health checks will fail if any other target pool is using it. if isInUsedByError(err) { - glog.V(4).Infof("Health check %v is in used: %v.", hcName, err) + glog.V(4).Infof("DeleteExternalTargetPoolAndChecks(%v): Health check %v is in used: %v.", lbRefStr, hcName, err) return nil } else if !isHTTPErrorCode(err, http.StatusNotFound) { - glog.Warningf("Failed to delete health check %v: %v", hcName, err) + glog.Warningf("DeleteExternalTargetPoolAndChecks(%v): Failed to delete health check %v: %v.", lbRefStr, hcName, err) return err } // StatusNotFound could happen when: @@ -423,15 +435,15 @@ func (gce *GCECloud) DeleteExternalTargetPoolAndChecks(service *v1.Service, name // - This is a retry and in previous round we failed to delete the healthcheck firewall // after deleted the healthcheck. // We continue to delete the healthcheck firewall to prevent leaking. 
- glog.V(4).Infof("Health check %v is already deleted.", hcName) + glog.V(4).Infof("DeleteExternalTargetPoolAndChecks(%v): Health check %v is already deleted.", lbRefStr, hcName) } // If health check is deleted without error, it means no load-balancer is using it. // So we should delete the health check firewall as well. fwName := MakeHealthCheckFirewallName(clusterID, hcName, isNodesHealthCheck) - glog.Infof("Deleting firewall %v.", fwName) + glog.Infof("DeleteExternalTargetPoolAndChecks(%v): Deleting health check firewall %v.", lbRefStr, fwName) if err := ignoreNotFound(gce.DeleteFirewall(fwName)); err != nil { if isForbidden(err) && gce.OnXPN() { - glog.V(4).Infof("DeleteExternalTargetPoolAndChecks(%v): do not have permission to delete firewall rule (on XPN). Raising event.", hcName) + glog.V(4).Infof("DeleteExternalTargetPoolAndChecks(%v): Do not have permission to delete firewall rule %v (on XPN). Raising event.", lbRefStr, fwName) gce.raiseFirewallChangeNeededEvent(service, FirewallToGCloudDeleteCmd(fwName, gce.NetworkProjectID())) return nil } @@ -761,7 +773,7 @@ func translateAffinityType(affinityType v1.ServiceAffinity) string { } func (gce *GCECloud) firewallNeedsUpdate(name, serviceName, region, ipAddress string, ports []v1.ServicePort, sourceRanges netsets.IPNet) (exists bool, needsUpdate bool, err error) { - fw, err := gce.service.Firewalls.Get(gce.NetworkProjectID(), makeFirewallName(name)).Do() + fw, err := gce.service.Firewalls.Get(gce.NetworkProjectID(), MakeFirewallName(name)).Do() if err != nil { if isHTTPErrorCode(err, http.StatusNotFound) { return false, true, nil diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_internal.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_internal.go index 31d770c849aa..ff14cbacdbe3 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_internal.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_internal.go @@ -80,12 +80,18 @@ func (gce *GCECloud) ensureInternalLoadBalancer(clusterName, clusterID string, s // Determine IP which will be used for this LB. If no forwarding rule has been established // or specified in the Service spec, then requestedIP = "". 
requestedIP := determineRequestedIP(svc, existingFwdRule) - addrMgr := newAddressManager(gce, nm.String(), gce.Region(), gce.getInternalSubnetURL(), loadBalancerName, requestedIP, schemeInternal) - ipToUse, err := addrMgr.HoldAddress() - if err != nil { - return nil, err + ipToUse := requestedIP + + var addrMgr *addressManager + // If the network is not a legacy network, use the address manager + if !gce.IsLegacyNetwork() { + addrMgr = newAddressManager(gce, nm.String(), gce.Region(), gce.SubnetworkURL(), loadBalancerName, requestedIP, schemeInternal) + ipToUse, err = addrMgr.HoldAddress() + if err != nil { + return nil, err + } + glog.V(2).Infof("ensureInternalLoadBalancer(%v): reserved IP %q for the forwarding rule", loadBalancerName, ipToUse) } - glog.V(2).Infof("ensureInternalLoadBalancer(%v): reserved IP %q for the forwarding rule", loadBalancerName, ipToUse) // Ensure firewall rules if necessary if err = gce.ensureInternalFirewalls(loadBalancerName, ipToUse, clusterID, nm, svc, strconv.Itoa(int(hcPort)), sharedHealthCheck, nodes); err != nil { @@ -102,7 +108,7 @@ func (gce *GCECloud) ensureInternalLoadBalancer(clusterName, clusterID string, s LoadBalancingScheme: string(scheme), } - // Specify subnetwork if network type is manual + // Specify subnetwork if known if len(gce.subnetworkURL) > 0 { expectedFwdRule.Subnetwork = gce.subnetworkURL } else { @@ -112,7 +118,7 @@ func (gce *GCECloud) ensureInternalLoadBalancer(clusterName, clusterID string, s fwdRuleDeleted := false if existingFwdRule != nil && !fwdRuleEqual(existingFwdRule, expectedFwdRule) { glog.V(2).Infof("ensureInternalLoadBalancer(%v): deleting existing forwarding rule with IP address %v", loadBalancerName, existingFwdRule.IPAddress) - if err = gce.DeleteRegionForwardingRule(loadBalancerName, gce.region); err != nil && !isNotFound(err) { + if err = ignoreNotFound(gce.DeleteRegionForwardingRule(loadBalancerName, gce.region)); err != nil { return nil, err } fwdRuleDeleted = true @@ -138,13 +144,21 @@ func (gce *GCECloud) ensureInternalLoadBalancer(clusterName, clusterID string, s gce.clearPreviousInternalResources(svc, loadBalancerName, existingBackendService, backendServiceName, hcName) } - // Now that the controller knows the forwarding rule exists, we can release the address. - if err := addrMgr.ReleaseAddress(); err != nil { - glog.Errorf("ensureInternalLoadBalancer: failed to release address reservation, possibly causing an orphan: %v", err) + if addrMgr != nil { + // Now that the controller knows the forwarding rule exists, we can release the address. + if err := addrMgr.ReleaseAddress(); err != nil { + glog.Errorf("ensureInternalLoadBalancer: failed to release address reservation, possibly causing an orphan: %v", err) + } + } + + // Get the most recent forwarding rule for the address. 
+ updatedFwdRule, err := gce.GetRegionForwardingRule(loadBalancerName, gce.region) + if err != nil { + return nil, err } status := &v1.LoadBalancerStatus{} - status.Ingress = []v1.LoadBalancerIngress{{IP: ipToUse}} + status.Ingress = []v1.LoadBalancerIngress{{IP: updatedFwdRule.IPAddress}} return status, nil } @@ -206,7 +220,7 @@ func (gce *GCECloud) ensureInternalLoadBalancerDeleted(clusterName, clusterID st ensureAddressDeleted(gce, loadBalancerName, gce.region) glog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): deleting region internal forwarding rule", loadBalancerName) - if err := gce.DeleteRegionForwardingRule(loadBalancerName, gce.region); err != nil && !isNotFound(err) { + if err := ignoreNotFound(gce.DeleteRegionForwardingRule(loadBalancerName, gce.region)); err != nil { return err } @@ -217,7 +231,7 @@ func (gce *GCECloud) ensureInternalLoadBalancerDeleted(clusterName, clusterID st } glog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): deleting firewall for traffic", loadBalancerName) - if err := gce.DeleteFirewall(loadBalancerName); err != nil { + if err := ignoreNotFound(gce.DeleteFirewall(loadBalancerName)); err != nil { if isForbidden(err) && gce.OnXPN() { glog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): could not delete traffic firewall on XPN cluster. Raising event.", loadBalancerName) gce.raiseFirewallChangeNeededEvent(svc, FirewallToGCloudDeleteCmd(loadBalancerName, gce.NetworkProjectID())) @@ -272,7 +286,7 @@ func (gce *GCECloud) teardownInternalHealthCheckAndFirewall(svc *v1.Service, hcN glog.V(2).Infof("teardownInternalHealthCheckAndFirewall(%v): health check deleted", hcName) hcFirewallName := makeHealthCheckFirewallNameFromHC(hcName) - if err := gce.DeleteFirewall(hcFirewallName); err != nil && !isNotFound(err) { + if err := ignoreNotFound(gce.DeleteFirewall(hcFirewallName)); err != nil { if isForbidden(err) && gce.OnXPN() { glog.V(2).Infof("teardownInternalHealthCheckAndFirewall(%v): could not delete health check traffic firewall on XPN cluster. Raising Event.", hcName) gce.raiseFirewallChangeNeededEvent(svc, FirewallToGCloudDeleteCmd(hcFirewallName, gce.NetworkProjectID())) @@ -662,20 +676,6 @@ func (gce *GCECloud) getBackendServiceLink(name string) string { return gce.service.BasePath + strings.Join([]string{gce.projectID, "regions", gce.region, "backendServices", name}, "/") } -// getInternalSubnetURL first attempts to return the configured SubnetURL. -// If subnetwork-name was not specified, then a best-effort generation is made. -// Note subnet names might not be the network name for some auto networks. 
-func (gce *GCECloud) getInternalSubnetURL() string { - if gce.SubnetworkURL() != "" { - return gce.SubnetworkURL() - } - - networkName := getNameFromLink(gce.NetworkURL()) - v := gceSubnetworkURL("", gce.NetworkProjectID(), gce.Region(), networkName) - glog.Warningf("Generating subnetwork URL based off network since subnet name/URL was not configured: %q", v) - return v -} - func getNameFromLink(link string) string { if link == "" { return "" diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_naming.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_naming.go index 73169b21dc53..7fee340a629c 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_naming.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_naming.go @@ -89,9 +89,9 @@ func makeServiceDescription(serviceName string) string { return fmt.Sprintf(`{"kubernetes.io/service-name":"%s"}`, serviceName) } -// makeNodesHealthCheckName returns name of the health check resource used by +// MakeNodesHealthCheckName returns name of the health check resource used by // the GCE load balancers (l4) for performing health checks on nodes. -func makeNodesHealthCheckName(clusterID string) string { +func MakeNodesHealthCheckName(clusterID string) string { return fmt.Sprintf("k8s-%v-node", clusterID) } @@ -103,12 +103,14 @@ func makeHealthCheckDescription(serviceName string) string { // balancers (l4) for performing health checks. func MakeHealthCheckFirewallName(clusterID, hcName string, isNodesHealthCheck bool) string { if isNodesHealthCheck { - return makeNodesHealthCheckName(clusterID) + "-http-hc" + return MakeNodesHealthCheckName(clusterID) + "-http-hc" } return "k8s-" + hcName + "-http-hc" } -func makeFirewallName(name string) string { +// MakeFirewallName returns the firewall name used by the GCE load +// balancers (l4) for serving traffic. +func MakeFirewallName(name string) string { return fmt.Sprintf("k8s-fw-%s", name) } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_networkendpointgroup.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_networkendpointgroup.go new file mode 100644 index 000000000000..f8d3e80f4f3e --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_networkendpointgroup.go @@ -0,0 +1,148 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package gce + +import ( + "context" + computealpha "google.golang.org/api/compute/v0.alpha" + "strings" +) + +const ( + NEGLoadBalancerType = "LOAD_BALANCING" + NEGIPPortNetworkEndpointType = "GCE_VM_IP_PORT" +) + +func newNetworkEndpointGroupMetricContext(request string, zone string) *metricContext { + return newGenericMetricContext("networkendpointgroup_", request, unusedMetricLabel, zone, computeAlphaVersion) +} + +func (gce *GCECloud) GetNetworkEndpointGroup(name string, zone string) (*computealpha.NetworkEndpointGroup, error) { + if err := gce.alphaFeatureEnabled(AlphaFeatureNetworkEndpointGroup); err != nil { + return nil, err + } + mc := newNetworkEndpointGroupMetricContext("get", zone) + v, err := gce.serviceAlpha.NetworkEndpointGroups.Get(gce.ProjectID(), zone, name).Do() + return v, mc.Observe(err) +} + +func (gce *GCECloud) ListNetworkEndpointGroup(zone string) ([]*computealpha.NetworkEndpointGroup, error) { + if err := gce.alphaFeatureEnabled(AlphaFeatureNetworkEndpointGroup); err != nil { + return nil, err + } + mc := newNetworkEndpointGroupMetricContext("list", zone) + networkEndpointGroups := []*computealpha.NetworkEndpointGroup{} + err := gce.serviceAlpha.NetworkEndpointGroups.List(gce.ProjectID(), zone).Pages(context.Background(), func(res *computealpha.NetworkEndpointGroupList) error { + networkEndpointGroups = append(networkEndpointGroups, res.Items...) + return nil + }) + return networkEndpointGroups, mc.Observe(err) +} + +func (gce *GCECloud) AggregatedListNetworkEndpointGroup() (map[string][]*computealpha.NetworkEndpointGroup, error) { + if err := gce.alphaFeatureEnabled(AlphaFeatureNetworkEndpointGroup); err != nil { + return nil, err + } + mc := newNetworkEndpointGroupMetricContext("aggregated_list", "") + zoneNetworkEndpointGroupMap := map[string][]*computealpha.NetworkEndpointGroup{} + err := gce.serviceAlpha.NetworkEndpointGroups.AggregatedList(gce.ProjectID()).Pages(context.Background(), func(res *computealpha.NetworkEndpointGroupAggregatedList) error { + for key, negs := range res.Items { + if len(negs.NetworkEndpointGroups) == 0 { + continue + } + // key has the format of "zones/${zone_name}" + zone := strings.Split(key, "/")[1] + if _, ok := zoneNetworkEndpointGroupMap[zone]; !ok { + zoneNetworkEndpointGroupMap[zone] = []*computealpha.NetworkEndpointGroup{} + } + zoneNetworkEndpointGroupMap[zone] = append(zoneNetworkEndpointGroupMap[zone], negs.NetworkEndpointGroups...) 
+ } + return nil + }) + return zoneNetworkEndpointGroupMap, mc.Observe(err) +} + +func (gce *GCECloud) CreateNetworkEndpointGroup(neg *computealpha.NetworkEndpointGroup, zone string) error { + if err := gce.alphaFeatureEnabled(AlphaFeatureNetworkEndpointGroup); err != nil { + return err + } + mc := newNetworkEndpointGroupMetricContext("create", zone) + op, err := gce.serviceAlpha.NetworkEndpointGroups.Insert(gce.ProjectID(), zone, neg).Do() + if err != nil { + return mc.Observe(err) + } + return gce.waitForZoneOp(op, zone, mc) +} + +func (gce *GCECloud) DeleteNetworkEndpointGroup(name string, zone string) error { + if err := gce.alphaFeatureEnabled(AlphaFeatureNetworkEndpointGroup); err != nil { + return err + } + mc := newNetworkEndpointGroupMetricContext("delete", zone) + op, err := gce.serviceAlpha.NetworkEndpointGroups.Delete(gce.ProjectID(), zone, name).Do() + if err != nil { + return mc.Observe(err) + } + return gce.waitForZoneOp(op, zone, mc) +} + +func (gce *GCECloud) AttachNetworkEndpoints(name, zone string, endpoints []*computealpha.NetworkEndpoint) error { + if err := gce.alphaFeatureEnabled(AlphaFeatureNetworkEndpointGroup); err != nil { + return err + } + mc := newNetworkEndpointGroupMetricContext("attach", zone) + op, err := gce.serviceAlpha.NetworkEndpointGroups.AttachNetworkEndpoints(gce.ProjectID(), zone, name, &computealpha.NetworkEndpointGroupsAttachEndpointsRequest{ + NetworkEndpoints: endpoints, + }).Do() + if err != nil { + return mc.Observe(err) + } + return gce.waitForZoneOp(op, zone, mc) +} + +func (gce *GCECloud) DetachNetworkEndpoints(name, zone string, endpoints []*computealpha.NetworkEndpoint) error { + if err := gce.alphaFeatureEnabled(AlphaFeatureNetworkEndpointGroup); err != nil { + return err + } + mc := newNetworkEndpointGroupMetricContext("detach", zone) + op, err := gce.serviceAlpha.NetworkEndpointGroups.DetachNetworkEndpoints(gce.ProjectID(), zone, name, &computealpha.NetworkEndpointGroupsDetachEndpointsRequest{ + NetworkEndpoints: endpoints, + }).Do() + if err != nil { + return mc.Observe(err) + } + return gce.waitForZoneOp(op, zone, mc) +} + +func (gce *GCECloud) ListNetworkEndpoints(name, zone string, showHealthStatus bool) ([]*computealpha.NetworkEndpointWithHealthStatus, error) { + if err := gce.alphaFeatureEnabled(AlphaFeatureNetworkEndpointGroup); err != nil { + return nil, err + } + healthStatus := "SKIP" + if showHealthStatus { + healthStatus = "SHOW" + } + mc := newNetworkEndpointGroupMetricContext("list_networkendpoints", zone) + networkEndpoints := []*computealpha.NetworkEndpointWithHealthStatus{} + err := gce.serviceAlpha.NetworkEndpointGroups.ListNetworkEndpoints(gce.ProjectID(), zone, name, &computealpha.NetworkEndpointGroupsListEndpointsRequest{ + HealthStatus: healthStatus, + }).Pages(context.Background(), func(res *computealpha.NetworkEndpointGroupsListNetworkEndpoints) error { + networkEndpoints = append(networkEndpoints, res.Items...) + return nil + }) + return networkEndpoints, mc.Observe(err) +} diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/kms.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/kms.go deleted file mode 100644 index fbe62f523b55..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/kms.go +++ /dev/null @@ -1,167 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package gce - -import ( - "encoding/base64" - "fmt" - "io" - - "github.com/golang/glog" - cloudkms "google.golang.org/api/cloudkms/v1" - "google.golang.org/api/googleapi" - gcfg "gopkg.in/gcfg.v1" - "k8s.io/apiserver/pkg/storage/value/encrypt/envelope" -) - -const ( - // KMSServiceName is the name of the cloudkms provider registered by this cloud. - KMSServiceName = "gcp-cloudkms" - - defaultGKMSKeyRing = "google-container-engine" - defaultGKMSKeyRingLocation = "global" -) - -// gkmsConfig contains the GCE specific KMS configuration for setting up a KMS connection. -type gkmsConfig struct { - Global struct { - // location is the KMS location of the KeyRing to be used for encryption. - // It can be found by checking the available KeyRings in the IAM UI. - // This is not the same as the GCP location of the project. - // +optional - Location string `gcfg:"kms-location"` - // keyRing is the keyRing of the hosted key to be used. The default value is "google-kubernetes". - // +optional - KeyRing string `gcfg:"kms-keyring"` - // cryptoKey is the name of the key to be used for encryption of Data-Encryption-Keys. - CryptoKey string `gcfg:"kms-cryptokey"` - } -} - -// readGCPCloudKMSConfig parses and returns the configuration parameters for Google Cloud KMS. -func readGCPCloudKMSConfig(reader io.Reader) (*gkmsConfig, error) { - cfg := &gkmsConfig{} - if err := gcfg.FatalOnly(gcfg.ReadInto(cfg, reader)); err != nil { - glog.Errorf("Couldn't read Google Cloud KMS config: %v", err) - return nil, err - } - return cfg, nil -} - -// gkmsService provides Encrypt and Decrypt methods which allow cryptographic operations -// using Google Cloud KMS service. -type gkmsService struct { - parentName string - cloudkmsService *cloudkms.Service -} - -// getGCPCloudKMSService provides a Google Cloud KMS based implementation of envelope.Service. -func (gce *GCECloud) getGCPCloudKMSService(config io.Reader) (envelope.Service, error) { - kmsConfig, err := readGCPCloudKMSConfig(config) - if err != nil { - return nil, err - } - - // Hosting on GCE/GKE with Google KMS encryption provider - cloudkmsService := gce.GetKMSService() - - // Set defaults for location and keyRing. 
- location := kmsConfig.Global.Location - if len(location) == 0 { - location = defaultGKMSKeyRingLocation - } - keyRing := kmsConfig.Global.KeyRing - if len(keyRing) == 0 { - keyRing = defaultGKMSKeyRing - } - - cryptoKey := kmsConfig.Global.CryptoKey - if len(cryptoKey) == 0 { - return nil, fmt.Errorf("missing cryptoKey for cloudprovided KMS: " + KMSServiceName) - } - - parentName := fmt.Sprintf("projects/%s/locations/%s", gce.projectID, location) - - // Create the keyRing if it does not exist yet - _, err = cloudkmsService.Projects.Locations.KeyRings.Create(parentName, - &cloudkms.KeyRing{}).KeyRingId(keyRing).Do() - if err != nil && unrecoverableCreationError(err) { - return nil, err - } - parentName = parentName + "/keyRings/" + keyRing - - // Create the cryptoKey if it does not exist yet - _, err = cloudkmsService.Projects.Locations.KeyRings.CryptoKeys.Create(parentName, - &cloudkms.CryptoKey{ - Purpose: "ENCRYPT_DECRYPT", - }).CryptoKeyId(cryptoKey).Do() - if err != nil && unrecoverableCreationError(err) { - return nil, err - } - parentName = parentName + "/cryptoKeys/" + cryptoKey - - service := &gkmsService{ - parentName: parentName, - cloudkmsService: cloudkmsService, - } - - // Sanity check before startup. For non-GCP clusters, the user's account may not have permissions to create - // the key. We need to verify the existence of the key before apiserver startup. - _, err = service.Encrypt([]byte("test")) - if err != nil { - return nil, fmt.Errorf("failed to encrypt data using Google cloudkms, using key %s. Ensure that the keyRing and cryptoKey exist. Got error: %v", parentName, err) - } - - return service, nil -} - -// Decrypt decrypts a base64 representation of encrypted bytes. -func (t *gkmsService) Decrypt(data string) ([]byte, error) { - resp, err := t.cloudkmsService.Projects.Locations.KeyRings.CryptoKeys. - Decrypt(t.parentName, &cloudkms.DecryptRequest{ - Ciphertext: data, - }).Do() - if err != nil { - return nil, err - } - return base64.StdEncoding.DecodeString(resp.Plaintext) -} - -// Encrypt encrypts bytes, and returns base64 representation of the ciphertext. -func (t *gkmsService) Encrypt(data []byte) (string, error) { - resp, err := t.cloudkmsService.Projects.Locations.KeyRings.CryptoKeys. - Encrypt(t.parentName, &cloudkms.EncryptRequest{ - Plaintext: base64.StdEncoding.EncodeToString(data), - }).Do() - if err != nil { - return "", err - } - return resp.Ciphertext, nil -} - -// unrecoverableCreationError decides if Kubernetes should ignore the encountered Google KMS -// error. Only to be used for errors seen while creating a KeyRing or CryptoKey. -func unrecoverableCreationError(err error) bool { - apiError, isAPIError := err.(*googleapi.Error) - // 409 means the object exists. - // 403 means we do not have permission to create the object, the user must do it. - // Else, it is an unrecoverable error. 
- if !isAPIError || (apiError.Code != 409 && apiError.Code != 403) { - return true - } - return false -} diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/metrics.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/metrics.go index 649bd11b132b..22f1c2dde6f5 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/metrics.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/metrics.go @@ -67,6 +67,12 @@ func (mc *metricContext) Observe(err error) error { } func newGenericMetricContext(prefix, request, region, zone, version string) *metricContext { + if len(zone) == 0 { + zone = unusedMetricLabel + } + if len(region) == 0 { + region = unusedMetricLabel + } return &metricContext{ start: time.Now(), attributes: []string{prefix + "_" + request, region, zone, version}, diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/BUILD b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/BUILD index 74022fef5fa9..117014a5a203 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/BUILD @@ -18,9 +18,10 @@ go_library( "openstack_routes.go", "openstack_volumes.go", ], + importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/openstack", deps = [ - "//pkg/api/v1/helper:go_default_library", "//pkg/api/v1/service:go_default_library", + "//pkg/apis/core/v1/helper:go_default_library", "//pkg/cloudprovider:go_default_library", "//pkg/controller:go_default_library", "//pkg/util/mount:go_default_library", @@ -28,35 +29,36 @@ go_library( "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/gophercloud/gophercloud:go_default_library", "//vendor/github.com/gophercloud/gophercloud/openstack:go_default_library", - "//vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/apiversions:go_default_library", + "//vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions:go_default_library", "//vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes:go_default_library", "//vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes:go_default_library", + "//vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes:go_default_library", "//vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces:go_default_library", "//vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach:go_default_library", "//vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers:go_default_library", "//vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/trusts:go_default_library", "//vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens:go_default_library", "//vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions:go_default_library", + "//vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external:go_default_library", "//vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips:go_default_library", "//vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers:go_default_library", - "//vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members:go_default_library", - "//vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors:go_default_library", - 
"//vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools:go_default_library", - "//vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips:go_default_library", "//vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners:go_default_library", "//vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers:go_default_library", "//vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors:go_default_library", "//vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools:go_default_library", "//vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups:go_default_library", "//vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules:go_default_library", + "//vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks:go_default_library", "//vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports:go_default_library", "//vendor/github.com/gophercloud/gophercloud/pagination:go_default_library", "//vendor/github.com/mitchellh/mapstructure:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/gopkg.in/gcfg.v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/client-go/util/cert:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", @@ -70,12 +72,13 @@ go_test( "openstack_routes_test.go", "openstack_test.go", ], + importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/openstack", library = ":go_default_library", deps = [ "//pkg/cloudprovider:go_default_library", "//vendor/github.com/gophercloud/gophercloud:go_default_library", - "//vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/apiversions:go_default_library", "//vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers:go_default_library", + "//vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/OWNERS b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/OWNERS index a9ec9e985dd4..8f4d575d6dff 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/OWNERS @@ -2,7 +2,9 @@ approvers: - anguslees - NickrenREN - dims +- FengyunPan reviewers: - anguslees - NickrenREN -- dims \ No newline at end of file +- dims +- FengyunPan diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/metadata.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/metadata.go index 9ab0e09189eb..d3cf22d9238d 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/metadata.go +++ 
b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/metadata.go @@ -32,26 +32,46 @@ import ( "k8s.io/utils/exec" ) -// metadataUrl is URL to OpenStack metadata server. It's hardcoded IPv4 -// link-local address as documented in "OpenStack Cloud Administrator Guide", -// chapter Compute - Networking with nova-network. -// https://docs.openstack.org/admin-guide/compute-networking-nova.html#metadata-service -const metadataUrl = "http://169.254.169.254/openstack/2012-08-10/meta_data.json" +const ( + // metadataUrlTemplate allows building an OpenStack Metadata service URL. + // It's a hardcoded IPv4 link-local address as documented in "OpenStack Cloud + // Administrator Guide", chapter Compute - Networking with nova-network. + // https://docs.openstack.org/admin-guide/compute-networking-nova.html#metadata-service + defaultMetadataVersion = "2012-08-10" + metadataUrlTemplate = "http://169.254.169.254/openstack/%s/meta_data.json" + + // metadataID is used as an identifier on the metadata search order configuration. + metadataID = "metadataService" + + // Config drive is defined as an iso9660 or vfat (deprecated) drive + // with the "config-2" label. + // http://docs.openstack.org/user-guide/cli-config-drive.html + configDriveLabel = "config-2" + configDrivePathTemplate = "openstack/%s/meta_data.json" + + // configDriveID is used as an identifier on the metadata search order configuration. + configDriveID = "configDrive" +) -// Config drive is defined as an iso9660 or vfat (deprecated) drive -// with the "config-2" label. -// http://docs.openstack.org/user-guide/cli-config-drive.html -const configDriveLabel = "config-2" -const configDrivePath = "openstack/2012-08-10/meta_data.json" +var ErrBadMetadata = errors.New("invalid OpenStack metadata, got empty uuid") -var ErrBadMetadata = errors.New("Invalid OpenStack metadata, got empty uuid") +// There are multiple device types. To keep it simple, we're using a single structure +// for all device metadata types. +type DeviceMetadata struct { + Type string `json:"type"` + Bus string `json:"bus,omitempty"` + Serial string `json:"serial,omitempty"` + Address string `json:"address,omitempty"` + // .. and other fields. +} // Assumes the "2012-08-10" meta_data.json format. // See http://docs.openstack.org/user-guide/cli_config_drive.html type Metadata struct { - Uuid string `json:"uuid"` - Name string `json:"name"` - AvailabilityZone string `json:"availability_zone"` + Uuid string `json:"uuid"` + Name string `json:"name"` + AvailabilityZone string `json:"availability_zone"` + Devices []DeviceMetadata `json:"devices,omitempty"` // .. and other fields we don't care about. Expand as necessary. } @@ -71,7 +91,15 @@ func parseMetadata(r io.Reader) (*Metadata, error) { return &metadata, nil } -func getMetadataFromConfigDrive() (*Metadata, error) { +func getMetadataUrl(metadataVersion string) string { + return fmt.Sprintf(metadataUrlTemplate, metadataVersion) +} + +func getConfigDrivePath(metadataVersion string) string { + return fmt.Sprintf(configDrivePathTemplate, metadataVersion) +} + +func getMetadataFromConfigDrive(metadataVersion string) (*Metadata, error) { // Try to read instance UUID from config drive. 
dev := "/dev/disk/by-label/" + configDriveLabel if _, err := os.Stat(dev); os.IsNotExist(err) { @@ -81,8 +109,7 @@ func getMetadataFromConfigDrive() (*Metadata, error) { "-o", "device", ).CombinedOutput() if err != nil { - glog.V(2).Infof("Unable to run blkid: %v", err) - return nil, err + return nil, fmt.Errorf("unable to run blkid: %v", err) } dev = strings.TrimSpace(string(out)) } @@ -101,37 +128,35 @@ func getMetadataFromConfigDrive() (*Metadata, error) { err = mounter.Mount(dev, mntdir, "vfat", []string{"ro"}) } if err != nil { - glog.Errorf("Error mounting configdrive %s: %v", dev, err) - return nil, err + return nil, fmt.Errorf("error mounting configdrive %s: %v", dev, err) } defer mounter.Unmount(mntdir) glog.V(4).Infof("Configdrive mounted on %s", mntdir) + configDrivePath := getConfigDrivePath(metadataVersion) f, err := os.Open( filepath.Join(mntdir, configDrivePath)) if err != nil { - glog.Errorf("Error reading %s on config drive: %v", configDrivePath, err) - return nil, err + return nil, fmt.Errorf("error reading %s on config drive: %v", configDrivePath, err) } defer f.Close() return parseMetadata(f) } -func getMetadataFromMetadataService() (*Metadata, error) { - // Try to get JSON from metdata server. +func getMetadataFromMetadataService(metadataVersion string) (*Metadata, error) { + // Try to get JSON from metadata server. + metadataUrl := getMetadataUrl(metadataVersion) glog.V(4).Infof("Attempting to fetch metadata from %s", metadataUrl) resp, err := http.Get(metadataUrl) if err != nil { - glog.V(3).Infof("Cannot read %s: %v", metadataUrl, err) - return nil, err + return nil, fmt.Errorf("error fetching %s: %v", metadataUrl, err) } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - err = fmt.Errorf("Unexpected status code when reading metadata from %s: %s", metadataUrl, resp.Status) - glog.V(3).Infof("%v", err) + err = fmt.Errorf("unexpected status code when reading metadata from %s: %s", metadataUrl, resp.Status) return nil, err } @@ -141,12 +166,28 @@ func getMetadataFromMetadataService() (*Metadata, error) { // Metadata is fixed for the current host, so cache the value process-wide var metadataCache *Metadata -func getMetadata() (*Metadata, error) { +func getMetadata(order string) (*Metadata, error) { if metadataCache == nil { - md, err := getMetadataFromConfigDrive() - if err != nil { - md, err = getMetadataFromMetadataService() + var md *Metadata + var err error + + elements := strings.Split(order, ",") + for _, id := range elements { + id = strings.TrimSpace(id) + switch id { + case configDriveID: + md, err = getMetadataFromConfigDrive(defaultMetadataVersion) + case metadataID: + md, err = getMetadataFromMetadataService(defaultMetadataVersion) + default: + err = fmt.Errorf("%s is not a valid metadata search order option. 
Supported options are %s and %s", id, configDriveID, metadataID) + } + + if err == nil { + break + } } + if err != nil { return nil, err } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack.go index e1fe42c84758..a2a517a4d3e2 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack.go @@ -24,13 +24,11 @@ import ( "io/ioutil" "net/http" "regexp" - "sort" "strings" "time" "github.com/gophercloud/gophercloud" "github.com/gophercloud/gophercloud/openstack" - apiversions_v1 "github.com/gophercloud/gophercloud/openstack/blockstorage/v1/apiversions" "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces" "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" "github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/trusts" @@ -44,7 +42,7 @@ import ( "k8s.io/apimachinery/pkg/types" netutil "k8s.io/apimachinery/pkg/util/net" certutil "k8s.io/client-go/util/cert" - v1helper "k8s.io/kubernetes/pkg/api/v1/helper" + v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/controller" ) @@ -52,11 +50,12 @@ import ( const ( ProviderName = "openstack" AvailabilityZone = "availability_zone" + defaultTimeOut = 60 * time.Second ) -var ErrNotFound = errors.New("Failed to find object") -var ErrMultipleResults = errors.New("Multiple results where only one expected") -var ErrNoAddressFound = errors.New("No address found for host") +var ErrNotFound = errors.New("failed to find object") +var ErrMultipleResults = errors.New("multiple results where only one expected") +var ErrNoAddressFound = errors.New("no address found for host") // encoding.TextUnmarshaler interface for time.Duration type MyDuration struct { @@ -75,38 +74,48 @@ func (d *MyDuration) UnmarshalText(text []byte) error { type LoadBalancer struct { network *gophercloud.ServiceClient compute *gophercloud.ServiceClient + lb *gophercloud.ServiceClient opts LoadBalancerOpts } type LoadBalancerOpts struct { - LBVersion string `gcfg:"lb-version"` // overrides autodetection. v1 or v2 + LBVersion string `gcfg:"lb-version"` // overrides autodetection. Only support v2. + UseOctavia bool `gcfg:"use-octavia"` // uses Octavia V2 service catalog endpoint SubnetId string `gcfg:"subnet-id"` // overrides autodetection. FloatingNetworkId string `gcfg:"floating-network-id"` // If specified, will create floating ip for loadbalancer, or do not create floating ip. LBMethod string `gcfg:"lb-method"` // default to ROUND_ROBIN. + LBProvider string `gcfg:"lb-provider"` CreateMonitor bool `gcfg:"create-monitor"` MonitorDelay MyDuration `gcfg:"monitor-delay"` MonitorTimeout MyDuration `gcfg:"monitor-timeout"` MonitorMaxRetries uint `gcfg:"monitor-max-retries"` ManageSecurityGroups bool `gcfg:"manage-security-groups"` - NodeSecurityGroupID string `gcfg:"node-security-group"` + NodeSecurityGroupIDs []string // Do not specify, get it automatically when enable manage-security-groups. TODO(FengyunPan): move it into cache } type BlockStorageOpts struct { BSVersion string `gcfg:"bs-version"` // overrides autodetection. v1 or v2. 
Defaults to auto TrustDevicePath bool `gcfg:"trust-device-path"` // See Issue #33128 + IgnoreVolumeAZ bool `gcfg:"ignore-volume-az"` } type RouterOpts struct { RouterId string `gcfg:"router-id"` // required } +type MetadataOpts struct { + SearchOrder string `gcfg:"search-order"` + RequestTimeout MyDuration `gcfg:"request-timeout"` +} + // OpenStack is an implementation of cloud provider Interface for OpenStack. type OpenStack struct { - provider *gophercloud.ProviderClient - region string - lbOpts LoadBalancerOpts - bsOpts BlockStorageOpts - routeOpts RouterOpts + provider *gophercloud.ProviderClient + region string + lbOpts LoadBalancerOpts + bsOpts BlockStorageOpts + routeOpts RouterOpts + metadataOpts MetadataOpts // InstanceID of the server where this OpenStack object is instantiated. localInstanceID string } @@ -128,6 +137,7 @@ type Config struct { LoadBalancer LoadBalancerOpts BlockStorage BlockStorageOpts Route RouterOpts + Metadata MetadataOpts } func init() { @@ -172,8 +182,7 @@ func (cfg Config) toAuth3Options() tokens3.AuthOptions { func readConfig(config io.Reader) (Config, error) { if config == nil { - err := fmt.Errorf("no OpenStack cloud provider config file given") - return Config{}, err + return Config{}, fmt.Errorf("no OpenStack cloud provider config file given") } var cfg Config @@ -181,6 +190,8 @@ func readConfig(config io.Reader) (Config, error) { // Set default values for config params cfg.BlockStorage.BSVersion = "auto" cfg.BlockStorage.TrustDevicePath = false + cfg.BlockStorage.IgnoreVolumeAZ = false + cfg.Metadata.SearchOrder = fmt.Sprintf("%s,%s", configDriveID, metadataID) err := gcfg.ReadInto(&cfg, config) return cfg, err @@ -198,7 +209,7 @@ func (c *Caller) Call(f func()) { } } -func readInstanceID() (string, error) { +func readInstanceID(searchOrder string) (string, error) { // Try to find instance ID on the local filesystem (created by cloud-init) const instanceIDFile = "/var/lib/cloud/data/instance-id" idBytes, err := ioutil.ReadFile(instanceIDFile) @@ -212,7 +223,7 @@ func readInstanceID() (string, error) { // Fall through to metadata server lookup } - md, err := getMetadata() + md, err := getMetadata(searchOrder) if err != nil { return "", err } @@ -239,11 +250,8 @@ func checkOpenStackOpts(openstackOpts *OpenStack) error { } } - // if enable ManageSecurityGroups, node-security-group should be set. 
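The new config surface in this hunk is easiest to see as a cloud.conf fragment. Below is a hedged, standalone sketch that parses such a fragment with gcfg, the same library the provider already uses; exampleConfig is a trimmed stand-in for the real Config struct, the gcfg tags mirror the options added above, and the octavia lb-provider value is purely illustrative:

package main

import (
	"fmt"

	"gopkg.in/gcfg.v1"
)

// Trimmed stand-ins for the provider's option structs.
type LB struct {
	LBVersion  string `gcfg:"lb-version"`
	UseOctavia bool   `gcfg:"use-octavia"`
	LBProvider string `gcfg:"lb-provider"`
}

type BS struct {
	BSVersion      string `gcfg:"bs-version"`
	IgnoreVolumeAZ bool   `gcfg:"ignore-volume-az"`
}

type Meta struct {
	SearchOrder string `gcfg:"search-order"`
}

type exampleConfig struct {
	LoadBalancer LB
	BlockStorage BS
	Metadata     Meta
}

func main() {
	// Example cloud.conf fragment; values are illustrative only.
	const conf = `
[LoadBalancer]
lb-version = v2
use-octavia = true
lb-provider = octavia

[BlockStorage]
bs-version = auto
ignore-volume-az = true

[Metadata]
search-order = configDrive,metadataService
`
	var cfg exampleConfig
	if err := gcfg.ReadStringInto(&cfg, conf); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg)
}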
- if lbOpts.ManageSecurityGroups { - if len(lbOpts.NodeSecurityGroupID) == 0 { - return fmt.Errorf("node-security-group not set in cloud provider config") - } + if err := checkMetadataSearchOrder(openstackOpts.metadataOpts.SearchOrder); err != nil { + return err } return nil @@ -279,12 +287,19 @@ func newOpenStack(cfg Config) (*OpenStack, error) { return nil, err } + emptyDuration := MyDuration{} + if cfg.Metadata.RequestTimeout == emptyDuration { + cfg.Metadata.RequestTimeout.Duration = time.Duration(defaultTimeOut) + } + provider.HTTPClient.Timeout = cfg.Metadata.RequestTimeout.Duration + os := OpenStack{ - provider: provider, - region: cfg.Global.Region, - lbOpts: cfg.LoadBalancer, - bsOpts: cfg.BlockStorage, - routeOpts: cfg.Route, + provider: provider, + region: cfg.Global.Region, + lbOpts: cfg.LoadBalancer, + bsOpts: cfg.BlockStorage, + routeOpts: cfg.Route, + metadataOpts: cfg.Metadata, } err = checkOpenStackOpts(&os) @@ -484,7 +499,6 @@ func (os *OpenStack) HasClusterID() bool { func (os *OpenStack) LoadBalancer() (cloudprovider.LoadBalancer, bool) { glog.V(4).Info("openstack.LoadBalancer() called") - // TODO: Search for and support Rackspace loadbalancer API, and others. network, err := os.NewNetworkV2() if err != nil { return nil, false @@ -495,39 +509,22 @@ func (os *OpenStack) LoadBalancer() (cloudprovider.LoadBalancer, bool) { return nil, false } - lbVersion := os.lbOpts.LBVersion - if lbVersion == "" { - // No version specified, try newest supported by server - netExts, err := networkExtensions(network) - if err != nil { - glog.Warningf("Failed to list neutron extensions: %v", err) - return nil, false - } + lb, err := os.NewLoadBalancerV2() + if err != nil { + return nil, false + } - if netExts["lbaasv2"] { - lbVersion = "v2" - } else if netExts["lbaas"] { - lbVersion = "v1" - } else { - glog.Warningf("Failed to find neutron LBaaS extension (v1 or v2)") - return nil, false - } - glog.V(3).Infof("Using LBaaS extension %v", lbVersion) + // LBaaS v1 is deprecated in the OpenStack Liberty release. + // Currently kubernetes OpenStack cloud provider just support LBaaS v2. + lbVersion := os.lbOpts.LBVersion + if lbVersion != "" && lbVersion != "v2" { + glog.Warningf("Config error: currently only support LBaaS v2, unrecognised lb-version \"%v\"", lbVersion) + return nil, false } glog.V(1).Info("Claiming to support LoadBalancer") - if lbVersion == "v2" { - return &LbaasV2{LoadBalancer{network, compute, os.lbOpts}}, true - } else if lbVersion == "v1" { - // Since LBaaS v1 is deprecated in the OpenStack Liberty release, so deprecate LBaaSV1 at V1.8, then remove LBaaSV1 after V1.9. 
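checkOpenStackOpts now delegates metadata-order validation to the checkMetadataSearchOrder helper added further down in this file. For reference, a lightly condensed, standalone copy of that helper with a few sample inputs (the main function is illustrative only):

package main

import (
	"errors"
	"fmt"
	"strings"
)

const (
	metadataID    = "metadataService"
	configDriveID = "configDrive"
)

func checkMetadataSearchOrder(order string) error {
	if order == "" {
		return errors.New("invalid value in section [Metadata] with key `search-order`. Value cannot be empty")
	}

	elements := strings.Split(order, ",")
	if len(elements) > 2 {
		return errors.New("invalid value in section [Metadata] with key `search-order`. Value cannot contain more than 2 elements")
	}

	for _, id := range elements {
		switch strings.TrimSpace(id) {
		case configDriveID, metadataID:
		default:
			return fmt.Errorf("invalid element %q found in section [Metadata] with key `search-order`. "+
				"Supported elements include %q and %q", id, configDriveID, metadataID)
		}
	}
	return nil
}

func main() {
	for _, order := range []string{
		"configDrive,metadataService", // the default set in readConfig
		"metadataService",             // query only the metadata service
		"configDrive,foo",             // rejected: unknown element
		"",                            // rejected: empty
	} {
		fmt.Printf("%q -> %v\n", order, checkMetadataSearchOrder(order))
	}
}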
- // Reference OpenStack doc: https://docs.openstack.org/mitaka/networking-guide/config-lbaas.html - glog.Warningf("The LBaaS v1 of OpenStack cloud provider has been deprecated, Please use LBaaS v2") - return &LbaasV1{LoadBalancer{network, compute, os.lbOpts}}, true - } else { - glog.Warningf("Config error: unrecognised lb-version \"%v\"", lbVersion) - return nil, false - } + return &LbaasV2{LoadBalancer{network, compute, lb, os.lbOpts}}, true } func isNotFound(err error) bool { @@ -537,12 +534,11 @@ func isNotFound(err error) bool { func (os *OpenStack) Zones() (cloudprovider.Zones, bool) { glog.V(1).Info("Claiming to support Zones") - return os, true } func (os *OpenStack) GetZone() (cloudprovider.Zone, error) { - md, err := getMetadata() + md, err := getMetadata(os.metadataOpts.SearchOrder) if err != nil { return cloudprovider.Zone{}, err } @@ -551,8 +547,7 @@ func (os *OpenStack) GetZone() (cloudprovider.Zone, error) { FailureDomain: md.AvailabilityZone, Region: os.region, } - glog.V(1).Infof("Current zone is %v", zone) - + glog.V(4).Infof("Current zone is %v", zone) return zone, nil } @@ -580,7 +575,6 @@ func (os *OpenStack) GetZoneByProviderID(providerID string) (cloudprovider.Zone, Region: os.region, } glog.V(4).Infof("The instance %s in zone %v", srv.Name, zone) - return zone, nil } @@ -606,7 +600,6 @@ func (os *OpenStack) GetZoneByNodeName(nodeName types.NodeName) (cloudprovider.Z Region: os.region, } glog.V(4).Infof("The instance %s in zone %v", srv.Name, zone) - return zone, nil } @@ -641,53 +634,9 @@ func (os *OpenStack) Routes() (cloudprovider.Routes, bool) { } glog.V(1).Info("Claiming to support Routes") - return r, true } -// Implementation of sort interface for blockstorage version probing -type APIVersionsByID []apiversions_v1.APIVersion - -func (apiVersions APIVersionsByID) Len() int { - return len(apiVersions) -} - -func (apiVersions APIVersionsByID) Swap(i, j int) { - apiVersions[i], apiVersions[j] = apiVersions[j], apiVersions[i] -} - -func (apiVersions APIVersionsByID) Less(i, j int) bool { - return apiVersions[i].ID > apiVersions[j].ID -} - -func autoVersionSelector(apiVersion *apiversions_v1.APIVersion) string { - switch strings.ToLower(apiVersion.ID) { - case "v2.0": - return "v2" - case "v1.0": - return "v1" - default: - return "" - } -} - -func doBsApiVersionAutodetect(availableApiVersions []apiversions_v1.APIVersion) string { - sort.Sort(APIVersionsByID(availableApiVersions)) - for _, status := range []string{"CURRENT", "SUPPORTED"} { - for _, version := range availableApiVersions { - if strings.ToUpper(version.Status) == status { - if detectedApiVersion := autoVersionSelector(&version); detectedApiVersion != "" { - glog.V(3).Infof("Blockstorage API version probing has found a suitable %s api version: %s", status, detectedApiVersion) - return detectedApiVersion - } - } - } - } - - return "" - -} - func (os *OpenStack) volumeService(forceVersion string) (volumeService, error) { bsVersion := "" if forceVersion == "" { @@ -702,46 +651,71 @@ func (os *OpenStack) volumeService(forceVersion string) (volumeService, error) { if err != nil { return nil, err } + glog.V(3).Infof("Using Blockstorage API V1") return &VolumesV1{sClient, os.bsOpts}, nil case "v2": sClient, err := os.NewBlockStorageV2() if err != nil { return nil, err } + glog.V(3).Infof("Using Blockstorage API V2") return &VolumesV2{sClient, os.bsOpts}, nil - case "auto": - sClient, err := os.NewBlockStorageV1() + case "v3": + sClient, err := os.NewBlockStorageV3() if err != nil { return nil, err } - 
availableApiVersions := []apiversions_v1.APIVersion{} - err = apiversions_v1.List(sClient).EachPage(func(page pagination.Page) (bool, error) { - // returning false from this handler stops page iteration, error is propagated to the upper function - apiversions, err := apiversions_v1.ExtractAPIVersions(page) - if err != nil { - glog.Errorf("Unable to extract api versions from page: %v", err) - return false, err - } - availableApiVersions = append(availableApiVersions, apiversions...) - return true, nil - }) + glog.V(3).Infof("Using Blockstorage API V3") + return &VolumesV3{sClient, os.bsOpts}, nil + case "auto": + // Currently kubernetes support Cinder v1 / Cinder v2 / Cinder v3. + // Choose Cinder v3 firstly, if kubernetes can't initialize cinder v3 client, try to initialize cinder v2 client. + // If kubernetes can't initialize cinder v2 client, try to initialize cinder v1 client. + // Return appropriate message when kubernetes can't initialize them. + if sClient, err := os.NewBlockStorageV3(); err == nil { + glog.V(3).Infof("Using Blockstorage API V3") + return &VolumesV3{sClient, os.bsOpts}, nil + } - if err != nil { - glog.Errorf("Error when retrieving list of supported blockstorage api versions: %v", err) - return nil, err + if sClient, err := os.NewBlockStorageV2(); err == nil { + glog.V(3).Infof("Using Blockstorage API V2") + return &VolumesV2{sClient, os.bsOpts}, nil } - if autodetectedVersion := doBsApiVersionAutodetect(availableApiVersions); autodetectedVersion != "" { - return os.volumeService(autodetectedVersion) - } else { - // Nothing suitable found, failed autodetection, just exit with appropriate message - err_txt := "BlockStorage API version autodetection failed. " + - "Please set it explicitly in cloud.conf in section [BlockStorage] with key `bs-version`" - return nil, errors.New(err_txt) + + if sClient, err := os.NewBlockStorageV1(); err == nil { + glog.V(3).Infof("Using Blockstorage API V1") + return &VolumesV1{sClient, os.bsOpts}, nil } + err_txt := "BlockStorage API version autodetection failed. " + + "Please set it explicitly in cloud.conf in section [BlockStorage] with key `bs-version`" + return nil, errors.New(err_txt) default: err_txt := fmt.Sprintf("Config error: unrecognised bs-version \"%v\"", os.bsOpts.BSVersion) - glog.Warningf(err_txt) return nil, errors.New(err_txt) } } + +func checkMetadataSearchOrder(order string) error { + if order == "" { + return errors.New("invalid value in section [Metadata] with key `search-order`. Value cannot be empty") + } + + elements := strings.Split(order, ",") + if len(elements) > 2 { + return errors.New("invalid value in section [Metadata] with key `search-order`. Value cannot contain more than 2 elements") + } + + for _, id := range elements { + id = strings.TrimSpace(id) + switch id { + case configDriveID: + case metadataID: + default: + return fmt.Errorf("invalid element %q found in section [Metadata] with key `search-order`."+ + "Supported elements include %q and %q", id, configDriveID, metadataID) + } + } + + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_client.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_client.go index 916c4a09c563..04851070e4d1 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_client.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_client.go @@ -17,10 +17,10 @@ limitations under the License. 
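The rewritten "auto" branch above no longer probes the Cinder API version list; it simply tries to build a v3 client, then v2, then v1, and gives up only if all three fail. A minimal standalone sketch of that fallback pattern, using hypothetical stub constructors in place of NewBlockStorageV3/V2/V1:

package main

import (
	"errors"
	"fmt"
)

// fakeClient stands in for *gophercloud.ServiceClient in this sketch.
type fakeClient struct{ version string }

// Hypothetical stubs standing in for os.NewBlockStorageV3/V2/V1.
func newBlockStorageV3() (*fakeClient, error) { return nil, errors.New("no volumev3 endpoint") }
func newBlockStorageV2() (*fakeClient, error) { return &fakeClient{"v2"}, nil }
func newBlockStorageV1() (*fakeClient, error) { return &fakeClient{"v1"}, nil }

// pickVolumeClient mirrors the "auto" branch: prefer v3, fall back to v2, then v1.
func pickVolumeClient() (*fakeClient, error) {
	if c, err := newBlockStorageV3(); err == nil {
		return c, nil
	}
	if c, err := newBlockStorageV2(); err == nil {
		return c, nil
	}
	if c, err := newBlockStorageV1(); err == nil {
		return c, nil
	}
	return nil, errors.New("BlockStorage API version autodetection failed. " +
		"Please set it explicitly in cloud.conf in section [BlockStorage] with key `bs-version`")
}

func main() {
	c, err := pickVolumeClient()
	if err != nil {
		panic(err)
	}
	fmt.Println("using blockstorage", c.version) // prints "using blockstorage v2" with these stubs
}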
package openstack import ( + "fmt" + "github.com/gophercloud/gophercloud" "github.com/gophercloud/gophercloud/openstack" - - "github.com/golang/glog" ) func (os *OpenStack) NewNetworkV2() (*gophercloud.ServiceClient, error) { @@ -28,8 +28,7 @@ func (os *OpenStack) NewNetworkV2() (*gophercloud.ServiceClient, error) { Region: os.region, }) if err != nil { - glog.Warningf("Failed to find network v2 endpoint for region %s: %v", os.region, err) - return nil, err + return nil, fmt.Errorf("failed to find network v2 endpoint for region %s: %v", os.region, err) } return network, nil } @@ -39,8 +38,7 @@ func (os *OpenStack) NewComputeV2() (*gophercloud.ServiceClient, error) { Region: os.region, }) if err != nil { - glog.Warningf("Failed to find compute v2 endpoint for region %s: %v", os.region, err) - return nil, err + return nil, fmt.Errorf("failed to find compute v2 endpoint for region %s: %v", os.region, err) } return compute, nil } @@ -50,8 +48,7 @@ func (os *OpenStack) NewBlockStorageV1() (*gophercloud.ServiceClient, error) { Region: os.region, }) if err != nil { - glog.Errorf("Unable to initialize cinder v1 client for region %s: %v", os.region, err) - return nil, err + return nil, fmt.Errorf("unable to initialize cinder v1 client for region %s: %v", os.region, err) } return storage, nil } @@ -61,8 +58,35 @@ func (os *OpenStack) NewBlockStorageV2() (*gophercloud.ServiceClient, error) { Region: os.region, }) if err != nil { - glog.Errorf("Unable to initialize cinder v2 client for region %s: %v", os.region, err) - return nil, err + return nil, fmt.Errorf("unable to initialize cinder v2 client for region %s: %v", os.region, err) + } + return storage, nil +} + +func (os *OpenStack) NewBlockStorageV3() (*gophercloud.ServiceClient, error) { + storage, err := openstack.NewBlockStorageV3(os.provider, gophercloud.EndpointOpts{ + Region: os.region, + }) + if err != nil { + return nil, fmt.Errorf("unable to initialize cinder v3 client for region %s: %v", os.region, err) } return storage, nil } + +func (os *OpenStack) NewLoadBalancerV2() (*gophercloud.ServiceClient, error) { + var lb *gophercloud.ServiceClient + var err error + if os.lbOpts.UseOctavia { + lb, err = openstack.NewLoadBalancerV2(os.provider, gophercloud.EndpointOpts{ + Region: os.region, + }) + } else { + lb, err = openstack.NewNetworkV2(os.provider, gophercloud.EndpointOpts{ + Region: os.region, + }) + } + if err != nil { + return nil, fmt.Errorf("failed to find load-balancer v2 endpoint for region %s: %v", os.region, err) + } + return lb, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_instances.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_instances.go index a9037117b252..3cf1733b322e 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_instances.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_instances.go @@ -17,7 +17,6 @@ limitations under the License. package openstack import ( - "errors" "fmt" "regexp" @@ -32,6 +31,7 @@ import ( type Instances struct { compute *gophercloud.ServiceClient + opts MetadataOpts } // Instances returns an implementation of Instances for OpenStack. 
@@ -45,13 +45,16 @@ func (os *OpenStack) Instances() (cloudprovider.Instances, bool) { glog.V(1).Info("Claiming to support Instances") - return &Instances{compute}, true + return &Instances{ + compute: compute, + opts: os.metadataOpts, + }, true } // Implementation of Instances.CurrentNodeName // Note this is *not* necessarily the same as hostname. func (i *Instances) CurrentNodeName(hostname string) (types.NodeName, error) { - md, err := getMetadata() + md, err := getMetadata(i.opts.SearchOrder) if err != nil { return "", err } @@ -59,7 +62,7 @@ func (i *Instances) CurrentNodeName(hostname string) (types.NodeName, error) { } func (i *Instances) AddSSHKeyToAllInstances(user string, keyData []byte) error { - return errors.New("unimplemented") + return cloudprovider.NotImplemented } func (i *Instances) NodeAddresses(name types.NodeName) ([]v1.NodeAddress, error) { @@ -113,13 +116,31 @@ func (i *Instances) ExternalID(name types.NodeName) (string, error) { // InstanceExistsByProviderID returns true if the instance with the given provider id still exists and is running. // If false is returned with no error, the instance will be immediately deleted by the cloud controller manager. func (i *Instances) InstanceExistsByProviderID(providerID string) (bool, error) { - return false, errors.New("unimplemented") + instanceID, err := instanceIDFromProviderID(providerID) + if err != nil { + return false, err + } + + server, err := servers.Get(i.compute, instanceID).Extract() + if err != nil { + if isNotFound(err) { + return false, nil + } + return false, err + } + + if server.Status != "ACTIVE" { + glog.Warningf("the instance %s is not active", instanceID) + return false, nil + } + + return true, nil } // InstanceID returns the kubelet's cloud provider ID. func (os *OpenStack) InstanceID() (string, error) { if len(os.localInstanceID) == 0 { - id, err := readInstanceID() + id, err := readInstanceID(os.metadataOpts.SearchOrder) if err != nil { return "", err } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go index 19bd2ba69a9e..31c1985072a0 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go @@ -25,21 +25,21 @@ import ( "github.com/golang/glog" "github.com/gophercloud/gophercloud" "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external" "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips" "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners" "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers" v2monitors "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors" v2pools "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools" 
"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups" "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules" + "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" neutronports "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" "github.com/gophercloud/gophercloud/pagination" "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/pkg/api/v1/service" "k8s.io/kubernetes/pkg/cloudprovider" @@ -71,17 +71,10 @@ const ( // ServiceAnnotationLoadBalancerInternal is the annotation used on the service // to indicate that we want an internal loadbalancer service. - // If the value of ServiceAnnotationLoadBalancerInternal is false, it indicates that we want an external loadbalancer service. Default to true. + // If the value of ServiceAnnotationLoadBalancerInternal is false, it indicates that we want an external loadbalancer service. Default to false. ServiceAnnotationLoadBalancerInternal = "service.beta.kubernetes.io/openstack-internal-load-balancer" ) -// Deprecated; Since LBaaS v1 is deprecated in the OpenStack Liberty release, Kubernetes deprecated it at V1.8. -// TODO(FengyunPan): remove LBaaS v1 after kubernetes V1.9. -// LoadBalancer implementation for LBaaS v1 -type LbaasV1 struct { - LoadBalancer -} - // LoadBalancer implementation for LBaaS v2 type LbaasV2 struct { LoadBalancer @@ -142,76 +135,6 @@ func getFloatingIPByPortID(client *gophercloud.ServiceClient, portID string) (*f return &floatingIPList[0], nil } -func getPoolByName(client *gophercloud.ServiceClient, name string) (*pools.Pool, error) { - opts := pools.ListOpts{ - Name: name, - } - pager := pools.List(client, opts) - - poolList := make([]pools.Pool, 0, 1) - - err := pager.EachPage(func(page pagination.Page) (bool, error) { - p, err := pools.ExtractPools(page) - if err != nil { - return false, err - } - poolList = append(poolList, p...) - if len(poolList) > 1 { - return false, ErrMultipleResults - } - return true, nil - }) - if err != nil { - if isNotFound(err) { - return nil, ErrNotFound - } - return nil, err - } - - if len(poolList) == 0 { - return nil, ErrNotFound - } else if len(poolList) > 1 { - return nil, ErrMultipleResults - } - - return &poolList[0], nil -} - -func getVipByName(client *gophercloud.ServiceClient, name string) (*vips.VirtualIP, error) { - opts := vips.ListOpts{ - Name: name, - } - pager := vips.List(client, opts) - - vipList := make([]vips.VirtualIP, 0, 1) - - err := pager.EachPage(func(page pagination.Page) (bool, error) { - v, err := vips.ExtractVIPs(page) - if err != nil { - return false, err - } - vipList = append(vipList, v...) 
- if len(vipList) > 1 { - return false, ErrMultipleResults - } - return true, nil - }) - if err != nil { - if isNotFound(err) { - return nil, ErrNotFound - } - return nil, err - } - - if len(vipList) == 0 { - return nil, ErrNotFound - } else if len(vipList) > 1 { - return nil, ErrMultipleResults - } - - return &vipList[0], nil -} - func getLoadbalancerByName(client *gophercloud.ServiceClient, name string) (*loadbalancers.LoadBalancer, error) { opts := loadbalancers.ListOpts{ Name: name, @@ -371,8 +294,14 @@ func popMember(members []v2pools.Member, addr string, port int) []v2pools.Member return members } -func getSecurityGroupName(clusterName string, service *v1.Service) string { - return fmt.Sprintf("lb-sg-%s-%v", clusterName, service.Name) +func getSecurityGroupName(service *v1.Service) string { + securityGroupName := fmt.Sprintf("lb-sg-%s-%s-%s", service.UID, service.Namespace, service.Name) + //OpenStack requires that the name of a security group is shorter than 255 bytes. + if len(securityGroupName) > 255 { + securityGroupName = securityGroupName[:255] + } + + return securityGroupName } func getSecurityGroupRules(client *gophercloud.ServiceClient, opts rules.ListOpts) ([]rules.SecGroupRule, error) { @@ -414,7 +343,7 @@ func waitLoadbalancerActiveProvisioningStatus(client *gophercloud.ServiceClient, if loadbalancer.ProvisioningStatus == activeStatus { return true, nil } else if loadbalancer.ProvisioningStatus == errorStatus { - return true, fmt.Errorf("Loadbalancer has gone into ERROR state") + return true, fmt.Errorf("loadbalancer has gone into ERROR state") } else { return false, nil } @@ -422,7 +351,7 @@ func waitLoadbalancerActiveProvisioningStatus(client *gophercloud.ServiceClient, }) if err == wait.ErrWaitTimeout { - err = fmt.Errorf("Loadbalancer failed to go into ACTIVE provisioning status within alloted time") + err = fmt.Errorf("loadbalancer failed to go into ACTIVE provisioning status within alloted time") } return provisioningStatus, err } @@ -447,7 +376,7 @@ func waitLoadbalancerDeleted(client *gophercloud.ServiceClient, loadbalancerID s }) if err == wait.ErrWaitTimeout { - err = fmt.Errorf("Loadbalancer failed to delete within the alloted time") + err = fmt.Errorf("loadbalancer failed to delete within the alloted time") } return err @@ -513,6 +442,7 @@ func (lbaas *LbaasV2) createLoadBalancer(service *v1.Service, name string, inter Name: name, Description: fmt.Sprintf("Kubernetes external service %s", name), VipSubnetID: lbaas.opts.SubnetId, + Provider: lbaas.opts.LBProvider, } loadBalancerIP := service.Spec.LoadBalancerIP @@ -520,16 +450,16 @@ func (lbaas *LbaasV2) createLoadBalancer(service *v1.Service, name string, inter createOpts.VipAddress = loadBalancerIP } - loadbalancer, err := loadbalancers.Create(lbaas.network, createOpts).Extract() + loadbalancer, err := loadbalancers.Create(lbaas.lb, createOpts).Extract() if err != nil { - return nil, fmt.Errorf("Error creating loadbalancer %v: %v", createOpts, err) + return nil, fmt.Errorf("error creating loadbalancer %v: %v", createOpts, err) } return loadbalancer, nil } func (lbaas *LbaasV2) GetLoadBalancer(clusterName string, service *v1.Service) (*v1.LoadBalancerStatus, bool, error) { loadBalancerName := cloudprovider.GetLoadBalancerName(service) - loadbalancer, err := getLoadbalancerByName(lbaas.network, loadBalancerName) + loadbalancer, err := getLoadbalancerByName(lbaas.lb, loadBalancerName) if err == ErrNotFound { return nil, false, nil } @@ -538,7 +468,17 @@ func (lbaas *LbaasV2) GetLoadBalancer(clusterName string, 
service *v1.Service) ( } status := &v1.LoadBalancerStatus{} - status.Ingress = []v1.LoadBalancerIngress{{IP: loadbalancer.VipAddress}} + + portID := loadbalancer.VipPortID + if portID != "" { + floatIP, err := getFloatingIPByPortID(lbaas.network, portID) + if err != nil { + return nil, false, fmt.Errorf("error getting floating ip for port %s: %v", portID, err) + } + status.Ingress = []v1.LoadBalancerIngress{{IP: floatIP.FloatingIP}} + } else { + status.Ingress = []v1.LoadBalancerIngress{{IP: loadbalancer.VipAddress}} + } return status, true, err } @@ -605,6 +545,75 @@ func getSubnetIDForLB(compute *gophercloud.ServiceClient, node v1.Node) (string, return "", ErrNotFound } +// getNodeSecurityGroupIDForLB lists node-security-groups for specific nodes +func getNodeSecurityGroupIDForLB(compute *gophercloud.ServiceClient, nodes []*v1.Node) ([]string, error) { + nodeSecurityGroupIDs := sets.NewString() + + for _, node := range nodes { + nodeName := types.NodeName(node.Name) + srv, err := getServerByName(compute, nodeName) + if err != nil { + return nodeSecurityGroupIDs.List(), err + } + + // use the first node-security-groups + // case 0: node1:SG1 node2:SG1 return SG1 + // case 1: node1:SG1 node2:SG2 return SG1,SG2 + // case 2: node1:SG1,SG2 node2:SG3,SG4 return SG1,SG3 + // case 3: node1:SG1,SG2 node2:SG2,SG3 return SG1,SG2 + securityGroupName := srv.SecurityGroups[0]["name"] + nodeSecurityGroupIDs.Insert(securityGroupName.(string)) + } + + return nodeSecurityGroupIDs.List(), nil +} + +// getFloatingNetworkIdForLB returns a floating-network-id for cluster. +func getFloatingNetworkIdForLB(client *gophercloud.ServiceClient) (string, error) { + var floatingNetworkIds []string + + type NetworkWithExternalExt struct { + networks.Network + external.NetworkExternalExt + } + + err := networks.List(client, networks.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + var externalNetwork []NetworkWithExternalExt + err := networks.ExtractNetworksInto(page, &externalNetwork) + if err != nil { + return false, err + } + + for _, externalNet := range externalNetwork { + if externalNet.External { + floatingNetworkIds = append(floatingNetworkIds, externalNet.ID) + } + } + + if len(floatingNetworkIds) > 1 { + return false, ErrMultipleResults + } + return true, nil + }) + if err != nil { + if isNotFound(err) { + return "", ErrNotFound + } + + if err == ErrMultipleResults { + glog.V(4).Infof("find multiple external networks, pick the first one when there are no explicit configuration.") + return floatingNetworkIds[0], nil + } + return "", err + } + + if len(floatingNetworkIds) == 0 { + return "", ErrNotFound + } + + return floatingNetworkIds[0], nil +} + // TODO: This code currently ignores 'region' and always creates a // loadbalancer in only the current OpenStack region. 
We should take // a list of regions (from config) and query/create loadbalancers in @@ -613,13 +622,17 @@ func getSubnetIDForLB(compute *gophercloud.ServiceClient, node v1.Node) (string, func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) { glog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v, %v)", clusterName, apiService.Namespace, apiService.Name, apiService.Spec.LoadBalancerIP, apiService.Spec.Ports, nodes, apiService.Annotations) + if len(nodes) == 0 { + return nil, fmt.Errorf("there are no available nodes for LoadBalancer service %s/%s", apiService.Namespace, apiService.Name) + } + if len(lbaas.opts.SubnetId) == 0 { // Get SubnetId automatically. // The LB needs to be configured with instance addresses on the same subnet, so get SubnetId by one node. subnetID, err := getSubnetIDForLB(lbaas.compute, *nodes[0]) if err != nil { glog.Warningf("Failed to find subnet-id for loadbalancer service %s/%s: %v", apiService.Namespace, apiService.Name, err) - return nil, fmt.Errorf("No subnet-id for service %s/%s : subnet-id not set in cloud provider config, "+ + return nil, fmt.Errorf("no subnet-id for service %s/%s : subnet-id not set in cloud provider config, "+ "and failed to find subnet-id from OpenStack: %v", apiService.Namespace, apiService.Name, err) } lbaas.opts.SubnetId = subnetID @@ -631,23 +644,29 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv } floatingPool := getStringFromServiceAnnotation(apiService, ServiceAnnotationLoadBalancerFloatingNetworkId, lbaas.opts.FloatingNetworkId) - glog.V(4).Infof("EnsureLoadBalancer using floatingPool: %v", floatingPool) + if len(floatingPool) == 0 { + var err error + floatingPool, err = getFloatingNetworkIdForLB(lbaas.network) + if err != nil { + glog.Warningf("Failed to find floating-network-id for loadbalancer service %s/%s: %v", apiService.Namespace, apiService.Name, err) + } + } var internalAnnotation bool - internal := getStringFromServiceAnnotation(apiService, ServiceAnnotationLoadBalancerInternal, "true") + internal := getStringFromServiceAnnotation(apiService, ServiceAnnotationLoadBalancerInternal, "false") switch internal { case "true": glog.V(4).Infof("Ensure an internal loadbalancer service.") internalAnnotation = true case "false": if len(floatingPool) != 0 { - glog.V(4).Infof("Ensure an external loadbalancer service.") + glog.V(4).Infof("Ensure an external loadbalancer service, using floatingPool: %v", floatingPool) internalAnnotation = false } else { - return nil, fmt.Errorf("floating-network-id or loadbalancer.openstack.org/floating-network-id should be specified when service.beta.kubernetes.io/openstack-internal-load-balancer is false") + return nil, fmt.Errorf("floating-network-id or loadbalancer.openstack.org/floating-network-id should be specified when ensuring an external loadbalancer service") } default: - return nil, fmt.Errorf("unknow service.beta.kubernetes.io/openstack-internal-load-balancer annotation: %v, specify \"true\" or \"false\".", + return nil, fmt.Errorf("unknown service.beta.kubernetes.io/openstack-internal-load-balancer annotation: %v, specify \"true\" or \"false\" ", internal) } @@ -655,17 +674,17 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv // TODO: Convert all error messages to use an event recorder for _, port := range ports { if port.Protocol != v1.ProtocolTCP { - return nil, fmt.Errorf("Only TCP LoadBalancer is supported for openstack load 
balancers") + return nil, fmt.Errorf("only TCP LoadBalancer is supported for openstack load balancers") } } sourceRanges, err := service.GetLoadBalancerSourceRanges(apiService) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to get source ranges for loadbalancer service %s/%s: %v", apiService.Namespace, apiService.Name, err) } if !service.IsAllowAll(sourceRanges) && !lbaas.opts.ManageSecurityGroups { - return nil, fmt.Errorf("Source range restrictions are not supported for openstack load balancers without managing security groups") + return nil, fmt.Errorf("source range restrictions are not supported for openstack load balancers without managing security groups") } affinity := apiService.Spec.SessionAffinity @@ -680,37 +699,37 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv } name := cloudprovider.GetLoadBalancerName(apiService) - loadbalancer, err := getLoadbalancerByName(lbaas.network, name) + loadbalancer, err := getLoadbalancerByName(lbaas.lb, name) if err != nil { if err != ErrNotFound { - return nil, fmt.Errorf("Error getting loadbalancer %s: %v", name, err) + return nil, fmt.Errorf("error getting loadbalancer %s: %v", name, err) } glog.V(2).Infof("Creating loadbalancer %s", name) loadbalancer, err = lbaas.createLoadBalancer(apiService, name, internalAnnotation) if err != nil { // Unknown error, retry later - return nil, fmt.Errorf("Error creating loadbalancer %s: %v", name, err) + return nil, fmt.Errorf("error creating loadbalancer %s: %v", name, err) } } else { glog.V(2).Infof("LoadBalancer %s already exists", name) } - waitLoadbalancerActiveProvisioningStatus(lbaas.network, loadbalancer.ID) + waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) lbmethod := v2pools.LBMethod(lbaas.opts.LBMethod) if lbmethod == "" { lbmethod = v2pools.LBMethodRoundRobin } - oldListeners, err := getListenersByLoadBalancerID(lbaas.network, loadbalancer.ID) + oldListeners, err := getListenersByLoadBalancerID(lbaas.lb, loadbalancer.ID) if err != nil { - return nil, fmt.Errorf("Error getting LB %s listeners: %v", name, err) + return nil, fmt.Errorf("error getting LB %s listeners: %v", name, err) } for portIndex, port := range ports { listener := getListenerForPort(oldListeners, port) if listener == nil { glog.V(4).Infof("Creating listener for port %d", int(port.Port)) - listener, err = listeners.Create(lbaas.network, listeners.CreateOpts{ + listener, err = listeners.Create(lbaas.lb, listeners.CreateOpts{ Name: fmt.Sprintf("listener_%s_%d", name, portIndex), Protocol: listeners.Protocol(port.Protocol), ProtocolPort: int(port.Port), @@ -718,9 +737,9 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv }).Extract() if err != nil { // Unknown error, retry later - return nil, fmt.Errorf("Error creating LB listener: %v", err) + return nil, fmt.Errorf("error creating LB listener: %v", err) } - waitLoadbalancerActiveProvisioningStatus(lbaas.network, loadbalancer.ID) + waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) } glog.V(4).Infof("Listener for %s port %d: %s", string(port.Protocol), int(port.Port), listener.ID) @@ -728,14 +747,14 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv // After all ports have been processed, remaining listeners are removed as obsolete. // Pop valid listeners. 
oldListeners = popListener(oldListeners, listener.ID) - pool, err := getPoolByListenerID(lbaas.network, loadbalancer.ID, listener.ID) + pool, err := getPoolByListenerID(lbaas.lb, loadbalancer.ID, listener.ID) if err != nil && err != ErrNotFound { // Unknown error, retry later - return nil, fmt.Errorf("Error getting pool for listener %s: %v", listener.ID, err) + return nil, fmt.Errorf("error getting pool for listener %s: %v", listener.ID, err) } if pool == nil { glog.V(4).Infof("Creating pool for listener %s", listener.ID) - pool, err = v2pools.Create(lbaas.network, v2pools.CreateOpts{ + pool, err = v2pools.Create(lbaas.lb, v2pools.CreateOpts{ Name: fmt.Sprintf("pool_%s_%d", name, portIndex), Protocol: v2pools.Protocol(port.Protocol), LBMethod: lbmethod, @@ -744,15 +763,15 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv }).Extract() if err != nil { // Unknown error, retry later - return nil, fmt.Errorf("Error creating pool for listener %s: %v", listener.ID, err) + return nil, fmt.Errorf("error creating pool for listener %s: %v", listener.ID, err) } - waitLoadbalancerActiveProvisioningStatus(lbaas.network, loadbalancer.ID) + waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) } glog.V(4).Infof("Pool for listener %s: %s", listener.ID, pool.ID) - members, err := getMembersByPoolID(lbaas.network, pool.ID) + members, err := getMembersByPoolID(lbaas.lb, pool.ID) if err != nil && !isNotFound(err) { - return nil, fmt.Errorf("Error getting pool members %s: %v", pool.ID, err) + return nil, fmt.Errorf("error getting pool members %s: %v", pool.ID, err) } for _, node := range nodes { addr, err := nodeAddressForLB(node) @@ -762,22 +781,22 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv glog.Warningf("Failed to create LB pool member for node %s: %v", node.Name, err) continue } else { - return nil, fmt.Errorf("Error getting address for node %s: %v", node.Name, err) + return nil, fmt.Errorf("error getting address for node %s: %v", node.Name, err) } } if !memberExists(members, addr, int(port.NodePort)) { glog.V(4).Infof("Creating member for pool %s", pool.ID) - _, err := v2pools.CreateMember(lbaas.network, pool.ID, v2pools.CreateMemberOpts{ + _, err := v2pools.CreateMember(lbaas.lb, pool.ID, v2pools.CreateMemberOpts{ ProtocolPort: int(port.NodePort), Address: addr, SubnetID: lbaas.opts.SubnetId, }).Extract() if err != nil { - return nil, fmt.Errorf("Error creating LB pool member for node: %s, %v", node.Name, err) + return nil, fmt.Errorf("error creating LB pool member for node: %s, %v", node.Name, err) } - waitLoadbalancerActiveProvisioningStatus(lbaas.network, loadbalancer.ID) + waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) } else { // After all members have been processed, remaining members are deleted as obsolete. 
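The listener, pool and member handling above follows one reconcile pattern: walk the desired service ports, reuse and "pop" matching existing objects, and delete whatever is left over as obsolete. A toy, standalone illustration of that pop-and-sweep pattern over plain port numbers (not the provider's types):

package main

import "fmt"

// pop removes the first occurrence of port from existing and reports whether it was found,
// mirroring how popListener/popMember drop entries that are still wanted.
func pop(existing []int, port int) ([]int, bool) {
	for i, p := range existing {
		if p == port {
			return append(existing[:i], existing[i+1:]...), true
		}
	}
	return existing, false
}

func main() {
	existing := []int{80, 443, 8080} // what the load balancer currently has
	desired := []int{80, 443, 9090}  // what the Service currently asks for

	var toCreate []int
	var found bool
	for _, port := range desired {
		if existing, found = pop(existing, port); !found {
			toCreate = append(toCreate, port)
		}
	}

	fmt.Println("create:", toCreate)          // [9090]
	fmt.Println("delete obsolete:", existing) // [8080]
}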
members = popMember(members, addr, int(port.NodePort)) @@ -789,17 +808,17 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv // Delete obsolete members for this pool for _, member := range members { glog.V(4).Infof("Deleting obsolete member %s for pool %s address %s", member.ID, pool.ID, member.Address) - err := v2pools.DeleteMember(lbaas.network, pool.ID, member.ID).ExtractErr() + err := v2pools.DeleteMember(lbaas.lb, pool.ID, member.ID).ExtractErr() if err != nil && !isNotFound(err) { - return nil, fmt.Errorf("Error deleting obsolete member %s for pool %s address %s: %v", member.ID, pool.ID, member.Address, err) + return nil, fmt.Errorf("error deleting obsolete member %s for pool %s address %s: %v", member.ID, pool.ID, member.Address, err) } - waitLoadbalancerActiveProvisioningStatus(lbaas.network, loadbalancer.ID) + waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) } monitorID := pool.MonitorID if monitorID == "" && lbaas.opts.CreateMonitor { glog.V(4).Infof("Creating monitor for pool %s", pool.ID) - monitor, err := v2monitors.Create(lbaas.network, v2monitors.CreateOpts{ + monitor, err := v2monitors.Create(lbaas.lb, v2monitors.CreateOpts{ PoolID: pool.ID, Type: string(port.Protocol), Delay: int(lbaas.opts.MonitorDelay.Duration.Seconds()), @@ -807,9 +826,9 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv MaxRetries: int(lbaas.opts.MonitorMaxRetries), }).Extract() if err != nil { - return nil, fmt.Errorf("Error creating LB pool healthmonitor: %v", err) + return nil, fmt.Errorf("error creating LB pool healthmonitor: %v", err) } - waitLoadbalancerActiveProvisioningStatus(lbaas.network, loadbalancer.ID) + waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) monitorID = monitor.ID } else if lbaas.opts.CreateMonitor == false { glog.V(4).Infof("Do not create monitor for pool %s when create-monitor is false", pool.ID) @@ -824,61 +843,57 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv for _, listener := range oldListeners { glog.V(4).Infof("Deleting obsolete listener %s:", listener.ID) // get pool for listener - pool, err := getPoolByListenerID(lbaas.network, loadbalancer.ID, listener.ID) + pool, err := getPoolByListenerID(lbaas.lb, loadbalancer.ID, listener.ID) if err != nil && err != ErrNotFound { - return nil, fmt.Errorf("Error getting pool for obsolete listener %s: %v", listener.ID, err) + return nil, fmt.Errorf("error getting pool for obsolete listener %s: %v", listener.ID, err) } if pool != nil { // get and delete monitor monitorID := pool.MonitorID if monitorID != "" { glog.V(4).Infof("Deleting obsolete monitor %s for pool %s", monitorID, pool.ID) - err = v2monitors.Delete(lbaas.network, monitorID).ExtractErr() + err = v2monitors.Delete(lbaas.lb, monitorID).ExtractErr() if err != nil && !isNotFound(err) { - return nil, fmt.Errorf("Error deleting obsolete monitor %s for pool %s: %v", monitorID, pool.ID, err) + return nil, fmt.Errorf("error deleting obsolete monitor %s for pool %s: %v", monitorID, pool.ID, err) } - waitLoadbalancerActiveProvisioningStatus(lbaas.network, loadbalancer.ID) + waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) } // get and delete pool members - members, err := getMembersByPoolID(lbaas.network, pool.ID) + members, err := getMembersByPoolID(lbaas.lb, pool.ID) if err != nil && !isNotFound(err) { - return nil, fmt.Errorf("Error getting members for pool %s: %v", pool.ID, err) + return nil, fmt.Errorf("error getting 
members for pool %s: %v", pool.ID, err) } if members != nil { for _, member := range members { glog.V(4).Infof("Deleting obsolete member %s for pool %s address %s", member.ID, pool.ID, member.Address) - err := v2pools.DeleteMember(lbaas.network, pool.ID, member.ID).ExtractErr() + err := v2pools.DeleteMember(lbaas.lb, pool.ID, member.ID).ExtractErr() if err != nil && !isNotFound(err) { - return nil, fmt.Errorf("Error deleting obsolete member %s for pool %s address %s: %v", member.ID, pool.ID, member.Address, err) + return nil, fmt.Errorf("error deleting obsolete member %s for pool %s address %s: %v", member.ID, pool.ID, member.Address, err) } - waitLoadbalancerActiveProvisioningStatus(lbaas.network, loadbalancer.ID) + waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) } } glog.V(4).Infof("Deleting obsolete pool %s for listener %s", pool.ID, listener.ID) // delete pool - err = v2pools.Delete(lbaas.network, pool.ID).ExtractErr() + err = v2pools.Delete(lbaas.lb, pool.ID).ExtractErr() if err != nil && !isNotFound(err) { - return nil, fmt.Errorf("Error deleting obsolete pool %s for listener %s: %v", pool.ID, listener.ID, err) + return nil, fmt.Errorf("error deleting obsolete pool %s for listener %s: %v", pool.ID, listener.ID, err) } - waitLoadbalancerActiveProvisioningStatus(lbaas.network, loadbalancer.ID) + waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) } // delete listener - err = listeners.Delete(lbaas.network, listener.ID).ExtractErr() + err = listeners.Delete(lbaas.lb, listener.ID).ExtractErr() if err != nil && !isNotFound(err) { - return nil, fmt.Errorf("Error deleteting obsolete listener: %v", err) + return nil, fmt.Errorf("error deleteting obsolete listener: %v", err) } - waitLoadbalancerActiveProvisioningStatus(lbaas.network, loadbalancer.ID) + waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) glog.V(2).Infof("Deleted obsolete listener: %s", listener.ID) } - status := &v1.LoadBalancerStatus{} - - status.Ingress = []v1.LoadBalancerIngress{{IP: loadbalancer.VipAddress}} - portID := loadbalancer.VipPortID - floatIP, err := getFloatingIPByPortID(lbaas.network, portID) + floatIP, err := getFloatingIPByPortID(lbaas.lb, portID) if err != nil && err != ErrNotFound { - return nil, fmt.Errorf("Error getting floating ip for port %s: %v", portID, err) + return nil, fmt.Errorf("error getting floating ip for port %s: %v", portID, err) } if floatIP == nil && floatingPool != "" && !internalAnnotation { glog.V(4).Infof("Creating floating ip for loadbalancer %s port %s", loadbalancer.ID, portID) @@ -894,38 +909,97 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv floatIP, err = floatingips.Create(lbaas.network, floatIPOpts).Extract() if err != nil { - return nil, fmt.Errorf("Error creating LB floatingip %+v: %v", floatIPOpts, err) + return nil, fmt.Errorf("error creating LB floatingip %+v: %v", floatIPOpts, err) } } + + status := &v1.LoadBalancerStatus{} + if floatIP != nil { - status.Ingress = append(status.Ingress, v1.LoadBalancerIngress{IP: floatIP.FloatingIP}) + status.Ingress = []v1.LoadBalancerIngress{{IP: floatIP.FloatingIP}} + } else { + status.Ingress = []v1.LoadBalancerIngress{{IP: loadbalancer.VipAddress}} } if lbaas.opts.ManageSecurityGroups { + err := lbaas.ensureSecurityGroup(clusterName, apiService, nodes, loadbalancer) + if err != nil { + // cleanup what was created so far + _ = lbaas.EnsureLoadBalancerDeleted(clusterName, apiService) + return status, err + } + + // delete the old Security Group for 
the service + // Related to #53764 + // TODO(FengyunPan): Remove it at V1.10 + err = lbaas.EnsureOldSecurityGroupDeleted(clusterName, apiService) + if err != nil { + return status, fmt.Errorf("Failed to delete the Security Group for loadbalancer service %s/%s: %v", apiService.Namespace, apiService.Name, err) + } + } + + return status, nil +} + +// ensureSecurityGroup ensures security group exist for specific loadbalancer service. +// Creating security group for specific loadbalancer service when it does not exist. +func (lbaas *LbaasV2) ensureSecurityGroup(clusterName string, apiService *v1.Service, nodes []*v1.Node, loadbalancer *loadbalancers.LoadBalancer) error { + // find node-security-group for service + var err error + if len(lbaas.opts.NodeSecurityGroupIDs) == 0 { + lbaas.opts.NodeSecurityGroupIDs, err = getNodeSecurityGroupIDForLB(lbaas.compute, nodes) + if err != nil { + return fmt.Errorf("failed to find node-security-group for loadbalancer service %s/%s: %v", apiService.Namespace, apiService.Name, err) + } + } + glog.V(4).Infof("find node-security-group %v for loadbalancer service %s/%s", lbaas.opts.NodeSecurityGroupIDs, apiService.Namespace, apiService.Name) + + // get service ports + ports := apiService.Spec.Ports + if len(ports) == 0 { + return fmt.Errorf("no ports provided to openstack load balancer") + } + + // get service source ranges + sourceRanges, err := service.GetLoadBalancerSourceRanges(apiService) + if err != nil { + return fmt.Errorf("failed to get source ranges for loadbalancer service %s/%s: %v", apiService.Namespace, apiService.Name, err) + } + + // ensure security group for LB + lbSecGroupName := getSecurityGroupName(apiService) + lbSecGroupID, err := groups.IDFromName(lbaas.network, lbSecGroupName) + if err != nil { + // check whether security group does not exist + _, ok := err.(*gophercloud.ErrResourceNotFound) + if ok { + // create it later + lbSecGroupID = "" + } else { + return fmt.Errorf("error occurred finding security group: %s: %v", lbSecGroupName, err) + } + } + if len(lbSecGroupID) == 0 { + // create security group lbSecGroupCreateOpts := groups.CreateOpts{ - Name: getSecurityGroupName(clusterName, apiService), - Description: fmt.Sprintf("Securty Group for %v Service LoadBalancer", apiService.Name), + Name: getSecurityGroupName(apiService), + Description: fmt.Sprintf("Security Group for %s/%s Service LoadBalancer in cluster %s", apiService.Namespace, apiService.Name, clusterName), } lbSecGroup, err := groups.Create(lbaas.network, lbSecGroupCreateOpts).Extract() - if err != nil { - // cleanup what was created so far - _ = lbaas.EnsureLoadBalancerDeleted(clusterName, apiService) - return nil, err + return fmt.Errorf("failed to create Security Group for loadbalancer service %s/%s: %v", apiService.Namespace, apiService.Name, err) } + lbSecGroupID = lbSecGroup.ID + //add rule in security group for _, port := range ports { - for _, sourceRange := range sourceRanges.StringSlice() { ethertype := rules.EtherType4 network, _, err := net.ParseCIDR(sourceRange) if err != nil { - // cleanup what was created so far - glog.Errorf("Error parsing source range %s as a CIDR", sourceRange) - _ = lbaas.EnsureLoadBalancerDeleted(clusterName, apiService) - return nil, err + return fmt.Errorf("error parsing source range %s as a CIDR: %v", sourceRange, err) } if network.To4() == nil { @@ -945,18 +1019,9 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv _, err = rules.Create(lbaas.network, lbSecGroupRuleCreateOpts).Extract() if err != nil { 
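For reference, the new security group naming scheme used here is lb-sg-<service UID>-<namespace>-<name>, truncated to OpenStack's 255-byte name limit. A standalone sketch mirroring that helper, taking the fields directly instead of a *v1.Service (the sample values are made up):

package main

import "fmt"

// securityGroupName mirrors the getSecurityGroupName helper in this hunk,
// but takes the fields directly instead of a *v1.Service.
func securityGroupName(uid, namespace, name string) string {
	n := fmt.Sprintf("lb-sg-%s-%s-%s", uid, namespace, name)
	// OpenStack requires that the name of a security group is shorter than 255 bytes.
	if len(n) > 255 {
		n = n[:255]
	}
	return n
}

func main() {
	// Illustrative values only.
	fmt.Println(securityGroupName("c1d2e3f4", "default", "my-service"))
}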
- // cleanup what was created so far - _ = lbaas.EnsureLoadBalancerDeleted(clusterName, apiService) - return nil, err + return fmt.Errorf("error occured creating rule for SecGroup %s: %v", lbSecGroup.ID, err) } } - - err := createNodeSecurityGroup(lbaas.network, lbaas.opts.NodeSecurityGroupID, int(port.NodePort), port.Protocol, lbSecGroup.ID) - if err != nil { - glog.Errorf("Error occured creating security group for loadbalancer %s:", loadbalancer.ID) - _ = lbaas.EnsureLoadBalancerDeleted(clusterName, apiService) - return nil, err - } } lbSecGroupRuleCreateOpts := rules.CreateOpts{ @@ -972,9 +1037,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv _, err = rules.Create(lbaas.network, lbSecGroupRuleCreateOpts).Extract() if err != nil { - // cleanup what was created so far - _ = lbaas.EnsureLoadBalancerDeleted(clusterName, apiService) - return nil, err + return fmt.Errorf("error occured creating rule for SecGroup %s: %v", lbSecGroup.ID, err) } lbSecGroupRuleCreateOpts = rules.CreateOpts{ @@ -988,38 +1051,81 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(clusterName string, apiService *v1.Serv } _, err = rules.Create(lbaas.network, lbSecGroupRuleCreateOpts).Extract() - if err != nil { - // cleanup what was created so far - _ = lbaas.EnsureLoadBalancerDeleted(clusterName, apiService) - return nil, err + return fmt.Errorf("error occured creating rule for SecGroup %s: %v", lbSecGroup.ID, err) } + // get security groups of port portID := loadbalancer.VipPortID - update_opts := neutronports.UpdateOpts{SecurityGroups: &[]string{lbSecGroup.ID}} - res := neutronports.Update(lbaas.network, portID, update_opts) - if res.Err != nil { - glog.Errorf("Error occured updating port: %s", portID) - // cleanup what was created so far - _ = lbaas.EnsureLoadBalancerDeleted(clusterName, apiService) - return nil, res.Err + port, err := getPortByID(lbaas.network, portID) + if err != nil { + return err + } + + // ensure the vip port has the security groups + found := false + for _, portSecurityGroups := range port.SecurityGroups { + if portSecurityGroups == lbSecGroup.ID { + found = true + break + } + } + + // update loadbalancer vip port + if !found { + port.SecurityGroups = append(port.SecurityGroups, lbSecGroup.ID) + update_opts := neutronports.UpdateOpts{SecurityGroups: &port.SecurityGroups} + res := neutronports.Update(lbaas.network, portID, update_opts) + if res.Err != nil { + msg := fmt.Sprintf("Error occured updating port %s for loadbalancer service %s/%s: %v", portID, apiService.Namespace, apiService.Name, res.Err) + return fmt.Errorf(msg) + } } } - return status, nil + // ensure rules for every node security group + for _, port := range ports { + for _, nodeSecurityGroupID := range lbaas.opts.NodeSecurityGroupIDs { + opts := rules.ListOpts{ + Direction: string(rules.DirIngress), + SecGroupID: nodeSecurityGroupID, + RemoteGroupID: lbSecGroupID, + PortRangeMax: int(port.NodePort), + PortRangeMin: int(port.NodePort), + Protocol: string(port.Protocol), + } + secGroupRules, err := getSecurityGroupRules(lbaas.network, opts) + if err != nil && !isNotFound(err) { + msg := fmt.Sprintf("Error finding rules for remote group id %s in security group id %s: %v", lbSecGroupID, nodeSecurityGroupID, err) + return fmt.Errorf(msg) + } + if len(secGroupRules) != 0 { + // Do not add rule when find rules for remote group in the Node Security Group + continue + } + + // Add the rules in the Node Security Group + err = createNodeSecurityGroup(lbaas.network, nodeSecurityGroupID, int(port.NodePort), 
port.Protocol, lbSecGroupID) + if err != nil { + return fmt.Errorf("error occured creating security group for loadbalancer service %s/%s: %v", apiService.Namespace, apiService.Name, err) + } + } + } + + return nil } func (lbaas *LbaasV2) UpdateLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) error { loadBalancerName := cloudprovider.GetLoadBalancerName(service) glog.V(4).Infof("UpdateLoadBalancer(%v, %v, %v)", clusterName, loadBalancerName, nodes) - if len(lbaas.opts.SubnetId) == 0 { + if len(lbaas.opts.SubnetId) == 0 && len(nodes) > 0 { // Get SubnetId automatically. // The LB needs to be configured with instance addresses on the same subnet, so get SubnetId by one node. subnetID, err := getSubnetIDForLB(lbaas.compute, *nodes[0]) if err != nil { glog.Warningf("Failed to find subnet-id for loadbalancer service %s/%s: %v", service.Namespace, service.Name, err) - return fmt.Errorf("No subnet-id for service %s/%s : subnet-id not set in cloud provider config, "+ + return fmt.Errorf("no subnet-id for service %s/%s : subnet-id not set in cloud provider config, "+ "and failed to find subnet-id from OpenStack: %v", service.Namespace, service.Name, err) } lbaas.opts.SubnetId = subnetID @@ -1030,12 +1136,12 @@ func (lbaas *LbaasV2) UpdateLoadBalancer(clusterName string, service *v1.Service return fmt.Errorf("no ports provided to openstack load balancer") } - loadbalancer, err := getLoadbalancerByName(lbaas.network, loadBalancerName) + loadbalancer, err := getLoadbalancerByName(lbaas.lb, loadBalancerName) if err != nil { return err } if loadbalancer == nil { - return fmt.Errorf("Loadbalancer %s does not exist", loadBalancerName) + return fmt.Errorf("loadbalancer %s does not exist", loadBalancerName) } // Get all listeners for this loadbalancer, by "port key". @@ -1045,9 +1151,9 @@ func (lbaas *LbaasV2) UpdateLoadBalancer(clusterName string, service *v1.Service } var listenerIDs []string lbListeners := make(map[portKey]listeners.Listener) - allListeners, err := getListenersByLoadBalancerID(lbaas.network, loadbalancer.ID) + allListeners, err := getListenersByLoadBalancerID(lbaas.lb, loadbalancer.ID) if err != nil { - return fmt.Errorf("Error getting listeners for LB %s: %v", loadBalancerName, err) + return fmt.Errorf("error getting listeners for LB %s: %v", loadBalancerName, err) } for _, l := range allListeners { key := portKey{Protocol: listeners.Protocol(l.Protocol), Port: l.ProtocolPort} @@ -1058,9 +1164,9 @@ func (lbaas *LbaasV2) UpdateLoadBalancer(clusterName string, service *v1.Service // Get all pools for this loadbalancer, by listener ID. 
lbPools := make(map[string]v2pools.Pool) for _, listenerID := range listenerIDs { - pool, err := getPoolByListenerID(lbaas.network, loadbalancer.ID, listenerID) + pool, err := getPoolByListenerID(lbaas.lb, loadbalancer.ID, listenerID) if err != nil { - return fmt.Errorf("Error getting pool for listener %s: %v", listenerID, err) + return fmt.Errorf("error getting pool for listener %s: %v", listenerID, err) } lbPools[listenerID] = *pool } @@ -1083,19 +1189,19 @@ func (lbaas *LbaasV2) UpdateLoadBalancer(clusterName string, service *v1.Service Port: int(port.Port), }] if !ok { - return fmt.Errorf("Loadbalancer %s does not contain required listener for port %d and protocol %s", loadBalancerName, port.Port, port.Protocol) + return fmt.Errorf("loadbalancer %s does not contain required listener for port %d and protocol %s", loadBalancerName, port.Port, port.Protocol) } // Get pool associated with this listener pool, ok := lbPools[listener.ID] if !ok { - return fmt.Errorf("Loadbalancer %s does not contain required pool for listener %s", loadBalancerName, listener.ID) + return fmt.Errorf("loadbalancer %s does not contain required pool for listener %s", loadBalancerName, listener.ID) } // Find existing pool members (by address) for this port - getMembers, err := getMembersByPoolID(lbaas.network, pool.ID) + getMembers, err := getMembersByPoolID(lbaas.lb, pool.ID) if err != nil { - return fmt.Errorf("Error getting pool members %s: %v", pool.ID, err) + return fmt.Errorf("error getting pool members %s: %v", pool.ID, err) } members := make(map[string]v2pools.Member) for _, member := range getMembers { @@ -1108,7 +1214,7 @@ func (lbaas *LbaasV2) UpdateLoadBalancer(clusterName string, service *v1.Service // Already exists, do not create member continue } - _, err := v2pools.CreateMember(lbaas.network, pool.ID, v2pools.CreateMemberOpts{ + _, err := v2pools.CreateMember(lbaas.lb, pool.ID, v2pools.CreateMemberOpts{ Address: addr, ProtocolPort: int(port.NodePort), SubnetID: lbaas.opts.SubnetId, @@ -1116,7 +1222,7 @@ func (lbaas *LbaasV2) UpdateLoadBalancer(clusterName string, service *v1.Service if err != nil { return err } - waitLoadbalancerActiveProvisioningStatus(lbaas.network, loadbalancer.ID) + waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) } // Remove any old members for this port @@ -1125,13 +1231,101 @@ func (lbaas *LbaasV2) UpdateLoadBalancer(clusterName string, service *v1.Service // Still present, do not delete member continue } - err = v2pools.DeleteMember(lbaas.network, pool.ID, member.ID).ExtractErr() + err = v2pools.DeleteMember(lbaas.lb, pool.ID, member.ID).ExtractErr() if err != nil && !isNotFound(err) { return err } - waitLoadbalancerActiveProvisioningStatus(lbaas.network, loadbalancer.ID) + waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) + } + } + + if lbaas.opts.ManageSecurityGroups { + err := lbaas.updateSecurityGroup(clusterName, service, nodes, loadbalancer) + if err != nil { + return fmt.Errorf("failed to update Security Group for loadbalancer service %s/%s: %v", service.Namespace, service.Name, err) + } + } + + return nil +} + +// updateSecurityGroup updating security group for specific loadbalancer service. 
+func (lbaas *LbaasV2) updateSecurityGroup(clusterName string, apiService *v1.Service, nodes []*v1.Node, loadbalancer *loadbalancers.LoadBalancer) error { + originalNodeSecurityGroupIDs := lbaas.opts.NodeSecurityGroupIDs + + var err error + lbaas.opts.NodeSecurityGroupIDs, err = getNodeSecurityGroupIDForLB(lbaas.compute, nodes) + if err != nil { + return fmt.Errorf("failed to find node-security-group for loadbalancer service %s/%s: %v", apiService.Namespace, apiService.Name, err) + } + glog.V(4).Infof("find node-security-group %v for loadbalancer service %s/%s", lbaas.opts.NodeSecurityGroupIDs, apiService.Namespace, apiService.Name) + + original := sets.NewString(originalNodeSecurityGroupIDs...) + current := sets.NewString(lbaas.opts.NodeSecurityGroupIDs...) + removals := original.Difference(current) + + // Generate Name + lbSecGroupName := getSecurityGroupName(apiService) + lbSecGroupID, err := groups.IDFromName(lbaas.network, lbSecGroupName) + if err != nil { + return fmt.Errorf("error occurred finding security group: %s: %v", lbSecGroupName, err) + } + + ports := apiService.Spec.Ports + if len(ports) == 0 { + return fmt.Errorf("no ports provided to openstack load balancer") + } + + for _, port := range ports { + for removal := range removals { + // Delete the rules in the Node Security Group + opts := rules.ListOpts{ + Direction: string(rules.DirIngress), + SecGroupID: removal, + RemoteGroupID: lbSecGroupID, + PortRangeMax: int(port.NodePort), + PortRangeMin: int(port.NodePort), + Protocol: string(port.Protocol), + } + secGroupRules, err := getSecurityGroupRules(lbaas.network, opts) + if err != nil && !isNotFound(err) { + return fmt.Errorf("error finding rules for remote group id %s in security group id %s: %v", lbSecGroupID, removal, err) + } + + for _, rule := range secGroupRules { + res := rules.Delete(lbaas.network, rule.ID) + if res.Err != nil && !isNotFound(res.Err) { + return fmt.Errorf("error occurred deleting security group rule: %s: %v", rule.ID, res.Err) + } + } + } + + for _, nodeSecurityGroupID := range lbaas.opts.NodeSecurityGroupIDs { + opts := rules.ListOpts{ + Direction: string(rules.DirIngress), + SecGroupID: nodeSecurityGroupID, + RemoteGroupID: lbSecGroupID, + PortRangeMax: int(port.NodePort), + PortRangeMin: int(port.NodePort), + Protocol: string(port.Protocol), + } + secGroupRules, err := getSecurityGroupRules(lbaas.network, opts) + if err != nil && !isNotFound(err) { + return fmt.Errorf("error finding rules for remote group id %s in security group id %s: %v", lbSecGroupID, nodeSecurityGroupID, err) + } + if len(secGroupRules) != 0 { + // Do not add rule when find rules for remote group in the Node Security Group + continue + } + + // Add the rules in the Node Security Group + err = createNodeSecurityGroup(lbaas.network, nodeSecurityGroupID, int(port.NodePort), port.Protocol, lbSecGroupID) + if err != nil { + return fmt.Errorf("error occured creating security group for loadbalancer service %s/%s: %v", apiService.Namespace, apiService.Name, err) + } } } + return nil } @@ -1139,7 +1333,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(clusterName string, service *v1. 
loadBalancerName := cloudprovider.GetLoadBalancerName(service) glog.V(4).Infof("EnsureLoadBalancerDeleted(%v, %v)", clusterName, loadBalancerName) - loadbalancer, err := getLoadbalancerByName(lbaas.network, loadBalancerName) + loadbalancer, err := getLoadbalancerByName(lbaas.lb, loadBalancerName) if err != nil && err != ErrNotFound { return err } @@ -1162,18 +1356,18 @@ func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(clusterName string, service *v1. } // get all listeners associated with this loadbalancer - listenerList, err := getListenersByLoadBalancerID(lbaas.network, loadbalancer.ID) + listenerList, err := getListenersByLoadBalancerID(lbaas.lb, loadbalancer.ID) if err != nil { - return fmt.Errorf("Error getting LB %s listeners: %v", loadbalancer.ID, err) + return fmt.Errorf("error getting LB %s listeners: %v", loadbalancer.ID, err) } // get all pools (and health monitors) associated with this loadbalancer var poolIDs []string var monitorIDs []string for _, listener := range listenerList { - pool, err := getPoolByListenerID(lbaas.network, loadbalancer.ID, listener.ID) + pool, err := getPoolByListenerID(lbaas.lb, loadbalancer.ID, listener.ID) if err != nil && err != ErrNotFound { - return fmt.Errorf("Error getting pool for listener %s: %v", listener.ID, err) + return fmt.Errorf("error getting pool for listener %s: %v", listener.ID, err) } if pool != nil { poolIDs = append(poolIDs, pool.ID) @@ -1187,9 +1381,9 @@ func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(clusterName string, service *v1. // get all members associated with each poolIDs var memberIDs []string for _, pool := range poolIDs { - membersList, err := getMembersByPoolID(lbaas.network, pool) + membersList, err := getMembersByPoolID(lbaas.lb, pool) if err != nil && !isNotFound(err) { - return fmt.Errorf("Error getting pool members %s: %v", pool, err) + return fmt.Errorf("error getting pool members %s: %v", pool, err) } for _, member := range membersList { memberIDs = append(memberIDs, member.ID) @@ -1198,421 +1392,178 @@ func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(clusterName string, service *v1. 
// delete all monitors for _, monitorID := range monitorIDs { - err := v2monitors.Delete(lbaas.network, monitorID).ExtractErr() + err := v2monitors.Delete(lbaas.lb, monitorID).ExtractErr() if err != nil && !isNotFound(err) { return err } - waitLoadbalancerActiveProvisioningStatus(lbaas.network, loadbalancer.ID) + waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) } // delete all members and pools for _, poolID := range poolIDs { // delete all members for this pool for _, memberID := range memberIDs { - err := v2pools.DeleteMember(lbaas.network, poolID, memberID).ExtractErr() + err := v2pools.DeleteMember(lbaas.lb, poolID, memberID).ExtractErr() if err != nil && !isNotFound(err) { return err } - waitLoadbalancerActiveProvisioningStatus(lbaas.network, loadbalancer.ID) + waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) } // delete pool - err := v2pools.Delete(lbaas.network, poolID).ExtractErr() + err := v2pools.Delete(lbaas.lb, poolID).ExtractErr() if err != nil && !isNotFound(err) { return err } - waitLoadbalancerActiveProvisioningStatus(lbaas.network, loadbalancer.ID) + waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) } // delete all listeners for _, listener := range listenerList { - err := listeners.Delete(lbaas.network, listener.ID).ExtractErr() + err := listeners.Delete(lbaas.lb, listener.ID).ExtractErr() if err != nil && !isNotFound(err) { return err } - waitLoadbalancerActiveProvisioningStatus(lbaas.network, loadbalancer.ID) + waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) } // delete loadbalancer - err = loadbalancers.Delete(lbaas.network, loadbalancer.ID).ExtractErr() + err = loadbalancers.Delete(lbaas.lb, loadbalancer.ID).ExtractErr() if err != nil && !isNotFound(err) { return err } - waitLoadbalancerDeleted(lbaas.network, loadbalancer.ID) + waitLoadbalancerDeleted(lbaas.lb, loadbalancer.ID) // Delete the Security Group if lbaas.opts.ManageSecurityGroups { - // Generate Name - lbSecGroupName := getSecurityGroupName(clusterName, service) - lbSecGroupID, err := groups.IDFromName(lbaas.network, lbSecGroupName) + err := lbaas.EnsureSecurityGroupDeleted(clusterName, service) if err != nil { - glog.V(1).Infof("Error occurred finding security group: %s: %v", lbSecGroupName, err) - return nil + return fmt.Errorf("Failed to delete Security Group for loadbalancer service %s/%s: %v", service.Namespace, service.Name, err) } - lbSecGroup := groups.Delete(lbaas.network, lbSecGroupID) - if lbSecGroup.Err != nil && !isNotFound(lbSecGroup.Err) { - return lbSecGroup.Err - } - - // Delete the rules in the Node Security Group - opts := rules.ListOpts{ - SecGroupID: lbaas.opts.NodeSecurityGroupID, - RemoteGroupID: lbSecGroupID, - } - secGroupRules, err := getSecurityGroupRules(lbaas.network, opts) - - if err != nil && !isNotFound(err) { - glog.Errorf("Error finding rules for remote group id %s in security group id %s", lbSecGroupID, lbaas.opts.NodeSecurityGroupID) - return err - } - - for _, rule := range secGroupRules { - res := rules.Delete(lbaas.network, rule.ID) - if res.Err != nil && !isNotFound(res.Err) { - glog.V(1).Infof("Error occurred deleting security group rule: %s: %v", rule.ID, res.Err) - } + // delete the old Security Group for the service + // Related to #53764 + // TODO(FengyunPan): Remove it at V1.10 + err = lbaas.EnsureOldSecurityGroupDeleted(clusterName, service) + if err != nil { + return fmt.Errorf("Failed to delete the Security Group for loadbalancer service %s/%s: %v", service.Namespace, service.Name, 
err) } } return nil } -func (lb *LbaasV1) GetLoadBalancer(clusterName string, service *v1.Service) (*v1.LoadBalancerStatus, bool, error) { - loadBalancerName := cloudprovider.GetLoadBalancerName(service) - vip, err := getVipByName(lb.network, loadBalancerName) - if err == ErrNotFound { - return nil, false, nil - } - if vip == nil { - return nil, false, err - } - - status := &v1.LoadBalancerStatus{} - status.Ingress = []v1.LoadBalancerIngress{{IP: vip.Address}} - - return status, true, err -} - -// TODO: This code currently ignores 'region' and always creates a -// loadbalancer in only the current OpenStack region. We should take -// a list of regions (from config) and query/create loadbalancers in -// each region. - -func (lb *LbaasV1) EnsureLoadBalancer(clusterName string, apiService *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) { - glog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v, %v)", clusterName, apiService.Namespace, apiService.Name, apiService.Spec.LoadBalancerIP, apiService.Spec.Ports, nodes, apiService.Annotations) - - if len(lb.opts.SubnetId) == 0 { - // Get SubnetId automatically. - // The LB needs to be configured with instance addresses on the same subnet, so get SubnetId by one node. - subnetID, err := getSubnetIDForLB(lb.compute, *nodes[0]) - if err != nil { - glog.Warningf("Failed to find subnet-id for loadbalancer service %s/%s: %v", apiService.Namespace, apiService.Name, err) - return nil, fmt.Errorf("No subnet-id for service %s/%s : subnet-id not set in cloud provider config, "+ - "and failed to find subnet-id from OpenStack: %v", apiService.Namespace, apiService.Name, err) - } - lb.opts.SubnetId = subnetID - } - - floatingPool := getStringFromServiceAnnotation(apiService, ServiceAnnotationLoadBalancerFloatingNetworkId, lb.opts.FloatingNetworkId) - glog.V(4).Infof("EnsureLoadBalancer using floatingPool: %v", floatingPool) - - var internalAnnotation bool - internal := getStringFromServiceAnnotation(apiService, ServiceAnnotationLoadBalancerInternal, "true") - switch internal { - case "true": - glog.V(4).Infof("Ensure an internal loadbalancer service.") - internalAnnotation = true - case "false": - if len(floatingPool) != 0 { - glog.V(4).Infof("Ensure an external loadbalancer service.") - internalAnnotation = false - } else { - return nil, fmt.Errorf("floating-network-id or loadbalancer.openstack.org/floating-network-id should be specified when service.beta.kubernetes.io/openstack-internal-load-balancer is false") - } - default: - return nil, fmt.Errorf("unknow service.beta.kubernetes.io/openstack-internal-load-balancer annotation: %v, specify \"true\" or \"false\".", - internal) - } - - ports := apiService.Spec.Ports - if len(ports) > 1 { - return nil, fmt.Errorf("multiple ports are not supported in openstack v1 load balancers") - } else if len(ports) == 0 { - return nil, fmt.Errorf("no ports provided to openstack load balancer") - } - - // The service controller verified all the protocols match on the ports, just check and use the first one - // TODO: Convert all error messages to use an event recorder - if ports[0].Protocol != v1.ProtocolTCP { - return nil, fmt.Errorf("Only TCP LoadBalancer is supported for openstack load balancers") - } - - affinity := apiService.Spec.SessionAffinity - var persistence *vips.SessionPersistence - switch affinity { - case v1.ServiceAffinityNone: - persistence = nil - case v1.ServiceAffinityClientIP: - persistence = &vips.SessionPersistence{Type: "SOURCE_IP"} - default: - return nil, fmt.Errorf("unsupported load 
balancer affinity: %v", affinity) - } - - sourceRanges, err := service.GetLoadBalancerSourceRanges(apiService) - if err != nil { - return nil, err - } - - if !service.IsAllowAll(sourceRanges) { - return nil, fmt.Errorf("Source range restrictions are not supported for openstack load balancers") - } - - glog.V(2).Infof("Checking if openstack load balancer already exists: %s", cloudprovider.GetLoadBalancerName(apiService)) - _, exists, err := lb.GetLoadBalancer(clusterName, apiService) - if err != nil { - return nil, fmt.Errorf("error checking if openstack load balancer already exists: %v", err) - } - - // TODO: Implement a more efficient update strategy for common changes than delete & create - // In particular, if we implement hosts update, we can get rid of UpdateHosts - if exists { - err := lb.EnsureLoadBalancerDeleted(clusterName, apiService) - if err != nil { - return nil, fmt.Errorf("error deleting existing openstack load balancer: %v", err) - } - } - - lbmethod := pools.LBMethod(lb.opts.LBMethod) - if lbmethod == "" { - lbmethod = pools.LBMethodRoundRobin - } - name := cloudprovider.GetLoadBalancerName(apiService) - pool, err := pools.Create(lb.network, pools.CreateOpts{ - Name: name, - Protocol: pools.ProtocolTCP, - SubnetID: lb.opts.SubnetId, - LBMethod: lbmethod, - }).Extract() - if err != nil { - return nil, fmt.Errorf("Error creating pool for openstack load balancer %s: %v", name, err) - } - - for _, node := range nodes { - addr, err := nodeAddressForLB(node) - if err != nil { - return nil, err - } - - _, err = members.Create(lb.network, members.CreateOpts{ - PoolID: pool.ID, - ProtocolPort: int(ports[0].NodePort), //Note: only handles single port - Address: addr, - }).Extract() - if err != nil { - return nil, fmt.Errorf("Error creating member for the pool(%s) of openstack load balancer %s: %v", - pool.ID, name, err) - } - } - - var mon *monitors.Monitor - if lb.opts.CreateMonitor { - mon, err = monitors.Create(lb.network, monitors.CreateOpts{ - Type: monitors.TypeTCP, - Delay: int(lb.opts.MonitorDelay.Duration.Seconds()), - Timeout: int(lb.opts.MonitorTimeout.Duration.Seconds()), - MaxRetries: int(lb.opts.MonitorMaxRetries), - }).Extract() - if err != nil { - return nil, fmt.Errorf("Error creating monitor for openstack load balancer %s: %v", name, err) - } - - _, err = pools.AssociateMonitor(lb.network, pool.ID, mon.ID).Extract() - if err != nil { - return nil, fmt.Errorf("Error associating monitor(%s) with pool(%s) for"+ - "openstack load balancer %s: %v", mon.ID, pool.ID, name, err) - } - } - - createOpts := vips.CreateOpts{ - Name: name, - Description: fmt.Sprintf("Kubernetes external service %s", name), - Protocol: "TCP", - ProtocolPort: int(ports[0].Port), //TODO: need to handle multi-port - PoolID: pool.ID, - SubnetID: lb.opts.SubnetId, - Persistence: persistence, - } - - loadBalancerIP := apiService.Spec.LoadBalancerIP - if loadBalancerIP != "" && internalAnnotation { - createOpts.Address = loadBalancerIP - } - - vip, err := vips.Create(lb.network, createOpts).Extract() +// EnsureSecurityGroupDeleted deleting security group for specific loadbalancer service. 
+func (lbaas *LbaasV2) EnsureSecurityGroupDeleted(clusterName string, service *v1.Service) error { + // Generate Name + lbSecGroupName := getSecurityGroupName(service) + lbSecGroupID, err := groups.IDFromName(lbaas.network, lbSecGroupName) if err != nil { - return nil, fmt.Errorf("Error creating vip for openstack load balancer %s: %v", name, err) - } - - status := &v1.LoadBalancerStatus{} - - status.Ingress = []v1.LoadBalancerIngress{{IP: vip.Address}} - - if floatingPool != "" && !internalAnnotation { - floatIPOpts := floatingips.CreateOpts{ - FloatingNetworkID: floatingPool, - PortID: vip.PortID, - } - - loadBalancerIP := apiService.Spec.LoadBalancerIP - if loadBalancerIP != "" { - floatIPOpts.FloatingIP = loadBalancerIP - } - - floatIP, err := floatingips.Create(lb.network, floatIPOpts).Extract() - if err != nil { - return nil, fmt.Errorf("Error creating floatingip for openstack load balancer %s: %v", name, err) + // check whether security group does not exist + _, ok := err.(*gophercloud.ErrResourceNotFound) + if ok { + // It is OK when the security group has been deleted by others. + return nil + } else { + return fmt.Errorf("Error occurred finding security group: %s: %v", lbSecGroupName, err) } - - status.Ingress = append(status.Ingress, v1.LoadBalancerIngress{IP: floatIP.FloatingIP}) } - return status, nil - -} - -func (lb *LbaasV1) UpdateLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node) error { - loadBalancerName := cloudprovider.GetLoadBalancerName(service) - glog.V(4).Infof("UpdateLoadBalancer(%v, %v, %v)", clusterName, loadBalancerName, nodes) - - vip, err := getVipByName(lb.network, loadBalancerName) - if err != nil { - return err + lbSecGroup := groups.Delete(lbaas.network, lbSecGroupID) + if lbSecGroup.Err != nil && !isNotFound(lbSecGroup.Err) { + return lbSecGroup.Err } - // Set of member (addresses) that _should_ exist - addrs := map[string]bool{} - for _, node := range nodes { - addr, err := nodeAddressForLB(node) - if err != nil { - return err - } - - addrs[addr] = true - } + if len(lbaas.opts.NodeSecurityGroupIDs) == 0 { + // Just happen when nodes have not Security Group, or should not happen + // UpdateLoadBalancer and EnsureLoadBalancer can set lbaas.opts.NodeSecurityGroupIDs when it is empty + // And service controller call UpdateLoadBalancer to set lbaas.opts.NodeSecurityGroupIDs when controller manager service is restarted. 
+ glog.Warningf("Can not find node-security-group from all the nodes of this cluster when delete loadbalancer service %s/%s", + service.Namespace, service.Name) + } else { + // Delete the rules in the Node Security Group + for _, nodeSecurityGroupID := range lbaas.opts.NodeSecurityGroupIDs { + opts := rules.ListOpts{ + SecGroupID: nodeSecurityGroupID, + RemoteGroupID: lbSecGroupID, + } + secGroupRules, err := getSecurityGroupRules(lbaas.network, opts) - // Iterate over members that _do_ exist - pager := members.List(lb.network, members.ListOpts{PoolID: vip.PoolID}) - err = pager.EachPage(func(page pagination.Page) (bool, error) { - memList, err := members.ExtractMembers(page) - if err != nil { - return false, err - } + if err != nil && !isNotFound(err) { + msg := fmt.Sprintf("Error finding rules for remote group id %s in security group id %s: %v", lbSecGroupID, nodeSecurityGroupID, err) + return fmt.Errorf(msg) + } - for _, member := range memList { - if _, found := addrs[member.Address]; found { - // Member already exists - delete(addrs, member.Address) - } else { - // Member needs to be deleted - err = members.Delete(lb.network, member.ID).ExtractErr() - if err != nil { - return false, err + for _, rule := range secGroupRules { + res := rules.Delete(lbaas.network, rule.ID) + if res.Err != nil && !isNotFound(res.Err) { + return fmt.Errorf("Error occurred deleting security group rule: %s: %v", rule.ID, res.Err) } } } - - return true, nil - }) - if err != nil { - return err - } - - // Anything left in addrs is a new member that needs to be added - for addr := range addrs { - _, err := members.Create(lb.network, members.CreateOpts{ - PoolID: vip.PoolID, - Address: addr, - ProtocolPort: vip.ProtocolPort, - }).Extract() - if err != nil { - return err - } } return nil } -func (lb *LbaasV1) EnsureLoadBalancerDeleted(clusterName string, service *v1.Service) error { - loadBalancerName := cloudprovider.GetLoadBalancerName(service) - glog.V(4).Infof("EnsureLoadBalancerDeleted(%v, %v)", clusterName, loadBalancerName) - - vip, err := getVipByName(lb.network, loadBalancerName) - if err != nil && err != ErrNotFound { - return err - } +// getOldSecurityGroupName is used to get the old security group name +// Related to #53764 +// TODO(FengyunPan): Remove it at V1.10 +func getOldSecurityGroupName(clusterName string, service *v1.Service) string { + return fmt.Sprintf("lb-sg-%s-%v", clusterName, service.Name) +} - if vip != nil && vip.PortID != "" { - floatingIP, err := getFloatingIPByPortID(lb.network, vip.PortID) - if err != nil && !isNotFound(err) { - return err - } - if floatingIP != nil { - err = floatingips.Delete(lb.network, floatingIP.ID).ExtractErr() - if err != nil && !isNotFound(err) { - return err - } +// EnsureOldSecurityGroupDeleted deleting old security group for specific loadbalancer service. +// Related to #53764 +// TODO(FengyunPan): Remove it at V1.10 +func (lbaas *LbaasV2) EnsureOldSecurityGroupDeleted(clusterName string, service *v1.Service) error { + glog.V(4).Infof("EnsureOldSecurityGroupDeleted(%v, %v)", clusterName, service) + // Generate Name + lbSecGroupName := getOldSecurityGroupName(clusterName, service) + lbSecGroupID, err := groups.IDFromName(lbaas.network, lbSecGroupName) + if err != nil { + // check whether security group does not exist + _, ok := err.(*gophercloud.ErrResourceNotFound) + if ok { + // It is OK when the security group has been deleted by others. 
+ return nil + } else { + return fmt.Errorf("Error occurred finding security group: %s: %v", lbSecGroupName, err) } } - // We have to delete the VIP before the pool can be deleted, - // so no point continuing if this fails. - if vip != nil { - err := vips.Delete(lb.network, vip.ID).ExtractErr() - if err != nil && !isNotFound(err) { - return err - } + lbSecGroup := groups.Delete(lbaas.network, lbSecGroupID) + if lbSecGroup.Err != nil && !isNotFound(lbSecGroup.Err) { + return lbSecGroup.Err } - var pool *pools.Pool - if vip != nil { - pool, err = pools.Get(lb.network, vip.PoolID).Extract() - if err != nil && !isNotFound(err) { - return err - } + if len(lbaas.opts.NodeSecurityGroupIDs) == 0 { + // Just happen when nodes have not Security Group, or should not happen + // UpdateLoadBalancer and EnsureLoadBalancer can set lbaas.opts.NodeSecurityGroupIDs when it is empty + // And service controller call UpdateLoadBalancer to set lbaas.opts.NodeSecurityGroupIDs when controller manager service is restarted. + glog.Warningf("Can not find node-security-group from all the nodes of this cluster when delete loadbalancer service %s/%s", + service.Namespace, service.Name) } else { - // The VIP is gone, but it is conceivable that a Pool - // still exists that we failed to delete on some - // previous occasion. Make a best effort attempt to - // cleanup any pools with the same name as the VIP. - pool, err = getPoolByName(lb.network, service.Name) - if err != nil && err != ErrNotFound { - return err - } - } - - if pool != nil { - for _, monId := range pool.MonitorIDs { - _, err = pools.DisassociateMonitor(lb.network, pool.ID, monId).Extract() - if err != nil { - return err + // Delete the rules in the Node Security Group + for _, nodeSecurityGroupID := range lbaas.opts.NodeSecurityGroupIDs { + opts := rules.ListOpts{ + SecGroupID: nodeSecurityGroupID, + RemoteGroupID: lbSecGroupID, } + secGroupRules, err := getSecurityGroupRules(lbaas.network, opts) - err = monitors.Delete(lb.network, monId).ExtractErr() if err != nil && !isNotFound(err) { - return err + msg := fmt.Sprintf("Error finding rules for remote group id %s in security group id %s: %v", lbSecGroupID, nodeSecurityGroupID, err) + return fmt.Errorf(msg) } - } - for _, memberId := range pool.MemberIDs { - err = members.Delete(lb.network, memberId).ExtractErr() - if err != nil && !isNotFound(err) { - return err + + for _, rule := range secGroupRules { + res := rules.Delete(lbaas.network, rule.ID) + if res.Err != nil && !isNotFound(res.Err) { + return fmt.Errorf("Error occurred deleting security group rule: %s: %v", rule.ID, res.Err) + } } } - err = pools.Delete(lb.network, pool.ID).ExtractErr() - if err != nil && !isNotFound(err) { - return err - } } return nil diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_volumes.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_volumes.go index d8fb05bca7eb..e95f2330fd20 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_volumes.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_volumes.go @@ -17,18 +17,21 @@ limitations under the License. 
package openstack import ( - "errors" "fmt" "io/ioutil" "path" + "path/filepath" "strings" "time" + "k8s.io/apimachinery/pkg/api/resource" k8s_volume "k8s.io/kubernetes/pkg/volume" "github.com/gophercloud/gophercloud" + volumeexpand "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions" volumes_v1 "github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes" volumes_v2 "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes" + volumes_v3 "github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes" "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach" "github.com/prometheus/client_golang/prometheus" @@ -39,6 +42,7 @@ type volumeService interface { createVolume(opts VolumeCreateOpts) (string, string, error) getVolume(volumeID string) (Volume, error) deleteVolume(volumeName string) error + expandVolume(volumeID string, newSize int) error } // Volumes implementation for v1 @@ -53,6 +57,12 @@ type VolumesV2 struct { opts BlockStorageOpts } +// Volumes implementation for v3 +type VolumesV3 struct { + blockstorage *gophercloud.ServiceClient + opts BlockStorageOpts +} + type Volume struct { // ID of the instance, to which this volume is attached. "" if not attached AttachedServerId string @@ -64,6 +74,8 @@ type Volume struct { Name string // Current status of the volume. Status string + // Volume size in GB + Size int } type VolumeCreateOpts struct { @@ -79,6 +91,11 @@ const ( VolumeInUseStatus = "in-use" VolumeDeletedStatus = "deleted" VolumeErrorStatus = "error" + + // On some environments, we need to query the metadata service in order + // to locate disks. We'll use the Newton version, which includes device + // metadata. + NewtonMetadataVersion = "2016-06-30" ) func (volumes *VolumesV1) createVolume(opts VolumeCreateOpts) (string, string, error) { @@ -121,20 +138,40 @@ func (volumes *VolumesV2) createVolume(opts VolumeCreateOpts) (string, string, e return vol.ID, vol.AvailabilityZone, nil } +func (volumes *VolumesV3) createVolume(opts VolumeCreateOpts) (string, string, error) { + startTime := time.Now() + + create_opts := volumes_v3.CreateOpts{ + Name: opts.Name, + Size: opts.Size, + VolumeType: opts.VolumeType, + AvailabilityZone: opts.Availability, + Metadata: opts.Metadata, + } + + vol, err := volumes_v3.Create(volumes.blockstorage, create_opts).Extract() + timeTaken := time.Since(startTime).Seconds() + recordOpenstackOperationMetric("create_v3_volume", timeTaken, err) + if err != nil { + return "", "", err + } + return vol.ID, vol.AvailabilityZone, nil +} + func (volumes *VolumesV1) getVolume(volumeID string) (Volume, error) { startTime := time.Now() volumeV1, err := volumes_v1.Get(volumes.blockstorage, volumeID).Extract() timeTaken := time.Since(startTime).Seconds() recordOpenstackOperationMetric("get_v1_volume", timeTaken, err) if err != nil { - glog.Errorf("Error occurred getting volume by ID: %s", volumeID) - return Volume{}, err + return Volume{}, fmt.Errorf("error occurred getting volume by ID: %s, err: %v", volumeID, err) } volume := Volume{ ID: volumeV1.ID, Name: volumeV1.Name, Status: volumeV1.Status, + Size: volumeV1.Size, } if len(volumeV1.Attachments) > 0 && volumeV1.Attachments[0]["server_id"] != nil { @@ -151,14 +188,14 @@ func (volumes *VolumesV2) getVolume(volumeID string) (Volume, error) { timeTaken := time.Since(startTime).Seconds() recordOpenstackOperationMetric("get_v2_volume", timeTaken, err) if err != nil { - glog.Errorf("Error occurred getting volume by ID: %s", volumeID) - 
return Volume{}, err + return Volume{}, fmt.Errorf("error occurred getting volume by ID: %s, err: %v", volumeID, err) } volume := Volume{ ID: volumeV2.ID, Name: volumeV2.Name, Status: volumeV2.Status, + Size: volumeV2.Size, } if len(volumeV2.Attachments) > 0 { @@ -169,15 +206,34 @@ func (volumes *VolumesV2) getVolume(volumeID string) (Volume, error) { return volume, nil } -func (volumes *VolumesV1) deleteVolume(volumeID string) error { +func (volumes *VolumesV3) getVolume(volumeID string) (Volume, error) { startTime := time.Now() - err := volumes_v1.Delete(volumes.blockstorage, volumeID).ExtractErr() + volumeV3, err := volumes_v3.Get(volumes.blockstorage, volumeID).Extract() timeTaken := time.Since(startTime).Seconds() - recordOpenstackOperationMetric("delete_v1_volume", timeTaken, err) + recordOpenstackOperationMetric("get_v3_volume", timeTaken, err) if err != nil { - glog.Errorf("Cannot delete volume %s: %v", volumeID, err) + return Volume{}, fmt.Errorf("error occurred getting volume by ID: %s, err: %v", volumeID, err) + } + + volume := Volume{ + ID: volumeV3.ID, + Name: volumeV3.Name, + Status: volumeV3.Status, } + if len(volumeV3.Attachments) > 0 { + volume.AttachedServerId = volumeV3.Attachments[0].ServerID + volume.AttachedDevice = volumeV3.Attachments[0].Device + } + + return volume, nil +} + +func (volumes *VolumesV1) deleteVolume(volumeID string) error { + startTime := time.Now() + err := volumes_v1.Delete(volumes.blockstorage, volumeID).ExtractErr() + timeTaken := time.Since(startTime).Seconds() + recordOpenstackOperationMetric("delete_v1_volume", timeTaken, err) return err } @@ -186,10 +242,47 @@ func (volumes *VolumesV2) deleteVolume(volumeID string) error { err := volumes_v2.Delete(volumes.blockstorage, volumeID).ExtractErr() timeTaken := time.Since(startTime).Seconds() recordOpenstackOperationMetric("delete_v2_volume", timeTaken, err) - if err != nil { - glog.Errorf("Cannot delete volume %s: %v", volumeID, err) + return err +} + +func (volumes *VolumesV3) deleteVolume(volumeID string) error { + startTime := time.Now() + err := volumes_v3.Delete(volumes.blockstorage, volumeID).ExtractErr() + timeTaken := time.Since(startTime).Seconds() + recordOpenstackOperationMetric("delete_v3_volume", timeTaken, err) + return err +} + +func (volumes *VolumesV1) expandVolume(volumeID string, newSize int) error { + startTime := time.Now() + create_opts := volumeexpand.ExtendSizeOpts{ + NewSize: newSize, } + err := volumeexpand.ExtendSize(volumes.blockstorage, volumeID, create_opts).ExtractErr() + timeTaken := time.Since(startTime).Seconds() + recordOpenstackOperationMetric("expand_volume", timeTaken, err) + return err +} + +func (volumes *VolumesV2) expandVolume(volumeID string, newSize int) error { + startTime := time.Now() + create_opts := volumeexpand.ExtendSizeOpts{ + NewSize: newSize, + } + err := volumeexpand.ExtendSize(volumes.blockstorage, volumeID, create_opts).ExtractErr() + timeTaken := time.Since(startTime).Seconds() + recordOpenstackOperationMetric("expand_volume", timeTaken, err) + return err +} +func (volumes *VolumesV3) expandVolume(volumeID string, newSize int) error { + startTime := time.Now() + create_opts := volumeexpand.ExtendSizeOpts{ + NewSize: newSize, + } + err := volumeexpand.ExtendSize(volumes.blockstorage, volumeID, create_opts).ExtractErr() + timeTaken := time.Since(startTime).Seconds() + recordOpenstackOperationMetric("expand_volume", timeTaken, err) return err } @@ -200,7 +293,6 @@ func (os *OpenStack) OperationPending(diskName string) (bool, string, error) { } 
volumeStatus := volume.Status if volumeStatus == VolumeErrorStatus { - glog.Errorf("status of volume %s is %s", diskName, volumeStatus) return false, volumeStatus, nil } if volumeStatus == VolumeAvailableStatus || volumeStatus == VolumeInUseStatus || volumeStatus == VolumeDeletedStatus { @@ -226,9 +318,7 @@ func (os *OpenStack) AttachDisk(instanceID, volumeID string) (string, error) { glog.V(4).Infof("Disk %s is already attached to instance %s", volumeID, instanceID) return volume.ID, nil } - errmsg := fmt.Sprintf("Disk %s is attached to a different instance (%s)", volumeID, volume.AttachedServerId) - glog.V(2).Infof(errmsg) - return "", errors.New(errmsg) + return "", fmt.Errorf("disk %s is attached to a different instance (%s)", volumeID, volume.AttachedServerId) } startTime := time.Now() @@ -239,8 +329,7 @@ func (os *OpenStack) AttachDisk(instanceID, volumeID string) (string, error) { timeTaken := time.Since(startTime).Seconds() recordOpenstackOperationMetric("attach_disk", timeTaken, err) if err != nil { - glog.Errorf("Failed to attach %s volume to %s compute: %v", volumeID, instanceID, err) - return "", err + return "", fmt.Errorf("failed to attach %s volume to %s compute: %v", volumeID, instanceID, err) } glog.V(2).Infof("Successfully attached %s volume to %s compute", volumeID, instanceID) return volume.ID, nil @@ -259,18 +348,14 @@ func (os *OpenStack) DetachDisk(instanceID, volumeID string) error { } if volume.Status != VolumeInUseStatus { - errmsg := fmt.Sprintf("can not detach volume %s, its status is %s.", volume.Name, volume.Status) - glog.Errorf(errmsg) - return errors.New(errmsg) + return fmt.Errorf("can not detach volume %s, its status is %s", volume.Name, volume.Status) } cClient, err := os.NewComputeV2() if err != nil { return err } if volume.AttachedServerId != instanceID { - errMsg := fmt.Sprintf("Disk: %s has no attachments or is not attached to compute: %s", volume.Name, instanceID) - glog.Errorf(errMsg) - return errors.New(errMsg) + return fmt.Errorf("disk: %s has no attachments or is not attached to compute: %s", volume.Name, instanceID) } else { startTime := time.Now() // This is a blocking call and effects kubelet's performance directly. 
@@ -279,8 +364,7 @@ func (os *OpenStack) DetachDisk(instanceID, volumeID string) error { timeTaken := time.Since(startTime).Seconds() recordOpenstackOperationMetric("detach_disk", timeTaken, err) if err != nil { - glog.Errorf("Failed to delete volume %s from compute %s attached %v", volume.ID, instanceID, err) - return err + return fmt.Errorf("failed to delete volume %s from compute %s attached %v", volume.ID, instanceID, err) } glog.V(2).Infof("Successfully detached volume: %s from compute: %s", volume.ID, instanceID) } @@ -288,22 +372,53 @@ func (os *OpenStack) DetachDisk(instanceID, volumeID string) error { return nil } +// ExpandVolume expands the size of specific cinder volume (in GiB) +func (os *OpenStack) ExpandVolume(volumeID string, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error) { + volume, err := os.getVolume(volumeID) + if err != nil { + return oldSize, err + } + if volume.Status != VolumeAvailableStatus { + // cinder volume can not be expanded if its status is not available + return oldSize, fmt.Errorf("volume status is not available") + } + + volSizeBytes := newSize.Value() + // Cinder works with gigabytes, convert to GiB with rounding up + volSizeGB := int(k8s_volume.RoundUpSize(volSizeBytes, 1024*1024*1024)) + newSizeQuant := resource.MustParse(fmt.Sprintf("%dGi", volSizeGB)) + + // if volume size equals to or greater than the newSize, return nil + if volume.Size >= volSizeGB { + return newSizeQuant, nil + } + + volumes, err := os.volumeService("") + if err != nil { + return oldSize, err + } + + err = volumes.expandVolume(volumeID, volSizeGB) + if err != nil { + return oldSize, err + } + return newSizeQuant, nil +} + // getVolume retrieves Volume by its ID. func (os *OpenStack) getVolume(volumeID string) (Volume, error) { volumes, err := os.volumeService("") - if err != nil || volumes == nil { - glog.Errorf("Unable to initialize cinder client for region: %s", os.region) - return Volume{}, err + if err != nil { + return Volume{}, fmt.Errorf("unable to initialize cinder client for region: %s, err: %v", os.region, err) } return volumes.getVolume(volumeID) } // CreateVolume creates a volume of given size (in GiB) -func (os *OpenStack) CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, error) { +func (os *OpenStack) CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, bool, error) { volumes, err := os.volumeService("") - if err != nil || volumes == nil { - glog.Errorf("Unable to initialize cinder client for region: %s", os.region) - return "", "", err + if err != nil { + return "", "", os.bsOpts.IgnoreVolumeAZ, fmt.Errorf("unable to initialize cinder client for region: %s, err: %v", os.region, err) } opts := VolumeCreateOpts{ @@ -319,17 +434,17 @@ func (os *OpenStack) CreateVolume(name string, size int, vtype, availability str volumeID, volumeAZ, err := volumes.createVolume(opts) if err != nil { - glog.Errorf("Failed to create a %d GB volume: %v", size, err) - return "", "", err + return "", "", os.bsOpts.IgnoreVolumeAZ, fmt.Errorf("failed to create a %d GB volume: %v", size, err) } - glog.Infof("Created volume %v in Availability Zone: %v", volumeID, volumeAZ) - return volumeID, volumeAZ, nil + glog.Infof("Created volume %v in Availability Zone: %v Ignore volume AZ: %v", volumeID, volumeAZ, os.bsOpts.IgnoreVolumeAZ) + return volumeID, volumeAZ, os.bsOpts.IgnoreVolumeAZ, nil } // GetDevicePath returns the path of an attached block storage 
volume, specified by its id. -func (os *OpenStack) GetDevicePath(volumeID string) string { - // Build a list of candidate device paths +func (os *OpenStack) GetDevicePathBySerialId(volumeID string) string { + // Build a list of candidate device paths. + // Certain Nova drivers will set the disk serial ID, including the Cinder volume id. candidateDeviceNodes := []string{ // KVM fmt.Sprintf("virtio-%s", volumeID[:20]), @@ -350,10 +465,74 @@ func (os *OpenStack) GetDevicePath(volumeID string) string { } } - glog.Warningf("Failed to find device for the volumeID: %q\n", volumeID) + glog.V(4).Infof("Failed to find device for the volumeID: %q by serial ID", volumeID) return "" } +func (os *OpenStack) GetDevicePathFromInstanceMetadata(volumeID string) string { + // Nova Hyper-V hosts cannot override disk SCSI IDs. In order to locate + // volumes, we're querying the metadata service. Note that the Hyper-V + // driver will include device metadata for untagged volumes as well. + // + // We're avoiding using cached metadata (or the configdrive), + // relying on the metadata service. + instanceMetadata, err := getMetadataFromMetadataService( + NewtonMetadataVersion) + + if err != nil { + glog.V(4).Infof( + "Could not retrieve instance metadata. Error: %v", err) + return "" + } + + for _, device := range instanceMetadata.Devices { + if device.Type == "disk" && device.Serial == volumeID { + glog.V(4).Infof( + "Found disk metadata for volumeID %q. Bus: %q, Address: %q", + volumeID, device.Bus, device.Address) + + diskPattern := fmt.Sprintf( + "/dev/disk/by-path/*-%s-%s", + device.Bus, device.Address) + diskPaths, err := filepath.Glob(diskPattern) + if err != nil { + glog.Errorf( + "could not retrieve disk path for volumeID: %q. Error filepath.Glob(%q): %v", + volumeID, diskPattern, err) + return "" + } + + if len(diskPaths) == 1 { + return diskPaths[0] + } + + glog.Errorf( + "expecting to find one disk path for volumeID %q, found %d: %v", + volumeID, len(diskPaths), diskPaths) + return "" + } + } + + glog.V(4).Infof( + "Could not retrieve device metadata for volumeID: %q", volumeID) + return "" +} + +// GetDevicePath returns the path of an attached block storage volume, specified by its id. 
+func (os *OpenStack) GetDevicePath(volumeID string) string { + devicePath := os.GetDevicePathBySerialId(volumeID) + + if devicePath == "" { + devicePath = os.GetDevicePathFromInstanceMetadata(volumeID) + } + + if devicePath == "" { + glog.Warningf("Failed to find device for the volumeID: %q", volumeID) + } + + return devicePath +} + func (os *OpenStack) DeleteVolume(volumeID string) error { used, err := os.diskIsUsed(volumeID) if err != nil { @@ -365,16 +544,12 @@ func (os *OpenStack) DeleteVolume(volumeID string) error { } volumes, err := os.volumeService("") - if err != nil || volumes == nil { - glog.Errorf("Unable to initialize cinder client for region: %s", os.region) - return err + if err != nil { + return fmt.Errorf("unable to initialize cinder client for region: %s, err: %v", os.region, err) } err = volumes.deleteVolume(volumeID) - if err != nil { - glog.Errorf("Cannot delete volume %s: %v", volumeID, err) - } - return nil + return err } @@ -387,9 +562,7 @@ func (os *OpenStack) GetAttachmentDiskPath(instanceID, volumeID string) (string, return "", err } if volume.Status != VolumeInUseStatus { - errmsg := fmt.Sprintf("can not get device path of volume %s, its status is %s.", volume.Name, volume.Status) - glog.Errorf(errmsg) - return "", errors.New(errmsg) + return "", fmt.Errorf("can not get device path of volume %s, its status is %s ", volume.Name, volume.Status) } if volume.AttachedServerId != "" { if instanceID == volume.AttachedServerId { @@ -397,12 +570,10 @@ func (os *OpenStack) GetAttachmentDiskPath(instanceID, volumeID string) (string, // see http://developer.openstack.org/api-ref-blockstorage-v1.html return volume.AttachedDevice, nil } else { - errMsg := fmt.Sprintf("Disk %q is attached to a different compute: %q, should be detached before proceeding", volumeID, volume.AttachedServerId) - glog.Errorf(errMsg) - return "", errors.New(errMsg) + return "", fmt.Errorf("disk %q is attached to a different compute: %q, should be detached before proceeding", volumeID, volume.AttachedServerId) } } - return "", fmt.Errorf("volume %s has no ServerId.", volumeID) + return "", fmt.Errorf("volume %s has no ServerId", volumeID) } // DiskIsAttached queries if a volume is attached to a compute instance diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/ovirt/BUILD b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/ovirt/BUILD index ae8652e24ac5..a3ec5a0034a0 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/ovirt/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/ovirt/BUILD @@ -9,6 +9,7 @@ load( go_library( name = "go_default_library", srcs = ["ovirt.go"], + importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/ovirt", deps = [ "//pkg/cloudprovider:go_default_library", "//pkg/controller:go_default_library", @@ -21,6 +22,7 @@ go_library( go_test( name = "go_default_test", srcs = ["ovirt_test.go"], + importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/ovirt", library = ":go_default_library", deps = ["//pkg/cloudprovider:go_default_library"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/ovirt/ovirt.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/ovirt/ovirt.go index d25597941262..e688257ad87c 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/ovirt/ovirt.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/ovirt/ovirt.go @@ -18,7 +18,6 @@ package ovirt import ( "encoding/xml" - "errors" "fmt" "io" "io/ioutil" @@ -192,7 +191,7 @@ func (v *OVirtCloud) NodeAddresses(nodeName 
types.NodeName) ([]v1.NodeAddress, e // This method will not be called from the node that is requesting this ID. i.e. metadata service // and other local methods cannot be used here func (v *OVirtCloud) NodeAddressesByProviderID(providerID string) ([]v1.NodeAddress, error) { - return []v1.NodeAddress{}, errors.New("unimplemented") + return []v1.NodeAddress{}, cloudprovider.NotImplemented } // mapNodeNameToInstanceName maps from a k8s NodeName to an ovirt instance name (the hostname) @@ -214,7 +213,7 @@ func (v *OVirtCloud) ExternalID(nodeName types.NodeName) (string, error) { // InstanceExistsByProviderID returns true if the instance with the given provider id still exists and is running. // If false is returned with no error, the instance will be immediately deleted by the cloud controller manager. func (v *OVirtCloud) InstanceExistsByProviderID(providerID string) (bool, error) { - return false, errors.New("unimplemented") + return false, cloudprovider.NotImplemented } // InstanceID returns the cloud provider ID of the node with the specified NodeName. @@ -233,7 +232,7 @@ func (v *OVirtCloud) InstanceID(nodeName types.NodeName) (string, error) { // This method will not be called from the node that is requesting this ID. i.e. metadata service // and other local methods cannot be used here func (v *OVirtCloud) InstanceTypeByProviderID(providerID string) (string, error) { - return "", errors.New("unimplemented") + return "", cloudprovider.NotImplemented } // InstanceType returns the type of the specified instance. @@ -321,5 +320,5 @@ func (v *OVirtCloud) CurrentNodeName(hostname string) (types.NodeName, error) { } func (v *OVirtCloud) AddSSHKeyToAllInstances(user string, keyData []byte) error { - return errors.New("unimplemented") + return cloudprovider.NotImplemented } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/photon/BUILD b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/photon/BUILD index d58cc8c07fd2..052159a13e88 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/photon/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/photon/BUILD @@ -9,8 +9,9 @@ load( go_library( name = "go_default_library", srcs = ["photon.go"], + importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/photon", deps = [ - "//pkg/api/v1/helper:go_default_library", + "//pkg/apis/core/v1/helper:go_default_library", "//pkg/cloudprovider:go_default_library", "//pkg/controller:go_default_library", "//vendor/github.com/golang/glog:go_default_library", @@ -24,6 +25,7 @@ go_library( go_test( name = "go_default_test", srcs = ["photon_test.go"], + importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/photon", library = ":go_default_library", deps = [ "//pkg/cloudprovider:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/photon/photon.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/photon/photon.go index 317340997868..5071de5c5318 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/photon/photon.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/photon/photon.go @@ -38,7 +38,7 @@ import ( "gopkg.in/gcfg.v1" "k8s.io/api/core/v1" k8stypes "k8s.io/apimachinery/pkg/types" - v1helper "k8s.io/kubernetes/pkg/api/v1/helper" + v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/controller" ) @@ -421,11 +421,11 @@ func (pc *PCCloud) NodeAddresses(nodeName k8stypes.NodeName) ([]v1.NodeAddress, // This method will not be called from the node 
that is requesting this ID. i.e. metadata service // and other local methods cannot be used here func (pc *PCCloud) NodeAddressesByProviderID(providerID string) ([]v1.NodeAddress, error) { - return []v1.NodeAddress{}, errors.New("unimplemented") + return []v1.NodeAddress{}, cloudprovider.NotImplemented } func (pc *PCCloud) AddSSHKeyToAllInstances(user string, keyData []byte) error { - return errors.New("unimplemented") + return cloudprovider.NotImplemented } func (pc *PCCloud) CurrentNodeName(hostname string) (k8stypes.NodeName, error) { @@ -473,7 +473,7 @@ func (pc *PCCloud) ExternalID(nodeName k8stypes.NodeName) (string, error) { // InstanceExistsByProviderID returns true if the instance with the given provider id still exists and is running. // If false is returned with no error, the instance will be immediately deleted by the cloud controller manager. func (pc *PCCloud) InstanceExistsByProviderID(providerID string) (bool, error) { - return false, errors.New("unimplemented") + return false, cloudprovider.NotImplemented } // InstanceID returns the cloud provider ID of the specified instance. @@ -497,7 +497,7 @@ func (pc *PCCloud) InstanceID(nodeName k8stypes.NodeName) (string, error) { // This method will not be called from the node that is requesting this ID. i.e. metadata service // and other local methods cannot be used here func (pc *PCCloud) InstanceTypeByProviderID(providerID string) (string, error) { - return "", errors.New("unimplemented") + return "", cloudprovider.NotImplemented } func (pc *PCCloud) InstanceType(nodeName k8stypes.NodeName) (string, error) { @@ -637,6 +637,10 @@ func (pc *PCCloud) DiskIsAttached(pdID string, nodeName k8stypes.NodeName) (bool } vmID, err := pc.InstanceID(nodeName) + if err == cloudprovider.InstanceNotFound { + glog.Infof("Instance %q does not exist, disk %s will be detached automatically.", nodeName, pdID) + return false, nil + } if err != nil { glog.Errorf("Photon Cloud Provider: pc.InstanceID failed for DiskIsAttached. Error[%v]", err) return false, err @@ -665,6 +669,11 @@ func (pc *PCCloud) DisksAreAttached(pdIDs []string, nodeName k8stypes.NodeName) } vmID, err := pc.InstanceID(nodeName) + if err == cloudprovider.InstanceNotFound { + glog.Infof("Instance %q does not exist, its disks will be detached automatically.", nodeName) + // make all the disks as detached. + return attached, nil + } if err != nil { glog.Errorf("Photon Cloud Provider: pc.InstanceID failed for DiskIsAttached. 
Error[%v]", err) return attached, err diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/providers.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/providers.go index 89b9e6d22467..7de9ca9a41bc 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/providers.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/providers.go @@ -25,6 +25,5 @@ import ( _ "k8s.io/kubernetes/pkg/cloudprovider/providers/openstack" _ "k8s.io/kubernetes/pkg/cloudprovider/providers/ovirt" _ "k8s.io/kubernetes/pkg/cloudprovider/providers/photon" - _ "k8s.io/kubernetes/pkg/cloudprovider/providers/rackspace" _ "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere" ) diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/rackspace/BUILD b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/rackspace/BUILD deleted file mode 100644 index e45b4d1d9f1c..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/rackspace/BUILD +++ /dev/null @@ -1,48 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_library( - name = "go_default_library", - srcs = ["rackspace.go"], - deps = [ - "//pkg/cloudprovider:go_default_library", - "//pkg/controller:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/github.com/rackspace/gophercloud:go_default_library", - "//vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach:go_default_library", - "//vendor/github.com/rackspace/gophercloud/openstack/compute/v2/servers:go_default_library", - "//vendor/github.com/rackspace/gophercloud/pagination:go_default_library", - "//vendor/github.com/rackspace/gophercloud/rackspace:go_default_library", - "//vendor/github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes:go_default_library", - "//vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/servers:go_default_library", - "//vendor/github.com/rackspace/gophercloud/rackspace/compute/v2/volumeattach:go_default_library", - "//vendor/gopkg.in/gcfg.v1:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = ["rackspace_test.go"], - library = ":go_default_library", - deps = ["//vendor/github.com/rackspace/gophercloud:go_default_library"], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/rackspace/rackspace.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/rackspace/rackspace.go deleted file mode 100644 index 18fce399dc19..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/rackspace/rackspace.go +++ /dev/null @@ -1,786 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package rackspace - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net" - "os" - "regexp" - "time" - - "gopkg.in/gcfg.v1" - - "github.com/golang/glog" - "github.com/rackspace/gophercloud" - osvolumeattach "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach" - osservers "github.com/rackspace/gophercloud/openstack/compute/v2/servers" - "github.com/rackspace/gophercloud/pagination" - "github.com/rackspace/gophercloud/rackspace" - "github.com/rackspace/gophercloud/rackspace/blockstorage/v1/volumes" - "github.com/rackspace/gophercloud/rackspace/compute/v2/servers" - "github.com/rackspace/gophercloud/rackspace/compute/v2/volumeattach" - - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/cloudprovider" - "k8s.io/kubernetes/pkg/controller" -) - -const ( - ProviderName = "rackspace" - MetaDataPath = "/media/configdrive/openstack/latest/meta_data.json" - VolumeAvailableStatus = "available" - VolumeInUseStatus = "in-use" - VolumeErrorStatus = "error" -) - -var ErrNotFound = errors.New("Failed to find object") -var ErrMultipleResults = errors.New("Multiple results where only one expected") -var ErrNoAddressFound = errors.New("No address found for host") -var ErrAttrNotFound = errors.New("Expected attribute not found") - -// encoding.TextUnmarshaler interface for time.Duration -type MyDuration struct { - time.Duration -} - -func (d *MyDuration) UnmarshalText(text []byte) error { - res, err := time.ParseDuration(string(text)) - if err != nil { - return err - } - d.Duration = res - return nil -} - -type MetaData struct { - UUID string `json:"uuid"` - Name string `json:"name"` -} - -type LoadBalancerOpts struct { - SubnetId string `gcfg:"subnet-id"` // required - CreateMonitor bool `gcfg:"create-monitor"` - MonitorDelay MyDuration `gcfg:"monitor-delay"` - MonitorTimeout MyDuration `gcfg:"monitor-timeout"` - MonitorMaxRetries uint `gcfg:"monitor-max-retries"` -} - -// Rackspace is an implementation of cloud provider Interface for Rackspace. -type Rackspace struct { - provider *gophercloud.ProviderClient - region string - lbOpts LoadBalancerOpts -} - -type Config struct { - Global struct { - AuthUrl string `gcfg:"auth-url"` - Username string - UserId string `gcfg:"user-id"` - Password string - ApiKey string `gcfg:"api-key"` - TenantId string `gcfg:"tenant-id"` - TenantName string `gcfg:"tenant-name"` - DomainId string `gcfg:"domain-id"` - DomainName string `gcfg:"domain-name"` - Region string - } - LoadBalancer LoadBalancerOpts -} - -func probeNodeAddress(compute *gophercloud.ServiceClient, name string) (string, error) { - id, err := readInstanceID() - if err == nil { - srv, err := servers.Get(compute, id).Extract() - if err != nil { - return "", err - } - return getAddressByServer(srv) - } - - ip, err := getAddressByName(compute, name) - if err != nil { - return "", err - } - - return ip, nil -} - -func probeInstanceID(client *gophercloud.ServiceClient, name string) (string, error) { - // Attempt to read id from config drive. 
- id, err := readInstanceID() - if err == nil { - return id, nil - } - - // Attempt to get the server by the name from the API - server, err := getServerByName(client, name) - if err != nil { - return "", err - } - - return server.ID, nil -} - -func parseMetaData(file io.Reader) (string, error) { - metaDataBytes, err := ioutil.ReadAll(file) - if err != nil { - return "", fmt.Errorf("Cannot read %s: %v", file, err) - } - - metaData := MetaData{} - err = json.Unmarshal(metaDataBytes, &metaData) - if err != nil { - return "", fmt.Errorf("Cannot parse %s: %v", MetaDataPath, err) - } - - return metaData.UUID, nil -} - -func readInstanceID() (string, error) { - file, err := os.Open(MetaDataPath) - if err != nil { - return "", fmt.Errorf("Cannot open %s: %v", MetaDataPath, err) - } - defer file.Close() - - return parseMetaData(file) -} - -func init() { - cloudprovider.RegisterCloudProvider(ProviderName, func(config io.Reader) (cloudprovider.Interface, error) { - cfg, err := readConfig(config) - if err != nil { - return nil, err - } - return newRackspace(cfg) - }) -} - -func (cfg Config) toAuthOptions() gophercloud.AuthOptions { - return gophercloud.AuthOptions{ - IdentityEndpoint: cfg.Global.AuthUrl, - Username: cfg.Global.Username, - UserID: cfg.Global.UserId, - Password: cfg.Global.Password, - APIKey: cfg.Global.ApiKey, - TenantID: cfg.Global.TenantId, - TenantName: cfg.Global.TenantName, - - // Persistent service, so we need to be able to renew tokens - AllowReauth: true, - } -} - -func readConfig(config io.Reader) (Config, error) { - if config == nil { - err := fmt.Errorf("no Rackspace cloud provider config file given") - return Config{}, err - } - - var cfg Config - err := gcfg.ReadInto(&cfg, config) - return cfg, err -} - -func newRackspace(cfg Config) (*Rackspace, error) { - provider, err := rackspace.AuthenticatedClient(cfg.toAuthOptions()) - if err != nil { - return nil, err - } - - os := Rackspace{ - provider: provider, - region: cfg.Global.Region, - lbOpts: cfg.LoadBalancer, - } - - return &os, nil -} - -// Initialize passes a Kubernetes clientBuilder interface to the cloud provider -func (os *Rackspace) Initialize(clientBuilder controller.ControllerClientBuilder) {} - -type Instances struct { - compute *gophercloud.ServiceClient -} - -// Instances returns an implementation of Instances for Rackspace. 
-func (os *Rackspace) Instances() (cloudprovider.Instances, bool) { - glog.V(2).Info("rackspace.Instances() called") - - compute, err := os.getComputeClient() - if err != nil { - glog.Warningf("Failed to find compute endpoint: %v", err) - return nil, false - } - glog.V(1).Info("Claiming to support Instances") - - return &Instances{compute}, true -} - -func serverHasAddress(srv osservers.Server, ip string) bool { - if ip == firstAddr(srv.Addresses["private"]) { - return true - } - if ip == firstAddr(srv.Addresses["public"]) { - return true - } - if ip == srv.AccessIPv4 { - return true - } - if ip == srv.AccessIPv6 { - return true - } - return false -} - -func getServerByAddress(client *gophercloud.ServiceClient, name string) (*osservers.Server, error) { - pager := servers.List(client, nil) - - serverList := make([]osservers.Server, 0, 1) - - err := pager.EachPage(func(page pagination.Page) (bool, error) { - s, err := servers.ExtractServers(page) - if err != nil { - return false, err - } - for _, v := range s { - if serverHasAddress(v, name) { - serverList = append(serverList, v) - } - } - if len(serverList) > 1 { - return false, ErrMultipleResults - } - return true, nil - }) - if err != nil { - return nil, err - } - - if len(serverList) == 0 { - return nil, ErrNotFound - } else if len(serverList) > 1 { - return nil, ErrMultipleResults - } - - return &serverList[0], nil -} - -func getServerByName(client *gophercloud.ServiceClient, name string) (*osservers.Server, error) { - if net.ParseIP(name) != nil { - // we're an IP, so we'll have to walk the full list of servers to - // figure out which one we are. - return getServerByAddress(client, name) - } - opts := osservers.ListOpts{ - Name: fmt.Sprintf("^%s$", regexp.QuoteMeta(name)), - Status: "ACTIVE", - } - pager := servers.List(client, opts) - - serverList := make([]osservers.Server, 0, 1) - - err := pager.EachPage(func(page pagination.Page) (bool, error) { - s, err := servers.ExtractServers(page) - if err != nil { - return false, err - } - serverList = append(serverList, s...) 
- if len(serverList) > 1 { - return false, ErrMultipleResults - } - return true, nil - }) - if err != nil { - return nil, err - } - - if len(serverList) == 0 { - return nil, ErrNotFound - } else if len(serverList) > 1 { - return nil, ErrMultipleResults - } - - return &serverList[0], nil -} - -func firstAddr(netblob interface{}) string { - // Run-time types for the win :( - list, ok := netblob.([]interface{}) - if !ok || len(list) < 1 { - return "" - } - props, ok := list[0].(map[string]interface{}) - if !ok { - return "" - } - tmp, ok := props["addr"] - if !ok { - return "" - } - addr, ok := tmp.(string) - if !ok { - return "" - } - return addr -} - -func getAddressByServer(srv *osservers.Server) (string, error) { - var s string - if s == "" { - s = firstAddr(srv.Addresses["private"]) - } - if s == "" { - s = firstAddr(srv.Addresses["public"]) - } - if s == "" { - s = srv.AccessIPv4 - } - if s == "" { - s = srv.AccessIPv6 - } - if s == "" { - return "", ErrNoAddressFound - } - return s, nil -} - -func getAddressByName(api *gophercloud.ServiceClient, name string) (string, error) { - srv, err := getServerByName(api, name) - if err != nil { - return "", err - } - - return getAddressByServer(srv) -} - -func (i *Instances) NodeAddresses(nodeName types.NodeName) ([]v1.NodeAddress, error) { - glog.V(2).Infof("NodeAddresses(%v) called", nodeName) - serverName := mapNodeNameToServerName(nodeName) - ip, err := probeNodeAddress(i.compute, serverName) - if err != nil { - return nil, err - } - - glog.V(2).Infof("NodeAddresses(%v) => %v", serverName, ip) - - // net.ParseIP().String() is to maintain compatibility with the old code - parsedIP := net.ParseIP(ip).String() - return []v1.NodeAddress{ - {Type: v1.NodeInternalIP, Address: parsedIP}, - {Type: v1.NodeExternalIP, Address: parsedIP}, - }, nil -} - -// NodeAddressesByProviderID returns the node addresses of an instances with the specified unique providerID -// This method will not be called from the node that is requesting this ID. i.e. metadata service -// and other local methods cannot be used here -func (i *Instances) NodeAddressesByProviderID(providerID string) ([]v1.NodeAddress, error) { - instanceID, err := instanceIDFromProviderID(providerID) - - if err != nil { - return []v1.NodeAddress{}, err - } - - server, err := servers.Get(i.compute, instanceID).Extract() - - if err != nil { - return []v1.NodeAddress{}, err - } - - addresses, err := i.NodeAddresses(mapServerToNodeName(server)) - - if err != nil { - return []v1.NodeAddress{}, err - } - - return addresses, nil -} - -// mapNodeNameToServerName maps from a k8s NodeName to a rackspace Server Name -// This is a simple string cast. -func mapNodeNameToServerName(nodeName types.NodeName) string { - return string(nodeName) -} - -// mapServerToNodeName maps a rackspace Server to an k8s NodeName -func mapServerToNodeName(s *osservers.Server) types.NodeName { - return types.NodeName(s.Name) -} - -// ExternalID returns the cloud provider ID of the node with the specified Name (deprecated). -func (i *Instances) ExternalID(nodeName types.NodeName) (string, error) { - serverName := mapNodeNameToServerName(nodeName) - return probeInstanceID(i.compute, serverName) -} - -// InstanceExistsByProviderID returns true if the instance with the given provider id still exists and is running. -// If false is returned with no error, the instance will be immediately deleted by the cloud controller manager. 
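For context on the stub convention here: earlier in this diff the Photon provider replaces bare errors.New("unimplemented") returns with the shared cloudprovider.NotImplemented sentinel, so a caller can tell "method unsupported" apart from a real lookup failure. A minimal caller-side sketch of that convention; the helper name and the fallback choice are illustrative, not part of the patch, and assume the k8s.io/kubernetes/pkg/cloudprovider import:

// Illustrative caller: treat the NotImplemented sentinel as "unsupported",
// not as a hard failure of the lookup itself.
func instanceExists(instances cloudprovider.Instances, providerID string) (bool, error) {
	exists, err := instances.InstanceExistsByProviderID(providerID)
	if err == cloudprovider.NotImplemented {
		// The provider cannot answer this query; as a sketch we fall back
		// to assuming the instance is still present.
		return true, nil
	}
	return exists, err
}
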
-func (i *Instances) InstanceExistsByProviderID(providerID string) (bool, error) { - return false, errors.New("unimplemented") -} - -// InstanceID returns the cloud provider ID of the kubelet's instance. -func (rs *Rackspace) InstanceID() (string, error) { - return readInstanceID() -} - -// InstanceID returns the cloud provider ID of the node with the specified Name. -func (i *Instances) InstanceID(nodeName types.NodeName) (string, error) { - serverName := mapNodeNameToServerName(nodeName) - return probeInstanceID(i.compute, serverName) -} - -// InstanceType returns the type of the specified instance. -func (i *Instances) InstanceType(name types.NodeName) (string, error) { - serverName := mapNodeNameToServerName(name) - - srv, err := getServerByName(i.compute, serverName) - if err != nil { - return "", err - } - - return srvInstanceType(srv) -} - -func srvInstanceType(srv *osservers.Server) (string, error) { - val, ok := srv.Flavor["name"] - - if !ok { - return "", fmt.Errorf("flavor name not present in server info") - } - - flavor, ok := val.(string) - - if !ok { - return "", fmt.Errorf("flavor name is not a string") - } - - return flavor, nil -} - -func instanceIDFromProviderID(providerID string) (instanceID string, err error) { - var providerIDRegexp = regexp.MustCompile(`^rackspace://([^/]+)$`) - matches := providerIDRegexp.FindStringSubmatch(providerID) - if len(matches) != 2 { - return "", fmt.Errorf("ProviderID \"%s\" didn't match expected format \"rackspace://InstanceID\"", providerID) - } - - return matches[1], nil -} - -// InstanceTypeByProviderID returns the cloudprovider instance type of the node with the specified unique providerID -// This method will not be called from the node that is requesting this ID. i.e. metadata service -// and other local methods cannot be used here -func (i *Instances) InstanceTypeByProviderID(providerID string) (string, error) { - instanceID, err := instanceIDFromProviderID(providerID) - - if err != nil { - return "", err - } - - server, err := servers.Get(i.compute, instanceID).Extract() - - if err != nil { - return "", err - } - - return srvInstanceType(server) -} - -func (i *Instances) AddSSHKeyToAllInstances(user string, keyData []byte) error { - return errors.New("unimplemented") -} - -// Implementation of Instances.CurrentNodeName -func (i *Instances) CurrentNodeName(hostname string) (types.NodeName, error) { - // Beware when changing this, nodename == hostname assumption is crucial to - // apiserver => kubelet communication. - return types.NodeName(hostname), nil -} - -func (os *Rackspace) Clusters() (cloudprovider.Clusters, bool) { - return nil, false -} - -// ProviderName returns the cloud provider ID. -func (os *Rackspace) ProviderName() string { - return ProviderName -} - -// ScrubDNS filters DNS settings for pods. 
-func (os *Rackspace) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string) { - return nameservers, searches -} - -// HasClusterID returns true if the cluster has a clusterID -func (os *Rackspace) HasClusterID() bool { - return true -} - -func (os *Rackspace) LoadBalancer() (cloudprovider.LoadBalancer, bool) { - return nil, false -} - -func (os *Rackspace) Zones() (cloudprovider.Zones, bool) { - glog.V(1).Info("Claiming to support Zones") - - return os, true -} - -func (os *Rackspace) Routes() (cloudprovider.Routes, bool) { - return nil, false -} - -func (os *Rackspace) GetZone() (cloudprovider.Zone, error) { - glog.V(1).Infof("Current zone is %v", os.region) - - return cloudprovider.Zone{Region: os.region}, nil -} - -// GetZoneByProviderID implements Zones.GetZoneByProviderID -// This is particularly useful in external cloud providers where the kubelet -// does not initialize node data. -func (os *Rackspace) GetZoneByProviderID(providerID string) (cloudprovider.Zone, error) { - return cloudprovider.Zone{}, errors.New("GetZoneByProviderID not implemented") -} - -// GetZoneByNodeName implements Zones.GetZoneByNodeName -// This is particularly useful in external cloud providers where the kubelet -// does not initialize node data. -func (os *Rackspace) GetZoneByNodeName(nodeName types.NodeName) (cloudprovider.Zone, error) { - return cloudprovider.Zone{}, errors.New("GetZoneByNodeName not imeplemented") -} - -// Create a volume of given size (in GiB) -func (rs *Rackspace) CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, error) { - return "", "", errors.New("unimplemented") -} - -func (rs *Rackspace) DeleteVolume(volumeID string) error { - return errors.New("unimplemented") -} - -func (rs *Rackspace) OperationPending(diskName string) (bool, string, error) { - disk, err := rs.getVolume(diskName) - if err != nil { - return false, "", err - } - volumeStatus := disk.Status - if volumeStatus == VolumeErrorStatus { - glog.Errorf("status of volume %s is %s", diskName, volumeStatus) - return false, volumeStatus, nil - } - if volumeStatus == VolumeAvailableStatus || volumeStatus == VolumeInUseStatus { - return false, disk.Status, nil - } - return true, volumeStatus, nil -} - -// Attaches given cinder volume to the compute running kubelet -func (rs *Rackspace) AttachDisk(instanceID, volumeID string) (string, error) { - volume, err := rs.getVolume(volumeID) - if err != nil { - return "", err - } - - if volume.Status != VolumeAvailableStatus { - errmsg := fmt.Sprintf("volume %s status is %s, not %s, can not be attached to instance %s.", volume.Name, volume.Status, VolumeAvailableStatus, instanceID) - glog.Errorf(errmsg) - return "", errors.New(errmsg) - } - - compute, err := rs.getComputeClient() - if err != nil { - return "", err - } - - if len(volume.Attachments) > 0 { - if instanceID == volume.Attachments[0]["server_id"] { - glog.V(4).Infof("Volume: %q is already attached to compute: %q", volumeID, instanceID) - return volume.ID, nil - } - - errMsg := fmt.Sprintf("Volume %q is attached to a different compute: %q, should be detached before proceeding", volumeID, volume.Attachments[0]["server_id"]) - glog.Errorf(errMsg) - return "", errors.New(errMsg) - } - - _, err = volumeattach.Create(compute, instanceID, &osvolumeattach.CreateOpts{ - VolumeID: volume.ID, - }).Extract() - if err != nil { - glog.Errorf("Failed to attach %s volume to %s compute", volumeID, instanceID) - return "", err - } - glog.V(2).Infof("Successfully attached %s 
volume to %s compute", volumeID, instanceID) - return volume.ID, nil -} - -// GetDevicePath returns the path of an attached block storage volume, specified by its id. -func (rs *Rackspace) GetDevicePath(volumeID string) string { - volume, err := rs.getVolume(volumeID) - if err != nil { - return "" - } - attachments := volume.Attachments - if len(attachments) != 1 { - glog.Warningf("Unexpected number of volume attachments on %s: %d", volumeID, len(attachments)) - return "" - } - return attachments[0]["device"].(string) -} - -// Takes a partial/full disk id or volumeName -func (rs *Rackspace) getVolume(volumeID string) (*volumes.Volume, error) { - client, err := rackspace.NewBlockStorageV1(rs.provider, gophercloud.EndpointOpts{ - Region: rs.region, - }) - - volume, err := volumes.Get(client, volumeID).Extract() - if err != nil { - glog.Errorf("Error occurred getting volume by ID: %s", volumeID) - return &volumes.Volume{}, err - } - return volume, nil -} - -func (rs *Rackspace) getComputeClient() (*gophercloud.ServiceClient, error) { - client, err := rackspace.NewComputeV2(rs.provider, gophercloud.EndpointOpts{ - Region: rs.region, - }) - if err != nil || client == nil { - glog.Errorf("Unable to initialize nova client for region: %s", rs.region) - } - return client, nil -} - -// Detaches given cinder volume from the compute running kubelet -func (rs *Rackspace) DetachDisk(instanceID, volumeID string) error { - volume, err := rs.getVolume(volumeID) - if err != nil { - return err - } - - if volume.Status != VolumeInUseStatus { - errmsg := fmt.Sprintf("can not detach volume %s, its status is %s.", volume.Name, volume.Status) - glog.Errorf(errmsg) - return errors.New(errmsg) - } - - compute, err := rs.getComputeClient() - if err != nil { - return err - } - - if len(volume.Attachments) > 1 { - // Rackspace does not support "multiattach", this is a sanity check. - errmsg := fmt.Sprintf("Volume %s is attached to multiple instances, which is not supported by this provider.", volume.ID) - return errors.New(errmsg) - } - - if len(volume.Attachments) > 0 && instanceID == volume.Attachments[0]["server_id"] { - // This is a blocking call and effects kubelet's performance directly. - // We should consider kicking it out into a separate routine, if it is bad. - err = volumeattach.Delete(compute, instanceID, volume.ID).ExtractErr() - if err != nil { - glog.Errorf("Failed to delete volume %s from compute %s attached %v", volume.ID, instanceID, err) - return err - } - glog.V(2).Infof("Successfully detached volume: %s from compute: %s", volume.ID, instanceID) - } else { - errMsg := fmt.Sprintf("Disk: %s has no attachments or is not attached to compute: %s", volume.Name, instanceID) - glog.Errorf(errMsg) - return errors.New(errMsg) - } - - return nil -} - -// Get device path of attached volume to the compute running kubelet, as known by cinder -func (rs *Rackspace) GetAttachmentDiskPath(instanceID, volumeID string) (string, error) { - // See issue #33128 - Cinder does not always tell you the right device path, as such - // we must only use this value as a last resort. 
- volume, err := rs.getVolume(volumeID) - if err != nil { - return "", err - } - - if volume.Status != VolumeInUseStatus { - errmsg := fmt.Sprintf("can not get device path of volume %s, its status is %s.", volume.Name, volume.Status) - glog.Errorf(errmsg) - return "", errors.New(errmsg) - } - if len(volume.Attachments) > 0 && volume.Attachments[0]["server_id"] != nil { - if instanceID == volume.Attachments[0]["server_id"] { - // Attachment[0]["device"] points to the device path - // see http://developer.openstack.org/api-ref-blockstorage-v1.html - return volume.Attachments[0]["device"].(string), nil - } else { - errMsg := fmt.Sprintf("Disk %q is attached to a different compute: %q, should be detached before proceeding", volumeID, volume.Attachments[0]["server_id"]) - glog.Errorf(errMsg) - return "", errors.New(errMsg) - } - } - return "", fmt.Errorf("volume %s is not attached to %s", volumeID, instanceID) -} - -// query if a volume is attached to a compute instance -func (rs *Rackspace) DiskIsAttached(instanceID, volumeID string) (bool, error) { - volume, err := rs.getVolume(volumeID) - if err != nil { - return false, err - } - if len(volume.Attachments) > 0 && volume.Attachments[0]["server_id"] != nil && instanceID == volume.Attachments[0]["server_id"] { - return true, nil - } - return false, nil -} - -// query if a list volumes are attached to a compute instance -func (rs *Rackspace) DisksAreAttached(instanceID string, volumeIDs []string) (map[string]bool, error) { - attached := make(map[string]bool) - for _, volumeID := range volumeIDs { - attached[volumeID] = false - } - var returnedErr error - for _, volumeID := range volumeIDs { - result, err := rs.DiskIsAttached(instanceID, volumeID) - if err != nil { - returnedErr = fmt.Errorf("Error in checking disk %q attached: %v \n %v", volumeID, err, returnedErr) - continue - } - if result { - attached[volumeID] = true - } - - } - return attached, returnedErr -} - -// query if we should trust the cinder provide deviceName, See issue #33128 -func (rs *Rackspace) ShouldTrustDevicePath() bool { - return true -} diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/BUILD b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/BUILD index b385fa1a2e7b..91de27e7adc8 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/BUILD @@ -9,30 +9,35 @@ load( go_library( name = "go_default_library", srcs = [ + "nodemanager.go", "vsphere.go", "vsphere_util.go", ], + importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere", deps = [ - "//pkg/api/v1/helper:go_default_library", + "//pkg/apis/core/v1/helper:go_default_library", "//pkg/cloudprovider:go_default_library", "//pkg/cloudprovider/providers/vsphere/vclib:go_default_library", "//pkg/cloudprovider/providers/vsphere/vclib/diskmanagers:go_default_library", "//pkg/controller:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/vmware/govmomi:go_default_library", - "//vendor/github.com/vmware/govmomi/object:go_default_library", "//vendor/github.com/vmware/govmomi/vim25:go_default_library", "//vendor/github.com/vmware/govmomi/vim25/mo:go_default_library", "//vendor/golang.org/x/net/context:go_default_library", "//vendor/gopkg.in/gcfg.v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + 
"//vendor/k8s.io/client-go/informers:go_default_library", + "//vendor/k8s.io/client-go/tools/cache:go_default_library", ], ) go_test( name = "go_default_test", srcs = ["vsphere_test.go"], + importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere", library = ":go_default_library", deps = [ "//pkg/cloudprovider:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/nodemanager.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/nodemanager.go new file mode 100644 index 000000000000..493ea61045e3 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/nodemanager.go @@ -0,0 +1,295 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vsphere + +import ( + "fmt" + "github.com/golang/glog" + "golang.org/x/net/context" + "k8s.io/api/core/v1" + k8stypes "k8s.io/apimachinery/pkg/types" + "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib" + "strings" + "sync" +) + +// Stores info about the kubernetes node +type NodeInfo struct { + dataCenter *vclib.Datacenter + vm *vclib.VirtualMachine + vcServer string +} + +type NodeManager struct { + // TODO: replace map with concurrent map when k8s supports go v1.9 + + // Maps the VC server to VSphereInstance + vsphereInstanceMap map[string]*VSphereInstance + // Maps node name to node info. 
+ nodeInfoMap map[string]*NodeInfo + // Maps node name to node structure + registeredNodes map[string]*v1.Node + + // Mutexes + registeredNodesLock sync.RWMutex + nodeInfoLock sync.RWMutex +} + +type NodeDetails struct { + NodeName string + vm *vclib.VirtualMachine +} + +// TODO: Make it configurable in vsphere.conf +const ( + POOL_SIZE = 8 + QUEUE_SIZE = POOL_SIZE * 10 +) + +func (nm *NodeManager) DiscoverNode(node *v1.Node) error { + type VmSearch struct { + vc string + datacenter *vclib.Datacenter + } + + var mutex = &sync.Mutex{} + var globalErrMutex = &sync.Mutex{} + var queueChannel chan *VmSearch + var wg sync.WaitGroup + var globalErr *error + + queueChannel = make(chan *VmSearch, QUEUE_SIZE) + nodeUUID := node.Status.NodeInfo.SystemUUID + vmFound := false + globalErr = nil + + setGlobalErr := func(err error) { + globalErrMutex.Lock() + globalErr = &err + globalErrMutex.Unlock() + } + + setVMFound := func(found bool) { + mutex.Lock() + vmFound = found + mutex.Unlock() + } + + getVMFound := func() bool { + mutex.Lock() + found := vmFound + mutex.Unlock() + return found + } + + go func() { + var datacenterObjs []*vclib.Datacenter + for vc, vsi := range nm.vsphereInstanceMap { + + found := getVMFound() + if found == true { + break + } + + // Create context + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + err := vsi.conn.Connect(ctx) + if err != nil { + glog.V(4).Info("Discovering node error vc:", err) + setGlobalErr(err) + continue + } + + if vsi.cfg.Datacenters == "" { + datacenterObjs, err = vclib.GetAllDatacenter(ctx, vsi.conn) + if err != nil { + glog.V(4).Info("Discovering node error dc:", err) + setGlobalErr(err) + continue + } + } else { + datacenters := strings.Split(vsi.cfg.Datacenters, ",") + for _, dc := range datacenters { + dc = strings.TrimSpace(dc) + if dc == "" { + continue + } + datacenterObj, err := vclib.GetDatacenter(ctx, vsi.conn, dc) + if err != nil { + glog.V(4).Info("Discovering node error dc:", err) + setGlobalErr(err) + continue + } + datacenterObjs = append(datacenterObjs, datacenterObj) + } + } + + for _, datacenterObj := range datacenterObjs { + found := getVMFound() + if found == true { + break + } + + glog.V(4).Infof("Finding node %s in vc=%s and datacenter=%s", node.Name, vc, datacenterObj.Name()) + queueChannel <- &VmSearch{ + vc: vc, + datacenter: datacenterObj, + } + } + } + close(queueChannel) + }() + + for i := 0; i < POOL_SIZE; i++ { + go func() { + for res := range queueChannel { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + vm, err := res.datacenter.GetVMByUUID(ctx, nodeUUID) + if err != nil { + glog.V(4).Infof("Error %q while looking for vm=%+v in vc=%s and datacenter=%s", + err, node.Name, vm, res.vc, res.datacenter.Name()) + if err != vclib.ErrNoVMFound { + setGlobalErr(err) + } else { + glog.V(4).Infof("Did not find node %s in vc=%s and datacenter=%s", + node.Name, res.vc, res.datacenter.Name(), err) + } + continue + } + if vm != nil { + glog.V(4).Infof("Found node %s as vm=%+v in vc=%s and datacenter=%s", + node.Name, vm, res.vc, res.datacenter.Name()) + + nodeInfo := &NodeInfo{dataCenter: res.datacenter, vm: vm, vcServer: res.vc} + nm.addNodeInfo(node.ObjectMeta.Name, nodeInfo) + for range queueChannel { + } + setVMFound(true) + break + } + } + wg.Done() + }() + wg.Add(1) + } + wg.Wait() + if vmFound { + return nil + } + if globalErr != nil { + return *globalErr + } + + glog.V(4).Infof("Discovery Node: %q vm not found", node.Name) + return vclib.ErrNoVMFound +} + +func (nm 
*NodeManager) RegisterNode(node *v1.Node) error { + nm.addNode(node) + nm.DiscoverNode(node) + return nil +} + +func (nm *NodeManager) UnRegisterNode(node *v1.Node) error { + nm.removeNode(node) + return nil +} + +func (nm *NodeManager) RediscoverNode(nodeName k8stypes.NodeName) error { + node, err := nm.GetNode(nodeName) + + if err != nil { + return err + } + return nm.DiscoverNode(&node) +} + +func (nm *NodeManager) GetNode(nodeName k8stypes.NodeName) (v1.Node, error) { + nm.registeredNodesLock.RLock() + node := nm.registeredNodes[convertToString(nodeName)] + nm.registeredNodesLock.RUnlock() + if node == nil { + return v1.Node{}, vclib.ErrNoVMFound + } + return *node, nil +} + +func (nm *NodeManager) addNode(node *v1.Node) { + nm.registeredNodesLock.Lock() + nm.registeredNodes[node.ObjectMeta.Name] = node + nm.registeredNodesLock.Unlock() +} + +func (nm *NodeManager) removeNode(node *v1.Node) { + nm.registeredNodesLock.Lock() + delete(nm.registeredNodes, node.ObjectMeta.Name) + nm.registeredNodesLock.Unlock() +} + +// GetNodeInfo returns a NodeInfo which datacenter, vm and vc server ip address. +// This method returns an error if it is unable find node VCs and DCs listed in vSphere.conf +// NodeInfo returned may not be updated to reflect current VM location. +func (nm *NodeManager) GetNodeInfo(nodeName k8stypes.NodeName) (NodeInfo, error) { + getNodeInfo := func(nodeName k8stypes.NodeName) *NodeInfo { + nm.nodeInfoLock.RLock() + nodeInfo := nm.nodeInfoMap[convertToString(nodeName)] + nm.nodeInfoLock.RUnlock() + return nodeInfo + } + nodeInfo := getNodeInfo(nodeName) + if nodeInfo == nil { + err := nm.RediscoverNode(nodeName) + if err != nil { + glog.V(4).Infof("error %q node info for node %q not found", err, convertToString(nodeName)) + return NodeInfo{}, err + } + nodeInfo = getNodeInfo(nodeName) + } + return *nodeInfo, nil +} + +func (nm *NodeManager) GetNodeDetails() []NodeDetails { + nm.nodeInfoLock.RLock() + defer nm.nodeInfoLock.RUnlock() + var nodeDetails []NodeDetails + for nodeName, nodeInfo := range nm.nodeInfoMap { + nodeDetails = append(nodeDetails, NodeDetails{nodeName, nodeInfo.vm}) + } + return nodeDetails +} + +func (nm *NodeManager) addNodeInfo(nodeName string, nodeInfo *NodeInfo) { + nm.nodeInfoLock.Lock() + nm.nodeInfoMap[nodeName] = nodeInfo + nm.nodeInfoLock.Unlock() +} + +func (nm *NodeManager) GetVSphereInstance(nodeName k8stypes.NodeName) (VSphereInstance, error) { + nodeInfo, err := nm.GetNodeInfo(nodeName) + if err != nil { + glog.V(4).Infof("node info for node %q not found", convertToString(nodeName)) + return VSphereInstance{}, err + } + vsphereInstance := nm.vsphereInstanceMap[nodeInfo.vcServer] + if vsphereInstance == nil { + return VSphereInstance{}, fmt.Errorf("vSphereInstance for vc server %q not found while looking for node %q", nodeInfo.vcServer, convertToString(nodeName)) + } + return *vsphereInstance, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/BUILD b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/BUILD index b65aabb11acf..6750d560e4ce 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/BUILD @@ -21,6 +21,7 @@ go_library( "volumeoptions.go", "vsphere_metrics.go", ], + importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", 
diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/custom_errors.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/custom_errors.go index 391f328f426f..6709c4cf21ad 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/custom_errors.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/custom_errors.go @@ -25,6 +25,7 @@ const ( NoDevicesFoundErrMsg = "No devices found" DiskNotFoundErrMsg = "No vSphere disk ID found" InvalidVolumeOptionsErrMsg = "VolumeOptions verification failed" + NoVMFoundErrMsg = "No VM found" ) // Error constants @@ -34,4 +35,5 @@ var ( ErrNoDevicesFound = errors.New(NoDevicesFoundErrMsg) ErrNoDiskIDFound = errors.New(DiskNotFoundErrMsg) ErrInvalidVolumeOptions = errors.New(InvalidVolumeOptionsErrMsg) + ErrNoVMFound = errors.New(NoVMFoundErrMsg) ) diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/datacenter.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/datacenter.go index f4dc1c455ae9..d325c72dfe1a 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/datacenter.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/datacenter.go @@ -19,6 +19,7 @@ package vclib import ( "errors" "fmt" + "path/filepath" "strings" "github.com/golang/glog" @@ -48,6 +49,22 @@ func GetDatacenter(ctx context.Context, connection *VSphereConnection, datacente return &dc, nil } +// GetAllDatacenter returns all the DataCenter Objects +func GetAllDatacenter(ctx context.Context, connection *VSphereConnection) ([]*Datacenter, error) { + var dc []*Datacenter + finder := find.NewFinder(connection.GoVmomiClient.Client, true) + datacenters, err := finder.DatacenterList(ctx, "*") + if err != nil { + glog.Errorf("Failed to find the datacenter. err: %+v", err) + return nil, err + } + for _, datacenter := range datacenters { + dc = append(dc, &(Datacenter{datacenter})) + } + + return dc, nil +} + // GetVMByUUID gets the VM object from the given vmUUID func (dc *Datacenter) GetVMByUUID(ctx context.Context, vmUUID string) (*VirtualMachine, error) { s := object.NewSearchIndex(dc.Client()) @@ -59,7 +76,7 @@ func (dc *Datacenter) GetVMByUUID(ctx context.Context, vmUUID string) (*VirtualM } if svm == nil { glog.Errorf("Unable to find VM by UUID. VM UUID: %s", vmUUID) - return nil, fmt.Errorf("Failed to find VM by UUID: %s", vmUUID) + return nil, ErrNoVMFound } virtualMachine := VirtualMachine{object.NewVirtualMachine(dc.Client(), svm.Reference()), dc} return &virtualMachine, nil @@ -78,6 +95,41 @@ func (dc *Datacenter) GetVMByPath(ctx context.Context, vmPath string) (*VirtualM return &virtualMachine, nil } +// GetAllDatastores gets the datastore URL to DatastoreInfo map for all the datastores in +// the datacenter. +func (dc *Datacenter) GetAllDatastores(ctx context.Context) (map[string]*DatastoreInfo, error) { + finder := getFinder(dc) + datastores, err := finder.DatastoreList(ctx, "*") + if err != nil { + glog.Errorf("Failed to get all the datastores. 
err: %+v", err) + return nil, err + } + var dsList []types.ManagedObjectReference + for _, ds := range datastores { + dsList = append(dsList, ds.Reference()) + } + + var dsMoList []mo.Datastore + pc := property.DefaultCollector(dc.Client()) + properties := []string{DatastoreInfoProperty} + err = pc.Retrieve(ctx, dsList, properties, &dsMoList) + if err != nil { + glog.Errorf("Failed to get Datastore managed objects from datastore objects."+ + " dsObjList: %+v, properties: %+v, err: %v", dsList, properties, err) + return nil, err + } + + dsURLInfoMap := make(map[string]*DatastoreInfo) + for _, dsMo := range dsMoList { + dsURLInfoMap[dsMo.Info.GetDatastoreInfo().Url] = &DatastoreInfo{ + &Datastore{object.NewDatastore(dc.Client(), dsMo.Reference()), + dc}, + dsMo.Info.GetDatastoreInfo()} + } + glog.V(9).Infof("dsURLInfoMap : %+v", dsURLInfoMap) + return dsURLInfoMap, nil +} + // GetDatastoreByPath gets the Datastore object from the given vmDiskPath func (dc *Datacenter) GetDatastoreByPath(ctx context.Context, vmDiskPath string) (*Datastore, error) { datastorePathObj := new(object.DatastorePath) @@ -108,6 +160,23 @@ func (dc *Datacenter) GetDatastoreByName(ctx context.Context, name string) (*Dat return &datastore, nil } +// GetResourcePool gets the resource pool for the given path +func (dc *Datacenter) GetResourcePool(ctx context.Context, computePath string) (*object.ResourcePool, error) { + finder := getFinder(dc) + var computeResource *object.ComputeResource + var err error + if computePath == "" { + computeResource, err = finder.DefaultComputeResource(ctx) + } else { + computeResource, err = finder.ComputeResource(ctx, computePath) + } + if err != nil { + glog.Errorf("Failed to get the ResourcePool for computePath '%s'. err: %+v", computePath, err) + return nil, err + } + return computeResource.ResourcePool(ctx) +} + // GetFolderByPath gets the Folder Object from the given folder path // folderPath should be the full path to folder func (dc *Datacenter) GetFolderByPath(ctx context.Context, folderPath string) (*Folder, error) { @@ -142,6 +211,23 @@ func (dc *Datacenter) GetVMMoList(ctx context.Context, vmObjList []*VirtualMachi return vmMoList, nil } +// GetVirtualDiskPage83Data gets the virtual disk UUID by diskPath +func (dc *Datacenter) GetVirtualDiskPage83Data(ctx context.Context, diskPath string) (string, error) { + if len(diskPath) > 0 && filepath.Ext(diskPath) != ".vmdk" { + diskPath += ".vmdk" + } + vdm := object.NewVirtualDiskManager(dc.Client()) + // Returns uuid of vmdk virtual disk + diskUUID, err := vdm.QueryVirtualDiskUuid(ctx, diskPath, dc.Datacenter) + + if err != nil { + glog.Warningf("QueryVirtualDiskUuid failed for diskPath: %q. err: %+v", diskPath, err) + return "", err + } + diskUUID = formatVirtualDiskUUID(diskUUID) + return diskUUID, nil +} + // GetDatastoreMoList gets the Datastore Managed Objects with the given properties from the datastore objects func (dc *Datacenter) GetDatastoreMoList(ctx context.Context, dsObjList []*Datastore, properties []string) ([]mo.Datastore, error) { var dsMoList []mo.Datastore @@ -162,3 +248,78 @@ func (dc *Datacenter) GetDatastoreMoList(ctx context.Context, dsObjList []*Datas } return dsMoList, nil } + +// CheckDisksAttached checks if the disk is attached to node. +// This is done by comparing the volume path with the backing.FilePath on the VM Virtual disk devices. 
+func (dc *Datacenter) CheckDisksAttached(ctx context.Context, nodeVolumes map[string][]string) (map[string]map[string]bool, error) { + attached := make(map[string]map[string]bool) + var vmList []*VirtualMachine + for nodeName, volPaths := range nodeVolumes { + for _, volPath := range volPaths { + setNodeVolumeMap(attached, volPath, nodeName, false) + } + vm, err := dc.GetVMByPath(ctx, nodeName) + if err != nil { + if IsNotFound(err) { + glog.Warningf("Node %q does not exist, vSphere CP will assume disks %v are not attached to it.", nodeName, volPaths) + } + continue + } + vmList = append(vmList, vm) + } + if len(vmList) == 0 { + glog.V(2).Infof("vSphere CP will assume no disks are attached to any node.") + return attached, nil + } + vmMoList, err := dc.GetVMMoList(ctx, vmList, []string{"config.hardware.device", "name"}) + if err != nil { + // When there is an error fetching instance information + // it is safer to return nil and let volume information not be touched. + glog.Errorf("Failed to get VM Managed object for nodes: %+v. err: +%v", vmList, err) + return nil, err + } + + for _, vmMo := range vmMoList { + if vmMo.Config == nil { + glog.Errorf("Config is not available for VM: %q", vmMo.Name) + continue + } + for nodeName, volPaths := range nodeVolumes { + if nodeName == vmMo.Name { + verifyVolumePathsForVM(vmMo, volPaths, attached) + } + } + } + return attached, nil +} + +// VerifyVolumePathsForVM verifies if the volume paths (volPaths) are attached to VM. +func verifyVolumePathsForVM(vmMo mo.VirtualMachine, volPaths []string, nodeVolumeMap map[string]map[string]bool) { + // Verify if the volume paths are present on the VM backing virtual disk devices + for _, volPath := range volPaths { + vmDevices := object.VirtualDeviceList(vmMo.Config.Hardware.Device) + for _, device := range vmDevices { + if vmDevices.TypeName(device) == "VirtualDisk" { + virtualDevice := device.GetVirtualDevice() + if backing, ok := virtualDevice.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok { + if backing.FileName == volPath { + setNodeVolumeMap(nodeVolumeMap, volPath, vmMo.Name, true) + } + } + } + } + } +} + +func setNodeVolumeMap( + nodeVolumeMap map[string]map[string]bool, + volumePath string, + nodeName string, + check bool) { + volumeMap := nodeVolumeMap[nodeName] + if volumeMap == nil { + volumeMap = make(map[string]bool) + nodeVolumeMap[nodeName] = volumeMap + } + volumeMap[volumePath] = check +} diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/datastore.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/datastore.go index 1901af189091..8fba424bbd91 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/datastore.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/datastore.go @@ -17,6 +17,7 @@ limitations under the License. package vclib import ( + "fmt" "github.com/golang/glog" "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/property" @@ -32,6 +33,16 @@ type Datastore struct { Datacenter *Datacenter } +// DatastoreInfo is a structure to store the Datastore and it's Info. +type DatastoreInfo struct { + *Datastore + Info *types.DatastoreInfo +} + +func (di DatastoreInfo) String() string { + return fmt.Sprintf("Datastore: %+v, datastore URL: %s", di.Datastore, di.Info.Url) +} + // CreateDirectory creates the directory at location specified by directoryPath. 
// If the intermediate level folders do not exist, and the parameter createParents is true, all the non-existent folders are created. // directoryPath must be in the format "[vsanDatastore] kubevols" diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/BUILD b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/BUILD index 8608c978cadb..6a888195e65a 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/BUILD @@ -12,6 +12,7 @@ go_library( "virtualdisk.go", "vmdm.go", ], + importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers", deps = [ "//pkg/cloudprovider/providers/vsphere/vclib:go_default_library", "//vendor/github.com/golang/glog:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/vdm.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/vdm.go index 50f065fc1b14..3e6d9b2ecd9d 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/vdm.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/vdm.go @@ -35,7 +35,7 @@ type virtualDiskManager struct { // Create implements Disk's Create interface // Contains implementation of virtualDiskManager based Provisioning -func (diskManager virtualDiskManager) Create(ctx context.Context, datastore *vclib.Datastore) (err error) { +func (diskManager virtualDiskManager) Create(ctx context.Context, datastore *vclib.Datastore) (canonicalDiskPath string, err error) { if diskManager.volumeOptions.SCSIControllerType == "" { diskManager.volumeOptions.SCSIControllerType = vclib.LSILogicControllerType } @@ -57,25 +57,26 @@ func (diskManager virtualDiskManager) Create(ctx context.Context, datastore *vcl if err != nil { vclib.RecordvSphereMetric(vclib.APICreateVolume, requestTime, err) glog.Errorf("Failed to create virtual disk: %s. err: %+v", diskManager.diskPath, err) - return err + return "", err } - err = task.Wait(ctx) + taskInfo, err := task.WaitForResult(ctx, nil) vclib.RecordvSphereMetric(vclib.APICreateVolume, requestTime, err) if err != nil { - glog.Errorf("Failed to create virtual disk: %s. err: %+v", diskManager.diskPath, err) - return err + glog.Errorf("Failed to complete virtual disk creation: %s. err: %+v", diskManager.diskPath, err) + return "", err } - return nil + canonicalDiskPath = taskInfo.Result.(string) + return canonicalDiskPath, nil } // Delete implements Disk's Delete interface -func (diskManager virtualDiskManager) Delete(ctx context.Context, datastore *vclib.Datastore) error { +func (diskManager virtualDiskManager) Delete(ctx context.Context, datacenter *vclib.Datacenter) error { // Create a virtual disk manager - virtualDiskManager := object.NewVirtualDiskManager(datastore.Client()) - diskPath := vclib.RemoveClusterFromVDiskPath(diskManager.diskPath) + virtualDiskManager := object.NewVirtualDiskManager(datacenter.Client()) + diskPath := vclib.RemoveStorageClusterORFolderNameFromVDiskPath(diskManager.diskPath) requestTime := time.Now() // Delete virtual disk - task, err := virtualDiskManager.DeleteVirtualDisk(ctx, diskPath, datastore.Datacenter.Datacenter) + task, err := virtualDiskManager.DeleteVirtualDisk(ctx, diskPath, datacenter.Datacenter) if err != nil { glog.Errorf("Failed to delete virtual disk. 
err: %v", err) vclib.RecordvSphereMetric(vclib.APIDeleteVolume, requestTime, err) diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/virtualdisk.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/virtualdisk.go index 80000da384e7..533f49ece307 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/virtualdisk.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/virtualdisk.go @@ -39,8 +39,8 @@ const ( // VirtualDiskProvider defines interfaces for creating disk type VirtualDiskProvider interface { - Create(ctx context.Context, datastore *vclib.Datastore) error - Delete(ctx context.Context, datastore *vclib.Datastore) error + Create(ctx context.Context, datastore *vclib.Datastore) (string, error) + Delete(ctx context.Context, datacenter *vclib.Datacenter) error } // getDiskManager returns vmDiskManager or vdmDiskManager based on given volumeoptions @@ -60,21 +60,21 @@ func getDiskManager(disk *VirtualDisk, diskOperation string) VirtualDiskProvider } // Create gets appropriate disk manager and calls respective create method -func (virtualDisk *VirtualDisk) Create(ctx context.Context, datastore *vclib.Datastore) error { +func (virtualDisk *VirtualDisk) Create(ctx context.Context, datastore *vclib.Datastore) (string, error) { if virtualDisk.VolumeOptions.DiskFormat == "" { virtualDisk.VolumeOptions.DiskFormat = vclib.ThinDiskType } if !virtualDisk.VolumeOptions.VerifyVolumeOptions() { glog.Error("VolumeOptions verification failed. volumeOptions: ", virtualDisk.VolumeOptions) - return vclib.ErrInvalidVolumeOptions + return "", vclib.ErrInvalidVolumeOptions } if virtualDisk.VolumeOptions.StoragePolicyID != "" && virtualDisk.VolumeOptions.StoragePolicyName != "" { - return fmt.Errorf("Storage Policy ID and Storage Policy Name both set, Please set only one parameter") + return "", fmt.Errorf("Storage Policy ID and Storage Policy Name both set, Please set only one parameter") } return getDiskManager(virtualDisk, VirtualDiskCreateOperation).Create(ctx, datastore) } // Delete gets appropriate disk manager and calls respective delete method -func (virtualDisk *VirtualDisk) Delete(ctx context.Context, datastore *vclib.Datastore) error { - return getDiskManager(virtualDisk, VirtualDiskDeleteOperation).Delete(ctx, datastore) +func (virtualDisk *VirtualDisk) Delete(ctx context.Context, datacenter *vclib.Datacenter) error { + return getDiskManager(virtualDisk, VirtualDiskDeleteOperation).Delete(ctx, datacenter) } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/vmdm.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/vmdm.go index 6e3302edd0c3..6942dffb7f86 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/vmdm.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/vmdm.go @@ -37,33 +37,33 @@ type vmDiskManager struct { // Create implements Disk's Create interface // Contains implementation of VM based Provisioning to provision disk with SPBM Policy or VSANStorageProfileData -func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datastore) (err error) { +func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datastore) (canonicalDiskPath string, err error) { if vmdisk.volumeOptions.SCSIControllerType == "" { vmdisk.volumeOptions.SCSIControllerType = 
vclib.PVSCSIControllerType } pbmClient, err := vclib.NewPbmClient(ctx, datastore.Client()) if err != nil { glog.Errorf("Error occurred while creating new pbmClient, err: %+v", err) - return err + return "", err } if vmdisk.volumeOptions.StoragePolicyID == "" && vmdisk.volumeOptions.StoragePolicyName != "" { vmdisk.volumeOptions.StoragePolicyID, err = pbmClient.ProfileIDByName(ctx, vmdisk.volumeOptions.StoragePolicyName) if err != nil { glog.Errorf("Error occurred while getting Profile Id from Profile Name: %s, err: %+v", vmdisk.volumeOptions.StoragePolicyName, err) - return err + return "", err } } if vmdisk.volumeOptions.StoragePolicyID != "" { compatible, faultMessage, err := datastore.IsCompatibleWithStoragePolicy(ctx, vmdisk.volumeOptions.StoragePolicyID) if err != nil { glog.Errorf("Error occurred while checking datastore compatibility with storage policy id: %s, err: %+v", vmdisk.volumeOptions.StoragePolicyID, err) - return err + return "", err } if !compatible { glog.Errorf("Datastore: %s is not compatible with Policy: %s", datastore.Name(), vmdisk.volumeOptions.StoragePolicyName) - return fmt.Errorf("User specified datastore is not compatible with the storagePolicy: %q. Failed with faults: %+q", vmdisk.volumeOptions.StoragePolicyName, faultMessage) + return "", fmt.Errorf("User specified datastore is not compatible with the storagePolicy: %q. Failed with faults: %+q", vmdisk.volumeOptions.StoragePolicyName, faultMessage) } } @@ -76,11 +76,11 @@ func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datasto // Check Datastore type - VSANStorageProfileData is only applicable to vSAN Datastore dsType, err := datastore.GetType(ctx) if err != nil { - return err + return "", err } if dsType != vclib.VSANDatastoreType { glog.Errorf("The specified datastore: %q is not a VSAN datastore", datastore.Name()) - return fmt.Errorf("The specified datastore: %q is not a VSAN datastore."+ + return "", fmt.Errorf("The specified datastore: %q is not a VSAN datastore."+ " The policy parameters will work only with VSAN Datastore."+ " So, please specify a valid VSAN datastore in Storage class definition.", datastore.Name()) } @@ -91,7 +91,7 @@ func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datasto } } else { glog.Errorf("Both volumeOptions.StoragePolicyID and volumeOptions.VSANStorageProfileData are not set. One of them should be set") - return fmt.Errorf("Both volumeOptions.StoragePolicyID and volumeOptions.VSANStorageProfileData are not set. One of them should be set") + return "", fmt.Errorf("Both volumeOptions.StoragePolicyID and volumeOptions.VSANStorageProfileData are not set. One of them should be set") } var dummyVM *vclib.VirtualMachine // Check if VM already exist in the folder. @@ -106,7 +106,7 @@ func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datasto dummyVM, err = vmdisk.createDummyVM(ctx, datastore.Datacenter, dummyVMFullName) if err != nil { glog.Errorf("Failed to create Dummy VM. err: %v", err) - return err + return "", err } } @@ -115,7 +115,7 @@ func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datasto disk, _, err := dummyVM.CreateDiskSpec(ctx, vmdisk.diskPath, datastore, vmdisk.volumeOptions) if err != nil { glog.Errorf("Failed to create Disk Spec. 
err: %v", err) - return err + return "", err } deviceConfigSpec := &types.VirtualDeviceConfigSpec{ Device: disk, @@ -135,7 +135,7 @@ func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datasto glog.V(vclib.LogLevel).Info("File: %v already exists", vmdisk.diskPath) } else { glog.Errorf("Failed to attach the disk to VM: %q with err: %+v", dummyVMFullName, err) - return err + return "", err } } // Detach the disk from the dummy VM. @@ -146,7 +146,7 @@ func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datasto glog.V(vclib.LogLevel).Info("File: %v is already detached", vmdisk.diskPath) } else { glog.Errorf("Failed to detach the disk: %q from VM: %q with err: %+v", vmdisk.diskPath, dummyVMFullName, err) - return err + return "", err } } // Delete the dummy VM @@ -154,10 +154,10 @@ func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datasto if err != nil { glog.Errorf("Failed to destroy the vm: %q with err: %+v", dummyVMFullName, err) } - return nil + return vmdisk.diskPath, nil } -func (vmdisk vmDiskManager) Delete(ctx context.Context, datastore *vclib.Datastore) error { +func (vmdisk vmDiskManager) Delete(ctx context.Context, datacenter *vclib.Datacenter) error { return fmt.Errorf("vmDiskManager.Delete is not supported") } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/pbm.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/pbm.go index df749fb89660..5ec83c62b366 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/pbm.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/pbm.go @@ -85,7 +85,7 @@ func (pbmClient *PbmClient) IsDatastoreCompatible(ctx context.Context, storagePo // GetCompatibleDatastores filters and returns compatible list of datastores for given storage policy id // For Non Compatible Datastores, fault message with the Datastore Name is also returned -func (pbmClient *PbmClient) GetCompatibleDatastores(ctx context.Context, storagePolicyID string, datastores []*Datastore) ([]*Datastore, string, error) { +func (pbmClient *PbmClient) GetCompatibleDatastores(ctx context.Context, dc *Datacenter, storagePolicyID string, datastores []*DatastoreInfo) ([]*DatastoreInfo, string, error) { var ( dsMorNameMap = getDsMorNameMap(ctx, datastores) localizedMessagesForNotCompatibleDatastores = "" @@ -96,7 +96,7 @@ func (pbmClient *PbmClient) GetCompatibleDatastores(ctx context.Context, storage return nil, "", err } compatibleHubs := compatibilityResult.CompatibleDatastores() - var compatibleDatastoreList []*Datastore + var compatibleDatastoreList []*DatastoreInfo for _, hub := range compatibleHubs { compatibleDatastoreList = append(compatibleDatastoreList, getDatastoreFromPlacementHub(datastores, hub)) } @@ -121,7 +121,7 @@ func (pbmClient *PbmClient) GetCompatibleDatastores(ctx context.Context, storage } // GetPlacementCompatibilityResult gets placement compatibility result based on storage policy requirements. 
-func (pbmClient *PbmClient) GetPlacementCompatibilityResult(ctx context.Context, storagePolicyID string, datastore []*Datastore) (pbm.PlacementCompatibilityResult, error) { +func (pbmClient *PbmClient) GetPlacementCompatibilityResult(ctx context.Context, storagePolicyID string, datastore []*DatastoreInfo) (pbm.PlacementCompatibilityResult, error) { var hubs []pbmtypes.PbmPlacementHub for _, ds := range datastore { hubs = append(hubs, pbmtypes.PbmPlacementHub{ @@ -145,7 +145,7 @@ func (pbmClient *PbmClient) GetPlacementCompatibilityResult(ctx context.Context, } // getDataStoreForPlacementHub returns matching datastore associated with given pbmPlacementHub -func getDatastoreFromPlacementHub(datastore []*Datastore, pbmPlacementHub pbmtypes.PbmPlacementHub) *Datastore { +func getDatastoreFromPlacementHub(datastore []*DatastoreInfo, pbmPlacementHub pbmtypes.PbmPlacementHub) *DatastoreInfo { for _, ds := range datastore { if ds.Reference().Type == pbmPlacementHub.HubType && ds.Reference().Value == pbmPlacementHub.HubId { return ds @@ -155,7 +155,7 @@ func getDatastoreFromPlacementHub(datastore []*Datastore, pbmPlacementHub pbmtyp } // getDsMorNameMap returns map of ds Mor and Datastore Object Name -func getDsMorNameMap(ctx context.Context, datastores []*Datastore) map[string]string { +func getDsMorNameMap(ctx context.Context, datastores []*DatastoreInfo) map[string]string { dsMorNameMap := make(map[string]string) for _, ds := range datastores { dsObjectName, err := ds.ObjectName(ctx) diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/utils.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/utils.go index 3ae00e8c660a..bac429d6debd 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/utils.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/utils.go @@ -22,8 +22,11 @@ import ( "regexp" "strings" + "github.com/golang/glog" "github.com/vmware/govmomi/find" "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/soap" "github.com/vmware/govmomi/vim25/types" ) @@ -120,13 +123,91 @@ func getSCSIControllers(vmDevices object.VirtualDeviceList) []*types.VirtualCont return scsiControllers } -// RemoveClusterFromVDiskPath removes the cluster or folder path from the vDiskPath +// RemoveStorageClusterORFolderNameFromVDiskPath removes the cluster or folder path from the vDiskPath // for vDiskPath [DatastoreCluster/sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk, return value is [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk // for vDiskPath [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk, return value remains same [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk -func RemoveClusterFromVDiskPath(vDiskPath string) string { +func RemoveStorageClusterORFolderNameFromVDiskPath(vDiskPath string) string { datastore := regexp.MustCompile("\\[(.*?)\\]").FindStringSubmatch(vDiskPath)[1] if filepath.Base(datastore) != datastore { vDiskPath = strings.Replace(vDiskPath, datastore, filepath.Base(datastore), 1) } return vDiskPath } + +// GetPathFromVMDiskPath retrieves the path from VM Disk Path. 
+// Example: For vmDiskPath - [vsanDatastore] kubevols/volume.vmdk, the path is kubevols/volume.vmdk +func GetPathFromVMDiskPath(vmDiskPath string) string { + datastorePathObj := new(object.DatastorePath) + isSuccess := datastorePathObj.FromString(vmDiskPath) + if !isSuccess { + glog.Errorf("Failed to parse vmDiskPath: %s", vmDiskPath) + return "" + } + return datastorePathObj.Path +} + +// GetDatastoreFromVMDiskPath retrieves the datastore name from VM Disk Path. +// Example: For vmDiskPath - [vsanDatastore] kubevols/volume.vmdk, the datastore is vsanDatastore +func GetDatastoreFromVMDiskPath(vmDiskPath string) string { + datastorePathObj := new(object.DatastorePath) + isSuccess := datastorePathObj.FromString(vmDiskPath) + if !isSuccess { + glog.Errorf("Failed to parse vmDiskPath: %s", vmDiskPath) + return "" + } + return datastorePathObj.Datastore +} + +// GetDatastorePathObjFromVMDiskPath gets the datastorePathObj from VM disk path. +func GetDatastorePathObjFromVMDiskPath(vmDiskPath string) (*object.DatastorePath, error) { + datastorePathObj := new(object.DatastorePath) + isSuccess := datastorePathObj.FromString(vmDiskPath) + if !isSuccess { + glog.Errorf("Failed to parse volPath: %s", vmDiskPath) + return nil, fmt.Errorf("Failed to parse volPath: %s", vmDiskPath) + } + return datastorePathObj, nil +} + +// IsValidUUID checks if the string is a valid UUID. +func IsValidUUID(uuid string) bool { + r := regexp.MustCompile("^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$") + return r.MatchString(uuid) +} + +// IsManagedObjectNotFoundError returns true if error is of type ManagedObjectNotFound +func IsManagedObjectNotFoundError(err error) bool { + isManagedObjectNotFoundError := false + if soap.IsSoapFault(err) { + _, isManagedObjectNotFoundError = soap.ToSoapFault(err).VimFault().(types.ManagedObjectNotFound) + } + return isManagedObjectNotFoundError +} + +// VerifyVolumePathsForVM verifies if the volume paths (volPaths) are attached to VM. +func VerifyVolumePathsForVM(vmMo mo.VirtualMachine, volPaths []string, nodeName string, nodeVolumeMap map[string]map[string]bool) { + // Verify if the volume paths are present on the VM backing virtual disk devices + vmDevices := object.VirtualDeviceList(vmMo.Config.Hardware.Device) + VerifyVolumePathsForVMDevices(vmDevices, volPaths, nodeName, nodeVolumeMap) + +} + +// VerifyVolumePathsForVMDevices verifies if the volume paths (volPaths) are attached to VM.
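All of the path helpers above lean on govmomi's object.DatastorePath to split the "[datastore] folder/file.vmdk" form into its two pieces. A minimal, self-contained sketch of that parsing, using a made-up disk path:

package main

import (
	"fmt"

	"github.com/vmware/govmomi/object"
)

func main() {
	// Hypothetical disk path in the "[datastore] folder/file.vmdk" form handled above.
	vmDiskPath := "[vsanDatastore] kubevols/e2e-vmdk-1234.vmdk"

	dsPath := new(object.DatastorePath)
	if !dsPath.FromString(vmDiskPath) {
		fmt.Println("failed to parse datastore path")
		return
	}
	fmt.Println(dsPath.Datastore) // vsanDatastore
	fmt.Println(dsPath.Path)      // kubevols/e2e-vmdk-1234.vmdk
}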
+func VerifyVolumePathsForVMDevices(vmDevices object.VirtualDeviceList, volPaths []string, nodeName string, nodeVolumeMap map[string]map[string]bool) { + volPathsMap := make(map[string]bool) + for _, volPath := range volPaths { + volPathsMap[volPath] = true + } + // Verify if the volume paths are present on the VM backing virtual disk devices + for _, device := range vmDevices { + if vmDevices.TypeName(device) == "VirtualDisk" { + virtualDevice := device.GetVirtualDevice() + if backing, ok := virtualDevice.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok { + if volPathsMap[backing.FileName] { + setNodeVolumeMap(nodeVolumeMap, backing.FileName, nodeName, true) + } + } + } + } + +} diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/virtualmachine.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/virtualmachine.go index 91d70535b8f6..8077b5583e65 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/virtualmachine.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/virtualmachine.go @@ -19,11 +19,11 @@ package vclib import ( "context" "fmt" - "path/filepath" "time" "github.com/golang/glog" "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/property" "github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/types" ) @@ -46,23 +46,6 @@ func (vm *VirtualMachine) IsDiskAttached(ctx context.Context, diskPath string) ( return false, nil } -// GetVirtualDiskPage83Data gets the virtual disk UUID by diskPath -func (vm *VirtualMachine) GetVirtualDiskPage83Data(ctx context.Context, diskPath string) (string, error) { - if len(diskPath) > 0 && filepath.Ext(diskPath) != ".vmdk" { - diskPath += ".vmdk" - } - vdm := object.NewVirtualDiskManager(vm.Client()) - // Returns uuid of vmdk virtual disk - diskUUID, err := vdm.QueryVirtualDiskUuid(ctx, diskPath, vm.Datacenter.Datacenter) - - if err != nil { - glog.Errorf("QueryVirtualDiskUuid failed for diskPath: %q on VM: %q. err: %+v", diskPath, vm.InventoryPath, err) - return "", ErrNoDiskUUIDFound - } - diskUUID = formatVirtualDiskUUID(diskUUID) - return diskUUID, nil -} - // DeleteVM deletes the VM. func (vm *VirtualMachine) DeleteVM(ctx context.Context) error { destroyTask, err := vm.Destroy(ctx) @@ -81,7 +64,7 @@ func (vm *VirtualMachine) AttachDisk(ctx context.Context, vmDiskPath string, vol return "", fmt.Errorf("Not a valid SCSI Controller Type. Valid options are %q", SCSIControllerTypeValidOptions()) } vmDiskPathCopy := vmDiskPath - vmDiskPath = RemoveClusterFromVDiskPath(vmDiskPath) + vmDiskPath = RemoveStorageClusterORFolderNameFromVDiskPath(vmDiskPath) attached, err := vm.IsDiskAttached(ctx, vmDiskPath) if err != nil { glog.Errorf("Error occurred while checking if disk is attached on VM: %q. vmDiskPath: %q, err: %+v", vm.InventoryPath, vmDiskPath, err) @@ -89,10 +72,24 @@ func (vm *VirtualMachine) AttachDisk(ctx context.Context, vmDiskPath string, vol } // If disk is already attached, return the disk UUID if attached { - diskUUID, _ := vm.GetVirtualDiskPage83Data(ctx, vmDiskPath) + diskUUID, _ := vm.Datacenter.GetVirtualDiskPage83Data(ctx, vmDiskPath) return diskUUID, nil } + if volumeOptions.StoragePolicyName != "" { + pbmClient, err := NewPbmClient(ctx, vm.Client()) + if err != nil { + glog.Errorf("Error occurred while creating new pbmClient. 
err: %+v", err) + return "", err + } + + volumeOptions.StoragePolicyID, err = pbmClient.ProfileIDByName(ctx, volumeOptions.StoragePolicyName) + if err != nil { + glog.Errorf("Failed to get Profile ID by name: %s. err: %+v", volumeOptions.StoragePolicyName, err) + return "", err + } + } + dsObj, err := vm.Datacenter.GetDatastoreByPath(ctx, vmDiskPathCopy) if err != nil { glog.Errorf("Failed to get datastore from vmDiskPath: %q. err: %+v", vmDiskPath, err) @@ -143,7 +140,7 @@ func (vm *VirtualMachine) AttachDisk(ctx context.Context, vmDiskPath string, vol } // Once disk is attached, get the disk UUID. - diskUUID, err := vm.GetVirtualDiskPage83Data(ctx, vmDiskPath) + diskUUID, err := vm.Datacenter.GetVirtualDiskPage83Data(ctx, vmDiskPath) if err != nil { glog.Errorf("Error occurred while getting Disk Info from VM: %q. err: %v", vm.InventoryPath, err) vm.DetachDisk(ctx, vmDiskPath) @@ -157,7 +154,7 @@ func (vm *VirtualMachine) AttachDisk(ctx context.Context, vmDiskPath string, vol // DetachDisk detaches the disk specified by vmDiskPath func (vm *VirtualMachine) DetachDisk(ctx context.Context, vmDiskPath string) error { - vmDiskPath = RemoveClusterFromVDiskPath(vmDiskPath) + vmDiskPath = RemoveStorageClusterORFolderNameFromVDiskPath(vmDiskPath) device, err := vm.getVirtualDeviceByPath(ctx, vmDiskPath) if err != nil { glog.Errorf("Disk ID not found for VM: %q with diskPath: %q", vm.InventoryPath, vmDiskPath) @@ -204,7 +201,7 @@ func (vm *VirtualMachine) IsActive(ctx context.Context) (bool, error) { } // GetAllAccessibleDatastores gets the list of accessible Datastores for the given Virtual Machine -func (vm *VirtualMachine) GetAllAccessibleDatastores(ctx context.Context) ([]*Datastore, error) { +func (vm *VirtualMachine) GetAllAccessibleDatastores(ctx context.Context) ([]*DatastoreInfo, error) { host, err := vm.HostSystem(ctx) if err != nil { glog.Errorf("Failed to get host system for VM: %q. err: %+v", vm.InventoryPath, err) @@ -217,9 +214,28 @@ func (vm *VirtualMachine) GetAllAccessibleDatastores(ctx context.Context) ([]*Da glog.Errorf("Failed to retrieve datastores for host: %+v. err: %+v", host, err) return nil, err } - var dsObjList []*Datastore + var dsRefList []types.ManagedObjectReference for _, dsRef := range hostSystemMo.Datastore { - dsObjList = append(dsObjList, &Datastore{object.NewDatastore(vm.Client(), dsRef), vm.Datacenter}) + dsRefList = append(dsRefList, dsRef) + } + + var dsMoList []mo.Datastore + pc := property.DefaultCollector(vm.Client()) + properties := []string{DatastoreInfoProperty} + err = pc.Retrieve(ctx, dsRefList, properties, &dsMoList) + if err != nil { + glog.Errorf("Failed to get Datastore managed objects from datastore objects."+ + " dsObjList: %+v, properties: %+v, err: %v", dsRefList, properties, err) + return nil, err + } + glog.V(9).Infof("Result dsMoList: %+v", dsMoList) + var dsObjList []*DatastoreInfo + for _, dsMo := range dsMoList { + dsObjList = append(dsObjList, + &DatastoreInfo{ + &Datastore{object.NewDatastore(vm.Client(), dsMo.Reference()), + vm.Datacenter}, + dsMo.Info.GetDatastoreInfo()}) } return dsObjList, nil } @@ -285,6 +301,25 @@ func (vm *VirtualMachine) CreateDiskSpec(ctx context.Context, diskPath string, d return disk, newSCSIController, nil } +// GetVirtualDiskPath gets the first available virtual disk devicePath from the VM +func (vm *VirtualMachine) GetVirtualDiskPath(ctx context.Context) (string, error) { + vmDevices, err := vm.Device(ctx) + if err != nil { + glog.Errorf("Failed to get the devices for VM: %q. 
err: %+v", vm.InventoryPath, err) + return "", err + } + // filter vm devices to retrieve device for the given vmdk file identified by disk path + for _, device := range vmDevices { + if vmDevices.TypeName(device) == "VirtualDisk" { + virtualDevice := device.GetVirtualDevice() + if backing, ok := virtualDevice.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok { + return backing.FileName, nil + } + } + } + return "", nil +} + // createAndAttachSCSIController creates and attachs the SCSI controller to the VM. func (vm *VirtualMachine) createAndAttachSCSIController(ctx context.Context, diskControllerType string) (types.BaseVirtualDevice, error) { // Get VM device list @@ -322,24 +357,17 @@ func (vm *VirtualMachine) createAndAttachSCSIController(ctx context.Context, dis // getVirtualDeviceByPath gets the virtual device by path func (vm *VirtualMachine) getVirtualDeviceByPath(ctx context.Context, diskPath string) (types.BaseVirtualDevice, error) { - var diskUUID string vmDevices, err := vm.Device(ctx) if err != nil { glog.Errorf("Failed to get the devices for VM: %q. err: %+v", vm.InventoryPath, err) return nil, err } - volumeUUID, err := vm.GetVirtualDiskPage83Data(ctx, diskPath) - if err != nil { - glog.Errorf("Failed to get disk UUID for path: %q on VM: %q. err: %+v", diskPath, vm.InventoryPath, err) - return nil, err - } // filter vm devices to retrieve device for the given vmdk file identified by disk path for _, device := range vmDevices { if vmDevices.TypeName(device) == "VirtualDisk" { virtualDevice := device.GetVirtualDevice() if backing, ok := virtualDevice.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok { - diskUUID = formatVirtualDiskUUID(backing.Uuid) - if diskUUID == volumeUUID { + if backing.FileName == diskPath { return device, nil } } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vsphere.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vsphere.go index 3f0e66582101..77f80e23549c 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vsphere.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vsphere.go @@ -21,6 +21,7 @@ import ( "fmt" "io" "net" + "os" "path" "path/filepath" "runtime" @@ -34,7 +35,10 @@ import ( "golang.org/x/net/context" "k8s.io/api/core/v1" k8stypes "k8s.io/apimachinery/pkg/types" - v1helper "k8s.io/kubernetes/pkg/api/v1/helper" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/informers" + "k8s.io/client-go/tools/cache" + v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib" "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers" @@ -47,7 +51,6 @@ const ( VolDir = "kubevols" RoundTripperDefaultCount = 3 DummyVMPrefixName = "vsphere-k8s" - VSANDatastoreType = "vsan" MacOuiVC = "00:50:56" MacOuiEsx = "00:0c:29" CleanUpDummyVMRoutineInterval = 5 @@ -56,26 +59,51 @@ const ( ) var cleanUpRoutineInitialized = false +var datastoreFolderIDMap = make(map[string]map[string]string) -var clientLock sync.Mutex var cleanUpRoutineInitLock sync.Mutex var cleanUpDummyVMLock sync.RWMutex // VSphere is an implementation of cloud provider Interface for VSphere. type VSphere struct { + cfg *VSphereConfig + hostName string + // Maps the VSphere IP address to VSphereInstance + vsphereInstanceMap map[string]*VSphereInstance + // Responsible for managing discovery of k8s node, their location etc. 
+ nodeManager *NodeManager +} + +// Represents a vSphere instance where one or more kubernetes nodes are running. +type VSphereInstance struct { conn *vclib.VSphereConnection - cfg *VSphereConfig - // InstanceID of the server where this VSphere object is instantiated. - localInstanceID string + cfg *VirtualCenterConfig +} + +// Structure that represents Virtual Center configuration +type VirtualCenterConfig struct { + // vCenter username. + User string `gcfg:"user"` + // vCenter password in clear text. + Password string `gcfg:"password"` + // vCenter port. + VCenterPort string `gcfg:"port"` + // Datacenter in which VMs are located. + Datacenters string `gcfg:"datacenters"` + // Soap round tripper count (retries = RoundTripper - 1) + RoundTripperCount uint `gcfg:"soap-roundtrip-count"` } -// VSphereConfig information that is used by vSphere Cloud Provider to connect to VC +// Structure that represents the content of vsphere.conf file. +// Users specify the configuration of one or more Virtual Centers in vsphere.conf where +// the Kubernetes master and worker nodes are running. type VSphereConfig struct { Global struct { // vCenter username. User string `gcfg:"user"` // vCenter password in clear text. Password string `gcfg:"password"` + // Deprecated. Use VirtualCenter to specify multiple vCenter Servers. // vCenter IP. VCenterIP string `gcfg:"server"` // vCenter port. @@ -83,23 +111,32 @@ type VSphereConfig struct { // True if vCenter uses self-signed cert. InsecureFlag bool `gcfg:"insecure-flag"` // Datacenter in which VMs are located. + // Deprecated. Use "datacenters" instead. Datacenter string `gcfg:"datacenter"` + // Datacenter in which VMs are located. + Datacenters string `gcfg:"datacenters"` // Datastore in which vmdks are stored. - Datastore string `gcfg:"datastore"` - // WorkingDir is path where VMs can be found. + // Deprecated. See Workspace.DefaultDatastore + DefaultDatastore string `gcfg:"datastore"` + // WorkingDir is path where VMs can be found. Also used to create dummy VMs. + // Deprecated. WorkingDir string `gcfg:"working-dir"` // Soap round tripper count (retries = RoundTripper - 1) RoundTripperCount uint `gcfg:"soap-roundtrip-count"` + // Deprecated as the virtual machines will be automatically discovered. // VMUUID is the VM Instance UUID of virtual machine which can be retrieved from instanceUuid // property in VmConfigInfo, or also set as vc.uuid in VMX file. // If not set, will be fetched from the machine via sysfs (requires root) VMUUID string `gcfg:"vm-uuid"` + // Deprecated as virtual machine will be automatically discovered. // VMName is the VM name of virtual machine // Combining the WorkingDir and VMName can form a unique InstanceID. // When vm-name is set, no username/password is required on worker nodes. VMName string `gcfg:"vm-name"` } + VirtualCenter map[string]*VirtualCenterConfig + Network struct { // PublicNetwork is name of the network the VMs are joined to. PublicNetwork string `gcfg:"public-network"` @@ -109,12 +146,21 @@ type VSphereConfig struct { // SCSIControllerType defines SCSI controller to be used. SCSIControllerType string `dcfg:"scsicontrollertype"` } + + // Endpoint used to create volumes + Workspace struct { + VCenterIP string `gcfg:"server"` + Datacenter string `gcfg:"datacenter"` + Folder string `gcfg:"folder"` + DefaultDatastore string `gcfg:"default-datastore"` + ResourcePoolPath string `gcfg:"resourcepool-path"` + } } type Volumes interface { // AttachDisk attaches given disk to given node. 
Current node // is used when nodeName is empty string. - AttachDisk(vmDiskPath string, storagePolicyID string, nodeName k8stypes.NodeName) (diskUUID string, err error) + AttachDisk(vmDiskPath string, storagePolicyName string, nodeName k8stypes.NodeName) (diskUUID string, err error) // DetachDisk detaches given disk to given node. Current node // is used when nodeName is empty string. @@ -127,7 +173,7 @@ type Volumes interface { // DisksAreAttached checks if a list disks are attached to the given node. // Assumption: If node doesn't exist, disks are not attached to the node. - DisksAreAttached(volPath []string, nodeName k8stypes.NodeName) (map[string]bool, error) + DisksAreAttached(nodeVolumes map[k8stypes.NodeName][]string) (map[k8stypes.NodeName]map[string]bool, error) // CreateVolume creates a new vmdk with specified parameters. CreateVolume(volumeOptions *vclib.VolumeOptions) (volumePath string, err error) @@ -151,19 +197,169 @@ func readConfig(config io.Reader) (VSphereConfig, error) { func init() { vclib.RegisterMetrics() cloudprovider.RegisterCloudProvider(ProviderName, func(config io.Reader) (cloudprovider.Interface, error) { + // If vSphere.conf file is not present then it is worker node. + if config == nil { + return newWorkerNode() + } cfg, err := readConfig(config) if err != nil { return nil, err } - return newVSphere(cfg) + return newControllerNode(cfg) }) } // Initialize passes a Kubernetes clientBuilder interface to the cloud provider -func (vs *VSphere) Initialize(clientBuilder controller.ControllerClientBuilder) {} +func (vs *VSphere) Initialize(clientBuilder controller.ControllerClientBuilder) { + if vs.cfg == nil { + return + } -func newVSphere(cfg VSphereConfig) (*VSphere, error) { + // Only on controller node it is required to register listeners. + // Register callbacks for node updates + client := clientBuilder.ClientOrDie("vSphere-cloud-provider") + factory := informers.NewSharedInformerFactory(client, 5*time.Minute) + nodeInformer := factory.Core().V1().Nodes() + nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: vs.NodeAdded, + DeleteFunc: vs.NodeDeleted, + }) + go nodeInformer.Informer().Run(wait.NeverStop) + glog.V(4).Infof("vSphere cloud provider initialized") +} + +// Creates new worker node interface and returns +func newWorkerNode() (*VSphere, error) { var err error + vs := VSphere{} + vs.hostName, err = os.Hostname() + if err != nil { + glog.Errorf("Failed to get hostname. err: %+v", err) + return nil, err + } + + return &vs, nil +} + +func populateVsphereInstanceMap(cfg *VSphereConfig) (map[string]*VSphereInstance, error) { + vsphereInstanceMap := make(map[string]*VSphereInstance) + + // Check if the vsphere.conf is in old format. In this + // format the cfg.VirtualCenter will be nil or empty. 
+ if cfg.VirtualCenter == nil || len(cfg.VirtualCenter) == 0 { + glog.V(4).Infof("Config is not per virtual center and is in old format.") + if cfg.Global.User == "" { + glog.Error("Global.User is empty!") + return nil, errors.New("Global.User is empty!") + } + if cfg.Global.Password == "" { + glog.Error("Global.Password is empty!") + return nil, errors.New("Global.Password is empty!") + } + if cfg.Global.WorkingDir == "" { + glog.Error("Global.WorkingDir is empty!") + return nil, errors.New("Global.WorkingDir is empty!") + } + if cfg.Global.VCenterIP == "" { + glog.Error("Global.VCenterIP is empty!") + return nil, errors.New("Global.VCenterIP is empty!") + } + if cfg.Global.Datacenter == "" { + glog.Error("Global.Datacenter is empty!") + return nil, errors.New("Global.Datacenter is empty!") + } + cfg.Workspace.VCenterIP = cfg.Global.VCenterIP + cfg.Workspace.Datacenter = cfg.Global.Datacenter + cfg.Workspace.Folder = cfg.Global.WorkingDir + cfg.Workspace.DefaultDatastore = cfg.Global.DefaultDatastore + + vcConfig := VirtualCenterConfig{ + User: cfg.Global.User, + Password: cfg.Global.Password, + VCenterPort: cfg.Global.VCenterPort, + Datacenters: cfg.Global.Datacenter, + RoundTripperCount: cfg.Global.RoundTripperCount, + } + + vSphereConn := vclib.VSphereConnection{ + Username: vcConfig.User, + Password: vcConfig.Password, + Hostname: cfg.Global.VCenterIP, + Insecure: cfg.Global.InsecureFlag, + RoundTripperCount: vcConfig.RoundTripperCount, + Port: vcConfig.VCenterPort, + } + vsphereIns := VSphereInstance{ + conn: &vSphereConn, + cfg: &vcConfig, + } + vsphereInstanceMap[cfg.Global.VCenterIP] = &vsphereIns + } else { + if cfg.Workspace.VCenterIP == "" || cfg.Workspace.Folder == "" || cfg.Workspace.Datacenter == "" { + msg := fmt.Sprintf("All fields in workspace are mandatory."+ + " vsphere.conf does not have the workspace specified correctly. cfg.Workspace: %+v", cfg.Workspace) + glog.Error(msg) + return nil, errors.New(msg) + } + for vcServer, vcConfig := range cfg.VirtualCenter { + glog.V(4).Infof("Initializing vc server %s", vcServer) + if vcServer == "" { + glog.Error("vsphere.conf does not have the VirtualCenter IP address specified") + return nil, errors.New("vsphere.conf does not have the VirtualCenter IP address specified") + } + if vcConfig.User == "" { + vcConfig.User = cfg.Global.User + } + if vcConfig.Password == "" { + vcConfig.Password = cfg.Global.Password + } + if vcConfig.User == "" { + msg := fmt.Sprintf("vcConfig.User is empty for vc %s!", vcServer) + glog.Error(msg) + return nil, errors.New(msg) + } + if vcConfig.Password == "" { + msg := fmt.Sprintf("vcConfig.Password is empty for vc %s!", vcServer) + glog.Error(msg) + return nil, errors.New(msg) + } + if vcConfig.VCenterPort == "" { + vcConfig.VCenterPort = cfg.Global.VCenterPort + } + if vcConfig.Datacenters == "" { + if cfg.Global.Datacenters != "" { + vcConfig.Datacenters = cfg.Global.Datacenters + } else { + // cfg.Global.Datacenter is deprecated, so giving it the last preference. 
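populateVsphereInstanceMap accepts both the legacy single-vCenter [Global] layout and the newer per-vCenter sections plus [Workspace]. Below is a rough, hypothetical vsphere.conf in the new layout, parsed with the gcfg package the struct tags are written for and a trimmed-down copy of the structures above; the address, names and credentials are invented:

package main

import (
	"fmt"

	"gopkg.in/gcfg.v1"
)

// Reduced copies of VirtualCenterConfig / VSphereConfig, just enough to show the file format.
type VirtualCenterConfig struct {
	User        string `gcfg:"user"`
	Password    string `gcfg:"password"`
	VCenterPort string `gcfg:"port"`
	Datacenters string `gcfg:"datacenters"`
}

type Config struct {
	Global struct {
		User         string `gcfg:"user"`
		Password     string `gcfg:"password"`
		InsecureFlag bool   `gcfg:"insecure-flag"`
	}
	VirtualCenter map[string]*VirtualCenterConfig
	Workspace     struct {
		VCenterIP        string `gcfg:"server"`
		Datacenter       string `gcfg:"datacenter"`
		Folder           string `gcfg:"folder"`
		DefaultDatastore string `gcfg:"default-datastore"`
	}
}

const sample = `
[Global]
user = "admin@vsphere.local"
password = "secret"
insecure-flag = true

[VirtualCenter "10.0.0.1"]
datacenters = "dc-1"

[Workspace]
server = "10.0.0.1"
datacenter = "dc-1"
folder = "kubernetes"
default-datastore = "sharedVmfs-0"
`

func main() {
	var cfg Config
	if err := gcfg.ReadStringInto(&cfg, sample); err != nil {
		panic(err)
	}
	fmt.Printf("%d vCenter(s), workspace datastore %q\n",
		len(cfg.VirtualCenter), cfg.Workspace.DefaultDatastore)
}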
+ vcConfig.Datacenters = cfg.Global.Datacenter + } + } + if vcConfig.RoundTripperCount == 0 { + vcConfig.RoundTripperCount = cfg.Global.RoundTripperCount + } + + vSphereConn := vclib.VSphereConnection{ + Username: vcConfig.User, + Password: vcConfig.Password, + Hostname: vcServer, + Insecure: cfg.Global.InsecureFlag, + RoundTripperCount: vcConfig.RoundTripperCount, + Port: vcConfig.VCenterPort, + } + vsphereIns := VSphereInstance{ + conn: &vSphereConn, + cfg: vcConfig, + } + vsphereInstanceMap[vcServer] = &vsphereIns + } + } + return vsphereInstanceMap, nil +} + +// Creates new Contreoller node interface and returns +func newControllerNode(cfg VSphereConfig) (*VSphere, error) { + var err error + if cfg.Disk.SCSIControllerType == "" { cfg.Disk.SCSIControllerType = vclib.PVSCSIControllerType } else if !vclib.CheckControllerSupported(cfg.Disk.SCSIControllerType) { @@ -187,56 +383,37 @@ func newVSphere(cfg VSphereConfig) (*VSphere, error) { return nil, err } } - vSphereConn := vclib.VSphereConnection{ - Username: cfg.Global.User, - Password: cfg.Global.Password, - Hostname: cfg.Global.VCenterIP, - Insecure: cfg.Global.InsecureFlag, - RoundTripperCount: cfg.Global.RoundTripperCount, - Port: cfg.Global.VCenterPort, + vsphereInstanceMap, err := populateVsphereInstanceMap(&cfg) + if err != nil { + return nil, err } - var instanceID string - if cfg.Global.VMName == "" { - // if VMName is not set in the cloud config file, each nodes (including worker nodes) need credentials to obtain VMName from vCenter - glog.V(4).Infof("Cannot find VMName from cloud config file, start obtaining it from vCenter") - // Create context - ctx, cancel := context.WithCancel(context.TODO()) - defer cancel() - err = vSphereConn.Connect(ctx) - if err != nil { - glog.Errorf("Failed to connect to vSphere") - return nil, err - } - dc, err := vclib.GetDatacenter(ctx, &vSphereConn, cfg.Global.Datacenter) - if err != nil { - return nil, err - } - vm, err := dc.GetVMByUUID(ctx, cfg.Global.VMUUID) - if err != nil { - return nil, err - } - vmName, err := vm.ObjectName(ctx) - if err != nil { - return nil, err - } - instanceID = vmName - } else { - instanceID = cfg.Global.VMName - } vs := VSphere{ - conn: &vSphereConn, - cfg: &cfg, - localInstanceID: instanceID, + vsphereInstanceMap: vsphereInstanceMap, + nodeManager: &NodeManager{ + vsphereInstanceMap: vsphereInstanceMap, + nodeInfoMap: make(map[string]*NodeInfo), + registeredNodes: make(map[string]*v1.Node), + }, + cfg: &cfg, + } + + vs.hostName, err = os.Hostname() + if err != nil { + glog.Errorf("Failed to get hostname. err: %+v", err) + return nil, err } runtime.SetFinalizer(&vs, logout) return &vs, nil } func logout(vs *VSphere) { - if vs.conn.GoVmomiClient != nil { - vs.conn.GoVmomiClient.Logout(context.TODO()) + for _, vsphereIns := range vs.vsphereInstanceMap { + if vsphereIns.conn.GoVmomiClient != nil { + vsphereIns.conn.GoVmomiClient.Logout(context.TODO()) + } } + } // Instances returns an implementation of Instances for vSphere. @@ -283,43 +460,74 @@ func getLocalIP() ([]v1.NodeAddress, error) { return addrs, nil } -// Get the VM Managed Object instance by from the node -func (vs *VSphere) getVMByName(ctx context.Context, nodeName k8stypes.NodeName) (*vclib.VirtualMachine, error) { - dc, err := vclib.GetDatacenter(ctx, vs.conn, vs.cfg.Global.Datacenter) +func (vs *VSphere) getVSphereInstance(nodeName k8stypes.NodeName) (*VSphereInstance, error) { + vsphereIns, err := vs.nodeManager.GetVSphereInstance(nodeName) if err != nil { + glog.Errorf("Cannot find node %q in cache. 
Node not found!!!", nodeName) return nil, err } - vmPath := vs.cfg.Global.WorkingDir + "/" + nodeNameToVMName(nodeName) - vm, err := dc.GetVMByPath(ctx, vmPath) + return &vsphereIns, nil +} + +func (vs *VSphere) getVSphereInstanceForServer(vcServer string, ctx context.Context) (*VSphereInstance, error) { + vsphereIns, ok := vs.vsphereInstanceMap[vcServer] + if !ok { + glog.Errorf("cannot find vcServer %q in cache. VC not found!!!", vcServer) + return nil, errors.New(fmt.Sprintf("Cannot find node %q in vsphere configuration map", vcServer)) + } + // Ensure client is logged in and session is valid + err := vsphereIns.conn.Connect(ctx) if err != nil { + glog.Errorf("failed connecting to vcServer %q with error %+v", vcServer, err) return nil, err } - return vm, nil + + return vsphereIns, nil +} + +// Get the VM Managed Object instance by from the node +func (vs *VSphere) getVMFromNodeName(ctx context.Context, nodeName k8stypes.NodeName) (*vclib.VirtualMachine, error) { + nodeInfo, err := vs.nodeManager.GetNodeInfo(nodeName) + if err != nil { + return nil, err + } + return nodeInfo.vm, nil } // NodeAddresses is an implementation of Instances.NodeAddresses. func (vs *VSphere) NodeAddresses(nodeName k8stypes.NodeName) ([]v1.NodeAddress, error) { // Get local IP addresses if node is local node - if vs.localInstanceID == nodeNameToVMName(nodeName) { + if vs.hostName == convertToString(nodeName) { return getLocalIP() } + + if vs.cfg == nil { + return nil, cloudprovider.InstanceNotFound + } + + // Below logic can be executed only on master as VC details are present. addrs := []v1.NodeAddress{} // Create context ctx, cancel := context.WithCancel(context.Background()) defer cancel() + vsi, err := vs.getVSphereInstance(nodeName) + if err != nil { + return nil, err + } // Ensure client is logged in and session is valid - err := vs.conn.Connect(ctx) + err = vsi.conn.Connect(ctx) if err != nil { return nil, err } - vm, err := vs.getVMByName(ctx, nodeName) + + vm, err := vs.getVMFromNodeName(ctx, nodeName) if err != nil { - glog.Errorf("Failed to get VM object for node: %q. err: +%v", nodeNameToVMName(nodeName), err) + glog.Errorf("Failed to get VM object for node: %q. err: +%v", convertToString(nodeName), err) return nil, err } vmMoList, err := vm.Datacenter.GetVMMoList(ctx, []*vclib.VirtualMachine{vm}, []string{"guest.net"}) if err != nil { - glog.Errorf("Failed to get VM Managed object with property guest.net for node: %q. err: +%v", nodeNameToVMName(nodeName), err) + glog.Errorf("Failed to get VM Managed object with property guest.net for node: %q. err: +%v", convertToString(nodeName), err) return nil, err } // retrieve VM's ip(s) @@ -347,27 +555,24 @@ func (vs *VSphere) NodeAddresses(nodeName k8stypes.NodeName) ([]v1.NodeAddress, // This method will not be called from the node that is requesting this ID. i.e. 
metadata service // and other local methods cannot be used here func (vs *VSphere) NodeAddressesByProviderID(providerID string) ([]v1.NodeAddress, error) { - vmName := path.Base(providerID) - return vs.NodeAddresses(vmNameToNodeName(vmName)) + return vs.NodeAddresses(convertToK8sType(providerID)) } // AddSSHKeyToAllInstances add SSH key to all instances func (vs *VSphere) AddSSHKeyToAllInstances(user string, keyData []byte) error { - return errors.New("unimplemented") + return cloudprovider.NotImplemented } // CurrentNodeName gives the current node name func (vs *VSphere) CurrentNodeName(hostname string) (k8stypes.NodeName, error) { - return vmNameToNodeName(vs.localInstanceID), nil + return convertToK8sType(vs.hostName), nil } -// nodeNameToVMName maps a NodeName to the vmware infrastructure name -func nodeNameToVMName(nodeName k8stypes.NodeName) string { +func convertToString(nodeName k8stypes.NodeName) string { return string(nodeName) } -// nodeNameToVMName maps a vmware infrastructure name to a NodeName -func vmNameToNodeName(vmName string) k8stypes.NodeName { +func convertToK8sType(vmName string) k8stypes.NodeName { return k8stypes.NodeName(vmName) } @@ -379,40 +584,73 @@ func (vs *VSphere) ExternalID(nodeName k8stypes.NodeName) (string, error) { // InstanceExistsByProviderID returns true if the instance with the given provider id still exists and is running. // If false is returned with no error, the instance will be immediately deleted by the cloud controller manager. func (vs *VSphere) InstanceExistsByProviderID(providerID string) (bool, error) { - return false, errors.New("unimplemented") + _, err := vs.InstanceID(convertToK8sType(providerID)) + if err == nil { + return true, nil + } + + return false, err } // InstanceID returns the cloud provider ID of the node with the specified Name. func (vs *VSphere) InstanceID(nodeName k8stypes.NodeName) (string, error) { - if vs.localInstanceID == nodeNameToVMName(nodeName) { - return vs.cfg.Global.WorkingDir + "/" + vs.localInstanceID, nil - } - // Create context - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - // Ensure client is logged in and session is valid - err := vs.conn.Connect(ctx) - if err != nil { - return "", err - } - vm, err := vs.getVMByName(ctx, nodeName) - if err != nil { - if vclib.IsNotFound(err) { - return "", cloudprovider.InstanceNotFound + + instanceIDInternal := func() (string, error) { + if vs.hostName == convertToString(nodeName) { + return vs.hostName, nil + } + + // Below logic can be performed only on master node where VC details are preset. + if vs.cfg == nil { + return "", fmt.Errorf("The current node can't detremine InstanceID for %q", convertToString(nodeName)) } - glog.Errorf("Failed to get VM object for node: %q. err: +%v", nodeNameToVMName(nodeName), err) - return "", err + + // Create context + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + vsi, err := vs.getVSphereInstance(nodeName) + if err != nil { + return "", err + } + // Ensure client is logged in and session is valid + err = vsi.conn.Connect(ctx) + if err != nil { + return "", err + } + vm, err := vs.getVMFromNodeName(ctx, nodeName) + if err != nil { + if err == vclib.ErrNoVMFound { + return "", cloudprovider.InstanceNotFound + } + glog.Errorf("Failed to get VM object for node: %q. err: +%v", convertToString(nodeName), err) + return "", err + } + isActive, err := vm.IsActive(ctx) + if err != nil { + glog.Errorf("Failed to check whether node %q is active. 
err: %+v.", convertToString(nodeName), err) + return "", err + } + if isActive { + return convertToString(nodeName), nil + } + glog.Warningf("The VM: %s is not in %s state", convertToString(nodeName), vclib.ActivePowerState) + return "", cloudprovider.InstanceNotFound } - isActive, err := vm.IsActive(ctx) + + instanceID, err := instanceIDInternal() if err != nil { - glog.Errorf("Failed to check whether node %q is active. err: %+v.", nodeNameToVMName(nodeName), err) - return "", err - } - if isActive { - return "/" + vm.InventoryPath, nil + isManagedObjectNotFoundError, err := vs.retry(nodeName, err) + if isManagedObjectNotFoundError { + if err == nil { + glog.V(4).Infof("InstanceID: Found node %q", convertToString(nodeName)) + instanceID, err = instanceIDInternal() + } else if err == vclib.ErrNoVMFound { + return "", cloudprovider.InstanceNotFound + } + } } - return "", fmt.Errorf("The node %q is not active", nodeNameToVMName(nodeName)) + return instanceID, err } // InstanceTypeByProviderID returns the cloudprovider instance type of the node with the specified unique providerID @@ -457,72 +695,111 @@ func (vs *VSphere) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []st } // AttachDisk attaches given virtual disk volume to the compute running kubelet. -func (vs *VSphere) AttachDisk(vmDiskPath string, storagePolicyID string, nodeName k8stypes.NodeName) (diskUUID string, err error) { - attachDiskInternal := func(vmDiskPath string, storagePolicyID string, nodeName k8stypes.NodeName) (diskUUID string, err error) { +func (vs *VSphere) AttachDisk(vmDiskPath string, storagePolicyName string, nodeName k8stypes.NodeName) (diskUUID string, err error) { + attachDiskInternal := func(vmDiskPath string, storagePolicyName string, nodeName k8stypes.NodeName) (diskUUID string, err error) { if nodeName == "" { - nodeName = vmNameToNodeName(vs.localInstanceID) + nodeName = convertToK8sType(vs.hostName) } // Create context ctx, cancel := context.WithCancel(context.Background()) defer cancel() + vsi, err := vs.getVSphereInstance(nodeName) + if err != nil { + return "", err + } // Ensure client is logged in and session is valid - err = vs.conn.Connect(ctx) + err = vsi.conn.Connect(ctx) if err != nil { return "", err } - vm, err := vs.getVMByName(ctx, nodeName) + + vm, err := vs.getVMFromNodeName(ctx, nodeName) if err != nil { - glog.Errorf("Failed to get VM object for node: %q. err: +%v", nodeNameToVMName(nodeName), err) + glog.Errorf("Failed to get VM object for node: %q. err: +%v", convertToString(nodeName), err) return "", err } - diskUUID, err = vm.AttachDisk(ctx, vmDiskPath, &vclib.VolumeOptions{SCSIControllerType: vclib.PVSCSIControllerType, StoragePolicyID: storagePolicyID}) + + diskUUID, err = vm.AttachDisk(ctx, vmDiskPath, &vclib.VolumeOptions{SCSIControllerType: vclib.PVSCSIControllerType, StoragePolicyName: storagePolicyName}) if err != nil { - glog.Errorf("Failed to attach disk: %s for node: %s. err: +%v", vmDiskPath, nodeNameToVMName(nodeName), err) + glog.Errorf("Failed to attach disk: %s for node: %s. 
err: +%v", vmDiskPath, convertToString(nodeName), err) return "", err } return diskUUID, nil } requestTime := time.Now() - diskUUID, err = attachDiskInternal(vmDiskPath, storagePolicyID, nodeName) + diskUUID, err = attachDiskInternal(vmDiskPath, storagePolicyName, nodeName) + if err != nil { + isManagedObjectNotFoundError, err := vs.retry(nodeName, err) + if isManagedObjectNotFoundError { + if err == nil { + glog.V(4).Infof("AttachDisk: Found node %q", convertToString(nodeName)) + diskUUID, err = attachDiskInternal(vmDiskPath, storagePolicyName, nodeName) + } + } + } vclib.RecordvSphereMetric(vclib.OperationAttachVolume, requestTime, err) return diskUUID, err } +func (vs *VSphere) retry(nodeName k8stypes.NodeName, err error) (bool, error) { + isManagedObjectNotFoundError := false + if err != nil { + if vclib.IsManagedObjectNotFoundError(err) { + isManagedObjectNotFoundError = true + glog.V(4).Infof("error %q ManagedObjectNotFound for node %q", err, convertToString(nodeName)) + err = vs.nodeManager.RediscoverNode(nodeName) + } + } + return isManagedObjectNotFoundError, err +} + // DetachDisk detaches given virtual disk volume from the compute running kubelet. func (vs *VSphere) DetachDisk(volPath string, nodeName k8stypes.NodeName) error { detachDiskInternal := func(volPath string, nodeName k8stypes.NodeName) error { if nodeName == "" { - nodeName = vmNameToNodeName(vs.localInstanceID) + nodeName = convertToK8sType(vs.hostName) } // Create context ctx, cancel := context.WithCancel(context.Background()) defer cancel() + vsi, err := vs.getVSphereInstance(nodeName) + if err != nil { + return err + } // Ensure client is logged in and session is valid - err := vs.conn.Connect(ctx) + err = vsi.conn.Connect(ctx) if err != nil { return err } - vm, err := vs.getVMByName(ctx, nodeName) + vm, err := vs.getVMFromNodeName(ctx, nodeName) if err != nil { // If node doesn't exist, disk is already detached from node. - if vclib.IsNotFound(err) { - glog.Infof("Node %q does not exist, disk %s is already detached from node.", nodeNameToVMName(nodeName), volPath) + if err == vclib.ErrNoVMFound { + glog.Infof("Node %q does not exist, disk %s is already detached from node.", convertToString(nodeName), volPath) return nil } - glog.Errorf("Failed to get VM object for node: %q. err: +%v", nodeNameToVMName(nodeName), err) + glog.Errorf("Failed to get VM object for node: %q. err: +%v", convertToString(nodeName), err) return err } err = vm.DetachDisk(ctx, volPath) if err != nil { - glog.Errorf("Failed to detach disk: %s for node: %s. err: +%v", volPath, nodeNameToVMName(nodeName), err) + glog.Errorf("Failed to detach disk: %s for node: %s. 
err: +%v", volPath, convertToString(nodeName), err) return err } return nil } requestTime := time.Now() err := detachDiskInternal(volPath, nodeName) - vclib.RecordvSphereMetric(vclib.OperationDetachVolume, requestTime, nil) + if err != nil { + isManagedObjectNotFoundError, err := vs.retry(nodeName, err) + if isManagedObjectNotFoundError { + if err == nil { + err = detachDiskInternal(volPath, nodeName) + } + } + } + vclib.RecordvSphereMetric(vclib.OperationDetachVolume, requestTime, err) return err } @@ -531,22 +808,26 @@ func (vs *VSphere) DiskIsAttached(volPath string, nodeName k8stypes.NodeName) (b diskIsAttachedInternal := func(volPath string, nodeName k8stypes.NodeName) (bool, error) { var vSphereInstance string if nodeName == "" { - vSphereInstance = vs.localInstanceID - nodeName = vmNameToNodeName(vSphereInstance) + vSphereInstance = vs.hostName + nodeName = convertToK8sType(vSphereInstance) } else { - vSphereInstance = nodeNameToVMName(nodeName) + vSphereInstance = convertToString(nodeName) } // Create context ctx, cancel := context.WithCancel(context.Background()) defer cancel() + vsi, err := vs.getVSphereInstance(nodeName) + if err != nil { + return false, err + } // Ensure client is logged in and session is valid - err := vs.conn.Connect(ctx) + err = vsi.conn.Connect(ctx) if err != nil { return false, err } - vm, err := vs.getVMByName(ctx, nodeName) + vm, err := vs.getVMFromNodeName(ctx, nodeName) if err != nil { - if vclib.IsNotFound(err) { + if err == vclib.ErrNoVMFound { glog.Warningf("Node %q does not exist, vsphere CP will assume disk %v is not attached to it.", nodeName, volPath) // make the disk as detached and return false without error. return false, nil @@ -554,7 +835,7 @@ func (vs *VSphere) DiskIsAttached(volPath string, nodeName k8stypes.NodeName) (b glog.Errorf("Failed to get VM object for node: %q. err: +%v", vSphereInstance, err) return false, err } - + volPath = vclib.RemoveStorageClusterORFolderNameFromVDiskPath(volPath) attached, err := vm.IsDiskAttached(ctx, volPath) if err != nil { glog.Errorf("DiskIsAttached failed to determine whether disk %q is still attached on node %q", @@ -565,67 +846,147 @@ func (vs *VSphere) DiskIsAttached(volPath string, nodeName k8stypes.NodeName) (b } requestTime := time.Now() isAttached, err := diskIsAttachedInternal(volPath, nodeName) + if err != nil { + isManagedObjectNotFoundError, err := vs.retry(nodeName, err) + if isManagedObjectNotFoundError { + if err == vclib.ErrNoVMFound { + isAttached, err = false, nil + } else if err == nil { + isAttached, err = diskIsAttachedInternal(volPath, nodeName) + } + } + } vclib.RecordvSphereMetric(vclib.OperationDiskIsAttached, requestTime, err) return isAttached, err } // DisksAreAttached returns if disks are attached to the VM using controllers supported by the plugin. -func (vs *VSphere) DisksAreAttached(volPaths []string, nodeName k8stypes.NodeName) (map[string]bool, error) { - disksAreAttachedInternal := func(volPaths []string, nodeName k8stypes.NodeName) (map[string]bool, error) { - attached := make(map[string]bool) - if len(volPaths) == 0 { - return attached, nil - } - var vSphereInstance string - if nodeName == "" { - vSphereInstance = vs.localInstanceID - nodeName = vmNameToNodeName(vSphereInstance) - } else { - vSphereInstance = nodeNameToVMName(nodeName) +// 1. Converts volPaths into canonical form so that it can be compared with the VM device path. +// 2. Segregates nodes by vCenter and Datacenter they are present in. This reduces calls to VC. +// 3. 
Creates go routines per VC-DC to find whether disks are attached to the nodes. +// 4. If the some of the VMs are not found or migrated then they are added to a list. +// 5. After successful execution of goroutines, +// 5a. If there are any VMs which needs to be retried, they are rediscovered and the whole operation is initiated again for only rediscovered VMs. +// 5b. If VMs are removed from vSphere inventory they are ignored. +func (vs *VSphere) DisksAreAttached(nodeVolumes map[k8stypes.NodeName][]string) (map[k8stypes.NodeName]map[string]bool, error) { + disksAreAttachedInternal := func(nodeVolumes map[k8stypes.NodeName][]string) (map[k8stypes.NodeName]map[string]bool, error) { + + // disksAreAttach checks whether disks are attached to the nodes. + // Returns nodes that need to be retried if retry is true + // Segregates nodes per VC and DC + // Creates go routines per VC-DC to find whether disks are attached to the nodes. + disksAreAttach := func(ctx context.Context, nodeVolumes map[k8stypes.NodeName][]string, attached map[string]map[string]bool, retry bool) ([]k8stypes.NodeName, error) { + + var wg sync.WaitGroup + var localAttachedMaps []map[string]map[string]bool + var nodesToRetry []k8stypes.NodeName + var globalErr error + globalErr = nil + globalErrMutex := &sync.Mutex{} + nodesToRetryMutex := &sync.Mutex{} + + // Segregate nodes according to VC-DC + dcNodes := make(map[string][]k8stypes.NodeName) + for nodeName := range nodeVolumes { + nodeInfo, err := vs.nodeManager.GetNodeInfo(nodeName) + if err != nil { + glog.Errorf("Failed to get node info: %+v. err: %+v", nodeInfo.vm, err) + return nodesToRetry, err + } + VC_DC := nodeInfo.vcServer + nodeInfo.dataCenter.String() + dcNodes[VC_DC] = append(dcNodes[VC_DC], nodeName) + } + + for _, nodes := range dcNodes { + localAttachedMap := make(map[string]map[string]bool) + localAttachedMaps = append(localAttachedMaps, localAttachedMap) + // Start go routines per VC-DC to check disks are attached + go func() { + nodesToRetryLocal, err := vs.checkDiskAttached(ctx, nodes, nodeVolumes, localAttachedMap, retry) + if err != nil { + if !vclib.IsManagedObjectNotFoundError(err) { + globalErrMutex.Lock() + globalErr = err + globalErrMutex.Unlock() + glog.Errorf("Failed to check disk attached for nodes: %+v. err: %+v", nodes, err) + } + } + nodesToRetryMutex.Lock() + nodesToRetry = append(nodesToRetry, nodesToRetryLocal...) + nodesToRetryMutex.Unlock() + wg.Done() + }() + wg.Add(1) + } + wg.Wait() + if globalErr != nil { + return nodesToRetry, globalErr + } + for _, localAttachedMap := range localAttachedMaps { + for key, value := range localAttachedMap { + attached[key] = value + } + } + + return nodesToRetry, nil } + + glog.V(4).Info("Starting DisksAreAttached API for vSphere with nodeVolumes: %+v", nodeVolumes) // Create context ctx, cancel := context.WithCancel(context.Background()) defer cancel() - // Ensure client is logged in and session is valid - err := vs.conn.Connect(ctx) + + disksAttached := make(map[k8stypes.NodeName]map[string]bool) + if len(nodeVolumes) == 0 { + return disksAttached, nil + } + + // Convert VolPaths into canonical form so that it can be compared with the VM device path. + vmVolumes, err := vs.convertVolPathsToDevicePaths(ctx, nodeVolumes) if err != nil { + glog.Errorf("Failed to convert volPaths to devicePaths: %+v. 
err: %+v", nodeVolumes, err) return nil, err } - vm, err := vs.getVMByName(ctx, nodeName) + attached := make(map[string]map[string]bool) + nodesToRetry, err := disksAreAttach(ctx, vmVolumes, attached, false) if err != nil { - if vclib.IsNotFound(err) { - glog.Warningf("Node %q does not exist, vsphere CP will assume all disks %v are not attached to it.", nodeName, volPaths) - // make all the disks as detached and return false without error. - attached := make(map[string]bool) - for _, volPath := range volPaths { - attached[volPath] = false - } - return attached, nil - } - glog.Errorf("Failed to get VM object for node: %q. err: +%v", vSphereInstance, err) return nil, err } - for _, volPath := range volPaths { - result, err := vm.IsDiskAttached(ctx, volPath) - if err == nil { - if result { - attached[volPath] = true - } else { - attached[volPath] = false + if len(nodesToRetry) != 0 { + // Rediscover nodes which are need to be retried + remainingNodesVolumes := make(map[k8stypes.NodeName][]string) + for _, nodeName := range nodesToRetry { + err = vs.nodeManager.RediscoverNode(nodeName) + if err != nil { + if err == vclib.ErrNoVMFound { + glog.V(4).Infof("node %s not found. err: %+v", nodeName, err) + continue + } + glog.Errorf("Failed to rediscover node %s. err: %+v", nodeName, err) + return nil, err + } + remainingNodesVolumes[nodeName] = nodeVolumes[nodeName] + } + + // If some remaining nodes are still registered + if len(remainingNodesVolumes) != 0 { + nodesToRetry, err = disksAreAttach(ctx, remainingNodesVolumes, attached, true) + if err != nil || len(nodesToRetry) != 0 { + glog.Errorf("Failed to retry disksAreAttach for nodes %+v. err: %+v", remainingNodesVolumes, err) + return nil, err } - } else { - glog.Errorf("DisksAreAttached failed to determine whether disk %q from volPaths %+v is still attached on node %q", - volPath, - volPaths, - vSphereInstance) - return nil, err + } + + for nodeName, volPaths := range attached { + disksAttached[convertToK8sType(nodeName)] = volPaths } } - return attached, nil + glog.V(4).Infof("DisksAreAttach successfully executed. result: %+v", attached) + return disksAttached, nil } requestTime := time.Now() - attached, err := disksAreAttachedInternal(volPaths, nodeName) + attached, err := disksAreAttachedInternal(nodeVolumes) vclib.RecordvSphereMetric(vclib.OperationDisksAreAttached, requestTime, err) return attached, err } @@ -634,25 +995,25 @@ func (vs *VSphere) DisksAreAttached(volPaths []string, nodeName k8stypes.NodeNam // If the volumeOptions.Datastore is part of datastore cluster for example - [DatastoreCluster/sharedVmfs-0] then // return value will be [DatastoreCluster/sharedVmfs-0] kubevols/.vmdk // else return value will be [sharedVmfs-0] kubevols/.vmdk -func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (volumePath string, err error) { +func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (canonicalVolumePath string, err error) { glog.V(1).Infof("Starting to create a vSphere volume with volumeOptions: %+v", volumeOptions) - createVolumeInternal := func(volumeOptions *vclib.VolumeOptions) (volumePath string, err error) { + createVolumeInternal := func(volumeOptions *vclib.VolumeOptions) (canonicalVolumePath string, err error) { var datastore string - // Default datastore is the datastore in the vSphere config file that is used to initialize vSphere cloud provider. 
+ // If datastore not specified, then use default datastore if volumeOptions.Datastore == "" { - datastore = vs.cfg.Global.Datastore + datastore = vs.cfg.Workspace.DefaultDatastore } else { datastore = volumeOptions.Datastore } + datastore = strings.TrimSpace(datastore) // Create context ctx, cancel := context.WithCancel(context.Background()) defer cancel() - // Ensure client is logged in and session is valid - err = vs.conn.Connect(ctx) + vsi, err := vs.getVSphereInstanceForServer(vs.cfg.Workspace.VCenterIP, ctx) if err != nil { return "", err } - dc, err := vclib.GetDatacenter(ctx, vs.conn, vs.cfg.Global.Datacenter) + dc, err := vclib.GetDatacenter(ctx, vsi.conn, vs.cfg.Workspace.Datacenter) if err != nil { return "", err } @@ -670,18 +1031,37 @@ func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (volumePath cleanUpRoutineInitialized = true } cleanUpRoutineInitLock.Unlock() - vmOptions, err = vs.setVMOptions(ctx, dc) + vmOptions, err = vs.setVMOptions(ctx, dc, vs.cfg.Workspace.ResourcePoolPath) if err != nil { glog.Errorf("Failed to set VM options requires to create a vsphere volume. err: %+v", err) return "", err } } if volumeOptions.StoragePolicyName != "" && volumeOptions.Datastore == "" { - datastore, err = getPbmCompatibleDatastore(ctx, dc.Client(), volumeOptions.StoragePolicyName, vmOptions.VMFolder) + datastore, err = getPbmCompatibleDatastore(ctx, dc, volumeOptions.StoragePolicyName, vs.nodeManager) if err != nil { glog.Errorf("Failed to get pbm compatible datastore with storagePolicy: %s. err: %+v", volumeOptions.StoragePolicyName, err) return "", err } + } else { + // Since no storage policy is specified but datastore is specified, check + // if the given datastore is a shared datastore across all node VMs. + sharedDsList, err := getSharedDatastoresInK8SCluster(ctx, dc, vs.nodeManager) + if err != nil { + glog.Errorf("Failed to get shared datastore: %+v", err) + return "", err + } + found := false + for _, sharedDs := range sharedDsList { + if datastore == sharedDs.Info.Name { + found = true + break + } + } + if !found { + msg := fmt.Sprintf("The specified datastore %s is not a shared datastore across node VMs", datastore) + return "", errors.New(msg) + } } ds, err := dc.GetDatastoreByName(ctx, datastore) if err != nil { @@ -694,27 +1074,34 @@ func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (volumePath glog.Errorf("Cannot create dir %#v. err %s", kubeVolsPath, err) return "", err } - volumePath = kubeVolsPath + volumeOptions.Name + ".vmdk" + volumePath := kubeVolsPath + volumeOptions.Name + ".vmdk" disk := diskmanagers.VirtualDisk{ DiskPath: volumePath, VolumeOptions: volumeOptions, VMOptions: vmOptions, } - err = disk.Create(ctx, ds) + volumePath, err = disk.Create(ctx, ds) if err != nil { glog.Errorf("Failed to create a vsphere volume with volumeOptions: %+v on datastore: %s. err: %+v", volumeOptions, datastore, err) return "", err } + // Get the canonical path for the volume path. + canonicalVolumePath, err = getcanonicalVolumePath(ctx, dc, volumePath) + if err != nil { + glog.Errorf("Failed to get canonical vsphere volume path for volume: %s with volumeOptions: %+v on datastore: %s. 
err: %+v", volumePath, volumeOptions, datastore, err) + return "", err + } if filepath.Base(datastore) != datastore { // If datastore is within cluster, add cluster path to the volumePath - volumePath = strings.Replace(volumePath, filepath.Base(datastore), datastore, 1) + canonicalVolumePath = strings.Replace(canonicalVolumePath, filepath.Base(datastore), datastore, 1) } - return volumePath, nil + return canonicalVolumePath, nil } requestTime := time.Now() - volumePath, err = createVolumeInternal(volumeOptions) + canonicalVolumePath, err = createVolumeInternal(volumeOptions) vclib.RecordCreateVolumeMetric(volumeOptions, requestTime, err) - return volumePath, err + glog.V(4).Infof("The canonical volume path for the newly created vSphere volume is %q", canonicalVolumePath) + return canonicalVolumePath, err } // DeleteVolume deletes a volume given volume name. @@ -724,16 +1111,11 @@ func (vs *VSphere) DeleteVolume(vmDiskPath string) error { // Create context ctx, cancel := context.WithCancel(context.Background()) defer cancel() - // Ensure client is logged in and session is valid - err := vs.conn.Connect(ctx) + vsi, err := vs.getVSphereInstanceForServer(vs.cfg.Workspace.VCenterIP, ctx) if err != nil { return err } - dc, err := vclib.GetDatacenter(ctx, vs.conn, vs.cfg.Global.Datacenter) - if err != nil { - return err - } - ds, err := dc.GetDatastoreByName(ctx, vs.cfg.Global.Datastore) + dc, err := vclib.GetDatacenter(ctx, vsi.conn, vs.cfg.Workspace.Datacenter) if err != nil { return err } @@ -742,7 +1124,7 @@ func (vs *VSphere) DeleteVolume(vmDiskPath string) error { VolumeOptions: &vclib.VolumeOptions{}, VMOptions: &vclib.VMOptions{}, } - err = disk.Delete(ctx, ds) + err = disk.Delete(ctx, dc) if err != nil { glog.Errorf("Failed to delete vsphere volume with vmDiskPath: %s. err: %+v", vmDiskPath, err) } @@ -758,3 +1140,27 @@ func (vs *VSphere) DeleteVolume(vmDiskPath string) error { func (vs *VSphere) HasClusterID() bool { return true } + +// Notification handler when node is added into k8s cluster. +func (vs *VSphere) NodeAdded(obj interface{}) { + node, ok := obj.(*v1.Node) + if node == nil || !ok { + glog.Warningf("NodeAdded: unrecognized object %+v", obj) + return + } + + glog.V(4).Infof("Node added: %+v", node) + vs.nodeManager.RegisterNode(node) +} + +// Notification handler when node is removed from k8s cluster. 
+func (vs *VSphere) NodeDeleted(obj interface{}) { + node, ok := obj.(*v1.Node) + if node == nil || !ok { + glog.Warningf("NodeDeleted: unrecognized object %+v", obj) + return + } + + glog.V(4).Infof("Node deleted: %+v", node) + vs.nodeManager.UnRegisterNode(node) +} diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vsphere_util.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vsphere_util.go index 54e09cd7d56d..efedb0621399 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vsphere_util.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vsphere_util.go @@ -21,20 +21,23 @@ import ( "errors" "io/ioutil" "os" + "regexp" "runtime" "strings" "time" "github.com/golang/glog" "github.com/vmware/govmomi" - "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/vim25" - "github.com/vmware/govmomi/vim25/mo" "fmt" + "github.com/vmware/govmomi/vim25/mo" + "k8s.io/api/core/v1" + k8stypes "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib" "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers" + "path/filepath" ) const ( @@ -42,6 +45,7 @@ const ( DatastoreInfoProperty = "info" Folder = "Folder" VirtualMachine = "VirtualMachine" + DummyDiskName = "kube-dummyDisk.vmdk" ) // GetVSphere reads vSphere configuration from system environment and construct vSphere object @@ -53,10 +57,28 @@ func GetVSphere() (*VSphere, error) { return nil, err } vSphereConn.GoVmomiClient = client + vsphereIns := &VSphereInstance{ + conn: vSphereConn, + cfg: &VirtualCenterConfig{ + User: cfg.Global.User, + Password: cfg.Global.Password, + VCenterPort: cfg.Global.VCenterPort, + Datacenters: cfg.Global.Datacenters, + RoundTripperCount: cfg.Global.RoundTripperCount, + }, + } + vsphereInsMap := make(map[string]*VSphereInstance) + vsphereInsMap[cfg.Global.VCenterIP] = vsphereIns + // TODO: Initialize nodeManager and set it in VSphere. vs := &VSphere{ - conn: vSphereConn, - cfg: cfg, - localInstanceID: "", + vsphereInstanceMap: vsphereInsMap, + hostName: "", + cfg: cfg, + nodeManager: &NodeManager{ + vsphereInstanceMap: vsphereInsMap, + nodeInfoMap: make(map[string]*NodeInfo), + registeredNodes: make(map[string]*v1.Node), + }, } runtime.SetFinalizer(vs, logout) return vs, nil @@ -68,14 +90,18 @@ func getVSphereConfig() *VSphereConfig { cfg.Global.VCenterPort = os.Getenv("VSPHERE_VCENTER_PORT") cfg.Global.User = os.Getenv("VSPHERE_USER") cfg.Global.Password = os.Getenv("VSPHERE_PASSWORD") - cfg.Global.Datacenter = os.Getenv("VSPHERE_DATACENTER") - cfg.Global.Datastore = os.Getenv("VSPHERE_DATASTORE") + cfg.Global.Datacenters = os.Getenv("VSPHERE_DATACENTER") + cfg.Global.DefaultDatastore = os.Getenv("VSPHERE_DATASTORE") cfg.Global.WorkingDir = os.Getenv("VSPHERE_WORKING_DIR") cfg.Global.VMName = os.Getenv("VSPHERE_VM_NAME") cfg.Global.InsecureFlag = false if strings.ToLower(os.Getenv("VSPHERE_INSECURE")) == "true" { cfg.Global.InsecureFlag = true } + cfg.Workspace.VCenterIP = cfg.Global.VCenterIP + cfg.Workspace.Datacenter = cfg.Global.Datacenters + cfg.Workspace.DefaultDatastore = cfg.Global.DefaultDatastore + cfg.Workspace.Folder = cfg.Global.WorkingDir return &cfg } @@ -125,49 +151,83 @@ func getvmUUID() (string, error) { return uuid, nil } -// Get all datastores accessible for the virtual machine object. 
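The NodeAdded and NodeDeleted handlers above are driven by the shared node informer that Initialize registers. A small, self-contained sketch of that wiring against a fake clientset; the node name and the final sleep are illustrative only, and the Create call assumes the context-free signature of the client-go generation vendored here:

package main

import (
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// Fake clientset stands in for the one ClientOrDie returns in Initialize.
	client := fake.NewSimpleClientset()
	factory := informers.NewSharedInformerFactory(client, 5*time.Minute)
	nodeInformer := factory.Core().V1().Nodes().Informer()
	nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			if node, ok := obj.(*v1.Node); ok && node != nil {
				fmt.Println("node added:", node.Name) // the provider registers the node here
			}
		},
		DeleteFunc: func(obj interface{}) {
			if node, ok := obj.(*v1.Node); ok && node != nil {
				fmt.Println("node deleted:", node.Name) // the provider unregisters the node here
			}
		},
	})

	stop := make(chan struct{})
	defer close(stop)
	go nodeInformer.Run(stop)
	cache.WaitForCacheSync(stop, nodeInformer.HasSynced)

	// Creating a node through the fake client triggers the AddFunc above.
	if _, err := client.CoreV1().Nodes().Create(&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-1"}}); err != nil {
		fmt.Println("create failed:", err)
	}
	time.Sleep(500 * time.Millisecond) // crude wait for the event to be delivered
}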
-func getSharedDatastoresInK8SCluster(ctx context.Context, folder *vclib.Folder) ([]*vclib.Datastore, error) { - vmList, err := folder.GetVirtualMachines(ctx) +// Returns the accessible datastores for the given node VM. +func getAccessibleDatastores(ctx context.Context, nodeVmDetail *NodeDetails, nodeManager *NodeManager) ([]*vclib.DatastoreInfo, error) { + accessibleDatastores, err := nodeVmDetail.vm.GetAllAccessibleDatastores(ctx) if err != nil { - glog.Errorf("Failed to get virtual machines in the kubernetes cluster: %s, err: %+v", folder.InventoryPath, err) - return nil, err + // Check if the node VM is not found which indicates that the node info in the node manager is stale. + // If so, rediscover the node and retry. + if vclib.IsManagedObjectNotFoundError(err) { + glog.V(4).Infof("error %q ManagedObjectNotFound for node %q. Rediscovering...", err, nodeVmDetail.NodeName) + err = nodeManager.RediscoverNode(convertToK8sType(nodeVmDetail.NodeName)) + if err == nil { + glog.V(4).Infof("Discovered node %s successfully", nodeVmDetail.NodeName) + nodeInfo, err := nodeManager.GetNodeInfo(convertToK8sType(nodeVmDetail.NodeName)) + if err != nil { + glog.V(4).Infof("error %q getting node info for node %+v", err, nodeVmDetail) + return nil, err + } + + accessibleDatastores, err = nodeInfo.vm.GetAllAccessibleDatastores(ctx) + if err != nil { + glog.V(4).Infof("error %q getting accessible datastores for node %+v", err, nodeVmDetail) + return nil, err + } + } else { + glog.V(4).Infof("error %q rediscovering node %+v", err, nodeVmDetail) + return nil, err + } + } else { + glog.V(4).Infof("error %q getting accessible datastores for node %+v", err, nodeVmDetail) + return nil, err + } } - if vmList == nil || len(vmList) == 0 { - glog.Errorf("No virtual machines found in the kubernetes cluster: %s", folder.InventoryPath) - return nil, fmt.Errorf("No virtual machines found in the kubernetes cluster: %s", folder.InventoryPath) + return accessibleDatastores, nil +} + +// Get all datastores accessible for the virtual machine object. +func getSharedDatastoresInK8SCluster(ctx context.Context, dc *vclib.Datacenter, nodeManager *NodeManager) ([]*vclib.DatastoreInfo, error) { + nodeVmDetails := nodeManager.GetNodeDetails() + if nodeVmDetails == nil || len(nodeVmDetails) == 0 { + msg := fmt.Sprintf("Kubernetes node nodeVmDetail details is empty. 
nodeVmDetails : %+v", nodeVmDetails) + glog.Error(msg) + return nil, fmt.Errorf(msg) } - index := 0 - var sharedDatastores []*vclib.Datastore - for _, vm := range vmList { - vmName, err := vm.ObjectName(ctx) + var sharedDatastores []*vclib.DatastoreInfo + for index, nodeVmDetail := range nodeVmDetails { + glog.V(9).Infof("Getting accessible datastores for node %s", nodeVmDetail.NodeName) + accessibleDatastores, err := getAccessibleDatastores(ctx, &nodeVmDetail, nodeManager) if err != nil { return nil, err } - if !strings.HasPrefix(vmName, DummyVMPrefixName) { - accessibleDatastores, err := vm.GetAllAccessibleDatastores(ctx) - if err != nil { - return nil, err - } - if index == 0 { - sharedDatastores = accessibleDatastores - } else { - sharedDatastores = intersect(sharedDatastores, accessibleDatastores) - if len(sharedDatastores) == 0 { - return nil, fmt.Errorf("No shared datastores found in the Kubernetes cluster: %s", folder.InventoryPath) - } + if index == 0 { + sharedDatastores = accessibleDatastores + } else { + sharedDatastores = intersect(sharedDatastores, accessibleDatastores) + if len(sharedDatastores) == 0 { + return nil, fmt.Errorf("No shared datastores found in the Kubernetes cluster for nodeVmDetails: %+v", nodeVmDetails) } - index++ } } + glog.V(9).Infof("sharedDatastores : %+v", sharedDatastores) + sharedDatastores, err := getDatastoresForEndpointVC(ctx, dc, sharedDatastores) + if err != nil { + glog.Errorf("Failed to get shared datastores from endpoint VC. err: %+v", err) + return nil, err + } + glog.V(9).Infof("sharedDatastores at endpoint VC: %+v", sharedDatastores) return sharedDatastores, nil } -func intersect(list1 []*vclib.Datastore, list2 []*vclib.Datastore) []*vclib.Datastore { - var sharedDs []*vclib.Datastore +func intersect(list1 []*vclib.DatastoreInfo, list2 []*vclib.DatastoreInfo) []*vclib.DatastoreInfo { + glog.V(9).Infof("list1: %+v", list1) + glog.V(9).Infof("list2: %+v", list2) + var sharedDs []*vclib.DatastoreInfo for _, val1 := range list1 { // Check if val1 is found in list2 for _, val2 := range list2 { - if val1.Reference().Value == val2.Reference().Value { + // Intersection is performed based on the datastoreUrl as this uniquely identifies the datastore. + if val1.Info.Url == val2.Info.Url { sharedDs = append(sharedDs, val1) break } @@ -176,46 +236,42 @@ func intersect(list1 []*vclib.Datastore, list2 []*vclib.Datastore) []*vclib.Data return sharedDs } -// Get the datastores accessible for the virtual machine object. -func getAllAccessibleDatastores(ctx context.Context, client *vim25.Client, vmMo mo.VirtualMachine) ([]string, error) { - host := vmMo.Summary.Runtime.Host - if host == nil { - return nil, errors.New("VM doesn't have a HostSystem") - } - var hostSystemMo mo.HostSystem - s := object.NewSearchIndex(client) - err := s.Properties(ctx, host.Reference(), []string{DatastoreProperty}, &hostSystemMo) - if err != nil { - return nil, err - } - var dsRefValues []string - for _, dsRef := range hostSystemMo.Datastore { - dsRefValues = append(dsRefValues, dsRef.Value) - } - return dsRefValues, nil -} - // getMostFreeDatastore gets the best fit compatible datastore by free space. 
-func getMostFreeDatastoreName(ctx context.Context, client *vim25.Client, dsObjList []*vclib.Datastore) (string, error) { - dsMoList, err := dsObjList[0].Datacenter.GetDatastoreMoList(ctx, dsObjList, []string{DatastoreInfoProperty}) - if err != nil { - return "", err - } +func getMostFreeDatastoreName(ctx context.Context, client *vim25.Client, dsInfoList []*vclib.DatastoreInfo) (string, error) { var curMax int64 curMax = -1 var index int - for i, dsMo := range dsMoList { - dsFreeSpace := dsMo.Info.GetDatastoreInfo().FreeSpace + for i, dsInfo := range dsInfoList { + dsFreeSpace := dsInfo.Info.GetDatastoreInfo().FreeSpace if dsFreeSpace > curMax { curMax = dsFreeSpace index = i } } - return dsMoList[index].Info.GetDatastoreInfo().Name, nil + return dsInfoList[index].Info.GetDatastoreInfo().Name, nil +} + +// Returns the datastores in the given datacenter by performing lookup based on datastore URL. +func getDatastoresForEndpointVC(ctx context.Context, dc *vclib.Datacenter, sharedDsInfos []*vclib.DatastoreInfo) ([]*vclib.DatastoreInfo, error) { + var datastores []*vclib.DatastoreInfo + allDsInfoMap, err := dc.GetAllDatastores(ctx) + if err != nil { + return nil, err + } + for _, sharedDsInfo := range sharedDsInfos { + dsInfo, ok := allDsInfoMap[sharedDsInfo.Info.Url] + if ok { + datastores = append(datastores, dsInfo) + } else { + glog.V(4).Infof("Warning: Shared datastore with URL %s does not exist in endpoint VC", sharedDsInfo.Info.Url) + } + } + glog.V(9).Infof("Datastore from endpoint VC: %+v", datastores) + return datastores, nil } -func getPbmCompatibleDatastore(ctx context.Context, client *vim25.Client, storagePolicyName string, folder *vclib.Folder) (string, error) { - pbmClient, err := vclib.NewPbmClient(ctx, client) +func getPbmCompatibleDatastore(ctx context.Context, dc *vclib.Datacenter, storagePolicyName string, nodeManager *NodeManager) (string, error) { + pbmClient, err := vclib.NewPbmClient(ctx, dc.Client()) if err != nil { return "", err } @@ -224,35 +280,40 @@ func getPbmCompatibleDatastore(ctx context.Context, client *vim25.Client, storag glog.Errorf("Failed to get Profile ID by name: %s. err: %+v", storagePolicyName, err) return "", err } - sharedDsList, err := getSharedDatastoresInK8SCluster(ctx, folder) + sharedDs, err := getSharedDatastoresInK8SCluster(ctx, dc, nodeManager) if err != nil { - glog.Errorf("Failed to get shared datastores from kubernetes cluster: %s. err: %+v", folder.InventoryPath, err) + glog.Errorf("Failed to get shared datastores. err: %+v", err) return "", err } - compatibleDatastores, _, err := pbmClient.GetCompatibleDatastores(ctx, storagePolicyID, sharedDsList) + if len(sharedDs) == 0 { + msg := "No shared datastores found in the endpoint virtual center" + glog.Errorf(msg) + return "", errors.New(msg) + } + compatibleDatastores, _, err := pbmClient.GetCompatibleDatastores(ctx, dc, storagePolicyID, sharedDs) if err != nil { - glog.Errorf("Failed to get compatible datastores from datastores : %+v with storagePolicy: %s. err: %+v", sharedDsList, storagePolicyID, err) + glog.Errorf("Failed to get compatible datastores from datastores : %+v with storagePolicy: %s. err: %+v", + sharedDs, storagePolicyID, err) return "", err } - datastore, err := getMostFreeDatastoreName(ctx, client, compatibleDatastores) + glog.V(9).Infof("compatibleDatastores : %+v", compatibleDatastores) + datastore, err := getMostFreeDatastoreName(ctx, dc.Client(), compatibleDatastores) if err != nil { glog.Errorf("Failed to get most free datastore from compatible datastores: %+v. 
err: %+v", compatibleDatastores, err) return "", err } + glog.V(4).Infof("Most free datastore : %+s", datastore) return datastore, err } -func (vs *VSphere) setVMOptions(ctx context.Context, dc *vclib.Datacenter) (*vclib.VMOptions, error) { +func (vs *VSphere) setVMOptions(ctx context.Context, dc *vclib.Datacenter, resourcePoolPath string) (*vclib.VMOptions, error) { var vmOptions vclib.VMOptions - vm, err := dc.GetVMByPath(ctx, vs.cfg.Global.WorkingDir+"/"+vs.localInstanceID) + resourcePool, err := dc.GetResourcePool(ctx, resourcePoolPath) if err != nil { return nil, err } - resourcePool, err := vm.GetResourcePool(ctx) - if err != nil { - return nil, err - } - folder, err := dc.GetFolderByPath(ctx, vs.cfg.Global.WorkingDir) + glog.V(9).Infof("Resource pool path %s, resourcePool %+v", resourcePoolPath, resourcePool) + folder, err := dc.GetFolderByPath(ctx, vs.cfg.Workspace.Folder) if err != nil { return nil, err } @@ -268,28 +329,200 @@ func (vs *VSphere) cleanUpDummyVMs(dummyVMPrefix string) { defer cancel() for { time.Sleep(CleanUpDummyVMRoutineInterval * time.Minute) - // Ensure client is logged in and session is valid - err := vs.conn.Connect(ctx) + vsi, err := vs.getVSphereInstanceForServer(vs.cfg.Workspace.VCenterIP, ctx) if err != nil { - glog.V(4).Infof("Failed to connect to VC with err: %+v. Retrying again...", err) + glog.V(4).Infof("Failed to get VSphere instance with err: %+v. Retrying again...", err) continue } - dc, err := vclib.GetDatacenter(ctx, vs.conn, vs.cfg.Global.Datacenter) + dc, err := vclib.GetDatacenter(ctx, vsi.conn, vs.cfg.Workspace.Datacenter) if err != nil { - glog.V(4).Infof("Failed to get the datacenter: %s from VC. err: %+v", vs.cfg.Global.Datacenter, err) + glog.V(4).Infof("Failed to get the datacenter: %s from VC. err: %+v", vs.cfg.Workspace.Datacenter, err) continue } // Get the folder reference for global working directory where the dummy VM needs to be created. - vmFolder, err := dc.GetFolderByPath(ctx, vs.cfg.Global.WorkingDir) + vmFolder, err := dc.GetFolderByPath(ctx, vs.cfg.Workspace.Folder) if err != nil { - glog.V(4).Infof("Unable to get the kubernetes folder: %q reference. err: %+v", vs.cfg.Global.WorkingDir, err) + glog.V(4).Infof("Unable to get the kubernetes folder: %q reference. err: %+v", vs.cfg.Workspace.Folder, err) continue } // A write lock is acquired to make sure the cleanUp routine doesn't delete any VM's created by ongoing PVC requests. defer cleanUpDummyVMLock.Lock() err = diskmanagers.CleanUpDummyVMs(ctx, vmFolder, dc) if err != nil { - glog.V(4).Infof("Unable to clean up dummy VM's in the kubernetes cluster: %q. err: %+v", vs.cfg.Global.WorkingDir, err) + glog.V(4).Infof("Unable to clean up dummy VM's in the kubernetes cluster: %q. err: %+v", vs.cfg.Workspace.Folder, err) + } + } +} + +// Get canonical volume path for volume Path. +// Example1: The canonical path for volume path - [vsanDatastore] kubevols/volume.vmdk will be [vsanDatastore] 25d8b159-948c-4b73-e499-02001ad1b044/volume.vmdk +// Example2: The canonical path for volume path - [vsanDatastore] 25d8b159-948c-4b73-e499-02001ad1b044/volume.vmdk will be same as volume Path. 
+func getcanonicalVolumePath(ctx context.Context, dc *vclib.Datacenter, volumePath string) (string, error) { + var folderID string + var folderExists bool + canonicalVolumePath := volumePath + dsPathObj, err := vclib.GetDatastorePathObjFromVMDiskPath(volumePath) + if err != nil { + return "", err + } + dsPath := strings.Split(strings.TrimSpace(dsPathObj.Path), "/") + if len(dsPath) <= 1 { + return canonicalVolumePath, nil + } + datastore := dsPathObj.Datastore + dsFolder := dsPath[0] + folderNameIDMap, datastoreExists := datastoreFolderIDMap[datastore] + if datastoreExists { + folderID, folderExists = folderNameIDMap[dsFolder] + } + // Get the datastore folder ID if datastore or folder doesn't exist in datastoreFolderIDMap + if !datastoreExists || !folderExists { + if !vclib.IsValidUUID(dsFolder) { + dummyDiskVolPath := "[" + datastore + "] " + dsFolder + "/" + DummyDiskName + // Querying a non-existent dummy disk on the datastore folder. + // It would fail and return an folder ID in the error message. + _, err := dc.GetVirtualDiskPage83Data(ctx, dummyDiskVolPath) + if err != nil { + re := regexp.MustCompile("File (.*?) was not found") + match := re.FindStringSubmatch(err.Error()) + canonicalVolumePath = match[1] + } + } + diskPath := vclib.GetPathFromVMDiskPath(canonicalVolumePath) + if diskPath == "" { + return "", fmt.Errorf("Failed to parse canonicalVolumePath: %s in getcanonicalVolumePath method", canonicalVolumePath) + } + folderID = strings.Split(strings.TrimSpace(diskPath), "/")[0] + setdatastoreFolderIDMap(datastoreFolderIDMap, datastore, dsFolder, folderID) + } + canonicalVolumePath = strings.Replace(volumePath, dsFolder, folderID, 1) + return canonicalVolumePath, nil +} + +func setdatastoreFolderIDMap( + datastoreFolderIDMap map[string]map[string]string, + datastore string, + folderName string, + folderID string) { + folderNameIDMap := datastoreFolderIDMap[datastore] + if folderNameIDMap == nil { + folderNameIDMap = make(map[string]string) + datastoreFolderIDMap[datastore] = folderNameIDMap + } + folderNameIDMap[folderName] = folderID +} + +func convertVolPathToDevicePath(ctx context.Context, dc *vclib.Datacenter, volPath string) (string, error) { + volPath = vclib.RemoveStorageClusterORFolderNameFromVDiskPath(volPath) + // Get the canonical volume path for volPath. + canonicalVolumePath, err := getcanonicalVolumePath(ctx, dc, volPath) + if err != nil { + glog.Errorf("Failed to get canonical vsphere volume path for volume: %s. err: %+v", volPath, err) + return "", err + } + // Check if the volume path contains .vmdk extension. 
If not, add the extension and update the nodeVolumes Map + if len(canonicalVolumePath) > 0 && filepath.Ext(canonicalVolumePath) != ".vmdk" { + canonicalVolumePath += ".vmdk" + } + return canonicalVolumePath, nil +} + +// convertVolPathsToDevicePaths removes cluster or folder path from volPaths and convert to canonicalPath +func (vs *VSphere) convertVolPathsToDevicePaths(ctx context.Context, nodeVolumes map[k8stypes.NodeName][]string) (map[k8stypes.NodeName][]string, error) { + vmVolumes := make(map[k8stypes.NodeName][]string) + for nodeName, volPaths := range nodeVolumes { + nodeInfo, err := vs.nodeManager.GetNodeInfo(nodeName) + if err != nil { + return nil, err + } + + _, err = vs.getVSphereInstanceForServer(nodeInfo.vcServer, ctx) + if err != nil { + return nil, err + } + + for i, volPath := range volPaths { + deviceVolPath, err := convertVolPathToDevicePath(ctx, nodeInfo.dataCenter, volPath) + if err != nil { + glog.Errorf("Failed to convert vsphere volume path %s to device path for volume %s. err: %+v", volPath, deviceVolPath, err) + return nil, err + } + volPaths[i] = deviceVolPath + } + vmVolumes[nodeName] = volPaths + } + return vmVolumes, nil +} + +// checkDiskAttached verifies volumes are attached to the VMs which are in same vCenter and Datacenter +// Returns nodes if exist any for which VM is not found in that vCenter and Datacenter +func (vs *VSphere) checkDiskAttached(ctx context.Context, nodes []k8stypes.NodeName, nodeVolumes map[k8stypes.NodeName][]string, attached map[string]map[string]bool, retry bool) ([]k8stypes.NodeName, error) { + var nodesToRetry []k8stypes.NodeName + var vmList []*vclib.VirtualMachine + var nodeInfo NodeInfo + var err error + + for _, nodeName := range nodes { + nodeInfo, err = vs.nodeManager.GetNodeInfo(nodeName) + if err != nil { + return nodesToRetry, err + } + vmList = append(vmList, nodeInfo.vm) + } + + // Making sure session is valid + _, err = vs.getVSphereInstanceForServer(nodeInfo.vcServer, ctx) + if err != nil { + return nodesToRetry, err + } + + // If any of the nodes are not present property collector query will fail for entire operation + vmMoList, err := nodeInfo.dataCenter.GetVMMoList(ctx, vmList, []string{"config.hardware.device", "name", "config.uuid"}) + if err != nil { + if vclib.IsManagedObjectNotFoundError(err) && !retry { + glog.V(4).Infof("checkDiskAttached: ManagedObjectNotFound for property collector query for nodes: %+v vms: %+v", nodes, vmList) + // Property Collector Query failed + // VerifyVolumePaths per VM + for _, nodeName := range nodes { + nodeInfo, err := vs.nodeManager.GetNodeInfo(nodeName) + if err != nil { + return nodesToRetry, err + } + devices, err := nodeInfo.vm.VirtualMachine.Device(ctx) + if err != nil { + if vclib.IsManagedObjectNotFoundError(err) { + glog.V(4).Infof("checkDiskAttached: ManagedObjectNotFound for Kubernetes node: %s with vSphere Virtual Machine reference: %v", nodeName, nodeInfo.vm) + nodesToRetry = append(nodesToRetry, nodeName) + continue + } + return nodesToRetry, err + } + glog.V(4).Infof("Verifying Volume Paths by devices for node %s and VM %s", nodeName, nodeInfo.vm) + vclib.VerifyVolumePathsForVMDevices(devices, nodeVolumes[nodeName], convertToString(nodeName), attached) + } + } + return nodesToRetry, err + } + + vmMoMap := make(map[string]mo.VirtualMachine) + for _, vmMo := range vmMoList { + if vmMo.Config == nil { + glog.Errorf("Config is not available for VM: %q", vmMo.Name) + continue + } + glog.V(9).Infof("vmMoMap vmname: %q vmuuid: %s", vmMo.Name, 
strings.ToLower(vmMo.Config.Uuid)) + vmMoMap[strings.ToLower(vmMo.Config.Uuid)] = vmMo + } + + glog.V(9).Infof("vmMoMap: +%v", vmMoMap) + + for _, nodeName := range nodes { + node, err := vs.nodeManager.GetNode(nodeName) + if err != nil { + return nodesToRetry, err } + glog.V(9).Infof("Verifying volume for nodeName: %q with nodeuuid: %s", nodeName, node.Status.NodeInfo.SystemUUID, vmMoMap) + vclib.VerifyVolumePathsForVM(vmMoMap[strings.ToLower(node.Status.NodeInfo.SystemUUID)], nodeVolumes[nodeName], convertToString(nodeName), attached) } + return nodesToRetry, nil } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/BUILD index 06544fbddada..fe5a3adc085a 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/BUILD @@ -12,13 +12,15 @@ go_test( "controller_ref_manager_test.go", "controller_utils_test.go", ], + importpath = "k8s.io/kubernetes/pkg/controller", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/install:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", + "//pkg/apis/core/install:go_default_library", "//pkg/controller/testutil:go_default_library", "//pkg/securitycontext:go_default_library", + "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", @@ -48,11 +50,13 @@ go_library( "doc.go", "lookup_cache.go", ], + importpath = "k8s.io/kubernetes/pkg/controller", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/install:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/api/v1/pod:go_default_library", - "//pkg/api/validation:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/install:go_default_library", + "//pkg/apis/core/validation:go_default_library", "//pkg/serviceaccount:go_default_library", "//pkg/util/hash:go_default_library", "//pkg/util/taints:go_default_library", @@ -79,13 +83,11 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/authentication/v1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", "//vendor/k8s.io/client-go/tools/record:go_default_library", - "//vendor/k8s.io/client-go/tools/reference:go_default_library", "//vendor/k8s.io/client-go/util/integer:go_default_library", "//vendor/k8s.io/client-go/util/retry:go_default_library", ], @@ -105,6 +107,7 @@ filegroup( "//pkg/controller/bootstrap:all-srcs", "//pkg/controller/certificates:all-srcs", "//pkg/controller/cloud:all-srcs", + "//pkg/controller/clusterroleaggregation:all-srcs", "//pkg/controller/cronjob:all-srcs", "//pkg/controller/daemon:all-srcs", "//pkg/controller/deployment:all-srcs", @@ -130,6 +133,7 @@ filegroup( "//pkg/controller/volume/events:all-srcs", "//pkg/controller/volume/expand:all-srcs", "//pkg/controller/volume/persistentvolume:all-srcs", + "//pkg/controller/volume/pvcprotection:all-srcs", ], tags = ["automanaged"], ) diff --git 
a/vendor/k8s.io/kubernetes/pkg/controller/bootstrap/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/bootstrap/BUILD index b4b0c24fbf57..6dfcd204c9d5 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/bootstrap/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/bootstrap/BUILD @@ -15,10 +15,11 @@ go_test( "tokencleaner_test.go", "util_test.go", ], + importpath = "k8s.io/kubernetes/pkg/controller/bootstrap", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/helper:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/helper:go_default_library", "//pkg/bootstrap/api:go_default_library", "//vendor/github.com/davecgh/go-spew/spew:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", @@ -39,8 +40,9 @@ go_library( "tokencleaner.go", "util.go", ], + importpath = "k8s.io/kubernetes/pkg/controller/bootstrap", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/bootstrap/api:go_default_library", "//pkg/util/metrics:go_default_library", "//vendor/github.com/golang/glog:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/bootstrap/bootstrapsigner.go b/vendor/k8s.io/kubernetes/pkg/controller/bootstrap/bootstrapsigner.go index 07a987658be0..5bb53a448378 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/bootstrap/bootstrapsigner.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/bootstrap/bootstrapsigner.go @@ -33,7 +33,7 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" bootstrapapi "k8s.io/kubernetes/pkg/bootstrap/api" "k8s.io/kubernetes/pkg/util/metrics" ) @@ -92,26 +92,28 @@ type BootstrapSigner struct { // NewBootstrapSigner returns a new *BootstrapSigner. 
// // TODO: Switch to shared informers -func NewBootstrapSigner(cl clientset.Interface, options BootstrapSignerOptions) *BootstrapSigner { +func NewBootstrapSigner(cl clientset.Interface, options BootstrapSignerOptions) (*BootstrapSigner, error) { e := &BootstrapSigner{ client: cl, configMapKey: options.ConfigMapNamespace + "/" + options.ConfigMapName, secretNamespace: options.TokenSecretNamespace, syncQueue: workqueue.NewNamed("bootstrap_signer_queue"), } - if cl.Core().RESTClient().GetRateLimiter() != nil { - metrics.RegisterMetricAndTrackRateLimiterUsage("bootstrap_signer", cl.Core().RESTClient().GetRateLimiter()) + if cl.CoreV1().RESTClient().GetRateLimiter() != nil { + if err := metrics.RegisterMetricAndTrackRateLimiterUsage("bootstrap_signer", cl.CoreV1().RESTClient().GetRateLimiter()); err != nil { + return nil, err + } } configMapSelector := fields.SelectorFromSet(map[string]string{api.ObjectNameField: options.ConfigMapName}) e.configMaps, e.configMapsController = cache.NewInformer( &cache.ListWatch{ ListFunc: func(lo metav1.ListOptions) (runtime.Object, error) { lo.FieldSelector = configMapSelector.String() - return e.client.Core().ConfigMaps(options.ConfigMapNamespace).List(lo) + return e.client.CoreV1().ConfigMaps(options.ConfigMapNamespace).List(lo) }, WatchFunc: func(lo metav1.ListOptions) (watch.Interface, error) { lo.FieldSelector = configMapSelector.String() - return e.client.Core().ConfigMaps(options.ConfigMapNamespace).Watch(lo) + return e.client.CoreV1().ConfigMaps(options.ConfigMapNamespace).Watch(lo) }, }, &v1.ConfigMap{}, @@ -127,11 +129,11 @@ func NewBootstrapSigner(cl clientset.Interface, options BootstrapSignerOptions) &cache.ListWatch{ ListFunc: func(lo metav1.ListOptions) (runtime.Object, error) { lo.FieldSelector = secretSelector.String() - return e.client.Core().Secrets(e.secretNamespace).List(lo) + return e.client.CoreV1().Secrets(e.secretNamespace).List(lo) }, WatchFunc: func(lo metav1.ListOptions) (watch.Interface, error) { lo.FieldSelector = secretSelector.String() - return e.client.Core().Secrets(e.secretNamespace).Watch(lo) + return e.client.CoreV1().Secrets(e.secretNamespace).Watch(lo) }, }, &v1.Secret{}, @@ -142,7 +144,7 @@ func NewBootstrapSigner(cl clientset.Interface, options BootstrapSignerOptions) DeleteFunc: func(_ interface{}) { e.pokeConfigMapSync() }, }, ) - return e + return e, nil } // Run runs controller loops and returns when they are done @@ -227,7 +229,7 @@ func (e *BootstrapSigner) signConfigMap() { } func (e *BootstrapSigner) updateConfigMap(cm *v1.ConfigMap) { - _, err := e.client.Core().ConfigMaps(cm.Namespace).Update(cm) + _, err := e.client.CoreV1().ConfigMaps(cm.Namespace).Update(cm) if err != nil && !apierrors.IsConflict(err) && !apierrors.IsNotFound(err) { glog.V(3).Infof("Error updating ConfigMap: %v", err) } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/bootstrap/tokencleaner.go b/vendor/k8s.io/kubernetes/pkg/controller/bootstrap/tokencleaner.go index 38328edda201..6c099a4c733e 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/bootstrap/tokencleaner.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/bootstrap/tokencleaner.go @@ -30,7 +30,7 @@ import ( "k8s.io/apimachinery/pkg/watch" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" bootstrapapi "k8s.io/kubernetes/pkg/bootstrap/api" "k8s.io/kubernetes/pkg/util/metrics" ) @@ -66,13 +66,15 @@ type TokenCleaner struct { // NewTokenCleaner returns a new *NewTokenCleaner. 
// // TODO: Switch to shared informers -func NewTokenCleaner(cl clientset.Interface, options TokenCleanerOptions) *TokenCleaner { +func NewTokenCleaner(cl clientset.Interface, options TokenCleanerOptions) (*TokenCleaner, error) { e := &TokenCleaner{ client: cl, tokenSecretNamespace: options.TokenSecretNamespace, } - if cl.Core().RESTClient().GetRateLimiter() != nil { - metrics.RegisterMetricAndTrackRateLimiterUsage("token_cleaner", cl.Core().RESTClient().GetRateLimiter()) + if cl.CoreV1().RESTClient().GetRateLimiter() != nil { + if err := metrics.RegisterMetricAndTrackRateLimiterUsage("token_cleaner", cl.CoreV1().RESTClient().GetRateLimiter()); err != nil { + return nil, err + } } secretSelector := fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(bootstrapapi.SecretTypeBootstrapToken)}) @@ -80,11 +82,11 @@ func NewTokenCleaner(cl clientset.Interface, options TokenCleanerOptions) *Token &cache.ListWatch{ ListFunc: func(lo metav1.ListOptions) (runtime.Object, error) { lo.FieldSelector = secretSelector.String() - return e.client.Core().Secrets(e.tokenSecretNamespace).List(lo) + return e.client.CoreV1().Secrets(e.tokenSecretNamespace).List(lo) }, WatchFunc: func(lo metav1.ListOptions) (watch.Interface, error) { lo.FieldSelector = secretSelector.String() - return e.client.Core().Secrets(e.tokenSecretNamespace).Watch(lo) + return e.client.CoreV1().Secrets(e.tokenSecretNamespace).Watch(lo) }, }, &v1.Secret{}, @@ -94,7 +96,7 @@ func NewTokenCleaner(cl clientset.Interface, options TokenCleanerOptions) *Token UpdateFunc: func(oldSecret, newSecret interface{}) { e.evalSecret(newSecret) }, }, ) - return e + return e, nil } // Run runs controller loops and returns when they are done @@ -118,7 +120,7 @@ func (tc *TokenCleaner) evalSecret(o interface{}) { if len(secret.UID) > 0 { options = &metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &secret.UID}} } - err := tc.client.Core().Secrets(secret.Namespace).Delete(secret.Name, options) + err := tc.client.CoreV1().Secrets(secret.Namespace).Delete(secret.Name, options) // NotFound isn't a real error (it's already been deleted) // Conflict isn't a real error (the UID precondition failed) if err != nil && !apierrors.IsConflict(err) && !apierrors.IsNotFound(err) { diff --git a/vendor/k8s.io/kubernetes/pkg/controller/certificates/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/certificates/BUILD index e81b0fcdbbcd..b4470499d06e 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/certificates/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/certificates/BUILD @@ -10,13 +10,12 @@ go_library( "certificate_controller.go", "certificate_controller_utils.go", ], - visibility = [ - ":__subpackages__", - "//cmd/gke-certificates-controller:__subpackages__", - ], + importpath = "k8s.io/kubernetes/pkg/controller/certificates", + visibility = ["//visibility:public"], deps = [ "//pkg/controller:go_default_library", "//vendor/github.com/golang/glog:go_default_library", + "//vendor/github.com/juju/ratelimit:go_default_library", "//vendor/k8s.io/api/certificates/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", @@ -43,6 +42,7 @@ filegroup( srcs = [ ":package-srcs", "//pkg/controller/certificates/approver:all-srcs", + "//pkg/controller/certificates/cleaner:all-srcs", "//pkg/controller/certificates/signer:all-srcs", ], tags = ["automanaged"], @@ -54,6 +54,7 @@ filegroup( go_test( name = "go_default_test", srcs = 
["certificate_controller_test.go"], + importpath = "k8s.io/kubernetes/pkg/controller/certificates", library = ":go_default_library", deps = [ "//pkg/controller:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/certificates/approver/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/certificates/approver/BUILD index eac5be6944ee..1e05a1408ac2 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/certificates/approver/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/certificates/approver/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["sarapprove_test.go"], + importpath = "k8s.io/kubernetes/pkg/controller/certificates/approver", library = ":go_default_library", deps = [ "//pkg/apis/certificates/v1beta1:go_default_library", @@ -24,6 +25,7 @@ go_test( go_library( name = "go_default_library", srcs = ["sarapprove.go"], + importpath = "k8s.io/kubernetes/pkg/controller/certificates/approver", deps = [ "//pkg/apis/certificates/v1beta1:go_default_library", "//pkg/controller/certificates:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/certificates/approver/sarapprove.go b/vendor/k8s.io/kubernetes/pkg/controller/certificates/approver/sarapprove.go index d2eb5ac5efc5..3aa6be1ce08f 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/certificates/approver/sarapprove.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/certificates/approver/sarapprove.go @@ -44,7 +44,7 @@ type sarApprover struct { recognizers []csrRecognizer } -func NewCSRApprovingController(client clientset.Interface, csrInformer certificatesinformers.CertificateSigningRequestInformer) (*certificates.CertificateController, error) { +func NewCSRApprovingController(client clientset.Interface, csrInformer certificatesinformers.CertificateSigningRequestInformer) *certificates.CertificateController { approver := &sarApprover{ client: client, recognizers: recognizers(), @@ -106,7 +106,7 @@ func (a *sarApprover) handle(csr *capi.CertificateSigningRequest) error { } if approved { appendApprovalCondition(csr, r.successMessage) - _, err = a.client.Certificates().CertificateSigningRequests().UpdateApproval(csr) + _, err = a.client.CertificatesV1beta1().CertificateSigningRequests().UpdateApproval(csr) if err != nil { return fmt.Errorf("error updating approval for csr: %v", err) } @@ -115,7 +115,7 @@ func (a *sarApprover) handle(csr *capi.CertificateSigningRequest) error { } if len(tried) != 0 { - return fmt.Errorf("recognized csr %q as %v but subject access review was not approved", csr.Name, tried) + return certificates.IgnorableError("recognized csr %q as %v but subject access review was not approved", csr.Name, tried) } return nil diff --git a/vendor/k8s.io/kubernetes/pkg/controller/certificates/certificate_controller.go b/vendor/k8s.io/kubernetes/pkg/controller/certificates/certificate_controller.go index 2caa30de327c..e86810a4be18 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/certificates/certificate_controller.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/certificates/certificate_controller.go @@ -36,6 +36,7 @@ import ( "k8s.io/kubernetes/pkg/controller" "github.com/golang/glog" + "github.com/juju/ratelimit" ) type CertificateController struct { @@ -53,16 +54,20 @@ func NewCertificateController( kubeClient clientset.Interface, csrInformer certificatesinformers.CertificateSigningRequestInformer, handler func(*certificates.CertificateSigningRequest) error, -) (*CertificateController, error) { +) *CertificateController { // Send events to the apiserver eventBroadcaster 
:= record.NewBroadcaster() eventBroadcaster.StartLogging(glog.Infof) - eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.Core().RESTClient()).Events("")}) + eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")}) cc := &CertificateController{ kubeClient: kubeClient, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "certificate"), - handler: handler, + queue: workqueue.NewNamedRateLimitingQueue(workqueue.NewMaxOfRateLimiter( + workqueue.NewItemExponentialFailureRateLimiter(200*time.Millisecond, 1000*time.Second), + // 10 qps, 100 bucket size. This is only for retry speed and its only the overall factor (not per item) + &workqueue.BucketRateLimiter{Bucket: ratelimit.NewBucketWithRate(float64(10), int64(100))}, + ), "certificate"), + handler: handler, } // Manage the addition/update of certificate requests @@ -97,8 +102,7 @@ func NewCertificateController( }) cc.csrLister = csrInformer.Lister() cc.csrsSynced = csrInformer.Informer().HasSynced - cc.handler = handler - return cc, nil + return cc } // Run the main goroutine responsible for watching and syncing jobs. @@ -136,7 +140,11 @@ func (cc *CertificateController) processNextWorkItem() bool { if err := cc.syncFunc(cKey.(string)); err != nil { cc.queue.AddRateLimited(cKey) - utilruntime.HandleError(fmt.Errorf("Sync %v failed with : %v", cKey, err)) + if _, ignorable := err.(ignorableError); !ignorable { + utilruntime.HandleError(fmt.Errorf("Sync %v failed with : %v", cKey, err)) + } else { + glog.V(4).Infof("Sync %v failed with : %v", cKey, err) + } return true } @@ -182,3 +190,17 @@ func (cc *CertificateController) syncFunc(key string) error { return cc.handler(csr) } + +// IgnorableError returns an error that we shouldn't handle (i.e. log) because +// it's spammy and usually user error. Instead we will log these errors at a +// higher log level. We still need to throw these errors to signal that the +// sync should be retried. 
+func IgnorableError(s string, args ...interface{}) ignorableError { + return ignorableError(fmt.Sprintf(s, args...)) +} + +type ignorableError string + +func (e ignorableError) Error() string { + return string(e) +} diff --git a/vendor/k8s.io/kubernetes/pkg/controller/certificates/cleaner/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/certificates/cleaner/BUILD new file mode 100644 index 000000000000..14ca29e60ee2 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/controller/certificates/cleaner/BUILD @@ -0,0 +1,45 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["cleaner.go"], + importpath = "k8s.io/kubernetes/pkg/controller/certificates/cleaner", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/api/certificates/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/client-go/informers/certificates/v1beta1:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1:go_default_library", + "//vendor/k8s.io/client-go/listers/certificates/v1beta1:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = ["cleaner_test.go"], + importpath = "k8s.io/kubernetes/pkg/controller/certificates/cleaner", + library = ":go_default_library", + deps = [ + "//vendor/k8s.io/api/certificates/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/controller/certificates/cleaner/cleaner.go b/vendor/k8s.io/kubernetes/pkg/controller/certificates/cleaner/cleaner.go new file mode 100644 index 000000000000..0cbc8c232ca8 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/controller/certificates/cleaner/cleaner.go @@ -0,0 +1,200 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package cleaner implements an automated cleaner that does garbage collection +// on CSRs that meet specific criteria. With automated CSR requests and +// automated approvals, the volume of CSRs only increases over time, at a rapid +// rate if the certificate duration is short. 
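Stepping back to the IgnorableError helper introduced just above, the pattern is small enough to show end to end: a string-backed error type plus a type assertion at the point where the error is logged, so spammy-but-retryable failures are logged at low priority while still being requeued. Below is a minimal sketch under that reading, outside the vendored diff; the sync function and its message are hypothetical stand-ins for the controller's syncFunc.

```go
package main

import "fmt"

// ignorableError mirrors the string-backed type added above: it still signals
// failure (so the work item is retried) but callers can recognise it and avoid
// logging it as a hard error.
type ignorableError string

func (e ignorableError) Error() string { return string(e) }

// IgnorableError builds an ignorableError from a format string.
func IgnorableError(s string, args ...interface{}) ignorableError {
	return ignorableError(fmt.Sprintf(s, args...))
}

// sync is a hypothetical stand-in for the certificate controller's syncFunc.
func sync(key string) error {
	return IgnorableError("recognized csr %q but subject access review was not approved", key)
}

func main() {
	if err := sync("node-csr-example"); err != nil {
		if _, ignorable := err.(ignorableError); ignorable {
			fmt.Println("retrying quietly:", err) // the controller logs this only at higher verbosity
		} else {
			fmt.Println("hard failure:", err)
		}
	}
}
```

The new certificates cleaner package, whose sources begin below, addresses the other side of the same noise problem by garbage-collecting old CSRs on a fixed schedule.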
+package cleaner + +import ( + "crypto/x509" + "encoding/pem" + "fmt" + "time" + + "github.com/golang/glog" + + capi "k8s.io/api/certificates/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + certificatesinformers "k8s.io/client-go/informers/certificates/v1beta1" + csrclient "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" + certificateslisters "k8s.io/client-go/listers/certificates/v1beta1" +) + +const ( + // The interval to list all CSRs and check each one against the criteria to + // automatically clean it up. + pollingInterval = 1 * time.Hour + // The time periods after which these different CSR statuses should be + // cleaned up. + approvedExpiration = 1 * time.Hour + deniedExpiration = 1 * time.Hour + pendingExpiration = 24 * time.Hour +) + +// CSRCleanerController is a controller that garbage collects old certificate +// signing requests (CSRs). Since there are mechanisms that automatically +// create CSRs, and mechanisms that automatically approve CSRs, in order to +// prevent a build up of CSRs over time, it is necessary to GC them. CSRs will +// be removed if they meet one of the following criteria: the CSR is Approved +// with a certificate and is old enough to be past the GC issued deadline, the +// CSR is denied and is old enough to be past the GC denied deadline, the CSR +// is Pending and is old enough to be past the GC pending deadline, the CSR is +// approved with a certificate and the certificate is expired. +type CSRCleanerController struct { + csrClient csrclient.CertificateSigningRequestInterface + csrLister certificateslisters.CertificateSigningRequestLister +} + +// NewCSRCleanerController creates a new CSRCleanerController. +func NewCSRCleanerController( + csrClient csrclient.CertificateSigningRequestInterface, + csrInformer certificatesinformers.CertificateSigningRequestInformer, +) *CSRCleanerController { + return &CSRCleanerController{ + csrClient: csrClient, + csrLister: csrInformer.Lister(), + } +} + +// Run the main goroutine responsible for watching and syncing jobs. +func (ccc *CSRCleanerController) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + + glog.Infof("Starting CSR cleaner controller") + defer glog.Infof("Shutting down CSR cleaner controller") + + for i := 0; i < workers; i++ { + go wait.Until(ccc.worker, pollingInterval, stopCh) + } + + <-stopCh +} + +// worker runs a thread that dequeues CSRs, handles them, and marks them done. +func (ccc *CSRCleanerController) worker() { + csrs, err := ccc.csrLister.List(labels.Everything()) + if err != nil { + glog.Errorf("Unable to list CSRs: %v", err) + return + } + for _, csr := range csrs { + if err := ccc.handle(csr); err != nil { + glog.Errorf("Error while attempting to clean CSR %q: %v", csr.Name, err) + } + } +} + +func (ccc *CSRCleanerController) handle(csr *capi.CertificateSigningRequest) error { + isIssuedExpired, err := isIssuedExpired(csr) + if err != nil { + return err + } + if isIssuedPastDeadline(csr) || isDeniedPastDeadline(csr) || isPendingPastDeadline(csr) || isIssuedExpired { + if err := ccc.csrClient.Delete(csr.Name, nil); err != nil { + return fmt.Errorf("unable to delete CSR %q: %v", csr.Name, err) + } + } + return nil +} + +// isIssuedExpired checks if the CSR has been issued a certificate and if the +// expiration of the certificate (the NotAfter value) has passed. 
+func isIssuedExpired(csr *capi.CertificateSigningRequest) (bool, error) { + for _, c := range csr.Status.Conditions { + isExpired, err := isExpired(csr) + if err != nil { + return false, err + } + if c.Type == capi.CertificateApproved && isIssued(csr) && isExpired { + glog.Infof("Cleaning CSR %q as the associated certificate is expired.", csr.Name, approvedExpiration) + return true, nil + } + } + return false, nil +} + +// isPendingPastDeadline checks if the certificate has a Pending status and the +// creation time of the CSR is passed the deadline that pending requests are +// maintained for. +func isPendingPastDeadline(csr *capi.CertificateSigningRequest) bool { + // If there are no Conditions on the status, the CSR will appear via + // `kubectl` as `Pending`. + if len(csr.Status.Conditions) == 0 && isOlderThan(csr.CreationTimestamp, pendingExpiration) { + glog.Infof("Cleaning CSR %q as it is more than %v old and unhandled.", csr.Name, pendingExpiration) + return true + } + return false +} + +// isDeniedPastDeadline checks if the certificate has a Denied status and the +// creation time of the CSR is passed the deadline that denied requests are +// maintained for. +func isDeniedPastDeadline(csr *capi.CertificateSigningRequest) bool { + for _, c := range csr.Status.Conditions { + if c.Type == capi.CertificateDenied && isOlderThan(c.LastUpdateTime, deniedExpiration) { + glog.Infof("Cleaning CSR %q as it is more than %v old and denied.", csr.Name, deniedExpiration) + return true + } + } + return false +} + +// isIssuedPastDeadline checks if the certificate has an Issued status and the +// creation time of the CSR is passed the deadline that issued requests are +// maintained for. +func isIssuedPastDeadline(csr *capi.CertificateSigningRequest) bool { + for _, c := range csr.Status.Conditions { + if c.Type == capi.CertificateApproved && isIssued(csr) && isOlderThan(c.LastUpdateTime, approvedExpiration) { + glog.Infof("Cleaning CSR %q as it is more than %v old and approved.", csr.Name, approvedExpiration) + return true + } + } + return false +} + +// isOlderThan checks that t is a non-zero time after time.Now() + d. +func isOlderThan(t metav1.Time, d time.Duration) bool { + return !t.IsZero() && t.Sub(time.Now()) < -1*d +} + +// isIssued checks if the CSR has `Issued` status. There is no explicit +// 'Issued' status. Implicitly, if there is a certificate associated with the +// CSR, the CSR statuses that are visible via `kubectl` will include 'Issued'. +func isIssued(csr *capi.CertificateSigningRequest) bool { + return csr.Status.Certificate != nil +} + +// isExpired checks if the CSR has a certificate and the date in the `NotAfter` +// field has gone by. 
+func isExpired(csr *capi.CertificateSigningRequest) (bool, error) { + if csr.Status.Certificate == nil { + return false, nil + } + block, _ := pem.Decode(csr.Status.Certificate) + if block == nil { + return false, fmt.Errorf("expected the certificate associated with the CSR to be PEM encoded") + } + certs, err := x509.ParseCertificates(block.Bytes) + if err != nil { + return false, fmt.Errorf("unable to parse certificate data: %v", err) + } + return time.Now().After(certs[0].NotAfter), nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/controller/certificates/signer/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/certificates/signer/BUILD index c29d2d132c06..d7da4d31e076 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/certificates/signer/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/certificates/signer/BUILD @@ -14,6 +14,7 @@ go_test( "testdata/ca.key", "testdata/kubelet.csr", ], + importpath = "k8s.io/kubernetes/pkg/controller/certificates/signer", library = ":go_default_library", deps = [ "//vendor/k8s.io/api/certificates/v1beta1:go_default_library", @@ -24,6 +25,7 @@ go_test( go_library( name = "go_default_library", srcs = ["cfssl_signer.go"], + importpath = "k8s.io/kubernetes/pkg/controller/certificates/signer", deps = [ "//pkg/controller/certificates:go_default_library", "//vendor/github.com/cloudflare/cfssl/config:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/certificates/signer/cfssl_signer.go b/vendor/k8s.io/kubernetes/pkg/controller/certificates/signer/cfssl_signer.go index 1a5f3ff7e7b6..dd57d68a0dcf 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/certificates/signer/cfssl_signer.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/certificates/signer/cfssl_signer.go @@ -50,7 +50,7 @@ func NewCSRSigningController( client, csrInformer, signer.handle, - ) + ), nil } type cfsslSigner struct { @@ -103,7 +103,7 @@ func (s *cfsslSigner) handle(csr *capi.CertificateSigningRequest) error { if err != nil { return fmt.Errorf("error auto signing csr: %v", err) } - _, err = s.client.Certificates().CertificateSigningRequests().UpdateStatus(csr) + _, err = s.client.CertificatesV1beta1().CertificateSigningRequests().UpdateStatus(csr) if err != nil { return fmt.Errorf("error updating signature for csr: %v", err) } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/client_builder.go b/vendor/k8s.io/kubernetes/pkg/controller/client_builder.go index df873fb3e50a..041717623d77 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/client_builder.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/client_builder.go @@ -28,13 +28,13 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" apiserverserviceaccount "k8s.io/apiserver/pkg/authentication/serviceaccount" - clientgoclientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes" v1authentication "k8s.io/client-go/kubernetes/typed/authentication/v1" v1core "k8s.io/client-go/kubernetes/typed/core/v1" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/serviceaccount" "github.com/golang/glog" @@ -46,8 +46,8 @@ type ControllerClientBuilder interface { ConfigOrDie(name string) *restclient.Config Client(name string) (clientset.Interface, error) ClientOrDie(name string) clientset.Interface - ClientGoClient(name string) (clientgoclientset.Interface, error) - ClientGoClientOrDie(name string) clientgoclientset.Interface + 
ClientGoClient(name string) (clientset.Interface, error) + ClientGoClientOrDie(name string) clientset.Interface } // SimpleControllerClientBuilder returns a fixed client with different user agents @@ -85,15 +85,15 @@ func (b SimpleControllerClientBuilder) ClientOrDie(name string) clientset.Interf return client } -func (b SimpleControllerClientBuilder) ClientGoClient(name string) (clientgoclientset.Interface, error) { +func (b SimpleControllerClientBuilder) ClientGoClient(name string) (clientset.Interface, error) { clientConfig, err := b.Config(name) if err != nil { return nil, err } - return clientgoclientset.NewForConfig(clientConfig) + return clientset.NewForConfig(clientConfig) } -func (b SimpleControllerClientBuilder) ClientGoClientOrDie(name string) clientgoclientset.Interface { +func (b SimpleControllerClientBuilder) ClientGoClientOrDie(name string) clientset.Interface { client, err := b.ClientGoClient(name) if err != nil { glog.Fatal(err) @@ -237,7 +237,7 @@ func (b SAControllerClientBuilder) getAuthenticatedConfig(sa *v1.ServiceAccount, // If we couldn't run the token review, the API might be disabled or we might not have permission. // Try to make a request to /apis with the token. If we get a 401 we should consider the token invalid. clientConfigCopy := *clientConfig - clientConfigCopy.NegotiatedSerializer = api.Codecs + clientConfigCopy.NegotiatedSerializer = legacyscheme.Codecs client, err := restclient.UnversionedRESTClientFor(&clientConfigCopy) if err != nil { return nil, false, err @@ -275,15 +275,15 @@ func (b SAControllerClientBuilder) ClientOrDie(name string) clientset.Interface return client } -func (b SAControllerClientBuilder) ClientGoClient(name string) (clientgoclientset.Interface, error) { +func (b SAControllerClientBuilder) ClientGoClient(name string) (clientset.Interface, error) { clientConfig, err := b.Config(name) if err != nil { return nil, err } - return clientgoclientset.NewForConfig(clientConfig) + return clientset.NewForConfig(clientConfig) } -func (b SAControllerClientBuilder) ClientGoClientOrDie(name string) clientgoclientset.Interface { +func (b SAControllerClientBuilder) ClientGoClientOrDie(name string) clientset.Interface { client, err := b.ClientGoClient(name) if err != nil { glog.Fatal(err) diff --git a/vendor/k8s.io/kubernetes/pkg/controller/clusterroleaggregation/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/clusterroleaggregation/BUILD new file mode 100644 index 000000000000..94a93af996da --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/controller/clusterroleaggregation/BUILD @@ -0,0 +1,57 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["clusterroleaggregation_controller.go"], + importpath = "k8s.io/kubernetes/pkg/controller/clusterroleaggregation", + visibility = ["//visibility:public"], + deps = [ + "//pkg/controller:go_default_library", + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/api/rbac/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/client-go/informers/rbac/v1:go_default_library", + 
"//vendor/k8s.io/client-go/kubernetes/typed/rbac/v1:go_default_library", + "//vendor/k8s.io/client-go/listers/rbac/v1:go_default_library", + "//vendor/k8s.io/client-go/tools/cache:go_default_library", + "//vendor/k8s.io/client-go/util/workqueue:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = ["clusterroleaggregation_controller_test.go"], + importpath = "k8s.io/kubernetes/pkg/controller/clusterroleaggregation", + library = ":go_default_library", + deps = [ + "//pkg/controller:go_default_library", + "//vendor/k8s.io/api/rbac/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library", + "//vendor/k8s.io/client-go/listers/rbac/v1:go_default_library", + "//vendor/k8s.io/client-go/testing:go_default_library", + "//vendor/k8s.io/client-go/tools/cache:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/controller/clusterroleaggregation/clusterroleaggregation_controller.go b/vendor/k8s.io/kubernetes/pkg/controller/clusterroleaggregation/clusterroleaggregation_controller.go new file mode 100644 index 000000000000..05879e0e681d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/controller/clusterroleaggregation/clusterroleaggregation_controller.go @@ -0,0 +1,213 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package clusterroleaggregation + +import ( + "fmt" + "sort" + "time" + + "github.com/golang/glog" + + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + rbacinformers "k8s.io/client-go/informers/rbac/v1" + rbacclient "k8s.io/client-go/kubernetes/typed/rbac/v1" + rbaclisters "k8s.io/client-go/listers/rbac/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/kubernetes/pkg/controller" +) + +// ClusterRoleAggregationController is a controller to combine cluster roles +type ClusterRoleAggregationController struct { + clusterRoleClient rbacclient.ClusterRolesGetter + clusterRoleLister rbaclisters.ClusterRoleLister + clusterRolesSynced cache.InformerSynced + + syncHandler func(key string) error + queue workqueue.RateLimitingInterface +} + +// NewClusterRoleAggregation creates a new controller +func NewClusterRoleAggregation(clusterRoleInformer rbacinformers.ClusterRoleInformer, clusterRoleClient rbacclient.ClusterRolesGetter) *ClusterRoleAggregationController { + c := &ClusterRoleAggregationController{ + clusterRoleClient: clusterRoleClient, + clusterRoleLister: clusterRoleInformer.Lister(), + clusterRolesSynced: clusterRoleInformer.Informer().HasSynced, + + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ClusterRoleAggregator"), + } + c.syncHandler = c.syncClusterRole + + clusterRoleInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + c.enqueue() + }, + UpdateFunc: func(old, cur interface{}) { + c.enqueue() + }, + DeleteFunc: func(uncast interface{}) { + c.enqueue() + }, + }) + return c +} + +func (c *ClusterRoleAggregationController) syncClusterRole(key string) error { + _, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + return err + } + sharedClusterRole, err := c.clusterRoleLister.Get(name) + if errors.IsNotFound(err) { + return nil + } + if err != nil { + return err + } + if sharedClusterRole.AggregationRule == nil { + return nil + } + + newPolicyRules := []rbacv1.PolicyRule{} + for i := range sharedClusterRole.AggregationRule.ClusterRoleSelectors { + selector := sharedClusterRole.AggregationRule.ClusterRoleSelectors[i] + runtimeLabelSelector, err := metav1.LabelSelectorAsSelector(&selector) + if err != nil { + return err + } + clusterRoles, err := c.clusterRoleLister.List(runtimeLabelSelector) + if err != nil { + return err + } + sort.Sort(byName(clusterRoles)) + + for i := range clusterRoles { + if clusterRoles[i].Name == sharedClusterRole.Name { + continue + } + + for j := range clusterRoles[i].Rules { + currRule := clusterRoles[i].Rules[j] + if !ruleExists(newPolicyRules, currRule) { + newPolicyRules = append(newPolicyRules, currRule) + } + } + } + } + + if equality.Semantic.DeepEqual(newPolicyRules, sharedClusterRole.Rules) { + return nil + } + + // we need to update + clusterRole := sharedClusterRole.DeepCopy() + clusterRole.Rules = nil + for _, rule := range newPolicyRules { + clusterRole.Rules = append(clusterRole.Rules, *rule.DeepCopy()) + } + _, err = c.clusterRoleClient.ClusterRoles().Update(clusterRole) + + return err +} + +func ruleExists(haystack []rbacv1.PolicyRule, needle rbacv1.PolicyRule) bool { + for _, curr := range haystack { + if equality.Semantic.DeepEqual(curr, needle) { + return true + } + } + 
return false +} + +// Run starts the controller and blocks until stopCh is closed. +func (c *ClusterRoleAggregationController) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + glog.Infof("Starting ClusterRoleAggregator") + defer glog.Infof("Shutting down ClusterRoleAggregator") + + if !controller.WaitForCacheSync("ClusterRoleAggregator", stopCh, c.clusterRolesSynced) { + return + } + + for i := 0; i < workers; i++ { + go wait.Until(c.runWorker, time.Second, stopCh) + } + + <-stopCh +} + +func (c *ClusterRoleAggregationController) runWorker() { + for c.processNextWorkItem() { + } +} + +func (c *ClusterRoleAggregationController) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.syncHandler(dsKey.(string)) + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +func (c *ClusterRoleAggregationController) enqueue() { + // this is unusual, but since the set of all clusterroles is small and we don't know the dependency + // graph, just queue up every thing each time. This allows errors to be selectively retried if there + // is a problem updating a single role + allClusterRoles, err := c.clusterRoleLister.List(labels.Everything()) + if err != nil { + utilruntime.HandleError(fmt.Errorf("Couldn't list all objects %v", err)) + return + } + for _, clusterRole := range allClusterRoles { + // only queue ones that we may need to aggregate + if clusterRole.AggregationRule == nil { + continue + } + key, err := controller.KeyFunc(clusterRole) + if err != nil { + utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", clusterRole, err)) + return + } + c.queue.Add(key) + } +} + +type byName []*rbacv1.ClusterRole + +func (a byName) Len() int { return len(a) } +func (a byName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byName) Less(i, j int) bool { return a[i].Name < a[j].Name } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go b/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go index 360f2091b2dc..b662fa6fb574 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go @@ -38,15 +38,13 @@ import ( "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" - ref "k8s.io/client-go/tools/reference" "k8s.io/client-go/util/integer" clientretry "k8s.io/client-go/util/retry" - _ "k8s.io/kubernetes/pkg/api/install" podutil "k8s.io/kubernetes/pkg/api/v1/pod" - "k8s.io/kubernetes/pkg/api/validation" + _ "k8s.io/kubernetes/pkg/apis/core/install" + "k8s.io/kubernetes/pkg/apis/core/validation" hashutil "k8s.io/kubernetes/pkg/util/hash" taintutils "k8s.io/kubernetes/pkg/util/taints" @@ -412,7 +410,7 @@ type RealRSControl struct { var _ RSControlInterface = &RealRSControl{} func (r RealRSControl) PatchReplicaSet(namespace, name string, data []byte) error { - _, err := r.KubeClient.Extensions().ReplicaSets(namespace).Patch(name, types.StrategicMergePatchType, data) + _, err := r.KubeClient.ExtensionsV1beta1().ReplicaSets(namespace).Patch(name, types.StrategicMergePatchType, data) return err } @@ -474,29 +472,12 @@ func getPodsFinalizers(template 
*v1.PodTemplateSpec) []string { return desiredFinalizers } -func getPodsAnnotationSet(template *v1.PodTemplateSpec, object runtime.Object) (labels.Set, error) { +func getPodsAnnotationSet(template *v1.PodTemplateSpec) labels.Set { desiredAnnotations := make(labels.Set) for k, v := range template.Annotations { desiredAnnotations[k] = v } - createdByRef, err := ref.GetReference(scheme.Scheme, object) - if err != nil { - return desiredAnnotations, fmt.Errorf("unable to get controller reference: %v", err) - } - - // TODO: this code was not safe previously - as soon as new code came along that switched to v2, old clients - // would be broken upon reading it. This is explicitly hardcoded to v1 to guarantee predictable deployment. - // We need to consistently handle this case of annotation versioning. - codec := scheme.Codecs.LegacyCodec(v1.SchemeGroupVersion) - - createdByRefJson, err := runtime.Encode(codec, &v1.SerializedReference{ - Reference: *createdByRef, - }) - if err != nil { - return desiredAnnotations, fmt.Errorf("unable to serialize controller reference: %v", err) - } - desiredAnnotations[v1.CreatedByAnnotation] = string(createdByRefJson) - return desiredAnnotations, nil + return desiredAnnotations } func getPodsPrefix(controllerName string) string { @@ -546,17 +527,14 @@ func (r RealPodControl) CreatePodsOnNode(nodeName, namespace string, template *v } func (r RealPodControl) PatchPod(namespace, name string, data []byte) error { - _, err := r.KubeClient.Core().Pods(namespace).Patch(name, types.StrategicMergePatchType, data) + _, err := r.KubeClient.CoreV1().Pods(namespace).Patch(name, types.StrategicMergePatchType, data) return err } func GetPodFromTemplate(template *v1.PodTemplateSpec, parentObject runtime.Object, controllerRef *metav1.OwnerReference) (*v1.Pod, error) { desiredLabels := getPodsLabelSet(template) desiredFinalizers := getPodsFinalizers(template) - desiredAnnotations, err := getPodsAnnotationSet(template, parentObject) - if err != nil { - return nil, err - } + desiredAnnotations := getPodsAnnotationSet(template) accessor, err := meta.Accessor(parentObject) if err != nil { return nil, fmt.Errorf("parentObject does not have ObjectMeta, %v", err) @@ -589,7 +567,7 @@ func (r RealPodControl) createPods(nodeName, namespace string, template *v1.PodT if labels.Set(pod.Labels).AsSelectorPreValidated().Empty() { return fmt.Errorf("unable to create pods, no labels") } - if newPod, err := r.KubeClient.Core().Pods(namespace).Create(pod); err != nil { + if newPod, err := r.KubeClient.CoreV1().Pods(namespace).Create(pod); err != nil { r.Recorder.Eventf(object, v1.EventTypeWarning, FailedCreatePodReason, "Error creating: %v", err) return err } else { @@ -610,7 +588,7 @@ func (r RealPodControl) DeletePod(namespace string, podID string, object runtime return fmt.Errorf("object does not have ObjectMeta, %v", err) } glog.V(2).Infof("Controller %v deleting pod %v/%v", accessor.GetName(), namespace, podID) - if err := r.KubeClient.Core().Pods(namespace).Delete(podID, nil); err != nil { + if err := r.KubeClient.CoreV1().Pods(namespace).Delete(podID, nil); err != nil { r.Recorder.Eventf(object, v1.EventTypeWarning, FailedDeletePodReason, "Error deleting: %v", err) return fmt.Errorf("unable to delete pods: %v", err) } else { @@ -925,10 +903,10 @@ func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taints ...*v // First we try getting node from the API server cache, as it's cheaper. If it fails // we get it from etcd to be sure to have fresh data. 
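Editorial note, not part of the vendored patch: the surrounding controller_utils.go hunks move calls from the deprecated group accessors (Core(), Extensions()) to the versioned accessors (CoreV1(), ExtensionsV1beta1()). A minimal, illustrative sketch of that call style against the client-go generation vendored here; the helper name is hypothetical.

package clientexample

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// getNodeCheap is a hypothetical helper showing the versioned CoreV1() accessor
// and the "cheap read first" pattern in the hunk below: ResourceVersion "0"
// allows the apiserver to answer from its cache instead of hitting etcd.
func getNodeCheap(cs kubernetes.Interface, name string) error {
	_, err := cs.CoreV1().Nodes().Get(name, metav1.GetOptions{ResourceVersion: "0"})
	return err
}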
if firstTry { - oldNode, err = c.Core().Nodes().Get(nodeName, metav1.GetOptions{ResourceVersion: "0"}) + oldNode, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{ResourceVersion: "0"}) firstTry = false } else { - oldNode, err = c.Core().Nodes().Get(nodeName, metav1.GetOptions{}) + oldNode, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) } if err != nil { return err @@ -982,10 +960,10 @@ func RemoveTaintOffNode(c clientset.Interface, nodeName string, node *v1.Node, t // First we try getting node from the API server cache, as it's cheaper. If it fails // we get it from etcd to be sure to have fresh data. if firstTry { - oldNode, err = c.Core().Nodes().Get(nodeName, metav1.GetOptions{ResourceVersion: "0"}) + oldNode, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{ResourceVersion: "0"}) firstTry = false } else { - oldNode, err = c.Core().Nodes().Get(nodeName, metav1.GetOptions{}) + oldNode, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) } if err != nil { return err @@ -1030,7 +1008,7 @@ func PatchNodeTaints(c clientset.Interface, nodeName string, oldNode *v1.Node, n return fmt.Errorf("failed to create patch for node %q: %v", nodeName, err) } - _, err = c.Core().Nodes().Patch(string(nodeName), types.StrategicMergePatchType, patchBytes) + _, err = c.CoreV1().Nodes().Patch(nodeName, types.StrategicMergePatchType, patchBytes) return err } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/cronjob/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/cronjob/BUILD index 3ceb655c3ec2..e484d2c50a2a 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/cronjob/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/cronjob/BUILD @@ -14,8 +14,9 @@ go_library( "injection.go", "utils.go", ], + importpath = "k8s.io/kubernetes/pkg/controller/cronjob", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/util/metrics:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/robfig/cron:go_default_library", @@ -45,15 +46,17 @@ go_test( "cronjob_controller_test.go", "utils_test.go", ], + importpath = "k8s.io/kubernetes/pkg/controller/cronjob", library = ":go_default_library", deps = [ - "//pkg/api/install:go_default_library", "//pkg/apis/batch/install:go_default_library", + "//pkg/apis/core/install:go_default_library", "//vendor/k8s.io/api/batch/v1:go_default_library", "//vendor/k8s.io/api/batch/v1beta1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/client-go/tools/record:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/controller/cronjob/cronjob_controller.go b/vendor/k8s.io/kubernetes/pkg/controller/cronjob/cronjob_controller.go index 682ae721a4af..679895e85dfc 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/cronjob/cronjob_controller.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/cronjob/cronjob_controller.go @@ -66,14 +66,16 @@ type CronJobController struct { recorder record.EventRecorder } -func NewCronJobController(kubeClient clientset.Interface) *CronJobController { +func NewCronJobController(kubeClient clientset.Interface) (*CronJobController, error) { eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(glog.Infof) // TODO: remove the wrapper when every clients have moved to use the clientset. 
- eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.Core().RESTClient()).Events("")}) + eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")}) if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil { - metrics.RegisterMetricAndTrackRateLimiterUsage("cronjob_controller", kubeClient.Core().RESTClient().GetRateLimiter()) + if err := metrics.RegisterMetricAndTrackRateLimiterUsage("cronjob_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter()); err != nil { + return nil, err + } } jm := &CronJobController{ @@ -84,12 +86,15 @@ func NewCronJobController(kubeClient clientset.Interface) *CronJobController { recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cronjob-controller"}), } - return jm + return jm, nil } -func NewCronJobControllerFromClient(kubeClient clientset.Interface) *CronJobController { - jm := NewCronJobController(kubeClient) - return jm +func NewCronJobControllerFromClient(kubeClient clientset.Interface) (*CronJobController, error) { + jm, err := NewCronJobController(kubeClient) + if err != nil { + return nil, err + } + return jm, nil } // Run the main goroutine responsible for watching and syncing jobs. diff --git a/vendor/k8s.io/kubernetes/pkg/controller/cronjob/utils.go b/vendor/k8s.io/kubernetes/pkg/controller/cronjob/utils.go index 2416dbeb946e..d3286d99ced5 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/cronjob/utils.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/cronjob/utils.go @@ -31,7 +31,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" ref "k8s.io/client-go/tools/reference" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" ) // Utilities for dealing with Jobs and CronJobs and time. @@ -144,7 +144,7 @@ func getRecentUnmetScheduleTimes(sj batchv1beta1.CronJob, now time.Time) ([]time for t := sched.Next(earliestTime); !t.After(now); t = sched.Next(t) { starts = append(starts, t) - // An object might miss several starts. For example, if + // An object might miss several starts. For example, if // controller gets wedged on friday at 5:01pm when everyone has // gone home, and someone comes in on tuesday AM and discovers // the problem and restarts the controller, then all the hourly @@ -159,7 +159,7 @@ func getRecentUnmetScheduleTimes(sj batchv1beta1.CronJob, now time.Time) ([]time // of this controller. In that case, we want to not try to list // all the missed start times. // - // I've somewhat arbitrarily picked 100, as more than 80, but + // I've somewhat arbitrarily picked 100, as more than 80, // but less than "lots". 
if len(starts) > 100 { // We can't get the most recent times so just return an empty slice @@ -176,11 +176,6 @@ func getJobFromTemplate(sj *batchv1beta1.CronJob, scheduledTime time.Time) (*bat // scheduled-job-name=$SJ_NAME -- for user convenience labels := copyLabels(&sj.Spec.JobTemplate) annotations := copyAnnotations(&sj.Spec.JobTemplate) - createdByRefJson, err := makeCreatedByRefJson(sj) - if err != nil { - return nil, err - } - annotations[v1.CreatedByAnnotation] = string(createdByRefJson) // We want job names for a given nominal start time to have a deterministic name to avoid the same job being created twice name := fmt.Sprintf("%s-%d", sj.Name, getTimeHash(scheduledTime)) @@ -192,7 +187,7 @@ func getJobFromTemplate(sj *batchv1beta1.CronJob, scheduledTime time.Time) (*bat OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(sj, controllerKind)}, }, } - if err := api.Scheme.Convert(&sj.Spec.JobTemplate.Spec, &job.Spec, nil); err != nil { + if err := legacyscheme.Scheme.Convert(&sj.Spec.JobTemplate.Spec, &job.Spec, nil); err != nil { return nil, fmt.Errorf("unable to convert job template: %v", err) } return job, nil @@ -205,7 +200,7 @@ func getTimeHash(scheduledTime time.Time) int64 { // makeCreatedByRefJson makes a json string with an object reference for use in "created-by" annotation value func makeCreatedByRefJson(object runtime.Object) (string, error) { - createdByRef, err := ref.GetReference(api.Scheme, object) + createdByRef, err := ref.GetReference(legacyscheme.Scheme, object) if err != nil { return "", fmt.Errorf("unable to get controller reference: %v", err) } @@ -213,7 +208,7 @@ func makeCreatedByRefJson(object runtime.Object) (string, error) { // TODO: this code was not safe previously - as soon as new code came along that switched to v2, old clients // would be broken upon reading it. This is explicitly hardcoded to v1 to guarantee predictable deployment. // We need to consistently handle this case of annotation versioning. 
- codec := api.Codecs.LegacyCodec(schema.GroupVersion{Group: v1.GroupName, Version: "v1"}) + codec := legacyscheme.Codecs.LegacyCodec(schema.GroupVersion{Group: v1.GroupName, Version: "v1"}) createdByRefJson, err := runtime.Encode(codec, &v1.SerializedReference{ Reference: *createdByRef, diff --git a/vendor/k8s.io/kubernetes/pkg/controller/daemon/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/daemon/BUILD index cedbe39e1245..dd082c9ac27a 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/daemon/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/daemon/BUILD @@ -13,9 +13,10 @@ go_library( "doc.go", "update.go", ], + importpath = "k8s.io/kubernetes/pkg/controller/daemon", deps = [ - "//pkg/api/v1/helper:go_default_library", "//pkg/api/v1/pod:go_default_library", + "//pkg/apis/core/v1/helper:go_default_library", "//pkg/controller:go_default_library", "//pkg/controller/daemon/util:go_default_library", "//pkg/features:go_default_library", @@ -25,7 +26,6 @@ go_library( "//plugin/pkg/scheduler/algorithm:go_default_library", "//plugin/pkg/scheduler/algorithm/predicates:go_default_library", "//plugin/pkg/scheduler/schedulercache:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/apps/v1beta1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", @@ -37,6 +37,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", @@ -64,11 +65,13 @@ go_test( "daemon_controller_test.go", "update_test.go", ], + importpath = "k8s.io/kubernetes/pkg/controller/daemon", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", "//pkg/api/v1/pod:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/controller:go_default_library", "//pkg/kubelet/types:go_default_library", "//pkg/securitycontext:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/daemon/OWNERS b/vendor/k8s.io/kubernetes/pkg/controller/daemon/OWNERS index bb110c20dd10..d83d4dd28788 100755 --- a/vendor/k8s.io/kubernetes/pkg/controller/daemon/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/controller/daemon/OWNERS @@ -5,3 +5,4 @@ reviewers: - lukaszo - mikedanese - tnozicka +- k82cn diff --git a/vendor/k8s.io/kubernetes/pkg/controller/daemon/daemon_controller.go b/vendor/k8s.io/kubernetes/pkg/controller/daemon/daemon_controller.go index 544a5fc3f0bf..72308b93b5aa 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/daemon/daemon_controller.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/daemon/daemon_controller.go @@ -48,8 +48,8 @@ import ( "k8s.io/client-go/tools/record" "k8s.io/client-go/util/integer" "k8s.io/client-go/util/workqueue" - v1helper "k8s.io/kubernetes/pkg/api/v1/helper" podutil "k8s.io/kubernetes/pkg/api/v1/pod" + v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller/daemon/util" "k8s.io/kubernetes/pkg/features" @@ -66,7 +66,7 @@ const ( // The value of 250 is chosen b/c values that are too 
high can cause registry DoS issues BurstReplicas = 250 - // If sending a status upate to API server fails, we retry a finite number of times. + // If sending a status update to API server fails, we retry a finite number of times. StatusUpdateRetries = 1 // Reasons for DaemonSet events @@ -130,21 +130,23 @@ type DaemonSetsController struct { suspendedDaemonPods map[string]sets.String } -func NewDaemonSetsController(daemonSetInformer extensionsinformers.DaemonSetInformer, historyInformer appsinformers.ControllerRevisionInformer, podInformer coreinformers.PodInformer, nodeInformer coreinformers.NodeInformer, kubeClient clientset.Interface) *DaemonSetsController { +func NewDaemonSetsController(daemonSetInformer extensionsinformers.DaemonSetInformer, historyInformer appsinformers.ControllerRevisionInformer, podInformer coreinformers.PodInformer, nodeInformer coreinformers.NodeInformer, kubeClient clientset.Interface) (*DaemonSetsController, error) { eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(glog.Infof) // TODO: remove the wrapper when every clients have moved to use the clientset. - eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.Core().RESTClient()).Events("")}) + eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")}) - if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil { - metrics.RegisterMetricAndTrackRateLimiterUsage("daemon_controller", kubeClient.Core().RESTClient().GetRateLimiter()) + if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil { + if err := metrics.RegisterMetricAndTrackRateLimiterUsage("daemon_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter()); err != nil { + return nil, err + } } dsc := &DaemonSetsController{ kubeClient: kubeClient, eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "daemonset-controller"}), podControl: controller.RealPodControl{ KubeClient: kubeClient, - Recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "daemon-set"}), + Recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "daemonset-controller"}), }, crControl: controller.RealControllerRevisionControl{ KubeClient: kubeClient, @@ -201,7 +203,7 @@ func NewDaemonSetsController(daemonSetInformer extensionsinformers.DaemonSetInfo dsc.syncHandler = dsc.syncDaemonSet dsc.enqueueDaemonSet = dsc.enqueue dsc.enqueueDaemonSetRateLimited = dsc.enqueueRateLimited - return dsc + return dsc, nil } func (dsc *DaemonSetsController) deleteDaemonset(obj interface{}) { @@ -298,8 +300,8 @@ func (dsc *DaemonSetsController) enqueueDaemonSetAfter(obj interface{}, after ti dsc.queue.AddAfter(key, after) } -// getPodDaemonSets returns a list of DaemonSets that potentially match the pod. -func (dsc *DaemonSetsController) getPodDaemonSets(pod *v1.Pod) []*extensions.DaemonSet { +// getDaemonSetsForPod returns a list of DaemonSets that potentially match the pod. +func (dsc *DaemonSetsController) getDaemonSetsForPod(pod *v1.Pod) []*extensions.DaemonSet { sets, err := dsc.dsLister.GetPodDaemonSets(pod) if err != nil { return nil @@ -362,8 +364,8 @@ func (dsc *DaemonSetsController) addHistory(obj interface{}) { } // updateHistory figures out what DaemonSet(s) manage a ControllerRevision when the ControllerRevision -// is updated and wake them up. 
If the anything of the ControllerRevision have changed, we need to -// awaken both the old and new DaemonSets. +// is updated and wake them up. If anything of the ControllerRevision has changed, we need to awaken +// both the old and new DaemonSets. func (dsc *DaemonSetsController) updateHistory(old, cur interface{}) { curHistory := cur.(*apps.ControllerRevision) oldHistory := old.(*apps.ControllerRevision) @@ -474,7 +476,7 @@ func (dsc *DaemonSetsController) addPod(obj interface{}) { // them to see if anyone wants to adopt it. // DO NOT observe creation because no controller should be waiting for an // orphan. - dss := dsc.getPodDaemonSets(pod) + dss := dsc.getDaemonSetsForPod(pod) if len(dss) == 0 { return } @@ -495,8 +497,6 @@ func (dsc *DaemonSetsController) updatePod(old, cur interface{}) { // Two different versions of the same pod will always have different RVs. return } - changedToReady := !podutil.IsPodReady(oldPod) && podutil.IsPodReady(curPod) - labelChanged := !reflect.DeepEqual(curPod.Labels, oldPod.Labels) curControllerRef := metav1.GetControllerOf(curPod) oldControllerRef := metav1.GetControllerOf(oldPod) @@ -516,6 +516,7 @@ func (dsc *DaemonSetsController) updatePod(old, cur interface{}) { } glog.V(4).Infof("Pod %s updated.", curPod.Name) dsc.enqueueDaemonSet(ds) + changedToReady := !podutil.IsPodReady(oldPod) && podutil.IsPodReady(curPod) // See https://github.com/kubernetes/kubernetes/pull/38076 for more details if changedToReady && ds.Spec.MinReadySeconds > 0 { // Add a second to avoid milliseconds skew in AddAfter. @@ -527,11 +528,12 @@ func (dsc *DaemonSetsController) updatePod(old, cur interface{}) { // Otherwise, it's an orphan. If anything changed, sync matching controllers // to see if anyone wants to adopt it now. - dss := dsc.getPodDaemonSets(curPod) + dss := dsc.getDaemonSetsForPod(curPod) if len(dss) == 0 { return } glog.V(4).Infof("Orphan Pod %s updated.", curPod.Name) + labelChanged := !reflect.DeepEqual(curPod.Labels, oldPod.Labels) if labelChanged || controllerRefChanged { for _, ds := range dss { dsc.enqueueDaemonSet(ds) @@ -707,7 +709,7 @@ func (dsc *DaemonSetsController) updateNode(old, cur interface{}) { dsList, err := dsc.dsLister.List(labels.Everything()) if err != nil { - glog.V(4).Infof("Error enqueueing daemon sets: %v", err) + glog.V(4).Infof("Error listing daemon sets: %v", err) return } // TODO: it'd be nice to pass a hint with these enqueues, so that each ds would only examine the added node (unless it has other work to do, too). @@ -799,6 +801,10 @@ func (dsc *DaemonSetsController) resolveControllerRef(namespace string, controll return ds } +// manage manages the scheduling and running of Pods of ds on nodes. +// After figuring out which nodes should run a Pod of ds but not yet running one and +// which nodes should not run a Pod of ds but currently running one, it calls function +// syncNodes with a list of pods to remove and a list of nodes to run a Pod of ds. func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet, hash string) error { // Find out which nodes are running the daemon pods controlled by ds. 
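Editorial note, not part of the vendored patch: the doc comment added just above describes manage() as reconciling the set of nodes that should run a daemon pod against the pods that actually exist, then handing the two lists to syncNodes. A self-contained, simplified sketch of that reconciliation shape (stand-in types and names, not the vendored implementation):

package main

import "fmt"

// diffDaemonPods computes which nodes still need a daemon pod and which existing
// pods should be deleted, given the desired state per node and the observed pods.
func diffDaemonPods(shouldRun map[string]bool, runningPods map[string][]string) (nodesNeedingPods, podsToDelete []string) {
	for node, want := range shouldRun {
		pods := runningPods[node]
		switch {
		case want && len(pods) == 0:
			nodesNeedingPods = append(nodesNeedingPods, node)
		case !want && len(pods) > 0:
			podsToDelete = append(podsToDelete, pods...)
		}
	}
	return nodesNeedingPods, podsToDelete
}

func main() {
	need, del := diffDaemonPods(
		map[string]bool{"node-a": true, "node-b": false},
		map[string][]string{"node-b": {"ds-pod-1"}},
	)
	fmt.Println(need, del) // [node-a] [ds-pod-1]
}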
nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds) @@ -1065,7 +1071,7 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet, } numberUnavailable := desiredNumberScheduled - numberAvailable - err = storeDaemonSetStatus(dsc.kubeClient.Extensions().DaemonSets(ds.Namespace), ds, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable, numberUnavailable) + err = storeDaemonSetStatus(dsc.kubeClient.ExtensionsV1beta1().DaemonSets(ds.Namespace), ds, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable, numberUnavailable) if err != nil { return fmt.Errorf("error storing status for daemon set %#v: %v", ds, err) } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/daemon/update.go b/vendor/k8s.io/kubernetes/pkg/controller/daemon/update.go index bbbb4d68f912..bbc9015a28a0 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/daemon/update.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/daemon/update.go @@ -32,11 +32,11 @@ import ( "k8s.io/apimachinery/pkg/runtime" intstrutil "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/json" + "k8s.io/apimachinery/pkg/util/rand" podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller/daemon/util" labelsutil "k8s.io/kubernetes/pkg/util/labels" - "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/rand" ) // rollingUpdate deletes old daemon set pods making sure that no more than @@ -228,7 +228,7 @@ func (dsc *DaemonSetsController) dedupCurHistories(ds *extensions.DaemonSet, cur toUpdate.Labels = make(map[string]string) } toUpdate.Labels[extensions.DefaultDaemonSetUniqueLabelKey] = keepCur.Labels[extensions.DefaultDaemonSetUniqueLabelKey] - _, err = dsc.kubeClient.Core().Pods(ds.Namespace).Update(toUpdate) + _, err = dsc.kubeClient.CoreV1().Pods(ds.Namespace).Update(toUpdate) if err != nil { return nil, err } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/daemon/util/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/daemon/util/BUILD index 58d23c979992..4a87a1eacd99 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/daemon/util/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/daemon/util/BUILD @@ -9,9 +9,10 @@ load( go_library( name = "go_default_library", srcs = ["daemonset_util.go"], + importpath = "k8s.io/kubernetes/pkg/controller/daemon/util", deps = [ - "//pkg/api/v1/helper:go_default_library", "//pkg/api/v1/pod:go_default_library", + "//pkg/apis/core/v1/helper:go_default_library", "//pkg/features:go_default_library", "//pkg/kubelet/types:go_default_library", "//pkg/util/labels:go_default_library", @@ -39,6 +40,7 @@ filegroup( go_test( name = "go_default_test", srcs = ["daemonset_util_test.go"], + importpath = "k8s.io/kubernetes/pkg/controller/daemon/util", library = ":go_default_library", deps = [ "//pkg/api/testapi:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/daemon/util/daemonset_util.go b/vendor/k8s.io/kubernetes/pkg/controller/daemon/util/daemonset_util.go index 6ef70a3fac58..1ea30b713fd8 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/daemon/util/daemonset_util.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/daemon/util/daemonset_util.go @@ -23,8 +23,8 @@ import ( extensions "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilfeature "k8s.io/apiserver/pkg/util/feature" - v1helper "k8s.io/kubernetes/pkg/api/v1/helper" podutil 
"k8s.io/kubernetes/pkg/api/v1/pod" + v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/features" kubelettypes "k8s.io/kubernetes/pkg/kubelet/types" labelsutil "k8s.io/kubernetes/pkg/util/labels" diff --git a/vendor/k8s.io/kubernetes/pkg/controller/deployment/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/deployment/BUILD index 44b5cdea4118..6995ca4c33dd 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/deployment/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/deployment/BUILD @@ -16,12 +16,12 @@ go_library( "rolling.go", "sync.go", ], + importpath = "k8s.io/kubernetes/pkg/controller/deployment", deps = [ "//pkg/controller:go_default_library", "//pkg/controller/deployment/util:go_default_library", "//pkg/util/labels:go_default_library", "//pkg/util/metrics:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", @@ -30,6 +30,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/client-go/informers/core/v1:go_default_library", @@ -55,16 +56,17 @@ go_test( "rolling_test.go", "sync_test.go", ], + importpath = "k8s.io/kubernetes/pkg/controller/deployment", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/install:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/apps/install:go_default_library", "//pkg/apis/authentication/install:go_default_library", "//pkg/apis/authorization/install:go_default_library", "//pkg/apis/autoscaling/install:go_default_library", "//pkg/apis/batch/install:go_default_library", "//pkg/apis/certificates/install:go_default_library", + "//pkg/apis/core/install:go_default_library", "//pkg/apis/extensions/install:go_default_library", "//pkg/apis/policy/install:go_default_library", "//pkg/apis/rbac/install:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/deployment/deployment_controller.go b/vendor/k8s.io/kubernetes/pkg/controller/deployment/deployment_controller.go index 7ec410800518..0b3461f30d96 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/deployment/deployment_controller.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/deployment/deployment_controller.go @@ -97,14 +97,16 @@ type DeploymentController struct { } // NewDeploymentController creates a new DeploymentController. -func NewDeploymentController(dInformer extensionsinformers.DeploymentInformer, rsInformer extensionsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, client clientset.Interface) *DeploymentController { +func NewDeploymentController(dInformer extensionsinformers.DeploymentInformer, rsInformer extensionsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, client clientset.Interface) (*DeploymentController, error) { eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(glog.Infof) // TODO: remove the wrapper when every clients have moved to use the clientset. 
- eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(client.Core().RESTClient()).Events("")}) + eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(client.CoreV1().RESTClient()).Events("")}) - if client != nil && client.Core().RESTClient().GetRateLimiter() != nil { - metrics.RegisterMetricAndTrackRateLimiterUsage("deployment_controller", client.Core().RESTClient().GetRateLimiter()) + if client != nil && client.CoreV1().RESTClient().GetRateLimiter() != nil { + if err := metrics.RegisterMetricAndTrackRateLimiterUsage("deployment_controller", client.CoreV1().RESTClient().GetRateLimiter()); err != nil { + return nil, err + } } dc := &DeploymentController{ client: client, @@ -140,7 +142,7 @@ func NewDeploymentController(dInformer extensionsinformers.DeploymentInformer, r dc.dListerSynced = dInformer.Informer().HasSynced dc.rsListerSynced = rsInformer.Informer().HasSynced dc.podListerSynced = podInformer.Informer().HasSynced - return dc + return dc, nil } // Run begins watching and syncing. @@ -560,7 +562,7 @@ func (dc *DeploymentController) syncDeployment(key string) error { startTime := time.Now() glog.V(4).Infof("Started syncing deployment %q (%v)", key, startTime) defer func() { - glog.V(4).Infof("Finished syncing deployment %q (%v)", key, time.Now().Sub(startTime)) + glog.V(4).Infof("Finished syncing deployment %q (%v)", key, time.Since(startTime)) }() namespace, name, err := cache.SplitMetaNamespaceKey(key) @@ -585,7 +587,7 @@ func (dc *DeploymentController) syncDeployment(key string) error { dc.eventRecorder.Eventf(d, v1.EventTypeWarning, "SelectingAll", "This deployment is selecting all pods. A non-empty selector is required.") if d.Status.ObservedGeneration < d.Generation { d.Status.ObservedGeneration = d.Generation - dc.client.Extensions().Deployments(d.Namespace).UpdateStatus(d) + dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d) } return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/deployment/progress.go b/vendor/k8s.io/kubernetes/pkg/controller/deployment/progress.go index 1062d7edc7ac..1cd25ed1fff3 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/deployment/progress.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/deployment/progress.go @@ -52,7 +52,10 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*extensions.ReplicaSe case util.DeploymentComplete(d, &newStatus): // Update the deployment conditions with a message for the new replica set that // was successfully deployed. If the condition already exists, we ignore this update. 
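Editorial note, not part of the vendored patch: the progress.go hunk that continues below adds a nil check before formatting the progressing condition's message, since the new ReplicaSet may not exist yet. A trivial, self-contained illustration of that selection using a simplified stand-in type:

package main

import "fmt"

type replicaSet struct{ Name string }

// progressMessage prefers the more specific ReplicaSet message when one exists
// and falls back to the Deployment-level message otherwise.
func progressMessage(deploymentName string, newRS *replicaSet) string {
	msg := fmt.Sprintf("Deployment %q has successfully progressed.", deploymentName)
	if newRS != nil {
		msg = fmt.Sprintf("ReplicaSet %q has successfully progressed.", newRS.Name)
	}
	return msg
}

func main() {
	fmt.Println(progressMessage("web", nil))
	fmt.Println(progressMessage("web", &replicaSet{Name: "web-6d8f46cf9b"}))
}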
- msg := fmt.Sprintf("ReplicaSet %q has successfully progressed.", newRS.Name) + msg := fmt.Sprintf("Deployment %q has successfully progressed.", d.Name) + if newRS != nil { + msg = fmt.Sprintf("ReplicaSet %q has successfully progressed.", newRS.Name) + } condition := util.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionTrue, util.NewRSAvailableReason, msg) util.SetDeploymentCondition(&newStatus, *condition) @@ -109,7 +112,7 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*extensions.ReplicaSe newDeployment := d newDeployment.Status = newStatus - _, err := dc.client.Extensions().Deployments(newDeployment.Namespace).UpdateStatus(newDeployment) + _, err := dc.client.ExtensionsV1beta1().Deployments(newDeployment.Namespace).UpdateStatus(newDeployment) return err } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/deployment/rollback.go b/vendor/k8s.io/kubernetes/pkg/controller/deployment/rollback.go index 4e0e2ab0a77e..826185afc333 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/deployment/rollback.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/deployment/rollback.go @@ -55,7 +55,7 @@ func (dc *DeploymentController) rollback(d *extensions.Deployment, rsList []*ext glog.V(4).Infof("Found replica set %q with desired revision %d", rs.Name, v) // rollback by copying podTemplate.Spec from the replica set // revision number will be incremented during the next getAllReplicaSetsAndSyncRevision call - // no-op if the the spec matches current deployment's podTemplate.Spec + // no-op if the spec matches current deployment's podTemplate.Spec performedRollback, err := dc.rollbackToTemplate(d, rs) if performedRollback && err == nil { dc.emitRollbackNormalEvent(d, fmt.Sprintf("Rolled back deployment %q to revision %d", d.Name, *toRevision)) @@ -73,11 +73,7 @@ func (dc *DeploymentController) rollback(d *extensions.Deployment, rsList []*ext // cleans up the rollback spec so subsequent requeues of the deployment won't end up in here. 
func (dc *DeploymentController) rollbackToTemplate(d *extensions.Deployment, rs *extensions.ReplicaSet) (bool, error) { performedRollback := false - isEqual, err := deploymentutil.EqualIgnoreHash(&d.Spec.Template, &rs.Spec.Template) - if err != nil { - return false, err - } - if !isEqual { + if !deploymentutil.EqualIgnoreHash(&d.Spec.Template, &rs.Spec.Template) { glog.V(4).Infof("Rolling back deployment %q to template spec %+v", d.Name, rs.Spec.Template.Spec) deploymentutil.SetFromReplicaSetTemplate(d, rs.Spec.Template) // set RS (the old RS we'll rolling back to) annotations back to the deployment; @@ -116,6 +112,6 @@ func (dc *DeploymentController) emitRollbackNormalEvent(d *extensions.Deployment func (dc *DeploymentController) updateDeploymentAndClearRollbackTo(d *extensions.Deployment) error { glog.V(4).Infof("Cleans up rollbackTo of deployment %q", d.Name) d.Spec.RollbackTo = nil - _, err := dc.client.Extensions().Deployments(d.Namespace).Update(d) + _, err := dc.client.ExtensionsV1beta1().Deployments(d.Namespace).Update(d) return err } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/deployment/sync.go b/vendor/k8s.io/kubernetes/pkg/controller/deployment/sync.go index cd08decd60d2..497e82cb1d95 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/deployment/sync.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/deployment/sync.go @@ -29,10 +29,10 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/rand" "k8s.io/kubernetes/pkg/controller" deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" labelsutil "k8s.io/kubernetes/pkg/util/labels" - "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/rand" ) // syncStatusOnly only updates Deployments Status and doesn't take any mutating actions. @@ -100,7 +100,7 @@ func (dc *DeploymentController) checkPausedConditions(d *extensions.Deployment) } var err error - d, err = dc.client.Extensions().Deployments(d.Namespace).UpdateStatus(d) + d, err = dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d) return err } @@ -122,10 +122,7 @@ func (dc *DeploymentController) getAllReplicaSetsAndSyncRevision(d *extensions.D if err != nil { return nil, nil, fmt.Errorf("error labeling replica sets and pods with pod-template-hash: %v", err) } - _, allOldRSs, err := deploymentutil.FindOldReplicaSets(d, rsList) - if err != nil { - return nil, nil, err - } + _, allOldRSs := deploymentutil.FindOldReplicaSets(d, rsList) // Get new replica set with the updated revision number newRS, err := dc.getNewReplicaSet(d, rsList, allOldRSs, createIfNotExisted) @@ -170,7 +167,7 @@ func (dc *DeploymentController) addHashKeyToRSAndPods(rs *extensions.ReplicaSet, return nil, err } // 1. Add hash template label to the rs. This ensures that any newly created pods will have the new label. - updatedRS, err := deploymentutil.UpdateRSWithRetries(dc.client.Extensions().ReplicaSets(rs.Namespace), dc.rsLister, rs.Namespace, rs.Name, + updatedRS, err := deploymentutil.UpdateRSWithRetries(dc.client.ExtensionsV1beta1().ReplicaSets(rs.Namespace), dc.rsLister, rs.Namespace, rs.Name, func(updated *extensions.ReplicaSet) error { // Precondition: the RS doesn't contain the new hash in its pod template label. if updated.Spec.Template.Labels[extensions.DefaultDeploymentUniqueLabelKey] == hash { @@ -210,7 +207,7 @@ func (dc *DeploymentController) addHashKeyToRSAndPods(rs *extensions.ReplicaSet, // 3. 
Update rs label and selector to include the new hash label // Copy the old selector, so that we can scrub out any orphaned pods - updatedRS, err = deploymentutil.UpdateRSWithRetries(dc.client.Extensions().ReplicaSets(rs.Namespace), dc.rsLister, rs.Namespace, rs.Name, func(updated *extensions.ReplicaSet) error { + updatedRS, err = deploymentutil.UpdateRSWithRetries(dc.client.ExtensionsV1beta1().ReplicaSets(rs.Namespace), dc.rsLister, rs.Namespace, rs.Name, func(updated *extensions.ReplicaSet) error { // Precondition: the RS doesn't contain the new hash in its label and selector. if updated.Labels[extensions.DefaultDeploymentUniqueLabelKey] == hash && updated.Spec.Selector.MatchLabels[extensions.DefaultDeploymentUniqueLabelKey] == hash { return utilerrors.ErrPreconditionViolated @@ -235,10 +232,7 @@ func (dc *DeploymentController) addHashKeyToRSAndPods(rs *extensions.ReplicaSet, // 3. If there's no existing new RS and createIfNotExisted is true, create one with appropriate revision number (maxOldRevision + 1) and replicas. // Note that the pod-template-hash will be added to adopted RSes and pods. func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsList, oldRSs []*extensions.ReplicaSet, createIfNotExisted bool) (*extensions.ReplicaSet, error) { - existingNewRS, err := deploymentutil.FindNewReplicaSet(d, rsList) - if err != nil { - return nil, err - } + existingNewRS := deploymentutil.FindNewReplicaSet(d, rsList) // Calculate the max revision number among all old RSes maxOldRevision := deploymentutil.MaxRevision(oldRSs) @@ -257,7 +251,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis minReadySecondsNeedsUpdate := rsCopy.Spec.MinReadySeconds != d.Spec.MinReadySeconds if annotationsUpdated || minReadySecondsNeedsUpdate { rsCopy.Spec.MinReadySeconds = d.Spec.MinReadySeconds - return dc.client.Extensions().ReplicaSets(rsCopy.ObjectMeta.Namespace).Update(rsCopy) + return dc.client.ExtensionsV1beta1().ReplicaSets(rsCopy.ObjectMeta.Namespace).Update(rsCopy) } // Should use the revision in existingNewRS's annotation, since it set by before @@ -274,7 +268,8 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis } if needsUpdate { - if d, err = dc.client.Extensions().Deployments(d.Namespace).UpdateStatus(d); err != nil { + var err error + if d, err = dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d); err != nil { return nil, err } } @@ -320,7 +315,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis // hash collisions. If there is any other error, we need to report it in the status of // the Deployment. alreadyExists := false - createdRS, err := dc.client.Extensions().ReplicaSets(d.Namespace).Create(&newRS) + createdRS, err := dc.client.ExtensionsV1beta1().ReplicaSets(d.Namespace).Create(&newRS) switch { // We may end up hitting this due to a slow cache or a fast resync of the Deployment. // Fetch a copy of the ReplicaSet. If its PodTemplateSpec is semantically deep equal @@ -333,13 +328,9 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis if rsErr != nil { return nil, rsErr } - isEqual, equalErr := deploymentutil.EqualIgnoreHash(&d.Spec.Template, &rs.Spec.Template) - if equalErr != nil { - return nil, equalErr - } // Matching ReplicaSet is not equal - increment the collisionCount in the DeploymentStatus // and requeue the Deployment. 
- if !isEqual { + if !deploymentutil.EqualIgnoreHash(&d.Spec.Template, &rs.Spec.Template) { if d.Status.CollisionCount == nil { d.Status.CollisionCount = new(int32) } @@ -347,7 +338,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis *d.Status.CollisionCount++ // Update the collisionCount for the Deployment and let it requeue by returning the original // error. - _, dErr := dc.client.Extensions().Deployments(d.Namespace).UpdateStatus(d) + _, dErr := dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d) if dErr == nil { glog.V(2).Infof("Found a hash collision for deployment %q - bumping collisionCount (%d->%d) to resolve it", d.Name, preCollisionCount, *d.Status.CollisionCount) } @@ -364,7 +355,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis // We don't really care about this error at this point, since we have a bigger issue to report. // TODO: Identify which errors are permanent and switch DeploymentIsFailed to take into account // these reasons as well. Related issue: https://github.com/kubernetes/kubernetes/issues/18568 - _, _ = dc.client.Extensions().Deployments(d.Namespace).UpdateStatus(d) + _, _ = dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d) } dc.eventRecorder.Eventf(d, v1.EventTypeWarning, deploymentutil.FailedRSCreateReason, msg) return nil, err @@ -381,7 +372,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis needsUpdate = true } if needsUpdate { - _, err = dc.client.Extensions().Deployments(d.Namespace).UpdateStatus(d) + _, err = dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d) } return createdRS, err } @@ -517,7 +508,7 @@ func (dc *DeploymentController) scaleReplicaSet(rs *extensions.ReplicaSet, newSc var err error if sizeNeedsUpdate || annotationsNeedUpdate { *(rsCopy.Spec.Replicas) = newScale - rs, err = dc.client.Extensions().ReplicaSets(rsCopy.Namespace).Update(rsCopy) + rs, err = dc.client.ExtensionsV1beta1().ReplicaSets(rsCopy.Namespace).Update(rsCopy) if err == nil && sizeNeedsUpdate { scaled = true dc.eventRecorder.Eventf(deployment, v1.EventTypeNormal, "ScalingReplicaSet", "Scaled %s replica set %s to %d", scalingOperation, rs.Name, newScale) @@ -555,7 +546,7 @@ func (dc *DeploymentController) cleanupDeployment(oldRSs []*extensions.ReplicaSe continue } glog.V(4).Infof("Trying to cleanup replica set %q for deployment %q", rs.Name, deployment.Name) - if err := dc.client.Extensions().ReplicaSets(rs.Namespace).Delete(rs.Name, nil); err != nil && !errors.IsNotFound(err) { + if err := dc.client.ExtensionsV1beta1().ReplicaSets(rs.Namespace).Delete(rs.Name, nil); err != nil && !errors.IsNotFound(err) { // Return error instead of aggregating and continuing DELETEs on the theory // that we may be overloading the api server. 
return err @@ -575,7 +566,7 @@ func (dc *DeploymentController) syncDeploymentStatus(allRSs []*extensions.Replic newDeployment := d newDeployment.Status = newStatus - _, err := dc.client.Extensions().Deployments(newDeployment.Namespace).UpdateStatus(newDeployment) + _, err := dc.client.ExtensionsV1beta1().Deployments(newDeployment.Namespace).UpdateStatus(newDeployment) return err } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/BUILD index 9df7ea4fedd5..fd967e837f8d 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/BUILD @@ -13,6 +13,7 @@ go_library( "pod_util.go", "replicaset_util.go", ], + importpath = "k8s.io/kubernetes/pkg/controller/deployment/util", deps = [ "//pkg/apis/extensions:go_default_library", "//pkg/controller:go_default_library", @@ -44,9 +45,9 @@ go_test( "deployment_util_test.go", "hash_test.go", ], + importpath = "k8s.io/kubernetes/pkg/controller/deployment/util", library = ":go_default_library", deps = [ - "//pkg/api/v1:go_default_library", "//pkg/controller:go_default_library", "//pkg/util/hash:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", @@ -56,6 +57,7 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", + "//vendor/k8s.io/apiserver/pkg/storage/names:go_default_library", "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library", "//vendor/k8s.io/client-go/testing:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go b/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go index 39872bd17b52..d017e33709e3 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go @@ -502,14 +502,8 @@ func GetAllReplicaSets(deployment *extensions.Deployment, c extensionsv1beta1.Ex if err != nil { return nil, nil, nil, err } - oldRSes, allOldRSes, err := FindOldReplicaSets(deployment, rsList) - if err != nil { - return nil, nil, nil, err - } - newRS, err := FindNewReplicaSet(deployment, rsList) - if err != nil { - return nil, nil, nil, err - } + oldRSes, allOldRSes := FindOldReplicaSets(deployment, rsList) + newRS := FindNewReplicaSet(deployment, rsList) return oldRSes, allOldRSes, newRS, nil } @@ -520,7 +514,8 @@ func GetOldReplicaSets(deployment *extensions.Deployment, c extensionsv1beta1.Ex if err != nil { return nil, nil, err } - return FindOldReplicaSets(deployment, rsList) + oldRSes, allOldRSes := FindOldReplicaSets(deployment, rsList) + return oldRSes, allOldRSes, nil } // GetNewReplicaSet returns a replica set that matches the intent of the given deployment; get ReplicaSetList from client interface. @@ -530,7 +525,7 @@ func GetNewReplicaSet(deployment *extensions.Deployment, c extensionsv1beta1.Ext if err != nil { return nil, err } - return FindNewReplicaSet(deployment, rsList) + return FindNewReplicaSet(deployment, rsList), nil } // RsListFromClient returns an rsListFunc that wraps the given client. @@ -548,13 +543,6 @@ func RsListFromClient(c extensionsv1beta1.ExtensionsV1beta1Interface) RsListFunc } } -// podListFromClient returns a podListFunc that wraps the given client. 
-func podListFromClient(c clientset.Interface) podListFunc { - return func(namespace string, options metav1.ListOptions) (*v1.PodList, error) { - return c.Core().Pods(namespace).List(options) - } -} - // TODO: switch this to full namespacers type RsListFunc func(string, metav1.ListOptions) ([]*extensions.ReplicaSet, error) type podListFunc func(string, metav1.ListOptions) (*v1.PodList, error) @@ -574,7 +562,7 @@ func ListReplicaSets(deployment *extensions.Deployment, getRSList RsListFunc) ([ options := metav1.ListOptions{LabelSelector: selector.String()} all, err := getRSList(namespace, options) if err != nil { - return all, err + return nil, err } // Only include those whose ControllerRef matches the Deployment. owned := make([]*extensions.ReplicaSet, 0, len(all)) @@ -647,7 +635,7 @@ func ListPods(deployment *extensions.Deployment, rsList []*extensions.ReplicaSet // We ignore pod-template-hash because the hash result would be different upon podTemplateSpec API changes // (e.g. the addition of a new field will cause the hash code to change) // Note that we assume input podTemplateSpecs contain non-empty labels -func EqualIgnoreHash(template1, template2 *v1.PodTemplateSpec) (bool, error) { +func EqualIgnoreHash(template1, template2 *v1.PodTemplateSpec) bool { t1Copy := template1.DeepCopy() t2Copy := template2.DeepCopy() // First, compare template.Labels (ignoring hash) @@ -658,43 +646,36 @@ func EqualIgnoreHash(template1, template2 *v1.PodTemplateSpec) (bool, error) { // We make sure len(labels2) >= len(labels1) for k, v := range labels2 { if labels1[k] != v && k != extensions.DefaultDeploymentUniqueLabelKey { - return false, nil + return false } } // Then, compare the templates without comparing their labels t1Copy.Labels, t2Copy.Labels = nil, nil - return apiequality.Semantic.DeepEqual(t1Copy, t2Copy), nil + return apiequality.Semantic.DeepEqual(t1Copy, t2Copy) } // FindNewReplicaSet returns the new RS this given deployment targets (the one with the same pod template). -func FindNewReplicaSet(deployment *extensions.Deployment, rsList []*extensions.ReplicaSet) (*extensions.ReplicaSet, error) { +func FindNewReplicaSet(deployment *extensions.Deployment, rsList []*extensions.ReplicaSet) *extensions.ReplicaSet { sort.Sort(controller.ReplicaSetsByCreationTimestamp(rsList)) for i := range rsList { - equal, err := EqualIgnoreHash(&rsList[i].Spec.Template, &deployment.Spec.Template) - if err != nil { - return nil, err - } - if equal { + if EqualIgnoreHash(&rsList[i].Spec.Template, &deployment.Spec.Template) { // In rare cases, such as after cluster upgrades, Deployment may end up with // having more than one new ReplicaSets that have the same template as its template, // see https://github.com/kubernetes/kubernetes/issues/40415 // We deterministically choose the oldest new ReplicaSet. - return rsList[i], nil + return rsList[i] } } // new ReplicaSet does not exist. - return nil, nil + return nil } // FindOldReplicaSets returns the old replica sets targeted by the given Deployment, with the given slice of RSes. // Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets. 
-func FindOldReplicaSets(deployment *extensions.Deployment, rsList []*extensions.ReplicaSet) ([]*extensions.ReplicaSet, []*extensions.ReplicaSet, error) { +func FindOldReplicaSets(deployment *extensions.Deployment, rsList []*extensions.ReplicaSet) ([]*extensions.ReplicaSet, []*extensions.ReplicaSet) { var requiredRSs []*extensions.ReplicaSet var allRSs []*extensions.ReplicaSet - newRS, err := FindNewReplicaSet(deployment, rsList) - if err != nil { - return nil, nil, err - } + newRS := FindNewReplicaSet(deployment, rsList) for _, rs := range rsList { // Filter out new replica set if newRS != nil && rs.UID == newRS.UID { @@ -705,7 +686,7 @@ func FindOldReplicaSets(deployment *extensions.Deployment, rsList []*extensions. requiredRSs = append(requiredRSs, rs) } } - return requiredRSs, allRSs, nil + return requiredRSs, allRSs } // WaitForReplicaSetUpdated polls the replica set until it is updated. @@ -740,7 +721,7 @@ func LabelPodsWithHash(podList *v1.PodList, c clientset.Interface, podLister cor } // Only label the pod that doesn't already have the new hash if pod.Labels[extensions.DefaultDeploymentUniqueLabelKey] != hash { - _, err := UpdatePodWithRetries(c.Core().Pods(namespace), podLister, pod.Namespace, pod.Name, + _, err := UpdatePodWithRetries(c.CoreV1().Pods(namespace), podLister, pod.Namespace, pod.Name, func(podToUpdate *v1.Pod) error { // Precondition: the pod doesn't contain the new hash in its label. if podToUpdate.Labels[extensions.DefaultDeploymentUniqueLabelKey] == hash { diff --git a/vendor/k8s.io/kubernetes/pkg/controller/disruption/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/disruption/BUILD index 74ce94953a37..b92a2e2c9496 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/disruption/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/disruption/BUILD @@ -9,6 +9,7 @@ load( go_library( name = "go_default_library", srcs = ["disruption.go"], + importpath = "k8s.io/kubernetes/pkg/controller/disruption", deps = [ "//pkg/api/v1/pod:go_default_library", "//pkg/controller:go_default_library", @@ -44,10 +45,11 @@ go_library( go_test( name = "go_default_test", srcs = ["disruption_test.go"], + importpath = "k8s.io/kubernetes/pkg/controller/disruption", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/install:go_default_library", + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/core/install:go_default_library", "//pkg/controller:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library", "//vendor/k8s.io/api/apps/v1beta1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/disruption/disruption.go b/vendor/k8s.io/kubernetes/pkg/controller/disruption/disruption.go index ab474ed22280..b234f5ca0b64 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/disruption/disruption.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/disruption/disruption.go @@ -294,7 +294,7 @@ func (dc *DisruptionController) Run(stopCh <-chan struct{}) { if dc.kubeClient != nil { glog.Infof("Sending events to api server.") - dc.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(dc.kubeClient.Core().RESTClient()).Events("")}) + dc.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(dc.kubeClient.CoreV1().RESTClient()).Events("")}) } else { glog.Infof("No api server defined - no events will be sent to API server.") } @@ -728,7 +728,7 @@ func refresh(pdbClient policyclientset.PodDisruptionBudgetInterface, pdb *policy } func (dc *DisruptionController) 
writePdbStatus(pdb *policy.PodDisruptionBudget) error { - pdbClient := dc.kubeClient.Policy().PodDisruptionBudgets(pdb.Namespace) + pdbClient := dc.kubeClient.PolicyV1beta1().PodDisruptionBudgets(pdb.Namespace) st := pdb.Status var err error diff --git a/vendor/k8s.io/kubernetes/pkg/controller/endpoint/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/endpoint/BUILD index ba57ed268b76..ed8acb206d5b 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/endpoint/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/endpoint/BUILD @@ -12,10 +12,11 @@ go_library( "doc.go", "endpoints_controller.go", ], + importpath = "k8s.io/kubernetes/pkg/controller/endpoint", deps = [ - "//pkg/api:go_default_library", "//pkg/api/v1/endpoints:go_default_library", "//pkg/api/v1/pod:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/controller:go_default_library", "//pkg/util/metrics:go_default_library", "//vendor/github.com/golang/glog:go_default_library", @@ -39,11 +40,13 @@ go_library( go_test( name = "go_default_test", srcs = ["endpoints_controller_test.go"], + importpath = "k8s.io/kubernetes/pkg/controller/endpoint", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", "//pkg/api/v1/endpoints:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/controller:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/endpoint/endpoints_controller.go b/vendor/k8s.io/kubernetes/pkg/controller/endpoint/endpoints_controller.go index 13ea96fd81a9..8aa41da8a838 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/endpoint/endpoints_controller.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/endpoint/endpoints_controller.go @@ -36,9 +36,9 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/leaderelection/resourcelock" "k8s.io/client-go/util/workqueue" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1/endpoints" podutil "k8s.io/kubernetes/pkg/api/v1/pod" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/util/metrics" @@ -75,8 +75,8 @@ var ( // NewEndpointController returns a new *EndpointController. func NewEndpointController(podInformer coreinformers.PodInformer, serviceInformer coreinformers.ServiceInformer, endpointsInformer coreinformers.EndpointsInformer, client clientset.Interface) *EndpointController { - if client != nil && client.Core().RESTClient().GetRateLimiter() != nil { - metrics.RegisterMetricAndTrackRateLimiterUsage("endpoint_controller", client.Core().RESTClient().GetRateLimiter()) + if client != nil && client.CoreV1().RESTClient().GetRateLimiter() != nil { + metrics.RegisterMetricAndTrackRateLimiterUsage("endpoint_controller", client.CoreV1().RESTClient().GetRateLimiter()) } e := &EndpointController{ client: client, @@ -144,7 +144,7 @@ type EndpointController struct { workerLoopPeriod time.Duration } -// Runs e; will not return until stopCh is closed. workers determines how many +// Run will not return until stopCh is closed. workers determines how many // endpoints will be handled in parallel. 
func (e *EndpointController) Run(workers int, stopCh <-chan struct{}) { defer utilruntime.HandleCrash() @@ -215,6 +215,10 @@ func podToEndpointAddress(pod *v1.Pod) *v1.EndpointAddress { } func podChanged(oldPod, newPod *v1.Pod) bool { + // If the pod's deletion timestamp is set, remove endpoint from ready address. + if newPod.DeletionTimestamp != oldPod.DeletionTimestamp { + return true + } // If the pod's readiness has changed, the associated endpoint address // will move from the unready endpoints set to the ready endpoints. // So for the purposes of an endpoint, a readiness change on a pod @@ -395,7 +399,7 @@ func (e *EndpointController) syncService(key string) error { // service is deleted. However, if we're down at the time when // the service is deleted, we will miss that deletion, so this // doesn't completely solve the problem. See #6877. - err = e.client.Core().Endpoints(namespace).Delete(name, nil) + err = e.client.CoreV1().Endpoints(namespace).Delete(name, nil) if err != nil && !errors.IsNotFound(err) { return err } @@ -508,10 +512,10 @@ func (e *EndpointController) syncService(key string) error { glog.V(4).Infof("Update endpoints for %v/%v, ready: %d not ready: %d", service.Namespace, service.Name, totalReadyEps, totalNotReadyEps) if createEndpoints { // No previous endpoints, create them - _, err = e.client.Core().Endpoints(service.Namespace).Create(newEndpoints) + _, err = e.client.CoreV1().Endpoints(service.Namespace).Create(newEndpoints) } else { // Pre-existing - _, err = e.client.Core().Endpoints(service.Namespace).Update(newEndpoints) + _, err = e.client.CoreV1().Endpoints(service.Namespace).Update(newEndpoints) } if err != nil { if createEndpoints && errors.IsForbidden(err) { diff --git a/vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/BUILD index 767027910525..1ea321cce933 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/BUILD @@ -17,6 +17,7 @@ go_library( "patch.go", "uid_cache.go", ], + importpath = "k8s.io/kubernetes/pkg/controller/garbagecollector", deps = [ "//pkg/controller:go_default_library", "//pkg/controller/garbagecollector/metaonly:go_default_library", @@ -49,10 +50,11 @@ go_library( go_test( name = "go_default_test", srcs = ["garbagecollector_test.go"], + importpath = "k8s.io/kubernetes/pkg/controller/garbagecollector", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/install:go_default_library", + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/core/install:go_default_library", "//pkg/controller/garbagecollector/metaonly:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", @@ -64,6 +66,7 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", + "//vendor/k8s.io/client-go/discovery:go_default_library", "//vendor/k8s.io/client-go/dynamic:go_default_library", "//vendor/k8s.io/client-go/informers:go_default_library", "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/garbagecollector.go b/vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/garbagecollector.go index 88536408713e..dad94e6e97e6 100644 --- 
a/vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/garbagecollector.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/garbagecollector.go @@ -171,17 +171,7 @@ func (gc *GarbageCollector) Sync(discoveryClient discovery.DiscoveryInterface, p oldResources := make(map[schema.GroupVersionResource]struct{}) wait.Until(func() { // Get the current resource list from discovery. - newResources, err := GetDeletableResources(discoveryClient) - if err != nil { - utilruntime.HandleError(err) - return - } - - // Detect first or abnormal sync and try again later. - if oldResources == nil || len(oldResources) == 0 { - oldResources = newResources - return - } + newResources := GetDeletableResources(discoveryClient) // Decide whether discovery has reported a change. if reflect.DeepEqual(oldResources, newResources) { @@ -189,9 +179,8 @@ func (gc *GarbageCollector) Sync(discoveryClient discovery.DiscoveryInterface, p return } - // Something has changed, so track the new state and perform a sync. + // Something has changed, time to sync. glog.V(2).Infof("syncing garbage collector with updated resources from discovery: %v", newResources) - oldResources = newResources // Ensure workers are paused to avoid processing events before informers // have resynced. @@ -216,9 +205,19 @@ func (gc *GarbageCollector) Sync(discoveryClient discovery.DiscoveryInterface, p utilruntime.HandleError(fmt.Errorf("failed to sync resource monitors: %v", err)) return } + // TODO: WaitForCacheSync can block forever during normal operation. Could + // pass a timeout channel, but we have to consider the implications of + // un-pausing the GC with a partially synced graph builder. if !controller.WaitForCacheSync("garbage collector", stopCh, gc.dependencyGraphBuilder.IsSynced) { utilruntime.HandleError(fmt.Errorf("timed out waiting for dependency graph builder sync during GC sync")) + return } + + // Finally, keep track of our new state. Do this after all preceding steps + // have succeeded to ensure we'll retry on subsequent syncs if an error + // occured. + oldResources = newResources + glog.V(2).Infof("synced garbage collector") }, period, stopCh) } @@ -563,14 +562,14 @@ func (gc *GarbageCollector) attemptToOrphanWorker() bool { err := gc.orphanDependents(owner.identity, dependents) if err != nil { - glog.V(5).Infof("orphanDependents for %s failed with %v", owner.identity, err) + utilruntime.HandleError(fmt.Errorf("orphanDependents for %s failed with %v", owner.identity, err)) gc.attemptToOrphan.AddRateLimited(item) return true } // update the owner, remove "orphaningFinalizer" from its finalizers list err = gc.removeFinalizer(owner, metav1.FinalizerOrphanDependents) if err != nil { - glog.V(5).Infof("removeOrphanFinalizer for %s failed with %v", owner.identity, err) + utilruntime.HandleError(fmt.Errorf("removeOrphanFinalizer for %s failed with %v", owner.identity, err)) gc.attemptToOrphan.AddRateLimited(item) } return true @@ -591,16 +590,38 @@ func (gc *GarbageCollector) GraphHasUID(UIDs []types.UID) bool { // GetDeletableResources returns all resources from discoveryClient that the // garbage collector should recognize and work with. More specifically, all -// preferred resources which support the 'delete' verb. -func GetDeletableResources(discoveryClient discovery.DiscoveryInterface) (map[schema.GroupVersionResource]struct{}, error) { +// preferred resources which support the 'delete', 'list', and 'watch' verbs. +// +// All discovery errors are considered temporary. 
Upon encountering any error, +// GetDeletableResources will log and return any discovered resources it was +// able to process (which may be none). +func GetDeletableResources(discoveryClient discovery.ServerResourcesInterface) map[schema.GroupVersionResource]struct{} { preferredResources, err := discoveryClient.ServerPreferredResources() if err != nil { - return nil, fmt.Errorf("failed to get supported resources from server: %v", err) + if discovery.IsGroupDiscoveryFailedError(err) { + glog.Warning("failed to discover some groups: %v", err.(*discovery.ErrGroupDiscoveryFailed).Groups) + } else { + glog.Warning("failed to discover preferred resources: %v", err) + } } - deletableResources := discovery.FilteredBy(discovery.SupportsAllVerbs{Verbs: []string{"delete"}}, preferredResources) - deletableGroupVersionResources, err := discovery.GroupVersionResources(deletableResources) - if err != nil { - return nil, fmt.Errorf("Failed to parse resources from server: %v", err) + if preferredResources == nil { + return map[schema.GroupVersionResource]struct{}{} + } + + // This is extracted from discovery.GroupVersionResources to allow tolerating + // failures on a per-resource basis. + deletableResources := discovery.FilteredBy(discovery.SupportsAllVerbs{Verbs: []string{"delete", "list", "watch"}}, preferredResources) + deletableGroupVersionResources := map[schema.GroupVersionResource]struct{}{} + for _, rl := range deletableResources { + gv, err := schema.ParseGroupVersion(rl.GroupVersion) + if err != nil { + glog.Warning("ignoring invalid discovered resource %q: %v", rl.GroupVersion, err) + continue + } + for i := range rl.APIResources { + deletableGroupVersionResources[schema.GroupVersionResource{Group: gv.Group, Version: gv.Version, Resource: rl.APIResources[i].Name}] = struct{}{} + } } - return deletableGroupVersionResources, nil + + return deletableGroupVersionResources } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/graph_builder.go b/vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/graph_builder.go index c19305312e9c..03a96503a061 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/graph_builder.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/graph_builder.go @@ -230,7 +230,7 @@ func (gb *GraphBuilder) syncMonitors(resources map[schema.GroupVersionResource]s kept := 0 added := 0 for resource := range resources { - if _, ok := ignoredResources[resource.GroupResource()]; ok { + if _, ok := gb.ignoredResources[resource.GroupResource()]; ok { continue } if m, ok := toRemove[resource]; ok { @@ -342,6 +342,9 @@ func (gb *GraphBuilder) Run(stopCh <-chan struct{}) { close(monitor.stopCh) } } + + // reset monitors so that the graph builder can be safely re-run/synced. 
+ gb.monitors = nil glog.Infof("stopped %d of %d monitors", stopped, len(monitors)) } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly/BUILD index f012b59743c2..157f4ba9a146 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly/BUILD @@ -13,10 +13,10 @@ go_library( "types.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", @@ -26,9 +26,10 @@ go_library( go_test( name = "go_default_test", srcs = ["metaonly_test.go"], + importpath = "k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly", library = ":go_default_library", deps = [ - "//pkg/api/install:go_default_library", + "//pkg/apis/core/install:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly/metaonly.go b/vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly/metaonly.go index 5f1db87d3974..aec98ba33560 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly/metaonly.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly/metaonly.go @@ -24,7 +24,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" ) type metaOnlyJSONScheme struct{} @@ -41,10 +41,10 @@ func gvkToMetadataOnlyObject(gvk schema.GroupVersionKind) runtime.Object { } func NewMetadataCodecFactory() serializer.CodecFactory { - // populating another scheme from api.Scheme, registering every kind with + // populating another scheme from legacyscheme.Scheme, registering every kind with // MetadataOnlyObject (or MetadataOnlyObjectList). scheme := runtime.NewScheme() - allTypes := api.Scheme.AllKnownTypes() + allTypes := legacyscheme.Scheme.AllKnownTypes() for kind := range allTypes { if kind.Version == runtime.APIVersionInternal { continue diff --git a/vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly/zz_generated.deepcopy.go index 70e2946939a5..9c766f26307c 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly/zz_generated.deepcopy.go @@ -21,27 +21,9 @@ limitations under the License. package metaonly import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { - return []conversion.GeneratedDeepCopyFunc{ - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*MetadataOnlyObject).DeepCopyInto(out.(*MetadataOnlyObject)) - return nil - }, InType: reflect.TypeOf(&MetadataOnlyObject{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*MetadataOnlyObjectList).DeepCopyInto(out.(*MetadataOnlyObjectList)) - return nil - }, InType: reflect.TypeOf(&MetadataOnlyObjectList{})}, - } -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MetadataOnlyObject) DeepCopyInto(out *MetadataOnlyObject) { *out = *in diff --git a/vendor/k8s.io/kubernetes/pkg/controller/history/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/history/BUILD index 39ade7b69459..5a5f4ebae9c3 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/history/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/history/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["controller_history_test.go"], + importpath = "k8s.io/kubernetes/pkg/controller/history", library = ":go_default_library", deps = [ "//pkg/api/testapi:go_default_library", @@ -31,6 +32,7 @@ go_test( go_library( name = "go_default_library", srcs = ["controller_history.go"], + importpath = "k8s.io/kubernetes/pkg/controller/history", deps = [ "//pkg/util/hash:go_default_library", "//vendor/k8s.io/api/apps/v1beta1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/job/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/job/BUILD index 39257c81e05f..a5ab7ba7aea7 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/job/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/job/BUILD @@ -13,6 +13,7 @@ go_library( "job_controller.go", "utils.go", ], + importpath = "k8s.io/kubernetes/pkg/controller/job", deps = [ "//pkg/controller:go_default_library", "//pkg/util/metrics:go_default_library", @@ -44,10 +45,11 @@ go_test( "job_controller_test.go", "utils_test.go", ], + importpath = "k8s.io/kubernetes/pkg/controller/job", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/install:go_default_library", + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/core/install:go_default_library", "//pkg/controller:go_default_library", "//vendor/k8s.io/api/batch/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/job/job_controller.go b/vendor/k8s.io/kubernetes/pkg/controller/job/job_controller.go index e31f8394366a..a12cbed1c46b 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/job/job_controller.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/job/job_controller.go @@ -770,7 +770,7 @@ func (jm *JobController) manageJob(activePods []*v1.Pod, succeeded int32, job *b } func (jm *JobController) updateJobStatus(job *batch.Job) error { - _, err := jm.kubeClient.Batch().Jobs(job.Namespace).UpdateStatus(job) + _, err := jm.kubeClient.BatchV1().Jobs(job.Namespace).UpdateStatus(job) return err } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/namespace/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/namespace/BUILD index 1ca4e65349f0..015813897e84 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/namespace/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/namespace/BUILD @@ -11,6 +11,7 @@ go_library( "doc.go", "namespace_controller.go", ], + importpath = 
"k8s.io/kubernetes/pkg/controller/namespace", deps = [ "//pkg/controller:go_default_library", "//pkg/controller/namespace/deletion:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/namespace/deletion/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/namespace/deletion/BUILD index fc6e9641018b..3ff183f2e42c 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/namespace/deletion/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/namespace/deletion/BUILD @@ -9,6 +9,7 @@ load( go_library( name = "go_default_library", srcs = ["namespaced_resources_deleter.go"], + importpath = "k8s.io/kubernetes/pkg/controller/namespace/deletion", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", @@ -28,9 +29,11 @@ go_library( go_test( name = "go_default_test", srcs = ["namespaced_resources_deleter_test.go"], + importpath = "k8s.io/kubernetes/pkg/controller/namespace/deletion", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/namespace/namespace_controller.go b/vendor/k8s.io/kubernetes/pkg/controller/namespace/namespace_controller.go index c3bdd28be7a1..32a0bb33fd9a 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/namespace/namespace_controller.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/namespace/namespace_controller.go @@ -72,11 +72,11 @@ func NewNamespaceController( // create the controller so we can inject the enqueue function namespaceController := &NamespaceController{ queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "namespace"), - namespacedResourcesDeleter: deletion.NewNamespacedResourcesDeleter(kubeClient.Core().Namespaces(), clientPool, kubeClient.Core(), discoverResourcesFn, finalizerToken, true), + namespacedResourcesDeleter: deletion.NewNamespacedResourcesDeleter(kubeClient.CoreV1().Namespaces(), clientPool, kubeClient.CoreV1(), discoverResourcesFn, finalizerToken, true), } - if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil { - metrics.RegisterMetricAndTrackRateLimiterUsage("namespace_controller", kubeClient.Core().RESTClient().GetRateLimiter()) + if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil { + metrics.RegisterMetricAndTrackRateLimiterUsage("namespace_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter()) } // configure the namespace informer event handlers diff --git a/vendor/k8s.io/kubernetes/pkg/controller/node/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/node/BUILD index 400a67b930f5..7ed48e2dc620 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/node/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/node/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["nodecontroller_test.go"], + importpath = "k8s.io/kubernetes/pkg/controller/node", library = ":go_default_library", deps = [ "//pkg/cloudprovider:go_default_library", @@ -46,6 +47,7 @@ go_library( "metrics.go", "node_controller.go", ], + importpath = "k8s.io/kubernetes/pkg/controller/node", deps = [ "//pkg/api/v1/node:go_default_library", "//pkg/cloudprovider:go_default_library", @@ -58,7 +60,6 @@ go_library( "//pkg/util/node:go_default_library", 
"//pkg/util/system:go_default_library", "//pkg/util/taints:go_default_library", - "//pkg/util/version:go_default_library", "//plugin/pkg/scheduler/algorithm:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/node/OWNERS b/vendor/k8s.io/kubernetes/pkg/controller/node/OWNERS index b94f08c6b3e5..99dd2eda0e10 100755 --- a/vendor/k8s.io/kubernetes/pkg/controller/node/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/controller/node/OWNERS @@ -6,3 +6,4 @@ reviewers: - smarterclayton - ingvagabund - aveshagarwal +- k82cn diff --git a/vendor/k8s.io/kubernetes/pkg/controller/node/ipam/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/node/ipam/BUILD index f2319adf8ad6..e72f938e9411 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/node/ipam/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/node/ipam/BUILD @@ -13,14 +13,18 @@ go_test( "range_allocator_test.go", "timeout_test.go", ], + importpath = "k8s.io/kubernetes/pkg/controller/node/ipam", library = ":go_default_library", deps = [ + "//pkg/controller:go_default_library", "//pkg/controller/node/ipam/cidrset:go_default_library", "//pkg/controller/node/ipam/test:go_default_library", "//pkg/controller/testutil:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/client-go/informers:go_default_library", + "//vendor/k8s.io/client-go/informers/core/v1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library", ], ) @@ -36,9 +40,12 @@ go_library( "range_allocator.go", "timeout.go", ], + importpath = "k8s.io/kubernetes/pkg/controller/node/ipam", deps = [ + "//pkg/api/v1/node:go_default_library", "//pkg/cloudprovider:go_default_library", "//pkg/cloudprovider/providers/gce:go_default_library", + "//pkg/controller:go_default_library", "//pkg/controller/node/ipam/cidrset:go_default_library", "//pkg/controller/node/ipam/sync:go_default_library", "//pkg/controller/node/util:go_default_library", @@ -50,12 +57,14 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/client-go/informers/core/v1:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", + "//vendor/k8s.io/client-go/listers/core/v1:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", "//vendor/k8s.io/client-go/tools/record:go_default_library", "//vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/scheme:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/node/ipam/adapter.go b/vendor/k8s.io/kubernetes/pkg/controller/node/ipam/adapter.go index 00a91535c8e9..6a5d9e480b0d 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/node/ipam/adapter.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/node/ipam/adapter.go @@ -52,7 +52,7 @@ func newAdapter(k8s clientset.Interface, cloud *gce.GCECloud) 
*adapter { ret.recorder = broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloudCIDRAllocator"}) glog.V(0).Infof("Sending events to api server.") broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{ - Interface: v1core.New(k8s.Core().RESTClient()).Events(""), + Interface: v1core.New(k8s.CoreV1().RESTClient()).Events(""), }) return ret @@ -86,7 +86,7 @@ func (a *adapter) AddAlias(ctx context.Context, nodeName string, cidrRange *net. } func (a *adapter) Node(ctx context.Context, name string) (*v1.Node, error) { - return a.k8s.Core().Nodes().Get(name, metav1.GetOptions{}) + return a.k8s.CoreV1().Nodes().Get(name, metav1.GetOptions{}) } func (a *adapter) UpdateNodePodCIDR(ctx context.Context, node *v1.Node, cidrRange *net.IPNet) error { @@ -101,7 +101,7 @@ func (a *adapter) UpdateNodePodCIDR(ctx context.Context, node *v1.Node, cidrRang return err } - _, err = a.k8s.Core().Nodes().Patch(node.Name, types.StrategicMergePatchType, bytes) + _, err = a.k8s.CoreV1().Nodes().Patch(node.Name, types.StrategicMergePatchType, bytes) return err } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/node/ipam/cidr_allocator.go b/vendor/k8s.io/kubernetes/pkg/controller/node/ipam/cidr_allocator.go index 6fa8a684e060..b9a97938ad31 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/node/ipam/cidr_allocator.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/node/ipam/cidr_allocator.go @@ -80,12 +80,12 @@ type CIDRAllocator interface { AllocateOrOccupyCIDR(node *v1.Node) error // ReleaseCIDR releases the CIDR of the removed node ReleaseCIDR(node *v1.Node) error - // Register allocator with the nodeInformer for updates. - Register(nodeInformer informers.NodeInformer) + // Run starts all the working logic of the allocator. + Run(stopCh <-chan struct{}) } // New creates a new CIDR range allocator. -func New(kubeClient clientset.Interface, cloud cloudprovider.Interface, allocatorType CIDRAllocatorType, clusterCIDR, serviceCIDR *net.IPNet, nodeCIDRMaskSize int) (CIDRAllocator, error) { +func New(kubeClient clientset.Interface, cloud cloudprovider.Interface, nodeInformer informers.NodeInformer, allocatorType CIDRAllocatorType, clusterCIDR, serviceCIDR *net.IPNet, nodeCIDRMaskSize int) (CIDRAllocator, error) { nodeList, err := listNodes(kubeClient) if err != nil { return nil, err @@ -93,9 +93,9 @@ func New(kubeClient clientset.Interface, cloud cloudprovider.Interface, allocato switch allocatorType { case RangeAllocatorType: - return NewCIDRRangeAllocator(kubeClient, clusterCIDR, serviceCIDR, nodeCIDRMaskSize, nodeList) + return NewCIDRRangeAllocator(kubeClient, nodeInformer, clusterCIDR, serviceCIDR, nodeCIDRMaskSize, nodeList) case CloudAllocatorType: - return NewCloudCIDRAllocator(kubeClient, cloud) + return NewCloudCIDRAllocator(kubeClient, cloud, nodeInformer) default: return nil, fmt.Errorf("Invalid CIDR allocator type: %v", allocatorType) } @@ -107,7 +107,7 @@ func listNodes(kubeClient clientset.Interface) (*v1.NodeList, error) { // controller manager to restart. 
if pollErr := wait.Poll(10*time.Second, apiserverStartupGracePeriod, func() (bool, error) { var err error - nodeList, err = kubeClient.Core().Nodes().List(metav1.ListOptions{ + nodeList, err = kubeClient.CoreV1().Nodes().List(metav1.ListOptions{ FieldSelector: fields.Everything().String(), LabelSelector: labels.Everything().String(), }) diff --git a/vendor/k8s.io/kubernetes/pkg/controller/node/ipam/cidrset/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/node/ipam/cidrset/BUILD index fdad775f2d92..5ea716c59ff1 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/node/ipam/cidrset/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/node/ipam/cidrset/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["cidr_set_test.go"], + importpath = "k8s.io/kubernetes/pkg/controller/node/ipam/cidrset", library = ":go_default_library", deps = ["//vendor/github.com/golang/glog:go_default_library"], ) @@ -16,6 +17,7 @@ go_test( go_library( name = "go_default_library", srcs = ["cidr_set.go"], + importpath = "k8s.io/kubernetes/pkg/controller/node/ipam/cidrset", ) filegroup( diff --git a/vendor/k8s.io/kubernetes/pkg/controller/node/ipam/cidrset/cidr_set.go b/vendor/k8s.io/kubernetes/pkg/controller/node/ipam/cidrset/cidr_set.go index a83f73a293db..38a0521d2dd0 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/node/ipam/cidrset/cidr_set.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/node/ipam/cidrset/cidr_set.go @@ -21,6 +21,7 @@ import ( "errors" "fmt" "math/big" + "math/bits" "net" "sync" ) @@ -43,35 +44,38 @@ const ( // TODO: https://github.com/kubernetes/kubernetes/issues/44918 // clusterSubnetMaxDiff limited to 16 due to the uncompressed bitmap clusterSubnetMaxDiff = 16 - // maximum 64 bits of prefix - maxPrefixLength = 64 + // halfIPv6Len is the half of the IPv6 length + halfIPv6Len = net.IPv6len / 2 ) var ( - // ErrCIDRRangeNoCIDRsRemaining occurs when there are no more space + // ErrCIDRRangeNoCIDRsRemaining occurs when there is no more space // to allocate CIDR ranges. ErrCIDRRangeNoCIDRsRemaining = errors.New( "CIDR allocation failed; there are no remaining CIDRs left to allocate in the accepted range") + // ErrCIDRSetSubNetTooBig occurs when the subnet mask size is too + // big compared to the CIDR mask size. + ErrCIDRSetSubNetTooBig = errors.New( + "New CIDR set failed; the node CIDR size is too big") ) // NewCIDRSet creates a new CidrSet. 
-func NewCIDRSet(clusterCIDR *net.IPNet, subNetMaskSize int) *CidrSet { +func NewCIDRSet(clusterCIDR *net.IPNet, subNetMaskSize int) (*CidrSet, error) { clusterMask := clusterCIDR.Mask clusterMaskSize, _ := clusterMask.Size() var maxCIDRs int - if ((clusterCIDR.IP.To4() == nil) && (subNetMaskSize-clusterMaskSize > clusterSubnetMaxDiff)) || (subNetMaskSize > maxPrefixLength) { - maxCIDRs = 0 - } else { - maxCIDRs = 1 << uint32(subNetMaskSize-clusterMaskSize) + if (clusterCIDR.IP.To4() == nil) && (subNetMaskSize-clusterMaskSize > clusterSubnetMaxDiff) { + return nil, ErrCIDRSetSubNetTooBig } + maxCIDRs = 1 << uint32(subNetMaskSize-clusterMaskSize) return &CidrSet{ clusterCIDR: clusterCIDR, clusterIP: clusterCIDR.IP, clusterMaskSize: clusterMaskSize, maxCIDRs: maxCIDRs, subNetMaskSize: subNetMaskSize, - } + }, nil } func (s *CidrSet) indexToCIDRBlock(index int) *net.IPNet { @@ -89,10 +93,35 @@ func (s *CidrSet) indexToCIDRBlock(index int) *net.IPNet { } case s.clusterIP.To16() != nil: { - j := uint64(index) << uint64(64-s.subNetMaskSize) - ipInt := (binary.BigEndian.Uint64(s.clusterIP)) | j - ip = make([]byte, 16) - binary.BigEndian.PutUint64(ip, ipInt) + // leftClusterIP | rightClusterIP + // 2001:0DB8:1234:0000:0000:0000:0000:0000 + const v6NBits = 128 + const halfV6NBits = v6NBits / 2 + leftClusterIP := binary.BigEndian.Uint64(s.clusterIP[:halfIPv6Len]) + rightClusterIP := binary.BigEndian.Uint64(s.clusterIP[halfIPv6Len:]) + + leftIP, rightIP := make([]byte, halfIPv6Len), make([]byte, halfIPv6Len) + + if s.subNetMaskSize <= halfV6NBits { + // We only care about left side IP + leftClusterIP |= uint64(index) << uint(halfV6NBits-s.subNetMaskSize) + } else { + if s.clusterMaskSize < halfV6NBits { + // see how many bits are needed to reach the left side + btl := uint(s.subNetMaskSize - halfV6NBits) + indexMaxBit := uint(64 - bits.LeadingZeros64(uint64(index))) + if indexMaxBit > btl { + leftClusterIP |= uint64(index) >> btl + } + } + // the right side will be calculated the same way either the + // subNetMaskSize affects both left and right sides + rightClusterIP |= uint64(index) << uint(v6NBits-s.subNetMaskSize) + } + binary.BigEndian.PutUint64(leftIP, leftClusterIP) + binary.BigEndian.PutUint64(rightIP, rightClusterIP) + + ip = append(leftIP, rightIP...) 
mask = 128 } } @@ -159,8 +188,12 @@ func (s *CidrSet) getBeginingAndEndIndices(cidr *net.IPNet) (begin, end int, err ipInt := binary.BigEndian.Uint32(cidr.IP) | (^binary.BigEndian.Uint32(cidr.Mask)) binary.BigEndian.PutUint32(ip, ipInt) } else { - ipInt := binary.BigEndian.Uint64(cidr.IP) | (^binary.BigEndian.Uint64(cidr.Mask)) - binary.BigEndian.PutUint64(ip, ipInt) + // ipIntLeft | ipIntRight + // 2001:0DB8:1234:0000:0000:0000:0000:0000 + ipIntLeft := binary.BigEndian.Uint64(cidr.IP[:net.IPv6len/2]) | (^binary.BigEndian.Uint64(cidr.Mask[:net.IPv6len/2])) + ipIntRight := binary.BigEndian.Uint64(cidr.IP[net.IPv6len/2:]) | (^binary.BigEndian.Uint64(cidr.Mask[net.IPv6len/2:])) + binary.BigEndian.PutUint64(ip[:net.IPv6len/2], ipIntLeft) + binary.BigEndian.PutUint64(ip[net.IPv6len/2:], ipIntRight) } end, err = s.getIndexForCIDR(&net.IPNet{ IP: net.IP(ip).Mask(subNetMask), @@ -217,7 +250,10 @@ func (s *CidrSet) getIndexForIP(ip net.IP) (int, error) { return int(cidrIndex), nil } if ip.To16() != nil { - cidrIndex := (binary.BigEndian.Uint64(s.clusterIP) ^ binary.BigEndian.Uint64(ip.To16())) >> uint64(64-s.subNetMaskSize) + bigIP := big.NewInt(0).SetBytes(s.clusterIP) + bigIP = bigIP.Xor(bigIP, big.NewInt(0).SetBytes(ip)) + cidrIndexBig := bigIP.Rsh(bigIP, uint(net.IPv6len*8-s.subNetMaskSize)) + cidrIndex := cidrIndexBig.Uint64() if cidrIndex >= uint64(s.maxCIDRs) { return 0, fmt.Errorf("CIDR: %v/%v is out of the range of CIDR allocator", ip, s.subNetMaskSize) } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/node/ipam/cloud_cidr_allocator.go b/vendor/k8s.io/kubernetes/pkg/controller/node/ipam/cloud_cidr_allocator.go index 5403ebb91a13..98b65b22e674 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/node/ipam/cloud_cidr_allocator.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/node/ipam/cloud_cidr_allocator.go @@ -25,9 +25,10 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/wait" informers "k8s.io/client-go/informers/core/v1" + corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" @@ -35,10 +36,12 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" v1core "k8s.io/client-go/kubernetes/typed/core/v1" + v1node "k8s.io/kubernetes/pkg/api/v1/node" "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" + "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller/node/util" - nodeutil "k8s.io/kubernetes/pkg/util/node" + utilnode "k8s.io/kubernetes/pkg/util/node" ) // cloudCIDRAllocator allocates node CIDRs according to IP address aliases @@ -49,10 +52,18 @@ type cloudCIDRAllocator struct { client clientset.Interface cloud *gce.GCECloud - // Channel that is used to pass updating Nodes with assigned CIDRs to the background - // This increases a throughput of CIDR assignment by not blocking on long operations. - nodeCIDRUpdateChannel chan nodeAndCIDR - recorder record.EventRecorder + // nodeLister is able to list/get nodes and is populated by the shared informer passed to + // NewCloudCIDRAllocator. + nodeLister corelisters.NodeLister + // nodesSynced returns true if the node shared informer has been synced at least once. + nodesSynced cache.InformerSynced + + // Channel that is used to pass updating Nodes to the background. 
+ // This increases the throughput of CIDR assignment by parallelization + // and not blocking on long operations (which shouldn't be done from + // event handlers anyway). + nodeUpdateChannel chan string + recorder record.EventRecorder // Keep a set of nodes that are currectly being processed to avoid races in CIDR allocation lock sync.Mutex @@ -62,7 +73,7 @@ type cloudCIDRAllocator struct { var _ CIDRAllocator = (*cloudCIDRAllocator)(nil) // NewCloudCIDRAllocator creates a new cloud CIDR allocator. -func NewCloudCIDRAllocator(client clientset.Interface, cloud cloudprovider.Interface) (CIDRAllocator, error) { +func NewCloudCIDRAllocator(client clientset.Interface, cloud cloudprovider.Interface, nodeInformer informers.NodeInformer) (CIDRAllocator, error) { if client == nil { glog.Fatalf("kubeClient is nil when starting NodeController") } @@ -71,7 +82,7 @@ func NewCloudCIDRAllocator(client clientset.Interface, cloud cloudprovider.Inter recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cidrAllocator"}) eventBroadcaster.StartLogging(glog.Infof) glog.V(0).Infof("Sending events to api server.") - eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(client.Core().RESTClient()).Events("")}) + eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(client.CoreV1().RESTClient()).Events("")}) gceCloud, ok := cloud.(*gce.GCECloud) if !ok { @@ -80,31 +91,65 @@ func NewCloudCIDRAllocator(client clientset.Interface, cloud cloudprovider.Inter } ca := &cloudCIDRAllocator{ - client: client, - cloud: gceCloud, - nodeCIDRUpdateChannel: make(chan nodeAndCIDR, cidrUpdateQueueSize), - recorder: recorder, - nodesInProcessing: sets.NewString(), + client: client, + cloud: gceCloud, + nodeLister: nodeInformer.Lister(), + nodesSynced: nodeInformer.Informer().HasSynced, + nodeUpdateChannel: make(chan string, cidrUpdateQueueSize), + recorder: recorder, + nodesInProcessing: sets.NewString(), } - for i := 0; i < cidrUpdateWorkers; i++ { - // TODO: Take stopChan as an argument to NewCloudCIDRAllocator and pass it to the worker. - go ca.worker(wait.NeverStop) - } + nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: util.CreateAddNodeHandler(ca.AllocateOrOccupyCIDR), + UpdateFunc: util.CreateUpdateNodeHandler(func(_, newNode *v1.Node) error { + if newNode.Spec.PodCIDR == "" { + return ca.AllocateOrOccupyCIDR(newNode) + } + // Even if PodCIDR is assigned, but NetworkUnavailable condition is + // set to true, we need to process the node to set the condition. 
+ _, cond := v1node.GetNodeCondition(&newNode.Status, v1.NodeNetworkUnavailable) + if cond == nil || cond.Status != v1.ConditionFalse { + return ca.AllocateOrOccupyCIDR(newNode) + } + return nil + }), + DeleteFunc: util.CreateDeleteNodeHandler(ca.ReleaseCIDR), + }) glog.V(0).Infof("Using cloud CIDR allocator (provider: %v)", cloud.ProviderName()) return ca, nil } +func (ca *cloudCIDRAllocator) Run(stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + + glog.Infof("Starting cloud CIDR allocator") + defer glog.Infof("Shutting down cloud CIDR allocator") + + if !controller.WaitForCacheSync("cidrallocator", stopCh, ca.nodesSynced) { + return + } + + for i := 0; i < cidrUpdateWorkers; i++ { + go ca.worker(stopCh) + } + + <-stopCh +} + func (ca *cloudCIDRAllocator) worker(stopChan <-chan struct{}) { for { select { - case workItem, ok := <-ca.nodeCIDRUpdateChannel: + case workItem, ok := <-ca.nodeUpdateChannel: if !ok { glog.Warning("Channel nodeCIDRUpdateChannel was unexpectedly closed") return } - ca.updateCIDRAllocation(workItem) + if err := ca.updateCIDRAllocation(workItem); err != nil { + // Requeue the failed node for update again. + ca.nodeUpdateChannel <- workItem + } case <-stopChan: return } @@ -138,14 +183,24 @@ func (ca *cloudCIDRAllocator) AllocateOrOccupyCIDR(node *v1.Node) error { glog.V(2).Infof("Node %v is already in a process of CIDR assignment.", node.Name) return nil } - cidrs, err := ca.cloud.AliasRanges(types.NodeName(node.Name)) + + glog.V(4).Infof("Putting node %s into the work queue", node.Name) + ca.nodeUpdateChannel <- node.Name + return nil +} + +// updateCIDRAllocation assigns CIDR to Node and sends an update to the API server. +func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error { + var err error + var node *v1.Node + defer ca.removeNodeFromProcessing(nodeName) + + cidrs, err := ca.cloud.AliasRanges(types.NodeName(nodeName)) if err != nil { - ca.removeNodeFromProcessing(node.Name) util.RecordNodeStatusChange(ca.recorder, node, "CIDRNotAvailable") return fmt.Errorf("failed to allocate cidr: %v", err) } if len(cidrs) == 0 { - ca.removeNodeFromProcessing(node.Name) util.RecordNodeStatusChange(ca.recorder, node, "CIDRNotAvailable") return fmt.Errorf("failed to allocate cidr: Node %v has no CIDRs", node.Name) } @@ -153,32 +208,19 @@ func (ca *cloudCIDRAllocator) AllocateOrOccupyCIDR(node *v1.Node) error { if err != nil { return fmt.Errorf("failed to parse string '%s' as a CIDR: %v", cidrs[0], err) } + podCIDR := cidr.String() - glog.V(4).Infof("Putting node %s with CIDR %s into the work queue", node.Name, cidrs[0]) - ca.nodeCIDRUpdateChannel <- nodeAndCIDR{ - nodeName: node.Name, - cidr: cidr, - } - return nil -} - -// updateCIDRAllocation assigns CIDR to Node and sends an update to the API server. -func (ca *cloudCIDRAllocator) updateCIDRAllocation(data nodeAndCIDR) error { - var err error - var node *v1.Node - defer ca.removeNodeFromProcessing(data.nodeName) - podCIDR := data.cidr.String() for rep := 0; rep < cidrUpdateRetries; rep++ { - // TODO: change it to using PATCH instead of full Node updates. 
- node, err = ca.client.Core().Nodes().Get(data.nodeName, metav1.GetOptions{}) + node, err = ca.nodeLister.Get(nodeName) if err != nil { - glog.Errorf("Failed while getting node %v to retry updating Node.Spec.PodCIDR: %v", data.nodeName, err) + glog.Errorf("Failed while getting node %v to retry updating Node.Spec.PodCIDR: %v", nodeName, err) continue } if node.Spec.PodCIDR != "" { if node.Spec.PodCIDR == podCIDR { glog.V(4).Infof("Node %v already has allocated CIDR %v. It matches the proposed one.", node.Name, podCIDR) - return nil + // We don't return to set the NetworkUnavailable condition if needed. + break } glog.Errorf("PodCIDR being reassigned! Node %v spec has %v, but cloud provider has assigned %v", node.Name, node.Spec.PodCIDR, podCIDR) @@ -188,8 +230,7 @@ func (ca *cloudCIDRAllocator) updateCIDRAllocation(data nodeAndCIDR) error { // // See https://github.com/kubernetes/kubernetes/pull/42147#discussion_r103357248 } - node.Spec.PodCIDR = podCIDR - if _, err = ca.client.Core().Nodes().Update(node); err == nil { + if err = utilnode.PatchNodeCIDR(ca.client, types.NodeName(node.Name), podCIDR); err == nil { glog.Infof("Set node %v PodCIDR to %v", node.Name, podCIDR) break } @@ -197,11 +238,11 @@ func (ca *cloudCIDRAllocator) updateCIDRAllocation(data nodeAndCIDR) error { } if err != nil { util.RecordNodeStatusChange(ca.recorder, node, "CIDRAssignmentFailed") - glog.Errorf("CIDR assignment for node %v failed: %v.", data.nodeName, err) + glog.Errorf("CIDR assignment for node %v failed: %v.", nodeName, err) return err } - err = nodeutil.SetNodeCondition(ca.client, types.NodeName(node.Name), v1.NodeCondition{ + err = utilnode.SetNodeCondition(ca.client, types.NodeName(node.Name), v1.NodeCondition{ Type: v1.NodeNetworkUnavailable, Status: v1.ConditionFalse, Reason: "RouteCreated", @@ -219,16 +260,3 @@ func (ca *cloudCIDRAllocator) ReleaseCIDR(node *v1.Node) error { node.Name, node.Spec.PodCIDR) return nil } - -func (ca *cloudCIDRAllocator) Register(nodeInformer informers.NodeInformer) { - nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: util.CreateAddNodeHandler(ca.AllocateOrOccupyCIDR), - UpdateFunc: util.CreateUpdateNodeHandler(func(_, newNode *v1.Node) error { - if newNode.Spec.PodCIDR == "" { - return ca.AllocateOrOccupyCIDR(newNode) - } - return nil - }), - DeleteFunc: util.CreateDeleteNodeHandler(ca.ReleaseCIDR), - }) -} diff --git a/vendor/k8s.io/kubernetes/pkg/controller/node/ipam/controller.go b/vendor/k8s.io/kubernetes/pkg/controller/node/ipam/controller.go index f8a2375678f8..4b1221b6781b 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/node/ipam/controller.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/node/ipam/controller.go @@ -76,11 +76,16 @@ func NewController( return nil, fmt.Errorf("cloud IPAM controller does not support %q provider", cloud.ProviderName()) } + set, err := cidrset.NewCIDRSet(clusterCIDR, nodeCIDRMaskSize) + if err != nil { + return nil, err + } + c := &Controller{ config: config, adapter: newAdapter(kubeClient, gceCloud), syncers: make(map[string]*nodesync.NodeSync), - set: cidrset.NewCIDRSet(clusterCIDR, nodeCIDRMaskSize), + set: set, } if err := occupyServiceCIDR(c.set, clusterCIDR, serviceCIDR); err != nil { diff --git a/vendor/k8s.io/kubernetes/pkg/controller/node/ipam/range_allocator.go b/vendor/k8s.io/kubernetes/pkg/controller/node/ipam/range_allocator.go index cacc47a86174..d3037b1d1d11 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/node/ipam/range_allocator.go +++ 
b/vendor/k8s.io/kubernetes/pkg/controller/node/ipam/range_allocator.go @@ -25,18 +25,20 @@ import ( "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/wait" informers "k8s.io/client-go/informers/core/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" v1core "k8s.io/client-go/kubernetes/typed/core/v1" + corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" - + "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller/node/ipam/cidrset" "k8s.io/kubernetes/pkg/controller/node/util" + nodeutil "k8s.io/kubernetes/pkg/util/node" ) type rangeAllocator struct { @@ -45,6 +47,12 @@ type rangeAllocator struct { clusterCIDR *net.IPNet maxCIDRs int + // nodeLister is able to list/get nodes and is populated by the shared informer passed to + // NewCloudCIDRAllocator. + nodeLister corelisters.NodeLister + // nodesSynced returns true if the node shared informer has been synced at least once. + nodesSynced cache.InformerSynced + // Channel that is used to pass updating Nodes with assigned CIDRs to the background // This increases a throughput of CIDR assignment by not blocking on long operations. nodeCIDRUpdateChannel chan nodeAndCIDR @@ -59,7 +67,7 @@ type rangeAllocator struct { // Caller must ensure subNetMaskSize is not less than cluster CIDR mask size. // Caller must always pass in a list of existing nodes so the new allocator // can initialize its CIDR map. NodeList is only nil in testing. -func NewCIDRRangeAllocator(client clientset.Interface, clusterCIDR *net.IPNet, serviceCIDR *net.IPNet, subNetMaskSize int, nodeList *v1.NodeList) (CIDRAllocator, error) { +func NewCIDRRangeAllocator(client clientset.Interface, nodeInformer informers.NodeInformer, clusterCIDR *net.IPNet, serviceCIDR *net.IPNet, subNetMaskSize int, nodeList *v1.NodeList) (CIDRAllocator, error) { if client == nil { glog.Fatalf("kubeClient is nil when starting NodeController") } @@ -68,12 +76,18 @@ func NewCIDRRangeAllocator(client clientset.Interface, clusterCIDR *net.IPNet, s recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cidrAllocator"}) eventBroadcaster.StartLogging(glog.Infof) glog.V(0).Infof("Sending events to api server.") - eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(client.Core().RESTClient()).Events("")}) + eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(client.CoreV1().RESTClient()).Events("")}) + set, err := cidrset.NewCIDRSet(clusterCIDR, subNetMaskSize) + if err != nil { + return nil, err + } ra := &rangeAllocator{ client: client, - cidrs: cidrset.NewCIDRSet(clusterCIDR, subNetMaskSize), + cidrs: set, clusterCIDR: clusterCIDR, + nodeLister: nodeInformer.Lister(), + nodesSynced: nodeInformer.Informer().HasSynced, nodeCIDRUpdateChannel: make(chan nodeAndCIDR, cidrUpdateQueueSize), recorder: recorder, nodesInProcessing: sets.NewString(), @@ -103,12 +117,55 @@ func NewCIDRRangeAllocator(client clientset.Interface, clusterCIDR *net.IPNet, s } } } + + nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: util.CreateAddNodeHandler(ra.AllocateOrOccupyCIDR), + UpdateFunc: util.CreateUpdateNodeHandler(func(_, newNode *v1.Node) error { + // If the PodCIDR is not empty we either: + // - 
already processed a Node that already had a CIDR after NC restarted + // (cidr is marked as used), + // - already processed a Node successfully and allocated a CIDR for it + // (cidr is marked as used), + // - already processed a Node but we did saw a "timeout" response and + // request eventually got through in this case we haven't released + // the allocated CIDR (cidr is still marked as used). + // There's a possible error here: + // - NC sees a new Node and assigns a CIDR X to it, + // - Update Node call fails with a timeout, + // - Node is updated by some other component, NC sees an update and + // assigns CIDR Y to the Node, + // - Both CIDR X and CIDR Y are marked as used in the local cache, + // even though Node sees only CIDR Y + // The problem here is that in in-memory cache we see CIDR X as marked, + // which prevents it from being assigned to any new node. The cluster + // state is correct. + // Restart of NC fixes the issue. + if newNode.Spec.PodCIDR == "" { + return ra.AllocateOrOccupyCIDR(newNode) + } + return nil + }), + DeleteFunc: util.CreateDeleteNodeHandler(ra.ReleaseCIDR), + }) + + return ra, nil +} + +func (r *rangeAllocator) Run(stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + + glog.Infof("Starting range CIDR allocator") + defer glog.Infof("Shutting down range CIDR allocator") + + if !controller.WaitForCacheSync("cidrallocator", stopCh, r.nodesSynced) { + return + } + for i := 0; i < cidrUpdateWorkers; i++ { - // TODO: Take stopChan as an argument to NewCIDRRangeAllocator and pass it to the worker. - go ra.worker(wait.NeverStop) + go r.worker(stopCh) } - return ra, nil + <-stopCh } func (r *rangeAllocator) worker(stopChan <-chan struct{}) { @@ -119,7 +176,10 @@ func (r *rangeAllocator) worker(stopChan <-chan struct{}) { glog.Warning("Channel nodeCIDRUpdateChannel was unexpectedly closed") return } - r.updateCIDRAllocation(workItem) + if err := r.updateCIDRAllocation(workItem); err != nil { + // Requeue the failed node for update again. + r.nodeCIDRUpdateChannel <- workItem + } case <-stopChan: return } @@ -227,8 +287,7 @@ func (r *rangeAllocator) updateCIDRAllocation(data nodeAndCIDR) error { podCIDR := data.cidr.String() for rep := 0; rep < cidrUpdateRetries; rep++ { - // TODO: change it to using PATCH instead of full Node updates. 
- node, err = r.client.Core().Nodes().Get(data.nodeName, metav1.GetOptions{}) + node, err = r.nodeLister.Get(data.nodeName) if err != nil { glog.Errorf("Failed while getting node %v to retry updating Node.Spec.PodCIDR: %v", data.nodeName, err) continue @@ -244,8 +303,7 @@ func (r *rangeAllocator) updateCIDRAllocation(data nodeAndCIDR) error { } return nil } - node.Spec.PodCIDR = podCIDR - if _, err = r.client.Core().Nodes().Update(node); err == nil { + if err = nodeutil.PatchNodeCIDR(r.client, types.NodeName(node.Name), podCIDR); err == nil { glog.Infof("Set node %v PodCIDR to %v", node.Name, podCIDR) break } @@ -265,35 +323,3 @@ func (r *rangeAllocator) updateCIDRAllocation(data nodeAndCIDR) error { } return err } - -func (r *rangeAllocator) Register(nodeInformer informers.NodeInformer) { - nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: util.CreateAddNodeHandler(r.AllocateOrOccupyCIDR), - UpdateFunc: util.CreateUpdateNodeHandler(func(_, newNode *v1.Node) error { - // If the PodCIDR is not empty we either: - // - already processed a Node that already had a CIDR after NC restarted - // (cidr is marked as used), - // - already processed a Node successfully and allocated a CIDR for it - // (cidr is marked as used), - // - already processed a Node but we did saw a "timeout" response and - // request eventually got through in this case we haven't released - // the allocated CIDR (cidr is still marked as used). - // There's a possible error here: - // - NC sees a new Node and assigns a CIDR X to it, - // - Update Node call fails with a timeout, - // - Node is updated by some other component, NC sees an update and - // assigns CIDR Y to the Node, - // - Both CIDR X and CIDR Y are marked as used in the local cache, - // even though Node sees only CIDR Y - // The problem here is that in in-memory cache we see CIDR X as marked, - // which prevents it from being assigned to any new node. The cluster - // state is correct. - // Restart of NC fixes the issue. 
- if newNode.Spec.PodCIDR == "" { - return r.AllocateOrOccupyCIDR(newNode) - } - return nil - }), - DeleteFunc: util.CreateDeleteNodeHandler(r.ReleaseCIDR), - }) -} diff --git a/vendor/k8s.io/kubernetes/pkg/controller/node/ipam/sync/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/node/ipam/sync/BUILD index 30433dcaed12..bc34f435e843 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/node/ipam/sync/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/node/ipam/sync/BUILD @@ -3,6 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", srcs = ["sync.go"], + importpath = "k8s.io/kubernetes/pkg/controller/node/ipam/sync", visibility = ["//visibility:public"], deps = [ "//pkg/controller/node/ipam/cidrset:go_default_library", @@ -14,6 +15,7 @@ go_library( go_test( name = "go_default_test", srcs = ["sync_test.go"], + importpath = "k8s.io/kubernetes/pkg/controller/node/ipam/sync", library = ":go_default_library", deps = [ "//pkg/controller/node/ipam/cidrset:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/node/ipam/sync/sync.go b/vendor/k8s.io/kubernetes/pkg/controller/node/ipam/sync/sync.go index 60c9d59b6b1b..4995f4255436 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/node/ipam/sync/sync.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/node/ipam/sync/sync.go @@ -56,7 +56,7 @@ type kubeAPI interface { UpdateNodePodCIDR(ctx context.Context, node *v1.Node, cidrRange *net.IPNet) error // UpdateNodeNetworkUnavailable updates the network unavailable status for the node. UpdateNodeNetworkUnavailable(nodeName string, unavailable bool) error - // EmitNodeEvent emits an event for the given node. + // EmitNodeWarningEvent emits an event for the given node. EmitNodeWarningEvent(nodeName, reason, fmt string, args ...interface{}) } @@ -263,11 +263,6 @@ func (op *updateOp) updateNodeFromAlias(ctx context.Context, sync *NodeSync, nod glog.V(2).Infof("Node %q PodCIDR set to %v", node.Name, aliasRange) - if err := sync.kubeAPI.UpdateNodeNetworkUnavailable(node.Name, false); err != nil { - glog.Errorf("Error setting route status for node %q: %v", node.Name, err) - return err - } - if err := sync.kubeAPI.UpdateNodeNetworkUnavailable(node.Name, false); err != nil { glog.Errorf("Could not update node NetworkUnavailable status to false: %v", err) return err diff --git a/vendor/k8s.io/kubernetes/pkg/controller/node/node_controller.go b/vendor/k8s.io/kubernetes/pkg/controller/node/node_controller.go index 4b48ea26ed1e..ff77e6c7d5cd 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/node/node_controller.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/node/node_controller.go @@ -55,7 +55,6 @@ import ( utilnode "k8s.io/kubernetes/pkg/util/node" "k8s.io/kubernetes/pkg/util/system" taintutils "k8s.io/kubernetes/pkg/util/taints" - utilversion "k8s.io/kubernetes/pkg/util/version" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" ) @@ -65,7 +64,6 @@ func init() { } var ( - gracefulDeletionVersion = utilversion.MustParseSemantic("v1.1.0") // UnreachableTaintTemplate is the taint for when a node becomes unreachable. 
UnreachableTaintTemplate = &v1.Taint{ Key: algorithm.TaintNodeUnreachable, @@ -186,7 +184,6 @@ type Controller struct { cidrAllocator ipam.CIDRAllocator taintManager *scheduler.NoExecuteTaintManager - forcefullyDeletePod func(*v1.Pod) error nodeExistsInCloudProvider func(types.NodeName) (bool, error) computeZoneStateFunc func(nodeConditions []*v1.NodeCondition) (int, ZoneState) enterPartialDisruptionFunc func(nodeNum int) float32 @@ -243,17 +240,17 @@ func NewNodeController( } eventBroadcaster := record.NewBroadcaster() - recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "controllermanager"}) + recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "node-controller"}) eventBroadcaster.StartLogging(glog.Infof) glog.V(0).Infof("Sending events to api server.") eventBroadcaster.StartRecordingToSink( &v1core.EventSinkImpl{ - Interface: v1core.New(kubeClient.Core().RESTClient()).Events(""), + Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events(""), }) - if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil { - metrics.RegisterMetricAndTrackRateLimiterUsage("node_controller", kubeClient.Core().RESTClient().GetRateLimiter()) + if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil { + metrics.RegisterMetricAndTrackRateLimiterUsage("node_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter()) } if allocateNodeCIDRs { @@ -285,9 +282,6 @@ func NewNodeController( serviceCIDR: serviceCIDR, allocateNodeCIDRs: allocateNodeCIDRs, allocatorType: allocatorType, - forcefullyDeletePod: func(p *v1.Pod) error { - return util.ForcefullyDeletePod(kubeClient, p) - }, nodeExistsInCloudProvider: func(nodeName types.NodeName) (bool, error) { return util.NodeExistsInCloudProvider(cloud, nodeName) }, @@ -298,6 +292,7 @@ func NewNodeController( zoneStates: make(map[string]ZoneState), runTaintManager: runTaintManager, useTaintBasedEvictions: useTaintBasedEvictions && runTaintManager, + taintNodeByCondition: taintNodeByCondition, } if useTaintBasedEvictions { glog.Infof("Controller is using taint based evictions.") @@ -308,14 +303,12 @@ func NewNodeController( podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { - nc.maybeDeleteTerminatingPod(obj) pod := obj.(*v1.Pod) if nc.taintManager != nil { nc.taintManager.PodUpdated(nil, pod) } }, UpdateFunc: func(prev, obj interface{}) { - nc.maybeDeleteTerminatingPod(obj) prevPod := prev.(*v1.Pod) newPod := obj.(*v1.Pod) if nc.taintManager != nil { @@ -367,11 +360,10 @@ func NewNodeController( } else { var err error nc.cidrAllocator, err = ipam.New( - kubeClient, cloud, nc.allocatorType, nc.clusterCIDR, nc.serviceCIDR, nodeCIDRMaskSize) + kubeClient, cloud, nodeInformer, nc.allocatorType, nc.clusterCIDR, nc.serviceCIDR, nodeCIDRMaskSize) if err != nil { return nil, err } - nc.cidrAllocator.Register(nodeInformer) } } @@ -394,6 +386,7 @@ func NewNodeController( } if nc.taintNodeByCondition { + glog.Infof("Controller will taint node by condition.") nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: util.CreateAddNodeHandler(func(node *v1.Node) error { return nc.doNoScheduleTaintingPass(node) @@ -404,6 +397,17 @@ func NewNodeController( }) } + // NOTE(resouer): nodeInformer to substitute deprecated taint key (notReady -> not-ready). 
+ // Remove this logic when we don't need this backwards compatibility + nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: util.CreateAddNodeHandler(func(node *v1.Node) error { + return nc.doFixDeprecatedTaintKeyPass(node) + }), + UpdateFunc: util.CreateUpdateNodeHandler(func(_, newNode *v1.Node) error { + return nc.doFixDeprecatedTaintKeyPass(newNode) + }), + }) + nc.nodeLister = nodeInformer.Lister() nc.nodeInformerSynced = nodeInformer.Informer().HasSynced @@ -442,6 +446,44 @@ func (nc *Controller) doEvictionPass() { } } +// doFixDeprecatedTaintKeyPass checks and replaces deprecated taint key with proper key name if needed. +func (nc *Controller) doFixDeprecatedTaintKeyPass(node *v1.Node) error { + taintsToAdd := []*v1.Taint{} + taintsToDel := []*v1.Taint{} + + for _, taint := range node.Spec.Taints { + if taint.Key == algorithm.DeprecatedTaintNodeNotReady { + tDel := taint + taintsToDel = append(taintsToDel, &tDel) + + tAdd := taint + tAdd.Key = algorithm.TaintNodeNotReady + taintsToAdd = append(taintsToAdd, &tAdd) + } + + if taint.Key == algorithm.DeprecatedTaintNodeUnreachable { + tDel := taint + taintsToDel = append(taintsToDel, &tDel) + + tAdd := taint + tAdd.Key = algorithm.TaintNodeUnreachable + taintsToAdd = append(taintsToAdd, &tAdd) + } + } + + if len(taintsToAdd) == 0 && len(taintsToDel) == 0 { + return nil + } + + glog.Warningf("Detected deprecated taint keys: %v on node: %v, will substitute them with %v", + taintsToDel, node.GetName(), taintsToAdd) + + if !util.SwapNodeControllerTaint(nc.kubeClient, taintsToAdd, taintsToDel, node) { + return fmt.Errorf("failed to swap taints of node %+v", node) + } + return nil +} + func (nc *Controller) doNoScheduleTaintingPass(node *v1.Node) error { // Map node's condition to Taints. taints := []v1.Taint{} @@ -542,6 +584,12 @@ func (nc *Controller) Run(stopCh <-chan struct{}) { go wait.Until(nc.doEvictionPass, scheduler.NodeEvictionPeriod, wait.NeverStop) } + if nc.allocateNodeCIDRs { + if nc.allocatorType != ipam.IPAMFromClusterAllocatorType && nc.allocatorType != ipam.IPAMFromCloudAllocatorType { + go nc.cidrAllocator.Run(wait.NeverStop) + } + } + <-stopCh } @@ -611,14 +659,14 @@ func (nc *Controller) monitorNodeStatus() error { return true, nil } name := node.Name - node, err = nc.kubeClient.Core().Nodes().Get(name, metav1.GetOptions{}) + node, err = nc.kubeClient.CoreV1().Nodes().Get(name, metav1.GetOptions{}) if err != nil { glog.Errorf("Failed while getting a Node to retry updating NodeStatus. Probably Node %s was deleted.", name) return false, err } return false, nil }); err != nil { - glog.Errorf("Update status of Node %v from Controller error : %v. "+ + glog.Errorf("Update status of Node '%v' from Controller error: %v. 
"+ "Skipping - no pods will be evicted.", node.Name, err) continue } @@ -1018,7 +1066,7 @@ func (nc *Controller) tryUpdateNodeStatus(node *v1.Node) (time.Duration, v1.Node _, currentCondition := v1node.GetNodeCondition(&node.Status, v1.NodeReady) if !apiequality.Semantic.DeepEqual(currentCondition, &observedReadyCondition) { - if _, err = nc.kubeClient.Core().Nodes().UpdateStatus(node); err != nil { + if _, err = nc.kubeClient.CoreV1().Nodes().UpdateStatus(node); err != nil { glog.Errorf("Error updating node %s: %v", node.Name, err) return gracePeriod, observedReadyCondition, currentReadyCondition, err } @@ -1151,55 +1199,3 @@ func (nc *Controller) ComputeZoneState(nodeReadyConditions []*v1.NodeCondition) return notReadyNodes, stateNormal } } - -// maybeDeleteTerminatingPod non-gracefully deletes pods that are terminating -// that should not be gracefully terminated. -func (nc *Controller) maybeDeleteTerminatingPod(obj interface{}) { - pod, ok := obj.(*v1.Pod) - if !ok { - tombstone, ok := obj.(cache.DeletedFinalStateUnknown) - if !ok { - glog.Errorf("Couldn't get object from tombstone %#v", obj) - return - } - pod, ok = tombstone.Obj.(*v1.Pod) - if !ok { - glog.Errorf("Tombstone contained object that is not a Pod %#v", obj) - return - } - } - - // consider only terminating pods - if pod.DeletionTimestamp == nil { - return - } - - node, err := nc.nodeLister.Get(pod.Spec.NodeName) - // if there is no such node, do nothing and let the podGC clean it up. - if apierrors.IsNotFound(err) { - return - } - if err != nil { - // this can only happen if the Store.KeyFunc has a problem creating - // a key for the pod. If it happens once, it will happen again so - // don't bother requeuing the pod. - utilruntime.HandleError(err) - return - } - - // delete terminating pods that have been scheduled on - // nodes that do not support graceful termination - // TODO(mikedanese): this can be removed when we no longer - // guarantee backwards compatibility of master API to kubelets with - // versions less than 1.1.0 - v, err := utilversion.ParseSemantic(node.Status.NodeInfo.KubeletVersion) - if err != nil { - glog.V(0).Infof("Couldn't parse version %q of node: %v", node.Status.NodeInfo.KubeletVersion, err) - utilruntime.HandleError(nc.forcefullyDeletePod(pod)) - return - } - if v.LessThan(gracefulDeletionVersion) { - utilruntime.HandleError(nc.forcefullyDeletePod(pod)) - return - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/controller/node/scheduler/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/node/scheduler/BUILD index 563fcefd032e..eb6465351405 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/node/scheduler/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/node/scheduler/BUILD @@ -13,6 +13,7 @@ go_test( "taint_controller_test.go", "timed_workers_test.go", ], + importpath = "k8s.io/kubernetes/pkg/controller/node/scheduler", library = ":go_default_library", deps = [ "//pkg/controller/testutil:go_default_library", @@ -32,9 +33,10 @@ go_library( "taint_controller.go", "timed_workers.go", ], + importpath = "k8s.io/kubernetes/pkg/controller/node/scheduler", deps = [ - "//pkg/api/helper:go_default_library", - "//pkg/api/v1/helper:go_default_library", + "//pkg/apis/core/helper:go_default_library", + "//pkg/apis/core/v1/helper:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/node/scheduler/rate_limited_queue.go 
b/vendor/k8s.io/kubernetes/pkg/controller/node/scheduler/rate_limited_queue.go index 984443ff52b1..da544fa96ca2 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/node/scheduler/rate_limited_queue.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/node/scheduler/rate_limited_queue.go @@ -257,7 +257,7 @@ func (q *RateLimitedTimedQueue) Try(fn ActionFunc) { } // Add value to the queue to be processed. Won't add the same -// value(comparsion by value) a second time if it was already added +// value(comparison by value) a second time if it was already added // and not removed. func (q *RateLimitedTimedQueue) Add(value string, uid interface{}) bool { now := now() diff --git a/vendor/k8s.io/kubernetes/pkg/controller/node/scheduler/taint_controller.go b/vendor/k8s.io/kubernetes/pkg/controller/node/scheduler/taint_controller.go index 9892defc5fe8..bf641f69fb48 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/node/scheduler/taint_controller.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/node/scheduler/taint_controller.go @@ -23,8 +23,8 @@ import ( "k8s.io/api/core/v1" clientset "k8s.io/client-go/kubernetes" - "k8s.io/kubernetes/pkg/api/helper" - v1helper "k8s.io/kubernetes/pkg/api/v1/helper" + "k8s.io/kubernetes/pkg/apis/core/helper" + v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" @@ -88,7 +88,7 @@ func deletePodHandler(c clientset.Interface, emitEventFunc func(types.Namespaced } var err error for i := 0; i < retries; i++ { - err = c.Core().Pods(ns).Delete(name, &metav1.DeleteOptions{}) + err = c.CoreV1().Pods(ns).Delete(name, &metav1.DeleteOptions{}) if err == nil { break } @@ -110,12 +110,12 @@ func getNoExecuteTaints(taints []v1.Taint) []v1.Taint { func getPodsAssignedToNode(c clientset.Interface, nodeName string) ([]v1.Pod, error) { selector := fields.SelectorFromSet(fields.Set{"spec.nodeName": nodeName}) - pods, err := c.Core().Pods(v1.NamespaceAll).List(metav1.ListOptions{ + pods, err := c.CoreV1().Pods(v1.NamespaceAll).List(metav1.ListOptions{ FieldSelector: selector.String(), LabelSelector: labels.Everything().String(), }) for i := 0; i < retries && err != nil; i++ { - pods, err = c.Core().Pods(v1.NamespaceAll).List(metav1.ListOptions{ + pods, err = c.CoreV1().Pods(v1.NamespaceAll).List(metav1.ListOptions{ FieldSelector: selector.String(), LabelSelector: labels.Everything().String(), }) @@ -152,11 +152,11 @@ func getMinTolerationTime(tolerations []v1.Toleration) time.Duration { // communicate with the API server. 
func NewNoExecuteTaintManager(c clientset.Interface) *NoExecuteTaintManager { eventBroadcaster := record.NewBroadcaster() - recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "controllermanager"}) + recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "taint-controller"}) eventBroadcaster.StartLogging(glog.Infof) if c != nil { glog.V(0).Infof("Sending events to api server.") - eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(c.Core().RESTClient()).Events("")}) + eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(c.CoreV1().RESTClient()).Events("")}) } else { glog.Fatalf("kubeClient is nil when starting NodeController") } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/node/util/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/node/util/BUILD index d1ed571cfeaf..b14d0cb92607 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/node/util/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/node/util/BUILD @@ -8,13 +8,13 @@ load( go_library( name = "go_default_library", srcs = ["controller_utils.go"], + importpath = "k8s.io/kubernetes/pkg/controller/node/util", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/cloudprovider:go_default_library", "//pkg/controller:go_default_library", "//pkg/kubelet/util/format:go_default_library", "//pkg/util/node:go_default_library", - "//pkg/util/version:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/node/util/controller_utils.go b/vendor/k8s.io/kubernetes/pkg/controller/node/util/controller_utils.go index 75d39f12465b..a9d8e08fadc3 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/node/util/controller_utils.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/node/util/controller_utils.go @@ -33,12 +33,11 @@ import ( "k8s.io/api/core/v1" clientset "k8s.io/client-go/kubernetes" extensionslisters "k8s.io/client-go/listers/extensions/v1beta1" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/kubelet/util/format" nodepkg "k8s.io/kubernetes/pkg/util/node" - utilversion "k8s.io/kubernetes/pkg/util/version" "github.com/golang/glog" ) @@ -47,10 +46,6 @@ var ( // ErrCloudInstance occurs when the cloud provider does not support // the Instances API. ErrCloudInstance = errors.New("cloud provider doesn't support instances") - // podStatusReconciliationVersion is the the minimum kubelet version - // for which the nodecontroller can safely flip pod.Status to - // NotReady. 
- podStatusReconciliationVersion = utilversion.MustParseSemantic("v1.2.0") ) // DeletePods will delete all pods from master running on given node, @@ -60,7 +55,7 @@ func DeletePods(kubeClient clientset.Interface, recorder record.EventRecorder, n remaining := false selector := fields.OneTermEqualSelector(api.PodHostField, nodeName).String() options := metav1.ListOptions{FieldSelector: selector} - pods, err := kubeClient.Core().Pods(metav1.NamespaceAll).List(options) + pods, err := kubeClient.CoreV1().Pods(metav1.NamespaceAll).List(options) var updateErrList []error if err != nil { @@ -98,7 +93,7 @@ func DeletePods(kubeClient clientset.Interface, recorder record.EventRecorder, n glog.V(2).Infof("Starting deletion of pod %v/%v", pod.Namespace, pod.Name) recorder.Eventf(&pod, v1.EventTypeNormal, "NodeControllerEviction", "Marking for deletion Pod %s from Node %s", pod.Name, nodeName) - if err := kubeClient.Core().Pods(pod.Namespace).Delete(pod.Name, nil); err != nil { + if err := kubeClient.CoreV1().Pods(pod.Namespace).Delete(pod.Name, nil); err != nil { return false, err } remaining = true @@ -123,27 +118,16 @@ func SetPodTerminationReason(kubeClient clientset.Interface, pod *v1.Pod, nodeNa var updatedPod *v1.Pod var err error - if updatedPod, err = kubeClient.Core().Pods(pod.Namespace).UpdateStatus(pod); err != nil { + if updatedPod, err = kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(pod); err != nil { return nil, err } return updatedPod, nil } -// ForcefullyDeletePod deletes the pod immediately. -func ForcefullyDeletePod(c clientset.Interface, pod *v1.Pod) error { - var zero int64 - glog.Infof("NodeController is force deleting Pod: %v:%v", pod.Namespace, pod.Name) - err := c.Core().Pods(pod.Namespace).Delete(pod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &zero}) - if err == nil { - glog.V(4).Infof("forceful deletion of %s succeeded", pod.Name) - } - return err -} - // ForcefullyDeleteNode deletes the node immediately. The pods on the // node are cleaned up by the podGC. func ForcefullyDeleteNode(kubeClient clientset.Interface, nodeName string) error { - if err := kubeClient.Core().Nodes().Delete(nodeName, nil); err != nil { + if err := kubeClient.CoreV1().Nodes().Delete(nodeName, nil); err != nil { return fmt.Errorf("unable to delete node %q: %v", nodeName, err) } return nil @@ -152,17 +136,10 @@ func ForcefullyDeleteNode(kubeClient clientset.Interface, nodeName string) error // MarkAllPodsNotReady updates ready status of all pods running on // given node from master return true if success func MarkAllPodsNotReady(kubeClient clientset.Interface, node *v1.Node) error { - // Don't set pods to NotReady if the kubelet is running a version that - // doesn't understand how to correct readiness. - // TODO: Remove this check when we no longer guarantee backward compatibility - // with node versions < 1.2.0. 
- if NodeRunningOutdatedKubelet(node) { - return nil - } nodeName := node.Name glog.V(2).Infof("Update ready status of pods on node [%v]", nodeName) opts := metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, nodeName).String()} - pods, err := kubeClient.Core().Pods(metav1.NamespaceAll).List(opts) + pods, err := kubeClient.CoreV1().Pods(metav1.NamespaceAll).List(opts) if err != nil { return err } @@ -178,7 +155,7 @@ func MarkAllPodsNotReady(kubeClient clientset.Interface, node *v1.Node) error { if cond.Type == v1.PodReady { pod.Status.Conditions[i].Status = v1.ConditionFalse glog.V(2).Infof("Updating ready status of pod %v to false", pod.Name) - _, err := kubeClient.Core().Pods(pod.Namespace).UpdateStatus(&pod) + _, err := kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(&pod) if err != nil { glog.Warningf("Failed to update status for pod %q: %v", format.Pod(&pod), err) errMsg = append(errMsg, fmt.Sprintf("%v", err)) @@ -193,23 +170,6 @@ func MarkAllPodsNotReady(kubeClient clientset.Interface, node *v1.Node) error { return fmt.Errorf("%v", strings.Join(errMsg, "; ")) } -// NodeRunningOutdatedKubelet returns true if the kubeletVersion reported -// in the nodeInfo of the given node is "outdated", meaning < 1.2.0. -// Older versions were inflexible and modifying pod.Status directly through -// the apiserver would result in unexpected outcomes. -func NodeRunningOutdatedKubelet(node *v1.Node) bool { - v, err := utilversion.ParseSemantic(node.Status.NodeInfo.KubeletVersion) - if err != nil { - glog.Errorf("couldn't parse version %q of node %v", node.Status.NodeInfo.KubeletVersion, err) - return true - } - if v.LessThan(podStatusReconciliationVersion) { - glog.Infof("Node %v running kubelet at (%v) which is less than the minimum version that allows nodecontroller to mark pods NotReady (%v).", node.Name, v, podStatusReconciliationVersion) - return true - } - return false -} - // NodeExistsInCloudProvider returns true if the node exists in the // cloud provider. func NodeExistsInCloudProvider(cloud cloudprovider.Interface, nodeName types.NodeName) (bool, error) { @@ -256,7 +216,8 @@ func RecordNodeStatusChange(recorder record.EventRecorder, node *v1.Node, newSta // otherwise. func SwapNodeControllerTaint(kubeClient clientset.Interface, taintsToAdd, taintsToRemove []*v1.Taint, node *v1.Node) bool { for _, taintToAdd := range taintsToAdd { - taintToAdd.TimeAdded = metav1.Now() + now := metav1.Now() + taintToAdd.TimeAdded = &now } err := controller.AddOrUpdateTaintOnNode(kubeClient, node.Name, taintsToAdd...) 
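The doFixDeprecatedTaintKeyPass added earlier in this patch rewrites taints that still carry the pre-1.9 keys (notReady, unreachable) to the current ones: each matching taint is copied by value into a "delete" list, and a second copy with the renamed key goes into an "add" list that is applied via SwapNodeControllerTaint. The sketch below is a minimal, self-contained illustration of that rewrite, not the vendored code; the local taint type and literal key strings stand in for v1.Taint and the algorithm package constants, and setting TimeAdded through a pointer mirrors the taintToAdd.TimeAdded = &now change in controller_utils.go (in the real code that happens inside SwapNodeControllerTaint).

package main

import (
	"fmt"
	"time"
)

// taint is a local stand-in for v1.Taint; TimeAdded is a pointer, matching the 1.9 API.
type taint struct {
	Key, Value, Effect string
	TimeAdded          *time.Time
}

// deprecatedToCurrent maps the old taint keys to their replacements (illustrative strings).
var deprecatedToCurrent = map[string]string{
	"node.alpha.kubernetes.io/notReady":    "node.kubernetes.io/not-ready",
	"node.alpha.kubernetes.io/unreachable": "node.kubernetes.io/unreachable",
}

// splitDeprecatedTaints mirrors the pass: copy each matching taint by value so the
// node's slice is untouched, and rename the key only on the copy destined for "add".
func splitDeprecatedTaints(taints []taint) (toAdd, toDel []*taint) {
	now := time.Now()
	for _, t := range taints {
		newKey, ok := deprecatedToCurrent[t.Key]
		if !ok {
			continue
		}
		tDel := t
		toDel = append(toDel, &tDel)

		tAdd := t
		tAdd.Key = newKey
		tAdd.TimeAdded = &now
		toAdd = append(toAdd, &tAdd)
	}
	return toAdd, toDel
}

func main() {
	taints := []taint{{Key: "node.alpha.kubernetes.io/notReady", Effect: "NoExecute"}}
	add, del := splitDeprecatedTaints(taints)
	fmt.Printf("add %d / delete %d; new key: %s\n", len(add), len(del), add[0].Key)
}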
diff --git a/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/BUILD index de7af610fe45..08701232be6f 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/BUILD @@ -14,8 +14,9 @@ go_library( "rate_limitters.go", "replica_calculator.go", ], + importpath = "k8s.io/kubernetes/pkg/controller/podautoscaler", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/api/v1/pod:go_default_library", "//pkg/controller:go_default_library", "//pkg/controller/podautoscaler/metrics:go_default_library", @@ -23,9 +24,9 @@ go_library( "//vendor/k8s.io/api/autoscaling/v1:go_default_library", "//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", @@ -38,8 +39,8 @@ go_library( "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:go_default_library", "//vendor/k8s.io/client-go/listers/autoscaling/v1:go_default_library", + "//vendor/k8s.io/client-go/scale:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", "//vendor/k8s.io/client-go/tools/record:go_default_library", "//vendor/k8s.io/client-go/util/workqueue:go_default_library", @@ -54,11 +55,13 @@ go_test( "legacy_replica_calculator_test.go", "replica_calculator_test.go", ], + importpath = "k8s.io/kubernetes/pkg/controller/podautoscaler", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/autoscaling:go_default_library", "//pkg/apis/autoscaling/install:go_default_library", + "//pkg/apis/core/install:go_default_library", "//pkg/apis/extensions/install:go_default_library", "//pkg/controller:go_default_library", "//pkg/controller/podautoscaler/metrics:go_default_library", @@ -67,15 +70,16 @@ go_test( "//vendor/k8s.io/api/autoscaling/v1:go_default_library", "//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", "//vendor/k8s.io/client-go/informers:go_default_library", "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", + "//vendor/k8s.io/client-go/scale/fake:go_default_library", "//vendor/k8s.io/client-go/testing:go_default_library", 
"//vendor/k8s.io/heapster/metrics/api/v1/types:go_default_library", "//vendor/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal.go b/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal.go index b8c50795f055..65a7a6f17021 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal.go @@ -25,9 +25,9 @@ import ( autoscalingv1 "k8s.io/api/autoscaling/v1" autoscalingv2 "k8s.io/api/autoscaling/v2beta1" "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" apiequality "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" + apimeta "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -39,49 +39,27 @@ import ( "k8s.io/client-go/kubernetes/scheme" autoscalingclient "k8s.io/client-go/kubernetes/typed/autoscaling/v1" v1core "k8s.io/client-go/kubernetes/typed/core/v1" - extensionsclient "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" autoscalinglisters "k8s.io/client-go/listers/autoscaling/v1" + scaleclient "k8s.io/client-go/scale" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/controller" ) var ( - // Usage shoud exceed the tolerance before we start downscale or upscale the pods. - // TODO: make it a flag or HPA spec element. - tolerance = 0.1 - scaleUpLimitFactor = 2.0 scaleUpLimitMinimum = 4.0 ) -func calculateScaleUpLimit(currentReplicas int32) int32 { - return int32(math.Max(scaleUpLimitFactor*float64(currentReplicas), scaleUpLimitMinimum)) -} - -// UnsafeConvertToVersionVia is like api.Scheme.UnsafeConvertToVersion, but it does so via an internal version first. -// We use it since working with v2alpha1 is convenient here, but we want to use the v1 client (and -// can't just use the internal version). Note that conversion mutates the object, so you need to deepcopy -// *before* you call this if the input object came out of a shared cache. -func UnsafeConvertToVersionVia(obj runtime.Object, externalVersion schema.GroupVersion) (runtime.Object, error) { - objInt, err := api.Scheme.UnsafeConvertToVersion(obj, schema.GroupVersion{Group: externalVersion.Group, Version: runtime.APIVersionInternal}) - if err != nil { - return nil, fmt.Errorf("failed to convert the given object to the internal version: %v", err) - } - - objExt, err := api.Scheme.UnsafeConvertToVersion(objInt, externalVersion) - if err != nil { - return nil, fmt.Errorf("failed to convert the given object back to the external version: %v", err) - } - - return objExt, err -} - +// HorizontalController is responsible for the synchronizing HPA objects stored +// in the system with the actual deployments/replication controllers they +// control. type HorizontalController struct { - scaleNamespacer extensionsclient.ScalesGetter + scaleNamespacer scaleclient.ScalesGetter hpaNamespacer autoscalingclient.HorizontalPodAutoscalersGetter + mapper apimeta.RESTMapper replicaCalc *ReplicaCalculator eventRecorder record.EventRecorder @@ -98,10 +76,12 @@ type HorizontalController struct { queue workqueue.RateLimitingInterface } +// NewHorizontalController creates a new HorizontalController. 
func NewHorizontalController( evtNamespacer v1core.EventsGetter, - scaleNamespacer extensionsclient.ScalesGetter, + scaleNamespacer scaleclient.ScalesGetter, hpaNamespacer autoscalingclient.HorizontalPodAutoscalersGetter, + mapper apimeta.RESTMapper, replicaCalc *ReplicaCalculator, hpaInformer autoscalinginformers.HorizontalPodAutoscalerInformer, resyncPeriod time.Duration, @@ -110,6 +90,7 @@ func NewHorizontalController( ) *HorizontalController { broadcaster := record.NewBroadcaster() + broadcaster.StartLogging(glog.Infof) // TODO: remove the wrapper when every clients have moved to use the clientset. broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: evtNamespacer.Events("")}) recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "horizontal-pod-autoscaler"}) @@ -121,7 +102,8 @@ func NewHorizontalController( hpaNamespacer: hpaNamespacer, upscaleForbiddenWindow: upscaleForbiddenWindow, downscaleForbiddenWindow: downscaleForbiddenWindow, - queue: workqueue.NewNamedRateLimitingQueue(NewDefaultHPARateLimiter(resyncPeriod), "horizontalpodautoscaler"), + queue: workqueue.NewNamedRateLimitingQueue(NewDefaultHPARateLimiter(resyncPeriod), "horizontalpodautoscaler"), + mapper: mapper, } hpaInformer.Informer().AddEventHandlerWithResyncPeriod( @@ -138,6 +120,7 @@ func NewHorizontalController( return hpaController } +// Run begins watching and syncing. func (a *HorizontalController) Run(stopCh <-chan struct{}) { defer utilruntime.HandleCrash() defer a.queue.ShutDown() @@ -210,7 +193,7 @@ func (a *HorizontalController) processNextWorkItem() bool { // Computes the desired number of replicas for the metric specifications listed in the HPA, returning the maximum // of the computed replica counts, a description of the associated metric, and the statuses of all metrics // computed. 
-func (a *HorizontalController) computeReplicasForMetrics(hpa *autoscalingv2.HorizontalPodAutoscaler, scale *extensions.Scale, +func (a *HorizontalController) computeReplicasForMetrics(hpa *autoscalingv2.HorizontalPodAutoscaler, scale *autoscalingv1.Scale, metricSpecs []autoscalingv2.MetricSpec) (replicas int32, metric string, statuses []autoscalingv2.MetricStatus, timestamp time.Time, err error) { currentReplicas := scale.Status.Replicas @@ -218,21 +201,14 @@ func (a *HorizontalController) computeReplicasForMetrics(hpa *autoscalingv2.Hori statuses = make([]autoscalingv2.MetricStatus, len(metricSpecs)) for i, metricSpec := range metricSpecs { - if len(scale.Status.Selector) == 0 && len(scale.Status.TargetSelector) == 0 { + if scale.Status.Selector == "" { errMsg := "selector is required" a.eventRecorder.Event(hpa, v1.EventTypeWarning, "SelectorRequired", errMsg) setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "InvalidSelector", "the HPA target's scale is missing a selector") return 0, "", nil, time.Time{}, fmt.Errorf(errMsg) } - var selector labels.Selector - var err error - if len(scale.Status.Selector) > 0 { - selector = labels.SelectorFromSet(labels.Set(scale.Status.Selector)) - err = nil - } else { - selector, err = labels.Parse(scale.Status.TargetSelector) - } + selector, err := labels.Parse(scale.Status.Selector) if err != nil { errMsg := fmt.Sprintf("couldn't convert selector into a corresponding internal selector object: %v", err) a.eventRecorder.Event(hpa, v1.EventTypeWarning, "InvalidSelector", errMsg) @@ -360,7 +336,7 @@ func (a *HorizontalController) reconcileAutoscaler(hpav1Shared *autoscalingv1.Ho // make a copy so that we never mutate the shared informer cache (conversion can mutate the object) hpav1 := hpav1Shared.DeepCopy() // then, convert to autoscaling/v2, which makes our lives easier when calculating metrics - hpaRaw, err := UnsafeConvertToVersionVia(hpav1, autoscalingv2.SchemeGroupVersion) + hpaRaw, err := unsafeConvertToVersionVia(hpav1, autoscalingv2.SchemeGroupVersion) if err != nil { a.eventRecorder.Event(hpav1, v1.EventTypeWarning, "FailedConvertHPA", err.Error()) return fmt.Errorf("failed to convert the given HPA to %s: %v", autoscalingv2.SchemeGroupVersion.String(), err) @@ -370,7 +346,28 @@ func (a *HorizontalController) reconcileAutoscaler(hpav1Shared *autoscalingv1.Ho reference := fmt.Sprintf("%s/%s/%s", hpa.Spec.ScaleTargetRef.Kind, hpa.Namespace, hpa.Spec.ScaleTargetRef.Name) - scale, err := a.scaleNamespacer.Scales(hpa.Namespace).Get(hpa.Spec.ScaleTargetRef.Kind, hpa.Spec.ScaleTargetRef.Name) + targetGV, err := schema.ParseGroupVersion(hpa.Spec.ScaleTargetRef.APIVersion) + if err != nil { + a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetScale", err.Error()) + setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionFalse, "FailedGetScale", "the HPA controller was unable to get the target's current scale: %v", err) + a.updateStatusIfNeeded(hpaStatusOriginal, hpa) + return fmt.Errorf("invalid API version in scale target reference: %v", err) + } + + targetGK := schema.GroupKind{ + Group: targetGV.Group, + Kind: hpa.Spec.ScaleTargetRef.Kind, + } + + mappings, err := a.mapper.RESTMappings(targetGK) + if err != nil { + a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetScale", err.Error()) + setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionFalse, "FailedGetScale", "the HPA controller was unable to get the target's current scale: %v", err) + a.updateStatusIfNeeded(hpaStatusOriginal, hpa) + return fmt.Errorf("unable to 
determine resource for scale target reference: %v", err) + } + + scale, targetGR, err := a.scaleForResourceMappings(hpa.Namespace, hpa.Spec.ScaleTargetRef.Name, mappings) if err != nil { a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetScale", err.Error()) setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionFalse, "FailedGetScale", "the HPA controller was unable to get the target's current scale: %v", err) @@ -431,30 +428,7 @@ func (a *HorizontalController) reconcileAutoscaler(hpav1Shared *autoscalingv1.Ho rescaleReason = "All metrics below target" } - // Do not upscale too much to prevent incorrect rapid increase of the number of master replicas caused by - // bogus CPU usage report from heapster/kubelet (like in issue #32304). - scaleUpLimit := calculateScaleUpLimit(currentReplicas) - - switch { - case desiredReplicas > scaleUpLimit: - setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, "ScaleUpLimit", "the desired replica count is increasing faster than the maximum scale rate") - desiredReplicas = scaleUpLimit - case hpa.Spec.MinReplicas != nil && desiredReplicas < *hpa.Spec.MinReplicas: - // make sure we aren't below our minimum - setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, "TooFewReplicas", "the desired replica count was less than the minimum replica count") - desiredReplicas = *hpa.Spec.MinReplicas - case desiredReplicas == 0: - // never scale down to 0, reserved for disabling autoscaling - setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, "TooFewReplicas", "the desired replica count was zero") - desiredReplicas = 1 - case desiredReplicas > hpa.Spec.MaxReplicas: - // make sure we aren't above our maximum - setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, "TooManyReplicas", "the desired replica count was more than the maximum replica count") - desiredReplicas = hpa.Spec.MaxReplicas - default: - // mark that we're within acceptible limits - setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionFalse, "DesiredWithinRange", "the desired replica count is within the acceptible range") - } + desiredReplicas = a.normalizeDesiredReplicas(hpa, currentReplicas, desiredReplicas) rescale = a.shouldScale(hpa, currentReplicas, desiredReplicas, timestamp) backoffDown := false @@ -484,7 +458,7 @@ func (a *HorizontalController) reconcileAutoscaler(hpav1Shared *autoscalingv1.Ho if rescale { scale.Spec.Replicas = desiredReplicas - _, err = a.scaleNamespacer.Scales(hpa.Namespace).Update(hpa.Spec.ScaleTargetRef.Kind, scale) + _, err = a.scaleNamespacer.Scales(hpa.Namespace).Update(targetGR, scale) if err != nil { a.eventRecorder.Eventf(hpa, v1.EventTypeWarning, "FailedRescale", "New size: %d; reason: %s; error: %v", desiredReplicas, rescaleReason, err.Error()) setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionFalse, "FailedUpdateScale", "the HPA controller was unable to update the target scale: %v", err) @@ -507,6 +481,75 @@ func (a *HorizontalController) reconcileAutoscaler(hpav1Shared *autoscalingv1.Ho return a.updateStatusIfNeeded(hpaStatusOriginal, hpa) } +// normalizeDesiredReplicas takes the metrics desired replicas value and normalizes it based on the appropriate conditions (i.e. < maxReplicas, > +// minReplicas, etc...) 
+func (a *HorizontalController) normalizeDesiredReplicas(hpa *autoscalingv2.HorizontalPodAutoscaler, currentReplicas int32, prenormalizedDesiredReplicas int32) int32 { + var minReplicas int32 + if hpa.Spec.MinReplicas != nil { + minReplicas = *hpa.Spec.MinReplicas + } else { + minReplicas = 0 + } + + desiredReplicas, condition, reason := convertDesiredReplicasWithRules(currentReplicas, prenormalizedDesiredReplicas, minReplicas, hpa.Spec.MaxReplicas) + + if desiredReplicas == prenormalizedDesiredReplicas { + setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionFalse, condition, reason) + } else { + setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, condition, reason) + } + + return desiredReplicas +} + +// convertDesiredReplicas performs the actual normalization, without depending on `HorizontalController` or `HorizontalPodAutoscaler` +func convertDesiredReplicasWithRules(currentReplicas, desiredReplicas, hpaMinReplicas, hpaMaxReplicas int32) (int32, string, string) { + + var minimumAllowedReplicas int32 + var maximumAllowedReplicas int32 + + var possibleLimitingCondition string + var possibleLimitingReason string + + if hpaMinReplicas == 0 { + minimumAllowedReplicas = 1 + possibleLimitingReason = "the desired replica count is zero" + } else { + minimumAllowedReplicas = hpaMinReplicas + possibleLimitingReason = "the desired replica count is less than the minimum replica count" + } + + // Do not upscale too much to prevent incorrect rapid increase of the number of master replicas caused by + // bogus CPU usage report from heapster/kubelet (like in issue #32304). + scaleUpLimit := calculateScaleUpLimit(currentReplicas) + + if hpaMaxReplicas > scaleUpLimit { + maximumAllowedReplicas = scaleUpLimit + + possibleLimitingCondition = "ScaleUpLimit" + possibleLimitingReason = "the desired replica count is increasing faster than the maximum scale rate" + } else { + maximumAllowedReplicas = hpaMaxReplicas + + possibleLimitingCondition = "TooManyReplicas" + possibleLimitingReason = "the desired replica count is more than the maximum replica count" + } + + if desiredReplicas < minimumAllowedReplicas { + possibleLimitingCondition = "TooFewReplicas" + + return minimumAllowedReplicas, possibleLimitingCondition, possibleLimitingReason + } else if desiredReplicas > maximumAllowedReplicas { + return maximumAllowedReplicas, possibleLimitingCondition, possibleLimitingReason + } + + return desiredReplicas, "DesiredWithinRange", "the desired count is within the acceptable range" +} + +func calculateScaleUpLimit(currentReplicas int32) int32 { + return int32(math.Max(scaleUpLimitFactor*float64(currentReplicas), scaleUpLimitMinimum)) +} + func (a *HorizontalController) shouldScale(hpa *autoscalingv2.HorizontalPodAutoscaler, currentReplicas, desiredReplicas int32, timestamp time.Time) bool { if desiredReplicas == currentReplicas { return false @@ -531,6 +574,35 @@ func (a *HorizontalController) shouldScale(hpa *autoscalingv2.HorizontalPodAutos return false } +// scaleForResourceMappings attempts to fetch the scale for the +// resource with the given name and namespace, trying each RESTMapping +// in turn until a working one is found. If none work, the first error +// is returned. It returns both the scale, as well as the group-resource from +// the working mapping. 
+func (a *HorizontalController) scaleForResourceMappings(namespace, name string, mappings []*apimeta.RESTMapping) (*autoscalingv1.Scale, schema.GroupResource, error) { + var firstErr error + for i, mapping := range mappings { + targetGR := mapping.GroupVersionKind.GroupVersion().WithResource(mapping.Resource).GroupResource() + scale, err := a.scaleNamespacer.Scales(namespace).Get(targetGR, name) + if err == nil { + return scale, targetGR, nil + } + + // if this is the first error, remember it, + // then go on and try other mappings until we find a good one + if i == 0 { + firstErr = err + } + } + + // make sure we handle an empty set of mappings + if firstErr == nil { + firstErr = fmt.Errorf("unrecognized resource") + } + + return nil, schema.GroupResource{}, firstErr +} + // setCurrentReplicasInStatus sets the current replica count in the status of the HPA. func (a *HorizontalController) setCurrentReplicasInStatus(hpa *autoscalingv2.HorizontalPodAutoscaler, currentReplicas int32) { a.setStatus(hpa, currentReplicas, hpa.Status.DesiredReplicas, hpa.Status.CurrentMetrics, false) @@ -565,7 +637,7 @@ func (a *HorizontalController) updateStatusIfNeeded(oldStatus *autoscalingv2.Hor // updateStatus actually does the update request for the status of the given HPA func (a *HorizontalController) updateStatus(hpa *autoscalingv2.HorizontalPodAutoscaler) error { // convert back to autoscalingv1 - hpaRaw, err := UnsafeConvertToVersionVia(hpa, autoscalingv1.SchemeGroupVersion) + hpaRaw, err := unsafeConvertToVersionVia(hpa, autoscalingv1.SchemeGroupVersion) if err != nil { a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedConvertHPA", err.Error()) return fmt.Errorf("failed to convert the given HPA to %s: %v", autoscalingv2.SchemeGroupVersion.String(), err) @@ -581,6 +653,24 @@ func (a *HorizontalController) updateStatus(hpa *autoscalingv2.HorizontalPodAuto return nil } +// unsafeConvertToVersionVia is like Scheme.UnsafeConvertToVersion, but it does so via an internal version first. +// We use it since working with v2alpha1 is convenient here, but we want to use the v1 client (and +// can't just use the internal version). Note that conversion mutates the object, so you need to deepcopy +// *before* you call this if the input object came out of a shared cache. +func unsafeConvertToVersionVia(obj runtime.Object, externalVersion schema.GroupVersion) (runtime.Object, error) { + objInt, err := legacyscheme.Scheme.UnsafeConvertToVersion(obj, schema.GroupVersion{Group: externalVersion.Group, Version: runtime.APIVersionInternal}) + if err != nil { + return nil, fmt.Errorf("failed to convert the given object to the internal version: %v", err) + } + + objExt, err := legacyscheme.Scheme.UnsafeConvertToVersion(objInt, externalVersion) + if err != nil { + return nil, fmt.Errorf("failed to convert the given object back to the external version: %v", err) + } + + return objExt, err +} + // setCondition sets the specific condition type on the given HPA to the specified value with the given reason // and message. The message and args are treated like a format string. The condition will be added if it is // not present. 
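The switch statement removed from reconcileAutoscaler is reintroduced above as normalizeDesiredReplicas / convertDesiredReplicasWithRules: the metric-derived replica count is clamped between max(minReplicas, 1) and min(maxReplicas, scaleUpLimit), where scaleUpLimit = max(2 * currentReplicas, 4). The following is a simplified, self-contained sketch of that clamping with a worked example; it is a stand-in for the vendored function, with the reason strings shortened to condition names only.

package main

import (
	"fmt"
	"math"
)

// clampDesiredReplicas bounds the desired count below by max(min, 1) and above by
// min(max, scaleUpLimit), returning the chosen count and the limiting condition.
func clampDesiredReplicas(current, desired, min, max int32) (int32, string) {
	lower := min
	if lower == 0 {
		lower = 1 // scaling to zero is reserved for disabling autoscaling
	}
	scaleUpLimit := int32(math.Max(2*float64(current), 4))
	upper, limitCond := max, "TooManyReplicas"
	if scaleUpLimit < upper {
		upper, limitCond = scaleUpLimit, "ScaleUpLimit"
	}
	switch {
	case desired < lower:
		return lower, "TooFewReplicas"
	case desired > upper:
		return upper, limitCond
	default:
		return desired, "DesiredWithinRange"
	}
}

func main() {
	// With 3 current replicas the scale-up limit is 6, so a metric asking for 20
	// replicas is cut to 6 even though maxReplicas would allow 15.
	got, cond := clampDesiredReplicas(3, 20, 1, 15)
	fmt.Println(got, cond) // 6 ScaleUpLimit
}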
diff --git a/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/BUILD index 2514548ac76a..f0d328fdb575 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/BUILD @@ -14,6 +14,7 @@ go_library( "rest_metrics_client.go", "utilization.go", ], + importpath = "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library", @@ -37,9 +38,10 @@ go_test( "legacy_metrics_client_test.go", "rest_metrics_client_test.go", ], + importpath = "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/extensions/install:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/legacy_metrics_client.go b/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/legacy_metrics_client.go index 36c24da2f633..5d13acbd65be 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/legacy_metrics_client.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/legacy_metrics_client.go @@ -54,8 +54,8 @@ type HeapsterMetricsClient struct { func NewHeapsterMetricsClient(client clientset.Interface, namespace, scheme, service, port string) MetricsClient { return &HeapsterMetricsClient{ - services: client.Core().Services(namespace), - podsGetter: client.Core(), + services: client.CoreV1().Services(namespace), + podsGetter: client.CoreV1(), heapsterScheme: scheme, heapsterService: service, heapsterPort: port, diff --git a/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/utilization.go b/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/utilization.go index 946a43bb1253..0417b9a0e3dd 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/utilization.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/utilization.go @@ -21,7 +21,7 @@ import ( ) // GetResourceUtilizationRatio takes in a set of metrics, a set of matching requests, -// and a target utilization percentage, and calculates the the ratio of +// and a target utilization percentage, and calculates the ratio of // desired to actual utilization (returning that, the actual utilization, and the raw average value) func GetResourceUtilizationRatio(metrics PodMetricsInfo, requests map[string]int64, targetUtilization int32) (utilizationRatio float64, currentUtilization int32, rawAverageValue int64, err error) { metricsTotal := int64(0) diff --git a/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/rate_limitters.go b/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/rate_limitters.go index 86dd0c098917..06e36ec40a8b 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/rate_limitters.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/rate_limitters.go @@ -46,7 +46,7 @@ func (r *FixedItemIntervalRateLimiter) NumRequeues(item interface{}) int { func (r *FixedItemIntervalRateLimiter) Forget(item interface{}) { } -// NewDefaultHPARateLimitter creates a rate limitter which limits overall (as per the +// NewDefaultHPARateLimiter creates a rate limitter which limits 
overall (as per the // default controller rate limiter), as well as per the resync interval func NewDefaultHPARateLimiter(interval time.Duration) workqueue.RateLimiter { return NewFixedItemIntervalRateLimiter(interval) diff --git a/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/replica_calculator.go b/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/replica_calculator.go index ee239198aa2d..62f863c3b180 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/replica_calculator.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/replica_calculator.go @@ -31,15 +31,23 @@ import ( metricsclient "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics" ) +const ( + // defaultTestingTolerance is default value for calculating when to + // scale up/scale down. + defaultTestingTolerance = 0.1 +) + type ReplicaCalculator struct { metricsClient metricsclient.MetricsClient podsGetter v1coreclient.PodsGetter + tolerance float64 } -func NewReplicaCalculator(metricsClient metricsclient.MetricsClient, podsGetter v1coreclient.PodsGetter) *ReplicaCalculator { +func NewReplicaCalculator(metricsClient metricsclient.MetricsClient, podsGetter v1coreclient.PodsGetter, tolerance float64) *ReplicaCalculator { return &ReplicaCalculator{ metricsClient: metricsClient, podsGetter: podsGetter, + tolerance: tolerance, } } @@ -105,7 +113,7 @@ func (c *ReplicaCalculator) GetResourceReplicas(currentReplicas int32, targetUti rebalanceUnready := len(unreadyPods) > 0 && usageRatio > 1.0 if !rebalanceUnready && len(missingPods) == 0 { - if math.Abs(1.0-usageRatio) <= tolerance { + if math.Abs(1.0-usageRatio) <= c.tolerance { // return the current replicas if the change would be too small return currentReplicas, utilization, rawUtilization, timestamp, nil } @@ -141,7 +149,7 @@ func (c *ReplicaCalculator) GetResourceReplicas(currentReplicas int32, targetUti return 0, utilization, rawUtilization, time.Time{}, err } - if math.Abs(1.0-newUsageRatio) <= tolerance || (usageRatio < 1.0 && newUsageRatio > 1.0) || (usageRatio > 1.0 && newUsageRatio < 1.0) { + if math.Abs(1.0-newUsageRatio) <= c.tolerance || (usageRatio < 1.0 && newUsageRatio > 1.0) || (usageRatio > 1.0 && newUsageRatio < 1.0) { // return the current replicas if the change would be too small, // or if the new usage ratio would cause a change in scale direction return currentReplicas, utilization, rawUtilization, timestamp, nil @@ -218,7 +226,7 @@ func (c *ReplicaCalculator) calcPlainMetricReplicas(metrics metricsclient.PodMet rebalanceUnready := len(unreadyPods) > 0 && usageRatio > 1.0 if !rebalanceUnready && len(missingPods) == 0 { - if math.Abs(1.0-usageRatio) <= tolerance { + if math.Abs(1.0-usageRatio) <= c.tolerance { // return the current replicas if the change would be too small return currentReplicas, utilization, nil } @@ -251,7 +259,7 @@ func (c *ReplicaCalculator) calcPlainMetricReplicas(metrics metricsclient.PodMet // re-run the utilization calculation with our new numbers newUsageRatio, _ := metricsclient.GetMetricUtilizationRatio(metrics, targetUtilization) - if math.Abs(1.0-newUsageRatio) <= tolerance || (usageRatio < 1.0 && newUsageRatio > 1.0) || (usageRatio > 1.0 && newUsageRatio < 1.0) { + if math.Abs(1.0-newUsageRatio) <= c.tolerance || (usageRatio < 1.0 && newUsageRatio > 1.0) || (usageRatio > 1.0 && newUsageRatio < 1.0) { // return the current replicas if the change would be too small, // or if the new usage ratio would cause a change in scale direction return currentReplicas, utilization, nil @@ -271,7 +279,7 @@ 
func (c *ReplicaCalculator) GetObjectMetricReplicas(currentReplicas int32, targe } usageRatio := float64(utilization) / float64(targetUtilization) - if math.Abs(1.0-usageRatio) <= tolerance { + if math.Abs(1.0-usageRatio) <= c.tolerance { // return the current replicas if the change would be too small return currentReplicas, utilization, timestamp, nil } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/podgc/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/podgc/BUILD index c1630f99c449..01209d81fc75 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/podgc/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/podgc/BUILD @@ -12,6 +12,7 @@ go_library( "doc.go", "gc_controller.go", ], + importpath = "k8s.io/kubernetes/pkg/controller/podgc", deps = [ "//pkg/controller:go_default_library", "//pkg/util/metrics:go_default_library", @@ -32,6 +33,7 @@ go_library( go_test( name = "go_default_test", srcs = ["gc_controller_test.go"], + importpath = "k8s.io/kubernetes/pkg/controller/podgc", library = ":go_default_library", deps = [ "//pkg/controller:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/podgc/gc_controller.go b/vendor/k8s.io/kubernetes/pkg/controller/podgc/gc_controller.go index 76179b736b2e..8badcc4ea37a 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/podgc/gc_controller.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/podgc/gc_controller.go @@ -52,15 +52,15 @@ type PodGCController struct { } func NewPodGC(kubeClient clientset.Interface, podInformer coreinformers.PodInformer, terminatedPodThreshold int) *PodGCController { - if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil { - metrics.RegisterMetricAndTrackRateLimiterUsage("gc_controller", kubeClient.Core().RESTClient().GetRateLimiter()) + if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil { + metrics.RegisterMetricAndTrackRateLimiterUsage("gc_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter()) } gcc := &PodGCController{ kubeClient: kubeClient, terminatedPodThreshold: terminatedPodThreshold, deletePod: func(namespace, name string) error { glog.Infof("PodGC is force deleting Pod: %v:%v", namespace, name) - return kubeClient.Core().Pods(namespace).Delete(name, metav1.NewDeleteOptions(0)) + return kubeClient.CoreV1().Pods(namespace).Delete(name, metav1.NewDeleteOptions(0)) }, } @@ -143,7 +143,7 @@ func (gcc *PodGCController) gcTerminated(pods []*v1.Pod) { func (gcc *PodGCController) gcOrphaned(pods []*v1.Pod) { glog.V(4).Infof("GC'ing orphaned") // We want to get list of Nodes from the etcd, to make sure that it's as fresh as possible. 
- nodes, err := gcc.kubeClient.Core().Nodes().List(metav1.ListOptions{}) + nodes, err := gcc.kubeClient.CoreV1().Nodes().List(metav1.ListOptions{}) if err != nil { return } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/replicaset/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/replicaset/BUILD index 0f64334806f7..3711a751aa5f 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/replicaset/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/replicaset/BUILD @@ -13,6 +13,7 @@ go_library( "replica_set.go", "replica_set_utils.go", ], + importpath = "k8s.io/kubernetes/pkg/controller/replicaset", deps = [ "//pkg/api/v1/pod:go_default_library", "//pkg/controller:go_default_library", @@ -23,6 +24,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/client-go/informers/core/v1:go_default_library", @@ -42,11 +44,14 @@ go_library( go_test( name = "go_default_test", - srcs = ["replica_set_test.go"], + srcs = [ + "replica_set_test.go", + "replica_set_utils_test.go", + ], + importpath = "k8s.io/kubernetes/pkg/controller/replicaset", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/testapi:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/controller:go_default_library", "//pkg/securitycontext:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/replicaset/OWNERS b/vendor/k8s.io/kubernetes/pkg/controller/replicaset/OWNERS index 3fc7da4b6581..1cf422540417 100755 --- a/vendor/k8s.io/kubernetes/pkg/controller/replicaset/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/controller/replicaset/OWNERS @@ -1,7 +1,9 @@ approvers: - caesarxuchao - lavalamp +- enisoc reviewers: - caesarxuchao - lavalamp - tnozicka +- enisoc diff --git a/vendor/k8s.io/kubernetes/pkg/controller/replicaset/replica_set.go b/vendor/k8s.io/kubernetes/pkg/controller/replicaset/replica_set.go index 3a24370b0782..a111e5b62414 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/replicaset/replica_set.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/replicaset/replica_set.go @@ -14,7 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ -// If you make changes to this file, you should also make the corresponding change in ReplicationController. +// ### ATTENTION ### +// +// This code implements both ReplicaSet and ReplicationController. +// +// For RC, the objects are converted on the way in and out (see ../replication/), +// as if ReplicationController were just an older API version of ReplicaSet. +// However, RC and RS still have separate storage and separate instantiations +// of the ReplicaSetController object. +// +// Use rsc.Kind in log messages rather than hard-coding "ReplicaSet". 
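The new header comment above describes the consolidation this hunk introduces: ReplicaSet and ReplicationController now share one controller implementation, and each instantiation differs only in the schema.GroupVersionKind (plus pod control, metric owner, and queue names) injected through NewBaseController, so log messages can use rsc.Kind instead of a hard-coded "ReplicaSet". The sketch below is a rough, self-contained illustration of why the embedded-GVK pattern works; the groupVersionKind type and the newBaseController/run names are local stand-ins, not the vendored API.

package main

import "fmt"

// groupVersionKind is a local stand-in for schema.GroupVersionKind.
type groupVersionKind struct {
	Group, Version, Kind string
}

// baseController embeds the GVK, so c.Kind is a promoted field usable in shared code.
type baseController struct {
	groupVersionKind
	queueName string
}

func newBaseController(gvk groupVersionKind, queueName string) *baseController {
	return &baseController{groupVersionKind: gvk, queueName: queueName}
}

func (c *baseController) run() {
	// The same code path logs the right kind for either instantiation.
	fmt.Printf("Starting %v controller (queue %q)\n", c.Kind, c.queueName)
}

func main() {
	rs := newBaseController(groupVersionKind{"extensions", "v1beta1", "ReplicaSet"}, "replicaset")
	rc := newBaseController(groupVersionKind{"", "v1", "ReplicationController"}, "replicationcontroller")
	rs.run()
	rc.run()
}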
package replicaset @@ -22,6 +31,7 @@ import ( "fmt" "reflect" "sort" + "strings" "sync" "time" @@ -32,6 +42,7 @@ import ( "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" coreinformers "k8s.io/client-go/informers/core/v1" @@ -59,12 +70,14 @@ const ( statusUpdateRetries = 1 ) -// controllerKind contains the schema.GroupVersionKind for this controller type. -var controllerKind = v1beta1.SchemeGroupVersion.WithKind("ReplicaSet") - // ReplicaSetController is responsible for synchronizing ReplicaSet objects stored // in the system with actual running pods. type ReplicaSetController struct { + // GroupVersionKind indicates the controller type. + // Different instances of this struct may handle different GVKs. + // For example, this struct can be used (with adapters) to handle ReplicationController. + schema.GroupVersionKind + kubeClient clientset.Interface podControl controller.PodControlInterface @@ -95,22 +108,35 @@ type ReplicaSetController struct { // NewReplicaSetController configures a replica set controller with the specified event recorder func NewReplicaSetController(rsInformer extensionsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, kubeClient clientset.Interface, burstReplicas int) *ReplicaSetController { - if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil { - metrics.RegisterMetricAndTrackRateLimiterUsage("replicaset_controller", kubeClient.Core().RESTClient().GetRateLimiter()) - } eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(glog.Infof) - eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.Core().RESTClient()).Events("")}) - - rsc := &ReplicaSetController{ - kubeClient: kubeClient, - podControl: controller.RealPodControl{ + eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")}) + return NewBaseController(rsInformer, podInformer, kubeClient, burstReplicas, + v1beta1.SchemeGroupVersion.WithKind("ReplicaSet"), + "replicaset_controller", + "replicaset", + controller.RealPodControl{ KubeClient: kubeClient, Recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "replicaset-controller"}), }, - burstReplicas: burstReplicas, - expectations: controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()), - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "replicaset"), + ) +} + +// NewBaseController is the implementation of NewReplicaSetController with additional injected +// parameters so that it can also serve as the implementation of NewReplicationController. 
+func NewBaseController(rsInformer extensionsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, kubeClient clientset.Interface, burstReplicas int, + gvk schema.GroupVersionKind, metricOwnerName, queueName string, podControl controller.PodControlInterface) *ReplicaSetController { + if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil { + metrics.RegisterMetricAndTrackRateLimiterUsage(metricOwnerName, kubeClient.CoreV1().RESTClient().GetRateLimiter()) + } + + rsc := &ReplicaSetController{ + GroupVersionKind: gvk, + kubeClient: kubeClient, + podControl: podControl, + burstReplicas: burstReplicas, + expectations: controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()), + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), queueName), } rsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ @@ -153,10 +179,11 @@ func (rsc *ReplicaSetController) Run(workers int, stopCh <-chan struct{}) { defer utilruntime.HandleCrash() defer rsc.queue.ShutDown() - glog.Infof("Starting replica set controller") - defer glog.Infof("Shutting down replica set Controller") + controllerName := strings.ToLower(rsc.Kind) + glog.Infof("Starting %v controller", controllerName) + defer glog.Infof("Shutting down %v controller", controllerName) - if !controller.WaitForCacheSync("replica set", stopCh, rsc.podListerSynced, rsc.rsListerSynced) { + if !controller.WaitForCacheSync(rsc.Kind, stopCh, rsc.podListerSynced, rsc.rsListerSynced) { return } @@ -176,7 +203,7 @@ func (rsc *ReplicaSetController) getPodReplicaSets(pod *v1.Pod) []*extensions.Re if len(rss) > 1 { // ControllerRef will ensure we don't do anything crazy, but more than one // item in this list nevertheless constitutes user error. - utilruntime.HandleError(fmt.Errorf("user error! more than one ReplicaSet is selecting pods with labels: %+v", pod.Labels)) + utilruntime.HandleError(fmt.Errorf("user error! more than one %v is selecting pods with labels: %+v", rsc.Kind, pod.Labels)) } return rss } @@ -187,7 +214,7 @@ func (rsc *ReplicaSetController) getPodReplicaSets(pod *v1.Pod) []*extensions.Re func (rsc *ReplicaSetController) resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference) *extensions.ReplicaSet { // We can't look up by UID, so look up by Name and then verify UID. // Don't even try to look up by Name if it's the wrong Kind. - if controllerRef.Kind != controllerKind.Kind { + if controllerRef.Kind != rsc.Kind { return nil } rs, err := rsc.rsLister.ReplicaSets(namespace).Get(controllerRef.Name) @@ -220,7 +247,7 @@ func (rsc *ReplicaSetController) updateRS(old, cur interface{}) { // that bad as ReplicaSets that haven't met expectations yet won't // sync, and all the listing is done using local stores. if *(oldRS.Spec.Replicas) != *(curRS.Spec.Replicas) { - glog.V(4).Infof("Replica set %v updated. Desired pod count change: %d->%d", curRS.Name, *(oldRS.Spec.Replicas), *(curRS.Spec.Replicas)) + glog.V(4).Infof("%v %v updated. Desired pod count change: %d->%d", rsc.Kind, curRS.Name, *(oldRS.Spec.Replicas), *(curRS.Spec.Replicas)) } rsc.enqueueReplicaSet(cur) } @@ -319,7 +346,7 @@ func (rsc *ReplicaSetController) updatePod(old, cur interface{}) { // Note that this still suffers from #29229, we are just moving the problem one level // "closer" to kubelet (from the deployment to the replica set controller). 
if !podutil.IsPodReady(oldPod) && podutil.IsPodReady(curPod) && rs.Spec.MinReadySeconds > 0 { - glog.V(2).Infof("ReplicaSet %q will be enqueued after %ds for availability check", rs.Name, rs.Spec.MinReadySeconds) + glog.V(2).Infof("%v %q will be enqueued after %ds for availability check", rsc.Kind, rs.Name, rs.Spec.MinReadySeconds) // Add a second to avoid milliseconds skew in AddAfter. // See https://github.com/kubernetes/kubernetes/issues/39785#issuecomment-279959133 for more info. rsc.enqueueReplicaSetAfter(rs, (time.Duration(rs.Spec.MinReadySeconds)*time.Second)+time.Second) @@ -434,13 +461,11 @@ func (rsc *ReplicaSetController) manageReplicas(filteredPods []*v1.Pod, rs *exte diff := len(filteredPods) - int(*(rs.Spec.Replicas)) rsKey, err := controller.KeyFunc(rs) if err != nil { - utilruntime.HandleError(fmt.Errorf("Couldn't get key for ReplicaSet %#v: %v", rs, err)) + utilruntime.HandleError(fmt.Errorf("Couldn't get key for %v %#v: %v", rsc.Kind, rs, err)) return nil } - var errCh chan error if diff < 0 { diff *= -1 - errCh = make(chan error, diff) if diff > rsc.burstReplicas { diff = rsc.burstReplicas } @@ -450,8 +475,7 @@ func (rsc *ReplicaSetController) manageReplicas(filteredPods []*v1.Pod, rs *exte // into a performance bottleneck. We should generate a UID for the pod // beforehand and store it via ExpectCreations. rsc.expectations.ExpectCreations(rsKey, diff) - var wg sync.WaitGroup - glog.V(2).Infof("Too few %q/%q replicas, need %d, creating %d", rs.Namespace, rs.Name, *(rs.Spec.Replicas), diff) + glog.V(2).Infof("Too few replicas for %v %s/%s, need %d, creating %d", rsc.Kind, rs.Namespace, rs.Name, *(rs.Spec.Replicas), diff) // Batch the pod creates. Batch sizes start at SlowStartInitialBatchSize // and double with each successful iteration in a kind of "slow start". // This handles attempts to start large numbers of pods that would @@ -460,105 +484,85 @@ func (rsc *ReplicaSetController) manageReplicas(filteredPods []*v1.Pod, rs *exte // prevented from spamming the API service with the pod create requests // after one of its pods fails. Conveniently, this also prevents the // event spam that those failures would generate. - for batchSize := integer.IntMin(diff, controller.SlowStartInitialBatchSize); diff > 0; batchSize = integer.IntMin(2*batchSize, diff) { - errorCount := len(errCh) - wg.Add(batchSize) - for i := 0; i < batchSize; i++ { - go func() { - defer wg.Done() - var err error - boolPtr := func(b bool) *bool { return &b } - controllerRef := &metav1.OwnerReference{ - APIVersion: controllerKind.GroupVersion().String(), - Kind: controllerKind.Kind, - Name: rs.Name, - UID: rs.UID, - BlockOwnerDeletion: boolPtr(true), - Controller: boolPtr(true), - } - err = rsc.podControl.CreatePodsWithControllerRef(rs.Namespace, &rs.Spec.Template, rs, controllerRef) - if err != nil && errors.IsTimeout(err) { - // Pod is created but its initialization has timed out. - // If the initialization is successful eventually, the - // controller will observe the creation via the informer. - // If the initialization fails, or if the pod keeps - // uninitialized for a long time, the informer will not - // receive any update, and the controller will create a new - // pod when the expectation expires. 
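Editor's note: the comment block above explains the slow-start strategy for pod creation; in this diff the hand-rolled loop being removed here is replaced by a slowStartBatch helper added near the end of replica_set.go further down. The following is a standalone sketch of the same batching behavior, using only the standard library and a hypothetical createPod stand-in for podControl.CreatePodsWithControllerRef:

package main

import (
    "errors"
    "fmt"
    "sync"
)

func intMin(a, b int) int {
    if a < b {
        return a
    }
    return b
}

// slowStartBatch calls fn up to count times, in batches that start at
// initialBatchSize and double after every fully successful batch. If any
// call in a batch fails, the remaining batches are skipped and the first
// error is returned along with the number of successful calls so far.
func slowStartBatch(count, initialBatchSize int, fn func() error) (int, error) {
    remaining := count
    successes := 0
    for batchSize := intMin(remaining, initialBatchSize); batchSize > 0; batchSize = intMin(2*batchSize, remaining) {
        errCh := make(chan error, batchSize)
        var wg sync.WaitGroup
        wg.Add(batchSize)
        for i := 0; i < batchSize; i++ {
            go func() {
                defer wg.Done()
                if err := fn(); err != nil {
                    errCh <- err
                }
            }()
        }
        wg.Wait()
        successes += batchSize - len(errCh)
        if len(errCh) > 0 {
            return successes, <-errCh
        }
        remaining -= batchSize
    }
    return successes, nil
}

func main() {
    attempts := 0
    var mu sync.Mutex
    // createPod fails on every fifth call to show the early cut-off.
    createPod := func() error {
        mu.Lock()
        defer mu.Unlock()
        attempts++
        if attempts%5 == 0 {
            return errors.New("quota exceeded")
        }
        return nil
    }
    succeeded, err := slowStartBatch(20, 1, createPod) // batches of 1, 2, 4, ...
    fmt.Println(succeeded, err)
}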
- return - } - if err != nil { - // Decrement the expected number of creates because the informer won't observe this pod - glog.V(2).Infof("Failed creation, decrementing expectations for replica set %q/%q", rs.Namespace, rs.Name) - rsc.expectations.CreationObserved(rsKey) - errCh <- err - } - }() + successfulCreations, err := slowStartBatch(diff, controller.SlowStartInitialBatchSize, func() error { + boolPtr := func(b bool) *bool { return &b } + controllerRef := &metav1.OwnerReference{ + APIVersion: rsc.GroupVersion().String(), + Kind: rsc.Kind, + Name: rs.Name, + UID: rs.UID, + BlockOwnerDeletion: boolPtr(true), + Controller: boolPtr(true), } - wg.Wait() - // any skipped pods that we never attempted to start shouldn't be expected. - skippedPods := diff - batchSize - if errorCount < len(errCh) && skippedPods > 0 { - glog.V(2).Infof("Slow-start failure. Skipping creation of %d pods, decrementing expectations for replica set %q/%q", skippedPods, rs.Namespace, rs.Name) - for i := 0; i < skippedPods; i++ { - // Decrement the expected number of creates because the informer won't observe this pod - rsc.expectations.CreationObserved(rsKey) - } - // The skipped pods will be retried later. The next controller resync will - // retry the slow start process. - break + err := rsc.podControl.CreatePodsWithControllerRef(rs.Namespace, &rs.Spec.Template, rs, controllerRef) + if err != nil && errors.IsTimeout(err) { + // Pod is created but its initialization has timed out. + // If the initialization is successful eventually, the + // controller will observe the creation via the informer. + // If the initialization fails, or if the pod keeps + // uninitialized for a long time, the informer will not + // receive any update, and the controller will create a new + // pod when the expectation expires. + return nil + } + return err + }) + + // Any skipped pods that we never attempted to start shouldn't be expected. + // The skipped pods will be retried later. The next controller resync will + // retry the slow start process. + if skippedPods := diff - successfulCreations; skippedPods > 0 { + glog.V(2).Infof("Slow-start failure. Skipping creation of %d pods, decrementing expectations for %v %v/%v", skippedPods, rsc.Kind, rs.Namespace, rs.Name) + for i := 0; i < skippedPods; i++ { + // Decrement the expected number of creates because the informer won't observe this pod + rsc.expectations.CreationObserved(rsKey) } - diff -= batchSize } + return err } else if diff > 0 { if diff > rsc.burstReplicas { diff = rsc.burstReplicas } - errCh = make(chan error, diff) - glog.V(2).Infof("Too many %q/%q replicas, need %d, deleting %d", rs.Namespace, rs.Name, *(rs.Spec.Replicas), diff) - // No need to sort pods if we are about to delete all of them - if *(rs.Spec.Replicas) != 0 { - // Sort the pods in the order such that not-ready < ready, unscheduled - // < scheduled, and pending < running. This ensures that we delete pods - // in the earlier stages whenever possible. - sort.Sort(controller.ActivePods(filteredPods)) - } + glog.V(2).Infof("Too many replicas for %v %s/%s, need %d, deleting %d", rsc.Kind, rs.Namespace, rs.Name, *(rs.Spec.Replicas), diff) + + // Choose which Pods to delete, preferring those in earlier phases of startup. + podsToDelete := getPodsToDelete(filteredPods, diff) + // Snapshot the UIDs (ns/name) of the pods we're expecting to see // deleted, so we know to record their expectations exactly once either // when we see it as an update of the deletion timestamp, or as a delete. 
// Note that if the labels on a pod/rs change in a way that the pod gets // orphaned, the rs will only wake up after the expectations have // expired even if other pods are deleted. - deletedPodKeys := []string{} - for i := 0; i < diff; i++ { - deletedPodKeys = append(deletedPodKeys, controller.PodKey(filteredPods[i])) - } - rsc.expectations.ExpectDeletions(rsKey, deletedPodKeys) + rsc.expectations.ExpectDeletions(rsKey, getPodKeys(podsToDelete)) + + errCh := make(chan error, diff) var wg sync.WaitGroup wg.Add(diff) - for i := 0; i < diff; i++ { - go func(ix int) { + for _, pod := range podsToDelete { + go func(targetPod *v1.Pod) { defer wg.Done() - if err := rsc.podControl.DeletePod(rs.Namespace, filteredPods[ix].Name, rs); err != nil { + if err := rsc.podControl.DeletePod(rs.Namespace, targetPod.Name, rs); err != nil { // Decrement the expected number of deletes because the informer won't observe this deletion - podKey := controller.PodKey(filteredPods[ix]) - glog.V(2).Infof("Failed to delete %v, decrementing expectations for controller %q/%q", podKey, rs.Namespace, rs.Name) + podKey := controller.PodKey(targetPod) + glog.V(2).Infof("Failed to delete %v, decrementing expectations for %v %s/%s", podKey, rsc.Kind, rs.Namespace, rs.Name) rsc.expectations.DeletionObserved(rsKey, podKey) errCh <- err } - }(i) + }(pod) } wg.Wait() - } - select { - case err := <-errCh: - // all errors have been reported before and they're likely to be the same, so we'll only return the first one we hit. - if err != nil { - return err + select { + case err := <-errCh: + // all errors have been reported before and they're likely to be the same, so we'll only return the first one we hit. + if err != nil { + return err + } + default: } - default: } + return nil } @@ -566,9 +570,10 @@ func (rsc *ReplicaSetController) manageReplicas(filteredPods []*v1.Pod, rs *exte // meaning it did not expect to see any more of its pods created or deleted. This function is not meant to be // invoked concurrently with the same key. func (rsc *ReplicaSetController) syncReplicaSet(key string) error { + startTime := time.Now() defer func() { - glog.V(4).Infof("Finished syncing replica set %q (%v)", key, time.Now().Sub(startTime)) + glog.V(4).Infof("Finished syncing %v %q (%v)", rsc.Kind, key, time.Since(startTime)) }() namespace, name, err := cache.SplitMetaNamespaceKey(key) @@ -577,7 +582,7 @@ func (rsc *ReplicaSetController) syncReplicaSet(key string) error { } rs, err := rsc.rsLister.ReplicaSets(namespace).Get(name) if errors.IsNotFound(err) { - glog.V(4).Infof("ReplicaSet has been deleted %v", key) + glog.V(4).Infof("%v %v has been deleted", rsc.Kind, key) rsc.expectations.DeleteExpectations(key) return nil } @@ -606,22 +611,10 @@ func (rsc *ReplicaSetController) syncReplicaSet(key string) error { filteredPods = append(filteredPods, pod) } } - // If any adoptions are attempted, we should first recheck for deletion with - // an uncached quorum read sometime after listing Pods (see #42639). 
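Editor's note: the delete path shown above records deletion expectations up front, fans the deletes out to goroutines, and then drains a buffered error channel, returning only the first error since all of them would trigger the same requeue. A standalone sketch of that fan-out and first-error pattern, with a hypothetical deletePod stand-in for podControl.DeletePod and a toy expectations counter in place of controller.UIDTrackingControllerExpectations:

package main

import (
    "errors"
    "fmt"
    "sync"
)

// expectations only counts how many deletions we still expect to observe.
type expectations struct {
    mu      sync.Mutex
    pending int
}

func (e *expectations) ExpectDeletions(n int) { e.mu.Lock(); e.pending += n; e.mu.Unlock() }
func (e *expectations) DeletionObserved()     { e.mu.Lock(); e.pending--; e.mu.Unlock() }
func (e *expectations) Pending() int          { e.mu.Lock(); defer e.mu.Unlock(); return e.pending }

func deleteBatch(pods []string, deletePod func(string) error, exp *expectations) error {
    // Record every intended delete before issuing any of them, exactly once.
    exp.ExpectDeletions(len(pods))

    errCh := make(chan error, len(pods))
    var wg sync.WaitGroup
    wg.Add(len(pods))
    for _, pod := range pods {
        go func(target string) {
            defer wg.Done()
            if err := deletePod(target); err != nil {
                // The informer will never observe this deletion, so lower the expectation now.
                exp.DeletionObserved()
                errCh <- err
            }
        }(pod)
    }
    wg.Wait()

    // All errors are equivalent for requeue purposes; report only the first one.
    select {
    case err := <-errCh:
        return err
    default:
        return nil
    }
}

func main() {
    exp := &expectations{}
    deletePod := func(name string) error {
        if name == "pod-b" {
            return errors.New("conflict")
        }
        return nil
    }
    err := deleteBatch([]string{"pod-a", "pod-b", "pod-c"}, deletePod, exp)
    fmt.Println(err, "still expected:", exp.Pending())
}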
- canAdoptFunc := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) { - fresh, err := rsc.kubeClient.ExtensionsV1beta1().ReplicaSets(rs.Namespace).Get(rs.Name, metav1.GetOptions{}) - if err != nil { - return nil, err - } - if fresh.UID != rs.UID { - return nil, fmt.Errorf("original ReplicaSet %v/%v is gone: got uid %v, wanted %v", rs.Namespace, rs.Name, fresh.UID, rs.UID) - } - return fresh, nil - }) - cm := controller.NewPodControllerRefManager(rsc.podControl, rs, selector, controllerKind, canAdoptFunc) + // NOTE: filteredPods are pointing to objects from cache - if you need to // modify them, you need to copy it first. - filteredPods, err = cm.ClaimPods(filteredPods) + filteredPods, err = rsc.claimPods(rs, selector, filteredPods) if err != nil { return err } @@ -630,13 +623,11 @@ func (rsc *ReplicaSetController) syncReplicaSet(key string) error { if rsNeedsSync && rs.DeletionTimestamp == nil { manageReplicasErr = rsc.manageReplicas(filteredPods, rs) } - rs = rs.DeepCopy() - newStatus := calculateStatus(rs, filteredPods, manageReplicasErr) // Always updates status as pods come up or die. - updatedRS, err := updateReplicaSetStatus(rsc.kubeClient.Extensions().ReplicaSets(rs.Namespace), rs, newStatus) + updatedRS, err := updateReplicaSetStatus(rsc.kubeClient.ExtensionsV1beta1().ReplicaSets(rs.Namespace), rs, newStatus) if err != nil { // Multiple things could lead to this update failing. Requeuing the replica set ensures // Returning an error causes a requeue without forcing a hotloop @@ -650,3 +641,77 @@ func (rsc *ReplicaSetController) syncReplicaSet(key string) error { } return manageReplicasErr } + +func (rsc *ReplicaSetController) claimPods(rs *extensions.ReplicaSet, selector labels.Selector, filteredPods []*v1.Pod) ([]*v1.Pod, error) { + // If any adoptions are attempted, we should first recheck for deletion with + // an uncached quorum read sometime after listing Pods (see #42639). + canAdoptFunc := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) { + fresh, err := rsc.kubeClient.ExtensionsV1beta1().ReplicaSets(rs.Namespace).Get(rs.Name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + if fresh.UID != rs.UID { + return nil, fmt.Errorf("original %v %v/%v is gone: got uid %v, wanted %v", rsc.Kind, rs.Namespace, rs.Name, fresh.UID, rs.UID) + } + return fresh, nil + }) + cm := controller.NewPodControllerRefManager(rsc.podControl, rs, selector, rsc.GroupVersionKind, canAdoptFunc) + return cm.ClaimPods(filteredPods) +} + +// slowStartBatch tries to call the provided function a total of 'count' times, +// starting slow to check for errors, then speeding up if calls succeed. +// +// It groups the calls into batches, starting with a group of initialBatchSize. +// Within each batch, it may call the function multiple times concurrently. +// +// If a whole batch succeeds, the next batch may get exponentially larger. +// If there are any failures in a batch, all remaining batches are skipped +// after waiting for the current batch to complete. +// +// It returns the number of successful calls to the function. 
+func slowStartBatch(count int, initialBatchSize int, fn func() error) (int, error) { + remaining := count + successes := 0 + for batchSize := integer.IntMin(remaining, initialBatchSize); batchSize > 0; batchSize = integer.IntMin(2*batchSize, remaining) { + errCh := make(chan error, batchSize) + var wg sync.WaitGroup + wg.Add(batchSize) + for i := 0; i < batchSize; i++ { + go func() { + defer wg.Done() + if err := fn(); err != nil { + errCh <- err + } + }() + } + wg.Wait() + curSuccesses := batchSize - len(errCh) + successes += curSuccesses + if len(errCh) > 0 { + return successes, <-errCh + } + remaining -= batchSize + } + return successes, nil +} + +func getPodsToDelete(filteredPods []*v1.Pod, diff int) []*v1.Pod { + // No need to sort pods if we are about to delete all of them. + // diff will always be <= len(filteredPods), so not need to handle > case. + if diff < len(filteredPods) { + // Sort the pods in the order such that not-ready < ready, unscheduled + // < scheduled, and pending < running. This ensures that we delete pods + // in the earlier stages whenever possible. + sort.Sort(controller.ActivePods(filteredPods)) + } + return filteredPods[:diff] +} + +func getPodKeys(pods []*v1.Pod) []string { + podKeys := make([]string, 0, len(pods)) + for _, pod := range pods { + podKeys = append(podKeys, controller.PodKey(pod)) + } + return podKeys +} diff --git a/vendor/k8s.io/kubernetes/pkg/controller/replicaset/replica_set_utils.go b/vendor/k8s.io/kubernetes/pkg/controller/replicaset/replica_set_utils.go index ec11fd106faa..ad628da95511 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/replicaset/replica_set_utils.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/replicaset/replica_set_utils.go @@ -55,7 +55,7 @@ func updateReplicaSetStatus(c unversionedextensions.ReplicaSetInterface, rs *ext var getErr, updateErr error var updatedRS *extensions.ReplicaSet for i, rs := 0, rs; ; i++ { - glog.V(4).Infof(fmt.Sprintf("Updating status for ReplicaSet: %s/%s, ", rs.Namespace, rs.Name) + + glog.V(4).Infof(fmt.Sprintf("Updating status for %v: %s/%s, ", rs.Kind, rs.Namespace, rs.Name) + fmt.Sprintf("replicas %d->%d (need %d), ", rs.Status.Replicas, newStatus.Replicas, *(rs.Spec.Replicas)) + fmt.Sprintf("fullyLabeledReplicas %d->%d, ", rs.Status.FullyLabeledReplicas, newStatus.FullyLabeledReplicas) + fmt.Sprintf("readyReplicas %d->%d, ", rs.Status.ReadyReplicas, newStatus.ReadyReplicas) + diff --git a/vendor/k8s.io/kubernetes/pkg/controller/replication/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/replication/BUILD index a21f67d22d2a..17eb6701a8d0 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/replication/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/replication/BUILD @@ -9,61 +9,47 @@ load( go_library( name = "go_default_library", srcs = [ + "conversion.go", "doc.go", "replication_controller.go", "replication_controller_utils.go", ], + importpath = "k8s.io/kubernetes/pkg/controller/replication", deps = [ - "//pkg/api/v1/pod:go_default_library", + "//pkg/apis/core/v1:go_default_library", + "//pkg/apis/extensions:go_default_library", + "//pkg/apis/extensions/v1beta1:go_default_library", "//pkg/controller:go_default_library", - "//pkg/util/metrics:go_default_library", + "//pkg/controller/replicaset:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//vendor/k8s.io/apiserver/pkg/util/trace:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", "//vendor/k8s.io/client-go/informers/core/v1:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:go_default_library", "//vendor/k8s.io/client-go/listers/core/v1:go_default_library", + "//vendor/k8s.io/client-go/listers/extensions/v1beta1:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", "//vendor/k8s.io/client-go/tools/record:go_default_library", - "//vendor/k8s.io/client-go/util/integer:go_default_library", - "//vendor/k8s.io/client-go/util/workqueue:go_default_library", ], ) go_test( name = "go_default_test", - srcs = ["replication_controller_test.go"], + srcs = ["replication_controller_utils_test.go"], + importpath = "k8s.io/kubernetes/pkg/controller/replication", library = ":go_default_library", - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/testapi:go_default_library", - "//pkg/controller:go_default_library", - "//pkg/securitycontext:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/informers:go_default_library", - "//vendor/k8s.io/client-go/informers/core/v1:go_default_library", - "//vendor/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/client-go/testing:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - "//vendor/k8s.io/client-go/util/testing:go_default_library", - "//vendor/k8s.io/client-go/util/workqueue:go_default_library", - ], + deps = ["//vendor/k8s.io/api/core/v1:go_default_library"], ) filegroup( diff --git a/vendor/k8s.io/kubernetes/pkg/controller/replication/OWNERS b/vendor/k8s.io/kubernetes/pkg/controller/replication/OWNERS index 3fc7da4b6581..1cf422540417 100755 --- a/vendor/k8s.io/kubernetes/pkg/controller/replication/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/controller/replication/OWNERS @@ -1,7 +1,9 @@ approvers: - caesarxuchao - lavalamp +- enisoc reviewers: - caesarxuchao - lavalamp - tnozicka +- enisoc diff --git a/vendor/k8s.io/kubernetes/pkg/controller/replication/conversion.go b/vendor/k8s.io/kubernetes/pkg/controller/replication/conversion.go new 
file mode 100644 index 000000000000..67c1847a95ef --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/controller/replication/conversion.go @@ -0,0 +1,372 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file contains adapters that convert between RC and RS, +// as if ReplicationController were an older API version of ReplicaSet. +// It allows ReplicaSetController to directly replace the old ReplicationManager, +// which was previously a manually-maintained copy-paste of RSC. + +package replication + +import ( + "errors" + "fmt" + "time" + + "k8s.io/api/core/v1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/watch" + coreinformers "k8s.io/client-go/informers/core/v1" + clientset "k8s.io/client-go/kubernetes" + appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" + appsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2" + v1client "k8s.io/client-go/kubernetes/typed/core/v1" + extensionsv1beta1client "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" + v1listers "k8s.io/client-go/listers/core/v1" + extensionslisters "k8s.io/client-go/listers/extensions/v1beta1" + "k8s.io/client-go/tools/cache" + apiv1 "k8s.io/kubernetes/pkg/apis/core/v1" + "k8s.io/kubernetes/pkg/apis/extensions" + extensionsinternalv1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + "k8s.io/kubernetes/pkg/controller" +) + +// informerAdapter implements ReplicaSetInformer by wrapping ReplicationControllerInformer +// and converting objects. 
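Editor's note: the new conversion.go beginning here is built entirely from small adapter types; each one wraps a ReplicationController-typed interface and converts objects to and from ReplicaSet at the boundary, so the shared ReplicaSetController never notices it is driving RCs. A minimal standalone sketch of that adapter shape, with toy RC/RS structs rather than the real API types or client-go interfaces:

package main

import "fmt"

// Toy stand-ins for the real API types.
type replicationController struct {
    Name     string
    Replicas int
}

type replicaSet struct {
    Name     string
    Replicas int
}

// rcLister is the "old" interface we already have an implementation of.
type rcLister interface {
    List() []*replicationController
}

// rsLister is the interface the shared controller code wants to consume.
type rsLister interface {
    List() []*replicaSet
}

// listerAdapter satisfies rsLister by wrapping an rcLister and converting
// each object on the way out, mirroring conversionLister in conversion.go.
type listerAdapter struct {
    rc rcLister
}

func (a listerAdapter) List() []*replicaSet {
    rcs := a.rc.List()
    out := make([]*replicaSet, 0, len(rcs))
    for _, rc := range rcs {
        out = append(out, &replicaSet{Name: rc.Name, Replicas: rc.Replicas})
    }
    return out
}

// fakeRCLister is a trivial in-memory implementation used for the demo.
type fakeRCLister struct{ items []*replicationController }

func (f fakeRCLister) List() []*replicationController { return f.items }

func main() {
    var l rsLister = listerAdapter{rc: fakeRCLister{items: []*replicationController{{Name: "frontend", Replicas: 3}}}}
    for _, rs := range l.List() {
        fmt.Printf("%s: %d replicas\n", rs.Name, rs.Replicas)
    }
}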
+type informerAdapter struct { + rcInformer coreinformers.ReplicationControllerInformer +} + +func (i informerAdapter) Informer() cache.SharedIndexInformer { + return conversionInformer{i.rcInformer.Informer()} +} + +func (i informerAdapter) Lister() extensionslisters.ReplicaSetLister { + return conversionLister{i.rcInformer.Lister()} +} + +type conversionInformer struct { + cache.SharedIndexInformer +} + +func (i conversionInformer) AddEventHandler(handler cache.ResourceEventHandler) { + i.SharedIndexInformer.AddEventHandler(conversionEventHandler{handler}) +} + +func (i conversionInformer) AddEventHandlerWithResyncPeriod(handler cache.ResourceEventHandler, resyncPeriod time.Duration) { + i.SharedIndexInformer.AddEventHandlerWithResyncPeriod(conversionEventHandler{handler}, resyncPeriod) +} + +type conversionLister struct { + rcLister v1listers.ReplicationControllerLister +} + +func (l conversionLister) List(selector labels.Selector) ([]*extensionsv1beta1.ReplicaSet, error) { + rcList, err := l.rcLister.List(selector) + if err != nil { + return nil, err + } + return convertSlice(rcList) +} + +func (l conversionLister) ReplicaSets(namespace string) extensionslisters.ReplicaSetNamespaceLister { + return conversionNamespaceLister{l.rcLister.ReplicationControllers(namespace)} +} + +func (l conversionLister) GetPodReplicaSets(pod *v1.Pod) ([]*extensionsv1beta1.ReplicaSet, error) { + rcList, err := l.rcLister.GetPodControllers(pod) + if err != nil { + return nil, err + } + return convertSlice(rcList) +} + +type conversionNamespaceLister struct { + rcLister v1listers.ReplicationControllerNamespaceLister +} + +func (l conversionNamespaceLister) List(selector labels.Selector) ([]*extensionsv1beta1.ReplicaSet, error) { + rcList, err := l.rcLister.List(selector) + if err != nil { + return nil, err + } + return convertSlice(rcList) +} + +func (l conversionNamespaceLister) Get(name string) (*extensionsv1beta1.ReplicaSet, error) { + rc, err := l.rcLister.Get(name) + if err != nil { + return nil, err + } + return convertRCtoRS(rc, nil) +} + +type conversionEventHandler struct { + handler cache.ResourceEventHandler +} + +func (h conversionEventHandler) OnAdd(obj interface{}) { + rs, err := convertRCtoRS(obj.(*v1.ReplicationController), nil) + if err != nil { + utilruntime.HandleError(fmt.Errorf("dropping RC OnAdd event: can't convert object %#v to RS: %v", obj, err)) + return + } + h.handler.OnAdd(rs) +} + +func (h conversionEventHandler) OnUpdate(oldObj, newObj interface{}) { + oldRS, err := convertRCtoRS(oldObj.(*v1.ReplicationController), nil) + if err != nil { + utilruntime.HandleError(fmt.Errorf("dropping RC OnUpdate event: can't convert old object %#v to RS: %v", oldObj, err)) + return + } + newRS, err := convertRCtoRS(newObj.(*v1.ReplicationController), nil) + if err != nil { + utilruntime.HandleError(fmt.Errorf("dropping RC OnUpdate event: can't convert new object %#v to RS: %v", newObj, err)) + return + } + h.handler.OnUpdate(oldRS, newRS) +} + +func (h conversionEventHandler) OnDelete(obj interface{}) { + rc, ok := obj.(*v1.ReplicationController) + if !ok { + // Convert the Obj inside DeletedFinalStateUnknown. 
+ tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + utilruntime.HandleError(fmt.Errorf("dropping RC OnDelete event: couldn't get object from tombstone %+v", obj)) + return + } + rc, ok = tombstone.Obj.(*v1.ReplicationController) + if !ok { + utilruntime.HandleError(fmt.Errorf("dropping RC OnDelete event: tombstone contained object that is not a RC %#v", obj)) + return + } + rs, err := convertRCtoRS(rc, nil) + if err != nil { + utilruntime.HandleError(fmt.Errorf("dropping RC OnDelete event: can't convert object %#v to RS: %v", obj, err)) + return + } + h.handler.OnDelete(cache.DeletedFinalStateUnknown{Key: tombstone.Key, Obj: rs}) + return + } + + // It's a regular RC object. + rs, err := convertRCtoRS(rc, nil) + if err != nil { + utilruntime.HandleError(fmt.Errorf("dropping RC OnDelete event: can't convert object %#v to RS: %v", obj, err)) + return + } + h.handler.OnDelete(rs) +} + +type clientsetAdapter struct { + clientset.Interface +} + +func (c clientsetAdapter) ExtensionsV1beta1() extensionsv1beta1client.ExtensionsV1beta1Interface { + return conversionExtensionsClient{c.Interface, c.Interface.ExtensionsV1beta1()} +} + +func (c clientsetAdapter) Extensions() extensionsv1beta1client.ExtensionsV1beta1Interface { + return conversionExtensionsClient{c.Interface, c.Interface.Extensions()} +} + +func (c clientsetAdapter) AppsV1beta2() appsv1beta2.AppsV1beta2Interface { + return conversionAppsV1beta2Client{c.Interface, c.Interface.AppsV1beta2()} +} + +func (c clientsetAdapter) AppsV1() appsv1.AppsV1Interface { + return conversionAppsV1Client{c.Interface, c.Interface.AppsV1()} +} + +func (c clientsetAdapter) Apps() appsv1.AppsV1Interface { + return conversionAppsV1Client{c.Interface, c.Interface.AppsV1()} +} + +type conversionAppsV1beta2Client struct { + clientset clientset.Interface + appsv1beta2.AppsV1beta2Interface +} + +func (c conversionAppsV1beta2Client) ReplicaSets(namespace string) appsv1beta2.ReplicaSetInterface { + // TODO(enisoc): This will force RC integration tests to fail if anyone tries to update + // ReplicaSetController to use apps/v1beta2 without updating this conversion adapter. + // Please change conversionClient to use the new RS version instead of extensions/v1beta1, + // and then return a conversionClient here. + panic("need to update RC/RS conversionClient for apps/v1beta2") +} + +type conversionAppsV1Client struct { + clientset clientset.Interface + appsv1.AppsV1Interface +} + +func (c conversionAppsV1Client) ReplicaSets(namespace string) appsv1.ReplicaSetInterface { + // TODO(enisoc): This will force RC integration tests to fail if anyone tries to update + // ReplicaSetController to use apps/v1 without updating this conversion adapter. + // Please change conversionClient to use the new RS version instead of extensions/v1beta1, + // and then return a conversionClient here. 
+ panic("need to update RC/RS conversionClient for apps/v1") +} + +type conversionExtensionsClient struct { + clientset clientset.Interface + extensionsv1beta1client.ExtensionsV1beta1Interface +} + +func (c conversionExtensionsClient) ReplicaSets(namespace string) extensionsv1beta1client.ReplicaSetInterface { + return conversionClient{c.clientset.CoreV1().ReplicationControllers(namespace)} +} + +type conversionClient struct { + v1client.ReplicationControllerInterface +} + +func (c conversionClient) Create(rs *extensionsv1beta1.ReplicaSet) (*extensionsv1beta1.ReplicaSet, error) { + return convertCall(c.ReplicationControllerInterface.Create, rs) +} + +func (c conversionClient) Update(rs *extensionsv1beta1.ReplicaSet) (*extensionsv1beta1.ReplicaSet, error) { + return convertCall(c.ReplicationControllerInterface.Update, rs) +} + +func (c conversionClient) UpdateStatus(rs *extensionsv1beta1.ReplicaSet) (*extensionsv1beta1.ReplicaSet, error) { + return convertCall(c.ReplicationControllerInterface.UpdateStatus, rs) +} + +func (c conversionClient) Get(name string, options metav1.GetOptions) (*extensionsv1beta1.ReplicaSet, error) { + rc, err := c.ReplicationControllerInterface.Get(name, options) + if err != nil { + return nil, err + } + return convertRCtoRS(rc, nil) +} + +func (c conversionClient) List(opts metav1.ListOptions) (*extensionsv1beta1.ReplicaSetList, error) { + rcList, err := c.ReplicationControllerInterface.List(opts) + if err != nil { + return nil, err + } + return convertList(rcList) +} + +func (c conversionClient) Watch(opts metav1.ListOptions) (watch.Interface, error) { + // This is not used by RSC because we wrap the shared informer instead. + return nil, errors.New("Watch() is not implemented for conversionClient") +} + +func (c conversionClient) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *extensionsv1beta1.ReplicaSet, err error) { + // This is not used by RSC. 
+ return nil, errors.New("Patch() is not implemented for conversionClient") +} + +func convertSlice(rcList []*v1.ReplicationController) ([]*extensionsv1beta1.ReplicaSet, error) { + rsList := make([]*extensionsv1beta1.ReplicaSet, 0, len(rcList)) + for _, rc := range rcList { + rs, err := convertRCtoRS(rc, nil) + if err != nil { + return nil, err + } + rsList = append(rsList, rs) + } + return rsList, nil +} + +func convertList(rcList *v1.ReplicationControllerList) (*extensionsv1beta1.ReplicaSetList, error) { + rsList := &extensionsv1beta1.ReplicaSetList{Items: make([]extensionsv1beta1.ReplicaSet, len(rcList.Items))} + for i := range rcList.Items { + rc := &rcList.Items[i] + _, err := convertRCtoRS(rc, &rsList.Items[i]) + if err != nil { + return nil, err + } + } + return rsList, nil +} + +func convertCall(fn func(*v1.ReplicationController) (*v1.ReplicationController, error), rs *extensionsv1beta1.ReplicaSet) (*extensionsv1beta1.ReplicaSet, error) { + rc, err := convertRStoRC(rs) + if err != nil { + return nil, err + } + result, err := fn(rc) + if err != nil { + return nil, err + } + return convertRCtoRS(result, nil) +} + +func convertRCtoRS(rc *v1.ReplicationController, out *extensionsv1beta1.ReplicaSet) (*extensionsv1beta1.ReplicaSet, error) { + var rsInternal extensions.ReplicaSet + if err := apiv1.Convert_v1_ReplicationController_to_extensions_ReplicaSet(rc, &rsInternal, nil); err != nil { + return nil, fmt.Errorf("can't convert ReplicationController %v/%v to ReplicaSet: %v", rc.Namespace, rc.Name, err) + } + if out == nil { + out = new(extensionsv1beta1.ReplicaSet) + } + if err := extensionsinternalv1beta1.Convert_extensions_ReplicaSet_To_v1beta1_ReplicaSet(&rsInternal, out, nil); err != nil { + return nil, fmt.Errorf("can't convert ReplicaSet (converted from ReplicationController %v/%v) from internal to extensions/v1beta1: %v", rc.Namespace, rc.Name, err) + } + return out, nil +} + +func convertRStoRC(rs *extensionsv1beta1.ReplicaSet) (*v1.ReplicationController, error) { + var rsInternal extensions.ReplicaSet + if err := extensionsinternalv1beta1.Convert_v1beta1_ReplicaSet_To_extensions_ReplicaSet(rs, &rsInternal, nil); err != nil { + return nil, fmt.Errorf("can't convert ReplicaSet (converting to ReplicationController %v/%v) from extensions/v1beta1 to internal: %v", rs.Namespace, rs.Name, err) + } + var rc v1.ReplicationController + if err := apiv1.Convert_extensions_ReplicaSet_to_v1_ReplicationController(&rsInternal, &rc, nil); err != nil { + return nil, fmt.Errorf("can't convert ReplicaSet to ReplicationController %v/%v: %v", rs.Namespace, rs.Name, err) + } + return &rc, nil +} + +type podControlAdapter struct { + controller.PodControlInterface +} + +func (pc podControlAdapter) CreatePods(namespace string, template *v1.PodTemplateSpec, object runtime.Object) error { + // This is not used by RSC. + return errors.New("CreatePods() is not implemented for podControlAdapter") +} + +func (pc podControlAdapter) CreatePodsOnNode(nodeName, namespace string, template *v1.PodTemplateSpec, object runtime.Object, controllerRef *metav1.OwnerReference) error { + // This is not used by RSC. 
+ return errors.New("CreatePodsOnNode() is not implemented for podControlAdapter") +} + +func (pc podControlAdapter) CreatePodsWithControllerRef(namespace string, template *v1.PodTemplateSpec, object runtime.Object, controllerRef *metav1.OwnerReference) error { + rc, err := convertRStoRC(object.(*extensionsv1beta1.ReplicaSet)) + if err != nil { + return err + } + return pc.PodControlInterface.CreatePodsWithControllerRef(namespace, template, rc, controllerRef) +} + +func (pc podControlAdapter) DeletePod(namespace string, podID string, object runtime.Object) error { + rc, err := convertRStoRC(object.(*extensionsv1beta1.ReplicaSet)) + if err != nil { + return err + } + return pc.PodControlInterface.DeletePod(namespace, podID, rc) +} diff --git a/vendor/k8s.io/kubernetes/pkg/controller/replication/replication_controller.go b/vendor/k8s.io/kubernetes/pkg/controller/replication/replication_controller.go index ff032f84035a..b181db0f3353 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/replication/replication_controller.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/replication/replication_controller.go @@ -14,653 +14,54 @@ See the License for the specific language governing permissions and limitations under the License. */ -// If you make changes to this file, you should also make the corresponding change in ReplicaSet. +// ### ATTENTION ### +// +// ReplicationManager is now just a wrapper around ReplicaSetController, +// with a conversion layer that effectively treats ReplicationController +// as if it were an older API version of ReplicaSet. +// +// However, RC and RS still have separate storage and separate instantiations +// of the ReplicaSetController object. package replication import ( - "fmt" - "reflect" - "sort" - "sync" - "time" - "github.com/golang/glog" "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/wait" - utiltrace "k8s.io/apiserver/pkg/util/trace" coreinformers "k8s.io/client-go/informers/core/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" v1core "k8s.io/client-go/kubernetes/typed/core/v1" - corelisters "k8s.io/client-go/listers/core/v1" - "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" - "k8s.io/client-go/util/integer" - "k8s.io/client-go/util/workqueue" - podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/pkg/controller" - "k8s.io/kubernetes/pkg/util/metrics" + "k8s.io/kubernetes/pkg/controller/replicaset" ) const ( - // Realistic value of the burstReplica field for the replication manager based off - // performance requirements for kubernetes 1.0. - BurstReplicas = 500 - - // The number of times we retry updating a replication controller's status. - statusUpdateRetries = 1 + BurstReplicas = replicaset.BurstReplicas ) -// controllerKind contains the schema.GroupVersionKind for this controller type. -var controllerKind = v1.SchemeGroupVersion.WithKind("ReplicationController") - // ReplicationManager is responsible for synchronizing ReplicationController objects stored // in the system with actual running pods. -// NOTE: using this name to distinguish this type from API object "ReplicationController"; will -// not fix it right now. Refer to #41459 for more detail. +// It is actually just a wrapper around ReplicaSetController. 
type ReplicationManager struct { - kubeClient clientset.Interface - podControl controller.PodControlInterface - - // An rc is temporarily suspended after creating/deleting these many replicas. - // It resumes normal action after observing the watch events for them. - burstReplicas int - // To allow injection of syncReplicationController for testing. - syncHandler func(rcKey string) error - - // A TTLCache of pod creates/deletes each rc expects to see. - expectations *controller.UIDTrackingControllerExpectations - - rcLister corelisters.ReplicationControllerLister - rcListerSynced cache.InformerSynced - - podLister corelisters.PodLister - // podListerSynced returns true if the pod store has been synced at least once. - // Added as a member to the struct to allow injection for testing. - podListerSynced cache.InformerSynced - - // Controllers that need to be synced - queue workqueue.RateLimitingInterface + replicaset.ReplicaSetController } // NewReplicationManager configures a replication manager with the specified event recorder func NewReplicationManager(podInformer coreinformers.PodInformer, rcInformer coreinformers.ReplicationControllerInformer, kubeClient clientset.Interface, burstReplicas int) *ReplicationManager { - if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil { - metrics.RegisterMetricAndTrackRateLimiterUsage("replication_controller", kubeClient.Core().RESTClient().GetRateLimiter()) - } - eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(glog.Infof) - eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.Core().RESTClient()).Events("")}) - - rm := &ReplicationManager{ - kubeClient: kubeClient, - podControl: controller.RealPodControl{ - KubeClient: kubeClient, - Recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "replication-controller"}), - }, - burstReplicas: burstReplicas, - expectations: controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()), - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "replicationmanager"), - } - - rcInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: rm.enqueueController, - UpdateFunc: rm.updateRC, - // This will enter the sync loop and no-op, because the controller has been deleted from the store. - // Note that deleting a controller immediately after scaling it to 0 will not work. The recommended - // way of achieving this is by performing a `stop` operation on the controller. - DeleteFunc: rm.enqueueController, - }) - rm.rcLister = rcInformer.Lister() - rm.rcListerSynced = rcInformer.Informer().HasSynced - - podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: rm.addPod, - // This invokes the rc for every pod change, eg: host assignment. Though this might seem like overkill - // the most frequent pod update is status, and the associated rc will only list from local storage, so - // it should be ok. - UpdateFunc: rm.updatePod, - DeleteFunc: rm.deletePod, - }) - rm.podLister = podInformer.Lister() - rm.podListerSynced = podInformer.Informer().HasSynced - - rm.syncHandler = rm.syncReplicationController - return rm -} - -// SetEventRecorder replaces the event recorder used by the replication manager -// with the given recorder. Only used for testing. -func (rm *ReplicationManager) SetEventRecorder(recorder record.EventRecorder) { - // TODO: Hack. 
We can't cleanly shutdown the event recorder, so benchmarks - // need to pass in a fake. - rm.podControl = controller.RealPodControl{KubeClient: rm.kubeClient, Recorder: recorder} -} - -// Run begins watching and syncing. -func (rm *ReplicationManager) Run(workers int, stopCh <-chan struct{}) { - defer utilruntime.HandleCrash() - defer rm.queue.ShutDown() - - glog.Infof("Starting RC controller") - defer glog.Infof("Shutting down RC controller") - - if !controller.WaitForCacheSync("RC", stopCh, rm.podListerSynced, rm.rcListerSynced) { - return - } - - for i := 0; i < workers; i++ { - go wait.Until(rm.worker, time.Second, stopCh) - } - - <-stopCh -} - -// getPodControllers returns a list of ReplicationControllers matching the given pod. -func (rm *ReplicationManager) getPodControllers(pod *v1.Pod) []*v1.ReplicationController { - rcs, err := rm.rcLister.GetPodControllers(pod) - if err != nil { - return nil - } - if len(rcs) > 1 { - // ControllerRef will ensure we don't do anything crazy, but more than one - // item in this list nevertheless constitutes user error. - utilruntime.HandleError(fmt.Errorf("user error! more than one ReplicationController is selecting pods with labels: %+v", pod.Labels)) - } - return rcs -} - -// resolveControllerRef returns the controller referenced by a ControllerRef, -// or nil if the ControllerRef could not be resolved to a matching controller -// of the correct Kind. -func (rm *ReplicationManager) resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference) *v1.ReplicationController { - // We can't look up by UID, so look up by Name and then verify UID. - // Don't even try to look up by Name if it's the wrong Kind. - if controllerRef.Kind != controllerKind.Kind { - return nil - } - rc, err := rm.rcLister.ReplicationControllers(namespace).Get(controllerRef.Name) - if err != nil { - return nil - } - if rc.UID != controllerRef.UID { - // The controller we found with this Name is not the same one that the - // ControllerRef points to. - return nil - } - return rc -} - -// callback when RC is updated -func (rm *ReplicationManager) updateRC(old, cur interface{}) { - oldRC := old.(*v1.ReplicationController) - curRC := cur.(*v1.ReplicationController) - - // You might imagine that we only really need to enqueue the - // controller when Spec changes, but it is safer to sync any - // time this function is triggered. That way a full informer - // resync can requeue any controllers that don't yet have pods - // but whose last attempts at creating a pod have failed (since - // we don't block on creation of pods) instead of those - // controllers stalling indefinitely. Enqueueing every time - // does result in some spurious syncs (like when Status.Replica - // is updated and the watch notification from it retriggers - // this function), but in general extra resyncs shouldn't be - // that bad as rcs that haven't met expectations yet won't - // sync, and all the listing is done using local stores. - if *(oldRC.Spec.Replicas) != *(curRC.Spec.Replicas) { - glog.V(4).Infof("Replication controller %v updated. Desired pod count change: %d->%d", curRC.Name, *(oldRC.Spec.Replicas), *(curRC.Spec.Replicas)) - } - rm.enqueueController(cur) -} - -// When a pod is created, enqueue the ReplicationController that manages it and update its expectations. 
-func (rm *ReplicationManager) addPod(obj interface{}) { - pod := obj.(*v1.Pod) - - if pod.DeletionTimestamp != nil { - // on a restart of the controller manager, it's possible a new pod shows up in a state that - // is already pending deletion. Prevent the pod from being a creation observation. - rm.deletePod(pod) - return - } - - // If it has a ControllerRef, that's all that matters. - if controllerRef := metav1.GetControllerOf(pod); controllerRef != nil { - rc := rm.resolveControllerRef(pod.Namespace, controllerRef) - if rc == nil { - return - } - rsKey, err := controller.KeyFunc(rc) - if err != nil { - return - } - glog.V(4).Infof("Pod %s created: %#v.", pod.Name, pod) - rm.expectations.CreationObserved(rsKey) - rm.enqueueController(rc) - return - } - - // Otherwise, it's an orphan. Get a list of all matching ReplicationControllers and sync - // them to see if anyone wants to adopt it. - // DO NOT observe creation because no controller should be waiting for an - // orphan. - rcs := rm.getPodControllers(pod) - if len(rcs) == 0 { - return - } - glog.V(4).Infof("Orphan Pod %s created: %#v.", pod.Name, pod) - for _, rc := range rcs { - rm.enqueueController(rc) - } -} - -// When a pod is updated, figure out what ReplicationController/s manage it and wake them -// up. If the labels of the pod have changed we need to awaken both the old -// and new ReplicationController. old and cur must be *v1.Pod types. -func (rm *ReplicationManager) updatePod(old, cur interface{}) { - curPod := cur.(*v1.Pod) - oldPod := old.(*v1.Pod) - if curPod.ResourceVersion == oldPod.ResourceVersion { - // Periodic resync will send update events for all known pods. - // Two different versions of the same pod will always have different RVs. - return - } - - labelChanged := !reflect.DeepEqual(curPod.Labels, oldPod.Labels) - if curPod.DeletionTimestamp != nil { - // when a pod is deleted gracefully it's deletion timestamp is first modified to reflect a grace period, - // and after such time has passed, the kubelet actually deletes it from the store. We receive an update - // for modification of the deletion timestamp and expect an rc to create more replicas asap, not wait - // until the kubelet actually deletes the pod. This is different from the Phase of a pod changing, because - // an rc never initiates a phase change, and so is never asleep waiting for the same. - rm.deletePod(curPod) - if labelChanged { - // we don't need to check the oldPod.DeletionTimestamp because DeletionTimestamp cannot be unset. - rm.deletePod(oldPod) - } - return - } - - curControllerRef := metav1.GetControllerOf(curPod) - oldControllerRef := metav1.GetControllerOf(oldPod) - controllerRefChanged := !reflect.DeepEqual(curControllerRef, oldControllerRef) - if controllerRefChanged && oldControllerRef != nil { - // The ControllerRef was changed. Sync the old controller, if any. - if rc := rm.resolveControllerRef(oldPod.Namespace, oldControllerRef); rc != nil { - rm.enqueueController(rc) - } - } - - // If it has a ControllerRef, that's all that matters. 
- if curControllerRef != nil { - rc := rm.resolveControllerRef(curPod.Namespace, curControllerRef) - if rc == nil { - return - } - glog.V(4).Infof("Pod %s updated, objectMeta %+v -> %+v.", curPod.Name, oldPod.ObjectMeta, curPod.ObjectMeta) - rm.enqueueController(rc) - // TODO: MinReadySeconds in the Pod will generate an Available condition to be added in - // the Pod status which in turn will trigger a requeue of the owning ReplicationController thus - // having its status updated with the newly available replica. For now, we can fake the - // update by resyncing the controller MinReadySeconds after the it is requeued because - // a Pod transitioned to Ready. - // Note that this still suffers from #29229, we are just moving the problem one level - // "closer" to kubelet (from the deployment to the ReplicationController controller). - if !podutil.IsPodReady(oldPod) && podutil.IsPodReady(curPod) && rc.Spec.MinReadySeconds > 0 { - glog.V(2).Infof("ReplicationController %q will be enqueued after %ds for availability check", rc.Name, rc.Spec.MinReadySeconds) - // Add a second to avoid milliseconds skew in AddAfter. - // See https://github.com/kubernetes/kubernetes/issues/39785#issuecomment-279959133 for more info. - rm.enqueueControllerAfter(rc, (time.Duration(rc.Spec.MinReadySeconds)*time.Second)+time.Second) - } - return - } - - // Otherwise, it's an orphan. If anything changed, sync matching controllers - // to see if anyone wants to adopt it now. - if labelChanged || controllerRefChanged { - rcs := rm.getPodControllers(curPod) - if len(rcs) == 0 { - return - } - glog.V(4).Infof("Orphan Pod %s updated, objectMeta %+v -> %+v.", curPod.Name, oldPod.ObjectMeta, curPod.ObjectMeta) - for _, rc := range rcs { - rm.enqueueController(rc) - } - } -} - -// When a pod is deleted, enqueue the ReplicationController that manages the pod and update its expectations. -// obj could be an *v1.Pod, or a DeletionFinalStateUnknown marker item. -func (rm *ReplicationManager) deletePod(obj interface{}) { - pod, ok := obj.(*v1.Pod) - - // When a delete is dropped, the relist will notice a pod in the store not - // in the list, leading to the insertion of a tombstone object which contains - // the deleted key/value. Note that this value might be stale. If the pod - // changed labels the new ReplicationController will not be woken up till the periodic resync. - if !ok { - tombstone, ok := obj.(cache.DeletedFinalStateUnknown) - if !ok { - utilruntime.HandleError(fmt.Errorf("couldn't get object from tombstone %+v", obj)) - return - } - pod, ok = tombstone.Obj.(*v1.Pod) - if !ok { - utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a pod %#v", obj)) - return - } - } - - controllerRef := metav1.GetControllerOf(pod) - if controllerRef == nil { - // No controller should care about orphans being deleted. - return - } - rc := rm.resolveControllerRef(pod.Namespace, controllerRef) - if rc == nil { - return - } - rsKey, err := controller.KeyFunc(rc) - if err != nil { - return - } - glog.V(4).Infof("Pod %s/%s deleted through %v, timestamp %+v: %#v.", pod.Namespace, pod.Name, utilruntime.GetCaller(), pod.DeletionTimestamp, pod) - rm.expectations.DeletionObserved(rsKey, controller.PodKey(pod)) - rm.enqueueController(rc) -} - -// obj could be an *v1.ReplicationController, or a DeletionFinalStateUnknown marker item. 
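Editor's note: the deletePod handler being removed above and the new conversionEventHandler.OnDelete in conversion.go follow the same tombstone pattern: a delete notification may carry either the object itself or a cache.DeletedFinalStateUnknown wrapper whose Obj field holds the last known state. A standalone sketch of that unwrapping, with stand-in types in place of the client-go cache package:

package main

import "fmt"

type pod struct{ Name string }

// deletedFinalStateUnknown is a stand-in for cache.DeletedFinalStateUnknown:
// it is delivered when the watch missed the actual delete event.
type deletedFinalStateUnknown struct {
    Key string
    Obj interface{}
}

// podFromDeleteEvent unwraps a delete notification into a *pod, or reports
// why it could not, mirroring the two type assertions in deletePod/OnDelete.
func podFromDeleteEvent(obj interface{}) (*pod, error) {
    if p, ok := obj.(*pod); ok {
        return p, nil
    }
    tombstone, ok := obj.(deletedFinalStateUnknown)
    if !ok {
        return nil, fmt.Errorf("couldn't get object from tombstone %+v", obj)
    }
    p, ok := tombstone.Obj.(*pod)
    if !ok {
        return nil, fmt.Errorf("tombstone contained object that is not a pod %#v", tombstone.Obj)
    }
    return p, nil
}

func main() {
    direct, _ := podFromDeleteEvent(&pod{Name: "frontend-abc"})
    wrapped, _ := podFromDeleteEvent(deletedFinalStateUnknown{Key: "default/frontend-xyz", Obj: &pod{Name: "frontend-xyz"}})
    _, err := podFromDeleteEvent("garbage")
    fmt.Println(direct.Name, wrapped.Name, err)
}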
-func (rm *ReplicationManager) enqueueController(obj interface{}) { - key, err := controller.KeyFunc(obj) - if err != nil { - utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", obj, err)) - return - } - rm.queue.Add(key) -} - -// obj could be an *v1.ReplicationController, or a DeletionFinalStateUnknown marker item. -func (rm *ReplicationManager) enqueueControllerAfter(obj interface{}, after time.Duration) { - key, err := controller.KeyFunc(obj) - if err != nil { - utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", obj, err)) - return - } - rm.queue.AddAfter(key, after) -} - -// worker runs a worker thread that just dequeues items, processes them, and marks them done. -// It enforces that the syncHandler is never invoked concurrently with the same key. -func (rm *ReplicationManager) worker() { - for rm.processNextWorkItem() { - } - glog.Infof("replication controller worker shutting down") -} - -func (rm *ReplicationManager) processNextWorkItem() bool { - key, quit := rm.queue.Get() - if quit { - return false - } - defer rm.queue.Done(key) - - err := rm.syncHandler(key.(string)) - if err == nil { - rm.queue.Forget(key) - return true - } - - rm.queue.AddRateLimited(key) - utilruntime.HandleError(err) - return true -} - -// manageReplicas checks and updates replicas for the given replication controller. -// Does NOT modify . -func (rm *ReplicationManager) manageReplicas(filteredPods []*v1.Pod, rc *v1.ReplicationController) error { - diff := len(filteredPods) - int(*(rc.Spec.Replicas)) - rcKey, err := controller.KeyFunc(rc) - if err != nil { - return err - } - if diff == 0 { - return nil - } - - if diff < 0 { - diff *= -1 - if diff > rm.burstReplicas { - diff = rm.burstReplicas - } - // TODO: Track UIDs of creates just like deletes. The problem currently - // is we'd need to wait on the result of a create to record the pod's - // UID, which would require locking *across* the create, which will turn - // into a performance bottleneck. We should generate a UID for the pod - // beforehand and store it via ExpectCreations. - errCh := make(chan error, diff) - rm.expectations.ExpectCreations(rcKey, diff) - var wg sync.WaitGroup - glog.V(2).Infof("Too few %q/%q replicas, need %d, creating %d", rc.Namespace, rc.Name, *(rc.Spec.Replicas), diff) - // Batch the pod creates. Batch sizes start at SlowStartInitialBatchSize - // and double with each successful iteration in a kind of "slow start". - // This handles attempts to start large numbers of pods that would - // likely all fail with the same error. For example a project with a - // low quota that attempts to create a large number of pods will be - // prevented from spamming the API service with the pod create requests - // after one of its pods fails. Conveniently, this also prevents the - // event spam that those failures would generate. 
- for batchSize := integer.IntMin(diff, controller.SlowStartInitialBatchSize); diff > 0; batchSize = integer.IntMin(2*batchSize, diff) { - errorCount := len(errCh) - wg.Add(batchSize) - for i := 0; i < batchSize; i++ { - go func() { - defer wg.Done() - var err error - boolPtr := func(b bool) *bool { return &b } - controllerRef := &metav1.OwnerReference{ - APIVersion: controllerKind.GroupVersion().String(), - Kind: controllerKind.Kind, - Name: rc.Name, - UID: rc.UID, - BlockOwnerDeletion: boolPtr(true), - Controller: boolPtr(true), - } - err = rm.podControl.CreatePodsWithControllerRef(rc.Namespace, rc.Spec.Template, rc, controllerRef) - if err != nil && errors.IsTimeout(err) { - // Pod is created but its initialization has timed out. - // If the initialization is successful eventually, the - // controller will observe the creation via the informer. - // If the initialization fails, or if the pod keeps - // uninitialized for a long time, the informer will not - // receive any update, and the controller will create a new - // pod when the expectation expires. - return - } - if err != nil { - // Decrement the expected number of creates because the informer won't observe this pod - glog.V(2).Infof("Failed creation, decrementing expectations for controller %q/%q", rc.Namespace, rc.Name) - rm.expectations.CreationObserved(rcKey) - errCh <- err - utilruntime.HandleError(err) - } - }() - } - wg.Wait() - // any skipped pods that we never attempted to start shouldn't be expected. - skippedPods := diff - batchSize - if errorCount < len(errCh) && skippedPods > 0 { - glog.V(2).Infof("Slow-start failure. Skipping creation of %d pods, decrementing expectations for controller %q/%q", skippedPods, rc.Namespace, rc.Name) - for i := 0; i < skippedPods; i++ { - // Decrement the expected number of creates because the informer won't observe this pod - rm.expectations.CreationObserved(rcKey) - } - // The skipped pods will be retried later. The next controller resync will - // retry the slow start process. - break - } - diff -= batchSize - } - - select { - case err := <-errCh: - // all errors have been reported before and they're likely to be the same, so we'll only return the first one we hit. - if err != nil { - return err - } - default: - } - - return nil - } - - if diff > rm.burstReplicas { - diff = rm.burstReplicas - } - glog.V(2).Infof("Too many %q/%q replicas, need %d, deleting %d", rc.Namespace, rc.Name, *(rc.Spec.Replicas), diff) - // No need to sort pods if we are about to delete all of them - if *(rc.Spec.Replicas) != 0 { - // Sort the pods in the order such that not-ready < ready, unscheduled - // < scheduled, and pending < running. This ensures that we delete pods - // in the earlier stages whenever possible. - sort.Sort(controller.ActivePods(filteredPods)) - } - // Snapshot the UIDs (ns/name) of the pods we're expecting to see - // deleted, so we know to record their expectations exactly once either - // when we see it as an update of the deletion timestamp, or as a delete. - // Note that if the labels on a pod/rc change in a way that the pod gets - // orphaned, the rs will only wake up after the expectations have - // expired even if other pods are deleted. - deletedPodKeys := []string{} - for i := 0; i < diff; i++ { - deletedPodKeys = append(deletedPodKeys, controller.PodKey(filteredPods[i])) - } - // We use pod namespace/name as a UID to wait for deletions, so if the - // labels on a pod/rc change in a way that the pod gets orphaned, the - // rc will only wake up after the expectation has expired. 
- errCh := make(chan error, diff) - rm.expectations.ExpectDeletions(rcKey, deletedPodKeys) - var wg sync.WaitGroup - wg.Add(diff) - for i := 0; i < diff; i++ { - go func(ix int) { - defer wg.Done() - if err := rm.podControl.DeletePod(rc.Namespace, filteredPods[ix].Name, rc); err != nil { - // Decrement the expected number of deletes because the informer won't observe this deletion - podKey := controller.PodKey(filteredPods[ix]) - glog.V(2).Infof("Failed to delete %v due to %v, decrementing expectations for controller %q/%q", podKey, err, rc.Namespace, rc.Name) - rm.expectations.DeletionObserved(rcKey, podKey) - errCh <- err - utilruntime.HandleError(err) - } - }(i) - } - wg.Wait() - - select { - case err := <-errCh: - // all errors have been reported before and they're likely to be the same, so we'll only return the first one we hit. - if err != nil { - return err - } - default: - } - - return nil - -} - -// syncReplicationController will sync the rc with the given key if it has had its expectations fulfilled, meaning -// it did not expect to see any more of its pods created or deleted. This function is not meant to be invoked -// concurrently with the same key. -func (rm *ReplicationManager) syncReplicationController(key string) error { - trace := utiltrace.New("syncReplicationController: " + key) - defer trace.LogIfLong(250 * time.Millisecond) - - startTime := time.Now() - defer func() { - glog.V(4).Infof("Finished syncing controller %q (%v)", key, time.Now().Sub(startTime)) - }() - - namespace, name, err := cache.SplitMetaNamespaceKey(key) - if err != nil { - return err - } - rc, err := rm.rcLister.ReplicationControllers(namespace).Get(name) - if errors.IsNotFound(err) { - glog.Infof("Replication Controller has been deleted %v", key) - rm.expectations.DeleteExpectations(key) - return nil - } - if err != nil { - return err - } - - trace.Step("ReplicationController restored") - rcNeedsSync := rm.expectations.SatisfiedExpectations(key) - trace.Step("Expectations restored") - - // list all pods to include the pods that don't match the rc's selector - // anymore but has the stale controller ref. - // TODO: Do the List and Filter in a single pass, or use an index. - allPods, err := rm.podLister.Pods(rc.Namespace).List(labels.Everything()) - if err != nil { - return err - } - // Ignore inactive pods. - var filteredPods []*v1.Pod - for _, pod := range allPods { - if controller.IsPodActive(pod) { - filteredPods = append(filteredPods, pod) - } - } - // If any adoptions are attempted, we should first recheck for deletion with - // an uncached quorum read sometime after listing Pods (see #42639). - canAdoptFunc := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) { - fresh, err := rm.kubeClient.CoreV1().ReplicationControllers(rc.Namespace).Get(rc.Name, metav1.GetOptions{}) - if err != nil { - return nil, err - } - if fresh.UID != rc.UID { - return nil, fmt.Errorf("original ReplicationController %v/%v is gone: got uid %v, wanted %v", rc.Namespace, rc.Name, fresh.UID, rc.UID) - } - return fresh, nil - }) - cm := controller.NewPodControllerRefManager(rm.podControl, rc, labels.Set(rc.Spec.Selector).AsSelectorPreValidated(), controllerKind, canAdoptFunc) - // NOTE: filteredPods are pointing to objects from cache - if you need to - // modify them, you need to copy it first. 
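Both the creation and deletion paths above lean on the controller's expectations bookkeeping: before issuing N creates or deletes, the controller records that it expects to observe N matching informer events, and it decrements that count early whenever an API call fails, so a lost event cannot stall the next sync forever. The sketch below is a toy version of that idea, not the real expectations type in pkg/controller.

package main

import (
	"fmt"
	"sync"
)

// expectations is a stripped-down counter of creates/deletes a controller
// still expects the informer to report back for each controller key.
type expectations struct {
	mu     sync.Mutex
	counts map[string]int
}

func newExpectations() *expectations {
	return &expectations{counts: map[string]int{}}
}

// Expect records that n operations were just issued for the given key.
func (e *expectations) Expect(key string, n int) {
	e.mu.Lock()
	defer e.mu.Unlock()
	e.counts[key] += n
}

// Observed is called when the informer reports one event, or when an API call
// failed and the corresponding event will therefore never arrive.
func (e *expectations) Observed(key string) {
	e.mu.Lock()
	defer e.mu.Unlock()
	if e.counts[key] > 0 {
		e.counts[key]--
	}
}

// Satisfied tells the sync loop whether it is safe to act on this key again.
func (e *expectations) Satisfied(key string) bool {
	e.mu.Lock()
	defer e.mu.Unlock()
	return e.counts[key] == 0
}

func main() {
	exp := newExpectations()
	exp.Expect("default/frontend", 3)
	fmt.Println(exp.Satisfied("default/frontend")) // false: waiting on 3 events
	for i := 0; i < 3; i++ {
		exp.Observed("default/frontend")
	}
	fmt.Println(exp.Satisfied("default/frontend")) // true: safe to sync again
}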
- filteredPods, err = cm.ClaimPods(filteredPods) - if err != nil { - return err - } - - var manageReplicasErr error - if rcNeedsSync && rc.DeletionTimestamp == nil { - manageReplicasErr = rm.manageReplicas(filteredPods, rc) - } - trace.Step("manageReplicas done") - - rc = rc.DeepCopy() - - newStatus := calculateStatus(rc, filteredPods, manageReplicasErr) - - // Always updates status as pods come up or die. - updatedRC, err := updateReplicationControllerStatus(rm.kubeClient.Core().ReplicationControllers(rc.Namespace), *rc, newStatus) - if err != nil { - // Multiple things could lead to this update failing. Returning an error causes a requeue without forcing a hotloop - return err - } - // Resync the ReplicationController after MinReadySeconds as a last line of defense to guard against clock-skew. - if manageReplicasErr == nil && updatedRC.Spec.MinReadySeconds > 0 && - updatedRC.Status.ReadyReplicas == *(updatedRC.Spec.Replicas) && - updatedRC.Status.AvailableReplicas != *(updatedRC.Spec.Replicas) { - rm.enqueueControllerAfter(updatedRC, time.Duration(updatedRC.Spec.MinReadySeconds)*time.Second) + eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")}) + return &ReplicationManager{ + *replicaset.NewBaseController(informerAdapter{rcInformer}, podInformer, clientsetAdapter{kubeClient}, burstReplicas, + v1.SchemeGroupVersion.WithKind("ReplicationController"), + "replication_controller", + "replicationmanager", + podControlAdapter{controller.RealPodControl{ + KubeClient: kubeClient, + Recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "replication-controller"}), + }}, + ), } - return manageReplicasErr } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/replication/replication_controller_utils.go b/vendor/k8s.io/kubernetes/pkg/controller/replication/replication_controller_utils.go index 625317becb39..506074b83e46 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/replication/replication_controller_utils.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/replication/replication_controller_utils.go @@ -19,123 +19,10 @@ limitations under the License. package replication import ( - "fmt" - "reflect" - - "github.com/golang/glog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - v1core "k8s.io/client-go/kubernetes/typed/core/v1" - podutil "k8s.io/kubernetes/pkg/api/v1/pod" ) -// updateReplicationControllerStatus attempts to update the Status.Replicas of the given controller, with a single GET/PUT retry. -func updateReplicationControllerStatus(c v1core.ReplicationControllerInterface, rc v1.ReplicationController, newStatus v1.ReplicationControllerStatus) (*v1.ReplicationController, error) { - // This is the steady state. It happens when the rc doesn't have any expectations, since - // we do a periodic relist every 30s. If the generations differ but the replicas are - // the same, a caller might've resized to the same replica count. - if rc.Status.Replicas == newStatus.Replicas && - rc.Status.FullyLabeledReplicas == newStatus.FullyLabeledReplicas && - rc.Status.ReadyReplicas == newStatus.ReadyReplicas && - rc.Status.AvailableReplicas == newStatus.AvailableReplicas && - rc.Generation == rc.Status.ObservedGeneration && - reflect.DeepEqual(rc.Status.Conditions, newStatus.Conditions) { - return &rc, nil - } - // Save the generation number we acted on, otherwise we might wrongfully indicate - // that we've seen a spec update when we retry. 
- // TODO: This can clobber an update if we allow multiple agents to write to the - // same status. - newStatus.ObservedGeneration = rc.Generation - - var getErr, updateErr error - var updatedRC *v1.ReplicationController - for i, rc := 0, &rc; ; i++ { - glog.V(4).Infof(fmt.Sprintf("Updating status for rc: %s/%s, ", rc.Namespace, rc.Name) + - fmt.Sprintf("replicas %d->%d (need %d), ", rc.Status.Replicas, newStatus.Replicas, *(rc.Spec.Replicas)) + - fmt.Sprintf("fullyLabeledReplicas %d->%d, ", rc.Status.FullyLabeledReplicas, newStatus.FullyLabeledReplicas) + - fmt.Sprintf("readyReplicas %d->%d, ", rc.Status.ReadyReplicas, newStatus.ReadyReplicas) + - fmt.Sprintf("availableReplicas %d->%d, ", rc.Status.AvailableReplicas, newStatus.AvailableReplicas) + - fmt.Sprintf("sequence No: %v->%v", rc.Status.ObservedGeneration, newStatus.ObservedGeneration)) - - rc.Status = newStatus - updatedRC, updateErr = c.UpdateStatus(rc) - if updateErr == nil { - return updatedRC, nil - } - // Stop retrying if we exceed statusUpdateRetries - the replicationController will be requeued with a rate limit. - if i >= statusUpdateRetries { - break - } - // Update the controller with the latest resource version for the next poll - if rc, getErr = c.Get(rc.Name, metav1.GetOptions{}); getErr != nil { - // If the GET fails we can't trust status.Replicas anymore. This error - // is bound to be more interesting than the update failure. - return nil, getErr - } - } - - return nil, updateErr -} - -// OverlappingControllers sorts a list of controllers by creation timestamp, using their names as a tie breaker. -type OverlappingControllers []*v1.ReplicationController - -func (o OverlappingControllers) Len() int { return len(o) } -func (o OverlappingControllers) Swap(i, j int) { o[i], o[j] = o[j], o[i] } - -func (o OverlappingControllers) Less(i, j int) bool { - if o[i].CreationTimestamp.Equal(&o[j].CreationTimestamp) { - return o[i].Name < o[j].Name - } - return o[i].CreationTimestamp.Before(&o[j].CreationTimestamp) -} - -func calculateStatus(rc *v1.ReplicationController, filteredPods []*v1.Pod, manageReplicasErr error) v1.ReplicationControllerStatus { - newStatus := rc.Status - // Count the number of pods that have labels matching the labels of the pod - // template of the replication controller, the matching pods may have more - // labels than are in the template. Because the label of podTemplateSpec is - // a superset of the selector of the replication controller, so the possible - // matching pods must be part of the filteredPods. 
- fullyLabeledReplicasCount := 0 - readyReplicasCount := 0 - availableReplicasCount := 0 - templateLabel := labels.Set(rc.Spec.Template.Labels).AsSelectorPreValidated() - for _, pod := range filteredPods { - if templateLabel.Matches(labels.Set(pod.Labels)) { - fullyLabeledReplicasCount++ - } - if podutil.IsPodReady(pod) { - readyReplicasCount++ - if podutil.IsPodAvailable(pod, rc.Spec.MinReadySeconds, metav1.Now()) { - availableReplicasCount++ - } - } - } - - failureCond := GetCondition(rc.Status, v1.ReplicationControllerReplicaFailure) - if manageReplicasErr != nil && failureCond == nil { - var reason string - if diff := len(filteredPods) - int(*(rc.Spec.Replicas)); diff < 0 { - reason = "FailedCreate" - } else if diff > 0 { - reason = "FailedDelete" - } - cond := NewReplicationControllerCondition(v1.ReplicationControllerReplicaFailure, v1.ConditionTrue, reason, manageReplicasErr.Error()) - SetCondition(&newStatus, cond) - } else if manageReplicasErr == nil && failureCond != nil { - RemoveCondition(&newStatus, v1.ReplicationControllerReplicaFailure) - } - - newStatus.Replicas = int32(len(filteredPods)) - newStatus.FullyLabeledReplicas = int32(fullyLabeledReplicasCount) - newStatus.ReadyReplicas = int32(readyReplicasCount) - newStatus.AvailableReplicas = int32(availableReplicasCount) - return newStatus -} - // NewReplicationControllerCondition creates a new replication controller condition. func NewReplicationControllerCondition(condType v1.ReplicationControllerConditionType, status v1.ConditionStatus, reason, msg string) v1.ReplicationControllerCondition { return v1.ReplicationControllerCondition{ diff --git a/vendor/k8s.io/kubernetes/pkg/controller/resourcequota/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/resourcequota/BUILD index b5661e495265..2b4bf45648d9 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/resourcequota/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/resourcequota/BUILD @@ -10,15 +10,17 @@ go_library( name = "go_default_library", srcs = [ "doc.go", - "replenishment_controller.go", "resource_quota_controller.go", + "resource_quota_monitor.go", ], + importpath = "k8s.io/kubernetes/pkg/controller/resourcequota", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/v1:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/v1:go_default_library", "//pkg/controller:go_default_library", "//pkg/quota:go_default_library", "//pkg/quota/evaluator/core:go_default_library", + "//pkg/quota/generic:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", @@ -26,11 +28,12 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/client-go/discovery:go_default_library", "//vendor/k8s.io/client-go/informers:go_default_library", "//vendor/k8s.io/client-go/informers/core/v1:go_default_library", 
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", @@ -42,14 +45,12 @@ go_library( go_test( name = "go_default_test", - srcs = [ - "replenishment_controller_test.go", - "resource_quota_controller_test.go", - ], + srcs = ["resource_quota_controller_test.go"], + importpath = "k8s.io/kubernetes/pkg/controller/resourcequota", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", "//pkg/controller:go_default_library", + "//pkg/quota:go_default_library", "//pkg/quota/generic:go_default_library", "//pkg/quota/install:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", @@ -57,12 +58,12 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/client-go/informers:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library", "//vendor/k8s.io/client-go/testing:go_default_library", + "//vendor/k8s.io/client-go/tools/cache:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/controller/resourcequota/replenishment_controller.go b/vendor/k8s.io/kubernetes/pkg/controller/resourcequota/replenishment_controller.go deleted file mode 100644 index 5f063e656836..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/controller/resourcequota/replenishment_controller.go +++ /dev/null @@ -1,233 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resourcequota - -import ( - "fmt" - - "github.com/golang/glog" - - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/clock" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/client-go/informers" - "k8s.io/client-go/tools/cache" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/controller" - "k8s.io/kubernetes/pkg/quota/evaluator/core" -) - -// ReplenishmentFunc is a function that is invoked when controller sees a change -// that may require a quota to be replenished (i.e. 
object deletion, or object moved to terminal state) -type ReplenishmentFunc func(groupKind schema.GroupKind, namespace string, object runtime.Object) - -// ReplenishmentControllerOptions is an options struct that tells a factory -// how to configure a controller that can inform the quota system it should -// replenish quota -type ReplenishmentControllerOptions struct { - // The kind monitored for replenishment - GroupKind schema.GroupKind - // The period that should be used to re-sync the monitored resource - ResyncPeriod controller.ResyncPeriodFunc - // The function to invoke when a change is observed that should trigger - // replenishment - ReplenishmentFunc ReplenishmentFunc -} - -// PodReplenishmentUpdateFunc will replenish if the old pod was quota tracked but the new is not -func PodReplenishmentUpdateFunc(options *ReplenishmentControllerOptions, clock clock.Clock) func(oldObj, newObj interface{}) { - return func(oldObj, newObj interface{}) { - oldPod := oldObj.(*v1.Pod) - newPod := newObj.(*v1.Pod) - if core.QuotaV1Pod(oldPod, clock) && !core.QuotaV1Pod(newPod, clock) { - options.ReplenishmentFunc(options.GroupKind, newPod.Namespace, oldPod) - } - } -} - -// ObjectReplenishmentDeleteFunc will replenish on every delete -func ObjectReplenishmentDeleteFunc(options *ReplenishmentControllerOptions) func(obj interface{}) { - return func(obj interface{}) { - metaObject, err := meta.Accessor(obj) - if err != nil { - tombstone, ok := obj.(cache.DeletedFinalStateUnknown) - if !ok { - glog.Errorf("replenishment controller could not get object from tombstone %+v, could take up to %v before quota is replenished", obj, options.ResyncPeriod()) - utilruntime.HandleError(err) - return - } - metaObject, err = meta.Accessor(tombstone.Obj) - if err != nil { - glog.Errorf("replenishment controller tombstone contained object that is not a meta %+v, could take up to %v before quota is replenished", tombstone.Obj, options.ResyncPeriod()) - utilruntime.HandleError(err) - return - } - } - options.ReplenishmentFunc(options.GroupKind, metaObject.GetNamespace(), nil) - } -} - -// ReplenishmentControllerFactory knows how to build replenishment controllers -type ReplenishmentControllerFactory interface { - // NewController returns a controller configured with the specified options. - // This method is NOT thread-safe. 
- NewController(options *ReplenishmentControllerOptions) (cache.Controller, error) -} - -// replenishmentControllerFactory implements ReplenishmentControllerFactory -type replenishmentControllerFactory struct { - sharedInformerFactory informers.SharedInformerFactory -} - -// NewReplenishmentControllerFactory returns a factory that knows how to build controllers -// to replenish resources when updated or deleted -func NewReplenishmentControllerFactory(f informers.SharedInformerFactory) ReplenishmentControllerFactory { - return &replenishmentControllerFactory{ - sharedInformerFactory: f, - } -} - -func (r *replenishmentControllerFactory) NewController(options *ReplenishmentControllerOptions) (cache.Controller, error) { - var ( - informer informers.GenericInformer - err error - ) - - switch options.GroupKind { - case api.Kind("Pod"): - informer, err = r.sharedInformerFactory.ForResource(v1.SchemeGroupVersion.WithResource("pods")) - if err != nil { - return nil, err - } - clock := clock.RealClock{} - informer.Informer().AddEventHandlerWithResyncPeriod( - cache.ResourceEventHandlerFuncs{ - UpdateFunc: PodReplenishmentUpdateFunc(options, clock), - DeleteFunc: ObjectReplenishmentDeleteFunc(options), - }, - options.ResyncPeriod(), - ) - case api.Kind("Service"): - informer, err = r.sharedInformerFactory.ForResource(v1.SchemeGroupVersion.WithResource("services")) - if err != nil { - return nil, err - } - informer.Informer().AddEventHandlerWithResyncPeriod( - cache.ResourceEventHandlerFuncs{ - UpdateFunc: ServiceReplenishmentUpdateFunc(options), - DeleteFunc: ObjectReplenishmentDeleteFunc(options), - }, - options.ResyncPeriod(), - ) - case api.Kind("ReplicationController"): - informer, err = r.sharedInformerFactory.ForResource(v1.SchemeGroupVersion.WithResource("replicationcontrollers")) - if err != nil { - return nil, err - } - informer.Informer().AddEventHandlerWithResyncPeriod( - cache.ResourceEventHandlerFuncs{ - DeleteFunc: ObjectReplenishmentDeleteFunc(options), - }, - options.ResyncPeriod(), - ) - case api.Kind("PersistentVolumeClaim"): - informer, err = r.sharedInformerFactory.ForResource(v1.SchemeGroupVersion.WithResource("persistentvolumeclaims")) - if err != nil { - return nil, err - } - informer.Informer().AddEventHandlerWithResyncPeriod( - cache.ResourceEventHandlerFuncs{ - DeleteFunc: ObjectReplenishmentDeleteFunc(options), - }, - options.ResyncPeriod(), - ) - case api.Kind("Secret"): - informer, err = r.sharedInformerFactory.ForResource(v1.SchemeGroupVersion.WithResource("secrets")) - if err != nil { - return nil, err - } - informer.Informer().AddEventHandlerWithResyncPeriod( - cache.ResourceEventHandlerFuncs{ - DeleteFunc: ObjectReplenishmentDeleteFunc(options), - }, - options.ResyncPeriod(), - ) - case api.Kind("ConfigMap"): - informer, err = r.sharedInformerFactory.ForResource(v1.SchemeGroupVersion.WithResource("configmaps")) - if err != nil { - return nil, err - } - informer.Informer().AddEventHandlerWithResyncPeriod( - cache.ResourceEventHandlerFuncs{ - DeleteFunc: ObjectReplenishmentDeleteFunc(options), - }, - options.ResyncPeriod(), - ) - default: - return nil, NewUnhandledGroupKindError(options.GroupKind) - } - return informer.Informer().GetController(), nil -} - -// ServiceReplenishmentUpdateFunc will replenish if the service was quota tracked has changed service type -func ServiceReplenishmentUpdateFunc(options *ReplenishmentControllerOptions) func(oldObj, newObj interface{}) { - return func(oldObj, newObj interface{}) { - oldService := oldObj.(*v1.Service) - newService := 
newObj.(*v1.Service) - if core.GetQuotaServiceType(oldService) != core.GetQuotaServiceType(newService) { - options.ReplenishmentFunc(options.GroupKind, newService.Namespace, nil) - } - } -} - -type unhandledKindErr struct { - kind schema.GroupKind -} - -func (e unhandledKindErr) Error() string { - return fmt.Sprintf("no replenishment controller available for %s", e.kind) -} - -func NewUnhandledGroupKindError(kind schema.GroupKind) error { - return unhandledKindErr{kind: kind} -} - -func IsUnhandledGroupKindError(err error) bool { - if err == nil { - return false - } - _, ok := err.(unhandledKindErr) - return ok -} - -// UnionReplenishmentControllerFactory iterates through its constituent factories ignoring, UnhandledGroupKindErrors -// returning the first success or failure it hits. If there are no hits either way, it return an UnhandledGroupKind error -type UnionReplenishmentControllerFactory []ReplenishmentControllerFactory - -func (f UnionReplenishmentControllerFactory) NewController(options *ReplenishmentControllerOptions) (cache.Controller, error) { - for _, factory := range f { - controller, err := factory.NewController(options) - if !IsUnhandledGroupKindError(err) { - return controller, err - } - } - - return nil, NewUnhandledGroupKindError(options.GroupKind) -} diff --git a/vendor/k8s.io/kubernetes/pkg/controller/resourcequota/resource_quota_controller.go b/vendor/k8s.io/kubernetes/pkg/controller/resourcequota/resource_quota_controller.go index 43acf9878cfd..7ac8d350ac73 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/resourcequota/resource_quota_controller.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/resourcequota/resource_quota_controller.go @@ -18,6 +18,8 @@ package resourcequota import ( "fmt" + "reflect" + "sync" "time" "github.com/golang/glog" @@ -27,21 +29,35 @@ import ( "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/discovery" + "k8s.io/client-go/informers" coreinformers "k8s.io/client-go/informers/core/v1" corev1client "k8s.io/client-go/kubernetes/typed/core/v1" corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" - "k8s.io/kubernetes/pkg/api" - k8s_api_v1 "k8s.io/kubernetes/pkg/api/v1" + api "k8s.io/kubernetes/pkg/apis/core" + k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/quota" ) +// NamespacedResourcesFunc knows how to discover namespaced resources. +type NamespacedResourcesFunc func() ([]*metav1.APIResourceList, error) + +// ReplenishmentFunc is a signal that a resource changed in specified namespace +// that may require quota to be recalculated. +type ReplenishmentFunc func(groupResource schema.GroupResource, namespace string) + +// InformerFactory is all the quota system needs to interface with informers. 
+type InformerFactory interface { + ForResource(resource schema.GroupVersionResource) (informers.GenericInformer, error) + Start(stopCh <-chan struct{}) +} + // ResourceQuotaControllerOptions holds options for creating a quota controller type ResourceQuotaControllerOptions struct { // Must have authority to list all quotas, and update quota status @@ -50,15 +66,18 @@ type ResourceQuotaControllerOptions struct { ResourceQuotaInformer coreinformers.ResourceQuotaInformer // Controls full recalculation of quota usage ResyncPeriod controller.ResyncPeriodFunc - // Knows how to calculate usage + // Maintains evaluators that know how to calculate usage for group resource Registry quota.Registry - // Knows how to build controllers that notify replenishment events - ControllerFactory ReplenishmentControllerFactory + // Discover list of supported resources on the server. + DiscoveryFunc NamespacedResourcesFunc + // A function that returns the list of resources to ignore + IgnoredResourcesFunc func() map[schema.GroupResource]struct{} + // InformersStarted knows if informers were started. + InformersStarted <-chan struct{} + // InformerFactory interfaces with informers. + InformerFactory InformerFactory // Controls full resync of objects monitored for replenishment. ReplenishmentResyncPeriod controller.ResyncPeriodFunc - // List of GroupKind objects that should be monitored for replenishment at - // a faster frequency than the quota controller recalculation interval - GroupKindsToReplenish []schema.GroupKind } // ResourceQuotaController is responsible for tracking quota usage status in the system @@ -79,21 +98,25 @@ type ResourceQuotaController struct { resyncPeriod controller.ResyncPeriodFunc // knows how to calculate usage registry quota.Registry - // controllers monitoring to notify for replenishment - replenishmentControllers []cache.Controller + // knows how to monitor all the resources tracked by quota and trigger replenishment + quotaMonitor *QuotaMonitor + // controls the workers that process quotas + // this lock is acquired to control write access to the monitors and ensures that all + // monitors are synced before the controller can process quotas. 
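As a rough illustration of how the new discovery-driven options fit together (this sketch is not part of the diff; the worker count, resync durations, empty ignored-resources list, and the assumption that a quota.Registry is built elsewhere are all choices made only for the example): discovery supplies the namespaced resource list, a client-go shared informer factory satisfies the InformerFactory interface above, and the controller's Run and Sync loops run side by side.

package quotaexample

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/controller/resourcequota"
	"k8s.io/kubernetes/pkg/quota"
)

// startQuotaController is an illustrative wiring of the options introduced in
// this diff. Only fields visible in the diff are set; error handling and the
// real ignored-resources list are trimmed down.
func startQuotaController(client kubernetes.Interface, registry quota.Registry, stopCh <-chan struct{}) error {
	sharedInformers := informers.NewSharedInformerFactory(client, 10*time.Minute)

	// Discovery tells the controller which namespaced resources exist;
	// GetQuotableResources later filters them by supported verbs.
	discoveryFunc := func() ([]*metav1.APIResourceList, error) {
		return client.Discovery().ServerPreferredNamespacedResources()
	}

	informersStarted := make(chan struct{})
	resyncPeriod := func() time.Duration { return 5 * time.Minute }

	options := &resourcequota.ResourceQuotaControllerOptions{
		QuotaClient:               client.CoreV1(),
		ResourceQuotaInformer:     sharedInformers.Core().V1().ResourceQuotas(),
		ResyncPeriod:              resyncPeriod,
		Registry:                  registry,
		DiscoveryFunc:             discoveryFunc,
		IgnoredResourcesFunc:      func() map[schema.GroupResource]struct{} { return nil },
		InformersStarted:          informersStarted,
		InformerFactory:           sharedInformers,
		ReplenishmentResyncPeriod: resyncPeriod,
	}

	rq, err := resourcequota.NewResourceQuotaController(options)
	if err != nil {
		return err
	}

	go rq.Run(2, stopCh)
	// Re-discover resources periodically so quota tracks types added after startup.
	go rq.Sync(discoveryFunc, 30*time.Second, stopCh)

	sharedInformers.Start(stopCh)
	close(informersStarted)
	<-stopCh
	return nil
}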
+ workerLock sync.RWMutex } -func NewResourceQuotaController(options *ResourceQuotaControllerOptions) *ResourceQuotaController { +// NewResourceQuotaController creates a quota controller with specified options +func NewResourceQuotaController(options *ResourceQuotaControllerOptions) (*ResourceQuotaController, error) { // build the resource quota controller rq := &ResourceQuotaController{ - rqClient: options.QuotaClient, - rqLister: options.ResourceQuotaInformer.Lister(), - informerSyncedFuncs: []cache.InformerSynced{options.ResourceQuotaInformer.Informer().HasSynced}, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "resourcequota_primary"), - missingUsageQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "resourcequota_priority"), - resyncPeriod: options.ResyncPeriod, - registry: options.Registry, - replenishmentControllers: []cache.Controller{}, + rqClient: options.QuotaClient, + rqLister: options.ResourceQuotaInformer.Lister(), + informerSyncedFuncs: []cache.InformerSynced{options.ResourceQuotaInformer.Informer().HasSynced}, + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "resourcequota_primary"), + missingUsageQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "resourcequota_priority"), + resyncPeriod: options.ResyncPeriod, + registry: options.Registry, } // set the synchronization handler rq.syncHandler = rq.syncResourceQuotaFromKey @@ -125,21 +148,30 @@ func NewResourceQuotaController(options *ResourceQuotaControllerOptions) *Resour rq.resyncPeriod(), ) - for _, groupKindToReplenish := range options.GroupKindsToReplenish { - controllerOptions := &ReplenishmentControllerOptions{ - GroupKind: groupKindToReplenish, - ResyncPeriod: options.ReplenishmentResyncPeriod, - ReplenishmentFunc: rq.replenishQuota, - } - replenishmentController, err := options.ControllerFactory.NewController(controllerOptions) - if err != nil { - glog.Warningf("quota controller unable to replenish %s due to %v, changes only accounted during full resync", groupKindToReplenish, err) - } else { - // make sure we wait for each shared informer's cache to sync - rq.informerSyncedFuncs = append(rq.informerSyncedFuncs, replenishmentController.HasSynced) - } + qm := &QuotaMonitor{ + informersStarted: options.InformersStarted, + informerFactory: options.InformerFactory, + ignoredResources: options.IgnoredResourcesFunc(), + resourceChanges: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "resource_quota_controller_resource_changes"), + resyncPeriod: options.ReplenishmentResyncPeriod, + replenishmentFunc: rq.replenishQuota, + registry: rq.registry, + } + rq.quotaMonitor = qm + + // do initial quota monitor setup + resources, err := GetQuotableResources(options.DiscoveryFunc) + if err != nil { + return nil, err } - return rq + if err = qm.syncMonitors(resources); err != nil { + utilruntime.HandleError(fmt.Errorf("initial monitor sync has error: %v", err)) + } + + // only start quota once all informers synced + rq.informerSyncedFuncs = append(rq.informerSyncedFuncs, qm.IsSynced) + + return rq, nil } // enqueueAll is called at the fullResyncPeriod interval to force a full recalculation of quota usage statistics @@ -189,7 +221,7 @@ func (rq *ResourceQuotaController) addQuota(obj interface{}) { for constraint := range resourceQuota.Status.Hard { if _, usageFound := resourceQuota.Status.Used[constraint]; !usageFound { matchedResources := 
[]api.ResourceName{api.ResourceName(constraint)} - for _, evaluator := range rq.registry.Evaluators() { + for _, evaluator := range rq.registry.List() { if intersection := evaluator.MatchingResources(matchedResources); len(intersection) > 0 { rq.missingUsageQueue.Add(key) return @@ -205,6 +237,10 @@ func (rq *ResourceQuotaController) addQuota(obj interface{}) { // worker runs a worker thread that just dequeues items, processes them, and marks them done. func (rq *ResourceQuotaController) worker(queue workqueue.RateLimitingInterface) func() { workFunc := func() bool { + + rq.workerLock.RLock() + defer rq.workerLock.RUnlock() + key, quit := queue.Get() if quit { return true @@ -238,10 +274,7 @@ func (rq *ResourceQuotaController) Run(workers int, stopCh <-chan struct{}) { glog.Infof("Starting resource quota controller") defer glog.Infof("Shutting down resource quota controller") - // the controllers that replenish other resources to respond rapidly to state changes - for _, replenishmentController := range rq.replenishmentControllers { - go replenishmentController.Run(stopCh) - } + go rq.quotaMonitor.Run(stopCh) if !controller.WaitForCacheSync("resource quota", stopCh, rq.informerSyncedFuncs...) { return @@ -287,7 +320,7 @@ func (rq *ResourceQuotaController) syncResourceQuota(v1ResourceQuota *v1.Resourc dirty := !apiequality.Semantic.DeepEqual(v1ResourceQuota.Spec.Hard, v1ResourceQuota.Status.Hard) resourceQuota := api.ResourceQuota{} - if err := k8s_api_v1.Convert_v1_ResourceQuota_To_api_ResourceQuota(v1ResourceQuota, &resourceQuota, nil); err != nil { + if err := k8s_api_v1.Convert_v1_ResourceQuota_To_core_ResourceQuota(v1ResourceQuota, &resourceQuota, nil); err != nil { return err } @@ -334,7 +367,7 @@ func (rq *ResourceQuotaController) syncResourceQuota(v1ResourceQuota *v1.Resourc // there was a change observed by this controller that requires we update quota if dirty { v1Usage := &v1.ResourceQuota{} - if err := k8s_api_v1.Convert_api_ResourceQuota_To_v1_ResourceQuota(&usage, v1Usage, nil); err != nil { + if err := k8s_api_v1.Convert_core_ResourceQuota_To_v1_ResourceQuota(&usage, v1Usage, nil); err != nil { return err } _, err = rq.rqClient.ResourceQuotas(usage.Namespace).UpdateStatus(v1Usage) @@ -344,11 +377,10 @@ func (rq *ResourceQuotaController) syncResourceQuota(v1ResourceQuota *v1.Resourc } // replenishQuota is a replenishment function invoked by a controller to notify that a quota should be recalculated -func (rq *ResourceQuotaController) replenishQuota(groupKind schema.GroupKind, namespace string, object runtime.Object) { - // check if the quota controller can evaluate this kind, if not, ignore it altogether... - evaluators := rq.registry.Evaluators() - evaluator, found := evaluators[groupKind] - if !found { +func (rq *ResourceQuotaController) replenishQuota(groupResource schema.GroupResource, namespace string) { + // check if the quota controller can evaluate this groupResource, if not, ignore it altogether... 
+ evaluator := rq.registry.Get(groupResource) + if evaluator == nil { return } @@ -370,7 +402,7 @@ func (rq *ResourceQuotaController) replenishQuota(groupKind schema.GroupKind, na for i := range resourceQuotas { resourceQuota := resourceQuotas[i] internalResourceQuota := &api.ResourceQuota{} - if err := k8s_api_v1.Convert_v1_ResourceQuota_To_api_ResourceQuota(resourceQuota, internalResourceQuota, nil); err != nil { + if err := k8s_api_v1.Convert_v1_ResourceQuota_To_core_ResourceQuota(resourceQuota, internalResourceQuota, nil); err != nil { glog.Error(err) continue } @@ -381,3 +413,66 @@ func (rq *ResourceQuotaController) replenishQuota(groupKind schema.GroupKind, na } } } + +// Sync periodically resyncs the controller when new resources are observed from discovery. +func (rq *ResourceQuotaController) Sync(discoveryFunc NamespacedResourcesFunc, period time.Duration, stopCh <-chan struct{}) { + // Something has changed, so track the new state and perform a sync. + oldResources := make(map[schema.GroupVersionResource]struct{}) + wait.Until(func() { + // Get the current resource list from discovery. + newResources, err := GetQuotableResources(discoveryFunc) + if err != nil { + utilruntime.HandleError(err) + return + } + + // Decide whether discovery has reported a change. + if reflect.DeepEqual(oldResources, newResources) { + glog.V(4).Infof("no resource updates from discovery, skipping resource quota sync") + return + } + + // Something has changed, so track the new state and perform a sync. + glog.V(2).Infof("syncing resource quota controller with updated resources from discovery: %v", newResources) + oldResources = newResources + + // Ensure workers are paused to avoid processing events before informers + // have resynced. + rq.workerLock.Lock() + defer rq.workerLock.Unlock() + + // Perform the monitor resync and wait for controllers to report cache sync. + if err := rq.resyncMonitors(newResources); err != nil { + utilruntime.HandleError(fmt.Errorf("failed to sync resource monitors: %v", err)) + return + } + if !controller.WaitForCacheSync("resource quota", stopCh, rq.quotaMonitor.IsSynced) { + utilruntime.HandleError(fmt.Errorf("timed out waiting for quota monitor sync")) + } + }, period, stopCh) +} + +// resyncMonitors starts or stops quota monitors as needed to ensure that all +// (and only) those resources present in the map are monitored. +func (rq *ResourceQuotaController) resyncMonitors(resources map[schema.GroupVersionResource]struct{}) error { + if err := rq.quotaMonitor.syncMonitors(resources); err != nil { + return err + } + rq.quotaMonitor.startMonitors() + return nil +} + +// GetQuotableResources returns all resources that the quota system should recognize. 
+// It requires a resource supports the following verbs: 'create','list','delete' +func GetQuotableResources(discoveryFunc NamespacedResourcesFunc) (map[schema.GroupVersionResource]struct{}, error) { + possibleResources, err := discoveryFunc() + if err != nil { + return nil, fmt.Errorf("failed to discover resources: %v", err) + } + quotableResources := discovery.FilteredBy(discovery.SupportsAllVerbs{Verbs: []string{"create", "list", "delete"}}, possibleResources) + quotableGroupVersionResources, err := discovery.GroupVersionResources(quotableResources) + if err != nil { + return nil, fmt.Errorf("Failed to parse resources: %v", err) + } + return quotableGroupVersionResources, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/controller/resourcequota/resource_quota_monitor.go b/vendor/k8s.io/kubernetes/pkg/controller/resourcequota/resource_quota_monitor.go new file mode 100644 index 000000000000..e5d6dc593f96 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/controller/resourcequota/resource_quota_monitor.go @@ -0,0 +1,341 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resourcequota + +import ( + "fmt" + "sync" + "time" + + "github.com/golang/glog" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/clock" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/kubernetes/pkg/controller" + "k8s.io/kubernetes/pkg/quota" + "k8s.io/kubernetes/pkg/quota/evaluator/core" + "k8s.io/kubernetes/pkg/quota/generic" +) + +type eventType int + +func (e eventType) String() string { + switch e { + case addEvent: + return "add" + case updateEvent: + return "update" + case deleteEvent: + return "delete" + default: + return fmt.Sprintf("unknown(%d)", int(e)) + } +} + +const ( + addEvent eventType = iota + updateEvent + deleteEvent +) + +type event struct { + eventType eventType + obj interface{} + oldObj interface{} + gvr schema.GroupVersionResource +} + +type QuotaMonitor struct { + // each monitor list/watches a resource and determines if we should replenish quota + monitors monitors + monitorLock sync.Mutex + // informersStarted is closed after after all of the controllers have been initialized and are running. + // After that it is safe to start them here, before that it is not. + informersStarted <-chan struct{} + + // stopCh drives shutdown. If it is nil, it indicates that Run() has not been + // called yet. If it is non-nil, then when closed it indicates everything + // should shut down. + // + // This channel is also protected by monitorLock. 
+ stopCh <-chan struct{} + + // monitors are the producer of the resourceChanges queue + resourceChanges workqueue.RateLimitingInterface + + // interfaces with informers + informerFactory InformerFactory + + // list of resources to ignore + ignoredResources map[schema.GroupResource]struct{} + + // The period that should be used to re-sync the monitored resource + resyncPeriod controller.ResyncPeriodFunc + + // callback to alert that a change may require quota recalculation + replenishmentFunc ReplenishmentFunc + + // maintains list of evaluators + registry quota.Registry +} + +// monitor runs a Controller with a local stop channel. +type monitor struct { + controller cache.Controller + + // stopCh stops Controller. If stopCh is nil, the monitor is considered to be + // not yet started. + stopCh chan struct{} +} + +// Run is intended to be called in a goroutine. Multiple calls of this is an +// error. +func (m *monitor) Run() { + m.controller.Run(m.stopCh) +} + +type monitors map[schema.GroupVersionResource]*monitor + +func (qm *QuotaMonitor) controllerFor(resource schema.GroupVersionResource) (cache.Controller, error) { + // TODO: pass this down + clock := clock.RealClock{} + handlers := cache.ResourceEventHandlerFuncs{ + UpdateFunc: func(oldObj, newObj interface{}) { + // TODO: leaky abstraction! live w/ it for now, but should pass down an update filter func. + // we only want to queue the updates we care about though as too much noise will overwhelm queue. + notifyUpdate := false + switch resource.GroupResource() { + case schema.GroupResource{Resource: "pods"}: + oldPod := oldObj.(*v1.Pod) + newPod := newObj.(*v1.Pod) + notifyUpdate = core.QuotaV1Pod(oldPod, clock) && !core.QuotaV1Pod(newPod, clock) + case schema.GroupResource{Resource: "services"}: + oldService := oldObj.(*v1.Service) + newService := newObj.(*v1.Service) + notifyUpdate = core.GetQuotaServiceType(oldService) != core.GetQuotaServiceType(newService) + } + if notifyUpdate { + event := &event{ + eventType: updateEvent, + obj: newObj, + oldObj: oldObj, + gvr: resource, + } + qm.resourceChanges.Add(event) + } + }, + DeleteFunc: func(obj interface{}) { + // delta fifo may wrap the object in a cache.DeletedFinalStateUnknown, unwrap it + if deletedFinalStateUnknown, ok := obj.(cache.DeletedFinalStateUnknown); ok { + obj = deletedFinalStateUnknown.Obj + } + event := &event{ + eventType: deleteEvent, + obj: obj, + gvr: resource, + } + qm.resourceChanges.Add(event) + }, + } + shared, err := qm.informerFactory.ForResource(resource) + if err == nil { + glog.V(4).Infof("QuotaMonitor using a shared informer for resource %q", resource.String()) + shared.Informer().AddEventHandlerWithResyncPeriod(handlers, qm.resyncPeriod()) + return shared.Informer().GetController(), nil + } + glog.V(4).Infof("QuotaMonitor unable to use a shared informer for resource %q: %v", resource.String(), err) + + // TODO: if we can share storage with garbage collector, it may make sense to support other resources + // until that time, aggregated api servers will have to run their own controller to reconcile their own quota. + return nil, fmt.Errorf("unable to monitor quota for resource %q", resource.String()) +} + +// syncMonitors rebuilds the monitor set according to the supplied resources, +// creating or deleting monitors as necessary. It will return any error +// encountered, but will make an attempt to create a monitor for each resource +// instead of immediately exiting on an error. It may be called before or after +// Run. 
Monitors are NOT started as part of the sync. To ensure all existing +// monitors are started, call startMonitors. +func (qm *QuotaMonitor) syncMonitors(resources map[schema.GroupVersionResource]struct{}) error { + qm.monitorLock.Lock() + defer qm.monitorLock.Unlock() + + toRemove := qm.monitors + if toRemove == nil { + toRemove = monitors{} + } + current := monitors{} + errs := []error{} + kept := 0 + added := 0 + for resource := range resources { + if _, ok := qm.ignoredResources[resource.GroupResource()]; ok { + continue + } + if m, ok := toRemove[resource]; ok { + current[resource] = m + delete(toRemove, resource) + kept++ + continue + } + c, err := qm.controllerFor(resource) + if err != nil { + errs = append(errs, fmt.Errorf("couldn't start monitor for resource %q: %v", resource, err)) + continue + } + + // check if we need to create an evaluator for this resource (if none previously registered) + evaluator := qm.registry.Get(resource.GroupResource()) + if evaluator == nil { + listerFunc := generic.ListerFuncForResourceFunc(qm.informerFactory.ForResource) + listResourceFunc := generic.ListResourceUsingListerFunc(listerFunc, resource) + evaluator = generic.NewObjectCountEvaluator(false, resource.GroupResource(), listResourceFunc, "") + qm.registry.Add(evaluator) + glog.Infof("QuotaMonitor created object count evaluator for %s", resource.GroupResource()) + } + + // track the monitor + current[resource] = &monitor{controller: c} + added++ + } + qm.monitors = current + + for _, monitor := range toRemove { + if monitor.stopCh != nil { + close(monitor.stopCh) + } + } + + glog.V(4).Infof("quota synced monitors; added %d, kept %d, removed %d", added, kept, len(toRemove)) + // NewAggregate returns nil if errs is 0-length + return utilerrors.NewAggregate(errs) +} + +// startMonitors ensures the current set of monitors are running. Any newly +// started monitors will also cause shared informers to be started. +// +// If called before Run, startMonitors does nothing (as there is no stop channel +// to support monitor/informer execution). +func (qm *QuotaMonitor) startMonitors() { + qm.monitorLock.Lock() + defer qm.monitorLock.Unlock() + + if qm.stopCh == nil { + return + } + + // we're waiting until after the informer start that happens once all the controllers are initialized. This ensures + // that they don't get unexpected events on their work queues. + <-qm.informersStarted + + monitors := qm.monitors + started := 0 + for _, monitor := range monitors { + if monitor.stopCh == nil { + monitor.stopCh = make(chan struct{}) + qm.informerFactory.Start(qm.stopCh) + go monitor.Run() + started++ + } + } + glog.V(4).Infof("QuotaMonitor started %d new monitors, %d currently running", started, len(monitors)) +} + +// IsSynced returns true if any monitors exist AND all those monitors' +// controllers HasSynced functions return true. This means IsSynced could return +// true at one time, and then later return false if all monitors were +// reconstructed. +func (qm *QuotaMonitor) IsSynced() bool { + qm.monitorLock.Lock() + defer qm.monitorLock.Unlock() + + if len(qm.monitors) == 0 { + return false + } + + for _, monitor := range qm.monitors { + if !monitor.controller.HasSynced() { + return false + } + } + return true +} + +// Run sets the stop channel and starts monitor execution until stopCh is +// closed. Any running monitors will be stopped before Run returns. 
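The keep/add/remove bookkeeping in syncMonitors above is a general reconciliation of a running set against a desired set. The sketch below reduces it to plain maps and stop channels; it is simplified (new workers here get their stop channel immediately, whereas real monitors are only started later by startMonitors).

package main

import "fmt"

// worker stands in for one running monitor: something that can be stopped.
type worker struct {
	stop chan struct{}
}

// reconcile keeps workers whose resources are still desired, creates missing
// ones, and stops anything that is no longer desired.
func reconcile(running map[string]*worker, desired map[string]struct{}) map[string]*worker {
	next := map[string]*worker{}
	for name := range desired {
		if w, ok := running[name]; ok {
			next[name] = w // keep
			delete(running, name)
			continue
		}
		next[name] = &worker{stop: make(chan struct{})} // add
	}
	for name, w := range running { // everything left over is stale
		close(w.stop)
		fmt.Println("stopped monitor for", name)
	}
	return next
}

func main() {
	running := reconcile(nil, map[string]struct{}{"pods": {}, "services": {}})
	running = reconcile(running, map[string]struct{}{"pods": {}, "configmaps": {}})
	fmt.Println(len(running), "monitors running") // pods kept, services stopped, configmaps added
}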
+func (qm *QuotaMonitor) Run(stopCh <-chan struct{}) { + glog.Infof("QuotaMonitor running") + defer glog.Infof("QuotaMonitor stopping") + + // Set up the stop channel. + qm.monitorLock.Lock() + qm.stopCh = stopCh + qm.monitorLock.Unlock() + + // Start monitors and begin change processing until the stop channel is + // closed. + qm.startMonitors() + wait.Until(qm.runProcessResourceChanges, 1*time.Second, stopCh) + + // Stop any running monitors. + qm.monitorLock.Lock() + defer qm.monitorLock.Unlock() + monitors := qm.monitors + stopped := 0 + for _, monitor := range monitors { + if monitor.stopCh != nil { + stopped++ + close(monitor.stopCh) + } + } + glog.Infof("QuotaMonitor stopped %d of %d monitors", stopped, len(monitors)) +} + +func (qm *QuotaMonitor) runProcessResourceChanges() { + for qm.processResourceChanges() { + } +} + +// Dequeueing an event from resourceChanges to process +func (qm *QuotaMonitor) processResourceChanges() bool { + item, quit := qm.resourceChanges.Get() + if quit { + return false + } + defer qm.resourceChanges.Done(item) + event, ok := item.(*event) + if !ok { + utilruntime.HandleError(fmt.Errorf("expect a *event, got %v", item)) + return true + } + obj := event.obj + accessor, err := meta.Accessor(obj) + if err != nil { + utilruntime.HandleError(fmt.Errorf("cannot access obj: %v", err)) + return true + } + glog.V(4).Infof("QuotaMonitor process object: %s, namespace %s, name %s, uid %s, event type %v", event.gvr.String(), accessor.GetNamespace(), accessor.GetName(), string(accessor.GetUID()), event.eventType) + qm.replenishmentFunc(event.gvr.GroupResource(), accessor.GetNamespace()) + return true +} diff --git a/vendor/k8s.io/kubernetes/pkg/controller/route/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/route/BUILD index 282622dfdce0..02efb7343d92 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/route/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/route/BUILD @@ -12,6 +12,7 @@ go_library( "doc.go", "route_controller.go", ], + importpath = "k8s.io/kubernetes/pkg/controller/route", deps = [ "//pkg/api/v1/node:go_default_library", "//pkg/cloudprovider:go_default_library", @@ -39,6 +40,7 @@ go_library( go_test( name = "go_default_test", srcs = ["route_controller_test.go"], + importpath = "k8s.io/kubernetes/pkg/controller/route", library = ":go_default_library", deps = [ "//pkg/api/v1/node:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/route/route_controller.go b/vendor/k8s.io/kubernetes/pkg/controller/route/route_controller.go index 9ed2bc45f43b..9a399a02709c 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/route/route_controller.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/route/route_controller.go @@ -66,8 +66,8 @@ type RouteController struct { } func New(routes cloudprovider.Routes, kubeClient clientset.Interface, nodeInformer coreinformers.NodeInformer, clusterName string, clusterCIDR *net.IPNet) *RouteController { - if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil { - metrics.RegisterMetricAndTrackRateLimiterUsage("route_controller", kubeClient.Core().RESTClient().GetRateLimiter()) + if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil { + metrics.RegisterMetricAndTrackRateLimiterUsage("route_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter()) } if clusterCIDR == nil { @@ -75,6 +75,7 @@ func New(routes cloudprovider.Routes, kubeClient clientset.Interface, nodeInform } eventBroadcaster := record.NewBroadcaster() + 
eventBroadcaster.StartLogging(glog.Infof) recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "route_controller"}) rc := &RouteController{ @@ -102,7 +103,7 @@ func (rc *RouteController) Run(stopCh <-chan struct{}, syncPeriod time.Duration) } if rc.broadcaster != nil { - rc.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(rc.kubeClient.Core().RESTClient()).Events("")}) + rc.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(rc.kubeClient.CoreV1().RESTClient()).Events("")}) } // TODO: If we do just the full Resync every 5 minutes (default value) @@ -114,7 +115,7 @@ func (rc *RouteController) Run(stopCh <-chan struct{}, syncPeriod time.Duration) if err := rc.reconcileNodeRoutes(); err != nil { glog.Errorf("Couldn't reconcile node routes: %v", err) } - }, syncPeriod, wait.NeverStop) + }, syncPeriod, stopCh) <-stopCh } @@ -254,7 +255,7 @@ func (rc *RouteController) updateNetworkingCondition(nodeName types.NodeName, ro glog.Errorf("Error updating node %s: %v", nodeName, err) return err } - glog.Errorf("Error updating node %s, retrying: %v", nodeName, err) + glog.V(4).Infof("Error updating node %s, retrying: %v", nodeName, err) } glog.Errorf("Error updating node %s: %v", nodeName, err) return err diff --git a/vendor/k8s.io/kubernetes/pkg/controller/service/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/service/BUILD index 43d3495d4396..cfb191f9ca0b 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/service/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/service/BUILD @@ -12,17 +12,19 @@ go_library( "doc.go", "service_controller.go", ], + importpath = "k8s.io/kubernetes/pkg/controller/service", deps = [ - "//cmd/kubeadm/app/constants:go_default_library", - "//pkg/api/v1/helper:go_default_library", + "//pkg/apis/core/v1/helper:go_default_library", "//pkg/cloudprovider:go_default_library", "//pkg/controller:go_default_library", + "//pkg/features:go_default_library", "//pkg/util/metrics:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/client-go/informers/core/v1:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", @@ -37,6 +39,7 @@ go_library( go_test( name = "go_default_test", srcs = ["service_controller_test.go"], + importpath = "k8s.io/kubernetes/pkg/controller/service", library = ":go_default_library", deps = [ "//pkg/api/testapi:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/service/OWNERS b/vendor/k8s.io/kubernetes/pkg/controller/service/OWNERS index fcaf26466167..d1170c74d0f2 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/service/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/controller/service/OWNERS @@ -3,3 +3,8 @@ reviewers: - MrHohn - thockin - matchstick +approvers: +- bowei +- MrHohn +- thockin +- matchstick diff --git a/vendor/k8s.io/kubernetes/pkg/controller/service/service_controller.go b/vendor/k8s.io/kubernetes/pkg/controller/service/service_controller.go index 069055e72631..38c6b99da2cf 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/service/service_controller.go +++ 
b/vendor/k8s.io/kubernetes/pkg/controller/service/service_controller.go @@ -29,6 +29,7 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" + utilfeature "k8s.io/apiserver/pkg/util/feature" coreinformers "k8s.io/client-go/informers/core/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" @@ -37,10 +38,10 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" - "k8s.io/kubernetes/cmd/kubeadm/app/constants" - v1helper "k8s.io/kubernetes/pkg/api/v1/helper" + v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/controller" + kubefeatures "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/util/metrics" ) @@ -63,6 +64,14 @@ const ( notRetryable = false doNotRetry = time.Duration(0) + + // LabelNodeRoleMaster specifies that a node is a master + // It's copied over to kubeadm until it's merged in core: https://github.com/kubernetes/kubernetes/pull/39112 + LabelNodeRoleMaster = "node-role.kubernetes.io/master" + + // LabelNodeRoleExcludeBalancer specifies that the node should be + // exclude from load balancers created by a cloud provider. + LabelNodeRoleExcludeBalancer = "alpha.service-controller.kubernetes.io/exclude-balancer" ) type cachedService struct { @@ -105,11 +114,14 @@ func New( clusterName string, ) (*ServiceController, error) { broadcaster := record.NewBroadcaster() - broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.Core().RESTClient()).Events("")}) + broadcaster.StartLogging(glog.Infof) + broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")}) recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "service-controller"}) - if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil { - metrics.RegisterMetricAndTrackRateLimiterUsage("service_controller", kubeClient.Core().RESTClient().GetRateLimiter()) + if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil { + if err := metrics.RegisterMetricAndTrackRateLimiterUsage("service_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter()); err != nil { + return nil, err + } } s := &ServiceController{ @@ -176,6 +188,7 @@ func (s *ServiceController) Run(stopCh <-chan struct{}, workers int) { defer glog.Info("Shutting down service controller") if !controller.WaitForCacheSync("service", stopCh, s.serviceListerSynced, s.nodeListerSynced) { + return } for i := 0; i < workers; i++ { @@ -207,12 +220,12 @@ func (s *ServiceController) worker() { func (s *ServiceController) init() error { if s.cloud == nil { - return fmt.Errorf("WARNING: no cloud provider provided, services of type LoadBalancer will fail.") + return fmt.Errorf("WARNING: no cloud provider provided, services of type LoadBalancer will fail") } balancer, ok := s.cloud.LoadBalancer() if !ok { - return fmt.Errorf("the cloud provider does not support external load balancers.") + return fmt.Errorf("the cloud provider does not support external load balancers") } s.balancer = balancer @@ -236,15 +249,17 @@ func (s *ServiceController) processServiceUpdate(cachedService *cachedService, s err, retry := s.createLoadBalancerIfNeeded(key, service) if err != nil { message := "Error creating load balancer" + var retryToReturn time.Duration if retry { message += " (will retry): " + retryToReturn = 
cachedService.nextRetryDelay() } else { message += " (will not retry): " + retryToReturn = doNotRetry } message += err.Error() s.eventRecorder.Event(service, v1.EventTypeWarning, "CreatingLoadBalancerFailed", message) - - return err, cachedService.nextRetryDelay() + return err, retryToReturn } // Always update the cache upon success. // NOTE: Since we update the cached service if and only if we successfully @@ -270,7 +285,7 @@ func (s *ServiceController) createLoadBalancerIfNeeded(key string, service *v1.S if !wantsLoadBalancer(service) { _, exists, err := s.balancer.GetLoadBalancer(s.clusterName, service) if err != nil { - return fmt.Errorf("Error getting LB for service %s: %v", key, err), retryable + return fmt.Errorf("error getting LB for service %s: %v", key, err), retryable } if exists { glog.Infof("Deleting existing load balancer for service %s that no longer needs a load balancer.", key) @@ -290,7 +305,7 @@ func (s *ServiceController) createLoadBalancerIfNeeded(key string, service *v1.S s.eventRecorder.Event(service, v1.EventTypeNormal, "EnsuringLoadBalancer", "Ensuring load balancer") newState, err = s.ensureLoadBalancer(service) if err != nil { - return fmt.Errorf("Failed to ensure load balancer for service %s: %v", key, err), retryable + return fmt.Errorf("failed to ensure load balancer for service %s: %v", key, err), retryable } s.eventRecorder.Event(service, v1.EventTypeNormal, "EnsuredLoadBalancer", "Ensured load balancer") } @@ -305,7 +320,7 @@ func (s *ServiceController) createLoadBalancerIfNeeded(key string, service *v1.S service.Status.LoadBalancer = *newState if err := s.persistUpdate(service); err != nil { - return fmt.Errorf("Failed to persist updated status to apiserver, even after retries. Giving up: %v", err), notRetryable + return fmt.Errorf("failed to persist updated status to apiserver, even after retries. Giving up: %v", err), notRetryable } } else { glog.V(2).Infof("Not persisting unchanged LoadBalancerStatus for service %s to registry.", key) @@ -317,7 +332,7 @@ func (s *ServiceController) createLoadBalancerIfNeeded(key string, service *v1.S func (s *ServiceController) persistUpdate(service *v1.Service) error { var err error for i := 0; i < clientRetryCount; i++ { - _, err = s.kubeClient.Core().Services(service.Namespace).UpdateStatus(service) + _, err = s.kubeClient.CoreV1().Services(service.Namespace).UpdateStatus(service) if err == nil { return nil } @@ -332,7 +347,7 @@ func (s *ServiceController) persistUpdate(service *v1.Service) error { // TODO: Try to resolve the conflict if the change was unrelated to load // balancer status. For now, just pass it up the stack. if errors.IsConflict(err) { - return fmt.Errorf("Not persisting update to service '%s/%s' that has been changed since we received it: %v", + return fmt.Errorf("not persisting update to service '%s/%s' that has been changed since we received it: %v", service.Namespace, service.Name, err) } glog.Warningf("Failed to persist updated LoadBalancerStatus to service '%s/%s' after creating its load balancer: %v", @@ -348,6 +363,11 @@ func (s *ServiceController) ensureLoadBalancer(service *v1.Service) (*v1.LoadBal return nil, err } + // If there are no available nodes for LoadBalancer service, make a EventTypeWarning event for it. 
+ if len(nodes) == 0 { + s.eventRecorder.Eventf(service, v1.EventTypeWarning, "UnAvailableLoadBalancer", "There are no available nodes for LoadBalancer service %s/%s", service.Namespace, service.Name) + } + // - Only one protocol supported per service // - Not all cloud providers support all protocols and the next step is expected to return // an error for unsupported protocols @@ -492,7 +512,7 @@ func getPortsForLB(service *v1.Service) ([]*v1.ServicePort, error) { protocol = sp.Protocol } else if protocol != sp.Protocol && wantsLoadBalancer(service) { // TODO: Convert error messages to use event recorder - return nil, fmt.Errorf("mixed protocol external load balancers are not supported.") + return nil, fmt.Errorf("mixed protocol external load balancers are not supported") } } return ports, nil @@ -580,10 +600,6 @@ func stringSlicesEqual(x, y []string) bool { return true } -func includeNodeFromNodeList(node *v1.Node) bool { - return !node.Spec.Unschedulable -} - func getNodeConditionPredicate() corelisters.NodeConditionPredicate { return func(node *v1.Node) bool { // We add the master to the node list, but its unschedulable. So we use this to filter @@ -594,10 +610,16 @@ func getNodeConditionPredicate() corelisters.NodeConditionPredicate { // As of 1.6, we will taint the master, but not necessarily mark it unschedulable. // Recognize nodes labeled as master, and filter them also, as we were doing previously. - if _, hasMasterRoleLabel := node.Labels[constants.LabelNodeRoleMaster]; hasMasterRoleLabel { + if _, hasMasterRoleLabel := node.Labels[LabelNodeRoleMaster]; hasMasterRoleLabel { return false } + if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.ServiceNodeExclusion) { + if _, hasExcludeBalancerLabel := node.Labels[LabelNodeRoleExcludeBalancer]; hasExcludeBalancerLabel { + return false + } + } + // If we have no info, don't accept if len(node.Status.Conditions) == 0 { return false @@ -671,7 +693,12 @@ func (s *ServiceController) lockedUpdateLoadBalancerHosts(service *v1.Service, h // This operation doesn't normally take very long (and happens pretty often), so we only record the final event err := s.balancer.UpdateLoadBalancer(s.clusterName, service, hosts) if err == nil { - s.eventRecorder.Event(service, v1.EventTypeNormal, "UpdatedLoadBalancer", "Updated load balancer with new hosts") + // If there are no available nodes for LoadBalancer service, make a EventTypeWarning event for it. + if len(hosts) == 0 { + s.eventRecorder.Eventf(service, v1.EventTypeWarning, "UnAvailableLoadBalancer", "There are no available nodes for LoadBalancer service %s/%s", service.Namespace, service.Name) + } else { + s.eventRecorder.Event(service, v1.EventTypeNormal, "UpdatedLoadBalancer", "Updated load balancer with new hosts") + } return nil } @@ -746,7 +773,7 @@ func (s *ServiceController) syncService(key string) error { if retryDelay != 0 { // Add the failed service back to the queue so we'll retry it. - glog.Errorf("Failed to process service. Retrying in %s: %v", retryDelay, err) + glog.Errorf("Failed to process service %v. 
Retrying in %s: %v", key, retryDelay, err) go func(obj interface{}, delay time.Duration) { // put back the service key to working queue, it is possible that more entries of the service // were added into the queue during the delay, but it does not mess as when handling the retry, @@ -754,7 +781,7 @@ func (s *ServiceController) syncService(key string) error { s.workingQueue.AddAfter(obj, delay) }(key, retryDelay) } else if err != nil { - runtime.HandleError(fmt.Errorf("Failed to process service. Not retrying: %v", err)) + runtime.HandleError(fmt.Errorf("failed to process service %v. Not retrying: %v", key, err)) } return nil } @@ -765,7 +792,7 @@ func (s *ServiceController) syncService(key string) error { func (s *ServiceController) processServiceDeletion(key string) (error, time.Duration) { cachedService, ok := s.cache.get(key) if !ok { - return fmt.Errorf("Service %s not in cache even though the watcher thought it was. Ignoring the deletion.", key), doNotRetry + return fmt.Errorf("service %s not in cache even though the watcher thought it was. Ignoring the deletion", key), doNotRetry } return s.processLoadBalancerDelete(cachedService, key) } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/serviceaccount/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/serviceaccount/BUILD index 53c49ec3e972..ef389de512c9 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/serviceaccount/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/serviceaccount/BUILD @@ -14,8 +14,9 @@ go_library( "tokengetter.go", "tokens_controller.go", ], + importpath = "k8s.io/kubernetes/pkg/controller/serviceaccount", deps = [ - "//pkg/api/v1:go_default_library", + "//pkg/apis/core/v1:go_default_library", "//pkg/controller:go_default_library", "//pkg/registry/core/secret:go_default_library", "//pkg/registry/core/secret/storage:go_default_library", @@ -50,9 +51,10 @@ go_test( "serviceaccounts_controller_test.go", "tokens_controller_test.go", ], + importpath = "k8s.io/kubernetes/pkg/controller/serviceaccount", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/controller:go_default_library", "//vendor/github.com/davecgh/go-spew/spew:go_default_library", "//vendor/github.com/golang/glog:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/serviceaccount/serviceaccounts_controller.go b/vendor/k8s.io/kubernetes/pkg/controller/serviceaccount/serviceaccounts_controller.go index a84ce61382d6..e0c41b82f8c3 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/serviceaccount/serviceaccounts_controller.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/serviceaccount/serviceaccounts_controller.go @@ -61,14 +61,16 @@ func DefaultServiceAccountsControllerOptions() ServiceAccountsControllerOptions } // NewServiceAccountsController returns a new *ServiceAccountsController. 
-func NewServiceAccountsController(saInformer coreinformers.ServiceAccountInformer, nsInformer coreinformers.NamespaceInformer, cl clientset.Interface, options ServiceAccountsControllerOptions) *ServiceAccountsController { +func NewServiceAccountsController(saInformer coreinformers.ServiceAccountInformer, nsInformer coreinformers.NamespaceInformer, cl clientset.Interface, options ServiceAccountsControllerOptions) (*ServiceAccountsController, error) { e := &ServiceAccountsController{ client: cl, serviceAccountsToEnsure: options.ServiceAccounts, queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "serviceaccount"), } - if cl != nil && cl.Core().RESTClient().GetRateLimiter() != nil { - metrics.RegisterMetricAndTrackRateLimiterUsage("serviceaccount_controller", cl.Core().RESTClient().GetRateLimiter()) + if cl != nil && cl.CoreV1().RESTClient().GetRateLimiter() != nil { + if err := metrics.RegisterMetricAndTrackRateLimiterUsage("serviceaccount_controller", cl.CoreV1().RESTClient().GetRateLimiter()); err != nil { + return nil, err + } } saInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ @@ -86,7 +88,7 @@ func NewServiceAccountsController(saInformer coreinformers.ServiceAccountInforme e.syncHandler = e.syncNamespace - return e + return e, nil } // ServiceAccountsController manages ServiceAccount objects inside Namespaces @@ -210,7 +212,7 @@ func (c *ServiceAccountsController) syncNamespace(key string) error { // TODO eliminate this once the fake client can handle creation without NS sa.Namespace = ns.Name - if _, err := c.client.Core().ServiceAccounts(ns.Name).Create(&sa); err != nil && !apierrs.IsAlreadyExists(err) { + if _, err := c.client.CoreV1().ServiceAccounts(ns.Name).Create(&sa); err != nil && !apierrs.IsAlreadyExists(err) { createFailures = append(createFailures, err) } } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/serviceaccount/tokengetter.go b/vendor/k8s.io/kubernetes/pkg/controller/serviceaccount/tokengetter.go index e81f1381573d..d21f578b29c4 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/serviceaccount/tokengetter.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/serviceaccount/tokengetter.go @@ -23,7 +23,7 @@ import ( "k8s.io/apiserver/pkg/registry/generic" "k8s.io/apiserver/pkg/storage/storagebackend" clientset "k8s.io/client-go/kubernetes" - apiv1 "k8s.io/kubernetes/pkg/api/v1" + apiv1 "k8s.io/kubernetes/pkg/apis/core/v1" "k8s.io/kubernetes/pkg/registry/core/secret" secretstore "k8s.io/kubernetes/pkg/registry/core/secret/storage" serviceaccountregistry "k8s.io/kubernetes/pkg/registry/core/serviceaccount" @@ -44,10 +44,10 @@ func NewGetterFromClient(c clientset.Interface) serviceaccount.ServiceAccountTok return clientGetter{c} } func (c clientGetter) GetServiceAccount(namespace, name string) (*v1.ServiceAccount, error) { - return c.client.Core().ServiceAccounts(namespace).Get(name, metav1.GetOptions{}) + return c.client.CoreV1().ServiceAccounts(namespace).Get(name, metav1.GetOptions{}) } func (c clientGetter) GetSecret(namespace, name string) (*v1.Secret, error) { - return c.client.Core().Secrets(namespace).Get(name, metav1.GetOptions{}) + return c.client.CoreV1().Secrets(namespace).Get(name, metav1.GetOptions{}) } // registryGetter implements ServiceAccountTokenGetter using a service account and secret registry @@ -68,7 +68,7 @@ func (r *registryGetter) GetServiceAccount(namespace, name string) (*v1.ServiceA return nil, err } v1ServiceAccount := v1.ServiceAccount{} - err = 
apiv1.Convert_api_ServiceAccount_To_v1_ServiceAccount(internalServiceAccount, &v1ServiceAccount, nil) + err = apiv1.Convert_core_ServiceAccount_To_v1_ServiceAccount(internalServiceAccount, &v1ServiceAccount, nil) return &v1ServiceAccount, err } @@ -79,7 +79,7 @@ func (r *registryGetter) GetSecret(namespace, name string) (*v1.Secret, error) { return nil, err } v1Secret := v1.Secret{} - err = apiv1.Convert_api_Secret_To_v1_Secret(internalSecret, &v1Secret, nil) + err = apiv1.Convert_core_Secret_To_v1_Secret(internalSecret, &v1Secret, nil) return &v1Secret, err } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/serviceaccount/tokens_controller.go b/vendor/k8s.io/kubernetes/pkg/controller/serviceaccount/tokens_controller.go index f8d5851101c5..c34e600ef112 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/serviceaccount/tokens_controller.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/serviceaccount/tokens_controller.go @@ -70,7 +70,7 @@ type TokensControllerOptions struct { } // NewTokensController returns a new *TokensController. -func NewTokensController(serviceAccounts informers.ServiceAccountInformer, secrets informers.SecretInformer, cl clientset.Interface, options TokensControllerOptions) *TokensController { +func NewTokensController(serviceAccounts informers.ServiceAccountInformer, secrets informers.SecretInformer, cl clientset.Interface, options TokensControllerOptions) (*TokensController, error) { maxRetries := options.MaxRetries if maxRetries == 0 { maxRetries = 10 @@ -86,8 +86,10 @@ func NewTokensController(serviceAccounts informers.ServiceAccountInformer, secre maxRetries: maxRetries, } - if cl != nil && cl.Core().RESTClient().GetRateLimiter() != nil { - metrics.RegisterMetricAndTrackRateLimiterUsage("serviceaccount_tokens_controller", cl.Core().RESTClient().GetRateLimiter()) + if cl != nil && cl.CoreV1().RESTClient().GetRateLimiter() != nil { + if err := metrics.RegisterMetricAndTrackRateLimiterUsage("serviceaccount_tokens_controller", cl.CoreV1().RESTClient().GetRateLimiter()); err != nil { + return nil, err + } } e.serviceAccounts = serviceAccounts.Lister() @@ -124,7 +126,7 @@ func NewTokensController(serviceAccounts informers.ServiceAccountInformer, secre options.SecretResync, ) - return e + return e, nil } // TokensController manages ServiceAccountToken secrets for ServiceAccount objects @@ -344,7 +346,7 @@ func (e *TokensController) deleteToken(ns, name string, uid types.UID) ( /*retry if len(uid) > 0 { opts = &metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &uid}} } - err := e.client.Core().Secrets(ns).Delete(name, opts) + err := e.client.CoreV1().Secrets(ns).Delete(name, opts) // NotFound doesn't need a retry (it's already been deleted) // Conflict doesn't need a retry (the UID precondition failed) if err == nil || apierrors.IsNotFound(err) || apierrors.IsConflict(err) { @@ -366,7 +368,7 @@ func (e *TokensController) ensureReferencedToken(serviceAccount *v1.ServiceAccou // We don't want to update the cache's copy of the service account // so add the secret to a freshly retrieved copy of the service account - serviceAccounts := e.client.Core().ServiceAccounts(serviceAccount.Namespace) + serviceAccounts := e.client.CoreV1().ServiceAccounts(serviceAccount.Namespace) liveServiceAccount, err := serviceAccounts.Get(serviceAccount.Name, metav1.GetOptions{}) if err != nil { // Retry if we cannot fetch the live service account (for a NotFound error, either the live lookup or our cache are stale) @@ -405,7 +407,7 @@ func (e *TokensController) 
ensureReferencedToken(serviceAccount *v1.ServiceAccou } // Save the secret - createdToken, err := e.client.Core().Secrets(serviceAccount.Namespace).Create(secret) + createdToken, err := e.client.CoreV1().Secrets(serviceAccount.Namespace).Create(secret) if err != nil { // retriable error return true, err @@ -455,7 +457,7 @@ func (e *TokensController) ensureReferencedToken(serviceAccount *v1.ServiceAccou // we weren't able to use the token, try to clean it up. glog.V(2).Infof("deleting secret %s/%s because reference couldn't be added (%v)", secret.Namespace, secret.Name, err) deleteOpts := &metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &createdToken.UID}} - if deleteErr := e.client.Core().Secrets(createdToken.Namespace).Delete(createdToken.Name, deleteOpts); deleteErr != nil { + if deleteErr := e.client.CoreV1().Secrets(createdToken.Namespace).Delete(createdToken.Name, deleteOpts); deleteErr != nil { glog.Error(deleteErr) // if we fail, just log it } } @@ -513,7 +515,7 @@ func (e *TokensController) generateTokenIfNeeded(serviceAccount *v1.ServiceAccou // We don't want to update the cache's copy of the secret // so add the token to a freshly retrieved copy of the secret - secrets := e.client.Core().Secrets(cachedSecret.Namespace) + secrets := e.client.CoreV1().Secrets(cachedSecret.Namespace) liveSecret, err := secrets.Get(cachedSecret.Name, metav1.GetOptions{}) if err != nil { // Retry for any error other than a NotFound @@ -577,7 +579,7 @@ func (e *TokensController) generateTokenIfNeeded(serviceAccount *v1.ServiceAccou func (e *TokensController) removeSecretReference(saNamespace string, saName string, saUID types.UID, secretName string) error { // We don't want to update the cache's copy of the service account // so remove the secret from a freshly retrieved copy of the service account - serviceAccounts := e.client.Core().ServiceAccounts(saNamespace) + serviceAccounts := e.client.CoreV1().ServiceAccounts(saNamespace) serviceAccount, err := serviceAccounts.Get(saName, metav1.GetOptions{}) // Ignore NotFound errors when attempting to remove a reference if apierrors.IsNotFound(err) { @@ -631,7 +633,7 @@ func (e *TokensController) getServiceAccount(ns string, name string, uid types.U } // Live lookup - sa, err = e.client.Core().ServiceAccounts(ns).Get(name, metav1.GetOptions{}) + sa, err = e.client.CoreV1().ServiceAccounts(ns).Get(name, metav1.GetOptions{}) if apierrors.IsNotFound(err) { return nil, nil } @@ -667,7 +669,7 @@ func (e *TokensController) getSecret(ns string, name string, uid types.UID, fetc } // Live lookup - secret, err := e.client.Core().Secrets(ns).Get(name, metav1.GetOptions{}) + secret, err := e.client.CoreV1().Secrets(ns).Get(name, metav1.GetOptions{}) if apierrors.IsNotFound(err) { return nil, nil } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/statefulset/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/statefulset/BUILD index 7cf5f352358e..d35a9de07807 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/statefulset/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/statefulset/BUILD @@ -15,6 +15,7 @@ go_library( "stateful_set_status_updater.go", "stateful_set_utils.go", ], + importpath = "k8s.io/kubernetes/pkg/controller/statefulset", deps = [ "//pkg/api/v1/pod:go_default_library", "//pkg/controller:go_default_library", @@ -53,11 +54,12 @@ go_test( "stateful_set_test.go", "stateful_set_utils_test.go", ], + importpath = "k8s.io/kubernetes/pkg/controller/statefulset", library = ":go_default_library", deps = [ - "//pkg/api/install:go_default_library", 
"//pkg/api/v1/pod:go_default_library", "//pkg/apis/apps/install:go_default_library", + "//pkg/apis/core/install:go_default_library", "//pkg/controller:go_default_library", "//pkg/controller/history:go_default_library", "//vendor/k8s.io/api/apps/v1beta1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/statefulset/stateful_pod_control.go b/vendor/k8s.io/kubernetes/pkg/controller/statefulset/stateful_pod_control.go index ef33a78b16b4..fff08046297b 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/statefulset/stateful_pod_control.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/statefulset/stateful_pod_control.go @@ -77,7 +77,7 @@ func (spc *realStatefulPodControl) CreateStatefulPod(set *apps.StatefulSet, pod return err } // If we created the PVCs attempt to create the Pod - _, err := spc.client.Core().Pods(set.Namespace).Create(pod) + _, err := spc.client.CoreV1().Pods(set.Namespace).Create(pod) // sink already exists errors if apierrors.IsAlreadyExists(err) { return err @@ -113,7 +113,7 @@ func (spc *realStatefulPodControl) UpdateStatefulPod(set *apps.StatefulSet, pod attemptedUpdate = true // commit the update, retrying on conflicts - _, updateErr := spc.client.Core().Pods(set.Namespace).Update(pod) + _, updateErr := spc.client.CoreV1().Pods(set.Namespace).Update(pod) if updateErr == nil { return nil } @@ -134,7 +134,7 @@ func (spc *realStatefulPodControl) UpdateStatefulPod(set *apps.StatefulSet, pod } func (spc *realStatefulPodControl) DeleteStatefulPod(set *apps.StatefulSet, pod *v1.Pod) error { - err := spc.client.Core().Pods(set.Namespace).Delete(pod.Name, nil) + err := spc.client.CoreV1().Pods(set.Namespace).Delete(pod.Name, nil) spc.recordPodEvent("delete", set, pod, err) return err } @@ -182,7 +182,7 @@ func (spc *realStatefulPodControl) createPersistentVolumeClaims(set *apps.Statef _, err := spc.pvcLister.PersistentVolumeClaims(claim.Namespace).Get(claim.Name) switch { case apierrors.IsNotFound(err): - _, err := spc.client.Core().PersistentVolumeClaims(claim.Namespace).Create(&claim) + _, err := spc.client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(&claim) if err != nil { errs = append(errs, fmt.Errorf("Failed to create PVC %s: %s", claim.Name, err)) } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/statefulset/stateful_set.go b/vendor/k8s.io/kubernetes/pkg/controller/statefulset/stateful_set.go index 2cde7ab3fe43..a49ba7deb3ce 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/statefulset/stateful_set.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/statefulset/stateful_set.go @@ -71,6 +71,8 @@ type StatefulSetController struct { setListerSynced cache.InformerSynced // pvcListerSynced returns true if the pvc shared informer has synced at least once pvcListerSynced cache.InformerSynced + // revListerSynced returns true if the rev shared informer has synced at least once + revListerSynced cache.InformerSynced // StatefulSets that need to be synced. 
queue workqueue.RateLimitingInterface } @@ -85,8 +87,8 @@ func NewStatefulSetController( ) *StatefulSetController { eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(glog.Infof) - eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.Core().RESTClient()).Events("")}) - recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "statefulset"}) + eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")}) + recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "statefulset-controller"}) ssc := &StatefulSetController{ kubeClient: kubeClient, @@ -103,6 +105,8 @@ func NewStatefulSetController( pvcListerSynced: pvcInformer.Informer().HasSynced, queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "statefulset"), podControl: controller.RealPodControl{KubeClient: kubeClient, Recorder: recorder}, + + revListerSynced: revInformer.Informer().HasSynced, } podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ @@ -146,7 +150,7 @@ func (ssc *StatefulSetController) Run(workers int, stopCh <-chan struct{}) { glog.Infof("Starting stateful set controller") defer glog.Infof("Shutting down statefulset controller") - if !controller.WaitForCacheSync("stateful set", stopCh, ssc.podListerSynced, ssc.setListerSynced, ssc.pvcListerSynced) { + if !controller.WaitForCacheSync("stateful set", stopCh, ssc.podListerSynced, ssc.setListerSynced, ssc.pvcListerSynced, ssc.revListerSynced) { return } @@ -400,7 +404,7 @@ func (ssc *StatefulSetController) processNextWorkItem() bool { return true } -// worker runs a worker goroutine that invokes processNextWorkItem until the the controller's queue is closed +// worker runs a worker goroutine that invokes processNextWorkItem until the controller's queue is closed func (ssc *StatefulSetController) worker() { for ssc.processNextWorkItem() { } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/statefulset/stateful_set_utils.go b/vendor/k8s.io/kubernetes/pkg/controller/statefulset/stateful_set_utils.go index fc8ba3e5448f..4e23784ac3d6 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/statefulset/stateful_set_utils.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/statefulset/stateful_set_utils.go @@ -112,7 +112,8 @@ func identityMatches(set *apps.StatefulSet, pod *v1.Pod) bool { return ordinal >= 0 && set.Name == parent && pod.Name == getPodName(set, ordinal) && - pod.Namespace == set.Namespace + pod.Namespace == set.Namespace && + pod.Labels[apps.StatefulSetPodNameLabel] == pod.Name } // storageMatches returns true if pod's Volumes cover the set of PersistentVolumeClaims @@ -187,11 +188,15 @@ func initIdentity(set *apps.StatefulSet, pod *v1.Pod) { pod.Spec.Subdomain = set.Spec.ServiceName } -// updateIdentity updates pod's name, hostname, and subdomain to conform to set's name and headless service. +// updateIdentity updates pod's name, hostname, and subdomain, and StatefulSetPodNameLabel to conform to set's name +// and headless service. func updateIdentity(set *apps.StatefulSet, pod *v1.Pod) { pod.Name = getPodName(set, getOrdinal(pod)) pod.Namespace = set.Namespace - + if pod.Labels == nil { + pod.Labels = make(map[string]string) + } + pod.Labels[apps.StatefulSetPodNameLabel] = pod.Name } // isRunningAndReady returns true if pod is in the PodRunning Phase, if it has a condition of PodReady. 
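
For context on the statefulset hunks above: updateIdentity now stamps a pod-name label on every pod it adopts, and identityMatches verifies that label in addition to the name, namespace, and ordinal checks. The following is a minimal standalone sketch of that pattern, not part of the vendored diff; the label value is assumed to match apps.StatefulSetPodNameLabel, and the pod type is a simplified stand-in for the real v1.Pod.

package main

import "fmt"

// Assumed value of apps.StatefulSetPodNameLabel in this release.
const statefulSetPodNameLabel = "statefulset.kubernetes.io/pod-name"

type pod struct {
	Name   string
	Labels map[string]string
}

// updateIdentity mirrors the diff: initialize Labels if needed and stamp the
// pod-name label so identityMatches can verify it later.
func updateIdentity(p *pod) {
	if p.Labels == nil {
		p.Labels = make(map[string]string)
	}
	p.Labels[statefulSetPodNameLabel] = p.Name
}

// identityMatches shows only the new label check added in the diff.
func identityMatches(p *pod) bool {
	return p.Labels[statefulSetPodNameLabel] == p.Name
}

func main() {
	p := &pod{Name: "web-0"}
	fmt.Println(identityMatches(p)) // false before the label is stamped
	updateIdentity(p)
	fmt.Println(identityMatches(p)) // true afterwards
}
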
diff --git a/vendor/k8s.io/kubernetes/pkg/controller/ttl/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/ttl/BUILD index f54cc8aef5aa..7c96860f7573 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/ttl/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/ttl/BUILD @@ -9,6 +9,7 @@ load( go_library( name = "go_default_library", srcs = ["ttl_controller.go"], + importpath = "k8s.io/kubernetes/pkg/controller/ttl", deps = [ "//pkg/controller:go_default_library", "//vendor/github.com/golang/glog:go_default_library", @@ -43,6 +44,7 @@ filegroup( go_test( name = "go_default_test", srcs = ["ttl_controller_test.go"], + importpath = "k8s.io/kubernetes/pkg/controller/ttl", library = ":go_default_library", deps = [ "//vendor/github.com/stretchr/testify/assert:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/ttl/ttl_controller.go b/vendor/k8s.io/kubernetes/pkg/controller/ttl/ttl_controller.go index 3581e0519f05..2938ca8270b6 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/ttl/ttl_controller.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/ttl/ttl_controller.go @@ -263,7 +263,7 @@ func (ttlc *TTLController) patchNodeWithAnnotation(node *v1.Node, annotationKey if err != nil { return err } - _, err = ttlc.kubeClient.Core().Nodes().Patch(node.Name, types.StrategicMergePatchType, patchBytes) + _, err = ttlc.kubeClient.CoreV1().Nodes().Patch(node.Name, types.StrategicMergePatchType, patchBytes) if err != nil { glog.V(2).Infof("Failed to change ttl annotation for node %s: %v", node.Name, err) return err diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/BUILD index b3d129b7abed..d3b76f22ff4c 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/BUILD @@ -9,6 +9,7 @@ load( go_library( name = "go_default_library", srcs = ["attach_detach_controller.go"], + importpath = "k8s.io/kubernetes/pkg/controller/volume/attachdetach", deps = [ "//pkg/cloudprovider:go_default_library", "//pkg/controller:go_default_library", @@ -20,6 +21,7 @@ go_library( "//pkg/util/io:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", + "//pkg/volume/util:go_default_library", "//pkg/volume/util/operationexecutor:go_default_library", "//pkg/volume/util/volumehelper:go_default_library", "//vendor/github.com/golang/glog:go_default_library", @@ -40,6 +42,7 @@ go_library( go_test( name = "go_default_test", srcs = ["attach_detach_controller_test.go"], + importpath = "k8s.io/kubernetes/pkg/controller/volume/attachdetach", library = ":go_default_library", deps = [ "//pkg/controller:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/attach_detach_controller.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/attach_detach_controller.go index a608fbf94d7d..3c01e6b05a4c 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/attach_detach_controller.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/attach_detach_controller.go @@ -45,6 +45,7 @@ import ( "k8s.io/kubernetes/pkg/util/io" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" + volumeutil "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/pkg/volume/util/operationexecutor" "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) @@ -134,8 +135,9 @@ func NewAttachDetachController( eventBroadcaster := record.NewBroadcaster() 
eventBroadcaster.StartLogging(glog.Infof) - eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.Core().RESTClient()).Events("")}) - recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "attachdetach"}) + eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")}) + recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "attachdetach-controller"}) + blkutil := volumeutil.NewBlockVolumePathHandler() adc.desiredStateOfWorld = cache.NewDesiredStateOfWorld(&adc.volumePluginMgr) adc.actualStateOfWorld = cache.NewActualStateOfWorld(&adc.volumePluginMgr) @@ -144,7 +146,8 @@ func NewAttachDetachController( kubeClient, &adc.volumePluginMgr, recorder, - false)) // flag for experimental binary check for volume mount + false, // flag for experimental binary check for volume mount + blkutil)) adc.nodeStatusUpdater = statusupdater.NewNodeStatusUpdater( kubeClient, nodeInformer.Lister(), adc.actualStateOfWorld) @@ -294,7 +297,7 @@ func (adc *attachDetachController) populateActualStateOfWorld() error { glog.Errorf("Failed to mark the volume as attached: %v", err) continue } - adc.processVolumesInUse(nodeName, node.Status.VolumesInUse, true /* forceUnmount */) + adc.processVolumesInUse(nodeName, node.Status.VolumesInUse) adc.addNodeToDswp(node, types.NodeName(node.Name)) } } @@ -463,7 +466,7 @@ func (adc *attachDetachController) nodeUpdate(oldObj, newObj interface{}) { nodeName := types.NodeName(node.Name) adc.addNodeToDswp(node, nodeName) - adc.processVolumesInUse(nodeName, node.Status.VolumesInUse, false /* forceUnmount */) + adc.processVolumesInUse(nodeName, node.Status.VolumesInUse) } func (adc *attachDetachController) nodeDelete(obj interface{}) { @@ -478,7 +481,7 @@ func (adc *attachDetachController) nodeDelete(obj interface{}) { glog.Infof("error removing node %q from desired-state-of-world: %v", nodeName, err) } - adc.processVolumesInUse(nodeName, node.Status.VolumesInUse, false /* forceUnmount */) + adc.processVolumesInUse(nodeName, node.Status.VolumesInUse) } // processVolumesInUse processes the list of volumes marked as "in-use" @@ -486,7 +489,7 @@ func (adc *attachDetachController) nodeDelete(obj interface{}) { // corresponding volume in the actual state of the world to indicate that it is // mounted. 
func (adc *attachDetachController) processVolumesInUse( - nodeName types.NodeName, volumesInUse []v1.UniqueVolumeName, forceUnmount bool) { + nodeName types.NodeName, volumesInUse []v1.UniqueVolumeName) { glog.V(4).Infof("processVolumesInUse for node %q", nodeName) for _, attachedVolume := range adc.actualStateOfWorld.GetAttachedVolumesForNode(nodeName) { mounted := false @@ -496,8 +499,7 @@ func (adc *attachDetachController) processVolumesInUse( break } } - err := adc.actualStateOfWorld.SetVolumeMountedByNode( - attachedVolume.VolumeName, nodeName, mounted, forceUnmount) + err := adc.actualStateOfWorld.SetVolumeMountedByNode(attachedVolume.VolumeName, nodeName, mounted) if err != nil { glog.Warningf( "SetVolumeMountedByNode(%q, %q, %q) returned an error: %v", @@ -516,6 +518,10 @@ func (adc *attachDetachController) GetPluginDir(podUID string) string { return "" } +func (adc *attachDetachController) GetVolumeDevicePluginDir(podUID string) string { + return "" +} + func (adc *attachDetachController) GetPodVolumeDir(podUID types.UID, pluginName, volumeName string) string { return "" } @@ -524,6 +530,10 @@ func (adc *attachDetachController) GetPodPluginDir(podUID types.UID, pluginName return "" } +func (adc *attachDetachController) GetPodVolumeDeviceDir(podUID types.UID, pluginName string) string { + return "" +} + func (adc *attachDetachController) GetKubeClient() clientset.Interface { return adc.kubeClient } @@ -593,3 +603,7 @@ func (adc *attachDetachController) addNodeToDswp(node *v1.Node, nodeName types.N func (adc *attachDetachController) GetNodeLabels() (map[string]string, error) { return nil, fmt.Errorf("GetNodeLabels() unsupported in Attach/Detach controller") } + +func (adc *attachDetachController) GetNodeName() types.NodeName { + return "" +} diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache/BUILD index b9f282c25454..6bc3d938c54e 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache/BUILD @@ -12,6 +12,7 @@ go_library( "actual_state_of_world.go", "desired_state_of_world.go", ], + importpath = "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache", deps = [ "//pkg/volume:go_default_library", "//pkg/volume/util/operationexecutor:go_default_library", @@ -29,6 +30,7 @@ go_test( "actual_state_of_world_test.go", "desired_state_of_world_test.go", ], + importpath = "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache", library = ":go_default_library", deps = [ "//pkg/controller/volume/attachdetach/testing:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go index 4ee4e26c9b42..9bb6320c6621 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go @@ -68,7 +68,7 @@ type ActualStateOfWorld interface { // returned. // If no node with the name nodeName exists in list of attached nodes for // the specified volume, an error is returned. 
- SetVolumeMountedByNode(volumeName v1.UniqueVolumeName, nodeName types.NodeName, mounted bool, forceUnmount bool) error + SetVolumeMountedByNode(volumeName v1.UniqueVolumeName, nodeName types.NodeName, mounted bool) error // SetNodeStatusUpdateNeeded sets statusUpdateNeeded for the specified // node to true indicating the AttachedVolume field in the Node's Status @@ -173,7 +173,7 @@ type actualStateOfWorld struct { sync.RWMutex } -// The volume object represents a volume the the attach/detach controller +// The volume object represents a volume the attach/detach controller // believes to be successfully attached to a node it is managing. type attachedVolume struct { // volumeName contains the unique identifier for this volume. @@ -331,7 +331,7 @@ func (asw *actualStateOfWorld) AddVolumeNode( } func (asw *actualStateOfWorld) SetVolumeMountedByNode( - volumeName v1.UniqueVolumeName, nodeName types.NodeName, mounted bool, forceUnmount bool) error { + volumeName v1.UniqueVolumeName, nodeName types.NodeName, mounted bool) error { asw.Lock() defer asw.Unlock() @@ -343,11 +343,6 @@ func (asw *actualStateOfWorld) SetVolumeMountedByNode( if mounted { // Increment set count nodeObj.mountedByNodeSetCount = nodeObj.mountedByNodeSetCount + 1 - } else { - // Do not allow value to be reset unless it has been set at least once - if nodeObj.mountedByNodeSetCount == 0 && !forceUnmount { - return nil - } } nodeObj.mountedByNode = mounted @@ -480,7 +475,6 @@ func (asw *actualStateOfWorld) updateNodeStatusUpdateNeeded(nodeName types.NodeN // should not happen errMsg := fmt.Sprintf("Failed to set statusUpdateNeeded to needed %t because nodeName=%q does not exist", needed, nodeName) - glog.Errorf(errMsg) return fmt.Errorf(errMsg) } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache/desired_state_of_world.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache/desired_state_of_world.go index f4481f716184..57ee9253ec1d 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache/desired_state_of_world.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache/desired_state_of_world.go @@ -101,6 +101,10 @@ type DesiredStateOfWorld interface { // GetKeepTerminatedPodVolumesForNode determines if node wants volumes to be // mounted and attached for terminated pods GetKeepTerminatedPodVolumesForNode(k8stypes.NodeName) bool + + // Mark multiattach error as reported to prevent spamming multiple + // events for same error + SetMultiAttachError(v1.UniqueVolumeName, k8stypes.NodeName) } // VolumeToAttach represents a volume that should be attached to a node. 
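
The desired-state-of-world hunks above add SetMultiAttachError so the attach/detach reconciler can record the Multi-Attach warning once and suppress it on later passes. The sketch below illustrates only that de-duplication pattern; it is not vendored code, and the types and the reconcileOnce helper are simplified, hypothetical stand-ins for the real cache and reconciler.

package main

import "fmt"

type volumeToAttach struct {
	multiAttachErrorReported bool
}

type desiredStateOfWorld struct {
	volumes map[string]*volumeToAttach
}

// SetMultiAttachError flips the reported flag, mirroring the new interface method.
func (dsw *desiredStateOfWorld) SetMultiAttachError(name string) {
	if v, ok := dsw.volumes[name]; ok {
		v.multiAttachErrorReported = true
	}
}

// reconcileOnce emits the Multi-Attach warning only on the first pass in which
// the volume is found attached elsewhere, then marks it as reported.
func (dsw *desiredStateOfWorld) reconcileOnce(name string, attachedElsewhere bool) {
	v, ok := dsw.volumes[name]
	if !ok {
		return
	}
	if attachedElsewhere {
		if !v.multiAttachErrorReported {
			fmt.Println("event: Multi-Attach error for", name)
			dsw.SetMultiAttachError(name)
		}
		return
	}
	fmt.Println("attaching", name)
}

func main() {
	dsw := &desiredStateOfWorld{volumes: map[string]*volumeToAttach{"vol-1": {}}}
	dsw.reconcileOnce("vol-1", true) // warning event emitted once
	dsw.reconcileOnce("vol-1", true) // suppressed on subsequent passes
}
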
@@ -329,6 +333,21 @@ func (dsw *desiredStateOfWorld) VolumeExists( return false } +func (dsw *desiredStateOfWorld) SetMultiAttachError( + volumeName v1.UniqueVolumeName, + nodeName k8stypes.NodeName) { + dsw.Lock() + defer dsw.Unlock() + + nodeObj, nodeExists := dsw.nodesManaged[nodeName] + if nodeExists { + if volumeObj, volumeExists := nodeObj.volumesToAttach[volumeName]; volumeExists { + volumeObj.multiAttachErrorReported = true + dsw.nodesManaged[nodeName].volumesToAttach[volumeName] = volumeObj + } + } +} + // GetKeepTerminatedPodVolumesForNode determines if node wants volumes to be // mounted and attached for terminated pods func (dsw *desiredStateOfWorld) GetKeepTerminatedPodVolumesForNode(nodeName k8stypes.NodeName) bool { diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/populator/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/populator/BUILD index 51d178f2009e..78f4ac364736 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/populator/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/populator/BUILD @@ -9,6 +9,7 @@ load( go_library( name = "go_default_library", srcs = ["desired_state_of_world_populator.go"], + importpath = "k8s.io/kubernetes/pkg/controller/volume/attachdetach/populator", deps = [ "//pkg/controller/volume/attachdetach/cache:go_default_library", "//pkg/controller/volume/attachdetach/util:go_default_library", @@ -40,6 +41,7 @@ filegroup( go_test( name = "go_default_test", srcs = ["desired_state_of_world_populator_test.go"], + importpath = "k8s.io/kubernetes/pkg/controller/volume/attachdetach/populator", library = ":go_default_library", deps = [ "//pkg/controller:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler/BUILD index bc683f6bf0ef..16379f5c0b9c 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler/BUILD @@ -9,6 +9,7 @@ load( go_library( name = "go_default_library", srcs = ["reconciler.go"], + importpath = "k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler", deps = [ "//pkg/controller/volume/attachdetach/cache:go_default_library", "//pkg/controller/volume/attachdetach/statusupdater:go_default_library", @@ -26,6 +27,7 @@ go_library( go_test( name = "go_default_test", srcs = ["reconciler_test.go"], + importpath = "k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler", library = ":go_default_library", deps = [ "//pkg/controller:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler/reconciler.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler/reconciler.go index 26133bb3bcd5..ac81f98c6ae1 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler/reconciler.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler/reconciler.go @@ -241,52 +241,56 @@ func (rc *reconciler) reconcile() { } } + rc.attachDesiredVolumes() + + // Update Node Status + err := rc.nodeStatusUpdater.UpdateNodeStatuses() + if err != nil { + glog.Warningf("UpdateNodeStatuses failed with: %v", err) + } +} + +func (rc *reconciler) attachDesiredVolumes() { // Ensure volumes that should be attached are attached. 
for _, volumeToAttach := range rc.desiredStateOfWorld.GetVolumesToAttach() { - if rc.actualStateOfWorld.VolumeNodeExists( - volumeToAttach.VolumeName, volumeToAttach.NodeName) { + if rc.actualStateOfWorld.VolumeNodeExists(volumeToAttach.VolumeName, volumeToAttach.NodeName) { // Volume/Node exists, touch it to reset detachRequestedTime glog.V(5).Infof(volumeToAttach.GenerateMsgDetailed("Volume attached--touching", "")) rc.actualStateOfWorld.ResetDetachRequestTime(volumeToAttach.VolumeName, volumeToAttach.NodeName) - } else { - // Don't even try to start an operation if there is already one running - if rc.attacherDetacher.IsOperationPending(volumeToAttach.VolumeName, "") { - glog.V(10).Infof("Operation for volume %q is already running. Can't start attach for %q", volumeToAttach.VolumeName, volumeToAttach.NodeName) - continue - } + continue + } + // Don't even try to start an operation if there is already one running + if rc.attacherDetacher.IsOperationPending(volumeToAttach.VolumeName, "") { + glog.V(10).Infof("Operation for volume %q is already running. Can't start attach for %q", volumeToAttach.VolumeName, volumeToAttach.NodeName) + continue + } - if rc.isMultiAttachForbidden(volumeToAttach.VolumeSpec) { - nodes := rc.actualStateOfWorld.GetNodesForVolume(volumeToAttach.VolumeName) - if len(nodes) > 0 { - if !volumeToAttach.MultiAttachErrorReported { - simpleMsg, detailedMsg := volumeToAttach.GenerateMsg("Multi-Attach error", "Volume is already exclusively attached to one node and can't be attached to another") - for _, pod := range volumeToAttach.ScheduledPods { - rc.recorder.Eventf(pod, v1.EventTypeWarning, kevents.FailedAttachVolume, simpleMsg) - } - volumeToAttach.MultiAttachErrorReported = true - glog.Warningf(detailedMsg) + if rc.isMultiAttachForbidden(volumeToAttach.VolumeSpec) { + nodes := rc.actualStateOfWorld.GetNodesForVolume(volumeToAttach.VolumeName) + if len(nodes) > 0 { + if !volumeToAttach.MultiAttachErrorReported { + simpleMsg, detailedMsg := volumeToAttach.GenerateMsg("Multi-Attach error", "Volume is already exclusively attached to one node and can't be attached to another") + for _, pod := range volumeToAttach.ScheduledPods { + rc.recorder.Eventf(pod, v1.EventTypeWarning, kevents.FailedAttachVolume, simpleMsg) } - continue + rc.desiredStateOfWorld.SetMultiAttachError(volumeToAttach.VolumeName, volumeToAttach.NodeName) + glog.Warningf(detailedMsg) } + continue } + } - // Volume/Node doesn't exist, spawn a goroutine to attach it - glog.V(5).Infof(volumeToAttach.GenerateMsgDetailed("Starting attacherDetacher.AttachVolume", "")) - err := rc.attacherDetacher.AttachVolume(volumeToAttach.VolumeToAttach, rc.actualStateOfWorld) - if err == nil { - glog.Infof(volumeToAttach.GenerateMsgDetailed("attacherDetacher.AttachVolume started", "")) - } - if err != nil && !exponentialbackoff.IsExponentialBackoff(err) { - // Ignore exponentialbackoff.IsExponentialBackoff errors, they are expected. - // Log all other errors. 
- glog.Errorf(volumeToAttach.GenerateErrorDetailed("attacherDetacher.AttachVolume failed to start", err).Error()) - } + // Volume/Node doesn't exist, spawn a goroutine to attach it + glog.V(5).Infof(volumeToAttach.GenerateMsgDetailed("Starting attacherDetacher.AttachVolume", "")) + err := rc.attacherDetacher.AttachVolume(volumeToAttach.VolumeToAttach, rc.actualStateOfWorld) + if err == nil { + glog.Infof(volumeToAttach.GenerateMsgDetailed("attacherDetacher.AttachVolume started", "")) + } + if err != nil && !exponentialbackoff.IsExponentialBackoff(err) { + // Ignore exponentialbackoff.IsExponentialBackoff errors, they are expected. + // Log all other errors. + glog.Errorf(volumeToAttach.GenerateErrorDetailed("attacherDetacher.AttachVolume failed to start", err).Error()) } } - // Update Node Status - err := rc.nodeStatusUpdater.UpdateNodeStatuses() - if err != nil { - glog.Warningf("UpdateNodeStatuses failed with: %v", err) - } } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater/BUILD index 9e294e2ba5df..e60d31be92e2 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater/BUILD @@ -11,6 +11,7 @@ go_library( "fake_node_status_updater.go", "node_status_updater.go", ], + importpath = "k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater", deps = [ "//pkg/controller/volume/attachdetach/cache:go_default_library", "//vendor/github.com/golang/glog:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater/node_status_updater.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater/node_status_updater.go index e1b31fbccc8c..ab6dd05ecc4d 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater/node_status_updater.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater/node_status_updater.go @@ -129,10 +129,10 @@ func (nsu *nodeStatusUpdater) updateNodeStatus(nodeName types.NodeName, nodeObj err) } - _, err = nsu.kubeClient.Core().Nodes().PatchStatus(string(nodeName), patchBytes) + _, err = nsu.kubeClient.CoreV1().Nodes().PatchStatus(string(nodeName), patchBytes) if err != nil { return fmt.Errorf( - "failed to kubeClient.Core().Nodes().Patch for node %q. %v", + "failed to kubeClient.CoreV1().Nodes().Patch for node %q. 
%v", nodeName, err) } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/util/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/util/BUILD index 75a2a3122e8d..06848fecca38 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/util/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/util/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["util.go"], + importpath = "k8s.io/kubernetes/pkg/controller/volume/attachdetach/util", deps = [ "//pkg/controller/volume/attachdetach/cache:go_default_library", "//pkg/volume:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/events/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/volume/events/BUILD index eb4d17dddf39..73dc4c3f4406 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/events/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/events/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["event.go"], + importpath = "k8s.io/kubernetes/pkg/controller/volume/events", ) filegroup( diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/events/event.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/events/event.go index b5ac3fb7be89..c99c30c99a7c 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/events/event.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/events/event.go @@ -29,4 +29,5 @@ const ( ProvisioningFailed = "ProvisioningFailed" ProvisioningCleanupFailed = "ProvisioningCleanupFailed" ProvisioningSucceeded = "ProvisioningSucceeded" + WaitForFirstConsumer = "WaitForFirstConsumer" ) diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/expand/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/volume/expand/BUILD index b68223bee842..74ae103b74c2 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/expand/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/expand/BUILD @@ -1,7 +1,5 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", @@ -14,7 +12,7 @@ go_library( "pvc_populator.go", "sync_volume_resize.go", ], - tags = ["automanaged"], + importpath = "k8s.io/kubernetes/pkg/controller/volume/expand", deps = [ "//pkg/cloudprovider:go_default_library", "//pkg/controller:go_default_library", @@ -24,6 +22,7 @@ go_library( "//pkg/util/io:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", + "//pkg/volume/util:go_default_library", "//pkg/volume/util/operationexecutor:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/expand/cache/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/volume/expand/cache/BUILD index f07d34a020af..c9f64da1a972 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/expand/cache/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/expand/cache/BUILD @@ -1,7 +1,5 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", @@ -11,7 +9,7 @@ load( go_library( name = "go_default_library", srcs = ["volume_resize_map.go"], - tags = ["automanaged"], + importpath = "k8s.io/kubernetes/pkg/controller/volume/expand/cache", deps = [ "//pkg/controller/volume/expand/util:go_default_library", "//pkg/util/strings:go_default_library", @@ -41,6 +39,7 @@ filegroup( go_test( name = "go_default_test", 
srcs = ["volume_resize_map_test.go"], + importpath = "k8s.io/kubernetes/pkg/controller/volume/expand/cache", library = ":go_default_library", deps = [ "//pkg/volume/util/types:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/expand/cache/volume_resize_map.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/expand/cache/volume_resize_map.go index 5676192bccd5..2645a294c23b 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/expand/cache/volume_resize_map.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/expand/cache/volume_resize_map.go @@ -88,6 +88,11 @@ func NewVolumeResizeMap(kubeClient clientset.Interface) VolumeResizeMap { } // AddPVCUpdate adds pvc for resizing +// This function intentionally allows addition of PVCs for which pv.Spec.Size >= pvc.Spec.Size, +// the reason being - lack of transaction in k8s means after successful resize, we can't guarantee that when we update PV, +// pvc update will be successful too and after resize we alyways update PV first. +// If for some reason we weren't able to update PVC after successful resize, then we are going to reprocess +// the PVC and hopefully after a no-op resize in volume plugin, PVC will be updated with right values as well. func (resizeMap *volumeResizeMap) AddPVCUpdate(pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) { if pv.Spec.ClaimRef == nil || pvc.Namespace != pv.Spec.ClaimRef.Namespace || pvc.Name != pv.Spec.ClaimRef.Name { glog.V(4).Infof("Persistent Volume is not bound to PVC being updated : %s", util.ClaimToClaimKey(pvc)) diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/expand/expand_controller.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/expand/expand_controller.go index 1c71528ef912..6d6ac0edf9bd 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/expand/expand_controller.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/expand/expand_controller.go @@ -42,6 +42,7 @@ import ( "k8s.io/kubernetes/pkg/util/io" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" + "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/pkg/volume/util/operationexecutor" ) @@ -117,12 +118,14 @@ func NewExpandController( eventBroadcaster.StartLogging(glog.Infof) eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")}) recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "volume_expand"}) + blkutil := util.NewBlockVolumePathHandler() expc.opExecutor = operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator( kubeClient, &expc.volumePluginMgr, recorder, - false)) + false, + blkutil)) expc.resizeMap = cache.NewVolumeResizeMap(expc.kubeClient) @@ -203,10 +206,18 @@ func (expc *expandController) GetPluginDir(pluginName string) string { return "" } +func (expc *expandController) GetVolumeDevicePluginDir(pluginName string) string { + return "" +} + func (expc *expandController) GetPodVolumeDir(podUID types.UID, pluginName string, volumeName string) string { return "" } +func (expc *expandController) GetPodVolumeDeviceDir(podUID types.UID, pluginName string) string { + return "" +} + func (expc *expandController) GetPodPluginDir(podUID types.UID, pluginName string) string { return "" } @@ -266,3 +277,7 @@ func (expc *expandController) GetConfigMapFunc() func(namespace, name string) (* func (expc *expandController) GetNodeLabels() (map[string]string, error) { return nil, fmt.Errorf("GetNodeLabels unsupported in expandController") } + 
+func (expc *expandController) GetNodeName() types.NodeName { + return "" +} diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/expand/pvc_populator.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/expand/pvc_populator.go index 220e1092f7d5..bf46175f1eff 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/expand/pvc_populator.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/expand/pvc_populator.go @@ -80,6 +80,11 @@ func (populator *pvcPopulator) Sync() { glog.V(5).Infof("Error getting persistent volume for pvc %q : %v", pvc.UID, err) continue } + // We are only going to add PVCs which are: + // - bound + // - pvc.Spec.Size > pvc.Status.Size + // These 2 checks are already performed in AddPVCUpdate function before adding pvc for resize + // and hence we do not repeat those checks here. populator.resizeMap.AddPVCUpdate(pvc, pv) } } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/expand/util/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/volume/expand/util/BUILD index ce155342a61c..b7bb8c69f0ac 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/expand/util/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/expand/util/BUILD @@ -3,6 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = ["util.go"], + importpath = "k8s.io/kubernetes/pkg/controller/volume/expand/util", visibility = ["//visibility:public"], deps = [ "//vendor/github.com/golang/glog:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/BUILD index 7021ddfcb6b9..01ab29857e6c 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/BUILD @@ -12,13 +12,19 @@ go_library( "index.go", "pv_controller.go", "pv_controller_base.go", + "scheduler_assume_cache.go", + "scheduler_binder.go", + "scheduler_binder_cache.go", + "scheduler_binder_fake.go", "volume_host.go", ], + importpath = "k8s.io/kubernetes/pkg/controller/volume/persistentvolume", deps = [ - "//pkg/api/v1/helper:go_default_library", + "//pkg/apis/core/v1/helper:go_default_library", "//pkg/cloudprovider:go_default_library", "//pkg/controller:go_default_library", "//pkg/controller/volume/events:go_default_library", + "//pkg/features:go_default_library", "//pkg/util/goroutinemap:go_default_library", "//pkg/util/goroutinemap/exponentialbackoff:go_default_library", "//pkg/util/io:go_default_library", @@ -36,6 +42,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/client-go/informers/core/v1:go_default_library", "//vendor/k8s.io/client-go/informers/storage/v1:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", @@ -60,11 +67,16 @@ go_test( "provision_test.go", "pv_controller_test.go", "recycle_test.go", + "scheduler_assume_cache_test.go", + "scheduler_binder_cache_test.go", + "scheduler_binder_test.go", ], + importpath = "k8s.io/kubernetes/pkg/controller/volume/persistentvolume", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", "//pkg/api/testapi:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/v1/helper:go_default_library", 
"//pkg/controller:go_default_library", "//pkg/volume:go_default_library", "//vendor/github.com/golang/glog:go_default_library", @@ -78,6 +90,7 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/client-go/informers:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/index.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/index.go index e9a40fe1c853..5c3457445643 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/index.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/index.go @@ -24,9 +24,12 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/tools/cache" - v1helper "k8s.io/kubernetes/pkg/api/v1/helper" + v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" + "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/volume" + volumeutil "k8s.io/kubernetes/pkg/volume/util" ) // persistentVolumeOrderedIndex is a cache.Store that keeps persistent volumes @@ -72,7 +75,7 @@ func (pvIndex *persistentVolumeOrderedIndex) listByAccessModes(modes []v1.Persis } // find returns the nearest PV from the ordered list or nil if a match is not found -func (pvIndex *persistentVolumeOrderedIndex) findByClaim(claim *v1.PersistentVolumeClaim) (*v1.PersistentVolume, error) { +func (pvIndex *persistentVolumeOrderedIndex) findByClaim(claim *v1.PersistentVolumeClaim, delayBinding bool) (*v1.PersistentVolume, error) { // PVs are indexed by their access modes to allow easier searching. Each // index is the string representation of a set of access modes. There is a // finite number of possible sets and PVs will only be indexed in one of @@ -88,6 +91,45 @@ func (pvIndex *persistentVolumeOrderedIndex) findByClaim(claim *v1.PersistentVol // example above). allPossibleModes := pvIndex.allPossibleMatchingAccessModes(claim.Spec.AccessModes) + for _, modes := range allPossibleModes { + volumes, err := pvIndex.listByAccessModes(modes) + if err != nil { + return nil, err + } + + bestVol, err := findMatchingVolume(claim, volumes, nil /* node for topology binding*/, nil /* exclusion map */, delayBinding) + if err != nil { + return nil, err + } + + if bestVol != nil { + return bestVol, nil + } + } + return nil, nil +} + +// findMatchingVolume goes through the list of volumes to find the best matching volume +// for the claim. +// +// This function is used by both the PV controller and scheduler. +// +// delayBinding is true only in the PV controller path. When set, prebound PVs are still returned +// as a match for the claim, but unbound PVs are skipped. +// +// node is set only in the scheduler path. When set, the PV node affinity is checked against +// the node's labels. +// +// excludedVolumes is only used in the scheduler path, and is needed for evaluating multiple +// unbound PVCs for a single Pod at one time. As each PVC finds a matching PV, the chosen +// PV needs to be excluded from future matching. 
+func findMatchingVolume( + claim *v1.PersistentVolumeClaim, + volumes []*v1.PersistentVolume, + node *v1.Node, + excludedVolumes map[string]*v1.PersistentVolume, + delayBinding bool) (*v1.PersistentVolume, error) { + var smallestVolume *v1.PersistentVolume var smallestVolumeQty resource.Quantity requestedQty := claim.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] @@ -103,63 +145,130 @@ func (pvIndex *persistentVolumeOrderedIndex) findByClaim(claim *v1.PersistentVol selector = internalSelector } - for _, modes := range allPossibleModes { - volumes, err := pvIndex.listByAccessModes(modes) + // Go through all available volumes with two goals: + // - find a volume that is either pre-bound by user or dynamically + // provisioned for this claim. Because of this we need to loop through + // all volumes. + // - find the smallest matching one if there is no volume pre-bound to + // the claim. + for _, volume := range volumes { + if _, ok := excludedVolumes[volume.Name]; ok { + // Skip volumes in the excluded list + continue + } + + volumeQty := volume.Spec.Capacity[v1.ResourceStorage] + + // check if volumeModes do not match (Alpha and feature gate protected) + isMisMatch, err := checkVolumeModeMisMatches(&claim.Spec, &volume.Spec) if err != nil { - return nil, err + return nil, fmt.Errorf("error checking if volumeMode was a mismatch: %v", err) + } + // filter out mismatching volumeModes + if isMisMatch { + continue } - // Go through all available volumes with two goals: - // - find a volume that is either pre-bound by user or dynamically - // provisioned for this claim. Because of this we need to loop through - // all volumes. - // - find the smallest matching one if there is no volume pre-bound to - // the claim. - for _, volume := range volumes { - if isVolumeBoundToClaim(volume, claim) { - // this claim and volume are pre-bound; return - // the volume if the size request is satisfied, - // otherwise continue searching for a match - volumeQty := volume.Spec.Capacity[v1.ResourceStorage] - if volumeQty.Cmp(requestedQty) < 0 { - continue - } - return volume, nil + nodeAffinityValid := true + if node != nil { + // Scheduler path, check that the PV NodeAffinity + // is satisfied by the node + err := volumeutil.CheckNodeAffinity(volume, node.Labels) + if err != nil { + nodeAffinityValid = false } + } - // filter out: - // - volumes bound to another claim - // - volumes whose labels don't match the claim's selector, if specified - // - volumes in Class that is not requested - if volume.Spec.ClaimRef != nil { - continue - } else if selector != nil && !selector.Matches(labels.Set(volume.Labels)) { + if isVolumeBoundToClaim(volume, claim) { + // this claim and volume are pre-bound; return + // the volume if the size request is satisfied, + // otherwise continue searching for a match + if volumeQty.Cmp(requestedQty) < 0 { continue } - if v1helper.GetPersistentVolumeClass(volume) != requestedClass { - continue + + // If PV node affinity is invalid, return no match. + // This means the prebound PV (and therefore PVC) + // is not suitable for this node. + if !nodeAffinityValid { + return nil, nil } - volumeQty := volume.Spec.Capacity[v1.ResourceStorage] - if volumeQty.Cmp(requestedQty) >= 0 { - if smallestVolume == nil || smallestVolumeQty.Cmp(volumeQty) > 0 { - smallestVolume = volume - smallestVolumeQty = volumeQty - } + return volume, nil + } + + if node == nil && delayBinding { + // PV controller does not bind this claim. 
+ // Scheduler will handle binding unbound volumes + // Scheduler path will have node != nil + continue + } + + // filter out: + // - volumes bound to another claim + // - volumes whose labels don't match the claim's selector, if specified + // - volumes in Class that is not requested + // - volumes whose NodeAffinity does not match the node + if volume.Spec.ClaimRef != nil { + continue + } else if selector != nil && !selector.Matches(labels.Set(volume.Labels)) { + continue + } + if v1helper.GetPersistentVolumeClass(volume) != requestedClass { + continue + } + if !nodeAffinityValid { + continue + } + + if node != nil { + // Scheduler path + // Check that the access modes match + if !checkAccessModes(claim, volume) { + continue } } - if smallestVolume != nil { - // Found a matching volume - return smallestVolume, nil + if volumeQty.Cmp(requestedQty) >= 0 { + if smallestVolume == nil || smallestVolumeQty.Cmp(volumeQty) > 0 { + smallestVolume = volume + smallestVolumeQty = volumeQty + } } } + + if smallestVolume != nil { + // Found a matching volume + return smallestVolume, nil + } + return nil, nil } +// checkVolumeModeMatches is a convenience method that checks volumeMode for PersistentVolume +// and PersistentVolumeClaims along with making sure that the Alpha feature gate BlockVolume is +// enabled. +// This is Alpha and could change in the future. +func checkVolumeModeMisMatches(pvcSpec *v1.PersistentVolumeClaimSpec, pvSpec *v1.PersistentVolumeSpec) (bool, error) { + if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + if pvSpec.VolumeMode != nil && pvcSpec.VolumeMode != nil { + requestedVolumeMode := *pvcSpec.VolumeMode + pvVolumeMode := *pvSpec.VolumeMode + return requestedVolumeMode != pvVolumeMode, nil + } else { + // This also should retrun an error, this means that + // the defaulting has failed. 
+ return true, fmt.Errorf("api defaulting for volumeMode failed") + } + } else { + // feature gate is disabled + return false, nil + } +} + // findBestMatchForClaim is a convenience method that finds a volume by the claim's AccessModes and requests for Storage -func (pvIndex *persistentVolumeOrderedIndex) findBestMatchForClaim(claim *v1.PersistentVolumeClaim) (*v1.PersistentVolume, error) { - return pvIndex.findByClaim(claim) +func (pvIndex *persistentVolumeOrderedIndex) findBestMatchForClaim(claim *v1.PersistentVolumeClaim, delayBinding bool) (*v1.PersistentVolume, error) { + return pvIndex.findByClaim(claim, delayBinding) } // allPossibleMatchingAccessModes returns an array of AccessMode arrays that @@ -241,3 +350,19 @@ func claimToClaimKey(claim *v1.PersistentVolumeClaim) string { func claimrefToClaimKey(claimref *v1.ObjectReference) string { return fmt.Sprintf("%s/%s", claimref.Namespace, claimref.Name) } + +// Returns true if PV satisfies all the PVC's requested AccessModes +func checkAccessModes(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) bool { + pvModesMap := map[v1.PersistentVolumeAccessMode]bool{} + for _, mode := range volume.Spec.AccessModes { + pvModesMap[mode] = true + } + + for _, mode := range claim.Spec.AccessModes { + _, ok := pvModesMap[mode] + if !ok { + return false + } + } + return true +} diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller.go index 58ad88cdfa1b..980d960c750b 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller.go @@ -26,6 +26,7 @@ import ( storage "k8s.io/api/storage/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilfeature "k8s.io/apiserver/pkg/util/feature" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" corelisters "k8s.io/client-go/listers/core/v1" @@ -34,9 +35,10 @@ import ( "k8s.io/client-go/tools/record" ref "k8s.io/client-go/tools/reference" "k8s.io/client-go/util/workqueue" - v1helper "k8s.io/kubernetes/pkg/api/v1/helper" + v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/controller/volume/events" + "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/util/goroutinemap" "k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff" vol "k8s.io/kubernetes/pkg/volume" @@ -231,6 +233,10 @@ func (ctrl *PersistentVolumeController) syncClaim(claim *v1.PersistentVolumeClai func checkVolumeSatisfyClaim(volume *v1.PersistentVolume, claim *v1.PersistentVolumeClaim) error { requestedQty := claim.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] requestedSize := requestedQty.Value() + isMisMatch, err := checkVolumeModeMisMatches(&claim.Spec, &volume.Spec) + if err != nil { + return fmt.Errorf("error checking if volumeMode was a mismatch: %v", err) + } volumeQty := volume.Spec.Capacity[v1.ResourceStorage] volumeSize := volumeQty.Value() @@ -243,9 +249,37 @@ func checkVolumeSatisfyClaim(volume *v1.PersistentVolume, claim *v1.PersistentVo return fmt.Errorf("Class of volume[%s] is not the same as claim[%v]", volume.Name, claimToClaimKey(claim)) } + if isMisMatch { + return fmt.Errorf("VolumeMode[%v] of volume[%s] is incompatible with VolumeMode[%v] of claim[%v]", volume.Spec.VolumeMode, volume.Name, claim.Spec.VolumeMode, claim.Name) + } + 
return nil } +func (ctrl *PersistentVolumeController) shouldDelayBinding(claim *v1.PersistentVolumeClaim) (bool, error) { + if !utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) { + return false, nil + } + + className := v1helper.GetPersistentVolumeClaimClass(claim) + if className == "" { + return false, nil + } + + class, err := ctrl.classLister.Get(className) + if err != nil { + return false, nil + } + + if class.VolumeBindingMode == nil { + return false, fmt.Errorf("VolumeBindingMode not set for StorageClass %q", className) + } + + // TODO: add check to handle dynamic provisioning later + + return *class.VolumeBindingMode == storage.VolumeBindingWaitForFirstConsumer, nil +} + // syncUnboundClaim is the main controller method to decide what to do with an // unbound claim. func (ctrl *PersistentVolumeController) syncUnboundClaim(claim *v1.PersistentVolumeClaim) error { @@ -253,9 +287,13 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(claim *v1.PersistentVol // OBSERVATION: pvc is "Pending" if claim.Spec.VolumeName == "" { // User did not care which PV they get. + delayBinding, err := ctrl.shouldDelayBinding(claim) + if err != nil { + return err + } // [Unit test set 1] - volume, err := ctrl.volumes.findBestMatchForClaim(claim) + volume, err := ctrl.volumes.findBestMatchForClaim(claim, delayBinding) if err != nil { glog.V(2).Infof("synchronizing unbound PersistentVolumeClaim[%s]: Error finding PV for claim: %v", claimToClaimKey(claim), err) return fmt.Errorf("Error finding PV for claim %q: %v", claimToClaimKey(claim), err) @@ -264,15 +302,21 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(claim *v1.PersistentVol glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: no volume found", claimToClaimKey(claim)) // No PV could be found // OBSERVATION: pvc is "Pending", will retry - if v1helper.GetPersistentVolumeClaimClass(claim) != "" { + switch { + case delayBinding: + // TODO: Skip dynamic provisioning for now + ctrl.eventRecorder.Event(claim, v1.EventTypeNormal, events.WaitForFirstConsumer, "waiting for first consumer to be created before binding") + case v1helper.GetPersistentVolumeClaimClass(claim) != "": if err = ctrl.provisionClaim(claim); err != nil { return err } return nil + default: + ctrl.eventRecorder.Event(claim, v1.EventTypeNormal, events.FailedBinding, "no persistent volumes available for this claim and no storage class is set") } + // Mark the claim as Pending and try to find a match in the next // periodic syncClaim - ctrl.eventRecorder.Event(claim, v1.EventTypeNormal, events.FailedBinding, "no persistent volumes available for this claim and no storage class is set") if _, err = ctrl.updateClaimStatus(claim, v1.ClaimPending, nil); err != nil { return err } @@ -319,7 +363,7 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(claim *v1.PersistentVol if err = checkVolumeSatisfyClaim(volume, claim); err != nil { glog.V(4).Infof("Can't bind the claim to volume %q: %v", volume.Name, err) //send a event - ctrl.eventRecorder.Event(volume, v1.EventTypeWarning, events.VolumeMismatch, "Volume's size is smaller than requested or volume's class does not match with claim") + ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.VolumeMismatch, "Volume's size is smaller than requested or volume's class does not match with claim") //volume does not satisfy the requirements of the claim if _, err = ctrl.updateClaimStatus(claim, v1.ClaimPending, nil); err != nil { return err @@ -641,7 +685,7 @@ func (ctrl 
*PersistentVolumeController) updateClaimStatus(claim *v1.PersistentVo return claim, nil } - newClaim, err := ctrl.kubeClient.Core().PersistentVolumeClaims(claimClone.Namespace).UpdateStatus(claimClone) + newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claimClone.Namespace).UpdateStatus(claimClone) if err != nil { glog.V(4).Infof("updating PersistentVolumeClaim[%s] status: set phase %s failed: %v", claimToClaimKey(claim), phase, err) return newClaim, err @@ -697,7 +741,7 @@ func (ctrl *PersistentVolumeController) updateVolumePhase(volume *v1.PersistentV volumeClone.Status.Phase = phase volumeClone.Status.Message = message - newVol, err := ctrl.kubeClient.Core().PersistentVolumes().UpdateStatus(volumeClone) + newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().UpdateStatus(volumeClone) if err != nil { glog.V(4).Infof("updating PersistentVolume[%s]: set phase %s failed: %v", volume.Name, phase, err) return newVol, err @@ -740,6 +784,42 @@ func (ctrl *PersistentVolumeController) updateVolumePhaseWithEvent(volume *v1.Pe func (ctrl *PersistentVolumeController) bindVolumeToClaim(volume *v1.PersistentVolume, claim *v1.PersistentVolumeClaim) (*v1.PersistentVolume, error) { glog.V(4).Infof("updating PersistentVolume[%s]: binding to %q", volume.Name, claimToClaimKey(claim)) + volumeClone, dirty, err := ctrl.getBindVolumeToClaim(volume, claim) + if err != nil { + return nil, err + } + + // Save the volume only if something was changed + if dirty { + return ctrl.updateBindVolumeToClaim(volumeClone, claim, true) + } + + glog.V(4).Infof("updating PersistentVolume[%s]: already bound to %q", volume.Name, claimToClaimKey(claim)) + return volume, nil +} + +// bindVolumeToClaim modifies given volume to be bound to a claim and saves it to +// API server. The claim is not modified in this method! 
+func (ctrl *PersistentVolumeController) updateBindVolumeToClaim(volumeClone *v1.PersistentVolume, claim *v1.PersistentVolumeClaim, updateCache bool) (*v1.PersistentVolume, error) { + glog.V(2).Infof("claim %q bound to volume %q", claimToClaimKey(claim), volumeClone.Name) + newVol, err := ctrl.kubeClient.Core().PersistentVolumes().Update(volumeClone) + if err != nil { + glog.V(4).Infof("updating PersistentVolume[%s]: binding to %q failed: %v", volumeClone.Name, claimToClaimKey(claim), err) + return newVol, err + } + if updateCache { + _, err = ctrl.storeVolumeUpdate(newVol) + if err != nil { + glog.V(4).Infof("updating PersistentVolume[%s]: cannot update internal cache: %v", volumeClone.Name, err) + return newVol, err + } + } + glog.V(4).Infof("updating PersistentVolume[%s]: bound to %q", newVol.Name, claimToClaimKey(claim)) + return newVol, nil +} + +// Get new PV object only, no API or cache update +func (ctrl *PersistentVolumeController) getBindVolumeToClaim(volume *v1.PersistentVolume, claim *v1.PersistentVolumeClaim) (*v1.PersistentVolume, bool, error) { dirty := false // Check if the volume was already bound (either by user or by controller) @@ -760,7 +840,7 @@ func (ctrl *PersistentVolumeController) bindVolumeToClaim(volume *v1.PersistentV claimRef, err := ref.GetReference(scheme.Scheme, claim) if err != nil { - return nil, fmt.Errorf("Unexpected error getting claim reference: %v", err) + return nil, false, fmt.Errorf("Unexpected error getting claim reference: %v", err) } volumeClone.Spec.ClaimRef = claimRef dirty = true @@ -772,25 +852,7 @@ func (ctrl *PersistentVolumeController) bindVolumeToClaim(volume *v1.PersistentV dirty = true } - // Save the volume only if something was changed - if dirty { - glog.V(2).Infof("claim %q bound to volume %q", claimToClaimKey(claim), volume.Name) - newVol, err := ctrl.kubeClient.Core().PersistentVolumes().Update(volumeClone) - if err != nil { - glog.V(4).Infof("updating PersistentVolume[%s]: binding to %q failed: %v", volume.Name, claimToClaimKey(claim), err) - return newVol, err - } - _, err = ctrl.storeVolumeUpdate(newVol) - if err != nil { - glog.V(4).Infof("updating PersistentVolume[%s]: cannot update internal cache: %v", volume.Name, err) - return newVol, err - } - glog.V(4).Infof("updating PersistentVolume[%s]: bound to %q", newVol.Name, claimToClaimKey(claim)) - return newVol, nil - } - - glog.V(4).Infof("updating PersistentVolume[%s]: already bound to %q", volume.Name, claimToClaimKey(claim)) - return volume, nil + return volumeClone, dirty, nil } // bindClaimToVolume modifies the given claim to be bound to a volume and @@ -829,7 +891,7 @@ func (ctrl *PersistentVolumeController) bindClaimToVolume(claim *v1.PersistentVo if dirty { glog.V(2).Infof("volume %q bound to claim %q", volume.Name, claimToClaimKey(claim)) - newClaim, err := ctrl.kubeClient.Core().PersistentVolumeClaims(claim.Namespace).Update(claimClone) + newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(claimClone) if err != nil { glog.V(4).Infof("updating PersistentVolumeClaim[%s]: binding to %q failed: %v", claimToClaimKey(claim), volume.Name, err) return newClaim, err @@ -916,7 +978,7 @@ func (ctrl *PersistentVolumeController) unbindVolume(volume *v1.PersistentVolume volumeClone.Spec.ClaimRef.UID = "" } - newVol, err := ctrl.kubeClient.Core().PersistentVolumes().Update(volumeClone) + newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(volumeClone) if err != nil { glog.V(4).Infof("updating PersistentVolume[%s]: rollback 
failed: %v", volume.Name, err) return err @@ -977,7 +1039,7 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(arg interface{}) // This method may have been waiting for a volume lock for some time. // Previous recycleVolumeOperation might just have saved an updated version, // so read current volume state now. - newVolume, err := ctrl.kubeClient.Core().PersistentVolumes().Get(volume.Name, metav1.GetOptions{}) + newVolume, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Get(volume.Name, metav1.GetOptions{}) if err != nil { glog.V(3).Infof("error reading peristent volume %q: %v", volume.Name, err) return @@ -1056,7 +1118,7 @@ func (ctrl *PersistentVolumeController) deleteVolumeOperation(arg interface{}) e // This method may have been waiting for a volume lock for some time. // Previous deleteVolumeOperation might just have saved an updated version, so // read current volume state now. - newVolume, err := ctrl.kubeClient.Core().PersistentVolumes().Get(volume.Name, metav1.GetOptions{}) + newVolume, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Get(volume.Name, metav1.GetOptions{}) if err != nil { glog.V(3).Infof("error reading peristent volume %q: %v", volume.Name, err) return nil @@ -1100,7 +1162,7 @@ func (ctrl *PersistentVolumeController) deleteVolumeOperation(arg interface{}) e glog.V(4).Infof("deleteVolumeOperation [%s]: success", volume.Name) // Delete the volume - if err = ctrl.kubeClient.Core().PersistentVolumes().Delete(volume.Name, nil); err != nil { + if err = ctrl.kubeClient.CoreV1().PersistentVolumes().Delete(volume.Name, nil); err != nil { // Oops, could not delete the volume and therefore the controller will // try to delete the volume again on next update. We _could_ maintain a // cache of "recently deleted volumes" and avoid unnecessary deletion, @@ -1260,7 +1322,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claimObj interfa // yet. pvName := ctrl.getProvisionedVolumeNameForClaim(claim) - volume, err := ctrl.kubeClient.Core().PersistentVolumes().Get(pvName, metav1.GetOptions{}) + volume, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{}) if err == nil && volume != nil { // Volume has been already provisioned, nothing to do. glog.V(4).Infof("provisionClaimOperation [%s]: volume already exists, skipping", claimToClaimKey(claim)) @@ -1338,7 +1400,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claimObj interfa for i := 0; i < ctrl.createProvisionedPVRetryCount; i++ { glog.V(4).Infof("provisionClaimOperation [%s]: trying to save volume %s", claimToClaimKey(claim), volume.Name) var newVol *v1.PersistentVolume - if newVol, err = ctrl.kubeClient.Core().PersistentVolumes().Create(volume); err == nil || apierrs.IsAlreadyExists(err) { + if newVol, err = ctrl.kubeClient.CoreV1().PersistentVolumes().Create(volume); err == nil || apierrs.IsAlreadyExists(err) { // Save succeeded. 
if err != nil { glog.V(3).Infof("volume %q for claim %q already exists, reusing", volume.Name, claimToClaimKey(claim)) diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller_base.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller_base.go index 2fb5852f7f47..2ba5d831cbd3 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller_base.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller_base.go @@ -70,7 +70,8 @@ func NewController(p ControllerParameters) (*PersistentVolumeController, error) eventRecorder := p.EventRecorder if eventRecorder == nil { broadcaster := record.NewBroadcaster() - broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(p.KubeClient.Core().RESTClient()).Events("")}) + broadcaster.StartLogging(glog.Infof) + broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(p.KubeClient.CoreV1().RESTClient()).Events("")}) eventRecorder = broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "persistentvolume-controller"}) } @@ -242,11 +243,15 @@ func (ctrl *PersistentVolumeController) deleteClaim(claim *v1.PersistentVolumeCl _ = ctrl.claims.Delete(claim) glog.V(4).Infof("claim %q deleted", claimToClaimKey(claim)) + volumeName := claim.Spec.VolumeName + if volumeName == "" { + glog.V(5).Infof("deleteClaim[%q]: volume not bound", claimToClaimKey(claim)) + return + } // sync the volume when its claim is deleted. Explicitly sync'ing the // volume here in response to claim deletion prevents the volume from // waiting until the next sync period for its Release. - volumeName := claim.Spec.VolumeName - glog.V(5).Infof("deleteClaim[%s]: scheduling sync of volume %q", claimToClaimKey(claim), volumeName) + glog.V(5).Infof("deleteClaim[%q]: scheduling sync of volume %s", claimToClaimKey(claim), volumeName) ctrl.volumeQueue.Add(volumeName) } @@ -424,7 +429,7 @@ func (ctrl *PersistentVolumeController) setClaimProvisioner(claim *v1.Persistent // modify these, therefore create a copy. claimClone := claim.DeepCopy() metav1.SetMetaDataAnnotation(&claimClone.ObjectMeta, annStorageProvisioner, class.Provisioner) - newClaim, err := ctrl.kubeClient.Core().PersistentVolumeClaims(claim.Namespace).Update(claimClone) + newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(claimClone) if err != nil { return newClaim, err } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/scheduler_assume_cache.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/scheduler_assume_cache.go new file mode 100644 index 000000000000..28884004d7cc --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/scheduler_assume_cache.go @@ -0,0 +1,318 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package persistentvolume + +import ( + "fmt" + "strconv" + "sync" + + "github.com/golang/glog" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/client-go/tools/cache" +) + +// AssumeCache is a cache on top of the informer that allows for updating +// objects outside of informer events and also restoring the informer +// cache's version of the object. Objects are assumed to be +// Kubernetes API objects that implement meta.Interface +type AssumeCache interface { + // Assume updates the object in-memory only + Assume(obj interface{}) error + + // Restore the informer cache's version of the object + Restore(objName string) + + // Get the object by name + Get(objName string) (interface{}, error) + + // List all the objects in the cache + List() []interface{} +} + +type errWrongType struct { + typeName string + object interface{} +} + +func (e *errWrongType) Error() string { + return fmt.Sprintf("could not convert object to type %v: %+v", e.typeName, e.object) +} + +type errNotFound struct { + typeName string + objectName string +} + +func (e *errNotFound) Error() string { + return fmt.Sprintf("could not find %v %q", e.typeName, e.objectName) +} + +type errObjectName struct { + detailedErr error +} + +func (e *errObjectName) Error() string { + return fmt.Sprintf("failed to get object name: %v", e.detailedErr) +} + +// assumeCache stores two pointers to represent a single object: +// * The pointer to the informer object. +// * The pointer to the latest object, which could be the same as +// the informer object, or an in-memory object. +// +// An informer update always overrides the latest object pointer. +// +// Assume() only updates the latest object pointer. +// Restore() sets the latest object pointer back to the informer object. +// Get/List() always returns the latest object pointer. 
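Note (illustration only, not part of the vendored patch): the assumeCache comment above describes a two-pointer scheme — informer events overwrite both the API pointer and the latest pointer, Assume() only moves the latest pointer, and Restore() snaps it back to the informer's copy. Below is a stripped-down, runnable model of that idea; the miniCache type and its string payloads are invented for illustration and are not the vendored implementation.

package main

import "fmt"

type entry struct {
	apiObj    string // what the informer last delivered
	latestObj string // possibly an in-memory (assumed) version
}

type miniCache struct{ items map[string]*entry }

// informerUpdate mirrors an informer event: both pointers are overwritten.
func (c *miniCache) informerUpdate(name, obj string) {
	c.items[name] = &entry{apiObj: obj, latestObj: obj}
}

// assume only replaces the latest pointer, leaving the informer copy intact.
func (c *miniCache) assume(name, obj string) { c.items[name].latestObj = obj }

// restore discards the assumed version and falls back to the informer copy.
func (c *miniCache) restore(name string) { c.items[name].latestObj = c.items[name].apiObj }

func (c *miniCache) get(name string) string { return c.items[name].latestObj }

func main() {
	c := &miniCache{items: map[string]*entry{}}
	c.informerUpdate("pv-1", "rv=5")
	c.assume("pv-1", "rv=5+prebound") // scheduler's in-memory view after AssumePodVolumes
	fmt.Println(c.get("pv-1"))        // rv=5+prebound
	c.restore("pv-1")                 // binding failed: revert to the informer's version
	fmt.Println(c.get("pv-1"))        // rv=5
}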
+type assumeCache struct { + mutex sync.Mutex + + // describes the object stored + description string + + // Stores objInfo pointers + store cache.Store +} + +type objInfo struct { + // name of the object + name string + + // Latest version of object could be cached-only or from informer + latestObj interface{} + + // Latest object from informer + apiObj interface{} +} + +func objInfoKeyFunc(obj interface{}) (string, error) { + objInfo, ok := obj.(*objInfo) + if !ok { + return "", &errWrongType{"objInfo", obj} + } + return objInfo.name, nil +} + +func NewAssumeCache(informer cache.SharedIndexInformer, description string) *assumeCache { + // TODO: index by storageclass + c := &assumeCache{store: cache.NewStore(objInfoKeyFunc), description: description} + + // Unit tests don't use informers + if informer != nil { + informer.AddEventHandler( + cache.ResourceEventHandlerFuncs{ + AddFunc: c.add, + UpdateFunc: c.update, + DeleteFunc: c.delete, + }, + ) + } + return c +} + +func (c *assumeCache) add(obj interface{}) { + if obj == nil { + return + } + + name, err := cache.MetaNamespaceKeyFunc(obj) + if err != nil { + glog.Errorf("add failed: %v", &errObjectName{err}) + return + } + + c.mutex.Lock() + defer c.mutex.Unlock() + + objInfo := &objInfo{name: name, latestObj: obj, apiObj: obj} + c.store.Update(objInfo) +} + +func (c *assumeCache) update(oldObj interface{}, newObj interface{}) { + c.add(newObj) +} + +func (c *assumeCache) delete(obj interface{}) { + if obj == nil { + return + } + + name, err := cache.MetaNamespaceKeyFunc(obj) + if err != nil { + glog.Errorf("delete failed: %v", &errObjectName{err}) + return + } + + c.mutex.Lock() + defer c.mutex.Unlock() + + objInfo := &objInfo{name: name} + err = c.store.Delete(objInfo) + if err != nil { + glog.Errorf("delete: failed to delete %v %v: %v", c.description, name, err) + } +} + +func (c *assumeCache) getObjVersion(name string, obj interface{}) (int64, error) { + objAccessor, err := meta.Accessor(obj) + if err != nil { + return -1, err + } + + objResourceVersion, err := strconv.ParseInt(objAccessor.GetResourceVersion(), 10, 64) + if err != nil { + return -1, fmt.Errorf("error parsing ResourceVersion %q for %v %q: %s", objAccessor.GetResourceVersion(), c.description, name, err) + } + return objResourceVersion, nil +} + +func (c *assumeCache) getObjInfo(name string) (*objInfo, error) { + obj, ok, err := c.store.GetByKey(name) + if err != nil { + return nil, err + } + if !ok { + return nil, &errNotFound{c.description, name} + } + + objInfo, ok := obj.(*objInfo) + if !ok { + return nil, &errWrongType{"objInfo", obj} + } + return objInfo, nil +} + +func (c *assumeCache) Get(objName string) (interface{}, error) { + c.mutex.Lock() + defer c.mutex.Unlock() + + objInfo, err := c.getObjInfo(objName) + if err != nil { + return nil, err + } + return objInfo.latestObj, nil +} + +func (c *assumeCache) List() []interface{} { + c.mutex.Lock() + defer c.mutex.Unlock() + + allObjs := []interface{}{} + for _, obj := range c.store.List() { + objInfo, ok := obj.(*objInfo) + if !ok { + glog.Errorf("list error: %v", &errWrongType{"objInfo", obj}) + continue + } + allObjs = append(allObjs, objInfo.latestObj) + } + return allObjs +} + +func (c *assumeCache) Assume(obj interface{}) error { + name, err := cache.MetaNamespaceKeyFunc(obj) + if err != nil { + return &errObjectName{err} + } + + c.mutex.Lock() + defer c.mutex.Unlock() + + objInfo, err := c.getObjInfo(name) + if err != nil { + return err + } + + newVersion, err := c.getObjVersion(name, obj) + if err != nil { 
+ return err + } + + storedVersion, err := c.getObjVersion(name, objInfo.latestObj) + if err != nil { + return err + } + + if newVersion < storedVersion { + return fmt.Errorf("%v %q is out of sync", c.description, name) + } + + // Only update the cached object + objInfo.latestObj = obj + glog.V(4).Infof("Assumed %v %q, version %v", c.description, name, newVersion) + return nil +} + +func (c *assumeCache) Restore(objName string) { + c.mutex.Lock() + defer c.mutex.Unlock() + + objInfo, err := c.getObjInfo(objName) + if err != nil { + // This could be expected if object got deleted + glog.V(5).Infof("Restore %v %q warning: %v", c.description, objName, err) + } else { + objInfo.latestObj = objInfo.apiObj + glog.V(4).Infof("Restored %v %q", c.description, objName) + } +} + +// PVAssumeCache is a AssumeCache for PersistentVolume objects +type PVAssumeCache interface { + AssumeCache + + GetPV(pvName string) (*v1.PersistentVolume, error) + ListPVs() []*v1.PersistentVolume +} + +type pvAssumeCache struct { + *assumeCache +} + +func NewPVAssumeCache(informer cache.SharedIndexInformer) PVAssumeCache { + return &pvAssumeCache{assumeCache: NewAssumeCache(informer, "v1.PersistentVolume")} +} + +func (c *pvAssumeCache) GetPV(pvName string) (*v1.PersistentVolume, error) { + obj, err := c.Get(pvName) + if err != nil { + return nil, err + } + + pv, ok := obj.(*v1.PersistentVolume) + if !ok { + return nil, &errWrongType{"v1.PersistentVolume", obj} + } + return pv, nil +} + +func (c *pvAssumeCache) ListPVs() []*v1.PersistentVolume { + objs := c.List() + pvs := []*v1.PersistentVolume{} + for _, obj := range objs { + pv, ok := obj.(*v1.PersistentVolume) + if !ok { + glog.Errorf("ListPVs: %v", &errWrongType{"v1.PersistentVolume", obj}) + } + pvs = append(pvs, pv) + } + return pvs +} diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/scheduler_binder.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/scheduler_binder.go new file mode 100644 index 000000000000..7edd1ef459d2 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/scheduler_binder.go @@ -0,0 +1,420 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package persistentvolume + +import ( + "fmt" + "sort" + + "github.com/golang/glog" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + coreinformers "k8s.io/client-go/informers/core/v1" + storageinformers "k8s.io/client-go/informers/storage/v1" + clientset "k8s.io/client-go/kubernetes" + corelisters "k8s.io/client-go/listers/core/v1" + volumeutil "k8s.io/kubernetes/pkg/volume/util" +) + +// SchedulerVolumeBinder is used by the scheduler to handle PVC/PV binding +// and dynamic provisioning. The binding decisions are integrated into the pod scheduling +// workflow so that the PV NodeAffinity is also considered along with the pod's other +// scheduling requirements. +// +// This integrates into the existing default scheduler workflow as follows: +// 1. 
The scheduler takes a Pod off the scheduler queue and processes it serially: +// a. Invokes all predicate functions, parallelized across nodes. FindPodVolumes() is invoked here. +// b. Invokes all priority functions. Future/TBD +// c. Selects the best node for the Pod. +// d. Cache the node selection for the Pod. (Assume phase) +// i. If PVC binding is required, cache in-memory only: +// * Updated PV objects for prebinding to the corresponding PVCs. +// * For the pod, which PVs need API updates. +// AssumePodVolumes() is invoked here. Then BindPodVolumes() is called asynchronously by the +// scheduler. After BindPodVolumes() is complete, the Pod is added back to the scheduler queue +// to be processed again until all PVCs are bound. +// ii. If PVC binding is not required, cache the Pod->Node binding in the scheduler's pod cache, +// and asynchronously bind the Pod to the Node. This is handled in the scheduler and not here. +// 2. Once the assume operation is done, the scheduler processes the next Pod in the scheduler queue +// while the actual binding operation occurs in the background. +type SchedulerVolumeBinder interface { + // FindPodVolumes checks if all of a Pod's PVCs can be satisfied by the node. + // + // If a PVC is bound, it checks if the PV's NodeAffinity matches the Node. + // Otherwise, it tries to find an available PV to bind to the PVC. + // + // It returns true if there are matching PVs that can satisfy all of the Pod's PVCs, and returns true + // if bound volumes satisfy the PV NodeAffinity. + // + // This function is called by the volume binding scheduler predicate and can be called in parallel + FindPodVolumes(pod *v1.Pod, nodeName string) (unboundVolumesSatisified, boundVolumesSatisfied bool, err error) + + // AssumePodVolumes will take the PV matches for unbound PVCs and update the PV cache assuming + // that the PV is prebound to the PVC. + // + // It returns true if all volumes are fully bound, and returns true if any volume binding API operation needs + // to be done afterwards. + // + // This function will modify assumedPod with the node name. + // This function is called serially. + AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (allFullyBound bool, bindingRequired bool, err error) + + // BindPodVolumes will initiate the volume binding by making the API call to prebind the PV + // to its matching PVC. + // + // This function can be called in parallel. + BindPodVolumes(assumedPod *v1.Pod) error + + // GetBindingsCache returns the cache used (if any) to store volume binding decisions. + GetBindingsCache() PodBindingCache +} + +type volumeBinder struct { + ctrl *PersistentVolumeController + + // TODO: Need AssumeCache for PVC for dynamic provisioning + pvcCache corelisters.PersistentVolumeClaimLister + nodeCache corelisters.NodeLister + pvCache PVAssumeCache + + // Stores binding decisions that were made in FindPodVolumes for use in AssumePodVolumes. + // AssumePodVolumes modifies the bindings again for use in BindPodVolumes. + podBindingCache PodBindingCache +} + +// NewVolumeBinder sets up all the caches needed for the scheduler to make volume binding decisions. +func NewVolumeBinder( + kubeClient clientset.Interface, + pvcInformer coreinformers.PersistentVolumeClaimInformer, + pvInformer coreinformers.PersistentVolumeInformer, + nodeInformer coreinformers.NodeInformer, + storageClassInformer storageinformers.StorageClassInformer) SchedulerVolumeBinder { + + // TODO: find better way... 
+ ctrl := &PersistentVolumeController{ + kubeClient: kubeClient, + classLister: storageClassInformer.Lister(), + } + + b := &volumeBinder{ + ctrl: ctrl, + pvcCache: pvcInformer.Lister(), + nodeCache: nodeInformer.Lister(), + pvCache: NewPVAssumeCache(pvInformer.Informer()), + podBindingCache: NewPodBindingCache(), + } + + return b +} + +func (b *volumeBinder) GetBindingsCache() PodBindingCache { + return b.podBindingCache +} + +// FindPodVolumes caches the matching PVs per node in podBindingCache +func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, nodeName string) (unboundVolumesSatisfied, boundVolumesSatisfied bool, err error) { + podName := getPodName(pod) + + glog.V(4).Infof("FindPodVolumes for pod %q, node %q", podName, nodeName) + + // Initialize to true for pods that don't have volumes + unboundVolumesSatisfied = true + boundVolumesSatisfied = true + + node, err := b.nodeCache.Get(nodeName) + if node == nil || err != nil { + return false, false, fmt.Errorf("error getting node %q: %v", nodeName, err) + } + + // The pod's volumes need to be processed in one call to avoid the race condition where + // volumes can get bound in between calls. + boundClaims, unboundClaims, unboundClaimsImmediate, err := b.getPodVolumes(pod) + if err != nil { + return false, false, err + } + + // Immediate claims should be bound + if len(unboundClaimsImmediate) > 0 { + return false, false, fmt.Errorf("pod has unbound PersistentVolumeClaims") + } + + // Check PV node affinity on bound volumes + if len(boundClaims) > 0 { + boundVolumesSatisfied, err = b.checkBoundClaims(boundClaims, node, podName) + if err != nil { + return false, false, err + } + } + + // Find PVs for unbound volumes + if len(unboundClaims) > 0 { + unboundVolumesSatisfied, err = b.findMatchingVolumes(pod, unboundClaims, node) + if err != nil { + return false, false, err + } + } + + return unboundVolumesSatisfied, boundVolumesSatisfied, nil +} + +// AssumePodVolumes will take the cached matching PVs in podBindingCache for the chosen node +// and update the pvCache with the new prebound PV. It will update podBindingCache again +// with the PVs that need an API update. +func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (allFullyBound, bindingRequired bool, err error) { + podName := getPodName(assumedPod) + + glog.V(4).Infof("AssumePodVolumes for pod %q, node %q", podName, nodeName) + + if allBound := b.arePodVolumesBound(assumedPod); allBound { + glog.V(4).Infof("AssumePodVolumes: all PVCs bound and nothing to do") + return true, false, nil + } + + assumedPod.Spec.NodeName = nodeName + claimsToBind := b.podBindingCache.GetBindings(assumedPod, nodeName) + newBindings := []*bindingInfo{} + + for _, binding := range claimsToBind { + newPV, dirty, err := b.ctrl.getBindVolumeToClaim(binding.pv, binding.pvc) + glog.V(5).Infof("AssumePodVolumes: getBindVolumeToClaim for PV %q, PVC %q. newPV %p, dirty %v, err: %v", + binding.pv.Name, + binding.pvc.Name, + newPV, + dirty, + err) + if err != nil { + b.revertAssumedPVs(newBindings) + return false, true, err + } + if dirty { + err = b.pvCache.Assume(newPV) + if err != nil { + b.revertAssumedPVs(newBindings) + return false, true, err + } + + newBindings = append(newBindings, &bindingInfo{pv: newPV, pvc: binding.pvc}) + } + } + + if len(newBindings) == 0 { + // Don't update cached bindings if no API updates are needed. This can happen if we + // previously updated the PV object and are waiting for the PV controller to finish binding. 
+ glog.V(4).Infof("AssumePodVolumes: PVs already assumed") + return false, false, nil + } + b.podBindingCache.UpdateBindings(assumedPod, nodeName, newBindings) + + return false, true, nil +} + +// BindPodVolumes gets the cached bindings in podBindingCache and makes the API update for those PVs. +func (b *volumeBinder) BindPodVolumes(assumedPod *v1.Pod) error { + glog.V(4).Infof("BindPodVolumes for pod %q", getPodName(assumedPod)) + + bindings := b.podBindingCache.GetBindings(assumedPod, assumedPod.Spec.NodeName) + + // Do the actual prebinding. Let the PV controller take care of the rest + // There is no API rollback if the actual binding fails + for i, bindingInfo := range bindings { + _, err := b.ctrl.updateBindVolumeToClaim(bindingInfo.pv, bindingInfo.pvc, false) + if err != nil { + // only revert assumed cached updates for volumes we haven't successfully bound + b.revertAssumedPVs(bindings[i:]) + return err + } + } + + return nil +} + +func getPodName(pod *v1.Pod) string { + return pod.Namespace + "/" + pod.Name +} + +func getPVCName(pvc *v1.PersistentVolumeClaim) string { + return pvc.Namespace + "/" + pvc.Name +} + +func (b *volumeBinder) isVolumeBound(namespace string, vol *v1.Volume, checkFullyBound bool) (bool, *v1.PersistentVolumeClaim, error) { + if vol.PersistentVolumeClaim == nil { + return true, nil, nil + } + + pvcName := vol.PersistentVolumeClaim.ClaimName + pvc, err := b.pvcCache.PersistentVolumeClaims(namespace).Get(pvcName) + if err != nil || pvc == nil { + return false, nil, fmt.Errorf("error getting PVC %q: %v", pvcName, err) + } + + pvName := pvc.Spec.VolumeName + if pvName != "" { + if checkFullyBound { + if metav1.HasAnnotation(pvc.ObjectMeta, annBindCompleted) { + glog.V(5).Infof("PVC %q is fully bound to PV %q", getPVCName(pvc), pvName) + return true, pvc, nil + } else { + glog.V(5).Infof("PVC %q is not fully bound to PV %q", getPVCName(pvc), pvName) + return false, pvc, nil + } + } + glog.V(5).Infof("PVC %q is bound or prebound to PV %q", getPVCName(pvc), pvName) + return true, pvc, nil + } + + glog.V(5).Infof("PVC %q is not bound", getPVCName(pvc)) + return false, pvc, nil +} + +// arePodVolumesBound returns true if all volumes are fully bound +func (b *volumeBinder) arePodVolumesBound(pod *v1.Pod) bool { + for _, vol := range pod.Spec.Volumes { + if isBound, _, _ := b.isVolumeBound(pod.Namespace, &vol, true); !isBound { + // Pod has at least one PVC that needs binding + return false + } + } + return true +} + +// getPodVolumes returns a pod's PVCs separated into bound (including prebound), unbound with delayed binding, +// and unbound with immediate binding +func (b *volumeBinder) getPodVolumes(pod *v1.Pod) (boundClaims []*v1.PersistentVolumeClaim, unboundClaims []*bindingInfo, unboundClaimsImmediate []*v1.PersistentVolumeClaim, err error) { + boundClaims = []*v1.PersistentVolumeClaim{} + unboundClaimsImmediate = []*v1.PersistentVolumeClaim{} + unboundClaims = []*bindingInfo{} + + for _, vol := range pod.Spec.Volumes { + volumeBound, pvc, err := b.isVolumeBound(pod.Namespace, &vol, false) + if err != nil { + return nil, nil, nil, err + } + if pvc == nil { + continue + } + if volumeBound { + boundClaims = append(boundClaims, pvc) + } else { + delayBinding, err := b.ctrl.shouldDelayBinding(pvc) + if err != nil { + return nil, nil, nil, err + } + if delayBinding { + // Scheduler path + unboundClaims = append(unboundClaims, &bindingInfo{pvc: pvc}) + } else { + // Immediate binding should have already been bound + unboundClaimsImmediate = 
append(unboundClaimsImmediate, pvc) + } + } + } + return boundClaims, unboundClaims, unboundClaimsImmediate, nil +} + +func (b *volumeBinder) checkBoundClaims(claims []*v1.PersistentVolumeClaim, node *v1.Node, podName string) (bool, error) { + for _, pvc := range claims { + pvName := pvc.Spec.VolumeName + pv, err := b.pvCache.GetPV(pvName) + if err != nil { + return false, err + } + + err = volumeutil.CheckNodeAffinity(pv, node.Labels) + if err != nil { + glog.V(4).Infof("PersistentVolume %q, Node %q mismatch for Pod %q: %v", pvName, node.Name, err.Error(), podName) + return false, nil + } + glog.V(5).Infof("PersistentVolume %q, Node %q matches for Pod %q", pvName, node.Name, podName) + } + + glog.V(4).Infof("All volumes for Pod %q match with Node %q", podName, node.Name) + return true, nil +} + +func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*bindingInfo, node *v1.Node) (foundMatches bool, err error) { + // Sort all the claims by increasing size request to get the smallest fits + sort.Sort(byPVCSize(claimsToBind)) + + allPVs := b.pvCache.ListPVs() + chosenPVs := map[string]*v1.PersistentVolume{} + + for _, bindingInfo := range claimsToBind { + // Find a matching PV + bindingInfo.pv, err = findMatchingVolume(bindingInfo.pvc, allPVs, node, chosenPVs, true) + if err != nil { + return false, err + } + if bindingInfo.pv == nil { + glog.V(4).Infof("No matching volumes for PVC %q on node %q", getPVCName(bindingInfo.pvc), node.Name) + return false, nil + } + + // matching PV needs to be excluded so we don't select it again + chosenPVs[bindingInfo.pv.Name] = bindingInfo.pv + } + + // Mark cache with all the matches for each PVC for this node + b.podBindingCache.UpdateBindings(pod, node.Name, claimsToBind) + glog.V(4).Infof("Found matching volumes on node %q", node.Name) + + return true, nil +} + +func (b *volumeBinder) revertAssumedPVs(bindings []*bindingInfo) { + for _, bindingInfo := range bindings { + b.pvCache.Restore(bindingInfo.pv.Name) + } +} + +type bindingInfo struct { + // Claim that needs to be bound + pvc *v1.PersistentVolumeClaim + + // Proposed PV to bind to this claim + pv *v1.PersistentVolume +} + +// Used in unit test errors +func (b bindingInfo) String() string { + pvcName := "" + pvName := "" + if b.pvc != nil { + pvcName = getPVCName(b.pvc) + } + if b.pv != nil { + pvName = b.pv.Name + } + return fmt.Sprintf("[PVC %q, PV %q]", pvcName, pvName) +} + +type byPVCSize []*bindingInfo + +func (a byPVCSize) Len() int { + return len(a) +} + +func (a byPVCSize) Swap(i, j int) { + a[i], a[j] = a[j], a[i] +} + +func (a byPVCSize) Less(i, j int) bool { + iSize := a[i].pvc.Spec.Resources.Requests[v1.ResourceStorage] + jSize := a[j].pvc.Spec.Resources.Requests[v1.ResourceStorage] + // return true if iSize is less than jSize + return iSize.Cmp(jSize) == -1 +} diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/scheduler_binder_cache.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/scheduler_binder_cache.go new file mode 100644 index 000000000000..8a0a77960859 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/scheduler_binder_cache.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package persistentvolume + +import ( + "sync" + + "k8s.io/api/core/v1" +) + +// podBindingCache stores PV binding decisions per pod per node. +// Pod entries are removed when the Pod is deleted or updated to +// no longer be schedulable. +type PodBindingCache interface { + // UpdateBindings will update the cache with the given bindings for the + // pod and node. + UpdateBindings(pod *v1.Pod, node string, bindings []*bindingInfo) + + // DeleteBindings will remove all cached bindings for the given pod. + DeleteBindings(pod *v1.Pod) + + // GetBindings will return the cached bindings for the given pod and node. + GetBindings(pod *v1.Pod, node string) []*bindingInfo +} + +type podBindingCache struct { + mutex sync.Mutex + + // Key = pod name + // Value = nodeBindings + bindings map[string]nodeBindings +} + +// Key = nodeName +// Value = array of bindingInfo +type nodeBindings map[string][]*bindingInfo + +func NewPodBindingCache() PodBindingCache { + return &podBindingCache{bindings: map[string]nodeBindings{}} +} + +func (c *podBindingCache) DeleteBindings(pod *v1.Pod) { + c.mutex.Lock() + defer c.mutex.Unlock() + + podName := getPodName(pod) + delete(c.bindings, podName) +} + +func (c *podBindingCache) UpdateBindings(pod *v1.Pod, node string, bindings []*bindingInfo) { + c.mutex.Lock() + defer c.mutex.Unlock() + + podName := getPodName(pod) + nodeBinding, ok := c.bindings[podName] + if !ok { + nodeBinding = nodeBindings{} + c.bindings[podName] = nodeBinding + } + nodeBinding[node] = bindings +} + +func (c *podBindingCache) GetBindings(pod *v1.Pod, node string) []*bindingInfo { + c.mutex.Lock() + defer c.mutex.Unlock() + + podName := getPodName(pod) + nodeBindings, ok := c.bindings[podName] + if !ok { + return nil + } + return nodeBindings[node] +} diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/scheduler_binder_fake.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/scheduler_binder_fake.go new file mode 100644 index 000000000000..2810276b1617 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/scheduler_binder_fake.go @@ -0,0 +1,63 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package persistentvolume + +import ( + "k8s.io/api/core/v1" +) + +type FakeVolumeBinderConfig struct { + AllBound bool + FindUnboundSatsified bool + FindBoundSatsified bool + FindErr error + AssumeBindingRequired bool + AssumeErr error + BindErr error +} + +// NewVolumeBinder sets up all the caches needed for the scheduler to make +// topology-aware volume binding decisions. 
+func NewFakeVolumeBinder(config *FakeVolumeBinderConfig) *FakeVolumeBinder { + return &FakeVolumeBinder{ + config: config, + } +} + +type FakeVolumeBinder struct { + config *FakeVolumeBinderConfig + AssumeCalled bool + BindCalled bool +} + +func (b *FakeVolumeBinder) FindPodVolumes(pod *v1.Pod, nodeName string) (unboundVolumesSatisfied, boundVolumesSatsified bool, err error) { + return b.config.FindUnboundSatsified, b.config.FindBoundSatsified, b.config.FindErr +} + +func (b *FakeVolumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (bool, bool, error) { + b.AssumeCalled = true + return b.config.AllBound, b.config.AssumeBindingRequired, b.config.AssumeErr +} + +func (b *FakeVolumeBinder) BindPodVolumes(assumedPod *v1.Pod) error { + b.BindCalled = true + return b.config.BindErr +} + +func (b *FakeVolumeBinder) GetBindingsCache() PodBindingCache { + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/volume_host.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/volume_host.go index 8f182edc6b98..27d45629e8a2 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/volume_host.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/volume_host.go @@ -37,6 +37,10 @@ func (ctrl *PersistentVolumeController) GetPluginDir(pluginName string) string { return "" } +func (ctrl *PersistentVolumeController) GetVolumeDevicePluginDir(pluginName string) string { + return "" +} + func (ctrl *PersistentVolumeController) GetPodVolumeDir(podUID types.UID, pluginName string, volumeName string) string { return "" } @@ -45,6 +49,10 @@ func (ctrl *PersistentVolumeController) GetPodPluginDir(podUID types.UID, plugin return "" } +func (ctrl *PersistentVolumeController) GetPodVolumeDeviceDir(ppodUID types.UID, pluginName string) string { + return "" +} + func (ctrl *PersistentVolumeController) GetKubeClient() clientset.Interface { return ctrl.kubeClient } @@ -100,3 +108,7 @@ func (adc *PersistentVolumeController) GetExec(pluginName string) mount.Exec { func (ctrl *PersistentVolumeController) GetNodeLabels() (map[string]string, error) { return nil, fmt.Errorf("GetNodeLabels() unsupported in PersistentVolumeController") } + +func (ctrl *PersistentVolumeController) GetNodeName() types.NodeName { + return "" +} diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/pvcprotection/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/volume/pvcprotection/BUILD new file mode 100644 index 000000000000..5c713a259d2b --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/pvcprotection/BUILD @@ -0,0 +1,61 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["pvc_protection_controller.go"], + importpath = "k8s.io/kubernetes/pkg/controller/volume/pvcprotection", + visibility = ["//visibility:public"], + deps = [ + "//pkg/controller:go_default_library", + "//pkg/util/metrics:go_default_library", + "//pkg/volume/util:go_default_library", + "//pkg/volume/util/volumehelper:go_default_library", + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/client-go/informers/core/v1:go_default_library", + 
"//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/listers/core/v1:go_default_library", + "//vendor/k8s.io/client-go/tools/cache:go_default_library", + "//vendor/k8s.io/client-go/util/workqueue:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["pvc_protection_controller_test.go"], + importpath = "k8s.io/kubernetes/pkg/controller/volume/pvcprotection", + library = ":go_default_library", + deps = [ + "//pkg/controller:go_default_library", + "//pkg/volume/util:go_default_library", + "//vendor/github.com/davecgh/go-spew/spew:go_default_library", + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/client-go/informers:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library", + "//vendor/k8s.io/client-go/testing:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/pvcprotection/pvc_protection_controller.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/pvcprotection/pvc_protection_controller.go new file mode 100644 index 000000000000..40bf3e5de5c1 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/pvcprotection/pvc_protection_controller.go @@ -0,0 +1,284 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pvcprotection + +import ( + "fmt" + "time" + + "github.com/golang/glog" + "k8s.io/api/core/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + coreinformers "k8s.io/client-go/informers/core/v1" + clientset "k8s.io/client-go/kubernetes" + corelisters "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/kubernetes/pkg/controller" + "k8s.io/kubernetes/pkg/util/metrics" + volumeutil "k8s.io/kubernetes/pkg/volume/util" + "k8s.io/kubernetes/pkg/volume/util/volumehelper" +) + +// Controller is controller that removes PVCProtectionFinalizer +// from PVCs that are used by no pods. 
+type Controller struct { + client clientset.Interface + + pvcLister corelisters.PersistentVolumeClaimLister + pvcListerSynced cache.InformerSynced + + podLister corelisters.PodLister + podListerSynced cache.InformerSynced + + queue workqueue.RateLimitingInterface +} + +// NewPVCProtectionController returns a new *{VCProtectionController. +func NewPVCProtectionController(pvcInformer coreinformers.PersistentVolumeClaimInformer, podInformer coreinformers.PodInformer, cl clientset.Interface) *Controller { + e := &Controller{ + client: cl, + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pvcprotection"), + } + if cl != nil && cl.CoreV1().RESTClient().GetRateLimiter() != nil { + metrics.RegisterMetricAndTrackRateLimiterUsage("persistentvolumeclaim_protection_controller", cl.CoreV1().RESTClient().GetRateLimiter()) + } + + e.pvcLister = pvcInformer.Lister() + e.pvcListerSynced = pvcInformer.Informer().HasSynced + pvcInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: e.pvcAddedUpdated, + UpdateFunc: func(old, new interface{}) { + e.pvcAddedUpdated(new) + }, + }) + + e.podLister = podInformer.Lister() + e.podListerSynced = podInformer.Informer().HasSynced + podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + e.podAddedDeletedUpdated(obj, false) + }, + DeleteFunc: func(obj interface{}) { + e.podAddedDeletedUpdated(obj, true) + }, + UpdateFunc: func(old, new interface{}) { + e.podAddedDeletedUpdated(new, false) + }, + }) + + return e +} + +// Run runs the controller goroutines. +func (c *Controller) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + glog.Infof("Starting PVC protection controller") + defer glog.Infof("Shutting down PVC protection controller") + + if !controller.WaitForCacheSync("PVC protection", stopCh, c.pvcListerSynced, c.podListerSynced) { + return + } + + for i := 0; i < workers; i++ { + go wait.Until(c.runWorker, time.Second, stopCh) + } + + <-stopCh +} + +func (c *Controller) runWorker() { + for c.processNextWorkItem() { + } +} + +// processNextWorkItem deals with one pvcKey off the queue. It returns false when it's time to quit. +func (c *Controller) processNextWorkItem() bool { + pvcKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(pvcKey) + + pvcNamespace, pvcName, err := cache.SplitMetaNamespaceKey(pvcKey.(string)) + if err != nil { + utilruntime.HandleError(fmt.Errorf("Error parsing PVC key %q: %v", pvcKey, err)) + return true + } + + err = c.processPVC(pvcNamespace, pvcName) + if err == nil { + c.queue.Forget(pvcKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("PVC %v failed with : %v", pvcKey, err)) + c.queue.AddRateLimited(pvcKey) + + return true +} + +func (c *Controller) processPVC(pvcNamespace, pvcName string) error { + glog.V(4).Infof("Processing PVC %s/%s", pvcNamespace, pvcName) + startTime := time.Now() + defer func() { + glog.V(4).Infof("Finished processing PVC %s/%s (%v)", pvcNamespace, pvcName, time.Now().Sub(startTime)) + }() + + pvc, err := c.pvcLister.PersistentVolumeClaims(pvcNamespace).Get(pvcName) + if apierrs.IsNotFound(err) { + glog.V(4).Infof("PVC %s/%s not found, ignoring", pvcNamespace, pvcName) + return nil + } + if err != nil { + return err + } + + if volumeutil.IsPVCBeingDeleted(pvc) && volumeutil.IsProtectionFinalizerPresent(pvc) { + // PVC should be deleted. Check if it's used and remove finalizer if + // it's not. 
+ isUsed, err := c.isBeingUsed(pvc) + if err != nil { + return err + } + if !isUsed { + return c.removeFinalizer(pvc) + } + } + + if !volumeutil.IsPVCBeingDeleted(pvc) && !volumeutil.IsProtectionFinalizerPresent(pvc) { + // PVC is not being deleted -> it should have the finalizer. The + // finalizer should be added by admission plugin, this is just to add + // the finalizer to old PVCs that were created before the admission + // plugin was enabled. + return c.addFinalizer(pvc) + } + return nil +} + +func (c *Controller) addFinalizer(pvc *v1.PersistentVolumeClaim) error { + claimClone := pvc.DeepCopy() + volumeutil.AddProtectionFinalizer(claimClone) + _, err := c.client.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(claimClone) + if err != nil { + glog.V(3).Infof("Error adding protection finalizer to PVC %s/%s: %v", pvc.Namespace, pvc.Name) + return err + } + glog.V(3).Infof("Added protection finalizer to PVC %s/%s", pvc.Namespace, pvc.Name) + return nil +} + +func (c *Controller) removeFinalizer(pvc *v1.PersistentVolumeClaim) error { + claimClone := pvc.DeepCopy() + volumeutil.RemoveProtectionFinalizer(claimClone) + _, err := c.client.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(claimClone) + if err != nil { + glog.V(3).Infof("Error removing protection finalizer from PVC %s/%s: %v", pvc.Namespace, pvc.Name, err) + return err + } + glog.V(3).Infof("Removed protection finalizer from PVC %s/%s", pvc.Namespace, pvc.Name) + return nil +} + +func (c *Controller) isBeingUsed(pvc *v1.PersistentVolumeClaim) (bool, error) { + pods, err := c.podLister.Pods(pvc.Namespace).List(labels.Everything()) + if err != nil { + return false, err + } + for _, pod := range pods { + if pod.Spec.NodeName == "" { + // This pod is not scheduled. We have a predicated in scheduler that + // prevents scheduling pods with deletion timestamp, so we can be + // pretty sure it won't be scheduled in parallel to this check. + // Therefore this pod does not block the PVC from deletion. + glog.V(4).Infof("Skipping unscheduled pod %s when checking PVC %s/%s", pod.Name, pvc.Namespace, pvc.Name) + continue + } + if volumehelper.IsPodTerminated(pod, pod.Status) { + // This pod is being unmounted/detached or is already + // unmounted/detached. It does not block the PVC from deletion. 
+ continue + } + for _, volume := range pod.Spec.Volumes { + if volume.PersistentVolumeClaim == nil { + continue + } + if volume.PersistentVolumeClaim.ClaimName == pvc.Name { + glog.V(2).Infof("Keeping PVC %s/%s, it is used by pod %s/%s", pvc.Namespace, pvc.Name, pod.Namespace, pod.Name) + return true, nil + } + } + } + + glog.V(3).Infof("PVC %s/%s is unused", pvc.Namespace, pvc.Name) + return false, nil +} + +// pvcAddedUpdated reacts to pvc added/updated/deleted events +func (c *Controller) pvcAddedUpdated(obj interface{}) { + pvc, ok := obj.(*v1.PersistentVolumeClaim) + if !ok { + utilruntime.HandleError(fmt.Errorf("PVC informer returned non-PVC object: %#v", obj)) + return + } + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(pvc) + if err != nil { + utilruntime.HandleError(fmt.Errorf("Couldn't get key for Persistent Volume Claim %#v: %v", pvc, err)) + return + } + glog.V(4).Infof("Got event on PVC %s", key) + + if (!volumeutil.IsPVCBeingDeleted(pvc) && !volumeutil.IsProtectionFinalizerPresent(pvc)) || (volumeutil.IsPVCBeingDeleted(pvc) && volumeutil.IsProtectionFinalizerPresent(pvc)) { + c.queue.Add(key) + } +} + +// podAddedDeletedUpdated reacts to Pod events +func (c *Controller) podAddedDeletedUpdated(obj interface{}, deleted bool) { + pod, ok := obj.(*v1.Pod) + if !ok { + tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", obj)) + return + } + pod, ok = tombstone.Obj.(*v1.Pod) + if !ok { + utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a Pod %#v", obj)) + return + } + } + + // Filter out pods that can't help us to remove a finalizer on PVC + if !deleted && !volumehelper.IsPodTerminated(pod, pod.Status) && pod.Spec.NodeName != "" { + return + } + + glog.V(4).Infof("Got event on pod %s/%s", pod.Namespace, pod.Name) + + // Enqueue all PVCs that the pod uses + for _, volume := range pod.Spec.Volumes { + if volume.PersistentVolumeClaim != nil { + c.queue.Add(pod.Namespace + "/" + volume.PersistentVolumeClaim.ClaimName) + } + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/credentialprovider/BUILD b/vendor/k8s.io/kubernetes/pkg/credentialprovider/BUILD index 83f77387b9f3..89f836f49055 100644 --- a/vendor/k8s.io/kubernetes/pkg/credentialprovider/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/credentialprovider/BUILD @@ -15,6 +15,7 @@ go_library( "plugins.go", "provider.go", ], + importpath = "k8s.io/kubernetes/pkg/credentialprovider", deps = [ "//vendor/github.com/docker/docker/api/types:go_default_library", "//vendor/github.com/golang/glog:go_default_library", @@ -30,6 +31,7 @@ go_test( "keyring_test.go", "provider_test.go", ], + importpath = "k8s.io/kubernetes/pkg/credentialprovider", library = ":go_default_library", deps = ["//vendor/github.com/docker/docker/api/types:go_default_library"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/credentialprovider/aws/BUILD b/vendor/k8s.io/kubernetes/pkg/credentialprovider/aws/BUILD index cc3e0d955d7e..5cb5b7647f76 100644 --- a/vendor/k8s.io/kubernetes/pkg/credentialprovider/aws/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/credentialprovider/aws/BUILD @@ -9,6 +9,7 @@ load( go_library( name = "go_default_library", srcs = ["aws_credentials.go"], + importpath = "k8s.io/kubernetes/pkg/credentialprovider/aws", deps = [ "//pkg/credentialprovider:go_default_library", "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library", @@ -22,6 +23,7 @@ go_library( go_test( name = "go_default_test", srcs = ["aws_credentials_test.go"], + importpath = 
"k8s.io/kubernetes/pkg/credentialprovider/aws", library = ":go_default_library", deps = [ "//pkg/credentialprovider:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/credentialprovider/aws/aws_credentials.go b/vendor/k8s.io/kubernetes/pkg/credentialprovider/aws/aws_credentials.go index 7e8955e9c0c4..d889cbf1fa8b 100644 --- a/vendor/k8s.io/kubernetes/pkg/credentialprovider/aws/aws_credentials.go +++ b/vendor/k8s.io/kubernetes/pkg/credentialprovider/aws/aws_credentials.go @@ -30,7 +30,10 @@ import ( "k8s.io/kubernetes/pkg/credentialprovider" ) -const registryURLTemplate = "*.dkr.ecr.%s.amazonaws.com" +const awsChinaRegionPrefix = "cn-" +const awsStandardDNSSuffix = "amazonaws.com" +const awsChinaDNSSuffix = "amazonaws.com.cn" +const registryURLTemplate = "*.dkr.ecr.%s.%s" // awsHandlerLogger is a handler that logs all AWS SDK requests // Copied from pkg/cloudprovider/providers/aws/log_handler.go @@ -80,6 +83,16 @@ type ecrProvider struct { var _ credentialprovider.DockerConfigProvider = &ecrProvider{} +// registryURL has different suffix in AWS China region +func registryURL(region string) string { + dnsSuffix := awsStandardDNSSuffix + // deal with aws none standard regions + if strings.HasPrefix(region, awsChinaRegionPrefix) { + dnsSuffix = awsChinaDNSSuffix + } + return fmt.Sprintf(registryURLTemplate, region, dnsSuffix) +} + // RegisterCredentialsProvider registers a credential provider for the specified region. // It creates a lazy provider for each AWS region, in order to support // cross-region ECR access. They have to be lazy because it's unlikely, but not @@ -92,7 +105,7 @@ func RegisterCredentialsProvider(region string) { credentialprovider.RegisterCredentialProvider("aws-ecr-"+region, &lazyEcrProvider{ region: region, - regionURL: fmt.Sprintf(registryURLTemplate, region), + regionURL: registryURL(region), }) } @@ -136,7 +149,7 @@ func (p *lazyEcrProvider) Provide() credentialprovider.DockerConfig { func newEcrProvider(region string, getter tokenGetter) *ecrProvider { return &ecrProvider{ region: region, - regionURL: fmt.Sprintf(registryURLTemplate, region), + regionURL: registryURL(region), getter: getter, } } diff --git a/vendor/k8s.io/kubernetes/pkg/credentialprovider/azure/BUILD b/vendor/k8s.io/kubernetes/pkg/credentialprovider/azure/BUILD index 0ba1582e75d1..ee3b570baf0a 100644 --- a/vendor/k8s.io/kubernetes/pkg/credentialprovider/azure/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/credentialprovider/azure/BUILD @@ -12,6 +12,7 @@ go_library( "azure_acr_helper.go", "azure_credentials.go", ], + importpath = "k8s.io/kubernetes/pkg/credentialprovider/azure", deps = [ "//pkg/cloudprovider/providers/azure:go_default_library", "//pkg/credentialprovider:go_default_library", @@ -28,6 +29,7 @@ go_library( go_test( name = "go_default_test", srcs = ["azure_credentials_test.go"], + importpath = "k8s.io/kubernetes/pkg/credentialprovider/azure", library = ":go_default_library", deps = [ "//vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/credentialprovider/gcp/BUILD b/vendor/k8s.io/kubernetes/pkg/credentialprovider/gcp/BUILD index 45f4c3b5c977..4d903e0dcf70 100644 --- a/vendor/k8s.io/kubernetes/pkg/credentialprovider/gcp/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/credentialprovider/gcp/BUILD @@ -13,6 +13,7 @@ go_library( "jwt.go", "metadata.go", ], + importpath = "k8s.io/kubernetes/pkg/credentialprovider/gcp", deps = [ "//pkg/credentialprovider:go_default_library", 
"//vendor/github.com/golang/glog:go_default_library", @@ -30,6 +31,7 @@ go_test( "jwt_test.go", "metadata_test.go", ], + importpath = "k8s.io/kubernetes/pkg/credentialprovider/gcp", library = ":go_default_library", deps = [ "//pkg/credentialprovider:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/credentialprovider/rancher/BUILD b/vendor/k8s.io/kubernetes/pkg/credentialprovider/rancher/BUILD index 02689ef5c44c..afc26b6ddef8 100644 --- a/vendor/k8s.io/kubernetes/pkg/credentialprovider/rancher/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/credentialprovider/rancher/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["rancher_registry_credentials_test.go"], + importpath = "k8s.io/kubernetes/pkg/credentialprovider/rancher", library = ":go_default_library", deps = [ "//pkg/credentialprovider:go_default_library", @@ -22,6 +23,7 @@ go_library( "doc.go", "rancher_registry_credentials.go", ], + importpath = "k8s.io/kubernetes/pkg/credentialprovider/rancher", deps = [ "//pkg/credentialprovider:go_default_library", "//vendor/github.com/golang/glog:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/features/BUILD b/vendor/k8s.io/kubernetes/pkg/features/BUILD index dac4e2e9ca94..c88d15f265c2 100644 --- a/vendor/k8s.io/kubernetes/pkg/features/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/features/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["kube_features.go"], + importpath = "k8s.io/kubernetes/pkg/features", deps = [ "//vendor/k8s.io/apiextensions-apiserver/pkg/features:go_default_library", "//vendor/k8s.io/apiserver/pkg/features:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/features/kube_features.go b/vendor/k8s.io/kubernetes/pkg/features/kube_features.go index 35a90a1ac25c..80599039bc46 100644 --- a/vendor/k8s.io/kubernetes/pkg/features/kube_features.go +++ b/vendor/k8s.io/kubernetes/pkg/features/kube_features.go @@ -98,7 +98,7 @@ const ( // the API server as the certificate approaches expiration. RotateKubeletClientCertificate utilfeature.Feature = "RotateKubeletClientCertificate" - // owner: @msau + // owner: @msau42 // alpha: v1.7 // // A new volume type that supports local disks on a node. @@ -140,12 +140,6 @@ const ( // 'MemoryPressure', 'OutOfDisk' and 'DiskPressure'. TaintNodesByCondition utilfeature.Feature = "TaintNodesByCondition" - // owner: @haibinxie - // alpha: v1.8 - // - // Implement IPVS-based in-cluster service load balancing - SupportIPVSProxyMode utilfeature.Feature = "SupportIPVSProxyMode" - // owner: @jsafrane // alpha: v1.8 // @@ -163,6 +157,61 @@ const ( // // Enable pods to consume pre-allocated huge pages of varying page sizes HugePages utilfeature.Feature = "HugePages" + + // owner @brendandburns + // alpha: v1.8 + // + // Enable nodes to exclude themselves from service load balancers + ServiceNodeExclusion utilfeature.Feature = "ServiceNodeExclusion" + + // owner: @jsafrane + // alpha: v1.9 + // + // Enable running mount utilities in containers. 
+ MountContainers utilfeature.Feature = "MountContainers" + + // owner: @msau42 + // alpha: v1.9 + // + // Extend the default scheduler to be aware of PV topology and handle PV binding + // Before moving to beta, resolve Kubernetes issue #56180 + VolumeScheduling utilfeature.Feature = "VolumeScheduling" + + // owner: @vladimirvivien + // alpha: v1.9 + // + // Enable mount/attachment of Container Storage Interface (CSI) backed PVs + CSIPersistentVolume utilfeature.Feature = "CSIPersistentVolume" + + // owner @MrHohn + // alpha: v1.9 + // + // Support configurable pod DNS parameters. + CustomPodDNS utilfeature.Feature = "CustomPodDNS" + + // owner: @screeley44 + // alpha: v1.9 + // + // Enable Block volume support in containers. + BlockVolume utilfeature.Feature = "BlockVolume" + + // owner: @pospispa + // + // alpha: v1.9 + // Postpone deletion of a persistent volume claim in case it is used by a pod + PVCProtection utilfeature.Feature = "PVCProtection" + + // owner: @aveshagarwal + // alpha: v1.9 + // + // Enable resource limits priority function + ResourceLimitsPriorityFunction utilfeature.Feature = "ResourceLimitsPriorityFunction" + + // owner: @m1093782566 + // beta: v1.9 + // + // Implement IPVS-based in-cluster service load balancing + SupportIPVSProxyMode utilfeature.Feature = "SupportIPVSProxyMode" ) func init() { @@ -194,6 +243,15 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS MountPropagation: {Default: false, PreRelease: utilfeature.Alpha}, ExpandPersistentVolumes: {Default: false, PreRelease: utilfeature.Alpha}, CPUManager: {Default: false, PreRelease: utilfeature.Alpha}, + ServiceNodeExclusion: {Default: false, PreRelease: utilfeature.Alpha}, + MountContainers: {Default: false, PreRelease: utilfeature.Alpha}, + VolumeScheduling: {Default: false, PreRelease: utilfeature.Alpha}, + CSIPersistentVolume: {Default: false, PreRelease: utilfeature.Alpha}, + CustomPodDNS: {Default: false, PreRelease: utilfeature.Alpha}, + BlockVolume: {Default: false, PreRelease: utilfeature.Alpha}, + PVCProtection: {Default: false, PreRelease: utilfeature.Alpha}, + ResourceLimitsPriorityFunction: {Default: false, PreRelease: utilfeature.Alpha}, + SupportIPVSProxyMode: {Default: false, PreRelease: utilfeature.Beta}, // inherited features from generic apiserver, relisted here to get a conflict if it is changed // unintentionally on either side: @@ -201,10 +259,9 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS genericfeatures.AdvancedAuditing: {Default: true, PreRelease: utilfeature.Beta}, genericfeatures.APIResponseCompression: {Default: false, PreRelease: utilfeature.Alpha}, genericfeatures.Initializers: {Default: false, PreRelease: utilfeature.Alpha}, - genericfeatures.APIListChunking: {Default: false, PreRelease: utilfeature.Alpha}, + genericfeatures.APIListChunking: {Default: true, PreRelease: utilfeature.Beta}, // inherited features from apiextensions-apiserver, relisted here to get a conflict if it is changed // unintentionally on either side: - apiextensionsfeatures.CustomResourceValidation: {Default: false, PreRelease: utilfeature.Alpha}, - SupportIPVSProxyMode: {Default: false, PreRelease: utilfeature.Alpha}, + apiextensionsfeatures.CustomResourceValidation: {Default: true, PreRelease: utilfeature.Beta}, } diff --git a/vendor/k8s.io/kubernetes/pkg/fieldpath/BUILD b/vendor/k8s.io/kubernetes/pkg/fieldpath/BUILD index 05710a259df6..0e4dae6a921e 100644 --- a/vendor/k8s.io/kubernetes/pkg/fieldpath/BUILD +++ 
b/vendor/k8s.io/kubernetes/pkg/fieldpath/BUILD @@ -12,12 +12,17 @@ go_library( "doc.go", "fieldpath.go", ], - deps = ["//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library"], + importpath = "k8s.io/kubernetes/pkg/fieldpath", + deps = [ + "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", + ], ) go_test( name = "go_default_test", srcs = ["fieldpath_test.go"], + importpath = "k8s.io/kubernetes/pkg/fieldpath", library = ":go_default_library", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/fieldpath/fieldpath.go b/vendor/k8s.io/kubernetes/pkg/fieldpath/fieldpath.go index caf0096ca0ef..437541458929 100644 --- a/vendor/k8s.io/kubernetes/pkg/fieldpath/fieldpath.go +++ b/vendor/k8s.io/kubernetes/pkg/fieldpath/fieldpath.go @@ -21,6 +21,7 @@ import ( "strings" "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/util/validation" ) // FormatMap formats map[string]string to a string. @@ -42,6 +43,23 @@ func ExtractFieldPathAsString(obj interface{}, fieldPath string) (string, error) return "", nil } + if path, subscript, ok := SplitMaybeSubscriptedPath(fieldPath); ok { + switch path { + case "metadata.annotations": + if errs := validation.IsQualifiedName(strings.ToLower(subscript)); len(errs) != 0 { + return "", fmt.Errorf("invalid key subscript in %s: %s", fieldPath, strings.Join(errs, ";")) + } + return accessor.GetAnnotations()[subscript], nil + case "metadata.labels": + if errs := validation.IsQualifiedName(subscript); len(errs) != 0 { + return "", fmt.Errorf("invalid key subscript in %s: %s", fieldPath, strings.Join(errs, ";")) + } + return accessor.GetLabels()[subscript], nil + default: + return "", fmt.Errorf("fieldPath %q does not support subscript", fieldPath) + } + } + switch fieldPath { case "metadata.annotations": return FormatMap(accessor.GetAnnotations()), nil @@ -57,3 +75,29 @@ func ExtractFieldPathAsString(obj interface{}, fieldPath string) (string, error) return "", fmt.Errorf("unsupported fieldPath: %v", fieldPath) } + +// SplitMaybeSubscriptedPath checks whether the specified fieldPath is +// subscripted, and +// - if yes, this function splits the fieldPath into path and subscript, and +// returns (path, subscript, true). +// - if no, this function returns (fieldPath, "", false). 
+// +// Example inputs and outputs: +// - "metadata.annotations['myKey']" --> ("metadata.annotations", "myKey", true) +// - "metadata.annotations['a[b]c']" --> ("metadata.annotations", "a[b]c", true) +// - "metadata.labels['']" --> ("metadata.labels", "", true) +// - "metadata.labels" --> ("metadata.labels", "", false) +func SplitMaybeSubscriptedPath(fieldPath string) (string, string, bool) { + if !strings.HasSuffix(fieldPath, "']") { + return fieldPath, "", false + } + s := strings.TrimSuffix(fieldPath, "']") + parts := strings.SplitN(s, "['", 2) + if len(parts) < 2 { + return fieldPath, "", false + } + if len(parts[0]) == 0 { + return fieldPath, "", false + } + return parts[0], parts[1], true +} diff --git a/vendor/k8s.io/kubernetes/pkg/generated/openapi/BUILD b/vendor/k8s.io/kubernetes/pkg/generated/openapi/BUILD index 245c796215e0..5c2245822149 100644 --- a/vendor/k8s.io/kubernetes/pkg/generated/openapi/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/generated/openapi/BUILD @@ -4,22 +4,27 @@ package(default_visibility = ["//visibility:public"]) load("//pkg/generated/openapi:def.bzl", "openapi_library") +load("//build:openapi.bzl", "openapi_go_prefix", "openapi_vendor_prefix") openapi_library( name = "go_default_library", srcs = ["doc.go"], + go_prefix = openapi_go_prefix, openapi_targets = [ - "federation/apis/federation/v1beta1", "pkg/apis/abac/v0", "pkg/apis/abac/v1beta1", "pkg/apis/componentconfig/v1alpha1", "pkg/kubelet/apis/kubeletconfig/v1alpha1", + "pkg/proxy/apis/kubeproxyconfig/v1alpha1", "pkg/version", ], tags = ["automanaged"], + vendor_prefix = openapi_vendor_prefix, vendor_targets = [ - "k8s.io/api/admission/v1alpha1", + "k8s.io/api/admission/v1beta1", "k8s.io/api/admissionregistration/v1alpha1", + "k8s.io/api/admissionregistration/v1beta1", + "k8s.io/api/apps/v1", "k8s.io/api/apps/v1beta1", "k8s.io/api/apps/v1beta2", "k8s.io/api/authentication/v1", @@ -33,6 +38,7 @@ openapi_library( "k8s.io/api/batch/v2alpha1", "k8s.io/api/certificates/v1beta1", "k8s.io/api/core/v1", + "k8s.io/api/events/v1beta1", "k8s.io/api/extensions/v1beta1", "k8s.io/api/imagepolicy/v1alpha1", "k8s.io/api/networking/v1", @@ -43,6 +49,7 @@ openapi_library( "k8s.io/api/scheduling/v1alpha1", "k8s.io/api/settings/v1alpha1", "k8s.io/api/storage/v1", + "k8s.io/api/storage/v1alpha1", "k8s.io/api/storage/v1beta1", "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1", "k8s.io/apimachinery/pkg/api/resource", @@ -55,6 +62,7 @@ openapi_library( "k8s.io/apiserver/pkg/apis/audit/v1alpha1", "k8s.io/apiserver/pkg/apis/audit/v1beta1", "k8s.io/apiserver/pkg/apis/example/v1", + "k8s.io/apiserver/pkg/apis/example2/v1", "k8s.io/client-go/pkg/version", "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1", "k8s.io/metrics/pkg/apis/custom_metrics/v1beta1", diff --git a/vendor/k8s.io/kubernetes/pkg/generated/openapi/def.bzl b/vendor/k8s.io/kubernetes/pkg/generated/openapi/def.bzl index fa530f2b7454..87fa23acd1c7 100644 --- a/vendor/k8s.io/kubernetes/pkg/generated/openapi/def.bzl +++ b/vendor/k8s.io/kubernetes/pkg/generated/openapi/def.bzl @@ -1,7 +1,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") load("@io_kubernetes_build//defs:go.bzl", "go_genrule") -def openapi_library(name, tags, srcs, openapi_targets=[], vendor_targets=[]): +def openapi_library(name, tags, srcs, go_prefix, vendor_prefix="", openapi_targets=[], vendor_targets=[]): deps = [ "//vendor/github.com/go-openapi/spec:go_default_library", "//vendor/k8s.io/kube-openapi/pkg/common:go_default_library", @@ -14,17 +14,17 @@ def openapi_library(name, 
tags, srcs, openapi_targets=[], vendor_targets=[]): ) go_genrule( name = "zz_generated.openapi", - srcs = srcs + ["//hack/boilerplate:boilerplate.go.txt"], + srcs = srcs + ["//" + vendor_prefix + "hack/boilerplate:boilerplate.go.txt"], outs = ["zz_generated.openapi.go"], cmd = " ".join([ "$(location //vendor/k8s.io/code-generator/cmd/openapi-gen)", "--v 1", "--logtostderr", - "--go-header-file $(location //hack/boilerplate:boilerplate.go.txt)", + "--go-header-file $(location //" + vendor_prefix + "hack/boilerplate:boilerplate.go.txt)", "--output-file-base zz_generated.openapi", - "--output-package k8s.io/kubernetes/pkg/generated/openapi", - "--input-dirs " + ",".join(["k8s.io/kubernetes/" + target for target in openapi_targets] + ["k8s.io/kubernetes/vendor/" + target for target in vendor_targets]), - "&& cp pkg/generated/openapi/zz_generated.openapi.go $(location :zz_generated.openapi.go)", + "--output-package " + go_prefix + vendor_prefix + "pkg/generated/openapi", + "--input-dirs " + ",".join([go_prefix + target for target in openapi_targets] + [go_prefix + "vendor/" + target for target in vendor_targets]), + "&& cp " + vendor_prefix + "pkg/generated/openapi/zz_generated.openapi.go $(location :zz_generated.openapi.go)", ]), go_deps = deps, tools = ["//vendor/k8s.io/code-generator/cmd/openapi-gen"], diff --git a/vendor/k8s.io/kubernetes/pkg/generated/openapi/zz_generated.openapi.go b/vendor/k8s.io/kubernetes/pkg/generated/openapi/zz_generated.openapi.go index 0a35a6b84214..698a90cd29e4 100644 --- a/vendor/k8s.io/kubernetes/pkg/generated/openapi/zz_generated.openapi.go +++ b/vendor/k8s.io/kubernetes/pkg/generated/openapi/zz_generated.openapi.go @@ -30,80 +30,42 @@ import ( func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition { return map[string]common.OpenAPIDefinition{ - "k8s.io/api/admissionregistration/v1alpha1.AdmissionHookClientConfig": { - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "AdmissionHookClientConfig contains the information to make a TLS connection with the webhook", - Properties: map[string]spec.Schema{ - "service": { - SchemaProps: spec.SchemaProps{ - Description: "Service is a reference to the service for this webhook. If there is only one port open for the service, that port will be used. If there are multiple ports open, port 443 will be used if it is open, otherwise it is an error. Required", - Ref: ref("k8s.io/api/admissionregistration/v1alpha1.ServiceReference"), - }, - }, - "caBundle": { - SchemaProps: spec.SchemaProps{ - Description: "CABundle is a PEM encoded CA bundle which will be used to validate webhook's server certificate. Required", - Type: []string{"string"}, - Format: "byte", - }, - }, - }, - Required: []string{"service", "caBundle"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/admissionregistration/v1alpha1.ServiceReference"}, - }, - "k8s.io/api/admissionregistration/v1alpha1.ExternalAdmissionHook": { + "k8s.io/api/admissionregistration/v1alpha1.Initializer": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "ExternalAdmissionHook describes an external admission webhook and the resources and operations it applies to.", + Description: "Initializer describes the name and the failure policy of an initializer, and what resources it applies to.", Properties: map[string]spec.Schema{ "name": { SchemaProps: spec.SchemaProps{ - Description: "The name of the external admission webhook. 
Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", + Description: "Name is the identifier of the initializer. It will be added to the object that needs to be initialized. Name should be fully qualified, e.g., alwayspullimages.kubernetes.io, where \"alwayspullimages\" is the name of the webhook, and kubernetes.io is the name of the organization. Required", Type: []string{"string"}, Format: "", }, }, - "clientConfig": { - SchemaProps: spec.SchemaProps{ - Description: "ClientConfig defines how to communicate with the hook. Required", - Ref: ref("k8s.io/api/admissionregistration/v1alpha1.AdmissionHookClientConfig"), - }, - }, "rules": { SchemaProps: spec.SchemaProps{ - Description: "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule.", + Description: "Rules describes what resources/subresources the initializer cares about. The initializer cares about an operation if it matches _any_ Rule. Rule.Resources must not include subresources.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/admissionregistration/v1alpha1.RuleWithOperations"), + Ref: ref("k8s.io/api/admissionregistration/v1alpha1.Rule"), }, }, }, }, }, - "failurePolicy": { - SchemaProps: spec.SchemaProps{ - Description: "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Ignore.", - Type: []string{"string"}, - Format: "", - }, - }, }, - Required: []string{"name", "clientConfig"}, + Required: []string{"name"}, }, }, Dependencies: []string{ - "k8s.io/api/admissionregistration/v1alpha1.AdmissionHookClientConfig", "k8s.io/api/admissionregistration/v1alpha1.RuleWithOperations"}, + "k8s.io/api/admissionregistration/v1alpha1.Rule"}, }, - "k8s.io/api/admissionregistration/v1alpha1.ExternalAdmissionHookConfiguration": { + "k8s.io/api/admissionregistration/v1alpha1.InitializerConfiguration": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "ExternalAdmissionHookConfiguration describes the configuration of initializers.", + Description: "InitializerConfiguration describes the configuration of initializers.", Properties: map[string]spec.Schema{ "kind": { SchemaProps: spec.SchemaProps{ @@ -125,7 +87,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), }, }, - "externalAdmissionHooks": { + "initializers": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ "x-kubernetes-patch-merge-key": "name", @@ -133,12 +95,12 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, }, SchemaProps: spec.SchemaProps{ - Description: "ExternalAdmissionHooks is a list of external admission webhooks and the affected resources and operations.", + Description: "Initializers is a list of resources and their default initializers Order-sensitive. 
When merging multiple InitializerConfigurations, we sort the initializers from different InitializerConfigurations by the name of the InitializerConfigurations; the order of the initializers from the same InitializerConfiguration is preserved.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/admissionregistration/v1alpha1.ExternalAdmissionHook"), + Ref: ref("k8s.io/api/admissionregistration/v1alpha1.Initializer"), }, }, }, @@ -148,12 +110,12 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, }, Dependencies: []string{ - "k8s.io/api/admissionregistration/v1alpha1.ExternalAdmissionHook", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "k8s.io/api/admissionregistration/v1alpha1.Initializer", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, }, - "k8s.io/api/admissionregistration/v1alpha1.ExternalAdmissionHookConfigurationList": { + "k8s.io/api/admissionregistration/v1alpha1.InitializerConfigurationList": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "ExternalAdmissionHookConfigurationList is a list of ExternalAdmissionHookConfiguration.", + Description: "InitializerConfigurationList is a list of InitializerConfiguration.", Properties: map[string]spec.Schema{ "kind": { SchemaProps: spec.SchemaProps{ @@ -177,12 +139,12 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "items": { SchemaProps: spec.SchemaProps{ - Description: "List of ExternalAdmissionHookConfiguration.", + Description: "List of InitializerConfiguration.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/admissionregistration/v1alpha1.ExternalAdmissionHookConfiguration"), + Ref: ref("k8s.io/api/admissionregistration/v1alpha1.InitializerConfiguration"), }, }, }, @@ -193,44 +155,64 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, }, Dependencies: []string{ - "k8s.io/api/admissionregistration/v1alpha1.ExternalAdmissionHookConfiguration", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + "k8s.io/api/admissionregistration/v1alpha1.InitializerConfiguration", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, }, - "k8s.io/api/admissionregistration/v1alpha1.Initializer": { + "k8s.io/api/admissionregistration/v1alpha1.Rule": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "Initializer describes the name and the failure policy of an initializer, and what resources it applies to.", + Description: "Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended to make sure that all the tuple expansions are valid.", Properties: map[string]spec.Schema{ - "name": { + "apiGroups": { SchemaProps: spec.SchemaProps{ - Description: "Name is the identifier of the initializer. It will be added to the object that needs to be initialized. Name should be fully qualified, e.g., alwayspullimages.kubernetes.io, where \"alwayspullimages\" is the name of the webhook, and kubernetes.io is the name of the organization. Required", - Type: []string{"string"}, - Format: "", + Description: "APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. 
Required.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, }, }, - "rules": { + "apiVersions": { SchemaProps: spec.SchemaProps{ - Description: "Rules describes what resources/subresources the initializer cares about. The initializer cares about an operation if it matches _any_ Rule. Rule.Resources must not include subresources.", + Description: "APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/admissionregistration/v1alpha1.Rule"), + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "resources": { + SchemaProps: spec.SchemaProps{ + Description: "Resources is a list of resources this rule applies to.\n\nFor example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources.\n\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\n\nDepending on the enclosing object, subresources might not be allowed. Required.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", }, }, }, }, }, }, - Required: []string{"name"}, }, }, - Dependencies: []string{ - "k8s.io/api/admissionregistration/v1alpha1.Rule"}, + Dependencies: []string{}, }, - "k8s.io/api/admissionregistration/v1alpha1.InitializerConfiguration": { + "k8s.io/api/admissionregistration/v1beta1.MutatingWebhookConfiguration": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "InitializerConfiguration describes the configuration of initializers.", + Description: "MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object.", Properties: map[string]spec.Schema{ "kind": { SchemaProps: spec.SchemaProps{ @@ -252,7 +234,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), }, }, - "initializers": { + "webhooks": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ "x-kubernetes-patch-merge-key": "name", @@ -260,12 +242,12 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, }, SchemaProps: spec.SchemaProps{ - Description: "Initializers is a list of resources and their default initializers Order-sensitive. 
When merging multiple InitializerConfigurations, we sort the initializers from different InitializerConfigurations by the name of the InitializerConfigurations; the order of the initializers from the same InitializerConfiguration is preserved.", + Description: "Webhooks is a list of webhooks and the affected resources and operations.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/admissionregistration/v1alpha1.Initializer"), + Ref: ref("k8s.io/api/admissionregistration/v1beta1.Webhook"), }, }, }, @@ -275,12 +257,12 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, }, Dependencies: []string{ - "k8s.io/api/admissionregistration/v1alpha1.Initializer", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "k8s.io/api/admissionregistration/v1beta1.Webhook", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, }, - "k8s.io/api/admissionregistration/v1alpha1.InitializerConfigurationList": { + "k8s.io/api/admissionregistration/v1beta1.MutatingWebhookConfigurationList": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "InitializerConfigurationList is a list of InitializerConfiguration.", + Description: "MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration.", Properties: map[string]spec.Schema{ "kind": { SchemaProps: spec.SchemaProps{ @@ -304,12 +286,12 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "items": { SchemaProps: spec.SchemaProps{ - Description: "List of InitializerConfiguration.", + Description: "List of MutatingWebhookConfiguration.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/admissionregistration/v1alpha1.InitializerConfiguration"), + Ref: ref("k8s.io/api/admissionregistration/v1beta1.MutatingWebhookConfiguration"), }, }, }, @@ -320,9 +302,9 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, }, Dependencies: []string{ - "k8s.io/api/admissionregistration/v1alpha1.InitializerConfiguration", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + "k8s.io/api/admissionregistration/v1beta1.MutatingWebhookConfiguration", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, }, - "k8s.io/api/admissionregistration/v1alpha1.Rule": { + "k8s.io/api/admissionregistration/v1beta1.Rule": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ Description: "Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended to make sure that all the tuple expansions are valid.", @@ -374,7 +356,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, Dependencies: []string{}, }, - "k8s.io/api/admissionregistration/v1alpha1.RuleWithOperations": { + "k8s.io/api/admissionregistration/v1beta1.RuleWithOperations": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ Description: "RuleWithOperations is a tuple of Operations and Resources. 
It is recommended to make sure that all the tuple expansions are valid.", @@ -440,21 +422,28 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, Dependencies: []string{}, }, - "k8s.io/api/admissionregistration/v1alpha1.ServiceReference": { + "k8s.io/api/admissionregistration/v1beta1.ServiceReference": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ Description: "ServiceReference holds a reference to Service.legacy.k8s.io", Properties: map[string]spec.Schema{ "namespace": { SchemaProps: spec.SchemaProps{ - Description: "Namespace is the namespace of the service Required", + Description: "`namespace` is the namespace of the service. Required", Type: []string{"string"}, Format: "", }, }, "name": { SchemaProps: spec.SchemaProps{ - Description: "Name is the name of the service Required", + Description: "`name` is the name of the service. Required", + Type: []string{"string"}, + Format: "", + }, + }, + "path": { + SchemaProps: spec.SchemaProps{ + Description: "`path` is an optional URL path which will be sent in any request to this service.", Type: []string{"string"}, Format: "", }, @@ -465,10 +454,10 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, Dependencies: []string{}, }, - "k8s.io/api/apps/v1beta1.ControllerRevision": { + "k8s.io/api/admissionregistration/v1beta1.ValidatingWebhookConfiguration": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1beta2/ControllerRevision. See the release notes for more information. ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.", + Description: "ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it.", Properties: map[string]spec.Schema{ "kind": { SchemaProps: spec.SchemaProps{ @@ -486,34 +475,39 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "metadata": { SchemaProps: spec.SchemaProps{ - Description: "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + Description: "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), }, }, - "data": { - SchemaProps: spec.SchemaProps{ - Description: "Data is the serialized representation of the state.", - Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), + "webhooks": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, }, - }, - "revision": { SchemaProps: spec.SchemaProps{ - Description: "Revision indicates the revision of the state represented by Data.", - Type: []string{"integer"}, - Format: "int64", + Description: "Webhooks is a list of webhooks and the affected resources and operations.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/admissionregistration/v1beta1.Webhook"), + }, + }, + }, }, }, }, - Required: []string{"revision"}, }, }, Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta", "k8s.io/apimachinery/pkg/runtime.RawExtension"}, + "k8s.io/api/admissionregistration/v1beta1.Webhook", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, }, - "k8s.io/api/apps/v1beta1.ControllerRevisionList": { + "k8s.io/api/admissionregistration/v1beta1.ValidatingWebhookConfigurationList": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "ControllerRevisionList is a resource containing a list of ControllerRevision objects.", + Description: "ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration.", Properties: map[string]spec.Schema{ "kind": { SchemaProps: spec.SchemaProps{ @@ -531,18 +525,18 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "metadata": { SchemaProps: spec.SchemaProps{ - Description: "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), }, }, "items": { SchemaProps: spec.SchemaProps{ - Description: "Items is the list of ControllerRevisions", + Description: "List of ValidatingWebhookConfiguration.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/apps/v1beta1.ControllerRevision"), + Ref: ref("k8s.io/api/admissionregistration/v1beta1.ValidatingWebhookConfiguration"), }, }, }, @@ -553,107 +547,140 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, }, Dependencies: []string{ - "k8s.io/api/apps/v1beta1.ControllerRevision", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + "k8s.io/api/admissionregistration/v1beta1.ValidatingWebhookConfiguration", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, }, - "k8s.io/api/apps/v1beta1.Deployment": { + "k8s.io/api/admissionregistration/v1beta1.Webhook": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "DEPRECATED - This group version of Deployment is deprecated by apps/v1beta2/Deployment. See the release notes for more information. 
Deployment enables declarative updates for Pods and ReplicaSets.", + Description: "Webhook describes an admission webhook and the resources and operations it applies to.", Properties: map[string]spec.Schema{ - "kind": { + "name": { SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Description: "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", Type: []string{"string"}, Format: "", }, }, - "apiVersion": { + "clientConfig": { SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", + Description: "ClientConfig defines how to communicate with the hook. Required", + Ref: ref("k8s.io/api/admissionregistration/v1beta1.WebhookClientConfig"), }, }, - "metadata": { + "rules": { SchemaProps: spec.SchemaProps{ - Description: "Standard object metadata.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + Description: "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/admissionregistration/v1beta1.RuleWithOperations"), + }, + }, + }, }, }, - "spec": { + "failurePolicy": { SchemaProps: spec.SchemaProps{ - Description: "Specification of the desired behavior of the Deployment.", - Ref: ref("k8s.io/api/apps/v1beta1.DeploymentSpec"), + Description: "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Ignore.", + Type: []string{"string"}, + Format: "", }, }, - "status": { + "namespaceSelector": { SchemaProps: spec.SchemaProps{ - Description: "Most recently observed status of the Deployment.", - Ref: ref("k8s.io/api/apps/v1beta1.DeploymentStatus"), + Description: "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. 
If the object is other cluster scoped resource, it is not subjected to the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"), }, }, }, + Required: []string{"name", "clientConfig"}, }, }, Dependencies: []string{ - "k8s.io/api/apps/v1beta1.DeploymentSpec", "k8s.io/api/apps/v1beta1.DeploymentStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "k8s.io/api/admissionregistration/v1beta1.RuleWithOperations", "k8s.io/api/admissionregistration/v1beta1.WebhookClientConfig", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, }, - "k8s.io/api/apps/v1beta1.DeploymentCondition": { + "k8s.io/api/admissionregistration/v1beta1.WebhookClientConfig": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "DeploymentCondition describes the state of a deployment at a certain point.", + Description: "WebhookClientConfig contains the information to make a TLS connection with the webhook", Properties: map[string]spec.Schema{ - "type": { + "url": { SchemaProps: spec.SchemaProps{ - Description: "Type of deployment condition.", + Description: "`url` gives the location of the webhook, in standard URL form (`[scheme://]host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.", Type: []string{"string"}, Format: "", }, }, - "status": { + "service": { SchemaProps: spec.SchemaProps{ - Description: "Status of the condition, one of True, False, Unknown.", - Type: []string{"string"}, - Format: "", + Description: "`service` is a reference to the service for this webhook. 
Either `service` or `url` must be specified.\n\nIf the webhook is running within the cluster, then you should use `service`.\n\nIf there is only one port open for the service, that port will be used. If there are multiple ports open, port 443 will be used if it is open, otherwise it is an error.", + Ref: ref("k8s.io/api/admissionregistration/v1beta1.ServiceReference"), }, }, - "lastUpdateTime": { + "caBundle": { SchemaProps: spec.SchemaProps{ - Description: "The last time this condition was updated.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + Description: "`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. Required.", + Type: []string{"string"}, + Format: "byte", }, }, - "lastTransitionTime": { + }, + Required: []string{"caBundle"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/admissionregistration/v1beta1.ServiceReference"}, + }, + "k8s.io/api/apps/v1.ControllerRevision": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.", + Properties: map[string]spec.Schema{ + "kind": { SchemaProps: spec.SchemaProps{ - Description: "Last time the condition transitioned from one status to another.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", }, }, - "reason": { + "apiVersion": { SchemaProps: spec.SchemaProps{ - Description: "The reason for the condition's last transition.", + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", Type: []string{"string"}, Format: "", }, }, - "message": { + "metadata": { SchemaProps: spec.SchemaProps{ - Description: "A human readable message indicating details about the transition.", - Type: []string{"string"}, - Format: "", + Description: "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "data": { + SchemaProps: spec.SchemaProps{ + Description: "Data is the serialized representation of the state.", + Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), + }, + }, + "revision": { + SchemaProps: spec.SchemaProps{ + Description: "Revision indicates the revision of the state represented by Data.", + Type: []string{"integer"}, + Format: "int64", }, }, }, - Required: []string{"type", "status"}, + Required: []string{"revision"}, }, }, Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, + "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta", "k8s.io/apimachinery/pkg/runtime.RawExtension"}, }, - "k8s.io/api/apps/v1beta1.DeploymentList": { + "k8s.io/api/apps/v1.ControllerRevisionList": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "DeploymentList is a list of Deployments.", + Description: "ControllerRevisionList is a resource containing a list of ControllerRevision objects.", Properties: map[string]spec.Schema{ "kind": { SchemaProps: spec.SchemaProps{ @@ -671,18 +698,18 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "metadata": { SchemaProps: spec.SchemaProps{ - Description: "Standard list metadata.", + Description: "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), }, }, "items": { SchemaProps: spec.SchemaProps{ - Description: "Items is the list of Deployments.", + Description: "Items is the list of ControllerRevisions", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/apps/v1beta1.Deployment"), + Ref: ref("k8s.io/api/apps/v1.ControllerRevision"), }, }, }, @@ -693,12 +720,12 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, }, Dependencies: []string{ - "k8s.io/api/apps/v1beta1.Deployment", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + "k8s.io/api/apps/v1.ControllerRevision", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, }, - "k8s.io/api/apps/v1beta1.DeploymentRollback": { + "k8s.io/api/apps/v1.DaemonSet": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "DEPRECATED. DeploymentRollback stores the information required to rollback a deployment.", + Description: "DaemonSet represents the configuration of a daemon set.", Properties: map[string]spec.Schema{ "kind": { SchemaProps: spec.SchemaProps{ @@ -714,154 +741,229 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Format: "", }, }, - "name": { + "metadata": { SchemaProps: spec.SchemaProps{ - Description: "Required: This must match the Name of a deployment.", + Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Description: "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + Ref: ref("k8s.io/api/apps/v1.DaemonSetSpec"), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + Ref: ref("k8s.io/api/apps/v1.DaemonSetStatus"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/apps/v1.DaemonSetSpec", "k8s.io/api/apps/v1.DaemonSetStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + }, + "k8s.io/api/apps/v1.DaemonSetCondition": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "DaemonSetCondition describes the state of a DaemonSet at a certain point.", + Properties: map[string]spec.Schema{ + "type": { + SchemaProps: spec.SchemaProps{ + Description: "Type of DaemonSet condition.", Type: []string{"string"}, Format: "", }, }, - "updatedAnnotations": { + "status": { SchemaProps: spec.SchemaProps{ - Description: "The annotations to be updated to a deployment", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, + Description: "Status of the condition, one of True, False, Unknown.", + Type: []string{"string"}, + Format: "", }, }, - "rollbackTo": { + "lastTransitionTime": { SchemaProps: spec.SchemaProps{ - Description: "The config of this deployment rollback.", - Ref: ref("k8s.io/api/apps/v1beta1.RollbackConfig"), + Description: "Last time the condition transitioned from one status to another.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "reason": { + SchemaProps: spec.SchemaProps{ + Description: "The reason for the condition's last transition.", + Type: []string{"string"}, + Format: "", + }, + }, + "message": { + SchemaProps: spec.SchemaProps{ + Description: "A human readable message indicating details about the transition.", + Type: []string{"string"}, + Format: "", }, }, }, - Required: []string{"name", "rollbackTo"}, + Required: []string{"type", "status"}, }, }, Dependencies: []string{ - "k8s.io/api/apps/v1beta1.RollbackConfig"}, + "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, }, - "k8s.io/api/apps/v1beta1.DeploymentSpec": { + "k8s.io/api/apps/v1.DaemonSetList": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "DeploymentSpec is the specification of the desired behavior of the Deployment.", + Description: "DaemonSetList is a collection of daemon sets.", Properties: map[string]spec.Schema{ - "replicas": { + "kind": { SchemaProps: spec.SchemaProps{ - Description: "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", - Type: []string{"integer"}, - Format: "int32", + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Description: "A list of daemon sets.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/apps/v1.DaemonSet"), + }, + }, + }, }, }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/apps/v1.DaemonSet", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + }, + "k8s.io/api/apps/v1.DaemonSetSpec": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "DaemonSetSpec is the specification of a daemon set.", + Properties: map[string]spec.Schema{ "selector": { SchemaProps: spec.SchemaProps{ - Description: "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment.", + Description: "A label query over pods that are managed by the daemon set. Must match in order to be controlled. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"), }, }, "template": { SchemaProps: spec.SchemaProps{ - Description: "Template describes the pods that will be created.", + Description: "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", Ref: ref("k8s.io/api/core/v1.PodTemplateSpec"), }, }, - "strategy": { + "updateStrategy": { SchemaProps: spec.SchemaProps{ - Description: "The deployment strategy to use to replace existing pods with new ones.", - Ref: ref("k8s.io/api/apps/v1beta1.DeploymentStrategy"), + Description: "An update strategy to replace existing DaemonSet pods with new pods.", + Ref: ref("k8s.io/api/apps/v1.DaemonSetUpdateStrategy"), }, }, "minReadySeconds": { SchemaProps: spec.SchemaProps{ - Description: "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + Description: "The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).", Type: []string{"integer"}, Format: "int32", }, }, "revisionHistoryLimit": { SchemaProps: spec.SchemaProps{ - Description: "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 2.", + Description: "The number of old history to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. 
Defaults to 10.", Type: []string{"integer"}, Format: "int32", }, }, - "paused": { + }, + Required: []string{"selector", "template"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/apps/v1.DaemonSetUpdateStrategy", "k8s.io/api/core/v1.PodTemplateSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, + }, + "k8s.io/api/apps/v1.DaemonSetStatus": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "DaemonSetStatus represents the current status of a daemon set.", + Properties: map[string]spec.Schema{ + "currentNumberScheduled": { SchemaProps: spec.SchemaProps{ - Description: "Indicates that the deployment is paused.", - Type: []string{"boolean"}, - Format: "", + Description: "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", + Type: []string{"integer"}, + Format: "int32", }, }, - "rollbackTo": { + "numberMisscheduled": { SchemaProps: spec.SchemaProps{ - Description: "DEPRECATED. The config this deployment is rolling back to. Will be cleared after rollback is done.", - Ref: ref("k8s.io/api/apps/v1beta1.RollbackConfig"), + Description: "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", + Type: []string{"integer"}, + Format: "int32", }, }, - "progressDeadlineSeconds": { + "desiredNumberScheduled": { SchemaProps: spec.SchemaProps{ - Description: "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. Defaults to 600s.", + Description: "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", Type: []string{"integer"}, Format: "int32", }, }, - }, - Required: []string{"template"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/apps/v1beta1.DeploymentStrategy", "k8s.io/api/apps/v1beta1.RollbackConfig", "k8s.io/api/core/v1.PodTemplateSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, - }, - "k8s.io/api/apps/v1beta1.DeploymentStatus": { - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "DeploymentStatus is the most recently observed status of the Deployment.", - Properties: map[string]spec.Schema{ - "observedGeneration": { + "numberReady": { SchemaProps: spec.SchemaProps{ - Description: "The generation observed by the deployment controller.", + Description: "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready.", Type: []string{"integer"}, - Format: "int64", + Format: "int32", }, }, - "replicas": { + "observedGeneration": { SchemaProps: spec.SchemaProps{ - Description: "Total number of non-terminated pods targeted by this deployment (their labels match the selector).", + Description: "The most recent generation observed by the daemon set controller.", Type: []string{"integer"}, - Format: "int32", + Format: "int64", }, }, - "updatedReplicas": { + "updatedNumberScheduled": { SchemaProps: spec.SchemaProps{ - Description: "Total number of non-terminated pods targeted by this deployment that have the desired template spec.", + Description: "The total number of nodes that are running updated daemon pod", Type: []string{"integer"}, Format: "int32", }, }, - "readyReplicas": { + "numberAvailable": { SchemaProps: spec.SchemaProps{ - Description: "Total number of ready pods targeted by this deployment.", + Description: "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds)", Type: []string{"integer"}, Format: "int32", }, }, - "availableReplicas": { + "numberUnavailable": { SchemaProps: spec.SchemaProps{ - Description: "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.", + Description: "The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds)", Type: []string{"integer"}, Format: "int32", }, }, - "unavailableReplicas": { + "collisionCount": { SchemaProps: spec.SchemaProps{ - Description: "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.", + Description: "Count of hash collisions for the DaemonSet. 
The DaemonSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", Type: []string{"integer"}, Format: "int32", }, @@ -874,256 +976,147 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, }, SchemaProps: spec.SchemaProps{ - Description: "Represents the latest available observations of a deployment's current state.", + Description: "Represents the latest available observations of a DaemonSet's current state.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/apps/v1beta1.DeploymentCondition"), + Ref: ref("k8s.io/api/apps/v1.DaemonSetCondition"), }, }, }, }, }, - "collisionCount": { - SchemaProps: spec.SchemaProps{ - Description: "Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.", - Type: []string{"integer"}, - Format: "int32", - }, - }, }, + Required: []string{"currentNumberScheduled", "numberMisscheduled", "desiredNumberScheduled", "numberReady"}, }, }, Dependencies: []string{ - "k8s.io/api/apps/v1beta1.DeploymentCondition"}, + "k8s.io/api/apps/v1.DaemonSetCondition"}, }, - "k8s.io/api/apps/v1beta1.DeploymentStrategy": { + "k8s.io/api/apps/v1.DaemonSetUpdateStrategy": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "DeploymentStrategy describes how to replace existing pods with new ones.", + Description: "DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet.", Properties: map[string]spec.Schema{ "type": { SchemaProps: spec.SchemaProps{ - Description: "Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". Default is RollingUpdate.", + Description: "Type of daemon set update. Can be \"RollingUpdate\" or \"OnDelete\". Default is RollingUpdate.", Type: []string{"string"}, Format: "", }, }, "rollingUpdate": { SchemaProps: spec.SchemaProps{ - Description: "Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate.", - Ref: ref("k8s.io/api/apps/v1beta1.RollingUpdateDeployment"), + Description: "Rolling update config params. Present only if type = \"RollingUpdate\".", + Ref: ref("k8s.io/api/apps/v1.RollingUpdateDaemonSet"), }, }, }, }, }, Dependencies: []string{ - "k8s.io/api/apps/v1beta1.RollingUpdateDeployment"}, + "k8s.io/api/apps/v1.RollingUpdateDaemonSet"}, }, - "k8s.io/api/apps/v1beta1.RollbackConfig": { + "k8s.io/api/apps/v1.Deployment": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "DEPRECATED.", + Description: "Deployment enables declarative updates for Pods and ReplicaSets.", Properties: map[string]spec.Schema{ - "revision": { + "kind": { SchemaProps: spec.SchemaProps{ - Description: "The revision to rollback to. If set to 0, rollback to the last revision.", - Type: []string{"integer"}, - Format: "int64", + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", }, }, - }, - }, - }, - Dependencies: []string{}, - }, - "k8s.io/api/apps/v1beta1.RollingUpdateDeployment": { - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Spec to control the desired behavior of rolling update.", - Properties: map[string]spec.Schema{ - "maxUnavailable": { + "apiVersion": { SchemaProps: spec.SchemaProps{ - Description: "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.", - Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", }, }, - "maxSurge": { + "metadata": { SchemaProps: spec.SchemaProps{ - Description: "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new RC can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. 
Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of desired pods.", - Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), + Description: "Standard object metadata.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Description: "Specification of the desired behavior of the Deployment.", + Ref: ref("k8s.io/api/apps/v1.DeploymentSpec"), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "Most recently observed status of the Deployment.", + Ref: ref("k8s.io/api/apps/v1.DeploymentStatus"), }, }, }, }, }, Dependencies: []string{ - "k8s.io/apimachinery/pkg/util/intstr.IntOrString"}, + "k8s.io/api/apps/v1.DeploymentSpec", "k8s.io/api/apps/v1.DeploymentStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, }, - "k8s.io/api/apps/v1beta1.RollingUpdateStatefulSetStrategy": { - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.", - Properties: map[string]spec.Schema{ - "partition": { - SchemaProps: spec.SchemaProps{ - Description: "Partition indicates the ordinal at which the StatefulSet should be partitioned.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - }, - }, - }, - Dependencies: []string{}, - }, - "k8s.io/api/apps/v1beta1.Scale": { + "k8s.io/api/apps/v1.DeploymentCondition": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "Scale represents a scaling request for a resource.", + Description: "DeploymentCondition describes the state of a deployment at a certain point.", Properties: map[string]spec.Schema{ - "kind": { + "type": { SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Description: "Type of deployment condition.", Type: []string{"string"}, Format: "", }, }, - "apiVersion": { + "status": { SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Description: "Status of the condition, one of True, False, Unknown.", Type: []string{"string"}, Format: "", }, }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Description: "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", - Ref: ref("k8s.io/api/apps/v1beta1.ScaleSpec"), - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Description: "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. 
Read-only.", - Ref: ref("k8s.io/api/apps/v1beta1.ScaleStatus"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/apps/v1beta1.ScaleSpec", "k8s.io/api/apps/v1beta1.ScaleStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - }, - "k8s.io/api/apps/v1beta1.ScaleSpec": { - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ScaleSpec describes the attributes of a scale subresource", - Properties: map[string]spec.Schema{ - "replicas": { - SchemaProps: spec.SchemaProps{ - Description: "desired number of instances for the scaled object.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - }, - }, - }, - Dependencies: []string{}, - }, - "k8s.io/api/apps/v1beta1.ScaleStatus": { - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ScaleStatus represents the current status of a scale subresource.", - Properties: map[string]spec.Schema{ - "replicas": { - SchemaProps: spec.SchemaProps{ - Description: "actual number of observed instances of the scaled object.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "selector": { + "lastUpdateTime": { SchemaProps: spec.SchemaProps{ - Description: "label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, + Description: "The last time this condition was updated.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, - "targetSelector": { + "lastTransitionTime": { SchemaProps: spec.SchemaProps{ - Description: "label selector for pods that should match the replicas count. This is a serializated version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - Type: []string{"string"}, - Format: "", + Description: "Last time the condition transitioned from one status to another.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, - }, - Required: []string{"replicas"}, - }, - }, - Dependencies: []string{}, - }, - "k8s.io/api/apps/v1beta1.StatefulSet": { - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "DEPRECATED - This group version of StatefulSet is deprecated by apps/v1beta2/StatefulSet. See the release notes for more information. StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", - Properties: map[string]spec.Schema{ - "kind": { + "reason": { SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Description: "The reason for the condition's last transition.", Type: []string{"string"}, Format: "", }, }, - "apiVersion": { + "message": { SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Description: "A human readable message indicating details about the transition.", Type: []string{"string"}, Format: "", }, }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Description: "Spec defines the desired identities of pods in this set.", - Ref: ref("k8s.io/api/apps/v1beta1.StatefulSetSpec"), - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Description: "Status is the current status of Pods in this StatefulSet. This data may be out of date by some window of time.", - Ref: ref("k8s.io/api/apps/v1beta1.StatefulSetStatus"), - }, - }, }, + Required: []string{"type", "status"}, }, }, Dependencies: []string{ - "k8s.io/api/apps/v1beta1.StatefulSetSpec", "k8s.io/api/apps/v1beta1.StatefulSetStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, }, - "k8s.io/api/apps/v1beta1.StatefulSetList": { + "k8s.io/api/apps/v1.DeploymentList": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "StatefulSetList is a collection of StatefulSets.", + Description: "DeploymentList is a list of Deployments.", Properties: map[string]spec.Schema{ "kind": { SchemaProps: spec.SchemaProps{ @@ -1141,16 +1134,18 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "metadata": { SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + Description: "Standard list metadata.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), }, }, "items": { SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, + Description: "Items is the list of Deployments.", + Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/apps/v1beta1.StatefulSet"), + Ref: ref("k8s.io/api/apps/v1.Deployment"), }, }, }, @@ -1161,174 +1156,180 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, }, Dependencies: []string{ - "k8s.io/api/apps/v1beta1.StatefulSet", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + "k8s.io/api/apps/v1.Deployment", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, }, - "k8s.io/api/apps/v1beta1.StatefulSetSpec": { + "k8s.io/api/apps/v1.DeploymentSpec": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "A StatefulSetSpec is the specification of a StatefulSet.", + Description: "DeploymentSpec is the specification of the desired behavior of the Deployment.", Properties: map[string]spec.Schema{ "replicas": { SchemaProps: spec.SchemaProps{ - Description: "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", + Description: "Number of desired pods. 
This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", Type: []string{"integer"}, Format: "int32", }, }, "selector": { SchemaProps: spec.SchemaProps{ - Description: "selector is a label query over pods that should match the replica count. If empty, defaulted to labels on the pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + Description: "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment. It must match the pod template's labels.", Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"), }, }, "template": { SchemaProps: spec.SchemaProps{ - Description: "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.", + Description: "Template describes the pods that will be created.", Ref: ref("k8s.io/api/core/v1.PodTemplateSpec"), }, }, - "volumeClaimTemplates": { + "strategy": { SchemaProps: spec.SchemaProps{ - Description: "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaim"), - }, - }, - }, + Description: "The deployment strategy to use to replace existing pods with new ones.", + Ref: ref("k8s.io/api/apps/v1.DeploymentStrategy"), }, }, - "serviceName": { + "minReadySeconds": { SchemaProps: spec.SchemaProps{ - Description: "serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", - Type: []string{"string"}, - Format: "", + Description: "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + Type: []string{"integer"}, + Format: "int32", }, }, - "podManagementPolicy": { + "revisionHistoryLimit": { SchemaProps: spec.SchemaProps{ - Description: "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.", - Type: []string{"string"}, - Format: "", + Description: "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. 
Defaults to 10.", + Type: []string{"integer"}, + Format: "int32", }, }, - "updateStrategy": { + "paused": { SchemaProps: spec.SchemaProps{ - Description: "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.", - Ref: ref("k8s.io/api/apps/v1beta1.StatefulSetUpdateStrategy"), + Description: "Indicates that the deployment is paused.", + Type: []string{"boolean"}, + Format: "", }, }, - "revisionHistoryLimit": { + "progressDeadlineSeconds": { SchemaProps: spec.SchemaProps{ - Description: "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", + Description: "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. Defaults to 600s.", Type: []string{"integer"}, Format: "int32", }, }, }, - Required: []string{"template", "serviceName"}, + Required: []string{"selector", "template"}, }, }, Dependencies: []string{ - "k8s.io/api/apps/v1beta1.StatefulSetUpdateStrategy", "k8s.io/api/core/v1.PersistentVolumeClaim", "k8s.io/api/core/v1.PodTemplateSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, + "k8s.io/api/apps/v1.DeploymentStrategy", "k8s.io/api/core/v1.PodTemplateSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, }, - "k8s.io/api/apps/v1beta1.StatefulSetStatus": { + "k8s.io/api/apps/v1.DeploymentStatus": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "StatefulSetStatus represents the current state of a StatefulSet.", + Description: "DeploymentStatus is the most recently observed status of the Deployment.", Properties: map[string]spec.Schema{ "observedGeneration": { SchemaProps: spec.SchemaProps{ - Description: "observedGeneration is the most recent generation observed for this StatefulSet. 
It corresponds to the StatefulSet's generation, which is updated on mutation by the API Server.", + Description: "The generation observed by the deployment controller.", Type: []string{"integer"}, Format: "int64", }, }, "replicas": { SchemaProps: spec.SchemaProps{ - Description: "replicas is the number of Pods created by the StatefulSet controller.", + Description: "Total number of non-terminated pods targeted by this deployment (their labels match the selector).", Type: []string{"integer"}, Format: "int32", }, }, - "readyReplicas": { + "updatedReplicas": { SchemaProps: spec.SchemaProps{ - Description: "readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.", + Description: "Total number of non-terminated pods targeted by this deployment that have the desired template spec.", Type: []string{"integer"}, Format: "int32", }, }, - "currentReplicas": { + "readyReplicas": { SchemaProps: spec.SchemaProps{ - Description: "currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by currentRevision.", + Description: "Total number of ready pods targeted by this deployment.", Type: []string{"integer"}, Format: "int32", }, }, - "updatedReplicas": { + "availableReplicas": { SchemaProps: spec.SchemaProps{ - Description: "updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by updateRevision.", + Description: "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.", Type: []string{"integer"}, Format: "int32", }, }, - "currentRevision": { + "unavailableReplicas": { SchemaProps: spec.SchemaProps{ - Description: "currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas).", - Type: []string{"string"}, - Format: "", + Description: "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.", + Type: []string{"integer"}, + Format: "int32", }, }, - "updateRevision": { + "conditions": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge", + }, + }, SchemaProps: spec.SchemaProps{ - Description: "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", - Type: []string{"string"}, - Format: "", + Description: "Represents the latest available observations of a deployment's current state.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/apps/v1.DeploymentCondition"), + }, + }, + }, }, }, "collisionCount": { SchemaProps: spec.SchemaProps{ - Description: "collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", + Description: "Count of hash collisions for the Deployment. 
The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.", Type: []string{"integer"}, Format: "int32", }, }, }, - Required: []string{"replicas"}, }, }, - Dependencies: []string{}, + Dependencies: []string{ + "k8s.io/api/apps/v1.DeploymentCondition"}, }, - "k8s.io/api/apps/v1beta1.StatefulSetUpdateStrategy": { + "k8s.io/api/apps/v1.DeploymentStrategy": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "StatefulSetUpdateStrategy indicates the strategy that the StatefulSet controller will use to perform updates. It includes any additional parameters necessary to perform the update for the indicated strategy.", + Description: "DeploymentStrategy describes how to replace existing pods with new ones.", Properties: map[string]spec.Schema{ "type": { SchemaProps: spec.SchemaProps{ - Description: "Type indicates the type of the StatefulSetUpdateStrategy.", + Description: "Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". Default is RollingUpdate.", Type: []string{"string"}, Format: "", }, }, "rollingUpdate": { SchemaProps: spec.SchemaProps{ - Description: "RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.", - Ref: ref("k8s.io/api/apps/v1beta1.RollingUpdateStatefulSetStrategy"), + Description: "Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate.", + Ref: ref("k8s.io/api/apps/v1.RollingUpdateDeployment"), }, }, }, }, }, Dependencies: []string{ - "k8s.io/api/apps/v1beta1.RollingUpdateStatefulSetStrategy"}, + "k8s.io/api/apps/v1.RollingUpdateDeployment"}, }, - "k8s.io/api/apps/v1beta2.ControllerRevision": { + "k8s.io/api/apps/v1.ReplicaSet": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.", + Description: "ReplicaSet ensures that a specified number of pod replicas are running at any given time.", Properties: map[string]spec.Schema{ "kind": { SchemaProps: spec.SchemaProps{ @@ -1346,122 +1347,78 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "metadata": { SchemaProps: spec.SchemaProps{ - Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + Description: "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), }, }, - "data": { + "spec": { SchemaProps: spec.SchemaProps{ - Description: "Data is the serialized representation of the state.", - Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), + Description: "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + Ref: ref("k8s.io/api/apps/v1.ReplicaSetSpec"), }, }, - "revision": { + "status": { SchemaProps: spec.SchemaProps{ - Description: "Revision indicates the revision of the state represented by Data.", - Type: []string{"integer"}, - Format: "int64", + Description: "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + Ref: ref("k8s.io/api/apps/v1.ReplicaSetStatus"), }, }, }, - Required: []string{"revision"}, }, }, Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta", "k8s.io/apimachinery/pkg/runtime.RawExtension"}, + "k8s.io/api/apps/v1.ReplicaSetSpec", "k8s.io/api/apps/v1.ReplicaSetStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, }, - "k8s.io/api/apps/v1beta2.ControllerRevisionList": { + "k8s.io/api/apps/v1.ReplicaSetCondition": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "ControllerRevisionList is a resource containing a list of ControllerRevision objects.", + Description: "ReplicaSetCondition describes the state of a replica set at a certain point.", Properties: map[string]spec.Schema{ - "kind": { + "type": { SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Description: "Type of replica set condition.", Type: []string{"string"}, Format: "", }, }, - "apiVersion": { + "status": { SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Description: "Status of the condition, one of True, False, Unknown.", Type: []string{"string"}, Format: "", }, }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { + "lastTransitionTime": { SchemaProps: spec.SchemaProps{ - Description: "Items is the list of ControllerRevisions", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/apps/v1beta2.ControllerRevision"), - }, - }, - }, + Description: "The last time the condition transitioned from one status to another.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, - }, - Required: []string{"items"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/apps/v1beta2.ControllerRevision", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - }, - "k8s.io/api/apps/v1beta2.DaemonSet": { - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "DaemonSet represents the configuration of a daemon set.", - Properties: map[string]spec.Schema{ - "kind": { + "reason": { SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Description: "The reason for the condition's last transition.", Type: []string{"string"}, Format: "", }, }, - "apiVersion": { + "message": { SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Description: "A human readable message indicating details about the transition.", Type: []string{"string"}, Format: "", }, }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Description: "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - Ref: ref("k8s.io/api/apps/v1beta2.DaemonSetSpec"), - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Description: "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - Ref: ref("k8s.io/api/apps/v1beta2.DaemonSetStatus"), - }, - }, }, + Required: []string{"type", "status"}, }, }, Dependencies: []string{ - "k8s.io/api/apps/v1beta2.DaemonSetSpec", "k8s.io/api/apps/v1beta2.DaemonSetStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, }, - "k8s.io/api/apps/v1beta2.DaemonSetList": { + "k8s.io/api/apps/v1.ReplicaSetList": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "DaemonSetList is a collection of daemon sets.", + Description: "ReplicaSetList is a collection of ReplicaSets.", Properties: map[string]spec.Schema{ "kind": { SchemaProps: spec.SchemaProps{ @@ -1479,18 +1436,18 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "metadata": { SchemaProps: spec.SchemaProps{ - Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), }, }, "items": { SchemaProps: spec.SchemaProps{ - Description: "A list of daemon sets.", + Description: "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/apps/v1beta2.DaemonSet"), + Ref: ref("k8s.io/api/apps/v1.ReplicaSet"), }, }, }, @@ -1501,154 +1458,173 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, }, Dependencies: []string{ - "k8s.io/api/apps/v1beta2.DaemonSet", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + "k8s.io/api/apps/v1.ReplicaSet", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, }, - "k8s.io/api/apps/v1beta2.DaemonSetSpec": { + "k8s.io/api/apps/v1.ReplicaSetSpec": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "DaemonSetSpec is the specification of a daemon set.", + Description: "ReplicaSetSpec is the specification of a ReplicaSet.", Properties: map[string]spec.Schema{ - "selector": { - SchemaProps: spec.SchemaProps{ - Description: "A label query over pods that are managed by the daemon set. Must match in order to be controlled. If empty, defaulted to labels on Pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"), - }, - }, - "template": { - SchemaProps: spec.SchemaProps{ - Description: "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", - Ref: ref("k8s.io/api/core/v1.PodTemplateSpec"), - }, - }, - "updateStrategy": { + "replicas": { SchemaProps: spec.SchemaProps{ - Description: "An update strategy to replace existing DaemonSet pods with new pods.", - Ref: ref("k8s.io/api/apps/v1beta2.DaemonSetUpdateStrategy"), + Description: "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", + Type: []string{"integer"}, + Format: "int32", }, }, "minReadySeconds": { SchemaProps: spec.SchemaProps{ - Description: "The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).", + Description: "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", Type: []string{"integer"}, Format: "int32", }, }, - "revisionHistoryLimit": { + "selector": { SchemaProps: spec.SchemaProps{ - Description: "The number of old history to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.", - Type: []string{"integer"}, - Format: "int32", + Description: "Selector is a label query over pods that should match the replica count. Label keys and values that must match in order to be controlled by this replica set. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"), + }, + }, + "template": { + SchemaProps: spec.SchemaProps{ + Description: "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", + Ref: ref("k8s.io/api/core/v1.PodTemplateSpec"), }, }, }, - Required: []string{"template"}, + Required: []string{"selector"}, }, }, Dependencies: []string{ - "k8s.io/api/apps/v1beta2.DaemonSetUpdateStrategy", "k8s.io/api/core/v1.PodTemplateSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, + "k8s.io/api/core/v1.PodTemplateSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, }, - "k8s.io/api/apps/v1beta2.DaemonSetStatus": { + "k8s.io/api/apps/v1.ReplicaSetStatus": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "DaemonSetStatus represents the current status of a daemon set.", + Description: "ReplicaSetStatus represents the current status of a ReplicaSet.", Properties: map[string]spec.Schema{ - "currentNumberScheduled": { + "replicas": { SchemaProps: spec.SchemaProps{ - Description: "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", + Description: "Replicas is the most recently oberved number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", Type: []string{"integer"}, Format: "int32", }, }, - "numberMisscheduled": { + "fullyLabeledReplicas": { SchemaProps: spec.SchemaProps{ - Description: "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", + Description: "The number of pods that have labels matching the labels of the pod template of the replicaset.", Type: []string{"integer"}, Format: "int32", }, }, - "desiredNumberScheduled": { + "readyReplicas": { SchemaProps: spec.SchemaProps{ - Description: "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", + Description: "The number of ready replicas for this replica set.", Type: []string{"integer"}, Format: "int32", }, }, - "numberReady": { + "availableReplicas": { SchemaProps: spec.SchemaProps{ - Description: "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready.", + Description: "The number of available replicas (ready for at least minReadySeconds) for this replica set.", Type: []string{"integer"}, Format: "int32", }, }, "observedGeneration": { SchemaProps: spec.SchemaProps{ - Description: "The most recent generation observed by the daemon set controller.", + Description: "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.", Type: []string{"integer"}, Format: "int64", }, }, - "updatedNumberScheduled": { + "conditions": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge", + }, + }, SchemaProps: spec.SchemaProps{ - Description: "The total number of nodes that are running updated daemon pod", - Type: []string{"integer"}, - Format: "int32", + Description: "Represents the latest available observations of a replica set's current state.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/apps/v1.ReplicaSetCondition"), + }, + }, + }, }, }, - "numberAvailable": { + }, + Required: []string{"replicas"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/apps/v1.ReplicaSetCondition"}, + }, + "k8s.io/api/apps/v1.RollingUpdateDaemonSet": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Spec to control the desired behavior of daemon set rolling update.", + Properties: map[string]spec.Schema{ + "maxUnavailable": { SchemaProps: spec.SchemaProps{ - Description: "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds)", - Type: []string{"integer"}, - Format: "int32", + Description: "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0. Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. 
Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.", + Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), }, }, - "numberUnavailable": { + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/util/intstr.IntOrString"}, + }, + "k8s.io/api/apps/v1.RollingUpdateDeployment": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Spec to control the desired behavior of rolling update.", + Properties: map[string]spec.Schema{ + "maxUnavailable": { SchemaProps: spec.SchemaProps{ - Description: "The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds)", - Type: []string{"integer"}, - Format: "int32", + Description: "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.", + Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), }, }, - "collisionCount": { + "maxSurge": { SchemaProps: spec.SchemaProps{ - Description: "Count of hash collisions for the DaemonSet. The DaemonSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", - Type: []string{"integer"}, - Format: "int32", + Description: "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new RC can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of desired pods.", + Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), }, }, }, - Required: []string{"currentNumberScheduled", "numberMisscheduled", "desiredNumberScheduled", "numberReady"}, }, }, - Dependencies: []string{}, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/util/intstr.IntOrString"}, }, - "k8s.io/api/apps/v1beta2.DaemonSetUpdateStrategy": { + "k8s.io/api/apps/v1.RollingUpdateStatefulSetStrategy": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet.", + Description: "RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.", Properties: map[string]spec.Schema{ - "type": { - SchemaProps: spec.SchemaProps{ - Description: "Type of daemon set update. Can be \"RollingUpdate\" or \"OnDelete\". 
Default is RollingUpdate.", - Type: []string{"string"}, - Format: "", - }, - }, - "rollingUpdate": { + "partition": { SchemaProps: spec.SchemaProps{ - Description: "Rolling update config params. Present only if type = \"RollingUpdate\".", - Ref: ref("k8s.io/api/apps/v1beta2.RollingUpdateDaemonSet"), + Description: "Partition indicates the ordinal at which the StatefulSet should be partitioned. Default value is 0.", + Type: []string{"integer"}, + Format: "int32", }, }, }, }, }, - Dependencies: []string{ - "k8s.io/api/apps/v1beta2.RollingUpdateDaemonSet"}, + Dependencies: []string{}, }, - "k8s.io/api/apps/v1beta2.Deployment": { + "k8s.io/api/apps/v1.StatefulSet": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "Deployment enables declarative updates for Pods and ReplicaSets.", + Description: "StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", Properties: map[string]spec.Schema{ "kind": { SchemaProps: spec.SchemaProps{ @@ -1666,36 +1642,35 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "metadata": { SchemaProps: spec.SchemaProps{ - Description: "Standard object metadata.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), }, }, "spec": { SchemaProps: spec.SchemaProps{ - Description: "Specification of the desired behavior of the Deployment.", - Ref: ref("k8s.io/api/apps/v1beta2.DeploymentSpec"), + Description: "Spec defines the desired identities of pods in this set.", + Ref: ref("k8s.io/api/apps/v1.StatefulSetSpec"), }, }, "status": { SchemaProps: spec.SchemaProps{ - Description: "Most recently observed status of the Deployment.", - Ref: ref("k8s.io/api/apps/v1beta2.DeploymentStatus"), + Description: "Status is the current status of Pods in this StatefulSet. 
This data may be out of date by some window of time.", + Ref: ref("k8s.io/api/apps/v1.StatefulSetStatus"), }, }, }, }, }, Dependencies: []string{ - "k8s.io/api/apps/v1beta2.DeploymentSpec", "k8s.io/api/apps/v1beta2.DeploymentStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "k8s.io/api/apps/v1.StatefulSetSpec", "k8s.io/api/apps/v1.StatefulSetStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, }, - "k8s.io/api/apps/v1beta2.DeploymentCondition": { + "k8s.io/api/apps/v1.StatefulSetCondition": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "DeploymentCondition describes the state of a deployment at a certain point.", + Description: "StatefulSetCondition describes the state of a statefulset at a certain point.", Properties: map[string]spec.Schema{ "type": { SchemaProps: spec.SchemaProps{ - Description: "Type of deployment condition.", + Description: "Type of statefulset condition.", Type: []string{"string"}, Format: "", }, @@ -1707,12 +1682,6 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Format: "", }, }, - "lastUpdateTime": { - SchemaProps: spec.SchemaProps{ - Description: "The last time this condition was updated.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), - }, - }, "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "Last time the condition transitioned from one status to another.", @@ -1740,10 +1709,10 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Dependencies: []string{ "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, }, - "k8s.io/api/apps/v1beta2.DeploymentList": { + "k8s.io/api/apps/v1.StatefulSetList": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "DeploymentList is a list of Deployments.", + Description: "StatefulSetList is a collection of StatefulSets.", Properties: map[string]spec.Schema{ "kind": { SchemaProps: spec.SchemaProps{ @@ -1761,18 +1730,16 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "metadata": { SchemaProps: spec.SchemaProps{ - Description: "Standard list metadata.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), }, }, "items": { SchemaProps: spec.SchemaProps{ - Description: "Items is the list of Deployments.", - Type: []string{"array"}, + Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/apps/v1beta2.Deployment"), + Ref: ref("k8s.io/api/apps/v1.StatefulSet"), }, }, }, @@ -1783,116 +1750,136 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, }, Dependencies: []string{ - "k8s.io/api/apps/v1beta2.Deployment", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + "k8s.io/api/apps/v1.StatefulSet", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, }, - "k8s.io/api/apps/v1beta2.DeploymentSpec": { + "k8s.io/api/apps/v1.StatefulSetSpec": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "DeploymentSpec is the specification of the desired behavior of the Deployment.", + Description: "A StatefulSetSpec is the specification of a StatefulSet.", Properties: map[string]spec.Schema{ "replicas": { SchemaProps: spec.SchemaProps{ - Description: "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", + Description: "replicas is the desired number of replicas of the given Template. 
These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", Type: []string{"integer"}, Format: "int32", }, }, "selector": { SchemaProps: spec.SchemaProps{ - Description: "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment.", + Description: "selector is a label query over pods that should match the replica count. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"), }, }, "template": { SchemaProps: spec.SchemaProps{ - Description: "Template describes the pods that will be created.", + Description: "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.", Ref: ref("k8s.io/api/core/v1.PodTemplateSpec"), }, }, - "strategy": { + "volumeClaimTemplates": { SchemaProps: spec.SchemaProps{ - Description: "The deployment strategy to use to replace existing pods with new ones.", - Ref: ref("k8s.io/api/apps/v1beta2.DeploymentStrategy"), + Description: "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaim"), + }, + }, + }, }, }, - "minReadySeconds": { + "serviceName": { SchemaProps: spec.SchemaProps{ - Description: "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", - Type: []string{"integer"}, - Format: "int32", + Description: "serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", + Type: []string{"string"}, + Format: "", }, }, - "revisionHistoryLimit": { + "podManagementPolicy": { SchemaProps: spec.SchemaProps{ - Description: "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.", - Type: []string{"integer"}, - Format: "int32", + Description: "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. 
The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.", + Type: []string{"string"}, + Format: "", }, }, - "paused": { + "updateStrategy": { SchemaProps: spec.SchemaProps{ - Description: "Indicates that the deployment is paused.", - Type: []string{"boolean"}, - Format: "", + Description: "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.", + Ref: ref("k8s.io/api/apps/v1.StatefulSetUpdateStrategy"), }, }, - "progressDeadlineSeconds": { + "revisionHistoryLimit": { SchemaProps: spec.SchemaProps{ - Description: "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. Defaults to 600s.", + Description: "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", Type: []string{"integer"}, Format: "int32", }, }, }, - Required: []string{"template"}, + Required: []string{"selector", "template", "serviceName"}, }, }, Dependencies: []string{ - "k8s.io/api/apps/v1beta2.DeploymentStrategy", "k8s.io/api/core/v1.PodTemplateSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, + "k8s.io/api/apps/v1.StatefulSetUpdateStrategy", "k8s.io/api/core/v1.PersistentVolumeClaim", "k8s.io/api/core/v1.PodTemplateSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, }, - "k8s.io/api/apps/v1beta2.DeploymentStatus": { + "k8s.io/api/apps/v1.StatefulSetStatus": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "DeploymentStatus is the most recently observed status of the Deployment.", + Description: "StatefulSetStatus represents the current state of a StatefulSet.", Properties: map[string]spec.Schema{ "observedGeneration": { SchemaProps: spec.SchemaProps{ - Description: "The generation observed by the deployment controller.", + Description: "observedGeneration is the most recent generation observed for this StatefulSet. 
It corresponds to the StatefulSet's generation, which is updated on mutation by the API Server.", Type: []string{"integer"}, Format: "int64", }, }, "replicas": { SchemaProps: spec.SchemaProps{ - Description: "Total number of non-terminated pods targeted by this deployment (their labels match the selector).", + Description: "replicas is the number of Pods created by the StatefulSet controller.", Type: []string{"integer"}, Format: "int32", }, }, - "updatedReplicas": { + "readyReplicas": { SchemaProps: spec.SchemaProps{ - Description: "Total number of non-terminated pods targeted by this deployment that have the desired template spec.", + Description: "readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.", Type: []string{"integer"}, Format: "int32", }, }, - "readyReplicas": { + "currentReplicas": { SchemaProps: spec.SchemaProps{ - Description: "Total number of ready pods targeted by this deployment.", + Description: "currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by currentRevision.", Type: []string{"integer"}, Format: "int32", }, }, - "availableReplicas": { + "updatedReplicas": { SchemaProps: spec.SchemaProps{ - Description: "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.", + Description: "updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by updateRevision.", Type: []string{"integer"}, Format: "int32", }, }, - "unavailableReplicas": { + "currentRevision": { SchemaProps: spec.SchemaProps{ - Description: "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.", + Description: "currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas).", + Type: []string{"string"}, + Format: "", + }, + }, + "updateRevision": { + SchemaProps: spec.SchemaProps{ + Description: "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", + Type: []string{"string"}, + Format: "", + }, + }, + "collisionCount": { + SchemaProps: spec.SchemaProps{ + Description: "collisionCount is the count of hash collisions for the StatefulSet. 
The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", Type: []string{"integer"}, Format: "int32", }, @@ -1905,58 +1892,142 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, }, SchemaProps: spec.SchemaProps{ - Description: "Represents the latest available observations of a deployment's current state.", + Description: "Represents the latest available observations of a statefulset's current state.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/apps/v1beta2.DeploymentCondition"), + Ref: ref("k8s.io/api/apps/v1.StatefulSetCondition"), }, }, }, }, }, - "collisionCount": { + }, + Required: []string{"replicas"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/apps/v1.StatefulSetCondition"}, + }, + "k8s.io/api/apps/v1.StatefulSetUpdateStrategy": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "StatefulSetUpdateStrategy indicates the strategy that the StatefulSet controller will use to perform updates. It includes any additional parameters necessary to perform the update for the indicated strategy.", + Properties: map[string]spec.Schema{ + "type": { SchemaProps: spec.SchemaProps{ - Description: "Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.", + Description: "Type indicates the type of the StatefulSetUpdateStrategy. Default is RollingUpdate.", + Type: []string{"string"}, + Format: "", + }, + }, + "rollingUpdate": { + SchemaProps: spec.SchemaProps{ + Description: "RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.", + Ref: ref("k8s.io/api/apps/v1.RollingUpdateStatefulSetStrategy"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/apps/v1.RollingUpdateStatefulSetStrategy"}, + }, + "k8s.io/api/apps/v1beta1.ControllerRevision": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1beta2/ControllerRevision. See the release notes for more information. ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.", + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "data": { + SchemaProps: spec.SchemaProps{ + Description: "Data is the serialized representation of the state.", + Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), + }, + }, + "revision": { + SchemaProps: spec.SchemaProps{ + Description: "Revision indicates the revision of the state represented by Data.", Type: []string{"integer"}, - Format: "int32", + Format: "int64", }, }, }, + Required: []string{"revision"}, }, }, Dependencies: []string{ - "k8s.io/api/apps/v1beta2.DeploymentCondition"}, + "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta", "k8s.io/apimachinery/pkg/runtime.RawExtension"}, }, - "k8s.io/api/apps/v1beta2.DeploymentStrategy": { + "k8s.io/api/apps/v1beta1.ControllerRevisionList": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "DeploymentStrategy describes how to replace existing pods with new ones.", + Description: "ControllerRevisionList is a resource containing a list of ControllerRevision objects.", Properties: map[string]spec.Schema{ - "type": { + "kind": { SchemaProps: spec.SchemaProps{ - Description: "Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". Default is RollingUpdate.", + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", Type: []string{"string"}, Format: "", }, }, - "rollingUpdate": { + "apiVersion": { SchemaProps: spec.SchemaProps{ - Description: "Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate.", - Ref: ref("k8s.io/api/apps/v1beta2.RollingUpdateDeployment"), + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Description: "Items is the list of ControllerRevisions", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/apps/v1beta1.ControllerRevision"), + }, + }, + }, }, }, }, + Required: []string{"items"}, }, }, Dependencies: []string{ - "k8s.io/api/apps/v1beta2.RollingUpdateDeployment"}, + "k8s.io/api/apps/v1beta1.ControllerRevision", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, }, - "k8s.io/api/apps/v1beta2.ReplicaSet": { + "k8s.io/api/apps/v1beta1.Deployment": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "ReplicaSet represents the configuration of a ReplicaSet.", + Description: "DEPRECATED - This group version of Deployment is deprecated by apps/v1beta2/Deployment. See the release notes for more information. Deployment enables declarative updates for Pods and ReplicaSets.", Properties: map[string]spec.Schema{ "kind": { SchemaProps: spec.SchemaProps{ @@ -1974,36 +2045,36 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "metadata": { SchemaProps: spec.SchemaProps{ - Description: "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + Description: "Standard object metadata.", Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), }, }, "spec": { SchemaProps: spec.SchemaProps{ - Description: "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - Ref: ref("k8s.io/api/apps/v1beta2.ReplicaSetSpec"), + Description: "Specification of the desired behavior of the Deployment.", + Ref: ref("k8s.io/api/apps/v1beta1.DeploymentSpec"), }, }, "status": { SchemaProps: spec.SchemaProps{ - Description: "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - Ref: ref("k8s.io/api/apps/v1beta2.ReplicaSetStatus"), + Description: "Most recently observed status of the Deployment.", + Ref: ref("k8s.io/api/apps/v1beta1.DeploymentStatus"), }, }, }, }, }, Dependencies: []string{ - "k8s.io/api/apps/v1beta2.ReplicaSetSpec", "k8s.io/api/apps/v1beta2.ReplicaSetStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "k8s.io/api/apps/v1beta1.DeploymentSpec", "k8s.io/api/apps/v1beta1.DeploymentStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, }, - "k8s.io/api/apps/v1beta2.ReplicaSetCondition": { + "k8s.io/api/apps/v1beta1.DeploymentCondition": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "ReplicaSetCondition describes the state of a replica set at a certain point.", + Description: "DeploymentCondition describes the state of a deployment at a certain point.", Properties: map[string]spec.Schema{ "type": { SchemaProps: spec.SchemaProps{ - Description: "Type of replica set condition.", + Description: "Type of deployment condition.", Type: []string{"string"}, Format: "", }, @@ -2015,9 +2086,15 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Format: "", }, }, + "lastUpdateTime": { + SchemaProps: spec.SchemaProps{ + Description: "The last time this condition was updated.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, "lastTransitionTime": { SchemaProps: spec.SchemaProps{ - Description: "The last time the condition transitioned from one status to another.", + Description: "Last time the condition transitioned from one status to another.", Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -2042,10 +2119,10 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Dependencies: []string{ "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, }, - "k8s.io/api/apps/v1beta2.ReplicaSetList": { + "k8s.io/api/apps/v1beta1.DeploymentList": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "ReplicaSetList is a collection of ReplicaSets.", + Description: "DeploymentList is a list of Deployments.", Properties: map[string]spec.Schema{ "kind": { SchemaProps: spec.SchemaProps{ @@ -2063,18 +2140,18 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "metadata": { SchemaProps: spec.SchemaProps{ - Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Description: "Standard list metadata.", Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), }, }, "items": { SchemaProps: spec.SchemaProps{ - Description: "List of ReplicaSets. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller", + Description: "Items is the list of Deployments.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/apps/v1beta2.ReplicaSet"), + Ref: ref("k8s.io/api/apps/v1beta1.Deployment"), }, }, }, @@ -2085,20 +2162,91 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, }, Dependencies: []string{ - "k8s.io/api/apps/v1beta2.ReplicaSet", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + "k8s.io/api/apps/v1beta1.Deployment", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, }, - "k8s.io/api/apps/v1beta2.ReplicaSetSpec": { + "k8s.io/api/apps/v1beta1.DeploymentRollback": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "ReplicaSetSpec is the specification of a ReplicaSet.", + Description: "DEPRECATED. DeploymentRollback stores the information required to rollback a deployment.", + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Required: This must match the Name of a deployment.", + Type: []string{"string"}, + Format: "", + }, + }, + "updatedAnnotations": { + SchemaProps: spec.SchemaProps{ + Description: "The annotations to be updated to a deployment", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "rollbackTo": { + SchemaProps: spec.SchemaProps{ + Description: "The config of this deployment rollback.", + Ref: ref("k8s.io/api/apps/v1beta1.RollbackConfig"), + }, + }, + }, + Required: []string{"name", "rollbackTo"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/apps/v1beta1.RollbackConfig"}, + }, + "k8s.io/api/apps/v1beta1.DeploymentSpec": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "DeploymentSpec is the specification of the desired behavior of the Deployment.", Properties: map[string]spec.Schema{ "replicas": { SchemaProps: spec.SchemaProps{ - Description: "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", + Description: "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", Type: []string{"integer"}, Format: "int32", }, }, + "selector": { + SchemaProps: spec.SchemaProps{ + Description: "Label selector for pods. 
Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"), + }, + }, + "template": { + SchemaProps: spec.SchemaProps{ + Description: "Template describes the pods that will be created.", + Ref: ref("k8s.io/api/core/v1.PodTemplateSpec"), + }, + }, + "strategy": { + SchemaProps: spec.SchemaProps{ + Description: "The deployment strategy to use to replace existing pods with new ones.", + Ref: ref("k8s.io/api/apps/v1beta1.DeploymentStrategy"), + }, + }, "minReadySeconds": { SchemaProps: spec.SchemaProps{ Description: "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", @@ -2106,62 +2254,85 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Format: "int32", }, }, - "selector": { + "revisionHistoryLimit": { SchemaProps: spec.SchemaProps{ - Description: "Selector is a label query over pods that should match the replica count. If the selector is empty, it is defaulted to the labels present on the pod template. Label keys and values that must match in order to be controlled by this replica set. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"), + Description: "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 2.", + Type: []string{"integer"}, + Format: "int32", }, }, - "template": { + "paused": { SchemaProps: spec.SchemaProps{ - Description: "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", - Ref: ref("k8s.io/api/core/v1.PodTemplateSpec"), + Description: "Indicates that the deployment is paused.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "rollbackTo": { + SchemaProps: spec.SchemaProps{ + Description: "DEPRECATED. The config this deployment is rolling back to. Will be cleared after rollback is done.", + Ref: ref("k8s.io/api/apps/v1beta1.RollbackConfig"), + }, + }, + "progressDeadlineSeconds": { + SchemaProps: spec.SchemaProps{ + Description: "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. 
Defaults to 600s.", + Type: []string{"integer"}, + Format: "int32", }, }, }, + Required: []string{"template"}, }, }, Dependencies: []string{ - "k8s.io/api/core/v1.PodTemplateSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, + "k8s.io/api/apps/v1beta1.DeploymentStrategy", "k8s.io/api/apps/v1beta1.RollbackConfig", "k8s.io/api/core/v1.PodTemplateSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, }, - "k8s.io/api/apps/v1beta2.ReplicaSetStatus": { + "k8s.io/api/apps/v1beta1.DeploymentStatus": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "ReplicaSetStatus represents the current status of a ReplicaSet.", + Description: "DeploymentStatus is the most recently observed status of the Deployment.", Properties: map[string]spec.Schema{ + "observedGeneration": { + SchemaProps: spec.SchemaProps{ + Description: "The generation observed by the deployment controller.", + Type: []string{"integer"}, + Format: "int64", + }, + }, "replicas": { SchemaProps: spec.SchemaProps{ - Description: "Replicas is the most recently oberved number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", + Description: "Total number of non-terminated pods targeted by this deployment (their labels match the selector).", Type: []string{"integer"}, Format: "int32", }, }, - "fullyLabeledReplicas": { + "updatedReplicas": { SchemaProps: spec.SchemaProps{ - Description: "The number of pods that have labels matching the labels of the pod template of the replicaset.", + Description: "Total number of non-terminated pods targeted by this deployment that have the desired template spec.", Type: []string{"integer"}, Format: "int32", }, }, "readyReplicas": { SchemaProps: spec.SchemaProps{ - Description: "The number of ready replicas for this replica set.", + Description: "Total number of ready pods targeted by this deployment.", Type: []string{"integer"}, Format: "int32", }, }, "availableReplicas": { SchemaProps: spec.SchemaProps{ - Description: "The number of available replicas (ready for at least minReadySeconds) for this replica set.", + Description: "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.", Type: []string{"integer"}, Format: "int32", }, }, - "observedGeneration": { + "unavailableReplicas": { SchemaProps: spec.SchemaProps{ - Description: "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.", + Description: "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. 
They may either be pods that are running but not yet available or pods that still have not been created.", Type: []string{"integer"}, - Format: "int64", + Format: "int32", }, }, "conditions": { @@ -2172,42 +2343,72 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, }, SchemaProps: spec.SchemaProps{ - Description: "Represents the latest available observations of a replica set's current state.", + Description: "Represents the latest available observations of a deployment's current state.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/apps/v1beta2.ReplicaSetCondition"), + Ref: ref("k8s.io/api/apps/v1beta1.DeploymentCondition"), }, }, }, }, }, + "collisionCount": { + SchemaProps: spec.SchemaProps{ + Description: "Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.", + Type: []string{"integer"}, + Format: "int32", + }, + }, }, - Required: []string{"replicas"}, }, }, Dependencies: []string{ - "k8s.io/api/apps/v1beta2.ReplicaSetCondition"}, + "k8s.io/api/apps/v1beta1.DeploymentCondition"}, }, - "k8s.io/api/apps/v1beta2.RollingUpdateDaemonSet": { + "k8s.io/api/apps/v1beta1.DeploymentStrategy": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "Spec to control the desired behavior of daemon set rolling update.", + Description: "DeploymentStrategy describes how to replace existing pods with new ones.", Properties: map[string]spec.Schema{ - "maxUnavailable": { + "type": { SchemaProps: spec.SchemaProps{ - Description: "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0. Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.", - Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), + Description: "Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". Default is RollingUpdate.", + Type: []string{"string"}, + Format: "", + }, + }, + "rollingUpdate": { + SchemaProps: spec.SchemaProps{ + Description: "Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate.", + Ref: ref("k8s.io/api/apps/v1beta1.RollingUpdateDeployment"), }, }, }, }, }, Dependencies: []string{ - "k8s.io/apimachinery/pkg/util/intstr.IntOrString"}, + "k8s.io/api/apps/v1beta1.RollingUpdateDeployment"}, }, - "k8s.io/api/apps/v1beta2.RollingUpdateDeployment": { + "k8s.io/api/apps/v1beta1.RollbackConfig": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "DEPRECATED.", + Properties: map[string]spec.Schema{ + "revision": { + SchemaProps: spec.SchemaProps{ + Description: "The revision to rollback to. 
If set to 0, rollback to the last revision.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + }, + }, + }, + Dependencies: []string{}, + }, + "k8s.io/api/apps/v1beta1.RollingUpdateDeployment": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ Description: "Spec to control the desired behavior of rolling update.", @@ -2230,14 +2431,14 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Dependencies: []string{ "k8s.io/apimachinery/pkg/util/intstr.IntOrString"}, }, - "k8s.io/api/apps/v1beta2.RollingUpdateStatefulSetStrategy": { + "k8s.io/api/apps/v1beta1.RollingUpdateStatefulSetStrategy": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ Description: "RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.", Properties: map[string]spec.Schema{ "partition": { SchemaProps: spec.SchemaProps{ - Description: "Partition indicates the ordinal at which the StatefulSet should be partitioned. Default value is 0.", + Description: "Partition indicates the ordinal at which the StatefulSet should be partitioned.", Type: []string{"integer"}, Format: "int32", }, @@ -2247,7 +2448,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, Dependencies: []string{}, }, - "k8s.io/api/apps/v1beta2.Scale": { + "k8s.io/api/apps/v1beta1.Scale": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ Description: "Scale represents a scaling request for a resource.", @@ -2275,22 +2476,22 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "spec": { SchemaProps: spec.SchemaProps{ Description: "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", - Ref: ref("k8s.io/api/apps/v1beta2.ScaleSpec"), + Ref: ref("k8s.io/api/apps/v1beta1.ScaleSpec"), }, }, "status": { SchemaProps: spec.SchemaProps{ Description: "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.", - Ref: ref("k8s.io/api/apps/v1beta2.ScaleStatus"), + Ref: ref("k8s.io/api/apps/v1beta1.ScaleStatus"), }, }, }, }, }, Dependencies: []string{ - "k8s.io/api/apps/v1beta2.ScaleSpec", "k8s.io/api/apps/v1beta2.ScaleStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "k8s.io/api/apps/v1beta1.ScaleSpec", "k8s.io/api/apps/v1beta1.ScaleStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, }, - "k8s.io/api/apps/v1beta2.ScaleSpec": { + "k8s.io/api/apps/v1beta1.ScaleSpec": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ Description: "ScaleSpec describes the attributes of a scale subresource", @@ -2307,7 +2508,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, Dependencies: []string{}, }, - "k8s.io/api/apps/v1beta2.ScaleStatus": { + "k8s.io/api/apps/v1beta1.ScaleStatus": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ Description: "ScaleStatus represents the current status of a scale subresource.", @@ -2346,10 +2547,10 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, Dependencies: []string{}, }, - "k8s.io/api/apps/v1beta2.StatefulSet": { + "k8s.io/api/apps/v1beta1.StatefulSet": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "StatefulSet represents a set of pods with consistent identities. 
Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", + Description: "DEPRECATED - This group version of StatefulSet is deprecated by apps/v1beta2/StatefulSet. See the release notes for more information. StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", Properties: map[string]spec.Schema{ "kind": { SchemaProps: spec.SchemaProps{ @@ -2373,70 +2574,116 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "spec": { SchemaProps: spec.SchemaProps{ Description: "Spec defines the desired identities of pods in this set.", - Ref: ref("k8s.io/api/apps/v1beta2.StatefulSetSpec"), + Ref: ref("k8s.io/api/apps/v1beta1.StatefulSetSpec"), }, }, "status": { SchemaProps: spec.SchemaProps{ Description: "Status is the current status of Pods in this StatefulSet. This data may be out of date by some window of time.", - Ref: ref("k8s.io/api/apps/v1beta2.StatefulSetStatus"), + Ref: ref("k8s.io/api/apps/v1beta1.StatefulSetStatus"), }, }, }, }, }, Dependencies: []string{ - "k8s.io/api/apps/v1beta2.StatefulSetSpec", "k8s.io/api/apps/v1beta2.StatefulSetStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "k8s.io/api/apps/v1beta1.StatefulSetSpec", "k8s.io/api/apps/v1beta1.StatefulSetStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, }, - "k8s.io/api/apps/v1beta2.StatefulSetList": { + "k8s.io/api/apps/v1beta1.StatefulSetCondition": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "StatefulSetList is a collection of StatefulSets.", + Description: "StatefulSetCondition describes the state of a statefulset at a certain point.", Properties: map[string]spec.Schema{ - "kind": { + "type": { SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Description: "Type of statefulset condition.", Type: []string{"string"}, Format: "", }, }, - "apiVersion": { + "status": { SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Description: "Status of the condition, one of True, False, Unknown.", Type: []string{"string"}, Format: "", }, }, - "metadata": { + "lastTransitionTime": { SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + Description: "Last time the condition transitioned from one status to another.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, - "items": { + "reason": { SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/apps/v1beta2.StatefulSet"), - }, - }, - }, + Description: "The reason for the condition's last transition.", + Type: []string{"string"}, + Format: "", + }, + }, + "message": { + SchemaProps: spec.SchemaProps{ + Description: "A human readable message indicating details about the transition.", + Type: []string{"string"}, + Format: "", }, }, }, - Required: []string{"items"}, + Required: []string{"type", "status"}, }, }, Dependencies: []string{ - "k8s.io/api/apps/v1beta2.StatefulSet", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, }, - "k8s.io/api/apps/v1beta2.StatefulSetSpec": { + "k8s.io/api/apps/v1beta1.StatefulSetList": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "A StatefulSetSpec is the specification of a StatefulSet.", + Description: "StatefulSetList is a collection of StatefulSets.", Properties: map[string]spec.Schema{ - "replicas": { + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/apps/v1beta1.StatefulSet"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/apps/v1beta1.StatefulSet", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + }, + "k8s.io/api/apps/v1beta1.StatefulSetSpec": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "A StatefulSetSpec is the specification of a StatefulSet.", + Properties: map[string]spec.Schema{ + "replicas": { SchemaProps: spec.SchemaProps{ Description: "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. 
If unspecified, defaults to 1.", Type: []string{"integer"}, @@ -2485,7 +2732,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "updateStrategy": { SchemaProps: spec.SchemaProps{ Description: "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.", - Ref: ref("k8s.io/api/apps/v1beta2.StatefulSetUpdateStrategy"), + Ref: ref("k8s.io/api/apps/v1beta1.StatefulSetUpdateStrategy"), }, }, "revisionHistoryLimit": { @@ -2500,9 +2747,9 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, }, Dependencies: []string{ - "k8s.io/api/apps/v1beta2.StatefulSetUpdateStrategy", "k8s.io/api/core/v1.PersistentVolumeClaim", "k8s.io/api/core/v1.PodTemplateSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, + "k8s.io/api/apps/v1beta1.StatefulSetUpdateStrategy", "k8s.io/api/core/v1.PersistentVolumeClaim", "k8s.io/api/core/v1.PodTemplateSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, }, - "k8s.io/api/apps/v1beta2.StatefulSetStatus": { + "k8s.io/api/apps/v1beta1.StatefulSetStatus": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ Description: "StatefulSetStatus represents the current state of a StatefulSet.", @@ -2563,20 +2810,40 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Format: "int32", }, }, + "conditions": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Represents the latest available observations of a statefulset's current state.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/apps/v1beta1.StatefulSetCondition"), + }, + }, + }, + }, + }, }, Required: []string{"replicas"}, }, }, - Dependencies: []string{}, + Dependencies: []string{ + "k8s.io/api/apps/v1beta1.StatefulSetCondition"}, }, - "k8s.io/api/apps/v1beta2.StatefulSetUpdateStrategy": { + "k8s.io/api/apps/v1beta1.StatefulSetUpdateStrategy": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ Description: "StatefulSetUpdateStrategy indicates the strategy that the StatefulSet controller will use to perform updates. It includes any additional parameters necessary to perform the update for the indicated strategy.", Properties: map[string]spec.Schema{ "type": { SchemaProps: spec.SchemaProps{ - Description: "Type indicates the type of the StatefulSetUpdateStrategy. 
Default is RollingUpdate.", + Description: "Type indicates the type of the StatefulSetUpdateStrategy.", Type: []string{"string"}, Format: "", }, @@ -2584,19 +2851,19 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "rollingUpdate": { SchemaProps: spec.SchemaProps{ Description: "RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.", - Ref: ref("k8s.io/api/apps/v1beta2.RollingUpdateStatefulSetStrategy"), + Ref: ref("k8s.io/api/apps/v1beta1.RollingUpdateStatefulSetStrategy"), }, }, }, }, }, Dependencies: []string{ - "k8s.io/api/apps/v1beta2.RollingUpdateStatefulSetStrategy"}, + "k8s.io/api/apps/v1beta1.RollingUpdateStatefulSetStrategy"}, }, - "k8s.io/api/authentication/v1.TokenReview": { + "k8s.io/api/apps/v1beta2.ControllerRevision": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "TokenReview attempts to authenticate a token to a known user. Note: TokenReview requests may be cached by the webhook token authenticator plugin in the kube-apiserver.", + Description: "DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1/ControllerRevision. See the release notes for more information. ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.", Properties: map[string]spec.Schema{ "kind": { SchemaProps: spec.SchemaProps{ @@ -2614,139 +2881,79 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "metadata": { SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + Description: "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), }, }, - "spec": { + "data": { SchemaProps: spec.SchemaProps{ - Description: "Spec holds information about the request being evaluated", - Ref: ref("k8s.io/api/authentication/v1.TokenReviewSpec"), + Description: "Data is the serialized representation of the state.", + Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), }, }, - "status": { + "revision": { SchemaProps: spec.SchemaProps{ - Description: "Status is filled in by the server and indicates whether the request can be authenticated.", - Ref: ref("k8s.io/api/authentication/v1.TokenReviewStatus"), + Description: "Revision indicates the revision of the state represented by Data.", + Type: []string{"integer"}, + Format: "int64", }, }, }, - Required: []string{"spec"}, + Required: []string{"revision"}, }, }, Dependencies: []string{ - "k8s.io/api/authentication/v1.TokenReviewSpec", "k8s.io/api/authentication/v1.TokenReviewStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - }, - "k8s.io/api/authentication/v1.TokenReviewSpec": { - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "TokenReviewSpec is a description of the token authentication request.", - Properties: map[string]spec.Schema{ - "token": { - SchemaProps: spec.SchemaProps{ - Description: "Token is the opaque bearer token.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{}, + "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta", "k8s.io/apimachinery/pkg/runtime.RawExtension"}, }, - "k8s.io/api/authentication/v1.TokenReviewStatus": { + "k8s.io/api/apps/v1beta2.ControllerRevisionList": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "TokenReviewStatus is the result of the token authentication request.", + Description: "ControllerRevisionList is a resource containing a list of ControllerRevision objects.", Properties: map[string]spec.Schema{ - "authenticated": { - SchemaProps: spec.SchemaProps{ - Description: "Authenticated indicates that the token was associated with a known user.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "user": { - SchemaProps: spec.SchemaProps{ - Description: "User is the UserInfo associated with the provided token.", - Ref: ref("k8s.io/api/authentication/v1.UserInfo"), - }, - }, - "error": { + "kind": { SchemaProps: spec.SchemaProps{ - Description: "Error indicates that the token couldn't be checked", + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", Type: []string{"string"}, Format: "", }, }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/authentication/v1.UserInfo"}, - }, - "k8s.io/api/authentication/v1.UserInfo": { - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "UserInfo holds the information about the user needed to implement the user.Info interface.", - Properties: map[string]spec.Schema{ - "username": { + "apiVersion": { SchemaProps: spec.SchemaProps{ - Description: "The name that uniquely identifies this user among all active users.", + Description: "APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", Type: []string{"string"}, Format: "", }, }, - "uid": { + "metadata": { SchemaProps: spec.SchemaProps{ - Description: "A unique value that identifies this user across time. If this user is deleted and another user by the same name is added, they will have different UIDs.", - Type: []string{"string"}, - Format: "", + Description: "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), }, }, - "groups": { + "items": { SchemaProps: spec.SchemaProps{ - Description: "The names of groups this user is a part of.", + Description: "Items is the list of ControllerRevisions", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "extra": { - SchemaProps: spec.SchemaProps{ - Description: "Any additional information provided by the authenticator.", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, + Ref: ref("k8s.io/api/apps/v1beta2.ControllerRevision"), }, }, }, }, }, }, + Required: []string{"items"}, }, }, - Dependencies: []string{}, + Dependencies: []string{ + "k8s.io/api/apps/v1beta2.ControllerRevision", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, }, - "k8s.io/api/authentication/v1beta1.TokenReview": { + "k8s.io/api/apps/v1beta2.DaemonSet": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "TokenReview attempts to authenticate a token to a known user. Note: TokenReview requests may be cached by the webhook token authenticator plugin in the kube-apiserver.", + Description: "DEPRECATED - This group version of DaemonSet is deprecated by apps/v1/DaemonSet. See the release notes for more information. DaemonSet represents the configuration of a daemon set.", Properties: map[string]spec.Schema{ "kind": { SchemaProps: spec.SchemaProps{ @@ -2764,84 +2971,1545 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "metadata": { SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), }, }, "spec": { SchemaProps: spec.SchemaProps{ - Description: "Spec holds information about the request being evaluated", - Ref: ref("k8s.io/api/authentication/v1beta1.TokenReviewSpec"), + Description: "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + Ref: ref("k8s.io/api/apps/v1beta2.DaemonSetSpec"), }, }, "status": { SchemaProps: spec.SchemaProps{ - Description: "Status is filled in by the server and indicates whether the request can be authenticated.", - Ref: ref("k8s.io/api/authentication/v1beta1.TokenReviewStatus"), + Description: "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + Ref: ref("k8s.io/api/apps/v1beta2.DaemonSetStatus"), }, }, }, - Required: []string{"spec"}, }, }, Dependencies: []string{ - "k8s.io/api/authentication/v1beta1.TokenReviewSpec", "k8s.io/api/authentication/v1beta1.TokenReviewStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "k8s.io/api/apps/v1beta2.DaemonSetSpec", "k8s.io/api/apps/v1beta2.DaemonSetStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, }, - "k8s.io/api/authentication/v1beta1.TokenReviewSpec": { + "k8s.io/api/apps/v1beta2.DaemonSetCondition": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "TokenReviewSpec is a description of the token authentication request.", + Description: "DaemonSetCondition describes the state of a DaemonSet at a certain point.", Properties: map[string]spec.Schema{ - "token": { + "type": { SchemaProps: spec.SchemaProps{ - Description: "Token is the opaque bearer token.", + Description: "Type of DaemonSet condition.", Type: []string{"string"}, Format: "", }, }, - }, - }, - }, - Dependencies: []string{}, - }, - "k8s.io/api/authentication/v1beta1.TokenReviewStatus": { - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "TokenReviewStatus is the result of the token authentication request.", - Properties: map[string]spec.Schema{ - "authenticated": { + "status": { SchemaProps: spec.SchemaProps{ - Description: "Authenticated indicates that the token was associated with a known user.", - Type: []string{"boolean"}, + Description: "Status of the condition, one of True, False, Unknown.", + Type: []string{"string"}, Format: "", }, }, - "user": { + "lastTransitionTime": { SchemaProps: spec.SchemaProps{ - Description: "User is the UserInfo associated with the provided token.", - Ref: ref("k8s.io/api/authentication/v1beta1.UserInfo"), + Description: "Last time the condition transitioned from one status to another.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, - "error": { + "reason": { SchemaProps: spec.SchemaProps{ - Description: "Error indicates that the token couldn't be checked", + Description: "The reason for the condition's last transition.", + Type: []string{"string"}, + Format: "", + }, + }, + "message": { + SchemaProps: spec.SchemaProps{ + Description: "A human readable message indicating details about the transition.", Type: []string{"string"}, Format: "", }, }, }, + Required: []string{"type", "status"}, }, }, Dependencies: []string{ - "k8s.io/api/authentication/v1beta1.UserInfo"}, + "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, }, - "k8s.io/api/authentication/v1beta1.UserInfo": { + "k8s.io/api/apps/v1beta2.DaemonSetList": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "UserInfo holds the information about the user needed to implement the user.Info interface.", + Description: "DaemonSetList is a collection of daemon sets.", Properties: map[string]spec.Schema{ - "username": { + "kind": { SchemaProps: spec.SchemaProps{ - Description: "The name that uniquely identifies this user among all active users.", + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Description: "A list of daemon sets.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/apps/v1beta2.DaemonSet"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/apps/v1beta2.DaemonSet", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + }, + "k8s.io/api/apps/v1beta2.DaemonSetSpec": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "DaemonSetSpec is the specification of a daemon set.", + Properties: map[string]spec.Schema{ + "selector": { + SchemaProps: spec.SchemaProps{ + Description: "A label query over pods that are managed by the daemon set. Must match in order to be controlled. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"), + }, + }, + "template": { + SchemaProps: spec.SchemaProps{ + Description: "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", + Ref: ref("k8s.io/api/core/v1.PodTemplateSpec"), + }, + }, + "updateStrategy": { + SchemaProps: spec.SchemaProps{ + Description: "An update strategy to replace existing DaemonSet pods with new pods.", + Ref: ref("k8s.io/api/apps/v1beta2.DaemonSetUpdateStrategy"), + }, + }, + "minReadySeconds": { + SchemaProps: spec.SchemaProps{ + Description: "The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "revisionHistoryLimit": { + SchemaProps: spec.SchemaProps{ + Description: "The number of old history to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. 
Defaults to 10.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + }, + Required: []string{"selector", "template"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/apps/v1beta2.DaemonSetUpdateStrategy", "k8s.io/api/core/v1.PodTemplateSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, + }, + "k8s.io/api/apps/v1beta2.DaemonSetStatus": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "DaemonSetStatus represents the current status of a daemon set.", + Properties: map[string]spec.Schema{ + "currentNumberScheduled": { + SchemaProps: spec.SchemaProps{ + Description: "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "numberMisscheduled": { + SchemaProps: spec.SchemaProps{ + Description: "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "desiredNumberScheduled": { + SchemaProps: spec.SchemaProps{ + Description: "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "numberReady": { + SchemaProps: spec.SchemaProps{ + Description: "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "observedGeneration": { + SchemaProps: spec.SchemaProps{ + Description: "The most recent generation observed by the daemon set controller.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "updatedNumberScheduled": { + SchemaProps: spec.SchemaProps{ + Description: "The total number of nodes that are running updated daemon pod", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "numberAvailable": { + SchemaProps: spec.SchemaProps{ + Description: "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds)", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "numberUnavailable": { + SchemaProps: spec.SchemaProps{ + Description: "The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds)", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "collisionCount": { + SchemaProps: spec.SchemaProps{ + Description: "Count of hash collisions for the DaemonSet. 
The DaemonSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "conditions": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Represents the latest available observations of a DaemonSet's current state.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/apps/v1beta2.DaemonSetCondition"), + }, + }, + }, + }, + }, + }, + Required: []string{"currentNumberScheduled", "numberMisscheduled", "desiredNumberScheduled", "numberReady"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/apps/v1beta2.DaemonSetCondition"}, + }, + "k8s.io/api/apps/v1beta2.DaemonSetUpdateStrategy": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet.", + Properties: map[string]spec.Schema{ + "type": { + SchemaProps: spec.SchemaProps{ + Description: "Type of daemon set update. Can be \"RollingUpdate\" or \"OnDelete\". Default is RollingUpdate.", + Type: []string{"string"}, + Format: "", + }, + }, + "rollingUpdate": { + SchemaProps: spec.SchemaProps{ + Description: "Rolling update config params. Present only if type = \"RollingUpdate\".", + Ref: ref("k8s.io/api/apps/v1beta2.RollingUpdateDaemonSet"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/apps/v1beta2.RollingUpdateDaemonSet"}, + }, + "k8s.io/api/apps/v1beta2.Deployment": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "DEPRECATED - This group version of Deployment is deprecated by apps/v1/Deployment. See the release notes for more information. Deployment enables declarative updates for Pods and ReplicaSets.", + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard object metadata.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Description: "Specification of the desired behavior of the Deployment.", + Ref: ref("k8s.io/api/apps/v1beta2.DeploymentSpec"), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "Most recently observed status of the Deployment.", + Ref: ref("k8s.io/api/apps/v1beta2.DeploymentStatus"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/apps/v1beta2.DeploymentSpec", "k8s.io/api/apps/v1beta2.DeploymentStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + }, + "k8s.io/api/apps/v1beta2.DeploymentCondition": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "DeploymentCondition describes the state of a deployment at a certain point.", + Properties: map[string]spec.Schema{ + "type": { + SchemaProps: spec.SchemaProps{ + Description: "Type of deployment condition.", + Type: []string{"string"}, + Format: "", + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "Status of the condition, one of True, False, Unknown.", + Type: []string{"string"}, + Format: "", + }, + }, + "lastUpdateTime": { + SchemaProps: spec.SchemaProps{ + Description: "The last time this condition was updated.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "lastTransitionTime": { + SchemaProps: spec.SchemaProps{ + Description: "Last time the condition transitioned from one status to another.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "reason": { + SchemaProps: spec.SchemaProps{ + Description: "The reason for the condition's last transition.", + Type: []string{"string"}, + Format: "", + }, + }, + "message": { + SchemaProps: spec.SchemaProps{ + Description: "A human readable message indicating details about the transition.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"type", "status"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, + }, + "k8s.io/api/apps/v1beta2.DeploymentList": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "DeploymentList is a list of Deployments.", + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard list metadata.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Description: "Items is the list of Deployments.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/apps/v1beta2.Deployment"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/apps/v1beta2.Deployment", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + }, + "k8s.io/api/apps/v1beta2.DeploymentSpec": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "DeploymentSpec is the specification of the desired behavior of the Deployment.", + Properties: map[string]spec.Schema{ + "replicas": { + SchemaProps: spec.SchemaProps{ + Description: "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "selector": { + SchemaProps: spec.SchemaProps{ + Description: "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment. It must match the pod template's labels.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"), + }, + }, + "template": { + SchemaProps: spec.SchemaProps{ + Description: "Template describes the pods that will be created.", + Ref: ref("k8s.io/api/core/v1.PodTemplateSpec"), + }, + }, + "strategy": { + SchemaProps: spec.SchemaProps{ + Description: "The deployment strategy to use to replace existing pods with new ones.", + Ref: ref("k8s.io/api/apps/v1beta2.DeploymentStrategy"), + }, + }, + "minReadySeconds": { + SchemaProps: spec.SchemaProps{ + Description: "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "revisionHistoryLimit": { + SchemaProps: spec.SchemaProps{ + Description: "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "paused": { + SchemaProps: spec.SchemaProps{ + Description: "Indicates that the deployment is paused.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "progressDeadlineSeconds": { + SchemaProps: spec.SchemaProps{ + Description: "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. 
Defaults to 600s.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + }, + Required: []string{"selector", "template"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/apps/v1beta2.DeploymentStrategy", "k8s.io/api/core/v1.PodTemplateSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, + }, + "k8s.io/api/apps/v1beta2.DeploymentStatus": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "DeploymentStatus is the most recently observed status of the Deployment.", + Properties: map[string]spec.Schema{ + "observedGeneration": { + SchemaProps: spec.SchemaProps{ + Description: "The generation observed by the deployment controller.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "replicas": { + SchemaProps: spec.SchemaProps{ + Description: "Total number of non-terminated pods targeted by this deployment (their labels match the selector).", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "updatedReplicas": { + SchemaProps: spec.SchemaProps{ + Description: "Total number of non-terminated pods targeted by this deployment that have the desired template spec.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "readyReplicas": { + SchemaProps: spec.SchemaProps{ + Description: "Total number of ready pods targeted by this deployment.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "availableReplicas": { + SchemaProps: spec.SchemaProps{ + Description: "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "unavailableReplicas": { + SchemaProps: spec.SchemaProps{ + Description: "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "conditions": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Represents the latest available observations of a deployment's current state.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/apps/v1beta2.DeploymentCondition"), + }, + }, + }, + }, + }, + "collisionCount": { + SchemaProps: spec.SchemaProps{ + Description: "Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/apps/v1beta2.DeploymentCondition"}, + }, + "k8s.io/api/apps/v1beta2.DeploymentStrategy": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "DeploymentStrategy describes how to replace existing pods with new ones.", + Properties: map[string]spec.Schema{ + "type": { + SchemaProps: spec.SchemaProps{ + Description: "Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". Default is RollingUpdate.", + Type: []string{"string"}, + Format: "", + }, + }, + "rollingUpdate": { + SchemaProps: spec.SchemaProps{ + Description: "Rolling update config params. 
Present only if DeploymentStrategyType = RollingUpdate.", + Ref: ref("k8s.io/api/apps/v1beta2.RollingUpdateDeployment"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/apps/v1beta2.RollingUpdateDeployment"}, + }, + "k8s.io/api/apps/v1beta2.ReplicaSet": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1/ReplicaSet. See the release notes for more information. ReplicaSet ensures that a specified number of pod replicas are running at any given time.", + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Description: "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + Ref: ref("k8s.io/api/apps/v1beta2.ReplicaSetSpec"), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + Ref: ref("k8s.io/api/apps/v1beta2.ReplicaSetStatus"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/apps/v1beta2.ReplicaSetSpec", "k8s.io/api/apps/v1beta2.ReplicaSetStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + }, + "k8s.io/api/apps/v1beta2.ReplicaSetCondition": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ReplicaSetCondition describes the state of a replica set at a certain point.", + Properties: map[string]spec.Schema{ + "type": { + SchemaProps: spec.SchemaProps{ + Description: "Type of replica set condition.", + Type: []string{"string"}, + Format: "", + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "Status of the condition, one of True, False, Unknown.", + Type: []string{"string"}, + Format: "", + }, + }, + "lastTransitionTime": { + SchemaProps: spec.SchemaProps{ + Description: "The last time the condition transitioned from one status to another.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "reason": { + SchemaProps: spec.SchemaProps{ + Description: "The reason for the condition's last transition.", + Type: []string{"string"}, + Format: "", + }, + }, + "message": { + SchemaProps: spec.SchemaProps{ + Description: "A human readable message indicating details about the transition.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"type", "status"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, + }, + "k8s.io/api/apps/v1beta2.ReplicaSetList": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ReplicaSetList is a collection of ReplicaSets.", + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Description: "List of ReplicaSets. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/apps/v1beta2.ReplicaSet"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/apps/v1beta2.ReplicaSet", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + }, + "k8s.io/api/apps/v1beta2.ReplicaSetSpec": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ReplicaSetSpec is the specification of a ReplicaSet.", + Properties: map[string]spec.Schema{ + "replicas": { + SchemaProps: spec.SchemaProps{ + Description: "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "minReadySeconds": { + SchemaProps: spec.SchemaProps{ + Description: "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "selector": { + SchemaProps: spec.SchemaProps{ + Description: "Selector is a label query over pods that should match the replica count. Label keys and values that must match in order to be controlled by this replica set. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"), + }, + }, + "template": { + SchemaProps: spec.SchemaProps{ + Description: "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", + Ref: ref("k8s.io/api/core/v1.PodTemplateSpec"), + }, + }, + }, + Required: []string{"selector"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.PodTemplateSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, + }, + "k8s.io/api/apps/v1beta2.ReplicaSetStatus": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ReplicaSetStatus represents the current status of a ReplicaSet.", + Properties: map[string]spec.Schema{ + "replicas": { + SchemaProps: spec.SchemaProps{ + Description: "Replicas is the most recently oberved number of replicas. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "fullyLabeledReplicas": { + SchemaProps: spec.SchemaProps{ + Description: "The number of pods that have labels matching the labels of the pod template of the replicaset.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "readyReplicas": { + SchemaProps: spec.SchemaProps{ + Description: "The number of ready replicas for this replica set.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "availableReplicas": { + SchemaProps: spec.SchemaProps{ + Description: "The number of available replicas (ready for at least minReadySeconds) for this replica set.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "observedGeneration": { + SchemaProps: spec.SchemaProps{ + Description: "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "conditions": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Represents the latest available observations of a replica set's current state.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/apps/v1beta2.ReplicaSetCondition"), + }, + }, + }, + }, + }, + }, + Required: []string{"replicas"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/apps/v1beta2.ReplicaSetCondition"}, + }, + "k8s.io/api/apps/v1beta2.RollingUpdateDaemonSet": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Spec to control the desired behavior of daemon set rolling update.", + Properties: map[string]spec.Schema{ + "maxUnavailable": { + SchemaProps: spec.SchemaProps{ + Description: "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0. Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.", + Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/util/intstr.IntOrString"}, + }, + "k8s.io/api/apps/v1beta2.RollingUpdateDeployment": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Spec to control the desired behavior of rolling update.", + Properties: map[string]spec.Schema{ + "maxUnavailable": { + SchemaProps: spec.SchemaProps{ + Description: "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. 
Defaults to 25%. Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.", + Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), + }, + }, + "maxSurge": { + SchemaProps: spec.SchemaProps{ + Description: "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new RC can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of desired pods.", + Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/util/intstr.IntOrString"}, + }, + "k8s.io/api/apps/v1beta2.RollingUpdateStatefulSetStrategy": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.", + Properties: map[string]spec.Schema{ + "partition": { + SchemaProps: spec.SchemaProps{ + Description: "Partition indicates the ordinal at which the StatefulSet should be partitioned. Default value is 0.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + }, + }, + }, + Dependencies: []string{}, + }, + "k8s.io/api/apps/v1beta2.Scale": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Scale represents a scaling request for a resource.", + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Description: "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", + Ref: ref("k8s.io/api/apps/v1beta2.ScaleSpec"), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. 
Read-only.", + Ref: ref("k8s.io/api/apps/v1beta2.ScaleStatus"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/apps/v1beta2.ScaleSpec", "k8s.io/api/apps/v1beta2.ScaleStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + }, + "k8s.io/api/apps/v1beta2.ScaleSpec": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ScaleSpec describes the attributes of a scale subresource", + Properties: map[string]spec.Schema{ + "replicas": { + SchemaProps: spec.SchemaProps{ + Description: "desired number of instances for the scaled object.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + }, + }, + }, + Dependencies: []string{}, + }, + "k8s.io/api/apps/v1beta2.ScaleStatus": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ScaleStatus represents the current status of a scale subresource.", + Properties: map[string]spec.Schema{ + "replicas": { + SchemaProps: spec.SchemaProps{ + Description: "actual number of observed instances of the scaled object.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "selector": { + SchemaProps: spec.SchemaProps{ + Description: "label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "targetSelector": { + SchemaProps: spec.SchemaProps{ + Description: "label selector for pods that should match the replicas count. This is a serializated version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"replicas"}, + }, + }, + Dependencies: []string{}, + }, + "k8s.io/api/apps/v1beta2.StatefulSet": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "DEPRECATED - This group version of StatefulSet is deprecated by apps/v1/StatefulSet. See the release notes for more information. StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Description: "Spec defines the desired identities of pods in this set.", + Ref: ref("k8s.io/api/apps/v1beta2.StatefulSetSpec"), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "Status is the current status of Pods in this StatefulSet. This data may be out of date by some window of time.", + Ref: ref("k8s.io/api/apps/v1beta2.StatefulSetStatus"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/apps/v1beta2.StatefulSetSpec", "k8s.io/api/apps/v1beta2.StatefulSetStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + }, + "k8s.io/api/apps/v1beta2.StatefulSetCondition": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "StatefulSetCondition describes the state of a statefulset at a certain point.", + Properties: map[string]spec.Schema{ + "type": { + SchemaProps: spec.SchemaProps{ + Description: "Type of statefulset condition.", + Type: []string{"string"}, + Format: "", + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "Status of the condition, one of True, False, Unknown.", + Type: []string{"string"}, + Format: "", + }, + }, + "lastTransitionTime": { + SchemaProps: spec.SchemaProps{ + Description: "Last time the condition transitioned from one status to another.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "reason": { + SchemaProps: spec.SchemaProps{ + Description: "The reason for the condition's last transition.", + Type: []string{"string"}, + Format: "", + }, + }, + "message": { + SchemaProps: spec.SchemaProps{ + Description: "A human readable message indicating details about the transition.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"type", "status"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, + }, + "k8s.io/api/apps/v1beta2.StatefulSetList": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "StatefulSetList is a collection of StatefulSets.", + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/apps/v1beta2.StatefulSet"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/apps/v1beta2.StatefulSet", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + }, + "k8s.io/api/apps/v1beta2.StatefulSetSpec": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "A StatefulSetSpec is the specification of a StatefulSet.", + Properties: map[string]spec.Schema{ + "replicas": { + SchemaProps: spec.SchemaProps{ + Description: "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "selector": { + SchemaProps: spec.SchemaProps{ + Description: "selector is a label query over pods that should match the replica count. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"), + }, + }, + "template": { + SchemaProps: spec.SchemaProps{ + Description: "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.", + Ref: ref("k8s.io/api/core/v1.PodTemplateSpec"), + }, + }, + "volumeClaimTemplates": { + SchemaProps: spec.SchemaProps{ + Description: "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaim"), + }, + }, + }, + }, + }, + "serviceName": { + SchemaProps: spec.SchemaProps{ + Description: "serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", + Type: []string{"string"}, + Format: "", + }, + }, + "podManagementPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. 
When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.", + Type: []string{"string"}, + Format: "", + }, + }, + "updateStrategy": { + SchemaProps: spec.SchemaProps{ + Description: "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.", + Ref: ref("k8s.io/api/apps/v1beta2.StatefulSetUpdateStrategy"), + }, + }, + "revisionHistoryLimit": { + SchemaProps: spec.SchemaProps{ + Description: "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + }, + Required: []string{"selector", "template", "serviceName"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/apps/v1beta2.StatefulSetUpdateStrategy", "k8s.io/api/core/v1.PersistentVolumeClaim", "k8s.io/api/core/v1.PodTemplateSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, + }, + "k8s.io/api/apps/v1beta2.StatefulSetStatus": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "StatefulSetStatus represents the current state of a StatefulSet.", + Properties: map[string]spec.Schema{ + "observedGeneration": { + SchemaProps: spec.SchemaProps{ + Description: "observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the StatefulSet's generation, which is updated on mutation by the API Server.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "replicas": { + SchemaProps: spec.SchemaProps{ + Description: "replicas is the number of Pods created by the StatefulSet controller.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "readyReplicas": { + SchemaProps: spec.SchemaProps{ + Description: "readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "currentReplicas": { + SchemaProps: spec.SchemaProps{ + Description: "currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by currentRevision.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "updatedReplicas": { + SchemaProps: spec.SchemaProps{ + Description: "updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by updateRevision.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "currentRevision": { + SchemaProps: spec.SchemaProps{ + Description: "currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas).", + Type: []string{"string"}, + Format: "", + }, + }, + "updateRevision": { + SchemaProps: spec.SchemaProps{ + Description: "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", + Type: []string{"string"}, + Format: "", + }, + }, + "collisionCount": { + SchemaProps: spec.SchemaProps{ + Description: "collisionCount is the count of hash collisions for the StatefulSet. 
The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "conditions": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Represents the latest available observations of a statefulset's current state.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/apps/v1beta2.StatefulSetCondition"), + }, + }, + }, + }, + }, + }, + Required: []string{"replicas"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/apps/v1beta2.StatefulSetCondition"}, + }, + "k8s.io/api/apps/v1beta2.StatefulSetUpdateStrategy": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "StatefulSetUpdateStrategy indicates the strategy that the StatefulSet controller will use to perform updates. It includes any additional parameters necessary to perform the update for the indicated strategy.", + Properties: map[string]spec.Schema{ + "type": { + SchemaProps: spec.SchemaProps{ + Description: "Type indicates the type of the StatefulSetUpdateStrategy. Default is RollingUpdate.", + Type: []string{"string"}, + Format: "", + }, + }, + "rollingUpdate": { + SchemaProps: spec.SchemaProps{ + Description: "RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.", + Ref: ref("k8s.io/api/apps/v1beta2.RollingUpdateStatefulSetStrategy"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/apps/v1beta2.RollingUpdateStatefulSetStrategy"}, + }, + "k8s.io/api/authentication/v1.TokenReview": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "TokenReview attempts to authenticate a token to a known user. Note: TokenReview requests may be cached by the webhook token authenticator plugin in the kube-apiserver.", + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Description: "Spec holds information about the request being evaluated", + Ref: ref("k8s.io/api/authentication/v1.TokenReviewSpec"), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "Status is filled in by the server and indicates whether the request can be authenticated.", + Ref: ref("k8s.io/api/authentication/v1.TokenReviewStatus"), + }, + }, + }, + Required: []string{"spec"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/authentication/v1.TokenReviewSpec", "k8s.io/api/authentication/v1.TokenReviewStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + }, + "k8s.io/api/authentication/v1.TokenReviewSpec": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "TokenReviewSpec is a description of the token authentication request.", + Properties: map[string]spec.Schema{ + "token": { + SchemaProps: spec.SchemaProps{ + Description: "Token is the opaque bearer token.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{}, + }, + "k8s.io/api/authentication/v1.TokenReviewStatus": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "TokenReviewStatus is the result of the token authentication request.", + Properties: map[string]spec.Schema{ + "authenticated": { + SchemaProps: spec.SchemaProps{ + Description: "Authenticated indicates that the token was associated with a known user.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "user": { + SchemaProps: spec.SchemaProps{ + Description: "User is the UserInfo associated with the provided token.", + Ref: ref("k8s.io/api/authentication/v1.UserInfo"), + }, + }, + "error": { + SchemaProps: spec.SchemaProps{ + Description: "Error indicates that the token couldn't be checked", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/authentication/v1.UserInfo"}, + }, + "k8s.io/api/authentication/v1.UserInfo": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "UserInfo holds the information about the user needed to implement the user.Info interface.", + Properties: map[string]spec.Schema{ + "username": { + SchemaProps: spec.SchemaProps{ + Description: "The name that uniquely identifies this user among all active users.", + Type: []string{"string"}, + Format: "", + }, + }, + "uid": { + SchemaProps: spec.SchemaProps{ + Description: "A unique value that identifies this user across time. 
If this user is deleted and another user by the same name is added, they will have different UIDs.", + Type: []string{"string"}, + Format: "", + }, + }, + "groups": { + SchemaProps: spec.SchemaProps{ + Description: "The names of groups this user is a part of.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "extra": { + SchemaProps: spec.SchemaProps{ + Description: "Any additional information provided by the authenticator.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{}, + }, + "k8s.io/api/authentication/v1beta1.TokenReview": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "TokenReview attempts to authenticate a token to a known user. Note: TokenReview requests may be cached by the webhook token authenticator plugin in the kube-apiserver.", + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Description: "Spec holds information about the request being evaluated", + Ref: ref("k8s.io/api/authentication/v1beta1.TokenReviewSpec"), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "Status is filled in by the server and indicates whether the request can be authenticated.", + Ref: ref("k8s.io/api/authentication/v1beta1.TokenReviewStatus"), + }, + }, + }, + Required: []string{"spec"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/authentication/v1beta1.TokenReviewSpec", "k8s.io/api/authentication/v1beta1.TokenReviewStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + }, + "k8s.io/api/authentication/v1beta1.TokenReviewSpec": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "TokenReviewSpec is a description of the token authentication request.", + Properties: map[string]spec.Schema{ + "token": { + SchemaProps: spec.SchemaProps{ + Description: "Token is the opaque bearer token.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{}, + }, + "k8s.io/api/authentication/v1beta1.TokenReviewStatus": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "TokenReviewStatus is the result of the token authentication request.", + Properties: map[string]spec.Schema{ + "authenticated": { + SchemaProps: spec.SchemaProps{ + Description: "Authenticated indicates that the token was associated with a known user.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "user": { + SchemaProps: spec.SchemaProps{ + Description: "User is the UserInfo associated with the provided token.", + Ref: ref("k8s.io/api/authentication/v1beta1.UserInfo"), + }, + }, + "error": { + SchemaProps: spec.SchemaProps{ + Description: "Error indicates that the token couldn't be checked", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/authentication/v1beta1.UserInfo"}, + }, + "k8s.io/api/authentication/v1beta1.UserInfo": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "UserInfo holds the information about the user needed to implement the user.Info interface.", + Properties: map[string]spec.Schema{ + "username": { + SchemaProps: spec.SchemaProps{ + Description: "The name that uniquely identifies this user among all active users.", Type: []string{"string"}, Format: "", }, @@ -3093,7 +4761,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "resources": { SchemaProps: spec.SchemaProps{ - Description: "Resources is a list of resources this rule applies to. ResourceAll represents all resources. \"*\" means all.", + Description: "Resources is a list of resources this rule applies to. \"*\" means all in the specified apiGroups.\n \"*/foo\" represents the subresource 'foo' for all resources in the specified apiGroups.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ @@ -3372,7 +5040,14 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Properties: map[string]spec.Schema{ "allowed": { SchemaProps: spec.SchemaProps{ - Description: "Allowed is required. 
True if the action would be allowed, false otherwise.", + Description: "Allowed is required. True if the action would be allowed, false otherwise.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "denied": { + SchemaProps: spec.SchemaProps{ + Description: "Denied is optional. True if the action would be denied, otherwise false. If both allowed is false and denied is false, then the authorizer has no opinion on whether to authorize the action. Denied may not be true if Allowed is true.", Type: []string{"boolean"}, Format: "", }, @@ -3649,7 +5324,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "resources": { SchemaProps: spec.SchemaProps{ - Description: "Resources is a list of resources this rule applies to. ResourceAll represents all resources. \"*\" means all.", + Description: "Resources is a list of resources this rule applies to. \"*\" means all in the specified apiGroups.\n \"*/foo\" represents the subresource 'foo' for all resources in the specified apiGroups.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ @@ -3928,7 +5603,14 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Properties: map[string]spec.Schema{ "allowed": { SchemaProps: spec.SchemaProps{ - Description: "Allowed is required. True if the action would be allowed, false otherwise.", + Description: "Allowed is required. True if the action would be allowed, false otherwise.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "denied": { + SchemaProps: spec.SchemaProps{ + Description: "Denied is optional. True if the action would be denied, otherwise false. If both allowed is false and denied is false, then the authorizer has no opinion on whether to authorize the action. Denied may not be true if Allowed is true.", Type: []string{"boolean"}, Format: "", }, @@ -5298,7 +6980,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "manualSelector": { SchemaProps: spec.SchemaProps{ - Description: "manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://git.k8s.io/community/contributors/design-proposals/selector-generation.md", + Description: "manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector", Type: []string{"boolean"}, Format: "", }, @@ -5488,7 +7170,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "concurrencyPolicy": { SchemaProps: spec.SchemaProps{ - Description: "Specifies how to treat concurrent executions of a Job. Defaults to Allow.", + Description: "Specifies how to treat concurrent executions of a Job. Valid values are: - \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one", Type: []string{"string"}, Format: "", }, @@ -5726,7 +7408,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "concurrencyPolicy": { SchemaProps: spec.SchemaProps{ - Description: "Specifies how to treat concurrent executions of a Job. Defaults to Allow.", + Description: "Specifies how to treat concurrent executions of a Job. Valid values are: - \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one", Type: []string{"string"}, Format: "", }, @@ -6247,7 +7929,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "kind": { SchemaProps: spec.SchemaProps{ - Description: "Expected values Shared: mulitple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared", + Description: "Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared", Type: []string{"string"}, Format: "", }, @@ -6367,6 +8049,38 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Dependencies: []string{ "k8s.io/api/core/v1.ObjectReference", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, }, + "k8s.io/api/core/v1.CSIPersistentVolumeSource": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Represents storage that is managed by an external CSI volume driver", + Properties: map[string]spec.Schema{ + "driver": { + SchemaProps: spec.SchemaProps{ + Description: "Driver is the name of the driver to use for this volume. Required.", + Type: []string{"string"}, + Format: "", + }, + }, + "volumeHandle": { + SchemaProps: spec.SchemaProps{ + Description: "VolumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required.", + Type: []string{"string"}, + Format: "", + }, + }, + "readOnly": { + SchemaProps: spec.SchemaProps{ + Description: "Optional: The value to pass to ControllerPublishVolumeRequest. 
Defaults to false (read/write).", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + Required: []string{"driver", "volumeHandle"}, + }, + }, + Dependencies: []string{}, + }, "k8s.io/api/core/v1.Capabilities": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ @@ -7067,6 +8781,25 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, }, }, + "volumeDevices": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "devicePath", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "volumeDevices is the list of block devices to be used by the container. This is an alpha feature and may change in the future.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.VolumeDevice"), + }, + }, + }, + }, + }, "livenessProbe": { SchemaProps: spec.SchemaProps{ Description: "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", @@ -7108,7 +8841,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "securityContext": { SchemaProps: spec.SchemaProps{ - Description: "Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://git.k8s.io/community/contributors/design-proposals/security_context.md", + Description: "Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", Ref: ref("k8s.io/api/core/v1.SecurityContext"), }, }, @@ -7138,7 +8871,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, }, Dependencies: []string{ - "k8s.io/api/core/v1.ContainerPort", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeMount"}, + "k8s.io/api/core/v1.ContainerPort", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount"}, }, "k8s.io/api/core/v1.ContainerImage": { Schema: spec.Schema{ @@ -7933,12 +9666,51 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Format: "", }, }, + "eventTime": { + SchemaProps: spec.SchemaProps{ + Description: "Time when this Event was first observed.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime"), + }, + }, + "series": { + SchemaProps: spec.SchemaProps{ + Description: "Data about the Event series this event represents or nil if it's a singleton Event.", + Ref: ref("k8s.io/api/core/v1.EventSeries"), + }, + }, + "action": { + SchemaProps: spec.SchemaProps{ + Description: "What action was taken/failed regarding to the Regarding object.", + Type: []string{"string"}, + Format: "", + }, + }, + "related": { + SchemaProps: spec.SchemaProps{ + Description: "Optional secondary object for more complex actions.", + Ref: ref("k8s.io/api/core/v1.ObjectReference"), + }, + }, + "reportingComponent": { + 
SchemaProps: spec.SchemaProps{ + Description: "Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.", + Type: []string{"string"}, + Format: "", + }, + }, + "reportingInstance": { + SchemaProps: spec.SchemaProps{ + Description: "ID of the controller instance, e.g. `kubelet-xyzf`.", + Type: []string{"string"}, + Format: "", + }, + }, }, Required: []string{"metadata", "involvedObject"}, }, }, Dependencies: []string{ - "k8s.io/api/core/v1.EventSource", "k8s.io/api/core/v1.ObjectReference", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, + "k8s.io/api/core/v1.EventSeries", "k8s.io/api/core/v1.EventSource", "k8s.io/api/core/v1.ObjectReference", "k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, }, "k8s.io/api/core/v1.EventList": { Schema: spec.Schema{ @@ -7985,6 +9757,37 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Dependencies: []string{ "k8s.io/api/core/v1.Event", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, }, + "k8s.io/api/core/v1.EventSeries": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "EventSeries contain information on series of events, i.e. thing that was/is happening continously for some time.", + Properties: map[string]spec.Schema{ + "count": { + SchemaProps: spec.SchemaProps{ + Description: "Number of occurrences in this series up to the last heartbeat time", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "lastObservedTime": { + SchemaProps: spec.SchemaProps{ + Description: "Time of the last occurence observed", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime"), + }, + }, + "state": { + SchemaProps: spec.SchemaProps{ + Description: "State of this Series: Ongoing or Finished", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime"}, + }, "k8s.io/api/core/v1.EventSource": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ @@ -8095,7 +9898,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "k8s.io/api/core/v1.FlexVolumeSource": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.", + Description: "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", Properties: map[string]spec.Schema{ "driver": { SchemaProps: spec.SchemaProps{ @@ -8416,23 +10219,118 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Properties: map[string]spec.Schema{ "path": { SchemaProps: spec.SchemaProps{ - Description: "Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", - Type: []string{"string"}, - Format: "", + Description: "Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + Type: []string{"string"}, + Format: "", + }, + }, + "type": { + SchemaProps: spec.SchemaProps{ + Description: "Type for HostPath Volume Defaults to \"\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"path"}, + }, + }, + Dependencies: []string{}, + }, + "k8s.io/api/core/v1.ISCSIPersistentVolumeSource": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ISCSIPersistentVolumeSource represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.", + Properties: map[string]spec.Schema{ + "targetPortal": { + SchemaProps: spec.SchemaProps{ + Description: "iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + Type: []string{"string"}, + Format: "", + }, + }, + "iqn": { + SchemaProps: spec.SchemaProps{ + Description: "Target iSCSI Qualified Name.", + Type: []string{"string"}, + Format: "", + }, + }, + "lun": { + SchemaProps: spec.SchemaProps{ + Description: "iSCSI Target Lun number.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "iscsiInterface": { + SchemaProps: spec.SchemaProps{ + Description: "iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).", + Type: []string{"string"}, + Format: "", + }, + }, + "fsType": { + SchemaProps: spec.SchemaProps{ + Description: "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi", + Type: []string{"string"}, + Format: "", + }, + }, + "readOnly": { + SchemaProps: spec.SchemaProps{ + Description: "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "portals": { + SchemaProps: spec.SchemaProps{ + Description: "iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "chapAuthDiscovery": { + SchemaProps: spec.SchemaProps{ + Description: "whether support iSCSI Discovery CHAP authentication", + Type: []string{"boolean"}, + Format: "", + }, + }, + "chapAuthSession": { + SchemaProps: spec.SchemaProps{ + Description: "whether support iSCSI Session CHAP authentication", + Type: []string{"boolean"}, + Format: "", + }, + }, + "secretRef": { + SchemaProps: spec.SchemaProps{ + Description: "CHAP Secret for iSCSI target and initiator authentication", + Ref: ref("k8s.io/api/core/v1.SecretReference"), }, }, - "type": { + "initiatorName": { SchemaProps: spec.SchemaProps{ - Description: "Type for HostPath Volume Defaults to \"\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + Description: "Custom iSCSI Initiator Name. 
If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection.", Type: []string{"string"}, Format: "", }, }, }, - Required: []string{"path"}, + Required: []string{"targetPortal", "iqn", "lun"}, }, }, - Dependencies: []string{}, + Dependencies: []string{ + "k8s.io/api/core/v1.SecretReference"}, }, "k8s.io/api/core/v1.ISCSIVolumeSource": { Schema: spec.Schema{ @@ -8441,7 +10339,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Properties: map[string]spec.Schema{ "targetPortal": { SchemaProps: spec.SchemaProps{ - Description: "iSCSI target portal. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + Description: "iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", Type: []string{"string"}, Format: "", }, @@ -8455,14 +10353,14 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "lun": { SchemaProps: spec.SchemaProps{ - Description: "iSCSI target lun number.", + Description: "iSCSI Target Lun number.", Type: []string{"integer"}, Format: "int32", }, }, "iscsiInterface": { SchemaProps: spec.SchemaProps{ - Description: "Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport.", + Description: "iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).", Type: []string{"string"}, Format: "", }, @@ -8483,7 +10381,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "portals": { SchemaProps: spec.SchemaProps{ - Description: "iSCSI target portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + Description: "iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ @@ -8511,13 +10409,13 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "secretRef": { SchemaProps: spec.SchemaProps{ - Description: "CHAP secret for iSCSI target and initiator authentication", + Description: "CHAP Secret for iSCSI target and initiator authentication", Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), }, }, "initiatorName": { SchemaProps: spec.SchemaProps{ - Description: "Custom iSCSI initiator name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection.", + Description: "Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection.", Type: []string{"string"}, Format: "", }, @@ -8731,7 +10629,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "items": { SchemaProps: spec.SchemaProps{ - Description: "Items is a list of LimitRange objects. More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_limit_range.md", + Description: "Items is a list of LimitRange objects. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ @@ -9029,7 +10927,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Properties: map[string]spec.Schema{ "finalizers": { SchemaProps: spec.SchemaProps{ - Description: "Finalizers is an opaque list of values that must be empty to permanently remove object from storage. More info: https://git.k8s.io/community/contributors/design-proposals/namespaces.md#finalizers", + Description: "Finalizers is an opaque list of values that must be empty to permanently remove object from storage. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ @@ -9053,7 +10951,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Properties: map[string]spec.Schema{ "phase": { SchemaProps: spec.SchemaProps{ - Description: "Phase is the current lifecycle phase of the namespace. More info: https://git.k8s.io/community/contributors/design-proposals/namespaces.md#phases", + Description: "Phase is the current lifecycle phase of the namespace. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/", Type: []string{"string"}, Format: "", }, @@ -10033,6 +11931,13 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Format: "", }, }, + "volumeMode": { + SchemaProps: spec.SchemaProps{ + Description: "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. This is an alpha feature and may change in the future.", + Type: []string{"string"}, + Format: "", + }, + }, }, }, }, @@ -10211,13 +12116,13 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "rbd": { SchemaProps: spec.SchemaProps{ Description: "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md", - Ref: ref("k8s.io/api/core/v1.RBDVolumeSource"), + Ref: ref("k8s.io/api/core/v1.RBDPersistentVolumeSource"), }, }, "iscsi": { SchemaProps: spec.SchemaProps{ Description: "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin.", - Ref: ref("k8s.io/api/core/v1.ISCSIVolumeSource"), + Ref: ref("k8s.io/api/core/v1.ISCSIPersistentVolumeSource"), }, }, "cinder": { @@ -10246,7 +12151,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "flexVolume": { SchemaProps: spec.SchemaProps{ - Description: "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. 
This is an alpha feature and may change in future.", + Description: "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", Ref: ref("k8s.io/api/core/v1.FlexVolumeSource"), }, }, @@ -10289,7 +12194,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "scaleIO": { SchemaProps: spec.SchemaProps{ Description: "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", - Ref: ref("k8s.io/api/core/v1.ScaleIOVolumeSource"), + Ref: ref("k8s.io/api/core/v1.ScaleIOPersistentVolumeSource"), }, }, "local": { @@ -10304,11 +12209,17 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Ref: ref("k8s.io/api/core/v1.StorageOSPersistentVolumeSource"), }, }, + "csi": { + SchemaProps: spec.SchemaProps{ + Description: "CSI represents storage that handled by an external CSI driver", + Ref: ref("k8s.io/api/core/v1.CSIPersistentVolumeSource"), + }, + }, }, }, }, Dependencies: []string{ - "k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource", "k8s.io/api/core/v1.AzureDiskVolumeSource", "k8s.io/api/core/v1.AzureFilePersistentVolumeSource", "k8s.io/api/core/v1.CephFSPersistentVolumeSource", "k8s.io/api/core/v1.CinderVolumeSource", "k8s.io/api/core/v1.FCVolumeSource", "k8s.io/api/core/v1.FlexVolumeSource", "k8s.io/api/core/v1.FlockerVolumeSource", "k8s.io/api/core/v1.GCEPersistentDiskVolumeSource", "k8s.io/api/core/v1.GlusterfsVolumeSource", "k8s.io/api/core/v1.HostPathVolumeSource", "k8s.io/api/core/v1.ISCSIVolumeSource", "k8s.io/api/core/v1.LocalVolumeSource", "k8s.io/api/core/v1.NFSVolumeSource", "k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource", "k8s.io/api/core/v1.PortworxVolumeSource", "k8s.io/api/core/v1.QuobyteVolumeSource", "k8s.io/api/core/v1.RBDVolumeSource", "k8s.io/api/core/v1.ScaleIOVolumeSource", "k8s.io/api/core/v1.StorageOSPersistentVolumeSource", "k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource"}, + "k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource", "k8s.io/api/core/v1.AzureDiskVolumeSource", "k8s.io/api/core/v1.AzureFilePersistentVolumeSource", "k8s.io/api/core/v1.CSIPersistentVolumeSource", "k8s.io/api/core/v1.CephFSPersistentVolumeSource", "k8s.io/api/core/v1.CinderVolumeSource", "k8s.io/api/core/v1.FCVolumeSource", "k8s.io/api/core/v1.FlexVolumeSource", "k8s.io/api/core/v1.FlockerVolumeSource", "k8s.io/api/core/v1.GCEPersistentDiskVolumeSource", "k8s.io/api/core/v1.GlusterfsVolumeSource", "k8s.io/api/core/v1.HostPathVolumeSource", "k8s.io/api/core/v1.ISCSIPersistentVolumeSource", "k8s.io/api/core/v1.LocalVolumeSource", "k8s.io/api/core/v1.NFSVolumeSource", "k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource", "k8s.io/api/core/v1.PortworxVolumeSource", "k8s.io/api/core/v1.QuobyteVolumeSource", "k8s.io/api/core/v1.RBDPersistentVolumeSource", "k8s.io/api/core/v1.ScaleIOPersistentVolumeSource", "k8s.io/api/core/v1.StorageOSPersistentVolumeSource", "k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource"}, }, "k8s.io/api/core/v1.PersistentVolumeSpec": { Schema: spec.Schema{ @@ -10361,13 +12272,13 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "rbd": { SchemaProps: spec.SchemaProps{ Description: "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. 
More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md", - Ref: ref("k8s.io/api/core/v1.RBDVolumeSource"), + Ref: ref("k8s.io/api/core/v1.RBDPersistentVolumeSource"), }, }, "iscsi": { SchemaProps: spec.SchemaProps{ Description: "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin.", - Ref: ref("k8s.io/api/core/v1.ISCSIVolumeSource"), + Ref: ref("k8s.io/api/core/v1.ISCSIPersistentVolumeSource"), }, }, "cinder": { @@ -10396,7 +12307,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "flexVolume": { SchemaProps: spec.SchemaProps{ - Description: "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.", + Description: "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", Ref: ref("k8s.io/api/core/v1.FlexVolumeSource"), }, }, @@ -10439,7 +12350,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "scaleIO": { SchemaProps: spec.SchemaProps{ Description: "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", - Ref: ref("k8s.io/api/core/v1.ScaleIOVolumeSource"), + Ref: ref("k8s.io/api/core/v1.ScaleIOPersistentVolumeSource"), }, }, "local": { @@ -10454,6 +12365,12 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Ref: ref("k8s.io/api/core/v1.StorageOSPersistentVolumeSource"), }, }, + "csi": { + SchemaProps: spec.SchemaProps{ + Description: "CSI represents storage that handled by an external CSI driver", + Ref: ref("k8s.io/api/core/v1.CSIPersistentVolumeSource"), + }, + }, "accessModes": { SchemaProps: spec.SchemaProps{ Description: "AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes", @@ -10502,11 +12419,18 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, }, }, + "volumeMode": { + SchemaProps: spec.SchemaProps{ + Description: "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec. 
This is an alpha feature and may change in the future.", + Type: []string{"string"}, + Format: "", + }, + }, }, }, }, Dependencies: []string{ - "k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource", "k8s.io/api/core/v1.AzureDiskVolumeSource", "k8s.io/api/core/v1.AzureFilePersistentVolumeSource", "k8s.io/api/core/v1.CephFSPersistentVolumeSource", "k8s.io/api/core/v1.CinderVolumeSource", "k8s.io/api/core/v1.FCVolumeSource", "k8s.io/api/core/v1.FlexVolumeSource", "k8s.io/api/core/v1.FlockerVolumeSource", "k8s.io/api/core/v1.GCEPersistentDiskVolumeSource", "k8s.io/api/core/v1.GlusterfsVolumeSource", "k8s.io/api/core/v1.HostPathVolumeSource", "k8s.io/api/core/v1.ISCSIVolumeSource", "k8s.io/api/core/v1.LocalVolumeSource", "k8s.io/api/core/v1.NFSVolumeSource", "k8s.io/api/core/v1.ObjectReference", "k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource", "k8s.io/api/core/v1.PortworxVolumeSource", "k8s.io/api/core/v1.QuobyteVolumeSource", "k8s.io/api/core/v1.RBDVolumeSource", "k8s.io/api/core/v1.ScaleIOVolumeSource", "k8s.io/api/core/v1.StorageOSPersistentVolumeSource", "k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource", "k8s.io/apimachinery/pkg/api/resource.Quantity"}, + "k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource", "k8s.io/api/core/v1.AzureDiskVolumeSource", "k8s.io/api/core/v1.AzureFilePersistentVolumeSource", "k8s.io/api/core/v1.CSIPersistentVolumeSource", "k8s.io/api/core/v1.CephFSPersistentVolumeSource", "k8s.io/api/core/v1.CinderVolumeSource", "k8s.io/api/core/v1.FCVolumeSource", "k8s.io/api/core/v1.FlexVolumeSource", "k8s.io/api/core/v1.FlockerVolumeSource", "k8s.io/api/core/v1.GCEPersistentDiskVolumeSource", "k8s.io/api/core/v1.GlusterfsVolumeSource", "k8s.io/api/core/v1.HostPathVolumeSource", "k8s.io/api/core/v1.ISCSIPersistentVolumeSource", "k8s.io/api/core/v1.LocalVolumeSource", "k8s.io/api/core/v1.NFSVolumeSource", "k8s.io/api/core/v1.ObjectReference", "k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource", "k8s.io/api/core/v1.PortworxVolumeSource", "k8s.io/api/core/v1.QuobyteVolumeSource", "k8s.io/api/core/v1.RBDPersistentVolumeSource", "k8s.io/api/core/v1.ScaleIOPersistentVolumeSource", "k8s.io/api/core/v1.StorageOSPersistentVolumeSource", "k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource", "k8s.io/apimachinery/pkg/api/resource.Quantity"}, }, "k8s.io/api/core/v1.PersistentVolumeStatus": { Schema: spec.Schema{ @@ -10647,7 +12571,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "k8s.io/api/core/v1.PodAffinityTerm": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key tches that of any node on which a pod of the set of pods is running", + Description: "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running", Properties: map[string]spec.Schema{ "labelSelector": { SchemaProps: spec.SchemaProps{ @@ -10671,12 +12595,13 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "topologyKey": { SchemaProps: spec.SchemaProps{ - Description: "This pod should be 
co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as \"all topologies\" (\"all topologies\" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed.", + Description: "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.", Type: []string{"string"}, Format: "", }, }, }, + Required: []string{"topologyKey"}, }, }, Dependencies: []string{ @@ -10830,6 +12755,81 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Dependencies: []string{ "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, }, + "k8s.io/api/core/v1.PodDNSConfig": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PodDNSConfig defines the DNS parameters of a pod in addition to those generated from DNSPolicy.", + Properties: map[string]spec.Schema{ + "nameservers": { + SchemaProps: spec.SchemaProps{ + Description: "A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "searches": { + SchemaProps: spec.SchemaProps{ + Description: "A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "options": { + SchemaProps: spec.SchemaProps{ + Description: "A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. 
Resolution options given in Options will override those that appear in the base DNSPolicy.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.PodDNSConfigOption"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.PodDNSConfigOption"}, + }, + "k8s.io/api/core/v1.PodDNSConfigOption": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PodDNSConfigOption defines DNS resolver options of a pod.", + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Required.", + Type: []string{"string"}, + Format: "", + }, + }, + "value": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{}, + }, "k8s.io/api/core/v1.PodExecOptions": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ @@ -11252,7 +13252,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "dnsPolicy": { SchemaProps: spec.SchemaProps{ - Description: "Set DNS policy for containers within the pod. One of 'ClusterFirstWithHostNet', 'ClusterFirst' or 'Default'. Defaults to \"ClusterFirst\". To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.", + Description: "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. Note that 'None' policy is an alpha feature introduced in v1.9 and CustomPodDNS feature gate must be enabled to use it.", Type: []string{"string"}, Format: "", }, @@ -11418,12 +13418,18 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Format: "int32", }, }, + "dnsConfig": { + SchemaProps: spec.SchemaProps{ + Description: "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy. This is an alpha feature introduced in v1.9 and CustomPodDNS feature gate must be enabled to use it.", + Ref: ref("k8s.io/api/core/v1.PodDNSConfig"), + }, + }, }, Required: []string{"containers"}, }, }, Dependencies: []string{ - "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.HostAlias", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.Volume"}, + "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.HostAlias", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.Volume"}, }, "k8s.io/api/core/v1.PodStatus": { Schema: spec.Schema{ @@ -11908,6 +13914,80 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, Dependencies: []string{}, }, + "k8s.io/api/core/v1.RBDPersistentVolumeSource": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Represents a Rados Block Device mount that lasts the lifetime of a pod. 
RBD volumes support ownership management and SELinux relabeling.", + Properties: map[string]spec.Schema{ + "monitors": { + SchemaProps: spec.SchemaProps{ + Description: "A collection of Ceph monitors. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "image": { + SchemaProps: spec.SchemaProps{ + Description: "The rados image name. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + Type: []string{"string"}, + Format: "", + }, + }, + "fsType": { + SchemaProps: spec.SchemaProps{ + Description: "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd", + Type: []string{"string"}, + Format: "", + }, + }, + "pool": { + SchemaProps: spec.SchemaProps{ + Description: "The rados pool name. Default is rbd. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + Type: []string{"string"}, + Format: "", + }, + }, + "user": { + SchemaProps: spec.SchemaProps{ + Description: "The rados user name. Default is admin. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + Type: []string{"string"}, + Format: "", + }, + }, + "keyring": { + SchemaProps: spec.SchemaProps{ + Description: "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + Type: []string{"string"}, + Format: "", + }, + }, + "secretRef": { + SchemaProps: spec.SchemaProps{ + Description: "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + Ref: ref("k8s.io/api/core/v1.SecretReference"), + }, + }, + "readOnly": { + SchemaProps: spec.SchemaProps{ + Description: "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + Required: []string{"monitors", "image"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.SecretReference"}, + }, "k8s.io/api/core/v1.RBDVolumeSource": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ @@ -12375,7 +14455,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "items": { SchemaProps: spec.SchemaProps{ - Description: "Items is a list of ResourceQuota objects. More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_resource_quota.md", + Description: "Items is a list of ResourceQuota objects. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ @@ -12400,7 +14480,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Properties: map[string]spec.Schema{ "hard": { SchemaProps: spec.SchemaProps{ - Description: "Hard is the set of desired hard limits for each named resource. 
More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_resource_quota.md", + Description: "Hard is the set of desired hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/", Type: []string{"object"}, AdditionalProperties: &spec.SchemaOrBool{ Schema: &spec.Schema{ @@ -12438,7 +14518,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Properties: map[string]spec.Schema{ "hard": { SchemaProps: spec.SchemaProps{ - Description: "Hard is the set of enforced hard limits for each named resource. More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_resource_quota.md", + Description: "Hard is the set of enforced hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/", Type: []string{"object"}, AdditionalProperties: &spec.SchemaOrBool{ Schema: &spec.Schema{ @@ -12499,49 +14579,130 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, }, }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/api/resource.Quantity"}, - }, - "k8s.io/api/core/v1.SELinuxOptions": { - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "SELinuxOptions are the labels to be applied to the container", - Properties: map[string]spec.Schema{ - "user": { + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/api/resource.Quantity"}, + }, + "k8s.io/api/core/v1.SELinuxOptions": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "SELinuxOptions are the labels to be applied to the container", + Properties: map[string]spec.Schema{ + "user": { + SchemaProps: spec.SchemaProps{ + Description: "User is a SELinux user label that applies to the container.", + Type: []string{"string"}, + Format: "", + }, + }, + "role": { + SchemaProps: spec.SchemaProps{ + Description: "Role is a SELinux role label that applies to the container.", + Type: []string{"string"}, + Format: "", + }, + }, + "type": { + SchemaProps: spec.SchemaProps{ + Description: "Type is a SELinux type label that applies to the container.", + Type: []string{"string"}, + Format: "", + }, + }, + "level": { + SchemaProps: spec.SchemaProps{ + Description: "Level is SELinux level label that applies to the container.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{}, + }, + "k8s.io/api/core/v1.ScaleIOPersistentVolumeSource": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume", + Properties: map[string]spec.Schema{ + "gateway": { + SchemaProps: spec.SchemaProps{ + Description: "The host address of the ScaleIO API Gateway.", + Type: []string{"string"}, + Format: "", + }, + }, + "system": { + SchemaProps: spec.SchemaProps{ + Description: "The name of the storage system as configured in ScaleIO.", + Type: []string{"string"}, + Format: "", + }, + }, + "secretRef": { + SchemaProps: spec.SchemaProps{ + Description: "SecretRef references to the secret for ScaleIO user and other sensitive information. 
If this is not provided, Login operation will fail.", + Ref: ref("k8s.io/api/core/v1.SecretReference"), + }, + }, + "sslEnabled": { + SchemaProps: spec.SchemaProps{ + Description: "Flag to enable/disable SSL communication with Gateway, default false", + Type: []string{"boolean"}, + Format: "", + }, + }, + "protectionDomain": { + SchemaProps: spec.SchemaProps{ + Description: "The name of the ScaleIO Protection Domain for the configured storage.", + Type: []string{"string"}, + Format: "", + }, + }, + "storagePool": { + SchemaProps: spec.SchemaProps{ + Description: "The ScaleIO Storage Pool associated with the protection domain.", + Type: []string{"string"}, + Format: "", + }, + }, + "storageMode": { SchemaProps: spec.SchemaProps{ - Description: "User is a SELinux user label that applies to the container.", + Description: "Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.", Type: []string{"string"}, Format: "", }, }, - "role": { + "volumeName": { SchemaProps: spec.SchemaProps{ - Description: "Role is a SELinux role label that applies to the container.", + Description: "The name of a volume already created in the ScaleIO system that is associated with this volume source.", Type: []string{"string"}, Format: "", }, }, - "type": { + "fsType": { SchemaProps: spec.SchemaProps{ - Description: "Type is a SELinux type label that applies to the container.", + Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", Type: []string{"string"}, Format: "", }, }, - "level": { + "readOnly": { SchemaProps: spec.SchemaProps{ - Description: "Level is SELinux level label that applies to the container.", - Type: []string{"string"}, + Description: "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + Type: []string{"boolean"}, Format: "", }, }, }, + Required: []string{"gateway", "system", "secretRef"}, }, }, - Dependencies: []string{}, + Dependencies: []string{ + "k8s.io/api/core/v1.SecretReference"}, }, "k8s.io/api/core/v1.ScaleIOVolumeSource": { Schema: spec.Schema{ @@ -12577,21 +14738,21 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "protectionDomain": { SchemaProps: spec.SchemaProps{ - Description: "The name of the Protection Domain for the configured storage (defaults to \"default\").", + Description: "The name of the ScaleIO Protection Domain for the configured storage.", Type: []string{"string"}, Format: "", }, }, "storagePool": { SchemaProps: spec.SchemaProps{ - Description: "The Storage Pool associated with the protection domain (defaults to \"default\").", + Description: "The ScaleIO Storage Pool associated with the protection domain.", Type: []string{"string"}, Format: "", }, }, "storageMode": { SchemaProps: spec.SchemaProps{ - Description: "Indicates whether the storage for a volume should be thick or thin (defaults to \"thin\").", + Description: "Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.", Type: []string{"string"}, Format: "", }, @@ -13363,7 +15524,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "externalName": { SchemaProps: spec.SchemaProps{ - Description: "externalName is the external reference that kubedns or equivalent will return as a CNAME record for this service. No proxying will be involved. 
Must be a valid DNS name and requires Type to be ExternalName.", + Description: "externalName is the external reference that kubedns or equivalent will return as a CNAME record for this service. No proxying will be involved. Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) and requires Type to be ExternalName.", Type: []string{"string"}, Format: "", }, @@ -13739,7 +15900,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "flexVolume": { SchemaProps: spec.SchemaProps{ - Description: "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.", + Description: "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", Ref: ref("k8s.io/api/core/v1.FlexVolumeSource"), }, }, @@ -13840,6 +16001,31 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Dependencies: []string{ "k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource", "k8s.io/api/core/v1.AzureDiskVolumeSource", "k8s.io/api/core/v1.AzureFileVolumeSource", "k8s.io/api/core/v1.CephFSVolumeSource", "k8s.io/api/core/v1.CinderVolumeSource", "k8s.io/api/core/v1.ConfigMapVolumeSource", "k8s.io/api/core/v1.DownwardAPIVolumeSource", "k8s.io/api/core/v1.EmptyDirVolumeSource", "k8s.io/api/core/v1.FCVolumeSource", "k8s.io/api/core/v1.FlexVolumeSource", "k8s.io/api/core/v1.FlockerVolumeSource", "k8s.io/api/core/v1.GCEPersistentDiskVolumeSource", "k8s.io/api/core/v1.GitRepoVolumeSource", "k8s.io/api/core/v1.GlusterfsVolumeSource", "k8s.io/api/core/v1.HostPathVolumeSource", "k8s.io/api/core/v1.ISCSIVolumeSource", "k8s.io/api/core/v1.NFSVolumeSource", "k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource", "k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource", "k8s.io/api/core/v1.PortworxVolumeSource", "k8s.io/api/core/v1.ProjectedVolumeSource", "k8s.io/api/core/v1.QuobyteVolumeSource", "k8s.io/api/core/v1.RBDVolumeSource", "k8s.io/api/core/v1.ScaleIOVolumeSource", "k8s.io/api/core/v1.SecretVolumeSource", "k8s.io/api/core/v1.StorageOSVolumeSource", "k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource"}, }, + "k8s.io/api/core/v1.VolumeDevice": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "volumeDevice describes a mapping of a raw block device within a container.", + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "name must match the name of a persistentVolumeClaim in the pod", + Type: []string{"string"}, + Format: "", + }, + }, + "devicePath": { + SchemaProps: spec.SchemaProps{ + Description: "devicePath is the path inside of the container that the device will be mapped to.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"name", "devicePath"}, + }, + }, + Dependencies: []string{}, + }, "k8s.io/api/core/v1.VolumeMount": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ @@ -13988,7 +16174,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "flexVolume": { SchemaProps: spec.SchemaProps{ - Description: "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. 
This is an alpha feature and may change in future.", + Description: "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", Ref: ref("k8s.io/api/core/v1.FlexVolumeSource"), }, }, @@ -14152,19 +16338,219 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Dependencies: []string{ "k8s.io/api/core/v1.PodAffinityTerm"}, }, - "k8s.io/api/extensions/v1beta1.APIVersion": { + "k8s.io/api/events/v1beta1.Event": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "An APIVersion represents a single concrete version of an object model.", + Description: "Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system.", Properties: map[string]spec.Schema{ - "name": { + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "eventTime": { + SchemaProps: spec.SchemaProps{ + Description: "Required. Time when this Event was first observed.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime"), + }, + }, + "series": { + SchemaProps: spec.SchemaProps{ + Description: "Data about the Event series this event represents or nil if it's a singleton Event.", + Ref: ref("k8s.io/api/events/v1beta1.EventSeries"), + }, + }, + "reportingController": { + SchemaProps: spec.SchemaProps{ + Description: "Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.", + Type: []string{"string"}, + Format: "", + }, + }, + "reportingInstance": { + SchemaProps: spec.SchemaProps{ + Description: "ID of the controller instance, e.g. `kubelet-xyzf`.", + Type: []string{"string"}, + Format: "", + }, + }, + "action": { + SchemaProps: spec.SchemaProps{ + Description: "What action was taken/failed regarding to the regarding object.", + Type: []string{"string"}, + Format: "", + }, + }, + "reason": { + SchemaProps: spec.SchemaProps{ + Description: "Why the action was taken.", + Type: []string{"string"}, + Format: "", + }, + }, + "regarding": { + SchemaProps: spec.SchemaProps{ + Description: "The object this Event is about. In most cases it's an Object reporting controller implements. E.g. ReplicaSetController implements ReplicaSets and this event is emitted because it acts on some changes in a ReplicaSet object.", + Ref: ref("k8s.io/api/core/v1.ObjectReference"), + }, + }, + "related": { + SchemaProps: spec.SchemaProps{ + Description: "Optional secondary object for more complex actions. E.g. when regarding object triggers a creation or deletion of related object.", + Ref: ref("k8s.io/api/core/v1.ObjectReference"), + }, + }, + "note": { + SchemaProps: spec.SchemaProps{ + Description: "Optional. 
A human-readable description of the status of this operation. Maximal length of the note is 1kB, but libraries should be prepared to handle values up to 64kB.", + Type: []string{"string"}, + Format: "", + }, + }, + "type": { + SchemaProps: spec.SchemaProps{ + Description: "Type of this event (Normal, Warning), new types could be added in the future.", + Type: []string{"string"}, + Format: "", + }, + }, + "deprecatedSource": { + SchemaProps: spec.SchemaProps{ + Description: "Deprecated field assuring backward compatibility with core.v1 Event type", + Ref: ref("k8s.io/api/core/v1.EventSource"), + }, + }, + "deprecatedFirstTimestamp": { + SchemaProps: spec.SchemaProps{ + Description: "Deprecated field assuring backward compatibility with core.v1 Event type", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "deprecatedLastTimestamp": { + SchemaProps: spec.SchemaProps{ + Description: "Deprecated field assuring backward compatibility with core.v1 Event type", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "deprecatedCount": { + SchemaProps: spec.SchemaProps{ + Description: "Deprecated field assuring backward compatibility with core.v1 Event type", + Type: []string{"integer"}, + Format: "int32", + }, + }, + }, + Required: []string{"eventTime"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.EventSource", "k8s.io/api/core/v1.ObjectReference", "k8s.io/api/events/v1beta1.EventSeries", "k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, + }, + "k8s.io/api/events/v1beta1.EventList": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "EventList is a list of Event objects.", + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Description: "Items is a list of schema objects.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/events/v1beta1.Event"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/events/v1beta1.Event", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + }, + "k8s.io/api/events/v1beta1.EventSeries": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "EventSeries contain information on series of events, i.e. 
thing that was/is happening continously for some time.", + Properties: map[string]spec.Schema{ + "count": { + SchemaProps: spec.SchemaProps{ + Description: "Number of occurrences in this series up to the last heartbeat time", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "lastObservedTime": { + SchemaProps: spec.SchemaProps{ + Description: "Time when last Event from the series was seen before last heartbeat.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime"), + }, + }, + "state": { + SchemaProps: spec.SchemaProps{ + Description: "Information whether this series is ongoing or finished.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"count", "lastObservedTime", "state"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime"}, + }, + "k8s.io/api/extensions/v1beta1.AllowedFlexVolume": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "AllowedFlexVolume represents a single Flexvolume that is allowed to be used.", + Properties: map[string]spec.Schema{ + "driver": { SchemaProps: spec.SchemaProps{ - Description: "Name of this version (e.g. 'v1').", + Description: "Driver is the name of the Flexvolume driver.", Type: []string{"string"}, Format: "", }, }, }, + Required: []string{"driver"}, }, }, Dependencies: []string{}, @@ -14281,48 +16667,94 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Dependencies: []string{ "k8s.io/api/extensions/v1beta1.CustomMetricTarget"}, }, - "k8s.io/api/extensions/v1beta1.DaemonSet": { + "k8s.io/api/extensions/v1beta1.DaemonSet": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "DEPRECATED - This group version of DaemonSet is deprecated by apps/v1beta2/DaemonSet. See the release notes for more information. DaemonSet represents the configuration of a daemon set.", + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Description: "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + Ref: ref("k8s.io/api/extensions/v1beta1.DaemonSetSpec"), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + Ref: ref("k8s.io/api/extensions/v1beta1.DaemonSetStatus"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/extensions/v1beta1.DaemonSetSpec", "k8s.io/api/extensions/v1beta1.DaemonSetStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + }, + "k8s.io/api/extensions/v1beta1.DaemonSetCondition": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "DEPRECATED - This group version of DaemonSet is deprecated by apps/v1beta2/DaemonSet. See the release notes for more information. DaemonSet represents the configuration of a daemon set.", + Description: "DaemonSetCondition describes the state of a DaemonSet at a certain point.", Properties: map[string]spec.Schema{ - "kind": { + "type": { SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Description: "Type of DaemonSet condition.", Type: []string{"string"}, Format: "", }, }, - "apiVersion": { + "status": { SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Description: "Status of the condition, one of True, False, Unknown.", Type: []string{"string"}, Format: "", }, }, - "metadata": { + "lastTransitionTime": { SchemaProps: spec.SchemaProps{ - Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + Description: "Last time the condition transitioned from one status to another.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, - "spec": { + "reason": { SchemaProps: spec.SchemaProps{ - Description: "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - Ref: ref("k8s.io/api/extensions/v1beta1.DaemonSetSpec"), + Description: "The reason for the condition's last transition.", + Type: []string{"string"}, + Format: "", }, }, - "status": { + "message": { SchemaProps: spec.SchemaProps{ - Description: "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - Ref: ref("k8s.io/api/extensions/v1beta1.DaemonSetStatus"), + Description: "A human readable message indicating details about the transition.", + Type: []string{"string"}, + Format: "", }, }, }, + Required: []string{"type", "status"}, }, }, Dependencies: []string{ - "k8s.io/api/extensions/v1beta1.DaemonSetSpec", "k8s.io/api/extensions/v1beta1.DaemonSetStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, }, "k8s.io/api/extensions/v1beta1.DaemonSetList": { Schema: spec.Schema{ @@ -14488,11 +16920,31 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Format: "int32", }, }, + "conditions": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Represents the latest available observations of a DaemonSet's current state.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/extensions/v1beta1.DaemonSetCondition"), + }, + }, + }, + }, + }, }, Required: []string{"currentNumberScheduled", "numberMisscheduled", "desiredNumberScheduled", "numberReady"}, }, }, - Dependencies: []string{}, + Dependencies: []string{ + "k8s.io/api/extensions/v1beta1.DaemonSetCondition"}, }, "k8s.io/api/extensions/v1beta1.DaemonSetUpdateStrategy": { Schema: spec.Schema{ @@ -15023,7 +17475,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "k8s.io/api/extensions/v1beta1.IPBlock": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "IPBlock describes a particular CIDR (Ex. \"192.168.1.1/24\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.", + Description: "DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. IPBlock describes a particular CIDR (Ex. \"192.168.1.1/24\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.", Properties: map[string]spec.Schema{ "cidr": { SchemaProps: spec.SchemaProps{ @@ -15298,7 +17750,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "k8s.io/api/extensions/v1beta1.NetworkPolicy": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "NetworkPolicy describes what network traffic is allowed for a set of Pods", + Description: "DEPRECATED 1.9 - This group version of NetworkPolicy is deprecated by networking/v1/NetworkPolicy. NetworkPolicy describes what network traffic is allowed for a set of Pods", Properties: map[string]spec.Schema{ "kind": { SchemaProps: spec.SchemaProps{ @@ -15335,7 +17787,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "k8s.io/api/extensions/v1beta1.NetworkPolicyEgressRule": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. 
This type is beta-level in 1.8", + Description: "DEPRECATED 1.9 - This group version of NetworkPolicyEgressRule is deprecated by networking/v1/NetworkPolicyEgressRule. NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. This type is beta-level in 1.8", Properties: map[string]spec.Schema{ "ports": { SchemaProps: spec.SchemaProps{ @@ -15372,7 +17824,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "k8s.io/api/extensions/v1beta1.NetworkPolicyIngressRule": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from.", + Description: "DEPRECATED 1.9 - This group version of NetworkPolicyIngressRule is deprecated by networking/v1/NetworkPolicyIngressRule. This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from.", Properties: map[string]spec.Schema{ "ports": { SchemaProps: spec.SchemaProps{ @@ -15409,7 +17861,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "k8s.io/api/extensions/v1beta1.NetworkPolicyList": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "Network Policy List is a list of NetworkPolicy objects.", + Description: "DEPRECATED 1.9 - This group version of NetworkPolicyList is deprecated by networking/v1/NetworkPolicyList. Network Policy List is a list of NetworkPolicy objects.", Properties: map[string]spec.Schema{ "kind": { SchemaProps: spec.SchemaProps{ @@ -15454,6 +17906,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "k8s.io/api/extensions/v1beta1.NetworkPolicyPeer": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ + Description: "DEPRECATED 1.9 - This group version of NetworkPolicyPeer is deprecated by networking/v1/NetworkPolicyPeer.", Properties: map[string]spec.Schema{ "podSelector": { SchemaProps: spec.SchemaProps{ @@ -15482,6 +17935,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "k8s.io/api/extensions/v1beta1.NetworkPolicyPort": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ + Description: "DEPRECATED 1.9 - This group version of NetworkPolicyPort is deprecated by networking/v1/NetworkPolicyPort.", Properties: map[string]spec.Schema{ "protocol": { SchemaProps: spec.SchemaProps{ @@ -15505,6 +17959,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "k8s.io/api/extensions/v1beta1.NetworkPolicySpec": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ + Description: "DEPRECATED 1.9 - This group version of NetworkPolicySpec is deprecated by networking/v1/NetworkPolicySpec.", Properties: map[string]spec.Schema{ "podSelector": { SchemaProps: spec.SchemaProps{ @@ -15655,7 +18110,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "defaultAddCapabilities": { SchemaProps: spec.SchemaProps{ - Description: "DefaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capabiility in both DefaultAddCapabilities and RequiredDropCapabilities.", + Description: "DefaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. 
You may not list a capability in both DefaultAddCapabilities and RequiredDropCapabilities. Capabilities added here are implicitly allowed, and need not be included in the AllowedCapabilities list.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ @@ -15783,7 +18238,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "allowPrivilegeEscalation": { SchemaProps: spec.SchemaProps{ - Description: "AllowPrivilegeEscalation determines if a pod can request to allow privilege escalation.", + Description: "AllowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.", Type: []string{"boolean"}, Format: "", }, @@ -15801,17 +18256,30 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, }, }, + "allowedFlexVolumes": { + SchemaProps: spec.SchemaProps{ + Description: "AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"Volumes\" field.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/extensions/v1beta1.AllowedFlexVolume"), + }, + }, + }, + }, + }, }, Required: []string{"seLinux", "runAsUser", "supplementalGroups", "fsGroup"}, }, }, Dependencies: []string{ - "k8s.io/api/extensions/v1beta1.AllowedHostPath", "k8s.io/api/extensions/v1beta1.FSGroupStrategyOptions", "k8s.io/api/extensions/v1beta1.HostPortRange", "k8s.io/api/extensions/v1beta1.RunAsUserStrategyOptions", "k8s.io/api/extensions/v1beta1.SELinuxStrategyOptions", "k8s.io/api/extensions/v1beta1.SupplementalGroupsStrategyOptions"}, + "k8s.io/api/extensions/v1beta1.AllowedFlexVolume", "k8s.io/api/extensions/v1beta1.AllowedHostPath", "k8s.io/api/extensions/v1beta1.FSGroupStrategyOptions", "k8s.io/api/extensions/v1beta1.HostPortRange", "k8s.io/api/extensions/v1beta1.RunAsUserStrategyOptions", "k8s.io/api/extensions/v1beta1.SELinuxStrategyOptions", "k8s.io/api/extensions/v1beta1.SupplementalGroupsStrategyOptions"}, }, "k8s.io/api/extensions/v1beta1.ReplicaSet": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for more information. ReplicaSet represents the configuration of a ReplicaSet.", + Description: "DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for more information. 
ReplicaSet ensures that a specified number of pod replicas are running at any given time.", Properties: map[string]spec.Schema{ "kind": { SchemaProps: spec.SchemaProps{ @@ -16172,7 +18640,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "seLinuxOptions": { SchemaProps: spec.SchemaProps{ - Description: "seLinuxOptions required to run as; required for MustRunAs More info: https://git.k8s.io/community/contributors/design-proposals/security_context.md", + Description: "seLinuxOptions required to run as; required for MustRunAs More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", Ref: ref("k8s.io/api/core/v1.SELinuxOptions"), }, }, @@ -16313,185 +18781,6 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Dependencies: []string{ "k8s.io/api/extensions/v1beta1.IDRange"}, }, - "k8s.io/api/extensions/v1beta1.ThirdPartyResource": { - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "A ThirdPartyResource is a generic representation of a resource, it is used by add-ons and plugins to add new resource types to the API. It consists of one or more Versions of the api.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard object metadata", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "description": { - SchemaProps: spec.SchemaProps{ - Description: "Description is the description of this object.", - Type: []string{"string"}, - Format: "", - }, - }, - "versions": { - SchemaProps: spec.SchemaProps{ - Description: "Versions are versions for this third party object", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/extensions/v1beta1.APIVersion"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/extensions/v1beta1.APIVersion", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - }, - "k8s.io/api/extensions/v1beta1.ThirdPartyResourceData": { - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "An internal object, used for versioned storage in etcd. Not exposed to the end user.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard object metadata.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "data": { - SchemaProps: spec.SchemaProps{ - Description: "Data is the raw JSON data for this data.", - Type: []string{"string"}, - Format: "byte", - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - }, - "k8s.io/api/extensions/v1beta1.ThirdPartyResourceDataList": { - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ThirdPartyResrouceDataList is a list of ThirdPartyResourceData.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Description: "Items is the list of ThirdpartyResourceData.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/extensions/v1beta1.ThirdPartyResourceData"), - }, - }, - }, - }, - }, - }, - Required: []string{"items"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/extensions/v1beta1.ThirdPartyResourceData", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - }, - "k8s.io/api/extensions/v1beta1.ThirdPartyResourceList": { - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ThirdPartyResourceList is a list of ThirdPartyResources.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard list metadata.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Description: "Items is the list of ThirdPartyResources.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/extensions/v1beta1.ThirdPartyResource"), - }, - }, - }, - }, - }, - }, - Required: []string{"items"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/extensions/v1beta1.ThirdPartyResource", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - }, "k8s.io/api/imagepolicy/v1alpha1.ImageReview": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ @@ -17132,6 +19421,30 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Dependencies: []string{ "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, }, + "k8s.io/api/rbac/v1.AggregationRule": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", + Properties: map[string]spec.Schema{ + "clusterRoleSelectors": { + SchemaProps: spec.SchemaProps{ + Description: "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, + }, "k8s.io/api/rbac/v1.ClusterRole": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ @@ -17170,12 +19483,18 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, }, }, + "aggregationRule": { + SchemaProps: spec.SchemaProps{ + Description: "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. 
If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.", + Ref: ref("k8s.io/api/rbac/v1.AggregationRule"), + }, + }, }, Required: []string{"rules"}, }, }, Dependencies: []string{ - "k8s.io/api/rbac/v1.PolicyRule", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "k8s.io/api/rbac/v1.AggregationRule", "k8s.io/api/rbac/v1.PolicyRule", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, }, "k8s.io/api/rbac/v1.ClusterRoleBinding": { Schema: spec.Schema{ @@ -17651,10 +19970,34 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, }, }, - Required: []string{"kind", "name"}, + Required: []string{"kind", "name"}, + }, + }, + Dependencies: []string{}, + }, + "k8s.io/api/rbac/v1alpha1.AggregationRule": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", + Properties: map[string]spec.Schema{ + "clusterRoleSelectors": { + SchemaProps: spec.SchemaProps{ + Description: "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"), + }, + }, + }, + }, + }, + }, }, }, - Dependencies: []string{}, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, }, "k8s.io/api/rbac/v1alpha1.ClusterRole": { Schema: spec.Schema{ @@ -17694,12 +20037,18 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, }, }, + "aggregationRule": { + SchemaProps: spec.SchemaProps{ + Description: "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.", + Ref: ref("k8s.io/api/rbac/v1alpha1.AggregationRule"), + }, + }, }, Required: []string{"rules"}, }, }, Dependencies: []string{ - "k8s.io/api/rbac/v1alpha1.PolicyRule", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "k8s.io/api/rbac/v1alpha1.AggregationRule", "k8s.io/api/rbac/v1alpha1.PolicyRule", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, }, "k8s.io/api/rbac/v1alpha1.ClusterRoleBinding": { Schema: spec.Schema{ @@ -18180,6 +20529,30 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, Dependencies: []string{}, }, + "k8s.io/api/rbac/v1beta1.AggregationRule": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", + Properties: map[string]spec.Schema{ + "clusterRoleSelectors": { + SchemaProps: spec.SchemaProps{ + Description: "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. 
If any of the selectors match, then the ClusterRole's permissions will be added", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, + }, "k8s.io/api/rbac/v1beta1.ClusterRole": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ @@ -18218,12 +20591,18 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, }, }, + "aggregationRule": { + SchemaProps: spec.SchemaProps{ + Description: "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.", + Ref: ref("k8s.io/api/rbac/v1beta1.AggregationRule"), + }, + }, }, Required: []string{"rules"}, }, }, Dependencies: []string{ - "k8s.io/api/rbac/v1beta1.PolicyRule", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "k8s.io/api/rbac/v1beta1.AggregationRule", "k8s.io/api/rbac/v1beta1.PolicyRule", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, }, "k8s.io/api/rbac/v1beta1.ClusterRoleBinding": { Schema: spec.Schema{ @@ -18401,7 +20780,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "resources": { SchemaProps: spec.SchemaProps{ - Description: "Resources is a list of resources this rule applies to. ResourceAll represents all resources.", + Description: "Resources is a list of resources this rule applies to. '*' represents all resources in the specified apiGroups. '*/foo' represents the subresource 'foo' for all resources in the specified apiGroups.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ @@ -19025,6 +21404,13 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Format: "", }, }, + "volumeBindingMode": { + SchemaProps: spec.SchemaProps{ + Description: "VolumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is alpha-level and is only honored by servers that enable the VolumeScheduling feature.", + Type: []string{"string"}, + Format: "", + }, + }, }, Required: []string{"provisioner"}, }, @@ -19077,6 +21463,213 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Dependencies: []string{ "k8s.io/api/storage/v1.StorageClass", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, }, + "k8s.io/api/storage/v1alpha1.VolumeAttachment": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Description: "Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", + Ref: ref("k8s.io/api/storage/v1alpha1.VolumeAttachmentSpec"), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", + Ref: ref("k8s.io/api/storage/v1alpha1.VolumeAttachmentStatus"), + }, + }, + }, + Required: []string{"spec"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/storage/v1alpha1.VolumeAttachmentSpec", "k8s.io/api/storage/v1alpha1.VolumeAttachmentStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + }, + "k8s.io/api/storage/v1alpha1.VolumeAttachmentList": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "VolumeAttachmentList is a collection of VolumeAttachment objects.", + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Description: "Items is the list of VolumeAttachments", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/storage/v1alpha1.VolumeAttachment"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/storage/v1alpha1.VolumeAttachment", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + }, + "k8s.io/api/storage/v1alpha1.VolumeAttachmentSource": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. 
Exactly one member can be set.", + Properties: map[string]spec.Schema{ + "persistentVolumeName": { + SchemaProps: spec.SchemaProps{ + Description: "Name of the persistent volume to attach.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{}, + }, + "k8s.io/api/storage/v1alpha1.VolumeAttachmentSpec": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "VolumeAttachmentSpec is the specification of a VolumeAttachment request.", + Properties: map[string]spec.Schema{ + "attacher": { + SchemaProps: spec.SchemaProps{ + Description: "Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().", + Type: []string{"string"}, + Format: "", + }, + }, + "source": { + SchemaProps: spec.SchemaProps{ + Description: "Source represents the volume that should be attached.", + Ref: ref("k8s.io/api/storage/v1alpha1.VolumeAttachmentSource"), + }, + }, + "nodeName": { + SchemaProps: spec.SchemaProps{ + Description: "The node that the volume should be attached to.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"attacher", "source", "nodeName"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/storage/v1alpha1.VolumeAttachmentSource"}, + }, + "k8s.io/api/storage/v1alpha1.VolumeAttachmentStatus": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "VolumeAttachmentStatus is the status of a VolumeAttachment request.", + Properties: map[string]spec.Schema{ + "attached": { + SchemaProps: spec.SchemaProps{ + Description: "Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "attachmentMetadata": { + SchemaProps: spec.SchemaProps{ + Description: "Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "attachError": { + SchemaProps: spec.SchemaProps{ + Description: "The last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + Ref: ref("k8s.io/api/storage/v1alpha1.VolumeError"), + }, + }, + "detachError": { + SchemaProps: spec.SchemaProps{ + Description: "The last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. 
the external-attacher.", + Ref: ref("k8s.io/api/storage/v1alpha1.VolumeError"), + }, + }, + }, + Required: []string{"attached"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/storage/v1alpha1.VolumeError"}, + }, + "k8s.io/api/storage/v1alpha1.VolumeError": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "VolumeError captures an error encountered during a volume operation.", + Properties: map[string]spec.Schema{ + "time": { + SchemaProps: spec.SchemaProps{ + Description: "Time the error was encountered.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "message": { + SchemaProps: spec.SchemaProps{ + Description: "String detailing the error encountered during Attach or Detach operation. This string maybe logged, so it should not contain sensitive information.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, + }, "k8s.io/api/storage/v1beta1.StorageClass": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ @@ -19151,6 +21744,13 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Format: "", }, }, + "volumeBindingMode": { + SchemaProps: spec.SchemaProps{ + Description: "VolumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is alpha-level and is only honored by servers that enable the VolumeScheduling feature.", + Type: []string{"string"}, + Format: "", + }, + }, }, Required: []string{"provisioner"}, }, @@ -19422,7 +22022,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "validation": { SchemaProps: spec.SchemaProps{ - Description: "Validation describes the validation methods for CustomResources This field is alpha-level and should only be sent to servers that enable the CustomResourceValidation feature.", + Description: "Validation describes the validation methods for CustomResources", Ref: ref("k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1.CustomResourceValidation"), }, }, @@ -20240,7 +22840,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "propagationPolicy": { SchemaProps: spec.SchemaProps{ - Description: "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.", + Description: "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.", Type: []string{"string"}, Format: "", }, @@ -20878,7 +23478,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "deletionTimestamp": { SchemaProps: spec.SchemaProps{ - Description: "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. 
The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field. Once set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + Description: "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -21887,12 +24487,24 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Ref: ref("k8s.io/apimachinery/pkg/runtime.Unknown"), }, }, + "requestReceivedTimestamp": { + SchemaProps: spec.SchemaProps{ + Description: "Time the request reached the apiserver.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime"), + }, + }, + "stageTimestamp": { + SchemaProps: spec.SchemaProps{ + Description: "Time the request reached current audit stage.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime"), + }, + }, }, Required: []string{"level", "timestamp", "auditID", "stage", "requestURI", "verb", "user"}, }, }, Dependencies: []string{ - "k8s.io/api/authentication/v1.UserInfo", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta", "k8s.io/apimachinery/pkg/apis/meta/v1.Status", "k8s.io/apimachinery/pkg/apis/meta/v1.Time", "k8s.io/apimachinery/pkg/runtime.Unknown", "k8s.io/apiserver/pkg/apis/audit/v1alpha1.ObjectReference"}, + "k8s.io/api/authentication/v1.UserInfo", "k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta", "k8s.io/apimachinery/pkg/apis/meta/v1.Status", "k8s.io/apimachinery/pkg/apis/meta/v1.Time", "k8s.io/apimachinery/pkg/runtime.Unknown", "k8s.io/apiserver/pkg/apis/audit/v1alpha1.ObjectReference"}, }, "k8s.io/apiserver/pkg/apis/audit/v1alpha1.EventList": { Schema: spec.Schema{ @@ -22072,6 +24684,20 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, }, }, + "omitStages": { + SchemaProps: spec.SchemaProps{ + Description: "OmitStages is a list of stages for which no events are created. Note that this can also be specified per rule in which case the union of both are omitted.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, }, Required: []string{"rules"}, }, @@ -22219,7 +24845,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "omitStages": { SchemaProps: spec.SchemaProps{ - Description: "OmitStages specify events generated in which stages will not be emitted to backend. An empty list means no restrictions will apply.", + Description: "OmitStages is a list of stages for which no events are created. Note that this can also be specified policy wide in which case the union of both are omitted. An empty list means no restrictions will apply.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ @@ -22259,7 +24885,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "metadata": { SchemaProps: spec.SchemaProps{ - Description: "ObjectMeta is included for interoperability with API infrastructure.", + Description: "ObjectMeta is included for interoperability with API infrastructure. DEPRECATED: Use StageTimestamp which supports micro second instead of ObjectMeta.CreateTimestamp and the rest of the object is not used", Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), }, }, @@ -22272,7 +24898,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "timestamp": { SchemaProps: spec.SchemaProps{ - Description: "Time the request reached the apiserver.", + Description: "Time the request reached the apiserver. 
DEPRECATED: Use RequestReceivedTimestamp which supports micro second instead.", Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -22354,12 +24980,24 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Ref: ref("k8s.io/apimachinery/pkg/runtime.Unknown"), }, }, + "requestReceivedTimestamp": { + SchemaProps: spec.SchemaProps{ + Description: "Time the request reached the apiserver.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime"), + }, + }, + "stageTimestamp": { + SchemaProps: spec.SchemaProps{ + Description: "Time the request reached current audit stage.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime"), + }, + }, }, Required: []string{"level", "timestamp", "auditID", "stage", "requestURI", "verb", "user"}, }, }, Dependencies: []string{ - "k8s.io/api/authentication/v1.UserInfo", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta", "k8s.io/apimachinery/pkg/apis/meta/v1.Status", "k8s.io/apimachinery/pkg/apis/meta/v1.Time", "k8s.io/apimachinery/pkg/runtime.Unknown", "k8s.io/apiserver/pkg/apis/audit/v1beta1.ObjectReference"}, + "k8s.io/api/authentication/v1.UserInfo", "k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta", "k8s.io/apimachinery/pkg/apis/meta/v1.Status", "k8s.io/apimachinery/pkg/apis/meta/v1.Time", "k8s.io/apimachinery/pkg/runtime.Unknown", "k8s.io/apiserver/pkg/apis/audit/v1beta1.ObjectReference"}, }, "k8s.io/apiserver/pkg/apis/audit/v1beta1.EventList": { Schema: spec.Schema{ @@ -22547,6 +25185,20 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, }, }, + "omitStages": { + SchemaProps: spec.SchemaProps{ + Description: "OmitStages is a list of stages for which no events are created. Note that this can also be specified per rule in which case the union of both are omitted.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, }, Required: []string{"rules"}, }, @@ -22694,7 +25346,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "omitStages": { SchemaProps: spec.SchemaProps{ - Description: "OmitStages specify events generated in which stages will not be emitted to backend. An empty list means no restrictions will apply.", + Description: "OmitStages is a list of stages for which no events are created. Note that this can also be specified policy wide in which case the union of both are omitted. An empty list means no restrictions will apply.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ @@ -22884,7 +25536,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "groupPriorityMinimum": { SchemaProps: spec.SchemaProps{ - Description: "GroupPriorityMininum is the priority this group should have at least. Higher priority means that the group is prefered by clients over lower priority ones. Note that other versions of this group might specify even higher GroupPriorityMininum values such that the whole group gets a higher priority. The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). The secondary sort is based on the alphabetical comparison of the name of the object. 
(v1.bar before v1.foo) We'd recommend something like: *.k8s.io (except extensions) at 18000 and PaaSes (OpenShift, Deis) are recommended to be in the 2000s", + Description: "GroupPriorityMininum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. Note that other versions of this group might specify even higher GroupPriorityMininum values such that the whole group gets a higher priority. The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo) We'd recommend something like: *.k8s.io (except extensions) at 18000 and PaaSes (OpenShift, Deis) are recommended to be in the 2000s", Type: []string{"integer"}, Format: "int32", }, @@ -22924,326 +25576,35 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Ref: ref("k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1.APIServiceCondition"), }, }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1.APIServiceCondition"}, - }, - "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1.ServiceReference": { - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ServiceReference holds a reference to Service.legacy.k8s.io", - Properties: map[string]spec.Schema{ - "namespace": { - SchemaProps: spec.SchemaProps{ - Description: "Namespace is the namespace of the service", - Type: []string{"string"}, - Format: "", - }, - }, - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name is the name of the service", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{}, - }, - "k8s.io/kubernetes/federation/apis/federation/v1beta1.Cluster": { - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Information about a registered cluster in a federated kubernetes setup. Clusters are not namespaced and have unique names in the federation.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Description: "Spec defines the behavior of the Cluster.", - Ref: ref("k8s.io/kubernetes/federation/apis/federation/v1beta1.ClusterSpec"), - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Description: "Status describes the current status of a Cluster", - Ref: ref("k8s.io/kubernetes/federation/apis/federation/v1beta1.ClusterStatus"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta", "k8s.io/kubernetes/federation/apis/federation/v1beta1.ClusterSpec", "k8s.io/kubernetes/federation/apis/federation/v1beta1.ClusterStatus"}, - }, - "k8s.io/kubernetes/federation/apis/federation/v1beta1.ClusterCondition": { - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ClusterCondition describes current state of a cluster.", - Properties: map[string]spec.Schema{ - "type": { - SchemaProps: spec.SchemaProps{ - Description: "Type of cluster condition, Complete or Failed.", - Type: []string{"string"}, - Format: "", - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Description: "Status of the condition, one of True, False, Unknown.", - Type: []string{"string"}, - Format: "", - }, - }, - "lastProbeTime": { - SchemaProps: spec.SchemaProps{ - Description: "Last time the condition was checked.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), - }, - }, - "lastTransitionTime": { - SchemaProps: spec.SchemaProps{ - Description: "Last time the condition transit from one status to another.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), - }, - }, - "reason": { - SchemaProps: spec.SchemaProps{ - Description: "(brief) reason for the condition's last transition.", - Type: []string{"string"}, - Format: "", - }, - }, - "message": { - SchemaProps: spec.SchemaProps{ - Description: "Human readable message indicating details about last transition.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"type", "status"}, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, - }, - "k8s.io/kubernetes/federation/apis/federation/v1beta1.ClusterList": { - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "A list of all the kubernetes clusters registered to the federation", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Description: "List of Cluster objects.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/kubernetes/federation/apis/federation/v1beta1.Cluster"), - }, - }, - }, - }, - }, - }, - Required: []string{"items"}, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta", "k8s.io/kubernetes/federation/apis/federation/v1beta1.Cluster"}, - }, - "k8s.io/kubernetes/federation/apis/federation/v1beta1.ClusterSelectorRequirement": { - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ClusterSelectorRequirement contains values, a key, and an operator that relates the key and values. The zero value of ClusterSelectorRequirement is invalid. ClusterSelectorRequirement implements both set based match and exact match", - Properties: map[string]spec.Schema{ - "key": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "key", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - "operator": { - SchemaProps: spec.SchemaProps{ - Description: "The Operator defines how the Key is matched to the Values. One of \"in\", \"notin\", \"exists\", \"!\", \"=\", \"!=\", \"gt\" or \"lt\".", - Type: []string{"string"}, - Format: "", - }, - }, - "values": { - SchemaProps: spec.SchemaProps{ - Description: "An array of string values. If the operator is \"in\" or \"notin\", the values array must be non-empty. If the operator is \"exists\" or \"!\", the values array must be empty. If the operator is \"gt\" or \"lt\", the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - }, - Required: []string{"key", "operator"}, - }, - }, - Dependencies: []string{}, - }, - "k8s.io/kubernetes/federation/apis/federation/v1beta1.ClusterSpec": { - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ClusterSpec describes the attributes of a kubernetes cluster.", - Properties: map[string]spec.Schema{ - "serverAddressByClientCIDRs": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "clientCIDR", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "A map of client CIDR to server address. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/kubernetes/federation/apis/federation/v1beta1.ServerAddressByClientCIDR"), - }, - }, - }, - }, - }, - "secretRef": { - SchemaProps: spec.SchemaProps{ - Description: "Name of the secret containing kubeconfig to access this cluster. The secret is read from the kubernetes cluster that is hosting federation control plane. 
Admin needs to ensure that the required secret exists. Secret should be in the same namespace where federation control plane is hosted and it should have kubeconfig in its data with key \"kubeconfig\". This will later be changed to a reference to secret in federation control plane when the federation control plane supports secrets. This can be left empty if the cluster allows insecure access.", - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), - }, - }, - }, - Required: []string{"serverAddressByClientCIDRs"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/kubernetes/federation/apis/federation/v1beta1.ServerAddressByClientCIDR"}, - }, - "k8s.io/kubernetes/federation/apis/federation/v1beta1.ClusterStatus": { - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ClusterStatus is information about the current status of a cluster updated by cluster controller periodically.", - Properties: map[string]spec.Schema{ - "conditions": { - SchemaProps: spec.SchemaProps{ - Description: "Conditions is an array of current cluster conditions.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/kubernetes/federation/apis/federation/v1beta1.ClusterCondition"), - }, - }, - }, - }, - }, - "zones": { - SchemaProps: spec.SchemaProps{ - Description: "Zones is the list of availability zones in which the nodes of the cluster exist, e.g. 'us-east1-a'. These will always be in the same region.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "region": { - SchemaProps: spec.SchemaProps{ - Description: "Region is the name of the region in which all of the nodes in the cluster exist. e.g. 'us-east1'.", - Type: []string{"string"}, - Format: "", + }, }, }, }, }, }, Dependencies: []string{ - "k8s.io/kubernetes/federation/apis/federation/v1beta1.ClusterCondition"}, + "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1.APIServiceCondition"}, }, - "k8s.io/kubernetes/federation/apis/federation/v1beta1.ServerAddressByClientCIDR": { + "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1.ServiceReference": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match.", + Description: "ServiceReference holds a reference to Service.legacy.k8s.io", Properties: map[string]spec.Schema{ - "clientCIDR": { + "namespace": { SchemaProps: spec.SchemaProps{ - Description: "The CIDR with which clients can match their IP to figure out the server address that they should use.", + Description: "Namespace is the namespace of the service", Type: []string{"string"}, Format: "", }, }, - "serverAddress": { + "name": { SchemaProps: spec.SchemaProps{ - Description: "Address of this server, suitable for a client that matches the above CIDR. 
This can be a hostname, hostname:port, IP or IP:port.", + Description: "Name is the name of the service", Type: []string{"string"}, Format: "", }, }, }, - Required: []string{"clientCIDR", "serverAddress"}, }, }, Dependencies: []string{}, @@ -23375,268 +25736,15 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "burst": { SchemaProps: spec.SchemaProps{ Description: "burst allows extra queries to accumulate when a client is exceeding its rate.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - }, - Required: []string{"kubeconfig", "acceptContentTypes", "contentType", "qps", "burst"}, - }, - }, - Dependencies: []string{}, - }, - "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1.KubeProxyConfiguration": { - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "KubeProxyConfiguration contains everything necessary to configure the Kubernetes proxy server.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "featureGates": { - SchemaProps: spec.SchemaProps{ - Description: "featureGates is a comma-separated list of key=value pairs that control which alpha/beta features are enabled.\n\ncomponents to use config files because local-up-cluster.sh only supports the --feature-gates flag right now, which is comma-separated key=value pairs.", - Type: []string{"string"}, - Format: "", - }, - }, - "bindAddress": { - SchemaProps: spec.SchemaProps{ - Description: "bindAddress is the IP address for the proxy server to serve on (set to 0.0.0.0 for all interfaces)", - Type: []string{"string"}, - Format: "", - }, - }, - "healthzBindAddress": { - SchemaProps: spec.SchemaProps{ - Description: "healthzBindAddress is the IP address and port for the health check server to serve on, defaulting to 0.0.0.0:10256", - Type: []string{"string"}, - Format: "", - }, - }, - "metricsBindAddress": { - SchemaProps: spec.SchemaProps{ - Description: "metricsBindAddress is the IP address and port for the metrics server to serve on, defaulting to 127.0.0.1:10249 (set to 0.0.0.0 for all interfaces)", - Type: []string{"string"}, - Format: "", - }, - }, - "enableProfiling": { - SchemaProps: spec.SchemaProps{ - Description: "enableProfiling enables profiling via web interface on /debug/pprof handler. Profiling handlers will be handled by metrics server.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "clusterCIDR": { - SchemaProps: spec.SchemaProps{ - Description: "clusterCIDR is the CIDR range of the pods in the cluster. It is used to bridge traffic coming from outside of the cluster. 
If not provided, no off-cluster bridging will be performed.", - Type: []string{"string"}, - Format: "", - }, - }, - "hostnameOverride": { - SchemaProps: spec.SchemaProps{ - Description: "hostnameOverride, if non-empty, will be used as the identity instead of the actual hostname.", - Type: []string{"string"}, - Format: "", - }, - }, - "clientConnection": { - SchemaProps: spec.SchemaProps{ - Description: "clientConnection specifies the kubeconfig file and client connection settings for the proxy server to use when communicating with the apiserver.", - Ref: ref("k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1.ClientConnectionConfiguration"), - }, - }, - "iptables": { - SchemaProps: spec.SchemaProps{ - Description: "iptables contains iptables-related configuration options.", - Ref: ref("k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1.KubeProxyIPTablesConfiguration"), - }, - }, - "ipvs": { - SchemaProps: spec.SchemaProps{ - Description: "ipvs contains ipvs-related configuration options.", - Ref: ref("k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1.KubeProxyIPVSConfiguration"), - }, - }, - "oomScoreAdj": { - SchemaProps: spec.SchemaProps{ - Description: "oomScoreAdj is the oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000]", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "mode": { - SchemaProps: spec.SchemaProps{ - Description: "mode specifies which proxy mode to use.", - Type: []string{"string"}, - Format: "", - }, - }, - "portRange": { - SchemaProps: spec.SchemaProps{ - Description: "portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.", - Type: []string{"string"}, - Format: "", - }, - }, - "resourceContainer": { - SchemaProps: spec.SchemaProps{ - Description: "resourceContainer is the bsolute name of the resource-only container to create and run the Kube-proxy in (Default: /kube-proxy).", - Type: []string{"string"}, - Format: "", - }, - }, - "udpTimeoutMilliseconds": { - SchemaProps: spec.SchemaProps{ - Description: "udpIdleTimeout is how long an idle UDP connection will be kept open (e.g. '250ms', '2s'). Must be greater than 0. Only applicable for proxyMode=userspace.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), - }, - }, - "conntrack": { - SchemaProps: spec.SchemaProps{ - Description: "conntrack contains conntrack-related configuration options.", - Ref: ref("k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1.KubeProxyConntrackConfiguration"), - }, - }, - "configSyncPeriod": { - SchemaProps: spec.SchemaProps{ - Description: "configSyncPeriod is how often configuration from the apiserver is refreshed. 
Must be greater than 0.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), - }, - }, - }, - Required: []string{"featureGates", "bindAddress", "healthzBindAddress", "metricsBindAddress", "enableProfiling", "clusterCIDR", "hostnameOverride", "clientConnection", "iptables", "ipvs", "oomScoreAdj", "mode", "portRange", "resourceContainer", "udpTimeoutMilliseconds", "conntrack", "configSyncPeriod"}, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.Duration", "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1.ClientConnectionConfiguration", "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1.KubeProxyConntrackConfiguration", "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1.KubeProxyIPTablesConfiguration", "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1.KubeProxyIPVSConfiguration"}, - }, - "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1.KubeProxyConntrackConfiguration": { - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "KubeProxyConntrackConfiguration contains conntrack settings for the Kubernetes proxy server.", - Properties: map[string]spec.Schema{ - "max": { - SchemaProps: spec.SchemaProps{ - Description: "max is the maximum number of NAT connections to track (0 to leave as-is). This takes precedence over conntrackMaxPerCore and conntrackMin.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "maxPerCore": { - SchemaProps: spec.SchemaProps{ - Description: "maxPerCore is the maximum number of NAT connections to track per CPU core (0 to leave the limit as-is and ignore conntrackMin).", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "min": { - SchemaProps: spec.SchemaProps{ - Description: "min is the minimum value of connect-tracking records to allocate, regardless of conntrackMaxPerCore (set conntrackMaxPerCore=0 to leave the limit as-is).", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "tcpEstablishedTimeout": { - SchemaProps: spec.SchemaProps{ - Description: "tcpEstablishedTimeout is how long an idle TCP connection will be kept open (e.g. '2s'). Must be greater than 0.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), - }, - }, - "tcpCloseWaitTimeout": { - SchemaProps: spec.SchemaProps{ - Description: "tcpCloseWaitTimeout is how long an idle conntrack entry in CLOSE_WAIT state will remain in the conntrack table. (e.g. '60s'). Must be greater than 0 to set.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), - }, - }, - }, - Required: []string{"max", "maxPerCore", "min", "tcpEstablishedTimeout", "tcpCloseWaitTimeout"}, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"}, - }, - "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1.KubeProxyIPTablesConfiguration": { - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "KubeProxyIPTablesConfiguration contains iptables-related configuration details for the Kubernetes proxy server.", - Properties: map[string]spec.Schema{ - "masqueradeBit": { - SchemaProps: spec.SchemaProps{ - Description: "masqueradeBit is the bit of the iptables fwmark space to use for SNAT if using the pure iptables proxy mode. 
Values must be within the range [0, 31].", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "masqueradeAll": { - SchemaProps: spec.SchemaProps{ - Description: "masqueradeAll tells kube-proxy to SNAT everything if using the pure iptables proxy mode.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "syncPeriod": { - SchemaProps: spec.SchemaProps{ - Description: "syncPeriod is the period that iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), - }, - }, - "minSyncPeriod": { - SchemaProps: spec.SchemaProps{ - Description: "minSyncPeriod is the minimum period that iptables rules are refreshed (e.g. '5s', '1m', '2h22m').", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), - }, - }, - }, - Required: []string{"masqueradeBit", "masqueradeAll", "syncPeriod", "minSyncPeriod"}, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"}, - }, - "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1.KubeProxyIPVSConfiguration": { - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "KubeProxyIPVSConfiguration contains ipvs-related configuration details for the Kubernetes proxy server.", - Properties: map[string]spec.Schema{ - "syncPeriod": { - SchemaProps: spec.SchemaProps{ - Description: "syncPeriod is the period that ipvs rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), - }, - }, - "minSyncPeriod": { - SchemaProps: spec.SchemaProps{ - Description: "minSyncPeriod is the minimum period that ipvs rules are refreshed (e.g. '5s', '1m', '2h22m').", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), - }, - }, - "scheduler": { - SchemaProps: spec.SchemaProps{ - Description: "ipvs scheduler", - Type: []string{"string"}, - Format: "", + Type: []string{"integer"}, + Format: "int32", }, }, }, - Required: []string{"syncPeriod", "minSyncPeriod", "scheduler"}, + Required: []string{"kubeconfig", "acceptContentTypes", "contentType", "qps", "burst"}, }, }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"}, + Dependencies: []string{}, }, "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1.KubeSchedulerConfiguration": { Schema: spec.Schema{ @@ -23656,96 +25764,117 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Format: "", }, }, - "port": { + "schedulerName": { + SchemaProps: spec.SchemaProps{ + Description: "SchedulerName is name of the scheduler, used to select which pods will be processed by this scheduler, based on pod's \"spec.SchedulerName\".", + Type: []string{"string"}, + Format: "", + }, + }, + "algorithmSource": { + SchemaProps: spec.SchemaProps{ + Description: "AlgorithmSource specifies the scheduler algorithm source.", + Ref: ref("k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1.SchedulerAlgorithmSource"), + }, + }, + "hardPodAffinitySymmetricWeight": { SchemaProps: spec.SchemaProps{ - Description: "port is the port that the scheduler's http service runs on.", + Description: "RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule corresponding to every RequiredDuringScheduling affinity rule. 
HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 0-100.", Type: []string{"integer"}, Format: "int32", }, }, - "address": { + "leaderElection": { SchemaProps: spec.SchemaProps{ - Description: "address is the IP address to serve on.", - Type: []string{"string"}, - Format: "", + Description: "LeaderElection defines the configuration of leader election client.", + Ref: ref("k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1.KubeSchedulerLeaderElectionConfiguration"), + }, + }, + "clientConnection": { + SchemaProps: spec.SchemaProps{ + Description: "ClientConnection specifies the kubeconfig file and client connection settings for the proxy server to use when communicating with the apiserver.", + Ref: ref("k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1.ClientConnectionConfiguration"), }, }, - "algorithmProvider": { + "healthzBindAddress": { SchemaProps: spec.SchemaProps{ - Description: "algorithmProvider is the scheduling algorithm provider to use.", + Description: "HealthzBindAddress is the IP address and port for the health check server to serve on, defaulting to 0.0.0.0:10251", Type: []string{"string"}, Format: "", }, }, - "policyConfigFile": { + "metricsBindAddress": { SchemaProps: spec.SchemaProps{ - Description: "policyConfigFile is the filepath to the scheduler policy configuration.", + Description: "MetricsBindAddress is the IP address and port for the metrics server to serve on, defaulting to 0.0.0.0:10251.", Type: []string{"string"}, Format: "", }, }, "enableProfiling": { SchemaProps: spec.SchemaProps{ - Description: "enableProfiling enables profiling via web interface.", + Description: "EnableProfiling enables profiling via web interface on /debug/pprof handler. Profiling handlers will be handled by metrics server.", Type: []string{"boolean"}, Format: "", }, }, "enableContentionProfiling": { SchemaProps: spec.SchemaProps{ - Description: "enableContentionProfiling enables lock contention profiling, if enableProfiling is true.", + Description: "EnableContentionProfiling enables lock contention profiling, if EnableProfiling is true.", Type: []string{"boolean"}, Format: "", }, }, - "contentType": { + "failureDomains": { SchemaProps: spec.SchemaProps{ - Description: "contentType is contentType of requests sent to apiserver.", + Description: "Indicate the \"all topologies\" set for empty topologyKey when it's used for PreferredDuringScheduling pod anti-affinity.", Type: []string{"string"}, Format: "", }, }, - "kubeAPIQPS": { + }, + Required: []string{"schedulerName", "algorithmSource", "hardPodAffinitySymmetricWeight", "leaderElection", "clientConnection", "healthzBindAddress", "metricsBindAddress", "enableProfiling", "enableContentionProfiling", "failureDomains"}, + }, + }, + Dependencies: []string{ + "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1.ClientConnectionConfiguration", "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1.KubeSchedulerLeaderElectionConfiguration", "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1.SchedulerAlgorithmSource"}, + }, + "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1.KubeSchedulerLeaderElectionConfiguration": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "KubeSchedulerLeaderElectionConfiguration expands LeaderElectionConfiguration to include scheduler specific configuration.", + Properties: map[string]spec.Schema{ + "leaderElect": { SchemaProps: spec.SchemaProps{ - Description: "kubeAPIQPS is the QPS to use while talking with kubernetes 
apiserver.", - Type: []string{"number"}, - Format: "float", + Description: "leaderElect enables a leader election client to gain leadership before executing the main loop. Enable this when running replicated components for high availability.", + Type: []string{"boolean"}, + Format: "", }, }, - "kubeAPIBurst": { + "leaseDuration": { SchemaProps: spec.SchemaProps{ - Description: "kubeAPIBurst is the QPS burst to use while talking with kubernetes apiserver.", - Type: []string{"integer"}, - Format: "int32", + Description: "leaseDuration is the duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate. This is only applicable if leader election is enabled.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, - "schedulerName": { + "renewDeadline": { SchemaProps: spec.SchemaProps{ - Description: "schedulerName is name of the scheduler, used to select which pods will be processed by this scheduler, based on pod's \"spec.SchedulerName\".", - Type: []string{"string"}, - Format: "", + Description: "renewDeadline is the interval between attempts by the acting master to renew a leadership slot before it stops leading. This must be less than or equal to the lease duration. This is only applicable if leader election is enabled.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, - "hardPodAffinitySymmetricWeight": { + "retryPeriod": { SchemaProps: spec.SchemaProps{ - Description: "RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule corresponding to every RequiredDuringScheduling affinity rule. HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 0-100.", - Type: []string{"integer"}, - Format: "int32", + Description: "retryPeriod is the duration the clients should wait between attempting acquisition and renewal of a leadership. This is only applicable if leader election is enabled.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, - "failureDomains": { + "resourceLock": { SchemaProps: spec.SchemaProps{ - Description: "Indicate the \"all topologies\" set for empty topologyKey when it's used for PreferredDuringScheduling pod anti-affinity.", + Description: "resourceLock indicates the resource object type that will be used to lock during leader election cycles.", Type: []string{"string"}, Format: "", }, }, - "leaderElection": { - SchemaProps: spec.SchemaProps{ - Description: "leaderElection defines the configuration of leader election client.", - Ref: ref("k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1.LeaderElectionConfiguration"), - }, - }, "lockObjectNamespace": { SchemaProps: spec.SchemaProps{ Description: "LockObjectNamespace defines the namespace of the lock object", @@ -23760,33 +25889,12 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Format: "", }, }, - "policyConfigMapName": { - SchemaProps: spec.SchemaProps{ - Description: "PolicyConfigMapName is the name of the ConfigMap object that specifies the scheduler's policy config. If UseLegacyPolicyConfig is true, scheduler uses PolicyConfigFile. 
If UseLegacyPolicyConfig is false and PolicyConfigMapName is not empty, the ConfigMap object with this name must exist in PolicyConfigMapNamespace before scheduler initialization.", - Type: []string{"string"}, - Format: "", - }, - }, - "policyConfigMapNamespace": { - SchemaProps: spec.SchemaProps{ - Description: "PolicyConfigMapNamespace is the namespace where the above policy config map is located. If none is provided default system namespace (\"kube-system\") will be used.", - Type: []string{"string"}, - Format: "", - }, - }, - "useLegacyPolicyConfig": { - SchemaProps: spec.SchemaProps{ - Description: "UseLegacyPolicyConfig tells the scheduler to ignore Policy ConfigMap and to use PolicyConfigFile if available.", - Type: []string{"boolean"}, - Format: "", - }, - }, }, - Required: []string{"port", "address", "algorithmProvider", "policyConfigFile", "enableProfiling", "enableContentionProfiling", "contentType", "kubeAPIQPS", "kubeAPIBurst", "schedulerName", "hardPodAffinitySymmetricWeight", "failureDomains", "leaderElection", "lockObjectNamespace", "lockObjectName", "policyConfigMapName", "policyConfigMapNamespace", "useLegacyPolicyConfig"}, + Required: []string{"leaderElect", "leaseDuration", "renewDeadline", "retryPeriod", "resourceLock", "lockObjectNamespace", "lockObjectName"}, }, }, Dependencies: []string{ - "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1.LeaderElectionConfiguration"}, + "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"}, }, "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1.LeaderElectionConfiguration": { Schema: spec.Schema{ @@ -23832,6 +25940,96 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Dependencies: []string{ "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"}, }, + "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1.SchedulerAlgorithmSource": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "SchedulerAlgorithmSource is the source of a scheduler algorithm. 
One source field must be specified, and source fields are mutually exclusive.", + Properties: map[string]spec.Schema{ + "policy": { + SchemaProps: spec.SchemaProps{ + Description: "Policy is a policy based algorithm source.", + Ref: ref("k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1.SchedulerPolicySource"), + }, + }, + "provider": { + SchemaProps: spec.SchemaProps{ + Description: "Provider is the name of a scheduling algorithm provider to use.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1.SchedulerPolicySource"}, + }, + "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1.SchedulerPolicyConfigMapSource": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "SchedulerPolicyConfigMapSource is a policy serialized into a config map value under the SchedulerPolicyConfigMapKey key.", + Properties: map[string]spec.Schema{ + "namespace": { + SchemaProps: spec.SchemaProps{ + Description: "Namespace is the namespace of the policy config map.", + Type: []string{"string"}, + Format: "", + }, + }, + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the name of hte policy config map.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"namespace", "name"}, + }, + }, + Dependencies: []string{}, + }, + "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1.SchedulerPolicyFileSource": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "SchedulerPolicyFileSource is a policy serialized to disk and accessed via path.", + Properties: map[string]spec.Schema{ + "path": { + SchemaProps: spec.SchemaProps{ + Description: "Path is the location of a serialized policy.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"path"}, + }, + }, + Dependencies: []string{}, + }, + "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1.SchedulerPolicySource": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "SchedulerPolicySource configures a means to obtain a scheduler Policy. One source field must be specified, and source fields are mutually exclusive.", + Properties: map[string]spec.Schema{ + "file": { + SchemaProps: spec.SchemaProps{ + Description: "File is a file policy source.", + Ref: ref("k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1.SchedulerPolicyFileSource"), + }, + }, + "configMap": { + SchemaProps: spec.SchemaProps{ + Description: "ConfigMap is a config map policy source.", + Ref: ref("k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1.SchedulerPolicyConfigMapSource"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1.SchedulerPolicyConfigMapSource", "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1.SchedulerPolicyFileSource"}, + }, "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1.KubeletAnonymousAuthentication": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ @@ -23905,7 +26103,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1.KubeletConfiguration": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "A configuration field should go in KubeletFlags instead of KubeletConfiguration if any of these are true: - its value will never, or cannot safely be changed during the lifetime of a node - its value cannot be safely shared between nodes at the same time (e.g. 
a hostname)\n KubeletConfiguration is intended to be shared between nodes\nIn general, please try to avoid adding flags or configuration fields, we already have a confusingly large amount of them.", + Description: "A configuration field should go in KubeletFlags instead of KubeletConfiguration if its value cannot be safely shared between nodes at the same time (e.g. a hostname) In general, please try to avoid adding flags or configuration fields, we already have a confusingly large amount of them.", Properties: map[string]spec.Schema{ "kind": { SchemaProps: spec.SchemaProps{ @@ -23962,8 +26160,22 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "manifestURLHeader": { SchemaProps: spec.SchemaProps{ Description: "manifestURLHeader is the HTTP header to use when accessing the manifest URL, with the key separated from the value with a ':', as in 'key:value'", - Type: []string{"string"}, - Format: "", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, }, }, "enableServer": { @@ -24020,13 +26232,6 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Ref: ref("k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1.KubeletAuthorization"), }, }, - "seccompProfileRoot": { - SchemaProps: spec.SchemaProps{ - Description: "seccompProfileRoot is the directory path for seccomp profiles.", - Type: []string{"string"}, - Format: "", - }, - }, "allowPrivileged": { SchemaProps: spec.SchemaProps{ Description: "allowPrivileged enables containers to request privileged mode. Defaults to false.", @@ -24118,26 +26323,6 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Format: "", }, }, - "minimumGCAge": { - SchemaProps: spec.SchemaProps{ - Description: "minimumGCAge is the minimum age for a finished container before it is garbage collected.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), - }, - }, - "maxPerPodContainerCount": { - SchemaProps: spec.SchemaProps{ - Description: "maxPerPodContainerCount is the maximum number of old instances to retain per container. Each container takes up some disk space.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "maxContainerCount": { - SchemaProps: spec.SchemaProps{ - Description: "maxContainerCount is the maximum number of old instances of containers to retain globally. Each container takes up some disk space.", - Type: []string{"integer"}, - Format: "int32", - }, - }, "cAdvisorPort": { SchemaProps: spec.SchemaProps{ Description: "cAdvisorPort is the port of the localhost cAdvisor endpoint (set to 0 to disable)", @@ -24166,13 +26351,6 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Format: "int32", }, }, - "registerNode": { - SchemaProps: spec.SchemaProps{ - Description: "registerNode enables automatic registration with the apiserver.", - Type: []string{"boolean"}, - Format: "", - }, - }, "clusterDomain": { SchemaProps: spec.SchemaProps{ Description: "clusterDomain is the DNS domain for this cluster. 
If set, kubelet will configure all containers to search this domain in addition to the host's search domains.", @@ -24180,13 +26358,6 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Format: "", }, }, - "masterServiceNamespace": { - SchemaProps: spec.SchemaProps{ - Description: "masterServiceNamespace is The namespace from which the kubernetes master services should be injected into pods.", - Type: []string{"string"}, - Format: "", - }, - }, "clusterDNS": { SchemaProps: spec.SchemaProps{ Description: "clusterDNS is a list of IP address for the cluster DNS server. If set, kubelet will configure all containers to use this for DNS resolution instead of the host's DNS servers", @@ -24239,13 +26410,6 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, - "volumePluginDir": { - SchemaProps: spec.SchemaProps{ - Description: "volumePluginDir is the full path of the directory in which to search for additional third party volume plugins", - Type: []string{"string"}, - Format: "", - }, - }, "kubeletCgroups": { SchemaProps: spec.SchemaProps{ Description: "kubeletCgroups is the absolute name of cgroups to isolate the kubelet in.", @@ -24253,13 +26417,6 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA Format: "", }, }, - "runtimeCgroups": { - SchemaProps: spec.SchemaProps{ - Description: "runtimeCgroups are cgroups that container runtime is expected to be isolated in.", - Type: []string{"string"}, - Format: "", - }, - }, "systemCgroups": { SchemaProps: spec.SchemaProps{ Description: "systemCgroups is absolute name of cgroups in which to place all non-kernel processes that are not already in a container. Empty for no container. Rolling back the flag requires a reboot.", @@ -24283,174 +26440,279 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, "cgroupDriver": { SchemaProps: spec.SchemaProps{ - Description: "driver that the kubelet uses to manipulate cgroups on the host (cgroupfs or systemd)", + Description: "driver that the kubelet uses to manipulate cgroups on the host (cgroupfs or systemd)", + Type: []string{"string"}, + Format: "", + }, + }, + "cpuManagerPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "CPUManagerPolicy is the name of the policy to use.", + Type: []string{"string"}, + Format: "", + }, + }, + "cpuManagerReconcilePeriod": { + SchemaProps: spec.SchemaProps{ + Description: "CPU Manager reconciliation period.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + }, + }, + "runtimeRequestTimeout": { + SchemaProps: spec.SchemaProps{ + Description: "runtimeRequestTimeout is the timeout for all runtime requests except long running requests - pull, logs, exec and attach.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + }, + }, + "hairpinMode": { + SchemaProps: spec.SchemaProps{ + Description: "How should the kubelet configure the container bridge for hairpin packets. Setting this flag allows endpoints in a Service to loadbalance back to themselves if they should try to access their own Service. 
Values:\n \"promiscuous-bridge\": make the container bridge promiscuous.\n \"hairpin-veth\": set the hairpin flag on container veth interfaces.\n \"none\": do nothing.\nGenerally, one must set --hairpin-mode=veth-flag to achieve hairpin NAT, because promiscous-bridge assumes the existence of a container bridge named cbr0.", + Type: []string{"string"}, + Format: "", + }, + }, + "maxPods": { + SchemaProps: spec.SchemaProps{ + Description: "maxPods is the number of pods that can run on this Kubelet.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "podCIDR": { + SchemaProps: spec.SchemaProps{ + Description: "The CIDR to use for pod IP addresses, only used in standalone mode. In cluster mode, this is obtained from the master.", Type: []string{"string"}, Format: "", }, }, - "containerRuntime": { + "resolvConf": { SchemaProps: spec.SchemaProps{ - Description: "containerRuntime is the container runtime to use.", + Description: "ResolverConfig is the resolver configuration file used as the basis for the container DNS resolution configuration.", Type: []string{"string"}, Format: "", }, }, - "remoteRuntimeEndpoint": { + "cpuCFSQuota": { SchemaProps: spec.SchemaProps{ - Description: "remoteRuntimeEndpoint is the endpoint of remote runtime service", - Type: []string{"string"}, + Description: "cpuCFSQuota is Enable CPU CFS quota enforcement for containers that specify CPU limits", + Type: []string{"boolean"}, Format: "", }, }, - "remoteImageEndpoint": { + "maxOpenFiles": { SchemaProps: spec.SchemaProps{ - Description: "remoteImageEndpoint is the endpoint of remote image service", - Type: []string{"string"}, - Format: "", + Description: "maxOpenFiles is Number of files that can be opened by Kubelet process.", + Type: []string{"integer"}, + Format: "int64", }, }, - "cpuManagerPolicy": { + "contentType": { SchemaProps: spec.SchemaProps{ - Description: "CPUManagerPolicy is the name of the policy to use.", + Description: "contentType is contentType of requests sent to apiserver.", Type: []string{"string"}, Format: "", }, }, - "cpuManagerReconcilePeriod": { + "kubeAPIQPS": { SchemaProps: spec.SchemaProps{ - Description: "CPU Manager reconciliation period.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + Description: "kubeAPIQPS is the QPS to use while talking with kubernetes apiserver", + Type: []string{"integer"}, + Format: "int32", }, }, - "runtimeRequestTimeout": { + "kubeAPIBurst": { SchemaProps: spec.SchemaProps{ - Description: "runtimeRequestTimeout is the timeout for all runtime requests except long running requests - pull, logs, exec and attach.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + Description: "kubeAPIBurst is the burst to allow while talking with kubernetes apiserver", + Type: []string{"integer"}, + Format: "int32", }, }, - "experimentalMounterPath": { + "serializeImagePulls": { SchemaProps: spec.SchemaProps{ - Description: "experimentalMounterPath is the path to mounter binary. If not set, kubelet will attempt to use mount binary that is available via $PATH,", - Type: []string{"string"}, + Description: "serializeImagePulls when enabled, tells the Kubelet to pull images one at a time. We recommend *not* changing the default value on nodes that run docker daemon with version < 1.9 or an Aufs storage backend. Issue #10959 has more details.", + Type: []string{"boolean"}, Format: "", }, }, - "lockFilePath": { + "evictionHard": { SchemaProps: spec.SchemaProps{ - Description: "lockFilePath is the path that kubelet will use to as a lock file. 
It uses this file as a lock to synchronize with other kubelet processes that may be running.", - Type: []string{"string"}, - Format: "", + Description: "Map of signal names to quantities that defines hard eviction thresholds. For example: {\"memory.available\": \"300Mi\"}.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, }, }, - "exitOnLockContention": { + "evictionSoft": { SchemaProps: spec.SchemaProps{ - Description: "ExitOnLockContention is a flag that signifies to the kubelet that it is running in \"bootstrap\" mode. This requires that 'LockFilePath' has been set. This will cause the kubelet to listen to inotify events on the lock file, releasing it and exiting when another process tries to open that file.", - Type: []string{"boolean"}, - Format: "", + Description: "Map of signal names to quantities that defines soft eviction thresholds. For example: {\"memory.available\": \"300Mi\"}.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, }, }, - "hairpinMode": { + "evictionSoftGracePeriod": { SchemaProps: spec.SchemaProps{ - Description: "How should the kubelet configure the container bridge for hairpin packets. Setting this flag allows endpoints in a Service to loadbalance back to themselves if they should try to access their own Service. Values:\n \"promiscuous-bridge\": make the container bridge promiscuous.\n \"hairpin-veth\": set the hairpin flag on container veth interfaces.\n \"none\": do nothing.\nGenerally, one must set --hairpin-mode=veth-flag to achieve hairpin NAT, because promiscous-bridge assumes the existence of a container bridge named cbr0.", - Type: []string{"string"}, - Format: "", + Description: "Map of signal names to quantities that defines grace periods for each soft eviction signal. For example: {\"memory.available\": \"30s\"}.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, }, }, - "maxPods": { + "evictionPressureTransitionPeriod": { SchemaProps: spec.SchemaProps{ - Description: "maxPods is the number of pods that can run on this Kubelet.", + Description: "Duration for which the kubelet has to wait before transitioning out of an eviction pressure condition.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + }, + }, + "evictionMaxPodGracePeriod": { + SchemaProps: spec.SchemaProps{ + Description: "Maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met.", Type: []string{"integer"}, Format: "int32", }, }, - "podCIDR": { + "evictionMinimumReclaim": { SchemaProps: spec.SchemaProps{ - Description: "The CIDR to use for pod IP addresses, only used in standalone mode. In cluster mode, this is obtained from the master.", - Type: []string{"string"}, - Format: "", + Description: "Map of signal names to quantities that defines minimum reclaims, which describe the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction while that resource is under pressure. 
For example: {\"imagefs.available\": \"2Gi\"}", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, }, }, - "resolvConf": { + "podsPerCore": { SchemaProps: spec.SchemaProps{ - Description: "ResolverConfig is the resolver configuration file used as the basis for the container DNS resolution configuration.\"), []", - Type: []string{"string"}, + Description: "Maximum number of pods per core. Cannot exceed MaxPods", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "enableControllerAttachDetach": { + SchemaProps: spec.SchemaProps{ + Description: "enableControllerAttachDetach enables the Attach/Detach controller to manage attachment/detachment of volumes scheduled to this node, and disables kubelet from executing any attach/detach operations", + Type: []string{"boolean"}, Format: "", }, }, - "cpuCFSQuota": { + "protectKernelDefaults": { SchemaProps: spec.SchemaProps{ - Description: "cpuCFSQuota is Enable CPU CFS quota enforcement for containers that specify CPU limits", + Description: "Default behaviour for kernel tuning", Type: []string{"boolean"}, Format: "", }, }, - "containerized": { + "makeIPTablesUtilChains": { SchemaProps: spec.SchemaProps{ - Description: "containerized should be set to true if kubelet is running in a container.", + Description: "If true, Kubelet ensures a set of iptables rules are present on host. These rules will serve as utility rules for various components, e.g. KubeProxy. The rules will be created based on IPTablesMasqueradeBit and IPTablesDropBit.", Type: []string{"boolean"}, Format: "", }, }, - "maxOpenFiles": { + "iptablesMasqueradeBit": { SchemaProps: spec.SchemaProps{ - Description: "maxOpenFiles is Number of files that can be opened by Kubelet process.", + Description: "iptablesMasqueradeBit is the bit of the iptables fwmark space to mark for SNAT Values must be within the range [0, 31]. Must be different from other mark bits. Warning: Please match the value of corresponding parameter in kube-proxy", Type: []string{"integer"}, - Format: "int64", + Format: "int32", }, }, - "registerSchedulable": { + "iptablesDropBit": { SchemaProps: spec.SchemaProps{ - Description: "registerSchedulable tells the kubelet to register the node as schedulable. Won't have any effect if register-node is false. DEPRECATED: use registerWithTaints instead", - Type: []string{"boolean"}, - Format: "", + Description: "iptablesDropBit is the bit of the iptables fwmark space to mark for dropping packets. Values must be within the range [0, 31]. Must be different from other mark bits.", + Type: []string{"integer"}, + Format: "int32", }, }, - "registerWithTaints": { + "featureGates": { SchemaProps: spec.SchemaProps{ - Description: "registerWithTaints are an array of taints to add to a node object when the kubelet registers itself. 
This only takes effect when registerNode is true and upon the initial registration of the node.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ + Description: "featureGates is a map of feature names to bools that enable or disable alpha/experimental features.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.Taint"), + Type: []string{"boolean"}, + Format: "", }, }, }, }, }, - "contentType": { + "failSwapOn": { SchemaProps: spec.SchemaProps{ - Description: "contentType is contentType of requests sent to apiserver.", - Type: []string{"string"}, + Description: "Tells the Kubelet to fail to start if swap is enabled on the node.", + Type: []string{"boolean"}, Format: "", }, }, - "kubeAPIQPS": { + "systemReserved": { SchemaProps: spec.SchemaProps{ - Description: "kubeAPIQPS is the QPS to use while talking with kubernetes apiserver", - Type: []string{"integer"}, - Format: "int32", + Description: "A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G) pairs that describe resources reserved for non-kubernetes components. Currently only cpu and memory are supported. [default=none] See http://kubernetes.io/docs/user-guide/compute-resources for more detail.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, }, }, - "kubeAPIBurst": { + "kubeReserved": { SchemaProps: spec.SchemaProps{ - Description: "kubeAPIBurst is the burst to allow while talking with kubernetes apiserver", - Type: []string{"integer"}, - Format: "int32", + Description: "A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G) pairs that describe resources reserved for kubernetes system components. Currently cpu, memory and local storage for root file system are supported. [default=none] See http://kubernetes.io/docs/user-guide/compute-resources for more detail.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, }, }, - "serializeImagePulls": { + "systemReservedCgroup": { SchemaProps: spec.SchemaProps{ - Description: "serializeImagePulls when enabled, tells the Kubelet to pull images one at a time. We recommend *not* changing the default value on nodes that run docker daemon with version < 1.9 or an Aufs storage backend. Issue #10959 has more details.", - Type: []string{"boolean"}, + Description: "This flag helps kubelet identify absolute name of top level cgroup used to enforce `SystemReserved` compute resource reservation for OS system daemons. Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md) doc for more information.", + Type: []string{"string"}, + Format: "", + }, + }, + "kubeReservedCgroup": { + SchemaProps: spec.SchemaProps{ + Description: "This flag helps kubelet identify absolute name of top level cgroup used to enforce `KubeReserved` compute resource reservation for Kubernetes node system daemons. 
Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md) doc for more information.", + Type: []string{"string"}, Format: "", }, }, - "nodeLabels": { + "enforceNodeAllocatable": { SchemaProps: spec.SchemaProps{ - Description: "nodeLabels to add when registering the node in the cluster.", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ + Description: "This flag specifies the various Node Allocatable enforcements that Kubelet needs to perform. This flag accepts a list of options. Acceptible options are `pods`, `system-reserved` & `kube-reserved`. Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md) doc for more information.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Type: []string{"string"}, @@ -24460,299 +26722,375 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA }, }, }, - "nonMasqueradeCIDR": { + }, + Required: []string{"configTrialDuration", "podManifestPath", "syncFrequency", "fileCheckFrequency", "httpCheckFrequency", "manifestURL", "manifestURLHeader", "enableServer", "address", "port", "readOnlyPort", "tlsCertFile", "tlsPrivateKeyFile", "authentication", "authorization", "allowPrivileged", "hostNetworkSources", "hostPIDSources", "hostIPCSources", "registryPullQPS", "registryBurst", "eventRecordQPS", "eventBurst", "enableDebuggingHandlers", "enableContentionProfiling", "cAdvisorPort", "healthzPort", "healthzBindAddress", "oomScoreAdj", "clusterDomain", "clusterDNS", "streamingConnectionIdleTimeout", "nodeStatusUpdateFrequency", "imageMinimumGCAge", "imageGCHighThresholdPercent", "imageGCLowThresholdPercent", "volumeStatsAggPeriod", "kubeletCgroups", "systemCgroups", "cgroupRoot", "cpuManagerPolicy", "cpuManagerReconcilePeriod", "runtimeRequestTimeout", "hairpinMode", "maxPods", "podCIDR", "resolvConf", "cpuCFSQuota", "maxOpenFiles", "contentType", "kubeAPIQPS", "kubeAPIBurst", "serializeImagePulls", "evictionPressureTransitionPeriod", "evictionMaxPodGracePeriod", "podsPerCore", "enableControllerAttachDetach", "protectKernelDefaults", "makeIPTablesUtilChains", "iptablesMasqueradeBit", "iptablesDropBit", "systemReserved", "kubeReserved", "enforceNodeAllocatable"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.Duration", "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1.KubeletAuthentication", "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1.KubeletAuthorization"}, + }, + "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1.KubeletWebhookAuthentication": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Properties: map[string]spec.Schema{ + "enabled": { SchemaProps: spec.SchemaProps{ - Description: "nonMasqueradeCIDR configures masquerading: traffic to IPs outside this range will use IP masquerade.", - Type: []string{"string"}, + Description: "enabled allows bearer token authentication backed by the tokenreviews.authentication.k8s.io API", + Type: []string{"boolean"}, Format: "", }, }, - "enableCustomMetrics": { + "cacheTTL": { SchemaProps: spec.SchemaProps{ - Description: "enable gathering custom metrics.", - Type: []string{"boolean"}, + Description: "cacheTTL enables caching of authentication results", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + }, + }, + }, + Required: []string{"enabled", "cacheTTL"}, + }, + }, + Dependencies: []string{ + 
"k8s.io/apimachinery/pkg/apis/meta/v1.Duration"}, + }, + "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1.KubeletWebhookAuthorization": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Properties: map[string]spec.Schema{ + "cacheAuthorizedTTL": { + SchemaProps: spec.SchemaProps{ + Description: "cacheAuthorizedTTL is the duration to cache 'authorized' responses from the webhook authorizer.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + }, + }, + "cacheUnauthorizedTTL": { + SchemaProps: spec.SchemaProps{ + Description: "cacheUnauthorizedTTL is the duration to cache 'unauthorized' responses from the webhook authorizer.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + }, + }, + }, + Required: []string{"cacheAuthorizedTTL", "cacheUnauthorizedTTL"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"}, + }, + "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1.KubeletX509Authentication": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Properties: map[string]spec.Schema{ + "clientCAFile": { + SchemaProps: spec.SchemaProps{ + Description: "clientCAFile is the path to a PEM-encoded certificate bundle. If set, any request presenting a client certificate signed by one of the authorities in the bundle is authenticated with a username corresponding to the CommonName, and groups corresponding to the Organization in the client certificate.", + Type: []string{"string"}, Format: "", }, }, - "evictionHard": { + }, + Required: []string{"clientCAFile"}, + }, + }, + Dependencies: []string{}, + }, + "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1.ClientConnectionConfiguration": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ClientConnectionConfiguration contains details for constructing a client.", + Properties: map[string]spec.Schema{ + "kubeconfig": { SchemaProps: spec.SchemaProps{ - Description: "Comma-delimited list of hard eviction expressions. For example, 'memory.available<300Mi'.", + Description: "kubeConfigFile is the path to a kubeconfig file.", Type: []string{"string"}, Format: "", }, }, - "evictionSoft": { + "acceptContentTypes": { SchemaProps: spec.SchemaProps{ - Description: "Comma-delimited list of soft eviction expressions. For example, 'memory.available<300Mi'.", + Description: "acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the default value of 'application/json'. This field will control all connections to the server used by a particular client.", Type: []string{"string"}, Format: "", }, }, - "evictionSoftGracePeriod": { + "contentType": { SchemaProps: spec.SchemaProps{ - Description: "Comma-delimeted list of grace periods for each soft eviction signal. 
For example, 'memory.available=30s'.", + Description: "contentType is the content type used when sending data to the server from this client.", Type: []string{"string"}, Format: "", }, }, - "evictionPressureTransitionPeriod": { + "qps": { SchemaProps: spec.SchemaProps{ - Description: "Duration for which the kubelet has to wait before transitioning out of an eviction pressure condition.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + Description: "cps controls the number of queries per second allowed for this connection.", + Type: []string{"number"}, + Format: "float", }, }, - "evictionMaxPodGracePeriod": { + "burst": { SchemaProps: spec.SchemaProps{ - Description: "Maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met.", + Description: "burst allows extra queries to accumulate when a client is exceeding its rate.", Type: []string{"integer"}, Format: "int32", }, }, - "evictionMinimumReclaim": { + }, + Required: []string{"kubeconfig", "acceptContentTypes", "contentType", "qps", "burst"}, + }, + }, + Dependencies: []string{}, + }, + "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1.KubeProxyConfiguration": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "KubeProxyConfiguration contains everything necessary to configure the Kubernetes proxy server.", + Properties: map[string]spec.Schema{ + "kind": { SchemaProps: spec.SchemaProps{ - Description: "Comma-delimited list of minimum reclaims (e.g. imagefs.available=2Gi) that describes the minimum amount of resource the kubelet will reclaim when performing a pod eviction if that resource is under pressure.", + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", Type: []string{"string"}, Format: "", }, }, - "experimentalKernelMemcgNotification": { + "apiVersion": { SchemaProps: spec.SchemaProps{ - Description: "If enabled, the kubelet will integrate with the kernel memcg notification to determine if memory eviction thresholds are crossed rather than polling.", - Type: []string{"boolean"}, + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Type: []string{"string"}, Format: "", }, }, - "podsPerCore": { + "featureGates": { SchemaProps: spec.SchemaProps{ - Description: "Maximum number of pods per core. 
Cannot exceed MaxPods", - Type: []string{"integer"}, - Format: "int32", + Description: "featureGates is a comma-separated list of key=value pairs that control which alpha/beta features are enabled.\n\ncomponents to use config files because local-up-cluster.sh only supports the --feature-gates flag right now, which is comma-separated key=value pairs.", + Type: []string{"string"}, + Format: "", }, }, - "enableControllerAttachDetach": { + "bindAddress": { SchemaProps: spec.SchemaProps{ - Description: "enableControllerAttachDetach enables the Attach/Detach controller to manage attachment/detachment of volumes scheduled to this node, and disables kubelet from executing any attach/detach operations", - Type: []string{"boolean"}, + Description: "bindAddress is the IP address for the proxy server to serve on (set to 0.0.0.0 for all interfaces)", + Type: []string{"string"}, Format: "", }, }, - "experimentalQOSReserved": { + "healthzBindAddress": { SchemaProps: spec.SchemaProps{ - Description: "A set of ResourceName=Percentage (e.g. memory=50%) pairs that describe how pod resource requests are reserved at the QoS level. Currently only memory is supported. [default=none]\"", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, + Description: "healthzBindAddress is the IP address and port for the health check server to serve on, defaulting to 0.0.0.0:10256", + Type: []string{"string"}, + Format: "", }, }, - "protectKernelDefaults": { + "metricsBindAddress": { SchemaProps: spec.SchemaProps{ - Description: "Default behaviour for kernel tuning", - Type: []string{"boolean"}, + Description: "metricsBindAddress is the IP address and port for the metrics server to serve on, defaulting to 127.0.0.1:10249 (set to 0.0.0.0 for all interfaces)", + Type: []string{"string"}, Format: "", }, }, - "makeIPTablesUtilChains": { + "enableProfiling": { SchemaProps: spec.SchemaProps{ - Description: "If true, Kubelet ensures a set of iptables rules are present on host. These rules will serve as utility rules for various components, e.g. KubeProxy. The rules will be created based on IPTablesMasqueradeBit and IPTablesDropBit.", + Description: "enableProfiling enables profiling via web interface on /debug/pprof handler. Profiling handlers will be handled by metrics server.", Type: []string{"boolean"}, Format: "", }, }, - "iptablesMasqueradeBit": { - SchemaProps: spec.SchemaProps{ - Description: "iptablesMasqueradeBit is the bit of the iptables fwmark space to mark for SNAT Values must be within the range [0, 31]. Must be different from other mark bits. Warning: Please match the value of corresponding parameter in kube-proxy", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "iptablesDropBit": { - SchemaProps: spec.SchemaProps{ - Description: "iptablesDropBit is the bit of the iptables fwmark space to mark for dropping packets. Values must be within the range [0, 31]. Must be different from other mark bits.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "allowedUnsafeSysctls": { + "clusterCIDR": { SchemaProps: spec.SchemaProps{ - Description: "Whitelist of unsafe sysctls or sysctl patterns (ending in *). Use these at your own risk. 
Resource isolation might be lacking and pod might influence each other on the same node.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, + Description: "clusterCIDR is the CIDR range of the pods in the cluster. It is used to bridge traffic coming from outside of the cluster. If not provided, no off-cluster bridging will be performed.", + Type: []string{"string"}, + Format: "", }, }, - "featureGates": { + "hostnameOverride": { SchemaProps: spec.SchemaProps{ - Description: "featureGates is a string of comma-separated key=value pairs that describe feature gates for alpha/experimental features.", + Description: "hostnameOverride, if non-empty, will be used as the identity instead of the actual hostname.", Type: []string{"string"}, Format: "", }, }, - "failSwapOn": { + "clientConnection": { SchemaProps: spec.SchemaProps{ - Description: "Tells the Kubelet to fail to start if swap is enabled on the node.", - Type: []string{"boolean"}, - Format: "", + Description: "clientConnection specifies the kubeconfig file and client connection settings for the proxy server to use when communicating with the apiserver.", + Ref: ref("k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1.ClientConnectionConfiguration"), }, }, - "experimentalCheckNodeCapabilitiesBeforeMount": { + "iptables": { SchemaProps: spec.SchemaProps{ - Description: "This flag, if set, enables a check prior to mount operations to verify that the required components (binaries, etc.) to mount the volume are available on the underlying node. If the check is enabled and fails the mount operation fails.", - Type: []string{"boolean"}, - Format: "", + Description: "iptables contains iptables-related configuration options.", + Ref: ref("k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1.KubeProxyIPTablesConfiguration"), }, }, - "keepTerminatedPodVolumes": { + "ipvs": { SchemaProps: spec.SchemaProps{ - Description: "This flag, if set, instructs the kubelet to keep volumes from terminated pods mounted to the node. This can be useful for debugging volume related issues.", - Type: []string{"boolean"}, - Format: "", + Description: "ipvs contains ipvs-related configuration options.", + Ref: ref("k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1.KubeProxyIPVSConfiguration"), }, }, - "systemReserved": { + "oomScoreAdj": { SchemaProps: spec.SchemaProps{ - Description: "A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G) pairs that describe resources reserved for non-kubernetes components. Currently only cpu and memory are supported. [default=none] See http://kubernetes.io/docs/user-guide/compute-resources for more detail.", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, + Description: "oomScoreAdj is the oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000]", + Type: []string{"integer"}, + Format: "int32", }, }, - "kubeReserved": { + "mode": { SchemaProps: spec.SchemaProps{ - Description: "A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G) pairs that describe resources reserved for kubernetes system components. Currently cpu, memory and local storage for root file system are supported. 
[default=none] See http://kubernetes.io/docs/user-guide/compute-resources for more detail.", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, + Description: "mode specifies which proxy mode to use.", + Type: []string{"string"}, + Format: "", }, }, - "systemReservedCgroup": { + "portRange": { SchemaProps: spec.SchemaProps{ - Description: "This flag helps kubelet identify absolute name of top level cgroup used to enforce `SystemReserved` compute resource reservation for OS system daemons. Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node-allocatable.md) doc for more information.", + Description: "portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.", Type: []string{"string"}, Format: "", }, }, - "kubeReservedCgroup": { + "resourceContainer": { SchemaProps: spec.SchemaProps{ - Description: "This flag helps kubelet identify absolute name of top level cgroup used to enforce `KubeReserved` compute resource reservation for Kubernetes node system daemons. Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node-allocatable.md) doc for more information.", + Description: "resourceContainer is the bsolute name of the resource-only container to create and run the Kube-proxy in (Default: /kube-proxy).", Type: []string{"string"}, Format: "", }, }, - "enforceNodeAllocatable": { + "udpTimeoutMilliseconds": { SchemaProps: spec.SchemaProps{ - Description: "This flag specifies the various Node Allocatable enforcements that Kubelet needs to perform. This flag accepts a list of options. Acceptible options are `pods`, `system-reserved` & `kube-reserved`. Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node-allocatable.md) doc for more information.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, + Description: "udpIdleTimeout is how long an idle UDP connection will be kept open (e.g. '250ms', '2s'). Must be greater than 0. Only applicable for proxyMode=userspace.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, - "experimentalNodeAllocatableIgnoreEvictionThreshold": { + "conntrack": { SchemaProps: spec.SchemaProps{ - Description: "This flag, if set, will avoid including `EvictionHard` limits while computing Node Allocatable. Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node-allocatable.md) doc for more information.", - Type: []string{"boolean"}, - Format: "", + Description: "conntrack contains conntrack-related configuration options.", + Ref: ref("k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1.KubeProxyConntrackConfiguration"), + }, + }, + "configSyncPeriod": { + SchemaProps: spec.SchemaProps{ + Description: "configSyncPeriod is how often configuration from the apiserver is refreshed. 
Must be greater than 0.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, }, - Required: []string{"configTrialDuration", "podManifestPath", "syncFrequency", "fileCheckFrequency", "httpCheckFrequency", "manifestURL", "manifestURLHeader", "enableServer", "address", "port", "readOnlyPort", "tlsCertFile", "tlsPrivateKeyFile", "authentication", "authorization", "seccompProfileRoot", "allowPrivileged", "hostNetworkSources", "hostPIDSources", "hostIPCSources", "registryPullQPS", "registryBurst", "eventRecordQPS", "eventBurst", "enableDebuggingHandlers", "enableContentionProfiling", "minimumGCAge", "maxPerPodContainerCount", "maxContainerCount", "cAdvisorPort", "healthzPort", "healthzBindAddress", "oomScoreAdj", "registerNode", "clusterDomain", "masterServiceNamespace", "clusterDNS", "streamingConnectionIdleTimeout", "nodeStatusUpdateFrequency", "imageMinimumGCAge", "imageGCHighThresholdPercent", "imageGCLowThresholdPercent", "volumeStatsAggPeriod", "volumePluginDir", "kubeletCgroups", "runtimeCgroups", "systemCgroups", "cgroupRoot", "containerRuntime", "remoteRuntimeEndpoint", "remoteImageEndpoint", "cpuManagerPolicy", "cpuManagerReconcilePeriod", "runtimeRequestTimeout", "lockFilePath", "exitOnLockContention", "hairpinMode", "maxPods", "podCIDR", "resolvConf", "cpuCFSQuota", "containerized", "maxOpenFiles", "registerSchedulable", "registerWithTaints", "contentType", "kubeAPIQPS", "kubeAPIBurst", "serializeImagePulls", "nodeLabels", "nonMasqueradeCIDR", "enableCustomMetrics", "evictionHard", "evictionSoft", "evictionSoftGracePeriod", "evictionPressureTransitionPeriod", "evictionMaxPodGracePeriod", "evictionMinimumReclaim", "experimentalKernelMemcgNotification", "podsPerCore", "enableControllerAttachDetach", "experimentalQOSReserved", "protectKernelDefaults", "makeIPTablesUtilChains", "iptablesMasqueradeBit", "iptablesDropBit", "systemReserved", "kubeReserved", "enforceNodeAllocatable"}, + Required: []string{"featureGates", "bindAddress", "healthzBindAddress", "metricsBindAddress", "enableProfiling", "clusterCIDR", "hostnameOverride", "clientConnection", "iptables", "ipvs", "oomScoreAdj", "mode", "portRange", "resourceContainer", "udpTimeoutMilliseconds", "conntrack", "configSyncPeriod"}, }, }, Dependencies: []string{ - "k8s.io/api/core/v1.Taint", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration", "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1.KubeletAuthentication", "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1.KubeletAuthorization"}, + "k8s.io/apimachinery/pkg/apis/meta/v1.Duration", "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1.ClientConnectionConfiguration", "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1.KubeProxyConntrackConfiguration", "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1.KubeProxyIPTablesConfiguration", "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1.KubeProxyIPVSConfiguration"}, }, - "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1.KubeletWebhookAuthentication": { + "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1.KubeProxyConntrackConfiguration": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ + Description: "KubeProxyConntrackConfiguration contains conntrack settings for the Kubernetes proxy server.", Properties: map[string]spec.Schema{ - "enabled": { + "max": { SchemaProps: spec.SchemaProps{ - Description: "enabled allows bearer token authentication backed by the tokenreviews.authentication.k8s.io API", - Type: []string{"boolean"}, - Format: "", + Description: 
"max is the maximum number of NAT connections to track (0 to leave as-is). This takes precedence over maxPerCore and min.", + Type: []string{"integer"}, + Format: "int32", }, }, - "cacheTTL": { + "maxPerCore": { SchemaProps: spec.SchemaProps{ - Description: "cacheTTL enables caching of authentication results", + Description: "maxPerCore is the maximum number of NAT connections to track per CPU core (0 to leave the limit as-is and ignore min).", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "min": { + SchemaProps: spec.SchemaProps{ + Description: "min is the minimum value of connect-tracking records to allocate, regardless of conntrackMaxPerCore (set maxPerCore=0 to leave the limit as-is).", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "tcpEstablishedTimeout": { + SchemaProps: spec.SchemaProps{ + Description: "tcpEstablishedTimeout is how long an idle TCP connection will be kept open (e.g. '2s'). Must be greater than 0 to set.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + }, + }, + "tcpCloseWaitTimeout": { + SchemaProps: spec.SchemaProps{ + Description: "tcpCloseWaitTimeout is how long an idle conntrack entry in CLOSE_WAIT state will remain in the conntrack table. (e.g. '60s'). Must be greater than 0 to set.", Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, }, - Required: []string{"enabled", "cacheTTL"}, + Required: []string{"max", "maxPerCore", "min", "tcpEstablishedTimeout", "tcpCloseWaitTimeout"}, }, }, Dependencies: []string{ "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"}, }, - "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1.KubeletWebhookAuthorization": { + "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1.KubeProxyIPTablesConfiguration": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ + Description: "KubeProxyIPTablesConfiguration contains iptables-related configuration details for the Kubernetes proxy server.", Properties: map[string]spec.Schema{ - "cacheAuthorizedTTL": { + "masqueradeBit": { SchemaProps: spec.SchemaProps{ - Description: "cacheAuthorizedTTL is the duration to cache 'authorized' responses from the webhook authorizer.", + Description: "masqueradeBit is the bit of the iptables fwmark space to use for SNAT if using the pure iptables proxy mode. Values must be within the range [0, 31].", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "masqueradeAll": { + SchemaProps: spec.SchemaProps{ + Description: "masqueradeAll tells kube-proxy to SNAT everything if using the pure iptables proxy mode.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "syncPeriod": { + SchemaProps: spec.SchemaProps{ + Description: "syncPeriod is the period that iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0.", Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, - "cacheUnauthorizedTTL": { + "minSyncPeriod": { SchemaProps: spec.SchemaProps{ - Description: "cacheUnauthorizedTTL is the duration to cache 'unauthorized' responses from the webhook authorizer.", + Description: "minSyncPeriod is the minimum period that iptables rules are refreshed (e.g. 
'5s', '1m', '2h22m').", Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, }, - Required: []string{"cacheAuthorizedTTL", "cacheUnauthorizedTTL"}, + Required: []string{"masqueradeBit", "masqueradeAll", "syncPeriod", "minSyncPeriod"}, }, }, Dependencies: []string{ "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"}, }, - "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1.KubeletX509Authentication": { + "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1.KubeProxyIPVSConfiguration": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ + Description: "KubeProxyIPVSConfiguration contains ipvs-related configuration details for the Kubernetes proxy server.", Properties: map[string]spec.Schema{ - "clientCAFile": { + "syncPeriod": { SchemaProps: spec.SchemaProps{ - Description: "clientCAFile is the path to a PEM-encoded certificate bundle. If set, any request presenting a client certificate signed by one of the authorities in the bundle is authenticated with a username corresponding to the CommonName, and groups corresponding to the Organization in the client certificate.", + Description: "syncPeriod is the period that ipvs rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + }, + }, + "minSyncPeriod": { + SchemaProps: spec.SchemaProps{ + Description: "minSyncPeriod is the minimum period that ipvs rules are refreshed (e.g. '5s', '1m', '2h22m').", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + }, + }, + "scheduler": { + SchemaProps: spec.SchemaProps{ + Description: "ipvs scheduler", Type: []string{"string"}, Format: "", }, }, }, - Required: []string{"clientCAFile"}, + Required: []string{"syncPeriod", "minSyncPeriod", "scheduler"}, }, }, - Dependencies: []string{}, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"}, }, "k8s.io/metrics/pkg/apis/custom_metrics/v1beta1.MetricValue": { Schema: spec.Schema{ diff --git a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/BUILD b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/BUILD index fc2e3afe13ed..e72de1f9f939 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/BUILD @@ -12,8 +12,9 @@ go_library( "default_storage_factory_builder.go", "doc.go", ], + importpath = "k8s.io/kubernetes/pkg/kubeapiserver", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apiserver/pkg/server/storage:go_default_library", @@ -45,10 +46,11 @@ filegroup( go_test( name = "go_default_test", srcs = ["default_storage_factory_builder_test.go"], + importpath = "k8s.io/kubernetes/pkg/kubeapiserver", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/install:go_default_library", + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/core/install:go_default_library", "//pkg/apis/extensions/install:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/admission/BUILD b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/admission/BUILD index 95a3eb4c3065..b3ec0cb6f9f8 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/admission/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/admission/BUILD @@ -8,23 +8,23 @@ load( go_test( name = 
"go_default_test", - srcs = ["init_test.go"], + srcs = ["initializer_test.go"], + importpath = "k8s.io/kubernetes/pkg/kubeapiserver/admission", library = ":go_default_library", - deps = [ - "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", - ], + deps = ["//vendor/k8s.io/apiserver/pkg/admission:go_default_library"], ) go_library( name = "go_default_library", srcs = ["initializer.go"], + importpath = "k8s.io/kubernetes/pkg/kubeapiserver/admission", deps = [ "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/client/informers/informers_generated/internalversion:go_default_library", "//pkg/quota:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config:go_default_library", "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", ], @@ -41,7 +41,6 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", - "//pkg/kubeapiserver/admission/configuration:all-srcs", "//pkg/kubeapiserver/admission/util:all-srcs", ], tags = ["automanaged"], diff --git a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/admission/configuration/BUILD b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/admission/configuration/BUILD deleted file mode 100644 index edd36ae534b4..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/admission/configuration/BUILD +++ /dev/null @@ -1,55 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_test( - name = "go_default_test", - srcs = [ - "configuration_manager_test.go", - "external_admission_hook_manager_test.go", - "initializer_manager_test.go", - ], - library = ":go_default_library", - deps = [ - "//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - ], -) - -go_library( - name = "go_default_library", - srcs = [ - "configuration_manager.go", - "external_admission_hook_manager.go", - "initializer_manager.go", - ], - deps = [ - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/admission/configuration/external_admission_hook_manager.go b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/admission/configuration/external_admission_hook_manager.go deleted file mode 100644 index 024f5fae0b92..000000000000 --- 
a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/admission/configuration/external_admission_hook_manager.go +++ /dev/null @@ -1,83 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package configuration - -import ( - "fmt" - "reflect" - - "github.com/golang/glog" - - "k8s.io/api/admissionregistration/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" -) - -type ExternalAdmissionHookConfigurationLister interface { - List(opts metav1.ListOptions) (*v1alpha1.ExternalAdmissionHookConfigurationList, error) -} - -type ExternalAdmissionHookConfigurationManager struct { - *poller -} - -func NewExternalAdmissionHookConfigurationManager(c ExternalAdmissionHookConfigurationLister) *ExternalAdmissionHookConfigurationManager { - getFn := func() (runtime.Object, error) { - list, err := c.List(metav1.ListOptions{}) - if err != nil { - if errors.IsNotFound(err) || errors.IsForbidden(err) { - glog.V(5).Infof("ExternalAdmissionHookConfiguration are disabled due to an error: %v", err) - return nil, ErrDisabled - } - return nil, err - } - return mergeExternalAdmissionHookConfigurations(list), nil - } - - return &ExternalAdmissionHookConfigurationManager{ - newPoller(getFn), - } -} - -// ExternalAdmissionHooks returns the merged ExternalAdmissionHookConfiguration. -func (im *ExternalAdmissionHookConfigurationManager) ExternalAdmissionHooks() (*v1alpha1.ExternalAdmissionHookConfiguration, error) { - configuration, err := im.poller.configuration() - if err != nil { - return nil, err - } - externalAdmissionHookConfiguration, ok := configuration.(*v1alpha1.ExternalAdmissionHookConfiguration) - if !ok { - return nil, fmt.Errorf("expected type %v, got type %v", reflect.TypeOf(externalAdmissionHookConfiguration), reflect.TypeOf(configuration)) - } - return externalAdmissionHookConfiguration, nil -} - -func (im *ExternalAdmissionHookConfigurationManager) Run(stopCh <-chan struct{}) { - im.poller.Run(stopCh) -} - -func mergeExternalAdmissionHookConfigurations( - list *v1alpha1.ExternalAdmissionHookConfigurationList, -) *v1alpha1.ExternalAdmissionHookConfiguration { - configurations := list.Items - var ret v1alpha1.ExternalAdmissionHookConfiguration - for _, c := range configurations { - ret.ExternalAdmissionHooks = append(ret.ExternalAdmissionHooks, c.ExternalAdmissionHooks...) - } - return &ret -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/admission/initializer.go b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/admission/initializer.go index fed03e9db2d2..0a7903227c7a 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/admission/initializer.go +++ b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/admission/initializer.go @@ -17,11 +17,9 @@ limitations under the License. 
package admission import ( - "net/http" - "net/url" - "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apiserver/pkg/admission" + webhookconfig "k8s.io/apiserver/pkg/admission/plugin/webhook/config" "k8s.io/apiserver/pkg/authorization/authorizer" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" @@ -34,25 +32,13 @@ import ( // WantsInternalKubeClientSet defines a function which sets ClientSet for admission plugins that need it type WantsInternalKubeClientSet interface { SetInternalKubeClientSet(internalclientset.Interface) - admission.Validator -} - -// WantsExternalKubeClientSet defines a function which sets ClientSet for admission plugins that need it -type WantsExternalKubeClientSet interface { - SetExternalKubeClientSet(clientset.Interface) - admission.Validator + admission.InitializationValidator } // WantsInternalKubeInformerFactory defines a function which sets InformerFactory for admission plugins that need it type WantsInternalKubeInformerFactory interface { SetInternalKubeInformerFactory(informers.SharedInformerFactory) - admission.Validator -} - -// WantsAuthorizer defines a function which sets Authorizer for admission plugins that need it. -type WantsAuthorizer interface { - SetAuthorizer(authorizer.Authorizer) - admission.Validator + admission.InitializationValidator } // WantsCloudConfig defines a function which sets CloudConfig for admission plugins that need it. @@ -65,50 +51,23 @@ type WantsRESTMapper interface { SetRESTMapper(meta.RESTMapper) } -// WantsQuotaRegistry defines a function which sets quota registry for admission plugins that need it. -type WantsQuotaRegistry interface { - SetQuotaRegistry(quota.Registry) - admission.Validator -} - -// WantsServiceResolver defines a fuction that accepts a ServiceResolver for -// admission plugins that need to make calls to services. -type WantsServiceResolver interface { - SetServiceResolver(ServiceResolver) -} - -// WantsClientCert defines a fuction that accepts a cert & key for admission -// plugins that need to make calls and prove their identity. -type WantsClientCert interface { - SetClientCert(cert, key []byte) -} - -// ServiceResolver knows how to convert a service reference into an actual -// location. -type ServiceResolver interface { - ResolveEndpoint(namespace, name string) (*url.URL, error) -} - -// WantsProxyTransport defines a fuction that accepts a proxy transport for admission -// plugins that need to make calls to pods. -type WantsProxyTransport interface { - SetProxyTransport(proxyTransport *http.Transport) +// WantsQuotaConfiguration defines a function which sets quota configuration for admission plugins that need it. +type WantsQuotaConfiguration interface { + SetQuotaConfiguration(quota.Configuration) + admission.InitializationValidator } +// PluginInitializer is used for initialization of the Kubernetes specific admission plugins. 
type PluginInitializer struct { - internalClient internalclientset.Interface - externalClient clientset.Interface - informers informers.SharedInformerFactory - authorizer authorizer.Authorizer - cloudConfig []byte - restMapper meta.RESTMapper - quotaRegistry quota.Registry - serviceResolver ServiceResolver - - // for proving we are apiserver in call-outs - clientCert []byte - clientKey []byte - proxyTransport *http.Transport + internalClient internalclientset.Interface + externalClient clientset.Interface + informers informers.SharedInformerFactory + authorizer authorizer.Authorizer + cloudConfig []byte + restMapper meta.RESTMapper + quotaConfiguration quota.Configuration + serviceResolver webhookconfig.ServiceResolver + authenticationInfoResolverWrapper webhookconfig.AuthenticationInfoResolverWrapper } var _ admission.PluginInitializer = &PluginInitializer{} @@ -118,44 +77,20 @@ var _ admission.PluginInitializer = &PluginInitializer{} // all public, this construction method is pointless boilerplate. func NewPluginInitializer( internalClient internalclientset.Interface, - externalClient clientset.Interface, sharedInformers informers.SharedInformerFactory, - authz authorizer.Authorizer, cloudConfig []byte, restMapper meta.RESTMapper, - quotaRegistry quota.Registry, + quotaConfiguration quota.Configuration, ) *PluginInitializer { return &PluginInitializer{ - internalClient: internalClient, - externalClient: externalClient, - informers: sharedInformers, - authorizer: authz, - cloudConfig: cloudConfig, - restMapper: restMapper, - quotaRegistry: quotaRegistry, + internalClient: internalClient, + informers: sharedInformers, + cloudConfig: cloudConfig, + restMapper: restMapper, + quotaConfiguration: quotaConfiguration, } } -// SetServiceResolver sets the service resolver which is needed by some plugins. -func (i *PluginInitializer) SetServiceResolver(s ServiceResolver) *PluginInitializer { - i.serviceResolver = s - return i -} - -// SetClientCert sets the client cert & key (identity used for calling out to -// web hooks) which is needed by some plugins. -func (i *PluginInitializer) SetClientCert(cert, key []byte) *PluginInitializer { - i.clientCert = cert - i.clientKey = key - return i -} - -// SetProxyTransport sets the proxyTransport which is needed by some plugins. 
-func (i *PluginInitializer) SetProxyTransport(proxyTransport *http.Transport) *PluginInitializer { - i.proxyTransport = proxyTransport - return i -} - // Initialize checks the initialization interfaces implemented by each plugin // and provide the appropriate initialization data func (i *PluginInitializer) Initialize(plugin admission.Interface) { @@ -163,18 +98,10 @@ func (i *PluginInitializer) Initialize(plugin admission.Interface) { wants.SetInternalKubeClientSet(i.internalClient) } - if wants, ok := plugin.(WantsExternalKubeClientSet); ok { - wants.SetExternalKubeClientSet(i.externalClient) - } - if wants, ok := plugin.(WantsInternalKubeInformerFactory); ok { wants.SetInternalKubeInformerFactory(i.informers) } - if wants, ok := plugin.(WantsAuthorizer); ok { - wants.SetAuthorizer(i.authorizer) - } - if wants, ok := plugin.(WantsCloudConfig); ok { wants.SetCloudConfig(i.cloudConfig) } @@ -183,25 +110,7 @@ func (i *PluginInitializer) Initialize(plugin admission.Interface) { wants.SetRESTMapper(i.restMapper) } - if wants, ok := plugin.(WantsQuotaRegistry); ok { - wants.SetQuotaRegistry(i.quotaRegistry) - } - - if wants, ok := plugin.(WantsServiceResolver); ok { - if i.serviceResolver == nil { - panic("An admission plugin wants the service resolver, but it was not provided.") - } - wants.SetServiceResolver(i.serviceResolver) - } - - if wants, ok := plugin.(WantsClientCert); ok { - if i.clientCert == nil || i.clientKey == nil { - panic("An admission plugin wants a client cert/key, but they were not provided.") - } - wants.SetClientCert(i.clientCert, i.clientKey) - } - - if wants, ok := plugin.(WantsProxyTransport); ok { - wants.SetProxyTransport(i.proxyTransport) + if wants, ok := plugin.(WantsQuotaConfiguration); ok { + wants.SetQuotaConfiguration(i.quotaConfiguration) } } diff --git a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/admission/util/BUILD b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/admission/util/BUILD index 33a6ab2bde66..9cd2c1fd397a 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/admission/util/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/admission/util/BUILD @@ -1,7 +1,5 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", @@ -10,7 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["initializer.go"], - tags = ["automanaged"], + importpath = "k8s.io/kubernetes/pkg/kubeapiserver/admission/util", deps = [ "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/initialization:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/authenticator/BUILD b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/authenticator/BUILD index 744763e47537..44c8ae56b794 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/authenticator/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/authenticator/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["config.go"], + importpath = "k8s.io/kubernetes/pkg/kubeapiserver/authenticator", deps = [ "//pkg/serviceaccount:go_default_library", "//vendor/github.com/go-openapi/spec:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/authenticator/config.go b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/authenticator/config.go index b1212ce67a54..d47ec98d534e 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/authenticator/config.go +++ b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/authenticator/config.go @@ -235,7 +235,7 @@ func 
newAuthenticatorFromBasicAuthFile(basicAuthFile string) (authenticator.Requ return basicauth.New(basicAuthenticator), nil } -// newAuthenticatorFromTokenFile returns an authenticator.Request or an error +// newAuthenticatorFromTokenFile returns an authenticator.Token or an error func newAuthenticatorFromTokenFile(tokenAuthFile string) (authenticator.Token, error) { tokenAuthenticator, err := tokenfile.NewCSV(tokenAuthFile) if err != nil { @@ -245,7 +245,7 @@ func newAuthenticatorFromTokenFile(tokenAuthFile string) (authenticator.Token, e return tokenAuthenticator, nil } -// newAuthenticatorFromOIDCIssuerURL returns an authenticator.Request or an error. +// newAuthenticatorFromOIDCIssuerURL returns an authenticator.Token or an error. func newAuthenticatorFromOIDCIssuerURL(issuerURL, clientID, caFile, usernameClaim, usernamePrefix, groupsClaim, groupsPrefix string) (authenticator.Token, error) { const noUsernamePrefix = "-" @@ -278,7 +278,7 @@ func newAuthenticatorFromOIDCIssuerURL(issuerURL, clientID, caFile, usernameClai return tokenAuthenticator, nil } -// newServiceAccountAuthenticator returns an authenticator.Request or an error +// newServiceAccountAuthenticator returns an authenticator.Token or an error func newServiceAccountAuthenticator(keyfiles []string, lookup bool, serviceAccountGetter serviceaccount.ServiceAccountTokenGetter) (authenticator.Token, error) { allPublicKeys := []interface{}{} for _, keyfile := range keyfiles { diff --git a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/authorizer/BUILD b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/authorizer/BUILD index a2864f4014a2..5d5acebfb764 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/authorizer/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/authorizer/BUILD @@ -12,6 +12,7 @@ go_test( data = [ "//pkg/auth/authorizer/abac:example_policy", ], + importpath = "k8s.io/kubernetes/pkg/kubeapiserver/authorizer", library = ":go_default_library", deps = ["//pkg/kubeapiserver/authorizer/modes:go_default_library"], ) @@ -19,6 +20,7 @@ go_test( go_library( name = "go_default_library", srcs = ["config.go"], + importpath = "k8s.io/kubernetes/pkg/kubeapiserver/authorizer", deps = [ "//pkg/auth/authorizer/abac:go_default_library", "//pkg/auth/nodeidentifier:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes/BUILD b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes/BUILD index 3d6e3406c13f..9be3d02c9a8a 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes/BUILD @@ -9,12 +9,14 @@ load( go_test( name = "go_default_test", srcs = ["modes_test.go"], + importpath = "k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes", library = ":go_default_library", ) go_library( name = "go_default_library", srcs = ["modes.go"], + importpath = "k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes", ) filegroup( diff --git a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/default_storage_factory_builder.go b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/default_storage_factory_builder.go index 2a9fcf9ba792..ca1a3d35b331 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/default_storage_factory_builder.go +++ b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/default_storage_factory_builder.go @@ -26,9 +26,20 @@ import ( serverstorage "k8s.io/apiserver/pkg/server/storage" "k8s.io/apiserver/pkg/storage/storagebackend" utilflag "k8s.io/apiserver/pkg/util/flag" - "k8s.io/kubernetes/pkg/api" + 
"k8s.io/kubernetes/pkg/api/legacyscheme" ) +// specialDefaultResourcePrefixes are prefixes compiled into Kubernetes. +var specialDefaultResourcePrefixes = map[schema.GroupResource]string{ + {Group: "", Resource: "replicationControllers"}: "controllers", + {Group: "", Resource: "replicationcontrollers"}: "controllers", + {Group: "", Resource: "endpoints"}: "services/endpoints", + {Group: "", Resource: "nodes"}: "minions", + {Group: "", Resource: "services"}: "services/specs", + {Group: "extensions", Resource: "ingresses"}: "ingress", + {Group: "extensions", Resource: "podsecuritypolicies"}: "podsecuritypolicy", +} + // NewStorageFactory builds the DefaultStorageFactory. // Merges defaultResourceConfig with the user specified overrides and merges // defaultAPIResourceConfig with the corresponding user specified overrides as well. @@ -42,7 +53,7 @@ func NewStorageFactory(storageConfig storagebackend.Config, defaultMediaType str if err != nil { return nil, err } - return serverstorage.NewDefaultStorageFactory(storageConfig, defaultMediaType, serializer, resourceEncodingConfig, apiResourceConfig), nil + return serverstorage.NewDefaultStorageFactory(storageConfig, defaultMediaType, serializer, resourceEncodingConfig, apiResourceConfig, specialDefaultResourcePrefixes), nil } // Merges the given defaultResourceConfig with specifc GroupvVersionResource overrides. @@ -74,9 +85,9 @@ func mergeAPIResourceConfigs(defaultAPIResourceConfig *serverstorage.ResourceCon if ok { if allAPIFlagValue == "false" { // Disable all group versions. - resourceConfig.DisableVersions(api.Registry.RegisteredGroupVersions()...) + resourceConfig.DisableVersions(legacyscheme.Registry.RegisteredGroupVersions()...) } else if allAPIFlagValue == "true" { - resourceConfig.EnableVersions(api.Registry.RegisteredGroupVersions()...) + resourceConfig.EnableVersions(legacyscheme.Registry.RegisteredGroupVersions()...) } } @@ -110,8 +121,8 @@ func mergeAPIResourceConfigs(defaultAPIResourceConfig *serverstorage.ResourceCon if err != nil { return nil, fmt.Errorf("invalid key %s", key) } - // Verify that the groupVersion is api.Registry. - if !api.Registry.IsRegisteredVersion(groupVersion) { + // Verify that the groupVersion is legacyscheme.Registry. + if !legacyscheme.Registry.IsRegisteredVersion(groupVersion) { return nil, fmt.Errorf("group version %s that has not been registered", groupVersion.String()) } enabled, err := getRuntimeConfigValue(overrides, key, false) @@ -142,8 +153,8 @@ func mergeAPIResourceConfigs(defaultAPIResourceConfig *serverstorage.ResourceCon return nil, fmt.Errorf("invalid key %s", key) } resource := tokens[2] - // Verify that the groupVersion is api.Registry. - if !api.Registry.IsRegisteredVersion(groupVersion) { + // Verify that the groupVersion is legacyscheme.Registry. 
+ if !legacyscheme.Registry.IsRegisteredVersion(groupVersion) { return nil, fmt.Errorf("group version %s that has not been registered", groupVersion.String()) } diff --git a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/options/BUILD b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/options/BUILD index 83d1a3793bfb..4c8d3d517cd6 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/options/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/options/BUILD @@ -13,11 +13,13 @@ go_library( "authentication.go", "authorization.go", "cloudprovider.go", + "options.go", "serving.go", "storage_versions.go", ], + importpath = "k8s.io/kubernetes/pkg/kubeapiserver/options", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/client/informers/informers_generated/internalversion:go_default_library", "//pkg/cloudprovider:go_default_library", "//pkg/kubeapiserver/authenticator:go_default_library", @@ -52,6 +54,7 @@ filegroup( go_test( name = "go_default_test", srcs = ["storage_versions_test.go"], + importpath = "k8s.io/kubernetes/pkg/kubeapiserver/options", library = ":go_default_library", deps = ["//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/options/authentication.go b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/options/authentication.go index 3acf11d97673..6a6b579f2153 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/options/authentication.go +++ b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/options/authentication.go @@ -180,10 +180,6 @@ func (s *BuiltInAuthenticationOptions) AddFlags(fs *pflag.FlagSet) { } if s.BootstrapToken != nil { - fs.BoolVar(&s.BootstrapToken.Enable, "experimental-bootstrap-token-auth", s.BootstrapToken.Enable, ""+ - "Deprecated (use --enable-bootstrap-token-auth).") - fs.MarkDeprecated("experimental-bootstrap-token-auth", "use --enable-bootstrap-token-auth instead.") - fs.BoolVar(&s.BootstrapToken.Enable, "enable-bootstrap-token-auth", s.BootstrapToken.Enable, ""+ "Enable to allow secrets of type 'bootstrap.kubernetes.io/token' in the 'kube-system' "+ "namespace to be used for TLS bootstrapping authentication.") @@ -298,8 +294,10 @@ func (s *BuiltInAuthenticationOptions) ToAuthenticationConfig() authenticator.Au ret.OIDCCAFile = s.OIDC.CAFile ret.OIDCClientID = s.OIDC.ClientID ret.OIDCGroupsClaim = s.OIDC.GroupsClaim + ret.OIDCGroupsPrefix = s.OIDC.GroupsPrefix ret.OIDCIssuerURL = s.OIDC.IssuerURL ret.OIDCUsernameClaim = s.OIDC.UsernameClaim + ret.OIDCUsernamePrefix = s.OIDC.UsernamePrefix } if s.PasswordFile != nil { diff --git a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/options/cloudprovider.go b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/options/cloudprovider.go index f1e599762426..310acb592ced 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/options/cloudprovider.go +++ b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/options/cloudprovider.go @@ -81,6 +81,8 @@ func (s *CloudProviderOptions) DefaultExternalHost(genericoptions *genericoption for _, addr := range addrs { if addr.Type == v1.NodeExternalIP { genericoptions.ExternalHost = addr.Address + glog.Warning("[Deprecated] Getting host address using cloud provider is " + + "now deprecated. 
Please use --external-hostname explicitly") } } } diff --git a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/options/options.go b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/options/options.go new file mode 100644 index 000000000000..acc1c91f9f22 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/options/options.go @@ -0,0 +1,24 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + utilnet "k8s.io/apimachinery/pkg/util/net" +) + +// DefaultServiceNodePortRange is the default port range for NodePort services. +var DefaultServiceNodePortRange = utilnet.PortRange{Base: 30000, Size: 2768} diff --git a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/options/storage_versions.go b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/options/storage_versions.go index d466900431aa..bac8725605e1 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/options/storage_versions.go +++ b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/options/storage_versions.go @@ -20,7 +20,7 @@ import ( "strings" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "github.com/spf13/pflag" ) @@ -40,8 +40,8 @@ type StorageSerializationOptions struct { func NewStorageSerializationOptions() *StorageSerializationOptions { return &StorageSerializationOptions{ - DefaultStorageVersions: api.Registry.AllPreferredGroupVersions(), - StorageVersions: api.Registry.AllPreferredGroupVersions(), + DefaultStorageVersions: legacyscheme.Registry.AllPreferredGroupVersions(), + StorageVersions: legacyscheme.Registry.AllPreferredGroupVersions(), } } diff --git a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/server/BUILD b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/server/BUILD index cbbc801b4758..2c7cf9a245e8 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/server/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/server/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["insecure_handler.go"], + importpath = "k8s.io/kubernetes/pkg/kubeapiserver/server", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", @@ -16,6 +17,7 @@ go_library( "//vendor/k8s.io/apiserver/pkg/features:go_default_library", "//vendor/k8s.io/apiserver/pkg/server:go_default_library", "//vendor/k8s.io/apiserver/pkg/server/filters:go_default_library", + "//vendor/k8s.io/apiserver/pkg/server/options:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/server/insecure_handler.go b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/server/insecure_handler.go index 8186a0ac7abc..c3a3921f50a2 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/server/insecure_handler.go +++ b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/server/insecure_handler.go @@ -19,6 +19,7 @@ package server import ( "net" "net/http" + "time" 
"github.com/golang/glog" @@ -28,6 +29,7 @@ import ( "k8s.io/apiserver/pkg/features" "k8s.io/apiserver/pkg/server" genericfilters "k8s.io/apiserver/pkg/server/filters" + "k8s.io/apiserver/pkg/server/options" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/rest" ) @@ -45,11 +47,12 @@ func BuildInsecureHandlerChain(apiHandler http.Handler, c *server.Config) http.H } handler = genericapifilters.WithAuthentication(handler, c.RequestContextMapper, insecureSuperuser{}, nil) handler = genericfilters.WithCORS(handler, c.CorsAllowedOriginList, nil, nil, nil, "true") - handler = genericfilters.WithPanicRecovery(handler) handler = genericfilters.WithTimeoutForNonLongRunningRequests(handler, c.RequestContextMapper, c.LongRunningFunc, c.RequestTimeout) handler = genericfilters.WithMaxInFlightLimit(handler, c.MaxRequestsInFlight, c.MaxMutatingRequestsInFlight, c.RequestContextMapper, c.LongRunningFunc) + handler = genericfilters.WithWaitGroup(handler, c.RequestContextMapper, c.LongRunningFunc, c.HandlerChainWaitGroup) handler = genericapifilters.WithRequestInfo(handler, server.NewRequestInfoResolver(c), c.RequestContextMapper) handler = apirequest.WithRequestContext(handler, c.RequestContextMapper) + handler = genericfilters.WithPanicRecovery(handler) return handler } @@ -84,12 +87,12 @@ func (s *InsecureServingInfo) NewLoopbackClientConfig(token string) (*rest.Confi // NonBlockingRun spawns the insecure http server. An error is // returned if the ports cannot be listened on. -func NonBlockingRun(insecureServingInfo *InsecureServingInfo, insecureHandler http.Handler, stopCh <-chan struct{}) error { +func NonBlockingRun(insecureServingInfo *InsecureServingInfo, insecureHandler http.Handler, shutDownTimeout time.Duration, stopCh <-chan struct{}) error { // Use an internal stop channel to allow cleanup of the listeners on error. internalStopCh := make(chan struct{}) if insecureServingInfo != nil && insecureHandler != nil { - if err := serveInsecurely(insecureServingInfo, insecureHandler, internalStopCh); err != nil { + if err := serveInsecurely(insecureServingInfo, insecureHandler, shutDownTimeout, internalStopCh); err != nil { close(internalStopCh) return err } @@ -109,15 +112,18 @@ func NonBlockingRun(insecureServingInfo *InsecureServingInfo, insecureHandler ht // serveInsecurely run the insecure http server. It fails only if the initial listen // call fails. The actual server loop (stoppable by closing stopCh) runs in a go // routine, i.e. serveInsecurely does not block. 
-func serveInsecurely(insecureServingInfo *InsecureServingInfo, insecureHandler http.Handler, stopCh <-chan struct{}) error { +func serveInsecurely(insecureServingInfo *InsecureServingInfo, insecureHandler http.Handler, shutDownTimeout time.Duration, stopCh <-chan struct{}) error { insecureServer := &http.Server{ Addr: insecureServingInfo.BindAddress, Handler: insecureHandler, MaxHeaderBytes: 1 << 20, } glog.Infof("Serving insecurely on %s", insecureServingInfo.BindAddress) - var err error - _, err = server.RunServer(insecureServer, insecureServingInfo.BindNetwork, stopCh) + ln, _, err := options.CreateListener(insecureServingInfo.BindNetwork, insecureServingInfo.BindAddress) + if err != nil { + return err + } + err = server.RunServer(insecureServer, ln, shutDownTimeout, stopCh) return err } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/.import-restrictions b/vendor/k8s.io/kubernetes/pkg/kubectl/.import-restrictions new file mode 100644 index 000000000000..875f997dd52b --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/.import-restrictions @@ -0,0 +1,150 @@ +{ + "Rules": [{ + "SelectorRegexp": "k8s[.]io/kubernetes/pkg", + "AllowedPrefixes": [ + "k8s.io/kubernetes/pkg/api", + "k8s.io/kubernetes/pkg/api/events", + "k8s.io/kubernetes/pkg/api/legacyscheme", + "k8s.io/kubernetes/pkg/api/pod", + "k8s.io/kubernetes/pkg/api/ref", + "k8s.io/kubernetes/pkg/api/resource", + "k8s.io/kubernetes/pkg/api/service", + "k8s.io/kubernetes/pkg/api/v1/pod", + "k8s.io/kubernetes/pkg/api/v1/service", + "k8s.io/kubernetes/pkg/apis/admissionregistration", + "k8s.io/kubernetes/pkg/apis/admissionregistration/install", + "k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1", + "k8s.io/kubernetes/pkg/apis/apps", + "k8s.io/kubernetes/pkg/apis/apps/install", + "k8s.io/kubernetes/pkg/apis/apps/v1", + "k8s.io/kubernetes/pkg/apis/apps/v1beta1", + "k8s.io/kubernetes/pkg/apis/apps/v1beta2", + "k8s.io/kubernetes/pkg/apis/authentication", + "k8s.io/kubernetes/pkg/apis/authentication/install", + "k8s.io/kubernetes/pkg/apis/authentication/v1", + "k8s.io/kubernetes/pkg/apis/authentication/v1beta1", + "k8s.io/kubernetes/pkg/apis/authorization", + "k8s.io/kubernetes/pkg/apis/authorization/install", + "k8s.io/kubernetes/pkg/apis/authorization/v1", + "k8s.io/kubernetes/pkg/apis/authorization/v1beta1", + "k8s.io/kubernetes/pkg/apis/autoscaling", + "k8s.io/kubernetes/pkg/apis/autoscaling/install", + "k8s.io/kubernetes/pkg/apis/autoscaling/v1", + "k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1", + "k8s.io/kubernetes/pkg/apis/batch", + "k8s.io/kubernetes/pkg/apis/batch/install", + "k8s.io/kubernetes/pkg/apis/batch/v1", + "k8s.io/kubernetes/pkg/apis/batch/v1beta1", + "k8s.io/kubernetes/pkg/apis/batch/v2alpha1", + "k8s.io/kubernetes/pkg/apis/certificates", + "k8s.io/kubernetes/pkg/apis/certificates/install", + "k8s.io/kubernetes/pkg/apis/certificates/v1beta1", + "k8s.io/kubernetes/pkg/apis/componentconfig", + "k8s.io/kubernetes/pkg/apis/componentconfig/install", + "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1", + "k8s.io/kubernetes/pkg/apis/core", + "k8s.io/kubernetes/pkg/apis/core/helper", + "k8s.io/kubernetes/pkg/apis/core/helper/qos", + "k8s.io/kubernetes/pkg/apis/core/install", + "k8s.io/kubernetes/pkg/apis/core/v1", + "k8s.io/kubernetes/pkg/apis/core/v1/helper", + "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos", + "k8s.io/kubernetes/pkg/apis/core/validation", + "k8s.io/kubernetes/pkg/apis/extensions", + "k8s.io/kubernetes/pkg/apis/extensions/install", + "k8s.io/kubernetes/pkg/apis/extensions/v1beta1", + 
"k8s.io/kubernetes/pkg/apis/networking", + "k8s.io/kubernetes/pkg/apis/networking/install", + "k8s.io/kubernetes/pkg/apis/networking/v1", + "k8s.io/kubernetes/pkg/apis/policy", + "k8s.io/kubernetes/pkg/apis/policy/install", + "k8s.io/kubernetes/pkg/apis/policy/v1beta1", + "k8s.io/kubernetes/pkg/apis/rbac", + "k8s.io/kubernetes/pkg/apis/rbac/install", + "k8s.io/kubernetes/pkg/apis/rbac/v1", + "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1", + "k8s.io/kubernetes/pkg/apis/rbac/v1beta1", + "k8s.io/kubernetes/pkg/apis/scheduling", + "k8s.io/kubernetes/pkg/apis/scheduling/install", + "k8s.io/kubernetes/pkg/apis/scheduling/v1alpha1", + "k8s.io/kubernetes/pkg/apis/settings", + "k8s.io/kubernetes/pkg/apis/settings/install", + "k8s.io/kubernetes/pkg/apis/settings/v1alpha1", + "k8s.io/kubernetes/pkg/apis/storage", + "k8s.io/kubernetes/pkg/apis/storage/install", + "k8s.io/kubernetes/pkg/apis/storage/util", + "k8s.io/kubernetes/pkg/apis/storage/v1", + "k8s.io/kubernetes/pkg/apis/storage/v1beta1", + "k8s.io/kubernetes/pkg/capabilities", + "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset", + "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme", + "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion", + "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion", + "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion", + "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion", + "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion", + "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion", + "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion", + "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion", + "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion", + "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion", + "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion", + "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion", + "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion", + "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion", + "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion", + "k8s.io/kubernetes/pkg/client/metrics/prometheus", + "k8s.io/kubernetes/pkg/client/unversioned", + "k8s.io/kubernetes/pkg/cloudprovider", + "k8s.io/kubernetes/pkg/cloudprovider/providers/aws", + "k8s.io/kubernetes/pkg/controller", + "k8s.io/kubernetes/pkg/controller/daemon", + "k8s.io/kubernetes/pkg/controller/daemon/util", + "k8s.io/kubernetes/pkg/controller/deployment/util", + "k8s.io/kubernetes/pkg/controller/history", + "k8s.io/kubernetes/pkg/controller/statefulset", + "k8s.io/kubernetes/pkg/credentialprovider", + "k8s.io/kubernetes/pkg/credentialprovider/aws", + "k8s.io/kubernetes/pkg/features", + "k8s.io/kubernetes/pkg/fieldpath", + "k8s.io/kubernetes/pkg/generated", + "k8s.io/kubernetes/pkg/kubectl", + "k8s.io/kubernetes/pkg/kubelet/apis", + "k8s.io/kubernetes/pkg/kubelet/qos", + 
"k8s.io/kubernetes/pkg/kubelet/types", + "k8s.io/kubernetes/pkg/master/ports", + "k8s.io/kubernetes/pkg/printers", + "k8s.io/kubernetes/pkg/printers/internalversion", + "k8s.io/kubernetes/pkg/registry/rbac/reconciliation", + "k8s.io/kubernetes/pkg/registry/rbac/validation", + "k8s.io/kubernetes/pkg/security/apparmor", + "k8s.io/kubernetes/pkg/serviceaccount", + "k8s.io/kubernetes/pkg/util/file", + "k8s.io/kubernetes/pkg/util/goroutinemap", + "k8s.io/kubernetes/pkg/util/hash", + "k8s.io/kubernetes/pkg/util/interrupt", + "k8s.io/kubernetes/pkg/util/io", + "k8s.io/kubernetes/pkg/util/labels", + "k8s.io/kubernetes/pkg/util/metrics", + "k8s.io/kubernetes/pkg/util/mount", + "k8s.io/kubernetes/pkg/util/net/sets", + "k8s.io/kubernetes/pkg/util/node", + "k8s.io/kubernetes/pkg/util/nsenter", + "k8s.io/kubernetes/pkg/util/parsers", + "k8s.io/kubernetes/pkg/util/pointer", + "k8s.io/kubernetes/pkg/util/slice", + "k8s.io/kubernetes/pkg/util/taints", + "k8s.io/kubernetes/pkg/version", + "k8s.io/kubernetes/pkg/version/prometheus", + "k8s.io/kubernetes/pkg/volume", + "k8s.io/kubernetes/pkg/volume/util", + "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm", + "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates", + "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util", + "k8s.io/kubernetes/plugin/pkg/scheduler/api", + "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache", + "k8s.io/kubernetes/plugin/pkg/scheduler/util" + ], + "ForbiddenPrefixes": [] + }] +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/BUILD b/vendor/k8s.io/kubernetes/pkg/kubectl/BUILD index 6813564059cf..fd1a9876c1aa 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/BUILD @@ -9,13 +9,17 @@ load( go_test( name = "go_default_test", srcs = [ - "cluster_test.go", + "autoscale_test.go", + "clusterrolebinding_test.go", "configmap_test.go", "delete_test.go", + "deployment_test.go", "env_file_test.go", "generate_test.go", - "kubectl_test.go", + "history_test.go", "namespace_test.go", + "pdb_test.go", + "priorityclass_test.go", "quota_test.go", "resource_filter_test.go", "rolebinding_test.go", @@ -31,16 +35,15 @@ go_test( "serviceaccount_test.go", "sorting_printer_test.go", ], + importpath = "k8s.io/kubernetes/pkg/kubectl", library = ":go_default_library", deps = [ - "//federation/apis/federation/v1beta1:go_default_library", - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", "//pkg/api/testing:go_default_library", - "//pkg/apis/apps:go_default_library", "//pkg/apis/batch:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", - "//pkg/apis/rbac:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/client/clientset_generated/internalclientset/fake:go_default_library", "//pkg/client/clientset_generated/internalclientset/typed/batch/internalversion:go_default_library", @@ -50,11 +53,16 @@ go_test( "//pkg/printers:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/k8s.io/api/apps/v1beta1:go_default_library", + "//vendor/k8s.io/api/autoscaling/v1:go_default_library", "//vendor/k8s.io/api/batch/v1:go_default_library", "//vendor/k8s.io/api/batch/v1beta1:go_default_library", "//vendor/k8s.io/api/batch/v2alpha1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", + 
"//vendor/k8s.io/api/policy/v1beta1:go_default_library", + "//vendor/k8s.io/api/rbac/v1:go_default_library", + "//vendor/k8s.io/api/rbac/v1beta1:go_default_library", + "//vendor/k8s.io/api/scheduling/v1alpha1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", @@ -66,6 +74,7 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", "//vendor/k8s.io/client-go/rest/fake:go_default_library", "//vendor/k8s.io/client-go/testing:go_default_library", @@ -79,20 +88,19 @@ go_library( "apply.go", "autoscale.go", "bash_comp_utils.go", - "cluster.go", "clusterrolebinding.go", "configmap.go", "delete.go", "deployment.go", "doc.go", "env_file.go", - "explain.go", "generate.go", "history.go", "interfaces.go", "kubectl.go", "namespace.go", "pdb.go", + "priorityclass.go", "quota.go", "resource_filter.go", "rolebinding.go", @@ -110,18 +118,15 @@ go_library( "sorting_printer.go", "versioned_client.go", ], + importpath = "k8s.io/kubernetes/pkg/kubectl", deps = [ - "//federation/apis/federation/v1beta1:go_default_library", - "//pkg/api:go_default_library", - "//pkg/api/util:go_default_library", - "//pkg/api/v1:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/api/v1/pod:go_default_library", "//pkg/apis/apps:go_default_library", - "//pkg/apis/autoscaling:go_default_library", "//pkg/apis/batch:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/v1:go_default_library", "//pkg/apis/extensions:go_default_library", - "//pkg/apis/policy:go_default_library", - "//pkg/apis/rbac:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/client/clientset_generated/internalclientset/typed/apps/internalversion:go_default_library", "//pkg/client/clientset_generated/internalclientset/typed/batch/internalversion:go_default_library", @@ -132,22 +137,27 @@ go_library( "//pkg/controller/deployment/util:go_default_library", "//pkg/controller/statefulset:go_default_library", "//pkg/credentialprovider:go_default_library", + "//pkg/kubectl/apps:go_default_library", "//pkg/kubectl/resource:go_default_library", "//pkg/kubectl/util:go_default_library", "//pkg/kubectl/util/hash:go_default_library", "//pkg/kubectl/util/slice:go_default_library", "//pkg/printers:go_default_library", "//pkg/printers/internalversion:go_default_library", - "//vendor/github.com/emicklei/go-restful-swagger12:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/api/apps/v1beta1:go_default_library", + "//vendor/k8s.io/api/autoscaling/v1:go_default_library", "//vendor/k8s.io/api/batch/v1:go_default_library", "//vendor/k8s.io/api/batch/v1beta1:go_default_library", "//vendor/k8s.io/api/batch/v2alpha1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", + "//vendor/k8s.io/api/policy/v1beta1:go_default_library", + "//vendor/k8s.io/api/rbac/v1:go_default_library", + "//vendor/k8s.io/api/rbac/v1beta1:go_default_library", + 
"//vendor/k8s.io/api/scheduling/v1alpha1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", @@ -167,6 +177,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", @@ -188,12 +199,16 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", + "//pkg/kubectl/apply:all-srcs", "//pkg/kubectl/apps:all-srcs", + "//pkg/kubectl/categories:all-srcs", "//pkg/kubectl/cmd:all-srcs", + "//pkg/kubectl/explain:all-srcs", "//pkg/kubectl/metricsutil:all-srcs", "//pkg/kubectl/plugins:all-srcs", "//pkg/kubectl/proxy:all-srcs", "//pkg/kubectl/resource:all-srcs", + "//pkg/kubectl/scheme:all-srcs", "//pkg/kubectl/testing:all-srcs", "//pkg/kubectl/util:all-srcs", "//pkg/kubectl/validation:all-srcs", diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/apply.go b/vendor/k8s.io/kubernetes/pkg/kubectl/apply.go index ba7488895298..fc716369a13b 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/apply.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/apply.go @@ -19,7 +19,7 @@ package kubectl import ( "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/kubectl/resource" ) @@ -72,59 +72,33 @@ func GetModifiedConfiguration(info *resource.Info, annotate bool, codec runtime. // First serialize the object without the annotation to prevent recursion, // then add that serialization to it as the annotation and serialize it again. var modified []byte - if info.VersionedObject != nil { - // If an object was read from input, use that version. - accessor, err := meta.Accessor(info.VersionedObject) - if err != nil { - return nil, err - } - - // Get the current annotations from the object. - annots := accessor.GetAnnotations() - if annots == nil { - annots = map[string]string{} - } - original := annots[api.LastAppliedConfigAnnotation] - delete(annots, api.LastAppliedConfigAnnotation) - accessor.SetAnnotations(annots) - // TODO: this needs to be abstracted - there should be no assumption that versioned object - // can be marshalled to JSON. - modified, err = runtime.Encode(codec, info.VersionedObject) - if err != nil { - return nil, err - } + // Otherwise, use the server side version of the object. + accessor := info.Mapping.MetadataAccessor + // Get the current annotations from the object. + annots, err := accessor.Annotations(info.Object) + if err != nil { + return nil, err + } - if annotate { - annots[api.LastAppliedConfigAnnotation] = string(modified) - accessor.SetAnnotations(annots) - // TODO: this needs to be abstracted - there should be no assumption that versioned object - // can be marshalled to JSON. - modified, err = runtime.Encode(codec, info.VersionedObject) - if err != nil { - return nil, err - } - } + if annots == nil { + annots = map[string]string{} + } - // Restore the object to its original condition. 
- annots[api.LastAppliedConfigAnnotation] = original - accessor.SetAnnotations(annots) - } else { - // Otherwise, use the server side version of the object. - accessor := info.Mapping.MetadataAccessor - // Get the current annotations from the object. - annots, err := accessor.Annotations(info.Object) - if err != nil { - return nil, err - } + original := annots[api.LastAppliedConfigAnnotation] + delete(annots, api.LastAppliedConfigAnnotation) + if err := accessor.SetAnnotations(info.Object, annots); err != nil { + return nil, err + } - if annots == nil { - annots = map[string]string{} - } + modified, err = runtime.Encode(codec, info.Object) + if err != nil { + return nil, err + } - original := annots[api.LastAppliedConfigAnnotation] - delete(annots, api.LastAppliedConfigAnnotation) - if err := accessor.SetAnnotations(info.Object, annots); err != nil { + if annotate { + annots[api.LastAppliedConfigAnnotation] = string(modified) + if err := info.Mapping.MetadataAccessor.SetAnnotations(info.Object, annots); err != nil { return nil, err } @@ -132,24 +106,12 @@ func GetModifiedConfiguration(info *resource.Info, annotate bool, codec runtime. if err != nil { return nil, err } + } - if annotate { - annots[api.LastAppliedConfigAnnotation] = string(modified) - if err := info.Mapping.MetadataAccessor.SetAnnotations(info.Object, annots); err != nil { - return nil, err - } - - modified, err = runtime.Encode(codec, info.Object) - if err != nil { - return nil, err - } - } - - // Restore the object to its original condition. - annots[api.LastAppliedConfigAnnotation] = original - if err := info.Mapping.MetadataAccessor.SetAnnotations(info.Object, annots); err != nil { - return nil, err - } + // Restore the object to its original condition. + annots[api.LastAppliedConfigAnnotation] = original + if err := info.Mapping.MetadataAccessor.SetAnnotations(info.Object, annots); err != nil { + return nil, err } return modified, nil diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/apps/BUILD b/vendor/k8s.io/kubernetes/pkg/kubectl/apps/BUILD new file mode 100644 index 000000000000..e60af52bd6f0 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/apps/BUILD @@ -0,0 +1,43 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_library( + name = "go_default_library", + srcs = ["kind_visitor.go"], + importpath = "k8s.io/kubernetes/pkg/kubectl/apps", + deps = ["//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) + +go_test( + name = "go_default_xtest", + srcs = [ + "apps_suite_test.go", + "kind_visitor_test.go", + ], + importpath = "k8s.io/kubernetes/pkg/kubectl/apps_test", + deps = [ + ":go_default_library", + "//vendor/github.com/onsi/ginkgo:go_default_library", + "//vendor/github.com/onsi/ginkgo/config:go_default_library", + "//vendor/github.com/onsi/ginkgo/types:go_default_library", + "//vendor/github.com/onsi/gomega:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/apps/kind_visitor.go b/vendor/k8s.io/kubernetes/pkg/kubectl/apps/kind_visitor.go new file mode 100644 index 000000000000..9c3ad07fe7e1 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/apps/kind_visitor.go @@ -0,0 +1,89 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apps + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// KindVisitor is used with GroupKindElement to call a particular function depending on the +// Kind of a schema.GroupKind +type KindVisitor interface { + VisitDaemonSet(kind GroupKindElement) + VisitDeployment(kind GroupKindElement) + VisitJob(kind GroupKindElement) + VisitPod(kind GroupKindElement) + VisitReplicaSet(kind GroupKindElement) + VisitReplicationController(kind GroupKindElement) + VisitStatefulSet(kind GroupKindElement) + VisitCronJob(kind GroupKindElement) +} + +// GroupKindElement defines a Kubernetes API group elem +type GroupKindElement schema.GroupKind + +// Accept calls the Visit method on visitor that corresponds to elem's Kind +func (elem GroupKindElement) Accept(visitor KindVisitor) error { + switch { + case elem.GroupMatch("apps", "extensions") && elem.Kind == "DaemonSet": + visitor.VisitDaemonSet(elem) + case elem.GroupMatch("apps", "extensions") && elem.Kind == "Deployment": + visitor.VisitDeployment(elem) + case elem.GroupMatch("batch") && elem.Kind == "Job": + visitor.VisitJob(elem) + case elem.GroupMatch("", "core") && elem.Kind == "Pod": + visitor.VisitPod(elem) + case elem.GroupMatch("apps", "extensions") && elem.Kind == "ReplicaSet": + visitor.VisitReplicaSet(elem) + case elem.GroupMatch("", "core") && elem.Kind == "ReplicationController": + visitor.VisitReplicationController(elem) + case elem.GroupMatch("apps") && elem.Kind == "StatefulSet": + visitor.VisitStatefulSet(elem) + case elem.GroupMatch("batch") && elem.Kind == "CronJob": + visitor.VisitCronJob(elem) + default: + return fmt.Errorf("no visitor method exists for %v", elem) + } + return nil +} + +// GroupMatch returns true if and only if elem's group matches one +// of the group arguments +func (elem GroupKindElement) GroupMatch(groups ...string) bool { + for _, g := range groups { + if elem.Group == g { + return true + } + } + return false +} + +// NoOpKindVisitor implements KindVisitor with no-op functions. 
+type NoOpKindVisitor struct{} + +var _ KindVisitor = &NoOpKindVisitor{} + +func (*NoOpKindVisitor) VisitDaemonSet(kind GroupKindElement) {} +func (*NoOpKindVisitor) VisitDeployment(kind GroupKindElement) {} +func (*NoOpKindVisitor) VisitJob(kind GroupKindElement) {} +func (*NoOpKindVisitor) VisitPod(kind GroupKindElement) {} +func (*NoOpKindVisitor) VisitReplicaSet(kind GroupKindElement) {} +func (*NoOpKindVisitor) VisitReplicationController(kind GroupKindElement) {} +func (*NoOpKindVisitor) VisitStatefulSet(kind GroupKindElement) {} +func (*NoOpKindVisitor) VisitCronJob(kind GroupKindElement) {} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/autoscale.go b/vendor/k8s.io/kubernetes/pkg/kubectl/autoscale.go index eacef9bbf76b..dcbfadde84ae 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/autoscale.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/autoscale.go @@ -20,10 +20,9 @@ import ( "fmt" "strconv" + autoscalingv1 "k8s.io/api/autoscaling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/autoscaling" ) type HorizontalPodAutoscalerV1 struct{} @@ -91,12 +90,12 @@ func generateHPA(genericParams map[string]interface{}) (runtime.Object, error) { } } - scaler := autoscaling.HorizontalPodAutoscaler{ + scaler := autoscalingv1.HorizontalPodAutoscaler{ ObjectMeta: metav1.ObjectMeta{ Name: name, }, - Spec: autoscaling.HorizontalPodAutoscalerSpec{ - ScaleTargetRef: autoscaling.CrossVersionObjectReference{ + Spec: autoscalingv1.HorizontalPodAutoscalerSpec{ + ScaleTargetRef: autoscalingv1.CrossVersionObjectReference{ Kind: params["scaleRef-kind"], Name: params["scaleRef-name"], APIVersion: params["scaleRef-apiVersion"], @@ -110,15 +109,7 @@ func generateHPA(genericParams map[string]interface{}) (runtime.Object, error) { } if cpu >= 0 { c := int32(cpu) - scaler.Spec.Metrics = []autoscaling.MetricSpec{ - { - Type: autoscaling.ResourceMetricSourceType, - Resource: &autoscaling.ResourceMetricSource{ - Name: api.ResourceCPU, - TargetAverageUtilization: &c, - }, - }, - } + scaler.Spec.TargetCPUUtilizationPercentage = &c } return &scaler, nil } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/categories/BUILD b/vendor/k8s.io/kubernetes/pkg/kubectl/categories/BUILD new file mode 100644 index 000000000000..d26c196dd582 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/categories/BUILD @@ -0,0 +1,42 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["categories.go"], + importpath = "k8s.io/kubernetes/pkg/kubectl/categories", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/client-go/discovery:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["categories_test.go"], + importpath = "k8s.io/kubernetes/pkg/kubectl/categories", + library = ":go_default_library", + deps = [ + "//vendor/github.com/googleapis/gnostic/OpenAPIv2:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/version:go_default_library", + "//vendor/k8s.io/client-go/discovery:go_default_library", + "//vendor/k8s.io/client-go/rest:go_default_library", + "//vendor/k8s.io/client-go/rest/fake:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + 
visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/categories.go b/vendor/k8s.io/kubernetes/pkg/kubectl/categories/categories.go similarity index 95% rename from vendor/k8s.io/kubernetes/pkg/kubectl/resource/categories.go rename to vendor/k8s.io/kubernetes/pkg/kubectl/categories/categories.go index bee08a05c530..4a669599af71 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/categories.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/categories/categories.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package resource +package categories import ( "k8s.io/apimachinery/pkg/runtime/schema" @@ -179,10 +179,3 @@ var LegacyCategoryExpander CategoryExpander = SimpleCategoryExpander{ "all": legacyUserResources, }, } - -// LegacyFederationCategoryExpander is the old hardcoded expansion for federation -var LegacyFederationCategoryExpander CategoryExpander = SimpleCategoryExpander{ - Expansions: map[string][]schema.GroupResource{ - "all": {{Group: "", Resource: "services"}}, - }, -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cluster.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cluster.go deleted file mode 100644 index ffac838b4bbc..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cluster.go +++ /dev/null @@ -1,159 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "fmt" - - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1" -) - -const ( - ServiceAccountNameAnnotation = "federation.kubernetes.io/servive-account-name" - ClusterRoleNameAnnotation = "federation.kubernetes.io/cluster-role-name" -) - -// ClusterGeneratorV1Beta1 supports stable generation of a -// federation/cluster resource. -type ClusterGeneratorV1Beta1 struct { - // Name of the cluster context (required) - Name string - // ClientCIDR is the CIDR range in which the Kubernetes APIServer - // is available for the client (optional) - ClientCIDR string - // ServerAddress is the APIServer address of the Kubernetes cluster - // that is being registered (required) - ServerAddress string - // SecretName is the name of the secret that stores the credentials - // for the Kubernetes cluster that is being registered (optional) - SecretName string - // ServiceAccountName is the name of the service account that is - // created in the cluster being registered. If this is provided, - // then ClusterRoleName must also be provided (optional) - ServiceAccountName string - // ClusterRoleName is the name of the cluster role and cluster role - // binding that are created in the cluster being registered. 
If this - // is provided, then ServiceAccountName must also be provided - // (optional) - ClusterRoleName string -} - -// Ensure it supports the generator pattern that uses parameter -// injection. -var _ Generator = &ClusterGeneratorV1Beta1{} - -// Ensure it supports the generator pattern that uses parameters -// specified during construction. -var _ StructuredGenerator = &ClusterGeneratorV1Beta1{} - -// Generate returns a cluster resource using the specified parameters. -func (s ClusterGeneratorV1Beta1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - err := ValidateParams(s.ParamNames(), genericParams) - if err != nil { - return nil, err - } - clustergen := &ClusterGeneratorV1Beta1{} - params := map[string]string{} - for key, value := range genericParams { - strVal, isString := value.(string) - if !isString { - return nil, fmt.Errorf("expected string, saw %v for '%s'", value, key) - } - params[key] = strVal - } - clustergen.Name = params["name"] - clustergen.ClientCIDR = params["client-cidr"] - clustergen.ServerAddress = params["server-address"] - clustergen.SecretName = params["secret"] - clustergen.ServiceAccountName = params["service-account-name"] - clustergen.ClusterRoleName = params["cluster-role-name"] - return clustergen.StructuredGenerate() -} - -// ParamNames returns the set of supported input parameters when using -// the parameter injection generator pattern. -func (s ClusterGeneratorV1Beta1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, - {"client-cidr", false}, - {"server-address", true}, - {"secret", false}, - {"service-account-name", false}, - {"cluster-role-name", false}, - } -} - -// StructuredGenerate outputs a federation cluster resource object -// using the configured fields. -func (s ClusterGeneratorV1Beta1) StructuredGenerate() (runtime.Object, error) { - if err := s.validate(); err != nil { - return nil, err - } - if s.ClientCIDR == "" { - s.ClientCIDR = "0.0.0.0/0" - } - if s.SecretName == "" { - s.SecretName = s.Name - } - cluster := &federationapi.Cluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: s.Name, - }, - Spec: federationapi.ClusterSpec{ - ServerAddressByClientCIDRs: []federationapi.ServerAddressByClientCIDR{ - { - ClientCIDR: s.ClientCIDR, - ServerAddress: s.ServerAddress, - }, - }, - SecretRef: &v1.LocalObjectReference{ - Name: s.SecretName, - }, - }, - } - - annotations := make(map[string]string) - if s.ServiceAccountName != "" { - annotations[ServiceAccountNameAnnotation] = s.ServiceAccountName - } - if s.ClusterRoleName != "" { - annotations[ClusterRoleNameAnnotation] = s.ClusterRoleName - } - if len(annotations) == 1 { - return nil, fmt.Errorf("Either both or neither of ServiceAccountName and ClusterRoleName must be provided.") - } - if len(annotations) > 0 { - cluster.SetAnnotations(annotations) - } - - return cluster, nil -} - -// validate validates required fields are set to support structured -// generation. 
-func (s ClusterGeneratorV1Beta1) validate() error { - if len(s.Name) == 0 { - return fmt.Errorf("name must be specified") - } - if len(s.ServerAddress) == 0 { - return fmt.Errorf("server address must be specified") - } - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/clusterrolebinding.go b/vendor/k8s.io/kubernetes/pkg/kubectl/clusterrolebinding.go index b19ca40754c9..d94656069117 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/clusterrolebinding.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/clusterrolebinding.go @@ -21,9 +21,9 @@ import ( "strings" + rbacv1beta1 "k8s.io/api/rbac/v1beta1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/pkg/apis/rbac" ) // ClusterRoleBindingGeneratorV1 supports stable generation of a clusterRoleBinding. @@ -109,24 +109,24 @@ func (s ClusterRoleBindingGeneratorV1) StructuredGenerate() (runtime.Object, err if err := s.validate(); err != nil { return nil, err } - clusterRoleBinding := &rbac.ClusterRoleBinding{} + clusterRoleBinding := &rbacv1beta1.ClusterRoleBinding{} clusterRoleBinding.Name = s.Name - clusterRoleBinding.RoleRef = rbac.RoleRef{ - APIGroup: rbac.GroupName, + clusterRoleBinding.RoleRef = rbacv1beta1.RoleRef{ + APIGroup: rbacv1beta1.GroupName, Kind: "ClusterRole", Name: s.ClusterRole, } for _, user := range sets.NewString(s.Users...).List() { - clusterRoleBinding.Subjects = append(clusterRoleBinding.Subjects, rbac.Subject{ - Kind: rbac.UserKind, - APIGroup: rbac.GroupName, + clusterRoleBinding.Subjects = append(clusterRoleBinding.Subjects, rbacv1beta1.Subject{ + Kind: rbacv1beta1.UserKind, + APIGroup: rbacv1beta1.GroupName, Name: user, }) } for _, group := range sets.NewString(s.Groups...).List() { - clusterRoleBinding.Subjects = append(clusterRoleBinding.Subjects, rbac.Subject{ - Kind: rbac.GroupKind, - APIGroup: rbac.GroupName, + clusterRoleBinding.Subjects = append(clusterRoleBinding.Subjects, rbacv1beta1.Subject{ + Kind: rbacv1beta1.GroupKind, + APIGroup: rbacv1beta1.GroupName, Name: group, }) } @@ -135,8 +135,8 @@ func (s ClusterRoleBindingGeneratorV1) StructuredGenerate() (runtime.Object, err if len(tokens) != 2 || tokens[0] == "" || tokens[1] == "" { return nil, fmt.Errorf("serviceaccount must be :") } - clusterRoleBinding.Subjects = append(clusterRoleBinding.Subjects, rbac.Subject{ - Kind: rbac.ServiceAccountKind, + clusterRoleBinding.Subjects = append(clusterRoleBinding.Subjects, rbacv1beta1.Subject{ + Kind: rbacv1beta1.ServiceAccountKind, APIGroup: "", Namespace: tokens[0], Name: tokens[1], diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/templates/BUILD b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/templates/BUILD new file mode 100644 index 000000000000..11e3615799a7 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/templates/BUILD @@ -0,0 +1,41 @@ +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "command_groups.go", + "markdown.go", + "normalizers.go", + "templater.go", + "templates.go", + ], + importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/templates", + visibility = [ + "//build/visible_to:pkg_kubectl_cmd_templates_CONSUMERS", + ], + deps = [ + "//pkg/kubectl/util/term:go_default_library", + "//vendor/github.com/MakeNowJust/heredoc:go_default_library", + "//vendor/github.com/russross/blackfriday:go_default_library", + "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/github.com/spf13/pflag:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = 
glob(["**"]), + tags = ["automanaged"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = [ + "//build/visible_to:pkg_kubectl_cmd_templates_CONSUMERS", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/templates/command_groups.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/templates/command_groups.go new file mode 100644 index 000000000000..447a39621f7e --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/templates/command_groups.go @@ -0,0 +1,59 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package templates + +import ( + "github.com/spf13/cobra" +) + +type CommandGroup struct { + Message string + Commands []*cobra.Command +} + +type CommandGroups []CommandGroup + +func (g CommandGroups) Add(c *cobra.Command) { + for _, group := range g { + c.AddCommand(group.Commands...) + } +} + +func (g CommandGroups) Has(c *cobra.Command) bool { + for _, group := range g { + for _, command := range group.Commands { + if command == c { + return true + } + } + } + return false +} + +func AddAdditionalCommands(g CommandGroups, message string, cmds []*cobra.Command) CommandGroups { + group := CommandGroup{Message: message} + for _, c := range cmds { + // Don't show commands that have no short description + if !g.Has(c) && len(c.Short) != 0 { + group.Commands = append(group.Commands, c) + } + } + if len(group.Commands) == 0 { + return g + } + return append(g, group) +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/templates/markdown.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/templates/markdown.go new file mode 100644 index 000000000000..1a19f89fd4ce --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/templates/markdown.go @@ -0,0 +1,145 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package templates + +import ( + "bytes" + "fmt" + "strings" + + "github.com/russross/blackfriday" +) + +const linebreak = "\n" + +// ASCIIRenderer implements blackfriday.Renderer +var _ blackfriday.Renderer = &ASCIIRenderer{} + +// ASCIIRenderer is a blackfriday.Renderer intended for rendering markdown +// documents as plain text, well suited for human reading on terminals. 
+type ASCIIRenderer struct { + Indentation string + + listItemCount uint + listLevel uint +} + +// NormalText gets a text chunk *after* the markdown syntax was already +// processed and does a final cleanup on things we don't expect here, like +// removing linebreaks on things that are not a paragraph break (auto unwrap). +func (r *ASCIIRenderer) NormalText(out *bytes.Buffer, text []byte) { + raw := string(text) + lines := strings.Split(raw, linebreak) + for _, line := range lines { + trimmed := strings.Trim(line, " \n\t") + out.WriteString(trimmed) + out.WriteString(" ") + } +} + +// List renders the start and end of a list. +func (r *ASCIIRenderer) List(out *bytes.Buffer, text func() bool, flags int) { + r.listLevel++ + out.WriteString(linebreak) + text() + r.listLevel-- +} + +// ListItem renders list items and supports both ordered and unordered lists. +func (r *ASCIIRenderer) ListItem(out *bytes.Buffer, text []byte, flags int) { + if flags&blackfriday.LIST_ITEM_BEGINNING_OF_LIST != 0 { + r.listItemCount = 1 + } else { + r.listItemCount++ + } + indent := strings.Repeat(r.Indentation, int(r.listLevel)) + var bullet string + if flags&blackfriday.LIST_TYPE_ORDERED != 0 { + bullet += fmt.Sprintf("%d.", r.listItemCount) + } else { + bullet += "*" + } + out.WriteString(indent + bullet + " ") + r.fw(out, text) + out.WriteString(linebreak) +} + +// Paragraph renders the start and end of a paragraph. +func (r *ASCIIRenderer) Paragraph(out *bytes.Buffer, text func() bool) { + out.WriteString(linebreak) + text() + out.WriteString(linebreak) +} + +// BlockCode renders a chunk of text that represents source code. +func (r *ASCIIRenderer) BlockCode(out *bytes.Buffer, text []byte, lang string) { + out.WriteString(linebreak) + lines := []string{} + for _, line := range strings.Split(string(text), linebreak) { + indented := r.Indentation + line + lines = append(lines, indented) + } + out.WriteString(strings.Join(lines, linebreak)) +} + +func (r *ASCIIRenderer) GetFlags() int { return 0 } +func (r *ASCIIRenderer) HRule(out *bytes.Buffer) { + out.WriteString(linebreak + "----------" + linebreak) +} +func (r *ASCIIRenderer) LineBreak(out *bytes.Buffer) { out.WriteString(linebreak) } +func (r *ASCIIRenderer) TitleBlock(out *bytes.Buffer, text []byte) { r.fw(out, text) } +func (r *ASCIIRenderer) Header(out *bytes.Buffer, text func() bool, level int, id string) { text() } +func (r *ASCIIRenderer) BlockHtml(out *bytes.Buffer, text []byte) { r.fw(out, text) } +func (r *ASCIIRenderer) BlockQuote(out *bytes.Buffer, text []byte) { r.fw(out, text) } +func (r *ASCIIRenderer) TableRow(out *bytes.Buffer, text []byte) { r.fw(out, text) } +func (r *ASCIIRenderer) TableHeaderCell(out *bytes.Buffer, text []byte, align int) { r.fw(out, text) } +func (r *ASCIIRenderer) TableCell(out *bytes.Buffer, text []byte, align int) { r.fw(out, text) } +func (r *ASCIIRenderer) Footnotes(out *bytes.Buffer, text func() bool) { text() } +func (r *ASCIIRenderer) FootnoteItem(out *bytes.Buffer, name, text []byte, flags int) { r.fw(out, text) } +func (r *ASCIIRenderer) AutoLink(out *bytes.Buffer, link []byte, kind int) { r.fw(out, link) } +func (r *ASCIIRenderer) CodeSpan(out *bytes.Buffer, text []byte) { r.fw(out, text) } +func (r *ASCIIRenderer) DoubleEmphasis(out *bytes.Buffer, text []byte) { r.fw(out, text) } +func (r *ASCIIRenderer) Emphasis(out *bytes.Buffer, text []byte) { r.fw(out, text) } +func (r *ASCIIRenderer) RawHtmlTag(out *bytes.Buffer, text []byte) { r.fw(out, text) } +func (r *ASCIIRenderer) TripleEmphasis(out *bytes.Buffer, 
text []byte) { r.fw(out, text) } +func (r *ASCIIRenderer) StrikeThrough(out *bytes.Buffer, text []byte) { r.fw(out, text) } +func (r *ASCIIRenderer) FootnoteRef(out *bytes.Buffer, ref []byte, id int) { r.fw(out, ref) } +func (r *ASCIIRenderer) Entity(out *bytes.Buffer, entity []byte) { r.fw(out, entity) } +func (r *ASCIIRenderer) Smartypants(out *bytes.Buffer, text []byte) { r.fw(out, text) } +func (r *ASCIIRenderer) DocumentHeader(out *bytes.Buffer) {} +func (r *ASCIIRenderer) DocumentFooter(out *bytes.Buffer) {} +func (r *ASCIIRenderer) TocHeaderWithAnchor(text []byte, level int, anchor string) {} +func (r *ASCIIRenderer) TocHeader(text []byte, level int) {} +func (r *ASCIIRenderer) TocFinalize() {} + +func (r *ASCIIRenderer) Table(out *bytes.Buffer, header []byte, body []byte, columnData []int) { + r.fw(out, header, body) +} + +func (r *ASCIIRenderer) Link(out *bytes.Buffer, link []byte, title []byte, content []byte) { + r.fw(out, link) +} + +func (r *ASCIIRenderer) Image(out *bytes.Buffer, link []byte, title []byte, alt []byte) { + r.fw(out, link) +} + +func (r *ASCIIRenderer) fw(out *bytes.Buffer, text ...[]byte) { + for _, t := range text { + out.Write(t) + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/templates/normalizers.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/templates/normalizers.go new file mode 100644 index 000000000000..db7b17a7d6d2 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/templates/normalizers.go @@ -0,0 +1,97 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package templates + +import ( + "strings" + + "github.com/MakeNowJust/heredoc" + "github.com/russross/blackfriday" + "github.com/spf13/cobra" +) + +const Indentation = ` ` + +// LongDesc normalizes a command's long description to follow the conventions. +func LongDesc(s string) string { + if len(s) == 0 { + return s + } + return normalizer{s}.heredoc().markdown().trim().string +} + +// Examples normalizes a command's examples to follow the conventions. +func Examples(s string) string { + if len(s) == 0 { + return s + } + return normalizer{s}.trim().indent().string +} + +// Normalize perform all required normalizations on a given command. +func Normalize(cmd *cobra.Command) *cobra.Command { + if len(cmd.Long) > 0 { + cmd.Long = LongDesc(cmd.Long) + } + if len(cmd.Example) > 0 { + cmd.Example = Examples(cmd.Example) + } + return cmd +} + +// NormalizeAll perform all required normalizations in the entire command tree. 
+func NormalizeAll(cmd *cobra.Command) *cobra.Command { + if cmd.HasSubCommands() { + for _, subCmd := range cmd.Commands() { + NormalizeAll(subCmd) + } + } + Normalize(cmd) + return cmd +} + +type normalizer struct { + string +} + +func (s normalizer) markdown() normalizer { + bytes := []byte(s.string) + formatted := blackfriday.Markdown(bytes, &ASCIIRenderer{Indentation: Indentation}, 0) + s.string = string(formatted) + return s +} + +func (s normalizer) heredoc() normalizer { + s.string = heredoc.Doc(s.string) + return s +} + +func (s normalizer) trim() normalizer { + s.string = strings.TrimSpace(s.string) + return s +} + +func (s normalizer) indent() normalizer { + indentedLines := []string{} + for _, line := range strings.Split(s.string, "\n") { + trimmed := strings.TrimSpace(line) + indented := Indentation + trimmed + indentedLines = append(indentedLines, indented) + } + s.string = strings.Join(indentedLines, "\n") + return s +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/templates/templater.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/templates/templater.go new file mode 100644 index 000000000000..27b76dc42b7c --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/templates/templater.go @@ -0,0 +1,293 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package templates + +import ( + "bytes" + "fmt" + "strings" + "text/template" + "unicode" + + "k8s.io/kubernetes/pkg/kubectl/util/term" + + "github.com/spf13/cobra" + flag "github.com/spf13/pflag" +) + +type FlagExposer interface { + ExposeFlags(cmd *cobra.Command, flags ...string) FlagExposer +} + +func ActsAsRootCommand(cmd *cobra.Command, filters []string, groups ...CommandGroup) FlagExposer { + if cmd == nil { + panic("nil root command") + } + templater := &templater{ + RootCmd: cmd, + UsageTemplate: MainUsageTemplate(), + HelpTemplate: MainHelpTemplate(), + CommandGroups: groups, + Filtered: filters, + } + cmd.SetUsageFunc(templater.UsageFunc()) + cmd.SetHelpFunc(templater.HelpFunc()) + return templater +} + +func UseOptionsTemplates(cmd *cobra.Command) { + templater := &templater{ + UsageTemplate: OptionsUsageTemplate(), + HelpTemplate: OptionsHelpTemplate(), + } + cmd.SetUsageFunc(templater.UsageFunc()) + cmd.SetHelpFunc(templater.HelpFunc()) +} + +type templater struct { + UsageTemplate string + HelpTemplate string + RootCmd *cobra.Command + CommandGroups + Filtered []string +} + +func (templater *templater) ExposeFlags(cmd *cobra.Command, flags ...string) FlagExposer { + cmd.SetUsageFunc(templater.UsageFunc(flags...)) + return templater +} + +func (templater *templater) HelpFunc() func(*cobra.Command, []string) { + return func(c *cobra.Command, s []string) { + t := template.New("help") + t.Funcs(templater.templateFuncs()) + template.Must(t.Parse(templater.HelpTemplate)) + out := term.NewResponsiveWriter(c.OutOrStdout()) + err := t.Execute(out, c) + if err != nil { + c.Println(err) + } + } +} + +func (templater *templater) UsageFunc(exposedFlags ...string) func(*cobra.Command) error { + return func(c *cobra.Command) error { + t := template.New("usage") + t.Funcs(templater.templateFuncs(exposedFlags...)) + template.Must(t.Parse(templater.UsageTemplate)) + out := term.NewResponsiveWriter(c.OutOrStderr()) + return t.Execute(out, c) + } +} + +func (templater *templater) templateFuncs(exposedFlags ...string) template.FuncMap { + return template.FuncMap{ + "trim": strings.TrimSpace, + "trimRight": func(s string) string { return strings.TrimRightFunc(s, unicode.IsSpace) }, + "trimLeft": func(s string) string { return strings.TrimLeftFunc(s, unicode.IsSpace) }, + "gt": cobra.Gt, + "eq": cobra.Eq, + "rpad": rpad, + "appendIfNotPresent": appendIfNotPresent, + "flagsNotIntersected": flagsNotIntersected, + "visibleFlags": visibleFlags, + "flagsUsages": flagsUsages, + "cmdGroups": templater.cmdGroups, + "cmdGroupsString": templater.cmdGroupsString, + "rootCmd": templater.rootCmdName, + "isRootCmd": templater.isRootCmd, + "optionsCmdFor": templater.optionsCmdFor, + "usageLine": templater.usageLine, + "exposed": func(c *cobra.Command) *flag.FlagSet { + exposed := flag.NewFlagSet("exposed", flag.ContinueOnError) + if len(exposedFlags) > 0 { + for _, name := range exposedFlags { + if flag := c.Flags().Lookup(name); flag != nil { + exposed.AddFlag(flag) + } + } + } + return exposed + }, + } +} + +func (templater *templater) cmdGroups(c *cobra.Command, all []*cobra.Command) []CommandGroup { + if len(templater.CommandGroups) > 0 && c == templater.RootCmd { + all = filter(all, templater.Filtered...) 
+ return AddAdditionalCommands(templater.CommandGroups, "Other Commands:", all) + } + all = filter(all, "options") + return []CommandGroup{ + { + Message: "Available Commands:", + Commands: all, + }, + } +} + +func (t *templater) cmdGroupsString(c *cobra.Command) string { + groups := []string{} + for _, cmdGroup := range t.cmdGroups(c, c.Commands()) { + cmds := []string{cmdGroup.Message} + for _, cmd := range cmdGroup.Commands { + if cmd.Runnable() { + cmds = append(cmds, " "+rpad(cmd.Name(), cmd.NamePadding())+" "+cmd.Short) + } + } + groups = append(groups, strings.Join(cmds, "\n")) + } + return strings.Join(groups, "\n\n") +} + +func (t *templater) rootCmdName(c *cobra.Command) string { + return t.rootCmd(c).CommandPath() +} + +func (t *templater) isRootCmd(c *cobra.Command) bool { + return t.rootCmd(c) == c +} + +func (t *templater) parents(c *cobra.Command) []*cobra.Command { + parents := []*cobra.Command{c} + for current := c; !t.isRootCmd(current) && current.HasParent(); { + current = current.Parent() + parents = append(parents, current) + } + return parents +} + +func (t *templater) rootCmd(c *cobra.Command) *cobra.Command { + if c != nil && !c.HasParent() { + return c + } + if t.RootCmd == nil { + panic("nil root cmd") + } + return t.RootCmd +} + +func (t *templater) optionsCmdFor(c *cobra.Command) string { + if !c.Runnable() { + return "" + } + rootCmdStructure := t.parents(c) + for i := len(rootCmdStructure) - 1; i >= 0; i-- { + cmd := rootCmdStructure[i] + if _, _, err := cmd.Find([]string{"options"}); err == nil { + return cmd.CommandPath() + " options" + } + } + return "" +} + +func (t *templater) usageLine(c *cobra.Command) string { + usage := c.UseLine() + suffix := "[options]" + if c.HasFlags() && !strings.Contains(usage, suffix) { + usage += " " + suffix + } + return usage +} + +func flagsUsages(f *flag.FlagSet) string { + x := new(bytes.Buffer) + + f.VisitAll(func(flag *flag.Flag) { + if flag.Hidden { + return + } + format := "--%s=%s: %s\n" + + if flag.Value.Type() == "string" { + format = "--%s='%s': %s\n" + } + + if len(flag.Shorthand) > 0 { + format = " -%s, " + format + } else { + format = " %s " + format + } + + fmt.Fprintf(x, format, flag.Shorthand, flag.Name, flag.DefValue, flag.Usage) + }) + + return x.String() +} + +func rpad(s string, padding int) string { + template := fmt.Sprintf("%%-%ds", padding) + return fmt.Sprintf(template, s) +} + +func indentLines(s string, indentation int) string { + r := []string{} + for _, line := range strings.Split(s, "\n") { + indented := strings.Repeat(" ", indentation) + line + r = append(r, indented) + } + return strings.Join(r, "\n") +} + +func appendIfNotPresent(s, stringToAppend string) string { + if strings.Contains(s, stringToAppend) { + return s + } + return s + " " + stringToAppend +} + +func flagsNotIntersected(l *flag.FlagSet, r *flag.FlagSet) *flag.FlagSet { + f := flag.NewFlagSet("notIntersected", flag.ContinueOnError) + l.VisitAll(func(flag *flag.Flag) { + if r.Lookup(flag.Name) == nil { + f.AddFlag(flag) + } + }) + return f +} + +func visibleFlags(l *flag.FlagSet) *flag.FlagSet { + hidden := "help" + f := flag.NewFlagSet("visible", flag.ContinueOnError) + l.VisitAll(func(flag *flag.Flag) { + if flag.Name != hidden { + f.AddFlag(flag) + } + }) + return f +} + +func filter(cmds []*cobra.Command, names ...string) []*cobra.Command { + out := []*cobra.Command{} + for _, c := range cmds { + if c.Hidden { + continue + } + skip := false + for _, name := range names { + if name == c.Name() { + skip = true + break + } + } 
+ if skip { + continue + } + out = append(out, c) + } + return out +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/templates/templates.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/templates/templates.go new file mode 100644 index 000000000000..c40908f9f609 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/templates/templates.go @@ -0,0 +1,102 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package templates + +import ( + "strings" + "unicode" +) + +const ( + // SectionVars is the help template section that declares variables to be used in the template. + SectionVars = `{{$isRootCmd := isRootCmd .}}` + + `{{$rootCmd := rootCmd .}}` + + `{{$visibleFlags := visibleFlags (flagsNotIntersected .LocalFlags .PersistentFlags)}}` + + `{{$explicitlyExposedFlags := exposed .}}` + + `{{$optionsCmdFor := optionsCmdFor .}}` + + `{{$usageLine := usageLine .}}` + + // SectionAliases is the help template section that displays command aliases. + SectionAliases = `{{if gt .Aliases 0}}Aliases: +{{.NameAndAliases}} + +{{end}}` + + // SectionExamples is the help template section that displays command examples. + SectionExamples = `{{if .HasExample}}Examples: +{{trimRight .Example}} + +{{end}}` + + // SectionSubcommands is the help template section that displays the command's subcommands. + SectionSubcommands = `{{if .HasAvailableSubCommands}}{{cmdGroupsString .}} + +{{end}}` + + // SectionFlags is the help template section that displays the command's flags. + SectionFlags = `{{ if or $visibleFlags.HasFlags $explicitlyExposedFlags.HasFlags}}Options: +{{ if $visibleFlags.HasFlags}}{{trimRight (flagsUsages $visibleFlags)}}{{end}}{{ if $explicitlyExposedFlags.HasFlags}}{{trimRight (flagsUsages $explicitlyExposedFlags)}}{{end}} + +{{end}}` + + // SectionUsage is the help template section that displays the command's usage. + SectionUsage = `{{if and .Runnable (ne .UseLine "") (ne .UseLine $rootCmd)}}Usage: + {{$usageLine}} + +{{end}}` + + // SectionTipsHelp is the help template section that displays the '--help' hint. + SectionTipsHelp = `{{if .HasSubCommands}}Use "{{$rootCmd}} --help" for more information about a given command. +{{end}}` + + // SectionTipsGlobalOptions is the help template section that displays the 'options' hint for displaying global flags. + SectionTipsGlobalOptions = `{{if $optionsCmdFor}}Use "{{$optionsCmdFor}}" for a list of global command-line options (applies to all commands). +{{end}}` +) + +// MainHelpTemplate if the template for 'help' used by most commands. +func MainHelpTemplate() string { + return `{{with or .Long .Short }}{{. | trim}}{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` +} + +// MainUsageTemplate if the template for 'usage' used by most commands. 
+func MainUsageTemplate() string { + sections := []string{ + "\n\n", + SectionVars, + SectionAliases, + SectionExamples, + SectionSubcommands, + SectionFlags, + SectionUsage, + SectionTipsHelp, + SectionTipsGlobalOptions, + } + return strings.TrimRightFunc(strings.Join(sections, ""), unicode.IsSpace) +} + +// OptionsHelpTemplate if the template for 'help' used by the 'options' command. +func OptionsHelpTemplate() string { + return "" +} + +// OptionsUsageTemplate if the template for 'usage' used by the 'options' command. +func OptionsUsageTemplate() string { + return `{{ if .HasInheritedFlags}}The following options can be passed to any command: + +{{flagsUsages .InheritedFlags}}{{end}}` +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/BUILD b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/BUILD index 180988c990d2..bf458175abbb 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/BUILD @@ -17,37 +17,45 @@ go_library( "printing.go", "shortcut_restmapper.go", ], + importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/util", visibility = [ "//build/visible_to:pkg_kubectl_cmd_util_CONSUMERS", ], deps = [ - "//federation/apis/federation:go_default_library", - "//federation/client/clientset_generated/federation_clientset:go_default_library", - "//pkg/api:go_default_library", - "//pkg/api/v1:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/apps:go_default_library", "//pkg/apis/batch:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/v1:go_default_library", "//pkg/apis/extensions:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library", "//pkg/client/unversioned:go_default_library", "//pkg/controller:go_default_library", "//pkg/kubectl:go_default_library", + "//pkg/kubectl/categories:go_default_library", + "//pkg/kubectl/cmd/templates:go_default_library", "//pkg/kubectl/cmd/util/openapi:go_default_library", "//pkg/kubectl/cmd/util/openapi/validation:go_default_library", "//pkg/kubectl/plugins:go_default_library", "//pkg/kubectl/resource:go_default_library", + "//pkg/kubectl/scheme:go_default_library", "//pkg/kubectl/validation:go_default_library", "//pkg/printers:go_default_library", "//pkg/printers/internalversion:go_default_library", "//pkg/version:go_default_library", - "//vendor/github.com/emicklei/go-restful-swagger12:go_default_library", "//vendor/github.com/evanphx/json-patch:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/googleapis/gnostic/OpenAPIv2:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/api/apps/v1:go_default_library", + "//vendor/k8s.io/api/apps/v1beta1:go_default_library", + "//vendor/k8s.io/api/apps/v1beta2:go_default_library", + "//vendor/k8s.io/api/batch/v1:go_default_library", + "//vendor/k8s.io/api/batch/v2alpha1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -55,7 +63,6 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer/json:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", @@ -87,24 +94,25 @@ go_test( data = [ "//api/swagger-spec", ], + importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/util", library = ":go_default_library", visibility = [ "//build/visible_to:COMMON_testing", ], deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", "//pkg/api/testing:go_default_library", "//pkg/apis/apps:go_default_library", "//pkg/apis/batch:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/client/clientset_generated/internalclientset/fake:go_default_library", "//pkg/controller:go_default_library", "//pkg/kubectl:go_default_library", + "//pkg/kubectl/categories:go_default_library", "//pkg/kubectl/resource:go_default_library", - "//pkg/kubectl/validation:go_default_library", - "//vendor/github.com/emicklei/go-restful-swagger12:go_default_library", "//vendor/github.com/googleapis/gnostic/OpenAPIv2:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/cached_discovery.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/cached_discovery.go index e48e479eaf0c..0224ebbe9be0 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/cached_discovery.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/cached_discovery.go @@ -24,17 +24,15 @@ import ( "sync" "time" - "github.com/emicklei/go-restful-swagger12" "github.com/golang/glog" "github.com/googleapis/gnostic/OpenAPIv2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/version" "k8s.io/client-go/discovery" restclient "k8s.io/client-go/rest" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/kubectl/scheme" ) // CachedDiscoveryClient implements the functions that discovery server-supported API groups, @@ -68,8 +66,8 @@ func (d *CachedDiscoveryClient) ServerResourcesForGroupVersion(groupVersion stri // don't fail on errors, we either don't have a file or won't be able to run the cached check. Either way we can fallback. if err == nil { cachedResources := &metav1.APIResourceList{} - if err := runtime.DecodeInto(api.Codecs.UniversalDecoder(), cachedBytes, cachedResources); err == nil { - glog.V(6).Infof("returning cached discovery info from %v", filename) + if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), cachedBytes, cachedResources); err == nil { + glog.V(10).Infof("returning cached discovery info from %v", filename) return cachedResources, nil } } @@ -115,8 +113,8 @@ func (d *CachedDiscoveryClient) ServerGroups() (*metav1.APIGroupList, error) { // don't fail on errors, we either don't have a file or won't be able to run the cached check. Either way we can fallback. 
if err == nil { cachedGroups := &metav1.APIGroupList{} - if err := runtime.DecodeInto(api.Codecs.UniversalDecoder(), cachedBytes, cachedGroups); err == nil { - glog.V(6).Infof("returning cached discovery info from %v", filename) + if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), cachedBytes, cachedGroups); err == nil { + glog.V(10).Infof("returning cached discovery info from %v", filename) return cachedGroups, nil } } @@ -181,7 +179,7 @@ func (d *CachedDiscoveryClient) writeCachedFile(filename string, obj runtime.Obj return err } - bytes, err := runtime.Encode(api.Codecs.LegacyCodec(), obj) + bytes, err := runtime.Encode(scheme.Codecs.LegacyCodec(), obj) if err != nil { return err } @@ -233,10 +231,6 @@ func (d *CachedDiscoveryClient) ServerVersion() (*version.Info, error) { return d.delegate.ServerVersion() } -func (d *CachedDiscoveryClient) SwaggerSchema(version schema.GroupVersion) (*swagger.ApiDeclaration, error) { - return d.delegate.SwaggerSchema(version) -} - func (d *CachedDiscoveryClient) OpenAPISchema() (*openapi_v2.Document, error) { return d.delegate.OpenAPISchema() } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/clientcache.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/clientcache.go index 5312cb3095c7..124216d2e8f2 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/clientcache.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/clientcache.go @@ -24,7 +24,6 @@ import ( "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" - fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" oldclient "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/version" @@ -34,7 +33,6 @@ func NewClientCache(loader clientcmd.ClientConfig, discoveryClientFactory Discov return &ClientCache{ clientsets: make(map[schema.GroupVersion]internalclientset.Interface), configs: make(map[schema.GroupVersion]*restclient.Config), - fedClientSets: make(map[schema.GroupVersion]fedclientset.Interface), loader: loader, discoveryClientFactory: discoveryClientFactory, } @@ -43,10 +41,9 @@ func NewClientCache(loader clientcmd.ClientConfig, discoveryClientFactory Discov // ClientCache caches previously loaded clients for reuse, and ensures MatchServerVersion // is invoked only once type ClientCache struct { - loader clientcmd.ClientConfig - clientsets map[schema.GroupVersion]internalclientset.Interface - fedClientSets map[schema.GroupVersion]fedclientset.Interface - configs map[schema.GroupVersion]*restclient.Config + loader clientcmd.ClientConfig + clientsets map[schema.GroupVersion]internalclientset.Interface + configs map[schema.GroupVersion]*restclient.Config // noVersionConfig provides a cached config for the case of no required version specified noVersionConfig *restclient.Config @@ -211,51 +208,3 @@ func (c *ClientCache) ClientSetForVersion(requiredVersion *schema.GroupVersion) return clientset, nil } - -func (c *ClientCache) FederationClientSetForVersion(version *schema.GroupVersion) (fedclientset.Interface, error) { - c.lock.Lock() - defer c.lock.Unlock() - - return c.federationClientSetForVersion(version) -} - -func (c *ClientCache) federationClientSetForVersion(version *schema.GroupVersion) (fedclientset.Interface, error) { - if version != nil { - if clientSet, found := c.fedClientSets[*version]; found { - return clientSet, nil - } - } - config, err := c.clientConfigForVersion(version) - if err != nil { - 
return nil, err - } - - // TODO: support multi versions of client with clientset - clientSet, err := fedclientset.NewForConfig(config) - if err != nil { - return nil, err - } - c.fedClientSets[*config.GroupVersion] = clientSet - - if version != nil { - configCopy := *config - clientSet, err := fedclientset.NewForConfig(&configCopy) - if err != nil { - return nil, err - } - c.fedClientSets[*version] = clientSet - } - - return clientSet, nil -} - -func (c *ClientCache) FederationClientForVersion(version *schema.GroupVersion) (*restclient.RESTClient, error) { - c.lock.Lock() - defer c.lock.Unlock() - - fedClientSet, err := c.federationClientSetForVersion(version) - if err != nil { - return nil, err - } - return fedClientSet.Federation().RESTClient().(*restclient.RESTClient), nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory.go index 992916dc5e49..f4c0799596f9 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory.go @@ -17,41 +17,32 @@ limitations under the License. package util import ( - "bytes" "fmt" "io" - "io/ioutil" - "os" - "os/user" - "path" "sort" "strconv" "strings" "time" - swagger "github.com/emicklei/go-restful-swagger12" - "github.com/golang/glog" "github.com/spf13/cobra" "github.com/spf13/pflag" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/runtime/serializer/json" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/discovery" "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" - fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset" - "k8s.io/kubernetes/pkg/api" - apiv1 "k8s.io/kubernetes/pkg/api/v1" + api "k8s.io/kubernetes/pkg/apis/core" + apiv1 "k8s.io/kubernetes/pkg/apis/core/v1" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" "k8s.io/kubernetes/pkg/kubectl" + "k8s.io/kubernetes/pkg/kubectl/categories" "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi" "k8s.io/kubernetes/pkg/kubectl/plugins" "k8s.io/kubernetes/pkg/kubectl/resource" @@ -112,11 +103,6 @@ type ClientAccessFactory interface { // just directions to the server. People use this to build RESTMappers on top of BareClientConfig() (*restclient.Config, error) - // TODO this should probably be removed and collapsed into whatever we want to use long term - // probably returning a restclient for a version and leaving contruction up to someone else - FederationClientSetForVersion(version *schema.GroupVersion) (fedclientset.Interface, error) - // TODO remove this should be rolled into restclient with the right version - FederationClientForVersion(version *schema.GroupVersion) (*restclient.RESTClient, error) // TODO remove. This should be rolled into `ClientSet` ClientSetForVersion(requiredVersion *schema.GroupVersion) (internalclientset.Interface, error) // TODO remove. This should be rolled into `ClientConfig` @@ -131,7 +117,7 @@ type ClientAccessFactory interface { // UpdatePodSpecForObject will call the provided function on the pod spec this object supports, // return false if no pod spec is supported, or return an error. 
- UpdatePodSpecForObject(obj runtime.Object, fn func(*api.PodSpec) error) (bool, error) + UpdatePodSpecForObject(obj runtime.Object, fn func(*v1.PodSpec) error) (bool, error) // MapBasedSelectorForObject returns the map-based selector associated with the provided object. If a // new set-based selector is provided, an error is returned if the selector cannot be converted to a @@ -154,8 +140,6 @@ type ClientAccessFactory interface { // BindExternalFlags adds any flags defined by external projects (not part of pflags) BindExternalFlags(flags *pflag.FlagSet) - // TODO: Break the dependency on cmd here. - DefaultResourceFilterOptions(cmd *cobra.Command, withNamespace bool) *printers.PrintOptions // DefaultResourceFilterFunc returns a collection of FilterFuncs suitable for filtering specific resource types. DefaultResourceFilterFunc() kubectl.Filters @@ -203,11 +187,8 @@ type ClientAccessFactory interface { type ObjectMappingFactory interface { // Returns interfaces for dealing with arbitrary runtime.Objects. Object() (meta.RESTMapper, runtime.ObjectTyper) - // Returns interfaces for dealing with arbitrary - // runtime.Unstructured. This performs API calls to discover types. - UnstructuredObject() (meta.RESTMapper, runtime.ObjectTyper, error) // Returns interface for expanding categories like `all`. - CategoryExpander() resource.CategoryExpander + CategoryExpander() categories.CategoryExpander // Returns a RESTClient for working with the specified RESTMapping or an error. This is intended // for working with arbitrary resources and is not guaranteed to point to a Kubernetes APIServer. ClientForMapping(mapping *meta.RESTMapping) (resource.RESTClient, error) @@ -232,10 +213,13 @@ type ObjectMappingFactory interface { // AttachablePodForObject returns the pod to which to attach given an object. AttachablePodForObject(object runtime.Object, timeout time.Duration) (*api.Pod, error) + // ApproximatePodTemplateForObject returns a pod template object for the provided source. + // It may return both an error and a object. It attempt to return the best possible template + // available at the current time. + ApproximatePodTemplateForObject(runtime.Object) (*api.PodTemplateSpec, error) + // Returns a schema that can validate objects stored on disk. - Validator(validate bool, openapi bool, cacheDir string) (validation.Schema, error) - // SwaggerSchema returns the schema declaration for the provided group version kind. - SwaggerSchema(schema.GroupVersionKind) (*swagger.ApiDeclaration, error) + Validator(validate bool) (validation.Schema, error) // OpenAPISchema returns the schema openapi schema definiton OpenAPISchema() (openapi.Resources, error) } @@ -246,19 +230,24 @@ type BuilderFactory interface { // PrinterForCommand returns the default printer for the command. It requires that certain options // are declared on the command (see AddPrinterFlags). Returns a printer, or an error if a printer // could not be found. - // TODO: Break the dependency on cmd here. - PrinterForCommand(cmd *cobra.Command, isLocal bool, outputOpts *printers.OutputOptions, options printers.PrintOptions) (printers.ResourcePrinter, error) + PrinterForOptions(options *printers.PrintOptions) (printers.ResourcePrinter, error) // PrinterForMapping returns a printer suitable for displaying the provided resource type. // Requires that printer flags have been added to cmd (see AddPrinterFlags). // Returns a printer, true if the printer is generic (is not internal), or // an error if a printer could not be found. 
- PrinterForMapping(cmd *cobra.Command, isLocal bool, outputOpts *printers.OutputOptions, mapping *meta.RESTMapping, withNamespace bool) (printers.ResourcePrinter, error) + PrinterForMapping(options *printers.PrintOptions, mapping *meta.RESTMapping) (printers.ResourcePrinter, error) // PrintObject prints an api object given command line flags to modify the output format PrintObject(cmd *cobra.Command, isLocal bool, mapper meta.RESTMapper, obj runtime.Object, out io.Writer) error - // One stop shopping for a Builder - NewBuilder(allowRemoteCalls bool) *resource.Builder - // Resource builder for working with unstructured objects - NewUnstructuredBuilder(allowRemoteCalls bool) (*resource.Builder, error) + // PrintResourceInfoForCommand receives a *cobra.Command and a *resource.Info and + // attempts to print an info object based on the specified output format. If the + // object passed is non-generic, it attempts to print the object using a HumanReadablePrinter. + // Requires that printer flags have been added to cmd (see AddPrinterFlags). + PrintResourceInfoForCommand(cmd *cobra.Command, info *resource.Info, out io.Writer) error + // PrintSuccess prints message after finishing mutating operations + PrintSuccess(mapper meta.RESTMapper, shortOutput bool, out io.Writer, resource, name string, dryRun bool, operation string) + // NewBuilder returns an object that assists in loading objects from both disk and the server + // and which implements the common patterns for CLI interactions with generic resources. + NewBuilder() *resource.Builder // PluginLoader provides the implementation to be used to load cli plugins. PluginLoader() plugins.PluginLoader // PluginRunner provides the implementation to be used to run cli plugins. @@ -298,8 +287,8 @@ func NewFactory(optionalClientConfig clientcmd.ClientConfig) Factory { // GetFirstPod returns a pod matching the namespace and label selector // and the number of all pods that match the label selector. 
-func GetFirstPod(client coreclient.PodsGetter, namespace string, selector labels.Selector, timeout time.Duration, sortBy func([]*v1.Pod) sort.Interface) (*api.Pod, int, error) { - options := metav1.ListOptions{LabelSelector: selector.String()} +func GetFirstPod(client coreclient.PodsGetter, namespace string, selector string, timeout time.Duration, sortBy func([]*v1.Pod) sort.Interface) (*api.Pod, int, error) { + options := metav1.ListOptions{LabelSelector: selector} podList, err := client.Pods(namespace).List(options) if err != nil { @@ -309,13 +298,13 @@ func GetFirstPod(client coreclient.PodsGetter, namespace string, selector labels for i := range podList.Items { pod := podList.Items[i] externalPod := &v1.Pod{} - apiv1.Convert_api_Pod_To_v1_Pod(&pod, externalPod, nil) + apiv1.Convert_core_Pod_To_v1_Pod(&pod, externalPod, nil) pods = append(pods, externalPod) } if len(pods) > 0 { sort.Sort(sortBy(pods)) internalPod := &api.Pod{} - apiv1.Convert_v1_Pod_To_api_Pod(pods[0], internalPod, nil) + apiv1.Convert_v1_Pod_To_core_Pod(pods[0], internalPod, nil) return internalPod, len(podList.Items), nil } @@ -392,145 +381,3 @@ func getServiceProtocols(spec api.ServiceSpec) map[string]string { } return result } - -type clientSwaggerSchema struct { - c restclient.Interface - cacheDir string -} - -const schemaFileName = "schema.json" - -type schemaClient interface { - Get() *restclient.Request -} - -func recursiveSplit(dir string) []string { - parent, file := path.Split(dir) - if len(parent) == 0 { - return []string{file} - } - return append(recursiveSplit(parent[:len(parent)-1]), file) -} - -func substituteUserHome(dir string) (string, error) { - if len(dir) == 0 || dir[0] != '~' { - return dir, nil - } - parts := recursiveSplit(dir) - if len(parts[0]) == 1 { - parts[0] = os.Getenv("HOME") - } else { - usr, err := user.Lookup(parts[0][1:]) - if err != nil { - return "", err - } - parts[0] = usr.HomeDir - } - return path.Join(parts...), nil -} - -func writeSchemaFile(schemaData []byte, cacheDir, cacheFile, prefix, groupVersion string) error { - if err := os.MkdirAll(path.Join(cacheDir, prefix, groupVersion), 0755); err != nil { - return err - } - tmpFile, err := ioutil.TempFile(cacheDir, "schema") - if err != nil { - // If we can't write, keep going. - if os.IsPermission(err) { - return nil - } - return err - } - if _, err := io.Copy(tmpFile, bytes.NewBuffer(schemaData)); err != nil { - return err - } - glog.V(4).Infof("Writing swagger cache (dir %v) file %v (from %v)", cacheDir, cacheFile, tmpFile.Name()) - if err := os.Link(tmpFile.Name(), cacheFile); err != nil { - // If we can't write due to file existing, or permission problems, keep going. 
- if os.IsExist(err) || os.IsPermission(err) { - return nil - } - return err - } - return nil -} - -func getSchemaAndValidate(c schemaClient, data []byte, prefix, groupVersion, cacheDir string, delegate validation.Schema) (err error) { - var schemaData []byte - var firstSeen bool - fullDir, err := substituteUserHome(cacheDir) - if err != nil { - return err - } - cacheFile := path.Join(fullDir, prefix, groupVersion, schemaFileName) - - if len(cacheDir) != 0 { - if schemaData, err = ioutil.ReadFile(cacheFile); err != nil && !os.IsNotExist(err) { - return err - } - } - if schemaData == nil { - firstSeen = true - schemaData, err = downloadSchemaAndStore(c, cacheDir, fullDir, cacheFile, prefix, groupVersion) - if err != nil { - return err - } - } - schema, err := validation.NewSwaggerSchemaFromBytes(schemaData, delegate) - if err != nil { - return err - } - err = schema.ValidateBytes(data) - if _, ok := err.(validation.TypeNotFoundError); ok && !firstSeen { - // As a temporary hack, kubectl would re-get the schema if validation - // fails for type not found reason. - // TODO: runtime-config settings needs to make into the file's name - schemaData, err = downloadSchemaAndStore(c, cacheDir, fullDir, cacheFile, prefix, groupVersion) - if err != nil { - return err - } - schema, err := validation.NewSwaggerSchemaFromBytes(schemaData, delegate) - if err != nil { - return err - } - return schema.ValidateBytes(data) - } - - return err -} - -// Download swagger schema from apiserver and store it to file. -func downloadSchemaAndStore(c schemaClient, cacheDir, fullDir, cacheFile, prefix, groupVersion string) (schemaData []byte, err error) { - schemaData, err = c.Get(). - AbsPath("/swaggerapi", prefix, groupVersion). - Do(). - Raw() - if err != nil { - return - } - if len(cacheDir) != 0 { - if err = writeSchemaFile(schemaData, fullDir, cacheFile, prefix, groupVersion); err != nil { - return - } - } - return -} - -func (c *clientSwaggerSchema) ValidateBytes(data []byte) error { - gvk, err := json.DefaultMetaFactory.Interpret(data) - if err != nil { - return err - } - if ok := api.Registry.IsEnabledVersion(gvk.GroupVersion()); !ok { - // if we don't have this in our scheme, just skip validation because its an object we don't recognize - return nil - } - - switch gvk.Group { - case api.GroupName: - return getSchemaAndValidate(c.c, data, "api", gvk.GroupVersion().String(), c.cacheDir, c) - - default: - return getSchemaAndValidate(c.c, data, "apis/", gvk.GroupVersion().String(), c.cacheDir, c) - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory_builder.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory_builder.go index e3a270fee30a..2c433032e4ad 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory_builder.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory_builder.go @@ -28,7 +28,6 @@ import ( "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/kubectl/plugins" "k8s.io/kubernetes/pkg/kubectl/resource" "k8s.io/kubernetes/pkg/printers" @@ -48,44 +47,20 @@ func NewBuilderFactory(clientAccessFactory ClientAccessFactory, objectMappingFac return f } -func (f *ring2Factory) PrinterForCommand(cmd *cobra.Command, isLocal bool, outputOpts *printers.OutputOptions, options printers.PrintOptions) (printers.ResourcePrinter, error) { +func (f *ring2Factory) PrinterForOptions(options *printers.PrintOptions) (printers.ResourcePrinter, error) { var mapper 
meta.RESTMapper var typer runtime.ObjectTyper - var err error - if isLocal { - mapper = api.Registry.RESTMapper() - typer = api.Scheme - } else { - mapper, typer, err = f.objectMappingFactory.UnstructuredObject() - if err != nil { - return nil, err - } - } + mapper, typer = f.objectMappingFactory.Object() + // TODO: used by the custom column implementation and the name implementation, break this dependency decoders := []runtime.Decoder{f.clientAccessFactory.Decoder(true), unstructured.UnstructuredJSONScheme} encoder := f.clientAccessFactory.JSONEncoder() - return PrinterForCommand(cmd, outputOpts, mapper, typer, encoder, decoders, options) + return PrinterForOptions(mapper, typer, encoder, decoders, options) } -func (f *ring2Factory) PrinterForMapping(cmd *cobra.Command, isLocal bool, outputOpts *printers.OutputOptions, mapping *meta.RESTMapping, withNamespace bool) (printers.ResourcePrinter, error) { - // Some callers do not have "label-columns" so we can't use the GetFlagStringSlice() helper - columnLabel, err := cmd.Flags().GetStringSlice("label-columns") - if err != nil { - columnLabel = []string{} - } - - options := printers.PrintOptions{ - NoHeaders: GetFlagBool(cmd, "no-headers"), - WithNamespace: withNamespace, - Wide: GetWideFlag(cmd), - ShowAll: GetFlagBool(cmd, "show-all"), - ShowLabels: GetFlagBool(cmd, "show-labels"), - AbsoluteTimestamps: isWatch(cmd), - ColumnLabels: columnLabel, - } - - printer, err := f.PrinterForCommand(cmd, isLocal, outputOpts, options) +func (f *ring2Factory) PrinterForMapping(options *printers.PrintOptions, mapping *meta.RESTMapping) (printers.ResourcePrinter, error) { + printer, err := f.PrinterForOptions(options) if err != nil { return nil, err } @@ -107,70 +82,94 @@ func (f *ring2Factory) PrinterForMapping(cmd *cobra.Command, isLocal bool, outpu return printer, nil } +func (f *ring2Factory) PrintSuccess(mapper meta.RESTMapper, shortOutput bool, out io.Writer, resource, name string, dryRun bool, operation string) { + resource, _ = mapper.ResourceSingularizer(resource) + dryRunMsg := "" + if dryRun { + dryRunMsg = " (dry run)" + } + if shortOutput { + // -o name: prints resource/name + if len(resource) > 0 { + fmt.Fprintf(out, "%s/%s\n", resource, name) + } else { + fmt.Fprintf(out, "%s\n", name) + } + } else { + // understandable output by default + if len(resource) > 0 { + fmt.Fprintf(out, "%s \"%s\" %s%s\n", resource, name, operation, dryRunMsg) + } else { + fmt.Fprintf(out, "\"%s\" %s%s\n", name, operation, dryRunMsg) + } + } +} + func (f *ring2Factory) PrintObject(cmd *cobra.Command, isLocal bool, mapper meta.RESTMapper, obj runtime.Object, out io.Writer) error { // try to get a typed object _, typer := f.objectMappingFactory.Object() gvks, _, err := typer.ObjectKinds(obj) - // fall back to an unstructured object if we get something unregistered - if runtime.IsNotRegisteredError(err) { - _, typer, unstructuredErr := f.objectMappingFactory.UnstructuredObject() - if unstructuredErr != nil { - // if we can't get an unstructured typer, return the original error - return err - } - gvks, _, err = typer.ObjectKinds(obj) - } - if err != nil { return err } + // Prefer the existing external version if specified + var preferredVersion []string + if gvks[0].Version != "" && gvks[0].Version != runtime.APIVersionInternal { + preferredVersion = []string{gvks[0].Version} + } - mapping, err := mapper.RESTMapping(gvks[0].GroupKind()) + mapping, err := mapper.RESTMapping(gvks[0].GroupKind(), preferredVersion...) 
if err != nil { return err } - printer, err := f.PrinterForMapping(cmd, isLocal, nil, mapping, false) + printer, err := f.PrinterForMapping(ExtractCmdPrintOptions(cmd, false), mapping) if err != nil { return err } return printer.PrintObj(obj, out) } -// NewBuilder returns a new resource builder. -// Receives a bool flag and avoids remote calls if set to false -func (f *ring2Factory) NewBuilder(allowRemoteCalls bool) *resource.Builder { - var clientMapper resource.ClientMapper - clientMapperFunc := resource.ClientMapperFunc(f.objectMappingFactory.ClientForMapping) - - mapper, typer := f.objectMappingFactory.Object() - categoryExpander := f.objectMappingFactory.CategoryExpander() - - if allowRemoteCalls { - clientMapper = clientMapperFunc - } else { - clientMapper = resource.DisabledClientForMapping{ClientMapper: clientMapperFunc} +func (f *ring2Factory) PrintResourceInfoForCommand(cmd *cobra.Command, info *resource.Info, out io.Writer) error { + printOpts := ExtractCmdPrintOptions(cmd, false) + printer, err := f.PrinterForOptions(printOpts) + if err != nil { + return err } - - return resource.NewBuilder(mapper, categoryExpander, typer, clientMapper, f.clientAccessFactory.Decoder(true)) -} - -func (f *ring2Factory) NewUnstructuredBuilder(allowRemoteCalls bool) (*resource.Builder, error) { - if !allowRemoteCalls { - return f.NewBuilder(allowRemoteCalls), nil + if !printer.IsGeneric() { + printer, err = f.PrinterForMapping(printOpts, nil) + if err != nil { + return err + } } + return printer.PrintObj(info.Object, out) +} - clientMapperFunc := resource.ClientMapperFunc(f.objectMappingFactory.UnstructuredClientForMapping) +// NewBuilder returns a new resource builder for structured api objects. +func (f *ring2Factory) NewBuilder() *resource.Builder { + clientMapperFunc := resource.ClientMapperFunc(f.objectMappingFactory.ClientForMapping) + mapper, typer := f.objectMappingFactory.Object() - mapper, typer, err := f.objectMappingFactory.UnstructuredObject() - if err != nil { - return nil, err - } + unstructuredClientMapperFunc := resource.ClientMapperFunc(f.objectMappingFactory.UnstructuredClientForMapping) categoryExpander := f.objectMappingFactory.CategoryExpander() - return resource.NewBuilder(mapper, categoryExpander, typer, clientMapperFunc, unstructured.UnstructuredJSONScheme), nil + return resource.NewBuilder( + &resource.Mapper{ + RESTMapper: mapper, + ObjectTyper: typer, + ClientMapper: clientMapperFunc, + Decoder: f.clientAccessFactory.Decoder(true), + }, + &resource.Mapper{ + RESTMapper: mapper, + ObjectTyper: typer, + ClientMapper: unstructuredClientMapperFunc, + Decoder: unstructured.UnstructuredJSONScheme, + }, + categoryExpander, + ) } // PluginLoader loads plugins from a path set by the KUBECTL_PLUGINS_PATH env var. @@ -180,10 +179,10 @@ func (f *ring2Factory) NewUnstructuredBuilder(allowRemoteCalls bool) (*resource. // system directory structure spec for the given platform. 
func (f *ring2Factory) PluginLoader() plugins.PluginLoader { if len(os.Getenv("KUBECTL_PLUGINS_PATH")) > 0 { - return plugins.PluginsEnvVarPluginLoader() + return plugins.KubectlPluginsPathPluginLoader() } return plugins.TolerantMultiPluginLoader{ - plugins.XDGDataPluginLoader(), + plugins.XDGDataDirsPluginLoader(), plugins.UserDirPluginLoader(), } } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory_client_access.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory_client_access.go index 7a8b8a821b6d..bf354932a88d 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory_client_access.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory_client_access.go @@ -29,10 +29,20 @@ import ( "strings" "time" + "k8s.io/api/core/v1" + "github.com/spf13/cobra" "github.com/spf13/pflag" + appsv1 "k8s.io/api/apps/v1" + appsv1beta1 "k8s.io/api/apps/v1beta1" + appsv1beta2 "k8s.io/api/apps/v1beta2" + batchv1 "k8s.io/api/batch/v1" + batchv2alpha1 "k8s.io/api/batch/v2alpha1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" utilflag "k8s.io/apiserver/pkg/util/flag" @@ -41,10 +51,9 @@ import ( restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/util/homedir" - fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/apps" - "k8s.io/kubernetes/pkg/apis/batch" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/kubectl" @@ -208,47 +217,63 @@ func (f *ring0Factory) RESTClient() (*restclient.RESTClient, error) { return restclient.RESTClientFor(clientConfig) } -func (f *ring0Factory) FederationClientSetForVersion(version *schema.GroupVersion) (fedclientset.Interface, error) { - return f.clientCache.FederationClientSetForVersion(version) -} - -func (f *ring0Factory) FederationClientForVersion(version *schema.GroupVersion) (*restclient.RESTClient, error) { - return f.clientCache.FederationClientForVersion(version) -} - func (f *ring0Factory) Decoder(toInternal bool) runtime.Decoder { var decoder runtime.Decoder if toInternal { - decoder = api.Codecs.UniversalDecoder() + decoder = legacyscheme.Codecs.UniversalDecoder() } else { - decoder = api.Codecs.UniversalDeserializer() + decoder = legacyscheme.Codecs.UniversalDeserializer() } return decoder } func (f *ring0Factory) JSONEncoder() runtime.Encoder { - return api.Codecs.LegacyCodec(api.Registry.EnabledVersions()...) + return legacyscheme.Codecs.LegacyCodec(legacyscheme.Registry.EnabledVersions()...) 
} -func (f *ring0Factory) UpdatePodSpecForObject(obj runtime.Object, fn func(*api.PodSpec) error) (bool, error) { +func (f *ring0Factory) UpdatePodSpecForObject(obj runtime.Object, fn func(*v1.PodSpec) error) (bool, error) { // TODO: replace with a swagger schema based approach (identify pod template via schema introspection) switch t := obj.(type) { - case *api.Pod: + case *v1.Pod: return true, fn(&t.Spec) - case *api.ReplicationController: + // ReplicationController + case *v1.ReplicationController: if t.Spec.Template == nil { - t.Spec.Template = &api.PodTemplateSpec{} + t.Spec.Template = &v1.PodTemplateSpec{} } return true, fn(&t.Spec.Template.Spec) - case *extensions.Deployment: + // Deployment + case *extensionsv1beta1.Deployment: return true, fn(&t.Spec.Template.Spec) - case *extensions.DaemonSet: + case *appsv1beta1.Deployment: return true, fn(&t.Spec.Template.Spec) - case *extensions.ReplicaSet: + case *appsv1beta2.Deployment: + return true, fn(&t.Spec.Template.Spec) + case *appsv1.Deployment: + return true, fn(&t.Spec.Template.Spec) + // DaemonSet + case *extensionsv1beta1.DaemonSet: + return true, fn(&t.Spec.Template.Spec) + case *appsv1beta2.DaemonSet: + return true, fn(&t.Spec.Template.Spec) + case *appsv1.DaemonSet: + return true, fn(&t.Spec.Template.Spec) + // ReplicaSet + case *extensionsv1beta1.ReplicaSet: return true, fn(&t.Spec.Template.Spec) - case *apps.StatefulSet: + case *appsv1beta2.ReplicaSet: return true, fn(&t.Spec.Template.Spec) - case *batch.Job: + case *appsv1.ReplicaSet: + return true, fn(&t.Spec.Template.Spec) + // StatefulSet + case *appsv1beta1.StatefulSet: + return true, fn(&t.Spec.Template.Spec) + case *appsv1beta2.StatefulSet: + return true, fn(&t.Spec.Template.Spec) + case *appsv1.StatefulSet: + return true, fn(&t.Spec.Template.Spec) + // Job + case *batchv1.Job: return true, fn(&t.Spec.Template.Spec) default: return false, fmt.Errorf("the object is not a pod or does not have a pod template") @@ -285,11 +310,7 @@ func (f *ring0Factory) MapBasedSelectorForObject(object runtime.Object) (string, } return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil default: - gvks, _, err := api.Scheme.ObjectKinds(object) - if err != nil { - return "", err - } - return "", fmt.Errorf("cannot extract pod selector from %v", gvks[0]) + return "", fmt.Errorf("cannot extract pod selector from %T", object) } } @@ -307,11 +328,7 @@ func (f *ring0Factory) PortsForObject(object runtime.Object) ([]string, error) { case *extensions.ReplicaSet: return getPorts(t.Spec.Template.Spec), nil default: - gvks, _, err := api.Scheme.ObjectKinds(object) - if err != nil { - return nil, err - } - return nil, fmt.Errorf("cannot extract ports from %v", gvks[0]) + return nil, fmt.Errorf("cannot extract ports from %T", object) } } @@ -329,11 +346,7 @@ func (f *ring0Factory) ProtocolsForObject(object runtime.Object) (map[string]str case *extensions.ReplicaSet: return getProtocols(t.Spec.Template.Spec), nil default: - gvks, _, err := api.Scheme.ObjectKinds(object) - if err != nil { - return nil, err - } - return nil, fmt.Errorf("cannot extract protocols from %v", gvks[0]) + return nil, fmt.Errorf("cannot extract protocols from %T", object) } } @@ -398,24 +411,6 @@ func (f *ring0Factory) BindExternalFlags(flags *pflag.FlagSet) { flags.AddGoFlagSet(flag.CommandLine) } -func (f *ring0Factory) DefaultResourceFilterOptions(cmd *cobra.Command, withNamespace bool) *printers.PrintOptions { - columnLabel, err := cmd.Flags().GetStringSlice("label-columns") - if err != nil { - columnLabel = []string{} - } 
- opts := &printers.PrintOptions{ - NoHeaders: GetFlagBool(cmd, "no-headers"), - WithNamespace: withNamespace, - Wide: GetWideFlag(cmd), - ShowAll: GetFlagBool(cmd, "show-all"), - ShowLabels: GetFlagBool(cmd, "show-labels"), - AbsoluteTimestamps: isWatch(cmd), - ColumnLabels: columnLabel, - } - - return opts -} - func (f *ring0Factory) DefaultResourceFilterFunc() kubectl.Filters { return kubectl.NewResourceFilter() } @@ -502,6 +497,7 @@ const ( ClusterV1Beta1GeneratorName = "cluster/v1beta1" PodDisruptionBudgetV1GeneratorName = "poddisruptionbudget/v1beta1" PodDisruptionBudgetV2GeneratorName = "poddisruptionbudget/v1beta1/v2" + PriorityClassV1Alpha1GeneratorName = "priorityclass/v1alpha1" ) // DefaultGenerators returns the set of default generators for use in Factory instances @@ -571,6 +567,74 @@ func DefaultGenerators(cmdName string) map[string]kubectl.Generator { return generator } +// fallbackGeneratorNameIfNecessary returns the name of the old generator +// if server does not support new generator. Otherwise, the +// generator string is returned unchanged. +// +// If the generator name is changed, print a warning message to let the user +// know. +func FallbackGeneratorNameIfNecessary( + generatorName string, + discoveryClient discovery.DiscoveryInterface, + cmdErr io.Writer, +) (string, error) { + switch generatorName { + case DeploymentBasicAppsV1Beta1GeneratorName: + hasResource, err := HasResource(discoveryClient, appsv1beta1.SchemeGroupVersion.WithResource("deployments")) + if err != nil { + return "", err + } + if !hasResource { + warning(cmdErr, DeploymentBasicAppsV1Beta1GeneratorName, DeploymentBasicV1Beta1GeneratorName) + return DeploymentBasicV1Beta1GeneratorName, nil + } + case CronJobV2Alpha1GeneratorName: + hasResource, err := HasResource(discoveryClient, batchv2alpha1.SchemeGroupVersion.WithResource("cronjobs")) + if err != nil { + return "", err + } + if !hasResource { + warning(cmdErr, CronJobV2Alpha1GeneratorName, JobV1GeneratorName) + return JobV1GeneratorName, nil + } + } + return generatorName, nil +} + +func warning(cmdErr io.Writer, newGeneratorName, oldGeneratorName string) { + fmt.Fprintf(cmdErr, "WARNING: New deployments generator %q specified, "+ + "but it isn't available. 
"+ + "Falling back to %q.\n", + newGeneratorName, + oldGeneratorName, + ) +} + +func HasResource(client discovery.DiscoveryInterface, resource schema.GroupVersionResource) (bool, error) { + resources, err := client.ServerResourcesForGroupVersion(resource.GroupVersion().String()) + if apierrors.IsNotFound(err) { + // entire group is missing + return false, nil + } + if err != nil { + // other errors error + return false, fmt.Errorf("failed to discover supported resources: %v", err) + } + for _, serverResource := range resources.APIResources { + if serverResource.Name == resource.Resource { + return true, nil + } + } + return false, nil +} + +func Contains(resourcesList []*metav1.APIResourceList, resource schema.GroupVersionResource) bool { + resources := discovery.FilteredBy(discovery.ResourcePredicateFunc(func(gv string, r *metav1.APIResource) bool { + return resource.GroupVersion().String() == gv && resource.Resource == r.Name + }), resourcesList) + return len(resources) != 0 +} + func (f *ring0Factory) Generators(cmdName string) map[string]kubectl.Generator { return DefaultGenerators(cmdName) } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory_object_mapping.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory_object_mapping.go index 2b51808cf845..dfd82d406c57 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory_object_mapping.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory_object_mapping.go @@ -22,12 +22,11 @@ import ( "errors" "fmt" "os" - "path" + "reflect" "sort" "sync" "time" - swagger "github.com/emicklei/go-restful-swagger12" "github.com/golang/glog" "k8s.io/api/core/v1" @@ -35,18 +34,18 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/discovery" "k8s.io/client-go/dynamic" restclient "k8s.io/client-go/rest" - "k8s.io/kubernetes/federation/apis/federation" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/apis/batch" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/extensions" client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/kubectl" + "k8s.io/kubernetes/pkg/kubectl/categories" "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi" openapivalidation "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/validation" "k8s.io/kubernetes/pkg/kubectl/resource" @@ -74,60 +73,51 @@ func NewObjectMappingFactory(clientAccessFactory ClientAccessFactory) ObjectMapp return f } -// TODO: This method should return an error now that it can fail. Alternatively, it needs to -// return lazy implementations of mapper and typer that don't hit the wire until they are -// invoked. -func (f *ring1Factory) Object() (meta.RESTMapper, runtime.ObjectTyper) { - mapper := api.Registry.RESTMapper() - discoveryClient, err := f.clientAccessFactory.DiscoveryClient() - if err == nil { - mapper = meta.FirstHitRESTMapper{ - MultiRESTMapper: meta.MultiRESTMapper{ - discovery.NewDeferredDiscoveryRESTMapper(discoveryClient, api.Registry.InterfacesFor), - api.Registry.RESTMapper(), // hardcoded fall back - }, - } - - // wrap with shortcuts, they require a discoveryClient - mapper, err = NewShortcutExpander(mapper, discoveryClient) - // you only have an error on missing discoveryClient, so this shouldn't fail. Check anyway. 
- CheckErr(err) - } - - return mapper, api.Scheme -} - -func (f *ring1Factory) UnstructuredObject() (meta.RESTMapper, runtime.ObjectTyper, error) { +// objectLoader attempts to perform discovery against the server, and will fall back to +// the built in mapper if necessary. It supports unstructured objects either way, since +// the underlying Scheme supports Unstructured. The mapper will return converters that can +// convert versioned types to unstructured and back. +func (f *ring1Factory) objectLoader() (meta.RESTMapper, runtime.ObjectTyper, error) { discoveryClient, err := f.clientAccessFactory.DiscoveryClient() if err != nil { - return nil, nil, err + glog.V(3).Infof("Unable to get a discovery client to find server resources, falling back to hardcoded types: %v", err) + return legacyscheme.Registry.RESTMapper(), legacyscheme.Scheme, nil } + groupResources, err := discovery.GetAPIGroupResources(discoveryClient) if err != nil && !discoveryClient.Fresh() { discoveryClient.Invalidate() groupResources, err = discovery.GetAPIGroupResources(discoveryClient) } if err != nil { - return nil, nil, err + glog.V(3).Infof("Unable to retrieve API resources, falling back to hardcoded types: %v", err) + return legacyscheme.Registry.RESTMapper(), legacyscheme.Scheme, nil } - mapper := discovery.NewDeferredDiscoveryRESTMapper(discoveryClient, meta.InterfacesForUnstructured) - typer := discovery.NewUnstructuredObjectTyper(groupResources) - expander, err := NewShortcutExpander(mapper, discoveryClient) + // allow conversion between typed and unstructured objects + interfaces := meta.InterfacesForUnstructuredConversion(legacyscheme.Registry.InterfacesFor) + mapper := discovery.NewDeferredDiscoveryRESTMapper(discoveryClient, meta.VersionInterfacesFunc(interfaces)) + // TODO: should this also indicate it recognizes typed objects? 
+ typer := discovery.NewUnstructuredObjectTyper(groupResources, legacyscheme.Scheme) + expander := NewShortcutExpander(mapper, discoveryClient) return expander, typer, err } -func (f *ring1Factory) CategoryExpander() resource.CategoryExpander { - legacyExpander := resource.LegacyCategoryExpander +func (f *ring1Factory) Object() (meta.RESTMapper, runtime.ObjectTyper) { + return meta.NewLazyObjectLoader(f.objectLoader) +} + +func (f *ring1Factory) CategoryExpander() categories.CategoryExpander { + legacyExpander := categories.LegacyCategoryExpander discoveryClient, err := f.clientAccessFactory.DiscoveryClient() if err == nil { // fallback is the legacy expander wrapped with discovery based filtering - fallbackExpander, err := resource.NewDiscoveryFilteredExpander(legacyExpander, discoveryClient) + fallbackExpander, err := categories.NewDiscoveryFilteredExpander(legacyExpander, discoveryClient) CheckErr(err) // by default use the expander that discovers based on "categories" field from the API - discoveryCategoryExpander, err := resource.NewDiscoveryCategoryExpander(fallbackExpander, discoveryClient) + discoveryCategoryExpander, err := categories.NewDiscoveryCategoryExpander(fallbackExpander, discoveryClient) CheckErr(err) return discoveryCategoryExpander @@ -146,9 +136,6 @@ func (f *ring1Factory) ClientForMapping(mapping *meta.RESTMapping) (resource.RES } gvk := mapping.GroupVersionKind switch gvk.Group { - case federation.GroupName: - mappingVersion := mapping.GroupVersionKind.GroupVersion() - return f.clientAccessFactory.FederationClientForVersion(&mappingVersion) case api.GroupName: cfg.APIPath = "/api" default: @@ -179,15 +166,6 @@ func (f *ring1Factory) UnstructuredClientForMapping(mapping *meta.RESTMapping) ( func (f *ring1Factory) Describer(mapping *meta.RESTMapping) (printers.Describer, error) { mappingVersion := mapping.GroupVersionKind.GroupVersion() - if mapping.GroupVersionKind.Group == federation.GroupName { - fedClientSet, err := f.clientAccessFactory.FederationClientSetForVersion(&mappingVersion) - if err != nil { - return nil, err - } - if mapping.GroupVersionKind.Kind == "Cluster" { - return &printersinternal.ClusterDescriber{Interface: fedClientSet}, nil - } - } clientset, err := f.clientAccessFactory.ClientSetForVersion(&mappingVersion) if err != nil { @@ -288,15 +266,11 @@ func (f *ring1Factory) LogsForObject(object, options runtime.Object, timeout tim } default: - gvks, _, err := api.Scheme.ObjectKinds(object) - if err != nil { - return nil, err - } - return nil, fmt.Errorf("cannot get the logs from %v", gvks[0]) + return nil, fmt.Errorf("cannot get the logs from %T", object) } sortBy := func(pods []*v1.Pod) sort.Interface { return controller.ByLogging(pods) } - pod, numPods, err := GetFirstPod(clientset.Core(), namespace, selector, timeout, sortBy) + pod, numPods, err := GetFirstPod(clientset.Core(), namespace, selector.String(), timeout, sortBy) if err != nil { return nil, err } @@ -330,32 +304,51 @@ func (f *ring1Factory) Reaper(mapping *meta.RESTMapping) (kubectl.Reaper, error) } func (f *ring1Factory) HistoryViewer(mapping *meta.RESTMapping) (kubectl.HistoryViewer, error) { - mappingVersion := mapping.GroupVersionKind.GroupVersion() - clientset, err := f.clientAccessFactory.ClientSetForVersion(&mappingVersion) + external, err := f.clientAccessFactory.KubernetesClientSet() if err != nil { return nil, err } - return kubectl.HistoryViewerFor(mapping.GroupVersionKind.GroupKind(), clientset) + return kubectl.HistoryViewerFor(mapping.GroupVersionKind.GroupKind(), 
external) } func (f *ring1Factory) Rollbacker(mapping *meta.RESTMapping) (kubectl.Rollbacker, error) { - mappingVersion := mapping.GroupVersionKind.GroupVersion() - clientset, err := f.clientAccessFactory.ClientSetForVersion(&mappingVersion) + external, err := f.clientAccessFactory.KubernetesClientSet() if err != nil { return nil, err } - return kubectl.RollbackerFor(mapping.GroupVersionKind.GroupKind(), clientset) + return kubectl.RollbackerFor(mapping.GroupVersionKind.GroupKind(), external) } func (f *ring1Factory) StatusViewer(mapping *meta.RESTMapping) (kubectl.StatusViewer, error) { - mappingVersion := mapping.GroupVersionKind.GroupVersion() - clientset, err := f.clientAccessFactory.ClientSetForVersion(&mappingVersion) + clientset, err := f.clientAccessFactory.KubernetesClientSet() if err != nil { return nil, err } return kubectl.StatusViewerFor(mapping.GroupVersionKind.GroupKind(), clientset) } +func (f *ring1Factory) ApproximatePodTemplateForObject(object runtime.Object) (*api.PodTemplateSpec, error) { + switch t := object.(type) { + case *api.Pod: + return &api.PodTemplateSpec{ + ObjectMeta: t.ObjectMeta, + Spec: t.Spec, + }, nil + case *api.ReplicationController: + return t.Spec.Template, nil + case *extensions.ReplicaSet: + return &t.Spec.Template, nil + case *extensions.DaemonSet: + return &t.Spec.Template, nil + case *extensions.Deployment: + return &t.Spec.Template, nil + case *batch.Job: + return &t.Spec.Template, nil + } + + return nil, fmt.Errorf("unable to extract pod template from type %v", reflect.TypeOf(object)) +} + func (f *ring1Factory) AttachablePodForObject(object runtime.Object, timeout time.Duration) (*api.Pod, error) { clientset, err := f.clientAccessFactory.ClientSetForVersion(nil) if err != nil { @@ -397,64 +390,28 @@ func (f *ring1Factory) AttachablePodForObject(object runtime.Object, timeout tim return t, nil default: - gvks, _, err := api.Scheme.ObjectKinds(object) - if err != nil { - return nil, err - } - return nil, fmt.Errorf("cannot attach to %v: not implemented", gvks[0]) + return nil, fmt.Errorf("cannot attach to %T: not implemented", object) } sortBy := func(pods []*v1.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) } - pod, _, err := GetFirstPod(clientset.Core(), namespace, selector, timeout, sortBy) + pod, _, err := GetFirstPod(clientset.Core(), namespace, selector.String(), timeout, sortBy) return pod, err } -func (f *ring1Factory) Validator(validate, openapi bool, cacheDir string) (validation.Schema, error) { - if validate { - if openapi { - resources, err := f.OpenAPISchema() - if err == nil { - return validation.ConjunctiveSchema{ - openapivalidation.NewSchemaValidation(resources), - validation.NoDoubleKeySchema{}, - }, nil - } - - glog.Warningf("Failed to download OpenAPI (%v), falling back to swagger", err) - } - - discovery, err := f.clientAccessFactory.DiscoveryClient() - if err != nil { - return nil, err - } - dir := cacheDir - if len(dir) > 0 { - version, err := discovery.ServerVersion() - if err == nil { - dir = path.Join(cacheDir, version.String()) - } else { - dir = "" // disable caching as a fallback - } - } - swaggerSchema := &clientSwaggerSchema{ - c: discovery.RESTClient(), - cacheDir: dir, - } - return validation.ConjunctiveSchema{ - swaggerSchema, - validation.NoDoubleKeySchema{}, - }, nil +func (f *ring1Factory) Validator(validate bool) (validation.Schema, error) { + if !validate { + return validation.NullSchema{}, nil } - return validation.NullSchema{}, nil -} -func (f *ring1Factory) SwaggerSchema(gvk 
schema.GroupVersionKind) (*swagger.ApiDeclaration, error) { - version := gvk.GroupVersion() - discovery, err := f.clientAccessFactory.DiscoveryClient() + resources, err := f.OpenAPISchema() if err != nil { return nil, err } - return discovery.SwaggerSchema(version) + + return validation.ConjunctiveSchema{ + openapivalidation.NewSchemaValidation(resources), + validation.NoDoubleKeySchema{}, + }, nil } // OpenAPISchema returns metadata and structural information about Kubernetes object definitions. diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/helpers.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/helpers.go index b41af0f842f5..b982d86b8734 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/helpers.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/helpers.go @@ -27,7 +27,7 @@ import ( "strings" "time" - jsonpatch "github.com/evanphx/json-patch" + "github.com/evanphx/json-patch" "github.com/golang/glog" "github.com/spf13/cobra" @@ -36,7 +36,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/json" @@ -44,7 +43,7 @@ import ( "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/apimachinery/pkg/util/yaml" "k8s.io/client-go/tools/clientcmd" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/kubectl/resource" "k8s.io/kubernetes/pkg/printers" @@ -120,11 +119,6 @@ func CheckErr(err error) { checkErr(err, fatalErrHandler) } -// checkErrWithPrefix works like CheckErr, but adds a caller-defined prefix to non-nil errors -func checkErrWithPrefix(prefix string, err error) { - checkErr(err, fatalErrHandler) -} - // checkErr formats a given error as a string and calls the passed handleErr // func with that string and an kubectl exit code. func checkErr(err error, handleErr func(string, int)) { @@ -368,6 +362,15 @@ func GetFlagInt(cmd *cobra.Command, flag string) int { return i } +// Assumes the flag has a default value. +func GetFlagInt32(cmd *cobra.Command, flag string) int32 { + i, err := cmd.Flags().GetInt32(flag) + if err != nil { + glog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err) + } + return i +} + // Assumes the flag has a default value. 
func GetFlagInt64(cmd *cobra.Command, flag string) int64 { i, err := cmd.Flags().GetInt64(flag) @@ -395,20 +398,10 @@ func GetPodRunningTimeoutFlag(cmd *cobra.Command) (time.Duration, error) { func AddValidateFlags(cmd *cobra.Command) { cmd.Flags().Bool("validate", true, "If true, use a schema to validate the input before sending it") - cmd.Flags().String("schema-cache-dir", fmt.Sprintf("~/%s/%s", clientcmd.RecommendedHomeDir, clientcmd.RecommendedSchemaName), fmt.Sprintf("If non-empty, load/store cached API schemas in this directory, default is '$HOME/%s/%s'", clientcmd.RecommendedHomeDir, clientcmd.RecommendedSchemaName)) - cmd.Flags().Bool("openapi-validation", true, "If true, use openapi rather than swagger for validation.") - cmd.MarkFlagFilename("schema-cache-dir") } func AddValidateOptionFlags(cmd *cobra.Command, options *ValidateOptions) { cmd.Flags().BoolVar(&options.EnableValidation, "validate", true, "If true, use a schema to validate the input before sending it") - cmd.Flags().StringVar(&options.SchemaCacheDir, "schema-cache-dir", fmt.Sprintf("~/%s/%s", clientcmd.RecommendedHomeDir, clientcmd.RecommendedSchemaName), fmt.Sprintf("If non-empty, load/store cached API schemas in this directory, default is '$HOME/%s/%s'", clientcmd.RecommendedHomeDir, clientcmd.RecommendedSchemaName)) - cmd.Flags().BoolVar(&options.UseOpenAPI, "openapi-validation", true, "If true, use openapi rather than swagger for validation") - cmd.MarkFlagFilename("schema-cache-dir") -} - -func AddOpenAPIFlags(cmd *cobra.Command) { - cmd.Flags().Bool("openapi-validation", true, "If true, use openapi rather than swagger for validation") } func AddFilenameOptionFlags(cmd *cobra.Command, options *resource.FilenameOptions, usage string) { @@ -446,8 +439,6 @@ func AddGeneratorFlags(cmd *cobra.Command, defaultGenerator string) { type ValidateOptions struct { EnableValidation bool - UseOpenAPI bool - SchemaCacheDir string } func ReadConfigDataFromReader(reader io.Reader, source string) ([]byte, error) { @@ -590,7 +581,7 @@ func ChangeResourcePatch(info *resource.Info, changeCause string) ([]byte, types } } -// containsChangeCause checks if input resource info contains change-cause annotation. +// ContainsChangeCause checks if input resource info contains change-cause annotation. func ContainsChangeCause(info *resource.Info) bool { annotations, err := info.Mapping.MetadataAccessor.Annotations(info.Object) if err != nil { @@ -705,7 +696,7 @@ func FilterResourceList(obj runtime.Object, filterFuncs kubectl.Filters, filterO if err != nil { return 0, []runtime.Object{obj}, utilerrors.NewAggregate([]error{err}) } - if errs := runtime.DecodeList(items, api.Codecs.UniversalDecoder(), unstructured.UnstructuredJSONScheme); len(errs) > 0 { + if errs := runtime.DecodeList(items, legacyscheme.Codecs.UniversalDecoder(), unstructured.UnstructuredJSONScheme); len(errs) > 0 { return 0, []runtime.Object{obj}, utilerrors.NewAggregate(errs) } @@ -748,17 +739,6 @@ func PrintFilterCount(out io.Writer, found, hidden, errors int, options *printer } } -// ObjectListToVersionedObject receives a list of api objects and a group version -// and squashes the list's items into a single versioned runtime.Object. 
-func ObjectListToVersionedObject(objects []runtime.Object, version schema.GroupVersion) (runtime.Object, error) { - objectList := &api.List{Items: objects} - converted, err := resource.TryConvert(api.Scheme, objectList, version, api.Registry.GroupOrDie(api.GroupName).GroupVersion) - if err != nil { - return nil, err - } - return converted, nil -} - // IsSiblingCommandExists receives a pointer to a cobra command and a target string. // Returns true if the target string is found in the list of sibling commands. func IsSiblingCommandExists(cmd *cobra.Command, targetCmdName string) bool { @@ -778,6 +758,7 @@ func DefaultSubCommandRun(out io.Writer) func(c *cobra.Command, args []string) { c.SetOutput(out) RequireNoArguments(c, args) c.Help() + CheckErr(ErrExit) } } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/BUILD b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/BUILD index 1075879cd70c..149567c293eb 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/BUILD @@ -10,17 +10,17 @@ go_library( name = "go_default_library", srcs = [ "doc.go", - "document.go", "extensions.go", "openapi.go", "openapi_getter.go", ], + importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi", deps = [ "//vendor/github.com/go-openapi/spec:go_default_library", "//vendor/github.com/googleapis/gnostic/OpenAPIv2:go_default_library", - "//vendor/gopkg.in/yaml.v2:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/client-go/discovery:go_default_library", + "//vendor/k8s.io/kube-openapi/pkg/util/proto:go_default_library", ], ) @@ -33,6 +33,7 @@ go_test( "openapi_test.go", ], data = ["//api/openapi-spec:swagger-spec"], + importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi_test", deps = [ ":go_default_library", "//pkg/kubectl/cmd/util/openapi/testing:go_default_library", @@ -41,6 +42,7 @@ go_test( "//vendor/github.com/onsi/ginkgo/types:go_default_library", "//vendor/github.com/onsi/gomega:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/kube-openapi/pkg/util/proto:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/OWNERS b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/OWNERS new file mode 100644 index 000000000000..7eb2d06fe1ac --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/OWNERS @@ -0,0 +1,4 @@ +approvers: +- apelisse +reviewers: +- apelisse diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/document.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/document.go deleted file mode 100644 index 6b2f6782b948..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/document.go +++ /dev/null @@ -1,330 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package openapi - -import ( - "fmt" - "strings" - - openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2" - yaml "gopkg.in/yaml.v2" - - "k8s.io/apimachinery/pkg/runtime/schema" -) - -func newSchemaError(path *Path, format string, a ...interface{}) error { - err := fmt.Sprintf(format, a...) - if path.Len() == 0 { - return fmt.Errorf("SchemaError: %v", err) - } - return fmt.Errorf("SchemaError(%v): %v", path, err) -} - -// groupVersionKindExtensionKey is the key used to lookup the -// GroupVersionKind value for an object definition from the -// definition's "extensions" map. -const groupVersionKindExtensionKey = "x-kubernetes-group-version-kind" - -func vendorExtensionToMap(e []*openapi_v2.NamedAny) map[string]interface{} { - values := map[string]interface{}{} - - for _, na := range e { - if na.GetName() == "" || na.GetValue() == nil { - continue - } - if na.GetValue().GetYaml() == "" { - continue - } - var value interface{} - err := yaml.Unmarshal([]byte(na.GetValue().GetYaml()), &value) - if err != nil { - continue - } - - values[na.GetName()] = value - } - - return values -} - -// Get and parse GroupVersionKind from the extension. Returns empty if it doesn't have one. -func parseGroupVersionKind(s *openapi_v2.Schema) schema.GroupVersionKind { - extensionMap := vendorExtensionToMap(s.GetVendorExtension()) - - // Get the extensions - gvkExtension, ok := extensionMap[groupVersionKindExtensionKey] - if !ok { - return schema.GroupVersionKind{} - } - - // gvk extension must be a list of 1 element. - gvkList, ok := gvkExtension.([]interface{}) - if !ok { - return schema.GroupVersionKind{} - } - if len(gvkList) != 1 { - return schema.GroupVersionKind{} - - } - gvk := gvkList[0] - - // gvk extension list must be a map with group, version, and - // kind fields - gvkMap, ok := gvk.(map[interface{}]interface{}) - if !ok { - return schema.GroupVersionKind{} - } - group, ok := gvkMap["group"].(string) - if !ok { - return schema.GroupVersionKind{} - } - version, ok := gvkMap["version"].(string) - if !ok { - return schema.GroupVersionKind{} - } - kind, ok := gvkMap["kind"].(string) - if !ok { - return schema.GroupVersionKind{} - } - - return schema.GroupVersionKind{ - Group: group, - Version: version, - Kind: kind, - } -} - -// Definitions is an implementation of `Resources`. It looks for -// resources in an openapi Schema. -type Definitions struct { - models map[string]Schema - resources map[schema.GroupVersionKind]string -} - -var _ Resources = &Definitions{} - -// NewOpenAPIData creates a new `Resources` out of the openapi document. -func NewOpenAPIData(doc *openapi_v2.Document) (Resources, error) { - definitions := Definitions{ - models: map[string]Schema{}, - resources: map[schema.GroupVersionKind]string{}, - } - - // Save the list of all models first. This will allow us to - // validate that we don't have any dangling reference. - for _, namedSchema := range doc.GetDefinitions().GetAdditionalProperties() { - definitions.models[namedSchema.GetName()] = nil - } - - // Now, parse each model. We can validate that references exists. 
- for _, namedSchema := range doc.GetDefinitions().GetAdditionalProperties() { - path := NewPath(namedSchema.GetName()) - schema, err := definitions.ParseSchema(namedSchema.GetValue(), &path) - if err != nil { - return nil, err - } - definitions.models[namedSchema.GetName()] = schema - gvk := parseGroupVersionKind(namedSchema.GetValue()) - if len(gvk.Kind) > 0 { - definitions.resources[gvk] = namedSchema.GetName() - } - } - - return &definitions, nil -} - -// We believe the schema is a reference, verify that and returns a new -// Schema -func (d *Definitions) parseReference(s *openapi_v2.Schema, path *Path) (Schema, error) { - if len(s.GetProperties().GetAdditionalProperties()) > 0 { - return nil, newSchemaError(path, "unallowed embedded type definition") - } - if len(s.GetType().GetValue()) > 0 { - return nil, newSchemaError(path, "definition reference can't have a type") - } - - if !strings.HasPrefix(s.GetXRef(), "#/definitions/") { - return nil, newSchemaError(path, "unallowed reference to non-definition %q", s.GetXRef()) - } - reference := strings.TrimPrefix(s.GetXRef(), "#/definitions/") - if _, ok := d.models[reference]; !ok { - return nil, newSchemaError(path, "unknown model in reference: %q", reference) - } - return &Ref{ - BaseSchema: d.parseBaseSchema(s, path), - reference: reference, - definitions: d, - }, nil -} - -func (d *Definitions) parseBaseSchema(s *openapi_v2.Schema, path *Path) BaseSchema { - return BaseSchema{ - Description: s.GetDescription(), - Extensions: vendorExtensionToMap(s.GetVendorExtension()), - Path: *path, - } -} - -// We believe the schema is a map, verify and return a new schema -func (d *Definitions) parseMap(s *openapi_v2.Schema, path *Path) (Schema, error) { - if len(s.GetType().GetValue()) != 0 && s.GetType().GetValue()[0] != object { - return nil, newSchemaError(path, "invalid object type") - } - if s.GetAdditionalProperties().GetSchema() == nil { - return nil, newSchemaError(path, "invalid object doesn't have additional properties") - } - sub, err := d.ParseSchema(s.GetAdditionalProperties().GetSchema(), path) - if err != nil { - return nil, err - } - return &Map{ - BaseSchema: d.parseBaseSchema(s, path), - SubType: sub, - }, nil -} - -func (d *Definitions) parsePrimitive(s *openapi_v2.Schema, path *Path) (Schema, error) { - var t string - if len(s.GetType().GetValue()) > 1 { - return nil, newSchemaError(path, "primitive can't have more than 1 type") - } - if len(s.GetType().GetValue()) == 1 { - t = s.GetType().GetValue()[0] - } - switch t { - case String: - case Number: - case Integer: - case Boolean: - case "": // Some models are completely empty, and can be safely ignored. 
- // Do nothing - default: - return nil, newSchemaError(path, "Unknown primitive type: %q", t) - } - return &Primitive{ - BaseSchema: d.parseBaseSchema(s, path), - Type: t, - Format: s.GetFormat(), - }, nil -} - -func (d *Definitions) parseArray(s *openapi_v2.Schema, path *Path) (Schema, error) { - if len(s.GetType().GetValue()) != 1 { - return nil, newSchemaError(path, "array should have exactly one type") - } - if s.GetType().GetValue()[0] != array { - return nil, newSchemaError(path, `array should have type "array"`) - } - if len(s.GetItems().GetSchema()) != 1 { - return nil, newSchemaError(path, "array should have exactly one sub-item") - } - sub, err := d.ParseSchema(s.GetItems().GetSchema()[0], path) - if err != nil { - return nil, err - } - return &Array{ - BaseSchema: d.parseBaseSchema(s, path), - SubType: sub, - }, nil -} - -func (d *Definitions) parseKind(s *openapi_v2.Schema, path *Path) (Schema, error) { - if len(s.GetType().GetValue()) != 0 && s.GetType().GetValue()[0] != object { - return nil, newSchemaError(path, "invalid object type") - } - if s.GetProperties() == nil { - return nil, newSchemaError(path, "object doesn't have properties") - } - - fields := map[string]Schema{} - - for _, namedSchema := range s.GetProperties().GetAdditionalProperties() { - var err error - path := path.FieldPath(namedSchema.GetName()) - fields[namedSchema.GetName()], err = d.ParseSchema(namedSchema.GetValue(), &path) - if err != nil { - return nil, err - } - } - - return &Kind{ - BaseSchema: d.parseBaseSchema(s, path), - RequiredFields: s.GetRequired(), - Fields: fields, - }, nil -} - -// ParseSchema creates a walkable Schema from an openapi schema. While -// this function is public, it doesn't leak through the interface. -func (d *Definitions) ParseSchema(s *openapi_v2.Schema, path *Path) (Schema, error) { - if len(s.GetType().GetValue()) == 1 { - t := s.GetType().GetValue()[0] - switch t { - case object: - return d.parseMap(s, path) - case array: - return d.parseArray(s, path) - } - - } - if s.GetXRef() != "" { - return d.parseReference(s, path) - } - if s.GetProperties() != nil { - return d.parseKind(s, path) - } - return d.parsePrimitive(s, path) -} - -// LookupResource is public through the interface of Resources. It -// returns a visitable schema from the given group-version-kind. -func (d *Definitions) LookupResource(gvk schema.GroupVersionKind) Schema { - modelName, found := d.resources[gvk] - if !found { - return nil - } - model, found := d.models[modelName] - if !found { - return nil - } - return model -} - -type Ref struct { - BaseSchema - - reference string - definitions *Definitions -} - -var _ Reference = &Ref{} - -func (r *Ref) Reference() string { - return r.reference -} - -func (r *Ref) SubSchema() Schema { - return r.definitions.models[r.reference] -} - -func (r *Ref) Accept(v SchemaVisitor) { - v.VisitReference(r) -} - -func (r *Ref) GetName() string { - return fmt.Sprintf("Reference to %q", r.reference) -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/openapi.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/openapi.go index 8e6cf0639f44..fef6a187ba64 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/openapi.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/openapi.go @@ -17,215 +17,108 @@ limitations under the License. package openapi import ( - "fmt" - "strings" + openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2" "k8s.io/apimachinery/pkg/runtime/schema" -) - -// Defines openapi types. 
-const ( - Integer = "integer" - Number = "number" - String = "string" - Boolean = "boolean" - - // These types are private as they should never leak, and are - // represented by actual structs. - array = "array" - object = "object" + "k8s.io/kube-openapi/pkg/util/proto" ) // Resources interface describe a resources provider, that can give you // resource based on group-version-kind. type Resources interface { - LookupResource(gvk schema.GroupVersionKind) Schema -} - -// SchemaVisitor is an interface that you need to implement if you want -// to "visit" an openapi schema. A dispatch on the Schema type will call -// the appropriate function based on its actual type: -// - Array is a list of one and only one given subtype -// - Map is a map of string to one and only one given subtype -// - Primitive can be string, integer, number and boolean. -// - Kind is an object with specific fields mapping to specific types. -// - Reference is a link to another definition. -type SchemaVisitor interface { - VisitArray(*Array) - VisitMap(*Map) - VisitPrimitive(*Primitive) - VisitKind(*Kind) - VisitReference(Reference) -} - -// Schema is the base definition of an openapi type. -type Schema interface { - // Giving a visitor here will let you visit the actual type. - Accept(SchemaVisitor) - - // Pretty print the name of the type. - GetName() string - // Describes how to access this field. - GetPath() *Path - // Describes the field. - GetDescription() string - // Returns type extensions. - GetExtensions() map[string]interface{} -} - -// Path helps us keep track of type paths -type Path struct { - parent *Path - key string + LookupResource(gvk schema.GroupVersionKind) proto.Schema } -func NewPath(key string) Path { - return Path{key: key} -} +// groupVersionKindExtensionKey is the key used to lookup the +// GroupVersionKind value for an object definition from the +// definition's "extensions" map. +const groupVersionKindExtensionKey = "x-kubernetes-group-version-kind" -func (p *Path) Get() []string { - if p == nil { - return []string{} - } - if p.key == "" { - return p.parent.Get() - } - return append(p.parent.Get(), p.key) +// document is an implementation of `Resources`. It looks for +// resources in an openapi Schema. +type document struct { + // Maps gvk to model name + resources map[schema.GroupVersionKind]string + models proto.Models } -func (p *Path) Len() int { - return len(p.Get()) -} - -func (p *Path) String() string { - return strings.Join(p.Get(), "") -} +var _ Resources = &document{} -// ArrayPath appends an array index and creates a new path -func (p *Path) ArrayPath(i int) Path { - return Path{ - parent: p, - key: fmt.Sprintf("[%d]", i), +func NewOpenAPIData(doc *openapi_v2.Document) (Resources, error) { + models, err := proto.NewOpenAPIData(doc) + if err != nil { + return nil, err } -} -// FieldPath appends a field name and creates a new path -func (p *Path) FieldPath(field string) Path { - return Path{ - parent: p, - key: fmt.Sprintf(".%s", field), + resources := map[schema.GroupVersionKind]string{} + for _, modelName := range models.ListModels() { + model := models.LookupModel(modelName) + if model == nil { + panic("ListModels returns a model that can't be looked-up.") + } + gvk := parseGroupVersionKind(model) + if len(gvk.Kind) > 0 { + resources[gvk] = modelName + } } -} - -// BaseSchema holds data used by each types of schema. 
-type BaseSchema struct { - Description string - Extensions map[string]interface{} - - Path Path -} - -func (b *BaseSchema) GetDescription() string { - return b.Description -} -func (b *BaseSchema) GetExtensions() map[string]interface{} { - return b.Extensions + return &document{ + resources: resources, + models: models, + }, nil } -func (b *BaseSchema) GetPath() *Path { - return &b.Path -} - -// Array must have all its element of the same `SubType`. -type Array struct { - BaseSchema - - SubType Schema -} - -var _ Schema = &Array{} - -func (a *Array) Accept(v SchemaVisitor) { - v.VisitArray(a) -} - -func (a *Array) GetName() string { - return fmt.Sprintf("Array of %s", a.SubType.GetName()) -} - -// Kind is a complex object. It can have multiple different -// subtypes for each field, as defined in the `Fields` field. Mandatory -// fields are listed in `RequiredFields`. The key of the object is -// always of type `string`. -type Kind struct { - BaseSchema - - // Lists names of required fields. - RequiredFields []string - // Maps field names to types. - Fields map[string]Schema -} - -var _ Schema = &Kind{} - -func (k *Kind) Accept(v SchemaVisitor) { - v.VisitKind(k) -} - -func (k *Kind) GetName() string { - properties := []string{} - for key := range k.Fields { - properties = append(properties, key) +func (d *document) LookupResource(gvk schema.GroupVersionKind) proto.Schema { + modelName, found := d.resources[gvk] + if !found { + return nil } - return fmt.Sprintf("Kind(%v)", properties) -} - -// Map is an object who values must all be of the same `SubType`. -// The key of the object is always of type `string`. -type Map struct { - BaseSchema - - SubType Schema + return d.models.LookupModel(modelName) } -var _ Schema = &Map{} +// Get and parse GroupVersionKind from the extension. Returns empty if it doesn't have one. +func parseGroupVersionKind(s proto.Schema) schema.GroupVersionKind { + extensions := s.GetExtensions() -func (m *Map) Accept(v SchemaVisitor) { - v.VisitMap(m) -} - -func (m *Map) GetName() string { - return fmt.Sprintf("Map of %s", m.SubType.GetName()) -} - -// Primitive is a literal. There can be multiple types of primitives, -// and this subtype can be visited through the `subType` field. -type Primitive struct { - BaseSchema - - // Type of a primitive must be one of: integer, number, string, boolean. - Type string - Format string -} - -var _ Schema = &Primitive{} + // Get the extensions + gvkExtension, ok := extensions[groupVersionKindExtensionKey] + if !ok { + return schema.GroupVersionKind{} + } -func (p *Primitive) Accept(v SchemaVisitor) { - v.VisitPrimitive(p) -} + // gvk extension must be a list of 1 element. + gvkList, ok := gvkExtension.([]interface{}) + if !ok { + return schema.GroupVersionKind{} + } + if len(gvkList) != 1 { + return schema.GroupVersionKind{} -func (p *Primitive) GetName() string { - if p.Format == "" { - return p.Type } - return fmt.Sprintf("%s (%s)", p.Type, p.Format) -} + gvk := gvkList[0] -// Reference implementation depends on the type of document. 
-type Reference interface { - Schema + // gvk extension list must be a map with group, version, and + // kind fields + gvkMap, ok := gvk.(map[interface{}]interface{}) + if !ok { + return schema.GroupVersionKind{} + } + group, ok := gvkMap["group"].(string) + if !ok { + return schema.GroupVersionKind{} + } + version, ok := gvkMap["version"].(string) + if !ok { + return schema.GroupVersionKind{} + } + kind, ok := gvkMap["kind"].(string) + if !ok { + return schema.GroupVersionKind{} + } - Reference() string - SubSchema() Schema + return schema.GroupVersionKind{ + Group: group, + Version: version, + Kind: kind, + } } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/validation/BUILD b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/validation/BUILD index 77b98f39bb3e..cf05b074a9a9 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/validation/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/validation/BUILD @@ -1,7 +1,5 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", @@ -10,33 +8,28 @@ load( go_library( name = "go_default_library", - srcs = [ - "errors.go", - "types.go", - "validation.go", - ], - tags = ["automanaged"], + srcs = ["validation.go"], + importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/validation", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/util:go_default_library", "//pkg/kubectl/cmd/util/openapi:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/yaml:go_default_library", + "//vendor/k8s.io/kube-openapi/pkg/util/proto/validation:go_default_library", ], ) go_test( - name = "go_default_xtest", + name = "go_default_test", srcs = [ "validation_suite_test.go", "validation_test.go", ], data = ["//api/openapi-spec:swagger-spec"], - tags = ["automanaged"], + importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/validation", + library = ":go_default_library", deps = [ - ":go_default_library", "//pkg/api/testapi:go_default_library", "//pkg/kubectl/cmd/util/openapi:go_default_library", "//pkg/kubectl/cmd/util/openapi/testing:go_default_library", @@ -45,6 +38,7 @@ go_test( "//vendor/github.com/onsi/ginkgo/types:go_default_library", "//vendor/github.com/onsi/gomega:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", + "//vendor/k8s.io/kube-openapi/pkg/util/proto/validation:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/validation/errors.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/validation/errors.go deleted file mode 100644 index 4f4ab9e3bd18..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/validation/errors.go +++ /dev/null @@ -1,79 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package validation - -import ( - "fmt" -) - -type Errors struct { - errors []error -} - -func (e *Errors) Errors() []error { - return e.errors -} - -func (e *Errors) AppendErrors(err ...error) { - e.errors = append(e.errors, err...) -} - -type ValidationError struct { - Path string - Err error -} - -func (e ValidationError) Error() string { - return fmt.Sprintf("ValidationError(%s): %v", e.Path, e.Err) -} - -type InvalidTypeError struct { - Path string - Expected string - Actual string -} - -func (e InvalidTypeError) Error() string { - return fmt.Sprintf("invalid type for %s: got %q, expected %q", e.Path, e.Actual, e.Expected) -} - -type MissingRequiredFieldError struct { - Path string - Field string -} - -func (e MissingRequiredFieldError) Error() string { - return fmt.Sprintf("missing required field %q in %s", e.Field, e.Path) -} - -type UnknownFieldError struct { - Path string - Field string -} - -func (e UnknownFieldError) Error() string { - return fmt.Sprintf("unknown field %q in %s", e.Field, e.Path) -} - -type InvalidObjectTypeError struct { - Path string - Type string -} - -func (e InvalidObjectTypeError) Error() string { - return fmt.Sprintf("unknown object type %q in %s", e.Type, e.Path) -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/validation/types.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/validation/types.go deleted file mode 100644 index 2c16c2b16fc3..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/validation/types.go +++ /dev/null @@ -1,289 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package validation - -import ( - "reflect" - "sort" - - "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi" -) - -type ValidationItem interface { - openapi.SchemaVisitor - - Errors() []error - Path() *openapi.Path -} - -type baseItem struct { - errors Errors - path openapi.Path -} - -// Errors returns the list of errors found for this item. -func (item *baseItem) Errors() []error { - return item.errors.Errors() -} - -// AddValidationError wraps the given error into a ValidationError and -// attaches it to this item. -func (item *baseItem) AddValidationError(err error) { - item.errors.AppendErrors(ValidationError{Path: item.path.String(), Err: err}) -} - -// AddError adds a regular (non-validation related) error to the list. -func (item *baseItem) AddError(err error) { - item.errors.AppendErrors(err) -} - -// CopyErrors adds a list of errors to this item. This is useful to copy -// errors from subitems. -func (item *baseItem) CopyErrors(errs []error) { - item.errors.AppendErrors(errs...) -} - -// Path returns the path of this item, helps print useful errors. -func (item *baseItem) Path() *openapi.Path { - return &item.path -} - -// mapItem represents a map entry in the yaml. 
-type mapItem struct { - baseItem - - Map map[string]interface{} -} - -func (item *mapItem) sortedKeys() []string { - sortedKeys := []string{} - for key := range item.Map { - sortedKeys = append(sortedKeys, key) - } - sort.Strings(sortedKeys) - return sortedKeys -} - -var _ ValidationItem = &mapItem{} - -func (item *mapItem) VisitPrimitive(schema *openapi.Primitive) { - item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: schema.Type, Actual: "map"}) -} - -func (item *mapItem) VisitArray(schema *openapi.Array) { - item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "array", Actual: "map"}) -} - -func (item *mapItem) VisitMap(schema *openapi.Map) { - for _, key := range item.sortedKeys() { - subItem, err := itemFactory(item.Path().FieldPath(key), item.Map[key]) - if err != nil { - item.AddError(err) - continue - } - schema.SubType.Accept(subItem) - item.CopyErrors(subItem.Errors()) - } -} - -func (item *mapItem) VisitKind(schema *openapi.Kind) { - // Verify each sub-field. - for _, key := range item.sortedKeys() { - if item.Map[key] == nil { - continue - } - subItem, err := itemFactory(item.Path().FieldPath(key), item.Map[key]) - if err != nil { - item.AddError(err) - continue - } - if _, ok := schema.Fields[key]; !ok { - item.AddValidationError(UnknownFieldError{Path: schema.GetPath().String(), Field: key}) - continue - } - schema.Fields[key].Accept(subItem) - item.CopyErrors(subItem.Errors()) - } - - // Verify that all required fields are present. - for _, required := range schema.RequiredFields { - if v, ok := item.Map[required]; !ok || v == nil { - item.AddValidationError(MissingRequiredFieldError{Path: schema.GetPath().String(), Field: required}) - } - } -} - -func (item *mapItem) VisitReference(schema openapi.Reference) { - // passthrough - schema.SubSchema().Accept(item) -} - -// arrayItem represents a yaml array. -type arrayItem struct { - baseItem - - Array []interface{} -} - -var _ ValidationItem = &arrayItem{} - -func (item *arrayItem) VisitPrimitive(schema *openapi.Primitive) { - item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: schema.Type, Actual: "array"}) -} - -func (item *arrayItem) VisitArray(schema *openapi.Array) { - for i, v := range item.Array { - path := item.Path().ArrayPath(i) - if v == nil { - item.AddValidationError(InvalidObjectTypeError{Type: "nil", Path: path.String()}) - continue - } - subItem, err := itemFactory(path, v) - if err != nil { - item.AddError(err) - continue - } - schema.SubType.Accept(subItem) - item.CopyErrors(subItem.Errors()) - } -} - -func (item *arrayItem) VisitMap(schema *openapi.Map) { - item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "array", Actual: "map"}) -} - -func (item *arrayItem) VisitKind(schema *openapi.Kind) { - item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "array", Actual: "map"}) -} - -func (item *arrayItem) VisitReference(schema openapi.Reference) { - // passthrough - schema.SubSchema().Accept(item) -} - -// primitiveItem represents a yaml value. -type primitiveItem struct { - baseItem - - Value interface{} - Kind string -} - -var _ ValidationItem = &primitiveItem{} - -func (item *primitiveItem) VisitPrimitive(schema *openapi.Primitive) { - // Some types of primitives can match more than one (a number - // can be a string, but not the other way around). 
Return from - // the switch if we have a valid possible type conversion - // NOTE(apelisse): This logic is blindly copied from the - // existing swagger logic, and I'm not sure I agree with it. - switch schema.Type { - case openapi.Boolean: - switch item.Kind { - case openapi.Boolean: - return - } - case openapi.Integer: - switch item.Kind { - case openapi.Integer, openapi.Number: - return - } - case openapi.Number: - switch item.Kind { - case openapi.Number: - return - } - case openapi.String: - return - } - - item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: schema.Type, Actual: item.Kind}) -} - -func (item *primitiveItem) VisitArray(schema *openapi.Array) { - item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "array", Actual: item.Kind}) -} - -func (item *primitiveItem) VisitMap(schema *openapi.Map) { - item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "map", Actual: item.Kind}) -} - -func (item *primitiveItem) VisitKind(schema *openapi.Kind) { - item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "map", Actual: item.Kind}) -} - -func (item *primitiveItem) VisitReference(schema openapi.Reference) { - // passthrough - schema.SubSchema().Accept(item) -} - -// itemFactory creates the relevant item type/visitor based on the current yaml type. -func itemFactory(path openapi.Path, v interface{}) (ValidationItem, error) { - // We need to special case for no-type fields in yaml (e.g. empty item in list) - if v == nil { - return nil, InvalidObjectTypeError{Type: "nil", Path: path.String()} - } - kind := reflect.TypeOf(v).Kind() - switch kind { - case reflect.Bool: - return &primitiveItem{ - baseItem: baseItem{path: path}, - Value: v, - Kind: openapi.Boolean, - }, nil - case reflect.Int, - reflect.Int8, - reflect.Int16, - reflect.Int32, - reflect.Int64, - reflect.Uint, - reflect.Uint8, - reflect.Uint16, - reflect.Uint32, - reflect.Uint64: - return &primitiveItem{ - baseItem: baseItem{path: path}, - Value: v, - Kind: openapi.Integer, - }, nil - case reflect.Float32, - reflect.Float64: - return &primitiveItem{ - baseItem: baseItem{path: path}, - Value: v, - Kind: openapi.Number, - }, nil - case reflect.String: - return &primitiveItem{ - baseItem: baseItem{path: path}, - Value: v, - Kind: openapi.String, - }, nil - case reflect.Array, - reflect.Slice: - return &arrayItem{ - baseItem: baseItem{path: path}, - Array: v.([]interface{}), - }, nil - case reflect.Map: - return &mapItem{ - baseItem: baseItem{path: path}, - Map: v.(map[string]interface{}), - }, nil - } - return nil, InvalidObjectTypeError{Type: kind.String(), Path: path.String()} -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/validation/validation.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/validation/validation.go index 08bc6a573bda..80239e0229ff 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/validation/validation.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/validation/validation.go @@ -18,37 +18,40 @@ package validation import ( "errors" - "fmt" "strings" "k8s.io/apimachinery/pkg/runtime/schema" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/json" "k8s.io/apimachinery/pkg/util/yaml" - "k8s.io/kubernetes/pkg/api" - apiutil "k8s.io/kubernetes/pkg/api/util" + "k8s.io/kube-openapi/pkg/util/proto/validation" "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi" ) +// SchemaValidation validates the object against 
an OpenAPI schema. type SchemaValidation struct { resources openapi.Resources } +// NewSchemaValidation creates a new SchemaValidation that can be used +// to validate objects. func NewSchemaValidation(resources openapi.Resources) *SchemaValidation { return &SchemaValidation{ resources: resources, } } +// ValidateBytes will validates the object against using the Resources +// object. func (v *SchemaValidation) ValidateBytes(data []byte) error { obj, err := parse(data) if err != nil { return err } - gvk, err := getObjectKind(obj) - if err != nil { - return err + gvk, errs := getObjectKind(obj) + if errs != nil { + return utilerrors.NewAggregate(errs) } if strings.HasSuffix(gvk.Kind, "List") { @@ -59,40 +62,33 @@ func (v *SchemaValidation) ValidateBytes(data []byte) error { } func (v *SchemaValidation) validateList(object interface{}) []error { - fields := object.(map[string]interface{}) - if fields == nil { + fields, ok := object.(map[string]interface{}) + if !ok || fields == nil { return []error{errors.New("invalid object to validate")} } - errs := []error{} + allErrors := []error{} + if _, ok := fields["items"].([]interface{}); !ok { + return []error{errors.New("invalid object to validate")} + } for _, item := range fields["items"].([]interface{}) { - if gvk, err := getObjectKind(item); err != nil { - errs = append(errs, err) + if gvk, errs := getObjectKind(item); errs != nil { + allErrors = append(allErrors, errs...) } else { - errs = append(errs, v.validateResource(item, gvk)...) + allErrors = append(allErrors, v.validateResource(item, gvk)...) } } - return errs + return allErrors } func (v *SchemaValidation) validateResource(obj interface{}, gvk schema.GroupVersionKind) []error { - if !api.Registry.IsEnabledVersion(gvk.GroupVersion()) { - // if we don't have this in our scheme, just skip - // validation because its an object we don't recognize - return nil - } - resource := v.resources.LookupResource(gvk) if resource == nil { - return []error{fmt.Errorf("unknown object type %#v", gvk)} + // resource is not present, let's just skip validation. 
+ return nil } - rootValidation, err := itemFactory(openapi.NewPath(gvk.Kind), obj) - if err != nil { - return []error{err} - } - resource.Accept(rootValidation) - return rootValidation.Errors() + return validation.ValidateModel(obj, resource, gvk.Kind) } func parse(data []byte) (interface{}, error) { @@ -107,26 +103,38 @@ func parse(data []byte) (interface{}, error) { return obj, nil } -func getObjectKind(object interface{}) (schema.GroupVersionKind, error) { - fields := object.(map[string]interface{}) - if fields == nil { - return schema.GroupVersionKind{}, errors.New("invalid object to validate") +func getObjectKind(object interface{}) (schema.GroupVersionKind, []error) { + var listErrors []error + fields, ok := object.(map[string]interface{}) + if !ok || fields == nil { + listErrors = append(listErrors, errors.New("invalid object to validate")) + return schema.GroupVersionKind{}, listErrors } + + var group string + var version string apiVersion := fields["apiVersion"] if apiVersion == nil { - return schema.GroupVersionKind{}, errors.New("apiVersion not set") - } - if _, ok := apiVersion.(string); !ok { - return schema.GroupVersionKind{}, errors.New("apiVersion isn't string type") + listErrors = append(listErrors, errors.New("apiVersion not set")) + } else if _, ok := apiVersion.(string); !ok { + listErrors = append(listErrors, errors.New("apiVersion isn't string type")) + } else { + gv, err := schema.ParseGroupVersion(apiVersion.(string)) + if err != nil { + listErrors = append(listErrors, err) + } else { + group = gv.Group + version = gv.Version + } } - version := apiutil.GetVersion(apiVersion.(string)) - group := apiutil.GetGroup(apiVersion.(string)) kind := fields["kind"] if kind == nil { - return schema.GroupVersionKind{}, errors.New("kind not set") + listErrors = append(listErrors, errors.New("kind not set")) + } else if _, ok := kind.(string); !ok { + listErrors = append(listErrors, errors.New("kind isn't string type")) } - if _, ok := kind.(string); !ok { - return schema.GroupVersionKind{}, errors.New("kind isn't string type") + if listErrors != nil { + return schema.GroupVersionKind{}, listErrors } return schema.GroupVersionKind{Group: group, Version: version, Kind: kind.(string)}, nil diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/printing.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/printing.go index 8e26c2559992..d978b9ef96c8 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/printing.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/printing.go @@ -18,13 +18,12 @@ package util import ( "fmt" - "io" "strings" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" "k8s.io/kubernetes/pkg/kubectl" - "k8s.io/kubernetes/pkg/kubectl/resource" + "k8s.io/kubernetes/pkg/kubectl/cmd/templates" "k8s.io/kubernetes/pkg/printers" printersinternal "k8s.io/kubernetes/pkg/printers/internalversion" @@ -73,30 +72,6 @@ func AddNoHeadersFlags(cmd *cobra.Command) { cmd.Flags().Bool("no-headers", false, "When using the default or custom-column output format, don't print headers (default print headers).") } -// PrintSuccess prints message after finishing mutating operations -func PrintSuccess(mapper meta.RESTMapper, shortOutput bool, out io.Writer, resource string, name string, dryRun bool, operation string) { - resource, _ = mapper.ResourceSingularizer(resource) - dryRunMsg := "" - if dryRun { - dryRunMsg = " (dry run)" - } - if shortOutput { - // -o name: prints resource/name - if len(resource) > 0 { - fmt.Fprintf(out, "%s/%s\n", resource, name) - } 
else { - fmt.Fprintf(out, "%s\n", name) - } - } else { - // understandable output by default - if len(resource) > 0 { - fmt.Fprintf(out, "%s \"%s\" %s%s\n", resource, name, operation, dryRunMsg) - } else { - fmt.Fprintf(out, "\"%s\" %s%s\n", name, operation, dryRunMsg) - } - } -} - // ValidateOutputArgs validates -o flag args for mutations func ValidateOutputArgs(cmd *cobra.Command) error { outputMode := GetFlagString(cmd, "output") @@ -106,23 +81,11 @@ func ValidateOutputArgs(cmd *cobra.Command) error { return nil } -// PrinterForCommand returns the printer for the outputOptions (if given) or +// PrinterForOptions returns the printer for the outputOptions (if given) or // returns the default printer for the command. Requires that printer flags have // been added to cmd (see AddPrinterFlags). -// TODO: remove the dependency on cmd object -func PrinterForCommand(cmd *cobra.Command, outputOpts *printers.OutputOptions, mapper meta.RESTMapper, typer runtime.ObjectTyper, encoder runtime.Encoder, decoders []runtime.Decoder, options printers.PrintOptions) (printers.ResourcePrinter, error) { - - if outputOpts == nil { - outputOpts = extractOutputOptions(cmd) - } - - // this function may be invoked by a command that did not call AddPrinterFlags first, so we need - // to be safe about how we access the no-headers flag - noHeaders := false - if cmd.Flags().Lookup("no-headers") != nil { - noHeaders = GetFlagBool(cmd, "no-headers") - } - printer, err := printers.GetStandardPrinter(outputOpts, noHeaders, mapper, typer, encoder, decoders, options) +func PrinterForOptions(mapper meta.RESTMapper, typer runtime.ObjectTyper, encoder runtime.Encoder, decoders []runtime.Decoder, options *printers.PrintOptions) (printers.ResourcePrinter, error) { + printer, err := printers.GetStandardPrinter(mapper, typer, encoder, decoders, *options) if err != nil { return nil, err } @@ -134,37 +97,39 @@ func PrinterForCommand(cmd *cobra.Command, outputOpts *printers.OutputOptions, m printersinternal.AddHandlers(humanReadablePrinter) } - return maybeWrapSortingPrinter(cmd, printer), nil + return maybeWrapSortingPrinter(printer, *options), nil } -// PrintResourceInfoForCommand receives a *cobra.Command and a *resource.Info and -// attempts to print an info object based on the specified output format. If the -// object passed is non-generic, it attempts to print the object using a HumanReadablePrinter. -// Requires that printer flags have been added to cmd (see AddPrinterFlags). -func PrintResourceInfoForCommand(cmd *cobra.Command, info *resource.Info, f Factory, out io.Writer) error { - printer, err := f.PrinterForCommand(cmd, false, nil, printers.PrintOptions{}) +// ExtractCmdPrintOptions parses printer specific commandline args and +// returns a PrintOptions object. +// Requires that printer flags have been added to cmd (see AddPrinterFlags) +func ExtractCmdPrintOptions(cmd *cobra.Command, withNamespace bool) *printers.PrintOptions { + flags := cmd.Flags() + + columnLabel, err := flags.GetStringSlice("label-columns") if err != nil { - return err + columnLabel = []string{} } - if !printer.IsGeneric() { - printer, err = f.PrinterForMapping(cmd, false, nil, nil, false) - if err != nil { - return err - } - } - return printer.PrintObj(info.Object, out) -} -// extractOutputOptions parses printer specific commandline args and returns -// printers.OutputsOptions object. 
-func extractOutputOptions(cmd *cobra.Command) *printers.OutputOptions { - flags := cmd.Flags() + options := &printers.PrintOptions{ + NoHeaders: GetFlagBool(cmd, "no-headers"), + Wide: GetWideFlag(cmd), + ShowAll: GetFlagBool(cmd, "show-all"), + ShowLabels: GetFlagBool(cmd, "show-labels"), + AbsoluteTimestamps: isWatch(cmd), + ColumnLabels: columnLabel, + WithNamespace: withNamespace, + } var outputFormat string if flags.Lookup("output") != nil { outputFormat = GetFlagString(cmd, "output") } + if flags.Lookup("sort-by") != nil { + options.SortBy = GetFlagString(cmd, "sort-by") + } + // templates are logically optional for specifying a format. // TODO once https://github.com/kubernetes/kubernetes/issues/12668 is fixed, this should fall back to GetFlagString var templateFile string @@ -189,30 +154,72 @@ func extractOutputOptions(cmd *cobra.Command) *printers.OutputOptions { // this function may be invoked by a command that did not call AddPrinterFlags first, so we need // to be safe about how we access the allow-missing-template-keys flag - allowMissingTemplateKeys := false if flags.Lookup("allow-missing-template-keys") != nil { - allowMissingTemplateKeys = GetFlagBool(cmd, "allow-missing-template-keys") + options.AllowMissingKeys = GetFlagBool(cmd, "allow-missing-template-keys") } - return &printers.OutputOptions{ - FmtType: outputFormat, - FmtArg: templateFile, - AllowMissingKeys: allowMissingTemplateKeys, - } -} + options.OutputFormatType = outputFormat + options.OutputFormatArgument = templateFile -func maybeWrapSortingPrinter(cmd *cobra.Command, printer printers.ResourcePrinter) printers.ResourcePrinter { - sorting, err := cmd.Flags().GetString("sort-by") - if err != nil { - // error can happen on missing flag or bad flag type. In either case, this command didn't intent to sort - return printer - } + return options +} - if len(sorting) != 0 { +func maybeWrapSortingPrinter(printer printers.ResourcePrinter, printOpts printers.PrintOptions) printers.ResourcePrinter { + if len(printOpts.SortBy) != 0 { return &kubectl.SortingPrinter{ Delegate: printer, - SortField: fmt.Sprintf("{%s}", sorting), + SortField: fmt.Sprintf("{%s}", printOpts.SortBy), } } return printer } + +// ValidResourceTypeList returns a multi-line string containing the valid resources. May +// be called before the factory is initialized. +// TODO: This function implementation should be replaced with a real implementation from the +// discovery service. +func ValidResourceTypeList(f ClientAccessFactory) string { + // TODO: Should attempt to use the cached discovery list or fallback to a static list + // that is calculated from code compiled into the factory. 
+ return templates.LongDesc(`Valid resource types include: + + * all + * certificatesigningrequests (aka 'csr') + * clusterrolebindings + * clusterroles + * componentstatuses (aka 'cs') + * configmaps (aka 'cm') + * controllerrevisions + * cronjobs + * customresourcedefinition (aka 'crd') + * daemonsets (aka 'ds') + * deployments (aka 'deploy') + * endpoints (aka 'ep') + * events (aka 'ev') + * horizontalpodautoscalers (aka 'hpa') + * ingresses (aka 'ing') + * jobs + * limitranges (aka 'limits') + * namespaces (aka 'ns') + * networkpolicies (aka 'netpol') + * nodes (aka 'no') + * persistentvolumeclaims (aka 'pvc') + * persistentvolumes (aka 'pv') + * poddisruptionbudgets (aka 'pdb') + * podpreset + * pods (aka 'po') + * podsecuritypolicies (aka 'psp') + * podtemplates + * replicasets (aka 'rs') + * replicationcontrollers (aka 'rc') + * resourcequotas (aka 'quota') + * rolebindings + * roles + * secrets + * serviceaccounts (aka 'sa') + * services (aka 'svc') + * statefulsets (aka 'sts') + * storageclasses (aka 'sc') + + `) +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/shortcut_restmapper.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/shortcut_restmapper.go index 8d71af02ceb3..c5ecfa84b123 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/shortcut_restmapper.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/shortcut_restmapper.go @@ -17,7 +17,6 @@ limitations under the License. package util import ( - "errors" "strings" "github.com/golang/glog" @@ -37,11 +36,8 @@ type shortcutExpander struct { var _ meta.RESTMapper = &shortcutExpander{} -func NewShortcutExpander(delegate meta.RESTMapper, client discovery.DiscoveryInterface) (shortcutExpander, error) { - if client == nil { - return shortcutExpander{}, errors.New("Please provide discovery client to shortcut expander") - } - return shortcutExpander{RESTMapper: delegate, discoveryClient: client}, nil +func NewShortcutExpander(delegate meta.RESTMapper, client discovery.DiscoveryInterface) shortcutExpander { + return shortcutExpander{RESTMapper: delegate, discoveryClient: client} } func (e shortcutExpander) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { @@ -79,22 +75,24 @@ func (e shortcutExpander) RESTMappings(gk schema.GroupKind, versions ...string) func (e shortcutExpander) getShortcutMappings() ([]kubectl.ResourceShortcuts, error) { res := []kubectl.ResourceShortcuts{} // get server resources + // This can return an error *and* the results it was able to find. We don't need to fail on the error. 
apiResList, err := e.discoveryClient.ServerResources() - if err == nil { - for _, apiResources := range apiResList { - for _, apiRes := range apiResources.APIResources { - for _, shortName := range apiRes.ShortNames { - gv, err := schema.ParseGroupVersion(apiResources.GroupVersion) - if err != nil { - glog.V(1).Infof("Unable to parse groupversion = %s due to = %s", apiResources.GroupVersion, err.Error()) - continue - } - rs := kubectl.ResourceShortcuts{ - ShortForm: schema.GroupResource{Group: gv.Group, Resource: shortName}, - LongForm: schema.GroupResource{Group: gv.Group, Resource: apiRes.Name}, - } - res = append(res, rs) + if err != nil { + glog.V(1).Infof("Error loading discovery information: %v", err) + } + for _, apiResources := range apiResList { + for _, apiRes := range apiResources.APIResources { + for _, shortName := range apiRes.ShortNames { + gv, err := schema.ParseGroupVersion(apiResources.GroupVersion) + if err != nil { + glog.V(1).Infof("Unable to parse groupversion = %s due to = %s", apiResources.GroupVersion, err.Error()) + continue + } + rs := kubectl.ResourceShortcuts{ + ShortForm: schema.GroupResource{Group: gv.Group, Resource: shortName}, + LongForm: schema.GroupResource{Group: gv.Group, Resource: apiRes.Name}, } + res = append(res, rs) } } } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/configmap.go b/vendor/k8s.io/kubernetes/pkg/kubectl/configmap.go index a8161906c2cd..8d1ef5281ede 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/configmap.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/configmap.go @@ -23,9 +23,10 @@ import ( "path" "strings" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/validation" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/kubectl/util" "k8s.io/kubernetes/pkg/kubectl/util/hash" ) @@ -126,7 +127,7 @@ func (s ConfigMapGeneratorV1) StructuredGenerate() (runtime.Object, error) { if err := s.validate(); err != nil { return nil, err } - configMap := &api.ConfigMap{} + configMap := &v1.ConfigMap{} configMap.Name = s.Name configMap.Data = map[string]string{} if len(s.FileSources) > 0 { @@ -167,9 +168,9 @@ func (s ConfigMapGeneratorV1) validate() error { // handleConfigMapFromLiteralSources adds the specified literal source // information into the provided configMap. 
-func handleConfigMapFromLiteralSources(configMap *api.ConfigMap, literalSources []string) error { +func handleConfigMapFromLiteralSources(configMap *v1.ConfigMap, literalSources []string) error { for _, literalSource := range literalSources { - keyName, value, err := parseLiteralSource(literalSource) + keyName, value, err := util.ParseLiteralSource(literalSource) if err != nil { return err } @@ -183,9 +184,9 @@ func handleConfigMapFromLiteralSources(configMap *api.ConfigMap, literalSources // handleConfigMapFromFileSources adds the specified file source information // into the provided configMap -func handleConfigMapFromFileSources(configMap *api.ConfigMap, fileSources []string) error { +func handleConfigMapFromFileSources(configMap *v1.ConfigMap, fileSources []string) error { for _, fileSource := range fileSources { - keyName, filePath, err := parseFileSource(fileSource) + keyName, filePath, err := util.ParseFileSource(fileSource) if err != nil { return err } @@ -228,7 +229,7 @@ func handleConfigMapFromFileSources(configMap *api.ConfigMap, fileSources []stri // handleConfigMapFromEnvFileSource adds the specified env file source information // into the provided configMap -func handleConfigMapFromEnvFileSource(configMap *api.ConfigMap, envFileSource string) error { +func handleConfigMapFromEnvFileSource(configMap *v1.ConfigMap, envFileSource string) error { info, err := os.Stat(envFileSource) if err != nil { switch err := err.(type) { @@ -249,7 +250,7 @@ func handleConfigMapFromEnvFileSource(configMap *api.ConfigMap, envFileSource st // addKeyFromFileToConfigMap adds a key with the given name to a ConfigMap, populating // the value with the content of the given file path, or returns an error. -func addKeyFromFileToConfigMap(configMap *api.ConfigMap, keyName, filePath string) error { +func addKeyFromFileToConfigMap(configMap *v1.ConfigMap, keyName, filePath string) error { data, err := ioutil.ReadFile(filePath) if err != nil { return err @@ -259,7 +260,7 @@ func addKeyFromFileToConfigMap(configMap *api.ConfigMap, keyName, filePath strin // addKeyFromLiteralToConfigMap adds the given key and data to the given config map, // returning an error if the key is not valid or if the key already exists. -func addKeyFromLiteralToConfigMap(configMap *api.ConfigMap, keyName, data string) error { +func addKeyFromLiteralToConfigMap(configMap *v1.ConfigMap, keyName, data string) error { // Note, the rules for ConfigMap keys are the exact same as the ones for SecretKeys. 
if errs := validation.IsConfigMapKey(keyName); len(errs) != 0 { return fmt.Errorf("%q is not a valid key name for a ConfigMap: %s", keyName, strings.Join(errs, ";")) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/delete.go b/vendor/k8s.io/kubernetes/pkg/kubectl/delete.go index 3982bc17e7ec..1522f71b68eb 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/delete.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/delete.go @@ -28,9 +28,9 @@ import ( utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/apis/batch" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" appsclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion" diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/explain.go b/vendor/k8s.io/kubernetes/pkg/kubectl/explain.go deleted file mode 100644 index 06190ab9ecd6..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/explain.go +++ /dev/null @@ -1,251 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "fmt" - "io" - "strings" - - "github.com/emicklei/go-restful-swagger12" - - "k8s.io/apimachinery/pkg/api/meta" - apiutil "k8s.io/kubernetes/pkg/api/util" -) - -var allModels = make(map[string]*swagger.NamedModel) - -// SplitAndParseResourceRequest separates the users input into a model and fields -func SplitAndParseResourceRequest(inResource string, mapper meta.RESTMapper) (string, []string, error) { - inResource, fieldsPath := splitDotNotation(inResource) - inResource, _ = mapper.ResourceSingularizer(inResource) - return inResource, fieldsPath, nil -} - -// PrintModelDescription prints the description of a specific model or dot path. -// If recursive, all components nested within the fields of the schema will be -// printed. -func PrintModelDescription(inModel string, fieldsPath []string, w io.Writer, swaggerSchema *swagger.ApiDeclaration, recursive bool) error { - apiVer := apiutil.GetVersion(swaggerSchema.ApiVersion) + "." 
- - var pointedModel *swagger.NamedModel - for i := range swaggerSchema.Models.List { - name := swaggerSchema.Models.List[i].Name - - allModels[name] = &swaggerSchema.Models.List[i] - if strings.ToLower(name) == strings.ToLower(apiVer+inModel) { - pointedModel = &swaggerSchema.Models.List[i] - } - } - if pointedModel == nil { - return fmt.Errorf("requested resource %q is not defined", inModel) - } - - if len(fieldsPath) == 0 { - return printTopLevelResourceInfo(w, pointedModel, recursive) - } - - var pointedModelAsProp *swagger.NamedModelProperty - for _, field := range fieldsPath { - if prop, nextModel, isModel := getField(pointedModel, field); prop != nil { - if isModel { - pointedModelAsProp = prop - pointedModel = allModels[nextModel] - } else { - return printPrimitive(w, prop) - } - } else { - return fmt.Errorf("field %q does not exist", field) - } - } - return printModelInfo(w, pointedModel, pointedModelAsProp, recursive) -} - -func splitDotNotation(model string) (string, []string) { - var fieldsPath []string - dotModel := strings.Split(model, ".") - if len(dotModel) >= 1 { - fieldsPath = dotModel[1:] - } - return dotModel[0], fieldsPath -} - -func getPointedModel(prop *swagger.ModelProperty) (string, bool) { - if prop.Ref != nil { - return *prop.Ref, true - } else if *prop.Type == "array" && prop.Items.Ref != nil { - return *prop.Items.Ref, true - } - return "", false -} - -func getField(model *swagger.NamedModel, sField string) (*swagger.NamedModelProperty, string, bool) { - for _, prop := range model.Model.Properties.List { - if prop.Name == sField { - pointedModel, isModel := getPointedModel(&prop.Property) - return &prop, pointedModel, isModel - } - } - return nil, "", false -} - -func printModelInfo(w io.Writer, model *swagger.NamedModel, modelProp *swagger.NamedModelProperty, recursive bool) error { - t, _ := getFieldType(&modelProp.Property) - fmt.Fprintf(w, "RESOURCE: %s <%s>\n\n", modelProp.Name, t) - fieldDesc, _ := wrapAndIndentText(modelProp.Property.Description, " ", 80) - fmt.Fprintf(w, "DESCRIPTION:\n%s\n\n%s\n", fieldDesc, indentText(model.Model.Description, " ")) - return printFields(w, model, recursive) -} - -func printPrimitive(w io.Writer, field *swagger.NamedModelProperty) error { - t, _ := getFieldType(&field.Property) - fmt.Fprintf(w, "FIELD: %s <%s>\n\n", field.Name, t) - d, _ := wrapAndIndentText(field.Property.Description, " ", 80) - fmt.Fprintf(w, "DESCRIPTION:\n%s\n", d) - return nil -} - -func printTopLevelResourceInfo(w io.Writer, model *swagger.NamedModel, recursive bool) error { - fmt.Fprintf(w, "DESCRIPTION:\n%s\n", model.Model.Description) - return printFields(w, model, recursive) -} - -func printFields(w io.Writer, model *swagger.NamedModel, recursive bool) error { - fmt.Fprint(w, "\nFIELDS:\n") - for _, field := range model.Model.Properties.List { - fieldType, err := getFieldType(&field.Property) - if err != nil { - return err - } - - if arrayContains(model.Model.Required, field.Name) { - fmt.Fprintf(w, " %s\t<%s> -required-\n", field.Name, fieldType) - } else { - fmt.Fprintf(w, " %s\t<%s>\n", field.Name, fieldType) - } - - if recursive { - pointedModel, isModel := getPointedModel(&field.Property) - if isModel { - for _, nestedField := range allModels[pointedModel].Model.Properties.List { - t, _ := getFieldType(&nestedField.Property) - fmt.Fprintf(w, " %s\t<%s>\n", nestedField.Name, t) - } - } - } else { - fieldDesc, _ := wrapAndIndentText(field.Property.Description, " ", 80) - fmt.Fprintf(w, "%s\n\n", fieldDesc) - } - } - fmt.Fprint(w, "\n") - 
return nil -} - -func getFieldType(prop *swagger.ModelProperty) (string, error) { - if prop.Type == nil { - return "Object", nil - } else if *prop.Type == "any" { - // Swagger Spec doesn't return information for maps. - return "map[string]string", nil - } else if *prop.Type == "array" { - if prop.Items == nil { - return "", fmt.Errorf("error in swagger spec. Property: %v contains an array without type", prop) - } - if prop.Items.Ref != nil { - fieldType := "[]Object" - return fieldType, nil - } - fieldType := "[]" + *prop.Items.Type - return fieldType, nil - } - return *prop.Type, nil -} - -func wrapAndIndentText(desc, indent string, lim int) (string, error) { - words := strings.Split(strings.Replace(strings.TrimSpace(desc), "\n", " ", -1), " ") - n := len(words) - - for i := 0; i < n; i++ { - if len(words[i]) > lim { - if strings.Contains(words[i], "/") { - s := breakURL(words[i]) - words = append(words[:i], append(s, words[i+1:]...)...) - i = i + len(s) - 1 - } else { - fmt.Println(len(words[i])) - return "", fmt.Errorf("there are words longer that the break limit is") - } - } - } - - var lines []string - line := []string{indent} - lineL := len(indent) - for i := 0; i < len(words); i++ { - w := words[i] - - if strings.HasSuffix(w, "/") && lineL+len(w)-1 < lim { - prev := line[len(line)-1] - if strings.HasSuffix(prev, "/") { - if i+1 < len(words)-1 && !strings.HasSuffix(words[i+1], "/") { - w = strings.TrimSuffix(w, "/") - } - - line[len(line)-1] = prev + w - lineL += len(w) - } else { - line = append(line, w) - lineL += len(w) + 1 - } - } else if lineL+len(w) < lim { - line = append(line, w) - lineL += len(w) + 1 - } else { - lines = append(lines, strings.Join(line, " ")) - line = []string{indent, w} - lineL = len(indent) + len(w) - } - } - lines = append(lines, strings.Join(line, " ")) - - return strings.Join(lines, "\n"), nil -} - -func breakURL(url string) []string { - var buf []string - for _, part := range strings.Split(url, "/") { - buf = append(buf, part+"/") - } - return buf -} - -func indentText(text, indent string) string { - lines := strings.Split(text, "\n") - for i := range lines { - lines[i] = indent + lines[i] - } - return strings.Join(lines, "\n") -} - -func arrayContains(s []string, e string) bool { - for _, a := range s { - if a == e { - return true - } - } - return false -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/history.go b/vendor/k8s.io/kubernetes/pkg/kubectl/history.go index 5464686291da..36f95776d231 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/history.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/history.go @@ -27,18 +27,18 @@ import ( extensionsv1beta1 "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/json" "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/client-go/kubernetes" clientappsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1" - clientextensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" - "k8s.io/kubernetes/pkg/api" - apiv1 "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/apis/apps" - "k8s.io/kubernetes/pkg/apis/extensions" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + clientextv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" + api "k8s.io/kubernetes/pkg/apis/core" + apiv1 "k8s.io/kubernetes/pkg/apis/core/v1" deploymentutil 
"k8s.io/kubernetes/pkg/controller/deployment/util" + kapps "k8s.io/kubernetes/pkg/kubectl/apps" sliceutil "k8s.io/kubernetes/pkg/kubectl/util/slice" printersinternal "k8s.io/kubernetes/pkg/printers/internalversion" ) @@ -52,26 +52,58 @@ type HistoryViewer interface { ViewHistory(namespace, name string, revision int64) (string, error) } -func HistoryViewerFor(kind schema.GroupKind, c clientset.Interface) (HistoryViewer, error) { - switch kind { - case extensions.Kind("Deployment"), apps.Kind("Deployment"): - return &DeploymentHistoryViewer{c}, nil - case apps.Kind("StatefulSet"): - return &StatefulSetHistoryViewer{c}, nil - case extensions.Kind("DaemonSet"), apps.Kind("DaemonSet"): - return &DaemonSetHistoryViewer{c}, nil +type HistoryVisitor struct { + clientset kubernetes.Interface + result HistoryViewer +} + +func (v *HistoryVisitor) VisitDeployment(elem kapps.GroupKindElement) { + v.result = &DeploymentHistoryViewer{v.clientset} +} + +func (v *HistoryVisitor) VisitStatefulSet(kind kapps.GroupKindElement) { + v.result = &StatefulSetHistoryViewer{v.clientset} +} + +func (v *HistoryVisitor) VisitDaemonSet(kind kapps.GroupKindElement) { + v.result = &DaemonSetHistoryViewer{v.clientset} +} + +func (v *HistoryVisitor) VisitJob(kind kapps.GroupKindElement) {} +func (v *HistoryVisitor) VisitPod(kind kapps.GroupKindElement) {} +func (v *HistoryVisitor) VisitReplicaSet(kind kapps.GroupKindElement) {} +func (v *HistoryVisitor) VisitReplicationController(kind kapps.GroupKindElement) {} +func (v *HistoryVisitor) VisitCronJob(kind kapps.GroupKindElement) {} + +// HistoryViewerFor returns an implementation of HistoryViewer interface for the given schema kind +func HistoryViewerFor(kind schema.GroupKind, c kubernetes.Interface) (HistoryViewer, error) { + elem := kapps.GroupKindElement(kind) + visitor := &HistoryVisitor{ + clientset: c, + } + + // Determine which HistoryViewer we need here + err := elem.Accept(visitor) + + if err != nil { + return nil, fmt.Errorf("error retrieving history for %q, %v", kind.String(), err) + } + + if visitor.result == nil { + return nil, fmt.Errorf("no history viewer has been implemented for %q", kind.String()) } - return nil, fmt.Errorf("no history viewer has been implemented for %q", kind) + + return visitor.result, nil } type DeploymentHistoryViewer struct { - c clientset.Interface + c kubernetes.Interface } // ViewHistory returns a revision-to-replicaset map as the revision history of a deployment // TODO: this should be a describer func (h *DeploymentHistoryViewer) ViewHistory(namespace, name string, revision int64) (string, error) { - versionedExtensionsClient := versionedExtensionsClientV1beta1(h.c) + versionedExtensionsClient := h.c.ExtensionsV1beta1() deployment, err := versionedExtensionsClient.Deployments(namespace).Get(name, metav1.GetOptions{}) if err != nil { return "", fmt.Errorf("failed to retrieve deployment %s: %v", name, err) @@ -138,7 +170,7 @@ func (h *DeploymentHistoryViewer) ViewHistory(namespace, name string, revision i func printTemplate(template *v1.PodTemplateSpec) (string, error) { buf := bytes.NewBuffer([]byte{}) internalTemplate := &api.PodTemplateSpec{} - if err := apiv1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(template, internalTemplate, nil); err != nil { + if err := apiv1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(template, internalTemplate, nil); err != nil { return "", fmt.Errorf("failed to convert podtemplate, %v", err) } w := printersinternal.NewPrefixWriter(buf) @@ -147,24 +179,21 @@ func printTemplate(template 
*v1.PodTemplateSpec) (string, error) { } type DaemonSetHistoryViewer struct { - c clientset.Interface + c kubernetes.Interface } // ViewHistory returns a revision-to-history map as the revision history of a deployment // TODO: this should be a describer func (h *DaemonSetHistoryViewer) ViewHistory(namespace, name string, revision int64) (string, error) { - versionedAppsClient := versionedAppsClientV1beta1(h.c) - versionedExtensionsClient := versionedExtensionsClientV1beta1(h.c) - versionedObj, allHistory, err := controlledHistories(versionedAppsClient, versionedExtensionsClient, namespace, name, "DaemonSet") + ds, history, err := daemonSetHistory(h.c.ExtensionsV1beta1(), h.c.AppsV1beta1(), namespace, name) if err != nil { - return "", fmt.Errorf("unable to find history controlled by DaemonSet %s: %v", name, err) + return "", err } historyInfo := make(map[int64]*appsv1beta1.ControllerRevision) - for _, history := range allHistory { + for _, history := range history { // TODO: for now we assume revisions don't overlap, we may need to handle it historyInfo[history.Revision] = history } - if len(historyInfo) == 0 { return "No rollout history found.", nil } @@ -175,13 +204,7 @@ func (h *DaemonSetHistoryViewer) ViewHistory(namespace, name string, revision in if !ok { return "", fmt.Errorf("unable to find the specified revision") } - - versionedDS, ok := versionedObj.(*extensionsv1beta1.DaemonSet) - if !ok { - return "", fmt.Errorf("unexpected non-DaemonSet object returned: %v", versionedDS) - } - - dsOfHistory, err := applyHistory(versionedDS, history) + dsOfHistory, err := applyDaemonSetHistory(ds, history) if err != nil { return "", fmt.Errorf("unable to parse history %s", history.Name) } @@ -211,93 +234,46 @@ func (h *DaemonSetHistoryViewer) ViewHistory(namespace, name string, revision in } type StatefulSetHistoryViewer struct { - c clientset.Interface -} - -func getOwner(revision apps.ControllerRevision) *metav1.OwnerReference { - ownerRefs := revision.GetOwnerReferences() - for i := range ownerRefs { - owner := &ownerRefs[i] - if owner.Controller != nil && *owner.Controller == true { - return owner - } - } - return nil + c kubernetes.Interface } // ViewHistory returns a list of the revision history of a statefulset // TODO: this should be a describer // TODO: needs to implement detailed revision view func (h *StatefulSetHistoryViewer) ViewHistory(namespace, name string, revision int64) (string, error) { - - sts, err := h.c.Apps().StatefulSets(namespace).Get(name, metav1.GetOptions{}) + _, history, err := statefulSetHistory(h.c.AppsV1beta1(), namespace, name) if err != nil { - return "", fmt.Errorf("failed to retrieve statefulset %s", err) - } - selector, err := metav1.LabelSelectorAsSelector(sts.Spec.Selector) - if err != nil { - return "", fmt.Errorf("failed to retrieve statefulset history %s", err) - } - revisions, err := h.c.Apps().ControllerRevisions(namespace).List(metav1.ListOptions{LabelSelector: selector.String()}) - if err != nil { - return "", fmt.Errorf("failed to retrieve statefulset history %s", err) + return "", err } - if len(revisions.Items) <= 0 { + + if len(history) <= 0 { return "No rollout history found.", nil } - revisionNumbers := make([]int64, len(revisions.Items)) - for i := range revisions.Items { - if owner := getOwner(revisions.Items[i]); owner != nil && owner.UID == sts.UID { - revisionNumbers[i] = revisions.Items[i].Revision - } + revisions := make([]int64, len(history)) + for _, revision := range history { + revisions = append(revisions, revision.Revision) } - 
sliceutil.SortInts64(revisionNumbers) + sliceutil.SortInts64(revisions) return tabbedString(func(out io.Writer) error { fmt.Fprintf(out, "REVISION\n") - for _, r := range revisionNumbers { + for _, r := range revisions { fmt.Fprintf(out, "%d\n", r) } return nil }) } -// controlledHistories returns all ControllerRevisions controlled by the given API object -func controlledHistories(apps clientappsv1beta1.AppsV1beta1Interface, extensions clientextensionsv1beta1.ExtensionsV1beta1Interface, namespace, name, kind string) (runtime.Object, []*appsv1beta1.ControllerRevision, error) { - var obj runtime.Object - var labelSelector *metav1.LabelSelector - - switch kind { - case "DaemonSet": - ds, err := extensions.DaemonSets(namespace).Get(name, metav1.GetOptions{}) - if err != nil { - return nil, nil, fmt.Errorf("failed to retrieve DaemonSet %s: %v", name, err) - } - labelSelector = ds.Spec.Selector - obj = ds - case "StatefulSet": - ss, err := apps.StatefulSets(namespace).Get(name, metav1.GetOptions{}) - if err != nil { - return nil, nil, fmt.Errorf("failed to retrieve StatefulSet %s: %v", name, err) - } - labelSelector = ss.Spec.Selector - obj = ss - default: - return nil, nil, fmt.Errorf("unsupported API object kind: %s", kind) - } - +// controlledHistories returns all ControllerRevisions in namespace that selected by selector and owned by accessor +func controlledHistory( + apps clientappsv1beta1.AppsV1beta1Interface, + namespace string, + selector labels.Selector, + accessor metav1.Object) ([]*appsv1beta1.ControllerRevision, error) { var result []*appsv1beta1.ControllerRevision - selector, err := metav1.LabelSelectorAsSelector(labelSelector) - if err != nil { - return nil, nil, err - } historyList, err := apps.ControllerRevisions(namespace).List(metav1.ListOptions{LabelSelector: selector.String()}) if err != nil { - return nil, nil, err - } - accessor, err := meta.Accessor(obj) - if err != nil { - return nil, nil, fmt.Errorf("failed to obtain accessor for %s named %s: %v", kind, name, err) + return nil, err } for i := range historyList.Items { history := historyList.Items[i] @@ -306,16 +282,59 @@ func controlledHistories(apps clientappsv1beta1.AppsV1beta1Interface, extensions result = append(result, &history) } } - return obj, result, nil + return result, nil } -// applyHistory returns a specific revision of DaemonSet by applying the given history to a copy of the given DaemonSet -func applyHistory(ds *extensionsv1beta1.DaemonSet, history *appsv1beta1.ControllerRevision) (*extensionsv1beta1.DaemonSet, error) { - obj, err := api.Scheme.New(ds.GroupVersionKind()) +// daemonSetHistory returns the DaemonSet named name in namespace and all ControllerRevisions in its history. 
+func daemonSetHistory( + ext clientextv1beta1.ExtensionsV1beta1Interface, + apps clientappsv1beta1.AppsV1beta1Interface, + namespace, name string) (*extensionsv1beta1.DaemonSet, []*appsv1beta1.ControllerRevision, error) { + ds, err := ext.DaemonSets(namespace).Get(name, metav1.GetOptions{}) if err != nil { - return nil, err + return nil, nil, fmt.Errorf("failed to retrieve DaemonSet %s: %v", name, err) + } + selector, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector) + if err != nil { + return nil, nil, fmt.Errorf("failed to create selector for DaemonSet %s: %v", ds.Name, err) } - clone := obj.(*extensionsv1beta1.DaemonSet) + accessor, err := meta.Accessor(ds) + if err != nil { + return nil, nil, fmt.Errorf("failed to create accessor for DaemonSet %s: %v", ds.Name, err) + } + history, err := controlledHistory(apps, ds.Namespace, selector, accessor) + if err != nil { + return nil, nil, fmt.Errorf("unable to find history controlled by DaemonSet %s: %v", ds.Name, err) + } + return ds, history, nil +} + +// statefulSetHistory returns the StatefulSet named name in namespace and all ControllerRevisions in its history. +func statefulSetHistory( + apps clientappsv1beta1.AppsV1beta1Interface, + namespace, name string) (*appsv1beta1.StatefulSet, []*appsv1beta1.ControllerRevision, error) { + sts, err := apps.StatefulSets(namespace).Get(name, metav1.GetOptions{}) + if err != nil { + return nil, nil, fmt.Errorf("failed to retrieve Statefulset %s: %s", name, err.Error()) + } + selector, err := metav1.LabelSelectorAsSelector(sts.Spec.Selector) + if err != nil { + return nil, nil, fmt.Errorf("failed to create selector for StatefulSet %s: %s", name, err.Error()) + } + accessor, err := meta.Accessor(sts) + if err != nil { + return nil, nil, fmt.Errorf("failed to obtain accessor for StatefulSet %s: %s", name, err.Error()) + } + history, err := controlledHistory(apps, namespace, selector, accessor) + if err != nil { + return nil, nil, fmt.Errorf("unable to find history controlled by StatefulSet %s: %v", name, err) + } + return sts, history, nil +} + +// applyDaemonSetHistory returns a specific revision of DaemonSet by applying the given history to a copy of the given DaemonSet +func applyDaemonSetHistory(ds *extensionsv1beta1.DaemonSet, history *appsv1beta1.ControllerRevision) (*extensionsv1beta1.DaemonSet, error) { + clone := ds.DeepCopy() cloneBytes, err := json.Marshal(clone) if err != nil { return nil, err diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/kubectl.go b/vendor/k8s.io/kubernetes/pkg/kubectl/kubectl.go index 2cf9425eb8e1..f13cbc66c4f2 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/kubectl.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/kubectl.go @@ -18,18 +18,11 @@ limitations under the License. package kubectl import ( - "errors" - "fmt" - "path" "strings" "k8s.io/apimachinery/pkg/runtime/schema" ) -const ( - kubectlAnnotationPrefix = "kubectl.kubernetes.io/" -) - type NamespaceInfo struct { Namespace string } @@ -188,44 +181,3 @@ func ResourceAliases(rs []string) []string { } return as } - -// parseFileSource parses the source given. Acceptable formats include: -// -// 1. source-path: the basename will become the key name -// 2. source-name=source-path: the source-name will become the key name and source-path is the path to the key file -// -// Key names cannot include '='. 
-func parseFileSource(source string) (keyName, filePath string, err error) { - numSeparators := strings.Count(source, "=") - switch { - case numSeparators == 0: - return path.Base(source), source, nil - case numSeparators == 1 && strings.HasPrefix(source, "="): - return "", "", fmt.Errorf("key name for file path %v missing.", strings.TrimPrefix(source, "=")) - case numSeparators == 1 && strings.HasSuffix(source, "="): - return "", "", fmt.Errorf("file path for key name %v missing.", strings.TrimSuffix(source, "=")) - case numSeparators > 1: - return "", "", errors.New("Key names or file paths cannot contain '='.") - default: - components := strings.Split(source, "=") - return components[0], components[1], nil - } -} - -// parseLiteralSource parses the source key=val pair into its component pieces. -// This functionality is distinguished from strings.SplitN(source, "=", 2) since -// it returns an error in the case of empty keys, values, or a missing equals -// sign. -func parseLiteralSource(source string) (keyName, value string, err error) { - // leading equal is invalid - if strings.Index(source, "=") == 0 { - return "", "", fmt.Errorf("invalid literal source %v, expected key=value", source) - } - // split after the first equal (so values can have the = character) - items := strings.SplitN(source, "=", 2) - if len(items) != 2 { - return "", "", fmt.Errorf("invalid literal source %v, expected key=value", source) - } - - return items[0], items[1], nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/namespace.go b/vendor/k8s.io/kubernetes/pkg/kubectl/namespace.go index a649ced7ff20..03723cdaa84f 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/namespace.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/namespace.go @@ -19,8 +19,8 @@ package kubectl import ( "fmt" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/kubernetes/pkg/api" ) // NamespaceGeneratorV1 supports stable generation of a namespace @@ -65,7 +65,7 @@ func (g *NamespaceGeneratorV1) StructuredGenerate() (runtime.Object, error) { if err := g.validate(); err != nil { return nil, err } - namespace := &api.Namespace{} + namespace := &v1.Namespace{} namespace.Name = g.Name return namespace, nil } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/pdb.go b/vendor/k8s.io/kubernetes/pkg/kubectl/pdb.go index 08d8a74b8750..df2cc8f53b33 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/pdb.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/pdb.go @@ -18,12 +18,11 @@ package kubectl import ( "fmt" - "os" + policy "k8s.io/api/policy/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/kubernetes/pkg/apis/policy" ) // PodDisruptionBudgetV1Generator supports stable generation of a pod disruption budget. 
@@ -39,7 +38,7 @@ var _ StructuredGenerator = &PodDisruptionBudgetV1Generator{} func (PodDisruptionBudgetV1Generator) ParamNames() []GeneratorParam { return []GeneratorParam{ {"name", true}, - {"min-available", true}, + {"min-available", false}, {"selector", true}, } } @@ -51,15 +50,15 @@ func (s PodDisruptionBudgetV1Generator) Generate(params map[string]interface{}) } name, isString := params["name"].(string) if !isString { - return nil, fmt.Errorf("expected string, saw %v for 'name'", name) + return nil, fmt.Errorf("expected string, found %T for 'name'", params["name"]) } minAvailable, isString := params["min-available"].(string) if !isString { - return nil, fmt.Errorf("expected string, found %v", minAvailable) + return nil, fmt.Errorf("expected string, found %T for 'min-available'", params["min-available"]) } selector, isString := params["selector"].(string) if !isString { - return nil, fmt.Errorf("expected string, found %v", selector) + return nil, fmt.Errorf("expected string, found %T for 'selector'", params["selector"]) } delegate := &PodDisruptionBudgetV1Generator{Name: name, MinAvailable: minAvailable, Selector: selector} return delegate.StructuredGenerate() @@ -123,7 +122,7 @@ func (PodDisruptionBudgetV2Generator) ParamNames() []GeneratorParam { {"name", true}, {"min-available", false}, {"max-unavailable", false}, - {"selector", false}, + {"selector", true}, } } @@ -135,22 +134,22 @@ func (s PodDisruptionBudgetV2Generator) Generate(params map[string]interface{}) name, isString := params["name"].(string) if !isString { - return nil, fmt.Errorf("expected string, saw %v for 'name'", name) + return nil, fmt.Errorf("expected string, found %T for 'name'", params["name"]) } minAvailable, isString := params["min-available"].(string) if !isString { - return nil, fmt.Errorf("expected string, found %v", minAvailable) + return nil, fmt.Errorf("expected string, found %T for 'min-available'", params["min-available"]) } - maxUnavailable, isString := params["max-available"].(string) + maxUnavailable, isString := params["max-unavailable"].(string) if !isString { - return nil, fmt.Errorf("expected string, found %v", maxUnavailable) + return nil, fmt.Errorf("expected string, found %T for 'max-unavailable'", params["max-unavailable"]) } selector, isString := params["selector"].(string) if !isString { - return nil, fmt.Errorf("expected string, found %v", selector) + return nil, fmt.Errorf("expected string, found %T for 'selector'", params["selector"]) } delegate := &PodDisruptionBudgetV2Generator{Name: name, MinAvailable: minAvailable, MaxUnavailable: maxUnavailable, Selector: selector} return delegate.StructuredGenerate() @@ -167,15 +166,6 @@ func (s *PodDisruptionBudgetV2Generator) StructuredGenerate() (runtime.Object, e return nil, err } - if len(s.MaxUnavailable) == 0 && len(s.MinAvailable) == 0 { - s.MinAvailable = "1" - - // This behavior is intended for backward compatibility. - // TODO: remove in Kubernetes 1.8 - fmt.Fprintln(os.Stderr, "Deprecated behavior in kubectl create pdb: Defaulting min-available to 1. "+ - "Kubernetes 1.8 will remove this default, and one of min-available/max-available must be specified. 
") - } - if len(s.MaxUnavailable) > 0 { maxUnavailable := intstr.Parse(s.MaxUnavailable) return &policy.PodDisruptionBudget{ @@ -213,6 +203,9 @@ func (s *PodDisruptionBudgetV2Generator) validate() error { if len(s.Selector) == 0 { return fmt.Errorf("a selector must be specified") } + if len(s.MaxUnavailable) == 0 && len(s.MinAvailable) == 0 { + return fmt.Errorf("one of min-available or max-unavailable must be specified") + } if len(s.MaxUnavailable) > 0 && len(s.MinAvailable) > 0 { return fmt.Errorf("min-available and max-unavailable cannot be both specified") } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/plugins/BUILD b/vendor/k8s.io/kubernetes/pkg/kubectl/plugins/BUILD index 31ee686c6234..4c497cac9408 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/plugins/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/plugins/BUILD @@ -14,6 +14,7 @@ go_library( "plugins.go", "runner.go", ], + importpath = "k8s.io/kubernetes/pkg/kubectl/plugins", deps = [ "//vendor/github.com/ghodss/yaml:go_default_library", "//vendor/github.com/golang/glog:go_default_library", @@ -43,6 +44,7 @@ go_test( "plugins_test.go", "runner_test.go", ], + importpath = "k8s.io/kubernetes/pkg/kubectl/plugins", library = ":go_default_library", deps = ["//vendor/github.com/spf13/pflag:go_default_library"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/plugins/env.go b/vendor/k8s.io/kubernetes/pkg/kubectl/plugins/env.go index f7becb52b192..7845b5fe99b6 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/plugins/env.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/plugins/env.go @@ -24,19 +24,21 @@ import ( "github.com/spf13/pflag" ) -// Env represents an environment variable with its name and value +// Env represents an environment variable with its name and value. type Env struct { N string V string } +// String returns "name=value" string. func (e Env) String() string { return fmt.Sprintf("%s=%s", e.N, e.V) } -// EnvList is a list of Env +// EnvList is a list of Env. type EnvList []Env +// Slice returns a slice of "name=value" strings. func (e EnvList) Slice() []string { envs := []string{} for _, env := range e { @@ -45,6 +47,7 @@ func (e EnvList) Slice() []string { return envs } +// Merge converts "name=value" strings into Env values and merges them into e. func (e EnvList) Merge(s ...string) EnvList { newList := e newList = append(newList, fromSlice(s)...) @@ -53,12 +56,14 @@ func (e EnvList) Merge(s ...string) EnvList { // EnvProvider provides the environment in which the plugin will run. type EnvProvider interface { + // Env returns the env list. Env() (EnvList, error) } -// MultiEnvProvider is an EnvProvider for multiple env providers, returns on first error. +// MultiEnvProvider satisfies the EnvProvider interface for multiple env providers. type MultiEnvProvider []EnvProvider +// Env returns the combined env list of multiple env providers, returns on first error. func (p MultiEnvProvider) Env() (EnvList, error) { env := EnvList{} for _, provider := range p { @@ -71,9 +76,10 @@ func (p MultiEnvProvider) Env() (EnvList, error) { return env, nil } -// PluginCallerEnvProvider provides env with the path to the caller binary (usually full path to 'kubectl'). +// PluginCallerEnvProvider satisfies the EnvProvider interface. type PluginCallerEnvProvider struct{} +// Env returns env with the path to the caller binary (usually full path to 'kubectl'). 
func (p *PluginCallerEnvProvider) Env() (EnvList, error) { caller, err := os.Executable() if err != nil { @@ -84,11 +90,12 @@ func (p *PluginCallerEnvProvider) Env() (EnvList, error) { }, nil } -// PluginDescriptorEnvProvider provides env vars with information about the running plugin. +// PluginDescriptorEnvProvider satisfies the EnvProvider interface. type PluginDescriptorEnvProvider struct { Plugin *Plugin } +// Env returns env with information about the running plugin. func (p *PluginDescriptorEnvProvider) Env() (EnvList, error) { if p.Plugin == nil { return []Env{}, fmt.Errorf("plugin not present to extract env") @@ -104,19 +111,24 @@ func (p *PluginDescriptorEnvProvider) Env() (EnvList, error) { return env, nil } -// OSEnvProvider provides current environment from the operating system. +// OSEnvProvider satisfies the EnvProvider interface. type OSEnvProvider struct{} +// Env returns the current environment from the operating system. func (p *OSEnvProvider) Env() (EnvList, error) { return fromSlice(os.Environ()), nil } +// EmptyEnvProvider satisfies the EnvProvider interface. type EmptyEnvProvider struct{} +// Env returns an empty environment. func (p *EmptyEnvProvider) Env() (EnvList, error) { return EnvList{}, nil } +// FlagToEnvName converts a flag string into a UNIX like environment variable name. +// e.g --some-flag => "PREFIX_SOME_FLAG" func FlagToEnvName(flagName, prefix string) string { envName := strings.TrimPrefix(flagName, "--") envName = strings.ToUpper(envName) @@ -125,6 +137,8 @@ func FlagToEnvName(flagName, prefix string) string { return envName } +// FlagToEnv converts a flag and its value into an Env. +// e.g --some-flag some-value => Env{N: "PREFIX_SOME_FLAG", V="SOME_VALUE"} func FlagToEnv(flag *pflag.Flag, prefix string) Env { envName := FlagToEnvName(flag.Name, prefix) return Env{envName, flag.Value.String()} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/plugins/loader.go b/vendor/k8s.io/kubernetes/pkg/kubectl/plugins/loader.go index 74899e1d5ecc..6cbaffc34de6 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/plugins/loader.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/plugins/loader.go @@ -28,10 +28,12 @@ import ( "k8s.io/client-go/tools/clientcmd" ) +// PluginDescriptorFilename is the default file name for plugin descriptions. const PluginDescriptorFilename = "plugin.yaml" // PluginLoader is capable of loading a list of plugin descriptions. type PluginLoader interface { + // Load loads the plugin descriptions. Load() (Plugins, error) } @@ -107,7 +109,7 @@ func (l *DirectoryPluginLoader) Load() (Plugins, error) { return list, err } -// UserDirPluginLoader is a PluginLoader that loads plugins from the +// UserDirPluginLoader returns a PluginLoader that loads plugins from the // "plugins" directory under the user's kubeconfig dir (usually "~/.kube/plugins/"). func UserDirPluginLoader() PluginLoader { dir := filepath.Join(clientcmd.RecommendedConfigDir, "plugins") @@ -116,7 +118,7 @@ func UserDirPluginLoader() PluginLoader { } } -// PathFromEnvVarPluginLoader is a PluginLoader that loads plugins from one or more +// PathFromEnvVarPluginLoader returns a PluginLoader that loads plugins from one or more // directories specified by the provided env var name. In case the env var is not // set, the PluginLoader just loads nothing. A list of subdirectories can be provided, // which will be appended to each path specified by the env var. 
@@ -135,17 +137,17 @@ func PathFromEnvVarPluginLoader(envVarName string, subdirs ...string) PluginLoad return loader } -// PluginsEnvVarPluginLoader is a PluginLoader that loads plugins from one or more +// KubectlPluginsPathPluginLoader returns a PluginLoader that loads plugins from one or more // directories specified by the KUBECTL_PLUGINS_PATH env var. -func PluginsEnvVarPluginLoader() PluginLoader { +func KubectlPluginsPathPluginLoader() PluginLoader { return PathFromEnvVarPluginLoader("KUBECTL_PLUGINS_PATH") } -// XDGDataPluginLoader is a PluginLoader that loads plugins from one or more +// XDGDataDirsPluginLoader returns a PluginLoader that loads plugins from one or more // directories specified by the XDG system directory structure spec in the // XDG_DATA_DIRS env var, plus the "kubectl/plugins/" suffix. According to the // spec, if XDG_DATA_DIRS is not set it defaults to "/usr/local/share:/usr/share". -func XDGDataPluginLoader() PluginLoader { +func XDGDataDirsPluginLoader() PluginLoader { envVarName := "XDG_DATA_DIRS" if len(os.Getenv(envVarName)) > 0 { return PathFromEnvVarPluginLoader(envVarName, "kubectl", "plugins") @@ -164,6 +166,7 @@ func XDGDataPluginLoader() PluginLoader { // a successful loading means every encapsulated loader was able to load without errors. type MultiPluginLoader []PluginLoader +// Load calls Load() for each of the encapsulated Loaders. func (l MultiPluginLoader) Load() (Plugins, error) { plugins := Plugins{} for _, loader := range l { @@ -180,6 +183,7 @@ func (l MultiPluginLoader) Load() (Plugins, error) { // but is tolerant to errors while loading from them. type TolerantMultiPluginLoader []PluginLoader +// Load calls Load() for each of the encapsulated Loaders. func (l TolerantMultiPluginLoader) Load() (Plugins, error) { plugins := Plugins{} for _, loader := range l { @@ -191,9 +195,10 @@ func (l TolerantMultiPluginLoader) Load() (Plugins, error) { return plugins, nil } -// DummyPluginLoader loads nothing. +// DummyPluginLoader is a noop PluginLoader. type DummyPluginLoader struct{} +// Load loads nothing. func (l *DummyPluginLoader) Load() (Plugins, error) { return Plugins{}, nil } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/plugins/plugins.go b/vendor/k8s.io/kubernetes/pkg/kubectl/plugins/plugins.go index e0bea1a3b46e..f2cc17847b30 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/plugins/plugins.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/plugins/plugins.go @@ -23,11 +23,16 @@ import ( ) var ( - IncompletePluginError = fmt.Errorf("incomplete plugin descriptor: name, shortDesc and command fields are required") - InvalidPluginNameError = fmt.Errorf("plugin name can't contain spaces") - IncompleteFlagError = fmt.Errorf("incomplete flag descriptor: name and desc fields are required") - InvalidFlagNameError = fmt.Errorf("flag name can't contain spaces") - InvalidFlagShorthandError = fmt.Errorf("flag shorthand must be only one letter") + // ErrIncompletePlugin indicates plugin is incomplete. + ErrIncompletePlugin = fmt.Errorf("incomplete plugin descriptor: name, shortDesc and command fields are required") + // ErrInvalidPluginName indicates plugin name is invalid. + ErrInvalidPluginName = fmt.Errorf("plugin name can't contain spaces") + // ErrIncompleteFlag indicates flag is incomplete. + ErrIncompleteFlag = fmt.Errorf("incomplete flag descriptor: name and desc fields are required") + // ErrInvalidFlagName indicates flag name is invalid. 
+ ErrInvalidFlagName = fmt.Errorf("flag name can't contain spaces") + // ErrInvalidFlagShorthand indicates flag shorthand is invalid. + ErrInvalidFlagShorthand = fmt.Errorf("flag shorthand must be only one letter") ) // Plugin is the representation of a CLI extension (plugin). @@ -37,7 +42,7 @@ type Plugin struct { Context RunningContext `json:"-"` } -// PluginDescription holds everything needed to register a +// Description holds everything needed to register a // plugin as a command. Usually comes from a descriptor file. type Description struct { Name string `json:"name"` @@ -49,18 +54,19 @@ type Description struct { Tree Plugins `json:"tree,omitempty"` } -// PluginSource holds the location of a given plugin in the filesystem. +// Source holds the location of a given plugin in the filesystem. type Source struct { Dir string `json:"-"` DescriptorName string `json:"-"` } +// Validate validates plugin data. func (p Plugin) Validate() error { if len(p.Name) == 0 || len(p.ShortDesc) == 0 || (len(p.Command) == 0 && len(p.Tree) == 0) { - return IncompletePluginError + return ErrIncompletePlugin } if strings.Index(p.Name, " ") > -1 { - return InvalidPluginNameError + return ErrInvalidPluginName } for _, flag := range p.Flags { if err := flag.Validate(); err != nil { @@ -75,6 +81,7 @@ func (p Plugin) Validate() error { return nil } +// IsValid returns true if plugin data is valid. func (p Plugin) IsValid() bool { return p.Validate() == nil } @@ -90,24 +97,27 @@ type Flag struct { DefValue string `json:"defValue,omitempty"` } +// Validate validates flag data. func (f Flag) Validate() error { if len(f.Name) == 0 || len(f.Desc) == 0 { - return IncompleteFlagError + return ErrIncompleteFlag } if strings.Index(f.Name, " ") > -1 { - return InvalidFlagNameError + return ErrInvalidFlagName } return f.ValidateShorthand() } +// ValidateShorthand validates flag shorthand data. func (f Flag) ValidateShorthand() error { length := len(f.Shorthand) if length == 0 || (length == 1 && unicode.IsLetter(rune(f.Shorthand[0]))) { return nil } - return InvalidFlagShorthandError + return ErrInvalidFlagShorthand } +// Shorthanded returns true if flag shorthand data is valid. func (f Flag) Shorthanded() bool { return f.ValidateShorthand() == nil } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/priorityclass.go b/vendor/k8s.io/kubernetes/pkg/kubectl/priorityclass.go new file mode 100644 index 000000000000..fd600ae3253d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/priorityclass.go @@ -0,0 +1,85 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubectl + +import ( + "fmt" + + scheduling "k8s.io/api/scheduling/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// PriorityClassV1Generator supports stable generation of a priorityClass. 
+type PriorityClassV1Generator struct { + Name string + Value int32 + GlobalDefault bool + Description string +} + +// Ensure it supports the generator pattern that uses parameters specified during construction. +var _ StructuredGenerator = &PriorityClassV1Generator{} + +func (PriorityClassV1Generator) ParamNames() []GeneratorParam { + return []GeneratorParam{ + {"name", true}, + {"value", true}, + {"global-default", false}, + {"description", false}, + } +} + +func (s PriorityClassV1Generator) Generate(params map[string]interface{}) (runtime.Object, error) { + if err := ValidateParams(s.ParamNames(), params); err != nil { + return nil, err + } + + name, found := params["name"].(string) + if !found { + return nil, fmt.Errorf("expected string, saw %v for 'name'", name) + } + + value, found := params["value"].(int32) + if !found { + return nil, fmt.Errorf("expected int32, found %v", value) + } + + globalDefault, found := params["global-default"].(bool) + if !found { + return nil, fmt.Errorf("expected bool, found %v", globalDefault) + } + + description, found := params["description"].(string) + if !found { + return nil, fmt.Errorf("expected string, found %v", description) + } + delegate := &PriorityClassV1Generator{Name: name, Value: value, GlobalDefault: globalDefault, Description: description} + return delegate.StructuredGenerate() +} + +// StructuredGenerate outputs a priorityClass object using the configured fields. +func (s *PriorityClassV1Generator) StructuredGenerate() (runtime.Object, error) { + return &scheduling.PriorityClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: s.Name, + }, + Value: s.Value, + GlobalDefault: s.GlobalDefault, + Description: s.Description, + }, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/quota.go b/vendor/k8s.io/kubernetes/pkg/kubectl/quota.go index 173f568c6713..d51b74af951c 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/quota.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/quota.go @@ -20,8 +20,8 @@ import ( "fmt" "strings" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/kubernetes/pkg/api" ) // ResourceQuotaGeneratorV1 supports stable generation of a resource quota @@ -79,7 +79,7 @@ func (g *ResourceQuotaGeneratorV1) StructuredGenerate() (runtime.Object, error) return nil, err } - resourceList, err := populateResourceList(g.Hard) + resourceList, err := populateResourceListV1(g.Hard) if err != nil { return nil, err } @@ -89,7 +89,7 @@ func (g *ResourceQuotaGeneratorV1) StructuredGenerate() (runtime.Object, error) return nil, err } - resourceQuota := &api.ResourceQuota{} + resourceQuota := &v1.ResourceQuota{} resourceQuota.Name = g.Name resourceQuota.Spec.Hard = resourceList resourceQuota.Spec.Scopes = scopes @@ -104,14 +104,14 @@ func (r *ResourceQuotaGeneratorV1) validate() error { return nil } -func parseScopes(spec string) ([]api.ResourceQuotaScope, error) { +func parseScopes(spec string) ([]v1.ResourceQuotaScope, error) { // empty input gets a nil response to preserve generator test expected behaviors if spec == "" { return nil, nil } scopes := strings.Split(spec, ",") - result := make([]api.ResourceQuotaScope, 0, len(scopes)) + result := make([]v1.ResourceQuotaScope, 0, len(scopes)) for _, scope := range scopes { // intentionally do not verify the scope against the valid scope list. This is done by the apiserver anyway. 
@@ -119,7 +119,7 @@ func parseScopes(spec string) ([]api.ResourceQuotaScope, error) { return nil, fmt.Errorf("invalid resource quota scope \"\"") } - result = append(result, api.ResourceQuotaScope(scope)) + result = append(result, v1.ResourceQuotaScope(scope)) } return result, nil } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/BUILD b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/BUILD index 441d31133c67..2df1069b41f6 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/BUILD @@ -8,7 +8,6 @@ go_library( name = "go_default_library", srcs = [ "builder.go", - "categories.go", "doc.go", "helper.go", "interfaces.go", @@ -17,18 +16,20 @@ go_library( "selector.go", "visitor.go", ], + importpath = "k8s.io/kubernetes/pkg/kubectl/resource", visibility = [ "//build/visible_to:pkg_kubectl_resource_CONSUMERS", ], deps = [ - "//pkg/api:go_default_library", + "//pkg/kubectl/categories:go_default_library", "//pkg/kubectl/validation:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/text/encoding/unicode:go_default_library", "//vendor/golang.org/x/text/transform:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", @@ -38,7 +39,6 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/yaml:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/discovery:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", ], ) @@ -47,7 +47,6 @@ go_test( name = "go_default_test", srcs = [ "builder_test.go", - "categories_test.go", "helper_test.go", "visitor_test.go", ], @@ -55,14 +54,12 @@ go_test( "//examples:config", "//test/fixtures", ], + importpath = "k8s.io/kubernetes/pkg/kubectl/resource", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/testapi:go_default_library", - "//pkg/api/testing:go_default_library", - "//vendor/github.com/emicklei/go-restful-swagger12:go_default_library", + "//pkg/kubectl/categories:go_default_library", + "//pkg/kubectl/scheme:go_default_library", "//vendor/github.com/ghodss/yaml:go_default_library", - "//vendor/github.com/googleapis/gnostic/OpenAPIv2:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", @@ -72,11 +69,11 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/version:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/discovery:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", "//vendor/k8s.io/client-go/rest/fake:go_default_library", "//vendor/k8s.io/client-go/rest/watch:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/builder.go b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/builder.go index 8908bc2ef307..4ff130b7f904 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/builder.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/builder.go @@ -17,6 +17,7 @@ limitations under the License. package resource import ( + "errors" "fmt" "io" "net/url" @@ -26,10 +27,10 @@ import ( "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/kubernetes/pkg/kubectl/categories" "k8s.io/kubernetes/pkg/kubectl/validation" ) @@ -42,8 +43,12 @@ const defaultHttpGetAttempts int = 3 // from the command line and converting them to a list of resources to iterate // over using the Visitor interface. type Builder struct { - mapper *Mapper - categoryExpander CategoryExpander + categoryExpander categories.CategoryExpander + + // mapper is set explicitly by resource builders + mapper *Mapper + internal *Mapper + unstructured *Mapper errs []error @@ -51,9 +56,12 @@ type Builder struct { stream bool dir bool - selector labels.Selector + labelSelector *string + fieldSelector *string selectAll bool includeUninitialized bool + limitChunks int64 + requestTransforms []RequestTransform resources []string @@ -88,6 +96,11 @@ Example resource specifications include: ' ' ''`) +var LocalResourceError = errors.New(`error: you must specify resources by --filename when --local is set. +Example resource specifications include: + '-f rsrc.yaml' + '--filename=rsrc.json'`) + // TODO: expand this to include other errors. func IsUsageError(err error) bool { if err == nil { @@ -106,10 +119,13 @@ type resourceTuple struct { Name string } -// NewBuilder creates a builder that operates on generic objects. -func NewBuilder(mapper meta.RESTMapper, categoryExpander CategoryExpander, typer runtime.ObjectTyper, clientMapper ClientMapper, decoder runtime.Decoder) *Builder { +// NewBuilder creates a builder that operates on generic objects. At least one of +// internal or unstructured must be specified. +// TODO: Add versioned client (although versioned is still lossy) +func NewBuilder(internal, unstructured *Mapper, categoryExpander categories.CategoryExpander) *Builder { return &Builder{ - mapper: &Mapper{typer, mapper, clientMapper, decoder}, + internal: internal, + unstructured: unstructured, categoryExpander: categoryExpander, requireObject: true, } @@ -155,6 +171,64 @@ func (b *Builder) FilenameParam(enforceNamespace bool, filenameOptions *Filename return b } +// Unstructured updates the builder so that it will request and send unstructured +// objects. Unstructured objects preserve all fields sent by the server in a map format +// based on the object's JSON structure which means no data is lost when the client +// reads and then writes an object. Use this mode in preference to Internal unless you +// are working with Go types directly. 
+func (b *Builder) Unstructured() *Builder { + if b.unstructured == nil { + b.errs = append(b.errs, fmt.Errorf("no unstructured mapper provided")) + return b + } + if b.mapper != nil { + b.errs = append(b.errs, fmt.Errorf("another mapper was already selected, cannot use unstructured types")) + return b + } + b.mapper = b.unstructured + return b +} + +// Internal updates the builder so that it will convert objects off the wire +// into the internal form if necessary. Using internal types is lossy - fields added +// to the server will not be seen by the client code and may result in failure. Only +// use this mode when working offline, or when generating patches to send to the server. +// Use Unstructured if you are reading an object and performing a POST or PUT. +func (b *Builder) Internal() *Builder { + if b.internal == nil { + b.errs = append(b.errs, fmt.Errorf("no internal mapper provided")) + return b + } + if b.mapper != nil { + b.errs = append(b.errs, fmt.Errorf("another mapper was already selected, cannot use internal types")) + return b + } + b.mapper = b.internal + return b +} + +// LocalParam calls Local() if local is true. +func (b *Builder) LocalParam(local bool) *Builder { + if local { + b.Local() + } + return b +} + +// Local will avoid asking the server for results. +func (b *Builder) Local() *Builder { + mapper := *b.mapper + mapper.ClientMapper = DisabledClientForMapping{ClientMapper: mapper.ClientMapper} + b.mapper = &mapper + return b +} + +// Mapper returns a copy of the current mapper. +func (b *Builder) Mapper() *Mapper { + mapper := *b.mapper + return &mapper +} + // URL accepts a number of URLs directly. func (b *Builder) URL(httpAttemptCount int, urls ...*url.URL) *Builder { for _, u := range urls { @@ -249,28 +323,41 @@ func (b *Builder) ResourceNames(resource string, names ...string) *Builder { return b } -// SelectorParam defines a selector that should be applied to the object types to load. +// LabelSelectorParam defines a selector that should be applied to the object types to load. // This will not affect files loaded from disk or URL. If the parameter is empty it is -// a no-op - to select all resources invoke `b.Selector(labels.Everything)`. -func (b *Builder) SelectorParam(s string) *Builder { - selector, err := labels.Parse(s) - if err != nil { - b.errs = append(b.errs, fmt.Errorf("the provided selector %q is not valid: %v", s, err)) - return b - } - if selector.Empty() { +// a no-op - to select all resources invoke `b.LabelSelector(labels.Everything.String)`. +func (b *Builder) LabelSelectorParam(s string) *Builder { + selector := strings.TrimSpace(s) + if len(selector) == 0 { return b } if b.selectAll { - b.errs = append(b.errs, fmt.Errorf("found non empty selector %q with previously set 'all' parameter. ", s)) + b.errs = append(b.errs, fmt.Errorf("found non-empty label selector %q with previously set 'all' parameter. ", s)) return b } - return b.Selector(selector) + return b.LabelSelector(selector) +} + +// LabelSelector accepts a selector directly and will filter the resulting list by that object. +// Use LabelSelectorParam instead for user input. +func (b *Builder) LabelSelector(selector string) *Builder { + b.labelSelector = &selector + return b } -// Selector accepts a selector directly, and if non nil will trigger a list action. -func (b *Builder) Selector(selector labels.Selector) *Builder { - b.selector = selector +// FieldSelectorParam defines a selector that should be applied to the object types to load. 
+// This will not affect files loaded from disk or URL. If the parameter is empty it is +// a no-op - to select all resources. +func (b *Builder) FieldSelectorParam(s string) *Builder { + s = strings.TrimSpace(s) + if len(s) == 0 { + return b + } + if b.selectAll { + b.errs = append(b.errs, fmt.Errorf("found non-empty field selector %q with previously set 'all' parameter. ", s)) + return b + } + b.fieldSelector = &s return b } @@ -318,9 +405,24 @@ func (b *Builder) RequireNamespace() *Builder { return b } +// RequestChunksOf attempts to load responses from the server in batches of size limit +// to avoid long delays loading and transferring very large lists. If unset defaults to +// no chunking. +func (b *Builder) RequestChunksOf(chunkSize int64) *Builder { + b.limitChunks = chunkSize + return b +} + +// TransformRequests alters API calls made by clients requested from this builder. Pass +// an empty list to clear modifiers. +func (b *Builder) TransformRequests(opts ...RequestTransform) *Builder { + b.requestTransforms = opts + return b +} + // SelectEverythingParam func (b *Builder) SelectAllParam(selectAll bool) *Builder { - if selectAll && b.selector != nil { + if selectAll && (b.labelSelector != nil || b.fieldSelector != nil) { b.errs = append(b.errs, fmt.Errorf("setting 'all' parameter but found a non empty selector. ")) return b } @@ -365,8 +467,9 @@ func (b *Builder) ResourceTypeOrNameArgs(allowEmptySelector bool, args ...string b.ResourceTypes(SplitResourceArgument(args[0])...) case len(args) == 1: b.ResourceTypes(SplitResourceArgument(args[0])...) - if b.selector == nil && allowEmptySelector { - b.selector = labels.Everything() + if b.labelSelector == nil && allowEmptySelector { + selector := labels.Everything().String() + b.labelSelector = &selector } case len(args) == 0: default: @@ -554,7 +657,8 @@ func (b *Builder) visitorResult() *Result { } if b.selectAll { - b.selector = labels.Everything() + selector := labels.Everything().String() + b.labelSelector = &selector } // visit items specified by paths @@ -563,7 +667,7 @@ func (b *Builder) visitorResult() *Result { } // visit selectors - if b.selector != nil { + if b.labelSelector != nil || b.fieldSelector != nil { return b.visitBySelector() } @@ -603,6 +707,14 @@ func (b *Builder) visitBySelector() *Result { return result } + var labelSelector, fieldSelector string + if b.labelSelector != nil { + labelSelector = *b.labelSelector + } + if b.fieldSelector != nil { + fieldSelector = *b.fieldSelector + } + visitors := []Visitor{} for _, mapping := range mappings { client, err := b.mapper.ClientForMapping(mapping) @@ -610,11 +722,12 @@ func (b *Builder) visitBySelector() *Result { result.err = err return result } + client = NewClientWithOptions(client, b.requestTransforms...) selectorNamespace := b.namespace if mapping.Scope.Name() != meta.RESTScopeNameNamespace { selectorNamespace = "" } - visitors = append(visitors, NewSelector(client, mapping, selectorNamespace, b.selector, b.export, b.includeUninitialized)) + visitors = append(visitors, NewSelector(client, mapping, selectorNamespace, labelSelector, fieldSelector, b.export, b.includeUninitialized, b.limitChunks)) } if b.continueOnError { result.visitor = EagerVisitorList(visitors) @@ -659,6 +772,7 @@ func (b *Builder) visitByResource() *Result { result.err = err return result } + client = NewClientWithOptions(client, b.requestTransforms...) 
clients[s] = client } @@ -687,7 +801,13 @@ func (b *Builder) visitByResource() *Result { } } - info := NewInfo(client, mapping, selectorNamespace, tuple.Name, b.export) + info := &Info{ + Client: client, + Mapping: mapping, + Namespace: selectorNamespace, + Name: tuple.Name, + Export: b.export, + } items = append(items, info) } @@ -746,7 +866,13 @@ func (b *Builder) visitByName() *Result { visitors := []Visitor{} for _, name := range b.names { - info := NewInfo(client, mapping, selectorNamespace, name, b.export) + info := &Info{ + Client: client, + Mapping: mapping, + Namespace: selectorNamespace, + Name: name, + Export: b.export, + } visitors = append(visitors, info) } result.visitor = VisitorList(visitors) @@ -789,8 +915,12 @@ func (b *Builder) visitByPaths() *Result { } visitors = NewDecoratedVisitor(visitors, RetrieveLatest) } - if b.selector != nil { - visitors = NewFilteredVisitor(visitors, FilterBySelector(b.selector)) + if b.labelSelector != nil { + selector, err := labels.Parse(*b.labelSelector) + if err != nil { + return result.withError(fmt.Errorf("the provided selector %q is not valid: %v", b.labelSelector, err)) + } + visitors = NewFilteredVisitor(visitors, FilterByLabelSelector(selector)) } result.visitor = visitors result.sources = b.paths @@ -803,6 +933,7 @@ func (b *Builder) visitByPaths() *Result { // for further iteration. func (b *Builder) Do() *Result { r := b.visitorResult() + r.mapper = b.Mapper() if r.err != nil { return r } @@ -883,5 +1014,5 @@ func MultipleTypesRequested(args []string) bool { } rKinds.Insert(rTuple.Resource) } - return (rKinds.Len() > 1) + return rKinds.Len() > 1 } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/helper.go b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/helper.go index 940387295ad1..e5ca36221ed6 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/helper.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/helper.go @@ -22,7 +22,6 @@ import ( "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/watch" @@ -64,33 +63,24 @@ func (m *Helper) Get(namespace, name string, export bool) (runtime.Object, error return req.Do().Get() } -// TODO: add field selector -func (m *Helper) List(namespace, apiVersion string, selector labels.Selector, export, includeUninitialized bool) (runtime.Object, error) { +func (m *Helper) List(namespace, apiVersion string, export bool, options *metav1.ListOptions) (runtime.Object, error) { req := m.RESTClient.Get(). NamespaceIfScoped(namespace, m.NamespaceScoped). Resource(m.Resource). - VersionedParams(&metav1.ListOptions{ - LabelSelector: selector.String(), - }, metav1.ParameterCodec) + VersionedParams(options, metav1.ParameterCodec) if export { // TODO: I should be part of ListOptions req.Param("export", strconv.FormatBool(export)) } - if includeUninitialized { - req.Param("includeUninitialized", strconv.FormatBool(includeUninitialized)) - } return req.Do().Get() } -func (m *Helper) Watch(namespace, resourceVersion, apiVersion string, labelSelector labels.Selector) (watch.Interface, error) { +func (m *Helper) Watch(namespace, apiVersion string, options *metav1.ListOptions) (watch.Interface, error) { + options.Watch = true return m.RESTClient.Get(). NamespaceIfScoped(namespace, m.NamespaceScoped). Resource(m.Resource). 
- VersionedParams(&metav1.ListOptions{ - ResourceVersion: resourceVersion, - Watch: true, - LabelSelector: labelSelector.String(), - }, metav1.ParameterCodec). + VersionedParams(options, metav1.ParameterCodec). Watch() } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/interfaces.go b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/interfaces.go index bb7a956cde21..8b75d41dbf90 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/interfaces.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/interfaces.go @@ -44,3 +44,41 @@ type ClientMapperFunc func(mapping *meta.RESTMapping) (RESTClient, error) func (f ClientMapperFunc) ClientForMapping(mapping *meta.RESTMapping) (RESTClient, error) { return f(mapping) } + +// RequestTransform is a function that is given a chance to modify the outgoing request. +type RequestTransform func(*client.Request) + +// NewClientWithOptions wraps the provided RESTClient and invokes each transform on each +// newly created request. +func NewClientWithOptions(c RESTClient, transforms ...RequestTransform) RESTClient { + return &clientOptions{c: c, transforms: transforms} +} + +type clientOptions struct { + c RESTClient + transforms []RequestTransform +} + +func (c *clientOptions) modify(req *client.Request) *client.Request { + for _, transform := range c.transforms { + transform(req) + } + return req +} + +func (c *clientOptions) Get() *client.Request { + return c.modify(c.c.Get()) +} + +func (c *clientOptions) Post() *client.Request { + return c.modify(c.c.Post()) +} +func (c *clientOptions) Patch(t types.PatchType) *client.Request { + return c.modify(c.c.Patch(t)) +} +func (c *clientOptions) Delete() *client.Request { + return c.modify(c.c.Delete()) +} +func (c *clientOptions) Put() *client.Request { + return c.modify(c.c.Put()) +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/mapper.go b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/mapper.go index 81377358d4f8..6739736805d7 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/mapper.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/mapper.go @@ -25,17 +25,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" ) -// DisabledClientForMapping allows callers to avoid allowing remote calls when handling -// resources. -type DisabledClientForMapping struct { - ClientMapper -} - -func (f DisabledClientForMapping) ClientForMapping(mapping *meta.RESTMapping) (RESTClient, error) { - return nil, nil -} - -// Mapper is a convenience struct for holding references to the three interfaces +// Mapper is a convenience struct for holding references to the interfaces // needed to create Info for arbitrary objects. type Mapper struct { runtime.ObjectTyper @@ -44,17 +34,27 @@ type Mapper struct { runtime.Decoder } +// AcceptUnrecognizedObjects will return a mapper that will tolerate objects +// that are not recognized by the RESTMapper, returning mappings that can +// perform minimal transformation. Allows working in disconnected mode, or with +// objects that the server does not recognize. Returned resource.Info objects +// may have empty resource fields and nil clients. +func (m *Mapper) AcceptUnrecognizedObjects() *Mapper { + copied := *m + copied.RESTMapper = NewRelaxedRESTMapper(m.RESTMapper) + copied.ClientMapper = NewRelaxedClientMapper(m.ClientMapper) + return &copied +} + // InfoForData creates an Info object for the given data. An error is returned // if any of the decoding or client lookup steps fail. 
Name and namespace will be // set into Info if the mapping's MetadataAccessor can retrieve them. func (m *Mapper) InfoForData(data []byte, source string) (*Info, error) { - versions := &runtime.VersionedObjects{} - _, gvk, err := m.Decode(data, nil, versions) + obj, gvk, err := m.Decode(data, nil, nil) if err != nil { return nil, fmt.Errorf("unable to decode %q: %v", source, err) } - obj, versioned := versions.Last(), versions.First() mapping, err := m.RESTMapping(gvk.GroupKind(), gvk.Version) if err != nil { return nil, fmt.Errorf("unable to recognize %q: %v", source, err) @@ -70,14 +70,15 @@ func (m *Mapper) InfoForData(data []byte, source string) (*Info, error) { resourceVersion, _ := mapping.MetadataAccessor.ResourceVersion(obj) return &Info{ - Mapping: mapping, - Client: client, + Client: client, + Mapping: mapping, + + Source: source, Namespace: namespace, Name: name, - Source: source, - VersionedObject: versioned, - Object: obj, ResourceVersion: resourceVersion, + + Object: obj, }, nil } @@ -99,6 +100,7 @@ func (m *Mapper) InfoForObject(obj runtime.Object, preferredGVKs []schema.GroupV if err != nil { return nil, fmt.Errorf("unable to recognize %v: %v", groupVersionKind, err) } + client, err := m.ClientForMapping(mapping) if err != nil { return nil, fmt.Errorf("unable to connect to a server to handle %q: %v", mapping.Resource, err) @@ -107,13 +109,14 @@ func (m *Mapper) InfoForObject(obj runtime.Object, preferredGVKs []schema.GroupV namespace, _ := mapping.MetadataAccessor.Namespace(obj) resourceVersion, _ := mapping.MetadataAccessor.ResourceVersion(obj) return &Info{ - Mapping: mapping, - Client: client, - Namespace: namespace, - Name: name, + Client: client, + Mapping: mapping, - Object: obj, + Namespace: namespace, + Name: name, ResourceVersion: resourceVersion, + + Object: obj, }, nil } @@ -152,3 +155,83 @@ func preferredObjectKind(possibilities []schema.GroupVersionKind, preferences [] // Just pick the first return possibilities[0] } + +// DisabledClientForMapping allows callers to avoid allowing remote calls when handling +// resources. +type DisabledClientForMapping struct { + ClientMapper +} + +func (f DisabledClientForMapping) ClientForMapping(mapping *meta.RESTMapping) (RESTClient, error) { + return nil, nil +} + +// NewRelaxedClientMapper will return a nil mapping if the object is not a recognized resource. +func NewRelaxedClientMapper(mapper ClientMapper) ClientMapper { + return relaxedClientMapper{mapper} +} + +type relaxedClientMapper struct { + ClientMapper +} + +func (f relaxedClientMapper) ClientForMapping(mapping *meta.RESTMapping) (RESTClient, error) { + if len(mapping.Resource) == 0 { + return nil, nil + } + return f.ClientMapper.ClientForMapping(mapping) +} + +// NewRelaxedRESTMapper returns a RESTMapper that will tolerate mappings that don't exist in provided +// RESTMapper, returning a mapping that is a best effort against the current server. This allows objects +// that the server does not recognize to still be loaded. +func NewRelaxedRESTMapper(mapper meta.RESTMapper) meta.RESTMapper { + return relaxedMapper{mapper} +} + +type relaxedMapper struct { + meta.RESTMapper +} + +func (m relaxedMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { + mapping, err := m.RESTMapper.RESTMapping(gk, versions...) 
+ if err != nil && meta.IsNoMatchError(err) && len(versions) > 0 { + return &meta.RESTMapping{ + GroupVersionKind: gk.WithVersion(versions[0]), + MetadataAccessor: meta.NewAccessor(), + Scope: meta.RESTScopeRoot, + ObjectConvertor: identityConvertor{}, + }, nil + } + return mapping, err +} +func (m relaxedMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { + mappings, err := m.RESTMapper.RESTMappings(gk, versions...) + if err != nil && meta.IsNoMatchError(err) && len(versions) > 0 { + return []*meta.RESTMapping{ + { + GroupVersionKind: gk.WithVersion(versions[0]), + MetadataAccessor: meta.NewAccessor(), + Scope: meta.RESTScopeRoot, + ObjectConvertor: identityConvertor{}, + }, + }, nil + } + return mappings, err +} + +type identityConvertor struct{} + +var _ runtime.ObjectConvertor = identityConvertor{} + +func (c identityConvertor) Convert(in interface{}, out interface{}, context interface{}) error { + return fmt.Errorf("unable to convert objects across pointers") +} + +func (c identityConvertor) ConvertToVersion(in runtime.Object, gv runtime.GroupVersioner) (out runtime.Object, err error) { + return in, nil +} + +func (c identityConvertor) ConvertFieldLabel(version string, kind string, label string, value string) (string, string, error) { + return "", "", fmt.Errorf("unable to convert field labels") +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/result.go b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/result.go index bb185754c5c0..611471ca7c72 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/result.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/result.go @@ -20,16 +20,13 @@ import ( "fmt" "reflect" - "github.com/golang/glog" - + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/watch" - "k8s.io/kubernetes/pkg/api" ) // ErrMatchFunc can be used to filter errors that may not be true failures. @@ -44,6 +41,7 @@ type Result struct { singleItemImplied bool targetsSingleItems bool + mapper *Mapper ignoreErrors []utilerrors.Matcher // populated by a call to Infos @@ -77,6 +75,11 @@ func (r *Result) IgnoreErrors(fns ...ErrMatchFunc) *Result { return r } +// Mapper returns a copy of the builder's mapper. +func (r *Result) Mapper() *Mapper { + return r.mapper +} + // Err returns one or more errors (via a util.ErrorList) that occurred prior // to visiting the elements in the visitor. To see all errors including those // that occur during visitation, invoke Infos(). @@ -132,7 +135,7 @@ func (r *Result) Infos() ([]*Info, error) { // found resources. If the Builder was a singular context (expected to return a // single resource by user input) and only a single resource was found, the resource // will be returned as is. Otherwise, the returned resources will be part of an -// api.List. The ResourceVersion of the api.List will be set only if it is identical +// v1.List. The ResourceVersion of the v1.List will be set only if it is identical // across all infos returned. 
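The result.go change below drops the internal api.List in favor of the external v1.List and packs each returned object into a runtime.RawExtension (see the toV1List helper in the hunk that follows). A minimal standalone sketch of the same packing, assuming only the vendored k8s.io/api and k8s.io/apimachinery packages:

    package main

    import (
    	"fmt"

    	corev1 "k8s.io/api/core/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/apimachinery/pkg/runtime"
    )

    // wrapInV1List mirrors the toV1List helper: arbitrary objects are carried
    // as runtime.RawExtension items of an external v1.List.
    func wrapInV1List(objects []runtime.Object, resourceVersion string) *corev1.List {
    	items := make([]runtime.RawExtension, 0, len(objects))
    	for _, o := range objects {
    		items = append(items, runtime.RawExtension{Object: o})
    	}
    	return &corev1.List{
    		ListMeta: metav1.ListMeta{ResourceVersion: resourceVersion},
    		Items:    items,
    	}
    }

    func main() {
    	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "example"}}
    	list := wrapInV1List([]runtime.Object{pod}, "42")
    	fmt.Println(len(list.Items), list.ListMeta.ResourceVersion)
    }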
func (r *Result) Object() (runtime.Object, error) { infos, err := r.Infos() @@ -163,12 +166,27 @@ func (r *Result) Object() (runtime.Object, error) { if len(versions) == 1 { version = versions.List()[0] } - return &api.List{ + + return toV1List(objects, version), err +} + +// Compile time check to enforce that list implements the necessary interface +var _ metav1.ListInterface = &v1.List{} +var _ metav1.ListMetaAccessor = &v1.List{} + +// toV1List takes a slice of Objects + their version, and returns +// a v1.List Object containing the objects in the Items field +func toV1List(objects []runtime.Object, version string) runtime.Object { + raw := []runtime.RawExtension{} + for _, o := range objects { + raw = append(raw, runtime.RawExtension{Object: o}) + } + return &v1.List{ ListMeta: metav1.ListMeta{ ResourceVersion: version, }, - Items: objects, - }, err + Items: raw, + } } // ResourceMapping returns a single meta.RESTMapping representing the @@ -221,88 +239,3 @@ func (r *Result) Watch(resourceVersion string) (watch.Interface, error) { } return w.Watch(resourceVersion) } - -// AsVersionedObject converts a list of infos into a single object - either a List containing -// the objects as children, or if only a single Object is present, as that object. The provided -// version will be preferred as the conversion target, but the Object's mapping version will be -// used if that version is not present. -func AsVersionedObject(infos []*Info, forceList bool, version schema.GroupVersion, encoder runtime.Encoder) (runtime.Object, error) { - objects, err := AsVersionedObjects(infos, version, encoder) - if err != nil { - return nil, err - } - - var object runtime.Object - if len(objects) == 1 && !forceList { - object = objects[0] - } else { - object = &api.List{Items: objects} - converted, err := TryConvert(api.Scheme, object, version, api.Registry.GroupOrDie(api.GroupName).GroupVersion) - if err != nil { - return nil, err - } - object = converted - } - - actualVersion := object.GetObjectKind().GroupVersionKind() - if actualVersion.Version != version.Version { - defaultVersionInfo := "" - if len(actualVersion.Version) > 0 { - defaultVersionInfo = fmt.Sprintf("Defaulting to %q", actualVersion.Version) - } - glog.V(1).Infof("info: the output version specified is invalid. %s\n", defaultVersionInfo) - } - return object, nil -} - -// AsVersionedObjects converts a list of infos into versioned objects. The provided -// version will be preferred as the conversion target, but the Object's mapping version will be -// used if that version is not present. -func AsVersionedObjects(infos []*Info, version schema.GroupVersion, encoder runtime.Encoder) ([]runtime.Object, error) { - objects := []runtime.Object{} - for _, info := range infos { - if info.Object == nil { - continue - } - - // objects that are not part of api.Scheme must be converted to JSON - // TODO: convert to map[string]interface{}, attach to runtime.Unknown? - if !version.Empty() { - if _, _, err := api.Scheme.ObjectKinds(info.Object); runtime.IsNotRegisteredError(err) { - // TODO: ideally this would encode to version, but we don't expose multiple codecs here. - data, err := runtime.Encode(encoder, info.Object) - if err != nil { - return nil, err - } - // TODO: Set ContentEncoding and ContentType. 
- objects = append(objects, &runtime.Unknown{Raw: data}) - continue - } - } - - converted, err := TryConvert(info.Mapping.ObjectConvertor, info.Object, version, info.Mapping.GroupVersionKind.GroupVersion()) - if err != nil { - return nil, err - } - objects = append(objects, converted) - } - return objects, nil -} - -// TryConvert attempts to convert the given object to the provided versions in order. This function assumes -// the object is in internal version. -func TryConvert(converter runtime.ObjectConvertor, object runtime.Object, versions ...schema.GroupVersion) (runtime.Object, error) { - var last error - for _, version := range versions { - if version.Empty() { - return object, nil - } - obj, err := converter.ConvertToVersion(object, version) - if err != nil { - last = err - continue - } - return obj, nil - } - return nil, last -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/selector.go b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/selector.go index 6faed0f47849..66e13ad0810b 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/selector.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/selector.go @@ -21,7 +21,7 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/labels" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/watch" ) @@ -30,60 +30,90 @@ type Selector struct { Client RESTClient Mapping *meta.RESTMapping Namespace string - Selector labels.Selector + LabelSelector string + FieldSelector string Export bool IncludeUninitialized bool + LimitChunks int64 } // NewSelector creates a resource selector which hides details of getting items by their label selector. -func NewSelector(client RESTClient, mapping *meta.RESTMapping, namespace string, selector labels.Selector, export, includeUninitialized bool) *Selector { +func NewSelector(client RESTClient, mapping *meta.RESTMapping, namespace, labelSelector, fieldSelector string, export, includeUninitialized bool, limitChunks int64) *Selector { return &Selector{ Client: client, Mapping: mapping, Namespace: namespace, - Selector: selector, + LabelSelector: labelSelector, + FieldSelector: fieldSelector, Export: export, IncludeUninitialized: includeUninitialized, + LimitChunks: limitChunks, } } -// Visit implements Visitor +// Visit implements Visitor and uses request chunking by default. 
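The rewritten Selector.Visit that follows no longer issues one unbounded LIST; it pages through results with metav1.ListOptions.Limit and the Continue token until the server stops returning one. A sketch of the same chunking pattern against a plain client-go clientset (the helper name and chunk size are illustrative assumptions):

    package example

    import (
    	corev1 "k8s.io/api/core/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    )

    // listPodsInChunks requests at most limit items per call and follows the
    // Continue token returned in the list metadata until it is exhausted.
    func listPodsInChunks(client kubernetes.Interface, namespace string, limit int64) ([]corev1.Pod, error) {
    	var pods []corev1.Pod
    	opts := metav1.ListOptions{Limit: limit}
    	for {
    		list, err := client.CoreV1().Pods(namespace).List(opts)
    		if err != nil {
    			return nil, err
    		}
    		pods = append(pods, list.Items...)
    		if list.Continue == "" {
    			return pods, nil
    		}
    		opts.Continue = list.Continue
    	}
    }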
func (r *Selector) Visit(fn VisitorFunc) error { - list, err := NewHelper(r.Client, r.Mapping).List(r.Namespace, r.ResourceMapping().GroupVersionKind.GroupVersion().String(), r.Selector, r.Export, r.IncludeUninitialized) - if err != nil { - if errors.IsBadRequest(err) || errors.IsNotFound(err) { - if se, ok := err.(*errors.StatusError); ok { - // modify the message without hiding this is an API error - if r.Selector.Empty() { - se.ErrStatus.Message = fmt.Sprintf("Unable to list %q: %v", r.Mapping.Resource, se.ErrStatus.Message) - } else { - se.ErrStatus.Message = fmt.Sprintf("Unable to find %q that match the selector %q: %v", r.Mapping.Resource, r.Selector, se.ErrStatus.Message) - } - return se + var continueToken string + for { + list, err := NewHelper(r.Client, r.Mapping).List( + r.Namespace, + r.ResourceMapping().GroupVersionKind.GroupVersion().String(), + r.Export, + &metav1.ListOptions{ + LabelSelector: r.LabelSelector, + FieldSelector: r.FieldSelector, + IncludeUninitialized: r.IncludeUninitialized, + Limit: r.LimitChunks, + Continue: continueToken, + }, + ) + if err != nil { + if errors.IsResourceExpired(err) { + return err } - if r.Selector.Empty() { - return fmt.Errorf("Unable to list %q: %v", r.Mapping.Resource, err) - } else { - return fmt.Errorf("Unable to find %q that match the selector %q: %v", r.Mapping.Resource, r.Selector, err) + if errors.IsBadRequest(err) || errors.IsNotFound(err) { + if se, ok := err.(*errors.StatusError); ok { + // modify the message without hiding this is an API error + if len(r.LabelSelector) == 0 && len(r.FieldSelector) == 0 { + se.ErrStatus.Message = fmt.Sprintf("Unable to list %q: %v", r.Mapping.Resource, se.ErrStatus.Message) + } else { + se.ErrStatus.Message = fmt.Sprintf("Unable to find %q that match label selector %q, field selector %q: %v", r.Mapping.Resource, r.LabelSelector, r.FieldSelector, se.ErrStatus.Message) + } + return se + } + if len(r.LabelSelector) == 0 && len(r.FieldSelector) == 0 { + return fmt.Errorf("Unable to list %q: %v", r.Mapping.Resource, err) + } + return fmt.Errorf("Unable to find %q that match label selector %q, field selector %q: %v", r.Mapping.Resource, r.LabelSelector, r.FieldSelector, err) } + return err } - return err - } - accessor := r.Mapping.MetadataAccessor - resourceVersion, _ := accessor.ResourceVersion(list) - info := &Info{ - Client: r.Client, - Mapping: r.Mapping, - Namespace: r.Namespace, + accessor := r.Mapping.MetadataAccessor + resourceVersion, _ := accessor.ResourceVersion(list) + nextContinueToken, _ := accessor.Continue(list) + info := &Info{ + Client: r.Client, + Mapping: r.Mapping, + + Namespace: r.Namespace, + ResourceVersion: resourceVersion, - Object: list, - ResourceVersion: resourceVersion, + Object: list, + } + + if err := fn(info, nil); err != nil { + return err + } + if len(nextContinueToken) == 0 { + return nil + } + continueToken = nextContinueToken } - return fn(info, nil) } func (r *Selector) Watch(resourceVersion string) (watch.Interface, error) { - return NewHelper(r.Client, r.Mapping).Watch(r.Namespace, resourceVersion, r.ResourceMapping().GroupVersionKind.GroupVersion().String(), r.Selector) + return NewHelper(r.Client, r.Mapping).Watch(r.Namespace, r.ResourceMapping().GroupVersionKind.GroupVersion().String(), + &metav1.ListOptions{ResourceVersion: resourceVersion, LabelSelector: r.LabelSelector, FieldSelector: r.FieldSelector}) } // ResourceMapping returns the mapping for this resource and implements ResourceMapping diff --git 
a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/visitor.go b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/visitor.go index 5e5a73a3160e..554fa4a66113 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/visitor.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/visitor.go @@ -32,6 +32,7 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -73,19 +74,22 @@ type ResourceMapping interface { // Info contains temporary info to execute a REST call, or show the results // of an already completed REST call. type Info struct { - Client RESTClient - Mapping *meta.RESTMapping + Client RESTClient + // Mapping may be nil if the object has no available metadata, but is still parseable + // from disk. + Mapping *meta.RESTMapping + // Namespace will be set if the object is namespaced and has a specified value. Namespace string Name string // Optional, Source is the filename or URL to template file (.json or .yaml), // or stdin to use to handle the resource Source string - // Optional, this is the provided object in a versioned type before defaulting - // and conversions into its corresponding internal type. This is useful for - // reflecting on user intent which may be lost after defaulting and conversions. - VersionedObject runtime.Object - // Optional, this is the most recent value returned by the server if available + // Optional, this is the most recent value returned by the server if available. It will + // typically be in unstructured or internal forms, depending on how the Builder was + // defined. If retrieved from the server, the Builder expects the mapping client to + // decide the final form. Use the AsVersioned, AsUnstructured, and AsInternal helpers + // to alter the object versions. Object runtime.Object // Optional, this is the most recent resource version the server knows about for // this type of resource. It may not match the resource version of the object, @@ -96,17 +100,6 @@ type Info struct { Export bool } -// NewInfo returns a new info object -func NewInfo(client RESTClient, mapping *meta.RESTMapping, namespace, name string, export bool) *Info { - return &Info{ - Client: client, - Mapping: mapping, - Namespace: namespace, - Name: name, - Export: export, - } -} - // Visit implements Visitor func (i *Info) Visit(fn VisitorFunc) error { return fn(i, nil) @@ -176,6 +169,61 @@ func (i *Info) ResourceMapping() *meta.RESTMapping { return i.Mapping } +// Internal attempts to convert the provided object to an internal type or returns an error. +func (i *Info) Internal() (runtime.Object, error) { + return i.Mapping.ConvertToVersion(i.Object, i.Mapping.GroupVersionKind.GroupKind().WithVersion(runtime.APIVersionInternal).GroupVersion()) +} + +// AsInternal returns the object in internal form if possible, or i.Object if it cannot be +// converted. +func (i *Info) AsInternal() runtime.Object { + if obj, err := i.Internal(); err == nil { + return obj + } + return i.Object +} + +// Versioned returns the object as a Go type in the mapping's version or returns an error. 
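The Info helpers added in this hunk (Versioned, AsVersioned, Unstructured, AsUnstructured) let callers pick the object form they need without converting by hand. For the unstructured path the decoder is UnstructuredJSONScheme; a small sketch of decoding an arbitrary manifest into a runtime.Unstructured (the sample ConfigMap JSON is illustrative):

    package main

    import (
    	"fmt"

    	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    )

    func main() {
    	manifest := []byte(`{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"example","namespace":"default"}}`)

    	// UnstructuredJSONScheme decodes arbitrary JSON into a map-backed object
    	// without requiring the kind to be registered in any scheme.
    	obj, gvk, err := unstructured.UnstructuredJSONScheme.Decode(manifest, nil, nil)
    	if err != nil {
    		panic(err)
    	}
    	u := obj.(*unstructured.Unstructured)
    	fmt.Println(gvk.Kind, u.GetNamespace(), u.GetName())
    }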
+func (i *Info) Versioned() (runtime.Object, error) { + return i.Mapping.ConvertToVersion(i.Object, i.Mapping.GroupVersionKind.GroupVersion()) +} + +// AsVersioned returns the object as a Go object in the external form if possible (matching the +// group version kind of the mapping, or i.Object if it cannot be converted. +func (i *Info) AsVersioned() runtime.Object { + if obj, err := i.Versioned(); err == nil { + return obj + } + return i.Object +} + +// Unstructured returns the current object in unstructured form (as a runtime.Unstructured) +func (i *Info) Unstructured() (runtime.Unstructured, error) { + switch t := i.Object.(type) { + case runtime.Unstructured: + return t, nil + case *runtime.Unknown: + gvk := i.Mapping.GroupVersionKind + out, _, err := unstructured.UnstructuredJSONScheme.Decode(t.Raw, &gvk, nil) + return out.(runtime.Unstructured), err + default: + out := &unstructured.Unstructured{} + if err := i.Mapping.Convert(i.Object, out, nil); err != nil { + return nil, err + } + return out, nil + } +} + +// AsUnstructured returns the object as a Go object in external form as a runtime.Unstructured +// (map of JSON equivalent values) or as i.Object if it cannot be converted. +func (i *Info) AsUnstructured() runtime.Object { + if out, err := i.Unstructured(); err == nil { + return out + } + return i.Object +} + // VisitorList implements Visit for the sub visitors it contains. The first error // returned from a child Visitor will terminate iteration. type VisitorList []Visitor @@ -388,10 +436,7 @@ func (v FlattenListVisitor) Visit(fn VisitorFunc) error { if err != nil { return fn(info, nil) } - if errs := runtime.DecodeList(items, struct { - runtime.ObjectTyper - runtime.Decoder - }{v.Mapper, v.Mapper.Decoder}); len(errs) > 0 { + if errs := runtime.DecodeList(items, v.Mapper.Decoder); len(errs) > 0 { return utilerrors.NewAggregate(errs) } @@ -691,7 +736,7 @@ func (v FilteredVisitor) Visit(fn VisitorFunc) error { }) } -func FilterBySelector(s labels.Selector) FilterFunc { +func FilterByLabelSelector(s labels.Selector) FilterFunc { return func(info *Info, err error) (bool, error) { if err != nil { return false, err diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/resource_filter.go b/vendor/k8s.io/kubernetes/pkg/kubectl/resource_filter.go index a85d1454b9a9..b873748f6888 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/resource_filter.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/resource_filter.go @@ -19,7 +19,7 @@ package kubectl import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/printers" ) @@ -56,10 +56,6 @@ func filterPods(obj runtime.Object, options printers.PrintOptions) bool { // Filter loops through a collection of FilterFuncs until it finds one that can filter the given resource func (f Filters) Filter(obj runtime.Object, opts *printers.PrintOptions) (bool, error) { - // check if the object is unstructured. If so, let's attempt to convert it to a type we can understand - // before apply filter func. - obj, _ = DecodeUnknownObject(obj) - for _, filter := range f { if ok := filter(obj, *opts); ok { return true, nil @@ -67,19 +63,3 @@ func (f Filters) Filter(obj runtime.Object, opts *printers.PrintOptions) (bool, } return false, nil } - -// check if the object is unstructured. If so, let's attempt to convert it to a type we can understand. 
-func DecodeUnknownObject(obj runtime.Object) (runtime.Object, error) { - var err error - - switch obj.(type) { - case runtime.Unstructured, *runtime.Unknown: - if objBytes, err := runtime.Encode(api.Codecs.LegacyCodec(), obj); err == nil { - if decodedObj, err := runtime.Decode(api.Codecs.UniversalDecoder(), objBytes); err == nil { - obj = decodedObj - } - } - } - - return obj, err -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/rolebinding.go b/vendor/k8s.io/kubernetes/pkg/kubectl/rolebinding.go index 6d1ae4ddaa00..0eddb1e0342c 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/rolebinding.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/rolebinding.go @@ -21,9 +21,9 @@ import ( "strings" + rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/pkg/apis/rbac" ) // RoleBindingGeneratorV1 supports stable generation of a roleBinding. @@ -113,35 +113,35 @@ func (s RoleBindingGeneratorV1) StructuredGenerate() (runtime.Object, error) { if err := s.validate(); err != nil { return nil, err } - roleBinding := &rbac.RoleBinding{} + roleBinding := &rbacv1.RoleBinding{} roleBinding.Name = s.Name switch { case len(s.Role) > 0: - roleBinding.RoleRef = rbac.RoleRef{ - APIGroup: rbac.GroupName, + roleBinding.RoleRef = rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, Kind: "Role", Name: s.Role, } case len(s.ClusterRole) > 0: - roleBinding.RoleRef = rbac.RoleRef{ - APIGroup: rbac.GroupName, + roleBinding.RoleRef = rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, Kind: "ClusterRole", Name: s.ClusterRole, } } for _, user := range sets.NewString(s.Users...).List() { - roleBinding.Subjects = append(roleBinding.Subjects, rbac.Subject{ - Kind: rbac.UserKind, - APIGroup: rbac.GroupName, + roleBinding.Subjects = append(roleBinding.Subjects, rbacv1.Subject{ + Kind: rbacv1.UserKind, + APIGroup: rbacv1.GroupName, Name: user, }) } for _, group := range sets.NewString(s.Groups...).List() { - roleBinding.Subjects = append(roleBinding.Subjects, rbac.Subject{ - Kind: rbac.GroupKind, - APIGroup: rbac.GroupName, + roleBinding.Subjects = append(roleBinding.Subjects, rbacv1.Subject{ + Kind: rbacv1.GroupKind, + APIGroup: rbacv1.GroupName, Name: group, }) } @@ -150,8 +150,8 @@ func (s RoleBindingGeneratorV1) StructuredGenerate() (runtime.Object, error) { if len(tokens) != 2 || tokens[1] == "" { return nil, fmt.Errorf("serviceaccount must be :") } - roleBinding.Subjects = append(roleBinding.Subjects, rbac.Subject{ - Kind: rbac.ServiceAccountKind, + roleBinding.Subjects = append(roleBinding.Subjects, rbacv1.Subject{ + Kind: rbacv1.ServiceAccountKind, APIGroup: "", Namespace: tokens[0], Name: tokens[1], diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/rollback.go b/vendor/k8s.io/kubernetes/pkg/kubectl/rollback.go index 31f74d81c06f..7f6dfa80fb11 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/rollback.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/rollback.go @@ -26,17 +26,19 @@ import ( appsv1beta1 "k8s.io/api/apps/v1beta1" "k8s.io/api/core/v1" - externalextensions "k8s.io/api/extensions/v1beta1" + extv1beta1 "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/watch" - "k8s.io/kubernetes/pkg/api" - apiv1 "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/apps" + api 
"k8s.io/kubernetes/pkg/apis/core" + apiv1 "k8s.io/kubernetes/pkg/apis/core/v1" "k8s.io/kubernetes/pkg/apis/extensions" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/controller/daemon" deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" "k8s.io/kubernetes/pkg/controller/statefulset" @@ -54,7 +56,7 @@ type Rollbacker interface { Rollback(obj runtime.Object, updatedAnnotations map[string]string, toRevision int64, dryRun bool) (string, error) } -func RollbackerFor(kind schema.GroupKind, c clientset.Interface) (Rollbacker, error) { +func RollbackerFor(kind schema.GroupKind, c kubernetes.Interface) (Rollbacker, error) { switch kind { case extensions.Kind("Deployment"), apps.Kind("Deployment"): return &DeploymentRollbacker{c}, nil @@ -67,7 +69,7 @@ func RollbackerFor(kind schema.GroupKind, c clientset.Interface) (Rollbacker, er } type DeploymentRollbacker struct { - c clientset.Interface + c kubernetes.Interface } func (r *DeploymentRollbacker) Rollback(obj runtime.Object, updatedAnnotations map[string]string, toRevision int64, dryRun bool) (string, error) { @@ -81,26 +83,26 @@ func (r *DeploymentRollbacker) Rollback(obj runtime.Object, updatedAnnotations m if d.Spec.Paused { return "", fmt.Errorf("you cannot rollback a paused deployment; resume it first with 'kubectl rollout resume deployment/%s' and try again", d.Name) } - deploymentRollback := &extensions.DeploymentRollback{ + deploymentRollback := &extv1beta1.DeploymentRollback{ Name: d.Name, UpdatedAnnotations: updatedAnnotations, - RollbackTo: extensions.RollbackConfig{ + RollbackTo: extv1beta1.RollbackConfig{ Revision: toRevision, }, } result := "" // Get current events - events, err := r.c.Core().Events(d.Namespace).List(metav1.ListOptions{}) + events, err := r.c.CoreV1().Events(d.Namespace).List(metav1.ListOptions{}) if err != nil { return result, err } // Do the rollback - if err := r.c.Extensions().Deployments(d.Namespace).Rollback(deploymentRollback); err != nil { + if err := r.c.ExtensionsV1beta1().Deployments(d.Namespace).Rollback(deploymentRollback); err != nil { return result, err } // Watch for the changes of events - watch, err := r.c.Core().Events(d.Namespace).Watch(metav1.ListOptions{Watch: true, ResourceVersion: events.ResourceVersion}) + watch, err := r.c.CoreV1().Events(d.Namespace).Watch(metav1.ListOptions{Watch: true, ResourceVersion: events.ResourceVersion}) if err != nil { return result, err } @@ -149,13 +151,13 @@ func isRollbackEvent(e *api.Event) (bool, string) { return false, "" } -func simpleDryRun(deployment *extensions.Deployment, c clientset.Interface, toRevision int64) (string, error) { - externalDeployment := &externalextensions.Deployment{} - if err := api.Scheme.Convert(deployment, externalDeployment, nil); err != nil { +func simpleDryRun(deployment *extensions.Deployment, c kubernetes.Interface, toRevision int64) (string, error) { + externalDeployment := &extv1beta1.Deployment{} + if err := legacyscheme.Scheme.Convert(deployment, externalDeployment, nil); err != nil { return "", fmt.Errorf("failed to convert deployment, %v", err) } - versionedExtensionsClient := versionedExtensionsClientV1beta1(c) - _, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(externalDeployment, versionedExtensionsClient) + + _, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(externalDeployment, c.ExtensionsV1beta1()) if err != nil { return "", fmt.Errorf("failed to retrieve replica sets from deployment %s: %v", deployment.Name, err) } @@ 
-184,7 +186,7 @@ func simpleDryRun(deployment *extensions.Deployment, c clientset.Interface, toRe } buf := bytes.NewBuffer([]byte{}) internalTemplate := &api.PodTemplateSpec{} - if err := apiv1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(template, internalTemplate, nil); err != nil { + if err := apiv1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(template, internalTemplate, nil); err != nil { return "", fmt.Errorf("failed to convert podtemplate, %v", err) } w := printersinternal.NewPrefixWriter(buf) @@ -203,7 +205,7 @@ func simpleDryRun(deployment *extensions.Deployment, c clientset.Interface, toRe buf := bytes.NewBuffer([]byte{}) buf.WriteString("\n") internalTemplate := &api.PodTemplateSpec{} - if err := apiv1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(template, internalTemplate, nil); err != nil { + if err := apiv1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(template, internalTemplate, nil); err != nil { return "", fmt.Errorf("failed to convert podtemplate, %v", err) } w := printersinternal.NewPrefixWriter(buf) @@ -212,40 +214,32 @@ func simpleDryRun(deployment *extensions.Deployment, c clientset.Interface, toRe } type DaemonSetRollbacker struct { - c clientset.Interface + c kubernetes.Interface } func (r *DaemonSetRollbacker) Rollback(obj runtime.Object, updatedAnnotations map[string]string, toRevision int64, dryRun bool) (string, error) { if toRevision < 0 { return "", revisionNotFoundErr(toRevision) } - - ds, ok := obj.(*extensions.DaemonSet) - if !ok { - return "", fmt.Errorf("passed object is not a DaemonSet: %#v", obj) - } - versionedAppsClient := versionedAppsClientV1beta1(r.c) - versionedExtensionsClient := versionedExtensionsClientV1beta1(r.c) - versionedObj, allHistory, err := controlledHistories(versionedAppsClient, versionedExtensionsClient, ds.Namespace, ds.Name, "DaemonSet") + accessor, err := meta.Accessor(obj) if err != nil { - return "", fmt.Errorf("unable to find history controlled by DaemonSet %s: %v", ds.Name, err) + return "", fmt.Errorf("failed to create accessor for kind %v: %s", obj.GetObjectKind(), err.Error()) } - versionedDS, ok := versionedObj.(*externalextensions.DaemonSet) - if !ok { - return "", fmt.Errorf("unexpected non-DaemonSet object returned: %v", versionedDS) + ds, history, err := daemonSetHistory(r.c.ExtensionsV1beta1(), r.c.AppsV1beta1(), accessor.GetNamespace(), accessor.GetName()) + if err != nil { + return "", err } - - if toRevision == 0 && len(allHistory) <= 1 { + if toRevision == 0 && len(history) <= 1 { return "", fmt.Errorf("no last revision to roll back to") } - toHistory := findHistory(toRevision, allHistory) + toHistory := findHistory(toRevision, history) if toHistory == nil { return "", revisionNotFoundErr(toRevision) } if dryRun { - appliedDS, err := applyHistory(versionedDS, toHistory) + appliedDS, err := applyDaemonSetHistory(ds, toHistory) if err != nil { return "", err } @@ -253,7 +247,7 @@ func (r *DaemonSetRollbacker) Rollback(obj runtime.Object, updatedAnnotations ma } // Skip if the revision already matches current DaemonSet - done, err := daemon.Match(versionedDS, toHistory) + done, err := daemon.Match(ds, toHistory) if err != nil { return "", err } @@ -262,7 +256,7 @@ func (r *DaemonSetRollbacker) Rollback(obj runtime.Object, updatedAnnotations ma } // Restore revision - if _, err = versionedExtensionsClient.DaemonSets(ds.Namespace).Patch(ds.Name, types.StrategicMergePatchType, toHistory.Data.Raw); err != nil { + if _, err = 
r.c.ExtensionsV1beta1().DaemonSets(accessor.GetNamespace()).Patch(accessor.GetName(), types.StrategicMergePatchType, toHistory.Data.Raw); err != nil { return "", fmt.Errorf("failed restoring revision %d: %v", toRevision, err) } @@ -270,7 +264,7 @@ func (r *DaemonSetRollbacker) Rollback(obj runtime.Object, updatedAnnotations ma } type StatefulSetRollbacker struct { - c clientset.Interface + c kubernetes.Interface } // toRevision is a non-negative integer, with 0 being reserved to indicate rolling back to previous configuration @@ -278,34 +272,25 @@ func (r *StatefulSetRollbacker) Rollback(obj runtime.Object, updatedAnnotations if toRevision < 0 { return "", revisionNotFoundErr(toRevision) } - - ss, ok := obj.(*apps.StatefulSet) - if !ok { - return "", fmt.Errorf("passed object is not a StatefulSet: %#v", obj) - } - versionedAppsClient := versionedAppsClientV1beta1(r.c) - versionedExtensionsClient := versionedExtensionsClientV1beta1(r.c) - versionedObj, allHistory, err := controlledHistories(versionedAppsClient, versionedExtensionsClient, ss.Namespace, ss.Name, "StatefulSet") + accessor, err := meta.Accessor(obj) if err != nil { - return "", fmt.Errorf("unable to find history controlled by StatefulSet %s: %v", ss.Name, err) + return "", fmt.Errorf("failed to create accessor for kind %v: %s", obj.GetObjectKind(), err.Error()) } - - versionedSS, ok := versionedObj.(*appsv1beta1.StatefulSet) - if !ok { - return "", fmt.Errorf("unexpected non-StatefulSet object returned: %v", versionedSS) + sts, history, err := statefulSetHistory(r.c.AppsV1beta1(), accessor.GetNamespace(), accessor.GetName()) + if err != nil { + return "", err } - - if toRevision == 0 && len(allHistory) <= 1 { + if toRevision == 0 && len(history) <= 1 { return "", fmt.Errorf("no last revision to roll back to") } - toHistory := findHistory(toRevision, allHistory) + toHistory := findHistory(toRevision, history) if toHistory == nil { return "", revisionNotFoundErr(toRevision) } if dryRun { - appliedSS, err := statefulset.ApplyRevision(versionedSS, toHistory) + appliedSS, err := statefulset.ApplyRevision(sts, toHistory) if err != nil { return "", err } @@ -313,7 +298,7 @@ func (r *StatefulSetRollbacker) Rollback(obj runtime.Object, updatedAnnotations } // Skip if the revision already matches current StatefulSet - done, err := statefulset.Match(versionedSS, toHistory) + done, err := statefulset.Match(sts, toHistory) if err != nil { return "", err } @@ -322,7 +307,7 @@ func (r *StatefulSetRollbacker) Rollback(obj runtime.Object, updatedAnnotations } // Restore revision - if _, err = versionedAppsClient.StatefulSets(ss.Namespace).Patch(ss.Name, types.StrategicMergePatchType, toHistory.Data.Raw); err != nil { + if _, err = r.c.AppsV1beta1().StatefulSets(sts.Namespace).Patch(sts.Name, types.StrategicMergePatchType, toHistory.Data.Raw); err != nil { return "", fmt.Errorf("failed restoring revision %d: %v", toRevision, err) } @@ -360,7 +345,7 @@ func printPodTemplate(specTemplate *v1.PodTemplateSpec) (string, error) { content := bytes.NewBuffer([]byte{}) w := printersinternal.NewPrefixWriter(content) internalTemplate := &api.PodTemplateSpec{} - if err := apiv1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(specTemplate, internalTemplate, nil); err != nil { + if err := apiv1.Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(specTemplate, internalTemplate, nil); err != nil { return "", fmt.Errorf("failed to convert podtemplate while printing: %v", err) } printersinternal.DescribePodTemplate(internalTemplate, w) diff --git 
a/vendor/k8s.io/kubernetes/pkg/kubectl/rolling_updater.go b/vendor/k8s.io/kubernetes/pkg/kubectl/rolling_updater.go index 3259d6dd976c..1a1f8dfb3300 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/rolling_updater.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/rolling_updater.go @@ -32,9 +32,9 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/util/integer" "k8s.io/client-go/util/retry" - "k8s.io/kubernetes/pkg/api" - apiv1 "k8s.io/kubernetes/pkg/api/v1" podutil "k8s.io/kubernetes/pkg/api/v1/pod" + api "k8s.io/kubernetes/pkg/apis/core" + apiv1 "k8s.io/kubernetes/pkg/apis/core/v1" coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" client "k8s.io/kubernetes/pkg/client/unversioned" deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" @@ -42,6 +42,7 @@ import ( ) const ( + kubectlAnnotationPrefix = "kubectl.kubernetes.io/" sourceIdAnnotation = kubectlAnnotationPrefix + "update-source-id" desiredReplicasAnnotation = kubectlAnnotationPrefix + "desired-replicas" originalReplicasAnnotation = kubectlAnnotationPrefix + "original-replicas" @@ -424,7 +425,7 @@ func (r *RollingUpdater) readyPods(oldRc, newRc *api.ReplicationController, minR } for _, pod := range pods.Items { v1Pod := &v1.Pod{} - if err := apiv1.Convert_api_Pod_To_v1_Pod(&pod, v1Pod, nil); err != nil { + if err := apiv1.Convert_core_Pod_To_v1_Pod(&pod, v1Pod, nil); err != nil { return 0, 0, err } // Do not count deleted pods as ready diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/rollout_status.go b/vendor/k8s.io/kubernetes/pkg/kubectl/rollout_status.go index d7d40af16010..2f58cb28eec0 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/rollout_status.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/rollout_status.go @@ -19,13 +19,13 @@ package kubectl import ( "fmt" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/kubernetes" + clientappsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1" + clientextensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" "k8s.io/kubernetes/pkg/apis/apps" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - appsclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion" - extensionsclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion" "k8s.io/kubernetes/pkg/controller/deployment/util" ) @@ -35,31 +35,31 @@ type StatusViewer interface { } // StatusViewerFor returns a StatusViewer for the resource specified by kind. 
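The status viewers below are ported from the internal clientset to client-go's versioned types, and because the external specs carry Replicas as *int32 rather than int32, each comparison gains a nil guard. A small sketch of that guard for an extensions/v1beta1 Deployment (the helper name and the fallback of 1, the API default, are illustrative):

    package example

    import (
    	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
    )

    // replicasOrDefault dereferences the optional Replicas field of the external
    // DeploymentSpec; the internal type used a plain int32, so ported code must
    // nil-check before comparing against the Status counters.
    func replicasOrDefault(spec *extensionsv1beta1.DeploymentSpec) int32 {
    	if spec.Replicas != nil {
    		return *spec.Replicas
    	}
    	return 1 // extensions/v1beta1 defaults replicas to 1 when unset
    }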
-func StatusViewerFor(kind schema.GroupKind, c internalclientset.Interface) (StatusViewer, error) { +func StatusViewerFor(kind schema.GroupKind, c kubernetes.Interface) (StatusViewer, error) { switch kind { - case extensions.Kind("Deployment"), apps.Kind("Deployment"): + case extensionsv1beta1.SchemeGroupVersion.WithKind("Deployment").GroupKind(), apps.Kind("Deployment"): return &DeploymentStatusViewer{c.Extensions()}, nil - case extensions.Kind("DaemonSet"), apps.Kind("DaemonSet"): + case extensionsv1beta1.SchemeGroupVersion.WithKind("DaemonSet").GroupKind(), apps.Kind("DaemonSet"): return &DaemonSetStatusViewer{c.Extensions()}, nil case apps.Kind("StatefulSet"): - return &StatefulSetStatusViewer{c.Apps()}, nil + return &StatefulSetStatusViewer{c.AppsV1beta1()}, nil } return nil, fmt.Errorf("no status viewer has been implemented for %v", kind) } // DeploymentStatusViewer implements the StatusViewer interface. type DeploymentStatusViewer struct { - c extensionsclient.DeploymentsGetter + c clientextensionsv1beta1.DeploymentsGetter } // DaemonSetStatusViewer implements the StatusViewer interface. type DaemonSetStatusViewer struct { - c extensionsclient.DaemonSetsGetter + c clientextensionsv1beta1.DaemonSetsGetter } // StatefulSetStatusViewer implements the StatusViewer interface. type StatefulSetStatusViewer struct { - c appsclient.StatefulSetsGetter + c clientappsv1beta1.StatefulSetsGetter } // Status returns a message describing deployment status, and a bool value indicating if the status is considered done. @@ -78,12 +78,12 @@ func (s *DeploymentStatusViewer) Status(namespace, name string, revision int64) } } if deployment.Generation <= deployment.Status.ObservedGeneration { - cond := util.GetDeploymentConditionInternal(deployment.Status, extensions.DeploymentProgressing) + cond := util.GetDeploymentCondition(deployment.Status, extensionsv1beta1.DeploymentProgressing) if cond != nil && cond.Reason == util.TimedOutReason { return "", false, fmt.Errorf("deployment %q exceeded its progress deadline", name) } - if deployment.Status.UpdatedReplicas < deployment.Spec.Replicas { - return fmt.Sprintf("Waiting for rollout to finish: %d out of %d new replicas have been updated...\n", deployment.Status.UpdatedReplicas, deployment.Spec.Replicas), false, nil + if deployment.Spec.Replicas != nil && deployment.Status.UpdatedReplicas < *deployment.Spec.Replicas { + return fmt.Sprintf("Waiting for rollout to finish: %d out of %d new replicas have been updated...\n", deployment.Status.UpdatedReplicas, *deployment.Spec.Replicas), false, nil } if deployment.Status.Replicas > deployment.Status.UpdatedReplicas { return fmt.Sprintf("Waiting for rollout to finish: %d old replicas are pending termination...\n", deployment.Status.Replicas-deployment.Status.UpdatedReplicas), false, nil @@ -104,7 +104,7 @@ func (s *DaemonSetStatusViewer) Status(namespace, name string, revision int64) ( if err != nil { return "", false, err } - if daemon.Spec.UpdateStrategy.Type != extensions.RollingUpdateDaemonSetStrategyType { + if daemon.Spec.UpdateStrategy.Type != extensionsv1beta1.RollingUpdateDaemonSetStrategyType { return "", true, fmt.Errorf("Status is available only for RollingUpdate strategy type") } if daemon.Generation <= daemon.Status.ObservedGeneration { @@ -131,13 +131,15 @@ func (s *StatefulSetStatusViewer) Status(namespace, name string, revision int64) if sts.Status.ObservedGeneration == nil || sts.Generation > *sts.Status.ObservedGeneration { return "Waiting for statefulset spec update to be observed...\n", false, 
nil } - if sts.Status.ReadyReplicas < sts.Spec.Replicas { - return fmt.Sprintf("Waiting for %d pods to be ready...\n", sts.Spec.Replicas-sts.Status.ReadyReplicas), false, nil + if sts.Spec.Replicas != nil && sts.Status.ReadyReplicas < *sts.Spec.Replicas { + return fmt.Sprintf("Waiting for %d pods to be ready...\n", *sts.Spec.Replicas-sts.Status.ReadyReplicas), false, nil } if sts.Spec.UpdateStrategy.Type == apps.RollingUpdateStatefulSetStrategyType && sts.Spec.UpdateStrategy.RollingUpdate != nil { - if sts.Status.UpdatedReplicas < (sts.Spec.Replicas - sts.Spec.UpdateStrategy.RollingUpdate.Partition) { - return fmt.Sprintf("Waiting for partitioned roll out to finish: %d out of %d new pods have been updated...\n", - sts.Status.UpdatedReplicas, (sts.Spec.Replicas - sts.Spec.UpdateStrategy.RollingUpdate.Partition)), false, nil + if sts.Spec.Replicas != nil && sts.Spec.UpdateStrategy.RollingUpdate.Partition != nil { + if sts.Status.UpdatedReplicas < (*sts.Spec.Replicas - *sts.Spec.UpdateStrategy.RollingUpdate.Partition) { + return fmt.Sprintf("Waiting for partitioned roll out to finish: %d out of %d new pods have been updated...\n", + sts.Status.UpdatedReplicas, (*sts.Spec.Replicas - *sts.Spec.UpdateStrategy.RollingUpdate.Partition)), false, nil + } } return fmt.Sprintf("partitioned roll out complete: %d new pods have been updated...\n", sts.Status.UpdatedReplicas), true, nil diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/run.go b/vendor/k8s.io/kubernetes/pkg/kubectl/run.go index 7dd77f1f391d..e78bfd4c3194 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/run.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/run.go @@ -31,7 +31,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/validation" - "k8s.io/kubernetes/pkg/api" ) type DeploymentV1Beta1 struct{} @@ -102,8 +101,6 @@ func (DeploymentV1Beta1) Generate(genericParams map[string]interface{}) (runtime return nil, err } - // TODO: use versioned types for generators so that we don't need to - // set default values manually (see issue #17384) count32 := int32(count) deployment := extensionsv1beta1.Deployment{ ObjectMeta: metav1.ObjectMeta{ @@ -608,31 +605,6 @@ func (BasicReplicationController) ParamNames() []GeneratorParam { } } -// populateResourceList takes strings of form =,= -// and returns ResourceList. -func populateResourceList(spec string) (api.ResourceList, error) { - // empty input gets a nil response to preserve generator test expected behaviors - if spec == "" { - return nil, nil - } - - result := api.ResourceList{} - resourceStatements := strings.Split(spec, ",") - for _, resourceStatement := range resourceStatements { - parts := strings.Split(resourceStatement, "=") - if len(parts) != 2 { - return nil, fmt.Errorf("Invalid argument syntax %v, expected =", resourceStatement) - } - resourceName := api.ResourceName(parts[0]) - resourceQuantity, err := resource.ParseQuantity(parts[1]) - if err != nil { - return nil, err - } - result[resourceName] = resourceQuantity - } - return result, nil -} - // populateResourceListV1 takes strings of form =,= // and returns ResourceList. func populateResourceListV1(spec string) (v1.ResourceList, error) { @@ -658,23 +630,6 @@ func populateResourceListV1(spec string) (v1.ResourceList, error) { return result, nil } -// HandleResourceRequirements parses the limits and requests parameters if specified -// and returns ResourceRequirements. 
-func HandleResourceRequirements(params map[string]string) (api.ResourceRequirements, error) { - result := api.ResourceRequirements{} - limits, err := populateResourceList(params["limits"]) - if err != nil { - return result, err - } - result.Limits = limits - requests, err := populateResourceList(params["requests"]) - if err != nil { - return result, err - } - result.Requests = requests - return result, nil -} - // HandleResourceRequirementsV1 parses the limits and requests parameters if specified // and returns ResourceRequirements. func HandleResourceRequirementsV1(params map[string]string) (v1.ResourceRequirements, error) { diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/scale.go b/vendor/k8s.io/kubernetes/pkg/kubectl/scale.go index e445733849ad..da6d4fbb796d 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/scale.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/scale.go @@ -27,9 +27,9 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/apis/batch" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" appsclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion" @@ -104,9 +104,11 @@ type ScaleError struct { } func (c ScaleError) Error() string { - return fmt.Sprintf( - "Scaling the resource failed with: %v; Current resource version %s", - c.ActualError, c.ResourceVersion) + msg := fmt.Sprintf("Scaling the resource failed with: %v", c.ActualError) + if len(c.ResourceVersion) > 0 { + msg += fmt.Sprintf("; Current resource version %s", c.ResourceVersion) + } + return msg } // RetryParams encapsulates the retry parameters used by kubectl's scaler. 
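The ScaleError change above stops reporting a fabricated "Unknown" resource version: the version is appended to the message only when one is actually known, and the Get-failure sites now pass an empty string. A small illustration of the two resulting messages, assuming the field order used in the hunk above (failure type, resource version, underlying error):

    package main

    import (
    	"errors"
    	"fmt"

    	"k8s.io/kubernetes/pkg/kubectl"
    )

    func main() {
    	cause := errors.New("deployments.extensions \"web\" not found")

    	// No resource version known: the message ends after the underlying error.
    	fmt.Println(kubectl.ScaleError{kubectl.ScaleGetFailure, "", cause}.Error())

    	// A known resource version is still appended as before.
    	fmt.Println(kubectl.ScaleError{kubectl.ScaleGetFailure, "1234", cause}.Error())
    }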
@@ -169,7 +171,7 @@ type ReplicationControllerScaler struct { func (scaler *ReplicationControllerScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) (string, error) { controller, err := scaler.c.ReplicationControllers(namespace).Get(name, metav1.GetOptions{}) if err != nil { - return "", ScaleError{ScaleGetFailure, "Unknown", err} + return "", ScaleError{ScaleGetFailure, "", err} } if preconditions != nil { if err := preconditions.ValidateReplicationController(controller); err != nil { @@ -267,7 +269,7 @@ type ReplicaSetScaler struct { func (scaler *ReplicaSetScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) (string, error) { rs, err := scaler.c.ReplicaSets(namespace).Get(name, metav1.GetOptions{}) if err != nil { - return "", ScaleError{ScaleGetFailure, "Unknown", err} + return "", ScaleError{ScaleGetFailure, "", err} } if preconditions != nil { if err := preconditions.ValidateReplicaSet(rs); err != nil { @@ -341,7 +343,7 @@ type StatefulSetScaler struct { func (scaler *StatefulSetScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) (string, error) { ss, err := scaler.c.StatefulSets(namespace).Get(name, metav1.GetOptions{}) if err != nil { - return "", ScaleError{ScaleGetFailure, "Unknown", err} + return "", ScaleError{ScaleGetFailure, "", err} } if preconditions != nil { if err := preconditions.ValidateStatefulSet(ss); err != nil { @@ -397,7 +399,7 @@ type JobScaler struct { func (scaler *JobScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) (string, error) { job, err := scaler.c.Jobs(namespace).Get(name, metav1.GetOptions{}) if err != nil { - return "", ScaleError{ScaleGetFailure, "Unknown", err} + return "", ScaleError{ScaleGetFailure, "", err} } if preconditions != nil { if err := preconditions.ValidateJob(job); err != nil { @@ -466,7 +468,7 @@ type DeploymentScaler struct { func (scaler *DeploymentScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) (string, error) { deployment, err := scaler.c.Deployments(namespace).Get(name, metav1.GetOptions{}) if err != nil { - return "", ScaleError{ScaleGetFailure, "Unknown", err} + return "", ScaleError{ScaleGetFailure, "", err} } if preconditions != nil { if err := preconditions.ValidateDeployment(deployment); err != nil { diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/scheme/BUILD b/vendor/k8s.io/kubernetes/pkg/kubectl/scheme/BUILD new file mode 100644 index 000000000000..cdd9333404b1 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/scheme/BUILD @@ -0,0 +1,62 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "install.go", + "scheme.go", + ], + importpath = "k8s.io/kubernetes/pkg/kubectl/scheme", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/api/admission/v1beta1:go_default_library", + "//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library", + "//vendor/k8s.io/api/apps/v1:go_default_library", + "//vendor/k8s.io/api/apps/v1beta1:go_default_library", + "//vendor/k8s.io/api/apps/v1beta2:go_default_library", + "//vendor/k8s.io/api/authentication/v1:go_default_library", + "//vendor/k8s.io/api/authentication/v1beta1:go_default_library", + "//vendor/k8s.io/api/authorization/v1:go_default_library", + "//vendor/k8s.io/api/authorization/v1beta1:go_default_library", + "//vendor/k8s.io/api/autoscaling/v1:go_default_library", + 
"//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library", + "//vendor/k8s.io/api/batch/v1:go_default_library", + "//vendor/k8s.io/api/batch/v1beta1:go_default_library", + "//vendor/k8s.io/api/batch/v2alpha1:go_default_library", + "//vendor/k8s.io/api/certificates/v1beta1:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", + "//vendor/k8s.io/api/imagepolicy/v1alpha1:go_default_library", + "//vendor/k8s.io/api/networking/v1:go_default_library", + "//vendor/k8s.io/api/policy/v1beta1:go_default_library", + "//vendor/k8s.io/api/rbac/v1:go_default_library", + "//vendor/k8s.io/api/rbac/v1alpha1:go_default_library", + "//vendor/k8s.io/api/rbac/v1beta1:go_default_library", + "//vendor/k8s.io/api/scheduling/v1alpha1:go_default_library", + "//vendor/k8s.io/api/settings/v1alpha1:go_default_library", + "//vendor/k8s.io/api/storage/v1:go_default_library", + "//vendor/k8s.io/api/storage/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/scheme/install.go b/vendor/k8s.io/kubernetes/pkg/kubectl/scheme/install.go new file mode 100644 index 000000000000..51d4e16f18a1 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/scheme/install.go @@ -0,0 +1,304 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package scheme + +import ( + admissionv1alpha1 "k8s.io/api/admission/v1beta1" + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + appsv1 "k8s.io/api/apps/v1" + appsv1beta1 "k8s.io/api/apps/v1beta1" + appsv1beta2 "k8s.io/api/apps/v1beta2" + authenticationv1 "k8s.io/api/authentication/v1" + authenticationv1beta1 "k8s.io/api/authentication/v1beta1" + authorizationv1 "k8s.io/api/authorization/v1" + authorizationv1beta1 "k8s.io/api/authorization/v1beta1" + autoscalingv1 "k8s.io/api/autoscaling/v1" + autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1" + batchv1 "k8s.io/api/batch/v1" + batchv1beta1 "k8s.io/api/batch/v1beta1" + batchv2alpha1 "k8s.io/api/batch/v2alpha1" + certificatesv1beta1 "k8s.io/api/certificates/v1beta1" + corev1 "k8s.io/api/core/v1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + imagepolicyv1alpha1 "k8s.io/api/imagepolicy/v1alpha1" + networkingv1 "k8s.io/api/networking/v1" + policyv1beta1 "k8s.io/api/policy/v1beta1" + rbacv1 "k8s.io/api/rbac/v1" + rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" + rbacv1beta1 "k8s.io/api/rbac/v1beta1" + schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" + settingsv1alpha1 "k8s.io/api/settings/v1alpha1" + storagev1 "k8s.io/api/storage/v1" + storagev1beta1 "k8s.io/api/storage/v1beta1" + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/kubernetes/scheme" +) + +// Register all groups in the kubectl's registry, but no componentconfig group since it's not in k8s.io/api +// The code in this file mostly duplicate the install under k8s.io/kubernetes/pkg/api and k8s.io/kubernetes/pkg/apis, +// but does NOT register the internal types. +func init() { + // Register external types for Scheme + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + scheme.AddToScheme(Scheme) + + // Register external types for Registry + if err := announced.NewGroupMetaFactory( + &announced.GroupMetaFactoryArgs{ + GroupName: corev1.GroupName, + VersionPreferenceOrder: []string{corev1.SchemeGroupVersion.Version}, + RootScopedKinds: sets.NewString( + "Node", + "Namespace", + "PersistentVolume", + "ComponentStatus", + ), + IgnoredKinds: sets.NewString( + "ListOptions", + "DeleteOptions", + "Status", + "PodLogOptions", + "PodExecOptions", + "PodAttachOptions", + "PodPortForwardOptions", + "PodProxyOptions", + "NodeProxyOptions", + "ServiceProxyOptions", + ), + }, + announced.VersionToSchemeFunc{ + corev1.SchemeGroupVersion.Version: corev1.AddToScheme, + }, + ).Announce(GroupFactoryRegistry).RegisterAndEnable(Registry, Scheme); err != nil { + panic(err) + } + + if err := announced.NewGroupMetaFactory( + &announced.GroupMetaFactoryArgs{ + GroupName: admissionv1alpha1.GroupName, + VersionPreferenceOrder: []string{admissionv1alpha1.SchemeGroupVersion.Version}, + RootScopedKinds: sets.NewString("AdmissionReview"), + }, + announced.VersionToSchemeFunc{ + admissionv1alpha1.SchemeGroupVersion.Version: admissionv1alpha1.AddToScheme, + }, + ).Announce(GroupFactoryRegistry).RegisterAndEnable(Registry, Scheme); err != nil { + panic(err) + } + + if err := announced.NewGroupMetaFactory( + &announced.GroupMetaFactoryArgs{ + GroupName: admissionregistrationv1alpha1.GroupName, + RootScopedKinds: sets.NewString("InitializerConfiguration", "ValidatingWebhookConfiguration", "MutatingWebhookConfiguration"), + VersionPreferenceOrder: []string{admissionregistrationv1alpha1.SchemeGroupVersion.Version}, + }, + 
announced.VersionToSchemeFunc{ + admissionregistrationv1alpha1.SchemeGroupVersion.Version: admissionregistrationv1alpha1.AddToScheme, + }, + ).Announce(GroupFactoryRegistry).RegisterAndEnable(Registry, Scheme); err != nil { + panic(err) + } + + if err := announced.NewGroupMetaFactory( + &announced.GroupMetaFactoryArgs{ + GroupName: appsv1.GroupName, + VersionPreferenceOrder: []string{appsv1beta1.SchemeGroupVersion.Version, appsv1beta2.SchemeGroupVersion.Version, appsv1.SchemeGroupVersion.Version}, + }, + announced.VersionToSchemeFunc{ + appsv1beta1.SchemeGroupVersion.Version: appsv1beta1.AddToScheme, + appsv1beta2.SchemeGroupVersion.Version: appsv1beta2.AddToScheme, + appsv1.SchemeGroupVersion.Version: appsv1.AddToScheme, + }, + ).Announce(GroupFactoryRegistry).RegisterAndEnable(Registry, Scheme); err != nil { + panic(err) + } + + if err := announced.NewGroupMetaFactory( + &announced.GroupMetaFactoryArgs{ + GroupName: authenticationv1beta1.GroupName, + VersionPreferenceOrder: []string{authenticationv1.SchemeGroupVersion.Version, authenticationv1beta1.SchemeGroupVersion.Version}, + RootScopedKinds: sets.NewString("TokenReview"), + }, + announced.VersionToSchemeFunc{ + authenticationv1beta1.SchemeGroupVersion.Version: authenticationv1beta1.AddToScheme, + authenticationv1.SchemeGroupVersion.Version: authenticationv1.AddToScheme, + }, + ).Announce(GroupFactoryRegistry).RegisterAndEnable(Registry, Scheme); err != nil { + panic(err) + } + + if err := announced.NewGroupMetaFactory( + &announced.GroupMetaFactoryArgs{ + GroupName: authorizationv1.GroupName, + VersionPreferenceOrder: []string{authorizationv1.SchemeGroupVersion.Version, authorizationv1beta1.SchemeGroupVersion.Version}, + RootScopedKinds: sets.NewString("SubjectAccessReview", "SelfSubjectAccessReview", "SelfSubjectRulesReview"), + }, + announced.VersionToSchemeFunc{ + authorizationv1beta1.SchemeGroupVersion.Version: authorizationv1beta1.AddToScheme, + authorizationv1.SchemeGroupVersion.Version: authorizationv1.AddToScheme, + }, + ).Announce(GroupFactoryRegistry).RegisterAndEnable(Registry, Scheme); err != nil { + panic(err) + } + + if err := announced.NewGroupMetaFactory( + &announced.GroupMetaFactoryArgs{ + GroupName: autoscalingv1.GroupName, + VersionPreferenceOrder: []string{autoscalingv1.SchemeGroupVersion.Version, autoscalingv2beta1.SchemeGroupVersion.Version}, + }, + announced.VersionToSchemeFunc{ + autoscalingv1.SchemeGroupVersion.Version: autoscalingv1.AddToScheme, + autoscalingv2beta1.SchemeGroupVersion.Version: autoscalingv2beta1.AddToScheme, + }, + ).Announce(GroupFactoryRegistry).RegisterAndEnable(Registry, Scheme); err != nil { + panic(err) + } + + if err := announced.NewGroupMetaFactory( + &announced.GroupMetaFactoryArgs{ + GroupName: batchv1.GroupName, + VersionPreferenceOrder: []string{batchv1.SchemeGroupVersion.Version, batchv1beta1.SchemeGroupVersion.Version, batchv2alpha1.SchemeGroupVersion.Version}, + }, + announced.VersionToSchemeFunc{ + batchv1.SchemeGroupVersion.Version: batchv1.AddToScheme, + batchv1beta1.SchemeGroupVersion.Version: batchv1beta1.AddToScheme, + batchv2alpha1.SchemeGroupVersion.Version: batchv2alpha1.AddToScheme, + }, + ).Announce(GroupFactoryRegistry).RegisterAndEnable(Registry, Scheme); err != nil { + panic(err) + } + + if err := announced.NewGroupMetaFactory( + &announced.GroupMetaFactoryArgs{ + GroupName: certificatesv1beta1.GroupName, + VersionPreferenceOrder: []string{certificatesv1beta1.SchemeGroupVersion.Version}, + RootScopedKinds: sets.NewString("CertificateSigningRequest"), + }, + 
announced.VersionToSchemeFunc{ + certificatesv1beta1.SchemeGroupVersion.Version: certificatesv1beta1.AddToScheme, + }, + ).Announce(GroupFactoryRegistry).RegisterAndEnable(Registry, Scheme); err != nil { + panic(err) + } + + if err := announced.NewGroupMetaFactory( + &announced.GroupMetaFactoryArgs{ + GroupName: extensionsv1beta1.GroupName, + VersionPreferenceOrder: []string{extensionsv1beta1.SchemeGroupVersion.Version}, + RootScopedKinds: sets.NewString("PodSecurityPolicy"), + }, + announced.VersionToSchemeFunc{ + extensionsv1beta1.SchemeGroupVersion.Version: extensionsv1beta1.AddToScheme, + }, + ).Announce(GroupFactoryRegistry).RegisterAndEnable(Registry, Scheme); err != nil { + panic(err) + } + + if err := announced.NewGroupMetaFactory( + &announced.GroupMetaFactoryArgs{ + GroupName: imagepolicyv1alpha1.GroupName, + VersionPreferenceOrder: []string{imagepolicyv1alpha1.SchemeGroupVersion.Version}, + RootScopedKinds: sets.NewString("ImageReview"), + }, + announced.VersionToSchemeFunc{ + imagepolicyv1alpha1.SchemeGroupVersion.Version: imagepolicyv1alpha1.AddToScheme, + }, + ).Announce(GroupFactoryRegistry).RegisterAndEnable(Registry, Scheme); err != nil { + panic(err) + } + + if err := announced.NewGroupMetaFactory( + &announced.GroupMetaFactoryArgs{ + GroupName: networkingv1.GroupName, + VersionPreferenceOrder: []string{networkingv1.SchemeGroupVersion.Version}, + }, + announced.VersionToSchemeFunc{ + networkingv1.SchemeGroupVersion.Version: networkingv1.AddToScheme, + }, + ).Announce(GroupFactoryRegistry).RegisterAndEnable(Registry, Scheme); err != nil { + panic(err) + } + + if err := announced.NewGroupMetaFactory( + &announced.GroupMetaFactoryArgs{ + GroupName: policyv1beta1.GroupName, + VersionPreferenceOrder: []string{policyv1beta1.SchemeGroupVersion.Version}, + }, + announced.VersionToSchemeFunc{ + policyv1beta1.SchemeGroupVersion.Version: policyv1beta1.AddToScheme, + }, + ).Announce(GroupFactoryRegistry).RegisterAndEnable(Registry, Scheme); err != nil { + panic(err) + } + + if err := announced.NewGroupMetaFactory( + &announced.GroupMetaFactoryArgs{ + GroupName: rbacv1.GroupName, + VersionPreferenceOrder: []string{rbacv1.SchemeGroupVersion.Version, rbacv1beta1.SchemeGroupVersion.Version, rbacv1alpha1.SchemeGroupVersion.Version}, + RootScopedKinds: sets.NewString("ClusterRole", "ClusterRoleBinding"), + }, + announced.VersionToSchemeFunc{ + rbacv1.SchemeGroupVersion.Version: rbacv1.AddToScheme, + rbacv1beta1.SchemeGroupVersion.Version: rbacv1beta1.AddToScheme, + rbacv1alpha1.SchemeGroupVersion.Version: rbacv1alpha1.AddToScheme, + }, + ).Announce(GroupFactoryRegistry).RegisterAndEnable(Registry, Scheme); err != nil { + panic(err) + } + + if err := announced.NewGroupMetaFactory( + &announced.GroupMetaFactoryArgs{ + GroupName: schedulingv1alpha1.GroupName, + VersionPreferenceOrder: []string{schedulingv1alpha1.SchemeGroupVersion.Version}, + RootScopedKinds: sets.NewString("PriorityClass"), + }, + announced.VersionToSchemeFunc{ + schedulingv1alpha1.SchemeGroupVersion.Version: schedulingv1alpha1.AddToScheme, + }, + ).Announce(GroupFactoryRegistry).RegisterAndEnable(Registry, Scheme); err != nil { + panic(err) + } + + if err := announced.NewGroupMetaFactory( + &announced.GroupMetaFactoryArgs{ + GroupName: settingsv1alpha1.GroupName, + VersionPreferenceOrder: []string{settingsv1alpha1.SchemeGroupVersion.Version}, + }, + announced.VersionToSchemeFunc{ + settingsv1alpha1.SchemeGroupVersion.Version: settingsv1alpha1.AddToScheme, + }, + ).Announce(GroupFactoryRegistry).RegisterAndEnable(Registry, 
Scheme); err != nil { + panic(err) + } + + if err := announced.NewGroupMetaFactory( + &announced.GroupMetaFactoryArgs{ + GroupName: storagev1.GroupName, + VersionPreferenceOrder: []string{storagev1.SchemeGroupVersion.Version, storagev1beta1.SchemeGroupVersion.Version}, + RootScopedKinds: sets.NewString("StorageClass"), + }, + announced.VersionToSchemeFunc{ + storagev1.SchemeGroupVersion.Version: storagev1.AddToScheme, + storagev1beta1.SchemeGroupVersion.Version: storagev1beta1.AddToScheme, + }, + ).Announce(GroupFactoryRegistry).RegisterAndEnable(Registry, Scheme); err != nil { + panic(err) + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/scheme/scheme.go b/vendor/k8s.io/kubernetes/pkg/kubectl/scheme/scheme.go new file mode 100644 index 000000000000..cd38d22a1838 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/scheme/scheme.go @@ -0,0 +1,43 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheme + +import ( + "os" + + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" +) + +// All kubectl code should eventually switch to use this Registry and Scheme instead of the global ones. + +// GroupFactoryRegistry is the APIGroupFactoryRegistry (overlaps a bit with Registry, see comments in package for details) +var GroupFactoryRegistry = make(announced.APIGroupFactoryRegistry) + +// Registry is an instance of an API registry. +var Registry = registered.NewOrDie(os.Getenv("KUBE_API_VERSIONS")) + +// Scheme is the default instance of runtime.Scheme to which types in the Kubernetes API are already registered. +var Scheme = runtime.NewScheme() + +// Codecs provides access to encoding and decoding for the scheme +var Codecs = serializer.NewCodecFactory(Scheme) + +// ParameterCodec handles versioning of objects that are converted to query parameters. 
+var ParameterCodec = runtime.NewParameterCodec(Scheme) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/secret.go b/vendor/k8s.io/kubernetes/pkg/kubectl/secret.go index 856d1d86f946..04c5ced232e6 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/secret.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/secret.go @@ -23,9 +23,10 @@ import ( "path" "strings" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/validation" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/kubectl/util" "k8s.io/kubernetes/pkg/kubectl/util/hash" ) @@ -128,11 +129,11 @@ func (s SecretGeneratorV1) StructuredGenerate() (runtime.Object, error) { if err := s.validate(); err != nil { return nil, err } - secret := &api.Secret{} + secret := &v1.Secret{} secret.Name = s.Name secret.Data = map[string][]byte{} if len(s.Type) > 0 { - secret.Type = api.SecretType(s.Type) + secret.Type = v1.SecretType(s.Type) } if len(s.FileSources) > 0 { if err := handleFromFileSources(secret, s.FileSources); err != nil { @@ -171,9 +172,9 @@ func (s SecretGeneratorV1) validate() error { } // handleFromLiteralSources adds the specified literal source information into the provided secret -func handleFromLiteralSources(secret *api.Secret, literalSources []string) error { +func handleFromLiteralSources(secret *v1.Secret, literalSources []string) error { for _, literalSource := range literalSources { - keyName, value, err := parseLiteralSource(literalSource) + keyName, value, err := util.ParseLiteralSource(literalSource) if err != nil { return err } @@ -185,9 +186,9 @@ func handleFromLiteralSources(secret *api.Secret, literalSources []string) error } // handleFromFileSources adds the specified file source information into the provided secret -func handleFromFileSources(secret *api.Secret, fileSources []string) error { +func handleFromFileSources(secret *v1.Secret, fileSources []string) error { for _, fileSource := range fileSources { - keyName, filePath, err := parseFileSource(fileSource) + keyName, filePath, err := util.ParseFileSource(fileSource) if err != nil { return err } @@ -229,7 +230,7 @@ func handleFromFileSources(secret *api.Secret, fileSources []string) error { // handleFromEnvFileSource adds the specified env file source information // into the provided secret -func handleFromEnvFileSource(secret *api.Secret, envFileSource string) error { +func handleFromEnvFileSource(secret *v1.Secret, envFileSource string) error { info, err := os.Stat(envFileSource) if err != nil { switch err := err.(type) { @@ -248,7 +249,7 @@ func handleFromEnvFileSource(secret *api.Secret, envFileSource string) error { }) } -func addKeyFromFileToSecret(secret *api.Secret, keyName, filePath string) error { +func addKeyFromFileToSecret(secret *v1.Secret, keyName, filePath string) error { data, err := ioutil.ReadFile(filePath) if err != nil { return err @@ -256,7 +257,7 @@ func addKeyFromFileToSecret(secret *api.Secret, keyName, filePath string) error return addKeyFromLiteralToSecret(secret, keyName, data) } -func addKeyFromLiteralToSecret(secret *api.Secret, keyName string, data []byte) error { +func addKeyFromLiteralToSecret(secret *v1.Secret, keyName string, data []byte) error { if errs := validation.IsConfigMapKey(keyName); len(errs) != 0 { return fmt.Errorf("%q is not a valid key name for a Secret: %s", keyName, strings.Join(errs, ";")) } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/secret_for_docker_registry.go b/vendor/k8s.io/kubernetes/pkg/kubectl/secret_for_docker_registry.go index d39ebb5b32bb..fc1e15b6b914 100644 
--- a/vendor/k8s.io/kubernetes/pkg/kubectl/secret_for_docker_registry.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/secret_for_docker_registry.go @@ -20,8 +20,8 @@ import ( "encoding/json" "fmt" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/credentialprovider" "k8s.io/kubernetes/pkg/kubectl/util/hash" ) @@ -89,11 +89,11 @@ func (s SecretForDockerRegistryGeneratorV1) StructuredGenerate() (runtime.Object if err != nil { return nil, err } - secret := &api.Secret{} + secret := &v1.Secret{} secret.Name = s.Name - secret.Type = api.SecretTypeDockercfg + secret.Type = v1.SecretTypeDockercfg secret.Data = map[string][]byte{} - secret.Data[api.DockerConfigKey] = dockercfgContent + secret.Data[v1.DockerConfigKey] = dockercfgContent if s.AppendHash { h, err := hash.SecretHash(secret) if err != nil { @@ -141,7 +141,9 @@ func handleDockercfgContent(username, password, email, server string) ([]byte, e Email: email, } - dockerCfg := map[string]credentialprovider.DockerConfigEntry{server: dockercfgAuth} + dockerCfg := credentialprovider.DockerConfigJson{ + Auths: map[string]credentialprovider.DockerConfigEntry{server: dockercfgAuth}, + } return json.Marshal(dockerCfg) } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/secret_for_tls.go b/vendor/k8s.io/kubernetes/pkg/kubectl/secret_for_tls.go index aeb6d8863a9c..1c3fffd0456b 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/secret_for_tls.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/secret_for_tls.go @@ -21,8 +21,8 @@ import ( "fmt" "io/ioutil" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/kubectl/util/hash" ) @@ -87,12 +87,12 @@ func (s SecretForTLSGeneratorV1) StructuredGenerate() (runtime.Object, error) { if err != nil { return nil, err } - secret := &api.Secret{} + secret := &v1.Secret{} secret.Name = s.Name - secret.Type = api.SecretTypeTLS + secret.Type = v1.SecretTypeTLS secret.Data = map[string][]byte{} - secret.Data[api.TLSCertKey] = []byte(tlsCrt) - secret.Data[api.TLSPrivateKeyKey] = []byte(tlsKey) + secret.Data[v1.TLSCertKey] = []byte(tlsCrt) + secret.Data[v1.TLSPrivateKeyKey] = []byte(tlsKey) if s.AppendHash { h, err := hash.SecretHash(secret) if err != nil { diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/service.go b/vendor/k8s.io/kubernetes/pkg/kubectl/service.go index 072bddbeb03b..7f0e0d869f72 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/service.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/service.go @@ -21,10 +21,10 @@ import ( "strconv" "strings" + "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/kubernetes/pkg/api" ) // The only difference between ServiceGeneratorV1 and V2 is that the service port is named "default" in V1, while it is left unnamed in V2. @@ -113,7 +113,7 @@ func generate(genericParams map[string]interface{}) (runtime.Object, error) { isHeadlessService := params["cluster-ip"] == "None" - ports := []api.ServicePort{} + ports := []v1.ServicePort{} servicePortName, found := params["port-name"] if !found { // Leave the port unnamed. 
@@ -135,7 +135,7 @@ func generate(genericParams map[string]interface{}) (runtime.Object, error) { if portString, found = params["ports"]; !found { portString, found = params["port"] if !found && !isHeadlessService { - return nil, fmt.Errorf("'port' is a required parameter.") + return nil, fmt.Errorf("'ports' or 'port' is a required parameter.") } } @@ -168,20 +168,20 @@ func generate(genericParams map[string]interface{}) (runtime.Object, error) { protocol = exposeProtocol } } - ports = append(ports, api.ServicePort{ + ports = append(ports, v1.ServicePort{ Name: name, Port: int32(port), - Protocol: api.Protocol(protocol), + Protocol: v1.Protocol(protocol), }) } } - service := api.Service{ + service := v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: name, Labels: labels, }, - Spec: api.ServiceSpec{ + Spec: v1.ServiceSpec{ Selector: selector, Ports: ports, }, @@ -213,24 +213,24 @@ func generate(genericParams map[string]interface{}) (runtime.Object, error) { service.Spec.ExternalIPs = []string{params["external-ip"]} } if len(params["type"]) != 0 { - service.Spec.Type = api.ServiceType(params["type"]) + service.Spec.Type = v1.ServiceType(params["type"]) } - if service.Spec.Type == api.ServiceTypeLoadBalancer { + if service.Spec.Type == v1.ServiceTypeLoadBalancer { service.Spec.LoadBalancerIP = params["load-balancer-ip"] } if len(params["session-affinity"]) != 0 { - switch api.ServiceAffinity(params["session-affinity"]) { - case api.ServiceAffinityNone: - service.Spec.SessionAffinity = api.ServiceAffinityNone - case api.ServiceAffinityClientIP: - service.Spec.SessionAffinity = api.ServiceAffinityClientIP + switch v1.ServiceAffinity(params["session-affinity"]) { + case v1.ServiceAffinityNone: + service.Spec.SessionAffinity = v1.ServiceAffinityNone + case v1.ServiceAffinityClientIP: + service.Spec.SessionAffinity = v1.ServiceAffinityClientIP default: return nil, fmt.Errorf("unknown session affinity: %s", params["session-affinity"]) } } if len(params["cluster-ip"]) != 0 { if params["cluster-ip"] == "None" { - service.Spec.ClusterIP = api.ClusterIPNone + service.Spec.ClusterIP = v1.ClusterIPNone } else { service.Spec.ClusterIP = params["cluster-ip"] } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/service_basic.go b/vendor/k8s.io/kubernetes/pkg/kubectl/service_basic.go index d53cc60fcd29..1f31bd09a2a3 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/service_basic.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/service_basic.go @@ -21,17 +21,17 @@ import ( "strconv" "strings" + "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/validation" - "k8s.io/kubernetes/pkg/api" ) type ServiceCommonGeneratorV1 struct { Name string TCP []string - Type api.ServiceType + Type v1.ServiceType ClusterIP string NodePort int ExternalName string @@ -89,14 +89,25 @@ func parsePorts(portString string) (int32, intstr.IntOrString, error) { if err != nil { return 0, intstr.FromInt(0), err } + + if errs := validation.IsValidPortNum(port); len(errs) != 0 { + return 0, intstr.FromInt(0), fmt.Errorf(strings.Join(errs, ",")) + } + if len(portStringSlice) == 1 { return int32(port), intstr.FromInt(int(port)), nil } var targetPort intstr.IntOrString if portNum, err := strconv.Atoi(portStringSlice[1]); err != nil { + if errs := validation.IsValidPortName(portStringSlice[1]); len(errs) != 0 { + return 0, intstr.FromInt(0), fmt.Errorf(strings.Join(errs, ",")) + } targetPort = intstr.FromString(portStringSlice[1]) } 
else { + if errs := validation.IsValidPortNum(portNum); len(errs) != 0 { + return 0, intstr.FromInt(0), fmt.Errorf(strings.Join(errs, ",")) + } targetPort = intstr.FromInt(portNum) } return int32(port), targetPort, nil @@ -131,7 +142,7 @@ func (s ServiceLoadBalancerGeneratorV1) Generate(params map[string]interface{}) if err != nil { return nil, err } - delegate := &ServiceCommonGeneratorV1{Type: api.ServiceTypeLoadBalancer, ClusterIP: ""} + delegate := &ServiceCommonGeneratorV1{Type: v1.ServiceTypeLoadBalancer, ClusterIP: ""} err = delegate.GenerateCommon(params) if err != nil { return nil, err @@ -144,7 +155,7 @@ func (s ServiceNodePortGeneratorV1) Generate(params map[string]interface{}) (run if err != nil { return nil, err } - delegate := &ServiceCommonGeneratorV1{Type: api.ServiceTypeNodePort, ClusterIP: ""} + delegate := &ServiceCommonGeneratorV1{Type: v1.ServiceTypeNodePort, ClusterIP: ""} err = delegate.GenerateCommon(params) if err != nil { return nil, err @@ -157,7 +168,7 @@ func (s ServiceClusterIPGeneratorV1) Generate(params map[string]interface{}) (ru if err != nil { return nil, err } - delegate := &ServiceCommonGeneratorV1{Type: api.ServiceTypeClusterIP, ClusterIP: ""} + delegate := &ServiceCommonGeneratorV1{Type: v1.ServiceTypeClusterIP, ClusterIP: ""} err = delegate.GenerateCommon(params) if err != nil { return nil, err @@ -170,7 +181,7 @@ func (s ServiceExternalNameGeneratorV1) Generate(params map[string]interface{}) if err != nil { return nil, err } - delegate := &ServiceCommonGeneratorV1{Type: api.ServiceTypeExternalName, ClusterIP: ""} + delegate := &ServiceCommonGeneratorV1{Type: v1.ServiceTypeExternalName, ClusterIP: ""} err = delegate.GenerateCommon(params) if err != nil { return nil, err @@ -187,13 +198,13 @@ func (s ServiceCommonGeneratorV1) validate() error { if len(s.Type) == 0 { return fmt.Errorf("type must be specified") } - if s.ClusterIP == api.ClusterIPNone && s.Type != api.ServiceTypeClusterIP { + if s.ClusterIP == v1.ClusterIPNone && s.Type != v1.ServiceTypeClusterIP { return fmt.Errorf("ClusterIP=None can only be used with ClusterIP service type") } - if s.ClusterIP != api.ClusterIPNone && len(s.TCP) == 0 && s.Type != api.ServiceTypeExternalName { + if s.ClusterIP != v1.ClusterIPNone && len(s.TCP) == 0 && s.Type != v1.ServiceTypeExternalName { return fmt.Errorf("at least one tcp port specifier must be provided") } - if s.Type == api.ServiceTypeExternalName { + if s.Type == v1.ServiceTypeExternalName { if errs := validation.IsDNS1123Subdomain(s.ExternalName); len(errs) != 0 { return fmt.Errorf("invalid service external name %s", s.ExternalName) } @@ -206,7 +217,7 @@ func (s ServiceCommonGeneratorV1) StructuredGenerate() (runtime.Object, error) { if err != nil { return nil, err } - ports := []api.ServicePort{} + ports := []v1.ServicePort{} for _, tcpString := range s.TCP { port, targetPort, err := parsePorts(tcpString) if err != nil { @@ -214,11 +225,11 @@ func (s ServiceCommonGeneratorV1) StructuredGenerate() (runtime.Object, error) { } portName := strings.Replace(tcpString, ":", "-", -1) - ports = append(ports, api.ServicePort{ + ports = append(ports, v1.ServicePort{ Name: portName, Port: port, TargetPort: targetPort, - Protocol: api.Protocol("TCP"), + Protocol: v1.Protocol("TCP"), NodePort: int32(s.NodePort), }) } @@ -229,13 +240,13 @@ func (s ServiceCommonGeneratorV1) StructuredGenerate() (runtime.Object, error) { selector := map[string]string{} selector["app"] = s.Name - service := api.Service{ + service := v1.Service{ ObjectMeta: metav1.ObjectMeta{ 
Name: s.Name, Labels: labels, }, - Spec: api.ServiceSpec{ - Type: api.ServiceType(s.Type), + Spec: v1.ServiceSpec{ + Type: v1.ServiceType(s.Type), Selector: selector, Ports: ports, ExternalName: s.ExternalName, diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/serviceaccount.go b/vendor/k8s.io/kubernetes/pkg/kubectl/serviceaccount.go index 08b84fb6fda5..fa701e140a24 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/serviceaccount.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/serviceaccount.go @@ -19,8 +19,8 @@ package kubectl import ( "fmt" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/kubernetes/pkg/api" ) // ServiceAccountGeneratorV1 supports stable generation of a service account @@ -37,7 +37,7 @@ func (g *ServiceAccountGeneratorV1) StructuredGenerate() (runtime.Object, error) if err := g.validate(); err != nil { return nil, err } - serviceAccount := &api.ServiceAccount{} + serviceAccount := &v1.ServiceAccount{} serviceAccount.Name = g.Name return serviceAccount, nil } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/BUILD b/vendor/k8s.io/kubernetes/pkg/kubectl/util/BUILD index 17fdddedf148..535db1587a25 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/util/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/util/BUILD @@ -1,6 +1,7 @@ load( "@io_bazel_rules_go//go:def.bzl", "go_library", + "go_test", ) go_library( @@ -14,6 +15,7 @@ go_library( ], "//conditions:default": [], }), + importpath = "k8s.io/kubernetes/pkg/kubectl/util", visibility = ["//build/visible_to:pkg_kubectl_util_CONSUMERS"], deps = [ "//vendor/golang.org/x/sys/unix:go_default_library", @@ -43,3 +45,10 @@ filegroup( tags = ["automanaged"], visibility = ["//build/visible_to:pkg_kubectl_util_CONSUMERS"], ) + +go_test( + name = "go_default_test", + srcs = ["util_test.go"], + importpath = "k8s.io/kubernetes/pkg/kubectl/util", + library = ":go_default_library", +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/hash/BUILD b/vendor/k8s.io/kubernetes/pkg/kubectl/util/hash/BUILD index 30f5802f7056..7e8885b65350 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/util/hash/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/util/hash/BUILD @@ -1,7 +1,5 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", @@ -11,16 +9,16 @@ load( go_test( name = "go_default_test", srcs = ["hash_test.go"], + importpath = "k8s.io/kubernetes/pkg/kubectl/util/hash", library = ":go_default_library", - tags = ["automanaged"], - deps = ["//pkg/api:go_default_library"], + deps = ["//vendor/k8s.io/api/core/v1:go_default_library"], ) go_library( name = "go_default_library", srcs = ["hash.go"], - tags = ["automanaged"], - deps = ["//pkg/api:go_default_library"], + importpath = "k8s.io/kubernetes/pkg/kubectl/util/hash", + deps = ["//vendor/k8s.io/api/core/v1:go_default_library"], ) filegroup( diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/hash/hash.go b/vendor/k8s.io/kubernetes/pkg/kubectl/util/hash/hash.go index 137e18045409..d7afaab7d7cb 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/util/hash/hash.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/util/hash/hash.go @@ -21,12 +21,12 @@ import ( "encoding/json" "fmt" - "k8s.io/kubernetes/pkg/api" + "k8s.io/api/core/v1" ) // ConfigMapHash returns a hash of the ConfigMap. // The Data, Kind, and Name are taken into account. 
-func ConfigMapHash(cm *api.ConfigMap) (string, error) { +func ConfigMapHash(cm *v1.ConfigMap) (string, error) { encoded, err := encodeConfigMap(cm) if err != nil { return "", err @@ -40,7 +40,7 @@ func ConfigMapHash(cm *api.ConfigMap) (string, error) { // SecretHash returns a hash of the Secret. // The Data, Kind, Name, and Type are taken into account. -func SecretHash(sec *api.Secret) (string, error) { +func SecretHash(sec *v1.Secret) (string, error) { encoded, err := encodeSecret(sec) if err != nil { return "", err @@ -54,7 +54,7 @@ func SecretHash(sec *api.Secret) (string, error) { // encodeConfigMap encodes a ConfigMap. // Data, Kind, and Name are taken into account. -func encodeConfigMap(cm *api.ConfigMap) (string, error) { +func encodeConfigMap(cm *v1.ConfigMap) (string, error) { // json.Marshal sorts the keys in a stable order in the encoding data, err := json.Marshal(map[string]interface{}{"kind": "ConfigMap", "name": cm.Name, "data": cm.Data}) if err != nil { @@ -65,7 +65,7 @@ func encodeConfigMap(cm *api.ConfigMap) (string, error) { // encodeSecret encodes a Secret. // Data, Kind, Name, and Type are taken into account. -func encodeSecret(sec *api.Secret) (string, error) { +func encodeSecret(sec *v1.Secret) (string, error) { // json.Marshal sorts the keys in a stable order in the encoding data, err := json.Marshal(map[string]interface{}{"kind": "Secret", "type": sec.Type, "name": sec.Name, "data": sec.Data}) if err != nil { diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/slice/BUILD b/vendor/k8s.io/kubernetes/pkg/kubectl/util/slice/BUILD index 4886be406f65..03567ebbaf9a 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/util/slice/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/util/slice/BUILD @@ -9,11 +9,13 @@ load( go_library( name = "go_default_library", srcs = ["slice.go"], + importpath = "k8s.io/kubernetes/pkg/kubectl/util/slice", ) go_test( name = "go_default_test", srcs = ["slice_test.go"], + importpath = "k8s.io/kubernetes/pkg/kubectl/util/slice", library = ":go_default_library", ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/term/BUILD b/vendor/k8s.io/kubernetes/pkg/kubectl/util/term/BUILD new file mode 100644 index 000000000000..d05dc402eddc --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/util/term/BUILD @@ -0,0 +1,51 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_library( + name = "go_default_library", + srcs = [ + "resize.go", + "resizeevents.go", + "term.go", + "term_writer.go", + ] + select({ + "@io_bazel_rules_go//go/platform:windows_amd64": [ + "resizeevents_windows.go", + ], + "//conditions:default": [], + }), + importpath = "k8s.io/kubernetes/pkg/kubectl/util/term", + deps = [ + "//pkg/util/interrupt:go_default_library", + "//vendor/github.com/docker/docker/pkg/term:go_default_library", + "//vendor/github.com/mitchellh/go-wordwrap:go_default_library", + "//vendor/golang.org/x/sys/unix:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", + "//vendor/k8s.io/client-go/tools/remotecommand:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["term_writer_test.go"], + importpath = "k8s.io/kubernetes/pkg/kubectl/util/term", + library = ":go_default_library", +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff 
--git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/term/resize.go b/vendor/k8s.io/kubernetes/pkg/kubectl/util/term/resize.go new file mode 100644 index 000000000000..7ca09a8586b1 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/util/term/resize.go @@ -0,0 +1,132 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package term + +import ( + "fmt" + + "github.com/docker/docker/pkg/term" + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/tools/remotecommand" +) + +// GetSize returns the current size of the user's terminal. If it isn't a terminal, +// nil is returned. +func (t TTY) GetSize() *remotecommand.TerminalSize { + outFd, isTerminal := term.GetFdInfo(t.Out) + if !isTerminal { + return nil + } + return GetSize(outFd) +} + +// GetSize returns the current size of the terminal associated with fd. +func GetSize(fd uintptr) *remotecommand.TerminalSize { + winsize, err := term.GetWinsize(fd) + if err != nil { + runtime.HandleError(fmt.Errorf("unable to get terminal size: %v", err)) + return nil + } + + return &remotecommand.TerminalSize{Width: winsize.Width, Height: winsize.Height} +} + +// MonitorSize monitors the terminal's size. It returns a TerminalSizeQueue primed with +// initialSizes, or nil if there's no TTY present. +func (t *TTY) MonitorSize(initialSizes ...*remotecommand.TerminalSize) remotecommand.TerminalSizeQueue { + outFd, isTerminal := term.GetFdInfo(t.Out) + if !isTerminal { + return nil + } + + t.sizeQueue = &sizeQueue{ + t: *t, + // make it buffered so we can send the initial terminal sizes without blocking, prior to starting + // the streaming below + resizeChan: make(chan remotecommand.TerminalSize, len(initialSizes)), + stopResizing: make(chan struct{}), + } + + t.sizeQueue.monitorSize(outFd, initialSizes...) + + return t.sizeQueue +} + +// sizeQueue implements remotecommand.TerminalSizeQueue +type sizeQueue struct { + t TTY + // resizeChan receives a Size each time the user's terminal is resized. + resizeChan chan remotecommand.TerminalSize + stopResizing chan struct{} +} + +// make sure sizeQueue implements the resize.TerminalSizeQueue interface +var _ remotecommand.TerminalSizeQueue = &sizeQueue{} + +// monitorSize primes resizeChan with initialSizes and then monitors for resize events. With each +// new event, it sends the current terminal size to resizeChan. 
+func (s *sizeQueue) monitorSize(outFd uintptr, initialSizes ...*remotecommand.TerminalSize) { + // send the initial sizes + for i := range initialSizes { + if initialSizes[i] != nil { + s.resizeChan <- *initialSizes[i] + } + } + + resizeEvents := make(chan remotecommand.TerminalSize, 1) + + monitorResizeEvents(outFd, resizeEvents, s.stopResizing) + + // listen for resize events in the background + go func() { + defer runtime.HandleCrash() + + for { + select { + case size, ok := <-resizeEvents: + if !ok { + return + } + + select { + // try to send the size to resizeChan, but don't block + case s.resizeChan <- size: + // send successful + default: + // unable to send / no-op + } + case <-s.stopResizing: + return + } + } + }() +} + +// Next returns the new terminal size after the terminal has been resized. It returns nil when +// monitoring has been stopped. +func (s *sizeQueue) Next() *remotecommand.TerminalSize { + size, ok := <-s.resizeChan + if !ok { + return nil + } + return &size +} + +// stop stops the background goroutine that is monitoring for terminal resizes. +func (s *sizeQueue) stop() { + close(s.stopResizing) +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/term/resizeevents.go b/vendor/k8s.io/kubernetes/pkg/kubectl/util/term/resizeevents.go new file mode 100644 index 000000000000..e3476f978d06 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/util/term/resizeevents.go @@ -0,0 +1,61 @@ +// +build !windows + +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package term + +import ( + "os" + "os/signal" + + "golang.org/x/sys/unix" + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/tools/remotecommand" +) + +// monitorResizeEvents spawns a goroutine that waits for SIGWINCH signals (these indicate the +// terminal has resized). After receiving a SIGWINCH, this gets the terminal size and tries to send +// it to the resizeEvents channel. The goroutine stops when the stop channel is closed. +func monitorResizeEvents(fd uintptr, resizeEvents chan<- remotecommand.TerminalSize, stop chan struct{}) { + go func() { + defer runtime.HandleCrash() + + winch := make(chan os.Signal, 1) + signal.Notify(winch, unix.SIGWINCH) + defer signal.Stop(winch) + + for { + select { + case <-winch: + size := GetSize(fd) + if size == nil { + return + } + + // try to send size + select { + case resizeEvents <- *size: + // success + default: + // not sent + } + case <-stop: + return + } + } + }() +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/term/resizeevents_windows.go b/vendor/k8s.io/kubernetes/pkg/kubectl/util/term/resizeevents_windows.go new file mode 100644 index 000000000000..adccf873466a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/util/term/resizeevents_windows.go @@ -0,0 +1,62 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package term + +import ( + "time" + + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/tools/remotecommand" +) + +// monitorResizeEvents spawns a goroutine that periodically gets the terminal size and tries to send +// it to the resizeEvents channel if the size has changed. The goroutine stops when the stop channel +// is closed. +func monitorResizeEvents(fd uintptr, resizeEvents chan<- remotecommand.TerminalSize, stop chan struct{}) { + go func() { + defer runtime.HandleCrash() + + size := GetSize(fd) + if size == nil { + return + } + lastSize := *size + + for { + // see if we need to stop running + select { + case <-stop: + return + default: + } + + size := GetSize(fd) + if size == nil { + return + } + + if size.Height != lastSize.Height || size.Width != lastSize.Width { + lastSize.Height = size.Height + lastSize.Width = size.Width + resizeEvents <- *size + } + + // sleep to avoid hot looping + time.Sleep(250 * time.Millisecond) + } + }() +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/term/term.go b/vendor/k8s.io/kubernetes/pkg/kubectl/util/term/term.go new file mode 100644 index 000000000000..58baee8310d4 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/util/term/term.go @@ -0,0 +1,110 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package term + +import ( + "io" + "os" + + "github.com/docker/docker/pkg/term" + + "k8s.io/kubernetes/pkg/util/interrupt" +) + +// SafeFunc is a function to be invoked by TTY. +type SafeFunc func() error + +// TTY helps invoke a function and preserve the state of the terminal, even if the process is +// terminated during execution. It also provides support for terminal resizing for remote command +// execution/attachment. +type TTY struct { + // In is a reader representing stdin. It is a required field. + In io.Reader + // Out is a writer representing stdout. It must be set to support terminal resizing. It is an + // optional field. + Out io.Writer + // Raw is true if the terminal should be set raw. + Raw bool + // TryDev indicates the TTY should try to open /dev/tty if the provided input + // is not a file descriptor. + TryDev bool + // Parent is an optional interrupt handler provided to this function - if provided + // it will be invoked after the terminal state is restored. If it is not provided, + // a signal received during the TTY will result in os.Exit(0) being invoked. + Parent *interrupt.Handler + + // sizeQueue is set after a call to MonitorSize() and is used to monitor SIGWINCH signals when the + // user's terminal resizes. 
+ sizeQueue *sizeQueue +} + +// IsTerminalIn returns true if t.In is a terminal. Does not check /dev/tty +// even if TryDev is set. +func (t TTY) IsTerminalIn() bool { + return IsTerminal(t.In) +} + +// IsTerminalOut returns true if t.Out is a terminal. Does not check /dev/tty +// even if TryDev is set. +func (t TTY) IsTerminalOut() bool { + return IsTerminal(t.Out) +} + +// IsTerminal returns whether the passed object is a terminal or not +func IsTerminal(i interface{}) bool { + _, terminal := term.GetFdInfo(i) + return terminal +} + +// Safe invokes the provided function and will attempt to ensure that when the +// function returns (or a termination signal is sent) that the terminal state +// is reset to the condition it was in prior to the function being invoked. If +// t.Raw is true the terminal will be put into raw mode prior to calling the function. +// If the input file descriptor is not a TTY and TryDev is true, the /dev/tty file +// will be opened (if available). +func (t TTY) Safe(fn SafeFunc) error { + inFd, isTerminal := term.GetFdInfo(t.In) + + if !isTerminal && t.TryDev { + if f, err := os.Open("/dev/tty"); err == nil { + defer f.Close() + inFd = f.Fd() + isTerminal = term.IsTerminal(inFd) + } + } + if !isTerminal { + return fn() + } + + var state *term.State + var err error + if t.Raw { + state, err = term.MakeRaw(inFd) + } else { + state, err = term.SaveState(inFd) + } + if err != nil { + return err + } + return interrupt.Chain(t.Parent, func() { + if t.sizeQueue != nil { + t.sizeQueue.stop() + } + + term.RestoreTerminal(inFd, state) + }).Run(fn) +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/term/term_writer.go b/vendor/k8s.io/kubernetes/pkg/kubectl/util/term/term_writer.go new file mode 100644 index 000000000000..2d72d1e45254 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/util/term/term_writer.go @@ -0,0 +1,124 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package term + +import ( + "io" + "os" + + "github.com/docker/docker/pkg/term" + wordwrap "github.com/mitchellh/go-wordwrap" +) + +type wordWrapWriter struct { + limit uint + writer io.Writer +} + +// NewResponsiveWriter creates a Writer that detects the column width of the +// terminal we are in, and adjusts every line width to fit and use recommended +// terminal sizes for better readability. Does proper word wrapping automatically. +// if terminal width >= 120 columns use 120 columns +// if terminal width >= 100 columns use 100 columns +// if terminal width >= 80 columns use 80 columns +// In case we're not in a terminal or if it's smaller than 80 columns width, +// doesn't do any wrapping. 
+func NewResponsiveWriter(w io.Writer) io.Writer { + file, ok := w.(*os.File) + if !ok { + return w + } + fd := file.Fd() + if !term.IsTerminal(fd) { + return w + } + + terminalSize := GetSize(fd) + if terminalSize == nil { + return w + } + + var limit uint + switch { + case terminalSize.Width >= 120: + limit = 120 + case terminalSize.Width >= 100: + limit = 100 + case terminalSize.Width >= 80: + limit = 80 + } + + return NewWordWrapWriter(w, limit) +} + +// NewWordWrapWriter is a Writer that supports a limit of characters on every line +// and does auto word wrapping that respects that limit. +func NewWordWrapWriter(w io.Writer, limit uint) io.Writer { + return &wordWrapWriter{ + limit: limit, + writer: w, + } +} + +func (w wordWrapWriter) Write(p []byte) (nn int, err error) { + if w.limit == 0 { + return w.writer.Write(p) + } + original := string(p) + wrapped := wordwrap.WrapString(original, w.limit) + return w.writer.Write([]byte(wrapped)) +} + +// NewPunchCardWriter is a NewWordWrapWriter that limits the line width to 80 columns. +func NewPunchCardWriter(w io.Writer) io.Writer { + return NewWordWrapWriter(w, 80) +} + +type maxWidthWriter struct { + maxWidth uint + currentWidth uint + written uint + writer io.Writer +} + +// NewMaxWidthWriter is a Writer that supports a limit of characters on every +// line, but doesn't do any word wrapping automatically. +func NewMaxWidthWriter(w io.Writer, maxWidth uint) io.Writer { + return &maxWidthWriter{ + maxWidth: maxWidth, + writer: w, + } +} + +func (m maxWidthWriter) Write(p []byte) (nn int, err error) { + for _, b := range p { + if m.currentWidth == m.maxWidth { + m.writer.Write([]byte{'\n'}) + m.currentWidth = 0 + } + if b == '\n' { + m.currentWidth = 0 + } + _, err := m.writer.Write([]byte{b}) + if err != nil { + return int(m.written), err + } + m.written++ + m.currentWidth++ + } + return len(p), nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/util/util.go b/vendor/k8s.io/kubernetes/pkg/kubectl/util/util.go index 7b6eeff81fe6..fd1bf1364689 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/util/util.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/util/util.go @@ -18,7 +18,10 @@ package util import ( "crypto/md5" + "errors" "fmt" + "path" + "strings" "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -44,3 +47,45 @@ func HashObject(obj runtime.Object, codec runtime.Codec) (string, error) { } return fmt.Sprintf("%x", md5.Sum(data)), nil } + +// ParseFileSource parses the source given. +// +// Acceptable formats include: +// 1. source-path: the basename will become the key name +// 2. source-name=source-path: the source-name will become the key name and +// source-path is the path to the key file. +// +// Key names cannot include '='. +func ParseFileSource(source string) (keyName, filePath string, err error) { + numSeparators := strings.Count(source, "=") + switch { + case numSeparators == 0: + return path.Base(source), source, nil + case numSeparators == 1 && strings.HasPrefix(source, "="): + return "", "", fmt.Errorf("key name for file path %v missing.", strings.TrimPrefix(source, "=")) + case numSeparators == 1 && strings.HasSuffix(source, "="): + return "", "", fmt.Errorf("file path for key name %v missing.", strings.TrimSuffix(source, "=")) + case numSeparators > 1: + return "", "", errors.New("Key names or file paths cannot contain '='.") + default: + components := strings.Split(source, "=") + return components[0], components[1], nil + } +} + +// ParseLiteralSource parses the source key=val pair into its component pieces. 
+// This functionality is distinguished from strings.SplitN(source, "=", 2) since +// it returns an error in the case of empty keys, values, or a missing equals sign. +func ParseLiteralSource(source string) (keyName, value string, err error) { + // leading equal is invalid + if strings.Index(source, "=") == 0 { + return "", "", fmt.Errorf("invalid literal source %v, expected key=value", source) + } + // split after the first equal (so values can have the = character) + items := strings.SplitN(source, "=", 2) + if len(items) != 2 { + return "", "", fmt.Errorf("invalid literal source %v, expected key=value", source) + } + + return items[0], items[1], nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/validation/BUILD b/vendor/k8s.io/kubernetes/pkg/kubectl/validation/BUILD index 3b2de79ad252..0854e64950c2 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/validation/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/validation/BUILD @@ -1,5 +1,3 @@ -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", @@ -13,54 +11,24 @@ exports_files( visibility = ["//build/visible_to:COMMON_testing"], ) -filegroup( - name = "testdata", - srcs = [ - "testdata/v1/invalidPod.yaml", - "testdata/v1/invalidPod1.json", - "testdata/v1/invalidPod2.json", - "testdata/v1/invalidPod3.json", - "testdata/v1/invalidPod4.yaml", - "testdata/v1/validPod.yaml", - ], - visibility = ["//build/visible_to:COMMON_testing"], -) - go_test( name = "go_default_test", srcs = ["schema_test.go"], data = [ ":testdata", - "//api/swagger-spec", ], + importpath = "k8s.io/kubernetes/pkg/kubectl/validation", library = ":go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/api/testapi:go_default_library", - "//pkg/api/testing:go_default_library", - "//vendor/github.com/ghodss/yaml:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/testing/fuzzer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/yaml:go_default_library", - ], ) go_library( name = "go_default_library", srcs = ["schema.go"], - tags = ["automanaged"], + importpath = "k8s.io/kubernetes/pkg/kubectl/validation", visibility = ["//build/visible_to:pkg_kubectl_validation_CONSUMERS"], deps = [ - "//pkg/api/util:go_default_library", - "//vendor/github.com/emicklei/go-restful-swagger12:go_default_library", "//vendor/github.com/exponent-io/jsonpath:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/yaml:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/validation/schema.go b/vendor/k8s.io/kubernetes/pkg/kubectl/validation/schema.go index b75907aefbe6..6eef619390dd 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/validation/schema.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/validation/schema.go @@ -20,43 +20,11 @@ import ( "bytes" "encoding/json" "fmt" - "reflect" - "regexp" - "strings" - swagger "github.com/emicklei/go-restful-swagger12" ejson "github.com/exponent-io/jsonpath" - "github.com/golang/glog" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" utilerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/yaml" - apiutil 
"k8s.io/kubernetes/pkg/api/util" ) -// InvalidTypeError records information about an invalid type. -type InvalidTypeError struct { - ExpectedKind reflect.Kind - ObservedKind reflect.Kind - FieldName string -} - -func (i *InvalidTypeError) Error() string { - return fmt.Sprintf("expected type %s, for field %s, got %s", i.ExpectedKind.String(), i.FieldName, i.ObservedKind.String()) -} - -// NewInvalidTypeError returns an instance of NewInvalidTypeError. -func NewInvalidTypeError(expected reflect.Kind, observed reflect.Kind, fieldName string) error { - return &InvalidTypeError{expected, observed, fieldName} -} - -// TypeNotFoundError is returned when specified type -// can not found in schema -type TypeNotFoundError string - -func (tnfe TypeNotFoundError) Error() string { - return fmt.Sprintf("couldn't find type: %s", string(tnfe)) -} - // Schema is an interface that knows how to validate an API object serialized to a byte array. type Schema interface { ValidateBytes(data []byte) error @@ -133,314 +101,3 @@ func (c ConjunctiveSchema) ValidateBytes(data []byte) error { } return utilerrors.NewAggregate(list) } - -// SwaggerSchema is a schema based on an OpenAPI spec. -type SwaggerSchema struct { - api swagger.ApiDeclaration - delegate Schema // For delegating to other api groups -} - -// NewSwaggerSchemaFromBytes creates an instance of SwaggerSchema from bytes. -func NewSwaggerSchemaFromBytes(data []byte, factory Schema) (Schema, error) { - schema := &SwaggerSchema{} - err := json.Unmarshal(data, &schema.api) - if err != nil { - return nil, err - } - schema.delegate = factory - return schema, nil -} - -// validateList unpacks a list and validate every item in the list. -// It return nil if every item is ok. -// Otherwise it return an error list contain errors of every item. -func (s *SwaggerSchema) validateList(obj map[string]interface{}) []error { - items, exists := obj["items"] - if !exists { - return []error{fmt.Errorf("no items field in %#v", obj)} - } - return s.validateItems(items) -} - -func (s *SwaggerSchema) validateItems(items interface{}) []error { - allErrs := []error{} - itemList, ok := items.([]interface{}) - if !ok { - return append(allErrs, fmt.Errorf("items isn't a slice")) - } - for i, item := range itemList { - fields, ok := item.(map[string]interface{}) - if !ok { - allErrs = append(allErrs, fmt.Errorf("items[%d] isn't a map[string]interface{}", i)) - continue - } - groupVersion := fields["apiVersion"] - if groupVersion == nil { - allErrs = append(allErrs, fmt.Errorf("items[%d].apiVersion not set", i)) - continue - } - itemVersion, ok := groupVersion.(string) - if !ok { - allErrs = append(allErrs, fmt.Errorf("items[%d].apiVersion isn't string type", i)) - continue - } - if len(itemVersion) == 0 { - allErrs = append(allErrs, fmt.Errorf("items[%d].apiVersion is empty", i)) - } - kind := fields["kind"] - if kind == nil { - allErrs = append(allErrs, fmt.Errorf("items[%d].kind not set", i)) - continue - } - itemKind, ok := kind.(string) - if !ok { - allErrs = append(allErrs, fmt.Errorf("items[%d].kind isn't string type", i)) - continue - } - if len(itemKind) == 0 { - allErrs = append(allErrs, fmt.Errorf("items[%d].kind is empty", i)) - } - version := apiutil.GetVersion(itemVersion) - errs := s.ValidateObject(item, "", version+"."+itemKind) - if len(errs) >= 1 { - allErrs = append(allErrs, errs...) - } - } - - return allErrs -} - -// ValidateBytes validates bytes in a SwaggerSchema. 
-func (s *SwaggerSchema) ValidateBytes(data []byte) error { - var obj interface{} - out, err := yaml.ToJSON(data) - if err != nil { - return err - } - data = out - if err := json.Unmarshal(data, &obj); err != nil { - return err - } - fields, ok := obj.(map[string]interface{}) - if !ok { - return fmt.Errorf("error in unmarshaling data %s", string(data)) - } - groupVersion := fields["apiVersion"] - if groupVersion == nil { - return fmt.Errorf("apiVersion not set") - } - if _, ok := groupVersion.(string); !ok { - return fmt.Errorf("apiVersion isn't string type") - } - kind := fields["kind"] - if kind == nil { - return fmt.Errorf("kind not set") - } - if _, ok := kind.(string); !ok { - return fmt.Errorf("kind isn't string type") - } - if strings.HasSuffix(kind.(string), "List") { - return utilerrors.NewAggregate(s.validateList(fields)) - } - version := apiutil.GetVersion(groupVersion.(string)) - allErrs := s.ValidateObject(obj, "", version+"."+kind.(string)) - if len(allErrs) == 1 { - return allErrs[0] - } - return utilerrors.NewAggregate(allErrs) -} - -// ValidateObject returns no errors for a valid object. -func (s *SwaggerSchema) ValidateObject(obj interface{}, fieldName, typeName string) []error { - allErrs := []error{} - models := s.api.Models - model, ok := models.At(typeName) - - // Verify the api version matches. This is required for nested types with differing api versions because - // s.api only has schema for 1 api version (the parent object type's version). - // e.g. an extensions/v1beta1 Template embedding a /v1 Service requires the schema for the extensions/v1beta1 - // api to delegate to the schema for the /v1 api. - // Only do this for !ok objects so that cross APIVersion vendored types take precedence. - if !ok && s.delegate != nil { - fields, mapOk := obj.(map[string]interface{}) - if !mapOk { - return append(allErrs, fmt.Errorf("field %s for %s: expected object of type map[string]interface{}, but the actual type is %T", fieldName, typeName, obj)) - } - if delegated, err := s.delegateIfDifferentAPIVersion(&unstructured.Unstructured{Object: fields}); delegated { - if err != nil { - allErrs = append(allErrs, err) - } - return allErrs - } - } - - if !ok { - return append(allErrs, TypeNotFoundError(typeName)) - } - properties := model.Properties - if len(properties.List) == 0 { - // The object does not have any sub-fields. - return nil - } - fields, ok := obj.(map[string]interface{}) - if !ok { - return append(allErrs, fmt.Errorf("field %s for %s: expected object of type map[string]interface{}, but the actual type is %T", fieldName, typeName, obj)) - } - if len(fieldName) > 0 { - fieldName = fieldName + "." - } - // handle required fields - for _, requiredKey := range model.Required { - if _, ok := fields[requiredKey]; !ok { - allErrs = append(allErrs, fmt.Errorf("field %s%s for %s is required", fieldName, requiredKey, typeName)) - } - } - for key, value := range fields { - details, ok := properties.At(key) - - // Special case for runtime.RawExtension and runtime.Objects because they always fail to validate - // This is because the actual values will be of some sub-type (e.g. Deployment) not the expected - // super-type (RawExtension) - if s.isGenericArray(details) { - errs := s.validateItems(value) - if len(errs) > 0 { - allErrs = append(allErrs, errs...) 
- } - continue - } - if !ok { - allErrs = append(allErrs, fmt.Errorf("found invalid field %s for %s", key, typeName)) - continue - } - if details.Type == nil && details.Ref == nil { - allErrs = append(allErrs, fmt.Errorf("could not find the type of %s%s from object %v", fieldName, key, details)) - } - var fieldType string - if details.Type != nil { - fieldType = *details.Type - } else { - fieldType = *details.Ref - } - if value == nil { - glog.V(2).Infof("Skipping nil field: %s%s", fieldName, key) - continue - } - errs := s.validateField(value, fieldName+key, fieldType, &details) - if len(errs) > 0 { - allErrs = append(allErrs, errs...) - } - } - return allErrs -} - -// delegateIfDifferentAPIVersion delegates the validation of an object if its ApiGroup does not match the -// current SwaggerSchema. -// First return value is true if the validation was delegated (by a different ApiGroup SwaggerSchema) -// Second return value is the result of the delegated validation if performed. -func (s *SwaggerSchema) delegateIfDifferentAPIVersion(obj *unstructured.Unstructured) (bool, error) { - // Never delegate objects in the same APIVersion or we will get infinite recursion - if !s.isDifferentAPIVersion(obj) { - return false, nil - } - - // Convert the object back into bytes so that we can pass it to the ValidateBytes function - m, err := json.Marshal(obj.Object) - if err != nil { - return true, err - } - - // Delegate validation of this object to the correct SwaggerSchema for its ApiGroup - return true, s.delegate.ValidateBytes(m) -} - -// isDifferentAPIVersion Returns true if obj lives in a different APIVersion than the SwaggerSchema does. -// The SwaggerSchema will not be able to process objects in different APIVersions unless they are vendored. -func (s *SwaggerSchema) isDifferentAPIVersion(obj *unstructured.Unstructured) bool { - groupVersion := obj.GetAPIVersion() - return len(groupVersion) > 0 && s.api.ApiVersion != groupVersion -} - -// isGenericArray Returns true if p is an array of generic Objects - either RawExtension or Object. -func (s *SwaggerSchema) isGenericArray(p swagger.ModelProperty) bool { - return p.DataTypeFields.Type != nil && - *p.DataTypeFields.Type == "array" && - p.Items != nil && - p.Items.Ref != nil && - (*p.Items.Ref == "runtime.RawExtension" || *p.Items.Ref == "runtime.Object") -} - -// This matches type name in the swagger spec, such as "v1.Binding". -var versionRegexp = regexp.MustCompile(`^(v.+|unversioned|types)\..*`) - -func (s *SwaggerSchema) validateField(value interface{}, fieldName, fieldType string, fieldDetails *swagger.ModelProperty) []error { - allErrs := []error{} - if reflect.TypeOf(value) == nil { - return append(allErrs, fmt.Errorf("unexpected nil value for field %v", fieldName)) - } - // TODO: caesarxuchao: because we have multiple group/versions and objects - // may reference objects in other group, the commented out way of checking - // if a filedType is a type defined by us is outdated. We use a hacky way - // for now. - // TODO: the type name in the swagger spec is something like "v1.Binding", - // and the "v1" is generated from the package name, not the groupVersion of - // the type. We need to fix go-restful to embed the group name in the type - // name, otherwise we couldn't handle identically named types in different - // groups correctly. 
- if versionRegexp.MatchString(fieldType) { - // if strings.HasPrefix(fieldType, apiVersion) { - return s.ValidateObject(value, fieldName, fieldType) - } - switch fieldType { - case "string": - // Be loose about what we accept for 'string' since we use IntOrString in a couple of places - _, isString := value.(string) - _, isNumber := value.(float64) - _, isInteger := value.(int) - if !isString && !isNumber && !isInteger { - return append(allErrs, NewInvalidTypeError(reflect.String, reflect.TypeOf(value).Kind(), fieldName)) - } - case "array": - arr, ok := value.([]interface{}) - if !ok { - return append(allErrs, NewInvalidTypeError(reflect.Array, reflect.TypeOf(value).Kind(), fieldName)) - } - var arrType string - if fieldDetails.Items.Ref == nil && fieldDetails.Items.Type == nil { - return append(allErrs, NewInvalidTypeError(reflect.Array, reflect.TypeOf(value).Kind(), fieldName)) - } - if fieldDetails.Items.Ref != nil { - arrType = *fieldDetails.Items.Ref - } else { - arrType = *fieldDetails.Items.Type - } - for ix := range arr { - errs := s.validateField(arr[ix], fmt.Sprintf("%s[%d]", fieldName, ix), arrType, nil) - if len(errs) > 0 { - allErrs = append(allErrs, errs...) - } - } - case "uint64": - case "int64": - case "integer": - _, isNumber := value.(float64) - _, isInteger := value.(int) - if !isNumber && !isInteger { - return append(allErrs, NewInvalidTypeError(reflect.Int, reflect.TypeOf(value).Kind(), fieldName)) - } - case "float64": - if _, ok := value.(float64); !ok { - return append(allErrs, NewInvalidTypeError(reflect.Float64, reflect.TypeOf(value).Kind(), fieldName)) - } - case "boolean": - if _, ok := value.(bool); !ok { - return append(allErrs, NewInvalidTypeError(reflect.Bool, reflect.TypeOf(value).Kind(), fieldName)) - } - // API servers before release 1.3 produce swagger spec with `type: "any"` as the fallback type, while newer servers produce spec with `type: "object"`. - // We have both here so that kubectl can work with both old and new api servers. 
- case "object": - case "any": - default: - return append(allErrs, fmt.Errorf("unexpected type: %v", fieldType)) - } - return allErrs -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/BUILD index 5b2ad441016b..7fb535012420 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/BUILD @@ -18,7 +18,6 @@ go_library( "kubelet_pods.go", "kubelet_resources.go", "kubelet_volumes.go", - "networks.go", "oom_watcher.go", "pod_container_deletor.go", "pod_workers.go", @@ -28,15 +27,16 @@ go_library( "util.go", "volume_host.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet", deps = [ - "//cmd/kubelet/app/options:go_default_library", - "//pkg/api:go_default_library", - "//pkg/api/v1:go_default_library", - "//pkg/api/v1/helper:go_default_library", - "//pkg/api/v1/helper/qos:go_default_library", "//pkg/api/v1/pod:go_default_library", "//pkg/api/v1/resource:go_default_library", - "//pkg/api/v1/validation:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/pods:go_default_library", + "//pkg/apis/core/v1:go_default_library", + "//pkg/apis/core/v1/helper:go_default_library", + "//pkg/apis/core/v1/helper/qos:go_default_library", + "//pkg/apis/core/v1/validation:go_default_library", "//pkg/capabilities:go_default_library", "//pkg/cloudprovider:go_default_library", "//pkg/features:go_default_library", @@ -53,7 +53,6 @@ go_library( "//pkg/kubelet/configmap:go_default_library", "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/dockershim:go_default_library", - "//pkg/kubelet/dockershim/libdocker:go_default_library", "//pkg/kubelet/dockershim/remote:go_default_library", "//pkg/kubelet/envvars:go_default_library", "//pkg/kubelet/events:go_default_library", @@ -65,7 +64,9 @@ go_library( "//pkg/kubelet/kuberuntime:go_default_library", "//pkg/kubelet/lifecycle:go_default_library", "//pkg/kubelet/metrics:go_default_library", + "//pkg/kubelet/mountpod:go_default_library", "//pkg/kubelet/network:go_default_library", + "//pkg/kubelet/network/dns:go_default_library", "//pkg/kubelet/pleg:go_default_library", "//pkg/kubelet/pod:go_default_library", "//pkg/kubelet/preemption:go_default_library", @@ -116,7 +117,6 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", @@ -130,12 +130,12 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", "//vendor/k8s.io/client-go/listers/core/v1:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", "//vendor/k8s.io/client-go/tools/record:go_default_library", "//vendor/k8s.io/client-go/tools/remotecommand:go_default_library", + "//vendor/k8s.io/client-go/util/certificate:go_default_library", "//vendor/k8s.io/client-go/util/flowcontrol:go_default_library", "//vendor/k8s.io/client-go/util/integer:go_default_library", 
"//vendor/k8s.io/utils/exec:go_default_library", @@ -153,7 +153,6 @@ go_test( "kubelet_resources_test.go", "kubelet_test.go", "kubelet_volumes_test.go", - "networks_test.go", "oom_watcher_test.go", "pod_container_deletor_test.go", "pod_workers_test.go", @@ -165,10 +164,11 @@ go_test( ], "//conditions:default": [], }), + importpath = "k8s.io/kubernetes/pkg/kubelet", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/install:go_default_library", + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/core/install:go_default_library", "//pkg/capabilities:go_default_library", "//pkg/cloudprovider/providers/fake:go_default_library", "//pkg/kubelet/apis:go_default_library", @@ -208,6 +208,7 @@ go_test( "//pkg/volume/host_path:go_default_library", "//pkg/volume/testing:go_default_library", "//pkg/volume/util/volumehelper:go_default_library", + "//plugin/pkg/scheduler/schedulercache:go_default_library", "//vendor/github.com/google/cadvisor/info/v1:go_default_library", "//vendor/github.com/google/cadvisor/info/v2:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", @@ -253,13 +254,13 @@ filegroup( "//pkg/kubelet/apis:all-srcs", "//pkg/kubelet/cadvisor:all-srcs", "//pkg/kubelet/certificate:all-srcs", + "//pkg/kubelet/checkpoint:all-srcs", "//pkg/kubelet/client:all-srcs", "//pkg/kubelet/cm:all-srcs", "//pkg/kubelet/config:all-srcs", "//pkg/kubelet/configmap:all-srcs", "//pkg/kubelet/container:all-srcs", "//pkg/kubelet/custommetrics:all-srcs", - "//pkg/kubelet/deviceplugin:all-srcs", "//pkg/kubelet/dockershim:all-srcs", "//pkg/kubelet/envvars:all-srcs", "//pkg/kubelet/events:all-srcs", @@ -271,6 +272,7 @@ filegroup( "//pkg/kubelet/leaky:all-srcs", "//pkg/kubelet/lifecycle:all-srcs", "//pkg/kubelet/metrics:all-srcs", + "//pkg/kubelet/mountpod:all-srcs", "//pkg/kubelet/network:all-srcs", "//pkg/kubelet/pleg:all-srcs", "//pkg/kubelet/pod:all-srcs", @@ -288,6 +290,7 @@ filegroup( "//pkg/kubelet/types:all-srcs", "//pkg/kubelet/util:all-srcs", "//pkg/kubelet/volumemanager:all-srcs", + "//pkg/kubelet/winstats:all-srcs", ], tags = ["automanaged"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/BUILD index aeb68f1bd521..490bbd581f90 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/BUILD @@ -11,6 +11,7 @@ go_library( "well_known_annotations.go", "well_known_labels.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/apis", ) filegroup( @@ -25,7 +26,7 @@ filegroup( srcs = [ ":package-srcs", "//pkg/kubelet/apis/cri:all-srcs", - "//pkg/kubelet/apis/deviceplugin/v1alpha1:all-srcs", + "//pkg/kubelet/apis/deviceplugin/v1alpha:all-srcs", "//pkg/kubelet/apis/kubeletconfig:all-srcs", "//pkg/kubelet/apis/stats/v1alpha1:all-srcs", ], diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/cri/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/cri/BUILD index 0a869ef112d4..399cf489a3f9 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/cri/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/cri/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["services.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/apis/cri", deps = ["//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime/BUILD index c39b090d1399..a2ed4b6686d8 100644 --- 
a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime/BUILD @@ -11,6 +11,7 @@ go_library( "api.pb.go", "constants.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime", deps = [ "//vendor/github.com/gogo/protobuf/gogoproto:go_default_library", "//vendor/github.com/gogo/protobuf/proto:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime/api.pb.go b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime/api.pb.go index 4571e1f42653..f0b41f00a5d2 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime/api.pb.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime/api.pb.go @@ -860,6 +860,8 @@ func (*RemovePodSandboxResponse) Descriptor() ([]byte, []int) { return fileDescr type PodSandboxStatusRequest struct { // ID of the PodSandbox for which to retrieve status. PodSandboxId string `protobuf:"bytes,1,opt,name=pod_sandbox_id,json=podSandboxId,proto3" json:"pod_sandbox_id,omitempty"` + // Verbose indicates whether to return extra information about the pod sandbox. + Verbose bool `protobuf:"varint,2,opt,name=verbose,proto3" json:"verbose,omitempty"` } func (m *PodSandboxStatusRequest) Reset() { *m = PodSandboxStatusRequest{} } @@ -873,6 +875,13 @@ func (m *PodSandboxStatusRequest) GetPodSandboxId() string { return "" } +func (m *PodSandboxStatusRequest) GetVerbose() bool { + if m != nil { + return m.Verbose + } + return false +} + // PodSandboxNetworkStatus is the status of the network for a PodSandbox. type PodSandboxNetworkStatus struct { // IP address of the PodSandbox. @@ -1010,6 +1019,11 @@ func (m *PodSandboxStatus) GetAnnotations() map[string]string { type PodSandboxStatusResponse struct { // Status of the PodSandbox. Status *PodSandboxStatus `protobuf:"bytes,1,opt,name=status" json:"status,omitempty"` + // Info is extra information of the PodSandbox. The key could be abitrary string, and + // value should be in json format. The information could include anything useful for + // debug, e.g. network namespace for linux container based container runtime. + // It should only be returned non-empty when Verbose is true. + Info map[string]string `protobuf:"bytes,2,rep,name=info" json:"info,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (m *PodSandboxStatusResponse) Reset() { *m = PodSandboxStatusResponse{} } @@ -1023,6 +1037,13 @@ func (m *PodSandboxStatusResponse) GetStatus() *PodSandboxStatus { return nil } +func (m *PodSandboxStatusResponse) GetInfo() map[string]string { + if m != nil { + return m.Info + } + return nil +} + // PodSandboxStateValue is the wrapper of PodSandboxState. type PodSandboxStateValue struct { // State of the sandbox. @@ -1397,6 +1418,7 @@ type LinuxContainerSecurityContext struct { SupplementalGroups []int64 `protobuf:"varint,8,rep,packed,name=supplemental_groups,json=supplementalGroups" json:"supplemental_groups,omitempty"` // AppArmor profile for the container, candidate values are: // * runtime/default: equivalent to not specifying a profile. + // * unconfined: no profiles are loaded // * localhost/: profile loaded on the node // (localhost) by name. 
The possible profile names are detailed at // http://wiki.apparmor.net/index.php/AppArmor_Core_Policy_Reference @@ -2087,6 +2109,8 @@ func (m *ListContainersResponse) GetContainers() []*Container { type ContainerStatusRequest struct { // ID of the container for which to retrieve status. ContainerId string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + // Verbose indicates whether to return extra information about the container. + Verbose bool `protobuf:"varint,2,opt,name=verbose,proto3" json:"verbose,omitempty"` } func (m *ContainerStatusRequest) Reset() { *m = ContainerStatusRequest{} } @@ -2100,6 +2124,13 @@ func (m *ContainerStatusRequest) GetContainerId() string { return "" } +func (m *ContainerStatusRequest) GetVerbose() bool { + if m != nil { + return m.Verbose + } + return false +} + // ContainerStatus represents the status of a container. type ContainerStatus struct { // ID of the container. @@ -2251,6 +2282,11 @@ func (m *ContainerStatus) GetLogPath() string { type ContainerStatusResponse struct { // Status of the container. Status *ContainerStatus `protobuf:"bytes,1,opt,name=status" json:"status,omitempty"` + // Info is extra information of the Container. The key could be abitrary string, and + // value should be in json format. The information could include anything useful for + // debug, e.g. pid for linux container based container runtime. + // It should only be returned non-empty when Verbose is true. + Info map[string]string `protobuf:"bytes,2,rep,name=info" json:"info,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (m *ContainerStatusResponse) Reset() { *m = ContainerStatusResponse{} } @@ -2264,6 +2300,13 @@ func (m *ContainerStatusResponse) GetStatus() *ContainerStatus { return nil } +func (m *ContainerStatusResponse) GetInfo() map[string]string { + if m != nil { + return m.Info + } + return nil +} + type UpdateContainerResourcesRequest struct { // ID of the container to update. ContainerId string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` @@ -2376,7 +2419,17 @@ type ExecRequest struct { // Whether to exec the command in a TTY. Tty bool `protobuf:"varint,3,opt,name=tty,proto3" json:"tty,omitempty"` // Whether to stream stdin. + // One of `stdin`, `stdout`, and `stderr` MUST be true. Stdin bool `protobuf:"varint,4,opt,name=stdin,proto3" json:"stdin,omitempty"` + // Whether to stream stdout. + // One of `stdin`, `stdout`, and `stderr` MUST be true. + Stdout bool `protobuf:"varint,5,opt,name=stdout,proto3" json:"stdout,omitempty"` + // Whether to stream stderr. + // One of `stdin`, `stdout`, and `stderr` MUST be true. + // If `tty` is true, `stderr` MUST be false. Multiplexing is not supported + // in this case. The output of stdout and stderr will be combined to a + // single stream. + Stderr bool `protobuf:"varint,6,opt,name=stderr,proto3" json:"stderr,omitempty"` } func (m *ExecRequest) Reset() { *m = ExecRequest{} } @@ -2411,6 +2464,20 @@ func (m *ExecRequest) GetStdin() bool { return false } +func (m *ExecRequest) GetStdout() bool { + if m != nil { + return m.Stdout + } + return false +} + +func (m *ExecRequest) GetStderr() bool { + if m != nil { + return m.Stderr + } + return false +} + type ExecResponse struct { // Fully qualified URL of the exec streaming server. 
Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` @@ -2431,10 +2498,20 @@ type AttachRequest struct { // ID of the container to which to attach. ContainerId string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` // Whether to stream stdin. + // One of `stdin`, `stdout`, and `stderr` MUST be true. Stdin bool `protobuf:"varint,2,opt,name=stdin,proto3" json:"stdin,omitempty"` // Whether the process being attached is running in a TTY. // This must match the TTY setting in the ContainerConfig. Tty bool `protobuf:"varint,3,opt,name=tty,proto3" json:"tty,omitempty"` + // Whether to stream stdout. + // One of `stdin`, `stdout`, and `stderr` MUST be true. + Stdout bool `protobuf:"varint,4,opt,name=stdout,proto3" json:"stdout,omitempty"` + // Whether to stream stderr. + // One of `stdin`, `stdout`, and `stderr` MUST be true. + // If `tty` is true, `stderr` MUST be false. Multiplexing is not supported + // in this case. The output of stdout and stderr will be combined to a + // single stream. + Stderr bool `protobuf:"varint,5,opt,name=stderr,proto3" json:"stderr,omitempty"` } func (m *AttachRequest) Reset() { *m = AttachRequest{} } @@ -2462,6 +2539,20 @@ func (m *AttachRequest) GetTty() bool { return false } +func (m *AttachRequest) GetStdout() bool { + if m != nil { + return m.Stdout + } + return false +} + +func (m *AttachRequest) GetStderr() bool { + if m != nil { + return m.Stderr + } + return false +} + type AttachResponse struct { // Fully qualified URL of the attach streaming server. Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` @@ -2635,6 +2726,8 @@ func (m *ListImagesResponse) GetImages() []*Image { type ImageStatusRequest struct { // Spec of the image. Image *ImageSpec `protobuf:"bytes,1,opt,name=image" json:"image,omitempty"` + // Verbose indicates whether to return extra information about the image. + Verbose bool `protobuf:"varint,2,opt,name=verbose,proto3" json:"verbose,omitempty"` } func (m *ImageStatusRequest) Reset() { *m = ImageStatusRequest{} } @@ -2648,9 +2741,21 @@ func (m *ImageStatusRequest) GetImage() *ImageSpec { return nil } +func (m *ImageStatusRequest) GetVerbose() bool { + if m != nil { + return m.Verbose + } + return false +} + type ImageStatusResponse struct { // Status of the image. Image *Image `protobuf:"bytes,1,opt,name=image" json:"image,omitempty"` + // Info is extra information of the Image. The key could be abitrary string, and + // value should be in json format. The information could include anything useful + // for debug, e.g. image config for oci image based container runtime. + // It should only be returned non-empty when Verbose is true. + Info map[string]string `protobuf:"bytes,2,rep,name=info" json:"info,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (m *ImageStatusResponse) Reset() { *m = ImageStatusResponse{} } @@ -2664,6 +2769,13 @@ func (m *ImageStatusResponse) GetImage() *Image { return nil } +func (m *ImageStatusResponse) GetInfo() map[string]string { + if m != nil { + return m.Info + } + return nil +} + // AuthConfig contains authorization information for connecting to a registry. type AuthConfig struct { Username string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"` @@ -2924,15 +3036,29 @@ func (m *RuntimeStatus) GetConditions() []*RuntimeCondition { } type StatusRequest struct { + // Verbose indicates whether to return extra information about the runtime. 
+ Verbose bool `protobuf:"varint,1,opt,name=verbose,proto3" json:"verbose,omitempty"` } func (m *StatusRequest) Reset() { *m = StatusRequest{} } func (*StatusRequest) ProtoMessage() {} func (*StatusRequest) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{81} } +func (m *StatusRequest) GetVerbose() bool { + if m != nil { + return m.Verbose + } + return false +} + type StatusResponse struct { // Status of the Runtime. Status *RuntimeStatus `protobuf:"bytes,1,opt,name=status" json:"status,omitempty"` + // Info is extra information of the Runtime. The key could be abitrary string, and + // value should be in json format. The information could include anything useful for + // debug, e.g. plugins used by the container runtime. + // It should only be returned non-empty when Verbose is true. + Info map[string]string `protobuf:"bytes,2,rep,name=info" json:"info,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (m *StatusResponse) Reset() { *m = StatusResponse{} } @@ -2946,6 +3072,13 @@ func (m *StatusResponse) GetStatus() *RuntimeStatus { return nil } +func (m *StatusResponse) GetInfo() map[string]string { + if m != nil { + return m.Info + } + return nil +} + type ImageFsInfoRequest struct { } @@ -5171,6 +5304,16 @@ func (m *PodSandboxStatusRequest) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintApi(dAtA, i, uint64(len(m.PodSandboxId))) i += copy(dAtA[i:], m.PodSandboxId) } + if m.Verbose { + dAtA[i] = 0x10 + i++ + if m.Verbose { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } return i, nil } @@ -5377,6 +5520,23 @@ func (m *PodSandboxStatusResponse) MarshalTo(dAtA []byte) (int, error) { } i += n16 } + if len(m.Info) > 0 { + for k := range m.Info { + dAtA[i] = 0x12 + i++ + v := m.Info[k] + mapSize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + len(v) + sovApi(uint64(len(v))) + i = encodeVarintApi(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintApi(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } return i, nil } @@ -6664,6 +6824,16 @@ func (m *ContainerStatusRequest) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintApi(dAtA, i, uint64(len(m.ContainerId))) i += copy(dAtA[i:], m.ContainerId) } + if m.Verbose { + dAtA[i] = 0x10 + i++ + if m.Verbose { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } return i, nil } @@ -6831,6 +7001,23 @@ func (m *ContainerStatusResponse) MarshalTo(dAtA []byte) (int, error) { } i += n39 } + if len(m.Info) > 0 { + for k := range m.Info { + dAtA[i] = 0x12 + i++ + v := m.Info[k] + mapSize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + len(v) + sovApi(uint64(len(v))) + i = encodeVarintApi(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintApi(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } return i, nil } @@ -7021,6 +7208,26 @@ func (m *ExecRequest) MarshalTo(dAtA []byte) (int, error) { } i++ } + if m.Stdout { + dAtA[i] = 0x28 + i++ + if m.Stdout { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Stderr { + dAtA[i] = 0x30 + i++ + if m.Stderr { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } return i, nil } @@ -7089,6 +7296,26 @@ func (m *AttachRequest) MarshalTo(dAtA []byte) (int, error) { } i++ } + if m.Stdout { + dAtA[i] = 0x20 + i++ + if m.Stdout { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Stderr { + dAtA[i] = 0x28 
+ i++ + if m.Stderr { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } return i, nil } @@ -7368,6 +7595,16 @@ func (m *ImageStatusRequest) MarshalTo(dAtA []byte) (int, error) { } i += n46 } + if m.Verbose { + dAtA[i] = 0x10 + i++ + if m.Verbose { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } return i, nil } @@ -7396,6 +7633,23 @@ func (m *ImageStatusResponse) MarshalTo(dAtA []byte) (int, error) { } i += n47 } + if len(m.Info) > 0 { + for k := range m.Info { + dAtA[i] = 0x12 + i++ + v := m.Info[k] + mapSize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + len(v) + sovApi(uint64(len(v))) + i = encodeVarintApi(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintApi(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } return i, nil } @@ -7760,6 +8014,16 @@ func (m *StatusRequest) MarshalTo(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Verbose { + dAtA[i] = 0x8 + i++ + if m.Verbose { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } return i, nil } @@ -7788,6 +8052,23 @@ func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) { } i += n54 } + if len(m.Info) > 0 { + for k := range m.Info { + dAtA[i] = 0x12 + i++ + v := m.Info[k] + mapSize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + len(v) + sovApi(uint64(len(v))) + i = encodeVarintApi(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintApi(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } return i, nil } @@ -8622,6 +8903,9 @@ func (m *PodSandboxStatusRequest) Size() (n int) { if l > 0 { n += 1 + l + sovApi(uint64(l)) } + if m.Verbose { + n += 2 + } return n } @@ -8706,6 +8990,14 @@ func (m *PodSandboxStatusResponse) Size() (n int) { l = m.Status.Size() n += 1 + l + sovApi(uint64(l)) } + if len(m.Info) > 0 { + for k, v := range m.Info { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + len(v) + sovApi(uint64(len(v))) + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } return n } @@ -9258,6 +9550,9 @@ func (m *ContainerStatusRequest) Size() (n int) { if l > 0 { n += 1 + l + sovApi(uint64(l)) } + if m.Verbose { + n += 2 + } return n } @@ -9339,6 +9634,14 @@ func (m *ContainerStatusResponse) Size() (n int) { l = m.Status.Size() n += 1 + l + sovApi(uint64(l)) } + if len(m.Info) > 0 { + for k, v := range m.Info { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + len(v) + sovApi(uint64(len(v))) + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } return n } @@ -9417,6 +9720,12 @@ func (m *ExecRequest) Size() (n int) { if m.Stdin { n += 2 } + if m.Stdout { + n += 2 + } + if m.Stderr { + n += 2 + } return n } @@ -9443,6 +9752,12 @@ func (m *AttachRequest) Size() (n int) { if m.Tty { n += 2 } + if m.Stdout { + n += 2 + } + if m.Stderr { + n += 2 + } return n } @@ -9555,6 +9870,9 @@ func (m *ImageStatusRequest) Size() (n int) { l = m.Image.Size() n += 1 + l + sovApi(uint64(l)) } + if m.Verbose { + n += 2 + } return n } @@ -9565,6 +9883,14 @@ func (m *ImageStatusResponse) Size() (n int) { l = m.Image.Size() n += 1 + l + sovApi(uint64(l)) } + if len(m.Info) > 0 { + for k, v := range m.Info { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + len(v) + sovApi(uint64(len(v))) + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } return n } @@ -9714,6 +10040,9 @@ func (m *RuntimeStatus) Size() (n int) { 
func (m *StatusRequest) Size() (n int) { var l int _ = l + if m.Verbose { + n += 2 + } return n } @@ -9724,6 +10053,14 @@ func (m *StatusResponse) Size() (n int) { l = m.Status.Size() n += 1 + l + sovApi(uint64(l)) } + if len(m.Info) > 0 { + for k, v := range m.Info { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + len(v) + sovApi(uint64(len(v))) + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } return n } @@ -10176,6 +10513,7 @@ func (this *PodSandboxStatusRequest) String() string { } s := strings.Join([]string{`&PodSandboxStatusRequest{`, `PodSandboxId:` + fmt.Sprintf("%v", this.PodSandboxId) + `,`, + `Verbose:` + fmt.Sprintf("%v", this.Verbose) + `,`, `}`, }, "") return s @@ -10251,8 +10589,19 @@ func (this *PodSandboxStatusResponse) String() string { if this == nil { return "nil" } + keysForInfo := make([]string, 0, len(this.Info)) + for k := range this.Info { + keysForInfo = append(keysForInfo, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForInfo) + mapStringForInfo := "map[string]string{" + for _, k := range keysForInfo { + mapStringForInfo += fmt.Sprintf("%v: %v,", k, this.Info[k]) + } + mapStringForInfo += "}" s := strings.Join([]string{`&PodSandboxStatusResponse{`, `Status:` + strings.Replace(fmt.Sprintf("%v", this.Status), "PodSandboxStatus", "PodSandboxStatus", 1) + `,`, + `Info:` + mapStringForInfo + `,`, `}`, }, "") return s @@ -10680,6 +11029,7 @@ func (this *ContainerStatusRequest) String() string { } s := strings.Join([]string{`&ContainerStatusRequest{`, `ContainerId:` + fmt.Sprintf("%v", this.ContainerId) + `,`, + `Verbose:` + fmt.Sprintf("%v", this.Verbose) + `,`, `}`, }, "") return s @@ -10732,8 +11082,19 @@ func (this *ContainerStatusResponse) String() string { if this == nil { return "nil" } + keysForInfo := make([]string, 0, len(this.Info)) + for k := range this.Info { + keysForInfo = append(keysForInfo, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForInfo) + mapStringForInfo := "map[string]string{" + for _, k := range keysForInfo { + mapStringForInfo += fmt.Sprintf("%v: %v,", k, this.Info[k]) + } + mapStringForInfo += "}" s := strings.Join([]string{`&ContainerStatusResponse{`, `Status:` + strings.Replace(fmt.Sprintf("%v", this.Status), "ContainerStatus", "ContainerStatus", 1) + `,`, + `Info:` + mapStringForInfo + `,`, `}`, }, "") return s @@ -10791,6 +11152,8 @@ func (this *ExecRequest) String() string { `Cmd:` + fmt.Sprintf("%v", this.Cmd) + `,`, `Tty:` + fmt.Sprintf("%v", this.Tty) + `,`, `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, + `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, `}`, }, "") return s @@ -10813,6 +11176,8 @@ func (this *AttachRequest) String() string { `ContainerId:` + fmt.Sprintf("%v", this.ContainerId) + `,`, `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, `Tty:` + fmt.Sprintf("%v", this.Tty) + `,`, + `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, + `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, `}`, }, "") return s @@ -10899,6 +11264,7 @@ func (this *ImageStatusRequest) String() string { } s := strings.Join([]string{`&ImageStatusRequest{`, `Image:` + strings.Replace(fmt.Sprintf("%v", this.Image), "ImageSpec", "ImageSpec", 1) + `,`, + `Verbose:` + fmt.Sprintf("%v", this.Verbose) + `,`, `}`, }, "") return s @@ -10907,8 +11273,19 @@ func (this *ImageStatusResponse) String() string { if this == nil { return "nil" } + keysForInfo := make([]string, 0, len(this.Info)) + for k := range this.Info { + keysForInfo = 
append(keysForInfo, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForInfo) + mapStringForInfo := "map[string]string{" + for _, k := range keysForInfo { + mapStringForInfo += fmt.Sprintf("%v: %v,", k, this.Info[k]) + } + mapStringForInfo += "}" s := strings.Join([]string{`&ImageStatusResponse{`, `Image:` + strings.Replace(fmt.Sprintf("%v", this.Image), "Image", "Image", 1) + `,`, + `Info:` + mapStringForInfo + `,`, `}`, }, "") return s @@ -11036,6 +11413,7 @@ func (this *StatusRequest) String() string { return "nil" } s := strings.Join([]string{`&StatusRequest{`, + `Verbose:` + fmt.Sprintf("%v", this.Verbose) + `,`, `}`, }, "") return s @@ -11044,8 +11422,19 @@ func (this *StatusResponse) String() string { if this == nil { return "nil" } + keysForInfo := make([]string, 0, len(this.Info)) + for k := range this.Info { + keysForInfo = append(keysForInfo, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForInfo) + mapStringForInfo := "map[string]string{" + for _, k := range keysForInfo { + mapStringForInfo += fmt.Sprintf("%v: %v,", k, this.Info[k]) + } + mapStringForInfo += "}" s := strings.Join([]string{`&StatusResponse{`, `Status:` + strings.Replace(fmt.Sprintf("%v", this.Status), "RuntimeStatus", "RuntimeStatus", 1) + `,`, + `Info:` + mapStringForInfo + `,`, `}`, }, "") return s @@ -13716,6 +14105,26 @@ func (m *PodSandboxStatusRequest) Unmarshal(dAtA []byte) error { } m.PodSandboxId = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbose", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Verbose = bool(v != 0) default: iNdEx = preIndex skippy, err := skipApi(dAtA[iNdEx:]) @@ -14492,8 +14901,124 @@ func (m *PodSandboxStatusResponse) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - default: - iNdEx = preIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Info == nil { + m.Info = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Info[mapkey] = mapvalue + } else { + var mapvalue string + m.Info[mapkey] = mapvalue + } + iNdEx = postIndex + default: + iNdEx = preIndex skippy, err := skipApi(dAtA[iNdEx:]) if err != nil { return err @@ -19063,6 +19588,26 @@ func (m *ContainerStatusRequest) Unmarshal(dAtA []byte) error { } m.ContainerId = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbose", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Verbose = bool(v != 0) default: iNdEx = preIndex skippy, err := skipApi(dAtA[iNdEx:]) @@ -19765,6 +20310,122 @@ func (m *ContainerStatusResponse) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Info == nil { + m.Info = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Info[mapkey] = mapvalue + } else { + var mapvalue string + m.Info[mapkey] = mapvalue + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(dAtA[iNdEx:]) @@ -20333,17 +20994,57 @@ func (m *ExecRequest) Unmarshal(dAtA []byte) error { } } m.Stdin = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Stdout = bool(v != 0) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Stderr = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } iNdEx += skippy } @@ -20531,6 +21232,46 @@ func (m *AttachRequest) Unmarshal(dAtA []byte) error { } } m.Tty = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Stdout = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Stderr = bool(v != 0) default: iNdEx = preIndex skippy, err := skipApi(dAtA[iNdEx:]) @@ -21378,6 +22119,26 @@ func (m *ImageStatusRequest) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbose", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Verbose = bool(v != 0) default: iNdEx = preIndex skippy, err := skipApi(dAtA[iNdEx:]) @@ -21461,6 +22222,122 
@@ func (m *ImageStatusResponse) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Info == nil { + m.Info = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Info[mapkey] = mapvalue + } else { + var mapvalue string + m.Info[mapkey] = mapvalue + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(dAtA[iNdEx:]) @@ -22629,6 +23506,26 @@ func (m *StatusRequest) Unmarshal(dAtA []byte) error { return fmt.Errorf("proto: StatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbose", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Verbose = bool(v != 0) default: iNdEx = preIndex skippy, err := skipApi(dAtA[iNdEx:]) @@ -22712,6 +23609,122 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + 
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Info == nil { + m.Info = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Info[mapkey] = mapvalue + } else { + var mapvalue string + m.Info[mapkey] = mapvalue + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipApi(dAtA[iNdEx:]) @@ -24568,273 +25581,280 @@ var ( func init() { proto.RegisterFile("api.proto", fileDescriptorApi) } var fileDescriptorApi = []byte{ - // 4276 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x3b, 0x4d, 0x6f, 0x1b, 0x49, - 0x76, 0x22, 0xa9, 0x0f, 0xf2, 0x51, 0xa4, 0xa8, 0x92, 0x2c, 0x51, 0xb4, 0x2d, 0xcb, 0xed, 0xf1, - 0x8c, 0xed, 0x5d, 0x7b, 0x3c, 0x9a, 0x1d, 0x4f, 0xc6, 0xf3, 0x65, 0x8e, 0x24, 0x3b, 0x1a, 0xdb, - 0x94, 0xb6, 0x29, 0xcd, 0xee, 0x66, 0x03, 0x74, 0x5a, 0xec, 0x12, 0xd5, 0x33, 0x64, 0x77, 0x4f, - 0x77, 0xb5, 0x6d, 0x05, 0x39, 0x24, 0x97, 0x20, 0x97, 0x00, 0x9b, 0x63, 0x6e, 0x39, 0x04, 0x58, - 0xe4, 0x92, 0x43, 0x0e, 0x41, 0x7e, 0x41, 0xb0, 0x97, 0x05, 0x02, 0x04, 0x08, 0x92, 0x5b, 0xd6, - 0x39, 0xe4, 0x10, 0x20, 0xbf, 0x61, 0x51, 0x5f, 0xdd, 0xd5, 0x5f, 0xb4, 0xe4, 0x19, 0xec, 0xfa, - 0xc4, 0xae, 0x57, 0xaf, 0x5e, 0xbd, 0x7a, 0xef, 0xd5, 0xab, 0xf7, 0x5e, 0x15, 0xa1, 0x66, 0x7a, - 0xf6, 0x1d, 0xcf, 0x77, 0x89, 0x8b, 0xe6, 0xfc, 0xd0, 0x21, 0xf6, 0x18, 0x77, 0x6e, 0x0f, 0x6d, - 0x72, 0x12, 0x1e, 0xdd, 0x19, 0xb8, 0xe3, 0x77, 0x87, 0xee, 0xd0, 0x7d, 0x97, 0xf5, 0x1f, 0x85, - 0xc7, 0xac, 0xc5, 0x1a, 0xec, 0x8b, 0x8f, 0xd3, 0x6e, 0x41, 0xf3, 0x2b, 0xec, 0x07, 0xb6, 0xeb, - 0xe8, 0xf8, 
0xdb, 0x10, 0x07, 0x04, 0xb5, 0x61, 0xee, 0x19, 0x87, 0xb4, 0x4b, 0x1b, 0xa5, 0x1b, - 0x35, 0x5d, 0x36, 0xb5, 0x5f, 0x96, 0x60, 0x21, 0x42, 0x0e, 0x3c, 0xd7, 0x09, 0x70, 0x31, 0x36, - 0xba, 0x0a, 0xf3, 0x82, 0x27, 0xc3, 0x31, 0xc7, 0xb8, 0x5d, 0x66, 0xdd, 0x75, 0x01, 0xeb, 0x99, - 0x63, 0x8c, 0xde, 0x81, 0x05, 0x89, 0x22, 0x89, 0x54, 0x18, 0x56, 0x53, 0x80, 0xc5, 0x6c, 0xe8, - 0x0e, 0x2c, 0x49, 0x44, 0xd3, 0xb3, 0x23, 0xe4, 0x69, 0x86, 0xbc, 0x28, 0xba, 0xba, 0x9e, 0x2d, - 0xf0, 0xb5, 0x9f, 0x43, 0x6d, 0xbb, 0xd7, 0xdf, 0x72, 0x9d, 0x63, 0x7b, 0x48, 0x59, 0x0c, 0xb0, - 0x4f, 0xc7, 0xb4, 0x4b, 0x1b, 0x15, 0xca, 0xa2, 0x68, 0xa2, 0x0e, 0x54, 0x03, 0x6c, 0xfa, 0x83, - 0x13, 0x1c, 0xb4, 0xcb, 0xac, 0x2b, 0x6a, 0xd3, 0x51, 0xae, 0x47, 0x6c, 0xd7, 0x09, 0xda, 0x15, - 0x3e, 0x4a, 0x34, 0xb5, 0xbf, 0x2d, 0x41, 0x7d, 0xdf, 0xf5, 0xc9, 0x53, 0xd3, 0xf3, 0x6c, 0x67, - 0x88, 0x6e, 0x43, 0x95, 0xc9, 0x72, 0xe0, 0x8e, 0x98, 0x0c, 0x9a, 0x9b, 0x8b, 0x77, 0x04, 0x4b, - 0x77, 0xf6, 0x45, 0x87, 0x1e, 0xa1, 0xa0, 0xeb, 0xd0, 0x1c, 0xb8, 0x0e, 0x31, 0x6d, 0x07, 0xfb, - 0x86, 0xe7, 0xfa, 0x84, 0x49, 0x66, 0x46, 0x6f, 0x44, 0x50, 0x4a, 0x1c, 0x5d, 0x84, 0xda, 0x89, - 0x1b, 0x10, 0x8e, 0x51, 0x61, 0x18, 0x55, 0x0a, 0x60, 0x9d, 0xab, 0x30, 0xc7, 0x3a, 0x6d, 0x4f, - 0xc8, 0x60, 0x96, 0x36, 0x77, 0x3d, 0xed, 0xd7, 0x25, 0x98, 0x79, 0xea, 0x86, 0x0e, 0x49, 0x4d, - 0x63, 0x92, 0x13, 0xa1, 0x1f, 0x65, 0x1a, 0x93, 0x9c, 0xc4, 0xd3, 0x50, 0x0c, 0xae, 0x22, 0x3e, - 0x0d, 0xed, 0xec, 0x40, 0xd5, 0xc7, 0xa6, 0xe5, 0x3a, 0xa3, 0x53, 0xc6, 0x42, 0x55, 0x8f, 0xda, - 0x54, 0x77, 0x01, 0x1e, 0xd9, 0x4e, 0xf8, 0xc2, 0xf0, 0xf1, 0xc8, 0x3c, 0xc2, 0x23, 0xc6, 0x4a, - 0x55, 0x6f, 0x0a, 0xb0, 0xce, 0xa1, 0xe8, 0x63, 0xa8, 0x7b, 0xbe, 0xeb, 0x99, 0x43, 0x93, 0x8a, - 0xaf, 0x3d, 0xc3, 0x24, 0xb4, 0x16, 0x49, 0x88, 0x71, 0xbb, 0x1f, 0x23, 0xe8, 0x2a, 0xb6, 0xf6, - 0x35, 0x2c, 0x50, 0x4b, 0x09, 0x3c, 0x73, 0x80, 0xf7, 0x98, 0xfc, 0xa9, 0x5d, 0x31, 0x8e, 0x1d, - 0x4c, 0x9e, 0xbb, 0xfe, 0x37, 0x6c, 0x59, 0x55, 0xbd, 0x4e, 0x61, 0x3d, 0x0e, 0x42, 0x6b, 0x50, - 0xe5, 0x8b, 0xb2, 0x2d, 0xb6, 0xa6, 0xaa, 0xce, 0xc4, 0xb5, 0x6f, 0x5b, 0x51, 0x97, 0xed, 0x0d, - 0xc4, 0x92, 0xe6, 0xb8, 0xe8, 0x06, 0x9a, 0x06, 0xb0, 0xeb, 0x90, 0x7b, 0x3f, 0xfa, 0xca, 0x1c, - 0x85, 0x18, 0x2d, 0xc3, 0xcc, 0x33, 0xfa, 0xc1, 0xe8, 0x57, 0x74, 0xde, 0xd0, 0xfe, 0xb2, 0x02, - 0x17, 0x9f, 0xd0, 0xd5, 0xf5, 0x4d, 0xc7, 0x3a, 0x72, 0x5f, 0xf4, 0xf1, 0x20, 0xf4, 0x6d, 0x72, - 0xba, 0xe5, 0x3a, 0x04, 0xbf, 0x20, 0x68, 0x07, 0x16, 0x1d, 0xc9, 0xaf, 0x21, 0xed, 0x87, 0x52, - 0xa8, 0x6f, 0xb6, 0xa3, 0x25, 0xa7, 0x56, 0xa4, 0xb7, 0x9c, 0x24, 0x20, 0x40, 0x9f, 0xc7, 0xc2, - 0x95, 0x44, 0xca, 0x8c, 0xc8, 0x4a, 0x44, 0xa4, 0xbf, 0xc3, 0xf8, 0x10, 0x24, 0xa4, 0xd0, 0x25, - 0x81, 0xf7, 0x81, 0x6e, 0x34, 0xc3, 0x0c, 0x8c, 0x30, 0xc0, 0x3e, 0x5b, 0x69, 0x7d, 0x73, 0x29, - 0x1a, 0x1c, 0xaf, 0x53, 0xaf, 0xf9, 0xa1, 0xd3, 0x0d, 0x0e, 0x03, 0xec, 0xb3, 0xed, 0x28, 0xd4, - 0x6b, 0xf8, 0xae, 0x4b, 0x8e, 0x03, 0xa9, 0x52, 0x09, 0xd6, 0x19, 0x14, 0xbd, 0x0b, 0x4b, 0x41, - 0xe8, 0x79, 0x23, 0x3c, 0xc6, 0x0e, 0x31, 0x47, 0xc6, 0xd0, 0x77, 0x43, 0x2f, 0x68, 0xcf, 0x6c, - 0x54, 0x6e, 0x54, 0x74, 0xa4, 0x76, 0x3d, 0x62, 0x3d, 0x68, 0x1d, 0xc0, 0xf3, 0xed, 0x67, 0xf6, - 0x08, 0x0f, 0xb1, 0xd5, 0x9e, 0x65, 0x44, 0x15, 0x08, 0xba, 0x0b, 0xcb, 0x01, 0x1e, 0x0c, 0xdc, - 0xb1, 0x67, 0x78, 0xbe, 0x7b, 0x6c, 0x8f, 0x30, 0x37, 0xc8, 0x39, 0x66, 0x90, 0x48, 0xf4, 0xed, - 0xf3, 0x2e, 0x6a, 0x9a, 0xda, 0x2f, 0xca, 0x70, 0x81, 0x09, 0x60, 0xdf, 0xb5, 0x84, 0x2e, 0xc4, - 0x76, 0xbf, 0x06, 0x8d, 0x01, 0x63, 
0xc8, 0xf0, 0x4c, 0x1f, 0x3b, 0x44, 0xd8, 0xfd, 0x3c, 0x07, - 0xee, 0x33, 0x18, 0xda, 0x83, 0x56, 0x20, 0x54, 0x67, 0x0c, 0xb8, 0xee, 0x84, 0x84, 0xdf, 0x8a, - 0x84, 0x34, 0x41, 0xcf, 0xfa, 0x42, 0x90, 0x51, 0xfc, 0x5c, 0x70, 0x1a, 0x0c, 0xc8, 0x88, 0xbb, - 0x8b, 0xfa, 0xe6, 0x0f, 0x92, 0x74, 0xd2, 0x6c, 0xde, 0xe9, 0x73, 0xec, 0x1d, 0x87, 0xf8, 0xa7, - 0xba, 0x1c, 0xdb, 0xb9, 0x0f, 0xf3, 0x6a, 0x07, 0x6a, 0x41, 0xe5, 0x1b, 0x7c, 0x2a, 0x96, 0x40, - 0x3f, 0x63, 0xbb, 0xe4, 0x9b, 0x95, 0x37, 0xee, 0x97, 0xff, 0xa0, 0xa4, 0xf9, 0x80, 0xe2, 0x59, - 0x9e, 0x62, 0x62, 0x5a, 0x26, 0x31, 0x11, 0x82, 0x69, 0xe6, 0x7e, 0x39, 0x09, 0xf6, 0x4d, 0xa9, - 0x86, 0x62, 0x6b, 0xd4, 0x74, 0xfa, 0x89, 0x2e, 0x41, 0x2d, 0x32, 0x42, 0xe1, 0x83, 0x63, 0x00, - 0xf5, 0x85, 0x26, 0x21, 0x78, 0xec, 0x11, 0x66, 0x10, 0x0d, 0x5d, 0x36, 0xb5, 0x7f, 0x99, 0x86, - 0x56, 0x46, 0x03, 0x1f, 0x42, 0x75, 0x2c, 0xa6, 0x17, 0xb6, 0x7f, 0x31, 0x76, 0x88, 0x19, 0x0e, - 0xf5, 0x08, 0x99, 0xfa, 0x1b, 0xba, 0x19, 0x95, 0xe3, 0x22, 0x6a, 0x53, 0xb5, 0x8e, 0xdc, 0xa1, - 0x61, 0xd9, 0x3e, 0x1e, 0x10, 0xd7, 0x3f, 0x15, 0x5c, 0xce, 0x8f, 0xdc, 0xe1, 0xb6, 0x84, 0xa1, - 0xf7, 0x00, 0x2c, 0x27, 0xa0, 0x1a, 0x3d, 0xb6, 0x87, 0x8c, 0xd7, 0xfa, 0x26, 0x8a, 0xe6, 0x8e, - 0x8e, 0x04, 0xbd, 0x66, 0x39, 0x81, 0x60, 0xf6, 0x23, 0x68, 0x50, 0x17, 0x6b, 0x8c, 0xb9, 0x37, - 0xe7, 0x56, 0x5c, 0xdf, 0x5c, 0x56, 0x38, 0x8e, 0x5c, 0xbd, 0x3e, 0xef, 0xc5, 0x8d, 0x00, 0x7d, - 0x0a, 0xb3, 0xcc, 0xc5, 0x05, 0xed, 0x59, 0x36, 0xe6, 0x7a, 0xce, 0x2a, 0x85, 0xb6, 0x9f, 0x30, - 0x3c, 0xae, 0x6c, 0x31, 0x08, 0x3d, 0x81, 0xba, 0xe9, 0x38, 0x2e, 0x31, 0xf9, 0x06, 0x9f, 0x63, - 0x34, 0x6e, 0x15, 0xd3, 0xe8, 0xc6, 0xc8, 0x9c, 0x90, 0x3a, 0x1c, 0xfd, 0x08, 0x66, 0x98, 0x07, - 0x68, 0x57, 0xd9, 0xaa, 0xd7, 0x27, 0x9b, 0x9f, 0xce, 0x91, 0x3b, 0x1f, 0x41, 0x5d, 0x61, 0xed, - 0x3c, 0xe6, 0xd6, 0xf9, 0x0c, 0x5a, 0x69, 0x8e, 0xce, 0x65, 0xae, 0xbb, 0xb0, 0xac, 0x87, 0x4e, - 0xcc, 0x98, 0x8c, 0x3f, 0xde, 0x83, 0x59, 0xa1, 0x3f, 0x6e, 0x3b, 0x6b, 0x85, 0x12, 0xd1, 0x05, - 0xa2, 0xf6, 0x29, 0x5c, 0x48, 0x91, 0x12, 0xd1, 0xc9, 0x5b, 0xd0, 0xf4, 0x5c, 0xcb, 0x08, 0x38, - 0xd8, 0xb0, 0x2d, 0xe9, 0x0c, 0xbc, 0x08, 0x77, 0xd7, 0xa2, 0xc3, 0xfb, 0xc4, 0xf5, 0xb2, 0xac, - 0x9c, 0x6d, 0x78, 0x1b, 0x56, 0xd2, 0xc3, 0xf9, 0xf4, 0xda, 0xe7, 0xb0, 0xaa, 0xe3, 0xb1, 0xfb, - 0x0c, 0xbf, 0x2e, 0xe9, 0x0e, 0xb4, 0xb3, 0x04, 0x62, 0xe2, 0x31, 0xb4, 0x4f, 0x4c, 0x12, 0x06, - 0xe7, 0x23, 0x7e, 0x53, 0x25, 0x20, 0x8e, 0x4e, 0x4e, 0x07, 0x35, 0xa1, 0x6c, 0x7b, 0x62, 0x50, - 0xd9, 0xf6, 0xb4, 0xcf, 0xa1, 0x16, 0x1d, 0x5a, 0x68, 0x33, 0x8e, 0x8c, 0xca, 0xaf, 0x38, 0xd9, - 0xa2, 0x98, 0xe9, 0x71, 0xc6, 0x5b, 0x8b, 0x99, 0x36, 0x01, 0x22, 0x3f, 0x23, 0x4f, 0x4a, 0x94, - 0xa5, 0xa7, 0x2b, 0x58, 0xda, 0xdf, 0x27, 0x9c, 0x8e, 0xc2, 0xb2, 0x15, 0xb1, 0x6c, 0x25, 0x9c, - 0x50, 0xf9, 0x3c, 0x4e, 0xe8, 0x0e, 0xcc, 0x04, 0xc4, 0x24, 0xdc, 0x0d, 0x36, 0x95, 0xc5, 0x25, - 0xa7, 0xc4, 0x3a, 0x47, 0x43, 0x97, 0x01, 0x06, 0x3e, 0x36, 0x09, 0xb6, 0x0c, 0x93, 0xfb, 0xc7, - 0x8a, 0x5e, 0x13, 0x90, 0x2e, 0x41, 0xf7, 0x61, 0x4e, 0x46, 0x2a, 0x33, 0x8c, 0x8d, 0x8d, 0x1c, - 0x82, 0x09, 0xe9, 0xeb, 0x72, 0x40, 0xbc, 0xa7, 0x67, 0x27, 0xef, 0x69, 0x31, 0x8e, 0x23, 0x2b, - 0x6e, 0x69, 0xae, 0xd0, 0x2d, 0xf1, 0x11, 0x67, 0x71, 0x4b, 0xd5, 0x42, 0xb7, 0x24, 0x68, 0x4c, - 0x74, 0x4b, 0xbf, 0x4f, 0x07, 0xf3, 0x14, 0xda, 0xd9, 0x0d, 0x22, 0x1c, 0xc3, 0x7b, 0x30, 0x1b, - 0x30, 0xc8, 0x04, 0x27, 0x23, 0x86, 0x08, 0x44, 0xed, 0x21, 0x2c, 0xa7, 0x2c, 0x80, 0x07, 0x8a, - 0x91, 0xbd, 0x94, 0xce, 0x64, 0x2f, 0xda, 0xff, 0x97, 0x54, 
0xeb, 0x7d, 0x68, 0x8f, 0x08, 0xf6, - 0x33, 0xd6, 0xfb, 0xbe, 0x24, 0xca, 0x4d, 0xf7, 0x72, 0x11, 0x51, 0x1e, 0xc3, 0x09, 0x4b, 0xec, - 0x43, 0x93, 0xe9, 0xd0, 0x08, 0xf0, 0x88, 0x1d, 0x88, 0x22, 0x14, 0xf9, 0x61, 0xce, 0x68, 0x3e, - 0x2f, 0x37, 0x80, 0xbe, 0x40, 0xe7, 0xea, 0x6b, 0x8c, 0x54, 0x58, 0xe7, 0x01, 0xa0, 0x2c, 0xd2, - 0xb9, 0xf4, 0xf0, 0x25, 0xdd, 0xfb, 0x34, 0x71, 0xc9, 0xf1, 0xf4, 0xc7, 0x8c, 0x8d, 0x09, 0x4a, - 0xe0, 0x7c, 0xea, 0x02, 0x51, 0xfb, 0xbb, 0x0a, 0x40, 0xdc, 0xf9, 0xc6, 0x6e, 0xfa, 0x0f, 0xa3, - 0x2d, 0xc8, 0xa3, 0x89, 0x2b, 0x39, 0xf4, 0x72, 0x37, 0xdf, 0xc3, 0xe4, 0xe6, 0xe3, 0x71, 0xc5, - 0x5b, 0x79, 0xa3, 0xdf, 0xd8, 0x6d, 0xb7, 0x05, 0x2b, 0x69, 0x75, 0x8b, 0x4d, 0x77, 0x13, 0x66, - 0x6c, 0x82, 0xc7, 0x3c, 0x0d, 0x57, 0xd3, 0x11, 0x05, 0x97, 0x63, 0x68, 0x57, 0xa1, 0xb6, 0x3b, - 0x36, 0x87, 0xb8, 0xef, 0xe1, 0x01, 0x9d, 0xcb, 0xa6, 0x0d, 0x31, 0x3f, 0x6f, 0x68, 0x9b, 0x50, - 0x7d, 0x8c, 0x4f, 0xf9, 0x1e, 0x3c, 0x23, 0x7f, 0xda, 0x5f, 0x97, 0x61, 0x95, 0xf9, 0xce, 0x2d, - 0x99, 0x04, 0xeb, 0x38, 0x70, 0x43, 0x7f, 0x80, 0x03, 0xa6, 0x52, 0x2f, 0x34, 0x3c, 0xec, 0xdb, - 0xae, 0x25, 0xb2, 0xbe, 0xda, 0xc0, 0x0b, 0xf7, 0x19, 0x80, 0x26, 0xca, 0xb4, 0xfb, 0xdb, 0xd0, - 0x15, 0xb6, 0x55, 0xd1, 0xab, 0x03, 0x2f, 0xfc, 0x31, 0x6d, 0xcb, 0xb1, 0xc1, 0x89, 0xe9, 0xe3, - 0x80, 0xd9, 0x10, 0x1f, 0xdb, 0x67, 0x00, 0xf4, 0x1e, 0x5c, 0x18, 0xe3, 0xb1, 0xeb, 0x9f, 0x1a, - 0x23, 0x7b, 0x6c, 0x13, 0xc3, 0x76, 0x8c, 0xa3, 0x53, 0x82, 0x03, 0x61, 0x38, 0x88, 0x77, 0x3e, - 0xa1, 0x7d, 0xbb, 0xce, 0x17, 0xb4, 0x07, 0x69, 0xd0, 0x70, 0xdd, 0xb1, 0x11, 0x0c, 0x5c, 0x1f, - 0x1b, 0xa6, 0xf5, 0x35, 0x3b, 0x3c, 0x2a, 0x7a, 0xdd, 0x75, 0xc7, 0x7d, 0x0a, 0xeb, 0x5a, 0x5f, - 0xa3, 0x2b, 0x50, 0x1f, 0x78, 0x61, 0x80, 0x89, 0x41, 0x7f, 0xd8, 0x21, 0x51, 0xd3, 0x81, 0x83, - 0xb6, 0xbc, 0x30, 0x50, 0x10, 0xc6, 0x54, 0xec, 0x73, 0x2a, 0xc2, 0x53, 0x2a, 0x66, 0x13, 0x1a, - 0x89, 0x3c, 0x92, 0x66, 0x0b, 0x2c, 0x61, 0x14, 0xd9, 0x02, 0xfd, 0xa6, 0x30, 0xdf, 0x1d, 0x49, - 0x49, 0xb2, 0x6f, 0x0a, 0x23, 0xa7, 0x9e, 0x4c, 0x15, 0xd8, 0x37, 0x15, 0xf9, 0x08, 0x3f, 0x13, - 0x75, 0x80, 0x9a, 0xce, 0x1b, 0x9a, 0x05, 0xb0, 0x65, 0x7a, 0xe6, 0x91, 0x3d, 0xb2, 0xc9, 0x29, - 0xba, 0x09, 0x2d, 0xd3, 0xb2, 0x8c, 0x81, 0x84, 0xd8, 0x58, 0x16, 0x65, 0x16, 0x4c, 0xcb, 0xda, - 0x52, 0xc0, 0xe8, 0x07, 0xb0, 0x68, 0xf9, 0xae, 0x97, 0xc4, 0xe5, 0x55, 0x9a, 0x16, 0xed, 0x50, - 0x91, 0xb5, 0x7f, 0x9e, 0x86, 0xcb, 0x49, 0xc5, 0xa6, 0x33, 0xf3, 0x0f, 0x61, 0x3e, 0x35, 0x6b, - 0x32, 0x25, 0x8e, 0x99, 0xd4, 0x13, 0x88, 0xa9, 0xdc, 0xb5, 0x9c, 0xc9, 0x5d, 0x73, 0x53, 0xfe, - 0xca, 0xf7, 0x91, 0xf2, 0x4f, 0x7f, 0x97, 0x94, 0x7f, 0xe6, 0x4c, 0x29, 0xff, 0xdb, 0xac, 0x02, - 0x27, 0x07, 0xb1, 0xc4, 0x8b, 0x9b, 0x51, 0x23, 0xc2, 0x71, 0x64, 0xa5, 0x2e, 0x55, 0x1a, 0x98, - 0x3b, 0x4f, 0x69, 0xa0, 0x5a, 0x58, 0x1a, 0xa0, 0x16, 0xe1, 0x79, 0xa6, 0x3f, 0x76, 0x7d, 0x99, - 0xfb, 0xb7, 0x6b, 0x8c, 0x85, 0x05, 0x09, 0x17, 0x79, 0x7f, 0x61, 0x95, 0x00, 0x8a, 0xaa, 0x04, - 0x68, 0x03, 0xe6, 0x1d, 0xd7, 0x70, 0xf0, 0x73, 0x83, 0x2a, 0x2c, 0x68, 0xd7, 0xb9, 0xf6, 0x1c, - 0xb7, 0x87, 0x9f, 0xef, 0x53, 0x88, 0xf6, 0x0f, 0x25, 0x58, 0x4e, 0x1a, 0x8e, 0xc8, 0x0b, 0x3f, - 0x83, 0x9a, 0x2f, 0x7d, 0x83, 0x30, 0x96, 0x8d, 0x64, 0xfc, 0x95, 0xf5, 0x21, 0x7a, 0x3c, 0x04, - 0xfd, 0xb8, 0xb0, 0xc2, 0xf0, 0x76, 0x01, 0x99, 0x57, 0xd5, 0x18, 0xb4, 0x2e, 0x2c, 0x46, 0xc8, - 0x13, 0xf3, 0x7b, 0x25, 0x5f, 0x2f, 0x27, 0xf3, 0x75, 0x07, 0x66, 0xb7, 0xf1, 0x33, 0x7b, 0x80, - 0xbf, 0x97, 0xfa, 0xe0, 0x06, 0xd4, 0x3d, 0xec, 0x8f, 0xed, 0x20, 0x88, 0x8c, 0xbe, 
0xa6, 0xab, - 0x20, 0xed, 0xbf, 0x66, 0x60, 0x21, 0x2d, 0xd9, 0x7b, 0x99, 0xf2, 0x40, 0x27, 0xde, 0x85, 0xe9, - 0xf5, 0x29, 0x67, 0xf4, 0x0d, 0x79, 0x0c, 0x94, 0x53, 0x59, 0x42, 0x74, 0x52, 0x88, 0xa3, 0x81, - 0xae, 0x7f, 0xe0, 0x8e, 0xc7, 0xa6, 0x63, 0xc9, 0xda, 0xad, 0x68, 0x52, 0x69, 0x99, 0xfe, 0x90, - 0x6e, 0x2d, 0x0a, 0x66, 0xdf, 0xd4, 0x4b, 0xd2, 0x68, 0xdb, 0x76, 0x58, 0x75, 0x81, 0x6d, 0x9c, - 0x9a, 0x0e, 0x02, 0xb4, 0x6d, 0xfb, 0xe8, 0x3a, 0x4c, 0x63, 0xe7, 0x99, 0x3c, 0x8d, 0xe3, 0xe2, - 0xae, 0x3c, 0x7e, 0x74, 0xd6, 0x8d, 0xde, 0x86, 0xd9, 0xb1, 0x1b, 0x3a, 0x44, 0xc6, 0xdd, 0xcd, - 0x64, 0x8d, 0x53, 0x17, 0xbd, 0xe8, 0x26, 0xcc, 0x59, 0x4c, 0x07, 0x32, 0xb8, 0x5e, 0x88, 0x2b, - 0x14, 0x0c, 0xae, 0xcb, 0x7e, 0xf4, 0x49, 0x14, 0x47, 0xd4, 0x52, 0x91, 0x40, 0x4a, 0xa8, 0xb9, - 0xc1, 0xc4, 0xe3, 0x64, 0x30, 0x01, 0x8c, 0xc4, 0xcd, 0x42, 0x12, 0x93, 0xeb, 0x0b, 0x6b, 0x50, - 0x1d, 0xb9, 0x43, 0x6e, 0x07, 0x75, 0x5e, 0xe9, 0x1f, 0xb9, 0x43, 0x66, 0x06, 0xcb, 0x34, 0x78, - 0xb2, 0x6c, 0xa7, 0x3d, 0xcf, 0xb6, 0x17, 0x6f, 0xd0, 0x33, 0x91, 0x7d, 0x18, 0xae, 0x33, 0xc0, - 0xed, 0x06, 0xeb, 0xaa, 0x31, 0xc8, 0x9e, 0x33, 0x60, 0x47, 0x36, 0x21, 0xa7, 0xed, 0x26, 0x83, - 0xd3, 0x4f, 0x1a, 0xf3, 0xf2, 0x6c, 0x67, 0x21, 0x15, 0xf3, 0xe6, 0xed, 0xcf, 0x37, 0xa0, 0x80, - 0xf1, 0x4f, 0x25, 0x58, 0xd9, 0x62, 0x21, 0x9f, 0xe2, 0x09, 0xce, 0x91, 0x80, 0xa3, 0xbb, 0x51, - 0xa5, 0x23, 0x9d, 0x47, 0xa7, 0x17, 0x2b, 0xf0, 0xd0, 0x03, 0x68, 0x4a, 0x9a, 0x62, 0x64, 0xe5, - 0x55, 0x35, 0x92, 0x46, 0xa0, 0x36, 0xb5, 0x4f, 0x60, 0x35, 0xc3, 0xb3, 0x08, 0xcf, 0xae, 0xc2, - 0x7c, 0xec, 0x11, 0x22, 0x96, 0xeb, 0x11, 0x6c, 0xd7, 0xd2, 0xee, 0xc3, 0x85, 0x3e, 0x31, 0x7d, - 0x92, 0x59, 0xf0, 0x19, 0xc6, 0xb2, 0x32, 0x49, 0x72, 0xac, 0xa8, 0x64, 0xf4, 0x61, 0xb9, 0x4f, - 0x5c, 0xef, 0x35, 0x88, 0xd2, 0x9d, 0x4e, 0x97, 0xed, 0x86, 0x44, 0xc4, 0x64, 0xb2, 0xa9, 0xad, - 0xf2, 0xa2, 0x4e, 0x76, 0xb6, 0x8f, 0x61, 0x85, 0xd7, 0x54, 0x5e, 0x67, 0x11, 0x6b, 0xb2, 0xa2, - 0x93, 0xa5, 0xbb, 0x0d, 0x4b, 0xb1, 0x2b, 0x8f, 0xd3, 0xc3, 0xdb, 0xc9, 0xf4, 0x70, 0x35, 0xab, - 0xe3, 0x44, 0x76, 0xf8, 0x37, 0x65, 0xc5, 0x61, 0x16, 0x24, 0x87, 0x9b, 0xc9, 0xe4, 0xf0, 0x52, - 0x01, 0xc9, 0x44, 0x6e, 0x98, 0xb5, 0xc8, 0x4a, 0x8e, 0x45, 0xea, 0x99, 0x0c, 0x72, 0x3a, 0x55, - 0xcc, 0x4e, 0xf1, 0xf6, 0x3b, 0x49, 0x20, 0x77, 0x79, 0x02, 0x19, 0x4d, 0x1d, 0xd5, 0xb9, 0xee, - 0xa6, 0x12, 0xc8, 0x76, 0x11, 0x9b, 0x51, 0xfe, 0xf8, 0x57, 0xd3, 0x50, 0x8b, 0xfa, 0x32, 0x82, - 0xcd, 0x0a, 0xa9, 0x9c, 0x23, 0x24, 0xf5, 0xfc, 0xaa, 0xbc, 0xce, 0xf9, 0x35, 0xfd, 0xaa, 0xf3, - 0xeb, 0x22, 0xd4, 0xd8, 0x87, 0xe1, 0xe3, 0x63, 0x71, 0x1e, 0x55, 0x19, 0x40, 0xc7, 0xc7, 0xb1, - 0x41, 0xcd, 0x9e, 0xc5, 0xa0, 0x52, 0x99, 0xea, 0x5c, 0x3a, 0x53, 0xbd, 0x17, 0x9d, 0x30, 0xfc, - 0x2c, 0x5a, 0xcf, 0x92, 0xcb, 0x3d, 0x5b, 0x76, 0x92, 0x67, 0x0b, 0x3f, 0x9e, 0xae, 0xe5, 0x0c, - 0x7e, 0x63, 0xf3, 0xd4, 0x27, 0x3c, 0x4f, 0x55, 0xad, 0x4a, 0x38, 0xc2, 0x4d, 0x80, 0x68, 0xcf, - 0xcb, 0x64, 0x15, 0x65, 0x97, 0xa6, 0x2b, 0x58, 0xd4, 0xab, 0x24, 0xe4, 0x1f, 0x17, 0x63, 0xcf, - 0xe0, 0x55, 0xfe, 0x55, 0x8d, 0x92, 0x0a, 0xea, 0x99, 0xf7, 0x32, 0xa5, 0x8d, 0xb3, 0x59, 0xdd, - 0xed, 0x64, 0x65, 0xe3, 0x7c, 0xe6, 0x92, 0x29, 0x6c, 0xb0, 0x43, 0xdd, 0xf4, 0x45, 0x37, 0xcf, - 0x49, 0x6b, 0x02, 0xd2, 0x25, 0x34, 0x94, 0x3a, 0xb6, 0x1d, 0x3b, 0x38, 0xe1, 0xfd, 0xb3, 0xac, - 0x1f, 0x24, 0xa8, 0xcb, 0x6e, 0xb5, 0xf1, 0x0b, 0x9b, 0x18, 0x03, 0xd7, 0xc2, 0xcc, 0x18, 0x67, - 0xf4, 0x2a, 0x05, 0x6c, 0xb9, 0x16, 0x8e, 0x37, 0x48, 0xf5, 0x5c, 0x1b, 0xa4, 0x96, 0xda, 0x20, - 0x2b, 0x30, 
0xeb, 0x63, 0x33, 0x70, 0x1d, 0x91, 0x18, 0x88, 0x16, 0x3d, 0x2b, 0xc6, 0x38, 0x08, - 0xe8, 0x04, 0x22, 0x80, 0x11, 0x4d, 0x25, 0xcc, 0x9a, 0x2f, 0x0a, 0xb3, 0x26, 0x14, 0x4c, 0x53, - 0x61, 0x56, 0xa3, 0x28, 0xcc, 0x3a, 0x4b, 0xbd, 0x54, 0x09, 0x22, 0x9b, 0x13, 0x83, 0x48, 0x35, - 0x1c, 0x5b, 0x48, 0x84, 0x63, 0xbf, 0xcf, 0x3d, 0xf5, 0x18, 0x56, 0x33, 0xbb, 0x40, 0x6c, 0xaa, - 0xbb, 0xa9, 0x8a, 0x6b, 0xbb, 0x48, 0x40, 0x51, 0xc1, 0xf5, 0xcf, 0xe0, 0xca, 0xa1, 0x67, 0xa5, - 0x42, 0x15, 0x91, 0x68, 0x9d, 0x3d, 0x42, 0xb8, 0x27, 0xa3, 0xca, 0xf2, 0x19, 0x73, 0x38, 0x8e, - 0xae, 0x69, 0xb0, 0x51, 0x3c, 0xbb, 0x38, 0xf2, 0xff, 0x04, 0x16, 0x76, 0x5e, 0xe0, 0x41, 0xff, - 0xd4, 0x19, 0x9c, 0x83, 0xa3, 0x16, 0x54, 0x06, 0x63, 0x4b, 0x94, 0x32, 0xe8, 0xa7, 0x1a, 0xc5, - 0x54, 0x92, 0x51, 0x8c, 0x01, 0xad, 0x78, 0x06, 0x21, 0xc9, 0x15, 0x2a, 0x49, 0x8b, 0x22, 0x53, - 0xe2, 0xf3, 0xba, 0x68, 0x09, 0x38, 0xf6, 0x7d, 0xb6, 0x54, 0x0e, 0xc7, 0xbe, 0x9f, 0xdc, 0x73, - 0x95, 0xe4, 0x9e, 0xd3, 0xbe, 0x86, 0x3a, 0x9d, 0xe0, 0x3b, 0xb1, 0x2f, 0x42, 0xf9, 0x4a, 0x1c, - 0xca, 0x47, 0x19, 0xc1, 0xb4, 0x92, 0x11, 0x68, 0x1b, 0x30, 0xcf, 0xe7, 0x12, 0x0b, 0x69, 0x41, - 0x25, 0xf4, 0x47, 0xd2, 0xb2, 0x42, 0x7f, 0xa4, 0xfd, 0x11, 0x34, 0xba, 0x84, 0x98, 0x83, 0x93, - 0x73, 0xf0, 0x13, 0xcd, 0x55, 0x56, 0xb3, 0x8f, 0x0c, 0x4f, 0x9a, 0x06, 0x4d, 0x49, 0xbb, 0x70, - 0xfe, 0x1e, 0xa0, 0x7d, 0xd7, 0x27, 0x0f, 0x5d, 0xff, 0xb9, 0xe9, 0x5b, 0xe7, 0x8b, 0xe6, 0x11, - 0x4c, 0x8b, 0xd7, 0x3c, 0x95, 0x1b, 0x33, 0x3a, 0xfb, 0xd6, 0xde, 0x81, 0xa5, 0x04, 0xbd, 0xc2, - 0x89, 0x3f, 0x84, 0x3a, 0x73, 0x72, 0x22, 0xe2, 0xbb, 0xa1, 0x56, 0x3c, 0x27, 0x79, 0x42, 0xad, - 0x0b, 0x8b, 0xf4, 0x14, 0x63, 0xf0, 0x68, 0x5b, 0xfc, 0x30, 0x15, 0x17, 0x2d, 0x27, 0xc7, 0xa7, - 0x62, 0xa2, 0x7f, 0x2c, 0xc1, 0x0c, 0x83, 0x67, 0xce, 0x9c, 0x8b, 0x50, 0xf3, 0xb1, 0xe7, 0x1a, - 0xc4, 0x1c, 0x46, 0x0f, 0xa4, 0x28, 0xe0, 0xc0, 0x1c, 0x06, 0xec, 0x7d, 0x17, 0xed, 0xb4, 0xec, - 0x21, 0x0e, 0x88, 0x7c, 0x25, 0x55, 0xa7, 0xb0, 0x6d, 0x0e, 0xa2, 0x22, 0x09, 0xec, 0x3f, 0xe5, - 0x01, 0xcf, 0xb4, 0xce, 0xbe, 0xd1, 0x75, 0xfe, 0xf6, 0x60, 0x42, 0x79, 0x8a, 0x3d, 0x48, 0xe8, - 0x40, 0x35, 0x55, 0x91, 0x8a, 0xda, 0xda, 0x27, 0x80, 0xd4, 0x35, 0x0b, 0xa1, 0xbe, 0x0d, 0xb3, - 0x4c, 0x24, 0xf2, 0xc4, 0x6e, 0x26, 0x17, 0xad, 0x8b, 0x5e, 0xed, 0x33, 0x40, 0x5c, 0x8a, 0x89, - 0x53, 0xfa, 0xec, 0x12, 0xff, 0x18, 0x96, 0x12, 0xe3, 0xa3, 0xab, 0xe6, 0x04, 0x81, 0xf4, 0xec, - 0x62, 0xf0, 0xaf, 0x4b, 0x00, 0xdd, 0x90, 0x9c, 0x88, 0x52, 0x88, 0xba, 0xca, 0x52, 0x72, 0x95, - 0xb4, 0xcf, 0x33, 0x83, 0xe0, 0xb9, 0xeb, 0xcb, 0x30, 0x34, 0x6a, 0xb3, 0x32, 0x46, 0x48, 0x4e, - 0x64, 0xf9, 0x95, 0x7e, 0xa3, 0xeb, 0xd0, 0xe4, 0xef, 0xda, 0x0c, 0xd3, 0xb2, 0x7c, 0x1c, 0x04, - 0xa2, 0x0e, 0xdb, 0xe0, 0xd0, 0x2e, 0x07, 0x52, 0x34, 0xdb, 0xc2, 0x0e, 0xb1, 0xc9, 0xa9, 0x41, - 0xdc, 0x6f, 0xb0, 0x23, 0x02, 0xcc, 0x86, 0x84, 0x1e, 0x50, 0x20, 0x45, 0xf3, 0xf1, 0xd0, 0x0e, - 0x88, 0x2f, 0xd1, 0x64, 0x5d, 0x50, 0x40, 0x19, 0x9a, 0xf6, 0xcb, 0x12, 0xb4, 0xf6, 0xc3, 0xd1, - 0x88, 0x2f, 0xf2, 0xbc, 0xb2, 0x44, 0xef, 0x88, 0x75, 0x94, 0x53, 0xd6, 0x10, 0x8b, 0x48, 0x2c, - 0xee, 0xbb, 0x27, 0xbe, 0x77, 0x61, 0x51, 0x61, 0x54, 0x28, 0x2d, 0x11, 0x47, 0x94, 0x92, 0x71, - 0x04, 0x35, 0x14, 0x9e, 0xeb, 0xbd, 0xde, 0xe2, 0xb4, 0x0b, 0xb0, 0x94, 0x18, 0x2f, 0x0e, 0x8d, - 0x5b, 0xd0, 0x10, 0xd7, 0xbd, 0xc2, 0x08, 0xd6, 0xa0, 0x4a, 0xdd, 0xcb, 0xc0, 0xb6, 0x64, 0xdd, - 0x7d, 0xce, 0x73, 0xad, 0x2d, 0xdb, 0xf2, 0xb5, 0x1e, 0x34, 0x74, 0x4e, 0x5e, 0xe0, 0x7e, 0x0a, - 0x4d, 0x71, 0x39, 0x6c, 0x24, 0x1e, 
0x49, 0xc4, 0x45, 0xe2, 0x04, 0x6d, 0xbd, 0xe1, 0xa8, 0x4d, - 0xed, 0xe7, 0xd0, 0xe1, 0x87, 0x5a, 0x82, 0xaa, 0x5c, 0xda, 0xa7, 0x20, 0xdf, 0x5d, 0x16, 0x11, - 0x4f, 0x0e, 0x6b, 0xf8, 0x6a, 0x53, 0xbb, 0x0c, 0x17, 0x73, 0x89, 0x8b, 0x75, 0x7b, 0xd0, 0x8a, - 0x3b, 0x2c, 0x5b, 0x5e, 0x37, 0xb0, 0x6b, 0x84, 0x92, 0x72, 0x8d, 0xb0, 0x12, 0x05, 0x0a, 0xdc, - 0xa1, 0x8b, 0x96, 0x12, 0xd6, 0x55, 0x8a, 0xc2, 0xba, 0xe9, 0x44, 0x58, 0xa7, 0x7d, 0x19, 0x49, - 0x4f, 0xc4, 0xd4, 0x1f, 0xb1, 0xc0, 0x9e, 0xcf, 0x2d, 0xdd, 0xc4, 0x5a, 0xce, 0xe2, 0x38, 0x86, - 0xae, 0x20, 0x6b, 0x0b, 0xd0, 0x48, 0x38, 0x0c, 0xed, 0x01, 0x34, 0x53, 0x1e, 0xe0, 0x4e, 0x2a, - 0xc2, 0xc9, 0x88, 0x2d, 0x15, 0xdf, 0x2c, 0x0b, 0x47, 0xf4, 0x30, 0xd8, 0x75, 0x8e, 0x5d, 0x49, - 0xf7, 0x1a, 0xd4, 0x0f, 0x8b, 0x9e, 0x21, 0x4e, 0xcb, 0x7b, 0xac, 0x77, 0x60, 0xb1, 0x4f, 0x5c, - 0xdf, 0x1c, 0xe2, 0x5d, 0xb6, 0x6b, 0x8f, 0x6d, 0x7e, 0x4f, 0x13, 0x86, 0x91, 0xff, 0x66, 0xdf, - 0xda, 0x7f, 0x94, 0x60, 0xe1, 0xa1, 0x3d, 0xc2, 0xc1, 0x69, 0x40, 0xf0, 0xf8, 0x90, 0x45, 0xbb, - 0x97, 0xa0, 0x46, 0xb9, 0x09, 0x88, 0x39, 0xf6, 0xe4, 0x3d, 0x57, 0x04, 0xa0, 0x32, 0x0a, 0x38, - 0x69, 0x99, 0xff, 0xaa, 0x99, 0x46, 0x66, 0x56, 0x1a, 0xfd, 0x0b, 0x10, 0x7a, 0x1f, 0x20, 0x0c, - 0xb0, 0x25, 0xee, 0xb6, 0x2a, 0xa9, 0xa3, 0xe7, 0x50, 0xbd, 0x81, 0xa0, 0x78, 0xfc, 0xa2, 0xeb, - 0x03, 0xa8, 0xdb, 0x8e, 0x6b, 0x61, 0x76, 0x03, 0x61, 0x89, 0xdc, 0x38, 0x7f, 0x14, 0x70, 0xc4, - 0xc3, 0x00, 0x5b, 0xda, 0x1f, 0x0b, 0x2f, 0x2c, 0x85, 0x27, 0x74, 0xb0, 0x03, 0x8b, 0x7c, 0x43, - 0x1f, 0x47, 0x8b, 0x96, 0x8a, 0x8e, 0x03, 0xce, 0x94, 0x40, 0xf4, 0x96, 0x2d, 0x4e, 0x45, 0x39, - 0x42, 0xbb, 0x0f, 0x17, 0x12, 0x51, 0xe9, 0x79, 0x92, 0xb9, 0x47, 0xa9, 0x4c, 0x30, 0x36, 0x10, - 0x91, 0x8a, 0x49, 0xfb, 0x28, 0x48, 0xc5, 0x02, 0x9e, 0x8a, 0x05, 0x9a, 0x0e, 0x6b, 0x89, 0x04, - 0x35, 0xc1, 0xc8, 0x07, 0xa9, 0x23, 0xfe, 0x72, 0x01, 0xb1, 0xd4, 0x59, 0xff, 0xbf, 0x25, 0x58, - 0xce, 0x43, 0x78, 0xcd, 0x52, 0xc8, 0x4f, 0x0a, 0x5e, 0x1c, 0xdc, 0x9d, 0xc8, 0xcd, 0xef, 0xa4, - 0x68, 0xf4, 0x18, 0x3a, 0x79, 0xd2, 0xcb, 0xaa, 0xa2, 0x72, 0x06, 0x55, 0xfc, 0x5f, 0x59, 0x29, - 0xee, 0x75, 0x09, 0xf1, 0xed, 0xa3, 0x90, 0x1a, 0xef, 0xf7, 0x95, 0xa4, 0x3f, 0x88, 0x12, 0x50, - 0x2e, 0xbf, 0x1b, 0xd9, 0x51, 0xf1, 0xac, 0xb9, 0x49, 0xe8, 0x5e, 0x32, 0x09, 0xe5, 0x65, 0xbb, - 0xdb, 0x13, 0xc9, 0xbc, 0xb1, 0x95, 0x99, 0x97, 0x25, 0x68, 0x26, 0xf5, 0x80, 0x3e, 0x01, 0x30, - 0x23, 0xce, 0x85, 0xc9, 0x5f, 0x9a, 0xb4, 0x3a, 0x5d, 0xc1, 0x47, 0xd7, 0xa0, 0x32, 0xf0, 0x42, - 0xa1, 0x91, 0xf8, 0xfe, 0x66, 0xcb, 0x0b, 0xb9, 0x03, 0xa0, 0xbd, 0x34, 0x68, 0xe6, 0xf7, 0xf0, - 0x19, 0xcf, 0xf5, 0x94, 0x81, 0x39, 0xaa, 0xc0, 0x41, 0x9f, 0x43, 0xf3, 0xb9, 0x6f, 0x13, 0xf3, - 0x68, 0x84, 0x8d, 0x91, 0x79, 0x8a, 0x7d, 0xe1, 0xb9, 0x8a, 0xbd, 0x4c, 0x43, 0xe2, 0x3f, 0xa1, - 0xe8, 0x5a, 0x08, 0x55, 0x39, 0xff, 0x2b, 0x3c, 0xf2, 0x63, 0x58, 0x0d, 0x29, 0x9a, 0xc1, 0xde, - 0x02, 0x38, 0xa6, 0xe3, 0x1a, 0x01, 0xa6, 0x47, 0x93, 0x7c, 0x7f, 0x97, 0xef, 0x2d, 0x97, 0xd9, - 0xa0, 0x2d, 0xd7, 0xc7, 0x3d, 0xd3, 0x71, 0xfb, 0x7c, 0x84, 0x36, 0x86, 0xba, 0xb2, 0x9c, 0x57, - 0xcc, 0xfc, 0x00, 0x16, 0xe5, 0xcd, 0x58, 0x80, 0x89, 0xf0, 0xeb, 0x93, 0xe6, 0x5c, 0x10, 0xe8, - 0x7d, 0x4c, 0x98, 0x77, 0xbf, 0x75, 0x09, 0xaa, 0xf2, 0x2f, 0x10, 0x68, 0x0e, 0x2a, 0x07, 0x5b, - 0xfb, 0xad, 0x29, 0xfa, 0x71, 0xb8, 0xbd, 0xdf, 0x2a, 0xdd, 0x1a, 0x43, 0x2b, 0xfd, 0xfc, 0x1f, - 0xad, 0xc2, 0xd2, 0xbe, 0xbe, 0xb7, 0xdf, 0x7d, 0xd4, 0x3d, 0xd8, 0xdd, 0xeb, 0x19, 0xfb, 0xfa, - 0xee, 0x57, 0xdd, 0x83, 0x9d, 0xd6, 0x14, 0xba, 0x0a, 0x97, 
0xd5, 0x8e, 0x3f, 0xdc, 0xeb, 0x1f, - 0x18, 0x07, 0x7b, 0xc6, 0xd6, 0x5e, 0xef, 0xa0, 0xbb, 0xdb, 0xdb, 0xd1, 0x5b, 0x25, 0x74, 0x19, - 0xd6, 0x54, 0x94, 0x2f, 0x76, 0xb7, 0x77, 0xf5, 0x9d, 0x2d, 0xfa, 0xdd, 0x7d, 0xd2, 0x2a, 0xdf, - 0xba, 0x0f, 0x0b, 0xa9, 0xe7, 0x3c, 0x68, 0x11, 0x1a, 0xfd, 0x6e, 0x6f, 0xfb, 0x8b, 0xbd, 0x9f, - 0x1a, 0xfa, 0x4e, 0x77, 0xfb, 0x67, 0xad, 0x29, 0xb4, 0x0c, 0x2d, 0x09, 0xea, 0xed, 0x1d, 0x70, - 0x68, 0xe9, 0xd6, 0x37, 0x29, 0x93, 0xc4, 0xe8, 0x02, 0x2c, 0x46, 0x73, 0x1b, 0x5b, 0xfa, 0x4e, - 0xf7, 0x60, 0x67, 0xbb, 0x35, 0x95, 0x04, 0xeb, 0x87, 0xbd, 0xde, 0x6e, 0xef, 0x51, 0xab, 0x44, - 0xa9, 0xc6, 0xe0, 0x9d, 0x9f, 0xee, 0x52, 0xe4, 0x72, 0x12, 0xf9, 0xb0, 0xf7, 0xb8, 0xb7, 0xf7, - 0x93, 0x5e, 0xab, 0xb2, 0xf9, 0xef, 0x0d, 0x68, 0xca, 0x98, 0x01, 0xfb, 0xec, 0xba, 0xf6, 0x33, - 0x98, 0x93, 0x7f, 0x86, 0x89, 0x9d, 0x55, 0xf2, 0x9f, 0x3b, 0x9d, 0x76, 0xb6, 0x43, 0xc4, 0x5e, - 0x53, 0x68, 0x9f, 0xc5, 0x42, 0xca, 0xd3, 0xa9, 0xcb, 0x6a, 0x74, 0x92, 0x79, 0x9b, 0xd5, 0x59, - 0x2f, 0xea, 0x8e, 0x28, 0xf6, 0x69, 0x00, 0xa4, 0x3e, 0x7b, 0x45, 0xeb, 0x6a, 0x98, 0x90, 0x7d, - 0x4e, 0xdb, 0xb9, 0x52, 0xd8, 0x1f, 0x11, 0xfd, 0x19, 0xb4, 0xd2, 0x0f, 0x5e, 0x51, 0x5c, 0xb2, - 0x29, 0x78, 0x4c, 0xdb, 0xb9, 0x3a, 0x01, 0x43, 0x25, 0x9d, 0x79, 0x34, 0xba, 0x51, 0xfc, 0xec, - 0x2f, 0x43, 0xba, 0xe8, 0x2d, 0x21, 0x17, 0x45, 0xf2, 0xc9, 0x13, 0x52, 0x9f, 0x6a, 0xe6, 0x3c, - 0x7d, 0x53, 0x44, 0x91, 0xff, 0x56, 0x4a, 0x9b, 0x42, 0x5f, 0xc1, 0x42, 0xea, 0xa6, 0x0e, 0xc5, - 0xa3, 0xf2, 0xef, 0x1d, 0x3b, 0x1b, 0xc5, 0x08, 0x49, 0xbd, 0xa9, 0xf7, 0x70, 0x09, 0xbd, 0xe5, - 0x5c, 0xee, 0x25, 0xf4, 0x96, 0x7b, 0x81, 0xc7, 0xcc, 0x2b, 0x71, 0xdb, 0xa6, 0x98, 0x57, 0xde, - 0xd5, 0x5e, 0x67, 0xbd, 0xa8, 0x5b, 0x5d, 0x7e, 0xea, 0xa6, 0x4d, 0x59, 0x7e, 0xfe, 0x05, 0x5e, - 0x67, 0xa3, 0x18, 0x21, 0xad, 0xab, 0xb8, 0xec, 0x9f, 0xd2, 0x55, 0xe6, 0x96, 0x29, 0xa5, 0xab, - 0xec, 0x7d, 0x81, 0xd0, 0x55, 0xaa, 0x7e, 0x7f, 0xa5, 0xb0, 0xbe, 0x99, 0xd5, 0x55, 0x7e, 0xc9, - 0x54, 0x9b, 0x42, 0xdf, 0x42, 0xbb, 0xa8, 0x08, 0x89, 0xe2, 0x18, 0xe1, 0x15, 0x55, 0xd2, 0xce, - 0xcd, 0x33, 0x60, 0x46, 0x53, 0x76, 0xa1, 0x2a, 0x2b, 0x8e, 0x28, 0x76, 0x28, 0xa9, 0x32, 0x67, - 0x67, 0x2d, 0xa7, 0x27, 0x22, 0xf1, 0x01, 0x4c, 0x53, 0x28, 0x5a, 0x4e, 0x20, 0xc9, 0xa1, 0x17, - 0x52, 0xd0, 0x68, 0xd8, 0xc7, 0x30, 0xcb, 0x0b, 0x74, 0x28, 0xce, 0x9c, 0x12, 0xd5, 0xc0, 0xce, - 0x6a, 0x06, 0x1e, 0x0d, 0xfe, 0x92, 0xff, 0x27, 0x4f, 0x54, 0xda, 0xd0, 0xc5, 0xc4, 0xdf, 0x37, - 0x92, 0xf5, 0xbc, 0xce, 0xa5, 0xfc, 0x4e, 0xd5, 0x44, 0x52, 0xe1, 0xc7, 0x7a, 0x51, 0x7c, 0x98, - 0x31, 0x91, 0xfc, 0x78, 0x53, 0x9b, 0x42, 0x06, 0x2f, 0x5a, 0xa5, 0x08, 0x6b, 0xf9, 0xb6, 0x95, - 0x20, 0x7e, 0x6d, 0x22, 0x4e, 0x34, 0xc1, 0x11, 0x2c, 0xe5, 0xa4, 0xdf, 0xe8, 0x5a, 0x4a, 0xf9, - 0x79, 0x99, 0x7f, 0xe7, 0xad, 0xc9, 0x48, 0xaa, 0x8a, 0x84, 0x79, 0xaf, 0xa8, 0x3e, 0x41, 0xb1, - 0xea, 0xd5, 0x0c, 0x5c, 0x0e, 0xde, 0xfc, 0x8b, 0x0a, 0xcc, 0xf3, 0x22, 0x89, 0x38, 0xd3, 0x1e, - 0x01, 0xc4, 0x75, 0x3c, 0xd4, 0x49, 0x2c, 0x33, 0x51, 0xd0, 0xec, 0x5c, 0xcc, 0xed, 0x53, 0x95, - 0xaf, 0x94, 0xe4, 0x14, 0xe5, 0x67, 0x0b, 0x7d, 0x8a, 0xf2, 0x73, 0xaa, 0x78, 0xda, 0x14, 0xda, - 0x86, 0x5a, 0x54, 0x27, 0x42, 0x4a, 0x79, 0x29, 0x55, 0xe4, 0xea, 0x74, 0xf2, 0xba, 0x54, 0x8e, - 0x94, 0xda, 0x8f, 0xc2, 0x51, 0xb6, 0xa2, 0xa4, 0x70, 0x94, 0x57, 0x2e, 0x8a, 0x57, 0xc7, 0x53, - 0xdd, 0xf4, 0xea, 0x12, 0xd5, 0x83, 0xf4, 0xea, 0x92, 0xd9, 0xb1, 0x36, 0xf5, 0xc5, 0xa5, 0x5f, - 0xfd, 0x66, 0xbd, 0xf4, 0x9f, 0xbf, 0x59, 0x9f, 0xfa, 0xf3, 0x97, 0xeb, 0xa5, 0x5f, 
0xbd, 0x5c, - 0x2f, 0xfd, 0xdb, 0xcb, 0xf5, 0xd2, 0x7f, 0xbf, 0x5c, 0x2f, 0xfd, 0xe2, 0x7f, 0xd6, 0xa7, 0x8e, - 0x66, 0xd9, 0x9f, 0x54, 0xdf, 0xff, 0x6d, 0x00, 0x00, 0x00, 0xff, 0xff, 0xfc, 0xa9, 0xc0, 0x61, - 0x58, 0x3c, 0x00, 0x00, + // 4392 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5b, 0xcd, 0x6f, 0x1c, 0x47, + 0x76, 0xe7, 0xcc, 0xf0, 0x63, 0xe6, 0x0d, 0x67, 0x38, 0x2c, 0x51, 0xe4, 0x70, 0x24, 0x51, 0x54, + 0xcb, 0x92, 0x25, 0xed, 0x4a, 0x96, 0xe9, 0xb5, 0x14, 0xcb, 0xb6, 0xe4, 0x31, 0x49, 0x29, 0xb4, + 0x24, 0x92, 0xdb, 0x43, 0x7a, 0x6d, 0x6c, 0x80, 0x4e, 0x73, 0xba, 0x38, 0x6c, 0x7b, 0xa6, 0xab, + 0xdd, 0x1f, 0x92, 0x18, 0xe4, 0x90, 0x5c, 0x82, 0x20, 0x40, 0x80, 0xcd, 0x31, 0x39, 0xe5, 0x10, + 0x60, 0x91, 0x4b, 0x10, 0xe4, 0x10, 0xe4, 0x0f, 0x08, 0x92, 0xbd, 0x2c, 0x10, 0x20, 0x40, 0x90, + 0xdc, 0xb2, 0xca, 0x21, 0x87, 0x00, 0xf9, 0x1b, 0x16, 0xf5, 0xd5, 0x5d, 0xfd, 0x35, 0x22, 0x65, + 0xef, 0x5a, 0xa7, 0xe9, 0x7a, 0xf5, 0xea, 0xd5, 0xab, 0xaa, 0x57, 0xaf, 0xde, 0xfb, 0x55, 0x0d, + 0xd4, 0x4c, 0xd7, 0xbe, 0xe5, 0x7a, 0x24, 0x20, 0x68, 0xc6, 0x0b, 0x9d, 0xc0, 0x1e, 0xe1, 0xce, + 0xcd, 0x81, 0x1d, 0x1c, 0x85, 0x07, 0xb7, 0xfa, 0x64, 0xf4, 0xce, 0x80, 0x0c, 0xc8, 0x3b, 0xac, + 0xfe, 0x20, 0x3c, 0x64, 0x25, 0x56, 0x60, 0x5f, 0xbc, 0x9d, 0x76, 0x03, 0x9a, 0x9f, 0x63, 0xcf, + 0xb7, 0x89, 0xa3, 0xe3, 0x6f, 0x42, 0xec, 0x07, 0xa8, 0x0d, 0x33, 0xcf, 0x38, 0xa5, 0x5d, 0x5a, + 0x2d, 0x5d, 0xab, 0xe9, 0xb2, 0xa8, 0xfd, 0xbc, 0x04, 0x73, 0x11, 0xb3, 0xef, 0x12, 0xc7, 0xc7, + 0xc5, 0xdc, 0xe8, 0x12, 0xcc, 0x0a, 0x9d, 0x0c, 0xc7, 0x1c, 0xe1, 0x76, 0x99, 0x55, 0xd7, 0x05, + 0x6d, 0xdb, 0x1c, 0x61, 0xf4, 0x36, 0xcc, 0x49, 0x16, 0x29, 0xa4, 0xc2, 0xb8, 0x9a, 0x82, 0x2c, + 0x7a, 0x43, 0xb7, 0xe0, 0x8c, 0x64, 0x34, 0x5d, 0x3b, 0x62, 0x9e, 0x64, 0xcc, 0xf3, 0xa2, 0xaa, + 0xeb, 0xda, 0x82, 0x5f, 0xfb, 0x29, 0xd4, 0x36, 0xb6, 0x7b, 0xeb, 0xc4, 0x39, 0xb4, 0x07, 0x54, + 0x45, 0x1f, 0x7b, 0xb4, 0x4d, 0xbb, 0xb4, 0x5a, 0xa1, 0x2a, 0x8a, 0x22, 0xea, 0x40, 0xd5, 0xc7, + 0xa6, 0xd7, 0x3f, 0xc2, 0x7e, 0xbb, 0xcc, 0xaa, 0xa2, 0x32, 0x6d, 0x45, 0xdc, 0xc0, 0x26, 0x8e, + 0xdf, 0xae, 0xf0, 0x56, 0xa2, 0xa8, 0xfd, 0x65, 0x09, 0xea, 0xbb, 0xc4, 0x0b, 0x9e, 0x9a, 0xae, + 0x6b, 0x3b, 0x03, 0x74, 0x13, 0xaa, 0x6c, 0x2e, 0xfb, 0x64, 0xc8, 0xe6, 0xa0, 0xb9, 0x36, 0x7f, + 0x4b, 0xa8, 0x74, 0x6b, 0x57, 0x54, 0xe8, 0x11, 0x0b, 0xba, 0x02, 0xcd, 0x3e, 0x71, 0x02, 0xd3, + 0x76, 0xb0, 0x67, 0xb8, 0xc4, 0x0b, 0xd8, 0xcc, 0x4c, 0xe9, 0x8d, 0x88, 0x4a, 0x85, 0xa3, 0x73, + 0x50, 0x3b, 0x22, 0x7e, 0xc0, 0x39, 0x2a, 0x8c, 0xa3, 0x4a, 0x09, 0xac, 0x72, 0x09, 0x66, 0x58, + 0xa5, 0xed, 0x8a, 0x39, 0x98, 0xa6, 0xc5, 0x2d, 0x57, 0xfb, 0x65, 0x09, 0xa6, 0x9e, 0x92, 0xd0, + 0x09, 0x52, 0xdd, 0x98, 0xc1, 0x91, 0x58, 0x1f, 0xa5, 0x1b, 0x33, 0x38, 0x8a, 0xbb, 0xa1, 0x1c, + 0x7c, 0x89, 0x78, 0x37, 0xb4, 0xb2, 0x03, 0x55, 0x0f, 0x9b, 0x16, 0x71, 0x86, 0xc7, 0x4c, 0x85, + 0xaa, 0x1e, 0x95, 0xe9, 0xda, 0xf9, 0x78, 0x68, 0x3b, 0xe1, 0x0b, 0xc3, 0xc3, 0x43, 0xf3, 0x00, + 0x0f, 0x99, 0x2a, 0x55, 0xbd, 0x29, 0xc8, 0x3a, 0xa7, 0xa2, 0x0f, 0xa1, 0xee, 0x7a, 0xc4, 0x35, + 0x07, 0x26, 0x9d, 0xbe, 0xf6, 0x14, 0x9b, 0xa1, 0xe5, 0x68, 0x86, 0x98, 0xb6, 0xbb, 0x31, 0x83, + 0xae, 0x72, 0x6b, 0x5f, 0xc1, 0x1c, 0xb5, 0x14, 0xdf, 0x35, 0xfb, 0x78, 0x87, 0xcd, 0x3f, 0xb5, + 0x2b, 0xa6, 0xb1, 0x83, 0x83, 0xe7, 0xc4, 0xfb, 0x9a, 0x0d, 0xab, 0xaa, 0xd7, 0x29, 0x6d, 0x9b, + 0x93, 0xd0, 0x32, 0x54, 0xf9, 0xa0, 0x6c, 0x8b, 0x8d, 0xa9, 0xaa, 0xb3, 0xe9, 0xda, 0xb5, 0xad, + 0xa8, 0xca, 0x76, 0xfb, 0x62, 0x48, 
0x33, 0x7c, 0xea, 0xfa, 0x9a, 0x06, 0xb0, 0xe5, 0x04, 0x77, + 0x7e, 0xf4, 0xb9, 0x39, 0x0c, 0x31, 0x5a, 0x80, 0xa9, 0x67, 0xf4, 0x83, 0xc9, 0xaf, 0xe8, 0xbc, + 0xa0, 0xfd, 0x49, 0x05, 0xce, 0x3d, 0xa1, 0xa3, 0xeb, 0x99, 0x8e, 0x75, 0x40, 0x5e, 0xf4, 0x70, + 0x3f, 0xf4, 0xec, 0xe0, 0x78, 0x9d, 0x38, 0x01, 0x7e, 0x11, 0xa0, 0x4d, 0x98, 0x77, 0xa4, 0xbe, + 0x86, 0xb4, 0x1f, 0x2a, 0xa1, 0xbe, 0xd6, 0x8e, 0x86, 0x9c, 0x1a, 0x91, 0xde, 0x72, 0x92, 0x04, + 0x1f, 0x3d, 0x88, 0x27, 0x57, 0x0a, 0x29, 0x33, 0x21, 0x8b, 0x91, 0x90, 0xde, 0x26, 0xd3, 0x43, + 0x88, 0x90, 0x93, 0x2e, 0x05, 0xbc, 0x07, 0x74, 0xa3, 0x19, 0xa6, 0x6f, 0x84, 0x3e, 0xf6, 0xd8, + 0x48, 0xeb, 0x6b, 0x67, 0xa2, 0xc6, 0xf1, 0x38, 0xf5, 0x9a, 0x17, 0x3a, 0x5d, 0x7f, 0xdf, 0xc7, + 0x1e, 0xdb, 0x8e, 0x62, 0x79, 0x0d, 0x8f, 0x90, 0xe0, 0xd0, 0x97, 0x4b, 0x2a, 0xc9, 0x3a, 0xa3, + 0xa2, 0x77, 0xe0, 0x8c, 0x1f, 0xba, 0xee, 0x10, 0x8f, 0xb0, 0x13, 0x98, 0x43, 0x63, 0xe0, 0x91, + 0xd0, 0xf5, 0xdb, 0x53, 0xab, 0x95, 0x6b, 0x15, 0x1d, 0xa9, 0x55, 0x8f, 0x58, 0x0d, 0x5a, 0x01, + 0x70, 0x3d, 0xfb, 0x99, 0x3d, 0xc4, 0x03, 0x6c, 0xb5, 0xa7, 0x99, 0x50, 0x85, 0x82, 0x6e, 0xc3, + 0x82, 0x8f, 0xfb, 0x7d, 0x32, 0x72, 0x0d, 0xd7, 0x23, 0x87, 0xf6, 0x10, 0x73, 0x83, 0x9c, 0x61, + 0x06, 0x89, 0x44, 0xdd, 0x2e, 0xaf, 0xa2, 0xa6, 0xa9, 0xfd, 0xac, 0x0c, 0x67, 0xd9, 0x04, 0xec, + 0x12, 0x4b, 0xac, 0x85, 0xd8, 0xee, 0x97, 0xa1, 0xd1, 0x67, 0x0a, 0x19, 0xae, 0xe9, 0x61, 0x27, + 0x10, 0x76, 0x3f, 0xcb, 0x89, 0xbb, 0x8c, 0x86, 0x76, 0xa0, 0xe5, 0x8b, 0xa5, 0x33, 0xfa, 0x7c, + 0xed, 0xc4, 0x0c, 0xbf, 0x15, 0x4d, 0xd2, 0x98, 0x75, 0xd6, 0xe7, 0xfc, 0xcc, 0xc2, 0xcf, 0xf8, + 0xc7, 0x7e, 0x3f, 0x18, 0x72, 0x77, 0x51, 0x5f, 0xfb, 0x41, 0x52, 0x4e, 0x5a, 0xcd, 0x5b, 0x3d, + 0xce, 0xbd, 0xe9, 0x04, 0xde, 0xb1, 0x2e, 0xdb, 0x76, 0xee, 0xc1, 0xac, 0x5a, 0x81, 0x5a, 0x50, + 0xf9, 0x1a, 0x1f, 0x8b, 0x21, 0xd0, 0xcf, 0xd8, 0x2e, 0xf9, 0x66, 0xe5, 0x85, 0x7b, 0xe5, 0xdf, + 0x29, 0x69, 0x1e, 0xa0, 0xb8, 0x97, 0xa7, 0x38, 0x30, 0x2d, 0x33, 0x30, 0x11, 0x82, 0x49, 0xe6, + 0x7e, 0xb9, 0x08, 0xf6, 0x4d, 0xa5, 0x86, 0x62, 0x6b, 0xd4, 0x74, 0xfa, 0x89, 0xce, 0x43, 0x2d, + 0x32, 0x42, 0xe1, 0x83, 0x63, 0x02, 0xf5, 0x85, 0x66, 0x10, 0xe0, 0x91, 0x1b, 0x30, 0x83, 0x68, + 0xe8, 0xb2, 0xa8, 0xfd, 0xd3, 0x24, 0xb4, 0x32, 0x2b, 0x70, 0x17, 0xaa, 0x23, 0xd1, 0xbd, 0xb0, + 0xfd, 0x73, 0xb1, 0x43, 0xcc, 0x68, 0xa8, 0x47, 0xcc, 0xd4, 0xdf, 0xd0, 0xcd, 0xa8, 0x1c, 0x17, + 0x51, 0x99, 0x2e, 0xeb, 0x90, 0x0c, 0x0c, 0xcb, 0xf6, 0x70, 0x3f, 0x20, 0xde, 0xb1, 0xd0, 0x72, + 0x76, 0x48, 0x06, 0x1b, 0x92, 0x86, 0xde, 0x05, 0xb0, 0x1c, 0x9f, 0xae, 0xe8, 0xa1, 0x3d, 0x60, + 0xba, 0xd6, 0xd7, 0x50, 0xd4, 0x77, 0x74, 0x24, 0xe8, 0x35, 0xcb, 0xf1, 0x85, 0xb2, 0x1f, 0x40, + 0x83, 0xba, 0x58, 0x63, 0xc4, 0xbd, 0x39, 0xb7, 0xe2, 0xfa, 0xda, 0x82, 0xa2, 0x71, 0xe4, 0xea, + 0xf5, 0x59, 0x37, 0x2e, 0xf8, 0xe8, 0x63, 0x98, 0x66, 0x2e, 0xce, 0x6f, 0x4f, 0xb3, 0x36, 0x57, + 0x72, 0x46, 0x29, 0x56, 0xfb, 0x09, 0xe3, 0xe3, 0x8b, 0x2d, 0x1a, 0xa1, 0x27, 0x50, 0x37, 0x1d, + 0x87, 0x04, 0x26, 0xdf, 0xe0, 0x33, 0x4c, 0xc6, 0x8d, 0x62, 0x19, 0xdd, 0x98, 0x99, 0x0b, 0x52, + 0x9b, 0xa3, 0x1f, 0xc1, 0x14, 0xf3, 0x00, 0xed, 0x2a, 0x1b, 0xf5, 0xca, 0x78, 0xf3, 0xd3, 0x39, + 0x73, 0xe7, 0x03, 0xa8, 0x2b, 0xaa, 0x9d, 0xc6, 0xdc, 0x3a, 0xf7, 0xa1, 0x95, 0xd6, 0xe8, 0x54, + 0xe6, 0xba, 0x05, 0x0b, 0x7a, 0xe8, 0xc4, 0x8a, 0xc9, 0xf8, 0xe3, 0x5d, 0x98, 0x16, 0xeb, 0xc7, + 0x6d, 0x67, 0xb9, 0x70, 0x46, 0x74, 0xc1, 0xa8, 0x7d, 0x0c, 0x67, 0x53, 0xa2, 0x44, 0x74, 0xf2, + 0x16, 0x34, 0x5d, 0x62, 0x19, 0x3e, 0x27, 0x1b, 0xb6, 0x25, 
0x9d, 0x81, 0x1b, 0xf1, 0x6e, 0x59, + 0xb4, 0x79, 0x2f, 0x20, 0x6e, 0x56, 0x95, 0x93, 0x35, 0x6f, 0xc3, 0x62, 0xba, 0x39, 0xef, 0x5e, + 0x7b, 0x00, 0x4b, 0x3a, 0x1e, 0x91, 0x67, 0xf8, 0x75, 0x45, 0x77, 0xa0, 0x9d, 0x15, 0x20, 0x84, + 0x7f, 0x09, 0x4b, 0x31, 0xb5, 0x17, 0x98, 0x41, 0xe8, 0x9f, 0x4a, 0xb8, 0x08, 0xdd, 0x0e, 0x88, + 0x8f, 0xe5, 0x21, 0x29, 0x8a, 0xda, 0x75, 0x55, 0xb4, 0x38, 0x54, 0x79, 0x0f, 0xa8, 0x09, 0x65, + 0xdb, 0x15, 0xe2, 0xca, 0xb6, 0xab, 0x3d, 0x80, 0x5a, 0x74, 0x9c, 0xa1, 0xb5, 0x38, 0x66, 0x2a, + 0xbf, 0xe2, 0xcc, 0x8b, 0xa2, 0xa9, 0xc7, 0x19, 0x3f, 0x2e, 0x7a, 0x5a, 0x03, 0x88, 0x3c, 0x90, + 0x3c, 0x43, 0x51, 0x56, 0x9e, 0xae, 0x70, 0x69, 0x7f, 0x93, 0x70, 0x47, 0x8a, 0xca, 0x56, 0xa4, + 0xb2, 0x95, 0x70, 0x4f, 0xe5, 0xd3, 0xb8, 0xa7, 0x5b, 0x30, 0xe5, 0x07, 0x66, 0xc0, 0x1d, 0x64, + 0x53, 0x19, 0x5c, 0xb2, 0x4b, 0xac, 0x73, 0x36, 0x74, 0x01, 0xa0, 0xef, 0x61, 0x33, 0xc0, 0x96, + 0x61, 0x72, 0xcf, 0x59, 0xd1, 0x6b, 0x82, 0xd2, 0x0d, 0xd0, 0x3d, 0x98, 0x91, 0x31, 0xcc, 0x14, + 0x53, 0x63, 0x35, 0x47, 0x60, 0x62, 0xf6, 0x75, 0xd9, 0x20, 0xde, 0xed, 0xd3, 0xe3, 0x77, 0xbb, + 0x68, 0xc7, 0x99, 0x15, 0x87, 0x35, 0x53, 0xe8, 0xb0, 0x78, 0x8b, 0x93, 0x38, 0xac, 0x6a, 0xa1, + 0xc3, 0x12, 0x32, 0xc6, 0x3a, 0xac, 0xef, 0xd3, 0xf5, 0xfc, 0x6b, 0x09, 0xda, 0xd9, 0xbd, 0x23, + 0x7c, 0xc6, 0xbb, 0x30, 0xed, 0x33, 0xca, 0x18, 0xff, 0x23, 0x9a, 0x08, 0x46, 0xf4, 0x00, 0x26, + 0x6d, 0xe7, 0x90, 0xb0, 0x1c, 0x42, 0x3d, 0xf9, 0x8b, 0xfa, 0xb8, 0xb5, 0xe5, 0x1c, 0x12, 0x3e, + 0x25, 0xac, 0x61, 0xe7, 0x2e, 0xd4, 0x22, 0xd2, 0xa9, 0x46, 0xf2, 0x10, 0x16, 0x52, 0xc6, 0xc7, + 0xa3, 0xd7, 0xc8, 0x54, 0x4b, 0x27, 0x32, 0x55, 0xed, 0xff, 0x4b, 0xea, 0xc6, 0x79, 0x68, 0x0f, + 0x03, 0xec, 0x65, 0x36, 0xce, 0x7b, 0x52, 0x28, 0xdf, 0x35, 0x17, 0x8a, 0x84, 0xf2, 0xc0, 0x52, + 0x6c, 0x82, 0x1e, 0x34, 0x99, 0xf9, 0x18, 0x3e, 0x1e, 0xb2, 0x53, 0x5a, 0xc4, 0x47, 0x3f, 0xcc, + 0x69, 0xcd, 0xfb, 0xe5, 0xb6, 0xd7, 0x13, 0xec, 0x7c, 0x9a, 0x1a, 0x43, 0x95, 0xd6, 0xf9, 0x04, + 0x50, 0x96, 0xe9, 0x54, 0x13, 0xf7, 0x19, 0x75, 0x3b, 0x34, 0x9b, 0xca, 0x39, 0x7e, 0x0e, 0x99, + 0x1a, 0x63, 0x96, 0x9f, 0xeb, 0xa9, 0x0b, 0x46, 0xed, 0xaf, 0x2b, 0x00, 0x71, 0xe5, 0x1b, 0xeb, + 0x6f, 0xee, 0x46, 0xbb, 0x9f, 0x87, 0x38, 0x17, 0x73, 0xe4, 0xe5, 0xee, 0xfb, 0x87, 0xc9, 0x7d, + 0xcf, 0x83, 0x9d, 0xb7, 0xf2, 0x5a, 0xbf, 0xb1, 0x3b, 0x7e, 0x1d, 0x16, 0xd3, 0xcb, 0x2d, 0xb6, + 0xfb, 0x75, 0x98, 0xb2, 0x03, 0x3c, 0xe2, 0xd8, 0x80, 0x9a, 0x23, 0x29, 0xbc, 0x9c, 0x43, 0xbb, + 0x04, 0xb5, 0xad, 0x91, 0x39, 0xc0, 0x3d, 0x17, 0xf7, 0x69, 0x5f, 0x36, 0x2d, 0x88, 0xfe, 0x79, + 0x41, 0x5b, 0x83, 0xea, 0x63, 0x7c, 0xcc, 0xf7, 0xe0, 0x09, 0xf5, 0xd3, 0xfe, 0xbc, 0x0c, 0x4b, + 0xcc, 0x6d, 0xaf, 0xcb, 0xcc, 0x5c, 0xc7, 0x3e, 0x09, 0xbd, 0x3e, 0xf6, 0xd9, 0x92, 0xba, 0xa1, + 0xe1, 0x62, 0xcf, 0x26, 0x96, 0x48, 0x45, 0x6b, 0x7d, 0x37, 0xdc, 0x65, 0x04, 0x9a, 0xbd, 0xd3, + 0xea, 0x6f, 0x42, 0x22, 0x6c, 0xab, 0xa2, 0x57, 0xfb, 0x6e, 0xf8, 0x63, 0x5a, 0x96, 0x6d, 0xfd, + 0x23, 0xd3, 0xc3, 0x3e, 0xb3, 0x21, 0xde, 0xb6, 0xc7, 0x08, 0xe8, 0x5d, 0x38, 0x3b, 0xc2, 0x23, + 0xe2, 0x1d, 0x1b, 0x43, 0x7b, 0x64, 0x07, 0x86, 0xed, 0x18, 0x07, 0xc7, 0x01, 0xf6, 0x85, 0xe1, + 0x20, 0x5e, 0xf9, 0x84, 0xd6, 0x6d, 0x39, 0x9f, 0xd2, 0x1a, 0xa4, 0x41, 0x83, 0x90, 0x91, 0xe1, + 0xf7, 0x89, 0x87, 0x0d, 0xd3, 0xfa, 0x8a, 0x9d, 0x5b, 0x15, 0xbd, 0x4e, 0xc8, 0xa8, 0x47, 0x69, + 0x5d, 0xeb, 0x2b, 0x74, 0x11, 0xea, 0x7d, 0x37, 0xf4, 0x71, 0x60, 0xd0, 0x1f, 0x76, 0x3e, 0xd5, + 0x74, 0xe0, 0xa4, 0x75, 0x37, 0xf4, 0x15, 0x86, 0x11, 0x9d, 0xf6, 0x19, 0x95, 0xe1, 
0x29, 0x9d, + 0x66, 0x13, 0x1a, 0x89, 0xe4, 0x96, 0xa6, 0x30, 0x2c, 0x8b, 0x15, 0x29, 0x0c, 0xfd, 0xa6, 0x34, + 0x8f, 0x0c, 0xe5, 0x4c, 0xb2, 0x6f, 0x4a, 0x0b, 0x8e, 0x5d, 0x99, 0xbf, 0xb0, 0x6f, 0x3a, 0xe5, + 0x43, 0xfc, 0x4c, 0x80, 0x13, 0x35, 0x9d, 0x17, 0x34, 0x0b, 0x60, 0xdd, 0x74, 0xcd, 0x03, 0x7b, + 0x68, 0x07, 0xc7, 0xe8, 0x3a, 0xb4, 0x4c, 0xcb, 0x32, 0xfa, 0x92, 0x62, 0x63, 0x89, 0x14, 0xcd, + 0x99, 0x96, 0xb5, 0xae, 0x90, 0xd1, 0x0f, 0x60, 0xde, 0xf2, 0x88, 0x9b, 0xe4, 0xe5, 0xd0, 0x51, + 0x8b, 0x56, 0xa8, 0xcc, 0xda, 0x3f, 0x4e, 0xc2, 0x85, 0xe4, 0xc2, 0xa6, 0xe1, 0x82, 0xbb, 0x30, + 0x9b, 0xea, 0x35, 0x99, 0xa7, 0xc7, 0x4a, 0xea, 0x09, 0xc6, 0x54, 0x42, 0x5d, 0xce, 0x24, 0xd4, + 0xb9, 0x38, 0x44, 0xe5, 0xbb, 0xc0, 0x21, 0x26, 0xbf, 0x0d, 0x0e, 0x31, 0x75, 0x22, 0x1c, 0xe2, + 0x2a, 0x83, 0x05, 0x65, 0x23, 0x96, 0x0d, 0x72, 0x33, 0x6a, 0x44, 0x3c, 0x8e, 0x84, 0x0f, 0x53, + 0x78, 0xc5, 0xcc, 0x69, 0xf0, 0x8a, 0x6a, 0x21, 0x5e, 0x41, 0x2d, 0xc2, 0x75, 0x4d, 0x6f, 0x44, + 0x3c, 0x09, 0x48, 0xb4, 0x6b, 0x4c, 0x85, 0x39, 0x49, 0x17, 0x60, 0x44, 0x21, 0x74, 0x01, 0x45, + 0xd0, 0x05, 0x5a, 0x85, 0x59, 0x87, 0x18, 0x0e, 0x7e, 0x6e, 0xd0, 0x05, 0xf3, 0xdb, 0x75, 0xbe, + 0x7a, 0x0e, 0xd9, 0xc6, 0xcf, 0x77, 0x29, 0x45, 0xfb, 0xdb, 0x12, 0x2c, 0x24, 0x0d, 0x47, 0x24, + 0xab, 0xf7, 0xa1, 0xe6, 0x49, 0xdf, 0x20, 0x8c, 0x65, 0x35, 0x19, 0xfa, 0x65, 0x7d, 0x88, 0x1e, + 0x37, 0x41, 0x3f, 0x2e, 0x84, 0x3d, 0xae, 0x16, 0x88, 0x79, 0x15, 0xf0, 0xa1, 0x75, 0x61, 0x3e, + 0x62, 0x1e, 0x0b, 0x3a, 0x28, 0x20, 0x42, 0x39, 0x09, 0x22, 0x38, 0x30, 0xbd, 0x81, 0x9f, 0xd9, + 0x7d, 0xfc, 0x9d, 0x80, 0x96, 0xab, 0x50, 0x77, 0xb1, 0x37, 0xb2, 0x7d, 0x3f, 0x32, 0xfa, 0x9a, + 0xae, 0x92, 0xb4, 0xff, 0x9a, 0x82, 0xb9, 0xf4, 0xcc, 0xde, 0xc9, 0x60, 0x16, 0x9d, 0x78, 0x17, + 0xa6, 0xc7, 0xa7, 0x9c, 0xd1, 0xd7, 0xe4, 0x31, 0x50, 0x4e, 0x25, 0x28, 0xd1, 0x49, 0x21, 0x8e, + 0x06, 0x3a, 0xfe, 0x3e, 0x19, 0x8d, 0x4c, 0xc7, 0x92, 0x80, 0xb2, 0x28, 0xd2, 0xd9, 0x32, 0xbd, + 0x01, 0xdd, 0x5a, 0x94, 0xcc, 0xbe, 0xa9, 0x97, 0xa4, 0x81, 0xbe, 0xed, 0x30, 0xc8, 0x83, 0x6d, + 0x9c, 0x9a, 0x0e, 0x82, 0xb4, 0x61, 0x7b, 0xe8, 0x0a, 0x4c, 0x62, 0xe7, 0x99, 0x3c, 0x8d, 0x63, + 0xc4, 0x59, 0x1e, 0x3f, 0x3a, 0xab, 0x46, 0x57, 0x61, 0x7a, 0x44, 0x42, 0x27, 0x90, 0x21, 0x7f, + 0x33, 0x09, 0xbc, 0xea, 0xa2, 0x16, 0x5d, 0x87, 0x19, 0x8b, 0xad, 0x81, 0x8c, 0xeb, 0xe7, 0x62, + 0xd8, 0x84, 0xd1, 0x75, 0x59, 0x8f, 0x3e, 0x8a, 0xe2, 0x88, 0x5a, 0x2a, 0x12, 0x48, 0x4d, 0x6a, + 0x6e, 0x30, 0xf1, 0x38, 0x19, 0x4c, 0x00, 0x13, 0x71, 0xbd, 0x50, 0xc4, 0x78, 0xd0, 0x63, 0x19, + 0xaa, 0x43, 0x32, 0xe0, 0x76, 0x50, 0xe7, 0xd7, 0x0f, 0x43, 0x32, 0x60, 0x66, 0xb0, 0x40, 0x83, + 0x27, 0xcb, 0x76, 0xda, 0xb3, 0x6c, 0x7b, 0xf1, 0x02, 0x3d, 0x13, 0xd9, 0x87, 0x41, 0x9c, 0x3e, + 0x6e, 0x37, 0x58, 0x55, 0x8d, 0x51, 0x76, 0x9c, 0x3e, 0x3b, 0xb2, 0x83, 0xe0, 0xb8, 0xdd, 0x64, + 0x74, 0xfa, 0x49, 0x63, 0x5e, 0x9e, 0x68, 0xcd, 0xa5, 0x62, 0xde, 0xbc, 0xfd, 0xf9, 0x06, 0xa0, + 0x2a, 0xff, 0x50, 0x82, 0xc5, 0x75, 0x16, 0xf2, 0x29, 0x9e, 0xe0, 0x34, 0xa8, 0xc0, 0xed, 0x08, + 0x7e, 0x49, 0xa7, 0xf0, 0xe9, 0xc1, 0x0a, 0x3e, 0xf4, 0x09, 0x34, 0xa5, 0x4c, 0xd1, 0xb2, 0xf2, + 0x2a, 0xe0, 0xa6, 0xe1, 0xab, 0x45, 0xed, 0x23, 0x58, 0xca, 0xe8, 0x2c, 0xc2, 0xb3, 0x4b, 0x30, + 0x1b, 0x7b, 0x84, 0x48, 0xe5, 0x7a, 0x44, 0xdb, 0xb2, 0xb4, 0x7b, 0x70, 0xb6, 0x17, 0x98, 0x5e, + 0x90, 0x19, 0xf0, 0x09, 0xda, 0x32, 0xec, 0x26, 0xd9, 0x56, 0xc0, 0x2b, 0x3d, 0x58, 0xe8, 0x05, + 0xc4, 0x7d, 0x0d, 0xa1, 0x74, 0xa7, 0xd3, 0x61, 0x93, 0x30, 0x10, 0x31, 0x99, 0x2c, 0x6a, 0x4b, + 0x1c, 0x69, 
0xca, 0xf6, 0xf6, 0x21, 0x2c, 0x72, 0xa0, 0xe7, 0x75, 0x06, 0xb1, 0x2c, 0x61, 0xa6, + 0xac, 0xdc, 0x0d, 0x38, 0x13, 0xbb, 0xf2, 0x38, 0x3d, 0xbc, 0x99, 0x4c, 0x0f, 0x97, 0xb2, 0x6b, + 0x9c, 0xc8, 0x0e, 0xff, 0xa2, 0xac, 0x38, 0xcc, 0x82, 0xe4, 0x70, 0x2d, 0x99, 0x1c, 0x9e, 0x2f, + 0x10, 0x99, 0xc8, 0x0d, 0xb3, 0x16, 0x59, 0xc9, 0xb1, 0x48, 0x3d, 0x93, 0x41, 0x4e, 0xa6, 0xf2, + 0xec, 0x94, 0x6e, 0xbf, 0x95, 0x04, 0x72, 0x8b, 0x27, 0x90, 0x51, 0xd7, 0x11, 0xf8, 0x76, 0x3b, + 0x95, 0x40, 0xb6, 0x8b, 0xd4, 0x8c, 0xf2, 0xc7, 0x3f, 0x9d, 0x84, 0x5a, 0x54, 0x97, 0x99, 0xd8, + 0xec, 0x24, 0x95, 0x73, 0x26, 0x49, 0x3d, 0xbf, 0x2a, 0xaf, 0x73, 0x7e, 0x4d, 0xbe, 0xea, 0xfc, + 0x3a, 0x07, 0x35, 0xf6, 0x61, 0x78, 0xf8, 0x50, 0x9c, 0x47, 0x55, 0x46, 0xd0, 0xf1, 0x61, 0x6c, + 0x50, 0xd3, 0x27, 0x31, 0xa8, 0x54, 0xa6, 0x3a, 0x93, 0xce, 0x54, 0xef, 0x44, 0x27, 0x0c, 0x3f, + 0x8b, 0x56, 0xb2, 0xe2, 0x72, 0xcf, 0x96, 0xcd, 0xe4, 0xd9, 0xc2, 0x8f, 0xa7, 0xcb, 0x39, 0x8d, + 0xdf, 0xd8, 0x3c, 0xf5, 0x09, 0xcf, 0x53, 0x55, 0xab, 0x12, 0x8e, 0x70, 0x0d, 0x20, 0xda, 0xf3, + 0x32, 0x59, 0x45, 0xd9, 0xa1, 0xe9, 0x0a, 0x97, 0xb6, 0x0f, 0x8b, 0x89, 0xf9, 0x8f, 0x11, 0xe2, + 0x93, 0x79, 0xb1, 0x02, 0x78, 0xf8, 0x5f, 0xd4, 0xf8, 0xa9, 0x00, 0x64, 0xbd, 0x93, 0x01, 0x3d, + 0x4e, 0x66, 0x8f, 0x37, 0x93, 0x98, 0xc7, 0xe9, 0x0c, 0x29, 0x03, 0x79, 0xb0, 0xe3, 0xde, 0xf4, + 0x44, 0x35, 0xcf, 0x56, 0x6b, 0x82, 0xd2, 0x0d, 0x68, 0x90, 0x75, 0x68, 0x3b, 0xb6, 0x7f, 0xc4, + 0xeb, 0xa7, 0x59, 0x3d, 0x48, 0x52, 0x97, 0x5d, 0xc2, 0xe3, 0x17, 0x76, 0x60, 0xf4, 0x89, 0x85, + 0x99, 0x99, 0x4e, 0xe9, 0x55, 0x4a, 0x58, 0x27, 0x16, 0x8e, 0xb7, 0x4e, 0xf5, 0x54, 0x5b, 0xa7, + 0x96, 0xda, 0x3a, 0x8b, 0x30, 0xed, 0x61, 0xd3, 0x27, 0x8e, 0x48, 0x19, 0x44, 0x89, 0xce, 0xff, + 0x08, 0xfb, 0x3e, 0xed, 0x40, 0x84, 0x36, 0xa2, 0xa8, 0x04, 0x60, 0xb3, 0x45, 0x01, 0xd8, 0x18, + 0x14, 0x37, 0x15, 0x80, 0x35, 0x8a, 0x02, 0xb0, 0x93, 0x80, 0xb8, 0x4a, 0x78, 0xd9, 0x1c, 0x1b, + 0x5e, 0xaa, 0x81, 0xda, 0x5c, 0x22, 0x50, 0xfb, 0x3e, 0x77, 0xdb, 0x3f, 0x97, 0x60, 0x29, 0xb3, + 0x41, 0xc4, 0x7e, 0xbb, 0x9d, 0x82, 0x81, 0xdb, 0x45, 0x33, 0x14, 0xa1, 0xc0, 0xf7, 0x13, 0x28, + 0xf0, 0x8d, 0x42, 0xfe, 0xef, 0x1c, 0x04, 0xfe, 0x43, 0xb8, 0xb8, 0xef, 0x5a, 0xa9, 0xf0, 0x49, + 0x24, 0x7f, 0x27, 0xdf, 0xef, 0x77, 0x64, 0xa4, 0x5b, 0x3e, 0x61, 0x5e, 0xc9, 0xd9, 0x35, 0x0d, + 0x56, 0x8b, 0x7b, 0x17, 0x61, 0xc8, 0xef, 0xc3, 0xdc, 0xe6, 0x0b, 0xdc, 0xef, 0x1d, 0x3b, 0xfd, + 0x53, 0x68, 0xd4, 0x82, 0x4a, 0x7f, 0x64, 0x09, 0x78, 0x85, 0x7e, 0xaa, 0x91, 0x55, 0x25, 0x19, + 0x59, 0x19, 0xd0, 0x8a, 0x7b, 0x10, 0x4b, 0xb8, 0x48, 0x97, 0xd0, 0xa2, 0xcc, 0x54, 0xf8, 0xac, + 0x2e, 0x4a, 0x82, 0x8e, 0x3d, 0x8f, 0x0d, 0x95, 0xd3, 0xb1, 0xe7, 0x25, 0x77, 0x7b, 0x25, 0xb9, + 0xdb, 0xb5, 0xbf, 0x2a, 0x41, 0x9d, 0xf6, 0xf0, 0xad, 0xf4, 0x17, 0xf9, 0x45, 0x25, 0xce, 0x2f, + 0xa2, 0x34, 0x65, 0x52, 0x4d, 0x53, 0x62, 0xcd, 0xa7, 0x18, 0x39, 0xab, 0xf9, 0x74, 0x44, 0xc7, + 0x9e, 0xa7, 0xad, 0xc2, 0x2c, 0xd7, 0x4d, 0x8c, 0xbc, 0x05, 0x95, 0xd0, 0x1b, 0x4a, 0xeb, 0x09, + 0xbd, 0xa1, 0xf6, 0x67, 0x25, 0x68, 0x74, 0x83, 0xc0, 0xec, 0x1f, 0x9d, 0x62, 0x00, 0x91, 0x72, + 0x65, 0x55, 0xb9, 0xec, 0x20, 0x62, 0x75, 0x27, 0x0b, 0xd4, 0x9d, 0x4a, 0xa8, 0xab, 0x41, 0x53, + 0xea, 0x52, 0xa8, 0xf0, 0x36, 0xa0, 0x5d, 0xe2, 0x05, 0x0f, 0x89, 0xf7, 0xdc, 0xf4, 0xac, 0xd3, + 0xe5, 0x30, 0x08, 0x26, 0xc5, 0xc3, 0xaa, 0xca, 0xb5, 0x29, 0x9d, 0x7d, 0x6b, 0x6f, 0xc3, 0x99, + 0x84, 0xbc, 0xc2, 0x8e, 0xef, 0x42, 0x9d, 0x39, 0x70, 0x11, 0xe7, 0x5e, 0x53, 0x71, 0xde, 0x71, + 0x5e, 0x5e, 0xeb, 0xc2, 0x3c, 0x3d, 
0xbb, 0x19, 0x3d, 0xda, 0x78, 0x3f, 0x4c, 0x45, 0x83, 0x0b, + 0xc9, 0xf6, 0xa9, 0x48, 0xf0, 0xef, 0x4a, 0x30, 0xc5, 0xe8, 0x99, 0xf3, 0xf4, 0x1c, 0xd4, 0x3c, + 0xec, 0x12, 0x23, 0x30, 0x07, 0xd1, 0x5b, 0x35, 0x4a, 0xd8, 0x33, 0x07, 0x3e, 0x7b, 0x6a, 0x47, + 0x2b, 0x2d, 0x7b, 0x80, 0xfd, 0x40, 0x3e, 0x58, 0xab, 0x53, 0xda, 0x06, 0x27, 0xd1, 0x29, 0xf1, + 0xed, 0x3f, 0xe0, 0x61, 0xde, 0xa4, 0xce, 0xbe, 0xd1, 0x15, 0xfe, 0x0c, 0x64, 0x0c, 0x28, 0xc7, + 0xde, 0x86, 0x74, 0xa0, 0x9a, 0xc2, 0xe1, 0xa2, 0xb2, 0xf6, 0x11, 0x20, 0x75, 0xcc, 0x62, 0x52, + 0xaf, 0xc2, 0x34, 0x9b, 0x12, 0x19, 0xa7, 0x34, 0x93, 0x83, 0xd6, 0x45, 0xad, 0xf6, 0x05, 0x20, + 0x3e, 0x8b, 0x89, 0xd8, 0xe4, 0xc4, 0x33, 0x3e, 0x26, 0x44, 0xf9, 0xfb, 0x12, 0x9c, 0x49, 0x88, + 0x8e, 0x1e, 0x04, 0x24, 0x64, 0xa7, 0x15, 0x13, 0x72, 0xef, 0x25, 0x3c, 0xf9, 0xd5, 0x94, 0x02, + 0xbf, 0x21, 0x2f, 0xfe, 0xcb, 0x12, 0x40, 0x37, 0x0c, 0x8e, 0x04, 0x20, 0xa5, 0xce, 0x7a, 0x29, + 0x39, 0xeb, 0xb4, 0xce, 0x35, 0x7d, 0xff, 0x39, 0xf1, 0x64, 0x32, 0x10, 0x95, 0x19, 0x98, 0x14, + 0x06, 0x47, 0x12, 0x04, 0xa7, 0xdf, 0xe8, 0x0a, 0x34, 0xf9, 0x93, 0x47, 0xc3, 0xb4, 0x2c, 0x0f, + 0xfb, 0xbe, 0x40, 0xc3, 0x1b, 0x9c, 0xda, 0xe5, 0x44, 0xca, 0x66, 0x5b, 0xd8, 0x09, 0xec, 0xe0, + 0xd8, 0x08, 0xc8, 0xd7, 0xd8, 0x11, 0x61, 0x7e, 0x43, 0x52, 0xf7, 0x28, 0x91, 0xb2, 0x79, 0x78, + 0x60, 0xfb, 0x81, 0x27, 0xd9, 0x24, 0x3a, 0x2b, 0xa8, 0x8c, 0x4d, 0xfb, 0x79, 0x09, 0x5a, 0xbb, + 0xe1, 0x70, 0xc8, 0x67, 0xf6, 0xd4, 0x6b, 0xfb, 0xb6, 0x18, 0x47, 0x39, 0x65, 0x9d, 0xf1, 0x14, + 0x89, 0xc1, 0x7d, 0x7b, 0xf8, 0xe1, 0x36, 0xcc, 0x2b, 0x8a, 0x0a, 0x4b, 0x49, 0xc4, 0x6c, 0xa5, + 0x64, 0xcc, 0xa6, 0xdd, 0x07, 0xc4, 0x33, 0xee, 0xd7, 0x1b, 0x9c, 0x76, 0x16, 0xce, 0x24, 0xda, + 0x8b, 0x63, 0xf2, 0x06, 0x34, 0xc4, 0x7d, 0xbf, 0x30, 0x82, 0x65, 0xa8, 0x52, 0x77, 0xd7, 0xb7, + 0x2d, 0x79, 0xfb, 0x31, 0xe3, 0x12, 0x6b, 0xdd, 0xb6, 0x3c, 0x6d, 0x1b, 0x1a, 0x3a, 0x17, 0x2f, + 0x78, 0x3f, 0x86, 0xa6, 0x78, 0x1d, 0x60, 0x24, 0xde, 0xcf, 0xc4, 0x50, 0x7d, 0x42, 0xb6, 0xde, + 0x70, 0xd4, 0xa2, 0xf6, 0x53, 0xe8, 0xf0, 0x63, 0x3c, 0x21, 0x55, 0x0e, 0xed, 0x63, 0x90, 0x4f, + 0x72, 0x8b, 0x84, 0x27, 0x9b, 0x35, 0x3c, 0xb5, 0xa8, 0x5d, 0x80, 0x73, 0xb9, 0xc2, 0xc5, 0xb8, + 0x5d, 0x68, 0xc5, 0x15, 0x96, 0x2d, 0x2f, 0x7d, 0xd8, 0x65, 0x4e, 0x49, 0xb9, 0xcc, 0x59, 0x8c, + 0x62, 0xb2, 0xb2, 0x3c, 0x4f, 0x58, 0xe4, 0x15, 0x87, 0xd0, 0x95, 0xa2, 0x10, 0x7a, 0x32, 0x11, + 0x42, 0x6b, 0x9f, 0x45, 0xb3, 0x27, 0xf2, 0x97, 0x0f, 0x58, 0x7a, 0xc5, 0xfb, 0x96, 0x6e, 0x6b, + 0x39, 0x67, 0x70, 0x9c, 0x43, 0x57, 0x98, 0xb5, 0xeb, 0xd0, 0x48, 0x3a, 0x30, 0xc5, 0x2d, 0x95, + 0x32, 0x6e, 0xa9, 0x99, 0xf2, 0x48, 0xb7, 0x52, 0x71, 0x66, 0x66, 0x46, 0x53, 0x51, 0xe6, 0xfb, + 0x09, 0xdf, 0x74, 0x29, 0xbe, 0x87, 0xf9, 0x0d, 0xb9, 0xa5, 0x05, 0xe1, 0xa3, 0x1f, 0xfa, 0xb4, + 0xbd, 0x18, 0xa2, 0x76, 0x19, 0xea, 0xfb, 0x45, 0x8f, 0x65, 0x27, 0xe5, 0xc5, 0xe6, 0xdb, 0x30, + 0xdf, 0x0b, 0x88, 0x67, 0x0e, 0xf0, 0x16, 0x73, 0x20, 0x87, 0x36, 0xbf, 0xb8, 0x0b, 0xc3, 0xe8, + 0x68, 0x63, 0xdf, 0xda, 0x7f, 0x94, 0x60, 0xee, 0xa1, 0x3d, 0xc4, 0xfe, 0xb1, 0x1f, 0xe0, 0xd1, + 0x3e, 0x4b, 0x72, 0xce, 0x43, 0x8d, 0x8e, 0xcb, 0x0f, 0xcc, 0x91, 0x2b, 0x2f, 0x3e, 0x23, 0x02, + 0x5d, 0x2e, 0x9f, 0x8b, 0x96, 0x80, 0x88, 0x9a, 0x60, 0x66, 0x7a, 0xa5, 0x49, 0x9f, 0x20, 0xa1, + 0xf7, 0x00, 0x42, 0x1f, 0x5b, 0xe2, 0xb2, 0xb3, 0x92, 0x3a, 0x95, 0xf7, 0xd5, 0x2b, 0x29, 0xca, + 0xc7, 0x6f, 0x3e, 0xdf, 0x87, 0xba, 0xed, 0x10, 0x0b, 0xb3, 0x2b, 0x29, 0x4b, 0x80, 0x25, 0xf9, + 0xad, 0x80, 0x33, 0xee, 0xfb, 0xd8, 0xd2, 0x7e, 0x4f, 0x9c, 
0x42, 0x72, 0xf2, 0xc4, 0x9a, 0x6f, + 0xc2, 0x3c, 0xf7, 0x2d, 0x87, 0xd1, 0xa0, 0xa5, 0xcd, 0xc5, 0x69, 0x46, 0x6a, 0x42, 0xf4, 0x96, + 0x2d, 0x02, 0x06, 0xd9, 0x42, 0xbb, 0x07, 0x67, 0x13, 0xb9, 0xc5, 0x29, 0xa2, 0x7d, 0xed, 0x51, + 0x0a, 0x1a, 0x88, 0x0d, 0x52, 0x64, 0xe0, 0xd2, 0x1e, 0x0b, 0x32, 0x70, 0x9f, 0x67, 0xe0, 0xbe, + 0xa6, 0xc3, 0x72, 0x02, 0xb1, 0x48, 0x28, 0xf2, 0x7e, 0x2a, 0xfa, 0xb9, 0x50, 0x20, 0x2c, 0x15, + 0x06, 0xfd, 0x6f, 0x09, 0x16, 0xf2, 0x18, 0x5e, 0x13, 0x1b, 0xfb, 0x49, 0xc1, 0x13, 0x94, 0xdb, + 0x63, 0xb5, 0xf9, 0xad, 0xa0, 0x88, 0x8f, 0xa1, 0x93, 0x37, 0x7b, 0xd9, 0xa5, 0xa8, 0x9c, 0x60, + 0x29, 0xfe, 0xaf, 0xac, 0xa0, 0xbd, 0xdd, 0x20, 0xf0, 0xec, 0x83, 0x90, 0x1a, 0xef, 0x77, 0x85, + 0xcd, 0x7c, 0x12, 0xe1, 0x0e, 0x7c, 0xfe, 0xae, 0x65, 0x5b, 0xc5, 0xbd, 0xe6, 0x62, 0x0f, 0x3b, + 0x49, 0xec, 0x81, 0xe3, 0xb8, 0x37, 0xc7, 0x8a, 0x79, 0x63, 0xa1, 0xba, 0x97, 0x25, 0x68, 0x26, + 0xd7, 0x01, 0x7d, 0x04, 0x60, 0x46, 0x9a, 0x0b, 0x93, 0x3f, 0x3f, 0x6e, 0x74, 0xba, 0xc2, 0x8f, + 0x2e, 0x43, 0xa5, 0xef, 0x86, 0x62, 0x45, 0xe2, 0x0b, 0xbd, 0x75, 0x37, 0xe4, 0x0e, 0x80, 0xd6, + 0xd2, 0x7c, 0x82, 0x3f, 0xcc, 0xc8, 0x78, 0xae, 0xa7, 0x8c, 0xcc, 0x59, 0x05, 0x0f, 0x7a, 0x00, + 0xcd, 0xe7, 0x9e, 0x1d, 0x98, 0x07, 0x43, 0x6c, 0x0c, 0xcd, 0x63, 0xec, 0x09, 0xcf, 0x55, 0xec, + 0x65, 0x1a, 0x92, 0xff, 0x09, 0x65, 0xd7, 0x42, 0xa8, 0xca, 0xfe, 0x5f, 0xe1, 0x91, 0x1f, 0xc3, + 0x52, 0x48, 0xd9, 0x0c, 0xf6, 0x38, 0xc4, 0x31, 0x1d, 0x62, 0xf8, 0x98, 0x9e, 0x92, 0xf2, 0x2d, + 0x68, 0xbe, 0xb7, 0x5c, 0x60, 0x8d, 0xd6, 0x89, 0x87, 0xb7, 0x4d, 0x87, 0xf4, 0x78, 0x0b, 0x6d, + 0x04, 0x75, 0x65, 0x38, 0xaf, 0xe8, 0xf9, 0x13, 0x98, 0x97, 0x57, 0xa5, 0x3e, 0x0e, 0x84, 0x5f, + 0x1f, 0xd7, 0xe7, 0x9c, 0x60, 0xef, 0xe1, 0x80, 0x79, 0xf7, 0x1b, 0xe7, 0xa1, 0x2a, 0xff, 0xa8, + 0x83, 0x66, 0xa0, 0xb2, 0xb7, 0xbe, 0xdb, 0x9a, 0xa0, 0x1f, 0xfb, 0x1b, 0xbb, 0xad, 0xd2, 0x8d, + 0x11, 0xb4, 0xd2, 0x7f, 0x52, 0x41, 0x4b, 0x70, 0x66, 0x57, 0xdf, 0xd9, 0xed, 0x3e, 0xea, 0xee, + 0x6d, 0xed, 0x6c, 0x1b, 0xbb, 0xfa, 0xd6, 0xe7, 0xdd, 0xbd, 0xcd, 0xd6, 0x04, 0xba, 0x04, 0x17, + 0xd4, 0x8a, 0xdf, 0xdd, 0xe9, 0xed, 0x19, 0x7b, 0x3b, 0xc6, 0xfa, 0xce, 0xf6, 0x5e, 0x77, 0x6b, + 0x7b, 0x53, 0x6f, 0x95, 0xd0, 0x05, 0x58, 0x56, 0x59, 0x3e, 0xdd, 0xda, 0xd8, 0xd2, 0x37, 0xd7, + 0xe9, 0x77, 0xf7, 0x49, 0xab, 0x7c, 0xe3, 0x1e, 0xcc, 0xa5, 0xde, 0x77, 0xa1, 0x79, 0x68, 0xf4, + 0xba, 0xdb, 0x1b, 0x9f, 0xee, 0x7c, 0x61, 0xe8, 0x9b, 0xdd, 0x8d, 0x2f, 0x5b, 0x13, 0x68, 0x01, + 0x5a, 0x92, 0xb4, 0xbd, 0xb3, 0xc7, 0xa9, 0xa5, 0x1b, 0x5f, 0xa7, 0x4c, 0x12, 0xa3, 0xb3, 0x30, + 0x1f, 0xf5, 0x6d, 0xac, 0xeb, 0x9b, 0xdd, 0xbd, 0xcd, 0x8d, 0xd6, 0x44, 0x92, 0xac, 0xef, 0x6f, + 0x6f, 0x6f, 0x6d, 0x3f, 0x6a, 0x95, 0xa8, 0xd4, 0x98, 0xbc, 0xf9, 0xc5, 0x16, 0x65, 0x2e, 0x27, + 0x99, 0xf7, 0xb7, 0x1f, 0x6f, 0xef, 0xfc, 0x64, 0xbb, 0x55, 0x59, 0xfb, 0xf7, 0x06, 0x34, 0x65, + 0x8c, 0x82, 0x3d, 0x76, 0x7f, 0x7f, 0x1f, 0x66, 0xe4, 0x5f, 0xb6, 0x62, 0x67, 0x95, 0xfc, 0x7f, + 0x59, 0xa7, 0x9d, 0xad, 0x10, 0x61, 0xe0, 0x04, 0xda, 0x65, 0x61, 0x99, 0xf2, 0x96, 0xee, 0x82, + 0x1a, 0x0d, 0x65, 0x1e, 0xeb, 0x75, 0x56, 0x8a, 0xaa, 0x23, 0x89, 0x3d, 0x1a, 0x70, 0xa9, 0x8f, + 0xb3, 0xd1, 0x8a, 0x1a, 0x26, 0x64, 0x1f, 0x7d, 0x77, 0x2e, 0x16, 0xd6, 0x47, 0x42, 0xbf, 0x84, + 0x56, 0xfa, 0x59, 0x36, 0x8a, 0xf1, 0xb2, 0x82, 0x27, 0xdf, 0x9d, 0x4b, 0x63, 0x38, 0x54, 0xd1, + 0x99, 0x07, 0xcc, 0xab, 0x63, 0x1e, 0x94, 0xa6, 0x45, 0x17, 0x3d, 0x39, 0xe5, 0x53, 0x91, 0x7c, + 0x03, 0x87, 0xd4, 0x67, 0xc3, 0x39, 0x6f, 0x21, 0x95, 0xa9, 0xc8, 0x7f, 0x3c, 0xa7, 
0x4d, 0xa0, + 0xcf, 0x61, 0x2e, 0x75, 0x75, 0x8b, 0xe2, 0x56, 0xf9, 0x17, 0xd1, 0x9d, 0xd5, 0x62, 0x86, 0xe4, + 0xba, 0xa9, 0x17, 0xb3, 0x89, 0x75, 0xcb, 0xb9, 0xed, 0x4d, 0xac, 0x5b, 0xee, 0x8d, 0x2e, 0x33, + 0xaf, 0xc4, 0xf5, 0xab, 0x62, 0x5e, 0x79, 0x77, 0xbd, 0x9d, 0x95, 0xa2, 0x6a, 0x75, 0xf8, 0xa9, + 0xab, 0x57, 0x65, 0xf8, 0xf9, 0x37, 0xba, 0x9d, 0xd5, 0x62, 0x86, 0xf4, 0x5a, 0xc5, 0xf7, 0x40, + 0xa9, 0xb5, 0xca, 0x5c, 0x3b, 0xa6, 0xd6, 0x2a, 0x7b, 0x81, 0x24, 0xd6, 0x2a, 0x75, 0x6d, 0x73, + 0xb1, 0x18, 0xa5, 0xce, 0xac, 0x55, 0x3e, 0x8c, 0xad, 0x4d, 0xa0, 0x6f, 0xa0, 0x5d, 0x84, 0x00, + 0xa3, 0x38, 0x46, 0x78, 0x05, 0x44, 0xdd, 0xb9, 0x7e, 0x02, 0xce, 0xa8, 0xcb, 0x2e, 0x54, 0x25, + 0xdc, 0x8b, 0x62, 0x87, 0x92, 0xc2, 0x98, 0x3b, 0xcb, 0x39, 0x35, 0x91, 0x88, 0xf7, 0x61, 0x92, + 0x52, 0xd1, 0x42, 0x82, 0x49, 0x36, 0x3d, 0x9b, 0xa2, 0x46, 0xcd, 0x3e, 0x84, 0x69, 0x8e, 0x5d, + 0xa2, 0x38, 0x53, 0x4b, 0x00, 0xab, 0x9d, 0xa5, 0x0c, 0x3d, 0x6a, 0xfc, 0x19, 0xff, 0xe7, 0xa8, + 0x00, 0x21, 0xd1, 0xb9, 0xc4, 0x9f, 0x8c, 0x92, 0x50, 0x67, 0xe7, 0x7c, 0x7e, 0xa5, 0x6a, 0x22, + 0xa9, 0xf0, 0x63, 0xa5, 0x28, 0x3e, 0xcc, 0x98, 0x48, 0x7e, 0xbc, 0xa9, 0x4d, 0x20, 0x83, 0xe3, + 0x79, 0x29, 0xc1, 0x5a, 0xbe, 0x6d, 0x25, 0x84, 0x5f, 0x1e, 0xcb, 0x13, 0x75, 0x70, 0x00, 0x67, + 0x72, 0x90, 0x00, 0x74, 0x39, 0xb5, 0xf8, 0x79, 0x20, 0x44, 0xe7, 0xad, 0xf1, 0x4c, 0xea, 0x12, + 0x09, 0xf3, 0x5e, 0xcc, 0xa4, 0xc7, 0xe9, 0x25, 0x4a, 0x1b, 0xf3, 0xda, 0x1f, 0x57, 0x60, 0x96, + 0xe3, 0x35, 0xe2, 0x4c, 0x7b, 0x04, 0x10, 0x43, 0x9c, 0xa8, 0x93, 0x18, 0x66, 0x02, 0xeb, 0xed, + 0x9c, 0xcb, 0xad, 0x53, 0x17, 0x5f, 0x01, 0x10, 0x95, 0xc5, 0xcf, 0x62, 0xa0, 0xca, 0xe2, 0xe7, + 0x60, 0x8e, 0xda, 0x04, 0xda, 0x80, 0x5a, 0x04, 0x59, 0x21, 0x05, 0xe9, 0x4a, 0xe1, 0x6d, 0x9d, + 0x4e, 0x5e, 0x95, 0xaa, 0x91, 0x02, 0x43, 0x29, 0x1a, 0x65, 0xc1, 0x2d, 0x45, 0xa3, 0x3c, 0xe4, + 0x2a, 0x1e, 0x1d, 0x4f, 0x75, 0xd3, 0xa3, 0x4b, 0xa0, 0x07, 0xe9, 0xd1, 0x25, 0xb3, 0x63, 0x6d, + 0xe2, 0xd3, 0xf3, 0xbf, 0xf8, 0xd5, 0x4a, 0xe9, 0x3f, 0x7f, 0xb5, 0x32, 0xf1, 0x47, 0x2f, 0x57, + 0x4a, 0xbf, 0x78, 0xb9, 0x52, 0xfa, 0xb7, 0x97, 0x2b, 0xa5, 0xff, 0x7e, 0xb9, 0x52, 0xfa, 0xd9, + 0xff, 0xac, 0x4c, 0x1c, 0x4c, 0xb3, 0xbf, 0x52, 0xbf, 0xf7, 0xeb, 0x00, 0x00, 0x00, 0xff, 0xff, + 0xa6, 0x04, 0x77, 0x58, 0xfe, 0x3e, 0x00, 0x00, } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime/api.proto b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime/api.proto index 3e9fccd0aad0..93428edc8afb 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime/api.proto +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime/api.proto @@ -331,6 +331,8 @@ message RemovePodSandboxResponse {} message PodSandboxStatusRequest { // ID of the PodSandbox for which to retrieve status. string pod_sandbox_id = 1; + // Verbose indicates whether to return extra information about the pod sandbox. + bool verbose = 2; } // PodSandboxNetworkStatus is the status of the network for a PodSandbox. @@ -382,6 +384,11 @@ message PodSandboxStatus { message PodSandboxStatusResponse { // Status of the PodSandbox. PodSandboxStatus status = 1; + // Info is extra information of the PodSandbox. The key could be abitrary string, and + // value should be in json format. The information could include anything useful for + // debug, e.g. network namespace for linux container based container runtime. + // It should only be returned non-empty when Verbose is true. 
+ map info = 2; } // PodSandboxStateValue is the wrapper of PodSandboxState. @@ -523,6 +530,7 @@ message LinuxContainerSecurityContext { repeated int64 supplemental_groups = 8; // AppArmor profile for the container, candidate values are: // * runtime/default: equivalent to not specifying a profile. + // * unconfined: no profiles are loaded // * localhost/: profile loaded on the node // (localhost) by name. The possible profile names are detailed at // http://wiki.apparmor.net/index.php/AppArmor_Core_Policy_Reference @@ -746,6 +754,8 @@ message ListContainersResponse { message ContainerStatusRequest { // ID of the container for which to retrieve status. string container_id = 1; + // Verbose indicates whether to return extra information about the container. + bool verbose = 2; } // ContainerStatus represents the status of a container. @@ -790,6 +800,11 @@ message ContainerStatus { message ContainerStatusResponse { // Status of the container. ContainerStatus status = 1; + // Info is extra information of the Container. The key could be abitrary string, and + // value should be in json format. The information could include anything useful for + // debug, e.g. pid for linux container based container runtime. + // It should only be returned non-empty when Verbose is true. + map info = 2; } message UpdateContainerResourcesRequest { @@ -827,7 +842,17 @@ message ExecRequest { // Whether to exec the command in a TTY. bool tty = 3; // Whether to stream stdin. + // One of `stdin`, `stdout`, and `stderr` MUST be true. bool stdin = 4; + // Whether to stream stdout. + // One of `stdin`, `stdout`, and `stderr` MUST be true. + bool stdout = 5; + // Whether to stream stderr. + // One of `stdin`, `stdout`, and `stderr` MUST be true. + // If `tty` is true, `stderr` MUST be false. Multiplexing is not supported + // in this case. The output of stdout and stderr will be combined to a + // single stream. + bool stderr = 6; } message ExecResponse { @@ -839,10 +864,20 @@ message AttachRequest { // ID of the container to which to attach. string container_id = 1; // Whether to stream stdin. + // One of `stdin`, `stdout`, and `stderr` MUST be true. bool stdin = 2; // Whether the process being attached is running in a TTY. // This must match the TTY setting in the ContainerConfig. bool tty = 3; + // Whether to stream stdout. + // One of `stdin`, `stdout`, and `stderr` MUST be true. + bool stdout = 4; + // Whether to stream stderr. + // One of `stdin`, `stdout`, and `stderr` MUST be true. + // If `tty` is true, `stderr` MUST be false. Multiplexing is not supported + // in this case. The output of stdout and stderr will be combined to a + // single stream. + bool stderr = 5; } message AttachResponse { @@ -899,11 +934,18 @@ message ListImagesResponse { message ImageStatusRequest { // Spec of the image. ImageSpec image = 1; + // Verbose indicates whether to return extra information about the image. + bool verbose = 2; } message ImageStatusResponse { // Status of the image. Image image = 1; + // Info is extra information of the Image. The key could be abitrary string, and + // value should be in json format. The information could include anything useful + // for debug, e.g. image config for oci image based container runtime. + // It should only be returned non-empty when Verbose is true. + map info = 2; } // AuthConfig contains authorization information for connecting to a registry. 
@@ -986,11 +1028,19 @@ message RuntimeStatus { repeated RuntimeCondition conditions = 1; } -message StatusRequest {} +message StatusRequest { + // Verbose indicates whether to return extra information about the runtime. + bool verbose = 1; +} message StatusResponse { // Status of the Runtime. RuntimeStatus status = 1; + // Info is extra information of the Runtime. The key could be abitrary string, and + // value should be in json format. The information could include anything useful for + // debug, e.g. plugins used by the container runtime. + // It should only be returned non-empty when Verbose is true. + map info = 2; } message ImageFsInfoRequest {} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime/constants.go b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime/constants.go index 27c42c5c5165..04cac264d149 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime/constants.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime/constants.go @@ -25,3 +25,31 @@ const ( // NetworkReady means the runtime network is up and ready to accept containers which require network. NetworkReady = "NetworkReady" ) + +// LogStreamType is the type of the stream in CRI container log. +type LogStreamType string + +const ( + // Stdout is the stream type for stdout. + Stdout LogStreamType = "stdout" + // Stderr is the stream type for stderr. + Stderr LogStreamType = "stderr" +) + +// LogTag is the tag of a log line in CRI container log. +// Currently defined log tags: +// * First tag: Partial/End - P/E. +// The field in the container log format can be extended to include multiple +// tags by using a delimiter, but changes should be rare. If it becomes clear +// that better extensibility is desired, a more extensible format (e.g., json) +// should be adopted as a replacement and/or addition. +type LogTag string + +const ( + // LogTagPartial means the line is part of multiple lines. + LogTagPartial LogTag = "P" + // LogTagFull means the line is a single full line or the end of multiple lines. + LogTagFull LogTag = "F" + // LogTagDelimiter is the delimiter for different log tags. 
+ LogTagDelimiter = ":" +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha/BUILD new file mode 100644 index 000000000000..b60d55569c95 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha/BUILD @@ -0,0 +1,41 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "api.pb.go", + "constants.go", + ], + importpath = "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha", + deps = [ + "//vendor/github.com/gogo/protobuf/gogoproto:go_default_library", + "//vendor/github.com/gogo/protobuf/proto:go_default_library", + "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + "//vendor/google.golang.org/grpc:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) + +filegroup( + name = "go_default_library_protos", + srcs = ["api.proto"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha1/api.pb.go b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha/api.pb.go similarity index 85% rename from vendor/k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha1/api.pb.go rename to vendor/k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha/api.pb.go index 2d73929bdf1a..6a939acde525 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha1/api.pb.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha/api.pb.go @@ -31,7 +31,6 @@ limitations under the License. Device AllocateRequest AllocateResponse - DeviceRuntimeSpec Mount DeviceSpec */ @@ -177,64 +176,42 @@ func (m *AllocateRequest) GetDevicesIDs() []string { return nil } +// AllocateResponse includes the artifacts that needs to be injected into +// a container for accessing 'deviceIDs' that were mentioned as part of +// 'AllocateRequest'. // Failure Handling: // if Kubelet sends an allocation request for dev1 and dev2. // Allocation on dev1 succeeds but allocation on dev2 fails. // The Device plugin should send a ListAndWatch update and fail the // Allocation request type AllocateResponse struct { - Spec []*DeviceRuntimeSpec `protobuf:"bytes,1,rep,name=spec" json:"spec,omitempty"` + // List of environment variable to be set in the container to access one of more devices. + Envs map[string]string `protobuf:"bytes,1,rep,name=envs" json:"envs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Mounts for the container. + Mounts []*Mount `protobuf:"bytes,2,rep,name=mounts" json:"mounts,omitempty"` + // Devices for the container. 
+ Devices []*DeviceSpec `protobuf:"bytes,3,rep,name=devices" json:"devices,omitempty"` } func (m *AllocateResponse) Reset() { *m = AllocateResponse{} } func (*AllocateResponse) ProtoMessage() {} func (*AllocateResponse) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{5} } -func (m *AllocateResponse) GetSpec() []*DeviceRuntimeSpec { - if m != nil { - return m.Spec - } - return nil -} - -// The list to be added to the CRI spec -type DeviceRuntimeSpec struct { - // ID of the Device - ID string `protobuf:"bytes,1,opt,name=ID,json=iD,proto3" json:"ID,omitempty"` - // List of environment variable to set in the container. - Envs map[string]string `protobuf:"bytes,2,rep,name=envs" json:"envs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // Mounts for the container. - Mounts []*Mount `protobuf:"bytes,3,rep,name=mounts" json:"mounts,omitempty"` - // Devices for the container - Devices []*DeviceSpec `protobuf:"bytes,4,rep,name=devices" json:"devices,omitempty"` -} - -func (m *DeviceRuntimeSpec) Reset() { *m = DeviceRuntimeSpec{} } -func (*DeviceRuntimeSpec) ProtoMessage() {} -func (*DeviceRuntimeSpec) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{6} } - -func (m *DeviceRuntimeSpec) GetID() string { - if m != nil { - return m.ID - } - return "" -} - -func (m *DeviceRuntimeSpec) GetEnvs() map[string]string { +func (m *AllocateResponse) GetEnvs() map[string]string { if m != nil { return m.Envs } return nil } -func (m *DeviceRuntimeSpec) GetMounts() []*Mount { +func (m *AllocateResponse) GetMounts() []*Mount { if m != nil { return m.Mounts } return nil } -func (m *DeviceRuntimeSpec) GetDevices() []*DeviceSpec { +func (m *AllocateResponse) GetDevices() []*DeviceSpec { if m != nil { return m.Devices } @@ -254,7 +231,7 @@ type Mount struct { func (m *Mount) Reset() { *m = Mount{} } func (*Mount) ProtoMessage() {} -func (*Mount) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{7} } +func (*Mount) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{6} } func (m *Mount) GetContainerPath() string { if m != nil { @@ -292,7 +269,7 @@ type DeviceSpec struct { func (m *DeviceSpec) Reset() { *m = DeviceSpec{} } func (*DeviceSpec) ProtoMessage() {} -func (*DeviceSpec) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{8} } +func (*DeviceSpec) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{7} } func (m *DeviceSpec) GetContainerPath() string { if m != nil { @@ -322,7 +299,6 @@ func init() { proto.RegisterType((*Device)(nil), "deviceplugin.Device") proto.RegisterType((*AllocateRequest)(nil), "deviceplugin.AllocateRequest") proto.RegisterType((*AllocateResponse)(nil), "deviceplugin.AllocateResponse") - proto.RegisterType((*DeviceRuntimeSpec)(nil), "deviceplugin.DeviceRuntimeSpec") proto.RegisterType((*Mount)(nil), "deviceplugin.Mount") proto.RegisterType((*DeviceSpec)(nil), "deviceplugin.DeviceSpec") } @@ -698,45 +674,9 @@ func (m *AllocateResponse) MarshalTo(dAtA []byte) (int, error) { _ = i var l int _ = l - if len(m.Spec) > 0 { - for _, msg := range m.Spec { - dAtA[i] = 0xa - i++ - i = encodeVarintApi(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *DeviceRuntimeSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeviceRuntimeSpec) 
MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.ID) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintApi(dAtA, i, uint64(len(m.ID))) - i += copy(dAtA[i:], m.ID) - } if len(m.Envs) > 0 { for k := range m.Envs { - dAtA[i] = 0x12 + dAtA[i] = 0xa i++ v := m.Envs[k] mapSize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + len(v) + sovApi(uint64(len(v))) @@ -753,7 +693,7 @@ func (m *DeviceRuntimeSpec) MarshalTo(dAtA []byte) (int, error) { } if len(m.Mounts) > 0 { for _, msg := range m.Mounts { - dAtA[i] = 0x1a + dAtA[i] = 0x12 i++ i = encodeVarintApi(dAtA, i, uint64(msg.Size())) n, err := msg.MarshalTo(dAtA[i:]) @@ -765,7 +705,7 @@ func (m *DeviceRuntimeSpec) MarshalTo(dAtA []byte) (int, error) { } if len(m.Devices) > 0 { for _, msg := range m.Devices { - dAtA[i] = 0x22 + dAtA[i] = 0x1a i++ i = encodeVarintApi(dAtA, i, uint64(msg.Size())) n, err := msg.MarshalTo(dAtA[i:]) @@ -946,22 +886,6 @@ func (m *AllocateRequest) Size() (n int) { func (m *AllocateResponse) Size() (n int) { var l int _ = l - if len(m.Spec) > 0 { - for _, e := range m.Spec { - l = e.Size() - n += 1 + l + sovApi(uint64(l)) - } - } - return n -} - -func (m *DeviceRuntimeSpec) Size() (n int) { - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) - } if len(m.Envs) > 0 { for k, v := range m.Envs { _ = k @@ -1086,16 +1010,6 @@ func (this *AllocateRequest) String() string { return s } func (this *AllocateResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AllocateResponse{`, - `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "DeviceRuntimeSpec", "DeviceRuntimeSpec", 1) + `,`, - `}`, - }, "") - return s -} -func (this *DeviceRuntimeSpec) String() string { if this == nil { return "nil" } @@ -1109,8 +1023,7 @@ func (this *DeviceRuntimeSpec) String() string { mapStringForEnvs += fmt.Sprintf("%v: %v,", k, this.Envs[k]) } mapStringForEnvs += "}" - s := strings.Join([]string{`&DeviceRuntimeSpec{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + s := strings.Join([]string{`&AllocateResponse{`, `Envs:` + mapStringForEnvs + `,`, `Mounts:` + strings.Replace(fmt.Sprintf("%v", this.Mounts), "Mount", "Mount", 1) + `,`, `Devices:` + strings.Replace(fmt.Sprintf("%v", this.Devices), "DeviceSpec", "DeviceSpec", 1) + `,`, @@ -1635,116 +1548,6 @@ func (m *AllocateResponse) Unmarshal(dAtA []byte) error { } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Spec = append(m.Spec, &DeviceRuntimeSpec{}) - if err := m.Spec[len(m.Spec)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeviceRuntimeSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeviceRuntimeSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeviceRuntimeSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Envs", wireType) } @@ -1860,7 +1663,7 @@ func (m *DeviceRuntimeSpec) Unmarshal(dAtA []byte) error { m.Envs[mapkey] = mapvalue } iNdEx = postIndex - case 3: + case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Mounts", wireType) } @@ -1891,7 +1694,7 @@ func (m *DeviceRuntimeSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 4: + case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType) } @@ -2316,42 +2119,41 @@ var ( func init() { proto.RegisterFile("api.proto", fileDescriptorApi) } var fileDescriptorApi = []byte{ - // 590 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xcd, 0x6e, 0xd3, 0x40, - 0x10, 0xce, 0x26, 0x6d, 0x9a, 0x4c, 0xd3, 0x1f, 0x96, 0x0a, 0x59, 0x01, 0x4c, 0x65, 0x84, 0x54, - 0x84, 0x70, 0x4b, 0x7a, 0x00, 0x21, 0x21, 0x51, 0x94, 0x82, 0xaa, 0xf2, 0x53, 0x99, 0x03, 0xc7, - 0x6a, 0xeb, 0x0c, 0xf1, 0x0a, 0x7b, 0xd7, 0x78, 0xd7, 0x91, 0x72, 0xe3, 0x11, 0x78, 0x0c, 0x1e, - 0xa5, 0x47, 0x8e, 0x1c, 0x69, 0x78, 0x0d, 0x0e, 0xc8, 0xeb, 0x75, 0x9b, 0xa6, 0x41, 0x5c, 0xb8, - 0x79, 0xbe, 0xf9, 0x66, 0xe6, 0x9b, 0xcd, 0x7c, 0x81, 0x36, 0x4b, 0xb9, 0x9f, 0x66, 0x52, 0x4b, - 0xda, 0x19, 0xe0, 0x88, 0x87, 0x98, 0xc6, 0xf9, 0x90, 0x8b, 0xee, 0xc3, 0x21, 0xd7, 0x51, 0x7e, - 0xe2, 0x87, 0x32, 0xd9, 0x1e, 0xca, 0xa1, 0xdc, 0x36, 0xa4, 0x93, 0xfc, 0xa3, 0x89, 0x4c, 0x60, - 0xbe, 0xca, 0x62, 0x2f, 0x86, 0xb5, 0x00, 0x87, 0x5c, 0x69, 0xcc, 0x02, 0xfc, 0x9c, 0xa3, 0xd2, - 0xd4, 0x81, 0xa5, 0x11, 0x66, 0x8a, 0x4b, 0xe1, 0x90, 0x4d, 0xb2, 0xd5, 0x0e, 0xaa, 0x90, 0x76, - 0xa1, 0x85, 0x62, 0x90, 0x4a, 0x2e, 0xb4, 0x53, 0x37, 0xa9, 0xf3, 0x98, 0xde, 0x85, 0x95, 0x0c, - 0x95, 0xcc, 0xb3, 0x10, 0x8f, 0x05, 0x4b, 0xd0, 0x69, 0x18, 0x42, 0xa7, 0x02, 0xdf, 0xb2, 0x04, - 0xbd, 0x25, 0x58, 0xdc, 0x4f, 0x52, 0x3d, 0xf6, 0x5e, 0xc2, 0xc6, 0x6b, 0xae, 0xf4, 0x9e, 0x18, - 0x7c, 0x60, 0x3a, 0x8c, 0x02, 0x54, 0xa9, 0x14, 0x0a, 0xa9, 0x0f, 0x4b, 0xe5, 0x36, 0xca, 0x21, - 0x9b, 0x8d, 0xad, 0xe5, 0xde, 0x86, 0x3f, 0xbd, 0x9d, 0xdf, 0x37, 0x41, 0x50, 0x91, 0xbc, 0x1d, - 0x68, 0x96, 0x10, 0x5d, 0x85, 0xfa, 0x41, 0xdf, 0x0a, 0xae, 0xf3, 0x3e, 0xbd, 0x01, 0xcd, 0x08, - 0x59, 0xac, 0x23, 0xab, 0xd4, 0x46, 0xde, 0x23, 0x58, 0xdb, 0x8b, 0x63, 0x19, 0x32, 
0x8d, 0xd5, - 0xc2, 0x2e, 0x80, 0xed, 0x77, 0xd0, 0x2f, 0xe7, 0xb6, 0x83, 0x29, 0xc4, 0x7b, 0x05, 0xeb, 0x17, - 0x25, 0x56, 0xe8, 0x2e, 0x2c, 0xa8, 0x14, 0x43, 0xab, 0xf2, 0xce, 0x5c, 0x95, 0xb9, 0xd0, 0x3c, - 0xc1, 0xf7, 0x29, 0x86, 0x81, 0x21, 0x7b, 0xbf, 0x09, 0x5c, 0xbb, 0x92, 0xbb, 0xa2, 0xfc, 0x19, - 0x2c, 0xa0, 0x18, 0x29, 0xa7, 0x6e, 0x5a, 0xdf, 0xff, 0x47, 0x6b, 0x7f, 0x5f, 0x8c, 0xd4, 0xbe, - 0xd0, 0xd9, 0x38, 0x30, 0x65, 0xf4, 0x01, 0x34, 0x13, 0x99, 0x0b, 0xad, 0x9c, 0x86, 0x69, 0x70, - 0xfd, 0x72, 0x83, 0x37, 0x45, 0x2e, 0xb0, 0x14, 0xda, 0xbb, 0x78, 0xef, 0x05, 0xc3, 0x76, 0xe6, - 0x8d, 0x33, 0x2b, 0x54, 0xc4, 0xee, 0x63, 0x68, 0x9f, 0xcf, 0xa4, 0xeb, 0xd0, 0xf8, 0x84, 0x63, - 0xab, 0xbe, 0xf8, 0xa4, 0x1b, 0xb0, 0x38, 0x62, 0x71, 0x8e, 0xf6, 0xdd, 0xcb, 0xe0, 0x69, 0xfd, - 0x09, 0xf1, 0x22, 0x58, 0x34, 0xd3, 0xe9, 0x3d, 0x58, 0x0d, 0xa5, 0xd0, 0x8c, 0x0b, 0xcc, 0x8e, - 0x53, 0xa6, 0x23, 0x5b, 0xbf, 0x72, 0x8e, 0x1e, 0x31, 0x1d, 0xd1, 0x9b, 0xd0, 0x8e, 0xa4, 0xd2, - 0x25, 0xc3, 0xde, 0x5b, 0x01, 0x54, 0xc9, 0x0c, 0xd9, 0xe0, 0x58, 0x8a, 0x78, 0x6c, 0x6e, 0xad, - 0x15, 0xb4, 0x0a, 0xe0, 0x9d, 0x88, 0xc7, 0x5e, 0x06, 0x70, 0xa1, 0xfc, 0xbf, 0x8c, 0xdb, 0x84, - 0xe5, 0x14, 0xb3, 0x84, 0xab, 0xc2, 0x08, 0xca, 0x1e, 0xf7, 0x34, 0xd4, 0x3b, 0x82, 0x4e, 0xe9, - 0xa4, 0x8c, 0xe9, 0xc2, 0x2c, 0xcf, 0xa1, 0x55, 0x39, 0x8b, 0xde, 0xbe, 0xfc, 0xaa, 0x33, 0x8e, - 0xeb, 0xce, 0xfc, 0x44, 0xa5, 0x45, 0x6a, 0xbd, 0x6f, 0x04, 0x3a, 0xe5, 0x1a, 0x47, 0x26, 0x41, - 0x0f, 0xa1, 0x33, 0xed, 0x1a, 0x3a, 0xaf, 0xae, 0xeb, 0x5d, 0x06, 0xe7, 0xd9, 0xcc, 0xab, 0xed, - 0x10, 0x7a, 0x08, 0xad, 0xea, 0xaa, 0x67, 0xf5, 0xcd, 0x18, 0xa4, 0xeb, 0xfe, 0x2d, 0x5d, 0xb5, - 0x7b, 0x71, 0xeb, 0xf4, 0xcc, 0x25, 0x3f, 0xce, 0xdc, 0xda, 0x97, 0x89, 0x4b, 0x4e, 0x27, 0x2e, - 0xf9, 0x3e, 0x71, 0xc9, 0xcf, 0x89, 0x4b, 0xbe, 0xfe, 0x72, 0x6b, 0x27, 0x4d, 0xf3, 0x5f, 0xb3, - 0xfb, 0x27, 0x00, 0x00, 0xff, 0xff, 0x99, 0xda, 0x39, 0xd8, 0xb5, 0x04, 0x00, 0x00, + // 562 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xcb, 0x8e, 0xd3, 0x4a, + 0x10, 0x4d, 0x27, 0x77, 0xf2, 0xa8, 0xc9, 0x3c, 0xd4, 0x37, 0x42, 0x96, 0x01, 0x2b, 0x32, 0x42, + 0x8a, 0x84, 0xf0, 0x0c, 0x61, 0x01, 0x42, 0x2c, 0x18, 0x94, 0x20, 0x8d, 0x86, 0x47, 0x64, 0x16, + 0x2c, 0xa3, 0x8e, 0x53, 0xc4, 0x16, 0x76, 0xb7, 0x71, 0xb7, 0x23, 0x65, 0xc7, 0x27, 0xf0, 0x19, + 0x7c, 0xca, 0x2c, 0x59, 0xb2, 0x64, 0xc2, 0x8e, 0xaf, 0x40, 0x6e, 0xdb, 0x79, 0x29, 0x62, 0xc5, + 0xce, 0x75, 0xea, 0x9c, 0xd4, 0xa9, 0xca, 0xb1, 0xa1, 0xc5, 0xe2, 0xc0, 0x89, 0x13, 0xa1, 0x04, + 0x6d, 0x4f, 0x71, 0x1e, 0x78, 0x18, 0x87, 0xe9, 0x2c, 0xe0, 0xe6, 0xc3, 0x59, 0xa0, 0xfc, 0x74, + 0xe2, 0x78, 0x22, 0x3a, 0x9b, 0x89, 0x99, 0x38, 0xd3, 0xa4, 0x49, 0xfa, 0x51, 0x57, 0xba, 0xd0, + 0x4f, 0xb9, 0xd8, 0x0e, 0xe1, 0xc4, 0xc5, 0x59, 0x20, 0x15, 0x26, 0x2e, 0x7e, 0x4e, 0x51, 0x2a, + 0x6a, 0x40, 0x63, 0x8e, 0x89, 0x0c, 0x04, 0x37, 0x48, 0x97, 0xf4, 0x5a, 0x6e, 0x59, 0x52, 0x13, + 0x9a, 0xc8, 0xa7, 0xb1, 0x08, 0xb8, 0x32, 0xaa, 0xba, 0xb5, 0xaa, 0xe9, 0x3d, 0x38, 0x4a, 0x50, + 0x8a, 0x34, 0xf1, 0x70, 0xcc, 0x59, 0x84, 0x46, 0x4d, 0x13, 0xda, 0x25, 0xf8, 0x96, 0x45, 0x68, + 0x37, 0xe0, 0x60, 0x18, 0xc5, 0x6a, 0x61, 0xbf, 0x82, 0xce, 0xeb, 0x40, 0xaa, 0x0b, 0x3e, 0xfd, + 0xc0, 0x94, 0xe7, 0xbb, 0x28, 0x63, 0xc1, 0x25, 0x52, 0x07, 0x1a, 0xf9, 0x36, 0xd2, 0x20, 0xdd, + 0x5a, 0xef, 0xb0, 0xdf, 0x71, 0x36, 0xb7, 0x73, 0x06, 0xba, 0x70, 0x4b, 0x92, 0x7d, 0x0e, 0xf5, + 0x1c, 0xa2, 0xc7, 0x50, 0xbd, 0x1c, 0x14, 0x86, 0xab, 0xc1, 0x80, 0xde, 
0x82, 0xba, 0x8f, 0x2c, + 0x54, 0x7e, 0xe1, 0xb4, 0xa8, 0xec, 0x47, 0x70, 0x72, 0x11, 0x86, 0xc2, 0x63, 0x0a, 0xcb, 0x85, + 0x2d, 0x80, 0xe2, 0xf7, 0x2e, 0x07, 0xf9, 0xdc, 0x96, 0xbb, 0x81, 0xd8, 0xbf, 0x09, 0x9c, 0xae, + 0x35, 0x85, 0xd3, 0xe7, 0xf0, 0x1f, 0xf2, 0x79, 0x69, 0xb3, 0xb7, 0x6d, 0x73, 0x97, 0xed, 0x0c, + 0xf9, 0x5c, 0x0e, 0xb9, 0x4a, 0x16, 0xae, 0x56, 0xd1, 0x07, 0x50, 0x8f, 0x44, 0xca, 0x95, 0x34, + 0xaa, 0x5a, 0xff, 0xff, 0xb6, 0xfe, 0x4d, 0xd6, 0x73, 0x0b, 0x0a, 0xed, 0xaf, 0x8f, 0x52, 0xd3, + 0x6c, 0x63, 0xdf, 0x51, 0xde, 0xc7, 0xe8, 0xad, 0x0e, 0x63, 0x3e, 0x81, 0xd6, 0x6a, 0x26, 0x3d, + 0x85, 0xda, 0x27, 0x5c, 0x14, 0xc7, 0xc9, 0x1e, 0x69, 0x07, 0x0e, 0xe6, 0x2c, 0x4c, 0xb1, 0x38, + 0x4e, 0x5e, 0x3c, 0xab, 0x3e, 0x25, 0xb6, 0x0f, 0x07, 0x7a, 0x3a, 0xbd, 0x0f, 0xc7, 0x9e, 0xe0, + 0x8a, 0x05, 0x1c, 0x93, 0x71, 0xcc, 0x94, 0x5f, 0xe8, 0x8f, 0x56, 0xe8, 0x88, 0x29, 0x9f, 0xde, + 0x86, 0x96, 0x2f, 0xa4, 0xca, 0x19, 0x45, 0x28, 0x32, 0xa0, 0x6c, 0x26, 0xc8, 0xa6, 0x63, 0xc1, + 0xc3, 0x85, 0x0e, 0x44, 0xd3, 0x6d, 0x66, 0xc0, 0x3b, 0x1e, 0x2e, 0xec, 0x04, 0x60, 0xed, 0xfc, + 0x9f, 0x8c, 0xeb, 0xc2, 0x61, 0x8c, 0x49, 0x14, 0xc8, 0x2c, 0xad, 0xb2, 0x48, 0xe0, 0x26, 0xd4, + 0x1f, 0x41, 0x3b, 0x8f, 0x7b, 0xc2, 0x54, 0x96, 0xe8, 0x17, 0xd0, 0x2c, 0xe3, 0x4f, 0xef, 0x6e, + 0x5f, 0x75, 0xe7, 0xb5, 0x30, 0x77, 0xfe, 0xa2, 0x3c, 0xc7, 0x95, 0xfe, 0x37, 0x02, 0xed, 0x7c, + 0x8d, 0x91, 0x6e, 0xd0, 0x2b, 0x68, 0x6f, 0x46, 0x9b, 0xee, 0xd3, 0x99, 0xf6, 0x36, 0xb8, 0xef, + 0x5d, 0xb0, 0x2b, 0xe7, 0x84, 0x5e, 0x41, 0xb3, 0xcc, 0xd2, 0xae, 0xbf, 0x9d, 0x14, 0x9b, 0xd6, + 0xdf, 0x23, 0x68, 0x57, 0x5e, 0xde, 0xb9, 0xbe, 0xb1, 0xc8, 0x8f, 0x1b, 0xab, 0xf2, 0x65, 0x69, + 0x91, 0xeb, 0xa5, 0x45, 0xbe, 0x2f, 0x2d, 0xf2, 0x73, 0x69, 0x91, 0xaf, 0xbf, 0xac, 0xca, 0xa4, + 0xae, 0x3f, 0x08, 0x8f, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x17, 0x55, 0xaf, 0xe1, 0x5a, 0x04, + 0x00, 0x00, } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha1/api.proto b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha/api.proto similarity index 91% rename from vendor/k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha1/api.proto rename to vendor/k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha/api.proto index e222d14a76f2..05408cbb1fa8 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha1/api.proto +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha/api.proto @@ -81,25 +81,21 @@ message AllocateRequest { repeated string devicesIDs = 1; } +// AllocateResponse includes the artifacts that needs to be injected into +// a container for accessing 'deviceIDs' that were mentioned as part of +// 'AllocateRequest'. // Failure Handling: // if Kubelet sends an allocation request for dev1 and dev2. // Allocation on dev1 succeeds but allocation on dev2 fails. // The Device plugin should send a ListAndWatch update and fail the // Allocation request message AllocateResponse { - repeated DeviceRuntimeSpec spec = 1; -} - -// The list to be added to the CRI spec -message DeviceRuntimeSpec { - // ID of the Device - string ID = 1; - // List of environment variable to set in the container. - map envs = 2; + // List of environment variable to be set in the container to access one of more devices. + map envs = 1; // Mounts for the container. - repeated Mount mounts = 3; - // Devices for the container - repeated DeviceSpec devices = 4; + repeated Mount mounts = 2; + // Devices for the container. 
+ repeated DeviceSpec devices = 3; } // Mount specifies a host volume to mount into a container. diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha1/constants.go b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha/constants.go similarity index 93% rename from vendor/k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha1/constants.go rename to vendor/k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha/constants.go index fb0440cce59c..896354ab9019 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha1/constants.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha/constants.go @@ -22,8 +22,8 @@ const ( // UnHealthy means that the device is unhealty Unhealthy = "Unhealthy" - // Version is the API version - Version = "0.1" + // Current version of the API supported by kubelet + Version = "v1alpha2" // DevicePluginPath is the folder the Device Plugin is expecting sockets to be on // Only privileged pods have access to this path // Note: Placeholder until we find a "standard path" diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha1/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha1/BUILD deleted file mode 100644 index 07e26b69b127..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha1/BUILD +++ /dev/null @@ -1,43 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "api.pb.go", - "constants.go", - ], - tags = ["automanaged"], - deps = [ - "//vendor/github.com/gogo/protobuf/gogoproto:go_default_library", - "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/google.golang.org/grpc:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) - -filegroup( - name = "go_default_library_protos", - srcs = ["api.proto"], - visibility = ["//visibility:public"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/BUILD index 07c85a597d6e..c62439d6261a 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/BUILD @@ -1,25 +1,23 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", + "go_test", ) go_library( name = "go_default_library", srcs = [ "doc.go", + "helpers.go", "register.go", "types.go", "zz_generated.deepcopy.go", ], - tags = ["automanaged"], + importpath = "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig", deps = [ - "//pkg/api:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], @@ -36,9 +34,21 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", + "//pkg/kubelet/apis/kubeletconfig/fuzzer:all-srcs", "//pkg/kubelet/apis/kubeletconfig/scheme:all-srcs", 
"//pkg/kubelet/apis/kubeletconfig/v1alpha1:all-srcs", "//pkg/kubelet/apis/kubeletconfig/validation:all-srcs", ], tags = ["automanaged"], ) + +go_test( + name = "go_default_test", + srcs = ["helpers_test.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig", + library = ":go_default_library", + deps = [ + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/doc.go b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/doc.go index 9c48ce22911a..2988febdfd60 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/doc.go @@ -14,6 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package package kubeletconfig diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/helpers.go b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/helpers.go new file mode 100644 index 000000000000..392dd4ea2c55 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/helpers.go @@ -0,0 +1,30 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubeletconfig + +// KubeletConfigurationPathRefs returns pointers to all of the KubeletConfiguration fields that contain filepaths. +// You might use this, for example, to resolve all relative paths against some common root before +// passing the configuration to the application. This method must be kept up to date as new fields are added. 
+func KubeletConfigurationPathRefs(kc *KubeletConfiguration) []*string { + paths := []*string{} + paths = append(paths, &kc.PodManifestPath) + paths = append(paths, &kc.Authentication.X509.ClientCAFile) + paths = append(paths, &kc.TLSCertFile) + paths = append(paths, &kc.TLSPrivateKeyFile) + paths = append(paths, &kc.ResolverConfig) + return paths +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/scheme/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/scheme/BUILD index 44660bf9818f..9bc425ac6b07 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/scheme/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/scheme/BUILD @@ -1,8 +1,9 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", srcs = ["scheme.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/scheme", visibility = ["//visibility:public"], deps = [ "//pkg/kubelet/apis/kubeletconfig:go_default_library", @@ -25,3 +26,14 @@ filegroup( tags = ["automanaged"], visibility = ["//visibility:public"], ) + +go_test( + name = "go_default_test", + srcs = ["scheme_test.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/scheme", + library = ":go_default_library", + deps = [ + "//pkg/kubelet/apis/kubeletconfig/fuzzer:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/testing/roundtrip:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/types.go b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/types.go index 338791c2fb7b..3b7f42509a59 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/types.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/types.go @@ -17,12 +17,7 @@ limitations under the License. package kubeletconfig import ( - "fmt" - "sort" - "strings" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/pkg/api" ) // HairpinMode denotes how the kubelet should configure networking to handle @@ -45,13 +40,10 @@ const ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// A configuration field should go in KubeletFlags instead of KubeletConfiguration if any of these are true: -// - its value will never, or cannot safely be changed during the lifetime of a node -// - its value cannot be safely shared between nodes at the same time (e.g. a hostname) -// KubeletConfiguration is intended to be shared between nodes +// A configuration field should go in KubeletFlags instead of KubeletConfiguration if +// its value cannot be safely shared between nodes at the same time (e.g. a hostname) // In general, please try to avoid adding flags or configuration fields, // we already have a confusingly large amount of them. 
-// TODO: curate the ordering and structure of this config object type KubeletConfiguration struct { metav1.TypeMeta @@ -73,7 +65,7 @@ type KubeletConfiguration struct { ManifestURL string // manifestURLHeader is the HTTP header to use when accessing the manifest // URL, with the key separated from the value with a ':', as in 'key:value' - ManifestURLHeader string + ManifestURLHeader map[string][]string // enableServer enables the Kubelet's server EnableServer bool // address is the IP address for the Kubelet to serve on (set to 0.0.0.0 @@ -97,8 +89,6 @@ type KubeletConfiguration struct { Authentication KubeletAuthentication // authorization specifies how requests to the Kubelet's server are authorized Authorization KubeletAuthorization - // seccompProfileRoot is the directory path for seccomp profiles. - SeccompProfileRoot string // allowPrivileged enables containers to request privileged mode. // Defaults to false. AllowPrivileged bool @@ -131,15 +121,6 @@ type KubeletConfiguration struct { EnableDebuggingHandlers bool // enableContentionProfiling enables lock contention profiling, if enableDebuggingHandlers is true. EnableContentionProfiling bool - // minimumGCAge is the minimum age for a finished container before it is - // garbage collected. - MinimumGCAge metav1.Duration - // maxPerPodContainerCount is the maximum number of old instances to - // retain per container. Each container takes up some disk space. - MaxPerPodContainerCount int32 - // maxContainerCount is the maximum number of old instances of containers - // to retain globally. Each container takes up some disk space. - MaxContainerCount int32 // cAdvisorPort is the port of the localhost cAdvisor endpoint (set to 0 to disable) CAdvisorPort int32 // healthzPort is the port of the localhost healthz endpoint (set to 0 to disable) @@ -150,15 +131,10 @@ type KubeletConfiguration struct { // oomScoreAdj is The oom-score-adj value for kubelet process. Values // must be within the range [-1000, 1000]. OOMScoreAdj int32 - // registerNode enables automatic registration with the apiserver. - RegisterNode bool // clusterDomain is the DNS domain for this cluster. If set, kubelet will // configure all containers to search this domain in addition to the // host's search domains. ClusterDomain string - // masterServiceNamespace is The namespace from which the kubernetes - // master services should be injected into pods. - MasterServiceNamespace string // clusterDNS is a list of IP address for a cluster DNS server. If set, // kubelet will configure all containers to use this for DNS resolution // instead of the host's DNS servers @@ -182,9 +158,6 @@ type KubeletConfiguration struct { ImageGCLowThresholdPercent int32 // How frequently to calculate and cache volume disk usage for all pods VolumeStatsAggPeriod metav1.Duration - // volumePluginDir is the full path of the directory in which to search - // for additional third party volume plugins - VolumePluginDir string // KubeletCgroups is the absolute name of cgroups to isolate the kubelet in. // +optional KubeletCgroups string @@ -196,9 +169,6 @@ type KubeletConfiguration struct { // driver that the kubelet uses to manipulate cgroups on the host (cgroupfs or systemd) // +optional CgroupDriver string - // Cgroups that container runtime is expected to be isolated in. - // +optional - RuntimeCgroups string // SystemCgroups is absolute name of cgroups in which to place // all non-kernel processes that are not already in a container. Empty // for no container. 
Rolling back the flag requires a reboot. @@ -208,12 +178,6 @@ type KubeletConfiguration struct { // If CgroupsPerQOS is enabled, this is the root of the QoS cgroup hierarchy. // +optional CgroupRoot string - // containerRuntime is the container runtime to use. - ContainerRuntime string - // remoteRuntimeEndpoint is the endpoint of remote runtime service - RemoteRuntimeEndpoint string - // remoteImageEndpoint is the endpoint of remote image service - RemoteImageEndpoint string // CPUManagerPolicy is the name of the policy to use. CPUManagerPolicy string // CPU Manager reconciliation period. @@ -222,17 +186,6 @@ type KubeletConfiguration struct { // requests - pull, logs, exec and attach. // +optional RuntimeRequestTimeout metav1.Duration - // experimentalMounterPath is the path of mounter binary. Leave empty to use the default mount path - ExperimentalMounterPath string - // lockFilePath is the path that kubelet will use to as a lock file. - // It uses this file as a lock to synchronize with other kubelet processes - // that may be running. - LockFilePath string - // ExitOnLockContention is a flag that signifies to the kubelet that it is running - // in "bootstrap" mode. This requires that 'LockFilePath' has been set. - // This will cause the kubelet to listen to inotify events on the lock file, - // releasing it and exiting when another process tries to open that file. - ExitOnLockContention bool // How should the kubelet configure the container bridge for hairpin packets. // Setting this flag allows endpoints in a Service to loadbalance back to // themselves if they should try to access their own Service. Values: @@ -248,23 +201,13 @@ type KubeletConfiguration struct { // In cluster mode, this is obtained from the master. PodCIDR string // ResolverConfig is the resolver configuration file used as the basis - // for the container DNS resolution configuration."), [] + // for the container DNS resolution configuration. ResolverConfig string // cpuCFSQuota is Enable CPU CFS quota enforcement for containers that // specify CPU limits CPUCFSQuota bool - // containerized should be set to true if kubelet is running in a container. - Containerized bool // maxOpenFiles is Number of files that can be opened by Kubelet process. MaxOpenFiles int64 - // registerSchedulable tells the kubelet to register the node as - // schedulable. Won't have any effect if register-node is false. - // DEPRECATED: use registerWithTaints instead - RegisterSchedulable bool - // registerWithTaints are an array of taints to add to a node object when - // the kubelet registers itself. This only takes effect when registerNode - // is true and upon the initial registration of the node. - RegisterWithTaints []api.Taint // contentType is contentType of requests sent to apiserver. ContentType string // kubeAPIQPS is the QPS to use while talking with kubernetes apiserver @@ -277,43 +220,32 @@ type KubeletConfiguration struct { // run docker daemon with version < 1.9 or an Aufs storage backend. // Issue #10959 has more details. SerializeImagePulls bool - // nodeLabels to add when registering the node in the cluster. - NodeLabels map[string]string - // nonMasqueradeCIDR configures masquerading: traffic to IPs outside this range will use IP masquerade. - NonMasqueradeCIDR string - // enable gathering custom metrics. - EnableCustomMetrics bool - // Comma-delimited list of hard eviction expressions. For example, 'memory.available<300Mi'. + // Map of signal names to quantities that defines hard eviction thresholds. 
For example: {"memory.available": "300Mi"}. // +optional - EvictionHard string - // Comma-delimited list of soft eviction expressions. For example, 'memory.available<300Mi'. + EvictionHard map[string]string + // Map of signal names to quantities that defines soft eviction thresholds. For example: {"memory.available": "300Mi"}. // +optional - EvictionSoft string - // Comma-delimeted list of grace periods for each soft eviction signal. For example, 'memory.available=30s'. + EvictionSoft map[string]string + // Map of signal names to quantities that defines grace periods for each soft eviction signal. For example: {"memory.available": "30s"}. // +optional - EvictionSoftGracePeriod string + EvictionSoftGracePeriod map[string]string // Duration for which the kubelet has to wait before transitioning out of an eviction pressure condition. // +optional EvictionPressureTransitionPeriod metav1.Duration // Maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. // +optional EvictionMaxPodGracePeriod int32 - // Comma-delimited list of minimum reclaims (e.g. imagefs.available=2Gi) that describes the minimum amount of resource the kubelet will reclaim when performing a pod eviction if that resource is under pressure. + // Map of signal names to quantities that defines minimum reclaims, which describe the minimum + // amount of a given resource the kubelet will reclaim when performing a pod eviction while + // that resource is under pressure. For example: {"imagefs.available": "2Gi"} // +optional - EvictionMinimumReclaim string - // If enabled, the kubelet will integrate with the kernel memcg notification to determine if memory eviction thresholds are crossed rather than polling. - // +optional - ExperimentalKernelMemcgNotification bool + EvictionMinimumReclaim map[string]string // Maximum number of pods per core. Cannot exceed MaxPods PodsPerCore int32 // enableControllerAttachDetach enables the Attach/Detach controller to // manage attachment/detachment of volumes scheduled to this node, and // disables kubelet from executing any attach/detach operations EnableControllerAttachDetach bool - // A set of ResourceName=Percentage (e.g. memory=50%) pairs that describe - // how pod resource requests are reserved at the QoS level. - // Currently only memory is supported. [default=none]" - ExperimentalQOSReserved ConfigurationMap // Default behaviour for kernel tuning ProtectKernelDefaults bool // If true, Kubelet ensures a set of iptables rules are present on host. @@ -328,21 +260,10 @@ type KubeletConfiguration struct { // iptablesDropBit is the bit of the iptables fwmark space to use for dropping packets. Kubelet will ensure iptables mark and drop rules. // Values must be within the range [0, 31]. Must be different from IPTablesMasqueradeBit IPTablesDropBit int32 - // Whitelist of unsafe sysctls or sysctl patterns (ending in *). - // +optional - AllowedUnsafeSysctls []string - // featureGates is a string of comma-separated key=value pairs that describe feature - // gates for alpha/experimental features. - FeatureGates string + // featureGates is a map of feature names to bools that enable or disable alpha/experimental features. + FeatureGates map[string]bool // Tells the Kubelet to fail to start if swap is enabled on the node. FailSwapOn bool - // This flag, if set, enables a check prior to mount operations to verify that the required components - // (binaries, etc.) to mount the volume are available on the underlying node. 
If the check is enabled - // and fails the mount operation fails. - ExperimentalCheckNodeCapabilitiesBeforeMount bool - // This flag, if set, instructs the kubelet to keep volumes from terminated pods mounted to the node. - // This can be useful for debugging volume related issues. - KeepTerminatedPodVolumes bool /* following flags are meant for Node Allocatable */ @@ -350,25 +271,22 @@ type KubeletConfiguration struct { // that describe resources reserved for non-kubernetes components. // Currently only cpu and memory are supported. [default=none] // See http://kubernetes.io/docs/user-guide/compute-resources for more detail. - SystemReserved ConfigurationMap + SystemReserved map[string]string // A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G) pairs // that describe resources reserved for kubernetes system components. - // Currently cpu, memory and local storage for root file system are supported. [default=none] + // Currently cpu, memory and local ephemeral storage for root file system are supported. [default=none] // See http://kubernetes.io/docs/user-guide/compute-resources for more detail. - KubeReserved ConfigurationMap + KubeReserved map[string]string // This flag helps kubelet identify absolute name of top level cgroup used to enforce `SystemReserved` compute resource reservation for OS system daemons. - // Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node-allocatable.md) doc for more information. + // Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md) doc for more information. SystemReservedCgroup string // This flag helps kubelet identify absolute name of top level cgroup used to enforce `KubeReserved` compute resource reservation for Kubernetes node system daemons. - // Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node-allocatable.md) doc for more information. + // Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md) doc for more information. KubeReservedCgroup string // This flag specifies the various Node Allocatable enforcements that Kubelet needs to perform. // This flag accepts a list of options. Acceptable options are `pods`, `system-reserved` & `kube-reserved`. - // Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node-allocatable.md) doc for more information. + // Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md) doc for more information. EnforceNodeAllocatable []string - // This flag, if set, will avoid including `EvictionHard` limits while computing Node Allocatable. - // Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node-allocatable.md) doc for more information. - ExperimentalNodeAllocatableIgnoreEvictionThreshold bool } type KubeletAuthorizationMode string @@ -426,33 +344,3 @@ type KubeletAnonymousAuthentication struct { // Anonymous requests have a username of system:anonymous, and a group name of system:unauthenticated. 
Enabled bool } - -type ConfigurationMap map[string]string - -func (m *ConfigurationMap) String() string { - pairs := []string{} - for k, v := range *m { - pairs = append(pairs, fmt.Sprintf("%s=%s", k, v)) - } - sort.Strings(pairs) - return strings.Join(pairs, ",") -} - -func (m *ConfigurationMap) Set(value string) error { - for _, s := range strings.Split(value, ",") { - if len(s) == 0 { - continue - } - arr := strings.SplitN(s, "=", 2) - if len(arr) == 2 { - (*m)[strings.TrimSpace(arr[0])] = strings.TrimSpace(arr[1]) - } else { - (*m)[strings.TrimSpace(arr[0])] = "" - } - } - return nil -} - -func (*ConfigurationMap) Type() string { - return "mapStringString" -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1/BUILD index 0618be7f1363..bfab5b2758dc 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1/BUILD @@ -1,7 +1,5 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", @@ -18,15 +16,13 @@ go_library( "zz_generated.deepcopy.go", "zz_generated.defaults.go", ], - tags = ["automanaged"], + importpath = "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1", deps = [ - "//pkg/api:go_default_library", "//pkg/kubelet/apis/kubeletconfig:go_default_library", "//pkg/kubelet/qos:go_default_library", "//pkg/kubelet/types:go_default_library", "//pkg/master/ports:go_default_library", "//pkg/util/pointer:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1/defaults.go b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1/defaults.go index 26ead1f7ca44..90ac66813b51 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1/defaults.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1/defaults.go @@ -17,8 +17,6 @@ limitations under the License. package v1alpha1 import ( - "path/filepath" - "runtime" "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -38,14 +36,14 @@ const ( // More details here: https://github.com/kubernetes/kubernetes/issues/50986 AutoDetectCloudProvider = "auto-detect" - defaultIPTablesMasqueradeBit = 14 - defaultIPTablesDropBit = 15 + DefaultIPTablesMasqueradeBit = 14 + DefaultIPTablesDropBit = 15 ) var ( zeroDuration = metav1.Duration{} - // Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node-allocatable.md) doc for more information. - defaultNodeAllocatableEnforcement = []string{"pods"} + // Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md) doc for more information. 
+ DefaultNodeAllocatableEnforcement = []string{"pods"} ) func addDefaultingFuncs(scheme *kruntime.Scheme) error { @@ -85,9 +83,6 @@ func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) { if obj.VolumeStatsAggPeriod == zeroDuration { obj.VolumeStatsAggPeriod = metav1.Duration{Duration: time.Minute} } - if obj.ContainerRuntime == "" { - obj.ContainerRuntime = kubetypes.DockerContainerRuntime - } if obj.RuntimeRequestTimeout == zeroDuration { obj.RuntimeRequestTimeout = metav1.Duration{Duration: 2 * time.Minute} } @@ -143,31 +138,12 @@ func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) { temp := int32(80) obj.ImageGCLowThresholdPercent = &temp } - if obj.MasterServiceNamespace == "" { - obj.MasterServiceNamespace = metav1.NamespaceDefault - } - if obj.MaxContainerCount == nil { - temp := int32(-1) - obj.MaxContainerCount = &temp - } - if obj.MaxPerPodContainerCount == 0 { - obj.MaxPerPodContainerCount = 1 - } if obj.MaxOpenFiles == 0 { obj.MaxOpenFiles = 1000000 } if obj.MaxPods == 0 { obj.MaxPods = 110 } - if obj.MinimumGCAge == zeroDuration { - obj.MinimumGCAge = metav1.Duration{Duration: 0} - } - if obj.NonMasqueradeCIDR == "" { - obj.NonMasqueradeCIDR = "10.0.0.0/8" - } - if obj.VolumePluginDir == "" { - obj.VolumePluginDir = "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/" - } if obj.NodeStatusUpdateFrequency == zeroDuration { obj.NodeStatusUpdateFrequency = metav1.Duration{Duration: 10 * time.Second} } @@ -187,12 +163,6 @@ func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) { if obj.ReadOnlyPort == nil { obj.ReadOnlyPort = utilpointer.Int32Ptr(ports.KubeletReadOnlyPort) } - if obj.RegisterNode == nil { - obj.RegisterNode = boolVar(true) - } - if obj.RegisterSchedulable == nil { - obj.RegisterSchedulable = boolVar(true) - } if obj.RegistryBurst == 0 { obj.RegistryBurst = 10 } @@ -206,9 +176,6 @@ func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) { if obj.SerializeImagePulls == nil { obj.SerializeImagePulls = boolVar(true) } - if obj.SeccompProfileRoot == "" { - obj.SeccompProfileRoot = filepath.Join(DefaultRootDir, "seccomp") - } if obj.StreamingConnectionIdleTimeout == zeroDuration { obj.StreamingConnectionIdleTimeout = metav1.Duration{Duration: 4 * time.Hour} } @@ -229,33 +196,25 @@ func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) { obj.HairpinMode = PromiscuousBridge } if obj.EvictionHard == nil { - temp := "memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%" - obj.EvictionHard = &temp + obj.EvictionHard = map[string]string{ + "memory.available": "100Mi", + "nodefs.available": "10%", + "nodefs.inodesFree": "5%", + "imagefs.available": "15%", + } } if obj.EvictionPressureTransitionPeriod == zeroDuration { obj.EvictionPressureTransitionPeriod = metav1.Duration{Duration: 5 * time.Minute} } - if obj.ExperimentalKernelMemcgNotification == nil { - obj.ExperimentalKernelMemcgNotification = boolVar(false) - } - if obj.SystemReserved == nil { - obj.SystemReserved = make(map[string]string) - } - if obj.KubeReserved == nil { - obj.KubeReserved = make(map[string]string) - } - if obj.ExperimentalQOSReserved == nil { - obj.ExperimentalQOSReserved = make(map[string]string) - } if obj.MakeIPTablesUtilChains == nil { obj.MakeIPTablesUtilChains = boolVar(true) } if obj.IPTablesMasqueradeBit == nil { - temp := int32(defaultIPTablesMasqueradeBit) + temp := int32(DefaultIPTablesMasqueradeBit) obj.IPTablesMasqueradeBit = &temp } if obj.IPTablesDropBit == nil { - temp := int32(defaultIPTablesDropBit) + temp := 
int32(DefaultIPTablesDropBit) obj.IPTablesDropBit = &temp } if obj.CgroupsPerQOS == nil { @@ -266,14 +225,7 @@ func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) { obj.CgroupDriver = "cgroupfs" } if obj.EnforceNodeAllocatable == nil { - obj.EnforceNodeAllocatable = defaultNodeAllocatableEnforcement - } - if obj.RemoteRuntimeEndpoint == "" { - if runtime.GOOS == "linux" { - obj.RemoteRuntimeEndpoint = "unix:///var/run/dockershim.sock" - } else if runtime.GOOS == "windows" { - obj.RemoteRuntimeEndpoint = "tcp://localhost:3735" - } + obj.EnforceNodeAllocatable = DefaultNodeAllocatableEnforcement } } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1/doc.go b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1/doc.go index 16a3c2528686..c3f38c345379 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:conversion-gen=k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig // +k8s:openapi-gen=true // +k8s:defaulter-gen=TypeMeta diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1/types.go b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1/types.go index 0e6172e69321..0c661f8a0eed 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1/types.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1/types.go @@ -17,7 +17,6 @@ limitations under the License. package v1alpha1 import ( - "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -41,10 +40,8 @@ const ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// A configuration field should go in KubeletFlags instead of KubeletConfiguration if any of these are true: -// - its value will never, or cannot safely be changed during the lifetime of a node -// - its value cannot be safely shared between nodes at the same time (e.g. a hostname) -// KubeletConfiguration is intended to be shared between nodes +// A configuration field should go in KubeletFlags instead of KubeletConfiguration if +// its value cannot be safely shared between nodes at the same time (e.g. a hostname) // In general, please try to avoid adding flags or configuration fields, // we already have a confusingly large amount of them. type KubeletConfiguration struct { @@ -68,7 +65,7 @@ type KubeletConfiguration struct { ManifestURL string `json:"manifestURL"` // manifestURLHeader is the HTTP header to use when accessing the manifest // URL, with the key separated from the value with a ':', as in 'key:value' - ManifestURLHeader string `json:"manifestURLHeader"` + ManifestURLHeader map[string][]string `json:"manifestURLHeader"` // enableServer enables the Kubelet's server EnableServer *bool `json:"enableServer"` // address is the IP address for the Kubelet to serve on (set to 0.0.0.0 @@ -92,8 +89,6 @@ type KubeletConfiguration struct { Authentication KubeletAuthentication `json:"authentication"` // authorization specifies how requests to the Kubelet's server are authorized Authorization KubeletAuthorization `json:"authorization"` - // seccompProfileRoot is the directory path for seccomp profiles. - SeccompProfileRoot string `json:"seccompProfileRoot"` // allowPrivileged enables containers to request privileged mode. 
// Defaults to false. AllowPrivileged *bool `json:"allowPrivileged"` @@ -126,15 +121,6 @@ type KubeletConfiguration struct { EnableDebuggingHandlers *bool `json:"enableDebuggingHandlers"` // enableContentionProfiling enables lock contention profiling, if enableDebuggingHandlers is true. EnableContentionProfiling bool `json:"enableContentionProfiling"` - // minimumGCAge is the minimum age for a finished container before it is - // garbage collected. - MinimumGCAge metav1.Duration `json:"minimumGCAge"` - // maxPerPodContainerCount is the maximum number of old instances to - // retain per container. Each container takes up some disk space. - MaxPerPodContainerCount int32 `json:"maxPerPodContainerCount"` - // maxContainerCount is the maximum number of old instances of containers - // to retain globally. Each container takes up some disk space. - MaxContainerCount *int32 `json:"maxContainerCount"` // cAdvisorPort is the port of the localhost cAdvisor endpoint (set to 0 to disable) CAdvisorPort *int32 `json:"cAdvisorPort"` // healthzPort is the port of the localhost healthz endpoint (set to 0 to disable) @@ -145,15 +131,10 @@ type KubeletConfiguration struct { // oomScoreAdj is The oom-score-adj value for kubelet process. Values // must be within the range [-1000, 1000]. OOMScoreAdj *int32 `json:"oomScoreAdj"` - // registerNode enables automatic registration with the apiserver. - RegisterNode *bool `json:"registerNode"` // clusterDomain is the DNS domain for this cluster. If set, kubelet will // configure all containers to search this domain in addition to the // host's search domains. ClusterDomain string `json:"clusterDomain"` - // masterServiceNamespace is The namespace from which the kubernetes - // master services should be injected into pods. - MasterServiceNamespace string `json:"masterServiceNamespace"` // clusterDNS is a list of IP address for the cluster DNS server. If set, // kubelet will configure all containers to use this for DNS resolution // instead of the host's DNS servers @@ -178,13 +159,8 @@ type KubeletConfiguration struct { ImageGCLowThresholdPercent *int32 `json:"imageGCLowThresholdPercent"` // How frequently to calculate and cache volume disk usage for all pods VolumeStatsAggPeriod metav1.Duration `json:"volumeStatsAggPeriod"` - // volumePluginDir is the full path of the directory in which to search - // for additional third party volume plugins - VolumePluginDir string `json:"volumePluginDir"` // kubeletCgroups is the absolute name of cgroups to isolate the kubelet in. KubeletCgroups string `json:"kubeletCgroups"` - // runtimeCgroups are cgroups that container runtime is expected to be isolated in. - RuntimeCgroups string `json:"runtimeCgroups"` // systemCgroups is absolute name of cgroups in which to place // all non-kernel processes that are not already in a container. Empty // for no container. Rolling back the flag requires a reboot. @@ -200,12 +176,6 @@ type KubeletConfiguration struct { // driver that the kubelet uses to manipulate cgroups on the host (cgroupfs or systemd) // +optional CgroupDriver string `json:"cgroupDriver,omitempty"` - // containerRuntime is the container runtime to use. - ContainerRuntime string `json:"containerRuntime"` - // remoteRuntimeEndpoint is the endpoint of remote runtime service - RemoteRuntimeEndpoint string `json:"remoteRuntimeEndpoint"` - // remoteImageEndpoint is the endpoint of remote image service - RemoteImageEndpoint string `json:"remoteImageEndpoint"` // CPUManagerPolicy is the name of the policy to use. 
CPUManagerPolicy string `json:"cpuManagerPolicy"` // CPU Manager reconciliation period. @@ -213,18 +183,6 @@ type KubeletConfiguration struct { // runtimeRequestTimeout is the timeout for all runtime requests except long running // requests - pull, logs, exec and attach. RuntimeRequestTimeout metav1.Duration `json:"runtimeRequestTimeout"` - // experimentalMounterPath is the path to mounter binary. If not set, kubelet will attempt to use mount - // binary that is available via $PATH, - ExperimentalMounterPath string `json:"experimentalMounterPath,omitempty"` - // lockFilePath is the path that kubelet will use to as a lock file. - // It uses this file as a lock to synchronize with other kubelet processes - // that may be running. - LockFilePath *string `json:"lockFilePath"` - // ExitOnLockContention is a flag that signifies to the kubelet that it is running - // in "bootstrap" mode. This requires that 'LockFilePath' has been set. - // This will cause the kubelet to listen to inotify events on the lock file, - // releasing it and exiting when another process tries to open that file. - ExitOnLockContention bool `json:"exitOnLockContention"` // How should the kubelet configure the container bridge for hairpin packets. // Setting this flag allows endpoints in a Service to loadbalance back to // themselves if they should try to access their own Service. Values: @@ -240,23 +198,13 @@ type KubeletConfiguration struct { // In cluster mode, this is obtained from the master. PodCIDR string `json:"podCIDR"` // ResolverConfig is the resolver configuration file used as the basis - // for the container DNS resolution configuration."), [] + // for the container DNS resolution configuration. ResolverConfig string `json:"resolvConf"` // cpuCFSQuota is Enable CPU CFS quota enforcement for containers that // specify CPU limits CPUCFSQuota *bool `json:"cpuCFSQuota"` - // containerized should be set to true if kubelet is running in a container. - Containerized *bool `json:"containerized"` // maxOpenFiles is Number of files that can be opened by Kubelet process. MaxOpenFiles int64 `json:"maxOpenFiles"` - // registerSchedulable tells the kubelet to register the node as - // schedulable. Won't have any effect if register-node is false. - // DEPRECATED: use registerWithTaints instead - RegisterSchedulable *bool `json:"registerSchedulable"` - // registerWithTaints are an array of taints to add to a node object when - // the kubelet registers itself. This only takes effect when registerNode - // is true and upon the initial registration of the node. - RegisterWithTaints []v1.Taint `json:"registerWithTaints"` // contentType is contentType of requests sent to apiserver. ContentType string `json:"contentType"` // kubeAPIQPS is the QPS to use while talking with kubernetes apiserver @@ -269,36 +217,30 @@ type KubeletConfiguration struct { // run docker daemon with version < 1.9 or an Aufs storage backend. // Issue #10959 has more details. SerializeImagePulls *bool `json:"serializeImagePulls"` - // nodeLabels to add when registering the node in the cluster. - NodeLabels map[string]string `json:"nodeLabels"` - // nonMasqueradeCIDR configures masquerading: traffic to IPs outside this range will use IP masquerade. - NonMasqueradeCIDR string `json:"nonMasqueradeCIDR"` - // enable gathering custom metrics. - EnableCustomMetrics bool `json:"enableCustomMetrics"` - // Comma-delimited list of hard eviction expressions. For example, 'memory.available<300Mi'. 
- EvictionHard *string `json:"evictionHard"` - // Comma-delimited list of soft eviction expressions. For example, 'memory.available<300Mi'. - EvictionSoft string `json:"evictionSoft"` - // Comma-delimeted list of grace periods for each soft eviction signal. For example, 'memory.available=30s'. - EvictionSoftGracePeriod string `json:"evictionSoftGracePeriod"` + // Map of signal names to quantities that defines hard eviction thresholds. For example: {"memory.available": "300Mi"}. + // +optional + EvictionHard map[string]string `json:"evictionHard"` + // Map of signal names to quantities that defines soft eviction thresholds. For example: {"memory.available": "300Mi"}. + // +optional + EvictionSoft map[string]string `json:"evictionSoft"` + // Map of signal names to quantities that defines grace periods for each soft eviction signal. For example: {"memory.available": "30s"}. + // +optional + EvictionSoftGracePeriod map[string]string `json:"evictionSoftGracePeriod"` // Duration for which the kubelet has to wait before transitioning out of an eviction pressure condition. EvictionPressureTransitionPeriod metav1.Duration `json:"evictionPressureTransitionPeriod"` // Maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. EvictionMaxPodGracePeriod int32 `json:"evictionMaxPodGracePeriod"` - // Comma-delimited list of minimum reclaims (e.g. imagefs.available=2Gi) that describes the minimum amount of resource the kubelet will reclaim when performing a pod eviction if that resource is under pressure. - EvictionMinimumReclaim string `json:"evictionMinimumReclaim"` - // If enabled, the kubelet will integrate with the kernel memcg notification to determine if memory eviction thresholds are crossed rather than polling. - ExperimentalKernelMemcgNotification *bool `json:"experimentalKernelMemcgNotification"` + // Map of signal names to quantities that defines minimum reclaims, which describe the minimum + // amount of a given resource the kubelet will reclaim when performing a pod eviction while + // that resource is under pressure. For example: {"imagefs.available": "2Gi"} + // +optional + EvictionMinimumReclaim map[string]string `json:"evictionMinimumReclaim"` // Maximum number of pods per core. Cannot exceed MaxPods PodsPerCore int32 `json:"podsPerCore"` // enableControllerAttachDetach enables the Attach/Detach controller to // manage attachment/detachment of volumes scheduled to this node, and // disables kubelet from executing any attach/detach operations EnableControllerAttachDetach *bool `json:"enableControllerAttachDetach"` - // A set of ResourceName=Percentage (e.g. memory=50%) pairs that describe - // how pod resource requests are reserved at the QoS level. - // Currently only memory is supported. [default=none]" - ExperimentalQOSReserved map[string]string `json:"experimentalQOSReserved"` // Default behaviour for kernel tuning ProtectKernelDefaults bool `json:"protectKernelDefaults"` // If true, Kubelet ensures a set of iptables rules are present on host. @@ -313,22 +255,10 @@ type KubeletConfiguration struct { // iptablesDropBit is the bit of the iptables fwmark space to mark for dropping packets. // Values must be within the range [0, 31]. Must be different from other mark bits. IPTablesDropBit *int32 `json:"iptablesDropBit"` - // Whitelist of unsafe sysctls or sysctl patterns (ending in *). Use these at your own risk. - // Resource isolation might be lacking and pod might influence each other on the same node. 
- // +optional - AllowedUnsafeSysctls []string `json:"allowedUnsafeSysctls,omitempty"` - // featureGates is a string of comma-separated key=value pairs that describe feature - // gates for alpha/experimental features. - FeatureGates string `json:"featureGates,omitempty"` + // featureGates is a map of feature names to bools that enable or disable alpha/experimental features. + FeatureGates map[string]bool `json:"featureGates,omitempty"` // Tells the Kubelet to fail to start if swap is enabled on the node. FailSwapOn bool `json:"failSwapOn,omitempty"` - // This flag, if set, enables a check prior to mount operations to verify that the required components - // (binaries, etc.) to mount the volume are available on the underlying node. If the check is enabled - // and fails the mount operation fails. - ExperimentalCheckNodeCapabilitiesBeforeMount bool `json:"experimentalCheckNodeCapabilitiesBeforeMount,omitempty"` - // This flag, if set, instructs the kubelet to keep volumes from terminated pods mounted to the node. - // This can be useful for debugging volume related issues. - KeepTerminatedPodVolumes bool `json:"keepTerminatedPodVolumes,omitempty"` /* following flags are meant for Node Allocatable */ @@ -344,18 +274,15 @@ type KubeletConfiguration struct { KubeReserved map[string]string `json:"kubeReserved"` // This flag helps kubelet identify absolute name of top level cgroup used to enforce `SystemReserved` compute resource reservation for OS system daemons. - // Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node-allocatable.md) doc for more information. + // Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md) doc for more information. SystemReservedCgroup string `json:"systemReservedCgroup,omitempty"` // This flag helps kubelet identify absolute name of top level cgroup used to enforce `KubeReserved` compute resource reservation for Kubernetes node system daemons. - // Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node-allocatable.md) doc for more information. + // Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md) doc for more information. KubeReservedCgroup string `json:"kubeReservedCgroup,omitempty"` // This flag specifies the various Node Allocatable enforcements that Kubelet needs to perform. // This flag accepts a list of options. Acceptible options are `pods`, `system-reserved` & `kube-reserved`. - // Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node-allocatable.md) doc for more information. + // Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md) doc for more information. EnforceNodeAllocatable []string `json:"enforceNodeAllocatable"` - // This flag, if set, will avoid including `EvictionHard` limits while computing Node Allocatable. - // Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node-allocatable.md) doc for more information. 
- ExperimentalNodeAllocatableIgnoreEvictionThreshold bool `json:"experimentalNodeAllocatableIgnoreEvictionThreshold,omitempty"` } type KubeletAuthorizationMode string diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1/zz_generated.conversion.go index 87cf779c0dbd..b4701bb564cb 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1/zz_generated.conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1/zz_generated.conversion.go @@ -21,11 +21,9 @@ limitations under the License. package v1alpha1 import ( - core_v1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - api "k8s.io/kubernetes/pkg/api" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig" unsafe "unsafe" ) @@ -148,7 +146,7 @@ func autoConvert_v1alpha1_KubeletConfiguration_To_kubeletconfig_KubeletConfigura out.FileCheckFrequency = in.FileCheckFrequency out.HTTPCheckFrequency = in.HTTPCheckFrequency out.ManifestURL = in.ManifestURL - out.ManifestURLHeader = in.ManifestURLHeader + out.ManifestURLHeader = *(*map[string][]string)(unsafe.Pointer(&in.ManifestURLHeader)) if err := v1.Convert_Pointer_bool_To_bool(&in.EnableServer, &out.EnableServer, s); err != nil { return err } @@ -165,7 +163,6 @@ func autoConvert_v1alpha1_KubeletConfiguration_To_kubeletconfig_KubeletConfigura if err := Convert_v1alpha1_KubeletAuthorization_To_kubeletconfig_KubeletAuthorization(&in.Authorization, &out.Authorization, s); err != nil { return err } - out.SeccompProfileRoot = in.SeccompProfileRoot if err := v1.Convert_Pointer_bool_To_bool(&in.AllowPrivileged, &out.AllowPrivileged, s); err != nil { return err } @@ -184,11 +181,6 @@ func autoConvert_v1alpha1_KubeletConfiguration_To_kubeletconfig_KubeletConfigura return err } out.EnableContentionProfiling = in.EnableContentionProfiling - out.MinimumGCAge = in.MinimumGCAge - out.MaxPerPodContainerCount = in.MaxPerPodContainerCount - if err := v1.Convert_Pointer_int32_To_int32(&in.MaxContainerCount, &out.MaxContainerCount, s); err != nil { - return err - } if err := v1.Convert_Pointer_int32_To_int32(&in.CAdvisorPort, &out.CAdvisorPort, s); err != nil { return err } @@ -199,11 +191,7 @@ func autoConvert_v1alpha1_KubeletConfiguration_To_kubeletconfig_KubeletConfigura if err := v1.Convert_Pointer_int32_To_int32(&in.OOMScoreAdj, &out.OOMScoreAdj, s); err != nil { return err } - if err := v1.Convert_Pointer_bool_To_bool(&in.RegisterNode, &out.RegisterNode, s); err != nil { - return err - } out.ClusterDomain = in.ClusterDomain - out.MasterServiceNamespace = in.MasterServiceNamespace out.ClusterDNS = *(*[]string)(unsafe.Pointer(&in.ClusterDNS)) out.StreamingConnectionIdleTimeout = in.StreamingConnectionIdleTimeout out.NodeStatusUpdateFrequency = in.NodeStatusUpdateFrequency @@ -215,26 +203,16 @@ func autoConvert_v1alpha1_KubeletConfiguration_To_kubeletconfig_KubeletConfigura return err } out.VolumeStatsAggPeriod = in.VolumeStatsAggPeriod - out.VolumePluginDir = in.VolumePluginDir out.KubeletCgroups = in.KubeletCgroups - out.RuntimeCgroups = in.RuntimeCgroups out.SystemCgroups = in.SystemCgroups out.CgroupRoot = in.CgroupRoot if err := v1.Convert_Pointer_bool_To_bool(&in.CgroupsPerQOS, &out.CgroupsPerQOS, s); err != nil { return err } out.CgroupDriver = in.CgroupDriver - out.ContainerRuntime = in.ContainerRuntime - out.RemoteRuntimeEndpoint = 
in.RemoteRuntimeEndpoint - out.RemoteImageEndpoint = in.RemoteImageEndpoint out.CPUManagerPolicy = in.CPUManagerPolicy out.CPUManagerReconcilePeriod = in.CPUManagerReconcilePeriod out.RuntimeRequestTimeout = in.RuntimeRequestTimeout - out.ExperimentalMounterPath = in.ExperimentalMounterPath - if err := v1.Convert_Pointer_string_To_string(&in.LockFilePath, &out.LockFilePath, s); err != nil { - return err - } - out.ExitOnLockContention = in.ExitOnLockContention out.HairpinMode = in.HairpinMode out.MaxPods = in.MaxPods out.PodCIDR = in.PodCIDR @@ -242,14 +220,7 @@ func autoConvert_v1alpha1_KubeletConfiguration_To_kubeletconfig_KubeletConfigura if err := v1.Convert_Pointer_bool_To_bool(&in.CPUCFSQuota, &out.CPUCFSQuota, s); err != nil { return err } - if err := v1.Convert_Pointer_bool_To_bool(&in.Containerized, &out.Containerized, s); err != nil { - return err - } out.MaxOpenFiles = in.MaxOpenFiles - if err := v1.Convert_Pointer_bool_To_bool(&in.RegisterSchedulable, &out.RegisterSchedulable, s); err != nil { - return err - } - out.RegisterWithTaints = *(*[]api.Taint)(unsafe.Pointer(&in.RegisterWithTaints)) out.ContentType = in.ContentType if err := v1.Convert_Pointer_int32_To_int32(&in.KubeAPIQPS, &out.KubeAPIQPS, s); err != nil { return err @@ -258,25 +229,16 @@ func autoConvert_v1alpha1_KubeletConfiguration_To_kubeletconfig_KubeletConfigura if err := v1.Convert_Pointer_bool_To_bool(&in.SerializeImagePulls, &out.SerializeImagePulls, s); err != nil { return err } - out.NodeLabels = *(*map[string]string)(unsafe.Pointer(&in.NodeLabels)) - out.NonMasqueradeCIDR = in.NonMasqueradeCIDR - out.EnableCustomMetrics = in.EnableCustomMetrics - if err := v1.Convert_Pointer_string_To_string(&in.EvictionHard, &out.EvictionHard, s); err != nil { - return err - } - out.EvictionSoft = in.EvictionSoft - out.EvictionSoftGracePeriod = in.EvictionSoftGracePeriod + out.EvictionHard = *(*map[string]string)(unsafe.Pointer(&in.EvictionHard)) + out.EvictionSoft = *(*map[string]string)(unsafe.Pointer(&in.EvictionSoft)) + out.EvictionSoftGracePeriod = *(*map[string]string)(unsafe.Pointer(&in.EvictionSoftGracePeriod)) out.EvictionPressureTransitionPeriod = in.EvictionPressureTransitionPeriod out.EvictionMaxPodGracePeriod = in.EvictionMaxPodGracePeriod - out.EvictionMinimumReclaim = in.EvictionMinimumReclaim - if err := v1.Convert_Pointer_bool_To_bool(&in.ExperimentalKernelMemcgNotification, &out.ExperimentalKernelMemcgNotification, s); err != nil { - return err - } + out.EvictionMinimumReclaim = *(*map[string]string)(unsafe.Pointer(&in.EvictionMinimumReclaim)) out.PodsPerCore = in.PodsPerCore if err := v1.Convert_Pointer_bool_To_bool(&in.EnableControllerAttachDetach, &out.EnableControllerAttachDetach, s); err != nil { return err } - out.ExperimentalQOSReserved = *(*kubeletconfig.ConfigurationMap)(unsafe.Pointer(&in.ExperimentalQOSReserved)) out.ProtectKernelDefaults = in.ProtectKernelDefaults if err := v1.Convert_Pointer_bool_To_bool(&in.MakeIPTablesUtilChains, &out.MakeIPTablesUtilChains, s); err != nil { return err @@ -287,17 +249,13 @@ func autoConvert_v1alpha1_KubeletConfiguration_To_kubeletconfig_KubeletConfigura if err := v1.Convert_Pointer_int32_To_int32(&in.IPTablesDropBit, &out.IPTablesDropBit, s); err != nil { return err } - out.AllowedUnsafeSysctls = *(*[]string)(unsafe.Pointer(&in.AllowedUnsafeSysctls)) - out.FeatureGates = in.FeatureGates + out.FeatureGates = *(*map[string]bool)(unsafe.Pointer(&in.FeatureGates)) out.FailSwapOn = in.FailSwapOn - out.ExperimentalCheckNodeCapabilitiesBeforeMount = 
in.ExperimentalCheckNodeCapabilitiesBeforeMount - out.KeepTerminatedPodVolumes = in.KeepTerminatedPodVolumes - out.SystemReserved = *(*kubeletconfig.ConfigurationMap)(unsafe.Pointer(&in.SystemReserved)) - out.KubeReserved = *(*kubeletconfig.ConfigurationMap)(unsafe.Pointer(&in.KubeReserved)) + out.SystemReserved = *(*map[string]string)(unsafe.Pointer(&in.SystemReserved)) + out.KubeReserved = *(*map[string]string)(unsafe.Pointer(&in.KubeReserved)) out.SystemReservedCgroup = in.SystemReservedCgroup out.KubeReservedCgroup = in.KubeReservedCgroup out.EnforceNodeAllocatable = *(*[]string)(unsafe.Pointer(&in.EnforceNodeAllocatable)) - out.ExperimentalNodeAllocatableIgnoreEvictionThreshold = in.ExperimentalNodeAllocatableIgnoreEvictionThreshold return nil } @@ -313,7 +271,7 @@ func autoConvert_kubeletconfig_KubeletConfiguration_To_v1alpha1_KubeletConfigura out.FileCheckFrequency = in.FileCheckFrequency out.HTTPCheckFrequency = in.HTTPCheckFrequency out.ManifestURL = in.ManifestURL - out.ManifestURLHeader = in.ManifestURLHeader + out.ManifestURLHeader = *(*map[string][]string)(unsafe.Pointer(&in.ManifestURLHeader)) if err := v1.Convert_bool_To_Pointer_bool(&in.EnableServer, &out.EnableServer, s); err != nil { return err } @@ -330,7 +288,6 @@ func autoConvert_kubeletconfig_KubeletConfiguration_To_v1alpha1_KubeletConfigura if err := Convert_kubeletconfig_KubeletAuthorization_To_v1alpha1_KubeletAuthorization(&in.Authorization, &out.Authorization, s); err != nil { return err } - out.SeccompProfileRoot = in.SeccompProfileRoot if err := v1.Convert_bool_To_Pointer_bool(&in.AllowPrivileged, &out.AllowPrivileged, s); err != nil { return err } @@ -349,11 +306,6 @@ func autoConvert_kubeletconfig_KubeletConfiguration_To_v1alpha1_KubeletConfigura return err } out.EnableContentionProfiling = in.EnableContentionProfiling - out.MinimumGCAge = in.MinimumGCAge - out.MaxPerPodContainerCount = in.MaxPerPodContainerCount - if err := v1.Convert_int32_To_Pointer_int32(&in.MaxContainerCount, &out.MaxContainerCount, s); err != nil { - return err - } if err := v1.Convert_int32_To_Pointer_int32(&in.CAdvisorPort, &out.CAdvisorPort, s); err != nil { return err } @@ -364,11 +316,7 @@ func autoConvert_kubeletconfig_KubeletConfiguration_To_v1alpha1_KubeletConfigura if err := v1.Convert_int32_To_Pointer_int32(&in.OOMScoreAdj, &out.OOMScoreAdj, s); err != nil { return err } - if err := v1.Convert_bool_To_Pointer_bool(&in.RegisterNode, &out.RegisterNode, s); err != nil { - return err - } out.ClusterDomain = in.ClusterDomain - out.MasterServiceNamespace = in.MasterServiceNamespace out.ClusterDNS = *(*[]string)(unsafe.Pointer(&in.ClusterDNS)) out.StreamingConnectionIdleTimeout = in.StreamingConnectionIdleTimeout out.NodeStatusUpdateFrequency = in.NodeStatusUpdateFrequency @@ -380,26 +328,16 @@ func autoConvert_kubeletconfig_KubeletConfiguration_To_v1alpha1_KubeletConfigura return err } out.VolumeStatsAggPeriod = in.VolumeStatsAggPeriod - out.VolumePluginDir = in.VolumePluginDir out.KubeletCgroups = in.KubeletCgroups if err := v1.Convert_bool_To_Pointer_bool(&in.CgroupsPerQOS, &out.CgroupsPerQOS, s); err != nil { return err } out.CgroupDriver = in.CgroupDriver - out.RuntimeCgroups = in.RuntimeCgroups out.SystemCgroups = in.SystemCgroups out.CgroupRoot = in.CgroupRoot - out.ContainerRuntime = in.ContainerRuntime - out.RemoteRuntimeEndpoint = in.RemoteRuntimeEndpoint - out.RemoteImageEndpoint = in.RemoteImageEndpoint out.CPUManagerPolicy = in.CPUManagerPolicy out.CPUManagerReconcilePeriod = in.CPUManagerReconcilePeriod 
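[Reviewer note] The generated converters above and below avoid per-entry copying by reinterpreting a map of one declared type as a structurally identical map through unsafe.Pointer, e.g. *(*map[string]string)(unsafe.Pointer(&in.SystemReserved)). A minimal, self-contained sketch of that pattern follows; the ConfigurationMap name mirrors the type that appears elsewhere in this diff, everything else here is illustrative and not part of the kubelet packages.

package main

import (
	"fmt"
	"unsafe"
)

// ConfigurationMap is structurally identical to map[string]string;
// only the declared type name differs.
type ConfigurationMap map[string]string

func main() {
	in := ConfigurationMap{"memory": "500Mi", "cpu": "200m"}

	// Reinterpret the ConfigurationMap value as a plain map[string]string.
	// No entries are copied; both variables share the same backing map,
	// which is why the generated code can do this in one assignment.
	out := *(*map[string]string)(unsafe.Pointer(&in))

	out["ephemeral-storage"] = "1Gi"
	fmt.Println(in["ephemeral-storage"]) // prints "1Gi": same underlying map
}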
out.RuntimeRequestTimeout = in.RuntimeRequestTimeout - out.ExperimentalMounterPath = in.ExperimentalMounterPath - if err := v1.Convert_string_To_Pointer_string(&in.LockFilePath, &out.LockFilePath, s); err != nil { - return err - } - out.ExitOnLockContention = in.ExitOnLockContention out.HairpinMode = in.HairpinMode out.MaxPods = in.MaxPods out.PodCIDR = in.PodCIDR @@ -407,14 +345,7 @@ func autoConvert_kubeletconfig_KubeletConfiguration_To_v1alpha1_KubeletConfigura if err := v1.Convert_bool_To_Pointer_bool(&in.CPUCFSQuota, &out.CPUCFSQuota, s); err != nil { return err } - if err := v1.Convert_bool_To_Pointer_bool(&in.Containerized, &out.Containerized, s); err != nil { - return err - } out.MaxOpenFiles = in.MaxOpenFiles - if err := v1.Convert_bool_To_Pointer_bool(&in.RegisterSchedulable, &out.RegisterSchedulable, s); err != nil { - return err - } - out.RegisterWithTaints = *(*[]core_v1.Taint)(unsafe.Pointer(&in.RegisterWithTaints)) out.ContentType = in.ContentType if err := v1.Convert_int32_To_Pointer_int32(&in.KubeAPIQPS, &out.KubeAPIQPS, s); err != nil { return err @@ -423,25 +354,16 @@ func autoConvert_kubeletconfig_KubeletConfiguration_To_v1alpha1_KubeletConfigura if err := v1.Convert_bool_To_Pointer_bool(&in.SerializeImagePulls, &out.SerializeImagePulls, s); err != nil { return err } - out.NodeLabels = *(*map[string]string)(unsafe.Pointer(&in.NodeLabels)) - out.NonMasqueradeCIDR = in.NonMasqueradeCIDR - out.EnableCustomMetrics = in.EnableCustomMetrics - if err := v1.Convert_string_To_Pointer_string(&in.EvictionHard, &out.EvictionHard, s); err != nil { - return err - } - out.EvictionSoft = in.EvictionSoft - out.EvictionSoftGracePeriod = in.EvictionSoftGracePeriod + out.EvictionHard = *(*map[string]string)(unsafe.Pointer(&in.EvictionHard)) + out.EvictionSoft = *(*map[string]string)(unsafe.Pointer(&in.EvictionSoft)) + out.EvictionSoftGracePeriod = *(*map[string]string)(unsafe.Pointer(&in.EvictionSoftGracePeriod)) out.EvictionPressureTransitionPeriod = in.EvictionPressureTransitionPeriod out.EvictionMaxPodGracePeriod = in.EvictionMaxPodGracePeriod - out.EvictionMinimumReclaim = in.EvictionMinimumReclaim - if err := v1.Convert_bool_To_Pointer_bool(&in.ExperimentalKernelMemcgNotification, &out.ExperimentalKernelMemcgNotification, s); err != nil { - return err - } + out.EvictionMinimumReclaim = *(*map[string]string)(unsafe.Pointer(&in.EvictionMinimumReclaim)) out.PodsPerCore = in.PodsPerCore if err := v1.Convert_bool_To_Pointer_bool(&in.EnableControllerAttachDetach, &out.EnableControllerAttachDetach, s); err != nil { return err } - out.ExperimentalQOSReserved = *(*map[string]string)(unsafe.Pointer(&in.ExperimentalQOSReserved)) out.ProtectKernelDefaults = in.ProtectKernelDefaults if err := v1.Convert_bool_To_Pointer_bool(&in.MakeIPTablesUtilChains, &out.MakeIPTablesUtilChains, s); err != nil { return err @@ -452,17 +374,13 @@ func autoConvert_kubeletconfig_KubeletConfiguration_To_v1alpha1_KubeletConfigura if err := v1.Convert_int32_To_Pointer_int32(&in.IPTablesDropBit, &out.IPTablesDropBit, s); err != nil { return err } - out.AllowedUnsafeSysctls = *(*[]string)(unsafe.Pointer(&in.AllowedUnsafeSysctls)) - out.FeatureGates = in.FeatureGates + out.FeatureGates = *(*map[string]bool)(unsafe.Pointer(&in.FeatureGates)) out.FailSwapOn = in.FailSwapOn - out.ExperimentalCheckNodeCapabilitiesBeforeMount = in.ExperimentalCheckNodeCapabilitiesBeforeMount - out.KeepTerminatedPodVolumes = in.KeepTerminatedPodVolumes out.SystemReserved = *(*map[string]string)(unsafe.Pointer(&in.SystemReserved)) 
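[Reviewer note] As the types.go hunk earlier in this diff shows, the comma-delimited eviction strings become maps of signal name to quantity and featureGates becomes map[string]bool, which is what lets the converter above move them with a single cast instead of string parsing. A hedged sketch of the new shapes as Go values; the struct is a trimmed stand-in, only the field names, JSON tags, and example quantities mirror the vendored types, and the feature-gate name is illustrative.

package main

import "fmt"

// evictionConfig is a trimmed stand-in for the map-valued fields
// introduced in this diff; it is not the real KubeletConfiguration.
type evictionConfig struct {
	EvictionHard            map[string]string `json:"evictionHard"`
	EvictionSoft            map[string]string `json:"evictionSoft"`
	EvictionSoftGracePeriod map[string]string `json:"evictionSoftGracePeriod"`
	EvictionMinimumReclaim  map[string]string `json:"evictionMinimumReclaim"`
	FeatureGates            map[string]bool   `json:"featureGates,omitempty"`
}

func main() {
	cfg := evictionConfig{
		// Previously expressed as one string, e.g. "memory.available<300Mi".
		EvictionHard:            map[string]string{"memory.available": "300Mi"},
		EvictionSoft:            map[string]string{"memory.available": "500Mi"},
		EvictionSoftGracePeriod: map[string]string{"memory.available": "30s"},
		EvictionMinimumReclaim:  map[string]string{"imagefs.available": "2Gi"},
		FeatureGates:            map[string]bool{"CPUManager": true}, // illustrative gate name
	}
	fmt.Printf("%+v\n", cfg)
}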
out.KubeReserved = *(*map[string]string)(unsafe.Pointer(&in.KubeReserved)) out.SystemReservedCgroup = in.SystemReservedCgroup out.KubeReservedCgroup = in.KubeReservedCgroup out.EnforceNodeAllocatable = *(*[]string)(unsafe.Pointer(&in.EnforceNodeAllocatable)) - out.ExperimentalNodeAllocatableIgnoreEvictionThreshold = in.ExperimentalNodeAllocatableIgnoreEvictionThreshold return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1/zz_generated.deepcopy.go index eff621971c0c..e27e7bdd8575 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1/zz_generated.deepcopy.go @@ -21,54 +21,10 @@ limitations under the License. package v1alpha1 import ( - core_v1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*KubeletAnonymousAuthentication).DeepCopyInto(out.(*KubeletAnonymousAuthentication)) - return nil - }, InType: reflect.TypeOf(&KubeletAnonymousAuthentication{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*KubeletAuthentication).DeepCopyInto(out.(*KubeletAuthentication)) - return nil - }, InType: reflect.TypeOf(&KubeletAuthentication{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*KubeletAuthorization).DeepCopyInto(out.(*KubeletAuthorization)) - return nil - }, InType: reflect.TypeOf(&KubeletAuthorization{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*KubeletConfiguration).DeepCopyInto(out.(*KubeletConfiguration)) - return nil - }, InType: reflect.TypeOf(&KubeletConfiguration{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*KubeletWebhookAuthentication).DeepCopyInto(out.(*KubeletWebhookAuthentication)) - return nil - }, InType: reflect.TypeOf(&KubeletWebhookAuthentication{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*KubeletWebhookAuthorization).DeepCopyInto(out.(*KubeletWebhookAuthorization)) - return nil - }, InType: reflect.TypeOf(&KubeletWebhookAuthorization{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*KubeletX509Authentication).DeepCopyInto(out.(*KubeletX509Authentication)) - return nil - }, InType: reflect.TypeOf(&KubeletX509Authentication{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *KubeletAnonymousAuthentication) DeepCopyInto(out *KubeletAnonymousAuthentication) { *out = *in @@ -146,6 +102,18 @@ func (in *KubeletConfiguration) DeepCopyInto(out *KubeletConfiguration) { out.SyncFrequency = in.SyncFrequency out.FileCheckFrequency = in.FileCheckFrequency out.HTTPCheckFrequency = in.HTTPCheckFrequency + if in.ManifestURLHeader != nil { + in, out := &in.ManifestURLHeader, &out.ManifestURLHeader + *out = make(map[string][]string, len(*in)) + for key, val := range *in { + if val == nil { + (*out)[key] = nil + } else { + (*out)[key] = make([]string, len(val)) + copy((*out)[key], val) + } + } + } if in.EnableServer != nil { in, out := &in.EnableServer, &out.EnableServer if *in == nil { @@ -217,16 +185,6 @@ func (in *KubeletConfiguration) DeepCopyInto(out *KubeletConfiguration) { **out = **in } } - out.MinimumGCAge = in.MinimumGCAge - if in.MaxContainerCount != nil { - in, out := &in.MaxContainerCount, &out.MaxContainerCount - if *in == nil { - *out = nil - } else { - *out = new(int32) - **out = **in - } - } if in.CAdvisorPort != nil { in, out := &in.CAdvisorPort, &out.CAdvisorPort if *in == nil { @@ -254,15 +212,6 @@ func (in *KubeletConfiguration) DeepCopyInto(out *KubeletConfiguration) { **out = **in } } - if in.RegisterNode != nil { - in, out := &in.RegisterNode, &out.RegisterNode - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } - } if in.ClusterDNS != nil { in, out := &in.ClusterDNS, &out.ClusterDNS *out = make([]string, len(*in)) @@ -301,15 +250,6 @@ func (in *KubeletConfiguration) DeepCopyInto(out *KubeletConfiguration) { } out.CPUManagerReconcilePeriod = in.CPUManagerReconcilePeriod out.RuntimeRequestTimeout = in.RuntimeRequestTimeout - if in.LockFilePath != nil { - in, out := &in.LockFilePath, &out.LockFilePath - if *in == nil { - *out = nil - } else { - *out = new(string) - **out = **in - } - } if in.CPUCFSQuota != nil { in, out := &in.CPUCFSQuota, &out.CPUCFSQuota if *in == nil { @@ -319,31 +259,6 @@ func (in *KubeletConfiguration) DeepCopyInto(out *KubeletConfiguration) { **out = **in } } - if in.Containerized != nil { - in, out := &in.Containerized, &out.Containerized - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } - } - if in.RegisterSchedulable != nil { - in, out := &in.RegisterSchedulable, &out.RegisterSchedulable - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } - } - if in.RegisterWithTaints != nil { - in, out := &in.RegisterWithTaints, &out.RegisterWithTaints - *out = make([]core_v1.Taint, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } if in.KubeAPIQPS != nil { in, out := &in.KubeAPIQPS, &out.KubeAPIQPS if *in == nil { @@ -362,30 +277,33 @@ func (in *KubeletConfiguration) DeepCopyInto(out *KubeletConfiguration) { **out = **in } } - if in.NodeLabels != nil { - in, out := &in.NodeLabels, &out.NodeLabels + if in.EvictionHard != nil { + in, out := &in.EvictionHard, &out.EvictionHard *out = make(map[string]string, len(*in)) for key, val := range *in { (*out)[key] = val } } - if in.EvictionHard != nil { - in, out := &in.EvictionHard, &out.EvictionHard - if *in == nil { - *out = nil - } else { - *out = new(string) - **out = **in + if in.EvictionSoft != nil { + in, out := &in.EvictionSoft, &out.EvictionSoft + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.EvictionSoftGracePeriod != nil { + in, out := &in.EvictionSoftGracePeriod, &out.EvictionSoftGracePeriod + *out = 
make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val } } out.EvictionPressureTransitionPeriod = in.EvictionPressureTransitionPeriod - if in.ExperimentalKernelMemcgNotification != nil { - in, out := &in.ExperimentalKernelMemcgNotification, &out.ExperimentalKernelMemcgNotification - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in + if in.EvictionMinimumReclaim != nil { + in, out := &in.EvictionMinimumReclaim, &out.EvictionMinimumReclaim + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val } } if in.EnableControllerAttachDetach != nil { @@ -397,13 +315,6 @@ func (in *KubeletConfiguration) DeepCopyInto(out *KubeletConfiguration) { **out = **in } } - if in.ExperimentalQOSReserved != nil { - in, out := &in.ExperimentalQOSReserved, &out.ExperimentalQOSReserved - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } if in.MakeIPTablesUtilChains != nil { in, out := &in.MakeIPTablesUtilChains, &out.MakeIPTablesUtilChains if *in == nil { @@ -431,10 +342,12 @@ func (in *KubeletConfiguration) DeepCopyInto(out *KubeletConfiguration) { **out = **in } } - if in.AllowedUnsafeSysctls != nil { - in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls - *out = make([]string, len(*in)) - copy(*out, *in) + if in.FeatureGates != nil { + in, out := &in.FeatureGates, &out.FeatureGates + *out = make(map[string]bool, len(*in)) + for key, val := range *in { + (*out)[key] = val + } } if in.SystemReserved != nil { in, out := &in.SystemReserved, &out.SystemReserved diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/validation/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/validation/BUILD index 432212ea3e84..7383b8a0c225 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/validation/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/validation/BUILD @@ -1,7 +1,5 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", @@ -11,7 +9,7 @@ load( go_library( name = "go_default_library", srcs = ["validation.go"], - tags = ["automanaged"], + importpath = "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/validation", deps = [ "//pkg/kubelet/apis/kubeletconfig:go_default_library", "//pkg/kubelet/cm:go_default_library", @@ -36,6 +34,7 @@ filegroup( go_test( name = "go_default_test", srcs = ["validation_test.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/validation", library = ":go_default_library", deps = [ "//pkg/kubelet/apis/kubeletconfig:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/zz_generated.deepcopy.go index 3f37b5d303b4..944e7b8985b1 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/zz_generated.deepcopy.go @@ -22,53 +22,9 @@ package kubeletconfig import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - api "k8s.io/kubernetes/pkg/api" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. 
-// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*KubeletAnonymousAuthentication).DeepCopyInto(out.(*KubeletAnonymousAuthentication)) - return nil - }, InType: reflect.TypeOf(&KubeletAnonymousAuthentication{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*KubeletAuthentication).DeepCopyInto(out.(*KubeletAuthentication)) - return nil - }, InType: reflect.TypeOf(&KubeletAuthentication{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*KubeletAuthorization).DeepCopyInto(out.(*KubeletAuthorization)) - return nil - }, InType: reflect.TypeOf(&KubeletAuthorization{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*KubeletConfiguration).DeepCopyInto(out.(*KubeletConfiguration)) - return nil - }, InType: reflect.TypeOf(&KubeletConfiguration{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*KubeletWebhookAuthentication).DeepCopyInto(out.(*KubeletWebhookAuthentication)) - return nil - }, InType: reflect.TypeOf(&KubeletWebhookAuthentication{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*KubeletWebhookAuthorization).DeepCopyInto(out.(*KubeletWebhookAuthorization)) - return nil - }, InType: reflect.TypeOf(&KubeletWebhookAuthorization{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*KubeletX509Authentication).DeepCopyInto(out.(*KubeletX509Authentication)) - return nil - }, InType: reflect.TypeOf(&KubeletX509Authentication{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *KubeletAnonymousAuthentication) DeepCopyInto(out *KubeletAnonymousAuthentication) { *out = *in @@ -137,6 +93,18 @@ func (in *KubeletConfiguration) DeepCopyInto(out *KubeletConfiguration) { out.SyncFrequency = in.SyncFrequency out.FileCheckFrequency = in.FileCheckFrequency out.HTTPCheckFrequency = in.HTTPCheckFrequency + if in.ManifestURLHeader != nil { + in, out := &in.ManifestURLHeader, &out.ManifestURLHeader + *out = make(map[string][]string, len(*in)) + for key, val := range *in { + if val == nil { + (*out)[key] = nil + } else { + (*out)[key] = make([]string, len(val)) + copy((*out)[key], val) + } + } + } out.Authentication = in.Authentication out.Authorization = in.Authorization if in.HostNetworkSources != nil { @@ -154,7 +122,6 @@ func (in *KubeletConfiguration) DeepCopyInto(out *KubeletConfiguration) { *out = make([]string, len(*in)) copy(*out, *in) } - out.MinimumGCAge = in.MinimumGCAge if in.ClusterDNS != nil { in, out := &in.ClusterDNS, &out.ClusterDNS *out = make([]string, len(*in)) @@ -166,43 +133,52 @@ func (in *KubeletConfiguration) DeepCopyInto(out *KubeletConfiguration) { out.VolumeStatsAggPeriod = in.VolumeStatsAggPeriod out.CPUManagerReconcilePeriod = in.CPUManagerReconcilePeriod out.RuntimeRequestTimeout = in.RuntimeRequestTimeout - if in.RegisterWithTaints != nil { - in, out := &in.RegisterWithTaints, &out.RegisterWithTaints - *out = make([]api.Taint, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) + if in.EvictionHard != nil { + in, out := &in.EvictionHard, &out.EvictionHard + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val } } - if in.NodeLabels != nil { - in, out := &in.NodeLabels, &out.NodeLabels + if in.EvictionSoft != nil { + in, out := &in.EvictionSoft, &out.EvictionSoft + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.EvictionSoftGracePeriod != nil { + in, out := &in.EvictionSoftGracePeriod, &out.EvictionSoftGracePeriod *out = make(map[string]string, len(*in)) for key, val := range *in { (*out)[key] = val } } out.EvictionPressureTransitionPeriod = in.EvictionPressureTransitionPeriod - if in.ExperimentalQOSReserved != nil { - in, out := &in.ExperimentalQOSReserved, &out.ExperimentalQOSReserved - *out = make(ConfigurationMap, len(*in)) + if in.EvictionMinimumReclaim != nil { + in, out := &in.EvictionMinimumReclaim, &out.EvictionMinimumReclaim + *out = make(map[string]string, len(*in)) for key, val := range *in { (*out)[key] = val } } - if in.AllowedUnsafeSysctls != nil { - in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls - *out = make([]string, len(*in)) - copy(*out, *in) + if in.FeatureGates != nil { + in, out := &in.FeatureGates, &out.FeatureGates + *out = make(map[string]bool, len(*in)) + for key, val := range *in { + (*out)[key] = val + } } if in.SystemReserved != nil { in, out := &in.SystemReserved, &out.SystemReserved - *out = make(ConfigurationMap, len(*in)) + *out = make(map[string]string, len(*in)) for key, val := range *in { (*out)[key] = val } } if in.KubeReserved != nil { in, out := &in.KubeReserved, &out.KubeReserved - *out = make(ConfigurationMap, len(*in)) + *out = make(map[string]string, len(*in)) for key, val := range *in { (*out)[key] = val } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1/BUILD index 26c1951081cc..61a83c261255 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1/BUILD +++ 
b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["types.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1", deps = ["//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1/types.go b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1/types.go index e08128658d2b..916ea822742f 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1/types.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1/types.go @@ -86,6 +86,12 @@ type PodStats struct { // +patchMergeKey=name // +patchStrategy=merge Containers []ContainerStats `json:"containers" patchStrategy:"merge" patchMergeKey:"name"` + // Stats pertaining to CPU resources consumed by pod cgroup (which includes all containers' resource usage and pod overhead). + // +optional + CPU *CPUStats `json:"cpu,omitempty"` + // Stats pertaining to memory (RAM) resources consumed by pod cgroup (which includes all containers' resource usage and pod overhead). + // +optional + Memory *MemoryStats `json:"memory,omitempty"` // Stats pertaining to network resources. // +optional Network *NetworkStats `json:"network,omitempty"` @@ -95,6 +101,9 @@ type PodStats struct { // +patchMergeKey=name // +patchStrategy=merge VolumeStats []VolumeStats `json:"volume,omitempty" patchStrategy:"merge" patchMergeKey:"name"` + // EphemeralStorage reports the total filesystem usage for the containers and emptyDir-backed volumes in the measured Pod. + // +optional + EphemeralStorage *FsStats `json:"ephemeral-storage,omitempty"` } // ContainerStats holds container-level unprocessed sample stats. @@ -109,6 +118,8 @@ type ContainerStats struct { // Stats pertaining to memory (RAM) resources. // +optional Memory *MemoryStats `json:"memory,omitempty"` + // Metrics for Accelerators. Each Accelerator corresponds to one element in the array. + Accelerators []AcceleratorStats `json:"accelerators,omitempty"` // Stats pertaining to container rootfs usage of filesystem resources. // Rootfs.UsedBytes is the number of bytes used for the container write layer. // +optional @@ -130,10 +141,10 @@ type PodReference struct { UID string `json:"uid"` } -// NetworkStats contains data about network resources. -type NetworkStats struct { - // The time at which these stats were updated. - Time metav1.Time `json:"time"` +// InterfaceStats contains resource value data about interface. +type InterfaceStats struct { + // The name of the interface + Name string `json:"name"` // Cumulative count of bytes received. // +optional RxBytes *uint64 `json:"rxBytes,omitempty"` @@ -148,6 +159,17 @@ type NetworkStats struct { TxErrors *uint64 `json:"txErrors,omitempty"` } +// NetworkStats contains data about network resources. +type NetworkStats struct { + // The time at which these stats were updated. + Time metav1.Time `json:"time"` + + // Stats for the default interface, if found + InterfaceStats `json:",inline"` + + Interfaces []InterfaceStats `json:"interfaces,omitempty"` +} + // CPUStats contains data about CPU usage. type CPUStats struct { // The time at which these stats were updated. @@ -188,6 +210,30 @@ type MemoryStats struct { MajorPageFaults *uint64 `json:"majorPageFaults,omitempty"` } +// AcceleratorStats contains stats for accelerators attached to the container. +type AcceleratorStats struct { + // Make of the accelerator (nvidia, amd, google etc.) 
+ Make string `json:"make"` + + // Model of the accelerator (tesla-p100, tesla-k80 etc.) + Model string `json:"model"` + + // ID of the accelerator. + ID string `json:"id"` + + // Total accelerator memory. + // unit: bytes + MemoryTotal uint64 `json:"memoryTotal"` + + // Total accelerator memory allocated. + // unit: bytes + MemoryUsed uint64 `json:"memoryUsed"` + + // Percent of time over the past sample period (10s) during which + // the accelerator was actively processing. + DutyCycle uint64 `json:"dutyCycle"` +} + // VolumeStats contains data about Volume filesystem usage. type VolumeStats struct { // Embedded FsStats diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/BUILD index 29c45eb2b876..faa9d5005b22 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/BUILD @@ -24,9 +24,11 @@ go_library( ], "//conditions:default": [], }), + importpath = "k8s.io/kubernetes/pkg/kubelet/cadvisor", deps = [ - "//pkg/api/v1/helper:go_default_library", + "//pkg/apis/core/v1/helper:go_default_library", "//pkg/features:go_default_library", + "//pkg/kubelet/types:go_default_library", "//vendor/github.com/google/cadvisor/events:go_default_library", "//vendor/github.com/google/cadvisor/info/v1:go_default_library", "//vendor/github.com/google/cadvisor/info/v2:go_default_library", @@ -35,7 +37,6 @@ go_library( "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:linux_amd64": [ - "//pkg/kubelet/types:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/google/cadvisor/cache/memory:go_default_library", "//vendor/github.com/google/cadvisor/container:go_default_library", @@ -46,6 +47,9 @@ go_library( "//vendor/github.com/google/cadvisor/utils/sysfs:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", ], + "@io_bazel_rules_go//go/platform:windows_amd64": [ + "//pkg/kubelet/winstats:go_default_library", + ], "//conditions:default": [], }), ) @@ -58,6 +62,7 @@ go_test( ], "//conditions:default": [], }), + importpath = "k8s.io/kubernetes/pkg/kubelet/cadvisor", library = ":go_default_library", deps = select({ "@io_bazel_rules_go//go/platform:linux_amd64": [ diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/cadvisor_linux.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/cadvisor_linux.go index 4749016a49a8..948cb651b300 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/cadvisor_linux.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/cadvisor_linux.go @@ -105,11 +105,16 @@ func containerLabels(c *cadvisorapi.ContainerInfo) map[string]string { } // New creates a cAdvisor and exports its API on the specified port if port > 0. -func New(address string, port uint, imageFsInfoProvider ImageFsInfoProvider, rootPath string) (Interface, error) { +func New(address string, port uint, imageFsInfoProvider ImageFsInfoProvider, rootPath string, usingLegacyStats bool) (Interface, error) { sysFs := sysfs.NewRealSysFs() + ignoreMetrics := cadvisormetrics.MetricSet{cadvisormetrics.NetworkTcpUsageMetrics: struct{}{}, cadvisormetrics.NetworkUdpUsageMetrics: struct{}{}} + if !usingLegacyStats { + ignoreMetrics[cadvisormetrics.DiskUsageMetrics] = struct{}{} + } + // Create and start the cAdvisor container manager. 
- m, err := manager.New(memory.New(statsCacheDuration, nil), sysFs, maxHousekeepingInterval, allowDynamicHousekeeping, cadvisormetrics.MetricSet{cadvisormetrics.NetworkTcpUsageMetrics: struct{}{}, cadvisormetrics.NetworkUdpUsageMetrics: struct{}{}}, http.DefaultClient) + m, err := manager.New(memory.New(statsCacheDuration, nil), sysFs, maxHousekeepingInterval, allowDynamicHousekeeping, ignoreMetrics, http.DefaultClient) if err != nil { return nil, err } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/cadvisor_unsupported.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/cadvisor_unsupported.go index 0b653e69e6b9..f1ae9486d873 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/cadvisor_unsupported.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/cadvisor_unsupported.go @@ -31,7 +31,7 @@ type cadvisorUnsupported struct { var _ Interface = new(cadvisorUnsupported) -func New(address string, port uint, imageFsInfoProvider ImageFsInfoProvider, rootPath string) (Interface, error) { +func New(address string, port uint, imageFsInfoProvider ImageFsInfoProvider, rootPath string, usingLegacyStats bool) (Interface, error) { return &cadvisorUnsupported{}, nil } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/cadvisor_windows.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/cadvisor_windows.go index b663c372fd24..6ce1f8d1b38f 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/cadvisor_windows.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/cadvisor_windows.go @@ -22,16 +22,19 @@ import ( "github.com/google/cadvisor/events" cadvisorapi "github.com/google/cadvisor/info/v1" cadvisorapiv2 "github.com/google/cadvisor/info/v2" + "k8s.io/kubernetes/pkg/kubelet/winstats" ) type cadvisorClient struct { + winStatsClient winstats.Client } var _ Interface = new(cadvisorClient) // New creates a cAdvisor and exports its API on the specified port if port > 0. 
-func New(address string, port uint, imageFsInfoProvider ImageFsInfoProvider, rootPath string) (Interface, error) { - return &cadvisorClient{}, nil +func New(address string, port uint, imageFsInfoProvider ImageFsInfoProvider, rootPath string, usingLegacyStats bool) (Interface, error) { + client, err := winstats.NewPerfCounterClient() + return &cadvisorClient{winStatsClient: client}, err } func (cu *cadvisorClient) Start() error { @@ -47,7 +50,7 @@ func (cu *cadvisorClient) ContainerInfo(name string, req *cadvisorapi.ContainerI } func (cu *cadvisorClient) ContainerInfoV2(name string, options cadvisorapiv2.RequestOptions) (map[string]cadvisorapiv2.ContainerInfo, error) { - return make(map[string]cadvisorapiv2.ContainerInfo), nil + return cu.winStatsClient.WinContainerInfos() } func (cu *cadvisorClient) SubcontainerInfo(name string, req *cadvisorapi.ContainerInfoRequest) (map[string]*cadvisorapi.ContainerInfo, error) { @@ -55,11 +58,11 @@ func (cu *cadvisorClient) SubcontainerInfo(name string, req *cadvisorapi.Contain } func (cu *cadvisorClient) MachineInfo() (*cadvisorapi.MachineInfo, error) { - return &cadvisorapi.MachineInfo{}, nil + return cu.winStatsClient.WinMachineInfo() } func (cu *cadvisorClient) VersionInfo() (*cadvisorapi.VersionInfo, error) { - return &cadvisorapi.VersionInfo{}, nil + return cu.winStatsClient.WinVersionInfo() } func (cu *cadvisorClient) ImagesFsInfo() (cadvisorapiv2.FsInfo, error) { diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/helpers_linux.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/helpers_linux.go index ea10588c8c7c..10b5e05a008d 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/helpers_linux.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/helpers_linux.go @@ -43,7 +43,7 @@ func (i *imageFsInfoProvider) ImageFsInfoLabel() (string, error) { // This is a temporary workaround to get stats for cri-o from cadvisor // and should be removed. // Related to https://github.com/kubernetes/kubernetes/issues/51798 - if i.runtimeEndpoint == "/var/run/crio.sock" { + if i.runtimeEndpoint == CrioSocket { return cadvisorfs.LabelCrioImages, nil } } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/util.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/util.go index dd4d7c4b1fda..e4107d5b4a74 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/util.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/util.go @@ -17,13 +17,22 @@ limitations under the License. package cadvisor import ( + goruntime "runtime" + cadvisorapi "github.com/google/cadvisor/info/v1" cadvisorapi2 "github.com/google/cadvisor/info/v2" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" utilfeature "k8s.io/apiserver/pkg/util/feature" - v1helper "k8s.io/kubernetes/pkg/api/v1/helper" + v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/features" + kubetypes "k8s.io/kubernetes/pkg/kubelet/types" +) + +const ( + // Please keep this in sync with the one in: + // github.com/google/cadvisor/container/crio/client.go + CrioSocket = "/var/run/crio/crio.sock" ) func CapacityFromMachineInfo(info *cadvisorapi.MachineInfo) v1.ResourceList { @@ -57,3 +66,16 @@ func EphemeralStorageCapacityFromFsInfo(info cadvisorapi2.FsInfo) v1.ResourceLis } return c } + +// CRI integrations should get container metrics via CRI. Docker +// uses the built-in cadvisor to gather such metrics on Linux for +// historical reasons. +// cri-o relies on cadvisor as a temporary workaround. The code should +// be removed. 
Related issue: +// https://github.com/kubernetes/kubernetes/issues/51798 +// UsingLegacyCadvisorStats returns true if container stats are provided by cadvisor instead of through the CRI +func UsingLegacyCadvisorStats(runtime, runtimeEndpoint string) bool { + return runtime == kubetypes.RktContainerRuntime || + (runtime == kubetypes.DockerContainerRuntime && goruntime.GOOS == "linux") || + runtimeEndpoint == CrioSocket +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/BUILD index a9df3be8c7df..4d30fec92ece 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/BUILD @@ -9,46 +9,36 @@ load( go_library( name = "go_default_library", srcs = [ - "certificate_manager.go", - "certificate_store.go", "kubelet.go", "transport.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/certificate", deps = [ "//pkg/kubelet/apis/kubeletconfig:go_default_library", - "//pkg/util/file:go_default_library", + "//pkg/kubelet/metrics:go_default_library", "//vendor/github.com/golang/glog:go_default_library", + "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/k8s.io/api/certificates/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/client-go/util/cert:go_default_library", + "//vendor/k8s.io/client-go/util/certificate:go_default_library", ], ) go_test( name = "go_default_test", - srcs = [ - "certificate_manager_test.go", - "certificate_store_test.go", - "transport_test.go", - ], + srcs = ["transport_test.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/certificate", library = ":go_default_library", deps = [ - "//vendor/k8s.io/api/certificates/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/client-go/util/cert:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap/BUILD index 669d93075771..05d24e0c0f40 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["bootstrap_test.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap", library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", @@ -19,15 +20,18 @@ go_test( go_library( name = "go_default_library", srcs = ["bootstrap.go"], + importpath = 
"k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap", deps = [ - "//pkg/kubelet/util/csr:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library", "//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library", + "//vendor/k8s.io/client-go/transport:go_default_library", "//vendor/k8s.io/client-go/util/cert:go_default_library", + "//vendor/k8s.io/client-go/util/certificate/csr:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap/bootstrap.go b/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap/bootstrap.go index 2952ba0bd62b..d123c0beee53 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap/bootstrap.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap/bootstrap.go @@ -20,16 +20,19 @@ import ( "fmt" "os" "path/filepath" + "time" "github.com/golang/glog" "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" certificates "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + "k8s.io/client-go/transport" certutil "k8s.io/client-go/util/cert" - "k8s.io/kubernetes/pkg/kubelet/util/csr" + "k8s.io/client-go/util/certificate/csr" ) const ( @@ -42,17 +45,15 @@ const ( // On success, a kubeconfig file referencing the generated key and obtained certificate is written to kubeconfigPath. // The certificate and key file are stored in certDir. func LoadClientCert(kubeconfigPath string, bootstrapPath string, certDir string, nodeName types.NodeName) error { - // Short-circuit if the kubeconfig file already exists. - // TODO: inspect the kubeconfig, ensure a rest client can be built from it, verify client cert expiration, etc. - _, err := os.Stat(kubeconfigPath) - if err == nil { - glog.V(2).Infof("Kubeconfig %s exists, skipping bootstrap", kubeconfigPath) - return nil - } - if !os.IsNotExist(err) { - glog.Errorf("Error reading kubeconfig %s, skipping bootstrap: %v", kubeconfigPath, err) + // Short-circuit if the kubeconfig file exists and is valid. + ok, err := verifyBootstrapClientConfig(kubeconfigPath) + if err != nil { return err } + if ok { + glog.V(2).Infof("Kubeconfig %s exists and is valid, skipping bootstrap", kubeconfigPath) + return nil + } glog.V(2).Info("Using bootstrap kubeconfig to generate TLS client cert, key and kubeconfig file") @@ -72,6 +73,17 @@ func LoadClientCert(kubeconfigPath string, bootstrapPath string, certDir string, if err != nil { return fmt.Errorf("unable to build bootstrap key path: %v", err) } + // If we are unable to generate a CSR, we remove our key file and start fresh. + // This method is used before enabling client rotation and so we must ensure we + // can make forward progress if we crash and exit when a CSR exists but the cert + // it is signed for has expired. 
+ defer func() { + if !success { + if err := os.Remove(keyPath); err != nil && !os.IsNotExist(err) { + glog.Warningf("Cannot clean up the key file %q: %v", keyPath, err) + } + } + }() keyData, _, err := certutil.LoadOrGenerateKeyFile(keyPath) if err != nil { return err @@ -82,6 +94,13 @@ func LoadClientCert(kubeconfigPath string, bootstrapPath string, certDir string, if err != nil { return fmt.Errorf("unable to build bootstrap client cert path: %v", err) } + defer func() { + if !success { + if err := os.Remove(certPath); err != nil && !os.IsNotExist(err) { + glog.Warningf("Cannot clean up the cert file %q: %v", certPath, err) + } + } + }() certData, err := csr.RequestNodeCertificate(bootstrapClient.CertificateSigningRequests(), keyData, nodeName) if err != nil { return err @@ -89,13 +108,6 @@ func LoadClientCert(kubeconfigPath string, bootstrapPath string, certDir string, if err := certutil.WriteCert(certPath, certData); err != nil { return err } - defer func() { - if !success { - if err := os.Remove(certPath); err != nil { - glog.Warningf("Cannot clean up the cert file %q: %v", certPath, err) - } - } - }() // Get the CA data from the bootstrap client config. caFile, caData := bootstrapClientConfig.CAFile, []byte{} @@ -150,3 +162,48 @@ func loadRESTClientConfig(kubeconfig string) (*restclient.Config, error) { loader, ).ClientConfig() } + +// verifyBootstrapClientConfig checks the provided kubeconfig to see if it has a valid +// client certificate. It returns true if the kubeconfig is valid, or an error if bootstrapping +// should stop immediately. +func verifyBootstrapClientConfig(kubeconfigPath string) (bool, error) { + _, err := os.Stat(kubeconfigPath) + if os.IsNotExist(err) { + return false, nil + } + if err != nil { + return false, fmt.Errorf("error reading existing bootstrap kubeconfig %s: %v", kubeconfigPath, err) + } + bootstrapClientConfig, err := loadRESTClientConfig(kubeconfigPath) + if err != nil { + utilruntime.HandleError(fmt.Errorf("Unable to read existing bootstrap client config: %v", err)) + return false, nil + } + transportConfig, err := bootstrapClientConfig.TransportConfig() + if err != nil { + utilruntime.HandleError(fmt.Errorf("Unable to load transport configuration from existing bootstrap client config: %v", err)) + return false, nil + } + // has side effect of populating transport config data fields + if _, err := transport.TLSConfigFor(transportConfig); err != nil { + utilruntime.HandleError(fmt.Errorf("Unable to load TLS configuration from existing bootstrap client config: %v", err)) + return false, nil + } + certs, err := certutil.ParseCertsPEM(transportConfig.TLS.CertData) + if err != nil { + utilruntime.HandleError(fmt.Errorf("Unable to load TLS certificates from existing bootstrap client config: %v", err)) + return false, nil + } + if len(certs) == 0 { + utilruntime.HandleError(fmt.Errorf("Unable to read TLS certificates from existing bootstrap client config: %v", err)) + return false, nil + } + now := time.Now() + for _, cert := range certs { + if now.After(cert.NotAfter) { + utilruntime.HandleError(fmt.Errorf("Part of the existing bootstrap client certificate is expired: %s", cert.NotAfter)) + return false, nil + } + } + return true, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/certificate_manager.go b/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/certificate_manager.go deleted file mode 100644 index dcaa1a377492..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/certificate_manager.go +++ /dev/null @@ -1,412 +0,0 
@@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package certificate - -import ( - "crypto/ecdsa" - "crypto/elliptic" - cryptorand "crypto/rand" - "crypto/tls" - "crypto/x509" - "encoding/pem" - "fmt" - "sync" - "time" - - "github.com/golang/glog" - - certificates "k8s.io/api/certificates/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/apimachinery/pkg/watch" - certificatesclient "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" - "k8s.io/client-go/util/cert" -) - -const ( - syncPeriod = 1 * time.Hour -) - -// Manager maintains and updates the certificates in use by this certificate -// manager. In the background it communicates with the API server to get new -// certificates for certificates about to expire. -type Manager interface { - // CertificateSigningRequestClient sets the client interface that is used for - // signing new certificates generated as part of rotation. - SetCertificateSigningRequestClient(certificatesclient.CertificateSigningRequestInterface) error - // Start the API server status sync loop. - Start() - // Current returns the currently selected certificate from the - // certificate manager, as well as the associated certificate and key data - // in PEM format. - Current() *tls.Certificate -} - -// Config is the set of configuration parameters available for a new Manager. -type Config struct { - // CertificateSigningRequestClient will be used for signing new certificate - // requests generated when a key rotation occurs. It must be set either at - // initialization or by using CertificateSigningRequestClient before - // Manager.Start() is called. - CertificateSigningRequestClient certificatesclient.CertificateSigningRequestInterface - // Template is the CertificateRequest that will be used as a template for - // generating certificate signing requests for all new keys generated as - // part of rotation. It follows the same rules as the template parameter of - // crypto.x509.CreateCertificateRequest in the Go standard libraries. - Template *x509.CertificateRequest - // Usages is the types of usages that certificates generated by the manager - // can be used for. - Usages []certificates.KeyUsage - // CertificateStore is a persistent store where the current cert/key is - // kept and future cert/key pairs will be persisted after they are - // generated. - CertificateStore Store - // BootstrapCertificatePEM is the certificate data that will be returned - // from the Manager if the CertificateStore doesn't have any cert/key pairs - // currently available and has not yet had a chance to get a new cert/key - // pair from the API. If the CertificateStore does have a cert/key pair, - // this will be ignored. If there is no cert/key pair available in the - // CertificateStore, as soon as Start is called, it will request a new - // cert/key pair from the CertificateSigningRequestClient. 
This is intended - // to allow the first boot of a component to be initialized using a - // generic, multi-use cert/key pair which will be quickly replaced with a - // unique cert/key pair. - BootstrapCertificatePEM []byte - // BootstrapKeyPEM is the key data that will be returned from the Manager - // if the CertificateStore doesn't have any cert/key pairs currently - // available. If the CertificateStore does have a cert/key pair, this will - // be ignored. If the bootstrap cert/key pair are used, they will be - // rotated at the first opportunity, possibly well in advance of expiring. - // This is intended to allow the first boot of a component to be - // initialized using a generic, multi-use cert/key pair which will be - // quickly replaced with a unique cert/key pair. - BootstrapKeyPEM []byte -} - -// Store is responsible for getting and updating the current certificate. -// Depending on the concrete implementation, the backing store for this -// behavior may vary. -type Store interface { - // Current returns the currently selected certificate, as well as the - // associated certificate and key data in PEM format. If the Store doesn't - // have a cert/key pair currently, it should return a NoCertKeyError so - // that the Manager can recover by using bootstrap certificates to request - // a new cert/key pair. - Current() (*tls.Certificate, error) - // Update accepts the PEM data for the cert/key pair and makes the new - // cert/key pair the 'current' pair, that will be returned by future calls - // to Current(). - Update(cert, key []byte) (*tls.Certificate, error) -} - -// NoCertKeyError indicates there is no cert/key currently available. -type NoCertKeyError string - -func (e *NoCertKeyError) Error() string { return string(*e) } - -type manager struct { - certSigningRequestClient certificatesclient.CertificateSigningRequestInterface - template *x509.CertificateRequest - usages []certificates.KeyUsage - certStore Store - certAccessLock sync.RWMutex - cert *tls.Certificate - rotationDeadline time.Time - forceRotation bool -} - -// NewManager returns a new certificate manager. A certificate manager is -// responsible for being the authoritative source of certificates in the -// Kubelet and handling updates due to rotation. -func NewManager(config *Config) (Manager, error) { - cert, forceRotation, err := getCurrentCertificateOrBootstrap( - config.CertificateStore, - config.BootstrapCertificatePEM, - config.BootstrapKeyPEM) - if err != nil { - return nil, err - } - - m := manager{ - certSigningRequestClient: config.CertificateSigningRequestClient, - template: config.Template, - usages: config.Usages, - certStore: config.CertificateStore, - cert: cert, - forceRotation: forceRotation, - } - - return &m, nil -} - -// Current returns the currently selected certificate from the certificate -// manager. This can be nil if the manager was initialized without a -// certificate and has not yet received one from the -// CertificateSigningRequestClient. -func (m *manager) Current() *tls.Certificate { - m.certAccessLock.RLock() - defer m.certAccessLock.RUnlock() - return m.cert -} - -// SetCertificateSigningRequestClient sets the client interface that is used -// for signing new certificates generated as part of rotation. It must be -// called before Start() and can not be used to change the -// CertificateSigningRequestClient that has already been set. This method is to -// support the one specific scenario where the CertificateSigningRequestClient -// uses the CertificateManager. 
-func (m *manager) SetCertificateSigningRequestClient(certSigningRequestClient certificatesclient.CertificateSigningRequestInterface) error { - if m.certSigningRequestClient == nil { - m.certSigningRequestClient = certSigningRequestClient - return nil - } - return fmt.Errorf("CertificateSigningRequestClient is already set.") -} - -// Start will start the background work of rotating the certificates. -func (m *manager) Start() { - // Certificate rotation depends on access to the API server certificate - // signing API, so don't start the certificate manager if we don't have a - // client. This will happen on the cluster master, where the kubelet is - // responsible for bootstrapping the pods of the master components. - if m.certSigningRequestClient == nil { - glog.V(2).Infof("Certificate rotation is not enabled, no connection to the apiserver.") - return - } - - glog.V(2).Infof("Certificate rotation is enabled.") - - m.setRotationDeadline() - - // Synchronously request a certificate before entering the background - // loop to allow bootstrap scenarios, where the certificate manager - // doesn't have a certificate at all yet. - if m.shouldRotate() { - glog.V(1).Infof("shouldRotate() is true, forcing immediate rotation") - _, err := m.rotateCerts() - if err != nil { - glog.Errorf("Could not rotate certificates: %v", err) - } - } - backoff := wait.Backoff{ - Duration: 2 * time.Second, - Factor: 2, - Jitter: 0.1, - Steps: 7, - } - go wait.Forever(func() { - sleepInterval := m.rotationDeadline.Sub(time.Now()) - glog.V(2).Infof("Waiting %v for next certificate rotation", sleepInterval) - time.Sleep(sleepInterval) - if err := wait.ExponentialBackoff(backoff, m.rotateCerts); err != nil { - glog.Errorf("Reached backoff limit, still unable to rotate certs: %v", err) - wait.PollInfinite(128*time.Second, m.rotateCerts) - } - }, 0) -} - -func getCurrentCertificateOrBootstrap( - store Store, - bootstrapCertificatePEM []byte, - bootstrapKeyPEM []byte) (cert *tls.Certificate, shouldRotate bool, errResult error) { - - currentCert, err := store.Current() - if err == nil { - return currentCert, false, nil - } - - if _, ok := err.(*NoCertKeyError); !ok { - return nil, false, err - } - - if bootstrapCertificatePEM == nil || bootstrapKeyPEM == nil { - return nil, true, nil - } - - bootstrapCert, err := tls.X509KeyPair(bootstrapCertificatePEM, bootstrapKeyPEM) - if err != nil { - return nil, false, err - } - if len(bootstrapCert.Certificate) < 1 { - return nil, false, fmt.Errorf("no cert/key data found") - } - - certs, err := x509.ParseCertificates(bootstrapCert.Certificate[0]) - if err != nil { - return nil, false, fmt.Errorf("unable to parse certificate data: %v", err) - } - bootstrapCert.Leaf = certs[0] - return &bootstrapCert, true, nil -} - -// shouldRotate looks at how close the current certificate is to expiring and -// decides if it is time to rotate or not. -func (m *manager) shouldRotate() bool { - m.certAccessLock.RLock() - defer m.certAccessLock.RUnlock() - if m.cert == nil { - return true - } - if m.forceRotation { - return true - } - return time.Now().After(m.rotationDeadline) -} - -func (m *manager) rotateCerts() (bool, error) { - glog.V(2).Infof("Rotating certificates") - - csrPEM, keyPEM, err := m.generateCSR() - if err != nil { - glog.Errorf("Unable to generate a certificate signing request: %v", err) - return false, nil - } - - // Call the Certificate Signing Request API to get a certificate for the - // new private key. 
- crtPEM, err := requestCertificate(m.certSigningRequestClient, csrPEM, m.usages) - if err != nil { - glog.Errorf("Failed while requesting a signed certificate from the master: %v", err) - return false, nil - } - - cert, err := m.certStore.Update(crtPEM, keyPEM) - if err != nil { - glog.Errorf("Unable to store the new cert/key pair: %v", err) - return false, nil - } - - m.updateCached(cert) - m.setRotationDeadline() - m.forceRotation = false - return true, nil -} - -// setRotationDeadline sets a cached value for the threshold at which the -// current certificate should be rotated, 80%+/-10% of the expiration of the -// certificate. -func (m *manager) setRotationDeadline() { - m.certAccessLock.RLock() - defer m.certAccessLock.RUnlock() - if m.cert == nil { - m.rotationDeadline = time.Now() - return - } - - notAfter := m.cert.Leaf.NotAfter - totalDuration := float64(notAfter.Sub(m.cert.Leaf.NotBefore)) - - // Use some jitter to set the rotation threshold so each node will rotate - // at approximately 70-90% of the total lifetime of the certificate. With - // jitter, if a number of nodes are added to a cluster at approximately the - // same time (such as cluster creation time), they won't all try to rotate - // certificates at the same time for the rest of the life of the cluster. - jitteryDuration := wait.Jitter(time.Duration(totalDuration), 0.2) - time.Duration(totalDuration*0.3) - - m.rotationDeadline = m.cert.Leaf.NotBefore.Add(jitteryDuration) - glog.V(2).Infof("Certificate rotation deadline is %v", m.rotationDeadline) -} - -func (m *manager) updateCached(cert *tls.Certificate) { - m.certAccessLock.Lock() - defer m.certAccessLock.Unlock() - m.cert = cert -} - -func (m *manager) generateCSR() (csrPEM []byte, keyPEM []byte, err error) { - // Generate a new private key. - privateKey, err := ecdsa.GenerateKey(elliptic.P256(), cryptorand.Reader) - if err != nil { - return nil, nil, fmt.Errorf("unable to generate a new private key: %v", err) - } - der, err := x509.MarshalECPrivateKey(privateKey) - if err != nil { - return nil, nil, fmt.Errorf("unable to marshal the new key to DER: %v", err) - } - - keyPEM = pem.EncodeToMemory(&pem.Block{Type: cert.ECPrivateKeyBlockType, Bytes: der}) - - csrPEM, err = cert.MakeCSRFromTemplate(privateKey, m.template) - if err != nil { - return nil, nil, fmt.Errorf("unable to create a csr from the private key: %v", err) - } - return csrPEM, keyPEM, nil -} - -// requestCertificate will create a certificate signing request using the PEM -// encoded CSR and send it to API server, then it will watch the object's -// status, once approved by API server, it will return the API server's issued -// certificate (pem-encoded). If there is any errors, or the watch timeouts, it -// will return an error. -// -// NOTE This is a copy of a function with the same name in -// k8s.io/kubernetes/pkg/kubelet/util/csr/csr.go, changing only the package that -// CertificateSigningRequestInterface and KeyUsage are imported from. -func requestCertificate(client certificatesclient.CertificateSigningRequestInterface, csrData []byte, usages []certificates.KeyUsage) (certData []byte, err error) { - glog.Infof("Requesting new certificate.") - req, err := client.Create(&certificates.CertificateSigningRequest{ - // Username, UID, Groups will be injected by API server. 
- TypeMeta: metav1.TypeMeta{Kind: "CertificateSigningRequest"}, - ObjectMeta: metav1.ObjectMeta{GenerateName: "csr-"}, - - Spec: certificates.CertificateSigningRequestSpec{ - Request: csrData, - Usages: usages, - }, - }) - if err != nil { - return nil, fmt.Errorf("cannot create certificate signing request: %v", err) - } - - // Make a default timeout = 3600s. - var defaultTimeoutSeconds int64 = 3600 - certWatch, err := client.Watch(metav1.ListOptions{ - Watch: true, - TimeoutSeconds: &defaultTimeoutSeconds, - FieldSelector: fields.OneTermEqualSelector("metadata.name", req.Name).String(), - }) - if err != nil { - return nil, fmt.Errorf("cannot watch on the certificate signing request: %v", err) - } - defer certWatch.Stop() - ch := certWatch.ResultChan() - - for { - event, ok := <-ch - if !ok { - break - } - - if event.Type == watch.Modified || event.Type == watch.Added { - if event.Object.(*certificates.CertificateSigningRequest).UID != req.UID { - continue - } - status := event.Object.(*certificates.CertificateSigningRequest).Status - for _, c := range status.Conditions { - if c.Type == certificates.CertificateDenied { - return nil, fmt.Errorf("certificate signing request is not approved, reason: %v, message: %v", c.Reason, c.Message) - } - if c.Type == certificates.CertificateApproved && status.Certificate != nil { - return status.Certificate, nil - } - } - } - } - - return nil, fmt.Errorf("watch channel closed") -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/kubelet.go b/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/kubelet.go index b74598681508..b8530074c44f 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/kubelet.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/kubelet.go @@ -22,21 +22,25 @@ import ( "fmt" "net" + "github.com/prometheus/client_golang/prometheus" + certificates "k8s.io/api/certificates/v1beta1" "k8s.io/apimachinery/pkg/types" clientset "k8s.io/client-go/kubernetes" clientcertificates "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" + "k8s.io/client-go/util/certificate" "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig" + "k8s.io/kubernetes/pkg/kubelet/metrics" ) // NewKubeletServerCertificateManager creates a certificate manager for the kubelet when retrieving a server certificate // or returns an error. 
-func NewKubeletServerCertificateManager(kubeClient clientset.Interface, kubeCfg *kubeletconfig.KubeletConfiguration, nodeName types.NodeName, ips []net.IP, hostnames []string, certDirectory string) (Manager, error) { +func NewKubeletServerCertificateManager(kubeClient clientset.Interface, kubeCfg *kubeletconfig.KubeletConfiguration, nodeName types.NodeName, ips []net.IP, hostnames []string, certDirectory string) (certificate.Manager, error) { var certSigningRequestClient clientcertificates.CertificateSigningRequestInterface - if kubeClient != nil && kubeClient.Certificates() != nil { - certSigningRequestClient = kubeClient.Certificates().CertificateSigningRequests() + if kubeClient != nil && kubeClient.CertificatesV1beta1() != nil { + certSigningRequestClient = kubeClient.CertificatesV1beta1().CertificateSigningRequests() } - certificateStore, err := NewFileStore( + certificateStore, err := certificate.NewFileStore( "kubelet-server", certDirectory, certDirectory, @@ -45,7 +49,17 @@ func NewKubeletServerCertificateManager(kubeClient clientset.Interface, kubeCfg if err != nil { return nil, fmt.Errorf("failed to initialize server certificate store: %v", err) } - m, err := NewManager(&Config{ + var certificateExpiration = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: metrics.KubeletSubsystem, + Subsystem: "certificate_manager", + Name: "server_expiration_seconds", + Help: "Gauge of the lifetime of a certificate. The value is the date the certificate will expire in seconds since January 1, 1970 UTC.", + }, + ) + prometheus.MustRegister(certificateExpiration) + + m, err := certificate.NewManager(&certificate.Config{ CertificateSigningRequestClient: certSigningRequestClient, Template: &x509.CertificateRequest{ Subject: pkix.Name{ @@ -69,7 +83,8 @@ func NewKubeletServerCertificateManager(kubeClient clientset.Interface, kubeCfg // authenticate itself to a TLS client. certificates.UsageServerAuth, }, - CertificateStore: certificateStore, + CertificateStore: certificateStore, + CertificateExpiration: certificateExpiration, }) if err != nil { return nil, fmt.Errorf("failed to initialize server certificate manager: %v", err) @@ -81,8 +96,8 @@ func NewKubeletServerCertificateManager(kubeClient clientset.Interface, kubeCfg // client that can be used to sign new certificates (or rotate). It answers with // whatever certificate it is initialized with. If a CSR client is set later, it // may begin rotating/renewing the client cert -func NewKubeletClientCertificateManager(certDirectory string, nodeName types.NodeName, certData []byte, keyData []byte, certFile string, keyFile string) (Manager, error) { - certificateStore, err := NewFileStore( +func NewKubeletClientCertificateManager(certDirectory string, nodeName types.NodeName, certData []byte, keyData []byte, certFile string, keyFile string) (certificate.Manager, error) { + certificateStore, err := certificate.NewFileStore( "kubelet-client", certDirectory, certDirectory, @@ -91,7 +106,17 @@ func NewKubeletClientCertificateManager(certDirectory string, nodeName types.Nod if err != nil { return nil, fmt.Errorf("failed to initialize client certificate store: %v", err) } - m, err := NewManager(&Config{ + var certificateExpiration = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: metrics.KubeletSubsystem, + Subsystem: "certificate_manager", + Name: "client_expiration_seconds", + Help: "Gauge of the lifetime of a certificate. 
The value is the date the certificate will expire in seconds since January 1, 1970 UTC.", + }, + ) + prometheus.MustRegister(certificateExpiration) + + m, err := certificate.NewManager(&certificate.Config{ Template: &x509.CertificateRequest{ Subject: pkix.Name{ CommonName: fmt.Sprintf("system:node:%s", nodeName), @@ -116,6 +141,7 @@ func NewKubeletClientCertificateManager(certDirectory string, nodeName types.Nod CertificateStore: certificateStore, BootstrapCertificatePEM: certData, BootstrapKeyPEM: keyData, + CertificateExpiration: certificateExpiration, }) if err != nil { return nil, fmt.Errorf("failed to initialize client certificate manager: %v", err) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/transport.go b/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/transport.go index bb472039f3fb..1683ad09735a 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/transport.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/certificate/transport.go @@ -30,6 +30,7 @@ import ( utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/wait" restclient "k8s.io/client-go/rest" + "k8s.io/client-go/util/certificate" ) // UpdateTransport instruments a restconfig with a transport that dynamically uses @@ -44,13 +45,13 @@ import ( // // stopCh should be used to indicate when the transport is unused and doesn't need // to continue checking the manager. -func UpdateTransport(stopCh <-chan struct{}, clientConfig *restclient.Config, clientCertificateManager Manager) error { - return updateTransport(stopCh, 10*time.Second, clientConfig, clientCertificateManager) +func UpdateTransport(stopCh <-chan struct{}, clientConfig *restclient.Config, clientCertificateManager certificate.Manager, exitIfExpired bool) error { + return updateTransport(stopCh, 10*time.Second, clientConfig, clientCertificateManager, exitIfExpired) } // updateTransport is an internal method that exposes how often this method checks that the // client cert has changed. Intended for testing. -func updateTransport(stopCh <-chan struct{}, period time.Duration, clientConfig *restclient.Config, clientCertificateManager Manager) error { +func updateTransport(stopCh <-chan struct{}, period time.Duration, clientConfig *restclient.Config, clientCertificateManager certificate.Manager, exitIfExpired bool) error { if clientConfig.Transport != nil { return fmt.Errorf("there is already a transport configured") } @@ -79,6 +80,13 @@ func updateTransport(stopCh <-chan struct{}, period time.Duration, clientConfig lastCert := clientCertificateManager.Current() go wait.Until(func() { curr := clientCertificateManager.Current() + if exitIfExpired && curr != nil && time.Now().After(curr.Leaf.NotAfter) { + if clientCertificateManager.ServerHealthy() { + glog.Fatalf("The currently active client certificate has expired and the server is responsive, exiting.") + } else { + glog.Errorf("The currently active client certificate has expired, but the server is not responsive. A restart may be necessary to retrieve new initial credentials.") + } + } if curr == nil || lastCert == curr { // Cert hasn't been rotated. 
return diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/checkpoint/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/checkpoint/BUILD new file mode 100644 index 000000000000..c8035650883e --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/checkpoint/BUILD @@ -0,0 +1,42 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["checkpoint.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/checkpoint", + visibility = ["//visibility:public"], + deps = [ + "//pkg/apis/core:go_default_library", + "//pkg/volume/util:go_default_library", + "//vendor/github.com/dchest/safefile:go_default_library", + "//vendor/github.com/ghodss/yaml:go_default_library", + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["checkpoint_test.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/checkpoint", + library = ":go_default_library", + deps = [ + "//pkg/apis/core:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/checkpoint/checkpoint.go b/vendor/k8s.io/kubernetes/pkg/kubelet/checkpoint/checkpoint.go new file mode 100644 index 000000000000..d4b30f902d95 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/checkpoint/checkpoint.go @@ -0,0 +1,151 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package checkpoint + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/dchest/safefile" + "github.com/ghodss/yaml" + "github.com/golang/glog" + + "k8s.io/api/core/v1" + "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/volume/util" +) + +const ( + // Delimiter used on checkpoints written to disk + delimiter = "_" + podPrefix = "Pod" +) + +// Manager is the interface used to manage checkpoints +// which involves writing resources to disk to recover +// during restart or failure scenarios. 
+// https://github.com/kubernetes/community/pull/1241/files +type Manager interface { + // LoadPods will load checkpointed Pods from disk + LoadPods() ([]*v1.Pod, error) + + // WritePod will serialize a Pod to disk + WritePod(pod *v1.Pod) error + + // Deletes the checkpoint of the given pod from disk + DeletePod(pod *v1.Pod) error +} + +var instance Manager +var mutex = &sync.Mutex{} + +// fileCheckPointManager - is a checkpointer that writes contents to disk +// The type information of the resource objects are encoded in the name +type fileCheckPointManager struct { + path string +} + +// NewCheckpointManager will create a Manager that points to the following path +func NewCheckpointManager(path string) Manager { + // NOTE: This is a precaution; current implementation should not run + // multiple checkpoint managers. + mutex.Lock() + defer mutex.Unlock() + instance = &fileCheckPointManager{path: path} + return instance +} + +// GetInstance will return the current Manager, there should be only one. +func GetInstance() Manager { + mutex.Lock() + defer mutex.Unlock() + return instance +} + +// loadPod will load Pod Checkpoint yaml file. +func (fcp *fileCheckPointManager) loadPod(file string) (*v1.Pod, error) { + return util.LoadPodFromFile(file) +} + +// checkAnnotations will validate the checkpoint annotations exist on the Pod +func (fcp *fileCheckPointManager) checkAnnotations(pod *v1.Pod) bool { + if podAnnotations := pod.GetAnnotations(); podAnnotations != nil { + if podAnnotations[core.BootstrapCheckpointAnnotationKey] == "true" { + return true + } + } + return false +} + +// getPodPath returns the full qualified path for the pod checkpoint +func (fcp *fileCheckPointManager) getPodPath(pod *v1.Pod) string { + return fmt.Sprintf("%v/Pod%v%v.yaml", fcp.path, delimiter, pod.GetUID()) +} + +// LoadPods Loads All Checkpoints from disk +func (fcp *fileCheckPointManager) LoadPods() ([]*v1.Pod, error) { + checkpoints := make([]*v1.Pod, 0) + files, err := ioutil.ReadDir(fcp.path) + if err != nil { + return nil, err + } + for _, f := range files { + // get just the filename + _, fname := filepath.Split(f.Name()) + // Get just the Resource from "Resource_Name" + fnfields := strings.Split(fname, delimiter) + switch fnfields[0] { + case podPrefix: + pod, err := fcp.loadPod(fmt.Sprintf("%s/%s", fcp.path, f.Name())) + if err != nil { + return nil, err + } + checkpoints = append(checkpoints, pod) + default: + glog.Warningf("Unsupported checkpoint file detected %v", f) + } + } + return checkpoints, nil +} + +// Writes a checkpoint to a file on disk if annotation is present +func (fcp *fileCheckPointManager) WritePod(pod *v1.Pod) error { + var err error + if fcp.checkAnnotations(pod) { + if blob, err := yaml.Marshal(pod); err == nil { + err = safefile.WriteFile(fcp.getPodPath(pod), blob, 0644) + } + } else { + // This is to handle an edge where a pod update could remove + // an annotation and the checkpoint should then be removed. 
+ err = fcp.DeletePod(pod) + } + return err +} + +// Deletes a checkpoint from disk if present +func (fcp *fileCheckPointManager) DeletePod(pod *v1.Pod) error { + podPath := fcp.getPodPath(pod) + if err := os.Remove(podPath); !os.IsNotExist(err) { + return err + } + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/client/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/client/BUILD index 8a2ff7041dc8..3bf79dc1f088 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/client/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/client/BUILD @@ -9,6 +9,7 @@ load( go_library( name = "go_default_library", srcs = ["kubelet_client.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/client", deps = [ "//pkg/util/node:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", @@ -24,6 +25,7 @@ go_test( name = "go_default_test", srcs = ["kubelet_client_test.go"], data = ["//pkg/client/testdata"], + importpath = "k8s.io/kubernetes/pkg/kubelet/client", library = ":go_default_library", deps = [ "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/BUILD index 187cc3a0ecc2..127192c637dc 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/BUILD @@ -7,8 +7,6 @@ go_library( "container_manager.go", "container_manager_stub.go", "container_manager_unsupported.go", - "device_plugin_handler.go", - "device_plugin_handler_stub.go", "fake_internal_container_lifecycle.go", "helpers_unsupported.go", "internal_container_lifecycle.go", @@ -29,32 +27,32 @@ go_library( ], "//conditions:default": [], }), + importpath = "k8s.io/kubernetes/pkg/kubelet/cm", visibility = ["//visibility:public"], deps = [ "//pkg/features:go_default_library", "//pkg/kubelet/apis/cri:go_default_library", - "//pkg/kubelet/apis/deviceplugin/v1alpha1:go_default_library", - "//pkg/kubelet/apis/kubeletconfig:go_default_library", "//pkg/kubelet/cadvisor:go_default_library", "//pkg/kubelet/cm/cpumanager:go_default_library", + "//pkg/kubelet/config:go_default_library", "//pkg/kubelet/container:go_default_library", - "//pkg/kubelet/deviceplugin:go_default_library", "//pkg/kubelet/eviction/api:go_default_library", + "//pkg/kubelet/lifecycle:go_default_library", "//pkg/kubelet/status:go_default_library", "//pkg/util/mount:go_default_library", + "//plugin/pkg/scheduler/schedulercache:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/client-go/tools/record:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:linux_amd64": [ - "//pkg/api:go_default_library", - "//pkg/api/v1/helper:go_default_library", - "//pkg/api/v1/helper/qos:go_default_library", "//pkg/api/v1/resource:go_default_library", + "//pkg/apis/core/v1/helper:go_default_library", + "//pkg/apis/core/v1/helper/qos:go_default_library", + "//pkg/kubelet/cm/deviceplugin:go_default_library", "//pkg/kubelet/cm/util:go_default_library", "//pkg/kubelet/events:go_default_library", "//pkg/kubelet/metrics:go_default_library", @@ -69,6 +67,7 @@ go_library( "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs:go_default_library", 
"//vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", ], @@ -80,7 +79,6 @@ go_test( name = "go_default_test", srcs = [ "container_manager_unsupported_test.go", - "device_plugin_handler_test.go", ] + select({ "@io_bazel_rules_go//go/platform:linux_amd64": [ "cgroup_manager_linux_test.go", @@ -91,21 +89,17 @@ go_test( ], "//conditions:default": [], }), + importpath = "k8s.io/kubernetes/pkg/kubelet/cm", library = ":go_default_library", deps = [ - "//pkg/kubelet/apis/deviceplugin/v1alpha1:go_default_library", "//pkg/util/mount:go_default_library", - "//vendor/github.com/stretchr/testify/assert:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:linux_amd64": [ - "//pkg/kubelet/apis/kubeletconfig:go_default_library", "//pkg/kubelet/eviction/api:go_default_library", + "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/github.com/stretchr/testify/require:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", ], "//conditions:default": [], }), @@ -124,6 +118,7 @@ filegroup( ":package-srcs", "//pkg/kubelet/cm/cpumanager:all-srcs", "//pkg/kubelet/cm/cpuset:all-srcs", + "//pkg/kubelet/cm/deviceplugin:all-srcs", "//pkg/kubelet/cm/util:all-srcs", ], tags = ["automanaged"], diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cgroup_manager_linux.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cgroup_manager_linux.go index 62894e5a3dde..d1d99713429c 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cgroup_manager_linux.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cgroup_manager_linux.go @@ -45,6 +45,8 @@ const ( libcontainerCgroupfs libcontainerCgroupManagerType = "cgroupfs" // libcontainerSystemd means use libcontainer with systemd libcontainerSystemd libcontainerCgroupManagerType = "systemd" + // systemdSuffix is the cgroup name suffix for systemd + systemdSuffix string = ".slice" ) // hugePageSizeList is useful for converting to the hugetlb canonical unit @@ -68,8 +70,8 @@ func ConvertCgroupNameToSystemd(cgroupName CgroupName, outputToCgroupFs bool) st } // detect if we are given a systemd style name. // if so, we do not want to do double encoding. - if strings.HasSuffix(part, ".slice") { - part = strings.TrimSuffix(part, ".slice") + if IsSystemdStyleName(part) { + part = strings.TrimSuffix(part, systemdSuffix) separatorIndex := strings.LastIndex(part, "-") if separatorIndex >= 0 && separatorIndex < len(part) { part = part[separatorIndex+1:] @@ -87,8 +89,8 @@ func ConvertCgroupNameToSystemd(cgroupName CgroupName, outputToCgroupFs bool) st result = "-" } // always have a .slice suffix - if !strings.HasSuffix(result, ".slice") { - result = result + ".slice" + if !IsSystemdStyleName(result) { + result = result + systemdSuffix } // if the caller desired the result in cgroupfs format... 
@@ -114,6 +116,13 @@ func ConvertCgroupFsNameToSystemd(cgroupfsName string) (string, error) { return path.Base(cgroupfsName), nil } +func IsSystemdStyleName(name string) bool { + if strings.HasSuffix(name, systemdSuffix) { + return true + } + return false +} + // libcontainerAdapter provides a simplified interface to libcontainer based on libcontainer type. type libcontainerAdapter struct { // cgroupManagerType defines how to interface with libcontainer @@ -151,15 +160,18 @@ func (l *libcontainerAdapter) revertName(name string) CgroupName { if l.cgroupManagerType != libcontainerSystemd { return CgroupName(name) } + return CgroupName(RevertFromSystemdToCgroupStyleName(name)) +} +func RevertFromSystemdToCgroupStyleName(name string) string { driverName, err := ConvertCgroupFsNameToSystemd(name) if err != nil { panic(err) } - driverName = strings.TrimSuffix(driverName, ".slice") + driverName = strings.TrimSuffix(driverName, systemdSuffix) driverName = strings.Replace(driverName, "-", "/", -1) driverName = strings.Replace(driverName, "_", "-", -1) - return CgroupName(driverName) + return driverName } // adaptName converts a CgroupName identifier to a driver specific conversion value. diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cgroup_manager_unsupported.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cgroup_manager_unsupported.go index 6a567e94b156..34345985cf98 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cgroup_manager_unsupported.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cgroup_manager_unsupported.go @@ -77,3 +77,11 @@ func ConvertCgroupFsNameToSystemd(cgroupfsName string) (string, error) { func ConvertCgroupNameToSystemd(cgroupName CgroupName, outputToCgroupFs bool) string { return "" } + +func RevertFromSystemdToCgroupStyleName(name string) string { + return "" +} + +func IsSystemdStyleName(name string) bool { + return false +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager.go index 36039bdbc0d3..dfdcf8d82c84 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager.go @@ -23,10 +23,12 @@ import ( // TODO: Migrate kubelet to either use its own internal objects or client library. "k8s.io/api/core/v1" internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" - "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig" + "k8s.io/kubernetes/pkg/kubelet/config" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api" + "k8s.io/kubernetes/pkg/kubelet/lifecycle" "k8s.io/kubernetes/pkg/kubelet/status" + "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" "fmt" "strconv" @@ -40,16 +42,16 @@ type ContainerManager interface { // Runs the container manager's housekeeping. // - Ensures that the Docker daemon is in a container. // - Creates the system container where all non-containerized processes run. - Start(*v1.Node, ActivePodsFunc, status.PodStatusProvider, internalapi.RuntimeService) error + Start(*v1.Node, ActivePodsFunc, config.SourcesReady, status.PodStatusProvider, internalapi.RuntimeService) error - // Returns resources allocated to system cgroups in the machine. + // SystemCgroupsLimit returns resources allocated to system cgroups in the machine. // These cgroups include the system and Kubernetes services. SystemCgroupsLimit() v1.ResourceList - // Returns a NodeConfig that is being used by the container manager. 
+ // GetNodeConfig returns a NodeConfig that is being used by the container manager. GetNodeConfig() NodeConfig - // Returns internal Status. + // Status returns internal Status. Status() Status // NewPodContainerManager is a factory method which returns a podContainerManager object @@ -62,19 +64,30 @@ type ContainerManager interface { // GetQOSContainersInfo returns the names of top level QoS containers GetQOSContainersInfo() QOSContainersInfo - // GetNodeAllocatable returns the amount of compute resources that have to be reserved from scheduling. + // GetNodeAllocatableReservation returns the amount of compute resources that have to be reserved from scheduling. GetNodeAllocatableReservation() v1.ResourceList // GetCapacity returns the amount of compute resources tracked by container manager available on the node. GetCapacity() v1.ResourceList + // GetDevicePluginResourceCapacity returns the amount of device plugin resources available on the node + // and inactive device plugin resources previously registered on the node. + GetDevicePluginResourceCapacity() (v1.ResourceList, []string) + // UpdateQOSCgroups performs housekeeping updates to ensure that the top // level QoS containers have their desired state in a thread-safe way UpdateQOSCgroups() error - // Returns RunContainerOptions with devices, mounts, and env fields populated for + // GetResources returns RunContainerOptions with devices, mounts, and env fields populated for // extended resources required by container. - GetResources(pod *v1.Pod, container *v1.Container, activePods []*v1.Pod) (*kubecontainer.RunContainerOptions, error) + GetResources(pod *v1.Pod, container *v1.Container) (*kubecontainer.RunContainerOptions, error) + + // UpdatePluginResources calls Allocate of device plugin handler for potential + // requests for device plugin resources, and returns an error if fails. + // Otherwise, it updates allocatableResource in nodeInfo if necessary, + // to make sure it is at least equal to the pod's requested capacity for + // any registered device plugin resource + UpdatePluginResources(*schedulercache.NodeInfo, *lifecycle.PodAdmitAttributes) error InternalContainerLifecycle() InternalContainerLifecycle } @@ -87,6 +100,7 @@ type NodeConfig struct { CgroupsPerQOS bool CgroupRoot string CgroupDriver string + KubeletRootDir string ProtectKernelDefaults bool NodeAllocatableConfig ExperimentalQOSReserved map[v1.ResourceName]int64 @@ -135,7 +149,7 @@ func parsePercentage(v string) (int64, error) { } // ParseQOSReserved parses the --qos-reserve-requests option -func ParseQOSReserved(m kubeletconfig.ConfigurationMap) (*map[v1.ResourceName]int64, error) { +func ParseQOSReserved(m map[string]string) (*map[v1.ResourceName]int64, error) { reservations := make(map[v1.ResourceName]int64) for k, v := range m { switch v1.ResourceName(k) { diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux.go index fea87a2da887..6c6c7068172d 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux.go @@ -19,13 +19,13 @@ limitations under the License. 
package cm import ( - "bufio" + "bytes" "fmt" "io/ioutil" "os" - "os/exec" "path" "strconv" + "strings" "sync" "time" @@ -45,8 +45,11 @@ import ( internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" "k8s.io/kubernetes/pkg/kubelet/cadvisor" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager" + "k8s.io/kubernetes/pkg/kubelet/cm/deviceplugin" cmutil "k8s.io/kubernetes/pkg/kubelet/cm/util" + "k8s.io/kubernetes/pkg/kubelet/config" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/kubelet/lifecycle" "k8s.io/kubernetes/pkg/kubelet/qos" "k8s.io/kubernetes/pkg/kubelet/status" utilfile "k8s.io/kubernetes/pkg/util/file" @@ -55,6 +58,7 @@ import ( "k8s.io/kubernetes/pkg/util/procfs" utilsysctl "k8s.io/kubernetes/pkg/util/sysctl" utilversion "k8s.io/kubernetes/pkg/util/version" + "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" ) const ( @@ -110,7 +114,7 @@ type containerManagerImpl struct { qosContainers QOSContainersInfo // Tasks that are run periodically periodicTasks []func() - // holds all the mounted cgroup subsystems + // Holds all the mounted cgroup subsystems subsystems *CgroupSubsystems nodeInfo *v1.Node // Interface for cgroup management @@ -125,7 +129,7 @@ type containerManagerImpl struct { // Interface for QoS cgroup management qosContainerManager QOSContainerManager // Interface for exporting and allocating devices reported by device plugins. - devicePluginHandler DevicePluginHandler + devicePluginManager deviceplugin.Manager // Interface for CPU affinity management. cpuManager cpumanager.Manager } @@ -197,31 +201,17 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.I } // Check whether swap is enabled. The Kubelet does not support running with swap enabled. - cmd := exec.Command("cat", "/proc/swaps") - stdout, err := cmd.StdoutPipe() + swapData, err := ioutil.ReadFile("/proc/swaps") if err != nil { return nil, err } - if err := cmd.Start(); err != nil { - return nil, err - } - var buf []string - scanner := bufio.NewScanner(stdout) - for scanner.Scan() { // Splits on newlines by default - buf = append(buf, scanner.Text()) - } - if err := cmd.Wait(); err != nil { // Clean up - return nil, err - } + swapData = bytes.TrimSpace(swapData) // extra trailing \n + swapLines := strings.Split(string(swapData), "\n") - // Running with swap enabled should be considered an error, but in order to maintain legacy - // behavior we have to require an opt-in to this error for a period of time. // If there is more than one line (table headers) in /proc/swaps, swap is enabled and we should // error out unless --fail-swap-on is set to false. - if len(buf) > 1 { - if failSwapOn { - return nil, fmt.Errorf("Running with swap on is not supported, please disable swap! or set --fail-swap-on flag to false. /proc/swaps contained: %v", buf) - } + if failSwapOn && len(swapLines) > 1 { + return nil, fmt.Errorf("Running with swap on is not supported, please disable swap! or set --fail-swap-on flag to false. /proc/swaps contained: %v", swapLines) } var capacity = v1.ResourceList{} // It is safe to invoke `MachineInfo` on cAdvisor before logically initializing cAdvisor here because @@ -250,13 +240,13 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.I return nil, fmt.Errorf("invalid configuration: cgroup-root %q doesn't exist: %v", cgroupRoot, err) } glog.Infof("container manager verified user specified cgroup-root exists: %v", cgroupRoot) - // Include the the top level cgroup for enforcing node allocatable into cgroup-root. 
+ // Include the top level cgroup for enforcing node allocatable into cgroup-root. // This way, all sub modules can avoid having to understand the concept of node allocatable. cgroupRoot = path.Join(cgroupRoot, defaultNodeAllocatableCgroupName) } glog.Infof("Creating Container Manager object based on Node Config: %+v", nodeConfig) - qosContainerManager, err := NewQOSContainerManager(subsystems, cgroupRoot, nodeConfig) + qosContainerManager, err := NewQOSContainerManager(subsystems, cgroupRoot, nodeConfig, cgroupManager) if err != nil { return nil, err } @@ -273,23 +263,11 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.I qosContainerManager: qosContainerManager, } - updateDeviceCapacityFunc := func(updates v1.ResourceList) { - cm.Lock() - defer cm.Unlock() - for k, v := range updates { - if v.Value() <= 0 { - delete(cm.capacity, k) - } else { - cm.capacity[k] = v - } - } - } - - glog.Infof("Creating device plugin handler: %t", devicePluginEnabled) + glog.Infof("Creating device plugin manager: %t", devicePluginEnabled) if devicePluginEnabled { - cm.devicePluginHandler, err = NewDevicePluginHandlerImpl(updateDeviceCapacityFunc) + cm.devicePluginManager, err = deviceplugin.NewManagerImpl() } else { - cm.devicePluginHandler, err = NewDevicePluginHandlerStub() + cm.devicePluginManager, err = deviceplugin.NewManagerStub() } if err != nil { return nil, err @@ -302,6 +280,7 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.I nodeConfig.ExperimentalCPUManagerReconcilePeriod, machineInfo, cm.GetNodeAllocatableReservation(), + nodeConfig.KubeletRootDir, ) if err != nil { glog.Errorf("failed to initialize cpu manager: %v", err) @@ -540,6 +519,7 @@ func (cm *containerManagerImpl) Status() Status { func (cm *containerManagerImpl) Start(node *v1.Node, activePods ActivePodsFunc, + sourcesReady config.SourcesReady, podStatusProvider status.PodStatusProvider, runtimeService internalapi.RuntimeService) error { @@ -607,7 +587,7 @@ func (cm *containerManagerImpl) Start(node *v1.Node, }, time.Second, stopChan) // Starts device plugin manager. - if err := cm.devicePluginHandler.Start(); err != nil { + if err := cm.devicePluginManager.Start(deviceplugin.ActivePodsFunc(activePods), sourcesReady); err != nil { return err } return nil @@ -628,75 +608,24 @@ func (cm *containerManagerImpl) setFsCapacity() error { } // TODO: move the GetResources logic to PodContainerManager. -func (cm *containerManagerImpl) GetResources(pod *v1.Pod, container *v1.Container, activePods []*v1.Pod) (*kubecontainer.RunContainerOptions, error) { +func (cm *containerManagerImpl) GetResources(pod *v1.Pod, container *v1.Container) (*kubecontainer.RunContainerOptions, error) { opts := &kubecontainer.RunContainerOptions{} - // Gets devices, mounts, and envs from device plugin handler. - glog.V(3).Infof("Calling devicePluginHandler AllocateDevices") - // Maps to detect duplicate settings. - devsMap := make(map[string]string) - mountsMap := make(map[string]string) - envsMap := make(map[string]string) - allocResps, err := cm.devicePluginHandler.Allocate(pod, container, activePods) - if err != nil { - return opts, err - } - // Loops through AllocationResponses of all required extended resources. - for _, resp := range allocResps { - // Loops through runtime spec of all devices of the given resource. - for _, devRuntime := range resp.Spec { - // Updates RunContainerOptions.Devices. 
- for _, dev := range devRuntime.Devices { - if d, ok := devsMap[dev.ContainerPath]; ok { - glog.V(3).Infof("skip existing device %s %s", dev.ContainerPath, dev.HostPath) - if d != dev.HostPath { - glog.Errorf("Container device %s has conflicting mapping host devices: %s and %s", - dev.ContainerPath, d, dev.HostPath) - } - continue - } - devsMap[dev.ContainerPath] = dev.HostPath - opts.Devices = append(opts.Devices, kubecontainer.DeviceInfo{ - PathOnHost: dev.HostPath, - PathInContainer: dev.ContainerPath, - Permissions: dev.Permissions, - }) - } - // Updates RunContainerOptions.Mounts. - for _, mount := range devRuntime.Mounts { - if m, ok := mountsMap[mount.ContainerPath]; ok { - glog.V(3).Infof("skip existing mount %s %s", mount.ContainerPath, mount.HostPath) - if m != mount.HostPath { - glog.Errorf("Container mount %s has conflicting mapping host mounts: %s and %s", - mount.ContainerPath, m, mount.HostPath) - } - continue - } - mountsMap[mount.ContainerPath] = mount.HostPath - opts.Mounts = append(opts.Mounts, kubecontainer.Mount{ - Name: mount.ContainerPath, - ContainerPath: mount.ContainerPath, - HostPath: mount.HostPath, - ReadOnly: mount.ReadOnly, - SELinuxRelabel: false, - }) - } - // Updates RunContainerOptions.Envs. - for k, v := range devRuntime.Envs { - if e, ok := envsMap[k]; ok { - glog.V(3).Infof("skip existing envs %s %s", k, v) - if e != v { - glog.Errorf("Environment variable %s has conflicting setting: %s and %s", k, e, v) - } - continue - } - envsMap[k] = v - opts.Envs = append(opts.Envs, kubecontainer.EnvVar{Name: k, Value: v}) - } - } - } + // Allocate should already be called during predicateAdmitHandler.Admit(), + // just try to fetch device runtime information from cached state here + devOpts := cm.devicePluginManager.GetDeviceRunContainerOptions(pod, container) + if devOpts == nil { + return opts, nil + } + opts.Devices = append(opts.Devices, devOpts.Devices...) + opts.Mounts = append(opts.Mounts, devOpts.Mounts...) + opts.Envs = append(opts.Envs, devOpts.Envs...) 
return opts, nil } +func (cm *containerManagerImpl) UpdatePluginResources(node *schedulercache.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error { + return cm.devicePluginManager.Allocate(node, attrs) +} + func (cm *containerManagerImpl) SystemCgroupsLimit() v1.ResourceList { cpuLimit := int64(0) @@ -957,3 +886,7 @@ func (cm *containerManagerImpl) GetCapacity() v1.ResourceList { defer cm.RUnlock() return cm.capacity } + +func (cm *containerManagerImpl) GetDevicePluginResourceCapacity() (v1.ResourceList, []string) { + return cm.devicePluginManager.GetCapacity() +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_stub.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_stub.go index ee5d7c73bf6f..27a86849582e 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_stub.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_stub.go @@ -22,15 +22,18 @@ import ( internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager" + "k8s.io/kubernetes/pkg/kubelet/config" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/kubelet/lifecycle" "k8s.io/kubernetes/pkg/kubelet/status" + "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" ) type containerManagerStub struct{} var _ ContainerManager = &containerManagerStub{} -func (cm *containerManagerStub) Start(_ *v1.Node, _ ActivePodsFunc, _ status.PodStatusProvider, _ internalapi.RuntimeService) error { +func (cm *containerManagerStub) Start(_ *v1.Node, _ ActivePodsFunc, _ config.SourcesReady, _ status.PodStatusProvider, _ internalapi.RuntimeService) error { glog.V(2).Infof("Starting stub container manager") return nil } @@ -67,14 +70,22 @@ func (cm *containerManagerStub) GetCapacity() v1.ResourceList { return nil } +func (cm *containerManagerStub) GetDevicePluginResourceCapacity() (v1.ResourceList, []string) { + return nil, []string{} +} + func (cm *containerManagerStub) NewPodContainerManager() PodContainerManager { return &podContainerManagerStub{} } -func (cm *containerManagerStub) GetResources(pod *v1.Pod, container *v1.Container, activePods []*v1.Pod) (*kubecontainer.RunContainerOptions, error) { +func (cm *containerManagerStub) GetResources(pod *v1.Pod, container *v1.Container) (*kubecontainer.RunContainerOptions, error) { return &kubecontainer.RunContainerOptions{}, nil } +func (cm *containerManagerStub) UpdatePluginResources(*schedulercache.NodeInfo, *lifecycle.PodAdmitAttributes) error { + return nil +} + func (cm *containerManagerStub) InternalContainerLifecycle() InternalContainerLifecycle { return &internalContainerLifecycleImpl{cpumanager.NewFakeManager()} } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_unsupported.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_unsupported.go index 0602bb5de977..6453397b1774 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_unsupported.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_unsupported.go @@ -26,9 +26,12 @@ import ( internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" "k8s.io/kubernetes/pkg/kubelet/cadvisor" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager" + "k8s.io/kubernetes/pkg/kubelet/config" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/kubelet/lifecycle" "k8s.io/kubernetes/pkg/kubelet/status" "k8s.io/kubernetes/pkg/util/mount" + "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" ) type unsupportedContainerManager struct { @@ -36,7 +39,7 @@ type 
unsupportedContainerManager struct { var _ ContainerManager = &unsupportedContainerManager{} -func (unsupportedContainerManager) Start(_ *v1.Node, _ ActivePodsFunc, _ status.PodStatusProvider, _ internalapi.RuntimeService) error { +func (unsupportedContainerManager) Start(_ *v1.Node, _ ActivePodsFunc, _ config.SourcesReady, _ status.PodStatusProvider, _ internalapi.RuntimeService) error { return fmt.Errorf("Container Manager is unsupported in this build") } @@ -72,14 +75,22 @@ func (cm *unsupportedContainerManager) GetCapacity() v1.ResourceList { return nil } +func (cm *unsupportedContainerManager) GetDevicePluginResourceCapacity() (v1.ResourceList, []string) { + return nil, []string{} +} + func (cm *unsupportedContainerManager) NewPodContainerManager() PodContainerManager { return &unsupportedPodContainerManager{} } -func (cm *unsupportedContainerManager) GetResources(pod *v1.Pod, container *v1.Container, activePods []*v1.Pod) (*kubecontainer.RunContainerOptions, error) { +func (cm *unsupportedContainerManager) GetResources(pod *v1.Pod, container *v1.Container) (*kubecontainer.RunContainerOptions, error) { return &kubecontainer.RunContainerOptions{}, nil } +func (cm *unsupportedContainerManager) UpdatePluginResources(*schedulercache.NodeInfo, *lifecycle.PodAdmitAttributes) error { + return nil +} + func (cm *unsupportedContainerManager) InternalContainerLifecycle() InternalContainerLifecycle { return &internalContainerLifecycleImpl{cpumanager.NewFakeManager()} } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_windows.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_windows.go index 8102ceca2dd8..56bdbb9d3636 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_windows.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_windows.go @@ -25,6 +25,7 @@ import ( "k8s.io/client-go/tools/record" internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" "k8s.io/kubernetes/pkg/kubelet/cadvisor" + "k8s.io/kubernetes/pkg/kubelet/config" "k8s.io/kubernetes/pkg/kubelet/status" "k8s.io/kubernetes/pkg/util/mount" ) @@ -35,7 +36,7 @@ type containerManagerImpl struct { var _ ContainerManager = &containerManagerImpl{} -func (cm *containerManagerImpl) Start(_ *v1.Node, _ ActivePodsFunc, _ status.PodStatusProvider, _ internalapi.RuntimeService) error { +func (cm *containerManagerImpl) Start(_ *v1.Node, _ ActivePodsFunc, _ config.SourcesReady, _ status.PodStatusProvider, _ internalapi.RuntimeService) error { glog.V(2).Infof("Starting Windows stub container manager") return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/BUILD index 0e46de793c63..f76a6a9312ef 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/BUILD @@ -10,9 +10,10 @@ go_library( "policy_none.go", "policy_static.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager", visibility = ["//visibility:public"], deps = [ - "//pkg/api/v1/helper/qos:go_default_library", + "//pkg/apis/core/v1/helper/qos:go_default_library", "//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library", "//pkg/kubelet/cm/cpumanager/state:go_default_library", "//pkg/kubelet/cm/cpumanager/topology:go_default_library", @@ -35,12 +36,14 @@ go_test( "policy_static_test.go", "policy_test.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager", library = ":go_default_library", deps = [ 
"//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library", "//pkg/kubelet/cm/cpumanager/state:go_default_library", "//pkg/kubelet/cm/cpumanager/topology:go_default_library", "//pkg/kubelet/cm/cpuset:go_default_library", + "//vendor/github.com/google/cadvisor/info/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_manager.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_manager.go index aade9f926677..9dc6b98b4e76 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_manager.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_manager.go @@ -33,6 +33,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/status" + "path" ) // ActivePodsFunc is a function that returns a list of pods to reconcile. @@ -44,6 +45,9 @@ type runtimeService interface { type policyName string +// CPUManagerStateFileName is the name file name where cpu manager stores it's state +const CPUManagerStateFileName = "cpu_manager_state" + // Manager interface provides methods for Kubelet to manage pod cpus. type Manager interface { // Start is called during Kubelet initialization. @@ -99,6 +103,7 @@ func NewManager( reconcilePeriod time.Duration, machineInfo *cadvisorapi.MachineInfo, nodeAllocatableReservation v1.ResourceList, + stateFileDirecory string, ) (Manager, error) { var policy Policy @@ -140,10 +145,14 @@ func NewManager( policy = NewNonePolicy() } + stateImpl := state.NewFileState( + path.Join(stateFileDirecory, CPUManagerStateFileName), + policy.Name()) + manager := &manager{ policy: policy, reconcilePeriod: reconcilePeriod, - state: state.NewMemoryState(), + state: stateImpl, machineInfo: machineInfo, nodeAllocatableReservation: nodeAllocatableReservation, } @@ -176,11 +185,16 @@ func (m *manager) AddContainer(p *v1.Pod, c *v1.Container, containerID string) e cpus := m.state.GetCPUSetOrDefault(containerID) m.Unlock() - err = m.updateContainerCPUSet(containerID, cpus) - if err != nil { - glog.Errorf("[cpumanager] AddContainer error: %v", err) - return err + if !cpus.IsEmpty() { + err = m.updateContainerCPUSet(containerID, cpus) + if err != nil { + glog.Errorf("[cpumanager] AddContainer error: %v", err) + return err + } + } else { + glog.V(5).Infof("[cpumanager] update container resources is skipped due to cpu set is empty") } + return nil } @@ -236,7 +250,7 @@ func (m *manager) reconcileState() (success []reconciledContainer, failure []rec continue } - glog.Infof("[cpumanager] reconcileState: updating container (pod: %s, container: %s, container id: %s, cpuset: \"%v\")", pod.Name, container.Name, containerID, cset) + glog.V(4).Infof("[cpumanager] reconcileState: updating container (pod: %s, container: %s, container id: %s, cpuset: \"%v\")", pod.Name, container.Name, containerID, cset) err = m.updateContainerCPUSet(containerID, cset) if err != nil { glog.Errorf("[cpumanager] reconcileState: failed to update container (pod: %s, container: %s, container id: %s, cpuset: \"%v\", error: %v)", pod.Name, container.Name, containerID, cset, err) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_static.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_static.go index 1eca50b91f45..dfbb0a297d06 100644 --- 
a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_static.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_static.go @@ -21,7 +21,7 @@ import ( "github.com/golang/glog" "k8s.io/api/core/v1" - v1qos "k8s.io/kubernetes/pkg/api/v1/helper/qos" + v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology" "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" @@ -109,9 +109,45 @@ func (p *staticPolicy) Name() string { } func (p *staticPolicy) Start(s state.State) { - // Configure the shared pool to include all detected CPU IDs. - allCPUs := p.topology.CPUDetails.CPUs() - s.SetDefaultCPUSet(allCPUs) + if err := p.validateState(s); err != nil { + glog.Errorf("[cpumanager] static policy invalid state: %s\n", err.Error()) + panic("[cpumanager] - please drain node and remove policy state file") + } +} + +func (p *staticPolicy) validateState(s state.State) error { + tmpAssignments := s.GetCPUAssignments() + tmpDefaultCPUset := s.GetDefaultCPUSet() + + // Default cpuset cannot be empty when assignments exist + if tmpDefaultCPUset.IsEmpty() { + if len(tmpAssignments) != 0 { + return fmt.Errorf("default cpuset cannot be empty") + } + // state is empty initialize + allCPUs := p.topology.CPUDetails.CPUs() + s.SetDefaultCPUSet(allCPUs) + return nil + } + + // State has already been initialized from file (is not empty) + // 1 Check if the reserved cpuset is not part of default cpuset because: + // - kube/system reserved have changed (increased) - may lead to some containers not being able to start + // - user tampered with file + if !p.reserved.Intersection(tmpDefaultCPUset).Equals(p.reserved) { + return fmt.Errorf("not all reserved cpus: \"%s\" are present in defaultCpuSet: \"%s\"", + p.reserved.String(), tmpDefaultCPUset.String()) + } + + // 2. Check if state for static policy is consistent + for cID, cset := range tmpAssignments { + // None of the cpu in DEFAULT cset should be in s.assignments + if !tmpDefaultCPUset.Intersection(cset).IsEmpty() { + return fmt.Errorf("container id: %s cpuset: \"%s\" overlaps with default cpuset \"%s\"", + cID, cset.String(), tmpDefaultCPUset.String()) + } + } + return nil } // assignableCPUs returns the set of unassigned CPUs minus the reserved set. 
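For reference, the assignments and default cpuset that the static policy validates above are persisted by the file-backed state store introduced further down in this diff as a small JSON document (cpu_manager_state). Below is a minimal illustrative sketch, not part of the diff, that mirrors the stateFileData struct from state_file.go to show what that file looks like on disk; the container ID and cpuset strings are made-up example values.

package main

import (
	"encoding/json"
	"fmt"
)

// stateFileData mirrors the struct added in state_file.go in this diff; the
// field names and JSON tags come from that file, the values below are examples.
type stateFileData struct {
	PolicyName    string            `json:"policyName"`
	DefaultCPUSet string            `json:"defaultCpuSet"`
	Entries       map[string]string `json:"entries,omitempty"`
}

func main() {
	// Example contents of <state-dir>/cpu_manager_state when the static
	// policy has pinned a hypothetical container "abc123" to CPUs 2 and 3.
	data := stateFileData{
		PolicyName:    "static",
		DefaultCPUSet: "0-1,4-7",
		Entries:       map[string]string{"abc123": "2-3"},
	}
	b, err := json.Marshal(data)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
	// {"policyName":"static","defaultCpuSet":"0-1,4-7","entries":{"abc123":"2-3"}}
}

validateState above then only accepts such a file when the reserved CPUs are contained in defaultCpuSet and none of the per-container entries overlap with it.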
diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/BUILD index 7731bfb78c9f..8af631c9ad6d 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/BUILD @@ -1,11 +1,13 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", srcs = [ "state.go", + "state_file.go", "state_mem.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state", visibility = ["//visibility:public"], deps = [ "//pkg/kubelet/cm/cpuset:go_default_library", @@ -13,6 +15,14 @@ go_library( ], ) +go_test( + name = "go_default_test", + srcs = ["state_file_test.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state", + library = ":go_default_library", + deps = ["//pkg/kubelet/cm/cpuset:go_default_library"], +) + filegroup( name = "package-srcs", srcs = glob(["**"]), diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state.go index 98f7f7dc240b..0550b644d579 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state.go @@ -20,17 +20,32 @@ import ( "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" ) +// ContainerCPUAssignments type used in cpu manger state +type ContainerCPUAssignments map[string]cpuset.CPUSet + +// Clone returns a copy of ContainerCPUAssignments +func (as ContainerCPUAssignments) Clone() ContainerCPUAssignments { + ret := make(ContainerCPUAssignments) + for key, val := range as { + ret[key] = val + } + return ret +} + // Reader interface used to read current cpu/pod assignment state type Reader interface { GetCPUSet(containerID string) (cpuset.CPUSet, bool) GetDefaultCPUSet() cpuset.CPUSet GetCPUSetOrDefault(containerID string) cpuset.CPUSet + GetCPUAssignments() ContainerCPUAssignments } type writer interface { SetCPUSet(containerID string, cpuset cpuset.CPUSet) SetDefaultCPUSet(cpuset cpuset.CPUSet) + SetCPUAssignments(ContainerCPUAssignments) Delete(containerID string) + ClearState() } // State interface provides methods for tracking and setting cpu/pod assignment diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_file.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_file.go new file mode 100644 index 000000000000..20c7763350d1 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_file.go @@ -0,0 +1,204 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package state + +import ( + "encoding/json" + "fmt" + "github.com/golang/glog" + "io/ioutil" + "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" + "os" + "sync" +) + +type stateFileData struct { + PolicyName string `json:"policyName"` + DefaultCPUSet string `json:"defaultCpuSet"` + Entries map[string]string `json:"entries,omitempty"` +} + +var _ State = &stateFile{} + +type stateFile struct { + sync.RWMutex + stateFilePath string + policyName string + cache State +} + +// NewFileState creates new State for keeping track of cpu/pod assignment with file backend +func NewFileState(filePath string, policyName string) State { + stateFile := &stateFile{ + stateFilePath: filePath, + cache: NewMemoryState(), + policyName: policyName, + } + + if err := stateFile.tryRestoreState(); err != nil { + // could not restore state, init new state file + glog.Infof("[cpumanager] state file: initializing empty state file - reason: \"%s\"", err) + stateFile.cache.ClearState() + stateFile.storeState() + } + + return stateFile +} + +// tryRestoreState tries to read state file, upon any error, +// err message is logged and state is left clean. un-initialized +func (sf *stateFile) tryRestoreState() error { + sf.Lock() + defer sf.Unlock() + var err error + + // used when all parsing is ok + tmpAssignments := make(ContainerCPUAssignments) + tmpDefaultCPUSet := cpuset.NewCPUSet() + tmpContainerCPUSet := cpuset.NewCPUSet() + + var content []byte + + if content, err = ioutil.ReadFile(sf.stateFilePath); os.IsNotExist(err) { + // Create file + if _, err = os.Create(sf.stateFilePath); err != nil { + glog.Errorf("[cpumanager] state file: unable to create state file \"%s\":%s", sf.stateFilePath, err.Error()) + panic("[cpumanager] state file not created") + } + glog.Infof("[cpumanager] state file: created empty state file \"%s\"", sf.stateFilePath) + } else { + // File exists - try to read + var readState stateFileData + + if err = json.Unmarshal(content, &readState); err != nil { + glog.Warningf("[cpumanager] state file: could not unmarshal, corrupted state file - \"%s\"", sf.stateFilePath) + return err + } + + if sf.policyName != readState.PolicyName { + return fmt.Errorf("policy configured \"%s\" != policy from state file \"%s\"", sf.policyName, readState.PolicyName) + } + + if tmpDefaultCPUSet, err = cpuset.Parse(readState.DefaultCPUSet); err != nil { + glog.Warningf("[cpumanager] state file: could not parse state file - [defaultCpuSet:\"%s\"]", readState.DefaultCPUSet) + return err + } + + for containerID, cpuString := range readState.Entries { + if tmpContainerCPUSet, err = cpuset.Parse(cpuString); err != nil { + glog.Warningf("[cpumanager] state file: could not parse state file - container id: %s, cpuset: \"%s\"", containerID, cpuString) + return err + } + tmpAssignments[containerID] = tmpContainerCPUSet + } + + sf.cache.SetDefaultCPUSet(tmpDefaultCPUSet) + sf.cache.SetCPUAssignments(tmpAssignments) + + glog.V(2).Infof("[cpumanager] state file: restored state from state file \"%s\"", sf.stateFilePath) + glog.V(2).Infof("[cpumanager] state file: defaultCPUSet: %s", tmpDefaultCPUSet.String()) + } + return nil +} + +// saves state to a file, caller is responsible for locking +func (sf *stateFile) storeState() { + var content []byte + var err error + + data := stateFileData{ + PolicyName: sf.policyName, + DefaultCPUSet: sf.cache.GetDefaultCPUSet().String(), + Entries: map[string]string{}, + } + + for containerID, cset := range sf.cache.GetCPUAssignments() { + data.Entries[containerID] = cset.String() + } + + if content, 
err = json.Marshal(data); err != nil { + panic("[cpumanager] state file: could not serialize state to json") + } + + if err = ioutil.WriteFile(sf.stateFilePath, content, 0644); err != nil { + panic("[cpumanager] state file not written") + } + return +} + +func (sf *stateFile) GetCPUSet(containerID string) (cpuset.CPUSet, bool) { + sf.RLock() + defer sf.RUnlock() + + res, ok := sf.cache.GetCPUSet(containerID) + return res, ok +} + +func (sf *stateFile) GetDefaultCPUSet() cpuset.CPUSet { + sf.RLock() + defer sf.RUnlock() + + return sf.cache.GetDefaultCPUSet() +} + +func (sf *stateFile) GetCPUSetOrDefault(containerID string) cpuset.CPUSet { + sf.RLock() + defer sf.RUnlock() + + return sf.cache.GetCPUSetOrDefault(containerID) +} + +func (sf *stateFile) GetCPUAssignments() ContainerCPUAssignments { + sf.RLock() + defer sf.RUnlock() + return sf.cache.GetCPUAssignments() +} + +func (sf *stateFile) SetCPUSet(containerID string, cset cpuset.CPUSet) { + sf.Lock() + defer sf.Unlock() + sf.cache.SetCPUSet(containerID, cset) + sf.storeState() +} + +func (sf *stateFile) SetDefaultCPUSet(cset cpuset.CPUSet) { + sf.Lock() + defer sf.Unlock() + sf.cache.SetDefaultCPUSet(cset) + sf.storeState() +} + +func (sf *stateFile) SetCPUAssignments(a ContainerCPUAssignments) { + sf.Lock() + defer sf.Unlock() + sf.cache.SetCPUAssignments(a) + sf.storeState() +} + +func (sf *stateFile) Delete(containerID string) { + sf.Lock() + defer sf.Unlock() + sf.cache.Delete(containerID) + sf.storeState() +} + +func (sf *stateFile) ClearState() { + sf.Lock() + defer sf.Unlock() + sf.cache.ClearState() + sf.storeState() +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_mem.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_mem.go index 751b1726aae1..797cdb15b2b9 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_mem.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_mem.go @@ -25,7 +25,7 @@ import ( type stateMemory struct { sync.RWMutex - assignments map[string]cpuset.CPUSet + assignments ContainerCPUAssignments defaultCPUSet cpuset.CPUSet } @@ -35,7 +35,7 @@ var _ State = &stateMemory{} func NewMemoryState() State { glog.Infof("[cpumanager] initializing new in-memory state store") return &stateMemory{ - assignments: map[string]cpuset.CPUSet{}, + assignments: ContainerCPUAssignments{}, defaultCPUSet: cpuset.NewCPUSet(), } } @@ -65,6 +65,12 @@ func (s *stateMemory) GetCPUSetOrDefault(containerID string) cpuset.CPUSet { return s.GetDefaultCPUSet() } +func (s *stateMemory) GetCPUAssignments() ContainerCPUAssignments { + s.RLock() + defer s.RUnlock() + return s.assignments.Clone() +} + func (s *stateMemory) SetCPUSet(containerID string, cset cpuset.CPUSet) { s.Lock() defer s.Unlock() @@ -81,6 +87,14 @@ func (s *stateMemory) SetDefaultCPUSet(cset cpuset.CPUSet) { glog.Infof("[cpumanager] updated default cpuset: \"%s\"", cset) } +func (s *stateMemory) SetCPUAssignments(a ContainerCPUAssignments) { + s.Lock() + defer s.Unlock() + + s.assignments = a.Clone() + glog.Infof("[cpumanager] updated cpuset assignments: \"%v\"", a) +} + func (s *stateMemory) Delete(containerID string) { s.Lock() defer s.Unlock() @@ -88,3 +102,12 @@ func (s *stateMemory) Delete(containerID string) { delete(s.assignments, containerID) glog.V(2).Infof("[cpumanager] deleted cpuset assignment (container id: %s)", containerID) } + +func (s *stateMemory) ClearState() { + s.Lock() + defer s.Unlock() + + s.defaultCPUSet = cpuset.CPUSet{} + s.assignments = 
make(ContainerCPUAssignments) + glog.V(2).Infof("[cpumanager] cleared state") +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/BUILD index a2a34307d712..eb6ba3d6e63a 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/BUILD @@ -6,9 +6,11 @@ go_library( "doc.go", "topology.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology", visibility = ["//visibility:public"], deps = [ "//pkg/kubelet/cm/cpuset:go_default_library", + "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/google/cadvisor/info/v1:go_default_library", ], ) @@ -30,6 +32,7 @@ filegroup( go_test( name = "go_default_test", srcs = ["topology_test.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology", library = ":go_default_library", deps = ["//vendor/github.com/google/cadvisor/info/v1:go_default_library"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/topology.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/topology.go index a03cddbad8a7..2491abe6c06a 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/topology.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/topology.go @@ -18,7 +18,9 @@ package topology import ( "fmt" + "sort" + "github.com/golang/glog" cadvisorapi "github.com/google/cadvisor/info/v1" "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" ) @@ -145,15 +147,22 @@ func Discover(machineInfo *cadvisorapi.MachineInfo) (*CPUTopology, error) { } CPUDetails := CPUDetails{} - numCPUs := machineInfo.NumCores numPhysicalCores := 0 + var coreID int + var err error + for _, socket := range machineInfo.Topology { numPhysicalCores += len(socket.Cores) for _, core := range socket.Cores { + if coreID, err = getUniqueCoreID(core.Threads); err != nil { + glog.Errorf("could not get unique coreID for socket: %d core %d threads: %v", + socket.Id, core.Id, core.Threads) + return nil, err + } for _, cpu := range core.Threads { CPUDetails[cpu] = CPUInfo{ - CoreID: core.Id, + CoreID: coreID, SocketID: socket.Id, } } @@ -167,3 +176,22 @@ func Discover(machineInfo *cadvisorapi.MachineInfo) (*CPUTopology, error) { CPUDetails: CPUDetails, }, nil } + +// getUniqueCoreID computes coreId as the lowest cpuID +// for a given Threads []int slice. 
This will assure that coreID's are +// platform unique (opposite to what cAdvisor reports - socket unique) +func getUniqueCoreID(threads []int) (coreID int, err error) { + err = nil + if len(threads) == 0 { + return 0, fmt.Errorf("no cpus provided") + } + + if len(threads) != cpuset.NewCPUSet(threads...).Size() { + return 0, fmt.Errorf("cpus provided are not unique") + } + + tmpThreads := make([]int, len(threads)) + copy(tmpThreads, threads) + sort.Ints(tmpThreads) + return tmpThreads[0], err +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpuset/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpuset/BUILD index 24ee7753220a..7ed863a5ee6c 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpuset/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpuset/BUILD @@ -3,6 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", srcs = ["cpuset.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/cm/cpuset", visibility = ["//visibility:public"], deps = ["//vendor/github.com/golang/glog:go_default_library"], ) @@ -10,6 +11,7 @@ go_library( go_test( name = "go_default_test", srcs = ["cpuset_test.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/cm/cpuset", library = ":go_default_library", ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/device_plugin_handler.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/device_plugin_handler.go deleted file mode 100644 index 8da350bc873f..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/device_plugin_handler.go +++ /dev/null @@ -1,293 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cm - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "sync" - - "github.com/golang/glog" - - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/apimachinery/pkg/util/sets" - pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha1" - "k8s.io/kubernetes/pkg/kubelet/deviceplugin" -) - -// podDevices represents a list of pod to device Id mappings. 
-type containerDevices map[string]sets.String -type podDevices map[string]containerDevices - -func (pdev podDevices) pods() sets.String { - ret := sets.NewString() - for k := range pdev { - ret.Insert(k) - } - return ret -} - -func (pdev podDevices) insert(podUID, contName string, device string) { - if _, exists := pdev[podUID]; !exists { - pdev[podUID] = make(containerDevices) - } - if _, exists := pdev[podUID][contName]; !exists { - pdev[podUID][contName] = sets.NewString() - } - pdev[podUID][contName].Insert(device) -} - -func (pdev podDevices) getDevices(podUID, contName string) sets.String { - containers, exists := pdev[podUID] - if !exists { - return nil - } - devices, exists := containers[contName] - if !exists { - return nil - } - return devices -} - -func (pdev podDevices) delete(pods []string) { - for _, uid := range pods { - delete(pdev, uid) - } -} - -func (pdev podDevices) devices() sets.String { - ret := sets.NewString() - for _, containerDevices := range pdev { - for _, deviceSet := range containerDevices { - ret = ret.Union(deviceSet) - } - } - return ret -} - -type DevicePluginHandler interface { - // Start starts device plugin registration service. - Start() error - // Devices returns all of registered devices keyed by resourceName. - Devices() map[string][]*pluginapi.Device - // Allocate attempts to allocate all of required extended resources for - // the input container, issues an Allocate rpc request for each of such - // resources, and returns their AllocateResponses on success. - Allocate(pod *v1.Pod, container *v1.Container, activePods []*v1.Pod) ([]*pluginapi.AllocateResponse, error) -} - -type DevicePluginHandlerImpl struct { - sync.Mutex - devicePluginManager deviceplugin.Manager - // devicePluginManagerMonitorCallback is used for testing only. - devicePluginManagerMonitorCallback deviceplugin.MonitorCallback - // allDevices contains all of registered resourceNames and their exported device IDs. - allDevices map[string]sets.String - // allocatedDevices contains pod to allocated device mapping, keyed by resourceName. - allocatedDevices map[string]podDevices -} - -// NewDevicePluginHandler create a DevicePluginHandler -// updateCapacityFunc is called to update ContainerManager capacity when -// device capacity changes. -func NewDevicePluginHandlerImpl(updateCapacityFunc func(v1.ResourceList)) (*DevicePluginHandlerImpl, error) { - glog.V(2).Infof("Creating Device Plugin Handler") - handler := &DevicePluginHandlerImpl{ - allDevices: make(map[string]sets.String), - allocatedDevices: make(map[string]podDevices), - } - - deviceManagerMonitorCallback := func(resourceName string, added, updated, deleted []*pluginapi.Device) { - var capacity = v1.ResourceList{} - kept := append(updated, added...) - if _, ok := handler.allDevices[resourceName]; !ok { - handler.allDevices[resourceName] = sets.NewString() - } - // For now, DevicePluginHandler only keeps track of healthy devices. - // We can revisit this later when the need comes to track unhealthy devices here. 
- for _, dev := range kept { - if dev.Health == pluginapi.Healthy { - handler.allDevices[resourceName].Insert(dev.ID) - } else { - handler.allDevices[resourceName].Delete(dev.ID) - } - } - for _, dev := range deleted { - handler.allDevices[resourceName].Delete(dev.ID) - } - capacity[v1.ResourceName(resourceName)] = *resource.NewQuantity(int64(handler.allDevices[resourceName].Len()), resource.DecimalSI) - updateCapacityFunc(capacity) - } - - mgr, err := deviceplugin.NewManagerImpl(pluginapi.KubeletSocket, deviceManagerMonitorCallback) - if err != nil { - return nil, fmt.Errorf("Failed to initialize device plugin manager: %+v", err) - } - - handler.devicePluginManager = mgr - handler.devicePluginManagerMonitorCallback = deviceManagerMonitorCallback - // Loads in allocatedDevices information from disk. - err = handler.readCheckpoint() - if err != nil { - glog.Warningf("Continue after failing to read checkpoint file. Device allocation info may NOT be up-to-date. Err: %v", err) - } - return handler, nil -} - -func (h *DevicePluginHandlerImpl) Start() error { - return h.devicePluginManager.Start() -} - -func (h *DevicePluginHandlerImpl) Devices() map[string][]*pluginapi.Device { - return h.devicePluginManager.Devices() -} - -func (h *DevicePluginHandlerImpl) Allocate(pod *v1.Pod, container *v1.Container, activePods []*v1.Pod) ([]*pluginapi.AllocateResponse, error) { - var ret []*pluginapi.AllocateResponse - h.updateAllocatedDevices(activePods) - for k, v := range container.Resources.Limits { - resource := string(k) - needed := int(v.Value()) - glog.V(3).Infof("needs %d %s", needed, resource) - if !deviceplugin.IsDeviceName(k) || needed == 0 { - continue - } - h.Lock() - // Gets list of devices that have already been allocated. - // This can happen if a container restarts for example. - if h.allocatedDevices[resource] == nil { - h.allocatedDevices[resource] = make(podDevices) - } - devices := h.allocatedDevices[resource].getDevices(string(pod.UID), container.Name) - if devices != nil { - glog.V(3).Infof("Found pre-allocated devices for resource %s container %q in Pod %q: %v", resource, container.Name, pod.UID, devices.List()) - needed = needed - devices.Len() - } - // Get Devices in use. - devicesInUse := h.allocatedDevices[resource].devices() - // Get a list of available devices. - available := h.allDevices[resource].Difference(devicesInUse) - if int(available.Len()) < needed { - h.Unlock() - return nil, fmt.Errorf("requested number of devices unavailable for %s. Requested: %d, Available: %d", resource, needed, available.Len()) - } - allocated := available.UnsortedList()[:needed] - for _, device := range allocated { - // Update internal allocated device cache. - h.allocatedDevices[resource].insert(string(pod.UID), container.Name, device) - } - h.Unlock() - // devicePluginManager.Allocate involves RPC calls to device plugin, which - // could be heavy-weight. Therefore we want to perform this operation outside - // mutex lock. Note if Allcate call fails, we may leave container resources - // partially allocated for the failed container. We rely on updateAllocatedDevices() - // to garbage collect these resources later. Another side effect is that if - // we have X resource A and Y resource B in total, and two containers, container1 - // and container2 both require X resource A and Y resource B. Both allocation - // requests may fail if we serve them in mixed order. - // TODO: may revisit this part later if we see inefficient resource allocation - // in real use as the result of this. 
- resp, err := h.devicePluginManager.Allocate(resource, append(devices.UnsortedList(), allocated...)) - if err != nil { - return nil, err - } - ret = append(ret, resp) - } - // Checkpoints device to container allocation information. - if err := h.writeCheckpoint(); err != nil { - return nil, err - } - return ret, nil -} - -// updateAllocatedDevices updates the list of GPUs in use. -// It gets a list of active pods and then frees any GPUs that are bound to -// terminated pods. Returns error on failure. -func (h *DevicePluginHandlerImpl) updateAllocatedDevices(activePods []*v1.Pod) { - h.Lock() - defer h.Unlock() - activePodUids := sets.NewString() - for _, pod := range activePods { - activePodUids.Insert(string(pod.UID)) - } - for _, podDevs := range h.allocatedDevices { - allocatedPodUids := podDevs.pods() - podsToBeRemoved := allocatedPodUids.Difference(activePodUids) - glog.V(5).Infof("pods to be removed: %v", podsToBeRemoved.List()) - podDevs.delete(podsToBeRemoved.List()) - } -} - -type checkpointEntry struct { - PodUID string - ContainerName string - ResourceName string - DeviceID string -} - -// checkpointData struct is used to store pod to device allocation information -// in a checkpoint file. -// TODO: add version control when we need to change checkpoint format. -type checkpointData struct { - Entries []checkpointEntry -} - -// Checkpoints device to container allocation information to disk. -func (h *DevicePluginHandlerImpl) writeCheckpoint() error { - filepath := h.devicePluginManager.CheckpointFile() - var data checkpointData - for resourceName, podDev := range h.allocatedDevices { - for podUID, conDev := range podDev { - for conName, devs := range conDev { - for _, devId := range devs.UnsortedList() { - data.Entries = append(data.Entries, checkpointEntry{podUID, conName, resourceName, devId}) - } - } - } - } - dataJson, err := json.Marshal(data) - if err != nil { - return err - } - return ioutil.WriteFile(filepath, dataJson, 0644) -} - -// Reads device to container allocation information from disk, and populates -// h.allocatedDevices accordingly. -func (h *DevicePluginHandlerImpl) readCheckpoint() error { - filepath := h.devicePluginManager.CheckpointFile() - content, err := ioutil.ReadFile(filepath) - if err != nil && !os.IsNotExist(err) { - return fmt.Errorf("failed to read checkpoint file %q: %v", filepath, err) - } - glog.V(2).Infof("Read checkpoint file %s\n", filepath) - var data checkpointData - if err := json.Unmarshal(content, &data); err != nil { - return fmt.Errorf("failed to unmarshal checkpoint data: %v", err) - } - for _, entry := range data.Entries { - glog.V(2).Infof("Get checkpoint entry: %v %v %v %v\n", entry.PodUID, entry.ContainerName, entry.ResourceName, entry.DeviceID) - if h.allocatedDevices[entry.ResourceName] == nil { - h.allocatedDevices[entry.ResourceName] = make(podDevices) - } - h.allocatedDevices[entry.ResourceName].insert(entry.PodUID, entry.ContainerName, entry.DeviceID) - } - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/device_plugin_handler_stub.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/device_plugin_handler_stub.go deleted file mode 100644 index a70c281086ca..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/device_plugin_handler_stub.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cm - -import ( - "k8s.io/api/core/v1" - pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha1" -) - -// A simple stub implementation for DevicePluginHandler. -type DevicePluginHandlerStub struct{} - -func NewDevicePluginHandlerStub() (*DevicePluginHandlerStub, error) { - return &DevicePluginHandlerStub{}, nil -} - -func (h *DevicePluginHandlerStub) Start() error { - return nil -} - -func (h *DevicePluginHandlerStub) Devices() map[string][]*pluginapi.Device { - return make(map[string][]*pluginapi.Device) -} - -func (h *DevicePluginHandlerStub) Allocate(pod *v1.Pod, container *v1.Container, activePods []*v1.Pod) ([]*pluginapi.AllocateResponse, error) { - var ret []*pluginapi.AllocateResponse - return ret, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/deviceplugin/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/deviceplugin/BUILD new file mode 100644 index 000000000000..9c91c2e9df28 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/deviceplugin/BUILD @@ -0,0 +1,70 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_library( + name = "go_default_library", + srcs = [ + "device_plugin_stub.go", + "endpoint.go", + "manager.go", + "manager_stub.go", + "pod_devices.go", + "types.go", + ], + importpath = "k8s.io/kubernetes/pkg/kubelet/cm/deviceplugin", + deps = [ + "//pkg/apis/core/v1/helper:go_default_library", + "//pkg/kubelet/apis/deviceplugin/v1alpha:go_default_library", + "//pkg/kubelet/config:go_default_library", + "//pkg/kubelet/container:go_default_library", + "//pkg/kubelet/lifecycle:go_default_library", + "//pkg/kubelet/metrics:go_default_library", + "//plugin/pkg/scheduler/schedulercache:go_default_library", + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) + +go_test( + name = "go_default_test", + srcs = [ + "endpoint_test.go", + "manager_test.go", + ], + importpath = "k8s.io/kubernetes/pkg/kubelet/cm/deviceplugin", + library = ":go_default_library", + deps = [ + "//pkg/kubelet/apis/deviceplugin/v1alpha:go_default_library", + "//pkg/kubelet/lifecycle:go_default_library", + "//plugin/pkg/scheduler/schedulercache:go_default_library", + "//vendor/github.com/stretchr/testify/assert:go_default_library", + "//vendor/github.com/stretchr/testify/require:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + 
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/deviceplugin/OWNERS b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/deviceplugin/OWNERS new file mode 100644 index 000000000000..ec9b7ddc156a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/deviceplugin/OWNERS @@ -0,0 +1,6 @@ +approvers: +- jiayingz +- vishh +reviewers: +- mindprince +- RenaudWasTaken diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/deviceplugin/device_plugin_stub.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/deviceplugin/device_plugin_stub.go similarity index 99% rename from vendor/k8s.io/kubernetes/pkg/kubelet/deviceplugin/device_plugin_stub.go rename to vendor/k8s.io/kubernetes/pkg/kubelet/cm/deviceplugin/device_plugin_stub.go index a0f103d03a9b..01f08c159876 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/deviceplugin/device_plugin_stub.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/deviceplugin/device_plugin_stub.go @@ -26,7 +26,7 @@ import ( "golang.org/x/net/context" "google.golang.org/grpc" - pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha1" + pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha" ) // Stub implementation for DevicePlugin. diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/deviceplugin/endpoint.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/deviceplugin/endpoint.go new file mode 100644 index 000000000000..29feaf52852e --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/deviceplugin/endpoint.go @@ -0,0 +1,198 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package deviceplugin + +import ( + "fmt" + "net" + "sync" + "time" + + "github.com/golang/glog" + "golang.org/x/net/context" + "google.golang.org/grpc" + + pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha" +) + +// endpoint maps to a single registered device plugin. It is responsible +// for managing gRPC communications with the device plugin and caching +// device states reported by the device plugin. +type endpoint interface { + run() + stop() + allocate(devs []string) (*pluginapi.AllocateResponse, error) + getDevices() []pluginapi.Device + callback(resourceName string, added, updated, deleted []pluginapi.Device) +} + +type endpointImpl struct { + client pluginapi.DevicePluginClient + clientConn *grpc.ClientConn + + socketPath string + resourceName string + + devices map[string]pluginapi.Device + mutex sync.Mutex + + cb monitorCallback +} + +// newEndpoint creates a new endpoint for the given resourceName. 
+func newEndpointImpl(socketPath, resourceName string, devices map[string]pluginapi.Device, callback monitorCallback) (*endpointImpl, error) { + client, c, err := dial(socketPath) + if err != nil { + glog.Errorf("Can't create new endpoint with path %s err %v", socketPath, err) + return nil, err + } + + return &endpointImpl{ + client: client, + clientConn: c, + + socketPath: socketPath, + resourceName: resourceName, + + devices: devices, + cb: callback, + }, nil +} + +func (e *endpointImpl) callback(resourceName string, added, updated, deleted []pluginapi.Device) { + e.cb(resourceName, added, updated, deleted) +} + +func (e *endpointImpl) getDevices() []pluginapi.Device { + e.mutex.Lock() + defer e.mutex.Unlock() + var devs []pluginapi.Device + + for _, d := range e.devices { + devs = append(devs, d) + } + + return devs +} + +// run initializes ListAndWatch gRPC call for the device plugin and +// blocks on receiving ListAndWatch gRPC stream updates. Each ListAndWatch +// stream update contains a new list of device states. listAndWatch compares the new +// device states with its cached states to get list of new, updated, and deleted devices. +// It then issues a callback to pass this information to the device manager which +// will adjust the resource available information accordingly. +func (e *endpointImpl) run() { + stream, err := e.client.ListAndWatch(context.Background(), &pluginapi.Empty{}) + if err != nil { + glog.Errorf(errListAndWatch, e.resourceName, err) + + return + } + + devices := make(map[string]pluginapi.Device) + + e.mutex.Lock() + for _, d := range e.devices { + devices[d.ID] = d + } + e.mutex.Unlock() + + for { + response, err := stream.Recv() + if err != nil { + glog.Errorf(errListAndWatch, e.resourceName, err) + return + } + + devs := response.Devices + glog.V(2).Infof("State pushed for device plugin %s", e.resourceName) + + newDevs := make(map[string]*pluginapi.Device) + var added, updated []pluginapi.Device + + for _, d := range devs { + dOld, ok := devices[d.ID] + newDevs[d.ID] = d + + if !ok { + glog.V(2).Infof("New device for Endpoint %s: %v", e.resourceName, d) + + devices[d.ID] = *d + added = append(added, *d) + + continue + } + + if d.Health == dOld.Health { + continue + } + + if d.Health == pluginapi.Unhealthy { + glog.Errorf("Device %s is now Unhealthy", d.ID) + } else if d.Health == pluginapi.Healthy { + glog.V(2).Infof("Device %s is now Healthy", d.ID) + } + + devices[d.ID] = *d + updated = append(updated, *d) + } + + var deleted []pluginapi.Device + for id, d := range devices { + if _, ok := newDevs[id]; ok { + continue + } + + glog.Errorf("Device %s was deleted", d.ID) + + deleted = append(deleted, d) + delete(devices, id) + } + + e.mutex.Lock() + e.devices = devices + e.mutex.Unlock() + + e.callback(e.resourceName, added, updated, deleted) + } +} + +// allocate issues Allocate gRPC call to the device plugin. +func (e *endpointImpl) allocate(devs []string) (*pluginapi.AllocateResponse, error) { + return e.client.Allocate(context.Background(), &pluginapi.AllocateRequest{ + DevicesIDs: devs, + }) +} + +func (e *endpointImpl) stop() { + e.clientConn.Close() +} + +// dial establishes the gRPC communication with the registered device plugin. 
+func dial(unixSocketPath string) (pluginapi.DevicePluginClient, *grpc.ClientConn, error) { + c, err := grpc.Dial(unixSocketPath, grpc.WithInsecure(), + grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { + return net.DialTimeout("unix", addr, timeout) + }), + ) + + if err != nil { + return nil, nil, fmt.Errorf(errFailedToDialDevicePlugin+" %v", err) + } + + return pluginapi.NewDevicePluginClient(c), c, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/deviceplugin/manager.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/deviceplugin/manager.go new file mode 100644 index 000000000000..db28e36d5d60 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/deviceplugin/manager.go @@ -0,0 +1,646 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package deviceplugin + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net" + "os" + "path/filepath" + "sync" + "time" + + "github.com/golang/glog" + "golang.org/x/net/context" + "google.golang.org/grpc" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/util/sets" + v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" + pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha" + "k8s.io/kubernetes/pkg/kubelet/config" + "k8s.io/kubernetes/pkg/kubelet/lifecycle" + "k8s.io/kubernetes/pkg/kubelet/metrics" + "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" +) + +// ActivePodsFunc is a function that returns a list of pods to reconcile. +type ActivePodsFunc func() []*v1.Pod + +// monitorCallback is the function called when a device's health state changes, +// or new devices are reported, or old devices are deleted. +// Updated contains the most recent state of the Device. +type monitorCallback func(resourceName string, added, updated, deleted []pluginapi.Device) + +// ManagerImpl is the structure in charge of managing Device Plugins. +type ManagerImpl struct { + socketname string + socketdir string + + endpoints map[string]endpoint // Key is ResourceName + mutex sync.Mutex + + server *grpc.Server + + // activePods is a method for listing active pods on the node + // so the amount of pluginResources requested by existing pods + // could be counted when updating allocated devices + activePods ActivePodsFunc + + // sourcesReady provides the readiness of kubelet configuration sources such as apiserver update readiness. + // We use it to determine when we can purge inactive pods from checkpointed state. + sourcesReady config.SourcesReady + + // callback is used for updating devices' states in one time call. + // e.g. a new device is advertised, two old devices are deleted and a running device fails. + callback monitorCallback + + // allDevices contains all of registered resourceNames and their exported device IDs. + allDevices map[string]sets.String + + // allocatedDevices contains allocated deviceIds, keyed by resourceName. + allocatedDevices map[string]sets.String + + // podDevices contains pod to allocated device mapping. 
+ podDevices podDevices +} + +type sourcesReadyStub struct{} + +func (s *sourcesReadyStub) AddSource(source string) {} +func (s *sourcesReadyStub) AllReady() bool { return true } + +// NewManagerImpl creates a new manager. +func NewManagerImpl() (*ManagerImpl, error) { + return newManagerImpl(pluginapi.KubeletSocket) +} + +func newManagerImpl(socketPath string) (*ManagerImpl, error) { + glog.V(2).Infof("Creating Device Plugin manager at %s", socketPath) + + if socketPath == "" || !filepath.IsAbs(socketPath) { + return nil, fmt.Errorf(errBadSocket+" %v", socketPath) + } + + dir, file := filepath.Split(socketPath) + manager := &ManagerImpl{ + endpoints: make(map[string]endpoint), + socketname: file, + socketdir: dir, + allDevices: make(map[string]sets.String), + allocatedDevices: make(map[string]sets.String), + podDevices: make(podDevices), + } + manager.callback = manager.genericDeviceUpdateCallback + + // The following structs are populated with real implementations in manager.Start() + // Before that, initializes them to perform no-op operations. + manager.activePods = func() []*v1.Pod { return []*v1.Pod{} } + manager.sourcesReady = &sourcesReadyStub{} + + return manager, nil +} + +func (m *ManagerImpl) genericDeviceUpdateCallback(resourceName string, added, updated, deleted []pluginapi.Device) { + kept := append(updated, added...) + m.mutex.Lock() + if _, ok := m.allDevices[resourceName]; !ok { + m.allDevices[resourceName] = sets.NewString() + } + // For now, Manager only keeps track of healthy devices. + // TODO: adds support to track unhealthy devices. + for _, dev := range kept { + if dev.Health == pluginapi.Healthy { + m.allDevices[resourceName].Insert(dev.ID) + } else { + m.allDevices[resourceName].Delete(dev.ID) + } + } + for _, dev := range deleted { + m.allDevices[resourceName].Delete(dev.ID) + } + m.mutex.Unlock() + m.writeCheckpoint() +} + +func (m *ManagerImpl) removeContents(dir string) error { + d, err := os.Open(dir) + if err != nil { + return err + } + defer d.Close() + names, err := d.Readdirnames(-1) + if err != nil { + return err + } + for _, name := range names { + filePath := filepath.Join(dir, name) + if filePath == m.checkpointFile() { + continue + } + stat, err := os.Stat(filePath) + if err != nil { + glog.Errorf("Failed to stat file %v: %v", filePath, err) + continue + } + if stat.IsDir() { + continue + } + err = os.RemoveAll(filePath) + if err != nil { + return err + } + } + return nil +} + +const ( + // kubeletDevicePluginCheckpoint is the file name of device plugin checkpoint + kubeletDevicePluginCheckpoint = "kubelet_internal_checkpoint" +) + +// checkpointFile returns device plugin checkpoint file path. +func (m *ManagerImpl) checkpointFile() string { + return filepath.Join(m.socketdir, kubeletDevicePluginCheckpoint) +} + +// Start starts the Device Plugin Manager amd start initialization of +// podDevices and allocatedDevices information from checkpoint-ed state and +// starts device plugin registration service. +func (m *ManagerImpl) Start(activePods ActivePodsFunc, sourcesReady config.SourcesReady) error { + glog.V(2).Infof("Starting Device Plugin manager") + + m.activePods = activePods + m.sourcesReady = sourcesReady + + // Loads in allocatedDevices information from disk. + err := m.readCheckpoint() + if err != nil { + glog.Warningf("Continue after failing to read checkpoint file. Device allocation info may NOT be up-to-date. 
Err: %v", err) + } + + socketPath := filepath.Join(m.socketdir, m.socketname) + os.MkdirAll(m.socketdir, 0755) + + // Removes all stale sockets in m.socketdir. Device plugins can monitor + // this and use it as a signal to re-register with the new Kubelet. + if err := m.removeContents(m.socketdir); err != nil { + glog.Errorf("Fail to clean up stale contents under %s: %+v", m.socketdir, err) + } + + s, err := net.Listen("unix", socketPath) + if err != nil { + glog.Errorf(errListenSocket+" %+v", err) + return err + } + + m.server = grpc.NewServer([]grpc.ServerOption{}...) + + pluginapi.RegisterRegistrationServer(m.server, m) + go m.server.Serve(s) + + glog.V(2).Infof("Serving device plugin registration server on %q", socketPath) + + return nil +} + +// Devices is the map of devices that are known by the Device +// Plugin manager with the kind of the devices as key +func (m *ManagerImpl) Devices() map[string][]pluginapi.Device { + m.mutex.Lock() + defer m.mutex.Unlock() + + devs := make(map[string][]pluginapi.Device) + for k, e := range m.endpoints { + glog.V(3).Infof("Endpoint: %+v: %p", k, e) + devs[k] = e.getDevices() + } + + return devs +} + +// Allocate is the call that you can use to allocate a set of devices +// from the registered device plugins. +func (m *ManagerImpl) Allocate(node *schedulercache.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error { + pod := attrs.Pod + devicesToReuse := make(map[string]sets.String) + // TODO: Reuse devices between init containers and regular containers. + for _, container := range pod.Spec.InitContainers { + if err := m.allocateContainerResources(pod, &container, devicesToReuse); err != nil { + return err + } + m.podDevices.addContainerAllocatedResources(string(pod.UID), container.Name, devicesToReuse) + } + for _, container := range pod.Spec.Containers { + if err := m.allocateContainerResources(pod, &container, devicesToReuse); err != nil { + return err + } + m.podDevices.removeContainerAllocatedResources(string(pod.UID), container.Name, devicesToReuse) + } + + m.mutex.Lock() + defer m.mutex.Unlock() + + // quick return if no pluginResources requested + if _, podRequireDevicePluginResource := m.podDevices[string(pod.UID)]; !podRequireDevicePluginResource { + return nil + } + + m.sanitizeNodeAllocatable(node) + return nil +} + +// Register registers a device plugin. +func (m *ManagerImpl) Register(ctx context.Context, r *pluginapi.RegisterRequest) (*pluginapi.Empty, error) { + glog.Infof("Got registration request from device plugin with resource name %q", r.ResourceName) + metrics.DevicePluginRegistrationCount.WithLabelValues(r.ResourceName).Inc() + if r.Version != pluginapi.Version { + errorString := fmt.Sprintf(errUnsuportedVersion, r.Version, pluginapi.Version) + glog.Infof("Bad registration request from device plugin with resource name %q: %v", r.ResourceName, errorString) + return &pluginapi.Empty{}, fmt.Errorf(errorString) + } + + if !v1helper.IsExtendedResourceName(v1.ResourceName(r.ResourceName)) { + errorString := fmt.Sprintf(errInvalidResourceName, r.ResourceName) + glog.Infof("Bad registration request from device plugin: %v", errorString) + return &pluginapi.Empty{}, fmt.Errorf(errorString) + } + + // TODO: for now, always accepts newest device plugin. Later may consider to + // add some policies here, e.g., verify whether an old device plugin with the + // same resource name is still alive to determine whether we want to accept + // the new registration. 
+ go m.addEndpoint(r) + + return &pluginapi.Empty{}, nil +} + +// Stop is the function that can stop the gRPC server. +func (m *ManagerImpl) Stop() error { + m.mutex.Lock() + defer m.mutex.Unlock() + for _, e := range m.endpoints { + e.stop() + } + + m.server.Stop() + return nil +} + +func (m *ManagerImpl) addEndpoint(r *pluginapi.RegisterRequest) { + existingDevs := make(map[string]pluginapi.Device) + m.mutex.Lock() + old, ok := m.endpoints[r.ResourceName] + if ok && old != nil { + // Pass devices of previous endpoint into re-registered one, + // to avoid potential orphaned devices upon re-registration + devices := make(map[string]pluginapi.Device) + for _, device := range old.getDevices() { + devices[device.ID] = device + } + existingDevs = devices + } + m.mutex.Unlock() + + socketPath := filepath.Join(m.socketdir, r.Endpoint) + e, err := newEndpointImpl(socketPath, r.ResourceName, existingDevs, m.callback) + if err != nil { + glog.Errorf("Failed to dial device plugin with request %v: %v", r, err) + return + } + + m.mutex.Lock() + // Check for potential re-registration during the initialization of new endpoint, + // and skip updating if re-registration happens. + // TODO: simplify the part once we have a better way to handle registered devices + ext := m.endpoints[r.ResourceName] + if ext != old { + glog.Warningf("Some other endpoint %v is added while endpoint %v is initialized", ext, e) + m.mutex.Unlock() + e.stop() + return + } + // Associates the newly created endpoint with the corresponding resource name. + // Stops existing endpoint if there is any. + m.endpoints[r.ResourceName] = e + glog.V(2).Infof("Registered endpoint %v", e) + m.mutex.Unlock() + + if old != nil { + old.stop() + } + + go func() { + e.run() + e.stop() + + m.mutex.Lock() + if old, ok := m.endpoints[r.ResourceName]; ok && old == e { + glog.V(2).Infof("Delete resource for endpoint %v", e) + delete(m.endpoints, r.ResourceName) + } + + glog.V(2).Infof("Unregistered endpoint %v", e) + m.mutex.Unlock() + }() +} + +// GetCapacity is expected to be called when Kubelet updates its node status. +// The first returned variable contains the registered device plugin resource capacity. +// The second returned variable contains previously registered resources that are no longer active. +// Kubelet uses this information to update resource capacity/allocatable in its node status. +// After the call, device plugin can remove the inactive resources from its internal list as the +// change is already reflected in Kubelet node status. +// Note in the special case after Kubelet restarts, device plugin resource capacities can +// temporarily drop to zero till corresponding device plugins re-register. This is OK because +// cm.UpdatePluginResource() run during predicate Admit guarantees we adjust nodeinfo +// capacity for already allocated pods so that they can continue to run. However, new pods +// requiring device plugin resources will not be scheduled till device plugin re-registers. 
+func (m *ManagerImpl) GetCapacity() (v1.ResourceList, []string) { + needsUpdateCheckpoint := false + var capacity = v1.ResourceList{} + var deletedResources []string + m.mutex.Lock() + for resourceName, devices := range m.allDevices { + if _, ok := m.endpoints[resourceName]; !ok { + delete(m.allDevices, resourceName) + deletedResources = append(deletedResources, resourceName) + needsUpdateCheckpoint = true + } else { + capacity[v1.ResourceName(resourceName)] = *resource.NewQuantity(int64(devices.Len()), resource.DecimalSI) + } + } + m.mutex.Unlock() + if needsUpdateCheckpoint { + m.writeCheckpoint() + } + return capacity, deletedResources +} + +// checkpointData struct is used to store pod to device allocation information +// and registered device information in a checkpoint file. +// TODO: add version control when we need to change checkpoint format. +type checkpointData struct { + PodDeviceEntries []podDevicesCheckpointEntry + RegisteredDevices map[string][]string +} + +// Checkpoints device to container allocation information to disk. +func (m *ManagerImpl) writeCheckpoint() error { + m.mutex.Lock() + data := checkpointData{ + PodDeviceEntries: m.podDevices.toCheckpointData(), + RegisteredDevices: make(map[string][]string), + } + for resource, devices := range m.allDevices { + data.RegisteredDevices[resource] = devices.UnsortedList() + } + m.mutex.Unlock() + + dataJSON, err := json.Marshal(data) + if err != nil { + return err + } + filepath := m.checkpointFile() + return ioutil.WriteFile(filepath, dataJSON, 0644) +} + +// Reads device to container allocation information from disk, and populates +// m.allocatedDevices accordingly. +func (m *ManagerImpl) readCheckpoint() error { + filepath := m.checkpointFile() + content, err := ioutil.ReadFile(filepath) + if err != nil && !os.IsNotExist(err) { + return fmt.Errorf("failed to read checkpoint file %q: %v", filepath, err) + } + glog.V(2).Infof("Read checkpoint file %s\n", filepath) + var data checkpointData + if err := json.Unmarshal(content, &data); err != nil { + return fmt.Errorf("failed to unmarshal checkpoint data: %v", err) + } + + m.mutex.Lock() + defer m.mutex.Unlock() + m.podDevices.fromCheckpointData(data.PodDeviceEntries) + m.allocatedDevices = m.podDevices.devices() + for resource, devices := range data.RegisteredDevices { + m.allDevices[resource] = sets.NewString() + for _, dev := range devices { + m.allDevices[resource].Insert(dev) + } + } + return nil +} + +// updateAllocatedDevices gets a list of active pods and then frees any Devices that are bound to +// terminated pods. Returns error on failure. +func (m *ManagerImpl) updateAllocatedDevices(activePods []*v1.Pod) { + if !m.sourcesReady.AllReady() { + return + } + m.mutex.Lock() + defer m.mutex.Unlock() + activePodUids := sets.NewString() + for _, pod := range activePods { + activePodUids.Insert(string(pod.UID)) + } + allocatedPodUids := m.podDevices.pods() + podsToBeRemoved := allocatedPodUids.Difference(activePodUids) + if len(podsToBeRemoved) <= 0 { + return + } + glog.V(3).Infof("pods to be removed: %v", podsToBeRemoved.List()) + m.podDevices.delete(podsToBeRemoved.List()) + // Regenerated allocatedDevices after we update pod allocation information. + m.allocatedDevices = m.podDevices.devices() +} + +// Returns list of device Ids we need to allocate with Allocate rpc call. +// Returns empty list in case we don't need to issue the Allocate rpc call. 
+func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, required int, reusableDevices sets.String) (sets.String, error) { + m.mutex.Lock() + defer m.mutex.Unlock() + needed := required + // Gets list of devices that have already been allocated. + // This can happen if a container restarts for example. + devices := m.podDevices.containerDevices(podUID, contName, resource) + if devices != nil { + glog.V(3).Infof("Found pre-allocated devices for resource %s container %q in Pod %q: %v", resource, contName, podUID, devices.List()) + needed = needed - devices.Len() + // A pod's resource is not expected to change once admitted by the API server, + // so just fail loudly here. We can revisit this part if this no longer holds. + if needed != 0 { + return nil, fmt.Errorf("pod %v container %v changed request for resource %v from %v to %v", podUID, contName, resource, devices.Len(), required) + } + } + if needed == 0 { + // No change, no work. + return nil, nil + } + glog.V(3).Infof("Needs to allocate %v %v for pod %q container %q", needed, resource, podUID, contName) + // Needs to allocate additional devices. + if _, ok := m.allDevices[resource]; !ok { + return nil, fmt.Errorf("can't allocate unregistered device %v", resource) + } + devices = sets.NewString() + // Allocates from reusableDevices list first. + for device := range reusableDevices { + devices.Insert(device) + needed-- + if needed == 0 { + return devices, nil + } + } + // Needs to allocate additional devices. + if m.allocatedDevices[resource] == nil { + m.allocatedDevices[resource] = sets.NewString() + } + // Gets Devices in use. + devicesInUse := m.allocatedDevices[resource] + // Gets a list of available devices. + available := m.allDevices[resource].Difference(devicesInUse) + if int(available.Len()) < needed { + return nil, fmt.Errorf("requested number of devices unavailable for %s. Requested: %d, Available: %d", resource, needed, available.Len()) + } + allocated := available.UnsortedList()[:needed] + // Updates m.allocatedDevices with allocated devices to prevent them + // from being allocated to other pods/containers, given that we are + // not holding lock during the rpc call. + for _, device := range allocated { + m.allocatedDevices[resource].Insert(device) + devices.Insert(device) + } + return devices, nil +} + +// allocateContainerResources attempts to allocate all of required device +// plugin resources for the input container, issues an Allocate rpc request +// for each new device resource requirement, processes their AllocateResponses, +// and updates the cached containerDevices on success. +func (m *ManagerImpl) allocateContainerResources(pod *v1.Pod, container *v1.Container, devicesToReuse map[string]sets.String) error { + podUID := string(pod.UID) + contName := container.Name + allocatedDevicesUpdated := false + for k, v := range container.Resources.Limits { + resource := string(k) + needed := int(v.Value()) + glog.V(3).Infof("needs %d %s", needed, resource) + _, registeredResource := m.allDevices[resource] + _, allocatedResource := m.allocatedDevices[resource] + // Continues if this is neither an active device plugin resource nor + // a resource we have previously allocated. + if !registeredResource && !allocatedResource { + continue + } + // Updates allocatedDevices to garbage collect any stranded resources + // before doing the device plugin allocation. 
+ if !allocatedDevicesUpdated { + m.updateAllocatedDevices(m.activePods()) + allocatedDevicesUpdated = true + } + allocDevices, err := m.devicesToAllocate(podUID, contName, resource, needed, devicesToReuse[resource]) + if err != nil { + return err + } + if allocDevices == nil || len(allocDevices) <= 0 { + continue + } + startRPCTime := time.Now() + // devicePluginManager.Allocate involves RPC calls to device plugin, which + // could be heavy-weight. Therefore we want to perform this operation outside + // mutex lock. Note if Allocate call fails, we may leave container resources + // partially allocated for the failed container. We rely on updateAllocatedDevices() + // to garbage collect these resources later. Another side effect is that if + // we have X resource A and Y resource B in total, and two containers, container1 + // and container2 both require X resource A and Y resource B. Both allocation + // requests may fail if we serve them in mixed order. + // TODO: may revisit this part later if we see inefficient resource allocation + // in real use as the result of this. Should also consider to parallize device + // plugin Allocate grpc calls if it becomes common that a container may require + // resources from multiple device plugins. + m.mutex.Lock() + e, ok := m.endpoints[resource] + m.mutex.Unlock() + if !ok { + m.mutex.Lock() + m.allocatedDevices = m.podDevices.devices() + m.mutex.Unlock() + return fmt.Errorf("Unknown Device Plugin %s", resource) + } + + devs := allocDevices.UnsortedList() + glog.V(3).Infof("Making allocation request for devices %v for device plugin %s", devs, resource) + resp, err := e.allocate(devs) + metrics.DevicePluginAllocationLatency.WithLabelValues(resource).Observe(metrics.SinceInMicroseconds(startRPCTime)) + if err != nil { + // In case of allocation failure, we want to restore m.allocatedDevices + // to the actual allocated state from m.podDevices. + m.mutex.Lock() + m.allocatedDevices = m.podDevices.devices() + m.mutex.Unlock() + return err + } + + // Update internal cached podDevices state. + m.mutex.Lock() + m.podDevices.insert(podUID, contName, resource, allocDevices, resp) + m.mutex.Unlock() + } + + // Checkpoints device to container allocation information. + return m.writeCheckpoint() +} + +// GetDeviceRunContainerOptions checks whether we have cached containerDevices +// for the passed-in and returns its DeviceRunContainerOptions +// for the found one. An empty struct is returned in case no cached state is found. +func (m *ManagerImpl) GetDeviceRunContainerOptions(pod *v1.Pod, container *v1.Container) *DeviceRunContainerOptions { + m.mutex.Lock() + defer m.mutex.Unlock() + return m.podDevices.deviceRunContainerOptions(string(pod.UID), container.Name) +} + +// sanitizeNodeAllocatable scans through allocatedDevices in the device manager +// and if necessary, updates allocatableResource in nodeInfo to at least equal to +// the allocated capacity. This allows pods that have already been scheduled on +// the node to pass GeneralPredicates admission checking even upon device plugin failure. 
+func (m *ManagerImpl) sanitizeNodeAllocatable(node *schedulercache.NodeInfo) { + var newAllocatableResource *schedulercache.Resource + allocatableResource := node.AllocatableResource() + if allocatableResource.ScalarResources == nil { + allocatableResource.ScalarResources = make(map[v1.ResourceName]int64) + } + for resource, devices := range m.allocatedDevices { + needed := devices.Len() + quant, ok := allocatableResource.ScalarResources[v1.ResourceName(resource)] + if ok && int(quant) >= needed { + continue + } + // Needs to update nodeInfo.AllocatableResource to make sure + // NodeInfo.allocatableResource at least equal to the capacity already allocated. + if newAllocatableResource == nil { + newAllocatableResource = allocatableResource.Clone() + } + newAllocatableResource.ScalarResources[v1.ResourceName(resource)] = int64(needed) + } + if newAllocatableResource != nil { + node.SetAllocatableResource(newAllocatableResource) + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/deviceplugin/manager_stub.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/deviceplugin/manager_stub.go new file mode 100644 index 000000000000..903a0077a2c3 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/deviceplugin/manager_stub.go @@ -0,0 +1,63 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package deviceplugin + +import ( + "k8s.io/api/core/v1" + pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha" + "k8s.io/kubernetes/pkg/kubelet/config" + "k8s.io/kubernetes/pkg/kubelet/lifecycle" + "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" +) + +// ManagerStub provides a simple stub implementation for the Device Manager. +type ManagerStub struct{} + +// NewManagerStub creates a ManagerStub. +func NewManagerStub() (*ManagerStub, error) { + return &ManagerStub{}, nil +} + +// Start simply returns nil. +func (h *ManagerStub) Start(activePods ActivePodsFunc, sourcesReady config.SourcesReady) error { + return nil +} + +// Stop simply returns nil. +func (h *ManagerStub) Stop() error { + return nil +} + +// Devices returns an empty map. +func (h *ManagerStub) Devices() map[string][]pluginapi.Device { + return make(map[string][]pluginapi.Device) +} + +// Allocate simply returns nil. +func (h *ManagerStub) Allocate(node *schedulercache.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error { + return nil +} + +// GetDeviceRunContainerOptions simply returns nil. +func (h *ManagerStub) GetDeviceRunContainerOptions(pod *v1.Pod, container *v1.Container) *DeviceRunContainerOptions { + return nil +} + +// GetCapacity simply returns nil capacity and empty removed resource list. 
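sanitizeNodeAllocatable above only ever raises a scalar resource's allocatable value so that it is at least the amount already allocated; it never lowers it. A distilled sketch of that clamping rule, not part of this patch, with plain maps standing in for schedulercache.Resource.ScalarResources:

package main

import "fmt"

// sanitize raises allocatable[resource] to at least allocated[resource].
func sanitize(allocatable map[string]int64, allocated map[string]int) {
	for resource, needed := range allocated {
		if quant, ok := allocatable[resource]; ok && int(quant) >= needed {
			continue // already large enough, leave it alone
		}
		allocatable[resource] = int64(needed)
	}
}

func main() {
	allocatable := map[string]int64{"vendor.com/gpu": 1}
	allocated := map[string]int{"vendor.com/gpu": 2, "vendor.com/nic": 1}
	sanitize(allocatable, allocated)
	fmt.Println(allocatable["vendor.com/gpu"], allocatable["vendor.com/nic"]) // 2 1
}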
+func (h *ManagerStub) GetCapacity() (v1.ResourceList, []string) { + return nil, []string{} +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/deviceplugin/pod_devices.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/deviceplugin/pod_devices.go new file mode 100644 index 000000000000..311c8d0c60fe --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/deviceplugin/pod_devices.go @@ -0,0 +1,257 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package deviceplugin + +import ( + "github.com/golang/glog" + + "k8s.io/apimachinery/pkg/util/sets" + pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" +) + +type deviceAllocateInfo struct { + // deviceIds contains device Ids allocated to this container for the given resourceName. + deviceIds sets.String + // allocResp contains cached rpc AllocateResponse. + allocResp *pluginapi.AllocateResponse +} + +type resourceAllocateInfo map[string]deviceAllocateInfo // Keyed by resourceName. +type containerDevices map[string]resourceAllocateInfo // Keyed by containerName. +type podDevices map[string]containerDevices // Keyed by podUID. + +func (pdev podDevices) pods() sets.String { + ret := sets.NewString() + for k := range pdev { + ret.Insert(k) + } + return ret +} + +func (pdev podDevices) insert(podUID, contName, resource string, devices sets.String, resp *pluginapi.AllocateResponse) { + if _, podExists := pdev[podUID]; !podExists { + pdev[podUID] = make(containerDevices) + } + if _, contExists := pdev[podUID][contName]; !contExists { + pdev[podUID][contName] = make(resourceAllocateInfo) + } + pdev[podUID][contName][resource] = deviceAllocateInfo{ + deviceIds: devices, + allocResp: resp, + } +} + +func (pdev podDevices) delete(pods []string) { + for _, uid := range pods { + delete(pdev, uid) + } +} + +// Returns list of device Ids allocated to the given container for the given resource. +// Returns nil if we don't have cached state for the given . +func (pdev podDevices) containerDevices(podUID, contName, resource string) sets.String { + if _, podExists := pdev[podUID]; !podExists { + return nil + } + if _, contExists := pdev[podUID][contName]; !contExists { + return nil + } + devs, resourceExists := pdev[podUID][contName][resource] + if !resourceExists { + return nil + } + return devs.deviceIds +} + +// Populates allocatedResources with the device resources allocated to the specified . +func (pdev podDevices) addContainerAllocatedResources(podUID, contName string, allocatedResources map[string]sets.String) { + containers, exists := pdev[podUID] + if !exists { + return + } + resources, exists := containers[contName] + if !exists { + return + } + for resource, devices := range resources { + allocatedResources[resource] = allocatedResources[resource].Union(devices.deviceIds) + } +} + +// Removes the device resources allocated to the specified from allocatedResources. 
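podDevices above is a three-level map keyed by pod UID, container name and resource name. A distilled sketch of the insert/lookup pattern, not part of this patch, with the cached AllocateResponse left out for brevity:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

// podDevices: pod UID -> container name -> resource name -> device IDs.
type podDevices map[string]map[string]map[string]sets.String

func (pdev podDevices) insert(podUID, contName, resource string, devices sets.String) {
	if pdev[podUID] == nil {
		pdev[podUID] = make(map[string]map[string]sets.String)
	}
	if pdev[podUID][contName] == nil {
		pdev[podUID][contName] = make(map[string]sets.String)
	}
	pdev[podUID][contName][resource] = devices
}

// containerDevices returns nil when there is no cached state for the key,
// since reads from nil maps yield the zero value.
func (pdev podDevices) containerDevices(podUID, contName, resource string) sets.String {
	return pdev[podUID][contName][resource]
}

func main() {
	pdev := podDevices{}
	pdev.insert("pod-uid", "ctr", "vendor.com/gpu", sets.NewString("dev-0"))
	fmt.Println(pdev.containerDevices("pod-uid", "ctr", "vendor.com/gpu").List())   // [dev-0]
	fmt.Println(pdev.containerDevices("other-pod", "ctr", "vendor.com/gpu") == nil) // true
}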
+func (pdev podDevices) removeContainerAllocatedResources(podUID, contName string, allocatedResources map[string]sets.String) { + containers, exists := pdev[podUID] + if !exists { + return + } + resources, exists := containers[contName] + if !exists { + return + } + for resource, devices := range resources { + allocatedResources[resource] = allocatedResources[resource].Difference(devices.deviceIds) + } +} + +// Returns all of devices allocated to the pods being tracked, keyed by resourceName. +func (pdev podDevices) devices() map[string]sets.String { + ret := make(map[string]sets.String) + for _, containerDevices := range pdev { + for _, resources := range containerDevices { + for resource, devices := range resources { + if _, exists := ret[resource]; !exists { + ret[resource] = sets.NewString() + } + ret[resource] = ret[resource].Union(devices.deviceIds) + } + } + } + return ret +} + +// podDevicesCheckpointEntry is used to record to device allocation information. +type podDevicesCheckpointEntry struct { + PodUID string + ContainerName string + ResourceName string + DeviceIDs []string + AllocResp []byte +} + +// Turns podDevices to checkpointData. +func (pdev podDevices) toCheckpointData() []podDevicesCheckpointEntry { + var data []podDevicesCheckpointEntry + for podUID, containerDevices := range pdev { + for conName, resources := range containerDevices { + for resource, devices := range resources { + devIds := devices.deviceIds.UnsortedList() + if devices.allocResp == nil { + glog.Errorf("Can't marshal allocResp for %v %v %v: allocation response is missing", podUID, conName, resource) + continue + } + + allocResp, err := devices.allocResp.Marshal() + if err != nil { + glog.Errorf("Can't marshal allocResp for %v %v %v: %v", podUID, conName, resource, err) + continue + } + data = append(data, podDevicesCheckpointEntry{podUID, conName, resource, devIds, allocResp}) + } + } + } + return data +} + +// Populates podDevices from the passed in checkpointData. +func (pdev podDevices) fromCheckpointData(data []podDevicesCheckpointEntry) { + for _, entry := range data { + glog.V(2).Infof("Get checkpoint entry: %v %v %v %v %v\n", + entry.PodUID, entry.ContainerName, entry.ResourceName, entry.DeviceIDs, entry.AllocResp) + devIDs := sets.NewString() + for _, devID := range entry.DeviceIDs { + devIDs.Insert(devID) + } + allocResp := &pluginapi.AllocateResponse{} + err := allocResp.Unmarshal(entry.AllocResp) + if err != nil { + glog.Errorf("Can't unmarshal allocResp for %v %v %v: %v", entry.PodUID, entry.ContainerName, entry.ResourceName, err) + continue + } + pdev.insert(entry.PodUID, entry.ContainerName, entry.ResourceName, devIDs, allocResp) + } +} + +// Returns combined container runtime settings to consume the container's allocated devices. +func (pdev podDevices) deviceRunContainerOptions(podUID, contName string) *DeviceRunContainerOptions { + containers, exists := pdev[podUID] + if !exists { + return nil + } + resources, exists := containers[contName] + if !exists { + return nil + } + opts := &DeviceRunContainerOptions{} + // Maps to detect duplicate settings. + devsMap := make(map[string]string) + mountsMap := make(map[string]string) + envsMap := make(map[string]string) + // Loops through AllocationResponses of all cached device resources. + for _, devices := range resources { + resp := devices.allocResp + // Each Allocate response has the following artifacts. + // Environment variables + // Mount points + // Device files + // These artifacts are per resource per container. 
+ // Updates RunContainerOptions.Envs. + for k, v := range resp.Envs { + if e, ok := envsMap[k]; ok { + glog.V(3).Infof("skip existing env %s %s", k, v) + if e != v { + glog.Errorf("Environment variable %s has conflicting setting: %s and %s", k, e, v) + } + continue + } + glog.V(4).Infof("add env %s %s", k, v) + envsMap[k] = v + opts.Envs = append(opts.Envs, kubecontainer.EnvVar{Name: k, Value: v}) + } + + // Updates RunContainerOptions.Devices. + for _, dev := range resp.Devices { + if d, ok := devsMap[dev.ContainerPath]; ok { + glog.V(3).Infof("skip existing device %s %s", dev.ContainerPath, dev.HostPath) + if d != dev.HostPath { + glog.Errorf("Container device %s has conflicting mapping host devices: %s and %s", + dev.ContainerPath, d, dev.HostPath) + } + continue + } + glog.V(4).Infof("add device %s %s", dev.ContainerPath, dev.HostPath) + devsMap[dev.ContainerPath] = dev.HostPath + opts.Devices = append(opts.Devices, kubecontainer.DeviceInfo{ + PathOnHost: dev.HostPath, + PathInContainer: dev.ContainerPath, + Permissions: dev.Permissions, + }) + } + // Updates RunContainerOptions.Mounts. + for _, mount := range resp.Mounts { + if m, ok := mountsMap[mount.ContainerPath]; ok { + glog.V(3).Infof("skip existing mount %s %s", mount.ContainerPath, mount.HostPath) + if m != mount.HostPath { + glog.Errorf("Container mount %s has conflicting mapping host mounts: %s and %s", + mount.ContainerPath, m, mount.HostPath) + } + continue + } + glog.V(4).Infof("add mount %s %s", mount.ContainerPath, mount.HostPath) + mountsMap[mount.ContainerPath] = mount.HostPath + opts.Mounts = append(opts.Mounts, kubecontainer.Mount{ + Name: mount.ContainerPath, + ContainerPath: mount.ContainerPath, + HostPath: mount.HostPath, + ReadOnly: mount.ReadOnly, + // TODO: This may need to be part of Device plugin API. + SELinuxRelabel: false, + }) + } + } + return opts +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/deviceplugin/types.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/deviceplugin/types.go new file mode 100644 index 000000000000..c4465a8be4cb --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/deviceplugin/types.go @@ -0,0 +1,96 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package deviceplugin + +import ( + "k8s.io/api/core/v1" + pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha" + "k8s.io/kubernetes/pkg/kubelet/config" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/kubelet/lifecycle" + "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" +) + +// Manager manages all the Device Plugins running on a node. +type Manager interface { + // Start starts device plugin registration service. + Start(activePods ActivePodsFunc, sourcesReady config.SourcesReady) error + + // Devices is the map of devices that have registered themselves + // against the manager. + // The map key is the ResourceName of the device plugins. + Devices() map[string][]pluginapi.Device + + // Allocate configures and assigns devices to pods. 
The pods are provided + // through the pod admission attributes in the attrs argument. From the + // requested device resources, Allocate will communicate with the owning + // device plugin to allow setup procedures to take place, and for the + // device plugin to provide runtime settings to use the device (environment + // variables, mount points and device files). The node object is provided + // for the device manager to update the node capacity to reflect the + // currently available devices. + Allocate(node *schedulercache.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error + + // Stop stops the manager. + Stop() error + + // GetDeviceRunContainerOptions checks whether we have cached containerDevices + // for the passed-in and returns its DeviceRunContainerOptions + // for the found one. An empty struct is returned in case no cached state is found. + GetDeviceRunContainerOptions(pod *v1.Pod, container *v1.Container) *DeviceRunContainerOptions + + // GetCapacity returns the amount of available device plugin resource capacity + // and inactive device plugin resources previously registered on the node. + GetCapacity() (v1.ResourceList, []string) +} + +// DeviceRunContainerOptions contains the combined container runtime settings to consume its allocated devices. +type DeviceRunContainerOptions struct { + // The environment variables list. + Envs []kubecontainer.EnvVar + // The mounts for the container. + Mounts []kubecontainer.Mount + // The host devices mapped into the container. + Devices []kubecontainer.DeviceInfo +} + +// TODO: evaluate whether we need these error definitions. +const ( + // errFailedToDialDevicePlugin is the error raised when the device plugin could not be + // reached on the registered socket + errFailedToDialDevicePlugin = "failed to dial device plugin:" + // errUnsuportedVersion is the error raised when the device plugin uses an API version not + // supported by the Kubelet registry + errUnsuportedVersion = "requested API version %q is not supported by kubelet. 
Supported version is %q" + // errDevicePluginAlreadyExists is the error raised when a device plugin with the + // same Resource Name tries to register itself + errDevicePluginAlreadyExists = "another device plugin already registered this Resource Name" + // errInvalidResourceName is the error raised when a device plugin is registering + // itself with an invalid ResourceName + errInvalidResourceName = "the ResourceName %q is invalid" + // errEmptyResourceName is the error raised when the resource name field is empty + errEmptyResourceName = "invalid Empty ResourceName" + + // errBadSocket is the error raised when the registry socket path is not absolute + errBadSocket = "bad socketPath, must be an absolute path:" + // errRemoveSocket is the error raised when the registry could not remove the existing socket + errRemoveSocket = "failed to remove socket while starting device plugin registry, with error" + // errListenSocket is the error raised when the registry could not listen on the socket + errListenSocket = "failed to listen to socket while starting device plugin registry, with error" + // errListAndWatch is the error raised when ListAndWatch ended unsuccessfully + errListAndWatch = "listAndWatch ended unexpectedly for device plugin %s with error %v" +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/helpers_linux.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/helpers_linux.go index 6525bb98439f..935fb6c8060e 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/helpers_linux.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/helpers_linux.go @@ -26,9 +26,10 @@ import ( libcontainercgroups "github.com/opencontainers/runc/libcontainer/cgroups" "k8s.io/api/core/v1" - v1helper "k8s.io/kubernetes/pkg/api/v1/helper" - v1qos "k8s.io/kubernetes/pkg/api/v1/helper/qos" + "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/pkg/api/v1/resource" + v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" + v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos" ) const ( @@ -222,3 +223,8 @@ func getCgroupProcs(dir string) ([]int, error) { } return out, nil } + +// GetPodCgroupNameSuffix returns the last element of the pod CgroupName identifier +func GetPodCgroupNameSuffix(podUID types.UID) string { + return podCgroupNamePrefix + string(podUID) +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/helpers_unsupported.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/helpers_unsupported.go index 3b88c7360c60..b572f3456f3f 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/helpers_unsupported.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/helpers_unsupported.go @@ -18,7 +18,10 @@ limitations under the License. 
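GetPodCgroupNameSuffix above concatenates the kubelet's pod cgroup prefix with the pod UID; pod_container_manager_linux.go, further down in this patch, joins that suffix under the QoS parent cgroup. A minimal sketch, not part of this patch, assuming the prefix value "pod" and a made-up parent and UID:

package main

import (
	"fmt"
	"path"
)

const podCgroupNamePrefix = "pod" // assumed value of the kubelet constant

func main() {
	parent := "/kubepods/burstable" // hypothetical QoS parent cgroup
	suffix := podCgroupNamePrefix + "8dbb922a-ffd5-11e7-8343-000000000001"
	fmt.Println(path.Join(parent, suffix))
	// /kubepods/burstable/pod8dbb922a-ffd5-11e7-8343-000000000001
}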
package cm -import "k8s.io/api/core/v1" +import ( + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" +) const ( MinShares = 0 @@ -52,3 +55,8 @@ func GetCgroupSubsystems() (*CgroupSubsystems, error) { func getCgroupProcs(dir string) ([]int, error) { return nil, nil } + +// GetPodCgroupNameSuffix returns the last element of the pod CgroupName identifier +func GetPodCgroupNameSuffix(podUID types.UID) string { + return "" +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/node_container_manager.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/node_container_manager.go index c9cb48ce5592..66e0d82467e0 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/node_container_manager.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/node_container_manager.go @@ -29,7 +29,6 @@ import ( "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/types" utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/kubernetes/pkg/api" kubefeatures "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/kubelet/events" evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api" @@ -55,7 +54,7 @@ func (cm *containerManagerImpl) createNodeAllocatableCgroups() error { return nil } -// Enforce Node Allocatable Cgroup settings. +// enforceNodeAllocatableCgroups enforce Node Allocatable Cgroup settings. func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error { nc := cm.NodeConfig.NodeAllocatableConfig @@ -139,7 +138,7 @@ func enforceExistingCgroup(cgroupManager CgroupManager, cName string, rl v1.Reso return nil } -// Returns a ResourceConfig object that can be used to create or update cgroups via CgroupManager interface. +// getCgroupConfig returns a ResourceConfig object that can be used to create or update cgroups via CgroupManager interface. func getCgroupConfig(rl v1.ResourceList) *ResourceConfig { // TODO(vishh): Set CPU Quota if necessary. if rl == nil { @@ -186,7 +185,7 @@ func (cm *containerManagerImpl) getNodeAllocatableAbsolute() v1.ResourceList { } -// GetNodeAllocatable returns amount of compute or storage resource that have to be reserved on this node from scheduling. +// GetNodeAllocatableReservation returns amount of compute or storage resource that have to be reserved on this node from scheduling. 
func (cm *containerManagerImpl) GetNodeAllocatableReservation() v1.ResourceList { evictionReservation := hardEvictionReservation(cm.HardEvictionThresholds, cm.capacity) result := make(v1.ResourceList) @@ -238,16 +237,7 @@ func (cm *containerManagerImpl) validateNodeAllocatable() error { var errors []string nar := cm.GetNodeAllocatableReservation() for k, v := range nar { - capacityClone, err := api.Scheme.DeepCopy(cm.capacity[k]) - if err != nil { - errors = append(errors, fmt.Sprintf("DeepCopy capacity error")) - } - value, ok := capacityClone.(resource.Quantity) - if !ok { - return fmt.Errorf( - "failed to cast object %#v to Quantity", - capacityClone) - } + value := cm.capacity[k].DeepCopy() value.Sub(v) if value.Sign() < 0 { diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/pod_container_manager_linux.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/pod_container_manager_linux.go index 7180a73c49c5..e62d192891d4 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/pod_container_manager_linux.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/pod_container_manager_linux.go @@ -27,7 +27,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" utilerrors "k8s.io/apimachinery/pkg/util/errors" - v1qos "k8s.io/kubernetes/pkg/api/v1/helper/qos" + v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos" ) const ( @@ -104,7 +104,7 @@ func (m *podContainerManagerImpl) GetPodContainerName(pod *v1.Pod) (CgroupName, case v1.PodQOSBestEffort: parentContainer = m.qosContainersInfo.BestEffort } - podContainer := podCgroupNamePrefix + string(pod.UID) + podContainer := GetPodCgroupNameSuffix(pod.UID) // Get the absolute path of the cgroup cgroupName := (CgroupName)(path.Join(parentContainer, podContainer)) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/qos_container_manager_linux.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/qos_container_manager_linux.go index 8cfea46ce6f3..0cd8559998ee 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/qos_container_manager_linux.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/qos_container_manager_linux.go @@ -31,8 +31,8 @@ import ( cgroupfs "github.com/opencontainers/runc/libcontainer/cgroups/fs" "k8s.io/api/core/v1" utilfeature "k8s.io/apiserver/pkg/util/feature" - v1qos "k8s.io/kubernetes/pkg/api/v1/helper/qos" "k8s.io/kubernetes/pkg/api/v1/resource" + v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos" kubefeatures "k8s.io/kubernetes/pkg/features" ) @@ -56,21 +56,21 @@ type qosContainerManagerImpl struct { cgroupManager CgroupManager activePods ActivePodsFunc getNodeAllocatable func() v1.ResourceList - cgroupRoot string + cgroupRoot CgroupName qosReserved map[v1.ResourceName]int64 } -func NewQOSContainerManager(subsystems *CgroupSubsystems, cgroupRoot string, nodeConfig NodeConfig) (QOSContainerManager, error) { +func NewQOSContainerManager(subsystems *CgroupSubsystems, cgroupRoot string, nodeConfig NodeConfig, cgroupManager CgroupManager) (QOSContainerManager, error) { if !nodeConfig.CgroupsPerQOS { return &qosContainerManagerNoop{ - cgroupRoot: CgroupName(nodeConfig.CgroupRoot), + cgroupRoot: CgroupName(cgroupRoot), }, nil } return &qosContainerManagerImpl{ subsystems: subsystems, - cgroupManager: NewCgroupManager(subsystems, nodeConfig.CgroupDriver), - cgroupRoot: cgroupRoot, + cgroupManager: cgroupManager, + cgroupRoot: CgroupName(cgroupRoot), qosReserved: nodeConfig.ExperimentalQOSReserved, }, nil } @@ -81,7 +81,7 @@ func (m *qosContainerManagerImpl) GetQOSContainersInfo() QOSContainersInfo { func (m *qosContainerManagerImpl) 
Start(getNodeAllocatable func() v1.ResourceList, activePods ActivePodsFunc) error { cm := m.cgroupManager - rootContainer := m.cgroupRoot + rootContainer := string(m.cgroupRoot) if !cm.Exists(CgroupName(rootContainer)) { return fmt.Errorf("root container %s doesn't exist", rootContainer) } @@ -194,9 +194,6 @@ func (m *qosContainerManagerImpl) setCPUCgroupConfig(configs map[v1.PodQOSClass] // set burstable shares based on current observe state burstableCPUShares := MilliCPUToShares(burstablePodCPURequest) - if burstableCPUShares < uint64(MinShares) { - burstableCPUShares = uint64(MinShares) - } configs[v1.PodQOSBurstable].ResourceParameters.CpuShares = &burstableCPUShares return nil } @@ -317,7 +314,7 @@ func (m *qosContainerManagerImpl) UpdateCgroups() error { } } if updateSuccess { - glog.V(2).Infof("[ContainerManager]: Updated QoS cgroup configuration") + glog.V(4).Infof("[ContainerManager]: Updated QoS cgroup configuration") return nil } @@ -334,12 +331,12 @@ func (m *qosContainerManagerImpl) UpdateCgroups() error { for _, config := range qosConfigs { err := m.cgroupManager.Update(config) if err != nil { - glog.V(2).Infof("[ContainerManager]: Failed to update QoS cgroup configuration") + glog.Errorf("[ContainerManager]: Failed to update QoS cgroup configuration") return err } } - glog.V(2).Infof("[ContainerManager]: Updated QoS cgroup configuration on retry") + glog.V(4).Infof("[ContainerManager]: Updated QoS cgroup configuration on retry") return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/util/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/util/BUILD index 66030b3f25c8..9de2c5ba0116 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/util/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/util/BUILD @@ -15,6 +15,7 @@ go_library( ], "//conditions:default": [], }), + importpath = "k8s.io/kubernetes/pkg/kubelet/cm/util", deps = select({ "@io_bazel_rules_go//go/platform:linux_amd64": [ "//vendor/github.com/opencontainers/runc/libcontainer/cgroups:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/config/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/config/BUILD index defc45f72d32..f5e082e7c256 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/config/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/config/BUILD @@ -1,10 +1,4 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -12,9 +6,11 @@ go_library( "apiserver.go", "common.go", "config.go", + "defaults.go", "doc.go", "file.go", "file_unsupported.go", + "flags.go", "http.go", "sources.go", ] + select({ @@ -23,12 +19,16 @@ go_library( ], "//conditions:default": [], }), + importpath = "k8s.io/kubernetes/pkg/kubelet/config", + visibility = ["//visibility:public"], deps = [ - "//pkg/api:go_default_library", - "//pkg/api/helper:go_default_library", - "//pkg/api/install:go_default_library", - "//pkg/api/v1:go_default_library", - "//pkg/api/validation:go_default_library", + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/helper:go_default_library", + "//pkg/apis/core/install:go_default_library", + "//pkg/apis/core/v1:go_default_library", + "//pkg/apis/core/validation:go_default_library", + "//pkg/kubelet/checkpoint:go_default_library", "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/events:go_default_library", "//pkg/kubelet/types:go_default_library", 
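validateNodeAllocatable, earlier in this patch, now copies capacity with Quantity.DeepCopy instead of round-tripping through api.Scheme.DeepCopy and a type assertion. A small sketch of that Quantity arithmetic, not part of this patch, with made-up capacity and reservation values:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	capacity := resource.MustParse("8Gi")  // hypothetical node memory capacity
	reserved := resource.MustParse("10Gi") // hypothetical reservation

	value := capacity.DeepCopy() // copy so the original capacity is untouched
	value.Sub(reserved)
	fmt.Println(value.Sign() < 0) // true: the reservation exceeds capacity
}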
@@ -36,13 +36,13 @@ go_library( "//pkg/util/config:go_default_library", "//pkg/util/hash:go_default_library", "//vendor/github.com/golang/glog:go_default_library", + "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/yaml:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", @@ -69,12 +69,14 @@ go_test( ], "//conditions:default": [], }), + importpath = "k8s.io/kubernetes/pkg/kubelet/config", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", - "//pkg/api/v1:go_default_library", - "//pkg/api/validation:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/v1:go_default_library", + "//pkg/apis/core/validation:go_default_library", "//pkg/kubelet/types:go_default_library", "//pkg/securitycontext:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", @@ -106,4 +108,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/config/apiserver.go b/vendor/k8s.io/kubernetes/pkg/kubelet/config/apiserver.go index 016c33bf7201..a8afc6e345c6 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/config/apiserver.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/config/apiserver.go @@ -25,13 +25,13 @@ import ( "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" ) // NewSourceApiserver creates a config source that watches and pulls from the apiserver. func NewSourceApiserver(c clientset.Interface, nodeName types.NodeName, updates chan<- interface{}) { - lw := cache.NewListWatchFromClient(c.Core().RESTClient(), "pods", metav1.NamespaceAll, fields.OneTermEqualSelector(api.PodHostField, string(nodeName))) + lw := cache.NewListWatchFromClient(c.CoreV1().RESTClient(), "pods", metav1.NamespaceAll, fields.OneTermEqualSelector(api.PodHostField, string(nodeName))) newSourceApiserverFromLW(lw, updates) } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/config/common.go b/vendor/k8s.io/kubernetes/pkg/kubelet/config/common.go index 8203efa57ced..02a4f476126e 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/config/common.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/config/common.go @@ -27,14 +27,15 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" utilyaml "k8s.io/apimachinery/pkg/util/yaml" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/helper" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/helper" // TODO: remove this import if // api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String() is changed // to "v1"? 
- _ "k8s.io/kubernetes/pkg/api/install" - k8s_api_v1 "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/api/legacyscheme" + _ "k8s.io/kubernetes/pkg/apis/core/install" + k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1" + "k8s.io/kubernetes/pkg/apis/core/validation" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/util/hash" @@ -98,7 +99,7 @@ func getSelfLink(name, namespace string) string { if len(namespace) == 0 { namespace = metav1.NamespaceDefault } - selfLink = fmt.Sprintf("/api/"+api.Registry.GroupOrDie(api.GroupName).GroupVersion.Version+"/namespaces/%s/pods/%s", namespace, name) + selfLink = fmt.Sprintf("/api/"+legacyscheme.Registry.GroupOrDie(api.GroupName).GroupVersion.Version+"/namespaces/%s/pods/%s", namespace, name) return selfLink } @@ -110,7 +111,7 @@ func tryDecodeSinglePod(data []byte, defaultFn defaultFunc) (parsed bool, pod *v if err != nil { return false, nil, err } - obj, err := runtime.Decode(api.Codecs.UniversalDecoder(), json) + obj, err := runtime.Decode(legacyscheme.Codecs.UniversalDecoder(), json) if err != nil { return false, pod, err } @@ -118,8 +119,7 @@ func tryDecodeSinglePod(data []byte, defaultFn defaultFunc) (parsed bool, pod *v newPod, ok := obj.(*api.Pod) // Check whether the object could be converted to single pod. if !ok { - err = fmt.Errorf("invalid pod: %#v", obj) - return false, pod, err + return false, pod, fmt.Errorf("invalid pod: %#v", obj) } // Apply default values and validate the pod. @@ -127,18 +127,18 @@ func tryDecodeSinglePod(data []byte, defaultFn defaultFunc) (parsed bool, pod *v return true, pod, err } if errs := validation.ValidatePod(newPod); len(errs) > 0 { - err = fmt.Errorf("invalid pod: %v", errs) - return true, pod, err + return true, pod, fmt.Errorf("invalid pod: %v", errs) } v1Pod := &v1.Pod{} - if err := k8s_api_v1.Convert_api_Pod_To_v1_Pod(newPod, v1Pod, nil); err != nil { + if err := k8s_api_v1.Convert_core_Pod_To_v1_Pod(newPod, v1Pod, nil); err != nil { + glog.Errorf("Pod %q failed to convert to v1", newPod.Name) return true, nil, err } return true, v1Pod, nil } func tryDecodePodList(data []byte, defaultFn defaultFunc) (parsed bool, pods v1.PodList, err error) { - obj, err := runtime.Decode(api.Codecs.UniversalDecoder(), data) + obj, err := runtime.Decode(legacyscheme.Codecs.UniversalDecoder(), data) if err != nil { return false, pods, err } @@ -162,7 +162,7 @@ func tryDecodePodList(data []byte, defaultFn defaultFunc) (parsed bool, pods v1. 
} } v1Pods := &v1.PodList{} - if err := k8s_api_v1.Convert_api_PodList_To_v1_PodList(newPods, v1Pods, nil); err != nil { + if err := k8s_api_v1.Convert_core_PodList_To_v1_PodList(newPods, v1Pods, nil); err != nil { return true, pods, err } return true, *v1Pods, err diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/config/config.go b/vendor/k8s.io/kubernetes/pkg/kubelet/config/config.go index 0ecd6bbf5d2a..d131c6ce3950 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/config/config.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/config/config.go @@ -25,11 +25,8 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/client-go/tools/record" - "k8s.io/kubernetes/pkg/api" - k8s_api_v1 "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/kubelet/checkpoint" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/events" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" @@ -65,8 +62,9 @@ type PodConfig struct { updates chan kubetypes.PodUpdate // contains the list of all configured sources - sourcesLock sync.Mutex - sources sets.String + sourcesLock sync.Mutex + sources sets.String + checkpointManager checkpoint.Manager } // NewPodConfig creates an object that can merge many configuration sources into a stream @@ -112,6 +110,19 @@ func (c *PodConfig) Sync() { c.pods.Sync() } +// Restore restores pods from the checkpoint path, *once* +func (c *PodConfig) Restore(path string, updates chan<- interface{}) error { + var err error + if c.checkpointManager == nil { + c.checkpointManager = checkpoint.NewCheckpointManager(path) + pods, err := c.checkpointManager.LoadPods() + if err == nil { + updates <- kubetypes.PodUpdate{Pods: pods, Op: kubetypes.RESTORE, Source: kubetypes.ApiserverSource} + } + } + return err +} + // podStorage manages the current pod state at any point in time and ensures updates // to the channel are delivered in order. Note that this object is an in-memory source of // "truth" and on creation contains zero entries. Once all previously read sources are @@ -156,7 +167,7 @@ func (s *podStorage) Merge(source string, change interface{}) error { defer s.updateLock.Unlock() seenBefore := s.sourcesSeen.Has(source) - adds, updates, deletes, removes, reconciles := s.merge(source, change) + adds, updates, deletes, removes, reconciles, restores := s.merge(source, change) firstSet := !seenBefore && s.sourcesSeen.Has(source) // deliver update notifications @@ -174,6 +185,9 @@ func (s *podStorage) Merge(source string, change interface{}) error { if len(deletes.Pods) > 0 { s.updates <- *deletes } + if len(restores.Pods) > 0 { + s.updates <- *restores + } if firstSet && len(adds.Pods) == 0 && len(updates.Pods) == 0 && len(deletes.Pods) == 0 { // Send an empty update when first seeing the source and there are // no ADD or UPDATE or DELETE pods from the source. 
This signals kubelet that @@ -210,7 +224,7 @@ func (s *podStorage) Merge(source string, change interface{}) error { return nil } -func (s *podStorage) merge(source string, change interface{}) (adds, updates, deletes, removes, reconciles *kubetypes.PodUpdate) { +func (s *podStorage) merge(source string, change interface{}) (adds, updates, deletes, removes, reconciles, restores *kubetypes.PodUpdate) { s.podLock.Lock() defer s.podLock.Unlock() @@ -219,6 +233,7 @@ func (s *podStorage) merge(source string, change interface{}) (adds, updates, de deletePods := []*v1.Pod{} removePods := []*v1.Pod{} reconcilePods := []*v1.Pod{} + restorePods := []*v1.Pod{} pods := s.pods[source] if pods == nil { @@ -291,6 +306,8 @@ func (s *podStorage) merge(source string, change interface{}) (adds, updates, de removePods = append(removePods, existing) } } + case kubetypes.RESTORE: + glog.V(4).Infof("Restoring pods for source %s", source) default: glog.Warningf("Received invalid update type: %v", update) @@ -304,8 +321,9 @@ func (s *podStorage) merge(source string, change interface{}) (adds, updates, de deletes = &kubetypes.PodUpdate{Op: kubetypes.DELETE, Pods: copyPods(deletePods), Source: source} removes = &kubetypes.PodUpdate{Op: kubetypes.REMOVE, Pods: copyPods(removePods), Source: source} reconciles = &kubetypes.PodUpdate{Op: kubetypes.RECONCILE, Pods: copyPods(reconcilePods), Source: source} + restores = &kubetypes.PodUpdate{Op: kubetypes.RESTORE, Pods: copyPods(restorePods), Source: source} - return adds, updates, deletes, removes, reconciles + return adds, updates, deletes, removes, reconciles, restores } func (s *podStorage) markSourceSet(source string) { @@ -323,34 +341,17 @@ func (s *podStorage) seenSources(sources ...string) bool { func filterInvalidPods(pods []*v1.Pod, source string, recorder record.EventRecorder) (filtered []*v1.Pod) { names := sets.String{} for i, pod := range pods { - var errlist field.ErrorList - // TODO: remove the conversion when validation is performed on versioned objects. - internalPod := &api.Pod{} - if err := k8s_api_v1.Convert_v1_Pod_To_api_Pod(pod, internalPod, nil); err != nil { - glog.Warningf("Pod[%d] (%s) from %s failed to convert to v1, ignoring: %v", i+1, format.Pod(pod), source, err) - recorder.Eventf(pod, v1.EventTypeWarning, "FailedConversion", "Error converting pod %s from %s, ignoring: %v", format.Pod(pod), source, err) + // Pods from each source are assumed to have passed validation individually. + // This function only checks if there is any naming conflict. + name := kubecontainer.GetPodFullName(pod) + if names.Has(name) { + glog.Warningf("Pod[%d] (%s) from %s failed validation due to duplicate pod name %q, ignoring", i+1, format.Pod(pod), source, pod.Name) + recorder.Eventf(pod, v1.EventTypeWarning, events.FailedValidation, "Error validating pod %s from %s due to duplicate pod name %q, ignoring", format.Pod(pod), source, pod.Name) continue - } - if errs := validation.ValidatePod(internalPod); len(errs) != 0 { - errlist = append(errlist, errs...) - // If validation fails, don't trust it any further - - // even Name could be bad. } else { - name := kubecontainer.GetPodFullName(pod) - if names.Has(name) { - // TODO: when validation becomes versioned, this gets a bit - // more complicated. 
- errlist = append(errlist, field.Duplicate(field.NewPath("metadata", "name"), pod.Name)) - } else { - names.Insert(name) - } - } - if len(errlist) > 0 { - err := errlist.ToAggregate() - glog.Warningf("Pod[%d] (%s) from %s failed validation, ignoring: %v", i+1, format.Pod(pod), source, err) - recorder.Eventf(pod, v1.EventTypeWarning, events.FailedValidation, "Error validating pod %s from %s, ignoring: %v", format.Pod(pod), source, err) - continue + names.Insert(name) } + filtered = append(filtered, pod) } return @@ -491,12 +492,7 @@ func (s *podStorage) MergedState() interface{} { pods := make([]*v1.Pod, 0) for _, sourcePods := range s.pods { for _, podRef := range sourcePods { - pod, err := api.Scheme.Copy(podRef) - if err != nil { - glog.Errorf("unable to copy pod: %v", err) - continue - } - pods = append(pods, pod.(*v1.Pod)) + pods = append(pods, podRef.DeepCopy()) } } return pods @@ -506,12 +502,7 @@ func copyPods(sourcePods []*v1.Pod) []*v1.Pod { pods := []*v1.Pod{} for _, source := range sourcePods { // Use a deep copy here just in case - pod, err := api.Scheme.Copy(source) - if err != nil { - glog.Errorf("unable to copy pod: %v", err) - continue - } - pods = append(pods, pod.(*v1.Pod)) + pods = append(pods, source.DeepCopy()) } return pods } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/config/defaults.go b/vendor/k8s.io/kubernetes/pkg/kubelet/config/defaults.go new file mode 100644 index 000000000000..b90306968f1f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/config/defaults.go @@ -0,0 +1,26 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +const ( + DefaultKubeletPodsDirName = "pods" + DefaultKubeletVolumesDirName = "volumes" + DefaultKubeletVolumeDevicesDirName = "volumeDevices" + DefaultKubeletPluginsDirName = "plugins" + DefaultKubeletContainersDirName = "containers" + DefaultKubeletPluginContainersDirName = "plugin-containers" +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/config/file.go b/vendor/k8s.io/kubernetes/pkg/kubelet/config/file.go index f825c2eb526c..a706abdb36e2 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/config/file.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/config/file.go @@ -32,7 +32,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/cache" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/config/flags.go b/vendor/k8s.io/kubernetes/pkg/kubelet/config/flags.go new file mode 100644 index 000000000000..705b8babfd23 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/config/flags.go @@ -0,0 +1,109 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "github.com/spf13/pflag" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type ContainerRuntimeOptions struct { + + // General options. + + // ContainerRuntime is the container runtime to use. + ContainerRuntime string + // RuntimeCgroups that container runtime is expected to be isolated in. + RuntimeCgroups string + + // Docker-specific options. + + // DockershimRootDirectory is the path to the dockershim root directory. Defaults to + // /var/lib/dockershim if unset. Exposed for integration testing (e.g. in OpenShift). + DockershimRootDirectory string + // Enable dockershim only mode. + ExperimentalDockershim bool + // This flag, if set, disables use of a shared PID namespace for pods running in the docker CRI runtime. + // A shared PID namespace is the only option in non-docker runtimes and is required by the CRI. The ability to + // disable it for docker will be removed unless a compelling use case is discovered with widespread use. + // TODO: Remove once we no longer support disabling shared PID namespace (https://issues.k8s.io/41938) + DockerDisableSharedPID bool + // PodSandboxImage is the image whose network/ipc namespaces + // containers in each pod will use. + PodSandboxImage string + // DockerEndpoint is the path to the docker endpoint to communicate with. + DockerEndpoint string + // If no pulling progress is made before the deadline imagePullProgressDeadline, + // the image pulling will be cancelled. Defaults to 1m0s. + // +optional + ImagePullProgressDeadline metav1.Duration + + // Network plugin options. + + // networkPluginName is the name of the network plugin to be invoked for + // various events in kubelet/pod lifecycle + NetworkPluginName string + // NetworkPluginMTU is the MTU to be passed to the network plugin, + // and overrides the default MTU for cases where it cannot be automatically + // computed (such as IPSEC). + NetworkPluginMTU int32 + // CNIConfDir is the full path of the directory in which to search for + // CNI config files + CNIConfDir string + // CNIBinDir is the full path of the directory in which to search for + // CNI plugin binaries + CNIBinDir string + + // rkt-specific options. + + // rktPath is the path of rkt binary. Leave empty to use the first rkt in $PATH. + RktPath string + // rktApiEndpoint is the endpoint of the rkt API service to communicate with. + RktAPIEndpoint string + // rktStage1Image is the image to use as stage1. Local paths and + // http/https URLs are supported. + RktStage1Image string +} + +func (s *ContainerRuntimeOptions) AddFlags(fs *pflag.FlagSet) { + // General settings. + fs.StringVar(&s.ContainerRuntime, "container-runtime", s.ContainerRuntime, "The container runtime to use. Possible values: 'docker', 'rkt'.") + fs.StringVar(&s.RuntimeCgroups, "runtime-cgroups", s.RuntimeCgroups, "Optional absolute name of cgroups to create and run the runtime in.") + + // Docker-specific settings. + fs.BoolVar(&s.ExperimentalDockershim, "experimental-dockershim", s.ExperimentalDockershim, "Enable dockershim only mode. In this mode, kubelet will only start dockershim without any other functionalities. 
This flag only serves test purpose, please do not use it unless you are conscious of what you are doing. [default=false]") + fs.MarkHidden("experimental-dockershim") + fs.StringVar(&s.DockershimRootDirectory, "experimental-dockershim-root-directory", s.DockershimRootDirectory, "Path to the dockershim root directory.") + fs.MarkHidden("experimental-dockershim-root-directory") + fs.BoolVar(&s.DockerDisableSharedPID, "docker-disable-shared-pid", s.DockerDisableSharedPID, "The Container Runtime Interface (CRI) defaults to using a shared PID namespace for containers in a pod when running with Docker 1.13.1 or higher. Setting this flag reverts to the previous behavior of isolated PID namespaces. This ability will be removed in a future Kubernetes release.") + fs.StringVar(&s.PodSandboxImage, "pod-infra-container-image", s.PodSandboxImage, "The image whose network/ipc namespaces containers in each pod will use.") + fs.StringVar(&s.DockerEndpoint, "docker-endpoint", s.DockerEndpoint, "Use this for the docker endpoint to communicate with") + fs.DurationVar(&s.ImagePullProgressDeadline.Duration, "image-pull-progress-deadline", s.ImagePullProgressDeadline.Duration, "If no pulling progress is made before this deadline, the image pulling will be cancelled.") + + // Network plugin settings. Shared by both docker and rkt. + fs.StringVar(&s.NetworkPluginName, "network-plugin", s.NetworkPluginName, " The name of the network plugin to be invoked for various events in kubelet/pod lifecycle") + fs.StringVar(&s.CNIConfDir, "cni-conf-dir", s.CNIConfDir, " The full path of the directory in which to search for CNI config files. Default: /etc/cni/net.d") + fs.StringVar(&s.CNIBinDir, "cni-bin-dir", s.CNIBinDir, " The full path of the directory in which to search for CNI plugin binaries. Default: /opt/cni/bin") + fs.Int32Var(&s.NetworkPluginMTU, "network-plugin-mtu", s.NetworkPluginMTU, " The MTU to be passed to the network plugin, to override the default. Set to 0 to use the default 1460 MTU.") + + // Rkt-specific settings. + fs.StringVar(&s.RktPath, "rkt-path", s.RktPath, "Path of rkt binary. Leave empty to use the first rkt in $PATH. Only used if --container-runtime='rkt'.") + fs.StringVar(&s.RktAPIEndpoint, "rkt-api-endpoint", s.RktAPIEndpoint, "The endpoint of the rkt API service to communicate with. Only used if --container-runtime='rkt'.") + fs.StringVar(&s.RktStage1Image, "rkt-stage1-image", s.RktStage1Image, "image to use as stage1. Local paths and http/https URLs are supported. If empty, the 'stage1.aci' in the same directory as '--rkt-path' will be used.") + fs.MarkDeprecated("rkt-stage1-image", "Will be removed in a future version. 
The default stage1 image will be specified by the rkt configurations, see https://github.com/coreos/rkt/blob/master/Documentation/configuration.md for more details.") + +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/config/http.go b/vendor/k8s.io/kubernetes/pkg/kubelet/config/http.go index 077fdb4447f3..0ef43e062a54 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/config/http.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/config/http.go @@ -26,7 +26,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" "github.com/golang/glog" diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/configmap/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/configmap/BUILD index c078a2b2f26f..27612f889f24 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/configmap/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/configmap/BUILD @@ -12,6 +12,7 @@ go_library( "configmap_manager.go", "fake_manager.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/configmap", deps = [ "//pkg/api/v1/pod:go_default_library", "//pkg/kubelet/util:go_default_library", @@ -41,6 +42,7 @@ filegroup( go_test( name = "go_default_test", srcs = ["configmap_manager_test.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/configmap", library = ":go_default_library", deps = [ "//vendor/github.com/stretchr/testify/assert:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/configmap/configmap_manager.go b/vendor/k8s.io/kubernetes/pkg/kubelet/configmap/configmap_manager.go index d66f0bc2386f..71a7ae14854f 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/configmap/configmap_manager.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/configmap/configmap_manager.go @@ -64,7 +64,7 @@ func NewSimpleConfigMapManager(kubeClient clientset.Interface) Manager { } func (s *simpleConfigMapManager) GetConfigMap(namespace, name string) (*v1.ConfigMap, error) { - return s.kubeClient.Core().ConfigMaps(namespace).Get(name, metav1.GetOptions{}) + return s.kubeClient.CoreV1().ConfigMaps(namespace).Get(name, metav1.GetOptions{}) } func (s *simpleConfigMapManager) RegisterPod(pod *v1.Pod) { @@ -216,7 +216,7 @@ func (s *configMapStore) Get(namespace, name string) (*v1.ConfigMap, error) { // etcd and apiserver (the cache is eventually consistent). util.FromApiserverCache(&opts) } - configMap, err := s.kubeClient.Core().ConfigMaps(namespace).Get(name, opts) + configMap, err := s.kubeClient.CoreV1().ConfigMaps(namespace).Get(name, opts) if err != nil && !apierrors.IsNotFound(err) && data.configMap == nil && data.err == nil { // Couldn't fetch the latest configmap, but there is no cached data to return. // Return the fetch result instead. @@ -282,6 +282,11 @@ func (c *cachingConfigMapManager) RegisterPod(pod *v1.Pod) { c.registeredPods[key] = pod if prev != nil { for name := range getConfigMapNames(prev) { + // On an update, the .Add() call above will have re-incremented the + // ref count of any existing items, so any configmaps that are in both + // names and prev need to have their ref counts decremented. Any that + // are only in prev need to be completely removed. This unconditional + // call takes care of both cases. 
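ContainerRuntimeOptions.AddFlags, earlier in this patch, follows the usual pflag pattern of binding each field to a flag on the caller's FlagSet. A minimal sketch of that pattern, not part of this patch, with a hypothetical two-field options struct:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

// options is a made-up struct mirroring the AddFlags binding pattern.
type options struct {
	ContainerRuntime string
	NetworkPluginMTU int32
}

func (o *options) addFlags(fs *pflag.FlagSet) {
	fs.StringVar(&o.ContainerRuntime, "container-runtime", o.ContainerRuntime, "The container runtime to use.")
	fs.Int32Var(&o.NetworkPluginMTU, "network-plugin-mtu", o.NetworkPluginMTU, "MTU override for the network plugin.")
}

func main() {
	o := &options{ContainerRuntime: "docker"}
	fs := pflag.NewFlagSet("sketch", pflag.ContinueOnError)
	o.addFlags(fs)
	if err := fs.Parse([]string{"--network-plugin-mtu=1460"}); err != nil {
		panic(err)
	}
	fmt.Println(o.ContainerRuntime, o.NetworkPluginMTU) // docker 1460
}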
c.configMapStore.Delete(prev.Namespace, name) } } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/container/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/container/BUILD index 78c840846a9f..ac38ebd80049 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/container/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/container/BUILD @@ -21,9 +21,10 @@ go_library( ], "//conditions:default": [], }), + importpath = "k8s.io/kubernetes/pkg/kubelet/container", visibility = ["//visibility:public"], deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library", "//pkg/kubelet/util/format:go_default_library", "//pkg/kubelet/util/ioutils:go_default_library", @@ -57,10 +58,11 @@ go_test( "ref_test.go", "sync_result_test.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/container", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/install:go_default_library", + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/core/install:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -71,6 +73,7 @@ go_test( go_test( name = "go_default_xtest", srcs = ["runtime_cache_test.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/container_test", deps = [ ":go_default_library", "//pkg/kubelet/container/testing:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/container/helpers.go b/vendor/k8s.io/kubernetes/pkg/kubelet/container/helpers.go index e242398ff5a3..32dbc745b304 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/container/helpers.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/container/helpers.go @@ -46,9 +46,9 @@ type HandlerRunner interface { // RuntimeHelper wraps kubelet to make container runtime // able to get necessary informations like the RunContainerOptions, DNS settings, Host IP. type RuntimeHelper interface { - GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (contOpts *RunContainerOptions, useClusterFirstPolicy bool, err error) - GetClusterDNS(pod *v1.Pod) (dnsServers []string, dnsSearches []string, useClusterFirstPolicy bool, err error) - // GetPodCgroupParent returns the the CgroupName identifer, and its literal cgroupfs form on the host + GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (contOpts *RunContainerOptions, err error) + GetPodDNS(pod *v1.Pod) (dnsConfig *runtimeapi.DNSConfig, err error) + // GetPodCgroupParent returns the CgroupName identifer, and its literal cgroupfs form on the host // of a pod. GetPodCgroupParent(pod *v1.Pod) string GetPodDir(podUID types.UID) string diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/container/ref.go b/vendor/k8s.io/kubernetes/pkg/kubelet/container/ref.go index b15f7702ef4d..f61c0fc4a3b4 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/container/ref.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/container/ref.go @@ -21,7 +21,7 @@ import ( "k8s.io/api/core/v1" ref "k8s.io/client-go/tools/reference" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" ) var ImplicitContainerPrefix string = "implicitly required container " @@ -39,7 +39,7 @@ func GenerateContainerRef(pod *v1.Pod, container *v1.Container) (*v1.ObjectRefer // start (like the pod infra container). This is not a good way, ugh. 
fieldPath = ImplicitContainerPrefix + container.Name } - ref, err := ref.GetPartialReference(api.Scheme, pod, fieldPath) + ref, err := ref.GetPartialReference(legacyscheme.Scheme, pod, fieldPath) if err != nil { return nil, err } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/container/runtime.go b/vendor/k8s.io/kubernetes/pkg/kubelet/container/runtime.go index 3e9644c24f4e..2f5f4e3d188a 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/container/runtime.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/container/runtime.go @@ -127,9 +127,8 @@ type Runtime interface { // DirectStreamingRuntime is the interface implemented by runtimes for which the streaming calls // (exec/attach/port-forward) should be served directly by the Kubelet. type DirectStreamingRuntime interface { - // Runs the command in the container of the specified pod using nsenter. - // Attaches the processes stdin, stdout, and stderr. Optionally uses a - // tty. + // Runs the command in the container of the specified pod. Attaches + // the processes stdin, stdout, and stderr. Optionally uses a tty. ExecInContainer(containerID ContainerID, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error // Forward the specified port from the specified pod to the stream. PortForward(pod *Pod, port int32, stream io.ReadWriteCloser) error @@ -429,10 +428,6 @@ type RunContainerOptions struct { // this directory will be used to create and mount the log file to // container.TerminationMessagePath PodContainerDir string - // The list of DNS servers for the container to use. - DNS []string - // The list of DNS search domains. - DNSSearch []string // The parent cgroup to pass to Docker CgroupParent string // The type of container rootfs @@ -451,9 +446,14 @@ type RunContainerOptions struct { type VolumeInfo struct { // Mounter is the volume's mounter Mounter volume.Mounter + // BlockVolumeMapper is the Block volume's mapper + BlockVolumeMapper volume.BlockVolumeMapper // SELinuxLabeled indicates whether this volume has had the // pod's SELinux label applied to it or not SELinuxLabeled bool + // Whether the volume permission is set to read-only or not + // This value is passed from volume.spec + ReadOnly bool } type VolumeMap map[string]VolumeInfo diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/deviceplugin/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/deviceplugin/BUILD deleted file mode 100644 index db841b8a990c..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/deviceplugin/BUILD +++ /dev/null @@ -1,56 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_library( - name = "go_default_library", - srcs = [ - "device_plugin_stub.go", - "endpoint.go", - "manager.go", - "types.go", - "utils.go", - ], - tags = ["automanaged"], - deps = [ - "//pkg/api/v1/helper:go_default_library", - "//pkg/kubelet/apis/deviceplugin/v1alpha1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/google.golang.org/grpc:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) - -go_test( - name = "go_default_test", - srcs = [ - 
"endpoint_test.go", - "manager_test.go", - "utils_test.go", - ], - library = ":go_default_library", - deps = [ - "//pkg/kubelet/apis/deviceplugin/v1alpha1:go_default_library", - "//vendor/github.com/stretchr/testify/require:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/deviceplugin/endpoint.go b/vendor/k8s.io/kubernetes/pkg/kubelet/deviceplugin/endpoint.go deleted file mode 100644 index f0523471a38d..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/deviceplugin/endpoint.go +++ /dev/null @@ -1,214 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package deviceplugin - -import ( - "fmt" - "net" - "sync" - "time" - - "github.com/golang/glog" - "golang.org/x/net/context" - "google.golang.org/grpc" - - pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha1" -) - -// endpoint maps to a single registered device plugin. It is responsible -// for managing gRPC communications with the device plugin and caching -// device states reported by the device plugin. -type endpoint struct { - client pluginapi.DevicePluginClient - - socketPath string - resourceName string - - devices map[string]*pluginapi.Device - mutex sync.Mutex - - callback MonitorCallback - - cancel context.CancelFunc - ctx context.Context -} - -// newEndpoint creates a new endpoint for the given resourceName. -func newEndpoint(socketPath, resourceName string, callback MonitorCallback) (*endpoint, error) { - client, err := dial(socketPath) - if err != nil { - glog.Errorf("Can't create new endpoint with path %s err %v", socketPath, err) - return nil, err - } - - ctx, stop := context.WithCancel(context.Background()) - - return &endpoint{ - client: client, - - socketPath: socketPath, - resourceName: resourceName, - - devices: nil, - callback: callback, - - cancel: stop, - ctx: ctx, - }, nil -} - -func (e *endpoint) getDevices() []*pluginapi.Device { - e.mutex.Lock() - defer e.mutex.Unlock() - return copyDevices(e.devices) -} - -// list initializes ListAndWatch gRPC call for the device plugin and gets the -// initial list of the devices. Returns ListAndWatch gRPC stream on success. -func (e *endpoint) list() (pluginapi.DevicePlugin_ListAndWatchClient, error) { - glog.V(3).Infof("Starting List") - stream, err := e.client.ListAndWatch(e.ctx, &pluginapi.Empty{}) - if err != nil { - glog.Errorf(errListAndWatch, e.resourceName, err) - - return nil, err - } - - devs, err := stream.Recv() - if err != nil { - glog.Errorf(errListAndWatch, e.resourceName, err) - return nil, err - } - - devices := make(map[string]*pluginapi.Device) - var added, updated, deleted []*pluginapi.Device - for _, d := range devs.Devices { - devices[d.ID] = d - added = append(added, cloneDevice(d)) - } - - e.mutex.Lock() - e.devices = devices - e.mutex.Unlock() - - e.callback(e.resourceName, added, updated, deleted) - - return stream, nil -} - -// listAndWatch blocks on receiving ListAndWatch gRPC stream updates. Each ListAndWatch -// stream update contains a new list of device states. 
listAndWatch compares the new -// device states with its cached states to get list of new, updated, and deleted devices. -// It then issues a callback to pass this information to the device_plugin_handler which -// will adjust the resource available information accordingly. -func (e *endpoint) listAndWatch(stream pluginapi.DevicePlugin_ListAndWatchClient) { - glog.V(3).Infof("Starting ListAndWatch") - - devices := make(map[string]*pluginapi.Device) - - e.mutex.Lock() - for _, d := range e.devices { - devices[d.ID] = cloneDevice(d) - } - e.mutex.Unlock() - - for { - response, err := stream.Recv() - if err != nil { - glog.Errorf(errListAndWatch, e.resourceName, err) - return - } - - devs := response.Devices - glog.V(2).Infof("State pushed for device plugin %s", e.resourceName) - - newDevs := make(map[string]*pluginapi.Device) - var added, updated []*pluginapi.Device - - for _, d := range devs { - dOld, ok := devices[d.ID] - newDevs[d.ID] = d - - if !ok { - glog.V(2).Infof("New device for Endpoint %s: %v", e.resourceName, d) - - devices[d.ID] = d - added = append(added, cloneDevice(d)) - - continue - } - - if d.Health == dOld.Health { - continue - } - - if d.Health == pluginapi.Unhealthy { - glog.Errorf("Device %s is now Unhealthy", d.ID) - } else if d.Health == pluginapi.Healthy { - glog.V(2).Infof("Device %s is now Healthy", d.ID) - } - - devices[d.ID] = d - updated = append(updated, cloneDevice(d)) - } - - var deleted []*pluginapi.Device - for id, d := range devices { - if _, ok := newDevs[id]; ok { - continue - } - - glog.Errorf("Device %s was deleted", d.ID) - - deleted = append(deleted, cloneDevice(d)) - delete(devices, id) - } - - e.mutex.Lock() - e.devices = devices - e.mutex.Unlock() - - e.callback(e.resourceName, added, updated, deleted) - } - -} - -// allocate issues Allocate gRPC call to the device plugin. -func (e *endpoint) allocate(devs []string) (*pluginapi.AllocateResponse, error) { - return e.client.Allocate(context.Background(), &pluginapi.AllocateRequest{ - DevicesIDs: devs, - }) -} - -func (e *endpoint) stop() { - e.cancel() -} - -// dial establishes the gRPC communication with the registered device plugin. -func dial(unixSocketPath string) (pluginapi.DevicePluginClient, error) { - c, err := grpc.Dial(unixSocketPath, grpc.WithInsecure(), - grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { - return net.DialTimeout("unix", addr, timeout) - }), - ) - - if err != nil { - return nil, fmt.Errorf(errFailedToDialDevicePlugin+" %v", err) - } - - return pluginapi.NewDevicePluginClient(c), nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/deviceplugin/manager.go b/vendor/k8s.io/kubernetes/pkg/kubelet/deviceplugin/manager.go deleted file mode 100644 index 5c05a9728943..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/deviceplugin/manager.go +++ /dev/null @@ -1,239 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
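
The endpoint.listAndWatch implementation removed above keys its bookkeeping on device ID and Health: each stream update is compared with the cached device map to build the added, updated, and deleted slices handed to the callback. A condensed sketch of that diffing step, with a hypothetical local device type standing in for the v1alpha1 API type:

package main

import "fmt"

// device mirrors the ID/Health fields the removed code keys on (illustrative only).
type device struct {
	ID     string
	Health string
}

// diffDevices compares cached state with a fresh ListAndWatch response.
func diffDevices(cached map[string]device, fresh []device) (added, updated, deleted []device) {
	seen := make(map[string]bool, len(fresh))
	for _, d := range fresh {
		seen[d.ID] = true
		old, ok := cached[d.ID]
		switch {
		case !ok:
			added = append(added, d) // brand new device
		case old.Health != d.Health:
			updated = append(updated, d) // health transition (Healthy <-> Unhealthy)
		}
		cached[d.ID] = d
	}
	for id, d := range cached {
		if !seen[id] {
			deleted = append(deleted, d) // device disappeared from the report
			delete(cached, id)
		}
	}
	return added, updated, deleted
}

func main() {
	cached := map[string]device{"gpu-0": {"gpu-0", "Healthy"}, "gpu-1": {"gpu-1", "Healthy"}}
	fresh := []device{{"gpu-0", "Unhealthy"}, {"gpu-2", "Healthy"}}
	a, u, d := diffDevices(cached, fresh)
	fmt.Println(len(a), len(u), len(d)) // 1 1 1
}
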
-*/ - -package deviceplugin - -import ( - "fmt" - "net" - "os" - "path/filepath" - "sync" - - "github.com/golang/glog" - "golang.org/x/net/context" - "google.golang.org/grpc" - - pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha1" -) - -// ManagerImpl is the structure in charge of managing Device Plugins. -type ManagerImpl struct { - socketname string - socketdir string - - endpoints map[string]*endpoint // Key is ResourceName - mutex sync.Mutex - - callback MonitorCallback - - server *grpc.Server -} - -// NewManagerImpl creates a new manager on the socket `socketPath`. -// f is the callback that is called when a device becomes unhealthy. -// socketPath is present for testing purposes in production this is pluginapi.KubeletSocket -func NewManagerImpl(socketPath string, f MonitorCallback) (*ManagerImpl, error) { - glog.V(2).Infof("Creating Device Plugin manager at %s", socketPath) - - if socketPath == "" || !filepath.IsAbs(socketPath) { - return nil, fmt.Errorf(errBadSocket+" %v", socketPath) - } - - dir, file := filepath.Split(socketPath) - return &ManagerImpl{ - endpoints: make(map[string]*endpoint), - - socketname: file, - socketdir: dir, - callback: f, - }, nil -} - -func (m *ManagerImpl) removeContents(dir string) error { - d, err := os.Open(dir) - if err != nil { - return err - } - defer d.Close() - names, err := d.Readdirnames(-1) - if err != nil { - return err - } - for _, name := range names { - filePath := filepath.Join(dir, name) - if filePath == m.CheckpointFile() { - continue - } - stat, err := os.Stat(filePath) - if err != nil { - glog.Errorf("Failed to stat file %v: %v", filePath, err) - continue - } - if stat.IsDir() { - continue - } - err = os.RemoveAll(filePath) - if err != nil { - return err - } - } - return nil -} - -// CheckpointFile returns device plugin checkpoint file path. -func (m *ManagerImpl) CheckpointFile() string { - return filepath.Join(m.socketdir, "kubelet_internal_checkpoint") -} - -// Start starts the Device Plugin Manager -func (m *ManagerImpl) Start() error { - glog.V(2).Infof("Starting Device Plugin manager") - - socketPath := filepath.Join(m.socketdir, m.socketname) - os.MkdirAll(m.socketdir, 0755) - - // Removes all stale sockets in m.socketdir. Device plugins can monitor - // this and use it as a signal to re-register with the new Kubelet. - if err := m.removeContents(m.socketdir); err != nil { - glog.Errorf("Fail to clean up stale contents under %s: %+v", m.socketdir, err) - } - - if err := os.Remove(socketPath); err != nil && !os.IsNotExist(err) { - glog.Errorf(errRemoveSocket+" %+v", err) - return err - } - - s, err := net.Listen("unix", socketPath) - if err != nil { - glog.Errorf(errListenSocket+" %+v", err) - return err - } - - m.server = grpc.NewServer([]grpc.ServerOption{}...) - - pluginapi.RegisterRegistrationServer(m.server, m) - go m.server.Serve(s) - - return nil -} - -// Devices is the map of devices that are known by the Device -// Plugin manager with the kind of the devices as key -func (m *ManagerImpl) Devices() map[string][]*pluginapi.Device { - m.mutex.Lock() - defer m.mutex.Unlock() - - devs := make(map[string][]*pluginapi.Device) - for k, e := range m.endpoints { - glog.V(3).Infof("Endpoint: %+v: %+v", k, e) - devs[k] = e.getDevices() - } - - return devs -} - -// Allocate is the call that you can use to allocate a set of devices -// from the registered device plugins. 
-func (m *ManagerImpl) Allocate(resourceName string, devs []string) (*pluginapi.AllocateResponse, error) { - - if len(devs) == 0 { - return nil, nil - } - - glog.V(3).Infof("Recieved allocation request for devices %v for device plugin %s", - devs, resourceName) - m.mutex.Lock() - e, ok := m.endpoints[resourceName] - m.mutex.Unlock() - if !ok { - return nil, fmt.Errorf("Unknown Device Plugin %s", resourceName) - } - - return e.allocate(devs) -} - -// Register registers a device plugin. -func (m *ManagerImpl) Register(ctx context.Context, - r *pluginapi.RegisterRequest) (*pluginapi.Empty, error) { - glog.V(2).Infof("Got request for Device Plugin %s", r.ResourceName) - if r.Version != pluginapi.Version { - return &pluginapi.Empty{}, fmt.Errorf(errUnsuportedVersion) - } - - if err := IsResourceNameValid(r.ResourceName); err != nil { - return &pluginapi.Empty{}, err - } - - // TODO: for now, always accepts newest device plugin. Later may consider to - // add some policies here, e.g., verify whether an old device plugin with the - // same resource name is still alive to determine whether we want to accept - // the new registration. - go m.addEndpoint(r) - - return &pluginapi.Empty{}, nil -} - -// Stop is the function that can stop the gRPC server. -func (m *ManagerImpl) Stop() error { - m.mutex.Lock() - defer m.mutex.Unlock() - for _, e := range m.endpoints { - e.stop() - } - - m.server.Stop() - return nil -} - -func (m *ManagerImpl) addEndpoint(r *pluginapi.RegisterRequest) { - socketPath := filepath.Join(m.socketdir, r.Endpoint) - e, err := newEndpoint(socketPath, r.ResourceName, m.callback) - if err != nil { - glog.Errorf("Failed to dial device plugin with request %v: %v", r, err) - return - } - stream, err := e.list() - if err != nil { - glog.Errorf("Failed to List devices for plugin %v: %v", r.ResourceName, err) - return - } - - // Associates the newly created endpoint with the corresponding resource name. - // Stops existing endpoint if there is any. - m.mutex.Lock() - old, ok := m.endpoints[r.ResourceName] - m.endpoints[r.ResourceName] = e - m.mutex.Unlock() - glog.V(2).Infof("Registered endpoint %v", e) - if ok && old != nil { - old.stop() - } - - go func() { - e.listAndWatch(stream) - - m.mutex.Lock() - if old, ok := m.endpoints[r.ResourceName]; ok && old == e { - glog.V(2).Infof("Delete resource for endpoint %v", e) - delete(m.endpoints, r.ResourceName) - // Issues callback to delete all of devices. - e.callback(e.resourceName, []*pluginapi.Device{}, []*pluginapi.Device{}, e.getDevices()) - } - glog.V(2).Infof("Unregistered endpoint %v", e) - m.mutex.Unlock() - }() -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/deviceplugin/types.go b/vendor/k8s.io/kubernetes/pkg/kubelet/deviceplugin/types.go deleted file mode 100644 index 3a2eae6fee1a..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/deviceplugin/types.go +++ /dev/null @@ -1,74 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
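
The ManagerImpl.Register handler removed above is the server half of the v1alpha1 Registration service that device plugins call over the kubelet's unix socket before serving ListAndWatch and Allocate. A rough sketch of the client half, assuming the protoc-generated NewRegistrationClient constructor and the RegisterRequest fields used by the deleted handler; the socket and resource names below are placeholders:

package main

import (
	"log"
	"net"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"

	pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha1"
)

func main() {
	// Dial the kubelet's registration socket the same way the removed dial()
	// helper did: gRPC over a unix socket with an insecure connection.
	conn, err := grpc.Dial(pluginapi.KubeletSocket, grpc.WithInsecure(),
		grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
			return net.DialTimeout("unix", addr, timeout)
		}))
	if err != nil {
		log.Fatalf("cannot dial kubelet: %v", err)
	}
	defer conn.Close()

	client := pluginapi.NewRegistrationClient(conn)
	// Endpoint is the plugin's own socket name, ResourceName must be a valid
	// extended resource name; both values here are placeholders.
	_, err = client.Register(context.Background(), &pluginapi.RegisterRequest{
		Version:      pluginapi.Version,
		Endpoint:     "example-device-plugin.sock",
		ResourceName: "example.com/foo",
	})
	if err != nil {
		log.Fatalf("registration failed: %v", err)
	}
	log.Print("registered with kubelet device plugin manager")
}
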
-*/ - -package deviceplugin - -import ( - pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha1" -) - -// MonitorCallback is the function called when a device's health state changes, -// or new devices are reported, or old devices are deleted. -// Updated contains the most recent state of the Device. -type MonitorCallback func(resourceName string, added, updated, deleted []*pluginapi.Device) - -// Manager manages all the Device Plugins running on a node. -type Manager interface { - // Start starts the gRPC Registration service. - Start() error - - // Devices is the map of devices that have registered themselves - // against the manager. - // The map key is the ResourceName of the device plugins. - Devices() map[string][]*pluginapi.Device - - // Allocate takes resourceName and list of device Ids, and calls the - // gRPC Allocate on the device plugin matching the resourceName. - Allocate(string, []string) (*pluginapi.AllocateResponse, error) - - // Stop stops the manager. - Stop() error - - // Returns checkpoint file path. - CheckpointFile() string -} - -// TODO: evaluate whether we need these error definitions. -const ( - // errFailedToDialDevicePlugin is the error raised when the device plugin could not be - // reached on the registered socket - errFailedToDialDevicePlugin = "failed to dial device plugin:" - // errUnsuportedVersion is the error raised when the device plugin uses an API version not - // supported by the Kubelet registry - errUnsuportedVersion = "unsupported API version by the Kubelet registry" - // errDevicePluginAlreadyExists is the error raised when a device plugin with the - // same Resource Name tries to register itself - errDevicePluginAlreadyExists = "another device plugin already registered this Resource Name" - // errInvalidResourceName is the error raised when a device plugin is registering - // itself with an invalid ResourceName - errInvalidResourceName = "the ResourceName is invalid" - // errEmptyResourceName is the error raised when the resource name field is empty - errEmptyResourceName = "invalid Empty ResourceName" - - // errBadSocket is the error raised when the registry socket path is not absolute - errBadSocket = "bad socketPath, must be an absolute path:" - // errRemoveSocket is the error raised when the registry could not remove the existing socket - errRemoveSocket = "failed to remove socket while starting device plugin registry, with error" - // errListenSocket is the error raised when the registry could not listen on the socket - errListenSocket = "failed to listen to socket while starting device plugin registry, with error" - // errListAndWatch is the error raised when ListAndWatch ended unsuccessfully - errListAndWatch = "listAndWatch ended unexpectedly for device plugin %s with error %v" -) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/deviceplugin/utils.go b/vendor/k8s.io/kubernetes/pkg/kubelet/deviceplugin/utils.go deleted file mode 100644 index b30ac2f640e6..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/deviceplugin/utils.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package deviceplugin - -import ( - "fmt" - - "k8s.io/api/core/v1" - v1helper "k8s.io/kubernetes/pkg/api/v1/helper" - pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha1" -) - -func cloneDevice(d *pluginapi.Device) *pluginapi.Device { - return &pluginapi.Device{ - ID: d.ID, - Health: d.Health, - } - -} - -func copyDevices(devs map[string]*pluginapi.Device) []*pluginapi.Device { - var clones []*pluginapi.Device - - for _, d := range devs { - clones = append(clones, cloneDevice(d)) - } - - return clones -} - -// IsResourceNameValid returns an error if the resource is invalid or is not an -// extended resource name. -func IsResourceNameValid(resourceName string) error { - if resourceName == "" { - return fmt.Errorf(errEmptyResourceName) - } - if !IsDeviceName(v1.ResourceName(resourceName)) { - return fmt.Errorf(errInvalidResourceName) - } - return nil -} - -// IsDeviceName returns whether the ResourceName points to an extended resource -// name exported by a device plugin. -func IsDeviceName(k v1.ResourceName) bool { - return v1helper.IsExtendedResourceName(k) -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/BUILD index ce520af36ebc..748a74c79e81 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/BUILD @@ -9,16 +9,15 @@ load( go_library( name = "go_default_library", srcs = [ - "checkpoint_store.go", "convert.go", "doc.go", "docker_checkpoint.go", "docker_container.go", "docker_image.go", - "docker_legacy.go", + "docker_image_unsupported.go", "docker_sandbox.go", "docker_service.go", - "docker_stats.go", + "docker_stats_unsupported.go", "docker_streaming.go", "exec.go", "helpers.go", @@ -28,13 +27,18 @@ go_library( "selinux_util.go", ] + select({ "@io_bazel_rules_go//go/platform:linux_amd64": [ + "docker_image_linux.go", + "docker_stats_linux.go", "helpers_linux.go", ], "@io_bazel_rules_go//go/platform:windows_amd64": [ + "docker_image_windows.go", + "docker_stats_windows.go", "helpers_windows.go", ], "//conditions:default": [], }), + importpath = "k8s.io/kubernetes/pkg/kubelet/dockershim", deps = [ "//pkg/credentialprovider:go_default_library", "//pkg/kubelet/apis/cri:go_default_library", @@ -43,8 +47,8 @@ go_library( "//pkg/kubelet/cm:go_default_library", "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/dockershim/cm:go_default_library", - "//pkg/kubelet/dockershim/errors:go_default_library", "//pkg/kubelet/dockershim/libdocker:go_default_library", + "//pkg/kubelet/dockershim/metrics:go_default_library", "//pkg/kubelet/leaky:go_default_library", "//pkg/kubelet/network:go_default_library", "//pkg/kubelet/network/cni:go_default_library", @@ -55,10 +59,12 @@ go_library( "//pkg/kubelet/types:go_default_library", "//pkg/kubelet/util/cache:go_default_library", "//pkg/kubelet/util/ioutils:go_default_library", + "//pkg/kubelet/util/store:go_default_library", "//pkg/security/apparmor:go_default_library", + "//pkg/util/filesystem:go_default_library", "//pkg/util/hash:go_default_library", "//pkg/util/parsers:go_default_library", - 
"//pkg/util/term:go_default_library", + "//vendor/github.com/armon/circbuf:go_default_library", "//vendor/github.com/blang/semver:go_default_library", "//vendor/github.com/docker/docker/api/types:go_default_library", "//vendor/github.com/docker/docker/api/types/container:go_default_library", @@ -69,23 +75,19 @@ go_library( "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/client-go/tools/remotecommand:go_default_library", - "//vendor/k8s.io/utils/exec:go_default_library", ], ) go_test( name = "go_default_test", srcs = [ - "checkpoint_store_test.go", "convert_test.go", "docker_checkpoint_test.go", "docker_container_test.go", "docker_image_test.go", - "docker_legacy_test.go", "docker_sandbox_test.go", "docker_service_test.go", "helpers_test.go", @@ -99,15 +101,13 @@ go_test( "//conditions:default": [], }), data = [ - "fixtures/seccomp/sub/subtest", - "fixtures/seccomp/test", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/dockershim", library = ":go_default_library", deps = [ "//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library", "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/container/testing:go_default_library", - "//pkg/kubelet/dockershim/errors:go_default_library", "//pkg/kubelet/dockershim/libdocker:go_default_library", "//pkg/kubelet/dockershim/testing:go_default_library", "//pkg/kubelet/network:go_default_library", @@ -139,8 +139,8 @@ filegroup( srcs = [ ":package-srcs", "//pkg/kubelet/dockershim/cm:all-srcs", - "//pkg/kubelet/dockershim/errors:all-srcs", "//pkg/kubelet/dockershim/libdocker:all-srcs", + "//pkg/kubelet/dockershim/metrics:all-srcs", "//pkg/kubelet/dockershim/remote:all-srcs", "//pkg/kubelet/dockershim/testing:all-srcs", ], diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/checkpoint_store.go b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/checkpoint_store.go deleted file mode 100644 index b8160bbd9bd6..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/checkpoint_store.go +++ /dev/null @@ -1,156 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package dockershim - -import ( - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "regexp" - "strings" - - "k8s.io/kubernetes/pkg/kubelet/dockershim/errors" -) - -const ( - tmpPrefix = "." - tmpSuffix = ".tmp" - keyMaxLength = 250 -) - -var keyRegex = regexp.MustCompile("^[a-zA-Z0-9]+$") - -// CheckpointStore provides the interface for checkpoint storage backend. 
-// CheckpointStore must be thread-safe -type CheckpointStore interface { - // key must contain one or more characters in [A-Za-z0-9] - // Write persists a checkpoint with key - Write(key string, data []byte) error - // Read retrieves a checkpoint with key - // Read must return CheckpointNotFoundError if checkpoint is not found - Read(key string) ([]byte, error) - // Delete deletes a checkpoint with key - // Delete must not return error if checkpoint does not exist - Delete(key string) error - // List lists all keys of existing checkpoints - List() ([]string, error) -} - -// FileStore is an implementation of CheckpointStore interface which stores checkpoint in files. -type FileStore struct { - // path to the base directory for storing checkpoint files - path string -} - -func NewFileStore(path string) (CheckpointStore, error) { - if err := ensurePath(path); err != nil { - return nil, err - } - return &FileStore{path: path}, nil -} - -// writeFileAndSync is copied from ioutil.WriteFile, with the extra File.Sync -// at the end to ensure file is written on the disk. -func writeFileAndSync(filename string, data []byte, perm os.FileMode) error { - f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) - if err != nil { - return err - } - n, err := f.Write(data) - if err == nil && n < len(data) { - err = io.ErrShortWrite - } - if err == nil { - // Only sync if the Write completed successfully. - err = f.Sync() - } - if err1 := f.Close(); err == nil { - err = err1 - } - return err -} - -func (fstore *FileStore) Write(key string, data []byte) error { - if err := validateKey(key); err != nil { - return err - } - if err := ensurePath(fstore.path); err != nil { - return err - } - tmpfile := filepath.Join(fstore.path, fmt.Sprintf("%s%s%s", tmpPrefix, key, tmpSuffix)) - if err := writeFileAndSync(tmpfile, data, 0644); err != nil { - return err - } - return os.Rename(tmpfile, fstore.getCheckpointPath(key)) -} - -func (fstore *FileStore) Read(key string) ([]byte, error) { - if err := validateKey(key); err != nil { - return nil, err - } - bytes, err := ioutil.ReadFile(fstore.getCheckpointPath(key)) - if os.IsNotExist(err) { - return bytes, errors.CheckpointNotFoundError - } - return bytes, err -} - -func (fstore *FileStore) Delete(key string) error { - if err := validateKey(key); err != nil { - return err - } - if err := os.Remove(fstore.getCheckpointPath(key)); err != nil && !os.IsNotExist(err) { - return err - } - return nil -} - -func (fstore *FileStore) List() ([]string, error) { - keys := make([]string, 0) - files, err := ioutil.ReadDir(fstore.path) - if err != nil { - return keys, err - } - for _, f := range files { - if !strings.HasPrefix(f.Name(), tmpPrefix) { - keys = append(keys, f.Name()) - } - } - return keys, nil -} - -func (fstore *FileStore) getCheckpointPath(key string) string { - return filepath.Join(fstore.path, key) -} - -// ensurePath creates input directory if it does not exist -func ensurePath(path string) error { - if _, err := os.Stat(path); err != nil { - // MkdirAll returns nil if directory already exists - return os.MkdirAll(path, 0755) - } - return nil -} - -func validateKey(key string) error { - if len(key) <= keyMaxLength && keyRegex.MatchString(key) { - return nil - } - return fmt.Errorf("checkpoint key %q is not valid.", key) -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/cm/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/cm/BUILD index a83c367b85e5..fb5661cd50ac 100644 --- 
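
FileStore.Write in the checkpoint_store.go removed above (and the pkg/kubelet/util/store replacement this diff switches to) persists checkpoints crash-safely: write to a hidden temp file, fsync, then rename over the final key. A standalone sketch of that pattern using only the standard library, with a hypothetical atomicWrite helper name:

package main

import (
	"io"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"
)

// atomicWrite persists data at dir/key so that readers never observe a
// partially written checkpoint: the temp file is synced before the rename.
func atomicWrite(dir, key string, data []byte, perm os.FileMode) error {
	tmp := filepath.Join(dir, "."+key+".tmp")
	f, err := os.OpenFile(tmp, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
	if err != nil {
		return err
	}
	n, err := f.Write(data)
	if err == nil && n < len(data) {
		err = io.ErrShortWrite
	}
	if err == nil {
		err = f.Sync() // flush to disk before the rename makes it visible
	}
	if closeErr := f.Close(); err == nil {
		err = closeErr
	}
	if err != nil {
		os.Remove(tmp)
		return err
	}
	return os.Rename(tmp, filepath.Join(dir, key))
}

func main() {
	dir, err := ioutil.TempDir("", "checkpoints")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)
	if err := atomicWrite(dir, "sandbox-1234", []byte(`{"Version":"v1"}`), 0644); err != nil {
		log.Fatal(err)
	}
	log.Print("checkpoint written atomically")
}
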
a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/cm/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/cm/BUILD @@ -19,6 +19,7 @@ go_library( ], "//conditions:default": [], }), + importpath = "k8s.io/kubernetes/pkg/kubelet/dockershim/cm", deps = [ "//pkg/kubelet/dockershim/libdocker:go_default_library", ] + select({ diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/cm/container_manager_linux.go b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/cm/container_manager_linux.go index b820a8126a7d..74d00be66224 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/cm/container_manager_linux.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/cm/container_manager_linux.go @@ -91,7 +91,7 @@ func (m *containerManager) doWork() { glog.Errorf("Unable to parse docker version %q: %v", v.APIVersion, err) return } - // EnsureDockerInConatiner does two things. + // EnsureDockerInContainer does two things. // 1. Ensure processes run in the cgroups if m.cgroupsManager is not nil. // 2. Ensure processes have the OOM score applied. if err := kubecm.EnsureDockerInContainer(version, dockerOOMScoreAdj, m.cgroupsManager); err != nil { diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_checkpoint.go b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_checkpoint.go index 6ad1d794169f..f474696995b6 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_checkpoint.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_checkpoint.go @@ -23,7 +23,8 @@ import ( "path/filepath" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/kubelet/dockershim/errors" + utilstore "k8s.io/kubernetes/pkg/kubelet/util/store" + utilfs "k8s.io/kubernetes/pkg/util/filesystem" hashutil "k8s.io/kubernetes/pkg/util/hash" ) @@ -82,11 +83,11 @@ type CheckpointHandler interface { // PersistentCheckpointHandler is an implementation of CheckpointHandler. It persists checkpoint in CheckpointStore type PersistentCheckpointHandler struct { - store CheckpointStore + store utilstore.Store } func NewPersistentCheckpointHandler(dockershimRootDir string) (CheckpointHandler, error) { - fstore, err := NewFileStore(filepath.Join(dockershimRootDir, sandboxCheckpointDir)) + fstore, err := utilstore.NewFileStore(filepath.Join(dockershimRootDir, sandboxCheckpointDir), utilfs.DefaultFs{}) if err != nil { return nil, err } @@ -111,12 +112,14 @@ func (handler *PersistentCheckpointHandler) GetCheckpoint(podSandboxID string) ( //TODO: unmarhsal into a struct with just Version, check version, unmarshal into versioned type. err = json.Unmarshal(blob, &checkpoint) if err != nil { - glog.Errorf("Failed to unmarshal checkpoint %q. Checkpoint content: %q. ErrMsg: %v", podSandboxID, string(blob), err) - return &checkpoint, errors.CorruptCheckpointError + glog.Errorf("Failed to unmarshal checkpoint %q, removing checkpoint. Checkpoint content: %q. 
ErrMsg: %v", podSandboxID, string(blob), err) + handler.RemoveCheckpoint(podSandboxID) + return nil, fmt.Errorf("failed to unmarshal checkpoint") } if checkpoint.CheckSum != calculateChecksum(checkpoint) { - glog.Errorf("Checksum of checkpoint %q is not valid", podSandboxID) - return &checkpoint, errors.CorruptCheckpointError + glog.Errorf("Checksum of checkpoint %q is not valid, removing checkpoint", podSandboxID) + handler.RemoveCheckpoint(podSandboxID) + return nil, fmt.Errorf("checkpoint is corrupted") } return &checkpoint, nil } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_container.go b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_container.go index 1f32f5238977..453c18c75663 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_container.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_container.go @@ -75,15 +75,6 @@ func (ds *dockerService) ListContainers(filter *runtimeapi.ContainerFilter) ([]* result = append(result, converted) } - // Include legacy containers if there are still legacy containers not cleaned up yet. - if !ds.legacyCleanup.Done() { - legacyContainers, err := ds.ListLegacyContainers(filter) - if err != nil { - return nil, err - } - // Legacy containers are always older, so we can safely append them to the end. - result = append(result, legacyContainers...) - } return result, nil } @@ -206,7 +197,7 @@ func (ds *dockerService) createContainerLogSymlink(containerID string) error { path, realPath, containerID, err) } } else { - supported, err := IsCRISupportedLogDriver(ds.client) + supported, err := ds.IsCRISupportedLogDriver() if err != nil { glog.Warningf("Failed to check supported logging driver by CRI: %v", err) return nil @@ -241,18 +232,21 @@ func (ds *dockerService) removeContainerLogSymlink(containerID string) error { // StartContainer starts the container. func (ds *dockerService) StartContainer(containerID string) error { err := ds.client.StartContainer(containerID) - if err != nil { - err = transformStartContainerError(err) - return fmt.Errorf("failed to start container %q: %v", containerID, err) - } - // Create container log symlink. - if err := ds.createContainerLogSymlink(containerID); err != nil { + + // Create container log symlink for all containers (including failed ones). + if linkError := ds.createContainerLogSymlink(containerID); linkError != nil { // Do not stop the container if we failed to create symlink because: // 1. This is not a critical failure. // 2. We don't have enough information to properly stop container here. // Kubelet will surface this error to user via an event. - return err + return linkError } + + if err != nil { + err = transformStartContainerError(err) + return fmt.Errorf("failed to start container %q: %v", containerID, err) + } + return nil } @@ -370,15 +364,6 @@ func (ds *dockerService) ContainerStatus(containerID string) (*runtimeapi.Contai ct, st, ft := createdAt.UnixNano(), startedAt.UnixNano(), finishedAt.UnixNano() exitCode := int32(r.State.ExitCode) - // If the container has no containerTypeLabelKey label, treat it as a legacy container. 
- if _, ok := r.Config.Labels[containerTypeLabelKey]; !ok { - names, labels, err := convertLegacyNameAndLabels([]string{r.Name}, r.Config.Labels) - if err != nil { - return nil, err - } - r.Name, r.Config.Labels = names[0], labels - } - metadata, err := parseContainerName(r.Name) if err != nil { return nil, err diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_image.go b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_image.go index 7482dc420ddf..1b39522f862f 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_image.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_image.go @@ -21,6 +21,7 @@ import ( "net/http" dockertypes "github.com/docker/docker/api/types" + dockerfilters "github.com/docker/docker/api/types/filters" "github.com/docker/docker/pkg/jsonmessage" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" @@ -33,8 +34,9 @@ import ( func (ds *dockerService) ListImages(filter *runtimeapi.ImageFilter) ([]*runtimeapi.Image, error) { opts := dockertypes.ImageListOptions{} if filter != nil { - if imgSpec := filter.GetImage(); imgSpec != nil { - opts.Filters.Add("reference", imgSpec.Image) + if filter.GetImage().GetImage() != "" { + opts.Filters = dockerfilters.NewArgs() + opts.Filters.Add("reference", filter.GetImage().GetImage()) } } @@ -133,11 +135,6 @@ func getImageRef(client libdocker.Interface, image string) (string, error) { return img.ID, nil } -// ImageFsInfo returns information of the filesystem that is used to store images. -func (ds *dockerService) ImageFsInfo() ([]*runtimeapi.FilesystemUsage, error) { - return nil, fmt.Errorf("not implemented") -} - func filterHTTPError(err error, image string) error { // docker/docker/pull/11314 prints detailed error info for docker pull. // When it hits 502, it returns a verbose html output including an inline svg, diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_image_linux.go b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_image_linux.go new file mode 100644 index 000000000000..1af785972cca --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_image_linux.go @@ -0,0 +1,30 @@ +// +build linux + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dockershim + +import ( + "fmt" + + runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" +) + +// ImageFsInfo returns information of the filesystem that is used to store images. +func (ds *dockerService) ImageFsInfo() ([]*runtimeapi.FilesystemUsage, error) { + return nil, fmt.Errorf("not implemented") +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_image_unsupported.go b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_image_unsupported.go new file mode 100644 index 000000000000..17519e0385f7 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_image_unsupported.go @@ -0,0 +1,30 @@ +// +build !linux,!windows + +/* +Copyright 2016 The Kubernetes Authors. 
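
The ListImages hunk above guards against a nil ImageSpec via GetImage().GetImage() and constructs opts.Filters with dockerfilters.NewArgs() before calling Add; the zero value of filters.Args is not initialized for Add. A minimal illustration of building the same reference filter, assuming the docker filters API as vendored here (the client call itself is omitted):

package main

import (
	"fmt"

	dockertypes "github.com/docker/docker/api/types"
	dockerfilters "github.com/docker/docker/api/types/filters"
)

// buildListOptions mirrors the pattern in the hunk above: only construct a
// reference filter when an image name was actually supplied.
func buildListOptions(image string) dockertypes.ImageListOptions {
	opts := dockertypes.ImageListOptions{}
	if image != "" {
		opts.Filters = dockerfilters.NewArgs() // initialize Args before Add
		opts.Filters.Add("reference", image)
	}
	return opts
}

func main() {
	opts := buildListOptions("busybox:latest")
	fmt.Println(opts.Filters.Len()) // 1
}
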
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dockershim + +import ( + "fmt" + + runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" +) + +// ImageFsInfo returns information of the filesystem that is used to store images. +func (ds *dockerService) ImageFsInfo() ([]*runtimeapi.FilesystemUsage, error) { + return nil, fmt.Errorf("not implemented") +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_image_windows.go b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_image_windows.go new file mode 100644 index 000000000000..b147659f2b69 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_image_windows.go @@ -0,0 +1,39 @@ +// +build windows + +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dockershim + +import ( + "time" + + runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" +) + +// ImageFsInfo returns information of the filesystem that is used to store images. +func (ds *dockerService) ImageFsInfo() ([]*runtimeapi.FilesystemUsage, error) { + // For Windows Stats to work correctly, a file system must be provided. For now, provide a fake filesystem. + filesystems := []*runtimeapi.FilesystemUsage{ + { + Timestamp: time.Now().UnixNano(), + UsedBytes: &runtimeapi.UInt64Value{Value: 0}, + InodesUsed: &runtimeapi.UInt64Value{Value: 0}, + }, + } + + return filesystems, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_legacy.go b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_legacy.go deleted file mode 100644 index 24ebbe3d98b2..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_legacy.go +++ /dev/null @@ -1,284 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package dockershim - -import ( - "fmt" - "strings" - "sync/atomic" - "time" - - dockertypes "github.com/docker/docker/api/types" - dockerfilters "github.com/docker/docker/api/types/filters" - "github.com/golang/glog" - - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/wait" - runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" - kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" - "k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker" - "k8s.io/kubernetes/pkg/kubelet/leaky" -) - -// These are labels used by kuberuntime. Ideally, we should not rely on kuberuntime implementation -// detail in dockershim. However, we need these labels for legacy container (containers created by -// kubernetes 1.4 and 1.5) support. -// TODO(random-liu): Remove this file and related code in kubernetes 1.8. -const ( - podDeletionGracePeriodLabel = "io.kubernetes.pod.deletionGracePeriod" - podTerminationGracePeriodLabel = "io.kubernetes.pod.terminationGracePeriod" - - containerHashLabel = "io.kubernetes.container.hash" - containerRestartCountLabel = "io.kubernetes.container.restartCount" - containerTerminationMessagePathLabel = "io.kubernetes.container.terminationMessagePath" - containerTerminationMessagePolicyLabel = "io.kubernetes.container.terminationMessagePolicy" - containerPreStopHandlerLabel = "io.kubernetes.container.preStopHandler" - containerPortsLabel = "io.kubernetes.container.ports" -) - -// NOTE that we can't handle the following dockershim internal labels, so they will be empty: -// * containerLogPathLabelKey ("io.kubernetes.container.logpath"): RemoveContainer will ignore -// the label if it's empty. -// * sandboxIDLabelKey ("io.kubernetes.sandbox.id"): This is used in 2 places: -// * filter.PodSandboxId: The filter is not used in kuberuntime now. -// * runtimeapi.Container.PodSandboxId: toRuntimeAPIContainer retrieves PodSandboxId from the -// label. The field is used in kuberuntime sandbox garbage collection. Missing this may cause -// pod sandbox to be removed before its containers are removed. - -// convertLegacyNameAndLabels converts legacy name and labels into dockershim name and labels. -// The function can be used to either legacy infra container or regular container. -// NOTE that legacy infra container doesn't have restart count label, so the returned attempt for -// sandbox will always be 0. The sandbox attempt is only used to generate new sandbox name, and -// there is no naming conflict between legacy and new containers/sandboxes, so it should be fine. -func convertLegacyNameAndLabels(names []string, labels map[string]string) ([]string, map[string]string, error) { - if len(names) == 0 { - return nil, nil, fmt.Errorf("unexpected empty name") - } - - // Generate new dockershim name. - m, _, err := libdocker.ParseDockerName(names[0]) - if err != nil { - return nil, nil, err - } - sandboxName, sandboxNamespace, err := kubecontainer.ParsePodFullName(m.PodFullName) - if err != nil { - return nil, nil, err - } - newNames := []string{strings.Join([]string{ - kubePrefix, // 0 - m.ContainerName, // 1: container name - sandboxName, // 2: sandbox name - sandboxNamespace, // 3: sandbox namesapce - string(m.PodUID), // 4: sandbox uid - labels[containerRestartCountLabel], // 5 - }, nameDelimiter)} - - // Generate new labels. 
- legacyAnnotations := sets.NewString( - containerHashLabel, - containerRestartCountLabel, - containerTerminationMessagePathLabel, - containerTerminationMessagePolicyLabel, - containerPreStopHandlerLabel, - containerPortsLabel, - ) - newLabels := map[string]string{} - for k, v := range labels { - if legacyAnnotations.Has(k) { - // Add annotation prefix for all legacy labels which should be annotations in dockershim. - newLabels[fmt.Sprintf("%s%s", annotationPrefix, k)] = v - } else { - newLabels[k] = v - } - } - // Add containerTypeLabelKey indicating the container is sandbox or application container. - if m.ContainerName == leaky.PodInfraContainerName { - newLabels[containerTypeLabelKey] = containerTypeLabelSandbox - } else { - newLabels[containerTypeLabelKey] = containerTypeLabelContainer - } - return newNames, newLabels, nil -} - -// legacyCleanupCheckInterval is the interval legacyCleanupCheck is performed. -const legacyCleanupCheckInterval = 10 * time.Second - -// LegacyCleanupInit initializes the legacy cleanup flag. If necessary, it will starts a goroutine -// which periodically checks legacy cleanup until it finishes. -func (ds *dockerService) LegacyCleanupInit() { - // If there is no legacy container/sandbox, just return. - if clean, _ := ds.checkLegacyCleanup(); clean { - return - } - // Or else start the cleanup routine. - go wait.PollInfinite(legacyCleanupCheckInterval, ds.checkLegacyCleanup) -} - -// checkLegacyCleanup lists legacy containers/sandboxes, if no legacy containers/sandboxes are found, -// mark the legacy cleanup flag as done. -func (ds *dockerService) checkLegacyCleanup() (bool, error) { - // Always do legacy cleanup when list fails. - sandboxes, err := ds.ListLegacyPodSandbox(nil) - if err != nil { - glog.Errorf("Failed to list legacy pod sandboxes: %v", err) - return false, nil - } - containers, err := ds.ListLegacyContainers(nil) - if err != nil { - glog.Errorf("Failed to list legacy containers: %v", err) - return false, nil - } - if len(sandboxes) != 0 || len(containers) != 0 { - glog.V(4).Infof("Found legacy sandboxes %+v, legacy containers %+v, continue legacy cleanup", - sandboxes, containers) - return false, nil - } - ds.legacyCleanup.MarkDone() - glog.V(2).Infof("No legacy containers found, stop performing legacy cleanup.") - return true, nil -} - -// ListLegacyPodSandbox only lists all legacy pod sandboxes. -func (ds *dockerService) ListLegacyPodSandbox(filter *runtimeapi.PodSandboxFilter) ([]*runtimeapi.PodSandbox, error) { - // By default, list all containers whether they are running or not. - opts := dockertypes.ContainerListOptions{All: true, Filters: dockerfilters.NewArgs()} - filterOutReadySandboxes := false - f := newDockerFilter(&opts.Filters) - if filter != nil { - if filter.Id != "" { - f.Add("id", filter.Id) - } - if filter.State != nil { - if filter.GetState().State == runtimeapi.PodSandboxState_SANDBOX_READY { - // Only list running containers. - opts.All = false - } else { - // runtimeapi.PodSandboxState_SANDBOX_NOTREADY can mean the - // container is in any of the non-running state (e.g., created, - // exited). We can't tell docker to filter out running - // containers directly, so we'll need to filter them out - // ourselves after getting the results. 
- filterOutReadySandboxes = true - } - } - if filter.LabelSelector != nil { - for k, v := range filter.LabelSelector { - f.AddLabel(k, v) - } - } - } - containers, err := ds.client.ListContainers(opts) - if err != nil { - return nil, err - } - - // Convert docker containers to runtime api sandboxes. - result := make([]*runtimeapi.PodSandbox, 0, len(containers)) - for i := range containers { - c := containers[i] - // Skip new containers with containerTypeLabelKey label. - if _, ok := c.Labels[containerTypeLabelKey]; ok { - continue - } - // If the container has no containerTypeLabelKey label, treat it as a legacy container. - c.Names, c.Labels, err = convertLegacyNameAndLabels(c.Names, c.Labels) - if err != nil { - glog.V(4).Infof("Unable to convert legacy container %+v: %v", c, err) - continue - } - if c.Labels[containerTypeLabelKey] != containerTypeLabelSandbox { - continue - } - converted, err := containerToRuntimeAPISandbox(&c) - if err != nil { - glog.V(4).Infof("Unable to convert docker to runtime API sandbox %+v: %v", c, err) - continue - } - if filterOutReadySandboxes && converted.State == runtimeapi.PodSandboxState_SANDBOX_READY { - continue - } - result = append(result, converted) - } - return result, nil -} - -// ListLegacyPodSandbox only lists all legacy containers. -func (ds *dockerService) ListLegacyContainers(filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) { - opts := dockertypes.ContainerListOptions{All: true, Filters: dockerfilters.NewArgs()} - f := newDockerFilter(&opts.Filters) - - if filter != nil { - if filter.Id != "" { - f.Add("id", filter.Id) - } - if filter.State != nil { - f.Add("status", toDockerContainerStatus(filter.GetState().State)) - } - // NOTE: Legacy container doesn't have sandboxIDLabelKey label, so we can't filter with - // it. Fortunately, PodSandboxId is not used in kuberuntime now. - if filter.LabelSelector != nil { - for k, v := range filter.LabelSelector { - f.AddLabel(k, v) - } - } - } - containers, err := ds.client.ListContainers(opts) - if err != nil { - return nil, err - } - - // Convert docker to runtime api containers. - result := make([]*runtimeapi.Container, 0, len(containers)) - for i := range containers { - c := containers[i] - // Skip new containers with containerTypeLabelKey label. - if _, ok := c.Labels[containerTypeLabelKey]; ok { - continue - } - // If the container has no containerTypeLabelKey label, treat it as a legacy container. - c.Names, c.Labels, err = convertLegacyNameAndLabels(c.Names, c.Labels) - if err != nil { - glog.V(4).Infof("Unable to convert legacy container %+v: %v", c, err) - continue - } - if c.Labels[containerTypeLabelKey] != containerTypeLabelContainer { - continue - } - converted, err := toRuntimeAPIContainer(&c) - if err != nil { - glog.V(4).Infof("Unable to convert docker container to runtime API container %+v: %v", c, err) - continue - } - result = append(result, converted) - } - return result, nil -} - -// legacyCleanupFlag is the flag indicating whether legacy cleanup is done. -type legacyCleanupFlag struct { - done int32 -} - -// Done checks whether legacy cleanup is done. -func (f *legacyCleanupFlag) Done() bool { - return atomic.LoadInt32(&f.done) == 1 -} - -// MarkDone sets legacy cleanup as done. 
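
LegacyCleanupInit, removed above, spawned a goroutine that ran checkLegacyCleanup on a fixed interval via wait.PollInfinite until no legacy containers or sandboxes remained. A small sketch of that polling pattern with a stand-in condition function; the countdown below is purely illustrative:

package main

import (
	"log"
	"sync/atomic"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

var remaining int32 = 3 // pretend three legacy objects are still around

// checkLegacyCleanup stands in for the removed dockershim method: it reports
// done only once nothing legacy is left, and returns a nil error on failures
// so the poll keeps running.
func checkLegacyCleanup() (bool, error) {
	left := atomic.AddInt32(&remaining, -1)
	if left > 0 {
		log.Printf("legacy objects remaining: %d", left)
		return false, nil
	}
	log.Print("no legacy objects found, stopping cleanup checks")
	return true, nil
}

func main() {
	// Mirrors the removed LegacyCleanupInit: poll on an interval until the
	// cleanup condition reports done.
	if err := wait.PollInfinite(10*time.Millisecond, checkLegacyCleanup); err != nil {
		log.Fatal(err)
	}
}
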
-func (f *legacyCleanupFlag) MarkDone() { - atomic.StoreInt32(&f.done, 1) -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_sandbox.go b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_sandbox.go index fb321d591388..b595e310096b 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_sandbox.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_sandbox.go @@ -30,7 +30,6 @@ import ( utilerrors "k8s.io/apimachinery/pkg/util/errors" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" - "k8s.io/kubernetes/pkg/kubelet/dockershim/errors" "k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker" "k8s.io/kubernetes/pkg/kubelet/qos" "k8s.io/kubernetes/pkg/kubelet/types" @@ -193,20 +192,10 @@ func (ds *dockerService) StopPodSandbox(podSandboxID string) error { // actions will only have sandbox ID and not have pod namespace and name information. // Return error if encounter any unexpected error. if checkpointErr != nil { - if libdocker.IsContainerNotFoundError(statusErr) && checkpointErr == errors.CheckpointNotFoundError { + if libdocker.IsContainerNotFoundError(statusErr) { glog.Warningf("Both sandbox container and checkpoint for id %q could not be found. "+ "Proceed without further sandbox information.", podSandboxID) } else { - if checkpointErr == errors.CorruptCheckpointError { - // Remove the corrupted checkpoint so that the next - // StopPodSandbox call can proceed. This may indicate that - // some resources won't be reclaimed. - // TODO (#43021): Fix this properly. - glog.Warningf("Removing corrupted checkpoint %q: %+v", podSandboxID, *checkpoint) - if err := ds.checkpointHandler.RemoveCheckpoint(podSandboxID); err != nil { - glog.Warningf("Unable to remove corrupted checkpoint %q: %v", podSandboxID, err) - } - } return utilerrors.NewAggregate([]error{ fmt.Errorf("failed to get checkpoint for sandbox %q: %v", podSandboxID, checkpointErr), fmt.Errorf("failed to get sandbox status: %v", statusErr)}) @@ -227,7 +216,9 @@ func (ds *dockerService) StopPodSandbox(podSandboxID string) error { // since it is stopped. With empty network namespcae, CNI bridge plugin will conduct best // effort clean up and will not return error. errList := []error{} - if !hostNetwork { + ready, ok := ds.getNetworkReady(podSandboxID) + if !hostNetwork && (ready || !ok) { + // Only tear down the pod network if we haven't done so already cID := kubecontainer.BuildContainerID(runtimeName, podSandboxID) err := ds.network.TearDownPod(namespace, name, cID) if err == nil { @@ -237,10 +228,13 @@ func (ds *dockerService) StopPodSandbox(podSandboxID string) error { } } if err := ds.client.StopContainer(podSandboxID, defaultSandboxGracePeriod); err != nil { - glog.Errorf("Failed to stop sandbox %q: %v", podSandboxID, err) // Do not return error if the container does not exist if !libdocker.IsContainerNotFoundError(err) { + glog.Errorf("Failed to stop sandbox %q: %v", podSandboxID, err) errList = append(errList, err) + } else { + // remove the checkpoint for any sandbox that is not found in the runtime + ds.checkpointHandler.RemoveCheckpoint(podSandboxID) } } return utilerrors.NewAggregate(errList) @@ -270,12 +264,15 @@ func (ds *dockerService) RemovePodSandbox(podSandboxID string) error { } // Remove the sandbox container. 
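
The StopPodSandbox hunk above only tears down the pod network when getNetworkReady reports the sandbox as ready (or unknown), and the RemovePodSandbox hunk that follows clears the entry only once the container is actually gone, which keeps repeated stop/remove calls idempotent. A toy sketch of that per-sandbox bookkeeping; the tracker type below is hypothetical, and flipping the flag to false after a successful teardown is an assumption about code outside this hunk:

package main

import (
	"fmt"
	"sync"
)

// networkReadyTracker records, per sandbox ID, whether pod networking is set up.
type networkReadyTracker struct {
	mu    sync.Mutex
	ready map[string]bool
}

func newNetworkReadyTracker() *networkReadyTracker {
	return &networkReadyTracker{ready: map[string]bool{}}
}

func (t *networkReadyTracker) setNetworkReady(id string, ready bool) {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.ready[id] = ready
}

// getNetworkReady returns the recorded state and whether the sandbox is known.
func (t *networkReadyTracker) getNetworkReady(id string) (bool, bool) {
	t.mu.Lock()
	defer t.mu.Unlock()
	ready, ok := t.ready[id]
	return ready, ok
}

// clearNetworkReady forgets a sandbox once it has really been removed.
func (t *networkReadyTracker) clearNetworkReady(id string) {
	t.mu.Lock()
	defer t.mu.Unlock()
	delete(t.ready, id)
}

func main() {
	t := newNetworkReadyTracker()
	t.setNetworkReady("sandbox-1", true)

	// Stop path: tear down the network only if it is ready or unknown, then
	// mark it not-ready so a repeated stop skips the teardown (assumed flip).
	if ready, ok := t.getNetworkReady("sandbox-1"); ready || !ok {
		fmt.Println("tearing down pod network for sandbox-1")
		t.setNetworkReady("sandbox-1", false)
	}
	if ready, ok := t.getNetworkReady("sandbox-1"); !(ready || !ok) {
		fmt.Println("second stop skips network teardown")
	}

	// Remove path: clear the entry only after the container is gone.
	t.clearNetworkReady("sandbox-1")
}
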
- if err := ds.client.RemoveContainer(podSandboxID, dockertypes.ContainerRemoveOptions{RemoveVolumes: true, Force: true}); err != nil && !libdocker.IsContainerNotFoundError(err) { + err = ds.client.RemoveContainer(podSandboxID, dockertypes.ContainerRemoveOptions{RemoveVolumes: true, Force: true}) + if err == nil || libdocker.IsContainerNotFoundError(err) { + // Only clear network ready when the sandbox has actually been + // removed from docker or doesn't exist + ds.clearNetworkReady(podSandboxID) + } else { errs = append(errs, err) } - ds.clearNetworkReady(podSandboxID) - // Remove the checkpoint of the sandbox. if err := ds.checkpointHandler.RemoveCheckpoint(podSandboxID); err != nil { errs = append(errs, err) @@ -373,17 +370,6 @@ func (ds *dockerService) PodSandboxStatus(podSandboxID string) (*runtimeapi.PodS } hostNetwork := sharesHostNetwork(r) - // If the sandbox has no containerTypeLabelKey label, treat it as a legacy sandbox. - if _, ok := r.Config.Labels[containerTypeLabelKey]; !ok { - names, labels, err := convertLegacyNameAndLabels([]string{r.Name}, r.Config.Labels) - if err != nil { - return nil, err - } - r.Name, r.Config.Labels = names[0], labels - // Forcibly trigger infra container restart. - hostNetwork = !hostNetwork - } - metadata, err := parseSandboxName(r.Name) if err != nil { return nil, err @@ -491,46 +477,44 @@ func (ds *dockerService) ListPodSandbox(filter *runtimeapi.PodSandboxFilter) ([] checkpoint, err := ds.checkpointHandler.GetCheckpoint(id) if err != nil { glog.Errorf("Failed to retrieve checkpoint for sandbox %q: %v", id, err) - - if err == errors.CorruptCheckpointError { - glog.Warningf("Removing corrupted checkpoint %q: %+v", id, *checkpoint) - if err := ds.checkpointHandler.RemoveCheckpoint(id); err != nil { - glog.Warningf("Unable to remove corrupted checkpoint %q: %v", id, err) - } - } continue } result = append(result, checkpointToRuntimeAPISandbox(id, checkpoint)) } - // Include legacy sandboxes if there are still legacy sandboxes not cleaned up yet. - if !ds.legacyCleanup.Done() { - legacySandboxes, err := ds.ListLegacyPodSandbox(filter) - if err != nil { - return nil, err - } - // Legacy sandboxes are always older, so we can safely append them to the end. - result = append(result, legacySandboxes...) - } return result, nil } // applySandboxLinuxOptions applies LinuxPodSandboxConfig to dockercontainer.HostConfig and dockercontainer.ContainerCreateConfig. func (ds *dockerService) applySandboxLinuxOptions(hc *dockercontainer.HostConfig, lc *runtimeapi.LinuxPodSandboxConfig, createConfig *dockertypes.ContainerCreateConfig, image string, separator rune) error { - // Apply Cgroup options. - cgroupParent, err := ds.GenerateExpectedCgroupParent(lc.CgroupParent) - if err != nil { - return err + if lc == nil { + return nil } - hc.CgroupParent = cgroupParent // Apply security context. - if err = applySandboxSecurityContext(lc, createConfig.Config, hc, ds.network, separator); err != nil { + if err := applySandboxSecurityContext(lc, createConfig.Config, hc, ds.network, separator); err != nil { return err } // Set sysctls. hc.Sysctls = lc.Sysctls + return nil +} +func (ds *dockerService) applySandboxResources(hc *dockercontainer.HostConfig, lc *runtimeapi.LinuxPodSandboxConfig) error { + hc.Resources = dockercontainer.Resources{ + MemorySwap: DefaultMemorySwap(), + CPUShares: defaultSandboxCPUshares, + // Use docker's default cpu quota/period. + } + + if lc != nil { + // Apply Cgroup options. 
+ cgroupParent, err := ds.GenerateExpectedCgroupParent(lc.CgroupParent) + if err != nil { + return err + } + hc.CgroupParent = cgroupParent + } return nil } @@ -563,10 +547,8 @@ func (ds *dockerService) makeSandboxDockerConfig(c *runtimeapi.PodSandboxConfig, } // Apply linux-specific options. - if lc := c.GetLinux(); lc != nil { - if err := ds.applySandboxLinuxOptions(hc, lc, createConfig, image, securityOptSep); err != nil { - return nil, err - } + if err := ds.applySandboxLinuxOptions(hc, c.GetLinux(), createConfig, image, securityOptSep); err != nil { + return nil, err } // Set port mappings. @@ -574,17 +556,12 @@ func (ds *dockerService) makeSandboxDockerConfig(c *runtimeapi.PodSandboxConfig, createConfig.Config.ExposedPorts = exposedPorts hc.PortBindings = portBindings - // Apply resource options. - setSandboxResources(hc) + // TODO: Get rid of the dependency on kubelet internal package. + hc.OomScoreAdj = qos.PodInfraOOMAdj - // Apply cgroupsParent derived from the sandbox config. - if lc := c.GetLinux(); lc != nil { - // Apply Cgroup options. - cgroupParent, err := ds.GenerateExpectedCgroupParent(lc.CgroupParent) - if err != nil { - return nil, fmt.Errorf("failed to generate cgroup parent in expected syntax for container %q: %v", c.Metadata.Name, err) - } - hc.CgroupParent = cgroupParent + // Apply resource options. + if err := ds.applySandboxResources(hc, c.GetLinux()); err != nil { + return nil, err } // Set security options. @@ -623,16 +600,6 @@ func sharesHostIpc(container *dockertypes.ContainerJSON) bool { return false } -func setSandboxResources(hc *dockercontainer.HostConfig) { - hc.Resources = dockercontainer.Resources{ - MemorySwap: DefaultMemorySwap(), - CPUShares: defaultSandboxCPUshares, - // Use docker's default cpu quota/period. - } - // TODO: Get rid of the dependency on kubelet internal package. 
- hc.OomScoreAdj = qos.PodInfraOOMAdj -} - func constructPodSandboxCheckpoint(config *runtimeapi.PodSandboxConfig) *PodSandboxCheckpoint { checkpoint := NewPodSandboxCheckpoint(config.Metadata.Namespace, config.Metadata.Name) for _, pm := range config.GetPortMappings() { diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_service.go b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_service.go index a42c7487a33f..6fa1106f4fd4 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_service.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_service.go @@ -24,27 +24,30 @@ import ( "sync" "time" + "github.com/armon/circbuf" "github.com/blang/semver" dockertypes "github.com/docker/docker/api/types" "github.com/golang/glog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kubetypes "k8s.io/apimachinery/pkg/types" internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig" kubecm "k8s.io/kubernetes/pkg/kubelet/cm" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/dockershim/cm" - "k8s.io/kubernetes/pkg/kubelet/dockershim/errors" "k8s.io/kubernetes/pkg/kubelet/network" "k8s.io/kubernetes/pkg/kubelet/network/cni" "k8s.io/kubernetes/pkg/kubelet/network/hostport" "k8s.io/kubernetes/pkg/kubelet/network/kubenet" "k8s.io/kubernetes/pkg/kubelet/server/streaming" "k8s.io/kubernetes/pkg/kubelet/util/cache" + utilstore "k8s.io/kubernetes/pkg/kubelet/util/store" "k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker" + "k8s.io/kubernetes/pkg/kubelet/dockershim/metrics" ) const ( @@ -145,24 +148,46 @@ type dockerNetworkHost struct { var internalLabelKeys []string = []string{containerTypeLabelKey, containerLogPathLabelKey, sandboxIDLabelKey} +// ClientConfig is parameters used to initialize docker client +type ClientConfig struct { + DockerEndpoint string + RuntimeRequestTimeout time.Duration + ImagePullProgressDeadline time.Duration + + // Configuration for fake docker client + EnableSleep bool + WithTraceDisabled bool +} + +// NewDockerClientFromConfig create a docker client from given configure +// return nil if nil configure is given. +func NewDockerClientFromConfig(config *ClientConfig) libdocker.Interface { + if config != nil { + // Create docker client. + client := libdocker.ConnectToDockerOrDie( + config.DockerEndpoint, + config.RuntimeRequestTimeout, + config.ImagePullProgressDeadline, + config.WithTraceDisabled, + config.EnableSleep, + ) + return client + } + + return nil +} + // NOTE: Anything passed to DockerService should be eventually handled in another way when we switch to running the shim as a different process. 
-func NewDockerService(client libdocker.Interface, podSandboxImage string, streamingConfig *streaming.Config, - pluginSettings *NetworkPluginSettings, cgroupsName string, kubeCgroupDriver string, execHandlerName, dockershimRootDir string, disableSharedPID bool) (DockerService, error) { +func NewDockerService(config *ClientConfig, podSandboxImage string, streamingConfig *streaming.Config, + pluginSettings *NetworkPluginSettings, cgroupsName string, kubeCgroupDriver string, dockershimRootDir string, disableSharedPID bool) (DockerService, error) { + + client := NewDockerClientFromConfig(config) + c := libdocker.NewInstrumentedInterface(client) checkpointHandler, err := NewPersistentCheckpointHandler(dockershimRootDir) if err != nil { return nil, err } - var execHandler ExecHandler - switch execHandlerName { - case "native": - execHandler = &NativeExecHandler{} - case "nsenter": - execHandler = &NsenterExecHandler{} - default: - glog.Warningf("Unknown Docker exec handler %q; defaulting to native", execHandlerName) - execHandler = &NativeExecHandler{} - } ds := &dockerService{ client: c, @@ -170,7 +195,7 @@ func NewDockerService(client libdocker.Interface, podSandboxImage string, stream podSandboxImage: podSandboxImage, streamingRuntime: &streamingRuntime{ client: client, - execHandler: execHandler, + execHandler: &NativeExecHandler{}, }, containerManager: cm.NewContainerManager(cgroupsName, client), checkpointHandler: checkpointHandler, @@ -209,6 +234,7 @@ func NewDockerService(client libdocker.Interface, podSandboxImage string, stream // NOTE: cgroup driver is only detectable in docker 1.11+ cgroupDriver := defaultCgroupDriver dockerInfo, err := ds.client.Info() + glog.Infof("Docker Info: %+v", dockerInfo) if err != nil { glog.Errorf("Failed to execute Info() call to the Docker client: %v", err) glog.Warningf("Falling back to use the default driver: %q", cgroupDriver) @@ -229,6 +255,10 @@ func NewDockerService(client libdocker.Interface, podSandboxImage string, stream }, versionCacheTTL, ) + + // Register prometheus metrics. + metrics.Register() + return ds, nil } @@ -240,6 +270,15 @@ type DockerService interface { Start() error // For serving streaming calls. http.Handler + + // IsCRISupportedLogDriver checks whether the logging driver used by docker is + // suppoted by native CRI integration. + // TODO(resouer): remove this when deprecating unsupported log driver + IsCRISupportedLogDriver() (bool, error) + + // NewDockerLegacyService created docker legacy service when log driver is not supported. + // TODO(resouer): remove this when deprecating unsupported log driver + NewDockerLegacyService() DockerLegacyService } type dockerService struct { @@ -258,8 +297,6 @@ type dockerService struct { // cgroup driver used by Docker runtime. cgroupDriver string checkpointHandler CheckpointHandler - // legacyCleanup indicates whether legacy cleanup has finished or not. - legacyCleanup legacyCleanupFlag // caches the version of the runtime. // To be compatible with multiple docker versions, we need to perform // version checking for some operations. 
Use this cache to avoid querying @@ -329,12 +366,10 @@ func (ds *dockerService) GetPodPortMappings(podSandboxID string) ([]*hostport.Po checkpoint, err := ds.checkpointHandler.GetCheckpoint(podSandboxID) // Return empty portMappings if checkpoint is not found if err != nil { - if err == errors.CheckpointNotFoundError { - glog.Warningf("Failed to retrieve checkpoint for sandbox %q: %v", podSandboxID, err) + if err == utilstore.ErrKeyNotFound { return nil, nil - } else { - return nil, err } + return nil, err } portMappings := make([]*hostport.PortMapping, 0, len(checkpoint.Data.PortMappings)) @@ -352,7 +387,6 @@ func (ds *dockerService) GetPodPortMappings(podSandboxID string) ([]*hostport.Po // Start initializes and starts components in dockerService. func (ds *dockerService) Start() error { // Initialize the legacy cleanup flag. - ds.LegacyCleanupInit() return ds.containerManager.Start() } @@ -486,8 +520,10 @@ type dockerLegacyService struct { client libdocker.Interface } -func NewDockerLegacyService(client libdocker.Interface) DockerLegacyService { - return &dockerLegacyService{client: client} +// NewDockerLegacyService created docker legacy service when log driver is not supported. +// TODO(resouer): remove this when deprecating unsupported log driver +func (d *dockerService) NewDockerLegacyService() DockerLegacyService { + return &dockerLegacyService{client: d.client} } // GetContainerLogs get container logs directly from docker daemon. @@ -524,13 +560,43 @@ func (d *dockerLegacyService) GetContainerLogs(pod *v1.Pod, containerID kubecont return d.client.Logs(containerID.ID, opts, sopts) } +// LegacyLogProvider implements the kuberuntime.LegacyLogProvider interface +type LegacyLogProvider struct { + dls DockerLegacyService +} + +func NewLegacyLogProvider(dls DockerLegacyService) LegacyLogProvider { + return LegacyLogProvider{dls: dls} +} + +// GetContainerLogTail attempts to read up to MaxContainerTerminationMessageLogLength +// from the end of the log when docker is configured with a log driver other than json-log. +// It reads up to MaxContainerTerminationMessageLogLines lines. +func (l LegacyLogProvider) GetContainerLogTail(uid kubetypes.UID, name, namespace string, containerId kubecontainer.ContainerID) (string, error) { + value := int64(kubecontainer.MaxContainerTerminationMessageLogLines) + buf, _ := circbuf.NewBuffer(kubecontainer.MaxContainerTerminationMessageLogLength) + // Although this is not a full spec pod, dockerLegacyService.GetContainerLogs() currently completely ignores its pod param + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + UID: uid, + Name: name, + Namespace: namespace, + }, + } + err := l.dls.GetContainerLogs(pod, containerId, &v1.PodLogOptions{TailLines: &value}, buf, buf) + if err != nil { + return "", err + } + return buf.String(), nil +} + // criSupportedLogDrivers are log drivers supported by native CRI integration. var criSupportedLogDrivers = []string{"json-file"} // IsCRISupportedLogDriver checks whether the logging driver used by docker is // suppoted by native CRI integration. 
-func IsCRISupportedLogDriver(client libdocker.Interface) (bool, error) { - info, err := client.Info() +func (d *dockerService) IsCRISupportedLogDriver() (bool, error) { + info, err := d.client.Info() if err != nil { return false, fmt.Errorf("failed to get docker info: %v", err) } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_stats.go b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_stats.go deleted file mode 100644 index 9d840a45faae..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_stats.go +++ /dev/null @@ -1,32 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package dockershim - -import ( - "fmt" - - runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" -) - -// DockerService does not implement container stats. -func (ds *dockerService) ContainerStats(string) (*runtimeapi.ContainerStats, error) { - return nil, fmt.Errorf("Not implemented") -} - -func (ds *dockerService) ListContainerStats(*runtimeapi.ContainerStatsFilter) ([]*runtimeapi.ContainerStats, error) { - return nil, fmt.Errorf("Not implemented") -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_stats_linux.go b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_stats_linux.go new file mode 100644 index 000000000000..48622185f1b3 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_stats_linux.go @@ -0,0 +1,34 @@ +// +build linux + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dockershim + +import ( + "fmt" + runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" +) + +// ContainerStats returns stats for a container stats request based on container id. +func (ds *dockerService) ContainerStats(string) (*runtimeapi.ContainerStats, error) { + return nil, fmt.Errorf("not implemented") +} + +// ListContainerStats returns stats for a list container stats request based on a filter. 
+func (ds *dockerService) ListContainerStats(*runtimeapi.ContainerStatsFilter) ([]*runtimeapi.ContainerStats, error) { + return nil, fmt.Errorf("not implemented") +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_stats_unsupported.go b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_stats_unsupported.go new file mode 100644 index 000000000000..d9f7beef5c7e --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_stats_unsupported.go @@ -0,0 +1,35 @@ +// +build !linux,!windows + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dockershim + +import ( + "fmt" + + runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" +) + +// ContainerStats returns stats for a container stats request based on container id. +func (ds *dockerService) ContainerStats(string) (*runtimeapi.ContainerStats, error) { + return nil, fmt.Errorf("not implemented") +} + +// ListContainerStats returns stats for a list container stats request based on a filter. +func (ds *dockerService) ListContainerStats(*runtimeapi.ContainerStatsFilter) ([]*runtimeapi.ContainerStats, error) { + return nil, fmt.Errorf("not implemented") +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_stats_windows.go b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_stats_windows.go new file mode 100644 index 000000000000..6f51239502e4 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_stats_windows.go @@ -0,0 +1,105 @@ +// +build windows + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dockershim + +import ( + "time" + + runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" +) + +// ContainerStats returns stats for a container stats request based on container id. +func (ds *dockerService) ContainerStats(containerID string) (*runtimeapi.ContainerStats, error) { + containerStats, err := ds.getContainerStats(containerID) + if err != nil { + return nil, err + } + return containerStats, nil +} + +// ListContainerStats returns stats for a list container stats request based on a filter. 
+func (ds *dockerService) ListContainerStats(containerStatsFilter *runtimeapi.ContainerStatsFilter) ([]*runtimeapi.ContainerStats, error) { + filter := &runtimeapi.ContainerFilter{} + + if containerStatsFilter != nil { + filter.Id = containerStatsFilter.Id + filter.PodSandboxId = containerStatsFilter.PodSandboxId + filter.LabelSelector = containerStatsFilter.LabelSelector + } + + containers, err := ds.ListContainers(filter) + if err != nil { + return nil, err + } + + var stats []*runtimeapi.ContainerStats + for _, container := range containers { + containerStats, err := ds.getContainerStats(container.Id) + if err != nil { + return nil, err + } + + stats = append(stats, containerStats) + } + + return stats, nil +} + +func (ds *dockerService) getContainerStats(containerID string) (*runtimeapi.ContainerStats, error) { + statsJSON, err := ds.client.GetContainerStats(containerID) + if err != nil { + return nil, err + } + + containerJSON, err := ds.client.InspectContainerWithSize(containerID) + if err != nil { + return nil, err + } + + status, err := ds.ContainerStatus(containerID) + if err != nil { + return nil, err + } + + dockerStats := statsJSON.Stats + timestamp := time.Now().UnixNano() + containerStats := &runtimeapi.ContainerStats{ + Attributes: &runtimeapi.ContainerAttributes{ + Id: containerID, + Metadata: status.Metadata, + Labels: status.Labels, + Annotations: status.Annotations, + }, + Cpu: &runtimeapi.CpuUsage{ + Timestamp: timestamp, + // have to multiply cpu usage by 100 since docker stats units is in 100's of nano seconds for Windows + // see https://github.com/moby/moby/blob/v1.13.1/api/types/stats.go#L22 + UsageCoreNanoSeconds: &runtimeapi.UInt64Value{Value: dockerStats.CPUStats.CPUUsage.TotalUsage * 100}, + }, + Memory: &runtimeapi.MemoryUsage{ + Timestamp: timestamp, + WorkingSetBytes: &runtimeapi.UInt64Value{Value: dockerStats.MemoryStats.PrivateWorkingSet}, + }, + WritableLayer: &runtimeapi.FilesystemUsage{ + Timestamp: timestamp, + UsedBytes: &runtimeapi.UInt64Value{Value: uint64(*containerJSON.SizeRw)}, + }, + } + return containerStats, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/errors/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/errors/BUILD deleted file mode 100644 index 1a00c4f85093..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/errors/BUILD +++ /dev/null @@ -1,24 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = ["errors.go"], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/errors/errors.go b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/errors/errors.go deleted file mode 100644 index 25462ffd9c88..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/errors/errors.go +++ /dev/null @@ -1,22 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package errors - -import "fmt" - -var CorruptCheckpointError = fmt.Errorf("checkpoint is corrupted.") -var CheckpointNotFoundError = fmt.Errorf("checkpoint is not found.") diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/exec.go b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/exec.go index a861bb47f0e0..aaaff8487d34 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/exec.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/exec.go @@ -19,8 +19,6 @@ package dockershim import ( "fmt" "io" - "os" - "os/exec" "time" dockertypes "github.com/docker/docker/api/types" @@ -28,8 +26,6 @@ import ( "k8s.io/client-go/tools/remotecommand" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" - "k8s.io/kubernetes/pkg/util/term" - utilexec "k8s.io/utils/exec" "k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker" ) @@ -39,9 +35,6 @@ type ExecHandler interface { ExecInContainer(client libdocker.Interface, container *dockertypes.ContainerJSON, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error } -// NsenterExecHandler executes commands in Docker containers using nsenter. -type NsenterExecHandler struct{} - type dockerExitError struct { Inspect *dockertypes.ContainerExecInspect } @@ -62,75 +55,6 @@ func (d *dockerExitError) ExitStatus() int { return d.Inspect.ExitCode } -// TODO should we support nsenter in a container, running with elevated privs and --pid=host? -func (*NsenterExecHandler) ExecInContainer(client libdocker.Interface, container *dockertypes.ContainerJSON, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error { - nsenter, err := exec.LookPath("nsenter") - if err != nil { - return fmt.Errorf("exec unavailable - unable to locate nsenter") - } - - containerPid := container.State.Pid - - // TODO what if the container doesn't have `env`??? - args := []string{"-t", fmt.Sprintf("%d", containerPid), "-m", "-i", "-u", "-n", "-p", "--", "env", "-i"} - args = append(args, fmt.Sprintf("HOSTNAME=%s", container.Config.Hostname)) - args = append(args, container.Config.Env...) - args = append(args, cmd...) - command := exec.Command(nsenter, args...) - var cmdErr error - if tty { - p, err := kubecontainer.StartPty(command) - if err != nil { - return err - } - defer p.Close() - - // make sure to close the stdout stream - defer stdout.Close() - - kubecontainer.HandleResizing(resize, func(size remotecommand.TerminalSize) { - term.SetSize(p.Fd(), size) - }) - - if stdin != nil { - go io.Copy(p, stdin) - } - - if stdout != nil { - go io.Copy(stdout, p) - } - - cmdErr = command.Wait() - } else { - if stdin != nil { - // Use an os.Pipe here as it returns true *os.File objects. - // This way, if you run 'kubectl exec -i bash' (no tty) and type 'exit', - // the call below to command.Run() can unblock because its Stdin is the read half - // of the pipe. 
- r, w, err := os.Pipe() - if err != nil { - return err - } - go io.Copy(w, stdin) - - command.Stdin = r - } - if stdout != nil { - command.Stdout = stdout - } - if stderr != nil { - command.Stderr = stderr - } - - cmdErr = command.Run() - } - - if exitErr, ok := cmdErr.(*exec.ExitError); ok { - return &utilexec.ExitErrorWrapper{ExitError: exitErr} - } - return cmdErr -} - // NativeExecHandler executes commands in Docker containers using Docker's exec API. type NativeExecHandler struct{} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers.go b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers.go index bac5298aa6b1..595263a840b2 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers.go @@ -223,14 +223,6 @@ func getApparmorSecurityOpts(sc *runtimeapi.LinuxContainerSecurityContext, separ return fmtOpts, nil } -func getNetworkNamespace(c *dockertypes.ContainerJSON) (string, error) { - if c.State.Pid == 0 { - // Docker reports pid 0 for an exited container. - return "", fmt.Errorf("Cannot find network namespace for the terminated container %q", c.ID) - } - return fmt.Sprintf(dockerNetNSFmt, c.State.Pid), nil -} - // dockerFilter wraps around dockerfilters.Args and provides methods to modify // the filter easily. type dockerFilter struct { @@ -393,6 +385,11 @@ func getAppArmorOpts(profile string) ([]dockerOpt, error) { return nil, nil } + // Return unconfined profile explicitly + if profile == apparmor.ProfileNameUnconfined { + return []dockerOpt{{"apparmor", apparmor.ProfileNameUnconfined, ""}}, nil + } + // Assume validation has already happened. profileName := strings.TrimPrefix(profile, apparmor.ProfileNamePrefix) return []dockerOpt{{"apparmor", profileName, ""}}, nil diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers_linux.go b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers_linux.go index 18a223d755de..f8a9a1fbbe06 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers_linux.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers_linux.go @@ -62,7 +62,11 @@ func getSeccompDockerOpts(seccompProfile string) ([]dockerOpt, error) { return nil, fmt.Errorf("unknown seccomp profile option: %s", seccompProfile) } - fname := strings.TrimPrefix(seccompProfile, "localhost/") // by pod annotation validation, name is a valid subpath + // get the full path of seccomp profile when prefixed with 'localhost/'. + fname := strings.TrimPrefix(seccompProfile, "localhost/") + if !filepath.IsAbs(fname) { + return nil, fmt.Errorf("seccomp profile path must be absolute, but got relative path %q", fname) + } file, err := ioutil.ReadFile(filepath.FromSlash(fname)) if err != nil { return nil, fmt.Errorf("cannot load seccomp profile %q: %v", fname, err) @@ -133,3 +137,11 @@ func (ds *dockerService) updateCreateConfig( func (ds *dockerService) determinePodIPBySandboxID(uid string) string { return "" } + +func getNetworkNamespace(c *dockertypes.ContainerJSON) (string, error) { + if c.State.Pid == 0 { + // Docker reports pid 0 for an exited container. 
+ return "", fmt.Errorf("cannot find network namespace for the terminated container %q", c.ID) + } + return fmt.Sprintf(dockerNetNSFmt, c.State.Pid), nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers_unsupported.go b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers_unsupported.go index db24ae7afb60..dfe3a097a495 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers_unsupported.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers_unsupported.go @@ -19,6 +19,8 @@ limitations under the License. package dockershim import ( + "fmt" + "github.com/blang/semver" dockertypes "github.com/docker/docker/api/types" "github.com/golang/glog" @@ -47,3 +49,7 @@ func (ds *dockerService) determinePodIPBySandboxID(uid string) string { glog.Warningf("determinePodIPBySandboxID is unsupported in this build") return "" } + +func getNetworkNamespace(c *dockertypes.ContainerJSON) (string, error) { + return "", fmt.Errorf("unsupported platform") +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers_windows.go b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers_windows.go index b9ddfd2c6496..c93515daab27 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers_windows.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers_windows.go @@ -47,6 +47,9 @@ func (ds *dockerService) updateCreateConfig( podSandboxID string, securityOptSep rune, apiVersion *semver.Version) error { if networkMode := os.Getenv("CONTAINER_NETWORK"); networkMode != "" { createConfig.HostConfig.NetworkMode = dockercontainer.NetworkMode(networkMode) + } else { + // Todo: Refactor this call in future for calling methods directly in security_context.go + modifyHostNetworkOptionForContainer(false, podSandboxID, createConfig.HostConfig) } return nil @@ -71,14 +74,49 @@ func (ds *dockerService) determinePodIPBySandboxID(sandboxID string) string { if err != nil { continue } - if containerIP := getContainerIP(r); containerIP != "" { - return containerIP + + // Versions and feature support + // ============================ + // Windows version == Windows Server, Version 1709,, Supports both sandbox and non-sandbox case + // Windows version == Windows Server 2016 Support only non-sandbox case + // Windows version < Windows Server 2016 is Not Supported + + // Sandbox support in Windows mandates CNI Plugin. + // Presense of CONTAINER_NETWORK flag is considered as non-Sandbox cases here + + // Todo: Add a kernel version check for more validation + + if networkMode := os.Getenv("CONTAINER_NETWORK"); networkMode == "" { + // Do not return any IP, so that we would continue and get the IP of the Sandbox + ds.getIP(sandboxID, r) + } else { + // On Windows, every container that is created in a Sandbox, needs to invoke CNI plugin again for adding the Network, + // with the shared container name as NetNS info, + // This is passed down to the platform to replicate some necessary information to the new container + + // + // This place is chosen as a hack for now, since getContainerIP would end up calling CNI's addToNetwork + // That is why addToNetwork is required to be idempotent + + // Instead of relying on this call, an explicit call to addToNetwork should be + // done immediately after ContainerCreation, in case of Windows only. 
TBD Issue # to handle this + + if containerIP := getContainerIP(r); containerIP != "" { + return containerIP + } } } return "" } +func getNetworkNamespace(c *dockertypes.ContainerJSON) (string, error) { + // Currently in windows there is no identifier exposed for network namespace + // Like docker, the referenced container id is used to figure out the network namespace id internally by the platform + // so returning the docker networkMode (which holds container: for network namespace here + return string(c.HostConfig.NetworkMode), nil +} + func getContainerIP(container *dockertypes.ContainerJSON) string { if container.NetworkSettings != nil { for _, network := range container.NetworkSettings.Networks { diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/BUILD index 3e7ecbd99a08..574ca6eca9d1 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/BUILD @@ -11,15 +11,12 @@ go_test( srcs = [ "helpers_test.go", "kube_docker_client_test.go", - "legacy_test.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker", library = ":go_default_library", deps = [ - "//pkg/util/hash:go_default_library", "//vendor/github.com/docker/docker/api/types:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", ], ) @@ -31,11 +28,10 @@ go_library( "helpers.go", "instrumented_client.go", "kube_docker_client.go", - "legacy.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker", deps = [ - "//pkg/kubelet/container:go_default_library", - "//pkg/kubelet/metrics:go_default_library", + "//pkg/kubelet/dockershim/metrics:go_default_library", "//vendor/github.com/docker/distribution/reference:go_default_library", "//vendor/github.com/docker/docker/api/types:go_default_library", "//vendor/github.com/docker/docker/api/types/container:go_default_library", @@ -47,7 +43,6 @@ go_library( "//vendor/github.com/opencontainers/go-digest:go_default_library", "//vendor/golang.org/x/net/context:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/client.go b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/client.go index 40bf612a6cec..0400bbb91790 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/client.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/client.go @@ -40,12 +40,16 @@ const ( // This is only used by GetKubeletDockerContainers(), and should be removed // along with the function. containerNamePrefix = "k8s" + + // Fake docker endpoint + FakeDockerEndpoint = "fake://" ) // Interface is an abstract interface for testability. It abstracts the interface of docker client. 
type Interface interface { ListContainers(options dockertypes.ContainerListOptions) ([]dockertypes.Container, error) InspectContainer(id string) (*dockertypes.ContainerJSON, error) + InspectContainerWithSize(id string) (*dockertypes.ContainerJSON, error) CreateContainer(dockertypes.ContainerCreateConfig) (*dockercontainer.ContainerCreateCreatedBody, error) StartContainer(id string) error StopContainer(id string, timeout time.Duration) error @@ -66,6 +70,7 @@ type Interface interface { AttachToContainer(string, dockertypes.ContainerAttachOptions, StreamOptions) error ResizeContainerTTY(id string, height, width uint) error ResizeExecTTY(id string, height, width uint) error + GetContainerStats(id string) (*dockertypes.StatsJSON, error) } // Get a *dockerapi.Client, either using the endpoint passed in, or using @@ -84,9 +89,18 @@ func getDockerClient(dockerEndpoint string) (*dockerapi.Client, error) { // is the timeout for docker requests. If timeout is exceeded, the request // will be cancelled and throw out an error. If requestTimeout is 0, a default // value will be applied. -func ConnectToDockerOrDie(dockerEndpoint string, requestTimeout, imagePullProgressDeadline time.Duration) Interface { - if dockerEndpoint == "fake://" { - return NewFakeDockerClient() +func ConnectToDockerOrDie(dockerEndpoint string, requestTimeout, imagePullProgressDeadline time.Duration, + withTraceDisabled bool, enableSleep bool) Interface { + if dockerEndpoint == FakeDockerEndpoint { + fakeClient := NewFakeDockerClient() + if withTraceDisabled { + fakeClient = fakeClient.WithTraceDisabled() + } + + if enableSleep { + fakeClient.EnableSleep = true + } + return fakeClient } client, err := getDockerClient(dockerEndpoint) if err != nil { diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/fake_client.go b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/fake_client.go index 0d42b3a32e47..21e27eba8fc2 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/fake_client.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/fake_client.go @@ -83,6 +83,9 @@ const ( fakeDockerVersion = "1.11.2" fakeImageSize = 1024 + + // Docker prepends '/' to the container name. + dockerNamePrefix = "/" ) func NewFakeDockerClient() *FakeDockerClient { @@ -289,11 +292,7 @@ func (f *FakeDockerClient) AssertCallDetails(calls ...calledDetail) (err error) func (f *FakeDockerClient) idsToNames(ids []string) ([]string, error) { names := []string{} for _, id := range ids { - dockerName, _, err := ParseDockerName(f.ContainerMap[id].Name) - if err != nil { - return nil, fmt.Errorf("unexpected error: %v", err) - } - names = append(names, dockerName.ContainerName) + names = append(names, strings.TrimPrefix(f.ContainerMap[id].Name, dockerNamePrefix)) } return names, nil } @@ -373,9 +372,8 @@ func (f *FakeDockerClient) popError(op string) error { if ok { delete(f.Errors, op) return err - } else { - return nil } + return nil } // ListContainers is a test-spy implementation of Interface.ListContainers. 
@@ -412,7 +410,7 @@ func (f *FakeDockerClient) ListContainers(options dockertypes.ContainerListOptio var filtered []dockertypes.Container for _, container := range containerList { for _, statusFilter := range statusFilters { - if container.Status == statusFilter { + if toDockerContainerStatus(container.Status) == statusFilter { filtered = append(filtered, container) break } @@ -445,6 +443,19 @@ func (f *FakeDockerClient) ListContainers(options dockertypes.ContainerListOptio return containerList, err } +func toDockerContainerStatus(state string) string { + switch { + case strings.HasPrefix(state, StatusCreatedPrefix): + return "created" + case strings.HasPrefix(state, StatusRunningPrefix): + return "running" + case strings.HasPrefix(state, StatusExitedPrefix): + return "exited" + default: + return "unknown" + } +} + // InspectContainer is a test-spy implementation of Interface.InspectContainer. // It adds an entry "inspect" to the internal method call record. func (f *FakeDockerClient) InspectContainer(id string) (*dockertypes.ContainerJSON, error) { @@ -462,6 +473,23 @@ func (f *FakeDockerClient) InspectContainer(id string) (*dockertypes.ContainerJS return nil, fmt.Errorf("container %q not found", id) } +// InspectContainerWithSize is a test-spy implementation of Interface.InspectContainerWithSize. +// It adds an entry "inspect" to the internal method call record. +func (f *FakeDockerClient) InspectContainerWithSize(id string) (*dockertypes.ContainerJSON, error) { + f.Lock() + defer f.Unlock() + f.appendCalled(calledDetail{name: "inspect_container_withsize"}) + err := f.popError("inspect_container_withsize") + if container, ok := f.ContainerMap[id]; ok { + return container, err + } + if err != nil { + // Use the custom error if it exists. + return nil, err + } + return nil, fmt.Errorf("container %q not found", id) +} + // InspectImageByRef is a test-spy implementation of Interface.InspectImageByRef. // It adds an entry "inspect" to the internal method call record. func (f *FakeDockerClient) InspectImageByRef(name string) (*dockertypes.ImageInspect, error) { @@ -523,8 +551,7 @@ func (f *FakeDockerClient) CreateContainer(c dockertypes.ContainerCreateConfig) return nil, err } // This is not a very good fake. We'll just add this container's name to the list. - // Docker likes to add a '/', so copy that behavior. 
- name := "/" + c.Name + name := dockerNamePrefix + c.Name id := GetFakeContainerID(name) f.appendContainerTrace("Created", id) timestamp := f.Clock.Now() @@ -551,6 +578,18 @@ func (f *FakeDockerClient) StartContainer(id string) error { } f.appendContainerTrace("Started", id) container, ok := f.ContainerMap[id] + if container.HostConfig.NetworkMode.IsContainer() { + hostContainerID := container.HostConfig.NetworkMode.ConnectedContainer() + found := false + for _, container := range f.RunningContainerList { + if container.ID == hostContainerID { + found = true + } + } + if !found { + return fmt.Errorf("failed to start container \"%s\": Error response from daemon: cannot join network of a non running container: %s", id, hostContainerID) + } + } timestamp := f.Clock.Now() if !ok { container = convertFakeContainer(&FakeContainer{ID: id, Name: id, CreatedAt: timestamp}) @@ -622,6 +661,15 @@ func (f *FakeDockerClient) RemoveContainer(id string, opts dockertypes.Container } } + for i := range f.RunningContainerList { + // allow removal of running containers which are not running + if f.RunningContainerList[i].ID == id && !f.ContainerMap[id].State.Running { + delete(f.ContainerMap, id) + f.RunningContainerList = append(f.RunningContainerList[:i], f.RunningContainerList[i+1:]...) + f.appendContainerTrace("Removed", id) + return nil + } + } // To be a good fake, report error if container is not stopped. return fmt.Errorf("container not stopped") } @@ -854,3 +902,10 @@ func (f *FakeDockerPuller) GetImageRef(image string) (string, error) { } return image, err } + +func (f *FakeDockerClient) GetContainerStats(id string) (*dockertypes.StatsJSON, error) { + f.Lock() + defer f.Unlock() + f.appendCalled(calledDetail{name: "getContainerStats"}) + return nil, fmt.Errorf("not implemented") +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/instrumented_client.go b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/instrumented_client.go index 69c79c5bf39e..78a0d6964812 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/instrumented_client.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/instrumented_client.go @@ -22,7 +22,8 @@ import ( dockertypes "github.com/docker/docker/api/types" dockercontainer "github.com/docker/docker/api/types/container" dockerimagetypes "github.com/docker/docker/api/types/image" - "k8s.io/kubernetes/pkg/kubelet/metrics" + + "k8s.io/kubernetes/pkg/kubelet/dockershim/metrics" ) // instrumentedInterface wraps the Interface and records the operations @@ -31,7 +32,7 @@ type instrumentedInterface struct { client Interface } -// Creates an instrumented Interface from an existing Interface. +// NewInstrumentedInterface creates an instrumented Interface from an existing Interface. 
func NewInstrumentedInterface(dockerClient Interface) Interface { return instrumentedInterface{ client: dockerClient, @@ -73,6 +74,15 @@ func (in instrumentedInterface) InspectContainer(id string) (*dockertypes.Contai return out, err } +func (in instrumentedInterface) InspectContainerWithSize(id string) (*dockertypes.ContainerJSON, error) { + const operation = "inspect_container_withsize" + defer recordOperation(operation, time.Now()) + + out, err := in.client.InspectContainerWithSize(id) + recordError(operation, err) + return out, err +} + func (in instrumentedInterface) CreateContainer(opts dockertypes.ContainerCreateConfig) (*dockercontainer.ContainerCreateCreatedBody, error) { const operation = "create_container" defer recordOperation(operation, time.Now()) @@ -251,3 +261,12 @@ func (in instrumentedInterface) ResizeContainerTTY(id string, height, width uint recordError(operation, err) return err } + +func (in instrumentedInterface) GetContainerStats(id string) (*dockertypes.StatsJSON, error) { + const operation = "stats" + defer recordOperation(operation, time.Now()) + + out, err := in.client.GetContainerStats(id) + recordError(operation, err) + return out, err +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/kube_docker_client.go b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/kube_docker_client.go index e002b9dbb376..0cc0c3ae0397 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/kube_docker_client.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/kube_docker_client.go @@ -123,6 +123,21 @@ func (d *kubeDockerClient) InspectContainer(id string) (*dockertypes.ContainerJS return &containerJSON, nil } +// InspectContainerWithSize is currently only used for Windows container stats +func (d *kubeDockerClient) InspectContainerWithSize(id string) (*dockertypes.ContainerJSON, error) { + ctx, cancel := d.getTimeoutContext() + defer cancel() + // Inspects the container including the fields SizeRw and SizeRootFs. + containerJSON, _, err := d.client.ContainerInspectWithRaw(ctx, id, true) + if ctxErr := contextError(ctx); ctxErr != nil { + return nil, ctxErr + } + if err != nil { + return nil, err + } + return &containerJSON, nil +} + func (d *kubeDockerClient) CreateContainer(opts dockertypes.ContainerCreateConfig) (*dockercontainer.ContainerCreateCreatedBody, error) { ctx, cancel := d.getTimeoutContext() defer cancel() @@ -522,6 +537,27 @@ func (d *kubeDockerClient) ResizeContainerTTY(id string, height, width uint) err }) } +// GetContainerStats is currently only used for Windows container stats +func (d *kubeDockerClient) GetContainerStats(id string) (*dockertypes.StatsJSON, error) { + ctx, cancel := d.getCancelableContext() + defer cancel() + + response, err := d.client.ContainerStats(ctx, id, false) + if err != nil { + return nil, err + } + + dec := json.NewDecoder(response.Body) + var stats dockertypes.StatsJSON + err = dec.Decode(&stats) + if err != nil { + return nil, err + } + + defer response.Body.Close() + return &stats, nil +} + // redirectResponseToOutputStream redirect the response stream to stdout and stderr. When tty is true, all stream will // only be redirected to stdout. 
func (d *kubeDockerClient) redirectResponseToOutputStream(tty bool, outputStream, errorStream io.Writer, resp io.Reader) error { diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/legacy.go b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/legacy.go deleted file mode 100644 index 4212364ecbf5..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/legacy.go +++ /dev/null @@ -1,92 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package libdocker - -import ( - "fmt" - "math/rand" - "strconv" - "strings" - - "github.com/golang/glog" - - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" - kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" -) - -// This file contains functions used in the non-CRI integration (< 1.6). They -// are currently used for recoginzing containers created by pre-1.6 kubelets. -// TODO: Remove this file for kubernetes 1.8+. - -// Creates a name which can be reversed to identify both full pod name and container name. -// This function returns stable name, unique name and a unique id. -// Although rand.Uint32() is not really unique, but it's enough for us because error will -// only occur when instances of the same container in the same pod have the same UID. The -// chance is really slim. -func BuildDockerName(dockerName KubeletContainerName, container *v1.Container) (string, string, string) { - containerName := dockerName.ContainerName + "." + strconv.FormatUint(kubecontainer.HashContainerLegacy(container), 16) - stableName := fmt.Sprintf("%s_%s_%s_%s", - containerNamePrefix, - containerName, - dockerName.PodFullName, - dockerName.PodUID) - UID := fmt.Sprintf("%08x", rand.Uint32()) - return stableName, fmt.Sprintf("%s_%s", stableName, UID), UID -} - -// Unpacks a container name, returning the pod full name and container name we would have used to -// construct the docker name. If we are unable to parse the name, an error is returned. -func ParseDockerName(name string) (dockerName *KubeletContainerName, hash uint64, err error) { - // For some reason docker appears to be appending '/' to names. - // If it's there, strip it. - name = strings.TrimPrefix(name, "/") - parts := strings.Split(name, "_") - if len(parts) == 0 || parts[0] != containerNamePrefix { - err = fmt.Errorf("failed to parse Docker container name %q into parts", name) - return nil, 0, err - } - if len(parts) < 6 { - // We have at least 5 fields. We may have more in the future. - // Anything with less fields than this is not something we can - // manage. 
- glog.Warningf("found a container with the %q prefix, but too few fields (%d): %q", containerNamePrefix, len(parts), name) - err = fmt.Errorf("Docker container name %q has less parts than expected %v", name, parts) - return nil, 0, err - } - - nameParts := strings.Split(parts[1], ".") - containerName := nameParts[0] - if len(nameParts) > 1 { - hash, err = strconv.ParseUint(nameParts[1], 16, 32) - if err != nil { - glog.Warningf("invalid container hash %q in container %q", nameParts[1], name) - } - } - - podFullName := parts[2] + "_" + parts[3] - podUID := types.UID(parts[4]) - - return &KubeletContainerName{podFullName, podUID, containerName}, hash, nil -} - -// KubeletContainerName encapsulates a pod name and a Kubernetes container name. -type KubeletContainerName struct { - PodFullName string - PodUID types.UID - ContainerName string -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/metrics/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/metrics/BUILD new file mode 100644 index 000000000000..3ea7d9321d55 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/metrics/BUILD @@ -0,0 +1,23 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["metrics.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/dockershim/metrics", + visibility = ["//visibility:public"], + deps = ["//vendor/github.com/prometheus/client_golang/prometheus:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/metrics/metrics.go b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/metrics/metrics.go new file mode 100644 index 000000000000..2ce548f76a63 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/metrics/metrics.go @@ -0,0 +1,96 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +const ( + // DockerOperationsKey is the key for docker operation metrics. + DockerOperationsKey = "docker_operations" + // DockerOperationsLatencyKey is the key for the operation latency metrics. + DockerOperationsLatencyKey = "docker_operations_latency_microseconds" + // DockerOperationsErrorsKey is the key for the operation error metrics. + DockerOperationsErrorsKey = "docker_operations_errors" + // DockerOperationsTimeoutKey is the key for the operation timoeut metrics. + DockerOperationsTimeoutKey = "docker_operations_timeout" + + // Keep the "kubelet" subsystem for backward compatibility. + kubeletSubsystem = "kubelet" +) + +var ( + // DockerOperationsLatency collects operation latency numbers by operation + // type. 
+ DockerOperationsLatency = prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Subsystem: kubeletSubsystem, + Name: DockerOperationsLatencyKey, + Help: "Latency in microseconds of Docker operations. Broken down by operation type.", + }, + []string{"operation_type"}, + ) + // DockerOperations collects operation counts by operation type. + DockerOperations = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Subsystem: kubeletSubsystem, + Name: DockerOperationsKey, + Help: "Cumulative number of Docker operations by operation type.", + }, + []string{"operation_type"}, + ) + // DockerOperationsErrors collects operation errors by operation + // type. + DockerOperationsErrors = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Subsystem: kubeletSubsystem, + Name: DockerOperationsErrorsKey, + Help: "Cumulative number of Docker operation errors by operation type.", + }, + []string{"operation_type"}, + ) + // DockerOperationsTimeout collects operation timeouts by operation type. + DockerOperationsTimeout = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Subsystem: kubeletSubsystem, + Name: DockerOperationsTimeoutKey, + Help: "Cumulative number of Docker operation timeout by operation type.", + }, + []string{"operation_type"}, + ) +) + +var registerMetrics sync.Once + +// Register all metrics. +func Register() { + registerMetrics.Do(func() { + prometheus.MustRegister(DockerOperationsLatency) + prometheus.MustRegister(DockerOperations) + prometheus.MustRegister(DockerOperationsErrors) + prometheus.MustRegister(DockerOperationsTimeout) + }) +} + +// SinceInMicroseconds gets the time since the specified start in microseconds. +func SinceInMicroseconds(start time.Time) float64 { + return float64(time.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds()) +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/remote/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/remote/BUILD index 9460128d657f..2d3202536989 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/remote/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/remote/BUILD @@ -11,6 +11,7 @@ go_library( "docker_server.go", "docker_service.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/dockershim/remote", deps = [ "//pkg/kubelet/apis/cri:go_default_library", "//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/remote/docker_service.go b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/remote/docker_service.go index f51004aa0586..2d85f1790002 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/remote/docker_service.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/remote/docker_service.go @@ -17,7 +17,6 @@ limitations under the License. package remote import ( - "fmt" "time" "golang.org/x/net/context" @@ -226,13 +225,25 @@ func (d *dockerService) RemoveImage(ctx context.Context, r *runtimeapi.RemoveIma // ImageFsInfo returns information of the filesystem that is used to store images. 
func (d *dockerService) ImageFsInfo(ctx context.Context, r *runtimeapi.ImageFsInfoRequest) (*runtimeapi.ImageFsInfoResponse, error) { - return nil, fmt.Errorf("not implemented") + filesystems, err := d.imageService.ImageFsInfo() + if err != nil { + return nil, err + } + return &runtimeapi.ImageFsInfoResponse{ImageFilesystems: filesystems}, nil } func (d *dockerService) ContainerStats(ctx context.Context, r *runtimeapi.ContainerStatsRequest) (*runtimeapi.ContainerStatsResponse, error) { - return nil, fmt.Errorf("not implemented") + stats, err := d.runtimeService.ContainerStats(r.ContainerId) + if err != nil { + return nil, err + } + return &runtimeapi.ContainerStatsResponse{Stats: stats}, nil } func (d *dockerService) ListContainerStats(ctx context.Context, r *runtimeapi.ListContainerStatsRequest) (*runtimeapi.ListContainerStatsResponse, error) { - return nil, fmt.Errorf("not implemented") + stats, err := d.runtimeService.ListContainerStats(r.GetFilter()) + if err != nil { + return nil, err + } + return &runtimeapi.ListContainerStatsResponse{Stats: stats}, nil } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/envvars/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/envvars/BUILD index 335fb9ef673b..0583ebdf13da 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/envvars/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/envvars/BUILD @@ -12,8 +12,9 @@ go_library( "doc.go", "envvars.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/envvars", deps = [ - "//pkg/api/v1/helper:go_default_library", + "//pkg/apis/core/v1/helper:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", ], ) @@ -21,6 +22,7 @@ go_library( go_test( name = "go_default_xtest", srcs = ["envvars_test.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/envvars_test", deps = [ ":go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/envvars/envvars.go b/vendor/k8s.io/kubernetes/pkg/kubelet/envvars/envvars.go index 9dcb7ebbb4d1..789c20820cd3 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/envvars/envvars.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/envvars/envvars.go @@ -23,7 +23,7 @@ import ( "strings" "k8s.io/api/core/v1" - v1helper "k8s.io/kubernetes/pkg/api/v1/helper" + v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" ) // FromServices builds environment variables that a container is started with, diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/events/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/events/BUILD index eb4d17dddf39..2c43d69c129a 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/events/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/events/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["event.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/events", ) filegroup( diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/events/event.go b/vendor/k8s.io/kubernetes/pkg/kubelet/events/event.go index a1bb2682ed04..127405738117 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/events/event.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/events/event.go @@ -27,6 +27,12 @@ const ( BackOffStartContainer = "BackOff" ExceededGracePeriod = "ExceededGracePeriod" + // Pod event reason list + FailedToKillPod = "FailedKillPod" + FailedToCreatePodContainer = "FailedCreatePodContainer" + FailedToMakePodDataDirectories = "Failed" + NetworkNotReady = "NetworkNotReady" + // Image event reason list PullingImage = "Pulling" PulledImage = "Pulled" @@ -46,7 +52,11 @@ const ( FailedDetachVolume = "FailedDetachVolume" 
FailedMountVolume = "FailedMount" VolumeResizeFailed = "VolumeResizeFailed" + FileSystemResizeFailed = "FileSystemResizeFailed" + FileSystemResizeSuccess = "FileSystemResizeSuccessful" FailedUnMountVolume = "FailedUnMount" + FailedMapVolume = "FailedMapVolume" + FailedUnmapDevice = "FailedUnmapDevice" WarnAlreadyMountedVolume = "AlreadyMountedVolume" SuccessfulDetachVolume = "SuccessfulDetachVolume" SuccessfulMountVolume = "SuccessfulMountVolume" @@ -65,6 +75,7 @@ const ( UnsupportedMountOption = "UnsupportedMountOption" SandboxChanged = "SandboxChanged" FailedCreatePodSandBox = "FailedCreatePodSandBox" + FailedStatusPodSandBox = "FailedPodSandBoxStatus" // Image manager event reason list InvalidDiskCapacity = "InvalidDiskCapacity" diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/BUILD index f3d0ae93d791..0f4d6f820990 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/BUILD @@ -12,9 +12,11 @@ go_test( "eviction_manager_test.go", "helpers_test.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/eviction", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/features:go_default_library", "//pkg/kubelet/apis/stats/v1alpha1:go_default_library", "//pkg/kubelet/cm:go_default_library", "//pkg/kubelet/eviction/api:go_default_library", @@ -45,9 +47,10 @@ go_library( ], "//conditions:default": [], }), + importpath = "k8s.io/kubernetes/pkg/kubelet/eviction", deps = [ - "//pkg/api/v1/helper/qos:go_default_library", "//pkg/api/v1/resource:go_default_library", + "//pkg/apis/core/v1/helper/qos:go_default_library", "//pkg/features:go_default_library", "//pkg/kubelet/apis/stats/v1alpha1:go_default_library", "//pkg/kubelet/cm:go_default_library", @@ -58,12 +61,12 @@ go_library( "//pkg/kubelet/server/stats:go_default_library", "//pkg/kubelet/types:go_default_library", "//pkg/kubelet/util/format:go_default_library", + "//plugin/pkg/scheduler/util:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/client-go/tools/record:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/api/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/api/BUILD index e36c2fc1cfd6..dd51294b6dbe 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/api/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/api/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["types.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/eviction/api", deps = ["//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/api/types.go b/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/api/types.go index 9e40ac624604..f0e5e416c306 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/api/types.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/api/types.go @@ -50,6 +50,23 @@ const ( OpLessThan ThresholdOperator = "LessThan" ) +// OpForSignal 
maps Signals to ThresholdOperators. +// Today, the only supported operator is "LessThan". This may change in the future, +// for example if "consumed" (as opposed to "available") type signals are added. +// In both cases the directionality of the threshold is implicit to the signal type +// (for a given signal, the decision to evict will be made when crossing the threshold +// from either above or below, never both). There is thus no reason to expose the +// operator in the Kubelet's public API. Instead, we internally map signal types to operators. +var OpForSignal = map[Signal]ThresholdOperator{ + SignalMemoryAvailable: OpLessThan, + SignalNodeFsAvailable: OpLessThan, + SignalNodeFsInodesFree: OpLessThan, + SignalImageFsAvailable: OpLessThan, + SignalImageFsInodesFree: OpLessThan, + SignalAllocatableMemoryAvailable: OpLessThan, + SignalAllocatableNodeFsAvailable: OpLessThan, +} + // ThresholdValue is a value holder that abstracts literal versus percentage based quantity type ThresholdValue struct { // The following fields are exclusive. Only the topmost non-zero field is used. diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/eviction_manager.go b/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/eviction_manager.go index 14596188f6d7..90b1038cf150 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/eviction_manager.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/eviction_manager.go @@ -30,8 +30,8 @@ import ( "k8s.io/apimachinery/pkg/util/wait" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/tools/record" - v1qos "k8s.io/kubernetes/pkg/api/v1/helper/qos" apiv1resource "k8s.io/kubernetes/pkg/api/v1/resource" + v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos" "k8s.io/kubernetes/pkg/features" statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1" "k8s.io/kubernetes/pkg/kubelet/cm" @@ -233,7 +233,7 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act activePods := podFunc() // make observations and get a function to derive pod usage stats relative to those observations. 
- observations, statsFunc, err := makeSignalObservations(m.summaryProvider, capacityProvider, activePods, *m.dedicatedImageFs) + observations, statsFunc, err := makeSignalObservations(m.summaryProvider, capacityProvider, activePods) if err != nil { glog.Errorf("eviction manager: unexpected err: %v", err) return nil diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/helpers.go b/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/helpers.go index 9c639d5064f6..f8be310b34cc 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/helpers.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/helpers.go @@ -26,12 +26,13 @@ import ( "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/apimachinery/pkg/util/sets" - v1qos "k8s.io/kubernetes/pkg/api/v1/helper/qos" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/kubernetes/pkg/features" statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1" "k8s.io/kubernetes/pkg/kubelet/cm" evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api" "k8s.io/kubernetes/pkg/kubelet/server/stats" + schedulerutils "k8s.io/kubernetes/plugin/pkg/scheduler/util" ) const ( @@ -72,13 +73,11 @@ func init() { signalToNodeCondition[evictionapi.SignalNodeFsAvailable] = v1.NodeDiskPressure signalToNodeCondition[evictionapi.SignalImageFsInodesFree] = v1.NodeDiskPressure signalToNodeCondition[evictionapi.SignalNodeFsInodesFree] = v1.NodeDiskPressure - signalToNodeCondition[evictionapi.SignalAllocatableNodeFsAvailable] = v1.NodeDiskPressure // map signals to resources (and vice-versa) signalToResource = map[evictionapi.Signal]v1.ResourceName{} signalToResource[evictionapi.SignalMemoryAvailable] = v1.ResourceMemory signalToResource[evictionapi.SignalAllocatableMemoryAvailable] = v1.ResourceMemory - signalToResource[evictionapi.SignalAllocatableNodeFsAvailable] = resourceNodeFs signalToResource[evictionapi.SignalImageFsAvailable] = resourceImageFs signalToResource[evictionapi.SignalImageFsInodesFree] = resourceImageFsInodes signalToResource[evictionapi.SignalNodeFsAvailable] = resourceNodeFs @@ -99,7 +98,7 @@ func validSignal(signal evictionapi.Signal) bool { } // ParseThresholdConfig parses the flags for thresholds. -func ParseThresholdConfig(allocatableConfig []string, evictionHard, evictionSoft, evictionSoftGracePeriod, evictionMinimumReclaim string) ([]evictionapi.Threshold, error) { +func ParseThresholdConfig(allocatableConfig []string, evictionHard, evictionSoft, evictionSoftGracePeriod, evictionMinimumReclaim map[string]string) ([]evictionapi.Threshold, error) { results := []evictionapi.Threshold{} allocatableThresholds := getAllocatableThreshold(allocatableConfig) results = append(results, allocatableThresholds...) @@ -143,60 +142,34 @@ func ParseThresholdConfig(allocatableConfig []string, evictionHard, evictionSoft } // parseThresholdStatements parses the input statements into a list of Threshold objects. 
-func parseThresholdStatements(expr string) ([]evictionapi.Threshold, error) { - if len(expr) == 0 { +func parseThresholdStatements(statements map[string]string) ([]evictionapi.Threshold, error) { + if len(statements) == 0 { return nil, nil } results := []evictionapi.Threshold{} - statements := strings.Split(expr, ",") - signalsFound := sets.NewString() - for _, statement := range statements { - result, err := parseThresholdStatement(statement) + for signal, val := range statements { + result, err := parseThresholdStatement(evictionapi.Signal(signal), val) if err != nil { return nil, err } - if signalsFound.Has(string(result.Signal)) { - return nil, fmt.Errorf("found duplicate eviction threshold for signal %v", result.Signal) - } - signalsFound.Insert(string(result.Signal)) results = append(results, result) } return results, nil } // parseThresholdStatement parses a threshold statement. -func parseThresholdStatement(statement string) (evictionapi.Threshold, error) { - tokens2Operator := map[string]evictionapi.ThresholdOperator{ - "<": evictionapi.OpLessThan, - } - var ( - operator evictionapi.ThresholdOperator - parts []string - ) - for token := range tokens2Operator { - parts = strings.Split(statement, token) - // if we got a token, we know this was the operator... - if len(parts) > 1 { - operator = tokens2Operator[token] - break - } - } - if len(operator) == 0 || len(parts) != 2 { - return evictionapi.Threshold{}, fmt.Errorf("invalid eviction threshold syntax %v, expected ", statement) - } - signal := evictionapi.Signal(parts[0]) +func parseThresholdStatement(signal evictionapi.Signal, val string) (evictionapi.Threshold, error) { if !validSignal(signal) { return evictionapi.Threshold{}, fmt.Errorf(unsupportedEvictionSignal, signal) } - - quantityValue := parts[1] - if strings.HasSuffix(quantityValue, "%") { - percentage, err := parsePercentage(quantityValue) + operator := evictionapi.OpForSignal[signal] + if strings.HasSuffix(val, "%") { + percentage, err := parsePercentage(val) if err != nil { return evictionapi.Threshold{}, err } if percentage <= 0 { - return evictionapi.Threshold{}, fmt.Errorf("eviction percentage threshold %v must be positive: %s", signal, quantityValue) + return evictionapi.Threshold{}, fmt.Errorf("eviction percentage threshold %v must be positive: %s", signal, val) } return evictionapi.Threshold{ Signal: signal, @@ -206,7 +179,7 @@ func parseThresholdStatement(statement string) (evictionapi.Threshold, error) { }, }, nil } - quantity, err := resource.ParseQuantity(quantityValue) + quantity, err := resource.ParseQuantity(val) if err != nil { return evictionapi.Threshold{}, err } @@ -237,16 +210,6 @@ func getAllocatableThreshold(allocatableConfig []string) []evictionapi.Threshold Quantity: resource.NewQuantity(int64(0), resource.BinarySI), }, }, - { - Signal: evictionapi.SignalAllocatableNodeFsAvailable, - Operator: evictionapi.OpLessThan, - Value: evictionapi.ThresholdValue{ - Quantity: resource.NewQuantity(int64(0), resource.BinarySI), - }, - MinReclaim: &evictionapi.ThresholdValue{ - Quantity: resource.NewQuantity(int64(0), resource.BinarySI), - }, - }, } } } @@ -263,33 +226,22 @@ func parsePercentage(input string) (float32, error) { } // parseGracePeriods parses the grace period statements -func parseGracePeriods(expr string) (map[evictionapi.Signal]time.Duration, error) { - if len(expr) == 0 { +func parseGracePeriods(statements map[string]string) (map[evictionapi.Signal]time.Duration, error) { + if len(statements) == 0 { return nil, nil } results := 
map[evictionapi.Signal]time.Duration{} - statements := strings.Split(expr, ",") - for _, statement := range statements { - parts := strings.Split(statement, "=") - if len(parts) != 2 { - return nil, fmt.Errorf("invalid eviction grace period syntax %v, expected =", statement) - } - signal := evictionapi.Signal(parts[0]) + for signal, val := range statements { + signal := evictionapi.Signal(signal) if !validSignal(signal) { return nil, fmt.Errorf(unsupportedEvictionSignal, signal) } - - gracePeriod, err := time.ParseDuration(parts[1]) + gracePeriod, err := time.ParseDuration(val) if err != nil { return nil, err } if gracePeriod < 0 { - return nil, fmt.Errorf("invalid eviction grace period specified: %v, must be a positive value", parts[1]) - } - - // check against duplicate statements - if _, found := results[signal]; found { - return nil, fmt.Errorf("duplicate eviction grace period specified for %v", signal) + return nil, fmt.Errorf("invalid eviction grace period specified: %v, must be a positive value", val) } results[signal] = gracePeriod } @@ -297,51 +249,36 @@ func parseGracePeriods(expr string) (map[evictionapi.Signal]time.Duration, error } // parseMinimumReclaims parses the minimum reclaim statements -func parseMinimumReclaims(expr string) (map[evictionapi.Signal]evictionapi.ThresholdValue, error) { - if len(expr) == 0 { +func parseMinimumReclaims(statements map[string]string) (map[evictionapi.Signal]evictionapi.ThresholdValue, error) { + if len(statements) == 0 { return nil, nil } results := map[evictionapi.Signal]evictionapi.ThresholdValue{} - statements := strings.Split(expr, ",") - for _, statement := range statements { - parts := strings.Split(statement, "=") - if len(parts) != 2 { - return nil, fmt.Errorf("invalid eviction minimum reclaim syntax: %v, expected =", statement) - } - signal := evictionapi.Signal(parts[0]) + for signal, val := range statements { + signal := evictionapi.Signal(signal) if !validSignal(signal) { return nil, fmt.Errorf(unsupportedEvictionSignal, signal) } - - quantityValue := parts[1] - if strings.HasSuffix(quantityValue, "%") { - percentage, err := parsePercentage(quantityValue) + if strings.HasSuffix(val, "%") { + percentage, err := parsePercentage(val) if err != nil { return nil, err } if percentage <= 0 { - return nil, fmt.Errorf("eviction percentage minimum reclaim %v must be positive: %s", signal, quantityValue) - } - // check against duplicate statements - if _, found := results[signal]; found { - return nil, fmt.Errorf("duplicate eviction minimum reclaim specified for %v", signal) + return nil, fmt.Errorf("eviction percentage minimum reclaim %v must be positive: %s", signal, val) } results[signal] = evictionapi.ThresholdValue{ Percentage: percentage, } continue } - // check against duplicate statements - if _, found := results[signal]; found { - return nil, fmt.Errorf("duplicate eviction minimum reclaim specified for %v", signal) + quantity, err := resource.ParseQuantity(val) + if err != nil { + return nil, err } - quantity, err := resource.ParseQuantity(parts[1]) if quantity.Sign() < 0 { return nil, fmt.Errorf("negative eviction minimum reclaim specified for %v", signal) } - if err != nil { - return nil, err - } results[signal] = evictionapi.ThresholdValue{ Quantity: &quantity, } @@ -453,20 +390,12 @@ func podDiskUsage(podStats statsapi.PodStats, pod *v1.Pod, statsToMeasure []fsSt // podMemoryUsage aggregates pod memory usage. 
func podMemoryUsage(podStats statsapi.PodStats) (v1.ResourceList, error) { - disk := resource.Quantity{Format: resource.BinarySI} memory := resource.Quantity{Format: resource.BinarySI} for _, container := range podStats.Containers { - // disk usage (if known) - for _, fsStats := range []*statsapi.FsStats{container.Rootfs, container.Logs} { - disk.Add(*diskUsage(fsStats)) - } // memory usage (if known) memory.Add(*memoryUsage(container.Memory)) } - return v1.ResourceList{ - v1.ResourceMemory: memory, - resourceDisk: disk, - }, nil + return v1.ResourceList{v1.ResourceMemory: memory}, nil } // localEphemeralVolumeNames returns the set of ephemeral volumes for the pod that are local @@ -482,7 +411,7 @@ func localEphemeralVolumeNames(pod *v1.Pod) []string { return result } -// podLocalEphemeralStorageUsage aggregates pod local ephemeral storage usage and inode consumption for the specified stats to measure. +// podLocalEphemeralStorageUsage aggregates pod local ephemeral storage usage and inode consumption for the specified stats to measure. func podLocalEphemeralStorageUsage(podStats statsapi.PodStats, pod *v1.Pod, statsToMeasure []fsStatsType) (v1.ResourceList, error) { disk := resource.Quantity{Format: resource.BinarySI} inodes := resource.Quantity{Format: resource.BinarySI} @@ -586,77 +515,104 @@ func (ms *multiSorter) Less(i, j int) bool { return ms.cmp[k](p1, p2) < 0 } -// qosComparator compares pods by QoS (BestEffort < Burstable < Guaranteed) -func qosComparator(p1, p2 *v1.Pod) int { - qosP1 := v1qos.GetPodQOS(p1) - qosP2 := v1qos.GetPodQOS(p2) - // its a tie - if qosP1 == qosP2 { +// priority compares pods by Priority, if priority is enabled. +func priority(p1, p2 *v1.Pod) int { + if !utilfeature.DefaultFeatureGate.Enabled(features.PodPriority) { + // If priority is not enabled, all pods are equal. return 0 } - // if p1 is best effort, we know p2 is burstable or guaranteed - if qosP1 == v1.PodQOSBestEffort { - return -1 + priority1 := schedulerutils.GetPodPriority(p1) + priority2 := schedulerutils.GetPodPriority(p2) + if priority1 == priority2 { + return 0 } - // we know p1 and p2 are not besteffort, so if p1 is burstable, p2 must be guaranteed - if qosP1 == v1.PodQOSBurstable { - if qosP2 == v1.PodQOSGuaranteed { - return -1 - } + if priority1 > priority2 { return 1 } - // ok, p1 must be guaranteed. - return 1 + return -1 } -// memory compares pods by largest consumer of memory relative to request. -func memory(stats statsFunc) cmpFunc { +// exceedMemoryRequests compares whether or not pods' memory usage exceeds their requests +func exceedMemoryRequests(stats statsFunc) cmpFunc { return func(p1, p2 *v1.Pod) int { - p1Stats, found := stats(p1) - // if we have no usage stats for p1, we want p2 first - if !found { - return -1 + p1Stats, p1Found := stats(p1) + p2Stats, p2Found := stats(p2) + if !p1Found || !p2Found { + // prioritize evicting the pod for which no stats were found + return cmpBool(!p1Found, !p2Found) } - // if we have no usage stats for p2, but p1 has usage, we want p1 first. 
- p2Stats, found := stats(p2) - if !found { - return 1 + + p1Usage, p1Err := podMemoryUsage(p1Stats) + p2Usage, p2Err := podMemoryUsage(p2Stats) + if p1Err != nil || p2Err != nil { + // prioritize evicting the pod which had an error getting stats + return cmpBool(p1Err != nil, p2Err != nil) } - // if we cant get usage for p1 measured, we want p2 first - p1Usage, err := podMemoryUsage(p1Stats) - if err != nil { - return -1 + + p1Memory := p1Usage[v1.ResourceMemory] + p2Memory := p2Usage[v1.ResourceMemory] + p1ExceedsRequests := p1Memory.Cmp(podRequest(p1, v1.ResourceMemory)) == 1 + p2ExceedsRequests := p2Memory.Cmp(podRequest(p2, v1.ResourceMemory)) == 1 + // prioritize evicting the pod which exceeds its requests + return cmpBool(p1ExceedsRequests, p2ExceedsRequests) + } +} + +// memory compares pods by largest consumer of memory relative to request. +func memory(stats statsFunc) cmpFunc { + return func(p1, p2 *v1.Pod) int { + p1Stats, p1Found := stats(p1) + p2Stats, p2Found := stats(p2) + if !p1Found || !p2Found { + // prioritize evicting the pod for which no stats were found + return cmpBool(!p1Found, !p2Found) } - // if we cant get usage for p2 measured, we want p1 first - p2Usage, err := podMemoryUsage(p2Stats) - if err != nil { - return 1 + + p1Usage, p1Err := podMemoryUsage(p1Stats) + p2Usage, p2Err := podMemoryUsage(p2Stats) + if p1Err != nil || p2Err != nil { + // prioritize evicting the pod which had an error getting stats + return cmpBool(p1Err != nil, p2Err != nil) } // adjust p1, p2 usage relative to the request (if any) p1Memory := p1Usage[v1.ResourceMemory] - p1Request := podMemoryRequest(p1) + p1Request := podRequest(p1, v1.ResourceMemory) p1Memory.Sub(p1Request) p2Memory := p2Usage[v1.ResourceMemory] - p2Request := podMemoryRequest(p2) + p2Request := podRequest(p2, v1.ResourceMemory) p2Memory.Sub(p2Request) - // if p2 is using more than p1, we want p2 first + // prioritize evicting the pod which has the larger consumption of memory return p2Memory.Cmp(p1Memory) } } -// podMemoryRequest returns the total memory request of a pod which is the +// podRequest returns the total resource request of a pod which is the // max(sum of init container requests, sum of container requests) -func podMemoryRequest(pod *v1.Pod) resource.Quantity { +func podRequest(pod *v1.Pod, resourceName v1.ResourceName) resource.Quantity { containerValue := resource.Quantity{Format: resource.BinarySI} + if resourceName == resourceDisk && !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) { + // if the local storage capacity isolation feature gate is disabled, pods request 0 disk + return containerValue + } for i := range pod.Spec.Containers { - containerValue.Add(*pod.Spec.Containers[i].Resources.Requests.Memory()) + switch resourceName { + case v1.ResourceMemory: + containerValue.Add(*pod.Spec.Containers[i].Resources.Requests.Memory()) + case resourceDisk: + containerValue.Add(*pod.Spec.Containers[i].Resources.Requests.StorageEphemeral()) + } } initValue := resource.Quantity{Format: resource.BinarySI} for i := range pod.Spec.InitContainers { - initValue.Add(*pod.Spec.InitContainers[i].Resources.Requests.Memory()) + switch resourceName { + case v1.ResourceMemory: + containerValue.Add(*pod.Spec.Containers[i].Resources.Requests.Memory()) + case resourceDisk: + containerValue.Add(*pod.Spec.Containers[i].Resources.Requests.StorageEphemeral()) + } } if containerValue.Cmp(initValue) > 0 { return containerValue @@ -664,48 +620,82 @@ func podMemoryRequest(pod *v1.Pod) resource.Quantity 
{ return initValue } -// disk compares pods by largest consumer of disk relative to request for the specified disk resource. -func disk(stats statsFunc, fsStatsToMeasure []fsStatsType, diskResource v1.ResourceName) cmpFunc { +// exceedDiskRequests compares whether or not pods' disk usage exceeds their requests +func exceedDiskRequests(stats statsFunc, fsStatsToMeasure []fsStatsType, diskResource v1.ResourceName) cmpFunc { return func(p1, p2 *v1.Pod) int { - p1Stats, found := stats(p1) - // if we have no usage stats for p1, we want p2 first - if !found { - return -1 + p1Stats, p1Found := stats(p1) + p2Stats, p2Found := stats(p2) + if !p1Found || !p2Found { + // prioritize evicting the pod for which no stats were found + return cmpBool(!p1Found, !p2Found) } - // if we have no usage stats for p2, but p1 has usage, we want p1 first. - p2Stats, found := stats(p2) - if !found { - return 1 + + p1Usage, p1Err := podDiskUsage(p1Stats, p1, fsStatsToMeasure) + p2Usage, p2Err := podDiskUsage(p2Stats, p2, fsStatsToMeasure) + if p1Err != nil || p2Err != nil { + // prioritize evicting the pod which had an error getting stats + return cmpBool(p1Err != nil, p2Err != nil) } - // if we cant get usage for p1 measured, we want p2 first - p1Usage, err := podDiskUsage(p1Stats, p1, fsStatsToMeasure) - if err != nil { - return -1 + + p1Disk := p1Usage[diskResource] + p2Disk := p2Usage[diskResource] + p1ExceedsRequests := p1Disk.Cmp(podRequest(p1, diskResource)) == 1 + p2ExceedsRequests := p2Disk.Cmp(podRequest(p2, diskResource)) == 1 + // prioritize evicting the pod which exceeds its requests + return cmpBool(p1ExceedsRequests, p2ExceedsRequests) + } +} + +// disk compares pods by largest consumer of disk relative to request for the specified disk resource. +func disk(stats statsFunc, fsStatsToMeasure []fsStatsType, diskResource v1.ResourceName) cmpFunc { + return func(p1, p2 *v1.Pod) int { + p1Stats, p1Found := stats(p1) + p2Stats, p2Found := stats(p2) + if !p1Found || !p2Found { + // prioritize evicting the pod for which no stats were found + return cmpBool(!p1Found, !p2Found) } - // if we cant get usage for p2 measured, we want p1 first - p2Usage, err := podDiskUsage(p2Stats, p2, fsStatsToMeasure) - if err != nil { - return 1 + p1Usage, p1Err := podDiskUsage(p1Stats, p1, fsStatsToMeasure) + p2Usage, p2Err := podDiskUsage(p2Stats, p2, fsStatsToMeasure) + if p1Err != nil || p2Err != nil { + // prioritize evicting the pod which had an error getting stats + return cmpBool(p1Err != nil, p2Err != nil) } - // disk is best effort, so we don't measure relative to a request. - // TODO: add disk as a guaranteed resource + // adjust p1, p2 usage relative to the request (if any) p1Disk := p1Usage[diskResource] p2Disk := p2Usage[diskResource] - // if p2 is using more than p1, we want p2 first + p1Request := podRequest(p1, resourceDisk) + p1Disk.Sub(p1Request) + p2Request := podRequest(p2, resourceDisk) + p2Disk.Sub(p2Request) + // prioritize evicting the pod which has the larger consumption of disk return p2Disk.Cmp(p1Disk) } } +// cmpBool compares booleans, placing true before false +func cmpBool(a, b bool) int { + if a == b { + return 0 + } + if !b { + return -1 + } + return 1 +} + // rankMemoryPressure orders the input pods for eviction in response to memory pressure. +// It ranks by whether or not the pod's usage exceeds its requests, then by priority, and +// finally by memory usage above requests. 
func rankMemoryPressure(pods []*v1.Pod, stats statsFunc) { - orderedBy(qosComparator, memory(stats)).Sort(pods) + orderedBy(exceedMemoryRequests(stats), priority, memory(stats)).Sort(pods) } // rankDiskPressureFunc returns a rankFunc that measures the specified fs stats. func rankDiskPressureFunc(fsStatsToMeasure []fsStatsType, diskResource v1.ResourceName) rankFunc { return func(pods []*v1.Pod, stats statsFunc) { - orderedBy(qosComparator, disk(stats, fsStatsToMeasure, diskResource)).Sort(pods) + orderedBy(exceedDiskRequests(stats, fsStatsToMeasure, diskResource), priority, disk(stats, fsStatsToMeasure, diskResource)).Sort(pods) } } @@ -721,7 +711,7 @@ func (a byEvictionPriority) Less(i, j int) bool { } // makeSignalObservations derives observations using the specified summary provider. -func makeSignalObservations(summaryProvider stats.SummaryProvider, capacityProvider CapacityProvider, pods []*v1.Pod, withImageFs bool) (signalObservations, statsFunc, error) { +func makeSignalObservations(summaryProvider stats.SummaryProvider, capacityProvider CapacityProvider, pods []*v1.Pod) (signalObservations, statsFunc, error) { summary, err := summaryProvider.Get() if err != nil { return nil, nil, err @@ -773,11 +763,11 @@ func makeSignalObservations(summaryProvider stats.SummaryProvider, capacityProvi } } - nodeCapacity := capacityProvider.GetCapacity() - allocatableReservation := capacityProvider.GetNodeAllocatableReservation() - - memoryAllocatableCapacity, memoryAllocatableAvailable, exist := getResourceAllocatable(nodeCapacity, allocatableReservation, v1.ResourceMemory) - if exist { + if memoryAllocatableCapacity, ok := capacityProvider.GetCapacity()[v1.ResourceMemory]; ok { + memoryAllocatableAvailable := memoryAllocatableCapacity.Copy() + if reserved, exists := capacityProvider.GetNodeAllocatableReservation()[v1.ResourceMemory]; exists { + memoryAllocatableAvailable.Sub(reserved) + } for _, pod := range summary.Pods { mu, err := podMemoryUsage(pod) if err == nil { @@ -786,53 +776,15 @@ func makeSignalObservations(summaryProvider stats.SummaryProvider, capacityProvi } result[evictionapi.SignalAllocatableMemoryAvailable] = signalObservation{ available: memoryAllocatableAvailable, - capacity: memoryAllocatableCapacity, - } - } - - ephemeralStorageCapacity, ephemeralStorageAllocatable, exist := getResourceAllocatable(nodeCapacity, allocatableReservation, v1.ResourceEphemeralStorage) - if exist { - for _, pod := range pods { - podStat, ok := statsFunc(pod) - if !ok { - continue - } - - fsStatsSet := []fsStatsType{} - if withImageFs { - fsStatsSet = []fsStatsType{fsStatsLogs, fsStatsLocalVolumeSource} - } else { - fsStatsSet = []fsStatsType{fsStatsRoot, fsStatsLogs, fsStatsLocalVolumeSource} - } - - usage, err := podDiskUsage(podStat, pod, fsStatsSet) - if err != nil { - glog.Warningf("eviction manager: error getting pod disk usage %v", err) - continue - } - ephemeralStorageAllocatable.Sub(usage[resourceDisk]) - } - result[evictionapi.SignalAllocatableNodeFsAvailable] = signalObservation{ - available: ephemeralStorageAllocatable, - capacity: ephemeralStorageCapacity, + capacity: &memoryAllocatableCapacity, } + } else { + glog.Errorf("Could not find capacity information for resource %v", v1.ResourceMemory) } return result, statsFunc, nil } -func getResourceAllocatable(capacity v1.ResourceList, reservation v1.ResourceList, resourceName v1.ResourceName) (*resource.Quantity, *resource.Quantity, bool) { - if capacity, ok := capacity[resourceName]; ok { - allocate := capacity.Copy() - if reserved, 
exists := reservation[resourceName]; exists { - allocate.Sub(reserved) - } - return capacity.Copy(), allocate, true - } - glog.Errorf("Could not find capacity information for resource %v", resourceName) - return nil, nil, false -} - // thresholdsMet returns the set of thresholds that were met independent of grace period func thresholdsMet(thresholds []evictionapi.Threshold, observations signalObservations, enforceMinReclaim bool) []evictionapi.Threshold { results := []evictionapi.Threshold{} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/types.go b/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/types.go index 7b97f37b0618..f9548510c81b 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/types.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/types.go @@ -72,7 +72,7 @@ type DiskInfoProvider interface { type CapacityProvider interface { // GetCapacity returns the amount of compute resources tracked by container manager available on the node. GetCapacity() v1.ResourceList - // GetNodeAllocatable returns the amount of compute resources that have to be reserved from scheduling. + // GetNodeAllocatableReservation returns the amount of compute resources that have to be reserved from scheduling. GetNodeAllocatableReservation() v1.ResourceList } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/gpu/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/gpu/BUILD index 885d267ad294..1747aa67a24e 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/gpu/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/gpu/BUILD @@ -11,6 +11,7 @@ go_library( "gpu_manager_stub.go", "types.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/gpu", deps = ["//vendor/k8s.io/api/core/v1:go_default_library"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/gpu/OWNERS b/vendor/k8s.io/kubernetes/pkg/kubelet/gpu/OWNERS index 7d59668a62d0..7635d5b58d9a 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/gpu/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/gpu/OWNERS @@ -5,4 +5,8 @@ approvers: - yujuhong reviewers: - cmluciano -- sig-node-reviewers \ No newline at end of file +- jiayingz +- mindprince +- RenaudWasTaken +- vishh +- sig-node-reviewers diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/gpu/nvidia/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/gpu/nvidia/BUILD index 71f110e8a079..62082545e713 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/gpu/nvidia/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/gpu/nvidia/BUILD @@ -12,7 +12,9 @@ go_library( "helpers.go", "nvidia_gpu_manager.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/gpu/nvidia", deps = [ + "//pkg/kubelet/dockershim:go_default_library", "//pkg/kubelet/dockershim/libdocker:go_default_library", "//pkg/kubelet/gpu:go_default_library", "//vendor/github.com/golang/glog:go_default_library", @@ -38,8 +40,10 @@ filegroup( go_test( name = "go_default_test", srcs = ["nvidia_gpu_manager_test.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/gpu/nvidia", library = ":go_default_library", deps = [ + "//pkg/kubelet/dockershim:go_default_library", "//pkg/kubelet/dockershim/libdocker:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/gpu/nvidia/nvidia_gpu_manager.go b/vendor/k8s.io/kubernetes/pkg/kubelet/gpu/nvidia/nvidia_gpu_manager.go index c347cd2addf7..37c30d14b558 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/gpu/nvidia/nvidia_gpu_manager.go +++ 
b/vendor/k8s.io/kubernetes/pkg/kubelet/gpu/nvidia/nvidia_gpu_manager.go @@ -30,6 +30,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/kubernetes/pkg/kubelet/dockershim" "k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker" "k8s.io/kubernetes/pkg/kubelet/gpu" ) @@ -43,8 +44,11 @@ const ( // Optional device. nvidiaUVMToolsDevice string = "/dev/nvidia-uvm-tools" devDirectory = "/dev" - nvidiaDeviceRE = `^nvidia[0-9]*$` - nvidiaFullpathRE = `^/dev/nvidia[0-9]*$` +) + +var ( + nvidiaDeviceRE = regexp.MustCompile(`^nvidia[0-9]*$`) + nvidiaFullpathRE = regexp.MustCompile(`^/dev/nvidia[0-9]*$`) ) type activePodsLister interface { @@ -67,10 +71,12 @@ type nvidiaGPUManager struct { // NewNvidiaGPUManager returns a GPUManager that manages local Nvidia GPUs. // TODO: Migrate to use pod level cgroups and make it generic to all runtimes. -func NewNvidiaGPUManager(activePodsLister activePodsLister, dockerClient libdocker.Interface) (gpu.GPUManager, error) { +func NewNvidiaGPUManager(activePodsLister activePodsLister, config *dockershim.ClientConfig) (gpu.GPUManager, error) { + dockerClient := dockershim.NewDockerClientFromConfig(config) if dockerClient == nil { - return nil, fmt.Errorf("invalid docker client specified") + return nil, fmt.Errorf("invalid docker client configure specified") } + return &nvidiaGPUManager{ allGPUs: sets.NewString(), dockerClient: dockerClient, @@ -191,7 +197,6 @@ func (ngm *nvidiaGPUManager) updateAllocatedGPUs() { // we want more features, features like schedule containers according to GPU family // name. func (ngm *nvidiaGPUManager) discoverGPUs() error { - reg := regexp.MustCompile(nvidiaDeviceRE) files, err := ioutil.ReadDir(devDirectory) if err != nil { return err @@ -200,7 +205,7 @@ func (ngm *nvidiaGPUManager) discoverGPUs() error { if f.IsDir() { continue } - if reg.MatchString(f.Name()) { + if nvidiaDeviceRE.MatchString(f.Name()) { glog.V(2).Infof("Found Nvidia GPU %q", f.Name()) ngm.allGPUs.Insert(path.Join(devDirectory, f.Name())) } @@ -271,5 +276,5 @@ func (ngm *nvidiaGPUManager) gpusInUse() *podGPUs { } func isValidPath(path string) bool { - return regexp.MustCompile(nvidiaFullpathRE).MatchString(path) + return nvidiaFullpathRE.MatchString(path) } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/images/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/images/BUILD index e67fb3b76400..8a8684920f4f 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/images/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/images/BUILD @@ -16,8 +16,9 @@ go_library( "puller.go", "types.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/images", deps = [ - "//pkg/kubelet/cadvisor:go_default_library", + "//pkg/kubelet/apis/stats/v1alpha1:go_default_library", "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/events:go_default_library", "//pkg/util/parsers:go_default_library", @@ -38,12 +39,13 @@ go_test( "image_gc_manager_test.go", "image_manager_test.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/images", library = ":go_default_library", deps = [ - "//pkg/kubelet/cadvisor/testing:go_default_library", + "//pkg/kubelet/apis/stats/v1alpha1:go_default_library", "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/container/testing:go_default_library", - "//vendor/github.com/google/cadvisor/info/v2:go_default_library", + "//pkg/kubelet/server/stats/testing:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", 
"//vendor/github.com/stretchr/testify/require:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/images/image_gc_manager.go b/vendor/k8s.io/kubernetes/pkg/kubelet/images/image_gc_manager.go index 4b188ed87221..344e0156a498 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/images/image_gc_manager.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/images/image_gc_manager.go @@ -17,6 +17,7 @@ limitations under the License. package images import ( + goerrors "errors" "fmt" "math" "sort" @@ -24,17 +25,25 @@ import ( "time" "github.com/golang/glog" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/record" - "k8s.io/kubernetes/pkg/kubelet/cadvisor" + statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1" "k8s.io/kubernetes/pkg/kubelet/container" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/events" ) +// StatsProvider is an interface for fetching stats used during image garbage +// collection. +type StatsProvider interface { + // ImageFsStats returns the stats of the image filesystem. + ImageFsStats() (*statsapi.FsStats, error) +} + // Manages lifecycle of all images. // // Implementation is thread-safe. @@ -78,8 +87,8 @@ type realImageGCManager struct { // The image garbage collection policy in use. policy ImageGCPolicy - // cAdvisor instance. - cadvisor cadvisor.Interface + // statsProvider provides stats used during image garbage collection. + statsProvider StatsProvider // Recorder for Kubernetes events. recorder record.EventRecorder @@ -128,7 +137,7 @@ type imageRecord struct { size int64 } -func NewImageGCManager(runtime container.Runtime, cadvisorInterface cadvisor.Interface, recorder record.EventRecorder, nodeRef *v1.ObjectReference, policy ImageGCPolicy) (ImageGCManager, error) { +func NewImageGCManager(runtime container.Runtime, statsProvider StatsProvider, recorder record.EventRecorder, nodeRef *v1.ObjectReference, policy ImageGCPolicy) (ImageGCManager, error) { // Validate policy. if policy.HighThresholdPercent < 0 || policy.HighThresholdPercent > 100 { return nil, fmt.Errorf("invalid HighThresholdPercent %d, must be in range [0-100]", policy.HighThresholdPercent) @@ -140,13 +149,13 @@ func NewImageGCManager(runtime container.Runtime, cadvisorInterface cadvisor.Int return nil, fmt.Errorf("LowThresholdPercent %d can not be higher than HighThresholdPercent %d", policy.LowThresholdPercent, policy.HighThresholdPercent) } im := &realImageGCManager{ - runtime: runtime, - policy: policy, - imageRecords: make(map[string]*imageRecord), - cadvisor: cadvisorInterface, - recorder: recorder, - nodeRef: nodeRef, - initialized: false, + runtime: runtime, + policy: policy, + imageRecords: make(map[string]*imageRecord), + statsProvider: statsProvider, + recorder: recorder, + nodeRef: nodeRef, + initialized: false, } return im, nil @@ -244,12 +253,19 @@ func (im *realImageGCManager) detectImages(detectTime time.Time) error { func (im *realImageGCManager) GarbageCollect() error { // Get disk usage on disk holding images. 
- fsInfo, err := im.cadvisor.ImagesFsInfo() + fsStats, err := im.statsProvider.ImageFsStats() if err != nil { return err } - capacity := int64(fsInfo.Capacity) - available := int64(fsInfo.Available) + + var capacity, available int64 + if fsStats.CapacityBytes != nil { + capacity = int64(*fsStats.CapacityBytes) + } + if fsStats.AvailableBytes != nil { + available = int64(*fsStats.AvailableBytes) + } + if available > capacity { glog.Warningf("available %d is larger than capacity %d", available, capacity) available = capacity @@ -257,7 +273,7 @@ func (im *realImageGCManager) GarbageCollect() error { // Check valid capacity. if capacity == 0 { - err := fmt.Errorf("invalid capacity %d on device %q at mount point %q", capacity, fsInfo.Device, fsInfo.Mountpoint) + err := goerrors.New("invalid capacity 0 on image filesystem") im.recorder.Eventf(im.nodeRef, v1.EventTypeWarning, events.InvalidDiskCapacity, err.Error()) return err } @@ -266,7 +282,7 @@ func (im *realImageGCManager) GarbageCollect() error { usagePercent := 100 - int(available*100/capacity) if usagePercent >= im.policy.HighThresholdPercent { amountToFree := capacity*int64(100-im.policy.LowThresholdPercent)/100 - available - glog.Infof("[imageGCManager]: Disk usage on %q (%s) is at %d%% which is over the high threshold (%d%%). Trying to free %d bytes", fsInfo.Device, fsInfo.Mountpoint, usagePercent, im.policy.HighThresholdPercent, amountToFree) + glog.Infof("[imageGCManager]: Disk usage on image filesystem is at %d%% which is over the high threshold (%d%%). Trying to free %d bytes", usagePercent, im.policy.HighThresholdPercent, amountToFree) freed, err := im.freeSpace(amountToFree, time.Now()) if err != nil { return err diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go index ef7831ac6da7..fab8dcefbb67 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go @@ -19,14 +19,11 @@ package kubelet import ( "crypto/tls" "fmt" - "io/ioutil" "net" "net/http" "net/url" "os" "path" - "path/filepath" - goruntime "runtime" "sort" "strings" "sync" @@ -35,8 +32,6 @@ import ( "github.com/golang/glog" - clientgoclientset "k8s.io/client-go/kubernetes" - cadvisorapi "github.com/google/cadvisor/info/v1" cadvisorapiv2 "github.com/google/cadvisor/info/v2" "k8s.io/api/core/v1" @@ -54,23 +49,22 @@ import ( corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/certificate" "k8s.io/client-go/util/flowcontrol" "k8s.io/client-go/util/integer" - "k8s.io/kubernetes/cmd/kubelet/app/options" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/features" internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" kubeletconfiginternal "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig" kubeletconfigv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1" "k8s.io/kubernetes/pkg/kubelet/cadvisor" - "k8s.io/kubernetes/pkg/kubelet/certificate" + kubeletcertificate "k8s.io/kubernetes/pkg/kubelet/certificate" "k8s.io/kubernetes/pkg/kubelet/cm" "k8s.io/kubernetes/pkg/kubelet/config" "k8s.io/kubernetes/pkg/kubelet/configmap" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/dockershim" - "k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker" dockerremote "k8s.io/kubernetes/pkg/kubelet/dockershim/remote" "k8s.io/kubernetes/pkg/kubelet/events" 
"k8s.io/kubernetes/pkg/kubelet/eviction" @@ -82,6 +76,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/lifecycle" "k8s.io/kubernetes/pkg/kubelet/metrics" "k8s.io/kubernetes/pkg/kubelet/network" + "k8s.io/kubernetes/pkg/kubelet/network/dns" "k8s.io/kubernetes/pkg/kubelet/pleg" kubepod "k8s.io/kubernetes/pkg/kubelet/pod" "k8s.io/kubernetes/pkg/kubelet/preemption" @@ -196,13 +191,35 @@ type Bootstrap interface { // Builder creates and initializes a Kubelet instance type Builder func(kubeCfg *kubeletconfiginternal.KubeletConfiguration, kubeDeps *Dependencies, - crOptions *options.ContainerRuntimeOptions, - hostnameOverride, - nodeIP, - providerID, - cloudProvider, - certDirectory, - rootDirectory string) (Bootstrap, error) + crOptions *config.ContainerRuntimeOptions, + containerRuntime string, + runtimeCgroups string, + hostnameOverride string, + nodeIP string, + providerID string, + cloudProvider string, + certDirectory string, + rootDirectory string, + registerNode bool, + registerWithTaints []api.Taint, + allowedUnsafeSysctls []string, + containerized bool, + remoteRuntimeEndpoint string, + remoteImageEndpoint string, + experimentalMounterPath string, + experimentalKernelMemcgNotification bool, + experimentalCheckNodeCapabilitiesBeforeMount bool, + experimentalNodeAllocatableIgnoreEvictionThreshold bool, + minimumGCAge metav1.Duration, + maxPerPodContainerCount int32, + maxContainerCount int32, + masterServiceNamespace string, + registerSchedulable bool, + nonMasqueradeCIDR string, + keepTerminatedPodVolumes bool, + nodeLabels map[string]string, + seccompProfileRoot string, + bootstrapCheckpointPath string) (Bootstrap, error) // Dependencies is a bin for things we might consider "injected dependencies" -- objects constructed // at runtime that are necessary for running the Kubelet. This is a temporary solution for grouping @@ -235,11 +252,11 @@ type Dependencies struct { CAdvisorInterface cadvisor.Interface Cloud cloudprovider.Interface ContainerManager cm.ContainerManager - DockerClient libdocker.Interface + DockerClientConfig *dockershim.ClientConfig EventClient v1core.EventsGetter HeartbeatClient v1core.CoreV1Interface KubeClient clientset.Interface - ExternalKubeClient clientgoclientset.Interface + ExternalKubeClient clientset.Interface Mounter mount.Interface NetworkPlugins []network.NetworkPlugin OOMAdjuster *oom.OOMAdjuster @@ -255,14 +272,14 @@ type Dependencies struct { // makePodSourceConfig creates a config.PodConfig from the given // KubeletConfiguration or returns an error. 
-func makePodSourceConfig(kubeCfg *kubeletconfiginternal.KubeletConfiguration, kubeDeps *Dependencies, nodeName types.NodeName) (*config.PodConfig, error) { +func makePodSourceConfig(kubeCfg *kubeletconfiginternal.KubeletConfiguration, kubeDeps *Dependencies, nodeName types.NodeName, bootstrapCheckpointPath string) (*config.PodConfig, error) { manifestURLHeader := make(http.Header) - if kubeCfg.ManifestURLHeader != "" { - pieces := strings.Split(kubeCfg.ManifestURLHeader, ":") - if len(pieces) != 2 { - return nil, fmt.Errorf("manifest-url-header must have a single ':' key-value separator, got %q", kubeCfg.ManifestURLHeader) + if len(kubeCfg.ManifestURLHeader) > 0 { + for k, v := range kubeCfg.ManifestURLHeader { + for i := range v { + manifestURLHeader.Add(k, v[i]) + } } - manifestURLHeader.Set(pieces[0], pieces[1]) } // source of all configuration @@ -270,7 +287,7 @@ func makePodSourceConfig(kubeCfg *kubeletconfiginternal.KubeletConfiguration, ku // define file config source if kubeCfg.PodManifestPath != "" { - glog.Infof("Adding manifest file: %v", kubeCfg.PodManifestPath) + glog.Infof("Adding manifest path: %v", kubeCfg.PodManifestPath) config.NewSourceFile(kubeCfg.PodManifestPath, nodeName, kubeCfg.FileCheckFrequency.Duration, cfg.Channel(kubetypes.FileSource)) } @@ -279,19 +296,32 @@ func makePodSourceConfig(kubeCfg *kubeletconfiginternal.KubeletConfiguration, ku glog.Infof("Adding manifest url %q with HTTP header %v", kubeCfg.ManifestURL, manifestURLHeader) config.NewSourceURL(kubeCfg.ManifestURL, manifestURLHeader, nodeName, kubeCfg.HTTPCheckFrequency.Duration, cfg.Channel(kubetypes.HTTPSource)) } + + // Restore from the checkpoint path + // NOTE: This MUST happen before creating the apiserver source + // below, or the checkpoint would override the source of truth. + updatechannel := cfg.Channel(kubetypes.ApiserverSource) + if bootstrapCheckpointPath != "" { + glog.Infof("Adding checkpoint path: %v", bootstrapCheckpointPath) + err := cfg.Restore(bootstrapCheckpointPath, updatechannel) + if err != nil { + return nil, err + } + } + if kubeDeps.KubeClient != nil { glog.Infof("Watching apiserver") - config.NewSourceApiserver(kubeDeps.KubeClient, nodeName, cfg.Channel(kubetypes.ApiserverSource)) + config.NewSourceApiserver(kubeDeps.KubeClient, nodeName, updatechannel) } return cfg, nil } -func getRuntimeAndImageServices(config *kubeletconfiginternal.KubeletConfiguration) (internalapi.RuntimeService, internalapi.ImageManagerService, error) { - rs, err := remote.NewRemoteRuntimeService(config.RemoteRuntimeEndpoint, config.RuntimeRequestTimeout.Duration) +func getRuntimeAndImageServices(remoteRuntimeEndpoint string, remoteImageEndpoint string, runtimeRequestTimeout metav1.Duration) (internalapi.RuntimeService, internalapi.ImageManagerService, error) { + rs, err := remote.NewRemoteRuntimeService(remoteRuntimeEndpoint, runtimeRequestTimeout.Duration) if err != nil { return nil, nil, err } - is, err := remote.NewRemoteImageService(config.RemoteImageEndpoint, config.RuntimeRequestTimeout.Duration) + is, err := remote.NewRemoteImageService(remoteImageEndpoint, runtimeRequestTimeout.Duration) if err != nil { return nil, nil, err } @@ -302,13 +332,35 @@ func getRuntimeAndImageServices(config *kubeletconfiginternal.KubeletConfigurati // No initialization of Kubelet and its modules should happen here. 
func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, kubeDeps *Dependencies, - crOptions *options.ContainerRuntimeOptions, - hostnameOverride, - nodeIP, - providerID, - cloudProvider, - certDirectory, - rootDirectory string) (*Kubelet, error) { + crOptions *config.ContainerRuntimeOptions, + containerRuntime string, + runtimeCgroups string, + hostnameOverride string, + nodeIP string, + providerID string, + cloudProvider string, + certDirectory string, + rootDirectory string, + registerNode bool, + registerWithTaints []api.Taint, + allowedUnsafeSysctls []string, + containerized bool, + remoteRuntimeEndpoint string, + remoteImageEndpoint string, + experimentalMounterPath string, + experimentalKernelMemcgNotification bool, + experimentalCheckNodeCapabilitiesBeforeMount bool, + experimentalNodeAllocatableIgnoreEvictionThreshold bool, + minimumGCAge metav1.Duration, + maxPerPodContainerCount int32, + maxContainerCount int32, + masterServiceNamespace string, + registerSchedulable bool, + nonMasqueradeCIDR string, + keepTerminatedPodVolumes bool, + nodeLabels map[string]string, + seccompProfileRoot string, + bootstrapCheckpointPath string) (*Kubelet, error) { if rootDirectory == "" { return nil, fmt.Errorf("invalid root directory %q", rootDirectory) } @@ -369,16 +421,16 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, if kubeDeps.PodConfig == nil { var err error - kubeDeps.PodConfig, err = makePodSourceConfig(kubeCfg, kubeDeps, nodeName) + kubeDeps.PodConfig, err = makePodSourceConfig(kubeCfg, kubeDeps, nodeName, bootstrapCheckpointPath) if err != nil { return nil, err } } containerGCPolicy := kubecontainer.ContainerGCPolicy{ - MinAge: kubeCfg.MinimumGCAge.Duration, - MaxPerPodContainer: int(kubeCfg.MaxPerPodContainerCount), - MaxContainers: int(kubeCfg.MaxContainerCount), + MinAge: minimumGCAge.Duration, + MaxPerPodContainer: int(maxPerPodContainerCount), + MaxContainers: int(maxContainerCount), } daemonEndpoints := &v1.NodeDaemonEndpoints{ @@ -392,7 +444,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, } enforceNodeAllocatable := kubeCfg.EnforceNodeAllocatable - if kubeCfg.ExperimentalNodeAllocatableIgnoreEvictionThreshold { + if experimentalNodeAllocatableIgnoreEvictionThreshold { // Do not provide kubeCfg.EnforceNodeAllocatable to eviction threshold parsing if we are not enforcing Evictions enforceNodeAllocatable = []string{} } @@ -404,12 +456,12 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, PressureTransitionPeriod: kubeCfg.EvictionPressureTransitionPeriod.Duration, MaxPodGracePeriodSeconds: int64(kubeCfg.EvictionMaxPodGracePeriod), Thresholds: thresholds, - KernelMemcgNotification: kubeCfg.ExperimentalKernelMemcgNotification, + KernelMemcgNotification: experimentalKernelMemcgNotification, } serviceIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) if kubeDeps.KubeClient != nil { - serviceLW := cache.NewListWatchFromClient(kubeDeps.KubeClient.Core().RESTClient(), "services", metav1.NamespaceAll, fields.Everything()) + serviceLW := cache.NewListWatchFromClient(kubeDeps.KubeClient.CoreV1().RESTClient(), "services", metav1.NamespaceAll, fields.Everything()) r := cache.NewReflector(serviceLW, &v1.Service{}, serviceIndexer, 0) go r.Run(wait.NeverStop) } @@ -418,7 +470,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, nodeIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) 
if kubeDeps.KubeClient != nil { fieldSelector := fields.Set{api.ObjectNameField: string(nodeName)}.AsSelector() - nodeLW := cache.NewListWatchFromClient(kubeDeps.KubeClient.Core().RESTClient(), "nodes", metav1.NamespaceAll, fieldSelector) + nodeLW := cache.NewListWatchFromClient(kubeDeps.KubeClient.CoreV1().RESTClient(), "nodes", metav1.NamespaceAll, fieldSelector) r := cache.NewReflector(nodeLW, &v1.Node{}, nodeIndexer, 0) go r.Run(wait.NeverStop) } @@ -448,6 +500,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, } } httpClient := &http.Client{} + parsedNodeIP := net.ParseIP(nodeIP) klet := &Kubelet{ hostname: hostname, @@ -457,13 +510,13 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, rootDirectory: rootDirectory, resyncInterval: kubeCfg.SyncFrequency.Duration, sourcesReady: config.NewSourcesReady(kubeDeps.PodConfig.SeenAllSources), - registerNode: kubeCfg.RegisterNode, - registerSchedulable: kubeCfg.RegisterSchedulable, - clusterDomain: kubeCfg.ClusterDomain, - clusterDNS: clusterDNS, + registerNode: registerNode, + registerWithTaints: registerWithTaints, + registerSchedulable: registerSchedulable, + dnsConfigurer: dns.NewConfigurer(kubeDeps.Recorder, nodeRef, parsedNodeIP, clusterDNS, kubeCfg.ClusterDomain, kubeCfg.ResolverConfig), serviceLister: serviceLister, nodeInfo: nodeInfo, - masterServiceNamespace: kubeCfg.MasterServiceNamespace, + masterServiceNamespace: masterServiceNamespace, streamingConnectionIdleTimeout: kubeCfg.StreamingConnectionIdleTimeout.Duration, recorder: kubeDeps.Recorder, cadvisor: kubeDeps.CAdvisorInterface, @@ -472,28 +525,29 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, externalCloudProvider: cloudprovider.IsExternal(cloudProvider), providerID: providerID, nodeRef: nodeRef, - nodeLabels: kubeCfg.NodeLabels, + nodeLabels: nodeLabels, nodeStatusUpdateFrequency: kubeCfg.NodeStatusUpdateFrequency.Duration, - os: kubeDeps.OSInterface, - oomWatcher: oomWatcher, - cgroupsPerQOS: kubeCfg.CgroupsPerQOS, - cgroupRoot: kubeCfg.CgroupRoot, - mounter: kubeDeps.Mounter, - writer: kubeDeps.Writer, - maxPods: int(kubeCfg.MaxPods), - podsPerCore: int(kubeCfg.PodsPerCore), - syncLoopMonitor: atomic.Value{}, - resolverConfig: kubeCfg.ResolverConfig, - daemonEndpoints: daemonEndpoints, - containerManager: kubeDeps.ContainerManager, - nodeIP: net.ParseIP(nodeIP), - clock: clock.RealClock{}, + os: kubeDeps.OSInterface, + oomWatcher: oomWatcher, + cgroupsPerQOS: kubeCfg.CgroupsPerQOS, + cgroupRoot: kubeCfg.CgroupRoot, + mounter: kubeDeps.Mounter, + writer: kubeDeps.Writer, + maxPods: int(kubeCfg.MaxPods), + podsPerCore: int(kubeCfg.PodsPerCore), + syncLoopMonitor: atomic.Value{}, + daemonEndpoints: daemonEndpoints, + containerManager: kubeDeps.ContainerManager, + containerRuntimeName: containerRuntime, + nodeIP: parsedNodeIP, + clock: clock.RealClock{}, enableControllerAttachDetach: kubeCfg.EnableControllerAttachDetach, iptClient: utilipt.New(utilexec.New(), utildbus.New(), utilipt.ProtocolIpv4), makeIPTablesUtilChains: kubeCfg.MakeIPTablesUtilChains, iptablesMasqueradeBit: int(kubeCfg.IPTablesMasqueradeBit), iptablesDropBit: int(kubeCfg.IPTablesDropBit), experimentalHostUserNamespaceDefaulting: utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalHostUserNamespaceDefaultingGate), + keepTerminatedPodVolumes: keepTerminatedPodVolumes, } secretManager := secret.NewCachingSecretManager( @@ -508,7 +562,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, 
glog.Infof("Experimental host user namespace defaulting is enabled.") } - hairpinMode, err := effectiveHairpinMode(kubeletconfiginternal.HairpinMode(kubeCfg.HairpinMode), kubeCfg.ContainerRuntime, crOptions.NetworkPluginName) + hairpinMode, err := effectiveHairpinMode(kubeletconfiginternal.HairpinMode(kubeCfg.HairpinMode), containerRuntime, crOptions.NetworkPluginName) if err != nil { // This is a non-recoverable error. Returning it up the callstack will just // lead to retries of the same failure, so just fail hard. @@ -516,17 +570,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, } glog.Infof("Hairpin mode set to %q", hairpinMode) - // TODO(#36485) Remove this workaround once we fix the init-container issue. - // Touch iptables lock file, which will be shared among all processes accessing - // the iptables. - f, err := os.OpenFile(utilipt.LockfilePath16x, os.O_CREATE, 0600) - if err != nil { - glog.Warningf("Failed to open iptables lock file: %v", err) - } else if err = f.Close(); err != nil { - glog.Warningf("Failed to close iptables lock file: %v", err) - } - - plug, err := network.InitNetworkPlugin(kubeDeps.NetworkPlugins, crOptions.NetworkPluginName, &criNetworkHost{&networkHost{klet}, &network.NoopPortMappingGetter{}}, hairpinMode, kubeCfg.NonMasqueradeCIDR, int(crOptions.NetworkPluginMTU)) + plug, err := network.InitNetworkPlugin(kubeDeps.NetworkPlugins, crOptions.NetworkPluginName, &criNetworkHost{&networkHost{klet}, &network.NoopPortMappingGetter{}}, hairpinMode, nonMasqueradeCIDR, int(crOptions.NetworkPluginMTU)) if err != nil { return nil, err } @@ -545,24 +589,20 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, // podManager is also responsible for keeping secretManager and configMapManager contents up-to-date. klet.podManager = kubepod.NewBasicPodManager(kubepod.NewBasicMirrorClient(klet.kubeClient), secretManager, configMapManager) - if kubeCfg.RemoteRuntimeEndpoint != "" { - // kubeCfg.RemoteImageEndpoint is same as kubeCfg.RemoteRuntimeEndpoint if not explicitly specified - if kubeCfg.RemoteImageEndpoint == "" { - kubeCfg.RemoteImageEndpoint = kubeCfg.RemoteRuntimeEndpoint + if remoteRuntimeEndpoint != "" { + // remoteImageEndpoint is same as remoteRuntimeEndpoint if not explicitly specified + if remoteImageEndpoint == "" { + remoteImageEndpoint = remoteRuntimeEndpoint } } // TODO: These need to become arguments to a standalone docker shim. - binDir := crOptions.CNIBinDir - if binDir == "" { - binDir = crOptions.NetworkPluginDir - } pluginSettings := dockershim.NetworkPluginSettings{ HairpinMode: hairpinMode, - NonMasqueradeCIDR: kubeCfg.NonMasqueradeCIDR, + NonMasqueradeCIDR: nonMasqueradeCIDR, PluginName: crOptions.NetworkPluginName, PluginConfDir: crOptions.CNIConfDir, - PluginBinDir: binDir, + PluginBinDir: crOptions.CNIBinDir, MTU: int(crOptions.NetworkPluginMTU), } @@ -575,20 +615,22 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, pluginSettings.LegacyRuntimeHost = nl // rktnetes cannot be run with CRI. - if kubeCfg.ContainerRuntime != kubetypes.RktContainerRuntime { + if containerRuntime != kubetypes.RktContainerRuntime { // kubelet defers to the runtime shim to setup networking. Setting // this to nil will prevent it from trying to invoke the plugin. // It's easier to always probe and initialize plugins till cri // becomes the default. 
klet.networkPlugin = nil + // if left at nil, that means it is unneeded + var legacyLogProvider kuberuntime.LegacyLogProvider - switch kubeCfg.ContainerRuntime { + switch containerRuntime { case kubetypes.DockerContainerRuntime: // Create and start the CRI shim running as a grpc server. streamingConfig := getStreamingConfig(kubeCfg, kubeDeps) - ds, err := dockershim.NewDockerService(kubeDeps.DockerClient, crOptions.PodSandboxImage, streamingConfig, - &pluginSettings, kubeCfg.RuntimeCgroups, kubeCfg.CgroupDriver, crOptions.DockerExecHandlerName, - crOptions.DockershimRootDirectory, crOptions.DockerDisableSharedPID) + ds, err := dockershim.NewDockerService(kubeDeps.DockerClientConfig, crOptions.PodSandboxImage, streamingConfig, + &pluginSettings, runtimeCgroups, kubeCfg.CgroupDriver, crOptions.DockershimRootDirectory, + crOptions.DockerDisableSharedPID) if err != nil { return nil, err } @@ -601,29 +643,30 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, // The unix socket for kubelet <-> dockershim communication. glog.V(5).Infof("RemoteRuntimeEndpoint: %q, RemoteImageEndpoint: %q", - kubeCfg.RemoteRuntimeEndpoint, - kubeCfg.RemoteImageEndpoint) + remoteRuntimeEndpoint, + remoteImageEndpoint) glog.V(2).Infof("Starting the GRPC server for the docker CRI shim.") - server := dockerremote.NewDockerServer(kubeCfg.RemoteRuntimeEndpoint, ds) + server := dockerremote.NewDockerServer(remoteRuntimeEndpoint, ds) if err := server.Start(); err != nil { return nil, err } // Create dockerLegacyService when the logging driver is not supported. - supported, err := dockershim.IsCRISupportedLogDriver(kubeDeps.DockerClient) + supported, err := ds.IsCRISupportedLogDriver() if err != nil { return nil, err } if !supported { - klet.dockerLegacyService = dockershim.NewDockerLegacyService(kubeDeps.DockerClient) + klet.dockerLegacyService = ds.NewDockerLegacyService() + legacyLogProvider = dockershim.NewLegacyLogProvider(klet.dockerLegacyService) } case kubetypes.RemoteContainerRuntime: // No-op. break default: - return nil, fmt.Errorf("unsupported CRI runtime: %q", kubeCfg.ContainerRuntime) + return nil, fmt.Errorf("unsupported CRI runtime: %q", containerRuntime) } - runtimeService, imageService, err := getRuntimeAndImageServices(kubeCfg) + runtimeService, imageService, err := getRuntimeAndImageServices(remoteRuntimeEndpoint, remoteImageEndpoint, kubeCfg.RuntimeRequestTimeout) if err != nil { return nil, err } @@ -631,7 +674,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, runtime, err := kuberuntime.NewKubeGenericRuntimeManager( kubecontainer.FilterEventRecorder(kubeDeps.Recorder), klet.livenessManager, - kubeCfg.SeccompProfileRoot, + seccompProfileRoot, containerRefManager, machineInfo, klet, @@ -646,6 +689,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, runtimeService, imageService, kubeDeps.ContainerManager.InternalContainerLifecycle(), + legacyLogProvider, ) if err != nil { return nil, err @@ -653,14 +697,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, klet.containerRuntime = runtime klet.runner = runtime - // CRI integrations should get container metrics via CRI. Docker - // uses the built-in cadvisor to gather such metrics on Linux for - // historical reasons. - // cri-o relies on cadvisor as a temporary workaround. The code should - // be removed. 
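Runtime selection in the switch above now keys off the containerRuntime argument instead of the config field; anything other than the docker or remote runtime is rejected. A stand-alone sketch of that control flow (the string constants stand in for kubetypes.DockerContainerRuntime and kubetypes.RemoteContainerRuntime):

package main

import "fmt"

const (
	dockerContainerRuntime = "docker"
	remoteContainerRuntime = "remote"
)

func selectRuntime(containerRuntime string) error {
	switch containerRuntime {
	case dockerContainerRuntime:
		// Start the in-process dockershim and talk to it over its gRPC endpoint.
		return nil
	case remoteContainerRuntime:
		// Nothing to do: an external CRI shim provides the runtime and image endpoints.
		return nil
	default:
		return fmt.Errorf("unsupported CRI runtime: %q", containerRuntime)
	}
}

func main() {
	fmt.Println(selectRuntime("docker")) // <nil>
	fmt.Println(selectRuntime("rkt"))    // unsupported CRI runtime: "rkt"
}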
Related issue: - // https://github.com/kubernetes/kubernetes/issues/51798 - if (kubeCfg.ContainerRuntime == kubetypes.DockerContainerRuntime && - goruntime.GOOS == "linux") || kubeCfg.RemoteRuntimeEndpoint == "/var/run/crio.sock" { + if cadvisor.UsingLegacyCadvisorStats(containerRuntime, remoteRuntimeEndpoint) { klet.StatsProvider = stats.NewCadvisorStatsProvider( klet.cadvisor, klet.resourceAnalyzer, @@ -730,7 +767,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, klet.containerDeletor = newPodContainerDeletor(klet.containerRuntime, integer.IntMax(containerGCPolicy.MaxPerPodContainer, minDeadContainerInPod)) // setup imageManager - imageManager, err := images.NewImageGCManager(klet.containerRuntime, kubeDeps.CAdvisorInterface, kubeDeps.Recorder, nodeRef, imageGCPolicy) + imageManager, err := images.NewImageGCManager(klet.containerRuntime, klet.StatsProvider, kubeDeps.Recorder, nodeRef, imageGCPolicy) if err != nil { return nil, fmt.Errorf("failed to initialize image manager: %v", err) } @@ -753,7 +790,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, ips = append(ips, cloudIPs...) names := append([]string{klet.GetHostname(), hostnameOverride}, cloudNames...) - klet.serverCertificateManager, err = certificate.NewKubeletServerCertificateManager(klet.kubeClient, kubeCfg, klet.nodeName, ips, names, certDirectory) + klet.serverCertificateManager, err = kubeletcertificate.NewKubeletServerCertificateManager(klet.kubeClient, kubeCfg, klet.nodeName, ips, names, certDirectory) if err != nil { return nil, fmt.Errorf("failed to initialize certificate manager: %v", err) } @@ -781,11 +818,11 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, // If the experimentalMounterPathFlag is set, we do not want to // check node capabilities since the mount path is not the default - if len(kubeCfg.ExperimentalMounterPath) != 0 { - kubeCfg.ExperimentalCheckNodeCapabilitiesBeforeMount = false + if len(experimentalMounterPath) != 0 { + experimentalCheckNodeCapabilitiesBeforeMount = false // Replace the nameserver in containerized-mounter's rootfs/etc/resolve.conf with kubelet.ClusterDNS // so that service name could be resolved - klet.setupDNSinContainerizedMounter(kubeCfg.ExperimentalMounterPath) + klet.dnsConfigurer.SetupDNSinContainerizedMounter(experimentalMounterPath) } // setup volumeManager @@ -800,8 +837,8 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, kubeDeps.Mounter, klet.getPodsDir(), kubeDeps.Recorder, - kubeCfg.ExperimentalCheckNodeCapabilitiesBeforeMount, - kubeCfg.KeepTerminatedPodVolumes) + experimentalCheckNodeCapabilitiesBeforeMount, + keepTerminatedPodVolumes) runtimeCache, err := kubecontainer.NewRuntimeCache(klet.containerRuntime) if err != nil { @@ -833,7 +870,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, } // Safe, whitelisted sysctls can always be used as unsafe sysctls in the spec // Hence, we concatenate those two lists. - safeAndUnsafeSysctls := append(sysctl.SafeSysctlWhitelist(), kubeCfg.AllowedUnsafeSysctls...) + safeAndUnsafeSysctls := append(sysctl.SafeSysctlWhitelist(), allowedUnsafeSysctls...) 
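The condition deleted above (Docker on Linux, or the cri-o socket at /var/run/crio.sock) is now hidden behind cadvisor.UsingLegacyCadvisorStats. Reconstructed from the removed code rather than from that helper's actual source, the check amounts to:

package main

import (
	"fmt"
	goruntime "runtime"
)

// usingLegacyCadvisorStats approximates the deleted inline condition; the real helper
// lives in pkg/kubelet/cadvisor and may differ in detail.
func usingLegacyCadvisorStats(containerRuntime, remoteRuntimeEndpoint string) bool {
	return (containerRuntime == "docker" && goruntime.GOOS == "linux") ||
		remoteRuntimeEndpoint == "/var/run/crio.sock"
}

func main() {
	fmt.Println(usingLegacyCadvisorStats("docker", ""))
	fmt.Println(usingLegacyCadvisorStats("remote", "/var/run/crio.sock"))
	fmt.Println(usingLegacyCadvisorStats("remote", "unix:///var/run/containerd.sock"))
}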
unsafeWhitelist, err := sysctl.NewWhitelist(safeAndUnsafeSysctls, v1.UnsafeSysctlsPodAnnotationKey) if err != nil { return nil, err @@ -851,18 +888,18 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, klet.AddPodSyncHandler(activeDeadlineHandler) criticalPodAdmissionHandler := preemption.NewCriticalPodAdmissionHandler(klet.GetActivePods, killPodNow(klet.podWorkers, kubeDeps.Recorder), kubeDeps.Recorder) - klet.admitHandlers.AddPodAdmitHandler(lifecycle.NewPredicateAdmitHandler(klet.getNodeAnyWay, criticalPodAdmissionHandler)) + klet.admitHandlers.AddPodAdmitHandler(lifecycle.NewPredicateAdmitHandler(klet.getNodeAnyWay, criticalPodAdmissionHandler, klet.containerManager.UpdatePluginResources)) // apply functional Option's for _, opt := range kubeDeps.Options { opt(klet) } - klet.appArmorValidator = apparmor.NewValidator(kubeCfg.ContainerRuntime) + klet.appArmorValidator = apparmor.NewValidator(containerRuntime) klet.softAdmitHandlers.AddPodAdmitHandler(lifecycle.NewAppArmorAdmitHandler(klet.appArmorValidator)) klet.softAdmitHandlers.AddPodAdmitHandler(lifecycle.NewNoNewPrivsAdmitHandler(klet.containerRuntime)) if utilfeature.DefaultFeatureGate.Enabled(features.Accelerators) { - if kubeCfg.ContainerRuntime == kubetypes.DockerContainerRuntime { - if klet.gpuManager, err = nvidia.NewNvidiaGPUManager(klet, kubeDeps.DockerClient); err != nil { + if containerRuntime == kubetypes.DockerContainerRuntime { + if klet.gpuManager, err = nvidia.NewNvidiaGPUManager(klet, kubeDeps.DockerClientConfig); err != nil { return nil, err } } else { @@ -922,16 +959,15 @@ type Kubelet struct { // Set to true to have the node register itself with the apiserver. registerNode bool + // List of taints to add to a node object when the kubelet registers itself. + registerWithTaints []api.Taint // Set to true to have the node register itself as schedulable. registerSchedulable bool // for internal book keeping; access only from within registerWithApiserver registrationCompleted bool - // If non-empty, use this for container DNS search. - clusterDomain string - - // If non-nil, use this for container DNS server. - clusterDNS []net.IP + // dnsConfigurer is used for setting up DNS resolver configuration when launching pods. + dnsConfigurer *dns.Configurer // masterServiceNamespace is the namespace that the master service is exposed in. masterServiceNamespace string @@ -1006,6 +1042,9 @@ type Kubelet struct { // Reference to this node. nodeRef *v1.ObjectReference + // The name of the container runtime + containerRuntimeName string + // Container runtime. containerRuntime kubecontainer.Runtime @@ -1073,11 +1112,6 @@ type Kubelet struct { // Channel for sending pods to kill. podKillingCh chan *kubecontainer.PodPair - // The configuration file used as the base to generate the container's - // DNS resolver configuration file. This can be used in conjunction with - // clusterDomain and clusterDNS. - resolverConfig string - // Information about the ports which are opened by daemons on Node running this Kubelet server. daemonEndpoints *v1.NodeDaemonEndpoints @@ -1158,6 +1192,13 @@ type Kubelet struct { // StatsProvider provides the node and the container stats. *stats.StatsProvider + + // containerized should be set to true if the kubelet is running in a container + containerized bool + + // This flag, if set, instructs the kubelet to keep volumes from terminated pods mounted to the node. + // This can be useful for debugging volume related issues. 
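The sysctl whitelist assembled just above now takes the explicitly allowed unsafe sysctls from the new allowedUnsafeSysctls parameter; the safe defaults are always included. Illustrative only, since the real safe list lives in pkg/kubelet/sysctl:

package main

import "fmt"

func main() {
	// A subset of the safe whitelist, for illustration.
	safeSysctls := []string{"kernel.shm_rmid_forced", "net.ipv4.ip_local_port_range"}
	allowedUnsafeSysctls := []string{"net.core.somaxconn"} // e.g. supplied on the command line
	whitelist := append(safeSysctls, allowedUnsafeSysctls...)
	fmt.Println(whitelist)
}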
+ keepTerminatedPodVolumes bool // DEPRECATED } func allLocalIPsWithoutLoopback() ([]net.IP, error) { @@ -1278,7 +1319,7 @@ func (kl *Kubelet) initializeModules() error { return fmt.Errorf("Kubelet failed to get node info: %v", err) } - if err := kl.containerManager.Start(node, kl.GetActivePods, kl.statusManager, kl.runtimeService); err != nil { + if err := kl.containerManager.Start(node, kl.GetActivePods, kl.sourcesReady, kl.statusManager, kl.runtimeService); err != nil { return fmt.Errorf("Failed to start ContainerManager %v", err) } @@ -1344,8 +1385,8 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) { go wait.Until(kl.podKiller, 1*time.Second, wait.NeverStop) // Start gorouting responsible for checking limits in resolv.conf - if kl.resolverConfig != "" { - go wait.Until(func() { kl.checkLimitsForResolvConf() }, 30*time.Second, wait.NeverStop) + if kl.dnsConfigurer.ResolverConfig != "" { + go wait.Until(func() { kl.dnsConfigurer.CheckLimitsForResolvConf() }, 30*time.Second, wait.NeverStop) } // Start component sync loops. @@ -1364,64 +1405,6 @@ func (kl *Kubelet) GetKubeClient() clientset.Interface { return kl.kubeClient } -// GetClusterDNS returns a list of the DNS servers and a list of the DNS search -// domains of the cluster. -func (kl *Kubelet) GetClusterDNS(pod *v1.Pod) ([]string, []string, bool, error) { - var hostDNS, hostSearch []string - // Get host DNS settings - if kl.resolverConfig != "" { - f, err := os.Open(kl.resolverConfig) - if err != nil { - return nil, nil, false, err - } - defer f.Close() - - hostDNS, hostSearch, err = kl.parseResolvConf(f) - if err != nil { - return nil, nil, false, err - } - } - useClusterFirstPolicy := ((pod.Spec.DNSPolicy == v1.DNSClusterFirst && !kubecontainer.IsHostNetworkPod(pod)) || pod.Spec.DNSPolicy == v1.DNSClusterFirstWithHostNet) - if useClusterFirstPolicy && len(kl.clusterDNS) == 0 { - // clusterDNS is not known. - // pod with ClusterDNSFirst Policy cannot be created - kl.recorder.Eventf(pod, v1.EventTypeWarning, "MissingClusterDNS", "kubelet does not have ClusterDNS IP configured and cannot create Pod using %q policy. Falling back to DNSDefault policy.", pod.Spec.DNSPolicy) - log := fmt.Sprintf("kubelet does not have ClusterDNS IP configured and cannot create Pod using %q policy. pod: %q. Falling back to DNSDefault policy.", pod.Spec.DNSPolicy, format.Pod(pod)) - kl.recorder.Eventf(kl.nodeRef, v1.EventTypeWarning, "MissingClusterDNS", log) - - // fallback to DNSDefault - useClusterFirstPolicy = false - } - - if !useClusterFirstPolicy { - // When the kubelet --resolv-conf flag is set to the empty string, use - // DNS settings that override the docker default (which is to use - // /etc/resolv.conf) and effectively disable DNS lookups. According to - // the bind documentation, the behavior of the DNS client library when - // "nameservers" are not specified is to "use the nameserver on the - // local machine". A nameserver setting of localhost is equivalent to - // this documented behavior. - if kl.resolverConfig == "" { - hostDNS = []string{"127.0.0.1"} - hostSearch = []string{"."} - } else { - hostSearch = kl.formDNSSearchForDNSDefault(hostSearch, pod) - } - return hostDNS, hostSearch, useClusterFirstPolicy, nil - } - - // for a pod with DNSClusterFirst policy, the cluster DNS server is the only nameserver configured for - // the pod. 
The cluster DNS server itself will forward queries to other nameservers that is configured to use, - // in case the cluster DNS server cannot resolve the DNS query itself - dns := make([]string, len(kl.clusterDNS)) - for i, ip := range kl.clusterDNS { - dns[i] = ip.String() - } - dnsSearch := kl.formDNSSearch(hostSearch, pod) - - return dns, dnsSearch, useClusterFirstPolicy, nil -} - // syncPod is the transaction script for the sync of a single pod. // // Arguments: @@ -1445,6 +1428,10 @@ func (kl *Kubelet) GetClusterDNS(pod *v1.Pod) ([]string, []string, bool, error) // // If any step of this workflow errors, the error is returned, and is repeated // on the next syncPod call. +// +// This operation writes all events that are dispatched in order to provide +// the most accurate information possible about an error situation to aid debugging. +// Callers should not throw an event if this operation returns an error. func (kl *Kubelet) syncPod(o syncPodOptions) error { // pull out the required options pod := o.pod @@ -1462,6 +1449,7 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error { kl.statusManager.SetPodStatus(pod, apiPodStatus) // we kill the pod with the specified grace period since this is a termination if err := kl.killPod(pod, nil, podStatus, killPodOptions.PodTerminationGracePeriodSecondsOverride); err != nil { + kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedToKillPod, "error killing pod: %v", err) // there was an error killing the pod, so we return that error directly utilruntime.HandleError(err) return err @@ -1528,6 +1516,7 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error { if !runnable.Admit || pod.DeletionTimestamp != nil || apiPodStatus.Phase == v1.PodFailed { var syncErr error if err := kl.killPod(pod, nil, podStatus, nil); err != nil { + kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedToKillPod, "error killing pod: %v", err) syncErr = fmt.Errorf("error killing pod: %v", err) utilruntime.HandleError(syncErr) } else { @@ -1542,6 +1531,7 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error { // If the network plugin is not ready, only start the pod if it uses the host network if rs := kl.runtimeState.networkErrors(); len(rs) != 0 && !kubecontainer.IsHostNetworkPod(pod) { + kl.recorder.Eventf(pod, v1.EventTypeWarning, events.NetworkNotReady, "network is not ready: %v", rs) return fmt.Errorf("network is not ready: %v", rs) } @@ -1584,6 +1574,7 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error { glog.V(2).Infof("Failed to update QoS cgroups while syncing pod: %v", err) } if err := pcm.EnsureExists(pod); err != nil { + kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedToCreatePodContainer, "unable to ensure pod container exists: %v", err) return fmt.Errorf("failed to ensure that the pod: %v cgroups exist and are correctly applied: %v", pod.UID, err) } } @@ -1607,15 +1598,21 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error { } } if mirrorPod == nil || deleted { - glog.V(3).Infof("Creating a mirror pod for static pod %q", format.Pod(pod)) - if err := kl.podManager.CreateMirrorPod(pod); err != nil { - glog.Errorf("Failed creating a mirror pod for %q: %v", format.Pod(pod), err) + node, err := kl.GetNode() + if err != nil || node.DeletionTimestamp != nil { + glog.V(4).Infof("No need to create a mirror pod, since node %q has been removed from the cluster", kl.nodeName) + } else { + glog.V(4).Infof("Creating a mirror pod for static pod %q", format.Pod(pod)) + if err := kl.podManager.CreateMirrorPod(pod); err != nil { + glog.Errorf("Failed 
creating a mirror pod for %q: %v", format.Pod(pod), err) + } } } } // Make data directories for the pod if err := kl.makePodDataDirs(pod); err != nil { + kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedToMakePodDataDirectories, "error making pod data directories: %v", err) glog.Errorf("Unable to make pod data directories for pod %q: %v", format.Pod(pod), err) return err } @@ -1637,6 +1634,8 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error { result := kl.containerRuntime.SyncPod(pod, apiPodStatus, podStatus, pullSecrets, kl.backOff) kl.reasonCache.Update(pod.UID, result) if err := result.Error(); err != nil { + // Do not record an event here, as we keep all event logging for sync pod failures + // local to container runtime so we get better errors return err } @@ -1853,17 +1852,28 @@ func (kl *Kubelet) syncLoopIteration(configCh <-chan kubetypes.PodUpdate, handle glog.V(2).Infof("SyncLoop (DELETE, %q): %q", u.Source, format.Pods(u.Pods)) // DELETE is treated as a UPDATE because of graceful deletion. handler.HandlePodUpdates(u.Pods) + case kubetypes.RESTORE: + glog.V(2).Infof("SyncLoop (RESTORE, %q): %q", u.Source, format.Pods(u.Pods)) + // These are pods restored from the checkpoint. Treat them as new + // pods. + handler.HandlePodAdditions(u.Pods) case kubetypes.SET: // TODO: Do we want to support this? glog.Errorf("Kubelet does not support snapshot update") } - // Mark the source ready after receiving at least one update from the - // source. Once all the sources are marked ready, various cleanup - // routines will start reclaiming resources. It is important that this - // takes place only after kubelet calls the update handler to process - // the update to ensure the internal pod cache is up-to-date. - kl.sourcesReady.AddSource(u.Source) + if u.Op != kubetypes.RESTORE { + // If the update type is RESTORE, it means that the update is from + // the pod checkpoints and may be incomplete. Do not mark the + // source as ready. + + // Mark the source ready after receiving at least one update from the + // source. Once all the sources are marked ready, various cleanup + // routines will start reclaiming resources. It is important that this + // takes place only after kubelet calls the update handler to process + // the update to ensure the internal pod cache is up-to-date. + kl.sourcesReady.AddSource(u.Source) + } case e := <-plegCh: if isSyncPodWorthy(e) { // PLEG event for a pod; sync it. @@ -1888,7 +1898,7 @@ func (kl *Kubelet) syncLoopIteration(configCh <-chan kubetypes.PodUpdate, handle break } glog.V(4).Infof("SyncLoop (SYNC): %d pods; %s", len(podsToSync), format.Pods(podsToSync)) - kl.HandlePodSyncs(podsToSync) + handler.HandlePodSyncs(podsToSync) case update := <-kl.livenessManager.Updates(): if update.Result == proberesults.Failure { // The liveness manager detected a failure; sync the pod. @@ -2081,7 +2091,7 @@ func (kl *Kubelet) updateRuntimeUp() { } // rkt uses the legacy, non-CRI integration. Don't check the runtime // conditions for it. - if kl.kubeletConfiguration.ContainerRuntime != kubetypes.RktContainerRuntime { + if kl.containerRuntimeName != kubetypes.RktContainerRuntime { if s == nil { glog.Errorf("Container runtime status is nil") return @@ -2162,43 +2172,15 @@ func (kl *Kubelet) cleanUpContainersInPod(podID types.UID, exitedContainerID str if podStatus, err := kl.podCache.Get(podID); err == nil { removeAll := false if syncedPod, ok := kl.podManager.GetPodByUID(podID); ok { - // When an evicted pod has already synced, all containers can be removed. 
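The GetClusterDNS method deleted a few hunks above made the cluster-first DNS decision inline; that logic now belongs to the dns.Configurer. Its core, reproduced from the removed code as a stand-alone sketch:

package main

import "fmt"

type dnsPolicy string

const (
	dnsClusterFirst            dnsPolicy = "ClusterFirst"
	dnsClusterFirstWithHostNet dnsPolicy = "ClusterFirstWithHostNet"
)

// useClusterFirst mirrors the removed decision: cluster DNS applies to ClusterFirst pods off
// the host network (or ClusterFirstWithHostNet pods), and only if cluster DNS IPs are known.
func useClusterFirst(policy dnsPolicy, hostNetwork bool, clusterDNS []string) bool {
	useCluster := (policy == dnsClusterFirst && !hostNetwork) || policy == dnsClusterFirstWithHostNet
	if useCluster && len(clusterDNS) == 0 {
		return false // fall back to the host's resolver settings
	}
	return useCluster
}

func main() {
	fmt.Println(useClusterFirst(dnsClusterFirst, false, []string{"10.96.0.10"})) // true
	fmt.Println(useClusterFirst(dnsClusterFirst, true, []string{"10.96.0.10"}))  // false: host-network pod
	fmt.Println(useClusterFirst(dnsClusterFirst, false, nil))                    // false: no cluster DNS known
}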
- removeAll = eviction.PodIsEvicted(syncedPod.Status) + // generate the api status using the cached runtime status to get up-to-date ContainerStatuses + apiPodStatus := kl.generateAPIPodStatus(syncedPod, podStatus) + // When an evicted or deleted pod has already synced, all containers can be removed. + removeAll = eviction.PodIsEvicted(syncedPod.Status) || (syncedPod.DeletionTimestamp != nil && notRunning(apiPodStatus.ContainerStatuses)) } kl.containerDeletor.deleteContainersInPod(exitedContainerID, podStatus, removeAll) } } -// Replace the nameserver in containerized-mounter's rootfs/etc/resolve.conf with kubelet.ClusterDNS -func (kl *Kubelet) setupDNSinContainerizedMounter(mounterPath string) { - resolvePath := filepath.Join(strings.TrimSuffix(mounterPath, "/mounter"), "rootfs", "etc", "resolv.conf") - dnsString := "" - for _, dns := range kl.clusterDNS { - dnsString = dnsString + fmt.Sprintf("nameserver %s\n", dns) - } - if kl.resolverConfig != "" { - f, err := os.Open(kl.resolverConfig) - defer f.Close() - if err != nil { - glog.Error("Could not open resolverConf file") - } else { - _, hostSearch, err := kl.parseResolvConf(f) - if err != nil { - glog.Errorf("Error for parsing the reslov.conf file: %v", err) - } else { - dnsString = dnsString + "search" - for _, search := range hostSearch { - dnsString = dnsString + fmt.Sprintf(" %s", search) - } - dnsString = dnsString + "\n" - } - } - } - if err := ioutil.WriteFile(resolvePath, []byte(dnsString), 0600); err != nil { - glog.Errorf("Could not write dns nameserver in file %s, with error %v", resolvePath, err) - } -} - // isSyncPodWorthy filters out events that are not worthy of pod syncing func isSyncPodWorthy(event *pleg.PodLifecycleEvent) bool { // ContatnerRemoved doesn't affect pod state diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_getters.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_getters.go index 7737229507ee..fefd8aac1444 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_getters.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_getters.go @@ -27,8 +27,8 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/cmd/kubelet/app/options" "k8s.io/kubernetes/pkg/kubelet/cm" + "k8s.io/kubernetes/pkg/kubelet/config" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" utilfile "k8s.io/kubernetes/pkg/util/file" utilnode "k8s.io/kubernetes/pkg/util/node" @@ -46,7 +46,7 @@ func (kl *Kubelet) getRootDir() string { // getPodsDir returns the full path to the directory under which pod // directories are created. func (kl *Kubelet) getPodsDir() string { - return filepath.Join(kl.getRootDir(), options.DefaultKubeletPodsDirName) + return filepath.Join(kl.getRootDir(), config.DefaultKubeletPodsDirName) } // getPluginsDir returns the full path to the directory under which plugin @@ -54,7 +54,7 @@ func (kl *Kubelet) getPodsDir() string { // they need to persist. Plugins should create subdirectories under this named // after their own names. func (kl *Kubelet) getPluginsDir() string { - return filepath.Join(kl.getRootDir(), options.DefaultKubeletPluginsDirName) + return filepath.Join(kl.getRootDir(), config.DefaultKubeletPluginsDirName) } // getPluginDir returns a data directory name for a given plugin name. @@ -64,6 +64,21 @@ func (kl *Kubelet) getPluginDir(pluginName string) string { return filepath.Join(kl.getPluginsDir(), pluginName) } +// getVolumeDevicePluginsDir returns the full path to the directory under which plugin +// directories are created. 
Plugins can use these directories for data that +// they need to persist. Plugins should create subdirectories under this named +// after their own names. +func (kl *Kubelet) getVolumeDevicePluginsDir() string { + return filepath.Join(kl.getRootDir(), config.DefaultKubeletPluginsDirName) +} + +// getVolumeDevicePluginDir returns a data directory name for a given plugin name. +// Plugins can use these directories to store data that they need to persist. +// For per-pod plugin data, see getVolumeDevicePluginsDir. +func (kl *Kubelet) getVolumeDevicePluginDir(pluginName string) string { + return filepath.Join(kl.getVolumeDevicePluginsDir(), pluginName, config.DefaultKubeletVolumeDevicesDirName) +} + // GetPodDir returns the full path to the per-pod data directory for the // specified pod. This directory may not exist if the pod does not exist. func (kl *Kubelet) GetPodDir(podUID types.UID) string { @@ -80,7 +95,7 @@ func (kl *Kubelet) getPodDir(podUID types.UID) string { // which volumes are created for the specified pod. This directory may not // exist if the pod does not exist. func (kl *Kubelet) getPodVolumesDir(podUID types.UID) string { - return filepath.Join(kl.getPodDir(podUID), options.DefaultKubeletVolumesDirName) + return filepath.Join(kl.getPodDir(podUID), config.DefaultKubeletVolumesDirName) } // getPodVolumeDir returns the full path to the directory which represents the @@ -90,11 +105,24 @@ func (kl *Kubelet) getPodVolumeDir(podUID types.UID, pluginName string, volumeNa return filepath.Join(kl.getPodVolumesDir(podUID), pluginName, volumeName) } +// getPodVolumeDevicesDir returns the full path to the per-pod data directory under +// which volumes are created for the specified pod. This directory may not +// exist if the pod does not exist. +func (kl *Kubelet) getPodVolumeDevicesDir(podUID types.UID) string { + return filepath.Join(kl.getPodDir(podUID), config.DefaultKubeletVolumeDevicesDirName) +} + +// getPodVolumeDeviceDir returns the full path to the directory which represents the +// named plugin for specified pod. This directory may not exist if the pod does not exist. +func (kl *Kubelet) getPodVolumeDeviceDir(podUID types.UID, pluginName string) string { + return filepath.Join(kl.getPodVolumeDevicesDir(podUID), pluginName) +} + // getPodPluginsDir returns the full path to the per-pod data directory under // which plugins may store data for the specified pod. This directory may not // exist if the pod does not exist. func (kl *Kubelet) getPodPluginsDir(podUID types.UID) string { - return filepath.Join(kl.getPodDir(podUID), options.DefaultKubeletPluginsDirName) + return filepath.Join(kl.getPodDir(podUID), config.DefaultKubeletPluginsDirName) } // getPodPluginDir returns a data directory name for a given plugin name for a @@ -108,7 +136,7 @@ func (kl *Kubelet) getPodPluginDir(podUID types.UID, pluginName string) string { // which container data is held for the specified pod. This directory may not // exist if the pod or container does not exist. 
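The new getters above add a volumeDevices layer for raw block volumes alongside the existing volumes directory. Assuming DefaultKubeletVolumeDevicesDirName resolves to "volumeDevices" (and the pods directory constant to "pods"), the per-pod layout composes like this:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	rootDir := "/var/lib/kubelet"
	podUID := "8dbb4a01-1234-5678-9abc-def012345678" // illustrative UID
	pluginName := "kubernetes.io~example-plugin"     // illustrative plugin name

	podVolumesDir := filepath.Join(rootDir, "pods", podUID, "volumes")
	podVolumeDevicesDir := filepath.Join(rootDir, "pods", podUID, "volumeDevices")

	fmt.Println(filepath.Join(podVolumesDir, pluginName, "my-volume"))
	fmt.Println(filepath.Join(podVolumeDevicesDir, pluginName))
}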
func (kl *Kubelet) getPodContainerDir(podUID types.UID, ctrName string) string { - return filepath.Join(kl.getPodDir(podUID), options.DefaultKubeletContainersDirName, ctrName) + return filepath.Join(kl.getPodDir(podUID), config.DefaultKubeletContainersDirName, ctrName) } // GetPods returns all pods bound to the kubelet and their spec, and the mirror @@ -219,7 +247,7 @@ func (kl *Kubelet) getPodVolumePathListFromDisk(podUID types.UID) ([]string, err if pathExists, pathErr := volumeutil.PathExists(podVolDir); pathErr != nil { return volumes, fmt.Errorf("Error checking if path %q exists: %v", podVolDir, pathErr) } else if !pathExists { - glog.Warningf("Warning: path %q does not exist: %q", podVolDir) + glog.Warningf("Path %q does not exist", podVolDir) return volumes, nil } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_network.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_network.go index e8f0edab723a..d7e47de55767 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_network.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_network.go @@ -18,14 +18,13 @@ package kubelet import ( "fmt" - "io" - "io/ioutil" - "os" - "strings" "github.com/golang/glog" "k8s.io/api/core/v1" + clientset "k8s.io/client-go/kubernetes" + runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/network" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" utiliptables "k8s.io/kubernetes/pkg/util/iptables" @@ -46,6 +45,77 @@ const ( KubeFirewallChain utiliptables.Chain = "KUBE-FIREWALL" ) +// This just exports required functions from kubelet proper, for use by network +// plugins. +// TODO(#35457): get rid of this backchannel to the kubelet. The scope of +// the back channel is restricted to host-ports/testing, and restricted +// to kubenet. No other network plugin wrapper needs it. Other plugins +// only require a way to access namespace information, which they can do +// directly through the methods implemented by criNetworkHost. +type networkHost struct { + kubelet *Kubelet +} + +func (nh *networkHost) GetPodByName(name, namespace string) (*v1.Pod, bool) { + return nh.kubelet.GetPodByName(name, namespace) +} + +func (nh *networkHost) GetKubeClient() clientset.Interface { + return nh.kubelet.kubeClient +} + +func (nh *networkHost) GetRuntime() kubecontainer.Runtime { + return nh.kubelet.GetRuntime() +} + +func (nh *networkHost) SupportsLegacyFeatures() bool { + return true +} + +// criNetworkHost implements the part of network.Host required by the +// cri (NamespaceGetter). It leechs off networkHost for all other +// methods, because networkHost is slated for deletion. +type criNetworkHost struct { + *networkHost + // criNetworkHost currently support legacy features. Hence no need to support PortMappingGetter + *network.NoopPortMappingGetter +} + +// GetNetNS returns the network namespace of the given containerID. +// This method satisfies the network.NamespaceGetter interface for +// networkHost. It's only meant to be used from network plugins +// that are directly invoked by the kubelet (aka: legacy, pre-cri). +// Any network plugin invoked by a cri must implement NamespaceGetter +// to talk directly to the runtime instead. 
+func (c *criNetworkHost) GetNetNS(containerID string) (string, error) { + return c.kubelet.GetRuntime().GetNetNS(kubecontainer.ContainerID{Type: "", ID: containerID}) +} + +// NoOpLegacyHost implements the network.LegacyHost interface for the remote +// runtime shim by just returning empties. It doesn't support legacy features +// like host port and bandwidth shaping. +type NoOpLegacyHost struct{} + +// GetPodByName always returns "nil, true" for 'NoOpLegacyHost' +func (n *NoOpLegacyHost) GetPodByName(namespace, name string) (*v1.Pod, bool) { + return nil, true +} + +// GetKubeClient always returns "nil" for 'NoOpLegacyHost' +func (n *NoOpLegacyHost) GetKubeClient() clientset.Interface { + return nil +} + +// GetRuntime always returns "nil" for 'NoOpLegacyHost' +func (n *NoOpLegacyHost) GetRuntime() kubecontainer.Runtime { + return nil +} + +// SupportsLegacyFeatures always returns "false" for 'NoOpLegacyHost' +func (n *NoOpLegacyHost) SupportsLegacyFeatures() bool { + return false +} + // effectiveHairpinMode determines the effective hairpin mode given the // configured mode, container runtime, and whether cbr0 should be configured. func effectiveHairpinMode(hairpinMode kubeletconfig.HairpinMode, containerRuntime string, networkPlugin string) (kubeletconfig.HairpinMode, error) { @@ -89,156 +159,6 @@ func (kl *Kubelet) providerRequiresNetworkingConfiguration() bool { return supported } -func omitDuplicates(kl *Kubelet, pod *v1.Pod, combinedSearch []string) []string { - uniqueDomains := map[string]bool{} - - for _, dnsDomain := range combinedSearch { - if _, exists := uniqueDomains[dnsDomain]; !exists { - combinedSearch[len(uniqueDomains)] = dnsDomain - uniqueDomains[dnsDomain] = true - } - } - return combinedSearch[:len(uniqueDomains)] -} - -func formDNSSearchFitsLimits(kl *Kubelet, pod *v1.Pod, composedSearch []string) []string { - // resolver file Search line current limitations - resolvSearchLineDNSDomainsLimit := 6 - resolvSearchLineLenLimit := 255 - limitsExceeded := false - - if len(composedSearch) > resolvSearchLineDNSDomainsLimit { - composedSearch = composedSearch[:resolvSearchLineDNSDomainsLimit] - limitsExceeded = true - } - - if resolvSearchhLineStrLen := len(strings.Join(composedSearch, " ")); resolvSearchhLineStrLen > resolvSearchLineLenLimit { - cutDomainsNum := 0 - cutDoaminsLen := 0 - for i := len(composedSearch) - 1; i >= 0; i-- { - cutDoaminsLen += len(composedSearch[i]) + 1 - cutDomainsNum++ - - if (resolvSearchhLineStrLen - cutDoaminsLen) <= resolvSearchLineLenLimit { - break - } - } - - composedSearch = composedSearch[:(len(composedSearch) - cutDomainsNum)] - limitsExceeded = true - } - - if limitsExceeded { - log := fmt.Sprintf("Search Line limits were exceeded, some dns names have been omitted, the applied search line is: %s", strings.Join(composedSearch, " ")) - kl.recorder.Event(pod, v1.EventTypeWarning, "DNSSearchForming", log) - glog.Error(log) - } - return composedSearch -} - -func (kl *Kubelet) formDNSSearchForDNSDefault(hostSearch []string, pod *v1.Pod) []string { - return formDNSSearchFitsLimits(kl, pod, hostSearch) -} - -func (kl *Kubelet) formDNSSearch(hostSearch []string, pod *v1.Pod) []string { - if kl.clusterDomain == "" { - formDNSSearchFitsLimits(kl, pod, hostSearch) - return hostSearch - } - - nsSvcDomain := fmt.Sprintf("%s.svc.%s", pod.Namespace, kl.clusterDomain) - svcDomain := fmt.Sprintf("svc.%s", kl.clusterDomain) - dnsSearch := []string{nsSvcDomain, svcDomain, kl.clusterDomain} - - combinedSearch := append(dnsSearch, hostSearch...) 
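The search-line limits enforced by the deleted formDNSSearchFitsLimits (at most 6 search domains and 255 characters on the resolv.conf search line) reduce to the truncation below, shown as a simplified sketch of the removed logic:

package main

import (
	"fmt"
	"strings"
)

// fitSearchLimits drops trailing search domains until both limits are met, approximating
// the removed formDNSSearchFitsLimits (which also emitted a warning event when it truncated).
func fitSearchLimits(search []string) []string {
	const maxDomains = 6
	const maxLineLen = 255
	if len(search) > maxDomains {
		search = search[:maxDomains]
	}
	for len(search) > 0 && len(strings.Join(search, " ")) > maxLineLen {
		search = search[:len(search)-1]
	}
	return search
}

func main() {
	search := []string{
		"default.svc.cluster.local", "svc.cluster.local", "cluster.local",
		"dept.example.com", "example.com", "corp.internal", "one-domain-too-many.example.org",
	}
	fmt.Println(fitSearchLimits(search)) // the seventh domain is dropped
}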
- - combinedSearch = omitDuplicates(kl, pod, combinedSearch) - return formDNSSearchFitsLimits(kl, pod, combinedSearch) -} - -func (kl *Kubelet) checkLimitsForResolvConf() { - // resolver file Search line current limitations - resolvSearchLineDNSDomainsLimit := 6 - resolvSearchLineLenLimit := 255 - - f, err := os.Open(kl.resolverConfig) - if err != nil { - kl.recorder.Event(kl.nodeRef, v1.EventTypeWarning, "checkLimitsForResolvConf", err.Error()) - glog.Error("checkLimitsForResolvConf: " + err.Error()) - return - } - defer f.Close() - - _, hostSearch, err := kl.parseResolvConf(f) - if err != nil { - kl.recorder.Event(kl.nodeRef, v1.EventTypeWarning, "checkLimitsForResolvConf", err.Error()) - glog.Error("checkLimitsForResolvConf: " + err.Error()) - return - } - - domainCntLimit := resolvSearchLineDNSDomainsLimit - - if kl.clusterDomain != "" { - domainCntLimit -= 3 - } - - if len(hostSearch) > domainCntLimit { - log := fmt.Sprintf("Resolv.conf file '%s' contains search line consisting of more than %d domains!", kl.resolverConfig, domainCntLimit) - kl.recorder.Event(kl.nodeRef, v1.EventTypeWarning, "checkLimitsForResolvConf", log) - glog.Error("checkLimitsForResolvConf: " + log) - return - } - - if len(strings.Join(hostSearch, " ")) > resolvSearchLineLenLimit { - log := fmt.Sprintf("Resolv.conf file '%s' contains search line which length is more than allowed %d chars!", kl.resolverConfig, resolvSearchLineLenLimit) - kl.recorder.Event(kl.nodeRef, v1.EventTypeWarning, "checkLimitsForResolvConf", log) - glog.Error("checkLimitsForResolvConf: " + log) - return - } - - return -} - -// parseResolveConf reads a resolv.conf file from the given reader, and parses -// it into nameservers and searches, possibly returning an error. -// TODO: move to utility package -func (kl *Kubelet) parseResolvConf(reader io.Reader) (nameservers []string, searches []string, err error) { - file, err := ioutil.ReadAll(reader) - if err != nil { - return nil, nil, err - } - - // Lines of the form "nameserver 1.2.3.4" accumulate. - nameservers = []string{} - - // Lines of the form "search example.com" overrule - last one wins. - searches = []string{} - - lines := strings.Split(string(file), "\n") - for l := range lines { - trimmed := strings.TrimSpace(lines[l]) - if strings.HasPrefix(trimmed, "#") { - continue - } - fields := strings.Fields(trimmed) - if len(fields) == 0 { - continue - } - if fields[0] == "nameserver" && len(fields) >= 2 { - nameservers = append(nameservers, fields[1]) - } - if fields[0] == "search" { - searches = fields[1:] - } - } - - // There used to be code here to scrub DNS for each cloud, but doesn't - // make sense anymore since cloudproviders are being factored out. - // contact @thockin or @wlan0 for more information - - return nameservers, searches, nil -} - // syncNetworkStatus updates the network state func (kl *Kubelet) syncNetworkStatus() { // For cri integration, network state will be updated in updateRuntimeUp, @@ -361,3 +281,10 @@ func getIPTablesMark(bit int) string { value := 1 << uint(bit) return fmt.Sprintf("%#08x/%#08x", value, value) } + +// GetPodDNS returns DNS setttings for the pod. +// This function is defined in kubecontainer.RuntimeHelper interface so we +// have to implement it. 
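parseResolvConf has left the kubelet as well; its replacement lives in the dns package. The parsing itself, reconstructed from the deleted hunk as a stand-alone sketch:

package main

import (
	"fmt"
	"strings"
)

// parseResolvConf collects "nameserver" entries and keeps the last "search" line, skipping
// comments and blank lines, as the removed kubelet method did.
func parseResolvConf(contents string) (nameservers, searches []string) {
	for _, line := range strings.Split(contents, "\n") {
		trimmed := strings.TrimSpace(line)
		if strings.HasPrefix(trimmed, "#") {
			continue
		}
		fields := strings.Fields(trimmed)
		if len(fields) == 0 {
			continue
		}
		if fields[0] == "nameserver" && len(fields) >= 2 {
			nameservers = append(nameservers, fields[1])
		}
		if fields[0] == "search" {
			searches = fields[1:]
		}
	}
	return nameservers, searches
}

func main() {
	ns, search := parseResolvConf("# managed by the host\nnameserver 10.96.0.10\nsearch default.svc.cluster.local svc.cluster.local\n")
	fmt.Println(ns, search)
}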
+func (kl *Kubelet) GetPodDNS(pod *v1.Pod) (*runtimeapi.DNSConfig, error) { + return kl.dnsConfigurer.GetPodDNS(pod) +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_node_status.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_node_status.go index f0e219a878c2..7e09c16815f3 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_node_status.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_node_status.go @@ -30,12 +30,11 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/types" utilnet "k8s.io/apimachinery/pkg/util/net" utilfeature "k8s.io/apiserver/pkg/util/feature" - k8s_api_v1 "k8s.io/kubernetes/pkg/api/v1" - v1helper "k8s.io/kubernetes/pkg/api/v1/helper" + k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1" + v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/features" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" @@ -98,7 +97,7 @@ func (kl *Kubelet) registerWithAPIServer() { // a different externalID value, it attempts to delete that node so that a // later attempt can recreate it. func (kl *Kubelet) tryRegisterWithAPIServer(node *v1.Node) bool { - _, err := kl.kubeClient.Core().Nodes().Create(node) + _, err := kl.kubeClient.CoreV1().Nodes().Create(node) if err == nil { return true } @@ -108,7 +107,7 @@ func (kl *Kubelet) tryRegisterWithAPIServer(node *v1.Node) bool { return false } - existingNode, err := kl.kubeClient.Core().Nodes().Get(string(kl.nodeName), metav1.GetOptions{}) + existingNode, err := kl.kubeClient.CoreV1().Nodes().Get(string(kl.nodeName), metav1.GetOptions{}) if err != nil { glog.Errorf("Unable to register node %q with API server: error getting existing node: %v", kl.nodeName, err) return false @@ -118,15 +117,9 @@ func (kl *Kubelet) tryRegisterWithAPIServer(node *v1.Node) bool { return false } - clonedNode, err := conversion.NewCloner().DeepCopy(existingNode) - if err != nil { - glog.Errorf("Unable to clone %q node object %#v: %v", kl.nodeName, existingNode, err) - return false - } - - originalNode, ok := clonedNode.(*v1.Node) - if !ok || originalNode == nil { - glog.Errorf("Unable to cast %q node object %#v to v1.Node", kl.nodeName, clonedNode) + originalNode := existingNode.DeepCopy() + if originalNode == nil { + glog.Errorf("Nil %q node object", kl.nodeName) return false } @@ -153,7 +146,7 @@ func (kl *Kubelet) tryRegisterWithAPIServer(node *v1.Node) bool { "Previously node %q had externalID %q; now it is %q; will delete and recreate.", kl.nodeName, node.Spec.ExternalID, existingNode.Spec.ExternalID, ) - if err := kl.kubeClient.Core().Nodes().Delete(node.Name, nil); err != nil { + if err := kl.kubeClient.CoreV1().Nodes().Delete(node.Name, nil); err != nil { glog.Errorf("Unable to register node %q with API server: error deleting old node: %v", kl.nodeName, err) } else { glog.Infof("Deleted old node object %q", kl.nodeName) @@ -176,6 +169,10 @@ func (kl *Kubelet) updateDefaultLabels(initialNode, existingNode *v1.Node) bool var needsUpdate bool = false //Set default labels but make sure to not set labels with empty values for _, label := range defaultLabels { + if _, hasInitialValue := initialNode.Labels[label]; !hasInitialValue { + continue + } + if existingNode.Labels[label] != initialNode.Labels[label] { existingNode.Labels[label] = initialNode.Labels[label] needsUpdate = true @@ -236,10 +233,10 @@ func (kl *Kubelet) 
initialNode() (*v1.Node, error) { }, } nodeTaints := make([]v1.Taint, 0) - if len(kl.kubeletConfiguration.RegisterWithTaints) > 0 { - taints := make([]v1.Taint, len(kl.kubeletConfiguration.RegisterWithTaints)) - for i := range kl.kubeletConfiguration.RegisterWithTaints { - if err := k8s_api_v1.Convert_api_Taint_To_v1_Taint(&kl.kubeletConfiguration.RegisterWithTaints[i], &taints[i], nil); err != nil { + if len(kl.registerWithTaints) > 0 { + taints := make([]v1.Taint, len(kl.registerWithTaints)) + for i := range kl.registerWithTaints { + if err := k8s_api_v1.Convert_core_Taint_To_v1_Taint(&kl.registerWithTaints[i], &taints[i], nil); err != nil { return nil, err } } @@ -279,7 +276,7 @@ func (kl *Kubelet) initialNode() (*v1.Node, error) { glog.Infof("Controller attach/detach is disabled for this node; Kubelet will attach and detach volumes") } - if kl.kubeletConfiguration.KeepTerminatedPodVolumes { + if kl.keepTerminatedPodVolumes { if node.Annotations == nil { node.Annotations = make(map[string]string) } @@ -409,14 +406,9 @@ func (kl *Kubelet) tryUpdateNodeStatus(tryNumber int) error { return fmt.Errorf("error getting node %q: %v", kl.nodeName, err) } - clonedNode, err := conversion.NewCloner().DeepCopy(node) - if err != nil { - return fmt.Errorf("error clone node %q: %v", kl.nodeName, err) - } - - originalNode, ok := clonedNode.(*v1.Node) - if !ok || originalNode == nil { - return fmt.Errorf("failed to cast %q node object %#v to v1.Node", kl.nodeName, clonedNode) + originalNode := node.DeepCopy() + if originalNode == nil { + return fmt.Errorf("nil %q node object", kl.nodeName) } kl.updatePodCIDR(node.Spec.PodCIDR) @@ -445,13 +437,19 @@ func (kl *Kubelet) recordNodeStatusEvent(eventType, event string) { // Set IP and hostname addresses for the node. func (kl *Kubelet) setNodeAddress(node *v1.Node) error { if kl.nodeIP != nil { - if err := kl.validateNodeIP(); err != nil { + if err := validateNodeIP(kl.nodeIP); err != nil { return fmt.Errorf("failed to validate nodeIP: %v", err) } glog.V(2).Infof("Using node IP: %q", kl.nodeIP.String()) } if kl.externalCloudProvider { + if kl.nodeIP != nil { + if node.ObjectMeta.Annotations == nil { + node.ObjectMeta.Annotations = make(map[string]string) + } + node.ObjectMeta.Annotations[kubeletapis.AnnotationProvidedIPAddr] = kl.nodeIP.String() + } // We rely on the external cloud provider to supply the addresses. return nil } @@ -505,20 +503,25 @@ func (kl *Kubelet) setNodeAddress(node *v1.Node) error { // 1) Use nodeIP if set // 2) If the user has specified an IP to HostnameOverride, use it - // 3) Lookup the IP from node name by DNS and use the first non-loopback ipv4 address + // 3) Lookup the IP from node name by DNS and use the first valid IPv4 address. + // If the node does not have a valid IPv4 address, use the first valid IPv6 address. 
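updateDefaultLabels in the hunk above gains a guard: default labels that the freshly built node object does not carry are skipped instead of overwriting or clearing the values already on the existing node. The merge, as a minimal sketch:

package main

import "fmt"

func mergeDefaultLabels(defaultLabels []string, initial, existing map[string]string) bool {
	needsUpdate := false
	for _, label := range defaultLabels {
		if _, hasInitialValue := initial[label]; !hasInitialValue {
			continue // nothing to copy; leave the existing value alone
		}
		if existing[label] != initial[label] {
			existing[label] = initial[label]
			needsUpdate = true
		}
	}
	return needsUpdate
}

func main() {
	existing := map[string]string{"kubernetes.io/hostname": "node-a", "beta.kubernetes.io/os": "linux"}
	initial := map[string]string{"kubernetes.io/hostname": "node-a-renamed"} // no os label: left untouched
	fmt.Println(mergeDefaultLabels([]string{"kubernetes.io/hostname", "beta.kubernetes.io/os"}, initial, existing))
	fmt.Println(existing["beta.kubernetes.io/os"]) // still "linux"
}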
// 4) Try to get the IP from the network interface used as default gateway if kl.nodeIP != nil { ipAddr = kl.nodeIP - node.ObjectMeta.Annotations[kubeletapis.AnnotationProvidedIPAddr] = kl.nodeIP.String() } else if addr := net.ParseIP(kl.hostname); addr != nil { ipAddr = addr } else { var addrs []net.IP - addrs, err = net.LookupIP(node.Name) + addrs, _ = net.LookupIP(node.Name) for _, addr := range addrs { - if !addr.IsLoopback() && addr.To4() != nil { - ipAddr = addr - break + if err = validateNodeIP(addr); err == nil { + if addr.To4() != nil { + ipAddr = addr + break + } + if addr.To16() != nil && ipAddr == nil { + ipAddr = addr + } } } @@ -598,24 +601,17 @@ func (kl *Kubelet) setNodeStatusMachineInfo(node *v1.Node) { } } - currentCapacity := kl.containerManager.GetCapacity() - if currentCapacity != nil { - for k, v := range currentCapacity { - if v1helper.IsExtendedResourceName(k) { - glog.V(2).Infof("Update capacity for %s to %d", k, v.Value()) - node.Status.Capacity[k] = v - } - } - // Remove stale extended resources. - for k := range node.Status.Capacity { - if v1helper.IsExtendedResourceName(k) { - if _, ok := currentCapacity[k]; !ok { - glog.V(2).Infof("delete capacity for %s", k) - delete(node.Status.Capacity, k) - } - } + devicePluginCapacity, removedDevicePlugins := kl.containerManager.GetDevicePluginResourceCapacity() + if devicePluginCapacity != nil { + for k, v := range devicePluginCapacity { + glog.V(2).Infof("Update capacity for %s to %d", k, v.Value()) + node.Status.Capacity[k] = v } } + for _, removedResource := range removedDevicePlugins { + glog.V(2).Infof("Remove capacity for %s", removedResource) + delete(node.Status.Capacity, v1.ResourceName(removedResource)) + } } // Set Allocatable. @@ -1003,17 +999,22 @@ func (kl *Kubelet) defaultNodeStatusFuncs() []func(*v1.Node) error { } // Validate given node IP belongs to the current host -func (kl *Kubelet) validateNodeIP() error { - if kl.nodeIP == nil { - return nil - } - +func validateNodeIP(nodeIP net.IP) error { // Honor IP limitations set in setNodeStatus() - if kl.nodeIP.IsLoopback() { + if nodeIP.To4() == nil && nodeIP.To16() == nil { + return fmt.Errorf("nodeIP must be a valid IP address") + } + if nodeIP.IsLoopback() { return fmt.Errorf("nodeIP can't be loopback address") } - if kl.nodeIP.To4() == nil { - return fmt.Errorf("nodeIP must be IPv4 address") + if nodeIP.IsMulticast() { + return fmt.Errorf("nodeIP can't be a multicast address") + } + if nodeIP.IsLinkLocalUnicast() { + return fmt.Errorf("nodeIP can't be a link-local unicast address") + } + if nodeIP.IsUnspecified() { + return fmt.Errorf("nodeIP can't be an all zeros address") } addrs, err := net.InterfaceAddrs() @@ -1028,9 +1029,9 @@ func (kl *Kubelet) validateNodeIP() error { case *net.IPAddr: ip = v.IP } - if ip != nil && ip.Equal(kl.nodeIP) { + if ip != nil && ip.Equal(nodeIP) { return nil } } - return fmt.Errorf("Node IP: %q not found in the host's network interfaces", kl.nodeIP.String()) + return fmt.Errorf("Node IP: %q not found in the host's network interfaces", nodeIP.String()) } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go index 9406669bf076..6241f4ba3406 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go @@ -43,12 +43,12 @@ import ( "k8s.io/apimachinery/pkg/util/validation/field" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/tools/remotecommand" - "k8s.io/kubernetes/pkg/api" - 
v1helper "k8s.io/kubernetes/pkg/api/v1/helper" - v1qos "k8s.io/kubernetes/pkg/api/v1/helper/qos" podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/pkg/api/v1/resource" - "k8s.io/kubernetes/pkg/api/v1/validation" + podshelper "k8s.io/kubernetes/pkg/apis/core/pods" + v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" + v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos" + "k8s.io/kubernetes/pkg/apis/core/v1/validation" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/fieldpath" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" @@ -64,6 +64,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/util/format" utilfile "k8s.io/kubernetes/pkg/util/file" "k8s.io/kubernetes/pkg/volume" + volumeutil "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/pkg/volume/util/volumehelper" volumevalidation "k8s.io/kubernetes/pkg/volume/validation" "k8s.io/kubernetes/third_party/forked/golang/expansion" @@ -91,9 +92,9 @@ func (kl *Kubelet) GetActivePods() []*v1.Pod { return activePods } -// makeDevices determines the devices for the given container. +// makeGPUDevices determines the devices for the given container. // Experimental. -func (kl *Kubelet) makeDevices(pod *v1.Pod, container *v1.Container) ([]kubecontainer.DeviceInfo, error) { +func (kl *Kubelet) makeGPUDevices(pod *v1.Pod, container *v1.Container) ([]kubecontainer.DeviceInfo, error) { if container.Resources.Limits.NvidiaGPU().IsZero() { return nil, nil } @@ -111,6 +112,56 @@ func (kl *Kubelet) makeDevices(pod *v1.Pod, container *v1.Container) ([]kubecont return devices, nil } +func makeAbsolutePath(goos, path string) string { + if goos != "windows" { + return "/" + path + } + // These are all for windows + // If there is a colon, give up. + if strings.Contains(path, ":") { + return path + } + // If there is a slash, but no drive, add 'c:' + if strings.HasPrefix(path, "/") || strings.HasPrefix(path, "\\") { + return "c:" + path + } + // Otherwise, add 'c:\' + return "c:\\" + path +} + +// makeBlockVolumes maps the raw block devices specified in the path of the container +// Experimental +func (kl *Kubelet) makeBlockVolumes(pod *v1.Pod, container *v1.Container, podVolumes kubecontainer.VolumeMap, blkutil volumeutil.BlockVolumePathHandler) ([]kubecontainer.DeviceInfo, error) { + var devices []kubecontainer.DeviceInfo + for _, device := range container.VolumeDevices { + // check path is absolute + if !filepath.IsAbs(device.DevicePath) { + return nil, fmt.Errorf("error DevicePath `%s` must be an absolute path", device.DevicePath) + } + vol, ok := podVolumes[device.Name] + if !ok || vol.BlockVolumeMapper == nil { + glog.Errorf("Block volume cannot be satisfied for container %q, because the volume is missing or the volume mapper is nil: %+v", container.Name, device) + return nil, fmt.Errorf("cannot find volume %q to pass into container %q", device.Name, container.Name) + } + // Get a symbolic link associated to a block device under pod device path + dirPath, volName := vol.BlockVolumeMapper.GetPodDeviceMapPath() + symlinkPath := path.Join(dirPath, volName) + if islinkExist, checkErr := blkutil.IsSymlinkExist(symlinkPath); checkErr != nil { + return nil, checkErr + } else if islinkExist { + // Check readOnly in PVCVolumeSource and set read only permission if it's true. + permission := "mrw" + if vol.ReadOnly { + permission = "r" + } + glog.V(4).Infof("Device will be attached to container %q. 
Path on host: %v", container.Name, symlinkPath) + devices = append(devices, kubecontainer.DeviceInfo{PathOnHost: symlinkPath, PathInContainer: device.DevicePath, Permissions: permission}) + } + } + + return devices, nil +} + // makeMounts determines the mount points for the given container. func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, hostDomain, podIP string, podVolumes kubecontainer.VolumeMap) ([]kubecontainer.Mount, error) { // Kubernetes only mounts on /etc/hosts if: @@ -127,8 +178,8 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h mountEtcHostsFile = mountEtcHostsFile && (mount.MountPath != etcHostsPath) vol, ok := podVolumes[mount.Name] if !ok || vol.Mounter == nil { - glog.Warningf("Mount cannot be satisfied for container %q, because the volume is missing or the volume mounter is nil: %q", container.Name, mount) - continue + glog.Errorf("Mount cannot be satisfied for container %q, because the volume is missing or the volume mounter is nil: %+v", container.Name, mount) + return nil, fmt.Errorf("cannot find volume %q to mount into container %q", mount.Name, container.Name) } relabelVolume := false @@ -187,9 +238,9 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h if (strings.HasPrefix(hostPath, "/") || strings.HasPrefix(hostPath, "\\")) && !strings.Contains(hostPath, ":") { hostPath = "c:" + hostPath } - if (strings.HasPrefix(containerPath, "/") || strings.HasPrefix(containerPath, "\\")) && !strings.Contains(containerPath, ":") { - containerPath = "c:" + containerPath - } + } + if !filepath.IsAbs(containerPath) { + containerPath = makeAbsolutePath(runtime.GOOS, containerPath) } propagation, err := translateMountPropagation(mount.MountPropagation) @@ -346,7 +397,7 @@ func truncatePodHostnameIfNeeded(podName, hostname string) (string, error) { // given that pod's spec and annotations or returns an error. func (kl *Kubelet) GeneratePodHostNameAndDomain(pod *v1.Pod) (string, string, error) { // TODO(vmarmol): Handle better. - clusterDomain := kl.clusterDomain + clusterDomain := kl.dnsConfigurer.ClusterDomain hostname := pod.Name if len(pod.Spec.Hostname) > 0 { @@ -381,18 +432,17 @@ func (kl *Kubelet) GetPodCgroupParent(pod *v1.Pod) string { // GenerateRunContainerOptions generates the RunContainerOptions, which can be used by // the container runtime to set parameters for launching a container. 
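makeAbsolutePath, added a couple of hunks above, normalizes relative container paths per operating system and is now used by makeMounts. Copied here verbatim with a small driver to show its behavior:

package main

import (
	"fmt"
	"strings"
)

func makeAbsolutePath(goos, path string) string {
	if goos != "windows" {
		return "/" + path
	}
	// These are all for windows.
	// If there is a colon, give up.
	if strings.Contains(path, ":") {
		return path
	}
	// If there is a slash, but no drive, add 'c:'.
	if strings.HasPrefix(path, "/") || strings.HasPrefix(path, "\\") {
		return "c:" + path
	}
	// Otherwise, add 'c:\'.
	return "c:\\" + path
}

func main() {
	fmt.Println(makeAbsolutePath("linux", "var/run"))    // /var/run
	fmt.Println(makeAbsolutePath("windows", "d:\\data")) // d:\data
	fmt.Println(makeAbsolutePath("windows", "\\data"))   // c:\data
	fmt.Println(makeAbsolutePath("windows", "data"))     // c:\data
}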
-func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (*kubecontainer.RunContainerOptions, bool, error) { - useClusterFirstPolicy := false - opts, err := kl.containerManager.GetResources(pod, container, kl.GetActivePods()) +func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (*kubecontainer.RunContainerOptions, error) { + opts, err := kl.containerManager.GetResources(pod, container) if err != nil { - return nil, false, err + return nil, err } cgroupParent := kl.GetPodCgroupParent(pod) opts.CgroupParent = cgroupParent hostname, hostDomainName, err := kl.GeneratePodHostNameAndDomain(pod) if err != nil { - return nil, false, err + return nil, err } opts.Hostname = hostname podName := volumehelper.GetUniquePodName(pod) @@ -400,21 +450,31 @@ func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Contai opts.PortMappings = kubecontainer.MakePortMappings(container) // TODO(random-liu): Move following convert functions into pkg/kubelet/container - devices, err := kl.makeDevices(pod, container) + devices, err := kl.makeGPUDevices(pod, container) if err != nil { - return nil, false, err + return nil, err } opts.Devices = append(opts.Devices, devices...) + // TODO: remove feature gate check after no longer needed + if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + blkutil := volumeutil.NewBlockVolumePathHandler() + blkVolumes, err := kl.makeBlockVolumes(pod, container, volumes, blkutil) + if err != nil { + return nil, err + } + opts.Devices = append(opts.Devices, blkVolumes...) + } + mounts, err := makeMounts(pod, kl.getPodDir(pod.UID), container, hostname, hostDomainName, podIP, volumes) if err != nil { - return nil, false, err + return nil, err } opts.Mounts = append(opts.Mounts, mounts...) envs, err := kl.makeEnvironmentVariables(pod, container, podIP) if err != nil { - return nil, false, err + return nil, err } opts.Envs = append(opts.Envs, envs...) @@ -429,17 +489,12 @@ func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Contai } } - opts.DNS, opts.DNSSearch, useClusterFirstPolicy, err = kl.GetClusterDNS(pod) - if err != nil { - return nil, false, err - } - // only do this check if the experimental behavior is enabled, otherwise allow it to default to false if kl.experimentalHostUserNamespaceDefaulting { opts.EnableHostUserNamespace = kl.enableHostUserNamespace(pod) } - return opts, useClusterFirstPolicy, nil + return opts, nil } var masterServices = sets.NewString("kubernetes") @@ -722,7 +777,7 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container // podFieldSelectorRuntimeValue returns the runtime value of the given // selector for a pod. 
func (kl *Kubelet) podFieldSelectorRuntimeValue(fs *v1.ObjectFieldSelector, pod *v1.Pod, podIP string) (string, error) { - internalFieldPath, _, err := api.Scheme.ConvertFieldLabel(fs.APIVersion, "Pod", fs.FieldPath, "") + internalFieldPath, _, err := podshelper.ConvertDownwardAPIFieldLabel(fs.APIVersion, fs.FieldPath, "") if err != nil { return "", err } @@ -820,6 +875,16 @@ func (kl *Kubelet) podIsTerminated(pod *v1.Pod) bool { return status.Phase == v1.PodFailed || status.Phase == v1.PodSucceeded || (pod.DeletionTimestamp != nil && notRunning(status.ContainerStatuses)) } +// IsPodTerminated returns true if the pod with the provided UID is in a terminated state ("Failed" or "Succeeded") +// or if the pod has been deleted or removed +func (kl *Kubelet) IsPodTerminated(uid types.UID) bool { + pod, podFound := kl.podManager.GetPodByUID(uid) + if !podFound { + return true + } + return kl.podIsTerminated(pod) +} + // IsPodDeleted returns true if the pod is deleted. For the pod to be deleted, either: // 1. The pod object is deleted // 2. The pod's status is evicted @@ -854,7 +919,7 @@ func (kl *Kubelet) PodResourcesAreReclaimed(pod *v1.Pod, status v1.PodStatus) bo glog.V(3).Infof("Pod %q is terminated, but some containers have not been cleaned up: %+v", format.Pod(pod), runtimeStatus.ContainerStatuses) return false } - if kl.podVolumesExist(pod.UID) && !kl.kubeletConfiguration.KeepTerminatedPodVolumes { + if kl.podVolumesExist(pod.UID) && !kl.keepTerminatedPodVolumes { // We shouldn't delete pods whose volumes have not been cleaned up if we are not keeping terminated pod volumes glog.V(3).Infof("Pod %q is terminated, but some volumes have not been cleaned up", format.Pod(pod)) return false @@ -1338,16 +1403,21 @@ func (kl *Kubelet) convertStatusToAPIStatus(pod *v1.Pod, podStatus *kubecontaine // set status for Pods created on versions of kube older than 1.6 apiPodStatus.QOSClass = v1qos.GetPodQOS(pod) + oldPodStatus, found := kl.statusManager.GetPodStatus(pod.UID) + if !found { + oldPodStatus = pod.Status + } + apiPodStatus.ContainerStatuses = kl.convertToAPIContainerStatuses( pod, podStatus, - pod.Status.ContainerStatuses, + oldPodStatus.ContainerStatuses, pod.Spec.Containers, len(pod.Spec.InitContainers) > 0, false, ) apiPodStatus.InitContainerStatuses = kl.convertToAPIContainerStatuses( pod, podStatus, - pod.Status.InitContainerStatuses, + oldPodStatus.InitContainerStatuses, pod.Spec.InitContainers, len(pod.Spec.InitContainers) > 0, true, @@ -1414,7 +1484,7 @@ func (kl *Kubelet) convertToAPIContainerStatuses(pod *v1.Pod, podStatus *kubecon } oldStatus, found := oldStatuses[container.Name] if found { - if isInitContainer && oldStatus.State.Terminated != nil { + if oldStatus.State.Terminated != nil { // Do not update status on terminated init containers as // they can be removed at any time. status = &oldStatus @@ -1694,7 +1764,7 @@ func (kl *Kubelet) cleanupOrphanedPodCgroups(cgroupPods map[types.UID]cm.CgroupN // parent cgroup. If the volumes still exist, reduce the cpu shares for any // process in the cgroup to the minimum value while we wait. If the kubelet // is configured to keep terminated volumes, we will delete the cgroup and not block. - if podVolumesExist := kl.podVolumesExist(uid); podVolumesExist && !kl.kubeletConfiguration.KeepTerminatedPodVolumes { + if podVolumesExist := kl.podVolumesExist(uid); podVolumesExist && !kl.keepTerminatedPodVolumes { glog.V(3).Infof("Orphaned pod %q found, but volumes not yet removed.
Reducing cpu to minimum", uid) if err := pcm.ReduceCPULimits(val); err != nil { glog.Warningf("Failed to reduce cpu time for pod %q pending volume cleanup due to %v", uid, err) @@ -1762,13 +1832,13 @@ func hasHostNamespace(pod *v1.Pod) bool { func (kl *Kubelet) hasHostMountPVC(pod *v1.Pod) bool { for _, volume := range pod.Spec.Volumes { if volume.PersistentVolumeClaim != nil { - pvc, err := kl.kubeClient.Core().PersistentVolumeClaims(pod.Namespace).Get(volume.PersistentVolumeClaim.ClaimName, metav1.GetOptions{}) + pvc, err := kl.kubeClient.CoreV1().PersistentVolumeClaims(pod.Namespace).Get(volume.PersistentVolumeClaim.ClaimName, metav1.GetOptions{}) if err != nil { glog.Warningf("unable to retrieve pvc %s:%s - %v", pod.Namespace, volume.PersistentVolumeClaim.ClaimName, err) continue } if pvc != nil { - referencedVolume, err := kl.kubeClient.Core().PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{}) + referencedVolume, err := kl.kubeClient.CoreV1().PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{}) if err != nil { glog.Warningf("unable to retrieve pv %s - %v", pvc.Spec.VolumeName, err) continue diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_resources.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_resources.go index 15e4d9ac2386..dc47a85880e3 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_resources.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_resources.go @@ -22,7 +22,6 @@ import ( "github.com/golang/glog" "k8s.io/api/core/v1" - "k8s.io/client-go/kubernetes/scheme" "k8s.io/kubernetes/pkg/api/v1/resource" ) @@ -44,28 +43,14 @@ func (kl *Kubelet) defaultPodLimitsForDownwardAPI(pod *v1.Pod, container *v1.Con } allocatable := node.Status.Allocatable glog.Infof("allocatable: %v", allocatable) - podCopy, err := scheme.Scheme.Copy(pod) - if err != nil { - return nil, nil, fmt.Errorf("failed to perform a deep copy of pod object: %v", err) - } - outputPod, ok := podCopy.(*v1.Pod) - if !ok { - return nil, nil, fmt.Errorf("unexpected type returned from deep copy of pod object") - } + outputPod := pod.DeepCopy() for idx := range outputPod.Spec.Containers { resource.MergeContainerResourceLimits(&outputPod.Spec.Containers[idx], allocatable) } var outputContainer *v1.Container if container != nil { - containerCopy, err := scheme.Scheme.DeepCopy(container) - if err != nil { - return nil, nil, fmt.Errorf("failed to perform a deep copy of container object: %v", err) - } - outputContainer, ok = containerCopy.(*v1.Container) - if !ok { - return nil, nil, fmt.Errorf("unexpected type returned from deep copy of container object") - } + outputContainer = container.DeepCopy() resource.MergeContainerResourceLimits(outputContainer, allocatable) } return outputPod, outputContainer, nil diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_volumes.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_volumes.go index 6620cc07e2ec..79b0fe3ea3af 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_volumes.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_volumes.go @@ -69,7 +69,6 @@ func (kl *Kubelet) newVolumeMounterFromPlugins(spec *volume.Spec, pod *v1.Pod, o if err != nil { return nil, fmt.Errorf("can't use volume plugins for %s: %v", spec.Name(), err) } - opts.Containerized = kl.kubeletConfiguration.Containerized physicalMounter, err := plugin.NewMounter(spec, pod, opts) if err != nil { return nil, fmt.Errorf("failed to instantiate mounter for volume: %s using plugin: %s with a root cause: %v", spec.Name(), plugin.GetPluginName(), err) diff 
--git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/BUILD index ac2ccb3eadea..71bbe9fb145e 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/BUILD @@ -13,6 +13,7 @@ go_library( "rollback.go", "watch.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/kubeletconfig", deps = [ "//pkg/kubelet/apis/kubeletconfig:go_default_library", "//pkg/kubelet/apis/kubeletconfig/validation:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/BUILD index 995a26a9bfc9..4a2f55aa6a73 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/BUILD @@ -13,6 +13,7 @@ go_test( "configmap_test.go", "download_test.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint", library = ":go_default_library", deps = [ "//pkg/kubelet/apis/kubeletconfig:go_default_library", @@ -37,8 +38,9 @@ go_library( "configmap.go", "download.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/kubelet/apis/kubeletconfig:go_default_library", "//pkg/kubelet/apis/kubeletconfig/scheme:go_default_library", "//pkg/kubelet/kubeletconfig/status:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/checkpoint.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/checkpoint.go index 852ea7148890..be44aaaa39cd 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/checkpoint.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/checkpoint.go @@ -22,7 +22,7 @@ import ( apiv1 "k8s.io/api/core/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig" ) @@ -43,7 +43,7 @@ type Checkpoint interface { // DecodeCheckpoint is a helper for using the apimachinery to decode serialized checkpoints func DecodeCheckpoint(data []byte) (Checkpoint, error) { // decode the checkpoint - obj, err := runtime.Decode(api.Codecs.UniversalDecoder(), data) + obj, err := runtime.Decode(legacyscheme.Codecs.UniversalDecoder(), data) if err != nil { return nil, fmt.Errorf("failed to decode, error: %v", err) } @@ -52,7 +52,7 @@ func DecodeCheckpoint(data []byte) (Checkpoint, error) { // convert it to the external ConfigMap type, so we're consistently working with the external type outside of the on-disk representation cm := &apiv1.ConfigMap{} - err = api.Scheme.Convert(obj, cm, nil) + err = legacyscheme.Scheme.Convert(obj, cm, nil) if err != nil { return nil, fmt.Errorf("failed to convert decoded object into a v1 ConfigMap, error: %v", err) } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/download.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/download.go index 841d9434531f..297d974a1aa6 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/download.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/download.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" clientset "k8s.io/client-go/kubernetes" - 
"k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status" utilcodec "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec" utillog "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log" @@ -71,7 +71,7 @@ func NewRemoteConfigSource(source *apiv1.NodeConfigSource) (RemoteConfigSource, // e.g. the objects stored in the .cur and .lkg files by checkpoint/store/fsstore.go func DecodeRemoteConfigSource(data []byte) (RemoteConfigSource, error) { // decode the remote config source - obj, err := runtime.Decode(api.Codecs.UniversalDecoder(), data) + obj, err := runtime.Decode(legacyscheme.Codecs.UniversalDecoder(), data) if err != nil { return nil, fmt.Errorf("failed to decode, error: %v", err) } @@ -81,7 +81,7 @@ func DecodeRemoteConfigSource(data []byte) (RemoteConfigSource, error) { // convert it to the external NodeConfigSource type, so we're consistently working with the external type outside of the on-disk representation cs := &apiv1.NodeConfigSource{} - err = api.Scheme.Convert(obj, cs, nil) + err = legacyscheme.Scheme.Convert(obj, cs, nil) if err != nil { return nil, fmt.Errorf("failed to convert decoded object into a v1 NodeConfigSource, error: %v", err) } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/store/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/store/BUILD index c5c69f571209..37529c46abb4 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/store/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/store/BUILD @@ -12,6 +12,7 @@ go_test( "fsstore_test.go", "store_test.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/store", library = ":go_default_library", deps = [ "//pkg/kubelet/kubeletconfig/checkpoint:go_default_library", @@ -32,6 +33,7 @@ go_library( "fsstore.go", "store.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/store", deps = [ "//pkg/kubelet/kubeletconfig/checkpoint:go_default_library", "//pkg/kubelet/kubeletconfig/util/files:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/store/fsstore.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/store/fsstore.go index a56c4ec1659c..1026b97a842e 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/store/fsstore.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/store/fsstore.go @@ -56,10 +56,7 @@ func (s *fsStore) Initialize() error { if err := utilfiles.EnsureFile(s.fs, filepath.Join(s.checkpointsDir, curFile)); err != nil { return err } - if err := utilfiles.EnsureFile(s.fs, filepath.Join(s.checkpointsDir, lkgFile)); err != nil { - return err - } - return nil + return utilfiles.EnsureFile(s.fs, filepath.Join(s.checkpointsDir, lkgFile)) } func (s *fsStore) Exists(uid string) (bool, error) { @@ -77,10 +74,7 @@ func (s *fsStore) Save(c checkpoint.Checkpoint) error { return err } // save the file - if err := utilfiles.ReplaceFile(s.fs, filepath.Join(s.checkpointsDir, c.UID()), data); err != nil { - return err - } - return nil + return utilfiles.ReplaceFile(s.fs, filepath.Join(s.checkpointsDir, c.UID()), data) } func (s *fsStore) Load(uid string) (checkpoint.Checkpoint, error) { diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configfiles/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configfiles/BUILD index 90ea3d176b35..939e728f0122 100644 --- 
a/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configfiles/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configfiles/BUILD @@ -9,6 +9,7 @@ load( go_library( name = "go_default_library", srcs = ["configfiles.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configfiles", deps = [ "//pkg/kubelet/apis/kubeletconfig:go_default_library", "//pkg/kubelet/apis/kubeletconfig/scheme:go_default_library", @@ -34,6 +35,7 @@ filegroup( go_test( name = "go_default_test", srcs = ["configfiles_test.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configfiles", library = ":go_default_library", deps = [ "//pkg/kubelet/apis/kubeletconfig:go_default_library", @@ -42,7 +44,6 @@ go_test( "//pkg/kubelet/kubeletconfig/util/files:go_default_library", "//pkg/kubelet/kubeletconfig/util/test:go_default_library", "//pkg/util/filesystem:go_default_library", - "//vendor/github.com/davecgh/go-spew/spew:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configfiles/configfiles.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configfiles/configfiles.go index b3c763c162c8..fe55e0b938f7 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configfiles/configfiles.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configfiles/configfiles.go @@ -72,5 +72,23 @@ func (loader *fsLoader) Load() (*kubeletconfig.KubeletConfiguration, error) { return nil, fmt.Errorf("init config file %q was empty, but some parameters are required", path) } - return utilcodec.DecodeKubeletConfiguration(loader.kubeletCodecs, data) + kc, err := utilcodec.DecodeKubeletConfiguration(loader.kubeletCodecs, data) + if err != nil { + return nil, err + } + + // make all paths absolute + resolveRelativePaths(kubeletconfig.KubeletConfigurationPathRefs(kc), loader.configDir) + return kc, nil +} + +// resolveRelativePaths makes relative paths absolute by resolving them against `root` +func resolveRelativePaths(paths []*string, root string) { + for _, path := range paths { + // leave empty paths alone, "no path" is a valid input + // do not attempt to resolve paths that are already absolute + if len(*path) > 0 && !filepath.IsAbs(*path) { + *path = filepath.Join(root, *path) + } + } } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/controller.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/controller.go index 036b41e81461..6ff4cd6b2840 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/controller.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/controller.go @@ -47,7 +47,7 @@ const ( // - downloads new configuration from the API server // - validates configuration // - tracks the last-known-good configuration, and rolls-back to last-known-good when necessary -// For more information, see the proposal: https://github.com/kubernetes/community/blob/master/contributors/design-proposals/dynamic-kubelet-configuration.md +// For more information, see the proposal: https://github.com/kubernetes/community/blob/master/contributors/design-proposals/node/dynamic-kubelet-configuration.md type Controller struct { // dynamicConfig, if true, indicates that we should sync config from the API server dynamicConfig bool @@ -248,10 +248,7 @@ func (cc *Controller) StartSync(client clientset.Interface, nodeName string) { func (cc *Controller) initialize() error { utillog.Infof("ensuring filesystem is set up correctly") // initialize local checkpoint storage 
location - if err := cc.checkpointStore.Initialize(); err != nil { - return err - } - return nil + return cc.checkpointStore.Initialize() } // localConfig returns the initConfig if it is loaded, otherwise returns the defaultConfig. diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status/BUILD index 0226c794b54f..6de1a105e308 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status/BUILD @@ -8,8 +8,10 @@ load( go_library( name = "go_default_library", srcs = ["status.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/kubelet/kubeletconfig/util/equal:go_default_library", "//pkg/kubelet/kubeletconfig/util/log:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status/status.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status/status.go index 22d0d1865ce0..b83e6891d78f 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status/status.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status/status.go @@ -26,7 +26,8 @@ import ( kuberuntime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/strategicpatch" clientset "k8s.io/client-go/kubernetes" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" + api "k8s.io/kubernetes/pkg/apis/core" utilequal "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/equal" utillog "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log" ) @@ -257,18 +258,18 @@ func (c *configOKCondition) Sync(client clientset.Interface, nodeName string) { // generate the patch mediaType := "application/json" - info, ok := kuberuntime.SerializerInfoForMediaType(api.Codecs.SupportedMediaTypes(), mediaType) + info, ok := kuberuntime.SerializerInfoForMediaType(legacyscheme.Codecs.SupportedMediaTypes(), mediaType) if !ok { err = fmt.Errorf("unsupported media type %q", mediaType) return } - versions := api.Registry.EnabledVersionsForGroup(api.GroupName) + versions := legacyscheme.Registry.EnabledVersionsForGroup(api.GroupName) if len(versions) == 0 { err = fmt.Errorf("no enabled versions for group %q", api.GroupName) return } // the "best" version supposedly comes first in the list returned from apiv1.Registry.EnabledVersionsForGroup - encoder := api.Codecs.EncoderForVersion(info.Serializer, versions[0]) + encoder := legacyscheme.Codecs.EncoderForVersion(info.Serializer, versions[0]) before, err := kuberuntime.Encode(encoder, node) if err != nil { diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec/BUILD index df4916b26c87..50b123e7a3a2 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec/BUILD @@ -8,9 +8,10 @@ load( go_library( name = "go_default_library", srcs = ["codec.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/install:go_default_library", + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/core/install:go_default_library", "//pkg/kubelet/apis/kubeletconfig:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec/codec.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec/codec.go index 148967caba83..3316f955af20 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec/codec.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec/codec.go @@ -20,11 +20,11 @@ import ( "fmt" // ensure the core apis are installed - _ "k8s.io/kubernetes/pkg/api/install" + _ "k8s.io/kubernetes/pkg/apis/core/install" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig" ) @@ -34,18 +34,18 @@ import ( func NewJSONEncoder(groupName string) (runtime.Encoder, error) { // encode to json mediaType := "application/json" - info, ok := runtime.SerializerInfoForMediaType(api.Codecs.SupportedMediaTypes(), mediaType) + info, ok := runtime.SerializerInfoForMediaType(legacyscheme.Codecs.SupportedMediaTypes(), mediaType) if !ok { return nil, fmt.Errorf("unsupported media type %q", mediaType) } - versions := api.Registry.EnabledVersionsForGroup(groupName) + versions := legacyscheme.Registry.EnabledVersionsForGroup(groupName) if len(versions) == 0 { return nil, fmt.Errorf("no enabled versions for group %q", groupName) } - // the "best" version supposedly comes first in the list returned from api.Registry.EnabledVersionsForGroup - return api.Codecs.EncoderForVersion(info.Serializer, versions[0]), nil + // the "best" version supposedly comes first in the list returned from legacyscheme.Registry.EnabledVersionsForGroup + return legacyscheme.Codecs.EncoderForVersion(info.Serializer, versions[0]), nil } // DecodeKubeletConfiguration decodes a serialized KubeletConfiguration to the internal type diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/equal/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/equal/BUILD index aa0c8e0402e1..8add657cda52 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/equal/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/equal/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["equal.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/equal", deps = ["//vendor/k8s.io/api/core/v1:go_default_library"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/files/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/files/BUILD index f03ed24586ce..ae93b6f99e3e 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/files/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/files/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["files.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/files", deps = ["//pkg/util/filesystem:go_default_library"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/files/files.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/files/files.go index 62d96fa5ad5e..aa76e151d234 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/files/files.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/files/files.go @@ -63,39 +63,53 @@ func EnsureFile(fs utilfs.Filesystem, path string) error { return file.Close() } -// ReplaceFile replaces the contents of the file at `path` with `data` by writing 
to a tmp file in the same -// dir as `path` and renaming the tmp file over `path`. The file does not have to exist to use ReplaceFile. -func ReplaceFile(fs utilfs.Filesystem, path string, data []byte) error { +// WriteTmpFile creates a temporary file at `path`, writes `data` into it, and fsyncs the file +func WriteTmpFile(fs utilfs.Filesystem, path string, data []byte) (tmpPath string, retErr error) { dir := filepath.Dir(path) prefix := filepath.Base(path) // create the tmp file tmpFile, err := fs.TempFile(dir, prefix) if err != nil { - return err + return "", err } + defer func() { + // close the file, return the close error only if there haven't been any other errors + if err := tmpFile.Close(); retErr == nil { + retErr = err + } + // if there was an error writing, syncing, or closing, delete the temporary file and return the error + if retErr != nil { + if err := fs.Remove(tmpPath); err != nil { + retErr = fmt.Errorf("attempted to remove temporary file %q after error %v, but failed due to error: %v", path, retErr, err) + } + tmpPath = "" + } + }() // Name() will be an absolute path when using utilfs.DefaultFS, because ioutil.TempFile passes // an absolute path to os.Open, and we ensure similar behavior in utilfs.FakeFS for testing. - tmpPath := tmpFile.Name() + tmpPath = tmpFile.Name() // write data if _, err := tmpFile.Write(data); err != nil { - return err + return tmpPath, err } // sync file, to ensure it's written in case a hard reset happens - if err := tmpFile.Sync(); err != nil { - return err - } - if err := tmpFile.Close(); err != nil { - return err - } + return tmpPath, tmpFile.Sync() +} - // rename over existing file - if err := fs.Rename(tmpPath, path); err != nil { +// ReplaceFile replaces the contents of the file at `path` with `data` by writing to a tmp file in the same +// dir as `path` and renaming the tmp file over `path`. The file does not have to exist to use ReplaceFile. +// Note ReplaceFile calls fsync. 
+func ReplaceFile(fs utilfs.Filesystem, path string, data []byte) error { + // write data to a temporary file + tmpPath, err := WriteTmpFile(fs, path, data) + if err != nil { return err } - return nil + // rename over existing file + return fs.Rename(tmpPath, path) } // DirExists returns true if a directory exists at `path`, false if `path` does not exist, otherwise an error @@ -121,8 +135,5 @@ func EnsureDir(fs utilfs.Filesystem, path string) error { } // Assert: dir does not exist // create the dir - if err := fs.MkdirAll(path, defaultPerm); err != nil { - return err - } - return nil + return fs.MkdirAll(path, defaultPerm) } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log/BUILD index 39e96f84c3bd..34b20a27e345 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["log.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log", deps = ["//vendor/github.com/golang/glog:go_default_library"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/panic/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/panic/BUILD index eb72800ec04f..818059ac632f 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/panic/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/panic/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["panic.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/panic", deps = ["//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/watch.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/watch.go index 2c0363c7cabc..ecc255b962f6 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/watch.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubeletconfig/watch.go @@ -47,12 +47,12 @@ func newSharedNodeInformer(client clientset.Interface, nodeName string, lw := &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (kuberuntime.Object, error) { - return client.Core().Nodes().List(metav1.ListOptions{ + return client.CoreV1().Nodes().List(metav1.ListOptions{ FieldSelector: fieldselector.String(), }) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - return client.Core().Nodes().Watch(metav1.ListOptions{ + return client.CoreV1().Nodes().Watch(metav1.ListOptions{ FieldSelector: fieldselector.String(), ResourceVersion: options.ResourceVersion, }) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/BUILD index a2599346fc91..e46a4dcc4121 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/BUILD @@ -23,9 +23,10 @@ go_library( "legacy.go", "security_context.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/kuberuntime", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/v1/helper:go_default_library", + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/core/v1/helper:go_default_library", "//pkg/credentialprovider:go_default_library", "//pkg/kubelet/apis/cri:go_default_library", "//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library", @@ -33,6 +34,7 @@ go_library( "//pkg/kubelet/container:go_default_library", 
"//pkg/kubelet/events:go_default_library", "//pkg/kubelet/images:go_default_library", + "//pkg/kubelet/kuberuntime/logs:go_default_library", "//pkg/kubelet/lifecycle:go_default_library", "//pkg/kubelet/metrics:go_default_library", "//pkg/kubelet/prober/results:go_default_library", @@ -47,8 +49,6 @@ go_library( "//pkg/util/tail:go_default_library", "//pkg/util/version:go_default_library", "//vendor/github.com/armon/circbuf:go_default_library", - "//vendor/github.com/docker/docker/pkg/jsonlog:go_default_library", - "//vendor/github.com/fsnotify/fsnotify:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/google/cadvisor/info/v1:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", @@ -72,13 +72,13 @@ go_test( "kuberuntime_container_test.go", "kuberuntime_gc_test.go", "kuberuntime_image_test.go", - "kuberuntime_logs_test.go", "kuberuntime_manager_test.go", "kuberuntime_sandbox_test.go", "labels_test.go", "legacy_test.go", "security_context_test.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/kuberuntime", library = ":go_default_library", deps = [ "//pkg/credentialprovider:go_default_library", @@ -111,6 +111,9 @@ filegroup( filegroup( name = "all-srcs", - srcs = [":package-srcs"], + srcs = [ + ":package-srcs", + "//pkg/kubelet/kuberuntime/logs:all-srcs", + ], tags = ["automanaged"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/fake_kuberuntime_manager.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/fake_kuberuntime_manager.go index 94571806f0a8..a630588e2e9a 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/fake_kuberuntime_manager.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/fake_kuberuntime_manager.go @@ -33,6 +33,10 @@ import ( proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results" ) +const ( + fakeSeccompProfileRoot = "/fakeSeccompProfileRoot" +) + type fakeHTTP struct { url string err error @@ -43,18 +47,25 @@ func (f *fakeHTTP) Get(url string) (*http.Response, error) { return nil, f.err } -type fakePodDeletionProvider struct { - pods map[types.UID]struct{} +type fakePodStateProvider struct { + existingPods map[types.UID]struct{} + runningPods map[types.UID]struct{} } -func newFakePodDeletionProvider() *fakePodDeletionProvider { - return &fakePodDeletionProvider{ - pods: make(map[types.UID]struct{}), +func newFakePodStateProvider() *fakePodStateProvider { + return &fakePodStateProvider{ + existingPods: make(map[types.UID]struct{}), + runningPods: make(map[types.UID]struct{}), } } -func (f *fakePodDeletionProvider) IsPodDeleted(uid types.UID) bool { - _, found := f.pods[uid] +func (f *fakePodStateProvider) IsPodDeleted(uid types.UID) bool { + _, found := f.existingPods[uid] + return !found +} + +func (f *fakePodStateProvider) IsPodTerminated(uid types.UID) bool { + _, found := f.runningPods[uid] return !found } @@ -71,6 +82,7 @@ func NewFakeKubeRuntimeManager(runtimeService internalapi.RuntimeService, imageS runtimeService: runtimeService, imageService: imageService, keyring: keyring, + seccompProfileRoot: fakeSeccompProfileRoot, internalLifecycle: cm.NewFakeInternalContainerLifecycle(), } @@ -79,7 +91,7 @@ func NewFakeKubeRuntimeManager(runtimeService internalapi.RuntimeService, imageS return nil, err } - kubeRuntimeManager.containerGC = NewContainerGC(runtimeService, newFakePodDeletionProvider(), kubeRuntimeManager) + kubeRuntimeManager.containerGC = NewContainerGC(runtimeService, newFakePodStateProvider(), kubeRuntimeManager) kubeRuntimeManager.runtimeName = 
typedVersion.RuntimeName kubeRuntimeManager.imagePuller = images.NewImageManager( kubecontainer.FilterEventRecorder(recorder), diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/helpers.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/helpers.go index 2ecc2e81ed49..d1ee8de3f982 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/helpers.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/helpers.go @@ -25,7 +25,7 @@ import ( "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" - v1helper "k8s.io/kubernetes/pkg/api/v1/helper" + v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" ) @@ -41,11 +41,6 @@ const ( minQuotaPeriod = 1000 ) -var ( - // The default dns opt strings - defaultDNSOptions = []string{"ndots:5"} -) - type podsByID []*kubecontainer.Pod func (b podsByID) Len() int { return len(b) } @@ -278,7 +273,7 @@ func (m *kubeGenericRuntimeManager) getSeccompProfileFromAnnotations(annotations if strings.HasPrefix(profile, "localhost/") { name := strings.TrimPrefix(profile, "localhost/") fname := filepath.Join(m.seccompProfileRoot, filepath.FromSlash(name)) - return fname + return "localhost/" + fname } return profile diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container.go index a673c8ceedb9..de0b6fdd7f38 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container.go @@ -89,6 +89,7 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb // Step 1: pull the image. imageRef, msg, err := m.imagePuller.EnsureImageExists(pod, container, pullSecrets) if err != nil { + m.recordContainerEvent(pod, container, "", v1.EventTypeWarning, events.FailedToCreateContainer, "Error: %v", grpc.ErrorDesc(err)) return msg, err } @@ -173,7 +174,7 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb // generateContainerConfig generates container config for kubelet runtime v1. func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Container, pod *v1.Pod, restartCount int, podIP, imageRef string) (*runtimeapi.ContainerConfig, error) { - opts, _, err := m.runtimeHelper.GenerateRunContainerOptions(pod, container, podIP) + opts, err := m.runtimeHelper.GenerateRunContainerOptions(pod, container, podIP) if err != nil { return nil, err } @@ -182,13 +183,10 @@ func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Contai if err != nil { return nil, err } - if uid != nil { - // Verify RunAsNonRoot. Non-root verification only supports numeric user. - if err := verifyRunAsNonRoot(pod, container, *uid); err != nil { - return nil, err - } - } else if username != "" { - glog.Warningf("Non-root verification doesn't support non-numeric user (%s)", username) + + // Verify RunAsNonRoot. Non-root verification only supports numeric user. + if err := verifyRunAsNonRoot(pod, container, uid, username); err != nil { + return nil, err } command, args := kubecontainer.ExpandContainerCommandAndArgs(container, opts.Envs) @@ -368,20 +366,22 @@ func makeUID() string { // getTerminationMessage looks on the filesystem for the provided termination message path, returning a limited // amount of those bytes, or returns true if the logs should be checked. 
func getTerminationMessage(status *runtimeapi.ContainerStatus, terminationMessagePath string, fallbackToLogs bool) (string, bool) { - if len(terminationMessagePath) != 0 { - for _, mount := range status.Mounts { - if mount.ContainerPath != terminationMessagePath { - continue - } - path := mount.HostPath - data, _, err := tail.ReadAtMost(path, kubecontainer.MaxContainerTerminationMessageLength) - if err != nil { - return fmt.Sprintf("Error on reading termination log %s: %v", path, err), false - } - if !fallbackToLogs || len(data) != 0 { - return string(data), false + if len(terminationMessagePath) == 0 { + return "", fallbackToLogs + } + for _, mount := range status.Mounts { + if mount.ContainerPath != terminationMessagePath { + continue + } + path := mount.HostPath + data, _, err := tail.ReadAtMost(path, kubecontainer.MaxContainerTerminationMessageLength) + if err != nil { + if os.IsNotExist(err) { + return "", fallbackToLogs } + return fmt.Sprintf("Error on reading termination log %s: %v", path, err), false } + return string(data), (fallbackToLogs && len(data) == 0) } return "", fallbackToLogs } @@ -424,8 +424,16 @@ func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, n fallbackToLogs := annotatedInfo.TerminationMessagePolicy == v1.TerminationMessageFallbackToLogsOnError && cStatus.ExitCode != 0 tMessage, checkLogs := getTerminationMessage(status, annotatedInfo.TerminationMessagePath, fallbackToLogs) if checkLogs { - path := buildFullContainerLogsPath(uid, labeledInfo.ContainerName, annotatedInfo.RestartCount) - tMessage = m.readLastStringFromContainerLogs(path) + // if dockerLegacyService is populated, we're supposed to use it to fetch logs + if m.legacyLogProvider != nil { + tMessage, err = m.legacyLogProvider.GetContainerLogTail(uid, name, namespace, kubecontainer.ContainerID{Type: m.runtimeName, ID: c.Id}) + if err != nil { + tMessage = fmt.Sprintf("Error reading termination message from logs: %v", err) + } + } else { + path := buildFullContainerLogsPath(uid, labeledInfo.ContainerName, annotatedInfo.RestartCount) + tMessage = m.readLastStringFromContainerLogs(path) + } } // Use the termination message written by the application is not empty if len(tMessage) != 0 { @@ -765,6 +773,8 @@ func (m *kubeGenericRuntimeManager) GetExec(id kubecontainer.ContainerID, cmd [] Cmd: cmd, Tty: tty, Stdin: stdin, + Stdout: stdout, + Stderr: stderr, } resp, err := m.runtimeService.Exec(req) if err != nil { @@ -779,6 +789,8 @@ func (m *kubeGenericRuntimeManager) GetAttach(id kubecontainer.ContainerID, stdi req := &runtimeapi.AttachRequest{ ContainerId: id.ID, Stdin: stdin, + Stdout: stdout, + Stderr: stderr, Tty: tty, } resp, err := m.runtimeService.Attach(req) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_gc.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_gc.go index 063a89d6b310..4bf35638c398 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_gc.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_gc.go @@ -25,6 +25,7 @@ import ( "github.com/golang/glog" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" @@ -32,17 +33,17 @@ import ( // containerGC is the manager of garbage collection. 
type containerGC struct { - client internalapi.RuntimeService - manager *kubeGenericRuntimeManager - podDeletionProvider podDeletionProvider + client internalapi.RuntimeService + manager *kubeGenericRuntimeManager + podStateProvider podStateProvider } // NewContainerGC creates a new containerGC. -func NewContainerGC(client internalapi.RuntimeService, podDeletionProvider podDeletionProvider, manager *kubeGenericRuntimeManager) *containerGC { +func NewContainerGC(client internalapi.RuntimeService, podStateProvider podStateProvider, manager *kubeGenericRuntimeManager) *containerGC { return &containerGC{ - client: client, - manager: manager, - podDeletionProvider: podDeletionProvider, + client: client, + manager: manager, + podStateProvider: podStateProvider, } } @@ -200,7 +201,7 @@ func (cgc *containerGC) evictableContainers(minAge time.Duration) (containersByE } // evict all containers that are evictable -func (cgc *containerGC) evictContainers(gcPolicy kubecontainer.ContainerGCPolicy, allSourcesReady bool, evictNonDeletedPods bool) error { +func (cgc *containerGC) evictContainers(gcPolicy kubecontainer.ContainerGCPolicy, allSourcesReady bool, evictTerminatedPods bool) error { // Separate containers by evict units. evictUnits, err := cgc.evictableContainers(gcPolicy.MinAge) if err != nil { @@ -210,7 +211,7 @@ func (cgc *containerGC) evictContainers(gcPolicy kubecontainer.ContainerGCPolicy // Remove deleted pod containers if all sources are ready. if allSourcesReady { for key, unit := range evictUnits { - if cgc.podDeletionProvider.IsPodDeleted(key.uid) || evictNonDeletedPods { + if cgc.podStateProvider.IsPodDeleted(key.uid) || (cgc.podStateProvider.IsPodTerminated(key.uid) && evictTerminatedPods) { cgc.removeOldestN(unit, len(unit)) // Remove all. delete(evictUnits, key) } @@ -252,12 +253,18 @@ func (cgc *containerGC) evictContainers(gcPolicy kubecontainer.ContainerGCPolicy // 2. contains no containers. // 3. belong to a non-existent (i.e., already removed) pod, or is not the // most recently created sandbox for the pod. -func (cgc *containerGC) evictSandboxes(evictNonDeletedPods bool) error { +func (cgc *containerGC) evictSandboxes(evictTerminatedPods bool) error { containers, err := cgc.manager.getKubeletContainers(true) if err != nil { return err } + // collect all the PodSandboxId of container + sandboxIDs := sets.NewString() + for _, container := range containers { + sandboxIDs.Insert(container.PodSandboxId) + } + sandboxes, err := cgc.manager.getKubeletSandboxes(true) if err != nil { return err @@ -277,15 +284,7 @@ func (cgc *containerGC) evictSandboxes(evictNonDeletedPods bool) error { } // Set sandboxes that still have containers to be active. - hasContainers := false - sandboxID := sandbox.Id - for _, container := range containers { - if container.PodSandboxId == sandboxID { - hasContainers = true - break - } - } - if hasContainers { + if sandboxIDs.Has(sandbox.Id) { sandboxInfo.active = true } @@ -298,7 +297,7 @@ func (cgc *containerGC) evictSandboxes(evictNonDeletedPods bool) error { } for podUID, sandboxes := range sandboxesByPod { - if cgc.podDeletionProvider.IsPodDeleted(podUID) || evictNonDeletedPods { + if cgc.podStateProvider.IsPodDeleted(podUID) || (cgc.podStateProvider.IsPodTerminated(podUID) && evictTerminatedPods) { // Remove all evictable sandboxes if the pod has been removed. // Note that the latest dead sandbox is also removed if there is // already an active one. 
@@ -324,7 +323,7 @@ func (cgc *containerGC) evictPodLogsDirectories(allSourcesReady bool) error { for _, dir := range dirs { name := dir.Name() podUID := types.UID(name) - if !cgc.podDeletionProvider.IsPodDeleted(podUID) { + if !cgc.podStateProvider.IsPodDeleted(podUID) { continue } err := osInterface.RemoveAll(filepath.Join(podLogsRootDirectory, name)) @@ -358,14 +357,14 @@ func (cgc *containerGC) evictPodLogsDirectories(allSourcesReady bool) error { // * removes oldest dead containers by enforcing gcPolicy.MaxContainers. // * gets evictable sandboxes which are not ready and contains no containers. // * removes evictable sandboxes. -func (cgc *containerGC) GarbageCollect(gcPolicy kubecontainer.ContainerGCPolicy, allSourcesReady bool, evictNonDeletedPods bool) error { +func (cgc *containerGC) GarbageCollect(gcPolicy kubecontainer.ContainerGCPolicy, allSourcesReady bool, evictTerminatedPods bool) error { // Remove evictable containers - if err := cgc.evictContainers(gcPolicy, allSourcesReady, evictNonDeletedPods); err != nil { + if err := cgc.evictContainers(gcPolicy, allSourcesReady, evictTerminatedPods); err != nil { return err } // Remove sandboxes with zero containers - if err := cgc.evictSandboxes(evictNonDeletedPods); err != nil { + if err := cgc.evictSandboxes(evictTerminatedPods); err != nil { return err } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_logs.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_logs.go index 752e41b76e72..ee8dc4c81fe5 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_logs.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_logs.go @@ -17,361 +17,19 @@ limitations under the License. package kuberuntime import ( - "bufio" - "bytes" - "encoding/json" - "errors" - "fmt" "io" - "math" - "os" "time" - "github.com/docker/docker/pkg/jsonlog" - "github.com/fsnotify/fsnotify" - "github.com/golang/glog" - "k8s.io/api/core/v1" - runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" - "k8s.io/kubernetes/pkg/util/tail" + "k8s.io/kubernetes/pkg/kubelet/kuberuntime/logs" ) -// Notice that the current kuberuntime logs implementation doesn't handle -// log rotation. -// * It will not retrieve logs in rotated log file. -// * If log rotation happens when following the log: -// * If the rotation is using create mode, we'll still follow the old file. -// * If the rotation is using copytruncate, we'll be reading at the original position and get nothing. -// TODO(random-liu): Support log rotation. - -// streamType is the type of the stream. -type streamType string - -const ( - stderrType streamType = "stderr" - stdoutType streamType = "stdout" - - // timeFormat is the time format used in the log. - timeFormat = time.RFC3339Nano - // blockSize is the block size used in tail. - blockSize = 1024 - - // stateCheckPeriod is the period to check container state while following - // the container log. Kubelet should not keep following the log when the - // container is not running. - stateCheckPeriod = 5 * time.Second -) - -var ( - // eol is the end-of-line sign in the log. - eol = []byte{'\n'} - // delimiter is the delimiter for timestamp and streamtype in log line. - delimiter = []byte{' '} -) - -// logMessage is the internal log type. -type logMessage struct { - timestamp time.Time - stream streamType - log []byte -} - -// reset resets the log to nil. 
-func (l *logMessage) reset() { - l.timestamp = time.Time{} - l.stream = "" - l.log = nil -} - -// logOptions is the internal type of all log options. -type logOptions struct { - tail int64 - bytes int64 - since time.Time - follow bool - timestamp bool -} - -// newLogOptions convert the v1.PodLogOptions to internal logOptions. -func newLogOptions(apiOpts *v1.PodLogOptions, now time.Time) *logOptions { - opts := &logOptions{ - tail: -1, // -1 by default which means read all logs. - bytes: -1, // -1 by default which means read all logs. - follow: apiOpts.Follow, - timestamp: apiOpts.Timestamps, - } - if apiOpts.TailLines != nil { - opts.tail = *apiOpts.TailLines - } - if apiOpts.LimitBytes != nil { - opts.bytes = *apiOpts.LimitBytes - } - if apiOpts.SinceSeconds != nil { - opts.since = now.Add(-time.Duration(*apiOpts.SinceSeconds) * time.Second) - } - if apiOpts.SinceTime != nil && apiOpts.SinceTime.After(opts.since) { - opts.since = apiOpts.SinceTime.Time - } - return opts -} - // ReadLogs read the container log and redirect into stdout and stderr. // Note that containerID is only needed when following the log, or else // just pass in empty string "". func (m *kubeGenericRuntimeManager) ReadLogs(path, containerID string, apiOpts *v1.PodLogOptions, stdout, stderr io.Writer) error { - f, err := os.Open(path) - if err != nil { - return fmt.Errorf("failed to open log file %q: %v", path, err) - } - defer f.Close() - // Convert v1.PodLogOptions into internal log options. - opts := newLogOptions(apiOpts, time.Now()) - - // Search start point based on tail line. - start, err := tail.FindTailLineStartIndex(f, opts.tail) - if err != nil { - return fmt.Errorf("failed to tail %d lines of log file %q: %v", opts.tail, path, err) - } - if _, err := f.Seek(start, os.SEEK_SET); err != nil { - return fmt.Errorf("failed to seek %d in log file %q: %v", start, path, err) - } - - // Start parsing the logs. - r := bufio.NewReader(f) - // Do not create watcher here because it is not needed if `Follow` is false. - var watcher *fsnotify.Watcher - var parse parseFunc - writer := newLogWriter(stdout, stderr, opts) - msg := &logMessage{} - for { - l, err := r.ReadBytes(eol[0]) - if err != nil { - if err != io.EOF { // This is an real error - return fmt.Errorf("failed to read log file %q: %v", path, err) - } - if !opts.follow { - // Return directly when reading to the end if not follow. - if len(l) > 0 { - glog.Warningf("Incomplete line in log file %q: %q", path, l) - } - glog.V(2).Infof("Finish parsing log file %q", path) - return nil - } - // Reset seek so that if this is an incomplete line, - // it will be read again. - if _, err := f.Seek(-int64(len(l)), os.SEEK_CUR); err != nil { - return fmt.Errorf("failed to reset seek in log file %q: %v", path, err) - } - if watcher == nil { - // Intialize the watcher if it has not been initialized yet. - if watcher, err = fsnotify.NewWatcher(); err != nil { - return fmt.Errorf("failed to create fsnotify watcher: %v", err) - } - defer watcher.Close() - if err := watcher.Add(f.Name()); err != nil { - return fmt.Errorf("failed to watch file %q: %v", f.Name(), err) - } - } - // Wait until the next log change. - if found, err := m.waitLogs(containerID, watcher); !found { - return err - } - continue - } - if parse == nil { - // Intialize the log parsing function. - parse, err = getParseFunc(l) - if err != nil { - return fmt.Errorf("failed to get parse function: %v", err) - } - } - // Parse the log line. 
- msg.reset() - if err := parse(l, msg); err != nil { - glog.Errorf("Failed with err %v when parsing log for log file %q: %q", err, path, l) - continue - } - // Write the log line into the stream. - if err := writer.write(msg); err != nil { - if err == errMaximumWrite { - glog.V(2).Infof("Finish parsing log file %q, hit bytes limit %d(bytes)", path, opts.bytes) - return nil - } - glog.Errorf("Failed with err %v when writing log for log file %q: %+v", err, path, msg) - return err - } - } -} - -// waitLogs wait for the next log write. It returns a boolean and an error. The boolean -// indicates whether a new log is found; the error is error happens during waiting new logs. -func (m *kubeGenericRuntimeManager) waitLogs(id string, w *fsnotify.Watcher) (bool, error) { - errRetry := 5 - for { - select { - case e := <-w.Events: - switch e.Op { - case fsnotify.Write: - return true, nil - default: - glog.Errorf("Unexpected fsnotify event: %v, retrying...", e) - } - case err := <-w.Errors: - glog.Errorf("Fsnotify watch error: %v, %d error retries remaining", err, errRetry) - if errRetry == 0 { - return false, err - } - errRetry-- - case <-time.After(stateCheckPeriod): - s, err := m.runtimeService.ContainerStatus(id) - if err != nil { - return false, err - } - // Only keep following container log when it is running. - if s.State != runtimeapi.ContainerState_CONTAINER_RUNNING { - glog.Errorf("Container %q is not running (state=%q)", id, s.State) - // Do not return error because it's normal that the container stops - // during waiting. - return false, nil - } - } - } -} - -// parseFunc is a function parsing one log line to the internal log type. -// Notice that the caller must make sure logMessage is not nil. -type parseFunc func([]byte, *logMessage) error - -var parseFuncs []parseFunc = []parseFunc{ - parseCRILog, // CRI log format parse function - parseDockerJSONLog, // Docker JSON log format parse function -} - -// parseCRILog parses logs in CRI log format. CRI Log format example: -// 2016-10-06T00:17:09.669794202Z stdout log content 1 -// 2016-10-06T00:17:09.669794203Z stderr log content 2 -func parseCRILog(log []byte, msg *logMessage) error { - var err error - // Parse timestamp - idx := bytes.Index(log, delimiter) - if idx < 0 { - return fmt.Errorf("timestamp is not found") - } - msg.timestamp, err = time.Parse(timeFormat, string(log[:idx])) - if err != nil { - return fmt.Errorf("unexpected timestamp format %q: %v", timeFormat, err) - } - - // Parse stream type - log = log[idx+1:] - idx = bytes.Index(log, delimiter) - if idx < 0 { - return fmt.Errorf("stream type is not found") - } - msg.stream = streamType(log[:idx]) - if msg.stream != stdoutType && msg.stream != stderrType { - return fmt.Errorf("unexpected stream type %q", msg.stream) - } - - // Get log content - msg.log = log[idx+1:] - - return nil -} - -// parseDockerJSONLog parses logs in Docker JSON log format. Docker JSON log format -// example: -// {"log":"content 1","stream":"stdout","time":"2016-10-20T18:39:20.57606443Z"} -// {"log":"content 2","stream":"stderr","time":"2016-10-20T18:39:20.57606444Z"} -func parseDockerJSONLog(log []byte, msg *logMessage) error { - var l = &jsonlog.JSONLog{} - l.Reset() - - // TODO: JSON decoding is fairly expensive, we should evaluate this. 
- if err := json.Unmarshal(log, l); err != nil { - return fmt.Errorf("failed with %v to unmarshal log %q", err, l) - } - msg.timestamp = l.Created - msg.stream = streamType(l.Stream) - msg.log = []byte(l.Log) - return nil -} - -// getParseFunc returns proper parse function based on the sample log line passed in. -func getParseFunc(log []byte) (parseFunc, error) { - for _, p := range parseFuncs { - if err := p(log, &logMessage{}); err == nil { - return p, nil - } - } - return nil, fmt.Errorf("unsupported log format: %q", log) -} - -// logWriter controls the writing into the stream based on the log options. -type logWriter struct { - stdout io.Writer - stderr io.Writer - opts *logOptions - remain int64 -} - -// errMaximumWrite is returned when all bytes have been written. -var errMaximumWrite = errors.New("maximum write") - -// errShortWrite is returned when the message is not fully written. -var errShortWrite = errors.New("short write") - -func newLogWriter(stdout io.Writer, stderr io.Writer, opts *logOptions) *logWriter { - w := &logWriter{ - stdout: stdout, - stderr: stderr, - opts: opts, - remain: math.MaxInt64, // initialize it as infinity - } - if opts.bytes >= 0 { - w.remain = opts.bytes - } - return w -} + opts := logs.NewLogOptions(apiOpts, time.Now()) -// writeLogs writes logs into stdout, stderr. -func (w *logWriter) write(msg *logMessage) error { - if msg.timestamp.Before(w.opts.since) { - // Skip the line because it's older than since - return nil - } - line := msg.log - if w.opts.timestamp { - prefix := append([]byte(msg.timestamp.Format(timeFormat)), delimiter[0]) - line = append(prefix, line...) - } - // If the line is longer than the remaining bytes, cut it. - if int64(len(line)) > w.remain { - line = line[:w.remain] - } - // Get the proper stream to write to. 
- var stream io.Writer - switch msg.stream { - case stdoutType: - stream = w.stdout - case stderrType: - stream = w.stderr - default: - return fmt.Errorf("unexpected stream type %q", msg.stream) - } - n, err := stream.Write(line) - w.remain -= int64(n) - if err != nil { - return err - } - // If the line has not been fully written, return errShortWrite - if n < len(line) { - return errShortWrite - } - // If there are no more bytes left, return errMaximumWrite - if w.remain <= 0 { - return errMaximumWrite - } - return nil + return logs.ReadLogs(path, containerID, opts, m.runtimeService, stdout, stderr) } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_manager.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_manager.go index 9da4737359b2..06d91b7540ba 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_manager.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_manager.go @@ -32,7 +32,7 @@ import ( "k8s.io/client-go/tools/record" ref "k8s.io/client-go/tools/reference" "k8s.io/client-go/util/flowcontrol" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/credentialprovider" internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" @@ -65,9 +65,10 @@ var ( ErrVersionNotSupported = errors.New("Runtime api version is not supported") ) -// podDeletionProvider can determine if a pod is deleted -type podDeletionProvider interface { +// podStateProvider can determine if a pod is deleted or terminated +type podStateProvider interface { IsPodDeleted(kubetypes.UID) bool + IsPodTerminated(kubetypes.UID) bool } type kubeGenericRuntimeManager struct { @@ -112,6 +113,9 @@ type kubeGenericRuntimeManager struct { // Internal lifecycle event handlers for container resource management. internalLifecycle cm.InternalContainerLifecycle + + // A shim to legacy functions for backward compatibility. + legacyLogProvider LegacyLogProvider } type KubeGenericRuntime interface { @@ -120,6 +124,12 @@ type KubeGenericRuntime interface { kubecontainer.ContainerCommandRunner } +// LegacyLogProvider gives the ability to use unsupported docker log drivers (e.g. journald) +type LegacyLogProvider interface { + // Get the last few lines of the logs for a specific container.
+ GetContainerLogTail(uid kubetypes.UID, name, namespace string, containerID kubecontainer.ContainerID) (string, error) +} + // NewKubeGenericRuntimeManager creates a new kubeGenericRuntimeManager func NewKubeGenericRuntimeManager( recorder record.EventRecorder, @@ -127,7 +137,7 @@ func NewKubeGenericRuntimeManager( seccompProfileRoot string, containerRefManager *kubecontainer.RefManager, machineInfo *cadvisorapi.MachineInfo, - podDeletionProvider podDeletionProvider, + podStateProvider podStateProvider, osInterface kubecontainer.OSInterface, runtimeHelper kubecontainer.RuntimeHelper, httpClient types.HttpGetter, @@ -139,6 +149,7 @@ func NewKubeGenericRuntimeManager( runtimeService internalapi.RuntimeService, imageService internalapi.ImageManagerService, internalLifecycle cm.InternalContainerLifecycle, + legacyLogProvider LegacyLogProvider, ) (KubeGenericRuntime, error) { kubeRuntimeManager := &kubeGenericRuntimeManager{ recorder: recorder, @@ -153,6 +164,7 @@ func NewKubeGenericRuntimeManager( imageService: newInstrumentedImageManagerService(imageService), keyring: credentialprovider.NewDockerKeyring(), internalLifecycle: internalLifecycle, + legacyLogProvider: legacyLogProvider, } typedVersion, err := kubeRuntimeManager.runtimeService.Version(kubeRuntimeAPIVersion) @@ -193,7 +205,7 @@ func NewKubeGenericRuntimeManager( imagePullQPS, imagePullBurst) kubeRuntimeManager.runner = lifecycle.NewHandlerRunner(httpClient, kubeRuntimeManager, kubeRuntimeManager) - kubeRuntimeManager.containerGC = NewContainerGC(runtimeService, podDeletionProvider, kubeRuntimeManager) + kubeRuntimeManager.containerGC = NewContainerGC(runtimeService, podStateProvider, kubeRuntimeManager) kubeRuntimeManager.versionCache = cache.NewObjectCache( func() (interface{}, error) { @@ -211,7 +223,10 @@ func (m *kubeGenericRuntimeManager) Type() string { } func newRuntimeVersion(version string) (*utilversion.Version, error) { - return utilversion.ParseSemantic(version) + if ver, err := utilversion.ParseSemantic(version); err == nil { + return ver, err + } + return utilversion.ParseGeneric(version) } func (m *kubeGenericRuntimeManager) getTypedVersion() (*runtimeapi.VersionResponse, error) { @@ -555,7 +570,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStat podContainerChanges := m.computePodActions(pod, podStatus) glog.V(3).Infof("computePodActions got %+v for pod %q", podContainerChanges, format.Pod(pod)) if podContainerChanges.CreateSandbox { - ref, err := ref.GetReference(api.Scheme, pod) + ref, err := ref.GetReference(legacyscheme.Scheme, pod) if err != nil { glog.Errorf("Couldn't make a ref to pod %q: '%v'", format.Pod(pod), err) } @@ -630,7 +645,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStat if err != nil { createSandboxResult.Fail(kubecontainer.ErrCreatePodSandbox, msg) glog.Errorf("createPodSandbox for pod %q failed: %v", format.Pod(pod), err) - ref, err := ref.GetReference(api.Scheme, pod) + ref, err := ref.GetReference(legacyscheme.Scheme, pod) if err != nil { glog.Errorf("Couldn't make a ref to pod %q: '%v'", format.Pod(pod), err) } @@ -641,6 +656,11 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStat podSandboxStatus, err := m.runtimeService.PodSandboxStatus(podSandboxID) if err != nil { + ref, err := ref.GetReference(legacyscheme.Scheme, pod) + if err != nil { + glog.Errorf("Couldn't make a ref to pod %q: '%v'", format.Pod(pod), err) + } + m.recorder.Eventf(ref, v1.EventTypeWarning, 
events.FailedStatusPodSandBox, "Unable to get pod sandbox status: %v", err) glog.Errorf("Failed to get pod sandbox status: %v; Skipping pod %q", err, format.Pod(pod)) result.Fail(err) return diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_sandbox.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_sandbox.go index 670181b31992..2773270e2fae 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_sandbox.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_sandbox.go @@ -74,17 +74,11 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(pod *v1.Pod, attemp Annotations: newPodAnnotations(pod), } - dnsServers, dnsSearches, useClusterFirstPolicy, err := m.runtimeHelper.GetClusterDNS(pod) + dnsConfig, err := m.runtimeHelper.GetPodDNS(pod) if err != nil { return nil, err } - podSandboxConfig.DnsConfig = &runtimeapi.DNSConfig{ - Servers: dnsServers, - Searches: dnsSearches, - } - if useClusterFirstPolicy { - podSandboxConfig.DnsConfig.Options = defaultDNSOptions - } + podSandboxConfig.DnsConfig = dnsConfig if !kubecontainer.IsHostNetworkPod(pod) { // TODO: Add domain support in new runtime interface diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/logs/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/logs/BUILD new file mode 100644 index 000000000000..211a0b2c0fed --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/logs/BUILD @@ -0,0 +1,44 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["logs.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/kuberuntime/logs", + visibility = ["//visibility:public"], + deps = [ + "//pkg/kubelet/apis/cri:go_default_library", + "//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library", + "//pkg/util/tail:go_default_library", + "//vendor/github.com/docker/docker/pkg/jsonlog:go_default_library", + "//vendor/github.com/fsnotify/fsnotify:go_default_library", + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["logs_test.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/kuberuntime/logs", + library = ":go_default_library", + deps = [ + "//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library", + "//vendor/github.com/stretchr/testify/assert:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/logs/logs.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/logs/logs.go new file mode 100644 index 000000000000..e029d40d6c0b --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/logs/logs.go @@ -0,0 +1,390 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logs + +import ( + "bufio" + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "os" + "time" + + "github.com/docker/docker/pkg/jsonlog" + "github.com/fsnotify/fsnotify" + "github.com/golang/glog" + + "k8s.io/api/core/v1" + internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" + runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" + "k8s.io/kubernetes/pkg/util/tail" +) + +// Notice that the current CRI logs implementation doesn't handle +// log rotation. +// * It will not retrieve logs in rotated log file. +// * If log rotation happens when following the log: +// * If the rotation is using create mode, we'll still follow the old file. +// * If the rotation is using copytruncate, we'll be reading at the original position and get nothing. +// TODO(random-liu): Support log rotation. + +const ( + // timeFormat is the time format used in the log. + timeFormat = time.RFC3339Nano + // blockSize is the block size used in tail. + blockSize = 1024 + + // stateCheckPeriod is the period to check container state while following + // the container log. Kubelet should not keep following the log when the + // container is not running. + stateCheckPeriod = 5 * time.Second +) + +var ( + // eol is the end-of-line sign in the log. + eol = []byte{'\n'} + // delimiter is the delimiter for timestamp and stream type in log line. + delimiter = []byte{' '} + // tagDelimiter is the delimiter for log tags. + tagDelimiter = []byte(runtimeapi.LogTagDelimiter) +) + +// logMessage is the CRI internal log type. +type logMessage struct { + timestamp time.Time + stream runtimeapi.LogStreamType + log []byte +} + +// reset resets the log to nil. +func (l *logMessage) reset() { + l.timestamp = time.Time{} + l.stream = "" + l.log = nil +} + +// LogOptions is the CRI internal type of all log options. +type LogOptions struct { + tail int64 + bytes int64 + since time.Time + follow bool + timestamp bool +} + +// NewLogOptions convert the v1.PodLogOptions to CRI internal LogOptions. +func NewLogOptions(apiOpts *v1.PodLogOptions, now time.Time) *LogOptions { + opts := &LogOptions{ + tail: -1, // -1 by default which means read all logs. + bytes: -1, // -1 by default which means read all logs. + follow: apiOpts.Follow, + timestamp: apiOpts.Timestamps, + } + if apiOpts.TailLines != nil { + opts.tail = *apiOpts.TailLines + } + if apiOpts.LimitBytes != nil { + opts.bytes = *apiOpts.LimitBytes + } + if apiOpts.SinceSeconds != nil { + opts.since = now.Add(-time.Duration(*apiOpts.SinceSeconds) * time.Second) + } + if apiOpts.SinceTime != nil && apiOpts.SinceTime.After(opts.since) { + opts.since = apiOpts.SinceTime.Time + } + return opts +} + +// parseFunc is a function parsing one log line to the internal log type. +// Notice that the caller must make sure logMessage is not nil. +type parseFunc func([]byte, *logMessage) error + +var parseFuncs = []parseFunc{ + parseCRILog, // CRI log format parse function + parseDockerJSONLog, // Docker JSON log format parse function +} + +// parseCRILog parses logs in CRI log format. 
CRI Log format example: +// 2016-10-06T00:17:09.669794202Z stdout P log content 1 +// 2016-10-06T00:17:09.669794203Z stderr F log content 2 +func parseCRILog(log []byte, msg *logMessage) error { + var err error + // Parse timestamp + idx := bytes.Index(log, delimiter) + if idx < 0 { + return fmt.Errorf("timestamp is not found") + } + msg.timestamp, err = time.Parse(timeFormat, string(log[:idx])) + if err != nil { + return fmt.Errorf("unexpected timestamp format %q: %v", timeFormat, err) + } + + // Parse stream type + log = log[idx+1:] + idx = bytes.Index(log, delimiter) + if idx < 0 { + return fmt.Errorf("stream type is not found") + } + msg.stream = runtimeapi.LogStreamType(log[:idx]) + if msg.stream != runtimeapi.Stdout && msg.stream != runtimeapi.Stderr { + return fmt.Errorf("unexpected stream type %q", msg.stream) + } + + // Parse log tag + log = log[idx+1:] + idx = bytes.Index(log, delimiter) + if idx < 0 { + return fmt.Errorf("log tag is not found") + } + // Keep this forward compatible. + tags := bytes.Split(log[:idx], tagDelimiter) + partial := (runtimeapi.LogTag(tags[0]) == runtimeapi.LogTagPartial) + // Trim the tailing new line if this is a partial line. + if partial && len(log) > 0 && log[len(log)-1] == '\n' { + log = log[:len(log)-1] + } + + // Get log content + msg.log = log[idx+1:] + + return nil +} + +// parseDockerJSONLog parses logs in Docker JSON log format. Docker JSON log format +// example: +// {"log":"content 1","stream":"stdout","time":"2016-10-20T18:39:20.57606443Z"} +// {"log":"content 2","stream":"stderr","time":"2016-10-20T18:39:20.57606444Z"} +func parseDockerJSONLog(log []byte, msg *logMessage) error { + var l = &jsonlog.JSONLog{} + l.Reset() + + // TODO: JSON decoding is fairly expensive, we should evaluate this. + if err := json.Unmarshal(log, l); err != nil { + return fmt.Errorf("failed with %v to unmarshal log %q", err, l) + } + msg.timestamp = l.Created + msg.stream = runtimeapi.LogStreamType(l.Stream) + msg.log = []byte(l.Log) + return nil +} + +// getParseFunc returns proper parse function based on the sample log line passed in. +func getParseFunc(log []byte) (parseFunc, error) { + for _, p := range parseFuncs { + if err := p(log, &logMessage{}); err == nil { + return p, nil + } + } + return nil, fmt.Errorf("unsupported log format: %q", log) +} + +// logWriter controls the writing into the stream based on the log options. +type logWriter struct { + stdout io.Writer + stderr io.Writer + opts *LogOptions + remain int64 +} + +// errMaximumWrite is returned when all bytes have been written. +var errMaximumWrite = errors.New("maximum write") + +// errShortWrite is returned when the message is not fully written. +var errShortWrite = errors.New("short write") + +func newLogWriter(stdout io.Writer, stderr io.Writer, opts *LogOptions) *logWriter { + w := &logWriter{ + stdout: stdout, + stderr: stderr, + opts: opts, + remain: math.MaxInt64, // initialize it as infinity + } + if opts.bytes >= 0 { + w.remain = opts.bytes + } + return w +} + +// writeLogs writes logs into stdout, stderr. +func (w *logWriter) write(msg *logMessage) error { + if msg.timestamp.Before(w.opts.since) { + // Skip the line because it's older than since + return nil + } + line := msg.log + if w.opts.timestamp { + prefix := append([]byte(msg.timestamp.Format(timeFormat)), delimiter[0]) + line = append(prefix, line...) + } + // If the line is longer than the remaining bytes, cut it. + if int64(len(line)) > w.remain { + line = line[:w.remain] + } + // Get the proper stream to write to. 
+ var stream io.Writer + switch msg.stream { + case runtimeapi.Stdout: + stream = w.stdout + case runtimeapi.Stderr: + stream = w.stderr + default: + return fmt.Errorf("unexpected stream type %q", msg.stream) + } + n, err := stream.Write(line) + w.remain -= int64(n) + if err != nil { + return err + } + // If the line has not been fully written, return errShortWrite + if n < len(line) { + return errShortWrite + } + // If there are no more bytes left, return errMaximumWrite + if w.remain <= 0 { + return errMaximumWrite + } + return nil +} + +// ReadLogs read the container log and redirect into stdout and stderr. +// Note that containerID is only needed when following the log, or else +// just pass in empty string "". +func ReadLogs(path, containerID string, opts *LogOptions, runtimeService internalapi.RuntimeService, stdout, stderr io.Writer) error { + f, err := os.Open(path) + if err != nil { + return fmt.Errorf("failed to open log file %q: %v", path, err) + } + defer f.Close() + + // Search start point based on tail line. + start, err := tail.FindTailLineStartIndex(f, opts.tail) + if err != nil { + return fmt.Errorf("failed to tail %d lines of log file %q: %v", opts.tail, path, err) + } + if _, err := f.Seek(start, os.SEEK_SET); err != nil { + return fmt.Errorf("failed to seek %d in log file %q: %v", start, path, err) + } + + // Start parsing the logs. + r := bufio.NewReader(f) + // Do not create watcher here because it is not needed if `Follow` is false. + var watcher *fsnotify.Watcher + var parse parseFunc + var stop bool + writer := newLogWriter(stdout, stderr, opts) + msg := &logMessage{} + for { + if stop { + glog.V(2).Infof("Finish parsing log file %q", path) + return nil + } + l, err := r.ReadBytes(eol[0]) + if err != nil { + if err != io.EOF { // This is an real error + return fmt.Errorf("failed to read log file %q: %v", path, err) + } + if opts.follow { + // Reset seek so that if this is an incomplete line, + // it will be read again. + if _, err := f.Seek(-int64(len(l)), os.SEEK_CUR); err != nil { + return fmt.Errorf("failed to reset seek in log file %q: %v", path, err) + } + if watcher == nil { + // Intialize the watcher if it has not been initialized yet. + if watcher, err = fsnotify.NewWatcher(); err != nil { + return fmt.Errorf("failed to create fsnotify watcher: %v", err) + } + defer watcher.Close() + if err := watcher.Add(f.Name()); err != nil { + return fmt.Errorf("failed to watch file %q: %v", f.Name(), err) + } + } + // Wait until the next log change. + if found, err := waitLogs(containerID, watcher, runtimeService); !found { + return err + } + continue + } + // Should stop after writing the remaining content. + stop = true + if len(l) == 0 { + continue + } + glog.Warningf("Incomplete line in log file %q: %q", path, l) + } + if parse == nil { + // Intialize the log parsing function. + parse, err = getParseFunc(l) + if err != nil { + return fmt.Errorf("failed to get parse function: %v", err) + } + } + // Parse the log line. + msg.reset() + if err := parse(l, msg); err != nil { + glog.Errorf("Failed with err %v when parsing log for log file %q: %q", err, path, l) + continue + } + // Write the log line into the stream. + if err := writer.write(msg); err != nil { + if err == errMaximumWrite { + glog.V(2).Infof("Finish parsing log file %q, hit bytes limit %d(bytes)", path, opts.bytes) + return nil + } + glog.Errorf("Failed with err %v when writing log for log file %q: %+v", err, path, msg) + return err + } + } +} + +// waitLogs wait for the next log write. 
It returns a boolean and an error. The boolean +// indicates whether a new log is found; the error is error happens during waiting new logs. +func waitLogs(id string, w *fsnotify.Watcher, runtimeService internalapi.RuntimeService) (bool, error) { + errRetry := 5 + for { + select { + case e := <-w.Events: + switch e.Op { + case fsnotify.Write: + return true, nil + default: + glog.Errorf("Unexpected fsnotify event: %v, retrying...", e) + } + case err := <-w.Errors: + glog.Errorf("Fsnotify watch error: %v, %d error retries remaining", err, errRetry) + if errRetry == 0 { + return false, err + } + errRetry-- + case <-time.After(stateCheckPeriod): + s, err := runtimeService.ContainerStatus(id) + if err != nil { + return false, err + } + // Only keep following container log when it is running. + if s.State != runtimeapi.ContainerState_CONTAINER_RUNNING { + glog.Errorf("Container %q is not running (state=%q)", id, s.State) + // Do not return error because it's normal that the container stops + // during waiting. + return false, nil + } + } + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/security_context.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/security_context.go index c0d4ce7347a1..3d48d372ac44 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/security_context.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/security_context.go @@ -75,7 +75,7 @@ func (m *kubeGenericRuntimeManager) determineEffectiveSecurityContext(pod *v1.Po } // verifyRunAsNonRoot verifies RunAsNonRoot. -func verifyRunAsNonRoot(pod *v1.Pod, container *v1.Container, uid int64) error { +func verifyRunAsNonRoot(pod *v1.Pod, container *v1.Container, uid *int64, username string) error { effectiveSc := securitycontext.DetermineEffectiveSecurityContext(pod, container) // If the option is not set, or if running as root is allowed, return nil. if effectiveSc == nil || effectiveSc.RunAsNonRoot == nil || !*effectiveSc.RunAsNonRoot { @@ -89,11 +89,14 @@ func verifyRunAsNonRoot(pod *v1.Pod, container *v1.Container, uid int64) error { return nil } - if uid == 0 { + switch { + case uid != nil && *uid == 0: return fmt.Errorf("container has runAsNonRoot and image will run as root") + case uid == nil && len(username) > 0: + return fmt.Errorf("container has runAsNonRoot and image has non-numeric user (%s), cannot verify user is non-root", username) + default: + return nil } - - return nil } // convertToRuntimeSecurityContext converts v1.SecurityContext to runtimeapi.SecurityContext. 
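Aside (not part of the vendored diff): the security_context.go hunk above changes verifyRunAsNonRoot from taking a plain uid to taking a nullable uid plus the image's username, so the kubelet can refuse to start a container whose user cannot be proven non-root. The following is a minimal standalone sketch of that decision logic; the function and variable names are illustrative only and are not the kubelet's actual API.

package main

import (
	"errors"
	"fmt"
)

// checkNonRoot mirrors the shape of the updated check: when runAsNonRoot is
// requested, a uid of 0 is rejected outright, and a nil uid combined with a
// non-numeric image username is rejected because non-root cannot be verified.
func checkNonRoot(runAsNonRoot bool, uid *int64, username string) error {
	if !runAsNonRoot {
		// Running as root is allowed; nothing to verify.
		return nil
	}
	switch {
	case uid != nil && *uid == 0:
		return errors.New("container has runAsNonRoot and image will run as root")
	case uid == nil && len(username) > 0:
		return fmt.Errorf("container has runAsNonRoot and image has non-numeric user (%s), cannot verify user is non-root", username)
	default:
		return nil
	}
}

func main() {
	root := int64(0)
	nonRoot := int64(1000)
	fmt.Println(checkNonRoot(true, &root, ""))      // rejected: uid 0
	fmt.Println(checkNonRoot(true, nil, "appuser")) // rejected: cannot verify non-root
	fmt.Println(checkNonRoot(true, &nonRoot, ""))   // allowed: <nil>
}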
diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/leaky/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/leaky/BUILD index 3c17746f1920..9b5b362fa019 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/leaky/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/leaky/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["leaky.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/leaky", ) filegroup( diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/BUILD index 0db7c55df0aa..d2463e5de9de 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/BUILD @@ -16,6 +16,7 @@ go_library( "interfaces.go", "predicate.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/lifecycle", deps = [ "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/types:go_default_library", @@ -34,9 +35,11 @@ go_library( go_test( name = "go_default_test", srcs = ["handlers_test.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/lifecycle", library = ":go_default_library", deps = [ "//pkg/kubelet/container:go_default_library", + "//pkg/kubelet/util/format:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/handlers.go b/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/handlers.go index 450c985e2a49..b941d85537de 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/handlers.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/handlers.go @@ -58,14 +58,14 @@ func (hr *HandlerRunner) Run(containerID kubecontainer.ContainerID, pod *v1.Pod, // TODO(tallclair): Pass a proper timeout value. output, err := hr.commandRunner.RunInContainer(containerID, handler.Exec.Command, 0) if err != nil { - msg := fmt.Sprintf("Exec lifecycle hook (%v) for Container %q in Pod %q failed - error: %v, message: %q", handler.Exec.Command, container.Name, format.Pod(pod), err, string(output)) + msg = fmt.Sprintf("Exec lifecycle hook (%v) for Container %q in Pod %q failed - error: %v, message: %q", handler.Exec.Command, container.Name, format.Pod(pod), err, string(output)) glog.V(1).Infof(msg) } return msg, err case handler.HTTPGet != nil: msg, err := hr.runHTTPHandler(pod, container, handler) if err != nil { - msg := fmt.Sprintf("Http lifecycle hook (%s) for Container %q in Pod %q failed - error: %v, message: %q", handler.HTTPGet.Path, container.Name, format.Pod(pod), err, msg) + msg = fmt.Sprintf("Http lifecycle hook (%s) for Container %q in Pod %q failed - error: %v, message: %q", handler.HTTPGet.Path, container.Name, format.Pod(pod), err, msg) glog.V(1).Infof(msg) } return msg, err diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/predicate.go b/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/predicate.go index 75c7663dcf4c..9b8ad4d3cc12 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/predicate.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/predicate.go @@ -29,6 +29,8 @@ import ( type getNodeAnyWayFuncType func() (*v1.Node, error) +type pluginResourceUpdateFuncType func(*schedulercache.NodeInfo, *PodAdmitAttributes) error + // AdmissionFailureHandler is an interface which defines how to deal with a failure to admit a pod. // This allows for the graceful handling of pod admission failure. 
type AdmissionFailureHandler interface { @@ -36,15 +38,17 @@ type AdmissionFailureHandler interface { } type predicateAdmitHandler struct { - getNodeAnyWayFunc getNodeAnyWayFuncType - admissionFailureHandler AdmissionFailureHandler + getNodeAnyWayFunc getNodeAnyWayFuncType + pluginResourceUpdateFunc pluginResourceUpdateFuncType + admissionFailureHandler AdmissionFailureHandler } var _ PodAdmitHandler = &predicateAdmitHandler{} -func NewPredicateAdmitHandler(getNodeAnyWayFunc getNodeAnyWayFuncType, admissionFailureHandler AdmissionFailureHandler) *predicateAdmitHandler { +func NewPredicateAdmitHandler(getNodeAnyWayFunc getNodeAnyWayFuncType, admissionFailureHandler AdmissionFailureHandler, pluginResourceUpdateFunc pluginResourceUpdateFuncType) *predicateAdmitHandler { return &predicateAdmitHandler{ getNodeAnyWayFunc, + pluginResourceUpdateFunc, admissionFailureHandler, } } @@ -63,6 +67,16 @@ func (w *predicateAdmitHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult pods := attrs.OtherPods nodeInfo := schedulercache.NewNodeInfo(pods...) nodeInfo.SetNode(node) + // ensure the node has enough plugin resources for that required in pods + if err = w.pluginResourceUpdateFunc(nodeInfo, attrs); err != nil { + message := fmt.Sprintf("Update plugin resources failed due to %v, which is unexpected.", err) + glog.Warningf("Failed to admit pod %v - %s", format.Pod(pod), message) + return PodAdmitResult{ + Admit: false, + Reason: "UnexpectedAdmissionError", + Message: message, + } + } fit, reasons, err := predicates.GeneralPredicates(pod, nil, nodeInfo) if err != nil { message := fmt.Sprintf("GeneralPredicates failed due to %v, which is unexpected.", err) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/BUILD index d64c12f4db94..568ae707755c 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["metrics.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/metrics", deps = [ "//pkg/kubelet/container:go_default_library", "//vendor/github.com/golang/glog:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/metrics.go b/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/metrics.go index 86b84c244fdf..0fc10dc189a4 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/metrics.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/metrics.go @@ -30,10 +30,6 @@ const ( PodWorkerLatencyKey = "pod_worker_latency_microseconds" PodStartLatencyKey = "pod_start_latency_microseconds" CgroupManagerOperationsKey = "cgroup_manager_latency_microseconds" - DockerOperationsLatencyKey = "docker_operations_latency_microseconds" - DockerOperationsKey = "docker_operations" - DockerOperationsErrorsKey = "docker_operations_errors" - DockerOperationsTimeoutKey = "docker_operations_timeout" PodWorkerStartLatencyKey = "pod_worker_start_latency_microseconds" PLEGRelistLatencyKey = "pleg_relist_latency_microseconds" PLEGRelistIntervalKey = "pleg_relist_interval_microseconds" @@ -48,6 +44,9 @@ const ( RuntimeOperationsKey = "runtime_operations" RuntimeOperationsLatencyKey = "runtime_operations_latency_microseconds" RuntimeOperationsErrorsKey = "runtime_operations_errors" + // Metrics keys of device plugin operations + DevicePluginRegistrationCountKey = "device_plugin_registration_count" + DevicePluginAllocationLatencyKey = "device_plugin_alloc_latency_microseconds" ) var ( @@ -88,39 +87,6 @@ var ( Help: "Latency in 
microseconds from seeing a pod to starting a worker.", }, ) - // TODO(random-liu): Move the following docker metrics into shim once dockertools is deprecated. - DockerOperationsLatency = prometheus.NewSummaryVec( - prometheus.SummaryOpts{ - Subsystem: KubeletSubsystem, - Name: DockerOperationsLatencyKey, - Help: "Latency in microseconds of Docker operations. Broken down by operation type.", - }, - []string{"operation_type"}, - ) - DockerOperations = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Subsystem: KubeletSubsystem, - Name: DockerOperationsKey, - Help: "Cumulative number of Docker operations by operation type.", - }, - []string{"operation_type"}, - ) - DockerOperationsErrors = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Subsystem: KubeletSubsystem, - Name: DockerOperationsErrorsKey, - Help: "Cumulative number of Docker operation errors by operation type.", - }, - []string{"operation_type"}, - ) - DockerOperationsTimeout = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Subsystem: KubeletSubsystem, - Name: DockerOperationsTimeoutKey, - Help: "Cumulative number of Docker operation timeout by operation type.", - }, - []string{"operation_type"}, - ) PLEGRelistLatency = prometheus.NewSummary( prometheus.SummaryOpts{ Subsystem: KubeletSubsystem, @@ -216,6 +182,22 @@ var ( }, []string{"namespace", "persistentvolumeclaim"}, ) + DevicePluginRegistrationCount = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Subsystem: KubeletSubsystem, + Name: DevicePluginRegistrationCountKey, + Help: "Cumulative number of device plugin registrations. Broken down by resource name.", + }, + []string{"resource_name"}, + ) + DevicePluginAllocationLatency = prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Subsystem: KubeletSubsystem, + Name: DevicePluginAllocationLatencyKey, + Help: "Latency in microseconds to serve a device plugin Allocation request. 
Broken down by resource name.", + }, + []string{"resource_name"}, + ) ) var registerMetrics sync.Once @@ -226,13 +208,9 @@ func Register(containerCache kubecontainer.RuntimeCache) { registerMetrics.Do(func() { prometheus.MustRegister(PodWorkerLatency) prometheus.MustRegister(PodStartLatency) - prometheus.MustRegister(DockerOperationsLatency) prometheus.MustRegister(CgroupManagerLatency) prometheus.MustRegister(PodWorkerStartLatency) prometheus.MustRegister(ContainersPerPodCount) - prometheus.MustRegister(DockerOperations) - prometheus.MustRegister(DockerOperationsErrors) - prometheus.MustRegister(DockerOperationsTimeout) prometheus.MustRegister(newPodAndContainerCollector(containerCache)) prometheus.MustRegister(PLEGRelistLatency) prometheus.MustRegister(PLEGRelistInterval) @@ -246,6 +224,8 @@ func Register(containerCache kubecontainer.RuntimeCache) { prometheus.MustRegister(VolumeStatsInodes) prometheus.MustRegister(VolumeStatsInodesFree) prometheus.MustRegister(VolumeStatsInodesUsed) + prometheus.MustRegister(DevicePluginRegistrationCount) + prometheus.MustRegister(DevicePluginAllocationLatency) }) } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/mountpod/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/mountpod/BUILD new file mode 100644 index 000000000000..d21981739a79 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/mountpod/BUILD @@ -0,0 +1,45 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["mount_pod.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/mountpod", + visibility = ["//visibility:public"], + deps = [ + "//pkg/kubelet/config:go_default_library", + "//pkg/kubelet/pod:go_default_library", + "//pkg/util/strings:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["mount_pod_test.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/mountpod", + library = ":go_default_library", + deps = [ + "//pkg/kubelet/configmap:go_default_library", + "//pkg/kubelet/pod:go_default_library", + "//pkg/kubelet/pod/testing:go_default_library", + "//pkg/kubelet/secret:go_default_library", + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/client-go/util/testing:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/mountpod/mount_pod.go b/vendor/k8s.io/kubernetes/pkg/kubelet/mountpod/mount_pod.go new file mode 100644 index 000000000000..6c7afda4810e --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/mountpod/mount_pod.go @@ -0,0 +1,120 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package mountpod + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path" + + "k8s.io/api/core/v1" + "k8s.io/kubernetes/pkg/kubelet/config" + kubepod "k8s.io/kubernetes/pkg/kubelet/pod" + "k8s.io/kubernetes/pkg/util/strings" +) + +// Manager is an interface that tracks pods with mount utilities for individual +// volume plugins. +type Manager interface { + GetMountPod(pluginName string) (pod *v1.Pod, container string, err error) +} + +// basicManager is simple implementation of Manager. Pods with mount utilities +// are registered by placing a JSON file into +// /var/lib/kubelet/plugin-containers/.json and this manager just +// finds them there. +type basicManager struct { + registrationDirectory string + podManager kubepod.Manager +} + +// volumePluginRegistration specified format of the json files placed in +// /var/lib/kubelet/plugin-containers/ +type volumePluginRegistration struct { + PodName string `json:"podName"` + PodNamespace string `json:"podNamespace"` + PodUID string `json:"podUID"` + ContainerName string `json:"containerName"` +} + +// NewManager returns a new mount pod manager. +func NewManager(rootDirectory string, podManager kubepod.Manager) (Manager, error) { + regPath := path.Join(rootDirectory, config.DefaultKubeletPluginContainersDirName) + + // Create the directory on startup + os.MkdirAll(regPath, 0700) + + return &basicManager{ + registrationDirectory: regPath, + podManager: podManager, + }, nil +} + +func (m *basicManager) getVolumePluginRegistrationPath(pluginName string) string { + // sanitize plugin name so it does not escape directory + safePluginName := strings.EscapePluginName(pluginName) + ".json" + return path.Join(m.registrationDirectory, safePluginName) +} + +func (m *basicManager) GetMountPod(pluginName string) (pod *v1.Pod, containerName string, err error) { + // Read /var/lib/kubelet/plugin-containers/.json + regPath := m.getVolumePluginRegistrationPath(pluginName) + regBytes, err := ioutil.ReadFile(regPath) + if err != nil { + if os.IsNotExist(err) { + // No pod is registered for this plugin + return nil, "", nil + } + return nil, "", fmt.Errorf("cannot read %s: %v", regPath, err) + } + + // Parse json + var reg volumePluginRegistration + if err := json.Unmarshal(regBytes, ®); err != nil { + return nil, "", fmt.Errorf("unable to parse %s: %s", regPath, err) + } + if len(reg.ContainerName) == 0 { + return nil, "", fmt.Errorf("unable to parse %s: \"containerName\" is not set", regPath) + } + if len(reg.PodUID) == 0 { + return nil, "", fmt.Errorf("unable to parse %s: \"podUID\" is not set", regPath) + } + if len(reg.PodNamespace) == 0 { + return nil, "", fmt.Errorf("unable to parse %s: \"podNamespace\" is not set", regPath) + } + if len(reg.PodName) == 0 { + return nil, "", fmt.Errorf("unable to parse %s: \"podName\" is not set", regPath) + } + + pod, ok := m.podManager.GetPodByName(reg.PodNamespace, reg.PodName) + if !ok { + return nil, "", fmt.Errorf("unable to process %s: pod %s/%s not found", regPath, reg.PodNamespace, reg.PodName) + } + if string(pod.UID) != reg.PodUID { + return nil, "", fmt.Errorf("unable to process %s: pod %s/%s has unexpected UID", regPath, reg.PodNamespace, reg.PodName) + } + // make sure that reg.ContainerName exists in the pod + for i := range pod.Spec.Containers { + if pod.Spec.Containers[i].Name == reg.ContainerName { + return pod, reg.ContainerName, nil + } + } + return nil, "", fmt.Errorf("unable to process %s: pod %s/%s has no container named %q", regPath, reg.PodNamespace, reg.PodName, 
reg.ContainerName) + +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/network/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/network/BUILD index 31e267a2e639..51b32deeb272 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/network/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/network/BUILD @@ -11,10 +11,12 @@ go_library( "network.go", "plugins.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/network", deps = [ "//pkg/kubelet/apis/kubeletconfig:go_default_library", "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/network/hostport:go_default_library", + "//pkg/kubelet/network/metrics:go_default_library", "//pkg/util/sysctl:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", @@ -39,9 +41,11 @@ filegroup( srcs = [ ":package-srcs", "//pkg/kubelet/network/cni:all-srcs", + "//pkg/kubelet/network/dns:all-srcs", "//pkg/kubelet/network/hairpin:all-srcs", "//pkg/kubelet/network/hostport:all-srcs", "//pkg/kubelet/network/kubenet:all-srcs", + "//pkg/kubelet/network/metrics:all-srcs", "//pkg/kubelet/network/testing:all-srcs", ], tags = ["automanaged"], diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/network/cni/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/network/cni/BUILD index 7a97a8d43366..b91992366ec1 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/network/cni/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/network/cni/BUILD @@ -8,7 +8,16 @@ load( go_library( name = "go_default_library", - srcs = ["cni.go"], + srcs = [ + "cni.go", + "cni_others.go", + ] + select({ + "@io_bazel_rules_go//go/platform:windows_amd64": [ + "cni_windows.go", + ], + "//conditions:default": [], + }), + importpath = "k8s.io/kubernetes/pkg/kubelet/network/cni", deps = [ "//pkg/kubelet/apis/kubeletconfig:go_default_library", "//pkg/kubelet/container:go_default_library", @@ -17,7 +26,12 @@ go_library( "//vendor/github.com/containernetworking/cni/pkg/types:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", - ], + ] + select({ + "@io_bazel_rules_go//go/platform:windows_amd64": [ + "//vendor/github.com/containernetworking/cni/pkg/types/020:go_default_library", + ], + "//conditions:default": [], + }), ) go_test( @@ -28,6 +42,7 @@ go_test( ], "//conditions:default": [], }), + importpath = "k8s.io/kubernetes/pkg/kubelet/network/cni", library = ":go_default_library", deps = select({ "@io_bazel_rules_go//go/platform:linux_amd64": [ diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/network/cni/cni.go b/vendor/k8s.io/kubernetes/pkg/kubelet/network/cni/cni.go index 0ae1edc801d8..540be4b70cf6 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/network/cni/cni.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/network/cni/cni.go @@ -153,37 +153,12 @@ func vendorCNIDir(prefix, pluginType string) string { return fmt.Sprintf(VendorCNIDirTemplate, prefix, pluginType) } -func getLoNetwork(binDir, vendorDirPrefix string) *cniNetwork { - loConfig, err := libcni.ConfListFromBytes([]byte(`{ - "cniVersion": "0.2.0", - "name": "cni-loopback", - "plugins":[{ - "type": "loopback" - }] -}`)) - if err != nil { - // The hardcoded config above should always be valid and unit tests will - // catch this - panic(err) - } - cninet := &libcni.CNIConfig{ - Path: []string{vendorCNIDir(vendorDirPrefix, "loopback"), binDir}, - } - loNetwork := &cniNetwork{ - name: "lo", - NetworkConfig: loConfig, - CNIConfig: cninet, - } - - return loNetwork -} - func (plugin *cniNetworkPlugin) Init(host network.Host, 
hairpinMode kubeletconfig.HairpinMode, nonMasqueradeCIDR string, mtu int) error { - var err error - plugin.nsenterPath, err = plugin.execer.LookPath("nsenter") + err := plugin.platformInit() if err != nil { return err } + plugin.host = host plugin.syncNetworkConfig() @@ -239,10 +214,12 @@ func (plugin *cniNetworkPlugin) SetUpPod(namespace string, name string, id kubec return fmt.Errorf("CNI failed to retrieve network namespace path: %v", err) } - _, err = plugin.addToNetwork(plugin.loNetwork, name, namespace, id, netnsPath) - if err != nil { - glog.Errorf("Error while adding to cni lo network: %s", err) - return err + // Windows doesn't have loNetwork. It comes only with Linux + if plugin.loNetwork != nil { + if _, err = plugin.addToNetwork(plugin.loNetwork, name, namespace, id, netnsPath); err != nil { + glog.Errorf("Error while adding to cni lo network: %s", err) + return err + } } _, err = plugin.addToNetwork(plugin.getDefaultNetwork(), name, namespace, id, netnsPath) @@ -268,25 +245,6 @@ func (plugin *cniNetworkPlugin) TearDownPod(namespace string, name string, id ku return plugin.deleteFromNetwork(plugin.getDefaultNetwork(), name, namespace, id, netnsPath) } -// TODO: Use the addToNetwork function to obtain the IP of the Pod. That will assume idempotent ADD call to the plugin. -// Also fix the runtime's call to Status function to be done only in the case that the IP is lost, no need to do periodic calls -func (plugin *cniNetworkPlugin) GetPodNetworkStatus(namespace string, name string, id kubecontainer.ContainerID) (*network.PodNetworkStatus, error) { - netnsPath, err := plugin.host.GetNetNS(id.ID) - if err != nil { - return nil, fmt.Errorf("CNI failed to retrieve network namespace path: %v", err) - } - if netnsPath == "" { - return nil, fmt.Errorf("Cannot find the network namespace, skipping pod network status for container %q", id) - } - - ip, err := network.GetPodIP(plugin.execer, plugin.nsenterPath, netnsPath, network.DefaultInterfaceName) - if err != nil { - return nil, err - } - - return &network.PodNetworkStatus{IP: ip}, nil -} - func (plugin *cniNetworkPlugin) addToNetwork(network *cniNetwork, podName string, podNamespace string, podSandboxID kubecontainer.ContainerID, podNetnsPath string) (cnitypes.Result, error) { rt, err := plugin.buildCNIRuntimeConf(podName, podNamespace, podSandboxID, podNetnsPath) if err != nil { @@ -324,7 +282,7 @@ func (plugin *cniNetworkPlugin) deleteFromNetwork(network *cniNetwork, podName s func (plugin *cniNetworkPlugin) buildCNIRuntimeConf(podName string, podNs string, podSandboxID kubecontainer.ContainerID, podNetnsPath string) (*libcni.RuntimeConf, error) { glog.V(4).Infof("Got netns path %v", podNetnsPath) - glog.V(4).Infof("Using netns path %v", podNs) + glog.V(4).Infof("Using podns path %v", podNs) rt := &libcni.RuntimeConf{ ContainerID: podSandboxID.ID, diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/network/cni/cni_others.go b/vendor/k8s.io/kubernetes/pkg/kubelet/network/cni/cni_others.go new file mode 100644 index 000000000000..735db9cc69cd --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/network/cni/cni_others.go @@ -0,0 +1,80 @@ +// +build !windows + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cni + +import ( + "fmt" + + "github.com/containernetworking/cni/libcni" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/kubelet/network" +) + +func getLoNetwork(binDir, vendorDirPrefix string) *cniNetwork { + loConfig, err := libcni.ConfListFromBytes([]byte(`{ + "cniVersion": "0.2.0", + "name": "cni-loopback", + "plugins":[{ + "type": "loopback" + }] +}`)) + if err != nil { + // The hardcoded config above should always be valid and unit tests will + // catch this + panic(err) + } + cninet := &libcni.CNIConfig{ + Path: []string{vendorCNIDir(vendorDirPrefix, "loopback"), binDir}, + } + loNetwork := &cniNetwork{ + name: "lo", + NetworkConfig: loConfig, + CNIConfig: cninet, + } + + return loNetwork +} + +func (plugin *cniNetworkPlugin) platformInit() error { + var err error + plugin.nsenterPath, err = plugin.execer.LookPath("nsenter") + if err != nil { + return err + } + return nil +} + +// TODO: Use the addToNetwork function to obtain the IP of the Pod. That will assume idempotent ADD call to the plugin. +// Also fix the runtime's call to Status function to be done only in the case that the IP is lost, no need to do periodic calls +func (plugin *cniNetworkPlugin) GetPodNetworkStatus(namespace string, name string, id kubecontainer.ContainerID) (*network.PodNetworkStatus, error) { + netnsPath, err := plugin.host.GetNetNS(id.ID) + if err != nil { + return nil, fmt.Errorf("CNI failed to retrieve network namespace path: %v", err) + } + if netnsPath == "" { + return nil, fmt.Errorf("Cannot find the network namespace, skipping pod network status for container %q", id) + } + + ip, err := network.GetPodIP(plugin.execer, plugin.nsenterPath, netnsPath, network.DefaultInterfaceName) + if err != nil { + return nil, err + } + + return &network.PodNetworkStatus{IP: ip}, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/network/cni/cni_windows.go b/vendor/k8s.io/kubernetes/pkg/kubelet/network/cni/cni_windows.go new file mode 100644 index 000000000000..9a0b17c8f11b --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/network/cni/cni_windows.go @@ -0,0 +1,61 @@ +// +build windows + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cni + +import ( + "fmt" + + cniTypes020 "github.com/containernetworking/cni/pkg/types/020" + "github.com/golang/glog" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/kubelet/network" +) + +func getLoNetwork(binDir, vendorDirPrefix string) *cniNetwork { + return nil +} + +func (plugin *cniNetworkPlugin) platformInit() error { + return nil +} + +// GetPodNetworkStatus : Assuming addToNetwork is idempotent, we can call this API as many times as required to get the IPAddress +func (plugin *cniNetworkPlugin) GetPodNetworkStatus(namespace string, name string, id kubecontainer.ContainerID) (*network.PodNetworkStatus, error) { + netnsPath, err := plugin.host.GetNetNS(id.ID) + if err != nil { + return nil, fmt.Errorf("CNI failed to retrieve network namespace path: %v", err) + } + + result, err := plugin.addToNetwork(plugin.getDefaultNetwork(), name, namespace, id, netnsPath) + + glog.V(5).Infof("GetPodNetworkStatus result %+v", result) + if err != nil { + glog.Errorf("error while adding to cni network: %s", err) + return nil, err + } + + // Parse the result and get the IPAddress + var result020 *cniTypes020.Result + result020, err = cniTypes020.GetResult(result) + if err != nil { + glog.Errorf("error while cni parsing result: %s", err) + return nil, err + } + return &network.PodNetworkStatus{IP: result020.IP4.IP.IP}, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/network/dns/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/network/dns/BUILD new file mode 100644 index 000000000000..c0a4c05463af --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/network/dns/BUILD @@ -0,0 +1,51 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["dns.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/network/dns", + visibility = ["//visibility:public"], + deps = [ + "//pkg/apis/core/validation:go_default_library", + "//pkg/features:go_default_library", + "//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library", + "//pkg/kubelet/container:go_default_library", + "//pkg/kubelet/util/format:go_default_library", + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//vendor/k8s.io/client-go/tools/record:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["dns_test.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/network/dns", + library = ":go_default_library", + deps = [ + "//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library", + "//vendor/github.com/stretchr/testify/assert:go_default_library", + "//vendor/github.com/stretchr/testify/require:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//vendor/k8s.io/client-go/tools/record:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/network/dns/OWNERS b/vendor/k8s.io/kubernetes/pkg/kubelet/network/dns/OWNERS 
new file mode 100644 index 000000000000..b5c11e8c5ad6 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/network/dns/OWNERS @@ -0,0 +1,4 @@ +approvers: +- sig-network-approvers +reviewers: +- sig-network-reviewers diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/network/dns/dns.go b/vendor/k8s.io/kubernetes/pkg/kubelet/network/dns/dns.go new file mode 100644 index 000000000000..bcc717e01c1e --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/network/dns/dns.go @@ -0,0 +1,414 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dns + +import ( + "fmt" + "io" + "io/ioutil" + "net" + "os" + "path/filepath" + "strings" + + "k8s.io/api/core/v1" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/client-go/tools/record" + "k8s.io/kubernetes/pkg/apis/core/validation" + "k8s.io/kubernetes/pkg/features" + runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" + kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/kubelet/util/format" + + "github.com/golang/glog" +) + +var ( + // The default dns opt strings. + defaultDNSOptions = []string{"ndots:5"} +) + +type podDNSType int + +const ( + podDNSCluster podDNSType = iota + podDNSHost + podDNSNone +) + +// Configurer is used for setting up DNS resolver configuration when launching pods. +type Configurer struct { + recorder record.EventRecorder + nodeRef *v1.ObjectReference + nodeIP net.IP + + // If non-nil, use this for container DNS server. + clusterDNS []net.IP + // If non-empty, use this for container DNS search. + ClusterDomain string + // The path to the DNS resolver configuration file used as the base to generate + // the container's DNS resolver configuration file. This can be used in + // conjunction with clusterDomain and clusterDNS. + ResolverConfig string +} + +// NewConfigurer returns a DNS configurer for launching pods. 
+func NewConfigurer(recorder record.EventRecorder, nodeRef *v1.ObjectReference, nodeIP net.IP, clusterDNS []net.IP, clusterDomain, resolverConfig string) *Configurer { + return &Configurer{ + recorder: recorder, + nodeRef: nodeRef, + nodeIP: nodeIP, + clusterDNS: clusterDNS, + ClusterDomain: clusterDomain, + ResolverConfig: resolverConfig, + } +} + +func omitDuplicates(strs []string) []string { + uniqueStrs := make(map[string]bool) + + var ret []string + for _, str := range strs { + if !uniqueStrs[str] { + ret = append(ret, str) + uniqueStrs[str] = true + } + } + return ret +} + +func (c *Configurer) formDNSSearchFitsLimits(composedSearch []string, pod *v1.Pod) []string { + limitsExceeded := false + + if len(composedSearch) > validation.MaxDNSSearchPaths { + composedSearch = composedSearch[:validation.MaxDNSSearchPaths] + limitsExceeded = true + } + + if resolvSearchLineStrLen := len(strings.Join(composedSearch, " ")); resolvSearchLineStrLen > validation.MaxDNSSearchListChars { + cutDomainsNum := 0 + cutDomainsLen := 0 + for i := len(composedSearch) - 1; i >= 0; i-- { + cutDomainsLen += len(composedSearch[i]) + 1 + cutDomainsNum++ + + if (resolvSearchLineStrLen - cutDomainsLen) <= validation.MaxDNSSearchListChars { + break + } + } + + composedSearch = composedSearch[:(len(composedSearch) - cutDomainsNum)] + limitsExceeded = true + } + + if limitsExceeded { + log := fmt.Sprintf("Search Line limits were exceeded, some search paths have been omitted, the applied search line is: %s", strings.Join(composedSearch, " ")) + c.recorder.Event(pod, v1.EventTypeWarning, "DNSConfigForming", log) + glog.Error(log) + } + return composedSearch +} + +func (c *Configurer) formDNSNameserversFitsLimits(nameservers []string, pod *v1.Pod) []string { + if len(nameservers) > validation.MaxDNSNameservers { + nameservers = nameservers[0:validation.MaxDNSNameservers] + log := fmt.Sprintf("Nameserver limits were exceeded, some nameservers have been omitted, the applied nameserver line is: %s", strings.Join(nameservers, " ")) + c.recorder.Event(pod, v1.EventTypeWarning, "DNSConfigForming", log) + glog.Error(log) + } + return nameservers +} + +func (c *Configurer) formDNSConfigFitsLimits(dnsConfig *runtimeapi.DNSConfig, pod *v1.Pod) *runtimeapi.DNSConfig { + dnsConfig.Servers = c.formDNSNameserversFitsLimits(dnsConfig.Servers, pod) + dnsConfig.Searches = c.formDNSSearchFitsLimits(dnsConfig.Searches, pod) + return dnsConfig +} + +func (c *Configurer) generateSearchesForDNSClusterFirst(hostSearch []string, pod *v1.Pod) []string { + if c.ClusterDomain == "" { + return hostSearch + } + + nsSvcDomain := fmt.Sprintf("%s.svc.%s", pod.Namespace, c.ClusterDomain) + svcDomain := fmt.Sprintf("svc.%s", c.ClusterDomain) + clusterSearch := []string{nsSvcDomain, svcDomain, c.ClusterDomain} + + return omitDuplicates(append(clusterSearch, hostSearch...)) +} + +// CheckLimitsForResolvConf checks limits in resolv.conf. 
+func (c *Configurer) CheckLimitsForResolvConf() { + f, err := os.Open(c.ResolverConfig) + if err != nil { + c.recorder.Event(c.nodeRef, v1.EventTypeWarning, "CheckLimitsForResolvConf", err.Error()) + glog.Error("CheckLimitsForResolvConf: " + err.Error()) + return + } + defer f.Close() + + _, hostSearch, _, err := parseResolvConf(f) + if err != nil { + c.recorder.Event(c.nodeRef, v1.EventTypeWarning, "CheckLimitsForResolvConf", err.Error()) + glog.Error("CheckLimitsForResolvConf: " + err.Error()) + return + } + + domainCountLimit := validation.MaxDNSSearchPaths + + if c.ClusterDomain != "" { + domainCountLimit -= 3 + } + + if len(hostSearch) > domainCountLimit { + log := fmt.Sprintf("Resolv.conf file '%s' contains search line consisting of more than %d domains!", c.ResolverConfig, domainCountLimit) + c.recorder.Event(c.nodeRef, v1.EventTypeWarning, "CheckLimitsForResolvConf", log) + glog.Error("CheckLimitsForResolvConf: " + log) + return + } + + if len(strings.Join(hostSearch, " ")) > validation.MaxDNSSearchListChars { + log := fmt.Sprintf("Resolv.conf file '%s' contains search line which length is more than allowed %d chars!", c.ResolverConfig, validation.MaxDNSSearchListChars) + c.recorder.Event(c.nodeRef, v1.EventTypeWarning, "CheckLimitsForResolvConf", log) + glog.Error("CheckLimitsForResolvConf: " + log) + return + } + + return +} + +// parseResolveConf reads a resolv.conf file from the given reader, and parses +// it into nameservers, searches and options, possibly returning an error. +func parseResolvConf(reader io.Reader) (nameservers []string, searches []string, options []string, err error) { + file, err := ioutil.ReadAll(reader) + if err != nil { + return nil, nil, nil, err + } + + // Lines of the form "nameserver 1.2.3.4" accumulate. + nameservers = []string{} + + // Lines of the form "search example.com" overrule - last one wins. + searches = []string{} + + // Lines of the form "option ndots:5 attempts:2" overrule - last one wins. + // Each option is recorded as an element in the array. + options = []string{} + + lines := strings.Split(string(file), "\n") + for l := range lines { + trimmed := strings.TrimSpace(lines[l]) + if strings.HasPrefix(trimmed, "#") { + continue + } + fields := strings.Fields(trimmed) + if len(fields) == 0 { + continue + } + if fields[0] == "nameserver" && len(fields) >= 2 { + nameservers = append(nameservers, fields[1]) + } + if fields[0] == "search" { + searches = fields[1:] + } + if fields[0] == "options" { + options = fields[1:] + } + } + + return nameservers, searches, options, nil +} + +func (c *Configurer) getHostDNSConfig(pod *v1.Pod) (*runtimeapi.DNSConfig, error) { + var hostDNS, hostSearch, hostOptions []string + // Get host DNS settings + if c.ResolverConfig != "" { + f, err := os.Open(c.ResolverConfig) + if err != nil { + return nil, err + } + defer f.Close() + + hostDNS, hostSearch, hostOptions, err = parseResolvConf(f) + if err != nil { + return nil, err + } + } + return &runtimeapi.DNSConfig{ + Servers: hostDNS, + Searches: hostSearch, + Options: hostOptions, + }, nil +} + +func getPodDNSType(pod *v1.Pod) (podDNSType, error) { + dnsPolicy := pod.Spec.DNSPolicy + switch dnsPolicy { + case v1.DNSNone: + if utilfeature.DefaultFeatureGate.Enabled(features.CustomPodDNS) { + return podDNSNone, nil + } + // This should not happen as kube-apiserver should have rejected + // setting dnsPolicy to DNSNone when feature gate is disabled. 
+ return podDNSCluster, fmt.Errorf(fmt.Sprintf("invalid DNSPolicy=%v: custom pod DNS is disabled", dnsPolicy)) + case v1.DNSClusterFirstWithHostNet: + return podDNSCluster, nil + case v1.DNSClusterFirst: + if !kubecontainer.IsHostNetworkPod(pod) { + return podDNSCluster, nil + } + // Fallback to DNSDefault for pod on hostnetwork. + fallthrough + case v1.DNSDefault: + return podDNSHost, nil + } + // This should not happen as kube-apiserver should have rejected + // invalid dnsPolicy. + return podDNSCluster, fmt.Errorf(fmt.Sprintf("invalid DNSPolicy=%v", dnsPolicy)) +} + +// Merge DNS options. If duplicated, entries given by PodDNSConfigOption will +// overwrite the existing ones. +func mergeDNSOptions(existingDNSConfigOptions []string, dnsConfigOptions []v1.PodDNSConfigOption) []string { + optionsMap := make(map[string]string) + for _, op := range existingDNSConfigOptions { + if index := strings.Index(op, ":"); index != -1 { + optionsMap[op[:index]] = op[index+1:] + } else { + optionsMap[op] = "" + } + } + for _, op := range dnsConfigOptions { + if op.Value != nil { + optionsMap[op.Name] = *op.Value + } else { + optionsMap[op.Name] = "" + } + } + // Reconvert DNS options into a string array. + options := []string{} + for opName, opValue := range optionsMap { + op := opName + if opValue != "" { + op = op + ":" + opValue + } + options = append(options, op) + } + return options +} + +// appendDNSConfig appends DNS servers, search paths and options given by +// PodDNSConfig to the existing DNS config. Duplicated entries will be merged. +// This assumes existingDNSConfig and dnsConfig are not nil. +func appendDNSConfig(existingDNSConfig *runtimeapi.DNSConfig, dnsConfig *v1.PodDNSConfig) *runtimeapi.DNSConfig { + existingDNSConfig.Servers = omitDuplicates(append(existingDNSConfig.Servers, dnsConfig.Nameservers...)) + existingDNSConfig.Searches = omitDuplicates(append(existingDNSConfig.Searches, dnsConfig.Searches...)) + existingDNSConfig.Options = mergeDNSOptions(existingDNSConfig.Options, dnsConfig.Options) + return existingDNSConfig +} + +// GetPodDNS returns DNS settings for the pod. +func (c *Configurer) GetPodDNS(pod *v1.Pod) (*runtimeapi.DNSConfig, error) { + dnsConfig, err := c.getHostDNSConfig(pod) + if err != nil { + return nil, err + } + + dnsType, err := getPodDNSType(pod) + if err != nil { + glog.Errorf("Failed to get DNS type for pod %q: %v. Falling back to DNSClusterFirst policy.", format.Pod(pod), err) + dnsType = podDNSCluster + } + switch dnsType { + case podDNSNone: + // DNSNone should use empty DNS settings as the base. + dnsConfig = &runtimeapi.DNSConfig{} + case podDNSCluster: + if len(c.clusterDNS) != 0 { + // For a pod with DNSClusterFirst policy, the cluster DNS server is + // the only nameserver configured for the pod. The cluster DNS server + // itself will forward queries to other nameservers that it is configured + // to use, in case the cluster DNS server cannot resolve the DNS query + // itself. + dnsConfig.Servers = []string{} + for _, ip := range c.clusterDNS { + dnsConfig.Servers = append(dnsConfig.Servers, ip.String()) + } + dnsConfig.Searches = c.generateSearchesForDNSClusterFirst(dnsConfig.Searches, pod) + dnsConfig.Options = defaultDNSOptions + break + } + // clusterDNS is not known. Pod with ClusterDNSFirst Policy cannot be created. + nodeErrorMsg := fmt.Sprintf("kubelet does not have ClusterDNS IP configured and cannot create Pod using %q policy.
Falling back to %q policy.", v1.DNSClusterFirst, v1.DNSDefault) + c.recorder.Eventf(c.nodeRef, v1.EventTypeWarning, "MissingClusterDNS", nodeErrorMsg) + c.recorder.Eventf(pod, v1.EventTypeWarning, "MissingClusterDNS", "pod: %q. %s", format.Pod(pod), nodeErrorMsg) + // Fallback to DNSDefault. + fallthrough + case podDNSHost: + // When the kubelet --resolv-conf flag is set to the empty string, use + // DNS settings that override the docker default (which is to use + // /etc/resolv.conf) and effectively disable DNS lookups. According to + // the bind documentation, the behavior of the DNS client library when + // "nameservers" are not specified is to "use the nameserver on the + // local machine". A nameserver setting of localhost is equivalent to + // this documented behavior. + if c.ResolverConfig == "" { + switch { + case c.nodeIP == nil || c.nodeIP.To4() != nil: + dnsConfig.Servers = []string{"127.0.0.1"} + case c.nodeIP.To16() != nil: + dnsConfig.Servers = []string{"::1"} + } + dnsConfig.Searches = []string{"."} + } + } + + if utilfeature.DefaultFeatureGate.Enabled(features.CustomPodDNS) && pod.Spec.DNSConfig != nil { + dnsConfig = appendDNSConfig(dnsConfig, pod.Spec.DNSConfig) + } + return c.formDNSConfigFitsLimits(dnsConfig, pod), nil +} + +// SetupDNSinContainerizedMounter replaces the nameserver in containerized-mounter's rootfs/etc/resolv.conf with kubelet.ClusterDNS +func (c *Configurer) SetupDNSinContainerizedMounter(mounterPath string) { + resolvePath := filepath.Join(strings.TrimSuffix(mounterPath, "/mounter"), "rootfs", "etc", "resolv.conf") + dnsString := "" + for _, dns := range c.clusterDNS { + dnsString = dnsString + fmt.Sprintf("nameserver %s\n", dns) + } + if c.ResolverConfig != "" { + f, err := os.Open(c.ResolverConfig) + defer f.Close() + if err != nil { + glog.Error("Could not open resolverConf file") + } else { + _, hostSearch, _, err := parseResolvConf(f) + if err != nil { + glog.Errorf("Error parsing the resolv.conf file: %v", err) + } else { + dnsString = dnsString + "search" + for _, search := range hostSearch { + dnsString = dnsString + fmt.Sprintf(" %s", search) + } + dnsString = dnsString + "\n" + } + } + } + if err := ioutil.WriteFile(resolvePath, []byte(dnsString), 0600); err != nil { + glog.Errorf("Could not write dns nameserver in file %s, with error %v", resolvePath, err) + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/network/hairpin/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/network/hairpin/BUILD index 88096d4557b3..d10f32a5c864 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/network/hairpin/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/network/hairpin/BUILD @@ -9,6 +9,7 @@ load( go_library( name = "go_default_library", srcs = ["hairpin.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/network/hairpin", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", @@ -18,6 +19,7 @@ go_library( go_test( name = "go_default_test", srcs = ["hairpin_test.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/network/hairpin", library = ":go_default_library", deps = [ "//vendor/k8s.io/utils/exec:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/BUILD index e38aab179300..a6bb4d0e14b5 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/BUILD @@ -14,22 +14,26 @@ go_library( "hostport_manager.go",
"hostport_syncer.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/network/hostport", deps = [ "//pkg/proxy/iptables:go_default_library", "//pkg/util/iptables:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", ], ) go_test( name = "go_default_test", srcs = [ + "fake_iptables_test.go", "hostport_manager_test.go", "hostport_syncer_test.go", "hostport_test.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/network/hostport", library = ":go_default_library", deps = [ "//pkg/util/iptables:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/fake_iptables.go b/vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/fake_iptables.go index 42f7c681164f..ab28a1d87477 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/fake_iptables.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/fake_iptables.go @@ -22,6 +22,7 @@ import ( "net" "strings" + "k8s.io/apimachinery/pkg/util/sets" utiliptables "k8s.io/kubernetes/pkg/util/iptables" ) @@ -36,12 +37,18 @@ type fakeTable struct { } type fakeIPTables struct { - tables map[string]*fakeTable + tables map[string]*fakeTable + builtinChains map[string]sets.String } func NewFakeIPTables() *fakeIPTables { return &fakeIPTables{ tables: make(map[string]*fakeTable, 0), + builtinChains: map[string]sets.String{ + string(utiliptables.TableFilter): sets.NewString("INPUT", "FORWARD", "OUTPUT"), + string(utiliptables.TableNAT): sets.NewString("PREROUTING", "INPUT", "OUTPUT", "POSTROUTING"), + string(utiliptables.TableMangle): sets.NewString("PREROUTING", "INPUT", "FORWARD", "OUTPUT", "POSTROUTING"), + }, } } @@ -246,6 +253,7 @@ func (f *fakeIPTables) SaveInto(tableName utiliptables.Table, buffer *bytes.Buff } func (f *fakeIPTables) restore(restoreTableName utiliptables.Table, data []byte, flush utiliptables.FlushFlag) error { + allLines := string(data) buf := bytes.NewBuffer(data) var tableName utiliptables.Table for { @@ -274,6 +282,13 @@ func (f *fakeIPTables) restore(restoreTableName utiliptables.Table, data []byte, } } _, _ = f.ensureChain(tableName, chainName) + // The --noflush option for iptables-restore doesn't work for user-defined chains, only builtin chains. 
+ // We should flush user-defined chains if the chain is not to be deleted + if !f.isBuiltinChain(tableName, chainName) && !strings.Contains(allLines, "-X "+string(chainName)) { + if err := f.FlushChain(tableName, chainName); err != nil { + return err + } + } } else if strings.HasPrefix(line, "-A") { parts := strings.Split(line, " ") if len(parts) < 3 { @@ -329,3 +344,10 @@ func (f *fakeIPTables) AddReloadFunc(reloadFunc func()) { func (f *fakeIPTables) Destroy() { } + +func (f *fakeIPTables) isBuiltinChain(tableName utiliptables.Table, chainName utiliptables.Chain) bool { + if builtinChains, ok := f.builtinChains[string(tableName)]; ok && builtinChains.Has(string(chainName)) { + return true + } + return false +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/hostport_manager.go b/vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/hostport_manager.go index c2e588cd0e4e..a31b4da08420 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/hostport_manager.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/hostport_manager.go @@ -21,6 +21,7 @@ import ( "crypto/sha256" "encoding/base32" "fmt" + "strconv" "strings" "sync" @@ -177,11 +178,8 @@ func (hm *hostportManager) Remove(id string, podPortMapping *PodPortMapping) (er chainsToRemove := []utiliptables.Chain{} for _, pm := range hostportMappings { chainsToRemove = append(chainsToRemove, getHostportChain(id, pm)) - - // To preserve backward compatibility for k8s 1.5 or earlier. - // Need to remove hostport chains added by hostportSyncer if there is any - // TODO: remove this in 1.7 - chainsToRemove = append(chainsToRemove, hostportChainName(pm, getPodFullName(podPortMapping))) + // TODO remove this after release 1.9, please refer https://github.com/kubernetes/kubernetes/pull/55153 + chainsToRemove = append(chainsToRemove, getBuggyHostportChain(id, pm)) } // remove rules that consists of target chains @@ -252,6 +250,16 @@ func (hm *hostportManager) closeHostports(hostportMappings []*PortMapping) error // WARNING: Please do not change this function. Otherwise, HostportManager may not be able to // identify existing iptables chains. func getHostportChain(id string, pm *PortMapping) utiliptables.Chain { + hash := sha256.Sum256([]byte(id + strconv.Itoa(int(pm.HostPort)) + string(pm.Protocol))) + encoded := base32.StdEncoding.EncodeToString(hash[:]) + return utiliptables.Chain(kubeHostportChainPrefix + encoded[:16]) +} + +// This buggy func does a bad conversion on HostPort from int32 to string. +// It may generate the same chain names for different ports of the same pod, e.g. port 57119/55429/56833. +// `getHostportChain` fixed this bug. In order to clean up the legacy chains/rules, it is temporarily left.
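+// (The bad conversion was string(pm.HostPort), which interprets the int32 port as a Unicode code point rather than formatting it as a decimal number; the fixed getHostportChain uses strconv.Itoa instead.)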
+// TODO remove this after release 1.9, please refer https://github.com/kubernetes/kubernetes/pull/55153 +func getBuggyHostportChain(id string, pm *PortMapping) utiliptables.Chain { hash := sha256.Sum256([]byte(id + string(pm.HostPort) + string(pm.Protocol))) encoded := base32.StdEncoding.EncodeToString(hash[:]) return utiliptables.Chain(kubeHostportChainPrefix + encoded[:16]) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/hostport_syncer.go b/vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/hostport_syncer.go index 03cca1a8d80e..3d7bfd6e4dc1 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/hostport_syncer.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/hostport_syncer.go @@ -21,6 +21,7 @@ import ( "crypto/sha256" "encoding/base32" "fmt" + "strconv" "strings" "time" @@ -64,7 +65,7 @@ func (hp *hostport) String() string { return fmt.Sprintf("%s:%d", hp.protocol, hp.port) } -//openPodHostports opens all hostport for pod and returns the map of hostport and socket +// openHostports opens all hostport for pod and returns the map of hostport and socket func (h *hostportSyncer) openHostports(podHostportMapping *PodPortMapping) error { var retErr error ports := make(map[hostport]closeable) @@ -142,7 +143,7 @@ func writeLine(buf *bytes.Buffer, words ...string) { // this because IPTables Chain Names must be <= 28 chars long, and the longer // they are the harder they are to read. func hostportChainName(pm *PortMapping, podFullName string) utiliptables.Chain { - hash := sha256.Sum256([]byte(string(pm.HostPort) + string(pm.Protocol) + podFullName)) + hash := sha256.Sum256([]byte(strconv.Itoa(int(pm.HostPort)) + string(pm.Protocol) + podFullName)) encoded := base32.StdEncoding.EncodeToString(hash[:]) return utiliptables.Chain(kubeHostportChainPrefix + encoded[:16]) } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/network/kubenet/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/network/kubenet/BUILD index 9ca069c654f8..13c4bce3db31 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/network/kubenet/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/network/kubenet/BUILD @@ -17,6 +17,7 @@ go_library( ], "//conditions:default": [], }), + importpath = "k8s.io/kubernetes/pkg/kubelet/network/kubenet", deps = [ "//pkg/kubelet/apis/kubeletconfig:go_default_library", "//pkg/kubelet/container:go_default_library", @@ -53,6 +54,7 @@ go_test( ], "//conditions:default": [], }), + importpath = "k8s.io/kubernetes/pkg/kubelet/network/kubenet", library = ":go_default_library", deps = select({ "@io_bazel_rules_go//go/platform:linux_amd64": [ diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/network/kubenet/kubenet_linux.go b/vendor/k8s.io/kubernetes/pkg/kubelet/network/kubenet/kubenet_linux.go index 8f0a9f19e808..8551946d4958 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/network/kubenet/kubenet_linux.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/network/kubenet/kubenet_linux.go @@ -23,7 +23,6 @@ import ( "io/ioutil" "net" "path/filepath" - "strconv" "strings" "sync" "time" @@ -59,12 +58,6 @@ const ( // fallbackMTU is used if an MTU is not specified, and we cannot determine the MTU fallbackMTU = 1460 - // private mac prefix safe to use - // Universally administered and locally administered addresses are distinguished by setting the second-least-significant - // bit of the first octet of the address. If it is 1, the address is locally administered. 
For example, for address 0a:00:00:00:00:00, - // the first cotet is 0a(hex), the binary form of which is 00001010, where the second-least-significant bit is 1. - privateMACPrefix = "0a:58" - // ebtables Chain to store dedup rules dedupChain = utilebtables.Chain("KUBE-DEDUP") @@ -98,7 +91,7 @@ type kubenetNetworkPlugin struct { iptables utiliptables.Interface sysctl utilsysctl.Interface ebtables utilebtables.Interface - // vendorDir is passed by kubelet network-plugin-dir parameter. + // vendorDir is passed by kubelet cni-bin-dir parameter. // kubenet will search for cni binaries in DefaultCNIDir first, then continue to vendorDir. vendorDir string nonMasqueradeCIDR string @@ -311,6 +304,11 @@ func (plugin *kubenetNetworkPlugin) Capabilities() utilsets.Int { // TODO: Don't pass the pod to this method, it only needs it for bandwidth // shaping and hostport management. func (plugin *kubenetNetworkPlugin) setup(namespace string, name string, id kubecontainer.ContainerID, pod *v1.Pod, annotations map[string]string) error { + // Disable DAD so we skip the kernel delay on bringing up new interfaces. + if err := plugin.disableContainerDAD(id); err != nil { + glog.V(3).Infof("Failed to disable DAD in container: %v", err) + } + // Bring up container loopback interface if _, err := plugin.addContainerToNetwork(plugin.loConfig, "lo", namespace, name, id); err != nil { return err @@ -334,22 +332,6 @@ func (plugin *kubenetNetworkPlugin) setup(namespace string, name string, id kube return fmt.Errorf("CNI plugin reported an invalid IPv4 address for container %v: %+v.", id, res.IP4) } - // Explicitly assign mac address to cbr0. If bridge mac address is not explicitly set will adopt the lowest MAC address of the attached veths. - // TODO: Remove this once upstream cni bridge plugin handles this - link, err := netlink.LinkByName(BridgeName) - if err != nil { - return fmt.Errorf("failed to lookup %q: %v", BridgeName, err) - } - macAddr, err := generateHardwareAddr(plugin.gateway) - if err != nil { - return err - } - glog.V(3).Infof("Configure %q mac address to %v", BridgeName, macAddr) - err = netlink.LinkSetHardwareAddr(link, macAddr) - if err != nil { - return fmt.Errorf("Failed to configure %q mac address to %q: %v", BridgeName, macAddr, err) - } - // Put the container bridge into promiscuous mode to force it to accept hairpin packets. // TODO: Remove this once the kernel bug (#20096) is fixed. // TODO: check and set promiscuous mode with netlink once vishvananda/netlink supports it @@ -361,8 +343,14 @@ func (plugin *kubenetNetworkPlugin) setup(namespace string, name string, id kube return fmt.Errorf("Error setting promiscuous mode on %s: %v", BridgeName, err) } } + + link, err := netlink.LinkByName(BridgeName) + if err != nil { + return fmt.Errorf("failed to lookup %q: %v", BridgeName, err) + } + // configure the ebtables rules to eliminate duplicate packets by best effort - plugin.syncEbtablesDedupRules(macAddr) + plugin.syncEbtablesDedupRules(link.Attrs().HardwareAddr) } plugin.podIPs[id] = ip4.String() @@ -761,7 +749,11 @@ func (plugin *kubenetNetworkPlugin) addContainerToNetwork(config *libcni.Network } glog.V(3).Infof("Adding %s/%s to '%s' with CNI '%s' plugin and runtime: %+v", namespace, name, config.Network.Name, config.Network.Type, rt) + // The network plugin can take up to 3 seconds to execute, + // so yield the lock while it runs. 
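+ // plugin.mu is assumed to be held by the caller at this point, which is why it can be released and re-acquired around the AddNetwork call below.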
+ plugin.mu.Unlock() res, err := plugin.cniConfig.AddNetwork(config, rt) + plugin.mu.Lock() if err != nil { return nil, fmt.Errorf("Error adding container to network: %v", err) } @@ -834,20 +826,42 @@ func (plugin *kubenetNetworkPlugin) syncEbtablesDedupRules(macAddr net.HardwareA } } -// generateHardwareAddr generates 48 bit virtual mac addresses based on the IP input. -func generateHardwareAddr(ip net.IP) (net.HardwareAddr, error) { - if ip.To4() == nil { - return nil, fmt.Errorf("generateHardwareAddr only support valid ipv4 address as input") +// disableContainerDAD disables duplicate address detection in the container. +// DAD has a negative effect on pod creation latency, since we have to wait +// a second or more for the addresses to leave the "tentative" state. Since +// we're sure there won't be an address conflict (since we manage them manually), +// this is safe. See issue 54651. +// +// This sets net.ipv6.conf.default.dad_transmits to 0. It must be run *before* +// the CNI plugins are run. +func (plugin *kubenetNetworkPlugin) disableContainerDAD(id kubecontainer.ContainerID) error { + key := "net/ipv6/conf/default/dad_transmits" + + sysctlBin, err := plugin.execer.LookPath("sysctl") + if err != nil { + return fmt.Errorf("Could not find sysctl binary: %s", err) } - mac := privateMACPrefix - sections := strings.Split(ip.String(), ".") - for _, s := range sections { - i, _ := strconv.Atoi(s) - mac = mac + ":" + fmt.Sprintf("%02x", i) + + netnsPath, err := plugin.host.GetNetNS(id.ID) + if err != nil { + return fmt.Errorf("Failed to get netns: %v", err) } - hwAddr, err := net.ParseMAC(mac) + if netnsPath == "" { + return fmt.Errorf("Pod has no network namespace") + } + + // If the sysctl doesn't exist, it means ipv6 is disabled; log and move on + if _, err := plugin.sysctl.GetSysctl(key); err != nil { + return fmt.Errorf("Ipv6 not enabled: %v", err) + } + + output, err := plugin.execer.Command(plugin.nsenterPath, + fmt.Sprintf("--net=%s", netnsPath), "-F", "--", + sysctlBin, "-w", fmt.Sprintf("%s=%s", key, "0"), + ).CombinedOutput() if err != nil { - return nil, fmt.Errorf("Failed to parse mac address %s generated based on ip %s due to: %v", mac, ip, err) + return fmt.Errorf("Failed to write sysctl: output: %s error: %s", + output, err) } - return hwAddr, nil + return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/network/metrics/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/network/metrics/BUILD new file mode 100644 index 000000000000..6e3f4c4e50b8 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/network/metrics/BUILD @@ -0,0 +1,23 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["metrics.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/network/metrics", + visibility = ["//visibility:public"], + deps = ["//vendor/github.com/prometheus/client_golang/prometheus:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/network/metrics/metrics.go b/vendor/k8s.io/kubernetes/pkg/kubelet/network/metrics/metrics.go new file mode 100644 index 000000000000..9e4ff1855173 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/network/metrics/metrics.go @@ -0,0 +1,61 @@ +/* +Copyright 2017 The Kubernetes Authors.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +const ( + // NetworkPluginOperationsKey is the key for operation count metrics. + NetworkPluginOperationsKey = "network_plugin_operations" + // NetworkPluginOperationsLatencyKey is the key for the operation latency metrics. + NetworkPluginOperationsLatencyKey = "network_plugin_operations_latency_microseconds" + + // Keep the "kubelet" subsystem for backward compatibility. + kubeletSubsystem = "kubelet" +) + +var ( + // NetworkPluginOperationsLatency collects operation latency numbers by operation + // type. + NetworkPluginOperationsLatency = prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Subsystem: kubeletSubsystem, + Name: NetworkPluginOperationsLatencyKey, + Help: "Latency in microseconds of network plugin operations. Broken down by operation type.", + }, + []string{"operation_type"}, + ) +) + +var registerMetrics sync.Once + +// Register all metrics. +func Register() { + registerMetrics.Do(func() { + prometheus.MustRegister(NetworkPluginOperationsLatency) + }) +} + +// SinceInMicroseconds gets the time since the specified start in microseconds. +func SinceInMicroseconds(start time.Time) float64 { + return float64(time.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds()) +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/network/plugins.go b/vendor/k8s.io/kubernetes/pkg/kubelet/network/plugins.go index 7fac1f137a1c..24e358943e99 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/network/plugins.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/network/plugins.go @@ -21,6 +21,7 @@ import ( "net" "strings" "sync" + "time" "github.com/golang/glog" "k8s.io/api/core/v1" @@ -32,6 +33,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/network/hostport" + "k8s.io/kubernetes/pkg/kubelet/network/metrics" utilsysctl "k8s.io/kubernetes/pkg/util/sysctl" utilexec "k8s.io/utils/exec" ) @@ -155,6 +157,7 @@ func InitNetworkPlugin(plugins []NetworkPlugin, networkPluginName string, host H if networkPluginName == "" { // default to the no_op plugin plug := &NoopNetworkPlugin{} + plug.Sysctl = utilsysctl.New() if err := plug.Init(host, hairpinMode, nonMasqueradeCIDR, mtu); err != nil { return nil, err } @@ -198,9 +201,11 @@ func UnescapePluginName(in string) string { } type NoopNetworkPlugin struct { + Sysctl utilsysctl.Interface } const sysctlBridgeCallIPTables = "net/bridge/bridge-nf-call-iptables" +const sysctlBridgeCallIP6Tables = "net/bridge/bridge-nf-call-ip6tables" func (plugin *NoopNetworkPlugin) Init(host Host, hairpinMode kubeletconfig.HairpinMode, nonMasqueradeCIDR string, mtu int) error { // Set bridge-nf-call-iptables=1 to maintain compatibility with older @@ -212,9 +217,16 @@ func (plugin *NoopNetworkPlugin) Init(host Host, hairpinMode kubeletconfig.Hairp // Ensure the netfilter module is loaded on kernel >= 3.18; previously // it was built-in. 
utilexec.New().Command("modprobe", "br-netfilter").CombinedOutput() - if err := utilsysctl.New().SetSysctl(sysctlBridgeCallIPTables, 1); err != nil { + if err := plugin.Sysctl.SetSysctl(sysctlBridgeCallIPTables, 1); err != nil { glog.Warningf("can't set sysctl %s: %v", sysctlBridgeCallIPTables, err) } + if val, err := plugin.Sysctl.GetSysctl(sysctlBridgeCallIP6Tables); err == nil { + if val != 1 { + if err = plugin.Sysctl.SetSysctl(sysctlBridgeCallIP6Tables, 1); err != nil { + glog.Warningf("can't set sysctl %s: %v", sysctlBridgeCallIP6Tables, err) + } + } + } return nil } @@ -304,6 +316,7 @@ type PluginManager struct { } func NewPluginManager(plugin NetworkPlugin) *PluginManager { + metrics.Register() return &PluginManager{ plugin: plugin, pods: make(map[string]*podLock), @@ -371,7 +384,13 @@ func (pm *PluginManager) podUnlock(fullPodName string) { } } +// recordOperation records operation and duration +func recordOperation(operation string, start time.Time) { + metrics.NetworkPluginOperationsLatency.WithLabelValues(operation).Observe(metrics.SinceInMicroseconds(start)) +} + func (pm *PluginManager) GetPodNetworkStatus(podNamespace, podName string, id kubecontainer.ContainerID) (*PodNetworkStatus, error) { + defer recordOperation("get_pod_network_status", time.Now()) fullPodName := kubecontainer.BuildPodFullName(podName, podNamespace) pm.podLock(fullPodName).Lock() defer pm.podUnlock(fullPodName) @@ -385,6 +404,7 @@ func (pm *PluginManager) GetPodNetworkStatus(podNamespace, podName string, id ku } func (pm *PluginManager) SetUpPod(podNamespace, podName string, id kubecontainer.ContainerID, annotations map[string]string) error { + defer recordOperation("set_up_pod", time.Now()) fullPodName := kubecontainer.BuildPodFullName(podName, podNamespace) pm.podLock(fullPodName).Lock() defer pm.podUnlock(fullPodName) @@ -398,6 +418,7 @@ func (pm *PluginManager) SetUpPod(podNamespace, podName string, id kubecontainer } func (pm *PluginManager) TearDownPod(podNamespace, podName string, id kubecontainer.ContainerID) error { + defer recordOperation("tear_down_pod", time.Now()) fullPodName := kubecontainer.BuildPodFullName(podName, podNamespace) pm.podLock(fullPodName).Lock() defer pm.podUnlock(fullPodName) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/networks.go b/vendor/k8s.io/kubernetes/pkg/kubelet/networks.go deleted file mode 100644 index 84bcdb8fa0de..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/networks.go +++ /dev/null @@ -1,95 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubelet - -import ( - "k8s.io/api/core/v1" - clientset "k8s.io/client-go/kubernetes" - kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" - "k8s.io/kubernetes/pkg/kubelet/network" -) - -// This just exports required functions from kubelet proper, for use by network -// plugins. -// TODO(#35457): get rid of this backchannel to the kubelet. The scope of -// the back channel is restricted to host-ports/testing, and restricted -// to kubenet. 
No other network plugin wrapper needs it. Other plugins -// only require a way to access namespace information, which they can do -// directly through the methods implemented by criNetworkHost. -type networkHost struct { - kubelet *Kubelet -} - -func (nh *networkHost) GetPodByName(name, namespace string) (*v1.Pod, bool) { - return nh.kubelet.GetPodByName(name, namespace) -} - -func (nh *networkHost) GetKubeClient() clientset.Interface { - return nh.kubelet.kubeClient -} - -func (nh *networkHost) GetRuntime() kubecontainer.Runtime { - return nh.kubelet.GetRuntime() -} - -func (nh *networkHost) SupportsLegacyFeatures() bool { - return true -} - -// criNetworkHost implements the part of network.Host required by the -// cri (NamespaceGetter). It leechs off networkHost for all other -// methods, because networkHost is slated for deletion. -type criNetworkHost struct { - *networkHost - // criNetworkHost currently support legacy features. Hence no need to support PortMappingGetter - *network.NoopPortMappingGetter -} - -// GetNetNS returns the network namespace of the given containerID. -// This method satisfies the network.NamespaceGetter interface for -// networkHost. It's only meant to be used from network plugins -// that are directly invoked by the kubelet (aka: legacy, pre-cri). -// Any network plugin invoked by a cri must implement NamespaceGetter -// to talk directly to the runtime instead. -func (c *criNetworkHost) GetNetNS(containerID string) (string, error) { - return c.kubelet.GetRuntime().GetNetNS(kubecontainer.ContainerID{Type: "", ID: containerID}) -} - -// NoOpLegacyHost implements the network.LegacyHost interface for the remote -// runtime shim by just returning empties. It doesn't support legacy features -// like host port and bandwidth shaping. 
-type NoOpLegacyHost struct{} - -// GetPodByName always returns "nil, true" for 'NoOpLegacyHost' -func (n *NoOpLegacyHost) GetPodByName(namespace, name string) (*v1.Pod, bool) { - return nil, true -} - -// GetKubeClient always returns "nil" for 'NoOpLegacyHost' -func (n *NoOpLegacyHost) GetKubeClient() clientset.Interface { - return nil -} - -// GetRuntime always returns "nil" for 'NoOpLegacyHost' -func (n *NoOpLegacyHost) GetRuntime() kubecontainer.Runtime { - return nil -} - -// SupportsLegacyFeatures always returns "false" for 'NoOpLegacyHost' -func (n *NoOpLegacyHost) SupportsLegacyFeatures() bool { - return false -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/BUILD index 4bf6c7a7550b..122fd02436d9 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/BUILD @@ -13,7 +13,9 @@ go_library( "generic.go", "pleg.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/pleg", deps = [ + "//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library", "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/metrics:go_default_library", "//vendor/github.com/golang/glog:go_default_library", @@ -27,6 +29,7 @@ go_library( go_test( name = "go_default_test", srcs = ["generic_test.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/pleg", library = ":go_default_library", deps = [ "//pkg/kubelet/container:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/generic.go b/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/generic.go index e12509f4b546..b873efb005b9 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/generic.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/generic.go @@ -26,6 +26,7 @@ import ( "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" + runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/metrics" ) @@ -172,7 +173,7 @@ func (g *GenericPLEG) getRelistTime() time.Time { return val.(time.Time) } -func (g *GenericPLEG) updateRelisTime(timestamp time.Time) { +func (g *GenericPLEG) updateRelistTime(timestamp time.Time) { g.relistTime.Store(timestamp) } @@ -197,8 +198,7 @@ func (g *GenericPLEG) relist() { return } - // Update the relist time. 
- g.updateRelisTime(timestamp) + g.updateRelistTime(timestamp) pods := kubecontainer.Pods(podList) g.podRecords.setCurrent(pods) @@ -329,6 +329,41 @@ func (g *GenericPLEG) cacheEnabled() bool { return g.cache != nil } +// Preserve an older cached status' pod IP if the new status has no pod IP +// and its sandboxes have exited +func (g *GenericPLEG) getPodIP(pid types.UID, status *kubecontainer.PodStatus) string { + if status.IP != "" { + return status.IP + } + + oldStatus, err := g.cache.Get(pid) + if err != nil || oldStatus.IP == "" { + return "" + } + + for _, sandboxStatus := range status.SandboxStatuses { + // If at least one sandbox is ready, then use this status update's pod IP + if sandboxStatus.State == runtimeapi.PodSandboxState_SANDBOX_READY { + return status.IP + } + } + + if len(status.SandboxStatuses) == 0 { + // Without sandboxes (which built-in runtimes like rkt don't report) + // look at all the container statuses, and if any containers are + // running then use the new pod IP + for _, containerStatus := range status.ContainerStatuses { + if containerStatus.State == kubecontainer.ContainerStateCreated || containerStatus.State == kubecontainer.ContainerStateRunning { + return status.IP + } + } + } + + // For pods with no ready containers or sandboxes (like exited pods) + // use the old status' pod IP + return oldStatus.IP +} + func (g *GenericPLEG) updateCache(pod *kubecontainer.Pod, pid types.UID) error { if pod == nil { // The pod is missing in the current relist. This means that @@ -343,6 +378,14 @@ func (g *GenericPLEG) updateCache(pod *kubecontainer.Pod, pid types.UID) error { // all containers again. status, err := g.runtime.GetPodStatus(pod.ID, pod.Name, pod.Namespace) glog.V(4).Infof("PLEG: Write status for %s/%s: %#v (err: %v)", pod.Name, pod.Namespace, status, err) + if err == nil { + // Preserve the pod IP across cache updates if the new IP is empty. + // When a pod is torn down, kubelet may race with PLEG and retrieve + // a pod status after network teardown, but the kubernetes API expects + // the completed pod's IP to be available after the pod is dead. 
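+ // getPodIP only falls back to the cached IP when the new status reports no IP and none of its sandboxes are ready (or, for runtimes that report no sandboxes, when no containers are created or running).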
+ status.IP = g.getPodIP(pid, status) + } + g.cache.Set(pod.ID, status, err, timestamp) return err } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/pod/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/pod/BUILD index 937aae83c50a..2287b7cf9e9f 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/pod/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/pod/BUILD @@ -12,7 +12,9 @@ go_library( "mirror_client.go", "pod_manager.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/pod", deps = [ + "//pkg/kubelet/checkpoint:go_default_library", "//pkg/kubelet/configmap:go_default_library", "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/secret:go_default_library", @@ -32,6 +34,7 @@ go_test( "mirror_client_test.go", "pod_manager_test.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/pod", library = ":go_default_library", deps = [ "//pkg/kubelet/configmap:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/pod/mirror_client.go b/vendor/k8s.io/kubernetes/pkg/kubelet/pod/mirror_client.go index fc81acec2b17..9b8add5bc5a0 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/pod/mirror_client.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/pod/mirror_client.go @@ -63,7 +63,7 @@ func (mc *basicMirrorClient) CreateMirrorPod(pod *v1.Pod) error { } hash := getPodHash(pod) copyPod.Annotations[kubetypes.ConfigMirrorAnnotationKey] = hash - apiPod, err := mc.apiserverClient.Core().Pods(copyPod.Namespace).Create(©Pod) + apiPod, err := mc.apiserverClient.CoreV1().Pods(copyPod.Namespace).Create(©Pod) if err != nil && errors.IsAlreadyExists(err) { // Check if the existing pod is the same as the pod we want to create. if h, ok := apiPod.Annotations[kubetypes.ConfigMirrorAnnotationKey]; ok && h == hash { @@ -84,7 +84,7 @@ func (mc *basicMirrorClient) DeleteMirrorPod(podFullName string) error { } glog.V(2).Infof("Deleting a mirror pod %q", podFullName) // TODO(random-liu): Delete the mirror pod with uid precondition in mirror pod manager - if err := mc.apiserverClient.Core().Pods(namespace).Delete(name, metav1.NewDeleteOptions(0)); err != nil && !errors.IsNotFound(err) { + if err := mc.apiserverClient.CoreV1().Pods(namespace).Delete(name, metav1.NewDeleteOptions(0)); err != nil && !errors.IsNotFound(err) { glog.Errorf("Failed deleting a mirror pod %q: %v", podFullName, err) } return nil diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/pod/pod_manager.go b/vendor/k8s.io/kubernetes/pkg/kubelet/pod/pod_manager.go index 20940a7dc5bd..be15616a9eaa 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/pod/pod_manager.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/pod/pod_manager.go @@ -19,8 +19,11 @@ package pod import ( "sync" + "github.com/golang/glog" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/kubernetes/pkg/kubelet/checkpoint" "k8s.io/kubernetes/pkg/kubelet/configmap" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/secret" @@ -116,8 +119,9 @@ type basicManager struct { translationByUID map[kubetypes.MirrorPodUID]kubetypes.ResolvedPodUID // basicManager is keeping secretManager and configMapManager up-to-date. - secretManager secret.Manager - configMapManager configmap.Manager + secretManager secret.Manager + configMapManager configmap.Manager + checkpointManager checkpoint.Manager // A mirror pod client to create/delete mirror pods. 
MirrorClient @@ -128,6 +132,7 @@ func NewBasicPodManager(client MirrorClient, secretManager secret.Manager, confi pm := &basicManager{} pm.secretManager = secretManager pm.configMapManager = configMapManager + pm.checkpointManager = checkpoint.GetInstance() pm.MirrorClient = client pm.SetPods(nil) return pm @@ -155,6 +160,11 @@ func (pm *basicManager) UpdatePod(pod *v1.Pod) { pm.lock.Lock() defer pm.lock.Unlock() pm.updatePodsInternal(pod) + if pm.checkpointManager != nil { + if err := pm.checkpointManager.WritePod(pod); err != nil { + glog.Errorf("Error writing checkpoint for pod: %v", pod.GetName()) + } + } } // updatePodsInternal replaces the given pods in the current state of the @@ -213,6 +223,11 @@ func (pm *basicManager) DeletePod(pod *v1.Pod) { delete(pm.podByUID, kubetypes.ResolvedPodUID(pod.UID)) delete(pm.podByFullName, podFullName) } + if pm.checkpointManager != nil { + if err := pm.checkpointManager.DeletePod(pod); err != nil { + glog.Errorf("Error deleting checkpoint for pod: %v", pod.GetName()) + } + } } func (pm *basicManager) GetPods() []*v1.Pod { diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/pod_workers.go b/vendor/k8s.io/kubernetes/pkg/kubelet/pod_workers.go index 91f48be899a1..6a8a33353f55 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/pod_workers.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/pod_workers.go @@ -162,6 +162,9 @@ func (p *podWorkers) managePodLoop(podUpdates <-chan UpdatePodOptions) { // the previous sync. status, err := p.podCache.GetNewerThan(podUID, lastSyncTime) if err != nil { + // This is the legacy event thrown by manage pod loop + // all other events are now dispatched from syncPodFn + p.recorder.Eventf(update.Pod, v1.EventTypeWarning, events.FailedSync, "error determining status: %v", err) return err } err = p.syncPodFn(syncPodOptions{ @@ -179,14 +182,8 @@ func (p *podWorkers) managePodLoop(podUpdates <-chan UpdatePodOptions) { update.OnCompleteFunc(err) } if err != nil { + // IMPORTANT: we do not log errors here, the syncPodFn is responsible for logging errors glog.Errorf("Error syncing pod %s (%q), skipping: %v", update.Pod.UID, format.Pod(update.Pod), err) - // if we failed sync, we throw more specific events for why it happened. - // as a result, i question the value of this event. - // TODO: determine if we can remove this in a future release. - // do not include descriptive text that can vary on why it failed so in a pathological - // scenario, kubelet does not create enough discrete events that miss default aggregation - // window. 
- p.recorder.Eventf(update.Pod, v1.EventTypeWarning, events.FailedSync, "Error syncing pod") } p.wrapUp(update.Pod.UID, err) } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/preemption/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/preemption/BUILD index cdc9c181195f..736a457de10a 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/preemption/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/preemption/BUILD @@ -9,9 +9,10 @@ load( go_library( name = "go_default_library", srcs = ["preemption.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/preemption", deps = [ - "//pkg/api/v1/helper/qos:go_default_library", "//pkg/api/v1/resource:go_default_library", + "//pkg/apis/core/v1/helper/qos:go_default_library", "//pkg/features:go_default_library", "//pkg/kubelet/events:go_default_library", "//pkg/kubelet/eviction:go_default_library", @@ -43,9 +44,10 @@ filegroup( go_test( name = "go_default_test", srcs = ["preemption_test.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/preemption", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/kubelet/types:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/preemption/preemption.go b/vendor/k8s.io/kubernetes/pkg/kubelet/preemption/preemption.go index fd144632fe29..1f9214b83063 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/preemption/preemption.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/preemption/preemption.go @@ -24,8 +24,8 @@ import ( "k8s.io/api/core/v1" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/tools/record" - v1qos "k8s.io/kubernetes/pkg/api/v1/helper/qos" "k8s.io/kubernetes/pkg/api/v1/resource" + v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/kubelet/events" "k8s.io/kubernetes/pkg/kubelet/eviction" diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/prober/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/prober/BUILD index 380e5f5ee6e3..ef21ca9173f8 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/prober/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/prober/BUILD @@ -13,6 +13,7 @@ go_library( "prober_manager.go", "worker.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/prober", deps = [ "//pkg/api/v1/pod:go_default_library", "//pkg/kubelet/container:go_default_library", @@ -44,6 +45,7 @@ go_test( "prober_test.go", "worker_test.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/prober", library = ":go_default_library", deps = [ "//pkg/kubelet/container:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/prober/prober.go b/vendor/k8s.io/kubernetes/pkg/kubelet/prober/prober.go index 076d5b676c52..d81ee8f377ef 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/prober/prober.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/prober/prober.go @@ -46,10 +46,14 @@ const maxProbeRetries = 3 // Prober helps to check the liveness/readiness of a container. type prober struct { - exec execprobe.ExecProber - http httprobe.HTTPProber - tcp tcprobe.TCPProber - runner kubecontainer.ContainerCommandRunner + exec execprobe.ExecProber + // probe types need different httprobe instances so they don't + // share a connection pool which can cause collisions to the + // same host:port and transient failures. See #49740.
+ readinessHttp httprobe.HTTPProber + livenessHttp httprobe.HTTPProber + tcp tcprobe.TCPProber + runner kubecontainer.ContainerCommandRunner refManager *kubecontainer.RefManager recorder record.EventRecorder @@ -63,12 +67,13 @@ func newProber( recorder record.EventRecorder) *prober { return &prober{ - exec: execprobe.New(), - http: httprobe.New(), - tcp: tcprobe.New(), - runner: runner, - refManager: refManager, - recorder: recorder, + exec: execprobe.New(), + readinessHttp: httprobe.New(), + livenessHttp: httprobe.New(), + tcp: tcprobe.New(), + runner: runner, + refManager: refManager, + recorder: recorder, } } @@ -90,7 +95,7 @@ func (pb *prober) probe(probeType probeType, pod *v1.Pod, status v1.PodStatus, c return results.Success, nil } - result, output, err := pb.runProbeWithRetries(probeSpec, pod, status, container, containerID, maxProbeRetries) + result, output, err := pb.runProbeWithRetries(probeType, probeSpec, pod, status, container, containerID, maxProbeRetries) if err != nil || result != probe.Success { // Probe failed in one way or another. ref, hasRef := pb.refManager.GetRef(containerID) @@ -116,12 +121,12 @@ func (pb *prober) probe(probeType probeType, pod *v1.Pod, status v1.PodStatus, c // runProbeWithRetries tries to probe the container in a finite loop, it returns the last result // if it never succeeds. -func (pb *prober) runProbeWithRetries(p *v1.Probe, pod *v1.Pod, status v1.PodStatus, container v1.Container, containerID kubecontainer.ContainerID, retries int) (probe.Result, string, error) { +func (pb *prober) runProbeWithRetries(probeType probeType, p *v1.Probe, pod *v1.Pod, status v1.PodStatus, container v1.Container, containerID kubecontainer.ContainerID, retries int) (probe.Result, string, error) { var err error var result probe.Result var output string for i := 0; i < retries; i++ { - result, output, err = pb.runProbe(p, pod, status, container, containerID) + result, output, err = pb.runProbe(probeType, p, pod, status, container, containerID) if err == nil { return result, output, nil } @@ -139,7 +144,7 @@ func buildHeader(headerList []v1.HTTPHeader) http.Header { return headers } -func (pb *prober) runProbe(p *v1.Probe, pod *v1.Pod, status v1.PodStatus, container v1.Container, containerID kubecontainer.ContainerID) (probe.Result, string, error) { +func (pb *prober) runProbe(probeType probeType, p *v1.Probe, pod *v1.Pod, status v1.PodStatus, container v1.Container, containerID kubecontainer.ContainerID) (probe.Result, string, error) { timeout := time.Duration(p.TimeoutSeconds) * time.Second if p.Exec != nil { glog.V(4).Infof("Exec-Probe Pod: %v, Container: %v, Command: %v", pod, container, p.Exec.Command) @@ -161,7 +166,11 @@ func (pb *prober) runProbe(p *v1.Probe, pod *v1.Pod, status v1.PodStatus, contai url := formatURL(scheme, host, port, path) headers := buildHeader(p.HTTPGet.HTTPHeaders) glog.V(4).Infof("HTTP-Probe Headers: %v", headers) - return pb.http.Probe(url, headers, timeout) + if probeType == liveness { + return pb.livenessHttp.Probe(url, headers, timeout) + } else { // readiness + return pb.readinessHttp.Probe(url, headers, timeout) + } } if p.TCPSocket != nil { port, err := extractPort(p.TCPSocket.Port, container) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/prober/results/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/prober/results/BUILD index b33f0f722d11..2e73fb74483a 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/prober/results/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/prober/results/BUILD @@ -9,6 +9,7 @@ load( go_library( name = 
"go_default_library", srcs = ["results_manager.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/prober/results", deps = [ "//pkg/kubelet/container:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", @@ -19,6 +20,7 @@ go_library( go_test( name = "go_default_test", srcs = ["results_manager_test.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/prober/results", library = ":go_default_library", deps = [ "//pkg/kubelet/container:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/qos/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/qos/BUILD index c7a50447f4aa..6160d7b41378 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/qos/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/qos/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["policy_test.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/qos", library = ":go_default_library", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", @@ -22,8 +23,9 @@ go_library( "doc.go", "policy.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/qos", deps = [ - "//pkg/api/v1/helper/qos:go_default_library", + "//pkg/apis/core/v1/helper/qos:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/qos/policy.go b/vendor/k8s.io/kubernetes/pkg/kubelet/qos/policy.go index 01125dd77087..b7177d2a4fd5 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/qos/policy.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/qos/policy.go @@ -18,7 +18,7 @@ package qos import ( "k8s.io/api/core/v1" - v1qos "k8s.io/kubernetes/pkg/api/v1/helper/qos" + v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos" ) const ( diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/remote/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/remote/BUILD index 91528ac553e2..cee27f87387d 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/remote/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/remote/BUILD @@ -3,6 +3,7 @@ package(default_visibility = ["//visibility:public"]) load( "@io_bazel_rules_go//go:def.bzl", "go_library", + "go_test", ) go_library( @@ -13,6 +14,7 @@ go_library( "remote_runtime.go", "utils.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/remote", deps = [ "//pkg/kubelet/apis/cri:go_default_library", "//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library", @@ -33,6 +35,23 @@ filegroup( filegroup( name = "all-srcs", - srcs = [":package-srcs"], + srcs = [ + ":package-srcs", + "//pkg/kubelet/remote/fake:all-srcs", + ], + tags = ["automanaged"], +) + +go_test( + name = "go_default_test", + srcs = ["remote_runtime_test.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/remote", + library = ":go_default_library", tags = ["automanaged"], + deps = [ + "//pkg/kubelet/apis/cri:go_default_library", + "//pkg/kubelet/apis/cri/testing:go_default_library", + "//pkg/kubelet/remote/fake:go_default_library", + "//vendor/github.com/stretchr/testify/assert:go_default_library", + ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/remote/doc.go b/vendor/k8s.io/kubernetes/pkg/kubelet/remote/doc.go index 6f3005428472..9571cbc595f7 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/remote/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/remote/doc.go @@ -14,6 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package remote containers gRPC implementation of internalapi.RuntimeService +// Package remote contains gRPC implementation of internalapi.RuntimeService // and internalapi.ImageManagerService. 
package remote diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/rkt/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/rkt/BUILD index 5193140585a2..3b687934fd32 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/rkt/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/rkt/BUILD @@ -19,6 +19,7 @@ go_library( "systemd.go", "version.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/rkt", deps = [ "//pkg/credentialprovider:go_default_library", "//pkg/kubelet/container:go_default_library", @@ -65,6 +66,7 @@ go_test( "fake_rkt_interface_test.go", "rkt_test.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/rkt", library = ":go_default_library", deps = [ "//pkg/kubelet/container:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/rkt/rkt.go b/vendor/k8s.io/kubernetes/pkg/kubelet/rkt/rkt.go index 70838fa4c19d..b35288b4aab8 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/rkt/rkt.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/rkt/rkt.go @@ -842,7 +842,7 @@ func (r *Runtime) newAppcRuntimeApp(pod *v1.Pod, podIP string, c v1.Container, r } // TODO: determine how this should be handled for rkt - opts, _, err := r.runtimeHelper.GenerateRunContainerOptions(pod, &c, podIP) + opts, err := r.runtimeHelper.GenerateRunContainerOptions(pod, &c, podIP) if err != nil { return err } @@ -1041,17 +1041,17 @@ func (r *Runtime) generateRunCommand(pod *v1.Pod, uuid, networkNamespaceID strin } } else { // Setup DNS. - dnsServers, dnsSearches, _, err := r.runtimeHelper.GetClusterDNS(pod) + dnsConfig, err := r.runtimeHelper.GetPodDNS(pod) if err != nil { return "", err } - for _, server := range dnsServers { + for _, server := range dnsConfig.Servers { runPrepared = append(runPrepared, fmt.Sprintf("--dns=%s", server)) } - for _, search := range dnsSearches { + for _, search := range dnsConfig.Searches { runPrepared = append(runPrepared, fmt.Sprintf("--dns-search=%s", search)) } - if len(dnsServers) > 0 || len(dnsSearches) > 0 { + if len(dnsConfig.Servers) > 0 || len(dnsConfig.Searches) > 0 { runPrepared = append(runPrepared, fmt.Sprintf("--dns-opt=%s", defaultDNSOption)) } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/secret/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/secret/BUILD index e7be9d1a1368..26a879d8fa67 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/secret/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/secret/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["secret_manager_test.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/secret", library = ":go_default_library", deps = [ "//vendor/github.com/stretchr/testify/assert:go_default_library", @@ -28,6 +29,7 @@ go_library( "fake_manager.go", "secret_manager.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/secret", deps = [ "//pkg/api/v1/pod:go_default_library", "//pkg/kubelet/util:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/secret/secret_manager.go b/vendor/k8s.io/kubernetes/pkg/kubelet/secret/secret_manager.go index 3b7356f8aebe..3de230a90328 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/secret/secret_manager.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/secret/secret_manager.go @@ -64,7 +64,7 @@ func NewSimpleSecretManager(kubeClient clientset.Interface) Manager { } func (s *simpleSecretManager) GetSecret(namespace, name string) (*v1.Secret, error) { - return s.kubeClient.Core().Secrets(namespace).Get(name, metav1.GetOptions{}) + return s.kubeClient.CoreV1().Secrets(namespace).Get(name, metav1.GetOptions{}) } func (s *simpleSecretManager) RegisterPod(pod *v1.Pod) { @@ -216,7 
+216,7 @@ func (s *secretStore) Get(namespace, name string) (*v1.Secret, error) { // etcd and apiserver (the cache is eventually consistent). util.FromApiserverCache(&opts) } - secret, err := s.kubeClient.Core().Secrets(namespace).Get(name, opts) + secret, err := s.kubeClient.CoreV1().Secrets(namespace).Get(name, opts) if err != nil && !apierrors.IsNotFound(err) && data.secret == nil && data.err == nil { // Couldn't fetch the latest secret, but there is no cached data to return. // Return the fetch result instead. @@ -282,6 +282,11 @@ func (c *cachingSecretManager) RegisterPod(pod *v1.Pod) { c.registeredPods[key] = pod if prev != nil { for name := range getSecretNames(prev) { + // On an update, the .Add() call above will have re-incremented the + // ref count of any existing secrets, so any secrets that are in both + // names and prev need to have their ref counts decremented. Any that + // are only in prev need to be completely removed. This unconditional + // call takes care of both cases. c.secretStore.Delete(prev.Namespace, name) } } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/server/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/server/BUILD index 61c57935567e..7584f70d5228 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/server/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/server/BUILD @@ -13,9 +13,11 @@ go_library( "doc.go", "server.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/server", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/v1/validation:go_default_library", + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/v1/validation:go_default_library", "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/server/portforward:go_default_library", "//pkg/kubelet/server/remotecommand:go_default_library", @@ -54,10 +56,11 @@ go_test( "server_test.go", "server_websocket_test.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/server", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/install:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/install:go_default_library", "//pkg/kubelet/apis/stats/v1alpha1:go_default_library", "//pkg/kubelet/cm:go_default_library", "//pkg/kubelet/container:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/BUILD index 23f52c98fd1e..190ff72197f5 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/BUILD @@ -14,8 +14,9 @@ go_library( "portforward.go", "websocket.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/server/portforward", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/httpstream:go_default_library", @@ -32,9 +33,10 @@ go_test( "httpstream_test.go", "websocket_test.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/server/portforward", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/httpstream:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/httpstream.go b/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/httpstream.go index 5f872c820100..919ed5a7868d 100644 --- 
a/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/httpstream.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/httpstream.go @@ -28,7 +28,7 @@ import ( "k8s.io/apimachinery/pkg/util/httpstream" "k8s.io/apimachinery/pkg/util/httpstream/spdy" utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "github.com/golang/glog" ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/websocket.go b/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/websocket.go index cb4bca04558c..b445c922f609 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/websocket.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/websocket.go @@ -32,7 +32,7 @@ import ( "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apiserver/pkg/server/httplog" "k8s.io/apiserver/pkg/util/wsstream" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) const ( diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/BUILD index f93da253f84c..97da39d617a7 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/BUILD @@ -14,8 +14,9 @@ go_library( "httpstream.go", "websocket.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/server/remotecommand", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/httpstream.go b/vendor/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/httpstream.go index 9d4883212d95..74f0595cf7e7 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/httpstream.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/httpstream.go @@ -32,7 +32,7 @@ import ( "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apiserver/pkg/util/wsstream" "k8s.io/client-go/tools/remotecommand" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "github.com/golang/glog" ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/server/server.go b/vendor/k8s.io/kubernetes/pkg/kubelet/server/server.go index 285a0d376cef..d22096e48d25 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/server/server.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/server/server.go @@ -50,8 +50,9 @@ import ( "k8s.io/apiserver/pkg/server/httplog" "k8s.io/apiserver/pkg/util/flushwriter" "k8s.io/client-go/tools/remotecommand" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/v1/validation" + "k8s.io/kubernetes/pkg/api/legacyscheme" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/v1/validation" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/server/portforward" remotecommandserver "k8s.io/kubernetes/pkg/kubelet/server/remotecommand" @@ -210,6 +211,8 @@ func NewServer( if enableContentionProfiling { goruntime.SetBlockProfileRate(1) } + } else { + server.InstallDebuggingDisabledHandlers() } return server } @@ -233,14 +236,14 @@ func (s *Server) InstallAuthFilter() { attrs := s.auth.GetRequestAttributes(u, req.Request) // Authorize - authorized, _, err := s.auth.Authorize(attrs) + decision, _, err := s.auth.Authorize(attrs) if err != nil { msg := 
fmt.Sprintf("Authorization error (user=%s, verb=%s, resource=%s, subresource=%s)", u.GetName(), attrs.GetVerb(), attrs.GetResource(), attrs.GetSubresource()) glog.Errorf(msg, err) resp.WriteErrorString(http.StatusInternalServerError, msg) return } - if !authorized { + if decision != authorizer.DecisionAllow { msg := fmt.Sprintf("Forbidden (user=%s, verb=%s, resource=%s, subresource=%s)", u.GetName(), attrs.GetVerb(), attrs.GetResource(), attrs.GetSubresource()) glog.V(2).Info(msg) resp.WriteErrorString(http.StatusForbidden, msg) @@ -417,6 +420,20 @@ func (s *Server) InstallDebuggingHandlers(criHandler http.Handler) { } } +// InstallDebuggingDisabledHandlers registers the HTTP request patterns that provide better error message +func (s *Server) InstallDebuggingDisabledHandlers() { + h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.Error(w, "Debug endpoints are disabled.", http.StatusMethodNotAllowed) + }) + + paths := []string{ + "/run/", "/exec/", "/attach/", "/portForward/", "/containerLogs/", + "/runningpods/", pprofBasePath, logsPath} + for _, p := range paths { + s.restfulCont.Handle(p, h) + } +} + // Checks if kubelet's sync loop that updates containers is working. func (s *Server) syncLoopHealthCheck(req *http.Request) error { duration := s.host.ResyncInterval() * 2 @@ -465,7 +482,7 @@ func (s *Server) getContainerLogs(request *restful.Request, response *restful.Re } // container logs on the kubelet are locked to the v1 API version of PodLogOptions logOptions := &v1.PodLogOptions{} - if err := api.ParameterCodec.DecodeParameters(query, v1.SchemeGroupVersion, logOptions); err != nil { + if err := legacyscheme.ParameterCodec.DecodeParameters(query, v1.SchemeGroupVersion, logOptions); err != nil { response.WriteError(http.StatusBadRequest, fmt.Errorf(`{"message": "Unable to decode query."}`)) return } @@ -531,7 +548,7 @@ func encodePods(pods []*v1.Pod) (data []byte, err error) { // TODO: this needs to be parameterized to the kubelet, not hardcoded. Depends on Kubelet // as API server refactor. 
// TODO: Locked to v1, needs to be made generic - codec := api.Codecs.LegacyCodec(schema.GroupVersion{Group: v1.GroupName, Version: "v1"}) + codec := legacyscheme.Codecs.LegacyCodec(schema.GroupVersion{Group: v1.GroupName, Version: "v1"}) return runtime.Encode(codec, podList) } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/BUILD index c4f93551e2bf..00140b342bf3 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/BUILD @@ -10,6 +10,7 @@ go_library( "summary.go", "volume_stat_calculator.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/server/stats", visibility = ["//visibility:public"], deps = [ "//pkg/kubelet/apis/stats/v1alpha1:go_default_library", @@ -34,6 +35,7 @@ go_test( "summary_test.go", "volume_stat_calculator_test.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/server/stats", library = ":go_default_library", deps = [ "//pkg/kubelet/apis/stats/v1alpha1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/volume_stat_calculator.go b/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/volume_stat_calculator.go index e151ab83d2d8..87355870cc4d 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/volume_stat_calculator.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/server/stats/volume_stat_calculator.go @@ -42,9 +42,11 @@ type volumeStatCalculator struct { latest atomic.Value } -// PodVolumeStats encapsulates all VolumeStats for a pod +// PodVolumeStats encapsulates the VolumeStats for a pod. +// It consists of two lists, for local ephemeral volumes, and for persistent volumes respectively. type PodVolumeStats struct { - Volumes []stats.VolumeStats + EphemeralVolumes []stats.VolumeStats + PersistentVolumes []stats.VolumeStats } // newVolumeStatCalculator creates a new VolumeStatCalculator @@ -101,7 +103,8 @@ func (s *volumeStatCalculator) calcAndStoreStats() { } // Call GetMetrics on each Volume and copy the result to a new VolumeStats.FsStats - fsStats := make([]stats.VolumeStats, 0, len(volumes)) + ephemeralStats := []stats.VolumeStats{} + persistentStats := []stats.VolumeStats{} for name, v := range volumes { metric, err := v.GetMetrics() if err != nil { @@ -113,31 +116,38 @@ func (s *volumeStatCalculator) calcAndStoreStats() { } // Lookup the volume spec and add a 'PVCReference' for volumes that reference a PVC volSpec := volumesSpec[name] + var pvcRef *stats.PVCReference if pvcSource := volSpec.PersistentVolumeClaim; pvcSource != nil { - pvcRef := stats.PVCReference{ + pvcRef = &stats.PVCReference{ Name: pvcSource.ClaimName, Namespace: s.pod.GetNamespace(), } - fsStats = append(fsStats, s.parsePodVolumeStats(name, &pvcRef, metric)) // Set the PVC's prometheus metrics - s.setPVCMetrics(&pvcRef, metric) + s.setPVCMetrics(pvcRef, metric) + } + volumeStats := s.parsePodVolumeStats(name, pvcRef, metric, volSpec) + if isVolumeEphemeral(volSpec) { + ephemeralStats = append(ephemeralStats, volumeStats) } else { - fsStats = append(fsStats, s.parsePodVolumeStats(name, nil, metric)) + persistentStats = append(persistentStats, volumeStats) } + } // Store the new stats - s.latest.Store(PodVolumeStats{Volumes: fsStats}) + s.latest.Store(PodVolumeStats{EphemeralVolumes: ephemeralStats, + PersistentVolumes: persistentStats}) } // parsePodVolumeStats converts (internal) volume.Metrics to (external) stats.VolumeStats structures -func (s *volumeStatCalculator) parsePodVolumeStats(podName string, pvcRef 
*stats.PVCReference, metric *volume.Metrics) stats.VolumeStats { +func (s *volumeStatCalculator) parsePodVolumeStats(podName string, pvcRef *stats.PVCReference, metric *volume.Metrics, volSpec v1.Volume) stats.VolumeStats { available := uint64(metric.Available.Value()) capacity := uint64(metric.Capacity.Value()) used := uint64(metric.Used.Value()) inodes := uint64(metric.Inodes.Value()) inodesFree := uint64(metric.InodesFree.Value()) inodesUsed := uint64(metric.InodesUsed.Value()) + return stats.VolumeStats{ Name: podName, PVCRef: pvcRef, @@ -146,6 +156,14 @@ func (s *volumeStatCalculator) parsePodVolumeStats(podName string, pvcRef *stats } } +func isVolumeEphemeral(volume v1.Volume) bool { + if (volume.EmptyDir != nil && volume.EmptyDir.Medium == v1.StorageMediumDefault) || + volume.ConfigMap != nil || volume.GitRepo != nil { + return true + } + return false +} + // setPVCMetrics sets the given PVC's prometheus metrics to match the given volume.Metrics func (s *volumeStatCalculator) setPVCMetrics(pvcRef *stats.PVCReference, metric *volume.Metrics) { metrics.VolumeStatsAvailableBytes.WithLabelValues(pvcRef.Namespace, pvcRef.Name).Set(float64(metric.Available.Value())) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/server/streaming/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/server/streaming/BUILD index 21d1a4b6dbb1..70fe31d096a3 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/server/streaming/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/server/streaming/BUILD @@ -13,6 +13,7 @@ go_library( "request_cache.go", "server.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/server/streaming", deps = [ "//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library", "//pkg/kubelet/server/portforward:go_default_library", @@ -33,9 +34,10 @@ go_test( "request_cache_test.go", "server_test.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/server/streaming", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library", "//pkg/kubelet/server/portforward:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/server/streaming/server.go b/vendor/k8s.io/kubernetes/pkg/kubelet/server/streaming/server.go index 4eac080aa6d3..ba9f12ff1037 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/server/streaming/server.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/server/streaming/server.go @@ -158,9 +158,24 @@ type server struct { server *http.Server } -func (s *server) GetExec(req *runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) { +func validateExecRequest(req *runtimeapi.ExecRequest) error { if req.ContainerId == "" { - return nil, grpc.Errorf(codes.InvalidArgument, "missing required container_id") + return grpc.Errorf(codes.InvalidArgument, "missing required container_id") + } + if req.Tty && req.Stderr { + // If TTY is set, stderr cannot be true because multiplexing is not + // supported. 
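The validators added in this hunk reject exec/attach requests that have no container ID, that combine a TTY with a separate stderr stream (output cannot be multiplexed over a TTY), or that request no stream at all. A minimal sketch of the same rules with a simplified stand-in request type and plain errors; the real code operates on runtimeapi.ExecRequest/AttachRequest and returns grpc.Errorf(codes.InvalidArgument, ...).

package main

import (
	"errors"
	"fmt"
)

// streamRequest is a hypothetical stand-in for the CRI exec/attach request fields.
type streamRequest struct {
	ContainerID           string
	TTY                   bool
	Stdin, Stdout, Stderr bool
}

func validateStreamRequest(req streamRequest) error {
	if req.ContainerID == "" {
		return errors.New("missing required container_id")
	}
	if req.TTY && req.Stderr {
		// With a TTY there is a single merged output stream, so a separate
		// stderr stream cannot be honoured.
		return errors.New("tty and stderr cannot both be true")
	}
	if !req.Stdin && !req.Stdout && !req.Stderr {
		return errors.New("one of stdin, stdout, or stderr must be set")
	}
	return nil
}

func main() {
	fmt.Println(validateStreamRequest(streamRequest{ContainerID: "c1", TTY: true, Stdout: true})) // <nil>
	fmt.Println(validateStreamRequest(streamRequest{ContainerID: "c1", TTY: true, Stderr: true})) // tty/stderr conflict
	fmt.Println(validateStreamRequest(streamRequest{ContainerID: "c1"}))                          // no stream requested
}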
+ return grpc.Errorf(codes.InvalidArgument, "tty and stderr cannot both be true") + } + if !req.Stdin && !req.Stdout && !req.Stderr { + return grpc.Errorf(codes.InvalidArgument, "one of stdin, stdout, or stderr must be set") + } + return nil +} + +func (s *server) GetExec(req *runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) { + if err := validateExecRequest(req); err != nil { + return nil, err } token, err := s.cache.Insert(req) if err != nil { @@ -171,9 +186,24 @@ func (s *server) GetExec(req *runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, }, nil } -func (s *server) GetAttach(req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) { +func validateAttachRequest(req *runtimeapi.AttachRequest) error { if req.ContainerId == "" { - return nil, grpc.Errorf(codes.InvalidArgument, "missing required container_id") + return grpc.Errorf(codes.InvalidArgument, "missing required container_id") + } + if req.Tty && req.Stderr { + // If TTY is set, stderr cannot be true because multiplexing is not + // supported. + return grpc.Errorf(codes.InvalidArgument, "tty and stderr cannot both be true") + } + if !req.Stdin && !req.Stdout && !req.Stderr { + return grpc.Errorf(codes.InvalidArgument, "one of stdin, stdout, and stderr must be set") + } + return nil +} + +func (s *server) GetAttach(req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) { + if err := validateAttachRequest(req); err != nil { + return nil, err } token, err := s.cache.Insert(req) if err != nil { @@ -239,8 +269,8 @@ func (s *server) serveExec(req *restful.Request, resp *restful.Response) { streamOpts := &remotecommandserver.Options{ Stdin: exec.Stdin, - Stdout: true, - Stderr: !exec.Tty, + Stdout: exec.Stdout, + Stderr: exec.Stderr, TTY: exec.Tty, } @@ -273,8 +303,8 @@ func (s *server) serveAttach(req *restful.Request, resp *restful.Response) { streamOpts := &remotecommandserver.Options{ Stdin: attach.Stdin, - Stdout: true, - Stderr: !attach.Tty, + Stdout: attach.Stdout, + Stderr: attach.Stderr, TTY: attach.Tty, } remotecommandserver.ServeAttach( @@ -332,11 +362,11 @@ var _ remotecommandserver.Attacher = &criAdapter{} var _ portforward.PortForwarder = &criAdapter{} func (a *criAdapter) ExecInContainer(podName string, podUID types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error { - return a.Exec(container, cmd, in, out, err, tty, resize) + return a.Runtime.Exec(container, cmd, in, out, err, tty, resize) } func (a *criAdapter) AttachContainer(podName string, podUID types.UID, container string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error { - return a.Attach(container, in, out, err, tty, resize) + return a.Runtime.Attach(container, in, out, err, tty, resize) } func (a *criAdapter) PortForward(podName string, podUID types.UID, port int32, stream io.ReadWriteCloser) error { diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/stats/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/stats/BUILD index 2d027fc31a09..07732d9ad092 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/stats/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/stats/BUILD @@ -8,12 +8,14 @@ go_library( "helper.go", "stats_provider.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/stats", visibility = ["//visibility:public"], deps = [ "//pkg/kubelet/apis/cri:go_default_library", "//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library", 
"//pkg/kubelet/apis/stats/v1alpha1:go_default_library", "//pkg/kubelet/cadvisor:go_default_library", + "//pkg/kubelet/cm:go_default_library", "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/leaky:go_default_library", "//pkg/kubelet/network:go_default_library", @@ -21,6 +23,7 @@ go_library( "//pkg/kubelet/server/stats:go_default_library", "//pkg/kubelet/types:go_default_library", "//vendor/github.com/golang/glog:go_default_library", + "//vendor/github.com/golang/protobuf/proto:go_default_library", "//vendor/github.com/google/cadvisor/fs:go_default_library", "//vendor/github.com/google/cadvisor/info/v1:go_default_library", "//vendor/github.com/google/cadvisor/info/v2:go_default_library", @@ -51,6 +54,7 @@ go_test( "helper_test.go", "stats_provider_test.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/stats", library = ":go_default_library", deps = [ "//pkg/kubelet/apis/cri/testing:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/stats/cadvisor_stats_provider.go b/vendor/k8s.io/kubernetes/pkg/kubelet/stats/cadvisor_stats_provider.go index 27538240fdd0..17a10df2f6af 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/stats/cadvisor_stats_provider.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/stats/cadvisor_stats_provider.go @@ -18,6 +18,7 @@ package stats import ( "fmt" + "path" "sort" "strings" @@ -28,6 +29,7 @@ import ( "k8s.io/apimachinery/pkg/types" statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1" "k8s.io/kubernetes/pkg/kubelet/cadvisor" + "k8s.io/kubernetes/pkg/kubelet/cm" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/leaky" "k8s.io/kubernetes/pkg/kubelet/server/stats" @@ -74,24 +76,13 @@ func (p *cadvisorStatsProvider) ListPodStats() ([]statsapi.PodStats, error) { if err != nil { return nil, fmt.Errorf("failed to get imageFs info: %v", err) } - - infos, err := p.cadvisor.ContainerInfoV2("/", cadvisorapiv2.RequestOptions{ - IdType: cadvisorapiv2.TypeName, - Count: 2, // 2 samples are needed to compute "instantaneous" CPU - Recursive: true, - }) + infos, err := getCadvisorContainerInfo(p.cadvisor) if err != nil { - if _, ok := infos["/"]; ok { - // If the failure is partial, log it and return a best-effort - // response. - glog.Errorf("Partial failure issuing cadvisor.ContainerInfoV2: %v", err) - } else { - return nil, fmt.Errorf("failed to get root cgroup stats: %v", err) - } + return nil, fmt.Errorf("failed to get container info from cadvisor: %v", err) } - + // removeTerminatedContainerInfo will also remove pod level cgroups, so save the infos into allInfos first + allInfos := infos infos = removeTerminatedContainerInfo(infos) - // Map each container to a pod and update the PodStats with container data. podToStats := map[statsapi.PodReference]*statsapi.PodStats{} for key, cinfo := range infos { @@ -106,7 +97,7 @@ func (p *cadvisorStatsProvider) ListPodStats() ([]statsapi.PodStats, error) { if !isPodManagedContainer(&cinfo) { continue } - ref := buildPodRef(&cinfo) + ref := buildPodRef(cinfo.Spec.Labels) // Lookup the PodStats for the pod using the PodRef. If none exists, // initialize a new entry. @@ -134,8 +125,19 @@ func (p *cadvisorStatsProvider) ListPodStats() ([]statsapi.PodStats, error) { for _, podStats := range podToStats { // Lookup the volume stats for each pod. 
podUID := types.UID(podStats.PodRef.UID) + var ephemeralStats []statsapi.VolumeStats if vstats, found := p.resourceAnalyzer.GetPodVolumeStats(podUID); found { - podStats.VolumeStats = vstats.Volumes + ephemeralStats = make([]statsapi.VolumeStats, len(vstats.EphemeralVolumes)) + copy(ephemeralStats, vstats.EphemeralVolumes) + podStats.VolumeStats = append(vstats.EphemeralVolumes, vstats.PersistentVolumes...) + } + podStats.EphemeralStorage = calcEphemeralStorage(podStats.Containers, ephemeralStats, &rootFsInfo) + // Lookup the pod-level cgroup's CPU and memory stats + podInfo := getcadvisorPodInfoFromPodUID(podUID, allInfos) + if podInfo != nil { + cpu, memory := cadvisorInfoToCPUandMemoryStats(podInfo) + podStats.CPU = cpu + podStats.Memory = memory } result = append(result, *podStats) } @@ -143,6 +145,53 @@ func (p *cadvisorStatsProvider) ListPodStats() ([]statsapi.PodStats, error) { return result, nil } +func calcEphemeralStorage(containers []statsapi.ContainerStats, volumes []statsapi.VolumeStats, rootFsInfo *cadvisorapiv2.FsInfo) *statsapi.FsStats { + result := &statsapi.FsStats{ + Time: metav1.NewTime(rootFsInfo.Timestamp), + AvailableBytes: &rootFsInfo.Available, + CapacityBytes: &rootFsInfo.Capacity, + InodesFree: rootFsInfo.InodesFree, + Inodes: rootFsInfo.Inodes, + } + for _, container := range containers { + addContainerUsage(result, &container) + } + for _, volume := range volumes { + result.UsedBytes = addUsage(result.UsedBytes, volume.FsStats.UsedBytes) + result.InodesUsed = addUsage(result.InodesUsed, volume.InodesUsed) + result.Time = maxUpdateTime(&result.Time, &volume.FsStats.Time) + } + return result +} + +func addContainerUsage(stat *statsapi.FsStats, container *statsapi.ContainerStats) { + if rootFs := container.Rootfs; rootFs != nil { + stat.Time = maxUpdateTime(&stat.Time, &rootFs.Time) + stat.InodesUsed = addUsage(stat.InodesUsed, rootFs.InodesUsed) + stat.UsedBytes = addUsage(stat.UsedBytes, rootFs.UsedBytes) + if logs := container.Logs; logs != nil { + stat.UsedBytes = addUsage(stat.UsedBytes, logs.UsedBytes) + stat.Time = maxUpdateTime(&stat.Time, &logs.Time) + } + } +} + +func maxUpdateTime(first, second *metav1.Time) metav1.Time { + if first.Before(second) { + return *second + } + return *first +} +func addUsage(first, second *uint64) *uint64 { + if first == nil { + return second + } else if second == nil { + return first + } + total := *first + *second + return &total +} + // ImageFsStats returns the stats of the filesystem for storing images. 
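The calcEphemeralStorage helper above sums UsedBytes and InodesUsed across container rootfs, container logs, and ephemeral volumes, treating nil counters as "unknown" and keeping the newest sample time. A small illustration of the nil-tolerant helpers, using plain time.Time in place of metav1.Time.

package main

import (
	"fmt"
	"time"
)

// addUsage adds two optional counters; a nil operand simply passes the other through.
func addUsage(first, second *uint64) *uint64 {
	if first == nil {
		return second
	}
	if second == nil {
		return first
	}
	total := *first + *second
	return &total
}

// maxUpdateTime returns the later of two sample timestamps.
func maxUpdateTime(first, second time.Time) time.Time {
	if first.Before(second) {
		return second
	}
	return first
}

func main() {
	a, b := uint64(1024), uint64(2048)
	fmt.Println(*addUsage(&a, &b))       // 3072
	fmt.Println(addUsage(nil, &b) == &b) // true: nil is treated as "unknown"

	t1 := time.Now()
	t2 := t1.Add(time.Minute)
	fmt.Println(maxUpdateTime(t1, t2).Equal(t2)) // true
}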
func (p *cadvisorStatsProvider) ImageFsStats() (*statsapi.FsStats, error) { imageFsInfo, err := p.cadvisor.ImagesFsInfo() @@ -172,10 +221,10 @@ func (p *cadvisorStatsProvider) ImageFsStats() (*statsapi.FsStats, error) { } // buildPodRef returns a PodReference that identifies the Pod managing cinfo -func buildPodRef(cinfo *cadvisorapiv2.ContainerInfo) statsapi.PodReference { - podName := kubetypes.GetPodName(cinfo.Spec.Labels) - podNamespace := kubetypes.GetPodNamespace(cinfo.Spec.Labels) - podUID := kubetypes.GetPodUID(cinfo.Spec.Labels) +func buildPodRef(containerLabels map[string]string) statsapi.PodReference { + podName := kubetypes.GetPodName(containerLabels) + podNamespace := kubetypes.GetPodNamespace(containerLabels) + podUID := kubetypes.GetPodUID(containerLabels) return statsapi.PodReference{Name: podName, Namespace: podNamespace, UID: podUID} } @@ -192,6 +241,19 @@ func isPodManagedContainer(cinfo *cadvisorapiv2.ContainerInfo) bool { return managed } +// getcadvisorPodInfoFromPodUID returns a pod cgroup information by matching the podUID with its CgroupName identifier base name +func getcadvisorPodInfoFromPodUID(podUID types.UID, infos map[string]cadvisorapiv2.ContainerInfo) *cadvisorapiv2.ContainerInfo { + for key, info := range infos { + if cm.IsSystemdStyleName(key) { + key = cm.RevertFromSystemdToCgroupStyleName(key) + } + if cm.GetPodCgroupNameSuffix(podUID) == path.Base(key) { + return &info + } + } + return nil +} + // removeTerminatedContainerInfo returns the specified containerInfo but with // the stats of the terminated containers removed. // @@ -204,7 +266,7 @@ func removeTerminatedContainerInfo(containerInfo map[string]cadvisorapiv2.Contai continue } cinfoID := containerID{ - podRef: buildPodRef(&cinfo), + podRef: buildPodRef(cinfo.Spec.Labels), containerName: kubetypes.GetContainerName(cinfo.Spec.Labels), } cinfoMap[cinfoID] = append(cinfoMap[cinfoID], containerInfoWithCgroup{ @@ -279,3 +341,21 @@ func hasMemoryAndCPUInstUsage(info *cadvisorapiv2.ContainerInfo) bool { } return cstat.CpuInst.Usage.Total != 0 && cstat.Memory.RSS != 0 } + +func getCadvisorContainerInfo(ca cadvisor.Interface) (map[string]cadvisorapiv2.ContainerInfo, error) { + infos, err := ca.ContainerInfoV2("/", cadvisorapiv2.RequestOptions{ + IdType: cadvisorapiv2.TypeName, + Count: 2, // 2 samples are needed to compute "instantaneous" CPU + Recursive: true, + }) + if err != nil { + if _, ok := infos["/"]; ok { + // If the failure is partial, log it and return a best-effort + // response. 
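The new getcadvisorPodInfoFromPodUID helper locates the pod-level cgroup among all cadvisor entries by comparing the base name of each cgroup key with the pod's cgroup name suffix (after converting systemd-style names back to cgroupfs names). A simplified sketch of that lookup; the "pod<UID>" suffix format and the map of cgroup paths are assumptions standing in for cm.GetPodCgroupNameSuffix and the cadvisor ContainerInfo map, and the systemd name translation is omitted.

package main

import (
	"fmt"
	"path"
)

// findPodCgroup returns the key of the cgroup whose base name matches the pod's
// cgroup suffix, or "" if no entry matches.
func findPodCgroup(podUID string, cgroups map[string]string) string {
	suffix := "pod" + podUID // assumed suffix format
	for key := range cgroups {
		if path.Base(key) == suffix {
			return key
		}
	}
	return ""
}

func main() {
	cgroups := map[string]string{
		"/kubepods/burstable/podabc123":        "pod-level cgroup",
		"/kubepods/burstable/podabc123/cdef01": "container cgroup",
	}
	fmt.Println(findPodCgroup("abc123", cgroups)) // /kubepods/burstable/podabc123
}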
+ glog.Errorf("Partial failure issuing cadvisor.ContainerInfoV2: %v", err) + } else { + return nil, fmt.Errorf("failed to get root cgroup stats: %v", err) + } + } + return infos, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/stats/cri_stats_provider.go b/vendor/k8s.io/kubernetes/pkg/kubelet/stats/cri_stats_provider.go index f855e0db41af..5934c2962bfd 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/stats/cri_stats_provider.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/stats/cri_stats_provider.go @@ -18,9 +18,13 @@ package stats import ( "fmt" + "path" + "sort" + "strings" "time" "github.com/golang/glog" + "github.com/golang/protobuf/proto" cadvisorfs "github.com/google/cadvisor/fs" cadvisorapiv2 "github.com/google/cadvisor/info/v2" @@ -31,6 +35,7 @@ import ( statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1" "k8s.io/kubernetes/pkg/kubelet/cadvisor" "k8s.io/kubernetes/pkg/kubelet/server/stats" + kubetypes "k8s.io/kubernetes/pkg/kubelet/types" ) // criStatsProvider implements the containerStatsProvider interface by getting @@ -74,15 +79,10 @@ func (p *criStatsProvider) ListPodStats() ([]statsapi.PodStats, error) { return nil, fmt.Errorf("failed to get rootFs info: %v", err) } - // Creates container map. - containerMap := make(map[string]*runtimeapi.Container) containers, err := p.runtimeService.ListContainers(&runtimeapi.ContainerFilter{}) if err != nil { return nil, fmt.Errorf("failed to list all containers: %v", err) } - for _, c := range containers { - containerMap[c.Id] = c - } // Creates pod sandbox map. podSandboxMap := make(map[string]*runtimeapi.PodSandbox) @@ -106,6 +106,19 @@ func (p *criStatsProvider) ListPodStats() ([]statsapi.PodStats, error) { if err != nil { return nil, fmt.Errorf("failed to list all container stats: %v", err) } + + containers = removeTerminatedContainer(containers) + // Creates container map. + containerMap := make(map[string]*runtimeapi.Container) + for _, c := range containers { + containerMap[c.Id] = c + } + + caInfos, err := getCRICadvisorStats(p.cadvisor) + if err != nil { + return nil, fmt.Errorf("failed to get container info from cadvisor: %v", err) + } + for _, stats := range resp { containerID := stats.Attributes.Id container, found := containerMap[containerID] @@ -125,14 +138,31 @@ func (p *criStatsProvider) ListPodStats() ([]statsapi.PodStats, error) { // container belongs to. 
ps, found := sandboxIDToPodStats[podSandboxID] if !found { - ps = p.makePodStats(podSandbox) + ps = buildPodStats(podSandbox) + // Fill stats from cadvisor is available for full set of required pod stats + caPodSandbox, found := caInfos[podSandboxID] + if !found { + glog.V(4).Info("Unable to find cadvisor stats for sandbox %q", podSandboxID) + } else { + p.addCadvisorPodStats(ps, &caPodSandbox) + } sandboxIDToPodStats[podSandboxID] = ps } - ps.Containers = append(ps.Containers, *p.makeContainerStats(stats, container, &rootFsInfo, uuidToFsInfo)) + cs := p.makeContainerStats(stats, container, &rootFsInfo, uuidToFsInfo) + // If cadvisor stats is available for the container, use it to populate + // container stats + caStats, caFound := caInfos[containerID] + if !caFound { + glog.V(4).Info("Unable to find cadvisor stats for %q", containerID) + } else { + p.addCadvisorContainerStats(cs, &caStats) + } + ps.Containers = append(ps.Containers, *cs) } result := make([]statsapi.PodStats, 0, len(sandboxIDToPodStats)) for _, s := range sandboxIDToPodStats { + p.makePodStorageStats(s, &rootFsInfo) result = append(result, *s) } return result, nil @@ -193,8 +223,9 @@ func (p *criStatsProvider) getFsInfo(storageID *runtimeapi.StorageIdentifier) *c return &fsInfo } -func (p *criStatsProvider) makePodStats(podSandbox *runtimeapi.PodSandbox) *statsapi.PodStats { - s := &statsapi.PodStats{ +// buildPodStats returns a PodStats that identifies the Pod managing cinfo +func buildPodStats(podSandbox *runtimeapi.PodSandbox) *statsapi.PodStats { + return &statsapi.PodStats{ PodRef: statsapi.PodReference{ Name: podSandbox.Metadata.Name, UID: podSandbox.Metadata.Uid, @@ -202,15 +233,27 @@ func (p *criStatsProvider) makePodStats(podSandbox *runtimeapi.PodSandbox) *stat }, // The StartTime in the summary API is the pod creation time. StartTime: metav1.NewTime(time.Unix(0, podSandbox.CreatedAt)), - // Network stats are not supported by CRI. } +} + +func (p *criStatsProvider) makePodStorageStats(s *statsapi.PodStats, rootFsInfo *cadvisorapiv2.FsInfo) *statsapi.PodStats { podUID := types.UID(s.PodRef.UID) if vstats, found := p.resourceAnalyzer.GetPodVolumeStats(podUID); found { - s.VolumeStats = vstats.Volumes + ephemeralStats := make([]statsapi.VolumeStats, len(vstats.EphemeralVolumes)) + copy(ephemeralStats, vstats.EphemeralVolumes) + s.VolumeStats = append(vstats.EphemeralVolumes, vstats.PersistentVolumes...) + s.EphemeralStorage = calcEphemeralStorage(s.Containers, ephemeralStats, rootFsInfo) } return s } +func (p *criStatsProvider) addCadvisorPodStats( + ps *statsapi.PodStats, + caPodSandbox *cadvisorapiv2.ContainerInfo, +) { + ps.Network = cadvisorInfoToNetworkStats(ps.PodRef.Name, caPodSandbox) +} + func (p *criStatsProvider) makeContainerStats( stats *runtimeapi.ContainerStats, container *runtimeapi.Container, @@ -221,9 +264,15 @@ func (p *criStatsProvider) makeContainerStats( Name: stats.Attributes.Metadata.Name, // The StartTime in the summary API is the container creation time. StartTime: metav1.NewTime(time.Unix(0, container.CreatedAt)), - CPU: &statsapi.CPUStats{}, - Memory: &statsapi.MemoryStats{}, - Rootfs: &statsapi.FsStats{}, + // Work around heapster bug. https://github.com/kubernetes/kubernetes/issues/54962 + // TODO(random-liu): Remove this after heapster is updated to newer than 1.5.0-beta.0. 
+ CPU: &statsapi.CPUStats{ + UsageNanoCores: proto.Uint64(0), + }, + Memory: &statsapi.MemoryStats{ + RSSBytes: proto.Uint64(0), + }, + Rootfs: &statsapi.FsStats{}, Logs: &statsapi.FsStats{ Time: metav1.NewTime(rootFsInfo.Timestamp), AvailableBytes: &rootFsInfo.Available, @@ -279,3 +328,80 @@ func (p *criStatsProvider) makeContainerStats( return result } + +// removeTerminatedContainer returns the specified container but with +// the stats of the terminated containers removed. +func removeTerminatedContainer(containers []*runtimeapi.Container) []*runtimeapi.Container { + containerMap := make(map[containerID][]*runtimeapi.Container) + // Sort order by create time + sort.Slice(containers, func(i, j int) bool { + return containers[i].CreatedAt < containers[j].CreatedAt + }) + for _, container := range containers { + refID := containerID{ + podRef: buildPodRef(container.Labels), + containerName: kubetypes.GetContainerName(container.Labels), + } + containerMap[refID] = append(containerMap[refID], container) + } + + result := make([]*runtimeapi.Container, 0) + for _, refs := range containerMap { + if len(refs) == 1 { + result = append(result, refs[0]) + continue + } + found := false + for i := 0; i < len(refs); i++ { + if refs[i].State == runtimeapi.ContainerState_CONTAINER_RUNNING { + found = true + result = append(result, refs[i]) + } + } + if !found { + result = append(result, refs[len(refs)-1]) + } + } + return result +} + +func (p *criStatsProvider) addCadvisorContainerStats( + cs *statsapi.ContainerStats, + caPodStats *cadvisorapiv2.ContainerInfo, +) { + if caPodStats.Spec.HasCustomMetrics { + cs.UserDefinedMetrics = cadvisorInfoToUserDefinedMetrics(caPodStats) + } + + cpu, memory := cadvisorInfoToCPUandMemoryStats(caPodStats) + if cpu != nil { + cs.CPU = cpu + } + if memory != nil { + cs.Memory = memory + } +} + +func getCRICadvisorStats(ca cadvisor.Interface) (map[string]cadvisorapiv2.ContainerInfo, error) { + stats := make(map[string]cadvisorapiv2.ContainerInfo) + infos, err := getCadvisorContainerInfo(ca) + if err != nil { + return nil, fmt.Errorf("failed to fetch cadvisor stats: %v", err) + } + infos = removeTerminatedContainerInfo(infos) + for key, info := range infos { + // On systemd using devicemapper each mount into the container has an + // associated cgroup. We ignore them to ensure we do not get duplicate + // entries in our summary. For details on .mount units: + // http://man7.org/linux/man-pages/man5/systemd.mount.5.html + if strings.HasSuffix(key, ".mount") { + continue + } + // Build the Pod key if this container is managed by a Pod + if !isPodManagedContainer(&info) { + continue + } + stats[path.Base(key)] = info + } + return stats, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/stats/helper.go b/vendor/k8s.io/kubernetes/pkg/kubelet/stats/helper.go index 091a0d51da1d..6856a8c76da4 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/stats/helper.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/stats/helper.go @@ -30,21 +30,15 @@ import ( "k8s.io/kubernetes/pkg/kubelet/network" ) -// cadvisorInfoToContainerStats returns the statsapi.ContainerStats converted -// from the container and filesystem info. 
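The removeTerminatedContainer helper introduced below deduplicates CRI containers that share the same pod reference and container name: running instances are kept, and when every instance has terminated only the most recently created one is reported. A simplified version with stand-in types; the real code keys each group on buildPodRef(labels) plus the container-name label.

package main

import (
	"fmt"
	"sort"
)

type container struct {
	Key       string // stand-in for (pod reference, container name)
	CreatedAt int64
	Running   bool
}

func removeTerminated(containers []container) []container {
	// Oldest first, so the last element of each group is the newest attempt.
	sort.Slice(containers, func(i, j int) bool { return containers[i].CreatedAt < containers[j].CreatedAt })

	groups := make(map[string][]container)
	for _, c := range containers {
		groups[c.Key] = append(groups[c.Key], c)
	}

	result := make([]container, 0, len(groups))
	for _, refs := range groups {
		kept := false
		for _, c := range refs {
			if c.Running {
				result = append(result, c)
				kept = true
			}
		}
		if !kept {
			// No running instance: report only the newest terminated one.
			result = append(result, refs[len(refs)-1])
		}
	}
	return result
}

func main() {
	out := removeTerminated([]container{
		{Key: "default/web/app", CreatedAt: 1, Running: false},
		{Key: "default/web/app", CreatedAt: 2, Running: true},
		{Key: "default/job/task", CreatedAt: 3, Running: false},
		{Key: "default/job/task", CreatedAt: 4, Running: false},
	})
	fmt.Println(len(out)) // 2: the running "app" plus the newest terminated "task"
}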
-func cadvisorInfoToContainerStats(name string, info *cadvisorapiv2.ContainerInfo, rootFs, imageFs *cadvisorapiv2.FsInfo) *statsapi.ContainerStats { - result := &statsapi.ContainerStats{ - StartTime: metav1.NewTime(info.Spec.CreationTime), - Name: name, - } - +func cadvisorInfoToCPUandMemoryStats(info *cadvisorapiv2.ContainerInfo) (*statsapi.CPUStats, *statsapi.MemoryStats) { cstat, found := latestContainerStats(info) if !found { - return result + return nil, nil } - + var cpuStats *statsapi.CPUStats + var memoryStats *statsapi.MemoryStats if info.Spec.HasCpu { - cpuStats := statsapi.CPUStats{ + cpuStats = &statsapi.CPUStats{ Time: metav1.NewTime(cstat.Timestamp), } if cstat.CpuInst != nil { @@ -53,13 +47,11 @@ func cadvisorInfoToContainerStats(name string, info *cadvisorapiv2.ContainerInfo if cstat.Cpu != nil { cpuStats.UsageCoreNanoSeconds = &cstat.Cpu.Usage.Total } - result.CPU = &cpuStats } - if info.Spec.HasMemory { pageFaults := cstat.Memory.ContainerData.Pgfault majorPageFaults := cstat.Memory.ContainerData.Pgmajfault - result.Memory = &statsapi.MemoryStats{ + memoryStats = &statsapi.MemoryStats{ Time: metav1.NewTime(cstat.Timestamp), UsageBytes: &cstat.Memory.Usage, WorkingSetBytes: &cstat.Memory.WorkingSet, @@ -70,50 +62,69 @@ func cadvisorInfoToContainerStats(name string, info *cadvisorapiv2.ContainerInfo // availableBytes = memory limit (if known) - workingset if !isMemoryUnlimited(info.Spec.Memory.Limit) { availableBytes := info.Spec.Memory.Limit - cstat.Memory.WorkingSet - result.Memory.AvailableBytes = &availableBytes + memoryStats.AvailableBytes = &availableBytes } } + return cpuStats, memoryStats +} - // The container logs live on the node rootfs device - result.Logs = &statsapi.FsStats{ - Time: metav1.NewTime(cstat.Timestamp), - AvailableBytes: &rootFs.Available, - CapacityBytes: &rootFs.Capacity, - InodesFree: rootFs.InodesFree, - Inodes: rootFs.Inodes, +// cadvisorInfoToContainerStats returns the statsapi.ContainerStats converted +// from the container and filesystem info. 
+func cadvisorInfoToContainerStats(name string, info *cadvisorapiv2.ContainerInfo, rootFs, imageFs *cadvisorapiv2.FsInfo) *statsapi.ContainerStats { + result := &statsapi.ContainerStats{ + StartTime: metav1.NewTime(info.Spec.CreationTime), + Name: name, + } + cstat, found := latestContainerStats(info) + if !found { + return result } - if rootFs.Inodes != nil && rootFs.InodesFree != nil { - logsInodesUsed := *rootFs.Inodes - *rootFs.InodesFree - result.Logs.InodesUsed = &logsInodesUsed + cpu, memory := cadvisorInfoToCPUandMemoryStats(info) + result.CPU = cpu + result.Memory = memory + + if rootFs != nil { + // The container logs live on the node rootfs device + result.Logs = buildLogsStats(cstat, rootFs) } - // The container rootFs lives on the imageFs devices (which may not be the node root fs) - result.Rootfs = &statsapi.FsStats{ - Time: metav1.NewTime(cstat.Timestamp), - AvailableBytes: &imageFs.Available, - CapacityBytes: &imageFs.Capacity, - InodesFree: imageFs.InodesFree, - Inodes: imageFs.Inodes, + if imageFs != nil { + // The container rootFs lives on the imageFs devices (which may not be the node root fs) + result.Rootfs = buildRootfsStats(cstat, imageFs) } cfs := cstat.Filesystem if cfs != nil { if cfs.BaseUsageBytes != nil { - rootfsUsage := *cfs.BaseUsageBytes - result.Rootfs.UsedBytes = &rootfsUsage - if cfs.TotalUsageBytes != nil { + if result.Rootfs != nil { + rootfsUsage := *cfs.BaseUsageBytes + result.Rootfs.UsedBytes = &rootfsUsage + } + if cfs.TotalUsageBytes != nil && result.Logs != nil { logsUsage := *cfs.TotalUsageBytes - *cfs.BaseUsageBytes result.Logs.UsedBytes = &logsUsage } } - if cfs.InodeUsage != nil { + if cfs.InodeUsage != nil && result.Rootfs != nil { rootInodes := *cfs.InodeUsage result.Rootfs.InodesUsed = &rootInodes } } + for _, acc := range cstat.Accelerators { + result.Accelerators = append(result.Accelerators, statsapi.AcceleratorStats{ + Make: acc.Make, + Model: acc.Model, + ID: acc.ID, + MemoryTotal: acc.MemoryTotal, + MemoryUsed: acc.MemoryUsed, + DutyCycle: acc.DutyCycle, + }) + } + result.UserDefinedMetrics = cadvisorInfoToUserDefinedMetrics(info) + return result } @@ -127,19 +138,29 @@ func cadvisorInfoToNetworkStats(name string, info *cadvisorapiv2.ContainerInfo) if !found { return nil } - for _, inter := range cstat.Network.Interfaces { + + iStats := statsapi.NetworkStats{ + Time: metav1.NewTime(cstat.Timestamp), + } + + for i := range cstat.Network.Interfaces { + inter := cstat.Network.Interfaces[i] + iStat := statsapi.InterfaceStats{ + Name: inter.Name, + RxBytes: &inter.RxBytes, + RxErrors: &inter.RxErrors, + TxBytes: &inter.TxBytes, + TxErrors: &inter.TxErrors, + } + if inter.Name == network.DefaultInterfaceName { - return &statsapi.NetworkStats{ - Time: metav1.NewTime(cstat.Timestamp), - RxBytes: &inter.RxBytes, - RxErrors: &inter.RxErrors, - TxBytes: &inter.TxBytes, - TxErrors: &inter.TxErrors, - } + iStats.InterfaceStats = iStat } + + iStats.Interfaces = append(iStats.Interfaces, iStat) } - glog.V(4).Infof("Missing default interface %q for %s", network.DefaultInterfaceName, name) - return nil + + return &iStats } // cadvisorInfoToUserDefinedMetrics returns the statsapi.UserDefinedMetric @@ -246,3 +267,29 @@ func getCgroupStats(cadvisor cadvisor.Interface, containerName string) (*cadviso } return stats, nil } + +func buildLogsStats(cstat *cadvisorapiv2.ContainerStats, rootFs *cadvisorapiv2.FsInfo) *statsapi.FsStats { + fsStats := &statsapi.FsStats{ + Time: metav1.NewTime(cstat.Timestamp), + AvailableBytes: &rootFs.Available, + 
CapacityBytes: &rootFs.Capacity, + InodesFree: rootFs.InodesFree, + Inodes: rootFs.Inodes, + } + + if rootFs.Inodes != nil && rootFs.InodesFree != nil { + logsInodesUsed := *rootFs.Inodes - *rootFs.InodesFree + fsStats.InodesUsed = &logsInodesUsed + } + return fsStats +} + +func buildRootfsStats(cstat *cadvisorapiv2.ContainerStats, imageFs *cadvisorapiv2.FsInfo) *statsapi.FsStats { + return &statsapi.FsStats{ + Time: metav1.NewTime(cstat.Timestamp), + AvailableBytes: &imageFs.Available, + CapacityBytes: &imageFs.Capacity, + InodesFree: imageFs.InodesFree, + Inodes: imageFs.Inodes, + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/stats/stats_provider.go b/vendor/k8s.io/kubernetes/pkg/kubelet/stats/stats_provider.go index af7e9d418bfb..b61a41079402 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/stats/stats_provider.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/stats/stats_provider.go @@ -86,21 +86,15 @@ type containerStatsProvider interface { ImageFsStats() (*statsapi.FsStats, error) } -// GetCgroupStats returns the stats of the cgroup with the cgroupName. +// GetCgroupStats returns the stats of the cgroup with the cgroupName. Note that +// this function doesn't generate filesystem stats. func (p *StatsProvider) GetCgroupStats(cgroupName string) (*statsapi.ContainerStats, *statsapi.NetworkStats, error) { info, err := getCgroupInfo(p.cadvisor, cgroupName) if err != nil { return nil, nil, fmt.Errorf("failed to get cgroup stats for %q: %v", cgroupName, err) } - rootFsInfo, err := p.cadvisor.RootFsInfo() - if err != nil { - return nil, nil, fmt.Errorf("failed to get rootFs info: %v", err) - } - imageFsInfo, err := p.cadvisor.ImagesFsInfo() - if err != nil { - return nil, nil, fmt.Errorf("failed to get imageFs info: %v", err) - } - s := cadvisorInfoToContainerStats(cgroupName, info, &rootFsInfo, &imageFsInfo) + // Rootfs and imagefs doesn't make sense for raw cgroup. 
+ s := cadvisorInfoToContainerStats(cgroupName, info, nil, nil) n := cadvisorInfoToNetworkStats(cgroupName, info) return s, n, nil } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/status/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/status/BUILD index 3064ee30df66..0483a6a3b6ae 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/status/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/status/BUILD @@ -12,6 +12,7 @@ go_library( "generate.go", "status_manager.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/status", deps = [ "//pkg/api/v1/pod:go_default_library", "//pkg/kubelet/container:go_default_library", @@ -27,7 +28,6 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", ], ) @@ -37,10 +37,11 @@ go_test( "generate_test.go", "status_manager_test.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/status", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", "//pkg/api/v1/pod:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/kubelet/configmap:go_default_library", "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/pod:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/status/status_manager.go b/vendor/k8s.io/kubernetes/pkg/kubelet/status/status_manager.go index 20c751038a25..d9a027d544ec 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/status/status_manager.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/status/status_manager.go @@ -17,12 +17,12 @@ limitations under the License. package status import ( + "fmt" "sort" "sync" "time" clientset "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/scheme" "github.com/golang/glog" "k8s.io/api/core/v1" @@ -163,10 +163,8 @@ func (m *manager) SetPodStatus(pod *v1.Pod, status v1.PodStatus) { m.podStatusesLock.Lock() defer m.podStatusesLock.Unlock() // Make sure we're caching a deep copy. - status, err := copyStatus(&status) - if err != nil { - return - } + status = *status.DeepCopy() + // Force a status update if deletion timestamp is set. This is necessary // because if the pod is in the non-running state, the pod worker still // needs to be able to trigger an update and/or deletion. @@ -205,10 +203,7 @@ func (m *manager) SetContainerReadiness(podUID types.UID, containerID kubecontai } // Make sure we're not updating the cached version. - status, err := copyStatus(&oldStatus.status) - if err != nil { - return - } + status := *oldStatus.status.DeepCopy() containerStatus, _, _ = findContainerStatus(&status, containerID.String()) containerStatus.Ready = ready @@ -256,10 +251,7 @@ func (m *manager) TerminatePod(pod *v1.Pod) { if cachedStatus, ok := m.podStatuses[pod.UID]; ok { oldStatus = &cachedStatus.status } - status, err := copyStatus(oldStatus) - if err != nil { - return - } + status := *oldStatus.DeepCopy() for i := range status.ContainerStatuses { status.ContainerStatuses[i].State = v1.ContainerState{ Terminated: &v1.ContainerStateTerminated{}, @@ -273,6 +265,32 @@ func (m *manager) TerminatePod(pod *v1.Pod) { m.updateStatusInternal(pod, status, true) } +// checkContainerStateTransition ensures that no container is trying to transition +// from a terminated to non-terminated state, which is illegal and indicates a +// logical error in the kubelet. 
+func checkContainerStateTransition(oldStatuses, newStatuses []v1.ContainerStatus, restartPolicy v1.RestartPolicy) error { + // If we should always restart, containers are allowed to leave the terminated state + if restartPolicy == v1.RestartPolicyAlways { + return nil + } + for _, oldStatus := range oldStatuses { + // Skip any container that wasn't terminated + if oldStatus.State.Terminated == nil { + continue + } + // Skip any container that failed but is allowed to restart + if oldStatus.State.Terminated.ExitCode != 0 && restartPolicy == v1.RestartPolicyOnFailure { + continue + } + for _, newStatus := range newStatuses { + if oldStatus.Name == newStatus.Name && newStatus.State.Terminated == nil { + return fmt.Errorf("terminated container %v attempted illegal transition to non-terminated state", newStatus.Name) + } + } + } + return nil +} + // updateStatusInternal updates the internal status cache, and queues an update to the api server if // necessary. Returns whether an update was triggered. // This method IS NOT THREAD SAFE and must be called from a locked function. @@ -287,6 +305,16 @@ func (m *manager) updateStatusInternal(pod *v1.Pod, status v1.PodStatus, forceUp oldStatus = pod.Status } + // Check for illegal state transition in containers + if err := checkContainerStateTransition(oldStatus.ContainerStatuses, status.ContainerStatuses, pod.Spec.RestartPolicy); err != nil { + glog.Errorf("Status update on pod %v/%v aborted: %v", pod.Namespace, pod.Name, err) + return false + } + if err := checkContainerStateTransition(oldStatus.InitContainerStatuses, status.InitContainerStatuses, pod.Spec.RestartPolicy); err != nil { + glog.Errorf("Status update on pod %v/%v aborted: %v", pod.Namespace, pod.Name, err) + return false + } + // Set ReadyCondition.LastTransitionTime. if _, readyCondition := podutil.GetPodCondition(&status, v1.PodReady); readyCondition != nil { // Need to set LastTransitionTime. @@ -420,7 +448,7 @@ func (m *manager) syncPod(uid types.UID, status versionedPodStatus) { } // TODO: make me easier to express from client code - pod, err := m.kubeClient.Core().Pods(status.podNamespace).Get(status.podName, metav1.GetOptions{}) + pod, err := m.kubeClient.CoreV1().Pods(status.podNamespace).Get(status.podName, metav1.GetOptions{}) if errors.IsNotFound(err) { glog.V(3).Infof("Pod %q (%s) does not exist on the server", status.podName, uid) // If the Pod is deleted the status will be cleared in @@ -441,7 +469,7 @@ func (m *manager) syncPod(uid types.UID, status versionedPodStatus) { } pod.Status = status.status // TODO: handle conflict as a retry, make that easier too. - newPod, err := m.kubeClient.Core().Pods(pod.Namespace).UpdateStatus(pod) + newPod, err := m.kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(pod) if err != nil { glog.Warningf("Failed to update status for pod %q: %v", format.Pod(pod), err) return @@ -456,7 +484,7 @@ func (m *manager) syncPod(uid types.UID, status versionedPodStatus) { deleteOptions := metav1.NewDeleteOptions(0) // Use the pod UID as the precondition for deletion to prevent deleting a newly created pod with the same name and namespace. 
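The transition check added above aborts a status update whenever a container that was previously reported as terminated would reappear as non-terminated, unless the restart policy permits it (Always, or OnFailure for containers that exited non-zero). A sketch of the same rule with simplified stand-in types in place of v1.ContainerStatus and v1.RestartPolicy.

package main

import (
	"errors"
	"fmt"
)

type restartPolicy string

const (
	restartAlways    restartPolicy = "Always"
	restartOnFailure restartPolicy = "OnFailure"
	restartNever     restartPolicy = "Never"
)

type containerStatus struct {
	Name       string
	Terminated bool
	ExitCode   int
}

func checkTransition(oldStatuses, newStatuses []containerStatus, policy restartPolicy) error {
	if policy == restartAlways {
		return nil // containers may always leave the terminated state
	}
	for _, o := range oldStatuses {
		if !o.Terminated {
			continue
		}
		if o.ExitCode != 0 && policy == restartOnFailure {
			continue // a failed container is allowed to restart
		}
		for _, n := range newStatuses {
			if n.Name == o.Name && !n.Terminated {
				return errors.New("terminated container " + n.Name + " attempted illegal transition to non-terminated state")
			}
		}
	}
	return nil
}

func main() {
	oldStatuses := []containerStatus{{Name: "app", Terminated: true, ExitCode: 0}}
	newStatuses := []containerStatus{{Name: "app", Terminated: false}}
	fmt.Println(checkTransition(oldStatuses, newStatuses, restartNever))  // illegal transition error
	fmt.Println(checkTransition(oldStatuses, newStatuses, restartAlways)) // <nil>
}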
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(pod.UID)) - err = m.kubeClient.Core().Pods(pod.Namespace).Delete(pod.Name, deleteOptions) + err = m.kubeClient.CoreV1().Pods(pod.Namespace).Delete(pod.Name, deleteOptions) if err != nil { glog.Warningf("Failed to delete status for pod %q: %v", format.Pod(pod), err) return @@ -513,13 +541,10 @@ func (m *manager) needsReconcile(uid types.UID, status v1.PodStatus) bool { pod = mirrorPod } - podStatus, err := copyStatus(&pod.Status) - if err != nil { - return false - } - normalizeStatus(pod, &podStatus) + podStatus := pod.Status.DeepCopy() + normalizeStatus(pod, podStatus) - if isStatusEqual(&podStatus, &status) { + if isStatusEqual(podStatus, &status) { // If the status from the source is the same with the cached status, // reconcile is not needed. Just return. return false @@ -586,13 +611,3 @@ func normalizeStatus(pod *v1.Pod, status *v1.PodStatus) *v1.PodStatus { kubetypes.SortInitContainerStatuses(pod, status.InitContainerStatuses) return status } - -func copyStatus(source *v1.PodStatus) (v1.PodStatus, error) { - clone, err := scheme.Scheme.DeepCopy(source) - if err != nil { - glog.Errorf("Failed to clone status %+v: %v", source, err) - return v1.PodStatus{}, err - } - status := *clone.(*v1.PodStatus) - return status, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/sysctl/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/sysctl/BUILD index 4a67fb103f95..7d7cc778dc73 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/sysctl/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/sysctl/BUILD @@ -13,9 +13,10 @@ go_library( "runtime.go", "whitelist.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/sysctl", deps = [ - "//pkg/api/v1/helper:go_default_library", - "//pkg/api/validation:go_default_library", + "//pkg/apis/core/v1/helper:go_default_library", + "//pkg/apis/core/validation:go_default_library", "//pkg/apis/extensions/validation:go_default_library", "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/lifecycle:go_default_library", @@ -29,6 +30,7 @@ go_test( "namespace_test.go", "whitelist_test.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/sysctl", library = ":go_default_library", deps = ["//vendor/k8s.io/api/core/v1:go_default_library"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/sysctl/runtime.go b/vendor/k8s.io/kubernetes/pkg/kubelet/sysctl/runtime.go index 36d8e1db3ab8..cd8e34f7c58f 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/sysctl/runtime.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/sysctl/runtime.go @@ -19,7 +19,7 @@ package sysctl import ( "fmt" - v1helper "k8s.io/kubernetes/pkg/api/v1/helper" + v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/lifecycle" ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/sysctl/whitelist.go b/vendor/k8s.io/kubernetes/pkg/kubelet/sysctl/whitelist.go index d78c7b7d103f..9bb1086a5587 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/sysctl/whitelist.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/sysctl/whitelist.go @@ -21,8 +21,8 @@ import ( "strings" "k8s.io/api/core/v1" - v1helper "k8s.io/kubernetes/pkg/api/v1/helper" - "k8s.io/kubernetes/pkg/api/validation" + v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" + "k8s.io/kubernetes/pkg/apis/core/validation" extvalidation "k8s.io/kubernetes/pkg/apis/extensions/validation" "k8s.io/kubernetes/pkg/kubelet/lifecycle" ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/types/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/types/BUILD 
index 296097268d7a..515d1e8a096a 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/types/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/types/BUILD @@ -15,8 +15,9 @@ go_library( "pod_update.go", "types.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/types", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", @@ -30,6 +31,7 @@ go_test( "pod_update_test.go", "types_test.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/types", library = ":go_default_library", deps = [ "//vendor/github.com/stretchr/testify/assert:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_update.go b/vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_update.go index 2b95b3b18793..6bfc3dac63f8 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_update.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_update.go @@ -21,7 +21,7 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - kubeapi "k8s.io/kubernetes/pkg/api" + kubeapi "k8s.io/kubernetes/pkg/apis/core" ) const ( @@ -49,6 +49,8 @@ const ( // Pods with the given ids have unexpected status in this source, // kubelet should reconcile status with this source RECONCILE + // Pods with the given ids have been restored from a checkpoint. + RESTORE // These constants identify the sources of pods // Updates from a file diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/util/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/util/BUILD index 740d2e782a6e..ad092614a9be 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/util/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/util/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["util_test.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/util", library = ":go_default_library", deps = ["//vendor/github.com/stretchr/testify/assert:go_default_library"], ) @@ -20,6 +21,9 @@ go_library( "util.go", "util_unsupported.go", ] + select({ + "@io_bazel_rules_go//go/platform:darwin_amd64": [ + "util_unix.go", + ], "@io_bazel_rules_go//go/platform:linux_amd64": [ "util_unix.go", ], @@ -28,9 +32,14 @@ go_library( ], "//conditions:default": [], }), + importpath = "k8s.io/kubernetes/pkg/kubelet/util", deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", ] + select({ + "@io_bazel_rules_go//go/platform:darwin_amd64": [ + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/golang.org/x/sys/unix:go_default_library", + ], "@io_bazel_rules_go//go/platform:linux_amd64": [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/sys/unix:go_default_library", @@ -51,11 +60,11 @@ filegroup( srcs = [ ":package-srcs", "//pkg/kubelet/util/cache:all-srcs", - "//pkg/kubelet/util/csr:all-srcs", "//pkg/kubelet/util/format:all-srcs", "//pkg/kubelet/util/ioutils:all-srcs", "//pkg/kubelet/util/queue:all-srcs", "//pkg/kubelet/util/sliceutils:all-srcs", + "//pkg/kubelet/util/store:all-srcs", ], tags = ["automanaged"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/util/cache/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/util/cache/BUILD index fa2bdda5e3a7..47910609800c 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/util/cache/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/util/cache/BUILD @@ -9,12 +9,14 @@ load( go_library( name = "go_default_library", srcs = ["object_cache.go"], + importpath = 
"k8s.io/kubernetes/pkg/kubelet/util/cache", deps = ["//vendor/k8s.io/client-go/tools/cache:go_default_library"], ) go_test( name = "go_default_test", srcs = ["object_cache_test.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/util/cache", library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/util/csr/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/util/csr/BUILD deleted file mode 100644 index 41a9a00526cb..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/util/csr/BUILD +++ /dev/null @@ -1,52 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_library( - name = "go_default_library", - srcs = ["csr.go"], - deps = [ - "//pkg/apis/certificates/v1beta1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/api/certificates/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - "//vendor/k8s.io/client-go/util/cert:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) - -go_test( - name = "go_default_test", - srcs = ["csr_test.go"], - library = ":go_default_library", - deps = [ - "//vendor/k8s.io/api/certificates/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1:go_default_library", - "//vendor/k8s.io/client-go/util/cert:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/util/csr/csr.go b/vendor/k8s.io/kubernetes/pkg/kubelet/util/csr/csr.go deleted file mode 100644 index 316de1abe5f8..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/util/csr/csr.go +++ /dev/null @@ -1,207 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package csr - -import ( - "crypto" - "crypto/sha512" - "crypto/x509/pkix" - "encoding/base64" - "fmt" - "reflect" - "time" - - "github.com/golang/glog" - - certificates "k8s.io/api/certificates/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/watch" - certificatesclient "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" - "k8s.io/client-go/tools/cache" - certutil "k8s.io/client-go/util/cert" - certhelper "k8s.io/kubernetes/pkg/apis/certificates/v1beta1" -) - -// RequestNodeCertificate will create a certificate signing request for a node -// (Organization and CommonName for the CSR will be set as expected for node -// certificates) and send it to API server, then it will watch the object's -// status, once approved by API server, it will return the API server's issued -// certificate (pem-encoded). If there is any errors, or the watch timeouts, it -// will return an error. This is intended for use on nodes (kubelet and -// kubeadm). -func RequestNodeCertificate(client certificatesclient.CertificateSigningRequestInterface, privateKeyData []byte, nodeName types.NodeName) (certData []byte, err error) { - subject := &pkix.Name{ - Organization: []string{"system:nodes"}, - CommonName: "system:node:" + string(nodeName), - } - - privateKey, err := certutil.ParsePrivateKeyPEM(privateKeyData) - if err != nil { - return nil, fmt.Errorf("invalid private key for certificate request: %v", err) - } - csrData, err := certutil.MakeCSR(privateKey, subject, nil, nil) - if err != nil { - return nil, fmt.Errorf("unable to generate certificate request: %v", err) - } - - usages := []certificates.KeyUsage{ - certificates.UsageDigitalSignature, - certificates.UsageKeyEncipherment, - certificates.UsageClientAuth, - } - name := digestedName(privateKeyData, subject, usages) - return requestCertificate(client, csrData, name, usages, privateKey) -} - -// requestCertificate will either use an existing (if this process has run -// before but not to completion) or create a certificate signing request using the -// PEM encoded CSR and send it to API server, then it will watch the object's -// status, once approved by API server, it will return the API server's issued -// certificate (pem-encoded). If there is any errors, or the watch timeouts, it -// will return an error. -func requestCertificate(client certificatesclient.CertificateSigningRequestInterface, csrData []byte, name string, usages []certificates.KeyUsage, privateKey interface{}) (certData []byte, err error) { - csr := &certificates.CertificateSigningRequest{ - // Username, UID, Groups will be injected by API server. 
- TypeMeta: metav1.TypeMeta{Kind: "CertificateSigningRequest"}, - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Spec: certificates.CertificateSigningRequestSpec{ - Request: csrData, - Usages: usages, - }, - } - - req, err := client.Create(csr) - switch { - case err == nil: - case errors.IsAlreadyExists(err): - glog.Infof("csr for this node already exists, reusing") - req, err = client.Get(name, metav1.GetOptions{}) - if err != nil { - return nil, fmt.Errorf("cannot retrieve certificate signing request: %v", err) - } - if err := ensureCompatible(req, csr, privateKey); err != nil { - return nil, fmt.Errorf("retrieved csr is not compatible: %v", err) - } - glog.Infof("csr for this node is still valid") - default: - return nil, fmt.Errorf("cannot create certificate signing request: %v", err) - } - - fieldSelector := fields.OneTermEqualSelector("metadata.name", req.Name).String() - - event, err := cache.ListWatchUntil( - 3600*time.Second, - &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - options.FieldSelector = fieldSelector - return client.List(options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - options.FieldSelector = fieldSelector - return client.Watch(options) - }, - }, - func(event watch.Event) (bool, error) { - switch event.Type { - case watch.Modified, watch.Added: - default: - return false, nil - } - csr := event.Object.(*certificates.CertificateSigningRequest) - if csr.UID != req.UID { - return false, fmt.Errorf("csr %q changed UIDs", csr.Name) - } - for _, c := range csr.Status.Conditions { - if c.Type == certificates.CertificateDenied { - return false, fmt.Errorf("certificate signing request is not approved, reason: %v, message: %v", c.Reason, c.Message) - } - if c.Type == certificates.CertificateApproved && csr.Status.Certificate != nil { - return true, nil - } - } - return false, nil - }, - ) - if err != nil { - return nil, fmt.Errorf("cannot watch on the certificate signing request: %v", err) - } - - return event.Object.(*certificates.CertificateSigningRequest).Status.Certificate, nil - -} - -// This digest should include all the relevant pieces of the CSR we care about. -// We can't direcly hash the serialized CSR because of random padding that we -// regenerate every loop and we include usages which are not contained in the -// CSR. This needs to be kept up to date as we add new fields to the node -// certificates and with ensureCompatible. -func digestedName(privateKeyData []byte, subject *pkix.Name, usages []certificates.KeyUsage) string { - hash := sha512.New512_256() - - // Here we make sure two different inputs can't write the same stream - // to the hash. This delimiter is not in the base64.URLEncoding - // alphabet so there is no way to have spill over collisions. 
Without - // it 'CN:foo,ORG:bar' hashes to the same value as 'CN:foob,ORG:ar' - const delimiter = '|' - encode := base64.RawURLEncoding.EncodeToString - - write := func(data []byte) { - hash.Write([]byte(encode(data))) - hash.Write([]byte{delimiter}) - } - - write(privateKeyData) - write([]byte(subject.CommonName)) - for _, v := range subject.Organization { - write([]byte(v)) - } - for _, v := range usages { - write([]byte(v)) - } - - return "node-csr-" + encode(hash.Sum(nil)) -} - -// ensureCompatible ensures that a CSR object is compatible with an original CSR -func ensureCompatible(new, orig *certificates.CertificateSigningRequest, privateKey interface{}) error { - newCsr, err := certhelper.ParseCSR(new) - if err != nil { - return fmt.Errorf("unable to parse new csr: %v", err) - } - origCsr, err := certhelper.ParseCSR(orig) - if err != nil { - return fmt.Errorf("unable to parse original csr: %v", err) - } - if !reflect.DeepEqual(newCsr.Subject, origCsr.Subject) { - return fmt.Errorf("csr subjects differ: new: %#v, orig: %#v", newCsr.Subject, origCsr.Subject) - } - signer, ok := privateKey.(crypto.Signer) - if !ok { - return fmt.Errorf("privateKey is not a signer") - } - newCsr.PublicKey = signer.Public() - if err := newCsr.CheckSignature(); err != nil { - return fmt.Errorf("error validating signature new CSR against old key: %v", err) - } - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/util/format/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/util/format/BUILD index a560f2cfe34e..887330b96aa7 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/util/format/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/util/format/BUILD @@ -12,6 +12,7 @@ go_library( "pod.go", "resources.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/util/format", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", @@ -21,6 +22,7 @@ go_library( go_test( name = "go_default_test", srcs = ["resources_test.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/util/format", library = ":go_default_library", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/util/ioutils/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/util/ioutils/BUILD index 52deb7510607..e84deec931ec 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/util/ioutils/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/util/ioutils/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["ioutils.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/util/ioutils", ) filegroup( diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/util/queue/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/util/queue/BUILD index 137940238c1e..0b43effc87ac 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/util/queue/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/util/queue/BUILD @@ -9,6 +9,7 @@ load( go_library( name = "go_default_library", srcs = ["work_queue.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/util/queue", deps = [ "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library", @@ -18,6 +19,7 @@ go_library( go_test( name = "go_default_test", srcs = ["work_queue_test.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/util/queue", library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/util/sliceutils/BUILD 
b/vendor/k8s.io/kubernetes/pkg/kubelet/util/sliceutils/BUILD index 1d6438571e9c..4134706c80ed 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/util/sliceutils/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/util/sliceutils/BUILD @@ -9,6 +9,7 @@ load( go_library( name = "go_default_library", srcs = ["sliceutils.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/util/sliceutils", deps = [ "//pkg/kubelet/container:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", @@ -31,6 +32,7 @@ filegroup( go_test( name = "go_default_test", srcs = ["sliceutils_test.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/util/sliceutils", library = ":go_default_library", deps = [ "//pkg/kubelet/container:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/util/store/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/util/store/BUILD new file mode 100644 index 000000000000..66640ca19751 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/util/store/BUILD @@ -0,0 +1,42 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "filestore.go", + "store.go", + ], + importpath = "k8s.io/kubernetes/pkg/kubelet/util/store", + visibility = ["//visibility:public"], + deps = ["//pkg/util/filesystem:go_default_library"], +) + +go_test( + name = "go_default_test", + srcs = [ + "filestore_test.go", + "store_test.go", + ], + importpath = "k8s.io/kubernetes/pkg/kubelet/util/store", + library = ":go_default_library", + deps = [ + "//pkg/util/filesystem:go_default_library", + "//vendor/github.com/stretchr/testify/assert:go_default_library", + "//vendor/github.com/stretchr/testify/require:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/util/store/doc.go b/vendor/k8s.io/kubernetes/pkg/kubelet/util/store/doc.go new file mode 100644 index 000000000000..04f0129b2931 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/util/store/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package store hosts a Store interface and its implementations. +package store diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/util/store/filestore.go b/vendor/k8s.io/kubernetes/pkg/kubelet/util/store/filestore.go new file mode 100644 index 000000000000..d22226637baa --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/util/store/filestore.go @@ -0,0 +1,167 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package store + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + utilfs "k8s.io/kubernetes/pkg/util/filesystem" +) + +const ( + // Name prefix for the temporary files. + tmpPrefix = "." +) + +// FileStore is an implementation of the Store interface which stores data in files. +type FileStore struct { + // Absolute path to the base directory for storing data files. + directoryPath string + + // filesystem to use. + filesystem utilfs.Filesystem +} + +// NewFileStore returns an instance of FileStore. +func NewFileStore(path string, fs utilfs.Filesystem) (Store, error) { + if err := ensureDirectory(fs, path); err != nil { + return nil, err + } + return &FileStore{directoryPath: path, filesystem: fs}, nil +} + +// Write writes the given data to a file named key. +func (f *FileStore) Write(key string, data []byte) error { + if err := ValidateKey(key); err != nil { + return err + } + if err := ensureDirectory(f.filesystem, f.directoryPath); err != nil { + return err + } + + return writeFile(f.filesystem, f.getPathByKey(key), data) +} + +// Read reads the data from the file named key. +func (f *FileStore) Read(key string) ([]byte, error) { + if err := ValidateKey(key); err != nil { + return nil, err + } + bytes, err := f.filesystem.ReadFile(f.getPathByKey(key)) + if os.IsNotExist(err) { + return bytes, ErrKeyNotFound + } + return bytes, err +} + +// Delete deletes the key file. +func (f *FileStore) Delete(key string) error { + if err := ValidateKey(key); err != nil { + return err + } + return removePath(f.filesystem, f.getPathByKey(key)) +} + +// List returns all keys in the store. +func (f *FileStore) List() ([]string, error) { + keys := make([]string, 0) + files, err := f.filesystem.ReadDir(f.directoryPath) + if err != nil { + return keys, err + } + for _, f := range files { + if !strings.HasPrefix(f.Name(), tmpPrefix) { + keys = append(keys, f.Name()) + } + } + return keys, nil +} + +// getPathByKey returns the full path of the file for the key. +func (f *FileStore) getPathByKey(key string) string { + return filepath.Join(f.directoryPath, key) +} + +// ensureDirectory creates the directory if it does not exist. +func ensureDirectory(fs utilfs.Filesystem, path string) error { + if _, err := fs.Stat(path); err != nil { + // MkdirAll returns nil if directory already exists. + return fs.MkdirAll(path, 0755) + } + return nil +} + +// writeFile writes data to path in a single transaction. +func writeFile(fs utilfs.Filesystem, path string, data []byte) (retErr error) { + // Create a temporary file in the base directory of `path` with a prefix. + tmpFile, err := fs.TempFile(filepath.Dir(path), tmpPrefix) + if err != nil { + return err + } + + tmpPath := tmpFile.Name() + shouldClose := true + + defer func() { + // Close the file. + if shouldClose { + if err := tmpFile.Close(); err != nil { + if retErr == nil { + retErr = fmt.Errorf("close error: %v", err) + } else { + retErr = fmt.Errorf("failed to close temp file after error %v; close error: %v", retErr, err) + } + } + } + + // Clean up the temp file on error. 
+ if retErr != nil && tmpPath != "" { + if err := removePath(fs, tmpPath); err != nil { + retErr = fmt.Errorf("failed to remove the temporary file (%q) after error %v; remove error: %v", tmpPath, retErr, err) + } + } + }() + + // Write data. + if _, err := tmpFile.Write(data); err != nil { + return err + } + + // Sync file. + if err := tmpFile.Sync(); err != nil { + return err + } + + // Closing the file before renaming. + err = tmpFile.Close() + shouldClose = false + if err != nil { + return err + } + + return fs.Rename(tmpPath, path) +} + +func removePath(fs utilfs.Filesystem, path string) error { + if err := fs.Remove(path); err != nil && !os.IsNotExist(err) { + return err + } + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/util/store/store.go b/vendor/k8s.io/kubernetes/pkg/kubelet/util/store/store.go new file mode 100644 index 000000000000..e797fd812b47 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/util/store/store.go @@ -0,0 +1,64 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package store + +import ( + "fmt" + "regexp" +) + +const ( + keyMaxLength = 250 + + keyCharFmt string = "[A-Za-z0-9]" + keyExtCharFmt string = "[-A-Za-z0-9_.]" + qualifiedKeyFmt string = "(" + keyCharFmt + keyExtCharFmt + "*)?" + keyCharFmt +) + +var ( + // Key must consist of alphanumeric characters, '-', '_' or '.', and must start + // and end with an alphanumeric character. + keyRegex = regexp.MustCompile("^" + qualifiedKeyFmt + "$") + + // ErrKeyNotFound is the error returned if key is not found in Store. + ErrKeyNotFound = fmt.Errorf("key is not found") +) + +// Store provides the interface for storing keyed data. +// Store must be thread-safe +type Store interface { + // key must contain one or more characters in [A-Za-z0-9] + // Write writes data with key. + Write(key string, data []byte) error + // Read retrieves data with key + // Read must return ErrKeyNotFound if key is not found. + Read(key string) ([]byte, error) + // Delete deletes data by key + // Delete must not return error if key does not exist + Delete(key string) error + // List lists all existing keys. + List() ([]string, error) +} + +// ValidateKey returns an error if the given key does not meet the requirement +// of the key format and length. +func ValidateKey(key string) error { + if len(key) <= keyMaxLength && keyRegex.MatchString(key) { + return nil + } + return fmt.Errorf("invalid key: %q", key) +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_unix.go b/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_unix.go index c0c707599740..0b8fea9c84ac 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_unix.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_unix.go @@ -1,4 +1,4 @@ -// +build freebsd linux +// +build freebsd linux darwin /* Copyright 2017 The Kubernetes Authors. 
diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_unsupported.go b/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_unsupported.go index ffc4d642d70a..5fd2e9c6896e 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_unsupported.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_unsupported.go @@ -1,4 +1,4 @@ -// +build !freebsd,!linux,!windows +// +build !freebsd,!linux,!windows,!darwin /* Copyright 2017 The Kubernetes Authors. diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/volume_host.go b/vendor/k8s.io/kubernetes/pkg/kubelet/volume_host.go index 26323b7d8488..ae7847bc6fb5 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/volume_host.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/volume_host.go @@ -20,11 +20,17 @@ import ( "fmt" "net" + "github.com/golang/glog" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + utilfeature "k8s.io/apiserver/pkg/util/feature" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/pkg/cloudprovider" + "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/kubelet/configmap" + "k8s.io/kubernetes/pkg/kubelet/container" + "k8s.io/kubernetes/pkg/kubelet/mountpod" "k8s.io/kubernetes/pkg/kubelet/secret" "k8s.io/kubernetes/pkg/util/io" "k8s.io/kubernetes/pkg/util/mount" @@ -43,11 +49,17 @@ func NewInitializedVolumePluginMgr( configMapManager configmap.Manager, plugins []volume.VolumePlugin, prober volume.DynamicPluginProber) (*volume.VolumePluginMgr, error) { + + mountPodManager, err := mountpod.NewManager(kubelet.getRootDir(), kubelet.podManager) + if err != nil { + return nil, err + } kvh := &kubeletVolumeHost{ kubelet: kubelet, volumePluginMgr: volume.VolumePluginMgr{}, secretManager: secretManager, configMapManager: configMapManager, + mountPodManager: mountPodManager, } if err := kvh.volumePluginMgr.InitPlugins(plugins, prober, kvh); err != nil { @@ -71,12 +83,21 @@ type kubeletVolumeHost struct { volumePluginMgr volume.VolumePluginMgr secretManager secret.Manager configMapManager configmap.Manager + mountPodManager mountpod.Manager +} + +func (kvh *kubeletVolumeHost) GetVolumeDevicePluginDir(pluginName string) string { + return kvh.kubelet.getVolumeDevicePluginDir(pluginName) } func (kvh *kubeletVolumeHost) GetPodVolumeDir(podUID types.UID, pluginName string, volumeName string) string { return kvh.kubelet.getPodVolumeDir(podUID, pluginName, volumeName) } +func (kvh *kubeletVolumeHost) GetPodVolumeDeviceDir(podUID types.UID, pluginName string) string { + return kvh.kubelet.getPodVolumeDeviceDir(podUID, pluginName) +} + func (kvh *kubeletVolumeHost) GetPodPluginDir(podUID types.UID, pluginName string) string { return kvh.kubelet.getPodPluginDir(podUID, pluginName) } @@ -119,7 +140,16 @@ func (kvh *kubeletVolumeHost) GetCloudProvider() cloudprovider.Interface { } func (kvh *kubeletVolumeHost) GetMounter(pluginName string) mount.Interface { - return kvh.kubelet.mounter + exec, err := kvh.getMountExec(pluginName) + if err != nil { + glog.V(2).Info("Error finding mount pod for plugin %s: %s", pluginName, err.Error()) + // Use the default mounter + exec = nil + } + if exec == nil { + return kvh.kubelet.mounter + } + return mount.NewExecMounter(exec, kvh.kubelet.mounter) } func (kvh *kubeletVolumeHost) GetWriter() io.Writer { @@ -158,6 +188,61 @@ func (kvh *kubeletVolumeHost) GetNodeLabels() (map[string]string, error) { return node.Labels, nil } +func (kvh *kubeletVolumeHost) GetNodeName() types.NodeName { + return kvh.kubelet.nodeName +} + func (kvh *kubeletVolumeHost) GetExec(pluginName string) mount.Exec { - return 
mount.NewOsExec() + exec, err := kvh.getMountExec(pluginName) + if err != nil { + glog.V(2).Info("Error finding mount pod for plugin %s: %s", pluginName, err.Error()) + // Use the default exec + exec = nil + } + if exec == nil { + return mount.NewOsExec() + } + return exec +} + +// getMountExec returns mount.Exec implementation that leads to pod with mount +// utilities. It returns nil,nil when there is no such pod and default mounter / +// os.Exec should be used. +func (kvh *kubeletVolumeHost) getMountExec(pluginName string) (mount.Exec, error) { + if !utilfeature.DefaultFeatureGate.Enabled(features.MountContainers) { + glog.V(5).Infof("using default mounter/exec for %s", pluginName) + return nil, nil + } + + pod, container, err := kvh.mountPodManager.GetMountPod(pluginName) + if err != nil { + return nil, err + } + if pod == nil { + // Use default mounter/exec for this plugin + glog.V(5).Infof("using default mounter/exec for %s", pluginName) + return nil, nil + } + glog.V(5).Infof("using container %s/%s/%s to execute mount utilites for %s", pod.Namespace, pod.Name, container, pluginName) + return &containerExec{ + pod: pod, + containerName: container, + kl: kvh.kubelet, + }, nil +} + +// containerExec is implementation of mount.Exec that executes commands in given +// container in given pod. +type containerExec struct { + pod *v1.Pod + containerName string + kl *Kubelet +} + +var _ mount.Exec = &containerExec{} + +func (e *containerExec) Run(cmd string, args ...string) ([]byte, error) { + cmdline := append([]string{cmd}, args...) + glog.V(5).Infof("Exec mounter running in pod %s/%s/%s: %v", e.pod.Namespace, e.pod.Name, e.containerName, cmdline) + return e.kl.RunInContainer(container.GetPodFullName(e.pod), e.pod.UID, e.containerName, cmdline) } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/BUILD index e3259fb1e847..9139cae32c3c 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/BUILD @@ -9,6 +9,7 @@ load( go_library( name = "go_default_library", srcs = ["volume_manager.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/volumemanager", deps = [ "//pkg/kubelet/config:go_default_library", "//pkg/kubelet/container:go_default_library", @@ -20,6 +21,7 @@ go_library( "//pkg/kubelet/volumemanager/reconciler:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", + "//pkg/volume/util:go_default_library", "//pkg/volume/util/operationexecutor:go_default_library", "//pkg/volume/util/types:go_default_library", "//pkg/volume/util/volumehelper:go_default_library", @@ -37,6 +39,7 @@ go_library( go_test( name = "go_default_test", srcs = ["volume_manager_test.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/volumemanager", library = ":go_default_library", deps = [ "//pkg/kubelet/config:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/OWNERS b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/OWNERS index 4739819ce0cc..0f0dc72d7177 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/OWNERS @@ -1,2 +1,10 @@ approvers: - saad-ali +reviewers: +- jsafrane +- gnufied +- rootfs +- jingxu97 +- msau42 +- verult +- davidz627 diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/BUILD index ec3515c9b648..cf5e351aa3b6 100644 --- 
a/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/BUILD @@ -12,6 +12,7 @@ go_library( "actual_state_of_world.go", "desired_state_of_world.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache", deps = [ "//pkg/volume:go_default_library", "//pkg/volume/util/operationexecutor:go_default_library", @@ -29,6 +30,7 @@ go_test( "actual_state_of_world_test.go", "desired_state_of_world_test.go", ], + importpath = "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache", library = ":go_default_library", deps = [ "//pkg/volume:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/actual_state_of_world.go b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/actual_state_of_world.go index ff3b8925ca39..c22049e7bcaf 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/actual_state_of_world.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/actual_state_of_world.go @@ -58,7 +58,7 @@ type ActualStateOfWorld interface { // volume, reset the pod's remountRequired value. // If a volume with the name volumeName does not exist in the list of // attached volumes, an error is returned. - AddPodToVolume(podName volumetypes.UniquePodName, podUID types.UID, volumeName v1.UniqueVolumeName, mounter volume.Mounter, outerVolumeSpecName string, volumeGidValue string) error + AddPodToVolume(podName volumetypes.UniquePodName, podUID types.UID, volumeName v1.UniqueVolumeName, mounter volume.Mounter, blockVolumeMapper volume.BlockVolumeMapper, outerVolumeSpecName string, volumeGidValue string) error // MarkRemountRequired marks each volume that is successfully attached and // mounted for the specified pod as requiring remount (if the plugin for the @@ -254,6 +254,9 @@ type mountedPod struct { // mounter used to mount mounter volume.Mounter + // mapper used to block volumes support + blockVolumeMapper volume.BlockVolumeMapper + // outerVolumeSpecName is the volume.Spec.Name() of the volume as referenced // directly in the pod. 
If the volume was referenced through a persistent // volume claim, this contains the volume.Spec.Name() of the persistent @@ -287,6 +290,7 @@ func (asw *actualStateOfWorld) MarkVolumeAsMounted( podUID types.UID, volumeName v1.UniqueVolumeName, mounter volume.Mounter, + blockVolumeMapper volume.BlockVolumeMapper, outerVolumeSpecName string, volumeGidValue string) error { return asw.AddPodToVolume( @@ -294,6 +298,7 @@ func (asw *actualStateOfWorld) MarkVolumeAsMounted( podUID, volumeName, mounter, + blockVolumeMapper, outerVolumeSpecName, volumeGidValue) } @@ -385,6 +390,7 @@ func (asw *actualStateOfWorld) AddPodToVolume( podUID types.UID, volumeName v1.UniqueVolumeName, mounter volume.Mounter, + blockVolumeMapper volume.BlockVolumeMapper, outerVolumeSpecName string, volumeGidValue string) error { asw.Lock() @@ -403,6 +409,7 @@ func (asw *actualStateOfWorld) AddPodToVolume( podName: podName, podUID: podUID, mounter: mounter, + blockVolumeMapper: blockVolumeMapper, outerVolumeSpecName: outerVolumeSpecName, volumeGidValue: volumeGidValue, } @@ -682,5 +689,7 @@ func getMountedVolume( PluginName: attachedVolume.pluginName, PodUID: mountedPod.podUID, Mounter: mountedPod.mounter, - VolumeGidValue: mountedPod.volumeGidValue}} + BlockVolumeMapper: mountedPod.blockVolumeMapper, + VolumeGidValue: mountedPod.volumeGidValue, + VolumeSpec: attachedVolume.spec}} } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/BUILD index 738d8d5421ba..a3bd36cc0577 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/BUILD @@ -9,7 +9,9 @@ load( go_library( name = "go_default_library", srcs = ["desired_state_of_world_populator.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/volumemanager/populator", deps = [ + "//pkg/features:go_default_library", "//pkg/kubelet/config:go_default_library", "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/pod:go_default_library", @@ -17,6 +19,7 @@ go_library( "//pkg/kubelet/util/format:go_default_library", "//pkg/kubelet/volumemanager/cache:go_default_library", "//pkg/volume:go_default_library", + "//pkg/volume/util:go_default_library", "//pkg/volume/util/types:go_default_library", "//pkg/volume/util/volumehelper:go_default_library", "//vendor/github.com/golang/glog:go_default_library", @@ -24,8 +27,8 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", ], ) @@ -45,6 +48,7 @@ filegroup( go_test( name = "go_default_test", srcs = ["desired_state_of_world_populator_test.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/volumemanager/populator", library = ":go_default_library", deps = [ "//pkg/kubelet/configmap:go_default_library", @@ -60,6 +64,9 @@ go_test( "//pkg/volume/util/volumehelper:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library", + 
"//vendor/k8s.io/client-go/testing:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go index a584d535328a..ac762a040fa3 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go @@ -31,8 +31,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" + utilfeature "k8s.io/apiserver/pkg/util/feature" clientset "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/scheme" + "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/kubelet/config" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/pod" @@ -40,6 +41,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/util/format" "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache" "k8s.io/kubernetes/pkg/volume" + volumeutil "k8s.io/kubernetes/pkg/volume/util" volumetypes "k8s.io/kubernetes/pkg/volume/util/types" "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) @@ -261,11 +263,12 @@ func (dswp *desiredStateOfWorldPopulator) processPodVolumes(pod *v1.Pod) { } allVolumesAdded := true + mountsMap, devicesMap := dswp.makeVolumeMap(pod.Spec.Containers) // Process volume spec for each volume defined in pod for _, podVolume := range pod.Spec.Volumes { volumeSpec, volumeGidValue, err := - dswp.createVolumeSpec(podVolume, pod.Namespace) + dswp.createVolumeSpec(podVolume, pod.Name, pod.Namespace, mountsMap, devicesMap) if err != nil { glog.Errorf( "Error processing volume %q for pod %q: %v", @@ -337,7 +340,7 @@ func (dswp *desiredStateOfWorldPopulator) deleteProcessedPod( // specified volume. It dereference any PVC to get PV objects, if needed. // Returns an error if unable to obtain the volume at this time. 
func (dswp *desiredStateOfWorldPopulator) createVolumeSpec( - podVolume v1.Volume, podNamespace string) (*volume.Spec, string, error) { + podVolume v1.Volume, podName string, podNamespace string, mountsMap map[string]bool, devicesMap map[string]bool) (*volume.Spec, string, error) { if pvcSource := podVolume.VolumeSource.PersistentVolumeClaim; pvcSource != nil { glog.V(10).Infof( @@ -382,33 +385,48 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec( pvcSource.ClaimName, pvcUID) + // TODO: remove feature gate check after no longer needed + if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + volumeMode, err := volumehelper.GetVolumeMode(volumeSpec) + if err != nil { + return nil, "", err + } + // Error if a container has volumeMounts but the volumeMode of PVC isn't Filesystem + if mountsMap[podVolume.Name] && volumeMode != v1.PersistentVolumeFilesystem { + return nil, "", fmt.Errorf( + "Volume %q has volumeMode %q, but is specified in volumeMounts for pod %q/%q", + podVolume.Name, + volumeMode, + podNamespace, + podName) + } + // Error if a container has volumeDevices but the volumeMode of PVC isn't Block + if devicesMap[podVolume.Name] && volumeMode != v1.PersistentVolumeBlock { + return nil, "", fmt.Errorf( + "Volume %q has volumeMode %q, but is specified in volumeDevices for pod %q/%q", + podVolume.Name, + volumeMode, + podNamespace, + podName) + } + } return volumeSpec, volumeGidValue, nil } // Do not return the original volume object, since the source could mutate it - clonedPodVolumeObj, err := scheme.Scheme.DeepCopy(&podVolume) - if err != nil || clonedPodVolumeObj == nil { - return nil, "", fmt.Errorf( - "failed to deep copy %q volume object. err=%v", podVolume.Name, err) - } - - clonedPodVolume, ok := clonedPodVolumeObj.(*v1.Volume) - if !ok { - return nil, "", fmt.Errorf( - "failed to cast clonedPodVolume %#v to v1.Volume", - clonedPodVolumeObj) - } + clonedPodVolume := podVolume.DeepCopy() return volume.NewSpecFromVolume(clonedPodVolume), "", nil } // getPVCExtractPV fetches the PVC object with the given namespace and name from -// the API server extracts the name of the PV it is pointing to and returns it. +// the API server, checks whether PVC is being deleted, extracts the name of the PV +// it is pointing to and returns it. // An error is returned if the PVC object's phase is not "Bound". func (dswp *desiredStateOfWorldPopulator) getPVCExtractPV( namespace string, claimName string) (string, types.UID, error) { pvc, err := - dswp.kubeClient.Core().PersistentVolumeClaims(namespace).Get(claimName, metav1.GetOptions{}) + dswp.kubeClient.CoreV1().PersistentVolumeClaims(namespace).Get(claimName, metav1.GetOptions{}) if err != nil || pvc == nil { return "", "", fmt.Errorf( "failed to fetch PVC %s/%s from API server. err=%v", @@ -417,6 +435,23 @@ func (dswp *desiredStateOfWorldPopulator) getPVCExtractPV( err) } + if utilfeature.DefaultFeatureGate.Enabled(features.PVCProtection) { + // Pods that uses a PVC that is being deleted must not be started. + // + // In case an old kubelet is running without this check or some kubelets + // have this feature disabled, the worst that can happen is that such + // pod is scheduled. This was the default behavior in 1.8 and earlier + // and users should not be that surprised. + // It should happen only in very rare case when scheduler schedules + // a pod and user deletes a PVC that's used by it at the same time. 
+ if volumeutil.IsPVCBeingDeleted(pvc) { + return "", "", fmt.Errorf( + "can't start pod because PVC %s/%s is being deleted", + namespace, + claimName) + } + } + if pvc.Status.Phase != v1.ClaimBound || pvc.Spec.VolumeName == "" { return "", "", fmt.Errorf( @@ -437,7 +472,7 @@ func (dswp *desiredStateOfWorldPopulator) getPVSpec( name string, pvcReadOnly bool, expectedClaimUID types.UID) (*volume.Spec, string, error) { - pv, err := dswp.kubeClient.Core().PersistentVolumes().Get(name, metav1.GetOptions{}) + pv, err := dswp.kubeClient.CoreV1().PersistentVolumes().Get(name, metav1.GetOptions{}) if err != nil || pv == nil { return nil, "", fmt.Errorf( "failed to fetch PV %q from API server. err=%v", name, err) @@ -461,6 +496,28 @@ func (dswp *desiredStateOfWorldPopulator) getPVSpec( return volume.NewSpecFromPersistentVolume(pv, pvcReadOnly), volumeGidValue, nil } +func (dswp *desiredStateOfWorldPopulator) makeVolumeMap(containers []v1.Container) (map[string]bool, map[string]bool) { + volumeDevicesMap := make(map[string]bool) + volumeMountsMap := make(map[string]bool) + + for _, container := range containers { + if container.VolumeMounts != nil { + for _, mount := range container.VolumeMounts { + volumeMountsMap[mount.Name] = true + } + } + // TODO: remove feature gate check after no longer needed + if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) && + container.VolumeDevices != nil { + for _, device := range container.VolumeDevices { + volumeDevicesMap[device.Name] = true + } + } + } + + return volumeMountsMap, volumeDevicesMap +} + func getPVVolumeGidAnnotationValue(pv *v1.PersistentVolume) string { if volumeGid, ok := pv.Annotations[volumehelper.VolumeGidAnnotationKey]; ok { return volumeGid diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/BUILD index 970655746367..6fca7c17d827 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/BUILD @@ -9,8 +9,10 @@ load( go_library( name = "go_default_library", srcs = ["reconciler.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler", deps = [ - "//cmd/kubelet/app/options:go_default_library", + "//pkg/features:go_default_library", + "//pkg/kubelet/config:go_default_library", "//pkg/kubelet/volumemanager/cache:go_default_library", "//pkg/util/file:go_default_library", "//pkg/util/goroutinemap/exponentialbackoff:go_default_library", @@ -26,6 +28,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", ], ) @@ -33,6 +36,7 @@ go_library( go_test( name = "go_default_test", srcs = ["reconciler_test.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler", library = ":go_default_library", deps = [ "//pkg/kubelet/volumemanager/cache:go_default_library", @@ -43,10 +47,12 @@ go_test( "//pkg/volume/util/volumehelper:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library", "//vendor/k8s.io/client-go/testing:go_default_library", "//vendor/k8s.io/client-go/tools/record:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler.go b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler.go index 73ac4da99c8c..2cb07477c74d 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler/reconciler.go @@ -22,6 +22,7 @@ package reconciler import ( "fmt" "io/ioutil" + "os" "path" "time" @@ -30,13 +31,15 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" + utilfeature "k8s.io/apiserver/pkg/util/feature" clientset "k8s.io/client-go/kubernetes" - "k8s.io/kubernetes/cmd/kubelet/app/options" + "k8s.io/kubernetes/pkg/features" + "k8s.io/kubernetes/pkg/kubelet/config" "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache" utilfile "k8s.io/kubernetes/pkg/util/file" "k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff" "k8s.io/kubernetes/pkg/util/mount" - "k8s.io/kubernetes/pkg/util/strings" + utilstrings "k8s.io/kubernetes/pkg/util/strings" volumepkg "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations" "k8s.io/kubernetes/pkg/volume/util/operationexecutor" @@ -171,10 +174,12 @@ func (rc *reconciler) reconcile() { // Ensure volumes that should be unmounted are unmounted. for _, mountedVolume := range rc.actualStateOfWorld.GetMountedVolumes() { if !rc.desiredStateOfWorld.PodExistsInVolume(mountedVolume.PodName, mountedVolume.VolumeName) { - // Volume is mounted, unmount it - glog.V(12).Infof(mountedVolume.GenerateMsgDetailed("Starting operationExecutor.UnmountVolume", "")) - err := rc.operationExecutor.UnmountVolume( - mountedVolume.MountedVolume, rc.actualStateOfWorld) + volumeHandler, err := operationexecutor.NewVolumeHandler(mountedVolume.VolumeSpec, rc.operationExecutor) + if err != nil { + glog.Errorf(mountedVolume.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.NewVolumeHandler for UnmountVolume failed"), err).Error()) + continue + } + err = volumeHandler.UnmountVolumeHandler(mountedVolume.MountedVolume, rc.actualStateOfWorld) if err != nil && !nestedpendingoperations.IsAlreadyExists(err) && !exponentialbackoff.IsExponentialBackoff(err) { @@ -239,12 +244,12 @@ func (rc *reconciler) reconcile() { if isRemount { remountingLogStr = "Volume is already mounted to pod, but remount was requested." 
} - glog.V(12).Infof(volumeToMount.GenerateMsgDetailed("Starting operationExecutor.MountVolume", remountingLogStr)) - err := rc.operationExecutor.MountVolume( - rc.waitForAttachTimeout, - volumeToMount.VolumeToMount, - rc.actualStateOfWorld, - isRemount) + volumeHandler, err := operationexecutor.NewVolumeHandler(volumeToMount.VolumeSpec, rc.operationExecutor) + if err != nil { + glog.Errorf(volumeToMount.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.NewVolumeHandler for MountVolume failed"), err).Error()) + continue + } + err = volumeHandler.MountVolumeHandler(rc.waitForAttachTimeout, volumeToMount.VolumeToMount, rc.actualStateOfWorld, isRemount, remountingLogStr) if err != nil && !nestedpendingoperations.IsAlreadyExists(err) && !exponentialbackoff.IsExponentialBackoff(err) { @@ -268,10 +273,12 @@ func (rc *reconciler) reconcile() { if !rc.desiredStateOfWorld.VolumeExists(attachedVolume.VolumeName) && !rc.operationExecutor.IsOperationPending(attachedVolume.VolumeName, nestedpendingoperations.EmptyUniquePodName) { if attachedVolume.GloballyMounted { - // Volume is globally mounted to device, unmount it - glog.V(12).Infof(attachedVolume.GenerateMsgDetailed("Starting operationExecutor.UnmountDevice", "")) - err := rc.operationExecutor.UnmountDevice( - attachedVolume.AttachedVolume, rc.actualStateOfWorld, rc.mounter) + volumeHandler, err := operationexecutor.NewVolumeHandler(attachedVolume.VolumeSpec, rc.operationExecutor) + if err != nil { + glog.Errorf(attachedVolume.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.NewVolumeHandler for UnmountDevice failed"), err).Error()) + continue + } + err = volumeHandler.UnmountDeviceHandler(attachedVolume.AttachedVolume, rc.actualStateOfWorld, rc.mounter) if err != nil && !nestedpendingoperations.IsAlreadyExists(err) && !exponentialbackoff.IsExponentialBackoff(err) { @@ -332,6 +339,7 @@ type podVolume struct { volumeSpecName string mountPath string pluginName string + volumeMode v1.PersistentVolumeMode } type reconstructedVolume struct { @@ -345,6 +353,7 @@ type reconstructedVolume struct { devicePath string reportedInUse bool mounter volumepkg.Mounter + blockVolumeMapper volumepkg.BlockVolumeMapper } // reconstructFromDisk scans the volume directories under the given pod directory. 
If the volume is not @@ -419,20 +428,44 @@ func (rc *reconciler) syncStates(podsDir string) { // Reconstruct Volume object and reconstructedVolume data structure by reading the pod's volume directories func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume, error) { + // plugin initializations plugin, err := rc.volumePluginMgr.FindPluginByName(volume.pluginName) if err != nil { return nil, err } - volumeSpec, err := plugin.ConstructVolumeSpec(volume.volumeSpecName, volume.mountPath) + attachablePlugin, err := rc.volumePluginMgr.FindAttachablePluginByName(volume.pluginName) if err != nil { return nil, err } + + // Create volumeSpec pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ UID: types.UID(volume.podName), }, } - attachablePlugin, err := rc.volumePluginMgr.FindAttachablePluginByName(volume.pluginName) + // TODO: remove feature gate check after no longer needed + var mapperPlugin volumepkg.BlockVolumePlugin + tmpSpec := &volumepkg.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{}}} + if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + mapperPlugin, err = rc.volumePluginMgr.FindMapperPluginByName(volume.pluginName) + if err != nil { + return nil, err + } + tmpSpec = &volumepkg.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{VolumeMode: &volume.volumeMode}}} + } + volumeHandler, err := operationexecutor.NewVolumeHandler(tmpSpec, rc.operationExecutor) + if err != nil { + return nil, err + } + volumeSpec, err := volumeHandler.ReconstructVolumeHandler( + plugin, + mapperPlugin, + pod.UID, + volume.podName, + volume.volumeSpecName, + volume.mountPath, + volume.pluginName) if err != nil { return nil, err } @@ -448,17 +481,14 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume, uniqueVolumeName = volumehelper.GetUniqueVolumeNameForNonAttachableVolume(volume.podName, plugin, volumeSpec) } - if attachablePlugin != nil { - if isNotMount, mountCheckErr := rc.mounter.IsLikelyNotMountPoint(volume.mountPath); mountCheckErr != nil { - return nil, fmt.Errorf("Could not check whether the volume %q (spec.Name: %q) pod %q (UID: %q) is mounted with: %v", - uniqueVolumeName, - volumeSpec.Name(), - volume.podName, - pod.UID, - mountCheckErr) - } else if isNotMount { - return nil, fmt.Errorf("Volume: %q is not mounted", uniqueVolumeName) - } + // Check existence of mount point for filesystem volume or symbolic link for block volume + isExist, checkErr := volumeHandler.CheckVolumeExistence(volume.mountPath, volumeSpec.Name(), rc.mounter, uniqueVolumeName, volume.podName, pod.UID, attachablePlugin) + if checkErr != nil { + return nil, err + } + // If mount or symlink doesn't exist, volume reconstruction should be failed + if !isExist { + return nil, fmt.Errorf("Volume: %q is not mounted", uniqueVolumeName) } volumeMounter, newMounterErr := plugin.NewMounter( @@ -467,7 +497,7 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume, volumepkg.VolumeOptions{}) if newMounterErr != nil { return nil, fmt.Errorf( - "MountVolume.NewMounter failed for volume %q (spec.Name: %q) pod %q (UID: %q) with: %v", + "reconstructVolume.NewMounter failed for volume %q (spec.Name: %q) pod %q (UID: %q) with: %v", uniqueVolumeName, volumeSpec.Name(), volume.podName, @@ -475,6 +505,27 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume, newMounterErr) } + // TODO: remove feature gate check after no longer needed + var volumeMapper volumepkg.BlockVolumeMapper + 
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + var newMapperErr error + if mapperPlugin != nil { + volumeMapper, newMapperErr = mapperPlugin.NewBlockVolumeMapper( + volumeSpec, + pod, + volumepkg.VolumeOptions{}) + if newMapperErr != nil { + return nil, fmt.Errorf( + "reconstructVolume.NewBlockVolumeMapper failed for volume %q (spec.Name: %q) pod %q (UID: %q) with: %v", + uniqueVolumeName, + volumeSpec.Name(), + volume.podName, + pod.UID, + newMapperErr) + } + } + } + reconstructedVolume := &reconstructedVolume{ volumeName: uniqueVolumeName, podName: volume.podName, @@ -489,13 +540,14 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume, volumeGidValue: "", devicePath: "", mounter: volumeMounter, + blockVolumeMapper: volumeMapper, } return reconstructedVolume, nil } func (rc *reconciler) updateStates(volumesNeedUpdate map[v1.UniqueVolumeName]*reconstructedVolume) error { // Get the node status to retrieve volume device path information. - node, fetchErr := rc.kubeClient.Core().Nodes().Get(string(rc.nodeName), metav1.GetOptions{}) + node, fetchErr := rc.kubeClient.CoreV1().Nodes().Get(string(rc.nodeName), metav1.GetOptions{}) if fetchErr != nil { glog.Errorf("updateStates in reconciler: could not get node status with error %v", fetchErr) } else { @@ -518,7 +570,6 @@ func (rc *reconciler) updateStates(volumesNeedUpdate map[v1.UniqueVolumeName]*re volumeToMount.VolumeName, volume.outerVolumeSpecName) } } - for _, volume := range volumesNeedUpdate { err := rc.actualStateOfWorld.MarkVolumeAsAttached( volume.volumeName, volume.volumeSpec, "" /* nodeName */, volume.devicePath) @@ -532,6 +583,7 @@ func (rc *reconciler) updateStates(volumesNeedUpdate map[v1.UniqueVolumeName]*re types.UID(volume.podName), volume.volumeName, volume.mounter, + volume.blockVolumeMapper, volume.outerVolumeSpecName, volume.volumeGidValue) if err != nil { @@ -574,33 +626,45 @@ func getVolumesFromPodDir(podDir string) ([]podVolume, error) { } podName := podsDirInfo[i].Name() podDir := path.Join(podDir, podName) - volumesDir := path.Join(podDir, options.DefaultKubeletVolumesDirName) - volumesDirInfo, err := ioutil.ReadDir(volumesDir) - if err != nil { - glog.Errorf("Could not read volume directory %q: %v", volumesDir, err) - continue - } - for _, volumeDir := range volumesDirInfo { - pluginName := volumeDir.Name() - volumePluginPath := path.Join(volumesDir, pluginName) - volumePluginDirs, err := utilfile.ReadDirNoStat(volumePluginPath) - if err != nil { - glog.Errorf("Could not read volume plugin directory %q: %v", volumePluginPath, err) + // Find filesystem volume information + // ex. filesystem volume: /pods/{podUid}/volume/{escapeQualifiedPluginName}/{volumeName} + volumesDirs := map[v1.PersistentVolumeMode]string{ + v1.PersistentVolumeFilesystem: path.Join(podDir, config.DefaultKubeletVolumesDirName), + } + // TODO: remove feature gate check after no longer needed + if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + // Find block volume information + // ex. 
block volume: /pods/{podUid}/volumeDevices/{escapeQualifiedPluginName}/{volumeName} + volumesDirs[v1.PersistentVolumeBlock] = path.Join(podDir, config.DefaultKubeletVolumeDevicesDirName) + } + for volumeMode, volumesDir := range volumesDirs { + var volumesDirInfo []os.FileInfo + if volumesDirInfo, err = ioutil.ReadDir(volumesDir); err != nil { + // Just skip the loop becuase given volumesDir doesn't exist depending on volumeMode continue } - - unescapePluginName := strings.UnescapeQualifiedNameForDisk(pluginName) - for _, volumeName := range volumePluginDirs { - mountPath := path.Join(volumePluginPath, volumeName) - volumes = append(volumes, podVolume{ - podName: volumetypes.UniquePodName(podName), - volumeSpecName: volumeName, - mountPath: mountPath, - pluginName: unescapePluginName, - }) + for _, volumeDir := range volumesDirInfo { + pluginName := volumeDir.Name() + volumePluginPath := path.Join(volumesDir, pluginName) + volumePluginDirs, err := utilfile.ReadDirNoStat(volumePluginPath) + if err != nil { + glog.Errorf("Could not read volume plugin directory %q: %v", volumePluginPath, err) + continue + } + unescapePluginName := utilstrings.UnescapeQualifiedNameForDisk(pluginName) + for _, volumeName := range volumePluginDirs { + mountPath := path.Join(volumePluginPath, volumeName) + glog.V(5).Infof("podName: %v, mount path from volume plugin directory: %v, ", podName, mountPath) + volumes = append(volumes, podVolume{ + podName: volumetypes.UniquePodName(podName), + volumeSpecName: volumeName, + mountPath: mountPath, + pluginName: unescapePluginName, + volumeMode: volumeMode, + }) + } } - } } glog.V(10).Infof("Get volumes from pod directory %q %+v", podDir, volumes) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/volume_manager.go b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/volume_manager.go index d48266f52a35..d3e7711407b2 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/volume_manager.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/volume_manager.go @@ -41,6 +41,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" + "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/pkg/volume/util/operationexecutor" "k8s.io/kubernetes/pkg/volume/util/types" "k8s.io/kubernetes/pkg/volume/util/volumehelper" @@ -168,8 +169,8 @@ func NewVolumeManager( kubeClient, volumePluginMgr, recorder, - checkNodeCapabilitiesBeforeMount), - ), + checkNodeCapabilitiesBeforeMount, + util.NewBlockVolumePathHandler())), } vm.desiredStateOfWorldPopulator = populator.NewDesiredStateOfWorldPopulator( @@ -253,7 +254,11 @@ func (vm *volumeManager) Run(sourcesReady config.SourcesReady, stopCh <-chan str func (vm *volumeManager) GetMountedVolumesForPod(podName types.UniquePodName) container.VolumeMap { podVolumes := make(container.VolumeMap) for _, mountedVolume := range vm.actualStateOfWorld.GetMountedVolumesForPod(podName) { - podVolumes[mountedVolume.OuterVolumeSpecName] = container.VolumeInfo{Mounter: mountedVolume.Mounter} + podVolumes[mountedVolume.OuterVolumeSpecName] = container.VolumeInfo{ + Mounter: mountedVolume.Mounter, + BlockVolumeMapper: mountedVolume.BlockVolumeMapper, + ReadOnly: mountedVolume.VolumeSpec.ReadOnly, + } } return podVolumes } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/BUILD new file mode 100644 index 000000000000..049fae2385f1 --- /dev/null +++ 
b/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/BUILD @@ -0,0 +1,57 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "winstats.go", + ] + select({ + "@io_bazel_rules_go//go/platform:windows_amd64": [ + "perfcounter_nodestats.go", + "perfcounters.go", + "version.go", + ], + "//conditions:default": [], + }), + importpath = "k8s.io/kubernetes/pkg/kubelet/winstats", + deps = [ + "//vendor/github.com/google/cadvisor/info/v1:go_default_library", + "//vendor/github.com/google/cadvisor/info/v2:go_default_library", + ] + select({ + "@io_bazel_rules_go//go/platform:windows_amd64": [ + "//vendor/github.com/JeffAshton/win_pdh:go_default_library", + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/golang.org/x/sys/windows:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + ], + "//conditions:default": [], + }), +) + +go_test( + name = "go_default_test", + srcs = ["winstats_test.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/winstats", + library = ":go_default_library", + deps = [ + "//vendor/github.com/google/cadvisor/info/v1:go_default_library", + "//vendor/github.com/google/cadvisor/info/v2:go_default_library", + "//vendor/github.com/stretchr/testify/assert:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/perfcounter_nodestats.go b/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/perfcounter_nodestats.go new file mode 100644 index 000000000000..b6855df4013c --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/perfcounter_nodestats.go @@ -0,0 +1,182 @@ +// +build windows + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package winstats + +import ( + "errors" + "os" + "runtime" + "sync" + "syscall" + "time" + "unsafe" + + "github.com/golang/glog" + cadvisorapi "github.com/google/cadvisor/info/v1" + "k8s.io/apimachinery/pkg/util/wait" +) + +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + procGetPhysicallyInstalledSystemMemory = modkernel32.NewProc("GetPhysicallyInstalledSystemMemory") +) + +// NewPerfCounterClient creates a client using perf counters +func NewPerfCounterClient() (Client, error) { + return newClient(&perfCounterNodeStatsClient{}) +} + +// perfCounterNodeStatsClient is a client that provides Windows Stats via PerfCounters +type perfCounterNodeStatsClient struct { + nodeMetrics + mu sync.RWMutex // mu protects nodeMetrics + nodeInfo +} + +func (p *perfCounterNodeStatsClient) startMonitoring() error { + memory, err := getPhysicallyInstalledSystemMemoryBytes() + if err != nil { + return err + } + + kernelVersion, err := getKernelVersion() + if err != nil { + return err + } + + osImageVersion, err := getOSImageVersion() + if err != nil { + return err + } + + p.nodeInfo = nodeInfo{ + kernelVersion: kernelVersion, + osImageVersion: osImageVersion, + memoryPhysicalCapacityBytes: memory, + startTime: time.Now(), + } + + cpuCounter, err := newPerfCounter(cpuQuery) + if err != nil { + return err + } + + memWorkingSetCounter, err := newPerfCounter(memoryPrivWorkingSetQuery) + if err != nil { + return err + } + + memCommittedBytesCounter, err := newPerfCounter(memoryCommittedBytesQuery) + if err != nil { + return err + } + + go wait.Forever(func() { + p.collectMetricsData(cpuCounter, memWorkingSetCounter, memCommittedBytesCounter) + }, perfCounterUpdatePeriod) + + return nil +} + +func (p *perfCounterNodeStatsClient) getMachineInfo() (*cadvisorapi.MachineInfo, error) { + hostname, err := os.Hostname() + if err != nil { + return nil, err + } + + return &cadvisorapi.MachineInfo{ + NumCores: runtime.NumCPU(), + MemoryCapacity: p.nodeInfo.memoryPhysicalCapacityBytes, + MachineID: hostname, + }, nil +} + +func (p *perfCounterNodeStatsClient) getVersionInfo() (*cadvisorapi.VersionInfo, error) { + return &cadvisorapi.VersionInfo{ + KernelVersion: p.nodeInfo.kernelVersion, + ContainerOsVersion: p.nodeInfo.osImageVersion, + }, nil +} + +func (p *perfCounterNodeStatsClient) getNodeMetrics() (nodeMetrics, error) { + p.mu.RLock() + defer p.mu.RUnlock() + return p.nodeMetrics, nil +} + +func (p *perfCounterNodeStatsClient) getNodeInfo() nodeInfo { + return p.nodeInfo +} + +func (p *perfCounterNodeStatsClient) collectMetricsData(cpuCounter, memWorkingSetCounter, memCommittedBytesCounter *perfCounter) { + cpuValue, err := cpuCounter.getData() + if err != nil { + glog.Errorf("Unable to get cpu perf counter data; err: %v", err) + return + } + + memWorkingSetValue, err := memWorkingSetCounter.getData() + if err != nil { + glog.Errorf("Unable to get memWorkingSet perf counter data; err: %v", err) + return + } + + memCommittedBytesValue, err := memCommittedBytesCounter.getData() + if err != nil { + glog.Errorf("Unable to get memCommittedBytes perf counter data; err: %v", err) + return + } + + p.mu.Lock() + defer p.mu.Unlock() + p.nodeMetrics = nodeMetrics{ + cpuUsageCoreNanoSeconds: p.convertCPUValue(cpuValue), + memoryPrivWorkingSetBytes: memWorkingSetValue, + memoryCommittedBytes: memCommittedBytesValue, + timeStamp: time.Now(), + } +} + +func (p *perfCounterNodeStatsClient) convertCPUValue(cpuValue uint64) uint64 { + cpuCores := runtime.NumCPU() + // This converts perf counter data which is cpu 
percentage for all cores into nanoseconds. + // The formula is (cpuPercentage / 100.0) * #cores * 1e+9 (nano seconds). More info here: + // https://github.com/kubernetes/heapster/issues/650 + newValue := p.nodeMetrics.cpuUsageCoreNanoSeconds + uint64((float64(cpuValue)/100.0)*float64(cpuCores)*1e9) + return newValue +} + +func getPhysicallyInstalledSystemMemoryBytes() (uint64, error) { + var physicalMemoryKiloBytes uint64 + + if ok := getPhysicallyInstalledSystemMemory(&physicalMemoryKiloBytes); !ok { + return 0, errors.New("unable to read physical memory") + } + + return physicalMemoryKiloBytes * 1024, nil // convert kilobytes to bytes +} + +func getPhysicallyInstalledSystemMemory(totalMemoryInKilobytes *uint64) bool { + ret, _, _ := syscall.Syscall(procGetPhysicallyInstalledSystemMemory.Addr(), 1, + uintptr(unsafe.Pointer(totalMemoryInKilobytes)), + 0, + 0) + + return ret != 0 +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/perfcounters.go b/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/perfcounters.go new file mode 100644 index 000000000000..e5ed450b78ea --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/perfcounters.go @@ -0,0 +1,102 @@ +// +build windows + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package winstats + +import ( + "errors" + "fmt" + "time" + "unsafe" + + "github.com/JeffAshton/win_pdh" +) + +const ( + cpuQuery = "\\Processor(_Total)\\% Processor Time" + memoryPrivWorkingSetQuery = "\\Process(_Total)\\Working Set - Private" + memoryCommittedBytesQuery = "\\Memory\\Committed Bytes" + // Perf counters are updated every second. This is the same as the default cadvisor collection period + // see https://github.com/google/cadvisor/blob/master/docs/runtime_options.md#housekeeping + perfCounterUpdatePeriod = 1 * time.Second +) + +type perfCounter struct { + queryHandle win_pdh.PDH_HQUERY + counterHandle win_pdh.PDH_HCOUNTER +} + +func newPerfCounter(counter string) (*perfCounter, error) { + var queryHandle win_pdh.PDH_HQUERY + var counterHandle win_pdh.PDH_HCOUNTER + + ret := win_pdh.PdhOpenQuery(0, 0, &queryHandle) + if ret != win_pdh.ERROR_SUCCESS { + return nil, errors.New("unable to open query through DLL call") + } + + ret = win_pdh.PdhValidatePath(counter) + if ret != win_pdh.ERROR_SUCCESS { + return nil, fmt.Errorf("unable to valid path to counter. Error code is %x", ret) + } + + ret = win_pdh.PdhAddEnglishCounter(queryHandle, counter, 0, &counterHandle) + if ret != win_pdh.ERROR_SUCCESS { + return nil, fmt.Errorf("unable to add process counter. Error code is %x", ret) + } + + ret = win_pdh.PdhCollectQueryData(queryHandle) + if ret != win_pdh.ERROR_SUCCESS { + return nil, fmt.Errorf("unable to collect data from counter. 
Error code is %x", ret) + } + + return &perfCounter{ + queryHandle: queryHandle, + counterHandle: counterHandle, + }, nil +} + +func (p *perfCounter) getData() (uint64, error) { + ret := win_pdh.PdhCollectQueryData(p.queryHandle) + if ret != win_pdh.ERROR_SUCCESS { + return 0, fmt.Errorf("unable to collect data from counter. Error code is %x", ret) + } + + var bufSize, bufCount uint32 + var size = uint32(unsafe.Sizeof(win_pdh.PDH_FMT_COUNTERVALUE_ITEM_DOUBLE{})) + var emptyBuf [1]win_pdh.PDH_FMT_COUNTERVALUE_ITEM_DOUBLE // need at least 1 addressable null ptr. + var data uint64 + + ret = win_pdh.PdhGetFormattedCounterArrayDouble(p.counterHandle, &bufSize, &bufCount, &emptyBuf[0]) + if ret != win_pdh.PDH_MORE_DATA { + return 0, fmt.Errorf("unable to collect data from counter. Error code is %x", ret) + } + + filledBuf := make([]win_pdh.PDH_FMT_COUNTERVALUE_ITEM_DOUBLE, bufCount*size) + ret = win_pdh.PdhGetFormattedCounterArrayDouble(p.counterHandle, &bufSize, &bufCount, &filledBuf[0]) + if ret != win_pdh.ERROR_SUCCESS { + return 0, fmt.Errorf("unable to collect data from counter. Error code is %x", ret) + } + + for i := 0; i < int(bufCount); i++ { + c := filledBuf[i] + data = uint64(c.FmtValue.DoubleValue) + } + + return data, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/version.go b/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/version.go new file mode 100644 index 000000000000..e6cc097f5b1f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/version.go @@ -0,0 +1,96 @@ +// +build windows + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package winstats + +import ( + "fmt" + "unsafe" + + "golang.org/x/sys/windows" +) + +// getCurrentVersionVal gets value of specified key from registry. +func getCurrentVersionVal(key string) (string, error) { + var h windows.Handle + if err := windows.RegOpenKeyEx(windows.HKEY_LOCAL_MACHINE, + windows.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`), + 0, + windows.KEY_READ, + &h); err != nil { + return "", err + } + defer windows.RegCloseKey(h) + + var buf [128]uint16 + var typ uint32 + n := uint32(len(buf) * int(unsafe.Sizeof(buf[0]))) // api expects array of bytes, not uint16 + if err := windows.RegQueryValueEx(h, + windows.StringToUTF16Ptr(key), + nil, + &typ, + (*byte)(unsafe.Pointer(&buf[0])), + &n); err != nil { + return "", err + } + + return windows.UTF16ToString(buf[:]), nil +} + +// getVersionRevision gets revision from UBR registry. +func getVersionRevision() (uint16, error) { + revisionString, err := getCurrentVersionVal("UBR") + if err != nil { + return 0, err + } + + revision, err := windows.UTF16FromString(revisionString) + if err != nil { + return 0, err + } + + return revision[0], nil +} + +// getKernelVersion gets the version of windows kernel.
+func getKernelVersion() (string, error) { + ver, err := windows.GetVersion() + if err != nil { + return "", err + } + + revision, err := getVersionRevision() + if err != nil { + return "", err + } + + major := ver & 0xFF + minor := (ver >> 8) & 0xFF + build := (ver >> 16) & 0xFFFF + return fmt.Sprintf("%d.%d.%05d.%d\n", major, minor, build, revision), nil +} + +// getOSImageVersion gets the osImage name and version. +func getOSImageVersion() (string, error) { + productName, err := getCurrentVersionVal("ProductName") + if err != nil { + return "", nil + } + + return productName, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/winstats.go b/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/winstats.go new file mode 100644 index 000000000000..b02bab4f6bd8 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/winstats/winstats.go @@ -0,0 +1,136 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package winstats provides a client to get node and pod level stats on windows +package winstats + +import ( + "time" + + cadvisorapi "github.com/google/cadvisor/info/v1" + cadvisorapiv2 "github.com/google/cadvisor/info/v2" +) + +// Client is an interface that is used to get stats information. +type Client interface { + WinContainerInfos() (map[string]cadvisorapiv2.ContainerInfo, error) + WinMachineInfo() (*cadvisorapi.MachineInfo, error) + WinVersionInfo() (*cadvisorapi.VersionInfo, error) +} + +// StatsClient is a client that implements the Client interface +type statsClient struct { + client winNodeStatsClient +} + +type winNodeStatsClient interface { + startMonitoring() error + getNodeMetrics() (nodeMetrics, error) + getNodeInfo() nodeInfo + getMachineInfo() (*cadvisorapi.MachineInfo, error) + getVersionInfo() (*cadvisorapi.VersionInfo, error) +} + +type nodeMetrics struct { + cpuUsageCoreNanoSeconds uint64 + memoryPrivWorkingSetBytes uint64 + memoryCommittedBytes uint64 + timeStamp time.Time +} + +type nodeInfo struct { + memoryPhysicalCapacityBytes uint64 + kernelVersion string + osImageVersion string + // startTime is the time when the node was started + startTime time.Time +} + +// newClient constructs a Client. +func newClient(statsNodeClient winNodeStatsClient) (Client, error) { + statsClient := new(statsClient) + statsClient.client = statsNodeClient + + err := statsClient.client.startMonitoring() + if err != nil { + return nil, err + } + + return statsClient, nil +} + +// WinContainerInfos returns a map of container infos. The map contains node and +// pod level stats. Analogous to cadvisor GetContainerInfoV2 method. +func (c *statsClient) WinContainerInfos() (map[string]cadvisorapiv2.ContainerInfo, error) { + infos := make(map[string]cadvisorapiv2.ContainerInfo) + rootContainerInfo, err := c.createRootContainerInfo() + if err != nil { + return nil, err + } + + infos["/"] = *rootContainerInfo + + return infos, nil +} + +// WinMachineInfo returns a cadvisorapi.MachineInfo with details about the +// node machine. 
Analogous to cadvisor MachineInfo method. +func (c *statsClient) WinMachineInfo() (*cadvisorapi.MachineInfo, error) { + return c.client.getMachineInfo() +} + +// WinVersionInfo returns a cadvisorapi.VersionInfo with version info of +// the kernel and docker runtime. Analogous to cadvisor VersionInfo method. +func (c *statsClient) WinVersionInfo() (*cadvisorapi.VersionInfo, error) { + return c.client.getVersionInfo() +} + +func (c *statsClient) createRootContainerInfo() (*cadvisorapiv2.ContainerInfo, error) { + nodeMetrics, err := c.client.getNodeMetrics() + + if err != nil { + return nil, err + } + var stats []*cadvisorapiv2.ContainerStats + + stats = append(stats, &cadvisorapiv2.ContainerStats{ + Timestamp: nodeMetrics.timeStamp, + Cpu: &cadvisorapi.CpuStats{ + Usage: cadvisorapi.CpuUsage{ + Total: nodeMetrics.cpuUsageCoreNanoSeconds, + }, + }, + Memory: &cadvisorapi.MemoryStats{ + WorkingSet: nodeMetrics.memoryPrivWorkingSetBytes, + Usage: nodeMetrics.memoryCommittedBytes, + }, + }) + + nodeInfo := c.client.getNodeInfo() + rootInfo := cadvisorapiv2.ContainerInfo{ + Spec: cadvisorapiv2.ContainerSpec{ + CreationTime: nodeInfo.startTime, + HasCpu: true, + HasMemory: true, + Memory: cadvisorapiv2.MemorySpec{ + Limit: nodeInfo.memoryPhysicalCapacityBytes, + }, + }, + Stats: stats, + } + + return &rootInfo, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/master/BUILD b/vendor/k8s.io/kubernetes/pkg/master/BUILD index 8288c3346503..bcb36660bba2 100644 --- a/vendor/k8s.io/kubernetes/pkg/master/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/master/BUILD @@ -16,11 +16,9 @@ go_library( "master.go", "services.go", ], + importpath = "k8s.io/kubernetes/pkg/master", deps = [ - "//cmd/kube-apiserver/app/options:go_default_library", - "//pkg/api:go_default_library", - "//pkg/api/endpoints:go_default_library", - "//pkg/api/install:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/admission/install:go_default_library", "//pkg/apis/admissionregistration/install:go_default_library", "//pkg/apis/apps/install:go_default_library", @@ -30,6 +28,9 @@ go_library( "//pkg/apis/batch/install:go_default_library", "//pkg/apis/certificates/install:go_default_library", "//pkg/apis/componentconfig/install:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/install:go_default_library", + "//pkg/apis/events/install:go_default_library", "//pkg/apis/extensions/install:go_default_library", "//pkg/apis/imagepolicy/install:go_default_library", "//pkg/apis/networking/install:go_default_library", @@ -39,7 +40,9 @@ go_library( "//pkg/apis/settings/install:go_default_library", "//pkg/apis/storage/install:go_default_library", "//pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library", + "//pkg/kubeapiserver/options:go_default_library", "//pkg/kubelet/client:go_default_library", + "//pkg/master/reconcilers:go_default_library", "//pkg/master/tunneler:go_default_library", "//pkg/registry/admissionregistration/rest:go_default_library", "//pkg/registry/apps/rest:go_default_library", @@ -48,11 +51,14 @@ go_library( "//pkg/registry/autoscaling/rest:go_default_library", "//pkg/registry/batch/rest:go_default_library", "//pkg/registry/certificates/rest:go_default_library", + "//pkg/registry/core/endpoint:go_default_library", + "//pkg/registry/core/endpoint/storage:go_default_library", "//pkg/registry/core/rangeallocation:go_default_library", "//pkg/registry/core/rest:go_default_library", "//pkg/registry/core/service/ipallocator:go_default_library", 
"//pkg/registry/core/service/ipallocator/controller:go_default_library", "//pkg/registry/core/service/portallocator/controller:go_default_library", + "//pkg/registry/events/rest:go_default_library", "//pkg/registry/extensions/rest:go_default_library", "//pkg/registry/networking/rest:go_default_library", "//pkg/registry/policy/rest:go_default_library", @@ -65,6 +71,8 @@ go_library( "//pkg/util/node:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", + "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/apps/v1beta1:go_default_library", "//vendor/k8s.io/api/apps/v1beta2:go_default_library", "//vendor/k8s.io/api/authentication/v1:go_default_library", @@ -77,6 +85,7 @@ go_library( "//vendor/k8s.io/api/batch/v1beta1:go_default_library", "//vendor/k8s.io/api/certificates/v1beta1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/api/events/v1beta1:go_default_library", "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/api/networking/v1:go_default_library", "//vendor/k8s.io/api/policy/v1beta1:go_default_library", @@ -95,6 +104,7 @@ go_library( "//vendor/k8s.io/apiserver/pkg/server:go_default_library", "//vendor/k8s.io/apiserver/pkg/server/healthz:go_default_library", "//vendor/k8s.io/apiserver/pkg/server/storage:go_default_library", + "//vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory:go_default_library", "//vendor/k8s.io/client-go/informers:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", ], @@ -109,19 +119,23 @@ go_test( "master_openapi_test.go", "master_test.go", ], + features = ["-race"], + importpath = "k8s.io/kubernetes/pkg/master", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", "//pkg/apis/apps:go_default_library", "//pkg/apis/autoscaling:go_default_library", "//pkg/apis/batch:go_default_library", "//pkg/apis/certificates:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", "//pkg/apis/rbac:go_default_library", "//pkg/client/clientset_generated/internalclientset/fake:go_default_library", "//pkg/generated/openapi:go_default_library", "//pkg/kubelet/client:go_default_library", + "//pkg/master/reconcilers:go_default_library", "//pkg/registry/certificates/rest:go_default_library", "//pkg/registry/core/rest:go_default_library", "//pkg/registry/registrytest:go_default_library", @@ -172,6 +186,7 @@ filegroup( ":package-srcs", "//pkg/master/controller/crdregistration:all-srcs", "//pkg/master/ports:all-srcs", + "//pkg/master/reconcilers:all-srcs", "//pkg/master/tunneler:all-srcs", ], tags = ["automanaged"], diff --git a/vendor/k8s.io/kubernetes/pkg/master/client_ca_hook.go b/vendor/k8s.io/kubernetes/pkg/master/client_ca_hook.go index 3a2780c92a7b..fedfdca35ef7 100644 --- a/vendor/k8s.io/kubernetes/pkg/master/client_ca_hook.go +++ b/vendor/k8s.io/kubernetes/pkg/master/client_ca_hook.go @@ -26,7 +26,7 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" genericapiserver "k8s.io/apiserver/pkg/server" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" ) diff 
--git a/vendor/k8s.io/kubernetes/pkg/master/controller.go b/vendor/k8s.io/kubernetes/pkg/master/controller.go index feb347237a9d..ef919b2963d4 100644 --- a/vendor/k8s.io/kubernetes/pkg/master/controller.go +++ b/vendor/k8s.io/kubernetes/pkg/master/controller.go @@ -29,9 +29,9 @@ import ( "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" genericapiserver "k8s.io/apiserver/pkg/server" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/endpoints" + api "k8s.io/kubernetes/pkg/apis/core" coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" + "k8s.io/kubernetes/pkg/master/reconcilers" "k8s.io/kubernetes/pkg/registry/core/rangeallocation" corerest "k8s.io/kubernetes/pkg/registry/core/rest" servicecontroller "k8s.io/kubernetes/pkg/registry/core/service/ipallocator/controller" @@ -57,7 +57,7 @@ type Controller struct { ServiceNodePortInterval time.Duration ServiceNodePortRange utilnet.PortRange - EndpointReconciler EndpointReconciler + EndpointReconciler reconcilers.EndpointReconciler EndpointInterval time.Duration SystemNamespaces []string @@ -112,6 +112,11 @@ func (c *Controller) PostStartHook(hookContext genericapiserver.PostStartHookCon return nil } +func (c *Controller) PreShutdownHook() error { + c.Stop() + return nil +} + // Start begins the core controller loops that must exist for bootstrapping // a cluster. func (c *Controller) Start() { @@ -140,6 +145,14 @@ func (c *Controller) Start() { c.runner.Start() } +func (c *Controller) Stop() { + if c.runner != nil { + c.runner.Stop() + } + endpointPorts := createEndpointPortSpec(c.PublicServicePort, "https", c.ExtraEndpointPorts) + c.EndpointReconciler.StopReconciling("kubernetes", c.PublicIP, endpointPorts) +} + // RunKubernetesNamespaces periodically makes sure that all internal namespaces exist func (c *Controller) RunKubernetesNamespaces(ch chan struct{}) { wait.Until(func() { @@ -242,7 +255,7 @@ func (c *Controller) CreateOrUpdateMasterServiceIfNeeded(serviceName string, ser if s, err := c.ServiceClient.Services(metav1.NamespaceDefault).Get(serviceName, metav1.GetOptions{}); err == nil { // The service already exists. if reconcile { - if svc, updated := getMasterServiceUpdateIfNeeded(s, servicePorts, serviceType); updated { + if svc, updated := reconcilers.GetMasterServiceUpdateIfNeeded(s, servicePorts, serviceType); updated { glog.Warningf("Resetting master service %q to %#v", serviceName, svc) _, err := c.ServiceClient.Services(metav1.NamespaceDefault).Update(svc) return err @@ -272,195 +285,3 @@ func (c *Controller) CreateOrUpdateMasterServiceIfNeeded(serviceName string, ser } return err } - -// EndpointReconciler knows how to reconcile the endpoints for the apiserver service. -type EndpointReconciler interface { - // ReconcileEndpoints sets the endpoints for the given apiserver service (ro or rw). - // ReconcileEndpoints expects that the endpoints objects it manages will all be - // managed only by ReconcileEndpoints; therefore, to understand this, you need only - // understand the requirements. - // - // Requirements: - // * All apiservers MUST use the same ports for their {rw, ro} services. - // * All apiservers MUST use ReconcileEndpoints and only ReconcileEndpoints to manage the - // endpoints for their {rw, ro} services. - // * ReconcileEndpoints is called periodically from all apiservers. 
- ReconcileEndpoints(serviceName string, ip net.IP, endpointPorts []api.EndpointPort, reconcilePorts bool) error -} - -// masterCountEndpointReconciler reconciles endpoints based on a specified expected number of -// masters. masterCountEndpointReconciler implements EndpointReconciler. -type masterCountEndpointReconciler struct { - masterCount int - endpointClient coreclient.EndpointsGetter -} - -var _ EndpointReconciler = &masterCountEndpointReconciler{} - -// NewMasterCountEndpointReconciler creates a new EndpointReconciler that reconciles based on a -// specified expected number of masters. -func NewMasterCountEndpointReconciler(masterCount int, endpointClient coreclient.EndpointsGetter) *masterCountEndpointReconciler { - return &masterCountEndpointReconciler{ - masterCount: masterCount, - endpointClient: endpointClient, - } -} - -// ReconcileEndpoints sets the endpoints for the given apiserver service (ro or rw). -// ReconcileEndpoints expects that the endpoints objects it manages will all be -// managed only by ReconcileEndpoints; therefore, to understand this, you need only -// understand the requirements and the body of this function. -// -// Requirements: -// * All apiservers MUST use the same ports for their {rw, ro} services. -// * All apiservers MUST use ReconcileEndpoints and only ReconcileEndpoints to manage the -// endpoints for their {rw, ro} services. -// * All apiservers MUST know and agree on the number of apiservers expected -// to be running (c.masterCount). -// * ReconcileEndpoints is called periodically from all apiservers. -func (r *masterCountEndpointReconciler) ReconcileEndpoints(serviceName string, ip net.IP, endpointPorts []api.EndpointPort, reconcilePorts bool) error { - e, err := r.endpointClient.Endpoints(metav1.NamespaceDefault).Get(serviceName, metav1.GetOptions{}) - if err != nil { - e = &api.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: serviceName, - Namespace: metav1.NamespaceDefault, - }, - } - } - if errors.IsNotFound(err) { - // Simply create non-existing endpoints for the service. - e.Subsets = []api.EndpointSubset{{ - Addresses: []api.EndpointAddress{{IP: ip.String()}}, - Ports: endpointPorts, - }} - _, err = r.endpointClient.Endpoints(metav1.NamespaceDefault).Create(e) - return err - } - - // First, determine if the endpoint is in the format we expect (one - // subset, ports matching endpointPorts, N IP addresses). - formatCorrect, ipCorrect, portsCorrect := checkEndpointSubsetFormat(e, ip.String(), endpointPorts, r.masterCount, reconcilePorts) - if !formatCorrect { - // Something is egregiously wrong, just re-make the endpoints record. - e.Subsets = []api.EndpointSubset{{ - Addresses: []api.EndpointAddress{{IP: ip.String()}}, - Ports: endpointPorts, - }} - glog.Warningf("Resetting endpoints for master service %q to %#v", serviceName, e) - _, err = r.endpointClient.Endpoints(metav1.NamespaceDefault).Update(e) - return err - } - if ipCorrect && portsCorrect { - return nil - } - if !ipCorrect { - // We *always* add our own IP address. - e.Subsets[0].Addresses = append(e.Subsets[0].Addresses, api.EndpointAddress{IP: ip.String()}) - - // Lexicographic order is retained by this step. - e.Subsets = endpoints.RepackSubsets(e.Subsets) - - // If too many IP addresses, remove the ones lexicographically after our - // own IP address. Given the requirements stated at the top of - // this function, this should cause the list of IP addresses to - // become eventually correct. 
- if addrs := &e.Subsets[0].Addresses; len(*addrs) > r.masterCount { - // addrs is a pointer because we're going to mutate it. - for i, addr := range *addrs { - if addr.IP == ip.String() { - for len(*addrs) > r.masterCount { - // wrap around if necessary. - remove := (i + 1) % len(*addrs) - *addrs = append((*addrs)[:remove], (*addrs)[remove+1:]...) - } - break - } - } - } - } - if !portsCorrect { - // Reset ports. - e.Subsets[0].Ports = endpointPorts - } - glog.Warningf("Resetting endpoints for master service %q to %v", serviceName, e) - _, err = r.endpointClient.Endpoints(metav1.NamespaceDefault).Update(e) - return err -} - -// Determine if the endpoint is in the format ReconcileEndpoints expects. -// -// Return values: -// * formatCorrect is true if exactly one subset is found. -// * ipCorrect is true when current master's IP is found and the number -// of addresses is less than or equal to the master count. -// * portsCorrect is true when endpoint ports exactly match provided ports. -// portsCorrect is only evaluated when reconcilePorts is set to true. -func checkEndpointSubsetFormat(e *api.Endpoints, ip string, ports []api.EndpointPort, count int, reconcilePorts bool) (formatCorrect bool, ipCorrect bool, portsCorrect bool) { - if len(e.Subsets) != 1 { - return false, false, false - } - sub := &e.Subsets[0] - portsCorrect = true - if reconcilePorts { - if len(sub.Ports) != len(ports) { - portsCorrect = false - } - for i, port := range ports { - if len(sub.Ports) <= i || port != sub.Ports[i] { - portsCorrect = false - break - } - } - } - for _, addr := range sub.Addresses { - if addr.IP == ip { - ipCorrect = len(sub.Addresses) <= count - break - } - } - return true, ipCorrect, portsCorrect -} - -// * getMasterServiceUpdateIfNeeded sets service attributes for the -// given apiserver service. -// * getMasterServiceUpdateIfNeeded expects that the service object it -// manages will be managed only by getMasterServiceUpdateIfNeeded; -// therefore, to understand this, you need only understand the -// requirements and the body of this function. -// * getMasterServiceUpdateIfNeeded ensures that the correct ports are -// are set. -// -// Requirements: -// * All apiservers MUST use getMasterServiceUpdateIfNeeded and only -// getMasterServiceUpdateIfNeeded to manage service attributes -// * updateMasterService is called periodically from all apiservers. -func getMasterServiceUpdateIfNeeded(svc *api.Service, servicePorts []api.ServicePort, serviceType api.ServiceType) (s *api.Service, updated bool) { - // Determine if the service is in the format we expect - // (servicePorts are present and service type matches) - formatCorrect := checkServiceFormat(svc, servicePorts, serviceType) - if formatCorrect { - return svc, false - } - svc.Spec.Ports = servicePorts - svc.Spec.Type = serviceType - return svc, true -} - -// Determine if the service is in the correct format -// getMasterServiceUpdateIfNeeded expects (servicePorts are correct -// and service type matches). 
-func checkServiceFormat(s *api.Service, ports []api.ServicePort, serviceType api.ServiceType) (formatCorrect bool) { - if s.Spec.Type != serviceType { - return false - } - if len(ports) != len(s.Spec.Ports) { - return false - } - for i, port := range ports { - if port != s.Spec.Ports[i] { - return false - } - } - return true -} diff --git a/vendor/k8s.io/kubernetes/pkg/master/controller/crdregistration/BUILD b/vendor/k8s.io/kubernetes/pkg/master/controller/crdregistration/BUILD index b1e187cfd625..b902953bb816 100644 --- a/vendor/k8s.io/kubernetes/pkg/master/controller/crdregistration/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/master/controller/crdregistration/BUILD @@ -9,6 +9,7 @@ load( go_library( name = "go_default_library", srcs = ["crdregistration_controller.go"], + importpath = "k8s.io/kubernetes/pkg/master/controller/crdregistration", deps = [ "//pkg/controller:go_default_library", "//vendor/github.com/golang/glog:go_default_library", @@ -42,6 +43,7 @@ filegroup( go_test( name = "go_default_test", srcs = ["crdregistration_controller_test.go"], + importpath = "k8s.io/kubernetes/pkg/master/controller/crdregistration", library = ":go_default_library", deps = [ "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/master/controller/crdregistration/crdregistration_controller.go b/vendor/k8s.io/kubernetes/pkg/master/controller/crdregistration/crdregistration_controller.go index 5323206f9d67..c3a87b418a40 100644 --- a/vendor/k8s.io/kubernetes/pkg/master/controller/crdregistration/crdregistration_controller.go +++ b/vendor/k8s.io/kubernetes/pkg/master/controller/crdregistration/crdregistration_controller.go @@ -60,10 +60,8 @@ type crdRegistrationController struct { queue workqueue.RateLimitingInterface } -// NewAutoRegistrationController returns a controller which will register TPR GroupVersions with the auto APIService registration +// NewAutoRegistrationController returns a controller which will register CRD GroupVersions with the auto APIService registration // controller so they automatically stay in sync. -// In order to stay sane with both TPR and CRD present, we have a single controller that manages both. When choosing whether to have an -// APIService, we simply iterate through both. 
func NewAutoRegistrationController(crdinformer crdinformers.CustomResourceDefinitionInformer, apiServiceRegistration AutoAPIServiceRegistration) *crdRegistrationController { c := &crdRegistrationController{ crdLister: crdinformer.Lister(), @@ -213,8 +211,8 @@ func (c *crdRegistrationController) handleVersionUpdate(groupVersion schema.Grou Spec: apiregistration.APIServiceSpec{ Group: groupVersion.Group, Version: groupVersion.Version, - GroupPriorityMinimum: 1000, // TPRs should have relatively low priority - VersionPriority: 100, // TPRs should have relatively low priority + GroupPriorityMinimum: 1000, // CRDs should have relatively low priority + VersionPriority: 100, // CRDs should have relatively low priority }, }) diff --git a/vendor/k8s.io/kubernetes/pkg/master/import_known_versions.go b/vendor/k8s.io/kubernetes/pkg/master/import_known_versions.go index 93388ad9b0fd..baf2502e92ef 100644 --- a/vendor/k8s.io/kubernetes/pkg/master/import_known_versions.go +++ b/vendor/k8s.io/kubernetes/pkg/master/import_known_versions.go @@ -20,8 +20,8 @@ package master import ( "fmt" - "k8s.io/kubernetes/pkg/api" - _ "k8s.io/kubernetes/pkg/api/install" + "k8s.io/kubernetes/pkg/api/legacyscheme" + _ "k8s.io/kubernetes/pkg/apis/admission/install" _ "k8s.io/kubernetes/pkg/apis/admissionregistration/install" _ "k8s.io/kubernetes/pkg/apis/apps/install" @@ -31,6 +31,8 @@ import ( _ "k8s.io/kubernetes/pkg/apis/batch/install" _ "k8s.io/kubernetes/pkg/apis/certificates/install" _ "k8s.io/kubernetes/pkg/apis/componentconfig/install" + _ "k8s.io/kubernetes/pkg/apis/core/install" + _ "k8s.io/kubernetes/pkg/apis/events/install" _ "k8s.io/kubernetes/pkg/apis/extensions/install" _ "k8s.io/kubernetes/pkg/apis/imagepolicy/install" _ "k8s.io/kubernetes/pkg/apis/networking/install" @@ -42,7 +44,7 @@ import ( ) func init() { - if missingVersions := api.Registry.ValidateEnvRequestedVersions(); len(missingVersions) != 0 { + if missingVersions := legacyscheme.Registry.ValidateEnvRequestedVersions(); len(missingVersions) != 0 { panic(fmt.Sprintf("KUBE_API_VERSIONS contains versions that are not installed: %q.", missingVersions)) } } diff --git a/vendor/k8s.io/kubernetes/pkg/master/master.go b/vendor/k8s.io/kubernetes/pkg/master/master.go index f9578947f3a8..174e150e7cee 100644 --- a/vendor/k8s.io/kubernetes/pkg/master/master.go +++ b/vendor/k8s.io/kubernetes/pkg/master/master.go @@ -24,6 +24,8 @@ import ( "strconv" "time" + admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" + appsv1 "k8s.io/api/apps/v1" appsv1beta1 "k8s.io/api/apps/v1beta1" appsv1beta2 "k8s.io/api/apps/v1beta2" authenticationv1 "k8s.io/api/authentication/v1" @@ -36,6 +38,7 @@ import ( batchapiv1beta1 "k8s.io/api/batch/v1beta1" certificatesapiv1beta1 "k8s.io/api/certificates/v1beta1" apiv1 "k8s.io/api/core/v1" + eventsv1beta1 "k8s.io/api/events/v1beta1" extensionsapiv1beta1 "k8s.io/api/extensions/v1beta1" networkingapiv1 "k8s.io/api/networking/v1" policyapiv1beta1 "k8s.io/api/policy/v1beta1" @@ -50,12 +53,17 @@ import ( genericapiserver "k8s.io/apiserver/pkg/server" "k8s.io/apiserver/pkg/server/healthz" serverstorage "k8s.io/apiserver/pkg/server/storage" + storagefactory "k8s.io/apiserver/pkg/storage/storagebackend/factory" + "k8s.io/client-go/informers" corev1client "k8s.io/client-go/kubernetes/typed/core/v1" - "k8s.io/kubernetes/cmd/kube-apiserver/app/options" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" + 
kubeoptions "k8s.io/kubernetes/pkg/kubeapiserver/options" kubeletclient "k8s.io/kubernetes/pkg/kubelet/client" + "k8s.io/kubernetes/pkg/master/reconcilers" "k8s.io/kubernetes/pkg/master/tunneler" + "k8s.io/kubernetes/pkg/registry/core/endpoint" + endpointsstorage "k8s.io/kubernetes/pkg/registry/core/endpoint/storage" "k8s.io/kubernetes/pkg/routes" nodeutil "k8s.io/kubernetes/pkg/util/node" @@ -63,7 +71,6 @@ import ( "github.com/prometheus/client_golang/prometheus" // RESTStorage installers - "k8s.io/client-go/informers" admissionregistrationrest "k8s.io/kubernetes/pkg/registry/admissionregistration/rest" appsrest "k8s.io/kubernetes/pkg/registry/apps/rest" authenticationrest "k8s.io/kubernetes/pkg/registry/authentication/rest" @@ -72,6 +79,7 @@ import ( batchrest "k8s.io/kubernetes/pkg/registry/batch/rest" certificatesrest "k8s.io/kubernetes/pkg/registry/certificates/rest" corerest "k8s.io/kubernetes/pkg/registry/core/rest" + eventsrest "k8s.io/kubernetes/pkg/registry/events/rest" extensionsrest "k8s.io/kubernetes/pkg/registry/extensions/rest" networkingrest "k8s.io/kubernetes/pkg/registry/networking/rest" policyrest "k8s.io/kubernetes/pkg/registry/policy/rest" @@ -85,6 +93,8 @@ const ( // DefaultEndpointReconcilerInterval is the default amount of time for how often the endpoints for // the kubernetes Service are reconciled. DefaultEndpointReconcilerInterval = 10 * time.Second + // DefaultEndpointReconcilerTTL is the default TTL timeout for the storage layer + DefaultEndpointReconcilerTTL = 15 * time.Second ) type ExtraConfig struct { @@ -133,6 +143,19 @@ type ExtraConfig struct { // Number of masters running; all masters must be started with the // same value for this field. (Numbers > 1 currently untested.) MasterCount int + + // MasterEndpointReconcileTTL sets the time to live in seconds of an + // endpoint record recorded by each master. The endpoints are checked at an + // interval that is 2/3 of this value and this value defaults to 15s if + // unset. In very large clusters, this value may be increased to reduce the + // possibility that the master endpoint record expires (due to other load + // on the etcd server) and causes masters to drop in and out of the + // kubernetes service record. It is not recommended to set this value below + // 15s. + MasterEndpointReconcileTTL time.Duration + + // Selects which reconciler to use + EndpointReconcilerType reconcilers.Type } type Config struct { @@ -153,7 +176,7 @@ type CompletedConfig struct { // EndpointReconcilerConfig holds the endpoint reconciler and endpoint reconciliation interval to be // used by the master. 
type EndpointReconcilerConfig struct { - Reconciler EndpointReconciler + Reconciler reconcilers.EndpointReconciler Interval time.Duration } @@ -164,6 +187,56 @@ type Master struct { ClientCARegistrationHook ClientCARegistrationHook } +func (c *Config) createMasterCountReconciler() reconcilers.EndpointReconciler { + endpointClient := coreclient.NewForConfigOrDie(c.GenericConfig.LoopbackClientConfig) + return reconcilers.NewMasterCountEndpointReconciler(c.ExtraConfig.MasterCount, endpointClient) +} + +func (c *Config) createNoneReconciler() reconcilers.EndpointReconciler { + return reconcilers.NewNoneEndpointReconciler() +} + +func (c *Config) createLeaseReconciler() reconcilers.EndpointReconciler { + ttl := c.ExtraConfig.MasterEndpointReconcileTTL + config, err := c.ExtraConfig.StorageFactory.NewConfig(api.Resource("apiServerIPInfo")) + if err != nil { + glog.Fatalf("Error determining service IP ranges: %v", err) + } + leaseStorage, _, err := storagefactory.Create(*config) + if err != nil { + glog.Fatalf("Error creating storage factory: %v", err) + } + endpointConfig, err := c.ExtraConfig.StorageFactory.NewConfig(api.Resource("endpoints")) + if err != nil { + glog.Fatalf("Error getting storage config: %v", err) + } + endpointsStorage := endpointsstorage.NewREST(generic.RESTOptions{ + StorageConfig: endpointConfig, + Decorator: generic.UndecoratedStorage, + DeleteCollectionWorkers: 0, + ResourcePrefix: c.ExtraConfig.StorageFactory.ResourcePrefix(api.Resource("endpoints")), + }) + endpointRegistry := endpoint.NewRegistry(endpointsStorage) + masterLeases := reconcilers.NewLeases(leaseStorage, "/masterleases/", ttl) + return reconcilers.NewLeaseEndpointReconciler(endpointRegistry, masterLeases) +} + +func (c *Config) createEndpointReconciler() reconcilers.EndpointReconciler { + glog.Infof("Using reconciler: %v", c.ExtraConfig.EndpointReconcilerType) + switch c.ExtraConfig.EndpointReconcilerType { + // there are numerous test dependencies that depend on a default controller + case "", reconcilers.MasterCountReconcilerType: + return c.createMasterCountReconciler() + case reconcilers.LeaseEndpointReconcilerType: + return c.createLeaseReconciler() + case reconcilers.NoneEndpointReconcilerType: + return c.createNoneReconciler() + default: + glog.Fatalf("Reconciler not implemented: %v", c.ExtraConfig.EndpointReconcilerType) + } + return nil +} + // Complete fills in any fields not set that are required to have valid data. It's mutating the receiver. func (cfg *Config) Complete(informers informers.SharedInformerFactory) CompletedConfig { c := completedConfig{ @@ -192,7 +265,7 @@ func (cfg *Config) Complete(informers informers.SharedInformerFactory) Completed // We should probably allow this for clouds that don't require NodePort to do load-balancing (GCE) // but then that breaks the strict nestedness of ServiceType. // Review post-v1 - c.ExtraConfig.ServiceNodePortRange = options.DefaultServiceNodePortRange + c.ExtraConfig.ServiceNodePortRange = kubeoptions.DefaultServiceNodePortRange glog.Infof("Node port range unspecified. 
Defaulting to %v.", c.ExtraConfig.ServiceNodePortRange) } @@ -203,10 +276,12 @@ func (cfg *Config) Complete(informers informers.SharedInformerFactory) Completed c.ExtraConfig.EndpointReconcilerConfig.Interval = DefaultEndpointReconcilerInterval } + if c.ExtraConfig.MasterEndpointReconcileTTL == 0 { + c.ExtraConfig.MasterEndpointReconcileTTL = DefaultEndpointReconcilerTTL + } + if c.ExtraConfig.EndpointReconcilerConfig.Reconciler == nil { - // use a default endpoint reconciler if nothing is set - endpointClient := coreclient.NewForConfigOrDie(c.GenericConfig.LoopbackClientConfig) - c.ExtraConfig.EndpointReconcilerConfig.Reconciler = NewMasterCountEndpointReconciler(c.ExtraConfig.MasterCount, endpointClient) + c.ExtraConfig.EndpointReconcilerConfig.Reconciler = cfg.createEndpointReconciler() } // this has always been hardcoded true in the past @@ -278,6 +353,7 @@ func (c completedConfig) New(delegationTarget genericapiserver.DelegationTarget) // See https://github.com/kubernetes/kubernetes/issues/42392 appsrest.RESTStorageProvider{}, admissionregistrationrest.RESTStorageProvider{}, + eventsrest.RESTStorageProvider{TTL: c.ExtraConfig.EventTTL}, } m.InstallAPIs(c.ExtraConfig.APIResourceConfigSource, c.GenericConfig.RESTOptionsGetter, restStorageProviders...) @@ -297,9 +373,11 @@ func (m *Master) InstallLegacyAPI(c *completedConfig, restOptionsGetter generic. } if c.ExtraConfig.EnableCoreControllers { + controllerName := "bootstrap-controller" coreClient := coreclient.NewForConfigOrDie(c.GenericConfig.LoopbackClientConfig) bootstrapController := c.NewBootstrapController(legacyRESTStorage, coreClient, coreClient) - m.GenericAPIServer.AddPostStartHookOrDie("bootstrap-controller", bootstrapController.PostStartHook) + m.GenericAPIServer.AddPostStartHookOrDie(controllerName, bootstrapController.PostStartHook) + m.GenericAPIServer.AddPreShutdownHookOrDie(controllerName, bootstrapController.PreShutdownHook) } if err := m.GenericAPIServer.InstallLegacyAPIGroup(genericapiserver.DefaultLegacyAPIPrefix, &apiGroupInfo); err != nil { @@ -395,6 +473,7 @@ func DefaultAPIResourceConfigSource() *serverstorage.ResourceConfig { autoscalingapiv2beta1.SchemeGroupVersion, appsv1beta1.SchemeGroupVersion, appsv1beta2.SchemeGroupVersion, + appsv1.SchemeGroupVersion, policyapiv1beta1.SchemeGroupVersion, rbacv1.SchemeGroupVersion, rbacv1beta1.SchemeGroupVersion, @@ -404,6 +483,8 @@ func DefaultAPIResourceConfigSource() *serverstorage.ResourceConfig { authorizationapiv1.SchemeGroupVersion, authorizationapiv1beta1.SchemeGroupVersion, networkingapiv1.SchemeGroupVersion, + eventsv1beta1.SchemeGroupVersion, + admissionregistrationv1beta1.SchemeGroupVersion, ) // all extensions resources except these are disabled by default diff --git a/vendor/k8s.io/kubernetes/pkg/master/ports/BUILD b/vendor/k8s.io/kubernetes/pkg/master/ports/BUILD index 1b047cda995f..698868138043 100644 --- a/vendor/k8s.io/kubernetes/pkg/master/ports/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/master/ports/BUILD @@ -11,6 +11,7 @@ go_library( "doc.go", "ports.go", ], + importpath = "k8s.io/kubernetes/pkg/master/ports", ) filegroup( diff --git a/vendor/k8s.io/kubernetes/pkg/master/reconcilers/BUILD b/vendor/k8s.io/kubernetes/pkg/master/reconcilers/BUILD new file mode 100644 index 000000000000..3fbb514b0e67 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/master/reconcilers/BUILD @@ -0,0 +1,54 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "lease.go", + "mastercount.go", + 
"none.go", + "reconcilers.go", + ], + importpath = "k8s.io/kubernetes/pkg/master/reconcilers", + visibility = ["//visibility:public"], + deps = [ + "//pkg/api/endpoints:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library", + "//pkg/registry/core/endpoint:go_default_library", + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", + "//vendor/k8s.io/apiserver/pkg/storage:go_default_library", + "//vendor/k8s.io/client-go/util/retry:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["lease_test.go"], + importpath = "k8s.io/kubernetes/pkg/master/reconcilers", + library = ":go_default_library", + deps = [ + "//pkg/apis/core:go_default_library", + "//pkg/registry/registrytest:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/master/reconcilers/doc.go b/vendor/k8s.io/kubernetes/pkg/master/reconcilers/doc.go new file mode 100644 index 000000000000..d397aab778e1 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/master/reconcilers/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package reconcilers provides objects for managing the list of active masters. +// NOTE: The Lease reconciler is not the intended way for any apiserver other +// than kube-apiserver to accomplish the task of Endpoint registration. This is +// a special case for the time being. +package reconcilers diff --git a/vendor/k8s.io/kubernetes/pkg/master/reconcilers/lease.go b/vendor/k8s.io/kubernetes/pkg/master/reconcilers/lease.go new file mode 100644 index 000000000000..934ece29003a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/master/reconcilers/lease.go @@ -0,0 +1,290 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package reconcilers + +/* +Original Source: +https://github.com/openshift/origin/blob/bb340c5dd5ff72718be86fb194dedc0faed7f4c7/pkg/cmd/server/election/lease_endpoint_reconciler.go +*/ + +import ( + "fmt" + "net" + "path" + "sync" + "time" + + "github.com/golang/glog" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kruntime "k8s.io/apimachinery/pkg/runtime" + apirequest "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/registry/rest" + "k8s.io/apiserver/pkg/storage" + "k8s.io/kubernetes/pkg/api/endpoints" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/registry/core/endpoint" +) + +// Leases is an interface which assists in managing the set of active masters +type Leases interface { + // ListLeases retrieves a list of the current master IPs + ListLeases() ([]string, error) + + // UpdateLease adds or refreshes a master's lease + UpdateLease(ip string) error + + // RemoveLease removes a master's lease + RemoveLease(ip string) error +} + +type storageLeases struct { + storage storage.Interface + baseKey string + leaseTime time.Duration +} + +var _ Leases = &storageLeases{} + +// ListLeases retrieves a list of the current master IPs from storage +func (s *storageLeases) ListLeases() ([]string, error) { + ipInfoList := &api.EndpointsList{} + if err := s.storage.List(apirequest.NewDefaultContext(), s.baseKey, "0", storage.Everything, ipInfoList); err != nil { + return nil, err + } + + ipList := make([]string, len(ipInfoList.Items)) + for i, ip := range ipInfoList.Items { + ipList[i] = ip.Subsets[0].Addresses[0].IP + } + + glog.V(6).Infof("Current master IPs listed in storage are %v", ipList) + + return ipList, nil +} + +// UpdateLease resets the TTL on a master IP in storage +func (s *storageLeases) UpdateLease(ip string) error { + key := path.Join(s.baseKey, ip) + return s.storage.GuaranteedUpdate(apirequest.NewDefaultContext(), key, &api.Endpoints{}, true, nil, func(input kruntime.Object, respMeta storage.ResponseMeta) (kruntime.Object, *uint64, error) { + // just make sure we've got the right IP set, and then refresh the TTL + existing := input.(*api.Endpoints) + existing.Subsets = []api.EndpointSubset{ + { + Addresses: []api.EndpointAddress{{IP: ip}}, + }, + } + + // leaseTime needs to be in seconds + leaseTime := uint64(s.leaseTime / time.Second) + + // NB: GuaranteedUpdate does not perform the store operation unless + // something changed between load and store (not including resource + // version), meaning we can't refresh the TTL without actually + // changing a field. + existing.Generation++ + + glog.V(6).Infof("Resetting TTL on master IP %q listed in storage to %v", ip, leaseTime) + + return existing, &leaseTime, nil + }) +} + +// RemoveLease removes the lease on a master IP in storage +func (s *storageLeases) RemoveLease(ip string) error { + return s.storage.Delete(apirequest.NewDefaultContext(), s.baseKey+"/"+ip, &api.Endpoints{}, nil) +} + +// NewLeases creates a new etcd-based Leases implementation. 
+func NewLeases(storage storage.Interface, baseKey string, leaseTime time.Duration) Leases { + return &storageLeases{ + storage: storage, + baseKey: baseKey, + leaseTime: leaseTime, + } +} + +type leaseEndpointReconciler struct { + endpointRegistry endpoint.Registry + masterLeases Leases + stopReconcilingCalled bool + reconcilingLock sync.Mutex +} + +// NewLeaseEndpointReconciler creates a new LeaseEndpoint reconciler +func NewLeaseEndpointReconciler(endpointRegistry endpoint.Registry, masterLeases Leases) EndpointReconciler { + return &leaseEndpointReconciler{ + endpointRegistry: endpointRegistry, + masterLeases: masterLeases, + stopReconcilingCalled: false, + } +} + +// ReconcileEndpoints lists keys in a special etcd directory. +// Each key is expected to have a TTL of R+n, where R is the refresh interval +// at which this function is called, and n is some small value. If an +// apiserver goes down, it will fail to refresh its key's TTL and the key will +// expire. ReconcileEndpoints will notice that the endpoints object is +// different from the directory listing, and update the endpoints object +// accordingly. +func (r *leaseEndpointReconciler) ReconcileEndpoints(serviceName string, ip net.IP, endpointPorts []api.EndpointPort, reconcilePorts bool) error { + r.reconcilingLock.Lock() + defer r.reconcilingLock.Unlock() + + if r.stopReconcilingCalled { + return nil + } + + // Refresh the TTL on our key, independently of whether any error or + // update conflict happens below. This makes sure that at least some of + // the masters will add our endpoint. + if err := r.masterLeases.UpdateLease(ip.String()); err != nil { + return err + } + + return r.doReconcile(serviceName, endpointPorts, reconcilePorts) +} + +func (r *leaseEndpointReconciler) doReconcile(serviceName string, endpointPorts []api.EndpointPort, reconcilePorts bool) error { + ctx := apirequest.NewDefaultContext() + + // Retrieve the current list of endpoints... + e, err := r.endpointRegistry.GetEndpoints(ctx, serviceName, &metav1.GetOptions{}) + if err != nil { + if !errors.IsNotFound(err) { + return err + } + + e = &api.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName, + Namespace: api.NamespaceDefault, + }, + } + } + + // ... and the list of master IP keys from etcd + masterIPs, err := r.masterLeases.ListLeases() + if err != nil { + return err + } + + // Since we just refreshed our own key, assume that zero endpoints + // returned from storage indicates an issue or invalid state, and thus do + // not update the endpoints list based on the result. + if len(masterIPs) == 0 { + return fmt.Errorf("no master IPs were listed in storage, refusing to erase all endpoints for the kubernetes service") + } + + // Next, we compare the current list of endpoints with the list of master IP keys + formatCorrect, ipCorrect, portsCorrect := checkEndpointSubsetFormatWithLease(e, masterIPs, endpointPorts, reconcilePorts) + if formatCorrect && ipCorrect && portsCorrect { + return nil + } + + if !formatCorrect { + // Something is egregiously wrong, just re-make the endpoints record. + e.Subsets = []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{}, + Ports: endpointPorts, + }} + } + + if !formatCorrect || !ipCorrect { + // repopulate the addresses according to the expected IPs from etcd + e.Subsets[0].Addresses = make([]api.EndpointAddress, len(masterIPs)) + for ind, ip := range masterIPs { + e.Subsets[0].Addresses[ind] = api.EndpointAddress{IP: ip} + } + + // Lexicographic order is retained by this step. 
+ e.Subsets = endpoints.RepackSubsets(e.Subsets) + } + + if !portsCorrect { + // Reset ports. + e.Subsets[0].Ports = endpointPorts + } + + glog.Warningf("Resetting endpoints for master service %q to %v", serviceName, masterIPs) + return r.endpointRegistry.UpdateEndpoints(ctx, e, rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc) +} + +// checkEndpointSubsetFormatWithLease determines if the endpoint is in the +// format ReconcileEndpoints expects when the controller is using leases. +// +// Return values: +// * formatCorrect is true if exactly one subset is found. +// * ipsCorrect when the addresses in the endpoints match the expected addresses list +// * portsCorrect is true when endpoint ports exactly match provided ports. +// portsCorrect is only evaluated when reconcilePorts is set to true. +func checkEndpointSubsetFormatWithLease(e *api.Endpoints, expectedIPs []string, ports []api.EndpointPort, reconcilePorts bool) (formatCorrect bool, ipsCorrect bool, portsCorrect bool) { + if len(e.Subsets) != 1 { + return false, false, false + } + sub := &e.Subsets[0] + portsCorrect = true + if reconcilePorts { + if len(sub.Ports) != len(ports) { + portsCorrect = false + } else { + for i, port := range ports { + if port != sub.Ports[i] { + portsCorrect = false + break + } + } + } + } + + ipsCorrect = true + if len(sub.Addresses) != len(expectedIPs) { + ipsCorrect = false + } else { + // check the actual content of the addresses + // present addrs is used as a set (the keys) and to indicate if a + // value was already found (the values) + presentAddrs := make(map[string]bool, len(expectedIPs)) + for _, ip := range expectedIPs { + presentAddrs[ip] = false + } + + // uniqueness is assumed amongst all Addresses. + for _, addr := range sub.Addresses { + if alreadySeen, ok := presentAddrs[addr.IP]; alreadySeen || !ok { + ipsCorrect = false + break + } + + presentAddrs[addr.IP] = true + } + } + + return true, ipsCorrect, portsCorrect +} + +func (r *leaseEndpointReconciler) StopReconciling(serviceName string, ip net.IP, endpointPorts []api.EndpointPort) error { + r.reconcilingLock.Lock() + defer r.reconcilingLock.Unlock() + r.stopReconcilingCalled = true + + if err := r.masterLeases.RemoveLease(ip.String()); err != nil { + return err + } + + return r.doReconcile(serviceName, endpointPorts, true) +} diff --git a/vendor/k8s.io/kubernetes/pkg/master/reconcilers/mastercount.go b/vendor/k8s.io/kubernetes/pkg/master/reconcilers/mastercount.go new file mode 100644 index 000000000000..d7aa4c77185a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/master/reconcilers/mastercount.go @@ -0,0 +1,245 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package reconcilers master count based reconciler +package reconcilers + +import ( + "net" + "sync" + + "github.com/golang/glog" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/util/retry" + "k8s.io/kubernetes/pkg/api/endpoints" + api "k8s.io/kubernetes/pkg/apis/core" + coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" +) + +// masterCountEndpointReconciler reconciles endpoints based on a specified expected number of +// masters. masterCountEndpointReconciler implements EndpointReconciler. +type masterCountEndpointReconciler struct { + masterCount int + endpointClient coreclient.EndpointsGetter + stopReconcilingCalled bool + reconcilingLock sync.Mutex +} + +// NewMasterCountEndpointReconciler creates a new EndpointReconciler that reconciles based on a +// specified expected number of masters. +func NewMasterCountEndpointReconciler(masterCount int, endpointClient coreclient.EndpointsGetter) EndpointReconciler { + return &masterCountEndpointReconciler{ + masterCount: masterCount, + endpointClient: endpointClient, + } +} + +// ReconcileEndpoints sets the endpoints for the given apiserver service (ro or rw). +// ReconcileEndpoints expects that the endpoints objects it manages will all be +// managed only by ReconcileEndpoints; therefore, to understand this, you need only +// understand the requirements and the body of this function. +// +// Requirements: +// * All apiservers MUST use the same ports for their {rw, ro} services. +// * All apiservers MUST use ReconcileEndpoints and only ReconcileEndpoints to manage the +// endpoints for their {rw, ro} services. +// * All apiservers MUST know and agree on the number of apiservers expected +// to be running (c.masterCount). +// * ReconcileEndpoints is called periodically from all apiservers. +func (r *masterCountEndpointReconciler) ReconcileEndpoints(serviceName string, ip net.IP, endpointPorts []api.EndpointPort, reconcilePorts bool) error { + r.reconcilingLock.Lock() + defer r.reconcilingLock.Unlock() + + if r.stopReconcilingCalled { + return nil + } + + e, err := r.endpointClient.Endpoints(metav1.NamespaceDefault).Get(serviceName, metav1.GetOptions{}) + if err != nil { + e = &api.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName, + Namespace: metav1.NamespaceDefault, + }, + } + } + if errors.IsNotFound(err) { + // Simply create non-existing endpoints for the service. + e.Subsets = []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: ip.String()}}, + Ports: endpointPorts, + }} + _, err = r.endpointClient.Endpoints(metav1.NamespaceDefault).Create(e) + return err + } + + // First, determine if the endpoint is in the format we expect (one + // subset, ports matching endpointPorts, N IP addresses). + formatCorrect, ipCorrect, portsCorrect := checkEndpointSubsetFormat(e, ip.String(), endpointPorts, r.masterCount, reconcilePorts) + if !formatCorrect { + // Something is egregiously wrong, just re-make the endpoints record. + e.Subsets = []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: ip.String()}}, + Ports: endpointPorts, + }} + glog.Warningf("Resetting endpoints for master service %q to %#v", serviceName, e) + _, err = r.endpointClient.Endpoints(metav1.NamespaceDefault).Update(e) + return err + } + if ipCorrect && portsCorrect { + return nil + } + if !ipCorrect { + // We *always* add our own IP address. 
+ e.Subsets[0].Addresses = append(e.Subsets[0].Addresses, api.EndpointAddress{IP: ip.String()}) + + // Lexicographic order is retained by this step. + e.Subsets = endpoints.RepackSubsets(e.Subsets) + + // If too many IP addresses, remove the ones lexicographically after our + // own IP address. Given the requirements stated at the top of + // this function, this should cause the list of IP addresses to + // become eventually correct. + if addrs := &e.Subsets[0].Addresses; len(*addrs) > r.masterCount { + // addrs is a pointer because we're going to mutate it. + for i, addr := range *addrs { + if addr.IP == ip.String() { + for len(*addrs) > r.masterCount { + // wrap around if necessary. + remove := (i + 1) % len(*addrs) + *addrs = append((*addrs)[:remove], (*addrs)[remove+1:]...) + } + break + } + } + } + } + if !portsCorrect { + // Reset ports. + e.Subsets[0].Ports = endpointPorts + } + glog.Warningf("Resetting endpoints for master service %q to %v", serviceName, e) + _, err = r.endpointClient.Endpoints(metav1.NamespaceDefault).Update(e) + return err +} + +func (r *masterCountEndpointReconciler) StopReconciling(serviceName string, ip net.IP, endpointPorts []api.EndpointPort) error { + r.reconcilingLock.Lock() + defer r.reconcilingLock.Unlock() + r.stopReconcilingCalled = true + + e, err := r.endpointClient.Endpoints(metav1.NamespaceDefault).Get(serviceName, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + // Endpoint doesn't exist + return nil + } + return err + } + + // Remove our IP from the list of addresses + new := []api.EndpointAddress{} + for _, addr := range e.Subsets[0].Addresses { + if addr.IP != ip.String() { + new = append(new, addr) + } + } + e.Subsets[0].Addresses = new + e.Subsets = endpoints.RepackSubsets(e.Subsets) + err = retry.RetryOnConflict(retry.DefaultBackoff, func() error { + _, err := r.endpointClient.Endpoints(metav1.NamespaceDefault).Update(e) + return err + }) + return err +} + +// Determine if the endpoint is in the format ReconcileEndpoints expects. +// +// Return values: +// * formatCorrect is true if exactly one subset is found. +// * ipCorrect is true when current master's IP is found and the number +// of addresses is less than or equal to the master count. +// * portsCorrect is true when endpoint ports exactly match provided ports. +// portsCorrect is only evaluated when reconcilePorts is set to true. +func checkEndpointSubsetFormat(e *api.Endpoints, ip string, ports []api.EndpointPort, count int, reconcilePorts bool) (formatCorrect bool, ipCorrect bool, portsCorrect bool) { + if len(e.Subsets) != 1 { + return false, false, false + } + sub := &e.Subsets[0] + portsCorrect = true + if reconcilePorts { + if len(sub.Ports) != len(ports) { + portsCorrect = false + } + for i, port := range ports { + if len(sub.Ports) <= i || port != sub.Ports[i] { + portsCorrect = false + break + } + } + } + for _, addr := range sub.Addresses { + if addr.IP == ip { + ipCorrect = len(sub.Addresses) <= count + break + } + } + return true, ipCorrect, portsCorrect +} + +// GetMasterServiceUpdateIfNeeded sets service attributes for the +// given apiserver service. +// * GetMasterServiceUpdateIfNeeded expects that the service object it +// manages will be managed only by GetMasterServiceUpdateIfNeeded; +// therefore, to understand this, you need only understand the +// requirements and the body of this function. +// * GetMasterServiceUpdateIfNeeded ensures that the correct ports are +// are set. 
+// +// Requirements: +// * All apiservers MUST use GetMasterServiceUpdateIfNeeded and only +// GetMasterServiceUpdateIfNeeded to manage service attributes +// * updateMasterService is called periodically from all apiservers. +func GetMasterServiceUpdateIfNeeded(svc *api.Service, servicePorts []api.ServicePort, serviceType api.ServiceType) (s *api.Service, updated bool) { + // Determine if the service is in the format we expect + // (servicePorts are present and service type matches) + formatCorrect := checkServiceFormat(svc, servicePorts, serviceType) + if formatCorrect { + return svc, false + } + svc.Spec.Ports = servicePorts + svc.Spec.Type = serviceType + return svc, true +} + +// Determine if the service is in the correct format +// GetMasterServiceUpdateIfNeeded expects (servicePorts are correct +// and service type matches). +func checkServiceFormat(s *api.Service, ports []api.ServicePort, serviceType api.ServiceType) (formatCorrect bool) { + if s.Spec.Type != serviceType { + return false + } + if len(ports) != len(s.Spec.Ports) { + return false + } + for i, port := range ports { + if port != s.Spec.Ports[i] { + return false + } + } + return true +} diff --git a/vendor/k8s.io/kubernetes/pkg/master/reconcilers/none.go b/vendor/k8s.io/kubernetes/pkg/master/reconcilers/none.go new file mode 100644 index 000000000000..dce38c2a66aa --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/master/reconcilers/none.go @@ -0,0 +1,42 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package reconcilers a noop based reconciler +package reconcilers + +import ( + api "k8s.io/kubernetes/pkg/apis/core" + "net" +) + +// NoneEndpointReconciler allows for the endpoint reconciler to be disabled +type noneEndpointReconciler struct{} + +// NewNoneEndpointReconciler creates a new EndpointReconciler that reconciles based on a +// nothing. It is a no-op. +func NewNoneEndpointReconciler() EndpointReconciler { + return &noneEndpointReconciler{} +} + +// ReconcileEndpoints noop reconcile +func (r *noneEndpointReconciler) ReconcileEndpoints(serviceName string, ip net.IP, endpointPorts []api.EndpointPort, reconcilePorts bool) error { + return nil +} + +// StopReconciling noop reconcile +func (r *noneEndpointReconciler) StopReconciling(serviceName string, ip net.IP, endpointPorts []api.EndpointPort) error { + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/master/reconcilers/reconcilers.go b/vendor/k8s.io/kubernetes/pkg/master/reconcilers/reconcilers.go new file mode 100644 index 000000000000..8bbabc659019 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/master/reconcilers/reconcilers.go @@ -0,0 +1,70 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package reconcilers Endpoint Reconcilers for the apiserver +package reconcilers + +import ( + api "k8s.io/kubernetes/pkg/apis/core" + "net" +) + +// EndpointReconciler knows how to reconcile the endpoints for the apiserver service. +type EndpointReconciler interface { + // ReconcileEndpoints sets the endpoints for the given apiserver service (ro or rw). + // ReconcileEndpoints expects that the endpoints objects it manages will all be + // managed only by ReconcileEndpoints; therefore, to understand this, you need only + // understand the requirements. + // + // Requirements: + // * All apiservers MUST use the same ports for their {rw, ro} services. + // * All apiservers MUST use ReconcileEndpoints and only ReconcileEndpoints to manage the + // endpoints for their {rw, ro} services. + // * ReconcileEndpoints is called periodically from all apiservers. + ReconcileEndpoints(serviceName string, ip net.IP, endpointPorts []api.EndpointPort, reconcilePorts bool) error + StopReconciling(serviceName string, ip net.IP, endpointPorts []api.EndpointPort) error +} + +// Type the reconciler type +type Type string + +const ( + // MasterCountReconcilerType will select the original reconciler + MasterCountReconcilerType Type = "master-count" + // LeaseEndpointReconcilerType will select a storage based reconciler + LeaseEndpointReconcilerType = "lease" + // NoneEndpointReconcilerType will turn off the endpoint reconciler + NoneEndpointReconcilerType = "none" +) + +// Types an array of reconciler types +type Types []Type + +// AllTypes export all reconcilers +var AllTypes = Types{ + MasterCountReconcilerType, + LeaseEndpointReconcilerType, + NoneEndpointReconcilerType, +} + +// Names returns a slice of all the reconciler names +func (t Types) Names() []string { + strs := make([]string, len(t)) + for i, v := range t { + strs[i] = string(v) + } + return strs +} diff --git a/vendor/k8s.io/kubernetes/pkg/master/tunneler/BUILD b/vendor/k8s.io/kubernetes/pkg/master/tunneler/BUILD index 906195f4b541..dcaa51b8ea75 100644 --- a/vendor/k8s.io/kubernetes/pkg/master/tunneler/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/master/tunneler/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["ssh_test.go"], + importpath = "k8s.io/kubernetes/pkg/master/tunneler", library = ":go_default_library", deps = [ "//vendor/github.com/stretchr/testify/assert:go_default_library", @@ -19,6 +20,7 @@ go_test( go_library( name = "go_default_library", srcs = ["ssh.go"], + importpath = "k8s.io/kubernetes/pkg/master/tunneler", deps = [ "//pkg/ssh:go_default_library", "//pkg/util/file:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/printers/BUILD b/vendor/k8s.io/kubernetes/pkg/printers/BUILD index b2336dc0ab40..c59f547fc22d 100644 --- a/vendor/k8s.io/kubernetes/pkg/printers/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/printers/BUILD @@ -21,6 +21,7 @@ go_library( "template.go", "versioned.go", ], + importpath = "k8s.io/kubernetes/pkg/printers", deps = [ "//vendor/github.com/ghodss/yaml:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", @@ -38,9 +39,11 @@ go_library( go_test( name = 
"go_default_xtest", srcs = ["customcolumn_test.go"], + importpath = "k8s.io/kubernetes/pkg/printers_test", deps = [ ":go_default_library", - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", @@ -63,3 +66,16 @@ filegroup( ], tags = ["automanaged"], ) + +go_test( + name = "go_default_test", + srcs = ["humanreadable_test.go"], + importpath = "k8s.io/kubernetes/pkg/printers", + library = ":go_default_library", + deps = [ + "//pkg/apis/core:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/printers/customcolumn.go b/vendor/k8s.io/kubernetes/pkg/printers/customcolumn.go index eec16ea18aa1..bba5bc4ced58 100644 --- a/vendor/k8s.io/kubernetes/pkg/printers/customcolumn.go +++ b/vendor/k8s.io/kubernetes/pkg/printers/customcolumn.go @@ -176,7 +176,7 @@ func (s *CustomColumnsPrinter) PrintObj(obj runtime.Object, out io.Writer) error } parsers := make([]*jsonpath.JSONPath, len(s.Columns)) for ix := range s.Columns { - parsers[ix] = jsonpath.New(fmt.Sprintf("column%d", ix)) + parsers[ix] = jsonpath.New(fmt.Sprintf("column%d", ix)).AllowMissingKeys(true) if err := parsers[ix].Parse(s.Columns[ix].FieldSpec); err != nil { return err } @@ -226,10 +226,10 @@ func (s *CustomColumnsPrinter) printOneObject(obj runtime.Object, parsers []*jso if err != nil { return err } + valueStrings := []string{} if len(values) == 0 || len(values[0]) == 0 { - fmt.Fprintf(out, "\t") + valueStrings = append(valueStrings, "") } - valueStrings := []string{} for arrIx := range values { for valIx := range values[arrIx] { valueStrings = append(valueStrings, fmt.Sprintf("%v", values[arrIx][valIx].Interface())) diff --git a/vendor/k8s.io/kubernetes/pkg/printers/humanreadable.go b/vendor/k8s.io/kubernetes/pkg/printers/humanreadable.go index a4b1945746c8..73ef967b1504 100644 --- a/vendor/k8s.io/kubernetes/pkg/printers/humanreadable.go +++ b/vendor/k8s.io/kubernetes/pkg/printers/humanreadable.go @@ -211,7 +211,7 @@ func (h *HumanReadablePrinter) DefaultTableHandler(columnDefinitions []metav1alp // func printFunc(object ObjectType, options PrintOptions) ([]metav1alpha1.TableRow, error) // where ObjectType is the type of the object that will be printed, and the first // return value is an array of rows, with each row containing a number of cells that -// match the number of coulmns defined for that printer function. +// match the number of columns defined for that printer function. func ValidateRowPrintHandlerFunc(printFunc reflect.Value) error { if printFunc.Kind() != reflect.Func { return fmt.Errorf("invalid print handler. 
%#v is not a function", printFunc) @@ -525,6 +525,16 @@ func (h *HumanReadablePrinter) PrintTable(obj runtime.Object, options PrintOptio ColumnDefinitions: columns, Rows: results[0].Interface().([]metav1alpha1.TableRow), } + if m, err := meta.ListAccessor(obj); err == nil { + table.ResourceVersion = m.GetResourceVersion() + table.SelfLink = m.GetSelfLink() + table.Continue = m.GetContinue() + } else { + if m, err := meta.CommonAccessor(obj); err == nil { + table.ResourceVersion = m.GetResourceVersion() + table.SelfLink = m.GetSelfLink() + } + } if err := DecorateTable(table, options); err != nil { return nil, err } @@ -535,6 +545,15 @@ func (h *HumanReadablePrinter) PrintTable(obj runtime.Object, options PrintOptio // different from lastType) including all the rows in the object. It returns the current type // or an error, if any. func printRowsForHandlerEntry(output io.Writer, handler *handlerEntry, obj runtime.Object, options PrintOptions, includeHeaders bool) error { + var results []reflect.Value + if handler.printRows { + args := []reflect.Value{reflect.ValueOf(obj), reflect.ValueOf(options)} + results = handler.printFunc.Call(args) + if !results[1].IsNil() { + return results[1].Interface().(error) + } + } + if includeHeaders { var headers []string for _, column := range handler.columnDefinitions { @@ -562,15 +581,12 @@ func printRowsForHandlerEntry(output io.Writer, handler *handlerEntry, obj runti return resultValue.Interface().(error) } - args := []reflect.Value{reflect.ValueOf(obj), reflect.ValueOf(options)} - results := handler.printFunc.Call(args) if results[1].IsNil() { rows := results[0].Interface().([]metav1alpha1.TableRow) printRows(output, rows, options) return nil } return results[1].Interface().(error) - } // printRows writes the provided rows to output. diff --git a/vendor/k8s.io/kubernetes/pkg/printers/interface.go b/vendor/k8s.io/kubernetes/pkg/printers/interface.go index 52712158646d..5bda6c61753d 100644 --- a/vendor/k8s.io/kubernetes/pkg/printers/interface.go +++ b/vendor/k8s.io/kubernetes/pkg/printers/interface.go @@ -57,6 +57,10 @@ func (fn ResourcePrinterFunc) IsGeneric() bool { } type PrintOptions struct { + // supported Format types can be found in pkg/printers/printers.go + OutputFormatType string + OutputFormatArgument string + NoHeaders bool WithNamespace bool WithKind bool @@ -66,6 +70,11 @@ type PrintOptions struct { AbsoluteTimestamps bool Kind string ColumnLabels []string + + SortBy string + + // indicates if it is OK to ignore missing keys for rendering an output template. + AllowMissingKeys bool } // Describer generates output for the named resource or an error @@ -100,13 +109,3 @@ type ErrNoDescriber struct { func (e ErrNoDescriber) Error() string { return fmt.Sprintf("no describer has been defined for %v", e.Types) } - -// OutputOptions represents resource output options which is used to generate a resource printer. -type OutputOptions struct { - // supported Format types can be found in pkg/printers/printers.go - FmtType string - FmtArg string - - // indicates if it is OK to ignore missing keys for rendering an output template. 
- AllowMissingKeys bool -} diff --git a/vendor/k8s.io/kubernetes/pkg/printers/internalversion/BUILD b/vendor/k8s.io/kubernetes/pkg/printers/internalversion/BUILD index d8d0eedf184e..522478a731e5 100644 --- a/vendor/k8s.io/kubernetes/pkg/printers/internalversion/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/printers/internalversion/BUILD @@ -13,16 +13,17 @@ go_test( "printers_test.go", "sorted_resource_name_list_test.go", ], + importpath = "k8s.io/kubernetes/pkg/printers/internalversion", library = ":go_default_library", deps = [ - "//federation/apis/federation/v1beta1:go_default_library", - "//federation/client/clientset_generated/federation_clientset/fake:go_default_library", - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", "//pkg/apis/apps:go_default_library", "//pkg/apis/autoscaling:go_default_library", "//pkg/apis/batch:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", + "//pkg/apis/networking:go_default_library", "//pkg/apis/policy:go_default_library", "//pkg/apis/storage:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", @@ -54,20 +55,19 @@ go_library( "describe.go", "printers.go", ], + importpath = "k8s.io/kubernetes/pkg/printers/internalversion", deps = [ - "//federation/apis/federation:go_default_library", - "//federation/apis/federation/v1beta1:go_default_library", - "//federation/client/clientset_generated/federation_clientset:go_default_library", - "//pkg/api:go_default_library", "//pkg/api/events:go_default_library", - "//pkg/api/helper:go_default_library", - "//pkg/api/helper/qos:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/api/ref:go_default_library", "//pkg/api/resource:go_default_library", "//pkg/apis/apps:go_default_library", "//pkg/apis/autoscaling:go_default_library", "//pkg/apis/batch:go_default_library", "//pkg/apis/certificates:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/helper:go_default_library", + "//pkg/apis/core/helper/qos:go_default_library", "//pkg/apis/extensions:go_default_library", "//pkg/apis/networking:go_default_library", "//pkg/apis/policy:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/printers/internalversion/describe.go b/vendor/k8s.io/kubernetes/pkg/printers/internalversion/describe.go index 35cc15a847a2..c33b1c636ceb 100644 --- a/vendor/k8s.io/kubernetes/pkg/printers/internalversion/describe.go +++ b/vendor/k8s.io/kubernetes/pkg/printers/internalversion/describe.go @@ -19,7 +19,6 @@ package internalversion import ( "bytes" "crypto/x509" - "encoding/json" "fmt" "io" "net" @@ -47,18 +46,17 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/dynamic" clientextensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" - federation "k8s.io/kubernetes/federation/apis/federation/v1beta1" - fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/events" - "k8s.io/kubernetes/pkg/api/helper" - "k8s.io/kubernetes/pkg/api/helper/qos" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/api/ref" resourcehelper "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/apis/autoscaling" "k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/pkg/apis/certificates" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/helper" + 
"k8s.io/kubernetes/pkg/apis/core/helper/qos" "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/apis/networking" "k8s.io/kubernetes/pkg/apis/policy" @@ -211,7 +209,7 @@ func (g *genericDescriber) Describe(namespace, name string, describerSettings pr var events *api.EventList if describerSettings.ShowEvents { - events, _ = g.events.Events(namespace).Search(api.Scheme, obj) + events, _ = g.events.Events(namespace).Search(legacyscheme.Scheme, obj) } return tabbedString(func(out io.Writer) error { @@ -607,11 +605,11 @@ func (d *PodDescriber) Describe(namespace, name string, describerSettings printe var events *api.EventList if describerSettings.ShowEvents { - if ref, err := ref.GetReference(api.Scheme, pod); err != nil { + if ref, err := ref.GetReference(legacyscheme.Scheme, pod); err != nil { glog.Errorf("Unable to construct reference to '%#v': %v", pod, err) } else { ref.Kind = "" - events, _ = d.Core().Events(namespace).Search(api.Scheme, ref) + events, _ = d.Core().Events(namespace).Search(legacyscheme.Scheme, ref) } } @@ -623,6 +621,10 @@ func describePod(pod *api.Pod, events *api.EventList) (string, error) { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%s\n", pod.Name) w.Write(LEVEL_0, "Namespace:\t%s\n", pod.Namespace) + if pod.Spec.Priority != nil { + w.Write(LEVEL_0, "Priority:\t%d\n", *pod.Spec.Priority) + w.Write(LEVEL_0, "PriorityClassName:\t%s\n", stringOrNone(pod.Spec.PriorityClassName)) + } if pod.Spec.NodeName == "" { w.Write(LEVEL_0, "Node:\t\n") } else { @@ -646,9 +648,6 @@ func describePod(pod *api.Pod, events *api.EventList) (string, error) { w.Write(LEVEL_0, "Message:\t%s\n", pod.Status.Message) } w.Write(LEVEL_0, "IP:\t%s\n", pod.Status.PodIP) - if createdBy := printCreator(pod.Annotations); len(createdBy) > 0 { - w.Write(LEVEL_0, "Created By:\t%s\n", createdBy) - } if controlledBy := printController(pod); len(controlledBy) > 0 { w.Write(LEVEL_0, "Controlled By:\t%s\n", controlledBy) } @@ -687,18 +686,6 @@ func printController(controllee metav1.Object) string { return "" } -func printCreator(annotation map[string]string) string { - value, ok := annotation[api.CreatedByAnnotation] - if ok { - var r api.SerializedReference - err := json.Unmarshal([]byte(value), &r) - if err == nil { - return fmt.Sprintf("%s/%s", r.Reference.Kind, r.Reference.Name) - } - } - return "" -} - func describeVolumes(volumes []api.Volume, w PrefixWriter, space string) { if volumes == nil || len(volumes) == 0 { w.Write(LEVEL_0, "%sVolumes:\t\n", space) @@ -771,8 +758,14 @@ func describeVolumes(volumes []api.Volume, w PrefixWriter, space string) { } func printHostPathVolumeSource(hostPath *api.HostPathVolumeSource, w PrefixWriter) { + hostPathType := "" + if hostPath.Type != nil { + hostPathType = string(*hostPath.Type) + } w.Write(LEVEL_2, "Type:\tHostPath (bare host directory volume)\n"+ - " Path:\t%v\n", hostPath.Path) + " Path:\t%v\n"+ + " HostPathType:\t%v\n", + hostPath.Path, hostPathType) } func printEmptyDirVolumeSource(emptyDir *api.EmptyDirVolumeSource, w PrefixWriter) { @@ -863,6 +856,26 @@ func printISCSIVolumeSource(iscsi *api.ISCSIVolumeSource, w PrefixWriter) { iscsi.TargetPortal, iscsi.IQN, iscsi.Lun, iscsi.ISCSIInterface, iscsi.FSType, iscsi.ReadOnly, iscsi.Portals, iscsi.DiscoveryCHAPAuth, iscsi.SessionCHAPAuth, iscsi.SecretRef, initiator) } +func printISCSIPersistentVolumeSource(iscsi *api.ISCSIPersistentVolumeSource, w PrefixWriter) { + initiatorName := "" + if iscsi.InitiatorName != nil { + initiatorName = *iscsi.InitiatorName + } + w.Write(LEVEL_2, 
"Type:\tISCSI (an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod)\n"+ + " TargetPortal:\t%v\n"+ + " IQN:\t%v\n"+ + " Lun:\t%v\n"+ + " ISCSIInterface\t%v\n"+ + " FSType:\t%v\n"+ + " ReadOnly:\t%v\n"+ + " Portals:\t%v\n"+ + " DiscoveryCHAPAuth:\t%v\n"+ + " SessionCHAPAuth:\t%v\n"+ + " SecretRef:\t%v\n"+ + " InitiatorName:\t%v\n", + iscsi.TargetPortal, iscsi.IQN, iscsi.Lun, iscsi.ISCSIInterface, iscsi.FSType, iscsi.ReadOnly, iscsi.Portals, iscsi.DiscoveryCHAPAuth, iscsi.SessionCHAPAuth, iscsi.SecretRef, initiatorName) +} + func printGlusterfsVolumeSource(glusterfs *api.GlusterfsVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tGlusterfs (a Glusterfs mount on the host that shares a pod's lifetime)\n"+ " EndpointsName:\t%v\n"+ @@ -891,6 +904,19 @@ func printRBDVolumeSource(rbd *api.RBDVolumeSource, w PrefixWriter) { rbd.CephMonitors, rbd.RBDImage, rbd.FSType, rbd.RBDPool, rbd.RadosUser, rbd.Keyring, rbd.SecretRef, rbd.ReadOnly) } +func printRBDPersistentVolumeSource(rbd *api.RBDPersistentVolumeSource, w PrefixWriter) { + w.Write(LEVEL_2, "Type:\tRBD (a Rados Block Device mount on the host that shares a pod's lifetime)\n"+ + " CephMonitors:\t%v\n"+ + " RBDImage:\t%v\n"+ + " FSType:\t%v\n"+ + " RBDPool:\t%v\n"+ + " RadosUser:\t%v\n"+ + " Keyring:\t%v\n"+ + " SecretRef:\t%v\n"+ + " ReadOnly:\t%v\n", + rbd.CephMonitors, rbd.RBDImage, rbd.FSType, rbd.RBDPool, rbd.RadosUser, rbd.Keyring, rbd.SecretRef, rbd.ReadOnly) +} + func printDownwardAPIVolumeSource(d *api.DownwardAPIVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tDownwardAPI (a volume populated by information about the pod)\n Items:\n") for _, mapping := range d.Items { @@ -950,6 +976,26 @@ func printScaleIOVolumeSource(sio *api.ScaleIOVolumeSource, w PrefixWriter) { sio.Gateway, sio.System, sio.ProtectionDomain, sio.StoragePool, sio.StorageMode, sio.VolumeName, sio.FSType, sio.ReadOnly) } +func printScaleIOPersistentVolumeSource(sio *api.ScaleIOPersistentVolumeSource, w PrefixWriter) { + var secretNS, secretName string + if sio.SecretRef != nil { + secretName = sio.SecretRef.Name + secretNS = sio.SecretRef.Namespace + } + w.Write(LEVEL_2, "Type:\tScaleIO (a persistent volume backed by a block device in ScaleIO)\n"+ + " Gateway:\t%v\n"+ + " System:\t%v\n"+ + " Protection Domain:\t%v\n"+ + " Storage Pool:\t%v\n"+ + " Storage Mode:\t%v\n"+ + " VolumeName:\t%v\n"+ + " SecretName:\t%v\n"+ + " SecretNamespace:\t%v\n"+ + " FSType:\t%v\n"+ + " ReadOnly:\t%v\n", + sio.Gateway, sio.System, sio.ProtectionDomain, sio.StoragePool, sio.StorageMode, sio.VolumeName, secretName, secretNS, sio.FSType, sio.ReadOnly) +} + func printLocalVolumeSource(ls *api.LocalVolumeSource, w PrefixWriter) { w.Write(LEVEL_2, "Type:\tLocalVolume (a persistent volume backed by local storage on a node)\n"+ " Path:\t%v\n", @@ -1047,6 +1093,14 @@ func printFlockerVolumeSource(flocker *api.FlockerVolumeSource, w PrefixWriter) flocker.DatasetName, flocker.DatasetUUID) } +func printCSIPersistentVolumeSource(csi *api.CSIPersistentVolumeSource, w PrefixWriter) { + w.Write(LEVEL_2, "Type:\tCSI (a Container Storage Interface (CSI) volume source)\n"+ + " Driver:\t%v\n"+ + " VolumeHandle:\t%v\n", + " ReadOnly:\t%v\n", + csi.Driver, csi.VolumeHandle, csi.ReadOnly) +} + type PersistentVolumeDescriber struct { clientset.Interface } @@ -1061,7 +1115,7 @@ func (d *PersistentVolumeDescriber) Describe(namespace, name string, describerSe var events *api.EventList if describerSettings.ShowEvents { - events, _ = 
d.Core().Events(namespace).Search(api.Scheme, pv) + events, _ = d.Core().Events(namespace).Search(legacyscheme.Scheme, pv) } return describePersistentVolume(pv, events) @@ -1082,6 +1136,9 @@ func describePersistentVolume(pv *api.PersistentVolume, events *api.EventList) ( } w.Write(LEVEL_0, "Reclaim Policy:\t%v\n", pv.Spec.PersistentVolumeReclaimPolicy) w.Write(LEVEL_0, "Access Modes:\t%s\n", helper.GetAccessModesAsString(pv.Spec.AccessModes)) + if pv.Spec.VolumeMode != nil { + w.Write(LEVEL_0, "VolumeMode:\t%v\n", *pv.Spec.VolumeMode) + } storage := pv.Spec.Capacity[api.ResourceStorage] w.Write(LEVEL_0, "Capacity:\t%s\n", storage.String()) w.Write(LEVEL_0, "Message:\t%s\n", pv.Status.Message) @@ -1097,11 +1154,11 @@ func describePersistentVolume(pv *api.PersistentVolume, events *api.EventList) ( case pv.Spec.NFS != nil: printNFSVolumeSource(pv.Spec.NFS, w) case pv.Spec.ISCSI != nil: - printISCSIVolumeSource(pv.Spec.ISCSI, w) + printISCSIPersistentVolumeSource(pv.Spec.ISCSI, w) case pv.Spec.Glusterfs != nil: printGlusterfsVolumeSource(pv.Spec.Glusterfs, w) case pv.Spec.RBD != nil: - printRBDVolumeSource(pv.Spec.RBD, w) + printRBDPersistentVolumeSource(pv.Spec.RBD, w) case pv.Spec.Quobyte != nil: printQuobyteVolumeSource(pv.Spec.Quobyte, w) case pv.Spec.VsphereVolume != nil: @@ -1115,7 +1172,7 @@ func describePersistentVolume(pv *api.PersistentVolume, events *api.EventList) ( case pv.Spec.PortworxVolume != nil: printPortworxVolumeSource(pv.Spec.PortworxVolume, w) case pv.Spec.ScaleIO != nil: - printScaleIOVolumeSource(pv.Spec.ScaleIO, w) + printScaleIOPersistentVolumeSource(pv.Spec.ScaleIO, w) case pv.Spec.Local != nil: printLocalVolumeSource(pv.Spec.Local, w) case pv.Spec.CephFS != nil: @@ -1130,6 +1187,8 @@ func describePersistentVolume(pv *api.PersistentVolume, events *api.EventList) ( printFlexVolumeSource(pv.Spec.FlexVolume, w) case pv.Spec.Flocker != nil: printFlockerVolumeSource(pv.Spec.Flocker, w) + case pv.Spec.CSI != nil: + printCSIPersistentVolumeSource(pv.Spec.CSI, w) default: w.Write(LEVEL_1, "\n") } @@ -1154,7 +1213,7 @@ func (d *PersistentVolumeClaimDescriber) Describe(namespace, name string, descri return "", err } - events, _ := d.Core().Events(namespace).Search(api.Scheme, pvc) + events, _ := d.Core().Events(namespace).Search(legacyscheme.Scheme, pvc) return describePersistentVolumeClaim(pvc, events) } @@ -1165,10 +1224,15 @@ func describePersistentVolumeClaim(pvc *api.PersistentVolumeClaim, events *api.E w.Write(LEVEL_0, "Name:\t%s\n", pvc.Name) w.Write(LEVEL_0, "Namespace:\t%s\n", pvc.Namespace) w.Write(LEVEL_0, "StorageClass:\t%s\n", helper.GetPersistentVolumeClaimClass(pvc)) - w.Write(LEVEL_0, "Status:\t%v\n", pvc.Status.Phase) + if pvc.ObjectMeta.DeletionTimestamp != nil { + w.Write(LEVEL_0, "Status:\tTerminating (since %s)\n", pvc.ObjectMeta.DeletionTimestamp.Time.Format(time.RFC1123Z)) + } else { + w.Write(LEVEL_0, "Status:\t%v\n", pvc.Status.Phase) + } w.Write(LEVEL_0, "Volume:\t%s\n", pvc.Spec.VolumeName) printLabelsMultiline(w, "Labels", pvc.Labels) printAnnotationsMultiline(w, "Annotations", pvc.Annotations) + w.Write(LEVEL_0, "Finalizers:\t%v\n", pvc.ObjectMeta.Finalizers) storage := pvc.Spec.Resources.Requests[api.ResourceStorage] capacity := "" accessModes := "" @@ -1179,6 +1243,9 @@ func describePersistentVolumeClaim(pvc *api.PersistentVolumeClaim, events *api.E } w.Write(LEVEL_0, "Capacity:\t%s\n", capacity) w.Write(LEVEL_0, "Access Modes:\t%s\n", accessModes) + if pvc.Spec.VolumeMode != nil { + w.Write(LEVEL_0, "VolumeMode:\t%v\n", *pvc.Spec.VolumeMode) + } 
if events != nil { DescribeEvents(events, w) } @@ -1309,6 +1376,7 @@ func describeContainerProbe(container api.Container, w PrefixWriter) { } func describeContainerVolumes(container api.Container, w PrefixWriter) { + // Show volumeMounts none := "" if len(container.VolumeMounts) == 0 { none = "\t" @@ -1327,6 +1395,14 @@ func describeContainerVolumes(container api.Container, w PrefixWriter) { } w.Write(LEVEL_3, "%s from %s (%s)\n", mount.MountPath, mount.Name, strings.Join(flags, ",")) } + // Show volumeDevices if exists + if len(container.VolumeDevices) > 0 { + w.Write(LEVEL_2, "Devices:%s\n", none) + sort.Sort(SortableVolumeDevices(container.VolumeDevices)) + for _, device := range container.VolumeDevices { + w.Write(LEVEL_3, "%s from %s\n", device.DevicePath, device.Name) + } + } } func describeContainerEnvVars(container api.Container, resolverFn EnvVarResolverFunc, w PrefixWriter) { @@ -1424,7 +1500,7 @@ type EnvVarResolverFunc func(e api.EnvVar) string // EnvValueFrom is exported for use by describers in other packages func EnvValueRetriever(pod *api.Pod) EnvVarResolverFunc { return func(e api.EnvVar) string { - internalFieldPath, _, err := api.Scheme.ConvertFieldLabel(e.ValueFrom.FieldRef.APIVersion, "Pod", e.ValueFrom.FieldRef.FieldPath, "") + internalFieldPath, _, err := legacyscheme.Scheme.ConvertFieldLabel(e.ValueFrom.FieldRef.APIVersion, "Pod", e.ValueFrom.FieldRef.FieldPath, "") if err != nil { return "" // pod validation should catch this on create } @@ -1525,7 +1601,7 @@ func (d *ReplicationControllerDescriber) Describe(namespace, name string, descri var events *api.EventList if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(api.Scheme, controller) + events, _ = d.Core().Events(namespace).Search(legacyscheme.Scheme, controller) } return describeReplicationController(controller, events, running, waiting, succeeded, failed) @@ -1599,7 +1675,7 @@ func (d *ReplicaSetDescriber) Describe(namespace, name string, describerSettings var events *api.EventList if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(api.Scheme, rs) + events, _ = d.Core().Events(namespace).Search(legacyscheme.Scheme, rs) } return describeReplicaSet(rs, events, running, waiting, succeeded, failed, getPodErr) @@ -1651,7 +1727,7 @@ func (d *JobDescriber) Describe(namespace, name string, describerSettings printe var events *api.EventList if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(api.Scheme, job) + events, _ = d.Core().Events(namespace).Search(legacyscheme.Scheme, job) } return describeJob(job, events) @@ -1666,8 +1742,8 @@ func describeJob(job *batch.Job, events *api.EventList) (string, error) { w.Write(LEVEL_0, "Selector:\t%s\n", selector) printLabelsMultiline(w, "Labels", job.Labels) printAnnotationsMultiline(w, "Annotations", job.Annotations) - if createdBy := printCreator(job.Annotations); len(createdBy) > 0 { - w.Write(LEVEL_0, "Created By:\t%s\n", createdBy) + if controlledBy := printController(job); len(controlledBy) > 0 { + w.Write(LEVEL_0, "Controlled By:\t%s\n", controlledBy) } w.Write(LEVEL_0, "Parallelism:\t%d\n", *job.Spec.Parallelism) if job.Spec.Completions != nil { @@ -1703,7 +1779,7 @@ func (d *CronJobDescriber) Describe(namespace, name string, describerSettings pr var events *api.EventList if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(api.Scheme, cronJob) + events, _ = d.Core().Events(namespace).Search(legacyscheme.Scheme, cronJob) } return 
describeCronJob(cronJob, events) @@ -1802,7 +1878,7 @@ func (d *DaemonSetDescriber) Describe(namespace, name string, describerSettings var events *api.EventList if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(api.Scheme, daemon) + events, _ = d.Core().Events(namespace).Search(legacyscheme.Scheme, daemon) } return describeDaemonSet(daemon, events, running, waiting, succeeded, failed) @@ -1953,7 +2029,7 @@ func (i *IngressDescriber) describeIngress(ing *extensions.Ingress, describerSet describeIngressAnnotations(w, ing.Annotations) if describerSettings.ShowEvents { - events, _ := i.Core().Events(ing.Namespace).Search(api.Scheme, ing) + events, _ := i.Core().Events(ing.Namespace).Search(legacyscheme.Scheme, ing) if events != nil { DescribeEvents(events, w) } @@ -2004,7 +2080,7 @@ func (d *ServiceDescriber) Describe(namespace, name string, describerSettings pr endpoints, _ := d.Core().Endpoints(namespace).Get(name, metav1.GetOptions{}) var events *api.EventList if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(api.Scheme, service) + events, _ = d.Core().Events(namespace).Search(legacyscheme.Scheme, service) } return describeService(service, endpoints, events) } @@ -2062,7 +2138,7 @@ func describeService(service *api.Service, endpoints *api.Endpoints, events *api if sp.TargetPort.Type == intstr.Type(intstr.Int) { w.Write(LEVEL_0, "TargetPort:\t%d/%s\n", sp.TargetPort.IntVal, sp.Protocol) } else { - w.Write(LEVEL_0, "TargetPort:\t%d/%s\n", sp.TargetPort.StrVal, sp.Protocol) + w.Write(LEVEL_0, "TargetPort:\t%s/%s\n", sp.TargetPort.StrVal, sp.Protocol) } if sp.NodePort != 0 { w.Write(LEVEL_0, "NodePort:\t%s\t%d/%s\n", name, sp.NodePort, sp.Protocol) @@ -2101,7 +2177,7 @@ func (d *EndpointsDescriber) Describe(namespace, name string, describerSettings var events *api.EventList if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(api.Scheme, ep) + events, _ = d.Core().Events(namespace).Search(legacyscheme.Scheme, ep) } return describeEndpoints(ep, events) @@ -2213,7 +2289,7 @@ func (d *ServiceAccountDescriber) Describe(namespace, name string, describerSett var events *api.EventList if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(api.Scheme, serviceAccount) + events, _ = d.Core().Events(namespace).Search(legacyscheme.Scheme, serviceAccount) } return describeServiceAccount(serviceAccount, tokens, missingSecrets, events) @@ -2226,7 +2302,6 @@ func describeServiceAccount(serviceAccount *api.ServiceAccount, tokens []api.Sec w.Write(LEVEL_0, "Namespace:\t%s\n", serviceAccount.Namespace) printLabelsMultiline(w, "Labels", serviceAccount.Labels) printAnnotationsMultiline(w, "Annotations", serviceAccount.Annotations) - w.WriteLine() var ( emptyHeader = " " @@ -2254,7 +2329,8 @@ func describeServiceAccount(serviceAccount *api.ServiceAccount, tokens []api.Sec mountHeader: mountSecretNames, tokenHeader: tokenSecretNames, } - for header, names := range types { + for _, header := range sets.StringKeySet(types).List() { + names := types[header] if len(names) == 0 { w.Write(LEVEL_0, "%s\t\n", header) } else { @@ -2268,7 +2344,6 @@ func describeServiceAccount(serviceAccount *api.ServiceAccount, tokens []api.Sec prefix = emptyHeader } } - w.WriteLine() } if events != nil { @@ -2467,12 +2542,12 @@ func (d *NodeDescriber) Describe(namespace, name string, describerSettings print var events *api.EventList if describerSettings.ShowEvents { - if ref, err := ref.GetReference(api.Scheme, node); err 
!= nil { + if ref, err := ref.GetReference(legacyscheme.Scheme, node); err != nil { glog.Errorf("Unable to construct reference to '%#v': %v", node, err) } else { // TODO: We haven't decided the namespace for Node object yet. ref.UID = types.UID(ref.Name) - events, _ = d.Core().Events("").Search(api.Scheme, ref) + events, _ = d.Core().Events("").Search(legacyscheme.Scheme, ref) } } @@ -2587,7 +2662,7 @@ func (p *StatefulSetDescriber) Describe(namespace, name string, describerSetting var events *api.EventList if describerSettings.ShowEvents { - events, _ = p.client.Core().Events(namespace).Search(api.Scheme, ps) + events, _ = p.client.Core().Events(namespace).Search(legacyscheme.Scheme, ps) } return describeStatefulSet(ps, selector, events, running, waiting, succeeded, failed) @@ -2635,7 +2710,7 @@ func (p *CertificateSigningRequestDescriber) Describe(namespace, name string, de var events *api.EventList if describerSettings.ShowEvents { - events, _ = p.client.Core().Events(namespace).Search(api.Scheme, csr) + events, _ = p.client.Core().Events(namespace).Search(legacyscheme.Scheme, csr) } return describeCertificateSigningRequest(csr, cr, status, events) @@ -2703,7 +2778,7 @@ func (d *HorizontalPodAutoscalerDescriber) Describe(namespace, name string, desc var events *api.EventList if describerSettings.ShowEvents { - events, _ = d.client.Core().Events(namespace).Search(api.Scheme, hpa) + events, _ = d.client.Core().Events(namespace).Search(legacyscheme.Scheme, hpa) } return describeHorizontalPodAutoscaler(hpa, events, d) @@ -2903,13 +2978,13 @@ func (dd *DeploymentDescriber) Describe(namespace, name string, describerSetting return "", err } internalDeployment := &extensions.Deployment{} - if err := api.Scheme.Convert(d, internalDeployment, extensions.SchemeGroupVersion); err != nil { + if err := legacyscheme.Scheme.Convert(d, internalDeployment, extensions.SchemeGroupVersion); err != nil { return "", err } var events *api.EventList if describerSettings.ShowEvents { - events, _ = dd.Core().Events(namespace).Search(api.Scheme, d) + events, _ = dd.Core().Events(namespace).Search(legacyscheme.Scheme, d) } return describeDeployment(d, selector, internalDeployment, events, dd) @@ -3022,7 +3097,7 @@ func (d *ConfigMapDescriber) Describe(namespace, name string, describerSettings w.Write(LEVEL_0, "%s\n", string(v)) } if describerSettings.ShowEvents { - events, err := d.Core().Events(namespace).Search(api.Scheme, configMap) + events, err := d.Core().Events(namespace).Search(legacyscheme.Scheme, configMap) if err != nil { return err } @@ -3034,47 +3109,6 @@ func (d *ConfigMapDescriber) Describe(namespace, name string, describerSettings }) } -type ClusterDescriber struct { - fedclientset.Interface -} - -func (d *ClusterDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { - cluster, err := d.Federation().Clusters().Get(name, metav1.GetOptions{}) - if err != nil { - return "", err - } - return describeCluster(cluster) -} - -func describeCluster(cluster *federation.Cluster) (string, error) { - return tabbedString(func(out io.Writer) error { - w := NewPrefixWriter(out) - w.Write(LEVEL_0, "Name:\t%s\n", cluster.Name) - w.Write(LEVEL_0, "Labels:\t%s\n", labels.FormatLabels(cluster.Labels)) - - w.Write(LEVEL_0, "ServerAddressByClientCIDRs:\n ClientCIDR\tServerAddress\n") - w.Write(LEVEL_1, "----\t----\n") - for _, cidrAddr := range cluster.Spec.ServerAddressByClientCIDRs { - w.Write(LEVEL_1, "%v \t%v\n\n", cidrAddr.ClientCIDR, cidrAddr.ServerAddress) - } - - 
if len(cluster.Status.Conditions) > 0 { - w.Write(LEVEL_0, "Conditions:\n Type\tStatus\tLastUpdateTime\tLastTransitionTime\tReason\tMessage\n") - w.Write(LEVEL_1, "----\t------\t-----------------\t------------------\t------\t-------\n") - for _, c := range cluster.Status.Conditions { - w.Write(LEVEL_1, "%v \t%v \t%s \t%s \t%v \t%v\n", - c.Type, - c.Status, - c.LastProbeTime.Time.Format(time.RFC1123Z), - c.LastTransitionTime.Time.Format(time.RFC1123Z), - c.Reason, - c.Message) - } - } - return nil - }) -} - // NetworkPolicyDescriber generates information about a networking.NetworkPolicy type NetworkPolicyDescriber struct { clientset.Interface @@ -3096,13 +3130,111 @@ func describeNetworkPolicy(networkPolicy *networking.NetworkPolicy) (string, err w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%s\n", networkPolicy.Name) w.Write(LEVEL_0, "Namespace:\t%s\n", networkPolicy.Namespace) + w.Write(LEVEL_0, "Created on:\t%s\n", networkPolicy.CreationTimestamp) printLabelsMultiline(w, "Labels", networkPolicy.Labels) printAnnotationsMultiline(w, "Annotations", networkPolicy.Annotations) - + describeNetworkPolicySpec(networkPolicy.Spec, w) return nil }) } +func describeNetworkPolicySpec(nps networking.NetworkPolicySpec, w PrefixWriter) { + w.Write(LEVEL_0, "Spec:\n") + w.Write(LEVEL_1, "PodSelector: ") + if len(nps.PodSelector.MatchLabels) == 0 && len(nps.PodSelector.MatchExpressions) == 0 { + w.Write(LEVEL_2, " (Allowing the specific traffic to all pods in this namespace)\n") + } else { + w.Write(LEVEL_2, "%s\n", metav1.FormatLabelSelector(&nps.PodSelector)) + } + w.Write(LEVEL_1, "Allowing ingress traffic:\n") + printNetworkPolicySpecIngressFrom(nps.Ingress, " ", w) + w.Write(LEVEL_1, "Allowing egress traffic:\n") + printNetworkPolicySpecEgressTo(nps.Egress, " ", w) + w.Write(LEVEL_1, "Policy Types: %v\n", policyTypesToString(nps.PolicyTypes)) +} + +func printNetworkPolicySpecIngressFrom(npirs []networking.NetworkPolicyIngressRule, initialIndent string, w PrefixWriter) { + if len(npirs) == 0 { + w.Write(LEVEL_0, "%s%s\n", initialIndent, " (Selected pods are isolated for ingress connectivity)") + return + } + for i, npir := range npirs { + if len(npir.Ports) == 0 { + w.Write(LEVEL_0, "%s%s\n", initialIndent, "To Port: (traffic allowed to all ports)") + } else { + for _, port := range npir.Ports { + var proto api.Protocol + if port.Protocol != nil { + proto = *port.Protocol + } else { + proto = api.ProtocolTCP + } + w.Write(LEVEL_0, "%s%s: %s/%s\n", initialIndent, "To Port", port.Port, proto) + } + } + if len(npir.From) == 0 { + w.Write(LEVEL_0, "%s%s\n", initialIndent, "From: (traffic not restricted by source)") + } else { + for _, from := range npir.From { + w.Write(LEVEL_0, "%s", initialIndent) + if from.PodSelector != nil { + w.Write(LEVEL_0, "%s: %s\n", "From PodSelector", metav1.FormatLabelSelector(from.PodSelector)) + } else if from.NamespaceSelector != nil { + w.Write(LEVEL_0, "%s: %s\n", "From NamespaceSelector", metav1.FormatLabelSelector(from.NamespaceSelector)) + } else if from.IPBlock != nil { + w.Write(LEVEL_0, "From IPBlock:\n") + w.Write(LEVEL_0, "%s%sCIDR: %s\n", initialIndent, initialIndent, from.IPBlock.CIDR) + w.Write(LEVEL_0, "%s%sExcept: %v\n", initialIndent, initialIndent, strings.Join(from.IPBlock.Except, ", ")) + } + } + } + if i != len(npirs)-1 { + w.Write(LEVEL_0, "%s%s\n", initialIndent, "----------") + } + } +} + +func printNetworkPolicySpecEgressTo(npers []networking.NetworkPolicyEgressRule, initialIndent string, w PrefixWriter) { + if len(npers) == 0 { + 
w.Write(LEVEL_0, "%s%s\n", initialIndent, " (Selected pods are isolated for egress connectivity)") + return + } + for i, nper := range npers { + if len(nper.Ports) == 0 { + w.Write(LEVEL_0, "%s%s\n", initialIndent, "To Port: (traffic allowed to all ports)") + } else { + for _, port := range nper.Ports { + var proto api.Protocol + if port.Protocol != nil { + proto = *port.Protocol + } else { + proto = api.ProtocolTCP + } + w.Write(LEVEL_0, "%s%s: %s/%s\n", initialIndent, "To Port", port.Port, proto) + } + } + if len(nper.To) == 0 { + w.Write(LEVEL_0, "%s%s\n", initialIndent, "To: (traffic not restricted by source)") + } else { + for _, to := range nper.To { + w.Write(LEVEL_0, "%s", initialIndent) + if to.PodSelector != nil { + w.Write(LEVEL_0, "%s: %s\n", "To PodSelector", metav1.FormatLabelSelector(to.PodSelector)) + } else if to.NamespaceSelector != nil { + w.Write(LEVEL_0, "%s: %s\n", "To NamespaceSelector", metav1.FormatLabelSelector(to.NamespaceSelector)) + } else if to.IPBlock != nil { + w.Write(LEVEL_0, "To IPBlock:\n") + w.Write(LEVEL_0, "%s%sCIDR: %s\n", initialIndent, initialIndent, to.IPBlock.CIDR) + w.Write(LEVEL_0, "%s%sExcept: %v\n", initialIndent, initialIndent, strings.Join(to.IPBlock.Except, ", ")) + } + } + } + if i != len(npers)-1 { + w.Write(LEVEL_0, "%s%s\n", initialIndent, "----------") + } + } +} + type StorageClassDescriber struct { clientset.Interface } @@ -3115,7 +3247,7 @@ func (s *StorageClassDescriber) Describe(namespace, name string, describerSettin var events *api.EventList if describerSettings.ShowEvents { - events, _ = s.Core().Events(namespace).Search(api.Scheme, sc) + events, _ = s.Core().Events(namespace).Search(legacyscheme.Scheme, sc) } return describeStorageClass(sc, events) @@ -3129,6 +3261,12 @@ func describeStorageClass(sc *storage.StorageClass, events *api.EventList) (stri w.Write(LEVEL_0, "Annotations:\t%s\n", labels.FormatLabels(sc.Annotations)) w.Write(LEVEL_0, "Provisioner:\t%s\n", sc.Provisioner) w.Write(LEVEL_0, "Parameters:\t%s\n", labels.FormatLabels(sc.Parameters)) + if sc.ReclaimPolicy != nil { + w.Write(LEVEL_0, "ReclaimPolicy:\t%s\n", *sc.ReclaimPolicy) + } + if sc.VolumeBindingMode != nil { + w.Write(LEVEL_0, "VolumeBindingMode:\t%s\n", *sc.VolumeBindingMode) + } if events != nil { DescribeEvents(events, w) } @@ -3149,7 +3287,7 @@ func (p *PodDisruptionBudgetDescriber) Describe(namespace, name string, describe var events *api.EventList if describerSettings.ShowEvents { - events, _ = p.Core().Events(namespace).Search(api.Scheme, pdb) + events, _ = p.Core().Events(namespace).Search(legacyscheme.Scheme, pdb) } return describePodDisruptionBudget(pdb, events) @@ -3198,7 +3336,7 @@ func (s *PriorityClassDescriber) Describe(namespace, name string, describerSetti var events *api.EventList if describerSettings.ShowEvents { - events, _ = s.Core().Events(namespace).Search(api.Scheme, pc) + events, _ = s.Core().Events(namespace).Search(legacyscheme.Scheme, pc) } return describePriorityClass(pc, events) @@ -3248,6 +3386,9 @@ func describePodSecurityPolicy(psp *extensions.PodSecurityPolicy) (string, error w.Write(LEVEL_1, "Allowed Capabilities:\t%s\n", capsToString(psp.Spec.AllowedCapabilities)) w.Write(LEVEL_1, "Allowed Volume Types:\t%s\n", fsTypeToString(psp.Spec.Volumes)) + if len(psp.Spec.AllowedFlexVolumes) > 0 { + w.Write(LEVEL_1, "Allowed FlexVolume Types:\t%s\n", flexVolumesToString(psp.Spec.AllowedFlexVolumes)) + } w.Write(LEVEL_1, "Allow Host Network:\t%t\n", psp.Spec.HostNetwork) w.Write(LEVEL_1, "Allow Host Ports:\t%s\n", 
hostPortRangeToString(psp.Spec.HostPorts)) w.Write(LEVEL_1, "Allow Host PID:\t%t\n", psp.Spec.HostPID) @@ -3280,18 +3421,15 @@ func describePodSecurityPolicy(psp *extensions.PodSecurityPolicy) (string, error }) } -func stringOrAll(s string) string { - if len(s) > 0 { - return s - } - return "*" +func stringOrNone(s string) string { + return stringOrDefaultValue(s, "") } -func stringOrNone(s string) string { +func stringOrDefaultValue(s, defaultValue string) string { if len(s) > 0 { return s } - return "" + return defaultValue } func fsTypeToString(volumes []extensions.FSType) string { @@ -3302,6 +3440,14 @@ func fsTypeToString(volumes []extensions.FSType) string { return stringOrNone(strings.Join(strVolumes, ",")) } +func flexVolumesToString(flexVolumes []extensions.AllowedFlexVolume) string { + volumes := []string{} + for _, flexVolume := range flexVolumes { + volumes = append(volumes, "driver="+flexVolume.Driver) + } + return stringOrDefaultValue(strings.Join(volumes, ","), "") +} + func hostPortRangeToString(ranges []extensions.HostPortRange) string { formattedString := "" if ranges != nil { @@ -3350,6 +3496,18 @@ func capsToString(caps []api.Capability) string { return stringOrNone(formattedString) } +func policyTypesToString(pts []networking.PolicyType) string { + formattedString := "" + if pts != nil { + strPts := []string{} + for _, p := range pts { + strPts = append(strPts, string(p)) + } + formattedString = strings.Join(strPts, ", ") + } + return stringOrNone(formattedString) +} + // newErrNoDescriber creates a new ErrNoDescriber with the names of the provided types. func newErrNoDescriber(types ...reflect.Type) error { names := make([]string, 0, len(types)) @@ -3680,6 +3838,20 @@ func (list SortableVolumeMounts) Less(i, j int) bool { return list[i].MountPath < list[j].MountPath } +type SortableVolumeDevices []api.VolumeDevice + +func (list SortableVolumeDevices) Len() int { + return len(list) +} + +func (list SortableVolumeDevices) Swap(i, j int) { + list[i], list[j] = list[j], list[i] +} + +func (list SortableVolumeDevices) Less(i, j int) bool { + return list[i].DevicePath < list[j].DevicePath +} + // TODO: get rid of this and plumb the caller correctly func versionedExtensionsClientV1beta1(internalClient clientset.Interface) clientextensionsv1beta1.ExtensionsV1beta1Interface { if internalClient == nil { diff --git a/vendor/k8s.io/kubernetes/pkg/printers/internalversion/printers.go b/vendor/k8s.io/kubernetes/pkg/printers/internalversion/printers.go index aff3b46ceef8..0b8d1b192163 100644 --- a/vendor/k8s.io/kubernetes/pkg/printers/internalversion/printers.go +++ b/vendor/k8s.io/kubernetes/pkg/printers/internalversion/printers.go @@ -41,14 +41,13 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/federation/apis/federation" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/events" - "k8s.io/kubernetes/pkg/api/helper" "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/apis/autoscaling" "k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/pkg/apis/certificates" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/helper" "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/apis/networking" "k8s.io/kubernetes/pkg/apis/policy" @@ -304,15 +303,6 @@ func AddHandlers(h printers.PrintHandler) { h.TableHandler(componentStatusColumnDefinitions, printComponentStatus) h.TableHandler(componentStatusColumnDefinitions, printComponentStatusList) - 
thirdPartyResourceColumnDefinitions := []metav1alpha1.TableColumnDefinition{ - {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, - {Name: "Description", Type: "string", Description: extensionsv1beta1.ThirdPartyResource{}.SwaggerDoc()["description"]}, - {Name: "Version(s)", Type: "string", Description: extensionsv1beta1.ThirdPartyResource{}.SwaggerDoc()["versions"]}, - } - - h.TableHandler(thirdPartyResourceColumnDefinitions, printThirdPartyResource) - h.TableHandler(thirdPartyResourceColumnDefinitions, printThirdPartyResourceList) - deploymentColumnDefinitions := []metav1alpha1.TableColumnDefinition{ {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, {Name: "Desired", Type: "string", Description: extensionsv1beta1.DeploymentSpec{}.SwaggerDoc()["replicas"]}, @@ -361,14 +351,6 @@ func AddHandlers(h printers.PrintHandler) { h.TableHandler(podSecurityPolicyColumnDefinitions, printPodSecurityPolicy) h.TableHandler(podSecurityPolicyColumnDefinitions, printPodSecurityPolicyList) - clusterColumnDefinitions := []metav1alpha1.TableColumnDefinition{ - {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, - {Name: "Status", Type: "string", Description: "Status of the cluster"}, - {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, - } - h.TableHandler(clusterColumnDefinitions, printCluster) - h.TableHandler(clusterColumnDefinitions, printClusterList) - networkPolicyColumnDefinitioins := []metav1alpha1.TableColumnDefinition{ {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, {Name: "Pod-Selector", Type: "string", Description: extensionsv1beta1.NetworkPolicySpec{}.SwaggerDoc()["podSelector"]}, @@ -411,6 +393,7 @@ func AddHandlers(h printers.PrintHandler) { storageClassColumnDefinitions := []metav1alpha1.TableColumnDefinition{ {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, {Name: "Provisioner", Type: "string", Description: storagev1.StorageClass{}.SwaggerDoc()["provisioner"]}, + {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, } h.TableHandler(storageClassColumnDefinitions, printStorageClass) @@ -746,38 +729,6 @@ func printReplicaSetList(list *extensions.ReplicaSetList, options printers.Print return rows, nil } -func printCluster(obj *federation.Cluster, options printers.PrintOptions) ([]metav1alpha1.TableRow, error) { - row := metav1alpha1.TableRow{ - Object: runtime.RawExtension{Object: obj}, - } - - var statuses []string - for _, condition := range obj.Status.Conditions { - if condition.Status == api.ConditionTrue { - statuses = append(statuses, string(condition.Type)) - } else { - statuses = append(statuses, "Not"+string(condition.Type)) - } - } - if len(statuses) == 0 { - statuses = append(statuses, "Unknown") - } - row.Cells = append(row.Cells, obj.Name, strings.Join(statuses, ","), translateTimestamp(obj.CreationTimestamp)) - return []metav1alpha1.TableRow{row}, nil -} - -func printClusterList(list *federation.ClusterList, options printers.PrintOptions) ([]metav1alpha1.TableRow, error) { - rows := make([]metav1alpha1.TableRow, 0, len(list.Items)) - for i := range list.Items { - r, err := printCluster(&list.Items[i], options) - if err != nil { - return nil, err - } - rows = append(rows, r...) 
- } - return rows, nil -} - func printJob(obj *batch.Job, options printers.PrintOptions) ([]metav1alpha1.TableRow, error) { row := metav1alpha1.TableRow{ Object: runtime.RawExtension{Object: obj}, @@ -817,10 +768,10 @@ func printCronJob(obj *batch.CronJob, options printers.PrintOptions) ([]metav1al lastScheduleTime := "" if obj.Status.LastScheduleTime != nil { - lastScheduleTime = obj.Status.LastScheduleTime.Time.Format(time.RFC1123Z) + lastScheduleTime = translateTimestamp(*obj.Status.LastScheduleTime) } - row.Cells = append(row.Cells, obj.Name, obj.Spec.Schedule, printBoolPtr(obj.Spec.Suspend), len(obj.Status.Active), lastScheduleTime) + row.Cells = append(row.Cells, obj.Name, obj.Spec.Schedule, printBoolPtr(obj.Spec.Suspend), len(obj.Status.Active), lastScheduleTime, translateTimestamp(obj.CreationTimestamp)) if options.Wide { names, images := layoutContainerCells(obj.Spec.JobTemplate.Spec.Template.Spec.Containers) row.Cells = append(row.Cells, names, images, metav1.FormatLabelSelector(obj.Spec.JobTemplate.Spec.Selector)) @@ -1275,6 +1226,10 @@ func printPersistentVolumeClaim(obj *api.PersistentVolumeClaim, options printers } phase := obj.Status.Phase + if obj.ObjectMeta.DeletionTimestamp != nil { + phase = "Terminating" + } + storage := obj.Spec.Resources.Requests[api.ResourceStorage] capacity := "" accessModes := "" @@ -1475,33 +1430,6 @@ func printComponentStatusList(list *api.ComponentStatusList, options printers.Pr return rows, nil } -func printThirdPartyResource(obj *extensions.ThirdPartyResource, options printers.PrintOptions) ([]metav1alpha1.TableRow, error) { - row := metav1alpha1.TableRow{ - Object: runtime.RawExtension{Object: obj}, - } - - versions := make([]string, len(obj.Versions)) - for ix := range obj.Versions { - version := &obj.Versions[ix] - versions[ix] = fmt.Sprintf("%s", version.Name) - } - versionsString := strings.Join(versions, ",") - row.Cells = append(row.Cells, obj.Name, obj.Description, versionsString) - return []metav1alpha1.TableRow{row}, nil -} - -func printThirdPartyResourceList(list *extensions.ThirdPartyResourceList, options printers.PrintOptions) ([]metav1alpha1.TableRow, error) { - rows := make([]metav1alpha1.TableRow, 0, len(list.Items)) - for i := range list.Items { - r, err := printThirdPartyResource(&list.Items[i], options) - if err != nil { - return nil, err - } - rows = append(rows, r...) - } - return rows, nil -} - func truncate(str string, maxLen int) string { if len(str) > maxLen { return str[0:maxLen] + "..." @@ -1707,7 +1635,7 @@ func printStorageClass(obj *storage.StorageClass, options printers.PrintOptions) name += " (default)" } provtype := obj.Provisioner - row.Cells = append(row.Cells, name, provtype) + row.Cells = append(row.Cells, name, provtype, translateTimestamp(obj.CreationTimestamp)) return []metav1alpha1.TableRow{row}, nil } @@ -1748,10 +1676,7 @@ func layoutContainers(containers []api.Container, w io.Writer) error { } } _, err := fmt.Fprintf(w, "\t%s\t%s", namesBuffer.String(), imagesBuffer.String()) - if err != nil { - return err - } - return nil + return err } // Lay out all the containers on one line if use wide output. diff --git a/vendor/k8s.io/kubernetes/pkg/printers/printers.go b/vendor/k8s.io/kubernetes/pkg/printers/printers.go index c956170355b9..3ef696ed9b8c 100644 --- a/vendor/k8s.io/kubernetes/pkg/printers/printers.go +++ b/vendor/k8s.io/kubernetes/pkg/printers/printers.go @@ -29,12 +29,8 @@ import ( // a printer or an error. 
The printer is agnostic to schema versions, so you must // send arguments to PrintObj in the version you wish them to be shown using a // VersionedPrinter (typically when generic is true). -func GetStandardPrinter(outputOpts *OutputOptions, noHeaders bool, mapper meta.RESTMapper, typer runtime.ObjectTyper, encoder runtime.Encoder, decoders []runtime.Decoder, options PrintOptions) (ResourcePrinter, error) { - if outputOpts == nil { - return nil, fmt.Errorf("no output options specified") - } - - format, formatArgument, allowMissingTemplateKeys := outputOpts.FmtType, outputOpts.FmtArg, outputOpts.AllowMissingKeys +func GetStandardPrinter(mapper meta.RESTMapper, typer runtime.ObjectTyper, encoder runtime.Encoder, decoders []runtime.Decoder, options PrintOptions) (ResourcePrinter, error) { + format, formatArgument, allowMissingTemplateKeys := options.OutputFormatType, options.OutputFormatArgument, options.AllowMissingKeys var printer ResourcePrinter switch format { @@ -106,7 +102,7 @@ func GetStandardPrinter(outputOpts *OutputOptions, noHeaders bool, mapper meta.R case "custom-columns": var err error - if printer, err = NewCustomColumnsPrinterFromSpec(formatArgument, decoders[0], noHeaders); err != nil { + if printer, err = NewCustomColumnsPrinterFromSpec(formatArgument, decoders[0], options.NoHeaders); err != nil { return nil, err } diff --git a/vendor/k8s.io/kubernetes/pkg/printers/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/printers/storage/BUILD index e73577baa884..69772c8a4033 100644 --- a/vendor/k8s.io/kubernetes/pkg/printers/storage/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/printers/storage/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["storage.go"], + importpath = "k8s.io/kubernetes/pkg/printers/storage", deps = [ "//pkg/printers:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/probe/BUILD b/vendor/k8s.io/kubernetes/pkg/probe/BUILD index 2e21d154fd2f..51c23b1de958 100644 --- a/vendor/k8s.io/kubernetes/pkg/probe/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/probe/BUILD @@ -11,6 +11,7 @@ go_library( "doc.go", "probe.go", ], + importpath = "k8s.io/kubernetes/pkg/probe", ) filegroup( diff --git a/vendor/k8s.io/kubernetes/pkg/probe/exec/BUILD b/vendor/k8s.io/kubernetes/pkg/probe/exec/BUILD index d817517657dd..7d69fa359ecc 100644 --- a/vendor/k8s.io/kubernetes/pkg/probe/exec/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/probe/exec/BUILD @@ -9,6 +9,7 @@ load( go_library( name = "go_default_library", srcs = ["exec.go"], + importpath = "k8s.io/kubernetes/pkg/probe/exec", deps = [ "//pkg/probe:go_default_library", "//vendor/github.com/golang/glog:go_default_library", @@ -19,6 +20,7 @@ go_library( go_test( name = "go_default_test", srcs = ["exec_test.go"], + importpath = "k8s.io/kubernetes/pkg/probe/exec", library = ":go_default_library", deps = ["//pkg/probe:go_default_library"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/probe/http/BUILD b/vendor/k8s.io/kubernetes/pkg/probe/http/BUILD index 40eb47fd8901..7ef260235468 100644 --- a/vendor/k8s.io/kubernetes/pkg/probe/http/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/probe/http/BUILD @@ -9,6 +9,7 @@ load( go_library( name = "go_default_library", srcs = ["http.go"], + importpath = "k8s.io/kubernetes/pkg/probe/http", deps = [ "//pkg/probe:go_default_library", "//pkg/version:go_default_library", @@ -20,6 +21,7 @@ go_library( go_test( name = "go_default_test", srcs = ["http_test.go"], + importpath = "k8s.io/kubernetes/pkg/probe/http", library = 
":go_default_library", deps = ["//pkg/probe:go_default_library"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/probe/tcp/BUILD b/vendor/k8s.io/kubernetes/pkg/probe/tcp/BUILD index 7d19c9598ae5..5238534016fb 100644 --- a/vendor/k8s.io/kubernetes/pkg/probe/tcp/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/probe/tcp/BUILD @@ -9,6 +9,7 @@ load( go_library( name = "go_default_library", srcs = ["tcp.go"], + importpath = "k8s.io/kubernetes/pkg/probe/tcp", deps = [ "//pkg/probe:go_default_library", "//vendor/github.com/golang/glog:go_default_library", @@ -18,6 +19,7 @@ go_library( go_test( name = "go_default_test", srcs = ["tcp_test.go"], + importpath = "k8s.io/kubernetes/pkg/probe/tcp", library = ":go_default_library", deps = ["//pkg/probe:go_default_library"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/BUILD b/vendor/k8s.io/kubernetes/pkg/proxy/BUILD index 03f7f583ac2f..597c49c46545 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/proxy/BUILD @@ -11,6 +11,7 @@ go_library( "doc.go", "types.go", ], + importpath = "k8s.io/kubernetes/pkg/proxy", deps = ["//vendor/k8s.io/apimachinery/pkg/types:go_default_library"], ) @@ -25,10 +26,12 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", + "//pkg/proxy/apis/kubeproxyconfig:all-srcs", "//pkg/proxy/config:all-srcs", "//pkg/proxy/healthcheck:all-srcs", "//pkg/proxy/iptables:all-srcs", "//pkg/proxy/ipvs:all-srcs", + "//pkg/proxy/metrics:all-srcs", "//pkg/proxy/userspace:all-srcs", "//pkg/proxy/util:all-srcs", "//pkg/proxy/winkernel:all-srcs", diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/OWNERS b/vendor/k8s.io/kubernetes/pkg/proxy/OWNERS index f1f0145ed00a..311f4413163c 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/proxy/OWNERS @@ -10,3 +10,4 @@ reviewers: - justinsb - freehan - dcbw +- m1093782566 diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/BUILD b/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/BUILD new file mode 100644 index 000000000000..d9887d9f9900 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/BUILD @@ -0,0 +1,40 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "register.go", + "types.go", + "zz_generated.deepcopy.go", + ], + importpath = "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig", + deps = [ + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/proxy/apis/kubeproxyconfig/scheme:all-srcs", + "//pkg/proxy/apis/kubeproxyconfig/v1alpha1:all-srcs", + "//pkg/proxy/apis/kubeproxyconfig/validation:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/OWNERS b/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/OWNERS new file mode 100644 index 000000000000..34823356d226 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/OWNERS @@ -0,0 +1,4 @@ +approvers: +- thockin +reviewers: +- sig-network-reviewers diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/doc.go 
b/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/doc.go new file mode 100644 index 000000000000..77dfe2082639 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package + +package kubeproxyconfig diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/register.go b/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/register.go new file mode 100644 index 000000000000..d543b07a4d43 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/register.go @@ -0,0 +1,51 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubeproxyconfig + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// GroupName is the group name use in this package +const GroupName = "kubeproxy.config.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +func addKnownTypes(scheme *runtime.Scheme) error { + // TODO this will get cleaned up with the scheme types are fixed + scheme.AddKnownTypes(SchemeGroupVersion, + &KubeProxyConfiguration{}, + ) + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/scheme/BUILD b/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/scheme/BUILD new file mode 100644 index 000000000000..2e5b9e8f2062 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/scheme/BUILD @@ -0,0 +1,28 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["scheme.go"], + importpath = "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/scheme", + visibility = ["//visibility:public"], + deps = [ + "//pkg/proxy/apis/kubeproxyconfig:go_default_library", + "//pkg/proxy/apis/kubeproxyconfig/v1alpha1:go_default_library", + 
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/scheme/scheme.go b/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/scheme/scheme.go new file mode 100644 index 000000000000..fbc3bdd47e7b --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/scheme/scheme.go @@ -0,0 +1,42 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheme + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig" + "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1" +) + +var ( + // Scheme defines methods for serializing and deserializing API objects. + Scheme = runtime.NewScheme() + // Codecs provides methods for retrieving codecs and serializers for specific + // versions and content types. + Codecs = serializer.NewCodecFactory(Scheme) +) + +func init() { + AddToScheme(Scheme) +} + +// AddToScheme adds the types of this group into the given scheme. +func AddToScheme(scheme *runtime.Scheme) { + v1alpha1.AddToScheme(scheme) + kubeproxyconfig.AddToScheme(scheme) +} diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/types.go b/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/types.go new file mode 100644 index 000000000000..2a971c120d75 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/types.go @@ -0,0 +1,257 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubeproxyconfig + +import ( + "fmt" + "sort" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ClientConnectionConfiguration contains details for constructing a client. +type ClientConnectionConfiguration struct { + // kubeConfigFile is the path to a kubeconfig file. + KubeConfigFile string + // acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the + // default value of 'application/json'. This field will control all connections to the server used by a particular + // client. 
+ AcceptContentTypes string + // contentType is the content type used when sending data to the server from this client. + ContentType string + // qps controls the number of queries per second allowed for this connection. + QPS float32 + // burst allows extra queries to accumulate when a client is exceeding its rate. + Burst int32 +} + +// KubeProxyIPTablesConfiguration contains iptables-related configuration +// details for the Kubernetes proxy server. +type KubeProxyIPTablesConfiguration struct { + // masqueradeBit is the bit of the iptables fwmark space to use for SNAT if using + // the pure iptables proxy mode. Values must be within the range [0, 31]. + MasqueradeBit *int32 + // masqueradeAll tells kube-proxy to SNAT everything if using the pure iptables proxy mode. + MasqueradeAll bool + // syncPeriod is the period that iptables rules are refreshed (e.g. '5s', '1m', + // '2h22m'). Must be greater than 0. + SyncPeriod metav1.Duration + // minSyncPeriod is the minimum period that iptables rules are refreshed (e.g. '5s', '1m', + // '2h22m'). + MinSyncPeriod metav1.Duration +} + +// KubeProxyIPVSConfiguration contains ipvs-related configuration +// details for the Kubernetes proxy server. +type KubeProxyIPVSConfiguration struct { + // syncPeriod is the period that ipvs rules are refreshed (e.g. '5s', '1m', + // '2h22m'). Must be greater than 0. + SyncPeriod metav1.Duration + // minSyncPeriod is the minimum period that ipvs rules are refreshed (e.g. '5s', '1m', + // '2h22m'). + MinSyncPeriod metav1.Duration + // ipvs scheduler + Scheduler string +} + +// KubeProxyConntrackConfiguration contains conntrack settings for +// the Kubernetes proxy server. +type KubeProxyConntrackConfiguration struct { + // max is the maximum number of NAT connections to track (0 to + // leave as-is). This takes precedence over maxPerCore and min. + Max *int32 + // maxPerCore is the maximum number of NAT connections to track + // per CPU core (0 to leave the limit as-is and ignore min). + MaxPerCore *int32 + // min is the minimum value of connect-tracking records to allocate, + // regardless of maxPerCore (set maxPerCore=0 to leave the limit as-is). + Min *int32 + // tcpEstablishedTimeout is how long an idle TCP connection will be kept open + // (e.g. '2s'). Must be greater than 0 to set. + TCPEstablishedTimeout *metav1.Duration + // tcpCloseWaitTimeout is how long an idle conntrack entry + // in CLOSE_WAIT state will remain in the conntrack + // table. (e.g. '60s'). Must be greater than 0 to set. + TCPCloseWaitTimeout *metav1.Duration +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KubeProxyConfiguration contains everything necessary to configure the +// Kubernetes proxy server. +type KubeProxyConfiguration struct { + metav1.TypeMeta + + // featureGates is a comma-separated list of key=value pairs that control + // which alpha/beta features are enabled. + // + // TODO this really should be a map but that requires refactoring all + // components to use config files because local-up-cluster.sh only supports + // the --feature-gates flag right now, which is comma-separated key=value + // pairs. 
+ FeatureGates string + + // bindAddress is the IP address for the proxy server to serve on (set to 0.0.0.0 + // for all interfaces) + BindAddress string + // healthzBindAddress is the IP address and port for the health check server to serve on, + // defaulting to 0.0.0.0:10256 + HealthzBindAddress string + // metricsBindAddress is the IP address and port for the metrics server to serve on, + // defaulting to 127.0.0.1:10249 (set to 0.0.0.0 for all interfaces) + MetricsBindAddress string + // enableProfiling enables profiling via web interface on /debug/pprof handler. + // Profiling handlers will be handled by metrics server. + EnableProfiling bool + // clusterCIDR is the CIDR range of the pods in the cluster. It is used to + // bridge traffic coming from outside of the cluster. If not provided, + // no off-cluster bridging will be performed. + ClusterCIDR string + // hostnameOverride, if non-empty, will be used as the identity instead of the actual hostname. + HostnameOverride string + // clientConnection specifies the kubeconfig file and client connection settings for the proxy + // server to use when communicating with the apiserver. + ClientConnection ClientConnectionConfiguration + // iptables contains iptables-related configuration options. + IPTables KubeProxyIPTablesConfiguration + // ipvs contains ipvs-related configuration options. + IPVS KubeProxyIPVSConfiguration + // oomScoreAdj is the oom-score-adj value for kube-proxy process. Values must be within + // the range [-1000, 1000] + OOMScoreAdj *int32 + // mode specifies which proxy mode to use. + Mode ProxyMode + // portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed + // in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen. + PortRange string + // resourceContainer is the absolute name of the resource-only container to create and run + // the Kube-proxy in (Default: /kube-proxy). + ResourceContainer string + // udpIdleTimeout is how long an idle UDP connection will be kept open (e.g. '250ms', '2s'). + // Must be greater than 0. Only applicable for proxyMode=userspace. + UDPIdleTimeout metav1.Duration + // conntrack contains conntrack-related configuration options. + Conntrack KubeProxyConntrackConfiguration + // configSyncPeriod is how often configuration from the apiserver is refreshed. Must be greater + // than 0. + ConfigSyncPeriod metav1.Duration +} + +// Currently, four modes of proxying are available in total: 'userspace' (older, stable), 'iptables' +// (newer, faster), 'ipvs', and 'kernelspace' (Windows only, newer). +// +// If blank, use the best-available proxy (currently iptables, but may change in +// future versions). If the iptables proxy is selected, regardless of how, and +// the system's kernel or iptables versions are insufficient, this always falls +// back to the userspace proxy. +type ProxyMode string + +const ( + ProxyModeUserspace ProxyMode = "userspace" + ProxyModeIPTables ProxyMode = "iptables" + ProxyModeIPVS ProxyMode = "ipvs" + ProxyModeKernelspace ProxyMode = "kernelspace" +) + +// IPVSSchedulerMethod is the algorithm for allocating TCP connections and +// UDP datagrams to real servers. Scheduling algorithms are implemented +// as kernel modules. Ten are shipped with the Linux Virtual Server. +type IPVSSchedulerMethod string + +const ( + // RoundRobin distributes jobs equally amongst the available real servers.
+ RoundRobin IPVSSchedulerMethod = "rr" + // WeightedRoundRobin assigns jobs to real servers proportionally to the real servers' weights. + // Servers with higher weights receive new jobs first and get more jobs than servers with lower weights. + // Servers with equal weights get an equal distribution of new jobs. + WeightedRoundRobin IPVSSchedulerMethod = "wrr" + // LeastConnection assigns more jobs to real servers with fewer active jobs. + LeastConnection IPVSSchedulerMethod = "lc" + // WeightedLeastConnection assigns more jobs to servers with fewer jobs, + // relative to the real servers' weight (Ci/Wi). + WeightedLeastConnection IPVSSchedulerMethod = "wlc" + // LocalityBasedLeastConnection assigns jobs destined for the same IP address to the same server if + // the server is not overloaded and available; otherwise assigns jobs to servers with fewer jobs, + // and keeps it for future assignment. + LocalityBasedLeastConnection IPVSSchedulerMethod = "lblc" + // LocalityBasedLeastConnectionWithReplication assigns jobs destined for the same IP address to the + // least-connection node in the server set for the IP address. If all the nodes in the server set are overloaded, + // it picks up a node with fewer jobs in the cluster and adds it to the server set for the target. + // If the server set has not been modified for the specified time, the most loaded node is removed from the server set, + // in order to avoid a high degree of replication. + LocalityBasedLeastConnectionWithReplication IPVSSchedulerMethod = "lblcr" + // SourceHashing assigns jobs to servers through looking up a statically assigned hash table + // by their source IP addresses. + SourceHashing IPVSSchedulerMethod = "sh" + // DestinationHashing assigns jobs to servers through looking up a statically assigned hash table + // by their destination IP addresses. + DestinationHashing IPVSSchedulerMethod = "dh" + // ShortestExpectedDelay assigns an incoming job to the server with the shortest expected delay. + // The expected delay that the job will experience is (Ci + 1) / Ui if sent to the ith server, in which + // Ci is the number of jobs on the ith server and Ui is the fixed service rate (weight) of the ith server. + ShortestExpectedDelay IPVSSchedulerMethod = "sed" + // NeverQueue assigns an incoming job to an idle server if there is one, instead of waiting for a fast one; + // if all the servers are busy, it adopts the ShortestExpectedDelay policy to assign the job.
+ NeverQueue IPVSSchedulerMethod = "nq" +) + +func (m *ProxyMode) Set(s string) error { + *m = ProxyMode(s) + return nil +} + +func (m *ProxyMode) String() string { + if m != nil { + return string(*m) + } + return "" +} + +func (m *ProxyMode) Type() string { + return "ProxyMode" +} + +type ConfigurationMap map[string]string + +func (m *ConfigurationMap) String() string { + pairs := []string{} + for k, v := range *m { + pairs = append(pairs, fmt.Sprintf("%s=%s", k, v)) + } + sort.Strings(pairs) + return strings.Join(pairs, ",") +} + +func (m *ConfigurationMap) Set(value string) error { + for _, s := range strings.Split(value, ",") { + if len(s) == 0 { + continue + } + arr := strings.SplitN(s, "=", 2) + if len(arr) == 2 { + (*m)[strings.TrimSpace(arr[0])] = strings.TrimSpace(arr[1]) + } else { + (*m)[strings.TrimSpace(arr[0])] = "" + } + } + return nil +} + +func (*ConfigurationMap) Type() string { + return "mapStringString" +} diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1/BUILD b/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1/BUILD new file mode 100644 index 000000000000..5ddeb90140bf --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1/BUILD @@ -0,0 +1,43 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "defaults.go", + "doc.go", + "register.go", + "types.go", + "zz_generated.conversion.go", + "zz_generated.deepcopy.go", + "zz_generated.defaults.go", + ], + importpath = "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1", + deps = [ + "//pkg/kubelet/qos:go_default_library", + "//pkg/master/ports:go_default_library", + "//pkg/proxy/apis/kubeproxyconfig:go_default_library", + "//pkg/util/pointer:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1/defaults.go b/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1/defaults.go new file mode 100644 index 000000000000..af74a1424828 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1/defaults.go @@ -0,0 +1,119 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "fmt" + "strings" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kruntime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/kubelet/qos" + "k8s.io/kubernetes/pkg/master/ports" + "k8s.io/kubernetes/pkg/util/pointer" +) + +func addDefaultingFuncs(scheme *kruntime.Scheme) error { + return RegisterDefaults(scheme) +} + +func SetDefaults_KubeProxyConfiguration(obj *KubeProxyConfiguration) { + if len(obj.BindAddress) == 0 { + obj.BindAddress = "0.0.0.0" + } + if obj.HealthzBindAddress == "" { + obj.HealthzBindAddress = fmt.Sprintf("0.0.0.0:%v", ports.ProxyHealthzPort) + } else if !strings.Contains(obj.HealthzBindAddress, ":") { + obj.HealthzBindAddress += fmt.Sprintf(":%v", ports.ProxyHealthzPort) + } + if obj.MetricsBindAddress == "" { + obj.MetricsBindAddress = fmt.Sprintf("127.0.0.1:%v", ports.ProxyStatusPort) + } else if !strings.Contains(obj.MetricsBindAddress, ":") { + obj.MetricsBindAddress += fmt.Sprintf(":%v", ports.ProxyStatusPort) + } + if obj.OOMScoreAdj == nil { + temp := int32(qos.KubeProxyOOMScoreAdj) + obj.OOMScoreAdj = &temp + } + if obj.ResourceContainer == "" { + obj.ResourceContainer = "/kube-proxy" + } + if obj.IPTables.SyncPeriod.Duration == 0 { + obj.IPTables.SyncPeriod = metav1.Duration{Duration: 30 * time.Second} + } + if obj.IPVS.SyncPeriod.Duration == 0 { + obj.IPVS.SyncPeriod = metav1.Duration{Duration: 30 * time.Second} + } + zero := metav1.Duration{} + if obj.UDPIdleTimeout == zero { + obj.UDPIdleTimeout = metav1.Duration{Duration: 250 * time.Millisecond} + } + // If ConntrackMax is set, respect it. + if obj.Conntrack.Max == nil { + // If ConntrackMax is *not* set, use per-core scaling. + if obj.Conntrack.MaxPerCore == nil { + obj.Conntrack.MaxPerCore = pointer.Int32Ptr(32 * 1024) + } + if obj.Conntrack.Min == nil { + obj.Conntrack.Min = pointer.Int32Ptr(128 * 1024) + } + } + if obj.IPTables.MasqueradeBit == nil { + temp := int32(14) + obj.IPTables.MasqueradeBit = &temp + } + if obj.Conntrack.TCPEstablishedTimeout == nil { + obj.Conntrack.TCPEstablishedTimeout = &metav1.Duration{Duration: 24 * time.Hour} // 1 day (1/5 default) + } + if obj.Conntrack.TCPCloseWaitTimeout == nil { + // See https://github.com/kubernetes/kubernetes/issues/32551. + // + // CLOSE_WAIT conntrack state occurs when the Linux kernel + // sees a FIN from the remote server. Note: this is a half-close + // condition that persists as long as the local side keeps the + // socket open. The condition is rare as it is typical in most + // protocols for both sides to issue a close; this typically + // occurs when the local socket is lazily garbage collected. + // + // If the CLOSE_WAIT conntrack entry expires, then FINs from the + // local socket will not be properly SNAT'd and will not reach the + // remote server (if the connection was subject to SNAT). If the + // remote timeouts for FIN_WAIT* states exceed the CLOSE_WAIT + // timeout, then there will be an inconsistency in the state of + // the connection and a new connection reusing the SNAT (src, + // port) pair may be rejected by the remote side with RST. This + // can cause new calls to connect(2) to return with ECONNREFUSED. + // + // We set CLOSE_WAIT to one hour by default to better match + // typical server timeouts. 
+ obj.Conntrack.TCPCloseWaitTimeout = &metav1.Duration{Duration: 1 * time.Hour} + } + if obj.ConfigSyncPeriod.Duration == 0 { + obj.ConfigSyncPeriod.Duration = 15 * time.Minute + } + + if len(obj.ClientConnection.ContentType) == 0 { + obj.ClientConnection.ContentType = "application/vnd.kubernetes.protobuf" + } + if obj.ClientConnection.QPS == 0.0 { + obj.ClientConnection.QPS = 5.0 + } + if obj.ClientConnection.Burst == 0 { + obj.ClientConnection.Burst = 10 + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1/doc.go b/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1/doc.go new file mode 100644 index 000000000000..e8b3f58861b7 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:conversion-gen=k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig +// +k8s:openapi-gen=true +// +k8s:defaulter-gen=TypeMeta + +package v1alpha1 diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1/register.go b/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1/register.go new file mode 100644 index 000000000000..0c387fbc2e17 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1/register.go @@ -0,0 +1,50 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "kubeproxy.config.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. 
+ localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs) +} + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &KubeProxyConfiguration{}, + ) + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1/types.go b/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1/types.go new file mode 100644 index 000000000000..add86cdcbf0a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1/types.go @@ -0,0 +1,161 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ClientConnectionConfiguration contains details for constructing a client. +type ClientConnectionConfiguration struct { + // kubeConfigFile is the path to a kubeconfig file. + KubeConfigFile string `json:"kubeconfig"` + // acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the + // default value of 'application/json'. This field will control all connections to the server used by a particular + // client. + AcceptContentTypes string `json:"acceptContentTypes"` + // contentType is the content type used when sending data to the server from this client. + ContentType string `json:"contentType"` + // qps controls the number of queries per second allowed for this connection. + QPS float32 `json:"qps"` + // burst allows extra queries to accumulate when a client is exceeding its rate. + Burst int `json:"burst"` +} + +// KubeProxyIPTablesConfiguration contains iptables-related configuration +// details for the Kubernetes proxy server. +type KubeProxyIPTablesConfiguration struct { + // masqueradeBit is the bit of the iptables fwmark space to use for SNAT if using + // the pure iptables proxy mode. Values must be within the range [0, 31]. + MasqueradeBit *int32 `json:"masqueradeBit"` + // masqueradeAll tells kube-proxy to SNAT everything if using the pure iptables proxy mode. + MasqueradeAll bool `json:"masqueradeAll"` + // syncPeriod is the period that iptables rules are refreshed (e.g. '5s', '1m', + // '2h22m'). Must be greater than 0. + SyncPeriod metav1.Duration `json:"syncPeriod"` + // minSyncPeriod is the minimum period that iptables rules are refreshed (e.g. '5s', '1m', + // '2h22m'). + MinSyncPeriod metav1.Duration `json:"minSyncPeriod"` +} + +// KubeProxyIPVSConfiguration contains ipvs-related configuration +// details for the Kubernetes proxy server. +type KubeProxyIPVSConfiguration struct { + // syncPeriod is the period that ipvs rules are refreshed (e.g. '5s', '1m', + // '2h22m'). Must be greater than 0. + SyncPeriod metav1.Duration `json:"syncPeriod"` + // minSyncPeriod is the minimum period that ipvs rules are refreshed (e.g. '5s', '1m', + // '2h22m').
+ MinSyncPeriod metav1.Duration `json:"minSyncPeriod"` + // ipvs scheduler + Scheduler string `json:"scheduler"` +} + +// KubeProxyConntrackConfiguration contains conntrack settings for +// the Kubernetes proxy server. +type KubeProxyConntrackConfiguration struct { + // max is the maximum number of NAT connections to track (0 to + // leave as-is). This takes precedence over maxPerCore and min. + Max *int32 `json:"max"` + // maxPerCore is the maximum number of NAT connections to track + // per CPU core (0 to leave the limit as-is and ignore min). + MaxPerCore *int32 `json:"maxPerCore"` + // min is the minimum value of connect-tracking records to allocate, + // regardless of conntrackMaxPerCore (set maxPerCore=0 to leave the limit as-is). + Min *int32 `json:"min"` + // tcpEstablishedTimeout is how long an idle TCP connection will be kept open + // (e.g. '2s'). Must be greater than 0 to set. + TCPEstablishedTimeout *metav1.Duration `json:"tcpEstablishedTimeout"` + // tcpCloseWaitTimeout is how long an idle conntrack entry + // in CLOSE_WAIT state will remain in the conntrack + // table. (e.g. '60s'). Must be greater than 0 to set. + TCPCloseWaitTimeout *metav1.Duration `json:"tcpCloseWaitTimeout"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KubeProxyConfiguration contains everything necessary to configure the +// Kubernetes proxy server. +type KubeProxyConfiguration struct { + metav1.TypeMeta `json:",inline"` + + // featureGates is a comma-separated list of key=value pairs that control + // which alpha/beta features are enabled. + // + // TODO this really should be a map but that requires refactoring all + // components to use config files because local-up-cluster.sh only supports + // the --feature-gates flag right now, which is comma-separated key=value + // pairs. + FeatureGates string `json:"featureGates"` + + // bindAddress is the IP address for the proxy server to serve on (set to 0.0.0.0 + // for all interfaces) + BindAddress string `json:"bindAddress"` + // healthzBindAddress is the IP address and port for the health check server to serve on, + // defaulting to 0.0.0.0:10256 + HealthzBindAddress string `json:"healthzBindAddress"` + // metricsBindAddress is the IP address and port for the metrics server to serve on, + // defaulting to 127.0.0.1:10249 (set to 0.0.0.0 for all interfaces) + MetricsBindAddress string `json:"metricsBindAddress"` + // enableProfiling enables profiling via web interface on /debug/pprof handler. + // Profiling handlers will be handled by metrics server. + EnableProfiling bool `json:"enableProfiling"` + // clusterCIDR is the CIDR range of the pods in the cluster. It is used to + // bridge traffic coming from outside of the cluster. If not provided, + // no off-cluster bridging will be performed. + ClusterCIDR string `json:"clusterCIDR"` + // hostnameOverride, if non-empty, will be used as the identity instead of the actual hostname. + HostnameOverride string `json:"hostnameOverride"` + // clientConnection specifies the kubeconfig file and client connection settings for the proxy + // server to use when communicating with the apiserver. + ClientConnection ClientConnectionConfiguration `json:"clientConnection"` + // iptables contains iptables-related configuration options. + IPTables KubeProxyIPTablesConfiguration `json:"iptables"` + // ipvs contains ipvs-related configuration options. + IPVS KubeProxyIPVSConfiguration `json:"ipvs"` + // oomScoreAdj is the oom-score-adj value for kube-proxy process. 
Values must be within + // the range [-1000, 1000] + OOMScoreAdj *int32 `json:"oomScoreAdj"` + // mode specifies which proxy mode to use. + Mode ProxyMode `json:"mode"` + // portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed + // in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen. + PortRange string `json:"portRange"` + // resourceContainer is the absolute name of the resource-only container to create and run + // the Kube-proxy in (Default: /kube-proxy). + ResourceContainer string `json:"resourceContainer"` + // udpIdleTimeout is how long an idle UDP connection will be kept open (e.g. '250ms', '2s'). + // Must be greater than 0. Only applicable for proxyMode=userspace. + UDPIdleTimeout metav1.Duration `json:"udpTimeoutMilliseconds"` + // conntrack contains conntrack-related configuration options. + Conntrack KubeProxyConntrackConfiguration `json:"conntrack"` + // configSyncPeriod is how often configuration from the apiserver is refreshed. Must be greater + // than 0. + ConfigSyncPeriod metav1.Duration `json:"configSyncPeriod"` +} + +// Currently two modes of proxying are available: 'userspace' (older, stable) or 'iptables' +// (newer, faster). If blank, use the best-available proxy (currently iptables, but may +// change in future versions). If the iptables proxy is selected, regardless of how, and +// the system's kernel or iptables versions are insufficient, this always falls back to the +// userspace proxy. +type ProxyMode string + +const ( + ProxyModeUserspace ProxyMode = "userspace" + ProxyModeIPTables ProxyMode = "iptables" +) diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1/zz_generated.conversion.go new file mode 100644 index 000000000000..1b3a91c5aaa6 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1/zz_generated.conversion.go @@ -0,0 +1,224 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + kubeproxyconfig "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig" + unsafe "unsafe" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes.
+func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1alpha1_ClientConnectionConfiguration_To_kubeproxyconfig_ClientConnectionConfiguration, + Convert_kubeproxyconfig_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration, + Convert_v1alpha1_KubeProxyConfiguration_To_kubeproxyconfig_KubeProxyConfiguration, + Convert_kubeproxyconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration, + Convert_v1alpha1_KubeProxyConntrackConfiguration_To_kubeproxyconfig_KubeProxyConntrackConfiguration, + Convert_kubeproxyconfig_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration, + Convert_v1alpha1_KubeProxyIPTablesConfiguration_To_kubeproxyconfig_KubeProxyIPTablesConfiguration, + Convert_kubeproxyconfig_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration, + Convert_v1alpha1_KubeProxyIPVSConfiguration_To_kubeproxyconfig_KubeProxyIPVSConfiguration, + Convert_kubeproxyconfig_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration, + ) +} + +func autoConvert_v1alpha1_ClientConnectionConfiguration_To_kubeproxyconfig_ClientConnectionConfiguration(in *ClientConnectionConfiguration, out *kubeproxyconfig.ClientConnectionConfiguration, s conversion.Scope) error { + out.KubeConfigFile = in.KubeConfigFile + out.AcceptContentTypes = in.AcceptContentTypes + out.ContentType = in.ContentType + out.QPS = in.QPS + out.Burst = int32(in.Burst) + return nil +} + +// Convert_v1alpha1_ClientConnectionConfiguration_To_kubeproxyconfig_ClientConnectionConfiguration is an autogenerated conversion function. +func Convert_v1alpha1_ClientConnectionConfiguration_To_kubeproxyconfig_ClientConnectionConfiguration(in *ClientConnectionConfiguration, out *kubeproxyconfig.ClientConnectionConfiguration, s conversion.Scope) error { + return autoConvert_v1alpha1_ClientConnectionConfiguration_To_kubeproxyconfig_ClientConnectionConfiguration(in, out, s) +} + +func autoConvert_kubeproxyconfig_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration(in *kubeproxyconfig.ClientConnectionConfiguration, out *ClientConnectionConfiguration, s conversion.Scope) error { + out.KubeConfigFile = in.KubeConfigFile + out.AcceptContentTypes = in.AcceptContentTypes + out.ContentType = in.ContentType + out.QPS = in.QPS + out.Burst = int(in.Burst) + return nil +} + +// Convert_kubeproxyconfig_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration is an autogenerated conversion function. 
+func Convert_kubeproxyconfig_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration(in *kubeproxyconfig.ClientConnectionConfiguration, out *ClientConnectionConfiguration, s conversion.Scope) error { + return autoConvert_kubeproxyconfig_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration(in, out, s) +} + +func autoConvert_v1alpha1_KubeProxyConfiguration_To_kubeproxyconfig_KubeProxyConfiguration(in *KubeProxyConfiguration, out *kubeproxyconfig.KubeProxyConfiguration, s conversion.Scope) error { + out.FeatureGates = in.FeatureGates + out.BindAddress = in.BindAddress + out.HealthzBindAddress = in.HealthzBindAddress + out.MetricsBindAddress = in.MetricsBindAddress + out.EnableProfiling = in.EnableProfiling + out.ClusterCIDR = in.ClusterCIDR + out.HostnameOverride = in.HostnameOverride + if err := Convert_v1alpha1_ClientConnectionConfiguration_To_kubeproxyconfig_ClientConnectionConfiguration(&in.ClientConnection, &out.ClientConnection, s); err != nil { + return err + } + if err := Convert_v1alpha1_KubeProxyIPTablesConfiguration_To_kubeproxyconfig_KubeProxyIPTablesConfiguration(&in.IPTables, &out.IPTables, s); err != nil { + return err + } + if err := Convert_v1alpha1_KubeProxyIPVSConfiguration_To_kubeproxyconfig_KubeProxyIPVSConfiguration(&in.IPVS, &out.IPVS, s); err != nil { + return err + } + out.OOMScoreAdj = (*int32)(unsafe.Pointer(in.OOMScoreAdj)) + out.Mode = kubeproxyconfig.ProxyMode(in.Mode) + out.PortRange = in.PortRange + out.ResourceContainer = in.ResourceContainer + out.UDPIdleTimeout = in.UDPIdleTimeout + if err := Convert_v1alpha1_KubeProxyConntrackConfiguration_To_kubeproxyconfig_KubeProxyConntrackConfiguration(&in.Conntrack, &out.Conntrack, s); err != nil { + return err + } + out.ConfigSyncPeriod = in.ConfigSyncPeriod + return nil +} + +// Convert_v1alpha1_KubeProxyConfiguration_To_kubeproxyconfig_KubeProxyConfiguration is an autogenerated conversion function. 
+func Convert_v1alpha1_KubeProxyConfiguration_To_kubeproxyconfig_KubeProxyConfiguration(in *KubeProxyConfiguration, out *kubeproxyconfig.KubeProxyConfiguration, s conversion.Scope) error { + return autoConvert_v1alpha1_KubeProxyConfiguration_To_kubeproxyconfig_KubeProxyConfiguration(in, out, s) +} + +func autoConvert_kubeproxyconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(in *kubeproxyconfig.KubeProxyConfiguration, out *KubeProxyConfiguration, s conversion.Scope) error { + out.FeatureGates = in.FeatureGates + out.BindAddress = in.BindAddress + out.HealthzBindAddress = in.HealthzBindAddress + out.MetricsBindAddress = in.MetricsBindAddress + out.EnableProfiling = in.EnableProfiling + out.ClusterCIDR = in.ClusterCIDR + out.HostnameOverride = in.HostnameOverride + if err := Convert_kubeproxyconfig_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration(&in.ClientConnection, &out.ClientConnection, s); err != nil { + return err + } + if err := Convert_kubeproxyconfig_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration(&in.IPTables, &out.IPTables, s); err != nil { + return err + } + if err := Convert_kubeproxyconfig_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration(&in.IPVS, &out.IPVS, s); err != nil { + return err + } + out.OOMScoreAdj = (*int32)(unsafe.Pointer(in.OOMScoreAdj)) + out.Mode = ProxyMode(in.Mode) + out.PortRange = in.PortRange + out.ResourceContainer = in.ResourceContainer + out.UDPIdleTimeout = in.UDPIdleTimeout + if err := Convert_kubeproxyconfig_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration(&in.Conntrack, &out.Conntrack, s); err != nil { + return err + } + out.ConfigSyncPeriod = in.ConfigSyncPeriod + return nil +} + +// Convert_kubeproxyconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration is an autogenerated conversion function. +func Convert_kubeproxyconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(in *kubeproxyconfig.KubeProxyConfiguration, out *KubeProxyConfiguration, s conversion.Scope) error { + return autoConvert_kubeproxyconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(in, out, s) +} + +func autoConvert_v1alpha1_KubeProxyConntrackConfiguration_To_kubeproxyconfig_KubeProxyConntrackConfiguration(in *KubeProxyConntrackConfiguration, out *kubeproxyconfig.KubeProxyConntrackConfiguration, s conversion.Scope) error { + out.Max = (*int32)(unsafe.Pointer(in.Max)) + out.MaxPerCore = (*int32)(unsafe.Pointer(in.MaxPerCore)) + out.Min = (*int32)(unsafe.Pointer(in.Min)) + out.TCPEstablishedTimeout = (*v1.Duration)(unsafe.Pointer(in.TCPEstablishedTimeout)) + out.TCPCloseWaitTimeout = (*v1.Duration)(unsafe.Pointer(in.TCPCloseWaitTimeout)) + return nil +} + +// Convert_v1alpha1_KubeProxyConntrackConfiguration_To_kubeproxyconfig_KubeProxyConntrackConfiguration is an autogenerated conversion function. 
+func Convert_v1alpha1_KubeProxyConntrackConfiguration_To_kubeproxyconfig_KubeProxyConntrackConfiguration(in *KubeProxyConntrackConfiguration, out *kubeproxyconfig.KubeProxyConntrackConfiguration, s conversion.Scope) error { + return autoConvert_v1alpha1_KubeProxyConntrackConfiguration_To_kubeproxyconfig_KubeProxyConntrackConfiguration(in, out, s) +} + +func autoConvert_kubeproxyconfig_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration(in *kubeproxyconfig.KubeProxyConntrackConfiguration, out *KubeProxyConntrackConfiguration, s conversion.Scope) error { + out.Max = (*int32)(unsafe.Pointer(in.Max)) + out.MaxPerCore = (*int32)(unsafe.Pointer(in.MaxPerCore)) + out.Min = (*int32)(unsafe.Pointer(in.Min)) + out.TCPEstablishedTimeout = (*v1.Duration)(unsafe.Pointer(in.TCPEstablishedTimeout)) + out.TCPCloseWaitTimeout = (*v1.Duration)(unsafe.Pointer(in.TCPCloseWaitTimeout)) + return nil +} + +// Convert_kubeproxyconfig_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration is an autogenerated conversion function. +func Convert_kubeproxyconfig_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration(in *kubeproxyconfig.KubeProxyConntrackConfiguration, out *KubeProxyConntrackConfiguration, s conversion.Scope) error { + return autoConvert_kubeproxyconfig_KubeProxyConntrackConfiguration_To_v1alpha1_KubeProxyConntrackConfiguration(in, out, s) +} + +func autoConvert_v1alpha1_KubeProxyIPTablesConfiguration_To_kubeproxyconfig_KubeProxyIPTablesConfiguration(in *KubeProxyIPTablesConfiguration, out *kubeproxyconfig.KubeProxyIPTablesConfiguration, s conversion.Scope) error { + out.MasqueradeBit = (*int32)(unsafe.Pointer(in.MasqueradeBit)) + out.MasqueradeAll = in.MasqueradeAll + out.SyncPeriod = in.SyncPeriod + out.MinSyncPeriod = in.MinSyncPeriod + return nil +} + +// Convert_v1alpha1_KubeProxyIPTablesConfiguration_To_kubeproxyconfig_KubeProxyIPTablesConfiguration is an autogenerated conversion function. +func Convert_v1alpha1_KubeProxyIPTablesConfiguration_To_kubeproxyconfig_KubeProxyIPTablesConfiguration(in *KubeProxyIPTablesConfiguration, out *kubeproxyconfig.KubeProxyIPTablesConfiguration, s conversion.Scope) error { + return autoConvert_v1alpha1_KubeProxyIPTablesConfiguration_To_kubeproxyconfig_KubeProxyIPTablesConfiguration(in, out, s) +} + +func autoConvert_kubeproxyconfig_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration(in *kubeproxyconfig.KubeProxyIPTablesConfiguration, out *KubeProxyIPTablesConfiguration, s conversion.Scope) error { + out.MasqueradeBit = (*int32)(unsafe.Pointer(in.MasqueradeBit)) + out.MasqueradeAll = in.MasqueradeAll + out.SyncPeriod = in.SyncPeriod + out.MinSyncPeriod = in.MinSyncPeriod + return nil +} + +// Convert_kubeproxyconfig_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration is an autogenerated conversion function. 
+func Convert_kubeproxyconfig_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration(in *kubeproxyconfig.KubeProxyIPTablesConfiguration, out *KubeProxyIPTablesConfiguration, s conversion.Scope) error { + return autoConvert_kubeproxyconfig_KubeProxyIPTablesConfiguration_To_v1alpha1_KubeProxyIPTablesConfiguration(in, out, s) +} + +func autoConvert_v1alpha1_KubeProxyIPVSConfiguration_To_kubeproxyconfig_KubeProxyIPVSConfiguration(in *KubeProxyIPVSConfiguration, out *kubeproxyconfig.KubeProxyIPVSConfiguration, s conversion.Scope) error { + out.SyncPeriod = in.SyncPeriod + out.MinSyncPeriod = in.MinSyncPeriod + out.Scheduler = in.Scheduler + return nil +} + +// Convert_v1alpha1_KubeProxyIPVSConfiguration_To_kubeproxyconfig_KubeProxyIPVSConfiguration is an autogenerated conversion function. +func Convert_v1alpha1_KubeProxyIPVSConfiguration_To_kubeproxyconfig_KubeProxyIPVSConfiguration(in *KubeProxyIPVSConfiguration, out *kubeproxyconfig.KubeProxyIPVSConfiguration, s conversion.Scope) error { + return autoConvert_v1alpha1_KubeProxyIPVSConfiguration_To_kubeproxyconfig_KubeProxyIPVSConfiguration(in, out, s) +} + +func autoConvert_kubeproxyconfig_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration(in *kubeproxyconfig.KubeProxyIPVSConfiguration, out *KubeProxyIPVSConfiguration, s conversion.Scope) error { + out.SyncPeriod = in.SyncPeriod + out.MinSyncPeriod = in.MinSyncPeriod + out.Scheduler = in.Scheduler + return nil +} + +// Convert_kubeproxyconfig_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration is an autogenerated conversion function. +func Convert_kubeproxyconfig_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration(in *kubeproxyconfig.KubeProxyIPVSConfiguration, out *KubeProxyIPVSConfiguration, s conversion.Scope) error { + return autoConvert_kubeproxyconfig_KubeProxyIPVSConfiguration_To_v1alpha1_KubeProxyIPVSConfiguration(in, out, s) +} diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000000..44b2a69902f3 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,189 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientConnectionConfiguration) DeepCopyInto(out *ClientConnectionConfiguration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientConnectionConfiguration. 
+func (in *ClientConnectionConfiguration) DeepCopy() *ClientConnectionConfiguration { + if in == nil { + return nil + } + out := new(ClientConnectionConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeProxyConfiguration) DeepCopyInto(out *KubeProxyConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ClientConnection = in.ClientConnection + in.IPTables.DeepCopyInto(&out.IPTables) + out.IPVS = in.IPVS + if in.OOMScoreAdj != nil { + in, out := &in.OOMScoreAdj, &out.OOMScoreAdj + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + out.UDPIdleTimeout = in.UDPIdleTimeout + in.Conntrack.DeepCopyInto(&out.Conntrack) + out.ConfigSyncPeriod = in.ConfigSyncPeriod + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyConfiguration. +func (in *KubeProxyConfiguration) DeepCopy() *KubeProxyConfiguration { + if in == nil { + return nil + } + out := new(KubeProxyConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeProxyConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeProxyConntrackConfiguration) DeepCopyInto(out *KubeProxyConntrackConfiguration) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.MaxPerCore != nil { + in, out := &in.MaxPerCore, &out.MaxPerCore + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.Min != nil { + in, out := &in.Min, &out.Min + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.TCPEstablishedTimeout != nil { + in, out := &in.TCPEstablishedTimeout, &out.TCPEstablishedTimeout + if *in == nil { + *out = nil + } else { + *out = new(v1.Duration) + **out = **in + } + } + if in.TCPCloseWaitTimeout != nil { + in, out := &in.TCPCloseWaitTimeout, &out.TCPCloseWaitTimeout + if *in == nil { + *out = nil + } else { + *out = new(v1.Duration) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyConntrackConfiguration. +func (in *KubeProxyConntrackConfiguration) DeepCopy() *KubeProxyConntrackConfiguration { + if in == nil { + return nil + } + out := new(KubeProxyConntrackConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeProxyIPTablesConfiguration) DeepCopyInto(out *KubeProxyIPTablesConfiguration) { + *out = *in + if in.MasqueradeBit != nil { + in, out := &in.MasqueradeBit, &out.MasqueradeBit + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + out.SyncPeriod = in.SyncPeriod + out.MinSyncPeriod = in.MinSyncPeriod + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyIPTablesConfiguration. 
+func (in *KubeProxyIPTablesConfiguration) DeepCopy() *KubeProxyIPTablesConfiguration { + if in == nil { + return nil + } + out := new(KubeProxyIPTablesConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeProxyIPVSConfiguration) DeepCopyInto(out *KubeProxyIPVSConfiguration) { + *out = *in + out.SyncPeriod = in.SyncPeriod + out.MinSyncPeriod = in.MinSyncPeriod + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyIPVSConfiguration. +func (in *KubeProxyIPVSConfiguration) DeepCopy() *KubeProxyIPVSConfiguration { + if in == nil { + return nil + } + out := new(KubeProxyIPVSConfiguration) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1/zz_generated.defaults.go new file mode 100644 index 000000000000..843337ed89cf --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1/zz_generated.defaults.go @@ -0,0 +1,37 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. 
+func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&KubeProxyConfiguration{}, func(obj interface{}) { SetObjectDefaults_KubeProxyConfiguration(obj.(*KubeProxyConfiguration)) }) + return nil +} + +func SetObjectDefaults_KubeProxyConfiguration(in *KubeProxyConfiguration) { + SetDefaults_KubeProxyConfiguration(in) +} diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/validation/BUILD b/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/validation/BUILD new file mode 100644 index 000000000000..2c8abb0dcab1 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/validation/BUILD @@ -0,0 +1,45 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_library( + name = "go_default_library", + srcs = ["validation.go"], + importpath = "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/validation", + deps = [ + "//pkg/apis/core/validation:go_default_library", + "//pkg/proxy/apis/kubeproxyconfig:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) + +go_test( + name = "go_default_test", + srcs = ["validation_test.go"], + importpath = "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/validation", + library = ":go_default_library", + deps = [ + "//pkg/proxy/apis/kubeproxyconfig:go_default_library", + "//pkg/util/pointer:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/validation/validation.go b/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/validation/validation.go new file mode 100644 index 000000000000..376d281c9d8c --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/validation/validation.go @@ -0,0 +1,238 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "fmt" + "net" + "runtime" + "strconv" + "strings" + + utilnet "k8s.io/apimachinery/pkg/util/net" + "k8s.io/apimachinery/pkg/util/validation/field" + apivalidation "k8s.io/kubernetes/pkg/apis/core/validation" + "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig" +) + +// Validate validates the configuration of kube-proxy +func Validate(config *kubeproxyconfig.KubeProxyConfiguration) field.ErrorList { + allErrs := field.ErrorList{} + + newPath := field.NewPath("KubeProxyConfiguration") + + allErrs = append(allErrs, validateKubeProxyIPTablesConfiguration(config.IPTables, newPath.Child("KubeProxyIPTablesConfiguration"))...) 
+ allErrs = append(allErrs, validateKubeProxyIPVSConfiguration(config.IPVS, newPath.Child("KubeProxyIPVSConfiguration"))...) + allErrs = append(allErrs, validateKubeProxyConntrackConfiguration(config.Conntrack, newPath.Child("KubeProxyConntrackConfiguration"))...) + allErrs = append(allErrs, validateProxyMode(config.Mode, newPath.Child("Mode"))...) + allErrs = append(allErrs, validateClientConnectionConfiguration(config.ClientConnection, newPath.Child("ClientConnection"))...) + + if config.OOMScoreAdj != nil && (*config.OOMScoreAdj < -1000 || *config.OOMScoreAdj > 1000) { + allErrs = append(allErrs, field.Invalid(newPath.Child("OOMScoreAdj"), *config.OOMScoreAdj, "must be within the range [-1000, 1000]")) + } + + if config.UDPIdleTimeout.Duration <= 0 { + allErrs = append(allErrs, field.Invalid(newPath.Child("UDPIdleTimeout"), config.UDPIdleTimeout, "must be greater than 0")) + } + + if config.ConfigSyncPeriod.Duration <= 0 { + allErrs = append(allErrs, field.Invalid(newPath.Child("ConfigSyncPeriod"), config.ConfigSyncPeriod, "must be greater than 0")) + } + + if net.ParseIP(config.BindAddress) == nil { + allErrs = append(allErrs, field.Invalid(newPath.Child("BindAddress"), config.BindAddress, "not a valid textual representation of an IP address")) + } + + allErrs = append(allErrs, validateHostPort(config.HealthzBindAddress, newPath.Child("HealthzBindAddress"))...) + allErrs = append(allErrs, validateHostPort(config.MetricsBindAddress, newPath.Child("MetricsBindAddress"))...) + + if config.ClusterCIDR != "" { + if _, _, err := net.ParseCIDR(config.ClusterCIDR); err != nil { + allErrs = append(allErrs, field.Invalid(newPath.Child("ClusterCIDR"), config.ClusterCIDR, "must be a valid CIDR block (e.g. 10.100.0.0/16)")) + } + } + + if _, err := utilnet.ParsePortRange(config.PortRange); err != nil { + allErrs = append(allErrs, field.Invalid(newPath.Child("PortRange"), config.PortRange, "must be a valid port range (e.g. 
300-2000)")) + } + + return allErrs +} + +func validateKubeProxyIPTablesConfiguration(config kubeproxyconfig.KubeProxyIPTablesConfiguration, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if config.MasqueradeBit != nil && (*config.MasqueradeBit < 0 || *config.MasqueradeBit > 31) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("MasqueradeBit"), config.MasqueradeBit, "must be within the range [0, 31]")) + } + + if config.SyncPeriod.Duration <= 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("SyncPeriod"), config.SyncPeriod, "must be greater than 0")) + } + + if config.MinSyncPeriod.Duration < 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("MinSyncPeriod"), config.MinSyncPeriod, "must be greater than or equal to 0")) + } + + if config.MinSyncPeriod.Duration > config.SyncPeriod.Duration { + allErrs = append(allErrs, field.Invalid(fldPath.Child("SyncPeriod"), config.MinSyncPeriod, fmt.Sprintf("must be greater than or equal to %s", fldPath.Child("MinSyncPeriod").String()))) + } + + return allErrs +} + +func validateKubeProxyIPVSConfiguration(config kubeproxyconfig.KubeProxyIPVSConfiguration, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if config.SyncPeriod.Duration <= 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("SyncPeriod"), config.SyncPeriod, "must be greater than 0")) + } + + if config.MinSyncPeriod.Duration < 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("MinSyncPeriod"), config.MinSyncPeriod, "must be greater than or equal to 0")) + } + + if config.MinSyncPeriod.Duration > config.SyncPeriod.Duration { + allErrs = append(allErrs, field.Invalid(fldPath.Child("SyncPeriod"), config.MinSyncPeriod, fmt.Sprintf("must be greater than or equal to %s", fldPath.Child("MinSyncPeriod").String()))) + } + + allErrs = append(allErrs, validateIPVSSchedulerMethod(kubeproxyconfig.IPVSSchedulerMethod(config.Scheduler), fldPath.Child("Scheduler"))...) 
+ + return allErrs +} + +func validateKubeProxyConntrackConfiguration(config kubeproxyconfig.KubeProxyConntrackConfiguration, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if config.Max != nil && *config.Max < 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("Max"), config.Max, "must be greater than or equal to 0")) + } + + if config.MaxPerCore != nil && *config.MaxPerCore < 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("MaxPerCore"), config.MaxPerCore, "must be greater than or equal to 0")) + } + + if config.Min != nil && *config.Min < 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("Min"), config.Min, "must be greater than or equal to 0")) + } + + if config.TCPEstablishedTimeout.Duration < 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("TCPEstablishedTimeout"), config.TCPEstablishedTimeout, "must be greater than or equal to 0")) + } + + if config.TCPCloseWaitTimeout.Duration < 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("TCPCloseWaitTimeout"), config.TCPCloseWaitTimeout, "must be greater than or equal to 0")) + } + + return allErrs +} + +func validateProxyMode(mode kubeproxyconfig.ProxyMode, fldPath *field.Path) field.ErrorList { + if runtime.GOOS == "windows" { + return validateProxyModeWindows(mode, fldPath) + } + + return validateProxyModeLinux(mode, fldPath) +} + +func validateProxyModeLinux(mode kubeproxyconfig.ProxyMode, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + switch mode { + case kubeproxyconfig.ProxyModeUserspace: + case kubeproxyconfig.ProxyModeIPTables: + case kubeproxyconfig.ProxyModeIPVS: + case "": + default: + modes := []string{string(kubeproxyconfig.ProxyModeUserspace), string(kubeproxyconfig.ProxyModeIPTables), string(kubeproxyconfig.ProxyModeIPVS)} + errMsg := fmt.Sprintf("must be %s or blank (blank means the best-available proxy [currently iptables])", strings.Join(modes, ",")) + allErrs = append(allErrs, field.Invalid(fldPath.Child("ProxyMode"), string(mode), errMsg)) + } + return allErrs +} + +func validateProxyModeWindows(mode kubeproxyconfig.ProxyMode, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + switch mode { + case kubeproxyconfig.ProxyModeUserspace: + case kubeproxyconfig.ProxyModeKernelspace: + default: + modes := []string{string(kubeproxyconfig.ProxyModeUserspace), string(kubeproxyconfig.ProxyModeKernelspace)} + errMsg := fmt.Sprintf("must be %s or blank (blank means the most-available proxy [currently userspace])", strings.Join(modes, ",")) + allErrs = append(allErrs, field.Invalid(fldPath.Child("ProxyMode"), string(mode), errMsg)) + } + return allErrs +} + +func validateClientConnectionConfiguration(config kubeproxyconfig.ClientConnectionConfiguration, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(config.Burst), fldPath.Child("Burst"))...) 
+ return allErrs +} + +func validateHostPort(input string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + hostIP, port, err := net.SplitHostPort(input) + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath, input, "must be IP:port")) + return allErrs + } + + if ip := net.ParseIP(hostIP); ip == nil { + allErrs = append(allErrs, field.Invalid(fldPath, hostIP, "must be a valid IP")) + } + + if p, err := strconv.Atoi(port); err != nil { + allErrs = append(allErrs, field.Invalid(fldPath, port, "must be a valid port")) + } else if p < 1 || p > 65535 { + allErrs = append(allErrs, field.Invalid(fldPath, port, "must be a valid port")) + } + + return allErrs +} + +func validateIPVSSchedulerMethod(scheduler kubeproxyconfig.IPVSSchedulerMethod, fldPath *field.Path) field.ErrorList { + supportedMethod := []kubeproxyconfig.IPVSSchedulerMethod{ + kubeproxyconfig.RoundRobin, + kubeproxyconfig.WeightedRoundRobin, + kubeproxyconfig.LeastConnection, + kubeproxyconfig.WeightedLeastConnection, + kubeproxyconfig.LocalityBasedLeastConnection, + kubeproxyconfig.LocalityBasedLeastConnectionWithReplication, + kubeproxyconfig.SourceHashing, + kubeproxyconfig.DestinationHashing, + kubeproxyconfig.ShortestExpectedDelay, + kubeproxyconfig.NeverQueue, + "", + } + allErrs := field.ErrorList{} + var found bool + for i := range supportedMethod { + if scheduler == supportedMethod[i] { + found = true + break + } + } + // Not found + if !found { + errMsg := fmt.Sprintf("must be in %v, blank means the default algorithm method (currently rr)", supportedMethod) + allErrs = append(allErrs, field.Invalid(fldPath.Child("Scheduler"), string(scheduler), errMsg)) + } + return allErrs +} diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/zz_generated.deepcopy.go new file mode 100644 index 000000000000..7527116372a5 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/zz_generated.deepcopy.go @@ -0,0 +1,189 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package kubeproxyconfig + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientConnectionConfiguration) DeepCopyInto(out *ClientConnectionConfiguration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientConnectionConfiguration. +func (in *ClientConnectionConfiguration) DeepCopy() *ClientConnectionConfiguration { + if in == nil { + return nil + } + out := new(ClientConnectionConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *KubeProxyConfiguration) DeepCopyInto(out *KubeProxyConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ClientConnection = in.ClientConnection + in.IPTables.DeepCopyInto(&out.IPTables) + out.IPVS = in.IPVS + if in.OOMScoreAdj != nil { + in, out := &in.OOMScoreAdj, &out.OOMScoreAdj + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + out.UDPIdleTimeout = in.UDPIdleTimeout + in.Conntrack.DeepCopyInto(&out.Conntrack) + out.ConfigSyncPeriod = in.ConfigSyncPeriod + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyConfiguration. +func (in *KubeProxyConfiguration) DeepCopy() *KubeProxyConfiguration { + if in == nil { + return nil + } + out := new(KubeProxyConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeProxyConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeProxyConntrackConfiguration) DeepCopyInto(out *KubeProxyConntrackConfiguration) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.MaxPerCore != nil { + in, out := &in.MaxPerCore, &out.MaxPerCore + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.Min != nil { + in, out := &in.Min, &out.Min + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.TCPEstablishedTimeout != nil { + in, out := &in.TCPEstablishedTimeout, &out.TCPEstablishedTimeout + if *in == nil { + *out = nil + } else { + *out = new(v1.Duration) + **out = **in + } + } + if in.TCPCloseWaitTimeout != nil { + in, out := &in.TCPCloseWaitTimeout, &out.TCPCloseWaitTimeout + if *in == nil { + *out = nil + } else { + *out = new(v1.Duration) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyConntrackConfiguration. +func (in *KubeProxyConntrackConfiguration) DeepCopy() *KubeProxyConntrackConfiguration { + if in == nil { + return nil + } + out := new(KubeProxyConntrackConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeProxyIPTablesConfiguration) DeepCopyInto(out *KubeProxyIPTablesConfiguration) { + *out = *in + if in.MasqueradeBit != nil { + in, out := &in.MasqueradeBit, &out.MasqueradeBit + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + out.SyncPeriod = in.SyncPeriod + out.MinSyncPeriod = in.MinSyncPeriod + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyIPTablesConfiguration. +func (in *KubeProxyIPTablesConfiguration) DeepCopy() *KubeProxyIPTablesConfiguration { + if in == nil { + return nil + } + out := new(KubeProxyIPTablesConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubeProxyIPVSConfiguration) DeepCopyInto(out *KubeProxyIPVSConfiguration) { + *out = *in + out.SyncPeriod = in.SyncPeriod + out.MinSyncPeriod = in.MinSyncPeriod + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyIPVSConfiguration. +func (in *KubeProxyIPVSConfiguration) DeepCopy() *KubeProxyIPVSConfiguration { + if in == nil { + return nil + } + out := new(KubeProxyIPVSConfiguration) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/config/BUILD b/vendor/k8s.io/kubernetes/pkg/proxy/config/BUILD index 997dc924336f..a3f59aa45b2c 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/config/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/proxy/config/BUILD @@ -12,8 +12,9 @@ go_library( "config.go", "doc.go", ], + importpath = "k8s.io/kubernetes/pkg/proxy/config", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/client/informers/informers_generated/internalversion/core/internalversion:go_default_library", "//pkg/client/listers/core/internalversion:go_default_library", "//pkg/controller:go_default_library", @@ -29,9 +30,10 @@ go_test( "api_test.go", "config_test.go", ], + importpath = "k8s.io/kubernetes/pkg/proxy/config", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/client/clientset_generated/internalclientset/fake:go_default_library", "//pkg/client/informers/informers_generated/internalversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/config/config.go b/vendor/k8s.io/kubernetes/pkg/proxy/config/config.go index 1c79fdf6f240..33e0bd169efb 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/config/config.go +++ b/vendor/k8s.io/kubernetes/pkg/proxy/config/config.go @@ -23,7 +23,7 @@ import ( "github.com/golang/glog" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/tools/cache" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion" listers "k8s.io/kubernetes/pkg/client/listers/core/internalversion" "k8s.io/kubernetes/pkg/controller" diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/BUILD b/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/BUILD index 77b102f482f9..a909cef8b54f 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/BUILD @@ -12,8 +12,9 @@ go_library( "doc.go", "healthcheck.go", ], + importpath = "k8s.io/kubernetes/pkg/proxy/healthcheck", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/renstrom/dedent:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", @@ -27,6 +28,7 @@ go_library( go_test( name = "go_default_test", srcs = ["healthcheck_test.go"], + importpath = "k8s.io/kubernetes/pkg/proxy/healthcheck", library = ":go_default_library", deps = [ "//vendor/github.com/davecgh/go-spew/spew:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/healthcheck.go b/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/healthcheck.go index 7ea31d0a4ede..7ee1da6ff4e2 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/healthcheck.go +++ b/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/healthcheck.go @@ -33,7 +33,7 @@ import ( 
"k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/record" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) var nodeHealthzRetryInterval = 60 * time.Second diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/iptables/BUILD b/vendor/k8s.io/kubernetes/pkg/proxy/iptables/BUILD index ac49fb2093e4..cd7e76524e6d 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/iptables/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/proxy/iptables/BUILD @@ -9,23 +9,23 @@ load( go_library( name = "go_default_library", srcs = [ - "metrics.go", "proxier.go", ], + importpath = "k8s.io/kubernetes/pkg/proxy/iptables", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/helper:go_default_library", "//pkg/api/service:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/helper:go_default_library", "//pkg/features:go_default_library", "//pkg/proxy:go_default_library", "//pkg/proxy/healthcheck:go_default_library", + "//pkg/proxy/metrics:go_default_library", "//pkg/proxy/util:go_default_library", "//pkg/util/async:go_default_library", "//pkg/util/iptables:go_default_library", "//pkg/util/sysctl:go_default_library", "//pkg/util/version:go_default_library", "//vendor/github.com/golang/glog:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", @@ -39,15 +39,17 @@ go_library( go_test( name = "go_default_test", srcs = ["proxier_test.go"], + importpath = "k8s.io/kubernetes/pkg/proxy/iptables", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/proxy:go_default_library", "//pkg/proxy/util:go_default_library", "//pkg/util/async:go_default_library", "//pkg/util/iptables:go_default_library", "//pkg/util/iptables/testing:go_default_library", "//vendor/github.com/davecgh/go-spew/spew:go_default_library", + "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/iptables/metrics.go b/vendor/k8s.io/kubernetes/pkg/proxy/iptables/metrics.go deleted file mode 100644 index fabe6a59569d..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/proxy/iptables/metrics.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package iptables - -import ( - "sync" - "time" - - "github.com/prometheus/client_golang/prometheus" -) - -const kubeProxySubsystem = "kubeproxy" - -var ( - SyncProxyRulesLatency = prometheus.NewHistogram( - prometheus.HistogramOpts{ - Subsystem: kubeProxySubsystem, - Name: "sync_proxy_rules_latency_microseconds", - Help: "SyncProxyRules latency", - Buckets: prometheus.ExponentialBuckets(1000, 2, 15), - }, - ) -) - -var registerMetricsOnce sync.Once - -func RegisterMetrics() { - registerMetricsOnce.Do(func() { - prometheus.MustRegister(SyncProxyRulesLatency) - }) -} - -// Gets the time since the specified start in microseconds. -func sinceInMicroseconds(start time.Time) float64 { - return float64(time.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds()) -} diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go b/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go index 6d3bcc3606f1..1710d989f976 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go +++ b/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go @@ -41,12 +41,13 @@ import ( "k8s.io/apimachinery/pkg/util/wait" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/tools/record" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/helper" apiservice "k8s.io/kubernetes/pkg/api/service" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/helper" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/proxy" "k8s.io/kubernetes/pkg/proxy/healthcheck" + "k8s.io/kubernetes/pkg/proxy/metrics" utilproxy "k8s.io/kubernetes/pkg/proxy/util" "k8s.io/kubernetes/pkg/util/async" utiliptables "k8s.io/kubernetes/pkg/util/iptables" @@ -79,6 +80,9 @@ const ( // the mark-for-drop chain KubeMarkDropChain utiliptables.Chain = "KUBE-MARK-DROP" + + // the kubernetes forward chain + kubeForwardChain utiliptables.Chain = "KUBE-FORWARD" ) // IPTablesVersioner can query the current iptables version. @@ -166,12 +170,9 @@ type endpointsInfo struct { chainName utiliptables.Chain } -// Returns just the IP part of the endpoint. +// IPPart returns just the IP part of the endpoint. func (e *endpointsInfo) IPPart() string { - if index := strings.Index(e.endpoint, ":"); index != -1 { - return e.endpoint[0:index] - } - return e.endpoint + return utilproxy.IPPart(e.endpoint) } // Returns the endpoint chain name for a given endpointsInfo. 
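The hunk above replaces the proxier's hand-rolled colon split in `endpointsInfo.IPPart()` with the shared `utilproxy.IPPart` helper, which this diff also uses later for `endpointServicePair` and `deleteEndpointConnections`. The vendored helper itself is not shown in this section, so the sketch below is only an illustration, under the assumption that it behaves like `net.SplitHostPort`-based parsing: it has to cope with bracketed IPv6 endpoints such as `[fd00::1]:8080` (where a plain split on the first ":" returns garbage) and return an empty string for malformed endpoints so callers like the later `getLocalIPs` hunk can skip them. The function and package layout here are hypothetical, not the upstream API.

```go
package main

import (
	"fmt"
	"net"
)

// ipPart is an illustrative stand-in for the utilproxy.IPPart helper the
// proxier now calls: it extracts just the IP from an "IP:port" endpoint
// string. Unlike a naive split on the first ":", net.SplitHostPort also
// handles bracketed IPv6 endpoints such as "[fd00::1]:8080".
func ipPart(endpoint string) string {
	host, _, err := net.SplitHostPort(endpoint)
	if err != nil {
		// The vendored helper logs the parse error and returns ""; callers
		// such as getLocalIPs then skip the malformed endpoint.
		return ""
	}
	if ip := net.ParseIP(host); ip != nil {
		return ip.String()
	}
	return ""
}

func main() {
	fmt.Println(ipPart("10.0.0.5:53"))    // "10.0.0.5"
	fmt.Println(ipPart("[fd00::1]:8080")) // "fd00::1"
	fmt.Println(ipPart("no-port"))        // "" (malformed endpoint)
}
```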
@@ -320,12 +321,14 @@ func (scm *serviceChangeMap) update(namespacedName *types.NamespacedName, previo func (sm *proxyServiceMap) merge(other proxyServiceMap) sets.String { existingPorts := sets.NewString() for svcPortName, info := range other { + port := strconv.Itoa(info.port) + clusterIPPort := net.JoinHostPort(info.clusterIP.String(), port) existingPorts.Insert(svcPortName.Port) _, exists := (*sm)[svcPortName] if !exists { - glog.V(1).Infof("Adding new service port %q at %s:%d/%s", svcPortName, info.clusterIP, info.port, info.protocol) + glog.V(1).Infof("Adding new service port %q at %s/%s", svcPortName, clusterIPPort, info.protocol) } else { - glog.V(1).Infof("Updating existing service port %q at %s:%d/%s", svcPortName, info.clusterIP, info.port, info.protocol) + glog.V(1).Infof("Updating existing service port %q at %s/%s", svcPortName, clusterIPPort, info.protocol) } (*sm)[svcPortName] = info } @@ -440,11 +443,6 @@ func NewProxier(ipt utiliptables.Interface, recorder record.EventRecorder, healthzServer healthcheck.HealthzUpdater, ) (*Proxier, error) { - // check valid user input - if minSyncPeriod > syncPeriod { - return nil, fmt.Errorf("minSyncPeriod (%v) must be <= syncPeriod (%v)", minSyncPeriod, syncPeriod) - } - // Set the route_localnet sysctl we need for if err := sysctl.SetSysctl(sysctlRouteLocalnet, 1); err != nil { return nil, fmt.Errorf("can't set sysctl %s: %v", sysctlRouteLocalnet, err) @@ -458,9 +456,6 @@ func NewProxier(ipt utiliptables.Interface, } // Generate the masquerade mark to use for SNAT rules. - if masqueradeBit < 0 || masqueradeBit > 31 { - return nil, fmt.Errorf("invalid iptables-masquerade-bit %v not in [0, 31]", masqueradeBit) - } masqueradeValue := 1 << uint(masqueradeBit) masqueradeMark := fmt.Sprintf("%#08x/%#08x", masqueradeValue, masqueradeValue) @@ -543,6 +538,18 @@ func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) { } } + // Unlink the forwarding chain. + args = []string{ + "-m", "comment", "--comment", "kubernetes forwarding rules", + "-j", string(kubeForwardChain), + } + if err := ipt.DeleteRule(utiliptables.TableFilter, utiliptables.ChainForward, args...); err != nil { + if !utiliptables.IsNotFoundError(err) { + glog.Errorf("Error removing pure-iptables proxy rule: %v", err) + encounteredError = true + } + } + // Flush and remove all of our chains. iptablesData := bytes.NewBuffer(nil) if err := ipt.SaveInto(utiliptables.TableNAT, iptablesData); err != nil { @@ -578,14 +585,28 @@ func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) { encounteredError = true } } - { - filterBuf := bytes.NewBuffer(nil) - writeLine(filterBuf, "*filter") - writeLine(filterBuf, fmt.Sprintf(":%s - [0:0]", kubeServicesChain)) - writeLine(filterBuf, fmt.Sprintf("-X %s", kubeServicesChain)) - writeLine(filterBuf, "COMMIT") + + // Flush and remove all of our chains. 
+ iptablesData = bytes.NewBuffer(nil) + if err := ipt.SaveInto(utiliptables.TableFilter, iptablesData); err != nil { + glog.Errorf("Failed to execute iptables-save for %s: %v", utiliptables.TableFilter, err) + encounteredError = true + } else { + existingFilterChains := utiliptables.GetChainLines(utiliptables.TableFilter, iptablesData.Bytes()) + filterChains := bytes.NewBuffer(nil) + filterRules := bytes.NewBuffer(nil) + writeLine(filterChains, "*filter") + for _, chain := range []utiliptables.Chain{kubeServicesChain, kubeForwardChain} { + if _, found := existingFilterChains[chain]; found { + chainString := string(chain) + writeLine(filterChains, existingFilterChains[chain]) + writeLine(filterRules, "-X", chainString) + } + } + writeLine(filterRules, "COMMIT") + filterLines := append(filterChains.Bytes(), filterRules.Bytes()...) // Write it. - if err := ipt.Restore(utiliptables.TableFilter, filterBuf.Bytes(), utiliptables.NoFlushTables, utiliptables.RestoreCounters); err != nil { + if err := ipt.Restore(utiliptables.TableFilter, filterLines, utiliptables.NoFlushTables, utiliptables.RestoreCounters); err != nil { glog.Errorf("Failed to execute iptables-restore for %s: %v", utiliptables.TableFilter, err) encounteredError = true } @@ -798,11 +819,15 @@ func getLocalIPs(endpointsMap proxyEndpointsMap) map[types.NamespacedName]sets.S for svcPortName := range endpointsMap { for _, ep := range endpointsMap[svcPortName] { if ep.isLocal { - nsn := svcPortName.NamespacedName - if localIPs[nsn] == nil { - localIPs[nsn] = sets.NewString() + // If the endpoint has a bad format, utilproxy.IPPart() will log an + // error and ep.IPPart() will return a null string. + if ip := ep.IPPart(); ip != "" { + nsn := svcPortName.NamespacedName + if localIPs[nsn] == nil { + localIPs[nsn] = sets.NewString() + } + localIPs[nsn].Insert(ip) } - localIPs[nsn].Insert(ep.IPPart()) // just the IP part } } } @@ -924,10 +949,7 @@ type endpointServicePair struct { } func (esp *endpointServicePair) IPPart() string { - if index := strings.Index(esp.endpoint, ":"); index != -1 { - return esp.endpoint[0:index] - } - return esp.endpoint + return utilproxy.IPPart(esp.endpoint) } // After a UDP endpoint has been removed, we must flush any pending conntrack entries to it, or else we @@ -936,7 +958,7 @@ func (esp *endpointServicePair) IPPart() string { func (proxier *Proxier) deleteEndpointConnections(connectionMap map[endpointServicePair]bool) { for epSvcPair := range connectionMap { if svcInfo, ok := proxier.serviceMap[epSvcPair.servicePortName]; ok && svcInfo.protocol == api.ProtocolUDP { - endpointIP := epSvcPair.endpoint[0:strings.Index(epSvcPair.endpoint, ":")] + endpointIP := utilproxy.IPPart(epSvcPair.endpoint) err := utilproxy.ClearUDPConntrackForPeers(proxier.exec, svcInfo.clusterIP.String(), endpointIP) if err != nil { glog.Errorf("Failed to delete %s endpoint connections, error: %v", epSvcPair.servicePortName.String(), err) @@ -954,7 +976,7 @@ func (proxier *Proxier) syncProxyRules() { start := time.Now() defer func() { - SyncProxyRulesLatency.Observe(sinceInMicroseconds(start)) + metrics.SyncProxyRulesLatency.Observe(metrics.SinceInMicroseconds(start)) glog.V(4).Infof("syncProxyRules took %v", time.Since(start)) }() // don't sync rules till we've received services and endpoints @@ -1026,6 +1048,21 @@ func (proxier *Proxier) syncProxyRules() { } } + // Create and link the kube forward chain. 
+ { + if _, err := proxier.iptables.EnsureChain(utiliptables.TableFilter, kubeForwardChain); err != nil { + glog.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableFilter, kubeForwardChain, err) + return + } + + comment := "kubernetes forward rules" + args := []string{"-m", "comment", "--comment", comment, "-j", string(kubeForwardChain)} + if _, err := proxier.iptables.EnsureRule(utiliptables.Prepend, utiliptables.TableFilter, utiliptables.ChainForward, args...); err != nil { + glog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", utiliptables.TableFilter, utiliptables.ChainForward, kubeForwardChain, err) + return + } + } + // // Below this point we will not return until we try to write the iptables rules. // @@ -1068,6 +1105,11 @@ func (proxier *Proxier) syncProxyRules() { } else { writeLine(proxier.filterChains, utiliptables.MakeChainLine(kubeServicesChain)) } + if chain, ok := existingFilterChains[kubeForwardChain]; ok { + writeLine(proxier.filterChains, chain) + } else { + writeLine(proxier.filterChains, utiliptables.MakeChainLine(kubeForwardChain)) + } if chain, ok := existingNATChains[kubeServicesChain]; ok { writeLine(proxier.natChains, chain) } else { @@ -1162,7 +1204,7 @@ func (proxier *Proxier) syncProxyRules() { "-A", string(kubeServicesChain), "-m", "comment", "--comment", fmt.Sprintf(`"%s cluster IP"`, svcNameString), "-m", protocol, "-p", protocol, - "-d", fmt.Sprintf("%s/32", svcInfo.clusterIP.String()), + "-d", utilproxy.ToCIDR(svcInfo.clusterIP), "--dport", strconv.Itoa(svcInfo.port), ) if proxier.masqueradeAll { @@ -1216,7 +1258,7 @@ func (proxier *Proxier) syncProxyRules() { "-A", string(kubeServicesChain), "-m", "comment", "--comment", fmt.Sprintf(`"%s external IP"`, svcNameString), "-m", protocol, "-p", protocol, - "-d", fmt.Sprintf("%s/32", externalIP), + "-d", utilproxy.ToCIDR(net.ParseIP(externalIP)), "--dport", strconv.Itoa(svcInfo.port), ) // We have to SNAT packets to external IPs. @@ -1242,7 +1284,7 @@ func (proxier *Proxier) syncProxyRules() { "-A", string(kubeServicesChain), "-m", "comment", "--comment", fmt.Sprintf(`"%s has no endpoints"`, svcNameString), "-m", protocol, "-p", protocol, - "-d", fmt.Sprintf("%s/32", externalIP), + "-d", utilproxy.ToCIDR(net.ParseIP(externalIP)), "--dport", strconv.Itoa(svcInfo.port), "-j", "REJECT", ) @@ -1268,7 +1310,7 @@ func (proxier *Proxier) syncProxyRules() { "-A", string(kubeServicesChain), "-m", "comment", "--comment", fmt.Sprintf(`"%s loadbalancer IP"`, svcNameString), "-m", protocol, "-p", protocol, - "-d", fmt.Sprintf("%s/32", ingress.IP), + "-d", utilproxy.ToCIDR(net.ParseIP(ingress.IP)), "--dport", strconv.Itoa(svcInfo.port), ) // jump to service firewall chain @@ -1306,7 +1348,7 @@ func (proxier *Proxier) syncProxyRules() { // loadbalancer's backend hosts. In this case, request will not hit the loadbalancer but loop back directly. // Need to add the following rule to allow request on host. if allowFromNode { - writeLine(proxier.natRules, append(args, "-s", fmt.Sprintf("%s/32", ingress.IP), "-j", string(chosenChain))...) + writeLine(proxier.natRules, append(args, "-s", utilproxy.ToCIDR(net.ParseIP(ingress.IP)), "-j", string(chosenChain))...) } } @@ -1342,7 +1384,8 @@ func (proxier *Proxier) syncProxyRules() { // This is very low impact. The NodePort range is intentionally obscure, and unlikely to actually collide with real Services. // This only affects UDP connections, which are not common. 
// See issue: https://github.com/kubernetes/kubernetes/issues/49881 - err := utilproxy.ClearUDPConntrackForPort(proxier.exec, lp.Port) + isIPv6 := utilproxy.IsIPv6(svcInfo.clusterIP) + err := utilproxy.ClearUDPConntrackForPort(proxier.exec, lp.Port, isIPv6) if err != nil { glog.Errorf("Failed to clear udp conntrack for port %d, error: %v", lp.Port, err) } @@ -1389,7 +1432,7 @@ func (proxier *Proxier) syncProxyRules() { "-A", string(kubeServicesChain), "-m", "comment", "--comment", fmt.Sprintf(`"%s has no endpoints"`, svcNameString), "-m", protocol, "-p", protocol, - "-d", fmt.Sprintf("%s/32", svcInfo.clusterIP.String()), + "-d", utilproxy.ToCIDR(svcInfo.clusterIP), "--dport", strconv.Itoa(svcInfo.port), "-j", "REJECT", ) @@ -1433,6 +1476,11 @@ func (proxier *Proxier) syncProxyRules() { // Now write loadbalancing & DNAT rules. n := len(endpointChains) for i, endpointChain := range endpointChains { + epIP := endpoints[i].IPPart() + if epIP == "" { + // Error parsing this endpoint has been logged. Skip to next endpoint. + continue + } // Balancing rules in the per-service chain. args = append(args[:0], []string{ "-A", string(svcChain), @@ -1456,7 +1504,7 @@ func (proxier *Proxier) syncProxyRules() { ) // Handle traffic that loops back to the originator with SNAT. writeLine(proxier.natRules, append(args, - "-s", fmt.Sprintf("%s/32", endpoints[i].IPPart()), + "-s", utilproxy.ToCIDR(net.ParseIP(epIP)), "-j", string(KubeMarkMasqChain))...) // Update client-affinity lists. if svcInfo.sessionAffinityType == api.ServiceAffinityClientIP { @@ -1509,6 +1557,18 @@ func (proxier *Proxier) syncProxyRules() { ) writeLine(proxier.natRules, args...) } else { + // First write session affinity rules only over local endpoints, if applicable. + if svcInfo.sessionAffinityType == api.ServiceAffinityClientIP { + for _, endpointChain := range localEndpointChains { + writeLine(proxier.natRules, + "-A", string(svcXlbChain), + "-m", "comment", "--comment", svcNameString, + "-m", "recent", "--name", string(endpointChain), + "--rcheck", "--seconds", strconv.Itoa(svcInfo.stickyMaxAgeSeconds), "--reap", + "-j", string(endpointChain)) + } + } + // Setup probability filter rules only over local endpoints for i, endpointChain := range localEndpointChains { // Balancing rules in the per-service chain. @@ -1555,6 +1615,40 @@ func (proxier *Proxier) syncProxyRules() { "-m", "addrtype", "--dst-type", "LOCAL", "-j", string(kubeNodePortsChain)) + // If the masqueradeMark has been added then we want to forward that same + // traffic, this allows NodePort traffic to be forwarded even if the default + // FORWARD policy is not accept. + writeLine(proxier.filterRules, + "-A", string(kubeForwardChain), + "-m", "comment", "--comment", `"kubernetes forwarding rules"`, + "-m", "mark", "--mark", proxier.masqueradeMark, + "-j", "ACCEPT", + ) + + // The following rules can only be set if clusterCIDR has been defined. + if len(proxier.clusterCIDR) != 0 { + // The following two rules ensure the traffic after the initial packet + // accepted by the "kubernetes forwarding rules" rule above will be + // accepted, to be as specific as possible the traffic must be sourced + // or destined to the clusterCIDR (to/from a pod). 
+ writeLine(proxier.filterRules, + "-A", string(kubeForwardChain), + "-s", proxier.clusterCIDR, + "-m", "comment", "--comment", `"kubernetes forwarding conntrack pod source rule"`, + "-m", "conntrack", + "--ctstate", "RELATED,ESTABLISHED", + "-j", "ACCEPT", + ) + writeLine(proxier.filterRules, + "-A", string(kubeForwardChain), + "-m", "comment", "--comment", `"kubernetes forwarding conntrack pod destination rule"`, + "-d", proxier.clusterCIDR, + "-m", "conntrack", + "--ctstate", "RELATED,ESTABLISHED", + "-j", "ACCEPT", + ) + } + // Write the end-of-table markers. writeLine(proxier.filterRules, "COMMIT") writeLine(proxier.natRules, "COMMIT") @@ -1571,20 +1665,6 @@ func (proxier *Proxier) syncProxyRules() { err = proxier.iptables.RestoreAll(proxier.iptablesData.Bytes(), utiliptables.NoFlushTables, utiliptables.RestoreCounters) if err != nil { glog.Errorf("Failed to execute iptables-restore: %v", err) - // ~rough approximation, assume ~100 chars per line - // we log first 1000 bytes, but full list at higher levels - rules := proxier.iptablesData.Bytes() - if len(rules) > 1000 { - abridgedRules := rules[:1000] - if glog.V(4) { - glog.V(4).Infof("Rules:\n%s", rules) - } else { - glog.V(2).Infof("Rules (abridged):\n%s", abridgedRules) - } - } else { - glog.V(2).Infof("Rules:\n%s", rules) - } - // Revert new local ports. glog.V(2).Infof("Closing local ports after iptables-restore failure") utilproxy.RevertPorts(replacementPortsMap, proxier.portsMap) @@ -1616,7 +1696,7 @@ func (proxier *Proxier) syncProxyRules() { // Finish housekeeping. // TODO: these could be made more consistent. - for _, svcIP := range staleServices.List() { + for _, svcIP := range staleServices.UnsortedList() { if err := utilproxy.ClearUDPConntrackForIP(proxier.exec, svcIP); err != nil { glog.Errorf("Failed to delete stale service IP %s connections, error: %v", svcIP, err) } diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/BUILD b/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/BUILD index bab8f9af8394..30945e443348 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/BUILD @@ -1,7 +1,5 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", @@ -10,59 +8,77 @@ load( go_test( name = "go_default_test", - srcs = select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ - "proxier_test.go", - ], - "//conditions:default": [], - }), + srcs = [ + "ipset_test.go", + "proxier_test.go", + ], + importpath = "k8s.io/kubernetes/pkg/proxy/ipvs", library = ":go_default_library", - tags = ["automanaged"], - deps = select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ - "//pkg/api:go_default_library", - "//pkg/proxy:go_default_library", - "//pkg/proxy/util:go_default_library", - "//pkg/util/iptables:go_default_library", - "//pkg/util/iptables/testing:go_default_library", - "//pkg/util/ipvs:go_default_library", - "//pkg/util/ipvs/testing:go_default_library", - "//vendor/github.com/davecgh/go-spew/spew:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/k8s.io/utils/exec:go_default_library", - "//vendor/k8s.io/utils/exec/testing:go_default_library", - ], - "//conditions:default": [], - }), + deps = [ + "//pkg/apis/core:go_default_library", + 
"//pkg/proxy:go_default_library", + "//pkg/proxy/ipvs/testing:go_default_library", + "//pkg/proxy/util:go_default_library", + "//pkg/util/ipset:go_default_library", + "//pkg/util/ipset/testing:go_default_library", + "//pkg/util/iptables:go_default_library", + "//pkg/util/iptables/testing:go_default_library", + "//pkg/util/ipvs:go_default_library", + "//pkg/util/ipvs/testing:go_default_library", + "//vendor/github.com/davecgh/go-spew/spew:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//vendor/k8s.io/utils/exec:go_default_library", + "//vendor/k8s.io/utils/exec/testing:go_default_library", + ], ) go_library( name = "go_default_library", - srcs = ["proxier.go"], - tags = ["automanaged"], + srcs = [ + "ipset.go", + "netlink.go", + "netlink_unsupported.go", + "proxier.go", + ] + select({ + "@io_bazel_rules_go//go/platform:linux_amd64": [ + "netlink_linux.go", + ], + "//conditions:default": [], + }), + importpath = "k8s.io/kubernetes/pkg/proxy/ipvs", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/helper:go_default_library", "//pkg/api/service:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/helper:go_default_library", "//pkg/features:go_default_library", "//pkg/proxy:go_default_library", "//pkg/proxy/healthcheck:go_default_library", + "//pkg/proxy/metrics:go_default_library", "//pkg/proxy/util:go_default_library", + "//pkg/util/async:go_default_library", + "//pkg/util/ipset:go_default_library", "//pkg/util/iptables:go_default_library", "//pkg/util/ipvs:go_default_library", "//pkg/util/sysctl:go_default_library", + "//pkg/util/version:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/client-go/tools/record:go_default_library", - "//vendor/k8s.io/client-go/util/flowcontrol:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", - ], + ] + select({ + "@io_bazel_rules_go//go/platform:linux_amd64": [ + "//vendor/github.com/vishvananda/netlink:go_default_library", + ], + "//conditions:default": [], + }), ) filegroup( @@ -74,6 +90,9 @@ filegroup( filegroup( name = "all-srcs", - srcs = [":package-srcs"], + srcs = [ + ":package-srcs", + "//pkg/proxy/ipvs/testing:all-srcs", + ], tags = ["automanaged"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/README.md b/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/README.md new file mode 100644 index 000000000000..c0f616c92170 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/README.md @@ -0,0 +1,75 @@ +# How to use IPVS + +This document shows how to use kube-proxy ipvs mode. + +## What is IPVS + +**IPVS (IP Virtual Server)** implements transport-layer load balancing, usually called Layer 4 LAN switching, as part of +Linux kernel. + +IPVS runs on a host and acts as a load balancer in front of a cluster of real servers. 
IPVS can direct requests for TCP +and UDP-based services to the real servers, and make services of real servers appear as virtual services on a single IP address. + +## How to use + +#### Load IPVS kernel modules + +Currently the IPVS kernel modules can't be loaded automatically, so first we should use the following commands to load the IPVS kernel +modules manually. + +```shell +modprobe ip_vs +modprobe ip_vs_rr +modprobe ip_vs_wrr +modprobe ip_vs_sh +modprobe nf_conntrack_ipv4 +``` + +After that, use `lsmod | grep ip_vs` to make sure the kernel modules are loaded. + +#### Run kube-proxy in ipvs mode + +#### Local UP Cluster + +Kube-proxy will run in iptables mode by default in a [local-up cluster](https://github.com/kubernetes/community/blob/master/contributors/devel/running-locally.md). + +Users should export the env `KUBEPROXY_MODE=ipvs` to specify the ipvs mode before deploying the cluster if they want to run kube-proxy in ipvs mode. + +#### Cluster Created by Kubeadm + +Kube-proxy will run in iptables mode by default in a cluster deployed by [kubeadm](https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/). + +Since IPVS mode is still feature-gated, users should add the flag `--feature-gates=SupportIPVSProxyMode=true` to the `kubeadm init` command + +``` +kubeadm init --feature-gates=SupportIPVSProxyMode=true +``` + +to specify the ipvs mode before deploying the cluster if they want to run kube-proxy in ipvs mode. + +If you are using kubeadm with a configuration file, you can specify the ipvs mode by adding `SupportIPVSProxyMode: true` below the `featureGates` field. +Then the configuration file is similar to: + +```yaml +kind: MasterConfiguration +apiVersion: kubeadm.k8s.io/v1alpha1 +... +featureGates: + SupportIPVSProxyMode: true +... +``` + +#### Test + +Use the `ipvsadm` tool to check whether kube-proxy started successfully. By default we may get a result like: + +```shell +# ipvsadm -ln +IP Virtual Server version 1.2.1 (size=4096) +Prot LocalAddress:Port Scheduler Flags + -> RemoteAddress:Port Forward Weight ActiveConn InActConn +TCP 10.0.0.1:443 rr persistent 10800 + -> 10.229.43.2:6443 Masq 1 0 0 +TCP 10.0.0.10:53 rr +UDP 10.0.0.10:53 rr +``` diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/ipset.go b/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/ipset.go new file mode 100644 index 000000000000..0a01f0bacc67 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/ipset.go @@ -0,0 +1,157 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ipvs + +import ( + "k8s.io/apimachinery/pkg/util/sets" + utilipset "k8s.io/kubernetes/pkg/util/ipset" + utilversion "k8s.io/kubernetes/pkg/util/version" + + "github.com/golang/glog" +) + +const ( + // MinIPSetCheckVersion is the min ipset version we need. IPv6 is supported in ipset 6.x + MinIPSetCheckVersion = "6.0" + + // KubeLoopBackIPSet is used to store endpoints dst ip:port, source ip for solving hairpin purpose.
+ KubeLoopBackIPSet = "KUBE-LOOP-BACK" + + // KubeClusterIPSet is used to store service cluster ip + port for masquerade purpose. + KubeClusterIPSet = "KUBE-CLUSTER-IP" + + // KubeExternalIPSet is used to store service external ip + port for masquerade and filter purpose. + KubeExternalIPSet = "KUBE-EXTERNAL-IP" + + // KubeLoadBalancerSet is used to store service load balancer ingress ip + port, it is the service lb portal. + KubeLoadBalancerSet = "KUBE-LOAD-BALANCER" + + // KubeLoadBalancerMasqSet is used to store service load balancer ingress ip + port for masquerade purpose. + KubeLoadBalancerMasqSet = "KUBE-LOAD-BALANCER-MASQ" + + // KubeLoadBalancerSourceIPSet is used to store service load balancer ingress ip + port + source IP for packet filter purpose. + KubeLoadBalancerSourceIPSet = "KUBE-LOAD-BALANCER-SOURCE-IP" + + // KubeLoadBalancerSourceCIDRSet is used to store service load balancer ingress ip + port + source cidr for packet filter purpose. + KubeLoadBalancerSourceCIDRSet = "KUBE-LOAD-BALANCER-SOURCE-CIDR" + + // KubeNodePortSetTCP is used to store nodeport TCP port for masquerade purpose. + KubeNodePortSetTCP = "KUBE-NODE-PORT-TCP" + + // KubeNodePortSetUDP is used to store nodeport UDP port for masquerade purpose. + KubeNodePortSetUDP = "KUBE-NODE-PORT-UDP" +) + +// IPSetVersioner can query the current ipset version. +type IPSetVersioner interface { + // returns "X.Y" + GetVersion() (string, error) +} + +// IPSet wraps util/ipset which is used by IPVS proxier. +type IPSet struct { + utilipset.IPSet + // activeEntries is the current active entries of the ipset. + activeEntries sets.String + // handle is the util ipset interface handle. + handle utilipset.Interface +} + +// NewIPSet initialize a new IPSet struct +func NewIPSet(handle utilipset.Interface, name string, setType utilipset.Type, isIPv6 bool) *IPSet { + hashFamily := utilipset.ProtocolFamilyIPV4 + if isIPv6 { + hashFamily = utilipset.ProtocolFamilyIPV6 + } + set := &IPSet{ + IPSet: utilipset.IPSet{ + Name: name, + SetType: setType, + HashFamily: hashFamily, + }, + activeEntries: sets.NewString(), + handle: handle, + } + return set +} + +func (set *IPSet) isEmpty() bool { + return len(set.activeEntries.UnsortedList()) == 0 +} + +func (set *IPSet) resetEntries() { + set.activeEntries = sets.NewString() +} + +func (set *IPSet) syncIPSetEntries() { + appliedEntries, err := set.handle.ListEntries(set.Name) + if err != nil { + glog.Errorf("Failed to list ip set entries, error: %v", err) + return + } + + // currentIPSetEntries represents Endpoints watched from API Server. 
+ currentIPSetEntries := sets.NewString() + for _, appliedEntry := range appliedEntries { + currentIPSetEntries.Insert(appliedEntry) + } + + if !set.activeEntries.Equal(currentIPSetEntries) { + // Clean legacy entries + for _, entry := range currentIPSetEntries.Difference(set.activeEntries).List() { + if err := set.handle.DelEntry(entry, set.Name); err != nil { + glog.Errorf("Failed to delete ip set entry: %s from ip set: %s, error: %v", entry, set.Name, err) + } else { + glog.V(3).Infof("Successfully delete legacy ip set entry: %s from ip set: %s", entry, set.Name) + } + } + // Create active entries + for _, entry := range set.activeEntries.Difference(currentIPSetEntries).List() { + if err := set.handle.AddEntry(entry, set.Name, true); err != nil { + glog.Errorf("Failed to add entry: %v to ip set: %s, error: %v", entry, set.Name, err) + } else { + glog.V(3).Infof("Successfully add entry: %v to ip set: %s", entry, set.Name) + } + } + } +} + +func ensureIPSets(ipSets ...*IPSet) error { + for _, set := range ipSets { + if err := set.handle.CreateSet(&set.IPSet, true); err != nil { + glog.Errorf("Failed to make sure ip set: %v exist, error: %v", set, err) + return err + } + } + return nil +} + +// checkMinVersion checks if ipset current version satisfies required min version +func checkMinVersion(vstring string) bool { + version, err := utilversion.ParseGeneric(vstring) + if err != nil { + glog.Errorf("vstring (%s) is not a valid version string: %v", vstring, err) + return false + } + + minVersion, err := utilversion.ParseGeneric(MinIPSetCheckVersion) + if err != nil { + glog.Errorf("MinCheckVersion (%s) is not a valid version string: %v", MinIPSetCheckVersion, err) + return false + } + return !version.LessThan(minVersion) +} diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/netlink.go b/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/netlink.go new file mode 100644 index 000000000000..4f66f706ee5a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/netlink.go @@ -0,0 +1,29 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ipvs + +// NetLinkHandle for revoke netlink interface +type NetLinkHandle interface { + // EnsureAddressBind checks if address is bound to the interface and, if not, binds it. If the address is already bound, return true. + EnsureAddressBind(address, devName string) (exist bool, err error) + // UnbindAddress unbind address from the interface + UnbindAddress(address, devName string) error + // EnsureDummyDevice checks if dummy device is exist and, if not, create one. If the dummy device is already exist, return true. + EnsureDummyDevice(devName string) (exist bool, err error) + // DeleteDummyDevice deletes the given dummy device by name. 
+ DeleteDummyDevice(devName string) error +} diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/netlink_linux.go b/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/netlink_linux.go new file mode 100644 index 000000000000..e709afafabbd --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/netlink_linux.go @@ -0,0 +1,98 @@ +// +build linux + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ipvs + +import ( + "fmt" + "net" + "syscall" + + "github.com/vishvananda/netlink" +) + +type netlinkHandle struct { + netlink.Handle +} + +// NewNetLinkHandle will crate a new netlinkHandle +func NewNetLinkHandle() NetLinkHandle { + return &netlinkHandle{netlink.Handle{}} +} + +// EnsureAddressBind checks if address is bound to the interface and, if not, binds it. If the address is already bound, return true. +func (h *netlinkHandle) EnsureAddressBind(address, devName string) (exist bool, err error) { + dev, err := h.LinkByName(devName) + if err != nil { + return false, fmt.Errorf("error get interface: %s, err: %v", devName, err) + } + addr := net.ParseIP(address) + if addr == nil { + return false, fmt.Errorf("error parse ip address: %s", address) + } + if err := h.AddrAdd(dev, &netlink.Addr{IPNet: netlink.NewIPNet(addr)}); err != nil { + // "EEXIST" will be returned if the address is already bound to device + if err == syscall.Errno(syscall.EEXIST) { + return true, nil + } + return false, fmt.Errorf("error bind address: %s to interface: %s, err: %v", address, devName, err) + } + return false, nil +} + +// UnbindAddress unbind address from the interface +func (h *netlinkHandle) UnbindAddress(address, devName string) error { + dev, err := h.LinkByName(devName) + if err != nil { + return fmt.Errorf("error get interface: %s, err: %v", devName, err) + } + addr := net.ParseIP(address) + if addr == nil { + return fmt.Errorf("error parse ip address: %s", address) + } + if err := h.AddrDel(dev, &netlink.Addr{IPNet: netlink.NewIPNet(addr)}); err != nil { + return fmt.Errorf("error unbind address: %s from interface: %s, err: %v", address, devName, err) + } + return nil +} + +// EnsureDummyDevice is part of interface +func (h *netlinkHandle) EnsureDummyDevice(devName string) (bool, error) { + _, err := h.LinkByName(devName) + if err == nil { + // found dummy device + return true, nil + } + dummy := &netlink.Dummy{ + LinkAttrs: netlink.LinkAttrs{Name: devName}, + } + return false, h.LinkAdd(dummy) +} + +// DeleteDummyDevice is part of interface. 
+func (h *netlinkHandle) DeleteDummyDevice(devName string) error { + link, err := h.LinkByName(devName) + if err != nil { + return fmt.Errorf("error deleting a non-exist dummy device: %s", devName) + } + dummy, ok := link.(*netlink.Dummy) + if !ok { + return fmt.Errorf("expect dummy device, got device type: %s", link.Type()) + } + return h.LinkDel(dummy) +} diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/netlink_unsupported.go b/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/netlink_unsupported.go new file mode 100644 index 000000000000..1e22685b2793 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/netlink_unsupported.go @@ -0,0 +1,51 @@ +// +build !linux + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ipvs + +import ( + "fmt" +) + +type emptyHandle struct { +} + +// NewNetLinkHandle will create an EmptyHandle +func NewNetLinkHandle() NetLinkHandle { + return &emptyHandle{} +} + +// EnsureAddressBind checks if address is bound to the interface and, if not, binds it. If the address is already bound, return true. +func (h *emptyHandle) EnsureAddressBind(address, devName string) (exist bool, err error) { + return false, fmt.Errorf("netlink not supported for this platform") +} + +// UnbindAddress unbind address from the interface +func (h *emptyHandle) UnbindAddress(address, devName string) error { + return fmt.Errorf("netlink not supported for this platform") +} + +// EnsureDummyDevice is part of interface +func (h *emptyHandle) EnsureDummyDevice(devName string) (bool, error) { + return false, fmt.Errorf("netlink is not supported in this platform") +} + +// DeleteDummyDevice is part of interface. 
+func (h *emptyHandle) DeleteDummyDevice(devName string) error { + return fmt.Errorf("netlink is not supported in this platform") +} diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/proxier.go b/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/proxier.go index 6c4ad3572402..5f5a09d24473 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/proxier.go +++ b/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/proxier.go @@ -28,23 +28,28 @@ import ( "strconv" "strings" "sync" + "sync/atomic" "time" "github.com/golang/glog" clientv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/tools/record" - "k8s.io/client-go/util/flowcontrol" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/helper" apiservice "k8s.io/kubernetes/pkg/api/service" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/helper" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/proxy" "k8s.io/kubernetes/pkg/proxy/healthcheck" + "k8s.io/kubernetes/pkg/proxy/metrics" utilproxy "k8s.io/kubernetes/pkg/proxy/util" + "k8s.io/kubernetes/pkg/util/async" + utilipset "k8s.io/kubernetes/pkg/util/ipset" utiliptables "k8s.io/kubernetes/pkg/util/iptables" utilipvs "k8s.io/kubernetes/pkg/util/ipvs" utilsysctl "k8s.io/kubernetes/pkg/util/sysctl" @@ -55,6 +60,12 @@ const ( // kubeServicesChain is the services portal chain kubeServicesChain utiliptables.Chain = "KUBE-SERVICES" + // KubeServiceIPSetsChain is the services access IP chain + KubeServiceIPSetsChain utiliptables.Chain = "KUBE-SVC-IPSETS" + + // KubeFireWallChain is the kubernetes firewall chain. + KubeFireWallChain utiliptables.Chain = "KUBE-FIRE-WALL" + // kubePostroutingChain is the kubernetes postrouting chain kubePostroutingChain utiliptables.Chain = "KUBE-POSTROUTING" @@ -63,11 +74,10 @@ const ( // KubeMarkDropChain is the mark-for-drop chain KubeMarkDropChain utiliptables.Chain = "KUBE-MARK-DROP" -) -const ( // DefaultScheduler is the default ipvs scheduler algorithm - round robin. DefaultScheduler = "rr" + // DefaultDummyDevice is the default dummy interface where ipvs service address will bind to it. DefaultDummyDevice = "kube-ipvs0" ) @@ -105,14 +115,15 @@ type Proxier struct { // with some partial data after kube-proxy restart. endpointsSynced bool servicesSynced bool - - throttle flowcontrol.RateLimiter + initialized int32 + syncRunner *async.BoundedFrequencyRunner // governs calls to syncProxyRules // These are effectively const and do not need the mutex to be held. syncPeriod time.Duration minSyncPeriod time.Duration iptables utiliptables.Interface ipvs utilipvs.Interface + ipset utilipset.Interface exec utilexec.Interface masqueradeAll bool masqueradeMark string @@ -131,6 +142,28 @@ type Proxier struct { iptablesData *bytes.Buffer natChains *bytes.Buffer natRules *bytes.Buffer + // Added as a member to the struct to allow injection for testing. + netlinkHandle NetLinkHandle + // loopbackSet is the ipset where stores all endpoints IP:Port,IP for solving hairpin mode purpose. 
+ loopbackSet *IPSet + // clusterIPSet is the ipset where stores all service ClusterIP:Port + clusterIPSet *IPSet + // nodePortSetTCP is the bitmap:port type ipset where stores all TCP node port + nodePortSetTCP *IPSet + // nodePortSetTCP is the bitmap:port type ipset where stores all UDP node port + nodePortSetUDP *IPSet + // externalIPSet is the hash:ip,port type ipset where stores all service ExternalIP:Port + externalIPSet *IPSet + // lbIngressSet is the hash:ip,port type ipset where stores all service load balancer ingress IP:Port. + lbIngressSet *IPSet + // lbMasqSet is the hash:ip,port type ipset where stores all service load balancer ingress IP:Port which needs masquerade. + lbMasqSet *IPSet + // lbWhiteListIPSet is the hash:ip,port,ip type ipset where stores all service load balancer ingress IP:Port,sourceIP pair, any packets + // with the source IP visit ingress IP:Port can pass through. + lbWhiteListIPSet *IPSet + // lbWhiteListIPSet is the hash:ip,port,net type ipset where stores all service load balancer ingress IP:Port,sourceCIDR pair, any packets + // from the source CIDR visit ingress IP:Port can pass through. + lbWhiteListCIDRSet *IPSet } // IPGetter helps get node network interface IP @@ -153,17 +186,17 @@ func (r *realIPGetter) NodeIPs() (ips []net.IP, err error) { } intf, err := net.InterfaceByName(name) if err != nil { + utilruntime.HandleError(fmt.Errorf("Failed to get interface by name: %s, error: %v", name, err)) continue } addrs, err := intf.Addrs() if err != nil { + utilruntime.HandleError(fmt.Errorf("Failed to get addresses from interface: %s, error: %v", name, err)) continue } for _, a := range addrs { if ipnet, ok := a.(*net.IPNet); ok { - if ipnet.IP.To4() != nil { - ips = append(ips, ipnet.IP.To4()) - } + ips = append(ips, ipnet.IP) } } } @@ -178,7 +211,9 @@ var _ proxy.ProxyProvider = &Proxier{} // An error will be returned if it fails to update or acquire the initial lock. // Once a proxier is created, it will keep iptables and ipvs rules up to date in the background and // will not terminate if a particular iptables or ipvs call fails. -func NewProxier(ipt utiliptables.Interface, ipvs utilipvs.Interface, +func NewProxier(ipt utiliptables.Interface, + ipvs utilipvs.Interface, + ipset utilipset.Interface, sysctl utilsysctl.Interface, exec utilexec.Interface, syncPeriod time.Duration, @@ -192,11 +227,6 @@ func NewProxier(ipt utiliptables.Interface, ipvs utilipvs.Interface, healthzServer healthcheck.HealthzUpdater, scheduler string, ) (*Proxier, error) { - // check valid user input - if minSyncPeriod > syncPeriod { - return nil, fmt.Errorf("min-sync (%v) must be < sync(%v)", minSyncPeriod, syncPeriod) - } - // Set the route_localnet sysctl we need for if err := sysctl.SetSysctl(sysctlRouteLocalnet, 1); err != nil { return nil, fmt.Errorf("can't set sysctl %s: %v", sysctlRouteLocalnet, err) @@ -220,9 +250,6 @@ func NewProxier(ipt utiliptables.Interface, ipvs utilipvs.Interface, } // Generate the masquerade mark to use for SNAT rules. 
- if masqueradeBit < 0 || masqueradeBit > 31 { - return nil, fmt.Errorf("invalid iptables-masquerade-bit %v not in [0, 31]", masqueradeBit) - } masqueradeValue := 1 << uint(masqueradeBit) masqueradeMark := fmt.Sprintf("%#08x/%#08x", masqueradeValue, masqueradeValue) @@ -242,41 +269,51 @@ func NewProxier(ipt utiliptables.Interface, ipvs utilipvs.Interface, healthChecker := healthcheck.NewServer(hostname, recorder, nil, nil) // use default implementations of deps - var throttle flowcontrol.RateLimiter - // Defaulting back to not limit sync rate when minSyncPeriod is 0. - if minSyncPeriod != 0 { - syncsPerSecond := float32(time.Second) / float32(minSyncPeriod) - // The average use case will process 2 updates in short succession - throttle = flowcontrol.NewTokenBucketRateLimiter(syncsPerSecond, 2) - } - - return &Proxier{ - portsMap: make(map[utilproxy.LocalPort]utilproxy.Closeable), - serviceMap: make(proxyServiceMap), - serviceChanges: newServiceChangeMap(), - endpointsMap: make(proxyEndpointsMap), - endpointsChanges: newEndpointsChangeMap(), - syncPeriod: syncPeriod, - minSyncPeriod: minSyncPeriod, - throttle: throttle, - iptables: ipt, - masqueradeAll: masqueradeAll, - masqueradeMark: masqueradeMark, - exec: exec, - clusterCIDR: clusterCIDR, - hostname: hostname, - nodeIP: nodeIP, - portMapper: &listenPortOpener{}, - recorder: recorder, - healthChecker: healthChecker, - healthzServer: healthzServer, - ipvs: ipvs, - ipvsScheduler: scheduler, - ipGetter: &realIPGetter{}, - iptablesData: bytes.NewBuffer(nil), - natChains: bytes.NewBuffer(nil), - natRules: bytes.NewBuffer(nil), - }, nil + isIPv6 := utilproxy.IsIPv6(nodeIP) + + glog.V(2).Infof("nodeIP: %v, isIPv6: %v", nodeIP, isIPv6) + + proxier := &Proxier{ + portsMap: make(map[utilproxy.LocalPort]utilproxy.Closeable), + serviceMap: make(proxyServiceMap), + serviceChanges: newServiceChangeMap(), + endpointsMap: make(proxyEndpointsMap), + endpointsChanges: newEndpointsChangeMap(hostname), + syncPeriod: syncPeriod, + minSyncPeriod: minSyncPeriod, + iptables: ipt, + masqueradeAll: masqueradeAll, + masqueradeMark: masqueradeMark, + exec: exec, + clusterCIDR: clusterCIDR, + hostname: hostname, + nodeIP: nodeIP, + portMapper: &listenPortOpener{}, + recorder: recorder, + healthChecker: healthChecker, + healthzServer: healthzServer, + ipvs: ipvs, + ipvsScheduler: scheduler, + ipGetter: &realIPGetter{}, + iptablesData: bytes.NewBuffer(nil), + natChains: bytes.NewBuffer(nil), + natRules: bytes.NewBuffer(nil), + netlinkHandle: NewNetLinkHandle(), + ipset: ipset, + loopbackSet: NewIPSet(ipset, KubeLoopBackIPSet, utilipset.HashIPPortIP, isIPv6), + clusterIPSet: NewIPSet(ipset, KubeClusterIPSet, utilipset.HashIPPort, isIPv6), + externalIPSet: NewIPSet(ipset, KubeExternalIPSet, utilipset.HashIPPort, isIPv6), + lbIngressSet: NewIPSet(ipset, KubeLoadBalancerSet, utilipset.HashIPPort, isIPv6), + lbMasqSet: NewIPSet(ipset, KubeLoadBalancerMasqSet, utilipset.HashIPPort, isIPv6), + lbWhiteListIPSet: NewIPSet(ipset, KubeLoadBalancerSourceIPSet, utilipset.HashIPPortIP, isIPv6), + lbWhiteListCIDRSet: NewIPSet(ipset, KubeLoadBalancerSourceCIDRSet, utilipset.HashIPPortNet, isIPv6), + nodePortSetTCP: NewIPSet(ipset, KubeNodePortSetTCP, utilipset.BitmapPort, false), + nodePortSetUDP: NewIPSet(ipset, KubeNodePortSetUDP, utilipset.BitmapPort, false), + } + burstSyncs := 2 + glog.V(3).Infof("minSyncPeriod: %v, syncPeriod: %v, burstSyncs: %d", minSyncPeriod, syncPeriod, burstSyncs) + proxier.syncRunner = async.NewBoundedFrequencyRunner("sync-runner", proxier.syncProxyRules, 
minSyncPeriod, syncPeriod, burstSyncs) + return proxier, nil } type proxyServiceMap map[proxy.ServicePortName]*serviceInfo @@ -294,37 +331,41 @@ type serviceInfo struct { loadBalancerSourceRanges []string onlyNodeLocalEndpoints bool healthCheckNodePort int + // The following fields are computed and stored for performance reasons. + serviceNameString string } // is updated by this function (based on the given changes). // map is cleared after applying them. func updateServiceMap( serviceMap proxyServiceMap, - changes *serviceChangeMap) (syncRequired bool, hcServices map[types.NamespacedName]uint16, staleServices sets.String) { - syncRequired = false - staleServices = sets.NewString() - - for _, change := range changes.items { - mergeSyncRequired, existingPorts := serviceMap.mergeService(change.current) - unmergeSyncRequired := serviceMap.unmergeService(change.previous, existingPorts, staleServices) - syncRequired = syncRequired || mergeSyncRequired || unmergeSyncRequired - } - changes.items = make(map[types.NamespacedName]*serviceChange) + changes *serviceChangeMap) (result updateServiceMapResult) { + result.staleServices = sets.NewString() + + func() { + changes.lock.Lock() + defer changes.lock.Unlock() + for _, change := range changes.items { + existingPorts := serviceMap.merge(change.current) + serviceMap.unmerge(change.previous, existingPorts, result.staleServices) + } + changes.items = make(map[types.NamespacedName]*serviceChange) + }() // TODO: If this will appear to be computationally expensive, consider // computing this incrementally similarly to serviceMap. - hcServices = make(map[types.NamespacedName]uint16) - for svcPort, info := range serviceMap { + result.hcServices = make(map[types.NamespacedName]uint16) + for svcPortName, info := range serviceMap { if info.healthCheckNodePort != 0 { - hcServices[svcPort.NamespacedName] = uint16(info.healthCheckNodePort) + result.hcServices[svcPortName.NamespacedName] = uint16(info.healthCheckNodePort) } } - return syncRequired, hcServices, staleServices + return result } // returns a new serviceInfo struct -func newServiceInfo(serviceName proxy.ServicePortName, port *api.ServicePort, service *api.Service) *serviceInfo { +func newServiceInfo(svcPortName proxy.ServicePortName, port *api.ServicePort, service *api.Service) *serviceInfo { onlyNodeLocalEndpoints := false if utilfeature.DefaultFeatureGate.Enabled(features.ExternalTrafficLocalOnly) && apiservice.RequestsOnlyLocalTraffic(service) { @@ -347,90 +388,77 @@ func newServiceInfo(serviceName proxy.ServicePortName, port *api.ServicePort, se loadBalancerSourceRanges: make([]string, len(service.Spec.LoadBalancerSourceRanges)), onlyNodeLocalEndpoints: onlyNodeLocalEndpoints, } + copy(info.loadBalancerSourceRanges, service.Spec.LoadBalancerSourceRanges) copy(info.externalIPs, service.Spec.ExternalIPs) if apiservice.NeedsHealthCheck(service) { p := service.Spec.HealthCheckNodePort if p == 0 { - glog.Errorf("Service %q has no healthcheck nodeport", serviceName) + glog.Errorf("Service %q has no healthcheck nodeport", svcPortName.NamespacedName.String()) } else { info.healthCheckNodePort = int(p) } } + // Store the following for performance reasons. 
+ info.serviceNameString = svcPortName.String() + return info } -func (sm *proxyServiceMap) mergeService(service *api.Service) (bool, sets.String) { - if service == nil { - return false, nil - } - svcName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name} - if utilproxy.ShouldSkipService(svcName, service) { - return false, nil - } - syncRequired := false +func (sm *proxyServiceMap) merge(other proxyServiceMap) sets.String { existingPorts := sets.NewString() - for i := range service.Spec.Ports { - servicePort := &service.Spec.Ports[i] - serviceName := proxy.ServicePortName{NamespacedName: svcName, Port: servicePort.Name} - existingPorts.Insert(servicePort.Name) - info := newServiceInfo(serviceName, servicePort, service) - oldInfo, exists := (*sm)[serviceName] - equal := reflect.DeepEqual(info, oldInfo) - if exists { - glog.V(1).Infof("Adding new service %q at %s:%d/%s", serviceName, info.clusterIP, servicePort.Port, servicePort.Protocol) - } else if !equal { - glog.V(1).Infof("Updating existing service %q at %s:%d/%s", serviceName, info.clusterIP, servicePort.Port, servicePort.Protocol) - } - if !equal { - (*sm)[serviceName] = info - syncRequired = true + for svcPortName, info := range other { + existingPorts.Insert(svcPortName.Port) + _, exists := (*sm)[svcPortName] + if !exists { + glog.V(1).Infof("Adding new service port %q at %s:%d/%s", svcPortName, info.clusterIP, info.port, info.protocol) + } else { + glog.V(1).Infof("Updating existing service port %q at %s:%d/%s", svcPortName, info.clusterIP, info.port, info.protocol) } + (*sm)[svcPortName] = info } - return syncRequired, existingPorts + return existingPorts } -// are modified by this function with detected stale services. -func (sm *proxyServiceMap) unmergeService(service *api.Service, existingPorts, staleServices sets.String) bool { - if service == nil { - return false - } - svcName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name} - if utilproxy.ShouldSkipService(svcName, service) { - return false - } - syncRequired := false - for i := range service.Spec.Ports { - servicePort := &service.Spec.Ports[i] - if existingPorts.Has(servicePort.Name) { +func (sm *proxyServiceMap) unmerge(other proxyServiceMap, existingPorts, staleServices sets.String) { + for svcPortName := range other { + if existingPorts.Has(svcPortName.Port) { continue } - serviceName := proxy.ServicePortName{NamespacedName: svcName, Port: servicePort.Name} - info, exists := (*sm)[serviceName] + info, exists := (*sm)[svcPortName] if exists { - glog.V(1).Infof("Removing service %q", serviceName) + glog.V(1).Infof("Removing service port %q", svcPortName) if info.protocol == api.ProtocolUDP { staleServices.Insert(info.clusterIP.String()) } - delete(*sm, serviceName) - syncRequired = true + delete(*sm, svcPortName) } else { - glog.Errorf("Service %q removed, but doesn't exists", serviceName) + glog.Errorf("Service port %q removed, but doesn't exists", svcPortName) } } - return syncRequired } type serviceChangeMap struct { - sync.Mutex + lock sync.Mutex items map[types.NamespacedName]*serviceChange } type serviceChange struct { - previous *api.Service - current *api.Service + previous proxyServiceMap + current proxyServiceMap +} + +type updateEndpointMapResult struct { + hcEndpoints map[types.NamespacedName]int + staleEndpoints map[endpointServicePair]bool + staleServiceNames map[proxy.ServicePortName]bool +} + +type updateServiceMapResult struct { + hcServices map[types.NamespacedName]uint16 + staleServices sets.String } func 
newServiceChangeMap() serviceChangeMap { @@ -439,17 +467,42 @@ func newServiceChangeMap() serviceChangeMap { } } -func (scm *serviceChangeMap) update(namespacedName *types.NamespacedName, previous, current *api.Service) { - scm.Lock() - defer scm.Unlock() +func (scm *serviceChangeMap) update(namespacedName *types.NamespacedName, previous, current *api.Service) bool { + scm.lock.Lock() + defer scm.lock.Unlock() change, exists := scm.items[*namespacedName] if !exists { change = &serviceChange{} - change.previous = previous + change.previous = serviceToServiceMap(previous) scm.items[*namespacedName] = change } - change.current = current + change.current = serviceToServiceMap(current) + if reflect.DeepEqual(change.previous, change.current) { + delete(scm.items, *namespacedName) + } + return len(scm.items) > 0 +} + +// Translates single Service object to proxyServiceMap. +// +// NOTE: service object should NOT be modified. +func serviceToServiceMap(service *api.Service) proxyServiceMap { + if service == nil { + return nil + } + svcName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name} + if utilproxy.ShouldSkipService(svcName, service) { + return nil + } + + serviceMap := make(proxyServiceMap) + for i := range service.Spec.Ports { + servicePort := &service.Spec.Ports[i] + svcPortName := proxy.ServicePortName{NamespacedName: svcName, Port: servicePort.Name} + serviceMap[svcPortName] = newServiceInfo(svcPortName, servicePort, service) + } + return serviceMap } // internal struct for endpoints information @@ -462,6 +515,16 @@ func (e *endpointsInfo) String() string { return fmt.Sprintf("%v", *e) } +// IPPart returns just the IP part of the endpoint. +func (e *endpointsInfo) IPPart() string { + return utilproxy.IPPart(e.endpoint) +} + +// PortPart returns just the Port part of the endpoint. +func (e *endpointsInfo) PortPart() (int, error) { + return utilproxy.PortPart(e.endpoint) +} + type endpointServicePair struct { endpoint string servicePortName proxy.ServicePortName @@ -470,33 +533,40 @@ type endpointServicePair struct { type proxyEndpointsMap map[proxy.ServicePortName][]*endpointsInfo type endpointsChange struct { - previous *api.Endpoints - current *api.Endpoints + previous proxyEndpointsMap + current proxyEndpointsMap } type endpointsChangeMap struct { - sync.Mutex - items map[types.NamespacedName]*endpointsChange + lock sync.Mutex + hostname string + items map[types.NamespacedName]*endpointsChange } -// are modified by this function with detected stale -// connections. -func detectStaleConnections(oldEndpointsMap, newEndpointsMap proxyEndpointsMap, staleEndpoints map[endpointServicePair]bool) { - for svcPort, epList := range oldEndpointsMap { +// and are modified by this function with detected stale connections. 
+func detectStaleConnections(oldEndpointsMap, newEndpointsMap proxyEndpointsMap, staleEndpoints map[endpointServicePair]bool, staleServiceNames map[proxy.ServicePortName]bool) { + for svcPortName, epList := range oldEndpointsMap { for _, ep := range epList { stale := true - for i := range newEndpointsMap[svcPort] { - if *newEndpointsMap[svcPort][i] == *ep { + for i := range newEndpointsMap[svcPortName] { + if *newEndpointsMap[svcPortName][i] == *ep { stale = false break } } if stale { - glog.V(4).Infof("Stale endpoint %v -> %v", svcPort, ep.endpoint) - staleEndpoints[endpointServicePair{endpoint: ep.endpoint, servicePortName: svcPort}] = true + glog.V(4).Infof("Stale endpoint %v -> %v", svcPortName, ep.endpoint) + staleEndpoints[endpointServicePair{endpoint: ep.endpoint, servicePortName: svcPortName}] = true } } } + + for svcPortName, epList := range newEndpointsMap { + // For udp service, if its backend changes from 0 to non-0. There may exist a conntrack entry that could blackhole traffic to the service. + if len(epList) > 0 && len(oldEndpointsMap[svcPortName]) == 0 { + staleServiceNames[svcPortName] = true + } + } } // is updated by this function (based on the given changes). @@ -504,20 +574,20 @@ func detectStaleConnections(oldEndpointsMap, newEndpointsMap proxyEndpointsMap, func updateEndpointsMap( endpointsMap proxyEndpointsMap, changes *endpointsChangeMap, - hostname string) (syncRequired bool, hcEndpoints map[types.NamespacedName]int, staleSet map[endpointServicePair]bool) { - syncRequired = false - staleSet = make(map[endpointServicePair]bool) - for _, change := range changes.items { - oldEndpointsMap := endpointsToEndpointsMap(change.previous, hostname) - newEndpointsMap := endpointsToEndpointsMap(change.current, hostname) - if !reflect.DeepEqual(oldEndpointsMap, newEndpointsMap) { - endpointsMap.unmerge(oldEndpointsMap) - endpointsMap.merge(newEndpointsMap) - detectStaleConnections(oldEndpointsMap, newEndpointsMap, staleSet) - syncRequired = true + hostname string) (result updateEndpointMapResult) { + result.staleEndpoints = make(map[endpointServicePair]bool) + result.staleServiceNames = make(map[proxy.ServicePortName]bool) + + func() { + changes.lock.Lock() + defer changes.lock.Unlock() + for _, change := range changes.items { + endpointsMap.unmerge(change.previous) + endpointsMap.merge(change.current) + detectStaleConnections(change.previous, change.current, result.staleEndpoints, result.staleServiceNames) } - } - changes.items = make(map[types.NamespacedName]*endpointsChange) + changes.items = make(map[types.NamespacedName]*endpointsChange) + }() if !utilfeature.DefaultFeatureGate.Enabled(features.ExternalTrafficLocalOnly) { return @@ -525,13 +595,13 @@ func updateEndpointsMap( // TODO: If this will appear to be computationally expensive, consider // computing this incrementally similarly to endpointsMap. - hcEndpoints = make(map[types.NamespacedName]int) + result.hcEndpoints = make(map[types.NamespacedName]int) localIPs := getLocalIPs(endpointsMap) for nsn, ips := range localIPs { - hcEndpoints[nsn] = len(ips) + result.hcEndpoints[nsn] = len(ips) } - return syncRequired, hcEndpoints, staleSet + return result } // Translates single Endpoints object to proxyEndpointsMap. 
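The stale-connection bookkeeping above is easiest to see on toy data. Below is a minimal, self-contained sketch (plain string maps instead of the real proxyEndpointsMap/endpointServicePair types; the endpointsMap and detectStale names are illustrative only) of the two rules detectStaleConnections encodes: an old endpoint missing from the new map is a stale endpoint, and a service whose endpoint list grows from zero to non-zero is recorded as a stale service name so its possibly blackholing UDP conntrack entries can be cleared.

```go
package main

import "fmt"

// endpointsMap is a simplified stand-in for proxyEndpointsMap:
// service port name -> list of "ip:port" endpoint strings.
type endpointsMap map[string][]string

// detectStale applies the same two rules as detectStaleConnections:
//  1. an endpoint present in oldMap but absent from newMap is a stale endpoint;
//  2. a service whose endpoint list goes from empty to non-empty is a stale
//     service name (its UDP conntrack entries may blackhole traffic).
func detectStale(oldMap, newMap endpointsMap) (staleEndpoints map[string]string, staleServices []string) {
	staleEndpoints = map[string]string{}
	for svc, eps := range oldMap {
		for _, ep := range eps {
			stale := true
			for _, cur := range newMap[svc] {
				if cur == ep {
					stale = false
					break
				}
			}
			if stale {
				staleEndpoints[ep] = svc
			}
		}
	}
	for svc, eps := range newMap {
		if len(eps) > 0 && len(oldMap[svc]) == 0 {
			staleServices = append(staleServices, svc)
		}
	}
	return staleEndpoints, staleServices
}

func main() {
	oldMap := endpointsMap{"ns/db:tcp": {"10.0.0.5:5432"}, "ns/dns:udp": {}}
	newMap := endpointsMap{"ns/dns:udp": {"10.0.0.10:53"}}
	eps, svcs := detectStale(oldMap, newMap)
	fmt.Println(eps)  // map[10.0.0.5:5432:ns/db:tcp]
	fmt.Println(svcs) // [ns/dns:udp]
}
```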
@@ -582,23 +652,28 @@ func endpointsToEndpointsMap(endpoints *api.Endpoints, hostname string) proxyEnd return endpointsMap } -func newEndpointsChangeMap() endpointsChangeMap { +func newEndpointsChangeMap(hostname string) endpointsChangeMap { return endpointsChangeMap{ - items: make(map[types.NamespacedName]*endpointsChange), + hostname: hostname, + items: make(map[types.NamespacedName]*endpointsChange), } } -func (ecm *endpointsChangeMap) Update(namespacedName *types.NamespacedName, previous, current *api.Endpoints) { - ecm.Lock() - defer ecm.Unlock() +func (ecm *endpointsChangeMap) update(namespacedName *types.NamespacedName, previous, current *api.Endpoints) bool { + ecm.lock.Lock() + defer ecm.lock.Unlock() change, exists := ecm.items[*namespacedName] if !exists { change = &endpointsChange{} - change.previous = previous + change.previous = endpointsToEndpointsMap(previous, ecm.hostname) ecm.items[*namespacedName] = change } - change.current = current + change.current = endpointsToEndpointsMap(current, ecm.hostname) + if reflect.DeepEqual(change.previous, change.current) { + delete(ecm.items, *namespacedName) + } + return len(ecm.items) > 0 } func (em proxyEndpointsMap) merge(other proxyEndpointsMap) { @@ -614,10 +689,19 @@ func (em proxyEndpointsMap) unmerge(other proxyEndpointsMap) { } // CanUseIPVSProxier returns true if we can use the ipvs Proxier. -// This is determined by checking if all the required kernel modules are loaded. It may +// This is determined by checking if all the required kernel modules can be loaded. It may // return an error if it fails to get the kernel modules information without error, in which // case it will also return false. -func CanUseIPVSProxier() (bool, error) { +func CanUseIPVSProxier(ipsetver IPSetVersioner) (bool, error) { + // Try to load IPVS required kernel modules using modprobe + for _, kmod := range ipvsModules { + err := utilexec.New().Command("modprobe", "--", kmod).Run() + if err != nil { + glog.Warningf("Failed to load kernel module %v with modprobe. "+ + "You can ignore this message when kube-proxy is running inside container without mounting /lib/modules", kmod) + } + } + // Find out loaded kernel modules out, err := utilexec.New().Command("cut", "-f1", "-d", " ", "/proc/modules").CombinedOutput() if err != nil { @@ -629,9 +713,18 @@ func CanUseIPVSProxier() (bool, error) { loadModules := sets.NewString() wantModules.Insert(ipvsModules...) loadModules.Insert(mods...) - modules := wantModules.Difference(loadModules).List() + modules := wantModules.Difference(loadModules).UnsortedList() if len(modules) != 0 { - return false, fmt.Errorf("Failed to load kernel modules: %v", modules) + return false, fmt.Errorf("IPVS proxier will not be used because the following required kernel modules are not loaded: %v", modules) + } + + // Check ipset version + versionString, err := ipsetver.GetVersion() + if err != nil { + return false, fmt.Errorf("error getting ipset version, error: %v", err) + } + if !checkMinVersion(versionString) { + return false, fmt.Errorf("ipset version: %s is less than min required version: %s", versionString, MinIPSetCheckVersion) } return true, nil } @@ -684,7 +777,7 @@ func cleanupIptablesLeftovers(ipt utiliptables.Interface) (encounteredError bool natRules := bytes.NewBuffer(nil) writeLine(natChains, "*nat") // Start with chains we know we need to remove. 
- for _, chain := range []utiliptables.Chain{kubeServicesChain, kubePostroutingChain, KubeMarkMasqChain} { + for _, chain := range []utiliptables.Chain{kubeServicesChain, kubePostroutingChain, KubeMarkMasqChain, KubeServiceIPSetsChain} { if _, found := existingNATChains[chain]; found { chainString := string(chain) writeLine(natChains, existingNATChains[chain]) // flush @@ -704,103 +797,122 @@ func cleanupIptablesLeftovers(ipt utiliptables.Interface) (encounteredError bool } // CleanupLeftovers clean up all ipvs and iptables rules created by ipvs Proxier. -func CleanupLeftovers(execer utilexec.Interface, ipvs utilipvs.Interface, ipt utiliptables.Interface) (encounteredError bool) { - // Return immediately when ipvs interface is nil - Probably initialization failed in somewhere. - if ipvs == nil { - return true - } - encounteredError = false - // Currently we assume only ipvs proxier will create ipvs rules, ipvs proxier will flush all ipvs rules when clean up. - // Users do this operation should be with caution. - err := ipvs.Flush() - if err != nil { - encounteredError = true +func CleanupLeftovers(ipvs utilipvs.Interface, ipt utiliptables.Interface, ipset utilipset.Interface, cleanupIPVS bool) (encounteredError bool) { + if cleanupIPVS { + // Return immediately when ipvs interface is nil - Probably initialization failed in somewhere. + if ipvs == nil { + return true + } + encounteredError = false + err := ipvs.Flush() + if err != nil { + encounteredError = true + } } // Delete dummy interface created by ipvs Proxier. - err = deleteDummyDevice(execer, DefaultDummyDevice) + nl := NewNetLinkHandle() + err := nl.DeleteDummyDevice(DefaultDummyDevice) if err != nil { encounteredError = true } // Clear iptables created by ipvs Proxier. encounteredError = cleanupIptablesLeftovers(ipt) || encounteredError + // Destroy ip sets created by ipvs Proxier. We should call it after cleaning up + // iptables since we can NOT delete ip set which is still referenced by iptables. + ipSetsToDestroy := []string{KubeLoopBackIPSet, KubeClusterIPSet, KubeLoadBalancerSet, KubeNodePortSetTCP, KubeNodePortSetUDP, + KubeExternalIPSet, KubeLoadBalancerSourceIPSet, KubeLoadBalancerSourceCIDRSet, KubeLoadBalancerMasqSet} + for _, set := range ipSetsToDestroy { + err = ipset.DestroySet(set) + if err != nil { + encounteredError = true + } + } return encounteredError } -// Sync is called to immediately synchronize the proxier state to iptables +// Sync is called to synchronize the proxier state to iptables and ipvs as soon as possible. func (proxier *Proxier) Sync() { - proxier.syncProxyRules(syncReasonForce) + proxier.syncRunner.Run() } // SyncLoop runs periodic work. This is expected to run as a goroutine or as the main loop of the app. It does not return. func (proxier *Proxier) SyncLoop() { - t := time.NewTicker(proxier.syncPeriod) - defer t.Stop() // Update healthz timestamp at beginning in case Sync() never succeeds. if proxier.healthzServer != nil { proxier.healthzServer.UpdateTimestamp() } - for { - <-t.C - glog.V(6).Infof("Periodic sync") - proxier.Sync() + proxier.syncRunner.Loop(wait.NeverStop) +} + +func (proxier *Proxier) setInitialized(value bool) { + var initialized int32 + if value { + initialized = 1 } + atomic.StoreInt32(&proxier.initialized, initialized) +} + +func (proxier *Proxier) isInitialized() bool { + return atomic.LoadInt32(&proxier.initialized) > 0 } // OnServiceAdd is called whenever creation of new service object is observed. 
func (proxier *Proxier) OnServiceAdd(service *api.Service) { namespacedName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name} - proxier.serviceChanges.update(&namespacedName, nil, service) - - proxier.syncProxyRules(syncReasonServices) + if proxier.serviceChanges.update(&namespacedName, nil, service) && proxier.isInitialized() { + proxier.syncRunner.Run() + } } // OnServiceUpdate is called whenever modification of an existing service object is observed. func (proxier *Proxier) OnServiceUpdate(oldService, service *api.Service) { namespacedName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name} - proxier.serviceChanges.update(&namespacedName, oldService, service) - - proxier.syncProxyRules(syncReasonServices) + if proxier.serviceChanges.update(&namespacedName, oldService, service) && proxier.isInitialized() { + proxier.syncRunner.Run() + } } // OnServiceDelete is called whenever deletion of an existing service object is observed. func (proxier *Proxier) OnServiceDelete(service *api.Service) { namespacedName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name} - proxier.serviceChanges.update(&namespacedName, service, nil) - - proxier.syncProxyRules(syncReasonServices) + if proxier.serviceChanges.update(&namespacedName, service, nil) && proxier.isInitialized() { + proxier.syncRunner.Run() + } } // OnServiceSynced is called once all the initial even handlers were called and the state is fully propagated to local cache. func (proxier *Proxier) OnServiceSynced() { proxier.mu.Lock() proxier.servicesSynced = true + proxier.setInitialized(proxier.servicesSynced && proxier.endpointsSynced) proxier.mu.Unlock() - proxier.syncProxyRules(syncReasonServices) + // Sync unconditionally - this is called once per lifetime. + proxier.syncProxyRules() } // OnEndpointsAdd is called whenever creation of new endpoints object is observed. func (proxier *Proxier) OnEndpointsAdd(endpoints *api.Endpoints) { namespacedName := types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name} - proxier.endpointsChanges.Update(&namespacedName, nil, endpoints) - - proxier.syncProxyRules(syncReasonEndpoints) + if proxier.endpointsChanges.update(&namespacedName, nil, endpoints) && proxier.isInitialized() { + proxier.syncRunner.Run() + } } // OnEndpointsUpdate is called whenever modification of an existing endpoints object is observed. func (proxier *Proxier) OnEndpointsUpdate(oldEndpoints, endpoints *api.Endpoints) { namespacedName := types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name} - proxier.endpointsChanges.Update(&namespacedName, oldEndpoints, endpoints) - - proxier.syncProxyRules(syncReasonEndpoints) + if proxier.endpointsChanges.update(&namespacedName, oldEndpoints, endpoints) && proxier.isInitialized() { + proxier.syncRunner.Run() + } } // OnEndpointsDelete is called whenever deletion of an existing endpoints object is observed. func (proxier *Proxier) OnEndpointsDelete(endpoints *api.Endpoints) { namespacedName := types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name} - proxier.endpointsChanges.Update(&namespacedName, endpoints, nil) - - proxier.syncProxyRules(syncReasonEndpoints) + if proxier.endpointsChanges.update(&namespacedName, endpoints, nil) && proxier.isInitialized() { + proxier.syncRunner.Run() + } } // OnEndpointsSynced is called once all the initial event handlers were called and the state is fully propagated to local cache. 
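All of the OnService*/OnEndpoints* handlers above share one shape: record the change, and kick the sync runner only when the proxier is initialized and something actually changed; the runner then coalesces bursts of updates into a bounded number of syncProxyRules calls. The sketch below illustrates that coalescing pattern with a toy channel-based runner; it is not the real async.BoundedFrequencyRunner, and the runner/newRunner names are illustrative only.

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// runner coalesces many Run() calls into far fewer invocations of fn,
// roughly the role async.BoundedFrequencyRunner plays for syncProxyRules.
type runner struct {
	fn     func()
	kick   chan struct{}
	minGap time.Duration
}

func newRunner(fn func(), minGap time.Duration) *runner {
	// kick has capacity 1: requests arriving while one is already queued are coalesced.
	return &runner{fn: fn, kick: make(chan struct{}, 1), minGap: minGap}
}

// Run requests a sync without blocking the caller.
func (r *runner) Run() {
	select {
	case r.kick <- struct{}{}:
	default:
	}
}

// Loop serves queued requests, never running fn more often than minGap.
func (r *runner) Loop(stop <-chan struct{}) {
	for {
		select {
		case <-stop:
			return
		case <-r.kick:
			r.fn()
			time.Sleep(r.minGap)
		}
	}
}

var initialized int32

func isInitialized() bool { return atomic.LoadInt32(&initialized) > 0 }

func main() {
	syncRunner := newRunner(func() { fmt.Println("syncProxyRules") }, 100*time.Millisecond)
	stop := make(chan struct{})
	go syncRunner.Loop(stop)

	// Mirrors OnServiceSynced: mark initialized, then request a sync once.
	atomic.StoreInt32(&initialized, 1)
	syncRunner.Run()

	// Mirrors OnServiceAdd/Update/Delete: many updates collapse into few syncs.
	for i := 0; i < 10; i++ {
		if isInitialized() {
			syncRunner.Run()
		}
	}
	time.Sleep(300 * time.Millisecond)
	close(stop)
}
```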
@@ -809,27 +921,19 @@ func (proxier *Proxier) OnEndpointsSynced() { proxier.endpointsSynced = true proxier.mu.Unlock() - proxier.syncProxyRules(syncReasonEndpoints) + proxier.syncProxyRules() } -type syncReason string - -const syncReasonServices syncReason = "ServicesUpdate" -const syncReasonEndpoints syncReason = "EndpointsUpdate" -const syncReasonForce syncReason = "Force" - // This is where all of the ipvs calls happen. // assumes proxier.mu is held -func (proxier *Proxier) syncProxyRules(reason syncReason) { +func (proxier *Proxier) syncProxyRules() { proxier.mu.Lock() defer proxier.mu.Unlock() - if proxier.throttle != nil { - proxier.throttle.Accept() - } start := time.Now() defer func() { - glog.V(4).Infof("syncProxyRules(%s) took %v", reason, time.Since(start)) + metrics.SyncProxyRulesLatency.Observe(metrics.SinceInMicroseconds(start)) + glog.V(4).Infof("syncProxyRules took %v", time.Since(start)) }() // don't sync rules till we've received services and endpoints if !proxier.endpointsSynced || !proxier.servicesSynced { @@ -837,27 +941,21 @@ func (proxier *Proxier) syncProxyRules(reason syncReason) { return } - // Figure out the new services we need to activate. - proxier.serviceChanges.Lock() - serviceSyncRequired, hcServices, staleServices := updateServiceMap( + // We assume that if this was called, we really want to sync them, + // even if nothing changed in the meantime. In other words, callers are + // responsible for detecting no-op changes and not calling this function. + serviceUpdateResult := updateServiceMap( proxier.serviceMap, &proxier.serviceChanges) - proxier.serviceChanges.Unlock() - - // If this was called because of a services update, but nothing actionable has changed, skip it. - if reason == syncReasonServices && !serviceSyncRequired { - glog.V(3).Infof("Skipping ipvs sync because nothing changed") - return - } - - proxier.endpointsChanges.Lock() - endpointsSyncRequired, hcEndpoints, staleEndpoints := updateEndpointsMap( + endpointUpdateResult := updateEndpointsMap( proxier.endpointsMap, &proxier.endpointsChanges, proxier.hostname) - proxier.endpointsChanges.Unlock() - // If this was called because of an endpoints update, but nothing actionable has changed, skip it. - if reason == syncReasonEndpoints && !endpointsSyncRequired { - glog.V(3).Infof("Skipping ipvs sync because nothing changed") - return + staleServices := serviceUpdateResult.staleServices + // merge stale services gathered from updateEndpointsMap + for svcPortName := range endpointUpdateResult.staleServiceNames { + if svcInfo, ok := proxier.serviceMap[svcPortName]; ok && svcInfo != nil && svcInfo.protocol == api.ProtocolUDP { + glog.V(2).Infof("Stale udp service %v -> %s", svcPortName, svcInfo.clusterIP.String()) + staleServices.Insert(svcInfo.clusterIP.String()) + } } glog.V(3).Infof("Syncing ipvs Proxier rules") @@ -912,12 +1010,22 @@ func (proxier *Proxier) syncProxyRules(reason syncReason) { // End install iptables // make sure dummy interface exists in the system where ipvs Proxier will bind service address on it - _, err = ensureDummyDevice(proxier.exec, DefaultDummyDevice) + _, err = proxier.netlinkHandle.EnsureDummyDevice(DefaultDummyDevice) if err != nil { glog.Errorf("Failed to create dummy interface: %s, error: %v", DefaultDummyDevice, err) return } + // make sure ip sets exists in the system. 
+ ipSets := []*IPSet{proxier.loopbackSet, proxier.clusterIPSet, proxier.externalIPSet, proxier.nodePortSetUDP, proxier.nodePortSetTCP, + proxier.lbIngressSet, proxier.lbMasqSet, proxier.lbWhiteListCIDRSet, proxier.lbWhiteListIPSet} + if err := ensureIPSets(ipSets...); err != nil { + return + } + for i := range ipSets { + ipSets[i].resetEntries() + } + // Accumulate the set of local ports that we will be holding open once this update is complete replacementPortsMap := map[utilproxy.LocalPort]utilproxy.Closeable{} // activeIPVSServices represents IPVS service successfully created in this round of sync @@ -937,6 +1045,17 @@ func (proxier *Proxier) syncProxyRules(reason syncReason) { // is just for efficiency, not correctness. args := make([]string, 64) + // Kube service portal + if err := proxier.linkKubeServiceChain(existingNATChains, proxier.natChains); err != nil { + glog.Errorf("Failed to link KUBE-SERVICES chain: %v", err) + return + } + // Kube service ipset + if err := proxier.createKubeFireWallChain(existingNATChains, proxier.natChains); err != nil { + glog.Errorf("Failed to create KUBE-FIRE-WALL chain: %v", err) + return + } + // Build IPVS rules for each service. for svcName, svcInfo := range proxier.serviceMap { protocol := strings.ToLower(string(svcInfo.protocol)) @@ -944,7 +1063,41 @@ func (proxier *Proxier) syncProxyRules(reason syncReason) { // to ServicePortName.String() show up in CPU profiles. svcNameString := svcName.String() + // Handle traffic that loops back to the originator with SNAT. + for _, ep := range proxier.endpointsMap[svcName] { + epIP := ep.IPPart() + epPort, err := ep.PortPart() + // Error parsing this endpoint has been logged. Skip to next endpoint. + if epIP == "" || err != nil { + continue + } + entry := &utilipset.Entry{ + IP: epIP, + Port: epPort, + Protocol: protocol, + IP2: epIP, + SetType: utilipset.HashIPPortIP, + } + proxier.loopbackSet.activeEntries.Insert(entry.String()) + } + // Capture the clusterIP. + // ipset call + entry := &utilipset.Entry{ + IP: svcInfo.clusterIP.String(), + Port: svcInfo.port, + Protocol: protocol, + SetType: utilipset.HashIPPort, + } + // add service Cluster IP:Port to kubeServiceAccess ip set for the purpose of solving hairpin. + // proxier.kubeServiceAccessSet.activeEntries.Insert(entry.String()) + // Install masquerade rules if 'masqueradeAll' or 'clusterCIDR' is specified. + if proxier.masqueradeAll { + proxier.clusterIPSet.activeEntries.Insert(entry.String()) + } else if len(proxier.clusterCIDR) > 0 { + proxier.clusterIPSet.activeEntries.Insert(entry.String()) + } + // ipvs call serv := &utilipvs.VirtualServer{ Address: svcInfo.clusterIP, Port: uint16(svcInfo.port), @@ -952,7 +1105,6 @@ func (proxier *Proxier) syncProxyRules(reason syncReason) { Scheduler: proxier.ipvsScheduler, } // Set session affinity flag and timeout for IPVS service - var flags utilipvs.ServiceFlags if svcInfo.sessionAffinityType == api.ServiceAffinityClientIP { serv.Flags |= utilipvs.FlagPersistent serv.Timeout = uint32(svcInfo.stickyMaxAgeSeconds) @@ -966,32 +1118,6 @@ func (proxier *Proxier) syncProxyRules(reason syncReason) { } else { glog.Errorf("Failed to sync service: %v, err: %v", serv, err) } - // Install masquerade rules if 'masqueradeAll' or 'clusterCIDR' is specified. 
- args = append(args[:0], - "-A", string(kubeServicesChain), - "-m", "comment", "--comment", fmt.Sprintf(`"%s cluster IP"`, svcNameString), - "-m", protocol, "-p", protocol, - "-d", fmt.Sprintf("%s/32", svcInfo.clusterIP.String()), - "--dport", strconv.Itoa(svcInfo.port), - ) - if proxier.masqueradeAll { - err = proxier.linkKubeServiceChain(existingNATChains, proxier.natChains) - if err != nil { - glog.Errorf("Failed to link KUBE-SERVICES chain: %v", err) - } - writeLine(proxier.natRules, append(args, "-j", string(KubeMarkMasqChain))...) - } else if len(proxier.clusterCIDR) > 0 { - // This masquerades off-cluster traffic to a service VIP. The idea - // is that you can establish a static route for your Service range, - // routing to any node, and that node will bridge into the Service - // for you. Since that might bounce off-node, we masquerade here. - // If/when we support "Local" policy for VIPs, we should update this. - err = proxier.linkKubeServiceChain(existingNATChains, proxier.natChains) - if err != nil { - glog.Errorf("Failed to link KUBE-SERVICES chain: %v", err) - } - writeLine(proxier.natRules, append(args, "! -s", proxier.clusterCIDR, "-j", string(KubeMarkMasqChain))...) - } // Capture externalIPs. for _, externalIP := range svcInfo.externalIPs { @@ -1026,14 +1152,25 @@ func (proxier *Proxier) syncProxyRules(reason syncReason) { } } // We're holding the port, so it's OK to install IPVS rules. + // ipset call + entry := &utilipset.Entry{ + IP: externalIP, + Port: svcInfo.port, + Protocol: protocol, + SetType: utilipset.HashIPPort, + } + // We have to SNAT packets to external IPs. + proxier.externalIPSet.activeEntries.Insert(entry.String()) + + // ipvs call serv := &utilipvs.VirtualServer{ Address: net.ParseIP(externalIP), Port: uint16(svcInfo.port), Protocol: string(svcInfo.protocol), - Flags: flags, Scheduler: proxier.ipvsScheduler, } if svcInfo.sessionAffinityType == api.ServiceAffinityClientIP { + serv.Flags |= utilipvs.FlagPersistent serv.Timeout = uint32(svcInfo.stickyMaxAgeSeconds) } // There is no need to bind externalIP to dummy interface, so set parameter `bindAddr` to `false`. @@ -1050,25 +1187,39 @@ func (proxier *Proxier) syncProxyRules(reason syncReason) { // Capture load-balancer ingress. for _, ingress := range svcInfo.loadBalancerStatus.Ingress { if ingress.IP != "" { + // ipset call + entry = &utilipset.Entry{ + IP: ingress.IP, + Port: svcInfo.port, + Protocol: protocol, + SetType: utilipset.HashIPPort, + } + // add service load balancer ingressIP:Port to kubeServiceAccess ip set for the purpose of solving hairpin. + // proxier.kubeServiceAccessSet.activeEntries.Insert(entry.String()) + // If we are proxying globally, we need to masquerade in case we cross nodes. + // If we are proxying only locally, we can retain the source IP. + if !svcInfo.onlyNodeLocalEndpoints { + proxier.lbMasqSet.activeEntries.Insert(entry.String()) + } if len(svcInfo.loadBalancerSourceRanges) != 0 { - err = proxier.linkKubeServiceChain(existingNATChains, proxier.natChains) - if err != nil { - glog.Errorf("Failed to link KUBE-SERVICES chain: %v", err) - } // The service firewall rules are created based on ServiceSpec.loadBalancerSourceRanges field. // This currently works for loadbalancers that preserves source ips. // For loadbalancers which direct traffic to service NodePort, the firewall rules will not apply. 
- args = append(args[:0], - "-A", string(kubeServicesChain), - "-m", "comment", "--comment", fmt.Sprintf(`"%s loadbalancer IP"`, svcNameString), - "-m", string(svcInfo.protocol), "-p", string(svcInfo.protocol), - "-d", fmt.Sprintf("%s/32", ingress.IP), - "--dport", fmt.Sprintf("%d", svcInfo.port), - ) + proxier.lbIngressSet.activeEntries.Insert(entry.String()) allowFromNode := false for _, src := range svcInfo.loadBalancerSourceRanges { - writeLine(proxier.natRules, append(args, "-s", src, "-j", "ACCEPT")...) + // ipset call + entry = &utilipset.Entry{ + IP: ingress.IP, + Port: svcInfo.port, + Protocol: protocol, + Net: src, + SetType: utilipset.HashIPPortNet, + } + // enumerate all white list source cidr + proxier.lbWhiteListCIDRSet.activeEntries.Insert(entry.String()) + // ignore error because it has been validated _, cidr, _ := net.ParseCIDR(src) if cidr.Contains(proxier.nodeIP) { @@ -1079,22 +1230,27 @@ func (proxier *Proxier) syncProxyRules(reason syncReason) { // loadbalancer's backend hosts. In this case, request will not hit the loadbalancer but loop back directly. // Need to add the following rule to allow request on host. if allowFromNode { - writeLine(proxier.natRules, append(args, "-s", fmt.Sprintf("%s/32", ingress.IP), "-j", "ACCEPT")...) + entry = &utilipset.Entry{ + IP: ingress.IP, + Port: svcInfo.port, + Protocol: protocol, + IP2: ingress.IP, + SetType: utilipset.HashIPPortIP, + } + // enumerate all white list source ip + proxier.lbWhiteListIPSet.activeEntries.Insert(entry.String()) } - - // If the packet was able to reach the end of firewall chain, then it did not get DNATed. - // It means the packet cannot go through the firewall, then DROP it. - writeLine(proxier.natRules, append(args, "-j", string(KubeMarkDropChain))...) } + // ipvs call serv := &utilipvs.VirtualServer{ Address: net.ParseIP(ingress.IP), Port: uint16(svcInfo.port), Protocol: string(svcInfo.protocol), - Flags: flags, Scheduler: proxier.ipvsScheduler, } if svcInfo.sessionAffinityType == api.ServiceAffinityClientIP { + serv.Flags |= utilipvs.FlagPersistent serv.Timeout = uint32(svcInfo.stickyMaxAgeSeconds) } // There is no need to bind LB ingress.IP to dummy interface, so set parameter `bindAddr` to `false`. @@ -1126,25 +1282,47 @@ func (proxier *Proxier) syncProxyRules(reason syncReason) { continue } if lp.Protocol == "udp" { - utilproxy.ClearUDPConntrackForPort(proxier.exec, lp.Port) + isIPv6 := utilproxy.IsIPv6(svcInfo.clusterIP) + utilproxy.ClearUDPConntrackForPort(proxier.exec, lp.Port, isIPv6) } replacementPortsMap[lp] = socket } // We're holding the port, so it's OK to install ipvs rules. + // Nodeports need SNAT, unless they're local. 
+ // ipset call + if !svcInfo.onlyNodeLocalEndpoints { + entry = &utilipset.Entry{ + // No need to provide ip info + Port: svcInfo.nodePort, + Protocol: protocol, + SetType: utilipset.BitmapPort, + } + switch protocol { + case "tcp": + proxier.nodePortSetTCP.activeEntries.Insert(entry.String()) + case "udp": + proxier.nodePortSetUDP.activeEntries.Insert(entry.String()) + default: + // It should never hit + glog.Errorf("Unsupported protocol type: %s", protocol) + } + } + // Build ipvs kernel routes for each node ip address nodeIPs, err := proxier.ipGetter.NodeIPs() if err != nil { glog.Errorf("Failed to get node IP, err: %v", err) } else { for _, nodeIP := range nodeIPs { + // ipvs call serv := &utilipvs.VirtualServer{ Address: nodeIP, Port: uint16(svcInfo.nodePort), Protocol: string(svcInfo.protocol), - Flags: flags, Scheduler: proxier.ipvsScheduler, } if svcInfo.sessionAffinityType == api.ServiceAffinityClientIP { + serv.Flags |= utilipvs.FlagPersistent serv.Timeout = uint32(svcInfo.stickyMaxAgeSeconds) } // There is no need to bind Node IP to dummy interface, so set parameter `bindAddr` to `false`. @@ -1161,18 +1339,132 @@ func (proxier *Proxier) syncProxyRules(reason syncReason) { } } + // sync ipset entries + ipsetsToSync := []*IPSet{proxier.loopbackSet, proxier.clusterIPSet, proxier.lbIngressSet, proxier.lbMasqSet, proxier.nodePortSetTCP, + proxier.nodePortSetUDP, proxier.externalIPSet, proxier.lbWhiteListIPSet, proxier.lbWhiteListCIDRSet} + for i := range ipsetsToSync { + ipsetsToSync[i].syncIPSetEntries() + } + + // Tail call iptables rules for ipset, make sure only call iptables once + // in a single loop per ip set. + if !proxier.loopbackSet.isEmpty() { + args = append(args[:0], + "-A", string(kubePostroutingChain), + "-m", "set", "--match-set", proxier.loopbackSet.Name, + "dst,dst,src", + ) + writeLine(proxier.natRules, append(args, "-j", "MASQUERADE")...) + } + if !proxier.clusterIPSet.isEmpty() { + args = append(args[:0], + "-A", string(kubeServicesChain), + "-m", "set", "--match-set", proxier.clusterIPSet.Name, + "dst,dst", + ) + if proxier.masqueradeAll { + writeLine(proxier.natRules, append(args, "-j", string(KubeMarkMasqChain))...) + } else if len(proxier.clusterCIDR) > 0 { + // This masquerades off-cluster traffic to a service VIP. The idea + // is that you can establish a static route for your Service range, + // routing to any node, and that node will bridge into the Service + // for you. Since that might bounce off-node, we masquerade here. + // If/when we support "Local" policy for VIPs, we should update this. + writeLine(proxier.natRules, append(args, "! -s", proxier.clusterCIDR, "-j", string(KubeMarkMasqChain))...) + } + } + if !proxier.externalIPSet.isEmpty() { + // Build masquerade rules for packets to external IPs. + args = append(args[:0], + "-A", string(kubeServicesChain), + "-m", "set", "--match-set", proxier.externalIPSet.Name, + "dst,dst", + ) + writeLine(proxier.natRules, append(args, "-j", string(KubeMarkMasqChain))...) + // Allow traffic for external IPs that does not come from a bridge (i.e. not from a container) + // nor from a local process to be forwarded to the service. + // This rule roughly translates to "all traffic from off-machine". + // This is imperfect in the face of network plugins that might not use a bridge, but we can revisit that later. 
+ externalTrafficOnlyArgs := append(args, + "-m", "physdev", "!", "--physdev-is-in", + "-m", "addrtype", "!", "--src-type", "LOCAL") + writeLine(proxier.natRules, append(externalTrafficOnlyArgs, "-j", "ACCEPT")...) + dstLocalOnlyArgs := append(args, "-m", "addrtype", "--dst-type", "LOCAL") + // Allow traffic bound for external IPs that happen to be recognized as local IPs to stay local. + // This covers cases like GCE load-balancers which get added to the local routing table. + writeLine(proxier.natRules, append(dstLocalOnlyArgs, "-j", "ACCEPT")...) + } + if !proxier.lbMasqSet.isEmpty() { + // Build masquerade rules for packets which cross node visit load balancer ingress IPs. + args = append(args[:0], + "-A", string(kubeServicesChain), + "-m", "set", "--match-set", proxier.lbMasqSet.Name, + "dst,dst", + ) + writeLine(proxier.natRules, append(args, "-j", string(KubeMarkMasqChain))...) + } + if !proxier.lbWhiteListCIDRSet.isEmpty() || !proxier.lbWhiteListIPSet.isEmpty() { + // link kube-services chain -> kube-fire-wall chain + args := []string{"-m", "set", "--match-set", proxier.lbIngressSet.Name, "dst,dst", "-j", string(KubeFireWallChain)} + if _, err := proxier.iptables.EnsureRule(utiliptables.Append, utiliptables.TableNAT, kubeServicesChain, args...); err != nil { + glog.Errorf("Failed to ensure that ipset %s chain %s jumps to %s: %v", proxier.lbIngressSet.Name, kubeServicesChain, KubeFireWallChain, err) + } + if !proxier.lbWhiteListCIDRSet.isEmpty() { + args = append(args[:0], + "-A", string(KubeFireWallChain), + "-m", "set", "--match-set", proxier.lbWhiteListCIDRSet.Name, + "dst,dst,src", + ) + writeLine(proxier.natRules, append(args, "-j", "ACCEPT")...) + } + if !proxier.lbWhiteListIPSet.isEmpty() { + args = append(args[:0], + "-A", string(KubeFireWallChain), + "-m", "set", "--match-set", proxier.lbWhiteListIPSet.Name, + "dst,dst,src", + ) + writeLine(proxier.natRules, append(args, "-j", "ACCEPT")...) + } + args = append(args[:0], + "-A", string(KubeFireWallChain), + ) + // If the packet was able to reach the end of firewall chain, then it did not get DNATed. + // It means the packet cannot go thru the firewall, then mark it for DROP + writeLine(proxier.natRules, append(args, "-j", string(KubeMarkDropChain))...) + } + if !proxier.nodePortSetTCP.isEmpty() { + // Build masquerade rules for packets which cross node visit nodeport. + args = append(args[:0], + "-A", string(kubeServicesChain), + "-m", "tcp", "-p", "tcp", + "-m", "set", "--match-set", proxier.nodePortSetTCP.Name, + "dst", + ) + writeLine(proxier.natRules, append(args, "-j", string(KubeMarkMasqChain))...) + } + if !proxier.nodePortSetUDP.isEmpty() { + args = append(args[:0], + "-A", string(kubeServicesChain), + "-m", "udp", "-p", "udp", + "-m", "set", "--match-set", proxier.nodePortSetUDP.Name, + "dst", + ) + writeLine(proxier.natRules, append(args, "-j", string(KubeMarkMasqChain))...) + } + // Write the end-of-table markers. writeLine(proxier.natRules, "COMMIT") // Sync iptables rules. // NOTE: NoFlushTables is used so we don't flush non-kubernetes chains in the table. - natLines := append(proxier.natChains.Bytes(), proxier.natRules.Bytes()...) 
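The restore path now reuses a single proxier-owned buffer (iptablesData) instead of concatenating fresh byte slices on every sync; what gets handed to iptables-restore is simply the nat table header and chain declarations followed by the rules and a COMMIT line. A schematic of that payload with made-up chain contents, assuming the usual iptables-restore format:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	var natChains, natRules, iptablesData bytes.Buffer

	// Table header and chain declarations (":NAME - [0:0]") come first,
	// then the -A rules, then the COMMIT marker.
	natChains.WriteString("*nat\n")
	natChains.WriteString(":KUBE-SERVICES - [0:0]\n")
	natChains.WriteString(":KUBE-FIRE-WALL - [0:0]\n")
	natRules.WriteString("-A KUBE-FIRE-WALL -j KUBE-MARK-DROP\n")
	natRules.WriteString("COMMIT\n")

	// Reuse one buffer across syncs: reset, then append both sections.
	iptablesData.Reset()
	iptablesData.Write(natChains.Bytes())
	iptablesData.Write(natRules.Bytes())

	// This blob is what RestoreAll feeds to iptables-restore; NoFlushTables
	// keeps non-kubernetes chains in the nat table untouched.
	fmt.Print(iptablesData.String())
}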
- lines := natLines + proxier.iptablesData.Reset() + proxier.iptablesData.Write(proxier.natChains.Bytes()) + proxier.iptablesData.Write(proxier.natRules.Bytes()) - glog.V(3).Infof("Restoring iptables rules: %s", lines) - err = proxier.iptables.RestoreAll(lines, utiliptables.NoFlushTables, utiliptables.RestoreCounters) + glog.V(5).Infof("Restoring iptables rules: %s", proxier.iptablesData.Bytes()) + err = proxier.iptables.RestoreAll(proxier.iptablesData.Bytes(), utiliptables.NoFlushTables, utiliptables.RestoreCounters) if err != nil { - glog.Errorf("Failed to execute iptables-restore: %v\nRules:\n%s", err, lines) + glog.Errorf("Failed to execute iptables-restore: %v\nRules:\n%s", err, proxier.iptablesData.Bytes()) // Revert new local ports. utilproxy.RevertPorts(replacementPortsMap, proxier.portsMap) return @@ -1197,29 +1489,29 @@ func (proxier *Proxier) syncProxyRules(reason syncReason) { } proxier.cleanLegacyService(activeIPVSServices, currentIPVSServices) - // Update healthz timestamp if it is periodic sync. - if proxier.healthzServer != nil && reason == syncReasonForce { + // Update healthz timestamp + if proxier.healthzServer != nil { proxier.healthzServer.UpdateTimestamp() } // Update healthchecks. The endpoints list might include services that are // not "OnlyLocal", but the services list will not, and the healthChecker // will just drop those endpoints. - if err := proxier.healthChecker.SyncServices(hcServices); err != nil { + if err := proxier.healthChecker.SyncServices(serviceUpdateResult.hcServices); err != nil { glog.Errorf("Error syncing healtcheck services: %v", err) } - if err := proxier.healthChecker.SyncEndpoints(hcEndpoints); err != nil { + if err := proxier.healthChecker.SyncEndpoints(endpointUpdateResult.hcEndpoints); err != nil { glog.Errorf("Error syncing healthcheck endpoints: %v", err) } // Finish housekeeping. // TODO: these could be made more consistent. 
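One behavioural change worth noting above: the healthz timestamp is now updated on every successful sync rather than only when the sync reason was syncReasonForce, which is what lets a liveness probe notice a proxier whose rule syncs have stalled. A toy illustration of the idea, not the vendored healthcheck package:

package main

import (
	"fmt"
	"sync"
	"time"
)

// healthzServer records when proxy rules were last synced successfully.
type healthzServer struct {
	mu       sync.Mutex
	lastSync time.Time
}

// UpdateTimestamp is called at the end of every successful syncProxyRules run.
func (h *healthzServer) UpdateTimestamp() {
	h.mu.Lock()
	defer h.mu.Unlock()
	h.lastSync = time.Now()
}

// Healthy reports whether a sync completed within the allowed interval.
func (h *healthzServer) Healthy(maxAge time.Duration) bool {
	h.mu.Lock()
	defer h.mu.Unlock()
	return time.Since(h.lastSync) < maxAge
}

func main() {
	h := &healthzServer{}
	h.UpdateTimestamp()
	fmt.Println(h.Healthy(2 * time.Minute)) // true while syncs keep happening
}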
- for _, svcIP := range staleServices.List() { + for _, svcIP := range staleServices.UnsortedList() { if err := utilproxy.ClearUDPConntrackForIP(proxier.exec, svcIP); err != nil { glog.Errorf("Failed to delete stale service IP %s connections, error: %v", svcIP, err) } } - proxier.deleteEndpointConnections(staleEndpoints) + proxier.deleteEndpointConnections(endpointUpdateResult.staleEndpoints) } // After a UDP endpoint has been removed, we must flush any pending conntrack entries to it, or else we @@ -1228,7 +1520,7 @@ func (proxier *Proxier) syncProxyRules(reason syncReason) { func (proxier *Proxier) deleteEndpointConnections(connectionMap map[endpointServicePair]bool) { for epSvcPair := range connectionMap { if svcInfo, ok := proxier.serviceMap[epSvcPair.servicePortName]; ok && svcInfo.protocol == api.ProtocolUDP { - endpointIP := epSvcPair.endpoint[0:strings.Index(epSvcPair.endpoint, ":")] + endpointIP := utilproxy.IPPart(epSvcPair.endpoint) err := utilproxy.ClearUDPConntrackForPeers(proxier.exec, svcInfo.clusterIP.String(), endpointIP) if err != nil { glog.Errorf("Failed to delete %s endpoint connections, error: %v", epSvcPair.servicePortName.String(), err) @@ -1261,7 +1553,7 @@ func (proxier *Proxier) syncService(svcName string, vs *utilipvs.VirtualServer, // bind service address to dummy interface even if service not changed, // in case that service IP was removed by other processes if bindAddr { - _, err := proxier.ipvs.EnsureVirtualServerAddressBind(vs, DefaultDummyDevice) + _, err := proxier.netlinkHandle.EnsureAddressBind(vs.Address.String(), DefaultDummyDevice) if err != nil { glog.Errorf("Failed to bind service address to dummy device %q: %v", svcName, err) return err @@ -1299,7 +1591,7 @@ func (proxier *Proxier) syncEndpoint(svcPortName proxy.ServicePortName, onlyNode if !curEndpoints.Equal(newEndpoints) { // Create new endpoints - for _, ep := range newEndpoints.Difference(curEndpoints).List() { + for _, ep := range newEndpoints.Difference(curEndpoints).UnsortedList() { ip, port, err := net.SplitHostPort(ep) if err != nil { glog.Errorf("Failed to parse endpoint: %v, error: %v", ep, err) @@ -1323,7 +1615,7 @@ func (proxier *Proxier) syncEndpoint(svcPortName proxy.ServicePortName, onlyNode } } // Delete old endpoints - for _, ep := range curEndpoints.Difference(newEndpoints).List() { + for _, ep := range curEndpoints.Difference(newEndpoints).UnsortedList() { ip, port, err := net.SplitHostPort(ep) if err != nil { glog.Errorf("Failed to parse endpoint: %v, error: %v", ep, err) @@ -1350,6 +1642,7 @@ func (proxier *Proxier) syncEndpoint(svcPortName proxy.ServicePortName, onlyNode } func (proxier *Proxier) cleanLegacyService(atciveServices map[string]bool, currentServices map[string]*utilipvs.VirtualServer) { + unbindIPAddr := sets.NewString() for cS := range currentServices { if !atciveServices[cS] { svc := currentServices[cS] @@ -1357,16 +1650,19 @@ func (proxier *Proxier) cleanLegacyService(atciveServices map[string]bool, curre if err != nil { glog.Errorf("Failed to delete service, error: %v", err) } - err = proxier.ipvs.UnbindVirtualServerAddress(svc, DefaultDummyDevice) - if err != nil { - glog.Errorf("Failed to unbind service from dummy interface, error: %v", err) - } + unbindIPAddr.Insert(svc.Address.String()) + } + } + + for _, addr := range unbindIPAddr.UnsortedList() { + err := proxier.netlinkHandle.UnbindAddress(addr, DefaultDummyDevice) + if err != nil { + glog.Errorf("Failed to unbind service from dummy interface, error: %v", err) } } } // linkKubeServiceChain will 
Create chain KUBE-SERVICES and link the chin in PREROUTING and OUTPUT -// If not specify masqueradeAll or clusterCIDR or LB source range, won't create them. // Chain PREROUTING (policy ACCEPT) // target prot opt source destination @@ -1406,22 +1702,82 @@ func (proxier *Proxier) linkKubeServiceChain(existingNATChains map[utiliptables. return nil } +//// linkKubeIPSetsChain will Create chain KUBE-SVC-IPSETS and link the chin in KUBE-SERVICES +// +//// Chain KUBE-SERVICES (policy ACCEPT) +//// target prot opt source destination +//// KUBE-SVC-IPSETS all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-SERVICE-ACCESS dst,dst +// +//// Chain KUBE-SVC-IPSETS (1 references) +//// target prot opt source destination +//// KUBE-MARK-MASQ all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-EXTERNAL-IP dst,dst +//// ACCEPT all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-EXTERNAL-IP dst,dst PHYSDEV match ! --physdev-is-in ADDRTYPE match src-type !LOCAL +//// ACCEPT all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-EXTERNAL-IP dst,dst ADDRTYPE match dst-type LOCAL +//// ... +//func (proxier *Proxier) linkKubeIPSetsChain(existingNATChains map[utiliptables.Chain]string, natChains *bytes.Buffer) error { +// if _, err := proxier.iptables.EnsureChain(utiliptables.TableNAT, KubeServiceIPSetsChain); err != nil { +// return fmt.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableNAT, KubeServiceIPSetsChain, err) +// } +// +// // TODO: iptables comment message for ipset? +// // The hash:ip,port type of sets require two src/dst parameters of the set match and SET target kernel modules. +// args := []string{"-m", "set", "--match-set", proxier.kubeServiceAccessSet.Name, "dst,dst", "-j", string(KubeServiceIPSetsChain)} +// if _, err := proxier.iptables.EnsureRule(utiliptables.Prepend, utiliptables.TableNAT, kubeServicesChain, args...); err != nil { +// return fmt.Errorf("Failed to ensure that ipset %s chain %s jumps to %s: %v", proxier.kubeServiceAccessSet.Name, kubeServicesChain, KubeServiceIPSetsChain, err) +// } +// +// // equal to `iptables -t nat -N KUBE-SVC-IPSETS` +// // write `:KUBE-SERVICES - [0:0]` in nat table +// if chain, ok := existingNATChains[KubeServiceIPSetsChain]; ok { +// writeLine(natChains, chain) +// } else { +// writeLine(natChains, utiliptables.MakeChainLine(KubeServiceIPSetsChain)) +// } +// return nil +//} + +func (proxier *Proxier) createKubeFireWallChain(existingNATChains map[utiliptables.Chain]string, natChains *bytes.Buffer) error { + // `iptables -t nat -N KUBE-FIRE-WALL` + if _, err := proxier.iptables.EnsureChain(utiliptables.TableNAT, KubeFireWallChain); err != nil { + return fmt.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableNAT, KubeFireWallChain, err) + } + + // write `:KUBE-FIRE-WALL - [0:0]` in nat table + if chain, ok := existingNATChains[KubeFireWallChain]; ok { + writeLine(natChains, chain) + } else { + writeLine(natChains, utiliptables.MakeChainLine(KubeFireWallChain)) + } + return nil +} + // Join all words with spaces, terminate with newline and write to buff. func writeLine(buf *bytes.Buffer, words ...string) { - buf.WriteString(strings.Join(words, " ") + "\n") + // We avoid strings.Join for performance reasons. 
+ for i := range words { + buf.WriteString(words[i]) + if i < len(words)-1 { + buf.WriteByte(' ') + } else { + buf.WriteByte('\n') + } + } } func getLocalIPs(endpointsMap proxyEndpointsMap) map[types.NamespacedName]sets.String { localIPs := make(map[types.NamespacedName]sets.String) - for svcPort := range endpointsMap { - for _, ep := range endpointsMap[svcPort] { + for svcPortName := range endpointsMap { + for _, ep := range endpointsMap[svcPortName] { if ep.isLocal { - nsn := svcPort.NamespacedName - if localIPs[nsn] == nil { - localIPs[nsn] = sets.NewString() + // If the endpoint has a bad format, utilproxy.IPPart() will log an + // error and ep.IPPart() will return a null string. + if ip := ep.IPPart(); ip != "" { + nsn := svcPortName.NamespacedName + if localIPs[nsn] == nil { + localIPs[nsn] = sets.NewString() + } + localIPs[nsn].Insert(ip) } - ip := strings.Split(ep.endpoint, ":")[0] // just the IP part - localIPs[nsn].Insert(ip) } } } @@ -1474,32 +1830,6 @@ func openLocalPort(lp *utilproxy.LocalPort) (utilproxy.Closeable, error) { return socket, nil } -const cmdIP = "ip" - -func ensureDummyDevice(execer utilexec.Interface, dummyDev string) (exist bool, err error) { - args := []string{"link", "add", dummyDev, "type", "dummy"} - out, err := execer.Command(cmdIP, args...).CombinedOutput() - if err != nil { - // "exit status code 2" will be returned if the device already exists - if ee, ok := err.(utilexec.ExitError); ok { - if ee.Exited() && ee.ExitStatus() == 2 { - return true, nil - } - } - return false, fmt.Errorf("error creating dummy interface %q: %v: %s", dummyDev, err, out) - } - return false, nil -} - -func deleteDummyDevice(execer utilexec.Interface, dummyDev string) error { - args := []string{"link", "del", dummyDev} - out, err := execer.Command(cmdIP, args...).CombinedOutput() - if err != nil { - return fmt.Errorf("error deleting dummy interface %q: %v: %s", dummyDev, err, out) - } - return nil -} - // ipvs Proxier fall back on iptables when it needs to do SNAT for engress packets // It will only operate iptables *nat table. // Create and link the kube postrouting chain for SNAT packets. diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/metrics/BUILD b/vendor/k8s.io/kubernetes/pkg/proxy/metrics/BUILD new file mode 100644 index 000000000000..f3166915150e --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/proxy/metrics/BUILD @@ -0,0 +1,23 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["metrics.go"], + importpath = "k8s.io/kubernetes/pkg/proxy/metrics", + visibility = ["//visibility:public"], + deps = ["//vendor/github.com/prometheus/client_golang/prometheus:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/metrics/metrics.go b/vendor/k8s.io/kubernetes/pkg/proxy/metrics/metrics.go new file mode 100644 index 000000000000..d442f2da320e --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/proxy/metrics/metrics.go @@ -0,0 +1,52 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +const kubeProxySubsystem = "kubeproxy" + +var ( + // SyncProxyRulesLatency is the latency of one round of kube-proxy syncing proxy rules. + SyncProxyRulesLatency = prometheus.NewHistogram( + prometheus.HistogramOpts{ + Subsystem: kubeProxySubsystem, + Name: "sync_proxy_rules_latency_microseconds", + Help: "SyncProxyRules latency", + Buckets: prometheus.ExponentialBuckets(1000, 2, 15), + }, + ) +) + +var registerMetricsOnce sync.Once + +// RegisterMetrics registers sync proxy rules latency metrics +func RegisterMetrics() { + registerMetricsOnce.Do(func() { + prometheus.MustRegister(SyncProxyRulesLatency) + }) +} + +// SinceInMicroseconds gets the time since the specified start in microseconds. +func SinceInMicroseconds(start time.Time) float64 { + return float64(time.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds()) +} diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/userspace/BUILD b/vendor/k8s.io/kubernetes/pkg/proxy/userspace/BUILD index 096f905bf7c6..f533d0daa200 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/userspace/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/proxy/userspace/BUILD @@ -22,9 +22,10 @@ go_library( ], "//conditions:default": [], }), + importpath = "k8s.io/kubernetes/pkg/proxy/userspace", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/helper:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/helper:go_default_library", "//pkg/proxy:go_default_library", "//pkg/proxy/util:go_default_library", "//pkg/util/iptables:go_default_library", @@ -48,9 +49,10 @@ go_test( "proxier_test.go", "roundrobin_test.go", ], + importpath = "k8s.io/kubernetes/pkg/proxy/userspace", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/proxy:go_default_library", "//pkg/util/iptables/testing:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/userspace/loadbalancer.go b/vendor/k8s.io/kubernetes/pkg/proxy/userspace/loadbalancer.go index 369094e3296c..ebaeb9f4db06 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/userspace/loadbalancer.go +++ b/vendor/k8s.io/kubernetes/pkg/proxy/userspace/loadbalancer.go @@ -17,10 +17,9 @@ limitations under the License. package userspace import ( - "net" - - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/proxy" + "net" ) // LoadBalancer is an interface for distributing incoming requests to service endpoints. 
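The new pkg/proxy/metrics package above defines a single Prometheus histogram for rule-sync latency, bucketed exponentially and recorded in microseconds. A sketch of the intended usage, assuming the caller vendors the package at the import path declared in its BUILD file:

package main

import (
	"time"

	"k8s.io/kubernetes/pkg/proxy/metrics"
)

func syncOnce() {
	start := time.Now()
	defer func() {
		// SinceInMicroseconds converts the elapsed time to the histogram's unit.
		metrics.SyncProxyRulesLatency.Observe(metrics.SinceInMicroseconds(start))
	}()
	// ... build and restore proxy rules ...
}

func main() {
	metrics.RegisterMetrics() // guarded by sync.Once, so repeat calls are harmless
	syncOnce()
}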
diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/userspace/proxier.go b/vendor/k8s.io/kubernetes/pkg/proxy/userspace/proxier.go index a6304db35f4e..8c4f622e1158 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/userspace/proxier.go +++ b/vendor/k8s.io/kubernetes/pkg/proxy/userspace/proxier.go @@ -28,8 +28,8 @@ import ( "github.com/golang/glog" "k8s.io/apimachinery/pkg/types" utilnet "k8s.io/apimachinery/pkg/util/net" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/helper" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/helper" "k8s.io/kubernetes/pkg/proxy" utilerrors "k8s.io/apimachinery/pkg/util/errors" @@ -48,7 +48,7 @@ type portal struct { // ServiceInfo contains information and state for a particular proxied service type ServiceInfo struct { - // Timeout is the the read/write timeout (used for UDP connections) + // Timeout is the read/write timeout (used for UDP connections) Timeout time.Duration // ActiveClients is the cache of active UDP clients being proxied by this proxy for this service ActiveClients *ClientCache @@ -503,7 +503,7 @@ func (proxier *Proxier) unmergeService(service *api.Service, existingPorts sets. } proxier.loadBalancer.DeleteService(serviceName) } - for _, svcIP := range staleUDPServices.List() { + for _, svcIP := range staleUDPServices.UnsortedList() { if err := utilproxy.ClearUDPConntrackForIP(proxier.exec, svcIP); err != nil { glog.Errorf("Failed to delete stale service IP %s connections, error: %v", svcIP, err) } @@ -942,7 +942,7 @@ func iptablesFlush(ipt iptables.Interface) error { var zeroIPv4 = net.ParseIP("0.0.0.0") var localhostIPv4 = net.ParseIP("127.0.0.1") -var zeroIPv6 = net.ParseIP("::0") +var zeroIPv6 = net.ParseIP("::") var localhostIPv6 = net.ParseIP("::1") // Build a slice of iptables args that are common to from-container and from-host portal rules. 
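The change from "::0" to "::" above is cosmetic as far as net.ParseIP is concerned: both spellings parse to the IPv6 unspecified address and compare equal, with "::" being the canonical rendering. A quick check:

package main

import (
	"fmt"
	"net"
)

func main() {
	a := net.ParseIP("::0")
	b := net.ParseIP("::")
	fmt.Println(a.Equal(b), b.String()) // true ::
}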
diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/userspace/proxysocket.go b/vendor/k8s.io/kubernetes/pkg/proxy/userspace/proxysocket.go index a28b6011b6c1..d65ac02a0cce 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/userspace/proxysocket.go +++ b/vendor/k8s.io/kubernetes/pkg/proxy/userspace/proxysocket.go @@ -27,7 +27,7 @@ import ( "github.com/golang/glog" "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/proxy" ) diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/userspace/roundrobin.go b/vendor/k8s.io/kubernetes/pkg/proxy/userspace/roundrobin.go index b32d69d8776a..e2aca2991331 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/userspace/roundrobin.go +++ b/vendor/k8s.io/kubernetes/pkg/proxy/userspace/roundrobin.go @@ -27,7 +27,7 @@ import ( "github.com/golang/glog" "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/proxy" "k8s.io/kubernetes/pkg/util/slice" ) diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/util/BUILD b/vendor/k8s.io/kubernetes/pkg/proxy/util/BUILD index 0fb554dc0cf7..2956b5212694 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/util/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/proxy/util/BUILD @@ -4,13 +4,15 @@ go_library( name = "go_default_library", srcs = [ "conntrack.go", + "endpoints.go", "port.go", "utils.go", ], + importpath = "k8s.io/kubernetes/pkg/proxy/util", visibility = ["//visibility:public"], deps = [ - "//pkg/api:go_default_library", - "//pkg/api/helper:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/helper:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", @@ -21,12 +23,14 @@ go_test( name = "go_default_test", srcs = [ "conntrack_test.go", + "endpoints_test.go", "port_test.go", "utils_test.go", ], + importpath = "k8s.io/kubernetes/pkg/proxy/util", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/util/conntrack.go b/vendor/k8s.io/kubernetes/pkg/proxy/util/conntrack.go index 6d7e4703ec0b..d99d61b5dbd5 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/util/conntrack.go +++ b/vendor/k8s.io/kubernetes/pkg/proxy/util/conntrack.go @@ -18,6 +18,7 @@ package util import ( "fmt" + "net" "strconv" "strings" @@ -26,13 +27,30 @@ import ( // Utilities for dealing with conntrack -const noConnectionToDelete = "0 flow entries have been deleted" +const NoConnectionToDelete = "0 flow entries have been deleted" -// DeleteServiceConnections uses the conntrack tool to delete the conntrack entries +func IsIPv6(netIP net.IP) bool { + return netIP != nil && netIP.To4() == nil +} + +func IsIPv6String(ip string) bool { + netIP := net.ParseIP(ip) + return IsIPv6(netIP) +} + +func parametersWithFamily(isIPv6 bool, parameters ...string) []string { + if isIPv6 { + parameters = append(parameters, "-f", "ipv6") + } + return parameters +} + +// ClearUDPConntrackForIP uses the conntrack tool to delete the conntrack entries // for the UDP connections specified by the given service IP func ClearUDPConntrackForIP(execer exec.Interface, ip string) error { - err := ExecConntrackTool(execer, "-D", 
"--orig-dst", ip, "-p", "udp") - if err != nil && !strings.Contains(err.Error(), noConnectionToDelete) { + parameters := parametersWithFamily(IsIPv6String(ip), "-D", "--orig-dst", ip, "-p", "udp") + err := ExecConntrackTool(execer, parameters...) + if err != nil && !strings.Contains(err.Error(), NoConnectionToDelete) { // TODO: Better handling for deletion failure. When failure occur, stale udp connection may not get flushed. // These stale udp connection will keep black hole traffic. Making this a best effort operation for now, since it // is expensive to baby-sit all udp connections to kubernetes services. @@ -60,12 +78,13 @@ func ExecConntrackTool(execer exec.Interface, parameters ...string) error { // The solution is clearing the conntrack. Known issues: // https://github.com/docker/docker/issues/8795 // https://github.com/kubernetes/kubernetes/issues/31983 -func ClearUDPConntrackForPort(execer exec.Interface, port int) error { +func ClearUDPConntrackForPort(execer exec.Interface, port int, isIPv6 bool) error { if port <= 0 { return fmt.Errorf("Wrong port number. The port number must be greater than zero") } - err := ExecConntrackTool(execer, "-D", "-p", "udp", "--dport", strconv.Itoa(port)) - if err != nil && !strings.Contains(err.Error(), noConnectionToDelete) { + parameters := parametersWithFamily(isIPv6, "-D", "-p", "udp", "--dport", strconv.Itoa(port)) + err := ExecConntrackTool(execer, parameters...) + if err != nil && !strings.Contains(err.Error(), NoConnectionToDelete) { return fmt.Errorf("error deleting conntrack entries for UDP port: %d, error: %v", port, err) } return nil @@ -74,8 +93,9 @@ func ClearUDPConntrackForPort(execer exec.Interface, port int) error { // ClearUDPConntrackForPeers uses the conntrack tool to delete the conntrack entries // for the UDP connections specified by the {origin, dest} IP pair. func ClearUDPConntrackForPeers(execer exec.Interface, origin, dest string) error { - err := ExecConntrackTool(execer, "-D", "--orig-dst", origin, "--dst-nat", dest, "-p", "udp") - if err != nil && !strings.Contains(err.Error(), noConnectionToDelete) { + parameters := parametersWithFamily(IsIPv6String(origin), "-D", "--orig-dst", origin, "--dst-nat", dest, "-p", "udp") + err := ExecConntrackTool(execer, parameters...) + if err != nil && !strings.Contains(err.Error(), NoConnectionToDelete) { // TODO: Better handling for deletion failure. When failure occur, stale udp connection may not get flushed. // These stale udp connection will keep black hole traffic. Making this a best effort operation for now, since it // is expensive to baby sit all udp connections to kubernetes services. diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/util/endpoints.go b/vendor/k8s.io/kubernetes/pkg/proxy/util/endpoints.go new file mode 100644 index 000000000000..449e112619db --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/proxy/util/endpoints.go @@ -0,0 +1,73 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util + +import ( + "fmt" + "net" + "strconv" + + "github.com/golang/glog" +) + +// IPPart returns just the IP part of an IP or IP:port or endpoint string. If the IP +// part is an IPv6 address enclosed in brackets (e.g. "[fd00:1::5]:9999"), +// then the brackets are stripped as well. +func IPPart(s string) string { + if ip := net.ParseIP(s); ip != nil { + // IP address without port + return s + } + // Must be IP:port + host, _, err := net.SplitHostPort(s) + if err != nil { + glog.Errorf("Error parsing '%s': %v", s, err) + return "" + } + // Check if host string is a valid IP address + if ip := net.ParseIP(host); ip != nil { + return ip.String() + } else { + glog.Errorf("invalid IP part '%s'", host) + } + return "" +} + +func PortPart(s string) (int, error) { + // Must be IP:port + _, port, err := net.SplitHostPort(s) + if err != nil { + glog.Errorf("Error parsing '%s': %v", s, err) + return -1, err + } + portNumber, err := strconv.Atoi(port) + if err != nil { + glog.Errorf("Error parsing '%s': %v", port, err) + return -1, err + } + return portNumber, nil +} + +// ToCIDR returns a host address of the form /32 for +// IPv4 and /128 for IPv6 +func ToCIDR(ip net.IP) string { + len := 32 + if ip.To4() == nil { + len = 128 + } + return fmt.Sprintf("%s/%d", ip.String(), len) +} diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/util/port.go b/vendor/k8s.io/kubernetes/pkg/proxy/util/port.go index fd1a024dac8a..96317b1dc820 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/util/port.go +++ b/vendor/k8s.io/kubernetes/pkg/proxy/util/port.go @@ -18,6 +18,8 @@ package util import ( "fmt" + "net" + "strconv" "github.com/golang/glog" ) @@ -37,7 +39,8 @@ type LocalPort struct { } func (lp *LocalPort) String() string { - return fmt.Sprintf("%q (%s:%d/%s)", lp.Description, lp.IP, lp.Port, lp.Protocol) + ipPort := net.JoinHostPort(lp.IP, strconv.Itoa(lp.Port)) + return fmt.Sprintf("%q (%s/%s)", lp.Description, ipPort, lp.Protocol) } // Closeable is an interface around closing an port. 
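The new endpoints helpers above are mostly about IPv6 safety: IPPart goes through net.SplitHostPort, so bracketed endpoints such as "[fd00:1::5]:9999" are parsed correctly where the old strings.Index(ep, ":") slicing was not, and ToCIDR picks /32 or /128 from the address family. A simplified standalone sketch of the same idea (not the vendored functions themselves):

package main

import (
	"fmt"
	"net"
)

// ipPart extracts the IP from "ip:port" or "[ipv6]:port", dropping brackets.
func ipPart(endpoint string) string {
	host, _, err := net.SplitHostPort(endpoint)
	if err != nil {
		return ""
	}
	if ip := net.ParseIP(host); ip != nil {
		return ip.String()
	}
	return ""
}

// toCIDR renders a host route: /32 for IPv4, /128 for IPv6.
func toCIDR(ip net.IP) string {
	bits := 32
	if ip.To4() == nil {
		bits = 128
	}
	return fmt.Sprintf("%s/%d", ip, bits)
}

func main() {
	fmt.Println(ipPart("10.0.0.5:53"))            // 10.0.0.5
	fmt.Println(ipPart("[fd00:1::5]:9999"))       // fd00:1::5
	fmt.Println(toCIDR(net.ParseIP("fd00:1::5"))) // fd00:1::5/128
}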
diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/util/utils.go b/vendor/k8s.io/kubernetes/pkg/proxy/util/utils.go index 81734f548927..cac0140c3860 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/util/utils.go +++ b/vendor/k8s.io/kubernetes/pkg/proxy/util/utils.go @@ -20,8 +20,8 @@ import ( "net" "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/helper" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/helper" "github.com/golang/glog" ) diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/BUILD b/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/BUILD index f60ceb4efbb5..42494391696e 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/BUILD @@ -10,14 +10,15 @@ go_library( ], "//conditions:default": [], }), + importpath = "k8s.io/kubernetes/pkg/proxy/winkernel", visibility = ["//visibility:public"], deps = [ "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:windows_amd64": [ - "//pkg/api:go_default_library", - "//pkg/api/helper:go_default_library", "//pkg/api/service:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/helper:go_default_library", "//pkg/features:go_default_library", "//pkg/proxy:go_default_library", "//pkg/proxy/healthcheck:go_default_library", @@ -43,10 +44,11 @@ go_test( ], "//conditions:default": [], }), + importpath = "k8s.io/kubernetes/pkg/proxy/winkernel", library = ":go_default_library", deps = select({ "@io_bazel_rules_go//go/platform:windows_amd64": [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/proxy:go_default_library", "//pkg/util/async:go_default_library", "//vendor/github.com/davecgh/go-spew/spew:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/OWNERS b/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/OWNERS new file mode 100644 index 000000000000..605001e0a112 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/OWNERS @@ -0,0 +1,3 @@ +reviewers: +- dineshgovindasamy +- madhanrm diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/proxier.go b/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/proxier.go index d1f3b55182ea..5d56561c4222 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/proxier.go +++ b/vendor/k8s.io/kubernetes/pkg/proxy/winkernel/proxier.go @@ -1,7 +1,7 @@ // +build windows /* -Copyright 2015 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -37,9 +37,9 @@ import ( "k8s.io/apimachinery/pkg/util/wait" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/tools/record" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/helper" apiservice "k8s.io/kubernetes/pkg/api/service" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/helper" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/proxy" "k8s.io/kubernetes/pkg/proxy/healthcheck" @@ -93,7 +93,7 @@ type serviceInfo struct { targetPort int loadBalancerStatus api.LoadBalancerStatus sessionAffinityType api.ServiceAffinity - stickyMaxAgeMinutes int + stickyMaxAgeSeconds int externalIPs []*externalIPInfo loadBalancerIngressIPs []*loadBalancerIngressInfo loadBalancerSourceRanges []string @@ -165,6 +165,12 @@ func newServiceInfo(svcPortName proxy.ServicePortName, port *api.ServicePort, se onlyNodeLocalEndpoints = true } + // set default session sticky max age 180min=10800s + stickyMaxAgeSeconds := 10800 + if service.Spec.SessionAffinity == api.ServiceAffinityClientIP { + // Kube-apiserver side guarantees SessionAffinityConfig won't be nil when session affinity type is ClientIP + stickyMaxAgeSeconds = int(*service.Spec.SessionAffinityConfig.ClientIP.TimeoutSeconds) + } info := &serviceInfo{ clusterIP: net.ParseIP(service.Spec.ClusterIP), port: int(port.Port), @@ -174,7 +180,7 @@ func newServiceInfo(svcPortName proxy.ServicePortName, port *api.ServicePort, se // Deep-copy in case the service instance changes loadBalancerStatus: *helper.LoadBalancerStatusDeepCopy(&service.Status.LoadBalancer), sessionAffinityType: service.Spec.SessionAffinity, - stickyMaxAgeMinutes: 180, // TODO: paramaterize this in the API. + stickyMaxAgeSeconds: stickyMaxAgeSeconds, loadBalancerSourceRanges: make([]string, len(service.Spec.LoadBalancerSourceRanges)), onlyNodeLocalEndpoints: onlyNodeLocalEndpoints, } @@ -447,15 +453,6 @@ func NewProxier( recorder record.EventRecorder, healthzServer healthcheck.HealthzUpdater, ) (*Proxier, error) { - // check valid user input - if minSyncPeriod > syncPeriod { - return nil, fmt.Errorf("min-sync (%v) must be < sync(%v)", minSyncPeriod, syncPeriod) - } - - // Generate the masquerade mark to use for SNAT rules. - if masqueradeBit < 0 || masqueradeBit > 31 { - return nil, fmt.Errorf("invalid iptables-masquerade-bit %v not in [0, 31]", masqueradeBit) - } masqueradeValue := 1 << uint(masqueradeBit) masqueradeMark := fmt.Sprintf("%#08x/%#08x", masqueradeValue, masqueradeValue) @@ -925,8 +922,7 @@ func serviceToServiceMap(service *api.Service) proxyServiceMap { return serviceMap } -// This is where all of the hns -save/restore calls happen. -// The only other hns rules are those that are setup in iptablesInit() +// This is where all of the hns save/restore calls happen. 
// assumes proxier.mu is held func (proxier *Proxier) syncProxyRules() { proxier.mu.Lock() @@ -970,53 +966,52 @@ func (proxier *Proxier) syncProxyRules() { var hnsEndpoints []hcsshim.HNSEndpoint glog.V(4).Infof("====Applying Policy for %s====", svcName) // Create Remote endpoints for every endpoint, corresponding to the service - if len(proxier.endpointsMap[svcName]) > 0 { - for _, ep := range proxier.endpointsMap[svcName] { - var newHnsEndpoint *hcsshim.HNSEndpoint - hnsNetworkName := proxier.network.name - var err error - if len(ep.hnsID) > 0 { - newHnsEndpoint, err = hcsshim.GetHNSEndpointByID(ep.hnsID) - } - if newHnsEndpoint == nil { - // First check if an endpoint resource exists for this IP, on the current host - // A Local endpoint could exist here already - // A remote endpoint was already created and proxy was restarted - newHnsEndpoint, err = getHnsEndpointByIpAddress(net.ParseIP(ep.ip), hnsNetworkName) + for _, ep := range proxier.endpointsMap[svcName] { + var newHnsEndpoint *hcsshim.HNSEndpoint + hnsNetworkName := proxier.network.name + var err error + if len(ep.hnsID) > 0 { + newHnsEndpoint, err = hcsshim.GetHNSEndpointByID(ep.hnsID) + } + + if newHnsEndpoint == nil { + // First check if an endpoint resource exists for this IP, on the current host + // A Local endpoint could exist here already + // A remote endpoint was already created and proxy was restarted + newHnsEndpoint, err = getHnsEndpointByIpAddress(net.ParseIP(ep.ip), hnsNetworkName) + } + + if newHnsEndpoint == nil { + if ep.isLocal { + glog.Errorf("Local endpoint not found for %v: err: %v on network %s", ep.ip, err, hnsNetworkName) + continue + } + // hns Endpoint resource was not found, create one + hnsnetwork, err := hcsshim.GetHNSNetworkByName(hnsNetworkName) + if err != nil { + glog.Errorf("%v", err) + continue } - if newHnsEndpoint == nil { - if ep.isLocal { - glog.Errorf("Local endpoint not found for %v: err : %v on network %s", ep.ip, err, hnsNetworkName) - continue - } - // hns Endpoint resource was not found, create one - hnsnetwork, err := hcsshim.GetHNSNetworkByName(hnsNetworkName) - if err != nil { - glog.Errorf("%v", err) - continue - } - - hnsEndpoint := &hcsshim.HNSEndpoint{ - MacAddress: ep.macAddress, - IPAddress: net.ParseIP(ep.ip), - } - - newHnsEndpoint, err = hnsnetwork.CreateRemoteEndpoint(hnsEndpoint) - if err != nil { - glog.Errorf("Remote endpoint creation failed: %v", err) - continue - } + hnsEndpoint := &hcsshim.HNSEndpoint{ + MacAddress: ep.macAddress, + IPAddress: net.ParseIP(ep.ip), } - // Save the hnsId for reference - LogJson(newHnsEndpoint, "Hns Endpoint resource", 1) - hnsEndpoints = append(hnsEndpoints, *newHnsEndpoint) - ep.hnsID = newHnsEndpoint.Id - ep.refCount++ - Log(ep, "Endpoint resource found", 3) + newHnsEndpoint, err = hnsnetwork.CreateRemoteEndpoint(hnsEndpoint) + if err != nil { + glog.Errorf("Remote endpoint creation failed: %v", err) + continue + } } + + // Save the hnsId for reference + LogJson(newHnsEndpoint, "Hns Endpoint resource", 1) + hnsEndpoints = append(hnsEndpoints, *newHnsEndpoint) + ep.hnsID = newHnsEndpoint.Id + ep.refCount++ + Log(ep, "Endpoint resource found", 3) } glog.V(3).Infof("Associated endpoints [%s] for service [%s]", spew.Sdump(hnsEndpoints), svcName) @@ -1039,8 +1034,8 @@ func (proxier *Proxier) syncProxyRules() { false, svcInfo.clusterIP.String(), Enum(svcInfo.protocol), - uint16(svcInfo.port), uint16(svcInfo.targetPort), + uint16(svcInfo.port), ) if err != nil { glog.Errorf("Policy creation failed: %v", err) @@ -1050,7 +1045,7 @@ func 
(proxier *Proxier) syncProxyRules() { svcInfo.hnsID = hnsLoadBalancer.ID glog.V(3).Infof("Hns LoadBalancer resource created for cluster ip resources %v, Id [%s]", svcInfo.clusterIP, hnsLoadBalancer.ID) - // If nodePort is speficied, user should be able to use nodeIP:nodePort to reach the backend endpoints + // If nodePort is specified, user should be able to use nodeIP:nodePort to reach the backend endpoints if svcInfo.nodePort > 0 { hnsLoadBalancer, err := getHnsLoadBalancer( hnsEndpoints, @@ -1077,8 +1072,8 @@ func (proxier *Proxier) syncProxyRules() { false, externalIp.ip, Enum(svcInfo.protocol), - uint16(svcInfo.port), uint16(svcInfo.targetPort), + uint16(svcInfo.port), ) if err != nil { glog.Errorf("Policy creation failed: %v", err) @@ -1095,8 +1090,8 @@ func (proxier *Proxier) syncProxyRules() { false, lbIngressIp.ip, Enum(svcInfo.protocol), - uint16(svcInfo.port), uint16(svcInfo.targetPort), + uint16(svcInfo.port), ) if err != nil { glog.Errorf("Policy creation failed: %v", err) @@ -1126,7 +1121,7 @@ func (proxier *Proxier) syncProxyRules() { // Finish housekeeping. // TODO: these could be made more consistent. - for _, svcIP := range staleServices.List() { + for _, svcIP := range staleServices.UnsortedList() { // TODO : Check if this is required to cleanup stale services here glog.V(5).Infof("Pending delete stale service IP %s connections", svcIP) } diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/BUILD b/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/BUILD index f288701bb4b5..cd583ef26959 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/BUILD @@ -16,9 +16,10 @@ go_library( "types.go", "udp_server.go", ], + importpath = "k8s.io/kubernetes/pkg/proxy/winuserspace", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/helper:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/helper:go_default_library", "//pkg/proxy:go_default_library", "//pkg/util/ipconfig:go_default_library", "//pkg/util/netsh:go_default_library", @@ -38,9 +39,10 @@ go_test( "proxier_test.go", "roundrobin_test.go", ], + importpath = "k8s.io/kubernetes/pkg/proxy/winuserspace", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/proxy:go_default_library", "//pkg/util/netsh/testing:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/loadbalancer.go b/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/loadbalancer.go index b4a5bc2c253e..eb0dc2dbf67e 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/loadbalancer.go +++ b/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/loadbalancer.go @@ -17,10 +17,9 @@ limitations under the License. package winuserspace import ( - "net" - - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/proxy" + "net" ) // LoadBalancer is an interface for distributing incoming requests to service endpoints. 
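The winkernel hunks above pick up the same session-affinity change made in the ipvs proxier: the sticky duration is read from the service's SessionAffinityConfig when affinity is ClientIP, and otherwise defaults to 180 minutes (10800 seconds). A standalone sketch of that selection, with simplified types standing in for the API structs:

package main

import "fmt"

// clientIPConfig is a simplified stand-in for api.SessionAffinityConfig.ClientIP.
type clientIPConfig struct {
	TimeoutSeconds *int32
}

// stickySeconds mirrors the selection logic above: default 180min = 10800s unless
// ClientIP affinity carries an explicit timeout (the apiserver guarantees the
// config is non-nil in that case, per the comment in the hunk).
func stickySeconds(affinity string, cfg *clientIPConfig) int {
	seconds := 10800
	if affinity == "ClientIP" && cfg != nil && cfg.TimeoutSeconds != nil {
		seconds = int(*cfg.TimeoutSeconds)
	}
	return seconds
}

func main() {
	t := int32(3600)
	fmt.Println(stickySeconds("None", nil))                     // 10800
	fmt.Println(stickySeconds("ClientIP", &clientIPConfig{&t})) // 3600
}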
diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/proxier.go b/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/proxier.go index 4e6382918c65..5b318164220d 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/proxier.go +++ b/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/proxier.go @@ -29,8 +29,8 @@ import ( "k8s.io/apimachinery/pkg/types" utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/helper" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/helper" "k8s.io/kubernetes/pkg/proxy" "k8s.io/kubernetes/pkg/util/netsh" ) diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/proxysocket.go b/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/proxysocket.go index ced3a8b0cc7f..ed87f1975377 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/proxysocket.go +++ b/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/proxysocket.go @@ -30,7 +30,7 @@ import ( "github.com/miekg/dns" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/proxy" "k8s.io/kubernetes/pkg/util/ipconfig" "k8s.io/utils/exec" diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/roundrobin.go b/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/roundrobin.go index 86d7614c8503..27b835559947 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/roundrobin.go +++ b/vendor/k8s.io/kubernetes/pkg/proxy/winuserspace/roundrobin.go @@ -27,7 +27,7 @@ import ( "github.com/golang/glog" "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/proxy" "k8s.io/kubernetes/pkg/util/slice" ) diff --git a/vendor/k8s.io/kubernetes/pkg/quota/BUILD b/vendor/k8s.io/kubernetes/pkg/quota/BUILD index a6a27b32818d..7d5cd09601d9 100644 --- a/vendor/k8s.io/kubernetes/pkg/quota/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/quota/BUILD @@ -12,23 +12,26 @@ go_library( "interfaces.go", "resources.go", ], + importpath = "k8s.io/kubernetes/pkg/quota", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", + "//vendor/k8s.io/client-go/tools/cache:go_default_library", ], ) go_test( name = "go_default_test", srcs = ["resources_test.go"], + importpath = "k8s.io/kubernetes/pkg/quota", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/quota/evaluator/core/BUILD b/vendor/k8s.io/kubernetes/pkg/quota/evaluator/core/BUILD index 14e63cf3d9d8..85d50d0855ae 100644 --- a/vendor/k8s.io/kubernetes/pkg/quota/evaluator/core/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/quota/evaluator/core/BUILD @@ -9,29 +9,25 @@ load( go_library( name = "go_default_library", srcs = [ - "configmap.go", "doc.go", "persistent_volume_claims.go", "pods.go", "registry.go", - "replication_controllers.go", - "resource_quotas.go", - "secrets.go", "services.go", ], + importpath = 
"k8s.io/kubernetes/pkg/quota/evaluator/core", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/helper:go_default_library", - "//pkg/api/helper/qos:go_default_library", - "//pkg/api/v1:go_default_library", - "//pkg/api/validation:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/helper:go_default_library", + "//pkg/apis/core/helper/qos:go_default_library", + "//pkg/apis/core/v1:go_default_library", + "//pkg/apis/core/validation:go_default_library", "//pkg/features:go_default_library", "//pkg/kubeapiserver/admission/util:go_default_library", "//pkg/quota:go_default_library", "//pkg/quota/generic:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library", @@ -42,8 +38,6 @@ go_library( "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", "//vendor/k8s.io/apiserver/pkg/features:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//vendor/k8s.io/client-go/informers:go_default_library", - "//vendor/k8s.io/client-go/kubernetes:go_default_library", ], ) @@ -54,15 +48,17 @@ go_test( "pods_test.go", "services_test.go", ], + importpath = "k8s.io/kubernetes/pkg/quota/evaluator/core", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/quota:go_default_library", + "//pkg/quota/generic:go_default_library", "//pkg/util/node:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/quota/evaluator/core/configmap.go b/vendor/k8s.io/kubernetes/pkg/quota/evaluator/core/configmap.go deleted file mode 100644 index bde20fc72478..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/quota/evaluator/core/configmap.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package core - -import ( - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/informers" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/quota" - "k8s.io/kubernetes/pkg/quota/generic" -) - -// listConfigMapsByNamespaceFuncUsingClient returns a configMap listing function based on the provided client. 
-func listConfigMapsByNamespaceFuncUsingClient(kubeClient clientset.Interface) generic.ListFuncByNamespace { - // TODO: ideally, we could pass dynamic client pool down into this code, and have one way of doing this. - // unfortunately, dynamic client works with Unstructured objects, and when we calculate Usage, we require - // structured objects. - return func(namespace string, options metav1.ListOptions) ([]runtime.Object, error) { - itemList, err := kubeClient.Core().ConfigMaps(namespace).List(options) - if err != nil { - return nil, err - } - results := make([]runtime.Object, 0, len(itemList.Items)) - for i := range itemList.Items { - results = append(results, &itemList.Items[i]) - } - return results, nil - } -} - -// NewConfigMapEvaluator returns an evaluator that can evaluate configMaps -// if the specified shared informer factory is not nil, evaluator may use it to support listing functions. -func NewConfigMapEvaluator(kubeClient clientset.Interface, f informers.SharedInformerFactory) quota.Evaluator { - listFuncByNamespace := listConfigMapsByNamespaceFuncUsingClient(kubeClient) - if f != nil { - listFuncByNamespace = generic.ListResourceUsingInformerFunc(f, v1.SchemeGroupVersion.WithResource("configmaps")) - } - return &generic.ObjectCountEvaluator{ - AllowCreateOnUpdate: false, - InternalGroupKind: api.Kind("ConfigMap"), - ResourceName: api.ResourceConfigMaps, - ListFuncByNamespace: listFuncByNamespace, - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/quota/evaluator/core/persistent_volume_claims.go b/vendor/k8s.io/kubernetes/pkg/quota/evaluator/core/persistent_volume_claims.go index 9759c492e68d..9032dcaf7b1d 100644 --- a/vendor/k8s.io/kubernetes/pkg/quota/evaluator/core/persistent_volume_claims.go +++ b/vendor/k8s.io/kubernetes/pkg/quota/evaluator/core/persistent_volume_claims.go @@ -22,26 +22,25 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/initialization" utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apiserver/pkg/admission" "k8s.io/apiserver/pkg/features" utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/client-go/informers" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/helper" - k8s_api_v1 "k8s.io/kubernetes/pkg/api/v1" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/helper" + k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1" k8sfeatures "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/kubeapiserver/admission/util" "k8s.io/kubernetes/pkg/quota" "k8s.io/kubernetes/pkg/quota/generic" ) +// the name used for object count quota +var pvcObjectCountName = generic.ObjectCountQuotaResourceNameFor(v1.SchemeGroupVersion.WithResource("persistentvolumeclaims").GroupResource()) + // pvcResources are the set of static resources managed by quota associated with pvcs. // for each resouce in this list, it may be refined dynamically based on storage class. var pvcResources = []api.ResourceName{ @@ -67,34 +66,11 @@ func V1ResourceByStorageClass(storageClass string, resourceName v1.ResourceName) return v1.ResourceName(string(storageClass + storageClassSuffix + string(resourceName))) } -// listPersistentVolumeClaimsByNamespaceFuncUsingClient returns a pvc listing function based on the provided client. 
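The quota evaluators above drop their client-based listing functions and start charging a generic object-count resource alongside the legacy resource names. The naming convention appears to be count/<resource> for core-group resources (for example count/persistentvolumeclaims); the helper below only illustrates that scheme and is not the vendored generic.ObjectCountQuotaResourceNameFor:

package main

import "fmt"

// objectCountQuotaName sketches the "count/<resource>[.<group>]" convention used
// by the new object-count quota resources; treat it as an illustration of the
// naming scheme rather than the real implementation.
func objectCountQuotaName(resource, group string) string {
	if group == "" {
		return "count/" + resource
	}
	return fmt.Sprintf("count/%s.%s", resource, group)
}

func main() {
	fmt.Println(objectCountQuotaName("pods", ""))                   // count/pods
	fmt.Println(objectCountQuotaName("persistentvolumeclaims", "")) // count/persistentvolumeclaims
	fmt.Println(objectCountQuotaName("deployments", "apps"))        // count/deployments.apps
}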
-func listPersistentVolumeClaimsByNamespaceFuncUsingClient(kubeClient clientset.Interface) generic.ListFuncByNamespace { - // TODO: ideally, we could pass dynamic client pool down into this code, and have one way of doing this. - // unfortunately, dynamic client works with Unstructured objects, and when we calculate Usage, we require - // structured objects. - return func(namespace string, options metav1.ListOptions) ([]runtime.Object, error) { - itemList, err := kubeClient.Core().PersistentVolumeClaims(namespace).List(options) - if err != nil { - return nil, err - } - results := make([]runtime.Object, 0, len(itemList.Items)) - for i := range itemList.Items { - results = append(results, &itemList.Items[i]) - } - return results, nil - } -} - // NewPersistentVolumeClaimEvaluator returns an evaluator that can evaluate persistent volume claims -// if the specified shared informer factory is not nil, evaluator may use it to support listing functions. -func NewPersistentVolumeClaimEvaluator(kubeClient clientset.Interface, f informers.SharedInformerFactory) quota.Evaluator { - listFuncByNamespace := listPersistentVolumeClaimsByNamespaceFuncUsingClient(kubeClient) - if f != nil { - listFuncByNamespace = generic.ListResourceUsingInformerFunc(f, v1.SchemeGroupVersion.WithResource("persistentvolumeclaims")) - } - return &pvcEvaluator{ - listFuncByNamespace: listFuncByNamespace, - } +func NewPersistentVolumeClaimEvaluator(f quota.ListerForResourceFunc) quota.Evaluator { + listFuncByNamespace := generic.ListResourceUsingListerFunc(f, v1.SchemeGroupVersion.WithResource("persistentvolumeclaims")) + pvcEvaluator := &pvcEvaluator{listFuncByNamespace: listFuncByNamespace} + return pvcEvaluator } // pvcEvaluator knows how to evaluate quota usage for persistent volume claims @@ -105,45 +81,13 @@ type pvcEvaluator struct { // Constraints verifies that all required resources are present on the item. func (p *pvcEvaluator) Constraints(required []api.ResourceName, item runtime.Object) error { - pvc, ok := item.(*api.PersistentVolumeClaim) - if !ok { - return fmt.Errorf("unexpected input object %v", item) - } - - // these are the items that we will be handling based on the objects actual storage-class - pvcRequiredSet := append([]api.ResourceName{}, pvcResources...) - if storageClassRef := helper.GetPersistentVolumeClaimClass(pvc); len(storageClassRef) > 0 { - pvcRequiredSet = append(pvcRequiredSet, ResourceByStorageClass(storageClassRef, api.ResourcePersistentVolumeClaims)) - pvcRequiredSet = append(pvcRequiredSet, ResourceByStorageClass(storageClassRef, api.ResourceRequestsStorage)) - } - - // in effect, this will remove things from the required set that are not tied to this pvcs storage class - // for example, if a quota has bronze and gold storage class items defined, we should not error a bronze pvc for not being gold. - // but we should error a bronze pvc if it doesn't make a storage request size... - requiredResources := quota.Intersection(required, pvcRequiredSet) - requiredSet := quota.ToSet(requiredResources) - - // usage for this pvc will only include global pvc items + this storage class specific items - pvcUsage, err := p.Usage(item) - if err != nil { - return err - } - - // determine what required resources were not tracked by usage. - missingSet := sets.NewString() - pvcSet := quota.ToSet(quota.ResourceNames(pvcUsage)) - if diff := requiredSet.Difference(pvcSet); len(diff) > 0 { - missingSet.Insert(diff.List()...) 
- } - if len(missingSet) == 0 { - return nil - } - return fmt.Errorf("must specify %s", strings.Join(missingSet.List(), ",")) + // no-op for persistent volume claims + return nil } -// GroupKind that this evaluator tracks -func (p *pvcEvaluator) GroupKind() schema.GroupKind { - return api.Kind("PersistentVolumeClaim") +// GroupResource that this evaluator tracks +func (p *pvcEvaluator) GroupResource() schema.GroupResource { + return v1.SchemeGroupVersion.WithResource("persistentvolumeclaims").GroupResource() } // Handles returns true if the evaluator should handle the specified operation. @@ -183,6 +127,12 @@ func (p *pvcEvaluator) Matches(resourceQuota *api.ResourceQuota, item runtime.Ob func (p *pvcEvaluator) MatchingResources(items []api.ResourceName) []api.ResourceName { result := []api.ResourceName{} for _, item := range items { + // match object count quota fields + if quota.Contains([]api.ResourceName{pvcObjectCountName}, item) { + result = append(result, item) + continue + } + // match pvc resources if quota.Contains(pvcResources, item) { result = append(result, item) continue @@ -208,7 +158,8 @@ func (p *pvcEvaluator) Usage(item runtime.Object) (api.ResourceList, error) { } // charge for claim - result[api.ResourcePersistentVolumeClaims] = resource.MustParse("1") + result[api.ResourcePersistentVolumeClaims] = *(resource.NewQuantity(1, resource.DecimalSI)) + result[pvcObjectCountName] = *(resource.NewQuantity(1, resource.DecimalSI)) if utilfeature.DefaultFeatureGate.Enabled(features.Initializers) { if !initialization.IsInitialized(pvc.Initializers) { // Only charge pvc count for uninitialized pvc. @@ -218,7 +169,7 @@ func (p *pvcEvaluator) Usage(item runtime.Object) (api.ResourceList, error) { storageClassRef := helper.GetPersistentVolumeClaimClass(pvc) if len(storageClassRef) > 0 { storageClassClaim := api.ResourceName(storageClassRef + storageClassSuffix + string(api.ResourcePersistentVolumeClaims)) - result[storageClassClaim] = resource.MustParse("1") + result[storageClassClaim] = *(resource.NewQuantity(1, resource.DecimalSI)) } // charge for storage @@ -245,7 +196,7 @@ func toInternalPersistentVolumeClaimOrError(obj runtime.Object) (*api.Persistent pvc := &api.PersistentVolumeClaim{} switch t := obj.(type) { case *v1.PersistentVolumeClaim: - if err := k8s_api_v1.Convert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim(t, pvc, nil); err != nil { + if err := k8s_api_v1.Convert_v1_PersistentVolumeClaim_To_core_PersistentVolumeClaim(t, pvc, nil); err != nil { return nil, err } case *api.PersistentVolumeClaim: diff --git a/vendor/k8s.io/kubernetes/pkg/quota/evaluator/core/pods.go b/vendor/k8s.io/kubernetes/pkg/quota/evaluator/core/pods.go index 7354c3d8bf23..ba935eab5afc 100644 --- a/vendor/k8s.io/kubernetes/pkg/quota/evaluator/core/pods.go +++ b/vendor/k8s.io/kubernetes/pkg/quota/evaluator/core/pods.go @@ -23,31 +23,29 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/clock" - "k8s.io/apimachinery/pkg/util/initialization" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/apiserver/pkg/admission" - "k8s.io/apiserver/pkg/features" - utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/client-go/informers" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/kubernetes/pkg/api" - 
"k8s.io/kubernetes/pkg/api/helper/qos" - k8s_api_v1 "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/api/validation" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/helper/qos" + k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1" + "k8s.io/kubernetes/pkg/apis/core/validation" "k8s.io/kubernetes/pkg/kubeapiserver/admission/util" "k8s.io/kubernetes/pkg/quota" "k8s.io/kubernetes/pkg/quota/generic" ) +// the name used for object count quota +var podObjectCountName = generic.ObjectCountQuotaResourceNameFor(v1.SchemeGroupVersion.WithResource("pods").GroupResource()) + // podResources are the set of resources managed by quota associated with pods. var podResources = []api.ResourceName{ + podObjectCountName, api.ResourceCPU, api.ResourceMemory, api.ResourceEphemeralStorage, @@ -60,35 +58,48 @@ var podResources = []api.ResourceName{ api.ResourcePods, } -// listPodsByNamespaceFuncUsingClient returns a pod listing function based on the provided client. -func listPodsByNamespaceFuncUsingClient(kubeClient clientset.Interface) generic.ListFuncByNamespace { - // TODO: ideally, we could pass dynamic client pool down into this code, and have one way of doing this. - // unfortunately, dynamic client works with Unstructured objects, and when we calculate Usage, we require - // structured objects. - return func(namespace string, options metav1.ListOptions) ([]runtime.Object, error) { - itemList, err := kubeClient.Core().Pods(namespace).List(options) - if err != nil { - return nil, err - } - results := make([]runtime.Object, 0, len(itemList.Items)) - for i := range itemList.Items { - results = append(results, &itemList.Items[i]) - } - return results, nil - } +// podResourcePrefixes are the set of prefixes for resources (Hugepages, and other +// potential extended reources with specific prefix) managed by quota associated with pods. +var podResourcePrefixes = []string{ + api.ResourceHugePagesPrefix, + api.ResourceRequestsHugePagesPrefix, +} + +// requestedResourcePrefixes are the set of prefixes for resources +// that might be declared in pod's Resources.Requests/Limits +var requestedResourcePrefixes = []string{ + api.ResourceHugePagesPrefix, } +const ( + requestsPrefix = "requests." + limitsPrefix = "limits." +) + +// maskResourceWithPrefix mask resource with certain prefix +// e.g. hugepages-XXX -> requests.hugepages-XXX +func maskResourceWithPrefix(resource api.ResourceName, prefix string) api.ResourceName { + return api.ResourceName(fmt.Sprintf("%s%s", prefix, string(resource))) +} + +// NOTE: it was a mistake, but if a quota tracks cpu or memory related resources, +// the incoming pod is required to have those values set. we should not repeat +// this mistake for other future resources (gpus, ephemeral-storage,etc). +// do not add more resources to this list! +var validationSet = sets.NewString( + string(api.ResourceCPU), + string(api.ResourceMemory), + string(api.ResourceRequestsCPU), + string(api.ResourceRequestsMemory), + string(api.ResourceLimitsCPU), + string(api.ResourceLimitsMemory), +) + // NewPodEvaluator returns an evaluator that can evaluate pods -// if the specified shared informer factory is not nil, evaluator may use it to support listing functions. 
-func NewPodEvaluator(kubeClient clientset.Interface, f informers.SharedInformerFactory, clock clock.Clock) quota.Evaluator { - listFuncByNamespace := listPodsByNamespaceFuncUsingClient(kubeClient) - if f != nil { - listFuncByNamespace = generic.ListResourceUsingInformerFunc(f, v1.SchemeGroupVersion.WithResource("pods")) - } - return &podEvaluator{ - listFuncByNamespace: listFuncByNamespace, - clock: clock, - } +func NewPodEvaluator(f quota.ListerForResourceFunc, clock clock.Clock) quota.Evaluator { + listFuncByNamespace := generic.ListResourceUsingListerFunc(f, v1.SchemeGroupVersion.WithResource("pods")) + podEvaluator := &podEvaluator{listFuncByNamespace: listFuncByNamespace, clock: clock} + return podEvaluator } // podEvaluator knows how to measure usage of pods. @@ -110,6 +121,7 @@ func (p *podEvaluator) Constraints(required []api.ResourceName, item runtime.Obj // Pod level resources are often set during admission control // As a consequence, we want to verify that resources are valid prior // to ever charging quota prematurely in case they are not. + // TODO remove this entire section when we have a validation step in admission. allErrs := field.ErrorList{} fldPath := field.NewPath("spec").Child("containers") for i, ctr := range pod.Spec.Containers { @@ -123,10 +135,11 @@ func (p *podEvaluator) Constraints(required []api.ResourceName, item runtime.Obj return allErrs.ToAggregate() } - // TODO: fix this when we have pod level resource requirements - // since we do not yet pod level requests/limits, we need to ensure each - // container makes an explict request or limit for a quota tracked resource - requiredSet := quota.ToSet(required) + // BACKWARD COMPATIBILITY REQUIREMENT: if we quota cpu or memory, then each container + // must make an explicit request for the resource. this was a mistake. it coupled + // validation with resource counting, but we did this before QoS was even defined. + // let's not make that mistake again with other resources now that QoS is defined. + requiredSet := quota.ToSet(required).Intersection(validationSet) missingSet := sets.NewString() for i := range pod.Spec.Containers { enforcePodContainerConstraints(&pod.Spec.Containers[i], requiredSet, missingSet) @@ -140,9 +153,9 @@ func (p *podEvaluator) Constraints(required []api.ResourceName, item runtime.Obj return fmt.Errorf("must specify %s", strings.Join(missingSet.List(), ",")) } -// GroupKind that this evaluator tracks -func (p *podEvaluator) GroupKind() schema.GroupKind { - return api.Kind("Pod") +// GroupResource that this evaluator tracks +func (p *podEvaluator) GroupResource() schema.GroupResource { + return v1.SchemeGroupVersion.WithResource("pods").GroupResource() } // Handles returns true if the evaluator should handle the specified attributes. @@ -168,7 +181,14 @@ func (p *podEvaluator) Matches(resourceQuota *api.ResourceQuota, item runtime.Ob // MatchingResources takes the input specified list of resources and returns the set of resources it matches. 
func (p *podEvaluator) MatchingResources(input []api.ResourceName) []api.ResourceName { - return quota.Intersection(input, podResources) + result := quota.Intersection(input, podResources) + for _, resource := range input { + if quota.ContainsPrefix(podResourcePrefixes, resource) { + result = append(result, resource) + } + } + + return result } // Usage knows how to measure usage associated with pods @@ -190,7 +210,7 @@ var _ quota.Evaluator = &podEvaluator{} func enforcePodContainerConstraints(container *api.Container, requiredSet, missingSet sets.String) { requests := container.Resources.Requests limits := container.Resources.Limits - containerUsage := podUsageHelper(requests, limits) + containerUsage := podComputeUsageHelper(requests, limits) containerSet := quota.ToSet(quota.ResourceNames(containerUsage)) if !containerSet.Equal(requiredSet) { difference := requiredSet.Difference(containerSet) @@ -198,8 +218,8 @@ func enforcePodContainerConstraints(container *api.Container, requiredSet, missi } } -// podUsageHelper can summarize the pod quota usage based on requests and limits -func podUsageHelper(requests api.ResourceList, limits api.ResourceList) api.ResourceList { +// podComputeUsageHelper can summarize the pod compute quota usage based on requests and limits +func podComputeUsageHelper(requests api.ResourceList, limits api.ResourceList) api.ResourceList { result := api.ResourceList{} result[api.ResourcePods] = resource.MustParse("1") if request, found := requests[api.ResourceCPU]; found { @@ -223,6 +243,18 @@ func podUsageHelper(requests api.ResourceList, limits api.ResourceList) api.Reso if limit, found := limits[api.ResourceEphemeralStorage]; found { result[api.ResourceLimitsEphemeralStorage] = limit } + for resource, request := range requests { + if quota.ContainsPrefix(requestedResourcePrefixes, resource) { + result[resource] = request + result[maskResourceWithPrefix(resource, requestsPrefix)] = request + } + } + for resource, limit := range limits { + if quota.ContainsPrefix(requestedResourcePrefixes, resource) { + result[maskResourceWithPrefix(resource, limitsPrefix)] = limit + } + } + return result } @@ -230,7 +262,7 @@ func toInternalPodOrError(obj runtime.Object) (*api.Pod, error) { pod := &api.Pod{} switch t := obj.(type) { case *v1.Pod: - if err := k8s_api_v1.Convert_v1_Pod_To_api_Pod(t, pod, nil); err != nil { + if err := k8s_api_v1.Convert_v1_Pod_To_core_Pod(t, pod, nil); err != nil { return nil, err } case *api.Pod: @@ -269,18 +301,21 @@ func PodUsageFunc(obj runtime.Object, clock clock.Clock) (api.ResourceList, erro if err != nil { return api.ResourceList{}, err } - // by convention, we do not quota pods that have reached end-of life + + // always quota the object count (even if the pod is end of life) + // object count quotas track all objects that are in storage. + // where "pods" tracks all pods that have not reached a terminal state, + // count/pods tracks all pods independent of state. + result := api.ResourceList{ + podObjectCountName: *(resource.NewQuantity(1, resource.DecimalSI)), + } + + // by convention, we do not quota compute resources that have reached end-of life + // note: the "pods" resource is considered a compute resource since it is tied to life-cycle. if !QuotaPod(pod, clock) { - return api.ResourceList{}, nil - } - // Only charge pod count for uninitialized pod. 
- if utilfeature.DefaultFeatureGate.Enabled(features.Initializers) { - if !initialization.IsInitialized(pod.Initializers) { - result := api.ResourceList{} - result[api.ResourcePods] = resource.MustParse("1") - return result, nil - } + return result, nil } + requests := api.ResourceList{} limits := api.ResourceList{} // TODO: ideally, we have pod level requests and limits in the future. @@ -296,7 +331,8 @@ func PodUsageFunc(obj runtime.Object, clock clock.Clock) (api.ResourceList, erro limits = quota.Max(limits, pod.Spec.InitContainers[i].Resources.Limits) } - return podUsageHelper(requests, limits), nil + result = quota.Add(result, podComputeUsageHelper(requests, limits)) + return result, nil } func isBestEffort(pod *api.Pod) bool { diff --git a/vendor/k8s.io/kubernetes/pkg/quota/evaluator/core/registry.go b/vendor/k8s.io/kubernetes/pkg/quota/evaluator/core/registry.go index e6278c731c6c..5a642b386a9a 100644 --- a/vendor/k8s.io/kubernetes/pkg/quota/evaluator/core/registry.go +++ b/vendor/k8s.io/kubernetes/pkg/quota/evaluator/core/registry.go @@ -17,33 +17,34 @@ limitations under the License. package core import ( + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/clock" - "k8s.io/client-go/informers" - clientset "k8s.io/client-go/kubernetes" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/quota" "k8s.io/kubernetes/pkg/quota/generic" ) -// NewRegistry returns a registry that knows how to deal with core kubernetes resources -// If an informer factory is provided, evaluators will use them. -func NewRegistry(kubeClient clientset.Interface, f informers.SharedInformerFactory) quota.Registry { - pod := NewPodEvaluator(kubeClient, f, clock.RealClock{}) - service := NewServiceEvaluator(kubeClient, f) - replicationController := NewReplicationControllerEvaluator(kubeClient, f) - resourceQuota := NewResourceQuotaEvaluator(kubeClient, f) - secret := NewSecretEvaluator(kubeClient, f) - configMap := NewConfigMapEvaluator(kubeClient, f) - persistentVolumeClaim := NewPersistentVolumeClaimEvaluator(kubeClient, f) - return &generic.GenericRegistry{ - InternalEvaluators: map[schema.GroupKind]quota.Evaluator{ - pod.GroupKind(): pod, - service.GroupKind(): service, - replicationController.GroupKind(): replicationController, - secret.GroupKind(): secret, - configMap.GroupKind(): configMap, - resourceQuota.GroupKind(): resourceQuota, - persistentVolumeClaim.GroupKind(): persistentVolumeClaim, - }, +// legacyObjectCountAliases are what we used to do simple object counting quota with mapped to alias +var legacyObjectCountAliases = map[schema.GroupVersionResource]api.ResourceName{ + v1.SchemeGroupVersion.WithResource("configmaps"): api.ResourceConfigMaps, + v1.SchemeGroupVersion.WithResource("resourcequotas"): api.ResourceQuotas, + v1.SchemeGroupVersion.WithResource("replicationcontrollers"): api.ResourceReplicationControllers, + v1.SchemeGroupVersion.WithResource("secrets"): api.ResourceSecrets, +} + +// NewEvaluators returns the list of static evaluators that manage more than counts +func NewEvaluators(f quota.ListerForResourceFunc) []quota.Evaluator { + // these evaluators have special logic + result := []quota.Evaluator{ + NewPodEvaluator(f, clock.RealClock{}), + NewServiceEvaluator(f), + NewPersistentVolumeClaimEvaluator(f), + } + // these evaluators require an alias for backwards compatibility + for gvr, alias := range legacyObjectCountAliases { + result = append(result, + generic.NewObjectCountEvaluator(false, gvr.GroupResource(), 
generic.ListResourceUsingListerFunc(f, gvr), alias)) } + return result } diff --git a/vendor/k8s.io/kubernetes/pkg/quota/evaluator/core/replication_controllers.go b/vendor/k8s.io/kubernetes/pkg/quota/evaluator/core/replication_controllers.go deleted file mode 100644 index 062614608347..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/quota/evaluator/core/replication_controllers.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package core - -import ( - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/informers" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/quota" - "k8s.io/kubernetes/pkg/quota/generic" -) - -// listReplicationControllersByNamespaceFuncUsingClient returns a replicationController listing function based on the provided client. -func listReplicationControllersByNamespaceFuncUsingClient(kubeClient clientset.Interface) generic.ListFuncByNamespace { - // TODO: ideally, we could pass dynamic client pool down into this code, and have one way of doing this. - // unfortunately, dynamic client works with Unstructured objects, and when we calculate Usage, we require - // structured objects. - return func(namespace string, options metav1.ListOptions) ([]runtime.Object, error) { - itemList, err := kubeClient.Core().ReplicationControllers(namespace).List(options) - if err != nil { - return nil, err - } - results := make([]runtime.Object, 0, len(itemList.Items)) - for i := range itemList.Items { - results = append(results, &itemList.Items[i]) - } - return results, nil - } -} - -// NewReplicationControllerEvaluator returns an evaluator that can evaluate replicationControllers -// if the specified shared informer factory is not nil, evaluator may use it to support listing functions. -func NewReplicationControllerEvaluator(kubeClient clientset.Interface, f informers.SharedInformerFactory) quota.Evaluator { - listFuncByNamespace := listReplicationControllersByNamespaceFuncUsingClient(kubeClient) - if f != nil { - listFuncByNamespace = generic.ListResourceUsingInformerFunc(f, v1.SchemeGroupVersion.WithResource("replicationcontrollers")) - } - return &generic.ObjectCountEvaluator{ - AllowCreateOnUpdate: false, - InternalGroupKind: api.Kind("ReplicationController"), - ResourceName: api.ResourceReplicationControllers, - ListFuncByNamespace: listFuncByNamespace, - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/quota/evaluator/core/resource_quotas.go b/vendor/k8s.io/kubernetes/pkg/quota/evaluator/core/resource_quotas.go deleted file mode 100644 index 50033e068c2e..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/quota/evaluator/core/resource_quotas.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package core - -import ( - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/informers" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/quota" - "k8s.io/kubernetes/pkg/quota/generic" -) - -// listResourceQuotasByNamespaceFuncUsingClient returns a resourceQuota listing function based on the provided client. -func listResourceQuotasByNamespaceFuncUsingClient(kubeClient clientset.Interface) generic.ListFuncByNamespace { - // TODO: ideally, we could pass dynamic client pool down into this code, and have one way of doing this. - // unfortunately, dynamic client works with Unstructured objects, and when we calculate Usage, we require - // structured objects. - return func(namespace string, options metav1.ListOptions) ([]runtime.Object, error) { - itemList, err := kubeClient.Core().ResourceQuotas(namespace).List(options) - if err != nil { - return nil, err - } - results := make([]runtime.Object, 0, len(itemList.Items)) - for i := range itemList.Items { - results = append(results, &itemList.Items[i]) - } - return results, nil - } -} - -// NewResourceQuotaEvaluator returns an evaluator that can evaluate resourceQuotas -// if the specified shared informer factory is not nil, evaluator may use it to support listing functions. -func NewResourceQuotaEvaluator(kubeClient clientset.Interface, f informers.SharedInformerFactory) quota.Evaluator { - listFuncByNamespace := listResourceQuotasByNamespaceFuncUsingClient(kubeClient) - if f != nil { - listFuncByNamespace = generic.ListResourceUsingInformerFunc(f, v1.SchemeGroupVersion.WithResource("resourcequotas")) - } - return &generic.ObjectCountEvaluator{ - AllowCreateOnUpdate: false, - InternalGroupKind: api.Kind("ResourceQuota"), - ResourceName: api.ResourceQuotas, - ListFuncByNamespace: listFuncByNamespace, - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/quota/evaluator/core/secrets.go b/vendor/k8s.io/kubernetes/pkg/quota/evaluator/core/secrets.go deleted file mode 100644 index bfd678c95aa7..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/quota/evaluator/core/secrets.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package core - -import ( - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/informers" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/quota" - "k8s.io/kubernetes/pkg/quota/generic" -) - -// listSecretsByNamespaceFuncUsingClient returns a secret listing function based on the provided client. -func listSecretsByNamespaceFuncUsingClient(kubeClient clientset.Interface) generic.ListFuncByNamespace { - // TODO: ideally, we could pass dynamic client pool down into this code, and have one way of doing this. - // unfortunately, dynamic client works with Unstructured objects, and when we calculate Usage, we require - // structured objects. - return func(namespace string, options metav1.ListOptions) ([]runtime.Object, error) { - itemList, err := kubeClient.Core().Secrets(namespace).List(options) - if err != nil { - return nil, err - } - results := make([]runtime.Object, 0, len(itemList.Items)) - for i := range itemList.Items { - results = append(results, &itemList.Items[i]) - } - return results, nil - } -} - -// NewSecretEvaluator returns an evaluator that can evaluate secrets -// if the specified shared informer factory is not nil, evaluator may use it to support listing functions. -func NewSecretEvaluator(kubeClient clientset.Interface, f informers.SharedInformerFactory) quota.Evaluator { - listFuncByNamespace := listSecretsByNamespaceFuncUsingClient(kubeClient) - if f != nil { - listFuncByNamespace = generic.ListResourceUsingInformerFunc(f, v1.SchemeGroupVersion.WithResource("secrets")) - } - return &generic.ObjectCountEvaluator{ - AllowCreateOnUpdate: false, - InternalGroupKind: api.Kind("Secret"), - ResourceName: api.ResourceSecrets, - ListFuncByNamespace: listFuncByNamespace, - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/quota/evaluator/core/services.go b/vendor/k8s.io/kubernetes/pkg/quota/evaluator/core/services.go index 91a5ef0a059b..f4dec5973a26 100644 --- a/vendor/k8s.io/kubernetes/pkg/quota/evaluator/core/services.go +++ b/vendor/k8s.io/kubernetes/pkg/quota/evaluator/core/services.go @@ -18,58 +18,34 @@ package core import ( "fmt" - "strings" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apiserver/pkg/admission" - "k8s.io/client-go/informers" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/kubernetes/pkg/api" - k8s_api_v1 "k8s.io/kubernetes/pkg/api/v1" + api "k8s.io/kubernetes/pkg/apis/core" + k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1" "k8s.io/kubernetes/pkg/quota" "k8s.io/kubernetes/pkg/quota/generic" ) +// the name used for object count quota +var serviceObjectCountName = generic.ObjectCountQuotaResourceNameFor(v1.SchemeGroupVersion.WithResource("services").GroupResource()) + // serviceResources are the set of resources managed by quota associated with services. var serviceResources = []api.ResourceName{ + serviceObjectCountName, api.ResourceServices, api.ResourceServicesNodePorts, api.ResourceServicesLoadBalancers, } -// listServicesByNamespaceFuncUsingClient returns a service listing function based on the provided client. -func listServicesByNamespaceFuncUsingClient(kubeClient clientset.Interface) generic.ListFuncByNamespace { - // TODO: ideally, we could pass dynamic client pool down into this code, and have one way of doing this. 
- // unfortunately, dynamic client works with Unstructured objects, and when we calculate Usage, we require - // structured objects. - return func(namespace string, options metav1.ListOptions) ([]runtime.Object, error) { - itemList, err := kubeClient.Core().Services(namespace).List(options) - if err != nil { - return nil, err - } - results := make([]runtime.Object, 0, len(itemList.Items)) - for i := range itemList.Items { - results = append(results, &itemList.Items[i]) - } - return results, nil - } -} - -// NewServiceEvaluator returns an evaluator that can evaluate services -// if the specified shared informer factory is not nil, evaluator may use it to support listing functions. -func NewServiceEvaluator(kubeClient clientset.Interface, f informers.SharedInformerFactory) quota.Evaluator { - listFuncByNamespace := listServicesByNamespaceFuncUsingClient(kubeClient) - if f != nil { - listFuncByNamespace = generic.ListResourceUsingInformerFunc(f, v1.SchemeGroupVersion.WithResource("services")) - } - return &serviceEvaluator{ - listFuncByNamespace: listFuncByNamespace, - } +// NewServiceEvaluator returns an evaluator that can evaluate services. +func NewServiceEvaluator(f quota.ListerForResourceFunc) quota.Evaluator { + listFuncByNamespace := generic.ListResourceUsingListerFunc(f, v1.SchemeGroupVersion.WithResource("services")) + serviceEvaluator := &serviceEvaluator{listFuncByNamespace: listFuncByNamespace} + return serviceEvaluator } // serviceEvaluator knows how to measure usage for services. @@ -80,31 +56,13 @@ type serviceEvaluator struct { // Constraints verifies that all required resources are present on the item func (p *serviceEvaluator) Constraints(required []api.ResourceName, item runtime.Object) error { - service, ok := item.(*api.Service) - if !ok { - return fmt.Errorf("unexpected input object %v", item) - } - - requiredSet := quota.ToSet(required) - missingSet := sets.NewString() - serviceUsage, err := p.Usage(service) - if err != nil { - return err - } - serviceSet := quota.ToSet(quota.ResourceNames(serviceUsage)) - if diff := requiredSet.Difference(serviceSet); len(diff) > 0 { - missingSet.Insert(diff.List()...) - } - - if len(missingSet) == 0 { - return nil - } - return fmt.Errorf("must specify %s", strings.Join(missingSet.List(), ",")) + // this is a no-op for services + return nil } -// GroupKind that this evaluator tracks -func (p *serviceEvaluator) GroupKind() schema.GroupKind { - return api.Kind("Service") +// GroupResource that this evaluator tracks +func (p *serviceEvaluator) GroupResource() schema.GroupResource { + return v1.SchemeGroupVersion.WithResource("services").GroupResource() } // Handles returns true of the evaluator should handle the specified operation. 
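// [Editor's illustrative sketch — not part of the upstream patch.] The hunks above and
// below move the service evaluator onto the generic object-count scheme, where every
// tracked GroupResource also gets a "count/..." quota name (serviceObjectCountName above
// resolves to "count/services"). The standalone snippet below mirrors the naming rule that
// ObjectCountQuotaResourceNameFor introduces later in this patch; objectCountName is a
// local helper invented for this example, not an upstream symbol.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

// objectCountName mirrors generic.ObjectCountQuotaResourceNameFor: resources in the core
// (empty) group become "count/<resource>", all others "count/<resource>.<group>".
func objectCountName(gr schema.GroupResource) string {
	if len(gr.Group) == 0 {
		return "count/" + gr.Resource
	}
	return "count/" + gr.Resource + "." + gr.Group
}

func main() {
	fmt.Println(objectCountName(schema.GroupResource{Resource: "services"}))                   // count/services
	fmt.Println(objectCountName(schema.GroupResource{Group: "apps", Resource: "deployments"})) // count/deployments.apps
}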
@@ -129,7 +87,7 @@ func toInternalServiceOrError(obj runtime.Object) (*api.Service, error) { svc := &api.Service{} switch t := obj.(type) { case *v1.Service: - if err := k8s_api_v1.Convert_v1_Service_To_api_Service(t, svc, nil); err != nil { + if err := k8s_api_v1.Convert_v1_Service_To_core_Service(t, svc, nil); err != nil { return nil, err } case *api.Service: @@ -149,6 +107,7 @@ func (p *serviceEvaluator) Usage(item runtime.Object) (api.ResourceList, error) } ports := len(svc.Spec.Ports) // default service usage + result[serviceObjectCountName] = *(resource.NewQuantity(1, resource.DecimalSI)) result[api.ResourceServices] = *(resource.NewQuantity(1, resource.DecimalSI)) result[api.ResourceServicesLoadBalancers] = resource.Quantity{Format: resource.DecimalSI} result[api.ResourceServicesNodePorts] = resource.Quantity{Format: resource.DecimalSI} diff --git a/vendor/k8s.io/kubernetes/pkg/quota/generic/BUILD b/vendor/k8s.io/kubernetes/pkg/quota/generic/BUILD index bfb9678a52a3..db871e85fa1c 100644 --- a/vendor/k8s.io/kubernetes/pkg/quota/generic/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/quota/generic/BUILD @@ -8,19 +8,21 @@ load( go_library( name = "go_default_library", srcs = [ + "configuration.go", "evaluator.go", "registry.go", ], + importpath = "k8s.io/kubernetes/pkg/quota/generic", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/quota:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", "//vendor/k8s.io/client-go/informers:go_default_library", + "//vendor/k8s.io/client-go/tools/cache:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/quota/generic/configuration.go b/vendor/k8s.io/kubernetes/pkg/quota/generic/configuration.go new file mode 100644 index 000000000000..59c009e13d3e --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/quota/generic/configuration.go @@ -0,0 +1,44 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package generic + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kubernetes/pkg/quota" +) + +// implements a basic configuration +type simpleConfiguration struct { + evaluators []quota.Evaluator + ignoredResources map[schema.GroupResource]struct{} +} + +// NewConfiguration creates a quota configuration +func NewConfiguration(evaluators []quota.Evaluator, ignoredResources map[schema.GroupResource]struct{}) quota.Configuration { + return &simpleConfiguration{ + evaluators: evaluators, + ignoredResources: ignoredResources, + } +} + +func (c *simpleConfiguration) IgnoredResources() map[schema.GroupResource]struct{} { + return c.ignoredResources +} + +func (c *simpleConfiguration) Evaluators() []quota.Evaluator { + return c.evaluators +} diff --git a/vendor/k8s.io/kubernetes/pkg/quota/generic/evaluator.go b/vendor/k8s.io/kubernetes/pkg/quota/generic/evaluator.go index 1b574bb6b52e..5bcb1cb20fff 100644 --- a/vendor/k8s.io/kubernetes/pkg/quota/generic/evaluator.go +++ b/vendor/k8s.io/kubernetes/pkg/quota/generic/evaluator.go @@ -20,33 +20,51 @@ import ( "fmt" "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apiserver/pkg/admission" "k8s.io/client-go/informers" - "k8s.io/kubernetes/pkg/api" + "k8s.io/client-go/tools/cache" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/quota" ) -// ListResourceUsingInformerFunc returns a listing function based on the shared informer factory for the specified resource. -func ListResourceUsingInformerFunc(f informers.SharedInformerFactory, resource schema.GroupVersionResource) ListFuncByNamespace { - return func(namespace string, options metav1.ListOptions) ([]runtime.Object, error) { - labelSelector, err := labels.Parse(options.LabelSelector) +// InformerForResourceFunc knows how to provision an informer +type InformerForResourceFunc func(schema.GroupVersionResource) (informers.GenericInformer, error) + +// ListerFuncForResourceFunc knows how to provision a lister from an informer func +func ListerFuncForResourceFunc(f InformerForResourceFunc) quota.ListerForResourceFunc { + return func(gvr schema.GroupVersionResource) (cache.GenericLister, error) { + informer, err := f(gvr) if err != nil { return nil, err } - informer, err := f.ForResource(resource) + return informer.Lister(), nil + } +} + +// ListResourceUsingListerFunc returns a listing function based on the shared informer factory for the specified resource. +func ListResourceUsingListerFunc(l quota.ListerForResourceFunc, resource schema.GroupVersionResource) ListFuncByNamespace { + return func(namespace string) ([]runtime.Object, error) { + lister, err := l(resource) if err != nil { return nil, err } - return informer.Lister().ByNamespace(namespace).List(labelSelector) + return lister.ByNamespace(namespace).List(labels.Everything()) } } +// ObjectCountQuotaResourceNameFor returns the object count quota name for specified groupResource +func ObjectCountQuotaResourceNameFor(groupResource schema.GroupResource) api.ResourceName { + if len(groupResource.Group) == 0 { + return api.ResourceName("count/" + groupResource.Resource) + } + return api.ResourceName("count/" + groupResource.Resource + "." 
+ groupResource.Group) +} + // ListFuncByNamespace knows how to list resources in a namespace -type ListFuncByNamespace func(namespace string, options metav1.ListOptions) ([]runtime.Object, error) +type ListFuncByNamespace func(namespace string) ([]runtime.Object, error) // MatchesScopeFunc knows how to evaluate if an object matches a scope type MatchesScopeFunc func(scope api.ResourceQuotaScope, object runtime.Object) (bool, error) @@ -91,9 +109,7 @@ func CalculateUsageStats(options quota.UsageStatsOptions, for _, resourceName := range options.Resources { result.Used[resourceName] = resource.Quantity{Format: resource.DecimalSI} } - items, err := listFunc(options.Namespace, metav1.ListOptions{ - LabelSelector: labels.Everything().String(), - }) + items, err := listFunc(options.Namespace) if err != nil { return result, fmt.Errorf("failed to list content: %v", err) } @@ -121,63 +137,86 @@ func CalculateUsageStats(options quota.UsageStatsOptions, return result, nil } -// ObjectCountEvaluator provides an implementation for quota.Evaluator +// objectCountEvaluator provides an implementation for quota.Evaluator // that associates usage of the specified resource based on the number of items // returned by the specified listing function. -type ObjectCountEvaluator struct { - // AllowCreateOnUpdate if true will ensure the evaluator tracks create +type objectCountEvaluator struct { + // allowCreateOnUpdate if true will ensure the evaluator tracks create // and update operations. - AllowCreateOnUpdate bool - // GroupKind that this evaluator tracks. - InternalGroupKind schema.GroupKind + allowCreateOnUpdate bool + // GroupResource that this evaluator tracks. + // It is used to construct a generic object count quota name + groupResource schema.GroupResource // A function that knows how to list resources by namespace. // TODO move to dynamic client in future - ListFuncByNamespace ListFuncByNamespace - // Name associated with this resource in the quota. - ResourceName api.ResourceName + listFuncByNamespace ListFuncByNamespace + // Names associated with this resource in the quota for generic counting. + resourceNames []api.ResourceName } // Constraints returns an error if the configured resource name is not in the required set. -func (o *ObjectCountEvaluator) Constraints(required []api.ResourceName, item runtime.Object) error { - if !quota.Contains(required, o.ResourceName) { - return fmt.Errorf("missing %s", o.ResourceName) - } +func (o *objectCountEvaluator) Constraints(required []api.ResourceName, item runtime.Object) error { + // no-op for object counting return nil } -// GroupKind that this evaluator tracks -func (o *ObjectCountEvaluator) GroupKind() schema.GroupKind { - return o.InternalGroupKind -} - // Handles returns true if the object count evaluator needs to track this attributes. 
-func (o *ObjectCountEvaluator) Handles(a admission.Attributes) bool { +func (o *objectCountEvaluator) Handles(a admission.Attributes) bool { operation := a.GetOperation() - return operation == admission.Create || (o.AllowCreateOnUpdate && operation == admission.Update) + return operation == admission.Create || (o.allowCreateOnUpdate && operation == admission.Update) } // Matches returns true if the evaluator matches the specified quota with the provided input item -func (o *ObjectCountEvaluator) Matches(resourceQuota *api.ResourceQuota, item runtime.Object) (bool, error) { +func (o *objectCountEvaluator) Matches(resourceQuota *api.ResourceQuota, item runtime.Object) (bool, error) { return Matches(resourceQuota, item, o.MatchingResources, MatchesNoScopeFunc) } // MatchingResources takes the input specified list of resources and returns the set of resources it matches. -func (o *ObjectCountEvaluator) MatchingResources(input []api.ResourceName) []api.ResourceName { - return quota.Intersection(input, []api.ResourceName{o.ResourceName}) +func (o *objectCountEvaluator) MatchingResources(input []api.ResourceName) []api.ResourceName { + return quota.Intersection(input, o.resourceNames) } // Usage returns the resource usage for the specified object -func (o *ObjectCountEvaluator) Usage(object runtime.Object) (api.ResourceList, error) { +func (o *objectCountEvaluator) Usage(object runtime.Object) (api.ResourceList, error) { quantity := resource.NewQuantity(1, resource.DecimalSI) - return api.ResourceList{ - o.ResourceName: *quantity, - }, nil + resourceList := api.ResourceList{} + for _, resourceName := range o.resourceNames { + resourceList[resourceName] = *quantity + } + return resourceList, nil +} + +// GroupResource tracked by this evaluator +func (o *objectCountEvaluator) GroupResource() schema.GroupResource { + return o.groupResource } // UsageStats calculates aggregate usage for the object. -func (o *ObjectCountEvaluator) UsageStats(options quota.UsageStatsOptions) (quota.UsageStats, error) { - return CalculateUsageStats(options, o.ListFuncByNamespace, MatchesNoScopeFunc, o.Usage) +func (o *objectCountEvaluator) UsageStats(options quota.UsageStatsOptions) (quota.UsageStats, error) { + return CalculateUsageStats(options, o.listFuncByNamespace, MatchesNoScopeFunc, o.Usage) } // Verify implementation of interface at compile time. -var _ quota.Evaluator = &ObjectCountEvaluator{} +var _ quota.Evaluator = &objectCountEvaluator{} + +// NewObjectCountEvaluator returns an evaluator that can perform generic +// object quota counting. It allows an optional alias for backwards compatibilty +// purposes for the legacy object counting names in quota. Unless its supporting +// backward compatibility, alias should not be used. 
+func NewObjectCountEvaluator( + allowCreateOnUpdate bool, + groupResource schema.GroupResource, listFuncByNamespace ListFuncByNamespace, + alias api.ResourceName) quota.Evaluator { + + resourceNames := []api.ResourceName{ObjectCountQuotaResourceNameFor(groupResource)} + if len(alias) > 0 { + resourceNames = append(resourceNames, alias) + } + + return &objectCountEvaluator{ + allowCreateOnUpdate: allowCreateOnUpdate, + groupResource: groupResource, + listFuncByNamespace: listFuncByNamespace, + resourceNames: resourceNames, + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/quota/generic/registry.go b/vendor/k8s.io/kubernetes/pkg/quota/generic/registry.go index 5b1241dfe229..fdc38e02b1cd 100644 --- a/vendor/k8s.io/kubernetes/pkg/quota/generic/registry.go +++ b/vendor/k8s.io/kubernetes/pkg/quota/generic/registry.go @@ -17,20 +17,65 @@ limitations under the License. package generic import ( + "sync" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/kubernetes/pkg/quota" ) -// Ensure it implements the required interface -var _ quota.Registry = &GenericRegistry{} +// implements a basic registry +type simpleRegistry struct { + lock sync.RWMutex + // evaluators tracked by the registry + evaluators map[schema.GroupResource]quota.Evaluator +} + +// NewRegistry creates a simple registry with initial list of evaluators +func NewRegistry(evaluators []quota.Evaluator) quota.Registry { + return &simpleRegistry{ + evaluators: evaluatorsByGroupResource(evaluators), + } +} + +func (r *simpleRegistry) Add(e quota.Evaluator) { + r.lock.Lock() + defer r.lock.Unlock() + r.evaluators[e.GroupResource()] = e +} + +func (r *simpleRegistry) Remove(e quota.Evaluator) { + r.lock.Lock() + defer r.lock.Unlock() + delete(r.evaluators, e.GroupResource()) +} + +func (r *simpleRegistry) Get(gr schema.GroupResource) quota.Evaluator { + r.lock.RLock() + defer r.lock.RUnlock() + return r.evaluators[gr] +} + +func (r *simpleRegistry) List() []quota.Evaluator { + r.lock.RLock() + defer r.lock.RUnlock() + + return evaluatorsList(r.evaluators) +} -// GenericRegistry implements Registry -type GenericRegistry struct { - // internal evaluators by group kind - InternalEvaluators map[schema.GroupKind]quota.Evaluator +// evaluatorsByGroupResource converts a list of evaluators to a map by group resource. 
+func evaluatorsByGroupResource(items []quota.Evaluator) map[schema.GroupResource]quota.Evaluator { + result := map[schema.GroupResource]quota.Evaluator{} + for _, item := range items { + result[item.GroupResource()] = item + } + return result } -// Evaluators returns the map of evaluators by groupKind -func (r *GenericRegistry) Evaluators() map[schema.GroupKind]quota.Evaluator { - return r.InternalEvaluators +// evaluatorsList converts a map of evaluators to list +func evaluatorsList(input map[schema.GroupResource]quota.Evaluator) []quota.Evaluator { + var result []quota.Evaluator + for _, item := range input { + result = append(result, item) + } + return result } diff --git a/vendor/k8s.io/kubernetes/pkg/quota/install/BUILD b/vendor/k8s.io/kubernetes/pkg/quota/install/BUILD index ffd846ea53e1..027c5835c0dc 100644 --- a/vendor/k8s.io/kubernetes/pkg/quota/install/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/quota/install/BUILD @@ -8,11 +8,12 @@ load( go_library( name = "go_default_library", srcs = ["registry.go"], + importpath = "k8s.io/kubernetes/pkg/quota/install", deps = [ "//pkg/quota:go_default_library", "//pkg/quota/evaluator/core:go_default_library", - "//vendor/k8s.io/client-go/informers:go_default_library", - "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//pkg/quota/generic:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/quota/install/registry.go b/vendor/k8s.io/kubernetes/pkg/quota/install/registry.go index a10dd964567c..367a6a401643 100644 --- a/vendor/k8s.io/kubernetes/pkg/quota/install/registry.go +++ b/vendor/k8s.io/kubernetes/pkg/quota/install/registry.go @@ -17,15 +17,42 @@ limitations under the License. package install import ( - "k8s.io/client-go/informers" - clientset "k8s.io/client-go/kubernetes" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/kubernetes/pkg/quota" "k8s.io/kubernetes/pkg/quota/evaluator/core" + "k8s.io/kubernetes/pkg/quota/generic" ) -// NewRegistry returns a registry of quota evaluators. -// If a shared informer factory is provided, it is used by evaluators rather than performing direct queries. -func NewRegistry(kubeClient clientset.Interface, f informers.SharedInformerFactory) quota.Registry { - // TODO: when quota supports resources in other api groups, we will need to merge - return core.NewRegistry(kubeClient, f) +// NewQuotaConfigurationForAdmission returns a quota configuration for admission control. +func NewQuotaConfigurationForAdmission() quota.Configuration { + evaluators := core.NewEvaluators(nil) + return generic.NewConfiguration(evaluators, DefaultIgnoredResources()) +} + +// NewQuotaConfigurationForControllers returns a quota configuration for controllers. 
+func NewQuotaConfigurationForControllers(f quota.ListerForResourceFunc) quota.Configuration { + evaluators := core.NewEvaluators(f) + return generic.NewConfiguration(evaluators, DefaultIgnoredResources()) +} + +// ignoredResources are ignored by quota by default +var ignoredResources = map[schema.GroupResource]struct{}{ + {Group: "extensions", Resource: "replicationcontrollers"}: {}, + {Group: "extensions", Resource: "networkpolicies"}: {}, + {Group: "", Resource: "bindings"}: {}, + {Group: "", Resource: "componentstatuses"}: {}, + {Group: "", Resource: "events"}: {}, + {Group: "authentication.k8s.io", Resource: "tokenreviews"}: {}, + {Group: "authorization.k8s.io", Resource: "subjectaccessreviews"}: {}, + {Group: "authorization.k8s.io", Resource: "selfsubjectaccessreviews"}: {}, + {Group: "authorization.k8s.io", Resource: "localsubjectaccessreviews"}: {}, + {Group: "authorization.k8s.io", Resource: "selfsubjectrulesreviews"}: {}, + {Group: "apiregistration.k8s.io", Resource: "apiservices"}: {}, + {Group: "apiextensions.k8s.io", Resource: "customresourcedefinitions"}: {}, +} + +// DefaultIgnoredResources returns the default set of resources that quota system +// should ignore. This is exposed so downstream integrators can have access to them. +func DefaultIgnoredResources() map[schema.GroupResource]struct{} { + return ignoredResources } diff --git a/vendor/k8s.io/kubernetes/pkg/quota/interfaces.go b/vendor/k8s.io/kubernetes/pkg/quota/interfaces.go index 56eacfe2d47f..8d7295ef0c99 100644 --- a/vendor/k8s.io/kubernetes/pkg/quota/interfaces.go +++ b/vendor/k8s.io/kubernetes/pkg/quota/interfaces.go @@ -20,7 +20,8 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apiserver/pkg/admission" - "k8s.io/kubernetes/pkg/api" + "k8s.io/client-go/tools/cache" + api "k8s.io/kubernetes/pkg/apis/core" ) // UsageStatsOptions is an options structs that describes how stats should be calculated @@ -39,12 +40,12 @@ type UsageStats struct { Used api.ResourceList } -// Evaluator knows how to evaluate quota usage for a particular group kind +// Evaluator knows how to evaluate quota usage for a particular group resource type Evaluator interface { // Constraints ensures that each required resource is present on item Constraints(required []api.ResourceName, item runtime.Object) error - // GroupKind returns the groupKind that this object knows how to evaluate - GroupKind() schema.GroupKind + // GroupResource returns the groupResource that this object knows how to evaluate + GroupResource() schema.GroupResource // Handles determines if quota could be impacted by the specified attribute. // If true, admission control must perform quota processing for the operation, otherwise it is safe to ignore quota. Handles(operation admission.Attributes) bool @@ -58,25 +59,25 @@ type Evaluator interface { UsageStats(options UsageStatsOptions) (UsageStats, error) } -// Registry holds the list of evaluators associated to a particular group kind -type Registry interface { - // Evaluators returns the set Evaluator objects registered to a groupKind - Evaluators() map[schema.GroupKind]Evaluator +// Configuration defines how the quota system is configured. +type Configuration interface { + // IgnoredResources are ignored by quota. + IgnoredResources() map[schema.GroupResource]struct{} + // Evaluators for quota evaluation. + Evaluators() []Evaluator } -// UnionRegistry combines multiple registries. 
Order matters because first registry to claim a GroupKind -// is the "winner" -type UnionRegistry []Registry - -// Evaluators returns a mapping of evaluators by group kind. -func (r UnionRegistry) Evaluators() map[schema.GroupKind]Evaluator { - ret := map[schema.GroupKind]Evaluator{} - - for i := len(r) - 1; i >= 0; i-- { - for k, v := range r[i].Evaluators() { - ret[k] = v - } - } - - return ret +// Registry maintains a list of evaluators +type Registry interface { + // Add to registry + Add(e Evaluator) + // Remove from registry + Remove(e Evaluator) + // Get by group resource + Get(gr schema.GroupResource) Evaluator + // List from registry + List() []Evaluator } + +// ListerForResourceFunc knows how to get a lister for a specific resource +type ListerForResourceFunc func(schema.GroupVersionResource) (cache.GenericLister, error) diff --git a/vendor/k8s.io/kubernetes/pkg/quota/resources.go b/vendor/k8s.io/kubernetes/pkg/quota/resources.go index b9bd39bc83ef..924ee41b9466 100644 --- a/vendor/k8s.io/kubernetes/pkg/quota/resources.go +++ b/vendor/k8s.io/kubernetes/pkg/quota/resources.go @@ -17,10 +17,12 @@ limitations under the License. package quota import ( + "strings" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) // Equals returns true if the two lists are equivalent @@ -196,6 +198,16 @@ func Contains(items []api.ResourceName, item api.ResourceName) bool { return ToSet(items).Has(string(item)) } +// ContainsPrefix returns true if the specified item has a prefix that contained in given prefix Set +func ContainsPrefix(prefixSet []string, item api.ResourceName) bool { + for _, prefix := range prefixSet { + if strings.HasPrefix(string(item), prefix) { + return true + } + } + return false +} + // Intersection returns the intersection of both list of resources func Intersection(a []api.ResourceName, b []api.ResourceName) []api.ResourceName { setA := ToSet(a) @@ -247,7 +259,7 @@ func CalculateUsage(namespaceName string, scopes []api.ResourceQuotaScope, hardL // look to measure updated usage stats for hardResources := ResourceNames(hardLimits) potentialResources := []api.ResourceName{} - evaluators := registry.Evaluators() + evaluators := registry.List() for _, evaluator := range evaluators { potentialResources = append(potentialResources, evaluator.MatchingResources(hardResources)...) 
} diff --git a/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/externaladmissionhookconfiguration/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/externaladmissionhookconfiguration/BUILD deleted file mode 100644 index a9da4037d284..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/externaladmissionhookconfiguration/BUILD +++ /dev/null @@ -1,39 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "strategy.go", - ], - deps = [ - "//pkg/api:go_default_library", - "//pkg/apis/admissionregistration:go_default_library", - "//pkg/apis/admissionregistration/validation:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", - "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", - "//vendor/k8s.io/apiserver/pkg/storage/names:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [ - ":package-srcs", - "//pkg/registry/admissionregistration/externaladmissionhookconfiguration/storage:all-srcs", - ], - tags = ["automanaged"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/externaladmissionhookconfiguration/doc.go b/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/externaladmissionhookconfiguration/doc.go deleted file mode 100644 index 2db1218c784f..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/externaladmissionhookconfiguration/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package externaladmissionhookconfiguration diff --git a/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/externaladmissionhookconfiguration/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/externaladmissionhookconfiguration/storage/BUILD deleted file mode 100644 index 7dc51ca45290..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/externaladmissionhookconfiguration/storage/BUILD +++ /dev/null @@ -1,32 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = ["storage.go"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/apis/admissionregistration:go_default_library", - "//pkg/registry/admissionregistration/externaladmissionhookconfiguration:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", - "//vendor/k8s.io/apiserver/pkg/registry/generic/registry:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/externaladmissionhookconfiguration/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/externaladmissionhookconfiguration/storage/storage.go deleted file mode 100644 index 93e0e046e6c8..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/externaladmissionhookconfiguration/storage/storage.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package storage - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apiserver/pkg/registry/generic" - genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/admissionregistration" - "k8s.io/kubernetes/pkg/registry/admissionregistration/externaladmissionhookconfiguration" -) - -// rest implements a RESTStorage for pod disruption budgets against etcd -type REST struct { - *genericregistry.Store -} - -// NewREST returns a RESTStorage object that will work against pod disruption budgets. 
-func NewREST(optsGetter generic.RESTOptionsGetter) *REST { - store := &genericregistry.Store{ - Copier: api.Scheme, - NewFunc: func() runtime.Object { return &admissionregistration.ExternalAdmissionHookConfiguration{} }, - NewListFunc: func() runtime.Object { return &admissionregistration.ExternalAdmissionHookConfigurationList{} }, - ObjectNameFunc: func(obj runtime.Object) (string, error) { - return obj.(*admissionregistration.ExternalAdmissionHookConfiguration).Name, nil - }, - DefaultQualifiedResource: admissionregistration.Resource("externaladmissionhookconfigurations"), - - CreateStrategy: externaladmissionhookconfiguration.Strategy, - UpdateStrategy: externaladmissionhookconfiguration.Strategy, - DeleteStrategy: externaladmissionhookconfiguration.Strategy, - } - options := &generic.StoreOptions{RESTOptions: optsGetter} - if err := store.CompleteWithOptions(options); err != nil { - panic(err) // TODO: Propagate error up - } - return &REST{store} -} diff --git a/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/externaladmissionhookconfiguration/strategy.go b/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/externaladmissionhookconfiguration/strategy.go deleted file mode 100644 index d6d26379542c..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/externaladmissionhookconfiguration/strategy.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package externaladmissionhookconfiguration - -import ( - "reflect" - - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/validation/field" - genericapirequest "k8s.io/apiserver/pkg/endpoints/request" - "k8s.io/apiserver/pkg/storage/names" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/admissionregistration" - "k8s.io/kubernetes/pkg/apis/admissionregistration/validation" -) - -// externaladmissionhookConfigurationStrategy implements verification logic for ExternalAdmissionHookConfiguration. -type externaladmissionhookConfigurationStrategy struct { - runtime.ObjectTyper - names.NameGenerator -} - -// Strategy is the default logic that applies when creating and updating ExternalAdmissionHookConfiguration objects. -var Strategy = externaladmissionhookConfigurationStrategy{api.Scheme, names.SimpleNameGenerator} - -// NamespaceScoped returns true because all ExternalAdmissionHookConfiguration' need to be within a namespace. -func (externaladmissionhookConfigurationStrategy) NamespaceScoped() bool { - return false -} - -// PrepareForCreate clears the status of an ExternalAdmissionHookConfiguration before creation. -func (externaladmissionhookConfigurationStrategy) PrepareForCreate(ctx genericapirequest.Context, obj runtime.Object) { - ic := obj.(*admissionregistration.ExternalAdmissionHookConfiguration) - ic.Generation = 1 -} - -// PrepareForUpdate clears fields that are not allowed to be set by end users on update. 
-func (externaladmissionhookConfigurationStrategy) PrepareForUpdate(ctx genericapirequest.Context, obj, old runtime.Object) { - newIC := obj.(*admissionregistration.ExternalAdmissionHookConfiguration) - oldIC := old.(*admissionregistration.ExternalAdmissionHookConfiguration) - - // Any changes to the spec increment the generation number, any changes to the - // status should reflect the generation number of the corresponding object. - // See metav1.ObjectMeta description for more information on Generation. - if !reflect.DeepEqual(oldIC.ExternalAdmissionHooks, newIC.ExternalAdmissionHooks) { - newIC.Generation = oldIC.Generation + 1 - } -} - -// Validate validates a new ExternalAdmissionHookConfiguration. -func (externaladmissionhookConfigurationStrategy) Validate(ctx genericapirequest.Context, obj runtime.Object) field.ErrorList { - ic := obj.(*admissionregistration.ExternalAdmissionHookConfiguration) - return validation.ValidateExternalAdmissionHookConfiguration(ic) -} - -// Canonicalize normalizes the object after validation. -func (externaladmissionhookConfigurationStrategy) Canonicalize(obj runtime.Object) { -} - -// AllowCreateOnUpdate is true for ExternalAdmissionHookConfiguration; this means you may create one with a PUT request. -func (externaladmissionhookConfigurationStrategy) AllowCreateOnUpdate() bool { - return false -} - -// ValidateUpdate is the default update validation for an end user. -func (externaladmissionhookConfigurationStrategy) ValidateUpdate(ctx genericapirequest.Context, obj, old runtime.Object) field.ErrorList { - validationErrorList := validation.ValidateExternalAdmissionHookConfiguration(obj.(*admissionregistration.ExternalAdmissionHookConfiguration)) - updateErrorList := validation.ValidateExternalAdmissionHookConfigurationUpdate(obj.(*admissionregistration.ExternalAdmissionHookConfiguration), old.(*admissionregistration.ExternalAdmissionHookConfiguration)) - return append(validationErrorList, updateErrorList...) -} - -// AllowUnconditionalUpdate is the default update policy for ExternalAdmissionHookConfiguration objects. Status update should -// only be allowed if version match. 
-func (externaladmissionhookConfigurationStrategy) AllowUnconditionalUpdate() bool { - return false -} diff --git a/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/initializerconfiguration/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/initializerconfiguration/BUILD index d62b4cec5e5c..29c56e75b650 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/initializerconfiguration/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/initializerconfiguration/BUILD @@ -11,8 +11,9 @@ go_library( "doc.go", "strategy.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/admissionregistration/initializerconfiguration", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/admissionregistration:go_default_library", "//pkg/apis/admissionregistration/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/initializerconfiguration/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/initializerconfiguration/storage/BUILD index afdd3a6cb419..208db975bae3 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/initializerconfiguration/storage/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/initializerconfiguration/storage/BUILD @@ -8,8 +8,8 @@ load( go_library( name = "go_default_library", srcs = ["storage.go"], + importpath = "k8s.io/kubernetes/pkg/registry/admissionregistration/initializerconfiguration/storage", deps = [ - "//pkg/api:go_default_library", "//pkg/apis/admissionregistration:go_default_library", "//pkg/registry/admissionregistration/initializerconfiguration:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/initializerconfiguration/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/initializerconfiguration/storage/storage.go index f940605ef928..a8846f36edcf 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/initializerconfiguration/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/initializerconfiguration/storage/storage.go @@ -20,7 +20,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/admissionregistration" "k8s.io/kubernetes/pkg/registry/admissionregistration/initializerconfiguration" ) @@ -33,7 +32,6 @@ type REST struct { // NewREST returns a RESTStorage object that will work against pod disruption budgets. 
func NewREST(optsGetter generic.RESTOptionsGetter) *REST { store := &genericregistry.Store{ - Copier: api.Scheme, NewFunc: func() runtime.Object { return &admissionregistration.InitializerConfiguration{} }, NewListFunc: func() runtime.Object { return &admissionregistration.InitializerConfigurationList{} }, ObjectNameFunc: func(obj runtime.Object) (string, error) { diff --git a/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/initializerconfiguration/strategy.go b/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/initializerconfiguration/strategy.go index 3f0e73925bc9..f32e4ea9dcb7 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/initializerconfiguration/strategy.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/initializerconfiguration/strategy.go @@ -23,7 +23,7 @@ import ( "k8s.io/apimachinery/pkg/util/validation/field" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/storage/names" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/admissionregistration" "k8s.io/kubernetes/pkg/apis/admissionregistration/validation" ) @@ -35,7 +35,7 @@ type initializerConfigurationStrategy struct { } // Strategy is the default logic that applies when creating and updating InitializerConfiguration objects. -var Strategy = initializerConfigurationStrategy{api.Scheme, names.SimpleNameGenerator} +var Strategy = initializerConfigurationStrategy{legacyscheme.Scheme, names.SimpleNameGenerator} // NamespaceScoped returns true because all InitializerConfiguration' need to be within a namespace. func (initializerConfigurationStrategy) NamespaceScoped() bool { diff --git a/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/mutatingwebhookconfiguration/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/mutatingwebhookconfiguration/BUILD new file mode 100644 index 000000000000..92ae7b566fc9 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/mutatingwebhookconfiguration/BUILD @@ -0,0 +1,40 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "strategy.go", + ], + importpath = "k8s.io/kubernetes/pkg/registry/admissionregistration/mutatingwebhookconfiguration", + deps = [ + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/admissionregistration:go_default_library", + "//pkg/apis/admissionregistration/validation:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", + "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", + "//vendor/k8s.io/apiserver/pkg/storage/names:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/registry/admissionregistration/mutatingwebhookconfiguration/storage:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/mutatingwebhookconfiguration/doc.go b/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/mutatingwebhookconfiguration/doc.go new file mode 100644 index 000000000000..fb98ee9c7edc --- /dev/null +++ 
b/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/mutatingwebhookconfiguration/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mutatingwebhookconfiguration diff --git a/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/mutatingwebhookconfiguration/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/mutatingwebhookconfiguration/storage/BUILD new file mode 100644 index 000000000000..7cb1a472f92f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/mutatingwebhookconfiguration/storage/BUILD @@ -0,0 +1,32 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = ["storage.go"], + importpath = "k8s.io/kubernetes/pkg/registry/admissionregistration/mutatingwebhookconfiguration/storage", + deps = [ + "//pkg/apis/admissionregistration:go_default_library", + "//pkg/registry/admissionregistration/mutatingwebhookconfiguration:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/generic/registry:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/mutatingwebhookconfiguration/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/mutatingwebhookconfiguration/storage/storage.go new file mode 100644 index 000000000000..7f3be3492788 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/mutatingwebhookconfiguration/storage/storage.go @@ -0,0 +1,51 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package storage + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/registry/generic" + genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" + "k8s.io/kubernetes/pkg/apis/admissionregistration" + "k8s.io/kubernetes/pkg/registry/admissionregistration/mutatingwebhookconfiguration" +) + +// rest implements a RESTStorage for pod disruption budgets against etcd +type REST struct { + *genericregistry.Store +} + +// NewREST returns a RESTStorage object that will work against pod disruption budgets. +func NewREST(optsGetter generic.RESTOptionsGetter) *REST { + store := &genericregistry.Store{ + NewFunc: func() runtime.Object { return &admissionregistration.MutatingWebhookConfiguration{} }, + NewListFunc: func() runtime.Object { return &admissionregistration.MutatingWebhookConfigurationList{} }, + ObjectNameFunc: func(obj runtime.Object) (string, error) { + return obj.(*admissionregistration.MutatingWebhookConfiguration).Name, nil + }, + DefaultQualifiedResource: admissionregistration.Resource("mutatingwebhookconfigurations"), + + CreateStrategy: mutatingwebhookconfiguration.Strategy, + UpdateStrategy: mutatingwebhookconfiguration.Strategy, + DeleteStrategy: mutatingwebhookconfiguration.Strategy, + } + options := &generic.StoreOptions{RESTOptions: optsGetter} + if err := store.CompleteWithOptions(options); err != nil { + panic(err) // TODO: Propagate error up + } + return &REST{store} +} diff --git a/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/mutatingwebhookconfiguration/strategy.go b/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/mutatingwebhookconfiguration/strategy.go new file mode 100644 index 000000000000..735980ac42fa --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/mutatingwebhookconfiguration/strategy.go @@ -0,0 +1,90 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mutatingwebhookconfiguration + +import ( + "reflect" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + genericapirequest "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/storage/names" + "k8s.io/kubernetes/pkg/api/legacyscheme" + "k8s.io/kubernetes/pkg/apis/admissionregistration" + "k8s.io/kubernetes/pkg/apis/admissionregistration/validation" +) + +// mutatingWebhookConfigurationStrategy implements verification logic for mutatingWebhookConfiguration. +type mutatingWebhookConfigurationStrategy struct { + runtime.ObjectTyper + names.NameGenerator +} + +// Strategy is the default logic that applies when creating and updating mutatingWebhookConfiguration objects. +var Strategy = mutatingWebhookConfigurationStrategy{legacyscheme.Scheme, names.SimpleNameGenerator} + +// NamespaceScoped returns true because all mutatingWebhookConfiguration' need to be within a namespace. 
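The NewREST constructor added above follows the same generic-registry wiring used by the other admissionregistration storages: a genericregistry.Store parameterized with object and list constructors, a name extractor, the qualified resource, and one shared Strategy value backing create, update, and delete. A minimal, self-contained sketch of that shape, using stand-in types rather than the real k8s.io/apiserver interfaces, looks like this:

package main

import "fmt"

// WebhookConfiguration stands in for admissionregistration.MutatingWebhookConfiguration.
type WebhookConfiguration struct {
	Name     string
	Webhooks []string
}

// Store mirrors the fields NewREST sets on genericregistry.Store: constructors
// for single objects and lists, a name extractor, and the qualified resource.
type Store struct {
	NewFunc        func() interface{}
	NewListFunc    func() interface{}
	ObjectNameFunc func(obj interface{}) (string, error)
	Resource       string
}

func main() {
	store := &Store{
		NewFunc:     func() interface{} { return &WebhookConfiguration{} },
		NewListFunc: func() interface{} { return &[]WebhookConfiguration{} },
		ObjectNameFunc: func(obj interface{}) (string, error) {
			return obj.(*WebhookConfiguration).Name, nil
		},
		Resource: "mutatingwebhookconfigurations",
	}
	name, err := store.ObjectNameFunc(&WebhookConfiguration{Name: "example-config"})
	fmt.Println(store.Resource, name, err)
}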
+func (mutatingWebhookConfigurationStrategy) NamespaceScoped() bool { + return false +} + +// PrepareForCreate clears the status of an mutatingWebhookConfiguration before creation. +func (mutatingWebhookConfigurationStrategy) PrepareForCreate(ctx genericapirequest.Context, obj runtime.Object) { + ic := obj.(*admissionregistration.MutatingWebhookConfiguration) + ic.Generation = 1 +} + +// PrepareForUpdate clears fields that are not allowed to be set by end users on update. +func (mutatingWebhookConfigurationStrategy) PrepareForUpdate(ctx genericapirequest.Context, obj, old runtime.Object) { + newIC := obj.(*admissionregistration.MutatingWebhookConfiguration) + oldIC := old.(*admissionregistration.MutatingWebhookConfiguration) + + // Any changes to the spec increment the generation number, any changes to the + // status should reflect the generation number of the corresponding object. + // See metav1.ObjectMeta description for more information on Generation. + if !reflect.DeepEqual(oldIC.Webhooks, newIC.Webhooks) { + newIC.Generation = oldIC.Generation + 1 + } +} + +// Validate validates a new mutatingWebhookConfiguration. +func (mutatingWebhookConfigurationStrategy) Validate(ctx genericapirequest.Context, obj runtime.Object) field.ErrorList { + ic := obj.(*admissionregistration.MutatingWebhookConfiguration) + return validation.ValidateMutatingWebhookConfiguration(ic) +} + +// Canonicalize normalizes the object after validation. +func (mutatingWebhookConfigurationStrategy) Canonicalize(obj runtime.Object) { +} + +// AllowCreateOnUpdate is true for mutatingWebhookConfiguration; this means you may create one with a PUT request. +func (mutatingWebhookConfigurationStrategy) AllowCreateOnUpdate() bool { + return false +} + +// ValidateUpdate is the default update validation for an end user. +func (mutatingWebhookConfigurationStrategy) ValidateUpdate(ctx genericapirequest.Context, obj, old runtime.Object) field.ErrorList { + validationErrorList := validation.ValidateMutatingWebhookConfiguration(obj.(*admissionregistration.MutatingWebhookConfiguration)) + updateErrorList := validation.ValidateMutatingWebhookConfigurationUpdate(obj.(*admissionregistration.MutatingWebhookConfiguration), old.(*admissionregistration.MutatingWebhookConfiguration)) + return append(validationErrorList, updateErrorList...) +} + +// AllowUnconditionalUpdate is the default update policy for mutatingWebhookConfiguration objects. Status update should +// only be allowed if version match. 
+func (mutatingWebhookConfigurationStrategy) AllowUnconditionalUpdate() bool { + return false +} diff --git a/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/rest/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/rest/BUILD index ad10e5415799..914682ae4a80 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/rest/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/rest/BUILD @@ -8,12 +8,15 @@ load( go_library( name = "go_default_library", srcs = ["storage_apiserver.go"], + importpath = "k8s.io/kubernetes/pkg/registry/admissionregistration/rest", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/admissionregistration:go_default_library", - "//pkg/registry/admissionregistration/externaladmissionhookconfiguration/storage:go_default_library", "//pkg/registry/admissionregistration/initializerconfiguration/storage:go_default_library", + "//pkg/registry/admissionregistration/mutatingwebhookconfiguration/storage:go_default_library", + "//pkg/registry/admissionregistration/validatingwebhookconfiguration/storage:go_default_library", "//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library", + "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", "//vendor/k8s.io/apiserver/pkg/server:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/rest/storage_apiserver.go b/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/rest/storage_apiserver.go index 26d381a681ff..5b26e830bd72 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/rest/storage_apiserver.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/rest/storage_apiserver.go @@ -18,20 +18,22 @@ package rest import ( admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" "k8s.io/apiserver/pkg/registry/generic" "k8s.io/apiserver/pkg/registry/rest" genericapiserver "k8s.io/apiserver/pkg/server" serverstorage "k8s.io/apiserver/pkg/server/storage" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/admissionregistration" - externaladmissionhookconfigurationstorage "k8s.io/kubernetes/pkg/registry/admissionregistration/externaladmissionhookconfiguration/storage" initializerconfigurationstorage "k8s.io/kubernetes/pkg/registry/admissionregistration/initializerconfiguration/storage" + mutatingwebhookconfigurationstorage "k8s.io/kubernetes/pkg/registry/admissionregistration/mutatingwebhookconfiguration/storage" + validatingwebhookconfigurationstorage "k8s.io/kubernetes/pkg/registry/admissionregistration/validatingwebhookconfiguration/storage" ) type RESTStorageProvider struct{} func (p RESTStorageProvider) NewRESTStorage(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter) (genericapiserver.APIGroupInfo, bool) { - apiGroupInfo := genericapiserver.NewDefaultAPIGroupInfo(admissionregistration.GroupName, api.Registry, api.Scheme, api.ParameterCodec, api.Codecs) + apiGroupInfo := genericapiserver.NewDefaultAPIGroupInfo(admissionregistration.GroupName, legacyscheme.Registry, legacyscheme.Scheme, legacyscheme.ParameterCodec, legacyscheme.Codecs) // If you add a version here, be sure to add an entry in 
`k8s.io/kubernetes/cmd/kube-apiserver/app/aggregator.go with specific priorities. // TODO refactor the plumbing to provide the information in the APIGroupInfo @@ -39,6 +41,10 @@ func (p RESTStorageProvider) NewRESTStorage(apiResourceConfigSource serverstorag apiGroupInfo.VersionedResourcesStorageMap[admissionregistrationv1alpha1.SchemeGroupVersion.Version] = p.v1alpha1Storage(apiResourceConfigSource, restOptionsGetter) apiGroupInfo.GroupMeta.GroupVersion = admissionregistrationv1alpha1.SchemeGroupVersion } + if apiResourceConfigSource.AnyResourcesForVersionEnabled(admissionregistrationv1beta1.SchemeGroupVersion) { + apiGroupInfo.VersionedResourcesStorageMap[admissionregistrationv1beta1.SchemeGroupVersion.Version] = p.v1beta1Storage(apiResourceConfigSource, restOptionsGetter) + apiGroupInfo.GroupMeta.GroupVersion = admissionregistrationv1beta1.SchemeGroupVersion + } return apiGroupInfo, true } @@ -49,9 +55,19 @@ func (p RESTStorageProvider) v1alpha1Storage(apiResourceConfigSource serverstora s := initializerconfigurationstorage.NewREST(restOptionsGetter) storage["initializerconfigurations"] = s } - if apiResourceConfigSource.ResourceEnabled(version.WithResource("externaladmissionhookconfigurations")) { - s := externaladmissionhookconfigurationstorage.NewREST(restOptionsGetter) - storage["externaladmissionhookconfigurations"] = s + return storage +} + +func (p RESTStorageProvider) v1beta1Storage(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter) map[string]rest.Storage { + version := admissionregistrationv1beta1.SchemeGroupVersion + storage := map[string]rest.Storage{} + if apiResourceConfigSource.ResourceEnabled(version.WithResource("validatingwebhookconfigurations")) { + s := validatingwebhookconfigurationstorage.NewREST(restOptionsGetter) + storage["validatingwebhookconfigurations"] = s + } + if apiResourceConfigSource.ResourceEnabled(version.WithResource("mutatingwebhookconfigurations")) { + s := mutatingwebhookconfigurationstorage.NewREST(restOptionsGetter) + storage["mutatingwebhookconfigurations"] = s } return storage } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/validatingwebhookconfiguration/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/validatingwebhookconfiguration/BUILD new file mode 100644 index 000000000000..0b07d3028672 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/validatingwebhookconfiguration/BUILD @@ -0,0 +1,40 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "strategy.go", + ], + importpath = "k8s.io/kubernetes/pkg/registry/admissionregistration/validatingwebhookconfiguration", + deps = [ + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/admissionregistration:go_default_library", + "//pkg/apis/admissionregistration/validation:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", + "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", + "//vendor/k8s.io/apiserver/pkg/storage/names:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + 
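The v1beta1Storage function introduced above builds its resource map conditionally: each resource is registered only when the API resource config enables it for that group/version, and the map is keyed by the plural resource name served in URLs. A simplified, self-contained sketch of that gating, with stand-in types in place of serverstorage.APIResourceConfigSource and rest.Storage, is:

package main

import "fmt"

// enabledResources stands in for serverstorage.APIResourceConfigSource.
type enabledResources map[string]bool

func (e enabledResources) ResourceEnabled(resource string) bool { return e[resource] }

// storageFor stands in for the NewREST constructors; rest.Storage is an interface in the real code.
func storageFor(resource string) string { return "REST storage for " + resource }

// v1beta1Storage mirrors the shape of the added function: gate each resource,
// then key the storage map by its plural name.
func v1beta1Storage(cfg enabledResources) map[string]string {
	storage := map[string]string{}
	if cfg.ResourceEnabled("validatingwebhookconfigurations") {
		storage["validatingwebhookconfigurations"] = storageFor("validatingwebhookconfigurations")
	}
	if cfg.ResourceEnabled("mutatingwebhookconfigurations") {
		storage["mutatingwebhookconfigurations"] = storageFor("mutatingwebhookconfigurations")
	}
	return storage
}

func main() {
	cfg := enabledResources{"validatingwebhookconfigurations": true}
	fmt.Println(v1beta1Storage(cfg))
}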
"//pkg/registry/admissionregistration/validatingwebhookconfiguration/storage:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/validatingwebhookconfiguration/doc.go b/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/validatingwebhookconfiguration/doc.go new file mode 100644 index 000000000000..b97586ef6752 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/validatingwebhookconfiguration/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validatingwebhookconfiguration diff --git a/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/validatingwebhookconfiguration/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/validatingwebhookconfiguration/storage/BUILD new file mode 100644 index 000000000000..30da94ee1a8a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/validatingwebhookconfiguration/storage/BUILD @@ -0,0 +1,32 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = ["storage.go"], + importpath = "k8s.io/kubernetes/pkg/registry/admissionregistration/validatingwebhookconfiguration/storage", + deps = [ + "//pkg/apis/admissionregistration:go_default_library", + "//pkg/registry/admissionregistration/validatingwebhookconfiguration:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/generic/registry:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/validatingwebhookconfiguration/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/validatingwebhookconfiguration/storage/storage.go new file mode 100644 index 000000000000..6a0bee2f0683 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/validatingwebhookconfiguration/storage/storage.go @@ -0,0 +1,51 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package storage + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/registry/generic" + genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" + "k8s.io/kubernetes/pkg/apis/admissionregistration" + "k8s.io/kubernetes/pkg/registry/admissionregistration/validatingwebhookconfiguration" +) + +// rest implements a RESTStorage for pod disruption budgets against etcd +type REST struct { + *genericregistry.Store +} + +// NewREST returns a RESTStorage object that will work against pod disruption budgets. +func NewREST(optsGetter generic.RESTOptionsGetter) *REST { + store := &genericregistry.Store{ + NewFunc: func() runtime.Object { return &admissionregistration.ValidatingWebhookConfiguration{} }, + NewListFunc: func() runtime.Object { return &admissionregistration.ValidatingWebhookConfigurationList{} }, + ObjectNameFunc: func(obj runtime.Object) (string, error) { + return obj.(*admissionregistration.ValidatingWebhookConfiguration).Name, nil + }, + DefaultQualifiedResource: admissionregistration.Resource("validatingwebhookconfigurations"), + + CreateStrategy: validatingwebhookconfiguration.Strategy, + UpdateStrategy: validatingwebhookconfiguration.Strategy, + DeleteStrategy: validatingwebhookconfiguration.Strategy, + } + options := &generic.StoreOptions{RESTOptions: optsGetter} + if err := store.CompleteWithOptions(options); err != nil { + panic(err) // TODO: Propagate error up + } + return &REST{store} +} diff --git a/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/validatingwebhookconfiguration/strategy.go b/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/validatingwebhookconfiguration/strategy.go new file mode 100644 index 000000000000..e59ecc989503 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/validatingwebhookconfiguration/strategy.go @@ -0,0 +1,90 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validatingwebhookconfiguration + +import ( + "reflect" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + genericapirequest "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/storage/names" + "k8s.io/kubernetes/pkg/api/legacyscheme" + "k8s.io/kubernetes/pkg/apis/admissionregistration" + "k8s.io/kubernetes/pkg/apis/admissionregistration/validation" +) + +// validatingWebhookConfigurationStrategy implements verification logic for validatingWebhookConfiguration. +type validatingWebhookConfigurationStrategy struct { + runtime.ObjectTyper + names.NameGenerator +} + +// Strategy is the default logic that applies when creating and updating validatingWebhookConfiguration objects. +var Strategy = validatingWebhookConfigurationStrategy{legacyscheme.Scheme, names.SimpleNameGenerator} + +// NamespaceScoped returns true because all validatingWebhookConfiguration' need to be within a namespace. 
+func (validatingWebhookConfigurationStrategy) NamespaceScoped() bool { + return false +} + +// PrepareForCreate clears the status of an validatingWebhookConfiguration before creation. +func (validatingWebhookConfigurationStrategy) PrepareForCreate(ctx genericapirequest.Context, obj runtime.Object) { + ic := obj.(*admissionregistration.ValidatingWebhookConfiguration) + ic.Generation = 1 +} + +// PrepareForUpdate clears fields that are not allowed to be set by end users on update. +func (validatingWebhookConfigurationStrategy) PrepareForUpdate(ctx genericapirequest.Context, obj, old runtime.Object) { + newIC := obj.(*admissionregistration.ValidatingWebhookConfiguration) + oldIC := old.(*admissionregistration.ValidatingWebhookConfiguration) + + // Any changes to the spec increment the generation number, any changes to the + // status should reflect the generation number of the corresponding object. + // See metav1.ObjectMeta description for more information on Generation. + if !reflect.DeepEqual(oldIC.Webhooks, newIC.Webhooks) { + newIC.Generation = oldIC.Generation + 1 + } +} + +// Validate validates a new validatingWebhookConfiguration. +func (validatingWebhookConfigurationStrategy) Validate(ctx genericapirequest.Context, obj runtime.Object) field.ErrorList { + ic := obj.(*admissionregistration.ValidatingWebhookConfiguration) + return validation.ValidateValidatingWebhookConfiguration(ic) +} + +// Canonicalize normalizes the object after validation. +func (validatingWebhookConfigurationStrategy) Canonicalize(obj runtime.Object) { +} + +// AllowCreateOnUpdate is true for validatingWebhookConfiguration; this means you may create one with a PUT request. +func (validatingWebhookConfigurationStrategy) AllowCreateOnUpdate() bool { + return false +} + +// ValidateUpdate is the default update validation for an end user. +func (validatingWebhookConfigurationStrategy) ValidateUpdate(ctx genericapirequest.Context, obj, old runtime.Object) field.ErrorList { + validationErrorList := validation.ValidateValidatingWebhookConfiguration(obj.(*admissionregistration.ValidatingWebhookConfiguration)) + updateErrorList := validation.ValidateValidatingWebhookConfigurationUpdate(obj.(*admissionregistration.ValidatingWebhookConfiguration), old.(*admissionregistration.ValidatingWebhookConfiguration)) + return append(validationErrorList, updateErrorList...) +} + +// AllowUnconditionalUpdate is the default update policy for validatingWebhookConfiguration objects. Status update should +// only be allowed if version match. 
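PrepareForUpdate in the strategy above bumps metadata.Generation only when the webhook list actually changes, and ValidateUpdate appends the update-specific errors to the full-object validation errors. A small, self-contained illustration of the generation rule, using a stand-in struct rather than the real API type:

package main

import (
	"fmt"
	"reflect"
)

// config stands in for admissionregistration.ValidatingWebhookConfiguration.
type config struct {
	Generation int64
	Webhooks   []string
}

// prepareForUpdate mirrors the strategy: increment Generation only when the
// user-visible spec (here, Webhooks) differs from the stored object.
func prepareForUpdate(newObj, oldObj *config) {
	if !reflect.DeepEqual(oldObj.Webhooks, newObj.Webhooks) {
		newObj.Generation = oldObj.Generation + 1
	}
}

func main() {
	old := &config{Generation: 3, Webhooks: []string{"a"}}

	unchanged := &config{Generation: 3, Webhooks: []string{"a"}}
	prepareForUpdate(unchanged, old)
	fmt.Println(unchanged.Generation) // 3: no spec change, no bump

	changed := &config{Generation: 3, Webhooks: []string{"a", "b"}}
	prepareForUpdate(changed, old)
	fmt.Println(changed.Generation) // 4: webhook list changed
}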
+func (validatingWebhookConfigurationStrategy) AllowUnconditionalUpdate() bool { + return false +} diff --git a/vendor/k8s.io/kubernetes/pkg/registry/apps/controllerrevision/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/apps/controllerrevision/BUILD index bfc7313563d8..24e6e13da2c9 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/apps/controllerrevision/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/apps/controllerrevision/BUILD @@ -9,10 +9,11 @@ load( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/apps/controllerrevision", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", "//pkg/apis/apps:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", @@ -25,8 +26,9 @@ go_library( "doc.go", "strategy.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/apps/controllerrevision", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/apps:go_default_library", "//pkg/apis/apps/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/apps/controllerrevision/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/apps/controllerrevision/storage/BUILD index f13157c9fd3b..083eb3f06d17 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/apps/controllerrevision/storage/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/apps/controllerrevision/storage/BUILD @@ -9,16 +9,18 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/apps/controllerrevision/storage", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", "//pkg/apis/apps:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/registry/registrytest:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/generic/testing:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/etcd/testing:go_default_library", ], ) @@ -26,8 +28,8 @@ go_test( go_library( name = "go_default_library", srcs = ["storage.go"], + importpath = "k8s.io/kubernetes/pkg/registry/apps/controllerrevision/storage", deps = [ - "//pkg/api:go_default_library", "//pkg/apis/apps:go_default_library", "//pkg/registry/apps/controllerrevision:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/apps/controllerrevision/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/apps/controllerrevision/storage/storage.go index fdc9851769d1..c1c1de7b04ae 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/apps/controllerrevision/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/apps/controllerrevision/storage/storage.go @@ -20,7 +20,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/apps" 
"k8s.io/kubernetes/pkg/registry/apps/controllerrevision" ) @@ -33,7 +32,6 @@ type REST struct { // NewREST returns a RESTStorage object that will work with ControllerRevision objects. func NewREST(optsGetter generic.RESTOptionsGetter) *REST { store := &genericregistry.Store{ - Copier: api.Scheme, NewFunc: func() runtime.Object { return &apps.ControllerRevision{} }, NewListFunc: func() runtime.Object { return &apps.ControllerRevisionList{} }, DefaultQualifiedResource: apps.Resource("controllerrevisions"), diff --git a/vendor/k8s.io/kubernetes/pkg/registry/apps/controllerrevision/strategy.go b/vendor/k8s.io/kubernetes/pkg/registry/apps/controllerrevision/strategy.go index f6f207b90539..08161d718ad4 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/apps/controllerrevision/strategy.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/apps/controllerrevision/strategy.go @@ -22,7 +22,7 @@ import ( genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/apiserver/pkg/storage/names" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/apis/apps/validation" ) @@ -35,7 +35,7 @@ type strategy struct { // Strategy is the default logic that applies when creating and updating ControllerRevision // objects via the REST API. -var Strategy = strategy{api.Scheme, names.SimpleNameGenerator} +var Strategy = strategy{legacyscheme.Scheme, names.SimpleNameGenerator} // Strategy should implement rest.RESTCreateStrategy var _ rest.RESTCreateStrategy = Strategy diff --git a/vendor/k8s.io/kubernetes/pkg/registry/apps/rest/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/apps/rest/BUILD index 50c102d96c41..70032abad7bc 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/apps/rest/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/apps/rest/BUILD @@ -8,14 +8,16 @@ load( go_library( name = "go_default_library", srcs = ["storage_apps.go"], + importpath = "k8s.io/kubernetes/pkg/registry/apps/rest", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/apps:go_default_library", "//pkg/registry/apps/controllerrevision/storage:go_default_library", "//pkg/registry/apps/statefulset/storage:go_default_library", "//pkg/registry/extensions/daemonset/storage:go_default_library", "//pkg/registry/extensions/deployment/storage:go_default_library", "//pkg/registry/extensions/replicaset/storage:go_default_library", + "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/apps/v1beta1:go_default_library", "//vendor/k8s.io/api/apps/v1beta2:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/apps/rest/storage_apps.go b/vendor/k8s.io/kubernetes/pkg/registry/apps/rest/storage_apps.go index c667c3048e93..f77900e88132 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/apps/rest/storage_apps.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/apps/rest/storage_apps.go @@ -17,13 +17,14 @@ limitations under the License. 
package rest import ( + appsapiv1 "k8s.io/api/apps/v1" appsapiv1beta1 "k8s.io/api/apps/v1beta1" appsapiv1beta2 "k8s.io/api/apps/v1beta2" "k8s.io/apiserver/pkg/registry/generic" "k8s.io/apiserver/pkg/registry/rest" genericapiserver "k8s.io/apiserver/pkg/server" serverstorage "k8s.io/apiserver/pkg/server/storage" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/apps" controllerrevisionsstore "k8s.io/kubernetes/pkg/registry/apps/controllerrevision/storage" statefulsetstore "k8s.io/kubernetes/pkg/registry/apps/statefulset/storage" @@ -35,7 +36,7 @@ import ( type RESTStorageProvider struct{} func (p RESTStorageProvider) NewRESTStorage(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter) (genericapiserver.APIGroupInfo, bool) { - apiGroupInfo := genericapiserver.NewDefaultAPIGroupInfo(apps.GroupName, api.Registry, api.Scheme, api.ParameterCodec, api.Codecs) + apiGroupInfo := genericapiserver.NewDefaultAPIGroupInfo(apps.GroupName, legacyscheme.Registry, legacyscheme.Scheme, legacyscheme.ParameterCodec, legacyscheme.Codecs) // If you add a version here, be sure to add an entry in `k8s.io/kubernetes/cmd/kube-apiserver/app/aggregator.go with specific priorities. // TODO refactor the plumbing to provide the information in the APIGroupInfo @@ -47,6 +48,10 @@ func (p RESTStorageProvider) NewRESTStorage(apiResourceConfigSource serverstorag apiGroupInfo.VersionedResourcesStorageMap[appsapiv1beta2.SchemeGroupVersion.Version] = p.v1beta2Storage(apiResourceConfigSource, restOptionsGetter) apiGroupInfo.GroupMeta.GroupVersion = appsapiv1beta2.SchemeGroupVersion } + if apiResourceConfigSource.AnyResourcesForVersionEnabled(appsapiv1.SchemeGroupVersion) { + apiGroupInfo.VersionedResourcesStorageMap[appsapiv1.SchemeGroupVersion.Version] = p.v1Storage(apiResourceConfigSource, restOptionsGetter) + apiGroupInfo.GroupMeta.GroupVersion = appsapiv1.SchemeGroupVersion + } return apiGroupInfo, true } @@ -109,6 +114,40 @@ func (p RESTStorageProvider) v1beta2Storage(apiResourceConfigSource serverstorag return storage } +func (p RESTStorageProvider) v1Storage(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter) map[string]rest.Storage { + version := appsapiv1.SchemeGroupVersion + + storage := map[string]rest.Storage{} + if apiResourceConfigSource.ResourceEnabled(version.WithResource("deployments")) { + deploymentStorage := deploymentstore.NewStorage(restOptionsGetter) + storage["deployments"] = deploymentStorage.Deployment + storage["deployments/status"] = deploymentStorage.Status + storage["deployments/scale"] = deploymentStorage.Scale + } + if apiResourceConfigSource.ResourceEnabled(version.WithResource("statefulsets")) { + statefulSetStorage := statefulsetstore.NewStorage(restOptionsGetter) + storage["statefulsets"] = statefulSetStorage.StatefulSet + storage["statefulsets/status"] = statefulSetStorage.Status + storage["statefulsets/scale"] = statefulSetStorage.Scale + } + if apiResourceConfigSource.ResourceEnabled(version.WithResource("daemonsets")) { + daemonSetStorage, daemonSetStatusStorage := daemonsetstore.NewREST(restOptionsGetter) + storage["daemonsets"] = daemonSetStorage + storage["daemonsets/status"] = daemonSetStatusStorage + } + if apiResourceConfigSource.ResourceEnabled(version.WithResource("replicasets")) { + replicaSetStorage := replicasetstore.NewStorage(restOptionsGetter) + storage["replicasets"] = replicaSetStorage.ReplicaSet + 
storage["replicasets/status"] = replicaSetStorage.Status + storage["replicasets/scale"] = replicaSetStorage.Scale + } + if apiResourceConfigSource.ResourceEnabled(version.WithResource("controllerrevisions")) { + historyStorage := controllerrevisionsstore.NewREST(restOptionsGetter) + storage["controllerrevisions"] = historyStorage + } + return storage +} + func (p RESTStorageProvider) GroupName() string { return apps.GroupName } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/apps/statefulset/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/apps/statefulset/BUILD index 253542236caf..deb73b96dc6e 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/apps/statefulset/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/apps/statefulset/BUILD @@ -13,16 +13,20 @@ go_library( "registry.go", "strategy.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/apps/statefulset", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/api/pod:go_default_library", "//pkg/apis/apps:go_default_library", "//pkg/apis/apps/validation:go_default_library", + "//vendor/k8s.io/api/apps/v1beta1:go_default_library", + "//vendor/k8s.io/api/apps/v1beta2:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", @@ -34,10 +38,11 @@ go_library( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/apps/statefulset", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", "//pkg/apis/apps:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/apps/statefulset/registry.go b/vendor/k8s.io/kubernetes/pkg/registry/apps/statefulset/registry.go index 25d136f86201..eb33b6e1ced4 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/apps/statefulset/registry.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/apps/statefulset/registry.go @@ -25,7 +25,6 @@ import ( "k8s.io/apimachinery/pkg/watch" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/apps" ) @@ -34,8 +33,8 @@ type Registry interface { ListStatefulSets(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (*apps.StatefulSetList, error) WatchStatefulSets(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (watch.Interface, error) GetStatefulSet(ctx genericapirequest.Context, statefulSetID string, options *metav1.GetOptions) (*apps.StatefulSet, error) - CreateStatefulSet(ctx genericapirequest.Context, statefulSet *apps.StatefulSet) (*apps.StatefulSet, error) - UpdateStatefulSet(ctx genericapirequest.Context, statefulSet *apps.StatefulSet) (*apps.StatefulSet, error) + CreateStatefulSet(ctx 
genericapirequest.Context, statefulSet *apps.StatefulSet, createValidation rest.ValidateObjectFunc) (*apps.StatefulSet, error) + UpdateStatefulSet(ctx genericapirequest.Context, statefulSet *apps.StatefulSet, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (*apps.StatefulSet, error) DeleteStatefulSet(ctx genericapirequest.Context, statefulSetID string) error } @@ -73,16 +72,16 @@ func (s *storage) GetStatefulSet(ctx genericapirequest.Context, statefulSetID st return obj.(*apps.StatefulSet), nil } -func (s *storage) CreateStatefulSet(ctx genericapirequest.Context, statefulSet *apps.StatefulSet) (*apps.StatefulSet, error) { - obj, err := s.Create(ctx, statefulSet, false) +func (s *storage) CreateStatefulSet(ctx genericapirequest.Context, statefulSet *apps.StatefulSet, createValidation rest.ValidateObjectFunc) (*apps.StatefulSet, error) { + obj, err := s.Create(ctx, statefulSet, rest.ValidateAllObjectFunc, false) if err != nil { return nil, err } return obj.(*apps.StatefulSet), nil } -func (s *storage) UpdateStatefulSet(ctx genericapirequest.Context, statefulSet *apps.StatefulSet) (*apps.StatefulSet, error) { - obj, _, err := s.Update(ctx, statefulSet.Name, rest.DefaultUpdatedObjectInfo(statefulSet, api.Scheme)) +func (s *storage) UpdateStatefulSet(ctx genericapirequest.Context, statefulSet *apps.StatefulSet, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (*apps.StatefulSet, error) { + obj, _, err := s.Update(ctx, statefulSet.Name, rest.DefaultUpdatedObjectInfo(statefulSet), createValidation, updateValidation) if err != nil { return nil, err } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/apps/statefulset/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/apps/statefulset/storage/BUILD index e713cb1c2ff2..6bbc1eee2042 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/apps/statefulset/storage/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/apps/statefulset/storage/BUILD @@ -9,11 +9,12 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/apps/statefulset/storage", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", "//pkg/apis/apps:go_default_library", - "//pkg/apis/extensions:go_default_library", + "//pkg/apis/autoscaling:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/registry/registrytest:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", @@ -23,6 +24,7 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/generic/testing:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/etcd/testing:go_default_library", ], @@ -31,11 +33,15 @@ go_test( go_library( name = "go_default_library", srcs = ["storage.go"], + importpath = "k8s.io/kubernetes/pkg/registry/apps/statefulset/storage", deps = [ - "//pkg/api:go_default_library", "//pkg/apis/apps:go_default_library", + "//pkg/apis/apps/v1beta1:go_default_library", + "//pkg/apis/apps/v1beta2:go_default_library", + "//pkg/apis/autoscaling:go_default_library", + "//pkg/apis/autoscaling/v1:go_default_library", + "//pkg/apis/autoscaling/validation:go_default_library", 
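The registry signature changes above thread admission-style validation callbacks through Create and Update rather than validating inside the storage layer: CreateStatefulSet passes rest.ValidateAllObjectFunc (the accept-everything default) and UpdateStatefulSet forwards the callbacks it receives. A simplified sketch of that callback threading, with stand-in function types in place of rest.ValidateObjectFunc and rest.ValidateObjectUpdateFunc:

package main

import (
	"errors"
	"fmt"
)

// Stand-ins for rest.ValidateObjectFunc and rest.ValidateObjectUpdateFunc.
type validateObjectFunc func(obj interface{}) error
type validateObjectUpdateFunc func(obj, old interface{}) error

// validateAllObjectFunc mirrors rest.ValidateAllObjectFunc: it accepts everything.
func validateAllObjectFunc(obj interface{}) error { return nil }

type statefulSet struct {
	Name     string
	Replicas int
}

// updateStatefulSet shows the shape of the new UpdateStatefulSet: the callbacks
// run before anything is persisted, so callers can inject extra checks. In the
// real code both callbacks are forwarded to the store; this sketch only runs one.
func updateStatefulSet(old, updated *statefulSet, createValidation validateObjectFunc, updateValidation validateObjectUpdateFunc) (*statefulSet, error) {
	if err := updateValidation(updated, old); err != nil {
		return nil, err
	}
	return updated, nil
}

func main() {
	old := &statefulSet{Name: "web", Replicas: 3}
	updated := &statefulSet{Name: "web", Replicas: 5}

	rejectGrowth := func(obj, oldObj interface{}) error {
		if obj.(*statefulSet).Replicas > oldObj.(*statefulSet).Replicas {
			return errors.New("scaling up is rejected by this example validator")
		}
		return nil
	}
	_, err := updateStatefulSet(old, updated, validateAllObjectFunc, rejectGrowth)
	fmt.Println(err)
}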
"//pkg/apis/extensions:go_default_library", - "//pkg/apis/extensions/validation:go_default_library", "//pkg/printers:go_default_library", "//pkg/printers/internalversion:go_default_library", "//pkg/printers/storage:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/apps/statefulset/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/apps/statefulset/storage/storage.go index d03f6f8683a7..d97f17270e56 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/apps/statefulset/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/apps/statefulset/storage/storage.go @@ -27,10 +27,13 @@ import ( "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/apps" + appsv1beta1 "k8s.io/kubernetes/pkg/apis/apps/v1beta1" + appsv1beta2 "k8s.io/kubernetes/pkg/apis/apps/v1beta2" + "k8s.io/kubernetes/pkg/apis/autoscaling" + autoscalingv1 "k8s.io/kubernetes/pkg/apis/autoscaling/v1" + autoscalingvalidation "k8s.io/kubernetes/pkg/apis/autoscaling/validation" "k8s.io/kubernetes/pkg/apis/extensions" - extvalidation "k8s.io/kubernetes/pkg/apis/extensions/validation" "k8s.io/kubernetes/pkg/printers" printersinternal "k8s.io/kubernetes/pkg/printers/internalversion" printerstorage "k8s.io/kubernetes/pkg/printers/storage" @@ -63,7 +66,6 @@ type REST struct { // NewREST returns a RESTStorage object that will work against statefulsets. func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { store := &genericregistry.Store{ - Copier: api.Scheme, NewFunc: func() runtime.Object { return &apps.StatefulSet{} }, NewListFunc: func() runtime.Object { return &apps.StatefulSetList{} }, DefaultQualifiedResource: apps.Resource("statefulsets"), @@ -107,8 +109,8 @@ func (r *StatusREST) Get(ctx genericapirequest.Context, name string, options *me } // Update alters the status subset of an object. 
-func (r *StatusREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) { - return r.store.Update(ctx, name, objInfo) +func (r *StatusREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (runtime.Object, bool, error) { + return r.store.Update(ctx, name, objInfo, createValidation, updateValidation) } // Implement ShortNamesProvider @@ -127,13 +129,20 @@ type ScaleREST struct { var _ = rest.Patcher(&ScaleREST{}) var _ = rest.GroupVersionKindProvider(&ScaleREST{}) -func (r *ScaleREST) GroupVersionKind() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "extensions", Version: "v1beta1", Kind: "Scale"} +func (r *ScaleREST) GroupVersionKind(containingGV schema.GroupVersion) schema.GroupVersionKind { + switch containingGV { + case appsv1beta1.SchemeGroupVersion: + return appsv1beta1.SchemeGroupVersion.WithKind("Scale") + case appsv1beta2.SchemeGroupVersion: + return appsv1beta2.SchemeGroupVersion.WithKind("Scale") + default: + return autoscalingv1.SchemeGroupVersion.WithKind("Scale") + } } // New creates a new Scale object func (r *ScaleREST) New() runtime.Object { - return &extensions.Scale{} + return &autoscaling.Scale{} } func (r *ScaleREST) Get(ctx genericapirequest.Context, name string, options *metav1.GetOptions) (runtime.Object, error) { @@ -148,7 +157,7 @@ func (r *ScaleREST) Get(ctx genericapirequest.Context, name string, options *met return scale, err } -func (r *ScaleREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) { +func (r *ScaleREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (runtime.Object, bool, error) { ss, err := r.registry.GetStatefulSet(ctx, name, &metav1.GetOptions{}) if err != nil { return nil, false, err @@ -166,18 +175,18 @@ func (r *ScaleREST) Update(ctx genericapirequest.Context, name string, objInfo r if obj == nil { return nil, false, errors.NewBadRequest(fmt.Sprintf("nil update passed to Scale")) } - scale, ok := obj.(*extensions.Scale) + scale, ok := obj.(*autoscaling.Scale) if !ok { return nil, false, errors.NewBadRequest(fmt.Sprintf("wrong object passed to Scale update: %v", obj)) } - if errs := extvalidation.ValidateScale(scale); len(errs) > 0 { + if errs := autoscalingvalidation.ValidateScale(scale); len(errs) > 0 { return nil, false, errors.NewInvalid(extensions.Kind("Scale"), scale.Name, errs) } ss.Spec.Replicas = scale.Spec.Replicas ss.ResourceVersion = scale.ResourceVersion - ss, err = r.registry.UpdateStatefulSet(ctx, ss) + ss, err = r.registry.UpdateStatefulSet(ctx, ss, createValidation, updateValidation) if err != nil { return nil, false, err } @@ -189,8 +198,12 @@ func (r *ScaleREST) Update(ctx genericapirequest.Context, name string, objInfo r } // scaleFromStatefulSet returns a scale subresource for a statefulset. -func scaleFromStatefulSet(ss *apps.StatefulSet) (*extensions.Scale, error) { - return &extensions.Scale{ +func scaleFromStatefulSet(ss *apps.StatefulSet) (*autoscaling.Scale, error) { + selector, err := metav1.LabelSelectorAsSelector(ss.Spec.Selector) + if err != nil { + return nil, err + } + return &autoscaling.Scale{ // TODO: Create a variant of ObjectMeta type that only contains the fields below. 
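GroupVersionKind on the scale subresource above now depends on the group/version serving the request, so apps/v1beta1 and apps/v1beta2 clients keep their own Scale kind while everything else is reported as autoscaling/v1 Scale. A short runnable illustration of the same dispatch, using only k8s.io/apimachinery's schema package (the group/version values are the ones named in the diff):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

// scaleKindFor mirrors the switch in ScaleREST.GroupVersionKind: the Scale kind
// is reported in the group/version of the containing API group when that group
// defines its own Scale, and as autoscaling/v1 otherwise.
func scaleKindFor(containingGV schema.GroupVersion) schema.GroupVersionKind {
	appsV1beta1 := schema.GroupVersion{Group: "apps", Version: "v1beta1"}
	appsV1beta2 := schema.GroupVersion{Group: "apps", Version: "v1beta2"}
	autoscalingV1 := schema.GroupVersion{Group: "autoscaling", Version: "v1"}

	switch containingGV {
	case appsV1beta1, appsV1beta2:
		return containingGV.WithKind("Scale")
	default:
		return autoscalingV1.WithKind("Scale")
	}
}

func main() {
	fmt.Println(scaleKindFor(schema.GroupVersion{Group: "apps", Version: "v1beta2"}))
	fmt.Println(scaleKindFor(schema.GroupVersion{Group: "apps", Version: "v1"}))
}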
ObjectMeta: metav1.ObjectMeta{ Name: ss.Name, @@ -199,12 +212,12 @@ func scaleFromStatefulSet(ss *apps.StatefulSet) (*extensions.Scale, error) { ResourceVersion: ss.ResourceVersion, CreationTimestamp: ss.CreationTimestamp, }, - Spec: extensions.ScaleSpec{ + Spec: autoscaling.ScaleSpec{ Replicas: ss.Spec.Replicas, }, - Status: extensions.ScaleStatus{ + Status: autoscaling.ScaleStatus{ Replicas: ss.Status.Replicas, - Selector: ss.Spec.Selector, + Selector: selector.String(), }, }, nil } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/apps/statefulset/strategy.go b/vendor/k8s.io/kubernetes/pkg/registry/apps/statefulset/strategy.go index c070619c4c4d..08ff765a34dc 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/apps/statefulset/strategy.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/apps/statefulset/strategy.go @@ -17,13 +17,16 @@ limitations under the License. package statefulset import ( + appsv1beta1 "k8s.io/api/apps/v1beta1" + appsv1beta2 "k8s.io/api/apps/v1beta2" apiequality "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/validation/field" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/apiserver/pkg/storage/names" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/api/pod" "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/apis/apps/validation" @@ -36,11 +39,20 @@ type statefulSetStrategy struct { } // Strategy is the default logic that applies when creating and updating Replication StatefulSet objects. -var Strategy = statefulSetStrategy{api.Scheme, names.SimpleNameGenerator} - -// DefaultGarbageCollectionPolicy returns Orphan because that was the default -// behavior before the server-side garbage collection was implemented. -func (statefulSetStrategy) DefaultGarbageCollectionPolicy() rest.GarbageCollectionPolicy { +var Strategy = statefulSetStrategy{legacyscheme.Scheme, names.SimpleNameGenerator} + +// DefaultGarbageCollectionPolicy returns OrphanDependents by default. For apps/v1, returns DeleteDependents. 
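scaleFromStatefulSet above now reports the selector as a string in autoscaling.ScaleStatus, converting the StatefulSet's structured metav1.LabelSelector with metav1.LabelSelectorAsSelector. A short runnable illustration of that conversion (the labels here are made up for the example):

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// A structured selector like the one on StatefulSet.Spec.Selector.
	selector := &metav1.LabelSelector{
		MatchLabels: map[string]string{"app": "web"},
	}

	// LabelSelectorAsSelector turns the structured form into a labels.Selector;
	// its String() form is what the Scale status now carries.
	s, err := metav1.LabelSelectorAsSelector(selector)
	if err != nil {
		fmt.Println("invalid selector:", err)
		return
	}
	fmt.Println(s.String()) // app=web
}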
+func (statefulSetStrategy) DefaultGarbageCollectionPolicy(ctx genericapirequest.Context) rest.GarbageCollectionPolicy { + if requestInfo, found := genericapirequest.RequestInfoFrom(ctx); found { + groupVersion := schema.GroupVersion{Group: requestInfo.APIGroup, Version: requestInfo.APIVersion} + switch groupVersion { + case appsv1beta1.SchemeGroupVersion, appsv1beta2.SchemeGroupVersion: + // for back compatibility + return rest.OrphanDependents + default: + return rest.DeleteDependents + } + } return rest.OrphanDependents } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/authentication/rest/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/authentication/rest/BUILD index 51806bec7985..bd8731b24737 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/authentication/rest/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/authentication/rest/BUILD @@ -8,8 +8,9 @@ load( go_library( name = "go_default_library", srcs = ["storage_authentication.go"], + importpath = "k8s.io/kubernetes/pkg/registry/authentication/rest", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/authentication:go_default_library", "//pkg/registry/authentication/tokenreview:go_default_library", "//vendor/k8s.io/api/authentication/v1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/authentication/rest/storage_authentication.go b/vendor/k8s.io/kubernetes/pkg/registry/authentication/rest/storage_authentication.go index f71f0d8360e1..e3f1aa6da953 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/authentication/rest/storage_authentication.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/authentication/rest/storage_authentication.go @@ -24,7 +24,7 @@ import ( "k8s.io/apiserver/pkg/registry/rest" genericapiserver "k8s.io/apiserver/pkg/server" serverstorage "k8s.io/apiserver/pkg/server/storage" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/authentication" "k8s.io/kubernetes/pkg/registry/authentication/tokenreview" ) @@ -39,7 +39,7 @@ func (p RESTStorageProvider) NewRESTStorage(apiResourceConfigSource serverstorag // return genericapiserver.APIGroupInfo{}, false // } - apiGroupInfo := genericapiserver.NewDefaultAPIGroupInfo(authentication.GroupName, api.Registry, api.Scheme, api.ParameterCodec, api.Codecs) + apiGroupInfo := genericapiserver.NewDefaultAPIGroupInfo(authentication.GroupName, legacyscheme.Registry, legacyscheme.Scheme, legacyscheme.ParameterCodec, legacyscheme.Codecs) // If you add a version here, be sure to add an entry in `k8s.io/kubernetes/cmd/kube-apiserver/app/aggregator.go with specific priorities. 
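The statefulset hunk above is one instance of a pattern this bump applies to several of the registries that follow: DefaultGarbageCollectionPolicy now receives the request context, so the garbage-collection default can depend on which API group/version served the request. A minimal sketch of that pattern, assuming a hypothetical widgetStrategy that is not part of this diff and using only APIs that appear in the vendored code:

package sketch // illustrative only, not part of the vendored diff

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
	"k8s.io/apiserver/pkg/registry/rest"
)

type widgetStrategy struct{}

// DefaultGarbageCollectionPolicy keeps the legacy orphaning behavior for the
// older beta group/versions and defaults newer groups to DeleteDependents.
func (widgetStrategy) DefaultGarbageCollectionPolicy(ctx genericapirequest.Context) rest.GarbageCollectionPolicy {
	requestInfo, found := genericapirequest.RequestInfoFrom(ctx)
	if !found {
		// No request info (for example an internal caller): stay conservative.
		return rest.OrphanDependents
	}
	gv := schema.GroupVersion{Group: requestInfo.APIGroup, Version: requestInfo.APIVersion}
	if gv.Group == "apps" && (gv.Version == "v1beta1" || gv.Version == "v1beta2") {
		// Preserve the pre-existing behavior for clients of the older versions.
		return rest.OrphanDependents
	}
	return rest.DeleteDependents
}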
// TODO refactor the plumbing to provide the information in the APIGroupInfo diff --git a/vendor/k8s.io/kubernetes/pkg/registry/authentication/tokenreview/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/authentication/tokenreview/BUILD index d5ab8782e09d..26eba385a93f 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/authentication/tokenreview/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/authentication/tokenreview/BUILD @@ -8,12 +8,14 @@ load( go_library( name = "go_default_library", srcs = ["storage.go"], + importpath = "k8s.io/kubernetes/pkg/registry/authentication/tokenreview", deps = [ "//pkg/apis/authentication:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/registry/authentication/tokenreview/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/authentication/tokenreview/storage.go index f512907e6e5f..28c1da7c8d97 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/authentication/tokenreview/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/authentication/tokenreview/storage.go @@ -24,6 +24,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/authentication/authenticator" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/registry/rest" "k8s.io/kubernetes/pkg/apis/authentication" ) @@ -39,7 +40,7 @@ func (r *REST) New() runtime.Object { return &authentication.TokenReview{} } -func (r *REST) Create(ctx genericapirequest.Context, obj runtime.Object, includeUninitialized bool) (runtime.Object, error) { +func (r *REST) Create(ctx genericapirequest.Context, obj runtime.Object, createValidation rest.ValidateObjectFunc, includeUninitialized bool) (runtime.Object, error) { tokenReview, ok := obj.(*authentication.TokenReview) if !ok { return nil, apierrors.NewBadRequest(fmt.Sprintf("not a TokenReview: %#v", obj)) diff --git a/vendor/k8s.io/kubernetes/pkg/registry/authorization/localsubjectaccessreview/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/authorization/localsubjectaccessreview/BUILD index 183187ecac5e..6ed963cd2aa4 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/authorization/localsubjectaccessreview/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/authorization/localsubjectaccessreview/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["rest.go"], + importpath = "k8s.io/kubernetes/pkg/registry/authorization/localsubjectaccessreview", deps = [ "//pkg/apis/authorization:go_default_library", "//pkg/apis/authorization/validation:go_default_library", @@ -16,6 +17,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/registry/authorization/localsubjectaccessreview/rest.go b/vendor/k8s.io/kubernetes/pkg/registry/authorization/localsubjectaccessreview/rest.go index d36f17582e1f..af49b6bd4d96 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/authorization/localsubjectaccessreview/rest.go +++ 
b/vendor/k8s.io/kubernetes/pkg/registry/authorization/localsubjectaccessreview/rest.go @@ -23,6 +23,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/authorization/authorizer" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/registry/rest" authorizationapi "k8s.io/kubernetes/pkg/apis/authorization" authorizationvalidation "k8s.io/kubernetes/pkg/apis/authorization/validation" authorizationutil "k8s.io/kubernetes/pkg/registry/authorization/util" @@ -40,7 +41,7 @@ func (r *REST) New() runtime.Object { return &authorizationapi.LocalSubjectAccessReview{} } -func (r *REST) Create(ctx genericapirequest.Context, obj runtime.Object, includeUninitialized bool) (runtime.Object, error) { +func (r *REST) Create(ctx genericapirequest.Context, obj runtime.Object, createValidation rest.ValidateObjectFunc, includeUninitialized bool) (runtime.Object, error) { localSubjectAccessReview, ok := obj.(*authorizationapi.LocalSubjectAccessReview) if !ok { return nil, kapierrors.NewBadRequest(fmt.Sprintf("not a LocaLocalSubjectAccessReview: %#v", obj)) @@ -57,10 +58,11 @@ func (r *REST) Create(ctx genericapirequest.Context, obj runtime.Object, include } authorizationAttributes := authorizationutil.AuthorizationAttributesFrom(localSubjectAccessReview.Spec) - allowed, reason, evaluationErr := r.authorizer.Authorize(authorizationAttributes) + decision, reason, evaluationErr := r.authorizer.Authorize(authorizationAttributes) localSubjectAccessReview.Status = authorizationapi.SubjectAccessReviewStatus{ - Allowed: allowed, + Allowed: (decision == authorizer.DecisionAllow), + Denied: (decision == authorizer.DecisionDeny), Reason: reason, } if evaluationErr != nil { diff --git a/vendor/k8s.io/kubernetes/pkg/registry/authorization/rest/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/authorization/rest/BUILD index 59a4a229d8bd..029c7848d02e 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/authorization/rest/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/authorization/rest/BUILD @@ -8,8 +8,9 @@ load( go_library( name = "go_default_library", srcs = ["storage_authorization.go"], + importpath = "k8s.io/kubernetes/pkg/registry/authorization/rest", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/authorization:go_default_library", "//pkg/registry/authorization/localsubjectaccessreview:go_default_library", "//pkg/registry/authorization/selfsubjectaccessreview:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/authorization/rest/storage_authorization.go b/vendor/k8s.io/kubernetes/pkg/registry/authorization/rest/storage_authorization.go index 19eeb2fd2544..10f3293c6db2 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/authorization/rest/storage_authorization.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/authorization/rest/storage_authorization.go @@ -24,7 +24,7 @@ import ( "k8s.io/apiserver/pkg/registry/rest" genericapiserver "k8s.io/apiserver/pkg/server" serverstorage "k8s.io/apiserver/pkg/server/storage" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/authorization" "k8s.io/kubernetes/pkg/registry/authorization/localsubjectaccessreview" "k8s.io/kubernetes/pkg/registry/authorization/selfsubjectaccessreview" @@ -42,7 +42,7 @@ func (p RESTStorageProvider) NewRESTStorage(apiResourceConfigSource serverstorag return genericapiserver.APIGroupInfo{}, false } - apiGroupInfo := genericapiserver.NewDefaultAPIGroupInfo(authorization.GroupName, api.Registry, 
api.Scheme, api.ParameterCodec, api.Codecs) + apiGroupInfo := genericapiserver.NewDefaultAPIGroupInfo(authorization.GroupName, legacyscheme.Registry, legacyscheme.Scheme, legacyscheme.ParameterCodec, legacyscheme.Codecs) // If you add a version here, be sure to add an entry in `k8s.io/kubernetes/cmd/kube-apiserver/app/aggregator.go with specific priorities. // TODO refactor the plumbing to provide the information in the APIGroupInfo diff --git a/vendor/k8s.io/kubernetes/pkg/registry/authorization/selfsubjectaccessreview/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/authorization/selfsubjectaccessreview/BUILD index 183187ecac5e..06b8845dbc45 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/authorization/selfsubjectaccessreview/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/authorization/selfsubjectaccessreview/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["rest.go"], + importpath = "k8s.io/kubernetes/pkg/registry/authorization/selfsubjectaccessreview", deps = [ "//pkg/apis/authorization:go_default_library", "//pkg/apis/authorization/validation:go_default_library", @@ -16,6 +17,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/registry/authorization/selfsubjectaccessreview/rest.go b/vendor/k8s.io/kubernetes/pkg/registry/authorization/selfsubjectaccessreview/rest.go index 4498b5f5a704..38900a877f4e 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/authorization/selfsubjectaccessreview/rest.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/authorization/selfsubjectaccessreview/rest.go @@ -23,6 +23,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/authorization/authorizer" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/registry/rest" authorizationapi "k8s.io/kubernetes/pkg/apis/authorization" authorizationvalidation "k8s.io/kubernetes/pkg/apis/authorization/validation" authorizationutil "k8s.io/kubernetes/pkg/registry/authorization/util" @@ -40,7 +41,7 @@ func (r *REST) New() runtime.Object { return &authorizationapi.SelfSubjectAccessReview{} } -func (r *REST) Create(ctx genericapirequest.Context, obj runtime.Object, includeUninitialized bool) (runtime.Object, error) { +func (r *REST) Create(ctx genericapirequest.Context, obj runtime.Object, createValidation rest.ValidateObjectFunc, includeUninitialized bool) (runtime.Object, error) { selfSAR, ok := obj.(*authorizationapi.SelfSubjectAccessReview) if !ok { return nil, apierrors.NewBadRequest(fmt.Sprintf("not a SelfSubjectAccessReview: %#v", obj)) @@ -60,10 +61,11 @@ func (r *REST) Create(ctx genericapirequest.Context, obj runtime.Object, include authorizationAttributes = authorizationutil.NonResourceAttributesFrom(userToCheck, *selfSAR.Spec.NonResourceAttributes) } - allowed, reason, evaluationErr := r.authorizer.Authorize(authorizationAttributes) + decision, reason, evaluationErr := r.authorizer.Authorize(authorizationAttributes) selfSAR.Status = authorizationapi.SubjectAccessReviewStatus{ - Allowed: allowed, + Allowed: (decision == authorizer.DecisionAllow), + Denied: (decision == authorizer.DecisionDeny), Reason: reason, } if evaluationErr != nil { diff --git a/vendor/k8s.io/kubernetes/pkg/registry/authorization/selfsubjectrulesreview/BUILD 
b/vendor/k8s.io/kubernetes/pkg/registry/authorization/selfsubjectrulesreview/BUILD index 8c293520bfb1..2afdc2381bf1 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/authorization/selfsubjectrulesreview/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/authorization/selfsubjectrulesreview/BUILD @@ -3,6 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = ["rest.go"], + importpath = "k8s.io/kubernetes/pkg/registry/authorization/selfsubjectrulesreview", visibility = ["//visibility:public"], deps = [ "//pkg/apis/authorization:go_default_library", @@ -10,6 +11,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/registry/authorization/selfsubjectrulesreview/rest.go b/vendor/k8s.io/kubernetes/pkg/registry/authorization/selfsubjectrulesreview/rest.go index 062829faba52..2093f324edee 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/authorization/selfsubjectrulesreview/rest.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/authorization/selfsubjectrulesreview/rest.go @@ -23,6 +23,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/authorization/authorizer" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/registry/rest" authorizationapi "k8s.io/kubernetes/pkg/apis/authorization" ) @@ -42,7 +43,7 @@ func (r *REST) New() runtime.Object { } // Create attempts to get self subject rules in specific namespace. -func (r *REST) Create(ctx genericapirequest.Context, obj runtime.Object, includeUninitialized bool) (runtime.Object, error) { +func (r *REST) Create(ctx genericapirequest.Context, obj runtime.Object, createValidation rest.ValidateObjectFunc, includeUninitialized bool) (runtime.Object, error) { selfSRR, ok := obj.(*authorizationapi.SelfSubjectRulesReview) if !ok { return nil, apierrors.NewBadRequest(fmt.Sprintf("not a SelfSubjectRulesReview: %#v", obj)) diff --git a/vendor/k8s.io/kubernetes/pkg/registry/authorization/subjectaccessreview/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/authorization/subjectaccessreview/BUILD index 48a8ed0edeb1..fb4474b43984 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/authorization/subjectaccessreview/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/authorization/subjectaccessreview/BUILD @@ -9,6 +9,7 @@ load( go_library( name = "go_default_library", srcs = ["rest.go"], + importpath = "k8s.io/kubernetes/pkg/registry/authorization/subjectaccessreview", deps = [ "//pkg/apis/authorization:go_default_library", "//pkg/apis/authorization/validation:go_default_library", @@ -17,6 +18,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", ], ) @@ -36,11 +38,13 @@ filegroup( go_test( name = "go_default_test", srcs = ["rest_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/authorization/subjectaccessreview", library = ":go_default_library", deps = [ "//pkg/apis/authorization:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", 
"//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/registry/authorization/subjectaccessreview/rest.go b/vendor/k8s.io/kubernetes/pkg/registry/authorization/subjectaccessreview/rest.go index 1583a32ac042..92da3d342443 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/authorization/subjectaccessreview/rest.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/authorization/subjectaccessreview/rest.go @@ -23,6 +23,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/authorization/authorizer" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/registry/rest" authorizationapi "k8s.io/kubernetes/pkg/apis/authorization" authorizationvalidation "k8s.io/kubernetes/pkg/apis/authorization/validation" authorizationutil "k8s.io/kubernetes/pkg/registry/authorization/util" @@ -40,7 +41,7 @@ func (r *REST) New() runtime.Object { return &authorizationapi.SubjectAccessReview{} } -func (r *REST) Create(ctx genericapirequest.Context, obj runtime.Object, includeUninitialized bool) (runtime.Object, error) { +func (r *REST) Create(ctx genericapirequest.Context, obj runtime.Object, createValidation rest.ValidateObjectFunc, includeUninitialized bool) (runtime.Object, error) { subjectAccessReview, ok := obj.(*authorizationapi.SubjectAccessReview) if !ok { return nil, kapierrors.NewBadRequest(fmt.Sprintf("not a SubjectAccessReview: %#v", obj)) @@ -50,10 +51,11 @@ func (r *REST) Create(ctx genericapirequest.Context, obj runtime.Object, include } authorizationAttributes := authorizationutil.AuthorizationAttributesFrom(subjectAccessReview.Spec) - allowed, reason, evaluationErr := r.authorizer.Authorize(authorizationAttributes) + decision, reason, evaluationErr := r.authorizer.Authorize(authorizationAttributes) subjectAccessReview.Status = authorizationapi.SubjectAccessReviewStatus{ - Allowed: allowed, + Allowed: (decision == authorizer.DecisionAllow), + Denied: (decision == authorizer.DecisionDeny), Reason: reason, } if evaluationErr != nil { diff --git a/vendor/k8s.io/kubernetes/pkg/registry/authorization/util/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/authorization/util/BUILD index b87df6971b72..0d1fc9be2de7 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/authorization/util/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/authorization/util/BUILD @@ -9,6 +9,7 @@ load( go_library( name = "go_default_library", srcs = ["helpers.go"], + importpath = "k8s.io/kubernetes/pkg/registry/authorization/util", deps = [ "//pkg/apis/authorization:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", @@ -32,6 +33,7 @@ filegroup( go_test( name = "go_default_test", srcs = ["helpers_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/authorization/util", library = ":go_default_library", deps = [ "//pkg/apis/authorization:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/autoscaling/horizontalpodautoscaler/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/autoscaling/horizontalpodautoscaler/BUILD index 8eb0c99ba174..e5fef0856af4 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/autoscaling/horizontalpodautoscaler/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/autoscaling/horizontalpodautoscaler/BUILD @@ -11,8 +11,9 @@ go_library( "doc.go", "strategy.go", ], + importpath = 
"k8s.io/kubernetes/pkg/registry/autoscaling/horizontalpodautoscaler", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/autoscaling:go_default_library", "//pkg/apis/autoscaling/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/autoscaling/horizontalpodautoscaler/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/autoscaling/horizontalpodautoscaler/storage/BUILD index 499f00a7067c..209cfeeea2e0 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/autoscaling/horizontalpodautoscaler/storage/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/autoscaling/horizontalpodautoscaler/storage/BUILD @@ -9,10 +9,11 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/autoscaling/horizontalpodautoscaler/storage", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", "//pkg/apis/autoscaling:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/registry/registrytest:go_default_library", "//vendor/k8s.io/api/autoscaling/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -20,6 +21,7 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/generic/testing:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/etcd/testing:go_default_library", ], ) @@ -27,8 +29,8 @@ go_test( go_library( name = "go_default_library", srcs = ["storage.go"], + importpath = "k8s.io/kubernetes/pkg/registry/autoscaling/horizontalpodautoscaler/storage", deps = [ - "//pkg/api:go_default_library", "//pkg/apis/autoscaling:go_default_library", "//pkg/registry/autoscaling/horizontalpodautoscaler:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/autoscaling/horizontalpodautoscaler/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/autoscaling/horizontalpodautoscaler/storage/storage.go index cf5655d328e6..5d6602f5dd0b 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/autoscaling/horizontalpodautoscaler/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/autoscaling/horizontalpodautoscaler/storage/storage.go @@ -23,7 +23,6 @@ import ( "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/autoscaling" "k8s.io/kubernetes/pkg/registry/autoscaling/horizontalpodautoscaler" ) @@ -35,7 +34,6 @@ type REST struct { // NewREST returns a RESTStorage object that will work against horizontal pod autoscalers. func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { store := &genericregistry.Store{ - Copier: api.Scheme, NewFunc: func() runtime.Object { return &autoscaling.HorizontalPodAutoscaler{} }, NewListFunc: func() runtime.Object { return &autoscaling.HorizontalPodAutoscalerList{} }, DefaultQualifiedResource: autoscaling.Resource("horizontalpodautoscalers"), @@ -85,6 +83,6 @@ func (r *StatusREST) Get(ctx genericapirequest.Context, name string, options *me } // Update alters the status subset of an object. 
-func (r *StatusREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) { - return r.store.Update(ctx, name, objInfo) +func (r *StatusREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (runtime.Object, bool, error) { + return r.store.Update(ctx, name, objInfo, createValidation, updateValidation) } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/autoscaling/horizontalpodautoscaler/strategy.go b/vendor/k8s.io/kubernetes/pkg/registry/autoscaling/horizontalpodautoscaler/strategy.go index d9c81ab07e5b..3100a96982f9 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/autoscaling/horizontalpodautoscaler/strategy.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/autoscaling/horizontalpodautoscaler/strategy.go @@ -21,7 +21,7 @@ import ( "k8s.io/apimachinery/pkg/util/validation/field" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/storage/names" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/autoscaling" "k8s.io/kubernetes/pkg/apis/autoscaling/validation" ) @@ -34,7 +34,7 @@ type autoscalerStrategy struct { // Strategy is the default logic that applies when creating and updating HorizontalPodAutoscaler // objects via the REST API. -var Strategy = autoscalerStrategy{api.Scheme, names.SimpleNameGenerator} +var Strategy = autoscalerStrategy{legacyscheme.Scheme, names.SimpleNameGenerator} // NamespaceScoped is true for autoscaler. func (autoscalerStrategy) NamespaceScoped() bool { diff --git a/vendor/k8s.io/kubernetes/pkg/registry/autoscaling/rest/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/autoscaling/rest/BUILD index 7d8775ebb6b4..f0f62a9a8e93 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/autoscaling/rest/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/autoscaling/rest/BUILD @@ -8,8 +8,9 @@ load( go_library( name = "go_default_library", srcs = ["storage_autoscaling.go"], + importpath = "k8s.io/kubernetes/pkg/registry/autoscaling/rest", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/autoscaling:go_default_library", "//pkg/registry/autoscaling/horizontalpodautoscaler/storage:go_default_library", "//vendor/k8s.io/api/autoscaling/v1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/autoscaling/rest/storage_autoscaling.go b/vendor/k8s.io/kubernetes/pkg/registry/autoscaling/rest/storage_autoscaling.go index 0b24cd12262c..e6ce55b483d5 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/autoscaling/rest/storage_autoscaling.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/autoscaling/rest/storage_autoscaling.go @@ -23,7 +23,7 @@ import ( "k8s.io/apiserver/pkg/registry/rest" genericapiserver "k8s.io/apiserver/pkg/server" serverstorage "k8s.io/apiserver/pkg/server/storage" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/autoscaling" horizontalpodautoscalerstore "k8s.io/kubernetes/pkg/registry/autoscaling/horizontalpodautoscaler/storage" ) @@ -31,7 +31,7 @@ import ( type RESTStorageProvider struct{} func (p RESTStorageProvider) NewRESTStorage(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter) (genericapiserver.APIGroupInfo, bool) { - apiGroupInfo := genericapiserver.NewDefaultAPIGroupInfo(autoscaling.GroupName, api.Registry, api.Scheme, 
api.ParameterCodec, api.Codecs) + apiGroupInfo := genericapiserver.NewDefaultAPIGroupInfo(autoscaling.GroupName, legacyscheme.Registry, legacyscheme.Scheme, legacyscheme.ParameterCodec, legacyscheme.Codecs) // If you add a version here, be sure to add an entry in `k8s.io/kubernetes/cmd/kube-apiserver/app/aggregator.go with specific priorities. // TODO refactor the plumbing to provide the information in the APIGroupInfo diff --git a/vendor/k8s.io/kubernetes/pkg/registry/batch/cronjob/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/batch/cronjob/BUILD index 0910d4e39608..4b037adf2d77 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/batch/cronjob/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/batch/cronjob/BUILD @@ -12,8 +12,9 @@ go_library( "doc.go", "strategy.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/batch/cronjob", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/api/pod:go_default_library", "//pkg/apis/batch:go_default_library", "//pkg/apis/batch/validation:go_default_library", @@ -28,10 +29,11 @@ go_library( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/batch/cronjob", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", "//pkg/apis/batch:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/batch/cronjob/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/batch/cronjob/storage/BUILD index 326e3a01ebc6..c5c5c535c7f5 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/batch/cronjob/storage/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/batch/cronjob/storage/BUILD @@ -9,11 +9,12 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/batch/cronjob/storage", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", "//pkg/api/testapi:go_default_library", "//pkg/apis/batch:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/registry/registrytest:go_default_library", "//vendor/k8s.io/api/batch/v2alpha1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -21,6 +22,7 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/generic/testing:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/etcd/testing:go_default_library", ], ) @@ -28,8 +30,8 @@ go_test( go_library( name = "go_default_library", srcs = ["storage.go"], + importpath = "k8s.io/kubernetes/pkg/registry/batch/cronjob/storage", deps = [ - "//pkg/api:go_default_library", "//pkg/apis/batch:go_default_library", "//pkg/printers:go_default_library", "//pkg/printers/internalversion:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/batch/cronjob/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/batch/cronjob/storage/storage.go index 126602c44279..d8ae47f38900 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/batch/cronjob/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/batch/cronjob/storage/storage.go @@ -23,7 +23,6 @@ import ( 
"k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/pkg/printers" printersinternal "k8s.io/kubernetes/pkg/printers/internalversion" @@ -39,7 +38,6 @@ type REST struct { // NewREST returns a RESTStorage object that will work against CronJobs. func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { store := &genericregistry.Store{ - Copier: api.Scheme, NewFunc: func() runtime.Object { return &batch.CronJob{} }, NewListFunc: func() runtime.Object { return &batch.CronJobList{} }, DefaultQualifiedResource: batch.Resource("cronjobs"), @@ -83,6 +81,6 @@ func (r *StatusREST) Get(ctx genericapirequest.Context, name string, options *me } // Update alters the status subset of an object. -func (r *StatusREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) { - return r.store.Update(ctx, name, objInfo) +func (r *StatusREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (runtime.Object, bool, error) { + return r.store.Update(ctx, name, objInfo, createValidation, updateValidation) } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/batch/cronjob/strategy.go b/vendor/k8s.io/kubernetes/pkg/registry/batch/cronjob/strategy.go index 5b8b35ba0f7c..3544ca10b130 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/batch/cronjob/strategy.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/batch/cronjob/strategy.go @@ -22,7 +22,7 @@ import ( genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/apiserver/pkg/storage/names" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/api/pod" "k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/pkg/apis/batch/validation" @@ -35,11 +35,11 @@ type cronJobStrategy struct { } // Strategy is the default logic that applies when creating and updating CronJob objects. -var Strategy = cronJobStrategy{api.Scheme, names.SimpleNameGenerator} +var Strategy = cronJobStrategy{legacyscheme.Scheme, names.SimpleNameGenerator} // DefaultGarbageCollectionPolicy returns Orphan because that was the default // behavior before the server-side garbage collection was implemented. -func (cronJobStrategy) DefaultGarbageCollectionPolicy() rest.GarbageCollectionPolicy { +func (cronJobStrategy) DefaultGarbageCollectionPolicy(ctx genericapirequest.Context) rest.GarbageCollectionPolicy { return rest.OrphanDependents } @@ -87,7 +87,7 @@ func (cronJobStrategy) AllowCreateOnUpdate() bool { // ValidateUpdate is the default update validation for an end user. 
func (cronJobStrategy) ValidateUpdate(ctx genericapirequest.Context, obj, old runtime.Object) field.ErrorList { - return validation.ValidateCronJob(obj.(*batch.CronJob)) + return validation.ValidateCronJobUpdate(obj.(*batch.CronJob), old.(*batch.CronJob)) } type cronJobStatusStrategy struct { diff --git a/vendor/k8s.io/kubernetes/pkg/registry/batch/job/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/batch/job/BUILD index 76f4aaeb9c19..d5d3f23ffb32 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/batch/job/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/batch/job/BUILD @@ -12,8 +12,9 @@ go_library( "doc.go", "strategy.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/batch/job", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/api/pod:go_default_library", "//pkg/apis/batch:go_default_library", "//pkg/apis/batch/validation:go_default_library", @@ -33,12 +34,13 @@ go_library( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/batch/job", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", "//pkg/api/testapi:go_default_library", "//pkg/api/testing:go_default_library", "//pkg/apis/batch:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/batch/job/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/batch/job/storage/BUILD index 2e0ebb73015e..40d02ffecd8b 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/batch/job/storage/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/batch/job/storage/BUILD @@ -9,16 +9,18 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/batch/job/storage", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", "//pkg/apis/batch:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/registry/registrytest:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/generic/testing:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/etcd/testing:go_default_library", ], ) @@ -26,8 +28,8 @@ go_test( go_library( name = "go_default_library", srcs = ["storage.go"], + importpath = "k8s.io/kubernetes/pkg/registry/batch/job/storage", deps = [ - "//pkg/api:go_default_library", "//pkg/apis/batch:go_default_library", "//pkg/printers:go_default_library", "//pkg/printers/internalversion:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/batch/job/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/batch/job/storage/storage.go index 67905d3c865d..580cc5a7531c 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/batch/job/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/batch/job/storage/storage.go @@ -23,7 +23,6 @@ import ( "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/batch" 
"k8s.io/kubernetes/pkg/printers" printersinternal "k8s.io/kubernetes/pkg/printers/internalversion" @@ -54,7 +53,6 @@ type REST struct { // NewREST returns a RESTStorage object that will work against Jobs. func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { store := &genericregistry.Store{ - Copier: api.Scheme, NewFunc: func() runtime.Object { return &batch.Job{} }, NewListFunc: func() runtime.Object { return &batch.JobList{} }, PredicateFunc: job.MatchJob, @@ -100,6 +98,6 @@ func (r *StatusREST) Get(ctx genericapirequest.Context, name string, options *me } // Update alters the status subset of an object. -func (r *StatusREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) { - return r.store.Update(ctx, name, objInfo) +func (r *StatusREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (runtime.Object, bool, error) { + return r.store.Update(ctx, name, objInfo, createValidation, updateValidation) } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/batch/job/strategy.go b/vendor/k8s.io/kubernetes/pkg/registry/batch/job/strategy.go index b3f6ae33005b..aecf38a36d9f 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/batch/job/strategy.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/batch/job/strategy.go @@ -30,7 +30,7 @@ import ( "k8s.io/apiserver/pkg/registry/rest" "k8s.io/apiserver/pkg/storage" "k8s.io/apiserver/pkg/storage/names" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/api/pod" "k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/pkg/apis/batch/validation" @@ -43,11 +43,11 @@ type jobStrategy struct { } // Strategy is the default logic that applies when creating and updating Replication Controller objects. -var Strategy = jobStrategy{api.Scheme, names.SimpleNameGenerator} +var Strategy = jobStrategy{legacyscheme.Scheme, names.SimpleNameGenerator} // DefaultGarbageCollectionPolicy returns Orphan because that was the default // behavior before the server-side garbage collection was implemented. 
-func (jobStrategy) DefaultGarbageCollectionPolicy() rest.GarbageCollectionPolicy { +func (jobStrategy) DefaultGarbageCollectionPolicy(ctx genericapirequest.Context) rest.GarbageCollectionPolicy { return rest.OrphanDependents } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/batch/rest/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/batch/rest/BUILD index 8db1c1f76410..16011acb05b5 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/batch/rest/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/batch/rest/BUILD @@ -8,8 +8,9 @@ load( go_library( name = "go_default_library", srcs = ["storage_batch.go"], + importpath = "k8s.io/kubernetes/pkg/registry/batch/rest", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/batch:go_default_library", "//pkg/registry/batch/cronjob/storage:go_default_library", "//pkg/registry/batch/job/storage:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/batch/rest/storage_batch.go b/vendor/k8s.io/kubernetes/pkg/registry/batch/rest/storage_batch.go index 6cfd8f4a6451..93c453de38d5 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/batch/rest/storage_batch.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/batch/rest/storage_batch.go @@ -24,7 +24,7 @@ import ( "k8s.io/apiserver/pkg/registry/rest" genericapiserver "k8s.io/apiserver/pkg/server" serverstorage "k8s.io/apiserver/pkg/server/storage" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/batch" cronjobstore "k8s.io/kubernetes/pkg/registry/batch/cronjob/storage" jobstore "k8s.io/kubernetes/pkg/registry/batch/job/storage" @@ -33,7 +33,7 @@ import ( type RESTStorageProvider struct{} func (p RESTStorageProvider) NewRESTStorage(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter) (genericapiserver.APIGroupInfo, bool) { - apiGroupInfo := genericapiserver.NewDefaultAPIGroupInfo(batch.GroupName, api.Registry, api.Scheme, api.ParameterCodec, api.Codecs) + apiGroupInfo := genericapiserver.NewDefaultAPIGroupInfo(batch.GroupName, legacyscheme.Registry, legacyscheme.Scheme, legacyscheme.ParameterCodec, legacyscheme.Codecs) // If you add a version here, be sure to add an entry in `k8s.io/kubernetes/cmd/kube-apiserver/app/aggregator.go with specific priorities. 
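The RESTStorageProvider hunks repeat one more migration: the package-level Scheme, Registry and codec globals move from pkg/api to pkg/api/legacyscheme, and every group info is built from them. A brief annotated sketch; newBatchGroupInfo is a hypothetical wrapper, while the call itself mirrors the vendored signature:

package sketch // illustrative only, not part of the vendored diff

import (
	genericapiserver "k8s.io/apiserver/pkg/server"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	"k8s.io/kubernetes/pkg/apis/batch"
)

// newBatchGroupInfo spells out what each legacyscheme argument supplies.
func newBatchGroupInfo() genericapiserver.APIGroupInfo {
	return genericapiserver.NewDefaultAPIGroupInfo(
		batch.GroupName,
		legacyscheme.Registry,       // registered group/version metadata
		legacyscheme.Scheme,         // typing, defaulting and conversion
		legacyscheme.ParameterCodec, // query-parameter (de)serialization
		legacyscheme.Codecs,         // wire and storage serializers
	)
}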
// TODO refactor the plumbing to provide the information in the APIGroupInfo diff --git a/vendor/k8s.io/kubernetes/pkg/registry/cachesize/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/cachesize/BUILD index 6fc1a9e06a9e..5ff0aab3da16 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/cachesize/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/cachesize/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["cachesize.go"], + importpath = "k8s.io/kubernetes/pkg/registry/cachesize", deps = ["//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/registry/certificates/certificates/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/certificates/certificates/BUILD index 664b1617c9d7..91af9ed73cd1 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/certificates/certificates/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/certificates/certificates/BUILD @@ -13,8 +13,9 @@ go_library( "registry.go", "strategy.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/certificates/certificates", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/certificates:go_default_library", "//pkg/apis/certificates/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion:go_default_library", @@ -31,6 +32,7 @@ go_library( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/certificates/certificates", library = ":go_default_library", deps = [ "//pkg/apis/certificates:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/certificates/certificates/registry.go b/vendor/k8s.io/kubernetes/pkg/registry/certificates/certificates/registry.go index bff6e9dfb890..d3eea018bcea 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/certificates/certificates/registry.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/certificates/certificates/registry.go @@ -22,15 +22,14 @@ import ( "k8s.io/apimachinery/pkg/watch" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/certificates" ) // Registry is an interface for things that know how to store CSRs. 
type Registry interface { ListCSRs(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (*certificates.CertificateSigningRequestList, error) - CreateCSR(ctx genericapirequest.Context, csr *certificates.CertificateSigningRequest) error - UpdateCSR(ctx genericapirequest.Context, csr *certificates.CertificateSigningRequest) error + CreateCSR(ctx genericapirequest.Context, csr *certificates.CertificateSigningRequest, createValidation rest.ValidateObjectFunc) error + UpdateCSR(ctx genericapirequest.Context, csr *certificates.CertificateSigningRequest, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) error GetCSR(ctx genericapirequest.Context, csrID string, options *metav1.GetOptions) (*certificates.CertificateSigningRequest, error) DeleteCSR(ctx genericapirequest.Context, csrID string) error WatchCSRs(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (watch.Interface, error) @@ -56,13 +55,13 @@ func (s *storage) ListCSRs(ctx genericapirequest.Context, options *metainternalv return obj.(*certificates.CertificateSigningRequestList), nil } -func (s *storage) CreateCSR(ctx genericapirequest.Context, csr *certificates.CertificateSigningRequest) error { - _, err := s.Create(ctx, csr, false) +func (s *storage) CreateCSR(ctx genericapirequest.Context, csr *certificates.CertificateSigningRequest, createValidation rest.ValidateObjectFunc) error { + _, err := s.Create(ctx, csr, createValidation, false) return err } -func (s *storage) UpdateCSR(ctx genericapirequest.Context, csr *certificates.CertificateSigningRequest) error { - _, _, err := s.Update(ctx, csr.Name, rest.DefaultUpdatedObjectInfo(csr, api.Scheme)) +func (s *storage) UpdateCSR(ctx genericapirequest.Context, csr *certificates.CertificateSigningRequest, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) error { + _, _, err := s.Update(ctx, csr.Name, rest.DefaultUpdatedObjectInfo(csr), createValidation, updateValidation) return err } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/certificates/certificates/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/certificates/certificates/storage/BUILD index 5941a64aa5cb..0ee0023203cf 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/certificates/certificates/storage/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/certificates/certificates/storage/BUILD @@ -8,8 +8,8 @@ load( go_library( name = "go_default_library", srcs = ["storage.go"], + importpath = "k8s.io/kubernetes/pkg/registry/certificates/certificates/storage", deps = [ - "//pkg/api:go_default_library", "//pkg/apis/certificates:go_default_library", "//pkg/registry/certificates/certificates:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/certificates/certificates/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/certificates/certificates/storage/storage.go index 6d8a9a5b5ef8..4f69ac911894 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/certificates/certificates/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/certificates/certificates/storage/storage.go @@ -22,7 +22,6 @@ import ( "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/certificates" csrregistry "k8s.io/kubernetes/pkg/registry/certificates/certificates" ) @@ -35,7 +34,6 @@ type REST struct { // 
NewREST returns a registry which will store CertificateSigningRequest in the given helper func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST, *ApprovalREST) { store := &genericregistry.Store{ - Copier: api.Scheme, NewFunc: func() runtime.Object { return &certificates.CertificateSigningRequest{} }, NewListFunc: func() runtime.Object { return &certificates.CertificateSigningRequestList{} }, DefaultQualifiedResource: certificates.Resource("certificatesigningrequests"), @@ -80,8 +78,8 @@ func (r *StatusREST) New() runtime.Object { } // Update alters the status subset of an object. -func (r *StatusREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) { - return r.store.Update(ctx, name, objInfo) +func (r *StatusREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (runtime.Object, bool, error) { + return r.store.Update(ctx, name, objInfo, createValidation, updateValidation) } // ApprovalREST implements the REST endpoint for changing the approval state of a CSR. @@ -94,6 +92,6 @@ func (r *ApprovalREST) New() runtime.Object { } // Update alters the approval subset of an object. -func (r *ApprovalREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) { - return r.store.Update(ctx, name, objInfo) +func (r *ApprovalREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (runtime.Object, bool, error) { + return r.store.Update(ctx, name, objInfo, createValidation, updateValidation) } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/certificates/certificates/strategy.go b/vendor/k8s.io/kubernetes/pkg/registry/certificates/certificates/strategy.go index 8ef65a2c01ae..eff6a8965eaa 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/certificates/certificates/strategy.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/certificates/certificates/strategy.go @@ -19,11 +19,12 @@ package certificates import ( "fmt" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/validation/field" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/storage/names" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/certificates" "k8s.io/kubernetes/pkg/apis/certificates/validation" ) @@ -36,7 +37,7 @@ type csrStrategy struct { // csrStrategy is the default logic that applies when creating and updating // CSR objects. -var Strategy = csrStrategy{api.Scheme, names.SimpleNameGenerator} +var Strategy = csrStrategy{legacyscheme.Scheme, names.SimpleNameGenerator} // NamespaceScoped is true for CSRs. func (csrStrategy) NamespaceScoped() bool { @@ -142,6 +143,11 @@ func (csrStatusStrategy) PrepareForUpdate(ctx genericapirequest.Context, obj, ol // approval and certificate issuance. 
newCSR.Spec = oldCSR.Spec newCSR.Status.Conditions = oldCSR.Status.Conditions + for i := range newCSR.Status.Conditions { + if newCSR.Status.Conditions[i].LastUpdateTime.IsZero() { + newCSR.Status.Conditions[i].LastUpdateTime = metav1.Now() + } + } } func (csrStatusStrategy) ValidateUpdate(ctx genericapirequest.Context, obj, old runtime.Object) field.ErrorList { @@ -159,6 +165,9 @@ type csrApprovalStrategy struct { var ApprovalStrategy = csrApprovalStrategy{Strategy} +// PrepareForUpdate prepares the new certificate signing request by limiting +// the data that is updated to only the conditions. Also, if there is no +// existing LastUpdateTime on a condition, the current date/time will be set. func (csrApprovalStrategy) PrepareForUpdate(ctx genericapirequest.Context, obj, old runtime.Object) { newCSR := obj.(*certificates.CertificateSigningRequest) oldCSR := old.(*certificates.CertificateSigningRequest) @@ -166,6 +175,14 @@ func (csrApprovalStrategy) PrepareForUpdate(ctx genericapirequest.Context, obj, // Updating the approval should only update the conditions. newCSR.Spec = oldCSR.Spec oldCSR.Status.Conditions = newCSR.Status.Conditions + for i := range newCSR.Status.Conditions { + // The Conditions are an array of values, some of which may be + // pre-existing and unaltered by this update, so a LastUpdateTime is + // added only if one isn't already set. + if newCSR.Status.Conditions[i].LastUpdateTime.IsZero() { + newCSR.Status.Conditions[i].LastUpdateTime = metav1.Now() + } + } newCSR.Status = oldCSR.Status } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/certificates/rest/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/certificates/rest/BUILD index bce4c0e19907..6ab6c64cf927 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/certificates/rest/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/certificates/rest/BUILD @@ -8,8 +8,9 @@ load( go_library( name = "go_default_library", srcs = ["storage_certificates.go"], + importpath = "k8s.io/kubernetes/pkg/registry/certificates/rest", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/certificates:go_default_library", "//pkg/registry/certificates/certificates/storage:go_default_library", "//vendor/k8s.io/api/certificates/v1beta1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/certificates/rest/storage_certificates.go b/vendor/k8s.io/kubernetes/pkg/registry/certificates/rest/storage_certificates.go index aeef7cf707dd..3fe8915b5777 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/certificates/rest/storage_certificates.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/certificates/rest/storage_certificates.go @@ -22,7 +22,7 @@ import ( "k8s.io/apiserver/pkg/registry/rest" genericapiserver "k8s.io/apiserver/pkg/server" serverstorage "k8s.io/apiserver/pkg/server/storage" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/certificates" certificatestore "k8s.io/kubernetes/pkg/registry/certificates/certificates/storage" ) @@ -30,7 +30,7 @@ import ( type RESTStorageProvider struct{} func (p RESTStorageProvider) NewRESTStorage(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter) (genericapiserver.APIGroupInfo, bool) { - apiGroupInfo := genericapiserver.NewDefaultAPIGroupInfo(certificates.GroupName, api.Registry, api.Scheme, api.ParameterCodec, api.Codecs) + apiGroupInfo := genericapiserver.NewDefaultAPIGroupInfo(certificates.GroupName, legacyscheme.Registry, 
legacyscheme.Scheme, legacyscheme.ParameterCodec, legacyscheme.Codecs) // If you add a version here, be sure to add an entry in `k8s.io/kubernetes/cmd/kube-apiserver/app/aggregator.go with specific priorities. // TODO refactor the plumbing to provide the information in the APIGroupInfo diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/componentstatus/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/core/componentstatus/BUILD index 626fa2128378..48e5f6c73bf5 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/componentstatus/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/componentstatus/BUILD @@ -13,8 +13,9 @@ go_library( "rest.go", "validator.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/core/componentstatus", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/probe:go_default_library", "//pkg/probe/http:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion:go_default_library", @@ -32,9 +33,10 @@ go_test( "rest_test.go", "validator_test.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/core/componentstatus", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/probe:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/componentstatus/rest.go b/vendor/k8s.io/kubernetes/pkg/registry/core/componentstatus/rest.go index e0f0e18e4ec4..c67b42ead713 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/componentstatus/rest.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/componentstatus/rest.go @@ -25,7 +25,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/probe" ) diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/configmap/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/core/configmap/BUILD index bfde09681bf5..7af8141a14a0 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/configmap/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/configmap/BUILD @@ -13,9 +13,11 @@ go_library( "registry.go", "strategy.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/core/configmap", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/validation:go_default_library", + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", @@ -30,9 +32,10 @@ go_library( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/configmap", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/configmap/registry.go b/vendor/k8s.io/kubernetes/pkg/registry/core/configmap/registry.go index c5e8fcab9397..440300f2dcf8 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/configmap/registry.go +++ 
b/vendor/k8s.io/kubernetes/pkg/registry/core/configmap/registry.go @@ -22,7 +22,7 @@ import ( "k8s.io/apimachinery/pkg/watch" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) // Registry is an interface for things that know how to store ConfigMaps. @@ -30,8 +30,8 @@ type Registry interface { ListConfigMaps(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (*api.ConfigMapList, error) WatchConfigMaps(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (watch.Interface, error) GetConfigMap(ctx genericapirequest.Context, name string, options *metav1.GetOptions) (*api.ConfigMap, error) - CreateConfigMap(ctx genericapirequest.Context, cfg *api.ConfigMap) (*api.ConfigMap, error) - UpdateConfigMap(ctx genericapirequest.Context, cfg *api.ConfigMap) (*api.ConfigMap, error) + CreateConfigMap(ctx genericapirequest.Context, cfg *api.ConfigMap, createValidation rest.ValidateObjectFunc) (*api.ConfigMap, error) + UpdateConfigMap(ctx genericapirequest.Context, cfg *api.ConfigMap, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (*api.ConfigMap, error) DeleteConfigMap(ctx genericapirequest.Context, name string) error } @@ -68,8 +68,8 @@ func (s *storage) GetConfigMap(ctx genericapirequest.Context, name string, optio return obj.(*api.ConfigMap), nil } -func (s *storage) CreateConfigMap(ctx genericapirequest.Context, cfg *api.ConfigMap) (*api.ConfigMap, error) { - obj, err := s.Create(ctx, cfg, false) +func (s *storage) CreateConfigMap(ctx genericapirequest.Context, cfg *api.ConfigMap, createValidation rest.ValidateObjectFunc) (*api.ConfigMap, error) { + obj, err := s.Create(ctx, cfg, createValidation, false) if err != nil { return nil, err } @@ -77,8 +77,8 @@ func (s *storage) CreateConfigMap(ctx genericapirequest.Context, cfg *api.Config return obj.(*api.ConfigMap), nil } -func (s *storage) UpdateConfigMap(ctx genericapirequest.Context, cfg *api.ConfigMap) (*api.ConfigMap, error) { - obj, _, err := s.Update(ctx, cfg.Name, rest.DefaultUpdatedObjectInfo(cfg, api.Scheme)) +func (s *storage) UpdateConfigMap(ctx genericapirequest.Context, cfg *api.ConfigMap, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (*api.ConfigMap, error) { + obj, _, err := s.Update(ctx, cfg.Name, rest.DefaultUpdatedObjectInfo(cfg), createValidation, updateValidation) if err != nil { return nil, err } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/configmap/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/core/configmap/storage/BUILD index 3dc551c4c96e..948fa36104af 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/configmap/storage/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/configmap/storage/BUILD @@ -9,15 +9,17 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/configmap/storage", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/registry/registrytest:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", + 
"//vendor/k8s.io/apiserver/pkg/registry/generic/testing:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/etcd/testing:go_default_library", ], ) @@ -25,8 +27,9 @@ go_test( go_library( name = "go_default_library", srcs = ["storage.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/configmap/storage", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/registry/core/configmap:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/configmap/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/core/configmap/storage/storage.go index 0d344650f332..50855e364077 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/configmap/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/configmap/storage/storage.go @@ -21,7 +21,7 @@ import ( "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/registry/core/configmap" ) @@ -33,7 +33,6 @@ type REST struct { // NewREST returns a RESTStorage object that will work with ConfigMap objects. func NewREST(optsGetter generic.RESTOptionsGetter) *REST { store := &genericregistry.Store{ - Copier: api.Scheme, NewFunc: func() runtime.Object { return &api.ConfigMap{} }, NewListFunc: func() runtime.Object { return &api.ConfigMapList{} }, DefaultQualifiedResource: api.Resource("configmaps"), diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/configmap/strategy.go b/vendor/k8s.io/kubernetes/pkg/registry/core/configmap/strategy.go index b44da0074231..58f57f4f9cb7 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/configmap/strategy.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/configmap/strategy.go @@ -22,8 +22,9 @@ import ( genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/apiserver/pkg/storage/names" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/api/legacyscheme" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/validation" ) // strategy implements behavior for ConfigMap objects @@ -34,7 +35,7 @@ type strategy struct { // Strategy is the default logic that applies when creating and updating ConfigMap // objects via the REST API. 
-var Strategy = strategy{api.Scheme, names.SimpleNameGenerator} +var Strategy = strategy{legacyscheme.Scheme, names.SimpleNameGenerator} // Strategy should implement rest.RESTCreateStrategy var _ rest.RESTCreateStrategy = Strategy diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/endpoint/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/core/endpoint/BUILD index 7b69243684ef..775b1958394a 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/endpoint/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/endpoint/BUILD @@ -12,10 +12,12 @@ go_library( "registry.go", "strategy.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/core/endpoint", deps = [ - "//pkg/api:go_default_library", "//pkg/api/endpoints:go_default_library", - "//pkg/api/validation:go_default_library", + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/endpoint/registry.go b/vendor/k8s.io/kubernetes/pkg/registry/core/endpoint/registry.go index 8ac57b2a9e08..3cb03c4f9c71 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/endpoint/registry.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/endpoint/registry.go @@ -22,7 +22,7 @@ import ( "k8s.io/apimachinery/pkg/watch" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) // Registry is an interface for things that know how to store endpoints. @@ -30,7 +30,7 @@ type Registry interface { ListEndpoints(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (*api.EndpointsList, error) GetEndpoints(ctx genericapirequest.Context, name string, options *metav1.GetOptions) (*api.Endpoints, error) WatchEndpoints(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (watch.Interface, error) - UpdateEndpoints(ctx genericapirequest.Context, e *api.Endpoints) error + UpdateEndpoints(ctx genericapirequest.Context, e *api.Endpoints, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) error DeleteEndpoints(ctx genericapirequest.Context, name string) error } @@ -65,8 +65,8 @@ func (s *storage) GetEndpoints(ctx genericapirequest.Context, name string, optio return obj.(*api.Endpoints), nil } -func (s *storage) UpdateEndpoints(ctx genericapirequest.Context, endpoints *api.Endpoints) error { - _, _, err := s.Update(ctx, endpoints.Name, rest.DefaultUpdatedObjectInfo(endpoints, api.Scheme)) +func (s *storage) UpdateEndpoints(ctx genericapirequest.Context, endpoints *api.Endpoints, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) error { + _, _, err := s.Update(ctx, endpoints.Name, rest.DefaultUpdatedObjectInfo(endpoints), createValidation, updateValidation) return err } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/endpoint/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/core/endpoint/storage/BUILD index 29e0ec5a564f..93b2770d2540 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/endpoint/storage/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/endpoint/storage/BUILD @@ -9,15 +9,17 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + importpath = 
"k8s.io/kubernetes/pkg/registry/core/endpoint/storage", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/registry/registrytest:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/generic/testing:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/etcd/testing:go_default_library", ], ) @@ -25,8 +27,9 @@ go_test( go_library( name = "go_default_library", srcs = ["storage.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/endpoint/storage", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/printers:go_default_library", "//pkg/printers/internalversion:go_default_library", "//pkg/printers/storage:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/endpoint/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/core/endpoint/storage/storage.go index 055c5a755d79..902b7df0b55e 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/endpoint/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/endpoint/storage/storage.go @@ -21,7 +21,7 @@ import ( "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/printers" printersinternal "k8s.io/kubernetes/pkg/printers/internalversion" printerstorage "k8s.io/kubernetes/pkg/printers/storage" @@ -35,7 +35,6 @@ type REST struct { // NewREST returns a RESTStorage object that will work against endpoints. func NewREST(optsGetter generic.RESTOptionsGetter) *REST { store := &genericregistry.Store{ - Copier: api.Scheme, NewFunc: func() runtime.Object { return &api.Endpoints{} }, NewListFunc: func() runtime.Object { return &api.EndpointsList{} }, DefaultQualifiedResource: api.Resource("endpoints"), diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/endpoint/strategy.go b/vendor/k8s.io/kubernetes/pkg/registry/core/endpoint/strategy.go index b3a6d4ee78b4..93204e6ca624 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/endpoint/strategy.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/endpoint/strategy.go @@ -21,9 +21,10 @@ import ( "k8s.io/apimachinery/pkg/util/validation/field" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/storage/names" - "k8s.io/kubernetes/pkg/api" endptspkg "k8s.io/kubernetes/pkg/api/endpoints" - "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/api/legacyscheme" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/validation" ) // endpointsStrategy implements behavior for Endpoints @@ -34,7 +35,7 @@ type endpointsStrategy struct { // Strategy is the default logic that applies when creating and updating Endpoint // objects via the REST API. -var Strategy = endpointsStrategy{api.Scheme, names.SimpleNameGenerator} +var Strategy = endpointsStrategy{legacyscheme.Scheme, names.SimpleNameGenerator} // NamespaceScoped is true for endpoints. 
func (endpointsStrategy) NamespaceScoped() bool { diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/event/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/core/event/BUILD index 543db1db6136..8bdfa1d2d719 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/event/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/event/BUILD @@ -12,9 +12,11 @@ go_library( "doc.go", "strategy.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/core/event", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/validation:go_default_library", + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", @@ -30,11 +32,13 @@ go_library( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/event", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", "//pkg/api/testing:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/event/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/core/event/storage/BUILD index 7ad8da9f3e06..efb61b4676fb 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/event/storage/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/event/storage/BUILD @@ -9,13 +9,15 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/event/storage", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/registry/registrytest:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/generic/testing:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/etcd/testing:go_default_library", ], ) @@ -23,8 +25,9 @@ go_test( go_library( name = "go_default_library", srcs = ["storage.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/event/storage", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/registry/core/event:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/event/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/core/event/storage/storage.go index 44507d307b99..21fa08b3b739 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/event/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/event/storage/storage.go @@ -21,7 +21,7 @@ import ( "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/registry/core/event" ) @@ -42,7 +42,6 @@ func NewREST(optsGetter 
generic.RESTOptionsGetter, ttl uint64) *REST { opts.Decorator = generic.UndecoratedStorage // TODO use watchCacheSize=-1 to signal UndecoratedStorage store := &genericregistry.Store{ - Copier: api.Scheme, NewFunc: func() runtime.Object { return &api.Event{} }, NewListFunc: func() runtime.Object { return &api.EventList{} }, PredicateFunc: event.MatchEvent, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/event/strategy.go b/vendor/k8s.io/kubernetes/pkg/registry/core/event/strategy.go index e70518af04b1..0ca15dfa684b 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/event/strategy.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/event/strategy.go @@ -28,8 +28,9 @@ import ( "k8s.io/apiserver/pkg/registry/rest" "k8s.io/apiserver/pkg/storage" "k8s.io/apiserver/pkg/storage/names" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/api/legacyscheme" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/validation" ) type eventStrategy struct { @@ -39,9 +40,9 @@ type eventStrategy struct { // Strategy is the default logic that pplies when creating and updating // Event objects via the REST API. -var Strategy = eventStrategy{api.Scheme, names.SimpleNameGenerator} +var Strategy = eventStrategy{legacyscheme.Scheme, names.SimpleNameGenerator} -func (eventStrategy) DefaultGarbageCollectionPolicy() rest.GarbageCollectionPolicy { +func (eventStrategy) DefaultGarbageCollectionPolicy(ctx genericapirequest.Context) rest.GarbageCollectionPolicy { return rest.Unsupported } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/limitrange/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/core/limitrange/BUILD index 3399ff6c1c32..303471f795f7 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/limitrange/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/limitrange/BUILD @@ -11,9 +11,11 @@ go_library( "doc.go", "strategy.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/core/limitrange", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/validation:go_default_library", + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/limitrange/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/core/limitrange/storage/BUILD index e318bfc91465..6de922d9d2a5 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/limitrange/storage/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/limitrange/storage/BUILD @@ -9,9 +9,10 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/limitrange/storage", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/registry/registrytest:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -19,6 +20,7 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/generic/testing:go_default_library", 
"//vendor/k8s.io/apiserver/pkg/storage/etcd/testing:go_default_library", ], ) @@ -26,8 +28,9 @@ go_test( go_library( name = "go_default_library", srcs = ["storage.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/limitrange/storage", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/registry/core/limitrange:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/limitrange/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/core/limitrange/storage/storage.go index 208843b32b04..adb60f252db9 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/limitrange/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/limitrange/storage/storage.go @@ -21,7 +21,7 @@ import ( "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/registry/core/limitrange" ) @@ -32,7 +32,6 @@ type REST struct { // NewREST returns a RESTStorage object that will work against limitranges. func NewREST(optsGetter generic.RESTOptionsGetter) *REST { store := &genericregistry.Store{ - Copier: api.Scheme, NewFunc: func() runtime.Object { return &api.LimitRange{} }, NewListFunc: func() runtime.Object { return &api.LimitRangeList{} }, DefaultQualifiedResource: api.Resource("limitranges"), diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/limitrange/strategy.go b/vendor/k8s.io/kubernetes/pkg/registry/core/limitrange/strategy.go index 25b9f14135aa..b4a55075b982 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/limitrange/strategy.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/limitrange/strategy.go @@ -22,8 +22,9 @@ import ( "k8s.io/apimachinery/pkg/util/validation/field" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/storage/names" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/api/legacyscheme" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/validation" ) type limitrangeStrategy struct { @@ -33,7 +34,7 @@ type limitrangeStrategy struct { // Strategy is the default logic that applies when creating and updating // LimitRange objects via the REST API. 
-var Strategy = limitrangeStrategy{api.Scheme, names.SimpleNameGenerator} +var Strategy = limitrangeStrategy{legacyscheme.Scheme, names.SimpleNameGenerator} func (limitrangeStrategy) NamespaceScoped() bool { return true diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/namespace/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/core/namespace/BUILD index a01b00ac55d1..d9c43922006a 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/namespace/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/namespace/BUILD @@ -13,9 +13,11 @@ go_library( "registry.go", "strategy.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/core/namespace", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/validation:go_default_library", + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", @@ -34,11 +36,13 @@ go_library( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/namespace", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", "//pkg/api/testing:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/namespace/registry.go b/vendor/k8s.io/kubernetes/pkg/registry/core/namespace/registry.go index e71282515a2c..cbd470d15191 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/namespace/registry.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/namespace/registry.go @@ -22,7 +22,7 @@ import ( "k8s.io/apimachinery/pkg/watch" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) // Registry is an interface implemented by things that know how to store Namespace objects. 
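// Illustrative sketch (not part of the vendored patch): the recurring change across these
// registry wrappers (ConfigMap, Endpoints, Namespace, Node, and the Status/Finalize REST
// types below) is that Create and Update now thread caller-supplied validation hooks
// through to the generic store instead of taking no validation arguments. The following
// self-contained Go sketch models only that pattern; the names (object, validateObjectFunc,
// create) are hypothetical stand-ins, not the real rest.ValidateObjectFunc API.
package main

import "fmt"

type object struct{ name string }

// validateObjectFunc mirrors the shape of a create-time validation hook: it runs
// against the object before it is persisted and may reject it with an error.
type validateObjectFunc func(obj *object) error

// create stores obj after running the caller-supplied hook, defaulting to an
// "admit everything" hook when none is given.
func create(obj *object, validate validateObjectFunc) error {
	if validate == nil {
		validate = func(*object) error { return nil } // admit-all default
	}
	if err := validate(obj); err != nil {
		return err
	}
	fmt.Printf("stored %q\n", obj.name)
	return nil
}

func main() {
	// No hook supplied: behaves like the pre-change signatures.
	_ = create(&object{name: "kube-system"}, nil)

	// Hook supplied: the store refuses to persist the object when the hook errors,
	// which is the behavior the extra parameters in the hunks below enable.
	err := create(&object{name: "forbidden"}, func(o *object) error {
		return fmt.Errorf("admission hook rejected %q", o.name)
	})
	fmt.Println("second create:", err)
}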
@@ -30,8 +30,8 @@ type Registry interface { ListNamespaces(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (*api.NamespaceList, error) WatchNamespaces(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (watch.Interface, error) GetNamespace(ctx genericapirequest.Context, namespaceID string, options *metav1.GetOptions) (*api.Namespace, error) - CreateNamespace(ctx genericapirequest.Context, namespace *api.Namespace) error - UpdateNamespace(ctx genericapirequest.Context, namespace *api.Namespace) error + CreateNamespace(ctx genericapirequest.Context, namespace *api.Namespace, createValidation rest.ValidateObjectFunc) error + UpdateNamespace(ctx genericapirequest.Context, namespace *api.Namespace, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) error DeleteNamespace(ctx genericapirequest.Context, namespaceID string) error } @@ -66,13 +66,13 @@ func (s *storage) GetNamespace(ctx genericapirequest.Context, namespaceName stri return obj.(*api.Namespace), nil } -func (s *storage) CreateNamespace(ctx genericapirequest.Context, namespace *api.Namespace) error { - _, err := s.Create(ctx, namespace, false) +func (s *storage) CreateNamespace(ctx genericapirequest.Context, namespace *api.Namespace, createValidation rest.ValidateObjectFunc) error { + _, err := s.Create(ctx, namespace, createValidation, false) return err } -func (s *storage) UpdateNamespace(ctx genericapirequest.Context, namespace *api.Namespace) error { - _, _, err := s.Update(ctx, namespace.Name, rest.DefaultUpdatedObjectInfo(namespace, api.Scheme)) +func (s *storage) UpdateNamespace(ctx genericapirequest.Context, namespace *api.Namespace, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) error { + _, _, err := s.Update(ctx, namespace.Name, rest.DefaultUpdatedObjectInfo(namespace), createValidation, updateValidation) return err } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/namespace/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/core/namespace/storage/BUILD index bf8201732c91..51a01d93ca80 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/namespace/storage/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/namespace/storage/BUILD @@ -9,15 +9,18 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/namespace/storage", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/registry/registrytest:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/generic/testing:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/etcd/testing:go_default_library", ], ) @@ -25,8 +28,9 @@ go_test( go_library( name = "go_default_library", srcs = ["storage.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/namespace/storage", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/registry/core/namespace:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/namespace/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/core/namespace/storage/storage.go index da389c698d94..faa099a95109 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/namespace/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/namespace/storage/storage.go @@ -30,7 +30,7 @@ import ( "k8s.io/apiserver/pkg/registry/rest" "k8s.io/apiserver/pkg/storage" storageerr "k8s.io/apiserver/pkg/storage/errors" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/registry/core/namespace" ) @@ -53,7 +53,6 @@ type FinalizeREST struct { // NewREST returns a RESTStorage object that will work against namespaces. func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST, *FinalizeREST) { store := &genericregistry.Store{ - Copier: api.Scheme, NewFunc: func() runtime.Object { return &api.Namespace{} }, NewListFunc: func() runtime.Object { return &api.NamespaceList{} }, PredicateFunc: namespace.MatchNamespace, @@ -90,12 +89,12 @@ func (r *REST) List(ctx genericapirequest.Context, options *metainternalversion. return r.store.List(ctx, options) } -func (r *REST) Create(ctx genericapirequest.Context, obj runtime.Object, includeUninitialized bool) (runtime.Object, error) { - return r.store.Create(ctx, obj, includeUninitialized) +func (r *REST) Create(ctx genericapirequest.Context, obj runtime.Object, createValidation rest.ValidateObjectFunc, includeUninitialized bool) (runtime.Object, error) { + return r.store.Create(ctx, obj, createValidation, includeUninitialized) } -func (r *REST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) { - return r.store.Update(ctx, name, objInfo) +func (r *REST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (runtime.Object, bool, error) { + return r.store.Update(ctx, name, objInfo, createValidation, updateValidation) } func (r *REST) Get(ctx genericapirequest.Context, name string, options *metav1.GetOptions) (runtime.Object, error) { @@ -220,8 +219,8 @@ func (r *StatusREST) Get(ctx genericapirequest.Context, name string, options *me } // Update alters the status subset of an object. -func (r *StatusREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) { - return r.store.Update(ctx, name, objInfo) +func (r *StatusREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (runtime.Object, bool, error) { + return r.store.Update(ctx, name, objInfo, createValidation, updateValidation) } func (r *FinalizeREST) New() runtime.Object { @@ -229,6 +228,6 @@ func (r *FinalizeREST) New() runtime.Object { } // Update alters the status finalizers subset of an object. 
-func (r *FinalizeREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) { - return r.store.Update(ctx, name, objInfo) +func (r *FinalizeREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (runtime.Object, bool, error) { + return r.store.Update(ctx, name, objInfo, createValidation, updateValidation) } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/namespace/strategy.go b/vendor/k8s.io/kubernetes/pkg/registry/core/namespace/strategy.go index e0d05b18f8e5..53a139e5223f 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/namespace/strategy.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/namespace/strategy.go @@ -27,8 +27,9 @@ import ( "k8s.io/apiserver/pkg/registry/generic" apistorage "k8s.io/apiserver/pkg/storage" "k8s.io/apiserver/pkg/storage/names" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/api/legacyscheme" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/validation" ) // namespaceStrategy implements behavior for Namespaces @@ -39,7 +40,7 @@ type namespaceStrategy struct { // Strategy is the default logic that applies when creating and updating Namespace // objects via the REST API. -var Strategy = namespaceStrategy{api.Scheme, names.SimpleNameGenerator} +var Strategy = namespaceStrategy{legacyscheme.Scheme, names.SimpleNameGenerator} // NamespaceScoped is false for namespaces. func (namespaceStrategy) NamespaceScoped() bool { diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/node/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/core/node/BUILD index 40a4110eeace..ad3fb4995df4 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/node/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/node/BUILD @@ -13,9 +13,11 @@ go_library( "registry.go", "strategy.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/core/node", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/validation:go_default_library", + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/validation:go_default_library", "//pkg/features:go_default_library", "//pkg/kubelet/client:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", @@ -40,11 +42,13 @@ go_library( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/node", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", "//pkg/api/testing:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/node/registry.go b/vendor/k8s.io/kubernetes/pkg/registry/core/node/registry.go index 1e064fef5100..e4476ded4b9a 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/node/registry.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/node/registry.go @@ -22,14 +22,14 @@ import ( "k8s.io/apimachinery/pkg/watch" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) // Registry is an interface for things that know how to store node. 
type Registry interface { ListNodes(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (*api.NodeList, error) - CreateNode(ctx genericapirequest.Context, node *api.Node) error - UpdateNode(ctx genericapirequest.Context, node *api.Node) error + CreateNode(ctx genericapirequest.Context, node *api.Node, createValidation rest.ValidateObjectFunc) error + UpdateNode(ctx genericapirequest.Context, node *api.Node, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) error GetNode(ctx genericapirequest.Context, nodeID string, options *metav1.GetOptions) (*api.Node, error) DeleteNode(ctx genericapirequest.Context, nodeID string) error WatchNodes(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (watch.Interface, error) @@ -55,13 +55,13 @@ func (s *storage) ListNodes(ctx genericapirequest.Context, options *metainternal return obj.(*api.NodeList), nil } -func (s *storage) CreateNode(ctx genericapirequest.Context, node *api.Node) error { - _, err := s.Create(ctx, node, false) +func (s *storage) CreateNode(ctx genericapirequest.Context, node *api.Node, createValidation rest.ValidateObjectFunc) error { + _, err := s.Create(ctx, node, createValidation, false) return err } -func (s *storage) UpdateNode(ctx genericapirequest.Context, node *api.Node) error { - _, _, err := s.Update(ctx, node.Name, rest.DefaultUpdatedObjectInfo(node, api.Scheme)) +func (s *storage) UpdateNode(ctx genericapirequest.Context, node *api.Node, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) error { + _, _, err := s.Update(ctx, node.Name, rest.DefaultUpdatedObjectInfo(node), createValidation, updateValidation) return err } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/node/rest/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/core/node/rest/BUILD index 98eb8bf8c8ab..1425ee92026d 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/node/rest/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/node/rest/BUILD @@ -8,8 +8,9 @@ load( go_library( name = "go_default_library", srcs = ["proxy.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/node/rest", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/capabilities:go_default_library", "//pkg/kubelet/client:go_default_library", "//pkg/registry/core/node:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/node/rest/proxy.go b/vendor/k8s.io/kubernetes/pkg/registry/core/node/rest/proxy.go index 61356d97b85b..b314617f42e4 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/node/rest/proxy.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/node/rest/proxy.go @@ -27,7 +27,7 @@ import ( genericapirequest "k8s.io/apiserver/pkg/endpoints/request" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/capabilities" "k8s.io/kubernetes/pkg/kubelet/client" "k8s.io/kubernetes/pkg/registry/core/node" diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/node/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/core/node/storage/BUILD index dd75eb5e5277..1cb9081d2610 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/node/storage/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/node/storage/BUILD @@ -9,9 +9,10 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + importpath = 
"k8s.io/kubernetes/pkg/registry/core/node/storage", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/kubelet/client:go_default_library", "//pkg/registry/registrytest:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", @@ -20,6 +21,7 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/generic/testing:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/etcd/testing:go_default_library", ], ) @@ -27,9 +29,10 @@ go_test( go_library( name = "go_default_library", srcs = ["storage.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/node/storage", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/v1:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/v1:go_default_library", "//pkg/kubelet/client:go_default_library", "//pkg/printers:go_default_library", "//pkg/printers/internalversion:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/node/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/core/node/storage/storage.go index 77489a7d5558..a61e575885bc 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/node/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/node/storage/storage.go @@ -28,8 +28,8 @@ import ( "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" - k8s_api_v1 "k8s.io/kubernetes/pkg/api/v1" + api "k8s.io/kubernetes/pkg/apis/core" + k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1" "k8s.io/kubernetes/pkg/kubelet/client" "k8s.io/kubernetes/pkg/printers" printersinternal "k8s.io/kubernetes/pkg/printers/internalversion" @@ -68,14 +68,13 @@ func (r *StatusREST) Get(ctx genericapirequest.Context, name string, options *me } // Update alters the status subset of an object. -func (r *StatusREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) { - return r.store.Update(ctx, name, objInfo) +func (r *StatusREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (runtime.Object, bool, error) { + return r.store.Update(ctx, name, objInfo, createValidation, updateValidation) } // NewStorage returns a NodeStorage object that will work against nodes. func NewStorage(optsGetter generic.RESTOptionsGetter, kubeletClientConfig client.KubeletClientConfig, proxyTransport http.RoundTripper) (*NodeStorage, error) { store := &genericregistry.Store{ - Copier: api.Scheme, NewFunc: func() runtime.Object { return &api.Node{} }, NewListFunc: func() runtime.Object { return &api.NodeList{} }, PredicateFunc: node.MatchNode, @@ -113,7 +112,7 @@ func NewStorage(optsGetter generic.RESTOptionsGetter, kubeletClientConfig client } // TODO: Remove the conversion. 
Consider only return the NodeAddresses externalNode := &v1.Node{} - err = k8s_api_v1.Convert_api_Node_To_v1_Node(node, externalNode, nil) + err = k8s_api_v1.Convert_core_Node_To_v1_Node(node, externalNode, nil) if err != nil { return nil, fmt.Errorf("failed to convert to v1.Node: %v", err) } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/node/strategy.go b/vendor/k8s.io/kubernetes/pkg/registry/core/node/strategy.go index 9b959bb7ba63..4653b3f445d2 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/node/strategy.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/node/strategy.go @@ -35,8 +35,9 @@ import ( pkgstorage "k8s.io/apiserver/pkg/storage" "k8s.io/apiserver/pkg/storage/names" utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/api/legacyscheme" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/validation" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/kubelet/client" ) @@ -49,7 +50,7 @@ type nodeStrategy struct { // Nodes is the default logic that applies when creating and updating Node // objects. -var Strategy = nodeStrategy{api.Scheme, names.SimpleNameGenerator} +var Strategy = nodeStrategy{legacyscheme.Scheme, names.SimpleNameGenerator} // NamespaceScoped is false for nodes. func (nodeStrategy) NamespaceScoped() bool { diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolume/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolume/BUILD index 8ca7cf31ddf0..b98e547e596c 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolume/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolume/BUILD @@ -12,9 +12,12 @@ go_library( "doc.go", "strategy.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/core/persistentvolume", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/validation:go_default_library", + "//pkg/api/legacyscheme:go_default_library", + "//pkg/api/persistentvolume:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/validation:go_default_library", "//pkg/volume/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", @@ -30,11 +33,13 @@ go_library( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/persistentvolume", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", "//pkg/api/testing:go_default_library", + "//pkg/apis/core:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolume/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolume/storage/BUILD index 3b5b22fc194e..d8cf0d27fdc8 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolume/storage/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolume/storage/BUILD @@ -9,9 +9,10 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/persistentvolume/storage", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/registry/registrytest:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", @@ -22,6 
+23,7 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/generic/testing:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/etcd/testing:go_default_library", ], @@ -30,8 +32,9 @@ go_test( go_library( name = "go_default_library", srcs = ["storage.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/persistentvolume/storage", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/registry/core/persistentvolume:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolume/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolume/storage/storage.go index a24aa391039d..b8f7948f9b6c 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolume/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolume/storage/storage.go @@ -23,7 +23,7 @@ import ( "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/registry/core/persistentvolume" ) @@ -34,7 +34,6 @@ type REST struct { // NewREST returns a RESTStorage object that will work against persistent volumes. func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { store := &genericregistry.Store{ - Copier: api.Scheme, NewFunc: func() runtime.Object { return &api.PersistentVolume{} }, NewListFunc: func() runtime.Object { return &api.PersistentVolumeList{} }, PredicateFunc: persistentvolume.MatchPersistentVolumes, @@ -79,6 +78,6 @@ func (r *StatusREST) Get(ctx genericapirequest.Context, name string, options *me } // Update alters the status subset of an object. 
-func (r *StatusREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) { - return r.store.Update(ctx, name, objInfo) +func (r *StatusREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (runtime.Object, bool, error) { + return r.store.Update(ctx, name, objInfo, createValidation, updateValidation) } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolume/strategy.go b/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolume/strategy.go index 33dfd905f752..477bca2bad5b 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolume/strategy.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolume/strategy.go @@ -27,8 +27,10 @@ import ( "k8s.io/apiserver/pkg/registry/generic" "k8s.io/apiserver/pkg/storage" "k8s.io/apiserver/pkg/storage/names" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/api/legacyscheme" + pvutil "k8s.io/kubernetes/pkg/api/persistentvolume" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/validation" volumevalidation "k8s.io/kubernetes/pkg/volume/validation" ) @@ -40,7 +42,7 @@ type persistentvolumeStrategy struct { // Strategy is the default logic that applies when creating and updating PersistentVolume // objects via the REST API. -var Strategy = persistentvolumeStrategy{api.Scheme, names.SimpleNameGenerator} +var Strategy = persistentvolumeStrategy{legacyscheme.Scheme, names.SimpleNameGenerator} func (persistentvolumeStrategy) NamespaceScoped() bool { return false @@ -50,6 +52,8 @@ func (persistentvolumeStrategy) NamespaceScoped() bool { func (persistentvolumeStrategy) PrepareForCreate(ctx genericapirequest.Context, obj runtime.Object) { pv := obj.(*api.PersistentVolume) pv.Status = api.PersistentVolumeStatus{} + + pvutil.DropDisabledAlphaFields(&pv.Spec) } func (persistentvolumeStrategy) Validate(ctx genericapirequest.Context, obj runtime.Object) field.ErrorList { @@ -71,6 +75,9 @@ func (persistentvolumeStrategy) PrepareForUpdate(ctx genericapirequest.Context, newPv := obj.(*api.PersistentVolume) oldPv := old.(*api.PersistentVolume) newPv.Status = oldPv.Status + + pvutil.DropDisabledAlphaFields(&newPv.Spec) + pvutil.DropDisabledAlphaFields(&oldPv.Spec) } func (persistentvolumeStrategy) ValidateUpdate(ctx genericapirequest.Context, obj, old runtime.Object) field.ErrorList { diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim/BUILD index 156985bb5ee2..6da3f612b1ab 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim/BUILD @@ -12,9 +12,12 @@ go_library( "doc.go", "strategy.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/validation:go_default_library", + "//pkg/api/legacyscheme:go_default_library", + "//pkg/api/persistentvolumeclaim:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", @@ -29,11 +32,13 @@ go_library( go_test( name = 
"go_default_test", srcs = ["strategy_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", "//pkg/api/testing:go_default_library", + "//pkg/apis/core:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim/storage/BUILD index 2b6160a697cd..0ef87b1507ce 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim/storage/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim/storage/BUILD @@ -9,9 +9,10 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim/storage", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/registry/registrytest:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", @@ -22,6 +23,7 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/generic/testing:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/etcd/testing:go_default_library", ], @@ -30,8 +32,9 @@ go_test( go_library( name = "go_default_library", srcs = ["storage.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim/storage", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/registry/core/persistentvolumeclaim:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim/storage/storage.go index 78c6a1e04111..c361d59a47ad 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim/storage/storage.go @@ -23,7 +23,7 @@ import ( "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim" ) @@ -34,7 +34,6 @@ type REST struct { // NewREST returns a RESTStorage object that will work against persistent volume claims. func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { store := &genericregistry.Store{ - Copier: api.Scheme, NewFunc: func() runtime.Object { return &api.PersistentVolumeClaim{} }, NewListFunc: func() runtime.Object { return &api.PersistentVolumeClaimList{} }, PredicateFunc: persistentvolumeclaim.MatchPersistentVolumeClaim, @@ -79,6 +78,6 @@ func (r *StatusREST) Get(ctx genericapirequest.Context, name string, options *me } // Update alters the status subset of an object. 
-func (r *StatusREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) { - return r.store.Update(ctx, name, objInfo) +func (r *StatusREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (runtime.Object, bool, error) { + return r.store.Update(ctx, name, objInfo, createValidation, updateValidation) } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim/strategy.go b/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim/strategy.go index c653d505c8e1..1a63e1faea81 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim/strategy.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim/strategy.go @@ -27,8 +27,10 @@ import ( "k8s.io/apiserver/pkg/registry/generic" "k8s.io/apiserver/pkg/storage" "k8s.io/apiserver/pkg/storage/names" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/api/legacyscheme" + pvcutil "k8s.io/kubernetes/pkg/api/persistentvolumeclaim" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/validation" ) // persistentvolumeclaimStrategy implements behavior for PersistentVolumeClaim objects @@ -39,7 +41,7 @@ type persistentvolumeclaimStrategy struct { // Strategy is the default logic that applies when creating and updating PersistentVolumeClaim // objects via the REST API. -var Strategy = persistentvolumeclaimStrategy{api.Scheme, names.SimpleNameGenerator} +var Strategy = persistentvolumeclaimStrategy{legacyscheme.Scheme, names.SimpleNameGenerator} func (persistentvolumeclaimStrategy) NamespaceScoped() bool { return true @@ -47,8 +49,10 @@ func (persistentvolumeclaimStrategy) NamespaceScoped() bool { // PrepareForCreate clears the Status field which is not allowed to be set by end users on creation. 
func (persistentvolumeclaimStrategy) PrepareForCreate(ctx genericapirequest.Context, obj runtime.Object) { - pv := obj.(*api.PersistentVolumeClaim) - pv.Status = api.PersistentVolumeClaimStatus{} + pvc := obj.(*api.PersistentVolumeClaim) + pvc.Status = api.PersistentVolumeClaimStatus{} + + pvcutil.DropDisabledAlphaFields(&pvc.Spec) } func (persistentvolumeclaimStrategy) Validate(ctx genericapirequest.Context, obj runtime.Object) field.ErrorList { @@ -69,6 +73,9 @@ func (persistentvolumeclaimStrategy) PrepareForUpdate(ctx genericapirequest.Cont newPvc := obj.(*api.PersistentVolumeClaim) oldPvc := old.(*api.PersistentVolumeClaim) newPvc.Status = oldPvc.Status + + pvcutil.DropDisabledAlphaFields(&newPvc.Spec) + pvcutil.DropDisabledAlphaFields(&oldPvc.Spec) } func (persistentvolumeclaimStrategy) ValidateUpdate(ctx genericapirequest.Context, obj, old runtime.Object) field.ErrorList { diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/pod/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/core/pod/BUILD index af48252b2788..cf35e0ba5bb1 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/pod/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/pod/BUILD @@ -12,11 +12,13 @@ go_library( "doc.go", "strategy.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/core/pod", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/helper/qos:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/api/pod:go_default_library", - "//pkg/api/validation:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/helper/qos:go_default_library", + "//pkg/apis/core/validation:go_default_library", "//pkg/kubelet/client:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", @@ -40,11 +42,13 @@ go_library( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/pod", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", "//pkg/api/testing:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/kubelet/client:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/pod/rest/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/core/pod/rest/BUILD index 3f9467129d38..122911086bc5 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/pod/rest/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/pod/rest/BUILD @@ -12,9 +12,10 @@ go_library( "log.go", "subresources.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/core/pod/rest", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/validation:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/validation:go_default_library", "//pkg/capabilities:go_default_library", "//pkg/kubelet/client:go_default_library", "//pkg/registry/core/pod:go_default_library", @@ -34,9 +35,10 @@ go_library( go_test( name = "go_default_test", srcs = ["log_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/pod/rest", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/registry/registrytest:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", 
"//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/pod/rest/log.go b/vendor/k8s.io/kubernetes/pkg/registry/core/pod/rest/log.go index cb637257a442..e33f2a7fe93a 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/pod/rest/log.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/pod/rest/log.go @@ -25,8 +25,8 @@ import ( genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" genericrest "k8s.io/apiserver/pkg/registry/generic/rest" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/validation" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/validation" "k8s.io/kubernetes/pkg/kubelet/client" "k8s.io/kubernetes/pkg/registry/core/pod" ) diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/pod/rest/subresources.go b/vendor/k8s.io/kubernetes/pkg/registry/core/pod/rest/subresources.go index b152688a101f..2df827976836 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/pod/rest/subresources.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/pod/rest/subresources.go @@ -29,7 +29,7 @@ import ( genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/capabilities" "k8s.io/kubernetes/pkg/kubelet/client" "k8s.io/kubernetes/pkg/registry/core/pod" diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/pod/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/core/pod/storage/BUILD index d5c04154d588..8545fae94453 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/pod/storage/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/pod/storage/BUILD @@ -9,9 +9,10 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/pod/storage", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/registry/registrytest:go_default_library", "//pkg/securitycontext:go_default_library", "//vendor/golang.org/x/net/context:go_default_library", @@ -27,6 +28,7 @@ go_test( "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", "//vendor/k8s.io/apiserver/pkg/features:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/generic/testing:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/errors:go_default_library", @@ -42,10 +44,11 @@ go_library( "eviction.go", "storage.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/core/pod/storage", deps = [ - "//pkg/api:go_default_library", "//pkg/api/pod:go_default_library", - "//pkg/api/validation:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/validation:go_default_library", "//pkg/apis/policy:go_default_library", "//pkg/client/clientset_generated/internalclientset/typed/policy/internalversion:go_default_library", "//pkg/kubelet/client:go_default_library", @@ -58,6 +61,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + 
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/pod/storage/eviction.go b/vendor/k8s.io/kubernetes/pkg/registry/core/pod/storage/eviction.go index 36b97350ffc9..ff03d96dd5e5 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/pod/storage/eviction.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/pod/storage/eviction.go @@ -24,12 +24,13 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/wait" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/client-go/util/retry" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/policy" policyclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion" ) @@ -64,6 +65,12 @@ type EvictionREST struct { } var _ = rest.Creater(&EvictionREST{}) +var _ = rest.GroupVersionKindProvider(&EvictionREST{}) + +// GroupVersionKind specifies a particular GroupVersionKind to discovery +func (r *EvictionREST) GroupVersionKind(containingGV schema.GroupVersion) schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "policy", Version: "v1beta1", Kind: "Eviction"} +} // New creates a new eviction resource func (r *EvictionREST) New() runtime.Object { @@ -71,7 +78,7 @@ func (r *EvictionREST) New() runtime.Object { } // Create attempts to create a new eviction. That is, it tries to evict a pod. 
-func (r *EvictionREST) Create(ctx genericapirequest.Context, obj runtime.Object, includeUninitialized bool) (runtime.Object, error) { +func (r *EvictionREST) Create(ctx genericapirequest.Context, obj runtime.Object, createValidation rest.ValidateObjectFunc, includeUninitialized bool) (runtime.Object, error) { eviction := obj.(*policy.Eviction) obj, err := r.store.Get(ctx, eviction.Name, &metav1.GetOptions{}) diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/pod/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/core/pod/storage/storage.go index 6cb38101d216..129fd1a43d0c 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/pod/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/pod/storage/storage.go @@ -30,9 +30,9 @@ import ( "k8s.io/apiserver/pkg/registry/rest" "k8s.io/apiserver/pkg/storage" storeerr "k8s.io/apiserver/pkg/storage/errors" - "k8s.io/kubernetes/pkg/api" podutil "k8s.io/kubernetes/pkg/api/pod" - "k8s.io/kubernetes/pkg/api/validation" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/validation" policyclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion" "k8s.io/kubernetes/pkg/kubelet/client" "k8s.io/kubernetes/pkg/printers" @@ -65,7 +65,6 @@ type REST struct { func NewStorage(optsGetter generic.RESTOptionsGetter, k client.ConnectionInfoGetter, proxyTransport http.RoundTripper, podDisruptionBudgetClient policyclient.PodDisruptionBudgetsGetter) PodStorage { store := &genericregistry.Store{ - Copier: api.Scheme, NewFunc: func() runtime.Object { return &api.Pod{} }, NewListFunc: func() runtime.Object { return &api.PodList{} }, PredicateFunc: pod.MatchPod, @@ -136,7 +135,7 @@ func (r *BindingREST) New() runtime.Object { var _ = rest.Creater(&BindingREST{}) // Create ensures a pod is bound to a specific host. -func (r *BindingREST) Create(ctx genericapirequest.Context, obj runtime.Object, includeUninitialized bool) (out runtime.Object, err error) { +func (r *BindingREST) Create(ctx genericapirequest.Context, obj runtime.Object, createValidation rest.ValidateObjectFunc, includeUninitialized bool) (out runtime.Object, err error) { binding := obj.(*api.Binding) // TODO: move me to a binding strategy @@ -213,6 +212,6 @@ func (r *StatusREST) Get(ctx genericapirequest.Context, name string, options *me } // Update alters the status subset of an object. 
-func (r *StatusREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) { - return r.store.Update(ctx, name, objInfo) +func (r *StatusREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (runtime.Object, bool, error) { + return r.store.Update(ctx, name, objInfo, createValidation, updateValidation) } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/pod/strategy.go b/vendor/k8s.io/kubernetes/pkg/registry/core/pod/strategy.go index bb6e0afb10fc..5cd0eb03cf73 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/pod/strategy.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/pod/strategy.go @@ -41,10 +41,11 @@ import ( "k8s.io/apiserver/pkg/storage" "k8s.io/apiserver/pkg/storage/names" utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/helper/qos" + "k8s.io/kubernetes/pkg/api/legacyscheme" podutil "k8s.io/kubernetes/pkg/api/pod" - "k8s.io/kubernetes/pkg/api/validation" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/helper/qos" + "k8s.io/kubernetes/pkg/apis/core/validation" "k8s.io/kubernetes/pkg/kubelet/client" ) @@ -56,7 +57,7 @@ type podStrategy struct { // Strategy is the default logic that applies when creating and updating Pod // objects via the REST API. -var Strategy = podStrategy{api.Scheme, names.SimpleNameGenerator} +var Strategy = podStrategy{legacyscheme.Scheme, names.SimpleNameGenerator} // NamespaceScoped is true for pods. func (podStrategy) NamespaceScoped() bool { diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/podtemplate/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/core/podtemplate/BUILD index 9f13517597c9..5841503ea288 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/podtemplate/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/podtemplate/BUILD @@ -11,10 +11,12 @@ go_library( "doc.go", "strategy.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/core/podtemplate", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/api/pod:go_default_library", - "//pkg/api/validation:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/podtemplate/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/core/podtemplate/storage/BUILD index 5a849640aaed..103296b4a236 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/podtemplate/storage/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/podtemplate/storage/BUILD @@ -9,15 +9,17 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/podtemplate/storage", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/registry/registrytest:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", 
"//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/generic/testing:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/etcd/testing:go_default_library", ], ) @@ -25,8 +27,9 @@ go_test( go_library( name = "go_default_library", srcs = ["storage.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/podtemplate/storage", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/printers:go_default_library", "//pkg/printers/internalversion:go_default_library", "//pkg/printers/storage:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/podtemplate/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/core/podtemplate/storage/storage.go index b28a450360f2..1767186b999f 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/podtemplate/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/podtemplate/storage/storage.go @@ -20,7 +20,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/printers" printersinternal "k8s.io/kubernetes/pkg/printers/internalversion" printerstorage "k8s.io/kubernetes/pkg/printers/storage" @@ -34,7 +34,6 @@ type REST struct { // NewREST returns a RESTStorage object that will work against pod templates. func NewREST(optsGetter generic.RESTOptionsGetter) *REST { store := &genericregistry.Store{ - Copier: api.Scheme, NewFunc: func() runtime.Object { return &api.PodTemplate{} }, NewListFunc: func() runtime.Object { return &api.PodTemplateList{} }, DefaultQualifiedResource: api.Resource("podtemplates"), diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/podtemplate/strategy.go b/vendor/k8s.io/kubernetes/pkg/registry/core/podtemplate/strategy.go index 30ff5995509b..0f19250d97fd 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/podtemplate/strategy.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/podtemplate/strategy.go @@ -21,9 +21,10 @@ import ( "k8s.io/apimachinery/pkg/util/validation/field" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/storage/names" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/api/pod" - "k8s.io/kubernetes/pkg/api/validation" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/validation" ) // podTemplateStrategy implements behavior for PodTemplates @@ -34,7 +35,7 @@ type podTemplateStrategy struct { // Strategy is the default logic that applies when creating and updating PodTemplate // objects via the REST API. -var Strategy = podTemplateStrategy{api.Scheme, names.SimpleNameGenerator} +var Strategy = podTemplateStrategy{legacyscheme.Scheme, names.SimpleNameGenerator} // NamespaceScoped is true for pod templates. 
func (podTemplateStrategy) NamespaceScoped() bool { diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/rangeallocation/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/core/rangeallocation/BUILD index bf1d577e3a75..8b55dd09d809 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/rangeallocation/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/rangeallocation/BUILD @@ -11,7 +11,8 @@ go_library( "doc.go", "registry.go", ], - deps = ["//pkg/api:go_default_library"], + importpath = "k8s.io/kubernetes/pkg/registry/core/rangeallocation", + deps = ["//pkg/apis/core:go_default_library"], ) filegroup( diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/rangeallocation/registry.go b/vendor/k8s.io/kubernetes/pkg/registry/core/rangeallocation/registry.go index 5fd3aa3af777..d67b52f86f7e 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/rangeallocation/registry.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/rangeallocation/registry.go @@ -17,7 +17,7 @@ limitations under the License. package rangeallocation import ( - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) // RangeRegistry is a registry that can retrieve or persist a RangeAllocation object. diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/replicationcontroller/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/core/replicationcontroller/BUILD index cbe1d65d1817..931c1bb2877f 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/replicationcontroller/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/replicationcontroller/BUILD @@ -13,11 +13,13 @@ go_library( "registry.go", "strategy.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/core/replicationcontroller", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/helper:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/api/pod:go_default_library", - "//pkg/api/validation:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/helper:go_default_library", + "//pkg/apis/core/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -37,11 +39,13 @@ go_library( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/replicationcontroller", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", "//pkg/api/testing:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/replicationcontroller/registry.go b/vendor/k8s.io/kubernetes/pkg/registry/core/replicationcontroller/registry.go index df0d7418d02c..199981db0974 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/replicationcontroller/registry.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/replicationcontroller/registry.go @@ -26,7 +26,7 @@ import ( "k8s.io/apimachinery/pkg/watch" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) // Registry is an interface for things that know how to store ReplicationControllers. 
@@ -34,8 +34,8 @@ type Registry interface { ListControllers(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (*api.ReplicationControllerList, error) WatchControllers(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (watch.Interface, error) GetController(ctx genericapirequest.Context, controllerID string, options *metav1.GetOptions) (*api.ReplicationController, error) - CreateController(ctx genericapirequest.Context, controller *api.ReplicationController) (*api.ReplicationController, error) - UpdateController(ctx genericapirequest.Context, controller *api.ReplicationController) (*api.ReplicationController, error) + CreateController(ctx genericapirequest.Context, controller *api.ReplicationController, createValidation rest.ValidateObjectFunc) (*api.ReplicationController, error) + UpdateController(ctx genericapirequest.Context, controller *api.ReplicationController, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (*api.ReplicationController, error) DeleteController(ctx genericapirequest.Context, controllerID string) error } @@ -73,16 +73,16 @@ func (s *storage) GetController(ctx genericapirequest.Context, controllerID stri return obj.(*api.ReplicationController), nil } -func (s *storage) CreateController(ctx genericapirequest.Context, controller *api.ReplicationController) (*api.ReplicationController, error) { - obj, err := s.Create(ctx, controller, false) +func (s *storage) CreateController(ctx genericapirequest.Context, controller *api.ReplicationController, createValidation rest.ValidateObjectFunc) (*api.ReplicationController, error) { + obj, err := s.Create(ctx, controller, createValidation, false) if err != nil { return nil, err } return obj.(*api.ReplicationController), nil } -func (s *storage) UpdateController(ctx genericapirequest.Context, controller *api.ReplicationController) (*api.ReplicationController, error) { - obj, _, err := s.Update(ctx, controller.Name, rest.DefaultUpdatedObjectInfo(controller, api.Scheme)) +func (s *storage) UpdateController(ctx genericapirequest.Context, controller *api.ReplicationController, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (*api.ReplicationController, error) { + obj, _, err := s.Update(ctx, controller.Name, rest.DefaultUpdatedObjectInfo(controller), createValidation, updateValidation) if err != nil { return nil, err } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/replicationcontroller/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/core/replicationcontroller/storage/BUILD index 2df6d23af29e..d5177a1ff909 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/replicationcontroller/storage/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/replicationcontroller/storage/BUILD @@ -9,10 +9,11 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/replicationcontroller/storage", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", "//pkg/apis/autoscaling:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/registry/registrytest:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", @@ -23,6 +24,7 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", 
"//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/generic/testing:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/etcd/testing:go_default_library", ], @@ -31,10 +33,13 @@ go_test( go_library( name = "go_default_library", srcs = ["storage.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/replicationcontroller/storage", deps = [ - "//pkg/api:go_default_library", "//pkg/apis/autoscaling:go_default_library", + "//pkg/apis/autoscaling/v1:go_default_library", "//pkg/apis/autoscaling/validation:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/extensions/v1beta1:go_default_library", "//pkg/printers:go_default_library", "//pkg/printers/internalversion:go_default_library", "//pkg/printers/storage:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/replicationcontroller/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/core/replicationcontroller/storage/storage.go index d60cb82dcf7f..ead95842d96e 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/replicationcontroller/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/replicationcontroller/storage/storage.go @@ -30,9 +30,11 @@ import ( "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/autoscaling" + autoscalingv1 "k8s.io/kubernetes/pkg/apis/autoscaling/v1" "k8s.io/kubernetes/pkg/apis/autoscaling/validation" + api "k8s.io/kubernetes/pkg/apis/core" + extensionsv1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" "k8s.io/kubernetes/pkg/printers" printersinternal "k8s.io/kubernetes/pkg/printers/internalversion" printerstorage "k8s.io/kubernetes/pkg/printers/storage" @@ -64,7 +66,6 @@ type REST struct { // NewREST returns a RESTStorage object that will work against replication controllers. func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { store := &genericregistry.Store{ - Copier: api.Scheme, NewFunc: func() runtime.Object { return &api.ReplicationController{} }, NewListFunc: func() runtime.Object { return &api.ReplicationControllerList{} }, PredicateFunc: replicationcontroller.MatchController, @@ -118,8 +119,8 @@ func (r *StatusREST) Get(ctx genericapirequest.Context, name string, options *me } // Update alters the status subset of an object. 
-func (r *StatusREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) { - return r.store.Update(ctx, name, objInfo) +func (r *StatusREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (runtime.Object, bool, error) { + return r.store.Update(ctx, name, objInfo, createValidation, updateValidation) } type ScaleREST struct { @@ -130,8 +131,13 @@ type ScaleREST struct { var _ = rest.Patcher(&ScaleREST{}) var _ = rest.GroupVersionKindProvider(&ScaleREST{}) -func (r *ScaleREST) GroupVersionKind() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "autoscaling", Version: "v1", Kind: "Scale"} +func (r *ScaleREST) GroupVersionKind(containingGV schema.GroupVersion) schema.GroupVersionKind { + switch containingGV { + case extensionsv1beta1.SchemeGroupVersion: + return extensionsv1beta1.SchemeGroupVersion.WithKind("Scale") + default: + return autoscalingv1.SchemeGroupVersion.WithKind("Scale") + } } // New creates a new Scale object @@ -147,13 +153,14 @@ func (r *ScaleREST) Get(ctx genericapirequest.Context, name string, options *met return scaleFromRC(rc), nil } -func (r *ScaleREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) { +func (r *ScaleREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (runtime.Object, bool, error) { rc, err := r.registry.GetController(ctx, name, &metav1.GetOptions{}) if err != nil { return nil, false, errors.NewNotFound(autoscaling.Resource("replicationcontrollers/scale"), name) } oldScale := scaleFromRC(rc) + // TODO: should this pass validation? obj, err := objInfo.UpdatedObject(ctx, oldScale) if err != nil { return nil, false, err @@ -173,7 +180,7 @@ func (r *ScaleREST) Update(ctx genericapirequest.Context, name string, objInfo r rc.Spec.Replicas = scale.Spec.Replicas rc.ResourceVersion = scale.ResourceVersion - rc, err = r.registry.UpdateController(ctx, rc) + rc, err = r.registry.UpdateController(ctx, rc, createValidation, updateValidation) if err != nil { return nil, false, err } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/replicationcontroller/strategy.go b/vendor/k8s.io/kubernetes/pkg/registry/core/replicationcontroller/strategy.go index 2047afb6d3cf..ede6e6724817 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/replicationcontroller/strategy.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/replicationcontroller/strategy.go @@ -33,10 +33,11 @@ import ( "k8s.io/apiserver/pkg/registry/rest" apistorage "k8s.io/apiserver/pkg/storage" "k8s.io/apiserver/pkg/storage/names" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/helper" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/api/pod" - "k8s.io/kubernetes/pkg/api/validation" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/helper" + "k8s.io/kubernetes/pkg/apis/core/validation" ) // rcStrategy implements verification logic for Replication Controllers. @@ -46,11 +47,11 @@ type rcStrategy struct { } // Strategy is the default logic that applies when creating and updating Replication Controller objects. 
-var Strategy = rcStrategy{api.Scheme, names.SimpleNameGenerator} +var Strategy = rcStrategy{legacyscheme.Scheme, names.SimpleNameGenerator} // DefaultGarbageCollectionPolicy returns Orphan because that was the default // behavior before the server-side garbage collection was implemented. -func (rcStrategy) DefaultGarbageCollectionPolicy() rest.GarbageCollectionPolicy { +func (rcStrategy) DefaultGarbageCollectionPolicy(ctx genericapirequest.Context) rest.GarbageCollectionPolicy { return rest.OrphanDependents } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/resourcequota/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/core/resourcequota/BUILD index a7d75810c4be..77150f2f29b1 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/resourcequota/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/resourcequota/BUILD @@ -12,9 +12,11 @@ go_library( "doc.go", "strategy.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/core/resourcequota", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/validation:go_default_library", + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", @@ -25,9 +27,10 @@ go_library( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/resourcequota", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/resourcequota/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/core/resourcequota/storage/BUILD index 0afd453990e9..38b63c3b8ab4 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/resourcequota/storage/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/resourcequota/storage/BUILD @@ -9,9 +9,10 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/resourcequota/storage", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/registry/registrytest:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", @@ -21,6 +22,7 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/generic/testing:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/etcd/testing:go_default_library", ], @@ -29,8 +31,9 @@ go_test( go_library( name = "go_default_library", srcs = ["storage.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/resourcequota/storage", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/registry/core/resourcequota:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/resourcequota/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/core/resourcequota/storage/storage.go index 11cdb8e50c68..391cb17f067f 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/resourcequota/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/resourcequota/storage/storage.go @@ -23,7 +23,7 @@ import ( "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/registry/core/resourcequota" ) @@ -34,7 +34,6 @@ type REST struct { // NewREST returns a RESTStorage object that will work against resource quotas. func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { store := &genericregistry.Store{ - Copier: api.Scheme, NewFunc: func() runtime.Object { return &api.ResourceQuota{} }, NewListFunc: func() runtime.Object { return &api.ResourceQuotaList{} }, DefaultQualifiedResource: api.Resource("resourcequotas"), @@ -78,6 +77,6 @@ func (r *StatusREST) Get(ctx genericapirequest.Context, name string, options *me } // Update alters the status subset of an object. -func (r *StatusREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) { - return r.store.Update(ctx, name, objInfo) +func (r *StatusREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (runtime.Object, bool, error) { + return r.store.Update(ctx, name, objInfo, createValidation, updateValidation) } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/resourcequota/strategy.go b/vendor/k8s.io/kubernetes/pkg/registry/core/resourcequota/strategy.go index e9c26c009caa..eecfc59d9366 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/resourcequota/strategy.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/resourcequota/strategy.go @@ -21,8 +21,9 @@ import ( "k8s.io/apimachinery/pkg/util/validation/field" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/storage/names" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/api/legacyscheme" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/validation" ) // resourcequotaStrategy implements behavior for ResourceQuota objects @@ -33,7 +34,7 @@ type resourcequotaStrategy struct { // Strategy is the default logic that applies when creating and updating ResourceQuota // objects via the REST API. -var Strategy = resourcequotaStrategy{api.Scheme, names.SimpleNameGenerator} +var Strategy = resourcequotaStrategy{legacyscheme.Scheme, names.SimpleNameGenerator} // NamespaceScoped is true for resourcequotas. 
func (resourcequotaStrategy) NamespaceScoped() bool { diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/rest/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/core/rest/BUILD index d18bc4989b59..9e81ccda7793 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/rest/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/rest/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["storage_core_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/rest", library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", @@ -20,8 +21,10 @@ go_test( go_library( name = "go_default_library", srcs = ["storage_core.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/rest", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/client/clientset_generated/internalclientset/typed/policy/internalversion:go_default_library", "//pkg/kubelet/client:go_default_library", "//pkg/master/ports:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/rest/storage_core.go b/vendor/k8s.io/kubernetes/pkg/registry/core/rest/storage_core.go index e46a7cf33304..ad7a9a91ade7 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/rest/storage_core.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/rest/storage_core.go @@ -35,7 +35,8 @@ import ( serverstorage "k8s.io/apiserver/pkg/server/storage" etcdutil "k8s.io/apiserver/pkg/storage/etcd/util" restclient "k8s.io/client-go/rest" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" + api "k8s.io/kubernetes/pkg/apis/core" policyclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion" kubeletclient "k8s.io/kubernetes/pkg/kubelet/client" "k8s.io/kubernetes/pkg/master/ports" @@ -90,21 +91,15 @@ type LegacyRESTStorage struct { func (c LegacyRESTStorageProvider) NewLegacyRESTStorage(restOptionsGetter generic.RESTOptionsGetter) (LegacyRESTStorage, genericapiserver.APIGroupInfo, error) { apiGroupInfo := genericapiserver.APIGroupInfo{ - GroupMeta: *api.Registry.GroupOrDie(api.GroupName), + GroupMeta: *legacyscheme.Registry.GroupOrDie(api.GroupName), VersionedResourcesStorageMap: map[string]map[string]rest.Storage{}, - Scheme: api.Scheme, - ParameterCodec: api.ParameterCodec, - NegotiatedSerializer: api.Codecs, - SubresourceGroupVersionKind: map[string]schema.GroupVersionKind{}, - } - if autoscalingGroupVersion := (schema.GroupVersion{Group: "autoscaling", Version: "v1"}); api.Registry.IsEnabledVersion(autoscalingGroupVersion) { - apiGroupInfo.SubresourceGroupVersionKind["replicationcontrollers/scale"] = autoscalingGroupVersion.WithKind("Scale") + Scheme: legacyscheme.Scheme, + ParameterCodec: legacyscheme.ParameterCodec, + NegotiatedSerializer: legacyscheme.Codecs, } var podDisruptionClient policyclient.PodDisruptionBudgetsGetter - if policyGroupVersion := (schema.GroupVersion{Group: "policy", Version: "v1beta1"}); api.Registry.IsEnabledVersion(policyGroupVersion) { - apiGroupInfo.SubresourceGroupVersionKind["pods/eviction"] = policyGroupVersion.WithKind("Eviction") - + if policyGroupVersion := (schema.GroupVersion{Group: "policy", Version: "v1beta1"}); legacyscheme.Registry.IsEnabledVersion(policyGroupVersion) { var err error podDisruptionClient, err = policyclient.NewForConfig(c.LoopbackClientConfig) if err != nil { @@ -223,10 +218,10 @@ func (c LegacyRESTStorageProvider) NewLegacyRESTStorage(restOptionsGetter generi 
"componentStatuses": componentstatus.NewStorage(componentStatusStorage{c.StorageFactory}.serversToValidate), } - if api.Registry.IsEnabledVersion(schema.GroupVersion{Group: "autoscaling", Version: "v1"}) { + if legacyscheme.Registry.IsEnabledVersion(schema.GroupVersion{Group: "autoscaling", Version: "v1"}) { restStorageMap["replicationControllers/scale"] = controllerStorage.Scale } - if api.Registry.IsEnabledVersion(schema.GroupVersion{Group: "policy", Version: "v1beta1"}) { + if legacyscheme.Registry.IsEnabledVersion(schema.GroupVersion{Group: "policy", Version: "v1beta1"}) { restStorageMap["pods/eviction"] = podStorage.Eviction } apiGroupInfo.VersionedResourcesStorageMap["v1"] = restStorageMap diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/secret/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/core/secret/BUILD index ade27419594b..48332003ed0d 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/secret/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/secret/BUILD @@ -13,9 +13,11 @@ go_library( "registry.go", "strategy.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/core/secret", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/validation:go_default_library", + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -35,11 +37,13 @@ go_library( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/secret", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", "//pkg/api/testing:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/secret/registry.go b/vendor/k8s.io/kubernetes/pkg/registry/core/secret/registry.go index cfe8d98569ed..8d214d4f1fe8 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/secret/registry.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/secret/registry.go @@ -22,7 +22,7 @@ import ( "k8s.io/apimachinery/pkg/watch" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) // Registry is an interface implemented by things that know how to store Secret objects. 
@@ -30,8 +30,8 @@ type Registry interface { ListSecrets(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (*api.SecretList, error) WatchSecrets(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (watch.Interface, error) GetSecret(ctx genericapirequest.Context, name string, options *metav1.GetOptions) (*api.Secret, error) - CreateSecret(ctx genericapirequest.Context, Secret *api.Secret) (*api.Secret, error) - UpdateSecret(ctx genericapirequest.Context, Secret *api.Secret) (*api.Secret, error) + CreateSecret(ctx genericapirequest.Context, Secret *api.Secret, createValidation rest.ValidateObjectFunc) (*api.Secret, error) + UpdateSecret(ctx genericapirequest.Context, Secret *api.Secret, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (*api.Secret, error) DeleteSecret(ctx genericapirequest.Context, name string) error } @@ -66,13 +66,13 @@ func (s *storage) GetSecret(ctx genericapirequest.Context, name string, options return obj.(*api.Secret), nil } -func (s *storage) CreateSecret(ctx genericapirequest.Context, secret *api.Secret) (*api.Secret, error) { - obj, err := s.Create(ctx, secret, false) +func (s *storage) CreateSecret(ctx genericapirequest.Context, secret *api.Secret, createValidation rest.ValidateObjectFunc) (*api.Secret, error) { + obj, err := s.Create(ctx, secret, createValidation, false) return obj.(*api.Secret), err } -func (s *storage) UpdateSecret(ctx genericapirequest.Context, secret *api.Secret) (*api.Secret, error) { - obj, _, err := s.Update(ctx, secret.Name, rest.DefaultUpdatedObjectInfo(secret, api.Scheme)) +func (s *storage) UpdateSecret(ctx genericapirequest.Context, secret *api.Secret, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (*api.Secret, error) { + obj, _, err := s.Update(ctx, secret.Name, rest.DefaultUpdatedObjectInfo(secret), createValidation, updateValidation) return obj.(*api.Secret), err } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/secret/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/core/secret/storage/BUILD index 0d4bdcf0ea04..8e73a457f1c7 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/secret/storage/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/secret/storage/BUILD @@ -9,15 +9,17 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/secret/storage", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/registry/registrytest:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/generic/testing:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/etcd/testing:go_default_library", ], ) @@ -25,8 +27,9 @@ go_test( go_library( name = "go_default_library", srcs = ["storage.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/secret/storage", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/registry/core/secret:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", diff --git 
a/vendor/k8s.io/kubernetes/pkg/registry/core/secret/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/core/secret/storage/storage.go index c6b7b1de582d..13600daf0560 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/secret/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/secret/storage/storage.go @@ -20,7 +20,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/registry/core/secret" ) @@ -31,7 +31,6 @@ type REST struct { // NewREST returns a RESTStorage object that will work against secrets. func NewREST(optsGetter generic.RESTOptionsGetter) *REST { store := &genericregistry.Store{ - Copier: api.Scheme, NewFunc: func() runtime.Object { return &api.Secret{} }, NewListFunc: func() runtime.Object { return &api.SecretList{} }, PredicateFunc: secret.Matcher, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/secret/strategy.go b/vendor/k8s.io/kubernetes/pkg/registry/core/secret/strategy.go index 59150c6402dc..fbdb05572de1 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/secret/strategy.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/secret/strategy.go @@ -29,8 +29,9 @@ import ( "k8s.io/apiserver/pkg/registry/rest" apistorage "k8s.io/apiserver/pkg/storage" "k8s.io/apiserver/pkg/storage/names" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/api/legacyscheme" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/validation" ) // strategy implements behavior for Secret objects @@ -41,7 +42,7 @@ type strategy struct { // Strategy is the default logic that applies when creating and updating Secret // objects via the REST API. 
-var Strategy = strategy{api.Scheme, names.SimpleNameGenerator} +var Strategy = strategy{legacyscheme.Scheme, names.SimpleNameGenerator} var _ = rest.RESTCreateStrategy(Strategy) diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/service/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/core/service/BUILD index c6375b665dae..ca6517abf272 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/service/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/service/BUILD @@ -15,11 +15,13 @@ go_library( "rest.go", "strategy.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/core/service", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/helper:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/api/service:go_default_library", - "//pkg/api/validation:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/helper:go_default_library", + "//pkg/apis/core/validation:go_default_library", "//pkg/capabilities:go_default_library", "//pkg/features:go_default_library", "//pkg/registry/core/endpoint:go_default_library", @@ -48,11 +50,12 @@ go_test( "rest_test.go", "strategy_test.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/core/service", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/helper:go_default_library", "//pkg/api/service:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/helper:go_default_library", "//pkg/features:go_default_library", "//pkg/registry/core/service/ipallocator:go_default_library", "//pkg/registry/core/service/portallocator:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/service/allocator/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/core/service/allocator/BUILD index f397ec4b6d7d..ef863f8c3a25 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/service/allocator/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/service/allocator/BUILD @@ -13,6 +13,7 @@ go_library( "interfaces.go", "utils.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/core/service/allocator", ) go_test( @@ -21,6 +22,7 @@ go_test( "bitmap_test.go", "utils_test.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/core/service/allocator", library = ":go_default_library", deps = ["//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/service/allocator/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/core/service/allocator/storage/BUILD index d24f9872c36b..18a175e63bbb 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/service/allocator/storage/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/service/allocator/storage/BUILD @@ -9,9 +9,10 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/service/allocator/storage", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/registry/core/service/allocator:go_default_library", "//pkg/registry/registrytest:go_default_library", "//vendor/golang.org/x/net/context:go_default_library", @@ -23,8 +24,9 @@ go_test( go_library( name = "go_default_library", srcs = ["storage.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/service/allocator/storage", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/registry/core/rangeallocation:go_default_library", "//pkg/registry/core/service/allocator:go_default_library", 
"//vendor/golang.org/x/net/context:go_default_library", @@ -32,6 +34,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/generic/registry:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/errors:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/service/allocator/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/core/service/allocator/storage/storage.go index 7c4d539718a1..03f28431ada1 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/service/allocator/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/service/allocator/storage/storage.go @@ -25,10 +25,11 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apiserver/pkg/registry/generic" + "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/storage" storeerr "k8s.io/apiserver/pkg/storage/errors" "k8s.io/apiserver/pkg/storage/storagebackend" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/registry/core/rangeallocation" "k8s.io/kubernetes/pkg/registry/core/service/allocator" @@ -61,7 +62,13 @@ var _ rangeallocation.RangeRegistry = &Etcd{} // NewEtcd returns an allocator that is backed by Etcd and can manage // persisting the snapshot state of allocation after each allocation is made. func NewEtcd(alloc allocator.Snapshottable, baseKey string, resource schema.GroupResource, config *storagebackend.Config) *Etcd { - storage, _ := generic.NewRawStorage(config) + storage, d := generic.NewRawStorage(config) + + // TODO : Remove RegisterStorageCleanup below when PR + // https://github.com/kubernetes/kubernetes/pull/50690 + // merges as that shuts down storage properly + registry.RegisterStorageCleanup(d) + return &Etcd{ alloc: alloc, storage: storage, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/service/ipallocator/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/core/service/ipallocator/BUILD index eea401e7bfe2..17b5d4925524 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/service/ipallocator/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/service/ipallocator/BUILD @@ -9,8 +9,9 @@ load( go_library( name = "go_default_library", srcs = ["allocator.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/service/ipallocator", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/registry/core/service/allocator:go_default_library", ], ) @@ -18,9 +19,10 @@ go_library( go_test( name = "go_default_test", srcs = ["allocator_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/service/ipallocator", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/service/ipallocator/allocator.go b/vendor/k8s.io/kubernetes/pkg/registry/core/service/ipallocator/allocator.go index dd206eb1d008..b480418c2f30 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/service/ipallocator/allocator.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/service/ipallocator/allocator.go @@ -19,11 +19,10 @@ package 
ipallocator import ( "errors" "fmt" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/registry/core/service/allocator" "math/big" "net" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/registry/core/service/allocator" ) // Interface manages the allocation of IP addresses out of a range. Interface @@ -263,8 +262,8 @@ func calculateIPOffset(base *big.Int, ip net.IP) int { // RangeSize returns the size of a range in valid addresses. func RangeSize(subnet *net.IPNet) int64 { ones, bits := subnet.Mask.Size() - if (bits - ones) >= 31 { - panic("masks greater than 31 bits are not supported") + if bits == 32 && (bits-ones) >= 31 || bits == 128 && (bits-ones) >= 63 { + return 0 } max := int64(1) << uint(bits-ones) return max diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/service/ipallocator/controller/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/core/service/ipallocator/controller/BUILD index a5889cd2de0a..b6a8f0a64d13 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/service/ipallocator/controller/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/service/ipallocator/controller/BUILD @@ -9,9 +9,10 @@ load( go_library( name = "go_default_library", srcs = ["repair.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/service/ipallocator/controller", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/helper:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/helper:go_default_library", "//pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library", "//pkg/registry/core/rangeallocation:go_default_library", "//pkg/registry/core/service/ipallocator:go_default_library", @@ -26,9 +27,10 @@ go_library( go_test( name = "go_default_test", srcs = ["repair_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/service/ipallocator/controller", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/client/clientset_generated/internalclientset/fake:go_default_library", "//pkg/registry/core/service/ipallocator:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/service/ipallocator/controller/repair.go b/vendor/k8s.io/kubernetes/pkg/registry/core/service/ipallocator/controller/repair.go index 7e13115f1f0a..fa2cae7f3a21 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/service/ipallocator/controller/repair.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/service/ipallocator/controller/repair.go @@ -26,8 +26,8 @@ import ( "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/util/retry" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/helper" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/helper" coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" "k8s.io/kubernetes/pkg/registry/core/rangeallocation" "k8s.io/kubernetes/pkg/registry/core/service/ipallocator" diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/service/portallocator/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/core/service/portallocator/BUILD index 483d8041834b..4d038b33add6 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/service/portallocator/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/service/portallocator/BUILD @@ -12,8 +12,9 @@ go_library( "allocator.go", "operation.go", ], + importpath = 
"k8s.io/kubernetes/pkg/registry/core/service/portallocator", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/registry/core/service/allocator:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", @@ -23,9 +24,10 @@ go_library( go_test( name = "go_default_test", srcs = ["allocator_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/service/portallocator", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/service/portallocator/allocator.go b/vendor/k8s.io/kubernetes/pkg/registry/core/service/portallocator/allocator.go index c73253f2dcb6..c31f4b045ea0 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/service/portallocator/allocator.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/service/portallocator/allocator.go @@ -21,7 +21,7 @@ import ( "fmt" "k8s.io/apimachinery/pkg/util/net" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/registry/core/service/allocator" "github.com/golang/glog" diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/service/portallocator/controller/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/core/service/portallocator/controller/BUILD index ca88cec33e3f..01408f6fba65 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/service/portallocator/controller/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/service/portallocator/controller/BUILD @@ -9,8 +9,9 @@ load( go_library( name = "go_default_library", srcs = ["repair.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/service/portallocator/controller", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library", "//pkg/registry/core/rangeallocation:go_default_library", "//pkg/registry/core/service:go_default_library", @@ -27,9 +28,10 @@ go_library( go_test( name = "go_default_test", srcs = ["repair_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/service/portallocator/controller", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/client/clientset_generated/internalclientset/fake:go_default_library", "//pkg/registry/core/service/portallocator:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/service/portallocator/controller/repair.go b/vendor/k8s.io/kubernetes/pkg/registry/core/service/portallocator/controller/repair.go index 25de257036ec..e02ca3ef1e4f 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/service/portallocator/controller/repair.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/service/portallocator/controller/repair.go @@ -26,7 +26,7 @@ import ( "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/util/retry" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" "k8s.io/kubernetes/pkg/registry/core/rangeallocation" "k8s.io/kubernetes/pkg/registry/core/service" diff --git 
a/vendor/k8s.io/kubernetes/pkg/registry/core/service/proxy.go b/vendor/k8s.io/kubernetes/pkg/registry/core/service/proxy.go index 03074de98319..1956f276fb38 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/service/proxy.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/service/proxy.go @@ -26,7 +26,7 @@ import ( "k8s.io/apimachinery/pkg/util/proxy" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/capabilities" ) diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/service/registry.go b/vendor/k8s.io/kubernetes/pkg/registry/core/service/registry.go index 0b7dad98bb07..332d651e7dd3 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/service/registry.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/service/registry.go @@ -24,16 +24,16 @@ import ( "k8s.io/apimachinery/pkg/watch" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) // Registry is an interface for things that know how to store services. type Registry interface { ListServices(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (*api.ServiceList, error) - CreateService(ctx genericapirequest.Context, svc *api.Service) (*api.Service, error) + CreateService(ctx genericapirequest.Context, svc *api.Service, createValidation rest.ValidateObjectFunc) (*api.Service, error) GetService(ctx genericapirequest.Context, name string, options *metav1.GetOptions) (*api.Service, error) DeleteService(ctx genericapirequest.Context, name string) error - UpdateService(ctx genericapirequest.Context, svc *api.Service) (*api.Service, error) + UpdateService(ctx genericapirequest.Context, svc *api.Service, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (*api.Service, error) WatchServices(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (watch.Interface, error) ExportService(ctx genericapirequest.Context, name string, options metav1.ExportOptions) (*api.Service, error) } @@ -57,8 +57,8 @@ func (s *storage) ListServices(ctx genericapirequest.Context, options *metainter return obj.(*api.ServiceList), nil } -func (s *storage) CreateService(ctx genericapirequest.Context, svc *api.Service) (*api.Service, error) { - obj, err := s.Create(ctx, svc, false) +func (s *storage) CreateService(ctx genericapirequest.Context, svc *api.Service, createValidation rest.ValidateObjectFunc) (*api.Service, error) { + obj, err := s.Create(ctx, svc, createValidation, false) if err != nil { return nil, err } @@ -78,8 +78,8 @@ func (s *storage) DeleteService(ctx genericapirequest.Context, name string) erro return err } -func (s *storage) UpdateService(ctx genericapirequest.Context, svc *api.Service) (*api.Service, error) { - obj, _, err := s.Update(ctx, svc.Name, rest.DefaultUpdatedObjectInfo(svc, api.Scheme)) +func (s *storage) UpdateService(ctx genericapirequest.Context, svc *api.Service, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (*api.Service, error) { + obj, _, err := s.Update(ctx, svc.Name, rest.DefaultUpdatedObjectInfo(svc), createValidation, updateValidation) if err != nil { return nil, err } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/service/rest.go b/vendor/k8s.io/kubernetes/pkg/registry/core/service/rest.go index 3a353c344952..689850c1b86b 100644 --- 
a/vendor/k8s.io/kubernetes/pkg/registry/core/service/rest.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/service/rest.go @@ -36,10 +36,10 @@ import ( genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/helper" apiservice "k8s.io/kubernetes/pkg/api/service" - "k8s.io/kubernetes/pkg/api/validation" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/helper" + "k8s.io/kubernetes/pkg/apis/core/validation" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/registry/core/endpoint" "k8s.io/kubernetes/pkg/registry/core/service/ipallocator" @@ -61,6 +61,16 @@ type REST struct { proxyTransport http.RoundTripper } +// ServiceNodePort includes protocol and port number of a service NodePort. +type ServiceNodePort struct { + // The IP protocol for this port. Supports "TCP" and "UDP". + Protocol api.Protocol + + // The port on each node on which this service is exposed. + // Default is to auto-allocate a port if the ServiceType of this Service requires one. + NodePort int32 +} + // NewStorage returns a new REST. func NewStorage(registry Registry, endpoints endpoint.Registry, serviceIPs ipallocator.Interface, serviceNodePorts portallocator.Interface, proxyTransport http.RoundTripper) *ServiceRest { @@ -88,7 +98,7 @@ func (rs *REST) Categories() []string { } // TODO: implement includeUninitialized by refactoring this to move to store -func (rs *REST) Create(ctx genericapirequest.Context, obj runtime.Object, includeUninitialized bool) (runtime.Object, error) { +func (rs *REST) Create(ctx genericapirequest.Context, obj runtime.Object, createValidation rest.ValidateObjectFunc, includeUninitialized bool) (runtime.Object, error) { service := obj.(*api.Service) if err := rest.BeforeCreate(Strategy, ctx, obj); err != nil { @@ -133,7 +143,7 @@ func (rs *REST) Create(ctx genericapirequest.Context, obj runtime.Object, includ } } - out, err := rs.registry.CreateService(ctx, service) + out, err := rs.registry.CreateService(ctx, service, createValidation) if err != nil { err = rest.CheckGeneratedNameError(Strategy, err, service) } @@ -284,7 +294,7 @@ func (rs *REST) healthCheckNodePortUpdate(oldService, service *api.Service, node return true, nil } -func (rs *REST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) { +func (rs *REST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (runtime.Object, bool, error) { oldService, err := rs.registry.GetService(ctx, name, &metav1.GetOptions{}) if err != nil { return nil, false, err @@ -360,7 +370,7 @@ func (rs *REST) Update(ctx genericapirequest.Context, name string, objInfo rest. 
} } - out, err := rs.registry.UpdateService(ctx, service) + out, err := rs.registry.UpdateService(ctx, service, createValidation, updateValidation) if err == nil { el := nodePortOp.Commit() if el != nil { @@ -437,7 +447,7 @@ func (rs *REST) ResourceLocation(ctx genericapirequest.Context, id string) (*url // This is O(N), but we expect haystack to be small; // so small that we expect a linear search to be faster -func contains(haystack []int, needle int) bool { +func containsNumber(haystack []int, needle int) bool { for _, v := range haystack { if v == needle { return true @@ -446,6 +456,17 @@ func contains(haystack []int, needle int) bool { return false } +// This is O(N), but we expect serviceNodePorts to be small; +// so small that we expect a linear search to be faster +func containsNodePort(serviceNodePorts []ServiceNodePort, serviceNodePort ServiceNodePort) bool { + for _, snp := range serviceNodePorts { + if snp == serviceNodePort { + return true + } + } + return false +} + func CollectServiceNodePorts(service *api.Service) []int { servicePorts := []int{} for i := range service.Spec.Ports { @@ -569,44 +590,48 @@ func (rs *REST) initNodePorts(service *api.Service, nodePortOp *portallocator.Po } func (rs *REST) updateNodePorts(oldService, newService *api.Service, nodePortOp *portallocator.PortAllocationOperation) error { - oldNodePorts := CollectServiceNodePorts(oldService) + oldNodePortsNumbers := CollectServiceNodePorts(oldService) + newNodePorts := []ServiceNodePort{} + portAllocated := map[int]bool{} - newNodePorts := []int{} for i := range newService.Spec.Ports { servicePort := &newService.Spec.Ports[i] - nodePort := int(servicePort.NodePort) - if nodePort != 0 { - if !contains(oldNodePorts, nodePort) { - err := nodePortOp.Allocate(nodePort) + nodePort := ServiceNodePort{Protocol: servicePort.Protocol, NodePort: servicePort.NodePort} + if nodePort.NodePort != 0 { + if !containsNumber(oldNodePortsNumbers, int(nodePort.NodePort)) && !portAllocated[int(nodePort.NodePort)] { + err := nodePortOp.Allocate(int(nodePort.NodePort)) if err != nil { - el := field.ErrorList{field.Invalid(field.NewPath("spec", "ports").Index(i).Child("nodePort"), nodePort, err.Error())} + el := field.ErrorList{field.Invalid(field.NewPath("spec", "ports").Index(i).Child("nodePort"), nodePort.NodePort, err.Error())} return errors.NewInvalid(api.Kind("Service"), newService.Name, el) } + portAllocated[int(nodePort.NodePort)] = true } } else { - nodePort, err := nodePortOp.AllocateNext() + nodePortNumber, err := nodePortOp.AllocateNext() if err != nil { // TODO: what error should be returned here? It's not a // field-level validation failure (the field is valid), and it's // not really an internal error. 
return errors.NewInternalError(fmt.Errorf("failed to allocate a nodePort: %v", err)) } - servicePort.NodePort = int32(nodePort) + servicePort.NodePort = int32(nodePortNumber) + nodePort.NodePort = servicePort.NodePort } - // Detect duplicate node ports; this should have been caught by validation, so we panic - if contains(newNodePorts, nodePort) { - panic("duplicate node port") + if containsNodePort(newNodePorts, nodePort) { + return fmt.Errorf("duplicate nodePort: %v", nodePort) } newNodePorts = append(newNodePorts, nodePort) } + newNodePortsNumbers := CollectServiceNodePorts(newService) + // The comparison loops are O(N^2), but we don't expect N to be huge // (there's a hard-limit at 2^16, because they're ports; and even 4 ports would be a lot) - for _, oldNodePort := range oldNodePorts { - if contains(newNodePorts, oldNodePort) { + for _, oldNodePortNumber := range oldNodePortsNumbers { + if containsNumber(newNodePortsNumbers, oldNodePortNumber) { continue } - nodePortOp.ReleaseDeferred(oldNodePort) + nodePortOp.ReleaseDeferred(int(oldNodePortNumber)) } return nil diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/service/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/core/service/storage/BUILD index d99d287fab34..49826b6330fd 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/service/storage/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/service/storage/BUILD @@ -9,9 +9,10 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/service/storage", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/registry/registrytest:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", @@ -19,6 +20,7 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/generic/testing:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/etcd/testing:go_default_library", ], ) @@ -26,8 +28,9 @@ go_test( go_library( name = "go_default_library", srcs = ["storage.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/service/storage", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/printers:go_default_library", "//pkg/printers/internalversion:go_default_library", "//pkg/printers/storage:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/service/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/core/service/storage/storage.go index 0dc8a1fee97c..0d492b7e8813 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/service/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/service/storage/storage.go @@ -23,7 +23,7 @@ import ( "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/printers" printersinternal "k8s.io/kubernetes/pkg/printers/internalversion" printerstorage "k8s.io/kubernetes/pkg/printers/storage" @@ -37,7 +37,6 @@ type REST struct { // NewREST returns a RESTStorage object that will work against services. 
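Note on the updateNodePorts hunk above: duplicate detection now keys on a (protocol, port) pair via the new ServiceNodePort type and containsNodePort helper, and a duplicate returns an error instead of panicking. A minimal, self-contained sketch of that comparison logic, using a local stand-in for the vendored type (the protocol strings and port numbers are illustrative, not taken from the patch):

```go
package main

import "fmt"

// serviceNodePort mirrors the shape of the ServiceNodePort type introduced in
// the hunk above: a protocol name plus a node port number.
type serviceNodePort struct {
	Protocol string
	NodePort int32
}

// containsNodePort reports whether the slice already holds an identical
// (protocol, port) pair, matching the linear scan used by the patch.
func containsNodePort(ports []serviceNodePort, p serviceNodePort) bool {
	for _, existing := range ports {
		if existing == p {
			return true
		}
	}
	return false
}

func main() {
	seen := []serviceNodePort{}
	requested := []serviceNodePort{
		{Protocol: "TCP", NodePort: 30080},
		{Protocol: "UDP", NodePort: 30080}, // same number, different protocol: distinct pair
		{Protocol: "TCP", NodePort: 30080}, // exact duplicate: reported
	}
	for _, p := range requested {
		if containsNodePort(seen, p) {
			fmt.Printf("duplicate nodePort: %+v\n", p)
			continue
		}
		seen = append(seen, p)
	}
	fmt.Println("accepted:", seen)
}
```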
func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { store := &genericregistry.Store{ - Copier: api.Scheme, NewFunc: func() runtime.Object { return &api.Service{} }, NewListFunc: func() runtime.Object { return &api.ServiceList{} }, DefaultQualifiedResource: api.Resource("services"), @@ -90,6 +89,6 @@ func (r *StatusREST) Get(ctx genericapirequest.Context, name string, options *me } // Update alters the status subset of an object. -func (r *StatusREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) { - return r.store.Update(ctx, name, objInfo) +func (r *StatusREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (runtime.Object, bool, error) { + return r.store.Update(ctx, name, objInfo, createValidation, updateValidation) } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/service/strategy.go b/vendor/k8s.io/kubernetes/pkg/registry/core/service/strategy.go index e2bf47a545dd..278da88ed6b3 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/service/strategy.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/service/strategy.go @@ -23,8 +23,9 @@ import ( "k8s.io/apimachinery/pkg/util/validation/field" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/storage/names" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/api/legacyscheme" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/validation" ) // svcStrategy implements behavior for Services @@ -35,7 +36,7 @@ type svcStrategy struct { // Services is the default logic that applies when creating and updating Service // objects. -var Strategy = svcStrategy{api.Scheme, names.SimpleNameGenerator} +var Strategy = svcStrategy{legacyscheme.Scheme, names.SimpleNameGenerator} // NamespaceScoped is true for services. 
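The storage constructors in this patch (the services one just above, and the daemonset, deployment, and ingress ones further down) drop the `Copier: api.Scheme` field and keep only the object constructors and the resource name. A rough, self-contained stand-in for that configuration shape, with local types in place of genericregistry.Store and the API objects:

```go
package main

import "fmt"

// object stands in for runtime.Object.
type object interface{}

// store keeps only the fields this sketch needs from genericregistry.Store:
// constructors for single objects and lists plus the resource name. There is
// no Copier field, mirroring its removal in the hunks above.
type store struct {
	NewFunc                  func() object
	NewListFunc              func() object
	DefaultQualifiedResource string
}

type service struct{ Name string }
type serviceList struct{ Items []service }

// newServiceStore mirrors the shape of the NewREST constructor above.
func newServiceStore() *store {
	return &store{
		NewFunc:                  func() object { return &service{} },
		NewListFunc:              func() object { return &serviceList{} },
		DefaultQualifiedResource: "services",
	}
}

func main() {
	s := newServiceStore()
	fmt.Printf("%T backing %q\n", s.NewFunc(), s.DefaultQualifiedResource)
}
```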
func (svcStrategy) NamespaceScoped() bool { diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/serviceaccount/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/core/serviceaccount/BUILD index 152f2c6a452b..956de50f0d58 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/serviceaccount/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/serviceaccount/BUILD @@ -12,9 +12,11 @@ go_library( "registry.go", "strategy.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/core/serviceaccount", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/validation:go_default_library", + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/serviceaccount/registry.go b/vendor/k8s.io/kubernetes/pkg/registry/core/serviceaccount/registry.go index 4fa100244ffd..8d13600577f5 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/serviceaccount/registry.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/serviceaccount/registry.go @@ -22,7 +22,7 @@ import ( "k8s.io/apimachinery/pkg/watch" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) // Registry is an interface implemented by things that know how to store ServiceAccount objects. @@ -30,8 +30,8 @@ type Registry interface { ListServiceAccounts(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (*api.ServiceAccountList, error) WatchServiceAccounts(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (watch.Interface, error) GetServiceAccount(ctx genericapirequest.Context, name string, options *metav1.GetOptions) (*api.ServiceAccount, error) - CreateServiceAccount(ctx genericapirequest.Context, ServiceAccount *api.ServiceAccount) error - UpdateServiceAccount(ctx genericapirequest.Context, ServiceAccount *api.ServiceAccount) error + CreateServiceAccount(ctx genericapirequest.Context, ServiceAccount *api.ServiceAccount, createValidation rest.ValidateObjectFunc) error + UpdateServiceAccount(ctx genericapirequest.Context, ServiceAccount *api.ServiceAccount, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) error DeleteServiceAccount(ctx genericapirequest.Context, name string) error } @@ -66,13 +66,13 @@ func (s *storage) GetServiceAccount(ctx genericapirequest.Context, name string, return obj.(*api.ServiceAccount), nil } -func (s *storage) CreateServiceAccount(ctx genericapirequest.Context, serviceAccount *api.ServiceAccount) error { - _, err := s.Create(ctx, serviceAccount, false) +func (s *storage) CreateServiceAccount(ctx genericapirequest.Context, serviceAccount *api.ServiceAccount, createValidation rest.ValidateObjectFunc) error { + _, err := s.Create(ctx, serviceAccount, createValidation, false) return err } -func (s *storage) UpdateServiceAccount(ctx genericapirequest.Context, serviceAccount *api.ServiceAccount) error { - _, _, err := s.Update(ctx, serviceAccount.Name, rest.DefaultUpdatedObjectInfo(serviceAccount, api.Scheme)) +func (s *storage) UpdateServiceAccount(ctx genericapirequest.Context, serviceAccount *api.ServiceAccount, createValidation rest.ValidateObjectFunc, updateValidation 
rest.ValidateObjectUpdateFunc) error { + _, _, err := s.Update(ctx, serviceAccount.Name, rest.DefaultUpdatedObjectInfo(serviceAccount), createValidation, updateValidation) return err } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/serviceaccount/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/core/serviceaccount/storage/BUILD index 5e3fccb7be06..441485c32efb 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/serviceaccount/storage/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/serviceaccount/storage/BUILD @@ -9,15 +9,17 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/serviceaccount/storage", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/registry/registrytest:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/generic/testing:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/etcd/testing:go_default_library", ], ) @@ -25,8 +27,9 @@ go_test( go_library( name = "go_default_library", srcs = ["storage.go"], + importpath = "k8s.io/kubernetes/pkg/registry/core/serviceaccount/storage", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/registry/core/serviceaccount:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/serviceaccount/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/core/serviceaccount/storage/storage.go index ad116327d474..1b889374add8 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/serviceaccount/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/serviceaccount/storage/storage.go @@ -21,7 +21,7 @@ import ( "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/registry/core/serviceaccount" ) @@ -32,7 +32,6 @@ type REST struct { // NewREST returns a RESTStorage object that will work against service accounts. 
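In the serviceaccount registry above, CreateServiceAccount and UpdateServiceAccount now accept create/update validation callbacks and forward them to the store's Create and Update calls. The sketch below only mirrors the shape of those callbacks with local types; it does not use the real rest.ValidateObjectFunc or the apiserver store, and the no-op and name-check validators are illustrative:

```go
package main

import (
	"errors"
	"fmt"
)

// object stands in for runtime.Object in this sketch.
type object interface{ GetName() string }

// validateObjectFunc and validateObjectUpdateFunc mirror the shape of the
// create/update validation callbacks threaded through the registry above.
type validateObjectFunc func(obj object) error
type validateObjectUpdateFunc func(obj, old object) error

type serviceAccount struct{ Name string }

func (s serviceAccount) GetName() string { return s.Name }

// createServiceAccount imitates the pass-through seen above: the registry hands
// the callback to the store's Create. Here that path is collapsed into one
// function that runs the callback and then pretends to persist the object.
func createServiceAccount(sa serviceAccount, createValidation validateObjectFunc) error {
	if err := createValidation(sa); err != nil {
		return err
	}
	fmt.Println("stored", sa.Name)
	return nil
}

func main() {
	noop := validateObjectFunc(func(obj object) error { return nil })
	denyUnnamed := validateObjectFunc(func(obj object) error {
		if obj.GetName() == "" {
			return errors.New("name must not be empty")
		}
		return nil
	})

	_ = createServiceAccount(serviceAccount{Name: "builder"}, noop)
	if err := createServiceAccount(serviceAccount{}, denyUnnamed); err != nil {
		fmt.Println("rejected:", err)
	}
}
```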
func NewREST(optsGetter generic.RESTOptionsGetter) *REST { store := &genericregistry.Store{ - Copier: api.Scheme, NewFunc: func() runtime.Object { return &api.ServiceAccount{} }, NewListFunc: func() runtime.Object { return &api.ServiceAccountList{} }, DefaultQualifiedResource: api.Resource("serviceaccounts"), diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/serviceaccount/strategy.go b/vendor/k8s.io/kubernetes/pkg/registry/core/serviceaccount/strategy.go index 501e0436189f..8a6a372070ea 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/serviceaccount/strategy.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/serviceaccount/strategy.go @@ -21,8 +21,9 @@ import ( "k8s.io/apimachinery/pkg/util/validation/field" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/storage/names" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/api/legacyscheme" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/validation" ) // strategy implements behavior for ServiceAccount objects @@ -33,7 +34,7 @@ type strategy struct { // Strategy is the default logic that applies when creating and updating ServiceAccount // objects via the REST API. -var Strategy = strategy{api.Scheme, names.SimpleNameGenerator} +var Strategy = strategy{legacyscheme.Scheme, names.SimpleNameGenerator} func (strategy) NamespaceScoped() bool { return true diff --git a/vendor/k8s.io/kubernetes/pkg/registry/events/rest/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/events/rest/BUILD new file mode 100644 index 000000000000..dce67d6052b8 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/registry/events/rest/BUILD @@ -0,0 +1,32 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["storage_events.go"], + importpath = "k8s.io/kubernetes/pkg/registry/events/rest", + visibility = ["//visibility:public"], + deps = [ + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/events:go_default_library", + "//pkg/registry/core/event/storage:go_default_library", + "//vendor/k8s.io/api/events/v1beta1:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", + "//vendor/k8s.io/apiserver/pkg/server:go_default_library", + "//vendor/k8s.io/apiserver/pkg/server/storage:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/registry/events/rest/storage_events.go b/vendor/k8s.io/kubernetes/pkg/registry/events/rest/storage_events.go new file mode 100644 index 000000000000..03d6c661a522 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/registry/events/rest/storage_events.go @@ -0,0 +1,62 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rest + +import ( + "time" + + eventsapiv1beta1 "k8s.io/api/events/v1beta1" + "k8s.io/apiserver/pkg/registry/generic" + "k8s.io/apiserver/pkg/registry/rest" + genericapiserver "k8s.io/apiserver/pkg/server" + serverstorage "k8s.io/apiserver/pkg/server/storage" + "k8s.io/kubernetes/pkg/api/legacyscheme" + "k8s.io/kubernetes/pkg/apis/events" + eventstore "k8s.io/kubernetes/pkg/registry/core/event/storage" +) + +type RESTStorageProvider struct { + TTL time.Duration +} + +func (p RESTStorageProvider) NewRESTStorage(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter) (genericapiserver.APIGroupInfo, bool) { + apiGroupInfo := genericapiserver.NewDefaultAPIGroupInfo(events.GroupName, legacyscheme.Registry, legacyscheme.Scheme, legacyscheme.ParameterCodec, legacyscheme.Codecs) + // If you add a version here, be sure to add an entry in `k8s.io/kubernetes/cmd/kube-apiserver/app/aggregator.go with specific priorities. + // TODO refactor the plumbing to provide the information in the APIGroupInfo + + if apiResourceConfigSource.AnyResourcesForVersionEnabled(eventsapiv1beta1.SchemeGroupVersion) { + apiGroupInfo.VersionedResourcesStorageMap[eventsapiv1beta1.SchemeGroupVersion.Version] = p.v1beta1Storage(apiResourceConfigSource, restOptionsGetter) + apiGroupInfo.GroupMeta.GroupVersion = eventsapiv1beta1.SchemeGroupVersion + } + + return apiGroupInfo, true +} + +func (p RESTStorageProvider) v1beta1Storage(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter) map[string]rest.Storage { + version := eventsapiv1beta1.SchemeGroupVersion + + storage := map[string]rest.Storage{} + if apiResourceConfigSource.ResourceEnabled(version.WithResource("events")) { + eventsStorage := eventstore.NewREST(restOptionsGetter, uint64(p.TTL.Seconds())) + storage["events"] = eventsStorage + } + return storage +} + +func (p RESTStorageProvider) GroupName() string { + return events.GroupName +} diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/controller/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/extensions/controller/storage/BUILD index 7884e65a7402..8f4cf02eec69 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/controller/storage/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/extensions/controller/storage/BUILD @@ -9,10 +9,11 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/extensions/controller/storage", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", - "//pkg/apis/extensions:go_default_library", + "//pkg/apis/autoscaling:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/registry/registrytest:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", @@ -27,14 +28,17 @@ go_test( go_library( name = "go_default_library", srcs = ["storage.go"], + importpath = "k8s.io/kubernetes/pkg/registry/extensions/controller/storage", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/autoscaling:go_default_library", + "//pkg/apis/autoscaling/validation:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", - "//pkg/apis/extensions/validation:go_default_library", 
"//pkg/registry/core/replicationcontroller:go_default_library", "//pkg/registry/core/replicationcontroller/storage:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/controller/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/extensions/controller/storage/storage.go index a3fa80c464b6..774a78911f05 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/controller/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/extensions/controller/storage/storage.go @@ -21,13 +21,15 @@ import ( "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/generic" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/autoscaling" + autoscalingvalidation "k8s.io/kubernetes/pkg/apis/autoscaling/validation" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/extensions" - extvalidation "k8s.io/kubernetes/pkg/apis/extensions/validation" "k8s.io/kubernetes/pkg/registry/core/replicationcontroller" controllerstore "k8s.io/kubernetes/pkg/registry/core/replicationcontroller/storage" ) @@ -58,7 +60,7 @@ var _ = rest.Patcher(&ScaleREST{}) // New creates a new Scale object func (r *ScaleREST) New() runtime.Object { - return &extensions.Scale{} + return &autoscaling.Scale{} } func (r *ScaleREST) Get(ctx genericapirequest.Context, name string, options *metav1.GetOptions) (runtime.Object, error) { @@ -69,7 +71,7 @@ func (r *ScaleREST) Get(ctx genericapirequest.Context, name string, options *met return scaleFromRC(rc), nil } -func (r *ScaleREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) { +func (r *ScaleREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (runtime.Object, bool, error) { rc, err := (*r.registry).GetController(ctx, name, &metav1.GetOptions{}) if err != nil { return nil, false, errors.NewNotFound(extensions.Resource("replicationcontrollers/scale"), name) @@ -81,18 +83,18 @@ func (r *ScaleREST) Update(ctx genericapirequest.Context, name string, objInfo r if obj == nil { return nil, false, errors.NewBadRequest(fmt.Sprintf("nil update passed to Scale")) } - scale, ok := obj.(*extensions.Scale) + scale, ok := obj.(*autoscaling.Scale) if !ok { return nil, false, errors.NewBadRequest(fmt.Sprintf("wrong object passed to Scale update: %v", obj)) } - if errs := extvalidation.ValidateScale(scale); len(errs) > 0 { + if errs := autoscalingvalidation.ValidateScale(scale); len(errs) > 0 { return nil, false, errors.NewInvalid(extensions.Kind("Scale"), scale.Name, errs) } rc.Spec.Replicas = scale.Spec.Replicas rc.ResourceVersion = scale.ResourceVersion - rc, err = (*r.registry).UpdateController(ctx, rc) + rc, err = (*r.registry).UpdateController(ctx, rc, createValidation, updateValidation) if err != nil { return nil, false, 
errors.NewConflict(extensions.Resource("replicationcontrollers/scale"), scale.Name, err) } @@ -100,8 +102,8 @@ func (r *ScaleREST) Update(ctx genericapirequest.Context, name string, objInfo r } // scaleFromRC returns a scale subresource for a replication controller. -func scaleFromRC(rc *api.ReplicationController) *extensions.Scale { - return &extensions.Scale{ +func scaleFromRC(rc *api.ReplicationController) *autoscaling.Scale { + return &autoscaling.Scale{ ObjectMeta: metav1.ObjectMeta{ Name: rc.Name, Namespace: rc.Namespace, @@ -109,14 +111,12 @@ func scaleFromRC(rc *api.ReplicationController) *extensions.Scale { ResourceVersion: rc.ResourceVersion, CreationTimestamp: rc.CreationTimestamp, }, - Spec: extensions.ScaleSpec{ + Spec: autoscaling.ScaleSpec{ Replicas: rc.Spec.Replicas, }, - Status: extensions.ScaleStatus{ + Status: autoscaling.ScaleStatus{ Replicas: rc.Status.Replicas, - Selector: &metav1.LabelSelector{ - MatchLabels: rc.Spec.Selector, - }, + Selector: labels.SelectorFromSet(labels.Set(rc.Spec.Selector)).String(), }, } } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/daemonset/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/extensions/daemonset/BUILD index f658faf63074..76dcae9bd984 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/daemonset/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/extensions/daemonset/BUILD @@ -12,8 +12,9 @@ go_library( "doc.go", "strategy.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/extensions/daemonset", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/api/pod:go_default_library", "//pkg/apis/extensions:go_default_library", "//pkg/apis/extensions/validation:go_default_library", @@ -33,9 +34,10 @@ go_library( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/extensions/daemonset", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/daemonset/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/extensions/daemonset/storage/BUILD index 594da94514c4..b1dd9b2ceb8c 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/daemonset/storage/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/extensions/daemonset/storage/BUILD @@ -9,9 +9,10 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/extensions/daemonset/storage", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", "//pkg/registry/registrytest:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -19,6 +20,7 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/generic/testing:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/etcd/testing:go_default_library", ], ) @@ -26,8 +28,8 @@ go_test( go_library( name = "go_default_library", srcs = ["storage.go"], + importpath = "k8s.io/kubernetes/pkg/registry/extensions/daemonset/storage", deps 
= [ - "//pkg/api:go_default_library", "//pkg/apis/extensions:go_default_library", "//pkg/printers:go_default_library", "//pkg/printers/internalversion:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/daemonset/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/extensions/daemonset/storage/storage.go index 6907389fdbbe..461e5a97ff2c 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/daemonset/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/extensions/daemonset/storage/storage.go @@ -23,7 +23,6 @@ import ( "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/printers" printersinternal "k8s.io/kubernetes/pkg/printers/internalversion" @@ -39,7 +38,6 @@ type REST struct { // NewREST returns a RESTStorage object that will work against DaemonSets. func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { store := &genericregistry.Store{ - Copier: api.Scheme, NewFunc: func() runtime.Object { return &extensions.DaemonSet{} }, NewListFunc: func() runtime.Object { return &extensions.DaemonSetList{} }, DefaultQualifiedResource: extensions.Resource("daemonsets"), @@ -91,6 +89,6 @@ func (r *StatusREST) Get(ctx genericapirequest.Context, name string, options *me } // Update alters the status subset of an object. -func (r *StatusREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) { - return r.store.Update(ctx, name, objInfo) +func (r *StatusREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (runtime.Object, bool, error) { + return r.store.Update(ctx, name, objInfo, createValidation, updateValidation) } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/daemonset/strategy.go b/vendor/k8s.io/kubernetes/pkg/registry/extensions/daemonset/strategy.go index 0b42f3a96784..04723fdcf250 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/daemonset/strategy.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/extensions/daemonset/strategy.go @@ -17,8 +17,6 @@ limitations under the License. package daemonset import ( - "fmt" - appsv1beta2 "k8s.io/api/apps/v1beta2" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" apiequality "k8s.io/apimachinery/pkg/api/equality" @@ -29,7 +27,7 @@ import ( genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/apiserver/pkg/storage/names" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/api/pod" "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/apis/extensions/validation" @@ -42,11 +40,20 @@ type daemonSetStrategy struct { } // Strategy is the default logic that applies when creating and updating DaemonSet objects. -var Strategy = daemonSetStrategy{api.Scheme, names.SimpleNameGenerator} +var Strategy = daemonSetStrategy{legacyscheme.Scheme, names.SimpleNameGenerator} -// DefaultGarbageCollectionPolicy returns Orphan because that was the default -// behavior before the server-side garbage collection was implemented. -func (daemonSetStrategy) DefaultGarbageCollectionPolicy() rest.GarbageCollectionPolicy { +// DefaultGarbageCollectionPolicy returns OrphanDependents by default. 
For apps/v1, returns DeleteDependents. +func (daemonSetStrategy) DefaultGarbageCollectionPolicy(ctx genericapirequest.Context) rest.GarbageCollectionPolicy { + if requestInfo, found := genericapirequest.RequestInfoFrom(ctx); found { + groupVersion := schema.GroupVersion{Group: requestInfo.APIGroup, Version: requestInfo.APIVersion} + switch groupVersion { + case extensionsv1beta1.SchemeGroupVersion, appsv1beta2.SchemeGroupVersion: + // for back compatibility + return rest.OrphanDependents + default: + return rest.DeleteDependents + } + } return rest.OrphanDependents } @@ -126,7 +133,7 @@ func (daemonSetStrategy) ValidateUpdate(ctx genericapirequest.Context, obj, old allErrs := validation.ValidateDaemonSet(obj.(*extensions.DaemonSet)) allErrs = append(allErrs, validation.ValidateDaemonSetUpdate(newDaemonSet, oldDaemonSet)...) - // Update is not allowed to set Spec.Selector for all groups/versions except extensions/v1beta1. + // Update is not allowed to set Spec.Selector for apps/v1 and apps/v1beta2 (allowed for extensions/v1beta1). // If RequestInfo is nil, it is better to revert to old behavior (i.e. allow update to set Spec.Selector) // to prevent unintentionally breaking users who may rely on the old behavior. // TODO(#50791): after extensions/v1beta1 is removed, move selector immutability check inside ValidateDaemonSetUpdate(). @@ -135,11 +142,9 @@ func (daemonSetStrategy) ValidateUpdate(ctx genericapirequest.Context, obj, old switch groupVersion { case extensionsv1beta1.SchemeGroupVersion: // no-op for compatibility - case appsv1beta2.SchemeGroupVersion: + default: // disallow mutation of selector allErrs = append(allErrs, apivalidation.ValidateImmutableField(newDaemonSet.Spec.Selector, oldDaemonSet.Spec.Selector, field.NewPath("spec").Child("selector"))...) 
- default: - panic(fmt.Sprintf("unexpected group/version: %v", groupVersion)) } } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/deployment/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/extensions/deployment/BUILD index 63476b6d2ed2..850dda20a540 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/deployment/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/extensions/deployment/BUILD @@ -13,8 +13,9 @@ go_library( "registry.go", "strategy.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/extensions/deployment", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/api/pod:go_default_library", "//pkg/apis/extensions:go_default_library", "//pkg/apis/extensions/validation:go_default_library", @@ -37,15 +38,17 @@ go_library( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/extensions/deployment", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/deployment/registry.go b/vendor/k8s.io/kubernetes/pkg/registry/extensions/deployment/registry.go index 88f147e2aade..951e86863fbb 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/deployment/registry.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/extensions/deployment/registry.go @@ -23,7 +23,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/extensions" ) @@ -31,8 +30,8 @@ import ( type Registry interface { ListDeployments(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (*extensions.DeploymentList, error) GetDeployment(ctx genericapirequest.Context, deploymentID string, options *metav1.GetOptions) (*extensions.Deployment, error) - CreateDeployment(ctx genericapirequest.Context, deployment *extensions.Deployment) (*extensions.Deployment, error) - UpdateDeployment(ctx genericapirequest.Context, deployment *extensions.Deployment) (*extensions.Deployment, error) + CreateDeployment(ctx genericapirequest.Context, deployment *extensions.Deployment, createValidation rest.ValidateObjectFunc) (*extensions.Deployment, error) + UpdateDeployment(ctx genericapirequest.Context, deployment *extensions.Deployment, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (*extensions.Deployment, error) DeleteDeployment(ctx genericapirequest.Context, deploymentID string) error } @@ -65,16 +64,16 @@ func (s *storage) GetDeployment(ctx genericapirequest.Context, deploymentID stri return obj.(*extensions.Deployment), nil } -func (s *storage) CreateDeployment(ctx genericapirequest.Context, deployment *extensions.Deployment) (*extensions.Deployment, error) { - obj, err := s.Create(ctx, deployment, false) +func (s *storage) CreateDeployment(ctx genericapirequest.Context, deployment *extensions.Deployment, createValidation 
rest.ValidateObjectFunc) (*extensions.Deployment, error) { + obj, err := s.Create(ctx, deployment, createValidation, false) if err != nil { return nil, err } return obj.(*extensions.Deployment), nil } -func (s *storage) UpdateDeployment(ctx genericapirequest.Context, deployment *extensions.Deployment) (*extensions.Deployment, error) { - obj, _, err := s.Update(ctx, deployment.Name, rest.DefaultUpdatedObjectInfo(deployment, api.Scheme)) +func (s *storage) UpdateDeployment(ctx genericapirequest.Context, deployment *extensions.Deployment, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (*extensions.Deployment, error) { + obj, _, err := s.Update(ctx, deployment.Name, rest.DefaultUpdatedObjectInfo(deployment), createValidation, updateValidation) if err != nil { return nil, err } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/deployment/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/extensions/deployment/storage/BUILD index 92523d8422c8..81b341d8208f 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/deployment/storage/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/extensions/deployment/storage/BUILD @@ -9,9 +9,11 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/extensions/deployment/storage", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/autoscaling:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", "//pkg/registry/registrytest:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", @@ -24,6 +26,7 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/generic/testing:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/errors:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/etcd/testing:go_default_library", @@ -33,9 +36,15 @@ go_test( go_library( name = "go_default_library", srcs = ["storage.go"], + importpath = "k8s.io/kubernetes/pkg/registry/extensions/deployment/storage", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/apps/v1beta1:go_default_library", + "//pkg/apis/apps/v1beta2:go_default_library", + "//pkg/apis/autoscaling:go_default_library", + "//pkg/apis/autoscaling/v1:go_default_library", + "//pkg/apis/autoscaling/validation:go_default_library", "//pkg/apis/extensions:go_default_library", + "//pkg/apis/extensions/v1beta1:go_default_library", "//pkg/apis/extensions/validation:go_default_library", "//pkg/registry/extensions/deployment:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/deployment/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/extensions/deployment/storage/storage.go index 1e15f92ebea0..6cf80f8dcc70 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/deployment/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/extensions/deployment/storage/storage.go @@ -30,8 +30,13 @@ import ( "k8s.io/apiserver/pkg/registry/rest" "k8s.io/apiserver/pkg/storage" storeerr "k8s.io/apiserver/pkg/storage/errors" - "k8s.io/kubernetes/pkg/api" + appsv1beta1 "k8s.io/kubernetes/pkg/apis/apps/v1beta1" + 
appsv1beta2 "k8s.io/kubernetes/pkg/apis/apps/v1beta2" + "k8s.io/kubernetes/pkg/apis/autoscaling" + autoscalingv1 "k8s.io/kubernetes/pkg/apis/autoscaling/v1" + autoscalingvalidation "k8s.io/kubernetes/pkg/apis/autoscaling/validation" "k8s.io/kubernetes/pkg/apis/extensions" + extensionsv1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" extvalidation "k8s.io/kubernetes/pkg/apis/extensions/validation" "k8s.io/kubernetes/pkg/registry/extensions/deployment" ) @@ -63,7 +68,6 @@ type REST struct { // NewREST returns a RESTStorage object that will work against deployments. func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST, *RollbackREST) { store := &genericregistry.Store{ - Copier: api.Scheme, NewFunc: func() runtime.Object { return &extensions.Deployment{} }, NewListFunc: func() runtime.Object { return &extensions.DeploymentList{} }, DefaultQualifiedResource: extensions.Resource("deployments"), @@ -113,8 +117,8 @@ func (r *StatusREST) Get(ctx genericapirequest.Context, name string, options *me } // Update alters the status subset of an object. -func (r *StatusREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) { - return r.store.Update(ctx, name, objInfo) +func (r *StatusREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (runtime.Object, bool, error) { + return r.store.Update(ctx, name, objInfo, createValidation, updateValidation) } // RollbackREST implements the REST endpoint for initiating the rollback of a deployment @@ -129,7 +133,7 @@ func (r *RollbackREST) New() runtime.Object { var _ = rest.Creater(&RollbackREST{}) -func (r *RollbackREST) Create(ctx genericapirequest.Context, obj runtime.Object, includeUninitialized bool) (runtime.Object, error) { +func (r *RollbackREST) Create(ctx genericapirequest.Context, obj runtime.Object, createValidation rest.ValidateObjectFunc, includeUninitialized bool) (runtime.Object, error) { rollback, ok := obj.(*extensions.DeploymentRollback) if !ok { return nil, errors.NewBadRequest(fmt.Sprintf("not a DeploymentRollback: %#v", obj)) @@ -195,13 +199,22 @@ type ScaleREST struct { var _ = rest.Patcher(&ScaleREST{}) var _ = rest.GroupVersionKindProvider(&ScaleREST{}) -func (r *ScaleREST) GroupVersionKind() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "extensions", Version: "v1beta1", Kind: "Scale"} +func (r *ScaleREST) GroupVersionKind(containingGV schema.GroupVersion) schema.GroupVersionKind { + switch containingGV { + case extensionsv1beta1.SchemeGroupVersion: + return extensionsv1beta1.SchemeGroupVersion.WithKind("Scale") + case appsv1beta1.SchemeGroupVersion: + return appsv1beta1.SchemeGroupVersion.WithKind("Scale") + case appsv1beta2.SchemeGroupVersion: + return appsv1beta2.SchemeGroupVersion.WithKind("Scale") + default: + return autoscalingv1.SchemeGroupVersion.WithKind("Scale") + } } // New creates a new Scale object func (r *ScaleREST) New() runtime.Object { - return &extensions.Scale{} + return &autoscaling.Scale{} } func (r *ScaleREST) Get(ctx genericapirequest.Context, name string, options *metav1.GetOptions) (runtime.Object, error) { @@ -216,7 +229,7 @@ func (r *ScaleREST) Get(ctx genericapirequest.Context, name string, options *met return scale, nil } -func (r *ScaleREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) { +func (r *ScaleREST) 
Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (runtime.Object, bool, error) { deployment, err := r.registry.GetDeployment(ctx, name, &metav1.GetOptions{}) if err != nil { return nil, false, errors.NewNotFound(extensions.Resource("deployments/scale"), name) @@ -234,18 +247,18 @@ func (r *ScaleREST) Update(ctx genericapirequest.Context, name string, objInfo r if obj == nil { return nil, false, errors.NewBadRequest(fmt.Sprintf("nil update passed to Scale")) } - scale, ok := obj.(*extensions.Scale) + scale, ok := obj.(*autoscaling.Scale) if !ok { return nil, false, errors.NewBadRequest(fmt.Sprintf("expected input object type to be Scale, but %T", obj)) } - if errs := extvalidation.ValidateScale(scale); len(errs) > 0 { + if errs := autoscalingvalidation.ValidateScale(scale); len(errs) > 0 { return nil, false, errors.NewInvalid(extensions.Kind("Scale"), name, errs) } deployment.Spec.Replicas = scale.Spec.Replicas deployment.ResourceVersion = scale.ResourceVersion - deployment, err = r.registry.UpdateDeployment(ctx, deployment) + deployment, err = r.registry.UpdateDeployment(ctx, deployment, createValidation, updateValidation) if err != nil { return nil, false, err } @@ -257,8 +270,12 @@ func (r *ScaleREST) Update(ctx genericapirequest.Context, name string, objInfo r } // scaleFromDeployment returns a scale subresource for a deployment. -func scaleFromDeployment(deployment *extensions.Deployment) (*extensions.Scale, error) { - return &extensions.Scale{ +func scaleFromDeployment(deployment *extensions.Deployment) (*autoscaling.Scale, error) { + selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) + if err != nil { + return nil, err + } + return &autoscaling.Scale{ // TODO: Create a variant of ObjectMeta type that only contains the fields below. ObjectMeta: metav1.ObjectMeta{ Name: deployment.Name, @@ -267,12 +284,12 @@ func scaleFromDeployment(deployment *extensions.Deployment) (*extensions.Scale, ResourceVersion: deployment.ResourceVersion, CreationTimestamp: deployment.CreationTimestamp, }, - Spec: extensions.ScaleSpec{ + Spec: autoscaling.ScaleSpec{ Replicas: deployment.Spec.Replicas, }, - Status: extensions.ScaleStatus{ + Status: autoscaling.ScaleStatus{ Replicas: deployment.Status.Replicas, - Selector: deployment.Spec.Selector, + Selector: selector.String(), }, }, nil } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/deployment/strategy.go b/vendor/k8s.io/kubernetes/pkg/registry/extensions/deployment/strategy.go index 0b898fcc044d..29fcd750aac3 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/deployment/strategy.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/extensions/deployment/strategy.go @@ -17,8 +17,6 @@ limitations under the License. package deployment import ( - "fmt" - appsv1beta1 "k8s.io/api/apps/v1beta1" appsv1beta2 "k8s.io/api/apps/v1beta2" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" @@ -30,7 +28,7 @@ import ( genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/apiserver/pkg/storage/names" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/api/pod" "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/apis/extensions/validation" @@ -44,11 +42,20 @@ type deploymentStrategy struct { // Strategy is the default logic that applies when creating and updating Deployment // objects via the REST API. 
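scaleFromDeployment above (like scaleFromRC earlier) now stores the selector in the Scale status as a string, produced by metav1.LabelSelectorAsSelector(...).String(), instead of a label map or LabelSelector. For plain matchLabels that string is the comma-separated key=value form. A stdlib-only sketch of roughly that conversion; the real helpers also handle matchExpressions, which this deliberately skips:

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// selectorString renders a matchLabels map as sorted "key=value" pairs joined
// by commas. This is only an approximation of what the vendored code gets from
// metav1.LabelSelectorAsSelector(...).String() in the equality-only case;
// set-based requirements (matchExpressions) are not handled here.
func selectorString(matchLabels map[string]string) string {
	keys := make([]string, 0, len(matchLabels))
	for k := range matchLabels {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	pairs := make([]string, 0, len(keys))
	for _, k := range keys {
		pairs = append(pairs, k+"="+matchLabels[k])
	}
	return strings.Join(pairs, ",")
}

func main() {
	// Illustrative labels; not taken from the patch.
	fmt.Println(selectorString(map[string]string{"app": "nginx", "tier": "frontend"}))
	// prints: app=nginx,tier=frontend
}
```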
-var Strategy = deploymentStrategy{api.Scheme, names.SimpleNameGenerator} +var Strategy = deploymentStrategy{legacyscheme.Scheme, names.SimpleNameGenerator} -// DefaultGarbageCollectionPolicy returns Orphan because that's the default -// behavior before the server-side garbage collection is implemented. -func (deploymentStrategy) DefaultGarbageCollectionPolicy() rest.GarbageCollectionPolicy { +// DefaultGarbageCollectionPolicy returns OrphanDependents by default. For apps/v1, returns DeleteDependents. +func (deploymentStrategy) DefaultGarbageCollectionPolicy(ctx genericapirequest.Context) rest.GarbageCollectionPolicy { + if requestInfo, found := genericapirequest.RequestInfoFrom(ctx); found { + groupVersion := schema.GroupVersion{Group: requestInfo.APIGroup, Version: requestInfo.APIVersion} + switch groupVersion { + case extensionsv1beta1.SchemeGroupVersion, appsv1beta1.SchemeGroupVersion, appsv1beta2.SchemeGroupVersion: + // for back compatibility + return rest.OrphanDependents + default: + return rest.DeleteDependents + } + } return rest.OrphanDependents } @@ -113,15 +120,11 @@ func (deploymentStrategy) ValidateUpdate(ctx genericapirequest.Context, obj, old if requestInfo, found := genericapirequest.RequestInfoFrom(ctx); found { groupVersion := schema.GroupVersion{Group: requestInfo.APIGroup, Version: requestInfo.APIVersion} switch groupVersion { - case appsv1beta1.SchemeGroupVersion: - // no-op for compatibility - case extensionsv1beta1.SchemeGroupVersion: + case appsv1beta1.SchemeGroupVersion, extensionsv1beta1.SchemeGroupVersion: // no-op for compatibility - case appsv1beta2.SchemeGroupVersion: + default: // disallow mutation of selector allErrs = append(allErrs, apivalidation.ValidateImmutableField(newDeployment.Spec.Selector, oldDeployment.Spec.Selector, field.NewPath("spec").Child("selector"))...) 
- default: - panic(fmt.Sprintf("unexpected group/version: %v", groupVersion)) } } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/ingress/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/extensions/ingress/BUILD index 838e89271718..277a12e4a8a3 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/ingress/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/extensions/ingress/BUILD @@ -12,8 +12,9 @@ go_library( "doc.go", "strategy.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/extensions/ingress", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/extensions:go_default_library", "//pkg/apis/extensions/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", @@ -27,9 +28,10 @@ go_library( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/extensions/ingress", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/ingress/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/extensions/ingress/storage/BUILD index de9ab5b2ed3a..cc12d254a9fc 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/ingress/storage/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/extensions/ingress/storage/BUILD @@ -9,9 +9,10 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/extensions/ingress/storage", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", "//pkg/registry/registrytest:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -20,6 +21,7 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/generic/testing:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/etcd/testing:go_default_library", ], ) @@ -27,8 +29,8 @@ go_test( go_library( name = "go_default_library", srcs = ["storage.go"], + importpath = "k8s.io/kubernetes/pkg/registry/extensions/ingress/storage", deps = [ - "//pkg/api:go_default_library", "//pkg/apis/extensions:go_default_library", "//pkg/printers:go_default_library", "//pkg/printers/internalversion:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/ingress/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/extensions/ingress/storage/storage.go index 8a5c75c4af6a..0c7273d8578b 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/ingress/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/extensions/ingress/storage/storage.go @@ -23,7 +23,6 @@ import ( "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/printers" printersinternal "k8s.io/kubernetes/pkg/printers/internalversion" @@ -39,7 +38,6 @@ type REST struct { // NewREST returns a RESTStorage object that 
will work against replication controllers. func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { store := &genericregistry.Store{ - Copier: api.Scheme, NewFunc: func() runtime.Object { return &extensions.Ingress{} }, NewListFunc: func() runtime.Object { return &extensions.IngressList{} }, DefaultQualifiedResource: extensions.Resource("ingresses"), @@ -83,6 +81,6 @@ func (r *StatusREST) Get(ctx genericapirequest.Context, name string, options *me } // Update alters the status subset of an object. -func (r *StatusREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) { - return r.store.Update(ctx, name, objInfo) +func (r *StatusREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (runtime.Object, bool, error) { + return r.store.Update(ctx, name, objInfo, createValidation, updateValidation) } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/ingress/strategy.go b/vendor/k8s.io/kubernetes/pkg/registry/extensions/ingress/strategy.go index 41edf5bc7f48..e970d105c2f3 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/ingress/strategy.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/extensions/ingress/strategy.go @@ -22,7 +22,7 @@ import ( "k8s.io/apimachinery/pkg/util/validation/field" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/storage/names" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/apis/extensions/validation" ) @@ -34,7 +34,7 @@ type ingressStrategy struct { } // Strategy is the default logic that applies when creating and updating Replication Ingress objects. -var Strategy = ingressStrategy{api.Scheme, names.SimpleNameGenerator} +var Strategy = ingressStrategy{legacyscheme.Scheme, names.SimpleNameGenerator} // NamespaceScoped returns true because all Ingress' need to be within a namespace. 
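The deployment strategy hunk near the top of this block makes DefaultGarbageCollectionPolicy request-scoped: orphaning is kept only for the extensions/v1beta1, apps/v1beta1 and apps/v1beta2 endpoints, and anything newer defaults to deleting dependents. A minimal sketch of that dispatch follows (not part of the patch; garbageCollectionPolicy, requestGV and gcPolicyFor are local stand-ins for the vendored rest and request-info types, not their real definitions):

package main

import "fmt"

// Local stand-ins for the vendored rest.GarbageCollectionPolicy values and for
// the group/version recovered via genericapirequest.RequestInfoFrom(ctx).
type garbageCollectionPolicy string

const (
	orphanDependents garbageCollectionPolicy = "OrphanDependents"
	deleteDependents garbageCollectionPolicy = "DeleteDependents"
)

type requestGV struct{ Group, Version string }

// gcPolicyFor mirrors the dispatch in the hunk: the legacy beta endpoints keep
// the orphaning default for back compatibility, newer ones delete dependents.
func gcPolicyFor(gv requestGV, found bool) garbageCollectionPolicy {
	if !found {
		// No request info on the context: fall back to the old default.
		return orphanDependents
	}
	switch gv {
	case requestGV{"extensions", "v1beta1"},
		requestGV{"apps", "v1beta1"},
		requestGV{"apps", "v1beta2"}:
		return orphanDependents
	default:
		return deleteDependents // e.g. apps/v1
	}
}

func main() {
	fmt.Println(gcPolicyFor(requestGV{"apps", "v1"}, true))      // DeleteDependents
	fmt.Println(gcPolicyFor(requestGV{"apps", "v1beta2"}, true)) // OrphanDependents
}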
func (ingressStrategy) NamespaceScoped() bool { diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/podsecuritypolicy/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/extensions/podsecuritypolicy/BUILD index c0956317452b..eadcc39d2f62 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/podsecuritypolicy/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/extensions/podsecuritypolicy/BUILD @@ -11,8 +11,9 @@ go_library( "doc.go", "strategy.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/extensions/podsecuritypolicy", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/extensions:go_default_library", "//pkg/apis/extensions/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/podsecuritypolicy/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/extensions/podsecuritypolicy/storage/BUILD index 0118f0011280..b57880b72fa3 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/podsecuritypolicy/storage/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/extensions/podsecuritypolicy/storage/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/extensions/podsecuritypolicy/storage", library = ":go_default_library", deps = [ "//pkg/apis/extensions:go_default_library", @@ -19,6 +20,7 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/generic/testing:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/etcd/testing:go_default_library", ], ) @@ -26,8 +28,8 @@ go_test( go_library( name = "go_default_library", srcs = ["storage.go"], + importpath = "k8s.io/kubernetes/pkg/registry/extensions/podsecuritypolicy/storage", deps = [ - "//pkg/api:go_default_library", "//pkg/apis/extensions:go_default_library", "//pkg/registry/extensions/podsecuritypolicy:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/podsecuritypolicy/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/extensions/podsecuritypolicy/storage/storage.go index 6316f251c5e7..ff5382974615 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/podsecuritypolicy/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/extensions/podsecuritypolicy/storage/storage.go @@ -20,7 +20,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/registry/extensions/podsecuritypolicy" ) @@ -33,7 +32,6 @@ type REST struct { // NewREST returns a RESTStorage object that will work against PodSecurityPolicy objects. 
func NewREST(optsGetter generic.RESTOptionsGetter) *REST { store := &genericregistry.Store{ - Copier: api.Scheme, NewFunc: func() runtime.Object { return &extensions.PodSecurityPolicy{} }, NewListFunc: func() runtime.Object { return &extensions.PodSecurityPolicyList{} }, DefaultQualifiedResource: extensions.Resource("podsecuritypolicies"), diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/podsecuritypolicy/strategy.go b/vendor/k8s.io/kubernetes/pkg/registry/extensions/podsecuritypolicy/strategy.go index 130b90d338b6..118b6914684c 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/podsecuritypolicy/strategy.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/extensions/podsecuritypolicy/strategy.go @@ -22,7 +22,7 @@ import ( genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/apiserver/pkg/storage/names" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/apis/extensions/validation" ) @@ -35,7 +35,7 @@ type strategy struct { // Strategy is the default logic that applies when creating and updating PodSecurityPolicy // objects via the REST API. -var Strategy = strategy{api.Scheme, names.SimpleNameGenerator} +var Strategy = strategy{legacyscheme.Scheme, names.SimpleNameGenerator} var _ = rest.RESTCreateStrategy(Strategy) diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/replicaset/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/extensions/replicaset/BUILD index 6935e7000393..fd4e7eaa367c 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/replicaset/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/extensions/replicaset/BUILD @@ -13,8 +13,9 @@ go_library( "registry.go", "strategy.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/extensions/replicaset", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/api/pod:go_default_library", "//pkg/apis/extensions:go_default_library", "//pkg/apis/extensions/validation:go_default_library", @@ -41,13 +42,15 @@ go_library( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/extensions/replicaset", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/replicaset/registry.go b/vendor/k8s.io/kubernetes/pkg/registry/extensions/replicaset/registry.go index 87e01d8e0f41..029631eb68e0 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/replicaset/registry.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/extensions/replicaset/registry.go @@ -26,7 +26,6 @@ import ( "k8s.io/apimachinery/pkg/watch" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/extensions" ) @@ -35,8 +34,8 @@ type Registry interface { ListReplicaSets(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (*extensions.ReplicaSetList, error) WatchReplicaSets(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (watch.Interface, 
error) GetReplicaSet(ctx genericapirequest.Context, replicaSetID string, options *metav1.GetOptions) (*extensions.ReplicaSet, error) - CreateReplicaSet(ctx genericapirequest.Context, replicaSet *extensions.ReplicaSet) (*extensions.ReplicaSet, error) - UpdateReplicaSet(ctx genericapirequest.Context, replicaSet *extensions.ReplicaSet) (*extensions.ReplicaSet, error) + CreateReplicaSet(ctx genericapirequest.Context, replicaSet *extensions.ReplicaSet, createValidation rest.ValidateObjectFunc) (*extensions.ReplicaSet, error) + UpdateReplicaSet(ctx genericapirequest.Context, replicaSet *extensions.ReplicaSet, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (*extensions.ReplicaSet, error) DeleteReplicaSet(ctx genericapirequest.Context, replicaSetID string) error } @@ -74,16 +73,16 @@ func (s *storage) GetReplicaSet(ctx genericapirequest.Context, replicaSetID stri return obj.(*extensions.ReplicaSet), nil } -func (s *storage) CreateReplicaSet(ctx genericapirequest.Context, replicaSet *extensions.ReplicaSet) (*extensions.ReplicaSet, error) { - obj, err := s.Create(ctx, replicaSet, false) +func (s *storage) CreateReplicaSet(ctx genericapirequest.Context, replicaSet *extensions.ReplicaSet, createValidation rest.ValidateObjectFunc) (*extensions.ReplicaSet, error) { + obj, err := s.Create(ctx, replicaSet, createValidation, false) if err != nil { return nil, err } return obj.(*extensions.ReplicaSet), nil } -func (s *storage) UpdateReplicaSet(ctx genericapirequest.Context, replicaSet *extensions.ReplicaSet) (*extensions.ReplicaSet, error) { - obj, _, err := s.Update(ctx, replicaSet.Name, rest.DefaultUpdatedObjectInfo(replicaSet, api.Scheme)) +func (s *storage) UpdateReplicaSet(ctx genericapirequest.Context, replicaSet *extensions.ReplicaSet, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (*extensions.ReplicaSet, error) { + obj, _, err := s.Update(ctx, replicaSet.Name, rest.DefaultUpdatedObjectInfo(replicaSet), createValidation, updateValidation) if err != nil { return nil, err } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/replicaset/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/extensions/replicaset/storage/BUILD index 5f70f3e5c1d8..77500a739a8c 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/replicaset/storage/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/extensions/replicaset/storage/BUILD @@ -9,9 +9,11 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/extensions/replicaset/storage", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/autoscaling:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", "//pkg/registry/registrytest:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", @@ -23,6 +25,7 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/generic/testing:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/etcd/testing:go_default_library", ], @@ -31,10 +34,15 @@ go_test( go_library( name = "go_default_library", srcs = ["storage.go"], + importpath = 
"k8s.io/kubernetes/pkg/registry/extensions/replicaset/storage", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/apps/v1beta1:go_default_library", + "//pkg/apis/apps/v1beta2:go_default_library", + "//pkg/apis/autoscaling:go_default_library", + "//pkg/apis/autoscaling/v1:go_default_library", + "//pkg/apis/autoscaling/validation:go_default_library", "//pkg/apis/extensions:go_default_library", - "//pkg/apis/extensions/validation:go_default_library", + "//pkg/apis/extensions/v1beta1:go_default_library", "//pkg/printers:go_default_library", "//pkg/printers/internalversion:go_default_library", "//pkg/printers/storage:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/replicaset/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/extensions/replicaset/storage/storage.go index 5c799e82ba7e..893e66390da8 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/replicaset/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/extensions/replicaset/storage/storage.go @@ -29,9 +29,13 @@ import ( "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" + appsv1beta1 "k8s.io/kubernetes/pkg/apis/apps/v1beta1" + appsv1beta2 "k8s.io/kubernetes/pkg/apis/apps/v1beta2" + "k8s.io/kubernetes/pkg/apis/autoscaling" + autoscalingv1 "k8s.io/kubernetes/pkg/apis/autoscaling/v1" + autoscalingvalidation "k8s.io/kubernetes/pkg/apis/autoscaling/validation" "k8s.io/kubernetes/pkg/apis/extensions" - extvalidation "k8s.io/kubernetes/pkg/apis/extensions/validation" + extensionsv1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" "k8s.io/kubernetes/pkg/printers" printersinternal "k8s.io/kubernetes/pkg/printers/internalversion" printerstorage "k8s.io/kubernetes/pkg/printers/storage" @@ -63,7 +67,6 @@ type REST struct { // NewREST returns a RESTStorage object that will work against ReplicaSet. func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { store := &genericregistry.Store{ - Copier: api.Scheme, NewFunc: func() runtime.Object { return &extensions.ReplicaSet{} }, NewListFunc: func() runtime.Object { return &extensions.ReplicaSetList{} }, PredicateFunc: replicaset.MatchReplicaSet, @@ -117,8 +120,8 @@ func (r *StatusREST) Get(ctx genericapirequest.Context, name string, options *me } // Update alters the status subset of an object. 
-func (r *StatusREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) { - return r.store.Update(ctx, name, objInfo) +func (r *StatusREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (runtime.Object, bool, error) { + return r.store.Update(ctx, name, objInfo, createValidation, updateValidation) } type ScaleREST struct { @@ -129,13 +132,22 @@ type ScaleREST struct { var _ = rest.Patcher(&ScaleREST{}) var _ = rest.GroupVersionKindProvider(&ScaleREST{}) -func (r *ScaleREST) GroupVersionKind() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "extensions", Version: "v1beta1", Kind: "Scale"} +func (r *ScaleREST) GroupVersionKind(containingGV schema.GroupVersion) schema.GroupVersionKind { + switch containingGV { + case extensionsv1beta1.SchemeGroupVersion: + return extensionsv1beta1.SchemeGroupVersion.WithKind("Scale") + case appsv1beta1.SchemeGroupVersion: + return appsv1beta1.SchemeGroupVersion.WithKind("Scale") + case appsv1beta2.SchemeGroupVersion: + return appsv1beta2.SchemeGroupVersion.WithKind("Scale") + default: + return autoscalingv1.SchemeGroupVersion.WithKind("Scale") + } } // New creates a new Scale object func (r *ScaleREST) New() runtime.Object { - return &extensions.Scale{} + return &autoscaling.Scale{} } func (r *ScaleREST) Get(ctx genericapirequest.Context, name string, options *metav1.GetOptions) (runtime.Object, error) { @@ -150,7 +162,7 @@ func (r *ScaleREST) Get(ctx genericapirequest.Context, name string, options *met return scale, err } -func (r *ScaleREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) { +func (r *ScaleREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (runtime.Object, bool, error) { rs, err := r.registry.GetReplicaSet(ctx, name, &metav1.GetOptions{}) if err != nil { return nil, false, errors.NewNotFound(extensions.Resource("replicasets/scale"), name) @@ -161,6 +173,7 @@ func (r *ScaleREST) Update(ctx genericapirequest.Context, name string, objInfo r return nil, false, err } + // TODO: should this pass admission? obj, err := objInfo.UpdatedObject(ctx, oldScale) if err != nil { return nil, false, err @@ -168,18 +181,18 @@ func (r *ScaleREST) Update(ctx genericapirequest.Context, name string, objInfo r if obj == nil { return nil, false, errors.NewBadRequest(fmt.Sprintf("nil update passed to Scale")) } - scale, ok := obj.(*extensions.Scale) + scale, ok := obj.(*autoscaling.Scale) if !ok { return nil, false, errors.NewBadRequest(fmt.Sprintf("wrong object passed to Scale update: %v", obj)) } - if errs := extvalidation.ValidateScale(scale); len(errs) > 0 { + if errs := autoscalingvalidation.ValidateScale(scale); len(errs) > 0 { return nil, false, errors.NewInvalid(extensions.Kind("Scale"), scale.Name, errs) } rs.Spec.Replicas = scale.Spec.Replicas rs.ResourceVersion = scale.ResourceVersion - rs, err = r.registry.UpdateReplicaSet(ctx, rs) + rs, err = r.registry.UpdateReplicaSet(ctx, rs, createValidation, updateValidation) if err != nil { return nil, false, err } @@ -191,8 +204,12 @@ func (r *ScaleREST) Update(ctx genericapirequest.Context, name string, objInfo r } // scaleFromReplicaSet returns a scale subresource for a replica set. 
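The ScaleREST hunk above now reports the Scale kind in the group/version of the endpoint that served it, falling back to autoscaling/v1 for anything else (notably apps/v1). A condensed sketch of that dispatch, assuming only k8s.io/apimachinery/pkg/runtime/schema; scaleKindFor and the group/version variables are illustrative names, not the vendored SchemeGroupVersion symbols:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

var (
	extensionsV1beta1 = schema.GroupVersion{Group: "extensions", Version: "v1beta1"}
	appsV1beta1       = schema.GroupVersion{Group: "apps", Version: "v1beta1"}
	appsV1beta2       = schema.GroupVersion{Group: "apps", Version: "v1beta2"}
	autoscalingV1     = schema.GroupVersion{Group: "autoscaling", Version: "v1"}
)

// scaleKindFor mirrors GroupVersionKind above: a Scale served from one of the
// beta endpoints is reported in that endpoint's group/version, everything else
// reports autoscaling/v1 Scale.
func scaleKindFor(containingGV schema.GroupVersion) schema.GroupVersionKind {
	switch containingGV {
	case extensionsV1beta1, appsV1beta1, appsV1beta2:
		return containingGV.WithKind("Scale")
	default:
		return autoscalingV1.WithKind("Scale")
	}
}

func main() {
	fmt.Println(scaleKindFor(schema.GroupVersion{Group: "apps", Version: "v1"}))
	fmt.Println(scaleKindFor(extensionsV1beta1))
}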
-func scaleFromReplicaSet(rs *extensions.ReplicaSet) (*extensions.Scale, error) { - return &extensions.Scale{ +func scaleFromReplicaSet(rs *extensions.ReplicaSet) (*autoscaling.Scale, error) { + selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector) + if err != nil { + return nil, err + } + return &autoscaling.Scale{ // TODO: Create a variant of ObjectMeta type that only contains the fields below. ObjectMeta: metav1.ObjectMeta{ Name: rs.Name, @@ -201,12 +218,12 @@ func scaleFromReplicaSet(rs *extensions.ReplicaSet) (*extensions.Scale, error) { ResourceVersion: rs.ResourceVersion, CreationTimestamp: rs.CreationTimestamp, }, - Spec: extensions.ScaleSpec{ + Spec: autoscaling.ScaleSpec{ Replicas: rs.Spec.Replicas, }, - Status: extensions.ScaleStatus{ + Status: autoscaling.ScaleStatus{ Replicas: rs.Status.Replicas, - Selector: rs.Spec.Selector, + Selector: selector.String(), }, }, nil } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/replicaset/strategy.go b/vendor/k8s.io/kubernetes/pkg/registry/extensions/replicaset/strategy.go index 1bd5bf5181e4..fac1f2a5a0b2 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/replicaset/strategy.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/extensions/replicaset/strategy.go @@ -36,7 +36,7 @@ import ( "k8s.io/apiserver/pkg/registry/rest" apistorage "k8s.io/apiserver/pkg/storage" "k8s.io/apiserver/pkg/storage/names" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/api/pod" "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/apis/extensions/validation" @@ -49,11 +49,20 @@ type rsStrategy struct { } // Strategy is the default logic that applies when creating and updating ReplicaSet objects. -var Strategy = rsStrategy{api.Scheme, names.SimpleNameGenerator} +var Strategy = rsStrategy{legacyscheme.Scheme, names.SimpleNameGenerator} -// DefaultGarbageCollectionPolicy returns Orphan because that's the default -// behavior before the server-side garbage collection is implemented. -func (rsStrategy) DefaultGarbageCollectionPolicy() rest.GarbageCollectionPolicy { +// DefaultGarbageCollectionPolicy returns OrphanDependents by default. For apps/v1, returns DeleteDependents. +func (rsStrategy) DefaultGarbageCollectionPolicy(ctx genericapirequest.Context) rest.GarbageCollectionPolicy { + if requestInfo, found := genericapirequest.RequestInfoFrom(ctx); found { + groupVersion := schema.GroupVersion{Group: requestInfo.APIGroup, Version: requestInfo.APIVersion} + switch groupVersion { + case extensionsv1beta1.SchemeGroupVersion, appsv1beta2.SchemeGroupVersion: + // for back compatibility + return rest.OrphanDependents + default: + return rest.DeleteDependents + } + } return rest.OrphanDependents } @@ -127,11 +136,9 @@ func (rsStrategy) ValidateUpdate(ctx genericapirequest.Context, obj, old runtime switch groupVersion { case extensionsv1beta1.SchemeGroupVersion: // no-op for compatibility - case appsv1beta2.SchemeGroupVersion: + default: // disallow mutation of selector allErrs = append(allErrs, apivalidation.ValidateImmutableField(newReplicaSet.Spec.Selector, oldReplicaSet.Spec.Selector, field.NewPath("spec").Child("selector"))...) 
- default: - panic(fmt.Sprintf("unexpected group/version: %v", groupVersion)) } } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/rest/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/extensions/rest/BUILD index cbb04d444ade..c4e43a547e5a 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/rest/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/extensions/rest/BUILD @@ -8,8 +8,9 @@ load( go_library( name = "go_default_library", srcs = ["storage_extensions.go"], + importpath = "k8s.io/kubernetes/pkg/registry/extensions/rest", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/extensions:go_default_library", "//pkg/registry/extensions/controller/storage:go_default_library", "//pkg/registry/extensions/daemonset/storage:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/rest/storage_extensions.go b/vendor/k8s.io/kubernetes/pkg/registry/extensions/rest/storage_extensions.go index 57e85b3a6f86..6de5b96233a6 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/rest/storage_extensions.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/extensions/rest/storage_extensions.go @@ -22,7 +22,7 @@ import ( "k8s.io/apiserver/pkg/registry/rest" genericapiserver "k8s.io/apiserver/pkg/server" serverstorage "k8s.io/apiserver/pkg/server/storage" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/extensions" expcontrollerstore "k8s.io/kubernetes/pkg/registry/extensions/controller/storage" daemonstore "k8s.io/kubernetes/pkg/registry/extensions/daemonset/storage" @@ -37,7 +37,7 @@ type RESTStorageProvider struct { } func (p RESTStorageProvider) NewRESTStorage(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter) (genericapiserver.APIGroupInfo, bool) { - apiGroupInfo := genericapiserver.NewDefaultAPIGroupInfo(extensions.GroupName, api.Registry, api.Scheme, api.ParameterCodec, api.Codecs) + apiGroupInfo := genericapiserver.NewDefaultAPIGroupInfo(extensions.GroupName, legacyscheme.Registry, legacyscheme.Scheme, legacyscheme.ParameterCodec, legacyscheme.Codecs) // If you add a version here, be sure to add an entry in `k8s.io/kubernetes/cmd/kube-apiserver/app/aggregator.go with specific priorities. 
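Backing up to scaleFromReplicaSet above: the autoscaling scale status carries its selector as a string, so the ReplicaSet's *metav1.LabelSelector now goes through metav1.LabelSelectorAsSelector and String(), and a malformed selector surfaces as an error instead of being copied through. A small stand-alone illustration of that conversion (the selector values are made up):

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// A ReplicaSet-style label selector with both match forms.
	sel := &metav1.LabelSelector{
		MatchLabels: map[string]string{"app": "web"},
		MatchExpressions: []metav1.LabelSelectorRequirement{
			{Key: "tier", Operator: metav1.LabelSelectorOpIn, Values: []string{"frontend"}},
		},
	}
	labelSelector, err := metav1.LabelSelectorAsSelector(sel)
	if err != nil {
		// In the rewritten scaleFromReplicaSet this error propagates to the caller.
		panic(err)
	}
	fmt.Println(labelSelector.String()) // e.g. "app=web,tier in (frontend)"
}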
// TODO refactor the plumbing to provide the information in the APIGroupInfo diff --git a/vendor/k8s.io/kubernetes/pkg/registry/networking/networkpolicy/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/networking/networkpolicy/BUILD index 8479ea5f251c..f5e83616620e 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/networking/networkpolicy/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/networking/networkpolicy/BUILD @@ -12,8 +12,9 @@ go_library( "registry.go", "strategy.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/networking/networkpolicy", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/networking:go_default_library", "//pkg/apis/networking/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/networking/networkpolicy/registry.go b/vendor/k8s.io/kubernetes/pkg/registry/networking/networkpolicy/registry.go index 1efcf8ed2489..1ddcd2139f27 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/networking/networkpolicy/registry.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/networking/networkpolicy/registry.go @@ -22,15 +22,14 @@ import ( "k8s.io/apimachinery/pkg/watch" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/networking" ) // Registry is an interface for things that know how to store NetworkPolicies. type Registry interface { ListNetworkPolicies(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (*networking.NetworkPolicyList, error) - CreateNetworkPolicy(ctx genericapirequest.Context, np *networking.NetworkPolicy) error - UpdateNetworkPolicy(ctx genericapirequest.Context, np *networking.NetworkPolicy) error + CreateNetworkPolicy(ctx genericapirequest.Context, np *networking.NetworkPolicy, createValidation rest.ValidateObjectFunc) error + UpdateNetworkPolicy(ctx genericapirequest.Context, np *networking.NetworkPolicy, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) error GetNetworkPolicy(ctx genericapirequest.Context, name string, options *metav1.GetOptions) (*networking.NetworkPolicy, error) DeleteNetworkPolicy(ctx genericapirequest.Context, name string) error WatchNetworkPolicies(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (watch.Interface, error) @@ -56,13 +55,13 @@ func (s *storage) ListNetworkPolicies(ctx genericapirequest.Context, options *me return obj.(*networking.NetworkPolicyList), nil } -func (s *storage) CreateNetworkPolicy(ctx genericapirequest.Context, np *networking.NetworkPolicy) error { - _, err := s.Create(ctx, np, false) +func (s *storage) CreateNetworkPolicy(ctx genericapirequest.Context, np *networking.NetworkPolicy, createValidation rest.ValidateObjectFunc) error { + _, err := s.Create(ctx, np, createValidation, false) return err } -func (s *storage) UpdateNetworkPolicy(ctx genericapirequest.Context, np *networking.NetworkPolicy) error { - _, _, err := s.Update(ctx, np.Name, rest.DefaultUpdatedObjectInfo(np, api.Scheme)) +func (s *storage) UpdateNetworkPolicy(ctx genericapirequest.Context, np *networking.NetworkPolicy, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) error { + _, _, err := s.Update(ctx, np.Name, rest.DefaultUpdatedObjectInfo(np), createValidation, updateValidation) return err } diff --git 
a/vendor/k8s.io/kubernetes/pkg/registry/networking/networkpolicy/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/networking/networkpolicy/storage/BUILD index bef04d6344c9..1570fd609d4f 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/networking/networkpolicy/storage/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/networking/networkpolicy/storage/BUILD @@ -3,13 +3,14 @@ package(default_visibility = ["//visibility:public"]) load( "@io_bazel_rules_go//go:def.bzl", "go_library", + "go_test", ) go_library( name = "go_default_library", srcs = ["storage.go"], + importpath = "k8s.io/kubernetes/pkg/registry/networking/networkpolicy/storage", deps = [ - "//pkg/api:go_default_library", "//pkg/apis/networking:go_default_library", "//pkg/registry/networking/networkpolicy:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", @@ -31,3 +32,23 @@ filegroup( srcs = [":package-srcs"], tags = ["automanaged"], ) + +go_test( + name = "go_default_test", + srcs = ["storage_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/networking/networkpolicy/storage", + library = ":go_default_library", + deps = [ + "//pkg/apis/core:go_default_library", + "//pkg/apis/networking:go_default_library", + "//pkg/registry/registrytest:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/generic/testing:go_default_library", + "//vendor/k8s.io/apiserver/pkg/storage/etcd/testing:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/registry/networking/networkpolicy/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/networking/networkpolicy/storage/storage.go index 7e26cd9e0a16..96e51c95b1f2 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/networking/networkpolicy/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/networking/networkpolicy/storage/storage.go @@ -21,7 +21,6 @@ import ( "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" networkingapi "k8s.io/kubernetes/pkg/apis/networking" "k8s.io/kubernetes/pkg/registry/networking/networkpolicy" ) @@ -34,7 +33,6 @@ type REST struct { // NewREST returns a RESTStorage object that will work against NetworkPolicies func NewREST(optsGetter generic.RESTOptionsGetter) *REST { store := &genericregistry.Store{ - Copier: api.Scheme, NewFunc: func() runtime.Object { return &networkingapi.NetworkPolicy{} }, NewListFunc: func() runtime.Object { return &networkingapi.NetworkPolicyList{} }, DefaultQualifiedResource: networkingapi.Resource("networkpolicies"), diff --git a/vendor/k8s.io/kubernetes/pkg/registry/networking/networkpolicy/strategy.go b/vendor/k8s.io/kubernetes/pkg/registry/networking/networkpolicy/strategy.go index fe66f6f62e20..16e0b54a6234 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/networking/networkpolicy/strategy.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/networking/networkpolicy/strategy.go @@ -23,7 +23,7 @@ import ( "k8s.io/apimachinery/pkg/util/validation/field" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/storage/names" - 
"k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/networking" "k8s.io/kubernetes/pkg/apis/networking/validation" ) @@ -35,7 +35,7 @@ type networkPolicyStrategy struct { } // Strategy is the default logic that applies when creating and updating NetworkPolicy objects. -var Strategy = networkPolicyStrategy{api.Scheme, names.SimpleNameGenerator} +var Strategy = networkPolicyStrategy{legacyscheme.Scheme, names.SimpleNameGenerator} // NamespaceScoped returns true because all NetworkPolicies need to be within a namespace. func (networkPolicyStrategy) NamespaceScoped() bool { diff --git a/vendor/k8s.io/kubernetes/pkg/registry/networking/rest/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/networking/rest/BUILD index 34b6c7889f4d..699702bb157b 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/networking/rest/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/networking/rest/BUILD @@ -8,8 +8,9 @@ load( go_library( name = "go_default_library", srcs = ["storage_settings.go"], + importpath = "k8s.io/kubernetes/pkg/registry/networking/rest", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/networking:go_default_library", "//pkg/registry/networking/networkpolicy/storage:go_default_library", "//vendor/k8s.io/api/networking/v1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/networking/rest/storage_settings.go b/vendor/k8s.io/kubernetes/pkg/registry/networking/rest/storage_settings.go index 4f6c42170aff..e0489bcff577 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/networking/rest/storage_settings.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/networking/rest/storage_settings.go @@ -22,7 +22,7 @@ import ( "k8s.io/apiserver/pkg/registry/rest" genericapiserver "k8s.io/apiserver/pkg/server" serverstorage "k8s.io/apiserver/pkg/server/storage" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/networking" networkpolicystore "k8s.io/kubernetes/pkg/registry/networking/networkpolicy/storage" ) @@ -30,7 +30,7 @@ import ( type RESTStorageProvider struct{} func (p RESTStorageProvider) NewRESTStorage(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter) (genericapiserver.APIGroupInfo, bool) { - apiGroupInfo := genericapiserver.NewDefaultAPIGroupInfo(networking.GroupName, api.Registry, api.Scheme, api.ParameterCodec, api.Codecs) + apiGroupInfo := genericapiserver.NewDefaultAPIGroupInfo(networking.GroupName, legacyscheme.Registry, legacyscheme.Scheme, legacyscheme.ParameterCodec, legacyscheme.Codecs) // If you add a version here, be sure to add an entry in `k8s.io/kubernetes/cmd/kube-apiserver/app/aggregator.go with specific priorities. 
// TODO refactor the plumbing to provide the information in the APIGroupInfo diff --git a/vendor/k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget/BUILD index f1ad0fb60336..e0be96921f52 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget/BUILD @@ -12,8 +12,9 @@ go_library( "doc.go", "strategy.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/policy:go_default_library", "//pkg/apis/policy/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", @@ -27,6 +28,7 @@ go_library( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget", library = ":go_default_library", deps = [ "//pkg/apis/policy:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget/storage/BUILD index 1a91e49d3ff0..a4ce1e3d40da 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget/storage/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget/storage/BUILD @@ -9,9 +9,9 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget/storage", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", "//pkg/apis/policy:go_default_library", "//pkg/registry/registrytest:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -20,6 +20,7 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/generic/testing:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/etcd/testing:go_default_library", ], @@ -28,8 +29,8 @@ go_test( go_library( name = "go_default_library", srcs = ["storage.go"], + importpath = "k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget/storage", deps = [ - "//pkg/api:go_default_library", "//pkg/apis/policy:go_default_library", "//pkg/printers:go_default_library", "//pkg/printers/internalversion:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget/storage/storage.go index 071ff01e3bdc..40aff589d1d8 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget/storage/storage.go @@ -23,7 +23,6 @@ import ( "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" policyapi "k8s.io/kubernetes/pkg/apis/policy" "k8s.io/kubernetes/pkg/printers" printersinternal "k8s.io/kubernetes/pkg/printers/internalversion" @@ -39,7 +38,6 @@ type REST struct { // NewREST returns a RESTStorage object that will work against pod disruption budgets. 
func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { store := &genericregistry.Store{ - Copier: api.Scheme, NewFunc: func() runtime.Object { return &policyapi.PodDisruptionBudget{} }, NewListFunc: func() runtime.Object { return &policyapi.PodDisruptionBudgetList{} }, DefaultQualifiedResource: policyapi.Resource("poddisruptionbudgets"), @@ -80,6 +78,6 @@ func (r *StatusREST) Get(ctx genericapirequest.Context, name string, options *me } // Update alters the status subset of an object. -func (r *StatusREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) { - return r.store.Update(ctx, name, objInfo) +func (r *StatusREST) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (runtime.Object, bool, error) { + return r.store.Update(ctx, name, objInfo, createValidation, updateValidation) } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget/strategy.go b/vendor/k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget/strategy.go index 43ef18c0a7a9..382dc92ab2cf 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget/strategy.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget/strategy.go @@ -22,7 +22,7 @@ import ( "k8s.io/apimachinery/pkg/util/validation/field" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/storage/names" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/policy" "k8s.io/kubernetes/pkg/apis/policy/validation" ) @@ -34,7 +34,7 @@ type podDisruptionBudgetStrategy struct { } // Strategy is the default logic that applies when creating and updating PodDisruptionBudget objects. -var Strategy = podDisruptionBudgetStrategy{api.Scheme, names.SimpleNameGenerator} +var Strategy = podDisruptionBudgetStrategy{legacyscheme.Scheme, names.SimpleNameGenerator} // NamespaceScoped returns true because all PodDisruptionBudget' need to be within a namespace. 
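A pattern repeated throughout these hunks is that Update on the generic store, and every StatusREST or typed registry wrapper around it, now accepts a create-validation and an update-validation callback and simply forwards them; only the store at the bottom actually invokes them. A toy sketch of that pass-through, using local stand-in types (object, validateObjectFunc, statusREST and friends are illustrative, not the vendored rest package):

package main

import (
	"errors"
	"fmt"
)

// Stand-ins modeled loosely on rest.ValidateObjectFunc / ValidateObjectUpdateFunc.
type object map[string]string

type validateObjectFunc func(obj object) error
type validateObjectUpdateFunc func(obj, old object) error

// store is a toy inner registry store that finally runs the callbacks.
type store struct{ current object }

func (s *store) update(name string, obj object, createValidation validateObjectFunc, updateValidation validateObjectUpdateFunc) (object, error) {
	if s.current == nil {
		if err := createValidation(obj); err != nil {
			return nil, err
		}
	} else if err := updateValidation(obj, s.current); err != nil {
		return nil, err
	}
	s.current = obj
	return obj, nil
}

// statusREST mirrors the pass-through shape of the StatusREST.Update hunks.
type statusREST struct{ store *store }

func (r *statusREST) Update(name string, obj object, cv validateObjectFunc, uv validateObjectUpdateFunc) (object, error) {
	return r.store.update(name, obj, cv, uv)
}

func main() {
	r := &statusREST{store: &store{}}
	allow := func(object) error { return nil }
	reject := func(object, object) error { return errors.New("rejected by admission") }

	// First write takes the create path and passes the allow callback.
	if _, err := r.Update("pdb", object{"phase": "ok"}, allow, reject); err != nil {
		fmt.Println("unexpected:", err)
	}
	// Second write takes the update path and is stopped by the reject callback.
	if _, err := r.Update("pdb", object{"phase": "ok2"}, allow, reject); err != nil {
		fmt.Println(err) // rejected by admission
	}
}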
func (podDisruptionBudgetStrategy) NamespaceScoped() bool { diff --git a/vendor/k8s.io/kubernetes/pkg/registry/policy/rest/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/policy/rest/BUILD index 8d0ca5bb3482..95f5a4801435 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/policy/rest/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/policy/rest/BUILD @@ -8,8 +8,9 @@ load( go_library( name = "go_default_library", srcs = ["storage_policy.go"], + importpath = "k8s.io/kubernetes/pkg/registry/policy/rest", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/policy:go_default_library", "//pkg/registry/policy/poddisruptionbudget/storage:go_default_library", "//vendor/k8s.io/api/policy/v1beta1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/policy/rest/storage_policy.go b/vendor/k8s.io/kubernetes/pkg/registry/policy/rest/storage_policy.go index a845f53e90f1..2cb65949dc7b 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/policy/rest/storage_policy.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/policy/rest/storage_policy.go @@ -22,7 +22,7 @@ import ( "k8s.io/apiserver/pkg/registry/rest" genericapiserver "k8s.io/apiserver/pkg/server" serverstorage "k8s.io/apiserver/pkg/server/storage" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/policy" poddisruptionbudgetstore "k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget/storage" ) @@ -30,7 +30,7 @@ import ( type RESTStorageProvider struct{} func (p RESTStorageProvider) NewRESTStorage(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter) (genericapiserver.APIGroupInfo, bool) { - apiGroupInfo := genericapiserver.NewDefaultAPIGroupInfo(policy.GroupName, api.Registry, api.Scheme, api.ParameterCodec, api.Codecs) + apiGroupInfo := genericapiserver.NewDefaultAPIGroupInfo(policy.GroupName, legacyscheme.Registry, legacyscheme.Scheme, legacyscheme.ParameterCodec, legacyscheme.Codecs) // If you add a version here, be sure to add an entry in `k8s.io/kubernetes/cmd/kube-apiserver/app/aggregator.go with specific priorities. 
// TODO refactor the plumbing to provide the information in the APIGroupInfo diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/rbac/BUILD index fa207aa7cbba..10bb6820839d 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/BUILD @@ -12,8 +12,8 @@ go_library( "escalation_check.go", "helpers.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/rbac", deps = [ - "//pkg/api:go_default_library", "//pkg/apis/rbac:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", @@ -50,10 +50,11 @@ filegroup( go_test( name = "go_default_test", srcs = ["helpers_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/rbac", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/helper:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/helper:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrole/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrole/BUILD index 4bcf7f992481..57fac87d96e5 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrole/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrole/BUILD @@ -12,8 +12,9 @@ go_library( "registry.go", "strategy.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/rbac/clusterrole", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/rbac:go_default_library", "//pkg/apis/rbac/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrole/policybased/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrole/policybased/BUILD index a00c40e80a7c..e58b32eac079 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrole/policybased/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrole/policybased/BUILD @@ -8,8 +8,9 @@ load( go_library( name = "go_default_library", srcs = ["storage.go"], + importpath = "k8s.io/kubernetes/pkg/registry/rbac/clusterrole/policybased", deps = [ - "//pkg/api/helper:go_default_library", + "//pkg/apis/core/helper:go_default_library", "//pkg/apis/rbac:go_default_library", "//pkg/registry/rbac:go_default_library", "//pkg/registry/rbac/validation:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrole/policybased/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrole/policybased/storage.go index 36650cbb7893..0b32fdebf1c3 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrole/policybased/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrole/policybased/storage.go @@ -18,11 +18,13 @@ limitations under the License. 
package policybased import ( - "k8s.io/apimachinery/pkg/api/errors" + "errors" + + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" - kapihelper "k8s.io/kubernetes/pkg/api/helper" + kapihelper "k8s.io/kubernetes/pkg/apis/core/helper" "k8s.io/kubernetes/pkg/apis/rbac" rbacregistry "k8s.io/kubernetes/pkg/registry/rbac" rbacregistryvalidation "k8s.io/kubernetes/pkg/registry/rbac/validation" @@ -40,26 +42,39 @@ func NewStorage(s rest.StandardStorage, ruleResolver rbacregistryvalidation.Auth return &Storage{s, ruleResolver} } -func (s *Storage) Create(ctx genericapirequest.Context, obj runtime.Object, includeUninitialized bool) (runtime.Object, error) { +var fullAuthority = []rbac.PolicyRule{ + rbac.NewRule("*").Groups("*").Resources("*").RuleOrDie(), + rbac.NewRule("*").URLs("*").RuleOrDie(), +} + +func (s *Storage) Create(ctx genericapirequest.Context, obj runtime.Object, createValidatingAdmission rest.ValidateObjectFunc, includeUninitialized bool) (runtime.Object, error) { if rbacregistry.EscalationAllowed(ctx) { - return s.StandardStorage.Create(ctx, obj, includeUninitialized) + return s.StandardStorage.Create(ctx, obj, createValidatingAdmission, includeUninitialized) } clusterRole := obj.(*rbac.ClusterRole) rules := clusterRole.Rules if err := rbacregistryvalidation.ConfirmNoEscalation(ctx, s.ruleResolver, rules); err != nil { - return nil, errors.NewForbidden(groupResource, clusterRole.Name, err) + return nil, apierrors.NewForbidden(groupResource, clusterRole.Name, err) + } + // to set the aggregation rule, since it can gather anything, requires * on *.* + if hasAggregationRule(clusterRole) { + if err := rbacregistryvalidation.ConfirmNoEscalation(ctx, s.ruleResolver, fullAuthority); err != nil { + return nil, apierrors.NewForbidden(groupResource, clusterRole.Name, errors.New("must have cluster-admin privileges to use the aggregationRule")) + } } - return s.StandardStorage.Create(ctx, obj, includeUninitialized) + + return s.StandardStorage.Create(ctx, obj, createValidatingAdmission, includeUninitialized) } -func (s *Storage) Update(ctx genericapirequest.Context, name string, obj rest.UpdatedObjectInfo) (runtime.Object, bool, error) { +func (s *Storage) Update(ctx genericapirequest.Context, name string, obj rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (runtime.Object, bool, error) { if rbacregistry.EscalationAllowed(ctx) { - return s.StandardStorage.Update(ctx, name, obj) + return s.StandardStorage.Update(ctx, name, obj, createValidation, updateValidation) } nonEscalatingInfo := rest.WrapUpdatedObjectInfo(obj, func(ctx genericapirequest.Context, obj runtime.Object, oldObj runtime.Object) (runtime.Object, error) { clusterRole := obj.(*rbac.ClusterRole) + oldClusterRole := oldObj.(*rbac.ClusterRole) // if we're only mutating fields needed for the GC to eventually delete this obj, return if rbacregistry.IsOnlyMutatingGCFields(obj, oldObj, kapihelper.Semantic) { @@ -68,10 +83,21 @@ func (s *Storage) Update(ctx genericapirequest.Context, name string, obj rest.Up rules := clusterRole.Rules if err := rbacregistryvalidation.ConfirmNoEscalation(ctx, s.ruleResolver, rules); err != nil { - return nil, errors.NewForbidden(groupResource, clusterRole.Name, err) + return nil, apierrors.NewForbidden(groupResource, clusterRole.Name, err) + } + // to change the aggregation rule, since it can gather anything and 
prevent tightening, requires * on *.* + if hasAggregationRule(clusterRole) || hasAggregationRule(oldClusterRole) { + if err := rbacregistryvalidation.ConfirmNoEscalation(ctx, s.ruleResolver, fullAuthority); err != nil { + return nil, apierrors.NewForbidden(groupResource, clusterRole.Name, errors.New("must have cluster-admin privileges to use the aggregationRule")) + } } + return obj, nil }) - return s.StandardStorage.Update(ctx, name, nonEscalatingInfo) + return s.StandardStorage.Update(ctx, name, nonEscalatingInfo, createValidation, updateValidation) +} + +func hasAggregationRule(clusterRole *rbac.ClusterRole) bool { + return clusterRole.AggregationRule != nil && len(clusterRole.AggregationRule.ClusterRoleSelectors) > 0 } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrole/registry.go b/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrole/registry.go index 6af72e1fa18b..b8b1a21b54a4 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrole/registry.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrole/registry.go @@ -22,15 +22,14 @@ import ( "k8s.io/apimachinery/pkg/watch" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/rbac" ) // Registry is an interface for things that know how to store ClusterRoles. type Registry interface { ListClusterRoles(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (*rbac.ClusterRoleList, error) - CreateClusterRole(ctx genericapirequest.Context, clusterRole *rbac.ClusterRole) error - UpdateClusterRole(ctx genericapirequest.Context, clusterRole *rbac.ClusterRole) error + CreateClusterRole(ctx genericapirequest.Context, clusterRole *rbac.ClusterRole, createValidation rest.ValidateObjectFunc) error + UpdateClusterRole(ctx genericapirequest.Context, clusterRole *rbac.ClusterRole, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) error GetClusterRole(ctx genericapirequest.Context, name string, options *metav1.GetOptions) (*rbac.ClusterRole, error) DeleteClusterRole(ctx genericapirequest.Context, name string) error WatchClusterRoles(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (watch.Interface, error) @@ -56,13 +55,13 @@ func (s *storage) ListClusterRoles(ctx genericapirequest.Context, options *metai return obj.(*rbac.ClusterRoleList), nil } -func (s *storage) CreateClusterRole(ctx genericapirequest.Context, clusterRole *rbac.ClusterRole) error { - _, err := s.Create(ctx, clusterRole, false) +func (s *storage) CreateClusterRole(ctx genericapirequest.Context, clusterRole *rbac.ClusterRole, createValidation rest.ValidateObjectFunc) error { + _, err := s.Create(ctx, clusterRole, createValidation, false) return err } -func (s *storage) UpdateClusterRole(ctx genericapirequest.Context, clusterRole *rbac.ClusterRole) error { - _, _, err := s.Update(ctx, clusterRole.Name, rest.DefaultUpdatedObjectInfo(clusterRole, api.Scheme)) +func (s *storage) UpdateClusterRole(ctx genericapirequest.Context, clusterRole *rbac.ClusterRole, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) error { + _, _, err := s.Update(ctx, clusterRole.Name, rest.DefaultUpdatedObjectInfo(clusterRole), createValidation, updateValidation) return err } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrole/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrole/storage/BUILD index 0e280cd82e1a..bbd545df7dda 
100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrole/storage/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrole/storage/BUILD @@ -8,8 +8,8 @@ load( go_library( name = "go_default_library", srcs = ["storage.go"], + importpath = "k8s.io/kubernetes/pkg/registry/rbac/clusterrole/storage", deps = [ - "//pkg/api:go_default_library", "//pkg/apis/rbac:go_default_library", "//pkg/registry/rbac/clusterrole:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrole/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrole/storage/storage.go index 965542cb2791..940713f305ad 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrole/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrole/storage/storage.go @@ -20,7 +20,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/rbac" "k8s.io/kubernetes/pkg/registry/rbac/clusterrole" ) @@ -33,7 +32,6 @@ type REST struct { // NewREST returns a RESTStorage object that will work against ClusterRole objects. func NewREST(optsGetter generic.RESTOptionsGetter) *REST { store := &genericregistry.Store{ - Copier: api.Scheme, NewFunc: func() runtime.Object { return &rbac.ClusterRole{} }, NewListFunc: func() runtime.Object { return &rbac.ClusterRoleList{} }, DefaultQualifiedResource: rbac.Resource("clusterroles"), diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrole/strategy.go b/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrole/strategy.go index 301418a1411c..e1be1f06cfde 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrole/strategy.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrole/strategy.go @@ -22,7 +22,7 @@ import ( genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/apiserver/pkg/storage/names" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/rbac" "k8s.io/kubernetes/pkg/apis/rbac/validation" ) @@ -35,7 +35,7 @@ type strategy struct { // strategy is the default logic that applies when creating and updating // ClusterRole objects. 
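The policybased ClusterRole storage above adds a second escalation gate: because an aggregationRule can pull in arbitrary permissions, setting or changing one requires the requester to already hold "*" on every group/resource and on every non-resource URL. A trimmed sketch of that gate, with stand-in types and the rule resolver's answer reduced to a boolean:

package main

import (
	"errors"
	"fmt"
)

// Trimmed stand-ins for the vendored rbac types.
type policyRule struct {
	Verbs, APIGroups, Resources, NonResourceURLs []string
}

type aggregationRule struct{ ClusterRoleSelectors []string }

type clusterRole struct {
	Rules           []policyRule
	AggregationRule *aggregationRule
}

// fullAuthority mirrors what the storage demands before an aggregationRule may
// be set or changed: "*" on every group/resource plus "*" on every URL.
var fullAuthority = []policyRule{
	{Verbs: []string{"*"}, APIGroups: []string{"*"}, Resources: []string{"*"}},
	{Verbs: []string{"*"}, NonResourceURLs: []string{"*"}},
}

func hasAggregationRule(cr *clusterRole) bool {
	return cr.AggregationRule != nil && len(cr.AggregationRule.ClusterRoleSelectors) > 0
}

// checkAggregationEscalation sketches the gate: if either the incoming or the
// stored role carries an aggregation rule, the requester must already cover
// fullAuthority (the real code asks the rule resolver; here that answer is a
// plain boolean).
func checkAggregationEscalation(requesterCoversFullAuthority bool, newRole, oldRole *clusterRole) error {
	if hasAggregationRule(newRole) || (oldRole != nil && hasAggregationRule(oldRole)) {
		if !requesterCoversFullAuthority {
			return errors.New("must have cluster-admin privileges to use the aggregationRule")
		}
	}
	return nil
}

func main() {
	role := &clusterRole{AggregationRule: &aggregationRule{ClusterRoleSelectors: []string{"rbac.example/aggregate"}}}
	fmt.Println(checkAggregationEscalation(false, role, nil)) // forbidden
	fmt.Println(checkAggregationEscalation(true, role, nil))  // <nil>
}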
-var Strategy = strategy{api.Scheme, names.SimpleNameGenerator} +var Strategy = strategy{legacyscheme.Scheme, names.SimpleNameGenerator} // Strategy should implement rest.RESTCreateStrategy var _ rest.RESTCreateStrategy = Strategy diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/BUILD index f1d671f61bf0..3f7f3015b010 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/BUILD @@ -12,8 +12,9 @@ go_library( "registry.go", "strategy.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/rbac:go_default_library", "//pkg/apis/rbac/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/policybased/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/policybased/BUILD index d9de68520bb1..2141978eec96 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/policybased/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/policybased/BUILD @@ -8,8 +8,9 @@ load( go_library( name = "go_default_library", srcs = ["storage.go"], + importpath = "k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/policybased", deps = [ - "//pkg/api/helper:go_default_library", + "//pkg/apis/core/helper:go_default_library", "//pkg/apis/rbac:go_default_library", "//pkg/registry/rbac:go_default_library", "//pkg/registry/rbac/validation:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/policybased/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/policybased/storage.go index 95130b8c9c0e..12264dba462d 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/policybased/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/policybased/storage.go @@ -24,7 +24,7 @@ import ( "k8s.io/apiserver/pkg/authorization/authorizer" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" - kapihelper "k8s.io/kubernetes/pkg/api/helper" + kapihelper "k8s.io/kubernetes/pkg/apis/core/helper" "k8s.io/kubernetes/pkg/apis/rbac" rbacregistry "k8s.io/kubernetes/pkg/registry/rbac" rbacregistryvalidation "k8s.io/kubernetes/pkg/registry/rbac/validation" @@ -44,14 +44,14 @@ func NewStorage(s rest.StandardStorage, authorizer authorizer.Authorizer, ruleRe return &Storage{s, authorizer, ruleResolver} } -func (s *Storage) Create(ctx genericapirequest.Context, obj runtime.Object, includeUninitialized bool) (runtime.Object, error) { +func (s *Storage) Create(ctx genericapirequest.Context, obj runtime.Object, createValidation rest.ValidateObjectFunc, includeUninitialized bool) (runtime.Object, error) { if rbacregistry.EscalationAllowed(ctx) { - return s.StandardStorage.Create(ctx, obj, includeUninitialized) + return s.StandardStorage.Create(ctx, obj, createValidation, includeUninitialized) } clusterRoleBinding := obj.(*rbac.ClusterRoleBinding) if rbacregistry.BindingAuthorized(ctx, clusterRoleBinding.RoleRef, metav1.NamespaceNone, s.authorizer) { - return s.StandardStorage.Create(ctx, obj, includeUninitialized) + return s.StandardStorage.Create(ctx, obj, createValidation, includeUninitialized) } rules, err 
:= s.ruleResolver.GetRoleReferenceRules(clusterRoleBinding.RoleRef, metav1.NamespaceNone) @@ -61,12 +61,12 @@ func (s *Storage) Create(ctx genericapirequest.Context, obj runtime.Object, incl if err := rbacregistryvalidation.ConfirmNoEscalation(ctx, s.ruleResolver, rules); err != nil { return nil, errors.NewForbidden(groupResource, clusterRoleBinding.Name, err) } - return s.StandardStorage.Create(ctx, obj, includeUninitialized) + return s.StandardStorage.Create(ctx, obj, createValidation, includeUninitialized) } -func (s *Storage) Update(ctx genericapirequest.Context, name string, obj rest.UpdatedObjectInfo) (runtime.Object, bool, error) { +func (s *Storage) Update(ctx genericapirequest.Context, name string, obj rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (runtime.Object, bool, error) { if rbacregistry.EscalationAllowed(ctx) { - return s.StandardStorage.Update(ctx, name, obj) + return s.StandardStorage.Update(ctx, name, obj, createValidation, updateValidation) } nonEscalatingInfo := rest.WrapUpdatedObjectInfo(obj, func(ctx genericapirequest.Context, obj runtime.Object, oldObj runtime.Object) (runtime.Object, error) { @@ -93,5 +93,5 @@ func (s *Storage) Update(ctx genericapirequest.Context, name string, obj rest.Up return obj, nil }) - return s.StandardStorage.Update(ctx, name, nonEscalatingInfo) + return s.StandardStorage.Update(ctx, name, nonEscalatingInfo, createValidation, updateValidation) } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/registry.go b/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/registry.go index 525c464e83d3..2a3524e6ed39 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/registry.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/registry.go @@ -22,15 +22,14 @@ import ( "k8s.io/apimachinery/pkg/watch" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/rbac" ) // Registry is an interface for things that know how to store ClusterRoleBindings. 
type Registry interface { ListClusterRoleBindings(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (*rbac.ClusterRoleBindingList, error) - CreateClusterRoleBinding(ctx genericapirequest.Context, clusterRoleBinding *rbac.ClusterRoleBinding) error - UpdateClusterRoleBinding(ctx genericapirequest.Context, clusterRoleBinding *rbac.ClusterRoleBinding) error + CreateClusterRoleBinding(ctx genericapirequest.Context, clusterRoleBinding *rbac.ClusterRoleBinding, createValidation rest.ValidateObjectFunc) error + UpdateClusterRoleBinding(ctx genericapirequest.Context, clusterRoleBinding *rbac.ClusterRoleBinding, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) error GetClusterRoleBinding(ctx genericapirequest.Context, name string, options *metav1.GetOptions) (*rbac.ClusterRoleBinding, error) DeleteClusterRoleBinding(ctx genericapirequest.Context, name string) error WatchClusterRoleBindings(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (watch.Interface, error) @@ -56,13 +55,13 @@ func (s *storage) ListClusterRoleBindings(ctx genericapirequest.Context, options return obj.(*rbac.ClusterRoleBindingList), nil } -func (s *storage) CreateClusterRoleBinding(ctx genericapirequest.Context, clusterRoleBinding *rbac.ClusterRoleBinding) error { - _, err := s.Create(ctx, clusterRoleBinding, false) +func (s *storage) CreateClusterRoleBinding(ctx genericapirequest.Context, clusterRoleBinding *rbac.ClusterRoleBinding, createValidation rest.ValidateObjectFunc) error { + _, err := s.Create(ctx, clusterRoleBinding, createValidation, false) return err } -func (s *storage) UpdateClusterRoleBinding(ctx genericapirequest.Context, clusterRoleBinding *rbac.ClusterRoleBinding) error { - _, _, err := s.Update(ctx, clusterRoleBinding.Name, rest.DefaultUpdatedObjectInfo(clusterRoleBinding, api.Scheme)) +func (s *storage) UpdateClusterRoleBinding(ctx genericapirequest.Context, clusterRoleBinding *rbac.ClusterRoleBinding, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) error { + _, _, err := s.Update(ctx, clusterRoleBinding.Name, rest.DefaultUpdatedObjectInfo(clusterRoleBinding), createValidation, updateValidation) return err } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/storage/BUILD index 02ce6e4e023c..3a2f0a31061f 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/storage/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/storage/BUILD @@ -8,8 +8,8 @@ load( go_library( name = "go_default_library", srcs = ["storage.go"], + importpath = "k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/storage", deps = [ - "//pkg/api:go_default_library", "//pkg/apis/rbac:go_default_library", "//pkg/registry/rbac/clusterrolebinding:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/storage/storage.go index e355c91b9c29..200e8794c80e 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/storage/storage.go @@ -20,7 +20,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/registry/generic" genericregistry 
"k8s.io/apiserver/pkg/registry/generic/registry" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/rbac" "k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding" ) @@ -33,7 +32,6 @@ type REST struct { // NewREST returns a RESTStorage object that will work against ClusterRoleBinding objects. func NewREST(optsGetter generic.RESTOptionsGetter) *REST { store := &genericregistry.Store{ - Copier: api.Scheme, NewFunc: func() runtime.Object { return &rbac.ClusterRoleBinding{} }, NewListFunc: func() runtime.Object { return &rbac.ClusterRoleBindingList{} }, DefaultQualifiedResource: rbac.Resource("clusterrolebindings"), diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/strategy.go b/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/strategy.go index e1d83ecf4ab7..1b2e33772d47 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/strategy.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/strategy.go @@ -22,7 +22,7 @@ import ( genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/apiserver/pkg/storage/names" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/rbac" "k8s.io/kubernetes/pkg/apis/rbac/validation" ) @@ -35,7 +35,7 @@ type strategy struct { // strategy is the default logic that applies when creating and updating // ClusterRoleBinding objects. -var Strategy = strategy{api.Scheme, names.SimpleNameGenerator} +var Strategy = strategy{legacyscheme.Scheme, names.SimpleNameGenerator} // Strategy should implement rest.RESTCreateStrategy var _ rest.RESTCreateStrategy = Strategy diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/escalation_check.go b/vendor/k8s.io/kubernetes/pkg/registry/rbac/escalation_check.go index 4fa0d59ceafd..4ac1392fbbd4 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/escalation_check.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/escalation_check.go @@ -79,12 +79,12 @@ func BindingAuthorized(ctx genericapirequest.Context, roleRef rbac.RoleRef, bind return false } - ok, _, err := a.Authorize(attrs) + decision, _, err := a.Authorize(attrs) if err != nil { utilruntime.HandleError(fmt.Errorf( "error authorizing user %#v to bind %#v in namespace %s: %v", user, roleRef, bindingNamespace, err, )) } - return ok + return decision == authorizer.DecisionAllow } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/helpers.go b/vendor/k8s.io/kubernetes/pkg/registry/rbac/helpers.go index 011d12ea0537..0e10a65b855c 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/helpers.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/helpers.go @@ -17,21 +17,22 @@ limitations under the License. 
package rbac import ( + "reflect" + "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/runtime" - kapi "k8s.io/kubernetes/pkg/api" ) // IsOnlyMutatingGCFields checks finalizers and ownerrefs which GC manipulates // and indicates that only those fields are changing func IsOnlyMutatingGCFields(obj, old runtime.Object, equalities conversion.Equalities) bool { - // make a copy of the newObj so that we can stomp for comparison - copied, err := kapi.Scheme.Copy(obj) - if err != nil { - // if we couldn't copy, don't fail, just make it do the check + if old == nil || reflect.ValueOf(old).IsNil() { return false } + + // make a copy of the newObj so that we can stomp for comparison + copied := obj.DeepCopyObject() copiedMeta, err := meta.Accessor(copied) if err != nil { return false diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/reconciliation/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/rbac/reconciliation/BUILD index 6f5d4f7c6b73..72d6ae1d5b13 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/reconciliation/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/reconciliation/BUILD @@ -12,11 +12,13 @@ go_test( "reconcile_role_test.go", "reconcile_rolebindings_test.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/rbac/reconciliation", library = ":go_default_library", deps = [ - "//pkg/api/helper:go_default_library", + "//pkg/apis/core/helper:go_default_library", "//pkg/apis/rbac:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", ], ) @@ -31,15 +33,16 @@ go_library( "rolebinding_interfaces.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/rbac/reconciliation", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/rbac:go_default_library", "//pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library", "//pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion:go_default_library", "//pkg/registry/rbac/validation:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/reconciliation/clusterrole_interfaces.go b/vendor/k8s.io/kubernetes/pkg/registry/rbac/reconciliation/clusterrole_interfaces.go index cd4f221f37da..419cc1df26bf 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/reconciliation/clusterrole_interfaces.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/reconciliation/clusterrole_interfaces.go @@ -66,6 +66,14 @@ func (o ClusterRoleRuleOwner) SetRules(in []rbac.PolicyRule) { o.ClusterRole.Rules = in } +func (o ClusterRoleRuleOwner) GetAggregationRule() *rbac.AggregationRule { + return o.ClusterRole.AggregationRule +} + +func (o ClusterRoleRuleOwner) SetAggregationRule(in *rbac.AggregationRule) { + o.ClusterRole.AggregationRule = in +} + type ClusterRoleModifier struct { Client internalversion.ClusterRoleInterface } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/reconciliation/reconcile_role.go b/vendor/k8s.io/kubernetes/pkg/registry/rbac/reconciliation/reconcile_role.go 
index 873b329b0a69..b4605991011d 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/reconciliation/reconcile_role.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/reconciliation/reconcile_role.go @@ -20,7 +20,9 @@ import ( "fmt" "reflect" + "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/kubernetes/pkg/apis/rbac" "k8s.io/kubernetes/pkg/registry/rbac/validation" @@ -51,6 +53,8 @@ type RuleOwner interface { SetAnnotations(map[string]string) GetRules() []rbac.PolicyRule SetRules([]rbac.PolicyRule) + GetAggregationRule() *rbac.AggregationRule + SetAggregationRule(*rbac.AggregationRule) DeepCopyRuleOwner() RuleOwner } @@ -75,6 +79,11 @@ type ReconcileClusterRoleResult struct { // ExtraRules contains extra permissions the currently persisted role had ExtraRules []rbac.PolicyRule + // MissingAggregationRuleSelectors contains expected selectors that were missing from the currently persisted role + MissingAggregationRuleSelectors []metav1.LabelSelector + // ExtraAggregationRuleSelectors contains extra selectors the currently persisted role had + ExtraAggregationRuleSelectors []metav1.LabelSelector + // Operation is the API operation required to reconcile. // If no reconciliation was needed, it is set to ReconcileNone. // If options.Confirm == false, the reconcile was in dry-run mode, so the operation was not performed. @@ -101,10 +110,15 @@ func (o *ReconcileRoleOptions) run(attempts int) (*ReconcileClusterRoleResult, e existing, err := o.Client.Get(o.Role.GetNamespace(), o.Role.GetName()) switch { case errors.IsNotFound(err): + aggregationRule := o.Role.GetAggregationRule() + if aggregationRule == nil { + aggregationRule = &rbac.AggregationRule{} + } result = &ReconcileClusterRoleResult{ - Role: o.Role, - MissingRules: o.Role.GetRules(), - Operation: ReconcileCreate, + Role: o.Role, + MissingRules: o.Role.GetRules(), + MissingAggregationRuleSelectors: aggregationRule.ClusterRoleSelectors, + Operation: ReconcileCreate, } case err != nil: @@ -195,6 +209,26 @@ func computeReconciledRole(existing, expected RuleOwner, removeExtraPermissions result.Operation = ReconcileUpdate } + // Compute extra and missing rules + _, result.ExtraAggregationRuleSelectors = aggregationRuleCovers(expected.GetAggregationRule(), existing.GetAggregationRule()) + _, result.MissingAggregationRuleSelectors = aggregationRuleCovers(existing.GetAggregationRule(), expected.GetAggregationRule()) + + switch { + case !removeExtraPermissions && len(result.MissingAggregationRuleSelectors) > 0: + // add missing rules in the union case + aggregationRule := result.Role.GetAggregationRule() + if aggregationRule == nil { + aggregationRule = &rbac.AggregationRule{} + } + aggregationRule.ClusterRoleSelectors = append(aggregationRule.ClusterRoleSelectors, result.MissingAggregationRuleSelectors...) + result.Role.SetAggregationRule(aggregationRule) + result.Operation = ReconcileUpdate + + case removeExtraPermissions && (len(result.MissingAggregationRuleSelectors) > 0 || len(result.ExtraAggregationRuleSelectors) > 0): + result.Role.SetAggregationRule(expected.GetAggregationRule()) + result.Operation = ReconcileUpdate + } + return result, nil } @@ -211,3 +245,37 @@ func merge(maps ...map[string]string) map[string]string { } return output } + +// aggregationRuleCovers determines whether or not the ownerSelectors cover the servantSelectors in terms of semantically +// equal label selectors. 
+// It returns whether or not the ownerSelectors cover and a list of the rules that the ownerSelectors do not cover. +func aggregationRuleCovers(ownerRule, servantRule *rbac.AggregationRule) (bool, []metav1.LabelSelector) { + switch { + case ownerRule == nil && servantRule == nil: + return true, []metav1.LabelSelector{} + case ownerRule == nil && servantRule != nil: + return false, servantRule.ClusterRoleSelectors + case ownerRule != nil && servantRule == nil: + return true, []metav1.LabelSelector{} + + } + + ownerSelectors := ownerRule.ClusterRoleSelectors + servantSelectors := servantRule.ClusterRoleSelectors + uncoveredSelectors := []metav1.LabelSelector{} + + for _, servantSelector := range servantSelectors { + covered := false + for _, ownerSelector := range ownerSelectors { + if equality.Semantic.DeepEqual(ownerSelector, servantSelector) { + covered = true + break + } + } + if !covered { + uncoveredSelectors = append(uncoveredSelectors, servantSelector) + } + } + + return (len(uncoveredSelectors) == 0), uncoveredSelectors +} diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/reconciliation/role_interfaces.go b/vendor/k8s.io/kubernetes/pkg/registry/rbac/reconciliation/role_interfaces.go index f671c8c43b5d..b46e9e872ea1 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/reconciliation/role_interfaces.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/reconciliation/role_interfaces.go @@ -20,7 +20,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/rbac" core "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion" @@ -69,6 +69,13 @@ func (o RoleRuleOwner) SetRules(in []rbac.PolicyRule) { o.Role.Rules = in } +func (o RoleRuleOwner) GetAggregationRule() *rbac.AggregationRule { + return nil +} + +func (o RoleRuleOwner) SetAggregationRule(in *rbac.AggregationRule) { +} + type RoleModifier struct { Client internalversion.RolesGetter NamespaceClient core.NamespaceInterface diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/reconciliation/rolebinding_interfaces.go b/vendor/k8s.io/kubernetes/pkg/registry/rbac/reconciliation/rolebinding_interfaces.go index 2f26fc954130..126f479cb1ec 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/reconciliation/rolebinding_interfaces.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/reconciliation/rolebinding_interfaces.go @@ -21,7 +21,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/rbac" core "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion" diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/reconciliation/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/registry/rbac/reconciliation/zz_generated.deepcopy.go index b99814e331fb..f6a288fdd9c2 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/reconciliation/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/reconciliation/zz_generated.deepcopy.go @@ -21,35 +21,9 @@ limitations under the License. 
package reconciliation import ( - conversion "k8s.io/apimachinery/pkg/conversion" rbac "k8s.io/kubernetes/pkg/apis/rbac" - reflect "reflect" ) -// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { - return []conversion.GeneratedDeepCopyFunc{ - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterRoleBindingAdapter).DeepCopyInto(out.(*ClusterRoleBindingAdapter)) - return nil - }, InType: reflect.TypeOf(&ClusterRoleBindingAdapter{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterRoleRuleOwner).DeepCopyInto(out.(*ClusterRoleRuleOwner)) - return nil - }, InType: reflect.TypeOf(&ClusterRoleRuleOwner{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RoleBindingAdapter).DeepCopyInto(out.(*RoleBindingAdapter)) - return nil - }, InType: reflect.TypeOf(&RoleBindingAdapter{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RoleRuleOwner).DeepCopyInto(out.(*RoleRuleOwner)) - return nil - }, InType: reflect.TypeOf(&RoleRuleOwner{})}, - } -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterRoleBindingAdapter) DeepCopyInto(out *ClusterRoleBindingAdapter) { *out = *in diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/rest/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/rbac/rest/BUILD index 87803421390a..685a8334bf15 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/rest/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/rest/BUILD @@ -8,8 +8,9 @@ load( go_library( name = "go_default_library", srcs = ["storage_rbac.go"], + importpath = "k8s.io/kubernetes/pkg/registry/rbac/rest", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/rbac:go_default_library", "//pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library", "//pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion:go_default_library", @@ -32,6 +33,7 @@ go_library( "//vendor/k8s.io/api/rbac/v1:go_default_library", "//vendor/k8s.io/api/rbac/v1alpha1:go_default_library", "//vendor/k8s.io/api/rbac/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/rest/storage_rbac.go b/vendor/k8s.io/kubernetes/pkg/registry/rbac/rest/storage_rbac.go index 200bdd55c3ca..b8704f8e465e 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/rest/storage_rbac.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/rest/storage_rbac.go @@ -26,6 +26,7 @@ import ( rbacapiv1 "k8s.io/api/rbac/v1" rbacapiv1alpha1 "k8s.io/api/rbac/v1alpha1" rbacapiv1beta1 "k8s.io/api/rbac/v1beta1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -36,7 +37,7 @@ import ( genericapiserver "k8s.io/apiserver/pkg/server" serverstorage "k8s.io/apiserver/pkg/server/storage" "k8s.io/client-go/util/retry" - 
"k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/rbac" coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" rbacclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion" @@ -66,7 +67,7 @@ type RESTStorageProvider struct { var _ genericapiserver.PostStartHookProvider = RESTStorageProvider{} func (p RESTStorageProvider) NewRESTStorage(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter) (genericapiserver.APIGroupInfo, bool) { - apiGroupInfo := genericapiserver.NewDefaultAPIGroupInfo(rbac.GroupName, api.Registry, api.Scheme, api.ParameterCodec, api.Codecs) + apiGroupInfo := genericapiserver.NewDefaultAPIGroupInfo(rbac.GroupName, legacyscheme.Registry, legacyscheme.Scheme, legacyscheme.ParameterCodec, legacyscheme.Codecs) // If you add a version here, be sure to add an entry in `k8s.io/kubernetes/cmd/kube-apiserver/app/aggregator.go with specific priorities. // TODO refactor the plumbing to provide the information in the APIGroupInfo @@ -74,15 +75,14 @@ func (p RESTStorageProvider) NewRESTStorage(apiResourceConfigSource serverstorag apiGroupInfo.VersionedResourcesStorageMap[rbacapiv1alpha1.SchemeGroupVersion.Version] = p.storage(rbacapiv1alpha1.SchemeGroupVersion, apiResourceConfigSource, restOptionsGetter) apiGroupInfo.GroupMeta.GroupVersion = rbacapiv1alpha1.SchemeGroupVersion } - // TODO: move this after v1beta1 in 1.9, so RBAC objects write to storage in v1 - if apiResourceConfigSource.AnyResourcesForVersionEnabled(rbacapiv1.SchemeGroupVersion) { - apiGroupInfo.VersionedResourcesStorageMap[rbacapiv1.SchemeGroupVersion.Version] = p.storage(rbacapiv1.SchemeGroupVersion, apiResourceConfigSource, restOptionsGetter) - apiGroupInfo.GroupMeta.GroupVersion = rbacapiv1.SchemeGroupVersion - } if apiResourceConfigSource.AnyResourcesForVersionEnabled(rbacapiv1beta1.SchemeGroupVersion) { apiGroupInfo.VersionedResourcesStorageMap[rbacapiv1beta1.SchemeGroupVersion.Version] = p.storage(rbacapiv1beta1.SchemeGroupVersion, apiResourceConfigSource, restOptionsGetter) apiGroupInfo.GroupMeta.GroupVersion = rbacapiv1beta1.SchemeGroupVersion } + if apiResourceConfigSource.AnyResourcesForVersionEnabled(rbacapiv1.SchemeGroupVersion) { + apiGroupInfo.VersionedResourcesStorageMap[rbacapiv1.SchemeGroupVersion.Version] = p.storage(rbacapiv1.SchemeGroupVersion, apiResourceConfigSource, restOptionsGetter) + apiGroupInfo.GroupMeta.GroupVersion = rbacapiv1.SchemeGroupVersion + } return apiGroupInfo, true } @@ -135,10 +135,11 @@ func (p RESTStorageProvider) storage(version schema.GroupVersion, apiResourceCon func (p RESTStorageProvider) PostStartHook() (string, genericapiserver.PostStartHookFunc, error) { policy := &PolicyData{ - ClusterRoles: append(bootstrappolicy.ClusterRoles(), bootstrappolicy.ControllerRoles()...), - ClusterRoleBindings: append(bootstrappolicy.ClusterRoleBindings(), bootstrappolicy.ControllerRoleBindings()...), - Roles: bootstrappolicy.NamespaceRoles(), - RoleBindings: bootstrappolicy.NamespaceRoleBindings(), + ClusterRoles: append(bootstrappolicy.ClusterRoles(), bootstrappolicy.ControllerRoles()...), + ClusterRoleBindings: append(bootstrappolicy.ClusterRoleBindings(), bootstrappolicy.ControllerRoleBindings()...), + Roles: bootstrappolicy.NamespaceRoles(), + RoleBindings: bootstrappolicy.NamespaceRoleBindings(), + ClusterRolesToAggregate: bootstrappolicy.ClusterRolesToAggregate(), } return 
PostStartHookName, policy.EnsureRBACPolicy(), nil } @@ -148,6 +149,8 @@ type PolicyData struct { ClusterRoleBindings []rbac.ClusterRoleBinding Roles map[string][]rbac.Role RoleBindings map[string][]rbac.RoleBinding + // ClusterRolesToAggregate maps from previous clusterrole name to the new clusterrole name + ClusterRolesToAggregate map[string]string } func (p *PolicyData) EnsureRBACPolicy() genericapiserver.PostStartHookFunc { @@ -177,6 +180,13 @@ func (p *PolicyData) EnsureRBACPolicy() genericapiserver.PostStartHookFunc { return false, nil } + // if the new cluster roles to aggregate do not yet exist, then we need to copy the old roles if they don't exist + // in new locations + if err := primeAggregatedClusterRoles(p.ClusterRolesToAggregate, clientset); err != nil { + utilruntime.HandleError(fmt.Errorf("unable to prime aggregated clusterroles: %v", err)) + return false, nil + } + // ensure bootstrap roles are created or reconciled for _, clusterRole := range p.ClusterRoles { opts := reconciliation.ReconcileRoleOptions{ @@ -311,3 +321,33 @@ func (p *PolicyData) EnsureRBACPolicy() genericapiserver.PostStartHookFunc { func (p RESTStorageProvider) GroupName() string { return rbac.GroupName } + +// primeAggregatedClusterRoles copies roles that have transitioned to aggregated roles and may need to pick up changes +// that were done to the legacy roles. +func primeAggregatedClusterRoles(clusterRolesToAggregate map[string]string, clusterRoleClient rbacclient.ClusterRolesGetter) error { + for oldName, newName := range clusterRolesToAggregate { + _, err := clusterRoleClient.ClusterRoles().Get(newName, metav1.GetOptions{}) + if err == nil { + continue + } + if !apierrors.IsNotFound(err) { + return err + } + + existingRole, err := clusterRoleClient.ClusterRoles().Get(oldName, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + continue + } + if err != nil { + return err + } + glog.V(1).Infof("migrating %v to %v", existingRole.Name, newName) + existingRole.Name = newName + existingRole.ResourceVersion = "" // clear this so the object can be created. 
+ if _, err := clusterRoleClient.ClusterRoles().Create(existingRole); err != nil && !apierrors.IsAlreadyExists(err) { + return err + } + } + + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/role/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/rbac/role/BUILD index e24ad971db59..a4fde9839b41 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/role/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/role/BUILD @@ -12,8 +12,9 @@ go_library( "registry.go", "strategy.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/rbac/role", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/rbac:go_default_library", "//pkg/apis/rbac/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/role/policybased/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/rbac/role/policybased/BUILD index a00c40e80a7c..65fa12025f0e 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/role/policybased/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/role/policybased/BUILD @@ -8,8 +8,9 @@ load( go_library( name = "go_default_library", srcs = ["storage.go"], + importpath = "k8s.io/kubernetes/pkg/registry/rbac/role/policybased", deps = [ - "//pkg/api/helper:go_default_library", + "//pkg/apis/core/helper:go_default_library", "//pkg/apis/rbac:go_default_library", "//pkg/registry/rbac:go_default_library", "//pkg/registry/rbac/validation:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/role/policybased/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/rbac/role/policybased/storage.go index 1f2443d2b09c..d67e3777fe00 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/role/policybased/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/role/policybased/storage.go @@ -22,7 +22,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" - kapihelper "k8s.io/kubernetes/pkg/api/helper" + kapihelper "k8s.io/kubernetes/pkg/apis/core/helper" "k8s.io/kubernetes/pkg/apis/rbac" rbacregistry "k8s.io/kubernetes/pkg/registry/rbac" rbacregistryvalidation "k8s.io/kubernetes/pkg/registry/rbac/validation" @@ -40,9 +40,9 @@ func NewStorage(s rest.StandardStorage, ruleResolver rbacregistryvalidation.Auth return &Storage{s, ruleResolver} } -func (s *Storage) Create(ctx genericapirequest.Context, obj runtime.Object, includeUninitialized bool) (runtime.Object, error) { +func (s *Storage) Create(ctx genericapirequest.Context, obj runtime.Object, createValidation rest.ValidateObjectFunc, includeUninitialized bool) (runtime.Object, error) { if rbacregistry.EscalationAllowed(ctx) { - return s.StandardStorage.Create(ctx, obj, includeUninitialized) + return s.StandardStorage.Create(ctx, obj, createValidation, includeUninitialized) } role := obj.(*rbac.Role) @@ -50,12 +50,12 @@ func (s *Storage) Create(ctx genericapirequest.Context, obj runtime.Object, incl if err := rbacregistryvalidation.ConfirmNoEscalation(ctx, s.ruleResolver, rules); err != nil { return nil, errors.NewForbidden(groupResource, role.Name, err) } - return s.StandardStorage.Create(ctx, obj, includeUninitialized) + return s.StandardStorage.Create(ctx, obj, createValidation, includeUninitialized) } -func (s *Storage) Update(ctx genericapirequest.Context, name string, obj rest.UpdatedObjectInfo) (runtime.Object, bool, error) { +func (s *Storage) Update(ctx 
genericapirequest.Context, name string, obj rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (runtime.Object, bool, error) { if rbacregistry.EscalationAllowed(ctx) { - return s.StandardStorage.Update(ctx, name, obj) + return s.StandardStorage.Update(ctx, name, obj, createValidation, updateValidation) } nonEscalatingInfo := rest.WrapUpdatedObjectInfo(obj, func(ctx genericapirequest.Context, obj runtime.Object, oldObj runtime.Object) (runtime.Object, error) { @@ -73,5 +73,5 @@ func (s *Storage) Update(ctx genericapirequest.Context, name string, obj rest.Up return obj, nil }) - return s.StandardStorage.Update(ctx, name, nonEscalatingInfo) + return s.StandardStorage.Update(ctx, name, nonEscalatingInfo, createValidation, updateValidation) } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/role/registry.go b/vendor/k8s.io/kubernetes/pkg/registry/rbac/role/registry.go index 080cbd640661..bbf26b5e23dd 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/role/registry.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/role/registry.go @@ -22,15 +22,14 @@ import ( "k8s.io/apimachinery/pkg/watch" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/rbac" ) // Registry is an interface for things that know how to store Roles. type Registry interface { ListRoles(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (*rbac.RoleList, error) - CreateRole(ctx genericapirequest.Context, role *rbac.Role) error - UpdateRole(ctx genericapirequest.Context, role *rbac.Role) error + CreateRole(ctx genericapirequest.Context, role *rbac.Role, createValidation rest.ValidateObjectFunc) error + UpdateRole(ctx genericapirequest.Context, role *rbac.Role, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) error GetRole(ctx genericapirequest.Context, name string, options *metav1.GetOptions) (*rbac.Role, error) DeleteRole(ctx genericapirequest.Context, name string) error WatchRoles(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (watch.Interface, error) @@ -56,13 +55,14 @@ func (s *storage) ListRoles(ctx genericapirequest.Context, options *metainternal return obj.(*rbac.RoleList), nil } -func (s *storage) CreateRole(ctx genericapirequest.Context, role *rbac.Role) error { - _, err := s.Create(ctx, role, false) +func (s *storage) CreateRole(ctx genericapirequest.Context, role *rbac.Role, createValidation rest.ValidateObjectFunc) error { + _, err := s.Create(ctx, role, createValidation, false) return err } -func (s *storage) UpdateRole(ctx genericapirequest.Context, role *rbac.Role) error { - _, _, err := s.Update(ctx, role.Name, rest.DefaultUpdatedObjectInfo(role, api.Scheme)) +func (s *storage) UpdateRole(ctx genericapirequest.Context, role *rbac.Role, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) error { + // TODO: any admission? 
+ _, _, err := s.Update(ctx, role.Name, rest.DefaultUpdatedObjectInfo(role), createValidation, updateValidation) return err } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/role/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/rbac/role/storage/BUILD index 19ce0bebfd8b..703f830d9597 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/role/storage/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/role/storage/BUILD @@ -8,8 +8,8 @@ load( go_library( name = "go_default_library", srcs = ["storage.go"], + importpath = "k8s.io/kubernetes/pkg/registry/rbac/role/storage", deps = [ - "//pkg/api:go_default_library", "//pkg/apis/rbac:go_default_library", "//pkg/registry/rbac/role:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/role/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/rbac/role/storage/storage.go index 062977d57c83..6f151879d1f9 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/role/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/role/storage/storage.go @@ -20,7 +20,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/rbac" "k8s.io/kubernetes/pkg/registry/rbac/role" ) @@ -33,7 +32,6 @@ type REST struct { // NewREST returns a RESTStorage object that will work against Role objects. func NewREST(optsGetter generic.RESTOptionsGetter) *REST { store := &genericregistry.Store{ - Copier: api.Scheme, NewFunc: func() runtime.Object { return &rbac.Role{} }, NewListFunc: func() runtime.Object { return &rbac.RoleList{} }, DefaultQualifiedResource: rbac.Resource("roles"), diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/role/strategy.go b/vendor/k8s.io/kubernetes/pkg/registry/rbac/role/strategy.go index 619cfeed1256..b815f98db405 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/role/strategy.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/role/strategy.go @@ -22,7 +22,7 @@ import ( genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/apiserver/pkg/storage/names" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/rbac" "k8s.io/kubernetes/pkg/apis/rbac/validation" ) @@ -35,7 +35,7 @@ type strategy struct { // strategy is the default logic that applies when creating and updating // Role objects. 
-var Strategy = strategy{api.Scheme, names.SimpleNameGenerator} +var Strategy = strategy{legacyscheme.Scheme, names.SimpleNameGenerator} // Strategy should implement rest.RESTCreateStrategy var _ rest.RESTCreateStrategy = Strategy diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/rolebinding/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/rbac/rolebinding/BUILD index 2549da198ce4..e30725311389 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/rolebinding/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/rolebinding/BUILD @@ -12,8 +12,9 @@ go_library( "registry.go", "strategy.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/rbac/rolebinding", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/rbac:go_default_library", "//pkg/apis/rbac/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/rolebinding/policybased/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/rbac/rolebinding/policybased/BUILD index 40148153bf3e..fd05f21b4637 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/rolebinding/policybased/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/rolebinding/policybased/BUILD @@ -8,8 +8,9 @@ load( go_library( name = "go_default_library", srcs = ["storage.go"], + importpath = "k8s.io/kubernetes/pkg/registry/rbac/rolebinding/policybased", deps = [ - "//pkg/api/helper:go_default_library", + "//pkg/apis/core/helper:go_default_library", "//pkg/apis/rbac:go_default_library", "//pkg/registry/rbac:go_default_library", "//pkg/registry/rbac/validation:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/rolebinding/policybased/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/rbac/rolebinding/policybased/storage.go index 5f775d5c4910..9e539a77aca7 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/rolebinding/policybased/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/rolebinding/policybased/storage.go @@ -23,7 +23,7 @@ import ( "k8s.io/apiserver/pkg/authorization/authorizer" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" - kapihelper "k8s.io/kubernetes/pkg/api/helper" + kapihelper "k8s.io/kubernetes/pkg/apis/core/helper" "k8s.io/kubernetes/pkg/apis/rbac" rbacregistry "k8s.io/kubernetes/pkg/registry/rbac" rbacregistryvalidation "k8s.io/kubernetes/pkg/registry/rbac/validation" @@ -43,9 +43,9 @@ func NewStorage(s rest.StandardStorage, authorizer authorizer.Authorizer, ruleRe return &Storage{s, authorizer, ruleResolver} } -func (s *Storage) Create(ctx genericapirequest.Context, obj runtime.Object, includeUninitialized bool) (runtime.Object, error) { +func (s *Storage) Create(ctx genericapirequest.Context, obj runtime.Object, createValidation rest.ValidateObjectFunc, includeUninitialized bool) (runtime.Object, error) { if rbacregistry.EscalationAllowed(ctx) { - return s.StandardStorage.Create(ctx, obj, includeUninitialized) + return s.StandardStorage.Create(ctx, obj, createValidation, includeUninitialized) } // Get the namespace from the context (populated from the URL). 
@@ -57,7 +57,7 @@ func (s *Storage) Create(ctx genericapirequest.Context, obj runtime.Object, incl roleBinding := obj.(*rbac.RoleBinding) if rbacregistry.BindingAuthorized(ctx, roleBinding.RoleRef, namespace, s.authorizer) { - return s.StandardStorage.Create(ctx, obj, includeUninitialized) + return s.StandardStorage.Create(ctx, obj, createValidation, includeUninitialized) } rules, err := s.ruleResolver.GetRoleReferenceRules(roleBinding.RoleRef, namespace) @@ -67,12 +67,12 @@ func (s *Storage) Create(ctx genericapirequest.Context, obj runtime.Object, incl if err := rbacregistryvalidation.ConfirmNoEscalation(ctx, s.ruleResolver, rules); err != nil { return nil, errors.NewForbidden(groupResource, roleBinding.Name, err) } - return s.StandardStorage.Create(ctx, obj, includeUninitialized) + return s.StandardStorage.Create(ctx, obj, createValidation, includeUninitialized) } -func (s *Storage) Update(ctx genericapirequest.Context, name string, obj rest.UpdatedObjectInfo) (runtime.Object, bool, error) { +func (s *Storage) Update(ctx genericapirequest.Context, name string, obj rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (runtime.Object, bool, error) { if rbacregistry.EscalationAllowed(ctx) { - return s.StandardStorage.Update(ctx, name, obj) + return s.StandardStorage.Update(ctx, name, obj, createValidation, updateValidation) } nonEscalatingInfo := rest.WrapUpdatedObjectInfo(obj, func(ctx genericapirequest.Context, obj runtime.Object, oldObj runtime.Object) (runtime.Object, error) { @@ -106,5 +106,5 @@ func (s *Storage) Update(ctx genericapirequest.Context, name string, obj rest.Up return obj, nil }) - return s.StandardStorage.Update(ctx, name, nonEscalatingInfo) + return s.StandardStorage.Update(ctx, name, nonEscalatingInfo, createValidation, updateValidation) } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/rolebinding/registry.go b/vendor/k8s.io/kubernetes/pkg/registry/rbac/rolebinding/registry.go index a2e483b56a25..c708d088bb02 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/rolebinding/registry.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/rolebinding/registry.go @@ -22,15 +22,14 @@ import ( "k8s.io/apimachinery/pkg/watch" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/rbac" ) // Registry is an interface for things that know how to store RoleBindings. 
type Registry interface { ListRoleBindings(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (*rbac.RoleBindingList, error) - CreateRoleBinding(ctx genericapirequest.Context, roleBinding *rbac.RoleBinding) error - UpdateRoleBinding(ctx genericapirequest.Context, roleBinding *rbac.RoleBinding) error + CreateRoleBinding(ctx genericapirequest.Context, roleBinding *rbac.RoleBinding, createValidation rest.ValidateObjectFunc) error + UpdateRoleBinding(ctx genericapirequest.Context, roleBinding *rbac.RoleBinding, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) error GetRoleBinding(ctx genericapirequest.Context, name string, options *metav1.GetOptions) (*rbac.RoleBinding, error) DeleteRoleBinding(ctx genericapirequest.Context, name string) error WatchRoleBindings(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (watch.Interface, error) @@ -56,14 +55,14 @@ func (s *storage) ListRoleBindings(ctx genericapirequest.Context, options *metai return obj.(*rbac.RoleBindingList), nil } -func (s *storage) CreateRoleBinding(ctx genericapirequest.Context, roleBinding *rbac.RoleBinding) error { +func (s *storage) CreateRoleBinding(ctx genericapirequest.Context, roleBinding *rbac.RoleBinding, createValidation rest.ValidateObjectFunc) error { // TODO(ericchiang): add additional validation - _, err := s.Create(ctx, roleBinding, false) + _, err := s.Create(ctx, roleBinding, createValidation, false) return err } -func (s *storage) UpdateRoleBinding(ctx genericapirequest.Context, roleBinding *rbac.RoleBinding) error { - _, _, err := s.Update(ctx, roleBinding.Name, rest.DefaultUpdatedObjectInfo(roleBinding, api.Scheme)) +func (s *storage) UpdateRoleBinding(ctx genericapirequest.Context, roleBinding *rbac.RoleBinding, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) error { + _, _, err := s.Update(ctx, roleBinding.Name, rest.DefaultUpdatedObjectInfo(roleBinding), createValidation, updateValidation) return err } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/rolebinding/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/rbac/rolebinding/storage/BUILD index ae65f10814a4..09341405d514 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/rolebinding/storage/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/rolebinding/storage/BUILD @@ -8,8 +8,8 @@ load( go_library( name = "go_default_library", srcs = ["storage.go"], + importpath = "k8s.io/kubernetes/pkg/registry/rbac/rolebinding/storage", deps = [ - "//pkg/api:go_default_library", "//pkg/apis/rbac:go_default_library", "//pkg/registry/rbac/rolebinding:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/rolebinding/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/rbac/rolebinding/storage/storage.go index ff4d72ae0c4e..d549583a2467 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/rolebinding/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/rolebinding/storage/storage.go @@ -20,7 +20,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/rbac" "k8s.io/kubernetes/pkg/registry/rbac/rolebinding" ) @@ -33,7 +32,6 @@ type REST struct { // NewREST returns a RESTStorage object that will work against RoleBinding objects. 
func NewREST(optsGetter generic.RESTOptionsGetter) *REST { store := &genericregistry.Store{ - Copier: api.Scheme, NewFunc: func() runtime.Object { return &rbac.RoleBinding{} }, NewListFunc: func() runtime.Object { return &rbac.RoleBindingList{} }, DefaultQualifiedResource: rbac.Resource("rolebindings"), diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/rolebinding/strategy.go b/vendor/k8s.io/kubernetes/pkg/registry/rbac/rolebinding/strategy.go index 9d4781a50c30..136cffd34b96 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/rolebinding/strategy.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/rolebinding/strategy.go @@ -22,7 +22,7 @@ import ( genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/apiserver/pkg/storage/names" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/rbac" "k8s.io/kubernetes/pkg/apis/rbac/validation" ) @@ -35,7 +35,7 @@ type strategy struct { // strategy is the default logic that applies when creating and updating // RoleBinding objects. -var Strategy = strategy{api.Scheme, names.SimpleNameGenerator} +var Strategy = strategy{legacyscheme.Scheme, names.SimpleNameGenerator} // Strategy should implement rest.RESTCreateStrategy var _ rest.RESTCreateStrategy = Strategy diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/validation/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/rbac/validation/BUILD index 116de73e5136..7618b8ff4ec9 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/validation/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/validation/BUILD @@ -13,6 +13,7 @@ go_test( "policy_comparator_test.go", "rule_test.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/rbac/validation", library = ":go_default_library", deps = [ "//pkg/apis/rbac:go_default_library", @@ -29,6 +30,7 @@ go_library( "policy_comparator.go", "rule.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/rbac/validation", deps = [ "//pkg/apis/rbac:go_default_library", "//vendor/github.com/golang/glog:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/validation/policy_comparator.go b/vendor/k8s.io/kubernetes/pkg/registry/rbac/validation/policy_comparator.go index 6c69c24fb22c..4b2ba5158145 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/validation/policy_comparator.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/validation/policy_comparator.go @@ -105,6 +105,31 @@ func hasAll(set, contains []string) bool { return true } +func resourceCoversAll(setResources, coversResources []string) bool { + // if we have a star or an exact match on all resources, then we match + if has(setResources, rbac.ResourceAll) || hasAll(setResources, coversResources) { + return true + } + + for _, path := range coversResources { + // if we have an exact match, then we match. + if has(setResources, path) { + continue + } + // if we're not a subresource, then we definitely don't match. fail. 
+ if !strings.Contains(path, "/") { + return false + } + tokens := strings.SplitN(path, "/", 2) + resourceToCheck := "*/" + tokens[1] + if !has(setResources, resourceToCheck) { + return false + } + } + + return true +} + func nonResourceURLsCoversAll(set, covers []string) bool { for _, path := range covers { covered := false @@ -133,7 +158,7 @@ func nonResourceURLCovers(ownerPath, subPath string) bool { func ruleCovers(ownerRule, subRule rbac.PolicyRule) bool { verbMatches := has(ownerRule.Verbs, rbac.VerbAll) || hasAll(ownerRule.Verbs, subRule.Verbs) groupMatches := has(ownerRule.APIGroups, rbac.APIGroupAll) || hasAll(ownerRule.APIGroups, subRule.APIGroups) - resourceMatches := has(ownerRule.Resources, rbac.ResourceAll) || hasAll(ownerRule.Resources, subRule.Resources) + resourceMatches := resourceCoversAll(ownerRule.Resources, subRule.Resources) nonResourceURLMatches := nonResourceURLsCoversAll(ownerRule.NonResourceURLs, subRule.NonResourceURLs) resourceNameMatches := false diff --git a/vendor/k8s.io/kubernetes/pkg/registry/scheduling/priorityclass/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/scheduling/priorityclass/BUILD index 2e62a31485e7..0ed675fcd9e9 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/scheduling/priorityclass/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/scheduling/priorityclass/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/scheduling/priorityclass", library = ":go_default_library", deps = [ "//pkg/apis/scheduling:go_default_library", @@ -24,8 +25,9 @@ go_library( "registry.go", "strategy.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/scheduling/priorityclass", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/scheduling:go_default_library", "//pkg/apis/scheduling/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/scheduling/priorityclass/registry.go b/vendor/k8s.io/kubernetes/pkg/registry/scheduling/priorityclass/registry.go index 53e2ade1974f..5e4c44dc1611 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/scheduling/priorityclass/registry.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/scheduling/priorityclass/registry.go @@ -22,15 +22,14 @@ import ( "k8s.io/apimachinery/pkg/watch" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/scheduling" ) // Registry is an interface for things that know how to store PriorityClass. 
type Registry interface { ListPriorityClasses(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (*scheduling.PriorityClassList, error) - CreatePriorityClass(ctx genericapirequest.Context, pc *scheduling.PriorityClass) error - UpdatePriorityClass(ctx genericapirequest.Context, pc *scheduling.PriorityClass) error + CreatePriorityClass(ctx genericapirequest.Context, pc *scheduling.PriorityClass, createValidation rest.ValidateObjectFunc) error + UpdatePriorityClass(ctx genericapirequest.Context, pc *scheduling.PriorityClass, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) error GetPriorityClass(ctx genericapirequest.Context, name string, options *metav1.GetOptions) (*scheduling.PriorityClass, error) DeletePriorityClass(ctx genericapirequest.Context, name string) error WatchPriorityClasses(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (watch.Interface, error) @@ -56,13 +55,13 @@ func (s *storage) ListPriorityClasses(ctx genericapirequest.Context, options *me return obj.(*scheduling.PriorityClassList), nil } -func (s *storage) CreatePriorityClass(ctx genericapirequest.Context, pc *scheduling.PriorityClass) error { - _, err := s.Create(ctx, pc, false) +func (s *storage) CreatePriorityClass(ctx genericapirequest.Context, pc *scheduling.PriorityClass, createValidation rest.ValidateObjectFunc) error { + _, err := s.Create(ctx, pc, createValidation, false) return err } -func (s *storage) UpdatePriorityClass(ctx genericapirequest.Context, pc *scheduling.PriorityClass) error { - _, _, err := s.Update(ctx, pc.Name, rest.DefaultUpdatedObjectInfo(pc, api.Scheme)) +func (s *storage) UpdatePriorityClass(ctx genericapirequest.Context, pc *scheduling.PriorityClass, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) error { + _, _, err := s.Update(ctx, pc.Name, rest.DefaultUpdatedObjectInfo(pc), createValidation, updateValidation) return err } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/scheduling/priorityclass/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/scheduling/priorityclass/storage/BUILD index eae915065a5a..cec9fe2e28f9 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/scheduling/priorityclass/storage/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/scheduling/priorityclass/storage/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/scheduling/priorityclass/storage", library = ":go_default_library", deps = [ "//pkg/apis/scheduling:go_default_library", @@ -18,6 +19,7 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/generic/testing:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/etcd/testing:go_default_library", ], ) @@ -25,8 +27,8 @@ go_test( go_library( name = "go_default_library", srcs = ["storage.go"], + importpath = "k8s.io/kubernetes/pkg/registry/scheduling/priorityclass/storage", deps = [ - "//pkg/api:go_default_library", "//pkg/apis/scheduling:go_default_library", "//pkg/registry/scheduling/priorityclass:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/scheduling/priorityclass/storage/storage.go 
b/vendor/k8s.io/kubernetes/pkg/registry/scheduling/priorityclass/storage/storage.go index 6061f3b8f26b..b4203621199f 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/scheduling/priorityclass/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/scheduling/priorityclass/storage/storage.go @@ -21,7 +21,6 @@ import ( "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" schedulingapi "k8s.io/kubernetes/pkg/apis/scheduling" "k8s.io/kubernetes/pkg/registry/scheduling/priorityclass" ) @@ -34,7 +33,6 @@ type REST struct { // NewREST returns a RESTStorage object that will work against priority classes. func NewREST(optsGetter generic.RESTOptionsGetter) *REST { store := &genericregistry.Store{ - Copier: api.Scheme, NewFunc: func() runtime.Object { return &schedulingapi.PriorityClass{} }, NewListFunc: func() runtime.Object { return &schedulingapi.PriorityClassList{} }, DefaultQualifiedResource: schedulingapi.Resource("priorityclasses"), diff --git a/vendor/k8s.io/kubernetes/pkg/registry/scheduling/priorityclass/strategy.go b/vendor/k8s.io/kubernetes/pkg/registry/scheduling/priorityclass/strategy.go index 6a6e060eef70..f39dce1df1af 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/scheduling/priorityclass/strategy.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/scheduling/priorityclass/strategy.go @@ -21,7 +21,7 @@ import ( "k8s.io/apimachinery/pkg/util/validation/field" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/storage/names" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/scheduling" "k8s.io/kubernetes/pkg/apis/scheduling/validation" ) @@ -33,7 +33,7 @@ type priorityClassStrategy struct { } // Strategy is the default logic that applies when creating and updating PriorityClass objects. -var Strategy = priorityClassStrategy{api.Scheme, names.SimpleNameGenerator} +var Strategy = priorityClassStrategy{legacyscheme.Scheme, names.SimpleNameGenerator} // NamespaceScoped returns true because all PriorityClasses are global. 
func (priorityClassStrategy) NamespaceScoped() bool { diff --git a/vendor/k8s.io/kubernetes/pkg/registry/scheduling/rest/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/scheduling/rest/BUILD index 7a316a4473c4..54e6dd25ce16 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/scheduling/rest/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/scheduling/rest/BUILD @@ -8,8 +8,9 @@ load( go_library( name = "go_default_library", srcs = ["storage_scheduling.go"], + importpath = "k8s.io/kubernetes/pkg/registry/scheduling/rest", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/scheduling:go_default_library", "//pkg/apis/scheduling/v1alpha1:go_default_library", "//pkg/registry/scheduling/priorityclass/storage:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/scheduling/rest/storage_scheduling.go b/vendor/k8s.io/kubernetes/pkg/registry/scheduling/rest/storage_scheduling.go index 49418a645bac..f56a58653822 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/scheduling/rest/storage_scheduling.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/scheduling/rest/storage_scheduling.go @@ -21,7 +21,7 @@ import ( "k8s.io/apiserver/pkg/registry/rest" genericapiserver "k8s.io/apiserver/pkg/server" serverstorage "k8s.io/apiserver/pkg/server/storage" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/scheduling" schedulingapiv1alpha1 "k8s.io/kubernetes/pkg/apis/scheduling/v1alpha1" priorityclassstore "k8s.io/kubernetes/pkg/registry/scheduling/priorityclass/storage" @@ -30,7 +30,7 @@ import ( type RESTStorageProvider struct{} func (p RESTStorageProvider) NewRESTStorage(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter) (genericapiserver.APIGroupInfo, bool) { - apiGroupInfo := genericapiserver.NewDefaultAPIGroupInfo(scheduling.GroupName, api.Registry, api.Scheme, api.ParameterCodec, api.Codecs) + apiGroupInfo := genericapiserver.NewDefaultAPIGroupInfo(scheduling.GroupName, legacyscheme.Registry, legacyscheme.Scheme, legacyscheme.ParameterCodec, legacyscheme.Codecs) if apiResourceConfigSource.AnyResourcesForVersionEnabled(schedulingapiv1alpha1.SchemeGroupVersion) { apiGroupInfo.VersionedResourcesStorageMap[schedulingapiv1alpha1.SchemeGroupVersion.Version] = p.v1alpha1Storage(apiResourceConfigSource, restOptionsGetter) diff --git a/vendor/k8s.io/kubernetes/pkg/registry/settings/podpreset/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/settings/podpreset/BUILD index 3a5eea5b54da..5d58bda6a7dc 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/settings/podpreset/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/settings/podpreset/BUILD @@ -12,8 +12,9 @@ go_library( "registry.go", "strategy.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/settings/podpreset", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/api/pod:go_default_library", "//pkg/apis/settings:go_default_library", "//pkg/apis/settings/validation:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/settings/podpreset/registry.go b/vendor/k8s.io/kubernetes/pkg/registry/settings/podpreset/registry.go index d1c96f9a380c..74b056d0e67a 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/settings/podpreset/registry.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/settings/podpreset/registry.go @@ -22,15 +22,14 @@ import ( "k8s.io/apimachinery/pkg/watch" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" 
"k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/settings" ) // Registry is an interface for things that know how to store PodPresets. type Registry interface { ListPodPresets(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (*settings.PodPresetList, error) - CreatePodPreset(ctx genericapirequest.Context, pp *settings.PodPreset) error - UpdatePodPreset(ctx genericapirequest.Context, pp *settings.PodPreset) error + CreatePodPreset(ctx genericapirequest.Context, pp *settings.PodPreset, createValidation rest.ValidateObjectFunc) error + UpdatePodPreset(ctx genericapirequest.Context, pp *settings.PodPreset, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) error GetPodPreset(ctx genericapirequest.Context, ppID string, options *metav1.GetOptions) (*settings.PodPreset, error) DeletePodPreset(ctx genericapirequest.Context, ppID string) error WatchPodPresets(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (watch.Interface, error) @@ -56,13 +55,13 @@ func (s *storage) ListPodPresets(ctx genericapirequest.Context, options *metaint return obj.(*settings.PodPresetList), nil } -func (s *storage) CreatePodPreset(ctx genericapirequest.Context, pp *settings.PodPreset) error { - _, err := s.Create(ctx, pp, false) +func (s *storage) CreatePodPreset(ctx genericapirequest.Context, pp *settings.PodPreset, createValidation rest.ValidateObjectFunc) error { + _, err := s.Create(ctx, pp, createValidation, false) return err } -func (s *storage) UpdatePodPreset(ctx genericapirequest.Context, pp *settings.PodPreset) error { - _, _, err := s.Update(ctx, pp.Name, rest.DefaultUpdatedObjectInfo(pp, api.Scheme)) +func (s *storage) UpdatePodPreset(ctx genericapirequest.Context, pp *settings.PodPreset, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) error { + _, _, err := s.Update(ctx, pp.Name, rest.DefaultUpdatedObjectInfo(pp), createValidation, updateValidation) return err } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/settings/podpreset/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/settings/podpreset/storage/BUILD index 615cd3fd882b..0057bb4acacf 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/settings/podpreset/storage/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/settings/podpreset/storage/BUILD @@ -3,13 +3,14 @@ package(default_visibility = ["//visibility:public"]) load( "@io_bazel_rules_go//go:def.bzl", "go_library", + "go_test", ) go_library( name = "go_default_library", srcs = ["storage.go"], + importpath = "k8s.io/kubernetes/pkg/registry/settings/podpreset/storage", deps = [ - "//pkg/api:go_default_library", "//pkg/apis/settings:go_default_library", "//pkg/registry/settings/podpreset:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", @@ -30,3 +31,22 @@ filegroup( srcs = [":package-srcs"], tags = ["automanaged"], ) + +go_test( + name = "go_default_test", + srcs = ["storage_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/settings/podpreset/storage", + library = ":go_default_library", + deps = [ + "//pkg/apis/core:go_default_library", + "//pkg/apis/settings:go_default_library", + "//pkg/registry/registrytest:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + 
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/generic/testing:go_default_library", + "//vendor/k8s.io/apiserver/pkg/storage/etcd/testing:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/registry/settings/podpreset/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/settings/podpreset/storage/storage.go index b0eb11766c11..1bdc8c6115d7 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/settings/podpreset/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/settings/podpreset/storage/storage.go @@ -20,7 +20,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" - "k8s.io/kubernetes/pkg/api" settingsapi "k8s.io/kubernetes/pkg/apis/settings" "k8s.io/kubernetes/pkg/registry/settings/podpreset" ) @@ -33,7 +32,6 @@ type REST struct { // NewREST returns a RESTStorage object that will work against replication controllers. func NewREST(optsGetter generic.RESTOptionsGetter) *REST { store := &genericregistry.Store{ - Copier: api.Scheme, NewFunc: func() runtime.Object { return &settingsapi.PodPreset{} }, NewListFunc: func() runtime.Object { return &settingsapi.PodPresetList{} }, DefaultQualifiedResource: settingsapi.Resource("podpresets"), diff --git a/vendor/k8s.io/kubernetes/pkg/registry/settings/podpreset/strategy.go b/vendor/k8s.io/kubernetes/pkg/registry/settings/podpreset/strategy.go index 65db5988adf4..eaeb927926ba 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/settings/podpreset/strategy.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/settings/podpreset/strategy.go @@ -21,7 +21,7 @@ import ( "k8s.io/apimachinery/pkg/util/validation/field" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/storage/names" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/api/pod" "k8s.io/kubernetes/pkg/apis/settings" "k8s.io/kubernetes/pkg/apis/settings/validation" @@ -34,7 +34,7 @@ type podPresetStrategy struct { } // Strategy is the default logic that applies when creating and updating Pod Preset objects. -var Strategy = podPresetStrategy{api.Scheme, names.SimpleNameGenerator} +var Strategy = podPresetStrategy{legacyscheme.Scheme, names.SimpleNameGenerator} // NamespaceScoped returns true because all Pod Presets need to be within a namespace. 
func (podPresetStrategy) NamespaceScoped() bool { diff --git a/vendor/k8s.io/kubernetes/pkg/registry/settings/rest/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/settings/rest/BUILD index 0251054ff159..29fac2542859 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/settings/rest/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/settings/rest/BUILD @@ -8,8 +8,9 @@ load( go_library( name = "go_default_library", srcs = ["storage_settings.go"], + importpath = "k8s.io/kubernetes/pkg/registry/settings/rest", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/settings:go_default_library", "//pkg/registry/settings/podpreset/storage:go_default_library", "//vendor/k8s.io/api/settings/v1alpha1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/settings/rest/storage_settings.go b/vendor/k8s.io/kubernetes/pkg/registry/settings/rest/storage_settings.go index 812b8be8df3a..f976b3726a56 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/settings/rest/storage_settings.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/settings/rest/storage_settings.go @@ -22,7 +22,7 @@ import ( "k8s.io/apiserver/pkg/registry/rest" genericapiserver "k8s.io/apiserver/pkg/server" serverstorage "k8s.io/apiserver/pkg/server/storage" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/settings" podpresetstore "k8s.io/kubernetes/pkg/registry/settings/podpreset/storage" ) @@ -30,7 +30,7 @@ import ( type RESTStorageProvider struct{} func (p RESTStorageProvider) NewRESTStorage(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter) (genericapiserver.APIGroupInfo, bool) { - apiGroupInfo := genericapiserver.NewDefaultAPIGroupInfo(settings.GroupName, api.Registry, api.Scheme, api.ParameterCodec, api.Codecs) + apiGroupInfo := genericapiserver.NewDefaultAPIGroupInfo(settings.GroupName, legacyscheme.Registry, legacyscheme.Scheme, legacyscheme.ParameterCodec, legacyscheme.Codecs) // If you add a version here, be sure to add an entry in `k8s.io/kubernetes/cmd/kube-apiserver/app/aggregator.go with specific priorities. 
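The recurring import change in these hunks moves the api.Scheme/api.Registry/api.Codecs globals from pkg/api to pkg/api/legacyscheme. A minimal sketch of the new spelling at a NewDefaultAPIGroupInfo call site, assuming only what the hunks above already show:

package example

import (
	genericapiserver "k8s.io/apiserver/pkg/server"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	"k8s.io/kubernetes/pkg/apis/settings"
)

func newSettingsGroupInfo() genericapiserver.APIGroupInfo {
	// Same globals as before (Registry, Scheme, ParameterCodec, Codecs),
	// now reached through pkg/api/legacyscheme instead of pkg/api.
	return genericapiserver.NewDefaultAPIGroupInfo(settings.GroupName,
		legacyscheme.Registry, legacyscheme.Scheme, legacyscheme.ParameterCodec, legacyscheme.Codecs)
}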
// TODO refactor the plumbing to provide the information in the APIGroupInfo diff --git a/vendor/k8s.io/kubernetes/pkg/registry/storage/rest/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/storage/rest/BUILD index c5cb419e8b93..7fabe3b70381 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/storage/rest/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/storage/rest/BUILD @@ -8,11 +8,14 @@ load( go_library( name = "go_default_library", srcs = ["storage_storage.go"], + importpath = "k8s.io/kubernetes/pkg/registry/storage/rest", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/storage:go_default_library", "//pkg/registry/storage/storageclass/storage:go_default_library", + "//pkg/registry/storage/volumeattachment/storage:go_default_library", "//vendor/k8s.io/api/storage/v1:go_default_library", + "//vendor/k8s.io/api/storage/v1alpha1:go_default_library", "//vendor/k8s.io/api/storage/v1beta1:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/storage/rest/storage_storage.go b/vendor/k8s.io/kubernetes/pkg/registry/storage/rest/storage_storage.go index c4d03af4d416..c8ab31e4acd0 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/storage/rest/storage_storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/storage/rest/storage_storage.go @@ -18,24 +18,30 @@ package rest import ( storageapiv1 "k8s.io/api/storage/v1" + storageapiv1alpha1 "k8s.io/api/storage/v1alpha1" storageapiv1beta1 "k8s.io/api/storage/v1beta1" "k8s.io/apiserver/pkg/registry/generic" "k8s.io/apiserver/pkg/registry/rest" genericapiserver "k8s.io/apiserver/pkg/server" serverstorage "k8s.io/apiserver/pkg/server/storage" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" storageapi "k8s.io/kubernetes/pkg/apis/storage" storageclassstore "k8s.io/kubernetes/pkg/registry/storage/storageclass/storage" + volumeattachmentstore "k8s.io/kubernetes/pkg/registry/storage/volumeattachment/storage" ) type RESTStorageProvider struct { } func (p RESTStorageProvider) NewRESTStorage(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter) (genericapiserver.APIGroupInfo, bool) { - apiGroupInfo := genericapiserver.NewDefaultAPIGroupInfo(storageapi.GroupName, api.Registry, api.Scheme, api.ParameterCodec, api.Codecs) + apiGroupInfo := genericapiserver.NewDefaultAPIGroupInfo(storageapi.GroupName, legacyscheme.Registry, legacyscheme.Scheme, legacyscheme.ParameterCodec, legacyscheme.Codecs) // If you add a version here, be sure to add an entry in `k8s.io/kubernetes/cmd/kube-apiserver/app/aggregator.go with specific priorities. 
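The storage group now wires a v1alpha1 "volumeattachments" resource, as registered in the v1alpha1Storage hunk that follows. A rough client-side sketch of such an object, assuming the v1alpha1 types vendored under k8s.io/api/storage/v1alpha1 (Attacher, NodeName and a PersistentVolumeName source) and that the alpha group-version has been enabled in the apiserver's resource config (for example via --runtime-config=storage.k8s.io/v1alpha1=true); all names below are hypothetical.

package example

import (
	storagev1alpha1 "k8s.io/api/storage/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func exampleVolumeAttachment() *storagev1alpha1.VolumeAttachment {
	pvName := "pv-0001" // hypothetical persistent volume
	return &storagev1alpha1.VolumeAttachment{
		ObjectMeta: metav1.ObjectMeta{Name: "attach-pv-0001-to-node-1"},
		Spec: storagev1alpha1.VolumeAttachmentSpec{
			Attacher: "example.com/external-attacher", // hypothetical attacher name
			NodeName: "node-1",
			Source: storagev1alpha1.VolumeAttachmentSource{
				PersistentVolumeName: &pvName,
			},
		},
	}
}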
// TODO refactor the plumbing to provide the information in the APIGroupInfo + if apiResourceConfigSource.AnyResourcesForVersionEnabled(storageapiv1alpha1.SchemeGroupVersion) { + apiGroupInfo.VersionedResourcesStorageMap[storageapiv1alpha1.SchemeGroupVersion.Version] = p.v1alpha1Storage(apiResourceConfigSource, restOptionsGetter) + apiGroupInfo.GroupMeta.GroupVersion = storageapiv1alpha1.SchemeGroupVersion + } if apiResourceConfigSource.AnyResourcesForVersionEnabled(storageapiv1beta1.SchemeGroupVersion) { apiGroupInfo.VersionedResourcesStorageMap[storageapiv1beta1.SchemeGroupVersion.Version] = p.v1beta1Storage(apiResourceConfigSource, restOptionsGetter) apiGroupInfo.GroupMeta.GroupVersion = storageapiv1beta1.SchemeGroupVersion @@ -48,6 +54,19 @@ func (p RESTStorageProvider) NewRESTStorage(apiResourceConfigSource serverstorag return apiGroupInfo, true } +func (p RESTStorageProvider) v1alpha1Storage(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter) map[string]rest.Storage { + version := storageapiv1alpha1.SchemeGroupVersion + + storage := map[string]rest.Storage{} + + if apiResourceConfigSource.ResourceEnabled(version.WithResource("volumeattachments")) { + volumeAttachmentStorage := volumeattachmentstore.NewREST(restOptionsGetter) + storage["volumeattachments"] = volumeAttachmentStorage + } + + return storage +} + func (p RESTStorageProvider) v1beta1Storage(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter) map[string]rest.Storage { version := storageapiv1beta1.SchemeGroupVersion diff --git a/vendor/k8s.io/kubernetes/pkg/registry/storage/storageclass/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/storage/storageclass/BUILD index 9ad952bafc92..d8d1d0a375c5 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/storage/storageclass/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/storage/storageclass/BUILD @@ -12,9 +12,11 @@ go_library( "doc.go", "strategy.go", ], + importpath = "k8s.io/kubernetes/pkg/registry/storage/storageclass", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/storage:go_default_library", + "//pkg/apis/storage/util:go_default_library", "//pkg/apis/storage/validation:go_default_library", "//pkg/features:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", @@ -28,9 +30,10 @@ go_library( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/storage/storageclass", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/storage:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/storage/storageclass/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/storage/storageclass/storage/BUILD index b4a8b204ef90..1d52ca0fb372 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/storage/storageclass/storage/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/registry/storage/storageclass/storage/BUILD @@ -9,9 +9,10 @@ load( go_test( name = "go_default_test", srcs = ["storage_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/storage/storageclass/storage", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/storage:go_default_library", 
"//pkg/registry/registrytest:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -19,6 +20,7 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/generic/testing:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/etcd/testing:go_default_library", ], ) @@ -26,8 +28,8 @@ go_test( go_library( name = "go_default_library", srcs = ["storage.go"], + importpath = "k8s.io/kubernetes/pkg/registry/storage/storageclass/storage", deps = [ - "//pkg/api:go_default_library", "//pkg/apis/storage:go_default_library", "//pkg/registry/storage/storageclass:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/registry/storage/storageclass/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/storage/storageclass/storage/storage.go index 5026f41ace1e..da0a82d779c4 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/storage/storageclass/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/storage/storageclass/storage/storage.go @@ -21,7 +21,6 @@ import ( "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" storageapi "k8s.io/kubernetes/pkg/apis/storage" "k8s.io/kubernetes/pkg/registry/storage/storageclass" ) @@ -33,7 +32,6 @@ type REST struct { // NewREST returns a RESTStorage object that will work against persistent volumes. func NewREST(optsGetter generic.RESTOptionsGetter) *REST { store := &genericregistry.Store{ - Copier: api.Scheme, NewFunc: func() runtime.Object { return &storageapi.StorageClass{} }, NewListFunc: func() runtime.Object { return &storageapi.StorageClassList{} }, DefaultQualifiedResource: storageapi.Resource("storageclasses"), diff --git a/vendor/k8s.io/kubernetes/pkg/registry/storage/storageclass/strategy.go b/vendor/k8s.io/kubernetes/pkg/registry/storage/storageclass/strategy.go index 0da81213437a..b1cbf49ebcf5 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/storage/storageclass/strategy.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/storage/storageclass/strategy.go @@ -22,8 +22,9 @@ import ( genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/storage/names" utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/storage" + storageutil "k8s.io/kubernetes/pkg/apis/storage/util" "k8s.io/kubernetes/pkg/apis/storage/validation" "k8s.io/kubernetes/pkg/features" ) @@ -36,7 +37,7 @@ type storageClassStrategy struct { // Strategy is the default logic that applies when creating and updating // StorageClass objects via the REST API. 
-var Strategy = storageClassStrategy{api.Scheme, names.SimpleNameGenerator} +var Strategy = storageClassStrategy{legacyscheme.Scheme, names.SimpleNameGenerator} func (storageClassStrategy) NamespaceScoped() bool { return false @@ -49,6 +50,8 @@ func (storageClassStrategy) PrepareForCreate(ctx genericapirequest.Context, obj if !utilfeature.DefaultFeatureGate.Enabled(features.ExpandPersistentVolumes) { class.AllowVolumeExpansion = nil } + + storageutil.DropDisabledAlphaFields(class) } func (storageClassStrategy) Validate(ctx genericapirequest.Context, obj runtime.Object) field.ErrorList { @@ -73,6 +76,8 @@ func (storageClassStrategy) PrepareForUpdate(ctx genericapirequest.Context, obj, newClass.AllowVolumeExpansion = nil oldClass.AllowVolumeExpansion = nil } + storageutil.DropDisabledAlphaFields(oldClass) + storageutil.DropDisabledAlphaFields(newClass) } func (storageClassStrategy) ValidateUpdate(ctx genericapirequest.Context, obj, old runtime.Object) field.ErrorList { diff --git a/vendor/k8s.io/kubernetes/pkg/registry/storage/volumeattachment/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/storage/volumeattachment/BUILD new file mode 100644 index 000000000000..7c014068604d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/registry/storage/volumeattachment/BUILD @@ -0,0 +1,49 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "strategy.go", + ], + importpath = "k8s.io/kubernetes/pkg/registry/storage/volumeattachment", + visibility = ["//visibility:public"], + deps = [ + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/storage:go_default_library", + "//pkg/apis/storage/validation:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", + "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", + "//vendor/k8s.io/apiserver/pkg/storage/names:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["strategy_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/storage/volumeattachment", + library = ":go_default_library", + deps = [ + "//pkg/apis/storage:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/registry/storage/volumeattachment/storage:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/registry/storage/volumeattachment/doc.go b/vendor/k8s.io/kubernetes/pkg/registry/storage/volumeattachment/doc.go new file mode 100644 index 000000000000..91bb452354e4 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/registry/storage/volumeattachment/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package volumeattachment provides Registry interface and its REST +// implementation for storing volumeattachment api objects. +package volumeattachment diff --git a/vendor/k8s.io/kubernetes/pkg/registry/storage/volumeattachment/storage/BUILD b/vendor/k8s.io/kubernetes/pkg/registry/storage/volumeattachment/storage/BUILD new file mode 100644 index 000000000000..5601437087c4 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/registry/storage/volumeattachment/storage/BUILD @@ -0,0 +1,49 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["storage.go"], + importpath = "k8s.io/kubernetes/pkg/registry/storage/volumeattachment/storage", + visibility = ["//visibility:public"], + deps = [ + "//pkg/apis/storage:go_default_library", + "//pkg/registry/storage/volumeattachment:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/generic/registry:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["storage_test.go"], + importpath = "k8s.io/kubernetes/pkg/registry/storage/volumeattachment/storage", + library = ":go_default_library", + deps = [ + "//pkg/api/testapi:go_default_library", + "//pkg/apis/storage:go_default_library", + "//pkg/registry/registrytest:go_default_library", + "//vendor/k8s.io/api/storage/v1alpha1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", + "//vendor/k8s.io/apiserver/pkg/registry/generic/testing:go_default_library", + "//vendor/k8s.io/apiserver/pkg/storage/etcd/testing:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/registry/storage/volumeattachment/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/storage/volumeattachment/storage/storage.go new file mode 100644 index 000000000000..28a3208e4133 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/registry/storage/volumeattachment/storage/storage.go @@ -0,0 +1,50 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package storage + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/registry/generic" + genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" + storageapi "k8s.io/kubernetes/pkg/apis/storage" + "k8s.io/kubernetes/pkg/registry/storage/volumeattachment" +) + +// REST object that will work against persistent volumes. +type REST struct { + *genericregistry.Store +} + +// NewREST returns a RESTStorage object that will work against persistent volumes. +func NewREST(optsGetter generic.RESTOptionsGetter) *REST { + store := &genericregistry.Store{ + NewFunc: func() runtime.Object { return &storageapi.VolumeAttachment{} }, + NewListFunc: func() runtime.Object { return &storageapi.VolumeAttachmentList{} }, + DefaultQualifiedResource: storageapi.Resource("volumeattachments"), + + CreateStrategy: volumeattachment.Strategy, + UpdateStrategy: volumeattachment.Strategy, + DeleteStrategy: volumeattachment.Strategy, + ReturnDeletedObject: true, + } + options := &generic.StoreOptions{RESTOptions: optsGetter} + if err := store.CompleteWithOptions(options); err != nil { + panic(err) // TODO: Propagate error up + } + + return &REST{store} +} diff --git a/vendor/k8s.io/kubernetes/pkg/registry/storage/volumeattachment/strategy.go b/vendor/k8s.io/kubernetes/pkg/registry/storage/volumeattachment/strategy.go new file mode 100644 index 000000000000..e319a0d878ef --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/registry/storage/volumeattachment/strategy.go @@ -0,0 +1,73 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package volumeattachment + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + genericapirequest "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/storage/names" + "k8s.io/kubernetes/pkg/api/legacyscheme" + "k8s.io/kubernetes/pkg/apis/storage" + "k8s.io/kubernetes/pkg/apis/storage/validation" +) + +// volumeAttachmentStrategy implements behavior for VolumeAttachment objects +type volumeAttachmentStrategy struct { + runtime.ObjectTyper + names.NameGenerator +} + +// Strategy is the default logic that applies when creating and updating +// VolumeAttachment objects via the REST API. +var Strategy = volumeAttachmentStrategy{legacyscheme.Scheme, names.SimpleNameGenerator} + +func (volumeAttachmentStrategy) NamespaceScoped() bool { + return false +} + +// ResetBeforeCreate clears the Status field which is not allowed to be set by end users on creation. +func (volumeAttachmentStrategy) PrepareForCreate(ctx genericapirequest.Context, obj runtime.Object) { +} + +func (volumeAttachmentStrategy) Validate(ctx genericapirequest.Context, obj runtime.Object) field.ErrorList { + volumeAttachment := obj.(*storage.VolumeAttachment) + return validation.ValidateVolumeAttachment(volumeAttachment) +} + +// Canonicalize normalizes the object after validation. 
+func (volumeAttachmentStrategy) Canonicalize(obj runtime.Object) { +} + +func (volumeAttachmentStrategy) AllowCreateOnUpdate() bool { + return false +} + +// PrepareForUpdate sets the Status fields which is not allowed to be set by an end user updating a PV +func (volumeAttachmentStrategy) PrepareForUpdate(ctx genericapirequest.Context, obj, old runtime.Object) { +} + +func (volumeAttachmentStrategy) ValidateUpdate(ctx genericapirequest.Context, obj, old runtime.Object) field.ErrorList { + newVolumeAttachmentObj := obj.(*storage.VolumeAttachment) + oldVolumeAttachmentObj := old.(*storage.VolumeAttachment) + errorList := validation.ValidateVolumeAttachment(newVolumeAttachmentObj) + return append(errorList, validation.ValidateVolumeAttachmentUpdate(newVolumeAttachmentObj, oldVolumeAttachmentObj)...) +} + +func (volumeAttachmentStrategy) AllowUnconditionalUpdate() bool { + return false +} diff --git a/vendor/k8s.io/kubernetes/pkg/routes/BUILD b/vendor/k8s.io/kubernetes/pkg/routes/BUILD index 276015ae5868..b62bff305db4 100644 --- a/vendor/k8s.io/kubernetes/pkg/routes/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/routes/BUILD @@ -12,6 +12,7 @@ go_library( "logs.go", "ui.go", ], + importpath = "k8s.io/kubernetes/pkg/routes", deps = [ "//vendor/github.com/emicklei/go-restful:go_default_library", "//vendor/k8s.io/apiserver/pkg/server/mux:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/routes/ui.go b/vendor/k8s.io/kubernetes/pkg/routes/ui.go index 40f288ab2aef..de6ca3c3abdf 100644 --- a/vendor/k8s.io/kubernetes/pkg/routes/ui.go +++ b/vendor/k8s.io/kubernetes/pkg/routes/ui.go @@ -22,9 +22,9 @@ import ( "k8s.io/apiserver/pkg/server/mux" ) -const dashboardPath = "/api/v1/namespaces/kube-system/services/kubernetes-dashboard/proxy" +const dashboardPath = "/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/" -// UIRediect redirects /ui to the kube-ui proxy path. +// UIRedirect redirects /ui to the kube-ui proxy path. type UIRedirect struct{} func (r UIRedirect) Install(c *mux.PathRecorderMux) { diff --git a/vendor/k8s.io/kubernetes/pkg/security/apparmor/BUILD b/vendor/k8s.io/kubernetes/pkg/security/apparmor/BUILD index 66055365d6f0..ad686ff2ab28 100644 --- a/vendor/k8s.io/kubernetes/pkg/security/apparmor/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/security/apparmor/BUILD @@ -13,6 +13,7 @@ go_library( "validate.go", "validate_disabled.go", ], + importpath = "k8s.io/kubernetes/pkg/security/apparmor", deps = [ "//pkg/features:go_default_library", "//pkg/kubelet/types:go_default_library", @@ -28,6 +29,7 @@ go_test( data = [ "testdata/profiles", ], + importpath = "k8s.io/kubernetes/pkg/security/apparmor", library = ":go_default_library", deps = [ "//vendor/github.com/stretchr/testify/assert:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/security/apparmor/helpers.go b/vendor/k8s.io/kubernetes/pkg/security/apparmor/helpers.go index 2224a7345c9d..5352f1332e2e 100644 --- a/vendor/k8s.io/kubernetes/pkg/security/apparmor/helpers.go +++ b/vendor/k8s.io/kubernetes/pkg/security/apparmor/helpers.go @@ -35,13 +35,16 @@ const ( ProfileRuntimeDefault = "runtime/default" // The prefix for specifying profiles loaded on the node. ProfileNamePrefix = "localhost/" + + // Unconfined profile + ProfileNameUnconfined = "unconfined" ) // Checks whether app armor is required for pod to be run. 
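With the new ProfileNameUnconfined constant, "unconfined" becomes a valid AppArmor profile value that no longer marks a pod as requiring AppArmor support on the node (see the isRequired and ValidateProfileFormat hunks that follow). A small sketch of the three accepted annotation forms, assuming the container annotation prefix defined in helpers.go; the container and profile names are hypothetical.

package example

import v1 "k8s.io/api/core/v1"

// Keys are ContainerAnnotationKeyPrefix + <container name>.
func withAppArmorAnnotations(pod *v1.Pod) {
	if pod.Annotations == nil {
		pod.Annotations = map[string]string{}
	}
	// Runtime's default profile; requires AppArmor on the node.
	pod.Annotations["container.apparmor.security.beta.kubernetes.io/app"] = "runtime/default"
	// A profile pre-loaded on the node; requires AppArmor on the node.
	pod.Annotations["container.apparmor.security.beta.kubernetes.io/sidecar"] = "localhost/my-profile"
	// Explicitly unconfined; after this change it no longer makes isRequired return true.
	pod.Annotations["container.apparmor.security.beta.kubernetes.io/debug"] = "unconfined"
}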
func isRequired(pod *v1.Pod) bool { - for key := range pod.Annotations { + for key, value := range pod.Annotations { if strings.HasPrefix(key, ContainerAnnotationKeyPrefix) { - return true + return value != ProfileNameUnconfined } } return false diff --git a/vendor/k8s.io/kubernetes/pkg/security/apparmor/validate.go b/vendor/k8s.io/kubernetes/pkg/security/apparmor/validate.go index 061a8750c2b1..740698f205c4 100644 --- a/vendor/k8s.io/kubernetes/pkg/security/apparmor/validate.go +++ b/vendor/k8s.io/kubernetes/pkg/security/apparmor/validate.go @@ -136,7 +136,7 @@ func validateProfile(profile string, loadedProfiles map[string]bool) error { } func ValidateProfileFormat(profile string) error { - if profile == "" || profile == ProfileRuntimeDefault { + if profile == "" || profile == ProfileRuntimeDefault || profile == ProfileNameUnconfined { return nil } if !strings.HasPrefix(profile, ProfileNamePrefix) { diff --git a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/BUILD b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/BUILD index 20b6c7e26b0b..282d256aaef3 100644 --- a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/BUILD @@ -14,8 +14,9 @@ go_library( "provider.go", "types.go", ], + importpath = "k8s.io/kubernetes/pkg/security/podsecuritypolicy", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", "//pkg/security/podsecuritypolicy/apparmor:go_default_library", "//pkg/security/podsecuritypolicy/capabilities:go_default_library", @@ -25,6 +26,7 @@ go_library( "//pkg/security/podsecuritypolicy/sysctl:go_default_library", "//pkg/security/podsecuritypolicy/user:go_default_library", "//pkg/security/podsecuritypolicy/util:go_default_library", + "//pkg/securitycontext:go_default_library", "//pkg/util/maps:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", @@ -34,10 +36,11 @@ go_library( go_test( name = "go_default_test", srcs = ["provider_test.go"], + importpath = "k8s.io/kubernetes/pkg/security/podsecuritypolicy", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/v1:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/v1:go_default_library", "//pkg/apis/extensions:go_default_library", "//pkg/security/apparmor:go_default_library", "//pkg/security/podsecuritypolicy/seccomp:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/OWNERS b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/OWNERS new file mode 100644 index 000000000000..d96860596215 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/OWNERS @@ -0,0 +1,13 @@ +approvers: + - deads2k + - ericchiang + - liggitt + - pweil- + - tallclair +reviewers: + - deads2k + - ericchiang + - liggitt + - php-coder + - pweil- + - tallclair diff --git a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/apparmor/BUILD b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/apparmor/BUILD index 4edc68a8d679..d1cf962c8faf 100644 --- a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/apparmor/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/apparmor/BUILD @@ -9,8 +9,9 @@ load( go_library( name = "go_default_library", srcs = ["strategy.go"], + importpath = "k8s.io/kubernetes/pkg/security/podsecuritypolicy/apparmor", deps = [ - 
"//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/security/apparmor:go_default_library", "//pkg/util/maps:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", @@ -20,9 +21,10 @@ go_library( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + importpath = "k8s.io/kubernetes/pkg/security/podsecuritypolicy/apparmor", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/security/apparmor:go_default_library", "//pkg/util/maps:go_default_library", "//vendor/github.com/davecgh/go-spew/spew:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/apparmor/strategy.go b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/apparmor/strategy.go index 47755c80c021..9f6affdd2b3f 100644 --- a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/apparmor/strategy.go +++ b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/apparmor/strategy.go @@ -21,7 +21,7 @@ import ( "strings" "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/security/apparmor" "k8s.io/kubernetes/pkg/util/maps" ) diff --git a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/capabilities/BUILD b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/capabilities/BUILD index a632b8c1bf72..87681f9e7887 100644 --- a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/capabilities/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/capabilities/BUILD @@ -13,8 +13,9 @@ go_library( "mustrunas.go", "types.go", ], + importpath = "k8s.io/kubernetes/pkg/security/podsecuritypolicy/capabilities", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", @@ -24,9 +25,10 @@ go_library( go_test( name = "go_default_test", srcs = ["mustrunas_test.go"], + importpath = "k8s.io/kubernetes/pkg/security/podsecuritypolicy/capabilities", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/capabilities/mustrunas.go b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/capabilities/mustrunas.go index d8efe066db20..bb713c7be1e3 100644 --- a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/capabilities/mustrunas.go +++ b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/capabilities/mustrunas.go @@ -21,7 +21,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/extensions" ) @@ -48,13 +48,17 @@ func NewDefaultCapabilities(defaultAddCapabilities, requiredDropCapabilities, al // 1. a capabilities.Add set containing all the required adds (unless the // container specifically is dropping the cap) and container requested adds // 2. a capabilities.Drop set containing all the required drops and container requested drops +// +// Returns the original container capabilities if no changes are required. 
func (s *defaultCapabilities) Generate(pod *api.Pod, container *api.Container) (*api.Capabilities, error) { defaultAdd := makeCapSet(s.defaultAddCapabilities) requiredDrop := makeCapSet(s.requiredDropCapabilities) containerAdd := sets.NewString() containerDrop := sets.NewString() + var containerCapabilities *api.Capabilities if container.SecurityContext != nil && container.SecurityContext.Capabilities != nil { + containerCapabilities = container.SecurityContext.Capabilities containerAdd = makeCapSet(container.SecurityContext.Capabilities.Add) containerDrop = makeCapSet(container.SecurityContext.Capabilities.Drop) } @@ -62,32 +66,25 @@ func (s *defaultCapabilities) Generate(pod *api.Pod, container *api.Container) ( // remove any default adds that the container is specifically dropping defaultAdd = defaultAdd.Difference(containerDrop) - combinedAdd := defaultAdd.Union(containerAdd).List() - combinedDrop := requiredDrop.Union(containerDrop).List() + combinedAdd := defaultAdd.Union(containerAdd) + combinedDrop := requiredDrop.Union(containerDrop) - // nothing generated? return nil - if len(combinedAdd) == 0 && len(combinedDrop) == 0 { - return nil, nil + // no changes? return the original capabilities + if (len(combinedAdd) == len(containerAdd)) && (len(combinedDrop) == len(containerDrop)) { + return containerCapabilities, nil } return &api.Capabilities{ - Add: capabilityFromStringSlice(combinedAdd), - Drop: capabilityFromStringSlice(combinedDrop), + Add: capabilityFromStringSlice(combinedAdd.List()), + Drop: capabilityFromStringSlice(combinedDrop.List()), }, nil } // Validate ensures that the specified values fall within the range of the strategy. -func (s *defaultCapabilities) Validate(pod *api.Pod, container *api.Container) field.ErrorList { +func (s *defaultCapabilities) Validate(pod *api.Pod, container *api.Container, capabilities *api.Capabilities) field.ErrorList { allErrs := field.ErrorList{} - // if the security context isn't set then we haven't generated correctly. Shouldn't get here - // if using the provider correctly - if container.SecurityContext == nil { - allErrs = append(allErrs, field.Invalid(field.NewPath("securityContext"), container.SecurityContext, "no security context is set")) - return allErrs - } - - if container.SecurityContext.Capabilities == nil { + if capabilities == nil { // if container.SC.Caps is nil then nothing was defaulted by the strategy or requested by the pod author // if there are no required caps on the strategy and nothing is requested on the pod // then we can safely return here without further validation. @@ -97,7 +94,7 @@ func (s *defaultCapabilities) Validate(pod *api.Pod, container *api.Container) f // container has no requested caps but we have required caps. We should have something in // at least the drops on the container. 
- allErrs = append(allErrs, field.Invalid(field.NewPath("capabilities"), container.SecurityContext.Capabilities, + allErrs = append(allErrs, field.Invalid(field.NewPath("capabilities"), capabilities, "required capabilities are not set on the securityContext")) return allErrs } @@ -112,7 +109,7 @@ func (s *defaultCapabilities) Validate(pod *api.Pod, container *api.Container) f // validate that anything being added is in the default or allowed sets defaultAdd := makeCapSet(s.defaultAddCapabilities) - for _, cap := range container.SecurityContext.Capabilities.Add { + for _, cap := range capabilities.Add { sCap := string(cap) if !defaultAdd.Has(sCap) && !allowedAdd.Has(sCap) { allErrs = append(allErrs, field.Invalid(field.NewPath("capabilities", "add"), sCap, "capability may not be added")) @@ -120,12 +117,12 @@ func (s *defaultCapabilities) Validate(pod *api.Pod, container *api.Container) f } // validate that anything that is required to be dropped is in the drop set - containerDrops := makeCapSet(container.SecurityContext.Capabilities.Drop) + containerDrops := makeCapSet(capabilities.Drop) for _, requiredDrop := range s.requiredDropCapabilities { sDrop := string(requiredDrop) if !containerDrops.Has(sDrop) { - allErrs = append(allErrs, field.Invalid(field.NewPath("capabilities", "drop"), container.SecurityContext.Capabilities.Drop, + allErrs = append(allErrs, field.Invalid(field.NewPath("capabilities", "drop"), capabilities.Drop, fmt.Sprintf("%s is required to be dropped but was not found", sDrop))) } } diff --git a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/capabilities/types.go b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/capabilities/types.go index 558cfd683d95..47226fabab84 100644 --- a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/capabilities/types.go +++ b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/capabilities/types.go @@ -18,7 +18,7 @@ package capabilities import ( "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) // Strategy defines the interface for all cap constraint strategies. @@ -26,5 +26,5 @@ type Strategy interface { // Generate creates the capabilities based on policy rules. Generate(pod *api.Pod, container *api.Container) (*api.Capabilities, error) // Validate ensures that the specified values fall within the range of the strategy. 
- Validate(pod *api.Pod, container *api.Container) field.ErrorList + Validate(pod *api.Pod, container *api.Container, capabilities *api.Capabilities) field.ErrorList } diff --git a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/factory.go b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/factory.go index 0fbd8a714a63..69cf3945d6aa 100644 --- a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/factory.go +++ b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/factory.go @@ -20,7 +20,7 @@ import ( "fmt" "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/security/podsecuritypolicy/apparmor" "k8s.io/kubernetes/pkg/security/podsecuritypolicy/capabilities" diff --git a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/group/BUILD b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/group/BUILD index b820249ccff7..2a7bbe66e3f8 100644 --- a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/group/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/group/BUILD @@ -14,8 +14,9 @@ go_library( "runasany.go", "types.go", ], + importpath = "k8s.io/kubernetes/pkg/security/podsecuritypolicy/group", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", "//pkg/security/podsecuritypolicy/util:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", @@ -28,9 +29,10 @@ go_test( "mustrunas_test.go", "runasany_test.go", ], + importpath = "k8s.io/kubernetes/pkg/security/podsecuritypolicy/group", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/group/mustrunas.go b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/group/mustrunas.go index 6413ed2d4d44..8fc48ac294e6 100644 --- a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/group/mustrunas.go +++ b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/group/mustrunas.go @@ -20,7 +20,7 @@ import ( "fmt" "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/extensions" psputil "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util" ) @@ -46,13 +46,13 @@ func NewMustRunAs(ranges []extensions.GroupIDRange, field string) (GroupStrategy // Generate creates the group based on policy rules. By default this returns the first group of the // first range (min val). -func (s *mustRunAs) Generate(pod *api.Pod) ([]int64, error) { +func (s *mustRunAs) Generate(_ *api.Pod) ([]int64, error) { return []int64{s.ranges[0].Min}, nil } // Generate a single value to be applied. This is used for FSGroup. This strategy will return // the first group of the first range (min val). -func (s *mustRunAs) GenerateSingle(pod *api.Pod) (*int64, error) { +func (s *mustRunAs) GenerateSingle(_ *api.Pod) (*int64, error) { single := new(int64) *single = s.ranges[0].Min return single, nil @@ -61,14 +61,9 @@ func (s *mustRunAs) GenerateSingle(pod *api.Pod) (*int64, error) { // Validate ensures that the specified values fall within the range of the strategy. // Groups are passed in here to allow this strategy to support multiple group fields (fsgroup and // supplemental groups). 
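The capabilities Strategy.Validate signature above now takes the already-resolved *api.Capabilities instead of reading container.SecurityContext itself, so callers generate first and validate the result. A minimal sketch of that flow under the new interface; the helper below is illustrative, not the actual provider.go call site.

package example

import (
	"k8s.io/apimachinery/pkg/util/validation/field"
	api "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/security/podsecuritypolicy/capabilities"
)

func resolveCapabilities(strategy capabilities.Strategy, pod *api.Pod, container *api.Container) (*api.Capabilities, field.ErrorList) {
	caps, err := strategy.Generate(pod, container)
	if err != nil {
		return nil, field.ErrorList{field.InternalError(field.NewPath("capabilities"), err)}
	}
	// Validate the resolved capabilities directly rather than whatever happens
	// to be set on container.SecurityContext.
	return caps, strategy.Validate(pod, container, caps)
}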
-func (s *mustRunAs) Validate(pod *api.Pod, groups []int64) field.ErrorList { +func (s *mustRunAs) Validate(_ *api.Pod, groups []int64) field.ErrorList { allErrs := field.ErrorList{} - if pod.Spec.SecurityContext == nil { - allErrs = append(allErrs, field.Invalid(field.NewPath("securityContext"), pod.Spec.SecurityContext, "unable to validate nil security context")) - return allErrs - } - if len(groups) == 0 && len(s.ranges) > 0 { allErrs = append(allErrs, field.Invalid(field.NewPath(s.field), groups, "unable to validate empty groups against required ranges")) } diff --git a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/group/runasany.go b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/group/runasany.go index 0d3f1182e09a..3890f849919a 100644 --- a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/group/runasany.go +++ b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/group/runasany.go @@ -18,7 +18,7 @@ package group import ( "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) // runAsAny implements the GroupStrategy interface. @@ -33,17 +33,17 @@ func NewRunAsAny() (GroupStrategy, error) { } // Generate creates the group based on policy rules. This strategy returns an empty slice. -func (s *runAsAny) Generate(pod *api.Pod) ([]int64, error) { - return []int64{}, nil +func (s *runAsAny) Generate(_ *api.Pod) ([]int64, error) { + return nil, nil } // Generate a single value to be applied. This is used for FSGroup. This strategy returns nil. -func (s *runAsAny) GenerateSingle(pod *api.Pod) (*int64, error) { +func (s *runAsAny) GenerateSingle(_ *api.Pod) (*int64, error) { return nil, nil } // Validate ensures that the specified values fall within the range of the strategy. -func (s *runAsAny) Validate(pod *api.Pod, groups []int64) field.ErrorList { +func (s *runAsAny) Validate(_ *api.Pod, groups []int64) field.ErrorList { return field.ErrorList{} } diff --git a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/group/types.go b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/group/types.go index fa3a11d97d3a..dbf1520421c5 100644 --- a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/group/types.go +++ b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/group/types.go @@ -18,7 +18,7 @@ package group import ( "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) // GroupStrategy defines the interface for all group constraint strategies. diff --git a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/provider.go b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/provider.go index 0d171e1db1b5..b10aa15555c0 100644 --- a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/provider.go +++ b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/provider.go @@ -21,9 +21,10 @@ import ( "strings" "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/extensions" psputil "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util" + "k8s.io/kubernetes/pkg/securitycontext" "k8s.io/kubernetes/pkg/util/maps" ) @@ -66,42 +67,32 @@ func NewSimpleProvider(psp *extensions.PodSecurityPolicy, namespace string, stra // Create a PodSecurityContext based on the given constraints. If a setting is already set // on the PodSecurityContext it will not be changed. 
Validate should be used after the context // is created to ensure it complies with the required restrictions. -// -// NOTE: this method works on a copy of the PodSecurityContext. It is up to the caller to -// apply the PSC if validation passes. func (s *simpleProvider) CreatePodSecurityContext(pod *api.Pod) (*api.PodSecurityContext, map[string]string, error) { - var sc *api.PodSecurityContext = nil - if pod.Spec.SecurityContext != nil { - // work with a copy - copy := *pod.Spec.SecurityContext - sc = &copy - } else { - sc = &api.PodSecurityContext{} - } + sc := securitycontext.NewPodSecurityContextMutator(pod.Spec.SecurityContext) annotations := maps.CopySS(pod.Annotations) - if len(sc.SupplementalGroups) == 0 { + if sc.SupplementalGroups() == nil { supGroups, err := s.strategies.SupplementalGroupStrategy.Generate(pod) if err != nil { return nil, nil, err } - sc.SupplementalGroups = supGroups + sc.SetSupplementalGroups(supGroups) } - if sc.FSGroup == nil { + if sc.FSGroup() == nil { fsGroup, err := s.strategies.FSGroupStrategy.GenerateSingle(pod) if err != nil { return nil, nil, err } - sc.FSGroup = fsGroup + sc.SetFSGroup(fsGroup) } - if sc.SELinuxOptions == nil { + if sc.SELinuxOptions() == nil { seLinux, err := s.strategies.SELinuxStrategy.Generate(pod, nil) if err != nil { return nil, nil, err } - sc.SELinuxOptions = seLinux + sc.SetSELinuxOptions(seLinux) } // This is only generated on the pod level. Containers inherit the pod's profile. If the
func (s *simpleProvider) CreateContainerSecurityContext(pod *api.Pod, container *api.Container) (*api.SecurityContext, map[string]string, error) { - var sc *api.SecurityContext = nil - if container.SecurityContext != nil { - // work with a copy of the original - copy := *container.SecurityContext - sc = &copy - } else { - sc = &api.SecurityContext{} - } + sc := securitycontext.NewEffectiveContainerSecurityContextMutator( + securitycontext.NewPodSecurityContextAccessor(pod.Spec.SecurityContext), + securitycontext.NewContainerSecurityContextMutator(container.SecurityContext), + ) + annotations := maps.CopySS(pod.Annotations) - if sc.RunAsUser == nil { + if sc.RunAsUser() == nil { uid, err := s.strategies.RunAsUserStrategy.Generate(pod, container) if err != nil { return nil, nil, err } - sc.RunAsUser = uid + sc.SetRunAsUser(uid) } - if sc.SELinuxOptions == nil { + if sc.SELinuxOptions() == nil { seLinux, err := s.strategies.SELinuxStrategy.Generate(pod, container) if err != nil { return nil, nil, err } - sc.SELinuxOptions = seLinux + sc.SetSELinuxOptions(seLinux) } annotations, err := s.strategies.AppArmorStrategy.Generate(annotations, container) @@ -157,82 +142,67 @@ func (s *simpleProvider) CreateContainerSecurityContext(pod *api.Pod, container return nil, nil, err } - if sc.Privileged == nil { - priv := false - sc.Privileged = &priv - } - // if we're using the non-root strategy set the marker that this container should not be // run as root which will signal to the kubelet to do a final check either on the runAsUser // or, if runAsUser is not set, the image UID will be checked. - if sc.RunAsNonRoot == nil && s.psp.Spec.RunAsUser.Rule == extensions.RunAsUserStrategyMustRunAsNonRoot { + if sc.RunAsNonRoot() == nil && sc.RunAsUser() == nil && s.psp.Spec.RunAsUser.Rule == extensions.RunAsUserStrategyMustRunAsNonRoot { nonRoot := true - sc.RunAsNonRoot = &nonRoot + sc.SetRunAsNonRoot(&nonRoot) } caps, err := s.strategies.CapabilitiesStrategy.Generate(pod, container) if err != nil { return nil, nil, err } - sc.Capabilities = caps + sc.SetCapabilities(caps) // if the PSP requires a read only root filesystem and the container has not made a specific // request then default ReadOnlyRootFilesystem to true. - if s.psp.Spec.ReadOnlyRootFilesystem && sc.ReadOnlyRootFilesystem == nil { + if s.psp.Spec.ReadOnlyRootFilesystem && sc.ReadOnlyRootFilesystem() == nil { readOnlyRootFS := true - sc.ReadOnlyRootFilesystem = &readOnlyRootFS + sc.SetReadOnlyRootFilesystem(&readOnlyRootFS) } // if the PSP sets DefaultAllowPrivilegeEscalation and the container security context // allowPrivilegeEscalation is not set, then default to that set by the PSP.
- if s.psp.Spec.DefaultAllowPrivilegeEscalation != nil && sc.AllowPrivilegeEscalation == nil { - sc.AllowPrivilegeEscalation = s.psp.Spec.DefaultAllowPrivilegeEscalation + if s.psp.Spec.DefaultAllowPrivilegeEscalation != nil && sc.AllowPrivilegeEscalation() == nil { + sc.SetAllowPrivilegeEscalation(s.psp.Spec.DefaultAllowPrivilegeEscalation) } // if the PSP sets psp.AllowPrivilegeEscalation to false set that as the default - if !s.psp.Spec.AllowPrivilegeEscalation && sc.AllowPrivilegeEscalation == nil { - sc.AllowPrivilegeEscalation = &s.psp.Spec.AllowPrivilegeEscalation + if !s.psp.Spec.AllowPrivilegeEscalation && sc.AllowPrivilegeEscalation() == nil { + sc.SetAllowPrivilegeEscalation(&s.psp.Spec.AllowPrivilegeEscalation) } - return sc, annotations, nil + return sc.ContainerSecurityContext(), annotations, nil } // Ensure a pod's SecurityContext is in compliance with the given constraints. func (s *simpleProvider) ValidatePodSecurityContext(pod *api.Pod, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} - if pod.Spec.SecurityContext == nil { - allErrs = append(allErrs, field.Invalid(fldPath.Child("securityContext"), pod.Spec.SecurityContext, "No security context is set")) - return allErrs - } + sc := securitycontext.NewPodSecurityContextAccessor(pod.Spec.SecurityContext) fsGroups := []int64{} - if pod.Spec.SecurityContext.FSGroup != nil { - fsGroups = append(fsGroups, *pod.Spec.SecurityContext.FSGroup) + if fsGroup := sc.FSGroup(); fsGroup != nil { + fsGroups = append(fsGroups, *fsGroup) } allErrs = append(allErrs, s.strategies.FSGroupStrategy.Validate(pod, fsGroups)...) - allErrs = append(allErrs, s.strategies.SupplementalGroupStrategy.Validate(pod, pod.Spec.SecurityContext.SupplementalGroups)...) + allErrs = append(allErrs, s.strategies.SupplementalGroupStrategy.Validate(pod, sc.SupplementalGroups())...) allErrs = append(allErrs, s.strategies.SeccompStrategy.ValidatePod(pod)...) - // make a dummy container context to reuse the selinux strategies - container := &api.Container{ - Name: pod.Name, - SecurityContext: &api.SecurityContext{ - SELinuxOptions: pod.Spec.SecurityContext.SELinuxOptions, - }, - } - allErrs = append(allErrs, s.strategies.SELinuxStrategy.Validate(pod, container)...) + allErrs = append(allErrs, s.strategies.SELinuxStrategy.Validate(fldPath.Child("seLinuxOptions"), pod, nil, sc.SELinuxOptions())...) 
- if !s.psp.Spec.HostNetwork && pod.Spec.SecurityContext.HostNetwork { - allErrs = append(allErrs, field.Invalid(fldPath.Child("hostNetwork"), pod.Spec.SecurityContext.HostNetwork, "Host network is not allowed to be used")) + if !s.psp.Spec.HostNetwork && sc.HostNetwork() { + allErrs = append(allErrs, field.Invalid(fldPath.Child("hostNetwork"), sc.HostNetwork(), "Host network is not allowed to be used")) } - if !s.psp.Spec.HostPID && pod.Spec.SecurityContext.HostPID { - allErrs = append(allErrs, field.Invalid(fldPath.Child("hostPID"), pod.Spec.SecurityContext.HostPID, "Host PID is not allowed to be used")) + if !s.psp.Spec.HostPID && sc.HostPID() { + allErrs = append(allErrs, field.Invalid(fldPath.Child("hostPID"), sc.HostPID(), "Host PID is not allowed to be used")) } - if !s.psp.Spec.HostIPC && pod.Spec.SecurityContext.HostIPC { - allErrs = append(allErrs, field.Invalid(fldPath.Child("hostIPC"), pod.Spec.SecurityContext.HostIPC, "Host IPC is not allowed to be used")) + if !s.psp.Spec.HostIPC && sc.HostIPC() { + allErrs = append(allErrs, field.Invalid(fldPath.Child("hostIPC"), sc.HostIPC(), "Host IPC is not allowed to be used")) } allErrs = append(allErrs, s.strategies.SysctlsStrategy.Validate(pod)...) @@ -263,9 +233,24 @@ func (s *simpleProvider) ValidatePodSecurityContext(pod *api.Pod, fldPath *field fmt.Sprintf("is not allowed to be used"))) } } + + if fsType == extensions.FlexVolume && len(s.psp.Spec.AllowedFlexVolumes) > 0 { + found := false + driver := v.FlexVolume.Driver + for _, allowedFlexVolume := range s.psp.Spec.AllowedFlexVolumes { + if driver == allowedFlexVolume.Driver { + found = true + break + } + } + if !found { + allErrs = append(allErrs, + field.Invalid(fldPath.Child("volumes").Index(i).Child("driver"), driver, + "Flexvolume driver is not allowed to be used")) + } + } } } - return allErrs } @@ -273,25 +258,23 @@ func (s *simpleProvider) ValidatePodSecurityContext(pod *api.Pod, fldPath *field func (s *simpleProvider) ValidateContainerSecurityContext(pod *api.Pod, container *api.Container, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} - if container.SecurityContext == nil { - allErrs = append(allErrs, field.Invalid(fldPath.Child("securityContext"), container.SecurityContext, "No security context is set")) - return allErrs - } + podSC := securitycontext.NewPodSecurityContextAccessor(pod.Spec.SecurityContext) + sc := securitycontext.NewEffectiveContainerSecurityContextAccessor(podSC, securitycontext.NewContainerSecurityContextMutator(container.SecurityContext)) - sc := container.SecurityContext - allErrs = append(allErrs, s.strategies.RunAsUserStrategy.Validate(pod, container)...) - allErrs = append(allErrs, s.strategies.SELinuxStrategy.Validate(pod, container)...) + allErrs = append(allErrs, s.strategies.RunAsUserStrategy.Validate(fldPath.Child("securityContext"), pod, container, sc.RunAsNonRoot(), sc.RunAsUser())...) + allErrs = append(allErrs, s.strategies.SELinuxStrategy.Validate(fldPath.Child("seLinuxOptions"), pod, container, sc.SELinuxOptions())...) allErrs = append(allErrs, s.strategies.AppArmorStrategy.Validate(pod, container)...) allErrs = append(allErrs, s.strategies.SeccompStrategy.ValidateContainer(pod, container)...) 
- if !s.psp.Spec.Privileged && *sc.Privileged { - allErrs = append(allErrs, field.Invalid(fldPath.Child("privileged"), *sc.Privileged, "Privileged containers are not allowed")) + privileged := sc.Privileged() + if !s.psp.Spec.Privileged && privileged != nil && *privileged { + allErrs = append(allErrs, field.Invalid(fldPath.Child("privileged"), *privileged, "Privileged containers are not allowed")) } - allErrs = append(allErrs, s.strategies.CapabilitiesStrategy.Validate(pod, container)...) + allErrs = append(allErrs, s.strategies.CapabilitiesStrategy.Validate(pod, container, sc.Capabilities())...) - if !s.psp.Spec.HostNetwork && pod.Spec.SecurityContext.HostNetwork { - allErrs = append(allErrs, field.Invalid(fldPath.Child("hostNetwork"), pod.Spec.SecurityContext.HostNetwork, "Host network is not allowed to be used")) + if !s.psp.Spec.HostNetwork && podSC.HostNetwork() { + allErrs = append(allErrs, field.Invalid(fldPath.Child("hostNetwork"), podSC.HostNetwork(), "Host network is not allowed to be used")) } containersPath := fldPath.Child("containers") @@ -306,39 +289,40 @@ func (s *simpleProvider) ValidateContainerSecurityContext(pod *api.Pod, containe allErrs = append(allErrs, s.hasInvalidHostPort(&c, idxPath)...) } - if !s.psp.Spec.HostPID && pod.Spec.SecurityContext.HostPID { - allErrs = append(allErrs, field.Invalid(fldPath.Child("hostPID"), pod.Spec.SecurityContext.HostPID, "Host PID is not allowed to be used")) + if !s.psp.Spec.HostPID && podSC.HostPID() { + allErrs = append(allErrs, field.Invalid(fldPath.Child("hostPID"), podSC.HostPID(), "Host PID is not allowed to be used")) } - if !s.psp.Spec.HostIPC && pod.Spec.SecurityContext.HostIPC { - allErrs = append(allErrs, field.Invalid(fldPath.Child("hostIPC"), pod.Spec.SecurityContext.HostIPC, "Host IPC is not allowed to be used")) + if !s.psp.Spec.HostIPC && podSC.HostIPC() { + allErrs = append(allErrs, field.Invalid(fldPath.Child("hostIPC"), podSC.HostIPC(), "Host IPC is not allowed to be used")) } if s.psp.Spec.ReadOnlyRootFilesystem { - if sc.ReadOnlyRootFilesystem == nil { - allErrs = append(allErrs, field.Invalid(fldPath.Child("readOnlyRootFilesystem"), sc.ReadOnlyRootFilesystem, "ReadOnlyRootFilesystem may not be nil and must be set to true")) - } else if !*sc.ReadOnlyRootFilesystem { - allErrs = append(allErrs, field.Invalid(fldPath.Child("readOnlyRootFilesystem"), *sc.ReadOnlyRootFilesystem, "ReadOnlyRootFilesystem must be set to true")) + readOnly := sc.ReadOnlyRootFilesystem() + if readOnly == nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("readOnlyRootFilesystem"), readOnly, "ReadOnlyRootFilesystem may not be nil and must be set to true")) + } else if !*readOnly { + allErrs = append(allErrs, field.Invalid(fldPath.Child("readOnlyRootFilesystem"), *readOnly, "ReadOnlyRootFilesystem must be set to true")) } } - if !s.psp.Spec.AllowPrivilegeEscalation && sc.AllowPrivilegeEscalation == nil { - allErrs = append(allErrs, field.Invalid(fldPath.Child("allowPrivilegeEscalation"), sc.AllowPrivilegeEscalation, "Allowing privilege escalation for containers is not allowed")) - + allowEscalation := sc.AllowPrivilegeEscalation() + if !s.psp.Spec.AllowPrivilegeEscalation && allowEscalation == nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("allowPrivilegeEscalation"), allowEscalation, "Allowing privilege escalation for containers is not allowed")) } - if !s.psp.Spec.AllowPrivilegeEscalation && sc.AllowPrivilegeEscalation != nil && *sc.AllowPrivilegeEscalation { - allErrs = append(allErrs, 
field.Invalid(fldPath.Child("allowPrivilegeEscalation"), *sc.AllowPrivilegeEscalation, "Allowing privilege escalation for containers is not allowed")) + if !s.psp.Spec.AllowPrivilegeEscalation && allowEscalation != nil && *allowEscalation { + allErrs = append(allErrs, field.Invalid(fldPath.Child("allowPrivilegeEscalation"), *allowEscalation, "Allowing privilege escalation for containers is not allowed")) } return allErrs } -// hasHostPort checks the port definitions on the container for HostPort > 0. +// hasInvalidHostPort checks whether the port definitions on the container fall outside of the ranges allowed by the PSP. func (s *simpleProvider) hasInvalidHostPort(container *api.Container, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} for _, cp := range container.Ports { - if cp.HostPort > 0 && !s.isValidHostPort(int(cp.HostPort)) { + if cp.HostPort > 0 && !s.isValidHostPort(cp.HostPort) { detail := fmt.Sprintf("Host port %d is not allowed to be used. Allowed ports: [%s]", cp.HostPort, hostPortRangesToString(s.psp.Spec.HostPorts)) allErrs = append(allErrs, field.Invalid(fldPath.Child("hostPort"), cp.HostPort, detail)) } @@ -347,7 +331,7 @@ func (s *simpleProvider) hasInvalidHostPort(container *api.Container, fldPath *f } // isValidHostPort returns true if the port falls in any range allowed by the PSP. -func (s *simpleProvider) isValidHostPort(port int) bool { +func (s *simpleProvider) isValidHostPort(port int32) bool { for _, hostPortRange := range s.psp.Spec.HostPorts { if port >= hostPortRange.Min && port <= hostPortRange.Max { return true diff --git a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp/BUILD b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp/BUILD index e53a2d778f37..c49f6fc6996b 100644 --- a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp/BUILD @@ -9,8 +9,9 @@ load( go_library( name = "go_default_library", srcs = ["strategy.go"], + importpath = "k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", ], ) @@ -18,9 +19,10 @@ go_library( go_test( name = "go_default_test", srcs = ["strategy_test.go"], + importpath = "k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp/strategy.go b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp/strategy.go index aaeb0c1f3634..715156e8a01f 100644 --- a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp/strategy.go +++ b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp/strategy.go @@ -21,7 +21,7 @@ import ( "strings" "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) const ( diff --git a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux/BUILD b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux/BUILD index 9231a39ae261..97f5e5292c07 100644 --- a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux/BUILD @@ -14,8 +14,9 @@ go_library( "runasany.go", 
"types.go", ], + importpath = "k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", ], @@ -27,9 +28,10 @@ go_test( "mustrunas_test.go", "runasany_test.go", ], + importpath = "k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux/mustrunas.go b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux/mustrunas.go index 684f7bb9669c..b2605cd3304b 100644 --- a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux/mustrunas.go +++ b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux/mustrunas.go @@ -20,7 +20,7 @@ import ( "fmt" "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/extensions" ) @@ -43,41 +43,33 @@ func NewMustRunAs(options *extensions.SELinuxStrategyOptions) (SELinuxStrategy, } // Generate creates the SELinuxOptions based on constraint rules. -func (s *mustRunAs) Generate(pod *api.Pod, container *api.Container) (*api.SELinuxOptions, error) { +func (s *mustRunAs) Generate(_ *api.Pod, _ *api.Container) (*api.SELinuxOptions, error) { return s.opts.SELinuxOptions, nil } // Validate ensures that the specified values fall within the range of the strategy. -func (s *mustRunAs) Validate(pod *api.Pod, container *api.Container) field.ErrorList { +func (s *mustRunAs) Validate(fldPath *field.Path, _ *api.Pod, _ *api.Container, seLinux *api.SELinuxOptions) field.ErrorList { allErrs := field.ErrorList{} - if container.SecurityContext == nil { - detail := fmt.Sprintf("unable to validate nil security context for %s", container.Name) - allErrs = append(allErrs, field.Invalid(field.NewPath("securityContext"), container.SecurityContext, detail)) + if seLinux == nil { + allErrs = append(allErrs, field.Required(fldPath, "")) return allErrs } - if container.SecurityContext.SELinuxOptions == nil { - detail := fmt.Sprintf("unable to validate nil seLinuxOptions for %s", container.Name) - allErrs = append(allErrs, field.Invalid(field.NewPath("seLinuxOptions"), container.SecurityContext.SELinuxOptions, detail)) - return allErrs - } - seLinuxOptionsPath := field.NewPath("seLinuxOptions") - seLinux := container.SecurityContext.SELinuxOptions if seLinux.Level != s.opts.SELinuxOptions.Level { - detail := fmt.Sprintf("seLinuxOptions.level on %s does not match required level. Found %s, wanted %s", container.Name, seLinux.Level, s.opts.SELinuxOptions.Level) - allErrs = append(allErrs, field.Invalid(seLinuxOptionsPath.Child("level"), seLinux.Level, detail)) + detail := fmt.Sprintf("must be %s", s.opts.SELinuxOptions.Level) + allErrs = append(allErrs, field.Invalid(fldPath.Child("level"), seLinux.Level, detail)) } if seLinux.Role != s.opts.SELinuxOptions.Role { - detail := fmt.Sprintf("seLinuxOptions.role on %s does not match required role. 
Found %s, wanted %s", container.Name, seLinux.Role, s.opts.SELinuxOptions.Role) - allErrs = append(allErrs, field.Invalid(seLinuxOptionsPath.Child("role"), seLinux.Role, detail)) + detail := fmt.Sprintf("must be %s", s.opts.SELinuxOptions.Role) + allErrs = append(allErrs, field.Invalid(fldPath.Child("role"), seLinux.Role, detail)) } if seLinux.Type != s.opts.SELinuxOptions.Type { - detail := fmt.Sprintf("seLinuxOptions.type on %s does not match required type. Found %s, wanted %s", container.Name, seLinux.Type, s.opts.SELinuxOptions.Type) - allErrs = append(allErrs, field.Invalid(seLinuxOptionsPath.Child("type"), seLinux.Type, detail)) + detail := fmt.Sprintf("must be %s", s.opts.SELinuxOptions.Type) + allErrs = append(allErrs, field.Invalid(fldPath.Child("type"), seLinux.Type, detail)) } if seLinux.User != s.opts.SELinuxOptions.User { - detail := fmt.Sprintf("seLinuxOptions.user on %s does not match required user. Found %s, wanted %s", container.Name, seLinux.User, s.opts.SELinuxOptions.User) - allErrs = append(allErrs, field.Invalid(seLinuxOptionsPath.Child("user"), seLinux.User, detail)) + detail := fmt.Sprintf("must be %s", s.opts.SELinuxOptions.User) + allErrs = append(allErrs, field.Invalid(fldPath.Child("user"), seLinux.User, detail)) } return allErrs diff --git a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux/runasany.go b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux/runasany.go index 711336fc5952..008ad0a41d55 100644 --- a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux/runasany.go +++ b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux/runasany.go @@ -18,7 +18,7 @@ package selinux import ( "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/extensions" ) @@ -38,6 +38,6 @@ func (s *runAsAny) Generate(pod *api.Pod, container *api.Container) (*api.SELinu } // Validate ensures that the specified values fall within the range of the strategy. -func (s *runAsAny) Validate(pod *api.Pod, container *api.Container) field.ErrorList { +func (s *runAsAny) Validate(fldPath *field.Path, _ *api.Pod, _ *api.Container, options *api.SELinuxOptions) field.ErrorList { return field.ErrorList{} } diff --git a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux/types.go b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux/types.go index 043a3efe0208..cdaae8093118 100644 --- a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux/types.go +++ b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux/types.go @@ -18,7 +18,7 @@ package selinux import ( "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) // SELinuxStrategy defines the interface for all SELinux constraint strategies. @@ -26,5 +26,5 @@ type SELinuxStrategy interface { // Generate creates the SELinuxOptions based on constraint rules. Generate(pod *api.Pod, container *api.Container) (*api.SELinuxOptions, error) // Validate ensures that the specified values fall within the range of the strategy. 
- Validate(pod *api.Pod, container *api.Container) field.ErrorList + Validate(fldPath *field.Path, pod *api.Pod, container *api.Container, options *api.SELinuxOptions) field.ErrorList } diff --git a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl/BUILD b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl/BUILD index 8da68d4a8d6d..dbd473ec8373 100644 --- a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl/BUILD @@ -12,9 +12,10 @@ go_library( "mustmatchpatterns.go", "types.go", ], + importpath = "k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/helper:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/helper:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", ], ) @@ -22,10 +23,11 @@ go_library( go_test( name = "go_default_test", srcs = ["mustmatchpatterns_test.go"], + importpath = "k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/helper:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/helper:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl/mustmatchpatterns.go b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl/mustmatchpatterns.go index 969993450af3..b59dd9d3fff0 100644 --- a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl/mustmatchpatterns.go +++ b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl/mustmatchpatterns.go @@ -21,8 +21,8 @@ import ( "strings" "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/helper" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/helper" ) // mustMatchPatterns implements the SysctlsStrategy interface diff --git a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl/types.go b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl/types.go index ee9291bfb761..a6c2034a8d41 100644 --- a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl/types.go +++ b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl/types.go @@ -18,7 +18,7 @@ package sysctl import ( "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) // SysctlsStrategy defines the interface for all sysctl strategies. 
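Note on the strategy refactor above: the SELinux (and, below, RunAsUser) strategies no longer dereference container.SecurityContext themselves; Validate now receives an explicit field path plus an already-resolved value, and the provider resolves that value through the securitycontext accessors added later in this diff. The following is a minimal sketch of that wiring, not code from this patch: the function names (validatePodSELinux, effectiveRunAsUser) and the package name are hypothetical, and it assumes the vendored import paths shown in the BUILD files in this diff.

// Sketch only; mirrors how provider.go invokes the new Validate signature.
package pspsketch

import (
	"k8s.io/apimachinery/pkg/util/validation/field"
	api "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/apis/extensions"
	"k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux"
	"k8s.io/kubernetes/pkg/securitycontext"
)

// validatePodSELinux resolves the pod-level SELinuxOptions through an accessor
// (which tolerates a nil PodSecurityContext) and hands the value and a field
// path to the MustRunAs strategy; the strategy never touches the pod itself.
func validatePodSELinux(pod *api.Pod, opts *extensions.SELinuxStrategyOptions) (field.ErrorList, error) {
	strategy, err := selinux.NewMustRunAs(opts)
	if err != nil {
		return nil, err
	}
	podSC := securitycontext.NewPodSecurityContextAccessor(pod.Spec.SecurityContext)
	// A nil container means pod-level validation, as in ValidatePodSecurityContext above.
	return strategy.Validate(field.NewPath("spec", "securityContext", "seLinuxOptions"), pod, nil, podSC.SELinuxOptions()), nil
}

// effectiveRunAsUser shows the effective-context accessor: a container-level
// value wins, otherwise the pod-level value is inherited, matching the
// effectiveContainerSecurityContextWrapper added in securitycontext/accessors.go.
func effectiveRunAsUser(pod *api.Pod, container *api.Container) *int64 {
	podSC := securitycontext.NewPodSecurityContextAccessor(pod.Spec.SecurityContext)
	containerSC := securitycontext.NewContainerSecurityContextMutator(container.SecurityContext)
	return securitycontext.NewEffectiveContainerSecurityContextAccessor(podSC, containerSC).RunAsUser()
}

The practical effect of the new signatures is that errors are reported against the caller-supplied path and the strategies act on resolved values only, which is why provider.go above can feed them either pod-level or effective container-level data.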
diff --git a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/types.go b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/types.go index d067a0325bfe..31fcc5484d8b 100644 --- a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/types.go +++ b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/types.go @@ -18,7 +18,7 @@ package podsecuritypolicy import ( "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/security/podsecuritypolicy/apparmor" "k8s.io/kubernetes/pkg/security/podsecuritypolicy/capabilities" diff --git a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/user/BUILD b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/user/BUILD index 9cb2bf4053e8..f4e57284ad07 100644 --- a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/user/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/user/BUILD @@ -15,8 +15,9 @@ go_library( "runasany.go", "types.go", ], + importpath = "k8s.io/kubernetes/pkg/security/podsecuritypolicy/user", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", "//pkg/security/podsecuritypolicy/util:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", @@ -30,9 +31,10 @@ go_test( "nonroot_test.go", "runasany_test.go", ], + importpath = "k8s.io/kubernetes/pkg/security/podsecuritypolicy/user", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/user/mustrunas.go b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/user/mustrunas.go index abc631e28039..6b0ed1397d55 100644 --- a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/user/mustrunas.go +++ b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/user/mustrunas.go @@ -20,7 +20,7 @@ import ( "fmt" "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/extensions" psputil "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util" ) @@ -49,27 +49,17 @@ func (s *mustRunAs) Generate(pod *api.Pod, container *api.Container) (*int64, er } // Validate ensures that the specified values fall within the range of the strategy. 
-func (s *mustRunAs) Validate(pod *api.Pod, container *api.Container) field.ErrorList { +func (s *mustRunAs) Validate(fldPath *field.Path, _ *api.Pod, _ *api.Container, runAsNonRoot *bool, runAsUser *int64) field.ErrorList { allErrs := field.ErrorList{} - securityContextPath := field.NewPath("securityContext") - if container.SecurityContext == nil { - detail := fmt.Sprintf("unable to validate nil security context for container %s", container.Name) - allErrs = append(allErrs, field.Invalid(securityContextPath, container.SecurityContext, detail)) - return allErrs - } - if container.SecurityContext.RunAsUser == nil { - detail := fmt.Sprintf("unable to validate nil RunAsUser for container %s", container.Name) - allErrs = append(allErrs, field.Invalid(securityContextPath.Child("runAsUser"), container.SecurityContext.RunAsUser, detail)) + if runAsUser == nil { + allErrs = append(allErrs, field.Required(fldPath.Child("runAsUser"), "")) return allErrs } - if !s.isValidUID(*container.SecurityContext.RunAsUser) { - detail := fmt.Sprintf("UID on container %s does not match required range. Found %d, allowed: %v", - container.Name, - *container.SecurityContext.RunAsUser, - s.opts.Ranges) - allErrs = append(allErrs, field.Invalid(securityContextPath.Child("runAsUser"), *container.SecurityContext.RunAsUser, detail)) + if !s.isValidUID(*runAsUser) { + detail := fmt.Sprintf("must be in the ranges: %v", s.opts.Ranges) + allErrs = append(allErrs, field.Invalid(fldPath.Child("runAsUser"), *runAsUser, detail)) } return allErrs } diff --git a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/user/nonroot.go b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/user/nonroot.go index f53880a9a68a..68e644a7e32c 100644 --- a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/user/nonroot.go +++ b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/user/nonroot.go @@ -17,10 +17,8 @@ limitations under the License. package user import ( - "fmt" - "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/extensions" ) @@ -43,22 +41,18 @@ func (s *nonRoot) Generate(pod *api.Pod, container *api.Container) (*int64, erro // or if the UID is set it is not root. Validation will fail if RunAsNonRoot is set to false. // In order to work properly this assumes that the kubelet performs a final check on runAsUser // or the image UID when runAsUser is nil. 
-func (s *nonRoot) Validate(pod *api.Pod, container *api.Container) field.ErrorList { +func (s *nonRoot) Validate(fldPath *field.Path, _ *api.Pod, _ *api.Container, runAsNonRoot *bool, runAsUser *int64) field.ErrorList { allErrs := field.ErrorList{} - securityContextPath := field.NewPath("securityContext") - if container.SecurityContext == nil { - detail := fmt.Sprintf("unable to validate nil security context for container %s", container.Name) - allErrs = append(allErrs, field.Invalid(securityContextPath, container.SecurityContext, detail)) + if runAsNonRoot == nil && runAsUser == nil { + allErrs = append(allErrs, field.Required(fldPath.Child("runAsNonRoot"), "must be true")) return allErrs } - if container.SecurityContext.RunAsNonRoot != nil && *container.SecurityContext.RunAsNonRoot == false { - detail := fmt.Sprintf("RunAsNonRoot must be true for container %s", container.Name) - allErrs = append(allErrs, field.Invalid(securityContextPath.Child("runAsNonRoot"), *container.SecurityContext.RunAsNonRoot, detail)) + if runAsNonRoot != nil && *runAsNonRoot == false { + allErrs = append(allErrs, field.Invalid(fldPath.Child("runAsNonRoot"), *runAsNonRoot, "must be true")) return allErrs } - if container.SecurityContext.RunAsUser != nil && *container.SecurityContext.RunAsUser == 0 { - detail := fmt.Sprintf("running with the root UID is forbidden by the pod security policy for container %s", container.Name) - allErrs = append(allErrs, field.Invalid(securityContextPath.Child("runAsUser"), *container.SecurityContext.RunAsUser, detail)) + if runAsUser != nil && *runAsUser == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("runAsUser"), *runAsUser, "running with the root UID is forbidden")) return allErrs } return allErrs diff --git a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/user/runasany.go b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/user/runasany.go index ffee679320d8..e1384f2da71d 100644 --- a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/user/runasany.go +++ b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/user/runasany.go @@ -18,7 +18,7 @@ package user import ( "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/extensions" ) @@ -38,6 +38,6 @@ func (s *runAsAny) Generate(pod *api.Pod, container *api.Container) (*int64, err } // Validate ensures that the specified values fall within the range of the strategy. -func (s *runAsAny) Validate(pod *api.Pod, container *api.Container) field.ErrorList { +func (s *runAsAny) Validate(fldPath *field.Path, _ *api.Pod, _ *api.Container, runAsNonRoot *bool, runAsUser *int64) field.ErrorList { return field.ErrorList{} } diff --git a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/user/types.go b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/user/types.go index 8e754c32f6c2..fbcc34c79abd 100644 --- a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/user/types.go +++ b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/user/types.go @@ -18,7 +18,7 @@ package user import ( "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) // RunAsUserStrategy defines the interface for all uid constraint strategies. @@ -26,5 +26,5 @@ type RunAsUserStrategy interface { // Generate creates the uid based on policy rules. 
Generate(pod *api.Pod, container *api.Container) (*int64, error) // Validate ensures that the specified values fall within the range of the strategy. - Validate(pod *api.Pod, container *api.Container) field.ErrorList + Validate(fldPath *field.Path, pod *api.Pod, container *api.Container, runAsNonRoot *bool, runAsUser *int64) field.ErrorList } diff --git a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/util/BUILD b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/util/BUILD index 88e320d98c96..118784e9f0ff 100644 --- a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/util/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/util/BUILD @@ -12,8 +12,9 @@ go_library( "doc.go", "util.go", ], + importpath = "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", ], @@ -22,9 +23,10 @@ go_library( go_test( name = "go_default_test", srcs = ["util_test.go"], + importpath = "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/util/util.go b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/util/util.go index 3ff6d89bc771..d654f88c77fa 100644 --- a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/util/util.go +++ b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/util/util.go @@ -21,7 +21,7 @@ import ( "strings" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/extensions" ) @@ -67,6 +67,7 @@ func GetAllFSTypesAsSet() sets.String { string(extensions.Projected), string(extensions.PortworxVolume), string(extensions.ScaleIO), + string(extensions.CSI), ) return fstypes } diff --git a/vendor/k8s.io/kubernetes/pkg/securitycontext/BUILD b/vendor/k8s.io/kubernetes/pkg/securitycontext/BUILD index 38fd8d83145b..fa4eb424a0eb 100644 --- a/vendor/k8s.io/kubernetes/pkg/securitycontext/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/securitycontext/BUILD @@ -9,21 +9,31 @@ load( go_library( name = "go_default_library", srcs = [ + "accessors.go", "doc.go", "fake.go", "util.go", ], + importpath = "k8s.io/kubernetes/pkg/securitycontext", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", ], ) go_test( name = "go_default_test", - srcs = ["util_test.go"], + srcs = [ + "accessors_test.go", + "util_test.go", + ], + importpath = "k8s.io/kubernetes/pkg/securitycontext", library = ":go_default_library", - deps = ["//vendor/k8s.io/api/core/v1:go_default_library"], + deps = [ + "//pkg/apis/core:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", + ], ) filegroup( diff --git a/vendor/k8s.io/kubernetes/pkg/securitycontext/accessors.go b/vendor/k8s.io/kubernetes/pkg/securitycontext/accessors.go new file mode 100644 index 000000000000..98ac6e0b92a8 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/securitycontext/accessors.go @@ -0,0 +1,407 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package securitycontext + +import ( + "reflect" + + api "k8s.io/kubernetes/pkg/apis/core" +) + +// PodSecurityContextAccessor allows reading the values of a PodSecurityContext object +type PodSecurityContextAccessor interface { + HostNetwork() bool + HostPID() bool + HostIPC() bool + SELinuxOptions() *api.SELinuxOptions + RunAsUser() *int64 + RunAsNonRoot() *bool + SupplementalGroups() []int64 + FSGroup() *int64 +} + +// PodSecurityContextMutator allows reading and writing the values of a PodSecurityContext object +type PodSecurityContextMutator interface { + PodSecurityContextAccessor + + SetHostNetwork(bool) + SetHostPID(bool) + SetHostIPC(bool) + SetSELinuxOptions(*api.SELinuxOptions) + SetRunAsUser(*int64) + SetRunAsNonRoot(*bool) + SetSupplementalGroups([]int64) + SetFSGroup(*int64) + + // PodSecurityContext returns the current PodSecurityContext object + PodSecurityContext() *api.PodSecurityContext +} + +// NewPodSecurityContextAccessor returns an accessor for the given pod security context. +// May be initialized with a nil PodSecurityContext. +func NewPodSecurityContextAccessor(podSC *api.PodSecurityContext) PodSecurityContextAccessor { + return &podSecurityContextWrapper{podSC: podSC} +} + +// NewPodSecurityContextMutator returns a mutator for the given pod security context. +// May be initialized with a nil PodSecurityContext. 
+func NewPodSecurityContextMutator(podSC *api.PodSecurityContext) PodSecurityContextMutator { + return &podSecurityContextWrapper{podSC: podSC} +} + +type podSecurityContextWrapper struct { + podSC *api.PodSecurityContext +} + +func (w *podSecurityContextWrapper) PodSecurityContext() *api.PodSecurityContext { + return w.podSC +} + +func (w *podSecurityContextWrapper) ensurePodSC() { + if w.podSC == nil { + w.podSC = &api.PodSecurityContext{} + } +} + +func (w *podSecurityContextWrapper) HostNetwork() bool { + if w.podSC == nil { + return false + } + return w.podSC.HostNetwork +} +func (w *podSecurityContextWrapper) SetHostNetwork(v bool) { + if w.podSC == nil && v == false { + return + } + w.ensurePodSC() + w.podSC.HostNetwork = v +} +func (w *podSecurityContextWrapper) HostPID() bool { + if w.podSC == nil { + return false + } + return w.podSC.HostPID +} +func (w *podSecurityContextWrapper) SetHostPID(v bool) { + if w.podSC == nil && v == false { + return + } + w.ensurePodSC() + w.podSC.HostPID = v +} +func (w *podSecurityContextWrapper) HostIPC() bool { + if w.podSC == nil { + return false + } + return w.podSC.HostIPC +} +func (w *podSecurityContextWrapper) SetHostIPC(v bool) { + if w.podSC == nil && v == false { + return + } + w.ensurePodSC() + w.podSC.HostIPC = v +} +func (w *podSecurityContextWrapper) SELinuxOptions() *api.SELinuxOptions { + if w.podSC == nil { + return nil + } + return w.podSC.SELinuxOptions +} +func (w *podSecurityContextWrapper) SetSELinuxOptions(v *api.SELinuxOptions) { + if w.podSC == nil && v == nil { + return + } + w.ensurePodSC() + w.podSC.SELinuxOptions = v +} +func (w *podSecurityContextWrapper) RunAsUser() *int64 { + if w.podSC == nil { + return nil + } + return w.podSC.RunAsUser +} +func (w *podSecurityContextWrapper) SetRunAsUser(v *int64) { + if w.podSC == nil && v == nil { + return + } + w.ensurePodSC() + w.podSC.RunAsUser = v +} +func (w *podSecurityContextWrapper) RunAsNonRoot() *bool { + if w.podSC == nil { + return nil + } + return w.podSC.RunAsNonRoot +} +func (w *podSecurityContextWrapper) SetRunAsNonRoot(v *bool) { + if w.podSC == nil && v == nil { + return + } + w.ensurePodSC() + w.podSC.RunAsNonRoot = v +} +func (w *podSecurityContextWrapper) SupplementalGroups() []int64 { + if w.podSC == nil { + return nil + } + return w.podSC.SupplementalGroups +} +func (w *podSecurityContextWrapper) SetSupplementalGroups(v []int64) { + if w.podSC == nil && len(v) == 0 { + return + } + w.ensurePodSC() + if len(v) == 0 && len(w.podSC.SupplementalGroups) == 0 { + return + } + w.podSC.SupplementalGroups = v +} +func (w *podSecurityContextWrapper) FSGroup() *int64 { + if w.podSC == nil { + return nil + } + return w.podSC.FSGroup +} +func (w *podSecurityContextWrapper) SetFSGroup(v *int64) { + if w.podSC == nil && v == nil { + return + } + w.ensurePodSC() + w.podSC.FSGroup = v +} + +type ContainerSecurityContextAccessor interface { + Capabilities() *api.Capabilities + Privileged() *bool + SELinuxOptions() *api.SELinuxOptions + RunAsUser() *int64 + RunAsNonRoot() *bool + ReadOnlyRootFilesystem() *bool + AllowPrivilegeEscalation() *bool +} + +type ContainerSecurityContextMutator interface { + ContainerSecurityContextAccessor + + ContainerSecurityContext() *api.SecurityContext + + SetCapabilities(*api.Capabilities) + SetPrivileged(*bool) + SetSELinuxOptions(*api.SELinuxOptions) + SetRunAsUser(*int64) + SetRunAsNonRoot(*bool) + SetReadOnlyRootFilesystem(*bool) + SetAllowPrivilegeEscalation(*bool) +} + +func NewContainerSecurityContextAccessor(containerSC 
*api.SecurityContext) ContainerSecurityContextAccessor { + return &containerSecurityContextWrapper{containerSC: containerSC} +} + +func NewContainerSecurityContextMutator(containerSC *api.SecurityContext) ContainerSecurityContextMutator { + return &containerSecurityContextWrapper{containerSC: containerSC} +} + +type containerSecurityContextWrapper struct { + containerSC *api.SecurityContext +} + +func (w *containerSecurityContextWrapper) ContainerSecurityContext() *api.SecurityContext { + return w.containerSC +} + +func (w *containerSecurityContextWrapper) ensureContainerSC() { + if w.containerSC == nil { + w.containerSC = &api.SecurityContext{} + } +} + +func (w *containerSecurityContextWrapper) Capabilities() *api.Capabilities { + if w.containerSC == nil { + return nil + } + return w.containerSC.Capabilities +} +func (w *containerSecurityContextWrapper) SetCapabilities(v *api.Capabilities) { + if w.containerSC == nil && v == nil { + return + } + w.ensureContainerSC() + w.containerSC.Capabilities = v +} +func (w *containerSecurityContextWrapper) Privileged() *bool { + if w.containerSC == nil { + return nil + } + return w.containerSC.Privileged +} +func (w *containerSecurityContextWrapper) SetPrivileged(v *bool) { + if w.containerSC == nil && v == nil { + return + } + w.ensureContainerSC() + w.containerSC.Privileged = v +} +func (w *containerSecurityContextWrapper) SELinuxOptions() *api.SELinuxOptions { + if w.containerSC == nil { + return nil + } + return w.containerSC.SELinuxOptions +} +func (w *containerSecurityContextWrapper) SetSELinuxOptions(v *api.SELinuxOptions) { + if w.containerSC == nil && v == nil { + return + } + w.ensureContainerSC() + w.containerSC.SELinuxOptions = v +} +func (w *containerSecurityContextWrapper) RunAsUser() *int64 { + if w.containerSC == nil { + return nil + } + return w.containerSC.RunAsUser +} +func (w *containerSecurityContextWrapper) SetRunAsUser(v *int64) { + if w.containerSC == nil && v == nil { + return + } + w.ensureContainerSC() + w.containerSC.RunAsUser = v +} +func (w *containerSecurityContextWrapper) RunAsNonRoot() *bool { + if w.containerSC == nil { + return nil + } + return w.containerSC.RunAsNonRoot +} +func (w *containerSecurityContextWrapper) SetRunAsNonRoot(v *bool) { + if w.containerSC == nil && v == nil { + return + } + w.ensureContainerSC() + w.containerSC.RunAsNonRoot = v +} +func (w *containerSecurityContextWrapper) ReadOnlyRootFilesystem() *bool { + if w.containerSC == nil { + return nil + } + return w.containerSC.ReadOnlyRootFilesystem +} +func (w *containerSecurityContextWrapper) SetReadOnlyRootFilesystem(v *bool) { + if w.containerSC == nil && v == nil { + return + } + w.ensureContainerSC() + w.containerSC.ReadOnlyRootFilesystem = v +} +func (w *containerSecurityContextWrapper) AllowPrivilegeEscalation() *bool { + if w.containerSC == nil { + return nil + } + return w.containerSC.AllowPrivilegeEscalation +} +func (w *containerSecurityContextWrapper) SetAllowPrivilegeEscalation(v *bool) { + if w.containerSC == nil && v == nil { + return + } + w.ensureContainerSC() + w.containerSC.AllowPrivilegeEscalation = v +} + +func NewEffectiveContainerSecurityContextAccessor(podSC PodSecurityContextAccessor, containerSC ContainerSecurityContextMutator) ContainerSecurityContextAccessor { + return &effectiveContainerSecurityContextWrapper{podSC: podSC, containerSC: containerSC} +} + +func NewEffectiveContainerSecurityContextMutator(podSC PodSecurityContextAccessor, containerSC ContainerSecurityContextMutator) ContainerSecurityContextMutator { + 
return &effectiveContainerSecurityContextWrapper{podSC: podSC, containerSC: containerSC} +} + +type effectiveContainerSecurityContextWrapper struct { + podSC PodSecurityContextAccessor + containerSC ContainerSecurityContextMutator +} + +func (w *effectiveContainerSecurityContextWrapper) ContainerSecurityContext() *api.SecurityContext { + return w.containerSC.ContainerSecurityContext() +} + +func (w *effectiveContainerSecurityContextWrapper) Capabilities() *api.Capabilities { + return w.containerSC.Capabilities() +} +func (w *effectiveContainerSecurityContextWrapper) SetCapabilities(v *api.Capabilities) { + if !reflect.DeepEqual(w.Capabilities(), v) { + w.containerSC.SetCapabilities(v) + } +} +func (w *effectiveContainerSecurityContextWrapper) Privileged() *bool { + return w.containerSC.Privileged() +} +func (w *effectiveContainerSecurityContextWrapper) SetPrivileged(v *bool) { + if !reflect.DeepEqual(w.Privileged(), v) { + w.containerSC.SetPrivileged(v) + } +} +func (w *effectiveContainerSecurityContextWrapper) SELinuxOptions() *api.SELinuxOptions { + if v := w.containerSC.SELinuxOptions(); v != nil { + return v + } + return w.podSC.SELinuxOptions() +} +func (w *effectiveContainerSecurityContextWrapper) SetSELinuxOptions(v *api.SELinuxOptions) { + if !reflect.DeepEqual(w.SELinuxOptions(), v) { + w.containerSC.SetSELinuxOptions(v) + } +} +func (w *effectiveContainerSecurityContextWrapper) RunAsUser() *int64 { + if v := w.containerSC.RunAsUser(); v != nil { + return v + } + return w.podSC.RunAsUser() +} +func (w *effectiveContainerSecurityContextWrapper) SetRunAsUser(v *int64) { + if !reflect.DeepEqual(w.RunAsUser(), v) { + w.containerSC.SetRunAsUser(v) + } +} +func (w *effectiveContainerSecurityContextWrapper) RunAsNonRoot() *bool { + if v := w.containerSC.RunAsNonRoot(); v != nil { + return v + } + return w.podSC.RunAsNonRoot() +} +func (w *effectiveContainerSecurityContextWrapper) SetRunAsNonRoot(v *bool) { + if !reflect.DeepEqual(w.RunAsNonRoot(), v) { + w.containerSC.SetRunAsNonRoot(v) + } +} +func (w *effectiveContainerSecurityContextWrapper) ReadOnlyRootFilesystem() *bool { + return w.containerSC.ReadOnlyRootFilesystem() +} +func (w *effectiveContainerSecurityContextWrapper) SetReadOnlyRootFilesystem(v *bool) { + if !reflect.DeepEqual(w.ReadOnlyRootFilesystem(), v) { + w.containerSC.SetReadOnlyRootFilesystem(v) + } +} +func (w *effectiveContainerSecurityContextWrapper) AllowPrivilegeEscalation() *bool { + return w.containerSC.AllowPrivilegeEscalation() +} +func (w *effectiveContainerSecurityContextWrapper) SetAllowPrivilegeEscalation(v *bool) { + if !reflect.DeepEqual(w.AllowPrivilegeEscalation(), v) { + w.containerSC.SetAllowPrivilegeEscalation(v) + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/securitycontext/fake.go b/vendor/k8s.io/kubernetes/pkg/securitycontext/fake.go index 2c2d0d22d455..975445bab06d 100644 --- a/vendor/k8s.io/kubernetes/pkg/securitycontext/fake.go +++ b/vendor/k8s.io/kubernetes/pkg/securitycontext/fake.go @@ -18,7 +18,7 @@ package securitycontext import ( "k8s.io/api/core/v1" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) // ValidSecurityContextWithContainerDefaults creates a valid security context provider based on diff --git a/vendor/k8s.io/kubernetes/pkg/securitycontext/util.go b/vendor/k8s.io/kubernetes/pkg/securitycontext/util.go index dee1564d0622..73d23a43131b 100644 --- a/vendor/k8s.io/kubernetes/pkg/securitycontext/util.go +++ b/vendor/k8s.io/kubernetes/pkg/securitycontext/util.go @@ -21,7 +21,7 @@ import ( "strings" 
"k8s.io/api/core/v1" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) // HasPrivilegedRequest returns the value of SecurityContext.Privileged, taking into account diff --git a/vendor/k8s.io/kubernetes/pkg/serviceaccount/BUILD b/vendor/k8s.io/kubernetes/pkg/serviceaccount/BUILD index 259190977ad0..afafa2849d9a 100644 --- a/vendor/k8s.io/kubernetes/pkg/serviceaccount/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/serviceaccount/BUILD @@ -12,8 +12,9 @@ go_library( "jwt.go", "util.go", ], + importpath = "k8s.io/kubernetes/pkg/serviceaccount", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/github.com/dgrijalva/jwt-go:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", @@ -26,6 +27,7 @@ go_library( go_test( name = "go_default_xtest", srcs = ["jwt_test.go"], + importpath = "k8s.io/kubernetes/pkg/serviceaccount_test", deps = [ ":go_default_library", "//pkg/controller/serviceaccount:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/serviceaccount/util.go b/vendor/k8s.io/kubernetes/pkg/serviceaccount/util.go index df4dc3a9140d..0503c1513e3b 100644 --- a/vendor/k8s.io/kubernetes/pkg/serviceaccount/util.go +++ b/vendor/k8s.io/kubernetes/pkg/serviceaccount/util.go @@ -20,7 +20,7 @@ import ( "k8s.io/api/core/v1" apiserverserviceaccount "k8s.io/apiserver/pkg/authentication/serviceaccount" "k8s.io/apiserver/pkg/authentication/user" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) // UserInfo returns a user.Info interface for the given namespace, service account name and UID diff --git a/vendor/k8s.io/kubernetes/pkg/ssh/BUILD b/vendor/k8s.io/kubernetes/pkg/ssh/BUILD index b2d85c491f8e..28794b702c4b 100644 --- a/vendor/k8s.io/kubernetes/pkg/ssh/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/ssh/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["ssh_test.go"], + importpath = "k8s.io/kubernetes/pkg/ssh", library = ":go_default_library", deps = [ "//vendor/github.com/golang/glog:go_default_library", @@ -20,6 +21,7 @@ go_test( go_library( name = "go_default_library", srcs = ["ssh.go"], + importpath = "k8s.io/kubernetes/pkg/ssh", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/util/async/BUILD b/vendor/k8s.io/kubernetes/pkg/util/async/BUILD index 72cae5beeca2..a00d4843d7b4 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/async/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/async/BUILD @@ -12,6 +12,7 @@ go_library( "bounded_frequency_runner.go", "runner.go", ], + importpath = "k8s.io/kubernetes/pkg/util/async", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/client-go/util/flowcontrol:go_default_library", @@ -24,6 +25,7 @@ go_test( "bounded_frequency_runner_test.go", "runner_test.go", ], + importpath = "k8s.io/kubernetes/pkg/util/async", library = ":go_default_library", ) diff --git a/vendor/k8s.io/kubernetes/pkg/util/bandwidth/BUILD b/vendor/k8s.io/kubernetes/pkg/util/bandwidth/BUILD index 4b73efbe35d3..f440a61cef4c 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/bandwidth/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/bandwidth/BUILD @@ -20,6 +20,7 @@ go_library( ], "//conditions:default": [], }), + importpath = "k8s.io/kubernetes/pkg/util/bandwidth", deps = [ "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", ] + select({ @@ -42,9 +43,10 @@ go_test( 
], "//conditions:default": [], }), + importpath = "k8s.io/kubernetes/pkg/util/bandwidth", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", ] + select({ diff --git a/vendor/k8s.io/kubernetes/pkg/util/config/BUILD b/vendor/k8s.io/kubernetes/pkg/util/config/BUILD index 5bed8fca326f..68bf1ccc1077 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/config/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/config/BUILD @@ -12,12 +12,14 @@ go_library( "config.go", "doc.go", ], + importpath = "k8s.io/kubernetes/pkg/util/config", deps = ["//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library"], ) go_test( name = "go_default_test", srcs = ["config_test.go"], + importpath = "k8s.io/kubernetes/pkg/util/config", library = ":go_default_library", ) diff --git a/vendor/k8s.io/kubernetes/pkg/util/configz/BUILD b/vendor/k8s.io/kubernetes/pkg/util/configz/BUILD index e9e60ce3c103..2fbc52d4c92f 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/configz/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/configz/BUILD @@ -9,11 +9,13 @@ load( go_library( name = "go_default_library", srcs = ["configz.go"], + importpath = "k8s.io/kubernetes/pkg/util/configz", ) go_test( name = "go_default_test", srcs = ["configz_test.go"], + importpath = "k8s.io/kubernetes/pkg/util/configz", library = ":go_default_library", ) diff --git a/vendor/k8s.io/kubernetes/pkg/util/dbus/BUILD b/vendor/k8s.io/kubernetes/pkg/util/dbus/BUILD index 2356cfe90fd8..d596d30649f2 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/dbus/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/dbus/BUILD @@ -13,12 +13,14 @@ go_library( "doc.go", "fake_dbus.go", ], + importpath = "k8s.io/kubernetes/pkg/util/dbus", deps = ["//vendor/github.com/godbus/dbus:go_default_library"], ) go_test( name = "go_default_test", srcs = ["dbus_test.go"], + importpath = "k8s.io/kubernetes/pkg/util/dbus", library = ":go_default_library", deps = ["//vendor/github.com/godbus/dbus:go_default_library"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/util/dbus/fake_dbus.go b/vendor/k8s.io/kubernetes/pkg/util/dbus/fake_dbus.go index 44131272e7a9..28cde82fb2f0 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/dbus/fake_dbus.go +++ b/vendor/k8s.io/kubernetes/pkg/util/dbus/fake_dbus.go @@ -18,6 +18,7 @@ package dbus import ( "fmt" + "sync" godbus "github.com/godbus/dbus" ) @@ -30,6 +31,7 @@ type DBusFake struct { // DBusFakeConnection represents a fake D-Bus connection type DBusFakeConnection struct { + lock sync.Mutex busObject *fakeObject objects map[string]*fakeObject signalHandlers []chan<- *godbus.Signal @@ -88,6 +90,8 @@ func (conn *DBusFakeConnection) Object(name, path string) Object { // Signal is part of the Connection interface func (conn *DBusFakeConnection) Signal(ch chan<- *godbus.Signal) { + conn.lock.Lock() + defer conn.lock.Unlock() for i := range conn.signalHandlers { if conn.signalHandlers[i] == ch { conn.signalHandlers = append(conn.signalHandlers[:i], conn.signalHandlers[i+1:]...) 
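The sync.Mutex added to DBusFakeConnection above exists so tests can register signal channels and emit signals from different goroutines without racing on the signalHandlers slice; the same lock is taken by EmitSignal in the next hunk. A rough, hypothetical illustration of the concurrent use the lock makes safe (not code from this patch; the function and package names are made up, and the import paths are the vendored ones used in this diff):

// Sketch only.
package dbusfakesketch

import (
	godbus "github.com/godbus/dbus"

	"k8s.io/kubernetes/pkg/util/dbus"
)

// concurrentFakeSignals mutates the handler list from one goroutine while
// another emits a signal; both paths now serialize on the fake connection's lock.
func concurrentFakeSignals(conn *dbus.DBusFakeConnection) <-chan *godbus.Signal {
	ch := make(chan *godbus.Signal, 1)
	go conn.Signal(ch) // mutates conn.signalHandlers under conn.lock
	go conn.EmitSignal("test.Sender", "/test/path", "test.Interface", "TestSignal")
	return ch
}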
@@ -109,6 +113,8 @@ func (conn *DBusFakeConnection) AddObject(name, path string, handler DBusFakeHan // EmitSignal emits a signal on conn func (conn *DBusFakeConnection) EmitSignal(name, path, iface, signal string, args ...interface{}) { + conn.lock.Lock() + defer conn.lock.Unlock() sig := &godbus.Signal{ Sender: name, Path: godbus.ObjectPath(path), diff --git a/vendor/k8s.io/kubernetes/pkg/util/ebtables/BUILD b/vendor/k8s.io/kubernetes/pkg/util/ebtables/BUILD index 814c30418ac4..6a9f7f0a71ee 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/ebtables/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/ebtables/BUILD @@ -9,12 +9,14 @@ load( go_library( name = "go_default_library", srcs = ["ebtables.go"], + importpath = "k8s.io/kubernetes/pkg/util/ebtables", deps = ["//vendor/k8s.io/utils/exec:go_default_library"], ) go_test( name = "go_default_test", srcs = ["ebtables_test.go"], + importpath = "k8s.io/kubernetes/pkg/util/ebtables", library = ":go_default_library", deps = [ "//vendor/k8s.io/utils/exec:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/util/env/BUILD b/vendor/k8s.io/kubernetes/pkg/util/env/BUILD index 646c4e6f6195..3e864094f69a 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/env/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/env/BUILD @@ -9,11 +9,13 @@ load( go_library( name = "go_default_library", srcs = ["env.go"], + importpath = "k8s.io/kubernetes/pkg/util/env", ) go_test( name = "go_default_test", srcs = ["env_test.go"], + importpath = "k8s.io/kubernetes/pkg/util/env", library = ":go_default_library", deps = ["//vendor/github.com/stretchr/testify/assert:go_default_library"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/util/file/BUILD b/vendor/k8s.io/kubernetes/pkg/util/file/BUILD index 5e7263154a09..325d1da146ea 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/file/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/file/BUILD @@ -3,11 +3,13 @@ package(default_visibility = ["//visibility:public"]) load( "@io_bazel_rules_go//go:def.bzl", "go_library", + "go_test", ) go_library( name = "go_default_library", srcs = ["file.go"], + importpath = "k8s.io/kubernetes/pkg/util/file", ) filegroup( @@ -22,3 +24,14 @@ filegroup( srcs = [":package-srcs"], tags = ["automanaged"], ) + +go_test( + name = "go_default_test", + srcs = ["file_test.go"], + importpath = "k8s.io/kubernetes/pkg/util/file", + library = ":go_default_library", + deps = [ + "//vendor/github.com/spf13/afero:go_default_library", + "//vendor/github.com/stretchr/testify/assert:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/util/filesystem/BUILD b/vendor/k8s.io/kubernetes/pkg/util/filesystem/BUILD index ec2c8fbb85db..fb44f1442e67 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/filesystem/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/filesystem/BUILD @@ -13,6 +13,7 @@ go_library( "filesystem.go", "watcher.go", ], + importpath = "k8s.io/kubernetes/pkg/util/filesystem", deps = [ "//vendor/github.com/fsnotify/fsnotify:go_default_library", "//vendor/github.com/spf13/afero:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/util/filesystem/defaultfs.go b/vendor/k8s.io/kubernetes/pkg/util/filesystem/defaultfs.go index ea9d73f139bc..f87fe3d6dff2 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/filesystem/defaultfs.go +++ b/vendor/k8s.io/kubernetes/pkg/util/filesystem/defaultfs.go @@ -62,6 +62,11 @@ func (DefaultFs) RemoveAll(path string) error { return os.RemoveAll(path) } +// Remove via os.RemoveAll +func (DefaultFs) Remove(name string) error { + return os.Remove(name) +} + // ReadFile via 
ioutil.ReadFile func (DefaultFs) ReadFile(filename string) ([]byte, error) { return ioutil.ReadFile(filename) diff --git a/vendor/k8s.io/kubernetes/pkg/util/filesystem/fakefs.go b/vendor/k8s.io/kubernetes/pkg/util/filesystem/fakefs.go index 28bbce8067b9..b103007a5571 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/filesystem/fakefs.go +++ b/vendor/k8s.io/kubernetes/pkg/util/filesystem/fakefs.go @@ -92,6 +92,11 @@ func (fs *fakeFs) RemoveAll(path string) error { return fs.a.RemoveAll(path) } +// Remove via afero.RemoveAll +func (fs *fakeFs) Remove(name string) error { + return fs.a.Remove(name) +} + // fakeFile implements File; for use with fakeFs type fakeFile struct { file afero.File diff --git a/vendor/k8s.io/kubernetes/pkg/util/filesystem/filesystem.go b/vendor/k8s.io/kubernetes/pkg/util/filesystem/filesystem.go index fcfef1d1f9cc..16e329b30f17 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/filesystem/filesystem.go +++ b/vendor/k8s.io/kubernetes/pkg/util/filesystem/filesystem.go @@ -31,6 +31,7 @@ type Filesystem interface { MkdirAll(path string, perm os.FileMode) error Chtimes(name string, atime time.Time, mtime time.Time) error RemoveAll(path string) error + Remove(name string) error // from "io/ioutil" ReadFile(filename string) ([]byte, error) diff --git a/vendor/k8s.io/kubernetes/pkg/util/flock/BUILD b/vendor/k8s.io/kubernetes/pkg/util/flock/BUILD index b5c07a60f8a1..ef9212a2075b 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/flock/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/flock/BUILD @@ -18,6 +18,7 @@ go_library( ], "//conditions:default": [], }), + importpath = "k8s.io/kubernetes/pkg/util/flock", deps = select({ "@io_bazel_rules_go//go/platform:darwin_amd64": [ "//vendor/golang.org/x/sys/unix:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/util/goroutinemap/BUILD b/vendor/k8s.io/kubernetes/pkg/util/goroutinemap/BUILD index 8565a939effc..fe6895f0a6c6 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/goroutinemap/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/goroutinemap/BUILD @@ -9,6 +9,7 @@ load( go_library( name = "go_default_library", srcs = ["goroutinemap.go"], + importpath = "k8s.io/kubernetes/pkg/util/goroutinemap", deps = [ "//pkg/util/goroutinemap/exponentialbackoff:go_default_library", "//vendor/github.com/golang/glog:go_default_library", @@ -19,6 +20,7 @@ go_library( go_test( name = "go_default_test", srcs = ["goroutinemap_test.go"], + importpath = "k8s.io/kubernetes/pkg/util/goroutinemap", library = ":go_default_library", deps = ["//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff/BUILD b/vendor/k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff/BUILD index 929042897526..9f78c287368e 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["exponential_backoff.go"], + importpath = "k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff", ) filegroup( diff --git a/vendor/k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff/exponential_backoff.go b/vendor/k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff/exponential_backoff.go index 9dbad16a2038..8cd00a4d44bc 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff/exponential_backoff.go +++ b/vendor/k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff/exponential_backoff.go @@ -69,9 
+69,8 @@ func (expBackoff *ExponentialBackoff) Update(err *error) { expBackoff.lastErrorTime = time.Now() } -func (expBackoff *ExponentialBackoff) GenerateNoRetriesPermittedMsg( - operationName string) string { - return fmt.Sprintf("Operation for %q failed. No retries permitted until %v (durationBeforeRetry %v). Error: %v", +func (expBackoff *ExponentialBackoff) GenerateNoRetriesPermittedMsg(operationName string) string { + return fmt.Sprintf("Operation for %q failed. No retries permitted until %v (durationBeforeRetry %v). Error: %q", operationName, expBackoff.lastErrorTime.Add(expBackoff.durationBeforeRetry), expBackoff.durationBeforeRetry, diff --git a/vendor/k8s.io/kubernetes/pkg/util/hash/BUILD b/vendor/k8s.io/kubernetes/pkg/util/hash/BUILD index 0a066741af9f..bafca6ba8bcc 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/hash/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/hash/BUILD @@ -9,12 +9,14 @@ load( go_library( name = "go_default_library", srcs = ["hash.go"], + importpath = "k8s.io/kubernetes/pkg/util/hash", deps = ["//vendor/github.com/davecgh/go-spew/spew:go_default_library"], ) go_test( name = "go_default_test", srcs = ["hash_test.go"], + importpath = "k8s.io/kubernetes/pkg/util/hash", library = ":go_default_library", deps = ["//vendor/github.com/davecgh/go-spew/spew:go_default_library"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/util/interrupt/BUILD b/vendor/k8s.io/kubernetes/pkg/util/interrupt/BUILD index 7c2a621e7c6a..6e4484940f29 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/interrupt/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/interrupt/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["interrupt.go"], + importpath = "k8s.io/kubernetes/pkg/util/interrupt", ) filegroup( diff --git a/vendor/k8s.io/kubernetes/pkg/util/io/BUILD b/vendor/k8s.io/kubernetes/pkg/util/io/BUILD index 243ef98712a3..da3218fb232e 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/io/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/io/BUILD @@ -11,6 +11,7 @@ go_library( "consistentread.go", "writer.go", ], + importpath = "k8s.io/kubernetes/pkg/util/io", deps = ["//vendor/github.com/golang/glog:go_default_library"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/util/ipconfig/BUILD b/vendor/k8s.io/kubernetes/pkg/util/ipconfig/BUILD index ea22fd91dce5..f452283eab73 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/ipconfig/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/ipconfig/BUILD @@ -12,6 +12,7 @@ go_library( "doc.go", "ipconfig.go", ], + importpath = "k8s.io/kubernetes/pkg/util/ipconfig", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", @@ -21,6 +22,7 @@ go_library( go_test( name = "go_default_test", srcs = ["ipconfig_test.go"], + importpath = "k8s.io/kubernetes/pkg/util/ipconfig", library = ":go_default_library", deps = ["//vendor/k8s.io/utils/exec:go_default_library"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/util/ipset/BUILD b/vendor/k8s.io/kubernetes/pkg/util/ipset/BUILD new file mode 100644 index 000000000000..20f172c010e0 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/ipset/BUILD @@ -0,0 +1,41 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "ipset.go", + "types.go", + ], + importpath = "k8s.io/kubernetes/pkg/util/ipset", + visibility = ["//visibility:public"], + deps = ["//vendor/k8s.io/utils/exec:go_default_library"], +) + +go_test( + name = "go_default_test", + srcs = ["ipset_test.go"], + importpath = 
"k8s.io/kubernetes/pkg/util/ipset", + library = ":go_default_library", + deps = [ + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//vendor/k8s.io/utils/exec:go_default_library", + "//vendor/k8s.io/utils/exec/testing:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/util/ipset/testing:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/util/ipset/ipset.go b/vendor/k8s.io/kubernetes/pkg/util/ipset/ipset.go new file mode 100644 index 000000000000..ba92bff74bf0 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/ipset/ipset.go @@ -0,0 +1,325 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ipset + +import ( + "bytes" + "fmt" + "regexp" + "strconv" + "strings" + + utilexec "k8s.io/utils/exec" +) + +// Interface is an injectable interface for running ipset commands. Implementations must be goroutine-safe. +type Interface interface { + // FlushSet deletes all entries from a named set. + FlushSet(set string) error + // DestroySet deletes a named set. + DestroySet(set string) error + // DestroyAllSets deletes all sets. + DestroyAllSets() error + // CreateSet creates a new set, it will ignore error when the set already exists if ignoreExistErr=true. + CreateSet(set *IPSet, ignoreExistErr bool) error + // AddEntry adds a new entry to the named set. + AddEntry(entry string, set string, ignoreExistErr bool) error + // DelEntry deletes one entry from the named set + DelEntry(entry string, set string) error + // Test test if an entry exists in the named set + TestEntry(entry string, set string) (bool, error) + // ListEntries lists all the entries from a named set + ListEntries(set string) ([]string, error) + // ListSets list all set names from kernel + ListSets() ([]string, error) + // GetVersion returns the "X.Y" version string for ipset. + GetVersion() (string, error) +} + +// IPSetCmd represents the ipset util. We use ipset command for ipset execute. +const IPSetCmd = "ipset" + +// EntryMemberPattern is the regular expression pattern of ipset member list. +// The raw output of ipset command `ipset list {set}` is similar to, +//Name: foobar +//Type: hash:ip,port +//Revision: 2 +//Header: family inet hashsize 1024 maxelem 65536 +//Size in memory: 16592 +//References: 0 +//Members: +//192.168.1.2,tcp:8080 +//192.168.1.1,udp:53 +var EntryMemberPattern = "(?m)^(.*\n)*Members:\n" + +// VersionPattern is the regular expression pattern of ipset version string. +// ipset version output is similar to "v6.10". +var VersionPattern = "v[0-9]+\\.[0-9]+" + +// IPSet implements an Interface to an set. +type IPSet struct { + // Name is the set name. + Name string + // SetType specifies the ipset type. 
+ SetType Type + // HashFamily specifies the protocol family of the IP addresses to be stored in the set. + // The default is inet, i.e IPv4. If users want to use IPv6, they should specify inet6. + HashFamily string + // HashSize specifies the hash table size of ipset. + HashSize int + // MaxElem specifies the max element number of ipset. + MaxElem int + // PortRange specifies the port range of bitmap:port type ipset. + PortRange string +} + +// Entry represents a ipset entry. +type Entry struct { + // IP is the entry's IP. The IP address protocol corresponds to the HashFamily of IPSet. + // All entries' IP addresses in the same ip set has same the protocol, IPv4 or IPv6. + IP string + // Port is the entry's Port. + Port int + // Protocol is the entry's Protocol. The protocols of entries in the same ip set are all + // the same. The accepted protocols are TCP and UDP. + Protocol string + // Net is the entry's IP network address. Network address with zero prefix size can NOT + // be stored. + Net string + // IP2 is the entry's second IP. IP2 may not be empty for `hash:ip,port,ip` type ip set. + IP2 string + // SetType specifies the type of ip set where the entry exists. + SetType Type +} + +func (e *Entry) String() string { + switch e.SetType { + case HashIPPort: + // Entry{192.168.1.1, udp, 53} -> 192.168.1.1,udp:53 + // Entry{192.168.1.2, tcp, 8080} -> 192.168.1.2,tcp:8080 + return fmt.Sprintf("%s,%s:%s", e.IP, e.Protocol, strconv.Itoa(e.Port)) + case HashIPPortIP: + // Entry{192.168.1.1, udp, 53, 10.0.0.1} -> 192.168.1.1,udp:53,10.0.0.1 + // Entry{192.168.1.2, tcp, 8080, 192.168.1.2} -> 192.168.1.2,tcp:8080,192.168.1.2 + return fmt.Sprintf("%s,%s:%s,%s", e.IP, e.Protocol, strconv.Itoa(e.Port), e.IP2) + case HashIPPortNet: + // Entry{192.168.1.2, udp, 80, 10.0.1.0/24} -> 192.168.1.2,udp:80,10.0.1.0/24 + // Entry{192.168.2,25, tcp, 8080, 10.1.0.0/16} -> 192.168.2,25,tcp:8080,10.1.0.0/16 + return fmt.Sprintf("%s,%s:%s,%s", e.IP, e.Protocol, strconv.Itoa(e.Port), e.Net) + case BitmapPort: + // Entry{53} -> 53 + // Entry{8080} -> 8080 + return strconv.Itoa(e.Port) + } + return "" +} + +type runner struct { + exec utilexec.Interface +} + +// New returns a new Interface which will exec ipset. +func New(exec utilexec.Interface) Interface { + return &runner{ + exec: exec, + } +} + +// CreateSet creates a new set, it will ignore error when the set already exists if ignoreExistErr=true. +func (runner *runner) CreateSet(set *IPSet, ignoreExistErr bool) error { + // Using default values. + if set.HashSize == 0 { + set.HashSize = 1024 + } + if set.MaxElem == 0 { + set.MaxElem = 65536 + } + if set.HashFamily == "" { + set.HashFamily = ProtocolFamilyIPV4 + } + if len(set.HashFamily) != 0 && set.HashFamily != ProtocolFamilyIPV4 && set.HashFamily != ProtocolFamilyIPV6 { + return fmt.Errorf("Currently supported protocol families are: %s and %s, %s is not supported", ProtocolFamilyIPV4, ProtocolFamilyIPV6, set.HashFamily) + } + // Default ipset type is "hash:ip,port" + if len(set.SetType) == 0 { + set.SetType = HashIPPort + } + // Check if setType is supported + if !IsValidIPSetType(set.SetType) { + return fmt.Errorf("Currently supported ipset types are: %v, %s is not supported", ValidIPSetTypes, set.SetType) + } + + return runner.createSet(set, ignoreExistErr) +} + +// If ignoreExistErr is set to true, then the -exist option of ipset will be specified, ipset ignores the error +// otherwise raised when the same set (setname and create parameters are identical) already exists. 
+func (runner *runner) createSet(set *IPSet, ignoreExistErr bool) error { + args := []string{"create", set.Name, string(set.SetType)} + if set.SetType == HashIPPortIP || set.SetType == HashIPPort { + args = append(args, + "family", set.HashFamily, + "hashsize", strconv.Itoa(set.HashSize), + "maxelem", strconv.Itoa(set.MaxElem), + ) + } + if set.SetType == BitmapPort { + if len(set.PortRange) == 0 { + set.PortRange = DefaultPortRange + } + if !validatePortRange(set.PortRange) { + return fmt.Errorf("invalid port range for %s type ip set: %s, expect: a-b", BitmapPort, set.PortRange) + } + args = append(args, "range", set.PortRange) + } + if ignoreExistErr { + args = append(args, "-exist") + } + if _, err := runner.exec.Command(IPSetCmd, args...).CombinedOutput(); err != nil { + return fmt.Errorf("error creating ipset %s, error: %v", set.Name, err) + } + return nil +} + +// AddEntry adds a new entry to the named set. +// If the -exist option is specified, ipset ignores the error otherwise raised when +// the same set (setname and create parameters are identical) already exists. +func (runner *runner) AddEntry(entry string, set string, ignoreExistErr bool) error { + args := []string{"add", set, entry} + if ignoreExistErr { + args = append(args, "-exist") + } + if _, err := runner.exec.Command(IPSetCmd, args...).CombinedOutput(); err != nil { + return fmt.Errorf("error adding entry %s, error: %v", entry, err) + } + return nil +} + +// DelEntry is used to delete the specified entry from the set. +func (runner *runner) DelEntry(entry string, set string) error { + if _, err := runner.exec.Command(IPSetCmd, "del", set, entry).CombinedOutput(); err != nil { + return fmt.Errorf("error deleting entry %s: from set: %s, error: %v", entry, set, err) + } + return nil +} + +// TestEntry is used to check whether the specified entry is in the set or not. +func (runner *runner) TestEntry(entry string, set string) (bool, error) { + if out, err := runner.exec.Command(IPSetCmd, "test", set, entry).CombinedOutput(); err == nil { + reg, e := regexp.Compile("NOT") + if e == nil && reg.MatchString(string(out)) { + return false, nil + } else if e == nil { + return true, nil + } else { + return false, fmt.Errorf("error testing entry: %s, error: %v", entry, e) + } + } else { + return false, fmt.Errorf("error testing entry %s: %v (%s)", entry, err, out) + } +} + +// FlushSet deletes all entries from a named set. +func (runner *runner) FlushSet(set string) error { + if _, err := runner.exec.Command(IPSetCmd, "flush", set).CombinedOutput(); err != nil { + return fmt.Errorf("error flushing set: %s, error: %v", set, err) + } + return nil +} + +// DestroySet is used to destroy a named set. +func (runner *runner) DestroySet(set string) error { + if _, err := runner.exec.Command(IPSetCmd, "destroy", set).CombinedOutput(); err != nil { + return fmt.Errorf("error destroying set %s:, error: %v", set, err) + } + return nil +} + +// DestroyAllSets is used to destroy all sets. 
+func (runner *runner) DestroyAllSets() error { + if _, err := runner.exec.Command(IPSetCmd, "destroy").CombinedOutput(); err != nil { + return fmt.Errorf("error destroying all sets, error: %v", err) + } + return nil +} + +// ListSets list all set names from kernel +func (runner *runner) ListSets() ([]string, error) { + out, err := runner.exec.Command(IPSetCmd, "list", "-n").CombinedOutput() + if err != nil { + return nil, fmt.Errorf("error listing all sets, error: %v", err) + } + return strings.Split(string(out), "\n"), nil +} + +// ListEntries lists all the entries from a named set. +func (runner *runner) ListEntries(set string) ([]string, error) { + if len(set) == 0 { + return nil, fmt.Errorf("set name can't be nil") + } + out, err := runner.exec.Command(IPSetCmd, "list", set).CombinedOutput() + if err != nil { + return nil, fmt.Errorf("error listing set: %s, error: %v", set, err) + } + memberMatcher := regexp.MustCompile(EntryMemberPattern) + list := memberMatcher.ReplaceAllString(string(out[:]), "") + strs := strings.Split(list, "\n") + results := make([]string, 0) + for i := range strs { + if len(strs[i]) > 0 { + results = append(results, strs[i]) + } + } + return results, nil +} + +// GetVersion returns the version string. +func (runner *runner) GetVersion() (string, error) { + return getIPSetVersionString(runner.exec) +} + +// getIPSetVersionString runs "ipset --version" to get the version string +// in the form of "X.Y", i.e "6.19" +func getIPSetVersionString(exec utilexec.Interface) (string, error) { + cmd := exec.Command(IPSetCmd, "--version") + cmd.SetStdin(bytes.NewReader([]byte{})) + bytes, err := cmd.CombinedOutput() + if err != nil { + return "", err + } + versionMatcher := regexp.MustCompile(VersionPattern) + match := versionMatcher.FindStringSubmatch(string(bytes)) + if match == nil { + return "", fmt.Errorf("no ipset version found in string: %s", bytes) + } + return match[0], nil +} + +func validatePortRange(portRange string) bool { + strs := strings.Split(portRange, "-") + if len(strs) != 2 { + return false + } + for i := range strs { + if _, err := strconv.Atoi(strs[i]); err != nil { + return false + } + } + return true +} + +var _ = Interface(&runner{}) diff --git a/vendor/k8s.io/kubernetes/pkg/util/ipset/types.go b/vendor/k8s.io/kubernetes/pkg/util/ipset/types.go new file mode 100644 index 000000000000..d2406c00878b --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/ipset/types.go @@ -0,0 +1,70 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ipset + +// Type represents the ipset type +type Type string + +const ( + // HashIPPort represents the `hash:ip,port` type ipset. The hash:ip,port is similar to hash:ip but + // you can store IP address and protocol-port pairs in it. TCP, SCTP, UDP, UDPLITE, ICMP and ICMPv6 are supported + // with port numbers/ICMP(v6) types and other protocol numbers without port information. + HashIPPort Type = "hash:ip,port" + // HashIPPortIP represents the `hash:ip,port,ip` type ipset. 
The hash:ip,port,ip set type uses a hash to store + // IP address, port number and a second IP address triples. The port number is interpreted together with a + // protocol (default TCP) and zero protocol number cannot be used. + HashIPPortIP Type = "hash:ip,port,ip" + // HashIPPortNet represents the `hash:ip,port,net` type ipset. The hash:ip,port,net set type uses a hash to store IP address, port number and IP network address triples. The port + // number is interpreted together with a protocol (default TCP) and zero protocol number cannot be used. Network address + // with zero prefix size cannot be stored either. + HashIPPortNet Type = "hash:ip,port,net" + // BitmapPort represents the `bitmap:port` type ipset. The bitmap:port set type uses a memory range, where each bit + // represents one TCP/UDP port. A bitmap:port type of set can store up to 65535 ports. + BitmapPort Type = "bitmap:port" +) + +// DefaultPortRange defines the default bitmap:port valid port range. +const DefaultPortRange string = "0-65535" + +const ( + // ProtocolFamilyIPV4 represents IPv4 protocol. + ProtocolFamilyIPV4 = "inet" + // ProtocolFamilyIPV6 represents IPv6 protocol. + ProtocolFamilyIPV6 = "inet6" + // ProtocolTCP represents TCP protocol. + ProtocolTCP = "tcp" + // ProtocolUDP represents UDP protocol. + ProtocolUDP = "udp" +) + +// ValidIPSetTypes defines the supported ip set type. +var ValidIPSetTypes = []Type{ + HashIPPort, + HashIPPortIP, + BitmapPort, + HashIPPortNet, +} + +// IsValidIPSetType checks if the given ipset type is valid. +func IsValidIPSetType(set Type) bool { + for _, valid := range ValidIPSetTypes { + if set == valid { + return true + } + } + return false +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/iptables/BUILD b/vendor/k8s.io/kubernetes/pkg/util/iptables/BUILD index c7970e748c65..0f3dbcef4bc2 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/iptables/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/iptables/BUILD @@ -19,6 +19,7 @@ go_library( ], "//conditions:default": [], }), + importpath = "k8s.io/kubernetes/pkg/util/iptables", deps = [ "//pkg/util/dbus:go_default_library", "//pkg/util/version:go_default_library", @@ -44,6 +45,7 @@ go_test( ], "//conditions:default": [], }), + importpath = "k8s.io/kubernetes/pkg/util/iptables", library = ":go_default_library", deps = select({ "@io_bazel_rules_go//go/platform:linux_amd64": [ diff --git a/vendor/k8s.io/kubernetes/pkg/util/iptables/iptables.go b/vendor/k8s.io/kubernetes/pkg/util/iptables/iptables.go index 84e0fe030102..292288d218b0 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/iptables/iptables.go +++ b/vendor/k8s.io/kubernetes/pkg/util/iptables/iptables.go @@ -82,6 +82,7 @@ type Table string const ( TableNAT Table = "nat" TableFilter Table = "filter" + TableMangle Table = "mangle" ) type Chain string @@ -91,13 +92,16 @@ const ( ChainPrerouting Chain = "PREROUTING" ChainOutput Chain = "OUTPUT" ChainInput Chain = "INPUT" + ChainForward Chain = "FORWARD" ) const ( - cmdIPTablesSave string = "iptables-save" - cmdIPTablesRestore string = "iptables-restore" - cmdIPTables string = "iptables" - cmdIp6tables string = "ip6tables" + cmdIPTablesSave string = "iptables-save" + cmdIPTablesRestore string = "iptables-restore" + cmdIPTables string = "iptables" + cmdIP6TablesRestore string = "ip6tables-restore" + cmdIP6TablesSave string = "ip6tables-save" + cmdIP6Tables string = "ip6tables" ) // Option flag for Restore @@ -116,9 +120,11 @@ const NoFlushTables FlushFlag = false // (test whether a rule exists). 
const MinCheckVersion = "1.4.11" -// Minimum iptables versions supporting the -w and -w2 flags -const MinWaitVersion = "1.4.20" -const MinWait2Version = "1.4.22" +// Minimum iptables versions supporting the -w and -w flags +const WaitMinVersion = "1.4.20" +const WaitSecondsMinVersion = "1.4.22" +const WaitString = "-w" +const WaitSecondsString = "-w5" const LockfilePath16x = "/run/xtables.lock" @@ -140,7 +146,7 @@ type runner struct { // newInternal returns a new Interface which will exec iptables, and allows the // caller to change the iptables-restore lockfile path func newInternal(exec utilexec.Interface, dbus utildbus.Interface, protocol Protocol, lockfilePath string) Interface { - vstring, err := getIPTablesVersionString(exec) + vstring, err := getIPTablesVersionString(exec, protocol) if err != nil { glog.Warningf("Error checking iptables version, assuming version at least %s: %v", MinCheckVersion, err) vstring = MinCheckVersion @@ -156,7 +162,7 @@ func newInternal(exec utilexec.Interface, dbus utildbus.Interface, protocol Prot protocol: protocol, hasCheck: getIPTablesHasCheckCommand(vstring), waitFlag: getIPTablesWaitFlag(vstring), - restoreWaitFlag: getIPTablesRestoreWaitFlag(exec), + restoreWaitFlag: getIPTablesRestoreWaitFlag(exec, protocol), lockfilePath: lockfilePath, } // TODO this needs to be moved to a separate Start() or Run() function so that New() has zero side @@ -207,7 +213,7 @@ func (runner *runner) connectToFirewallD() { // GetVersion returns the version string. func (runner *runner) GetVersion() (string, error) { - return getIPTablesVersionString(runner.exec) + return getIPTablesVersionString(runner.exec, runner.protocol) } // EnsureChain is part of Interface. @@ -310,9 +316,10 @@ func (runner *runner) SaveInto(table Table, buffer *bytes.Buffer) error { defer runner.mu.Unlock() // run and return + iptablesSaveCmd := iptablesSaveCommand(runner.protocol) args := []string{"-t", string(table)} - glog.V(4).Infof("running iptables-save %v", args) - cmd := runner.exec.Command(cmdIPTablesSave, args...) + glog.V(4).Infof("running %s %v", iptablesSaveCmd, args) + cmd := runner.exec.Command(iptablesSaveCmd, args...) // Since CombinedOutput() doesn't support redirecting it to a buffer, // we need to workaround it by redirecting stdout and stderr to buffer // and explicitly calling Run() [CombinedOutput() underneath itself @@ -370,8 +377,9 @@ func (runner *runner) restoreInternal(args []string, data []byte, flush FlushFla // run the command and return the output or an error including the output and error fullArgs := append(runner.restoreWaitFlag, args...) - glog.V(4).Infof("running iptables-restore %v", fullArgs) - cmd := runner.exec.Command(cmdIPTablesRestore, fullArgs...) + iptablesRestoreCmd := iptablesRestoreCommand(runner.protocol) + glog.V(4).Infof("running %s %v", iptablesRestoreCmd, fullArgs) + cmd := runner.exec.Command(iptablesRestoreCmd, fullArgs...) 
cmd.SetStdin(bytes.NewBuffer(data)) b, err := cmd.CombinedOutput() if err != nil { @@ -380,17 +388,32 @@ func (runner *runner) restoreInternal(args []string, data []byte, flush FlushFla return nil } -func (runner *runner) iptablesCommand() string { - if runner.IsIpv6() { - return cmdIp6tables +func iptablesSaveCommand(protocol Protocol) string { + if protocol == ProtocolIpv6 { + return cmdIP6TablesSave + } else { + return cmdIPTablesSave + } +} + +func iptablesRestoreCommand(protocol Protocol) string { + if protocol == ProtocolIpv6 { + return cmdIP6TablesRestore + } else { + return cmdIPTablesRestore + } +} + +func iptablesCommand(protocol Protocol) string { + if protocol == ProtocolIpv6 { + return cmdIP6Tables } else { return cmdIPTables } } func (runner *runner) run(op operation, args []string) ([]byte, error) { - iptablesCmd := runner.iptablesCommand() - + iptablesCmd := iptablesCommand(runner.protocol) fullArgs := append(runner.waitFlag, string(op)) fullArgs = append(fullArgs, args...) glog.V(5).Infof("running iptables %s %v", string(op), args) @@ -418,8 +441,9 @@ func trimhex(s string) string { // Present for compatibility with <1.4.11 versions of iptables. This is full // of hack and half-measures. We should nix this ASAP. func (runner *runner) checkRuleWithoutCheck(table Table, chain Chain, args ...string) (bool, error) { - glog.V(1).Infof("running iptables-save -t %s", string(table)) - out, err := runner.exec.Command(cmdIPTablesSave, "-t", string(table)).CombinedOutput() + iptablesSaveCmd := iptablesSaveCommand(runner.protocol) + glog.V(1).Infof("running %s -t %s", iptablesSaveCmd, string(table)) + out, err := runner.exec.Command(iptablesSaveCmd, "-t", string(table)).CombinedOutput() if err != nil { return false, fmt.Errorf("error checking rule: %v", err) } @@ -517,32 +541,33 @@ func getIPTablesWaitFlag(vstring string) []string { return nil } - minVersion, err := utilversion.ParseGeneric(MinWaitVersion) + minVersion, err := utilversion.ParseGeneric(WaitMinVersion) if err != nil { - glog.Errorf("MinWaitVersion (%s) is not a valid version string: %v", MinWaitVersion, err) + glog.Errorf("WaitMinVersion (%s) is not a valid version string: %v", WaitMinVersion, err) return nil } if version.LessThan(minVersion) { return nil } - minVersion, err = utilversion.ParseGeneric(MinWait2Version) + minVersion, err = utilversion.ParseGeneric(WaitSecondsMinVersion) if err != nil { - glog.Errorf("MinWait2Version (%s) is not a valid version string: %v", MinWait2Version, err) + glog.Errorf("WaitSecondsMinVersion (%s) is not a valid version string: %v", WaitSecondsMinVersion, err) return nil } if version.LessThan(minVersion) { - return []string{"-w"} + return []string{WaitString} } else { - return []string{"-w2"} + return []string{WaitSecondsString} } } // getIPTablesVersionString runs "iptables --version" to get the version string // in the form "X.X.X" -func getIPTablesVersionString(exec utilexec.Interface) (string, error) { +func getIPTablesVersionString(exec utilexec.Interface, protocol Protocol) (string, error) { // this doesn't access mutable state so we don't need to use the interface / runner - bytes, err := exec.Command(cmdIPTables, "--version").CombinedOutput() + iptablesCmd := iptablesCommand(protocol) + bytes, err := exec.Command(iptablesCmd, "--version").CombinedOutput() if err != nil { return "", err } @@ -558,8 +583,8 @@ func getIPTablesVersionString(exec utilexec.Interface) (string, error) { // --wait support landed in v1.6.1+ right before --version support, so // any version of 
iptables-restore that supports --version will also // support --wait -func getIPTablesRestoreWaitFlag(exec utilexec.Interface) []string { - vstring, err := getIPTablesRestoreVersionString(exec) +func getIPTablesRestoreWaitFlag(exec utilexec.Interface, protocol Protocol) []string { + vstring, err := getIPTablesRestoreVersionString(exec, protocol) if err != nil || vstring == "" { glog.V(3).Infof("couldn't get iptables-restore version; assuming it doesn't support --wait") return nil @@ -569,18 +594,19 @@ func getIPTablesRestoreWaitFlag(exec utilexec.Interface) []string { return nil } - return []string{"--wait=2"} + return []string{WaitSecondsString} } // getIPTablesRestoreVersionString runs "iptables-restore --version" to get the version string // in the form "X.X.X" -func getIPTablesRestoreVersionString(exec utilexec.Interface) (string, error) { +func getIPTablesRestoreVersionString(exec utilexec.Interface, protocol Protocol) (string, error) { // this doesn't access mutable state so we don't need to use the interface / runner // iptables-restore hasn't always had --version, and worse complains // about unrecognized commands but doesn't exit when it gets them. // Work around that by setting stdin to nothing so it exits immediately. - cmd := exec.Command(cmdIPTablesRestore, "--version") + iptablesRestoreCmd := iptablesRestoreCommand(protocol) + cmd := exec.Command(iptablesRestoreCmd, "--version") cmd.SetStdin(bytes.NewReader([]byte{})) bytes, err := cmd.CombinedOutput() if err != nil { diff --git a/vendor/k8s.io/kubernetes/pkg/util/ipvs/BUILD b/vendor/k8s.io/kubernetes/pkg/util/ipvs/BUILD index 2a565b7d3bac..21c27d219c78 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/ipvs/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/ipvs/BUILD @@ -1,7 +1,5 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", @@ -18,14 +16,11 @@ go_test( ], "//conditions:default": [], }), + importpath = "k8s.io/kubernetes/pkg/util/ipvs", library = ":go_default_library", - tags = ["automanaged"], deps = select({ "@io_bazel_rules_go//go/platform:linux_amd64": [ "//vendor/github.com/docker/libnetwork/ipvs:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/k8s.io/utils/exec:go_default_library", - "//vendor/k8s.io/utils/exec/testing:go_default_library", ], "//conditions:default": [], }), @@ -42,7 +37,7 @@ go_library( ], "//conditions:default": [], }), - tags = ["automanaged"], + importpath = "k8s.io/kubernetes/pkg/util/ipvs", deps = [ "//vendor/k8s.io/utils/exec:go_default_library", ] + select({ diff --git a/vendor/k8s.io/kubernetes/pkg/util/ipvs/ipvs.go b/vendor/k8s.io/kubernetes/pkg/util/ipvs/ipvs.go index 74917b9b7bf2..9d15beb0c636 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/ipvs/ipvs.go +++ b/vendor/k8s.io/kubernetes/pkg/util/ipvs/ipvs.go @@ -25,10 +25,6 @@ import ( type Interface interface { // Flush clears all virtual servers in system. return occurred error immediately. Flush() error - // EnsureVirtualServerAddressBind checks if virtual server's address is bound to dummy interface and, if not, binds it. If the address is already bound, return true. - EnsureVirtualServerAddressBind(serv *VirtualServer, dev string) (exist bool, err error) - // UnbindVirtualServerAddress checks if virtual server's address is bound to dummy interface and, if so, unbinds it. - UnbindVirtualServerAddress(serv *VirtualServer, dev string) error // AddVirtualServer creates the specified virtual server. 
AddVirtualServer(*VirtualServer) error // UpdateVirtualServer updates an already existing virtual server. If the virtual server does not exist, return error. @@ -63,6 +59,8 @@ type ServiceFlags uint32 const ( // FlagPersistent specify IPVS service session affinity FlagPersistent = 0x1 + // FlagHashed specify IPVS service hash flag + FlagHashed = 0x2 ) // Equal check the equality of virtual server. @@ -87,6 +85,14 @@ type RealServer struct { Weight int } -func (dest *RealServer) String() string { - return net.JoinHostPort(dest.Address.String(), strconv.Itoa(int(dest.Port))) +func (rs *RealServer) String() string { + return net.JoinHostPort(rs.Address.String(), strconv.Itoa(int(rs.Port))) +} + +// Equal check the equality of real server. +// We don't use struct == since it doesn't work because of slice. +func (rs *RealServer) Equal(other *RealServer) bool { + return rs.Address.Equal(other.Address) && + rs.Port == other.Port && + rs.Weight == other.Weight } diff --git a/vendor/k8s.io/kubernetes/pkg/util/ipvs/ipvs_linux.go b/vendor/k8s.io/kubernetes/pkg/util/ipvs/ipvs_linux.go index be50ad515c58..3652a9566369 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/ipvs/ipvs_linux.go +++ b/vendor/k8s.io/kubernetes/pkg/util/ipvs/ipvs_linux.go @@ -30,8 +30,6 @@ import ( utilexec "k8s.io/utils/exec" ) -const cmdIP = "ip" - // runner implements Interface. type runner struct { exec utilexec.Interface @@ -51,34 +49,6 @@ func New(exec utilexec.Interface) Interface { } } -// EnsureVirtualServerAddressBind is part of Interface. -func (runner *runner) EnsureVirtualServerAddressBind(vs *VirtualServer, dummyDev string) (exist bool, err error) { - addr := vs.Address.String() + "/32" - args := []string{"addr", "add", addr, "dev", dummyDev} - out, err := runner.exec.Command(cmdIP, args...).CombinedOutput() - if err != nil { - // "exit status 2" will be returned if the address is already bound to dummy device - if ee, ok := err.(utilexec.ExitError); ok { - if ee.Exited() && ee.ExitStatus() == 2 { - return true, nil - } - } - return false, fmt.Errorf("error bind address: %s to dummy interface: %s, err: %v: %s", vs.Address.String(), dummyDev, err, out) - } - return false, nil -} - -// UnbindVirtualServerAddress is part of Interface. -func (runner *runner) UnbindVirtualServerAddress(vs *VirtualServer, dummyDev string) error { - addr := vs.Address.String() + "/32" - args := []string{"addr", "del", addr, "dev", dummyDev} - out, err := runner.exec.Command(cmdIP, args...).CombinedOutput() - if err != nil { - return fmt.Errorf("error unbind address: %s from dummy interface: %s, err: %v: %s", vs.Address.String(), dummyDev, err, out) - } - return nil -} - // AddVirtualServer is part of Interface. func (runner *runner) AddVirtualServer(vs *VirtualServer) error { eSvc, err := toBackendService(vs) @@ -142,18 +112,7 @@ func (runner *runner) GetVirtualServers() ([]*VirtualServer, error) { // Flush is part of Interface. Currently we delete IPVS services one by one func (runner *runner) Flush() error { - vss, err := runner.GetVirtualServers() - if err != nil { - return err - } - for _, vs := range vss { - err := runner.DeleteVirtualServer(vs) - // TODO: aggregate errors? - if err != nil { - return err - } - } - return nil + return runner.ipvsHandle.Flush() } // AddRealServer is part of Interface. 
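An aside on the new pkg/util/ipset package introduced earlier in this diff: its Interface reads most easily next to a small usage sketch. The following is illustrative only, not part of the change itself; the set name, entry values, and the main wrapper are assumptions, and it simply wires the runner to k8s.io/utils/exec and drives CreateSet/AddEntry the way a proxier-style caller might.

package main

import (
	"fmt"

	utilipset "k8s.io/kubernetes/pkg/util/ipset"
	utilexec "k8s.io/utils/exec"
)

func main() {
	// New wires the runner to an exec implementation that shells out to ipset(8).
	ipset := utilipset.New(utilexec.New())

	set := &utilipset.IPSet{
		Name:    "KUBE-EXAMPLE",       // hypothetical set name
		SetType: utilipset.HashIPPort, // "hash:ip,port", the default type
	}
	// ignoreExistErr=true maps to the -exist flag, so re-creating an identical set is not an error.
	if err := ipset.CreateSet(set, true); err != nil {
		fmt.Println("create:", err)
		return
	}

	entry := &utilipset.Entry{
		IP:       "192.168.1.2",
		Port:     8080,
		Protocol: utilipset.ProtocolTCP,
		SetType:  utilipset.HashIPPort,
	}
	// Entry.String() renders "192.168.1.2,tcp:8080" for the hash:ip,port set type.
	if err := ipset.AddEntry(entry.String(), set.Name, true); err != nil {
		fmt.Println("add:", err)
	}
}

The sketch relies only on the API visible in the diff above (New, CreateSet, AddEntry, Entry.String and the HashIPPort/ProtocolTCP constants); defaults such as hashsize, maxelem and the inet family are filled in by CreateSet itself.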
@@ -214,10 +173,17 @@ func toVirtualServer(svc *ipvs.Service) (*VirtualServer, error) { Port: svc.Port, Scheduler: svc.SchedName, Protocol: protocolNumbeToString(ProtoType(svc.Protocol)), - Flags: ServiceFlags(svc.Flags), Timeout: svc.Timeout, } + // Test Flags >= 0x2, valid Flags ranges [0x2, 0x3] + if svc.Flags&FlagHashed == 0 { + return nil, fmt.Errorf("Flags of successfully created IPVS service should be >= %d since every service is hashed into the service table", FlagHashed) + } + // Sub Flags to 0x2 + // 011 -> 001, 010 -> 000 + vs.Flags = ServiceFlags(svc.Flags &^ uint32(FlagHashed)) + if vs.Address == nil { if svc.AddressFamily == syscall.AF_INET { vs.Address = net.IPv4zero diff --git a/vendor/k8s.io/kubernetes/pkg/util/ipvs/ipvs_unsupported.go b/vendor/k8s.io/kubernetes/pkg/util/ipvs/ipvs_unsupported.go index c2ccce9143bb..dd4d5b625be7 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/ipvs/ipvs_unsupported.go +++ b/vendor/k8s.io/kubernetes/pkg/util/ipvs/ipvs_unsupported.go @@ -36,14 +36,6 @@ func (runner *runner) Flush() error { return fmt.Errorf("IPVS not supported for this platform") } -func (runner *runner) EnsureVirtualServerAddressBind(*VirtualServer, string) (bool, error) { - return false, fmt.Errorf("IPVS not supported for this platform") -} - -func (runner *runner) UnbindVirtualServerAddress(*VirtualServer, string) error { - return fmt.Errorf("IPVS not supported for this platform") -} - func (runner *runner) AddVirtualServer(*VirtualServer) error { return fmt.Errorf("IPVS not supported for this platform") } diff --git a/vendor/k8s.io/kubernetes/pkg/util/keymutex/BUILD b/vendor/k8s.io/kubernetes/pkg/util/keymutex/BUILD index bde4b1f9f573..c954729108d4 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/keymutex/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/keymutex/BUILD @@ -9,12 +9,14 @@ load( go_library( name = "go_default_library", srcs = ["keymutex.go"], + importpath = "k8s.io/kubernetes/pkg/util/keymutex", deps = ["//vendor/github.com/golang/glog:go_default_library"], ) go_test( name = "go_default_test", srcs = ["keymutex_test.go"], + importpath = "k8s.io/kubernetes/pkg/util/keymutex", library = ":go_default_library", ) diff --git a/vendor/k8s.io/kubernetes/pkg/util/labels/BUILD b/vendor/k8s.io/kubernetes/pkg/util/labels/BUILD index 0b1a9d448f9a..96f42ae579dc 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/labels/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/labels/BUILD @@ -12,12 +12,14 @@ go_library( "doc.go", "labels.go", ], + importpath = "k8s.io/kubernetes/pkg/util/labels", deps = ["//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library"], ) go_test( name = "go_default_test", srcs = ["labels_test.go"], + importpath = "k8s.io/kubernetes/pkg/util/labels", library = ":go_default_library", deps = ["//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/util/limitwriter/BUILD b/vendor/k8s.io/kubernetes/pkg/util/limitwriter/BUILD index b827e9523bda..e04f353a41b5 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/limitwriter/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/limitwriter/BUILD @@ -12,11 +12,13 @@ go_library( "doc.go", "limitwriter.go", ], + importpath = "k8s.io/kubernetes/pkg/util/limitwriter", ) go_test( name = "go_default_test", srcs = ["limitwriter_test.go"], + importpath = "k8s.io/kubernetes/pkg/util/limitwriter", library = ":go_default_library", ) diff --git a/vendor/k8s.io/kubernetes/pkg/util/maps/BUILD b/vendor/k8s.io/kubernetes/pkg/util/maps/BUILD index 264cdadecf79..3c63137f084f 100644 --- 
a/vendor/k8s.io/kubernetes/pkg/util/maps/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/maps/BUILD @@ -11,6 +11,7 @@ go_library( "doc.go", "string.go", ], + importpath = "k8s.io/kubernetes/pkg/util/maps", ) filegroup( diff --git a/vendor/k8s.io/kubernetes/pkg/util/metrics/BUILD b/vendor/k8s.io/kubernetes/pkg/util/metrics/BUILD index 6c330b8f1e0d..d0629b2b3599 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/metrics/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/metrics/BUILD @@ -3,11 +3,13 @@ package(default_visibility = ["//visibility:public"]) load( "@io_bazel_rules_go//go:def.bzl", "go_library", + "go_test", ) go_library( name = "go_default_library", srcs = ["util.go"], + importpath = "k8s.io/kubernetes/pkg/util/metrics", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", @@ -16,6 +18,16 @@ go_library( ], ) +go_test( + name = "go_default_test", + srcs = ["util_test.go"], + importpath = "k8s.io/kubernetes/pkg/util/metrics", + library = ":go_default_library", + deps = [ + "//vendor/k8s.io/client-go/util/flowcontrol:go_default_library", + ], +) + filegroup( name = "package-srcs", srcs = glob(["**"]), diff --git a/vendor/k8s.io/kubernetes/pkg/util/metrics/util.go b/vendor/k8s.io/kubernetes/pkg/util/metrics/util.go index e183be1d55e0..3980ae818add 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/metrics/util.go +++ b/vendor/k8s.io/kubernetes/pkg/util/metrics/util.go @@ -34,38 +34,66 @@ const ( var ( metricsLock sync.Mutex - rateLimiterMetrics = make(map[string]prometheus.Gauge) + rateLimiterMetrics = make(map[string]rateLimiterMetric) ) +type rateLimiterMetric struct { + metric prometheus.Gauge + stopCh chan struct{} +} + func registerRateLimiterMetric(ownerName string) error { metricsLock.Lock() defer metricsLock.Unlock() if _, ok := rateLimiterMetrics[ownerName]; ok { - glog.Errorf("Metric for %v already registered", ownerName) - return fmt.Errorf("Metric for %v already registered", ownerName) + return fmt.Errorf("Rate Limiter Metric for %v already registered", ownerName) } metric := prometheus.NewGauge(prometheus.GaugeOpts{ Name: "rate_limiter_use", Subsystem: ownerName, Help: fmt.Sprintf("A metric measuring the saturation of the rate limiter for %v", ownerName), }) - rateLimiterMetrics[ownerName] = metric - prometheus.MustRegister(metric) + if err := prometheus.Register(metric); err != nil { + return fmt.Errorf("error registering rate limiter usage metric: %v", err) + } + stopCh := make(chan struct{}) + rateLimiterMetrics[ownerName] = rateLimiterMetric{ + metric: metric, + stopCh: stopCh, + } return nil } // RegisterMetricAndTrackRateLimiterUsage registers a metric ownerName_rate_limiter_use in prometheus to track // how much used rateLimiter is and starts a goroutine that updates this metric every updatePeriod func RegisterMetricAndTrackRateLimiterUsage(ownerName string, rateLimiter flowcontrol.RateLimiter) error { - err := registerRateLimiterMetric(ownerName) - if err != nil { + if err := registerRateLimiterMetric(ownerName); err != nil { return err } - go wait.Forever(func() { + go wait.Until(func() { metricsLock.Lock() defer metricsLock.Unlock() - rateLimiterMetrics[ownerName].Set(rateLimiter.Saturation()) - }, updatePeriod) + rateLimiterMetrics[ownerName].metric.Set(rateLimiter.Saturation()) + }, updatePeriod, rateLimiterMetrics[ownerName].stopCh) return nil } + +// UnregisterMetricAndUntrackRateLimiterUsage unregisters a metric ownerName_rate_limiter_use from prometheus and +// stops the goroutine 
that updates this metric +func UnregisterMetricAndUntrackRateLimiterUsage(ownerName string) bool { + metricsLock.Lock() + defer metricsLock.Unlock() + + rlm, ok := rateLimiterMetrics[ownerName] + if !ok { + glog.Warningf("Rate Limiter Metric for %v not registered", ownerName) + return false + } + + close(rlm.stopCh) + prometheus.Unregister(rlm.metric) + delete(rateLimiterMetrics, ownerName) + + return true +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/mount/BUILD b/vendor/k8s.io/kubernetes/pkg/util/mount/BUILD index 4c87dbd2a4a3..af94fd0380e7 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/mount/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/mount/BUILD @@ -11,12 +11,14 @@ go_library( srcs = [ "doc.go", "exec.go", + "exec_mount_unsupported.go", "fake.go", "mount.go", "mount_unsupported.go", "nsenter_mount_unsupported.go", ] + select({ "@io_bazel_rules_go//go/platform:linux_amd64": [ + "exec_mount.go", "mount_linux.go", "nsenter_mount.go", ], @@ -25,12 +27,14 @@ go_library( ], "//conditions:default": [], }), + importpath = "k8s.io/kubernetes/pkg/util/mount", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:linux_amd64": [ "//pkg/util/io:go_default_library", + "//pkg/util/nsenter:go_default_library", "//vendor/golang.org/x/sys/unix:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", ], @@ -44,6 +48,7 @@ go_test( "safe_format_and_mount_test.go", ] + select({ "@io_bazel_rules_go//go/platform:linux_amd64": [ + "exec_mount_test.go", "mount_linux_test.go", "nsenter_mount_test.go", ], @@ -52,6 +57,7 @@ go_test( ], "//conditions:default": [], }), + importpath = "k8s.io/kubernetes/pkg/util/mount", library = ":go_default_library", deps = ["//vendor/k8s.io/utils/exec/testing:go_default_library"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/util/mount/exec_mount.go b/vendor/k8s.io/kubernetes/pkg/util/mount/exec_mount.go new file mode 100644 index 000000000000..1dedc5b7aea1 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/mount/exec_mount.go @@ -0,0 +1,140 @@ +// +build linux + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mount + +import ( + "fmt" + + "github.com/golang/glog" +) + +// ExecMounter is a mounter that uses provided Exec interface to mount and +// unmount a filesystem. For all other calls it uses a wrapped mounter. +type execMounter struct { + wrappedMounter Interface + exec Exec +} + +func NewExecMounter(exec Exec, wrapped Interface) Interface { + return &execMounter{ + wrappedMounter: wrapped, + exec: exec, + } +} + +// execMounter implements mount.Interface +var _ Interface = &execMounter{} + +// Mount runs mount(8) using given exec interface. 
+func (m *execMounter) Mount(source string, target string, fstype string, options []string) error { + bind, bindRemountOpts := isBind(options) + + if bind { + err := m.doExecMount(source, target, fstype, []string{"bind"}) + if err != nil { + return err + } + return m.doExecMount(source, target, fstype, bindRemountOpts) + } + + return m.doExecMount(source, target, fstype, options) +} + +// doExecMount calls exec(mount ) using given exec interface. +func (m *execMounter) doExecMount(source, target, fstype string, options []string) error { + glog.V(5).Infof("Exec Mounting %s %s %s %v", source, target, fstype, options) + mountArgs := makeMountArgs(source, target, fstype, options) + output, err := m.exec.Run("mount", mountArgs...) + glog.V(5).Infof("Exec mounted %v: %v: %s", mountArgs, err, string(output)) + if err != nil { + return fmt.Errorf("mount failed: %v\nMounting command: %s\nMounting arguments: %s %s %s %v\nOutput: %s\n", + err, "mount", source, target, fstype, options, string(output)) + } + + return err +} + +// Unmount runs umount(8) using given exec interface. +func (m *execMounter) Unmount(target string) error { + outputBytes, err := m.exec.Run("umount", target) + if err == nil { + glog.V(5).Infof("Exec unmounted %s: %s", target, string(outputBytes)) + } else { + glog.V(5).Infof("Failed to exec unmount %s: err: %q, umount output: %s", target, err, string(outputBytes)) + } + + return err +} + +// List returns a list of all mounted filesystems. +func (m *execMounter) List() ([]MountPoint, error) { + return m.wrappedMounter.List() +} + +// IsLikelyNotMountPoint determines whether a path is a mountpoint. +func (m *execMounter) IsLikelyNotMountPoint(file string) (bool, error) { + return m.wrappedMounter.IsLikelyNotMountPoint(file) +} + +// DeviceOpened checks if block device in use by calling Open with O_EXCL flag. +// Returns true if open returns errno EBUSY, and false if errno is nil. +// Returns an error if errno is any error other than EBUSY. +// Returns with error if pathname is not a device. +func (m *execMounter) DeviceOpened(pathname string) (bool, error) { + return m.wrappedMounter.DeviceOpened(pathname) +} + +// PathIsDevice uses FileInfo returned from os.Stat to check if path refers +// to a device. 
+func (m *execMounter) PathIsDevice(pathname string) (bool, error) { + return m.wrappedMounter.PathIsDevice(pathname) +} + +//GetDeviceNameFromMount given a mount point, find the volume id from checking /proc/mounts +func (m *execMounter) GetDeviceNameFromMount(mountPath, pluginDir string) (string, error) { + return m.wrappedMounter.GetDeviceNameFromMount(mountPath, pluginDir) +} + +func (m *execMounter) IsMountPointMatch(mp MountPoint, dir string) bool { + return m.wrappedMounter.IsMountPointMatch(mp, dir) +} + +func (m *execMounter) IsNotMountPoint(dir string) (bool, error) { + return m.wrappedMounter.IsNotMountPoint(dir) +} + +func (m *execMounter) MakeRShared(path string) error { + return m.wrappedMounter.MakeRShared(path) +} + +func (m *execMounter) GetFileType(pathname string) (FileType, error) { + return m.wrappedMounter.GetFileType(pathname) +} + +func (m *execMounter) MakeFile(pathname string) error { + return m.wrappedMounter.MakeFile(pathname) +} + +func (m *execMounter) MakeDir(pathname string) error { + return m.wrappedMounter.MakeDir(pathname) +} + +func (m *execMounter) ExistsPath(pathname string) bool { + return m.wrappedMounter.ExistsPath(pathname) +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/mount/exec_mount_unsupported.go b/vendor/k8s.io/kubernetes/pkg/util/mount/exec_mount_unsupported.go new file mode 100644 index 000000000000..136704b23e23 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/mount/exec_mount_unsupported.go @@ -0,0 +1,87 @@ +// +build !linux + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mount + +import ( + "errors" +) + +type execMounter struct{} + +// ExecMounter is a mounter that uses provided Exec interface to mount and +// unmount a filesystem. For all other calls it uses a wrapped mounter. 
+func NewExecMounter(exec Exec, wrapped Interface) Interface { + return &execMounter{} +} + +func (mounter *execMounter) Mount(source string, target string, fstype string, options []string) error { + return nil +} + +func (mounter *execMounter) Unmount(target string) error { + return nil +} + +func (mounter *execMounter) List() ([]MountPoint, error) { + return []MountPoint{}, nil +} + +func (mounter *execMounter) IsMountPointMatch(mp MountPoint, dir string) bool { + return (mp.Path == dir) +} + +func (mounter *execMounter) IsNotMountPoint(dir string) (bool, error) { + return IsNotMountPoint(mounter, dir) +} + +func (mounter *execMounter) IsLikelyNotMountPoint(file string) (bool, error) { + return true, nil +} + +func (mounter *execMounter) GetDeviceNameFromMount(mountPath, pluginDir string) (string, error) { + return "", nil +} + +func (mounter *execMounter) DeviceOpened(pathname string) (bool, error) { + return false, nil +} + +func (mounter *execMounter) PathIsDevice(pathname string) (bool, error) { + return true, nil +} + +func (mounter *execMounter) MakeRShared(path string) error { + return nil +} + +func (mounter *execMounter) GetFileType(pathname string) (FileType, error) { + return FileType("fake"), errors.New("not implemented") +} + +func (mounter *execMounter) MakeDir(pathname string) error { + return nil +} + +func (mounter *execMounter) MakeFile(pathname string) error { + return nil +} + +func (mounter *execMounter) ExistsPath(pathname string) bool { + return true +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/mount/fake.go b/vendor/k8s.io/kubernetes/pkg/util/mount/fake.go index 480d15b9a3e3..f4e2e411de1f 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/mount/fake.go +++ b/vendor/k8s.io/kubernetes/pkg/util/mount/fake.go @@ -17,6 +17,7 @@ limitations under the License. package mount import ( + "os" "path/filepath" "sync" @@ -125,7 +126,7 @@ func (f *FakeMounter) List() ([]MountPoint, error) { } func (f *FakeMounter) IsMountPointMatch(mp MountPoint, dir string) bool { - return (mp.Path == dir) + return mp.Path == dir } func (f *FakeMounter) IsNotMountPoint(dir string) (bool, error) { @@ -136,6 +137,11 @@ func (f *FakeMounter) IsLikelyNotMountPoint(file string) (bool, error) { f.mutex.Lock() defer f.mutex.Unlock() + _, err := os.Stat(file) + if err != nil { + return true, err + } + // If file is a symlink, get its absolute path absFile, err := filepath.EvalSymlinks(file) if err != nil { @@ -175,3 +181,19 @@ func (f *FakeMounter) GetDeviceNameFromMount(mountPath, pluginDir string) (strin func (f *FakeMounter) MakeRShared(path string) error { return nil } + +func (f *FakeMounter) GetFileType(pathname string) (FileType, error) { + return FileType("fake"), nil +} + +func (f *FakeMounter) MakeDir(pathname string) error { + return nil +} + +func (f *FakeMounter) MakeFile(pathname string) error { + return nil +} + +func (f *FakeMounter) ExistsPath(pathname string) bool { + return false +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/mount/mount.go b/vendor/k8s.io/kubernetes/pkg/util/mount/mount.go index 5be3ca40af53..953b571900a7 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/mount/mount.go +++ b/vendor/k8s.io/kubernetes/pkg/util/mount/mount.go @@ -19,18 +19,20 @@ limitations under the License. 
package mount import ( - "fmt" - "path" "path/filepath" - "strings" - - "github.com/golang/glog" ) +type FileType string + const ( // Default mount command if mounter path is not specified - defaultMountCommand = "mount" - MountsInGlobalPDPath = "mounts" + defaultMountCommand = "mount" + MountsInGlobalPDPath = "mounts" + FileTypeDirectory FileType = "Directory" + FileTypeFile FileType = "File" + FileTypeSocket FileType = "Socket" + FileTypeCharDev FileType = "CharDevice" + FileTypeBlockDev FileType = "BlockDevice" ) type Interface interface { @@ -70,6 +72,18 @@ type Interface interface { // MakeRShared checks that given path is on a mount with 'rshared' mount // propagation. If not, it bind-mounts the path as rshared. MakeRShared(path string) error + // GetFileType checks for file/directory/socket/block/character devices. + // Will operate in the host mount namespace if kubelet is running in a container + GetFileType(pathname string) (FileType, error) + // MakeFile creates an empty file. + // Will operate in the host mount namespace if kubelet is running in a container + MakeFile(pathname string) error + // MakeDir creates a new directory. + // Will operate in the host mount namespace if kubelet is running in a container + MakeDir(pathname string) error + // ExistsPath checks whether the path exists. + // Will operate in the host mount namespace if kubelet is running in a container + ExistsPath(pathname string) bool } // Exec executes command where mount utilities are. This can be either the host, @@ -119,41 +133,6 @@ func (mounter *SafeFormatAndMount) FormatAndMount(source string, target string, return mounter.formatAndMount(source, target, fstype, options) } -// GetMountRefs finds all other references to the device referenced -// by mountPath; returns a list of paths. -func GetMountRefs(mounter Interface, mountPath string) ([]string, error) { - mps, err := mounter.List() - if err != nil { - return nil, err - } - // Find the device name. - deviceName := "" - // If mountPath is symlink, need get its target path. - slTarget, err := filepath.EvalSymlinks(mountPath) - if err != nil { - slTarget = mountPath - } - for i := range mps { - if mps[i].Path == slTarget { - deviceName = mps[i].Device - break - } - } - - // Find all references to the device. - var refs []string - if deviceName == "" { - glog.Warningf("could not determine device for path: %q", mountPath) - } else { - for i := range mps { - if mps[i].Device == deviceName && mps[i].Path != slTarget { - refs = append(refs, mps[i].Path) - } - } - } - return refs, nil -} - // GetMountRefsByDev finds all references to the device provided // by mountPath; returns a list of paths. func GetMountRefsByDev(mounter Interface, mountPath string) ([]string, error) { @@ -220,34 +199,6 @@ func GetDeviceNameFromMount(mounter Interface, mountPath string) (string, int, e return device, refCount, nil } -// getDeviceNameFromMount find the device name from /proc/mounts in which -// the mount path reference should match the given plugin directory. 
In case no mount path reference -// matches, returns the volume name taken from its given mountPath -func getDeviceNameFromMount(mounter Interface, mountPath, pluginDir string) (string, error) { - refs, err := GetMountRefs(mounter, mountPath) - if err != nil { - glog.V(4).Infof("GetMountRefs failed for mount path %q: %v", mountPath, err) - return "", err - } - if len(refs) == 0 { - glog.V(4).Infof("Directory %s is not mounted", mountPath) - return "", fmt.Errorf("directory %s is not mounted", mountPath) - } - basemountPath := path.Join(pluginDir, MountsInGlobalPDPath) - for _, ref := range refs { - if strings.HasPrefix(ref, basemountPath) { - volumeID, err := filepath.Rel(basemountPath, ref) - if err != nil { - glog.Errorf("Failed to get volume id from mount %s - %v", mountPath, err) - return "", err - } - return volumeID, nil - } - } - - return path.Base(mountPath), nil -} - // IsNotMountPoint determines if a directory is a mountpoint. // It should return ErrNotExist when the directory does not exist. // This method uses the List() of all mountpoints diff --git a/vendor/k8s.io/kubernetes/pkg/util/mount/mount_linux.go b/vendor/k8s.io/kubernetes/pkg/util/mount/mount_linux.go index 0efc5d6801f5..71064f321e35 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/mount/mount_linux.go +++ b/vendor/k8s.io/kubernetes/pkg/util/mount/mount_linux.go @@ -22,6 +22,8 @@ import ( "fmt" "os" "os/exec" + "path" + "path/filepath" "strconv" "strings" "syscall" @@ -42,9 +44,6 @@ const ( procMountsPath = "/proc/mounts" // Location of the mountinfo file procMountInfoPath = "/proc/self/mountinfo" -) - -const ( // 'fsck' found errors and corrected them fsckErrorsCorrected = 1 // 'fsck' found errors but exited without correcting them @@ -71,12 +70,12 @@ func New(mounterPath string) Interface { // Mount mounts source to target as fstype with given options. 'source' and 'fstype' must // be an emtpy string in case it's not required, e.g. for remount, or for auto filesystem -// type, where kernel handles fs type for you. The mount 'options' is a list of options, +// type, where kernel handles fstype for you. The mount 'options' is a list of options, // currently come from mount(8), e.g. "ro", "remount", "bind", etc. If no more option is // required, call Mount with an empty string list or nil. func (mounter *Mounter) Mount(source string, target string, fstype string, options []string) error { // Path to mounter binary if containerized mounter is needed. Otherwise, it is set to empty. - // All Linux distros are expected to be shipped with a mount utility that an support bind mounts. + // All Linux distros are expected to be shipped with a mount utility that a support bind mounts. mounterPath := "" bind, bindRemountOpts := isBind(options) if bind { @@ -144,6 +143,41 @@ func (m *Mounter) doMount(mounterPath string, mountCmd string, source string, ta return err } +// GetMountRefs finds all other references to the device referenced +// by mountPath; returns a list of paths. +func GetMountRefs(mounter Interface, mountPath string) ([]string, error) { + mps, err := mounter.List() + if err != nil { + return nil, err + } + // Find the device name. + deviceName := "" + // If mountPath is symlink, need get its target path. + slTarget, err := filepath.EvalSymlinks(mountPath) + if err != nil { + slTarget = mountPath + } + for i := range mps { + if mps[i].Path == slTarget { + deviceName = mps[i].Device + break + } + } + + // Find all references to the device. 
+ var refs []string + if deviceName == "" { + glog.Warningf("could not determine device for path: %q", mountPath) + } else { + for i := range mps { + if mps[i].Device == deviceName && mps[i].Path != slTarget { + refs = append(refs, mps[i].Path) + } + } + } + return refs, nil +} + // detectSystemd returns true if OS runs with systemd as init. When not sure // (permission errors, ...), it returns false. // There may be different ways how to detect systemd, this one makes sure that @@ -255,17 +289,29 @@ func (mounter *Mounter) DeviceOpened(pathname string) (bool, error) { // PathIsDevice uses FileInfo returned from os.Stat to check if path refers // to a device. func (mounter *Mounter) PathIsDevice(pathname string) (bool, error) { - return pathIsDevice(pathname) + pathType, err := mounter.GetFileType(pathname) + isDevice := pathType == FileTypeCharDev || pathType == FileTypeBlockDev + return isDevice, err } func exclusiveOpenFailsOnDevice(pathname string) (bool, error) { - isDevice, err := pathIsDevice(pathname) + var isDevice bool + finfo, err := os.Stat(pathname) + if os.IsNotExist(err) { + isDevice = false + } + // err in call to os.Stat if err != nil { return false, fmt.Errorf( "PathIsDevice failed for path %q: %v", pathname, err) } + // path refers to a device + if finfo.Mode()&os.ModeDevice != 0 { + isDevice = true + } + if !isDevice { glog.Errorf("Path %q is not refering to a device.", pathname) return false, nil @@ -285,26 +331,37 @@ func exclusiveOpenFailsOnDevice(pathname string) (bool, error) { return false, errno } -func pathIsDevice(pathname string) (bool, error) { - finfo, err := os.Stat(pathname) - if os.IsNotExist(err) { - return false, nil - } - // err in call to os.Stat +//GetDeviceNameFromMount: given a mount point, find the device name from its global mount point +func (mounter *Mounter) GetDeviceNameFromMount(mountPath, pluginDir string) (string, error) { + return getDeviceNameFromMount(mounter, mountPath, pluginDir) +} + +// getDeviceNameFromMount find the device name from /proc/mounts in which +// the mount path reference should match the given plugin directory. 
In case no mount path reference +// matches, returns the volume name taken from its given mountPath +func getDeviceNameFromMount(mounter Interface, mountPath, pluginDir string) (string, error) { + refs, err := GetMountRefs(mounter, mountPath) if err != nil { - return false, err + glog.V(4).Infof("GetMountRefs failed for mount path %q: %v", mountPath, err) + return "", err } - // path refers to a device - if finfo.Mode()&os.ModeDevice != 0 { - return true, nil + if len(refs) == 0 { + glog.V(4).Infof("Directory %s is not mounted", mountPath) + return "", fmt.Errorf("directory %s is not mounted", mountPath) + } + basemountPath := path.Join(pluginDir, MountsInGlobalPDPath) + for _, ref := range refs { + if strings.HasPrefix(ref, basemountPath) { + volumeID, err := filepath.Rel(basemountPath, ref) + if err != nil { + glog.Errorf("Failed to get volume id from mount %s - %v", mountPath, err) + return "", err + } + return volumeID, nil + } } - // path does not refer to device - return false, nil -} -//GetDeviceNameFromMount: given a mount point, find the device name from its global mount point -func (mounter *Mounter) GetDeviceNameFromMount(mountPath, pluginDir string) (string, error) { - return getDeviceNameFromMount(mounter, mountPath, pluginDir) + return path.Base(mountPath), nil } func listProcMounts(mountFilePath string) ([]MountPoint, error) { @@ -353,9 +410,64 @@ func parseProcMounts(content []byte) ([]MountPoint, error) { } func (mounter *Mounter) MakeRShared(path string) error { - mountCmd := defaultMountCommand - mountArgs := []string{} - return doMakeRShared(path, procMountInfoPath, mountCmd, mountArgs) + return doMakeRShared(path, procMountInfoPath) +} + +func (mounter *Mounter) GetFileType(pathname string) (FileType, error) { + var pathType FileType + finfo, err := os.Stat(pathname) + if os.IsNotExist(err) { + return pathType, fmt.Errorf("path %q does not exist", pathname) + } + // err in call to os.Stat + if err != nil { + return pathType, err + } + + mode := finfo.Sys().(*syscall.Stat_t).Mode + switch mode & syscall.S_IFMT { + case syscall.S_IFSOCK: + return FileTypeSocket, nil + case syscall.S_IFBLK: + return FileTypeBlockDev, nil + case syscall.S_IFCHR: + return FileTypeBlockDev, nil + case syscall.S_IFDIR: + return FileTypeDirectory, nil + case syscall.S_IFREG: + return FileTypeFile, nil + } + + return pathType, fmt.Errorf("only recognise file, directory, socket, block device and character device") +} + +func (mounter *Mounter) MakeDir(pathname string) error { + err := os.MkdirAll(pathname, os.FileMode(0755)) + if err != nil { + if !os.IsExist(err) { + return err + } + } + return nil +} + +func (mounter *Mounter) MakeFile(pathname string) error { + f, err := os.OpenFile(pathname, os.O_CREATE, os.FileMode(0644)) + defer f.Close() + if err != nil { + if !os.IsExist(err) { + return err + } + } + return nil +} + +func (mounter *Mounter) ExistsPath(pathname string) bool { + _, err := os.Stat(pathname) + if err != nil { + return false + } + return true } // formatAndMount uses unix utils to format and mount the given disk @@ -386,7 +498,7 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, if mountErr != nil { // Mount failed. This indicates either that the disk is unformatted or // it contains an unexpected filesystem. 
- existingFormat, err := mounter.getDiskFormat(source) + existingFormat, err := mounter.GetDiskFormat(source) if err != nil { return err } @@ -424,8 +536,8 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, return mountErr } -// diskLooksUnformatted uses 'lsblk' to see if the given disk is unformated -func (mounter *SafeFormatAndMount) getDiskFormat(disk string) (string, error) { +// GetDiskFormat uses 'lsblk' to see if the given disk is unformated +func (mounter *SafeFormatAndMount) GetDiskFormat(disk string) (string, error) { args := []string{"-n", "-o", "FSTYPE", disk} glog.V(4).Infof("Attempting to determine if disk %q is formatted using lsblk with args: (%v)", disk, args) dataOut, err := mounter.Exec.Run("lsblk", args...) @@ -526,7 +638,7 @@ func parseMountInfo(filename string) ([]mountInfo, error) { // path is shared and bind-mounts it as rshared if needed. mountCmd and // mountArgs are expected to contain mount-like command, doMakeRShared will add // '--bind ' and '--make-rshared ' to mountArgs. -func doMakeRShared(path string, mountInfoFilename string, mountCmd string, mountArgs []string) error { +func doMakeRShared(path string, mountInfoFilename string) error { shared, err := isShared(path, mountInfoFilename) if err != nil { return err diff --git a/vendor/k8s.io/kubernetes/pkg/util/mount/mount_unsupported.go b/vendor/k8s.io/kubernetes/pkg/util/mount/mount_unsupported.go index 936c30aba5b5..87d1e3748195 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/mount/mount_unsupported.go +++ b/vendor/k8s.io/kubernetes/pkg/util/mount/mount_unsupported.go @@ -18,6 +18,10 @@ limitations under the License. package mount +import ( + "errors" +) + type Mounter struct { mounterPath string } @@ -39,6 +43,12 @@ func (mounter *Mounter) Unmount(target string) error { return nil } +// GetMountRefs finds all other references to the device referenced +// by mountPath; returns a list of paths. 
+func GetMountRefs(mounter Interface, mountPath string) ([]string, error) { + return []string{}, nil +} + func (mounter *Mounter) List() ([]MountPoint, error) { return []MountPoint{}, nil } @@ -59,6 +69,10 @@ func (mounter *Mounter) GetDeviceNameFromMount(mountPath, pluginDir string) (str return "", nil } +func getDeviceNameFromMount(mounter Interface, mountPath, pluginDir string) (string, error) { + return "", nil +} + func (mounter *Mounter) DeviceOpened(pathname string) (bool, error) { return false, nil } @@ -72,9 +86,25 @@ func (mounter *Mounter) MakeRShared(path string) error { } func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, fstype string, options []string) error { - return nil + return mounter.Interface.Mount(source, target, fstype, options) } func (mounter *SafeFormatAndMount) diskLooksUnformatted(disk string) (bool, error) { return true, nil } + +func (mounter *Mounter) GetFileType(pathname string) (FileType, error) { + return FileType("fake"), errors.New("not implemented") +} + +func (mounter *Mounter) MakeDir(pathname string) error { + return nil +} + +func (mounter *Mounter) MakeFile(pathname string) error { + return nil +} + +func (mounter *Mounter) ExistsPath(pathname string) bool { + return true +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/mount/mount_windows.go b/vendor/k8s.io/kubernetes/pkg/util/mount/mount_windows.go index 63294df9f0da..b39951add1a1 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/mount/mount_windows.go +++ b/vendor/k8s.io/kubernetes/pkg/util/mount/mount_windows.go @@ -22,9 +22,11 @@ import ( "fmt" "os" "os/exec" + "path" "path/filepath" "strconv" "strings" + "syscall" "github.com/golang/glog" ) @@ -74,8 +76,24 @@ func (mounter *Mounter) Mount(source string, target string, fstype string, optio return nil } - // empty implementation for mounting azure file - return os.MkdirAll(target, 0755) + // currently only cifs mount is supported + if strings.ToLower(fstype) != "cifs" { + return fmt.Errorf("azureMount: only cifs mount is supported now, fstype: %q, mounting source (%q), target (%q), with options (%q)", fstype, source, target, options) + } + + cmdLine := fmt.Sprintf(`$User = "%s";$PWord = ConvertTo-SecureString -String "%s" -AsPlainText -Force;`+ + `$Credential = New-Object -TypeName System.Management.Automation.PSCredential -ArgumentList $User, $PWord`, + options[0], options[1]) + + bindSource = source + cmdLine += fmt.Sprintf(";New-SmbGlobalMapping -RemotePath %s -Credential $Credential", source) + + if output, err := exec.Command("powershell", "/c", cmdLine).CombinedOutput(); err != nil { + // we don't return error here, even though New-SmbGlobalMapping failed, we still make it successful, + // will return error when Windows 2016 RS3 is ready on azure + glog.Errorf("azureMount: SmbGlobalMapping failed: %v, only SMB mount is supported now, output: %q", err, string(output)) + return os.MkdirAll(target, 0755) + } } if output, err := exec.Command("cmd", "/c", "mklink", "/D", target, bindSource).CombinedOutput(); err != nil { @@ -97,6 +115,16 @@ func (mounter *Mounter) Unmount(target string) error { return nil } +// GetMountRefs finds all other references to the device(drive) referenced +// by mountPath; returns a list of paths. +func GetMountRefs(mounter Interface, mountPath string) ([]string, error) { + refs, err := getAllParentLinks(normalizeWindowsPath(mountPath)) + if err != nil { + return nil, err + } + return refs, nil +} + // List returns a list of all mounted filesystems. 
todo func (mounter *Mounter) List() ([]MountPoint, error) { return []MountPoint{}, nil @@ -131,6 +159,33 @@ func (mounter *Mounter) GetDeviceNameFromMount(mountPath, pluginDir string) (str return getDeviceNameFromMount(mounter, mountPath, pluginDir) } +// getDeviceNameFromMount find the device(drive) name in which +// the mount path reference should match the given plugin directory. In case no mount path reference +// matches, returns the volume name taken from its given mountPath +func getDeviceNameFromMount(mounter Interface, mountPath, pluginDir string) (string, error) { + refs, err := GetMountRefs(mounter, mountPath) + if err != nil { + glog.V(4).Infof("GetMountRefs failed for mount path %q: %v", mountPath, err) + return "", err + } + if len(refs) == 0 { + return "", fmt.Errorf("directory %s is not mounted", mountPath) + } + basemountPath := normalizeWindowsPath(path.Join(pluginDir, MountsInGlobalPDPath)) + for _, ref := range refs { + if strings.Contains(ref, basemountPath) { + volumeID, err := filepath.Rel(normalizeWindowsPath(basemountPath), ref) + if err != nil { + glog.Errorf("Failed to get volume id from mount %s - %v", mountPath, err) + return "", err + } + return volumeID, nil + } + } + + return path.Base(mountPath), nil +} + // DeviceOpened determines if the device is in use elsewhere func (mounter *Mounter) DeviceOpened(pathname string) (bool, error) { return false, nil @@ -147,6 +202,67 @@ func (mounter *Mounter) MakeRShared(path string) error { return nil } +// GetFileType checks for sockets/block/character devices +func (mounter *Mounter) GetFileType(pathname string) (FileType, error) { + var pathType FileType + info, err := os.Stat(pathname) + if os.IsNotExist(err) { + return pathType, fmt.Errorf("path %q does not exist", pathname) + } + // err in call to os.Stat + if err != nil { + return pathType, err + } + + mode := info.Sys().(*syscall.Win32FileAttributeData).FileAttributes + switch mode & syscall.S_IFMT { + case syscall.S_IFSOCK: + return FileTypeSocket, nil + case syscall.S_IFBLK: + return FileTypeBlockDev, nil + case syscall.S_IFCHR: + return FileTypeCharDev, nil + case syscall.S_IFDIR: + return FileTypeDirectory, nil + case syscall.S_IFREG: + return FileTypeFile, nil + } + + return pathType, fmt.Errorf("only recognise file, directory, socket, block device and character device") +} + +// MakeFile creates a new directory +func (mounter *Mounter) MakeDir(pathname string) error { + err := os.MkdirAll(pathname, os.FileMode(0755)) + if err != nil { + if !os.IsExist(err) { + return err + } + } + return nil +} + +// MakeFile creates an empty file +func (mounter *Mounter) MakeFile(pathname string) error { + f, err := os.OpenFile(pathname, os.O_CREATE, os.FileMode(0644)) + defer f.Close() + if err != nil { + if !os.IsExist(err) { + return err + } + } + return nil +} + +// ExistsPath checks whether the path exists +func (mounter *Mounter) ExistsPath(pathname string) bool { + _, err := os.Stat(pathname) + if err != nil { + return false + } + return true +} + func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, fstype string, options []string) error { // Try to mount the disk glog.V(4).Infof("Attempting to formatAndMount disk: %s %s %s", fstype, source, target) @@ -204,3 +320,30 @@ func getDriveLetterByDiskNumber(diskNum string, exec Exec) (string, error) { } return string(output)[:1], nil } + +// getAllParentLinks walks all symbolic links and return all the parent targets recursively +func getAllParentLinks(path string) ([]string, error) { + const 
maxIter = 255 + links := []string{} + for { + links = append(links, path) + if len(links) > maxIter { + return links, fmt.Errorf("unexpected length of parent links: %v", links) + } + + fi, err := os.Lstat(path) + if err != nil { + return links, fmt.Errorf("Lstat: %v", err) + } + if fi.Mode()&os.ModeSymlink == 0 { + break + } + + path, err = os.Readlink(path) + if err != nil { + return links, fmt.Errorf("Readlink error: %v", err) + } + } + + return links, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/mount/nsenter_mount.go b/vendor/k8s.io/kubernetes/pkg/util/mount/nsenter_mount.go index 24a095a6c392..a6c7869b0d82 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/mount/nsenter_mount.go +++ b/vendor/k8s.io/kubernetes/pkg/util/mount/nsenter_mount.go @@ -25,78 +25,30 @@ import ( "strings" "github.com/golang/glog" - "k8s.io/utils/exec" + "k8s.io/kubernetes/pkg/util/nsenter" ) -// NsenterMounter is part of experimental support for running the kubelet -// in a container. Currently, all docker containers receive their own mount -// namespaces. NsenterMounter works by executing nsenter to run commands in +const ( + // hostProcMountsPath is the default mount path for rootfs + hostProcMountsPath = "/rootfs/proc/1/mounts" + // hostProcMountinfoPath is the default mount info path for rootfs + hostProcMountinfoPath = "/rootfs/proc/1/mountinfo" +) + +// Currently, all docker containers receive their own mount namespaces. +// NsenterMounter works by executing nsenter to run commands in // the host's mount namespace. -// -// NsenterMounter requires: -// -// 1. Docker >= 1.6 due to the dependency on the slave propagation mode -// of the bind-mount of the kubelet root directory in the container. -// Docker 1.5 used a private propagation mode for bind-mounts, so mounts -// performed in the host's mount namespace do not propagate out to the -// bind-mount in this docker version. -// 2. The host's root filesystem must be available at /rootfs -// 3. The nsenter binary must be on the Kubelet process' PATH in the container's -// filesystem. -// 4. The Kubelet process must have CAP_SYS_ADMIN (required by nsenter); at -// the present, this effectively means that the kubelet is running in a -// privileged container. -// 5. The volume path used by the Kubelet must be the same inside and outside -// the container and be writable by the container (to initialize volume) -// contents. TODO: remove this requirement. -// 6. The host image must have mount, findmnt, and umount binaries in /bin, -// /usr/sbin, or /usr/bin -// 7. 
The host image should have systemd-run in /bin, /usr/sbin, or /usr/bin -// For more information about mount propagation modes, see: -// https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt type NsenterMounter struct { - // a map of commands to their paths on the host filesystem - paths map[string]string + ne *nsenter.Nsenter } func NewNsenterMounter() *NsenterMounter { - m := &NsenterMounter{ - paths: map[string]string{ - "mount": "", - "findmnt": "", - "umount": "", - "systemd-run": "", - }, - } - // search for the mount command in other locations besides /usr/bin - for binary := range m.paths { - // default to root - m.paths[binary] = filepath.Join("/", binary) - for _, path := range []string{"/bin", "/usr/sbin", "/usr/bin"} { - binPath := filepath.Join(path, binary) - if _, err := os.Stat(filepath.Join(hostRootFsPath, binPath)); err != nil { - continue - } - m.paths[binary] = binPath - break - } - // TODO: error, so that the kubelet can stop if the mounts don't exist - // (don't forget that systemd-run is optional) - } - return m + return &NsenterMounter{ne: nsenter.NewNsenter()} } // NsenterMounter implements mount.Interface var _ = Interface(&NsenterMounter{}) -const ( - hostRootFsPath = "/rootfs" - hostProcMountsPath = "/rootfs/proc/1/mounts" - hostProcMountinfoPath = "/rootfs/proc/1/mountinfo" - hostMountNamespacePath = "/rootfs/proc/1/ns/mnt" - nsenterPath = "nsenter" -) - // Mount runs mount(8) in the host's root mount namespace. Aside from this // aspect, Mount has the same semantics as the mounter returned by mount.New() func (n *NsenterMounter) Mount(source string, target string, fstype string, options []string) error { @@ -116,26 +68,22 @@ func (n *NsenterMounter) Mount(source string, target string, fstype string, opti // doNsenterMount nsenters the host's mount namespace and performs the // requested mount. func (n *NsenterMounter) doNsenterMount(source, target, fstype string, options []string) error { - glog.V(5).Infof("nsenter Mounting %s %s %s %v", source, target, fstype, options) - args := n.makeNsenterArgs(source, target, fstype, options) - - glog.V(5).Infof("Mount command: %v %v", nsenterPath, args) - exec := exec.New() - outputBytes, err := exec.Command(nsenterPath, args...).CombinedOutput() + glog.V(5).Infof("nsenter mount %s %s %s %v", source, target, fstype, options) + cmd, args := n.makeNsenterArgs(source, target, fstype, options) + outputBytes, err := n.ne.Exec(cmd, args).CombinedOutput() if len(outputBytes) != 0 { glog.V(5).Infof("Output of mounting %s to %s: %v", source, target, string(outputBytes)) } - return err } // makeNsenterArgs makes a list of argument to nsenter in order to do the // requested mount. -func (n *NsenterMounter) makeNsenterArgs(source, target, fstype string, options []string) []string { - mountCmd := n.absHostPath("mount") +func (n *NsenterMounter) makeNsenterArgs(source, target, fstype string, options []string) (string, []string) { + mountCmd := n.ne.AbsHostPath("mount") mountArgs := makeMountArgs(source, target, fstype, options) - if systemdRunPath, hasSystemd := n.paths["systemd-run"]; hasSystemd { + if systemdRunPath, hasSystemd := n.ne.SupportsSystemd(); hasSystemd { // Complete command line: // nsenter --mount=/rootfs/proc/1/ns/mnt -- /bin/systemd-run --description=... 
--scope -- /bin/mount -t // Expected flow is: @@ -165,34 +113,20 @@ func (n *NsenterMounter) makeNsenterArgs(source, target, fstype string, options // No code here, mountCmd and mountArgs use /bin/mount } - nsenterArgs := []string{ - "--mount=" + hostMountNamespacePath, - "--", - mountCmd, - } - nsenterArgs = append(nsenterArgs, mountArgs...) - - return nsenterArgs + return mountCmd, mountArgs } // Unmount runs umount(8) in the host's mount namespace. func (n *NsenterMounter) Unmount(target string) error { - args := []string{ - "--mount=" + hostMountNamespacePath, - "--", - n.absHostPath("umount"), - target, - } + args := []string{target} // No need to execute systemd-run here, it's enough that unmount is executed // in the host's mount namespace. It will finish appropriate fuse daemon(s) // running in any scope. - glog.V(5).Infof("Unmount command: %v %v", nsenterPath, args) - exec := exec.New() - outputBytes, err := exec.Command(nsenterPath, args...).CombinedOutput() + glog.V(5).Infof("nsenter unmount args: %v", args) + outputBytes, err := n.ne.Exec("umount", args).CombinedOutput() if len(outputBytes) != 0 { glog.V(5).Infof("Output of unmounting %s: %v", target, string(outputBytes)) } - return err } @@ -207,7 +141,7 @@ func (m *NsenterMounter) IsNotMountPoint(dir string) (bool, error) { func (*NsenterMounter) IsMountPointMatch(mp MountPoint, dir string) bool { deletedDir := fmt.Sprintf("%s\\040(deleted)", dir) - return ((mp.Path == dir) || (mp.Path == deletedDir)) + return (mp.Path == dir) || (mp.Path == deletedDir) } // IsLikelyNotMountPoint determines whether a path is a mountpoint by calling findmnt @@ -227,11 +161,9 @@ func (n *NsenterMounter) IsLikelyNotMountPoint(file string) (bool, error) { // the first of multiple possible mountpoints using --first-only. // Also add fstype output to make sure that the output of target file will give the full path // TODO: Need more refactoring for this function. Track the solution with issue #26996 - args := []string{"--mount=" + hostMountNamespacePath, "--", n.absHostPath("findmnt"), "-o", "target,fstype", "--noheadings", "--first-only", "--target", file} - glog.V(5).Infof("findmnt command: %v %v", nsenterPath, args) - - exec := exec.New() - out, err := exec.Command(nsenterPath, args...).CombinedOutput() + args := []string{"-o", "target,fstype", "--noheadings", "--first-only", "--target", file} + glog.V(5).Infof("nsenter findmnt args: %v", args) + out, err := n.ne.Exec("findmnt", args).CombinedOutput() if err != nil { glog.V(2).Infof("Failed findmnt command for path %s: %v", file, err) // Different operating systems behave differently for paths which are not mount points. @@ -277,7 +209,9 @@ func (n *NsenterMounter) DeviceOpened(pathname string) (bool, error) { // PathIsDevice uses FileInfo returned from os.Stat to check if path refers // to a device. 
func (n *NsenterMounter) PathIsDevice(pathname string) (bool, error) { - return pathIsDevice(pathname) + pathType, err := n.GetFileType(pathname) + isDevice := pathType == FileTypeCharDev || pathType == FileTypeBlockDev + return isDevice, err } //GetDeviceNameFromMount given a mount point, find the volume id from checking /proc/mounts @@ -285,20 +219,54 @@ func (n *NsenterMounter) GetDeviceNameFromMount(mountPath, pluginDir string) (st return getDeviceNameFromMount(n, mountPath, pluginDir) } -func (n *NsenterMounter) absHostPath(command string) string { - path, ok := n.paths[command] - if !ok { - return command +func (n *NsenterMounter) MakeRShared(path string) error { + return doMakeRShared(path, hostProcMountinfoPath) +} + +func (mounter *NsenterMounter) GetFileType(pathname string) (FileType, error) { + var pathType FileType + outputBytes, err := mounter.ne.Exec("stat", []string{"-L", `--printf "%F"`, pathname}).CombinedOutput() + if err != nil { + return pathType, err + } + + switch string(outputBytes) { + case "socket": + return FileTypeSocket, nil + case "character special file": + return FileTypeCharDev, nil + case "block special file": + return FileTypeBlockDev, nil + case "directory": + return FileTypeDirectory, nil + case "regular file": + return FileTypeFile, nil } - return path + + return pathType, fmt.Errorf("only recognise file, directory, socket, block device and character device") } -func (n *NsenterMounter) MakeRShared(path string) error { - nsenterCmd := nsenterPath - nsenterArgs := []string{ - "--mount=" + hostMountNamespacePath, - "--", - n.absHostPath("mount"), +func (mounter *NsenterMounter) MakeDir(pathname string) error { + args := []string{"-p", pathname} + if _, err := mounter.ne.Exec("mkdir", args).CombinedOutput(); err != nil { + return err + } + return nil +} + +func (mounter *NsenterMounter) MakeFile(pathname string) error { + args := []string{pathname} + if _, err := mounter.ne.Exec("touch", args).CombinedOutput(); err != nil { + return err + } + return nil +} + +func (mounter *NsenterMounter) ExistsPath(pathname string) bool { + args := []string{pathname} + _, err := mounter.ne.Exec("ls", args).CombinedOutput() + if err == nil { + return true } - return doMakeRShared(path, hostProcMountinfoPath, nsenterCmd, nsenterArgs) + return false } diff --git a/vendor/k8s.io/kubernetes/pkg/util/mount/nsenter_mount_unsupported.go b/vendor/k8s.io/kubernetes/pkg/util/mount/nsenter_mount_unsupported.go index f881c508711c..f4eb692f9584 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/mount/nsenter_mount_unsupported.go +++ b/vendor/k8s.io/kubernetes/pkg/util/mount/nsenter_mount_unsupported.go @@ -18,6 +18,10 @@ limitations under the License. 
package mount +import ( + "errors" +) + type NsenterMounter struct{} func NewNsenterMounter() *NsenterMounter { @@ -65,3 +69,19 @@ func (*NsenterMounter) GetDeviceNameFromMount(mountPath, pluginDir string) (stri func (*NsenterMounter) MakeRShared(path string) error { return nil } + +func (*NsenterMounter) GetFileType(_ string) (FileType, error) { + return FileType("fake"), errors.New("not implemented") +} + +func (*NsenterMounter) MakeDir(pathname string) error { + return nil +} + +func (*NsenterMounter) MakeFile(pathname string) error { + return nil +} + +func (*NsenterMounter) ExistsPath(pathname string) bool { + return true +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/net/sets/BUILD b/vendor/k8s.io/kubernetes/pkg/util/net/sets/BUILD index 176ae6888229..413f5e390530 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/net/sets/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/net/sets/BUILD @@ -12,11 +12,13 @@ go_library( "doc.go", "ipnet.go", ], + importpath = "k8s.io/kubernetes/pkg/util/net/sets", ) go_test( name = "go_default_test", srcs = ["ipnet_test.go"], + importpath = "k8s.io/kubernetes/pkg/util/net/sets", library = ":go_default_library", ) diff --git a/vendor/k8s.io/kubernetes/pkg/util/netsh/BUILD b/vendor/k8s.io/kubernetes/pkg/util/netsh/BUILD index 54f502de1d66..88c4ef7de78e 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/netsh/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/netsh/BUILD @@ -3,6 +3,7 @@ package(default_visibility = ["//visibility:public"]) load( "@io_bazel_rules_go//go:def.bzl", "go_library", + "go_test", ) go_library( @@ -11,6 +12,7 @@ go_library( "doc.go", "netsh.go", ], + importpath = "k8s.io/kubernetes/pkg/util/netsh", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", @@ -32,3 +34,16 @@ filegroup( ], tags = ["automanaged"], ) + +go_test( + name = "go_default_test", + srcs = ["netsh_test.go"], + importpath = "k8s.io/kubernetes/pkg/util/netsh", + library = ":go_default_library", + deps = [ + "//vendor/github.com/pkg/errors:go_default_library", + "//vendor/github.com/stretchr/testify/assert:go_default_library", + "//vendor/k8s.io/utils/exec:go_default_library", + "//vendor/k8s.io/utils/exec/testing:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/pkg/util/netsh/netsh.go b/vendor/k8s.io/kubernetes/pkg/util/netsh/netsh.go index c3b963c411b0..30b66536b1db 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/netsh/netsh.go +++ b/vendor/k8s.io/kubernetes/pkg/util/netsh/netsh.go @@ -190,9 +190,8 @@ func checkIPExists(ipToCheck string, args []string, runner *runner) (bool, error glog.V(3).Infof("Searching for IP: %v in IP dump: %v", ipToCheck, ipAddressString) showAddressArray := strings.Split(ipAddressString, "\n") for _, showAddress := range showAddressArray { - if strings.Contains(showAddress, "IP Address:") { - ipFromNetsh := strings.TrimLeft(showAddress, "IP Address:") - ipFromNetsh = strings.TrimSpace(ipFromNetsh) + if strings.Contains(showAddress, "IP") { + ipFromNetsh := getIP(showAddress) if ipFromNetsh == ipToCheck { return true, nil } @@ -201,3 +200,12 @@ func checkIPExists(ipToCheck string, args []string, runner *runner) (bool, error return false, nil } + +// getIP gets ip from showAddress (e.g. "IP Address: 10.96.0.4"). 
+func getIP(showAddress string) string { + list := strings.SplitN(showAddress, ":", 2) + if len(list) != 2 { + return "" + } + return strings.TrimSpace(list[1]) +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/node/BUILD b/vendor/k8s.io/kubernetes/pkg/util/node/BUILD index 4c7f54878740..6ce01d7e6fbc 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/node/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/node/BUILD @@ -9,8 +9,9 @@ load( go_library( name = "go_default_library", srcs = ["node.go"], + importpath = "k8s.io/kubernetes/pkg/util/node", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/kubelet/apis:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", @@ -25,6 +26,7 @@ go_library( go_test( name = "go_default_test", srcs = ["node_test.go"], + importpath = "k8s.io/kubernetes/pkg/util/node", library = ":go_default_library", deps = [ "//pkg/kubelet/apis:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/util/node/node.go b/vendor/k8s.io/kubernetes/pkg/util/node/node.go index 0138a56b5995..a4c91bdbbf85 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/node/node.go +++ b/vendor/k8s.io/kubernetes/pkg/util/node/node.go @@ -31,7 +31,7 @@ import ( "k8s.io/apimachinery/pkg/util/strategicpatch" clientset "k8s.io/client-go/kubernetes" v1core "k8s.io/client-go/kubernetes/typed/core/v1" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" ) @@ -146,10 +146,25 @@ func SetNodeCondition(c clientset.Interface, node types.NodeName, condition v1.N if err != nil { return nil } - _, err = c.Core().Nodes().PatchStatus(string(node), patch) + _, err = c.CoreV1().Nodes().PatchStatus(string(node), patch) return err } +// PatchNodeCIDR patches the specified node's CIDR to the given value. +func PatchNodeCIDR(c clientset.Interface, node types.NodeName, cidr string) error { + raw, err := json.Marshal(cidr) + if err != nil { + return fmt.Errorf("failed to json.Marshal CIDR: %v", err) + } + + patchBytes := []byte(fmt.Sprintf(`{"spec":{"podCIDR":%s}}`, raw)) + + if _, err := c.CoreV1().Nodes().Patch(string(node), types.StrategicMergePatchType, patchBytes); err != nil { + return fmt.Errorf("failed to patch node CIDR: %v", err) + } + return nil +} + // PatchNodeStatus patches node status. 
func PatchNodeStatus(c v1core.CoreV1Interface, nodeName types.NodeName, oldNode *v1.Node, newNode *v1.Node) (*v1.Node, error) { oldData, err := json.Marshal(oldNode) diff --git a/vendor/k8s.io/kubernetes/pkg/util/nsenter/BUILD b/vendor/k8s.io/kubernetes/pkg/util/nsenter/BUILD new file mode 100644 index 000000000000..b3f2d2b031d0 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/nsenter/BUILD @@ -0,0 +1,37 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "nsenter_unsupported.go", + ] + select({ + "@io_bazel_rules_go//go/platform:linux_amd64": [ + "nsenter.go", + ], + "//conditions:default": [], + }), + importpath = "k8s.io/kubernetes/pkg/util/nsenter", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/utils/exec:go_default_library", + ] + select({ + "@io_bazel_rules_go//go/platform:linux_amd64": [ + "//vendor/github.com/golang/glog:go_default_library", + ], + "//conditions:default": [], + }), +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/util/nsenter/nsenter.go b/vendor/k8s.io/kubernetes/pkg/util/nsenter/nsenter.go new file mode 100644 index 000000000000..32fbc08487ea --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/nsenter/nsenter.go @@ -0,0 +1,124 @@ +// +build linux + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package nsenter + +import ( + "fmt" + "os" + "path/filepath" + + "k8s.io/utils/exec" + + "github.com/golang/glog" +) + +const ( + hostRootFsPath = "/rootfs" + // hostProcMountNsPath is the default mount namespace for rootfs + hostProcMountNsPath = "/rootfs/proc/1/ns/mnt" + // nsenterPath is the default nsenter command + nsenterPath = "nsenter" +) + +// Nsenter is part of experimental support for running the kubelet +// in a container. +// +// Nsenter requires: +// +// 1. Docker >= 1.6 due to the dependency on the slave propagation mode +// of the bind-mount of the kubelet root directory in the container. +// Docker 1.5 used a private propagation mode for bind-mounts, so mounts +// performed in the host's mount namespace do not propagate out to the +// bind-mount in this docker version. +// 2. The host's root filesystem must be available at /rootfs +// 3. The nsenter binary must be on the Kubelet process' PATH in the container's +// filesystem. +// 4. The Kubelet process must have CAP_SYS_ADMIN (required by nsenter); at +// the present, this effectively means that the kubelet is running in a +// privileged container. +// 5. The volume path used by the Kubelet must be the same inside and outside +// the container and be writable by the container (to initialize volume) +// contents. TODO: remove this requirement. +// 6. 
The host image must have "mount", "findmnt", "umount", "stat", "touch", +// "mkdir", "ls", "sh" and "chmod" binaries in /bin, /usr/sbin, or /usr/bin +// 7. The host image should have systemd-run in /bin, /usr/sbin, or /usr/bin +// For more information about mount propagation modes, see: +// https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt +type Nsenter struct { + // a map of commands to their paths on the host filesystem + paths map[string]string +} + +// NewNsenter constructs a new instance of Nsenter +func NewNsenter() *Nsenter { + ne := &Nsenter{ + paths: map[string]string{ + "mount": "", + "findmnt": "", + "umount": "", + "systemd-run": "", + "stat": "", + "touch": "", + "mkdir": "", + "ls": "", + "sh": "", + "chmod": "", + }, + } + // search for the required commands in other locations besides /usr/bin + for binary := range ne.paths { + // default to root + ne.paths[binary] = filepath.Join("/", binary) + for _, path := range []string{"/bin", "/usr/sbin", "/usr/bin"} { + binPath := filepath.Join(path, binary) + if _, err := os.Stat(filepath.Join(hostRootFsPath, binPath)); err != nil { + continue + } + ne.paths[binary] = binPath + break + } + // TODO: error, so that the kubelet can stop if the paths don't exist + // (don't forget that systemd-run is optional) + } + return ne +} + +// Exec executes nsenter commands in hostProcMountNsPath mount namespace +func (ne *Nsenter) Exec(cmd string, args []string) exec.Cmd { + fullArgs := append([]string{fmt.Sprintf("--mount=%s", hostProcMountNsPath), "--"}, + append([]string{ne.AbsHostPath(cmd)}, args...)...) + glog.V(5).Infof("Running nsenter command: %v %v", nsenterPath, fullArgs) + exec := exec.New() + return exec.Command(nsenterPath, fullArgs...) +} + +// AbsHostPath returns the absolute runnable path for a specified command +func (ne *Nsenter) AbsHostPath(command string) string { + path, ok := ne.paths[command] + if !ok { + return command + } + return path +} + +// SupportsSystemd checks whether command systemd-run exists +func (ne *Nsenter) SupportsSystemd() (string, bool) { + systemdRunPath, hasSystemd := ne.paths["systemd-run"] + return systemdRunPath, hasSystemd +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/nsenter/nsenter_unsupported.go b/vendor/k8s.io/kubernetes/pkg/util/nsenter/nsenter_unsupported.go new file mode 100644 index 000000000000..17b3b5722d39 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/nsenter/nsenter_unsupported.go @@ -0,0 +1,50 @@ +// +build !linux + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package nsenter + +import ( + "k8s.io/utils/exec" +) + +// Nsenter is part of experimental support for running the kubelet +// in a container. 
+type Nsenter struct { + // a map of commands to their paths on the host filesystem + Paths map[string]string +} + +// NewNsenter constructs a new instance of Nsenter +func NewNsenter() *Nsenter { + return &Nsenter{} +} + +// Exec executes nsenter commands in hostProcMountNsPath mount namespace +func (ne *Nsenter) Exec(args ...string) exec.Cmd { + return nil +} + +// AbsHostPath returns the absolute runnable path for a specified command +func (ne *Nsenter) AbsHostPath(command string) string { + return "" +} + +// SupportsSystemd checks whether command systemd-run exists +func (ne *Nsenter) SupportsSystemd() (string, bool) { + return "", false +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/oom/BUILD b/vendor/k8s.io/kubernetes/pkg/util/oom/BUILD index 11d79ceaab00..9e027c7f4825 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/oom/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/oom/BUILD @@ -19,6 +19,7 @@ go_library( ], "//conditions:default": [], }), + importpath = "k8s.io/kubernetes/pkg/util/oom", deps = select({ "@io_bazel_rules_go//go/platform:linux_amd64": [ "//pkg/kubelet/cm/util:go_default_library", @@ -36,6 +37,7 @@ go_test( ], "//conditions:default": [], }), + importpath = "k8s.io/kubernetes/pkg/util/oom", library = ":go_default_library", deps = select({ "@io_bazel_rules_go//go/platform:linux_amd64": [ diff --git a/vendor/k8s.io/kubernetes/pkg/util/oom/oom_linux.go b/vendor/k8s.io/kubernetes/pkg/util/oom/oom_linux.go index 4d0379fabd10..ad6d5c264b55 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/oom/oom_linux.go +++ b/vendor/k8s.io/kubernetes/pkg/util/oom/oom_linux.go @@ -1,4 +1,4 @@ -// +build cgo,linux +// +build linux /* Copyright 2015 The Kubernetes Authors. diff --git a/vendor/k8s.io/kubernetes/pkg/util/oom/oom_unsupported.go b/vendor/k8s.io/kubernetes/pkg/util/oom/oom_unsupported.go index 063839d6b26d..cb362a85a5f9 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/oom/oom_unsupported.go +++ b/vendor/k8s.io/kubernetes/pkg/util/oom/oom_unsupported.go @@ -1,4 +1,4 @@ -// +build !cgo !linux +// +build !linux /* Copyright 2015 The Kubernetes Authors. 
diff --git a/vendor/k8s.io/kubernetes/pkg/util/parsers/BUILD b/vendor/k8s.io/kubernetes/pkg/util/parsers/BUILD index a3b04cd62980..7070f4b5f894 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/parsers/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/parsers/BUILD @@ -9,12 +9,14 @@ load( go_library( name = "go_default_library", srcs = ["parsers.go"], + importpath = "k8s.io/kubernetes/pkg/util/parsers", deps = ["//vendor/github.com/docker/distribution/reference:go_default_library"], ) go_test( name = "go_default_test", srcs = ["parsers_test.go"], + importpath = "k8s.io/kubernetes/pkg/util/parsers", library = ":go_default_library", ) diff --git a/vendor/k8s.io/kubernetes/pkg/util/pointer/BUILD b/vendor/k8s.io/kubernetes/pkg/util/pointer/BUILD index 3e75d456ee61..57cde0c2f07a 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/pointer/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/pointer/BUILD @@ -9,12 +9,14 @@ load( go_test( name = "go_default_test", srcs = ["pointer_test.go"], + importpath = "k8s.io/kubernetes/pkg/util/pointer", library = ":go_default_library", ) go_library( name = "go_default_library", srcs = ["pointer.go"], + importpath = "k8s.io/kubernetes/pkg/util/pointer", ) filegroup( diff --git a/vendor/k8s.io/kubernetes/pkg/util/pointer/pointer.go b/vendor/k8s.io/kubernetes/pkg/util/pointer/pointer.go index 1a10939bae2f..a970bf7f5822 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/pointer/pointer.go +++ b/vendor/k8s.io/kubernetes/pkg/util/pointer/pointer.go @@ -60,3 +60,9 @@ func Int32PtrDerefOr(ptr *int32, def int32) int32 { } return def } + +// BoolPtr returns a pointer to a bool +func BoolPtr(b bool) *bool { + o := b + return &o +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/procfs/BUILD b/vendor/k8s.io/kubernetes/pkg/util/procfs/BUILD index 999226f1f17d..ea7c3be29f8a 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/procfs/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/procfs/BUILD @@ -19,6 +19,7 @@ go_library( ], "//conditions:default": [], }), + importpath = "k8s.io/kubernetes/pkg/util/procfs", deps = select({ "@io_bazel_rules_go//go/platform:linux_amd64": [ "//vendor/github.com/golang/glog:go_default_library", @@ -39,6 +40,7 @@ go_test( data = [ "example_proc_cgroup", ], + importpath = "k8s.io/kubernetes/pkg/util/procfs", library = ":go_default_library", deps = select({ "@io_bazel_rules_go//go/platform:linux_amd64": [ diff --git a/vendor/k8s.io/kubernetes/pkg/util/reflector/prometheus/BUILD b/vendor/k8s.io/kubernetes/pkg/util/reflector/prometheus/BUILD index a13edbe11fe9..d5333e370698 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/reflector/prometheus/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/reflector/prometheus/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["prometheus.go"], + importpath = "k8s.io/kubernetes/pkg/util/reflector/prometheus", deps = [ "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/util/reflector/prometheus/prometheus.go b/vendor/k8s.io/kubernetes/pkg/util/reflector/prometheus/prometheus.go index ab16edf4911c..958a0007cddb 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/reflector/prometheus/prometheus.go +++ b/vendor/k8s.io/kubernetes/pkg/util/reflector/prometheus/prometheus.go @@ -68,6 +68,12 @@ var ( Name: "items_per_watch", Help: "How many items an API watch returns to the reflectors", }, []string{"name"}) + + lastResourceVersion = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Subsystem: reflectorSubsystem, 
+ Name: "last_resource_version", + Help: "Last resource version seen for the reflectors", + }, []string{"name"}) ) func init() { @@ -78,6 +84,7 @@ func init() { prometheus.MustRegister(shortWatchesTotal) prometheus.MustRegister(watchDuration) prometheus.MustRegister(itemsPerWatch) + prometheus.MustRegister(lastResourceVersion) cache.SetReflectorMetricsProvider(prometheusMetricsProvider{}) } @@ -117,11 +124,5 @@ func (prometheusMetricsProvider) NewItemsInWatchMetric(name string) cache.Summar } func (prometheusMetricsProvider) NewLastResourceVersionMetric(name string) cache.GaugeMetric { - rv := prometheus.NewGauge(prometheus.GaugeOpts{ - Subsystem: name, - Name: "last_resource_version", - Help: "last resource version seen for the reflectors", - }) - prometheus.MustRegister(rv) - return rv + return lastResourceVersion.WithLabelValues(name) } diff --git a/vendor/k8s.io/kubernetes/pkg/util/removeall/BUILD b/vendor/k8s.io/kubernetes/pkg/util/removeall/BUILD index 4de7ab0c4eda..54d7332eb575 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/removeall/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/removeall/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["removeall_test.go"], + importpath = "k8s.io/kubernetes/pkg/util/removeall", library = ":go_default_library", deps = [ "//pkg/util/mount:go_default_library", @@ -19,6 +20,7 @@ go_test( go_library( name = "go_default_library", srcs = ["removeall.go"], + importpath = "k8s.io/kubernetes/pkg/util/removeall", deps = ["//pkg/util/mount:go_default_library"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/util/resizefs/BUILD b/vendor/k8s.io/kubernetes/pkg/util/resizefs/BUILD new file mode 100644 index 000000000000..aaa1f9aa5f49 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/resizefs/BUILD @@ -0,0 +1,38 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "resizefs_unsupported.go", + ] + select({ + "@io_bazel_rules_go//go/platform:linux_amd64": [ + "resizefs_linux.go", + ], + "//conditions:default": [], + }), + importpath = "k8s.io/kubernetes/pkg/util/resizefs", + visibility = ["//visibility:public"], + deps = [ + "//pkg/util/mount:go_default_library", + ] + select({ + "@io_bazel_rules_go//go/platform:linux_amd64": [ + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/utils/exec:go_default_library", + ], + "//conditions:default": [], + }), +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/util/resizefs/resizefs_linux.go b/vendor/k8s.io/kubernetes/pkg/util/resizefs/resizefs_linux.go new file mode 100644 index 000000000000..6a4d82d6c037 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/resizefs/resizefs_linux.go @@ -0,0 +1,143 @@ +// +build linux + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resizefs + +import ( + "fmt" + + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/util/mount" + utilexec "k8s.io/utils/exec" +) + +const ( + // 'fsck' found errors and corrected them + fsckErrorsCorrected = 1 + // 'fsck' found errors but exited without correcting them + fsckErrorsUncorrected = 4 +) + +// ResizeFs Provides support for resizing file systems +type ResizeFs struct { + mounter *mount.SafeFormatAndMount +} + +// NewResizeFs returns new instance of resizer +func NewResizeFs(mounter *mount.SafeFormatAndMount) *ResizeFs { + return &ResizeFs{mounter: mounter} +} + +// Resize perform resize of file system +func (resizefs *ResizeFs) Resize(devicePath string) (bool, error) { + format, err := resizefs.mounter.GetDiskFormat(devicePath) + + if err != nil { + formatErr := fmt.Errorf("error checking format for device %s: %v", devicePath, err) + return false, formatErr + } + + // If disk has no format, there is no need to resize the disk because mkfs.* + // by default will use whole disk anyways. + if format == "" { + return false, nil + } + + deviceOpened, err := resizefs.mounter.DeviceOpened(devicePath) + + if err != nil { + deviceOpenErr := fmt.Errorf("error verifying if device %s is open: %v", devicePath, err) + return false, deviceOpenErr + } + + if deviceOpened { + deviceAlreadyOpenErr := fmt.Errorf("the device %s is already in use", devicePath) + return false, deviceAlreadyOpenErr + } + + switch format { + case "ext3", "ext4": + fsckErr := resizefs.extFsck(devicePath, format) + if fsckErr != nil { + return false, fsckErr + } + return resizefs.extResize(devicePath) + case "xfs": + fsckErr := resizefs.fsckDevice(devicePath) + if fsckErr != nil { + return false, fsckErr + } + return resizefs.xfsResize(devicePath) + } + return false, fmt.Errorf("resize of format %s is not supported for device %s", format, devicePath) +} + +func (resizefs *ResizeFs) fsckDevice(devicePath string) error { + glog.V(4).Infof("Checking for issues with fsck on device: %s", devicePath) + args := []string{"-a", devicePath} + out, err := resizefs.mounter.Exec.Run("fsck", args...) + if err != nil { + ee, isExitError := err.(utilexec.ExitError) + switch { + case err == utilexec.ErrExecutableNotFound: + glog.Warningf("'fsck' not found on system; continuing resizing without running 'fsck'.") + case isExitError && ee.ExitStatus() == fsckErrorsCorrected: + glog.V(2).Infof("Device %s has errors which were corrected by fsck: %s", devicePath, string(out)) + case isExitError && ee.ExitStatus() == fsckErrorsUncorrected: + return fmt.Errorf("'fsck' found errors on device %s but could not correct them: %s", devicePath, string(out)) + case isExitError && ee.ExitStatus() > fsckErrorsUncorrected: + glog.Infof("`fsck` error %s", string(out)) + } + } + return nil +} + +func (resizefs *ResizeFs) extFsck(devicePath string, fsType string) error { + glog.V(4).Infof("Checking for issues with fsck.%s on device: %s", fsType, devicePath) + args := []string{"-f", "-y", devicePath} + out, err := resizefs.mounter.Run("fsck."+fsType, args...) 
+ if err != nil { + return fmt.Errorf("running fsck.%s failed on %s with error: %v\n Output: %s", fsType, devicePath, err, string(out)) + } + return nil +} + +func (resizefs *ResizeFs) extResize(devicePath string) (bool, error) { + output, err := resizefs.mounter.Exec.Run("resize2fs", devicePath) + if err == nil { + glog.V(2).Infof("Device %s resized successfully", devicePath) + return true, nil + } + + resizeError := fmt.Errorf("resize of device %s failed: %v. resize2fs output: %s", devicePath, err, string(output)) + return false, resizeError + +} + +func (resizefs *ResizeFs) xfsResize(devicePath string) (bool, error) { + args := []string{"-d", devicePath} + output, err := resizefs.mounter.Exec.Run("xfs_growfs", args...) + + if err == nil { + glog.V(2).Infof("Device %s resized successfully", devicePath) + return true, nil + } + + resizeError := fmt.Errorf("resize of device %s failed: %v. xfs_growfs output: %s", devicePath, err, string(output)) + return false, resizeError +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/resizefs/resizefs_unsupported.go b/vendor/k8s.io/kubernetes/pkg/util/resizefs/resizefs_unsupported.go new file mode 100644 index 000000000000..9241d7d5b27d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/resizefs/resizefs_unsupported.go @@ -0,0 +1,40 @@ +// +build !linux + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resizefs + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/util/mount" +) + +// ResizeFs Provides support for resizing file systems +type ResizeFs struct { + mounter *mount.SafeFormatAndMount +} + +// NewResizeFs returns new instance of resizer +func NewResizeFs(mounter *mount.SafeFormatAndMount) *ResizeFs { + return &ResizeFs{mounter: mounter} +} + +// Resize perform resize of file system +func (resizefs *ResizeFs) Resize(devicePath string) (bool, error) { + return false, fmt.Errorf("Resize is not supported for this build") +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/resourcecontainer/BUILD b/vendor/k8s.io/kubernetes/pkg/util/resourcecontainer/BUILD index aa49ef97f07b..05b817b4b122 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/resourcecontainer/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/resourcecontainer/BUILD @@ -15,6 +15,7 @@ go_library( ], "//conditions:default": [], }), + importpath = "k8s.io/kubernetes/pkg/util/resourcecontainer", deps = select({ "@io_bazel_rules_go//go/platform:linux_amd64": [ "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/util/rlimit/BUILD b/vendor/k8s.io/kubernetes/pkg/util/rlimit/BUILD index 61c3f1d153ba..52fe550b1037 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/rlimit/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/rlimit/BUILD @@ -15,6 +15,7 @@ go_library( ], "//conditions:default": [], }), + importpath = "k8s.io/kubernetes/pkg/util/rlimit", deps = select({ "@io_bazel_rules_go//go/platform:linux_amd64": [ "//vendor/golang.org/x/sys/unix:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/util/selinux/BUILD b/vendor/k8s.io/kubernetes/pkg/util/selinux/BUILD index 7d41a9953ea3..f5eab6022f05 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/selinux/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/selinux/BUILD @@ -17,6 +17,7 @@ go_library( ], "//conditions:default": [], }), + importpath = "k8s.io/kubernetes/pkg/util/selinux", deps = select({ "@io_bazel_rules_go//go/platform:linux_amd64": [ "//vendor/github.com/opencontainers/selinux/go-selinux:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/util/slice/BUILD b/vendor/k8s.io/kubernetes/pkg/util/slice/BUILD index ce8ed8d49e8a..611545484c03 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/slice/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/slice/BUILD @@ -9,12 +9,14 @@ load( go_library( name = "go_default_library", srcs = ["slice.go"], + importpath = "k8s.io/kubernetes/pkg/util/slice", deps = ["//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library"], ) go_test( name = "go_default_test", srcs = ["slice_test.go"], + importpath = "k8s.io/kubernetes/pkg/util/slice", library = ":go_default_library", ) diff --git a/vendor/k8s.io/kubernetes/pkg/util/strings/BUILD b/vendor/k8s.io/kubernetes/pkg/util/strings/BUILD index abda343d5589..f71a3326e39d 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/strings/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/strings/BUILD @@ -13,6 +13,7 @@ go_library( "line_delimiter.go", "strings.go", ], + importpath = "k8s.io/kubernetes/pkg/util/strings", ) go_test( @@ -22,6 +23,7 @@ go_test( "line_delimiter_test.go", "strings_test.go", ], + importpath = "k8s.io/kubernetes/pkg/util/strings", library = ":go_default_library", ) diff --git a/vendor/k8s.io/kubernetes/pkg/util/sysctl/BUILD b/vendor/k8s.io/kubernetes/pkg/util/sysctl/BUILD index 8e3964e0831c..77d699884cc3 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/sysctl/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/sysctl/BUILD @@ 
-8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["sysctl.go"], + importpath = "k8s.io/kubernetes/pkg/util/sysctl", ) filegroup( diff --git a/vendor/k8s.io/kubernetes/pkg/util/system/BUILD b/vendor/k8s.io/kubernetes/pkg/util/system/BUILD index 76994cf9ba74..1c2f1a067f13 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/system/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/system/BUILD @@ -9,11 +9,13 @@ load( go_library( name = "go_default_library", srcs = ["system_utils.go"], + importpath = "k8s.io/kubernetes/pkg/util/system", ) go_test( name = "go_default_test", srcs = ["system_utils_test.go"], + importpath = "k8s.io/kubernetes/pkg/util/system", library = ":go_default_library", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/util/tail/BUILD b/vendor/k8s.io/kubernetes/pkg/util/tail/BUILD index ab15a92fe5f2..4e81446dd40e 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/tail/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/tail/BUILD @@ -22,10 +22,12 @@ filegroup( go_test( name = "go_default_test", srcs = ["tail_test.go"], + importpath = "k8s.io/kubernetes/pkg/util/tail", library = ":go_default_library", ) go_library( name = "go_default_library", srcs = ["tail.go"], + importpath = "k8s.io/kubernetes/pkg/util/tail", ) diff --git a/vendor/k8s.io/kubernetes/pkg/util/taints/BUILD b/vendor/k8s.io/kubernetes/pkg/util/taints/BUILD index 754a714ae7c2..957af3c05f22 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/taints/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/taints/BUILD @@ -9,9 +9,10 @@ load( go_library( name = "go_default_library", srcs = ["taints.go"], + importpath = "k8s.io/kubernetes/pkg/util/taints", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/helper:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/helper:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", @@ -22,9 +23,10 @@ go_library( go_test( name = "go_default_test", srcs = ["taints_test.go"], + importpath = "k8s.io/kubernetes/pkg/util/taints", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/pkg/util/taints/taints.go b/vendor/k8s.io/kubernetes/pkg/util/taints/taints.go index 5957adfb2780..76e4bb866814 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/taints/taints.go +++ b/vendor/k8s.io/kubernetes/pkg/util/taints/taints.go @@ -25,8 +25,8 @@ import ( utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/helper" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/helper" ) const ( @@ -35,7 +35,7 @@ const ( UNTAINTED = "untainted" ) -// parseTaint parses a taint from a string. Taint must be off the format '<key>=<value>:<effect>'. +// parseTaint parses a taint from a string. Taint must be of the format '<key>=<value>:<effect>'.
func parseTaint(st string) (v1.Taint, error) { var taint v1.Taint parts := strings.Split(st, "=") @@ -45,15 +45,14 @@ func parseTaint(st string) (v1.Taint, error) { parts2 := strings.Split(parts[1], ":") - effect := v1.TaintEffect(parts2[1]) - errs := validation.IsValidLabelValue(parts2[0]) if len(parts2) != 2 || len(errs) != 0 { return taint, fmt.Errorf("invalid taint spec: %v, %s", st, strings.Join(errs, "; ")) } - if effect != v1.TaintEffectNoSchedule && effect != v1.TaintEffectPreferNoSchedule && effect != v1.TaintEffectNoExecute { - return taint, fmt.Errorf("invalid taint spec: %v, unsupported taint effect", st) + effect := v1.TaintEffect(parts2[1]) + if err := validateTaintEffect(effect); err != nil { + return taint, err } taint.Key = parts[0] @@ -63,6 +62,14 @@ func parseTaint(st string) (v1.Taint, error) { return taint, nil } +func validateTaintEffect(effect v1.TaintEffect) error { + if effect != v1.TaintEffectNoSchedule && effect != v1.TaintEffectPreferNoSchedule && effect != v1.TaintEffectNoExecute { + return fmt.Errorf("invalid taint effect: %v, unsupported taint effect", effect) + } + + return nil +} + // NewTaintsVar wraps []api.Taint in a struct that implements flag.Value to allow taints to be // bound to command line flags. func NewTaintsVar(ptr *[]api.Taint) taintsVar { @@ -76,6 +83,10 @@ type taintsVar struct { } func (t taintsVar) Set(s string) error { + if len(s) == 0 { + *t.ptr = nil + return nil + } sts := strings.Split(s, ",") var taints []api.Taint for _, st := range sts { @@ -91,7 +102,7 @@ func (t taintsVar) Set(s string) error { func (t taintsVar) String() string { if len(*t.ptr) == 0 { - return "" + return "" } var taints []string for _, taint := range *t.ptr { @@ -134,6 +145,14 @@ func ParseTaints(spec []string) ([]v1.Taint, []v1.Taint, error) { taintKey = parts[0] effect = v1.TaintEffect(parts[1]) } + + // If effect is specified, need to validate it. + if len(effect) > 0 { + err := validateTaintEffect(effect) + if err != nil { + return nil, nil, err + } + } taintsToRemove = append(taintsToRemove, v1.Taint{Key: taintKey, Effect: effect}) } else { return nil, nil, fmt.Errorf("unknown taint spec: %v", taintSpec) @@ -238,11 +257,7 @@ func DeleteTaint(taints []v1.Taint, taintToDelete *v1.Taint) ([]v1.Taint, bool) // RemoveTaint tries to remove a taint from annotations list. Returns a new copy of updated Node and true if something was updated // false otherwise. func RemoveTaint(node *v1.Node, taint *v1.Taint) (*v1.Node, bool, error) { - objCopy, err := api.Scheme.DeepCopy(node) - if err != nil { - return nil, false, err - } - newNode := objCopy.(*v1.Node) + newNode := node.DeepCopy() nodeTaints := newNode.Spec.Taints if len(nodeTaints) == 0 { return newNode, false, nil @@ -260,11 +275,7 @@ func RemoveTaint(node *v1.Node, taint *v1.Taint) (*v1.Node, bool, error) { // AddOrUpdateTaint tries to add a taint to annotations list. Returns a new copy of updated Node and true if something was updated // false otherwise. 
func AddOrUpdateTaint(node *v1.Node, taint *v1.Taint) (*v1.Node, bool, error) { - objCopy, err := api.Scheme.DeepCopy(node) - if err != nil { - return nil, false, err - } - newNode := objCopy.(*v1.Node) + newNode := node.DeepCopy() nodeTaints := newNode.Spec.Taints var newTaints []v1.Taint diff --git a/vendor/k8s.io/kubernetes/pkg/util/term/BUILD b/vendor/k8s.io/kubernetes/pkg/util/term/BUILD index 1edebf3653c1..01b8ca19dc58 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/term/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/term/BUILD @@ -15,6 +15,7 @@ go_library( ], "//conditions:default": [], }), + importpath = "k8s.io/kubernetes/pkg/util/term", deps = [ "//vendor/github.com/docker/docker/pkg/term:go_default_library", "//vendor/k8s.io/client-go/tools/remotecommand:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/util/tolerations/BUILD b/vendor/k8s.io/kubernetes/pkg/util/tolerations/BUILD index 69e92874db93..1e3b1189b0d0 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/tolerations/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/tolerations/BUILD @@ -12,7 +12,8 @@ go_library( "doc.go", "tolerations.go", ], - deps = ["//pkg/api:go_default_library"], + importpath = "k8s.io/kubernetes/pkg/util/tolerations", + deps = ["//pkg/apis/core:go_default_library"], ) filegroup( @@ -31,6 +32,7 @@ filegroup( go_test( name = "go_default_test", srcs = ["tolerations_test.go"], + importpath = "k8s.io/kubernetes/pkg/util/tolerations", library = ":go_default_library", - deps = ["//pkg/api:go_default_library"], + deps = ["//pkg/apis/core:go_default_library"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/util/tolerations/tolerations.go b/vendor/k8s.io/kubernetes/pkg/util/tolerations/tolerations.go index 9936d2d43ea1..5b5cec8b7d70 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/tolerations/tolerations.go +++ b/vendor/k8s.io/kubernetes/pkg/util/tolerations/tolerations.go @@ -17,7 +17,7 @@ limitations under the License. 
package tolerations import ( - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) type key struct { diff --git a/vendor/k8s.io/kubernetes/pkg/util/version/BUILD b/vendor/k8s.io/kubernetes/pkg/util/version/BUILD index ae2668b46cd0..9b3cf2bc9216 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/version/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/version/BUILD @@ -12,11 +12,13 @@ go_library( "doc.go", "version.go", ], + importpath = "k8s.io/kubernetes/pkg/util/version", ) go_test( name = "go_default_test", srcs = ["version_test.go"], + importpath = "k8s.io/kubernetes/pkg/util/version", library = ":go_default_library", ) diff --git a/vendor/k8s.io/kubernetes/pkg/util/version/version.go b/vendor/k8s.io/kubernetes/pkg/util/version/version.go index e8cd0cecfa7f..24724e50ac5b 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/version/version.go +++ b/vendor/k8s.io/kubernetes/pkg/util/version/version.go @@ -181,12 +181,11 @@ func (v *Version) String() string { // compareInternal returns -1 if v is less than other, 1 if it is greater than other, or 0 // if they are equal func (v *Version) compareInternal(other *Version) int { - for i := range v.components { + + vLen := len(v.components) + oLen := len(other.components) + for i := 0; i < vLen && i < oLen; i++ { switch { - case i >= len(other.components): - if v.components[i] != 0 { - return 1 - } case other.components[i] < v.components[i]: return 1 case other.components[i] > v.components[i]: @@ -194,6 +193,14 @@ func (v *Version) compareInternal(other *Version) int { } } + // If components are common but one has more items and they are not zeros, it is bigger + switch { + case oLen < vLen && !onlyZeros(v.components[oLen:]): + return 1 + case oLen > vLen && !onlyZeros(other.components[vLen:]): + return -1 + } + if !v.semver || !other.semver { return 0 } @@ -209,10 +216,7 @@ func (v *Version) compareInternal(other *Version) int { vPR := strings.Split(v.preRelease, ".") oPR := strings.Split(other.preRelease, ".") - for i := range vPR { - if i >= len(oPR) { - return 1 - } + for i := 0; i < len(vPR) && i < len(oPR); i++ { vNum, err := strconv.ParseUint(vPR[i], 10, 0) if err == nil { oNum, err := strconv.ParseUint(oPR[i], 10, 0) @@ -234,9 +238,26 @@ func (v *Version) compareInternal(other *Version) int { } } + switch { + case len(oPR) < len(vPR): + return 1 + case len(oPR) > len(vPR): + return -1 + } + return 0 } +// returns false if array contain any non-zero element +func onlyZeros(array []uint) bool { + for _, num := range array { + if num != 0 { + return false + } + } + return true +} + // AtLeast tests if a version is at least equal to a given minimum version. If both // Versions are Semantic Versions, this will use the Semantic Version comparison // algorithm. 
Otherwise, it will compare only the numeric components, with non-present diff --git a/vendor/k8s.io/kubernetes/pkg/util/workqueue/prometheus/BUILD b/vendor/k8s.io/kubernetes/pkg/util/workqueue/prometheus/BUILD index 671ebf7bcfd7..0f3671ebb393 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/workqueue/prometheus/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/workqueue/prometheus/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["prometheus.go"], + importpath = "k8s.io/kubernetes/pkg/util/workqueue/prometheus", deps = [ "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/k8s.io/client-go/util/workqueue:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/version/BUILD b/vendor/k8s.io/kubernetes/pkg/version/BUILD index 29363797f19b..8194b973437d 100644 --- a/vendor/k8s.io/kubernetes/pkg/version/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/version/BUILD @@ -12,6 +12,7 @@ go_library( "doc.go", "version.go", ], + importpath = "k8s.io/kubernetes/pkg/version", deps = ["//vendor/k8s.io/apimachinery/pkg/version:go_default_library"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/version/base.go b/vendor/k8s.io/kubernetes/pkg/version/base.go index 8dc21a97d068..730e79f03d14 100644 --- a/vendor/k8s.io/kubernetes/pkg/version/base.go +++ b/vendor/k8s.io/kubernetes/pkg/version/base.go @@ -39,11 +39,11 @@ var ( // them irrelevant. (Next we'll take it out, which may muck with // scripts consuming the kubectl version output - but most of // these should be looking at gitVersion already anyways.) - gitMajor string = "1" // major version, always numeric - gitMinor string = "8" // minor version, numeric possibly followed by "+" + gitMajor string // major version, always numeric + gitMinor string // minor version, numeric possibly followed by "+" // semantic version, derived by build scripts (see - // https://github.com/kubernetes/kubernetes/blob/master/docs/design/versioning.md + // https://github.com/kubernetes/community/blob/master/contributors/design-proposals/release/versioning.md // for a detailed discussion of this field) // // TODO: This field is still called "gitVersion" for legacy @@ -51,9 +51,13 @@ var ( // semantic version is a git hash, but the version itself is no // longer the direct output of "git describe", but a slight // translation to be semver compliant. - gitVersion string = "v1.8.0+$Format:%h$" - gitCommit string = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD) - gitTreeState string = "not a git tree" // state of git tree, either "clean" or "dirty" - buildDate string = "1970-01-01T00:00:00Z" // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ') + // NOTE: The $Format strings are replaced during 'git archive' thanks to the + // companion .gitattributes file containing 'export-subst' in this same + // directory. 
See also https://git-scm.com/docs/gitattributes + gitVersion = "v0.0.0-master+$Format:%h$" + gitCommit = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD) + gitTreeState = "" // state of git tree, either "clean" or "dirty" + + buildDate = "1970-01-01T00:00:00Z" // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ') ) diff --git a/vendor/k8s.io/kubernetes/pkg/version/verflag/BUILD b/vendor/k8s.io/kubernetes/pkg/version/verflag/BUILD index e3c7e41aabdf..5d725336ec29 100644 --- a/vendor/k8s.io/kubernetes/pkg/version/verflag/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/version/verflag/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["verflag.go"], + importpath = "k8s.io/kubernetes/pkg/version/verflag", deps = [ "//pkg/version:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/volume/BUILD b/vendor/k8s.io/kubernetes/pkg/volume/BUILD index d9646c1e06e7..8b7ef61afc3b 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/volume/BUILD @@ -25,6 +25,7 @@ go_library( ], "//conditions:default": [], }), + importpath = "k8s.io/kubernetes/pkg/volume", deps = [ "//pkg/cloudprovider:go_default_library", "//pkg/util/io:go_default_library", @@ -52,9 +53,10 @@ go_test( "plugins_test.go", "util_test.go", ], + importpath = "k8s.io/kubernetes/pkg/volume", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/util/slice:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", @@ -76,6 +78,7 @@ go_test( ], "//conditions:default": [], }), + importpath = "k8s.io/kubernetes/pkg/volume_test", deps = [ ":go_default_library", "//pkg/volume/testing:go_default_library", @@ -105,6 +108,7 @@ filegroup( "//pkg/volume/cephfs:all-srcs", "//pkg/volume/cinder:all-srcs", "//pkg/volume/configmap:all-srcs", + "//pkg/volume/csi:all-srcs", "//pkg/volume/downwardapi:all-srcs", "//pkg/volume/empty_dir:all-srcs", "//pkg/volume/fc:all-srcs", diff --git a/vendor/k8s.io/kubernetes/pkg/volume/OWNERS b/vendor/k8s.io/kubernetes/pkg/volume/OWNERS index 03856d7faa57..9ee3a37d7cef 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/volume/OWNERS @@ -7,15 +7,11 @@ approvers: - gnufied - childsb reviewers: -- thockin -- smarterclayton -- brendandburns -- derekwaynecarr -- pmorie - saad-ali -- justinsb - jsafrane - rootfs - jingxu97 - msau42 - gnufied +- verult +- davidz627 diff --git a/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/BUILD b/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/BUILD index a8d96f4b4ba1..e61d65a9da2d 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/BUILD @@ -14,6 +14,7 @@ go_library( "aws_util.go", "doc.go", ], + importpath = "k8s.io/kubernetes/pkg/volume/aws_ebs", deps = [ "//pkg/cloudprovider:go_default_library", "//pkg/cloudprovider/providers/aws:go_default_library", @@ -36,6 +37,7 @@ go_test( "attacher_test.go", "aws_ebs_test.go", ], + importpath = "k8s.io/kubernetes/pkg/volume/aws_ebs", library = ":go_default_library", deps = [ "//pkg/cloudprovider/providers/aws:go_default_library", @@ -44,6 +46,7 @@ go_test( "//pkg/volume/testing:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", + 
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/attacher.go b/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/attacher.go index bf1414246901..19ca2ea49b45 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/attacher.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/attacher.go @@ -169,7 +169,7 @@ func (attacher *awsElasticBlockStoreAttacher) WaitForAttach(spec *volume.Spec, d case <-ticker.C: glog.V(5).Infof("Checking AWS Volume %q is attached.", volumeID) if devicePath != "" { - devicePaths := getDiskByIdPaths(partition, devicePath) + devicePaths := getDiskByIdPaths(aws.KubernetesVolumeID(volumeSource.VolumeID), partition, devicePath) path, err := verifyDevicePath(devicePaths) if err != nil { // Log error, if any, and continue checking periodically. See issue #11321 @@ -253,24 +253,10 @@ func (plugin *awsElasticBlockStorePlugin) NewDetacher() (volume.Detacher, error) }, nil } -func (detacher *awsElasticBlockStoreDetacher) Detach(deviceMountPath string, nodeName types.NodeName) error { - volumeID := aws.KubernetesVolumeID(path.Base(deviceMountPath)) +func (detacher *awsElasticBlockStoreDetacher) Detach(volumeName string, nodeName types.NodeName) error { + volumeID := aws.KubernetesVolumeID(path.Base(volumeName)) - attached, err := detacher.awsVolumes.DiskIsAttached(volumeID, nodeName) - if err != nil { - // Log error and continue with detach - glog.Errorf( - "Error checking if volume (%q) is already attached to current node (%q). Will continue and try detach anyway. err=%v", - volumeID, nodeName, err) - } - - if err == nil && !attached { - // Volume is already detached from node. - glog.Infof("detach operation was successful. 
volume %q is already detached from node %q.", volumeID, nodeName) - return nil - } - - if _, err = detacher.awsVolumes.DetachDisk(volumeID, nodeName); err != nil { + if _, err := detacher.awsVolumes.DetachDisk(volumeID, nodeName); err != nil { glog.Errorf("Error detaching volumeID %q: %v", volumeID, err) return err } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_ebs.go b/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_ebs.go index 5c9f2702c00d..9a0091928e20 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_ebs.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_ebs.go @@ -241,6 +241,33 @@ func (plugin *awsElasticBlockStorePlugin) ConstructVolumeSpec(volName, mountPath return volume.NewSpecFromVolume(awsVolume), nil } +func (plugin *awsElasticBlockStorePlugin) RequiresFSResize() bool { + return true +} + +func (plugin *awsElasticBlockStorePlugin) ExpandVolumeDevice( + spec *volume.Spec, + newSize resource.Quantity, + oldSize resource.Quantity) (resource.Quantity, error) { + var awsVolume aws.Volumes + + awsVolume, err := getCloudProvider(plugin.host.GetCloudProvider()) + + if err != nil { + return oldSize, err + } + // we don't expect to receive this call for non PVs + rawVolumeName := spec.PersistentVolume.Spec.AWSElasticBlockStore.VolumeID + volumeID := aws.KubernetesVolumeID(rawVolumeName) + + if volumeID == "" { + return oldSize, fmt.Errorf("EBS.ExpandVolumeDevice Invalid volume id for %s", spec.Name()) + } + return awsVolume.ResizeDisk(volumeID, oldSize, newSize) +} + +var _ volume.ExpandableVolumePlugin = &awsElasticBlockStorePlugin{} + // Abstract interface to PD operations. type ebsManager interface { CreateVolume(provisioner *awsElasticBlockStoreProvisioner) (volumeID aws.KubernetesVolumeID, volumeSizeGB int, labels map[string]string, fstype string, err error) diff --git a/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_util.go b/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_util.go index 711a9eaed393..932617d4e9ad 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/aws_ebs/aws_util.go @@ -18,6 +18,8 @@ package aws_ebs import ( "fmt" + "os" + "path/filepath" "strconv" "strings" "time" @@ -178,7 +180,7 @@ func verifyAllPathsRemoved(devicePaths []string) (bool, error) { // Returns list of all paths for given EBS mount // This is more interesting on GCE (where we are able to identify volumes under /dev/disk-by-id) // Here it is mostly about applying the partition path -func getDiskByIdPaths(partition string, devicePath string) []string { +func getDiskByIdPaths(volumeID aws.KubernetesVolumeID, partition string, devicePath string) []string { devicePaths := []string{} if devicePath != "" { devicePaths = append(devicePaths, devicePath) @@ -190,6 +192,23 @@ func getDiskByIdPaths(partition string, devicePath string) []string { } } + // We need to find NVME volumes, which are mounted on a "random" nvme path ("/dev/nvme0n1"), + // and we have to get the volume id from the nvme interface + awsVolumeID, err := volumeID.MapToAWSVolumeID() + if err != nil { + glog.Warningf("error mapping volume %q to AWS volume: %v", volumeID, err) + } else { + // This is the magic name on which AWS presents NVME devices under /dev/disk/by-id/ + // For example, vol-0fab1d5e3f72a5e23 creates a symlink at /dev/disk/by-id/nvme-Amazon_Elastic_Block_Store_vol0fab1d5e3f72a5e23 + nvmeName := "nvme-Amazon_Elastic_Block_Store_" + strings.Replace(string(awsVolumeID), "-", "", -1) + nvmePath, err := findNvmeVolume(nvmeName) + if err 
!= nil { + glog.Warningf("error looking for nvme volume %q: %v", volumeID, err) + } else if nvmePath != "" { + devicePaths = append(devicePaths, nvmePath) + } + } + return devicePaths } @@ -202,3 +221,35 @@ func getCloudProvider(cloudProvider cloudprovider.Interface) (*aws.Cloud, error) return awsCloudProvider, nil } + +// findNvmeVolume looks for the nvme volume with the specified name +// It follows the symlink (if it exists) and returns the absolute path to the device +func findNvmeVolume(findName string) (device string, err error) { + p := filepath.Join("/dev/disk/by-id/", findName) + stat, err := os.Lstat(p) + if err != nil { + if os.IsNotExist(err) { + glog.V(6).Infof("nvme path not found %q", p) + return "", nil + } + return "", fmt.Errorf("error getting stat of %q: %v", p, err) + } + + if stat.Mode()&os.ModeSymlink != os.ModeSymlink { + glog.Warningf("nvme file %q found, but was not a symlink", p) + return "", nil + } + + // Find the target, resolving to an absolute path + // For example, /dev/disk/by-id/nvme-Amazon_Elastic_Block_Store_vol0fab1d5e3f72a5e23 -> ../../nvme2n1 + resolved, err := filepath.EvalSymlinks(p) + if err != nil { + return "", fmt.Errorf("error reading target of symlink %q: %v", p, err) + } + + if !strings.HasPrefix(resolved, "/dev") { + return "", fmt.Errorf("resolved symlink for %q was unexpected: %q", p, resolved) + } + + return resolved, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/BUILD b/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/BUILD index 88f1a017e5d7..7a71a2c4bb21 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/BUILD @@ -24,8 +24,9 @@ go_library( ], "//conditions:default": [], }), + importpath = "k8s.io/kubernetes/pkg/volume/azure_dd", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/cloudprovider:go_default_library", "//pkg/cloudprovider/providers/azure:go_default_library", "//pkg/util/keymutex:go_default_library", @@ -65,6 +66,7 @@ go_test( "azure_common_test.go", "azure_dd_test.go", ], + importpath = "k8s.io/kubernetes/pkg/volume/azure_dd", library = ":go_default_library", deps = [ "//pkg/util/mount:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/OWNERS b/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/OWNERS index 1d61e4196dd3..dbe4f0a2053f 100755 --- a/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/OWNERS @@ -2,14 +2,10 @@ approvers: - brendandburns - rootfs reviewers: -- justinsb - rootfs - brendandburns -- thockin -- smarterclayton -- derekwaynecarr -- pmorie - saad-ali - jsafrane - jingxu97 - msau42 +- andyzhangx diff --git a/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common.go b/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common.go index 157be0d4220c..eda015907388 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common.go @@ -27,7 +27,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/cloudprovider/providers/azure" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/strings" @@ -37,6 +37,7 @@ import ( const ( defaultFSType = "ext4" defaultStorageAccountType = storage.StandardLRS + defaultAzureDiskKind = v1.AzureSharedBlobDisk ) type dataDisk struct { @@ -57,7 +58,7 @@ var ( 
string(api.AzureDedicatedBlobDisk), string(api.AzureManagedDisk)) - supportedStorageAccountTypes = sets.NewString("Premium_LRS", "Standard_LRS") + supportedStorageAccountTypes = sets.NewString("Premium_LRS", "Standard_LRS", "Standard_GRS", "Standard_RAGRS") ) func getPath(uid types.UID, volName string, host volume.VolumeHost) string { @@ -116,7 +117,7 @@ func normalizeFsType(fsType string) string { func normalizeKind(kind string) (v1.AzureDataDiskKind, error) { if kind == "" { - return v1.AzureDedicatedBlobDisk, nil + return defaultAzureDiskKind, nil } if !supportedDiskKinds.Has(kind) { diff --git a/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common_linux.go b/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common_linux.go index d5d0caa1e061..304a2bc28555 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common_linux.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common_linux.go @@ -77,6 +77,19 @@ func findDiskByLunWithConstraint(lun int, io ioHandler, azureDisks []string) (st if len(arr) < 4 { continue } + if len(azureDisks) == 0 { + glog.V(4).Infof("/dev/disk/azure is not populated, now try to parse %v directly", name) + target, err := strconv.Atoi(arr[0]) + if err != nil { + glog.Errorf("failed to parse target from %v (%v), err %v", arr[0], name, err) + continue + } + // as observed, targets 0-3 are used by OS disks. Skip them + if target <= 3 { + continue + } + } + // extract LUN from the path. // LUN is the last index of the array, i.e. 1 in /sys/bus/scsi/devices/3:0:0:1 l, err := strconv.Atoi(arr[3]) @@ -150,9 +163,10 @@ func formatIfNotFormatted(disk string, fstype string, exec mount.Exec) { _, err := exec.Run("mkfs."+fstype, args...) if err == nil { // the disk has been formatted successfully try to mount it again. 
- glog.Infof("azureDisk - Disk successfully formatted (mkfs): %s - %s %s", fstype, disk, "tt") + glog.Infof("azureDisk - Disk successfully formatted with 'mkfs.%s %v'", fstype, args) + } else { + glog.Warningf("azureDisk - Error formatting volume with 'mkfs.%s %v': %v", fstype, args, err) } - glog.Warningf("azureDisk - format of disk %q failed: type:(%q) target:(%q) options:(%q)error:(%v)", disk, fstype, "tt", "o", err) } else { if err != nil { glog.Warningf("azureDisk - Failed to check if the disk %s formatted with error %s, will attach anyway", disk, err) diff --git a/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd.go b/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd.go index bb45bf3b4d96..09e4cdb63532 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd.go @@ -28,8 +28,8 @@ import ( // interface exposed by the cloud provider implementing Disk functionlity type DiskController interface { - CreateBlobDisk(dataDiskName string, storageAccountType storage.SkuName, sizeGB int, forceStandAlone bool) (string, error) - DeleteBlobDisk(diskUri string, wasForced bool) error + CreateBlobDisk(dataDiskName string, storageAccountType storage.SkuName, sizeGB int) (string, error) + DeleteBlobDisk(diskUri string) error CreateManagedDisk(diskName string, storageAccountType storage.SkuName, sizeGB int, tags map[string]string) (string, error) DeleteManagedDisk(diskURI string) error @@ -48,7 +48,7 @@ type DiskController interface { GetNextDiskLun(nodeName types.NodeName) (int32, error) // Create a VHD blob - CreateVolume(name, storageAccount string, storageAccountType storage.SkuName, location string, requestGB int) (string, string, int, error) + CreateVolume(name, storageAccount, storageAccountType, location string, requestGB int) (string, string, int, error) // Delete a VHD blob DeleteVolume(diskURI string) error } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_mounter.go b/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_mounter.go index 74f3d39f0838..ccec2d25fb05 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_mounter.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_mounter.go @@ -43,10 +43,16 @@ var _ volume.Unmounter = &azureDiskUnmounter{} var _ volume.Mounter = &azureDiskMounter{} func (m *azureDiskMounter) GetAttributes() volume.Attributes { - volumeSource, _ := getVolumeSource(m.spec) + readOnly := false + volumeSource, err := getVolumeSource(m.spec) + if err != nil { + glog.Infof("azureDisk - mounter failed to get volume source for spec %s %v", m.spec.Name(), err) + } else if volumeSource.ReadOnly != nil { + readOnly = *volumeSource.ReadOnly + } return volume.Attributes{ - ReadOnly: *volumeSource.ReadOnly, - Managed: !*volumeSource.ReadOnly, + ReadOnly: readOnly, + Managed: !readOnly, SupportsSELinux: true, } } @@ -94,7 +100,7 @@ func (m *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error { options := []string{"bind"} - if *volumeSource.ReadOnly { + if volumeSource.ReadOnly != nil && *volumeSource.ReadOnly { options = append(options, "ro") } @@ -138,7 +144,7 @@ func (m *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error { return mountErr } - if !*volumeSource.ReadOnly { + if volumeSource.ReadOnly == nil || !*volumeSource.ReadOnly { volume.SetVolumeOwnership(m, fsGroup) } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_provision.go b/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_provision.go index d037f636d803..5f11743d52bd 
100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_provision.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_provision.go @@ -55,14 +55,13 @@ func (d *azureDiskDeleter) Delete() error { return err } - wasStandAlone := (*volumeSource.Kind != v1.AzureSharedBlobDisk) managed := (*volumeSource.Kind == v1.AzureManagedDisk) if managed { return diskController.DeleteManagedDisk(volumeSource.DataDiskURI) } - return diskController.DeleteBlobDisk(volumeSource.DataDiskURI, wasStandAlone) + return diskController.DeleteBlobDisk(volumeSource.DataDiskURI) } func (p *azureDiskProvisioner) Provision() (*v1.PersistentVolume, error) { @@ -149,26 +148,13 @@ func (p *azureDiskProvisioner) Provision() (*v1.PersistentVolume, error) { return nil, err } } else { - forceStandAlone := (kind == v1.AzureDedicatedBlobDisk) if kind == v1.AzureDedicatedBlobDisk { - if location != "" && account != "" { - // use dedicated kind (by default) for compatibility - _, diskURI, _, err = diskController.CreateVolume(name, account, skuName, location, requestGB) - if err != nil { - return nil, err - } - } else { - if location != "" || account != "" { - return nil, fmt.Errorf("AzureDisk - location(%s) and account(%s) must be both empty or specified for dedicated kind, only one value specified is not allowed", - location, account) - } - diskURI, err = diskController.CreateBlobDisk(name, skuName, requestGB, forceStandAlone) - if err != nil { - return nil, err - } + _, diskURI, _, err = diskController.CreateVolume(name, account, storageAccountType, location, requestGB) + if err != nil { + return nil, err } } else { - diskURI, err = diskController.CreateBlobDisk(name, skuName, requestGB, forceStandAlone) + diskURI, err = diskController.CreateBlobDisk(name, skuName, requestGB) if err != nil { return nil, err } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/azure_file/BUILD b/vendor/k8s.io/kubernetes/pkg/volume/azure_file/BUILD index c80778dfeec4..fec0c0a95043 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/azure_file/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/volume/azure_file/BUILD @@ -14,6 +14,7 @@ go_library( "azure_util.go", "doc.go", ], + importpath = "k8s.io/kubernetes/pkg/volume/azure_file", deps = [ "//pkg/cloudprovider:go_default_library", "//pkg/cloudprovider/providers/azure:go_default_library", @@ -34,6 +35,7 @@ go_library( go_test( name = "go_default_test", srcs = ["azure_file_test.go"], + importpath = "k8s.io/kubernetes/pkg/volume/azure_file", library = ":go_default_library", deps = [ "//pkg/cloudprovider/providers/azure:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/volume/azure_file/OWNERS b/vendor/k8s.io/kubernetes/pkg/volume/azure_file/OWNERS index 1d61e4196dd3..dbe4f0a2053f 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/azure_file/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/volume/azure_file/OWNERS @@ -2,14 +2,10 @@ approvers: - brendandburns - rootfs reviewers: -- justinsb - rootfs - brendandburns -- thockin -- smarterclayton -- derekwaynecarr -- pmorie - saad-ali - jsafrane - jingxu97 - msau42 +- andyzhangx diff --git a/vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_file.go b/vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_file.go index e418877c0797..e81d10de85fb 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_file.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_file.go @@ -218,15 +218,16 @@ func (b *azureFileMounter) SetUpAt(dir string, fsGroup *int64) error { source = fmt.Sprintf("%s%s%s.file.%s%s%s", osSeparator, osSeparator, 
accountName, getStorageEndpointSuffix(b.plugin.host.GetCloudProvider()), osSeparator, b.shareName) if runtime.GOOS == "windows" { - mountOptions = []string{accountName, accountKey} + mountOptions = []string{fmt.Sprintf("AZURE\\%s", accountName), accountKey} } else { os.MkdirAll(dir, 0700) // parameters suggested by https://azure.microsoft.com/en-us/documentation/articles/storage-how-to-use-files-linux/ - options := []string{fmt.Sprintf("vers=3.0,username=%s,password=%s,dir_mode=0700,file_mode=0700", accountName, accountKey)} + options := []string{fmt.Sprintf("username=%s,password=%s", accountName, accountKey)} if b.readOnly { options = append(options, "ro") } mountOptions = volume.JoinMountOptions(b.mountOptions, options) + mountOptions = appendDefaultMountOptions(mountOptions) } err = b.mounter.Mount(source, dir, "cifs", mountOptions) diff --git a/vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_util.go b/vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_util.go index 41caf86deff6..038d78518df3 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_util.go @@ -18,6 +18,7 @@ package azure_file import ( "fmt" + "strings" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -25,6 +26,15 @@ import ( "k8s.io/kubernetes/pkg/volume" ) +const ( + fileMode = "file_mode" + dirMode = "dir_mode" + vers = "vers" + defaultFileMode = "0700" + defaultDirMode = "0700" + defaultVers = "3.0" +) + // Abstract interface to azure file operations. type azureUtil interface { GetAzureCredentials(host volume.VolumeHost, nameSpace, secretName string) (string, string, error) @@ -84,3 +94,36 @@ func (s *azureSvc) SetAzureCredentials(host volume.VolumeHost, nameSpace, accoun } return secretName, err } + +// check whether mountOptions contain file_mode and dir_mode, if not, append default mode +func appendDefaultMountOptions(mountOptions []string) []string { + fileModeFlag := false + dirModeFlag := false + versFlag := false + + for _, mountOption := range mountOptions { + if strings.HasPrefix(mountOption, fileMode) { + fileModeFlag = true + } + if strings.HasPrefix(mountOption, dirMode) { + dirModeFlag = true + } + if strings.HasPrefix(mountOption, vers) { + versFlag = true + } + } + + allMountOptions := mountOptions + if !fileModeFlag { + allMountOptions = append(allMountOptions, fmt.Sprintf("%s=%s", fileMode, defaultFileMode)) + } + + if !dirModeFlag { + allMountOptions = append(allMountOptions, fmt.Sprintf("%s=%s", dirMode, defaultDirMode)) + } + + if !versFlag { + allMountOptions = append(allMountOptions, fmt.Sprintf("%s=%s", vers, defaultVers)) + } + return allMountOptions +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/cephfs/BUILD b/vendor/k8s.io/kubernetes/pkg/volume/cephfs/BUILD index 0680a740a8c1..a61e1fb8595b 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/cephfs/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/volume/cephfs/BUILD @@ -12,6 +12,7 @@ go_library( "cephfs.go", "doc.go", ], + importpath = "k8s.io/kubernetes/pkg/volume/cephfs", deps = [ "//pkg/util/mount:go_default_library", "//pkg/util/strings:go_default_library", @@ -27,6 +28,7 @@ go_library( go_test( name = "go_default_test", srcs = ["cephfs_test.go"], + importpath = "k8s.io/kubernetes/pkg/volume/cephfs", library = ":go_default_library", deps = [ "//pkg/util/mount:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/volume/cephfs/OWNERS b/vendor/k8s.io/kubernetes/pkg/volume/cephfs/OWNERS index a8a2447c26dc..35dc489f9050 100644 --- 
a/vendor/k8s.io/kubernetes/pkg/volume/cephfs/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/volume/cephfs/OWNERS @@ -2,13 +2,7 @@ approvers: - rootfs - saad-ali reviewers: -- justinsb - rootfs -- brendandburns -- thockin -- smarterclayton -- derekwaynecarr -- pmorie - saad-ali - jsafrane - jingxu97 diff --git a/vendor/k8s.io/kubernetes/pkg/volume/cinder/BUILD b/vendor/k8s.io/kubernetes/pkg/volume/cinder/BUILD index d89b25de895a..35ef7a2a21f8 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/cinder/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/volume/cinder/BUILD @@ -14,10 +14,10 @@ go_library( "cinder_util.go", "doc.go", ], + importpath = "k8s.io/kubernetes/pkg/volume/cinder", deps = [ "//pkg/cloudprovider:go_default_library", "//pkg/cloudprovider/providers/openstack:go_default_library", - "//pkg/cloudprovider/providers/rackspace:go_default_library", "//pkg/kubelet/apis:go_default_library", "//pkg/util/keymutex:go_default_library", "//pkg/util/mount:go_default_library", @@ -43,6 +43,7 @@ go_test( "attacher_test.go", "cinder_test.go", ], + importpath = "k8s.io/kubernetes/pkg/volume/cinder", library = ":go_default_library", deps = [ "//pkg/cloudprovider:go_default_library", @@ -51,6 +52,7 @@ go_test( "//pkg/volume/testing:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/client-go/util/testing:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/pkg/volume/cinder/OWNERS b/vendor/k8s.io/kubernetes/pkg/volume/cinder/OWNERS index 2c5a28e05fc8..d63338e28f07 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/cinder/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/volume/cinder/OWNERS @@ -2,16 +2,12 @@ approvers: - jsafrane - anguslees - dims +- FengyunPan reviewers: - anguslees -- justinsb - rootfs -- brendandburns -- thockin -- smarterclayton -- derekwaynecarr -- pmorie - saad-ali - jsafrane - jingxu97 - msau42 +- FengyunPan diff --git a/vendor/k8s.io/kubernetes/pkg/volume/cinder/attacher.go b/vendor/k8s.io/kubernetes/pkg/volume/cinder/attacher.go index f6d1e4be6990..dab97b9b9c46 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/cinder/attacher.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/cinder/attacher.go @@ -240,7 +240,6 @@ func (attacher *cinderDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath defer timer.Stop() for { - probeAttachedVolume() select { case <-ticker.C: glog.V(5).Infof("Checking Cinder disk %q is attached.", volumeID) @@ -374,8 +373,8 @@ func (detacher *cinderDiskDetacher) waitDiskDetached(instanceID, volumeID string return err } -func (detacher *cinderDiskDetacher) Detach(deviceMountPath string, nodeName types.NodeName) error { - volumeID := path.Base(deviceMountPath) +func (detacher *cinderDiskDetacher) Detach(volumeName string, nodeName types.NodeName) error { + volumeID := path.Base(volumeName) instances, res := detacher.cinderProvider.Instances() if !res { return fmt.Errorf("failed to list openstack instances") diff --git a/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder.go b/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder.go index 45b8d32825bf..c5b785cd0abd 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder.go @@ -29,7 +29,6 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/cloudprovider/providers/openstack" - 
"k8s.io/kubernetes/pkg/cloudprovider/providers/rackspace" "k8s.io/kubernetes/pkg/util/keymutex" "k8s.io/kubernetes/pkg/util/mount" kstrings "k8s.io/kubernetes/pkg/util/strings" @@ -47,7 +46,7 @@ type CinderProvider interface { AttachDisk(instanceID, volumeID string) (string, error) DetachDisk(instanceID, volumeID string) error DeleteVolume(volumeID string) error - CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, error) + CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, bool, error) GetDevicePath(volumeID string) string InstanceID() (string, error) GetAttachmentDiskPath(instanceID, volumeID string) (string, error) @@ -56,6 +55,7 @@ type CinderProvider interface { DisksAreAttached(instanceID string, volumeIDs []string) (map[string]bool, error) ShouldTrustDevicePath() bool Instances() (cloudprovider.Instances, bool) + ExpandVolume(volumeID string, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error) } type cinderPlugin struct { @@ -188,9 +188,6 @@ func (plugin *cinderPlugin) newProvisionerInternal(options volume.VolumeOptions, } func getCloudProvider(cloudProvider cloudprovider.Interface) (CinderProvider, error) { - if cloud, ok := cloudProvider.(*rackspace.Rackspace); ok && cloud != nil { - return cloud, nil - } if cloud, ok := cloudProvider.(*openstack.OpenStack); ok && cloud != nil { return cloud, nil } @@ -205,12 +202,10 @@ func (plugin *cinderPlugin) getCloudProvider() (CinderProvider, error) { } switch cloud := cloud.(type) { - case *rackspace.Rackspace: - return cloud, nil case *openstack.OpenStack: return cloud, nil default: - return nil, errors.New("Invalid cloud provider: expected OpenStack or Rackspace.") + return nil, errors.New("Invalid cloud provider: expected OpenStack.") } } @@ -233,6 +228,31 @@ func (plugin *cinderPlugin) ConstructVolumeSpec(volumeName, mountPath string) (* return volume.NewSpecFromVolume(cinderVolume), nil } +var _ volume.ExpandableVolumePlugin = &cinderPlugin{} + +func (plugin *cinderPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize resource.Quantity, oldSize resource.Quantity) (resource.Quantity, error) { + cinder, _, err := getVolumeSource(spec) + if err != nil { + return oldSize, err + } + cloud, err := plugin.getCloudProvider() + if err != nil { + return oldSize, err + } + + expandedSize, err := cloud.ExpandVolume(cinder.VolumeID, oldSize, newSize) + if err != nil { + return oldSize, err + } + + glog.V(2).Infof("volume %s expanded to new size %d successfully", cinder.VolumeID, int(newSize.Value())) + return expandedSize, nil +} + +func (plugin *cinderPlugin) RequiresFSResize() bool { + return true +} + // Abstract interface to PD operations. type cdManager interface { // Attaches the disk to the kubelet's host machine. 
diff --git a/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_util.go b/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_util.go index 9a4b824fabcc..661ad3f059cf 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/cinder/cinder_util.go @@ -144,7 +144,7 @@ func getZonesFromNodes(kubeClient clientset.Interface) (sets.String, error) { // TODO: caching, currently it is overkill because it calls this function // only when it creates dynamic PV zones := make(sets.String) - nodes, err := kubeClient.Core().Nodes().List(metav1.ListOptions{}) + nodes, err := kubeClient.CoreV1().Nodes().List(metav1.ListOptions{}) if err != nil { glog.V(2).Infof("Error listing nodes") return zones, err @@ -204,7 +204,7 @@ func (util *CinderDiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID s } } - volumeID, volumeAZ, errr := cloud.CreateVolume(name, volSizeGB, vtype, availability, c.options.CloudTags) + volumeID, volumeAZ, IgnoreVolumeAZ, errr := cloud.CreateVolume(name, volSizeGB, vtype, availability, c.options.CloudTags) if errr != nil { glog.V(2).Infof("Error creating cinder volume: %v", errr) return "", 0, nil, "", errr @@ -213,8 +213,9 @@ func (util *CinderDiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID s // these are needed that pod is spawning to same AZ volumeLabels = make(map[string]string) - volumeLabels[kubeletapis.LabelZoneFailureDomain] = volumeAZ - + if IgnoreVolumeAZ == false { + volumeLabels[kubeletapis.LabelZoneFailureDomain] = volumeAZ + } return volumeID, volSizeGB, volumeLabels, fstype, nil } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/configmap/BUILD b/vendor/k8s.io/kubernetes/pkg/volume/configmap/BUILD index 9e3537b62916..7cdad062d837 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/configmap/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/volume/configmap/BUILD @@ -12,6 +12,7 @@ go_library( "configmap.go", "doc.go", ], + importpath = "k8s.io/kubernetes/pkg/volume/configmap", deps = [ "//pkg/util/io:go_default_library", "//pkg/util/mount:go_default_library", @@ -29,6 +30,7 @@ go_library( go_test( name = "go_default_test", srcs = ["configmap_test.go"], + importpath = "k8s.io/kubernetes/pkg/volume/configmap", library = ":go_default_library", deps = [ "//pkg/volume:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/volume/configmap/OWNERS b/vendor/k8s.io/kubernetes/pkg/volume/configmap/OWNERS index 72968b277dad..baaff6ff460f 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/configmap/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/volume/configmap/OWNERS @@ -7,13 +7,7 @@ reviewers: - ivan4th - rata - sjenning -- thockin -- smarterclayton -- brendandburns -- derekwaynecarr -- pmorie - saad-ali -- justinsb - jsafrane - rootfs - jingxu97 diff --git a/vendor/k8s.io/kubernetes/pkg/volume/csi/BUILD b/vendor/k8s.io/kubernetes/pkg/volume/csi/BUILD new file mode 100644 index 000000000000..88a2e857fe59 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/volume/csi/BUILD @@ -0,0 +1,74 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "csi_attacher.go", + "csi_client.go", + "csi_mounter.go", + "csi_plugin.go", + ], + importpath = "k8s.io/kubernetes/pkg/volume/csi", + visibility = ["//visibility:public"], + deps = [ + "//pkg/util/mount:go_default_library", + "//pkg/util/strings:go_default_library", + "//pkg/volume:go_default_library", + "//pkg/volume/util:go_default_library", + 
"//vendor/github.com/container-storage-interface/spec/lib/go/csi:go_default_library", + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/api/storage/v1alpha1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "csi_attacher_test.go", + "csi_client_test.go", + "csi_mounter_test.go", + "csi_plugin_test.go", + ], + importpath = "k8s.io/kubernetes/pkg/volume/csi", + library = ":go_default_library", + deps = [ + "//pkg/volume:go_default_library", + "//pkg/volume/csi/fake:go_default_library", + "//pkg/volume/testing:go_default_library", + "//vendor/github.com/container-storage-interface/spec/lib/go/csi:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/api/storage/v1alpha1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library", + "//vendor/k8s.io/client-go/util/testing:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/volume/csi/fake:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/volume/csi/OWNERS b/vendor/k8s.io/kubernetes/pkg/volume/csi/OWNERS new file mode 100644 index 000000000000..7d0605ba3dde --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/volume/csi/OWNERS @@ -0,0 +1,4 @@ +approvers: +- jsafrane +- saad-ali +- vladimirvivien diff --git a/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_attacher.go b/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_attacher.go new file mode 100644 index 000000000000..2c0b577b8f00 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_attacher.go @@ -0,0 +1,272 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package csi + +import ( + "crypto/sha256" + "errors" + "fmt" + "strings" + "time" + + "github.com/golang/glog" + + "k8s.io/api/core/v1" + storage "k8s.io/api/storage/v1alpha1" + apierrs "k8s.io/apimachinery/pkg/api/errors" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/pkg/volume" +) + +type csiAttacher struct { + plugin *csiPlugin + k8s kubernetes.Interface + waitSleepTime time.Duration +} + +// volume.Attacher methods +var _ volume.Attacher = &csiAttacher{} + +func (c *csiAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) { + if spec == nil { + glog.Error(log("attacher.Attach missing volume.Spec")) + return "", errors.New("missing spec") + } + + csiSource, err := getCSISourceFromSpec(spec) + if err != nil { + glog.Error(log("attacher.Attach failed to get CSI persistent source: %v", err)) + return "", err + } + + node := string(nodeName) + pvName := spec.PersistentVolume.GetName() + attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, node) + + attachment := &storage.VolumeAttachment{ + ObjectMeta: meta.ObjectMeta{ + Name: attachID, + }, + Spec: storage.VolumeAttachmentSpec{ + NodeName: node, + Attacher: csiSource.Driver, + Source: storage.VolumeAttachmentSource{ + PersistentVolumeName: &pvName, + }, + }, + Status: storage.VolumeAttachmentStatus{Attached: false}, + } + + _, err = c.k8s.StorageV1alpha1().VolumeAttachments().Create(attachment) + alreadyExist := false + if err != nil { + if !apierrs.IsAlreadyExists(err) { + glog.Error(log("attacher.Attach failed: %v", err)) + return "", err + } + alreadyExist = true + } + + if alreadyExist { + glog.V(4).Info(log("attachment [%v] for volume [%v] already exists (will not be recreated)", attachID, csiSource.VolumeHandle)) + } else { + glog.V(4).Info(log("attachment [%v] for volume [%v] created successfully", attachID, csiSource.VolumeHandle)) + } + + // probe for attachment update here + // NOTE: any error from waiting for attachment is logged only. This is because + // the primariy intent of the enclosing method is to create VolumeAttachment. + // DONOT return that error here as it is mitigated in attacher.WaitForAttach. 
+ volAttachmentOK := true + if _, err := c.waitForVolumeAttachment(csiSource.VolumeHandle, attachID, csiTimeout); err != nil { + volAttachmentOK = false + glog.Error(log("attacher.Attach attempted to wait for attachment to be ready, but failed with: %v", err)) + } + + glog.V(4).Info(log("attacher.Attach finished OK with VolumeAttachment verified=%t: attachment object [%s]", volAttachmentOK, attachID)) + + return attachID, nil +} + +func (c *csiAttacher) WaitForAttach(spec *volume.Spec, attachID string, pod *v1.Pod, timeout time.Duration) (string, error) { + source, err := getCSISourceFromSpec(spec) + if err != nil { + glog.Error(log("attacher.WaitForAttach failed to extract CSI volume source: %v", err)) + return "", err + } + + return c.waitForVolumeAttachment(source.VolumeHandle, attachID, timeout) +} + +func (c *csiAttacher) waitForVolumeAttachment(volumeHandle, attachID string, timeout time.Duration) (string, error) { + glog.V(4).Info(log("probing for updates from CSI driver for [attachment.ID=%v]", attachID)) + + ticker := time.NewTicker(c.waitSleepTime) + defer ticker.Stop() + + timer := time.NewTimer(timeout) // TODO (vladimirvivien) investigate making this configurable + defer timer.Stop() + + //TODO (vladimirvivien) instead of polling api-server, change to a api-server watch + for { + select { + case <-ticker.C: + glog.V(4).Info(log("probing VolumeAttachment [id=%v]", attachID)) + attach, err := c.k8s.StorageV1alpha1().VolumeAttachments().Get(attachID, meta.GetOptions{}) + if err != nil { + glog.Error(log("attacher.WaitForAttach failed (will continue to try): %v", err)) + continue + } + // if being deleted, fail fast + if attach.GetDeletionTimestamp() != nil { + glog.Error(log("VolumeAttachment [%s] has deletion timestamp, will not continue to wait for attachment", attachID)) + return "", errors.New("volume attachment is being deleted") + } + // attachment OK + if attach.Status.Attached { + return attachID, nil + } + // driver reports attach error + attachErr := attach.Status.AttachError + if attachErr != nil { + glog.Error(log("attachment for %v failed: %v", volumeHandle, attachErr.Message)) + return "", errors.New(attachErr.Message) + } + case <-timer.C: + glog.Error(log("attacher.WaitForAttach timeout after %v [volume=%v; attachment.ID=%v]", timeout, volumeHandle, attachID)) + return "", fmt.Errorf("attachment timeout for volume %v", volumeHandle) + } + } +} + +func (c *csiAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) { + glog.V(4).Info(log("probing attachment status for %d volume(s) ", len(specs))) + + attached := make(map[*volume.Spec]bool) + + for _, spec := range specs { + if spec == nil { + glog.Error(log("attacher.VolumesAreAttached missing volume.Spec")) + return nil, errors.New("missing spec") + } + source, err := getCSISourceFromSpec(spec) + if err != nil { + glog.Error(log("attacher.VolumesAreAttached failed: %v", err)) + continue + } + + attachID := getAttachmentName(source.VolumeHandle, source.Driver, string(nodeName)) + glog.V(4).Info(log("probing attachment status for VolumeAttachment %v", attachID)) + attach, err := c.k8s.StorageV1alpha1().VolumeAttachments().Get(attachID, meta.GetOptions{}) + if err != nil { + glog.Error(log("attacher.VolumesAreAttached failed for attach.ID=%v: %v", attachID, err)) + continue + } + glog.V(4).Info(log("attacher.VolumesAreAttached attachment [%v] has status.attached=%t", attachID, attach.Status.Attached)) + attached[spec] = attach.Status.Attached + } + + return attached, 
nil +} + +func (c *csiAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) { + glog.V(4).Info(log("attacher.GetDeviceMountPath is not implemented")) + return "", nil +} + +func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error { + glog.V(4).Info(log("attacher.MountDevice is not implemented")) + return nil +} + +var _ volume.Detacher = &csiAttacher{} + +func (c *csiAttacher) Detach(volumeName string, nodeName types.NodeName) error { + // volumeName in format driverNamevolumeHandle generated by plugin.GetVolumeName() + if volumeName == "" { + glog.Error(log("detacher.Detach missing value for parameter volumeName")) + return errors.New("missing exepected parameter volumeName") + } + parts := strings.Split(volumeName, volNameSep) + if len(parts) != 2 { + glog.Error(log("detacher.Detach insufficient info encoded in volumeName")) + return errors.New("volumeName missing expected data") + } + + driverName := parts[0] + volID := parts[1] + attachID := getAttachmentName(volID, driverName, string(nodeName)) + if err := c.k8s.StorageV1alpha1().VolumeAttachments().Delete(attachID, nil); err != nil { + glog.Error(log("detacher.Detach failed to delete VolumeAttachment [%s]: %v", attachID, err)) + return err + } + + glog.V(4).Info(log("detacher deleted ok VolumeAttachment.ID=%s", attachID)) + return c.waitForVolumeDetachment(volID, attachID) +} + +func (c *csiAttacher) waitForVolumeDetachment(volumeHandle, attachID string) error { + glog.V(4).Info(log("probing for updates from CSI driver for [attachment.ID=%v]", attachID)) + + ticker := time.NewTicker(c.waitSleepTime) + defer ticker.Stop() + + timeout := c.waitSleepTime * 10 + timer := time.NewTimer(timeout) // TODO (vladimirvivien) investigate making this configurable + defer timer.Stop() + + //TODO (vladimirvivien) instead of polling api-server, change to a api-server watch + for { + select { + case <-ticker.C: + glog.V(4).Info(log("probing VolumeAttachment [id=%v]", attachID)) + attach, err := c.k8s.StorageV1alpha1().VolumeAttachments().Get(attachID, meta.GetOptions{}) + if err != nil { + if apierrs.IsNotFound(err) { + //object deleted or never existed, done + glog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] not found, object deleted", attachID, volumeHandle)) + return nil + } + glog.Error(log("detacher.WaitForDetach failed for volume [%s] (will continue to try): %v", volumeHandle, err)) + continue + } + + // driver reports attach error + detachErr := attach.Status.DetachError + if detachErr != nil { + glog.Error(log("detachment for VolumeAttachment [%v] for volume [%s] failed: %v", attachID, volumeHandle, detachErr.Message)) + return errors.New(detachErr.Message) + } + case <-timer.C: + glog.Error(log("detacher.WaitForDetach timeout after %v [volume=%v; attachment.ID=%v]", timeout, volumeHandle, attachID)) + return fmt.Errorf("detachment timed out for volume %v", volumeHandle) + } + } +} + +func (c *csiAttacher) UnmountDevice(deviceMountPath string) error { + glog.V(4).Info(log("detacher.UnmountDevice is not implemented")) + return nil +} + +// getAttachmentName returns csi- +func getAttachmentName(volName, csiDriverName, nodeName string) string { + result := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", volName, csiDriverName, nodeName))) + return fmt.Sprintf("csi-%x", result) +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_client.go b/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_client.go new file mode 100644 index 000000000000..ee1776ce4f83 --- /dev/null +++ 
b/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_client.go @@ -0,0 +1,244 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package csi + +import ( + "bytes" + "errors" + "fmt" + "net" + "time" + + csipb "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/golang/glog" + grpctx "golang.org/x/net/context" + "google.golang.org/grpc" + api "k8s.io/api/core/v1" +) + +type csiClient interface { + AssertSupportedVersion(ctx grpctx.Context, ver *csipb.Version) error + NodeProbe(ctx grpctx.Context, ver *csipb.Version) error + NodePublishVolume( + ctx grpctx.Context, + volumeid string, + readOnly bool, + targetPath string, + accessMode api.PersistentVolumeAccessMode, + volumeInfo map[string]string, + volumeAttribs map[string]string, + fsType string, + ) error + NodeUnpublishVolume(ctx grpctx.Context, volID string, targetPath string) error +} + +// csiClient encapsulates all csi-plugin methods +type csiDriverClient struct { + network string + addr string + conn *grpc.ClientConn + idClient csipb.IdentityClient + nodeClient csipb.NodeClient + ctrlClient csipb.ControllerClient + versionAsserted bool + versionSupported bool + publishAsserted bool + publishCapable bool +} + +func newCsiDriverClient(network, addr string) *csiDriverClient { + return &csiDriverClient{network: network, addr: addr} +} + +// assertConnection ensures a valid connection has been established +// if not, it creates a new connection and associated clients +func (c *csiDriverClient) assertConnection() error { + if c.conn == nil { + conn, err := grpc.Dial( + c.addr, + grpc.WithInsecure(), + grpc.WithDialer(func(target string, timeout time.Duration) (net.Conn, error) { + return net.Dial(c.network, target) + }), + ) + if err != nil { + return err + } + c.conn = conn + c.idClient = csipb.NewIdentityClient(conn) + c.nodeClient = csipb.NewNodeClient(conn) + c.ctrlClient = csipb.NewControllerClient(conn) + + // set supported version + } + + return nil +} + +// AssertSupportedVersion ensures driver supports specified spec version. +// If version is not supported, the assertion fails with an error. +// This test should be done early during the storage operation flow to avoid +// unnecessary calls later. 
+func (c *csiDriverClient) AssertSupportedVersion(ctx grpctx.Context, ver *csipb.Version) error { + if c.versionAsserted { + if !c.versionSupported { + return fmt.Errorf("version %s not supported", verToStr(ver)) + } + return nil + } + + if err := c.assertConnection(); err != nil { + c.versionAsserted = false + return err + } + + glog.V(4).Info(log("asserting version supported by driver")) + rsp, err := c.idClient.GetSupportedVersions(ctx, &csipb.GetSupportedVersionsRequest{}) + if err != nil { + c.versionAsserted = false + return err + } + + supported := false + vers := rsp.GetSupportedVersions() + glog.V(4).Info(log("driver reports %d versions supported: %s", len(vers), versToStr(vers))) + + for _, v := range vers { + //TODO (vladimirvivien) use more lenient/heuristic for exact or match of ranges etc + if verToStr(v) == verToStr(ver) { + supported = true + break + } + } + + c.versionAsserted = true + c.versionSupported = supported + + if !supported { + return fmt.Errorf("version %s not supported", verToStr(ver)) + } + + glog.V(4).Info(log("version %s supported", verToStr(ver))) + return nil +} + +func (c *csiDriverClient) NodeProbe(ctx grpctx.Context, ver *csipb.Version) error { + glog.V(4).Info(log("sending NodeProbe rpc call to csi driver: [version %v]", ver)) + req := &csipb.NodeProbeRequest{Version: ver} + _, err := c.nodeClient.NodeProbe(ctx, req) + return err +} + +func (c *csiDriverClient) NodePublishVolume( + ctx grpctx.Context, + volID string, + readOnly bool, + targetPath string, + accessMode api.PersistentVolumeAccessMode, + volumeInfo map[string]string, + volumeAttribs map[string]string, + fsType string, +) error { + glog.V(4).Info(log("calling NodePublishVolume rpc [volid=%s,target_path=%s]", volID, targetPath)) + if volID == "" { + return errors.New("missing volume id") + } + if targetPath == "" { + return errors.New("missing target path") + } + if err := c.assertConnection(); err != nil { + glog.Errorf("%v: failed to assert a connection: %v", csiPluginName, err) + return err + } + + req := &csipb.NodePublishVolumeRequest{ + Version: csiVersion, + VolumeId: volID, + TargetPath: targetPath, + Readonly: readOnly, + PublishVolumeInfo: volumeInfo, + VolumeAttributes: volumeAttribs, + + VolumeCapability: &csipb.VolumeCapability{ + AccessMode: &csipb.VolumeCapability_AccessMode{ + Mode: asCSIAccessMode(accessMode), + }, + AccessType: &csipb.VolumeCapability_Mount{ + Mount: &csipb.VolumeCapability_MountVolume{ + FsType: fsType, + }, + }, + }, + } + + _, err := c.nodeClient.NodePublishVolume(ctx, req) + return err +} + +func (c *csiDriverClient) NodeUnpublishVolume(ctx grpctx.Context, volID string, targetPath string) error { + glog.V(4).Info(log("calling NodeUnpublishVolume rpc: [volid=%s, target_path=%s", volID, targetPath)) + if volID == "" { + return errors.New("missing volume id") + } + if targetPath == "" { + return errors.New("missing target path") + } + if err := c.assertConnection(); err != nil { + glog.Error(log("failed to assert a connection: %v", err)) + return err + } + + req := &csipb.NodeUnpublishVolumeRequest{ + Version: csiVersion, + VolumeId: volID, + TargetPath: targetPath, + } + + _, err := c.nodeClient.NodeUnpublishVolume(ctx, req) + return err +} + +func asCSIAccessMode(am api.PersistentVolumeAccessMode) csipb.VolumeCapability_AccessMode_Mode { + switch am { + case api.ReadWriteOnce: + return csipb.VolumeCapability_AccessMode_SINGLE_NODE_WRITER + case api.ReadOnlyMany: + return csipb.VolumeCapability_AccessMode_MULTI_NODE_SINGLE_WRITER + case api.ReadWriteMany: 
+ return csipb.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER + } + return csipb.VolumeCapability_AccessMode_UNKNOWN +} + +func verToStr(ver *csipb.Version) string { + if ver == nil { + return "" + } + return fmt.Sprintf("%d.%d.%d", ver.GetMajor(), ver.GetMinor(), ver.GetPatch()) +} + +func versToStr(vers []*csipb.Version) string { + if vers == nil { + return "" + } + str := bytes.NewBufferString("[") + for _, v := range vers { + str.WriteString(fmt.Sprintf("{%s};", verToStr(v))) + } + str.WriteString("]") + return str.String() +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_mounter.go b/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_mounter.go new file mode 100644 index 000000000000..84d10362bdbe --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_mounter.go @@ -0,0 +1,401 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package csi + +import ( + "encoding/json" + "errors" + "fmt" + "os" + "path" + + "github.com/golang/glog" + grpctx "golang.org/x/net/context" + api "k8s.io/api/core/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes" + kstrings "k8s.io/kubernetes/pkg/util/strings" + "k8s.io/kubernetes/pkg/volume" + "k8s.io/kubernetes/pkg/volume/util" +) + +//TODO (vladimirvivien) move this in a central loc later +var ( + volDataKey = struct { + specVolID, + volHandle, + driverName, + nodeName, + attachmentID string + }{ + "specVolID", + "volumeHandle", + "driverName", + "nodeName", + "attachmentID", + } +) + +type csiMountMgr struct { + k8s kubernetes.Interface + csiClient csiClient + plugin *csiPlugin + driverName string + volumeID string + specVolumeID string + readOnly bool + spec *volume.Spec + pod *api.Pod + podUID types.UID + options volume.VolumeOptions + volumeInfo map[string]string + volume.MetricsNil +} + +// volume.Volume methods +var _ volume.Volume = &csiMountMgr{} + +func (c *csiMountMgr) GetPath() string { + dir := path.Join(getTargetPath(c.podUID, c.specVolumeID, c.plugin.host), "/mount") + glog.V(4).Info(log("mounter.GetPath generated [%s]", dir)) + return dir +} + +func getTargetPath(uid types.UID, specVolumeID string, host volume.VolumeHost) string { + specVolID := kstrings.EscapeQualifiedNameForDisk(specVolumeID) + return host.GetPodVolumeDir(uid, kstrings.EscapeQualifiedNameForDisk(csiPluginName), specVolID) +} + +// volume.Mounter methods +var _ volume.Mounter = &csiMountMgr{} + +func (c *csiMountMgr) CanMount() error { + //TODO (vladimirvivien) use this method to probe controller using CSI.NodeProbe() call + // to ensure Node service is ready in the CSI plugin + return nil +} + +func (c *csiMountMgr) SetUp(fsGroup *int64) error { + return c.SetUpAt(c.GetPath(), fsGroup) +} + +func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error { + glog.V(4).Infof(log("Mounter.SetUpAt(%s)", dir)) + + mounted, err := isDirMounted(c.plugin, dir) + if err != nil { + glog.Error(log("mounter.SetUpAt failed while checking mount status for dir [%s]", dir)) + 
return err + } + + if mounted { + glog.V(4).Info(log("mounter.SetUpAt skipping mount, dir already mounted [%s]", dir)) + return nil + } + + csiSource, err := getCSISourceFromSpec(c.spec) + if err != nil { + glog.Error(log("mounter.SetUpAt failed to get CSI persistent source: %v", err)) + return err + } + + ctx, cancel := grpctx.WithTimeout(grpctx.Background(), csiTimeout) + defer cancel() + + csi := c.csiClient + nodeName := string(c.plugin.host.GetNodeName()) + attachID := getAttachmentName(csiSource.VolumeHandle, csiSource.Driver, nodeName) + + // ensure version is supported + if err := csi.AssertSupportedVersion(ctx, csiVersion); err != nil { + glog.Error(log("mounter.SetUpAt failed to assert version: %v", err)) + return err + } + + // probe driver + // TODO (vladimirvivien) move probe call where it is done only when it is needed. + if err := csi.NodeProbe(ctx, csiVersion); err != nil { + glog.Error(log("mounter.SetUpAt failed to probe driver: %v", err)) + return err + } + + // search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName + if c.volumeInfo == nil { + attachment, err := c.k8s.StorageV1alpha1().VolumeAttachments().Get(attachID, meta.GetOptions{}) + if err != nil { + glog.Error(log("mounter.SetUpAt failed while getting volume attachment [id=%v]: %v", attachID, err)) + return err + } + + if attachment == nil { + glog.Error(log("unable to find VolumeAttachment [id=%s]", attachID)) + return errors.New("no existing VolumeAttachment found") + } + c.volumeInfo = attachment.Status.AttachmentMetadata + } + + // get volume attributes + // TODO: for alpha vol attributes are passed via PV.Annotations + // Beta will fix that + attribs, err := getVolAttribsFromSpec(c.spec) + if err != nil { + glog.Error(log("mounter.SetUpAt failed to extract volume attributes from PV annotations: %v", err)) + return err + } + + // create target_dir before call to NodePublish + if err := os.MkdirAll(dir, 0750); err != nil { + glog.Error(log("mounter.SetUpAt failed to create dir %#v: %v", dir, err)) + return err + } + glog.V(4).Info(log("created target path successfully [%s]", dir)) + + // persist volume info data for teardown + volData := map[string]string{ + volDataKey.specVolID: c.spec.Name(), + volDataKey.volHandle: csiSource.VolumeHandle, + volDataKey.driverName: csiSource.Driver, + volDataKey.nodeName: nodeName, + volDataKey.attachmentID: attachID, + } + + if err := saveVolumeData(c.plugin, c.podUID, c.spec.Name(), volData); err != nil { + glog.Error(log("mounter.SetUpAt failed to save volume info data: %v", err)) + if err := removeMountDir(c.plugin, dir); err != nil { + glog.Error(log("mounter.SetUpAt failed to remove mount dir after a saveVolumeData() error [%s]: %v", dir, err)) + return err + } + return err + } + + //TODO (vladimirvivien) implement better AccessModes mapping between k8s and CSI + accessMode := api.ReadWriteOnce + if c.spec.PersistentVolume.Spec.AccessModes != nil { + accessMode = c.spec.PersistentVolume.Spec.AccessModes[0] + } + + err = csi.NodePublishVolume( + ctx, + c.volumeID, + c.readOnly, + dir, + accessMode, + c.volumeInfo, + attribs, + "ext4", //TODO needs to be sourced from PV or somewhere else + ) + + if err != nil { + glog.Errorf(log("mounter.SetUpAt failed: %v", err)) + if err := removeMountDir(c.plugin, dir); err != nil { + glog.Error(log("mounter.SetUpAt failed to remove mount dir after a NodePublish() error [%s]: %v", dir, err)) + return err + } + return err + } + + glog.V(4).Infof(log("mounter.SetUp successfully requested NodePublish [%s]", dir)) +
return nil +} + +func (c *csiMountMgr) GetAttributes() volume.Attributes { + return volume.Attributes{ + ReadOnly: c.readOnly, + Managed: !c.readOnly, + SupportsSELinux: false, + } +} + +// volume.Unmounter methods +var _ volume.Unmounter = &csiMountMgr{} + +func (c *csiMountMgr) TearDown() error { + return c.TearDownAt(c.GetPath()) +} +func (c *csiMountMgr) TearDownAt(dir string) error { + glog.V(4).Infof(log("Unmounter.TearDown(%s)", dir)) + + // is dir even mounted ? + // TODO (vladimirvivien) this check may not work for an emptyDir or local storage + // see https://github.com/kubernetes/kubernetes/pull/56836#discussion_r155834524 + mounted, err := isDirMounted(c.plugin, dir) + if err != nil { + glog.Error(log("unmounter.Teardown failed while checking mount status for dir [%s]: %v", dir, err)) + return err + } + + if !mounted { + glog.V(4).Info(log("unmounter.Teardown skipping unmount, dir not mounted [%s]", dir)) + return nil + } + + // load volume info from file + dataDir := path.Dir(dir) // drop off /mount at the end + data, err := loadVolumeData(dataDir, volDataFileName) + if err != nil { + glog.Error(log("unmounter.Teardown failed to load volume data file using dir [%s]: %v", dir, err)) + return err + } + + volID := data[volDataKey.volHandle] + driverName := data[volDataKey.driverName] + + if c.csiClient == nil { + addr := fmt.Sprintf(csiAddrTemplate, driverName) + client := newCsiDriverClient("unix", addr) + glog.V(4).Infof(log("unmounter csiClient setup [volume=%v,driver=%v]", volID, driverName)) + c.csiClient = client + } + + ctx, cancel := grpctx.WithTimeout(grpctx.Background(), csiTimeout) + defer cancel() + + csi := c.csiClient + + // TODO make all assertion calls private within the client itself + if err := csi.AssertSupportedVersion(ctx, csiVersion); err != nil { + glog.Errorf(log("unmounter.Teardown failed to assert version: %v", err)) + return err + } + + if err := csi.NodeUnpublishVolume(ctx, volID, dir); err != nil { + glog.Errorf(log("unmounter.Teardown failed: %v", err)) + return err + } + + // clean mount point dir + if err := removeMountDir(c.plugin, dir); err != nil { + glog.Error(log("unmounter.Teardown failed to clean mount dir [%s]: %v", dir, err)) + return err + } + glog.V(4).Infof(log("unmounter.Teardown successfully unmounted dir [%s]", dir)) + + return nil +} + +// getVolAttribsFromSpec extracts CSI VolumeAttributes information from PV.Annotations +// using key csi.volume.kubernetes.io/volume-attributes.
The annotation value is expected +// to be a JSON-encoded object of form {"key0":"val0",...,"keyN":"valN"} +func getVolAttribsFromSpec(spec *volume.Spec) (map[string]string, error) { + if spec == nil { + return nil, errors.New("missing volume spec") + } + annotations := spec.PersistentVolume.GetAnnotations() + if annotations == nil { + return nil, nil // no annotations found + } + jsonAttribs := annotations[csiVolAttribsAnnotationKey] + if jsonAttribs == "" { + return nil, nil // csi annotation not found + } + attribs := map[string]string{} + if err := json.Unmarshal([]byte(jsonAttribs), &attribs); err != nil { + glog.Error(log("error parsing csi PV.Annotation [%s]=%s: %v", csiVolAttribsAnnotationKey, jsonAttribs, err)) + return nil, err + } + return attribs, nil +} + +// saveVolumeData persists parameter data as a json file using the location +// generated by /var/lib/kubelet/pods/<podUID>/volumes/kubernetes.io~csi/<specVolID>/volume_data.json +func saveVolumeData(p *csiPlugin, podUID types.UID, specVolID string, data map[string]string) error { + dir := getTargetPath(podUID, specVolID, p.host) + dataFilePath := path.Join(dir, volDataFileName) + + file, err := os.Create(dataFilePath) + if err != nil { + glog.Error(log("failed to save volume data file %s: %v", dataFilePath, err)) + return err + } + defer file.Close() + if err := json.NewEncoder(file).Encode(data); err != nil { + glog.Error(log("failed to save volume data file %s: %v", dataFilePath, err)) + return err + } + glog.V(4).Info(log("volume data file saved successfully [%s]", dataFilePath)) + return nil +} + +// loadVolumeData uses the directory returned by mounter.GetPath with value +// /var/lib/kubelet/pods/<podUID>/volumes/kubernetes.io~csi/<specVolID>/mount. +// The function extracts specVolumeID and uses it to load the json data file from dir +// /var/lib/kubelet/pods/<podUID>/volumes/kubernetes.io~csi/<specVolID>/volume_data.json +func loadVolumeData(dir string, fileName string) (map[string]string, error) { + // remove /mount at the end + dataFileName := path.Join(dir, fileName) + glog.V(4).Info(log("loading volume data file [%s]", dataFileName)) + + file, err := os.Open(dataFileName) + if err != nil { + glog.Error(log("failed to open volume data file [%s]: %v", dataFileName, err)) + return nil, err + } + defer file.Close() + data := map[string]string{} + if err := json.NewDecoder(file).Decode(&data); err != nil { + glog.Error(log("failed to parse volume data file [%s]: %v", dataFileName, err)) + return nil, err + } + + return data, nil +} + +// isDirMounted returns the !notMounted result from IsLikelyNotMountPoint check +func isDirMounted(plug *csiPlugin, dir string) (bool, error) { + mounter := plug.host.GetMounter(plug.GetPluginName()) + notMnt, err := mounter.IsLikelyNotMountPoint(dir) + if err != nil && !os.IsNotExist(err) { + glog.Error(log("isDirMounted IsLikelyNotMountPoint test failed for dir [%v]", dir)) + return false, err + } + return !notMnt, nil +} + +// removeMountDir cleans the mount dir when dir is not mounted and removes the volume data file in dir +func removeMountDir(plug *csiPlugin, mountPath string) error { + glog.V(4).Info(log("removing mount path [%s]", mountPath)) + if pathExists, pathErr := util.PathExists(mountPath); pathErr != nil { + glog.Error(log("failed while checking mount path stat [%s]", pathErr)) + return pathErr + } else if !pathExists { + glog.Warning(log("skipping mount dir removal, path does not exist [%v]", mountPath)) + return nil + } + + mounter := plug.host.GetMounter(plug.GetPluginName()) + notMnt, err :=
mounter.IsLikelyNotMountPoint(mountPath) + if err != nil { + glog.Error(log("mount dir removal failed [%s]: %v", mountPath, err)) + return err + } + if notMnt { + glog.V(4).Info(log("dir not mounted, deleting it [%s]", mountPath)) + if err := os.Remove(mountPath); err != nil && !os.IsNotExist(err) { + glog.Error(log("failed to remove dir [%s]: %v", mountPath, err)) + return err + } + // remove volume data file as well + dataFile := path.Join(path.Dir(mountPath), volDataFileName) + glog.V(4).Info(log("also deleting volume info data file [%s]", dataFile)) + if err := os.Remove(dataFile); err != nil && !os.IsNotExist(err) { + glog.Error(log("failed to delete volume data file [%s]: %v", dataFile, err)) + return err + } + } + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_plugin.go b/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_plugin.go new file mode 100644 index 000000000000..be40992df049 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_plugin.go @@ -0,0 +1,253 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package csi + +import ( + "errors" + "fmt" + "regexp" + "time" + + csipb "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/golang/glog" + api "k8s.io/api/core/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/kubernetes/pkg/util/mount" + "k8s.io/kubernetes/pkg/volume" +) + +const ( + csiPluginName = "kubernetes.io/csi" + csiVolAttribsAnnotationKey = "csi.volume.kubernetes.io/volume-attributes" + + // TODO (vladimirvivien) implement a more dynamic way to discover + // the unix domain socket path for each installed csi driver. + // TODO (vladimirvivien) would be nice to name socket with a .sock extension + // for consistency. + csiAddrTemplate = "/var/lib/kubelet/plugins/%v/csi.sock" + csiTimeout = 15 * time.Second + volNameSep = "^" + volDataFileName = "vol_data.json" +) + +var ( + // csiVersion supported csi version + csiVersion = &csipb.Version{Major: 0, Minor: 1, Patch: 0} + driverNameRexp = regexp.MustCompile(`^[A-Za-z]+(\.?-?_?[A-Za-z0-9-])+$`) +) + +type csiPlugin struct { + host volume.VolumeHost +} + +// ProbeVolumePlugins returns implemented plugins +func ProbeVolumePlugins() []volume.VolumePlugin { + p := &csiPlugin{ + host: nil, + } + return []volume.VolumePlugin{p} +} + +// volume.VolumePlugin methods +var _ volume.VolumePlugin = &csiPlugin{} + +func (p *csiPlugin) Init(host volume.VolumeHost) error { + glog.Info(log("plugin initializing...")) + p.host = host + return nil +} + +func (p *csiPlugin) GetPluginName() string { + return csiPluginName +} + +// GetVolumeName returns a concatenated string of CSIVolumeSource.Driver<volNameSep>CSIVolumeSource.VolumeHandle +// That string value is used in Detach() to extract driver name and volumeName.
+func (p *csiPlugin) GetVolumeName(spec *volume.Spec) (string, error) { + csi, err := getCSISourceFromSpec(spec) + if err != nil { + glog.Error(log("plugin.GetVolumeName failed to extract volume source from spec: %v", err)) + return "", err + } + + //TODO (vladimirvivien) this validation should be done at the API validation check + if !isDriverNameValid(csi.Driver) { + glog.Error(log("plugin.GetVolumeName failed to create volume name: invalid csi driver name %s", csi.Driver)) + return "", errors.New("invalid csi driver name") + } + + // return driverName<volNameSep>volumeHandle + return fmt.Sprintf("%s%s%s", csi.Driver, volNameSep, csi.VolumeHandle), nil +} + +func (p *csiPlugin) CanSupport(spec *volume.Spec) bool { + // TODO (vladimirvivien) CanSupport should also take into account + // the availability/registration of specified Driver in the volume source + return spec.PersistentVolume != nil && spec.PersistentVolume.Spec.CSI != nil +} + +func (p *csiPlugin) RequiresRemount() bool { + return false +} + +func (p *csiPlugin) NewMounter( + spec *volume.Spec, + pod *api.Pod, + _ volume.VolumeOptions) (volume.Mounter, error) { + pvSource, err := getCSISourceFromSpec(spec) + if err != nil { + return nil, err + } + + // TODO (vladimirvivien) consider moving this check in API validation + // check Driver name to conform to CSI spec + if !isDriverNameValid(pvSource.Driver) { + glog.Error(log("driver name does not conform to CSI spec: %s", pvSource.Driver)) + return nil, errors.New("driver name is invalid") + } + + // before it is used in any paths such as socket etc + addr := fmt.Sprintf(csiAddrTemplate, pvSource.Driver) + glog.V(4).Infof(log("setting up mounter for [volume=%v,driver=%v]", pvSource.VolumeHandle, pvSource.Driver)) + client := newCsiDriverClient("unix", addr) + + k8s := p.host.GetKubeClient() + if k8s == nil { + glog.Error(log("failed to get a kubernetes client")) + return nil, errors.New("failed to get a Kubernetes client") + } + + mounter := &csiMountMgr{ + plugin: p, + k8s: k8s, + spec: spec, + pod: pod, + podUID: pod.UID, + driverName: pvSource.Driver, + volumeID: pvSource.VolumeHandle, + specVolumeID: spec.Name(), + csiClient: client, + } + return mounter, nil +} + +func (p *csiPlugin) NewUnmounter(specName string, podUID types.UID) (volume.Unmounter, error) { + glog.V(4).Infof(log("setting up unmounter for [name=%v, podUID=%v]", specName, podUID)) + unmounter := &csiMountMgr{ + plugin: p, + podUID: podUID, + specVolumeID: specName, + } + return unmounter, nil +} + +func (p *csiPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) { + glog.V(4).Info(log("plugin.ConstructVolumeSpec [pv.Name=%v, path=%v]", volumeName, mountPath)) + + volData, err := loadVolumeData(mountPath, volDataFileName) + if err != nil { + glog.Error(log("plugin.ConstructVolumeSpec failed loading volume data using [%s]: %v", mountPath, err)) + return nil, err + } + + glog.V(4).Info(log("plugin.ConstructVolumeSpec extracted [%#v]", volData)) + + pv := &api.PersistentVolume{ + ObjectMeta: meta.ObjectMeta{ + Name: volData[volDataKey.specVolID], + }, + Spec: api.PersistentVolumeSpec{ + PersistentVolumeSource: api.PersistentVolumeSource{ + CSI: &api.CSIPersistentVolumeSource{ + Driver: volData[volDataKey.driverName], + VolumeHandle: volData[volDataKey.volHandle], + }, + }, + }, + } + + return volume.NewSpecFromPersistentVolume(pv, false), nil +} + +func (p *csiPlugin) SupportsMountOption() bool { + // TODO (vladimirvivien) use CSI VolumeCapability.MountVolume.mount_flags + // to probe for the
result for this method + return false +} + +func (p *csiPlugin) SupportsBulkVolumeVerification() bool { + return false +} + +// volume.AttachableVolumePlugin methods +var _ volume.AttachableVolumePlugin = &csiPlugin{} + +func (p *csiPlugin) NewAttacher() (volume.Attacher, error) { + k8s := p.host.GetKubeClient() + if k8s == nil { + glog.Error(log("unable to get kubernetes client from host")) + return nil, errors.New("unable to get Kubernetes client") + } + + return &csiAttacher{ + plugin: p, + k8s: k8s, + waitSleepTime: 1 * time.Second, + }, nil +} + +func (p *csiPlugin) NewDetacher() (volume.Detacher, error) { + k8s := p.host.GetKubeClient() + if k8s == nil { + glog.Error(log("unable to get kubernetes client from host")) + return nil, errors.New("unable to get Kubernetes client") + } + + return &csiAttacher{ + plugin: p, + k8s: k8s, + waitSleepTime: 1 * time.Second, + }, nil +} + +func (p *csiPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) { + m := p.host.GetMounter(p.GetPluginName()) + return mount.GetMountRefs(m, deviceMountPath) +} + +func getCSISourceFromSpec(spec *volume.Spec) (*api.CSIPersistentVolumeSource, error) { + if spec.PersistentVolume != nil && + spec.PersistentVolume.Spec.CSI != nil { + return spec.PersistentVolume.Spec.CSI, nil + } + + return nil, fmt.Errorf("CSIPersistentVolumeSource not defined in spec") +} + +// log prepends log string with `kubernetes.io/csi` +func log(msg string, parts ...interface{}) string { + return fmt.Sprintf(fmt.Sprintf("%s: %s", csiPluginName, msg), parts...) +} + +// isDriverNameValid validates the driverName using CSI spec +func isDriverNameValid(name string) bool { + if len(name) == 0 || len(name) > 63 { + return false + } + return driverNameRexp.MatchString(name) +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/downwardapi/BUILD b/vendor/k8s.io/kubernetes/pkg/volume/downwardapi/BUILD index 36ca1489344e..d84c809751a6 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/downwardapi/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/volume/downwardapi/BUILD @@ -9,8 +9,8 @@ load( go_library( name = "go_default_library", srcs = ["downwardapi.go"], + importpath = "k8s.io/kubernetes/pkg/volume/downwardapi", deps = [ - "//pkg/api:go_default_library", "//pkg/api/v1/resource:go_default_library", "//pkg/fieldpath:go_default_library", "//pkg/util/strings:go_default_library", @@ -26,6 +26,7 @@ go_library( go_test( name = "go_default_test", srcs = ["downwardapi_test.go"], + importpath = "k8s.io/kubernetes/pkg/volume/downwardapi", library = ":go_default_library", deps = [ "//pkg/fieldpath:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/volume/downwardapi/OWNERS b/vendor/k8s.io/kubernetes/pkg/volume/downwardapi/OWNERS index 72968b277dad..baaff6ff460f 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/downwardapi/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/volume/downwardapi/OWNERS @@ -7,13 +7,7 @@ reviewers: - ivan4th - rata - sjenning -- thockin -- smarterclayton -- brendandburns -- derekwaynecarr -- pmorie - saad-ali -- justinsb - jsafrane - rootfs - jingxu97 diff --git a/vendor/k8s.io/kubernetes/pkg/volume/downwardapi/downwardapi.go b/vendor/k8s.io/kubernetes/pkg/volume/downwardapi/downwardapi.go index 008e10f6b495..0b920b05054d 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/downwardapi/downwardapi.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/downwardapi/downwardapi.go @@ -25,7 +25,6 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" utilerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1/resource" "k8s.io/kubernetes/pkg/fieldpath" utilstrings "k8s.io/kubernetes/pkg/util/strings" @@ -246,7 +245,7 @@ func CollectData(items []v1.DownwardAPIVolumeFile, pod *v1.Pod, host volume.Volu nodeAllocatable, err := host.GetNodeAllocatable() if err != nil { errlist = append(errlist, err) - } else if values, err := resource.ExtractResourceValueByContainerNameAndNodeAllocatable(api.Scheme, fileInfo.ResourceFieldRef, pod, containerName, nodeAllocatable); err != nil { + } else if values, err := resource.ExtractResourceValueByContainerNameAndNodeAllocatable(fileInfo.ResourceFieldRef, pod, containerName, nodeAllocatable); err != nil { glog.Errorf("Unable to extract field %s: %s", fileInfo.ResourceFieldRef.Resource, err.Error()) errlist = append(errlist, err) } else { diff --git a/vendor/k8s.io/kubernetes/pkg/volume/empty_dir/BUILD b/vendor/k8s.io/kubernetes/pkg/volume/empty_dir/BUILD index 3dfe13136479..c20dd4f39f50 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/empty_dir/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/volume/empty_dir/BUILD @@ -18,8 +18,9 @@ go_library( ], "//conditions:default": [], }), + importpath = "k8s.io/kubernetes/pkg/volume/empty_dir", deps = [ - "//pkg/api/v1/helper:go_default_library", + "//pkg/apis/core/v1/helper:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/util/strings:go_default_library", "//pkg/volume:go_default_library", @@ -45,6 +46,7 @@ go_test( ], "//conditions:default": [], }), + importpath = "k8s.io/kubernetes/pkg/volume/empty_dir", library = ":go_default_library", deps = select({ "@io_bazel_rules_go//go/platform:linux_amd64": [ diff --git a/vendor/k8s.io/kubernetes/pkg/volume/empty_dir/OWNERS b/vendor/k8s.io/kubernetes/pkg/volume/empty_dir/OWNERS index 72968b277dad..baaff6ff460f 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/empty_dir/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/volume/empty_dir/OWNERS @@ -7,13 +7,7 @@ reviewers: - ivan4th - rata - sjenning -- thockin -- smarterclayton -- brendandburns -- derekwaynecarr -- pmorie - saad-ali -- justinsb - jsafrane - rootfs - jingxu97 diff --git a/vendor/k8s.io/kubernetes/pkg/volume/empty_dir/empty_dir.go b/vendor/k8s.io/kubernetes/pkg/volume/empty_dir/empty_dir.go index 68cd56727b84..ef7807601eec 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/empty_dir/empty_dir.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/empty_dir/empty_dir.go @@ -26,7 +26,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - v1helper "k8s.io/kubernetes/pkg/api/v1/helper" + v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/util/mount" stringsutil "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" @@ -227,7 +227,7 @@ func (ed *emptyDir) SetUpAt(dir string, fsGroup *int64) error { err = ed.setupDir(dir) case v1.StorageMediumMemory: err = ed.setupTmpfs(dir) - case v1.StorageMediumHugepages: + case v1.StorageMediumHugePages: err = ed.setupHugepages(dir) default: err = fmt.Errorf("unknown storage medium %q", ed.medium) @@ -392,7 +392,7 @@ func (ed *emptyDir) TearDownAt(dir string) error { ed.medium = v1.StorageMediumMemory return ed.teardownTmpfsOrHugetlbfs(dir) } else if medium == mediumHugepages { - ed.medium = v1.StorageMediumHugepages + ed.medium = v1.StorageMediumHugePages return ed.teardownTmpfsOrHugetlbfs(dir) } } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/fc/BUILD b/vendor/k8s.io/kubernetes/pkg/volume/fc/BUILD index 5bc5321da7d9..b8d53706fb1a 100644 --- 
a/vendor/k8s.io/kubernetes/pkg/volume/fc/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/volume/fc/BUILD @@ -15,7 +15,9 @@ go_library( "fc.go", "fc_util.go", ], + importpath = "k8s.io/kubernetes/pkg/volume/fc", deps = [ + "//pkg/features:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/util/strings:go_default_library", "//pkg/volume:go_default_library", @@ -23,7 +25,9 @@ go_library( "//pkg/volume/util/volumehelper:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", ], ) @@ -33,6 +37,7 @@ go_test( "fc_test.go", "fc_util_test.go", ], + importpath = "k8s.io/kubernetes/pkg/volume/fc", library = ":go_default_library", deps = [ "//pkg/util/mount:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/volume/fc/OWNERS b/vendor/k8s.io/kubernetes/pkg/volume/fc/OWNERS index 0f6062cc1716..5c95f6dfc500 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/fc/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/volume/fc/OWNERS @@ -2,13 +2,7 @@ approvers: - rootfs - saad-ali reviewers: -- thockin -- smarterclayton -- brendandburns -- derekwaynecarr -- pmorie - saad-ali -- justinsb - jsafrane - rootfs - jingxu97 diff --git a/vendor/k8s.io/kubernetes/pkg/volume/fc/attacher.go b/vendor/k8s.io/kubernetes/pkg/volume/fc/attacher.go index 4a8121faee89..a0714dab36bd 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/fc/attacher.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/fc/attacher.go @@ -26,6 +26,8 @@ import ( "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" @@ -135,7 +137,7 @@ func (plugin *fcPlugin) NewDetacher() (volume.Detacher, error) { }, nil } -func (detacher *fcDetacher) Detach(deviceMountPath string, nodeName types.NodeName) error { +func (detacher *fcDetacher) Detach(volumeName string, nodeName types.NodeName) error { return nil } @@ -176,17 +178,32 @@ func volumeSpecToMounter(spec *volume.Spec, host volume.VolumeHost) (*fcDiskMoun } else { return nil, fmt.Errorf("fc: no fc disk information found. 
failed to make a new mounter") } - - return &fcDiskMounter{ - fcDisk: &fcDisk{ - plugin: &fcPlugin{ - host: host, - }, - wwns: fc.TargetWWNs, - lun: lun, - wwids: wwids, - io: &osIOHandler{}, + fcDisk := &fcDisk{ + plugin: &fcPlugin{ + host: host, }, + wwns: fc.TargetWWNs, + lun: lun, + wwids: wwids, + io: &osIOHandler{}, + } + // TODO: remove feature gate check after no longer needed + if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + volumeMode, err := volumehelper.GetVolumeMode(spec) + if err != nil { + return nil, err + } + glog.V(5).Infof("fc: volumeSpecToMounter volumeMode %s", volumeMode) + return &fcDiskMounter{ + fcDisk: fcDisk, + fsType: fc.FSType, + volumeMode: volumeMode, + readOnly: readOnly, + mounter: volumehelper.NewSafeFormatAndMountFromHost(fcPluginName, host), + }, nil + } + return &fcDiskMounter{ + fcDisk: fcDisk, fsType: fc.FSType, readOnly: readOnly, mounter: volumehelper.NewSafeFormatAndMountFromHost(fcPluginName, host), diff --git a/vendor/k8s.io/kubernetes/pkg/volume/fc/disk_manager.go b/vendor/k8s.io/kubernetes/pkg/volume/fc/disk_manager.go index 33a0c422cfff..efd1881808cc 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/fc/disk_manager.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/fc/disk_manager.go @@ -27,6 +27,7 @@ import ( // Abstract interface to disk operations. type diskManager interface { MakeGlobalPDName(disk fcDisk) string + MakeGlobalVDPDName(disk fcDisk) string // Attaches the disk to the kubelet's host machine. AttachDisk(b fcDiskMounter) (string, error) // Detaches the disk from the kubelet's host machine. @@ -89,33 +90,3 @@ func diskSetUp(manager diskManager, b fcDiskMounter, volPath string, mounter mou return nil } - -// utility to tear down a disk based filesystem -func diskTearDown(manager diskManager, c fcDiskUnmounter, volPath string, mounter mount.Interface) error { - noMnt, err := mounter.IsLikelyNotMountPoint(volPath) - if err != nil { - glog.Errorf("cannot validate mountpoint %s", volPath) - return err - } - if noMnt { - return os.Remove(volPath) - } - - if err := mounter.Unmount(volPath); err != nil { - glog.Errorf("failed to unmount %s", volPath) - return err - } - - noMnt, mntErr := mounter.IsLikelyNotMountPoint(volPath) - if mntErr != nil { - glog.Errorf("isMountpoint check failed: %v", mntErr) - return err - } - if noMnt { - if err := os.Remove(volPath); err != nil { - return err - } - } - return nil - -} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/fc/fc.go b/vendor/k8s.io/kubernetes/pkg/volume/fc/fc.go index 2b4e387c25bd..f6c2841bda5f 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/fc/fc.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/fc/fc.go @@ -23,11 +23,15 @@ import ( "github.com/golang/glog" "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/util/mount" utilstrings "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" + "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) // This is the primary entrypoint for volume plugins. 
@@ -41,6 +45,7 @@ type fcPlugin struct { var _ volume.VolumePlugin = &fcPlugin{} var _ volume.PersistentVolumePlugin = &fcPlugin{} +var _ volume.BlockVolumePlugin = &fcPlugin{} const ( fcPluginName = "kubernetes.io/fc" @@ -115,29 +120,75 @@ func (plugin *fcPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, return nil, err } - var lun string - var wwids []string - if fc.Lun != nil && len(fc.TargetWWNs) != 0 { - lun = strconv.Itoa(int(*fc.Lun)) - } else if len(fc.WWIDs) != 0 { - for _, wwid := range fc.WWIDs { - wwids = append(wwids, strings.Replace(wwid, " ", "_", -1)) - } - } else { + wwns, lun, wwids, err := getWwnsLunWwids(fc) + if err != nil { return nil, fmt.Errorf("fc: no fc disk information found. failed to make a new mounter") } - + fcDisk := &fcDisk{ + podUID: podUID, + volName: spec.Name(), + wwns: wwns, + lun: lun, + wwids: wwids, + manager: manager, + io: &osIOHandler{}, + plugin: plugin, + } + // TODO: remove feature gate check after no longer needed + if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + volumeMode, err := volumehelper.GetVolumeMode(spec) + if err != nil { + return nil, err + } + glog.V(5).Infof("fc: newMounterInternal volumeMode %s", volumeMode) + return &fcDiskMounter{ + fcDisk: fcDisk, + fsType: fc.FSType, + volumeMode: volumeMode, + readOnly: readOnly, + mounter: &mount.SafeFormatAndMount{Interface: mounter, Exec: exec}, + }, nil + } return &fcDiskMounter{ + fcDisk: fcDisk, + fsType: fc.FSType, + readOnly: readOnly, + mounter: &mount.SafeFormatAndMount{Interface: mounter, Exec: exec}, + }, nil + +} + +func (plugin *fcPlugin) NewBlockVolumeMapper(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.BlockVolumeMapper, error) { + // If this called via GenerateUnmapDeviceFunc(), pod is nil. + // Pass empty string as dummy uid since uid isn't used in the case. + var uid types.UID + if pod != nil { + uid = pod.UID + } + return plugin.newBlockVolumeMapperInternal(spec, uid, &FCUtil{}, plugin.host.GetMounter(plugin.GetPluginName()), plugin.host.GetExec(plugin.GetPluginName())) +} + +func (plugin *fcPlugin) newBlockVolumeMapperInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface, exec mount.Exec) (volume.BlockVolumeMapper, error) { + fc, readOnly, err := getVolumeSource(spec) + if err != nil { + return nil, err + } + + wwns, lun, wwids, err := getWwnsLunWwids(fc) + if err != nil { + return nil, fmt.Errorf("fc: no fc disk information found. 
failed to make a new mapper") + } + + return &fcDiskMapper{ fcDisk: &fcDisk{ podUID: podUID, volName: spec.Name(), - wwns: fc.TargetWWNs, + wwns: wwns, lun: lun, wwids: wwids, manager: manager, io: &osIOHandler{}, plugin: plugin}, - fsType: fc.FSType, readOnly: readOnly, mounter: &mount.SafeFormatAndMount{Interface: mounter, Exec: exec}, }, nil @@ -161,6 +212,22 @@ func (plugin *fcPlugin) newUnmounterInternal(volName string, podUID types.UID, m }, nil } +func (plugin *fcPlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (volume.BlockVolumeUnmapper, error) { + return plugin.newUnmapperInternal(volName, podUID, &FCUtil{}) +} + +func (plugin *fcPlugin) newUnmapperInternal(volName string, podUID types.UID, manager diskManager) (volume.BlockVolumeUnmapper, error) { + return &fcDiskUnmapper{ + fcDisk: &fcDisk{ + podUID: podUID, + volName: volName, + manager: manager, + plugin: plugin, + io: &osIOHandler{}, + }, + }, nil +} + func (plugin *fcPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) { fcVolume := &v1.Volume{ Name: volumeName, @@ -171,6 +238,55 @@ func (plugin *fcPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volu return volume.NewSpecFromVolume(fcVolume), nil } +// ConstructBlockVolumeSpec creates a new volume.Spec with following steps. +// - Searchs a file whose name is {pod uuid} under volume plugin directory. +// - If a file is found, then retreives volumePluginDependentPath from globalMapPathUUID. +// - Once volumePluginDependentPath is obtained, store volume information to VolumeSource +// examples: +// mapPath: pods/{podUid}}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName} +// globalMapPathUUID : plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{pod uuid} +func (plugin *fcPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, mapPath string) (*volume.Spec, error) { + pluginDir := plugin.host.GetVolumeDevicePluginDir(fcPluginName) + blkutil := util.NewBlockVolumePathHandler() + globalMapPathUUID, err := blkutil.FindGlobalMapPathUUIDFromPod(pluginDir, mapPath, podUID) + if err != nil { + return nil, err + } + glog.V(5).Infof("globalMapPathUUID: %v, err: %v", globalMapPathUUID, err) + + // Retreive volumePluginDependentPath from globalMapPathUUID + // globalMapPathUUID examples: + // wwn+lun: plugins/kubernetes.io/fc/volumeDevices/50060e801049cfd1-lun-0/{pod uuid} + // wwid: plugins/kubernetes.io/fc/volumeDevices/3600508b400105e210000900000490000/{pod uuid} + arr := strings.Split(globalMapPathUUID, "/") + if len(arr) < 2 { + return nil, fmt.Errorf("Fail to retreive volume plugin information from globalMapPathUUID: %v", globalMapPathUUID) + } + l := len(arr) - 2 + volumeInfo := arr[l] + + // Create volume from wwn+lun or wwid + var fcPV *v1.PersistentVolume + if strings.Contains(volumeInfo, "-lun-") { + wwnLun := strings.Split(volumeInfo, "-lun-") + lun, err := strconv.Atoi(wwnLun[1]) + if err != nil { + return nil, err + } + lun32 := int32(lun) + fcPV = createPersistentVolumeFromFCVolumeSource(volumeName, + v1.FCVolumeSource{TargetWWNs: []string{wwnLun[0]}, Lun: &lun32}) + glog.V(5).Infof("ConstructBlockVolumeSpec: TargetWWNs: %v, Lun: %v", + fcPV.Spec.PersistentVolumeSource.FC.TargetWWNs, + fcPV.Spec.PersistentVolumeSource.FC.Lun) + } else { + fcPV = createPersistentVolumeFromFCVolumeSource(volumeName, + v1.FCVolumeSource{WWIDs: []string{volumeInfo}}) + glog.V(5).Infof("ConstructBlockVolumeSpec: WWIDs: %v", 
fcPV.Spec.PersistentVolumeSource.FC.WWIDs) + } + return volume.NewSpecFromPersistentVolume(fcPV, false), nil +} + type fcDisk struct { volName string podUID types.UID @@ -192,11 +308,26 @@ func (fc *fcDisk) GetPath() string { return fc.plugin.host.GetPodVolumeDir(fc.podUID, utilstrings.EscapeQualifiedNameForDisk(name), fc.volName) } +func (fc *fcDisk) fcGlobalMapPath(spec *volume.Spec) (string, error) { + mounter, err := volumeSpecToMounter(spec, fc.plugin.host) + if err != nil { + glog.Warningf("failed to get fc mounter: %v", err) + return "", err + } + return fc.manager.MakeGlobalVDPDName(*mounter.fcDisk), nil +} + +func (fc *fcDisk) fcPodDeviceMapPath() (string, string) { + name := fcPluginName + return fc.plugin.host.GetPodVolumeDeviceDir(fc.podUID, utilstrings.EscapeQualifiedNameForDisk(name)), fc.volName +} + type fcDiskMounter struct { *fcDisk - readOnly bool - fsType string - mounter *mount.SafeFormatAndMount + readOnly bool + fsType string + volumeMode v1.PersistentVolumeMode + mounter *mount.SafeFormatAndMount } var _ volume.Mounter = &fcDiskMounter{} @@ -243,16 +374,55 @@ func (c *fcDiskUnmounter) TearDown() error { } func (c *fcDiskUnmounter) TearDownAt(dir string) error { - if pathExists, pathErr := util.PathExists(dir); pathErr != nil { - return fmt.Errorf("Error checking if path exists: %v", pathErr) - } else if !pathExists { - glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir) - return nil + return util.UnmountPath(dir, c.mounter) +} + +// Block Volumes Support +type fcDiskMapper struct { + *fcDisk + readOnly bool + mounter mount.Interface +} + +var _ volume.BlockVolumeMapper = &fcDiskMapper{} + +func (b *fcDiskMapper) SetUpDevice() (string, error) { + return "", nil +} + +type fcDiskUnmapper struct { + *fcDisk +} + +var _ volume.BlockVolumeUnmapper = &fcDiskUnmapper{} + +func (c *fcDiskUnmapper) TearDownDevice(_, devicePath string) error { + // Remove scsi device from the node. + if !strings.HasPrefix(devicePath, "/dev/") { + return fmt.Errorf("fc detach disk: invalid device name: %s", devicePath) } - return diskTearDown(c.manager, *c, dir, c.mounter) + arr := strings.Split(devicePath, "/") + dev := arr[len(arr)-1] + removeFromScsiSubsystem(dev, c.io) + return nil +} + +// GetGlobalMapPath returns global map path and error +// path: plugins/kubernetes.io/{PluginName}/volumeDevices/{WWID}/{podUid} +func (fc *fcDisk) GetGlobalMapPath(spec *volume.Spec) (string, error) { + return fc.fcGlobalMapPath(spec) +} + +// GetPodDeviceMapPath returns pod device map path and volume name +// path: pods/{podUid}/volumeDevices/kubernetes.io~fc +// volumeName: pv0001 +func (fc *fcDisk) GetPodDeviceMapPath() (string, string) { + return fc.fcPodDeviceMapPath() } func getVolumeSource(spec *volume.Spec) (*v1.FCVolumeSource, bool, error) { + // fc volumes used directly in a pod have a ReadOnly flag set by the pod author. 
+ // fc volumes used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV if spec.Volume != nil && spec.Volume.FC != nil { return spec.Volume.FC, spec.Volume.FC.ReadOnly, nil } else if spec.PersistentVolume != nil && @@ -262,3 +432,34 @@ func getVolumeSource(spec *volume.Spec) (*v1.FCVolumeSource, bool, error) { return nil, false, fmt.Errorf("Spec does not reference a FibreChannel volume type") } + +func createPersistentVolumeFromFCVolumeSource(volumeName string, fc v1.FCVolumeSource) *v1.PersistentVolume { + block := v1.PersistentVolumeBlock + return &v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: volumeName, + }, + Spec: v1.PersistentVolumeSpec{ + PersistentVolumeSource: v1.PersistentVolumeSource{ + FC: &fc, + }, + VolumeMode: &block, + }, + } +} + +func getWwnsLunWwids(fc *v1.FCVolumeSource) ([]string, string, []string, error) { + var lun string + var wwids []string + if fc.Lun != nil && len(fc.TargetWWNs) != 0 { + lun = strconv.Itoa(int(*fc.Lun)) + return fc.TargetWWNs, lun, wwids, nil + } + if len(fc.WWIDs) != 0 { + for _, wwid := range fc.WWIDs { + wwids = append(wwids, strings.Replace(wwid, " ", "_", -1)) + } + return fc.TargetWWNs, lun, wwids, nil + } + return nil, "", nil, fmt.Errorf("fc: no fc disk information found") +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/fc/fc_util.go b/vendor/k8s.io/kubernetes/pkg/volume/fc/fc_util.go index 050785c42216..83d930220b5b 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/fc/fc_util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/fc/fc_util.go @@ -25,6 +25,9 @@ import ( "strings" "github.com/golang/glog" + "k8s.io/api/core/v1" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/volume" ) @@ -143,7 +146,7 @@ func scsiHostRescan(io ioHandler) { } } -// make a directory like /var/lib/kubelet/plugins/kubernetes.io/pod/fc/target-lun-0 +// make a directory like /var/lib/kubelet/plugins/kubernetes.io/fc/target-lun-0 func makePDNameInternal(host volume.VolumeHost, wwns []string, lun string, wwids []string) string { if len(wwns) != 0 { return path.Join(host.GetPluginDir(fcPluginName), wwns[0]+"-lun-"+lun) @@ -152,13 +155,27 @@ func makePDNameInternal(host volume.VolumeHost, wwns []string, lun string, wwids } } +// make a directory like /var/lib/kubelet/plugins/kubernetes.io/fc/volumeDevices/target-lun-0 +func makeVDPDNameInternal(host volume.VolumeHost, wwns []string, lun string, wwids []string) string { + if len(wwns) != 0 { + return path.Join(host.GetVolumeDevicePluginDir(fcPluginName), wwns[0]+"-lun-"+lun) + } else { + return path.Join(host.GetVolumeDevicePluginDir(fcPluginName), wwids[0]) + } +} + type FCUtil struct{} func (util *FCUtil) MakeGlobalPDName(fc fcDisk) string { return makePDNameInternal(fc.plugin.host, fc.wwns, fc.lun, fc.wwids) } -func searchDisk(b fcDiskMounter) (string, string) { +// Global volume device plugin dir +func (util *FCUtil) MakeGlobalVDPDName(fc fcDisk) string { + return makeVDPDNameInternal(fc.plugin.host, fc.wwns, fc.lun, fc.wwids) +} + +func searchDisk(b fcDiskMounter) (string, error) { var diskIds []string var disk string var dm string @@ -198,14 +215,6 @@ func searchDisk(b fcDiskMounter) (string, string) { scsiHostRescan(io) rescaned = true } - return disk, dm -} - -func (util *FCUtil) AttachDisk(b fcDiskMounter) (string, error) { - devicePath := "" - var disk, dm string - - disk, dm = searchDisk(b) // if no disk matches input wwn and lun, exit if disk == "" && dm == "" { return "", 
fmt.Errorf("no fc disk found") @@ -213,10 +222,26 @@ func (util *FCUtil) AttachDisk(b fcDiskMounter) (string, error) { // if multipath devicemapper device is found, use it; otherwise use raw disk if dm != "" { - devicePath = dm - } else { - devicePath = disk + return dm, nil + } + return disk, nil +} + +func (util *FCUtil) AttachDisk(b fcDiskMounter) (string, error) { + devicePath, err := searchDisk(b) + if err != nil { + return "", err } + // TODO: remove feature gate check after no longer needed + if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + // If the volumeMode is 'Block', plugin don't have to format the volume. + // The globalPDPath will be created by operationexecutor. Just return devicePath here. + glog.V(5).Infof("fc: AttachDisk volumeMode: %s, devicePath: %s", b.volumeMode, devicePath) + if b.volumeMode == v1.PersistentVolumeBlock { + return devicePath, nil + } + } + // mount it globalPDPath := util.MakeGlobalPDName(*b.fcDisk) if err := os.MkdirAll(globalPDPath, 0750); err != nil { diff --git a/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/BUILD b/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/BUILD index 87062cbb9300..b55261ee62bb 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/BUILD @@ -25,6 +25,7 @@ go_library( "util.go", "volume.go", ], + importpath = "k8s.io/kubernetes/pkg/volume/flexvolume", deps = [ "//pkg/util/filesystem:go_default_library", "//pkg/util/mount:go_default_library", @@ -53,6 +54,7 @@ go_test( "probe_test.go", "unmounter_test.go", ], + importpath = "k8s.io/kubernetes/pkg/volume/flexvolume", library = ":go_default_library", deps = [ "//pkg/util/filesystem:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/OWNERS b/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/OWNERS index 975407b4199c..3342c22c4c49 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/OWNERS @@ -6,13 +6,7 @@ reviewers: - ivan4th - rata - sjenning -- thockin -- smarterclayton -- brendandburns -- derekwaynecarr -- pmorie - saad-ali -- justinsb - jsafrane - rootfs - jingxu97 diff --git a/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/attacher.go b/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/attacher.go index c2efcffa1c37..2464b3f2392e 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/attacher.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/attacher.go @@ -113,6 +113,8 @@ func (a *flexVolumeAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName t volumesAttachedCheck[spec] = false glog.V(2).Infof("VolumesAreAttached: check volume (%q) is no longer attached", spec.Name()) } + } else { + return nil, err } } return volumesAttachedCheck, nil diff --git a/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/detacher-defaults.go b/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/detacher-defaults.go index 17c85a365320..181f87bde487 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/detacher-defaults.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/detacher-defaults.go @@ -27,8 +27,8 @@ import ( type detacherDefaults flexVolumeDetacher // Detach is part of the volume.Detacher interface. 
-func (d *detacherDefaults) Detach(deviceName string, hostName types.NodeName) error { - glog.Warning(logPrefix(d.plugin.flexVolumePlugin), "using default Detach for device ", deviceName, ", host ", hostName) +func (d *detacherDefaults) Detach(volumeName string, hostName types.NodeName) error { + glog.Warning(logPrefix(d.plugin.flexVolumePlugin), "using default Detach for volume ", volumeName, ", host ", hostName) return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/detacher.go b/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/detacher.go index 4acbd03eaef8..d2aba85b93fc 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/detacher.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/detacher.go @@ -34,15 +34,15 @@ type flexVolumeDetacher struct { var _ volume.Detacher = &flexVolumeDetacher{} // Detach is part of the volume.Detacher interface. -func (d *flexVolumeDetacher) Detach(pvOrVolumeName string, hostName types.NodeName) error { +func (d *flexVolumeDetacher) Detach(volumeName string, hostName types.NodeName) error { call := d.plugin.NewDriverCall(detachCmd) - call.Append(pvOrVolumeName) + call.Append(volumeName) call.Append(string(hostName)) _, err := call.Run() if isCmdNotSupportedErr(err) { - return (*detacherDefaults)(d).Detach(pvOrVolumeName, hostName) + return (*detacherDefaults)(d).Detach(volumeName, hostName) } return err } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/mounter.go b/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/mounter.go index 94b2ffd74886..5085293cc174 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/mounter.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/mounter.go @@ -17,6 +17,7 @@ limitations under the License. package flexvolume import ( + "os" "strconv" "k8s.io/kubernetes/pkg/volume" @@ -70,6 +71,7 @@ func (f *flexVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { // Extract secret and pass it as options. if err := addSecretsToOptions(extraOptions, f.spec, f.podNamespace, f.driverName, f.plugin.host); err != nil { + os.Remove(dir) return err } @@ -86,6 +88,7 @@ func (f *flexVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { } if err != nil { + os.Remove(dir) return err } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/plugin.go b/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/plugin.go index ed351e05eada..ba46e2c52d12 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/plugin.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/plugin.go @@ -32,7 +32,10 @@ import ( "k8s.io/utils/exec" ) -const flexVolumePluginName = "kubernetes.io/flexvolume" +const ( + flexVolumePluginName = "kubernetes.io/flexvolume" + flexVolumePluginNamePrefix = "flexvolume-" +) // FlexVolumePlugin object. type flexVolumePlugin struct { @@ -102,7 +105,7 @@ func (plugin *flexVolumePlugin) getExecutable() string { // Name is part of the volume.VolumePlugin interface. func (plugin *flexVolumePlugin) GetPluginName() string { - return "flexvolume-" + plugin.driverName + return flexVolumePluginNamePrefix + plugin.driverName } // GetVolumeName is part of the volume.VolumePlugin interface. 
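For illustration only: a minimal, hypothetical Go sketch of how the flexVolumePluginNamePrefix constant introduced in plugin.go above composes a per-driver plugin name; the driver name "example.vendor/nfs" is an assumed value, not taken from this change.

package main

import "fmt"

// flexVolumePluginNamePrefix mirrors the constant added to
// pkg/volume/flexvolume/plugin.go in the diff above.
const flexVolumePluginNamePrefix = "flexvolume-"

// pluginNameFor is a hypothetical helper showing how GetPluginName derives
// a plugin name from a flexvolume driver name.
func pluginNameFor(driverName string) string {
	return flexVolumePluginNamePrefix + driverName
}

func main() {
	// "example.vendor/nfs" is an assumed driver name, used only for illustration.
	fmt.Println(pluginNameFor("example.vendor/nfs")) // prints: flexvolume-example.vendor/nfs
}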
diff --git a/vendor/k8s.io/kubernetes/pkg/volume/flocker/BUILD b/vendor/k8s.io/kubernetes/pkg/volume/flocker/BUILD index 468438111e93..3d21d92dc91f 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/flocker/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/volume/flocker/BUILD @@ -14,6 +14,7 @@ go_library( "flocker_util.go", "flocker_volume.go", ], + importpath = "k8s.io/kubernetes/pkg/volume/flocker", deps = [ "//pkg/util/env:go_default_library", "//pkg/util/mount:go_default_library", @@ -38,6 +39,7 @@ go_test( "flocker_util_test.go", "flocker_volume_test.go", ], + importpath = "k8s.io/kubernetes/pkg/volume/flocker", library = ":go_default_library", deps = [ "//pkg/util/mount:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/volume/flocker/OWNERS b/vendor/k8s.io/kubernetes/pkg/volume/flocker/OWNERS index 35cbcb214a6f..f9f7cef504ed 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/flocker/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/volume/flocker/OWNERS @@ -4,13 +4,7 @@ approvers: reviewers: - agonzalezro - simonswine -- thockin -- smarterclayton -- brendandburns -- derekwaynecarr -- pmorie - saad-ali -- justinsb - jsafrane - rootfs - jingxu97 diff --git a/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/BUILD b/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/BUILD index eb65ce354016..8c83b4d0d70a 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/BUILD @@ -14,6 +14,7 @@ go_library( "gce_pd.go", "gce_util.go", ], + importpath = "k8s.io/kubernetes/pkg/volume/gce_pd", deps = [ "//pkg/cloudprovider:go_default_library", "//pkg/cloudprovider/providers/gce:go_default_library", @@ -38,6 +39,7 @@ go_test( "attacher_test.go", "gce_pd_test.go", ], + importpath = "k8s.io/kubernetes/pkg/volume/gce_pd", library = ":go_default_library", deps = [ "//pkg/util/mount:go_default_library", @@ -45,6 +47,7 @@ go_test( "//pkg/volume/testing:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/OWNERS b/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/OWNERS index 1f138a152913..5fa7bd72c200 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/OWNERS @@ -2,13 +2,11 @@ approvers: - saad-ali - thockin reviewers: -- thockin -- smarterclayton -- pmorie - saad-ali -- justinsb - jsafrane -- markturansky - jingxu97 - matchstick - gnufied +- msau42 +- verult +- davidz627 diff --git a/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/attacher.go b/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/attacher.go index 588d9aabe76d..6ee1fd6d2305 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/attacher.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/attacher.go @@ -247,8 +247,8 @@ func (plugin *gcePersistentDiskPlugin) NewDetacher() (volume.Detacher, error) { // Callers are responsible for retrying on failure. // Callers are responsible for thread safety between concurrent attach and detach // operations. 
-func (detacher *gcePersistentDiskDetacher) Detach(deviceMountPath string, nodeName types.NodeName) error { - pdName := path.Base(deviceMountPath) +func (detacher *gcePersistentDiskDetacher) Detach(volumeName string, nodeName types.NodeName) error { + pdName := path.Base(volumeName) attached, err := detacher.gceDisks.DiskIsAttached(pdName, nodeName) if err != nil { diff --git a/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_pd.go b/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_pd.go index 9ddeb95787e0..5576f1fabea7 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_pd.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_pd.go @@ -47,6 +47,7 @@ var _ volume.VolumePlugin = &gcePersistentDiskPlugin{} var _ volume.PersistentVolumePlugin = &gcePersistentDiskPlugin{} var _ volume.DeletableVolumePlugin = &gcePersistentDiskPlugin{} var _ volume.ProvisionableVolumePlugin = &gcePersistentDiskPlugin{} +var _ volume.ExpandableVolumePlugin = &gcePersistentDiskPlugin{} const ( gcePersistentDiskPluginName = "kubernetes.io/gce-pd" @@ -190,6 +191,28 @@ func (plugin *gcePersistentDiskPlugin) newProvisionerInternal(options volume.Vol }, nil } +func (plugin *gcePersistentDiskPlugin) RequiresFSResize() bool { + return true +} + +func (plugin *gcePersistentDiskPlugin) ExpandVolumeDevice( + spec *volume.Spec, + newSize resource.Quantity, + oldSize resource.Quantity) (resource.Quantity, error) { + cloud, err := getCloudProvider(plugin.host.GetCloudProvider()) + + if err != nil { + return oldSize, err + } + pdName := spec.PersistentVolume.Spec.GCEPersistentDisk.PDName + updatedQuantity, err := cloud.ResizeDisk(pdName, oldSize, newSize) + + if err != nil { + return oldSize, err + } + return updatedQuantity, nil +} + func (plugin *gcePersistentDiskPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) { mounter := plugin.host.GetMounter(plugin.GetPluginName()) pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName()) diff --git a/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_util.go b/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_util.go index 026324e6305a..3d17a455017b 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_util.go @@ -153,7 +153,7 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (strin // 000 - neither "zone", "zones", or "replica-zones" specified // Pick a zone randomly selected from all active zones where // Kubernetes cluster has a node. 
- zones, err = cloud.GetAllZones() + zones, err = cloud.GetAllCurrentZones() if err != nil { glog.V(2).Infof("error getting zone information from GCE: %v", err) return "", 0, nil, "", err diff --git a/vendor/k8s.io/kubernetes/pkg/volume/git_repo/BUILD b/vendor/k8s.io/kubernetes/pkg/volume/git_repo/BUILD index be57c22489e7..cd24945cf338 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/git_repo/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/volume/git_repo/BUILD @@ -12,6 +12,7 @@ go_library( "doc.go", "git_repo.go", ], + importpath = "k8s.io/kubernetes/pkg/volume/git_repo", deps = [ "//pkg/util/strings:go_default_library", "//pkg/volume:go_default_library", @@ -25,6 +26,7 @@ go_library( go_test( name = "go_default_test", srcs = ["git_repo_test.go"], + importpath = "k8s.io/kubernetes/pkg/volume/git_repo", library = ":go_default_library", deps = [ "//pkg/volume:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/volume/git_repo/OWNERS b/vendor/k8s.io/kubernetes/pkg/volume/git_repo/OWNERS index 801d55bc1f70..27d4e921659c 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/git_repo/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/volume/git_repo/OWNERS @@ -2,13 +2,7 @@ approvers: - thockin - saad-ali reviewers: -- thockin -- smarterclayton -- brendandburns -- derekwaynecarr -- pmorie - saad-ali -- justinsb - jsafrane - rootfs - jingxu97 diff --git a/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/BUILD b/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/BUILD index 558fb615ab9c..b8371b5e8df0 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/BUILD @@ -14,8 +14,9 @@ go_library( "glusterfs_minmax.go", "glusterfs_util.go", ], + importpath = "k8s.io/kubernetes/pkg/volume/glusterfs", deps = [ - "//pkg/api/v1/helper:go_default_library", + "//pkg/apis/core/v1/helper:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/util/strings:go_default_library", "//pkg/volume:go_default_library", @@ -41,6 +42,7 @@ go_test( "glusterfs_minmax_test.go", "glusterfs_test.go", ], + importpath = "k8s.io/kubernetes/pkg/volume/glusterfs", library = ":go_default_library", deps = [ "//pkg/util/mount:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/OWNERS b/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/OWNERS index 75150d589380..080b69b30fbc 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/OWNERS @@ -4,13 +4,7 @@ approvers: - jingxu97 - humblec reviewers: -- thockin -- smarterclayton -- brendandburns -- derekwaynecarr -- pmorie - saad-ali -- justinsb - jsafrane - rootfs - humblec diff --git a/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs.go b/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs.go index e1118b3ba036..c42e3cdf7945 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs.go @@ -37,7 +37,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" clientset "k8s.io/client-go/kubernetes" - v1helper "k8s.io/kubernetes/pkg/api/v1/helper" + v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" @@ -60,6 +60,7 @@ var _ volume.VolumePlugin = &glusterfsPlugin{} var _ volume.PersistentVolumePlugin = &glusterfsPlugin{} var _ volume.DeletableVolumePlugin = &glusterfsPlugin{} var _ volume.ProvisionableVolumePlugin = &glusterfsPlugin{} +var _ 
volume.ExpandableVolumePlugin = &glusterfsPlugin{} var _ volume.Provisioner = &glusterfsVolumeProvisioner{} var _ volume.Deleter = &glusterfsVolumeDeleter{} @@ -148,7 +149,7 @@ func (plugin *glusterfsPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volu if kubeClient == nil { return nil, fmt.Errorf("failed to get kube client to initialize mounter") } - ep, err := kubeClient.Core().Endpoints(podNs).Get(epName, metav1.GetOptions{}) + ep, err := kubeClient.CoreV1().Endpoints(podNs).Get(epName, metav1.GetOptions{}) if err != nil { glog.Errorf("failed to get endpoints %s[%v]", epName, err) return nil, err @@ -196,16 +197,11 @@ func (plugin *glusterfsPlugin) newUnmounterInternal(volName string, podUID types } func (plugin *glusterfsPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) { - glusterfsVolume := &v1.Volume{ - Name: volumeName, - VolumeSource: v1.VolumeSource{ - Glusterfs: &v1.GlusterfsVolumeSource{ - EndpointsName: volumeName, - Path: volumeName, - }, - }, - } - return volume.NewSpecFromVolume(glusterfsVolume), nil + + // To reconstrcut volume spec we need endpoint where fetching endpoint from mount + // string looks to be impossible, so returning error. + + return nil, fmt.Errorf("impossible to reconstruct glusterfs volume spec from volume mountpath") } // Glusterfs volumes represent a bare host file or directory mount of an Glusterfs export. @@ -492,7 +488,7 @@ func (plugin *glusterfsPlugin) collectGids(className string, gidTable *MinMaxAll if kubeClient == nil { return fmt.Errorf("failed to get kube client when collecting gids") } - pvList, err := kubeClient.Core().PersistentVolumes().List(metav1.ListOptions{LabelSelector: labels.Everything().String()}) + pvList, err := kubeClient.CoreV1().PersistentVolumes().List(metav1.ListOptions{LabelSelector: labels.Everything().String()}) if err != nil { glog.Errorf("failed to get existing persistent volumes") return err @@ -728,7 +724,7 @@ func (p *glusterfsVolumeProvisioner) Provision() (*v1.PersistentVolume, error) { } pv.Spec.Capacity = v1.ResourceList{ - v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)), + v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dG", sizeGB)), } return pv, nil } @@ -767,7 +763,7 @@ func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsVolum return nil, 0, fmt.Errorf("error [%v] when getting cluster nodes for volume %s", err, volume) } - // The 'endpointname' is created in form of 'gluster-dynamic-'. + // The 'endpointname' is created in form of 'glusterfs-dynamic-'. // createEndpointService() checks for this 'endpoint' existence in PVC's namespace and // If not found, it create an endpoint and svc using the IPs we dynamically picked at time // of volume creation. 
@@ -813,7 +809,7 @@ func (p *glusterfsVolumeProvisioner) createEndpointService(namespace string, epS if kubeClient == nil { return nil, nil, fmt.Errorf("failed to get kube client when creating endpoint service") } - _, err = kubeClient.Core().Endpoints(namespace).Create(endpoint) + _, err = kubeClient.CoreV1().Endpoints(namespace).Create(endpoint) if err != nil && errors.IsAlreadyExists(err) { glog.V(1).Infof("endpoint [%s] already exist in namespace [%s]", endpoint, namespace) err = nil @@ -833,7 +829,7 @@ func (p *glusterfsVolumeProvisioner) createEndpointService(namespace string, epS Spec: v1.ServiceSpec{ Ports: []v1.ServicePort{ {Protocol: "TCP", Port: 1}}}} - _, err = kubeClient.Core().Services(namespace).Create(service) + _, err = kubeClient.CoreV1().Services(namespace).Create(service) if err != nil && errors.IsAlreadyExists(err) { glog.V(1).Infof("service [%s] already exist in namespace [%s]", service, namespace) err = nil @@ -850,7 +846,7 @@ func (d *glusterfsVolumeDeleter) deleteEndpointService(namespace string, epServi if kubeClient == nil { return fmt.Errorf("failed to get kube client when deleting endpoint service") } - err = kubeClient.Core().Services(namespace).Delete(epServiceName, nil) + err = kubeClient.CoreV1().Services(namespace).Delete(epServiceName, nil) if err != nil { glog.Errorf("error deleting service %s/%s: %v", namespace, epServiceName, err) return fmt.Errorf("error deleting service %s/%s: %v", namespace, epServiceName, err) diff --git a/vendor/k8s.io/kubernetes/pkg/volume/host_path/BUILD b/vendor/k8s.io/kubernetes/pkg/volume/host_path/BUILD index c209bd636968..b158f6de8b1e 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/host_path/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/volume/host_path/BUILD @@ -11,21 +11,10 @@ go_library( srcs = [ "doc.go", "host_path.go", - "nsenter_unsupported.go", - ] + select({ - "@io_bazel_rules_go//go/platform:darwin_amd64": [ - "host_path_unix.go", - ], - "@io_bazel_rules_go//go/platform:linux_amd64": [ - "host_path_unix.go", - "nsenter.go", - ], - "@io_bazel_rules_go//go/platform:windows_amd64": [ - "host_path_windows.go", - ], - "//conditions:default": [], - }), + ], + importpath = "k8s.io/kubernetes/pkg/volume/host_path", deps = [ + "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util/volumehelper:go_default_library", "//pkg/volume/validation:go_default_library", @@ -33,37 +22,26 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library", - ] + select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ - "//vendor/k8s.io/utils/exec:go_default_library", - ], - "//conditions:default": [], - }), + ], ) go_test( name = "go_default_test", - srcs = select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ - "host_path_test.go", - ], - "//conditions:default": [], - }), + srcs = ["host_path_test.go"], + importpath = "k8s.io/kubernetes/pkg/volume/host_path", library = ":go_default_library", - deps = select({ - "@io_bazel_rules_go//go/platform:linux_amd64": [ - "//pkg/util/file:go_default_library", - "//pkg/volume:go_default_library", - "//pkg/volume/testing:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - 
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library", - ], - "//conditions:default": [], - }), + deps = [ + "//pkg/util/file:go_default_library", + "//pkg/util/mount:go_default_library", + "//pkg/volume:go_default_library", + "//pkg/volume/testing:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library", + ], ) filegroup( diff --git a/vendor/k8s.io/kubernetes/pkg/volume/host_path/OWNERS b/vendor/k8s.io/kubernetes/pkg/volume/host_path/OWNERS index 77e285d35a60..2a4ad74aecd6 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/host_path/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/volume/host_path/OWNERS @@ -3,13 +3,7 @@ approvers: - saad-ali - thockin reviewers: -- thockin -- smarterclayton -- brendandburns -- derekwaynecarr -- pmorie - saad-ali -- justinsb - jsafrane - rootfs - jingxu97 diff --git a/vendor/k8s.io/kubernetes/pkg/volume/host_path/host_path.go b/vendor/k8s.io/kubernetes/pkg/volume/host_path/host_path.go index 0ca43cc85857..97e3fa5db558 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/host_path/host_path.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/host_path/host_path.go @@ -25,6 +25,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util/volumehelper" "k8s.io/kubernetes/pkg/volume/validation" @@ -113,8 +114,9 @@ func (plugin *hostPathPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, opts vo pathType = hostPathVolumeSource.Type } return &hostPathMounter{ - hostPath: &hostPath{path: path, pathType: pathType, containerized: opts.Containerized}, + hostPath: &hostPath{path: path, pathType: pathType}, readOnly: readOnly, + mounter: plugin.host.GetMounter(plugin.GetPluginName()), }, nil } @@ -182,9 +184,8 @@ func newProvisioner(options volume.VolumeOptions, host volume.VolumeHost, plugin // HostPath volumes represent a bare host file or directory mount. // The direct at the specified path will be directly exposed to the container. type hostPath struct { - path string - pathType *v1.HostPathType - containerized bool + path string + pathType *v1.HostPathType volume.MetricsNil } @@ -195,6 +196,7 @@ func (hp *hostPath) GetPath() string { type hostPathMounter struct { *hostPath readOnly bool + mounter mount.Interface } var _ volume.Mounter = &hostPathMounter{} @@ -224,7 +226,7 @@ func (b *hostPathMounter) SetUp(fsGroup *int64) error { if *b.pathType == v1.HostPathUnset { return nil } - return checkType(b.GetPath(), b.pathType, b.containerized) + return checkType(b.GetPath(), b.pathType, b.mounter) } // SetUpAt does not make sense for host paths - probably programmer error. 
@@ -340,132 +342,77 @@ type hostPathTypeChecker interface { GetPath() string } -type fileTypeChecker interface { - getFileType(fileInfo os.FileInfo) (v1.HostPathType, error) -} - -// this is implemented in per-OS files -type defaultFileTypeChecker struct{} - -type osFileTypeChecker struct { +type fileTypeChecker struct { path string exists bool - info os.FileInfo - checker fileTypeChecker + mounter mount.Interface } -func (ftc *osFileTypeChecker) Exists() bool { - return ftc.exists +func (ftc *fileTypeChecker) Exists() bool { + return ftc.mounter.ExistsPath(ftc.path) } -func (ftc *osFileTypeChecker) IsFile() bool { +func (ftc *fileTypeChecker) IsFile() bool { if !ftc.Exists() { return false } - return !ftc.info.IsDir() + return !ftc.IsDir() } -func (ftc *osFileTypeChecker) MakeFile() error { - f, err := os.OpenFile(ftc.path, os.O_CREATE, os.FileMode(0644)) - defer f.Close() - if err != nil { - if !os.IsExist(err) { - return err - } - } - return nil +func (ftc *fileTypeChecker) MakeFile() error { + return ftc.mounter.MakeFile(ftc.path) } -func (ftc *osFileTypeChecker) IsDir() bool { +func (ftc *fileTypeChecker) IsDir() bool { if !ftc.Exists() { return false } - return ftc.info.IsDir() -} - -func (ftc *osFileTypeChecker) MakeDir() error { - err := os.MkdirAll(ftc.path, os.FileMode(0755)) + pathType, err := ftc.mounter.GetFileType(ftc.path) if err != nil { - if !os.IsExist(err) { - return err - } + return false } - return nil + return string(pathType) == string(v1.HostPathDirectory) } -func (ftc *osFileTypeChecker) IsBlock() bool { - if !ftc.Exists() { - return false - } +func (ftc *fileTypeChecker) MakeDir() error { + return ftc.mounter.MakeDir(ftc.path) +} - blkDevType, err := ftc.checker.getFileType(ftc.info) +func (ftc *fileTypeChecker) IsBlock() bool { + blkDevType, err := ftc.mounter.GetFileType(ftc.path) if err != nil { return false } - return blkDevType == v1.HostPathBlockDev + return string(blkDevType) == string(v1.HostPathBlockDev) } -func (ftc *osFileTypeChecker) IsChar() bool { - if !ftc.Exists() { - return false - } - - charDevType, err := ftc.checker.getFileType(ftc.info) +func (ftc *fileTypeChecker) IsChar() bool { + charDevType, err := ftc.mounter.GetFileType(ftc.path) if err != nil { return false } - return charDevType == v1.HostPathCharDev + return string(charDevType) == string(v1.HostPathCharDev) } -func (ftc *osFileTypeChecker) IsSocket() bool { - if !ftc.Exists() { - return false - } - - socketType, err := ftc.checker.getFileType(ftc.info) +func (ftc *fileTypeChecker) IsSocket() bool { + socketType, err := ftc.mounter.GetFileType(ftc.path) if err != nil { return false } - return socketType == v1.HostPathSocket + return string(socketType) == string(v1.HostPathSocket) } -func (ftc *osFileTypeChecker) GetPath() string { +func (ftc *fileTypeChecker) GetPath() string { return ftc.path } -func newOSFileTypeChecker(path string, checker fileTypeChecker) (hostPathTypeChecker, error) { - ftc := osFileTypeChecker{path: path, checker: checker} - info, err := os.Stat(path) - if err != nil { - ftc.exists = false - if !os.IsNotExist(err) { - return nil, err - } - } else { - ftc.info = info - ftc.exists = true - } - return &ftc, nil -} - -func checkType(path string, pathType *v1.HostPathType, containerized bool) error { - var ftc hostPathTypeChecker - var err error - if containerized { - // For a containerized kubelet, use nsenter to run commands in - // the host's mount namespace. 
- // TODO(dixudx): setns into docker's mount namespace, and then run the exact same go code for checks/setup - ftc, err = newNsenterFileTypeChecker(path) - if err != nil { - return err - } - } else { - ftc, err = newOSFileTypeChecker(path, &defaultFileTypeChecker{}) - if err != nil { - return err - } - } - return checkTypeInternal(ftc, pathType) +func newFileTypeChecker(path string, mounter mount.Interface) hostPathTypeChecker { + return &fileTypeChecker{path: path, mounter: mounter} +} + +// checkType checks whether the given path is the exact pathType +func checkType(path string, pathType *v1.HostPathType, mounter mount.Interface) error { + return checkTypeInternal(newFileTypeChecker(path, mounter), pathType) } func checkTypeInternal(ftc hostPathTypeChecker, pathType *v1.HostPathType) error { diff --git a/vendor/k8s.io/kubernetes/pkg/volume/host_path/host_path_unix.go b/vendor/k8s.io/kubernetes/pkg/volume/host_path/host_path_unix.go deleted file mode 100644 index 05deaaed5b2a..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/volume/host_path/host_path_unix.go +++ /dev/null @@ -1,40 +0,0 @@ -// +build linux darwin - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package host_path - -import ( - "fmt" - "os" - "syscall" - - "k8s.io/api/core/v1" -) - -func (dftc *defaultFileTypeChecker) getFileType(info os.FileInfo) (v1.HostPathType, error) { - mode := info.Sys().(*syscall.Stat_t).Mode - switch mode & syscall.S_IFMT { - case syscall.S_IFSOCK: - return v1.HostPathSocket, nil - case syscall.S_IFBLK: - return v1.HostPathBlockDev, nil - case syscall.S_IFCHR: - return v1.HostPathCharDev, nil - } - return "", fmt.Errorf("only recognise socket, block device and character device") -} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/host_path/host_path_windows.go b/vendor/k8s.io/kubernetes/pkg/volume/host_path/host_path_windows.go deleted file mode 100644 index ff27e49371f5..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/volume/host_path/host_path_windows.go +++ /dev/null @@ -1,38 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package host_path - -import ( - "fmt" - "os" - "syscall" - - "k8s.io/api/core/v1" -) - -func (dftc *defaultFileTypeChecker) getFileType(info os.FileInfo) (v1.HostPathType, error) { - mode := info.Sys().(*syscall.Win32FileAttributeData).FileAttributes - switch mode & syscall.S_IFMT { - case syscall.S_IFSOCK: - return v1.HostPathSocket, nil - case syscall.S_IFBLK: - return v1.HostPathBlockDev, nil - case syscall.S_IFCHR: - return v1.HostPathCharDev, nil - } - return "", fmt.Errorf("only recognise socket, block device and character device") -} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/host_path/nsenter.go b/vendor/k8s.io/kubernetes/pkg/volume/host_path/nsenter.go deleted file mode 100644 index c3c3cfece577..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/volume/host_path/nsenter.go +++ /dev/null @@ -1,150 +0,0 @@ -// +build linux - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package host_path - -import ( - "fmt" - - "k8s.io/utils/exec" -) - -const ( - hostProcMountsNamespace = "/rootfs/proc/1/ns/mnt" - nsenterCmd = "nsenter" - statCmd = "stat" - touchCmd = "touch" - mkdirCmd = "mkdir" -) - -// nsenterFileTypeChecker is part of experimental support for running the kubelet -// in a container. nsenterFileTypeChecker works by executing "nsenter" to run commands in -// the host's mount namespace. -// -// nsenterFileTypeChecker requires: -// -// 1. The host's root filesystem must be available at "/rootfs"; -// 2. The "nsenter" binary must be on the Kubelet process' PATH in the container's -// filesystem; -// 3. The Kubelet process must have CAP_SYS_ADMIN (required by "nsenter"); at -// the present, this effectively means that the kubelet is running in a -// privileged container; -// 4. 
The host image must have "stat", "touch", "mkdir" binaries in "/bin", "/usr/sbin", or "/usr/bin"; - -type nsenterFileTypeChecker struct { - path string - exists bool -} - -func newNsenterFileTypeChecker(path string) (hostPathTypeChecker, error) { - ftc := &nsenterFileTypeChecker{path: path} - ftc.Exists() - return ftc, nil -} - -func (ftc *nsenterFileTypeChecker) Exists() bool { - args := []string{ - fmt.Sprintf("--mount=%s", hostProcMountsNamespace), - "--", - "ls", - ftc.path, - } - exec := exec.New() - _, err := exec.Command(nsenterCmd, args...).CombinedOutput() - if err == nil { - ftc.exists = true - } - return ftc.exists -} - -func (ftc *nsenterFileTypeChecker) IsFile() bool { - if !ftc.Exists() { - return false - } - return !ftc.IsDir() -} - -func (ftc *nsenterFileTypeChecker) MakeFile() error { - args := []string{ - fmt.Sprintf("--mount=%s", hostProcMountsNamespace), - "--", - touchCmd, - ftc.path, - } - exec := exec.New() - if _, err := exec.Command(nsenterCmd, args...).CombinedOutput(); err != nil { - return err - } - return nil -} - -func (ftc *nsenterFileTypeChecker) IsDir() bool { - return ftc.checkMimetype("directory") -} - -func (ftc *nsenterFileTypeChecker) MakeDir() error { - args := []string{ - fmt.Sprintf("--mount=%s", hostProcMountsNamespace), - "--", - mkdirCmd, - "-p", - ftc.path, - } - exec := exec.New() - if _, err := exec.Command(nsenterCmd, args...).CombinedOutput(); err != nil { - return err - } - return nil -} - -func (ftc *nsenterFileTypeChecker) IsBlock() bool { - return ftc.checkMimetype("block special file") -} - -func (ftc *nsenterFileTypeChecker) IsChar() bool { - return ftc.checkMimetype("character special file") -} - -func (ftc *nsenterFileTypeChecker) IsSocket() bool { - return ftc.checkMimetype("socket") -} - -func (ftc *nsenterFileTypeChecker) GetPath() string { - return ftc.path -} - -func (ftc *nsenterFileTypeChecker) checkMimetype(checkedType string) bool { - if !ftc.Exists() { - return false - } - - args := []string{ - fmt.Sprintf("--mount=%s", hostProcMountsNamespace), - "--", - statCmd, - "-L", - `--printf "%F"`, - ftc.path, - } - exec := exec.New() - outputBytes, err := exec.Command(nsenterCmd, args...).CombinedOutput() - if err != nil { - return false - } - return string(outputBytes) == checkedType -} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/host_path/nsenter_unsupported.go b/vendor/k8s.io/kubernetes/pkg/volume/host_path/nsenter_unsupported.go deleted file mode 100644 index 166ef9f0a391..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/volume/host_path/nsenter_unsupported.go +++ /dev/null @@ -1,66 +0,0 @@ -// +build !linux - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package host_path - -type nsenterFileTypeChecker struct { - path string - exists bool -} - -func newNsenterFileTypeChecker(path string) (hostPathTypeChecker, error) { - ftc := &nsenterFileTypeChecker{path: path} - ftc.Exists() - return ftc, nil -} - -func (ftc *nsenterFileTypeChecker) Exists() bool { - return false -} - -func (ftc *nsenterFileTypeChecker) IsFile() bool { - return false -} - -func (ftc *nsenterFileTypeChecker) MakeFile() error { - return nil -} - -func (ftc *nsenterFileTypeChecker) IsDir() bool { - return false -} - -func (ftc *nsenterFileTypeChecker) MakeDir() error { - return nil -} - -func (ftc *nsenterFileTypeChecker) IsBlock() bool { - return false -} - -func (ftc *nsenterFileTypeChecker) IsChar() bool { - return false -} - -func (ftc *nsenterFileTypeChecker) IsSocket() bool { - return false -} - -func (ftc *nsenterFileTypeChecker) GetPath() string { - return ftc.path -} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/iscsi/BUILD b/vendor/k8s.io/kubernetes/pkg/volume/iscsi/BUILD index 4ed90f888698..3c7a7598136a 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/iscsi/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/volume/iscsi/BUILD @@ -15,6 +15,7 @@ go_library( "iscsi.go", "iscsi_util.go", ], + importpath = "k8s.io/kubernetes/pkg/volume/iscsi", deps = [ "//pkg/util/mount:go_default_library", "//pkg/util/strings:go_default_library", @@ -22,6 +23,7 @@ go_library( "//pkg/volume/util:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", ], ) @@ -32,6 +34,7 @@ go_test( "iscsi_test.go", "iscsi_util_test.go", ], + importpath = "k8s.io/kubernetes/pkg/volume/iscsi", library = ":go_default_library", deps = [ "//pkg/util/mount:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/volume/iscsi/OWNERS b/vendor/k8s.io/kubernetes/pkg/volume/iscsi/OWNERS index 99e2d8b366e6..a7992dec6529 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/iscsi/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/volume/iscsi/OWNERS @@ -3,13 +3,7 @@ approvers: - saad-ali - rootfs reviewers: -- thockin -- smarterclayton -- brendandburns -- derekwaynecarr -- pmorie - saad-ali -- justinsb - jsafrane - rootfs - humblec diff --git a/vendor/k8s.io/kubernetes/pkg/volume/iscsi/attacher.go b/vendor/k8s.io/kubernetes/pkg/volume/iscsi/attacher.go index 4ff499cc408d..c039a70db8c5 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/iscsi/attacher.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/iscsi/attacher.go @@ -24,6 +24,7 @@ import ( "github.com/golang/glog" "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" @@ -100,7 +101,7 @@ func (attacher *iscsiAttacher) MountDevice(spec *volume.Spec, devicePath string, return err } } - volumeSource, readOnly, err := getVolumeSource(spec) + readOnly, fsType, err := getISCSIVolumeInfo(spec) if err != nil { return err } @@ -112,7 +113,7 @@ func (attacher *iscsiAttacher) MountDevice(spec *volume.Spec, devicePath string, if notMnt { diskMounter := &mount.SafeFormatAndMount{Interface: mounter, Exec: attacher.host.GetExec(iscsiPluginName)} mountOptions := volume.MountOptionFromSpec(spec, options...) 
- err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions) + err = diskMounter.FormatAndMount(devicePath, deviceMountPath, fsType, mountOptions) if err != nil { os.Remove(deviceMountPath) return err @@ -137,7 +138,7 @@ func (plugin *iscsiPlugin) NewDetacher() (volume.Detacher, error) { }, nil } -func (detacher *iscsiDetacher) Detach(deviceMountPath string, nodeName types.NodeName) error { +func (detacher *iscsiDetacher) Detach(volumeName string, nodeName types.NodeName) error { return nil } @@ -147,6 +148,11 @@ func (detacher *iscsiDetacher) UnmountDevice(deviceMountPath string) error { if err != nil { return fmt.Errorf("iscsi: failed to detach disk: %s\nError: %v", deviceMountPath, err) } + glog.V(4).Infof("iscsi: %q is unmounted, deleting the directory", deviceMountPath) + err = os.RemoveAll(deviceMountPath) + if err != nil { + return fmt.Errorf("iscsi: failed to delete the directory: %s\nError: %v", deviceMountPath, err) + } glog.V(4).Infof("iscsi: successfully detached disk: %s", deviceMountPath) return nil } @@ -154,29 +160,75 @@ func (detacher *iscsiDetacher) UnmountDevice(deviceMountPath string) error { func (attacher *iscsiAttacher) volumeSpecToMounter(spec *volume.Spec, host volume.VolumeHost, pod *v1.Pod) (*iscsiDiskMounter, error) { var secret map[string]string var bkportal []string - iscsi, readOnly, err := getVolumeSource(spec) + readOnly, fsType, err := getISCSIVolumeInfo(spec) if err != nil { return nil, err } - // Obtain secret for AttachDisk - if iscsi.SecretRef != nil && pod != nil { - if secret, err = volumeutil.GetSecretForPod(pod, iscsi.SecretRef.Name, host.GetKubeClient()); err != nil { - glog.Errorf("Couldn't get secret from %v/%v", pod.Namespace, iscsi.SecretRef) + if pod != nil { + chapDiscovery, err := getISCSIDiscoveryCHAPInfo(spec) + if err != nil { + return nil, err + } + chapSession, err := getISCSISessionCHAPInfo(spec) + if err != nil { return nil, err } + if chapDiscovery || chapSession { + secretName, secretNamespace, err := getISCSISecretNameAndNamespace(spec, pod.Namespace) + if err != nil { + return nil, err + } + if len(secretNamespace) == 0 || len(secretName) == 0 { + return nil, fmt.Errorf("CHAP enabled but secret name or namespace is empty") + } + // if secret is provided, retrieve it + kubeClient := host.GetKubeClient() + if kubeClient == nil { + return nil, fmt.Errorf("Cannot get kube client") + } + secretObj, err := kubeClient.Core().Secrets(secretNamespace).Get(secretName, metav1.GetOptions{}) + if err != nil { + err = fmt.Errorf("Couldn't get secret %v/%v error: %v", secretNamespace, secretName, err) + return nil, err + } + secret = make(map[string]string) + for name, data := range secretObj.Data { + glog.V(6).Infof("retrieving CHAP secret name: %s", name) + secret[name] = string(data) + } + } + + } + tp, portals, iqn, lunStr, err := getISCSITargetInfo(spec) + if err != nil { + return nil, err } - lun := strconv.Itoa(int(iscsi.Lun)) - portal := portalMounter(iscsi.TargetPortal) + + lun := strconv.Itoa(int(lunStr)) + portal := portalMounter(tp) bkportal = append(bkportal, portal) - for _, tp := range iscsi.Portals { - bkportal = append(bkportal, portalMounter(string(tp))) + for _, p := range portals { + bkportal = append(bkportal, portalMounter(string(p))) } - iface := iscsi.ISCSIInterface - exec := attacher.host.GetExec(iscsiPluginName) + + iface, initiatorNamePtr, err := getISCSIInitiatorInfo(spec) + if err != nil { + return nil, err + } + var initiatorName string - if iscsi.InitiatorName != nil { - 
initiatorName = *iscsi.InitiatorName + if initiatorNamePtr != nil { + initiatorName = *initiatorNamePtr } + chapDiscovery, err := getISCSIDiscoveryCHAPInfo(spec) + if err != nil { + return nil, err + } + chapSession, err := getISCSISessionCHAPInfo(spec) + if err != nil { + return nil, err + } + exec := attacher.host.GetExec(iscsiPluginName) return &iscsiDiskMounter{ iscsiDisk: &iscsiDisk{ @@ -185,15 +237,15 @@ func (attacher *iscsiAttacher) volumeSpecToMounter(spec *volume.Spec, host volum }, VolName: spec.Name(), Portals: bkportal, - Iqn: iscsi.IQN, + Iqn: iqn, lun: lun, Iface: iface, - chap_discovery: iscsi.DiscoveryCHAPAuth, - chap_session: iscsi.SessionCHAPAuth, + chap_discovery: chapDiscovery, + chap_session: chapSession, secret: secret, InitiatorName: initiatorName, manager: &ISCSIUtil{}}, - fsType: iscsi.FSType, + fsType: fsType, readOnly: readOnly, mounter: &mount.SafeFormatAndMount{Interface: host.GetMounter(iscsiPluginName), Exec: exec}, exec: exec, diff --git a/vendor/k8s.io/kubernetes/pkg/volume/iscsi/disk_manager.go b/vendor/k8s.io/kubernetes/pkg/volume/iscsi/disk_manager.go index 4741aa09f980..ea00c7ebcbeb 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/iscsi/disk_manager.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/iscsi/disk_manager.go @@ -94,36 +94,3 @@ func diskSetUp(manager diskManager, b iscsiDiskMounter, volPath string, mounter return nil } - -// utility to tear down a disk based filesystem -func diskTearDown(manager diskManager, c iscsiDiskUnmounter, volPath string, mounter mount.Interface) error { - notMnt, err := mounter.IsLikelyNotMountPoint(volPath) - if err != nil { - glog.Errorf("cannot validate mountpoint %s", volPath) - return err - } - if notMnt { - return os.Remove(volPath) - } - _, err = mount.GetMountRefs(mounter, volPath) - if err != nil { - glog.Errorf("failed to get reference count %s", volPath) - return err - } - if err := mounter.Unmount(volPath); err != nil { - glog.Errorf("failed to unmount %s", volPath) - return err - } - notMnt, mntErr := mounter.IsLikelyNotMountPoint(volPath) - if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) - return err - } - if notMnt { - if err := os.Remove(volPath); err != nil { - return err - } - } - return nil - -} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi.go b/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi.go index 6c7025669289..a7893ec7218f 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi.go @@ -23,6 +23,7 @@ import ( "github.com/golang/glog" "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/pkg/util/mount" utilstrings "k8s.io/kubernetes/pkg/util/strings" @@ -56,16 +57,12 @@ func (plugin *iscsiPlugin) GetPluginName() string { } func (plugin *iscsiPlugin) GetVolumeName(spec *volume.Spec) (string, error) { - volumeSource, _, err := getVolumeSource(spec) + tp, _, iqn, lun, err := getISCSITargetInfo(spec) if err != nil { return "", err } - return fmt.Sprintf( - "%v:%v:%v", - volumeSource.TargetPortal, - volumeSource.IQN, - volumeSource.Lun), nil + return fmt.Sprintf("%v:%v:%v", tp, iqn, lun), nil } func (plugin *iscsiPlugin) CanSupport(spec *volume.Spec) bool { @@ -98,41 +95,80 @@ func (plugin *iscsiPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode { func (plugin *iscsiPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) { // Inject real implementations here, test through the 
internal function. var secret map[string]string - source, _, err := getVolumeSource(spec) + if pod == nil { + return nil, fmt.Errorf("nil pod") + } + chapDiscover, err := getISCSIDiscoveryCHAPInfo(spec) if err != nil { return nil, err } - - if source.SecretRef != nil { - if secret, err = ioutil.GetSecretForPod(pod, source.SecretRef.Name, plugin.host.GetKubeClient()); err != nil { - glog.Errorf("Couldn't get secret from %v/%v", pod.Namespace, source.SecretRef) + chapSession, err := getISCSISessionCHAPInfo(spec) + if err != nil { + return nil, err + } + if chapDiscover || chapSession { + secretName, secretNamespace, err := getISCSISecretNameAndNamespace(spec, pod.Namespace) + if err != nil { return nil, err } - } + if len(secretName) > 0 && len(secretNamespace) > 0 { + // if secret is provideded, retrieve it + kubeClient := plugin.host.GetKubeClient() + if kubeClient == nil { + return nil, fmt.Errorf("Cannot get kube client") + } + secretObj, err := kubeClient.Core().Secrets(secretNamespace).Get(secretName, metav1.GetOptions{}) + if err != nil { + err = fmt.Errorf("Couldn't get secret %v/%v error: %v", secretNamespace, secretName, err) + return nil, err + } + secret = make(map[string]string) + for name, data := range secretObj.Data { + glog.V(4).Infof("retrieving CHAP secret name: %s", name) + secret[name] = string(data) + } + } + } return plugin.newMounterInternal(spec, pod.UID, &ISCSIUtil{}, plugin.host.GetMounter(plugin.GetPluginName()), plugin.host.GetExec(plugin.GetPluginName()), secret) } func (plugin *iscsiPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface, exec mount.Exec, secret map[string]string) (volume.Mounter, error) { // iscsi volumes used directly in a pod have a ReadOnly flag set by the pod author. 
// iscsi volumes used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV - iscsi, readOnly, err := getVolumeSource(spec) + readOnly, fsType, err := getISCSIVolumeInfo(spec) + if err != nil { + return nil, err + } + tp, portals, iqn, lunStr, err := getISCSITargetInfo(spec) if err != nil { return nil, err } - lun := strconv.Itoa(int(iscsi.Lun)) - portal := portalMounter(iscsi.TargetPortal) + lun := strconv.Itoa(int(lunStr)) + portal := portalMounter(tp) var bkportal []string bkportal = append(bkportal, portal) - for _, tp := range iscsi.Portals { - bkportal = append(bkportal, portalMounter(string(tp))) + for _, p := range portals { + bkportal = append(bkportal, portalMounter(string(p))) + } + + iface, initiatorNamePtr, err := getISCSIInitiatorInfo(spec) + if err != nil { + return nil, err } - iface := iscsi.ISCSIInterface var initiatorName string - if iscsi.InitiatorName != nil { - initiatorName = *iscsi.InitiatorName + if initiatorNamePtr != nil { + initiatorName = *initiatorNamePtr + } + chapDiscovery, err := getISCSIDiscoveryCHAPInfo(spec) + if err != nil { + return nil, err + } + chapSession, err := getISCSISessionCHAPInfo(spec) + if err != nil { + return nil, err } return &iscsiDiskMounter{ @@ -140,16 +176,16 @@ func (plugin *iscsiPlugin) newMounterInternal(spec *volume.Spec, podUID types.UI podUID: podUID, VolName: spec.Name(), Portals: bkportal, - Iqn: iscsi.IQN, + Iqn: iqn, lun: lun, Iface: iface, - chap_discovery: iscsi.DiscoveryCHAPAuth, - chap_session: iscsi.SessionCHAPAuth, + chap_discovery: chapDiscovery, + chap_session: chapSession, secret: secret, InitiatorName: initiatorName, manager: manager, plugin: plugin}, - fsType: iscsi.FSType, + fsType: fsType, readOnly: readOnly, mounter: &mount.SafeFormatAndMount{Interface: mounter, Exec: exec}, exec: exec, @@ -267,13 +303,7 @@ func (c *iscsiDiskUnmounter) TearDown() error { } func (c *iscsiDiskUnmounter) TearDownAt(dir string) error { - if pathExists, pathErr := ioutil.PathExists(dir); pathErr != nil { - return fmt.Errorf("Error checking if path exists: %v", pathErr) - } else if !pathExists { - glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir) - return nil - } - return diskTearDown(c.manager, *c, dir, c.mounter) + return ioutil.UnmountPath(dir, c.mounter) } func portalMounter(portal string) string { @@ -283,13 +313,87 @@ func portalMounter(portal string) string { return portal } -func getVolumeSource(spec *volume.Spec) (*v1.ISCSIVolumeSource, bool, error) { +// get iSCSI volume info: readOnly and fstype +func getISCSIVolumeInfo(spec *volume.Spec) (bool, string, error) { + // for volume source, readonly is in volume spec + // for PV, readonly is in PV spec if spec.Volume != nil && spec.Volume.ISCSI != nil { - return spec.Volume.ISCSI, spec.Volume.ISCSI.ReadOnly, nil + return spec.Volume.ISCSI.ReadOnly, spec.Volume.ISCSI.FSType, nil } else if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.ISCSI != nil { - return spec.PersistentVolume.Spec.ISCSI, spec.ReadOnly, nil + return spec.ReadOnly, spec.PersistentVolume.Spec.ISCSI.FSType, nil + } + + return false, "", fmt.Errorf("Spec does not reference an ISCSI volume type") +} + +// get iSCSI target info: target portal, portals, iqn, and lun +func getISCSITargetInfo(spec *volume.Spec) (string, []string, string, int32, error) { + if spec.Volume != nil && spec.Volume.ISCSI != nil { + return spec.Volume.ISCSI.TargetPortal, spec.Volume.ISCSI.Portals, spec.Volume.ISCSI.IQN, spec.Volume.ISCSI.Lun, 
nil + } else if spec.PersistentVolume != nil && + spec.PersistentVolume.Spec.ISCSI != nil { + return spec.PersistentVolume.Spec.ISCSI.TargetPortal, spec.PersistentVolume.Spec.ISCSI.Portals, spec.PersistentVolume.Spec.ISCSI.IQN, spec.PersistentVolume.Spec.ISCSI.Lun, nil + } + + return "", nil, "", 0, fmt.Errorf("Spec does not reference an ISCSI volume type") +} + +// get iSCSI initiator info: iface and initiator name +func getISCSIInitiatorInfo(spec *volume.Spec) (string, *string, error) { + if spec.Volume != nil && spec.Volume.ISCSI != nil { + return spec.Volume.ISCSI.ISCSIInterface, spec.Volume.ISCSI.InitiatorName, nil + } else if spec.PersistentVolume != nil && + spec.PersistentVolume.Spec.ISCSI != nil { + return spec.PersistentVolume.Spec.ISCSI.ISCSIInterface, spec.PersistentVolume.Spec.ISCSI.InitiatorName, nil + } + + return "", nil, fmt.Errorf("Spec does not reference an ISCSI volume type") +} + +// get iSCSI Discovery CHAP boolean +func getISCSIDiscoveryCHAPInfo(spec *volume.Spec) (bool, error) { + if spec.Volume != nil && spec.Volume.ISCSI != nil { + return spec.Volume.ISCSI.DiscoveryCHAPAuth, nil + } else if spec.PersistentVolume != nil && + spec.PersistentVolume.Spec.ISCSI != nil { + return spec.PersistentVolume.Spec.ISCSI.DiscoveryCHAPAuth, nil + } + + return false, fmt.Errorf("Spec does not reference an ISCSI volume type") +} + +// get iSCSI Session CHAP boolean +func getISCSISessionCHAPInfo(spec *volume.Spec) (bool, error) { + if spec.Volume != nil && spec.Volume.ISCSI != nil { + return spec.Volume.ISCSI.SessionCHAPAuth, nil + } else if spec.PersistentVolume != nil && + spec.PersistentVolume.Spec.ISCSI != nil { + return spec.PersistentVolume.Spec.ISCSI.SessionCHAPAuth, nil + } + + return false, fmt.Errorf("Spec does not reference an ISCSI volume type") +} + +// get iSCSI CHAP Secret info: secret name and namespace +func getISCSISecretNameAndNamespace(spec *volume.Spec, defaultSecretNamespace string) (string, string, error) { + if spec.Volume != nil && spec.Volume.ISCSI != nil { + if spec.Volume.ISCSI.SecretRef != nil { + return spec.Volume.ISCSI.SecretRef.Name, defaultSecretNamespace, nil + } + return "", "", nil + } else if spec.PersistentVolume != nil && + spec.PersistentVolume.Spec.ISCSI != nil { + secretRef := spec.PersistentVolume.Spec.ISCSI.SecretRef + secretNs := defaultSecretNamespace + if secretRef != nil { + if len(secretRef.Namespace) != 0 { + secretNs = secretRef.Namespace + } + return secretRef.Name, secretNs, nil + } + return "", "", nil } - return nil, false, fmt.Errorf("Spec does not reference an ISCSI volume type") + return "", "", fmt.Errorf("Spec does not reference an ISCSI volume type") } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi_util.go b/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi_util.go index 471e2bb035f3..a72dace77352 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi_util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi_util.go @@ -43,6 +43,8 @@ var ( "node.session.auth.password", "node.session.auth.username_in", "node.session.auth.password_in"} + ifaceTransportNameRe = regexp.MustCompile(`iface.transport_name = (.*)\n`) + ifaceRe = regexp.MustCompile(`.+/iface-([^/]+)/.+`) ) func updateISCSIDiscoverydb(b iscsiDiskMounter, tp string) error { @@ -293,6 +295,9 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) { glog.Errorf("iscsi: failed to get any path for iscsi disk, last err seen:\n%v", lastErr) return "", fmt.Errorf("failed to get any path for iscsi disk, last err seen:\n%v", lastErr) } 
+ if lastErr != nil { + glog.Errorf("iscsi: last error occurred during iscsi init:\n%v", lastErr) + } //Make sure we use a valid devicepath to find mpio device. devicePath = devicePaths[0] @@ -426,9 +431,7 @@ func (util *ISCSIUtil) DetachDisk(c iscsiDiskUnmounter, mntPath string) error { } func extractTransportname(ifaceOutput string) (iscsiTransport string) { - re := regexp.MustCompile(`iface.transport_name = (.*)\n`) - - rexOutput := re.FindStringSubmatch(ifaceOutput) + rexOutput := ifaceTransportNameRe.FindStringSubmatch(ifaceOutput) if rexOutput == nil { return "" } @@ -457,9 +460,7 @@ func extractDeviceAndPrefix(mntPath string) (string, string, error) { } func extractIface(mntPath string) (string, bool) { - re := regexp.MustCompile(`.+/iface-([^/]+)/.+`) - - reOutput := re.FindStringSubmatch(mntPath) + reOutput := ifaceRe.FindStringSubmatch(mntPath) if reOutput != nil { return reOutput[1], true } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/local/BUILD b/vendor/k8s.io/kubernetes/pkg/volume/local/BUILD index 931fba880c01..8c8d3b1d8a70 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/local/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/volume/local/BUILD @@ -12,6 +12,7 @@ go_library( "doc.go", "local.go", ], + importpath = "k8s.io/kubernetes/pkg/volume/local", deps = [ "//pkg/kubelet/events:go_default_library", "//pkg/util/keymutex:go_default_library", @@ -32,6 +33,7 @@ go_library( go_test( name = "go_default_test", srcs = ["local_test.go"], + importpath = "k8s.io/kubernetes/pkg/volume/local", library = ":go_default_library", deps = [ "//pkg/volume:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/volume/local/OWNERS b/vendor/k8s.io/kubernetes/pkg/volume/local/OWNERS index 558224cf9a46..c995ab66a9eb 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/local/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/volume/local/OWNERS @@ -6,13 +6,7 @@ approvers: - jingxu97 - jsafrane reviewers: -- thockin -- smarterclayton -- brendandburns -- derekwaynecarr -- pmorie - saad-ali -- justinsb - jsafrane - rootfs - jingxu97 diff --git a/vendor/k8s.io/kubernetes/pkg/volume/nfs/BUILD b/vendor/k8s.io/kubernetes/pkg/volume/nfs/BUILD index 14937d80d96e..ad091ffb4d7e 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/nfs/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/volume/nfs/BUILD @@ -12,6 +12,7 @@ go_library( "doc.go", "nfs.go", ], + importpath = "k8s.io/kubernetes/pkg/volume/nfs", deps = [ "//pkg/util/mount:go_default_library", "//pkg/util/strings:go_default_library", @@ -27,6 +28,7 @@ go_library( go_test( name = "go_default_test", srcs = ["nfs_test.go"], + importpath = "k8s.io/kubernetes/pkg/volume/nfs", library = ":go_default_library", deps = [ "//pkg/util/mount:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/volume/nfs/OWNERS b/vendor/k8s.io/kubernetes/pkg/volume/nfs/OWNERS index bd1a85144f0e..025423a9fb6d 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/nfs/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/volume/nfs/OWNERS @@ -4,13 +4,7 @@ approvers: - jingxu97 reviewers: - sjenning -- thockin -- smarterclayton -- brendandburns -- derekwaynecarr -- pmorie - saad-ali -- justinsb - jsafrane - rootfs - jingxu97 diff --git a/vendor/k8s.io/kubernetes/pkg/volume/photon_pd/BUILD b/vendor/k8s.io/kubernetes/pkg/volume/photon_pd/BUILD index a57d2b6cefda..bc72ab05def5 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/photon_pd/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/volume/photon_pd/BUILD @@ -13,6 +13,7 @@ go_library( "photon_pd.go", "photon_util.go", ], + importpath = 
"k8s.io/kubernetes/pkg/volume/photon_pd", deps = [ "//pkg/cloudprovider:go_default_library", "//pkg/cloudprovider/providers/photon:go_default_library", @@ -35,6 +36,7 @@ go_test( "attacher_test.go", "photon_pd_test.go", ], + importpath = "k8s.io/kubernetes/pkg/volume/photon_pd", library = ":go_default_library", deps = [ "//pkg/cloudprovider/providers/photon:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/volume/photon_pd/attacher.go b/vendor/k8s.io/kubernetes/pkg/volume/photon_pd/attacher.go index 4af726716e69..c3938269caa3 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/photon_pd/attacher.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/photon_pd/attacher.go @@ -243,10 +243,10 @@ func (plugin *photonPersistentDiskPlugin) NewDetacher() (volume.Detacher, error) } // Detach the given device from the given host. -func (detacher *photonPersistentDiskDetacher) Detach(deviceMountPath string, nodeName types.NodeName) error { +func (detacher *photonPersistentDiskDetacher) Detach(volumeName string, nodeName types.NodeName) error { hostName := string(nodeName) - pdID := deviceMountPath + pdID := volumeName attached, err := detacher.photonDisks.DiskIsAttached(pdID, nodeName) if err != nil { // Log error and continue with detach diff --git a/vendor/k8s.io/kubernetes/pkg/volume/plugins.go b/vendor/k8s.io/kubernetes/pkg/volume/plugins.go index eb2f8149ac4e..42ceaa40d2f9 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/plugins.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/plugins.go @@ -69,8 +69,6 @@ type VolumeOptions struct { CloudTags *map[string]string // Volume provisioning parameters from StorageClass Parameters map[string]string - // This flag helps identify whether kubelet is running in a container - Containerized bool } type DynamicPluginProber interface { @@ -210,6 +208,26 @@ type ExpandableVolumePlugin interface { RequiresFSResize() bool } +// BlockVolumePlugin is an extend interface of VolumePlugin and is used for block volumes support. +type BlockVolumePlugin interface { + VolumePlugin + // NewBlockVolumeMapper creates a new volume.BlockVolumeMapper from an API specification. + // Ownership of the spec pointer in *not* transferred. + // - spec: The v1.Volume spec + // - pod: The enclosing pod + NewBlockVolumeMapper(spec *Spec, podRef *v1.Pod, opts VolumeOptions) (BlockVolumeMapper, error) + // NewBlockVolumeUnmapper creates a new volume.BlockVolumeUnmapper from recoverable state. + // - name: The volume name, as per the v1.Volume spec. + // - podUID: The UID of the enclosing pod + NewBlockVolumeUnmapper(name string, podUID types.UID) (BlockVolumeUnmapper, error) + // ConstructBlockVolumeSpec constructs a volume spec based on the given + // podUID, volume name and a pod device map path. + // The spec may have incomplete information due to limited information + // from input. This function is used by volume manager to reconstruct + // volume spec by reading the volume directories from disk. + ConstructBlockVolumeSpec(podUID types.UID, volumeName, mountPath string) (*Spec, error) +} + // VolumeHost is an interface that plugins can use to access the kubelet. type VolumeHost interface { // GetPluginDir returns the absolute path to a directory under which @@ -218,6 +236,11 @@ type VolumeHost interface { // GetPodPluginDir(). GetPluginDir(pluginName string) string + // GetVolumeDevicePluginDir returns the absolute path to a directory + // under which a given plugin may store data. + // ex. 
plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/ + GetVolumeDevicePluginDir(pluginName string) string + // GetPodVolumeDir returns the absolute path a directory which // represents the named volume under the named plugin for the given // pod. If the specified pod does not exist, the result of this call @@ -230,6 +253,13 @@ type VolumeHost interface { // directory might not actually exist on disk yet. GetPodPluginDir(podUID types.UID, pluginName string) string + // GetPodVolumeDeviceDir returns the absolute path a directory which + // represents the named plugin for the given pod. + // If the specified pod does not exist, the result of this call + // might not exist. + // ex. pods/{podUid}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/ + GetPodVolumeDeviceDir(podUID types.UID, pluginName string) string + // GetKubeClient returns a client interface GetKubeClient() clientset.Interface @@ -273,6 +303,9 @@ type VolumeHost interface { // Returns the labels on the node GetNodeLabels() (map[string]string, error) + + // Returns the name of the node + GetNodeName() types.NodeName } // VolumePluginMgr tracks registered plugins. @@ -677,6 +710,32 @@ func (pm *VolumePluginMgr) FindExpandablePluginByName(name string) (ExpandableVo return nil, nil } +// FindMapperPluginBySpec fetches a block volume plugin by spec. +func (pm *VolumePluginMgr) FindMapperPluginBySpec(spec *Spec) (BlockVolumePlugin, error) { + volumePlugin, err := pm.FindPluginBySpec(spec) + if err != nil { + return nil, err + } + + if blockVolumePlugin, ok := volumePlugin.(BlockVolumePlugin); ok { + return blockVolumePlugin, nil + } + return nil, nil +} + +// FindMapperPluginByName fetches a block volume plugin by name. +func (pm *VolumePluginMgr) FindMapperPluginByName(name string) (BlockVolumePlugin, error) { + volumePlugin, err := pm.FindPluginByName(name) + if err != nil { + return nil, err + } + + if blockVolumePlugin, ok := volumePlugin.(BlockVolumePlugin); ok { + return blockVolumePlugin, nil + } + return nil, nil +} + // NewPersistentVolumeRecyclerPodTemplate creates a template for a recycler // pod. By default, a recycler pod simply runs "rm -rf" on a volume and tests // for emptiness. 
Most attributes of the template will be correct for most @@ -713,7 +772,7 @@ func NewPersistentVolumeRecyclerPodTemplate() *v1.Pod { Containers: []v1.Container{ { Name: "pv-recycler", - Image: "gcr.io/google_containers/busybox", + Image: "busybox:1.27", Command: []string{"/bin/sh"}, Args: []string{"-c", "test -e /scrub && rm -rf /scrub/..?* /scrub/.[!.]* /scrub/* && test -z \"$(ls -A /scrub)\" || exit 1"}, VolumeMounts: []v1.VolumeMount{ diff --git a/vendor/k8s.io/kubernetes/pkg/volume/portworx/BUILD b/vendor/k8s.io/kubernetes/pkg/volume/portworx/BUILD index 155eddc4ed3b..36714e9635f9 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/portworx/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/volume/portworx/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["portworx_test.go"], + importpath = "k8s.io/kubernetes/pkg/volume/portworx", library = ":go_default_library", deps = [ "//pkg/util/mount:go_default_library", @@ -27,8 +28,9 @@ go_library( "portworx.go", "portworx_util.go", ], + importpath = "k8s.io/kubernetes/pkg/volume/portworx", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/util/strings:go_default_library", "//pkg/volume:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx_util.go b/vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx_util.go index cf7ddf8edb98..60677df2c9a5 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx_util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx_util.go @@ -25,7 +25,7 @@ import ( volumeapi "github.com/libopenstorage/openstorage/volume" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/volume" ) diff --git a/vendor/k8s.io/kubernetes/pkg/volume/projected/BUILD b/vendor/k8s.io/kubernetes/pkg/volume/projected/BUILD index 32d9792192b2..219b8aa59d4a 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/projected/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/volume/projected/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = ["projected_test.go"], + importpath = "k8s.io/kubernetes/pkg/volume/projected", library = ":go_default_library", deps = [ "//pkg/volume:go_default_library", @@ -26,6 +27,7 @@ go_test( go_library( name = "go_default_library", srcs = ["projected.go"], + importpath = "k8s.io/kubernetes/pkg/volume/projected", deps = [ "//pkg/util/strings:go_default_library", "//pkg/volume:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/volume/quobyte/BUILD b/vendor/k8s.io/kubernetes/pkg/volume/quobyte/BUILD index af4351eb8d09..7adf8b7d7aa8 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/quobyte/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/volume/quobyte/BUILD @@ -13,6 +13,7 @@ go_library( "quobyte.go", "quobyte_util.go", ], + importpath = "k8s.io/kubernetes/pkg/volume/quobyte", deps = [ "//pkg/util/mount:go_default_library", "//pkg/util/strings:go_default_library", @@ -32,6 +33,7 @@ go_library( go_test( name = "go_default_test", srcs = ["quobyte_test.go"], + importpath = "k8s.io/kubernetes/pkg/volume/quobyte", library = ":go_default_library", deps = [ "//pkg/util/mount:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/volume/quobyte/OWNERS b/vendor/k8s.io/kubernetes/pkg/volume/quobyte/OWNERS index 7e78589072f9..331e3c38d445 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/quobyte/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/volume/quobyte/OWNERS @@ -3,13 +3,7 @@ 
approvers: - saad-ali reviewers: - johscheuer -- thockin -- smarterclayton -- brendandburns -- derekwaynecarr -- pmorie - saad-ali -- justinsb - jsafrane - rootfs - jingxu97 diff --git a/vendor/k8s.io/kubernetes/pkg/volume/rbd/BUILD b/vendor/k8s.io/kubernetes/pkg/volume/rbd/BUILD index 633c6cbe5ba8..e97b0fc7567c 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/rbd/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/volume/rbd/BUILD @@ -9,12 +9,15 @@ load( go_library( name = "go_default_library", srcs = [ + "attacher.go", "disk_manager.go", "doc.go", "rbd.go", "rbd_util.go", ], + importpath = "k8s.io/kubernetes/pkg/volume/rbd", deps = [ + "//pkg/util/file:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/util/node:go_default_library", "//pkg/util/strings:go_default_library", @@ -35,15 +38,17 @@ go_library( go_test( name = "go_default_test", srcs = ["rbd_test.go"], + importpath = "k8s.io/kubernetes/pkg/volume/rbd", library = ":go_default_library", deps = [ "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", - "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library", "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library", "//vendor/k8s.io/client-go/util/testing:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/pkg/volume/rbd/OWNERS b/vendor/k8s.io/kubernetes/pkg/volume/rbd/OWNERS index d04766747bc7..a700b703ce81 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/rbd/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/volume/rbd/OWNERS @@ -2,13 +2,7 @@ approvers: - rootfs reviewers: - sjenning -- thockin -- smarterclayton -- brendandburns -- derekwaynecarr -- pmorie - saad-ali -- justinsb - jsafrane - rootfs - jingxu97 diff --git a/vendor/k8s.io/kubernetes/pkg/volume/rbd/attacher.go b/vendor/k8s.io/kubernetes/pkg/volume/rbd/attacher.go new file mode 100644 index 000000000000..1454c231fb48 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/volume/rbd/attacher.go @@ -0,0 +1,225 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rbd + +import ( + "fmt" + "os" + "time" + + "github.com/golang/glog" + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/kubernetes/pkg/util/mount" + "k8s.io/kubernetes/pkg/volume" + volutil "k8s.io/kubernetes/pkg/volume/util" + "k8s.io/kubernetes/pkg/volume/util/volumehelper" +) + +// NewAttacher implements AttachableVolumePlugin.NewAttacher. 
+func (plugin *rbdPlugin) NewAttacher() (volume.Attacher, error) { + return plugin.newAttacherInternal(&RBDUtil{}) +} + +func (plugin *rbdPlugin) newAttacherInternal(manager diskManager) (volume.Attacher, error) { + return &rbdAttacher{ + plugin: plugin, + manager: manager, + mounter: volumehelper.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host), + }, nil +} + +// NewDetacher implements AttachableVolumePlugin.NewDetacher. +func (plugin *rbdPlugin) NewDetacher() (volume.Detacher, error) { + return plugin.newDetacherInternal(&RBDUtil{}) +} + +func (plugin *rbdPlugin) newDetacherInternal(manager diskManager) (volume.Detacher, error) { + return &rbdDetacher{ + plugin: plugin, + manager: manager, + mounter: volumehelper.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host), + }, nil +} + +// GetDeviceMountRefs implements AttachableVolumePlugin.GetDeviceMountRefs. +func (plugin *rbdPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) { + mounter := plugin.host.GetMounter(plugin.GetPluginName()) + return mount.GetMountRefs(mounter, deviceMountPath) +} + +// rbdAttacher implements volume.Attacher interface. +type rbdAttacher struct { + plugin *rbdPlugin + mounter *mount.SafeFormatAndMount + manager diskManager +} + +var _ volume.Attacher = &rbdAttacher{} + +// Attach implements Attacher.Attach. +// We do not lock image here, because it requires kube-controller-manager to +// access external `rbd` utility. And there is no need since AttachDetach +// controller will not try to attach RWO volumes which are already attached to +// other nodes. +func (attacher *rbdAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) { + return "", nil +} + +// VolumesAreAttached implements Attacher.VolumesAreAttached. +// There is no way to confirm whether the volume is attached or not from +// outside of the kubelet node. This method needs to return true always, like +// iSCSI, FC plugin. +func (attacher *rbdAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) { + volumesAttachedCheck := make(map[*volume.Spec]bool) + for _, spec := range specs { + volumesAttachedCheck[spec] = true + } + return volumesAttachedCheck, nil +} + +// WaitForAttach implements Attacher.WaitForAttach. It's called by kubelet to +// attach volume onto the node. +// This method is idempotent, callers are responsible for retrying on failure. +func (attacher *rbdAttacher) WaitForAttach(spec *volume.Spec, devicePath string, pod *v1.Pod, timeout time.Duration) (string, error) { + glog.V(4).Infof("rbd: waiting for attach volume (name: %s) for pod (name: %s, uid: %s)", spec.Name(), pod.Name, pod.UID) + mounter, err := attacher.plugin.createMounterFromVolumeSpecAndPod(spec, pod) + if err != nil { + glog.Warningf("failed to create mounter: %v", spec) + return "", err + } + realDevicePath, err := attacher.manager.AttachDisk(*mounter) + if err != nil { + return "", err + } + glog.V(3).Infof("rbd: successfully wait for attach volume (spec: %s, pool: %s, image: %s) at %s", spec.Name(), mounter.Pool, mounter.Image, realDevicePath) + return realDevicePath, nil +} + +// GetDeviceMountPath implements Attacher.GetDeviceMountPath. 
+func (attacher *rbdAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) { + img, err := getVolumeSourceImage(spec) + if err != nil { + return "", err + } + pool, err := getVolumeSourcePool(spec) + if err != nil { + return "", err + } + return makePDNameInternal(attacher.plugin.host, pool, img), nil +} + +// MountDevice implements Attacher.MountDevice. It is called by the kubelet to +// mount device at the given mount path. +// This method is idempotent, callers are responsible for retrying on failure. +func (attacher *rbdAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error { + glog.V(4).Infof("rbd: mouting device %s to %s", devicePath, deviceMountPath) + notMnt, err := attacher.mounter.IsLikelyNotMountPoint(deviceMountPath) + if err != nil { + if os.IsNotExist(err) { + if err := os.MkdirAll(deviceMountPath, 0750); err != nil { + return err + } + notMnt = true + } else { + return err + } + } + if !notMnt { + return nil + } + fstype, err := getVolumeSourceFSType(spec) + if err != nil { + return err + } + ro, err := getVolumeSourceReadOnly(spec) + if err != nil { + return err + } + options := []string{} + if ro { + options = append(options, "ro") + } + mountOptions := volume.MountOptionFromSpec(spec, options...) + err = attacher.mounter.FormatAndMount(devicePath, deviceMountPath, fstype, mountOptions) + if err != nil { + os.Remove(deviceMountPath) + return fmt.Errorf("rbd: failed to mount device %s at %s (fstype: %s), error %v", devicePath, deviceMountPath, fstype, err) + } + glog.V(3).Infof("rbd: successfully mount device %s at %s (fstype: %s)", devicePath, deviceMountPath, fstype) + return nil +} + +// rbdDetacher implements volume.Detacher interface. +type rbdDetacher struct { + plugin *rbdPlugin + manager diskManager + mounter *mount.SafeFormatAndMount +} + +var _ volume.Detacher = &rbdDetacher{} + +// UnmountDevice implements Detacher.UnmountDevice. It unmounts the global +// mount of the RBD image. This is called once all bind mounts have been +// unmounted. +// Internally, it does four things: +// - Unmount device from deviceMountPath. +// - Detach device from the node. +// - Remove lock if found. (No need to check volume readonly or not, because +// device is not on the node anymore, it's safe to remove lock.) +// - Remove the deviceMountPath at last. +// This method is idempotent, callers are responsible for retrying on failure. +func (detacher *rbdDetacher) UnmountDevice(deviceMountPath string) error { + if pathExists, pathErr := volutil.PathExists(deviceMountPath); pathErr != nil { + return fmt.Errorf("Error checking if path exists: %v", pathErr) + } else if !pathExists { + glog.Warningf("Warning: Unmount skipped because path does not exist: %v", deviceMountPath) + return nil + } + devicePath, cnt, err := mount.GetDeviceNameFromMount(detacher.mounter, deviceMountPath) + if err != nil { + return err + } + if cnt > 1 { + return fmt.Errorf("rbd: more than 1 reference counts at %s", deviceMountPath) + } + if cnt == 1 { + // Unmount the device from the device mount point. 
+ glog.V(4).Infof("rbd: unmouting device mountpoint %s", deviceMountPath) + if err = detacher.mounter.Unmount(deviceMountPath); err != nil { + return err + } + glog.V(3).Infof("rbd: successfully umount device mountpath %s", deviceMountPath) + } + glog.V(4).Infof("rbd: detaching device %s", devicePath) + err = detacher.manager.DetachDisk(detacher.plugin, deviceMountPath, devicePath) + if err != nil { + return err + } + glog.V(3).Infof("rbd: successfully detach device %s", devicePath) + err = os.Remove(deviceMountPath) + if err != nil { + return err + } + glog.V(3).Infof("rbd: successfully remove device mount point %s", deviceMountPath) + return nil +} + +// Detach implements Detacher.Detach. +func (detacher *rbdDetacher) Detach(volumeName string, nodeName types.NodeName) error { + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/rbd/disk_manager.go b/vendor/k8s.io/kubernetes/pkg/volume/rbd/disk_manager.go index d219d62ab1b1..87607370448b 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/rbd/disk_manager.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/rbd/disk_manager.go @@ -23,33 +23,47 @@ limitations under the License. package rbd import ( + "fmt" "os" "github.com/golang/glog" "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" ) // Abstract interface to disk operations. type diskManager interface { + // MakeGlobalPDName creates global persistent disk path. MakeGlobalPDName(disk rbd) string // Attaches the disk to the kubelet's host machine. - AttachDisk(disk rbdMounter) error + // If it successfully attaches, the path to the device + // is returned. Otherwise, an error will be returned. + AttachDisk(disk rbdMounter) (string, error) // Detaches the disk from the kubelet's host machine. - DetachDisk(disk rbdUnmounter, mntPath string) error - // Creates a rbd image - CreateImage(provisioner *rbdVolumeProvisioner) (r *v1.RBDVolumeSource, volumeSizeGB int, err error) - // Deletes a rbd image + DetachDisk(plugin *rbdPlugin, deviceMountPath string, device string) error + // Creates a rbd image. + CreateImage(provisioner *rbdVolumeProvisioner) (r *v1.RBDPersistentVolumeSource, volumeSizeGB int, err error) + // Deletes a rbd image. DeleteImage(deleter *rbdVolumeDeleter) error + // Expands a rbd image + ExpandImage(expander *rbdVolumeExpander, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error) } // utility to mount a disk based filesystem func diskSetUp(manager diskManager, b rbdMounter, volPath string, mounter mount.Interface, fsGroup *int64) error { globalPDPath := manager.MakeGlobalPDName(*b.rbd) - // TODO: handle failed mounts here. - notMnt, err := mounter.IsLikelyNotMountPoint(volPath) + notMnt, err := mounter.IsLikelyNotMountPoint(globalPDPath) + if err != nil && !os.IsNotExist(err) { + glog.Errorf("cannot validate mountpoint: %s", globalPDPath) + return err + } + if notMnt { + return fmt.Errorf("no device is mounted at %s", globalPDPath) + } + notMnt, err = mounter.IsLikelyNotMountPoint(volPath) if err != nil && !os.IsNotExist(err) { glog.Errorf("cannot validate mountpoint: %s", volPath) return err @@ -57,10 +71,6 @@ func diskSetUp(manager diskManager, b rbdMounter, volPath string, mounter mount. 
if !notMnt { return nil } - if err := manager.AttachDisk(b); err != nil { - glog.Errorf("failed to attach disk") - return err - } if err := os.MkdirAll(volPath, 0750); err != nil { glog.Errorf("failed to mkdir:%s", volPath) @@ -89,43 +99,31 @@ func diskSetUp(manager diskManager, b rbdMounter, volPath string, mounter mount. // utility to tear down a disk based filesystem func diskTearDown(manager diskManager, c rbdUnmounter, volPath string, mounter mount.Interface) error { notMnt, err := mounter.IsLikelyNotMountPoint(volPath) - if err != nil { - glog.Errorf("cannot validate mountpoint %s", volPath) + if err != nil && !os.IsNotExist(err) { + glog.Errorf("cannot validate mountpoint: %s", volPath) return err } if notMnt { + glog.V(3).Infof("volume path %s is not a mountpoint, deleting", volPath) return os.Remove(volPath) } - refs, err := mount.GetMountRefs(mounter, volPath) - if err != nil { - glog.Errorf("failed to get reference count %s", volPath) - return err - } + // Unmount the bind-mount inside this pod. if err := mounter.Unmount(volPath); err != nil { glog.Errorf("failed to umount %s", volPath) return err } - // If len(refs) is 1, then all bind mounts have been removed, and the - // remaining reference is the global mount. It is safe to detach. - if len(refs) == 1 { - mntPath := refs[0] - if err := manager.DetachDisk(c, mntPath); err != nil { - glog.Errorf("failed to detach disk from %s", mntPath) - return err - } - } notMnt, mntErr := mounter.IsLikelyNotMountPoint(volPath) - if mntErr != nil { + if err != nil && !os.IsNotExist(err) { glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) return err } if notMnt { if err := os.Remove(volPath); err != nil { + glog.V(2).Info("Error removing mountpoint ", volPath, ": ", err) return err } } return nil - } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd.go b/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd.go index cc345b22d70b..fcdb4fa5ee2a 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd.go @@ -41,9 +41,10 @@ var ( // This is the primary entrypoint for volume plugins. func ProbeVolumePlugins() []volume.VolumePlugin { - return []volume.VolumePlugin{&rbdPlugin{nil}} + return []volume.VolumePlugin{&rbdPlugin{}} } +// rbdPlugin implements Volume.VolumePlugin. 
type rbdPlugin struct { host volume.VolumeHost } @@ -52,6 +53,8 @@ var _ volume.VolumePlugin = &rbdPlugin{} var _ volume.PersistentVolumePlugin = &rbdPlugin{} var _ volume.DeletableVolumePlugin = &rbdPlugin{} var _ volume.ProvisionableVolumePlugin = &rbdPlugin{} +var _ volume.AttachableVolumePlugin = &rbdPlugin{} +var _ volume.ExpandableVolumePlugin = &rbdPlugin{} const ( rbdPluginName = "kubernetes.io/rbd" @@ -78,15 +81,19 @@ func (plugin *rbdPlugin) GetPluginName() string { } func (plugin *rbdPlugin) GetVolumeName(spec *volume.Spec) (string, error) { - volumeSource, _, err := getVolumeSource(spec) + mon, err := getVolumeSourceMonitors(spec) + if err != nil { + return "", err + } + img, err := getVolumeSourceImage(spec) if err != nil { return "", err } return fmt.Sprintf( "%v:%v", - volumeSource.CephMonitors, - volumeSource.RBDImage), nil + mon, + img), nil } func (plugin *rbdPlugin) CanSupport(spec *volume.Spec) bool { @@ -116,77 +123,223 @@ func (plugin *rbdPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode { } } -func (plugin *rbdPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) { - var secret string +type rbdVolumeExpander struct { + *rbdMounter +} + +func (plugin *rbdPlugin) getAdminAndSecret(spec *volume.Spec) (string, string, error) { + class, err := volutil.GetClassForVolume(plugin.host.GetKubeClient(), spec.PersistentVolume) + if err != nil { + return "", "", err + } + adminSecretName := "" + adminSecretNamespace := rbdDefaultAdminSecretNamespace + admin := "" + + for k, v := range class.Parameters { + switch dstrings.ToLower(k) { + case "adminid": + admin = v + case "adminsecretname": + adminSecretName = v + case "adminsecretnamespace": + adminSecretNamespace = v + } + } + + if admin == "" { + admin = rbdDefaultAdminId + } + secret, err := parsePVSecret(adminSecretNamespace, adminSecretName, plugin.host.GetKubeClient()) + if err != nil { + return admin, "", fmt.Errorf("failed to get admin secret from [%q/%q]: %v", adminSecretNamespace, adminSecretName, err) + } + + return admin, secret, nil +} + +func (plugin *rbdPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize resource.Quantity, oldSize resource.Quantity) (resource.Quantity, error) { + if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.RBD == nil { + return oldSize, fmt.Errorf("spec.PersistentVolumeSource.Spec.RBD is nil") + } + + // get admin and secret + admin, secret, err := plugin.getAdminAndSecret(spec) + if err != nil { + return oldSize, err + } + + expander := &rbdVolumeExpander{ + rbdMounter: &rbdMounter{ + rbd: &rbd{ + volName: spec.Name(), + Image: spec.PersistentVolume.Spec.RBD.RBDImage, + Pool: spec.PersistentVolume.Spec.RBD.RBDPool, + plugin: plugin, + manager: &RBDUtil{}, + mounter: &mount.SafeFormatAndMount{Interface: plugin.host.GetMounter(plugin.GetPluginName())}, + exec: plugin.host.GetExec(plugin.GetPluginName()), + }, + Mon: spec.PersistentVolume.Spec.RBD.CephMonitors, + adminId: admin, + adminSecret: secret, + }, + } + + expandedSize, err := expander.ResizeImage(oldSize, newSize) + if err != nil { + return oldSize, err + } else { + return expandedSize, nil + } +} + +func (expander *rbdVolumeExpander) ResizeImage(oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error) { + return expander.manager.ExpandImage(expander, oldSize, newSize) +} + +func (plugin *rbdPlugin) RequiresFSResize() bool { + return true +} + +func (plugin *rbdPlugin) createMounterFromVolumeSpecAndPod(spec *volume.Spec, pod *v1.Pod) (*rbdMounter, 
error) { var err error - source, _ := plugin.getRBDVolumeSource(spec) + mon, err := getVolumeSourceMonitors(spec) + if err != nil { + return nil, err + } + img, err := getVolumeSourceImage(spec) + if err != nil { + return nil, err + } + fstype, err := getVolumeSourceFSType(spec) + if err != nil { + return nil, err + } + pool, err := getVolumeSourcePool(spec) + if err != nil { + return nil, err + } + id, err := getVolumeSourceUser(spec) + if err != nil { + return nil, err + } + keyring, err := getVolumeSourceKeyRing(spec) + if err != nil { + return nil, err + } + ro, err := getVolumeSourceReadOnly(spec) + if err != nil { + return nil, err + } - if source.SecretRef != nil { - if secret, err = parsePodSecret(pod, source.SecretRef.Name, plugin.host.GetKubeClient()); err != nil { - glog.Errorf("Couldn't get secret from %v/%v", pod.Namespace, source.SecretRef) + secretName, secretNs, err := getSecretNameAndNamespace(spec, pod.Namespace) + if err != nil { + return nil, err + } + secret := "" + if len(secretName) > 0 && len(secretNs) > 0 { + // if secret is provideded, retrieve it + kubeClient := plugin.host.GetKubeClient() + if kubeClient == nil { + return nil, fmt.Errorf("Cannot get kube client") + } + secrets, err := kubeClient.Core().Secrets(secretNs).Get(secretName, metav1.GetOptions{}) + if err != nil { + err = fmt.Errorf("Couldn't get secret %v/%v err: %v", secretNs, secretName, err) return nil, err } + for _, data := range secrets.Data { + secret = string(data) + } } - // Inject real implementations here, test through the internal function. - return plugin.newMounterInternal(spec, pod.UID, &RBDUtil{}, plugin.host.GetMounter(plugin.GetPluginName()), plugin.host.GetExec(plugin.GetPluginName()), secret) + return &rbdMounter{ + rbd: newRBD("", spec.Name(), img, pool, ro, plugin, &RBDUtil{}), + Mon: mon, + Id: id, + Keyring: keyring, + Secret: secret, + fsType: fstype, + }, nil } -func (plugin *rbdPlugin) getRBDVolumeSource(spec *volume.Spec) (*v1.RBDVolumeSource, bool) { - // rbd volumes used directly in a pod have a ReadOnly flag set by the pod author. - // rbd volumes used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV - if spec.Volume != nil && spec.Volume.RBD != nil { - return spec.Volume.RBD, spec.Volume.RBD.ReadOnly - } else { - return spec.PersistentVolume.Spec.RBD, spec.ReadOnly +func (plugin *rbdPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) { + secretName, secretNs, err := getSecretNameAndNamespace(spec, pod.Namespace) + if err != nil { + return nil, err } + secret := "" + if len(secretName) > 0 && len(secretNs) > 0 { + // if secret is provideded, retrieve it + kubeClient := plugin.host.GetKubeClient() + if kubeClient == nil { + return nil, fmt.Errorf("Cannot get kube client") + } + secrets, err := kubeClient.CoreV1().Secrets(secretNs).Get(secretName, metav1.GetOptions{}) + if err != nil { + err = fmt.Errorf("Couldn't get secret %v/%v err: %v", secretNs, secretName, err) + return nil, err + } + for _, data := range secrets.Data { + secret = string(data) + } + } + + // Inject real implementations here, test through the internal function. 
+ return plugin.newMounterInternal(spec, pod.UID, &RBDUtil{}, secret) } -func (plugin *rbdPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface, exec mount.Exec, secret string) (volume.Mounter, error) { - source, readOnly := plugin.getRBDVolumeSource(spec) - pool := source.RBDPool - id := source.RadosUser - keyring := source.Keyring +func (plugin *rbdPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager diskManager, secret string) (volume.Mounter, error) { + mon, err := getVolumeSourceMonitors(spec) + if err != nil { + return nil, err + } + img, err := getVolumeSourceImage(spec) + if err != nil { + return nil, err + } + fstype, err := getVolumeSourceFSType(spec) + if err != nil { + return nil, err + } + pool, err := getVolumeSourcePool(spec) + if err != nil { + return nil, err + } + id, err := getVolumeSourceUser(spec) + if err != nil { + return nil, err + } + keyring, err := getVolumeSourceKeyRing(spec) + if err != nil { + return nil, err + } + ro, err := getVolumeSourceReadOnly(spec) + if err != nil { + return nil, err + } return &rbdMounter{ - rbd: &rbd{ - podUID: podUID, - volName: spec.Name(), - Image: source.RBDImage, - Pool: pool, - ReadOnly: readOnly, - manager: manager, - mounter: &mount.SafeFormatAndMount{Interface: mounter, Exec: exec}, - exec: exec, - plugin: plugin, - MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, spec.Name(), plugin.host)), - }, - Mon: source.CephMonitors, + rbd: newRBD(podUID, spec.Name(), img, pool, ro, plugin, manager), + Mon: mon, Id: id, Keyring: keyring, Secret: secret, - fsType: source.FSType, + fsType: fstype, mountOptions: volume.MountOptionFromSpec(spec), }, nil } func (plugin *rbdPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) { // Inject real implementations here, test through the internal function. 
- return plugin.newUnmounterInternal(volName, podUID, &RBDUtil{}, plugin.host.GetMounter(plugin.GetPluginName()), plugin.host.GetExec(plugin.GetPluginName())) + return plugin.newUnmounterInternal(volName, podUID, &RBDUtil{}) } -func (plugin *rbdPlugin) newUnmounterInternal(volName string, podUID types.UID, manager diskManager, mounter mount.Interface, exec mount.Exec) (volume.Unmounter, error) { +func (plugin *rbdPlugin) newUnmounterInternal(volName string, podUID types.UID, manager diskManager) (volume.Unmounter, error) { return &rbdUnmounter{ rbdMounter: &rbdMounter{ - rbd: &rbd{ - podUID: podUID, - volName: volName, - manager: manager, - mounter: &mount.SafeFormatAndMount{Interface: mounter, Exec: exec}, - exec: exec, - plugin: plugin, - MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, volName, plugin.host)), - }, + rbd: newRBD(podUID, volName, "", "", false, plugin, manager), Mon: make([]string, 0), }, }, nil @@ -208,47 +361,19 @@ func (plugin *rbdPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) { if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.RBD == nil { return nil, fmt.Errorf("spec.PersistentVolumeSource.Spec.RBD is nil") } - class, err := volutil.GetClassForVolume(plugin.host.GetKubeClient(), spec.PersistentVolume) + + admin, secret, err := plugin.getAdminAndSecret(spec) if err != nil { return nil, err } - adminSecretName := "" - adminSecretNamespace := rbdDefaultAdminSecretNamespace - admin := "" - for k, v := range class.Parameters { - switch dstrings.ToLower(k) { - case "adminid": - admin = v - case "adminsecretname": - adminSecretName = v - case "adminsecretnamespace": - adminSecretNamespace = v - } - } - - if admin == "" { - admin = rbdDefaultAdminId - } - secret, err := parsePVSecret(adminSecretNamespace, adminSecretName, plugin.host.GetKubeClient()) - if err != nil { - return nil, fmt.Errorf("failed to get admin secret from [%q/%q]: %v", adminSecretNamespace, adminSecretName, err) - } return plugin.newDeleterInternal(spec, admin, secret, &RBDUtil{}) } func (plugin *rbdPlugin) newDeleterInternal(spec *volume.Spec, admin, secret string, manager diskManager) (volume.Deleter, error) { return &rbdVolumeDeleter{ rbdMounter: &rbdMounter{ - rbd: &rbd{ - volName: spec.Name(), - Image: spec.PersistentVolume.Spec.RBD.RBDImage, - Pool: spec.PersistentVolume.Spec.RBD.RBDPool, - manager: manager, - plugin: plugin, - mounter: &mount.SafeFormatAndMount{Interface: plugin.host.GetMounter(plugin.GetPluginName())}, - exec: plugin.host.GetExec(plugin.GetPluginName()), - }, + rbd: newRBD("", spec.Name(), spec.PersistentVolume.Spec.RBD.RBDImage, spec.PersistentVolume.Spec.RBD.RBDPool, false, plugin, manager), Mon: spec.PersistentVolume.Spec.RBD.CephMonitors, adminId: admin, adminSecret: secret, @@ -262,22 +387,20 @@ func (plugin *rbdPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Pr func (plugin *rbdPlugin) newProvisionerInternal(options volume.VolumeOptions, manager diskManager) (volume.Provisioner, error) { return &rbdVolumeProvisioner{ rbdMounter: &rbdMounter{ - rbd: &rbd{ - manager: manager, - plugin: plugin, - mounter: &mount.SafeFormatAndMount{Interface: plugin.host.GetMounter(plugin.GetPluginName())}, - exec: plugin.host.GetExec(plugin.GetPluginName()), - }, + rbd: newRBD("", "", "", "", false, plugin, manager), }, options: options, }, nil } +// rbdVolumeProvisioner implements volume.Provisioner interface. 
type rbdVolumeProvisioner struct { *rbdMounter options volume.VolumeOptions } +var _ volume.Provisioner = &rbdVolumeProvisioner{} + func (r *rbdVolumeProvisioner) Provision() (*v1.PersistentVolume, error) { if !volume.AccessModesContainedInAll(r.plugin.GetAccessModes(), r.options.PVC.Spec.AccessModes) { return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", r.options.PVC.Spec.AccessModes, r.plugin.GetAccessModes()) @@ -289,8 +412,9 @@ func (r *rbdVolumeProvisioner) Provision() (*v1.PersistentVolume, error) { var err error adminSecretName := "" adminSecretNamespace := rbdDefaultAdminSecretNamespace - secretName := "" secret := "" + secretName := "" + secretNamespace := "" imageFormat := rbdImageFormat2 fstype := "" @@ -313,6 +437,8 @@ func (r *rbdVolumeProvisioner) Provision() (*v1.PersistentVolume, error) { r.Pool = v case "usersecretname": secretName = v + case "usersecretnamespace": + secretNamespace = v case "imageformat": imageFormat = v case "imagefeatures": @@ -370,8 +496,9 @@ func (r *rbdVolumeProvisioner) Provision() (*v1.PersistentVolume, error) { glog.Infof("successfully created rbd image %q", image) pv := new(v1.PersistentVolume) metav1.SetMetaDataAnnotation(&pv.ObjectMeta, volumehelper.VolumeDynamicallyCreatedByKey, "rbd-dynamic-provisioner") - rbd.SecretRef = new(v1.LocalObjectReference) + rbd.SecretRef = new(v1.SecretReference) rbd.SecretRef.Name = secretName + rbd.SecretRef.Namespace = secretNamespace rbd.RadosUser = r.Id rbd.FSType = fstype pv.Spec.PersistentVolumeSource.RBD = rbd @@ -387,10 +514,13 @@ func (r *rbdVolumeProvisioner) Provision() (*v1.PersistentVolume, error) { return pv, nil } +// rbdVolumeDeleter implements volume.Deleter interface. type rbdVolumeDeleter struct { *rbdMounter } +var _ volume.Deleter = &rbdVolumeDeleter{} + func (r *rbdVolumeDeleter) GetPath() string { return getPath(r.podUID, r.volName, r.plugin.host) } @@ -399,6 +529,8 @@ func (r *rbdVolumeDeleter) Delete() error { return r.manager.DeleteImage(r) } +// rbd implmenets volume.Volume interface. +// It's embedded in Mounter/Unmounter/Deleter. type rbd struct { volName string podUID types.UID @@ -413,11 +545,35 @@ type rbd struct { volume.MetricsProvider `json:"-"` } +var _ volume.Volume = &rbd{} + func (rbd *rbd) GetPath() string { // safe to use PodVolumeDir now: volume teardown occurs before pod is cleaned up return getPath(rbd.podUID, rbd.volName, rbd.plugin.host) } +// newRBD creates a new rbd. +func newRBD(podUID types.UID, volName string, image string, pool string, readOnly bool, plugin *rbdPlugin, manager diskManager) *rbd { + return &rbd{ + podUID: podUID, + volName: volName, + Image: image, + Pool: pool, + ReadOnly: readOnly, + plugin: plugin, + mounter: volumehelper.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host), + exec: plugin.host.GetExec(plugin.GetPluginName()), + manager: manager, + MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, volName, plugin.host)), + } +} + +// rbdMounter implements volume.Mounter interface. +// It contains information which need to be persisted in whole life cycle of PV +// on the node. It is persisted at the very beginning in the pod mount point +// directory. +// Note: Capitalized field names of this struct determines the information +// persisted on the disk, DO NOT change them. (TODO: refactoring to use a dedicated struct?) 
type rbdMounter struct { *rbd // capitalized so they can be exported in persistRBD() @@ -456,14 +612,16 @@ func (b *rbdMounter) SetUp(fsGroup *int64) error { func (b *rbdMounter) SetUpAt(dir string, fsGroup *int64) error { // diskSetUp checks mountpoints and prevent repeated calls - glog.V(4).Infof("rbd: attempting to SetUp and mount %s", dir) + glog.V(4).Infof("rbd: attempting to setup at %s", dir) err := diskSetUp(b.manager, *b, dir, b.mounter, fsGroup) if err != nil { - glog.Errorf("rbd: failed to setup mount %s %v", dir, err) + glog.Errorf("rbd: failed to setup at %s %v", dir, err) } + glog.V(3).Infof("rbd: successfully setup at %s", dir) return err } +// rbdUnmounter implements volume.Unmounter interface. type rbdUnmounter struct { *rbdMounter } @@ -477,25 +635,98 @@ func (c *rbdUnmounter) TearDown() error { } func (c *rbdUnmounter) TearDownAt(dir string) error { + glog.V(4).Infof("rbd: attempting to teardown at %s", dir) if pathExists, pathErr := volutil.PathExists(dir); pathErr != nil { return fmt.Errorf("Error checking if path exists: %v", pathErr) } else if !pathExists { glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir) return nil } - return diskTearDown(c.manager, *c, dir, c.mounter) + err := diskTearDown(c.manager, *c, dir, c.mounter) + if err != nil { + return err + } + glog.V(3).Infof("rbd: successfully teardown at %s", dir) + return nil } -func getVolumeSource( - spec *volume.Spec) (*v1.RBDVolumeSource, bool, error) { +func getVolumeSourceMonitors(spec *volume.Spec) ([]string, error) { if spec.Volume != nil && spec.Volume.RBD != nil { - return spec.Volume.RBD, spec.Volume.RBD.ReadOnly, nil + return spec.Volume.RBD.CephMonitors, nil } else if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.RBD != nil { - return spec.PersistentVolume.Spec.RBD, spec.ReadOnly, nil + return spec.PersistentVolume.Spec.RBD.CephMonitors, nil } - return nil, false, fmt.Errorf("Spec does not reference a RBD volume type") + return nil, fmt.Errorf("Spec does not reference a RBD volume type") +} + +func getVolumeSourceImage(spec *volume.Spec) (string, error) { + if spec.Volume != nil && spec.Volume.RBD != nil { + return spec.Volume.RBD.RBDImage, nil + } else if spec.PersistentVolume != nil && + spec.PersistentVolume.Spec.RBD != nil { + return spec.PersistentVolume.Spec.RBD.RBDImage, nil + } + + return "", fmt.Errorf("Spec does not reference a RBD volume type") +} + +func getVolumeSourceFSType(spec *volume.Spec) (string, error) { + if spec.Volume != nil && spec.Volume.RBD != nil { + return spec.Volume.RBD.FSType, nil + } else if spec.PersistentVolume != nil && + spec.PersistentVolume.Spec.RBD != nil { + return spec.PersistentVolume.Spec.RBD.FSType, nil + } + + return "", fmt.Errorf("Spec does not reference a RBD volume type") +} + +func getVolumeSourcePool(spec *volume.Spec) (string, error) { + if spec.Volume != nil && spec.Volume.RBD != nil { + return spec.Volume.RBD.RBDPool, nil + } else if spec.PersistentVolume != nil && + spec.PersistentVolume.Spec.RBD != nil { + return spec.PersistentVolume.Spec.RBD.RBDPool, nil + } + + return "", fmt.Errorf("Spec does not reference a RBD volume type") +} + +func getVolumeSourceUser(spec *volume.Spec) (string, error) { + if spec.Volume != nil && spec.Volume.RBD != nil { + return spec.Volume.RBD.RadosUser, nil + } else if spec.PersistentVolume != nil && + spec.PersistentVolume.Spec.RBD != nil { + return spec.PersistentVolume.Spec.RBD.RadosUser, nil + } + + return "", fmt.Errorf("Spec does not reference a RBD volume type") +} + 
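An aside on the persistence note above: the warning that the capitalized field names of rbdMounter determine what is persisted refers to Go's encoding/json behavior, where only exported (capitalized) fields are marshalled to and unmarshalled from files such as rbd.json. The following is a minimal standalone sketch of that rule using hypothetical field names, not the actual rbdMounter struct:

package main

import (
	"encoding/json"
	"fmt"
)

// diskConfig is a stand-in for a struct persisted to a JSON file such as rbd.json.
// Only the exported (capitalized) fields survive a Marshal/Unmarshal round trip;
// the unexported field is silently dropped, which is why renaming or un-exporting
// a persisted field would break state already written to disk by older kubelets.
type diskConfig struct {
	Pool    string   // persisted
	Image   string   // persisted
	Mon     []string // persisted
	keyring string   // NOT persisted: unexported
}

func main() {
	in := diskConfig{Pool: "kube", Image: "pvc-123", Mon: []string{"10.0.0.1:6789"}, keyring: "/etc/ceph/keyring"}
	data, err := json.Marshal(in)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // {"Pool":"kube","Image":"pvc-123","Mon":["10.0.0.1:6789"]}

	var out diskConfig
	if err := json.Unmarshal(data, &out); err != nil {
		panic(err)
	}
	fmt.Printf("keyring after round trip: %q\n", out.keyring) // "" - the unexported field was lost
}

The same rule is why cleanOldRBDFile below can reconstruct enough of an rbdMounter from an old rbd.json to release a stale lock: everything it needs was written through exported fields.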
+func getVolumeSourceKeyRing(spec *volume.Spec) (string, error) { + if spec.Volume != nil && spec.Volume.RBD != nil { + return spec.Volume.RBD.Keyring, nil + } else if spec.PersistentVolume != nil && + spec.PersistentVolume.Spec.RBD != nil { + return spec.PersistentVolume.Spec.RBD.Keyring, nil + } + + return "", fmt.Errorf("Spec does not reference a RBD volume type") +} + +func getVolumeSourceReadOnly(spec *volume.Spec) (bool, error) { + if spec.Volume != nil && spec.Volume.RBD != nil { + return spec.Volume.RBD.ReadOnly, nil + } else if spec.PersistentVolume != nil && + spec.PersistentVolume.Spec.RBD != nil { + // rbd volumes used as a PersistentVolume gets the ReadOnly flag indirectly through + // the persistent-claim volume used to mount the PV + return spec.ReadOnly, nil + } + + return false, fmt.Errorf("Spec does not reference a RBD volume type") } func parsePodSecret(pod *v1.Pod, secretName string, kubeClient clientset.Interface) (string, error) { @@ -531,3 +762,26 @@ func parseSecretMap(secretMap map[string]string) (string, error) { // If not found, the last secret in the map wins as done before return secret, nil } + +func getSecretNameAndNamespace(spec *volume.Spec, defaultNamespace string) (string, string, error) { + if spec.Volume != nil && spec.Volume.RBD != nil { + localSecretRef := spec.Volume.RBD.SecretRef + if localSecretRef != nil { + return localSecretRef.Name, defaultNamespace, nil + } + return "", "", nil + + } else if spec.PersistentVolume != nil && + spec.PersistentVolume.Spec.RBD != nil { + secretRef := spec.PersistentVolume.Spec.RBD.SecretRef + secretNs := defaultNamespace + if secretRef != nil { + if len(secretRef.Namespace) != 0 { + secretNs = secretRef.Namespace + } + return secretRef.Name, secretNs, nil + } + return "", "", nil + } + return "", "", fmt.Errorf("Spec does not reference an RBD volume type") +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd_util.go b/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd_util.go index 1f06f16a4e8d..15b4b52ee663 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd_util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/rbd/rbd_util.go @@ -23,27 +23,35 @@ package rbd import ( "encoding/json" - "errors" "fmt" "io/ioutil" "math/rand" "os" + "os/exec" "path" "regexp" + "strconv" "strings" "time" "github.com/golang/glog" "k8s.io/api/core/v1" - "k8s.io/kubernetes/pkg/util/mount" + "k8s.io/apimachinery/pkg/api/resource" + fileutil "k8s.io/kubernetes/pkg/util/file" "k8s.io/kubernetes/pkg/util/node" "k8s.io/kubernetes/pkg/volume" + volutil "k8s.io/kubernetes/pkg/volume/util" ) const ( imageWatcherStr = "watcher=" + imageSizeStr = "size " + sizeDivStr = " MB in" kubeLockMagic = "kubelet_lock_magic_" - rbdCmdErr = "executable file not found in $PATH" +) + +var ( + clientKubeLockMagicRe = regexp.MustCompile("client.* " + kubeLockMagic + ".*") ) // search /sys/bus for rbd device that matches given pool and image @@ -107,18 +115,26 @@ func makePDNameInternal(host volume.VolumeHost, pool string, image string) strin return path.Join(host.GetPluginDir(rbdPluginName), "rbd", pool+"-image-"+image) } +// RBDUtil implements diskManager interface. 
type RBDUtil struct{} +var _ diskManager = &RBDUtil{} + func (util *RBDUtil) MakeGlobalPDName(rbd rbd) string { return makePDNameInternal(rbd.plugin.host, rbd.Pool, rbd.Image) } + func rbdErrors(runErr, resultErr error) error { - if runErr.Error() == rbdCmdErr { - return fmt.Errorf("rbd: rbd cmd not found") + if err, ok := runErr.(*exec.Error); ok { + if err.Err == exec.ErrNotFound { + return fmt.Errorf("rbd: rbd cmd not found") + } } return resultErr } +// rbdLock acquires a lock on image if lock is true, otherwise releases if a +// lock is found on image. func (util *RBDUtil) rbdLock(b rbdMounter, lock bool) error { var err error var output, locker string @@ -167,19 +183,25 @@ func (util *RBDUtil) rbdLock(b rbdMounter, lock bool) error { return nil } // clean up orphaned lock if no watcher on the image - used, statusErr := util.rbdStatus(&b) - if statusErr == nil && !used { - re := regexp.MustCompile("client.* " + kubeLockMagic + ".*") - locks := re.FindAllStringSubmatch(output, -1) - for _, v := range locks { - if len(v) > 0 { - lockInfo := strings.Split(v[0], " ") - if len(lockInfo) > 2 { - args := []string{"lock", "remove", b.Image, lockInfo[1], lockInfo[0], "--pool", b.Pool, "--id", b.Id, "-m", mon} - args = append(args, secret_opt...) - cmd, err = b.exec.Run("rbd", args...) - glog.Infof("remove orphaned locker %s from client %s: err %v, output: %s", lockInfo[1], lockInfo[0], err, string(cmd)) - } + used, rbdOutput, statusErr := util.rbdStatus(&b) + if statusErr != nil { + return fmt.Errorf("rbdStatus failed error %v, rbd output: %v", statusErr, rbdOutput) + } + if used { + // this image is already used by a node other than this node + return fmt.Errorf("rbd image: %s/%s is already used by a node other than this node, rbd output: %v", b.Image, b.Pool, output) + } + + // best effort clean up orphaned locked if not used + locks := clientKubeLockMagicRe.FindAllStringSubmatch(output, -1) + for _, v := range locks { + if len(v) > 0 { + lockInfo := strings.Split(v[0], " ") + if len(lockInfo) > 2 { + args := []string{"lock", "remove", b.Image, lockInfo[1], lockInfo[0], "--pool", b.Pool, "--id", b.Id, "-m", mon} + args = append(args, secret_opt...) + cmd, err = b.exec.Run("rbd", args...) + glog.Infof("remove orphaned locker %s from client %s: err %v, rbd output: %s", lockInfo[1], lockInfo[0], err, string(cmd)) } } } @@ -188,6 +210,9 @@ func (util *RBDUtil) rbdLock(b rbdMounter, lock bool) error { args := []string{"lock", "add", b.Image, lock_id, "--pool", b.Pool, "--id", b.Id, "-m", mon} args = append(args, secret_opt...) cmd, err = b.exec.Run("rbd", args...) + if err == nil { + glog.V(4).Infof("rbd: successfully add lock (locker_id: %s) on image: %s/%s with id %s mon %s", lock_id, b.Pool, b.Image, b.Id, mon) + } } else { // defencing, find locker name ind := strings.LastIndex(output, lock_id) - 1 @@ -197,85 +222,38 @@ func (util *RBDUtil) rbdLock(b rbdMounter, lock bool) error { break } } - // remove a lock: rbd lock remove - args := []string{"lock", "remove", b.Image, lock_id, locker, "--pool", b.Pool, "--id", b.Id, "-m", mon} - args = append(args, secret_opt...) - cmd, err = b.exec.Run("rbd", args...) + // remove a lock if found: rbd lock remove + if len(locker) > 0 { + args := []string{"lock", "remove", b.Image, lock_id, locker, "--pool", b.Pool, "--id", b.Id, "-m", mon} + args = append(args, secret_opt...) + cmd, err = b.exec.Run("rbd", args...) 
+ if err == nil { + glog.V(4).Infof("rbd: successfully remove lock (locker_id: %s) on image: %s/%s with id %s mon %s", lock_id, b.Pool, b.Image, b.Id, mon) + } + } } if err == nil { - //lock is acquired + // break if operation succeeds break } } return err } -func (util *RBDUtil) persistRBD(rbd rbdMounter, mnt string) error { - file := path.Join(mnt, "rbd.json") - fp, err := os.Create(file) - if err != nil { - return fmt.Errorf("rbd: create err %s/%s", file, err) - } - defer fp.Close() - - encoder := json.NewEncoder(fp) - if err = encoder.Encode(rbd); err != nil { - return fmt.Errorf("rbd: encode err: %v.", err) - } - - return nil -} - -func (util *RBDUtil) loadRBD(mounter *rbdMounter, mnt string) error { - file := path.Join(mnt, "rbd.json") - fp, err := os.Open(file) - if err != nil { - return fmt.Errorf("rbd: open err %s/%s", file, err) - } - defer fp.Close() - - decoder := json.NewDecoder(fp) - if err = decoder.Decode(mounter); err != nil { - return fmt.Errorf("rbd: decode err: %v.", err) - } - - return nil -} - -func (util *RBDUtil) fencing(b rbdMounter) error { - // no need to fence readOnly - if (&b).GetAttributes().ReadOnly { - return nil - } - return util.rbdLock(b, true) -} - -func (util *RBDUtil) defencing(c rbdUnmounter) error { - // no need to fence readOnly - if c.ReadOnly { - return nil - } - - return util.rbdLock(*c.rbdMounter, false) -} - -func (util *RBDUtil) AttachDisk(b rbdMounter) error { +// AttachDisk attaches the disk on the node. +// If Volume is not read-only, acquire a lock on image first. +func (util *RBDUtil) AttachDisk(b rbdMounter) (string, error) { var err error var output []byte - // create mount point - globalPDPath := b.manager.MakeGlobalPDName(*b.rbd) - notMnt, err := b.mounter.IsLikelyNotMountPoint(globalPDPath) - // in the first time, the path shouldn't exist and IsLikelyNotMountPoint is expected to get NotExist - if err != nil && !os.IsNotExist(err) { - return fmt.Errorf("rbd: %s failed to check mountpoint", globalPDPath) - } - if !notMnt { - return nil - } - if err = os.MkdirAll(globalPDPath, 0750); err != nil { - return fmt.Errorf("rbd: failed to mkdir %s, error", globalPDPath) + globalPDPath := util.MakeGlobalPDName(*b.rbd) + if pathExists, pathErr := volutil.PathExists(globalPDPath); pathErr != nil { + return "", fmt.Errorf("Error checking if path exists: %v", pathErr) + } else if !pathExists { + if err := os.MkdirAll(globalPDPath, 0750); err != nil { + return "", err + } } devicePath, found := waitForPath(b.Pool, b.Image, 1) @@ -285,16 +263,16 @@ func (util *RBDUtil) AttachDisk(b rbdMounter) error { glog.Warningf("rbd: failed to load rbd kernel module:%v", err) } - // fence off other mappers - if err = util.fencing(b); err != nil { - return rbdErrors(err, fmt.Errorf("rbd: failed to lock image %s (maybe locked by other nodes), error %v", b.Image, err)) + // Currently, we don't acquire advisory lock on image, but for backward + // compatibility, we need to check if the image is being used by nodes running old kubelet. + found, rbdOutput, err := util.rbdStatus(&b) + if err != nil { + return "", fmt.Errorf("error: %v, rbd output: %v", err, rbdOutput) + } + if found { + glog.Infof("rbd image %s/%s is still being used ", b.Pool, b.Image) + return "", fmt.Errorf("rbd image %s/%s is still being used. 
rbd output: %s", b.Pool, b.Image, rbdOutput) } - // rbd lock remove needs ceph and image config - // but kubelet doesn't get them from apiserver during teardown - // so persit rbd config so upon disk detach, rbd lock can be removed - // since rbd json is persisted in the same local directory that is used as rbd mountpoint later, - // the json file remains invisible during rbd mount and thus won't be removed accidentally. - util.persistRBD(b, globalPDPath) // rbd map l := len(b.Mon) @@ -314,55 +292,86 @@ func (util *RBDUtil) AttachDisk(b rbdMounter) error { if err == nil { break } - glog.V(1).Infof("rbd: map error %v %s", err, string(output)) + glog.V(1).Infof("rbd: map error %v, rbd output: %s", err, string(output)) } if err != nil { - return fmt.Errorf("rbd: map failed %v %s", err, string(output)) + return "", fmt.Errorf("rbd: map failed %v, rbd output: %s", err, string(output)) } devicePath, found = waitForPath(b.Pool, b.Image, 10) if !found { - return errors.New("Could not map image: Timeout after 10s") + return "", fmt.Errorf("Could not map image %s/%s, Timeout after 10s", b.Pool, b.Image) } - glog.V(3).Infof("rbd: successfully map image %s/%s to %s", b.Pool, b.Image, devicePath) - } - - // mount it - if err = b.mounter.FormatAndMount(devicePath, globalPDPath, b.fsType, nil); err != nil { - err = fmt.Errorf("rbd: failed to mount rbd volume %s [%s] to %s, error %v", devicePath, b.fsType, globalPDPath, err) } - glog.V(3).Infof("rbd: successfully mount image %s/%s at %s", b.Pool, b.Image, globalPDPath) - return err + return devicePath, nil } -func (util *RBDUtil) DetachDisk(c rbdUnmounter, mntPath string) error { - device, cnt, err := mount.GetDeviceNameFromMount(c.mounter, mntPath) +// DetachDisk detaches the disk from the node. +// It detaches device from the node if device is provided, and removes the lock +// if there is persisted RBD info under deviceMountPath. +func (util *RBDUtil) DetachDisk(plugin *rbdPlugin, deviceMountPath string, device string) error { + if len(device) == 0 { + return fmt.Errorf("DetachDisk failed , device is empty") + } + // rbd unmap + exec := plugin.host.GetExec(plugin.GetPluginName()) + output, err := exec.Run("rbd", "unmap", device) if err != nil { - return fmt.Errorf("rbd detach disk: failed to get device from mnt: %s\nError: %v", mntPath, err) + return rbdErrors(err, fmt.Errorf("rbd: failed to unmap device %s, error %v, rbd output: %v", device, err, output)) } - if err = c.mounter.Unmount(mntPath); err != nil { - return fmt.Errorf("rbd detach disk: failed to umount: %s\nError: %v", mntPath, err) + glog.V(3).Infof("rbd: successfully unmap device %s", device) + + // Currently, we don't persist rbd info on the disk, but for backward + // compatbility, we need to clean it if found. 
+ rbdFile := path.Join(deviceMountPath, "rbd.json") + exists, err := fileutil.FileExists(rbdFile) + if err != nil { + return err } - glog.V(3).Infof("rbd: successfully umount mountpoint %s", mntPath) - // if device is no longer used, see if can unmap - if cnt <= 1 { - // rbd unmap - _, err = c.exec.Run("rbd", "unmap", device) + if exists { + glog.V(3).Infof("rbd: old rbd.json is found under %s, cleaning it", deviceMountPath) + err = util.cleanOldRBDFile(plugin, rbdFile) if err != nil { - return rbdErrors(err, fmt.Errorf("rbd: failed to unmap device %s:Error: %v", device, err)) + glog.Errorf("rbd: failed to clean %s", rbdFile) + return err } + glog.V(3).Infof("rbd: successfully remove %s", rbdFile) + } + return nil +} - // load ceph and image/pool info to remove fencing - if err := util.loadRBD(c.rbdMounter, mntPath); err == nil { - // remove rbd lock - util.defencing(c) - } +// cleanOldRBDFile read rbd info from rbd.json file and removes lock if found. +// At last, it removes rbd.json file. +func (util *RBDUtil) cleanOldRBDFile(plugin *rbdPlugin, rbdFile string) error { + mounter := &rbdMounter{ + // util.rbdLock needs it to run command. + rbd: newRBD("", "", "", "", false, plugin, util), + } + fp, err := os.Open(rbdFile) + if err != nil { + return fmt.Errorf("rbd: open err %s/%s", rbdFile, err) + } + defer fp.Close() - glog.V(3).Infof("rbd: successfully unmap device %s", device) + decoder := json.NewDecoder(fp) + if err = decoder.Decode(mounter); err != nil { + return fmt.Errorf("rbd: decode err: %v.", err) } - return nil + + if err != nil { + glog.Errorf("failed to load rbd info from %s: %v", rbdFile, err) + return err + } + // remove rbd lock if found + // the disk is not attached to this node anymore, so the lock on image + // for this node can be removed safely + err = util.rbdLock(*mounter, false) + if err == nil { + os.Remove(rbdFile) + } + return err } -func (util *RBDUtil) CreateImage(p *rbdVolumeProvisioner) (r *v1.RBDVolumeSource, size int, err error) { +func (util *RBDUtil) CreateImage(p *rbdVolumeProvisioner) (r *v1.RBDPersistentVolumeSource, size int, err error) { var output []byte capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] volSizeBytes := capacity.Value() @@ -400,7 +409,7 @@ func (util *RBDUtil) CreateImage(p *rbdVolumeProvisioner) (r *v1.RBDVolumeSource return nil, 0, fmt.Errorf("failed to create rbd image: %v, command output: %s", err, string(output)) } - return &v1.RBDVolumeSource{ + return &v1.RBDPersistentVolumeSource{ CephMonitors: p.rbdMounter.Mon, RBDImage: p.rbdMounter.Image, RBDPool: p.rbdMounter.Pool, @@ -409,13 +418,13 @@ func (util *RBDUtil) CreateImage(p *rbdVolumeProvisioner) (r *v1.RBDVolumeSource func (util *RBDUtil) DeleteImage(p *rbdVolumeDeleter) error { var output []byte - found, err := util.rbdStatus(p.rbdMounter) + found, rbdOutput, err := util.rbdStatus(p.rbdMounter) if err != nil { - return err + return fmt.Errorf("error %v, rbd output: %v", err, rbdOutput) } if found { glog.Info("rbd is still being used ", p.rbdMounter.Image) - return fmt.Errorf("rbd %s is still being used", p.rbdMounter.Image) + return fmt.Errorf("rbd image %s/%s is still being used, rbd output: %v", p.rbdMounter.Pool, p.rbdMounter.Image, rbdOutput) } // rbd rm l := len(p.rbdMounter.Mon) @@ -433,44 +442,188 @@ func (util *RBDUtil) DeleteImage(p *rbdVolumeDeleter) error { glog.Errorf("failed to delete rbd image: %v, command output: %s", err, string(output)) } } - return err + return fmt.Errorf("error %v, rbd output: %v", err, 
string(output)) } -// run rbd status command to check if there is watcher on the image -func (util *RBDUtil) rbdStatus(b *rbdMounter) (bool, error) { +// ExpandImage runs rbd resize command to resize the specified image +func (util *RBDUtil) ExpandImage(rbdExpander *rbdVolumeExpander, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error) { + var output []byte + var err error + volSizeBytes := newSize.Value() + // convert to MB that rbd defaults on + sz := int(volume.RoundUpSize(volSizeBytes, 1024*1024)) + newVolSz := fmt.Sprintf("%d", sz) + newSizeQuant := resource.MustParse(fmt.Sprintf("%dMi", sz)) + + // check the current size of rbd image, if equals to or greater that the new request size, do nothing + curSize, infoErr := util.rbdInfo(rbdExpander.rbdMounter) + if infoErr != nil { + return oldSize, fmt.Errorf("rbd info failed, error: %v", infoErr) + } + if curSize >= sz { + return newSizeQuant, nil + } + + // rbd resize + l := len(rbdExpander.rbdMounter.Mon) + // pick a mon randomly + start := rand.Int() % l + // iterate all monitors until resize succeeds. + for i := start; i < start+l; i++ { + mon := rbdExpander.rbdMounter.Mon[i%l] + glog.V(4).Infof("rbd: resize %s using mon %s, pool %s id %s key %s", rbdExpander.rbdMounter.Image, mon, rbdExpander.rbdMounter.Pool, rbdExpander.rbdMounter.adminId, rbdExpander.rbdMounter.adminSecret) + output, err = rbdExpander.exec.Run("rbd", + "resize", rbdExpander.rbdMounter.Image, "--size", newVolSz, "--pool", rbdExpander.rbdMounter.Pool, "--id", rbdExpander.rbdMounter.adminId, "-m", mon, "--key="+rbdExpander.rbdMounter.adminSecret) + if err == nil { + return newSizeQuant, nil + } else { + glog.Errorf("failed to resize rbd image: %v, command output: %s", err, string(output)) + } + } + return oldSize, err +} + +// rbdInfo runs `rbd info` command to get the current image size in MB +func (util *RBDUtil) rbdInfo(b *rbdMounter) (int, error) { var err error var output string var cmd []byte + // If we don't have admin id/secret (e.g. attaching), fallback to user id/secret. + id := b.adminId + secret := b.adminSecret + if id == "" { + id = b.Id + secret = b.Secret + } + l := len(b.Mon) start := rand.Int() % l - // iterate all hosts until mount succeeds. + // iterate all hosts until rbd command succeeds. + for i := start; i < start+l; i++ { + mon := b.Mon[i%l] + // cmd "rbd info" get the image info with the following output: + // + // # image exists (exit=0) + // rbd info volume-4a5bcc8b-2b55-46da-ba04-0d3dc5227f08 + // size 1024 MB in 256 objects + // order 22 (4096 kB objects) + // block_name_prefix: rbd_data.1253ac238e1f29 + // format: 2 + // ... 
+ // + // rbd info volume-4a5bcc8b-2b55-46da-ba04-0d3dc5227f08 --format json + // {"name":"volume-4a5bcc8b-2b55-46da-ba04-0d3dc5227f08","size":1073741824,"objects":256,"order":22,"object_size":4194304,"block_name_prefix":"rbd_data.1253ac238e1f29","format":2,"features":["layering","exclusive-lock","object-map","fast-diff","deep-flatten"],"flags":[]} + // + // + // # image does not exist (exit=2) + // rbd: error opening image 1234: (2) No such file or directory + // + glog.V(4).Infof("rbd: info %s using mon %s, pool %s id %s key %s", b.Image, mon, b.Pool, id, secret) + cmd, err = b.exec.Run("rbd", + "info", b.Image, "--pool", b.Pool, "-m", mon, "--id", id, "--key="+secret) + output = string(cmd) + + // break if command succeeds + if err == nil { + break + } + + if err, ok := err.(*exec.Error); ok { + if err.Err == exec.ErrNotFound { + glog.Errorf("rbd cmd not found") + // fail fast if command not found + return 0, err + } + } + } + + // If command never succeed, returns its last error. + if err != nil { + return 0, err + } + + if len(output) == 0 { + return 0, fmt.Errorf("can not get image size info %s: %s", b.Image, output) + } + + // get the size value string, just between `size ` and ` MB in`, such as `size 1024 MB in 256 objects` + sizeIndex := strings.Index(output, imageSizeStr) + divIndex := strings.Index(output, sizeDivStr) + if sizeIndex == -1 || divIndex == -1 || divIndex <= sizeIndex+5 { + return 0, fmt.Errorf("can not get image size info %s: %s", b.Image, output) + } + rbdSizeStr := output[sizeIndex+5 : divIndex] + rbdSize, err := strconv.Atoi(rbdSizeStr) + if err != nil { + return 0, fmt.Errorf("can not convert size str: %s to int", rbdSizeStr) + } + + return rbdSize, nil +} + +// rbdStatus runs `rbd status` command to check if there is watcher on the image. +func (util *RBDUtil) rbdStatus(b *rbdMounter) (bool, string, error) { + var err error + var output string + var cmd []byte + + // If we don't have admin id/secret (e.g. attaching), fallback to user id/secret. + id := b.adminId + secret := b.adminSecret + if id == "" { + id = b.Id + secret = b.Secret + } + + l := len(b.Mon) + start := rand.Int() % l + // iterate all hosts until rbd command succeeds. 
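A minimal standalone sketch (not the vendored code itself) of the size extraction that rbdInfo performs on the plain-text `rbd info` output shown above; it assumes imageSizeStr and sizeDivStr hold "size " and " MB in", which is how the surrounding code uses them.

	package main

	import (
		"fmt"
		"strconv"
		"strings"
	)

	// parseImageSizeMB extracts the integer between "size " and " MB in" from
	// `rbd info` output, e.g. "size 1024 MB in 256 objects" yields 1024.
	func parseImageSizeMB(output string) (int, error) {
		const imageSizeStr = "size " // assumed value of the constant used by rbdInfo
		const sizeDivStr = " MB in"  // assumed value of the constant used by rbdInfo
		sizeIdx := strings.Index(output, imageSizeStr)
		divIdx := strings.Index(output, sizeDivStr)
		if sizeIdx == -1 || divIdx == -1 || divIdx <= sizeIdx+len(imageSizeStr) {
			return 0, fmt.Errorf("cannot locate image size in output: %q", output)
		}
		return strconv.Atoi(output[sizeIdx+len(imageSizeStr) : divIdx])
	}

	func main() {
		out := "rbd image 'volume-4a5bcc8b':\n\tsize 1024 MB in 256 objects\n\torder 22 (4096 kB objects)"
		sz, err := parseImageSizeMB(out)
		fmt.Println(sz, err) // 1024 <nil>
	}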
for i := start; i < start+l; i++ { mon := b.Mon[i%l] // cmd "rbd status" list the rbd client watch with the following output: + // + // # there is a watcher (exit=0) // Watchers: // watcher=10.16.153.105:0/710245699 client.14163 cookie=1 - glog.V(4).Infof("rbd: status %s using mon %s, pool %s id %s key %s", b.Image, mon, b.Pool, b.adminId, b.adminSecret) + // + // # there is no watcher (exit=0) + // Watchers: none + // + // Otherwise, exit is non-zero, for example: + // + // # image does not exist (exit=2) + // rbd: error opening image kubernetes-dynamic-pvc-: (2) No such file or directory + // + glog.V(4).Infof("rbd: status %s using mon %s, pool %s id %s key %s", b.Image, mon, b.Pool, id, secret) cmd, err = b.exec.Run("rbd", - "status", b.Image, "--pool", b.Pool, "-m", mon, "--id", b.adminId, "--key="+b.adminSecret) + "status", b.Image, "--pool", b.Pool, "-m", mon, "--id", id, "--key="+secret) output = string(cmd) - if err != nil { - if err.Error() == rbdCmdErr { + // break if command succeeds + if err == nil { + break + } + + if err, ok := err.(*exec.Error); ok { + if err.Err == exec.ErrNotFound { glog.Errorf("rbd cmd not found") - } else { - // ignore error code, just checkout output for watcher string - glog.Warningf("failed to execute rbd status on mon %s", mon) + // fail fast if command not found + return false, output, err } } + } - if strings.Contains(output, imageWatcherStr) { - glog.V(4).Infof("rbd: watchers on %s: %s", b.Image, output) - return true, nil - } else { - glog.Warningf("rbd: no watchers on %s", b.Image) - return false, nil - } + // If command never succeed, returns its last error. + if err != nil { + return false, output, err + } + + if strings.Contains(output, imageWatcherStr) { + glog.V(4).Infof("rbd: watchers on %s: %s", b.Image, output) + return true, output, nil + } else { + glog.Warningf("rbd: no watchers on %s", b.Image) + return false, output, nil } - return false, nil } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/scaleio/BUILD b/vendor/k8s.io/kubernetes/pkg/volume/scaleio/BUILD index 6374c05205c0..5c103e6e268d 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/scaleio/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/volume/scaleio/BUILD @@ -13,6 +13,7 @@ go_test( "sio_util_test.go", "sio_volume_test.go", ], + importpath = "k8s.io/kubernetes/pkg/volume/scaleio", library = ":go_default_library", deps = [ "//pkg/util/mount:go_default_library", @@ -22,6 +23,7 @@ go_test( "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library", "//vendor/k8s.io/client-go/util/testing:go_default_library", @@ -37,6 +39,7 @@ go_library( "sio_util.go", "sio_volume.go", ], + importpath = "k8s.io/kubernetes/pkg/volume/scaleio", deps = [ "//pkg/util/keymutex:go_default_library", "//pkg/util/mount:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_client.go b/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_client.go index 50d7bd11d8b0..18b8ffb5b18f 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_client.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_client.go @@ -373,49 +373,25 @@ func (c *sioClient) GetVolumeRefs(volId sioVolumeID) (refs int, err error) { func (c *sioClient) Devs() (map[string]string, error) { volumeMap := 
make(map[string]string) - // grab the sdc tool output - out, err := c.exec.Run(c.getSdcCmd(), "--query_vols") - if err != nil { - glog.Error(log("sdc --query_vols failed: %v", err)) - return nil, err - } - - // --query_vols output is a heading followed by list of attached vols as follows: - // Retrieve ? volume(s) - // VOL-ID a2b8419300000000 MDM-ID 788d9efb0a8f20cb - // ... - // parse output and store it in a map as map[]volID - // that map is used later to retrieve device path (next section) - result := string(out) - mdmMap := make(map[string]string) - lines := strings.Split(result, "\n") - for _, line := range lines { - //line e.g.: "VOL-ID a2b8419300000000 MDM-ID 788d9efb0a8f20cb" - if strings.HasPrefix(line, "VOL-ID") { - //split[1] = volID; split[3] = mdmID - split := strings.Split(line, " ") - key := fmt.Sprintf("%s-%s", split[3], split[1]) - mdmMap[key] = split[1] - } - } - files, err := c.getSioDiskPaths() if err != nil { return nil, err } for _, f := range files { - // remove emec-vol- prefix to be left with concated mdmID-volID - mdmVolumeID := strings.Replace(f.Name(), "emc-vol-", "", 1) + // split emc-vol-- to pull out volumeID + parts := strings.Split(f.Name(), "-") + if len(parts) != 4 { + return nil, errors.New("unexpected ScaleIO device name format") + } + volumeID := parts[3] devPath, err := filepath.EvalSymlinks(fmt.Sprintf("%s/%s", sioDiskIDPath, f.Name())) if err != nil { glog.Error(log("devicepath-to-volID mapping error: %v", err)) return nil, err } - // map volID to devicePath - if volumeID, ok := mdmMap[mdmVolumeID]; ok { - volumeMap[volumeID] = devPath - } + // map volumeID to devicePath + volumeMap[volumeID] = devPath } return volumeMap, nil } @@ -430,7 +406,7 @@ func (c *sioClient) WaitForAttachedDevice(token string) (string, error) { ticker := time.NewTicker(time.Second) defer ticker.Stop() timer := time.NewTimer(30 * time.Second) - defer ticker.Stop() + defer timer.Stop() for { select { @@ -464,7 +440,7 @@ func (c *sioClient) WaitForDetachedDevice(token string) error { ticker := time.NewTicker(time.Second) defer ticker.Stop() timer := time.NewTimer(30 * time.Second) - defer ticker.Stop() + defer timer.Stop() for { select { diff --git a/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_plugin.go b/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_plugin.go index d77587562162..babd840c6b74 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_plugin.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_plugin.go @@ -60,12 +60,11 @@ func (p *sioPlugin) GetPluginName() string { } func (p *sioPlugin) GetVolumeName(spec *volume.Spec) (string, error) { - source, err := getVolumeSourceFromSpec(spec) + attribs, err := getVolumeSourceAttribs(spec) if err != nil { return "", err } - - return source.VolumeName, nil + return attribs.volName, nil } func (p *sioPlugin) CanSupport(spec *volume.Spec) bool { @@ -81,29 +80,33 @@ func (p *sioPlugin) NewMounter( spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Mounter, error) { - sioSource, err := getVolumeSourceFromSpec(spec) + + // extract source info from either ScaleIOVolumeSource or ScaleIOPersistentVolumeSource type + attribs, err := getVolumeSourceAttribs(spec) if err != nil { - glog.Error(log("failed to extract ScaleIOVolumeSource from spec: %v", err)) - return nil, err + return nil, errors.New(log("mounter failed to extract volume attributes from spec: %v", err)) + } + + secretName, secretNS, err := getSecretAndNamespaceFromSpec(spec, pod) + if err != nil { + return nil, errors.New(log("failed to get 
secret name or secretNamespace: %v", err)) } return &sioVolume{ - pod: pod, - spec: spec, - source: sioSource, - namespace: pod.Namespace, - volSpecName: spec.Name(), - volName: sioSource.VolumeName, - podUID: pod.UID, - readOnly: sioSource.ReadOnly, - fsType: sioSource.FSType, - plugin: p, + pod: pod, + spec: spec, + secretName: secretName, + secretNamespace: secretNS, + volSpecName: spec.Name(), + volName: attribs.volName, + podUID: pod.UID, + readOnly: attribs.readOnly, + fsType: attribs.fsType, + plugin: p, }, nil } // NewUnmounter creates a representation of the volume to unmount -// The specName param can be used to carry the namespace value (if needed) using format: -// specName = [nsSep] where the specname is pre-pended with the namespace func (p *sioPlugin) NewUnmounter(specName string, podUID types.UID) (volume.Unmounter, error) { glog.V(4).Info(log("Unmounter for %s", specName)) @@ -156,22 +159,25 @@ func (p *sioPlugin) GetAccessModes() []api.PersistentVolumeAccessMode { var _ volume.DeletableVolumePlugin = &sioPlugin{} func (p *sioPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) { - sioSource, err := getVolumeSourceFromSpec(spec) + attribs, err := getVolumeSourceAttribs(spec) if err != nil { - glog.Error(log("deleter failed to extract source from spec: %v", err)) + glog.Error(log("deleter failed to extract volume attributes from spec: %v", err)) return nil, err } - namespace := spec.PersistentVolume.Spec.ClaimRef.Namespace + secretName, secretNS, err := getSecretAndNamespaceFromSpec(spec, nil) + if err != nil { + return nil, errors.New(log("failed to get secret name or secretNamespace: %v", err)) + } return &sioVolume{ - spec: spec, - source: sioSource, - namespace: namespace, - volSpecName: spec.Name(), - volName: sioSource.VolumeName, - plugin: p, - readOnly: sioSource.ReadOnly, + spec: spec, + secretName: secretName, + secretNamespace: secretNS, + volSpecName: spec.Name(), + volName: attribs.volName, + plugin: p, + readOnly: attribs.readOnly, }, nil } @@ -189,13 +195,26 @@ func (p *sioPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisi return nil, errors.New("option parameters missing") } - namespace := options.PVC.Namespace + // Supports ref of name of secret a couple of ways: + // options.Parameters["secretRef"] for backward compat, or + // options.Parameters["secretName"] + secretName := configData[confKey.secretName] + if secretName == "" { + secretName = configData["secretName"] + configData[confKey.secretName] = secretName + } + + secretNS := configData[confKey.secretNamespace] + if secretNS == "" { + secretNS = options.PVC.Namespace + } return &sioVolume{ - configData: configData, - plugin: p, - options: options, - namespace: namespace, - volSpecName: options.PVName, + configData: configData, + plugin: p, + options: options, + secretName: secretName, + secretNamespace: secretNS, + volSpecName: options.PVName, }, nil } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_util.go b/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_util.go index 3c2f13a3405e..de0c58116657 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_util.go @@ -31,11 +31,17 @@ import ( volutil "k8s.io/kubernetes/pkg/volume/util" ) +type volSourceAttribs struct { + volName, + fsType string + readOnly bool +} + var ( confKey = struct { gateway, sslEnabled, - secretRef, + secretName, system, protectionDomain, storagePool, @@ -47,12 +53,13 @@ var ( readOnly, username, password, - namespace, + 
secretNamespace, sdcGuid string }{ gateway: "gateway", sslEnabled: "sslEnabled", - secretRef: "secretRef", + secretName: "secretRef", + secretNamespace: "secretNamespace", system: "system", protectionDomain: "protectionDomain", storagePool: "storagePool", @@ -64,7 +71,6 @@ var ( readOnly: "readOnly", username: "username", password: "password", - namespace: "namespace", sdcGuid: "sdcGuid", } sdcGuidLabelName = "scaleio.sdcGuid" @@ -79,23 +85,32 @@ var ( protectionDomainNotProvidedErr = errors.New("ScaleIO protection domain not provided") ) -// mapScaleIOVolumeSource maps attributes from a ScaleIOVolumeSource to config -func mapVolumeSource(config map[string]string, source *api.ScaleIOVolumeSource) { - config[confKey.gateway] = source.Gateway - config[confKey.secretRef] = func() string { - if source.SecretRef != nil { - return string(source.SecretRef.Name) - } - return "" - }() - config[confKey.system] = source.System - config[confKey.volumeName] = source.VolumeName - config[confKey.sslEnabled] = strconv.FormatBool(source.SSLEnabled) - config[confKey.protectionDomain] = source.ProtectionDomain - config[confKey.storagePool] = source.StoragePool - config[confKey.storageMode] = source.StorageMode - config[confKey.fsType] = source.FSType - config[confKey.readOnly] = strconv.FormatBool(source.ReadOnly) +// mapVolumeSpec maps attributes from either ScaleIOVolumeSource or ScaleIOPersistentVolumeSource to config +func mapVolumeSpec(config map[string]string, spec *volume.Spec) { + + if source, err := getScaleIOPersistentVolumeSourceFromSpec(spec); err == nil { + config[confKey.gateway] = source.Gateway + config[confKey.system] = source.System + config[confKey.volumeName] = source.VolumeName + config[confKey.sslEnabled] = strconv.FormatBool(source.SSLEnabled) + config[confKey.protectionDomain] = source.ProtectionDomain + config[confKey.storagePool] = source.StoragePool + config[confKey.storageMode] = source.StorageMode + config[confKey.fsType] = source.FSType + config[confKey.readOnly] = strconv.FormatBool(source.ReadOnly) + } + + if source, err := getScaleIOVolumeSourceFromSpec(spec); err == nil { + config[confKey.gateway] = source.Gateway + config[confKey.system] = source.System + config[confKey.volumeName] = source.VolumeName + config[confKey.sslEnabled] = strconv.FormatBool(source.SSLEnabled) + config[confKey.protectionDomain] = source.ProtectionDomain + config[confKey.storagePool] = source.StoragePool + config[confKey.storageMode] = source.StorageMode + config[confKey.fsType] = source.FSType + config[confKey.readOnly] = strconv.FormatBool(source.ReadOnly) + } //optionals applyConfigDefaults(config) @@ -105,7 +120,7 @@ func validateConfigs(config map[string]string) error { if config[confKey.gateway] == "" { return gatewayNotProvidedErr } - if config[confKey.secretRef] == "" { + if config[confKey.secretName] == "" { return secretRefNotProvidedErr } if config[confKey.system] == "" { @@ -202,7 +217,7 @@ func saveConfig(configName string, data map[string]string) error { // attachSecret loads secret object and attaches to configData func attachSecret(plug *sioPlugin, namespace string, configData map[string]string) error { // load secret - secretRefName := configData[confKey.secretRef] + secretRefName := configData[confKey.secretName] kubeClient := plug.host.GetKubeClient() secretMap, err := volutil.GetSecretForPV(namespace, secretRefName, sioPluginName, kubeClient) if err != nil { @@ -244,8 +259,8 @@ func getSdcGuidLabel(plug *sioPlugin) (string, error) { return label, nil } -// getVolumeSourceFromSpec 
safely extracts ScaleIOVolumeSource from spec -func getVolumeSourceFromSpec(spec *volume.Spec) (*api.ScaleIOVolumeSource, error) { +// getVolumeSourceFromSpec safely extracts ScaleIOVolumeSource or ScaleIOPersistentVolumeSource from spec +func getVolumeSourceFromSpec(spec *volume.Spec) (interface{}, error) { if spec.Volume != nil && spec.Volume.ScaleIO != nil { return spec.Volume.ScaleIO, nil } @@ -257,6 +272,66 @@ func getVolumeSourceFromSpec(spec *volume.Spec) (*api.ScaleIOVolumeSource, error return nil, fmt.Errorf("ScaleIO not defined in spec") } +func getVolumeSourceAttribs(spec *volume.Spec) (*volSourceAttribs, error) { + attribs := new(volSourceAttribs) + if pvSource, err := getScaleIOPersistentVolumeSourceFromSpec(spec); err == nil { + attribs.volName = pvSource.VolumeName + attribs.fsType = pvSource.FSType + attribs.readOnly = pvSource.ReadOnly + } else if pSource, err := getScaleIOVolumeSourceFromSpec(spec); err == nil { + attribs.volName = pSource.VolumeName + attribs.fsType = pSource.FSType + attribs.readOnly = pSource.ReadOnly + } else { + msg := log("failed to get ScaleIOVolumeSource or ScaleIOPersistentVolumeSource from spec") + glog.Error(msg) + return nil, errors.New(msg) + } + return attribs, nil +} + +func getScaleIOPersistentVolumeSourceFromSpec(spec *volume.Spec) (*api.ScaleIOPersistentVolumeSource, error) { + source, err := getVolumeSourceFromSpec(spec) + if err != nil { + return nil, err + } + if val, ok := source.(*api.ScaleIOPersistentVolumeSource); ok { + return val, nil + } + return nil, fmt.Errorf("spec is not a valid ScaleIOPersistentVolume type") +} + +func getScaleIOVolumeSourceFromSpec(spec *volume.Spec) (*api.ScaleIOVolumeSource, error) { + source, err := getVolumeSourceFromSpec(spec) + if err != nil { + return nil, err + } + if val, ok := source.(*api.ScaleIOVolumeSource); ok { + return val, nil + } + return nil, fmt.Errorf("spec is not a valid ScaleIOVolume type") +} + +func getSecretAndNamespaceFromSpec(spec *volume.Spec, pod *api.Pod) (secretName string, secretNS string, err error) { + if source, err := getScaleIOVolumeSourceFromSpec(spec); err == nil { + secretName = source.SecretRef.Name + if pod != nil { + secretNS = pod.Namespace + } + } else if source, err := getScaleIOPersistentVolumeSourceFromSpec(spec); err == nil { + if source.SecretRef != nil { + secretName = source.SecretRef.Name + secretNS = source.SecretRef.Namespace + if secretNS == "" && pod != nil { + secretNS = pod.Namespace + } + } + } else { + return "", "", errors.New("failed to get ScaleIOVolumeSource or ScaleIOPersistentVolumeSource") + } + return secretName, secretNS, nil +} + func log(msg string, parts ...interface{}) string { return fmt.Sprintf(fmt.Sprintf("scaleio: %s", msg), parts...) 
} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_volume.go b/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_volume.go index 85e7fb0b43f9..0d0d35608bd2 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_volume.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_volume.go @@ -37,19 +37,19 @@ import ( ) type sioVolume struct { - sioMgr *sioMgr - plugin *sioPlugin - pod *api.Pod - podUID types.UID - spec *volume.Spec - source *api.ScaleIOVolumeSource - namespace string - volSpecName string - volName string - readOnly bool - fsType string - options volume.VolumeOptions - configData map[string]string + sioMgr *sioMgr + plugin *sioPlugin + pod *api.Pod + podUID types.UID + spec *volume.Spec + secretName string + secretNamespace string + volSpecName string + volName string + readOnly bool + fsType string + options volume.VolumeOptions + configData map[string]string volume.MetricsNil } @@ -59,7 +59,6 @@ type sioVolume struct { var _ volume.Volume = &sioVolume{} // GetPath returns the path where the volume will be mounted. -// The volumeName is prefixed with the pod's namespace a - func (v *sioVolume) GetPath() string { return v.plugin.host.GetPodVolumeDir( v.podUID, @@ -128,11 +127,11 @@ func (v *sioVolume) SetUpAt(dir string, fsGroup *int64) error { switch { default: options = append(options, "rw") - case isROM && !v.source.ReadOnly: + case isROM && !v.readOnly: options = append(options, "rw") case isROM: options = append(options, "ro") - case v.source.ReadOnly: + case v.readOnly: options = append(options, "ro") } @@ -327,10 +326,10 @@ func (v *sioVolume) Provision() (*api.PersistentVolume, error) { ), }, PersistentVolumeSource: api.PersistentVolumeSource{ - ScaleIO: &api.ScaleIOVolumeSource{ + ScaleIO: &api.ScaleIOPersistentVolumeSource{ Gateway: v.configData[confKey.gateway], SSLEnabled: sslEnabled, - SecretRef: &api.LocalObjectReference{Name: v.configData[confKey.secretRef]}, + SecretRef: &api.SecretReference{Name: v.secretName, Namespace: v.secretNamespace}, System: v.configData[confKey.system], ProtectionDomain: v.configData[confKey.protectionDomain], StoragePool: v.configData[confKey.storagePool], @@ -366,13 +365,17 @@ func (v *sioVolume) setSioMgr() error { glog.V(4).Info(log("previous config file not found, creating new one")) // prepare config data configData = make(map[string]string) - mapVolumeSource(configData, v.source) + mapVolumeSpec(configData, v.spec) + + // additional config data + configData[confKey.secretNamespace] = v.secretNamespace + configData[confKey.secretName] = v.secretName + configData[confKey.volSpecName] = v.volSpecName + if err := validateConfigs(configData); err != nil { glog.Error(log("config setup failed: %s", err)) return err } - configData[confKey.namespace] = v.namespace - configData[confKey.volSpecName] = v.volSpecName // persist config if err := saveConfig(configName, configData); err != nil { @@ -381,7 +384,7 @@ func (v *sioVolume) setSioMgr() error { } } // merge in secret - if err := attachSecret(v.plugin, v.namespace, configData); err != nil { + if err := attachSecret(v.plugin, v.secretNamespace, configData); err != nil { glog.Error(log("failed to load secret: %v", err)) return err } @@ -414,12 +417,13 @@ func (v *sioVolume) resetSioMgr() error { glog.Error(log("failed to load config data: %v", err)) return err } - v.namespace = configData[confKey.namespace] + v.secretName = configData[confKey.secretName] + v.secretNamespace = configData[confKey.secretNamespace] v.volName = configData[confKey.volumeName] v.volSpecName = 
configData[confKey.volSpecName] // attach secret - if err := attachSecret(v.plugin, v.namespace, configData); err != nil { + if err := attachSecret(v.plugin, v.secretNamespace, configData); err != nil { glog.Error(log("failed to load secret: %v", err)) return err } @@ -446,21 +450,22 @@ func (v *sioVolume) resetSioMgr() error { func (v *sioVolume) setSioMgrFromConfig() error { glog.V(4).Info(log("setting scaleio mgr from available config")) if v.sioMgr == nil { - configData := v.configData - applyConfigDefaults(configData) - if err := validateConfigs(configData); err != nil { + applyConfigDefaults(v.configData) + + v.configData[confKey.volSpecName] = v.volSpecName + + if err := validateConfigs(v.configData); err != nil { glog.Error(log("config data setup failed: %s", err)) return err } - configData[confKey.namespace] = v.namespace - configData[confKey.volSpecName] = v.volSpecName // copy config and attach secret data := map[string]string{} - for k, v := range configData { + for k, v := range v.configData { data[k] = v } - if err := attachSecret(v.plugin, v.namespace, data); err != nil { + + if err := attachSecret(v.plugin, v.secretNamespace, data); err != nil { glog.Error(log("failed to load secret: %v", err)) return err } @@ -483,16 +488,20 @@ func (v *sioVolume) setSioMgrFromSpec() error { if v.sioMgr == nil { // get config data form spec volume source configData := map[string]string{} - mapVolumeSource(configData, v.source) + mapVolumeSpec(configData, v.spec) + + // additional config + configData[confKey.secretNamespace] = v.secretNamespace + configData[confKey.secretName] = v.secretName + configData[confKey.volSpecName] = v.volSpecName + if err := validateConfigs(configData); err != nil { glog.Error(log("config setup failed: %s", err)) return err } - configData[confKey.namespace] = v.namespace - configData[confKey.volSpecName] = v.volSpecName // attach secret object to config data - if err := attachSecret(v.plugin, v.namespace, configData); err != nil { + if err := attachSecret(v.plugin, v.secretNamespace, configData); err != nil { glog.Error(log("failed to load secret: %v", err)) return err } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/secret/BUILD b/vendor/k8s.io/kubernetes/pkg/volume/secret/BUILD index 7743e788fb31..9fc423f420a8 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/secret/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/volume/secret/BUILD @@ -12,6 +12,7 @@ go_library( "doc.go", "secret.go", ], + importpath = "k8s.io/kubernetes/pkg/volume/secret", deps = [ "//pkg/util/io:go_default_library", "//pkg/util/mount:go_default_library", @@ -29,6 +30,7 @@ go_library( go_test( name = "go_default_test", srcs = ["secret_test.go"], + importpath = "k8s.io/kubernetes/pkg/volume/secret", library = ":go_default_library", deps = [ "//pkg/volume:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/volume/secret/OWNERS b/vendor/k8s.io/kubernetes/pkg/volume/secret/OWNERS index 72968b277dad..baaff6ff460f 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/secret/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/volume/secret/OWNERS @@ -7,13 +7,7 @@ reviewers: - ivan4th - rata - sjenning -- thockin -- smarterclayton -- brendandburns -- derekwaynecarr -- pmorie - saad-ali -- justinsb - jsafrane - rootfs - jingxu97 diff --git a/vendor/k8s.io/kubernetes/pkg/volume/storageos/BUILD b/vendor/k8s.io/kubernetes/pkg/volume/storageos/BUILD index fa29f8d24a1c..a66bdc936e35 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/storageos/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/volume/storageos/BUILD @@ -13,6 
+13,7 @@ go_library( "storageos.go", "storageos_util.go", ], + importpath = "k8s.io/kubernetes/pkg/volume/storageos", deps = [ "//pkg/util/mount:go_default_library", "//pkg/util/strings:go_default_library", @@ -36,6 +37,7 @@ go_test( "storageos_test.go", "storageos_util_test.go", ], + importpath = "k8s.io/kubernetes/pkg/volume/storageos", library = ":go_default_library", deps = [ "//pkg/util/mount:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util.go b/vendor/k8s.io/kubernetes/pkg/volume/util.go index b976ce947f56..e28905169806 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util.go @@ -36,7 +36,6 @@ import ( "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" - volutil "k8s.io/kubernetes/pkg/volume/util" ) type RecycleEventRecorder func(eventtype, message string) @@ -48,8 +47,8 @@ type RecycleEventRecorder func(eventtype, message string) // attempted before returning. // // In case there is a pod with the same namespace+name already running, this -// function assumes it's an older instance of the recycler pod and watches -// this old pod instead of starting a new one. +// function deletes it as it is not able to judge if it is an old recycler +// or user has forged a fake recycler to block Kubernetes from recycling.// // // pod - the pod designed by a volume plugin to recycle the volume. pod.Name // will be overwritten with unique name based on PV.Name. @@ -81,20 +80,44 @@ func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Po _, err = recyclerClient.CreatePod(pod) if err != nil { if errors.IsAlreadyExists(err) { - glog.V(5).Infof("old recycler pod %q found for volume", pod.Name) + deleteErr := recyclerClient.DeletePod(pod.Name, pod.Namespace) + if deleteErr != nil { + return fmt.Errorf("failed to delete old recycler pod %s/%s: %s", pod.Namespace, pod.Name, deleteErr) + } + // Recycler will try again and the old pod will be hopefuly deleted + // at that time. + return fmt.Errorf("old recycler pod found, will retry later") } else { return fmt.Errorf("unexpected error creating recycler pod: %+v\n", err) } } - defer func(pod *v1.Pod) { - glog.V(2).Infof("deleting recycler pod %s/%s", pod.Namespace, pod.Name) - if err := recyclerClient.DeletePod(pod.Name, pod.Namespace); err != nil { - glog.Errorf("failed to delete recycler pod %s/%s: %v", pod.Namespace, pod.Name, err) - } - }(pod) + err = waitForPod(pod, recyclerClient, podCh) + + // In all cases delete the recycler pod and log its result. + glog.V(2).Infof("deleting recycler pod %s/%s", pod.Namespace, pod.Name) + deleteErr := recyclerClient.DeletePod(pod.Name, pod.Namespace) + if deleteErr != nil { + glog.Errorf("failed to delete recycler pod %s/%s: %v", pod.Namespace, pod.Name, err) + } + + // Returning recycler error is preferred, the pod will be deleted again on + // the next retry. + if err != nil { + return fmt.Errorf("failed to recycle volume: %s", err) + } - // Now only the old pod or the new pod run. Watch it until it finishes - // and send all events on the pod to the PV + // Recycle succeeded but we failed to delete the recycler pod. Report it, + // the controller will re-try recycling the PV again shortly. + if deleteErr != nil { + return fmt.Errorf("failed to delete recycler pod: %s", deleteErr) + } + + return nil +} + +// waitForPod watches the pod it until it finishes and send all events on the +// pod to the PV. 
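A minimal standalone sketch of the create-or-evict step that internalRecycleVolumeByWatchingPodUntilCompletion now performs before watching the recycler pod; podClient and fakeClient below are hypothetical stand-ins for the recyclerClient used in this file.

	package main

	import (
		"fmt"

		"k8s.io/api/core/v1"
		apierrors "k8s.io/apimachinery/pkg/api/errors"
		"k8s.io/apimachinery/pkg/runtime/schema"
	)

	// podClient models only the two calls exercised by the create-or-evict step.
	type podClient interface {
		CreatePod(pod *v1.Pod) (*v1.Pod, error)
		DeletePod(name, namespace string) error
	}

	// ensureFreshRecyclerPod mirrors the new behaviour: if a pod with the same
	// namespace/name already exists it is deleted (it may be stale or forged),
	// and the caller is told to retry on the next sync.
	func ensureFreshRecyclerPod(c podClient, pod *v1.Pod) error {
		_, err := c.CreatePod(pod)
		if err == nil {
			return nil
		}
		if !apierrors.IsAlreadyExists(err) {
			return fmt.Errorf("unexpected error creating recycler pod: %v", err)
		}
		if deleteErr := c.DeletePod(pod.Name, pod.Namespace); deleteErr != nil {
			return fmt.Errorf("failed to delete old recycler pod %s/%s: %v", pod.Namespace, pod.Name, deleteErr)
		}
		return fmt.Errorf("old recycler pod found, will retry later")
	}

	// fakeClient always reports that the pod already exists, to exercise the path.
	type fakeClient struct{}

	func (fakeClient) CreatePod(pod *v1.Pod) (*v1.Pod, error) {
		return nil, apierrors.NewAlreadyExists(schema.GroupResource{Resource: "pods"}, pod.Name)
	}
	func (fakeClient) DeletePod(name, namespace string) error { return nil }

	func main() {
		pod := &v1.Pod{}
		pod.Name, pod.Namespace = "recycler-for-pv-1", "default"
		fmt.Println(ensureFreshRecyclerPod(fakeClient{}, pod))
	}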
+func waitForPod(pod *v1.Pod, recyclerClient recyclerClient, podCh <-chan watch.Event) error { for { event, ok := <-podCh if !ok { @@ -164,15 +187,15 @@ type realRecyclerClient struct { } func (c *realRecyclerClient) CreatePod(pod *v1.Pod) (*v1.Pod, error) { - return c.client.Core().Pods(pod.Namespace).Create(pod) + return c.client.CoreV1().Pods(pod.Namespace).Create(pod) } func (c *realRecyclerClient) GetPod(name, namespace string) (*v1.Pod, error) { - return c.client.Core().Pods(namespace).Get(name, metav1.GetOptions{}) + return c.client.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{}) } func (c *realRecyclerClient) DeletePod(name, namespace string) error { - return c.client.Core().Pods(namespace).Delete(name, nil) + return c.client.CoreV1().Pods(namespace).Delete(name, nil) } func (c *realRecyclerClient) Event(eventtype, message string) { @@ -189,13 +212,13 @@ func (c *realRecyclerClient) WatchPod(name, namespace string, stopChannel chan s Watch: true, } - podWatch, err := c.client.Core().Pods(namespace).Watch(options) + podWatch, err := c.client.CoreV1().Pods(namespace).Watch(options) if err != nil { return nil, err } eventSelector, _ := fields.ParseSelector("involvedObject.name=" + name) - eventWatch, err := c.client.Core().Events(namespace).Watch(metav1.ListOptions{ + eventWatch, err := c.client.CoreV1().Events(namespace).Watch(metav1.ListOptions{ FieldSelector: eventSelector.String(), Watch: true, }) @@ -400,13 +423,6 @@ func getPVCNameHashAndIndexOffset(pvcName string) (hash uint32, index uint32) { func UnmountViaEmptyDir(dir string, host VolumeHost, volName string, volSpec Spec, podUID types.UID) error { glog.V(3).Infof("Tearing down volume %v for pod %v at %v", volName, podUID, dir) - if pathExists, pathErr := volutil.PathExists(dir); pathErr != nil { - return fmt.Errorf("Error checking if path exists: %v", pathErr) - } else if !pathExists { - glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir) - return nil - } - // Wrap EmptyDir, let it do the teardown. 
wrapped, err := host.NewWrapperUnmounter(volName, volSpec, podUID) if err != nil { diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/BUILD b/vendor/k8s.io/kubernetes/pkg/volume/util/BUILD index 445cb425fb26..48a0b0ee6bf3 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/BUILD @@ -13,10 +13,13 @@ go_library( "device_util.go", "device_util_unsupported.go", "doc.go", + "error.go", + "finalizer.go", "fs_unsupported.go", "io_util.go", "metrics.go", "util.go", + "util_unsupported.go", ] + select({ "@io_bazel_rules_go//go/platform:darwin_amd64": [ "fs.go", @@ -24,12 +27,14 @@ go_library( "@io_bazel_rules_go//go/platform:linux_amd64": [ "device_util_linux.go", "fs.go", + "util_linux.go", ], "//conditions:default": [], }), + importpath = "k8s.io/kubernetes/pkg/volume/util", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/v1/helper:go_default_library", + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/core/v1/helper:go_default_library", "//pkg/kubelet/apis:go_default_library", "//pkg/util/mount:go_default_library", "//vendor/github.com/golang/glog:go_default_library", @@ -40,6 +45,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", ] + select({ @@ -56,6 +62,7 @@ go_library( go_test( name = "go_default_test", srcs = [ + "finalizer_test.go", "util_test.go", ] + select({ "@io_bazel_rules_go//go/platform:linux_amd64": [ @@ -64,10 +71,12 @@ go_test( ], "//conditions:default": [], }), + importpath = "k8s.io/kubernetes/pkg/volume/util", library = ":go_default_library", deps = [ - "//pkg/api/install:go_default_library", - "//pkg/api/v1/helper:go_default_library", + "//pkg/apis/core/install:go_default_library", + "//pkg/apis/core/v1/helper:go_default_library", + "//vendor/github.com/davecgh/go-spew/spew:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/OWNERS b/vendor/k8s.io/kubernetes/pkg/volume/util/OWNERS index abea2824a817..7a41ec511a71 100755 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/OWNERS @@ -1,11 +1,7 @@ approvers: - saad-ali reviewers: -- thockin -- smarterclayton -- pmorie - saad-ali -- justinsb - rootfs - jingxu97 - screeley44 diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/error.go b/vendor/k8s.io/kubernetes/pkg/volume/util/error.go new file mode 100644 index 000000000000..2c9655866b04 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/error.go @@ -0,0 +1,41 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + k8stypes "k8s.io/apimachinery/pkg/types" +) + +// This error on attach indicates volume is attached to a different node +// than we expected. +type DanglingAttachError struct { + msg string + CurrentNode k8stypes.NodeName + DevicePath string +} + +func (err *DanglingAttachError) Error() string { + return err.msg +} + +func NewDanglingError(msg string, node k8stypes.NodeName, devicePath string) error { + return &DanglingAttachError{ + msg: msg, + CurrentNode: node, + DevicePath: devicePath, + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/finalizer.go b/vendor/k8s.io/kubernetes/pkg/volume/util/finalizer.go new file mode 100644 index 000000000000..846315450604 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/finalizer.go @@ -0,0 +1,68 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "k8s.io/api/core/v1" +) + +const ( + // Name of finalizer on PVCs that have a running pod. + PVCProtectionFinalizer = "kubernetes.io/pvc-protection" +) + +// IsPVCBeingDeleted returns: +// true: in case PVC is being deleted, i.e. ObjectMeta.DeletionTimestamp is set +// false: in case PVC is not being deleted, i.e. ObjectMeta.DeletionTimestamp is nil +func IsPVCBeingDeleted(pvc *v1.PersistentVolumeClaim) bool { + return pvc.ObjectMeta.DeletionTimestamp != nil +} + +// IsProtectionFinalizerPresent returns true in case PVCProtectionFinalizer is +// present among the pvc.Finalizers +func IsProtectionFinalizerPresent(pvc *v1.PersistentVolumeClaim) bool { + for _, finalizer := range pvc.Finalizers { + if finalizer == PVCProtectionFinalizer { + return true + } + } + return false +} + +// RemoveProtectionFinalizer returns pvc without PVCProtectionFinalizer in case +// it's present in pvc.Finalizers. It expects that pvc is writable (i.e. is not +// informer's cached copy.) +func RemoveProtectionFinalizer(pvc *v1.PersistentVolumeClaim) { + newFinalizers := make([]string, 0) + for _, finalizer := range pvc.Finalizers { + if finalizer != PVCProtectionFinalizer { + newFinalizers = append(newFinalizers, finalizer) + } + } + if len(newFinalizers) == 0 { + // Sanitize for unit tests so we don't need to distinguish empty array + // and nil. + newFinalizers = nil + } + pvc.Finalizers = newFinalizers +} + +// AddProtectionFinalizer adds PVCProtectionFinalizer to pvc. It expects that +// pvc is writable (i.e. is not informer's cached copy.) 
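A minimal standalone sketch of how the finalizer helpers above (together with AddProtectionFinalizer, whose body follows) are intended to be combined by a PVC protection controller; the constant and decision logic here are illustrative only and mirror, rather than reuse, the vendored helpers.

	package main

	import (
		"fmt"

		"k8s.io/api/core/v1"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	)

	const pvcProtectionFinalizer = "kubernetes.io/pvc-protection" // mirrors PVCProtectionFinalizer

	// hasFinalizer mirrors IsProtectionFinalizerPresent.
	func hasFinalizer(pvc *v1.PersistentVolumeClaim) bool {
		for _, f := range pvc.Finalizers {
			if f == pvcProtectionFinalizer {
				return true
			}
		}
		return false
	}

	// nextAction illustrates the intended flow: add the finalizer to live PVCs
	// that lack it, and remove it from deleted PVCs once no pod uses them.
	func nextAction(pvc *v1.PersistentVolumeClaim, inUseByPod bool) string {
		beingDeleted := pvc.ObjectMeta.DeletionTimestamp != nil // IsPVCBeingDeleted
		switch {
		case !beingDeleted && !hasFinalizer(pvc):
			return "add finalizer"
		case beingDeleted && hasFinalizer(pvc) && !inUseByPod:
			return "remove finalizer, allow deletion"
		default:
			return "nothing to do"
		}
	}

	func main() {
		now := metav1.Now()
		deleted := &v1.PersistentVolumeClaim{}
		deleted.Finalizers = []string{pvcProtectionFinalizer}
		deleted.DeletionTimestamp = &now
		fmt.Println(nextAction(&v1.PersistentVolumeClaim{}, false)) // add finalizer
		fmt.Println(nextAction(deleted, false))                     // remove finalizer, allow deletion
	}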
+func AddProtectionFinalizer(pvc *v1.PersistentVolumeClaim) { + pvc.Finalizers = append(pvc.Finalizers, PVCProtectionFinalizer) +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/metrics.go b/vendor/k8s.io/kubernetes/pkg/volume/util/metrics.go index 087bbfff4169..ab2d76286bb7 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/metrics.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/metrics.go @@ -24,8 +24,9 @@ import ( var storageOperationMetric = prometheus.NewHistogramVec( prometheus.HistogramOpts{ - Name: "storage_operation_duration_seconds", - Help: "Storage operation duration", + Name: "storage_operation_duration_seconds", + Help: "Storage operation duration", + Buckets: []float64{.1, .25, .5, 1, 2.5, 5, 10, 15, 25, 50}, }, []string{"volume_plugin", "operation_name"}, ) diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations/BUILD b/vendor/k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations/BUILD index 60efd00a211d..c0623b47e1d4 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations/BUILD @@ -9,6 +9,7 @@ load( go_library( name = "go_default_library", srcs = ["nestedpendingoperations.go"], + importpath = "k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations", deps = [ "//pkg/util/goroutinemap/exponentialbackoff:go_default_library", "//pkg/volume/util/types:go_default_library", @@ -21,6 +22,7 @@ go_library( go_test( name = "go_default_test", srcs = ["nestedpendingoperations_test.go"], + importpath = "k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations", library = ":go_default_library", deps = [ "//pkg/volume/util/types:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/BUILD b/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/BUILD index 647fd569d334..15c2ac27d837 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/BUILD @@ -12,11 +12,13 @@ go_library( "operation_executor.go", "operation_generator.go", ], + importpath = "k8s.io/kubernetes/pkg/volume/util/operationexecutor", deps = [ "//pkg/controller/volume/expand/cache:go_default_library", "//pkg/features:go_default_library", "//pkg/kubelet/events:go_default_library", "//pkg/util/mount:go_default_library", + "//pkg/util/resizefs:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", "//pkg/volume/util/nestedpendingoperations:go_default_library", @@ -27,6 +29,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/tools/record:go_default_library", @@ -36,6 +39,7 @@ go_library( go_test( name = "go_default_test", srcs = ["operation_executor_test.go"], + importpath = "k8s.io/kubernetes/pkg/volume/util/operationexecutor", library = ":go_default_library", deps = [ "//pkg/controller/volume/expand/cache:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor.go b/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor.go index 8b11358436a6..543067be6b75 100644 --- 
a/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor.go @@ -29,7 +29,9 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + utilfeature "k8s.io/apiserver/pkg/util/feature" expandcache "k8s.io/kubernetes/pkg/controller/volume/expand/cache" + "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" @@ -105,6 +107,28 @@ type OperationExecutor interface { // actual state of the world to reflect that. UnmountDevice(deviceToDetach AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, mounter mount.Interface) error + // MapVolume is used when the volumeMode is 'Block'. + // This method creates a symbolic link to the volume from both the pod + // specified in volumeToMount and global map path. + // Specifically it will: + // * Wait for the device to finish attaching (for attachable volumes only). + // * Update actual state of world to reflect volume is globally mounted/mapped. + // * Map volume to global map path using symbolic link. + // * Map the volume to the pod device map path using symbolic link. + // * Update actual state of world to reflect volume is mounted/mapped to the pod path. + MapVolume(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorld ActualStateOfWorldMounterUpdater) error + + // UnmapVolume unmaps symbolic link to the volume from both the pod device + // map path in volumeToUnmount and global map path. + // And then, updates the actual state of the world to reflect that. + UnmapVolume(volumeToUnmount MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater) error + + // UnmapDevice checks number of symbolic links under global map path. + // If number of reference is zero, remove global map path directory and + // free a volume for detach. + // It then updates the actual state of the world to reflect that. + UnmapDevice(deviceToDetach AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, mounter mount.Interface) error + // VerifyControllerAttachedVolume checks if the specified volume is present // in the specified nodes AttachedVolumes Status field. It uses kubeClient // to fetch the node object. @@ -139,7 +163,7 @@ func NewOperationExecutor( // state of the world cache after successful mount/unmount. type ActualStateOfWorldMounterUpdater interface { // Marks the specified volume as mounted to the specified pod - MarkVolumeAsMounted(podName volumetypes.UniquePodName, podUID types.UID, volumeName v1.UniqueVolumeName, mounter volume.Mounter, outerVolumeSpecName string, volumeGidValue string) error + MarkVolumeAsMounted(podName volumetypes.UniquePodName, podUID types.UID, volumeName v1.UniqueVolumeName, mounter volume.Mounter, blockVolumeMapper volume.BlockVolumeMapper, outerVolumeSpecName string, volumeGidValue string) error // Marks the specified volume as unmounted from the specified pod MarkVolumeAsUnmounted(podName volumetypes.UniquePodName, volumeName v1.UniqueVolumeName) error @@ -495,8 +519,16 @@ type MountedVolume struct { // by kubelet to create container.VolumeMap. Mounter volume.Mounter + // BlockVolumeMapper is the volume mapper used to map this volume. It is required + // by kubelet to create container.VolumeMap. + BlockVolumeMapper volume.BlockVolumeMapper + // VolumeGidValue contains the value of the GID annotation, if present. 
VolumeGidValue string + + // VolumeSpec is a volume spec containing the specification for the volume + // that should be mounted. + VolumeSpec *volume.Spec } // GenerateMsgDetailed returns detailed msgs for mounted volumes @@ -733,6 +765,68 @@ func (oe *operationExecutor) ExpandVolume(pvcWithResizeRequest *expandcache.PVCW return oe.pendingOperations.Run(uniqueVolumeKey, "", expandFunc, opCompleteFunc) } +func (oe *operationExecutor) MapVolume( + waitForAttachTimeout time.Duration, + volumeToMount VolumeToMount, + actualStateOfWorld ActualStateOfWorldMounterUpdater) error { + mapFunc, plugin, err := oe.operationGenerator.GenerateMapVolumeFunc( + waitForAttachTimeout, volumeToMount, actualStateOfWorld) + if err != nil { + return err + } + + // Avoid executing map from multiple pods referencing the + // same volume in parallel + podName := nestedpendingoperations.EmptyUniquePodName + // TODO: remove this -- not necessary + if !volumeToMount.PluginIsAttachable { + // Non-attachable volume plugins can execute mount for multiple pods + // referencing the same volume in parallel + podName = volumehelper.GetUniquePodName(volumeToMount.Pod) + } + + opCompleteFunc := util.OperationCompleteHook(plugin, "map_volume") + return oe.pendingOperations.Run( + volumeToMount.VolumeName, podName, mapFunc, opCompleteFunc) +} + +func (oe *operationExecutor) UnmapVolume( + volumeToUnmount MountedVolume, + actualStateOfWorld ActualStateOfWorldMounterUpdater) error { + unmapFunc, plugin, err := + oe.operationGenerator.GenerateUnmapVolumeFunc(volumeToUnmount, actualStateOfWorld) + if err != nil { + return err + } + + // All volume plugins can execute unmap for multiple pods referencing the + // same volume in parallel + podName := volumetypes.UniquePodName(volumeToUnmount.PodUID) + + opCompleteFunc := util.OperationCompleteHook(plugin, "unmap_volume") + return oe.pendingOperations.Run( + volumeToUnmount.VolumeName, podName, unmapFunc, opCompleteFunc) +} + +func (oe *operationExecutor) UnmapDevice( + deviceToDetach AttachedVolume, + actualStateOfWorld ActualStateOfWorldMounterUpdater, + mounter mount.Interface) error { + unmapDeviceFunc, plugin, err := + oe.operationGenerator.GenerateUnmapDeviceFunc(deviceToDetach, actualStateOfWorld, mounter) + if err != nil { + return err + } + + // Avoid executing unmap device from multiple pods referencing + // the same volume in parallel + podName := nestedpendingoperations.EmptyUniquePodName + + opCompleteFunc := util.OperationCompleteHook(plugin, "unmap_device") + return oe.pendingOperations.Run( + deviceToDetach.VolumeName, podName, unmapDeviceFunc, opCompleteFunc) +} + func (oe *operationExecutor) VerifyControllerAttachedVolume( volumeToMount VolumeToMount, nodeName types.NodeName, @@ -748,6 +842,200 @@ func (oe *operationExecutor) VerifyControllerAttachedVolume( volumeToMount.VolumeName, "" /* podName */, verifyControllerAttachedVolumeFunc, opCompleteFunc) } +// VolumeStateHandler defines a set of operations for handling mount/unmount/detach/reconstruct volume-related operations +type VolumeStateHandler interface { + // Volume is attached, mount/map it + MountVolumeHandler(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorld ActualStateOfWorldMounterUpdater, isRemount bool, remountingLogStr string) error + // Volume is mounted/mapped, unmount/unmap it + UnmountVolumeHandler(mountedVolume MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater) error + // Volume is not referenced from pod, unmount/unmap and detach it + 
UnmountDeviceHandler(attachedVolume AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, mounter mount.Interface) error + // Reconstruct volume from mount path + ReconstructVolumeHandler(plugin volume.VolumePlugin, mapperPlugin volume.BlockVolumePlugin, uid types.UID, podName volumetypes.UniquePodName, volumeSpecName string, mountPath string, pluginName string) (*volume.Spec, error) + // check mount path if volume still exists + CheckVolumeExistence(mountPath, volumeName string, mounter mount.Interface, uniqueVolumeName v1.UniqueVolumeName, podName volumetypes.UniquePodName, podUID types.UID, attachable volume.AttachableVolumePlugin) (bool, error) +} + +// NewVolumeHandler return a new instance of volumeHandler depens on a volumeMode +func NewVolumeHandler(volumeSpec *volume.Spec, oe OperationExecutor) (VolumeStateHandler, error) { + + // TODO: remove feature gate check after no longer needed + var volumeHandler VolumeStateHandler + if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + volumeMode, err := volumehelper.GetVolumeMode(volumeSpec) + if err != nil { + return nil, err + } + if volumeMode == v1.PersistentVolumeFilesystem { + volumeHandler = NewFilesystemVolumeHandler(oe) + } else { + volumeHandler = NewBlockVolumeHandler(oe) + } + } else { + volumeHandler = NewFilesystemVolumeHandler(oe) + } + return volumeHandler, nil +} + +// NewFilesystemVolumeHandler returns a new instance of FilesystemVolumeHandler. +func NewFilesystemVolumeHandler(operationExecutor OperationExecutor) FilesystemVolumeHandler { + return FilesystemVolumeHandler{ + oe: operationExecutor} +} + +// NewBlockVolumeHandler returns a new instance of BlockVolumeHandler. +func NewBlockVolumeHandler(operationExecutor OperationExecutor) BlockVolumeHandler { + return BlockVolumeHandler{ + oe: operationExecutor} +} + +// FilesystemVolumeHandler is VolumeHandler for Filesystem volume +type FilesystemVolumeHandler struct { + oe OperationExecutor +} + +// BlockVolumeHandler is VolumeHandler for Block volume +type BlockVolumeHandler struct { + oe OperationExecutor +} + +// MountVolumeHandler mount/remount a volume when a volume is attached +// This method is handler for filesystem volume +func (f FilesystemVolumeHandler) MountVolumeHandler(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorld ActualStateOfWorldMounterUpdater, isRemount bool, remountingLogStr string) error { + glog.V(12).Infof(volumeToMount.GenerateMsgDetailed("Starting operationExecutor.MountVolume", remountingLogStr)) + err := f.oe.MountVolume( + waitForAttachTimeout, + volumeToMount, + actualStateOfWorld, + isRemount) + return err +} + +// UnmountVolumeHandler unmount a volume if a volume is mounted +// This method is handler for filesystem volume +func (f FilesystemVolumeHandler) UnmountVolumeHandler(mountedVolume MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater) error { + glog.V(12).Infof(mountedVolume.GenerateMsgDetailed("Starting operationExecutor.UnmountVolume", "")) + err := f.oe.UnmountVolume( + mountedVolume, + actualStateOfWorld) + return err +} + +// UnmountDeviceHandler unmount and detach a device if a volume isn't referenced +// This method is handler for filesystem volume +func (f FilesystemVolumeHandler) UnmountDeviceHandler(attachedVolume AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, mounter mount.Interface) error { + glog.V(12).Infof(attachedVolume.GenerateMsgDetailed("Starting operationExecutor.UnmountDevice", "")) + err := f.oe.UnmountDevice( 
+ attachedVolume, + actualStateOfWorld, + mounter) + return err +} + +// ReconstructVolumeHandler create volumeSpec from mount path +// This method is handler for filesystem volume +func (f FilesystemVolumeHandler) ReconstructVolumeHandler(plugin volume.VolumePlugin, _ volume.BlockVolumePlugin, _ types.UID, _ volumetypes.UniquePodName, volumeSpecName string, mountPath string, _ string) (*volume.Spec, error) { + glog.V(12).Infof("Starting operationExecutor.ReconstructVolumepodName") + volumeSpec, err := plugin.ConstructVolumeSpec(volumeSpecName, mountPath) + if err != nil { + return nil, err + } + return volumeSpec, nil +} + +// CheckVolumeExistence checks mount path directory if volume still exists, return true if volume is there +// Also return true for non-attachable volume case without mount point check +// This method is handler for filesystem volume +func (f FilesystemVolumeHandler) CheckVolumeExistence(mountPath, volumeName string, mounter mount.Interface, uniqueVolumeName v1.UniqueVolumeName, podName volumetypes.UniquePodName, podUID types.UID, attachable volume.AttachableVolumePlugin) (bool, error) { + if attachable != nil { + var isNotMount bool + var mountCheckErr error + if isNotMount, mountCheckErr = mounter.IsLikelyNotMountPoint(mountPath); mountCheckErr != nil { + return false, fmt.Errorf("Could not check whether the volume %q (spec.Name: %q) pod %q (UID: %q) is mounted with: %v", + uniqueVolumeName, + volumeName, + podName, + podUID, + mountCheckErr) + } + return !isNotMount, nil + } + return true, nil +} + +// MountVolumeHandler creates a map to device if a volume is attached +// This method is handler for block volume +func (b BlockVolumeHandler) MountVolumeHandler(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorld ActualStateOfWorldMounterUpdater, _ bool, _ string) error { + glog.V(12).Infof(volumeToMount.GenerateMsgDetailed("Starting operationExecutor.MapVolume", "")) + err := b.oe.MapVolume( + waitForAttachTimeout, + volumeToMount, + actualStateOfWorld) + return err +} + +// UnmountVolumeHandler unmap a volume if a volume is mapped +// This method is handler for block volume +func (b BlockVolumeHandler) UnmountVolumeHandler(mountedVolume MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater) error { + glog.V(12).Infof(mountedVolume.GenerateMsgDetailed("Starting operationExecutor.UnmapVolume", "")) + err := b.oe.UnmapVolume( + mountedVolume, + actualStateOfWorld) + return err +} + +// UnmountDeviceHandler detach a device and remove loopback if a volume isn't referenced +// This method is handler for block volume +func (b BlockVolumeHandler) UnmountDeviceHandler(attachedVolume AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, mounter mount.Interface) error { + glog.V(12).Infof(attachedVolume.GenerateMsgDetailed("Starting operationExecutor.UnmapDevice", "")) + err := b.oe.UnmapDevice( + attachedVolume, + actualStateOfWorld, + mounter) + return err +} + +// ReconstructVolumeHandler create volumeSpec from mount path +// This method is handler for block volume +func (b BlockVolumeHandler) ReconstructVolumeHandler(_ volume.VolumePlugin, mapperPlugin volume.BlockVolumePlugin, uid types.UID, podName volumetypes.UniquePodName, volumeSpecName string, mountPath string, pluginName string) (*volume.Spec, error) { + glog.V(12).Infof("Starting operationExecutor.ReconstructVolume") + if mapperPlugin == nil { + return nil, fmt.Errorf("Could not find block volume plugin %q (spec.Name: %q) pod %q (UID: %q)", + pluginName, + 
volumeSpecName, + podName, + uid) + } + // mountPath contains volumeName on the path. In the case of block volume, {volumeName} is symbolic link + // corresponding to raw block device. + // ex. mountPath: pods/{podUid}}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName} + volumeSpec, err := mapperPlugin.ConstructBlockVolumeSpec(uid, volumeSpecName, mountPath) + if err != nil { + return nil, err + } + return volumeSpec, nil +} + +// CheckVolumeExistence checks mount path directory if volume still exists, then return +// true if volume is there. Either plugin is attachable or non-attachable, the plugin +// should have symbolic link associated to raw block device under pod device map +// if volume exists. +// This method is handler for block volume +func (b BlockVolumeHandler) CheckVolumeExistence(mountPath, volumeName string, mounter mount.Interface, uniqueVolumeName v1.UniqueVolumeName, podName volumetypes.UniquePodName, podUID types.UID, _ volume.AttachableVolumePlugin) (bool, error) { + blkutil := util.NewBlockVolumePathHandler() + var islinkExist bool + var checkErr error + if islinkExist, checkErr = blkutil.IsSymlinkExist(mountPath); checkErr != nil { + return false, fmt.Errorf("Could not check whether the block volume %q (spec.Name: %q) pod %q (UID: %q) is mapped to: %v", + uniqueVolumeName, + volumeName, + podName, + podUID, + checkErr) + } + return islinkExist, nil +} + // TODO: this is a workaround for the unmount device issue caused by gci mounter. // In GCI cluster, if gci mounter is used for mounting, the container started by mounter // script will cause additional mounts created in the container. Since these mounts are diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_generator.go b/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_generator.go index 93b05ae6dd1d..2ff4b668f00b 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_generator.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_generator.go @@ -17,7 +17,9 @@ limitations under the License. package operationexecutor import ( + "encoding/json" "fmt" + "strings" "time" "github.com/golang/glog" @@ -25,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/strategicpatch" utilfeature "k8s.io/apiserver/pkg/util/feature" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/record" @@ -32,6 +35,7 @@ import ( "k8s.io/kubernetes/pkg/features" kevents "k8s.io/kubernetes/pkg/kubelet/events" "k8s.io/kubernetes/pkg/util/mount" + "k8s.io/kubernetes/pkg/util/resizefs" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/pkg/volume/util/volumehelper" @@ -55,19 +59,24 @@ type operationGenerator struct { // which verifies that the components (binaries, etc.) required to mount // the volume are available on the underlying node before attempting mount. 
checkNodeCapabilitiesBeforeMount bool + + // blkUtil provides volume path related operations for block volume + blkUtil util.BlockVolumePathHandler } // NewOperationGenerator is returns instance of operationGenerator func NewOperationGenerator(kubeClient clientset.Interface, volumePluginMgr *volume.VolumePluginMgr, recorder record.EventRecorder, - checkNodeCapabilitiesBeforeMount bool) OperationGenerator { + checkNodeCapabilitiesBeforeMount bool, + blkUtil util.BlockVolumePathHandler) OperationGenerator { return &operationGenerator{ kubeClient: kubeClient, volumePluginMgr: volumePluginMgr, recorder: recorder, checkNodeCapabilitiesBeforeMount: checkNodeCapabilitiesBeforeMount, + blkUtil: blkUtil, } } @@ -94,6 +103,15 @@ type OperationGenerator interface { // Generates the function needed to check if the attach_detach controller has attached the volume plugin GenerateVerifyControllerAttachedVolumeFunc(volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, string, error) + // Generates the MapVolume function needed to perform the map of a volume plugin + GenerateMapVolumeFunc(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorldMounterUpdater ActualStateOfWorldMounterUpdater) (func() error, string, error) + + // Generates the UnmapVolume function needed to perform the unmap of a volume plugin + GenerateUnmapVolumeFunc(volumeToUnmount MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater) (func() error, string, error) + + // Generates the UnmapDevice function needed to perform the unmap of a device + GenerateUnmapDeviceFunc(deviceToDetach AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, mounter mount.Interface) (func() error, string, error) + // GetVolumePluginMgr returns volume plugin manager GetVolumePluginMgr() *volume.VolumePluginMgr @@ -267,6 +285,18 @@ func (og *operationGenerator) GenerateAttachVolumeFunc( volumeToAttach.VolumeSpec, volumeToAttach.NodeName) if attachErr != nil { + if derr, ok := attachErr.(*util.DanglingAttachError); ok { + addErr := actualStateOfWorld.MarkVolumeAsAttached( + v1.UniqueVolumeName(""), + volumeToAttach.VolumeSpec, + derr.CurrentNode, + derr.DevicePath) + + if addErr != nil { + glog.Errorf("AttachVolume.MarkVolumeAsAttached failed to fix dangling volume error for volume %q with %s", volumeToAttach.VolumeName, addErr) + } + + } // On failure, return error. Caller will log and retry. eventErr, detailedErr := volumeToAttach.GenerateError("AttachVolume.Attach failed", attachErr) for _, pod := range volumeToAttach.ScheduledPods { @@ -422,7 +452,15 @@ func (og *operationGenerator) GenerateMountVolumeFunc( return volumeToMount.GenerateErrorDetailed("MountVolume.WaitForAttach failed", err) } - glog.Infof(volumeToMount.GenerateMsgDetailed("MountVolume.WaitForAttach succeeded", "")) + glog.Infof(volumeToMount.GenerateMsgDetailed("MountVolume.WaitForAttach succeeded", fmt.Sprintf("DevicePath %q", devicePath))) + + // resizeFileSystem will resize the file system if user has requested a resize of + // underlying persistent volume and is allowed to do so. 
+ resizeError := og.resizeFileSystem(volumeToMount, devicePath, volumePlugin.GetPluginName()) + + if resizeError != nil { + return volumeToMount.GenerateErrorDetailed("MountVolume.Resize failed", resizeError) + } deviceMountPath, err := volumeAttacher.GetDeviceMountPath(volumeToMount.VolumeSpec) @@ -489,6 +527,7 @@ func (og *operationGenerator) GenerateMountVolumeFunc( volumeToMount.Pod.UID, volumeToMount.VolumeName, volumeMounter, + nil, volumeToMount.OuterVolumeSpecName, volumeToMount.VolumeGidValue) if markVolMountedErr != nil { @@ -500,6 +539,73 @@ func (og *operationGenerator) GenerateMountVolumeFunc( }, volumePlugin.GetPluginName(), nil } +func (og *operationGenerator) resizeFileSystem(volumeToMount VolumeToMount, devicePath string, pluginName string) error { + if !utilfeature.DefaultFeatureGate.Enabled(features.ExpandPersistentVolumes) { + glog.V(6).Infof("Resizing is not enabled for this volume %s", volumeToMount.VolumeName) + return nil + } + + mounter := og.volumePluginMgr.Host.GetMounter(pluginName) + // Get expander, if possible + expandableVolumePlugin, _ := + og.volumePluginMgr.FindExpandablePluginBySpec(volumeToMount.VolumeSpec) + + if expandableVolumePlugin != nil && + expandableVolumePlugin.RequiresFSResize() && + volumeToMount.VolumeSpec.PersistentVolume != nil { + pv := volumeToMount.VolumeSpec.PersistentVolume + pvc, err := og.kubeClient.CoreV1().PersistentVolumeClaims(pv.Spec.ClaimRef.Namespace).Get(pv.Spec.ClaimRef.Name, metav1.GetOptions{}) + if err != nil { + // Return error rather than leave the file system un-resized, caller will log and retry + return volumeToMount.GenerateErrorDetailed("MountVolume get PVC failed", err) + } + + pvcStatusCap := pvc.Status.Capacity[v1.ResourceStorage] + pvSpecCap := pv.Spec.Capacity[v1.ResourceStorage] + if pvcStatusCap.Cmp(pvSpecCap) < 0 { + // File system resize was requested, proceed + glog.V(4).Infof(volumeToMount.GenerateMsgDetailed("MountVolume.resizeFileSystem entering", fmt.Sprintf("DevicePath %q", volumeToMount.DevicePath))) + + if volumeToMount.VolumeSpec.ReadOnly { + simpleMsg, detailedMsg := volumeToMount.GenerateMsg("MountVolume.resizeFileSystem failed", "requested read-only file system") + glog.Warningf(detailedMsg) + og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FileSystemResizeFailed, simpleMsg) + return nil + } + + diskFormatter := &mount.SafeFormatAndMount{ + Interface: mounter, + Exec: og.volumePluginMgr.Host.GetExec(expandableVolumePlugin.GetPluginName()), + } + + resizer := resizefs.NewResizeFs(diskFormatter) + resizeStatus, resizeErr := resizer.Resize(devicePath) + + if resizeErr != nil { + resizeDetailedError := volumeToMount.GenerateErrorDetailed("MountVolume.resizeFileSystem failed", resizeErr) + glog.Error(resizeDetailedError) + og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FileSystemResizeFailed, resizeDetailedError.Error()) + return resizeDetailedError + } + + if resizeStatus { + simpleMsg, detailedMsg := volumeToMount.GenerateMsg("MountVolume.resizeFileSystem succeeded", "") + og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeNormal, kevents.FileSystemResizeSuccess, simpleMsg) + glog.Infof(detailedMsg) + } + + // File system resize succeeded, now update the PVC's Capacity to match the PV's + err = updatePVCStatusCapacity(pvc.Name, pvc, pv.Spec.Capacity, og.kubeClient) + if err != nil { + // On retry, resizeFileSystem will be called again but do nothing + return volumeToMount.GenerateErrorDetailed("MountVolume update PVC status failed", err) + } + return nil + } 
+ } + return nil +} + func (og *operationGenerator) GenerateUnmountVolumeFunc( volumeToUnmount MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater) (func() error, string, error) { @@ -592,19 +698,9 @@ func (og *operationGenerator) GenerateUnmountDeviceFunc( // use mounter.PathIsDevice to check if the path is a device, // if so use mounter.DeviceOpened to check if the device is in use anywhere // else on the system. Retry if it returns true. - isDevicePath, devicePathErr := mounter.PathIsDevice(deviceToDetach.DevicePath) - var deviceOpened bool - var deviceOpenedErr error - if !isDevicePath && devicePathErr == nil { - // not a device path or path doesn't exist - //TODO: refer to #36092 - glog.V(3).Infof("Not checking device path %s", deviceToDetach.DevicePath) - deviceOpened = false - } else { - deviceOpened, deviceOpenedErr = mounter.DeviceOpened(deviceToDetach.DevicePath) - if deviceOpenedErr != nil { - return deviceToDetach.GenerateErrorDetailed("UnmountDevice.DeviceOpened failed", deviceOpenedErr) - } + deviceOpened, deviceOpenedErr := isDeviceOpened(deviceToDetach, mounter) + if deviceOpenedErr != nil { + return deviceOpenedErr } // The device is still in use elsewhere. Caller will log and retry. if deviceOpened { @@ -627,6 +723,337 @@ func (og *operationGenerator) GenerateUnmountDeviceFunc( }, attachableVolumePlugin.GetPluginName(), nil } +// GenerateMapVolumeFunc marks volume as mounted based on the following steps. +// If plugin is attachable, call WaitForAttach() and then mark the device +// as mounted. As the next step, SetUpDevice is called regardless of +// plugin type, but this method is mainly targeted at non-attachable plugins. +// After setup is done, create symbolic links on both global map path and pod +// device map path. Once symbolic links are created, take an fd lock by +// loopback for the device to avoid silent volume replacement. This lock +// will be released once no one uses the device. +// If all steps are completed, the volume is marked as mounted. +func (og *operationGenerator) GenerateMapVolumeFunc( + waitForAttachTimeout time.Duration, + volumeToMount VolumeToMount, + actualStateOfWorld ActualStateOfWorldMounterUpdater) (func() error, string, error) { + + // Get block volume mapper plugin + var blockVolumeMapper volume.BlockVolumeMapper + blockVolumePlugin, err := + og.volumePluginMgr.FindMapperPluginBySpec(volumeToMount.VolumeSpec) + if err != nil { + return nil, "", volumeToMount.GenerateErrorDetailed("MapVolume.FindMapperPluginBySpec failed", err) + } + if blockVolumePlugin == nil { + return nil, "", volumeToMount.GenerateErrorDetailed("MapVolume.FindMapperPluginBySpec failed to find BlockVolumeMapper plugin. 
Volume plugin is nil.", nil) + } + affinityErr := checkNodeAffinity(og, volumeToMount, blockVolumePlugin) + if affinityErr != nil { + return nil, blockVolumePlugin.GetPluginName(), affinityErr + } + blockVolumeMapper, newMapperErr := blockVolumePlugin.NewBlockVolumeMapper( + volumeToMount.VolumeSpec, + volumeToMount.Pod, + volume.VolumeOptions{}) + if newMapperErr != nil { + eventErr, detailedErr := volumeToMount.GenerateError("MapVolume.NewBlockVolumeMapper initialization failed", newMapperErr) + og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FailedMapVolume, eventErr.Error()) + return nil, blockVolumePlugin.GetPluginName(), detailedErr + } + + // Get attacher, if possible + attachableVolumePlugin, _ := + og.volumePluginMgr.FindAttachablePluginBySpec(volumeToMount.VolumeSpec) + var volumeAttacher volume.Attacher + if attachableVolumePlugin != nil { + volumeAttacher, _ = attachableVolumePlugin.NewAttacher() + } + + return func() error { + var devicePath string + if volumeAttacher != nil { + // Wait for attachable volumes to finish attaching + glog.Infof(volumeToMount.GenerateMsgDetailed("MapVolume.WaitForAttach entering", fmt.Sprintf("DevicePath %q", volumeToMount.DevicePath))) + + devicePath, err = volumeAttacher.WaitForAttach( + volumeToMount.VolumeSpec, volumeToMount.DevicePath, volumeToMount.Pod, waitForAttachTimeout) + if err != nil { + // On failure, return error. Caller will log and retry. + return volumeToMount.GenerateErrorDetailed("MapVolume.WaitForAttach failed", err) + } + + glog.Infof(volumeToMount.GenerateMsgDetailed("MapVolume.WaitForAttach succeeded", fmt.Sprintf("DevicePath %q", devicePath))) + + // Update actual state of world to reflect volume is globally mounted + markDeviceMappedErr := actualStateOfWorld.MarkDeviceAsMounted( + volumeToMount.VolumeName) + if markDeviceMappedErr != nil { + // On failure, return error. Caller will log and retry. + return volumeToMount.GenerateErrorDetailed("MapVolume.MarkDeviceAsMounted failed", markDeviceMappedErr) + } + } + // A plugin doesn't have attacher also needs to map device to global map path with SetUpDevice() + pluginDevicePath, mapErr := blockVolumeMapper.SetUpDevice() + if mapErr != nil { + // On failure, return error. Caller will log and retry. + eventErr, detailedErr := volumeToMount.GenerateError("MapVolume.SetUp failed", mapErr) + og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FailedMapVolume, eventErr.Error()) + return detailedErr + } + // Update devicePath for none attachable plugin case + if len(devicePath) == 0 { + if len(pluginDevicePath) != 0 { + devicePath = pluginDevicePath + } else { + return volumeToMount.GenerateErrorDetailed("MapVolume failed", fmt.Errorf("Device path of the volume is empty")) + } + } + // Set up global map path under the given plugin directory using symbolic link + globalMapPath, err := + blockVolumeMapper.GetGlobalMapPath(volumeToMount.VolumeSpec) + if err != nil { + // On failure, return error. Caller will log and retry. + return volumeToMount.GenerateErrorDetailed("MapVolume.GetDeviceMountPath failed", err) + } + mapErr = og.blkUtil.MapDevice(devicePath, globalMapPath, string(volumeToMount.Pod.UID)) + if mapErr != nil { + // On failure, return error. Caller will log and retry. 
+ eventErr, detailedErr := volumeToMount.GenerateError("MapVolume.MapDevice failed", mapErr) + og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FailedMapVolume, eventErr.Error()) + return detailedErr + } + // Device mapping for global map path succeeded + simpleMsg, detailedMsg := volumeToMount.GenerateMsg("MapVolume.MapDevice succeeded", fmt.Sprintf("globalMapPath %q", globalMapPath)) + verbosity := glog.Level(4) + og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeNormal, kevents.SuccessfulMountVolume, simpleMsg) + glog.V(verbosity).Infof(detailedMsg) + + // Map device to pod device map path under the given pod directory using symbolic link + volumeMapPath, volName := blockVolumeMapper.GetPodDeviceMapPath() + mapErr = og.blkUtil.MapDevice(devicePath, volumeMapPath, volName) + if mapErr != nil { + // On failure, return error. Caller will log and retry. + eventErr, detailedErr := volumeToMount.GenerateError("MapVolume.MapDevice failed", mapErr) + og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FailedMapVolume, eventErr.Error()) + return detailedErr + } + + // Take a file descriptor lock to keep the block device opened. Otherwise, there is a case + // that the block device is silently removed and another device with the same name is attached. + // The container runtime can't handle this problem. To avoid this unexpected condition, an fd lock + // for the block device is required. + _, err = og.blkUtil.AttachFileDevice(devicePath) + if err != nil { + return volumeToMount.GenerateErrorDetailed("MapVolume.AttachFileDevice failed", err) + } + + // Device mapping for pod device map path succeeded + simpleMsg, detailedMsg = volumeToMount.GenerateMsg("MapVolume.MapDevice succeeded", fmt.Sprintf("volumeMapPath %q", volumeMapPath)) + verbosity = glog.Level(1) + og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeNormal, kevents.SuccessfulMountVolume, simpleMsg) + glog.V(verbosity).Infof(detailedMsg) + + // Update actual state of world + markVolMountedErr := actualStateOfWorld.MarkVolumeAsMounted( + volumeToMount.PodName, + volumeToMount.Pod.UID, + volumeToMount.VolumeName, + nil, + blockVolumeMapper, + volumeToMount.OuterVolumeSpecName, + volumeToMount.VolumeGidValue) + if markVolMountedErr != nil { + // On failure, return error. Caller will log and retry. + return volumeToMount.GenerateErrorDetailed("MapVolume.MarkVolumeAsMounted failed", markVolMountedErr) + } + + return nil + }, blockVolumePlugin.GetPluginName(), nil +} + +// GenerateUnmapVolumeFunc marks volume as unmounted based on the following steps. +// Remove symbolic links from pod device map path dir and global map path dir. +// Once those cleanups are done, remove pod device map path dir. +// If all steps are completed, the volume is marked as unmounted. +func (og *operationGenerator) GenerateUnmapVolumeFunc( + volumeToUnmount MountedVolume, + actualStateOfWorld ActualStateOfWorldMounterUpdater) (func() error, string, error) { + + // Get block volume unmapper plugin + var blockVolumeUnmapper volume.BlockVolumeUnmapper + blockVolumePlugin, err := + og.volumePluginMgr.FindMapperPluginByName(volumeToUnmount.PluginName) + if err != nil { + return nil, "", volumeToUnmount.GenerateErrorDetailed("UnmapVolume.FindMapperPluginByName failed", err) + } + if blockVolumePlugin == nil { + return nil, "", volumeToUnmount.GenerateErrorDetailed("UnmapVolume.FindMapperPluginByName failed to find BlockVolumeMapper plugin. 
Volume plugin is nil.", nil) + } + blockVolumeUnmapper, newUnmapperErr := blockVolumePlugin.NewBlockVolumeUnmapper( + volumeToUnmount.InnerVolumeSpecName, volumeToUnmount.PodUID) + if newUnmapperErr != nil { + return nil, blockVolumePlugin.GetPluginName(), volumeToUnmount.GenerateErrorDetailed("UnmapVolume.NewUnmapper failed", newUnmapperErr) + } + + return func() error { + // Try to unmap volumeName symlink under pod device map path dir + // pods/{podUid}/volumeDevices/{escapeQualifiedPluginName}/{volumeName} + podDeviceUnmapPath, volName := blockVolumeUnmapper.GetPodDeviceMapPath() + unmapDeviceErr := og.blkUtil.UnmapDevice(podDeviceUnmapPath, volName) + if unmapDeviceErr != nil { + // On failure, return error. Caller will log and retry. + return volumeToUnmount.GenerateErrorDetailed("UnmapVolume.UnmapDevice on pod device map path failed", unmapDeviceErr) + } + // Try to unmap podUID symlink under global map path dir + // plugins/kubernetes.io/{PluginName}/volumeDevices/{volumePluginDependentPath}/{podUID} + globalUnmapPath, err := + blockVolumeUnmapper.GetGlobalMapPath(volumeToUnmount.VolumeSpec) + if err != nil { + // On failure, return error. Caller will log and retry. + return volumeToUnmount.GenerateErrorDetailed("UnmapVolume.GetGlobalUnmapPath failed", err) + } + unmapDeviceErr = og.blkUtil.UnmapDevice(globalUnmapPath, string(volumeToUnmount.PodUID)) + if unmapDeviceErr != nil { + // On failure, return error. Caller will log and retry. + return volumeToUnmount.GenerateErrorDetailed("UnmapVolume.UnmapDevice on global map path failed", unmapDeviceErr) + } + + glog.Infof( + "UnmapVolume succeeded for volume %q (OuterVolumeSpecName: %q) pod %q (UID: %q). InnerVolumeSpecName %q. PluginName %q, VolumeGidValue %q", + volumeToUnmount.VolumeName, + volumeToUnmount.OuterVolumeSpecName, + volumeToUnmount.PodName, + volumeToUnmount.PodUID, + volumeToUnmount.InnerVolumeSpecName, + volumeToUnmount.PluginName, + volumeToUnmount.VolumeGidValue) + + // Update actual state of world + markVolUnmountedErr := actualStateOfWorld.MarkVolumeAsUnmounted( + volumeToUnmount.PodName, volumeToUnmount.VolumeName) + if markVolUnmountedErr != nil { + // On failure, just log and exit + glog.Errorf(volumeToUnmount.GenerateErrorDetailed("UnmapVolume.MarkVolumeAsUnmounted failed", markVolUnmountedErr).Error()) + } + + return nil + }, blockVolumePlugin.GetPluginName(), nil +} + +// GenerateUnmapDeviceFunc marks device as unmounted based on following steps. +// Check under globalMapPath dir if there isn't pod's symbolic links in it. +// If symbolick link isn't there, the device isn't referenced from Pods. +// Call plugin TearDownDevice to clean-up device connection, stored data under +// globalMapPath, these operations depend on plugin implementation. +// Once TearDownDevice is completed, remove globalMapPath dir. +// After globalMapPath is removed, fd lock by loopback for the device can +// be released safely because no one can consume the device at this point. +// At last, device open status will be checked just in case. +// If all steps are completed, the device is marked as unmounted. 
+func (og *operationGenerator) GenerateUnmapDeviceFunc( + deviceToDetach AttachedVolume, + actualStateOfWorld ActualStateOfWorldMounterUpdater, + mounter mount.Interface) (func() error, string, error) { + + // Get block volume mapper plugin + var blockVolumeMapper volume.BlockVolumeMapper + blockVolumePlugin, err := + og.volumePluginMgr.FindMapperPluginBySpec(deviceToDetach.VolumeSpec) + if err != nil { + return nil, "", deviceToDetach.GenerateErrorDetailed("UnmapDevice.FindMapperPluginBySpec failed", err) + } + if blockVolumePlugin == nil { + return nil, "", deviceToDetach.GenerateErrorDetailed("UnmapDevice.FindMapperPluginBySpec failed to find BlockVolumeMapper plugin. Volume plugin is nil.", nil) + } + blockVolumeMapper, newMapperErr := blockVolumePlugin.NewBlockVolumeMapper( + deviceToDetach.VolumeSpec, + nil, /* Pod */ + volume.VolumeOptions{}) + if newMapperErr != nil { + return nil, "", deviceToDetach.GenerateErrorDetailed("UnmapDevice.NewBlockVolumeMapper initialization failed", newMapperErr) + } + + blockVolumeUnmapper, newUnmapperErr := blockVolumePlugin.NewBlockVolumeUnmapper( + string(deviceToDetach.VolumeName), + "" /* podUID */) + if newUnmapperErr != nil { + return nil, blockVolumePlugin.GetPluginName(), deviceToDetach.GenerateErrorDetailed("UnmapDevice.NewUnmapper failed", newUnmapperErr) + } + + return func() error { + // Search under globalMapPath dir if all symbolic links from pods have been removed already. + // If symbolick links are there, pods may still refer the volume. + globalMapPath, err := + blockVolumeMapper.GetGlobalMapPath(deviceToDetach.VolumeSpec) + if err != nil { + // On failure, return error. Caller will log and retry. + return deviceToDetach.GenerateErrorDetailed("UnmapDevice.GetGlobalMapPath failed", err) + } + refs, err := og.blkUtil.GetDeviceSymlinkRefs(deviceToDetach.DevicePath, globalMapPath) + if err != nil { + return deviceToDetach.GenerateErrorDetailed("UnmapDevice.GetDeviceSymlinkRefs check failed", err) + } + if len(refs) > 0 { + err = fmt.Errorf("The device %q is still referenced from other Pods %v", globalMapPath, refs) + return deviceToDetach.GenerateErrorDetailed("UnmapDevice failed", err) + } + + // Execute tear down device + unmapErr := blockVolumeUnmapper.TearDownDevice(globalMapPath, deviceToDetach.DevicePath) + if unmapErr != nil { + // On failure, return error. Caller will log and retry. + return deviceToDetach.GenerateErrorDetailed("UnmapDevice.TearDownDevice failed", unmapErr) + } + + // Plugin finished TearDownDevice(). Now globalMapPath dir and plugin's stored data + // on the dir are unnecessary, clean up it. + removeMapPathErr := og.blkUtil.RemoveMapPath(globalMapPath) + if removeMapPathErr != nil { + // On failure, return error. Caller will log and retry. + return deviceToDetach.GenerateErrorDetailed("UnmapDevice failed", removeMapPathErr) + } + + // The block volume is not referenced from Pods. Release file descriptor lock. 
+ glog.V(5).Infof("UnmapDevice: deviceToDetach.DevicePath: %v", deviceToDetach.DevicePath) + loopPath, err := og.blkUtil.GetLoopDevice(deviceToDetach.DevicePath) + if err != nil { + glog.Warningf(deviceToDetach.GenerateMsgDetailed("UnmapDevice: Couldn't find loopback device which takes file descriptor lock", fmt.Sprintf("device path: %q", deviceToDetach.DevicePath))) + } else { + err = og.blkUtil.RemoveLoopDevice(loopPath) + if err != nil { + return deviceToDetach.GenerateErrorDetailed("UnmapDevice.AttachFileDevice failed", err) + } + } + + // Before logging that UnmapDevice succeeded and moving on, + // use mounter.PathIsDevice to check if the path is a device, + // if so use mounter.DeviceOpened to check if the device is in use anywhere + // else on the system. Retry if it returns true. + deviceOpened, deviceOpenedErr := isDeviceOpened(deviceToDetach, mounter) + if deviceOpenedErr != nil { + return deviceOpenedErr + } + // The device is still in use elsewhere. Caller will log and retry. + if deviceOpened { + return deviceToDetach.GenerateErrorDetailed( + "UnmapDevice failed", + fmt.Errorf("the device is in use when it was no longer expected to be in use")) + } + + glog.Infof(deviceToDetach.GenerateMsgDetailed("UnmapDevice succeeded", "")) + + // Update actual state of world + markDeviceUnmountedErr := actualStateOfWorld.MarkDeviceAsUnmounted( + deviceToDetach.VolumeName) + if markDeviceUnmountedErr != nil { + // On failure, return error. Caller will log and retry. + return deviceToDetach.GenerateErrorDetailed("MarkDeviceAsUnmounted failed", markDeviceUnmountedErr) + } + + return nil + }, blockVolumePlugin.GetPluginName(), nil +} + func (og *operationGenerator) GenerateVerifyControllerAttachedVolumeFunc( volumeToMount VolumeToMount, nodeName types.NodeName, @@ -664,7 +1091,7 @@ func (og *operationGenerator) GenerateVerifyControllerAttachedVolumeFunc( } // Fetch current node object - node, fetchErr := og.kubeClient.Core().Nodes().Get(string(nodeName), metav1.GetOptions{}) + node, fetchErr := og.kubeClient.CoreV1().Nodes().Get(string(nodeName), metav1.GetOptions{}) if fetchErr != nil { // On failure, return error. Caller will log and retry. return volumeToMount.GenerateErrorDetailed("VerifyControllerAttachedVolume failed fetching node from API server", fetchErr) @@ -698,7 +1125,7 @@ func (og *operationGenerator) GenerateVerifyControllerAttachedVolumeFunc( func (og *operationGenerator) verifyVolumeIsSafeToDetach( volumeToDetach AttachedVolume) error { // Fetch current node object - node, fetchErr := og.kubeClient.Core().Nodes().Get(string(volumeToDetach.NodeName), metav1.GetOptions{}) + node, fetchErr := og.kubeClient.CoreV1().Nodes().Get(string(volumeToDetach.NodeName), metav1.GetOptions{}) if fetchErr != nil { if errors.IsNotFound(fetchErr) { glog.Warningf(volumeToDetach.GenerateMsgDetailed("Node not found on API server. 
DetachVolume will skip safe to detach check", "")) @@ -755,7 +1182,11 @@ func (og *operationGenerator) GenerateExpandVolumeFunc( og.recorder.Eventf(pvcWithResizeRequest.PVC, v1.EventTypeWarning, kevents.VolumeResizeFailed, expandErr.Error()) return expandErr } + glog.Infof("ExpandVolume succeeded for volume %s", pvcWithResizeRequest.QualifiedName()) newSize = updatedSize + // k8s doesn't have transactions, we can't guarantee that after updating PV - updating PVC will be + // successful, that is why all PVCs for which pvc.Spec.Size > pvc.Status.Size must be reprocessed + // until they reflect user requested size in pvc.Status.Size updateErr := resizeMap.UpdatePVSize(pvcWithResizeRequest, newSize) if updateErr != nil { @@ -763,9 +1194,12 @@ func (og *operationGenerator) GenerateExpandVolumeFunc( og.recorder.Eventf(pvcWithResizeRequest.PVC, v1.EventTypeWarning, kevents.VolumeResizeFailed, updateErr.Error()) return updateErr } + glog.Infof("ExpandVolume.UpdatePV succeeded for volume %s", pvcWithResizeRequest.QualifiedName()) } // No Cloudprovider resize needed, lets mark resizing as done + // Rest of the volume expand controller code will assume PVC as *not* resized until pvc.Status.Size + // reflects user requested size. if !volumePlugin.RequiresFSResize() { glog.V(4).Infof("Controller resizing done for PVC %s", pvcWithResizeRequest.QualifiedName()) err := resizeMap.MarkAsResized(pvcWithResizeRequest, newSize) @@ -816,3 +1250,51 @@ func checkNodeAffinity(og *operationGenerator, volumeToMount VolumeToMount, plug } return nil } + +// isDeviceOpened checks the device status if the device is in use anywhere else on the system +func isDeviceOpened(deviceToDetach AttachedVolume, mounter mount.Interface) (bool, error) { + isDevicePath, devicePathErr := mounter.PathIsDevice(deviceToDetach.DevicePath) + var deviceOpened bool + var deviceOpenedErr error + if !isDevicePath && devicePathErr == nil || + (devicePathErr != nil && strings.Contains(devicePathErr.Error(), "does not exist")) { + // not a device path or path doesn't exist + //TODO: refer to #36092 + glog.V(3).Infof("The path isn't device path or doesn't exist. Skip checking device path: %s", deviceToDetach.DevicePath) + deviceOpened = false + } else { + deviceOpened, deviceOpenedErr = mounter.DeviceOpened(deviceToDetach.DevicePath) + if deviceOpenedErr != nil { + return false, deviceToDetach.GenerateErrorDetailed("DeviceOpened failed", deviceOpenedErr) + } + } + return deviceOpened, nil +} + +func updatePVCStatusCapacity(pvcName string, pvc *v1.PersistentVolumeClaim, capacity v1.ResourceList, client clientset.Interface) error { + pvcCopy := pvc.DeepCopy() + + oldData, err := json.Marshal(pvcCopy) + if err != nil { + return fmt.Errorf("Failed to marshal oldData for pvc %q with %v", pvcName, err) + } + + pvcCopy.Status.Capacity = capacity + pvcCopy.Status.Conditions = []v1.PersistentVolumeClaimCondition{} + newData, err := json.Marshal(pvcCopy) + + if err != nil { + return fmt.Errorf("Failed to marshal newData for pvc %q with %v", pvcName, err) + } + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, pvcCopy) + + if err != nil { + return fmt.Errorf("Failed to CreateTwoWayMergePatch for pvc %q with %v ", pvcName, err) + } + _, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace). 
+ Patch(pvcName, types.StrategicMergePatchType, patchBytes, "status") + if err != nil { + return fmt.Errorf("Failed to patch PVC %q with %v", pvcName, err) + } + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/types/BUILD b/vendor/k8s.io/kubernetes/pkg/volume/util/types/BUILD index 5d7380241ebd..e6be9eb1d7d1 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/types/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/types/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["types.go"], + importpath = "k8s.io/kubernetes/pkg/volume/util/types", deps = ["//vendor/k8s.io/apimachinery/pkg/types:go_default_library"], ) diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/util.go b/vendor/k8s.io/kubernetes/pkg/volume/util/util.go index 976ad96890a8..106036dfb57f 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/util.go @@ -21,7 +21,7 @@ import ( "io/ioutil" "os" "path" - + "path/filepath" "strings" "github.com/golang/glog" @@ -30,15 +30,23 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" clientset "k8s.io/client-go/kubernetes" - "k8s.io/kubernetes/pkg/api" - v1helper "k8s.io/kubernetes/pkg/api/v1/helper" + "k8s.io/kubernetes/pkg/api/legacyscheme" + v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/util/mount" ) -const readyFileName = "ready" +const ( + readyFileName = "ready" + losetupPath = "losetup" + + ErrDeviceNotFound = "device not found" + ErrDeviceNotSupported = "device not supported" + ErrNotAvailable = "not available" +) // IsReady checks for the existence of a regular file // called 'ready' in the given directory and returns @@ -147,7 +155,7 @@ func GetSecretForPod(pod *v1.Pod, secretName string, kubeClient clientset.Interf if kubeClient == nil { return secret, fmt.Errorf("Cannot get kube client") } - secrets, err := kubeClient.Core().Secrets(pod.Namespace).Get(secretName, metav1.GetOptions{}) + secrets, err := kubeClient.CoreV1().Secrets(pod.Namespace).Get(secretName, metav1.GetOptions{}) if err != nil { return secret, err } @@ -163,7 +171,7 @@ func GetSecretForPV(secretNamespace, secretName, volumePluginName string, kubeCl if kubeClient == nil { return secret, fmt.Errorf("Cannot get kube client") } - secrets, err := kubeClient.Core().Secrets(secretNamespace).Get(secretName, metav1.GetOptions{}) + secrets, err := kubeClient.CoreV1().Secrets(secretNamespace).Get(secretName, metav1.GetOptions{}) if err != nil { return secret, err } @@ -233,7 +241,7 @@ func LoadPodFromFile(filePath string) (*v1.Pod, error) { } pod := &v1.Pod{} - codec := api.Codecs.LegacyCodec(api.Registry.GroupOrDie(v1.GroupName).GroupVersion) + codec := legacyscheme.Codecs.UniversalDecoder() if err := runtime.DecodeInto(codec, podDef, pod); err != nil { return nil, fmt.Errorf("failed decoding file: %v", err) } @@ -270,3 +278,201 @@ func stringToSet(str, delimiter string) (sets.String, error) { } return zonesSet, nil } + +// BlockVolumePathHandler defines a set of operations for handling block volume-related operations +type BlockVolumePathHandler interface { + // MapDevice creates a symbolic link to block device under specified map path + MapDevice(devicePath string, mapPath string, linkName string) error + // UnmapDevice removes a symbolic link to block device under specified map path + 
UnmapDevice(mapPath string, linkName string) error + // RemoveMapPath removes a file or directory on specified map path + RemoveMapPath(mapPath string) error + // IsSymlinkExist returns true if specified symbolic link exists + IsSymlinkExist(mapPath string) (bool, error) + // GetDeviceSymlinkRefs searches symbolic links under global map path + GetDeviceSymlinkRefs(devPath string, mapPath string) ([]string, error) + // FindGlobalMapPathUUIDFromPod finds {pod uuid} symbolic link under globalMapPath + // corresponding to map path symlink, and then returns global map path with pod uuid. + FindGlobalMapPathUUIDFromPod(pluginDir, mapPath string, podUID types.UID) (string, error) + // AttachFileDevice takes a path to a regular file and makes it available as an + // attached block device. + AttachFileDevice(path string) (string, error) + // GetLoopDevice returns the full path to the loop device associated with the given path. + GetLoopDevice(path string) (string, error) + // RemoveLoopDevice removes specified loopback device + RemoveLoopDevice(device string) error +} + +// NewBlockVolumePathHandler returns a new instance of BlockVolumePathHandler. +func NewBlockVolumePathHandler() BlockVolumePathHandler { + var volumePathHandler VolumePathHandler + return volumePathHandler +} + +// VolumePathHandler provides path related operation handlers for block volume +type VolumePathHandler struct { +} + +// MapDevice creates a symbolic link to block device under specified map path +func (v VolumePathHandler) MapDevice(devicePath string, mapPath string, linkName string) error { + // Example of global map path: + // globalMapPath/linkName: plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{podUid} + // linkName: {podUid} + // + // Example of pod device map path: + // podDeviceMapPath/linkName: pods/{podUid}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName} + // linkName: {volumeName} + if len(devicePath) == 0 { + return fmt.Errorf("Failed to map device to map path. devicePath is empty") + } + if len(mapPath) == 0 { + return fmt.Errorf("Failed to map device to map path. mapPath is empty") + } + if !filepath.IsAbs(mapPath) { + return fmt.Errorf("The map path should be absolute: map path: %s", mapPath) + } + glog.V(5).Infof("MapDevice: devicePath %s", devicePath) + glog.V(5).Infof("MapDevice: mapPath %s", mapPath) + glog.V(5).Infof("MapDevice: linkName %s", linkName) + + // Check and create mapPath + _, err := os.Stat(mapPath) + if err != nil && !os.IsNotExist(err) { + glog.Errorf("cannot validate map path: %s", mapPath) + return err + } + if err = os.MkdirAll(mapPath, 0750); err != nil { + return fmt.Errorf("Failed to mkdir %s, error %v", mapPath, err) + } + // Remove old symbolic link (or file) then create a new one. + // This should be done because current symbolic link is + // stale across node reboot. + linkPath := path.Join(mapPath, string(linkName)) + if err = os.Remove(linkPath); err != nil && !os.IsNotExist(err) { + return err + } + err = os.Symlink(devicePath, linkPath) + return err +} + +// UnmapDevice removes a symbolic link associated to block device under specified map path +func (v VolumePathHandler) UnmapDevice(mapPath string, linkName string) error { + if len(mapPath) == 0 { + return fmt.Errorf("Failed to unmap device from map path. 
mapPath is empty") + } + glog.V(5).Infof("UnmapDevice: mapPath %s", mapPath) + glog.V(5).Infof("UnmapDevice: linkName %s", linkName) + + // Check symbolic link exists + linkPath := path.Join(mapPath, string(linkName)) + if islinkExist, checkErr := v.IsSymlinkExist(linkPath); checkErr != nil { + return checkErr + } else if !islinkExist { + glog.Warningf("Warning: Unmap skipped because symlink does not exist on the path: %v", linkPath) + return nil + } + err := os.Remove(linkPath) + return err +} + +// RemoveMapPath removes a file or directory on specified map path +func (v VolumePathHandler) RemoveMapPath(mapPath string) error { + if len(mapPath) == 0 { + return fmt.Errorf("Failed to remove map path. mapPath is empty") + } + glog.V(5).Infof("RemoveMapPath: mapPath %s", mapPath) + err := os.RemoveAll(mapPath) + if err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// IsSymlinkExist returns true if specified file exists and the type is symbolic link. + // If file doesn't exist, or file exists but is not a symbolic link, return false with no error. + // In other cases, return false with error from Lstat(). +func (v VolumePathHandler) IsSymlinkExist(mapPath string) (bool, error) { + fi, err := os.Lstat(mapPath) + if err == nil { + // If file exists and it's a symbolic link, return true and no error + if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + return true, nil + } + // If file exists but it's not a symbolic link, return false and no error + return false, nil + } + // If file doesn't exist, return false and no error + if os.IsNotExist(err) { + return false, nil + } + // Return error from Lstat() + return false, err +} + +// GetDeviceSymlinkRefs searches symbolic links under global map path +func (v VolumePathHandler) GetDeviceSymlinkRefs(devPath string, mapPath string) ([]string, error) { + var refs []string + files, err := ioutil.ReadDir(mapPath) + if err != nil { + return nil, fmt.Errorf("Directory cannot read %v", err) + } + for _, file := range files { + if file.Mode()&os.ModeSymlink != os.ModeSymlink { + continue + } + filename := file.Name() + filepath, err := os.Readlink(path.Join(mapPath, filename)) + if err != nil { + return nil, fmt.Errorf("Symbolic link cannot be retrieved %v", err) + } + glog.V(5).Infof("GetDeviceSymlinkRefs: filepath: %v, devPath: %v", filepath, devPath) + if filepath == devPath { + refs = append(refs, path.Join(mapPath, filename)) + } + } + glog.V(5).Infof("GetDeviceSymlinkRefs: refs %v", refs) + return refs, nil +} + +// FindGlobalMapPathUUIDFromPod finds {pod uuid} symbolic link under globalMapPath +// corresponding to map path symlink, and then returns global map path with pod uuid. +// ex. 
mapPath symlink: pods/{podUid}}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName} -> /dev/sdX +// globalMapPath/{pod uuid}: plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{pod uuid} -> /dev/sdX +func (v VolumePathHandler) FindGlobalMapPathUUIDFromPod(pluginDir, mapPath string, podUID types.UID) (string, error) { + var globalMapPathUUID string + // Find symbolic link named pod uuid under plugin dir + err := filepath.Walk(pluginDir, func(path string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + if (fi.Mode()&os.ModeSymlink == os.ModeSymlink) && (fi.Name() == string(podUID)) { + glog.V(5).Infof("FindGlobalMapPathFromPod: path %s, mapPath %s", path, mapPath) + if res, err := compareSymlinks(path, mapPath); err == nil && res { + globalMapPathUUID = path + } + } + return nil + }) + if err != nil { + return "", err + } + glog.V(5).Infof("FindGlobalMapPathFromPod: globalMapPathUUID %s", globalMapPathUUID) + // Return path contains global map path + {pod uuid} + return globalMapPathUUID, nil +} + +func compareSymlinks(global, pod string) (bool, error) { + devGlobal, err := os.Readlink(global) + if err != nil { + return false, err + } + devPod, err := os.Readlink(pod) + if err != nil { + return false, err + } + glog.V(5).Infof("CompareSymlinks: devGloBal %s, devPod %s", devGlobal, devPod) + if devGlobal == devPod { + return true, nil + } + return false, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/util_linux.go b/vendor/k8s.io/kubernetes/pkg/volume/util/util_linux.go new file mode 100644 index 000000000000..755a10f012a4 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/util_linux.go @@ -0,0 +1,106 @@ +// +build linux + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "errors" + "fmt" + "os" + "os/exec" + "strings" + + "github.com/golang/glog" +) + +// AttachFileDevice takes a path to a regular file and makes it available as an +// attached block device. +func (v VolumePathHandler) AttachFileDevice(path string) (string, error) { + blockDevicePath, err := v.GetLoopDevice(path) + if err != nil && err.Error() != ErrDeviceNotFound { + return "", err + } + + // If no existing loop device for the path, create one + if blockDevicePath == "" { + glog.V(4).Infof("Creating device for path: %s", path) + blockDevicePath, err = makeLoopDevice(path) + if err != nil { + return "", err + } + } + return blockDevicePath, nil +} + +// GetLoopDevice returns the full path to the loop device associated with the given path. +func (v VolumePathHandler) GetLoopDevice(path string) (string, error) { + _, err := os.Stat(path) + if os.IsNotExist(err) { + return "", errors.New(ErrNotAvailable) + } + if err != nil { + return "", fmt.Errorf("not attachable: %v", err) + } + + args := []string{"-j", path} + cmd := exec.Command(losetupPath, args...) 
+ out, err := cmd.CombinedOutput() + if err != nil { + glog.V(2).Infof("Failed device discover command for path %s: %v", path, err) + return "", err + } + return parseLosetupOutputForDevice(out) +} + +func makeLoopDevice(path string) (string, error) { + args := []string{"-f", "--show", path} + cmd := exec.Command(losetupPath, args...) + out, err := cmd.CombinedOutput() + if err != nil { + glog.V(2).Infof("Failed device create command for path %s: %v", path, err) + return "", err + } + return parseLosetupOutputForDevice(out) +} + +// RemoveLoopDevice removes specified loopback device +func (v VolumePathHandler) RemoveLoopDevice(device string) error { + args := []string{"-d", device} + cmd := exec.Command(losetupPath, args...) + out, err := cmd.CombinedOutput() + if err != nil { + if !strings.Contains(string(out), "No such device or address") { + return err + } + } + return nil +} + +func parseLosetupOutputForDevice(output []byte) (string, error) { + if len(output) == 0 { + return "", errors.New(ErrDeviceNotFound) + } + + // losetup returns device in the format: + // /dev/loop1: [0073]:148662 (/dev/sda) + device := strings.TrimSpace(strings.SplitN(string(output), ":", 2)[0]) + if len(device) == 0 { + return "", errors.New(ErrDeviceNotFound) + } + return device, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/util_unsupported.go b/vendor/k8s.io/kubernetes/pkg/volume/util/util_unsupported.go new file mode 100644 index 000000000000..930e4f663dd4 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/util_unsupported.go @@ -0,0 +1,39 @@ +// +build !linux + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "fmt" +) + +// AttachFileDevice takes a path to a regular file and makes it available as an +// attached block device. +func (v VolumePathHandler) AttachFileDevice(path string) (string, error) { + return "", fmt.Errorf("AttachFileDevice not supported for this build.") +} + +// GetLoopDevice returns the full path to the loop device associated with the given path. 
+func (v VolumePathHandler) GetLoopDevice(path string) (string, error) { + return "", fmt.Errorf("GetLoopDevice not supported for this build.") +} + +// RemoveLoopDevice removes specified loopback device +func (v VolumePathHandler) RemoveLoopDevice(device string) error { + return fmt.Errorf("RemoveLoopDevice not supported for this build.") +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/volumehelper/BUILD b/vendor/k8s.io/kubernetes/pkg/volume/util/volumehelper/BUILD index 849d3ee4e2c1..3a57c7dbdfb4 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/volumehelper/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/volumehelper/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["volumehelper.go"], + importpath = "k8s.io/kubernetes/pkg/volume/util/volumehelper", deps = [ "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/volumehelper/volumehelper.go b/vendor/k8s.io/kubernetes/pkg/volume/util/volumehelper/volumehelper.go index b0734601d0af..74b14be5de80 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/volumehelper/volumehelper.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/volumehelper/volumehelper.go @@ -136,3 +136,24 @@ func NewSafeFormatAndMountFromHost(pluginName string, host volume.VolumeHost) *m exec := host.GetExec(pluginName) return &mount.SafeFormatAndMount{Interface: mounter, Exec: exec} } + +// GetVolumeMode retrieves VolumeMode from pv. +// If the volume doesn't have PersistentVolume, it's an inline volume, +// should return volumeMode as filesystem to keep existing behavior. +func GetVolumeMode(volumeSpec *volume.Spec) (v1.PersistentVolumeMode, error) { + if volumeSpec == nil || volumeSpec.PersistentVolume == nil { + return v1.PersistentVolumeFilesystem, nil + } + if volumeSpec.PersistentVolume.Spec.VolumeMode != nil { + return *volumeSpec.PersistentVolume.Spec.VolumeMode, nil + } + return "", fmt.Errorf("cannot get volumeMode for volume: %v", volumeSpec.Name()) +} + +// GetPersistentVolumeClaimVolumeMode retrieves VolumeMode from pvc. 
+func GetPersistentVolumeClaimVolumeMode(claim *v1.PersistentVolumeClaim) (v1.PersistentVolumeMode, error) { + if claim.Spec.VolumeMode != nil { + return *claim.Spec.VolumeMode, nil + } + return "", fmt.Errorf("cannot get volumeMode from pvc: %v", claim.Name) +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/validation/BUILD b/vendor/k8s.io/kubernetes/pkg/volume/validation/BUILD index b64dc983784d..a3cc585f7a41 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/validation/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/volume/validation/BUILD @@ -9,9 +9,10 @@ load( go_test( name = "go_default_test", srcs = ["pv_validation_test.go"], + importpath = "k8s.io/kubernetes/pkg/volume/validation", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", ], @@ -20,8 +21,9 @@ go_test( go_library( name = "go_default_library", srcs = ["pv_validation.go"], + importpath = "k8s.io/kubernetes/pkg/volume/validation", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/volume/validation/pv_validation.go b/vendor/k8s.io/kubernetes/pkg/volume/validation/pv_validation.go index 45db2f5e5269..23fa51fdac98 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/validation/pv_validation.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/validation/pv_validation.go @@ -22,7 +22,7 @@ import ( "strings" "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) // ValidatePersistentVolume validates PV object for plugin specific validation diff --git a/vendor/k8s.io/kubernetes/pkg/volume/volume.go b/vendor/k8s.io/kubernetes/pkg/volume/volume.go index 78cb21a32a74..471963556a83 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/volume.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/volume.go @@ -37,6 +37,19 @@ type Volume interface { MetricsProvider } +// BlockVolume interface provides methods to generate global map path +// and pod device map path. +type BlockVolume interface { + // GetGlobalMapPath returns a global map path which contains + // symbolic links associated to a block device. + // ex. plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{pod uuid} + GetGlobalMapPath(spec *Spec) (string, error) + // GetPodDeviceMapPath returns a pod device map path + // and name of a symbolic link associated to a block device. + // ex. pods/{podUid}}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName} + GetPodDeviceMapPath() (string, string) +} + // MetricsProvider exposes metrics (e.g. used,available space) related to a // Volume. type MetricsProvider interface { @@ -132,6 +145,34 @@ type Unmounter interface { TearDownAt(dir string) error } +// BlockVolumeMapper interface provides methods to set up/map the volume. +type BlockVolumeMapper interface { + BlockVolume + // SetUpDevice prepares the volume to a self-determined directory path, + // which may or may not exist yet and returns combination of physical + // device path of a block volume and error. + // If the plugin is non-attachable, it should prepare the device + // in /dev/ (or where appropriate) and return unique device path. 
+ // Unique device path across kubelet node reboot is required to avoid + // unexpected block volume destruction. + // If the plugin is attachable, it should not do anything here, + // just return empty string for device path. + // Instead, attachable plugin have to return unique device path + // at attacher.Attach() and attacher.WaitForAttach(). + // This may be called more than once, so implementations must be idempotent. + SetUpDevice() (string, error) +} + +// BlockVolumeUnmapper interface provides methods to cleanup/unmap the volumes. +type BlockVolumeUnmapper interface { + BlockVolume + // TearDownDevice removes traces of the SetUpDevice procedure under + // a self-determined directory. + // If the plugin is non-attachable, this method detaches the volume + // from a node. + TearDownDevice(mapPath string, devicePath string) error +} + // Provisioner is an interface that creates templates for PersistentVolumes // and can create the volume as a new resource in the infrastructure provider. type Provisioner interface { @@ -195,8 +236,10 @@ type BulkVolumeVerifier interface { // Detacher can detach a volume from a node. type Detacher interface { - // Detach the given device from the node with the given Name. - Detach(deviceName string, nodeName types.NodeName) error + // Detach the given volume from the node with the given Name. + // volumeName is name of the volume as returned from plugin's + // GetVolumeName(). + Detach(volumeName string, nodeName types.NodeName) error // UnmountDevice unmounts the global mount of the disk. This // should only be called once all bind mounts have been diff --git a/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/BUILD b/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/BUILD index 9f5a9ac63a6e..a2bd11e8fbcb 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/BUILD @@ -13,6 +13,7 @@ go_library( "vsphere_volume.go", "vsphere_volume_util.go", ], + importpath = "k8s.io/kubernetes/pkg/volume/vsphere_volume", deps = [ "//pkg/cloudprovider:go_default_library", "//pkg/cloudprovider/providers/vsphere:go_default_library", @@ -37,6 +38,7 @@ go_test( "attacher_test.go", "vsphere_volume_test.go", ], + importpath = "k8s.io/kubernetes/pkg/volume/vsphere_volume", library = ":go_default_library", deps = [ "//pkg/cloudprovider/providers/vsphere/vclib:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/OWNERS b/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/OWNERS index b960f96b6812..5f7fd3d2e9ee 100755 --- a/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/OWNERS @@ -7,11 +7,6 @@ approvers: reviewers: - abithap - abrarshivani -- thockin -- smarterclayton -- brendandburns -- derekwaynecarr -- pmorie - saad-ali - justinsb - jsafrane diff --git a/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/attacher.go b/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/attacher.go index 19edc58ca042..fb9886beba28 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/attacher.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/attacher.go @@ -76,7 +76,7 @@ func (attacher *vsphereVMDKAttacher) Attach(spec *volume.Spec, nodeName types.No // vsphereCloud.AttachDisk checks if disk is already attached to host and // succeeds in that case, so no need to do that separately. 
- diskUUID, err := attacher.vsphereVolumes.AttachDisk(volumeSource.VolumePath, volumeSource.StoragePolicyID, nodeName) + diskUUID, err := attacher.vsphereVolumes.AttachDisk(volumeSource.VolumePath, volumeSource.StoragePolicyName, nodeName) if err != nil { glog.Errorf("Error attaching volume %q to node %q: %+v", volumeSource.VolumePath, nodeName, err) return "", err @@ -86,34 +86,57 @@ func (attacher *vsphereVMDKAttacher) Attach(spec *volume.Spec, nodeName types.No } func (attacher *vsphereVMDKAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) { - volumesAttachedCheck := make(map[*volume.Spec]bool) + glog.Warningf("Attacher.VolumesAreAttached called for node %q - Please use BulkVerifyVolumes for vSphere", nodeName) + volumeNodeMap := map[types.NodeName][]*volume.Spec{ + nodeName: specs, + } + nodeVolumesResult := make(map[*volume.Spec]bool) + nodesVerificationMap, err := attacher.BulkVerifyVolumes(volumeNodeMap) + if err != nil { + glog.Errorf("Attacher.VolumesAreAttached - error checking volumes for node %q with %v", nodeName, err) + return nodeVolumesResult, err + } + if result, ok := nodesVerificationMap[nodeName]; ok { + return result, nil + } + return nodeVolumesResult, nil +} + +func (attacher *vsphereVMDKAttacher) BulkVerifyVolumes(volumesByNode map[types.NodeName][]*volume.Spec) (map[types.NodeName]map[*volume.Spec]bool, error) { + volumesAttachedCheck := make(map[types.NodeName]map[*volume.Spec]bool) + volumePathsByNode := make(map[types.NodeName][]string) volumeSpecMap := make(map[string]*volume.Spec) - volumePathList := []string{} - for _, spec := range specs { - volumeSource, _, err := getVolumeSource(spec) - if err != nil { - glog.Errorf("Error getting volume (%q) source : %v", spec.Name(), err) - continue + + for nodeName, volumeSpecs := range volumesByNode { + for _, volumeSpec := range volumeSpecs { + volumeSource, _, err := getVolumeSource(volumeSpec) + if err != nil { + glog.Errorf("Error getting volume (%q) source : %v", volumeSpec.Name(), err) + continue + } + volPath := volumeSource.VolumePath + volumePathsByNode[nodeName] = append(volumePathsByNode[nodeName], volPath) + nodeVolume, nodeVolumeExists := volumesAttachedCheck[nodeName] + if !nodeVolumeExists { + nodeVolume = make(map[*volume.Spec]bool) + } + nodeVolume[volumeSpec] = true + volumeSpecMap[volPath] = volumeSpec + volumesAttachedCheck[nodeName] = nodeVolume } - volumePathList = append(volumePathList, volumeSource.VolumePath) - volumeSpecMap[volumeSource.VolumePath] = spec } - attachedResult, err := attacher.vsphereVolumes.DisksAreAttached(volumePathList, nodeName) + attachedResult, err := attacher.vsphereVolumes.DisksAreAttached(volumePathsByNode) if err != nil { - glog.Errorf( - "Error checking if volumes (%v) are attached to current node (%q). err=%v", - volumePathList, nodeName, err) - return nil, err + glog.Errorf("Error checking if volumes are attached to nodes: %+v. 
err: %v", volumePathsByNode, err) + return volumesAttachedCheck, err } - for volumePath, attached := range attachedResult { - spec := volumeSpecMap[volumePath] - if !attached { - volumesAttachedCheck[spec] = false - glog.V(2).Infof("VolumesAreAttached: volume %q (specName: %q) is no longer attached", volumePath, spec.Name()) - } else { - volumesAttachedCheck[spec] = true - glog.V(2).Infof("VolumesAreAttached: volume %q (specName: %q) is attached", volumePath, spec.Name()) + for nodeName, nodeVolumes := range attachedResult { + for volumePath, attached := range nodeVolumes { + if !attached { + spec := volumeSpecMap[volumePath] + setNodeVolume(volumesAttachedCheck, spec, nodeName, false) + } } } return volumesAttachedCheck, nil @@ -228,9 +251,9 @@ func (plugin *vsphereVolumePlugin) NewDetacher() (volume.Detacher, error) { } // Detach the given device from the given node. -func (detacher *vsphereVMDKDetacher) Detach(deviceMountPath string, nodeName types.NodeName) error { +func (detacher *vsphereVMDKDetacher) Detach(volumeName string, nodeName types.NodeName) error { - volPath := getVolPathfromDeviceMountPath(deviceMountPath) + volPath := getVolPathfromVolumeName(volumeName) attached, err := detacher.vsphereVolumes.DiskIsAttached(volPath, nodeName) if err != nil { // Log error and continue with detach @@ -257,3 +280,17 @@ func (detacher *vsphereVMDKDetacher) Detach(deviceMountPath string, nodeName typ func (detacher *vsphereVMDKDetacher) UnmountDevice(deviceMountPath string) error { return volumeutil.UnmountPath(deviceMountPath, detacher.mounter) } + +func setNodeVolume( + nodeVolumeMap map[types.NodeName]map[*volume.Spec]bool, + volumeSpec *volume.Spec, + nodeName types.NodeName, + check bool) { + + volumeMap := nodeVolumeMap[nodeName] + if volumeMap == nil { + volumeMap = make(map[*volume.Spec]bool) + nodeVolumeMap[nodeName] = volumeMap + } + volumeMap[volumeSpec] = check +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume.go b/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume.go index 4963f179fe54..6c8fe5b9da68 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume.go @@ -85,7 +85,7 @@ func (plugin *vsphereVolumePlugin) SupportsMountOption() bool { } func (plugin *vsphereVolumePlugin) SupportsBulkVolumeVerification() bool { - return false + return true } func (plugin *vsphereVolumePlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) { diff --git a/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_util.go b/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_util.go index 62ce1f7620b2..0f5d335ec54e 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume_util.go @@ -170,7 +170,7 @@ func (util *VsphereDiskUtil) DeleteVolume(vd *vsphereVolumeDeleter) error { return nil } -func getVolPathfromDeviceMountPath(deviceMountPath string) string { +func getVolPathfromVolumeName(deviceMountPath string) string { // Assumption: No file or folder is named starting with '[' in datastore volPath := deviceMountPath[strings.LastIndex(deviceMountPath, "["):] // space between datastore and vmdk name in volumePath is encoded as '\040' when returned by GetMountRefs(). 
diff --git a/vendor/k8s.io/kubernetes/plugin/cmd/kube-scheduler/app/BUILD b/vendor/k8s.io/kubernetes/plugin/cmd/kube-scheduler/app/BUILD index 39c97b1e6dcf..9de3152c18c2 100644 --- a/vendor/k8s.io/kubernetes/plugin/cmd/kube-scheduler/app/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/cmd/kube-scheduler/app/BUILD @@ -3,21 +3,24 @@ package(default_visibility = ["//visibility:public"]) load( "@io_bazel_rules_go//go:def.bzl", "go_library", - "go_test", ) go_library( name = "go_default_library", - srcs = [ - "configurator.go", - "server.go", - ], + srcs = ["server.go"], + importpath = "k8s.io/kubernetes/plugin/cmd/kube-scheduler/app", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/componentconfig:go_default_library", + "//pkg/apis/componentconfig/v1alpha1:go_default_library", + "//pkg/client/leaderelectionconfig:go_default_library", "//pkg/controller:go_default_library", "//pkg/features:go_default_library", + "//pkg/kubectl/cmd/util:go_default_library", + "//pkg/master/ports:go_default_library", "//pkg/util/configz:go_default_library", - "//plugin/cmd/kube-scheduler/app/options:go_default_library", + "//pkg/version:go_default_library", + "//pkg/version/verflag:go_default_library", "//plugin/pkg/scheduler:go_default_library", "//plugin/pkg/scheduler/algorithmprovider:go_default_library", "//plugin/pkg/scheduler/api:go_default_library", @@ -30,17 +33,19 @@ go_library( "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apiserver/pkg/server/healthz:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/client-go/informers:go_default_library", - "//vendor/k8s.io/client-go/informers/apps/v1beta1:go_default_library", "//vendor/k8s.io/client-go/informers/core/v1:go_default_library", - "//vendor/k8s.io/client-go/informers/extensions/v1beta1:go_default_library", + "//vendor/k8s.io/client-go/informers/storage/v1:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library", + "//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library", "//vendor/k8s.io/client-go/tools/leaderelection:go_default_library", "//vendor/k8s.io/client-go/tools/leaderelection/resourcelock:go_default_library", "//vendor/k8s.io/client-go/tools/record:go_default_library", @@ -56,15 +61,6 @@ filegroup( filegroup( name = "all-srcs", - srcs = [ - ":package-srcs", - "//plugin/cmd/kube-scheduler/app/options:all-srcs", - ], + srcs = [":package-srcs"], tags = ["automanaged"], ) - -go_test( - name = "go_default_test", - srcs = ["configurator_test.go"], - library = ":go_default_library", -) diff --git a/vendor/k8s.io/kubernetes/plugin/cmd/kube-scheduler/app/configurator.go b/vendor/k8s.io/kubernetes/plugin/cmd/kube-scheduler/app/configurator.go deleted file mode 100644 index 4b1fa60835b6..000000000000 --- a/vendor/k8s.io/kubernetes/plugin/cmd/kube-scheduler/app/configurator.go +++ /dev/null @@ -1,193 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package app - -import ( - "fmt" - "io/ioutil" - "os" - - appsinformers "k8s.io/client-go/informers/apps/v1beta1" - coreinformers "k8s.io/client-go/informers/core/v1" - extensionsinformers "k8s.io/client-go/informers/extensions/v1beta1" - "k8s.io/kubernetes/plugin/cmd/kube-scheduler/app/options" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - utilfeature "k8s.io/apiserver/pkg/util/feature" - v1core "k8s.io/client-go/kubernetes/typed/core/v1" - - clientset "k8s.io/client-go/kubernetes" - restclient "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" - "k8s.io/client-go/tools/record" - "k8s.io/kubernetes/pkg/api" - - "k8s.io/api/core/v1" - - "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/plugin/pkg/scheduler" - _ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider" - schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" - latestschedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api/latest" - "k8s.io/kubernetes/plugin/pkg/scheduler/factory" - - "github.com/golang/glog" -) - -func createRecorder(kubecli *clientset.Clientset, s *options.SchedulerServer) record.EventRecorder { - eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartLogging(glog.Infof) - eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubecli.CoreV1().RESTClient()).Events("")}) - return eventBroadcaster.NewRecorder(api.Scheme, v1.EventSource{Component: s.SchedulerName}) -} - -func createClient(s *options.SchedulerServer) (*clientset.Clientset, error) { - kubeconfig, err := clientcmd.BuildConfigFromFlags(s.Master, s.Kubeconfig) - if err != nil { - return nil, fmt.Errorf("unable to build config from flags: %v", err) - } - - kubeconfig.ContentType = s.ContentType - // Override kubeconfig qps/burst settings from flags - kubeconfig.QPS = s.KubeAPIQPS - kubeconfig.Burst = int(s.KubeAPIBurst) - - cli, err := clientset.NewForConfig(restclient.AddUserAgent(kubeconfig, "leader-election")) - if err != nil { - return nil, fmt.Errorf("invalid API configuration: %v", err) - } - return cli, nil -} - -// CreateScheduler encapsulates the entire creation of a runnable scheduler. 
-func CreateScheduler( - s *options.SchedulerServer, - kubecli clientset.Interface, - nodeInformer coreinformers.NodeInformer, - podInformer coreinformers.PodInformer, - pvInformer coreinformers.PersistentVolumeInformer, - pvcInformer coreinformers.PersistentVolumeClaimInformer, - replicationControllerInformer coreinformers.ReplicationControllerInformer, - replicaSetInformer extensionsinformers.ReplicaSetInformer, - statefulSetInformer appsinformers.StatefulSetInformer, - serviceInformer coreinformers.ServiceInformer, - recorder record.EventRecorder, -) (*scheduler.Scheduler, error) { - configurator := factory.NewConfigFactory( - s.SchedulerName, - kubecli, - nodeInformer, - podInformer, - pvInformer, - pvcInformer, - replicationControllerInformer, - replicaSetInformer, - statefulSetInformer, - serviceInformer, - s.HardPodAffinitySymmetricWeight, - utilfeature.DefaultFeatureGate.Enabled(features.EnableEquivalenceClassCache), - ) - - // Rebuild the configurator with a default Create(...) method. - configurator = &schedulerConfigurator{ - configurator, - s.PolicyConfigFile, - s.AlgorithmProvider, - s.PolicyConfigMapName, - s.PolicyConfigMapNamespace, - s.UseLegacyPolicyConfig, - } - - return scheduler.NewFromConfigurator(configurator, func(cfg *scheduler.Config) { - cfg.Recorder = recorder - }) -} - -// schedulerConfigurator is an interface wrapper that provides a way to create -// a scheduler from a user provided config file or ConfigMap object. -type schedulerConfigurator struct { - scheduler.Configurator - policyFile string - algorithmProvider string - policyConfigMap string - policyConfigMapNamespace string - useLegacyPolicyConfig bool -} - -// getSchedulerPolicyConfig finds and decodes scheduler's policy config. If no -// such policy is found, it returns nil, nil. -func (sc schedulerConfigurator) getSchedulerPolicyConfig() (*schedulerapi.Policy, error) { - var configData []byte - var policyConfigMapFound bool - var policy schedulerapi.Policy - - // If not in legacy mode, try to find policy ConfigMap. - if !sc.useLegacyPolicyConfig && len(sc.policyConfigMap) != 0 { - namespace := sc.policyConfigMapNamespace - policyConfigMap, err := sc.GetClient().CoreV1().ConfigMaps(namespace).Get(sc.policyConfigMap, metav1.GetOptions{}) - if err != nil { - return nil, fmt.Errorf("Error getting scheduler policy ConfigMap: %v.", err) - } - if policyConfigMap != nil { - var configString string - configString, policyConfigMapFound = policyConfigMap.Data[options.SchedulerPolicyConfigMapKey] - if !policyConfigMapFound { - return nil, fmt.Errorf("No element with key = '%v' is found in the ConfigMap 'Data'.", options.SchedulerPolicyConfigMapKey) - } - glog.V(5).Infof("Scheduler policy ConfigMap: %v", configString) - configData = []byte(configString) - } - } - - // If we are in legacy mode or ConfigMap name is empty, try to use policy - // config file. - if !policyConfigMapFound { - if _, err := os.Stat(sc.policyFile); err != nil { - // No config file is found. - return nil, nil - } - var err error - configData, err = ioutil.ReadFile(sc.policyFile) - if err != nil { - return nil, fmt.Errorf("unable to read policy config: %v", err) - } - } - - if err := runtime.DecodeInto(latestschedulerapi.Codec, configData, &policy); err != nil { - return nil, fmt.Errorf("invalid configuration: %v", err) - } - return &policy, nil -} - -// Create implements the interface for the Configurator, hence it is exported -// even though the struct is not. 
-func (sc schedulerConfigurator) Create() (*scheduler.Config, error) { - policy, err := sc.getSchedulerPolicyConfig() - if err != nil { - return nil, err - } - // If no policy is found, create scheduler from algorithm provider. - if policy == nil { - if sc.Configurator != nil { - return sc.Configurator.CreateFromProvider(sc.algorithmProvider) - } - return nil, fmt.Errorf("Configurator was nil") - } - - return sc.CreateFromConfig(*policy) -} diff --git a/vendor/k8s.io/kubernetes/plugin/cmd/kube-scheduler/app/options/BUILD b/vendor/k8s.io/kubernetes/plugin/cmd/kube-scheduler/app/options/BUILD deleted file mode 100644 index 5d0364720259..000000000000 --- a/vendor/k8s.io/kubernetes/plugin/cmd/kube-scheduler/app/options/BUILD +++ /dev/null @@ -1,36 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = ["options.go"], - deps = [ - "//pkg/api:go_default_library", - "//pkg/apis/componentconfig:go_default_library", - "//pkg/apis/componentconfig/install:go_default_library", - "//pkg/apis/componentconfig/v1alpha1:go_default_library", - "//pkg/client/leaderelectionconfig:go_default_library", - "//pkg/features:go_default_library", - "//pkg/kubelet/apis:go_default_library", - "//plugin/pkg/scheduler/factory:go_default_library", - "//vendor/github.com/spf13/pflag:go_default_library", - "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/vendor/k8s.io/kubernetes/plugin/cmd/kube-scheduler/app/options/options.go b/vendor/k8s.io/kubernetes/plugin/cmd/kube-scheduler/app/options/options.go deleted file mode 100644 index 4cacad8f5de3..000000000000 --- a/vendor/k8s.io/kubernetes/plugin/cmd/kube-scheduler/app/options/options.go +++ /dev/null @@ -1,96 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package options provides the scheduler flags -package options - -import ( - "fmt" - - utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/componentconfig" - "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1" - "k8s.io/kubernetes/pkg/client/leaderelectionconfig" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" - "k8s.io/kubernetes/plugin/pkg/scheduler/factory" - - // add the kubernetes feature gates - _ "k8s.io/kubernetes/pkg/features" - // install the componentconfig api so we get its defaulting and conversion functions - _ "k8s.io/kubernetes/pkg/apis/componentconfig/install" - - "github.com/spf13/pflag" -) - -// SchedulerPolicyConfigMapKey defines the key of the element in the -// scheduler's policy ConfigMap that contains scheduler's policy config. 
-const SchedulerPolicyConfigMapKey string = "policy.cfg" - -// SchedulerServer has all the context and params needed to run a Scheduler -type SchedulerServer struct { - componentconfig.KubeSchedulerConfiguration - // Master is the address of the Kubernetes API server (overrides any - // value in kubeconfig). - Master string - // Kubeconfig is Path to kubeconfig file with authorization and master - // location information. - Kubeconfig string - // Dynamic configuration for scheduler features. -} - -// NewSchedulerServer creates a new SchedulerServer with default parameters -func NewSchedulerServer() *SchedulerServer { - versioned := &v1alpha1.KubeSchedulerConfiguration{} - api.Scheme.Default(versioned) - cfg := componentconfig.KubeSchedulerConfiguration{} - api.Scheme.Convert(versioned, &cfg, nil) - cfg.LeaderElection.LeaderElect = true - s := SchedulerServer{ - KubeSchedulerConfiguration: cfg, - } - return &s -} - -// AddFlags adds flags for a specific SchedulerServer to the specified FlagSet -func (s *SchedulerServer) AddFlags(fs *pflag.FlagSet) { - fs.Int32Var(&s.Port, "port", s.Port, "The port that the scheduler's http service runs on") - fs.StringVar(&s.Address, "address", s.Address, "The IP address to serve on (set to 0.0.0.0 for all interfaces)") - fs.StringVar(&s.AlgorithmProvider, "algorithm-provider", s.AlgorithmProvider, "The scheduling algorithm provider to use, one of: "+factory.ListAlgorithmProviders()) - fs.StringVar(&s.PolicyConfigFile, "policy-config-file", s.PolicyConfigFile, "File with scheduler policy configuration. This file is used if policy ConfigMap is not provided or --use-legacy-policy-config==true") - usage := fmt.Sprintf("Name of the ConfigMap object that contains scheduler's policy configuration. It must exist in the system namespace before scheduler initialization if --use-legacy-policy-config==false. The config must be provided as the value of an element in 'Data' map with the key='%v'", SchedulerPolicyConfigMapKey) - fs.StringVar(&s.PolicyConfigMapName, "policy-configmap", s.PolicyConfigMapName, usage) - fs.StringVar(&s.PolicyConfigMapNamespace, "policy-configmap-namespace", s.PolicyConfigMapNamespace, "The namespace where policy ConfigMap is located. 
The system namespace will be used if this is not provided or is empty.") - fs.BoolVar(&s.UseLegacyPolicyConfig, "use-legacy-policy-config", false, "When set to true, scheduler will ignore policy ConfigMap and uses policy config file") - fs.BoolVar(&s.EnableProfiling, "profiling", true, "Enable profiling via web interface host:port/debug/pprof/") - fs.BoolVar(&s.EnableContentionProfiling, "contention-profiling", false, "Enable lock contention profiling, if profiling is enabled") - fs.StringVar(&s.Master, "master", s.Master, "The address of the Kubernetes API server (overrides any value in kubeconfig)") - fs.StringVar(&s.Kubeconfig, "kubeconfig", s.Kubeconfig, "Path to kubeconfig file with authorization and master location information.") - fs.StringVar(&s.ContentType, "kube-api-content-type", s.ContentType, "Content type of requests sent to apiserver.") - fs.Float32Var(&s.KubeAPIQPS, "kube-api-qps", s.KubeAPIQPS, "QPS to use while talking with kubernetes apiserver") - fs.Int32Var(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver") - fs.StringVar(&s.SchedulerName, "scheduler-name", s.SchedulerName, "Name of the scheduler, used to select which pods will be processed by this scheduler, based on pod's \"spec.SchedulerName\".") - fs.StringVar(&s.LockObjectNamespace, "lock-object-namespace", s.LockObjectNamespace, "Define the namespace of the lock object.") - fs.StringVar(&s.LockObjectName, "lock-object-name", s.LockObjectName, "Define the name of the lock object.") - fs.IntVar(&s.HardPodAffinitySymmetricWeight, "hard-pod-affinity-symmetric-weight", api.DefaultHardPodAffinitySymmetricWeight, - "RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule corresponding "+ - "to every RequiredDuringScheduling affinity rule. --hard-pod-affinity-symmetric-weight represents the weight of implicit PreferredDuringScheduling affinity rule.") - fs.MarkDeprecated("hard-pod-affinity-symmetric-weight", "This option was moved to the policy configuration file") - fs.StringVar(&s.FailureDomains, "failure-domains", kubeletapis.DefaultFailureDomains, "Indicate the \"all topologies\" set for an empty topologyKey when it's used for PreferredDuringScheduling pod anti-affinity.") - fs.MarkDeprecated("failure-domains", "Doesn't have any effect. Will be removed in future version.") - leaderelectionconfig.BindFlags(&s.LeaderElection, fs) - utilfeature.DefaultFeatureGate.AddFlag(fs) -} diff --git a/vendor/k8s.io/kubernetes/plugin/cmd/kube-scheduler/app/server.go b/vendor/k8s.io/kubernetes/plugin/cmd/kube-scheduler/app/server.go index 05bbbec38fd5..a116da41faf3 100644 --- a/vendor/k8s.io/kubernetes/plugin/cmd/kube-scheduler/app/server.go +++ b/vendor/k8s.io/kubernetes/plugin/cmd/kube-scheduler/app/server.go @@ -18,36 +18,308 @@ limitations under the License. 
package app import ( + "errors" "fmt" + "io/ioutil" "net" "net/http" "net/http/pprof" "os" + "reflect" goruntime "runtime" "strconv" + "time" - "k8s.io/apiserver/pkg/server/healthz" - + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apiserver/pkg/server/healthz" + utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/informers" + coreinformers "k8s.io/client-go/informers/core/v1" + storageinformers "k8s.io/client-go/informers/storage/v1" + clientset "k8s.io/client-go/kubernetes" + v1core "k8s.io/client-go/kubernetes/typed/core/v1" + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" "k8s.io/client-go/tools/leaderelection" "k8s.io/client-go/tools/leaderelection/resourcelock" + "k8s.io/client-go/tools/record" + "k8s.io/kubernetes/pkg/api/legacyscheme" + "k8s.io/kubernetes/pkg/apis/componentconfig" + componentconfigv1alpha1 "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1" + "k8s.io/kubernetes/pkg/client/leaderelectionconfig" "k8s.io/kubernetes/pkg/controller" + "k8s.io/kubernetes/pkg/features" + cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + "k8s.io/kubernetes/pkg/master/ports" "k8s.io/kubernetes/pkg/util/configz" - "k8s.io/kubernetes/plugin/cmd/kube-scheduler/app/options" - _ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider" + "k8s.io/kubernetes/pkg/version" + "k8s.io/kubernetes/pkg/version/verflag" + "k8s.io/kubernetes/plugin/pkg/scheduler" + "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider" + schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" + latestschedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api/latest" + "k8s.io/kubernetes/plugin/pkg/scheduler/factory" "github.com/golang/glog" - "github.com/prometheus/client_golang/prometheus" "github.com/spf13/cobra" "github.com/spf13/pflag" + + "github.com/prometheus/client_golang/prometheus" ) +// SchedulerServer has all the context and params needed to run a Scheduler +type Options struct { + // ConfigFile is the location of the scheduler server's configuration file. + ConfigFile string + + // config is the scheduler server's configuration object. + config *componentconfig.KubeSchedulerConfiguration + + scheme *runtime.Scheme + codecs serializer.CodecFactory + + // The fields below here are placeholders for flags that can't be directly + // mapped into componentconfig.KubeSchedulerConfiguration. + // + // TODO remove these fields once the deprecated flags are removed. + + // master is the address of the Kubernetes API server (overrides any + // value in kubeconfig). + master string + healthzAddress string + healthzPort int32 + policyConfigFile string + policyConfigMapName string + policyConfigMapNamespace string + useLegacyPolicyConfig bool + algorithmProvider string +} + +// AddFlags adds flags for a specific SchedulerServer to the specified FlagSet +func AddFlags(options *Options, fs *pflag.FlagSet) { + fs.StringVar(&options.ConfigFile, "config", options.ConfigFile, "The path to the configuration file.") + + // All flags below here are deprecated and will eventually be removed. 
+ + fs.Int32Var(&options.healthzPort, "port", ports.SchedulerPort, "The port that the scheduler's http service runs on") + fs.StringVar(&options.healthzAddress, "address", options.healthzAddress, "The IP address to serve on (set to 0.0.0.0 for all interfaces)") + fs.StringVar(&options.algorithmProvider, "algorithm-provider", options.algorithmProvider, "The scheduling algorithm provider to use, one of: "+factory.ListAlgorithmProviders()) + fs.StringVar(&options.policyConfigFile, "policy-config-file", options.policyConfigFile, "File with scheduler policy configuration. This file is used if policy ConfigMap is not provided or --use-legacy-policy-config==true") + usage := fmt.Sprintf("Name of the ConfigMap object that contains scheduler's policy configuration. It must exist in the system namespace before scheduler initialization if --use-legacy-policy-config==false. The config must be provided as the value of an element in 'Data' map with the key='%v'", componentconfig.SchedulerPolicyConfigMapKey) + fs.StringVar(&options.policyConfigMapName, "policy-configmap", options.policyConfigMapName, usage) + fs.StringVar(&options.policyConfigMapNamespace, "policy-configmap-namespace", options.policyConfigMapNamespace, "The namespace where policy ConfigMap is located. The system namespace will be used if this is not provided or is empty.") + fs.BoolVar(&options.useLegacyPolicyConfig, "use-legacy-policy-config", false, "When set to true, scheduler will ignore policy ConfigMap and uses policy config file") + fs.BoolVar(&options.config.EnableProfiling, "profiling", options.config.EnableProfiling, "Enable profiling via web interface host:port/debug/pprof/") + fs.BoolVar(&options.config.EnableContentionProfiling, "contention-profiling", options.config.EnableContentionProfiling, "Enable lock contention profiling, if profiling is enabled") + fs.StringVar(&options.master, "master", options.master, "The address of the Kubernetes API server (overrides any value in kubeconfig)") + fs.StringVar(&options.config.ClientConnection.KubeConfigFile, "kubeconfig", options.config.ClientConnection.KubeConfigFile, "Path to kubeconfig file with authorization and master location information.") + fs.StringVar(&options.config.ClientConnection.ContentType, "kube-api-content-type", options.config.ClientConnection.ContentType, "Content type of requests sent to apiserver.") + fs.Float32Var(&options.config.ClientConnection.QPS, "kube-api-qps", options.config.ClientConnection.QPS, "QPS to use while talking with kubernetes apiserver") + fs.Int32Var(&options.config.ClientConnection.Burst, "kube-api-burst", options.config.ClientConnection.Burst, "Burst to use while talking with kubernetes apiserver") + fs.StringVar(&options.config.SchedulerName, "scheduler-name", options.config.SchedulerName, "Name of the scheduler, used to select which pods will be processed by this scheduler, based on pod's \"spec.SchedulerName\".") + fs.StringVar(&options.config.LeaderElection.LockObjectNamespace, "lock-object-namespace", options.config.LeaderElection.LockObjectNamespace, "Define the namespace of the lock object.") + fs.StringVar(&options.config.LeaderElection.LockObjectName, "lock-object-name", options.config.LeaderElection.LockObjectName, "Define the name of the lock object.") + fs.Int32Var(&options.config.HardPodAffinitySymmetricWeight, "hard-pod-affinity-symmetric-weight", options.config.HardPodAffinitySymmetricWeight, + "RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule 
corresponding "+ + "to every RequiredDuringScheduling affinity rule. --hard-pod-affinity-symmetric-weight represents the weight of implicit PreferredDuringScheduling affinity rule.") + fs.MarkDeprecated("hard-pod-affinity-symmetric-weight", "This option was moved to the policy configuration file") + fs.StringVar(&options.config.FailureDomains, "failure-domains", options.config.FailureDomains, "Indicate the \"all topologies\" set for an empty topologyKey when it's used for PreferredDuringScheduling pod anti-affinity.") + fs.MarkDeprecated("failure-domains", "Doesn't have any effect. Will be removed in future version.") + leaderelectionconfig.BindFlags(&options.config.LeaderElection.LeaderElectionConfiguration, fs) + utilfeature.DefaultFeatureGate.AddFlag(fs) +} + +func NewOptions() (*Options, error) { + o := &Options{ + config: new(componentconfig.KubeSchedulerConfiguration), + } + + o.scheme = runtime.NewScheme() + o.codecs = serializer.NewCodecFactory(o.scheme) + + if err := componentconfig.AddToScheme(o.scheme); err != nil { + return nil, err + } + if err := componentconfigv1alpha1.AddToScheme(o.scheme); err != nil { + return nil, err + } + + return o, nil +} + +func (o *Options) Complete() error { + if len(o.ConfigFile) == 0 { + glog.Warning("WARNING: all flags than --config are deprecated. Please begin using a config file ASAP.") + o.applyDeprecatedHealthzAddressToConfig() + o.applyDeprecatedHealthzPortToConfig() + o.applyDeprecatedAlgorithmSourceOptionsToConfig() + } + + return nil +} + +// applyDeprecatedHealthzAddressToConfig sets o.config.HealthzBindAddress and +// o.config.MetricsBindAddress from flags passed on the command line based on +// the following rules: +// +// 1. If --address is empty, leave the config as-is. +// 2. Otherwise, use the value of --address for the address portion of +// o.config.HealthzBindAddress +func (o *Options) applyDeprecatedHealthzAddressToConfig() { + if len(o.healthzAddress) == 0 { + return + } + + _, port, err := net.SplitHostPort(o.config.HealthzBindAddress) + if err != nil { + glog.Fatalf("invalid healthz bind address %q: %v", o.config.HealthzBindAddress, err) + } + o.config.HealthzBindAddress = net.JoinHostPort(o.healthzAddress, port) + o.config.MetricsBindAddress = net.JoinHostPort(o.healthzAddress, port) +} + +// applyDeprecatedHealthzPortToConfig sets o.config.HealthzBindAddress and +// o.config.MetricsBindAddress from flags passed on the command line based on +// the following rules: +// +// 1. If --port is -1, disable the healthz server. +// 2. Otherwise, use the value of --port for the port portion of +// o.config.HealthzBindAddress +func (o *Options) applyDeprecatedHealthzPortToConfig() { + if o.healthzPort == -1 { + o.config.HealthzBindAddress = "" + return + } + + host, _, err := net.SplitHostPort(o.config.HealthzBindAddress) + if err != nil { + glog.Fatalf("invalid healthz bind address %q: %v", o.config.HealthzBindAddress, err) + } + o.config.HealthzBindAddress = net.JoinHostPort(host, strconv.Itoa(int(o.healthzPort))) + o.config.MetricsBindAddress = net.JoinHostPort(host, strconv.Itoa(int(o.healthzPort))) +} + +// applyDeprecatedAlgorithmSourceOptionsToConfig sets o.config.AlgorithmSource from +// flags passed on the command line in the following precedence order: +// +// 1. --use-legacy-policy-config to use a policy file. +// 2. --policy-configmap to use a policy config map value. +// 3. --algorithm-provider to use a named algorithm provider. 
+func (o *Options) applyDeprecatedAlgorithmSourceOptionsToConfig() { + switch { + case o.useLegacyPolicyConfig: + o.config.AlgorithmSource = componentconfig.SchedulerAlgorithmSource{ + Policy: &componentconfig.SchedulerPolicySource{ + File: &componentconfig.SchedulerPolicyFileSource{ + Path: o.policyConfigFile, + }, + }, + } + case len(o.policyConfigMapName) > 0: + o.config.AlgorithmSource = componentconfig.SchedulerAlgorithmSource{ + Policy: &componentconfig.SchedulerPolicySource{ + ConfigMap: &componentconfig.SchedulerPolicyConfigMapSource{ + Name: o.policyConfigMapName, + Namespace: o.policyConfigMapNamespace, + }, + }, + } + case len(o.algorithmProvider) > 0: + o.config.AlgorithmSource = componentconfig.SchedulerAlgorithmSource{ + Provider: &o.algorithmProvider, + } + } +} + +// Validate validates all the required options. +func (o *Options) Validate(args []string) error { + if len(args) != 0 { + return errors.New("no arguments are supported") + } + + return nil +} + +// loadConfigFromFile loads the contents of file and decodes it as a +// KubeSchedulerConfiguration object. +func (o *Options) loadConfigFromFile(file string) (*componentconfig.KubeSchedulerConfiguration, error) { + data, err := ioutil.ReadFile(file) + if err != nil { + return nil, err + } + + return o.loadConfig(data) +} + +// loadConfig decodes data as a KubeSchedulerConfiguration object. +func (o *Options) loadConfig(data []byte) (*componentconfig.KubeSchedulerConfiguration, error) { + configObj, gvk, err := o.codecs.UniversalDecoder().Decode(data, nil, nil) + if err != nil { + return nil, err + } + config, ok := configObj.(*componentconfig.KubeSchedulerConfiguration) + if !ok { + return nil, fmt.Errorf("got unexpected config type: %v", gvk) + } + return config, nil +} + +func (o *Options) ApplyDefaults(in *componentconfig.KubeSchedulerConfiguration) (*componentconfig.KubeSchedulerConfiguration, error) { + external, err := o.scheme.ConvertToVersion(in, componentconfigv1alpha1.SchemeGroupVersion) + if err != nil { + return nil, err + } + + o.scheme.Default(external) + + internal, err := o.scheme.ConvertToVersion(external, componentconfig.SchemeGroupVersion) + if err != nil { + return nil, err + } + + out := internal.(*componentconfig.KubeSchedulerConfiguration) + + return out, nil +} + +func (o *Options) Run() error { + config := o.config + + if len(o.ConfigFile) > 0 { + if c, err := o.loadConfigFromFile(o.ConfigFile); err != nil { + return err + } else { + config = c + } + } + + // Apply algorithms based on feature gates. + // TODO: make configurable? + algorithmprovider.ApplyFeatureGates() + + server, err := NewSchedulerServer(config, o.master) + if err != nil { + return err + } + + stop := make(chan struct{}) + return server.Run(stop) +} + // NewSchedulerCommand creates a *cobra.Command object with default parameters func NewSchedulerCommand() *cobra.Command { - s := options.NewSchedulerServer() - s.AddFlags(pflag.CommandLine) + opts, err := NewOptions() + if err != nil { + glog.Fatalf("unable to initialize command options: %v", err) + } + cmd := &cobra.Command{ Use: "kube-scheduler", Long: `The Kubernetes scheduler is a policy-rich, topology-aware, @@ -58,126 +330,379 @@ constraints, affinity and anti-affinity specifications, data locality, inter-wor interference, deadlines, and so on. 
Workload-specific requirements will be exposed through the API as necessary.`, Run: func(cmd *cobra.Command, args []string) { + verflag.PrintAndExitIfRequested() + cmdutil.CheckErr(opts.Complete()) + cmdutil.CheckErr(opts.Validate(args)) + cmdutil.CheckErr(opts.Run()) }, } + opts.config, err = opts.ApplyDefaults(opts.config) + if err != nil { + glog.Fatalf("unable to apply config defaults: %v", err) + } + + flags := cmd.Flags() + AddFlags(opts, flags) + + cmd.MarkFlagFilename("config", "yaml", "yml", "json") + return cmd } -// Run runs the specified SchedulerServer. This should never exit. -func Run(s *options.SchedulerServer) error { - kubecli, err := createClient(s) - if err != nil { - return fmt.Errorf("unable to create kube client: %v", err) - } +// SchedulerServer represents all the parameters required to start the +// Kubernetes scheduler server. +type SchedulerServer struct { + SchedulerName string + Client clientset.Interface + InformerFactory informers.SharedInformerFactory + PodInformer coreinformers.PodInformer + AlgorithmSource componentconfig.SchedulerAlgorithmSource + HardPodAffinitySymmetricWeight int32 + EventClient v1core.EventsGetter + Recorder record.EventRecorder + Broadcaster record.EventBroadcaster + // LeaderElection is optional. + LeaderElection *leaderelection.LeaderElectionConfig + // HealthzServer is optional. + HealthzServer *http.Server + // MetricsServer is optional. + MetricsServer *http.Server +} - recorder := createRecorder(kubecli, s) +// NewSchedulerServer creates a runnable SchedulerServer from configuration. +func NewSchedulerServer(config *componentconfig.KubeSchedulerConfiguration, master string) (*SchedulerServer, error) { + if config == nil { + return nil, errors.New("config is required") + } - informerFactory := informers.NewSharedInformerFactory(kubecli, 0) - // cache only non-terminal pods - podInformer := factory.NewPodInformer(kubecli, 0) + // Configz registration. + if c, err := configz.New("componentconfig"); err == nil { + c.Set(config) + } else { + glog.Warningf("unable to register configz: %s", err) + } - sched, err := CreateScheduler( - s, - kubecli, - informerFactory.Core().V1().Nodes(), - podInformer, - informerFactory.Core().V1().PersistentVolumes(), - informerFactory.Core().V1().PersistentVolumeClaims(), - informerFactory.Core().V1().ReplicationControllers(), - informerFactory.Extensions().V1beta1().ReplicaSets(), - informerFactory.Apps().V1beta1().StatefulSets(), - informerFactory.Core().V1().Services(), - recorder, - ) + // Prepare some Kube clients. + client, leaderElectionClient, eventClient, err := createClients(config.ClientConnection, master) if err != nil { - return fmt.Errorf("error creating scheduler: %v", err) + return nil, err } - if s.Port != -1 { - go startHTTP(s) - } + // Prepare event clients. + eventBroadcaster := record.NewBroadcaster() + recorder := eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: config.SchedulerName}) - stop := make(chan struct{}) - defer close(stop) - go podInformer.Informer().Run(stop) - informerFactory.Start(stop) - // Waiting for all cache to sync before scheduling. - informerFactory.WaitForCacheSync(stop) - controller.WaitForCacheSync("scheduler", stop, podInformer.Informer().HasSynced) + // Set up leader election if enabled. 
+ var leaderElectionConfig *leaderelection.LeaderElectionConfig + if config.LeaderElection.LeaderElect { + leaderElectionConfig, err = makeLeaderElectionConfig(config.LeaderElection, leaderElectionClient, recorder) + if err != nil { + return nil, err + } + } - run := func(stopCh <-chan struct{}) { - sched.Run() - <-stopCh + // Prepare a healthz server. If the metrics bind address is the same as the + // healthz bind address, consolidate the servers into one. + var healthzServer *http.Server + if len(config.HealthzBindAddress) != 0 { + healthzServer = makeHealthzServer(config) } - if !s.LeaderElection.LeaderElect { - run(stop) - return fmt.Errorf("finished without leader elect") + // Prepare a separate metrics server only if the bind address differs from the + // healthz bind address. + var metricsServer *http.Server + if len(config.MetricsBindAddress) > 0 && config.HealthzBindAddress != config.MetricsBindAddress { + metricsServer = makeMetricsServer(config) } - id, err := os.Hostname() + return &SchedulerServer{ + SchedulerName: config.SchedulerName, + Client: client, + InformerFactory: informers.NewSharedInformerFactory(client, 0), + PodInformer: factory.NewPodInformer(client, 0, config.SchedulerName), + AlgorithmSource: config.AlgorithmSource, + HardPodAffinitySymmetricWeight: config.HardPodAffinitySymmetricWeight, + EventClient: eventClient, + Recorder: recorder, + Broadcaster: eventBroadcaster, + LeaderElection: leaderElectionConfig, + HealthzServer: healthzServer, + MetricsServer: metricsServer, + }, nil +} + +// makeLeaderElectionConfig builds a leader election configuration. It will +// create a new resource lock associated with the configuration. +func makeLeaderElectionConfig(config componentconfig.KubeSchedulerLeaderElectionConfiguration, client clientset.Interface, recorder record.EventRecorder) (*leaderelection.LeaderElectionConfig, error) { + hostname, err := os.Hostname() if err != nil { - return fmt.Errorf("unable to get hostname: %v", err) + return nil, fmt.Errorf("unable to get hostname: %v", err) } - rl, err := resourcelock.New(s.LeaderElection.ResourceLock, - s.LockObjectNamespace, - s.LockObjectName, - kubecli.CoreV1(), + rl, err := resourcelock.New(config.ResourceLock, + config.LockObjectNamespace, + config.LockObjectName, + client.CoreV1(), resourcelock.ResourceLockConfig{ - Identity: id, + Identity: hostname, EventRecorder: recorder, }) if err != nil { - return fmt.Errorf("error creating lock: %v", err) - } - - leaderElector, err := leaderelection.NewLeaderElector( - leaderelection.LeaderElectionConfig{ - Lock: rl, - LeaseDuration: s.LeaderElection.LeaseDuration.Duration, - RenewDeadline: s.LeaderElection.RenewDeadline.Duration, - RetryPeriod: s.LeaderElection.RetryPeriod.Duration, - Callbacks: leaderelection.LeaderCallbacks{ - OnStartedLeading: run, - OnStoppedLeading: func() { - utilruntime.HandleError(fmt.Errorf("lost master")) - }, - }, - }) - if err != nil { - return err + return nil, fmt.Errorf("couldn't create resource lock: %v", err) } - leaderElector.Run() - - return fmt.Errorf("lost lease") + return &leaderelection.LeaderElectionConfig{ + Lock: rl, + LeaseDuration: config.LeaseDuration.Duration, + RenewDeadline: config.RenewDeadline.Duration, + RetryPeriod: config.RetryPeriod.Duration, + }, nil } -func startHTTP(s *options.SchedulerServer) { +// makeHealthzServer creates a healthz server from the config, and will also +// embed the metrics handler if the healthz and metrics address configurations +// are the same. 
+func makeHealthzServer(config *componentconfig.KubeSchedulerConfiguration) *http.Server { mux := http.NewServeMux() healthz.InstallHandler(mux) - if s.EnableProfiling { + if config.HealthzBindAddress == config.MetricsBindAddress { + configz.InstallHandler(mux) + mux.Handle("/metrics", prometheus.Handler()) + } + if config.EnableProfiling { mux.HandleFunc("/debug/pprof/", pprof.Index) mux.HandleFunc("/debug/pprof/profile", pprof.Profile) mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol) mux.HandleFunc("/debug/pprof/trace", pprof.Trace) - if s.EnableContentionProfiling { + if config.EnableContentionProfiling { goruntime.SetBlockProfileRate(1) } } - if c, err := configz.New("componentconfig"); err == nil { - c.Set(s.KubeSchedulerConfiguration) - } else { - glog.Errorf("unable to register configz: %s", err) + return &http.Server{ + Addr: config.HealthzBindAddress, + Handler: mux, } +} + +// makeMetricsServer builds a metrics server from the config. +func makeMetricsServer(config *componentconfig.KubeSchedulerConfiguration) *http.Server { + mux := http.NewServeMux() configz.InstallHandler(mux) mux.Handle("/metrics", prometheus.Handler()) - - server := &http.Server{ - Addr: net.JoinHostPort(s.Address, strconv.Itoa(int(s.Port))), + if config.EnableProfiling { + mux.HandleFunc("/debug/pprof/", pprof.Index) + mux.HandleFunc("/debug/pprof/profile", pprof.Profile) + mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol) + mux.HandleFunc("/debug/pprof/trace", pprof.Trace) + if config.EnableContentionProfiling { + goruntime.SetBlockProfileRate(1) + } + } + return &http.Server{ + Addr: config.MetricsBindAddress, Handler: mux, } - glog.Fatal(server.ListenAndServe()) +} + +// createClients creates a kube client and an event client from the given config and masterOverride. +// TODO remove masterOverride when CLI flags are removed. +func createClients(config componentconfig.ClientConnectionConfiguration, masterOverride string) (clientset.Interface, clientset.Interface, v1core.EventsGetter, error) { + if len(config.KubeConfigFile) == 0 && len(masterOverride) == 0 { + glog.Warningf("Neither --kubeconfig nor --master was specified. Using default API client. This might not work.") + } + + // This creates a client, first loading any specified kubeconfig + // file, and then overriding the Master flag, if non-empty. + kubeConfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig( + &clientcmd.ClientConfigLoadingRules{ExplicitPath: config.KubeConfigFile}, + &clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: masterOverride}}).ClientConfig() + if err != nil { + return nil, nil, nil, err + } + + kubeConfig.AcceptContentTypes = config.AcceptContentTypes + kubeConfig.ContentType = config.ContentType + kubeConfig.QPS = config.QPS + //TODO make config struct use int instead of int32? + kubeConfig.Burst = int(config.Burst) + + client, err := clientset.NewForConfig(restclient.AddUserAgent(kubeConfig, "scheduler")) + if err != nil { + return nil, nil, nil, err + } + + leaderElectionClient, err := clientset.NewForConfig(restclient.AddUserAgent(kubeConfig, "leader-election")) + if err != nil { + return nil, nil, nil, err + } + + eventClient, err := clientset.NewForConfig(kubeConfig) + if err != nil { + return nil, nil, nil, err + } + + return client, leaderElectionClient, eventClient.CoreV1(), nil +} + +// Run runs the SchedulerServer. This should never exit. 
+func (s *SchedulerServer) Run(stop chan struct{}) error { + // To help debugging, immediately log version + glog.Infof("Version: %+v", version.Get()) + + // Build a scheduler config from the provided algorithm source. + schedulerConfig, err := s.SchedulerConfig() + if err != nil { + return err + } + + // Create the scheduler. + sched := scheduler.NewFromConfig(schedulerConfig) + + // Prepare the event broadcaster. + if !reflect.ValueOf(s.Broadcaster).IsNil() && !reflect.ValueOf(s.EventClient).IsNil() { + s.Broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: s.EventClient.Events("")}) + } + + // Start up the healthz server. + if s.HealthzServer != nil { + go wait.Until(func() { + glog.Infof("starting healthz server on %v", s.HealthzServer.Addr) + err := s.HealthzServer.ListenAndServe() + if err != nil { + utilruntime.HandleError(fmt.Errorf("failed to start healthz server: %v", err)) + } + }, 5*time.Second, stop) + } + + // Start up the metrics server. + if s.MetricsServer != nil { + go wait.Until(func() { + glog.Infof("starting metrics server on %v", s.MetricsServer.Addr) + err := s.MetricsServer.ListenAndServe() + if err != nil { + utilruntime.HandleError(fmt.Errorf("failed to start metrics server: %v", err)) + } + }, 5*time.Second, stop) + } + + // Start all informers. + go s.PodInformer.Informer().Run(stop) + s.InformerFactory.Start(stop) + + // Wait for all caches to sync before scheduling. + s.InformerFactory.WaitForCacheSync(stop) + controller.WaitForCacheSync("scheduler", stop, s.PodInformer.Informer().HasSynced) + + // Prepare a reusable run function. + run := func(stopCh <-chan struct{}) { + sched.Run() + <-stopCh + } + + // If leader election is enabled, run via LeaderElector until done and exit. + if s.LeaderElection != nil { + s.LeaderElection.Callbacks = leaderelection.LeaderCallbacks{ + OnStartedLeading: run, + OnStoppedLeading: func() { + utilruntime.HandleError(fmt.Errorf("lost master")) + }, + } + leaderElector, err := leaderelection.NewLeaderElector(*s.LeaderElection) + if err != nil { + return fmt.Errorf("couldn't create leader elector: %v", err) + } + + leaderElector.Run() + + return fmt.Errorf("lost lease") + } + + // Leader election is disabled, so run inline until done. + run(stop) + return fmt.Errorf("finished without leader elect") +} + +// SchedulerConfig creates the scheduler configuration. This is exposed for use +// by tests. +func (s *SchedulerServer) SchedulerConfig() (*scheduler.Config, error) { + var storageClassInformer storageinformers.StorageClassInformer + if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) { + storageClassInformer = s.InformerFactory.Storage().V1().StorageClasses() + } + + // Set up the configurator which can create schedulers from configs. 
+ configurator := factory.NewConfigFactory( + s.SchedulerName, + s.Client, + s.InformerFactory.Core().V1().Nodes(), + s.PodInformer, + s.InformerFactory.Core().V1().PersistentVolumes(), + s.InformerFactory.Core().V1().PersistentVolumeClaims(), + s.InformerFactory.Core().V1().ReplicationControllers(), + s.InformerFactory.Extensions().V1beta1().ReplicaSets(), + s.InformerFactory.Apps().V1beta1().StatefulSets(), + s.InformerFactory.Core().V1().Services(), + s.InformerFactory.Policy().V1beta1().PodDisruptionBudgets(), + storageClassInformer, + s.HardPodAffinitySymmetricWeight, + utilfeature.DefaultFeatureGate.Enabled(features.EnableEquivalenceClassCache), + ) + + source := s.AlgorithmSource + var config *scheduler.Config + switch { + case source.Provider != nil: + // Create the config from a named algorithm provider. + sc, err := configurator.CreateFromProvider(*source.Provider) + if err != nil { + return nil, fmt.Errorf("couldn't create scheduler using provider %q: %v", *source.Provider, err) + } + config = sc + case source.Policy != nil: + // Create the config from a user specified policy source. + policy := &schedulerapi.Policy{} + switch { + case source.Policy.File != nil: + // Use a policy serialized in a file. + policyFile := source.Policy.File.Path + _, err := os.Stat(policyFile) + if err != nil { + return nil, fmt.Errorf("missing policy config file %s", policyFile) + } + data, err := ioutil.ReadFile(policyFile) + if err != nil { + return nil, fmt.Errorf("couldn't read policy config: %v", err) + } + err = runtime.DecodeInto(latestschedulerapi.Codec, []byte(data), policy) + if err != nil { + return nil, fmt.Errorf("invalid policy: %v", err) + } + case source.Policy.ConfigMap != nil: + // Use a policy serialized in a config map value. + policyRef := source.Policy.ConfigMap + policyConfigMap, err := s.Client.CoreV1().ConfigMaps(policyRef.Namespace).Get(policyRef.Name, metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("couldn't get policy config map %s/%s: %v", policyRef.Namespace, policyRef.Name, err) + } + data, found := policyConfigMap.Data[componentconfig.SchedulerPolicyConfigMapKey] + if !found { + return nil, fmt.Errorf("missing policy config map value at key %q", componentconfig.SchedulerPolicyConfigMapKey) + } + err = runtime.DecodeInto(latestschedulerapi.Codec, []byte(data), policy) + if err != nil { + return nil, fmt.Errorf("invalid policy: %v", err) + } + } + sc, err := configurator.CreateFromConfig(*policy) + if err != nil { + return nil, fmt.Errorf("couldn't create scheduler from policy: %v", err) + } + config = sc + default: + return nil, fmt.Errorf("unsupported algorithm source: %v", source) + } + // Additional tweaks to the config produced by the configurator. 
+ config.Recorder = s.Recorder + return config, nil } diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/admit/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/admission/admit/BUILD index 49e9ec1e4777..d660932d2197 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/admit/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/admit/BUILD @@ -9,15 +9,17 @@ load( go_library( name = "go_default_library", srcs = ["admission.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/admit", deps = ["//vendor/k8s.io/apiserver/pkg/admission:go_default_library"], ) go_test( name = "go_default_test", srcs = ["admission_test.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/admit", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/admit/admission.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/admit/admission.go index 6ba8e59dd709..3c428d9136fb 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/admit/admission.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/admit/admission.go @@ -29,19 +29,30 @@ func Register(plugins *admission.Plugins) { }) } -// alwaysAdmit is an implementation of admission.Interface which always says yes to an admit request. +// AlwaysAdmit is an implementation of admission.Interface which always says yes to an admit request. // It is useful in tests and when using kubernetes in an open manner. -type alwaysAdmit struct{} +type AlwaysAdmit struct{} -func (alwaysAdmit) Admit(a admission.Attributes) (err error) { +var _ admission.MutationInterface = AlwaysAdmit{} +var _ admission.ValidationInterface = AlwaysAdmit{} + +// Admit makes an admission decision based on the request attributes +func (AlwaysAdmit) Admit(a admission.Attributes) (err error) { + return nil +} + +// Validate makes an admission decision based on the request attributes. It is NOT allowed to mutate. 
+func (AlwaysAdmit) Validate(a admission.Attributes) (err error) { return nil } -func (alwaysAdmit) Handles(operation admission.Operation) bool { +// Handles returns true if this admission controller can handle the given operation +// where operation can be one of CREATE, UPDATE, DELETE, or CONNECT +func (AlwaysAdmit) Handles(operation admission.Operation) bool { return true } // NewAlwaysAdmit creates a new always admit admission handler -func NewAlwaysAdmit() admission.Interface { - return new(alwaysAdmit) +func NewAlwaysAdmit() *AlwaysAdmit { + return new(AlwaysAdmit) } diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/alwayspullimages/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/admission/alwayspullimages/BUILD index 4066801864f7..ab8a3fd14db1 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/alwayspullimages/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/alwayspullimages/BUILD @@ -9,9 +9,11 @@ load( go_library( name = "go_default_library", srcs = ["admission.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/alwayspullimages", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", ], ) @@ -19,9 +21,10 @@ go_library( go_test( name = "go_default_test", srcs = ["admission_test.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/alwayspullimages", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/alwayspullimages/admission.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/alwayspullimages/admission.go index 0d71f127abff..c9a90f648a76 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/alwayspullimages/admission.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/alwayspullimages/admission.go @@ -28,8 +28,9 @@ import ( "io" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/apiserver/pkg/admission" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) // Register registers a plugin @@ -39,15 +40,19 @@ func Register(plugins *admission.Plugins) { }) } -// alwaysPullImages is an implementation of admission.Interface. +// AlwaysPullImages is an implementation of admission.Interface. // It looks at all new pods and overrides each container's image pull policy to Always. -type alwaysPullImages struct { +type AlwaysPullImages struct { *admission.Handler } -func (a *alwaysPullImages) Admit(attributes admission.Attributes) (err error) { +var _ admission.MutationInterface = &AlwaysPullImages{} +var _ admission.ValidationInterface = &AlwaysPullImages{} + +// Admit makes an admission decision based on the request attributes +func (a *AlwaysPullImages) Admit(attributes admission.Attributes) (err error) { // Ignore all calls to subresources or resources other than pods. 
- if len(attributes.GetSubresource()) != 0 || attributes.GetResource().GroupResource() != api.Resource("pods") { + if shouldIgnore(attributes) { return nil } pod, ok := attributes.GetObject().(*api.Pod) @@ -66,9 +71,51 @@ func (a *alwaysPullImages) Admit(attributes admission.Attributes) (err error) { return nil } +// Validate makes sure that all containers are set to always pull images +func (*AlwaysPullImages) Validate(attributes admission.Attributes) (err error) { + if shouldIgnore(attributes) { + return nil + } + + pod, ok := attributes.GetObject().(*api.Pod) + if !ok { + return apierrors.NewBadRequest("Resource was marked with kind Pod but was unable to be converted") + } + + for i := range pod.Spec.InitContainers { + if pod.Spec.InitContainers[i].ImagePullPolicy != api.PullAlways { + return admission.NewForbidden(attributes, + field.NotSupported(field.NewPath("spec", "initContainers").Index(i).Child("imagePullPolicy"), + pod.Spec.InitContainers[i].ImagePullPolicy, []string{string(api.PullAlways)}, + ), + ) + } + } + for i := range pod.Spec.Containers { + if pod.Spec.Containers[i].ImagePullPolicy != api.PullAlways { + return admission.NewForbidden(attributes, + field.NotSupported(field.NewPath("spec", "containers").Index(i).Child("imagePullPolicy"), + pod.Spec.Containers[i].ImagePullPolicy, []string{string(api.PullAlways)}, + ), + ) + } + } + + return nil +} + +func shouldIgnore(attributes admission.Attributes) bool { + // Ignore all calls to subresources or resources other than pods. + if len(attributes.GetSubresource()) != 0 || attributes.GetResource().GroupResource() != api.Resource("pods") { + return true + } + + return false +} + // NewAlwaysPullImages creates a new always pull images admission control handler -func NewAlwaysPullImages() admission.Interface { - return &alwaysPullImages{ +func NewAlwaysPullImages() *AlwaysPullImages { + return &AlwaysPullImages{ Handler: admission.NewHandler(admission.Create, admission.Update), } } diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/antiaffinity/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/admission/antiaffinity/BUILD index 908646924d00..c0e381995ab2 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/antiaffinity/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/antiaffinity/BUILD @@ -12,8 +12,9 @@ go_library( "admission.go", "doc.go", ], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/antiaffinity", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/kubelet/apis:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", @@ -23,9 +24,10 @@ go_library( go_test( name = "go_default_test", srcs = ["admission_test.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/antiaffinity", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/kubelet/apis:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/antiaffinity/admission.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/antiaffinity/admission.go index c42a1b211c13..bec95b8d8d79 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/antiaffinity/admission.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/antiaffinity/admission.go @@ -22,7 +22,7 @@ import ( apierrors 
"k8s.io/apimachinery/pkg/api/errors" "k8s.io/apiserver/pkg/admission" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" ) @@ -33,21 +33,23 @@ func Register(plugins *admission.Plugins) { }) } -// plugin contains the client used by the admission controller -type plugin struct { +// Plugin contains the client used by the admission controller +type Plugin struct { *admission.Handler } +var _ admission.ValidationInterface = &Plugin{} + // NewInterPodAntiAffinity creates a new instance of the LimitPodHardAntiAffinityTopology admission controller -func NewInterPodAntiAffinity() admission.Interface { - return &plugin{ +func NewInterPodAntiAffinity() *Plugin { + return &Plugin{ Handler: admission.NewHandler(admission.Create, admission.Update), } } -// Admit will deny any pod that defines AntiAffinity topology key other than kubeletapis.LabelHostname i.e. "kubernetes.io/hostname" +// Validate will deny any pod that defines AntiAffinity topology key other than kubeletapis.LabelHostname i.e. "kubernetes.io/hostname" // in requiredDuringSchedulingRequiredDuringExecution and requiredDuringSchedulingIgnoredDuringExecution. -func (p *plugin) Admit(attributes admission.Attributes) (err error) { +func (p *Plugin) Validate(attributes admission.Attributes) (err error) { // Ignore all calls to subresources or resources other than pods. if len(attributes.GetSubresource()) != 0 || attributes.GetResource().GroupResource() != api.Resource("pods") { return nil diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/defaulttolerationseconds/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/admission/defaulttolerationseconds/BUILD index f61605ff3b71..00bfbc9d5ddd 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/defaulttolerationseconds/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/defaulttolerationseconds/BUILD @@ -9,10 +9,11 @@ load( go_test( name = "go_default_test", srcs = ["admission_test.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/defaulttolerationseconds", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/helper:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/helper:go_default_library", "//plugin/pkg/scheduler/algorithm:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", ], @@ -21,9 +22,10 @@ go_test( go_library( name = "go_default_library", srcs = ["admission.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/defaulttolerationseconds", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/helper:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/helper:go_default_library", "//plugin/pkg/scheduler/algorithm:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/defaulttolerationseconds/admission.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/defaulttolerationseconds/admission.go index 430ed22f5bea..05e170a96b40 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/defaulttolerationseconds/admission.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/defaulttolerationseconds/admission.go @@ -23,8 +23,8 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apiserver/pkg/admission" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/helper" + api "k8s.io/kubernetes/pkg/apis/core" + 
"k8s.io/kubernetes/pkg/apis/core/helper" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" ) @@ -45,24 +45,27 @@ func Register(plugins *admission.Plugins) { }) } -// plugin contains the client used by the admission controller +// Plugin contains the client used by the admission controller // It will add default tolerations for every pod // that tolerate taints `notReady:NoExecute` and `unreachable:NoExecute`, // with tolerationSeconds of 300s. // If the pod already specifies a toleration for taint `notReady:NoExecute` // or `unreachable:NoExecute`, the plugin won't touch it. -type plugin struct { +type Plugin struct { *admission.Handler } +var _ admission.MutationInterface = &Plugin{} + // NewDefaultTolerationSeconds creates a new instance of the DefaultTolerationSeconds admission controller -func NewDefaultTolerationSeconds() admission.Interface { - return &plugin{ +func NewDefaultTolerationSeconds() *Plugin { + return &Plugin{ Handler: admission.NewHandler(admission.Create, admission.Update), } } -func (p *plugin) Admit(attributes admission.Attributes) (err error) { +// Admit makes an admission decision based on the request attributes +func (p *Plugin) Admit(attributes admission.Attributes) (err error) { if attributes.GetResource().GroupResource() != api.Resource("pods") { return nil } diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/deny/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/admission/deny/BUILD index 49e9ec1e4777..c5eb8bf5af3d 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/deny/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/deny/BUILD @@ -9,15 +9,17 @@ load( go_library( name = "go_default_library", srcs = ["admission.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/deny", deps = ["//vendor/k8s.io/apiserver/pkg/admission:go_default_library"], ) go_test( name = "go_default_test", srcs = ["admission_test.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/deny", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/deny/admission.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/deny/admission.go index bef3429c2512..f6f4951bed77 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/deny/admission.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/deny/admission.go @@ -30,19 +30,30 @@ func Register(plugins *admission.Plugins) { }) } -// alwaysDeny is an implementation of admission.Interface which always says no to an admission request. +// AlwaysDeny is an implementation of admission.Interface which always says no to an admission request. // It is useful in unit tests to force an operation to be forbidden. -type alwaysDeny struct{} +type AlwaysDeny struct{} -func (alwaysDeny) Admit(a admission.Attributes) (err error) { +var _ admission.MutationInterface = AlwaysDeny{} +var _ admission.ValidationInterface = AlwaysDeny{} + +// Admit makes an admission decision based on the request attributes. +func (AlwaysDeny) Admit(a admission.Attributes) (err error) { + return admission.NewForbidden(a, errors.New("Admission control is denying all modifications")) +} + +// Validate makes an admission decision based on the request attributes. It is NOT allowed to mutate. 
+func (AlwaysDeny) Validate(a admission.Attributes) (err error) { return admission.NewForbidden(a, errors.New("Admission control is denying all modifications")) } -func (alwaysDeny) Handles(operation admission.Operation) bool { +// Handles returns true if this admission controller can handle the given operation +// where operation can be one of CREATE, UPDATE, DELETE, or CONNECT +func (AlwaysDeny) Handles(operation admission.Operation) bool { return true } // NewAlwaysDeny creates an always deny admission handler -func NewAlwaysDeny() admission.Interface { - return new(alwaysDeny) +func NewAlwaysDeny() *AlwaysDeny { + return new(AlwaysDeny) } diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/BUILD index e2b9f31d81a0..7dbb98cfc80e 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/BUILD @@ -1,7 +1,5 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", @@ -14,10 +12,10 @@ go_test( "admission_test.go", "cache_test.go", ], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/eventratelimit", library = ":go_default_library", - tags = ["automanaged"], deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//plugin/pkg/admission/eventratelimit/apis/eventratelimit:go_default_library", "//vendor/github.com/hashicorp/golang-lru:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", @@ -39,9 +37,9 @@ go_library( "doc.go", "limitenforcer.go", ], - tags = ["automanaged"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/eventratelimit", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//plugin/pkg/admission/eventratelimit/apis/eventratelimit:go_default_library", "//plugin/pkg/admission/eventratelimit/apis/eventratelimit/install:go_default_library", "//plugin/pkg/admission/eventratelimit/apis/eventratelimit/v1alpha1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/admission.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/admission.go index 4318d66c6885..7e0253196902 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/admission.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/admission.go @@ -21,7 +21,7 @@ import ( "k8s.io/apiserver/pkg/admission" "k8s.io/client-go/util/flowcontrol" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" eventratelimitapi "k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit" "k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/validation" ) @@ -45,8 +45,8 @@ func Register(plugins *admission.Plugins) { }) } -// eventRateLimitAdmission implements an admission controller that can enforce event rate limits -type eventRateLimitAdmission struct { +// Plugin implements an admission controller that can enforce event rate limits +type Plugin struct { *admission.Handler // limitEnforcers is the collection of limit enforcers. There is one limit enforcer for each // active limit type. As there are 4 limit types, the length of the array will be at most 4. 
@@ -54,8 +54,10 @@ type eventRateLimitAdmission struct { limitEnforcers []*limitEnforcer } +var _ admission.ValidationInterface = &Plugin{} + // newEventRateLimit configures an admission controller that can enforce event rate limits -func newEventRateLimit(config *eventratelimitapi.Configuration, clock flowcontrol.Clock) (admission.Interface, error) { +func newEventRateLimit(config *eventratelimitapi.Configuration, clock flowcontrol.Clock) (*Plugin, error) { limitEnforcers := make([]*limitEnforcer, 0, len(config.Limits)) for _, limitConfig := range config.Limits { enforcer, err := newLimitEnforcer(limitConfig, clock) @@ -65,7 +67,7 @@ func newEventRateLimit(config *eventratelimitapi.Configuration, clock flowcontro limitEnforcers = append(limitEnforcers, enforcer) } - eventRateLimitAdmission := &eventRateLimitAdmission{ + eventRateLimitAdmission := &Plugin{ Handler: admission.NewHandler(admission.Create, admission.Update), limitEnforcers: limitEnforcers, } @@ -73,8 +75,8 @@ func newEventRateLimit(config *eventratelimitapi.Configuration, clock flowcontro return eventRateLimitAdmission, nil } -// Admit makes admission decisions while enforcing event rate limits -func (a *eventRateLimitAdmission) Admit(attr admission.Attributes) (err error) { +// Validate makes admission decisions while enforcing event rate limits +func (a *Plugin) Validate(attr admission.Attributes) (err error) { // ignore all operations that do not correspond to an Event kind if attr.GetKind().GroupKind() != api.Kind("Event") { return nil diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/BUILD index 5c30dafeae59..7f723eadead9 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/BUILD @@ -1,7 +1,5 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", @@ -15,10 +13,9 @@ go_library( "types.go", "zz_generated.deepcopy.go", ], - tags = ["automanaged"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit", deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/doc.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/doc.go index c054066a9639..f6dfd6d0ddfb 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/doc.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/doc.go @@ -14,6 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package package eventratelimit diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/install/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/install/BUILD index a5d67e1170be..6e2a32ff65b4 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/install/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/install/BUILD @@ -1,7 +1,5 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", @@ -10,7 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["install.go"], - tags = ["automanaged"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/install", deps = [ "//plugin/pkg/admission/eventratelimit/apis/eventratelimit:go_default_library", "//plugin/pkg/admission/eventratelimit/apis/eventratelimit/v1alpha1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/v1alpha1/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/v1alpha1/BUILD index ec318571dd86..44cc2ec98e34 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/v1alpha1/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/v1alpha1/BUILD @@ -1,7 +1,5 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", @@ -18,7 +16,7 @@ go_library( "zz_generated.deepcopy.go", "zz_generated.defaults.go", ], - tags = ["automanaged"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/v1alpha1", deps = [ "//plugin/pkg/admission/eventratelimit/apis/eventratelimit:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/v1alpha1/doc.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/v1alpha1/doc.go index 79f1f8939cce..63ad75be6409 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/v1alpha1/doc.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/v1alpha1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:conversion-gen=k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit // +k8s:defaulter-gen=TypeMeta diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/v1alpha1/zz_generated.deepcopy.go index 49fc8779134e..3ed408c1a337 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/v1alpha1/zz_generated.deepcopy.go @@ -21,32 +21,9 @@ limitations under the License. 
package v1alpha1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Configuration).DeepCopyInto(out.(*Configuration)) - return nil - }, InType: reflect.TypeOf(&Configuration{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Limit).DeepCopyInto(out.(*Limit)) - return nil - }, InType: reflect.TypeOf(&Limit{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Configuration) DeepCopyInto(out *Configuration) { *out = *in diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/validation/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/validation/BUILD index 551eb5f51d52..1a272b783210 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/validation/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/validation/BUILD @@ -1,7 +1,5 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", @@ -11,7 +9,7 @@ load( go_library( name = "go_default_library", srcs = ["validation.go"], - tags = ["automanaged"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/validation", deps = [ "//plugin/pkg/admission/eventratelimit/apis/eventratelimit:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", @@ -34,7 +32,7 @@ filegroup( go_test( name = "go_default_test", srcs = ["validation_test.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/validation", library = ":go_default_library", - tags = ["automanaged"], deps = ["//plugin/pkg/admission/eventratelimit/apis/eventratelimit:go_default_library"], ) diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/zz_generated.deepcopy.go index a1bd74754104..006fcbc04ec0 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/zz_generated.deepcopy.go @@ -21,32 +21,9 @@ limitations under the License. package eventratelimit import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Configuration).DeepCopyInto(out.(*Configuration)) - return nil - }, InType: reflect.TypeOf(&Configuration{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Limit).DeepCopyInto(out.(*Limit)) - return nil - }, InType: reflect.TypeOf(&Limit{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Configuration) DeepCopyInto(out *Configuration) { *out = *in diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/limitenforcer.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/limitenforcer.go index 4fa5ee90f73c..3faff618f015 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/limitenforcer.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/limitenforcer.go @@ -25,7 +25,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apiserver/pkg/admission" "k8s.io/client-go/util/flowcontrol" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" eventratelimitapi "k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit" ) diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/exec/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/admission/exec/BUILD index c416af3b0024..61fe91099546 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/exec/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/exec/BUILD @@ -9,8 +9,9 @@ load( go_library( name = "go_default_library", srcs = ["admission.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/exec", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/kubeapiserver/admission:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", @@ -23,9 +24,10 @@ go_library( go_test( name = "go_default_test", srcs = ["admission_test.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/exec", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/client/clientset_generated/internalclientset/fake:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/exec/admission.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/exec/admission.go index f8f94b78f81f..456d47e07e79 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/exec/admission.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/exec/admission.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apiserver/pkg/admission" "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" kubeapiserveradmission "k8s.io/kubernetes/pkg/kubeapiserver/admission" ) @@ -42,9 +42,9 @@ func Register(plugins *admission.Plugins) { }) } -// denyExec is an implementation of admission.Interface which says no to a pod/exec on +// DenyExec is an implementation of admission.Interface which says no to a pod/exec on // a pod using host 
based configurations. -type denyExec struct { +type DenyExec struct { *admission.Handler client internalclientset.Interface @@ -54,12 +54,14 @@ type denyExec struct { privileged bool } -var _ = kubeapiserveradmission.WantsInternalKubeClientSet(&denyExec{}) +var _ admission.ValidationInterface = &DenyExec{} + +var _ = kubeapiserveradmission.WantsInternalKubeClientSet(&DenyExec{}) // NewDenyEscalatingExec creates a new admission controller that denies an exec operation on a pod // using host based configurations. -func NewDenyEscalatingExec() admission.Interface { - return &denyExec{ +func NewDenyEscalatingExec() *DenyExec { + return &DenyExec{ Handler: admission.NewHandler(admission.Connect), hostIPC: true, hostPID: true, @@ -70,8 +72,8 @@ func NewDenyEscalatingExec() admission.Interface { // NewDenyExecOnPrivileged creates a new admission controller that is only checking the privileged // option. This is for legacy support of the DenyExecOnPrivileged admission controller. Most // of the time NewDenyEscalatingExec should be preferred. -func NewDenyExecOnPrivileged() admission.Interface { - return &denyExec{ +func NewDenyExecOnPrivileged() *DenyExec { + return &DenyExec{ Handler: admission.NewHandler(admission.Connect), hostIPC: false, hostPID: false, @@ -79,7 +81,8 @@ func NewDenyExecOnPrivileged() admission.Interface { } } -func (d *denyExec) Admit(a admission.Attributes) (err error) { +// Validate makes an admission decision based on the request attributes +func (d *DenyExec) Validate(a admission.Attributes) (err error) { connectRequest, ok := a.GetObject().(*rest.ConnectRequest) if !ok { return errors.NewBadRequest("a connect request was received, but could not convert the request object.") @@ -129,11 +132,13 @@ func isPrivileged(pod *api.Pod) bool { return false } -func (d *denyExec) SetInternalKubeClientSet(client internalclientset.Interface) { +// SetInternalKubeClientSet implements the WantsInternalKubeClientSet interface. +func (d *DenyExec) SetInternalKubeClientSet(client internalclientset.Interface) { d.client = client } -func (d *denyExec) Validate() error { +// ValidateInitialization implements the InitializationValidator interface. 
+func (d *DenyExec) ValidateInitialization() error { if d.client == nil { return fmt.Errorf("missing client") } diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/extendedresourcetoleration/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/admission/extendedresourcetoleration/BUILD new file mode 100644 index 000000000000..882b966cd7bc --- /dev/null +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/extendedresourcetoleration/BUILD @@ -0,0 +1,42 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["admission.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/extendedresourcetoleration", + visibility = ["//visibility:public"], + deps = [ + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/helper:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["admission_test.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/extendedresourcetoleration", + library = ":go_default_library", + deps = [ + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/helper:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/extendedresourcetoleration/admission.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/extendedresourcetoleration/admission.go new file mode 100644 index 000000000000..410c18160bc8 --- /dev/null +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/extendedresourcetoleration/admission.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package extendedresourcetoleration + +import ( + "fmt" + "io" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/admission" + "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/helper" +) + +// Register is called by the apiserver to register the plugin factory. +func Register(plugins *admission.Plugins) { + plugins.Register("ExtendedResourceToleration", func(config io.Reader) (admission.Interface, error) { + return newExtendedResourceToleration(), nil + }) +} + +// newExtendedResourceToleration creates a new instance of the ExtendedResourceToleration admission controller. +func newExtendedResourceToleration() *plugin { + return &plugin{ + Handler: admission.NewHandler(admission.Create, admission.Update), + } +} + +// Make sure we are implementing the interface. 
+var _ admission.MutationInterface = &plugin{} + +type plugin struct { + *admission.Handler +} + +// Admit updates the toleration of a pod based on the resources requested by it. +// If an extended resource of name "example.com/device" is requested, it adds +// a toleration with key "example.com/device", operator "Exists" and effect "NoSchedule". +// The rationale for this is described in: +// https://github.com/kubernetes/kubernetes/issues/55080 +func (p *plugin) Admit(attributes admission.Attributes) error { + // Ignore all calls to subresources or resources other than pods. + if len(attributes.GetSubresource()) != 0 || attributes.GetResource().GroupResource() != core.Resource("pods") { + return nil + } + + pod, ok := attributes.GetObject().(*core.Pod) + if !ok { + return errors.NewBadRequest(fmt.Sprintf("expected *core.Pod but got %T", attributes.GetObject())) + } + + resources := sets.String{} + for _, container := range pod.Spec.Containers { + for resourceName := range container.Resources.Requests { + if helper.IsExtendedResourceName(resourceName) { + resources.Insert(string(resourceName)) + } + } + } + for _, container := range pod.Spec.InitContainers { + for resourceName := range container.Resources.Requests { + if helper.IsExtendedResourceName(resourceName) { + resources.Insert(string(resourceName)) + } + } + } + + // Doing .List() so that we get a stable sorted list. + // This allows us to test adding tolerations for multiple extended resources. + for _, resource := range resources.List() { + helper.AddOrUpdateTolerationInPod(pod, &core.Toleration{ + Key: resource, + Operator: core.TolerationOpExists, + Effect: core.TaintEffectNoSchedule, + }) + } + + return nil +} diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/gc/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/admission/gc/BUILD index d4adb0f97a47..e4d316030fe2 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/gc/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/gc/BUILD @@ -9,6 +9,7 @@ load( go_library( name = "go_default_library", srcs = ["gc_admission.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/gc", deps = [ "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", @@ -24,14 +25,17 @@ go_library( go_test( name = "go_default_test", srcs = ["gc_admission_test.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/gc", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/kubeapiserver/admission:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission/initializer:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/gc/gc_admission.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/gc/gc_admission.go index 5c4287925b65..7acd5cf48fdb 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/gc/gc_admission.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/gc/gc_admission.go @@ -63,6 +63,8 @@ type gcPermissionsEnforcement struct { whiteList 
[]whiteListItem } +var _ admission.ValidationInterface = &gcPermissionsEnforcement{} + // whiteListItem describes an entry in a whitelist ignored by gc permission enforcement. type whiteListItem struct { groupResource schema.GroupResource @@ -79,7 +81,7 @@ func (a *gcPermissionsEnforcement) isWhiteListed(groupResource schema.GroupResou return false } -func (a *gcPermissionsEnforcement) Admit(attributes admission.Attributes) (err error) { +func (a *gcPermissionsEnforcement) Validate(attributes admission.Attributes) (err error) { // // if the request is in the whitelist, we skip mutation checks for this resource. if a.isWhiteListed(attributes.GetResource().GroupResource(), attributes.GetSubresource()) { return nil @@ -102,8 +104,8 @@ func (a *gcPermissionsEnforcement) Admit(attributes admission.Attributes) (err e ResourceRequest: true, Path: "", } - allowed, reason, err := a.authorizer.Authorize(deleteAttributes) - if !allowed { + decision, reason, err := a.authorizer.Authorize(deleteAttributes) + if decision != authorizer.DecisionAllow { return admission.NewForbidden(attributes, fmt.Errorf("cannot set an ownerRef on a resource you can't delete: %v, %v", reason, err)) } @@ -120,8 +122,8 @@ func (a *gcPermissionsEnforcement) Admit(attributes admission.Attributes) (err e // resources. User needs to have delete permission on all the // matched Resources. for _, record := range records { - allowed, reason, err := a.authorizer.Authorize(record) - if !allowed { + decision, reason, err := a.authorizer.Authorize(record) + if decision != authorizer.DecisionAllow { return admission.NewForbidden(attributes, fmt.Errorf("cannot set blockOwnerDeletion if an ownerReference refers to a resource you can't set finalizers on: %v, %v", reason, err)) } } @@ -260,7 +262,7 @@ func (a *gcPermissionsEnforcement) SetRESTMapper(restMapper meta.RESTMapper) { a.restMapper = restMapper } -func (a *gcPermissionsEnforcement) Validate() error { +func (a *gcPermissionsEnforcement) ValidateInitialization() error { if a.authorizer == nil { return fmt.Errorf("missing authorizer") } diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/imagepolicy/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/admission/imagepolicy/BUILD index eaee9ef8625c..9ea88c749b9f 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/imagepolicy/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/imagepolicy/BUILD @@ -13,8 +13,10 @@ go_library( "config.go", "doc.go", ], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/imagepolicy", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/imagepolicy/install:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/imagepolicy/v1alpha1:go_default_library", @@ -35,9 +37,10 @@ go_test( "certs_test.go", "config_test.go", ], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/imagepolicy", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/imagepolicy/install:go_default_library", "//vendor/k8s.io/api/imagepolicy/v1alpha1:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/imagepolicy/admission.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/imagepolicy/admission.go index c123a116d7bf..6a0e21e6f582 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/imagepolicy/admission.go +++ 
b/vendor/k8s.io/kubernetes/plugin/pkg/admission/imagepolicy/admission.go @@ -28,17 +28,16 @@ import ( "github.com/golang/glog" + "k8s.io/api/imagepolicy/v1alpha1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime/schema" - kubeschema "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/cache" "k8s.io/apimachinery/pkg/util/yaml" "k8s.io/apiserver/pkg/admission" "k8s.io/apiserver/pkg/util/webhook" "k8s.io/client-go/rest" - - "k8s.io/api/imagepolicy/v1alpha1" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" + api "k8s.io/kubernetes/pkg/apis/core" // install the clientgo image policy API for use with api registry _ "k8s.io/kubernetes/pkg/apis/imagepolicy/install" @@ -59,8 +58,8 @@ func Register(plugins *admission.Plugins) { }) } -// imagePolicyWebhook is an implementation of admission.Interface. -type imagePolicyWebhook struct { +// Plugin is an implementation of admission.Interface. +type Plugin struct { *admission.Handler webhook *webhook.GenericWebhook responseCache *cache.LRUExpireCache @@ -70,7 +69,9 @@ type imagePolicyWebhook struct { defaultAllow bool } -func (a *imagePolicyWebhook) statusTTL(status v1alpha1.ImageReviewStatus) time.Duration { +var _ admission.ValidationInterface = &Plugin{} + +func (a *Plugin) statusTTL(status v1alpha1.ImageReviewStatus) time.Duration { if status.Allowed { return a.allowTTL } @@ -78,7 +79,7 @@ func (a *imagePolicyWebhook) statusTTL(status v1alpha1.ImageReviewStatus) time.D } // Filter out annotations that don't match *.image-policy.k8s.io/* -func (a *imagePolicyWebhook) filterAnnotations(allAnnotations map[string]string) map[string]string { +func (a *Plugin) filterAnnotations(allAnnotations map[string]string) map[string]string { annotations := make(map[string]string) for k, v := range allAnnotations { if strings.Contains(k, ".image-policy.k8s.io/") { @@ -89,7 +90,7 @@ func (a *imagePolicyWebhook) filterAnnotations(allAnnotations map[string]string) } // Function to call on webhook failure; behavior determined by defaultAllow flag -func (a *imagePolicyWebhook) webhookError(pod *api.Pod, attributes admission.Attributes, err error) error { +func (a *Plugin) webhookError(pod *api.Pod, attributes admission.Attributes, err error) error { if err != nil { glog.V(2).Infof("error contacting webhook backend: %s", err) if a.defaultAllow { @@ -108,13 +109,10 @@ func (a *imagePolicyWebhook) webhookError(pod *api.Pod, attributes admission.Att return nil } -func (a *imagePolicyWebhook) Admit(attributes admission.Attributes) (err error) { +// Validate makes an admission decision based on the request attributes +func (a *Plugin) Validate(attributes admission.Attributes) (err error) { // Ignore all calls to subresources or resources other than pods. 
- allowedResources := map[kubeschema.GroupResource]bool{ - api.Resource("pods"): true, - } - - if len(attributes.GetSubresource()) != 0 || !allowedResources[attributes.GetResource().GroupResource()] { + if attributes.GetSubresource() != "" || attributes.GetResource().GroupResource() != api.Resource("pods") { return nil } @@ -146,7 +144,7 @@ func (a *imagePolicyWebhook) Admit(attributes admission.Attributes) (err error) return nil } -func (a *imagePolicyWebhook) admitPod(pod *api.Pod, attributes admission.Attributes, review *v1alpha1.ImageReview) error { +func (a *Plugin) admitPod(pod *api.Pod, attributes admission.Attributes, review *v1alpha1.ImageReview) error { cacheKey, err := json.Marshal(review.Spec) if err != nil { return err @@ -183,7 +181,7 @@ func (a *imagePolicyWebhook) admitPod(pod *api.Pod, attributes admission.Attribu return nil } -// NewImagePolicyWebhook a new imagePolicyWebhook from the provided config file. +// NewImagePolicyWebhook a new ImagePolicyWebhook plugin from the provided config file. // The config file is specified by --admission-control-config-file and has the // following format for a webhook: // @@ -220,7 +218,11 @@ func (a *imagePolicyWebhook) admitPod(pod *api.Pod, attributes admission.Attribu // // For additional HTTP configuration, refer to the kubeconfig documentation // http://kubernetes.io/v1.1/docs/user-guide/kubeconfig-file.html. -func NewImagePolicyWebhook(configFile io.Reader) (admission.Interface, error) { +func NewImagePolicyWebhook(configFile io.Reader) (*Plugin, error) { + if configFile == nil { + return nil, fmt.Errorf("no config specified") + } + // TODO: move this to a versioned configuration file format var config AdmissionConfig d := yaml.NewYAMLOrJSONDecoder(configFile, 4096) @@ -234,11 +236,11 @@ func NewImagePolicyWebhook(configFile io.Reader) (admission.Interface, error) { return nil, err } - gw, err := webhook.NewGenericWebhook(api.Registry, api.Codecs, whConfig.KubeConfigFile, groupVersions, whConfig.RetryBackoff) + gw, err := webhook.NewGenericWebhook(legacyscheme.Registry, legacyscheme.Codecs, whConfig.KubeConfigFile, groupVersions, whConfig.RetryBackoff) if err != nil { return nil, err } - return &imagePolicyWebhook{ + return &Plugin{ Handler: admission.NewHandler(admission.Create, admission.Update), webhook: gw, responseCache: cache.NewLRUExpireCache(1024), diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/imagepolicy/config.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/imagepolicy/config.go index 558b290e8627..df34a4906b40 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/imagepolicy/config.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/imagepolicy/config.go @@ -86,7 +86,7 @@ func normalizeConfigDuration(name string, scale, value, min, max, defaultValue t value *= scale // check value is within range - if value <= min || value > max { + if value < min || value > max { return value, fmt.Errorf("valid value is between %v and %v, got %v", min, max, value) } return value, nil diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/initialization/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/admission/initialization/BUILD deleted file mode 100644 index 05ecb5abaaae..000000000000 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/initialization/BUILD +++ /dev/null @@ -1,59 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_library( - name = "go_default_library", - srcs = ["initialization.go"], - deps = [ - 
"//pkg/api:go_default_library", - "//pkg/kubeapiserver/admission/configuration:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/validation:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", - "//vendor/k8s.io/apiserver/pkg/features:go_default_library", - "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//vendor/k8s.io/client-go/kubernetes:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) - -go_test( - name = "go_default_test", - srcs = ["initialization_test.go"], - library = ":go_default_library", - deps = [ - "//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", - ], -) diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/initialresources/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/admission/initialresources/BUILD index 81a1c3403347..a10961168a1c 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/initialresources/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/initialresources/BUILD @@ -15,8 +15,9 @@ go_library( "hawkular.go", "influxdb.go", ], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/initialresources", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/cloud.google.com/go/compute/metadata:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/hawkular/hawkular-client-go/metrics:go_default_library", @@ -42,9 +43,10 @@ go_test( "hawkular_test.go", "influxdb_test.go", ], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/initialresources", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/github.com/stretchr/testify/require:go_default_library", "//vendor/golang.org/x/oauth2:go_default_library", "//vendor/golang.org/x/oauth2/google:go_default_library", diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/initialresources/admission.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/initialresources/admission.go index 99af24fad137..360885091aa8 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/initialresources/admission.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/initialresources/admission.go @@ -28,7 +28,7 @@ 
import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apiserver/pkg/admission" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) var ( @@ -57,15 +57,17 @@ func Register(plugins *admission.Plugins) { }) } -type initialResources struct { +type InitialResources struct { *admission.Handler source dataSource percentile int64 nsOnly bool } -func newInitialResources(source dataSource, percentile int64, nsOnly bool) admission.Interface { - return &initialResources{ +var _ admission.MutationInterface = &InitialResources{} + +func newInitialResources(source dataSource, percentile int64, nsOnly bool) *InitialResources { + return &InitialResources{ Handler: admission.NewHandler(admission.Create), source: source, percentile: percentile, @@ -73,7 +75,8 @@ func newInitialResources(source dataSource, percentile int64, nsOnly bool) admis } } -func (ir initialResources) Admit(a admission.Attributes) (err error) { +// Admit makes an admission decision based on the request attributes +func (ir InitialResources) Admit(a admission.Attributes) (err error) { // Ignore all calls to subresources or resources other than pods. if a.GetSubresource() != "" || a.GetResource().GroupResource() != api.Resource("pods") { return nil @@ -89,7 +92,7 @@ func (ir initialResources) Admit(a admission.Attributes) (err error) { // The method veryfies whether resources should be set for the given pod and // if there is estimation available the method fills Request field. -func (ir initialResources) estimateAndFillResourcesIfNotSet(pod *api.Pod) { +func (ir InitialResources) estimateAndFillResourcesIfNotSet(pod *api.Pod) { var annotations []string for i := range pod.Spec.InitContainers { annotations = append(annotations, ir.estimateContainer(pod, &pod.Spec.InitContainers[i], "init container")...) @@ -106,7 +109,7 @@ func (ir initialResources) estimateAndFillResourcesIfNotSet(pod *api.Pod) { } } -func (ir initialResources) estimateContainer(pod *api.Pod, c *api.Container, message string) []string { +func (ir InitialResources) estimateContainer(pod *api.Pod, c *api.Container, message string) []string { var annotations []string req := c.Resources.Requests cpu := ir.getEstimationIfNeeded(api.ResourceCPU, c, pod.ObjectMeta.Namespace) @@ -137,7 +140,7 @@ func (ir initialResources) estimateContainer(pod *api.Pod, c *api.Container, mes // getEstimationIfNeeded estimates compute resource for container if its corresponding // Request(min amount) and Limit(max amount) both are not specified. 
-func (ir initialResources) getEstimationIfNeeded(kind api.ResourceName, c *api.Container, ns string) *resource.Quantity { +func (ir InitialResources) getEstimationIfNeeded(kind api.ResourceName, c *api.Container, ns string) *resource.Quantity { requests := c.Resources.Requests limits := c.Resources.Limits var quantity *resource.Quantity @@ -152,7 +155,7 @@ func (ir initialResources) getEstimationIfNeeded(kind api.ResourceName, c *api.C } return quantity } -func (ir initialResources) getEstimation(kind api.ResourceName, c *api.Container, ns string) (*resource.Quantity, error) { +func (ir InitialResources) getEstimation(kind api.ResourceName, c *api.Container, ns string) (*resource.Quantity, error) { end := time.Now() start := end.Add(-week) var usage, samples int64 diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/initialresources/data_source.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/initialresources/data_source.go index 0d366c18c4cc..a25f5ee80078 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/initialresources/data_source.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/initialresources/data_source.go @@ -19,9 +19,8 @@ package initialresources import ( "flag" "fmt" + api "k8s.io/kubernetes/pkg/apis/core" "time" - - "k8s.io/kubernetes/pkg/api" ) var ( diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/initialresources/gcm.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/initialresources/gcm.go index ce981e3e8cc5..fbe64b5dbae1 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/initialresources/gcm.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/initialresources/gcm.go @@ -17,12 +17,11 @@ limitations under the License. package initialresources import ( + api "k8s.io/kubernetes/pkg/apis/core" "math" "sort" "time" - "k8s.io/kubernetes/pkg/api" - gce "cloud.google.com/go/compute/metadata" "golang.org/x/oauth2" "golang.org/x/oauth2/google" diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/initialresources/hawkular.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/initialresources/hawkular.go index fea6da8824ab..1f5aaea424e0 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/initialresources/hawkular.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/initialresources/hawkular.go @@ -30,7 +30,7 @@ import ( "github.com/golang/glog" "github.com/hawkular/hawkular-client-go/metrics" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/initialresources/influxdb.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/initialresources/influxdb.go index ad526ba37f5e..2986367a734e 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/initialresources/influxdb.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/initialresources/influxdb.go @@ -22,7 +22,7 @@ import ( "time" influxdb "github.com/influxdata/influxdb/client" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) const ( diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/limitranger/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/admission/limitranger/BUILD index 9307f02c3117..f858867d3a6a 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/limitranger/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/limitranger/BUILD @@ -12,8 +12,9 @@ go_library( "admission.go", "interfaces.go", ], + importpath = 
"k8s.io/kubernetes/plugin/pkg/admission/limitranger", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/client/informers/informers_generated/internalversion:go_default_library", "//pkg/client/listers/core/internalversion:go_default_library", @@ -32,9 +33,10 @@ go_library( go_test( name = "go_default_test", srcs = ["admission_test.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/limitranger", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/client/clientset_generated/internalclientset/fake:go_default_library", "//pkg/client/informers/informers_generated/internalversion:go_default_library", diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/limitranger/admission.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/limitranger/admission.go index 46486c37db79..73336b7f56d7 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/limitranger/admission.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/limitranger/admission.go @@ -32,7 +32,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apiserver/pkg/admission" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion" corelisters "k8s.io/kubernetes/pkg/client/listers/core/internalversion" @@ -50,8 +50,8 @@ func Register(plugins *admission.Plugins) { }) } -// limitRanger enforces usage limits on a per resource basis in the namespace -type limitRanger struct { +// LimitRanger enforces usage limits on a per resource basis in the namespace +type LimitRanger struct { *admission.Handler client internalclientset.Interface actions LimitRangerActions @@ -64,18 +64,22 @@ type limitRanger struct { liveTTL time.Duration } +var _ admission.MutationInterface = &LimitRanger{} +var _ admission.ValidationInterface = &LimitRanger{} +var _ kubeapiserveradmission.WantsInternalKubeInformerFactory = &LimitRanger{} + type liveLookupEntry struct { expiry time.Time items []*api.LimitRange } -func (l *limitRanger) SetInternalKubeInformerFactory(f informers.SharedInformerFactory) { +func (l *LimitRanger) SetInternalKubeInformerFactory(f informers.SharedInformerFactory) { limitRangeInformer := f.Core().InternalVersion().LimitRanges() l.SetReadyFunc(limitRangeInformer.Informer().HasSynced) l.lister = limitRangeInformer.Lister() } -func (l *limitRanger) Validate() error { +func (l *LimitRanger) ValidateInitialization() error { if l.lister == nil { return fmt.Errorf("missing limitRange lister") } @@ -86,7 +90,16 @@ func (l *limitRanger) Validate() error { } // Admit admits resources into cluster that do not violate any defined LimitRange in the namespace -func (l *limitRanger) Admit(a admission.Attributes) (err error) { +func (l *LimitRanger) Admit(a admission.Attributes) (err error) { + return l.runLimitFunc(a, l.actions.MutateLimit) +} + +// Validate admits resources into cluster that do not violate any defined LimitRange in the namespace +func (l *LimitRanger) Validate(a admission.Attributes) (err error) { + return l.runLimitFunc(a, l.actions.ValidateLimit) +} + +func (l *LimitRanger) runLimitFunc(a admission.Attributes, limitFn func(limitRange *api.LimitRange, kind string, 
obj runtime.Object) error) (err error) { if !l.actions.SupportsAttributes(a) { return nil } @@ -100,9 +113,31 @@ func (l *limitRanger) Admit(a admission.Attributes) (err error) { } } + items, err := l.GetLimitRanges(a) + if err != nil { + return err + } + + // ensure it meets each prescribed min/max + for i := range items { + limitRange := items[i] + + if !l.actions.SupportsLimit(limitRange) { + continue + } + + err = limitFn(limitRange, a.GetResource().Resource, a.GetObject()) + if err != nil { + return admission.NewForbidden(a, err) + } + } + return nil +} + +func (l *LimitRanger) GetLimitRanges(a admission.Attributes) ([]*api.LimitRange, error) { items, err := l.lister.LimitRanges(a.GetNamespace()).List(labels.Everything()) if err != nil { - return admission.NewForbidden(a, fmt.Errorf("unable to %s %v at this time because there was an error enforcing limit ranges", a.GetOperation(), a.GetResource())) + return nil, admission.NewForbidden(a, fmt.Errorf("unable to %s %v at this time because there was an error enforcing limit ranges", a.GetOperation(), a.GetResource())) } // if there are no items held in our indexer, check our live-lookup LRU, if that misses, do the live lookup to prime it. @@ -116,7 +151,7 @@ func (l *limitRanger) Admit(a admission.Attributes) (err error) { // throttling - see #22422 for details. liveList, err := l.client.Core().LimitRanges(a.GetNamespace()).List(metav1.ListOptions{}) if err != nil { - return admission.NewForbidden(a, err) + return nil, admission.NewForbidden(a, err) } newEntry := liveLookupEntry{expiry: time.Now().Add(l.liveTTL)} for i := range liveList.Items { @@ -133,24 +168,11 @@ func (l *limitRanger) Admit(a admission.Attributes) (err error) { } - // ensure it meets each prescribed min/max - for i := range items { - limitRange := items[i] - - if !l.actions.SupportsLimit(limitRange) { - continue - } - - err = l.actions.Limit(limitRange, a.GetResource().Resource, a.GetObject()) - if err != nil { - return admission.NewForbidden(a, err) - } - } - return nil + return items, nil } // NewLimitRanger returns an object that enforces limits based on the supplied limit function -func NewLimitRanger(actions LimitRangerActions) (admission.Interface, error) { +func NewLimitRanger(actions LimitRangerActions) (*LimitRanger, error) { liveLookupCache, err := lru.New(10000) if err != nil { return nil, err @@ -160,7 +182,7 @@ func NewLimitRanger(actions LimitRangerActions) (admission.Interface, error) { actions = &DefaultLimitRangerActions{} } - return &limitRanger{ + return &LimitRanger{ Handler: admission.NewHandler(admission.Create, admission.Update), actions: actions, liveLookupCache: liveLookupCache, @@ -168,10 +190,10 @@ func NewLimitRanger(actions LimitRangerActions) (admission.Interface, error) { }, nil } -var _ = kubeapiserveradmission.WantsInternalKubeInformerFactory(&limitRanger{}) -var _ = kubeapiserveradmission.WantsInternalKubeClientSet(&limitRanger{}) +var _ = kubeapiserveradmission.WantsInternalKubeInformerFactory(&LimitRanger{}) +var _ = kubeapiserveradmission.WantsInternalKubeClientSet(&LimitRanger{}) -func (a *limitRanger) SetInternalKubeClientSet(client internalclientset.Interface) { +func (a *LimitRanger) SetInternalKubeClientSet(client internalclientset.Interface) { a.client = client } @@ -399,12 +421,23 @@ var _ LimitRangerActions = &DefaultLimitRangerActions{} // Limit enforces resource requirements of incoming resources against enumerated constraints // on the LimitRange. 
It may modify the incoming object to apply default resource requirements // if not specified, and enumerated on the LimitRange -func (d *DefaultLimitRangerActions) Limit(limitRange *api.LimitRange, resourceName string, obj runtime.Object) error { +func (d *DefaultLimitRangerActions) MutateLimit(limitRange *api.LimitRange, resourceName string, obj runtime.Object) error { switch resourceName { case "pods": - return PodLimitFunc(limitRange, obj.(*api.Pod)) + return PodMutateLimitFunc(limitRange, obj.(*api.Pod)) + } + return nil +} + +// Limit enforces resource requirements of incoming resources against enumerated constraints +// on the LimitRange. It may modify the incoming object to apply default resource requirements +// if not specified, and enumerated on the LimitRange +func (d *DefaultLimitRangerActions) ValidateLimit(limitRange *api.LimitRange, resourceName string, obj runtime.Object) error { + switch resourceName { + case "pods": + return PodValidateLimitFunc(limitRange, obj.(*api.Pod)) case "persistentvolumeclaims": - return PersistentVolumeClaimLimitFunc(limitRange, obj.(*api.PersistentVolumeClaim)) + return PersistentVolumeClaimValidateLimitFunc(limitRange, obj.(*api.PersistentVolumeClaim)) } return nil } @@ -424,11 +457,11 @@ func (d *DefaultLimitRangerActions) SupportsLimit(limitRange *api.LimitRange) bo return true } -// PersistentVolumeClaimLimitFunc enforces storage limits for PVCs. +// PersistentVolumeClaimValidateLimitFunc enforces storage limits for PVCs. // Users request storage via pvc.Spec.Resources.Requests. Min/Max is enforced by an admin with LimitRange. // Claims will not be modified with default values because storage is a required part of pvc.Spec. // All storage enforced values *only* apply to pvc.Spec.Resources.Requests. -func PersistentVolumeClaimLimitFunc(limitRange *api.LimitRange, pvc *api.PersistentVolumeClaim) error { +func PersistentVolumeClaimValidateLimitFunc(limitRange *api.LimitRange, pvc *api.PersistentVolumeClaim) error { var errs []error for i := range limitRange.Spec.Limits { limit := limitRange.Spec.Limits[i] @@ -452,14 +485,19 @@ func PersistentVolumeClaimLimitFunc(limitRange *api.LimitRange, pvc *api.Persist return utilerrors.NewAggregate(errs) } -// PodLimitFunc enforces resource requirements enumerated by the pod against +// PodMutateLimitFunc sets resource requirements enumerated by the pod against // the specified LimitRange. The pod may be modified to apply default resource // requirements if not specified, and enumerated on the LimitRange -func PodLimitFunc(limitRange *api.LimitRange, pod *api.Pod) error { - var errs []error - +func PodMutateLimitFunc(limitRange *api.LimitRange, pod *api.Pod) error { defaultResources := defaultContainerResourceRequirements(limitRange) mergePodResourceRequirements(pod, &defaultResources) + return nil +} + +// PodValidateLimitFunc enforces resource requirements enumerated by the pod against +// the specified LimitRange. 
+func PodValidateLimitFunc(limitRange *api.LimitRange, pod *api.Pod) error { + var errs []error for i := range limitRange.Spec.Limits { limit := limitRange.Spec.Limits[i] diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/limitranger/interfaces.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/limitranger/interfaces.go index f42b3269b94d..4c520c68480b 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/limitranger/interfaces.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/limitranger/interfaces.go @@ -19,12 +19,14 @@ package limitranger import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/admission" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) type LimitRangerActions interface { - // Limit is a pluggable function to enforce limits on the object. - Limit(limitRange *api.LimitRange, kind string, obj runtime.Object) error + // MutateLimit is a pluggable function to set limits on the object. + MutateLimit(limitRange *api.LimitRange, kind string, obj runtime.Object) error + // ValidateLimits is a pluggable function to enforce limits on the object. + ValidateLimit(limitRange *api.LimitRange, kind string, obj runtime.Object) error // SupportsAttributes is a pluggable function to allow overridding what resources the limitranger // supports. SupportsAttributes(attr admission.Attributes) bool diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/namespace/autoprovision/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/admission/namespace/autoprovision/BUILD index 3343556bda3d..649aa56785b8 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/namespace/autoprovision/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/namespace/autoprovision/BUILD @@ -9,8 +9,9 @@ load( go_library( name = "go_default_library", srcs = ["admission.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/namespace/autoprovision", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/client/informers/informers_generated/internalversion:go_default_library", "//pkg/client/listers/core/internalversion:go_default_library", @@ -24,9 +25,10 @@ go_library( go_test( name = "go_default_test", srcs = ["admission_test.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/namespace/autoprovision", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/client/clientset_generated/internalclientset/fake:go_default_library", "//pkg/client/informers/informers_generated/internalversion:go_default_library", diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/namespace/autoprovision/admission.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/namespace/autoprovision/admission.go index f7f2b343aa4b..51e113af00e7 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/namespace/autoprovision/admission.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/namespace/autoprovision/admission.go @@ -23,7 +23,7 @@ import ( "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apiserver/pkg/admission" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion" corelisters 
"k8s.io/kubernetes/pkg/client/listers/core/internalversion" @@ -37,19 +37,21 @@ func Register(plugins *admission.Plugins) { }) } -// provision is an implementation of admission.Interface. +// Provision is an implementation of admission.Interface. // It looks at all incoming requests in a namespace context, and if the namespace does not exist, it creates one. // It is useful in deployments that do not want to restrict creation of a namespace prior to its usage. -type provision struct { +type Provision struct { *admission.Handler client internalclientset.Interface namespaceLister corelisters.NamespaceLister } -var _ = kubeapiserveradmission.WantsInternalKubeInformerFactory(&provision{}) -var _ = kubeapiserveradmission.WantsInternalKubeClientSet(&provision{}) +var _ admission.MutationInterface = &Provision{} +var _ = kubeapiserveradmission.WantsInternalKubeInformerFactory(&Provision{}) +var _ = kubeapiserveradmission.WantsInternalKubeClientSet(&Provision{}) -func (p *provision) Admit(a admission.Attributes) error { +// Admit makes an admission decision based on the request attributes +func (p *Provision) Admit(a admission.Attributes) error { // if we're here, then we've already passed authentication, so we're allowed to do what we're trying to do // if we're here, then the API server has found a route, which means that if we have a non-empty namespace // its a namespaced resource. @@ -87,23 +89,26 @@ func (p *provision) Admit(a admission.Attributes) error { } // NewProvision creates a new namespace provision admission control handler -func NewProvision() admission.Interface { - return &provision{ +func NewProvision() *Provision { + return &Provision{ Handler: admission.NewHandler(admission.Create), } } -func (p *provision) SetInternalKubeClientSet(client internalclientset.Interface) { +// SetInternalKubeClientSet implements the WantsInternalKubeClientSet interface. +func (p *Provision) SetInternalKubeClientSet(client internalclientset.Interface) { p.client = client } -func (p *provision) SetInternalKubeInformerFactory(f informers.SharedInformerFactory) { +// SetInternalKubeInformerFactory implements the WantsInternalKubeInformerFactory interface. +func (p *Provision) SetInternalKubeInformerFactory(f informers.SharedInformerFactory) { namespaceInformer := f.Core().InternalVersion().Namespaces() p.namespaceLister = namespaceInformer.Lister() p.SetReadyFunc(namespaceInformer.Informer().HasSynced) } -func (p *provision) Validate() error { +// ValidateInitialization implements the InitializationValidator interface. 
+func (p *Provision) ValidateInitialization() error { if p.namespaceLister == nil { return fmt.Errorf("missing namespaceLister") } diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/namespace/exists/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/admission/namespace/exists/BUILD index 881ab061bc5c..8da3f51b78fd 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/namespace/exists/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/namespace/exists/BUILD @@ -9,8 +9,9 @@ load( go_library( name = "go_default_library", srcs = ["admission.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/namespace/exists", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/client/informers/informers_generated/internalversion:go_default_library", "//pkg/client/listers/core/internalversion:go_default_library", @@ -24,9 +25,10 @@ go_library( go_test( name = "go_default_test", srcs = ["admission_test.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/namespace/exists", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/client/clientset_generated/internalclientset/fake:go_default_library", "//pkg/client/informers/informers_generated/internalversion:go_default_library", diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/namespace/exists/admission.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/namespace/exists/admission.go index 8bf1e6eccc50..51307482245e 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/namespace/exists/admission.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/namespace/exists/admission.go @@ -23,7 +23,7 @@ import ( "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apiserver/pkg/admission" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion" corelisters "k8s.io/kubernetes/pkg/client/listers/core/internalversion" @@ -37,19 +37,21 @@ func Register(plugins *admission.Plugins) { }) } -// exists is an implementation of admission.Interface. +// Exists is an implementation of admission.Interface. // It rejects all incoming requests in a namespace context if the namespace does not exist. // It is useful in deployments that want to enforce pre-declaration of a Namespace resource. -type exists struct { +type Exists struct { *admission.Handler client internalclientset.Interface namespaceLister corelisters.NamespaceLister } -var _ = kubeapiserveradmission.WantsInternalKubeInformerFactory(&exists{}) -var _ = kubeapiserveradmission.WantsInternalKubeClientSet(&exists{}) +var _ admission.ValidationInterface = &Exists{} +var _ = kubeapiserveradmission.WantsInternalKubeInformerFactory(&Exists{}) +var _ = kubeapiserveradmission.WantsInternalKubeClientSet(&Exists{}) -func (e *exists) Admit(a admission.Attributes) error { +// Validate makes an admission decision based on the request attributes +func (e *Exists) Validate(a admission.Attributes) error { // if we're here, then we've already passed authentication, so we're allowed to do what we're trying to do // if we're here, then the API server has found a route, which means that if we have a non-empty namespace // its a namespaced resource. 
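The hunks above and below all follow the same Kubernetes 1.9 admission refactor this vendor bump pulls in: plugins that change objects expose Admit (admission.MutationInterface), plugins that only check them expose Validate (admission.ValidationInterface), and the old readiness method Validate() is renamed ValidateInitialization() so the Validate name is free for request validation. As a rough, self-contained sketch of that shape, assuming toy stand-in types (Attributes, runChain, toyExists, fakeAttrs, knownNamespaces are illustrative, not the real k8s.io/apiserver types):

package main

import (
	"errors"
	"fmt"
)

// Attributes stands in for admission.Attributes; only what this sketch needs.
type Attributes interface {
	GetNamespace() string
}

// MutationInterface mirrors the Admit half of the split shown in these hunks.
type MutationInterface interface {
	Admit(a Attributes) error
}

// ValidationInterface mirrors the Validate half of the split.
type ValidationInterface interface {
	Validate(a Attributes) error
}

// InitializationValidator mirrors the Validate() -> ValidateInitialization()
// rename, which frees the Validate name for request validation.
type InitializationValidator interface {
	ValidateInitialization() error
}

// toyExists is a reduced version of the namespace "Exists" plugin: it only validates.
type toyExists struct {
	knownNamespaces map[string]bool // stand-in for the namespace lister
}

func (e *toyExists) Validate(a Attributes) error {
	ns := a.GetNamespace()
	if ns == "" {
		return nil // cluster-scoped request, nothing to check
	}
	if !e.knownNamespaces[ns] {
		return fmt.Errorf("namespace %q does not exist", ns)
	}
	return nil
}

func (e *toyExists) ValidateInitialization() error {
	if e.knownNamespaces == nil {
		return errors.New("missing namespace lister")
	}
	return nil
}

// runChain applies the mutation pass before the validation pass, the ordering
// that lets plugins such as podNodeSelector and podTolerationsPlugin (later in
// this diff) end Admit with a call to their own Validate.
func runChain(a Attributes, plugins []interface{}) error {
	for _, p := range plugins {
		if m, ok := p.(MutationInterface); ok {
			if err := m.Admit(a); err != nil {
				return err
			}
		}
	}
	for _, p := range plugins {
		if v, ok := p.(ValidationInterface); ok {
			if err := v.Validate(a); err != nil {
				return err
			}
		}
	}
	return nil
}

type fakeAttrs struct{ namespace string }

func (f fakeAttrs) GetNamespace() string { return f.namespace }

func main() {
	plugin := &toyExists{knownNamespaces: map[string]bool{"default": true}}
	if err := plugin.ValidateInitialization(); err != nil {
		fmt.Println("plugin not wired up:", err)
		return
	}
	fmt.Println(runChain(fakeAttrs{namespace: "default"}, []interface{}{plugin})) // <nil>
	fmt.Println(runChain(fakeAttrs{namespace: "missing"}, []interface{}{plugin})) // namespace "missing" does not exist
}

The exported Provision and Exists types in the surrounding hunks exist for the same reason the sketch uses interfaces: the vendored apiserver wires plugins by type-asserting against these interfaces, so the rename to ValidateInitialization is what keeps the old readiness check from being mistaken for request validation.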
@@ -82,23 +84,26 @@ func (e *exists) Admit(a admission.Attributes) error { } // NewExists creates a new namespace exists admission control handler -func NewExists() admission.Interface { - return &exists{ +func NewExists() *Exists { + return &Exists{ Handler: admission.NewHandler(admission.Create, admission.Update, admission.Delete), } } -func (e *exists) SetInternalKubeClientSet(client internalclientset.Interface) { +// SetInternalKubeClientSet implements the WantsInternalKubeClientSet interface. +func (e *Exists) SetInternalKubeClientSet(client internalclientset.Interface) { e.client = client } -func (e *exists) SetInternalKubeInformerFactory(f informers.SharedInformerFactory) { +// SetInternalKubeInformerFactory implements the WantsInternalKubeInformerFactory interface. +func (e *Exists) SetInternalKubeInformerFactory(f informers.SharedInformerFactory) { namespaceInformer := f.Core().InternalVersion().Namespaces() e.namespaceLister = namespaceInformer.Lister() e.SetReadyFunc(namespaceInformer.Informer().HasSynced) } -func (e *exists) Validate() error { +// ValidateInitialization implements the InitializationValidator interface. +func (e *Exists) ValidateInitialization() error { if e.namespaceLister == nil { return fmt.Errorf("missing namespaceLister") } diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/noderestriction/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/admission/noderestriction/BUILD index b81cb1c09fac..8d2777ea41f2 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/noderestriction/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/noderestriction/BUILD @@ -9,27 +9,32 @@ load( go_library( name = "go_default_library", srcs = ["admission.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/noderestriction", deps = [ - "//pkg/api:go_default_library", "//pkg/api/pod:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/policy:go_default_library", "//pkg/auth/nodeidentifier:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library", + "//pkg/features:go_default_library", "//pkg/kubeapiserver/admission:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", ], ) go_test( name = "go_default_test", srcs = ["admission_test.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/noderestriction", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/policy:go_default_library", "//pkg/auth/nodeidentifier:go_default_library", "//pkg/client/clientset_generated/internalclientset/fake:go_default_library", diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/noderestriction/admission.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/noderestriction/admission.go index f1f77a955695..d958dad3762b 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/noderestriction/admission.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/noderestriction/admission.go @@ -23,13 +23,16 @@ import ( apiequality "k8s.io/apimachinery/pkg/api/equality" 
"k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/diff" "k8s.io/apiserver/pkg/admission" - "k8s.io/kubernetes/pkg/api" + utilfeature "k8s.io/apiserver/pkg/util/feature" podutil "k8s.io/kubernetes/pkg/api/pod" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/policy" "k8s.io/kubernetes/pkg/auth/nodeidentifier" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" coreinternalversion "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" + "k8s.io/kubernetes/pkg/features" kubeapiserveradmission "k8s.io/kubernetes/pkg/kubeapiserver/admission" ) @@ -69,7 +72,7 @@ func (p *nodePlugin) SetInternalKubeClientSet(f internalclientset.Interface) { p.podsGetter = f.Core() } -func (p *nodePlugin) Validate() error { +func (p *nodePlugin) ValidateInitialization() error { if p.nodeIdentifier == nil { return fmt.Errorf("%s requires a node identifier", PluginName) } @@ -82,6 +85,7 @@ func (p *nodePlugin) Validate() error { var ( podResource = api.Resource("pods") nodeResource = api.Resource("nodes") + pvcResource = api.Resource("persistentvolumeclaims") ) func (c *nodePlugin) Admit(a admission.Attributes) error { @@ -113,6 +117,14 @@ func (c *nodePlugin) Admit(a admission.Attributes) error { case nodeResource: return c.admitNode(nodeName, a) + case pvcResource: + switch a.GetSubresource() { + case "status": + return c.admitPVCStatus(nodeName, a) + default: + return admission.NewForbidden(a, fmt.Errorf("may only update PVC status")) + } + default: return nil } @@ -189,7 +201,7 @@ func (c *nodePlugin) admitPodStatus(nodeName string, a admission.Attributes) err // require an existing pod pod, ok := a.GetOldObject().(*api.Pod) if !ok { - return admission.NewForbidden(a, fmt.Errorf("unexpected type %T", a.GetObject())) + return admission.NewForbidden(a, fmt.Errorf("unexpected type %T", a.GetOldObject())) } // only allow a node to update status of a pod bound to itself if pod.Spec.NodeName != nodeName { @@ -241,6 +253,50 @@ func (c *nodePlugin) admitPodEviction(nodeName string, a admission.Attributes) e } } +func (c *nodePlugin) admitPVCStatus(nodeName string, a admission.Attributes) error { + switch a.GetOperation() { + case admission.Update: + if !utilfeature.DefaultFeatureGate.Enabled(features.ExpandPersistentVolumes) { + return admission.NewForbidden(a, fmt.Errorf("node %q may not update persistentvolumeclaim metadata", nodeName)) + } + + oldPVC, ok := a.GetOldObject().(*api.PersistentVolumeClaim) + if !ok { + return admission.NewForbidden(a, fmt.Errorf("unexpected type %T", a.GetOldObject())) + } + + newPVC, ok := a.GetObject().(*api.PersistentVolumeClaim) + if !ok { + return admission.NewForbidden(a, fmt.Errorf("unexpected type %T", a.GetObject())) + } + + // make copies for comparison + oldPVC = oldPVC.DeepCopy() + newPVC = newPVC.DeepCopy() + + // zero out resourceVersion to avoid comparing differences, + // since the new object could leave it empty to indicate an unconditional update + oldPVC.ObjectMeta.ResourceVersion = "" + newPVC.ObjectMeta.ResourceVersion = "" + + oldPVC.Status.Capacity = nil + newPVC.Status.Capacity = nil + + oldPVC.Status.Conditions = nil + newPVC.Status.Conditions = nil + + // ensure no metadata changed. 
nodes should not be able to relabel, add finalizers/owners, etc + if !apiequality.Semantic.DeepEqual(oldPVC, newPVC) { + return admission.NewForbidden(a, fmt.Errorf("node %q may not update fields other than status.capacity and status.conditions: %v", nodeName, diff.ObjectReflectDiff(oldPVC, newPVC))) + } + + return nil + + default: + return admission.NewForbidden(a, fmt.Errorf("unexpected operation %q", a.GetOperation())) + } +} + func (c *nodePlugin) admitNode(nodeName string, a admission.Attributes) error { requestedName := a.GetName() if a.GetOperation() == admission.Create { diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/label/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/label/BUILD index 1e59fb935041..b88f19f863e0 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/label/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/label/BUILD @@ -12,8 +12,9 @@ go_library( "admission.go", "doc.go", ], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/label", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/cloudprovider:go_default_library", "//pkg/cloudprovider/providers/aws:go_default_library", "//pkg/cloudprovider/providers/gce:go_default_library", @@ -28,10 +29,12 @@ go_library( go_test( name = "go_default_test", srcs = ["admission_test.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/label", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/cloudprovider/providers/aws:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/label/admission.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/label/admission.go index 92dd76f9921d..86e1921fcd90 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/label/admission.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/label/admission.go @@ -24,7 +24,7 @@ import ( "github.com/golang/glog" "k8s.io/apiserver/pkg/admission" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/cloudprovider/providers/aws" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" @@ -52,6 +52,7 @@ type persistentVolumeLabel struct { gceCloudProvider *gce.GCECloud } +var _ admission.MutationInterface = &persistentVolumeLabel{} var _ kubeapiserveradmission.WantsCloudConfig = &persistentVolumeLabel{} // NewPersistentVolumeLabel returns an admission.Interface implementation which adds labels to PersistentVolume CREATE requests, diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/resize/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/resize/BUILD index 37289faba64b..90a12f1331c8 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/resize/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/resize/BUILD @@ -1,7 +1,5 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", @@ -11,10 +9,10 @@ load( go_test( name = 
"go_default_test", srcs = ["admission_test.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/resize", library = ":go_default_library", - tags = ["automanaged"], deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/storage:go_default_library", "//pkg/client/informers/informers_generated/internalversion:go_default_library", "//pkg/controller:go_default_library", @@ -29,10 +27,10 @@ go_test( go_library( name = "go_default_library", srcs = ["admission.go"], - tags = ["automanaged"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/resize", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/helper:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/helper:go_default_library", "//pkg/client/informers/informers_generated/internalversion:go_default_library", "//pkg/client/listers/core/internalversion:go_default_library", "//pkg/client/listers/storage/internalversion:go_default_library", diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/resize/admission.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/resize/admission.go index eb7b11cdac03..9909f5f3fb7d 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/resize/admission.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/resize/admission.go @@ -21,8 +21,8 @@ import ( "io" "k8s.io/apiserver/pkg/admission" - "k8s.io/kubernetes/pkg/api" - apihelper "k8s.io/kubernetes/pkg/api/helper" + api "k8s.io/kubernetes/pkg/apis/core" + apihelper "k8s.io/kubernetes/pkg/apis/core/helper" informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion" pvlister "k8s.io/kubernetes/pkg/client/listers/core/internalversion" storagelisters "k8s.io/kubernetes/pkg/client/listers/storage/internalversion" @@ -43,6 +43,7 @@ func Register(plugins *admission.Plugins) { } var _ admission.Interface = &persistentVolumeClaimResize{} +var _ admission.ValidationInterface = &persistentVolumeClaimResize{} var _ = kubeapiserveradmission.WantsInternalKubeInformerFactory(&persistentVolumeClaimResize{}) type persistentVolumeClaimResize struct { @@ -68,8 +69,8 @@ func (pvcr *persistentVolumeClaimResize) SetInternalKubeInformerFactory(f inform }) } -// Validate ensures lister is set. -func (pvcr *persistentVolumeClaimResize) Validate() error { +// ValidateInitialization ensures lister is set. 
+func (pvcr *persistentVolumeClaimResize) ValidateInitialization() error { if pvcr.pvLister == nil { return fmt.Errorf("missing persistent volume lister") } @@ -79,7 +80,7 @@ func (pvcr *persistentVolumeClaimResize) Validate() error { return nil } -func (pvcr *persistentVolumeClaimResize) Admit(a admission.Attributes) error { +func (pvcr *persistentVolumeClaimResize) Validate(a admission.Attributes) error { if a.GetResource().GroupResource() != api.Resource("persistentvolumeclaims") { return nil } @@ -148,9 +149,17 @@ func (pvcr *persistentVolumeClaimResize) allowResize(pvc, oldPvc *api.Persistent // checkVolumePlugin checks whether the volume plugin supports resize func (pvcr *persistentVolumeClaimResize) checkVolumePlugin(pv *api.PersistentVolume) bool { - if pv.Spec.Glusterfs != nil { + if pv.Spec.Glusterfs != nil || pv.Spec.Cinder != nil || pv.Spec.RBD != nil { + return true + } + + if pv.Spec.GCEPersistentDisk != nil { return true } - return false + if pv.Spec.AWSElasticBlockStore != nil { + return true + } + + return false } diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/persistentvolumeclaim/pvcprotection/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/admission/persistentvolumeclaim/pvcprotection/BUILD new file mode 100644 index 000000000000..e13f63e59349 --- /dev/null +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/persistentvolumeclaim/pvcprotection/BUILD @@ -0,0 +1,51 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["admission.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/persistentvolumeclaim/pvcprotection", + visibility = ["//visibility:public"], + deps = [ + "//pkg/apis/core:go_default_library", + "//pkg/client/informers/informers_generated/internalversion:go_default_library", + "//pkg/client/listers/core/internalversion:go_default_library", + "//pkg/features:go_default_library", + "//pkg/kubeapiserver/admission:go_default_library", + "//pkg/volume/util:go_default_library", + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["admission_test.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/persistentvolumeclaim/pvcprotection", + library = ":go_default_library", + deps = [ + "//pkg/apis/core:go_default_library", + "//pkg/client/informers/informers_generated/internalversion:go_default_library", + "//pkg/controller:go_default_library", + "//pkg/volume/util:go_default_library", + "//vendor/github.com/davecgh/go-spew/spew:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/persistentvolumeclaim/pvcprotection/admission.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/persistentvolumeclaim/pvcprotection/admission.go new file mode 100644 index 000000000000..218bca4b829c --- /dev/null +++ 
b/vendor/k8s.io/kubernetes/plugin/pkg/admission/persistentvolumeclaim/pvcprotection/admission.go @@ -0,0 +1,111 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pvcprotection + +import ( + "fmt" + "io" + + "github.com/golang/glog" + + admission "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/util/feature" + api "k8s.io/kubernetes/pkg/apis/core" + informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion" + corelisters "k8s.io/kubernetes/pkg/client/listers/core/internalversion" + "k8s.io/kubernetes/pkg/features" + kubeapiserveradmission "k8s.io/kubernetes/pkg/kubeapiserver/admission" + volumeutil "k8s.io/kubernetes/pkg/volume/util" +) + +const ( + // PluginName is the name of this admission controller plugin + PluginName = "PVCProtection" +) + +// Register registers a plugin +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + plugin := newPlugin() + return plugin, nil + }) +} + +// pvcProtectionPlugin holds state for and implements the admission plugin. +type pvcProtectionPlugin struct { + *admission.Handler + lister corelisters.PersistentVolumeClaimLister +} + +var _ admission.Interface = &pvcProtectionPlugin{} +var _ = kubeapiserveradmission.WantsInternalKubeInformerFactory(&pvcProtectionPlugin{}) + +// newPlugin creates a new admission plugin. +func newPlugin() *pvcProtectionPlugin { + return &pvcProtectionPlugin{ + Handler: admission.NewHandler(admission.Create), + } +} + +func (c *pvcProtectionPlugin) SetInternalKubeInformerFactory(f informers.SharedInformerFactory) { + informer := f.Core().InternalVersion().PersistentVolumeClaims() + c.lister = informer.Lister() + c.SetReadyFunc(informer.Informer().HasSynced) +} + +// ValidateInitialization ensures lister is set. +func (c *pvcProtectionPlugin) ValidateInitialization() error { + if c.lister == nil { + return fmt.Errorf("missing lister") + } + return nil +} + +// Admit sets finalizer on all PVCs. The finalizer is removed by +// PVCProtectionController when it's not referenced by any pod. +// +// This prevents users from deleting a PVC that's used by a running pod. 
+func (c *pvcProtectionPlugin) Admit(a admission.Attributes) error { + if !feature.DefaultFeatureGate.Enabled(features.PVCProtection) { + return nil + } + + if a.GetResource().GroupResource() != api.Resource("persistentvolumeclaims") { + return nil + } + + if len(a.GetSubresource()) != 0 { + return nil + } + + pvc, ok := a.GetObject().(*api.PersistentVolumeClaim) + // if we can't convert then we don't handle this object so just return + if !ok { + return nil + } + + for _, f := range pvc.Finalizers { + if f == volumeutil.PVCProtectionFinalizer { + // Finalizer is already present, nothing to do + return nil + } + } + + glog.V(4).Infof("adding PVC protection finalizer to %s/%s", pvc.Namespace, pvc.Name) + pvc.Finalizers = append(pvc.Finalizers, volumeutil.PVCProtectionFinalizer) + return nil +} diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/podnodeselector/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/admission/podnodeselector/BUILD index 3d589c9332d2..f0c03fb8c717 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/podnodeselector/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/podnodeselector/BUILD @@ -9,8 +9,9 @@ load( go_library( name = "go_default_library", srcs = ["admission.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/podnodeselector", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/client/informers/informers_generated/internalversion:go_default_library", "//pkg/client/listers/core/internalversion:go_default_library", @@ -28,9 +29,10 @@ go_library( go_test( name = "go_default_test", srcs = ["admission_test.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/podnodeselector", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/client/clientset_generated/internalclientset/fake:go_default_library", "//pkg/client/informers/informers_generated/internalversion:go_default_library", diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/podnodeselector/admission.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/podnodeselector/admission.go index 469286f1e9b6..84bb1d6ef146 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/podnodeselector/admission.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/podnodeselector/admission.go @@ -28,7 +28,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/yaml" "k8s.io/apiserver/pkg/admission" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion" corelisters "k8s.io/kubernetes/pkg/client/listers/core/internalversion" @@ -59,6 +59,8 @@ type podNodeSelector struct { clusterNodeSelectors map[string]string } +var _ admission.MutationInterface = &podNodeSelector{} +var _ admission.ValidationInterface = &podNodeSelector{} var _ = kubeapiserveradmission.WantsInternalKubeClientSet(&podNodeSelector{}) var _ = kubeapiserveradmission.WantsInternalKubeInformerFactory(&podNodeSelector{}) @@ -93,77 +95,105 @@ func readConfig(config io.Reader) *pluginConfig { // Admit enforces that pod and its namespace node label selectors matches at least a node in the cluster. 
func (p *podNodeSelector) Admit(a admission.Attributes) error { - resource := a.GetResource().GroupResource() - if resource != api.Resource("pods") { + if shouldIgnore(a) { return nil } - if a.GetSubresource() != "" { - // only run the checks below on pods proper and not subresources + updateInitialized, err := util.IsUpdatingInitializedObject(a) + if err != nil { + return err + } + if updateInitialized { + // node selector of an initialized pod is immutable return nil } + if !p.WaitForReady() { + return admission.NewForbidden(a, fmt.Errorf("not yet ready to handle request")) + } - obj := a.GetObject() - pod, ok := obj.(*api.Pod) - if !ok { - glog.Errorf("expected pod but got %s", a.GetKind().Kind) - return nil + resource := a.GetResource().GroupResource() + pod := a.GetObject().(*api.Pod) + namespaceNodeSelector, err := p.getNamespaceNodeSelectorMap(a.GetNamespace()) + if err != nil { + return err + } + + if labels.Conflicts(namespaceNodeSelector, labels.Set(pod.Spec.NodeSelector)) { + return errors.NewForbidden(resource, pod.Name, fmt.Errorf("pod node label selector conflicts with its namespace node label selector")) } + // Merge pod node selector = namespace node selector + current pod node selector + // second selector wins + podNodeSelectorLabels := labels.Merge(namespaceNodeSelector, pod.Spec.NodeSelector) + pod.Spec.NodeSelector = map[string]string(podNodeSelectorLabels) + return p.Validate(a) +} + +// Validate ensures that the pod node selector is allowed +func (p *podNodeSelector) Validate(a admission.Attributes) error { + if shouldIgnore(a) { + return nil + } if !p.WaitForReady() { return admission.NewForbidden(a, fmt.Errorf("not yet ready to handle request")) } - updateInitialized, err := util.IsUpdatingInitializedObject(a) + resource := a.GetResource().GroupResource() + pod := a.GetObject().(*api.Pod) + + namespaceNodeSelector, err := p.getNamespaceNodeSelectorMap(a.GetNamespace()) if err != nil { return err } - if updateInitialized { - // node selector of an initialized pod is immutable - return nil + if labels.Conflicts(namespaceNodeSelector, labels.Set(pod.Spec.NodeSelector)) { + return errors.NewForbidden(resource, pod.Name, fmt.Errorf("pod node label selector conflicts with its namespace node label selector")) + } + + // whitelist verification + whitelist, err := labels.ConvertSelectorToLabelsMap(p.clusterNodeSelectors[a.GetNamespace()]) + if err != nil { + return err + } + if !labels.AreLabelsInWhiteList(pod.Spec.NodeSelector, whitelist) { + return errors.NewForbidden(resource, pod.Name, fmt.Errorf("pod node label selector labels conflict with its namespace whitelist")) } - name := pod.Name - nsName := a.GetNamespace() - var namespace *api.Namespace + return nil +} - namespace, err = p.namespaceLister.Get(nsName) +func (p *podNodeSelector) getNamespaceNodeSelectorMap(namespaceName string) (labels.Set, error) { + namespace, err := p.namespaceLister.Get(namespaceName) if errors.IsNotFound(err) { - namespace, err = p.defaultGetNamespace(nsName) + namespace, err = p.defaultGetNamespace(namespaceName) if err != nil { if errors.IsNotFound(err) { - return err + return nil, err } - return errors.NewInternalError(err) + return nil, errors.NewInternalError(err) } } else if err != nil { - return errors.NewInternalError(err) + return nil, errors.NewInternalError(err) } - namespaceNodeSelector, err := p.getNodeSelectorMap(namespace) - if err != nil { - return err - } + return p.getNodeSelectorMap(namespace) +} - if labels.Conflicts(namespaceNodeSelector, 
labels.Set(pod.Spec.NodeSelector)) { - return errors.NewForbidden(resource, name, fmt.Errorf("pod node label selector conflicts with its namespace node label selector")) +func shouldIgnore(a admission.Attributes) bool { + resource := a.GetResource().GroupResource() + if resource != api.Resource("pods") { + return true } - - whitelist, err := labels.ConvertSelectorToLabelsMap(p.clusterNodeSelectors[namespace.Name]) - if err != nil { - return err + if a.GetSubresource() != "" { + // only run the checks below on pods proper and not subresources + return true } - // Merge pod node selector = namespace node selector + current pod node selector - podNodeSelectorLabels := labels.Merge(namespaceNodeSelector, pod.Spec.NodeSelector) - - // whitelist verification - if !labels.AreLabelsInWhiteList(podNodeSelectorLabels, whitelist) { - return errors.NewForbidden(resource, name, fmt.Errorf("pod node label selector labels conflict with its namespace whitelist")) + _, ok := a.GetObject().(*api.Pod) + if !ok { + glog.Errorf("expected pod but got %s", a.GetKind().Kind) + return true } - // Updated pod node selector = namespace node selector + current pod node selector - pod.Spec.NodeSelector = map[string]string(podNodeSelectorLabels) - return nil + return false } func NewPodNodeSelector(clusterNodeSelectors map[string]string) *podNodeSelector { @@ -183,7 +213,7 @@ func (p *podNodeSelector) SetInternalKubeInformerFactory(f informers.SharedInfor p.SetReadyFunc(namespaceInformer.Informer().HasSynced) } -func (p *podNodeSelector) Validate() error { +func (p *podNodeSelector) ValidateInitialization() error { if p.namespaceLister == nil { return fmt.Errorf("missing namespaceLister") } diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/podpreset/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/admission/podpreset/BUILD index a7210dc67ad2..ca7376afd716 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/podpreset/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/podpreset/BUILD @@ -9,9 +9,10 @@ load( go_test( name = "go_default_test", srcs = ["admission_test.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/podpreset", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/settings:go_default_library", "//pkg/client/clientset_generated/internalclientset/fake:go_default_library", "//pkg/client/informers/informers_generated/internalversion:go_default_library", @@ -27,9 +28,11 @@ go_test( go_library( name = "go_default_library", srcs = ["admission.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/podpreset", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/api/ref:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/settings:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/client/informers/informers_generated/internalversion:go_default_library", diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/podpreset/admission.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/podpreset/admission.go index 86bf707c8a4b..30752a2666c4 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/podpreset/admission.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/podpreset/admission.go @@ -29,8 +29,9 @@ import ( "k8s.io/apimachinery/pkg/labels" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apiserver/pkg/admission" - "k8s.io/kubernetes/pkg/api" + 
"k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/api/ref" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/settings" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion" @@ -58,6 +59,7 @@ type podPresetPlugin struct { lister settingslisters.PodPresetLister } +var _ admission.MutationInterface = &podPresetPlugin{} var _ = kubeapiserveradmission.WantsInternalKubeInformerFactory(&podPresetPlugin{}) var _ = kubeapiserveradmission.WantsInternalKubeClientSet(&podPresetPlugin{}) @@ -68,7 +70,7 @@ func NewPlugin() *podPresetPlugin { } } -func (plugin *podPresetPlugin) Validate() error { +func (plugin *podPresetPlugin) ValidateInitialization() error { if plugin.client == nil { return fmt.Errorf("%s requires a client", pluginName) } @@ -346,7 +348,7 @@ func mergeVolumes(volumes []api.Volume, podPresets []*settings.PodPreset) ([]api } func (c *podPresetPlugin) addEvent(pod *api.Pod, pip *settings.PodPreset, message string) { - ref, err := ref.GetReference(api.Scheme, pod) + ref, err := ref.GetReference(legacyscheme.Scheme, pod) if err != nil { glog.Errorf("pip %s: get reference for pod %s failed: %v", pip.GetName(), pod.GetName(), err) return diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/BUILD index 929165c92bf3..d3af4b936c48 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/BUILD @@ -9,9 +9,10 @@ load( go_test( name = "go_default_test", srcs = ["admission_test.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/client/clientset_generated/internalclientset/fake:go_default_library", "//pkg/client/informers/informers_generated/internalversion:go_default_library", @@ -32,10 +33,11 @@ go_library( "admission.go", "config.go", ], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/helper/qos:go_default_library", - "//pkg/api/v1:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/helper/qos:go_default_library", + "//pkg/apis/core/v1:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/client/informers/informers_generated/internalversion:go_default_library", "//pkg/client/listers/core/internalversion:go_default_library", diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/admission.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/admission.go index 655a2e365a02..14ff93f221e5 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/admission.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/admission.go @@ -27,9 +27,9 @@ import ( "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apiserver/pkg/admission" - "k8s.io/kubernetes/pkg/api" - qoshelper "k8s.io/kubernetes/pkg/api/helper/qos" - k8s_api_v1 "k8s.io/kubernetes/pkg/api/v1" + api "k8s.io/kubernetes/pkg/apis/core" + qoshelper 
"k8s.io/kubernetes/pkg/apis/core/helper/qos" + k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion" corelisters "k8s.io/kubernetes/pkg/client/listers/core/internalversion" @@ -57,6 +57,8 @@ const ( NSWLTolerations string = "scheduler.alpha.kubernetes.io/tolerationsWhitelist" ) +var _ admission.MutationInterface = &podTolerationsPlugin{} +var _ admission.ValidationInterface = &podTolerationsPlugin{} var _ = kubeapiserveradmission.WantsInternalKubeInformerFactory(&podTolerationsPlugin{}) type podTolerationsPlugin struct { @@ -77,19 +79,7 @@ type podTolerationsPlugin struct { // scheduler.alpha.kubernetes.io/defaultTolerations and scheduler.alpha.kubernetes.io/tolerationsWhitelist // annotations keys. func (p *podTolerationsPlugin) Admit(a admission.Attributes) error { - resource := a.GetResource().GroupResource() - if resource != api.Resource("pods") { - return nil - } - if a.GetSubresource() != "" { - // only run the checks below on pods proper and not subresources - return nil - } - - obj := a.GetObject() - pod, ok := obj.(*api.Pod) - if !ok { - glog.Errorf("expected pod but got %s", a.GetKind().Kind) + if shouldIgnore(a) { return nil } @@ -97,35 +87,21 @@ func (p *podTolerationsPlugin) Admit(a admission.Attributes) error { return admission.NewForbidden(a, fmt.Errorf("not yet ready to handle request")) } - nsName := a.GetNamespace() - namespace, err := p.namespaceLister.Get(nsName) - if errors.IsNotFound(err) { - // in case of latency in our caches, make a call direct to storage to verify that it truly exists or not - namespace, err = p.client.Core().Namespaces().Get(nsName, metav1.GetOptions{}) - if err != nil { - if errors.IsNotFound(err) { - return err - } - return errors.NewInternalError(err) - } - } else if err != nil { - return errors.NewInternalError(err) - } - + pod := a.GetObject().(*api.Pod) var finalTolerations []api.Toleration updateUninitialized, err := util.IsUpdatingUninitializedObject(a) if err != nil { return err } if a.GetOperation() == admission.Create || updateUninitialized { - ts, err := p.getNamespaceDefaultTolerations(namespace) + ts, err := p.getNamespaceDefaultTolerations(a.GetNamespace()) if err != nil { return err } // If the namespace has not specified its default tolerations, // fall back to cluster's default tolerations. - if len(ts) == 0 { + if ts == nil { ts = p.pluginConfig.Default } @@ -148,40 +124,71 @@ func (p *podTolerationsPlugin) Admit(a admission.Attributes) error { finalTolerations = pod.Spec.Tolerations } + if qoshelper.GetPodQOS(pod) != api.PodQOSBestEffort { + finalTolerations = tolerations.MergeTolerations(finalTolerations, []api.Toleration{ + { + Key: algorithm.TaintNodeMemoryPressure, + Operator: api.TolerationOpExists, + Effect: api.TaintEffectNoSchedule, + }, + }) + } + pod.Spec.Tolerations = finalTolerations + + return p.Validate(a) +} +func (p *podTolerationsPlugin) Validate(a admission.Attributes) error { + if shouldIgnore(a) { + return nil + } + + if !p.WaitForReady() { + return admission.NewForbidden(a, fmt.Errorf("not yet ready to handle request")) + } + // whitelist verification. 
- if len(finalTolerations) > 0 { - whitelist, err := p.getNamespaceTolerationsWhitelist(namespace) + pod := a.GetObject().(*api.Pod) + if len(pod.Spec.Tolerations) > 0 { + whitelist, err := p.getNamespaceTolerationsWhitelist(a.GetNamespace()) if err != nil { return err } // If the namespace has not specified its tolerations whitelist, // fall back to cluster's whitelist of tolerations. - if len(whitelist) == 0 { + if whitelist == nil { whitelist = p.pluginConfig.Whitelist } if len(whitelist) > 0 { // check if the merged pod tolerations satisfy its namespace whitelist - if !tolerations.VerifyAgainstWhitelist(finalTolerations, whitelist) { + if !tolerations.VerifyAgainstWhitelist(pod.Spec.Tolerations, whitelist) { return fmt.Errorf("pod tolerations (possibly merged with namespace default tolerations) conflict with its namespace whitelist") } } } - if qoshelper.GetPodQOS(pod) != api.PodQOSBestEffort { - finalTolerations = tolerations.MergeTolerations(finalTolerations, []api.Toleration{ - { - Key: algorithm.TaintNodeMemoryPressure, - Operator: api.TolerationOpExists, - Effect: api.TaintEffectNoSchedule, - }, - }) + return nil +} + +func shouldIgnore(a admission.Attributes) bool { + resource := a.GetResource().GroupResource() + if resource != api.Resource("pods") { + return true + } + if a.GetSubresource() != "" { + // only run the checks below on pods proper and not subresources + return true } - pod.Spec.Tolerations = finalTolerations - return nil + obj := a.GetObject() + _, ok := obj.(*api.Pod) + if !ok { + glog.Errorf("expected pod but got %s", a.GetKind().Kind) + return true + } + return false } func NewPodTolerationsPlugin(pluginConfig *pluginapi.Configuration) *podTolerationsPlugin { @@ -202,7 +209,7 @@ func (p *podTolerationsPlugin) SetInternalKubeInformerFactory(f informers.Shared } -func (p *podTolerationsPlugin) Validate() error { +func (p *podTolerationsPlugin) ValidateInitialization() error { if p.namespaceLister == nil { return fmt.Errorf("missing namespaceLister") } @@ -212,26 +219,72 @@ func (p *podTolerationsPlugin) Validate() error { return nil } -func (p *podTolerationsPlugin) getNamespaceDefaultTolerations(ns *api.Namespace) ([]api.Toleration, error) { +// in exceptional cases, this can result in two live calls, but once the cache catches up, that will stop. 
+func (p *podTolerationsPlugin) getNamespace(nsName string) (*api.Namespace, error) { + namespace, err := p.namespaceLister.Get(nsName) + if errors.IsNotFound(err) { + // in case of latency in our caches, make a call direct to storage to verify that it truly exists or not + namespace, err = p.client.Core().Namespaces().Get(nsName, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + return nil, err + } + return nil, errors.NewInternalError(err) + } + } else if err != nil { + return nil, errors.NewInternalError(err) + } + + return namespace, nil +} + +func (p *podTolerationsPlugin) getNamespaceDefaultTolerations(nsName string) ([]api.Toleration, error) { + ns, err := p.getNamespace(nsName) + if err != nil { + return nil, err + } return extractNSTolerations(ns, NSDefaultTolerations) } -func (p *podTolerationsPlugin) getNamespaceTolerationsWhitelist(ns *api.Namespace) ([]api.Toleration, error) { +func (p *podTolerationsPlugin) getNamespaceTolerationsWhitelist(nsName string) ([]api.Toleration, error) { + ns, err := p.getNamespace(nsName) + if err != nil { + return nil, err + } return extractNSTolerations(ns, NSWLTolerations) } +// extractNSTolerations extracts default or whitelist of tolerations from +// following namespace annotations keys: "scheduler.alpha.kubernetes.io/defaultTolerations" +// and "scheduler.alpha.kubernetes.io/tolerationsWhitelist". If these keys are +// unset (nil), extractNSTolerations returns nil. If the value to these +// keys are set to empty, an empty toleration is returned, otherwise +// configured tolerations are returned. func extractNSTolerations(ns *api.Namespace, key string) ([]api.Toleration, error) { + // if a namespace does not have any annotations + if len(ns.Annotations) == 0 { + return nil, nil + } + + // if NSWLTolerations or NSDefaultTolerations does not exist + if _, ok := ns.Annotations[key]; !ok { + return nil, nil + } + + // if value is set to empty + if len(ns.Annotations[key]) == 0 { + return []api.Toleration{}, nil + } + var v1Tolerations []v1.Toleration - if len(ns.Annotations) > 0 && ns.Annotations[key] != "" { - err := json.Unmarshal([]byte(ns.Annotations[key]), &v1Tolerations) - if err != nil { - return nil, err - } + err := json.Unmarshal([]byte(ns.Annotations[key]), &v1Tolerations) + if err != nil { + return nil, err } ts := make([]api.Toleration, len(v1Tolerations)) for i := range v1Tolerations { - if err := k8s_api_v1.Convert_v1_Toleration_To_api_Toleration(&v1Tolerations[i], &ts[i], nil); err != nil { + if err := k8s_api_v1.Convert_v1_Toleration_To_core_Toleration(&v1Tolerations[i], &ts[i], nil); err != nil { return nil, err } } diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/BUILD index c2cb617aaea2..ef58e5e1fb9a 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/BUILD @@ -13,10 +13,10 @@ go_library( "types.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/doc.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/doc.go index f09f9d2f5e32..023db9e2f621 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/doc.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/doc.go @@ -14,6 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package package podtolerationrestriction diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/install/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/install/BUILD index 0671d3e6af14..6b1210fe0356 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/install/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/install/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["install.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/install", deps = [ "//plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction:go_default_library", "//plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/types.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/types.go index 28ae2eae677b..d38c6e0613e9 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/types.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/types.go @@ -18,7 +18,7 @@ package podtolerationrestriction import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1/BUILD index 78f31098ac7e..9a48ebb8c001 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1/BUILD @@ -16,8 +16,9 @@ go_library( "zz_generated.deepcopy.go", "zz_generated.defaults.go", ], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1/doc.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1/doc.go index f439eced7a0e..f46bcbe7b5bc 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1/doc.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:conversion-gen=k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction // +k8s:defaulter-gen=TypeMeta diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1/zz_generated.conversion.go index 8a44886b82c1..d72f98f2fa5a 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1/zz_generated.conversion.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1/zz_generated.conversion.go @@ -24,7 +24,7 @@ import ( v1 "k8s.io/api/core/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - api "k8s.io/kubernetes/pkg/api" + core "k8s.io/kubernetes/pkg/apis/core" podtolerationrestriction "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction" unsafe "unsafe" ) @@ -43,8 +43,8 @@ func RegisterConversions(scheme *runtime.Scheme) error { } func autoConvert_v1alpha1_Configuration_To_podtolerationrestriction_Configuration(in *Configuration, out *podtolerationrestriction.Configuration, s conversion.Scope) error { - out.Default = *(*[]api.Toleration)(unsafe.Pointer(&in.Default)) - out.Whitelist = *(*[]api.Toleration)(unsafe.Pointer(&in.Whitelist)) + out.Default = *(*[]core.Toleration)(unsafe.Pointer(&in.Default)) + out.Whitelist = *(*[]core.Toleration)(unsafe.Pointer(&in.Whitelist)) return nil } diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1/zz_generated.deepcopy.go index 3f9e04a1d058..98ab9907278f 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1/zz_generated.deepcopy.go @@ -22,28 +22,9 @@ package v1alpha1 import ( v1 "k8s.io/api/core/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Configuration).DeepCopyInto(out.(*Configuration)) - return nil - }, InType: reflect.TypeOf(&Configuration{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Configuration) DeepCopyInto(out *Configuration) { *out = *in diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/validation/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/validation/BUILD index 7ef9df74fddd..42a0a1328b9a 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/validation/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/validation/BUILD @@ -9,8 +9,9 @@ load( go_library( name = "go_default_library", srcs = ["validation.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/validation", deps = [ - "//pkg/api/validation:go_default_library", + "//pkg/apis/core/validation:go_default_library", "//plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", ], @@ -32,9 +33,10 @@ filegroup( go_test( name = "go_default_test", srcs = ["validation_test.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/validation", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/validation/validation.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/validation/validation.go index 6312ed12e312..99a36ed3aafe 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/validation/validation.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/validation/validation.go @@ -20,7 +20,7 @@ import ( "fmt" "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/apis/core/validation" internalapi "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction" ) diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/zz_generated.deepcopy.go index 8760a045b0f8..afe5bccec21d 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/zz_generated.deepcopy.go @@ -21,43 +21,24 @@ limitations under the License. 
package podtolerationrestriction import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - api "k8s.io/kubernetes/pkg/api" - reflect "reflect" + core "k8s.io/kubernetes/pkg/apis/core" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Configuration).DeepCopyInto(out.(*Configuration)) - return nil - }, InType: reflect.TypeOf(&Configuration{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Configuration) DeepCopyInto(out *Configuration) { *out = *in out.TypeMeta = in.TypeMeta if in.Default != nil { in, out := &in.Default, &out.Default - *out = make([]api.Toleration, len(*in)) + *out = make([]core.Toleration, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.Whitelist != nil { in, out := &in.Whitelist, &out.Whitelist - *out = make([]api.Toleration, len(*in)) + *out = make([]core.Toleration, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/priority/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/admission/priority/BUILD index 2217543459f3..db47cedb0274 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/priority/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/priority/BUILD @@ -1,7 +1,5 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", @@ -11,10 +9,10 @@ load( go_test( name = "go_default_test", srcs = ["admission_test.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/priority", library = ":go_default_library", - tags = ["automanaged"], deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/scheduling:go_default_library", "//pkg/client/informers/informers_generated/internalversion:go_default_library", "//pkg/controller:go_default_library", @@ -29,9 +27,9 @@ go_test( go_library( name = "go_default_library", srcs = ["admission.go"], - tags = ["automanaged"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/priority", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/scheduling:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/client/informers/informers_generated/internalversion:go_default_library", diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/priority/admission.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/priority/admission.go index 517058e2404c..646ff176357f 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/priority/admission.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/priority/admission.go @@ -24,7 +24,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apiserver/pkg/admission" utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/scheduling" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" informers 
"k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion" @@ -55,8 +55,8 @@ func Register(plugins *admission.Plugins) { }) } -// priorityPlugin is an implementation of admission.Interface. -type priorityPlugin struct { +// PriorityPlugin is an implementation of admission.Interface. +type PriorityPlugin struct { *admission.Handler client internalclientset.Interface lister schedulinglisters.PriorityClassLister @@ -64,17 +64,20 @@ type priorityPlugin struct { globalDefaultPriority *int32 } -var _ = kubeapiserveradmission.WantsInternalKubeInformerFactory(&priorityPlugin{}) -var _ = kubeapiserveradmission.WantsInternalKubeClientSet(&priorityPlugin{}) +var _ admission.MutationInterface = &PriorityPlugin{} +var _ admission.ValidationInterface = &PriorityPlugin{} +var _ = kubeapiserveradmission.WantsInternalKubeInformerFactory(&PriorityPlugin{}) +var _ = kubeapiserveradmission.WantsInternalKubeClientSet(&PriorityPlugin{}) // NewPlugin creates a new priority admission plugin. -func NewPlugin() admission.Interface { - return &priorityPlugin{ +func NewPlugin() *PriorityPlugin { + return &PriorityPlugin{ Handler: admission.NewHandler(admission.Create, admission.Update, admission.Delete), } } -func (p *priorityPlugin) Validate() error { +// ValidateInitialization implements the InitializationValidator interface. +func (p *PriorityPlugin) ValidateInitialization() error { if p.client == nil { return fmt.Errorf("%s requires a client", pluginName) } @@ -84,11 +87,13 @@ func (p *priorityPlugin) Validate() error { return nil } -func (p *priorityPlugin) SetInternalKubeClientSet(client internalclientset.Interface) { +// SetInternalKubeClientSet implements the WantsInternalKubeClientSet interface. +func (p *PriorityPlugin) SetInternalKubeClientSet(client internalclientset.Interface) { p.client = client } -func (p *priorityPlugin) SetInternalKubeInformerFactory(f informers.SharedInformerFactory) { +// SetInternalKubeInformerFactory implements the WantsInternalKubeInformerFactory interface. +func (p *PriorityPlugin) SetInternalKubeInformerFactory(f informers.SharedInformerFactory) { priorityInformer := f.Scheduling().InternalVersion().PriorityClasses() p.lister = priorityInformer.Lister() p.SetReadyFunc(priorityInformer.Informer().HasSynced) @@ -96,28 +101,42 @@ func (p *priorityPlugin) SetInternalKubeInformerFactory(f informers.SharedInform var ( podResource = api.Resource("pods") - priorityClassResource = api.Resource("priorityclasses") + priorityClassResource = scheduling.Resource("priorityclasses") ) -// Admit checks Pods and PriorityClasses and admits or rejects them. It also resolves the priority of pods based on their PriorityClass. -func (p *priorityPlugin) Admit(a admission.Attributes) error { +// Admit checks Pods and admits or rejects them. It also resolves the priority of pods based on their PriorityClass. +// Note that pod validation mechanism prevents update of a pod priority. +func (p *PriorityPlugin) Admit(a admission.Attributes) error { operation := a.GetOperation() - // Ignore all calls to subresources or resources other than pods. - // Ignore all operations other than Create and Update. + // Ignore all calls to subresources if len(a.GetSubresource()) != 0 { return nil } switch a.GetResource().GroupResource() { case podResource: - if operation == admission.Create || operation == admission.Update { + if operation == admission.Create { return p.admitPod(a) } return nil + default: + return nil + } +} + +// Validate checks PriorityClasses and admits or rejects them. 
+func (p *PriorityPlugin) Validate(a admission.Attributes) error { + operation := a.GetOperation() + // Ignore all calls to subresources + if len(a.GetSubresource()) != 0 { + return nil + } + + switch a.GetResource().GroupResource() { case priorityClassResource: if operation == admission.Create || operation == admission.Update { - return p.admitPriorityClass(a) + return p.validatePriorityClass(a) } if operation == admission.Delete { p.invalidateCachedDefaultPriority() @@ -131,8 +150,7 @@ func (p *priorityPlugin) Admit(a admission.Attributes) error { } // admitPod makes sure a new pod does not set spec.Priority field. It also makes sure that the PriorityClassName exists if it is provided and resolves the pod priority from the PriorityClassName. -// Note that pod validation mechanism prevents update of a pod priority. -func (p *priorityPlugin) admitPod(a admission.Attributes) error { +func (p *PriorityPlugin) admitPod(a admission.Attributes) error { operation := a.GetOperation() pod, ok := a.GetObject().(*api.Pod) if !ok { @@ -173,8 +191,8 @@ func (p *priorityPlugin) admitPod(a admission.Attributes) error { return nil } -// admitPriorityClass ensures that the value field is not larger than the highest user definable priority. If the GlobalDefault is set, it ensures that there is no other PriorityClass whose GlobalDefault is set. -func (p *priorityPlugin) admitPriorityClass(a admission.Attributes) error { +// validatePriorityClass ensures that the value field is not larger than the highest user definable priority. If the GlobalDefault is set, it ensures that there is no other PriorityClass whose GlobalDefault is set. +func (p *PriorityPlugin) validatePriorityClass(a admission.Attributes) error { operation := a.GetOperation() pc, ok := a.GetObject().(*scheduling.PriorityClass) if !ok { @@ -204,7 +222,7 @@ func (p *priorityPlugin) admitPriorityClass(a admission.Attributes) error { return nil } -func (p *priorityPlugin) getDefaultPriorityClass() (*scheduling.PriorityClass, error) { +func (p *PriorityPlugin) getDefaultPriorityClass() (*scheduling.PriorityClass, error) { list, err := p.lister.List(labels.Everything()) if err != nil { return nil, err @@ -217,7 +235,7 @@ func (p *priorityPlugin) getDefaultPriorityClass() (*scheduling.PriorityClass, e return nil, nil } -func (p *priorityPlugin) getDefaultPriority() (int32, error) { +func (p *PriorityPlugin) getDefaultPriority() (int32, error) { // If global default priority is cached, return it. if p.globalDefaultPriority != nil { return *p.globalDefaultPriority, nil @@ -236,6 +254,6 @@ func (p *priorityPlugin) getDefaultPriority() (int32, error) { } // invalidateCachedDefaultPriority sets global default priority to nil to indicate that it should be looked up again. 
-func (p *priorityPlugin) invalidateCachedDefaultPriority() { +func (p *PriorityPlugin) invalidateCachedDefaultPriority() { p.globalDefaultPriority = nil } diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/BUILD index 256886eadd0f..d2518c15c88e 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/BUILD @@ -15,13 +15,15 @@ go_library( "doc.go", "resource_access.go", ], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/resourcequota", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/client/informers/informers_generated/internalversion:go_default_library", "//pkg/client/listers/core/internalversion:go_default_library", "//pkg/kubeapiserver/admission:go_default_library", "//pkg/quota:go_default_library", + "//pkg/quota/generic:go_default_library", "//pkg/util/reflector/prometheus:go_default_library", "//pkg/util/workqueue/prometheus:go_default_library", "//plugin/pkg/admission/resourcequota/apis/resourcequota:go_default_library", @@ -50,20 +52,18 @@ go_library( go_test( name = "go_default_test", srcs = ["admission_test.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/resourcequota", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/client/clientset_generated/internalclientset/fake:go_default_library", "//pkg/client/informers/informers_generated/internalversion:go_default_library", "//pkg/controller:go_default_library", - "//pkg/quota:go_default_library", - "//pkg/quota/generic:go_default_library", "//pkg/quota/install:go_default_library", "//plugin/pkg/admission/resourcequota/apis/resourcequota:go_default_library", "//vendor/github.com/hashicorp/golang-lru:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/admission.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/admission.go index cfb97af50b89..24f8b6354b92 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/admission.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/admission.go @@ -22,12 +22,13 @@ import ( "time" "k8s.io/apiserver/pkg/admission" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion" kubeapiserveradmission "k8s.io/kubernetes/pkg/kubeapiserver/admission" "k8s.io/kubernetes/pkg/quota" resourcequotaapi "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota" + resourcequotaapiv1alpha1 "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1" "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/validation" ) @@ -48,21 +49,26 @@ func Register(plugins *admission.Plugins) { } return NewResourceQuota(configuration, 5, make(chan 
struct{})) }) + + // add our config types + resourcequotaapi.AddToScheme(plugins.ConfigScheme) + resourcequotaapiv1alpha1.AddToScheme(plugins.ConfigScheme) } -// quotaAdmission implements an admission controller that can enforce quota constraints -type quotaAdmission struct { +// QuotaAdmission implements an admission controller that can enforce quota constraints +type QuotaAdmission struct { *admission.Handler - config *resourcequotaapi.Configuration - stopCh <-chan struct{} - registry quota.Registry - numEvaluators int - quotaAccessor *quotaAccessor - evaluator Evaluator + config *resourcequotaapi.Configuration + stopCh <-chan struct{} + quotaConfiguration quota.Configuration + numEvaluators int + quotaAccessor *quotaAccessor + evaluator Evaluator } -var _ = kubeapiserveradmission.WantsInternalKubeClientSet(&quotaAdmission{}) -var _ = kubeapiserveradmission.WantsQuotaRegistry(&quotaAdmission{}) +var _ admission.ValidationInterface = &QuotaAdmission{} +var _ = kubeapiserveradmission.WantsInternalKubeClientSet(&QuotaAdmission{}) +var _ = kubeapiserveradmission.WantsQuotaConfiguration(&QuotaAdmission{}) type liveLookupEntry struct { expiry time.Time @@ -72,13 +78,13 @@ type liveLookupEntry struct { // NewResourceQuota configures an admission controller that can enforce quota constraints // using the provided registry. The registry must have the capability to handle group/kinds that // are persisted by the server this admission controller is intercepting -func NewResourceQuota(config *resourcequotaapi.Configuration, numEvaluators int, stopCh <-chan struct{}) (admission.Interface, error) { +func NewResourceQuota(config *resourcequotaapi.Configuration, numEvaluators int, stopCh <-chan struct{}) (*QuotaAdmission, error) { quotaAccessor, err := newQuotaAccessor() if err != nil { return nil, err } - return &quotaAdmission{ + return &QuotaAdmission{ Handler: admission.NewHandler(admission.Create, admission.Update), stopCh: stopCh, numEvaluators: numEvaluators, @@ -87,21 +93,21 @@ func NewResourceQuota(config *resourcequotaapi.Configuration, numEvaluators int, }, nil } -func (a *quotaAdmission) SetInternalKubeClientSet(client internalclientset.Interface) { +func (a *QuotaAdmission) SetInternalKubeClientSet(client internalclientset.Interface) { a.quotaAccessor.client = client } -func (a *quotaAdmission) SetInternalKubeInformerFactory(f informers.SharedInformerFactory) { +func (a *QuotaAdmission) SetInternalKubeInformerFactory(f informers.SharedInformerFactory) { a.quotaAccessor.lister = f.Core().InternalVersion().ResourceQuotas().Lister() } -func (a *quotaAdmission) SetQuotaRegistry(r quota.Registry) { - a.registry = r - a.evaluator = NewQuotaEvaluator(a.quotaAccessor, a.registry, nil, a.config, a.numEvaluators, a.stopCh) +func (a *QuotaAdmission) SetQuotaConfiguration(c quota.Configuration) { + a.quotaConfiguration = c + a.evaluator = NewQuotaEvaluator(a.quotaAccessor, a.quotaConfiguration, nil, a.config, a.numEvaluators, a.stopCh) } -// Validate ensures an authorizer is set. -func (a *quotaAdmission) Validate() error { +// ValidateInitialization ensures an authorizer is set.
+func (a *QuotaAdmission) ValidateInitialization() error { if a.quotaAccessor == nil { return fmt.Errorf("missing quotaAccessor") } @@ -111,8 +117,8 @@ func (a *quotaAdmission) Validate() error { if a.quotaAccessor.lister == nil { return fmt.Errorf("missing quotaAccessor.lister") } - if a.registry == nil { - return fmt.Errorf("missing registry") + if a.quotaConfiguration == nil { + return fmt.Errorf("missing quotaConfiguration") } if a.evaluator == nil { return fmt.Errorf("missing evaluator") @@ -120,11 +126,15 @@ func (a *quotaAdmission) Validate() error { return nil } -// Admit makes admission decisions while enforcing quota -func (a *quotaAdmission) Admit(attr admission.Attributes) (err error) { +// Validate makes admission decisions while enforcing quota +func (a *QuotaAdmission) Validate(attr admission.Attributes) (err error) { // ignore all operations that correspond to sub-resource actions if attr.GetSubresource() != "" { return nil } + // ignore all operations that are not namespaced + if attr.GetNamespace() == "" { + return nil + } return a.evaluator.Evaluate(attr) } diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/BUILD index 5a73d3cc9e3d..7c43ea4a1244 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/BUILD @@ -13,9 +13,9 @@ go_library( "types.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota", deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/doc.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/doc.go index 51021f243b9f..dad2f7fdea9e 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/doc.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/doc.go @@ -14,6 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package package resourcequota diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/install/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/install/BUILD index abe442997350..61d6cc714681 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/install/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/install/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["install.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/install", deps = [ "//plugin/pkg/admission/resourcequota/apis/resourcequota:go_default_library", "//plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/BUILD index 0d3f4a995a26..0ffe75d6c0b5 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/BUILD @@ -16,6 +16,7 @@ go_library( "zz_generated.deepcopy.go", "zz_generated.defaults.go", ], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1", deps = [ "//plugin/pkg/admission/resourcequota/apis/resourcequota:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/doc.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/doc.go index cf6b0d22e895..435eac2f61c1 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/doc.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:conversion-gen=k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota // +k8s:defaulter-gen=TypeMeta diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/zz_generated.deepcopy.go index 7146088ff441..130bdd7021cc 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/zz_generated.deepcopy.go @@ -21,32 +21,9 @@ limitations under the License. package v1alpha1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Configuration).DeepCopyInto(out.(*Configuration)) - return nil - }, InType: reflect.TypeOf(&Configuration{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LimitedResource).DeepCopyInto(out.(*LimitedResource)) - return nil - }, InType: reflect.TypeOf(&LimitedResource{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Configuration) DeepCopyInto(out *Configuration) { *out = *in diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/validation/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/validation/BUILD index d5e5f6e1bde3..7067a2ed9c5b 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/validation/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/validation/BUILD @@ -9,6 +9,7 @@ load( go_library( name = "go_default_library", srcs = ["validation.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/validation", deps = [ "//plugin/pkg/admission/resourcequota/apis/resourcequota:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", @@ -31,6 +32,7 @@ filegroup( go_test( name = "go_default_test", srcs = ["validation_test.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/validation", library = ":go_default_library", deps = ["//plugin/pkg/admission/resourcequota/apis/resourcequota:go_default_library"], ) diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/zz_generated.deepcopy.go index 697a99623eef..5dc53616ce70 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/zz_generated.deepcopy.go @@ -21,32 +21,9 @@ limitations under the License. package resourcequota import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Configuration).DeepCopyInto(out.(*Configuration)) - return nil - }, InType: reflect.TypeOf(&Configuration{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LimitedResource).DeepCopyInto(out.(*LimitedResource)) - return nil - }, InType: reflect.TypeOf(&LimitedResource{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Configuration) DeepCopyInto(out *Configuration) { *out = *in diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/controller.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/controller.go index 3650f2d39ea0..832943d64710 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/controller.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/controller.go @@ -32,8 +32,9 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/admission" "k8s.io/client-go/util/workqueue" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/quota" + "k8s.io/kubernetes/pkg/quota/generic" _ "k8s.io/kubernetes/pkg/util/reflector/prometheus" // for reflector metric registration _ "k8s.io/kubernetes/pkg/util/workqueue/prometheus" // for workqueue metric registration resourcequotaapi "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota" @@ -51,6 +52,9 @@ type quotaEvaluator struct { // lockAcquisitionFunc acquires any required locks and returns a cleanup method to defer lockAcquisitionFunc func([]api.ResourceQuota) func() + // how quota was configured + quotaConfiguration quota.Configuration + // registry that knows how to measure usage for objects registry quota.Registry @@ -106,16 +110,18 @@ func newAdmissionWaiter(a admission.Attributes) *admissionWaiter { // NewQuotaEvaluator configures an admission controller that can enforce quota constraints // using the provided registry. The registry must have the capability to handle group/kinds that // are persisted by the server this admission controller is intercepting -func NewQuotaEvaluator(quotaAccessor QuotaAccessor, registry quota.Registry, lockAcquisitionFunc func([]api.ResourceQuota) func(), config *resourcequotaapi.Configuration, workers int, stopCh <-chan struct{}) Evaluator { +func NewQuotaEvaluator(quotaAccessor QuotaAccessor, quotaConfiguration quota.Configuration, lockAcquisitionFunc func([]api.ResourceQuota) func(), config *resourcequotaapi.Configuration, workers int, stopCh <-chan struct{}) Evaluator { // if we get a nil config, just create an empty default. if config == nil { config = &resourcequotaapi.Configuration{} } + return &quotaEvaluator{ quotaAccessor: quotaAccessor, lockAcquisitionFunc: lockAcquisitionFunc, - registry: registry, + quotaConfiguration: quotaConfiguration, + registry: generic.NewRegistry(quotaConfiguration.Evaluators()), queue: workqueue.NewNamed("admission_quota_controller"), work: map[string][]*admissionWaiter{}, @@ -365,9 +371,8 @@ func limitedByDefault(usage api.ResourceList, limitedResources []resourcequotaap // that capture what the usage would be if the request succeeded. It return an error if there is insufficient quota to satisfy the request func (e *quotaEvaluator) checkRequest(quotas []api.ResourceQuota, a admission.Attributes) ([]api.ResourceQuota, error) { namespace := a.GetNamespace() - evaluators := e.registry.Evaluators() - evaluator, found := evaluators[a.GetKind().GroupKind()] - if !found { + evaluator := e.registry.Get(a.GetResource().GroupResource()) + if evaluator == nil { return quotas, nil } @@ -516,18 +521,27 @@ func (e *quotaEvaluator) Evaluate(a admission.Attributes) error { go e.run() }) - // if we do not know how to evaluate use for this kind, just ignore - evaluators := e.registry.Evaluators() - evaluator, found := evaluators[a.GetKind().GroupKind()] - if !found { + // is this resource ignored?
+ gvr := a.GetResource() + gr := gvr.GroupResource() + if _, ok := e.quotaConfiguration.IgnoredResources()[gr]; ok { return nil } + + // if we do not know how to evaluate use for this resource, create an evaluator + evaluator := e.registry.Get(gr) + if evaluator == nil { + // create an object count evaluator if no evaluator previously registered + // note, we do not need aggregate usage here, so we pass a nil infomer func + evaluator = generic.NewObjectCountEvaluator(false, gr, nil, "") + e.registry.Add(evaluator) + glog.Infof("quota admission added evaluator for: %s", gr) + } // for this kind, check if the operation could mutate any quota resources // if no resources tracked by quota are impacted, then just return if !evaluator.Handles(a) { return nil } - waiter := newAdmissionWaiter(a) e.addWork(waiter) diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/resource_access.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/resource_access.go index edbaff90404b..9384770f514d 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/resource_access.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/resource_access.go @@ -25,7 +25,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apiserver/pkg/storage/etcd" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" corelisters "k8s.io/kubernetes/pkg/client/listers/core/internalversion" ) diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/security/podsecuritypolicy/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/admission/security/podsecuritypolicy/BUILD index a377da0fb287..a2f5467e0190 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/security/podsecuritypolicy/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/security/podsecuritypolicy/BUILD @@ -9,21 +9,23 @@ load( go_library( name = "go_default_library", srcs = ["admission.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/security/podsecuritypolicy", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", "//pkg/client/informers/informers_generated/internalversion:go_default_library", "//pkg/client/listers/extensions/internalversion:go_default_library", "//pkg/kubeapiserver/admission:go_default_library", + "//pkg/registry/rbac:go_default_library", "//pkg/security/podsecuritypolicy:go_default_library", "//pkg/security/podsecuritypolicy/util:go_default_library", - "//pkg/securitycontext:go_default_library", "//pkg/serviceaccount:go_default_library", - "//pkg/util/maps:go_default_library", "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission/initializer:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", ], @@ -32,23 +34,26 @@ go_library( go_test( name = "go_default_test", srcs = ["admission_test.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/security/podsecuritypolicy", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", - 
"//pkg/api/helper:go_default_library", + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/helper:go_default_library", "//pkg/apis/extensions:go_default_library", "//pkg/client/informers/informers_generated/internalversion:go_default_library", - "//pkg/client/listers/extensions/internalversion:go_default_library", "//pkg/controller:go_default_library", "//pkg/security/apparmor:go_default_library", "//pkg/security/podsecuritypolicy:go_default_library", "//pkg/security/podsecuritypolicy/seccomp:go_default_library", "//pkg/security/podsecuritypolicy/util:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", + "//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/security/podsecuritypolicy/admission.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/security/podsecuritypolicy/admission.go index 1a68ab3ffa74..15b94df75a00 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/security/podsecuritypolicy/admission.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/security/podsecuritypolicy/admission.go @@ -19,25 +19,27 @@ package podsecuritypolicy import ( "fmt" "io" + "sort" "strings" "github.com/golang/glog" + apiequality "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/apiserver/pkg/admission" + genericadmissioninit "k8s.io/apiserver/pkg/admission/initializer" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/authorization/authorizer" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/extensions" informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion" extensionslisters "k8s.io/kubernetes/pkg/client/listers/extensions/internalversion" kubeapiserveradmission "k8s.io/kubernetes/pkg/kubeapiserver/admission" + rbacregistry "k8s.io/kubernetes/pkg/registry/rbac" psp "k8s.io/kubernetes/pkg/security/podsecuritypolicy" psputil "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util" - sc "k8s.io/kubernetes/pkg/securitycontext" "k8s.io/kubernetes/pkg/serviceaccount" - "k8s.io/kubernetes/pkg/util/maps" ) const ( @@ -47,31 +49,27 @@ const ( // Register registers a plugin func Register(plugins *admission.Plugins) { plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { - plugin := NewPlugin(psp.NewSimpleStrategyFactory(), getMatchingPolicies, true) + plugin := newPlugin(psp.NewSimpleStrategyFactory(), true) return plugin, nil }) } -// PSPMatchFn allows plugging in how PSPs are matched against user information. -type PSPMatchFn func(lister extensionslisters.PodSecurityPolicyLister, user user.Info, sa user.Info, authz authorizer.Authorizer, namespace string) ([]*extensions.PodSecurityPolicy, error) - -// podSecurityPolicyPlugin holds state for and implements the admission plugin. 
-type podSecurityPolicyPlugin struct { +// PodSecurityPolicyPlugin holds state for and implements the admission plugin. +type PodSecurityPolicyPlugin struct { *admission.Handler strategyFactory psp.StrategyFactory - pspMatcher PSPMatchFn failOnNoPolicies bool authz authorizer.Authorizer lister extensionslisters.PodSecurityPolicyLister } // SetAuthorizer sets the authorizer. -func (plugin *podSecurityPolicyPlugin) SetAuthorizer(authz authorizer.Authorizer) { +func (plugin *PodSecurityPolicyPlugin) SetAuthorizer(authz authorizer.Authorizer) { plugin.authz = authz } -// Validate ensures an authorizer is set. -func (plugin *podSecurityPolicyPlugin) Validate() error { +// ValidateInitialization ensures an authorizer is set. +func (plugin *PodSecurityPolicyPlugin) ValidateInitialization() error { if plugin.authz == nil { return fmt.Errorf("%s requires an authorizer", PluginName) } @@ -81,21 +79,21 @@ func (plugin *podSecurityPolicyPlugin) Validate() error { return nil } -var _ admission.Interface = &podSecurityPolicyPlugin{} -var _ kubeapiserveradmission.WantsAuthorizer = &podSecurityPolicyPlugin{} -var _ kubeapiserveradmission.WantsInternalKubeInformerFactory = &podSecurityPolicyPlugin{} +var _ admission.MutationInterface = &PodSecurityPolicyPlugin{} +var _ admission.ValidationInterface = &PodSecurityPolicyPlugin{} +var _ genericadmissioninit.WantsAuthorizer = &PodSecurityPolicyPlugin{} +var _ kubeapiserveradmission.WantsInternalKubeInformerFactory = &PodSecurityPolicyPlugin{} -// NewPlugin creates a new PSP admission plugin. -func NewPlugin(strategyFactory psp.StrategyFactory, pspMatcher PSPMatchFn, failOnNoPolicies bool) *podSecurityPolicyPlugin { - return &podSecurityPolicyPlugin{ +// newPlugin creates a new PSP admission plugin. +func newPlugin(strategyFactory psp.StrategyFactory, failOnNoPolicies bool) *PodSecurityPolicyPlugin { + return &PodSecurityPolicyPlugin{ Handler: admission.NewHandler(admission.Create, admission.Update), strategyFactory: strategyFactory, - pspMatcher: pspMatcher, failOnNoPolicies: failOnNoPolicies, } } -func (a *podSecurityPolicyPlugin) SetInternalKubeInformerFactory(f informers.SharedInformerFactory) { +func (a *PodSecurityPolicyPlugin) SetInternalKubeInformerFactory(f informers.SharedInformerFactory) { podSecurityPolicyInformer := f.Extensions().InternalVersion().PodSecurityPolicies() a.lister = podSecurityPolicyInformer.Lister() a.SetReadyFunc(podSecurityPolicyInformer.Informer().HasSynced) @@ -109,21 +107,94 @@ func (a *podSecurityPolicyPlugin) SetInternalKubeInformerFactory(f informers.Sha // 3. Try to generate and validate a PSP with providers. If we find one then admit the pod // with the validated PSP. If we don't find any reject the pod and give all errors from the // failed attempts. -func (c *podSecurityPolicyPlugin) Admit(a admission.Attributes) error { - if a.GetResource().GroupResource() != api.Resource("pods") { +func (c *PodSecurityPolicyPlugin) Admit(a admission.Attributes) error { + if ignore, err := shouldIgnore(a); err != nil { + return err + } else if ignore { return nil } - if len(a.GetSubresource()) != 0 { + // only mutate if this is a CREATE request. On updates we only validate. + // TODO(liggitt): allow spec mutation during initializing updates? 
+ if a.GetOperation() != admission.Create { + return nil + } + + pod := a.GetObject().(*api.Pod) + + // compute the context + allowedPod, pspName, validationErrs, err := c.computeSecurityContext(a, pod, true) + if err != nil { + return admission.NewForbidden(a, err) + } + if allowedPod != nil { + *pod = *allowedPod + // annotate and accept the pod + glog.V(4).Infof("pod %s (generate: %s) in namespace %s validated against provider %s", pod.Name, pod.GenerateName, a.GetNamespace(), pspName) + if pod.ObjectMeta.Annotations == nil { + pod.ObjectMeta.Annotations = map[string]string{} + } + pod.ObjectMeta.Annotations[psputil.ValidatedPSPAnnotation] = pspName + return nil + } + + // we didn't validate against any provider, reject the pod and give the errors for each attempt + glog.V(4).Infof("unable to validate pod %s (generate: %s) in namespace %s against any pod security policy: %v", pod.Name, pod.GenerateName, a.GetNamespace(), validationErrs) + return admission.NewForbidden(a, fmt.Errorf("unable to validate against any pod security policy: %v", validationErrs)) +} + +func (c *PodSecurityPolicyPlugin) Validate(a admission.Attributes) error { + if ignore, err := shouldIgnore(a); err != nil { + return err + } else if ignore { return nil } - pod, ok := a.GetObject().(*api.Pod) - // if we can't convert then we don't handle this object so just return - if !ok { + pod := a.GetObject().(*api.Pod) + + // compute the context. Mutation is not allowed. + allowedPod, _, validationErrs, err := c.computeSecurityContext(a, pod, false) + if err != nil { + return admission.NewForbidden(a, err) + } + if apiequality.Semantic.DeepEqual(pod, allowedPod) { return nil } + // we didn't validate against any provider, reject the pod and give the errors for each attempt + glog.V(4).Infof("unable to validate pod %s (generate: %s) in namespace %s against any pod security policy: %v", pod.Name, pod.GenerateName, a.GetNamespace(), validationErrs) + return admission.NewForbidden(a, fmt.Errorf("unable to validate against any pod security policy: %v", validationErrs)) +} + +func shouldIgnore(a admission.Attributes) (bool, error) { + if a.GetResource().GroupResource() != api.Resource("pods") { + return true, nil + } + if len(a.GetSubresource()) != 0 { + return true, nil + } + + // if we can't convert then fail closed since we've already checked that this is supposed to be a pod object. + // this shouldn't normally happen during admission but could happen if an integrator passes a versioned + // pod object rather than an internal object. + if _, ok := a.GetObject().(*api.Pod); !ok { + return false, admission.NewForbidden(a, fmt.Errorf("unexpected type %T", a.GetObject())) + } + + // if this is an update, see if we are only updating the ownerRef/finalizers. Garbage collection does this + // and we should allow it in general, since you had the power to update and the power to delete. + // The worst that happens is that you delete something, but you aren't controlling the privileged object itself + if a.GetOperation() == admission.Update && rbacregistry.IsOnlyMutatingGCFields(a.GetObject(), a.GetOldObject(), apiequality.Semantic) { + return true, nil + } + + return false, nil +} + +// computeSecurityContext derives a valid security context while trying to avoid any changes to the given pod. I.e. +// if there is a matching policy with the same security context as given, it will be reused. If there is no +// matching policy the returned pod will be nil and the pspName empty. 
+func (c *PodSecurityPolicyPlugin) computeSecurityContext(a admission.Attributes, pod *api.Pod, specMutationAllowed bool) (*api.Pod, string, field.ErrorList, error) { // get all constraints that are usable by the user glog.V(4).Infof("getting pod security policies for pod %s (generate: %s)", pod.Name, pod.GenerateName) var saInfo user.Info @@ -131,131 +202,131 @@ func (c *podSecurityPolicyPlugin) Admit(a admission.Attributes) error { saInfo = serviceaccount.UserInfo(a.GetNamespace(), pod.Spec.ServiceAccountName, "") } - matchedPolicies, err := c.pspMatcher(c.lister, a.GetUserInfo(), saInfo, c.authz, a.GetNamespace()) + policies, err := c.lister.List(labels.Everything()) if err != nil { - return admission.NewForbidden(a, err) + return nil, "", nil, err } // if we have no policies and want to succeed then return. Otherwise we'll end up with no // providers and fail with "unable to validate against any pod security policy" below. - if len(matchedPolicies) == 0 && !c.failOnNoPolicies { - return nil + if len(policies) == 0 && !c.failOnNoPolicies { + return pod, "", nil, nil } - providers, errs := c.createProvidersFromPolicies(matchedPolicies, pod.Namespace) - logProviders(pod, providers, errs) + // sort by name to make order deterministic + // TODO(liggitt): add priority field to allow admins to bucket differently + sort.SliceStable(policies, func(i, j int) bool { + return strings.Compare(policies[i].Name, policies[j].Name) < 0 + }) + + providers, errs := c.createProvidersFromPolicies(policies, pod.Namespace) + for _, err := range errs { + glog.V(4).Infof("provider creation error: %v", err) + } if len(providers) == 0 { - return admission.NewForbidden(a, fmt.Errorf("no providers available to validate pod request")) + return nil, "", nil, fmt.Errorf("no providers available to validate pod request") } - // all containers in a single pod must validate under a single provider or we will reject the request - validationErrs := field.ErrorList{} + var ( + allowedMutatedPod *api.Pod + allowingMutatingPSP string + // Map of PSP name to associated validation errors. + validationErrs = map[string]field.ErrorList{} + ) + for _, provider := range providers { - if errs := assignSecurityContext(provider, pod, field.NewPath(fmt.Sprintf("provider %s: ", provider.GetPSPName()))); len(errs) > 0 { - validationErrs = append(validationErrs, errs...) 
+ podCopy := pod.DeepCopy() + + if errs := assignSecurityContext(provider, podCopy, field.NewPath(fmt.Sprintf("provider %s: ", provider.GetPSPName()))); len(errs) > 0 { + validationErrs[provider.GetPSPName()] = errs continue } - // the entire pod validated, annotate and accept the pod - glog.V(4).Infof("pod %s (generate: %s) validated against provider %s", pod.Name, pod.GenerateName, provider.GetPSPName()) - if pod.ObjectMeta.Annotations == nil { - pod.ObjectMeta.Annotations = map[string]string{} + // the entire pod validated + + mutated := !apiequality.Semantic.DeepEqual(pod, podCopy) + if mutated && !specMutationAllowed { + continue + } + + if !isAuthorizedForPolicy(a.GetUserInfo(), saInfo, a.GetNamespace(), provider.GetPSPName(), c.authz) { + continue + } + + switch { + case !mutated: + // if it validated without mutating anything, use this result + return podCopy, provider.GetPSPName(), nil, nil + + case specMutationAllowed && allowedMutatedPod == nil: + // if mutation is allowed and this is the first PSP to allow the pod, remember it, + // but continue to see if another PSP allows without mutating + allowedMutatedPod = podCopy + allowingMutatingPSP = provider.GetPSPName() } - pod.ObjectMeta.Annotations[psputil.ValidatedPSPAnnotation] = provider.GetPSPName() - return nil } - // we didn't validate against any provider, reject the pod and give the errors for each attempt - glog.V(4).Infof("unable to validate pod %s (generate: %s) against any pod security policy: %v", pod.Name, pod.GenerateName, validationErrs) - return admission.NewForbidden(a, fmt.Errorf("unable to validate against any pod security policy: %v", validationErrs)) + if allowedMutatedPod != nil { + return allowedMutatedPod, allowingMutatingPSP, nil, nil + } + + // Pod is rejected. Filter the validation errors to only include errors from authorized PSPs. + aggregate := field.ErrorList{} + for psp, errs := range validationErrs { + if isAuthorizedForPolicy(a.GetUserInfo(), saInfo, a.GetNamespace(), psp, c.authz) { + aggregate = append(aggregate, errs...) + } + } + return nil, "", aggregate, nil } // assignSecurityContext creates a security context for each container in the pod // and validates that the sc falls within the psp constraints. All containers must validate against // the same psp or is not considered valid. func assignSecurityContext(provider psp.Provider, pod *api.Pod, fldPath *field.Path) field.ErrorList { - generatedSCs := make([]*api.SecurityContext, len(pod.Spec.Containers)) - var generatedInitSCs []*api.SecurityContext - errs := field.ErrorList{} psc, pscAnnotations, err := provider.CreatePodSecurityContext(pod) if err != nil { errs = append(errs, field.Invalid(field.NewPath("spec", "securityContext"), pod.Spec.SecurityContext, err.Error())) } - - // save the original PSC and validate the generated PSC. Leave the generated PSC - // set for container generation/validation. We will reset to original post container - // validation. - originalPSC := pod.Spec.SecurityContext pod.Spec.SecurityContext = psc - originalAnnotations := maps.CopySS(pod.Annotations) pod.Annotations = pscAnnotations - errs = append(errs, provider.ValidatePodSecurityContext(pod, field.NewPath("spec", "securityContext"))...) - // Note: this is not changing the original container, we will set container SCs later so long - // as all containers validated under the same PSP. 
- for i, containerCopy := range pod.Spec.InitContainers { - // We will determine the effective security context for the container and validate against that - // since that is how the sc provider will eventually apply settings in the runtime. - // This results in an SC that is based on the Pod's PSC with the set fields from the container - // overriding pod level settings. - containerCopy.SecurityContext = sc.InternalDetermineEffectiveSecurityContext(pod, &containerCopy) + errs = append(errs, provider.ValidatePodSecurityContext(pod, field.NewPath("spec", "securityContext"))...) - sc, scAnnotations, err := provider.CreateContainerSecurityContext(pod, &containerCopy) + for i := range pod.Spec.InitContainers { + sc, scAnnotations, err := provider.CreateContainerSecurityContext(pod, &pod.Spec.InitContainers[i]) if err != nil { errs = append(errs, field.Invalid(field.NewPath("spec", "initContainers").Index(i).Child("securityContext"), "", err.Error())) continue } - generatedInitSCs = append(generatedInitSCs, sc) - - containerCopy.SecurityContext = sc + pod.Spec.InitContainers[i].SecurityContext = sc pod.Annotations = scAnnotations - errs = append(errs, provider.ValidateContainerSecurityContext(pod, &containerCopy, field.NewPath("spec", "initContainers").Index(i).Child("securityContext"))...) + errs = append(errs, provider.ValidateContainerSecurityContext(pod, &pod.Spec.InitContainers[i], field.NewPath("spec", "initContainers").Index(i).Child("securityContext"))...) } - // Note: this is not changing the original container, we will set container SCs later so long - // as all containers validated under the same PSP. - for i, containerCopy := range pod.Spec.Containers { - // We will determine the effective security context for the container and validate against that - // since that is how the sc provider will eventually apply settings in the runtime. - // This results in an SC that is based on the Pod's PSC with the set fields from the container - // overriding pod level settings. - containerCopy.SecurityContext = sc.InternalDetermineEffectiveSecurityContext(pod, &containerCopy) - - sc, scAnnotations, err := provider.CreateContainerSecurityContext(pod, &containerCopy) + for i := range pod.Spec.Containers { + sc, scAnnotations, err := provider.CreateContainerSecurityContext(pod, &pod.Spec.Containers[i]) if err != nil { errs = append(errs, field.Invalid(field.NewPath("spec", "containers").Index(i).Child("securityContext"), "", err.Error())) continue } - generatedSCs[i] = sc - containerCopy.SecurityContext = sc + pod.Spec.Containers[i].SecurityContext = sc pod.Annotations = scAnnotations - errs = append(errs, provider.ValidateContainerSecurityContext(pod, &containerCopy, field.NewPath("spec", "containers").Index(i).Child("securityContext"))...) + errs = append(errs, provider.ValidateContainerSecurityContext(pod, &pod.Spec.Containers[i], field.NewPath("spec", "containers").Index(i).Child("securityContext"))...) } if len(errs) > 0 { - // ensure psc is not mutated if there are errors - pod.Spec.SecurityContext = originalPSC - pod.Annotations = originalAnnotations return errs } - - // if we've reached this code then we've generated and validated an SC for every container in the - // pod so let's apply what we generated. Note: the psc is already applied. 
- for i, sc := range generatedInitSCs { - pod.Spec.InitContainers[i].SecurityContext = sc - } - for i, sc := range generatedSCs { - pod.Spec.Containers[i].SecurityContext = sc - } return nil } // createProvidersFromPolicies creates providers from the constraints supplied. -func (c *podSecurityPolicyPlugin) createProvidersFromPolicies(psps []*extensions.PodSecurityPolicy, namespace string) ([]psp.Provider, []error) { +func (c *PodSecurityPolicyPlugin) createProvidersFromPolicies(psps []*extensions.PodSecurityPolicy, namespace string) ([]psp.Provider, []error) { var ( // collected providers providers []psp.Provider @@ -274,72 +345,36 @@ func (c *podSecurityPolicyPlugin) createProvidersFromPolicies(psps []*extensions return providers, errs } -// getMatchingPolicies returns policies from the lister. For now this returns everything -// in the future it can filter based on UserInfo and permissions. -// -// TODO: this will likely need optimization since the initial implementation will -// always query for authorization. Needs scale testing and possibly checking against -// a cache. -func getMatchingPolicies(lister extensionslisters.PodSecurityPolicyLister, user user.Info, sa user.Info, authz authorizer.Authorizer, namespace string) ([]*extensions.PodSecurityPolicy, error) { - matchedPolicies := make([]*extensions.PodSecurityPolicy, 0) - - list, err := lister.List(labels.Everything()) - if err != nil { - return nil, err - } - - for _, constraint := range list { - if authorizedForPolicy(user, namespace, constraint, authz) || authorizedForPolicy(sa, namespace, constraint, authz) { - matchedPolicies = append(matchedPolicies, constraint) - } - } - - return matchedPolicies, nil +func isAuthorizedForPolicy(user, sa user.Info, namespace, policyName string, authz authorizer.Authorizer) bool { + // Check the service account first, as that is the more common use case. + return authorizedForPolicy(sa, namespace, policyName, authz) || + authorizedForPolicy(user, namespace, policyName, authz) } // authorizedForPolicy returns true if info is authorized to perform the "use" verb on the policy resource. -func authorizedForPolicy(info user.Info, namespace string, policy *extensions.PodSecurityPolicy, authz authorizer.Authorizer) bool { +func authorizedForPolicy(info user.Info, namespace string, policyName string, authz authorizer.Authorizer) bool { if info == nil { return false } - attr := buildAttributes(info, namespace, policy) - allowed, reason, err := authz.Authorize(attr) + attr := buildAttributes(info, namespace, policyName) + decision, reason, err := authz.Authorize(attr) if err != nil { glog.V(5).Infof("cannot authorize for policy: %v,%v", reason, err) } - return allowed + return (decision == authorizer.DecisionAllow) } // buildAttributes builds an attributes record for a SAR based on the user info and policy. -func buildAttributes(info user.Info, namespace string, policy *extensions.PodSecurityPolicy) authorizer.Attributes { +func buildAttributes(info user.Info, namespace string, policyName string) authorizer.Attributes { // check against the namespace that the pod is being created in to allow per-namespace PSP grants. attr := authorizer.AttributesRecord{ User: info, Verb: "use", Namespace: namespace, - Name: policy.Name, + Name: policyName, APIGroup: extensions.GroupName, Resource: "podsecuritypolicies", ResourceRequest: true, } return attr } - -// logProviders logs what providers were found for the pod as well as any errors that were encountered -// while creating providers. 
-func logProviders(pod *api.Pod, providers []psp.Provider, providerCreationErrs []error) { - for _, err := range providerCreationErrs { - glog.V(4).Infof("provider creation error: %v", err) - } - - if len(providers) == 0 { - glog.V(4).Infof("unable to validate pod %s (generate: %s) against any provider.", pod.Name, pod.GenerateName) - return - } - - names := make([]string, len(providers)) - for i, p := range providers { - names[i] = p.GetPSPName() - } - glog.V(4).Infof("validating pod %s (generate: %s) against providers: %s", pod.Name, pod.GenerateName, strings.Join(names, ",")) -} diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/securitycontext/scdeny/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/admission/securitycontext/scdeny/BUILD index 7d32bd9f8193..a90da5e8f7fd 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/securitycontext/scdeny/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/securitycontext/scdeny/BUILD @@ -9,8 +9,9 @@ load( go_library( name = "go_default_library", srcs = ["admission.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/securitycontext/scdeny", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", ], @@ -19,9 +20,10 @@ go_library( go_test( name = "go_default_test", srcs = ["admission_test.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/securitycontext/scdeny", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/securitycontext/scdeny/admission.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/securitycontext/scdeny/admission.go index 24e2ede3a6a9..cace3843dad6 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/securitycontext/scdeny/admission.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/securitycontext/scdeny/admission.go @@ -22,7 +22,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apiserver/pkg/admission" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) // Register registers a plugin @@ -32,19 +32,22 @@ func Register(plugins *admission.Plugins) { }) } -type plugin struct { +// Plugin implements admission.Interface. +type Plugin struct { *admission.Handler } +var _ admission.ValidationInterface = &Plugin{} + // NewSecurityContextDeny creates a new instance of the SecurityContextDeny admission controller -func NewSecurityContextDeny() admission.Interface { - return &plugin{ +func NewSecurityContextDeny() *Plugin { + return &Plugin{ Handler: admission.NewHandler(admission.Create, admission.Update), } } -// Admit will deny any pod that defines SELinuxOptions or RunAsUser. -func (p *plugin) Admit(a admission.Attributes) (err error) { +// Validate will deny any pod that defines SELinuxOptions or RunAsUser. 
+func (p *Plugin) Validate(a admission.Attributes) (err error) { if a.GetSubresource() != "" || a.GetResource().GroupResource() != api.Resource("pods") { return nil } diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/serviceaccount/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/admission/serviceaccount/BUILD index 0d9bf3a764d8..aaeedacddc46 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/serviceaccount/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/serviceaccount/BUILD @@ -12,9 +12,10 @@ go_library( "admission.go", "doc.go", ], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/serviceaccount", deps = [ - "//pkg/api:go_default_library", "//pkg/api/pod:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/client/informers/informers_generated/internalversion:go_default_library", "//pkg/client/listers/core/internalversion:go_default_library", @@ -34,9 +35,10 @@ go_library( go_test( name = "go_default_test", srcs = ["admission_test.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/serviceaccount", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/client/clientset_generated/internalclientset/fake:go_default_library", "//pkg/client/informers/informers_generated/internalversion:go_default_library", "//pkg/client/listers/core/internalversion:go_default_library", diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/serviceaccount/admission.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/serviceaccount/admission.go index 9e319d26d6c8..6e245ecf8b0b 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/serviceaccount/admission.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/serviceaccount/admission.go @@ -30,8 +30,8 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apiserver/pkg/admission" "k8s.io/apiserver/pkg/storage/names" - "k8s.io/kubernetes/pkg/api" podutil "k8s.io/kubernetes/pkg/api/pod" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion" corelisters "k8s.io/kubernetes/pkg/client/listers/core/internalversion" @@ -80,6 +80,8 @@ type serviceAccount struct { secretLister corelisters.SecretLister } +var _ admission.MutationInterface = &serviceAccount{} +var _ admission.ValidationInterface = &serviceAccount{} var _ = kubeapiserveradmission.WantsInternalKubeClientSet(&serviceAccount{}) var _ = kubeapiserveradmission.WantsInternalKubeInformerFactory(&serviceAccount{}) @@ -117,8 +119,8 @@ func (a *serviceAccount) SetInternalKubeInformerFactory(f informers.SharedInform }) } -// Validate ensures an authorizer is set. -func (a *serviceAccount) Validate() error { +// ValidateInitialization ensures an authorizer is set. 
+func (a *serviceAccount) ValidateInitialization() error { if a.client == nil { return fmt.Errorf("missing client") } @@ -132,18 +134,56 @@ func (a *serviceAccount) Validate() error { } func (s *serviceAccount) Admit(a admission.Attributes) (err error) { - if a.GetResource().GroupResource() != api.Resource("pods") { + if shouldIgnore(a) { return nil } - obj := a.GetObject() - if obj == nil { - return nil + updateInitialized, err := util.IsUpdatingInitializedObject(a) + if err != nil { + return err } - pod, ok := obj.(*api.Pod) - if !ok { + if updateInitialized { + // related pod spec fields are immutable after the pod is initialized return nil } + pod := a.GetObject().(*api.Pod) + + // Don't modify the spec of mirror pods. + // That makes the kubelet very angry and confused, and it immediately deletes the pod (because the spec doesn't match) + // That said, don't allow mirror pods to reference ServiceAccounts or SecretVolumeSources either + if _, isMirrorPod := pod.Annotations[api.MirrorPodAnnotationKey]; isMirrorPod { + return s.Validate(a) + } + + // Set the default service account if needed + if len(pod.Spec.ServiceAccountName) == 0 { + pod.Spec.ServiceAccountName = DefaultServiceAccountName + } + + serviceAccount, err := s.getServiceAccount(a.GetNamespace(), pod.Spec.ServiceAccountName) + if err != nil { + return admission.NewForbidden(a, fmt.Errorf("error looking up service account %s/%s: %v", a.GetNamespace(), pod.Spec.ServiceAccountName, err)) + } + if s.MountServiceAccountToken && shouldAutomount(serviceAccount, pod) { + if err := s.mountServiceAccountToken(serviceAccount, pod); err != nil { + if _, ok := err.(errors.APIStatus); ok { + return err + } + return admission.NewForbidden(a, err) + } + } + if len(pod.Spec.ImagePullSecrets) == 0 { + pod.Spec.ImagePullSecrets = make([]api.LocalObjectReference, len(serviceAccount.ImagePullSecrets)) + copy(pod.Spec.ImagePullSecrets, serviceAccount.ImagePullSecrets) + } + + return s.Validate(a) +} + +func (s *serviceAccount) Validate(a admission.Attributes) (err error) { + if shouldIgnore(a) { + return nil + } updateInitialized, err := util.IsUpdatingInitializedObject(a) if err != nil { return err @@ -153,9 +193,9 @@ func (s *serviceAccount) Admit(a admission.Attributes) (err error) { return nil } - // Don't modify the spec of mirror pods. 
- // That makes the kubelet very angry and confused, and it immediately deletes the pod (because the spec doesn't match) - // That said, don't allow mirror pods to reference ServiceAccounts or SecretVolumeSources either + pod := a.GetObject().(*api.Pod) + + // Mirror pods have restrictions on what they can reference if _, isMirrorPod := pod.Annotations[api.MirrorPodAnnotationKey]; isMirrorPod { if len(pod.Spec.ServiceAccountName) != 0 { return admission.NewForbidden(a, fmt.Errorf("a mirror pod may not reference service accounts")) @@ -171,20 +211,11 @@ func (s *serviceAccount) Admit(a admission.Attributes) (err error) { return nil } - // Set the default service account if needed - if len(pod.Spec.ServiceAccountName) == 0 { - pod.Spec.ServiceAccountName = DefaultServiceAccountName - } - // Ensure the referenced service account exists serviceAccount, err := s.getServiceAccount(a.GetNamespace(), pod.Spec.ServiceAccountName) if err != nil { return admission.NewForbidden(a, fmt.Errorf("error looking up service account %s/%s: %v", a.GetNamespace(), pod.Spec.ServiceAccountName, err)) } - if serviceAccount == nil { - // TODO: convert to a ServerTimeout error (or other error that sends a Retry-After header) - return admission.NewForbidden(a, fmt.Errorf("service account %s/%s was not found, retry after the service account is created", a.GetNamespace(), pod.Spec.ServiceAccountName)) - } if s.enforceMountableSecrets(serviceAccount) { if err := s.limitSecretReferences(serviceAccount, pod); err != nil { @@ -192,21 +223,23 @@ func (s *serviceAccount) Admit(a admission.Attributes) (err error) { } } - if s.MountServiceAccountToken && shouldAutomount(serviceAccount, pod) { - if err := s.mountServiceAccountToken(serviceAccount, pod); err != nil { - if _, ok := err.(errors.APIStatus); ok { - return err - } - return admission.NewForbidden(a, err) - } - } + return nil +} - if len(pod.Spec.ImagePullSecrets) == 0 { - pod.Spec.ImagePullSecrets = make([]api.LocalObjectReference, len(serviceAccount.ImagePullSecrets)) - copy(pod.Spec.ImagePullSecrets, serviceAccount.ImagePullSecrets) +func shouldIgnore(a admission.Attributes) bool { + if a.GetResource().GroupResource() != api.Resource("pods") { + return true + } + obj := a.GetObject() + if obj == nil { + return true + } + _, ok := obj.(*api.Pod) + if !ok { + return true } - return nil + return false } func shouldAutomount(sa *api.ServiceAccount, pod *api.Pod) bool { @@ -267,7 +300,7 @@ func (s *serviceAccount) getServiceAccount(namespace string, name string) (*api. 
} } - return nil, nil + return nil, errors.NewNotFound(api.Resource("serviceaccount"), name) } // getReferencedServiceAccountToken returns the name of the first referenced secret which is a ServiceAccountToken for the service account diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/storageclass/setdefault/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/admission/storageclass/setdefault/BUILD index 5726f14e6357..27b114d0643a 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/storageclass/setdefault/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/storageclass/setdefault/BUILD @@ -9,9 +9,10 @@ load( go_library( name = "go_default_library", srcs = ["admission.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/storageclass/setdefault", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/helper:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/helper:go_default_library", "//pkg/apis/storage:go_default_library", "//pkg/apis/storage/util:go_default_library", "//pkg/client/informers/informers_generated/internalversion:go_default_library", @@ -27,9 +28,10 @@ go_library( go_test( name = "go_default_test", srcs = ["admission_test.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/admission/storageclass/setdefault", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/storage:go_default_library", "//pkg/apis/storage/util:go_default_library", "//pkg/client/informers/informers_generated/internalversion:go_default_library", diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/storageclass/setdefault/admission.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/storageclass/setdefault/admission.go index 2e218bb0eda9..6a9fe197a082 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/storageclass/setdefault/admission.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/storageclass/setdefault/admission.go @@ -25,8 +25,8 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" admission "k8s.io/apiserver/pkg/admission" - api "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/helper" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/helper" "k8s.io/kubernetes/pkg/apis/storage" storageutil "k8s.io/kubernetes/pkg/apis/storage/util" informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion" @@ -54,6 +54,7 @@ type claimDefaulterPlugin struct { } var _ admission.Interface = &claimDefaulterPlugin{} +var _ admission.MutationInterface = &claimDefaulterPlugin{} var _ = kubeapiserveradmission.WantsInternalKubeInformerFactory(&claimDefaulterPlugin{}) // newPlugin creates a new admission plugin. @@ -69,8 +70,8 @@ func (a *claimDefaulterPlugin) SetInternalKubeInformerFactory(f informers.Shared a.SetReadyFunc(informer.Informer().HasSynced) } -// Validate ensures lister is set. -func (a *claimDefaulterPlugin) Validate() error { +// ValidateInitialization ensures lister is set. 
+func (a *claimDefaulterPlugin) ValidateInitialization() error { if a.lister == nil { return fmt.Errorf("missing lister") } diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/webhook/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/admission/webhook/BUILD deleted file mode 100644 index 0221e3ffbe38..000000000000 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/webhook/BUILD +++ /dev/null @@ -1,69 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_test( - name = "go_default_test", - srcs = [ - "admission_test.go", - "certs_test.go", - "rules_test.go", - ], - library = ":go_default_library", - deps = [ - "//pkg/api:go_default_library", - "//pkg/apis/admission/install:go_default_library", - "//vendor/k8s.io/api/admission/v1alpha1:go_default_library", - "//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", - ], -) - -go_library( - name = "go_default_library", - srcs = [ - "admission.go", - "doc.go", - "rules.go", - ], - deps = [ - "//pkg/api:go_default_library", - "//pkg/apis/admission/install:go_default_library", - "//pkg/apis/admission/v1alpha1:go_default_library", - "//pkg/kubeapiserver/admission:go_default_library", - "//pkg/kubeapiserver/admission/configuration:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/api/admission/v1alpha1:go_default_library", - "//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", - "//vendor/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/webhook/admission.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/webhook/admission.go deleted file mode 100644 index fa208cdb78b8..000000000000 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/webhook/admission.go +++ /dev/null @@ -1,275 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package webhook delegates admission checks to dynamically configured webhooks. -package webhook - -import ( - "context" - "fmt" - "io" - "net" - "net/http" - "sync" - "time" - - "github.com/golang/glog" - - admissionv1alpha1 "k8s.io/api/admission/v1alpha1" - "k8s.io/api/admissionregistration/v1alpha1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/runtime/serializer" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/apiserver/pkg/admission" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - "k8s.io/kubernetes/pkg/api" - admissionv1alpha1helper "k8s.io/kubernetes/pkg/apis/admission/v1alpha1" - admissioninit "k8s.io/kubernetes/pkg/kubeapiserver/admission" - "k8s.io/kubernetes/pkg/kubeapiserver/admission/configuration" - - // install the clientgo admission API for use with api registry - _ "k8s.io/kubernetes/pkg/apis/admission/install" -) - -var ( - groupVersions = []schema.GroupVersion{ - admissionv1alpha1.SchemeGroupVersion, - } -) - -type ErrCallingWebhook struct { - WebhookName string - Reason error -} - -func (e *ErrCallingWebhook) Error() string { - if e.Reason != nil { - return fmt.Sprintf("failed calling admission webhook %q: %v", e.WebhookName, e.Reason) - } - return fmt.Sprintf("failed calling admission webhook %q; no further details available", e.WebhookName) -} - -// Register registers a plugin -func Register(plugins *admission.Plugins) { - plugins.Register("GenericAdmissionWebhook", func(configFile io.Reader) (admission.Interface, error) { - plugin, err := NewGenericAdmissionWebhook() - if err != nil { - return nil, err - } - - return plugin, nil - }) -} - -// WebhookSource can list dynamic webhook plugins. -type WebhookSource interface { - Run(stopCh <-chan struct{}) - ExternalAdmissionHooks() (*v1alpha1.ExternalAdmissionHookConfiguration, error) -} - -// NewGenericAdmissionWebhook returns a generic admission webhook plugin. -func NewGenericAdmissionWebhook() (*GenericAdmissionWebhook, error) { - return &GenericAdmissionWebhook{ - Handler: admission.NewHandler( - admission.Connect, - admission.Create, - admission.Delete, - admission.Update, - ), - negotiatedSerializer: serializer.NegotiatedSerializerWrapper(runtime.SerializerInfo{ - Serializer: api.Codecs.LegacyCodec(admissionv1alpha1.SchemeGroupVersion), - }), - }, nil -} - -// GenericAdmissionWebhook is an implementation of admission.Interface. 
-type GenericAdmissionWebhook struct { - *admission.Handler - hookSource WebhookSource - serviceResolver admissioninit.ServiceResolver - negotiatedSerializer runtime.NegotiatedSerializer - clientCert []byte - clientKey []byte - proxyTransport *http.Transport -} - -var ( - _ = admissioninit.WantsServiceResolver(&GenericAdmissionWebhook{}) - _ = admissioninit.WantsClientCert(&GenericAdmissionWebhook{}) - _ = admissioninit.WantsExternalKubeClientSet(&GenericAdmissionWebhook{}) -) - -func (a *GenericAdmissionWebhook) SetProxyTransport(pt *http.Transport) { - a.proxyTransport = pt -} - -func (a *GenericAdmissionWebhook) SetServiceResolver(sr admissioninit.ServiceResolver) { - a.serviceResolver = sr -} - -func (a *GenericAdmissionWebhook) SetClientCert(cert, key []byte) { - a.clientCert = cert - a.clientKey = key -} - -func (a *GenericAdmissionWebhook) SetExternalKubeClientSet(client clientset.Interface) { - a.hookSource = configuration.NewExternalAdmissionHookConfigurationManager(client.Admissionregistration().ExternalAdmissionHookConfigurations()) -} - -func (a *GenericAdmissionWebhook) Validate() error { - if a.hookSource == nil { - return fmt.Errorf("the GenericAdmissionWebhook admission plugin requires a Kubernetes client to be provided") - } - go a.hookSource.Run(wait.NeverStop) - return nil -} - -func (a *GenericAdmissionWebhook) loadConfiguration(attr admission.Attributes) (*v1alpha1.ExternalAdmissionHookConfiguration, error) { - hookConfig, err := a.hookSource.ExternalAdmissionHooks() - // if ExternalAdmissionHook configuration is disabled, fail open - if err == configuration.ErrDisabled { - return &v1alpha1.ExternalAdmissionHookConfiguration{}, nil - } - if err != nil { - e := apierrors.NewServerTimeout(attr.GetResource().GroupResource(), string(attr.GetOperation()), 1) - e.ErrStatus.Message = fmt.Sprintf("Unable to refresh the ExternalAdmissionHook configuration: %v", err) - e.ErrStatus.Reason = "LoadingConfiguration" - e.ErrStatus.Details.Causes = append(e.ErrStatus.Details.Causes, metav1.StatusCause{ - Type: "ExternalAdmissionHookConfigurationFailure", - Message: "An error has occurred while refreshing the externalAdmissionHook configuration, no resources can be created/updated/deleted/connected until a refresh succeeds.", - }) - return nil, e - } - return hookConfig, nil -} - -// Admit makes an admission decision based on the request attributes. -func (a *GenericAdmissionWebhook) Admit(attr admission.Attributes) error { - hookConfig, err := a.loadConfiguration(attr) - if err != nil { - return err - } - hooks := hookConfig.ExternalAdmissionHooks - ctx := context.TODO() - - errCh := make(chan error, len(hooks)) - wg := sync.WaitGroup{} - wg.Add(len(hooks)) - for i := range hooks { - go func(hook *v1alpha1.ExternalAdmissionHook) { - defer wg.Done() - if err := a.callHook(ctx, hook, attr); err == nil { - return - } else if callErr, ok := err.(*ErrCallingWebhook); ok { - glog.Warningf("Failed calling webhook %v: %v", hook.Name, callErr) - utilruntime.HandleError(callErr) - // Since we are failing open to begin with, we do not send an error down the channel - } else { - glog.Warningf("rejected by webhook %v %t: %v", hook.Name, err, err) - errCh <- err - } - }(&hooks[i]) - } - wg.Wait() - close(errCh) - - var errs []error - for e := range errCh { - errs = append(errs, e) - } - if len(errs) == 0 { - return nil - } - if len(errs) > 1 { - for i := 1; i < len(errs); i++ { - // TODO: merge status errors; until then, just return the first one. 
- utilruntime.HandleError(errs[i]) - } - } - return errs[0] -} - -func (a *GenericAdmissionWebhook) callHook(ctx context.Context, h *v1alpha1.ExternalAdmissionHook, attr admission.Attributes) error { - matches := false - for _, r := range h.Rules { - m := RuleMatcher{Rule: r, Attr: attr} - if m.Matches() { - matches = true - break - } - } - if !matches { - return nil - } - - // Make the webhook request - request := admissionv1alpha1helper.NewAdmissionReview(attr) - client, err := a.hookClient(h) - if err != nil { - return &ErrCallingWebhook{WebhookName: h.Name, Reason: err} - } - if err := client.Post().Context(ctx).Body(&request).Do().Into(&request); err != nil { - return &ErrCallingWebhook{WebhookName: h.Name, Reason: err} - } - - if request.Status.Allowed { - return nil - } - - if request.Status.Result == nil { - return fmt.Errorf("admission webhook %q denied the request without explanation", h.Name) - } - - return &apierrors.StatusError{ - ErrStatus: *request.Status.Result, - } -} - -func (a *GenericAdmissionWebhook) hookClient(h *v1alpha1.ExternalAdmissionHook) (*rest.RESTClient, error) { - u, err := a.serviceResolver.ResolveEndpoint(h.ClientConfig.Service.Namespace, h.ClientConfig.Service.Name) - if err != nil { - return nil, err - } - - var dial func(network, addr string) (net.Conn, error) - if a.proxyTransport != nil && a.proxyTransport.Dial != nil { - dial = a.proxyTransport.Dial - } - - // TODO: cache these instead of constructing one each time - cfg := &rest.Config{ - Host: u.Host, - APIPath: u.Path, - TLSClientConfig: rest.TLSClientConfig{ - ServerName: h.ClientConfig.Service.Name + "." + h.ClientConfig.Service.Namespace + ".svc", - CAData: h.ClientConfig.CABundle, - CertData: a.clientCert, - KeyData: a.clientKey, - }, - UserAgent: "kube-apiserver-admission", - Timeout: 30 * time.Second, - ContentConfig: rest.ContentConfig{ - NegotiatedSerializer: a.negotiatedSerializer, - }, - Dial: dial, - } - return rest.UnversionedRESTClientFor(cfg) -} diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/webhook/doc.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/webhook/doc.go deleted file mode 100644 index ae654cd31bbf..000000000000 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/webhook/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package webhook checks a webhook for configured operation admission -package webhook diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/webhook/gencerts.sh b/vendor/k8s.io/kubernetes/plugin/pkg/admission/webhook/gencerts.sh deleted file mode 100755 index 3ee1a094fcd3..000000000000 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/webhook/gencerts.sh +++ /dev/null @@ -1,107 +0,0 @@ -#!/bin/bash - -# Copyright 2017 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -e - -# gencerts.sh generates the certificates for the generic webhook admission plugin tests. -# -# It is not expected to be run often (there is no go generate rule), and mainly -# exists for documentation purposes. - -CN_BASE="generic_webhook_admission_plugin_tests" - -cat > server.conf << EOF -[req] -req_extensions = v3_req -distinguished_name = req_distinguished_name -[req_distinguished_name] -[ v3_req ] -basicConstraints = CA:FALSE -keyUsage = nonRepudiation, digitalSignature, keyEncipherment -extendedKeyUsage = clientAuth, serverAuth -subjectAltName = @alt_names -[alt_names] -IP.1 = 127.0.0.1 -EOF - -cat > client.conf << EOF -[req] -req_extensions = v3_req -distinguished_name = req_distinguished_name -[req_distinguished_name] -[ v3_req ] -basicConstraints = CA:FALSE -keyUsage = nonRepudiation, digitalSignature, keyEncipherment -extendedKeyUsage = clientAuth, serverAuth -subjectAltName = @alt_names -[alt_names] -IP.1 = 127.0.0.1 -EOF - -# Create a certificate authority -openssl genrsa -out caKey.pem 2048 -openssl req -x509 -new -nodes -key caKey.pem -days 100000 -out caCert.pem -subj "/CN=${CN_BASE}_ca" - -# Create a second certificate authority -openssl genrsa -out badCAKey.pem 2048 -openssl req -x509 -new -nodes -key badCAKey.pem -days 100000 -out badCACert.pem -subj "/CN=${CN_BASE}_ca" - -# Create a server certiticate -openssl genrsa -out serverKey.pem 2048 -openssl req -new -key serverKey.pem -out server.csr -subj "/CN=webhook-test.default.svc" -config server.conf -openssl x509 -req -in server.csr -CA caCert.pem -CAkey caKey.pem -CAcreateserial -out serverCert.pem -days 100000 -extensions v3_req -extfile server.conf - -# Create a client certiticate -openssl genrsa -out clientKey.pem 2048 -openssl req -new -key clientKey.pem -out client.csr -subj "/CN=${CN_BASE}_client" -config client.conf -openssl x509 -req -in client.csr -CA caCert.pem -CAkey caKey.pem -CAcreateserial -out clientCert.pem -days 100000 -extensions v3_req -extfile client.conf - -outfile=certs_test.go - -cat > $outfile << EOF -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -EOF - -echo "// This file was generated using openssl by the gencerts.sh script" >> $outfile -echo "// and holds raw certificates for the webhook tests." >> $outfile -echo "" >> $outfile -echo "package webhook" >> $outfile -for file in caKey caCert badCAKey badCACert serverKey serverCert clientKey clientCert; do - data=$(cat ${file}.pem) - echo "" >> $outfile - echo "var $file = []byte(\`$data\`)" >> $outfile -done - -# Clean up after we're done. 
-rm *.pem -rm *.csr -rm *.srl -rm *.conf diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/bootstrap/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/bootstrap/BUILD index ff951252f089..b6f0426ab130 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/bootstrap/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/bootstrap/BUILD @@ -9,9 +9,10 @@ load( go_test( name = "go_default_test", srcs = ["bootstrap_test.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/bootstrap", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/bootstrap/api:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -24,8 +25,9 @@ go_test( go_library( name = "go_default_library", srcs = ["bootstrap.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/bootstrap", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/bootstrap/api:go_default_library", "//pkg/client/listers/core/internalversion:go_default_library", "//vendor/github.com/golang/glog:go_default_library", diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/bootstrap/bootstrap.go b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/bootstrap/bootstrap.go index fecea0f6855f..90a0137489db 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/bootstrap/bootstrap.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/bootstrap/bootstrap.go @@ -31,7 +31,7 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apiserver/pkg/authentication/user" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" bootstrapapi "k8s.io/kubernetes/pkg/bootstrap/api" "k8s.io/kubernetes/pkg/client/listers/core/internalversion" ) diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/BUILD index 926c876ee3b7..46bb915ba8a4 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/BUILD @@ -9,9 +9,10 @@ load( go_test( name = "go_default_test", srcs = ["node_authorizer_test.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/auth/authorizer/node", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/auth/nodeidentifier:go_default_library", "//plugin/pkg/auth/authorizer/rbac/bootstrappolicy:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -27,13 +28,15 @@ go_library( "graph_populator.go", "node_authorizer.go", ], + importpath = "k8s.io/kubernetes/plugin/pkg/auth/authorizer/node", deps = [ - "//pkg/api:go_default_library", "//pkg/api/persistentvolume:go_default_library", "//pkg/api/pod:go_default_library", + "//pkg/apis/core:go_default_library", "//pkg/apis/rbac:go_default_library", "//pkg/auth/nodeidentifier:go_default_library", "//pkg/client/informers/informers_generated/internalversion/core/internalversion:go_default_library", + "//pkg/features:go_default_library", "//plugin/pkg/auth/authorizer/rbac:go_default_library", "//third_party/forked/gonum/graph:go_default_library", "//third_party/forked/gonum/graph/simple:go_default_library", @@ -41,6 +44,7 @@ go_library( 
"//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/graph.go b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/graph.go index cdcd0201f8b1..729cad2ea772 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/graph.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/graph.go @@ -19,9 +19,9 @@ package node import ( "sync" - "k8s.io/kubernetes/pkg/api" pvutil "k8s.io/kubernetes/pkg/api/persistentvolume" podutil "k8s.io/kubernetes/pkg/api/pod" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/third_party/forked/gonum/graph" "k8s.io/kubernetes/third_party/forked/gonum/graph/simple" ) diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/graph_populator.go b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/graph_populator.go index ecea060d6ecc..64ea648227cf 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/graph_populator.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/graph_populator.go @@ -20,7 +20,7 @@ import ( "github.com/golang/glog" "k8s.io/client-go/tools/cache" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion" ) diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/node_authorizer.go b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/node_authorizer.go index 46a24eba1e04..bf728ccc5e63 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/node_authorizer.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/node_authorizer.go @@ -23,17 +23,19 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apiserver/pkg/authorization/authorizer" - "k8s.io/kubernetes/pkg/api" + utilfeature "k8s.io/apiserver/pkg/util/feature" + api "k8s.io/kubernetes/pkg/apis/core" rbacapi "k8s.io/kubernetes/pkg/apis/rbac" "k8s.io/kubernetes/pkg/auth/nodeidentifier" + "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac" "k8s.io/kubernetes/third_party/forked/gonum/graph" "k8s.io/kubernetes/third_party/forked/gonum/graph/traverse" ) // NodeAuthorizer authorizes requests from kubelets, with the following logic: -// 1. If a request is not from a node (IdentifyNode() returns isNode=false), reject -// 2. If a specific node cannot be identified (IdentifyNode() returns nodeName=""), reject +// 1. If a request is not from a node (NodeIdentity() returns isNode=false), reject +// 2. If a specific node cannot be identified (NodeIdentity() returns nodeName=""), reject // 3. 
If a request is for a secret, configmap, persistent volume or persistent volume claim, reject unless the verb is get, and the requested object is related to the requesting node: // node <- pod // node <- pod <- secret @@ -64,16 +66,16 @@ var ( pvResource = api.Resource("persistentvolumes") ) -func (r *NodeAuthorizer) Authorize(attrs authorizer.Attributes) (bool, string, error) { +func (r *NodeAuthorizer) Authorize(attrs authorizer.Attributes) (authorizer.Decision, string, error) { nodeName, isNode := r.identifier.NodeIdentity(attrs.GetUser()) if !isNode { // reject requests from non-nodes - return false, "", nil + return authorizer.DecisionNoOpinion, "", nil } if len(nodeName) == 0 { // reject requests from unidentifiable nodes glog.V(2).Infof("NODE DENY: unknown node for user %q", attrs.GetUser().GetName()) - return false, fmt.Sprintf("unknown node for user %q", attrs.GetUser().GetName()), nil + return authorizer.DecisionNoOpinion, fmt.Sprintf("unknown node for user %q", attrs.GetUser().GetName()), nil } // subdivide access to specific resources @@ -85,6 +87,11 @@ func (r *NodeAuthorizer) Authorize(attrs authorizer.Attributes) (bool, string, e case configMapResource: return r.authorizeGet(nodeName, configMapVertexType, attrs) case pvcResource: + if utilfeature.DefaultFeatureGate.Enabled(features.ExpandPersistentVolumes) { + if attrs.GetSubresource() == "status" { + return r.authorizeStatusUpdate(nodeName, pvcVertexType, attrs) + } + } return r.authorizeGet(nodeName, pvcVertexType, attrs) case pvResource: return r.authorizeGet(nodeName, pvVertexType, attrs) @@ -92,31 +99,59 @@ func (r *NodeAuthorizer) Authorize(attrs authorizer.Attributes) (bool, string, e } // Access to other resources is not subdivided, so just evaluate against the statically defined node rules - return rbac.RulesAllow(attrs, r.nodeRules...), "", nil + if rbac.RulesAllow(attrs, r.nodeRules...) 
{ + return authorizer.DecisionAllow, "", nil + } + return authorizer.DecisionNoOpinion, "", nil } -// authorizeGet authorizes "get" requests to objects of the specified type if they are related to the specified node -func (r *NodeAuthorizer) authorizeGet(nodeName string, startingType vertexType, attrs authorizer.Attributes) (bool, string, error) { - if attrs.GetVerb() != "get" || len(attrs.GetName()) == 0 { +// authorizeStatusUpdate authorizes get/update/patch requests to status subresources of the specified type if they are related to the specified node +func (r *NodeAuthorizer) authorizeStatusUpdate(nodeName string, startingType vertexType, attrs authorizer.Attributes) (authorizer.Decision, string, error) { + switch attrs.GetVerb() { + case "update", "patch": + // ok + default: glog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs) - return false, "can only get individual resources of this type", nil + return authorizer.DecisionNoOpinion, "can only get/update/patch this type", nil } + if attrs.GetSubresource() != "status" { + glog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs) + return authorizer.DecisionNoOpinion, "can only update status subresource", nil + } + + return r.authorize(nodeName, startingType, attrs) +} + +// authorizeGet authorizes "get" requests to objects of the specified type if they are related to the specified node +func (r *NodeAuthorizer) authorizeGet(nodeName string, startingType vertexType, attrs authorizer.Attributes) (authorizer.Decision, string, error) { + if attrs.GetVerb() != "get" { + glog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs) + return authorizer.DecisionNoOpinion, "can only get individual resources of this type", nil + } if len(attrs.GetSubresource()) > 0 { glog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs) - return false, "cannot get subresource", nil + return authorizer.DecisionNoOpinion, "cannot get subresource", nil + } + return r.authorize(nodeName, startingType, attrs) +} + +func (r *NodeAuthorizer) authorize(nodeName string, startingType vertexType, attrs authorizer.Attributes) (authorizer.Decision, string, error) { + if len(attrs.GetName()) == 0 { + glog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs) + return authorizer.DecisionNoOpinion, "No Object name found", nil } ok, err := r.hasPathFrom(nodeName, startingType, attrs.GetNamespace(), attrs.GetName()) if err != nil { glog.V(2).Infof("NODE DENY: %v", err) - return false, "no path found to object", nil + return authorizer.DecisionNoOpinion, "no path found to object", nil } if !ok { glog.V(2).Infof("NODE DENY: %q %#v", nodeName, attrs) - return false, "no path found to object", nil + return authorizer.DecisionNoOpinion, "no path found to object", nil } - return ok, "", nil + return authorizer.DecisionAllow, "", nil } // hasPathFrom returns true if there is a directed path from the specified type/namespace/name to the specified Node diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/BUILD index 4325ab9c12b5..73dff4829009 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/BUILD @@ -12,6 +12,7 @@ go_library( "rbac.go", "subject_locator.go", ], + importpath = "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac", deps = [ "//pkg/apis/rbac:go_default_library", "//pkg/client/listers/rbac/internalversion:go_default_library", @@ -30,6 +31,7 @@ go_test( "rbac_test.go", "subject_locator_test.go", ], + importpath = 
"k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac", library = ":go_default_library", deps = [ "//pkg/apis/rbac:go_default_library", diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/BUILD index 501feb306622..e503bb50c65e 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/BUILD @@ -13,6 +13,7 @@ go_library( "namespace_policy.go", "policy.go", ], + importpath = "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy", deps = [ "//pkg/apis/rbac:go_default_library", "//pkg/features:go_default_library", @@ -31,10 +32,12 @@ go_test( data = glob([ "testdata/*", ]), + importpath = "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy_test", deps = [ ":go_default_library", - "//pkg/api:go_default_library", - "//pkg/api/install:go_default_library", + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/install:go_default_library", "//pkg/apis/rbac:go_default_library", "//pkg/apis/rbac/install:go_default_library", "//pkg/registry/rbac/validation:go_default_library", @@ -52,6 +55,7 @@ go_test( name = "go_default_test", srcs = ["controller_policy_test.go"], data = glob(["testdata/**"]), + importpath = "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy", library = ":go_default_library", deps = [ "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go index f81abc720c2b..c94c02953341 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go @@ -58,14 +58,31 @@ func buildControllerRoles() ([]rbac.ClusterRole, []rbac.ClusterRoleBinding) { // controllerRoleBindings is a slice of roles used for controllers controllerRoleBindings := []rbac.ClusterRoleBinding{} + addControllerRole(&controllerRoles, &controllerRoleBindings, func() rbac.ClusterRole { + role := rbac.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "attachdetach-controller"}, + Rules: []rbac.PolicyRule{ + rbac.NewRule("list", "watch").Groups(legacyGroup).Resources("persistentvolumes", "persistentvolumeclaims").RuleOrDie(), + rbac.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(), + rbac.NewRule("patch", "update").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(), + rbac.NewRule("list", "watch").Groups(legacyGroup).Resources("pods").RuleOrDie(), + eventsRule(), + }, + } + + if utilfeature.DefaultFeatureGate.Enabled(features.CSIPersistentVolume) { + role.Rules = append(role.Rules, rbac.NewRule("get", "create", "delete", "list", "watch").Groups(storageGroup).Resources("volumeattachments").RuleOrDie()) + } + + return role + }()) + addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{ - ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "attachdetach-controller"}, + ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "clusterrole-aggregation-controller"}, Rules: []rbac.PolicyRule{ - rbac.NewRule("list", "watch").Groups(legacyGroup).Resources("persistentvolumes", 
"persistentvolumeclaims").RuleOrDie(), - rbac.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(), - rbac.NewRule("patch", "update").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(), - rbac.NewRule("list", "watch").Groups(legacyGroup).Resources("pods").RuleOrDie(), - eventsRule(), + // this controller must have full permissions to allow it to mutate any role in any way + rbac.NewRule("*").Groups("*").Resources("*").RuleOrDie(), + rbac.NewRule("*").URLs("*").RuleOrDie(), }, }) addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{ @@ -98,7 +115,7 @@ func buildControllerRoles() ([]rbac.ClusterRole, []rbac.ClusterRoleBinding) { rbac.NewRule("get", "list", "watch", "update").Groups(extensionsGroup, appsGroup).Resources("deployments").RuleOrDie(), rbac.NewRule("update").Groups(extensionsGroup, appsGroup).Resources("deployments/status").RuleOrDie(), rbac.NewRule("update").Groups(extensionsGroup, appsGroup).Resources("deployments/finalizers").RuleOrDie(), - rbac.NewRule("get", "list", "watch", "create", "update", "patch", "delete").Groups(extensionsGroup).Resources("replicasets").RuleOrDie(), + rbac.NewRule("get", "list", "watch", "create", "update", "patch", "delete").Groups(appsGroup, extensionsGroup).Resources("replicasets").RuleOrDie(), // TODO: remove "update" once // https://github.com/kubernetes/kubernetes/issues/36897 is resolved. rbac.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("pods").RuleOrDie(), @@ -109,7 +126,7 @@ func buildControllerRoles() ([]rbac.ClusterRole, []rbac.ClusterRoleBinding) { ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "disruption-controller"}, Rules: []rbac.PolicyRule{ rbac.NewRule("get", "list", "watch").Groups(extensionsGroup, appsGroup).Resources("deployments").RuleOrDie(), - rbac.NewRule("get", "list", "watch").Groups(extensionsGroup).Resources("replicasets").RuleOrDie(), + rbac.NewRule("get", "list", "watch").Groups(appsGroup, extensionsGroup).Resources("replicasets").RuleOrDie(), rbac.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("replicationcontrollers").RuleOrDie(), rbac.NewRule("get", "list", "watch").Groups(policyGroup).Resources("poddisruptionbudgets").RuleOrDie(), rbac.NewRule("get", "list", "watch").Groups(appsGroup).Resources("statefulsets").RuleOrDie(), @@ -156,16 +173,13 @@ func buildControllerRoles() ([]rbac.ClusterRole, []rbac.ClusterRoleBinding) { Rules: []rbac.PolicyRule{ rbac.NewRule("get", "list", "watch").Groups(autoscalingGroup).Resources("horizontalpodautoscalers").RuleOrDie(), rbac.NewRule("update").Groups(autoscalingGroup).Resources("horizontalpodautoscalers/status").RuleOrDie(), - rbac.NewRule("get", "update").Groups(legacyGroup).Resources("replicationcontrollers/scale").RuleOrDie(), - // TODO this should be removable when the HPA contoller is fixed - rbac.NewRule("get", "update").Groups(extensionsGroup).Resources("replicationcontrollers/scale").RuleOrDie(), - rbac.NewRule("get", "update").Groups(extensionsGroup, appsGroup).Resources("deployments/scale", "replicasets/scale").RuleOrDie(), + rbac.NewRule("get", "update").Groups("*").Resources("*/scale").RuleOrDie(), rbac.NewRule("list").Groups(legacyGroup).Resources("pods").RuleOrDie(), // TODO: restrict this to the appropriate namespace rbac.NewRule("get").Groups(legacyGroup).Resources("services/proxy").Names("https:heapster:", "http:heapster:").RuleOrDie(), // allow listing resource metrics and custom metrics 
rbac.NewRule("list").Groups(resMetricsGroup).Resources("pods").RuleOrDie(), - rbac.NewRule("list").Groups(customMetricsGroup).Resources("*").RuleOrDie(), + rbac.NewRule("get", "list").Groups(customMetricsGroup).Resources("*").RuleOrDie(), eventsRule(), }, }) @@ -230,9 +244,9 @@ func buildControllerRoles() ([]rbac.ClusterRole, []rbac.ClusterRoleBinding) { addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "replicaset-controller"}, Rules: []rbac.PolicyRule{ - rbac.NewRule("get", "list", "watch", "update").Groups(extensionsGroup).Resources("replicasets").RuleOrDie(), - rbac.NewRule("update").Groups(extensionsGroup).Resources("replicasets/status").RuleOrDie(), - rbac.NewRule("update").Groups(extensionsGroup).Resources("replicasets/finalizers").RuleOrDie(), + rbac.NewRule("get", "list", "watch", "update").Groups(appsGroup, extensionsGroup).Resources("replicasets").RuleOrDie(), + rbac.NewRule("update").Groups(appsGroup, extensionsGroup).Resources("replicasets/status").RuleOrDie(), + rbac.NewRule("update").Groups(appsGroup, extensionsGroup).Resources("replicasets/finalizers").RuleOrDie(), rbac.NewRule("list", "watch", "patch", "create", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(), eventsRule(), }, @@ -310,6 +324,16 @@ func buildControllerRoles() ([]rbac.ClusterRole, []rbac.ClusterRoleBinding) { eventsRule(), }, }) + if utilfeature.DefaultFeatureGate.Enabled(features.PVCProtection) { + addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "pvc-protection-controller"}, + Rules: []rbac.PolicyRule{ + rbac.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(), + rbac.NewRule("list", "watch", "get").Groups(legacyGroup).Resources("pods").RuleOrDie(), + eventsRule(), + }, + }) + } return controllerRoles, controllerRoleBindings } diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go index 5510701bf17f..b54d7c6dfdc9 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go @@ -91,7 +91,7 @@ func addClusterRoleBindingLabel(rolebindings []rbac.ClusterRoleBinding) { } func NodeRules() []rbac.PolicyRule { - return []rbac.PolicyRule{ + nodePolicyRules := []rbac.PolicyRule{ // Needed to check API access. These creates are non-mutating rbac.NewRule("create").Groups(authenticationGroup).Resources("tokenreviews").RuleOrDie(), rbac.NewRule("create").Groups(authorizationGroup).Resources("subjectaccessreviews", "localsubjectaccessreviews").RuleOrDie(), @@ -123,11 +123,12 @@ func NodeRules() []rbac.PolicyRule { // Needed for imagepullsecrets, rbd/ceph and secret volumes, and secrets in envs // Needed for configmap volume and envs - // Use the NodeRestriction admission plugin to limit a node to get secrets/configmaps referenced by pods bound to itself. + // Use the Node authorization mode to limit a node to get secrets/configmaps referenced by pods bound to itself. rbac.NewRule("get").Groups(legacyGroup).Resources("secrets", "configmaps").RuleOrDie(), // Needed for persistent volumes - // Use the NodeRestriction admission plugin to limit a node to get pv/pvc objects referenced by pods bound to itself. 
+ // Use the Node authorization mode to limit a node to get pv/pvc objects referenced by pods bound to itself. rbac.NewRule("get").Groups(legacyGroup).Resources("persistentvolumeclaims", "persistentvolumes").RuleOrDie(), + // TODO: add to the Node authorizer and restrict to endpoints referenced by pods or PVs bound to the node // Needed for glusterfs volumes rbac.NewRule("get").Groups(legacyGroup).Resources("endpoints").RuleOrDie(), @@ -135,6 +136,20 @@ func NodeRules() []rbac.PolicyRule { // for it to be signed. This allows the kubelet to rotate it's own certificate. rbac.NewRule("create", "get", "list", "watch").Groups(certificatesGroup).Resources("certificatesigningrequests").RuleOrDie(), } + + if utilfeature.DefaultFeatureGate.Enabled(features.ExpandPersistentVolumes) { + // Use the Node authorization mode to limit a node to update status of pvc objects referenced by pods bound to itself. + // Use the NodeRestriction admission plugin to limit a node to just update the status stanza. + pvcStatusPolicyRule := rbac.NewRule("get", "update", "patch").Groups(legacyGroup).Resources("persistentvolumeclaims/status").RuleOrDie() + nodePolicyRules = append(nodePolicyRules, pvcStatusPolicyRule) + } + + // CSI + if utilfeature.DefaultFeatureGate.Enabled(features.CSIPersistentVolume) { + volAttachRule := rbac.NewRule("get").Groups(storageGroup).Resources("volumeattachments").RuleOrDie() + nodePolicyRules = append(nodePolicyRules, volAttachRule) + } + return nodePolicyRules } // ClusterRoles returns the cluster roles to bootstrap an API server with @@ -152,7 +167,16 @@ func ClusterRoles() []rbac.ClusterRole { // a role which provides just enough power to determine if the server is ready and discover API versions for negotiation ObjectMeta: metav1.ObjectMeta{Name: "system:discovery"}, Rules: []rbac.PolicyRule{ - rbac.NewRule("get").URLs("/healthz", "/version", "/swaggerapi", "/swaggerapi/*", "/api", "/api/*", "/apis", "/apis/*").RuleOrDie(), + rbac.NewRule("get").URLs( + "/healthz", "/version", + // remove once swagger 1.2 support is removed + "/swaggerapi", "/swaggerapi/*", + // do not expand this pattern for openapi discovery docs + // move to a single openapi endpoint that takes accept/accept-encoding headers + "/swagger.json", "/swagger-2.0.0.pb-v1", + "/api", "/api/*", + "/apis", "/apis/*", + ).RuleOrDie(), }, }, { @@ -160,13 +184,37 @@ func ClusterRoles() []rbac.ClusterRole { ObjectMeta: metav1.ObjectMeta{Name: "system:basic-user"}, Rules: []rbac.PolicyRule{ // TODO add future selfsubjectrulesreview, project request APIs, project listing APIs - rbac.NewRule("create").Groups(authorizationGroup).Resources("selfsubjectaccessreviews").RuleOrDie(), + rbac.NewRule("create").Groups(authorizationGroup).Resources("selfsubjectaccessreviews", "selfsubjectrulesreviews").RuleOrDie(), }, }, { // a role for a namespace level admin. It is `edit` plus the power to grant permissions to other users. ObjectMeta: metav1.ObjectMeta{Name: "admin"}, + AggregationRule: &rbac.AggregationRule{ + ClusterRoleSelectors: []metav1.LabelSelector{{MatchLabels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-admin": "true"}}}, + }, + }, + { + // a role for a namespace level editor. It grants access to all user level actions in a namespace. 
+ // It does not grant powers for "privileged" resources which are domain of the system: `/status` + // subresources or `quota`/`limits` which are used to control namespaces + ObjectMeta: metav1.ObjectMeta{Name: "edit"}, + AggregationRule: &rbac.AggregationRule{ + ClusterRoleSelectors: []metav1.LabelSelector{{MatchLabels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-edit": "true"}}}, + }, + }, + { + // a role for namespace level viewing. It grants Read-only access to non-escalating resources in + // a namespace. + ObjectMeta: metav1.ObjectMeta{Name: "view"}, + AggregationRule: &rbac.AggregationRule{ + ClusterRoleSelectors: []metav1.LabelSelector{{MatchLabels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-view": "true"}}}, + }, + }, + { + // a role for a namespace level admin. It is `edit` plus the power to grant permissions to other users. + ObjectMeta: metav1.ObjectMeta{Name: "system:aggregate-to-admin", Labels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-admin": "true"}}, Rules: []rbac.PolicyRule{ rbac.NewRule(ReadWrite...).Groups(legacyGroup).Resources("pods", "pods/attach", "pods/proxy", "pods/exec", "pods/portforward").RuleOrDie(), rbac.NewRule(ReadWrite...).Groups(legacyGroup).Resources("replicationcontrollers", "replicationcontrollers/scale", "serviceaccounts", @@ -179,7 +227,9 @@ func ClusterRoles() []rbac.ClusterRole { rbac.NewRule("impersonate").Groups(legacyGroup).Resources("serviceaccounts").RuleOrDie(), rbac.NewRule(ReadWrite...).Groups(appsGroup).Resources("statefulsets", - "deployments", "deployments/scale", "deployments/rollback").RuleOrDie(), + "daemonsets", + "deployments", "deployments/scale", "deployments/rollback", + "replicasets", "replicasets/scale").RuleOrDie(), rbac.NewRule(ReadWrite...).Groups(autoscalingGroup).Resources("horizontalpodautoscalers").RuleOrDie(), @@ -189,6 +239,8 @@ func ClusterRoles() []rbac.ClusterRole { "deployments", "deployments/scale", "deployments/rollback", "ingresses", "replicasets", "replicasets/scale", "replicationcontrollers/scale").RuleOrDie(), + rbac.NewRule(ReadWrite...).Groups(policyGroup).Resources("poddisruptionbudgets").RuleOrDie(), + // additional admin powers rbac.NewRule("create").Groups(authorizationGroup).Resources("localsubjectaccessreviews").RuleOrDie(), rbac.NewRule(ReadWrite...).Groups(rbacGroup).Resources("roles", "rolebindings").RuleOrDie(), @@ -198,7 +250,7 @@ func ClusterRoles() []rbac.ClusterRole { // a role for a namespace level editor. It grants access to all user level actions in a namespace. 
// It does not grant powers for "privileged" resources which are domain of the system: `/status` // subresources or `quota`/`limits` which are used to control namespaces - ObjectMeta: metav1.ObjectMeta{Name: "edit"}, + ObjectMeta: metav1.ObjectMeta{Name: "system:aggregate-to-edit", Labels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-edit": "true"}}, Rules: []rbac.PolicyRule{ rbac.NewRule(ReadWrite...).Groups(legacyGroup).Resources("pods", "pods/attach", "pods/proxy", "pods/exec", "pods/portforward").RuleOrDie(), rbac.NewRule(ReadWrite...).Groups(legacyGroup).Resources("replicationcontrollers", "replicationcontrollers/scale", "serviceaccounts", @@ -211,7 +263,9 @@ func ClusterRoles() []rbac.ClusterRole { rbac.NewRule("impersonate").Groups(legacyGroup).Resources("serviceaccounts").RuleOrDie(), rbac.NewRule(ReadWrite...).Groups(appsGroup).Resources("statefulsets", - "deployments", "deployments/scale", "deployments/rollback").RuleOrDie(), + "daemonsets", + "deployments", "deployments/scale", "deployments/rollback", + "replicasets", "replicasets/scale").RuleOrDie(), rbac.NewRule(ReadWrite...).Groups(autoscalingGroup).Resources("horizontalpodautoscalers").RuleOrDie(), @@ -220,12 +274,14 @@ func ClusterRoles() []rbac.ClusterRole { rbac.NewRule(ReadWrite...).Groups(extensionsGroup).Resources("daemonsets", "deployments", "deployments/scale", "deployments/rollback", "ingresses", "replicasets", "replicasets/scale", "replicationcontrollers/scale").RuleOrDie(), + + rbac.NewRule(ReadWrite...).Groups(policyGroup).Resources("poddisruptionbudgets").RuleOrDie(), }, }, { // a role for namespace level viewing. It grants Read-only access to non-escalating resources in // a namespace. - ObjectMeta: metav1.ObjectMeta{Name: "view"}, + ObjectMeta: metav1.ObjectMeta{Name: "system:aggregate-to-view", Labels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-view": "true"}}, Rules: []rbac.PolicyRule{ rbac.NewRule(Read...).Groups(legacyGroup).Resources("pods", "replicationcontrollers", "replicationcontrollers/scale", "serviceaccounts", "services", "endpoints", "persistentvolumeclaims", "configmaps").RuleOrDie(), @@ -235,7 +291,10 @@ func ClusterRoles() []rbac.ClusterRole { // indicator of which namespaces you have access to. 
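The admin, edit and view roles above no longer carry inline rules; each now holds an AggregationRule whose label selector pulls in the matching system:aggregate-to-* roles, and the controller-manager's aggregation controller keeps the union up to date. A useful side effect is that any other ClusterRole carrying one of those labels is folded in as well. A minimal sketch using the public rbac/v1 types (rather than the internal helpers in this file) is below; the "example.com" group, "widgets" resource and role name are invented for illustration.

package main

import (
	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// crdAdminRole extends the built-in "admin" role for a hypothetical CRD. The
// aggregate-to-admin label is the only wiring needed: the aggregation
// controller copies these rules into "admin" automatically.
func crdAdminRole() *rbacv1.ClusterRole {
	return &rbacv1.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{
			Name:   "aggregate-widgets-to-admin",
			Labels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-admin": "true"},
		},
		Rules: []rbacv1.PolicyRule{{
			APIGroups: []string{"example.com"},
			Resources: []string{"widgets"},
			Verbs:     []string{"get", "list", "watch", "create", "update", "patch", "delete"},
		}},
	}
}

func main() {
	// Typically created once via clientset.RbacV1().ClusterRoles().Create(crdAdminRole()).
	_ = crdAdminRole()
}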
rbac.NewRule(Read...).Groups(legacyGroup).Resources("namespaces").RuleOrDie(), - rbac.NewRule(Read...).Groups(appsGroup).Resources("statefulsets", "deployments", "deployments/scale").RuleOrDie(), + rbac.NewRule(Read...).Groups(appsGroup).Resources("statefulsets", + "daemonsets", + "deployments", "deployments/scale", + "replicasets", "replicasets/scale").RuleOrDie(), rbac.NewRule(Read...).Groups(autoscalingGroup).Resources("horizontalpodautoscalers").RuleOrDie(), @@ -243,6 +302,8 @@ func ClusterRoles() []rbac.ClusterRole { rbac.NewRule(Read...).Groups(extensionsGroup).Resources("daemonsets", "deployments", "deployments/scale", "ingresses", "replicasets", "replicasets/scale", "replicationcontrollers/scale").RuleOrDie(), + + rbac.NewRule(Read...).Groups(policyGroup).Resources("poddisruptionbudgets").RuleOrDie(), }, }, { @@ -335,12 +396,13 @@ func ClusterRoles() []rbac.ClusterRole { rbac.NewRule(Read...).Groups(legacyGroup).Resources("nodes").RuleOrDie(), rbac.NewRule("get", "list", "watch", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(), rbac.NewRule("create").Groups(legacyGroup).Resources("pods/binding", "bindings").RuleOrDie(), - rbac.NewRule("update").Groups(legacyGroup).Resources("pods/status").RuleOrDie(), + rbac.NewRule("patch", "update").Groups(legacyGroup).Resources("pods/status").RuleOrDie(), // things that select pods rbac.NewRule(Read...).Groups(legacyGroup).Resources("services", "replicationcontrollers").RuleOrDie(), - rbac.NewRule(Read...).Groups(extensionsGroup).Resources("replicasets").RuleOrDie(), + rbac.NewRule(Read...).Groups(appsGroup, extensionsGroup).Resources("replicasets").RuleOrDie(), rbac.NewRule(Read...).Groups(appsGroup).Resources("statefulsets").RuleOrDie(), - // things that pods use + // things that pods use or applies to them + rbac.NewRule(Read...).Groups(policyGroup).Resources("poddisruptionbudgets").RuleOrDie(), rbac.NewRule(Read...).Groups(legacyGroup).Resources("persistentvolumeclaims", "persistentvolumes").RuleOrDie(), }, }, @@ -366,6 +428,13 @@ func ClusterRoles() []rbac.ClusterRole { eventsRule(), }, }, + { + ObjectMeta: metav1.ObjectMeta{Name: "system:aws-cloud-provider"}, + Rules: []rbac.PolicyRule{ + rbac.NewRule("get", "patch").Groups(legacyGroup).Resources("nodes").RuleOrDie(), + eventsRule(), + }, + }, { // a role making the csrapprover controller approve a node client CSR ObjectMeta: metav1.ObjectMeta{Name: "system:certificates.k8s.io:certificatesigningrequests:nodeclient"}, @@ -392,6 +461,18 @@ func ClusterRoles() []rbac.ClusterRole { }) } + if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) { + // Find the scheduler role + for i, role := range roles { + if role.Name == "system:kube-scheduler" { + pvRule := rbac.NewRule("update").Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie() + scRule := rbac.NewRule(Read...).Groups(storageGroup).Resources("storageclasses").RuleOrDie() + roles[i].Rules = append(role.Rules, pvRule, scRule) + break + } + } + } + addClusterRoleLabel(roles) return roles } @@ -408,6 +489,7 @@ func ClusterRoleBindings() []rbac.ClusterRoleBinding { rbac.NewClusterBinding("system:kube-controller-manager").Users(user.KubeControllerManager).BindingOrDie(), rbac.NewClusterBinding("system:kube-dns").SAs("kube-system", "kube-dns").BindingOrDie(), rbac.NewClusterBinding("system:kube-scheduler").Users(user.KubeScheduler).BindingOrDie(), + rbac.NewClusterBinding("system:aws-cloud-provider").SAs("kube-system", "aws-cloud-provider").BindingOrDie(), // This default binding of the system:node role to the 
system:nodes group is deprecated in 1.7 with the availability of the Node authorizer. // This leaves the binding, but with an empty set of subjects, so that tightening reconciliation can remove the subject. @@ -421,3 +503,11 @@ func ClusterRoleBindings() []rbac.ClusterRoleBinding { return rolebindings } + +func ClusterRolesToAggregate() map[string]string { + return map[string]string{ + "admin": "system:aggregate-to-admin", + "edit": "system:aggregate-to-edit", + "view": "system:aggregate-to-view", + } +} diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/rbac.go b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/rbac.go index 68ef7b2567fd..92094807630a 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/rbac.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/rbac.go @@ -69,12 +69,12 @@ func (v *authorizingVisitor) visit(rule *rbac.PolicyRule, err error) bool { return true } -func (r *RBACAuthorizer) Authorize(requestAttributes authorizer.Attributes) (bool, string, error) { +func (r *RBACAuthorizer) Authorize(requestAttributes authorizer.Attributes) (authorizer.Decision, string, error) { ruleCheckingVisitor := &authorizingVisitor{requestAttributes: requestAttributes} r.authorizationRuleResolver.VisitRulesFor(requestAttributes.GetUser(), requestAttributes.GetNamespace(), ruleCheckingVisitor.visit) if ruleCheckingVisitor.allowed { - return true, "", nil + return authorizer.DecisionAllow, "", nil } // Build a detailed log of the denial. @@ -120,7 +120,7 @@ func (r *RBACAuthorizer) Authorize(requestAttributes authorizer.Attributes) (boo if len(ruleCheckingVisitor.errors) > 0 { reason = fmt.Sprintf("%v", utilerrors.NewAggregate(ruleCheckingVisitor.errors)) } - return false, reason, nil + return authorizer.DecisionNoOpinion, reason, nil } func (r *RBACAuthorizer) RulesFor(user user.Info, namespace string) ([]authorizer.ResourceRuleInfo, []authorizer.NonResourceRuleInfo, bool, error) { @@ -174,14 +174,14 @@ func RulesAllow(requestAttributes authorizer.Attributes, rules ...rbac.PolicyRul func RuleAllows(requestAttributes authorizer.Attributes, rule *rbac.PolicyRule) bool { if requestAttributes.IsResourceRequest() { - resource := requestAttributes.GetResource() + combinedResource := requestAttributes.GetResource() if len(requestAttributes.GetSubresource()) > 0 { - resource = requestAttributes.GetResource() + "/" + requestAttributes.GetSubresource() + combinedResource = requestAttributes.GetResource() + "/" + requestAttributes.GetSubresource() } return rbac.VerbMatches(rule, requestAttributes.GetVerb()) && rbac.APIGroupMatches(rule, requestAttributes.GetAPIGroup()) && - rbac.ResourceMatches(rule, resource) && + rbac.ResourceMatches(rule, combinedResource, requestAttributes.GetSubresource()) && rbac.ResourceNameMatches(rule, requestAttributes.GetName()) } diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/BUILD index e68c63c33609..e20acec40603 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/BUILD @@ -9,21 +9,25 @@ load( go_test( name = "go_default_test", srcs = ["scheduler_test.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/scheduler", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", + "//pkg/controller/volume/persistentvolume:go_default_library", "//plugin/pkg/scheduler/algorithm:go_default_library", 
"//plugin/pkg/scheduler/algorithm/predicates:go_default_library", "//plugin/pkg/scheduler/core:go_default_library", "//plugin/pkg/scheduler/schedulercache:go_default_library", "//plugin/pkg/scheduler/testing:go_default_library", "//plugin/pkg/scheduler/util:go_default_library", + "//plugin/pkg/scheduler/volumebinder:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", "//vendor/k8s.io/client-go/tools/record:go_default_library", ], @@ -35,14 +39,17 @@ go_library( "scheduler.go", "testutil.go", ], + importpath = "k8s.io/kubernetes/plugin/pkg/scheduler", deps = [ "//pkg/features:go_default_library", "//plugin/pkg/scheduler/algorithm:go_default_library", + "//plugin/pkg/scheduler/algorithm/predicates:go_default_library", "//plugin/pkg/scheduler/api:go_default_library", "//plugin/pkg/scheduler/core:go_default_library", "//plugin/pkg/scheduler/metrics:go_default_library", "//plugin/pkg/scheduler/schedulercache:go_default_library", "//plugin/pkg/scheduler/util:go_default_library", + "//plugin/pkg/scheduler/volumebinder:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -51,7 +58,6 @@ go_library( "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/listers/core/v1:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", "//vendor/k8s.io/client-go/tools/record:go_default_library", ], ) @@ -76,6 +82,7 @@ filegroup( "//plugin/pkg/scheduler/schedulercache:all-srcs", "//plugin/pkg/scheduler/testing:all-srcs", "//plugin/pkg/scheduler/util:all-srcs", + "//plugin/pkg/scheduler/volumebinder:all-srcs", ], tags = ["automanaged"], ) diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/BUILD index b1e89a9fb16a..58a4684e1981 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/BUILD @@ -14,6 +14,7 @@ go_library( "types.go", "well_known_labels.go", ], + importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm", deps = [ "//plugin/pkg/scheduler/api:go_default_library", "//plugin/pkg/scheduler/schedulercache:go_default_library", @@ -30,6 +31,7 @@ go_test( "scheduler_interface_test.go", "types_test.go", ], + importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm", library = ":go_default_library", deps = [ "//plugin/pkg/scheduler/schedulercache:go_default_library", diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/BUILD index ff893c3c4165..453fbafb726a 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/BUILD @@ -12,11 +12,14 @@ go_library( "error.go", "metadata.go", "predicates.go", + "testing_helper.go", 
"utils.go", ], + importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates", deps = [ - "//pkg/api/v1/helper:go_default_library", - "//pkg/api/v1/helper/qos:go_default_library", + "//pkg/apis/core/v1/helper:go_default_library", + "//pkg/apis/core/v1/helper/qos:go_default_library", + "//pkg/cloudprovider/providers/aws:go_default_library", "//pkg/features:go_default_library", "//pkg/kubelet/apis:go_default_library", "//pkg/volume/util:go_default_library", @@ -24,15 +27,19 @@ go_library( "//plugin/pkg/scheduler/algorithm/priorities/util:go_default_library", "//plugin/pkg/scheduler/schedulercache:go_default_library", "//plugin/pkg/scheduler/util:go_default_library", + "//plugin/pkg/scheduler/volumebinder:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/api/storage/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/client-go/listers/core/v1:go_default_library", + "//vendor/k8s.io/client-go/listers/storage/v1:go_default_library", "//vendor/k8s.io/client-go/util/workqueue:go_default_library", - "//vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset:go_default_library", ], ) @@ -43,18 +50,21 @@ go_test( "predicates_test.go", "utils_test.go", ], + importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates", library = ":go_default_library", deps = [ - "//pkg/api/v1/helper:go_default_library", + "//pkg/apis/core/v1/helper:go_default_library", "//pkg/kubelet/apis:go_default_library", "//plugin/pkg/scheduler/algorithm:go_default_library", "//plugin/pkg/scheduler/schedulercache:go_default_library", "//plugin/pkg/scheduler/testing:go_default_library", "//plugin/pkg/scheduler/util:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/api/storage/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/error.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/error.go index 155fe18f431d..a4450bc5643e 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/error.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/error.go @@ -30,24 +30,28 @@ var ( // be made to pass by removing pods, or you change an existing predicate so that // it can never be made to pass by removing pods, you need to add the predicate // failure error in nodesWherePreemptionMightHelp() in scheduler/core/generic_scheduler.go - ErrDiskConflict = newPredicateFailureError("NoDiskConflict") - ErrVolumeZoneConflict = newPredicateFailureError("NoVolumeZoneConflict") - ErrNodeSelectorNotMatch = newPredicateFailureError("MatchNodeSelector") - ErrPodAffinityNotMatch = newPredicateFailureError("MatchInterPodAffinity") - ErrTaintsTolerationsNotMatch = newPredicateFailureError("PodToleratesNodeTaints") - 
ErrPodNotMatchHostName = newPredicateFailureError("HostName") - ErrPodNotFitsHostPorts = newPredicateFailureError("PodFitsHostPorts") - ErrNodeLabelPresenceViolated = newPredicateFailureError("CheckNodeLabelPresence") - ErrServiceAffinityViolated = newPredicateFailureError("CheckServiceAffinity") - ErrMaxVolumeCountExceeded = newPredicateFailureError("MaxVolumeCount") - ErrNodeUnderMemoryPressure = newPredicateFailureError("NodeUnderMemoryPressure") - ErrNodeUnderDiskPressure = newPredicateFailureError("NodeUnderDiskPressure") - ErrNodeOutOfDisk = newPredicateFailureError("NodeOutOfDisk") - ErrNodeNotReady = newPredicateFailureError("NodeNotReady") - ErrNodeNetworkUnavailable = newPredicateFailureError("NodeNetworkUnavailable") - ErrNodeUnschedulable = newPredicateFailureError("NodeUnschedulable") - ErrNodeUnknownCondition = newPredicateFailureError("NodeUnknownCondition") - ErrVolumeNodeConflict = newPredicateFailureError("NoVolumeNodeConflict") + ErrDiskConflict = newPredicateFailureError("NoDiskConflict") + ErrVolumeZoneConflict = newPredicateFailureError("NoVolumeZoneConflict") + ErrNodeSelectorNotMatch = newPredicateFailureError("MatchNodeSelector") + ErrPodAffinityNotMatch = newPredicateFailureError("MatchInterPodAffinity") + ErrPodAffinityRulesNotMatch = newPredicateFailureError("PodAffinityRulesNotMatch") + ErrPodAntiAffinityRulesNotMatch = newPredicateFailureError("PodAntiAffinityRulesNotMatch") + ErrExistingPodsAntiAffinityRulesNotMatch = newPredicateFailureError("ExistingPodsAntiAffinityRulesNotMatch") + ErrTaintsTolerationsNotMatch = newPredicateFailureError("PodToleratesNodeTaints") + ErrPodNotMatchHostName = newPredicateFailureError("HostName") + ErrPodNotFitsHostPorts = newPredicateFailureError("PodFitsHostPorts") + ErrNodeLabelPresenceViolated = newPredicateFailureError("CheckNodeLabelPresence") + ErrServiceAffinityViolated = newPredicateFailureError("CheckServiceAffinity") + ErrMaxVolumeCountExceeded = newPredicateFailureError("MaxVolumeCount") + ErrNodeUnderMemoryPressure = newPredicateFailureError("NodeUnderMemoryPressure") + ErrNodeUnderDiskPressure = newPredicateFailureError("NodeUnderDiskPressure") + ErrNodeOutOfDisk = newPredicateFailureError("NodeOutOfDisk") + ErrNodeNotReady = newPredicateFailureError("NodeNotReady") + ErrNodeNetworkUnavailable = newPredicateFailureError("NodeNetworkUnavailable") + ErrNodeUnschedulable = newPredicateFailureError("NodeUnschedulable") + ErrNodeUnknownCondition = newPredicateFailureError("NodeUnknownCondition") + ErrVolumeNodeConflict = newPredicateFailureError("VolumeNodeAffinityConflict") + ErrVolumeBindConflict = newPredicateFailureError("VolumeBindingNoMatch") // ErrFakePredicate is used for test only. The fake predicates returning false also returns error // as ErrFakePredicate. ErrFakePredicate = newPredicateFailureError("FakePredicateError") diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/metadata.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/metadata.go index bb15272db3da..c0eda6a24dee 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/metadata.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/metadata.go @@ -46,7 +46,7 @@ type predicateMetadata struct { pod *v1.Pod podBestEffort bool podRequest *schedulercache.Resource - podPorts map[int]bool + podPorts map[string]bool //key is a pod full name with the anti-affinity rules. 
matchingAntiAffinityTerms map[string][]matchingPodAntiAffinityTerm serviceAffinityInUse bool @@ -172,7 +172,7 @@ func (meta *predicateMetadata) ShallowCopy() algorithm.PredicateMetadata { podRequest: meta.podRequest, serviceAffinityInUse: meta.serviceAffinityInUse, } - newPredMeta.podPorts = map[int]bool{} + newPredMeta.podPorts = map[string]bool{} for k, v := range meta.podPorts { newPredMeta.podPorts[k] = v } diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/predicates.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/predicates.go index d047c0ffcdfe..6cdaecb83cfe 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/predicates.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/predicates.go @@ -17,21 +17,25 @@ limitations under the License. package predicates import ( + "errors" "fmt" - "math/rand" + "os" "strconv" "sync" - "time" "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/rand" utilfeature "k8s.io/apiserver/pkg/util/feature" corelisters "k8s.io/client-go/listers/core/v1" + storagelisters "k8s.io/client-go/listers/storage/v1" "k8s.io/client-go/util/workqueue" - v1helper "k8s.io/kubernetes/pkg/api/v1/helper" - v1qos "k8s.io/kubernetes/pkg/api/v1/helper/qos" + v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" + v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos" + "k8s.io/kubernetes/pkg/cloudprovider/providers/aws" "k8s.io/kubernetes/pkg/features" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" volumeutil "k8s.io/kubernetes/pkg/volume/util" @@ -39,18 +43,37 @@ import ( priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" schedutil "k8s.io/kubernetes/plugin/pkg/scheduler/util" - "k8s.io/metrics/pkg/client/clientset_generated/clientset" "github.com/golang/glog" + "k8s.io/kubernetes/plugin/pkg/scheduler/volumebinder" ) const ( MatchInterPodAffinity = "MatchInterPodAffinity" + CheckVolumeBinding = "CheckVolumeBinding" + + // DefaultMaxGCEPDVolumes defines the maximum number of PD Volumes for GCE + // GCE instances can have up to 16 PD volumes attached. + DefaultMaxGCEPDVolumes = 16 + // DefaultMaxAzureDiskVolumes defines the maximum number of PD Volumes for Azure + // Larger Azure VMs can actually have much more disks attached. + // TODO We should determine the max based on VM size + DefaultMaxAzureDiskVolumes = 16 + + // KubeMaxPDVols defines the maximum number of PD Volumes per kubelet + KubeMaxPDVols = "KUBE_MAX_PD_VOLS" + + // for EBSVolumeFilter + EBSVolumeFilterType = "EBS" + // for GCEPDVolumeFilter + GCEPDVolumeFilterType = "GCE" + // for AzureDiskVolumeFilter + AzureDiskVolumeFilterType = "AzureDisk" ) // IMPORTANT NOTE for predicate developers: // We are using cached predicate result for pods belonging to the same equivalence class. -// So when updating a existing predicate, you should consider whether your change will introduce new +// So when updating an existing predicate, you should consider whether your change will introduce new // dependency to attributes of any API object like Pod, Node, Service etc. // If yes, you are expected to invalidate the cached predicate result for related API object change. 
// For example: @@ -97,7 +120,7 @@ func (c *CachedNodeInfo) GetNodeInfo(id string) (*v1.Node, error) { node, err := c.Get(id) if apierrors.IsNotFound(err) { - return nil, fmt.Errorf("node '%v' not found", id) + return nil, err } if err != nil { @@ -107,6 +130,19 @@ func (c *CachedNodeInfo) GetNodeInfo(id string) (*v1.Node, error) { return node, nil } +type StorageClassInfo interface { + GetStorageClassInfo(className string) (*storagev1.StorageClass, error) +} + +// CachedStorageClassInfo implements StorageClassInfo +type CachedStorageClassInfo struct { + storagelisters.StorageClassLister +} + +func (c *CachedStorageClassInfo) GetStorageClassInfo(className string) (*storagev1.StorageClass, error) { + return c.Get(className) +} + func isVolumeConflict(volume v1.Volume, pod *v1.Pod) bool { // fast path if there is no conflict checking targets. if volume.GCEPersistentDisk == nil && volume.AWSElasticBlockStore == nil && volume.RBD == nil && volume.ISCSI == nil { @@ -145,7 +181,7 @@ func isVolumeConflict(volume v1.Volume, pod *v1.Pod) bool { // two RBDs images are the same if they share the same Ceph monitor, are in the same RADOS Pool, and have the same image name // only one read-write mount is permitted for the same RBD image. // same RBD image mounted by multiple Pods conflicts unless all Pods mount the image read-only - if haveSame(mon, emon) && pool == epool && image == eimage && !(volume.RBD.ReadOnly && existingVolume.RBD.ReadOnly) { + if haveOverlap(mon, emon) && pool == epool && image == eimage && !(volume.RBD.ReadOnly && existingVolume.RBD.ReadOnly) { return true } } @@ -179,6 +215,11 @@ type MaxPDVolumeCountChecker struct { maxVolumes int pvInfo PersistentVolumeInfo pvcInfo PersistentVolumeClaimInfo + + // The string below is generated randomly during the struct's initialization. + // It is used to prefix volumeID generated inside the predicate() method to + // avoid conflicts with any real volume. + randomVolumeIDPrefix string } // VolumeFilter contains information on how to filter PD Volumes when checking PD Volume caps @@ -189,24 +230,61 @@ type VolumeFilter struct { } // NewMaxPDVolumeCountPredicate creates a predicate which evaluates whether a pod can fit based on the -// number of volumes which match a filter that it requests, and those that are already present. The -// maximum number is configurable to accommodate different systems. +// number of volumes which match a filter that it requests, and those that are already present. // // The predicate looks for both volumes used directly, as well as PVC volumes that are backed by relevant volume // types, counts the number of unique volumes, and rejects the new pod if it would place the total count over // the maximum. 
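NewMaxPDVolumeCountPredicate now selects the filter and default limit from a filter-type name, and getMaxVols (shown just below) lets the KUBE_MAX_PD_VOLS environment variable override that default. A rough usage sketch, leaning on the fake PV/PVC listers added by testing_helper.go later in this patch, might look like this; the empty pod and the limit of 10 are purely illustrative.

package main

import (
	"fmt"
	"os"

	"k8s.io/api/core/v1"
	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
	"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
)

func main() {
	// Lower the per-node EBS attach limit for this scheduler process; without
	// the variable, the AWS cloud provider default is used.
	os.Setenv("KUBE_MAX_PD_VOLS", "10")

	pred := predicates.NewMaxPDVolumeCountPredicate(
		predicates.EBSVolumeFilterType,
		predicates.FakePersistentVolumeInfo{},      // no known PVs
		predicates.FakePersistentVolumeClaimInfo{}, // no known PVCs
	)

	// A pod without volumes trivially fits.
	fits, reasons, err := pred(&v1.Pod{}, nil, schedulercache.NewNodeInfo())
	fmt.Println(fits, reasons, err) // true [] <nil>
}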
-func NewMaxPDVolumeCountPredicate(filter VolumeFilter, maxVolumes int, pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo) algorithm.FitPredicate { +func NewMaxPDVolumeCountPredicate(filterName string, pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo) algorithm.FitPredicate { + + var filter VolumeFilter + var maxVolumes int + + switch filterName { + + case EBSVolumeFilterType: + filter = EBSVolumeFilter + maxVolumes = getMaxVols(aws.DefaultMaxEBSVolumes) + case GCEPDVolumeFilterType: + filter = GCEPDVolumeFilter + maxVolumes = getMaxVols(DefaultMaxGCEPDVolumes) + case AzureDiskVolumeFilterType: + filter = AzureDiskVolumeFilter + maxVolumes = getMaxVols(DefaultMaxAzureDiskVolumes) + default: + glog.Fatalf("Wrong filterName, Only Support %v %v %v ", EBSVolumeFilterType, + GCEPDVolumeFilterType, AzureDiskVolumeFilterType) + return nil + + } c := &MaxPDVolumeCountChecker{ - filter: filter, - maxVolumes: maxVolumes, - pvInfo: pvInfo, - pvcInfo: pvcInfo, + filter: filter, + maxVolumes: maxVolumes, + pvInfo: pvInfo, + pvcInfo: pvcInfo, + randomVolumeIDPrefix: rand.String(32), } return c.predicate } +// getMaxVols checks the max PD volumes environment variable, otherwise returning a default value +func getMaxVols(defaultVal int) int { + if rawMaxVols := os.Getenv(KubeMaxPDVols); rawMaxVols != "" { + if parsedMaxVols, err := strconv.Atoi(rawMaxVols); err != nil { + glog.Errorf("Unable to parse maximum PD volumes value, using default of %v: %v", defaultVal, err) + } else if parsedMaxVols <= 0 { + glog.Errorf("Maximum PD volumes must be a positive value, using default of %v", defaultVal) + } else { + return parsedMaxVols + } + } + + return defaultVal +} + func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []v1.Volume, namespace string, filteredVolumes map[string]bool) error { + for i := range volumes { vol := &volumes[i] if id, ok := c.filter.FilterVolume(vol); ok { @@ -216,40 +294,38 @@ func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []v1.Volume, namespace s if pvcName == "" { return fmt.Errorf("PersistentVolumeClaim had no name") } + + // Until we know real ID of the volume use namespace/pvcName as substitute + // with a random prefix (calculated and stored inside 'c' during initialization) + // to avoid conflicts with existing volume IDs. + pvId := fmt.Sprintf("%s-%s/%s", c.randomVolumeIDPrefix, namespace, pvcName) + pvc, err := c.pvcInfo.GetPersistentVolumeClaimInfo(namespace, pvcName) - if err != nil { + if err != nil || pvc == nil { // if the PVC is not found, log the error and count the PV towards the PV limit - // generate a random volume ID since its required for de-dup glog.V(4).Infof("Unable to look up PVC info for %s/%s, assuming PVC matches predicate when counting limits: %v", namespace, pvcName, err) - source := rand.NewSource(time.Now().UnixNano()) - generatedID := "missingPVC" + strconv.Itoa(rand.New(source).Intn(1000000)) - filteredVolumes[generatedID] = true - return nil + filteredVolumes[pvId] = true + continue } - if pvc == nil { - return fmt.Errorf("PersistentVolumeClaim not found: %q", pvcName) + if pvc.Spec.VolumeName == "" { + // PVC is not bound. It was either deleted and created again or + // it was forcefuly unbound by admin. 
The pod can still use the + // original PV where it was bound to -> log the error and count + // the PV towards the PV limit + glog.V(4).Infof("PVC %s/%s is not bound, assuming PVC matches predicate when counting limits", namespace, pvcName) + filteredVolumes[pvId] = true + continue } pvName := pvc.Spec.VolumeName - if pvName == "" { - return fmt.Errorf("PersistentVolumeClaim is not bound: %q", pvcName) - } - pv, err := c.pvInfo.GetPersistentVolumeInfo(pvName) - if err != nil { + if err != nil || pv == nil { // if the PV is not found, log the error // and count the PV towards the PV limit - // generate a random volume ID since it is required for de-dup glog.V(4).Infof("Unable to look up PV info for %s/%s/%s, assuming PV matches predicate when counting limits: %v", namespace, pvcName, pvName, err) - source := rand.NewSource(time.Now().UnixNano()) - generatedID := "missingPV" + strconv.Itoa(rand.New(source).Intn(1000000)) - filteredVolumes[generatedID] = true - return nil - } - - if pv == nil { - return fmt.Errorf("PersistentVolume not found: %q", pvName) + filteredVolumes[pvId] = true + continue } if id, ok := c.filter.FilterPersistentVolume(pv); ok { @@ -356,8 +432,9 @@ var AzureDiskVolumeFilter VolumeFilter = VolumeFilter{ } type VolumeZoneChecker struct { - pvInfo PersistentVolumeInfo - pvcInfo PersistentVolumeClaimInfo + pvInfo PersistentVolumeInfo + pvcInfo PersistentVolumeClaimInfo + classInfo StorageClassInfo } // NewVolumeZonePredicate evaluates if a pod can fit due to the volumes it requests, given @@ -374,10 +451,11 @@ type VolumeZoneChecker struct { // determining the zone of a volume during scheduling, and that is likely to // require calling out to the cloud provider. It seems that we are moving away // from inline volume declarations anyway. 
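The zone checker gaining a classInfo field above also changes its handling of unbound claims: with the VolumeScheduling gate on, an unbound claim whose StorageClass requests WaitForFirstConsumer is skipped instead of failing the pod outright, since binding is deferred until a node has been chosen (the check appears a little further down in this hunk). A small sketch of such a class/claim pair, using the fake listers from testing_helper.go, is below; the "local-slow" class and "data" claim names are invented.

package main

import (
	"k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
)

func main() {
	// A late-binding class and an unbound claim that references it.
	waitForConsumer := storagev1.VolumeBindingWaitForFirstConsumer
	classes := predicates.FakeStorageClassInfo{{
		ObjectMeta:        metav1.ObjectMeta{Name: "local-slow"},
		Provisioner:       "kubernetes.io/no-provisioner",
		VolumeBindingMode: &waitForConsumer,
	}}

	className := "local-slow"
	claims := predicates.FakePersistentVolumeClaimInfo{{
		ObjectMeta: metav1.ObjectMeta{Name: "data", Namespace: "default"},
		// Spec.VolumeName is left empty, i.e. the claim is still unbound.
		Spec: v1.PersistentVolumeClaimSpec{StorageClassName: &className},
	}}

	// With VolumeScheduling enabled, the zone predicate skips this claim rather
	// than returning "PersistentVolumeClaim is not bound".
	_ = predicates.NewVolumeZonePredicate(predicates.FakePersistentVolumeInfo{}, claims, classes)
}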
-func NewVolumeZonePredicate(pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo) algorithm.FitPredicate { +func NewVolumeZonePredicate(pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo, classInfo StorageClassInfo) algorithm.FitPredicate { c := &VolumeZoneChecker{ - pvInfo: pvInfo, - pvcInfo: pvcInfo, + pvInfo: pvInfo, + pvcInfo: pvcInfo, + classInfo: classInfo, } return c.predicate } @@ -429,6 +507,21 @@ func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMetad pvName := pvc.Spec.VolumeName if pvName == "" { + if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) { + scName := pvc.Spec.StorageClassName + if scName != nil && len(*scName) > 0 { + class, _ := c.classInfo.GetStorageClassInfo(*scName) + if class != nil { + if class.VolumeBindingMode == nil { + return false, nil, fmt.Errorf("VolumeBindingMode not set for StorageClass %q", scName) + } + if *class.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer { + // Skip unbound volumes + continue + } + } + } + } return false, nil, fmt.Errorf("PersistentVolumeClaim is not bound: %q", pvcName) } @@ -514,16 +607,10 @@ func GetResourceRequest(pod *v1.Pod) *schedulercache.Resource { result.NvidiaGPU = gpu } default: - if v1helper.IsExtendedResourceName(rName) { - value := rQuantity.Value() - if value > result.ExtendedResources[rName] { - result.SetExtended(rName, value) - } - } - if v1helper.IsHugePageResourceName(rName) { + if v1helper.IsScalarResourceName(rName) { value := rQuantity.Value() - if value > result.HugePages[rName] { - result.SetHugePages(rName, value) + if value > result.ScalarResources[rName] { + result.SetScalar(rName, value) } } } @@ -563,8 +650,7 @@ func PodFitsResources(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *s podRequest.Memory == 0 && podRequest.NvidiaGPU == 0 && podRequest.EphemeralStorage == 0 && - len(podRequest.ExtendedResources) == 0 && - len(podRequest.HugePages) == 0 { + len(podRequest.ScalarResources) == 0 { return len(predicateFails) == 0, predicateFails, nil } @@ -583,15 +669,9 @@ func PodFitsResources(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *s predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourceEphemeralStorage, podRequest.EphemeralStorage, nodeInfo.RequestedResource().EphemeralStorage, allocatable.EphemeralStorage)) } - for rName, rQuant := range podRequest.ExtendedResources { - if allocatable.ExtendedResources[rName] < rQuant+nodeInfo.RequestedResource().ExtendedResources[rName] { - predicateFails = append(predicateFails, NewInsufficientResourceError(rName, podRequest.ExtendedResources[rName], nodeInfo.RequestedResource().ExtendedResources[rName], allocatable.ExtendedResources[rName])) - } - } - - for rName, rQuant := range podRequest.HugePages { - if allocatable.HugePages[rName] < rQuant+nodeInfo.RequestedResource().HugePages[rName] { - predicateFails = append(predicateFails, NewInsufficientResourceError(rName, podRequest.HugePages[rName], nodeInfo.RequestedResource().HugePages[rName], allocatable.HugePages[rName])) + for rName, rQuant := range podRequest.ScalarResources { + if allocatable.ScalarResources[rName] < rQuant+nodeInfo.RequestedResource().ScalarResources[rName] { + predicateFails = append(predicateFails, NewInsufficientResourceError(rName, podRequest.ScalarResources[rName], nodeInfo.RequestedResource().ScalarResources[rName], allocatable.ScalarResources[rName])) } } @@ -843,7 +923,7 @@ func (s *ServiceAffinity) checkServiceAffinity(pod *v1.Pod, meta algorithm.Predi 
// PodFitsHostPorts checks if a node has free ports for the requested pod ports. func PodFitsHostPorts(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { - var wantPorts map[int]bool + var wantPorts map[string]bool if predicateMeta, ok := meta.(*predicateMetadata); ok { wantPorts = predicateMeta.podPorts } else { @@ -855,29 +935,28 @@ func PodFitsHostPorts(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *s } existingPorts := nodeInfo.UsedPorts() - for wport := range wantPorts { - if wport != 0 && existingPorts[wport] { - return false, []algorithm.PredicateFailureReason{ErrPodNotFitsHostPorts}, nil - } + + // try to see whether existingPorts and wantPorts will conflict or not + if portsConflict(existingPorts, wantPorts) { + return false, []algorithm.PredicateFailureReason{ErrPodNotFitsHostPorts}, nil } + return true, nil, nil } // search two arrays and return true if they have at least one common element; return false otherwise -func haveSame(a1, a2 []string) bool { - m := map[string]int{} +func haveOverlap(a1, a2 []string) bool { + m := map[string]bool{} for _, val := range a1 { - m[val] = 1 + m[val] = true } for _, val := range a2 { - m[val] = m[val] + 1 - } - for _, val := range m { - if val > 1 { + if _, ok := m[val]; ok { return true } } + return false } @@ -970,8 +1049,9 @@ func (c *PodAffinityChecker) InterPodAffinityMatches(pod *v1.Pod, meta algorithm if node == nil { return false, nil, fmt.Errorf("node not found") } - if !c.satisfiesExistingPodsAntiAffinity(pod, meta, nodeInfo) { - return false, []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch}, nil + if failedPredicates, error := c.satisfiesExistingPodsAntiAffinity(pod, meta, nodeInfo); failedPredicates != nil { + failedPredicates := append([]algorithm.PredicateFailureReason{ErrPodAffinityNotMatch}, failedPredicates) + return false, failedPredicates, error } // Now check if requirements will be satisfied on this node. 
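Earlier in this hunk, GetResourceRequest and PodFitsResources collapse the separate ExtendedResources and HugePages maps into a single ScalarResources map. A small sketch of what now flows through that map, with a made-up extended resource name and a placeholder image, is below.

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
)

func main() {
	pod := &v1.Pod{
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name:  "app",
				Image: "nginx",
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{
						// One extended resource and one hugepages size; both are
						// scalar resources as far as the scheduler is concerned.
						"example.com/dongle": resource.MustParse("2"),
						"hugepages-2Mi":      resource.MustParse("64Mi"),
					},
				},
			}},
		},
	}

	req := predicates.GetResourceRequest(pod)
	// Both requests land in the same map; quantities are stored via Value(),
	// so the hugepages request shows up in bytes.
	fmt.Println(req.ScalarResources["example.com/dongle"], req.ScalarResources["hugepages-2Mi"])
}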
@@ -979,8 +1059,9 @@ func (c *PodAffinityChecker) InterPodAffinityMatches(pod *v1.Pod, meta algorithm if affinity == nil || (affinity.PodAffinity == nil && affinity.PodAntiAffinity == nil) { return true, nil, nil } - if !c.satisfiesPodsAffinityAntiAffinity(pod, nodeInfo, affinity) { - return false, []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch}, nil + if failedPredicates, error := c.satisfiesPodsAffinityAntiAffinity(pod, nodeInfo, affinity); failedPredicates != nil { + failedPredicates := append([]algorithm.PredicateFailureReason{ErrPodAffinityNotMatch}, failedPredicates) + return false, failedPredicates, error } if glog.V(10) { @@ -1022,7 +1103,7 @@ func (c *PodAffinityChecker) anyPodMatchesPodAffinityTerm(pod *v1.Pod, allPods [ return false, matchingPodExists, nil } -func getPodAffinityTerms(podAffinity *v1.PodAffinity) (terms []v1.PodAffinityTerm) { +func GetPodAffinityTerms(podAffinity *v1.PodAffinity) (terms []v1.PodAffinityTerm) { if podAffinity != nil { if len(podAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0 { terms = podAffinity.RequiredDuringSchedulingIgnoredDuringExecution @@ -1035,7 +1116,7 @@ func getPodAffinityTerms(podAffinity *v1.PodAffinity) (terms []v1.PodAffinityTer return terms } -func getPodAntiAffinityTerms(podAntiAffinity *v1.PodAntiAffinity) (terms []v1.PodAffinityTerm) { +func GetPodAntiAffinityTerms(podAntiAffinity *v1.PodAntiAffinity) (terms []v1.PodAffinityTerm) { if podAntiAffinity != nil { if len(podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0 { terms = podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution @@ -1085,7 +1166,7 @@ func getMatchingAntiAffinityTerms(pod *v1.Pod, nodeInfoMap map[string]*scheduler if affinity == nil { continue } - for _, term := range getPodAntiAffinityTerms(affinity.PodAntiAffinity) { + for _, term := range GetPodAntiAffinityTerms(affinity.PodAntiAffinity) { namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(existingPod, &term) selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector) if err != nil { @@ -1112,7 +1193,7 @@ func getMatchingAntiAffinityTermsOfExistingPod(newPod *v1.Pod, existingPod *v1.P var result []matchingPodAntiAffinityTerm affinity := existingPod.Spec.Affinity if affinity != nil && affinity.PodAntiAffinity != nil { - for _, term := range getPodAntiAffinityTerms(affinity.PodAntiAffinity) { + for _, term := range GetPodAntiAffinityTerms(affinity.PodAntiAffinity) { namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(existingPod, &term) selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector) if err != nil { @@ -1133,6 +1214,10 @@ func (c *PodAffinityChecker) getMatchingAntiAffinityTerms(pod *v1.Pod, allPods [ if affinity != nil && affinity.PodAntiAffinity != nil { existingPodNode, err := c.info.GetNodeInfo(existingPod.Spec.NodeName) if err != nil { + if apierrors.IsNotFound(err) { + glog.Errorf("Node not found, %v", existingPod.Spec.NodeName) + continue + } return nil, err } existingPodMatchingTerms, err := getMatchingAntiAffinityTermsOfExistingPod(pod, existingPod, existingPodNode) @@ -1150,10 +1235,10 @@ func (c *PodAffinityChecker) getMatchingAntiAffinityTerms(pod *v1.Pod, allPods [ // Checks if scheduling the pod onto this node would break any anti-affinity // rules indicated by the existing pods. 
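The affinity checks in this hunk now return distinct failure reasons: ErrPodAffinityRulesNotMatch and ErrPodAntiAffinityRulesNotMatch when the incoming pod's own terms cannot be satisfied, and ErrExistingPodsAntiAffinityRulesNotMatch when an already-running pod's anti-affinity would be violated, each reported alongside the umbrella ErrPodAffinityNotMatch. For orientation, a pod carrying the kind of required anti-affinity term these checks evaluate might look like the following; the labels, names and image are placeholders.

package main

import (
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// webPod refuses to share a node (topologyKey kubernetes.io/hostname) with any
// other pod labeled app=web.
func webPod() *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "web-1", Labels: map[string]string{"app": "web"}},
		Spec: v1.PodSpec{
			Affinity: &v1.Affinity{
				PodAntiAffinity: &v1.PodAntiAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{{
						LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}},
						TopologyKey:   "kubernetes.io/hostname",
					}},
				},
			},
			Containers: []v1.Container{{Name: "web", Image: "nginx"}},
		},
	}
}

func main() { _ = webPod() }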
-func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) bool { +func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (algorithm.PredicateFailureReason, error) { node := nodeInfo.Node() if node == nil { - return false + return ErrExistingPodsAntiAffinityRulesNotMatch, fmt.Errorf("Node is nil") } var matchingTerms map[string][]matchingPodAntiAffinityTerm if predicateMeta, ok := meta.(*predicateMetadata); ok { @@ -1163,25 +1248,28 @@ func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, meta // present in nodeInfo. Pods on other nodes pass the filter. filteredPods, err := c.podLister.FilteredList(nodeInfo.Filter, labels.Everything()) if err != nil { - glog.Errorf("Failed to get all pods, %+v", err) - return false + errMessage := fmt.Sprintf("Failed to get all pods, %+v", err) + glog.Error(errMessage) + return ErrExistingPodsAntiAffinityRulesNotMatch, errors.New(errMessage) } if matchingTerms, err = c.getMatchingAntiAffinityTerms(pod, filteredPods); err != nil { - glog.Errorf("Failed to get all terms that pod %+v matches, err: %+v", podName(pod), err) - return false + errMessage := fmt.Sprintf("Failed to get all terms that pod %+v matches, err: %+v", podName(pod), err) + glog.Error(errMessage) + return ErrExistingPodsAntiAffinityRulesNotMatch, errors.New(errMessage) } } for _, terms := range matchingTerms { for i := range terms { term := &terms[i] if len(term.term.TopologyKey) == 0 { - glog.Error("Empty topologyKey is not allowed except for PreferredDuringScheduling pod anti-affinity") - return false + errMessage := fmt.Sprintf("Empty topologyKey is not allowed except for PreferredDuringScheduling pod anti-affinity") + glog.Error(errMessage) + return ErrExistingPodsAntiAffinityRulesNotMatch, errors.New(errMessage) } if priorityutil.NodesHaveSameTopologyKey(node, term.node, term.term.TopologyKey) { glog.V(10).Infof("Cannot schedule pod %+v onto node %v,because of PodAntiAffinityTerm %v", podName(pod), node.Name, term.term) - return false + return ErrExistingPodsAntiAffinityRulesNotMatch, nil } } } @@ -1191,27 +1279,27 @@ func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, meta glog.Infof("Schedule Pod %+v on Node %+v is allowed, existing pods anti-affinity rules satisfied.", podName(pod), node.Name) } - return true + return nil, nil } // Checks if scheduling the pod onto this node would break any rules of this pod. -func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod, nodeInfo *schedulercache.NodeInfo, affinity *v1.Affinity) bool { +func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod, nodeInfo *schedulercache.NodeInfo, affinity *v1.Affinity) (algorithm.PredicateFailureReason, error) { node := nodeInfo.Node() if node == nil { - return false + return ErrPodAffinityRulesNotMatch, fmt.Errorf("Node is nil") } filteredPods, err := c.podLister.FilteredList(nodeInfo.Filter, labels.Everything()) if err != nil { - return false + return ErrPodAffinityRulesNotMatch, err } // Check all affinity terms. 
- for _, term := range getPodAffinityTerms(affinity.PodAffinity) { + for _, term := range GetPodAffinityTerms(affinity.PodAffinity) { termMatches, matchingPodExists, err := c.anyPodMatchesPodAffinityTerm(pod, filteredPods, node, &term) if err != nil { - glog.Errorf("Cannot schedule pod %+v onto node %v, because of PodAffinityTerm %v, err: %v", - podName(pod), node.Name, term, err) - return false + errMessage := fmt.Sprintf("Cannot schedule pod %+v onto node %v, because of PodAffinityTerm %v, err: %v", podName(pod), node.Name, term, err) + glog.Error(errMessage) + return ErrPodAffinityRulesNotMatch, errors.New(errMessage) } if !termMatches { // If the requirement matches a pod's own labels are namespace, and there are @@ -1220,31 +1308,31 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod, node if matchingPodExists { glog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAffinityTerm %v", podName(pod), node.Name, term) - return false + return ErrPodAffinityRulesNotMatch, nil } namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(pod, &term) selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector) if err != nil { - glog.Errorf("Cannot parse selector on term %v for pod %v. Details %v", - term, podName(pod), err) - return false + errMessage := fmt.Sprintf("Cannot parse selector on term %v for pod %v. Details %v", term, podName(pod), err) + glog.Error(errMessage) + return ErrPodAffinityRulesNotMatch, errors.New(errMessage) } match := priorityutil.PodMatchesTermsNamespaceAndSelector(pod, namespaces, selector) if !match { glog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAffinityTerm %v", podName(pod), node.Name, term) - return false + return ErrPodAffinityRulesNotMatch, nil } } } // Check all anti-affinity terms. - for _, term := range getPodAntiAffinityTerms(affinity.PodAntiAffinity) { + for _, term := range GetPodAntiAffinityTerms(affinity.PodAntiAffinity) { termMatches, _, err := c.anyPodMatchesPodAffinityTerm(pod, filteredPods, node, &term) if err != nil || termMatches { glog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAntiAffinityTerm %v, err: %v", podName(pod), node.Name, term, err) - return false + return ErrPodAntiAffinityRulesNotMatch, nil } } @@ -1254,7 +1342,7 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod, node glog.Infof("Schedule Pod %+v on Node %+v is allowed, pod affinity/anti-affinity constraints satisfied.", podName(pod), node.Name) } - return true + return nil, nil } // PodToleratesNodeTaints checks if a pod tolerations can tolerate the node taints @@ -1352,33 +1440,30 @@ func CheckNodeConditionPredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, return len(reasons) == 0, reasons, nil } -type VolumeNodeChecker struct { - pvInfo PersistentVolumeInfo - pvcInfo PersistentVolumeClaimInfo - client clientset.Interface +type VolumeBindingChecker struct { + binder *volumebinder.VolumeBinder } -// NewVolumeNodePredicate evaluates if a pod can fit due to the volumes it requests, given -// that some volumes have node topology constraints, particularly when using Local PVs. -// The requirement is that any pod that uses a PVC that is bound to a PV with topology constraints -// must be scheduled to a node that satisfies the PV's topology labels. 
-func NewVolumeNodePredicate(pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo, client clientset.Interface) algorithm.FitPredicate { - c := &VolumeNodeChecker{ - pvInfo: pvInfo, - pvcInfo: pvcInfo, - client: client, +// NewVolumeBindingPredicate evaluates if a pod can fit due to the volumes it requests, +// for both bound and unbound PVCs. +// +// For PVCs that are bound, then it checks that the corresponding PV's node affinity is +// satisfied by the given node. +// +// For PVCs that are unbound, it tries to find available PVs that can satisfy the PVC requirements +// and that the PV node affinity is satisfied by the given node. +// +// The predicate returns true if all bound PVCs have compatible PVs with the node, and if all unbound +// PVCs can be matched with an available and node-compatible PV. +func NewVolumeBindingPredicate(binder *volumebinder.VolumeBinder) algorithm.FitPredicate { + c := &VolumeBindingChecker{ + binder: binder, } return c.predicate } -func (c *VolumeNodeChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { - if !utilfeature.DefaultFeatureGate.Enabled(features.PersistentLocalVolumes) { - return true, nil, nil - } - - // If a pod doesn't have any volume attached to it, the predicate will always be true. - // Thus we make a fast path for it, to avoid unnecessary computations in this case. - if len(pod.Spec.Volumes) == 0 { +func (c *VolumeBindingChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { + if !utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) { return true, nil, nil } @@ -1387,45 +1472,27 @@ func (c *VolumeNodeChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMetad return false, nil, fmt.Errorf("node not found") } - glog.V(2).Infof("Checking for prebound volumes with node affinity") - namespace := pod.Namespace - manifest := &(pod.Spec) - for i := range manifest.Volumes { - volume := &manifest.Volumes[i] - if volume.PersistentVolumeClaim == nil { - continue - } - pvcName := volume.PersistentVolumeClaim.ClaimName - if pvcName == "" { - return false, nil, fmt.Errorf("PersistentVolumeClaim had no name") - } - pvc, err := c.pvcInfo.GetPersistentVolumeClaimInfo(namespace, pvcName) - if err != nil { - return false, nil, err - } + unboundSatisfied, boundSatisfied, err := c.binder.Binder.FindPodVolumes(pod, node.Name) + if err != nil { + return false, nil, err + } - if pvc == nil { - return false, nil, fmt.Errorf("PersistentVolumeClaim was not found: %q", pvcName) - } - pvName := pvc.Spec.VolumeName - if pvName == "" { - return false, nil, fmt.Errorf("PersistentVolumeClaim is not bound: %q", pvcName) - } + failReasons := []algorithm.PredicateFailureReason{} + if !boundSatisfied { + glog.V(5).Infof("Bound PVs not satisfied for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name) + failReasons = append(failReasons, ErrVolumeNodeConflict) + } - pv, err := c.pvInfo.GetPersistentVolumeInfo(pvName) - if err != nil { - return false, nil, err - } - if pv == nil { - return false, nil, fmt.Errorf("PersistentVolume not found: %q", pvName) - } + if !unboundSatisfied { + glog.V(5).Infof("Couldn't find matching PVs for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name) + failReasons = append(failReasons, ErrVolumeBindConflict) + } - err = volumeutil.CheckNodeAffinity(pv, node.Labels) - if err != nil { - glog.V(2).Infof("Won't schedule pod 
%q onto node %q due to volume %q node mismatch: %v", pod.Name, node.Name, pvName, err.Error()) - return false, []algorithm.PredicateFailureReason{ErrVolumeNodeConflict}, nil - } - glog.V(4).Infof("VolumeNode predicate allows node %q for pod %q due to volume %q", node.Name, pod.Name, pvName) + if len(failReasons) > 0 { + return false, failReasons, nil } + + // All volumes bound or matching PVs found for all unbound PVCs + glog.V(5).Infof("All PVCs found matches for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name) return true, nil, nil } diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/testing_helper.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/testing_helper.go new file mode 100644 index 000000000000..57306c58aad2 --- /dev/null +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/testing_helper.go @@ -0,0 +1,75 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package predicates + +import ( + "fmt" + + "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" +) + +type FakePersistentVolumeClaimInfo []v1.PersistentVolumeClaim + +func (pvcs FakePersistentVolumeClaimInfo) GetPersistentVolumeClaimInfo(namespace string, pvcID string) (*v1.PersistentVolumeClaim, error) { + for _, pvc := range pvcs { + if pvc.Name == pvcID && pvc.Namespace == namespace { + return &pvc, nil + } + } + return nil, fmt.Errorf("Unable to find persistent volume claim: %s/%s", namespace, pvcID) +} + +type FakeNodeInfo v1.Node + +func (n FakeNodeInfo) GetNodeInfo(nodeName string) (*v1.Node, error) { + node := v1.Node(n) + return &node, nil +} + +type FakeNodeListInfo []v1.Node + +func (nodes FakeNodeListInfo) GetNodeInfo(nodeName string) (*v1.Node, error) { + for _, node := range nodes { + if node.Name == nodeName { + return &node, nil + } + } + return nil, fmt.Errorf("Unable to find node: %s", nodeName) +} + +type FakePersistentVolumeInfo []v1.PersistentVolume + +func (pvs FakePersistentVolumeInfo) GetPersistentVolumeInfo(pvID string) (*v1.PersistentVolume, error) { + for _, pv := range pvs { + if pv.Name == pvID { + return &pv, nil + } + } + return nil, fmt.Errorf("Unable to find persistent volume: %s", pvID) +} + +type FakeStorageClassInfo []storagev1.StorageClass + +func (classes FakeStorageClassInfo) GetStorageClassInfo(name string) (*storagev1.StorageClass, error) { + for _, sc := range classes { + if sc.Name == name { + return &sc, nil + } + } + return nil, fmt.Errorf("Unable to find storage class: %s", name) +} diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/utils.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/utils.go index 84d096d23aa4..622bdc683597 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/utils.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/utils.go @@ -17,9 +17,16 @@ limitations under the License. 
package predicates import ( + "strings" + + "github.com/golang/glog" + "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" + schedutil "k8s.io/kubernetes/plugin/pkg/scheduler/util" ) // FindLabelsInSet gets as many key/value pairs as possible out of a label set. @@ -66,26 +73,130 @@ func CreateSelectorFromLabels(aL map[string]string) labels.Selector { return labels.Set(aL).AsSelector() } +// EquivalencePodGenerator is a generator of equivalence class for pod with consideration of PVC info. +type EquivalencePodGenerator struct { + pvcInfo PersistentVolumeClaimInfo +} + +// NewEquivalencePodGenerator returns a getEquivalencePod method with consideration of PVC info. +func NewEquivalencePodGenerator(pvcInfo PersistentVolumeClaimInfo) algorithm.GetEquivalencePodFunc { + g := &EquivalencePodGenerator{ + pvcInfo: pvcInfo, + } + return g.getEquivalencePod +} + // GetEquivalencePod returns a EquivalencePod which contains a group of pod attributes which can be reused. -func GetEquivalencePod(pod *v1.Pod) interface{} { +func (e *EquivalencePodGenerator) getEquivalencePod(pod *v1.Pod) interface{} { // For now we only consider pods: // 1. OwnerReferences is Controller // 2. with same OwnerReferences + // 3. with same PVC claim // to be equivalent - if len(pod.OwnerReferences) != 0 { - for _, ref := range pod.OwnerReferences { - if *ref.Controller { - // a pod can only belongs to one controller + for _, ref := range pod.OwnerReferences { + if ref.Controller != nil && *ref.Controller { + if pvcSet, err := e.getPVCSet(pod); err == nil { + // A pod can only belongs to one controller, so let's return. return &EquivalencePod{ ControllerRef: ref, + PVCSet: pvcSet, } + } else { + // If error encountered, log warning and return nil (i.e. no equivalent pod found) + glog.Warningf("[EquivalencePodGenerator] for pod: %v failed due to: %v", pod.GetName(), err) + return nil } } } return nil } +// getPVCSet returns a set of PVC UIDs of given pod. +func (e *EquivalencePodGenerator) getPVCSet(pod *v1.Pod) (sets.String, error) { + result := sets.NewString() + for _, volume := range pod.Spec.Volumes { + if volume.PersistentVolumeClaim == nil { + continue + } + pvcName := volume.PersistentVolumeClaim.ClaimName + pvc, err := e.pvcInfo.GetPersistentVolumeClaimInfo(pod.GetNamespace(), pvcName) + if err != nil { + return nil, err + } + result.Insert(string(pvc.UID)) + } + + return result, nil +} + // EquivalencePod is a group of pod attributes which can be reused as equivalence to schedule other pods. type EquivalencePod struct { ControllerRef metav1.OwnerReference + PVCSet sets.String +} + +type hostPortInfo struct { + protocol string + hostIP string + hostPort string +} + +// decode decodes string ("protocol/hostIP/hostPort") to *hostPortInfo object. +func decode(info string) *hostPortInfo { + hostPortInfoSlice := strings.Split(info, "/") + + protocol := hostPortInfoSlice[0] + hostIP := hostPortInfoSlice[1] + hostPort := hostPortInfoSlice[2] + + return &hostPortInfo{ + protocol: protocol, + hostIP: hostIP, + hostPort: hostPort, + } +} + +// specialPortConflictCheck detects whether specailHostPort(whose hostIP is 0.0.0.0) is conflict with otherHostPorts. +// return true if we have a conflict. 
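Host ports are now tracked as string keys in the protocol/hostIP/hostPort layout that decode() above takes apart, which is what lets PodFitsHostPorts treat the wildcard address specially through the helpers that follow. A worked example, written as if it lived inside this package because portsConflict is unexported, is shown below; the addresses and port are arbitrary, and "0.0.0.0" stands in for schedutil.DefaultBindAllHostIP.

package predicates

// portsConflictExamples illustrates the two interesting cases: a wildcard bind
// clashes with any existing bind on the same protocol and port, while two
// different specific addresses can share a port.
func portsConflictExamples() (bool, bool) {
	existing := map[string]bool{
		"TCP/127.0.0.1/8080": true, // an already-scheduled pod bound to loopback:8080
	}

	wildcard := map[string]bool{"TCP/0.0.0.0/8080": true} // conflicts: 0.0.0.0 claims every interface
	otherIP := map[string]bool{"TCP/10.0.0.2/8080": true} // no conflict: different specific address

	return portsConflict(existing, wildcard), portsConflict(existing, otherIP) // true, false
}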
+func specialPortConflictCheck(specialHostPort string, otherHostPorts map[string]bool) bool { + specialHostPortInfo := decode(specialHostPort) + + if specialHostPortInfo.hostIP == schedutil.DefaultBindAllHostIP { + // loop through all the otherHostPorts to see if there exists a conflict + for hostPortItem := range otherHostPorts { + hostPortInfo := decode(hostPortItem) + + // if there exists one hostPortItem which has the same hostPort and protocol with the specialHostPort, that will cause a conflict + if specialHostPortInfo.hostPort == hostPortInfo.hostPort && specialHostPortInfo.protocol == hostPortInfo.protocol { + return true + } + } + + } + + return false +} + +// portsConflict check whether existingPorts and wantPorts conflict with each other +// return true if we have a conflict +func portsConflict(existingPorts, wantPorts map[string]bool) bool { + + for existingPort := range existingPorts { + if specialPortConflictCheck(existingPort, wantPorts) { + return true + } + } + + for wantPort := range wantPorts { + if specialPortConflictCheck(wantPort, existingPorts) { + return true + } + + // general check hostPort conflict procedure for hostIP is not 0.0.0.0 + if existingPorts[wantPort] { + return true + } + } + + return false } diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/BUILD index eff8faba1ea4..2c335c4004ef 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/BUILD @@ -18,12 +18,15 @@ go_library( "node_affinity.go", "node_label.go", "node_prefer_avoid_pods.go", + "reduce.go", + "resource_limits.go", "selector_spreading.go", "taint_toleration.go", "test_util.go", ], + importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities", deps = [ - "//pkg/api/v1/helper:go_default_library", + "//pkg/apis/core/v1/helper:go_default_library", "//pkg/kubelet/apis:go_default_library", "//pkg/util/node:go_default_library", "//plugin/pkg/scheduler/algorithm:go_default_library", @@ -33,6 +36,7 @@ go_library( "//plugin/pkg/scheduler/schedulercache:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", @@ -52,9 +56,11 @@ go_test( "node_affinity_test.go", "node_label_test.go", "node_prefer_avoid_pods_test.go", + "resource_limits_test.go", "selector_spreading_test.go", "taint_toleration_test.go", ], + importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities", library = ":go_default_library", deps = [ "//pkg/kubelet/apis:go_default_library", diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/balanced_resource_allocation.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/balanced_resource_allocation.go index 5739bedf85ac..718282da11f7 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/balanced_resource_allocation.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/balanced_resource_allocation.go @@ -100,7 +100,7 @@ func fractionOfCapacity(requested, capacity int64) float64 { // BalancedResourceAllocationMap favors nodes with balanced 
resource usage rate. // BalancedResourceAllocationMap should **NOT** be used alone, and **MUST** be used together with LeastRequestedPriority. -// It calculates the difference between the cpu and memory fracion of capacity, and prioritizes the host based on how +// It calculates the difference between the cpu and memory fraction of capacity, and prioritizes the host based on how // close the two metrics are to each other. // Detail: score = 10 - abs(cpuFraction-memoryFraction)*10. The algorithm is partly inspired by: // "Wei Huang et al. An Energy Efficient Virtual Machine Placement Algorithm with Balanced Resource Utilization" diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/image_locality.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/image_locality.go index 67dba1b1af21..1629dee4fdfe 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/image_locality.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/image_locality.go @@ -35,10 +35,8 @@ func ImageLocalityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *scheduler return schedulerapi.HostPriority{}, fmt.Errorf("node not found") } - var sumSize int64 - for i := range pod.Spec.Containers { - sumSize += checkContainerImageOnNode(node, &pod.Spec.Containers[i]) - } + sumSize := totalImageSize(node, pod.Spec.Containers) + return schedulerapi.HostPriority{ Host: node.Name, Score: calculateScoreFromSize(sumSize), @@ -49,31 +47,35 @@ func ImageLocalityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *scheduler // 1. Split image size range into 10 buckets. // 2. Decide the priority of a given sumSize based on which bucket it belongs to. func calculateScoreFromSize(sumSize int64) int { - var score int switch { case sumSize == 0 || sumSize < minImgSize: - // score == 0 means none of the images required by this pod are present on this + // 0 means none of the images required by this pod are present on this // node or the total size of the images present is too small to be taken into further consideration. - score = 0 - // If existing images' total size is larger than max, just make it highest priority. + return 0 + case sumSize >= maxImgSize: - score = schedulerapi.MaxPriority - default: - score = int((int64(schedulerapi.MaxPriority) * (sumSize - minImgSize) / (maxImgSize - minImgSize)) + 1) + // If existing images' total size is larger than max, just make it highest priority. + return schedulerapi.MaxPriority } - // Return which bucket the given size belongs to - return score + + return int((int64(schedulerapi.MaxPriority) * (sumSize - minImgSize) / (maxImgSize - minImgSize)) + 1) } -// checkContainerImageOnNode checks if a container image is present on a node and returns its size. -func checkContainerImageOnNode(node *v1.Node, container *v1.Container) int64 { +// totalImageSize returns the total image size of all the containers that are already on the node. +func totalImageSize(node *v1.Node, containers []v1.Container) int64 { + imageSizes := make(map[string]int64) for _, image := range node.Status.Images { for _, name := range image.Names { - if container.Image == name { - // Should return immediately. 
- return image.SizeBytes - } + imageSizes[name] = image.SizeBytes } } - return 0 + + var total int64 + for _, container := range containers { + if size, ok := imageSizes[container.Image]; ok { + total += size + } + } + + return total } diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/interpod_affinity.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/interpod_affinity.go index 7abe732d45c4..af2cd8f6c20f 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/interpod_affinity.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/interpod_affinity.go @@ -21,6 +21,7 @@ import ( "sync" "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/util/workqueue" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" @@ -37,14 +38,14 @@ type InterPodAffinity struct { info predicates.NodeInfo nodeLister algorithm.NodeLister podLister algorithm.PodLister - hardPodAffinityWeight int + hardPodAffinityWeight int32 } func NewInterPodAffinityPriority( info predicates.NodeInfo, nodeLister algorithm.NodeLister, podLister algorithm.PodLister, - hardPodAffinityWeight int) algorithm.PriorityFunction { + hardPodAffinityWeight int32) algorithm.PriorityFunction { interPodAffinity := &InterPodAffinity{ info: info, nodeLister: nodeLister, @@ -137,6 +138,10 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, node processPod := func(existingPod *v1.Pod) error { existingPodNode, err := ipa.info.GetNodeInfo(existingPod.Spec.NodeName) if err != nil { + if apierrors.IsNotFound(err) { + glog.Errorf("Node not found, %v", existingPod.Spec.NodeName) + return nil + } return err } existingPodAffinity := existingPod.Spec.Affinity @@ -189,19 +194,21 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, node } processNode := func(i int) { nodeInfo := nodeNameToInfo[allNodeNames[i]] - if hasAffinityConstraints || hasAntiAffinityConstraints { - // We need to process all the nodes. - for _, existingPod := range nodeInfo.Pods() { - if err := processPod(existingPod); err != nil { - pm.setError(err) + if nodeInfo.Node() != nil { + if hasAffinityConstraints || hasAntiAffinityConstraints { + // We need to process all the nodes. + for _, existingPod := range nodeInfo.Pods() { + if err := processPod(existingPod); err != nil { + pm.setError(err) + } } - } - } else { - // The pod doesn't have any constraints - we need to check only existing - // ones that have some. - for _, existingPod := range nodeInfo.PodsWithAffinity() { - if err := processPod(existingPod); err != nil { - pm.setError(err) + } else { + // The pod doesn't have any constraints - we need to check only existing + // ones that have some. 
+ for _, existingPod := range nodeInfo.PodsWithAffinity() { + if err := processPod(existingPod); err != nil { + pm.setError(err) + } } } } diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/metadata.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/metadata.go index 6f3818eb5307..1e16c4aad4da 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/metadata.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/metadata.go @@ -18,26 +18,79 @@ package priorities import ( "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" ) +type PriorityMetadataFactory struct { + serviceLister algorithm.ServiceLister + controllerLister algorithm.ControllerLister + replicaSetLister algorithm.ReplicaSetLister + statefulSetLister algorithm.StatefulSetLister +} + +func NewPriorityMetadataFactory(serviceLister algorithm.ServiceLister, controllerLister algorithm.ControllerLister, replicaSetLister algorithm.ReplicaSetLister, statefulSetLister algorithm.StatefulSetLister) algorithm.MetadataProducer { + factory := &PriorityMetadataFactory{ + serviceLister: serviceLister, + controllerLister: controllerLister, + replicaSetLister: replicaSetLister, + statefulSetLister: statefulSetLister, + } + return factory.PriorityMetadata +} + // priorityMetadata is a type that is passed as metadata for priority functions type priorityMetadata struct { nonZeroRequest *schedulercache.Resource podTolerations []v1.Toleration affinity *v1.Affinity + podSelectors []labels.Selector } // PriorityMetadata is a MetadataProducer. Node info can be nil. -func PriorityMetadata(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) interface{} { +func (pmf *PriorityMetadataFactory) PriorityMetadata(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) interface{} { // If we cannot compute metadata, just return nil if pod == nil { return nil } tolerationsPreferNoSchedule := getAllTolerationPreferNoSchedule(pod.Spec.Tolerations) + podSelectors := getSelectors(pod, pmf.serviceLister, pmf.controllerLister, pmf.replicaSetLister, pmf.statefulSetLister) return &priorityMetadata{ nonZeroRequest: getNonZeroRequests(pod), podTolerations: tolerationsPreferNoSchedule, affinity: pod.Spec.Affinity, + podSelectors: podSelectors, + } +} + +// getSelectors returns selectors of services, RCs and RSs matching the given pod. 
+func getSelectors(pod *v1.Pod, sl algorithm.ServiceLister, cl algorithm.ControllerLister, rsl algorithm.ReplicaSetLister, ssl algorithm.StatefulSetLister) []labels.Selector { + var selectors []labels.Selector + if services, err := sl.GetPodServices(pod); err == nil { + for _, service := range services { + selectors = append(selectors, labels.SelectorFromSet(service.Spec.Selector)) + } + } + if rcs, err := cl.GetPodControllers(pod); err == nil { + for _, rc := range rcs { + selectors = append(selectors, labels.SelectorFromSet(rc.Spec.Selector)) + } + } + if rss, err := rsl.GetPodReplicaSets(pod); err == nil { + for _, rs := range rss { + if selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector); err == nil { + selectors = append(selectors, selector) + } + } + } + if sss, err := ssl.GetPodStatefulSets(pod); err == nil { + for _, ss := range sss { + if selector, err := metav1.LabelSelectorAsSelector(ss.Spec.Selector); err == nil { + selectors = append(selectors, selector) + } + } } + return selectors } diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/node_affinity.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/node_affinity.go index 1f62091042ed..0ed713a3b5c1 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/node_affinity.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/node_affinity.go @@ -21,11 +21,9 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" - v1helper "k8s.io/kubernetes/pkg/api/v1/helper" + v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" - - "github.com/golang/glog" ) // CalculateNodeAffinityPriority prioritizes nodes according to node affinity scheduling preferences @@ -76,28 +74,4 @@ func CalculateNodeAffinityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *s }, nil } -func CalculateNodeAffinityPriorityReduce(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulercache.NodeInfo, result schedulerapi.HostPriorityList) error { - var maxCount int - for i := range result { - if result[i].Score > maxCount { - maxCount = result[i].Score - } - } - maxCountFloat := float64(maxCount) - - var fScore float64 - for i := range result { - if maxCount > 0 { - fScore = float64(schedulerapi.MaxPriority) * (float64(result[i].Score) / maxCountFloat) - } else { - fScore = 0 - } - if glog.V(10) { - // We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is - // not logged. There is visible performance gain from it. 
- glog.Infof("%v -> %v: NodeAffinityPriority, Score: (%d)", pod.Name, result[i].Host, int(fScore)) - } - result[i].Score = int(fScore) - } - return nil -} +var CalculateNodeAffinityPriorityReduce = NormalizeReduce(schedulerapi.MaxPriority, false) diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods.go index 985e323f4669..c4311fb3ab5e 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods.go @@ -20,7 +20,7 @@ import ( "fmt" "k8s.io/api/core/v1" - v1helper "k8s.io/kubernetes/pkg/api/v1/helper" + v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util" schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/reduce.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/reduce.go new file mode 100644 index 000000000000..9ce84fd9f6b3 --- /dev/null +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/reduce.go @@ -0,0 +1,64 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package priorities + +import ( + "k8s.io/api/core/v1" + "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" + schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" + "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" +) + +// NormalizeReduce generates a PriorityReduceFunction that can normalize the result +// scores to [0, maxPriority]. If reverse is set to true, it reverses the scores by +// subtracting it from maxPriority. +func NormalizeReduce(maxPriority int, reverse bool) algorithm.PriorityReduceFunction { + return func( + _ *v1.Pod, + _ interface{}, + _ map[string]*schedulercache.NodeInfo, + result schedulerapi.HostPriorityList) error { + + var maxCount int + for i := range result { + if result[i].Score > maxCount { + maxCount = result[i].Score + } + } + + if maxCount == 0 { + if reverse { + for i := range result { + result[i].Score = maxPriority + } + } + return nil + } + + for i := range result { + score := result[i].Score + + score = maxPriority * score / maxCount + if reverse { + score = maxPriority - score + } + + result[i].Score = score + } + return nil + } +} diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/resource_limits.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/resource_limits.go new file mode 100644 index 000000000000..77ae0dca923c --- /dev/null +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/resource_limits.go @@ -0,0 +1,128 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package priorities + +import ( + "fmt" + + "k8s.io/api/core/v1" + v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" + schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" + "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" + + "github.com/golang/glog" +) + +// ResourceLimitsPriorityMap is a priority function that increases score of input node by 1 if the node satisfies +// input pod's resource limits. In detail, this priority function works as follows: If a node does not publish its +// allocatable resources (cpu and memory both), the node score is not affected. If a pod does not specify +// its cpu and memory limits both, the node score is not affected. If one or both of cpu and memory limits +// of the pod are satisfied, the node is assigned a score of 1. +// Rationale of choosing the lowest score of 1 is that this is mainly selected to break ties between nodes that have +// same scores assigned by one of least and most requested priority functions. +func ResourceLimitsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { + node := nodeInfo.Node() + if node == nil { + return schedulerapi.HostPriority{}, fmt.Errorf("node not found") + } + + allocatableResources := nodeInfo.AllocatableResource() + + // compute pod limits + podLimits := getResourceLimits(pod) + + cpuScore := computeScore(podLimits.MilliCPU, allocatableResources.MilliCPU) + memScore := computeScore(podLimits.Memory, allocatableResources.Memory) + + score := int(0) + if cpuScore == 1 || memScore == 1 { + score = 1 + } + + if glog.V(10) { + // We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is + // not logged. There is visible performance gain from it. + glog.Infof( + "%v -> %v: Resource Limits Priority, allocatable %d millicores %d memory bytes, pod limits %d millicores %d memory bytes, score %d", + pod.Name, node.Name, + allocatableResources.MilliCPU, allocatableResources.Memory, + podLimits.MilliCPU, podLimits.Memory, + score, + ) + } + + return schedulerapi.HostPriority{ + Host: node.Name, + Score: score, + }, nil +} + +// computeScore return 1 if limit value is less than or equal to allocable +// value, otherwise it returns 0. +func computeScore(limit, allocatable int64) int64 { + if limit != 0 && allocatable != 0 && limit <= allocatable { + return 1 + } + return 0 +} + +// getResourceLimits computes resource limits for input pod. +// The reason to create this new function is to be consistent with other +// priority functions because most or perhaps all priority functions work +// with schedulercache.Resource. +// TODO: cache it as part of metadata passed to priority functions. 
+func getResourceLimits(pod *v1.Pod) *schedulercache.Resource { + result := &schedulercache.Resource{} + for _, container := range pod.Spec.Containers { + result.Add(container.Resources.Limits) + } + + // take max_resource(sum_pod, any_init_container) + for _, container := range pod.Spec.InitContainers { + for rName, rQuantity := range container.Resources.Limits { + switch rName { + case v1.ResourceMemory: + if mem := rQuantity.Value(); mem > result.Memory { + result.Memory = mem + } + case v1.ResourceCPU: + if cpu := rQuantity.MilliValue(); cpu > result.MilliCPU { + result.MilliCPU = cpu + } + // keeping these resources though score computation in other priority functions and in this + // are only computed based on cpu and memory only. + case v1.ResourceEphemeralStorage: + if ephemeralStorage := rQuantity.Value(); ephemeralStorage > result.EphemeralStorage { + result.EphemeralStorage = ephemeralStorage + } + case v1.ResourceNvidiaGPU: + if gpu := rQuantity.Value(); gpu > result.NvidiaGPU { + result.NvidiaGPU = gpu + } + default: + if v1helper.IsScalarResourceName(rName) { + value := rQuantity.Value() + if value > result.ScalarResources[rName] { + result.SetScalar(rName, value) + } + } + } + } + } + + return result +} diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/selector_spreading.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/selector_spreading.go index 8e5eb308ba6e..aa195b0e4f86 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/selector_spreading.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/selector_spreading.go @@ -17,12 +17,10 @@ limitations under the License. package priorities import ( - "sync" + "fmt" "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/util/workqueue" utilnode "k8s.io/kubernetes/pkg/util/node" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" @@ -46,146 +44,133 @@ func NewSelectorSpreadPriority( serviceLister algorithm.ServiceLister, controllerLister algorithm.ControllerLister, replicaSetLister algorithm.ReplicaSetLister, - statefulSetLister algorithm.StatefulSetLister) algorithm.PriorityFunction { + statefulSetLister algorithm.StatefulSetLister) (algorithm.PriorityMapFunction, algorithm.PriorityReduceFunction) { selectorSpread := &SelectorSpread{ serviceLister: serviceLister, controllerLister: controllerLister, replicaSetLister: replicaSetLister, statefulSetLister: statefulSetLister, } - return selectorSpread.CalculateSpreadPriority + return selectorSpread.CalculateSpreadPriorityMap, selectorSpread.CalculateSpreadPriorityReduce } -// Returns selectors of services, RCs and RSs matching the given pod. -func getSelectors(pod *v1.Pod, sl algorithm.ServiceLister, cl algorithm.ControllerLister, rsl algorithm.ReplicaSetLister, ssl algorithm.StatefulSetLister) []labels.Selector { +// CalculateSpreadPriorityMap spreads pods across hosts, considering pods +// belonging to the same service,RC,RS or StatefulSet. +// When a pod is scheduled, it looks for services, RCs,RSs and StatefulSets that match the pod, +// then finds existing pods that match those selectors. +// It favors nodes that have fewer existing matching pods. +// i.e. it pushes the scheduler towards a node where there's the smallest number of +// pods which match the same service, RC,RSs or StatefulSets selectors as the pod being scheduled. 
+func (s *SelectorSpread) CalculateSpreadPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { var selectors []labels.Selector - if services, err := sl.GetPodServices(pod); err == nil { - for _, service := range services { - selectors = append(selectors, labels.SelectorFromSet(service.Spec.Selector)) - } + node := nodeInfo.Node() + if node == nil { + return schedulerapi.HostPriority{}, fmt.Errorf("node not found") } - if rcs, err := cl.GetPodControllers(pod); err == nil { - for _, rc := range rcs { - selectors = append(selectors, labels.SelectorFromSet(rc.Spec.Selector)) - } + + priorityMeta, ok := meta.(*priorityMetadata) + if ok { + selectors = priorityMeta.podSelectors + } else { + selectors = getSelectors(pod, s.serviceLister, s.controllerLister, s.replicaSetLister, s.statefulSetLister) } - if rss, err := rsl.GetPodReplicaSets(pod); err == nil { - for _, rs := range rss { - if selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector); err == nil { - selectors = append(selectors, selector) - } - } + + if len(selectors) == 0 { + return schedulerapi.HostPriority{ + Host: node.Name, + Score: int(0), + }, nil } - if sss, err := ssl.GetPodStatefulSets(pod); err == nil { - for _, ss := range sss { - if selector, err := metav1.LabelSelectorAsSelector(ss.Spec.Selector); err == nil { - selectors = append(selectors, selector) + + count := int(0) + for _, nodePod := range nodeInfo.Pods() { + if pod.Namespace != nodePod.Namespace { + continue + } + // When we are replacing a failed pod, we often see the previous + // deleted version while scheduling the replacement. + // Ignore the previous deleted version for spreading purposes + // (it can still be considered for resource restrictions etc.) + if nodePod.DeletionTimestamp != nil { + glog.V(4).Infof("skipping pending-deleted pod: %s/%s", nodePod.Namespace, nodePod.Name) + continue + } + matches := false + for _, selector := range selectors { + if selector.Matches(labels.Set(nodePod.ObjectMeta.Labels)) { + matches = true + break } } + if matches { + count++ + } } - return selectors + return schedulerapi.HostPriority{ + Host: node.Name, + Score: int(count), + }, nil } -func (s *SelectorSpread) getSelectors(pod *v1.Pod) []labels.Selector { - return getSelectors(pod, s.serviceLister, s.controllerLister, s.replicaSetLister, s.statefulSetLister) -} - -// CalculateSpreadPriority spreads pods across hosts and zones, considering pods belonging to the same service or replication controller. -// When a pod is scheduled, it looks for services, RCs or RSs that match the pod, then finds existing pods that match those selectors. -// It favors nodes that have fewer existing matching pods. -// i.e. it pushes the scheduler towards a node where there's the smallest number of -// pods which match the same service, RC or RS selectors as the pod being scheduled. -// Where zone information is included on the nodes, it favors nodes in zones with fewer existing matching pods. 
-func (s *SelectorSpread) CalculateSpreadPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) { - selectors := s.getSelectors(pod) - - // Count similar pods by node - countsByNodeName := make(map[string]float64, len(nodes)) - countsByZone := make(map[string]float64, 10) - maxCountByNodeName := float64(0) - countsByNodeNameLock := sync.Mutex{} - - if len(selectors) > 0 { - processNodeFunc := func(i int) { - nodeName := nodes[i].Name - count := float64(0) - for _, nodePod := range nodeNameToInfo[nodeName].Pods() { - if pod.Namespace != nodePod.Namespace { - continue - } - // When we are replacing a failed pod, we often see the previous - // deleted version while scheduling the replacement. - // Ignore the previous deleted version for spreading purposes - // (it can still be considered for resource restrictions etc.) - if nodePod.DeletionTimestamp != nil { - glog.V(4).Infof("skipping pending-deleted pod: %s/%s", nodePod.Namespace, nodePod.Name) - continue - } - matches := false - for _, selector := range selectors { - if selector.Matches(labels.Set(nodePod.ObjectMeta.Labels)) { - matches = true - break - } - } - if matches { - count++ - } - } - zoneId := utilnode.GetZoneKey(nodes[i]) - - countsByNodeNameLock.Lock() - defer countsByNodeNameLock.Unlock() - countsByNodeName[nodeName] = count - if count > maxCountByNodeName { - maxCountByNodeName = count - } - if zoneId != "" { - countsByZone[zoneId] += count - } +// CalculateSpreadPriorityReduce calculates the source of each node +// based on the number of existing matching pods on the node +// where zone information is included on the nodes, it favors nodes +// in zones with fewer existing matching pods. +func (s *SelectorSpread) CalculateSpreadPriorityReduce(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulercache.NodeInfo, result schedulerapi.HostPriorityList) error { + countsByZone := make(map[string]int, 10) + maxCountByZone := int(0) + maxCountByNodeName := int(0) + + for i := range result { + if result[i].Score > maxCountByNodeName { + maxCountByNodeName = result[i].Score + } + zoneId := utilnode.GetZoneKey(nodeNameToInfo[result[i].Host].Node()) + if zoneId == "" { + continue } - workqueue.Parallelize(16, len(nodes), processNodeFunc) + countsByZone[zoneId] += result[i].Score } - // Aggregate by-zone information - // Compute the maximum number of pods hosted in any zone - haveZones := len(countsByZone) != 0 - maxCountByZone := float64(0) - for _, count := range countsByZone { - if count > maxCountByZone { - maxCountByZone = count + for zoneId := range countsByZone { + if countsByZone[zoneId] > maxCountByZone { + maxCountByZone = countsByZone[zoneId] } } - result := make(schedulerapi.HostPriorityList, 0, len(nodes)) - //score int - scale of 0-maxPriority - // 0 being the lowest priority and maxPriority being the highest - for _, node := range nodes { + haveZones := len(countsByZone) != 0 + + maxCountByNodeNameFloat64 := float64(maxCountByNodeName) + maxCountByZoneFloat64 := float64(maxCountByZone) + MaxPriorityFloat64 := float64(schedulerapi.MaxPriority) + + for i := range result { // initializing to the default/max node score of maxPriority - fScore := float64(schedulerapi.MaxPriority) + fScore := MaxPriorityFloat64 if maxCountByNodeName > 0 { - fScore = float64(schedulerapi.MaxPriority) * ((maxCountByNodeName - countsByNodeName[node.Name]) / maxCountByNodeName) + fScore = MaxPriorityFloat64 * (float64(maxCountByNodeName-result[i].Score) / 
maxCountByNodeNameFloat64) } - // If there is zone information present, incorporate it if haveZones { - zoneId := utilnode.GetZoneKey(node) + zoneId := utilnode.GetZoneKey(nodeNameToInfo[result[i].Host].Node()) if zoneId != "" { - zoneScore := float64(schedulerapi.MaxPriority) * ((maxCountByZone - countsByZone[zoneId]) / maxCountByZone) + zoneScore := MaxPriorityFloat64 + if maxCountByZone > 0 { + zoneScore = MaxPriorityFloat64 * (float64(maxCountByZone-countsByZone[zoneId]) / maxCountByZoneFloat64) + } fScore = (fScore * (1.0 - zoneWeighting)) + (zoneWeighting * zoneScore) } } - - result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: int(fScore)}) + result[i].Score = int(fScore) if glog.V(10) { // We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is // not logged. There is visible performance gain from it. glog.V(10).Infof( - "%v -> %v: SelectorSpreadPriority, Score: (%d)", pod.Name, node.Name, int(fScore), + "%v -> %v: SelectorSpreadPriority, Score: (%d)", pod.Name, result[i].Host, int(fScore), ) } } - return result, nil + return nil } type ServiceAntiAffinity struct { diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/taint_toleration.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/taint_toleration.go index a017531f2f20..9e2905e8edb2 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/taint_toleration.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/taint_toleration.go @@ -20,11 +20,9 @@ import ( "fmt" "k8s.io/api/core/v1" - v1helper "k8s.io/kubernetes/pkg/api/v1/helper" + v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" - - "github.com/golang/glog" ) // CountIntolerableTaintsPreferNoSchedule gives the count of intolerable taints of a pod with effect PreferNoSchedule @@ -75,26 +73,4 @@ func ComputeTaintTolerationPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo * } // ComputeTaintTolerationPriorityReduce calculates the source of each node based on the number of intolerable taints on the node -func ComputeTaintTolerationPriorityReduce(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulercache.NodeInfo, result schedulerapi.HostPriorityList) error { - var maxCount int - for i := range result { - if result[i].Score > maxCount { - maxCount = result[i].Score - } - } - maxCountFloat := float64(maxCount) - - for i := range result { - fScore := float64(schedulerapi.MaxPriority) - if maxCountFloat > 0 { - fScore = (1.0 - float64(result[i].Score)/maxCountFloat) * float64(schedulerapi.MaxPriority) - } - if glog.V(10) { - // We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is - // not logged. There is visible performance gain from it. 
- glog.Infof("%v -> %v: Taint Toleration Priority, Score: (%d)", pod.Name, result[i].Host, int(fScore)) - } - result[i].Score = int(fScore) - } - return nil -} +var ComputeTaintTolerationPriorityReduce = NormalizeReduce(schedulerapi.MaxPriority, true) diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/test_util.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/test_util.go index 9eb26f2d93c9..312c7619410c 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/test_util.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/test_util.go @@ -41,18 +41,18 @@ func makeNode(node string, milliCPU, memory int64) *v1.Node { } } -func priorityFunction(mapFn algorithm.PriorityMapFunction, reduceFn algorithm.PriorityReduceFunction) algorithm.PriorityFunction { +func priorityFunction(mapFn algorithm.PriorityMapFunction, reduceFn algorithm.PriorityReduceFunction, mataData interface{}) algorithm.PriorityFunction { return func(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) { result := make(schedulerapi.HostPriorityList, 0, len(nodes)) for i := range nodes { - hostResult, err := mapFn(pod, nil, nodeNameToInfo[nodes[i].Name]) + hostResult, err := mapFn(pod, mataData, nodeNameToInfo[nodes[i].Name]) if err != nil { return nil, err } result = append(result, hostResult) } if reduceFn != nil { - if err := reduceFn(pod, nil, nodeNameToInfo, result); err != nil { + if err := reduceFn(pod, mataData, nodeNameToInfo, result); err != nil { return nil, err } } diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util/BUILD index a034d5509606..9638eeab5115 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util/BUILD @@ -8,11 +8,21 @@ load( go_test( name = "go_default_test", - srcs = ["topologies_test.go"], + srcs = [ + "non_zero_test.go", + "topologies_test.go", + "util_test.go", + ], + importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util", library = ":go_default_library", deps = [ + "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/selection:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", ], ) @@ -23,6 +33,7 @@ go_library( "topologies.go", "util.go", ], + importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/scheduler_interface.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/scheduler_interface.go index 41a6dbb48bb4..5ef4fd6f407c 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/scheduler_interface.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/scheduler_interface.go @@ -49,8 +49,9 @@ type ScheduleAlgorithm interface { Schedule(*v1.Pod, NodeLister) (selectedMachine string, err error) // Preempt 
receives scheduling errors for a pod and tries to create room for // the pod by preempting lower priority pods if possible. - // It returns the node where preemption happened, a list of preempted pods, and error if any. - Preempt(*v1.Pod, NodeLister, error) (selectedNode *v1.Node, preemptedPods []*v1.Pod, err error) + // It returns the node where preemption happened, a list of preempted pods, a + // list of pods whose nominated node name should be removed, and error if any. + Preempt(*v1.Pod, NodeLister, error) (selectedNode *v1.Node, preemptedPods []*v1.Pod, cleanupNominatedPods []*v1.Pod, err error) // Predicates() returns a pointer to a map of predicate functions. This is // exposed for testing. Predicates() map[string]FitPredicate diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/types.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/types.go index 8932b1f6bd9e..b3e34e02401f 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/types.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/types.go @@ -53,6 +53,7 @@ type MetadataProducer func(pod *v1.Pod, nodeNameToInfo map[string]*schedulercach type PriorityFunction func(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) type PriorityConfig struct { + Name string Map PriorityMapFunction Reduce PriorityReduceFunction // TODO: Remove it after migrating all functions to diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/well_known_labels.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/well_known_labels.go index 209d72797439..bffe40a08858 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/well_known_labels.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/well_known_labels.go @@ -20,13 +20,21 @@ const ( // When feature-gate for TaintBasedEvictions=true flag is enabled, // TaintNodeNotReady would be automatically added by node controller // when node is not ready, and removed when node becomes ready. - TaintNodeNotReady = "node.alpha.kubernetes.io/notReady" + TaintNodeNotReady = "node.kubernetes.io/not-ready" + + // DeprecatedTaintNodeNotReady is the deprecated version of TaintNodeNotReady. + // It is deprecated since 1.9 + DeprecatedTaintNodeNotReady = "node.alpha.kubernetes.io/notReady" // When feature-gate for TaintBasedEvictions=true flag is enabled, // TaintNodeUnreachable would be automatically added by node controller // when node becomes unreachable (corresponding to NodeReady status ConditionUnknown) // and removed when node becomes reachable (NodeReady status ConditionTrue). - TaintNodeUnreachable = "node.alpha.kubernetes.io/unreachable" + TaintNodeUnreachable = "node.kubernetes.io/unreachable" + + // DeprecatedTaintNodeUnreachable is the deprecated version of TaintNodeUnreachable. 
+ // It is deprecated since 1.9 + DeprecatedTaintNodeUnreachable = "node.alpha.kubernetes.io/unreachable" // When feature-gate for TaintBasedEvictions=true flag is enabled, // TaintNodeOutOfDisk would be automatically added by node controller diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/BUILD index 883faf0bf0d8..904796b74aa9 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/BUILD @@ -9,14 +9,19 @@ load( go_library( name = "go_default_library", srcs = ["plugins.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider", deps = ["//plugin/pkg/scheduler/algorithmprovider/defaults:go_default_library"], ) go_test( name = "go_default_test", srcs = ["plugins_test.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider", library = ":go_default_library", - deps = ["//plugin/pkg/scheduler/factory:go_default_library"], + deps = [ + "//plugin/pkg/scheduler/factory:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", + ], ) filegroup( diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults/BUILD index f2357cd6c85a..e72930b90f99 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults/BUILD @@ -9,8 +9,8 @@ load( go_library( name = "go_default_library", srcs = ["defaults.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults", deps = [ - "//pkg/cloudprovider/providers/aws:go_default_library", "//pkg/features:go_default_library", "//plugin/pkg/scheduler/algorithm:go_default_library", "//plugin/pkg/scheduler/algorithm/predicates:go_default_library", @@ -29,17 +29,18 @@ go_test( "compatibility_test.go", "defaults_test.go", ], + importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/install:go_default_library", + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/core/install:go_default_library", + "//plugin/pkg/scheduler/algorithm/predicates:go_default_library", "//plugin/pkg/scheduler/api:go_default_library", "//plugin/pkg/scheduler/api/latest:go_default_library", "//plugin/pkg/scheduler/factory:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/client-go/informers:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go index 65998f854a2c..9860efdd710b 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go @@ -17,12 +17,9 @@ limitations under the License. 
package defaults import ( - "os" - "strconv" - "k8s.io/apimachinery/pkg/util/sets" utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/kubernetes/pkg/cloudprovider/providers/aws" + "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates" @@ -34,19 +31,11 @@ import ( ) const ( - // DefaultMaxGCEPDVolumes defines the maximum number of PD Volumes for GCE - // GCE instances can have up to 16 PD volumes attached. - DefaultMaxGCEPDVolumes = 16 - // DefaultMaxAzureDiskVolumes defines the maximum number of PD Volumes for Azure - // Larger Azure VMs can actually have much more disks attached. - // TODO We should determine the max based on VM size - DefaultMaxAzureDiskVolumes = 16 + // ClusterAutoscalerProvider defines the default autoscaler provider ClusterAutoscalerProvider = "ClusterAutoscalerProvider" // StatefulSetKind defines the name of 'StatefulSet' kind StatefulSetKind = "StatefulSet" - // KubeMaxPDVols defines the maximum number of PD Volumes per kubelet - KubeMaxPDVols = "KUBE_MAX_PD_VOLS" ) func init() { @@ -57,15 +46,10 @@ func init() { }) factory.RegisterPriorityMetadataProducerFactory( func(args factory.PluginFactoryArgs) algorithm.MetadataProducer { - return priorities.PriorityMetadata + return priorities.NewPriorityMetadataFactory(args.ServiceLister, args.ControllerLister, args.ReplicaSetLister, args.StatefulSetLister) }) - // Registers algorithm providers. By default we use 'DefaultProvider', but user can specify one to be used - // by specifying flag. - factory.RegisterAlgorithmProvider(factory.DefaultProvider, defaultPredicates(), defaultPriorities()) - // Cluster autoscaler friendly scheduling algorithm. - factory.RegisterAlgorithmProvider(ClusterAutoscalerProvider, defaultPredicates(), - copyAndReplace(defaultPriorities(), "LeastRequestedPriority", "MostRequestedPriority")) + registerAlgorithmProvider(defaultPredicates(), defaultPriorities()) // IMPORTANT NOTES for predicate developers: // We are using cached predicate result for pods belonging to the same equivalence class. @@ -96,8 +80,12 @@ func init() { // Fit is determined by node selector query. factory.RegisterFitPredicate("MatchNodeSelector", predicates.PodMatchNodeSelector) - // Use equivalence class to speed up predicates & priorities - factory.RegisterGetEquivalencePodFunction(predicates.GetEquivalencePod) + // Use equivalence class to speed up heavy predicates phase. + factory.RegisterGetEquivalencePodFunction( + func(args factory.PluginFactoryArgs) algorithm.GetEquivalencePodFunc { + return predicates.NewEquivalencePodGenerator(args.PVCInfo) + }, + ) // ServiceSpreadingPriority is a priority config factory that spreads pods by minimizing // the number of pods (belonging to the same service) on the same node. 
@@ -106,13 +94,12 @@ func init() { factory.RegisterPriorityConfigFactory( "ServiceSpreadingPriority", factory.PriorityConfigFactory{ - Function: func(args factory.PluginFactoryArgs) algorithm.PriorityFunction { + MapReduceFunction: func(args factory.PluginFactoryArgs) (algorithm.PriorityMapFunction, algorithm.PriorityReduceFunction) { return priorities.NewSelectorSpreadPriority(args.ServiceLister, algorithm.EmptyControllerLister{}, algorithm.EmptyReplicaSetLister{}, algorithm.EmptyStatefulSetLister{}) }, Weight: 1, }, ) - // EqualPriority is a prioritizer function that gives an equal weight of one to all nodes // Register the priority function so that its available // but do not include it as part of the default priorities @@ -123,42 +110,40 @@ func init() { factory.RegisterPriorityFunction2("ImageLocalityPriority", priorities.ImageLocalityPriorityMap, nil, 1) // Optional, cluster-autoscaler friendly priority function - give used nodes higher priority. factory.RegisterPriorityFunction2("MostRequestedPriority", priorities.MostRequestedPriorityMap, nil, 1) + // Prioritizes nodes that satisfy pod's resource limits + if utilfeature.DefaultFeatureGate.Enabled(features.ResourceLimitsPriorityFunction) { + factory.RegisterPriorityFunction2("ResourceLimitsPriority", priorities.ResourceLimitsPriorityMap, nil, 1) + } } func defaultPredicates() sets.String { - predSet := sets.NewString( + return sets.NewString( // Fit is determined by volume zone requirements. factory.RegisterFitPredicateFactory( "NoVolumeZoneConflict", func(args factory.PluginFactoryArgs) algorithm.FitPredicate { - return predicates.NewVolumeZonePredicate(args.PVInfo, args.PVCInfo) + return predicates.NewVolumeZonePredicate(args.PVInfo, args.PVCInfo, args.StorageClassInfo) }, ), // Fit is determined by whether or not there would be too many AWS EBS volumes attached to the node factory.RegisterFitPredicateFactory( "MaxEBSVolumeCount", func(args factory.PluginFactoryArgs) algorithm.FitPredicate { - // TODO: allow for generically parameterized scheduler predicates, because this is a bit ugly - maxVols := getMaxVols(aws.DefaultMaxEBSVolumes) - return predicates.NewMaxPDVolumeCountPredicate(predicates.EBSVolumeFilter, maxVols, args.PVInfo, args.PVCInfo) + return predicates.NewMaxPDVolumeCountPredicate(predicates.EBSVolumeFilterType, args.PVInfo, args.PVCInfo) }, ), // Fit is determined by whether or not there would be too many GCE PD volumes attached to the node factory.RegisterFitPredicateFactory( "MaxGCEPDVolumeCount", func(args factory.PluginFactoryArgs) algorithm.FitPredicate { - // TODO: allow for generically parameterized scheduler predicates, because this is a bit ugly - maxVols := getMaxVols(DefaultMaxGCEPDVolumes) - return predicates.NewMaxPDVolumeCountPredicate(predicates.GCEPDVolumeFilter, maxVols, args.PVInfo, args.PVCInfo) + return predicates.NewMaxPDVolumeCountPredicate(predicates.GCEPDVolumeFilterType, args.PVInfo, args.PVCInfo) }, ), // Fit is determined by whether or not there would be too many Azure Disk volumes attached to the node factory.RegisterFitPredicateFactory( "MaxAzureDiskVolumeCount", func(args factory.PluginFactoryArgs) algorithm.FitPredicate { - // TODO: allow for generically parameterized scheduler predicates, because this is a bit ugly - maxVols := getMaxVols(DefaultMaxAzureDiskVolumes) - return predicates.NewMaxPDVolumeCountPredicate(predicates.AzureDiskVolumeFilter, maxVols, args.PVInfo, args.PVCInfo) + return predicates.NewMaxPDVolumeCountPredicate(predicates.AzureDiskVolumeFilterType, args.PVInfo, 
args.PVCInfo) }, ), // Fit is determined by inter-pod affinity. @@ -182,27 +167,51 @@ func defaultPredicates() sets.String { // Fit is determined by node disk pressure condition. factory.RegisterFitPredicate("CheckNodeDiskPressure", predicates.CheckNodeDiskPressurePredicate), - // Fit is determined by volume zone requirements. + // Fit is determied by node condtions: not ready, network unavailable and out of disk. + factory.RegisterMandatoryFitPredicate("CheckNodeCondition", predicates.CheckNodeConditionPredicate), + + // Fit is determined based on whether a pod can tolerate all of the node's taints + factory.RegisterFitPredicate("PodToleratesNodeTaints", predicates.PodToleratesNodeTaints), + + // Fit is determined by volume topology requirements. factory.RegisterFitPredicateFactory( - "NoVolumeNodeConflict", + predicates.CheckVolumeBinding, func(args factory.PluginFactoryArgs) algorithm.FitPredicate { - return predicates.NewVolumeNodePredicate(args.PVInfo, args.PVCInfo, nil) + return predicates.NewVolumeBindingPredicate(args.VolumeBinder) }, ), ) +} + +// ApplyFeatureGates applies algorithm by feature gates. +func ApplyFeatureGates() { if utilfeature.DefaultFeatureGate.Enabled(features.TaintNodesByCondition) { + // Remove "CheckNodeCondition" predicate + factory.RemoveFitPredicate("CheckNodeCondition") + // Remove Key "CheckNodeCondition" From All Algorithm Provider + // The key will be removed from all providers which in algorithmProviderMap[] + // if you just want remove specific provider, call func RemovePredicateKeyFromAlgoProvider() + factory.RemovePredicateKeyFromAlgorithmProviderMap("CheckNodeCondition") + // Fit is determined based on whether a pod can tolerate all of the node's taints - predSet.Insert(factory.RegisterMandatoryFitPredicate("PodToleratesNodeTaints", predicates.PodToleratesNodeTaints)) + factory.RegisterMandatoryFitPredicate("PodToleratesNodeTaints", predicates.PodToleratesNodeTaints) + // Insert Key "PodToleratesNodeTaints" To All Algorithm Provider + // The key will insert to all providers which in algorithmProviderMap[] + // if you just want insert to specific provider, call func InsertPredicateKeyToAlgoProvider() + factory.InsertPredicateKeyToAlgorithmProviderMap("PodToleratesNodeTaints") + glog.Warningf("TaintNodesByCondition is enabled, PodToleratesNodeTaints predicate is mandatory") - } else { - // Fit is determied by node condtions: not ready, network unavailable and out of disk. - predSet.Insert(factory.RegisterMandatoryFitPredicate("CheckNodeCondition", predicates.CheckNodeConditionPredicate)) - // Fit is determined based on whether a pod can tolerate all of the node's taints - predSet.Insert(factory.RegisterFitPredicate("PodToleratesNodeTaints", predicates.PodToleratesNodeTaints)) } +} - return predSet +func registerAlgorithmProvider(predSet, priSet sets.String) { + // Registers algorithm providers. By default we use 'DefaultProvider', but user can specify one to be used + // by specifying flag. + factory.RegisterAlgorithmProvider(factory.DefaultProvider, predSet, priSet) + // Cluster autoscaler friendly scheduling algorithm. 
+ factory.RegisterAlgorithmProvider(ClusterAutoscalerProvider, predSet, + copyAndReplace(priSet, "LeastRequestedPriority", "MostRequestedPriority")) } func defaultPriorities() sets.String { @@ -211,7 +220,7 @@ func defaultPriorities() sets.String { factory.RegisterPriorityConfigFactory( "SelectorSpreadPriority", factory.PriorityConfigFactory{ - Function: func(args factory.PluginFactoryArgs) algorithm.PriorityFunction { + MapReduceFunction: func(args factory.PluginFactoryArgs) (algorithm.PriorityMapFunction, algorithm.PriorityReduceFunction) { return priorities.NewSelectorSpreadPriority(args.ServiceLister, args.ControllerLister, args.ReplicaSetLister, args.StatefulSetLister) }, Weight: 1, @@ -247,21 +256,6 @@ func defaultPriorities() sets.String { ) } -// getMaxVols checks the max PD volumes environment variable, otherwise returning a default value -func getMaxVols(defaultVal int) int { - if rawMaxVols := os.Getenv(KubeMaxPDVols); rawMaxVols != "" { - if parsedMaxVols, err := strconv.Atoi(rawMaxVols); err != nil { - glog.Errorf("Unable to parse maximum PD volumes value, using default of %v: %v", defaultVal, err) - } else if parsedMaxVols <= 0 { - glog.Errorf("Maximum PD volumes must be a positive value, using default of %v", defaultVal) - } else { - return parsedMaxVols - } - } - - return defaultVal -} - func copyAndReplace(set sets.String, replaceWhat, replaceWith string) sets.String { result := sets.NewString(set.List()...) if result.Has(replaceWhat) { diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/plugins.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/plugins.go index 2aace84d0460..f357a12d5a9b 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/plugins.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/plugins.go @@ -17,6 +17,10 @@ limitations under the License. package algorithmprovider import ( - // Import defaults of algorithmprovider for initialization. - _ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults" + "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults" ) + +// ApplyFeatureGates applies algorithm by feature gates. +func ApplyFeatureGates() { + defaults.ApplyFeatureGates() +} diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/BUILD index 4475d9e7bb3d..593cff9295a5 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/BUILD @@ -13,10 +13,10 @@ go_library( "types.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/api", deps = [ "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/doc.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/doc.go index 37d30d69eef4..a326c9973378 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/doc.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // Package api contains scheduler plugin API objects. package api diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/latest/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/latest/BUILD index e2d6f0d1bbea..ee0f0ffa1c3f 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/latest/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/latest/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["latest.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/api/latest", deps = [ "//plugin/pkg/scheduler/api:go_default_library", "//plugin/pkg/scheduler/api/v1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/latest/latest.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/latest/latest.go index cef40b6acb1b..f225847737a5 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/latest/latest.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/latest/latest.go @@ -21,7 +21,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer/json" "k8s.io/apimachinery/pkg/runtime/serializer/versioning" - "k8s.io/kubernetes/plugin/pkg/scheduler/api" + schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" _ "k8s.io/kubernetes/plugin/pkg/scheduler/api/v1" ) @@ -42,9 +42,9 @@ var Versions = []string{"v1"} var Codec runtime.Codec func init() { - jsonSerializer := json.NewSerializer(json.DefaultMetaFactory, api.Scheme, api.Scheme, true) + jsonSerializer := json.NewSerializer(json.DefaultMetaFactory, schedulerapi.Scheme, schedulerapi.Scheme, true) Codec = versioning.NewDefaultingCodecForScheme( - api.Scheme, + schedulerapi.Scheme, jsonSerializer, jsonSerializer, schema.GroupVersion{Version: Version}, diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/types.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/types.go index 0a8baf878e96..080fc386db5e 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/types.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/types.go @@ -46,7 +46,7 @@ type Policy struct { // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule // corresponding to every RequiredDuringScheduling affinity rule. // HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 1-100. - HardPodAffinitySymmetricWeight int + HardPodAffinitySymmetricWeight int32 } type PredicatePolicy struct { @@ -70,7 +70,7 @@ type PriorityPolicy struct { Argument *PriorityArgument } -// Represents the arguments that the different types of predicates take +// PredicateArgument represents the arguments to configure predicate functions in scheduler policy configuration. // Only one of its members may be specified type PredicateArgument struct { // The predicate that provides affinity for pods belonging to a service @@ -81,7 +81,7 @@ type PredicateArgument struct { LabelsPresence *LabelsPresence } -// Represents the arguments that the different types of priorities take. +// PriorityArgument represents the arguments to configure priority functions in scheduler policy configuration. 
// Only one of its members may be specified type PriorityArgument struct { // The priority function that ensures a good spread (anti-affinity) for pods belonging to a service @@ -92,14 +92,14 @@ type PriorityArgument struct { LabelPreference *LabelPreference } -// Holds the parameters that are used to configure the corresponding predicate +// ServiceAffinity holds the parameters that are used to configure the corresponding predicate in scheduler policy configuration. type ServiceAffinity struct { // The list of labels that identify node "groups" // All of the labels should match for the node to be considered a fit for hosting the pod Labels []string } -// Holds the parameters that are used to configure the corresponding predicate +// LabelsPresence holds the parameters that are used to configure the corresponding predicate in scheduler policy configuration. type LabelsPresence struct { // The list of labels that identify node "groups" // All of the labels should be either present (or absent) for the node to be considered a fit for hosting the pod @@ -108,13 +108,13 @@ type LabelsPresence struct { Presence bool } -// Holds the parameters that are used to configure the corresponding priority function +// ServiceAntiAffinity holds the parameters that are used to configure the corresponding priority function type ServiceAntiAffinity struct { // Used to identify node "groups" Label string } -// Holds the parameters that are used to configure the corresponding priority function +// LabelPreference holds the parameters that are used to configure the corresponding priority function type LabelPreference struct { // Used to identify node "groups" Label string @@ -124,7 +124,7 @@ type LabelPreference struct { Presence bool } -// Holds the parameters used to communicate with the extender. If a verb is unspecified/empty, +// ExtenderConfig holds the parameters used to communicate with the extender. If a verb is unspecified/empty, // it is assumed that the extender chose not to provide that extension. type ExtenderConfig struct { // URLPrefix at which the extender is available diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/v1/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/v1/BUILD index 0a160932f6d8..2516dc06b128 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/v1/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/v1/BUILD @@ -13,11 +13,11 @@ go_library( "types.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/api/v1", deps = [ "//plugin/pkg/scheduler/api:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/v1/doc.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/v1/doc.go index 950030d49cf7..2a53ab7c17b8 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/v1/doc.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/v1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // Package v1 contains scheduler plugin API objects. 
package v1 diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/v1/register.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/v1/register.go index 406c2e03f57e..292245a0a610 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/v1/register.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/v1/register.go @@ -19,7 +19,7 @@ package v1 import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/kubernetes/plugin/pkg/scheduler/api" + schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" ) // SchemeGroupVersion is group version used to register these objects @@ -27,7 +27,7 @@ import ( var SchemeGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} func init() { - if err := addKnownTypes(api.Scheme); err != nil { + if err := addKnownTypes(schedulerapi.Scheme); err != nil { // Programmer error. panic(err) } diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/v1/types.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/v1/types.go index 4f1b2369d225..3f6684a5f3cf 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/v1/types.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/v1/types.go @@ -62,7 +62,7 @@ type PriorityPolicy struct { Argument *PriorityArgument `json:"argument"` } -// Represents the arguments that the different types of predicates take +// PredicateArgument represents the arguments to configure predicate functions in scheduler policy configuration. // Only one of its members may be specified type PredicateArgument struct { // The predicate that provides affinity for pods belonging to a service @@ -73,7 +73,7 @@ type PredicateArgument struct { LabelsPresence *LabelsPresence `json:"labelsPresence"` } -// Represents the arguments that the different types of priorities take. +// PriorityArgument represents the arguments to configure priority functions in scheduler policy configuration. // Only one of its members may be specified type PriorityArgument struct { // The priority function that ensures a good spread (anti-affinity) for pods belonging to a service @@ -84,14 +84,14 @@ type PriorityArgument struct { LabelPreference *LabelPreference `json:"labelPreference"` } -// Holds the parameters that are used to configure the corresponding predicate +// ServiceAffinity holds the parameters that are used to configure the corresponding predicate in scheduler policy configuration. type ServiceAffinity struct { // The list of labels that identify node "groups" // All of the labels should match for the node to be considered a fit for hosting the pod Labels []string `json:"labels"` } -// Holds the parameters that are used to configure the corresponding predicate +// LabelsPresence holds the parameters that are used to configure the corresponding predicate in scheduler policy configuration. 
type LabelsPresence struct { // The list of labels that identify node "groups" // All of the labels should be either present (or absent) for the node to be considered a fit for hosting the pod @@ -100,13 +100,13 @@ type LabelsPresence struct { Presence bool `json:"presence"` } -// Holds the parameters that are used to configure the corresponding priority function +// ServiceAntiAffinity holds the parameters that are used to configure the corresponding priority function type ServiceAntiAffinity struct { // Used to identify node "groups" Label string `json:"label"` } -// Holds the parameters that are used to configure the corresponding priority function +// LabelPreference holds the parameters that are used to configure the corresponding priority function type LabelPreference struct { // Used to identify node "groups" Label string `json:"label"` @@ -116,7 +116,7 @@ type LabelPreference struct { Presence bool `json:"presence"` } -// Holds the parameters used to communicate with the extender. If a verb is unspecified/empty, +// ExtenderConfig holds the parameters used to communicate with the extender. If a verb is unspecified/empty, // it is assumed that the extender chose not to provide that extension. type ExtenderConfig struct { // URLPrefix at which the extender is available diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/v1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/v1/zz_generated.deepcopy.go index 577a3c7f3e4e..0e23a656d0c0 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/v1/zz_generated.deepcopy.go @@ -22,85 +22,10 @@ package v1 import ( core_v1 "k8s.io/api/core/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" rest "k8s.io/client-go/rest" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ExtenderArgs).DeepCopyInto(out.(*ExtenderArgs)) - return nil - }, InType: reflect.TypeOf(&ExtenderArgs{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ExtenderBindingArgs).DeepCopyInto(out.(*ExtenderBindingArgs)) - return nil - }, InType: reflect.TypeOf(&ExtenderBindingArgs{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ExtenderBindingResult).DeepCopyInto(out.(*ExtenderBindingResult)) - return nil - }, InType: reflect.TypeOf(&ExtenderBindingResult{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ExtenderConfig).DeepCopyInto(out.(*ExtenderConfig)) - return nil - }, InType: reflect.TypeOf(&ExtenderConfig{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ExtenderFilterResult).DeepCopyInto(out.(*ExtenderFilterResult)) - return nil - }, InType: reflect.TypeOf(&ExtenderFilterResult{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HostPriority).DeepCopyInto(out.(*HostPriority)) - return nil - }, InType: reflect.TypeOf(&HostPriority{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LabelPreference).DeepCopyInto(out.(*LabelPreference)) - return nil - }, InType: reflect.TypeOf(&LabelPreference{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LabelsPresence).DeepCopyInto(out.(*LabelsPresence)) - return nil - }, InType: reflect.TypeOf(&LabelsPresence{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Policy).DeepCopyInto(out.(*Policy)) - return nil - }, InType: reflect.TypeOf(&Policy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PredicateArgument).DeepCopyInto(out.(*PredicateArgument)) - return nil - }, InType: reflect.TypeOf(&PredicateArgument{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PredicatePolicy).DeepCopyInto(out.(*PredicatePolicy)) - return nil - }, InType: reflect.TypeOf(&PredicatePolicy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PriorityArgument).DeepCopyInto(out.(*PriorityArgument)) - return nil - }, InType: reflect.TypeOf(&PriorityArgument{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PriorityPolicy).DeepCopyInto(out.(*PriorityPolicy)) - return nil - }, InType: reflect.TypeOf(&PriorityPolicy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceAffinity).DeepCopyInto(out.(*ServiceAffinity)) - return nil - }, InType: reflect.TypeOf(&ServiceAffinity{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceAntiAffinity).DeepCopyInto(out.(*ServiceAntiAffinity)) - return nil - }, InType: 
reflect.TypeOf(&ServiceAntiAffinity{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ExtenderArgs) DeepCopyInto(out *ExtenderArgs) { *out = *in diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/validation/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/validation/BUILD index 3639204e7853..2120ba3dc092 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/validation/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/validation/BUILD @@ -9,6 +9,7 @@ load( go_library( name = "go_default_library", srcs = ["validation.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/api/validation", deps = [ "//plugin/pkg/scheduler/api:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", @@ -18,6 +19,7 @@ go_library( go_test( name = "go_default_test", srcs = ["validation_test.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/api/validation", library = ":go_default_library", deps = ["//plugin/pkg/scheduler/api:go_default_library"], ) diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/validation/validation.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/validation/validation.go index 620a0343ed70..cec33b1955b8 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/validation/validation.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/validation/validation.go @@ -36,7 +36,7 @@ func ValidatePolicy(policy schedulerapi.Policy) error { binders := 0 for _, extender := range policy.ExtenderConfigs { - if extender.Weight <= 0 { + if len(extender.PrioritizeVerb) > 0 && extender.Weight <= 0 { validationErrors = append(validationErrors, fmt.Errorf("Priority for extender %s should have a positive weight applied to it", extender.URLPrefix)) } if extender.BindVerb != "" { diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/zz_generated.deepcopy.go index b5288e26e5bc..df5e3e972e6a 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/zz_generated.deepcopy.go @@ -22,85 +22,10 @@ package api import ( v1 "k8s.io/api/core/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" rest "k8s.io/client-go/rest" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ExtenderArgs).DeepCopyInto(out.(*ExtenderArgs)) - return nil - }, InType: reflect.TypeOf(&ExtenderArgs{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ExtenderBindingArgs).DeepCopyInto(out.(*ExtenderBindingArgs)) - return nil - }, InType: reflect.TypeOf(&ExtenderBindingArgs{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ExtenderBindingResult).DeepCopyInto(out.(*ExtenderBindingResult)) - return nil - }, InType: reflect.TypeOf(&ExtenderBindingResult{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ExtenderConfig).DeepCopyInto(out.(*ExtenderConfig)) - return nil - }, InType: reflect.TypeOf(&ExtenderConfig{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ExtenderFilterResult).DeepCopyInto(out.(*ExtenderFilterResult)) - return nil - }, InType: reflect.TypeOf(&ExtenderFilterResult{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HostPriority).DeepCopyInto(out.(*HostPriority)) - return nil - }, InType: reflect.TypeOf(&HostPriority{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LabelPreference).DeepCopyInto(out.(*LabelPreference)) - return nil - }, InType: reflect.TypeOf(&LabelPreference{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LabelsPresence).DeepCopyInto(out.(*LabelsPresence)) - return nil - }, InType: reflect.TypeOf(&LabelsPresence{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Policy).DeepCopyInto(out.(*Policy)) - return nil - }, InType: reflect.TypeOf(&Policy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PredicateArgument).DeepCopyInto(out.(*PredicateArgument)) - return nil - }, InType: reflect.TypeOf(&PredicateArgument{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PredicatePolicy).DeepCopyInto(out.(*PredicatePolicy)) - return nil - }, InType: reflect.TypeOf(&PredicatePolicy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PriorityArgument).DeepCopyInto(out.(*PriorityArgument)) - return nil - }, InType: reflect.TypeOf(&PriorityArgument{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PriorityPolicy).DeepCopyInto(out.(*PriorityPolicy)) - return nil - }, InType: reflect.TypeOf(&PriorityPolicy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceAffinity).DeepCopyInto(out.(*ServiceAffinity)) - return nil - }, InType: reflect.TypeOf(&ServiceAffinity{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceAntiAffinity).DeepCopyInto(out.(*ServiceAntiAffinity)) - return nil - }, InType: 
reflect.TypeOf(&ServiceAntiAffinity{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ExtenderArgs) DeepCopyInto(out *ExtenderArgs) { *out = *in diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/core/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/core/BUILD index f7c3d6b9c62c..aa14221fe524 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/core/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/core/BUILD @@ -12,7 +12,9 @@ go_test( "equivalence_cache_test.go", "extender_test.go", "generic_scheduler_test.go", + "scheduling_queue_test.go", ], + importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/core", library = ":go_default_library", deps = [ "//plugin/pkg/scheduler/algorithm:go_default_library", @@ -22,6 +24,7 @@ go_test( "//plugin/pkg/scheduler/api:go_default_library", "//plugin/pkg/scheduler/schedulercache:go_default_library", "//plugin/pkg/scheduler/testing:go_default_library", + "//plugin/pkg/scheduler/util:go_default_library", "//vendor/k8s.io/api/apps/v1beta1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", @@ -38,22 +41,31 @@ go_library( "equivalence_cache.go", "extender.go", "generic_scheduler.go", + "scheduling_queue.go", ], + importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/core", deps = [ + "//pkg/api/v1/pod:go_default_library", "//pkg/util/hash:go_default_library", "//plugin/pkg/scheduler/algorithm:go_default_library", "//plugin/pkg/scheduler/algorithm/predicates:go_default_library", + "//plugin/pkg/scheduler/algorithm/priorities/util:go_default_library", "//plugin/pkg/scheduler/api:go_default_library", "//plugin/pkg/scheduler/schedulercache:go_default_library", "//plugin/pkg/scheduler/util:go_default_library", + "//plugin/pkg/scheduler/volumebinder:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/golang/groupcache/lru:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/api/policy/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/trace:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", + "//vendor/k8s.io/client-go/tools/cache:go_default_library", "//vendor/k8s.io/client-go/util/workqueue:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/core/equivalence_cache.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/core/equivalence_cache.go index 9977fe18d30b..ca27f40d57ae 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/core/equivalence_cache.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/core/equivalence_cache.go @@ -173,7 +173,7 @@ func (ec *EquivalenceCache) InvalidateAllCachedPredicateItemOfNode(nodeName stri // InvalidateCachedPredicateItemForPodAdd is a wrapper of InvalidateCachedPredicateItem for pod add case func (ec *EquivalenceCache) InvalidateCachedPredicateItemForPodAdd(pod *v1.Pod, nodeName string) { - // MatchInterPodAffinity: we assume scheduler can make sure newly binded pod + // MatchInterPodAffinity: we assume scheduler can make sure newly bound pod // will not 
break the existing inter pod affinity. So we does not need to invalidate // MatchInterPodAffinity when pod added. // @@ -188,12 +188,29 @@ func (ec *EquivalenceCache) InvalidateCachedPredicateItemForPodAdd(pod *v1.Pod, // GeneralPredicates: will always be affected by adding a new pod invalidPredicates := sets.NewString("GeneralPredicates") + + // MaxPDVolumeCountPredicate: we check the volumes of pod to make decision. + for _, vol := range pod.Spec.Volumes { + if vol.PersistentVolumeClaim != nil { + invalidPredicates.Insert("MaxEBSVolumeCount", "MaxGCEPDVolumeCount", "MaxAzureDiskVolumeCount") + } else { + if vol.AWSElasticBlockStore != nil { + invalidPredicates.Insert("MaxEBSVolumeCount") + } + if vol.GCEPersistentDisk != nil { + invalidPredicates.Insert("MaxGCEPDVolumeCount") + } + if vol.AzureDisk != nil { + invalidPredicates.Insert("MaxAzureDiskVolumeCount") + } + } + } ec.InvalidateCachedPredicateItem(nodeName, invalidPredicates) } // getHashEquivalencePod returns the hash of equivalence pod. // 1. equivalenceHash -// 2. if equivalence pod is found +// 2. if equivalence hash is valid func (ec *EquivalenceCache) getHashEquivalencePod(pod *v1.Pod) (uint64, bool) { equivalencePod := ec.getEquivalencePod(pod) if equivalencePod != nil { diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/core/generic_scheduler.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/core/generic_scheduler.go index abfd746c91bd..e14d220be3f5 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/core/generic_scheduler.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/core/generic_scheduler.go @@ -26,6 +26,9 @@ import ( "time" "k8s.io/api/core/v1" + policy "k8s.io/api/policy/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/errors" utiltrace "k8s.io/apiserver/pkg/util/trace" "k8s.io/client-go/util/workqueue" @@ -36,19 +39,26 @@ import ( "k8s.io/kubernetes/plugin/pkg/scheduler/util" "github.com/golang/glog" + "k8s.io/kubernetes/plugin/pkg/scheduler/volumebinder" ) type FailedPredicateMap map[string][]algorithm.PredicateFailureReason type FitError struct { Pod *v1.Pod + NumAllNodes int FailedPredicates FailedPredicateMap } +type Victims struct { + pods []*v1.Pod + numPDBViolations int +} + var ErrNoNodesAvailable = fmt.Errorf("no nodes available to schedule pods") const ( - NoNodeAvailableMsg = "No nodes are available that match all of the predicates" + NoNodeAvailableMsg = "0/%v nodes are available" // NominatedNodeAnnotationKey is used to annotate a pod that has preempted other pods. // The scheduler uses the annotation to find that the pod shouldn't preempt more pods // when it gets to the head of scheduling queue again. 
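The next hunk rewires FitError.Error() so the message carries the total node count (via the new NumAllNodes field and the "0/%v nodes are available" prefix) and renders each failure reason as "<count> <reason>". As a rough, standalone illustration of that aggregation, the sketch below uses hypothetical reason strings and plain maps; it is not the vendored implementation.

package main

import (
	"fmt"
	"sort"
	"strings"
)

// buildFitError approximates how FitError.Error aggregates per-node predicate
// failures into "0/N nodes are available: <count> <reason>, ...".
func buildFitError(numAllNodes int, failedPredicates map[string][]string) string {
	reasons := map[string]int{}
	for _, nodeReasons := range failedPredicates {
		for _, r := range nodeReasons {
			reasons[r]++
		}
	}
	reasonStrings := []string{}
	for r, count := range reasons {
		reasonStrings = append(reasonStrings, fmt.Sprintf("%v %v", count, r))
	}
	sort.Strings(reasonStrings)
	return fmt.Sprintf("0/%v nodes are available: %v.", numAllNodes, strings.Join(reasonStrings, ", "))
}

func main() {
	// Hypothetical failure reasons for a three-node cluster.
	failed := map[string][]string{
		"node-a": {"Insufficient cpu"},
		"node-b": {"Insufficient cpu"},
		"node-c": {"node(s) had disk pressure"},
	}
	fmt.Println(buildFitError(3, failed))
	// Prints: 0/3 nodes are available: 1 node(s) had disk pressure, 2 Insufficient cpu.
}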
@@ -68,28 +78,29 @@ func (f *FitError) Error() string { sortReasonsHistogram := func() []string { reasonStrings := []string{} for k, v := range reasons { - reasonStrings = append(reasonStrings, fmt.Sprintf("%v (%v)", k, v)) + reasonStrings = append(reasonStrings, fmt.Sprintf("%v %v", v, k)) } sort.Strings(reasonStrings) return reasonStrings } - reasonMsg := fmt.Sprintf(NoNodeAvailableMsg+": %v.", strings.Join(sortReasonsHistogram(), ", ")) + reasonMsg := fmt.Sprintf(NoNodeAvailableMsg+": %v.", f.NumAllNodes, strings.Join(sortReasonsHistogram(), ", ")) return reasonMsg } type genericScheduler struct { cache schedulercache.Cache equivalenceCache *EquivalenceCache + schedulingQueue SchedulingQueue predicates map[string]algorithm.FitPredicate priorityMetaProducer algorithm.MetadataProducer predicateMetaProducer algorithm.PredicateMetadataProducer prioritizers []algorithm.PriorityConfig extenders []algorithm.SchedulerExtender - pods algorithm.PodLister lastNodeIndexLock sync.Mutex lastNodeIndex uint64 cachedNodeInfoMap map[string]*schedulercache.NodeInfo + volumeBinder *volumebinder.VolumeBinder } // Schedule tries to schedule the given pod to one of node in the node list. @@ -114,7 +125,7 @@ func (g *genericScheduler) Schedule(pod *v1.Pod, nodeLister algorithm.NodeLister } trace.Step("Computing predicates") - filteredNodes, failedPredicateMap, err := findNodesThatFit(pod, g.cachedNodeInfoMap, nodes, g.predicates, g.extenders, g.predicateMetaProducer, g.equivalenceCache) + filteredNodes, failedPredicateMap, err := findNodesThatFit(pod, g.cachedNodeInfoMap, nodes, g.predicates, g.extenders, g.predicateMetaProducer, g.equivalenceCache, g.schedulingQueue) if err != nil { return "", err } @@ -122,11 +133,18 @@ func (g *genericScheduler) Schedule(pod *v1.Pod, nodeLister algorithm.NodeLister if len(filteredNodes) == 0 { return "", &FitError{ Pod: pod, + NumAllNodes: len(nodes), FailedPredicates: failedPredicateMap, } } trace.Step("Prioritizing") + + // When only one node after predicate, just use it. + if len(filteredNodes) == 1 { + return filteredNodes[0].Name, nil + } + metaPrioritiesInterface := g.priorityMetaProducer(pod, g.cachedNodeInfoMap) priorityList, err := PrioritizeNodes(pod, g.cachedNodeInfoMap, metaPrioritiesInterface, g.prioritizers, filteredNodes, g.extenders) if err != nil { @@ -170,61 +188,89 @@ func (g *genericScheduler) selectHost(priorityList schedulerapi.HostPriorityList // preempt finds nodes with pods that can be preempted to make room for "pod" to // schedule. It chooses one of the nodes and preempts the pods on the node and -// returns the node and the list of preempted pods if such a node is found. -// TODO(bsalamat): Add priority-based scheduling. More info: today one or more -// pending pods (different from the pod that triggered the preemption(s)) may -// schedule into some portion of the resources freed up by the preemption(s) -// before the pod that triggered the preemption(s) has a chance to schedule -// there, thereby preventing the pod that triggered the preemption(s) from -// scheduling. Solution is given at: -// https://github.com/kubernetes/community/blob/master/contributors/design-proposals/pod-preemption.md#preemption-mechanics -func (g *genericScheduler) Preempt(pod *v1.Pod, nodeLister algorithm.NodeLister, scheduleErr error) (*v1.Node, []*v1.Pod, error) { +// returns 1) the node, 2) the list of preempted pods if such a node is found, +// 3) A list of pods whose nominated node name should be cleared, and 4) any +// possible error. 
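Before the Preempt implementation below, here is a hedged, standalone sketch of how a caller might consume its four results (chosen node, victims, pods whose nomination should be cleared, error). The types, the annotation key, and handlePreemption are hypothetical stand-ins for illustration only, not the scheduler's real wiring.

package main

import "fmt"

// pod and node are simplified stand-ins for the real API objects.
type pod struct {
	name        string
	annotations map[string]string
}
type node struct{ name string }

// preemptResult mirrors the shape of genericScheduler.Preempt's results: the
// chosen node, the victims to evict, and pods whose stale nomination should be
// cleared so they move back to the active queue.
type preemptResult struct {
	node               *node
	victims            []*pod
	nominationsToClear []*pod
}

func handlePreemption(preemptor *pod, r preemptResult) {
	if r.node == nil {
		return // nothing to do; the pod stays pending
	}
	// 1) Record where the preemptor is expected to fit once victims are gone.
	preemptor.annotations["scheduler/nominated-node"] = r.node.name // hypothetical key
	// 2) Evict the victims (here we only print).
	for _, v := range r.victims {
		fmt.Println("evicting", v.name)
	}
	// 3) Clear nominations of lower priority pods that no longer fit there.
	for _, p := range r.nominationsToClear {
		delete(p.annotations, "scheduler/nominated-node")
	}
}

func main() {
	preemptor := &pod{name: "high-prio", annotations: map[string]string{}}
	r := preemptResult{
		node:    &node{name: "node-a"},
		victims: []*pod{{name: "low-prio-1"}},
	}
	handlePreemption(preemptor, r)
	fmt.Println(preemptor.annotations)
}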
+func (g *genericScheduler) Preempt(pod *v1.Pod, nodeLister algorithm.NodeLister, scheduleErr error) (*v1.Node, []*v1.Pod, []*v1.Pod, error) { // Scheduler may return various types of errors. Consider preemption only if // the error is of type FitError. fitError, ok := scheduleErr.(*FitError) if !ok || fitError == nil { - return nil, nil, nil + return nil, nil, nil, nil } err := g.cache.UpdateNodeNameToInfoMap(g.cachedNodeInfoMap) if err != nil { - return nil, nil, err + return nil, nil, nil, err } if !podEligibleToPreemptOthers(pod, g.cachedNodeInfoMap) { glog.V(5).Infof("Pod %v is not eligible for more preemption.", pod.Name) - return nil, nil, nil + return nil, nil, nil, nil } allNodes, err := nodeLister.List() if err != nil { - return nil, nil, err + return nil, nil, nil, err } if len(allNodes) == 0 { - return nil, nil, ErrNoNodesAvailable + return nil, nil, nil, ErrNoNodesAvailable } potentialNodes := nodesWherePreemptionMightHelp(pod, allNodes, fitError.FailedPredicates) if len(potentialNodes) == 0 { glog.V(3).Infof("Preemption will not help schedule pod %v on any node.", pod.Name) - return nil, nil, nil + // In this case, we should clean-up any existing nominated node name of the pod. + return nil, nil, []*v1.Pod{pod}, nil + } + pdbs, err := g.cache.ListPDBs(labels.Everything()) + if err != nil { + return nil, nil, nil, err } - nodeToPods, err := selectNodesForPreemption(pod, g.cachedNodeInfoMap, potentialNodes, g.predicates, g.predicateMetaProducer) + nodeToVictims, err := selectNodesForPreemption(pod, g.cachedNodeInfoMap, potentialNodes, g.predicates, g.predicateMetaProducer, g.schedulingQueue, pdbs) if err != nil { - return nil, nil, err + return nil, nil, nil, err } - for len(nodeToPods) > 0 { - node := pickOneNodeForPreemption(nodeToPods) + for len(nodeToVictims) > 0 { + node := pickOneNodeForPreemption(nodeToVictims) if node == nil { - return nil, nil, err + return nil, nil, nil, err } - passes, pErr := nodePassesExtendersForPreemption(pod, node.Name, nodeToPods[node], g.cachedNodeInfoMap, g.extenders) + passes, pErr := nodePassesExtendersForPreemption(pod, node.Name, nodeToVictims[node].pods, g.cachedNodeInfoMap, g.extenders) if passes && pErr == nil { - return node, nodeToPods[node], err + // Lower priority pods nominated to run on this node, may no longer fit on + // this node. So, we should remove their nomination. Removing their + // nomination updates these pods and moves them to the active queue. It + // lets scheduler find another place for them. + nominatedPods := g.getLowerPriorityNominatedPods(pod, node.Name) + return node, nodeToVictims[node].pods, nominatedPods, err } if pErr != nil { glog.Errorf("Error occurred while checking extenders for preemption on node %v: %v", node, pErr) } // Remove the node from the map and try to pick a different node. - delete(nodeToPods, node) + delete(nodeToVictims, node) + } + return nil, nil, nil, err +} + +// GetLowerPriorityNominatedPods returns pods whose priority is smaller than the +// priority of the given "pod" and are nominated to run on the given node. +// Note: We could possibly check if the nominated lower priority pods still fit +// and return those that no longer fit, but that would require lots of +// manipulation of NodeInfo and PredicateMeta per nominated pod. It may not be +// worth the complexity, especially because we generally expect to have a very +// small number of nominated pods per node. 
+func (g *genericScheduler) getLowerPriorityNominatedPods(pod *v1.Pod, nodeName string) []*v1.Pod { + pods := g.schedulingQueue.WaitingPodsForNode(nodeName) + if len(pods) == 0 { + return nil + } + + var lowerPriorityPods []*v1.Pod + podPriority := util.GetPodPriority(pod) + for _, p := range pods { + if util.GetPodPriority(p) < podPriority { + lowerPriorityPods = append(lowerPriorityPods, p) + } } - return nil, nil, err + return lowerPriorityPods } // Filters the nodes to find the ones that fit based on the given predicate functions @@ -237,6 +283,7 @@ func findNodesThatFit( extenders []algorithm.SchedulerExtender, metadataProducer algorithm.PredicateMetadataProducer, ecache *EquivalenceCache, + schedulingQueue SchedulingQueue, ) ([]*v1.Node, FailedPredicateMap, error) { var filtered []*v1.Node failedPredicateMap := FailedPredicateMap{} @@ -255,7 +302,7 @@ func findNodesThatFit( meta := metadataProducer(pod, nodeNameToInfo) checkNode := func(i int) { nodeName := nodes[i].Name - fits, failedPredicates, err := podFitsOnNode(pod, meta, nodeNameToInfo[nodeName], predicateFuncs, ecache) + fits, failedPredicates, err := podFitsOnNode(pod, meta, nodeNameToInfo[nodeName], predicateFuncs, ecache, schedulingQueue) if err != nil { predicateResultLock.Lock() errs[err.Error()]++ @@ -299,9 +346,52 @@ func findNodesThatFit( return filtered, failedPredicateMap, nil } -// Checks whether node with a given name and NodeInfo satisfies all predicateFuncs. -func podFitsOnNode(pod *v1.Pod, meta algorithm.PredicateMetadata, info *schedulercache.NodeInfo, predicateFuncs map[string]algorithm.FitPredicate, - ecache *EquivalenceCache) (bool, []algorithm.PredicateFailureReason, error) { +// addNominatedPods adds pods with equal or greater priority which are nominated +// to run on the node given in nodeInfo to meta and nodeInfo. It returns 1) whether +// any pod was found, 2) augmented meta data, 3) augmented nodeInfo. +func addNominatedPods(podPriority int32, meta algorithm.PredicateMetadata, + nodeInfo *schedulercache.NodeInfo, queue SchedulingQueue) (bool, algorithm.PredicateMetadata, + *schedulercache.NodeInfo) { + if queue == nil || nodeInfo == nil || nodeInfo.Node() == nil { + // This may happen only in tests. + return false, meta, nodeInfo + } + nominatedPods := queue.WaitingPodsForNode(nodeInfo.Node().Name) + if nominatedPods == nil || len(nominatedPods) == 0 { + return false, meta, nodeInfo + } + var metaOut algorithm.PredicateMetadata = nil + if meta != nil { + metaOut = meta.ShallowCopy() + } + nodeInfoOut := nodeInfo.Clone() + for _, p := range nominatedPods { + if util.GetPodPriority(p) >= podPriority { + nodeInfoOut.AddPod(p) + if metaOut != nil { + metaOut.AddPod(p, nodeInfoOut) + } + } + } + return true, metaOut, nodeInfoOut +} + +// podFitsOnNode checks whether a node given by NodeInfo satisfies the given predicate functions. +// This function is called from two different places: Schedule and Preempt. +// When it is called from Schedule, we want to test whether the pod is schedulable +// on the node with all the existing pods on the node plus higher and equal priority +// pods nominated to run on the node. +// When it is called from Preempt, we should remove the victims of preemption and +// add the nominated pods. Removal of the victims is done by SelectVictimsOnNode(). +// It removes victims from meta and NodeInfo before calling this function. 
+func podFitsOnNode( + pod *v1.Pod, + meta algorithm.PredicateMetadata, + info *schedulercache.NodeInfo, + predicateFuncs map[string]algorithm.FitPredicate, + ecache *EquivalenceCache, + queue SchedulingQueue, +) (bool, []algorithm.PredicateFailureReason, error) { var ( equivalenceHash uint64 failedPredicates []algorithm.PredicateFailureReason @@ -311,34 +401,85 @@ func podFitsOnNode(pod *v1.Pod, meta algorithm.PredicateMetadata, info *schedule reasons []algorithm.PredicateFailureReason err error ) + predicateResults := make(map[string]HostPredicate) + if ecache != nil { // getHashEquivalencePod will return immediately if no equivalence pod found equivalenceHash, eCacheAvailable = ecache.getHashEquivalencePod(pod) } - for predicateKey, predicate := range predicateFuncs { - // If equivalenceCache is available - if eCacheAvailable { - // PredicateWithECache will returns it's cached predicate results - fit, reasons, invalid = ecache.PredicateWithECache(pod.GetName(), info.Node().GetName(), predicateKey, equivalenceHash) - } - - if !eCacheAvailable || invalid { - // we need to execute predicate functions since equivalence cache does not work - fit, reasons, err = predicate(pod, meta, info) - if err != nil { - return false, []algorithm.PredicateFailureReason{}, err + podsAdded := false + // We run predicates twice in some cases. If the node has greater or equal priority + // nominated pods, we run them when those pods are added to meta and nodeInfo. + // If all predicates succeed in this pass, we run them again when these + // nominated pods are not added. This second pass is necessary because some + // predicates such as inter-pod affinity may not pass without the nominated pods. + // If there are no nominated pods for the node or if the first run of the + // predicates fail, we don't run the second pass. + // We consider only equal or higher priority pods in the first pass, because + // those are the current "pod" must yield to them and not take a space opened + // for running them. It is ok if the current "pod" take resources freed for + // lower priority pods. + // Requiring that the new pod is schedulable in both circumstances ensures that + // we are making a conservative decision: predicates like resources and inter-pod + // anti-affinity are more likely to fail when the nominated pods are treated + // as running, while predicates like pod affinity are more likely to fail when + // the nominated pods are treated as not running. We can't just assume the + // nominated pods are running because they are not running right now and in fact, + // they may end up getting scheduled to a different node. + for i := 0; i < 2; i++ { + metaToUse := meta + nodeInfoToUse := info + if i == 0 { + podsAdded, metaToUse, nodeInfoToUse = addNominatedPods(util.GetPodPriority(pod), meta, info, queue) + } else if !podsAdded || len(failedPredicates) != 0 { + break + } + // Bypass eCache if node has any nominated pods. + // TODO(bsalamat): consider using eCache and adding proper eCache invalidations + // when pods are nominated or their nominations change. + eCacheAvailable = eCacheAvailable && !podsAdded + for predicateKey, predicate := range predicateFuncs { + if eCacheAvailable { + // PredicateWithECache will return its cached predicate results. + fit, reasons, invalid = ecache.PredicateWithECache(pod.GetName(), info.Node().GetName(), predicateKey, equivalenceHash) } - if eCacheAvailable { - // update equivalence cache with newly computed fit & reasons - // TODO(resouer) should we do this in another thread? 
any race? - ecache.UpdateCachedPredicateItem(pod.GetName(), info.Node().GetName(), predicateKey, fit, reasons, equivalenceHash) + // TODO(bsalamat): When one predicate fails and fit is false, why do we continue + // checking other predicates? + if !eCacheAvailable || invalid { + // we need to execute predicate functions since equivalence cache does not work + fit, reasons, err = predicate(pod, metaToUse, nodeInfoToUse) + if err != nil { + return false, []algorithm.PredicateFailureReason{}, err + } + if eCacheAvailable { + // Store data to update eCache after this loop. + if res, exists := predicateResults[predicateKey]; exists { + res.Fit = res.Fit && fit + res.FailReasons = append(res.FailReasons, reasons...) + predicateResults[predicateKey] = res + } else { + predicateResults[predicateKey] = HostPredicate{Fit: fit, FailReasons: reasons} + } + } + } + if !fit { + // eCache is available and valid, and predicates result is unfit, record the fail reasons + failedPredicates = append(failedPredicates, reasons...) } } + } - if !fit { - // eCache is available and valid, and predicates result is unfit, record the fail reasons - failedPredicates = append(failedPredicates, reasons...) + // TODO(bsalamat): This way of updating equiv. cache has a race condition against + // cache invalidations invoked in event handlers. This race has existed despite locks + // in eCache implementation. If cache is invalidated after a predicate is executed + // and before we update the cache, the updates should not be written to the cache. + if eCacheAvailable { + nodeName := info.Node().GetName() + for predKey, result := range predicateResults { + // update equivalence cache with newly computed fit & reasons + // TODO(resouer) should we do this in another thread? any race? + ecache.UpdateCachedPredicateItem(pod.GetName(), nodeName, predKey, result.Fit, result.FailReasons, equivalenceHash) } } return len(failedPredicates) == 0, failedPredicates, nil @@ -426,6 +567,11 @@ func PrioritizeNodes( if err := config.Reduce(pod, meta, nodeNameToInfo, results[index]); err != nil { appendError(err) } + if glog.V(10) { + for _, hostPriority := range results[index] { + glog.Infof("%v -> %v: %v, Score: (%d)", pod.Name, hostPriority.Host, config.Name, hostPriority.Score) + } + } }(i, priorityConfig) } // Wait for all computations to be finished. @@ -493,51 +639,66 @@ func EqualPriorityMap(_ *v1.Pod, _ interface{}, nodeInfo *schedulercache.NodeInf // pickOneNodeForPreemption chooses one node among the given nodes. It assumes // pods in each map entry are ordered by decreasing priority. // It picks a node based on the following criteria: -// 1. A node with minimum highest priority victim is picked. -// 2. Ties are broken by sum of priorities of all victims. -// 3. If there are still ties, node with the minimum number of victims is picked. -// 4. If there are still ties, the first such node is picked (sort of randomly). -//TODO(bsalamat): Try to reuse the "nodeScore" slices in order to save GC time. -func pickOneNodeForPreemption(nodesToPods map[*v1.Node][]*v1.Pod) *v1.Node { - type nodeScore struct { - node *v1.Node - highestPriority int32 - sumPriorities int64 - numPods int - } - if len(nodesToPods) == 0 { +// 1. A node with minimum number of PDB violations. +// 2. A node with minimum highest priority victim is picked. +// 3. Ties are broken by sum of priorities of all victims. +// 4. If there are still ties, node with the minimum number of victims is picked. +// 5. 
If there are still ties, the first such node is picked (sort of randomly). +//TODO(bsalamat): Try to reuse the "min*Nodes" slices in order to save GC time. +func pickOneNodeForPreemption(nodesToVictims map[*v1.Node]*Victims) *v1.Node { + if len(nodesToVictims) == 0 { return nil } - minHighestPriority := int32(math.MaxInt32) - minPriorityScores := []*nodeScore{} - for node, pods := range nodesToPods { - if len(pods) == 0 { + minNumPDBViolatingPods := math.MaxInt32 + var minPDBViolatingNodes []*v1.Node + for node, victims := range nodesToVictims { + if len(victims.pods) == 0 { // We found a node that doesn't need any preemption. Return it! // This should happen rarely when one or more pods are terminated between // the time that scheduler tries to schedule the pod and the time that // preemption logic tries to find nodes for preemption. return node } + numPDBViolatingPods := victims.numPDBViolations + if numPDBViolatingPods < minNumPDBViolatingPods { + minNumPDBViolatingPods = numPDBViolatingPods + minPDBViolatingNodes = nil + } + if numPDBViolatingPods == minNumPDBViolatingPods { + minPDBViolatingNodes = append(minPDBViolatingNodes, node) + } + } + if len(minPDBViolatingNodes) == 1 { + return minPDBViolatingNodes[0] + } + + // There are more than one node with minimum number PDB violating pods. Find + // the one with minimum highest priority victim. + minHighestPriority := int32(math.MaxInt32) + var minPriorityNodes []*v1.Node + for _, node := range minPDBViolatingNodes { + victims := nodesToVictims[node] // highestPodPriority is the highest priority among the victims on this node. - highestPodPriority := util.GetPodPriority(pods[0]) + highestPodPriority := util.GetPodPriority(victims.pods[0]) if highestPodPriority < minHighestPriority { minHighestPriority = highestPodPriority - minPriorityScores = nil + minPriorityNodes = nil } if highestPodPriority == minHighestPriority { - minPriorityScores = append(minPriorityScores, &nodeScore{node: node, highestPriority: highestPodPriority, numPods: len(pods)}) + minPriorityNodes = append(minPriorityNodes, node) } } - if len(minPriorityScores) == 1 { - return minPriorityScores[0].node + if len(minPriorityNodes) == 1 { + return minPriorityNodes[0] } + // There are a few nodes with minimum highest priority victim. Find the // smallest sum of priorities. minSumPriorities := int64(math.MaxInt64) - minSumPriorityScores := []*nodeScore{} - for _, nodeScore := range minPriorityScores { + var minSumPriorityNodes []*v1.Node + for _, node := range minPriorityNodes { var sumPriorities int64 - for _, pod := range nodesToPods[nodeScore.node] { + for _, pod := range nodesToVictims[node].pods { // We add MaxInt32+1 to all priorities to make all of them >= 0. 
This is // needed so that a node with a few pods with negative priority is not // picked over a node with a smaller number of pods with the same negative @@ -546,33 +707,34 @@ func pickOneNodeForPreemption(nodesToPods map[*v1.Node][]*v1.Pod) *v1.Node { } if sumPriorities < minSumPriorities { minSumPriorities = sumPriorities - minSumPriorityScores = nil + minSumPriorityNodes = nil } - nodeScore.sumPriorities = sumPriorities if sumPriorities == minSumPriorities { - minSumPriorityScores = append(minSumPriorityScores, nodeScore) + minSumPriorityNodes = append(minSumPriorityNodes, node) } } - if len(minSumPriorityScores) == 1 { - return minSumPriorityScores[0].node + if len(minSumPriorityNodes) == 1 { + return minSumPriorityNodes[0] } + // There are a few nodes with minimum highest priority victim and sum of priorities. // Find one with the minimum number of pods. minNumPods := math.MaxInt32 - minNumPodScores := []*nodeScore{} - for _, nodeScore := range minSumPriorityScores { - if nodeScore.numPods < minNumPods { - minNumPods = nodeScore.numPods - minNumPodScores = nil + var minNumPodNodes []*v1.Node + for _, node := range minSumPriorityNodes { + numPods := len(nodesToVictims[node].pods) + if numPods < minNumPods { + minNumPods = numPods + minNumPodNodes = nil } - if nodeScore.numPods == minNumPods { - minNumPodScores = append(minNumPodScores, nodeScore) + if numPods == minNumPods { + minNumPodNodes = append(minNumPodNodes, node) } } // At this point, even if there are more than one node with the same score, // return the first one. - if len(minNumPodScores) > 0 { - return minNumPodScores[0].node + if len(minNumPodNodes) > 0 { + return minNumPodNodes[0] } glog.Errorf("Error in logic of node scoring for preemption. We should never reach here!") return nil @@ -585,9 +747,11 @@ func selectNodesForPreemption(pod *v1.Pod, potentialNodes []*v1.Node, predicates map[string]algorithm.FitPredicate, metadataProducer algorithm.PredicateMetadataProducer, -) (map[*v1.Node][]*v1.Pod, error) { + queue SchedulingQueue, + pdbs []*policy.PodDisruptionBudget, +) (map[*v1.Node]*Victims, error) { - nodeNameToPods := map[*v1.Node][]*v1.Pod{} + nodeNameToVictims := map[*v1.Node]*Victims{} var resultLock sync.Mutex // We can use the same metadata producer for all nodes. @@ -598,15 +762,19 @@ func selectNodesForPreemption(pod *v1.Pod, if meta != nil { metaCopy = meta.ShallowCopy() } - pods, fits := selectVictimsOnNode(pod, metaCopy, nodeNameToInfo[nodeName], predicates) + pods, numPDBViolations, fits := selectVictimsOnNode(pod, metaCopy, nodeNameToInfo[nodeName], predicates, queue, pdbs) if fits { resultLock.Lock() - nodeNameToPods[potentialNodes[i]] = pods + victims := Victims{ + pods: pods, + numPDBViolations: numPDBViolations, + } + nodeNameToVictims[potentialNodes[i]] = &victims resultLock.Unlock() } } workqueue.Parallelize(16, len(potentialNodes), checkNode) - return nodeNameToPods, nil + return nodeNameToVictims, nil } func nodePassesExtendersForPreemption( @@ -643,6 +811,45 @@ func nodePassesExtendersForPreemption( return true, nil } +// filterPodsWithPDBViolation groups the given "pods" into two groups of "violatingPods" +// and "nonViolatingPods" based on whether their PDBs will be violated if they are +// preempted. +// This function is stable and does not change the order of received pods. So, if it +// receives a sorted list, grouping will preserve the order of the input list. 
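Before the grouping function defined below, a small usage sketch of the PDB rule it applies: same namespace, a non-empty selector that matches the pod's labels, and an exhausted PodDisruptionsAllowed budget mark a pod as PDB-violating. This is an illustrative, standalone snippet assuming the k8s.io/api and k8s.io/apimachinery packages vendored elsewhere in this change; it is not part of the patch itself.

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	policy "k8s.io/api/policy/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{
		Name:      "web-1",
		Namespace: "default",
		Labels:    map[string]string{"app": "web"},
	}}
	pdb := &policy.PodDisruptionBudget{
		ObjectMeta: metav1.ObjectMeta{Namespace: "default"},
		Spec: policy.PodDisruptionBudgetSpec{
			Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}},
		},
		// No disruptions left: evicting a matching pod would violate this PDB.
		Status: policy.PodDisruptionBudgetStatus{PodDisruptionsAllowed: 0},
	}

	selector, err := metav1.LabelSelectorAsSelector(pdb.Spec.Selector)
	if err != nil {
		panic(err)
	}
	violated := pdb.Namespace == pod.Namespace &&
		!selector.Empty() &&
		selector.Matches(labels.Set(pod.Labels)) &&
		pdb.Status.PodDisruptionsAllowed <= 0
	fmt.Println(violated) // true: this pod would land in the "violating" group
}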
+func filterPodsWithPDBViolation(pods []interface{}, pdbs []*policy.PodDisruptionBudget) (violatingPods, nonViolatingPods []*v1.Pod) { + for _, obj := range pods { + pod := obj.(*v1.Pod) + pdbForPodIsViolated := false + // A pod with no labels will not match any PDB. So, no need to check. + if len(pod.Labels) != 0 { + for _, pdb := range pdbs { + if pdb.Namespace != pod.Namespace { + continue + } + selector, err := metav1.LabelSelectorAsSelector(pdb.Spec.Selector) + if err != nil { + continue + } + // A PDB with a nil or empty selector matches nothing. + if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { + continue + } + // We have found a matching PDB. + if pdb.Status.PodDisruptionsAllowed <= 0 { + pdbForPodIsViolated = true + break + } + } + } + if pdbForPodIsViolated { + violatingPods = append(violatingPods, pod) + } else { + nonViolatingPods = append(nonViolatingPods, pod) + } + } + return violatingPods, nonViolatingPods +} + // selectVictimsOnNode finds minimum set of pods on the given node that should // be preempted in order to make enough room for "pod" to be scheduled. The // minimum set selected is subject to the constraint that a higher-priority pod @@ -650,17 +857,22 @@ func nodePassesExtendersForPreemption( // to one another, not relative to the preemptor "pod"). // The algorithm first checks if the pod can be scheduled on the node when all the // lower priority pods are gone. If so, it sorts all the lower priority pods by -// their priority and starts from the highest priority one, tries to keep as -// many of them as possible while checking that the "pod" can still fit on the node. +// their priority and then puts them into two groups of those whose PodDisruptionBudget +// will be violated if preempted and other non-violating pods. Both groups are +// sorted by priority. It first tries to reprieve as many PDB violating pods as +// possible and then does them same for non-PDB-violating pods while checking +// that the "pod" can still fit on the node. // NOTE: This function assumes that it is never called if "pod" cannot be scheduled // due to pod affinity, node affinity, or node anti-affinity reasons. None of // these predicates can be satisfied by removing more pods from the node. -// TODO(bsalamat): Add support for PodDisruptionBudget. func selectVictimsOnNode( pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo, - fitPredicates map[string]algorithm.FitPredicate) ([]*v1.Pod, bool) { + fitPredicates map[string]algorithm.FitPredicate, + queue SchedulingQueue, + pdbs []*policy.PodDisruptionBudget, +) ([]*v1.Pod, int, bool) { potentialVictims := util.SortableList{CompFunc: util.HigherPriorityPod} nodeInfoCopy := nodeInfo.Clone() @@ -691,24 +903,38 @@ func selectVictimsOnNode( // that we should check is if the "pod" is failing to schedule due to pod affinity // failure. // TODO(bsalamat): Consider checking affinity to lower priority pods if feasible with reasonable performance. - if fits, _, err := podFitsOnNode(pod, meta, nodeInfoCopy, fitPredicates, nil); !fits { + if fits, _, err := podFitsOnNode(pod, meta, nodeInfoCopy, fitPredicates, nil, queue); !fits { if err != nil { glog.Warningf("Encountered error while selecting victims on node %v: %v", nodeInfo.Node().Name, err) } - return nil, false + return nil, 0, false + } + var victims []*v1.Pod + numViolatingVictim := 0 + // Try to reprieve as many pods as possible. We first try to reprieve the PDB + // violating victims and then other non-violating ones. 
In both cases, we start + // from the highest priority victims. + violatingVictims, nonViolatingVictims := filterPodsWithPDBViolation(potentialVictims.Items, pdbs) + reprievePod := func(p *v1.Pod) bool { + addPod(p) + fits, _, _ := podFitsOnNode(pod, meta, nodeInfoCopy, fitPredicates, nil, queue) + if !fits { + removePod(p) + victims = append(victims, p) + glog.V(5).Infof("Pod %v is a potential preemption victim on node %v.", p.Name, nodeInfo.Node().Name) + } + return fits } - victims := []*v1.Pod{} - // Try to reprieve as many pods as possible starting from the highest priority one. - for _, p := range potentialVictims.Items { - lpp := p.(*v1.Pod) - addPod(lpp) - if fits, _, _ := podFitsOnNode(pod, meta, nodeInfoCopy, fitPredicates, nil); !fits { - removePod(lpp) - victims = append(victims, lpp) - glog.V(5).Infof("Pod %v is a potential preemption victim on node %v.", lpp.Name, nodeInfo.Node().Name) + for _, p := range violatingVictims { + if !reprievePod(p) { + numViolatingVictim++ } } - return victims, true + // Now we try to reprieve non-violating victims. + for _, p := range nonViolatingVictims { + reprievePod(p) + } + return victims, numViolatingVictim, true } // nodesWherePreemptionMightHelp returns a list of nodes with failed predicates @@ -732,7 +958,10 @@ func nodesWherePreemptionMightHelp(pod *v1.Pod, nodes []*v1.Node, failedPredicat predicates.ErrNodeNotReady, predicates.ErrNodeNetworkUnavailable, predicates.ErrNodeUnschedulable, - predicates.ErrNodeUnknownCondition: + predicates.ErrNodeUnknownCondition, + predicates.ErrVolumeZoneConflict, + predicates.ErrVolumeNodeConflict, + predicates.ErrVolumeBindConflict: unresolvableReasonExist = true break // TODO(bsalamat): Please add affinity failure cases once we have specific affinity failure errors. @@ -752,7 +981,6 @@ func nodesWherePreemptionMightHelp(pod *v1.Pod, nodes []*v1.Node, failedPredicat // considered for preemption. // We look at the node that is nominated for this pod and as long as there are // terminating pods on the node, we don't consider this for preempting more pods. -// TODO(bsalamat): Revisit this algorithm once scheduling by priority is added. 
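Stepping back from the diff for a moment, the victim-selection loop in the hunk above can be summarized with the simplified, standalone model below: assume every lower priority pod has been removed, then try to add pods back one group at a time (PDB-violating pods first, highest priority first); a pod that can return without making the preemptor unschedulable is reprieved, otherwise it stays a victim. The pod type and the toy CPU-capacity check stand in for the real NodeInfo and podFitsOnNode and are assumptions of this sketch.

package main

import (
	"fmt"
	"sort"
)

// pod is a simplified stand-in for *v1.Pod in this sketch.
type pod struct {
	name        string
	priority    int32
	violatesPDB bool
	cpu         int64 // requested CPU, arbitrary units
}

// selectVictims models the reprieve loop: "fits" is a toy capacity check.
func selectVictims(preemptorCPU, nodeCapacity int64, lowerPriorityPods []pod) (victims []pod, numViolating int) {
	// Highest priority pods are considered for reprieve first.
	sort.Slice(lowerPriorityPods, func(i, j int) bool {
		return lowerPriorityPods[i].priority > lowerPriorityPods[j].priority
	})
	var violating, nonViolating []pod
	for _, p := range lowerPriorityPods {
		if p.violatesPDB {
			violating = append(violating, p)
		} else {
			nonViolating = append(nonViolating, p)
		}
	}
	used := preemptorCPU // all lower priority pods start out removed
	reprieve := func(p pod) bool {
		if used+p.cpu <= nodeCapacity {
			used += p.cpu // the preemptor still fits, so keep this pod
			return true
		}
		victims = append(victims, p)
		return false
	}
	for _, p := range violating {
		if !reprieve(p) {
			numViolating++
		}
	}
	for _, p := range nonViolating {
		reprieve(p)
	}
	return victims, numViolating
}

func main() {
	pods := []pod{
		{name: "a", priority: 10, cpu: 2},
		{name: "b", priority: 5, cpu: 2, violatesPDB: true},
		{name: "c", priority: 1, cpu: 2},
	}
	victims, numViolating := selectVictims(4, 8, pods)
	fmt.Println(len(victims), numViolating) // how many pods must be evicted, and how many of those violate a PDB
}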
func podEligibleToPreemptOthers(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) bool { if nodeName, found := pod.Annotations[NominatedNodeAnnotationKey]; found { if nodeInfo, found := nodeNameToInfo[nodeName]; found { @@ -770,19 +998,23 @@ func podEligibleToPreemptOthers(pod *v1.Pod, nodeNameToInfo map[string]*schedule func NewGenericScheduler( cache schedulercache.Cache, eCache *EquivalenceCache, + podQueue SchedulingQueue, predicates map[string]algorithm.FitPredicate, predicateMetaProducer algorithm.PredicateMetadataProducer, prioritizers []algorithm.PriorityConfig, priorityMetaProducer algorithm.MetadataProducer, - extenders []algorithm.SchedulerExtender) algorithm.ScheduleAlgorithm { + extenders []algorithm.SchedulerExtender, + volumeBinder *volumebinder.VolumeBinder) algorithm.ScheduleAlgorithm { return &genericScheduler{ cache: cache, equivalenceCache: eCache, + schedulingQueue: podQueue, predicates: predicates, predicateMetaProducer: predicateMetaProducer, prioritizers: prioritizers, priorityMetaProducer: priorityMetaProducer, extenders: extenders, cachedNodeInfoMap: make(map[string]*schedulercache.NodeInfo), + volumeBinder: volumeBinder, } } diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/core/scheduling_queue.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/core/scheduling_queue.go new file mode 100644 index 000000000000..78bdf0990153 --- /dev/null +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/core/scheduling_queue.go @@ -0,0 +1,734 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file contains structures that implement scheduling queue types. +// Scheduling queues hold pods waiting to be scheduled. This file has two types +// of scheduling queue: 1) a FIFO, which is mostly the same as cache.FIFO, 2) a +// priority queue which has two sub queues. One sub-queue holds pods that are +// being considered for scheduling. This is called activeQ. Another queue holds +// pods that are already tried and are determined to be unschedulable. The latter +// is called unschedulableQ. +// FIFO is here for flag-gating purposes and allows us to use the traditional +// scheduling queue when util.PodPriorityEnabled() returns false. + +package core + +import ( + "container/heap" + "fmt" + "sync" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/cache" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" + "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates" + priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util" + "k8s.io/kubernetes/plugin/pkg/scheduler/util" + + "github.com/golang/glog" + "reflect" +) + +// SchedulingQueue is an interface for a queue to store pods waiting to be scheduled. +// The interface follows a pattern similar to cache.FIFO and cache.Heap and +// makes it easy to use those data structures as a SchedulingQueue. 
+type SchedulingQueue interface { + Add(pod *v1.Pod) error + AddIfNotPresent(pod *v1.Pod) error + AddUnschedulableIfNotPresent(pod *v1.Pod) error + Pop() (*v1.Pod, error) + Update(pod *v1.Pod) error + Delete(pod *v1.Pod) error + MoveAllToActiveQueue() + AssignedPodAdded(pod *v1.Pod) + AssignedPodUpdated(pod *v1.Pod) + WaitingPodsForNode(nodeName string) []*v1.Pod +} + +// NewSchedulingQueue initializes a new scheduling queue. If pod priority is +// enabled a priority queue is returned. If it is disabled, a FIFO is returned. +func NewSchedulingQueue() SchedulingQueue { + if util.PodPriorityEnabled() { + return NewPriorityQueue() + } + return NewFIFO() +} + +// FIFO is basically a simple wrapper around cache.FIFO to make it compatible +// with the SchedulingQueue interface. +type FIFO struct { + *cache.FIFO +} + +var _ = SchedulingQueue(&FIFO{}) // Making sure that FIFO implements SchedulingQueue. + +func (f *FIFO) Add(pod *v1.Pod) error { + return f.FIFO.Add(pod) +} + +func (f *FIFO) AddIfNotPresent(pod *v1.Pod) error { + return f.FIFO.AddIfNotPresent(pod) +} + +// AddUnschedulableIfNotPresent adds an unschedulable pod back to the queue. In +// FIFO it is added to the end of the queue. +func (f *FIFO) AddUnschedulableIfNotPresent(pod *v1.Pod) error { + return f.FIFO.AddIfNotPresent(pod) +} + +func (f *FIFO) Update(pod *v1.Pod) error { + return f.FIFO.Update(pod) +} + +func (f *FIFO) Delete(pod *v1.Pod) error { + return f.FIFO.Delete(pod) +} + +// Pop removes the head of FIFO and returns it. +// This is just a copy/paste of cache.Pop(queue Queue) from fifo.go that scheduler +// has always been using. There is a comment in that file saying that this method +// shouldn't be used in production code, but scheduler has always been using it. +// This function does minimal error checking. +func (f *FIFO) Pop() (*v1.Pod, error) { + var result interface{} + f.FIFO.Pop(func(obj interface{}) error { + result = obj + return nil + }) + return result.(*v1.Pod), nil +} + +// FIFO does not need to react to events, as all pods are always in the active +// scheduling queue anyway. +func (f *FIFO) AssignedPodAdded(pod *v1.Pod) {} +func (f *FIFO) AssignedPodUpdated(pod *v1.Pod) {} + +// MoveAllToActiveQueue does nothing in FIFO as all pods are always in the active queue. +func (f *FIFO) MoveAllToActiveQueue() {} + +// WaitingPodsForNode returns pods that are nominated to run on the given node, +// but FIFO does not support it. +func (f *FIFO) WaitingPodsForNode(nodeName string) []*v1.Pod { + return nil +} + +func NewFIFO() *FIFO { + return &FIFO{FIFO: cache.NewFIFO(cache.MetaNamespaceKeyFunc)} +} + +// UnschedulablePods is an interface for a queue that is used to keep unschedulable +// pods. These pods are not actively reevaluated for scheduling. They are moved +// to the active scheduling queue on certain events, such as termination of a pod +// in the cluster, addition of nodes, etc. +type UnschedulablePods interface { + Add(pod *v1.Pod) + Delete(pod *v1.Pod) + Update(pod *v1.Pod) + GetPodsWaitingForNode(nodeName string) []*v1.Pod + Get(pod *v1.Pod) *v1.Pod + Clear() +} + +// PriorityQueue implements a scheduling queue. It is an alternative to FIFO. +// The head of PriorityQueue is the highest priority pending pod. This structure +// has two sub queues. One sub-queue holds pods that are being considered for +// scheduling. This is called activeQ and is a Heap. Another queue holds +// pods that are already tried and are determined to be unschedulable. The latter +// is called unschedulableQ. 
+// Heap is already thread safe, but we need to acquire another lock here to ensure +// atomicity of operations on the two data structures.. +type PriorityQueue struct { + lock sync.RWMutex + cond sync.Cond + + // activeQ is heap structure that scheduler actively looks at to find pods to + // schedule. Head of heap is the highest priority pod. + activeQ *Heap + // unschedulableQ holds pods that have been tried and determined unschedulable. + unschedulableQ *UnschedulablePodsMap + // receivedMoveRequest is set to true whenever we receive a request to move a + // pod from the unschedulableQ to the activeQ, and is set to false, when we pop + // a pod from the activeQ. It indicates if we received a move request when a + // pod was in flight (we were trying to schedule it). In such a case, we put + // the pod back into the activeQ if it is determined unschedulable. + receivedMoveRequest bool +} + +// Making sure that PriorityQueue implements SchedulingQueue. +var _ = SchedulingQueue(&PriorityQueue{}) + +func NewPriorityQueue() *PriorityQueue { + pq := &PriorityQueue{ + activeQ: newHeap(cache.MetaNamespaceKeyFunc, util.HigherPriorityPod), + unschedulableQ: newUnschedulablePodsMap(), + } + pq.cond.L = &pq.lock + return pq +} + +// Add adds a pod to the active queue. It should be called only when a new pod +// is added so there is no chance the pod is already in either queue. +func (p *PriorityQueue) Add(pod *v1.Pod) error { + p.lock.Lock() + defer p.lock.Unlock() + err := p.activeQ.Add(pod) + if err != nil { + glog.Errorf("Error adding pod %v to the scheduling queue: %v", pod.Name, err) + } else { + if p.unschedulableQ.Get(pod) != nil { + glog.Errorf("Error: pod %v is already in the unschedulable queue.", pod.Name) + p.unschedulableQ.Delete(pod) + } + p.cond.Broadcast() + } + return err +} + +// AddIfNotPresent adds a pod to the active queue if it is not present in any of +// the two queues. If it is present in any, it doesn't do any thing. +func (p *PriorityQueue) AddIfNotPresent(pod *v1.Pod) error { + p.lock.Lock() + defer p.lock.Unlock() + if p.unschedulableQ.Get(pod) != nil { + return nil + } + if _, exists, _ := p.activeQ.Get(pod); exists { + return nil + } + err := p.activeQ.Add(pod) + if err != nil { + glog.Errorf("Error adding pod %v to the scheduling queue: %v", pod.Name, err) + } else { + p.cond.Broadcast() + } + return err +} + +func isPodUnschedulable(pod *v1.Pod) bool { + _, cond := podutil.GetPodCondition(&pod.Status, v1.PodScheduled) + return cond != nil && cond.Status == v1.ConditionFalse && cond.Reason == v1.PodReasonUnschedulable +} + +// AddUnschedulableIfNotPresent does nothing if the pod is present in either +// queue. Otherwise it adds the pod to the unschedulable queue if +// p.receivedMoveRequest is false, and to the activeQ if p.receivedMoveRequest is true. +func (p *PriorityQueue) AddUnschedulableIfNotPresent(pod *v1.Pod) error { + p.lock.Lock() + defer p.lock.Unlock() + if p.unschedulableQ.Get(pod) != nil { + return fmt.Errorf("pod is already present in unschedulableQ") + } + if _, exists, _ := p.activeQ.Get(pod); exists { + return fmt.Errorf("pod is already present in the activeQ") + } + if !p.receivedMoveRequest && isPodUnschedulable(pod) { + p.unschedulableQ.Add(pod) + return nil + } + err := p.activeQ.Add(pod) + if err == nil { + p.cond.Broadcast() + } + return err +} + +// Pop removes the head of the active queue and returns it. It blocks if the +// activeQ is empty and waits until a new item is added to the queue. 
It also +// clears receivedMoveRequest to mark the beginning of a new scheduling cycle. +func (p *PriorityQueue) Pop() (*v1.Pod, error) { + p.lock.Lock() + defer p.lock.Unlock() + for len(p.activeQ.data.queue) == 0 { + p.cond.Wait() + } + obj, err := p.activeQ.Pop() + if err != nil { + return nil, err + } + p.receivedMoveRequest = false + return obj.(*v1.Pod), err +} + +// isPodUpdated checks if the pod is updated in a way that it may have become +// schedulable. It drops status of the pod and compares it with old version. +func isPodUpdated(oldPod, newPod *v1.Pod) bool { + strip := func(pod *v1.Pod) *v1.Pod { + p := pod.DeepCopy() + p.ResourceVersion = "" + p.Generation = 0 + p.Status = v1.PodStatus{} + return p + } + return !reflect.DeepEqual(strip(oldPod), strip(newPod)) +} + +// Update updates a pod in the active queue if present. Otherwise, it removes +// the item from the unschedulable queue and adds the updated one to the active +// queue. +func (p *PriorityQueue) Update(pod *v1.Pod) error { + p.lock.Lock() + defer p.lock.Unlock() + // If the pod is already in the active queue, just update it there. + if _, exists, _ := p.activeQ.Get(pod); exists { + err := p.activeQ.Update(pod) + return err + } + // If the pod is in the unschedulable queue, updating it may make it schedulable. + if oldPod := p.unschedulableQ.Get(pod); oldPod != nil { + if isPodUpdated(oldPod, pod) { + p.unschedulableQ.Delete(oldPod) + err := p.activeQ.Add(pod) + if err == nil { + p.cond.Broadcast() + } + return err + } else { + p.unschedulableQ.Update(pod) + return nil + } + } + // If pod is not in any of the two queue, we put it in the active queue. + err := p.activeQ.Add(pod) + if err == nil { + p.cond.Broadcast() + } + return err +} + +// Delete deletes the item from either of the two queues. It assumes the pod is +// only in one queue. +func (p *PriorityQueue) Delete(pod *v1.Pod) error { + p.lock.Lock() + defer p.lock.Unlock() + if _, exists, _ := p.activeQ.Get(pod); exists { + return p.activeQ.Delete(pod) + } + p.unschedulableQ.Delete(pod) + return nil +} + +// AssignedPodAdded is called when a bound pod is added. Creation of this pod +// may make pending pods with matching affinity terms schedulable. +func (p *PriorityQueue) AssignedPodAdded(pod *v1.Pod) { + p.movePodsToActiveQueue(p.getUnschedulablePodsWithMatchingAffinityTerm(pod)) +} + +// AssignedPodUpdated is called when a bound pod is updated. Change of labels +// may make pending pods with matching affinity terms schedulable. +func (p *PriorityQueue) AssignedPodUpdated(pod *v1.Pod) { + p.movePodsToActiveQueue(p.getUnschedulablePodsWithMatchingAffinityTerm(pod)) +} + +// MoveAllToActiveQueue moves all pods from unschedulableQ to activeQ. This +// function adds all pods and then signals the condition variable to ensure that +// if Pop() is waiting for an item, it receives it after all the pods are in the +// queue and the head is the highest priority pod. +// TODO(bsalamat): We should add a back-off mechanism here so that a high priority +// pod which is unschedulable does not go to the head of the queue frequently. For +// example in a cluster where a lot of pods being deleted, such a high priority +// pod can deprive other pods from getting scheduled. 
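// A self-contained sketch (standard library only) of the blocking-Pop pattern
// PriorityQueue uses: Add signals a condition variable, Pop waits on it while
// the queue is empty. The real queue also orders items by priority and tracks
// receivedMoveRequest; this stand-in keeps only the synchronization shape.
package main

import (
	"fmt"
	"sync"
)

type blockingQueue struct {
	lock  sync.Mutex
	cond  *sync.Cond
	items []string
}

func newBlockingQueue() *blockingQueue {
	q := &blockingQueue{}
	q.cond = sync.NewCond(&q.lock)
	return q
}

func (q *blockingQueue) Add(item string) {
	q.lock.Lock()
	defer q.lock.Unlock()
	q.items = append(q.items, item)
	q.cond.Broadcast() // wake any goroutine blocked in Pop
}

func (q *blockingQueue) Pop() string {
	q.lock.Lock()
	defer q.lock.Unlock()
	for len(q.items) == 0 {
		q.cond.Wait() // releases the lock while waiting, reacquires on wake-up
	}
	item := q.items[0]
	q.items = q.items[1:]
	return item
}

func main() {
	q := newBlockingQueue()
	go q.Add("pod-a")
	fmt.Println(q.Pop()) // blocks until "pod-a" has been added
}
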
+func (p *PriorityQueue) MoveAllToActiveQueue() { + p.lock.Lock() + defer p.lock.Unlock() + var unschedulablePods []interface{} + for _, pod := range p.unschedulableQ.pods { + unschedulablePods = append(unschedulablePods, pod) + } + p.activeQ.BulkAdd(unschedulablePods) + p.unschedulableQ.Clear() + p.receivedMoveRequest = true + p.cond.Broadcast() +} + +func (p *PriorityQueue) movePodsToActiveQueue(pods []*v1.Pod) { + p.lock.Lock() + defer p.lock.Unlock() + for _, pod := range pods { + p.activeQ.Add(pod) + p.unschedulableQ.Delete(pod) + } + p.receivedMoveRequest = true + p.cond.Broadcast() +} + +// getUnschedulablePodsWithMatchingAffinityTerm returns unschedulable pods which have +// any affinity term that matches "pod". +func (p *PriorityQueue) getUnschedulablePodsWithMatchingAffinityTerm(pod *v1.Pod) []*v1.Pod { + p.lock.RLock() + defer p.lock.RUnlock() + podsToMove := []*v1.Pod{} + for _, up := range p.unschedulableQ.pods { + affinity := up.Spec.Affinity + if affinity != nil && affinity.PodAffinity != nil { + terms := predicates.GetPodAffinityTerms(affinity.PodAffinity) + for _, term := range terms { + namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(up, &term) + selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector) + if err != nil { + glog.Errorf("Error getting label selectors for pod: %v.", up.Name) + } + if priorityutil.PodMatchesTermsNamespaceAndSelector(pod, namespaces, selector) { + podsToMove = append(podsToMove, up) + } + } + } + } + return podsToMove +} + +// WaitingPodsForNode returns pods that are nominated to run on the given node, +// but they are waiting for other pods to be removed from the node before they +// can be actually scheduled. +func (p *PriorityQueue) WaitingPodsForNode(nodeName string) []*v1.Pod { + p.lock.RLock() + defer p.lock.RUnlock() + pods := p.unschedulableQ.GetPodsWaitingForNode(nodeName) + for _, obj := range p.activeQ.List() { + pod := obj.(*v1.Pod) + if pod.Annotations != nil { + if n, ok := pod.Annotations[NominatedNodeAnnotationKey]; ok && n == nodeName { + pods = append(pods, pod) + } + } + } + return pods +} + +// UnschedulablePodsMap holds pods that cannot be scheduled. This data structure +// is used to implement unschedulableQ. +type UnschedulablePodsMap struct { + // pods is a map key by a pod's full-name and the value is a pointer to the pod. + pods map[string]*v1.Pod + // nominatedPods is a map keyed by a node name and the value is a list of + // pods' full-names which are nominated to run on the node. + nominatedPods map[string][]string + keyFunc func(*v1.Pod) string +} + +var _ = UnschedulablePods(&UnschedulablePodsMap{}) + +func NominatedNodeName(pod *v1.Pod) string { + nominatedNodeName, ok := pod.Annotations[NominatedNodeAnnotationKey] + if !ok { + return "" + } + return nominatedNodeName +} + +// Add adds a pod to the unschedulable pods. +func (u *UnschedulablePodsMap) Add(pod *v1.Pod) { + podKey := u.keyFunc(pod) + if _, exists := u.pods[podKey]; !exists { + u.pods[podKey] = pod + nominatedNodeName := NominatedNodeName(pod) + if len(nominatedNodeName) > 0 { + u.nominatedPods[nominatedNodeName] = append(u.nominatedPods[nominatedNodeName], podKey) + } + } +} + +func (u *UnschedulablePodsMap) deleteFromNominated(pod *v1.Pod) { + nominatedNodeName := NominatedNodeName(pod) + if len(nominatedNodeName) > 0 { + podKey := u.keyFunc(pod) + nps := u.nominatedPods[nominatedNodeName] + for i, np := range nps { + if np == podKey { + u.nominatedPods[nominatedNodeName] = append(nps[:i], nps[i+1:]...) 
+ if len(u.nominatedPods[nominatedNodeName]) == 0 { + delete(u.nominatedPods, nominatedNodeName) + } + break + } + } + } +} + +// Delete deletes a pod from the unschedulable pods. +func (u *UnschedulablePodsMap) Delete(pod *v1.Pod) { + podKey := u.keyFunc(pod) + if p, exists := u.pods[podKey]; exists { + u.deleteFromNominated(p) + delete(u.pods, podKey) + } +} + +// Update updates a pod in the unschedulable pods. +func (u *UnschedulablePodsMap) Update(pod *v1.Pod) { + podKey := u.keyFunc(pod) + oldPod, exists := u.pods[podKey] + if !exists { + u.Add(pod) + return + } + u.pods[podKey] = pod + oldNominateNodeName := NominatedNodeName(oldPod) + nominatedNodeName := NominatedNodeName(pod) + if oldNominateNodeName != nominatedNodeName { + u.deleteFromNominated(oldPod) + if len(nominatedNodeName) > 0 { + u.nominatedPods[nominatedNodeName] = append(u.nominatedPods[nominatedNodeName], podKey) + } + } +} + +// Get returns the pod if a pod with the same key as the key of the given "pod" +// is found in the map. It returns nil otherwise. +func (u *UnschedulablePodsMap) Get(pod *v1.Pod) *v1.Pod { + podKey := u.keyFunc(pod) + if p, exists := u.pods[podKey]; exists { + return p + } + return nil +} + +// GetPodsWaitingForNode returns a list of unschedulable pods whose NominatedNodeNames +// are equal to the given nodeName. +func (u *UnschedulablePodsMap) GetPodsWaitingForNode(nodeName string) []*v1.Pod { + var pods []*v1.Pod + for _, key := range u.nominatedPods[nodeName] { + pods = append(pods, u.pods[key]) + } + return pods +} + +// Clear removes all the entries from the unschedulable maps. +func (u *UnschedulablePodsMap) Clear() { + u.pods = make(map[string]*v1.Pod) + u.nominatedPods = make(map[string][]string) +} + +// newUnschedulablePodsMap initializes a new object of UnschedulablePodsMap. +func newUnschedulablePodsMap() *UnschedulablePodsMap { + return &UnschedulablePodsMap{ + pods: make(map[string]*v1.Pod), + nominatedPods: make(map[string][]string), + keyFunc: util.GetPodFullName, + } +} + +// Below is the implementation of the a heap. The logic is pretty much the same +// as cache.heap, however, this heap does not perform synchronization. It leaves +// synchronization to the SchedulingQueue. + +type LessFunc func(interface{}, interface{}) bool +type KeyFunc func(obj interface{}) (string, error) + +type heapItem struct { + obj interface{} // The object which is stored in the heap. + index int // The index of the object's key in the Heap.queue. +} + +type itemKeyValue struct { + key string + obj interface{} +} + +// heapData is an internal struct that implements the standard heap interface +// and keeps the data stored in the heap. +type heapData struct { + // items is a map from key of the objects to the objects and their index. + // We depend on the property that items in the map are in the queue and vice versa. + items map[string]*heapItem + // queue implements a heap data structure and keeps the order of elements + // according to the heap invariant. The queue keeps the keys of objects stored + // in "items". + queue []string + + // keyFunc is used to make the key used for queued item insertion and retrieval, and + // should be deterministic. + keyFunc KeyFunc + // lessFunc is used to compare two objects in the heap. + lessFunc LessFunc +} + +var ( + _ = heap.Interface(&heapData{}) // heapData is a standard heap +) + +// Less compares two objects and returns true if the first one should go +// in front of the second one in the heap. 
+func (h *heapData) Less(i, j int) bool { + if i > len(h.queue) || j > len(h.queue) { + return false + } + itemi, ok := h.items[h.queue[i]] + if !ok { + return false + } + itemj, ok := h.items[h.queue[j]] + if !ok { + return false + } + return h.lessFunc(itemi.obj, itemj.obj) +} + +// Len returns the number of items in the Heap. +func (h *heapData) Len() int { return len(h.queue) } + +// Swap implements swapping of two elements in the heap. This is a part of standard +// heap interface and should never be called directly. +func (h *heapData) Swap(i, j int) { + h.queue[i], h.queue[j] = h.queue[j], h.queue[i] + item := h.items[h.queue[i]] + item.index = i + item = h.items[h.queue[j]] + item.index = j +} + +// Push is supposed to be called by heap.Push only. +func (h *heapData) Push(kv interface{}) { + keyValue := kv.(*itemKeyValue) + n := len(h.queue) + h.items[keyValue.key] = &heapItem{keyValue.obj, n} + h.queue = append(h.queue, keyValue.key) +} + +// Pop is supposed to be called by heap.Pop only. +func (h *heapData) Pop() interface{} { + key := h.queue[len(h.queue)-1] + h.queue = h.queue[0 : len(h.queue)-1] + item, ok := h.items[key] + if !ok { + // This is an error + return nil + } + delete(h.items, key) + return item.obj +} + +// Heap is a thread-safe producer/consumer queue that implements a heap data structure. +// It can be used to implement priority queues and similar data structures. +type Heap struct { + // data stores objects and has a queue that keeps their ordering according + // to the heap invariant. + data *heapData +} + +// Add inserts an item, and puts it in the queue. The item is updated if it +// already exists. +func (h *Heap) Add(obj interface{}) error { + key, err := h.data.keyFunc(obj) + if err != nil { + return cache.KeyError{Obj: obj, Err: err} + } + if _, exists := h.data.items[key]; exists { + h.data.items[key].obj = obj + heap.Fix(h.data, h.data.items[key].index) + } else { + heap.Push(h.data, &itemKeyValue{key, obj}) + } + return nil +} + +// BulkAdd adds all the items in the list to the queue. +func (h *Heap) BulkAdd(list []interface{}) error { + for _, obj := range list { + key, err := h.data.keyFunc(obj) + if err != nil { + return cache.KeyError{Obj: obj, Err: err} + } + if _, exists := h.data.items[key]; exists { + h.data.items[key].obj = obj + heap.Fix(h.data, h.data.items[key].index) + } else { + heap.Push(h.data, &itemKeyValue{key, obj}) + } + } + return nil +} + +// AddIfNotPresent inserts an item, and puts it in the queue. If an item with +// the key is present in the map, no changes is made to the item. +func (h *Heap) AddIfNotPresent(obj interface{}) error { + key, err := h.data.keyFunc(obj) + if err != nil { + return cache.KeyError{Obj: obj, Err: err} + } + if _, exists := h.data.items[key]; !exists { + heap.Push(h.data, &itemKeyValue{key, obj}) + } + return nil +} + +// Update is the same as Add in this implementation. When the item does not +// exist, it is added. +func (h *Heap) Update(obj interface{}) error { + return h.Add(obj) +} + +// Delete removes an item. +func (h *Heap) Delete(obj interface{}) error { + key, err := h.data.keyFunc(obj) + if err != nil { + return cache.KeyError{Obj: obj, Err: err} + } + if item, ok := h.data.items[key]; ok { + heap.Remove(h.data, item.index) + return nil + } + return fmt.Errorf("object not found") +} + +// Pop returns the head of the heap. 
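// A compact, standard-library sketch of the keyed-heap technique heapData and
// Heap implement: items sit in a map for O(1) lookup by key, the heap orders
// them, and heap.Fix restores the invariant after an in-place update (the
// "exists" branch of Add). The entry type and priorities are illustrative only.
package main

import (
	"container/heap"
	"fmt"
)

type entry struct {
	key      string
	priority int
	index    int // position in the heap slice, maintained by Swap
}

type keyedHeap struct {
	byKey map[string]*entry
	order []*entry
}

func (h *keyedHeap) Len() int           { return len(h.order) }
func (h *keyedHeap) Less(i, j int) bool { return h.order[i].priority > h.order[j].priority }
func (h *keyedHeap) Swap(i, j int) {
	h.order[i], h.order[j] = h.order[j], h.order[i]
	h.order[i].index = i
	h.order[j].index = j
}
func (h *keyedHeap) Push(x interface{}) {
	e := x.(*entry)
	e.index = len(h.order)
	h.order = append(h.order, e)
	h.byKey[e.key] = e
}
func (h *keyedHeap) Pop() interface{} {
	e := h.order[len(h.order)-1]
	h.order = h.order[:len(h.order)-1]
	delete(h.byKey, e.key)
	return e
}

func main() {
	h := &keyedHeap{byKey: map[string]*entry{}}
	heap.Push(h, &entry{key: "low", priority: 1})
	heap.Push(h, &entry{key: "high", priority: 10})

	// Update an existing item in place, then fix the heap at its current index.
	h.byKey["low"].priority = 100
	heap.Fix(h, h.byKey["low"].index)

	fmt.Println(heap.Pop(h).(*entry).key) // "low" now has the highest priority
}
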
+func (h *Heap) Pop() (interface{}, error) { + obj := heap.Pop(h.data) + if obj != nil { + return obj, nil + } else { + return nil, fmt.Errorf("object was removed from heap data") + } +} + +// Get returns the requested item, or sets exists=false. +func (h *Heap) Get(obj interface{}) (interface{}, bool, error) { + key, err := h.data.keyFunc(obj) + if err != nil { + return nil, false, cache.KeyError{Obj: obj, Err: err} + } + return h.GetByKey(key) +} + +// GetByKey returns the requested item, or sets exists=false. +func (h *Heap) GetByKey(key string) (interface{}, bool, error) { + item, exists := h.data.items[key] + if !exists { + return nil, false, nil + } + return item.obj, true, nil +} + +// List returns a list of all the items. +func (h *Heap) List() []interface{} { + list := make([]interface{}, 0, len(h.data.items)) + for _, item := range h.data.items { + list = append(list, item.obj) + } + return list +} + +// newHeap returns a Heap which can be used to queue up items to process. +func newHeap(keyFn KeyFunc, lessFn LessFunc) *Heap { + return &Heap{ + data: &heapData{ + items: map[string]*heapItem{}, + queue: []string{}, + keyFunc: keyFn, + lessFunc: lessFn, + }, + } +} diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/factory/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/factory/BUILD index c1556c3c957f..c4fe5102bb55 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/factory/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/factory/BUILD @@ -12,9 +12,11 @@ go_library( "factory.go", "plugins.go", ], + importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/factory", deps = [ - "//pkg/api/helper:go_default_library", "//pkg/api/v1/pod:go_default_library", + "//pkg/apis/core/helper:go_default_library", + "//pkg/features:go_default_library", "//pkg/kubelet/apis:go_default_library", "//plugin/pkg/scheduler:go_default_library", "//plugin/pkg/scheduler/algorithm:go_default_library", @@ -25,23 +27,31 @@ go_library( "//plugin/pkg/scheduler/core:go_default_library", "//plugin/pkg/scheduler/schedulercache:go_default_library", "//plugin/pkg/scheduler/util:go_default_library", + "//plugin/pkg/scheduler/volumebinder:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/api/policy/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/client-go/informers/apps/v1beta1:go_default_library", "//vendor/k8s.io/client-go/informers/core/v1:go_default_library", "//vendor/k8s.io/client-go/informers/extensions/v1beta1:go_default_library", + "//vendor/k8s.io/client-go/informers/policy/v1beta1:go_default_library", + "//vendor/k8s.io/client-go/informers/storage/v1:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/listers/apps/v1beta1:go_default_library", 
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library", "//vendor/k8s.io/client-go/listers/extensions/v1beta1:go_default_library", + "//vendor/k8s.io/client-go/listers/policy/v1beta1:go_default_library", + "//vendor/k8s.io/client-go/listers/storage/v1:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", ], ) @@ -52,14 +62,17 @@ go_test( "factory_test.go", "plugins_test.go", ], + importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/factory", library = ":go_default_library", deps = [ - "//pkg/api:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testing:go_default_library", "//plugin/pkg/scheduler/algorithm:go_default_library", "//plugin/pkg/scheduler/api:go_default_library", "//plugin/pkg/scheduler/api/latest:go_default_library", + "//plugin/pkg/scheduler/core:go_default_library", "//plugin/pkg/scheduler/schedulercache:go_default_library", + "//plugin/pkg/scheduler/testing:go_default_library", "//plugin/pkg/scheduler/util:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/factory/factory.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/factory/factory.go index ac80f5ae8d72..b2857e185520 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/factory/factory.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/factory/factory.go @@ -19,29 +19,40 @@ limitations under the License. package factory import ( + "encoding/json" "fmt" "reflect" "time" + "github.com/golang/glog" + "k8s.io/api/core/v1" + "k8s.io/api/policy/v1beta1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/sets" + utilfeature "k8s.io/apiserver/pkg/util/feature" appsinformers "k8s.io/client-go/informers/apps/v1beta1" coreinformers "k8s.io/client-go/informers/core/v1" extensionsinformers "k8s.io/client-go/informers/extensions/v1beta1" + policyinformers "k8s.io/client-go/informers/policy/v1beta1" + storageinformers "k8s.io/client-go/informers/storage/v1" clientset "k8s.io/client-go/kubernetes" appslisters "k8s.io/client-go/listers/apps/v1beta1" corelisters "k8s.io/client-go/listers/core/v1" extensionslisters "k8s.io/client-go/listers/extensions/v1beta1" + policylisters "k8s.io/client-go/listers/policy/v1beta1" + storagelisters "k8s.io/client-go/listers/storage/v1" "k8s.io/client-go/tools/cache" - "k8s.io/kubernetes/pkg/api/helper" podutil "k8s.io/kubernetes/pkg/api/v1/pod" + "k8s.io/kubernetes/pkg/apis/core/helper" + "k8s.io/kubernetes/pkg/features" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/plugin/pkg/scheduler" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" @@ -51,8 +62,7 @@ import ( "k8s.io/kubernetes/plugin/pkg/scheduler/core" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" "k8s.io/kubernetes/plugin/pkg/scheduler/util" - - "github.com/golang/glog" + "k8s.io/kubernetes/plugin/pkg/scheduler/volumebinder" ) const ( @@ -61,19 +71,18 @@ const ( ) var ( - serviceAffinitySet = sets.NewString("ServiceAffinity") - maxPDVolumeCountPredicateSet = sets.NewString("MaxPDVolumeCountPredicate") - matchInterPodAffinitySet = sets.NewString("MatchInterPodAffinity") - generalPredicatesSets = 
sets.NewString("GeneralPredicates") - noDiskConflictSet = sets.NewString("NoDiskConflict") + serviceAffinitySet = sets.NewString("ServiceAffinity") + matchInterPodAffinitySet = sets.NewString("MatchInterPodAffinity") + generalPredicatesSets = sets.NewString("GeneralPredicates") + noDiskConflictSet = sets.NewString("NoDiskConflict") + maxPDVolumeCountPredicateKeys = []string{"MaxGCEPDVolumeCount", "MaxAzureDiskVolumeCount", "MaxEBSVolumeCount"} ) -// ConfigFactory is the default implementation of the scheduler.Configurator interface. -// TODO make this private if possible, so that only its interface is externally used. -type ConfigFactory struct { +// configFactory is the default implementation of the scheduler.Configurator interface. +type configFactory struct { client clientset.Interface // queue for pods that need scheduling - podQueue *cache.FIFO + podQueue core.SchedulingQueue // a means to list all known scheduled pods. scheduledPodLister corelisters.PodLister // a means to list all known scheduled pods and pods assumed to have been scheduled. @@ -92,6 +101,10 @@ type ConfigFactory struct { replicaSetLister extensionslisters.ReplicaSetLister // a means to list all statefulsets statefulSetLister appslisters.StatefulSetLister + // a means to list all PodDisruptionBudgets + pdbLister policylisters.PodDisruptionBudgetLister + // a means to list all StorageClasses + storageClassLister storagelisters.StorageClassLister // Close this to stop all reflectors StopEverything chan struct{} @@ -107,13 +120,16 @@ type ConfigFactory struct { // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule // corresponding to every RequiredDuringScheduling affinity rule. // HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 0-100. 
- hardPodAffinitySymmetricWeight int + hardPodAffinitySymmetricWeight int32 // Equivalence class cache equivalencePodCache *core.EquivalenceCache // Enable equivalence class cache enableEquivalenceClassCache bool + + // Handles volume binding decisions + volumeBinder *volumebinder.VolumeBinder } // NewConfigFactory initializes the default implementation of a Configurator To encourage eventual privatization of the struct type, we only @@ -129,22 +145,32 @@ func NewConfigFactory( replicaSetInformer extensionsinformers.ReplicaSetInformer, statefulSetInformer appsinformers.StatefulSetInformer, serviceInformer coreinformers.ServiceInformer, - hardPodAffinitySymmetricWeight int, + pdbInformer policyinformers.PodDisruptionBudgetInformer, + storageClassInformer storageinformers.StorageClassInformer, + hardPodAffinitySymmetricWeight int32, enableEquivalenceClassCache bool, ) scheduler.Configurator { stopEverything := make(chan struct{}) schedulerCache := schedulercache.New(30*time.Second, stopEverything) - c := &ConfigFactory{ + // storageClassInformer is only enabled through VolumeScheduling feature gate + var storageClassLister storagelisters.StorageClassLister + if storageClassInformer != nil { + storageClassLister = storageClassInformer.Lister() + } + + c := &configFactory{ client: client, podLister: schedulerCache, - podQueue: cache.NewFIFO(cache.MetaNamespaceKeyFunc), + podQueue: core.NewSchedulingQueue(), pVLister: pvInformer.Lister(), pVCLister: pvcInformer.Lister(), serviceLister: serviceInformer.Lister(), controllerLister: replicationControllerInformer.Lister(), replicaSetLister: replicaSetInformer.Lister(), statefulSetLister: statefulSetInformer.Lister(), + pdbLister: pdbInformer.Lister(), + storageClassLister: storageClassLister, schedulerCache: schedulerCache, StopEverything: stopEverything, schedulerName: schedulerName, @@ -186,19 +212,28 @@ func NewConfigFactory( }, Handler: cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { - if err := c.podQueue.Add(obj); err != nil { + if err := c.podQueue.Add(obj.(*v1.Pod)); err != nil { runtime.HandleError(fmt.Errorf("unable to queue %T: %v", obj, err)) } }, UpdateFunc: func(oldObj, newObj interface{}) { - if err := c.podQueue.Update(newObj); err != nil { + pod := newObj.(*v1.Pod) + if c.skipPodUpdate(pod) { + return + } + if err := c.podQueue.Update(pod); err != nil { runtime.HandleError(fmt.Errorf("unable to update %T: %v", newObj, err)) } }, DeleteFunc: func(obj interface{}) { - if err := c.podQueue.Delete(obj); err != nil { + pod := obj.(*v1.Pod) + if err := c.podQueue.Delete(pod); err != nil { runtime.HandleError(fmt.Errorf("unable to dequeue %T: %v", obj, err)) } + if c.volumeBinder != nil { + // Volume binder only wants to keep unassigned pods + c.volumeBinder.DeletePodBindings(pod) + } }, }, }, @@ -207,59 +242,116 @@ func NewConfigFactory( // they may need to call. 
c.scheduledPodLister = assignedPodLister{podInformer.Lister()} - // Only nodes in the "Ready" condition with status == "True" are schedulable - nodeInformer.Informer().AddEventHandlerWithResyncPeriod( + nodeInformer.Informer().AddEventHandler( cache.ResourceEventHandlerFuncs{ AddFunc: c.addNodeToCache, UpdateFunc: c.updateNodeInCache, DeleteFunc: c.deleteNodeFromCache, }, - 0, ) c.nodeLister = nodeInformer.Lister() + pdbInformer.Informer().AddEventHandler( + cache.ResourceEventHandlerFuncs{ + AddFunc: c.addPDBToCache, + UpdateFunc: c.updatePDBInCache, + DeleteFunc: c.deletePDBFromCache, + }, + ) + c.pdbLister = pdbInformer.Lister() + // On add and delete of PVs, it will affect equivalence cache items // related to persistent volume - pvInformer.Informer().AddEventHandlerWithResyncPeriod( + pvInformer.Informer().AddEventHandler( cache.ResourceEventHandlerFuncs{ // MaxPDVolumeCountPredicate: since it relies on the counts of PV. AddFunc: c.onPvAdd, DeleteFunc: c.onPvDelete, }, - 0, ) c.pVLister = pvInformer.Lister() // This is for MaxPDVolumeCountPredicate: add/delete PVC will affect counts of PV when it is bound. - pvcInformer.Informer().AddEventHandlerWithResyncPeriod( + pvcInformer.Informer().AddEventHandler( cache.ResourceEventHandlerFuncs{ AddFunc: c.onPvcAdd, + UpdateFunc: c.onPvcUpdate, DeleteFunc: c.onPvcDelete, }, - 0, ) c.pVCLister = pvcInformer.Lister() // This is for ServiceAffinity: affected by the selector of the service is updated. // Also, if new service is added, equivalence cache will also become invalid since // existing pods may be "captured" by this service and change this predicate result. - serviceInformer.Informer().AddEventHandlerWithResyncPeriod( + serviceInformer.Informer().AddEventHandler( cache.ResourceEventHandlerFuncs{ AddFunc: c.onServiceAdd, UpdateFunc: c.onServiceUpdate, DeleteFunc: c.onServiceDelete, }, - 0, ) c.serviceLister = serviceInformer.Lister() // Existing equivalence cache should not be affected by add/delete RC/Deployment etc, // it only make sense when pod is scheduled or deleted + if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) { + // Setup volume binder + c.volumeBinder = volumebinder.NewVolumeBinder(client, pvcInformer, pvInformer, nodeInformer, storageClassInformer) + } + return c } -func (c *ConfigFactory) onPvAdd(obj interface{}) { +// skipPodUpdate checks whether the specified pod update should be ignored. +// This function will return true if +// - The pod has already been assumed, AND +// - The pod has only its ResourceVersion, Spec.NodeName and/or Annotations +// updated. +func (c *configFactory) skipPodUpdate(pod *v1.Pod) bool { + // Non-assumed pods should never be skipped. + isAssumed, err := c.schedulerCache.IsAssumedPod(pod) + if err != nil { + runtime.HandleError(fmt.Errorf("failed to check whether pod %s/%s is assumed: %v", pod.Namespace, pod.Name, err)) + return false + } + if !isAssumed { + return false + } + + // Gets the assumed pod from the cache. + assumedPod, err := c.schedulerCache.GetPod(pod) + if err != nil { + runtime.HandleError(fmt.Errorf("failed to get assumed pod %s/%s from cache: %v", pod.Namespace, pod.Name, err)) + return false + } + + // Compares the assumed pod in the cache with the pod update. If they are + // equal (with certain fields excluded), this pod update will be skipped. + f := func(pod *v1.Pod) *v1.Pod { + p := pod.DeepCopy() + // ResourceVersion must be excluded because each object update will + // have a new resource version. 
+ p.ResourceVersion = "" + // Spec.NodeName must be excluded because the pod assumed in the cache + // is expected to have a node assigned while the pod update may nor may + // not have this field set. + p.Spec.NodeName = "" + // Annotations must be excluded for the reasons described in + // https://github.com/kubernetes/kubernetes/issues/52914. + p.Annotations = nil + return p + } + assumedPodCopy, podCopy := f(assumedPod), f(pod) + if !reflect.DeepEqual(assumedPodCopy, podCopy) { + return false + } + glog.V(3).Infof("Skipping pod %s/%s update", pod.Namespace, pod.Name) + return true +} + +func (c *configFactory) onPvAdd(obj interface{}) { if c.enableEquivalenceClassCache { pv, ok := obj.(*v1.PersistentVolume) if !ok { @@ -270,7 +362,7 @@ func (c *ConfigFactory) onPvAdd(obj interface{}) { } } -func (c *ConfigFactory) onPvDelete(obj interface{}) { +func (c *configFactory) onPvDelete(obj interface{}) { if c.enableEquivalenceClassCache { var pv *v1.PersistentVolume switch t := obj.(type) { @@ -291,8 +383,12 @@ func (c *ConfigFactory) onPvDelete(obj interface{}) { } } -func (c *ConfigFactory) invalidatePredicatesForPv(pv *v1.PersistentVolume) { - invalidPredicates := sets.NewString("MaxPDVolumeCountPredicate") +func (c *configFactory) invalidatePredicatesForPv(pv *v1.PersistentVolume) { + // You could have a PVC that points to a PV, but the PV object doesn't exist. + // So when the PV object gets added, we can recount. + invalidPredicates := sets.NewString() + + // PV types which impact MaxPDVolumeCountPredicate if pv.Spec.AWSElasticBlockStore != nil { invalidPredicates.Insert("MaxEBSVolumeCount") } @@ -302,10 +398,24 @@ func (c *ConfigFactory) invalidatePredicatesForPv(pv *v1.PersistentVolume) { if pv.Spec.AzureDisk != nil { invalidPredicates.Insert("MaxAzureDiskVolumeCount") } + + // If PV contains zone related label, it may impact cached NoVolumeZoneConflict + for k := range pv.ObjectMeta.Labels { + if k == kubeletapis.LabelZoneFailureDomain || k == kubeletapis.LabelZoneRegion { + invalidPredicates.Insert("NoVolumeZoneConflict") + break + } + } + + if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) { + // Add/delete impacts the available PVs to choose from + invalidPredicates.Insert(predicates.CheckVolumeBinding) + } + c.equivalencePodCache.InvalidateCachedPredicateItemOfAllNodes(invalidPredicates) } -func (c *ConfigFactory) onPvcAdd(obj interface{}) { +func (c *configFactory) onPvcAdd(obj interface{}) { if c.enableEquivalenceClassCache { pvc, ok := obj.(*v1.PersistentVolumeClaim) if !ok { @@ -314,9 +424,31 @@ func (c *ConfigFactory) onPvcAdd(obj interface{}) { } c.invalidatePredicatesForPvc(pvc) } + c.podQueue.MoveAllToActiveQueue() } -func (c *ConfigFactory) onPvcDelete(obj interface{}) { +func (c *configFactory) onPvcUpdate(old, new interface{}) { + if !utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) { + return + } + + if c.enableEquivalenceClassCache { + newPVC, ok := new.(*v1.PersistentVolumeClaim) + if !ok { + glog.Errorf("cannot convert to *v1.PersistentVolumeClaim: %v", new) + return + } + oldPVC, ok := old.(*v1.PersistentVolumeClaim) + if !ok { + glog.Errorf("cannot convert to *v1.PersistentVolumeClaim: %v", old) + return + } + c.invalidatePredicatesForPvcUpdate(oldPVC, newPVC) + } + c.podQueue.MoveAllToActiveQueue() +} + +func (c *configFactory) onPvcDelete(obj interface{}) { if c.enableEquivalenceClassCache { var pvc *v1.PersistentVolumeClaim switch t := obj.(type) { @@ -337,19 +469,47 @@ func (c *ConfigFactory) onPvcDelete(obj interface{}) { 
} } -func (c *ConfigFactory) invalidatePredicatesForPvc(pvc *v1.PersistentVolumeClaim) { - if pvc.Spec.VolumeName != "" { - c.equivalencePodCache.InvalidateCachedPredicateItemOfAllNodes(maxPDVolumeCountPredicateSet) +func (c *configFactory) invalidatePredicatesForPvc(pvc *v1.PersistentVolumeClaim) { + // We need to do this here because the ecache uses PVC uid as part of equivalence hash of pod + + // The bound volume type may change + invalidPredicates := sets.NewString(maxPDVolumeCountPredicateKeys...) + + // The bound volume's label may change + invalidPredicates.Insert("NoVolumeZoneConflict") + + if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) { + // Add/delete impacts the available PVs to choose from + invalidPredicates.Insert(predicates.CheckVolumeBinding) + } + c.equivalencePodCache.InvalidateCachedPredicateItemOfAllNodes(invalidPredicates) +} + +func (c *configFactory) invalidatePredicatesForPvcUpdate(old, new *v1.PersistentVolumeClaim) { + invalidPredicates := sets.NewString() + + if old.Spec.VolumeName != new.Spec.VolumeName { + if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) { + // PVC volume binding has changed + invalidPredicates.Insert(predicates.CheckVolumeBinding) + } + // The bound volume type may change + invalidPredicates.Insert(maxPDVolumeCountPredicateKeys...) + // The bound volume's label may change + invalidPredicates.Insert("NoVolumeZoneConflict") } + + c.equivalencePodCache.InvalidateCachedPredicateItemOfAllNodes(invalidPredicates) } -func (c *ConfigFactory) onServiceAdd(obj interface{}) { +func (c *configFactory) onServiceAdd(obj interface{}) { if c.enableEquivalenceClassCache { c.equivalencePodCache.InvalidateCachedPredicateItemOfAllNodes(serviceAffinitySet) } + c.podQueue.MoveAllToActiveQueue() } -func (c *ConfigFactory) onServiceUpdate(oldObj interface{}, newObj interface{}) { +func (c *configFactory) onServiceUpdate(oldObj interface{}, newObj interface{}) { if c.enableEquivalenceClassCache { // TODO(resouer) We may need to invalidate this for specified group of pods only oldService := oldObj.(*v1.Service) @@ -358,38 +518,40 @@ func (c *ConfigFactory) onServiceUpdate(oldObj interface{}, newObj interface{}) c.equivalencePodCache.InvalidateCachedPredicateItemOfAllNodes(serviceAffinitySet) } } + c.podQueue.MoveAllToActiveQueue() } -func (c *ConfigFactory) onServiceDelete(obj interface{}) { +func (c *configFactory) onServiceDelete(obj interface{}) { if c.enableEquivalenceClassCache { c.equivalencePodCache.InvalidateCachedPredicateItemOfAllNodes(serviceAffinitySet) } + c.podQueue.MoveAllToActiveQueue() } // GetNodeStore provides the cache to the nodes, mostly internal use, but may also be called by mock-tests. -func (c *ConfigFactory) GetNodeLister() corelisters.NodeLister { +func (c *configFactory) GetNodeLister() corelisters.NodeLister { return c.nodeLister } -func (c *ConfigFactory) GetHardPodAffinitySymmetricWeight() int { +func (c *configFactory) GetHardPodAffinitySymmetricWeight() int32 { return c.hardPodAffinitySymmetricWeight } -func (f *ConfigFactory) GetSchedulerName() string { +func (f *configFactory) GetSchedulerName() string { return f.schedulerName } // GetClient provides a kubernetes client, mostly internal use, but may also be called by mock-tests. -func (f *ConfigFactory) GetClient() clientset.Interface { +func (f *configFactory) GetClient() clientset.Interface { return f.client } // GetScheduledPodListerIndexer provides a pod lister, mostly internal use, but may also be called by mock-tests. 
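// A standard-library sketch of the strip-and-compare technique used above by
// skipPodUpdate and isPodUpdated: copy the object, blank out fields that are
// expected to change on every write (resource version, node assignment,
// annotations), then compare what remains with reflect.DeepEqual. The miniPod
// type is a stand-in for illustration, not the real pod object.
package main

import (
	"fmt"
	"reflect"
)

type miniPod struct {
	Name            string
	ResourceVersion string
	NodeName        string
	Annotations     map[string]string
	Image           string
}

func stripVolatile(p miniPod) miniPod {
	p.ResourceVersion = ""
	p.NodeName = ""
	p.Annotations = nil
	return p
}

// onlyVolatileFieldsChanged reports whether the two versions differ only in the
// stripped fields, i.e. whether the update can safely be skipped.
func onlyVolatileFieldsChanged(old, updated miniPod) bool {
	return reflect.DeepEqual(stripVolatile(old), stripVolatile(updated))
}

func main() {
	old := miniPod{Name: "p", ResourceVersion: "1", Image: "nginx:1.13"}
	updated := miniPod{Name: "p", ResourceVersion: "2", NodeName: "node-1", Image: "nginx:1.13"}
	fmt.Println(onlyVolatileFieldsChanged(old, updated)) // true: safe to skip this update
}
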
-func (c *ConfigFactory) GetScheduledPodLister() corelisters.PodLister { +func (c *configFactory) GetScheduledPodLister() corelisters.PodLister { return c.scheduledPodLister } -func (c *ConfigFactory) addPodToCache(obj interface{}) { +func (c *configFactory) addPodToCache(obj interface{}) { pod, ok := obj.(*v1.Pod) if !ok { glog.Errorf("cannot convert to *v1.Pod: %v", obj) @@ -399,11 +561,14 @@ func (c *ConfigFactory) addPodToCache(obj interface{}) { if err := c.schedulerCache.AddPod(pod); err != nil { glog.Errorf("scheduler cache AddPod failed: %v", err) } + + c.podQueue.AssignedPodAdded(pod) + // NOTE: Updating equivalence cache of addPodToCache has been - // handled optimistically in InvalidateCachedPredicateItemForPodAdd. + // handled optimistically in: plugin/pkg/scheduler/scheduler.go#assume() } -func (c *ConfigFactory) updatePodInCache(oldObj, newObj interface{}) { +func (c *configFactory) updatePodInCache(oldObj, newObj interface{}) { oldPod, ok := oldObj.(*v1.Pod) if !ok { glog.Errorf("cannot convert oldObj to *v1.Pod: %v", oldObj) @@ -420,12 +585,13 @@ func (c *ConfigFactory) updatePodInCache(oldObj, newObj interface{}) { } c.invalidateCachedPredicatesOnUpdatePod(newPod, oldPod) + c.podQueue.AssignedPodUpdated(newPod) } -func (c *ConfigFactory) invalidateCachedPredicatesOnUpdatePod(newPod *v1.Pod, oldPod *v1.Pod) { +func (c *configFactory) invalidateCachedPredicatesOnUpdatePod(newPod *v1.Pod, oldPod *v1.Pod) { if c.enableEquivalenceClassCache { - // if the pod does not have binded node, updating equivalence cache is meaningless; - // if pod's binded node has been changed, that case should be handled by pod add & delete. + // if the pod does not have bound node, updating equivalence cache is meaningless; + // if pod's bound node has been changed, that case should be handled by pod add & delete. if len(newPod.Spec.NodeName) != 0 && newPod.Spec.NodeName == oldPod.Spec.NodeName { if !reflect.DeepEqual(oldPod.GetLabels(), newPod.GetLabels()) { // MatchInterPodAffinity need to be reconsidered for this node, @@ -443,7 +609,7 @@ func (c *ConfigFactory) invalidateCachedPredicatesOnUpdatePod(newPod *v1.Pod, ol } } -func (c *ConfigFactory) deletePodFromCache(obj interface{}) { +func (c *configFactory) deletePodFromCache(obj interface{}) { var pod *v1.Pod switch t := obj.(type) { case *v1.Pod: @@ -464,9 +630,10 @@ func (c *ConfigFactory) deletePodFromCache(obj interface{}) { } c.invalidateCachedPredicatesOnDeletePod(pod) + c.podQueue.MoveAllToActiveQueue() } -func (c *ConfigFactory) invalidateCachedPredicatesOnDeletePod(pod *v1.Pod) { +func (c *configFactory) invalidateCachedPredicatesOnDeletePod(pod *v1.Pod) { if c.enableEquivalenceClassCache { // part of this case is the same as pod add. 
c.equivalencePodCache.InvalidateCachedPredicateItemForPodAdd(pod, pod.Spec.NodeName) @@ -487,7 +654,7 @@ func (c *ConfigFactory) invalidateCachedPredicatesOnDeletePod(pod *v1.Pod) { } } -func (c *ConfigFactory) addNodeToCache(obj interface{}) { +func (c *configFactory) addNodeToCache(obj interface{}) { node, ok := obj.(*v1.Node) if !ok { glog.Errorf("cannot convert to *v1.Node: %v", obj) @@ -498,10 +665,11 @@ func (c *ConfigFactory) addNodeToCache(obj interface{}) { glog.Errorf("scheduler cache AddNode failed: %v", err) } + c.podQueue.MoveAllToActiveQueue() // NOTE: add a new node does not affect existing predicates in equivalence cache } -func (c *ConfigFactory) updateNodeInCache(oldObj, newObj interface{}) { +func (c *configFactory) updateNodeInCache(oldObj, newObj interface{}) { oldNode, ok := oldObj.(*v1.Node) if !ok { glog.Errorf("cannot convert oldObj to *v1.Node: %v", oldObj) @@ -518,23 +686,15 @@ func (c *ConfigFactory) updateNodeInCache(oldObj, newObj interface{}) { } c.invalidateCachedPredicatesOnNodeUpdate(newNode, oldNode) + c.podQueue.MoveAllToActiveQueue() } -func (c *ConfigFactory) invalidateCachedPredicatesOnNodeUpdate(newNode *v1.Node, oldNode *v1.Node) { +func (c *configFactory) invalidateCachedPredicatesOnNodeUpdate(newNode *v1.Node, oldNode *v1.Node) { if c.enableEquivalenceClassCache { // Begin to update equivalence cache based on node update // TODO(resouer): think about lazily initialize this set invalidPredicates := sets.NewString() - oldTaints, oldErr := helper.GetTaintsFromNodeAnnotations(oldNode.GetAnnotations()) - if oldErr != nil { - glog.Errorf("Failed to get taints from old node annotation for equivalence cache") - } - newTaints, newErr := helper.GetTaintsFromNodeAnnotations(newNode.GetAnnotations()) - if newErr != nil { - glog.Errorf("Failed to get taints from new node annotation for equivalence cache") - } - if !reflect.DeepEqual(oldNode.Status.Allocatable, newNode.Status.Allocatable) { invalidPredicates.Insert("GeneralPredicates") // "PodFitsResources" } @@ -553,9 +713,20 @@ func (c *ConfigFactory) invalidateCachedPredicatesOnNodeUpdate(newNode *v1.Node, } } } - if !reflect.DeepEqual(oldTaints, newTaints) { + + oldTaints, oldErr := helper.GetTaintsFromNodeAnnotations(oldNode.GetAnnotations()) + if oldErr != nil { + glog.Errorf("Failed to get taints from old node annotation for equivalence cache") + } + newTaints, newErr := helper.GetTaintsFromNodeAnnotations(newNode.GetAnnotations()) + if newErr != nil { + glog.Errorf("Failed to get taints from new node annotation for equivalence cache") + } + if !reflect.DeepEqual(oldTaints, newTaints) || + !reflect.DeepEqual(oldNode.Spec.Taints, newNode.Spec.Taints) { invalidPredicates.Insert("PodToleratesNodeTaints") } + if !reflect.DeepEqual(oldNode.Status.Conditions, newNode.Status.Conditions) { oldConditions := make(map[v1.NodeConditionType]v1.ConditionStatus) newConditions := make(map[v1.NodeConditionType]v1.ConditionStatus) @@ -582,7 +753,7 @@ func (c *ConfigFactory) invalidateCachedPredicatesOnNodeUpdate(newNode *v1.Node, } } -func (c *ConfigFactory) deleteNodeFromCache(obj interface{}) { +func (c *configFactory) deleteNodeFromCache(obj interface{}) { var node *v1.Node switch t := obj.(type) { case *v1.Node: @@ -606,13 +777,63 @@ func (c *ConfigFactory) deleteNodeFromCache(obj interface{}) { } } +func (c *configFactory) addPDBToCache(obj interface{}) { + pdb, ok := obj.(*v1beta1.PodDisruptionBudget) + if !ok { + glog.Errorf("cannot convert to *v1beta1.PodDisruptionBudget: %v", obj) + return + } + + if err := 
c.schedulerCache.AddPDB(pdb); err != nil { + glog.Errorf("scheduler cache AddPDB failed: %v", err) + } +} + +func (c *configFactory) updatePDBInCache(oldObj, newObj interface{}) { + oldPDB, ok := oldObj.(*v1beta1.PodDisruptionBudget) + if !ok { + glog.Errorf("cannot convert oldObj to *v1beta1.PodDisruptionBudget: %v", oldObj) + return + } + newPDB, ok := newObj.(*v1beta1.PodDisruptionBudget) + if !ok { + glog.Errorf("cannot convert newObj to *v1beta1.PodDisruptionBudget: %v", newObj) + return + } + + if err := c.schedulerCache.UpdatePDB(oldPDB, newPDB); err != nil { + glog.Errorf("scheduler cache UpdatePDB failed: %v", err) + } +} + +func (c *configFactory) deletePDBFromCache(obj interface{}) { + var pdb *v1beta1.PodDisruptionBudget + switch t := obj.(type) { + case *v1beta1.PodDisruptionBudget: + pdb = t + case cache.DeletedFinalStateUnknown: + var ok bool + pdb, ok = t.Obj.(*v1beta1.PodDisruptionBudget) + if !ok { + glog.Errorf("cannot convert to *v1beta1.PodDisruptionBudget: %v", t.Obj) + return + } + default: + glog.Errorf("cannot convert to *v1beta1.PodDisruptionBudget: %v", t) + return + } + if err := c.schedulerCache.RemovePDB(pdb); err != nil { + glog.Errorf("scheduler cache RemovePDB failed: %v", err) + } +} + // Create creates a scheduler with the default algorithm provider. -func (f *ConfigFactory) Create() (*scheduler.Config, error) { +func (f *configFactory) Create() (*scheduler.Config, error) { return f.CreateFromProvider(DefaultProvider) } // Creates a scheduler from the name of a registered algorithm provider. -func (f *ConfigFactory) CreateFromProvider(providerName string) (*scheduler.Config, error) { +func (f *configFactory) CreateFromProvider(providerName string) (*scheduler.Config, error) { glog.V(2).Infof("Creating scheduler from algorithm provider '%v'", providerName) provider, err := GetAlgorithmProvider(providerName) if err != nil { @@ -623,7 +844,7 @@ func (f *ConfigFactory) CreateFromProvider(providerName string) (*scheduler.Conf } // Creates a scheduler from the configuration file -func (f *ConfigFactory) CreateFromConfig(policy schedulerapi.Policy) (*scheduler.Config, error) { +func (f *configFactory) CreateFromConfig(policy schedulerapi.Policy) (*scheduler.Config, error) { glog.V(2).Infof("Creating scheduler from configuration: %v", policy) // validate the policy configuration @@ -663,7 +884,7 @@ func (f *ConfigFactory) CreateFromConfig(policy schedulerapi.Policy) (*scheduler } // getBinder returns an extender that supports bind or a default binder. -func (f *ConfigFactory) getBinder(extenders []algorithm.SchedulerExtender) scheduler.Binder { +func (f *configFactory) getBinder(extenders []algorithm.SchedulerExtender) scheduler.Binder { for i := range extenders { if extenders[i].IsBinder() { return extenders[i] @@ -673,7 +894,7 @@ func (f *ConfigFactory) getBinder(extenders []algorithm.SchedulerExtender) sched } // Creates a scheduler from a set of registered fit predicate keys and priority keys. 
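// A dependency-free sketch of the tombstone-aware type switch used by the
// delete handlers above (deletePDBFromCache, deleteNodeFromCache, and friends):
// a delete event may carry either the object itself or a "deleted final state
// unknown" wrapper, so both shapes are unwrapped before touching the cache.
// The budget and tombstone types here stand in for the real API types.
package main

import "fmt"

type budget struct{ name string }

// tombstone plays the role of cache.DeletedFinalStateUnknown in this sketch.
type tombstone struct{ obj interface{} }

func extractBudget(obj interface{}) (*budget, bool) {
	switch t := obj.(type) {
	case *budget:
		return t, true
	case tombstone:
		b, ok := t.obj.(*budget)
		return b, ok
	default:
		return nil, false
	}
}

func main() {
	direct := &budget{name: "db-pdb"}
	wrapped := tombstone{obj: &budget{name: "web-pdb"}}

	if b, ok := extractBudget(direct); ok {
		fmt.Println("delete", b.name)
	}
	if b, ok := extractBudget(wrapped); ok {
		fmt.Println("delete", b.name)
	}
}
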
-func (f *ConfigFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String, extenders []algorithm.SchedulerExtender) (*scheduler.Config, error) { +func (f *configFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String, extenders []algorithm.SchedulerExtender) (*scheduler.Config, error) { glog.V(2).Infof("Creating scheduler with fit predicates '%v' and priority functions '%v'", predicateKeys, priorityKeys) if f.GetHardPodAffinitySymmetricWeight() < 1 || f.GetHardPodAffinitySymmetricWeight() > 100 { @@ -701,11 +922,18 @@ func (f *ConfigFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String, } // Init equivalence class cache - if f.enableEquivalenceClassCache && getEquivalencePodFunc != nil { - f.equivalencePodCache = core.NewEquivalenceCache(getEquivalencePodFunc) + if f.enableEquivalenceClassCache && getEquivalencePodFuncFactory != nil { + pluginArgs, err := f.getPluginArgs() + if err != nil { + return nil, err + } + f.equivalencePodCache = core.NewEquivalenceCache( + getEquivalencePodFuncFactory(*pluginArgs), + ) glog.Info("Created equivalence class cache") } - algo := core.NewGenericScheduler(f.schedulerCache, f.equivalencePodCache, predicateFuncs, predicateMetaProducer, priorityConfigs, priorityMetaProducer, extenders) + + algo := core.NewGenericScheduler(f.schedulerCache, f.equivalencePodCache, f.podQueue, predicateFuncs, predicateMetaProducer, priorityConfigs, priorityMetaProducer, extenders, f.volumeBinder) podBackoff := util.CreateDefaultPodBackoff() return &scheduler.Config{ @@ -725,6 +953,7 @@ func (f *ConfigFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String, }, Error: f.MakeDefaultErrorFunc(podBackoff, f.podQueue), StopEverything: f.StopEverything, + VolumeBinder: f.volumeBinder, }, nil } @@ -736,7 +965,7 @@ func (n *nodeLister) List() ([]*v1.Node, error) { return n.NodeLister.List(labels.Everything()) } -func (f *ConfigFactory) GetPriorityFunctionConfigs(priorityKeys sets.String) ([]algorithm.PriorityConfig, error) { +func (f *configFactory) GetPriorityFunctionConfigs(priorityKeys sets.String) ([]algorithm.PriorityConfig, error) { pluginArgs, err := f.getPluginArgs() if err != nil { return nil, err @@ -745,7 +974,7 @@ func (f *ConfigFactory) GetPriorityFunctionConfigs(priorityKeys sets.String) ([] return getPriorityFunctionConfigs(priorityKeys, *pluginArgs) } -func (f *ConfigFactory) GetPriorityMetadataProducer() (algorithm.MetadataProducer, error) { +func (f *configFactory) GetPriorityMetadataProducer() (algorithm.MetadataProducer, error) { pluginArgs, err := f.getPluginArgs() if err != nil { return nil, err @@ -754,7 +983,7 @@ func (f *ConfigFactory) GetPriorityMetadataProducer() (algorithm.MetadataProduce return getPriorityMetadataProducer(*pluginArgs) } -func (f *ConfigFactory) GetPredicateMetadataProducer() (algorithm.PredicateMetadataProducer, error) { +func (f *configFactory) GetPredicateMetadataProducer() (algorithm.PredicateMetadataProducer, error) { pluginArgs, err := f.getPluginArgs() if err != nil { return nil, err @@ -762,7 +991,7 @@ func (f *ConfigFactory) GetPredicateMetadataProducer() (algorithm.PredicateMetad return getPredicateMetadataProducer(*pluginArgs) } -func (f *ConfigFactory) GetPredicates(predicateKeys sets.String) (map[string]algorithm.FitPredicate, error) { +func (f *configFactory) GetPredicates(predicateKeys sets.String) (map[string]algorithm.FitPredicate, error) { pluginArgs, err := f.getPluginArgs() if err != nil { return nil, err @@ -771,35 +1000,33 @@ func (f *ConfigFactory) GetPredicates(predicateKeys 
sets.String) (map[string]alg return getFitPredicateFunctions(predicateKeys, *pluginArgs) } -func (f *ConfigFactory) getPluginArgs() (*PluginFactoryArgs, error) { +func (f *configFactory) getPluginArgs() (*PluginFactoryArgs, error) { return &PluginFactoryArgs{ - PodLister: f.podLister, - ServiceLister: f.serviceLister, - ControllerLister: f.controllerLister, - ReplicaSetLister: f.replicaSetLister, - StatefulSetLister: f.statefulSetLister, - NodeLister: &nodeLister{f.nodeLister}, - NodeInfo: &predicates.CachedNodeInfo{NodeLister: f.nodeLister}, - PVInfo: &predicates.CachedPersistentVolumeInfo{PersistentVolumeLister: f.pVLister}, - PVCInfo: &predicates.CachedPersistentVolumeClaimInfo{PersistentVolumeClaimLister: f.pVCLister}, + PodLister: f.podLister, + ServiceLister: f.serviceLister, + ControllerLister: f.controllerLister, + ReplicaSetLister: f.replicaSetLister, + StatefulSetLister: f.statefulSetLister, + NodeLister: &nodeLister{f.nodeLister}, + NodeInfo: &predicates.CachedNodeInfo{NodeLister: f.nodeLister}, + PVInfo: &predicates.CachedPersistentVolumeInfo{PersistentVolumeLister: f.pVLister}, + PVCInfo: &predicates.CachedPersistentVolumeClaimInfo{PersistentVolumeClaimLister: f.pVCLister}, + StorageClassInfo: &predicates.CachedStorageClassInfo{StorageClassLister: f.storageClassLister}, + VolumeBinder: f.volumeBinder, HardPodAffinitySymmetricWeight: f.hardPodAffinitySymmetricWeight, }, nil } -func (f *ConfigFactory) getNextPod() *v1.Pod { - for { - pod := cache.Pop(f.podQueue).(*v1.Pod) - if f.ResponsibleForPod(pod) { - glog.V(4).Infof("About to try and schedule pod %v", pod.Name) - return pod - } +func (f *configFactory) getNextPod() *v1.Pod { + if pod, err := f.podQueue.Pop(); err == nil { + glog.V(4).Infof("About to try and schedule pod %v", pod.Name) + return pod + } else { + glog.Errorf("Error while retrieving next pod from scheduling queue: %v", err) + return nil } } -func (f *ConfigFactory) ResponsibleForPod(pod *v1.Pod) bool { - return f.schedulerName == pod.Spec.SchedulerName -} - // unassignedNonTerminatedPod selects pods that are unassigned and non-terminal. func unassignedNonTerminatedPod(pod *v1.Pod) bool { if len(pod.Spec.NodeName) != 0 { @@ -894,25 +1121,44 @@ func (i *podInformer) Lister() corelisters.PodLister { } // NewPodInformer creates a shared index informer that returns only non-terminal pods. 
-func NewPodInformer(client clientset.Interface, resyncPeriod time.Duration) coreinformers.PodInformer { - selector := fields.ParseSelectorOrDie("status.phase!=" + string(v1.PodSucceeded) + ",status.phase!=" + string(v1.PodFailed)) +func NewPodInformer(client clientset.Interface, resyncPeriod time.Duration, schedulerName string) coreinformers.PodInformer { + selector := fields.ParseSelectorOrDie( + "spec.schedulerName=" + schedulerName + + ",status.phase!=" + string(v1.PodSucceeded) + + ",status.phase!=" + string(v1.PodFailed)) lw := cache.NewListWatchFromClient(client.CoreV1().RESTClient(), string(v1.ResourcePods), metav1.NamespaceAll, selector) return &podInformer{ informer: cache.NewSharedIndexInformer(lw, &v1.Pod{}, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}), } } -func (factory *ConfigFactory) MakeDefaultErrorFunc(backoff *util.PodBackoff, podQueue *cache.FIFO) func(pod *v1.Pod, err error) { +func (factory *configFactory) MakeDefaultErrorFunc(backoff *util.PodBackoff, podQueue core.SchedulingQueue) func(pod *v1.Pod, err error) { return func(pod *v1.Pod, err error) { if err == core.ErrNoNodesAvailable { glog.V(4).Infof("Unable to schedule %v %v: no nodes are registered to the cluster; waiting", pod.Namespace, pod.Name) } else { if _, ok := err.(*core.FitError); ok { glog.V(4).Infof("Unable to schedule %v %v: no fit: %v; waiting", pod.Namespace, pod.Name, err) + } else if errors.IsNotFound(err) { + if errStatus, ok := err.(errors.APIStatus); ok && errStatus.Status().Details.Kind == "node" { + nodeName := errStatus.Status().Details.Name + // when node is not found, We do not remove the node right away. Trying again to get + // the node and if the node is still not found, then remove it from the scheduler cache. + _, err := factory.client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + if err != nil && errors.IsNotFound(err) { + node := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName}} + factory.schedulerCache.RemoveNode(&node) + // invalidate cached predicate for the node + if factory.enableEquivalenceClassCache { + factory.equivalencePodCache.InvalidateAllCachedPredicateItemOfNode(nodeName) + } + } + } } else { glog.Errorf("Error scheduling %v %v: %v; retrying", pod.Namespace, pod.Name, err) } } + backoff.Gc() // Retry asynchronously. // Note that this is extremely rudimentary and we need a more real error handling path. @@ -922,11 +1168,18 @@ func (factory *ConfigFactory) MakeDefaultErrorFunc(backoff *util.PodBackoff, pod Namespace: pod.Namespace, Name: pod.Name, } - - entry := backoff.GetEntry(podID) - if !entry.TryWait(backoff.MaxDuration()) { - glog.Warningf("Request for pod %v already in flight, abandoning", podID) - return + origPod := pod + + // When pod priority is enabled, we would like to place an unschedulable + // pod in the unschedulable queue. This ensures that if the pod is nominated + // to run on a node, scheduler takes the pod into account when running + // predicates for the node. + if !util.PodPriorityEnabled() { + entry := backoff.GetEntry(podID) + if !entry.TryWait(backoff.MaxDuration()) { + glog.Warningf("Request for pod %v already in flight, abandoning", podID) + return + } } // Get the pod again; it may have changed/been scheduled already. 
getBackoff := initialGetBackoff @@ -934,12 +1187,22 @@ func (factory *ConfigFactory) MakeDefaultErrorFunc(backoff *util.PodBackoff, pod pod, err := factory.client.CoreV1().Pods(podID.Namespace).Get(podID.Name, metav1.GetOptions{}) if err == nil { if len(pod.Spec.NodeName) == 0 { - podQueue.AddIfNotPresent(pod) + podQueue.AddUnschedulableIfNotPresent(pod) + } else { + if factory.volumeBinder != nil { + // Volume binder only wants to keep unassigned pods + factory.volumeBinder.DeletePodBindings(pod) + } } break } if errors.IsNotFound(err) { glog.Warningf("A pod %v no longer exists", podID) + + if factory.volumeBinder != nil { + // Volume binder only wants to keep unassigned pods + factory.volumeBinder.DeletePodBindings(origPod) + } return } glog.Errorf("Error getting pod %v for retry: %v; retrying...", podID, err) @@ -1005,7 +1268,6 @@ func (p *podPreemptor) DeletePod(pod *v1.Pod) error { return p.Client.CoreV1().Pods(pod.Namespace).Delete(pod.Name, &metav1.DeleteOptions{}) } -//TODO(bsalamat): change this to patch PodStatus to avoid overwriting potential pending status updates. func (p *podPreemptor) UpdatePodAnnotations(pod *v1.Pod, annotations map[string]string) error { podCopy := pod.DeepCopy() if podCopy.Annotations == nil { @@ -1014,6 +1276,33 @@ func (p *podPreemptor) UpdatePodAnnotations(pod *v1.Pod, annotations map[string] for k, v := range annotations { podCopy.Annotations[k] = v } - _, err := p.Client.CoreV1().Pods(podCopy.Namespace).UpdateStatus(podCopy) - return err + ret := &unstructured.Unstructured{} + ret.SetAnnotations(podCopy.Annotations) + patchData, err := json.Marshal(ret) + if err != nil { + return err + } + _, error := p.Client.CoreV1().Pods(podCopy.Namespace).Patch(podCopy.Name, types.MergePatchType, patchData, "status") + return error +} + +func (p *podPreemptor) RemoveNominatedNodeAnnotation(pod *v1.Pod) error { + podCopy := pod.DeepCopy() + if podCopy.Annotations == nil { + return nil + } + if _, exists := podCopy.Annotations[core.NominatedNodeAnnotationKey]; !exists { + return nil + } + // Note: Deleting the entry from the annotations and passing it to Patch() will + // not remove the annotation. That's why we set it to empty string. + podCopy.Annotations[core.NominatedNodeAnnotationKey] = "" + ret := &unstructured.Unstructured{} + ret.SetAnnotations(podCopy.Annotations) + patchData, err := json.Marshal(ret) + if err != nil { + return err + } + _, error := p.Client.CoreV1().Pods(podCopy.Namespace).Patch(podCopy.Name, types.MergePatchType, patchData, "status") + return error } diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/factory/plugins.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/factory/plugins.go index 08138bea6ac5..6c7a7ab7d5f7 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/factory/plugins.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/factory/plugins.go @@ -30,6 +30,7 @@ import ( schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" "github.com/golang/glog" + "k8s.io/kubernetes/plugin/pkg/scheduler/volumebinder" ) // PluginFactoryArgs are passed to all plugin factory functions. @@ -43,7 +44,9 @@ type PluginFactoryArgs struct { NodeInfo predicates.NodeInfo PVInfo predicates.PersistentVolumeInfo PVCInfo predicates.PersistentVolumeClaimInfo - HardPodAffinitySymmetricWeight int + StorageClassInfo predicates.StorageClassInfo + VolumeBinder *volumebinder.VolumeBinder + HardPodAffinitySymmetricWeight int32 } // MetadataProducerFactory produces MetadataProducer from the given args. 
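The PluginFactoryArgs hunk above threads the new StorageClassInfo and VolumeBinder handles (plus the int32 affinity weight) through to every registered plugin factory. As a rough sketch only — the predicate name and examplePredicate are placeholders, not part of this patch — an out-of-tree caller could capture those fields at registration time via RegisterFitPredicateFactory:

    // Hypothetical registration; examplePredicate is assumed to be an
    // algorithm.FitPredicate defined by the caller.
    factory.RegisterFitPredicateFactory(
        "ExampleStorageClassAwarePredicate",
        func(args factory.PluginFactoryArgs) algorithm.FitPredicate {
            // The factory closure can hold on to the new fields, e.g.
            // args.StorageClassInfo or args.VolumeBinder, for use by the predicate.
            _ = args.StorageClassInfo
            _ = args.VolumeBinder
            return examplePredicate
        },
    )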
@@ -73,6 +76,9 @@ type PriorityConfigFactory struct { Weight int } +// EquivalencePodFuncFactory produces a function to get the equivalence class for a given pod. +type EquivalencePodFuncFactory func(PluginFactoryArgs) algorithm.GetEquivalencePodFunc + var ( schedulerFactoryMutex sync.Mutex @@ -87,7 +93,7 @@ var ( predicateMetadataProducer PredicateMetadataProducerFactory // get equivalence pod function - getEquivalencePodFunc algorithm.GetEquivalencePodFunc + getEquivalencePodFuncFactory EquivalencePodFuncFactory ) const ( @@ -105,6 +111,66 @@ func RegisterFitPredicate(name string, predicate algorithm.FitPredicate) string { return RegisterFitPredicateFactory(name, func(PluginFactoryArgs) algorithm.FitPredicate { return predicate }) } +// RemoveFitPredicate removes a fit predicate from factory. +func RemoveFitPredicate(name string) { + schedulerFactoryMutex.Lock() + defer schedulerFactoryMutex.Unlock() + + validateAlgorithmNameOrDie(name) + delete(fitPredicateMap, name) + mandatoryFitPredicates.Delete(name) +} + +// RemovePredicateKeyFromAlgoProvider removes a fit predicate key from algorithmProvider. +func RemovePredicateKeyFromAlgoProvider(providerName, key string) error { + schedulerFactoryMutex.Lock() + defer schedulerFactoryMutex.Unlock() + + validateAlgorithmNameOrDie(providerName) + provider, ok := algorithmProviderMap[providerName] + if !ok { + return fmt.Errorf("plugin %v has not been registered", providerName) + } + provider.FitPredicateKeys.Delete(key) + return nil +} + +// RemovePredicateKeyFromAlgorithmProviderMap removes a fit predicate key from all algorithm providers in algorithmProviderMap. +func RemovePredicateKeyFromAlgorithmProviderMap(key string) { + schedulerFactoryMutex.Lock() + defer schedulerFactoryMutex.Unlock() + + for _, provider := range algorithmProviderMap { + provider.FitPredicateKeys.Delete(key) + } + return +} + +// InsertPredicateKeyToAlgoProvider inserts a fit predicate key to algorithmProvider. +func InsertPredicateKeyToAlgoProvider(providerName, key string) error { + schedulerFactoryMutex.Lock() + defer schedulerFactoryMutex.Unlock() + + validateAlgorithmNameOrDie(providerName) + provider, ok := algorithmProviderMap[providerName] + if !ok { + return fmt.Errorf("plugin %v has not been registered", providerName) + } + provider.FitPredicateKeys.Insert(key) + return nil +} + +// InsertPredicateKeyToAlgorithmProviderMap inserts a fit predicate key to all algorithm providers in algorithmProviderMap. +func InsertPredicateKeyToAlgorithmProviderMap(key string) { + schedulerFactoryMutex.Lock() + defer schedulerFactoryMutex.Unlock() + + for _, provider := range algorithmProviderMap { + provider.FitPredicateKeys.Insert(key) + } + return +} + // RegisterMandatoryFitPredicate registers a fit predicate with the algorithm registry, the predicate is used by // kubelet, DaemonSet; it is always included in configuration. Returns the name with which the predicate was // registered. @@ -276,8 +342,9 @@ func RegisterCustomPriorityFunction(policy schedulerapi.PriorityPolicy) string { return RegisterPriorityConfigFactory(policy.Name, *pcf) } -func RegisterGetEquivalencePodFunction(equivalenceFunc algorithm.GetEquivalencePodFunc) { - getEquivalencePodFunc = equivalenceFunc +// RegisterGetEquivalencePodFunction registers equivalenceFuncFactory to produce an equivalence class for a given pod.
+func RegisterGetEquivalencePodFunction(equivalenceFuncFactory EquivalencePodFuncFactory) { + getEquivalencePodFuncFactory = equivalenceFuncFactory } // IsPriorityFunctionRegistered is useful for testing providers. @@ -369,12 +436,14 @@ func getPriorityFunctionConfigs(names sets.String, args PluginFactoryArgs) ([]al } if factory.Function != nil { configs = append(configs, algorithm.PriorityConfig{ + Name: name, Function: factory.Function(args), Weight: factory.Weight, }) } else { mapFunction, reduceFunction := factory.MapReduceFunction(args) configs = append(configs, algorithm.PriorityConfig{ + Name: name, Map: mapFunction, Reduce: reduceFunction, Weight: factory.Weight, diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/metrics/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/metrics/BUILD index b9badf163a2a..7d059ed21123 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/metrics/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/metrics/BUILD @@ -8,6 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["metrics.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/metrics", deps = ["//vendor/github.com/prometheus/client_golang/prometheus:go_default_library"], ) diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/scheduler.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/scheduler.go index 729ac23a6ccb..b69aacc8eaf0 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/scheduler.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/scheduler.go @@ -17,6 +17,7 @@ limitations under the License. package scheduler import ( + "fmt" "time" "k8s.io/api/core/v1" @@ -26,10 +27,10 @@ import ( utilfeature "k8s.io/apiserver/pkg/util/feature" clientset "k8s.io/client-go/kubernetes" corelisters "k8s.io/client-go/listers/core/v1" - "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" + "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates" schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" "k8s.io/kubernetes/plugin/pkg/scheduler/core" "k8s.io/kubernetes/plugin/pkg/scheduler/metrics" @@ -37,6 +38,7 @@ import ( "k8s.io/kubernetes/plugin/pkg/scheduler/util" "github.com/golang/glog" + "k8s.io/kubernetes/plugin/pkg/scheduler/volumebinder" ) // Binder knows how to write a binding. @@ -56,6 +58,7 @@ type PodPreemptor interface { GetUpdatedPod(pod *v1.Pod) (*v1.Pod, error) DeletePod(pod *v1.Pod) error UpdatePodAnnotations(pod *v1.Pod, annots map[string]string) error + RemoveNominatedNodeAnnotation(pod *v1.Pod) error } // Scheduler watches for new unscheduled pods. It attempts to find @@ -78,12 +81,9 @@ type Configurator interface { GetPriorityMetadataProducer() (algorithm.MetadataProducer, error) GetPredicateMetadataProducer() (algorithm.PredicateMetadataProducer, error) GetPredicates(predicateKeys sets.String) (map[string]algorithm.FitPredicate, error) - GetHardPodAffinitySymmetricWeight() int + GetHardPodAffinitySymmetricWeight() int32 GetSchedulerName() string - MakeDefaultErrorFunc(backoff *util.PodBackoff, podQueue *cache.FIFO) func(pod *v1.Pod, err error) - - // Probably doesn't need to be public. But exposed for now in case. - ResponsibleForPod(pod *v1.Pod) bool + MakeDefaultErrorFunc(backoff *util.PodBackoff, podQueue core.SchedulingQueue) func(pod *v1.Pod, err error) // Needs to be exposed for things like integration tests where we want to make fake nodes. 
GetNodeLister() corelisters.NodeLister @@ -134,6 +134,9 @@ type Config struct { // Close this to shut down the scheduler. StopEverything chan struct{} + + // VolumeBinder handles PVC/PV binding for the pod. + VolumeBinder *volumebinder.VolumeBinder } // NewFromConfigurator returns a new scheduler that is created entirely by the Configurator. Assumes Create() is implemented. @@ -155,12 +158,24 @@ func NewFromConfigurator(c Configurator, modifiers ...func(c *Config)) (*Schedul return s, nil } +// NewFromConfig returns a new scheduler using the provided Config. +func NewFromConfig(config *Config) *Scheduler { + metrics.Register() + return &Scheduler{ + config: config, + } +} + // Run begins watching and scheduling. It waits for cache to be synced, then starts a goroutine and returns immediately. func (sched *Scheduler) Run() { if !sched.config.WaitForCacheSync() { return } + if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) { + go sched.config.VolumeBinder.Run(sched.bindVolumesWorker, sched.config.StopEverything) + } + go wait.Until(sched.scheduleOne, 0, sched.config.StopEverything) } @@ -188,8 +203,11 @@ func (sched *Scheduler) schedule(pod *v1.Pod) (string, error) { return host, err } +// preempt tries to create room for a pod that has failed to schedule, by preempting lower priority pods if possible. +// If it succeeds, it adds the name of the node where preemption has happened to the pod annotations. +// It returns the node name and an error if any. func (sched *Scheduler) preempt(preemptor *v1.Pod, scheduleErr error) (string, error) { - if !utilfeature.DefaultFeatureGate.Enabled(features.PodPriority) { + if !util.PodPriorityEnabled() { glog.V(3).Infof("Pod priority feature is not enabled. No preemption is performed.") return "", nil } @@ -198,29 +216,148 @@ func (sched *Scheduler) preempt(preemptor *v1.Pod, scheduleErr error) (string, e glog.Errorf("Error getting the updated preemptor pod object: %v", err) return "", err } - node, victims, err := sched.config.Algorithm.Preempt(preemptor, sched.config.NodeLister, scheduleErr) + node, victims, nominatedPodsToClear, err := sched.config.Algorithm.Preempt(preemptor, sched.config.NodeLister, scheduleErr) if err != nil { glog.Errorf("Error preempting victims to make room for %v/%v.", preemptor.Namespace, preemptor.Name) return "", err } - if node == nil { - return "", err + var nodeName = "" + if node != nil { + nodeName = node.Name + annotations := map[string]string{core.NominatedNodeAnnotationKey: nodeName} + err = sched.config.PodPreemptor.UpdatePodAnnotations(preemptor, annotations) + if err != nil { + glog.Errorf("Error in preemption process. Cannot update pod %v annotations: %v", preemptor.Name, err) + return "", err + } + for _, victim := range victims { + if err := sched.config.PodPreemptor.DeletePod(victim); err != nil { + glog.Errorf("Error preempting pod %v/%v: %v", victim.Namespace, victim.Name, err) + return "", err + } + sched.config.Recorder.Eventf(victim, v1.EventTypeNormal, "Preempted", "by %v/%v on node %v", preemptor.Namespace, preemptor.Name, nodeName) + } } - glog.Infof("Preempting %d pod(s) on node %v to make room for %v/%v.", len(victims), node.Name, preemptor.Namespace, preemptor.Name) - annotations := map[string]string{core.NominatedNodeAnnotationKey: node.Name} - err = sched.config.PodPreemptor.UpdatePodAnnotations(preemptor, annotations) - if err != nil { - glog.Errorf("Error in preemption process. 
Cannot update pod %v annotations: %v", preemptor.Name, err) - return "", err + // Clearing nominated pods should happen outside of "if node != nil". Node could + // be nil when a pod with nominated node name is eligible to preempt again, + // but preemption logic does not find any node for it. In that case Preempt() + // function of generic_scheduler.go returns the pod itself for removal of the annotation. + for _, p := range nominatedPodsToClear { + rErr := sched.config.PodPreemptor.RemoveNominatedNodeAnnotation(p) + if rErr != nil { + glog.Errorf("Cannot remove nominated node annotation of pod: %v", rErr) + // We do not return as this error is not critical. + } } - for _, victim := range victims { - if err := sched.config.PodPreemptor.DeletePod(victim); err != nil { - glog.Errorf("Error preempting pod %v/%v: %v", victim.Namespace, victim.Name, err) - return "", err + return nodeName, err +} + +// assumeAndBindVolumes will update the volume cache and then asynchronously bind volumes if required. +// +// If volume binding is required, then the bind volumes routine will update the pod to send it back through +// the scheduler. +// +// Otherwise, return nil error and continue to assume the pod. +// +// This function modifies assumed if volume binding is required. +func (sched *Scheduler) assumeAndBindVolumes(assumed *v1.Pod, host string) error { + if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) { + allBound, bindingRequired, err := sched.config.VolumeBinder.Binder.AssumePodVolumes(assumed, host) + if err != nil { + sched.config.Error(assumed, err) + sched.config.Recorder.Eventf(assumed, v1.EventTypeWarning, "FailedScheduling", "AssumePodVolumes failed: %v", err) + sched.config.PodConditionUpdater.Update(assumed, &v1.PodCondition{ + Type: v1.PodScheduled, + Status: v1.ConditionFalse, + Reason: "SchedulerError", + Message: err.Error(), + }) + return err + } + if !allBound { + err = fmt.Errorf("Volume binding started, waiting for completion") + if bindingRequired { + if sched.config.Ecache != nil { + invalidPredicates := sets.NewString(predicates.CheckVolumeBinding) + sched.config.Ecache.InvalidateCachedPredicateItemOfAllNodes(invalidPredicates) + } + + // bindVolumesWorker() will update the Pod object to put it back in the scheduler queue + sched.config.VolumeBinder.BindQueue.Add(assumed) + } else { + // We are just waiting for PV controller to finish binding, put it back in the + // scheduler queue + sched.config.Error(assumed, err) + sched.config.Recorder.Eventf(assumed, v1.EventTypeNormal, "FailedScheduling", "%v", err) + sched.config.PodConditionUpdater.Update(assumed, &v1.PodCondition{ + Type: v1.PodScheduled, + Status: v1.ConditionFalse, + Reason: "VolumeBindingWaiting", + }) + } + return err + } + } + return nil +} + +// bindVolumesWorker() processes pods queued in assumeAndBindVolumes() and tries to +// make the API update for volume binding. +// This function runs forever until the volume BindQueue is closed. +func (sched *Scheduler) bindVolumesWorker() { + workFunc := func() bool { + keyObj, quit := sched.config.VolumeBinder.BindQueue.Get() + if quit { + return true + } + defer sched.config.VolumeBinder.BindQueue.Done(keyObj) + + assumed, ok := keyObj.(*v1.Pod) + if !ok { + glog.V(4).Infof("Object is not a *v1.Pod") + return false + } + + // TODO: add metrics + var reason string + var eventType string + + glog.V(5).Infof("Trying to bind volumes for pod \"%v/%v\"", assumed.Namespace, assumed.Name) + + // The Pod is always sent back to the scheduler afterwards. 
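// Note: BindPodVolumes performs the real PV/PVC API updates that AssumePodVolumes
// only recorded in the scheduler cache. Success and failure are treated alike below:
// an error is always reported through sched.config.Error so the pod re-enters the
// scheduling queue, and a later pass binds it to the node once all of its volumes
// are observed as bound.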
+ err := sched.config.VolumeBinder.Binder.BindPodVolumes(assumed) + if err != nil { + glog.V(1).Infof("Failed to bind volumes for pod \"%v/%v\": %v", assumed.Namespace, assumed.Name, err) + reason = "VolumeBindingFailed" + eventType = v1.EventTypeWarning + } else { + glog.V(4).Infof("Successfully bound volumes for pod \"%v/%v\"", assumed.Namespace, assumed.Name) + reason = "VolumeBindingWaiting" + eventType = v1.EventTypeNormal + err = fmt.Errorf("Volume binding started, waiting for completion") + } + + // Always fail scheduling regardless of binding success. + // The Pod needs to be sent back through the scheduler to: + // * Retry volume binding if it fails. + // * Retry volume binding if dynamic provisioning fails. + // * Bind the Pod to the Node once all volumes are bound. + sched.config.Error(assumed, err) + sched.config.Recorder.Eventf(assumed, eventType, "FailedScheduling", "%v", err) + sched.config.PodConditionUpdater.Update(assumed, &v1.PodCondition{ + Type: v1.PodScheduled, + Status: v1.ConditionFalse, + Reason: reason, + }) + return false + } + + for { + if quit := workFunc(); quit { + glog.V(4).Infof("bindVolumesWorker shutting down") + break } - sched.config.Recorder.Eventf(victim, v1.EventTypeNormal, "Preempted", "by %v/%v on node %v", preemptor.Namespace, preemptor.Name, node.Name) } - return node.Name, err } // assume signals to the cache that a pod is already in the cache, so that binding can be asynchronous. @@ -317,16 +454,32 @@ func (sched *Scheduler) scheduleOne() { // Tell the cache to assume that a pod now is running on a given node, even though it hasn't been bound yet. // This allows us to keep scheduling without waiting on binding to occur. - assumedPod := *pod - // assume modifies `assumedPod` by setting NodeName=suggestedHost - err = sched.assume(&assumedPod, suggestedHost) + assumedPod := pod.DeepCopy() + + // Assume volumes first before assuming the pod. + // + // If no volumes need binding, then nil is returned, and we continue to assume the pod. + // + // Otherwise, an error is returned and volume binding is started asynchronously for all of the pod's volumes. + // scheduleOne() returns immediately on error, so that it doesn't continue to assume the pod. + // + // After the asynchronous volume binding updates are made, it will send the pod back through the scheduler for + // subsequent passes until all volumes are fully bound. + // + // This function modifies 'assumedPod' if volume binding is required. + err = sched.assumeAndBindVolumes(assumedPod, suggestedHost) + if err != nil { + return + } + // assume modifies `assumedPod` by setting NodeName=suggestedHost + err = sched.assume(assumedPod, suggestedHost) + if err != nil { + return + } // bind the pod to its host asynchronously (we can do this b/c of the assumption step above).
go func() { - err := sched.bind(&assumedPod, &v1.Binding{ + err := sched.bind(assumedPod, &v1.Binding{ ObjectMeta: metav1.ObjectMeta{Namespace: assumedPod.Namespace, Name: assumedPod.Name, UID: assumedPod.UID}, Target: v1.ObjectReference{ Kind: "Node", diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache/BUILD index d1f213cd7d84..6d5e6204f55e 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache/BUILD @@ -1,10 +1,4 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -14,12 +8,15 @@ go_library( "node_info.go", "util.go", ], + importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache", + visibility = ["//visibility:public"], deps = [ - "//pkg/api/v1/helper:go_default_library", + "//pkg/apis/core/v1/helper:go_default_library", "//plugin/pkg/scheduler/algorithm/priorities/util:go_default_library", "//plugin/pkg/scheduler/util:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/api/policy/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", @@ -30,15 +27,17 @@ go_library( go_test( name = "go_default_test", srcs = ["cache_test.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache", library = ":go_default_library", deps = [ - "//pkg/api/v1/helper:go_default_library", "//plugin/pkg/scheduler/algorithm/priorities/util:go_default_library", "//plugin/pkg/scheduler/util:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/api/policy/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", ], ) @@ -53,4 +52,5 @@ filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache/cache.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache/cache.go index 0f562e25925e..7e409903331b 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache/cache.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache/cache.go @@ -26,6 +26,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "github.com/golang/glog" + policy "k8s.io/api/policy/v1beta1" ) var ( @@ -55,6 +56,7 @@ type schedulerCache struct { // a map from pod key to podState. 
podStates map[string]*podState nodes map[string]*NodeInfo + pdbs map[string]*policy.PodDisruptionBudget } type podState struct { @@ -74,6 +76,7 @@ func newSchedulerCache(ttl, period time.Duration, stop <-chan struct{}) *schedul nodes: make(map[string]*NodeInfo), assumedPods: make(map[string]bool), podStates: make(map[string]*podState), + pdbs: make(map[string]*policy.PodDisruptionBudget), } } @@ -238,6 +241,7 @@ func (cache *schedulerCache) AddPod(pod *v1.Pod) error { } delete(cache.assumedPods, key) cache.podStates[key].deadline = nil + cache.podStates[key].pod = pod case !ok: // Pod was expired. We should add it back. cache.addPod(pod) @@ -307,6 +311,39 @@ func (cache *schedulerCache) RemovePod(pod *v1.Pod) error { return nil } +func (cache *schedulerCache) IsAssumedPod(pod *v1.Pod) (bool, error) { + key, err := getPodKey(pod) + if err != nil { + return false, err + } + + cache.mu.Lock() + defer cache.mu.Unlock() + + b, found := cache.assumedPods[key] + if !found { + return false, nil + } + return b, nil +} + +func (cache *schedulerCache) GetPod(pod *v1.Pod) (*v1.Pod, error) { + key, err := getPodKey(pod) + if err != nil { + return nil, err + } + + cache.mu.Lock() + defer cache.mu.Unlock() + + podState, ok := cache.podStates[key] + if !ok { + return nil, fmt.Errorf("pod %v does not exist", key) + } + + return podState.pod, nil +} + func (cache *schedulerCache) AddNode(node *v1.Node) error { cache.mu.Lock() defer cache.mu.Unlock() @@ -349,6 +386,39 @@ func (cache *schedulerCache) RemoveNode(node *v1.Node) error { return nil } +func (cache *schedulerCache) AddPDB(pdb *policy.PodDisruptionBudget) error { + cache.mu.Lock() + defer cache.mu.Unlock() + + // Unconditionally update cache. + cache.pdbs[pdb.Name] = pdb + return nil +} + +func (cache *schedulerCache) UpdatePDB(oldPDB, newPDB *policy.PodDisruptionBudget) error { + return cache.AddPDB(newPDB) +} + +func (cache *schedulerCache) RemovePDB(pdb *policy.PodDisruptionBudget) error { + cache.mu.Lock() + defer cache.mu.Unlock() + + delete(cache.pdbs, pdb.Name) + return nil +} + +func (cache *schedulerCache) ListPDBs(selector labels.Selector) ([]*policy.PodDisruptionBudget, error) { + cache.mu.Lock() + defer cache.mu.Unlock() + var pdbs []*policy.PodDisruptionBudget + for _, pdb := range cache.pdbs { + if selector.Matches(labels.Set(pdb.Labels)) { + pdbs = append(pdbs, pdb) + } + } + return pdbs, nil +} + func (cache *schedulerCache) run() { go wait.Until(cache.cleanupExpiredAssumedPods, cache.period, cache.stop) } @@ -369,7 +439,7 @@ func (cache *schedulerCache) cleanupAssumedPods(now time.Time) { panic("Key found in assumed set but not in podStates. Potentially a logical error.") } if !ps.bindingFinished { - glog.Warningf("Couldn't expire cache for pod %v/%v. Binding is still in progress.", + glog.V(3).Infof("Couldn't expire cache for pod %v/%v. Binding is still in progress.", ps.pod.Namespace, ps.pod.Name) continue } diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache/interface.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache/interface.go index 6acb92cd53dd..d8c09fb7e060 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache/interface.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache/interface.go @@ -18,6 +18,7 @@ package schedulercache import ( "k8s.io/api/core/v1" + policy "k8s.io/api/policy/v1beta1" "k8s.io/apimachinery/pkg/labels" ) @@ -79,6 +80,13 @@ type Cache interface { // RemovePod removes a pod. 
The pod's information would be subtracted from assigned node. RemovePod(pod *v1.Pod) error + // GetPod returns the pod from the cache with the same namespace and the + // same name of the specified pod. + GetPod(pod *v1.Pod) (*v1.Pod, error) + + // IsAssumedPod returns true if the pod is assumed and not expired. + IsAssumedPod(pod *v1.Pod) (bool, error) + // AddNode adds overall information about node. AddNode(node *v1.Node) error @@ -88,6 +96,18 @@ type Cache interface { // RemoveNode removes overall information about node. RemoveNode(node *v1.Node) error + // AddPDB adds a PodDisruptionBudget object to the cache. + AddPDB(pdb *policy.PodDisruptionBudget) error + + // UpdatePDB updates a PodDisruptionBudget object in the cache. + UpdatePDB(oldPDB, newPDB *policy.PodDisruptionBudget) error + + // RemovePDB removes a PodDisruptionBudget object from the cache. + RemovePDB(pdb *policy.PodDisruptionBudget) error + + // List lists all cached PDBs matching the selector. + ListPDBs(selector labels.Selector) ([]*policy.PodDisruptionBudget, error) + // UpdateNodeNameToInfoMap updates the passed infoMap to the current contents of Cache. // The node info contains aggregated information of pods scheduled (including assumed to be) // on this node. diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache/node_info.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache/node_info.go index a2808e2f3f6e..13f71d525a00 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache/node_info.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache/node_info.go @@ -24,7 +24,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" clientcache "k8s.io/client-go/tools/cache" - v1helper "k8s.io/kubernetes/pkg/api/v1/helper" + v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util" "k8s.io/kubernetes/plugin/pkg/scheduler/util" ) @@ -38,7 +38,7 @@ type NodeInfo struct { pods []*v1.Pod podsWithAffinity []*v1.Pod - usedPorts map[int]bool + usedPorts map[string]bool // Total requested resource of all pods on this node. // It includes assumed pods which scheduler sends binding to apiserver but @@ -70,9 +70,9 @@ type Resource struct { EphemeralStorage int64 // We store allowedPodNumber (which is Node.Status.Allocatable.Pods().Value()) // explicitly as int, to avoid conversions and improve performance. 
- AllowedPodNumber int - ExtendedResources map[v1.ResourceName]int64 - HugePages map[v1.ResourceName]int64 + AllowedPodNumber int + // ScalarResources + ScalarResources map[v1.ResourceName]int64 } // New creates a Resource from ResourceList @@ -101,11 +101,8 @@ func (r *Resource) Add(rl v1.ResourceList) { case v1.ResourceEphemeralStorage: r.EphemeralStorage += rQuant.Value() default: - if v1helper.IsExtendedResourceName(rName) { - r.AddExtended(rName, rQuant.Value()) - } - if v1helper.IsHugePageResourceName(rName) { - r.AddHugePages(rName, rQuant.Value()) + if v1helper.IsScalarResourceName(rName) { + r.AddScalar(rName, rQuant.Value()) } } } @@ -119,11 +116,12 @@ func (r *Resource) ResourceList() v1.ResourceList { v1.ResourcePods: *resource.NewQuantity(int64(r.AllowedPodNumber), resource.BinarySI), v1.ResourceEphemeralStorage: *resource.NewQuantity(r.EphemeralStorage, resource.BinarySI), } - for rName, rQuant := range r.ExtendedResources { - result[rName] = *resource.NewQuantity(rQuant, resource.DecimalSI) - } - for rName, rQuant := range r.HugePages { - result[rName] = *resource.NewQuantity(rQuant, resource.BinarySI) + for rName, rQuant := range r.ScalarResources { + if v1helper.IsHugePageResourceName(rName) { + result[rName] = *resource.NewQuantity(rQuant, resource.BinarySI) + } else { + result[rName] = *resource.NewQuantity(rQuant, resource.DecimalSI) + } } return result } @@ -136,43 +134,25 @@ func (r *Resource) Clone() *Resource { AllowedPodNumber: r.AllowedPodNumber, EphemeralStorage: r.EphemeralStorage, } - if r.ExtendedResources != nil { - res.ExtendedResources = make(map[v1.ResourceName]int64) - for k, v := range r.ExtendedResources { - res.ExtendedResources[k] = v - } - } - if r.HugePages != nil { - res.HugePages = make(map[v1.ResourceName]int64) - for k, v := range r.HugePages { - res.HugePages[k] = v + if r.ScalarResources != nil { + res.ScalarResources = make(map[v1.ResourceName]int64) + for k, v := range r.ScalarResources { + res.ScalarResources[k] = v } } return res } -func (r *Resource) AddExtended(name v1.ResourceName, quantity int64) { - r.SetExtended(name, r.ExtendedResources[name]+quantity) +func (r *Resource) AddScalar(name v1.ResourceName, quantity int64) { + r.SetScalar(name, r.ScalarResources[name]+quantity) } -func (r *Resource) SetExtended(name v1.ResourceName, quantity int64) { - // Lazily allocate opaque integer resource map. - if r.ExtendedResources == nil { - r.ExtendedResources = map[v1.ResourceName]int64{} +func (r *Resource) SetScalar(name v1.ResourceName, quantity int64) { + // Lazily allocate scalar resource map. + if r.ScalarResources == nil { + r.ScalarResources = map[v1.ResourceName]int64{} } - r.ExtendedResources[name] = quantity -} - -func (r *Resource) AddHugePages(name v1.ResourceName, quantity int64) { - r.SetHugePages(name, r.HugePages[name]+quantity) -} - -func (r *Resource) SetHugePages(name v1.ResourceName, quantity int64) { - // Lazily allocate hugepages resource map. - if r.HugePages == nil { - r.HugePages = map[v1.ResourceName]int64{} - } - r.HugePages[name] = quantity + r.ScalarResources[name] = quantity } // NewNodeInfo returns a ready to use empty NodeInfo object. 
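With ExtendedResources and HugePages collapsed into the single ScalarResources map above, accumulation and cloning follow one code path. A minimal sketch of how Resource.Add now populates that map (illustrative only; "example.com/gpu" is a made-up extended resource name, not part of this patch):

    r := &schedulercache.Resource{}
    r.Add(v1.ResourceList{
        v1.ResourceCPU:                     resource.MustParse("500m"),
        v1.ResourceName("example.com/gpu"): resource.MustParse("2"),
    })
    // Assuming the name passes v1helper.IsScalarResourceName, the quantity ends up in
    // r.ScalarResources["example.com/gpu"]; hugepage resources take the same path and
    // are only distinguished again when ResourceList() converts back to quantities.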
@@ -184,7 +164,7 @@ func NewNodeInfo(pods ...*v1.Pod) *NodeInfo { nonzeroRequest: &Resource{}, allocatableResource: &Resource{}, generation: 0, - usedPorts: make(map[int]bool), + usedPorts: make(map[string]bool), } for _, pod := range pods { ni.AddPod(pod) @@ -208,7 +188,7 @@ func (n *NodeInfo) Pods() []*v1.Pod { return n.pods } -func (n *NodeInfo) UsedPorts() map[int]bool { +func (n *NodeInfo) UsedPorts() map[string]bool { if n == nil { return nil } @@ -275,6 +255,11 @@ func (n *NodeInfo) AllocatableResource() Resource { return *n.allocatableResource } +// SetAllocatableResource sets the allocatableResource information of given node. +func (n *NodeInfo) SetAllocatableResource(allocatableResource *Resource) { + n.allocatableResource = allocatableResource +} + func (n *NodeInfo) Clone() *NodeInfo { clone := &NodeInfo{ node: n.node, @@ -284,7 +269,7 @@ func (n *NodeInfo) Clone() *NodeInfo { taintsErr: n.taintsErr, memoryPressureCondition: n.memoryPressureCondition, diskPressureCondition: n.diskPressureCondition, - usedPorts: make(map[int]bool), + usedPorts: make(map[string]bool), generation: n.generation, } if len(n.pods) > 0 { @@ -326,17 +311,11 @@ func (n *NodeInfo) AddPod(pod *v1.Pod) { n.requestedResource.Memory += res.Memory n.requestedResource.NvidiaGPU += res.NvidiaGPU n.requestedResource.EphemeralStorage += res.EphemeralStorage - if n.requestedResource.ExtendedResources == nil && len(res.ExtendedResources) > 0 { - n.requestedResource.ExtendedResources = map[v1.ResourceName]int64{} - } - for rName, rQuant := range res.ExtendedResources { - n.requestedResource.ExtendedResources[rName] += rQuant - } - if n.requestedResource.HugePages == nil && len(res.HugePages) > 0 { - n.requestedResource.HugePages = map[v1.ResourceName]int64{} + if n.requestedResource.ScalarResources == nil && len(res.ScalarResources) > 0 { + n.requestedResource.ScalarResources = map[v1.ResourceName]int64{} } - for rName, rQuant := range res.HugePages { - n.requestedResource.HugePages[rName] += rQuant + for rName, rQuant := range res.ScalarResources { + n.requestedResource.ScalarResources[rName] += rQuant } n.nonzeroRequest.MilliCPU += non0_cpu n.nonzeroRequest.Memory += non0_mem @@ -387,17 +366,11 @@ func (n *NodeInfo) RemovePod(pod *v1.Pod) error { n.requestedResource.MilliCPU -= res.MilliCPU n.requestedResource.Memory -= res.Memory n.requestedResource.NvidiaGPU -= res.NvidiaGPU - if len(res.ExtendedResources) > 0 && n.requestedResource.ExtendedResources == nil { - n.requestedResource.ExtendedResources = map[v1.ResourceName]int64{} + if len(res.ScalarResources) > 0 && n.requestedResource.ScalarResources == nil { + n.requestedResource.ScalarResources = map[v1.ResourceName]int64{} } - for rName, rQuant := range res.ExtendedResources { - n.requestedResource.ExtendedResources[rName] -= rQuant - } - if len(res.HugePages) > 0 && n.requestedResource.HugePages == nil { - n.requestedResource.HugePages = map[v1.ResourceName]int64{} - } - for rName, rQuant := range res.HugePages { - n.requestedResource.HugePages[rName] -= rQuant + for rName, rQuant := range res.ScalarResources { + n.requestedResource.ScalarResources[rName] -= rQuant } n.nonzeroRequest.MilliCPU -= non0_cpu n.nonzeroRequest.Memory -= non0_mem @@ -435,7 +408,26 @@ func (n *NodeInfo) updateUsedPorts(pod *v1.Pod, used bool) { // "0" is explicitly ignored in PodFitsHostPorts, // which is the only function that uses this value. 
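// With the defaulting below, each entry is keyed by protocol, host IP and port,
// e.g. {HostPort: 8080} becomes "TCP/0.0.0.0/8080" and
// {HostPort: 53, Protocol: UDP, HostIP: "127.0.0.1"} becomes "UDP/127.0.0.1/53",
// so PodFitsHostPorts can tell apart the same port number used on different
// protocols or host IPs.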
if podPort.HostPort != 0 { - n.usedPorts[int(podPort.HostPort)] = used + // user does not explicitly set protocol, default is tcp + portProtocol := podPort.Protocol + if podPort.Protocol == "" { + portProtocol = v1.ProtocolTCP + } + + // user does not explicitly set hostIP, default is 0.0.0.0 + portHostIP := podPort.HostIP + if podPort.HostIP == "" { + portHostIP = util.DefaultBindAllHostIP + } + + str := fmt.Sprintf("%s/%s/%d", portProtocol, portHostIP, podPort.HostPort) + + if used { + n.usedPorts[str] = used + } else { + delete(n.usedPorts, str) + } + } } } diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/testutil.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/testutil.go index d4bd356aff65..7976353ed4e5 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/testutil.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/testutil.go @@ -23,9 +23,9 @@ import ( "k8s.io/apimachinery/pkg/util/sets" clientset "k8s.io/client-go/kubernetes" corelisters "k8s.io/client-go/listers/core/v1" - "k8s.io/client-go/tools/cache" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" + "k8s.io/kubernetes/plugin/pkg/scheduler/core" "k8s.io/kubernetes/plugin/pkg/scheduler/util" ) @@ -55,7 +55,7 @@ func (fc *FakeConfigurator) GetPredicates(predicateKeys sets.String) (map[string } // GetHardPodAffinitySymmetricWeight is not implemented yet. -func (fc *FakeConfigurator) GetHardPodAffinitySymmetricWeight() int { +func (fc *FakeConfigurator) GetHardPodAffinitySymmetricWeight() int32 { panic("not implemented") } @@ -65,7 +65,7 @@ func (fc *FakeConfigurator) GetSchedulerName() string { } // MakeDefaultErrorFunc is not implemented yet. -func (fc *FakeConfigurator) MakeDefaultErrorFunc(backoff *util.PodBackoff, podQueue *cache.FIFO) func(pod *v1.Pod, err error) { +func (fc *FakeConfigurator) MakeDefaultErrorFunc(backoff *util.PodBackoff, podQueue core.SchedulingQueue) func(pod *v1.Pod, err error) { return nil } diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/util/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/util/BUILD index 8bc4cac5e958..1eed06de1836 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/util/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/util/BUILD @@ -10,12 +10,16 @@ go_test( name = "go_default_test", srcs = [ "backoff_utils_test.go", + "testutil_test.go", "utils_test.go", ], + importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/util", library = ":go_default_library", deps = [ "//pkg/apis/scheduling:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", ], ) @@ -27,15 +31,19 @@ go_library( "testutil.go", "utils.go", ], + importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/util", deps = [ - "//pkg/api:go_default_library", - "//pkg/api/install:go_default_library", + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/core/install:go_default_library", "//pkg/apis/scheduling:go_default_library", + "//pkg/features:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + 
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/util/testutil.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/util/testutil.go index 8b5ef89e830a..45ce2df112d8 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/util/testutil.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/util/testutil.go @@ -25,9 +25,10 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/legacyscheme" + api "k8s.io/kubernetes/pkg/apis/core" - _ "k8s.io/kubernetes/pkg/api/install" + _ "k8s.io/kubernetes/pkg/apis/core/install" ) type TestGroup struct { @@ -51,7 +52,7 @@ func init() { if err != nil { panic(err) } - serializer, ok = runtime.SerializerInfoForMediaType(api.Codecs.SupportedMediaTypes(), mediaType) + serializer, ok = runtime.SerializerInfoForMediaType(legacyscheme.Codecs.SupportedMediaTypes(), mediaType) if !ok { panic(fmt.Sprintf("no serializer for %s", apiMediaType)) } @@ -72,19 +73,19 @@ func init() { Groups[groupVersion.Group] = TestGroup{ externalGroupVersion: groupVersion, internalGroupVersion: internalGroupVersion, - internalTypes: api.Scheme.KnownTypes(internalGroupVersion), - externalTypes: api.Scheme.KnownTypes(groupVersion), + internalTypes: legacyscheme.Scheme.KnownTypes(internalGroupVersion), + externalTypes: legacyscheme.Scheme.KnownTypes(groupVersion), } } } if _, ok := Groups[api.GroupName]; !ok { - externalGroupVersion := schema.GroupVersion{Group: api.GroupName, Version: api.Registry.GroupOrDie(api.GroupName).GroupVersion.Version} + externalGroupVersion := schema.GroupVersion{Group: api.GroupName, Version: legacyscheme.Registry.GroupOrDie(api.GroupName).GroupVersion.Version} Groups[api.GroupName] = TestGroup{ externalGroupVersion: externalGroupVersion, internalGroupVersion: api.SchemeGroupVersion, - internalTypes: api.Scheme.KnownTypes(api.SchemeGroupVersion), - externalTypes: api.Scheme.KnownTypes(externalGroupVersion), + internalTypes: legacyscheme.Scheme.KnownTypes(api.SchemeGroupVersion), + externalTypes: legacyscheme.Scheme.KnownTypes(externalGroupVersion), } } @@ -95,9 +96,9 @@ func init() { // KUBE_TEST_API_TYPE env var. func (g TestGroup) Codec() runtime.Codec { if serializer.Serializer == nil { - return api.Codecs.LegacyCodec(g.externalGroupVersion) + return legacyscheme.Codecs.LegacyCodec(g.externalGroupVersion) } - return api.Codecs.CodecForVersions(serializer.Serializer, api.Codecs.UniversalDeserializer(), schema.GroupVersions{g.externalGroupVersion}, nil) + return legacyscheme.Codecs.CodecForVersions(serializer.Serializer, legacyscheme.Codecs.UniversalDeserializer(), schema.GroupVersions{g.externalGroupVersion}, nil) } // SelfLink returns a self link that will appear to be for the version Version(). diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/util/utils.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/util/utils.go index 2cbe26f2e3a5..66a004e00135 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/util/utils.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/util/utils.go @@ -17,16 +17,21 @@ limitations under the License. 
package util import ( + "fmt" "sort" "k8s.io/api/core/v1" + "k8s.io/apiserver/pkg/util/feature" "k8s.io/kubernetes/pkg/apis/scheduling" + "k8s.io/kubernetes/pkg/features" ) +const DefaultBindAllHostIP = "0.0.0.0" + // GetUsedPorts returns the used host ports of Pods: if 'port' was used, a 'port:true' pair // will be in the result; but it does not resolve port conflict. -func GetUsedPorts(pods ...*v1.Pod) map[int]bool { - ports := make(map[int]bool) +func GetUsedPorts(pods ...*v1.Pod) map[string]bool { + ports := make(map[string]bool) for _, pod := range pods { for j := range pod.Spec.Containers { container := &pod.Spec.Containers[j] @@ -35,7 +40,20 @@ func GetUsedPorts(pods ...*v1.Pod) map[int]bool { // "0" is explicitly ignored in PodFitsHostPorts, // which is the only function that uses this value. if podPort.HostPort != 0 { - ports[int(podPort.HostPort)] = true + // user does not explicitly set protocol, default is tcp + portProtocol := podPort.Protocol + if podPort.Protocol == "" { + portProtocol = v1.ProtocolTCP + } + + // user does not explicitly set hostIP, default is 0.0.0.0 + portHostIP := podPort.HostIP + if podPort.HostIP == "" { + portHostIP = "0.0.0.0" + } + + str := fmt.Sprintf("%s/%s/%d", portProtocol, portHostIP, podPort.HostPort) + ports[str] = true } } } @@ -43,6 +61,11 @@ func GetUsedPorts(pods ...*v1.Pod) map[int]bool { return ports } +// PodPriorityEnabled indicates whether pod priority feature is enabled. +func PodPriorityEnabled() bool { + return feature.DefaultFeatureGate.Enabled(features.PodPriority) +} + // GetPodFullName returns a name that uniquely identifies a pod. func GetPodFullName(pod *v1.Pod) string { // Use underscore as the delimiter because it is not allowed in pod name diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/volumebinder/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/volumebinder/BUILD new file mode 100644 index 000000000000..f942bfecdb87 --- /dev/null +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/volumebinder/BUILD @@ -0,0 +1,31 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["volume_binder.go"], + importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/volumebinder", + visibility = ["//visibility:public"], + deps = [ + "//pkg/controller/volume/persistentvolume:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/client-go/informers/core/v1:go_default_library", + "//vendor/k8s.io/client-go/informers/storage/v1:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/util/workqueue:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/volumebinder/volume_binder.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/volumebinder/volume_binder.go new file mode 100644 index 000000000000..957c4e18aac6 --- /dev/null +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/volumebinder/volume_binder.go @@ -0,0 +1,74 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package volumebinder + +import ( + "time" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/wait" + coreinformers "k8s.io/client-go/informers/core/v1" + storageinformers "k8s.io/client-go/informers/storage/v1" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/util/workqueue" + "k8s.io/kubernetes/pkg/controller/volume/persistentvolume" +) + +// VolumeBinder sets up the volume binding library and manages +// the volume binding operations with a queue. +type VolumeBinder struct { + Binder persistentvolume.SchedulerVolumeBinder + BindQueue *workqueue.Type +} + +// NewVolumeBinder sets up the volume binding library and binding queue +func NewVolumeBinder( + client clientset.Interface, + pvcInformer coreinformers.PersistentVolumeClaimInformer, + pvInformer coreinformers.PersistentVolumeInformer, + nodeInformer coreinformers.NodeInformer, + storageClassInformer storageinformers.StorageClassInformer) *VolumeBinder { + + return &VolumeBinder{ + Binder: persistentvolume.NewVolumeBinder(client, pvcInformer, pvInformer, nodeInformer, storageClassInformer), + BindQueue: workqueue.NewNamed("podsToBind"), + } +} + +// NewFakeVolumeBinder sets up a fake volume binder and binding queue +func NewFakeVolumeBinder(config *persistentvolume.FakeVolumeBinderConfig) *VolumeBinder { + return &VolumeBinder{ + Binder: persistentvolume.NewFakeVolumeBinder(config), + BindQueue: workqueue.NewNamed("podsToBind"), + } +} + +// Run starts a goroutine to handle the binding queue with the given function. +func (b *VolumeBinder) Run(bindWorkFunc func(), stopCh <-chan struct{}) { + go wait.Until(bindWorkFunc, time.Second, stopCh) + + <-stopCh + b.BindQueue.ShutDown() +} + +// DeletePodBindings will delete the cached volume bindings for the given pod. 
+func (b *VolumeBinder) DeletePodBindings(pod *v1.Pod) { + cache := b.Binder.GetBindingsCache() + if cache != nil && pod != nil { + cache.DeleteBindings(pod) + } +} diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/rand/BUILD b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/rand/BUILD deleted file mode 100644 index a92d241f9a29..000000000000 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/rand/BUILD +++ /dev/null @@ -1,31 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_test( - name = "go_default_test", - srcs = ["rand_test.go"], - library = ":go_default_library", -) - -go_library( - name = "go_default_library", - srcs = ["rand.go"], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/rand/rand.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/rand/rand.go deleted file mode 100644 index 4c593b3c4b3f..000000000000 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/rand/rand.go +++ /dev/null @@ -1,95 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package rand provides utilities related to randomization. -package rand - -import ( - "math/rand" - "sync" - "time" -) - -var rng = struct { - sync.Mutex - rand *rand.Rand -}{ - rand: rand.New(rand.NewSource(time.Now().UTC().UnixNano())), -} - -// Intn generates an integer in range [0,max). -// By design this should panic if input is invalid, <= 0. -func Intn(max int) int { - rng.Lock() - defer rng.Unlock() - return rng.rand.Intn(max) -} - -// IntnRange generates an integer in range [min,max). -// By design this should panic if input is invalid, <= 0. -func IntnRange(min, max int) int { - rng.Lock() - defer rng.Unlock() - return rng.rand.Intn(max-min) + min -} - -// IntnRange generates an int64 integer in range [min,max). -// By design this should panic if input is invalid, <= 0. -func Int63nRange(min, max int64) int64 { - rng.Lock() - defer rng.Unlock() - return rng.rand.Int63n(max-min) + min -} - -// Seed seeds the rng with the provided seed. -func Seed(seed int64) { - rng.Lock() - defer rng.Unlock() - - rng.rand = rand.New(rand.NewSource(seed)) -} - -// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n) -// from the default Source. -func Perm(n int) []int { - rng.Lock() - defer rng.Unlock() - return rng.rand.Perm(n) -} - -// We omit vowels from the set of available characters to reduce the chances -// of "bad words" being formed. -var alphanums = []rune("bcdfghjklmnpqrstvwxz2456789") - -// String generates a random alphanumeric string, without vowels, which is n -// characters long. 
This will panic if n is less than zero. -func String(length int) string { - b := make([]rune, length) - for i := range b { - b[i] = alphanums[Intn(len(alphanums))] - } - return string(b) -} - -// SafeEncodeString encodes s using the same characters as rand.String. This reduces the chances of bad words and -// ensures that strings generated from hash functions appear consistent throughout the API. -func SafeEncodeString(s string) string { - r := make([]rune, len(s)) - for i, b := range []rune(s) { - r[i] = alphanums[(int(b) % len(alphanums))] - } - return string(r) -} diff --git a/vendor/k8s.io/kubernetes/third_party/forked/golang/expansion/BUILD b/vendor/k8s.io/kubernetes/third_party/forked/golang/expansion/BUILD index 98c97738eefb..41a2e08cc634 100644 --- a/vendor/k8s.io/kubernetes/third_party/forked/golang/expansion/BUILD +++ b/vendor/k8s.io/kubernetes/third_party/forked/golang/expansion/BUILD @@ -11,13 +11,15 @@ load( go_library( name = "go_default_library", srcs = ["expand.go"], + importpath = "k8s.io/kubernetes/third_party/forked/golang/expansion", ) go_test( name = "go_default_test", srcs = ["expand_test.go"], + importpath = "k8s.io/kubernetes/third_party/forked/golang/expansion", library = ":go_default_library", - deps = ["//pkg/api:go_default_library"], + deps = ["//pkg/apis/core:go_default_library"], ) filegroup( diff --git a/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/BUILD b/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/BUILD index 3b95a781a937..c5bd55ee2b8d 100644 --- a/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/BUILD +++ b/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/BUILD @@ -10,6 +10,7 @@ load( go_library( name = "go_default_library", srcs = ["graph.go"], + importpath = "k8s.io/kubernetes/third_party/forked/gonum/graph", ) filegroup( diff --git a/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear/BUILD b/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear/BUILD index f4df14a2262c..3860059b7132 100644 --- a/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear/BUILD +++ b/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear/BUILD @@ -10,6 +10,7 @@ load( go_library( name = "go_default_library", srcs = ["linear.go"], + importpath = "k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear", deps = ["//third_party/forked/gonum/graph:go_default_library"], ) diff --git a/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/simple/BUILD b/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/simple/BUILD index e97317901ab9..d363239f26bd 100644 --- a/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/simple/BUILD +++ b/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/simple/BUILD @@ -15,6 +15,7 @@ go_test( "edgeholder_test.go", "undirected_test.go", ], + importpath = "k8s.io/kubernetes/third_party/forked/gonum/graph/simple", library = ":go_default_library", deps = ["//third_party/forked/gonum/graph:go_default_library"], ) @@ -27,6 +28,7 @@ go_library( "simple.go", "undirected.go", ], + importpath = "k8s.io/kubernetes/third_party/forked/gonum/graph/simple", deps = [ "//third_party/forked/gonum/graph:go_default_library", "//vendor/golang.org/x/tools/container/intsets:go_default_library", diff --git a/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/traverse/BUILD b/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/traverse/BUILD index 10bced1adcef..5952c8ccee96 100644 --- 
a/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/traverse/BUILD +++ b/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/traverse/BUILD @@ -13,6 +13,7 @@ go_library( "traverse.go", "visit_depth_first.go", ], + importpath = "k8s.io/kubernetes/third_party/forked/gonum/graph/traverse", deps = [ "//third_party/forked/gonum/graph:go_default_library", "//third_party/forked/gonum/graph/internal/linear:go_default_library", diff --git a/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/traverse/traverse.go b/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/traverse/traverse.go index cc361c85fe35..105c8f6e14c6 100644 --- a/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/traverse/traverse.go +++ b/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/traverse/traverse.go @@ -21,7 +21,7 @@ type BreadthFirst struct { } // Walk performs a breadth-first traversal of the graph g starting from the given node, -// depending on the the EdgeFilter field and the until parameter if they are non-nil. The +// depending on the EdgeFilter field and the until parameter if they are non-nil. The // traversal follows edges for which EdgeFilter(edge) is true and returns the first node // for which until(node, depth) is true. During the traversal, if the Visit field is // non-nil, it is called with the nodes joined by each followed edge. @@ -113,7 +113,7 @@ type DepthFirst struct { } // Walk performs a depth-first traversal of the graph g starting from the given node, -// depending on the the EdgeFilter field and the until parameter if they are non-nil. The +// depending on the EdgeFilter field and the until parameter if they are non-nil. The // traversal follows edges for which EdgeFilter(edge) is true and returns the first node // for which until(node) is true. During the traversal, if the Visit field is non-nil, it // is called with the nodes joined by each followed edge. diff --git a/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/traverse/visit_depth_first.go b/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/traverse/visit_depth_first.go index b7f45a7b3218..89df3c690260 100644 --- a/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/traverse/visit_depth_first.go +++ b/vendor/k8s.io/kubernetes/third_party/forked/gonum/graph/traverse/visit_depth_first.go @@ -30,7 +30,7 @@ type VisitingDepthFirst struct { } // Walk performs a depth-first traversal of the graph g starting from the given node, -// depending on the the EdgeFilter field and the until parameter if they are non-nil. The +// depending on the EdgeFilter field and the until parameter if they are non-nil. The // traversal follows edges for which EdgeFilter(edge) is true and returns the first node // for which until(node) is true. During the traversal, if the Visit field is non-nil, it // is called with the nodes joined by each followed edge. 
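For reference, the new scheduler volumebinder package added earlier in this change is consumed roughly as sketched below. This is a minimal illustration, not part of the vendored diff: the import path, the consumer package name, and the bindWorker/informer variables are assumptions; only the NewVolumeBinder/Run/DeletePodBindings API itself comes from the code above.

package schedulerwiring // hypothetical consumer, not part of this change

import (
	"k8s.io/api/core/v1"
	coreinformers "k8s.io/client-go/informers/core/v1"
	storageinformers "k8s.io/client-go/informers/storage/v1"
	clientset "k8s.io/client-go/kubernetes"
	// Import path assumed from the vendored tree layout.
	"k8s.io/kubernetes/plugin/pkg/scheduler/volumebinder"
)

// setUpVolumeBinder sketches how a scheduler component could wire up the new
// volumebinder package. bindWorker is assumed to drain vb.BindQueue.
func setUpVolumeBinder(
	client clientset.Interface,
	pvcInformer coreinformers.PersistentVolumeClaimInformer,
	pvInformer coreinformers.PersistentVolumeInformer,
	nodeInformer coreinformers.NodeInformer,
	storageClassInformer storageinformers.StorageClassInformer,
	bindWorker func(),
	stopCh <-chan struct{},
) *volumebinder.VolumeBinder {
	vb := volumebinder.NewVolumeBinder(client, pvcInformer, pvInformer, nodeInformer, storageClassInformer)
	// Run blocks until stopCh closes and then shuts the bind queue down,
	// so it is started in its own goroutine.
	go vb.Run(bindWorker, stopCh)
	return vb
}

// dropBindings shows the cleanup side: when a pod goes away, its cached
// volume bindings are deleted so stale assumptions do not linger.
func dropBindings(vb *volumebinder.VolumeBinder, pod *v1.Pod) {
	vb.DeletePodBindings(pod)
}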
diff --git a/vendor/k8s.io/metrics/pkg/apis/custom_metrics/BUILD b/vendor/k8s.io/metrics/pkg/apis/custom_metrics/BUILD index 033148042453..93225be65382 100644 --- a/vendor/k8s.io/metrics/pkg/apis/custom_metrics/BUILD +++ b/vendor/k8s.io/metrics/pkg/apis/custom_metrics/BUILD @@ -13,10 +13,10 @@ go_library( "types.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/metrics/pkg/apis/custom_metrics", deps = [ "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", diff --git a/vendor/k8s.io/metrics/pkg/apis/custom_metrics/doc.go b/vendor/k8s.io/metrics/pkg/apis/custom_metrics/doc.go index 083943d1c7b2..73997cac16b8 100644 --- a/vendor/k8s.io/metrics/pkg/apis/custom_metrics/doc.go +++ b/vendor/k8s.io/metrics/pkg/apis/custom_metrics/doc.go @@ -14,6 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +groupName=custom.metrics.k8s.io package custom_metrics diff --git a/vendor/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1/BUILD b/vendor/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1/BUILD index e8f82ae9c614..f66b9494df4c 100644 --- a/vendor/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1/BUILD +++ b/vendor/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1/BUILD @@ -15,6 +15,7 @@ go_library( "zz_generated.conversion.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/metrics/pkg/apis/custom_metrics/v1beta1", deps = [ "//vendor/github.com/gogo/protobuf/proto:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/vendor/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1/doc.go b/vendor/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1/doc.go index b20dfa79714d..57c08a216252 100644 --- a/vendor/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1/doc.go +++ b/vendor/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:conversion-gen=k8s.io/metrics/pkg/apis/custom_metrics // +k8s:openapi-gen=true diff --git a/vendor/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1/zz_generated.deepcopy.go index 5b78005256d8..615905874cb3 100644 --- a/vendor/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1/zz_generated.deepcopy.go @@ -21,32 +21,9 @@ limitations under the License. package v1beta1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*MetricValue).DeepCopyInto(out.(*MetricValue)) - return nil - }, InType: reflect.TypeOf(&MetricValue{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*MetricValueList).DeepCopyInto(out.(*MetricValueList)) - return nil - }, InType: reflect.TypeOf(&MetricValueList{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MetricValue) DeepCopyInto(out *MetricValue) { *out = *in diff --git a/vendor/k8s.io/metrics/pkg/apis/custom_metrics/zz_generated.deepcopy.go b/vendor/k8s.io/metrics/pkg/apis/custom_metrics/zz_generated.deepcopy.go index c61d3c4c8913..546411634fc4 100644 --- a/vendor/k8s.io/metrics/pkg/apis/custom_metrics/zz_generated.deepcopy.go +++ b/vendor/k8s.io/metrics/pkg/apis/custom_metrics/zz_generated.deepcopy.go @@ -21,36 +21,9 @@ limitations under the License. package custom_metrics import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*MetricValue).DeepCopyInto(out.(*MetricValue)) - return nil - }, InType: reflect.TypeOf(&MetricValue{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*MetricValueList).DeepCopyInto(out.(*MetricValueList)) - return nil - }, InType: reflect.TypeOf(&MetricValueList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ObjectReference).DeepCopyInto(out.(*ObjectReference)) - return nil - }, InType: reflect.TypeOf(&ObjectReference{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *MetricValue) DeepCopyInto(out *MetricValue) { *out = *in diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/BUILD b/vendor/k8s.io/metrics/pkg/apis/metrics/BUILD index e89b0408fce8..0b36e9d33dc5 100644 --- a/vendor/k8s.io/metrics/pkg/apis/metrics/BUILD +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/BUILD @@ -13,10 +13,10 @@ go_library( "types.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/metrics/pkg/apis/metrics", deps = [ "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/doc.go b/vendor/k8s.io/metrics/pkg/apis/metrics/doc.go index 548724f38d63..9437a13f8af6 100644 --- a/vendor/k8s.io/metrics/pkg/apis/metrics/doc.go +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/doc.go @@ -14,6 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +groupName=metrics.k8s.io package metrics diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/BUILD b/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/BUILD index 909c853f648a..c05049f0b713 100644 --- a/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/BUILD +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/BUILD @@ -15,6 +15,7 @@ go_library( "zz_generated.conversion.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/metrics/pkg/apis/metrics/v1alpha1", deps = [ "//vendor/github.com/gogo/protobuf/proto:go_default_library", "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/doc.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/doc.go index 76c2b9a7dbd4..f870a997b385 100644 --- a/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/doc.go +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:conversion-gen=k8s.io/metrics/pkg/apis/metrics // +k8s:openapi-gen=true diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/zz_generated.deepcopy.go index e4223daa495e..d15c9ce8e461 100644 --- a/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/zz_generated.deepcopy.go @@ -22,44 +22,9 @@ package v1alpha1 import ( v1 "k8s.io/api/core/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerMetrics).DeepCopyInto(out.(*ContainerMetrics)) - return nil - }, InType: reflect.TypeOf(&ContainerMetrics{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeMetrics).DeepCopyInto(out.(*NodeMetrics)) - return nil - }, InType: reflect.TypeOf(&NodeMetrics{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeMetricsList).DeepCopyInto(out.(*NodeMetricsList)) - return nil - }, InType: reflect.TypeOf(&NodeMetricsList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodMetrics).DeepCopyInto(out.(*PodMetrics)) - return nil - }, InType: reflect.TypeOf(&PodMetrics{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodMetricsList).DeepCopyInto(out.(*PodMetricsList)) - return nil - }, InType: reflect.TypeOf(&PodMetricsList{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ContainerMetrics) DeepCopyInto(out *ContainerMetrics) { *out = *in diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/BUILD b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/BUILD index 909c853f648a..903b94647fe6 100644 --- a/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/BUILD +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/BUILD @@ -15,6 +15,7 @@ go_library( "zz_generated.conversion.go", "zz_generated.deepcopy.go", ], + importpath = "k8s.io/metrics/pkg/apis/metrics/v1beta1", deps = [ "//vendor/github.com/gogo/protobuf/proto:go_default_library", "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/doc.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/doc.go index ec9ac583b032..2f93e6aa8be8 100644 --- a/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/doc.go +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:conversion-gen=k8s.io/metrics/pkg/apis/metrics // +k8s:openapi-gen=true diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/zz_generated.deepcopy.go index 3f67eeff203a..2a0571a90273 100644 --- a/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/zz_generated.deepcopy.go @@ -22,44 +22,9 @@ package v1beta1 import ( v1 "k8s.io/api/core/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerMetrics).DeepCopyInto(out.(*ContainerMetrics)) - return nil - }, InType: reflect.TypeOf(&ContainerMetrics{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeMetrics).DeepCopyInto(out.(*NodeMetrics)) - return nil - }, InType: reflect.TypeOf(&NodeMetrics{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeMetricsList).DeepCopyInto(out.(*NodeMetricsList)) - return nil - }, InType: reflect.TypeOf(&NodeMetricsList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodMetrics).DeepCopyInto(out.(*PodMetrics)) - return nil - }, InType: reflect.TypeOf(&PodMetrics{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodMetricsList).DeepCopyInto(out.(*PodMetricsList)) - return nil - }, InType: reflect.TypeOf(&PodMetricsList{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ContainerMetrics) DeepCopyInto(out *ContainerMetrics) { *out = *in diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/zz_generated.deepcopy.go b/vendor/k8s.io/metrics/pkg/apis/metrics/zz_generated.deepcopy.go index 88b5d0a88a13..d551298c2d5c 100644 --- a/vendor/k8s.io/metrics/pkg/apis/metrics/zz_generated.deepcopy.go +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/zz_generated.deepcopy.go @@ -21,44 +21,9 @@ limitations under the License. package metrics import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerMetrics).DeepCopyInto(out.(*ContainerMetrics)) - return nil - }, InType: reflect.TypeOf(&ContainerMetrics{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeMetrics).DeepCopyInto(out.(*NodeMetrics)) - return nil - }, InType: reflect.TypeOf(&NodeMetrics{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeMetricsList).DeepCopyInto(out.(*NodeMetricsList)) - return nil - }, InType: reflect.TypeOf(&NodeMetricsList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodMetrics).DeepCopyInto(out.(*PodMetrics)) - return nil - }, InType: reflect.TypeOf(&PodMetrics{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodMetricsList).DeepCopyInto(out.(*PodMetricsList)) - return nil - }, InType: reflect.TypeOf(&PodMetricsList{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. func (in *ContainerMetrics) DeepCopyInto(out *ContainerMetrics) { *out = *in diff --git a/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/BUILD b/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/BUILD deleted file mode 100644 index a7250673e6ef..000000000000 --- a/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/BUILD +++ /dev/null @@ -1,41 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "clientset.go", - "doc.go", - ], - deps = [ - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/client-go/discovery:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/client-go/util/flowcontrol:go_default_library", - "//vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1:go_default_library", - "//vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [ - ":package-srcs", - "//staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/fake:all-srcs", - "//staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/scheme:all-srcs", - "//staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1:all-srcs", - "//staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1:all-srcs", - ], - tags = ["automanaged"], -) diff --git a/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/clientset.go b/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/clientset.go deleted file mode 100644 index fa5da317aa8f..000000000000 --- a/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/clientset.go +++ /dev/null @@ -1,112 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package clientset - -import ( - glog "github.com/golang/glog" - discovery "k8s.io/client-go/discovery" - rest "k8s.io/client-go/rest" - flowcontrol "k8s.io/client-go/util/flowcontrol" - metricsv1alpha1 "k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1" - metricsv1beta1 "k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1" -) - -type Interface interface { - Discovery() discovery.DiscoveryInterface - MetricsV1alpha1() metricsv1alpha1.MetricsV1alpha1Interface - MetricsV1beta1() metricsv1beta1.MetricsV1beta1Interface - // Deprecated: please explicitly pick a version if possible. - Metrics() metricsv1beta1.MetricsV1beta1Interface -} - -// Clientset contains the clients for groups. Each group has exactly one -// version included in a Clientset. 
-type Clientset struct { - *discovery.DiscoveryClient - metricsV1alpha1 *metricsv1alpha1.MetricsV1alpha1Client - metricsV1beta1 *metricsv1beta1.MetricsV1beta1Client -} - -// MetricsV1alpha1 retrieves the MetricsV1alpha1Client -func (c *Clientset) MetricsV1alpha1() metricsv1alpha1.MetricsV1alpha1Interface { - return c.metricsV1alpha1 -} - -// MetricsV1beta1 retrieves the MetricsV1beta1Client -func (c *Clientset) MetricsV1beta1() metricsv1beta1.MetricsV1beta1Interface { - return c.metricsV1beta1 -} - -// Deprecated: Metrics retrieves the default version of MetricsClient. -// Please explicitly pick a version. -func (c *Clientset) Metrics() metricsv1beta1.MetricsV1beta1Interface { - return c.metricsV1beta1 -} - -// Discovery retrieves the DiscoveryClient -func (c *Clientset) Discovery() discovery.DiscoveryInterface { - if c == nil { - return nil - } - return c.DiscoveryClient -} - -// NewForConfig creates a new Clientset for the given config. -func NewForConfig(c *rest.Config) (*Clientset, error) { - configShallowCopy := *c - if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { - configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) - } - var cs Clientset - var err error - cs.metricsV1alpha1, err = metricsv1alpha1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.metricsV1beta1, err = metricsv1beta1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - - cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) - if err != nil { - glog.Errorf("failed to create the DiscoveryClient: %v", err) - return nil, err - } - return &cs, nil -} - -// NewForConfigOrDie creates a new Clientset for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *Clientset { - var cs Clientset - cs.metricsV1alpha1 = metricsv1alpha1.NewForConfigOrDie(c) - cs.metricsV1beta1 = metricsv1beta1.NewForConfigOrDie(c) - - cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) - return &cs -} - -// New creates a new Clientset for the given RESTClient. -func New(c rest.Interface) *Clientset { - var cs Clientset - cs.metricsV1alpha1 = metricsv1alpha1.New(c) - cs.metricsV1beta1 = metricsv1beta1.New(c) - - cs.DiscoveryClient = discovery.NewDiscoveryClient(c) - return &cs -} diff --git a/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/doc.go b/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/doc.go deleted file mode 100644 index 7d72e7fb20a3..000000000000 --- a/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This package is generated by client-gen with custom arguments. - -// This package has the automatically generated clientset. 
-package clientset diff --git a/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/scheme/BUILD b/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/scheme/BUILD index c4fa1c1ebe55..e6820fb4ad14 100644 --- a/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/scheme/BUILD +++ b/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/scheme/BUILD @@ -11,6 +11,7 @@ go_library( "doc.go", "register.go", ], + importpath = "k8s.io/metrics/pkg/client/clientset_generated/clientset/scheme", deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/scheme/doc.go b/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/scheme/doc.go index 5d8ec824f0fb..3ec2200d0990 100644 --- a/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/scheme/doc.go +++ b/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/scheme/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. - // This package contains the scheme of the automatically generated clientset. package scheme diff --git a/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/BUILD b/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/BUILD deleted file mode 100644 index d9540be52cc0..000000000000 --- a/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/BUILD +++ /dev/null @@ -1,41 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated_expansion.go", - "metrics_client.go", - "nodemetrics.go", - "podmetrics.go", - ], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1:go_default_library", - "//vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/scheme:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [ - ":package-srcs", - "//staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/fake:all-srcs", - ], - tags = ["automanaged"], -) diff --git a/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/doc.go b/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/doc.go deleted file mode 100644 index ba8d10d3b666..000000000000 --- a/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This package is generated by client-gen with custom arguments. - -// This package has the automatically generated typed clients. -package v1alpha1 diff --git a/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/generated_expansion.go b/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/generated_expansion.go deleted file mode 100644 index 919bdc2677e9..000000000000 --- a/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/generated_expansion.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -type NodeMetricsExpansion interface{} - -type PodMetricsExpansion interface{} diff --git a/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/metrics_client.go b/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/metrics_client.go deleted file mode 100644 index b838b83e1eed..000000000000 --- a/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/metrics_client.go +++ /dev/null @@ -1,93 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - rest "k8s.io/client-go/rest" - v1alpha1 "k8s.io/metrics/pkg/apis/metrics/v1alpha1" - "k8s.io/metrics/pkg/client/clientset_generated/clientset/scheme" -) - -type MetricsV1alpha1Interface interface { - RESTClient() rest.Interface - NodeMetricsesGetter - PodMetricsesGetter -} - -// MetricsV1alpha1Client is used to interact with features provided by the metrics group. -type MetricsV1alpha1Client struct { - restClient rest.Interface -} - -func (c *MetricsV1alpha1Client) NodeMetricses() NodeMetricsInterface { - return newNodeMetricses(c) -} - -func (c *MetricsV1alpha1Client) PodMetricses(namespace string) PodMetricsInterface { - return newPodMetricses(c, namespace) -} - -// NewForConfig creates a new MetricsV1alpha1Client for the given config. 
-func NewForConfig(c *rest.Config) (*MetricsV1alpha1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &MetricsV1alpha1Client{client}, nil -} - -// NewForConfigOrDie creates a new MetricsV1alpha1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *MetricsV1alpha1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new MetricsV1alpha1Client for the given RESTClient. -func New(c rest.Interface) *MetricsV1alpha1Client { - return &MetricsV1alpha1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *MetricsV1alpha1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/nodemetrics.go b/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/nodemetrics.go deleted file mode 100644 index 4883f07c38d0..000000000000 --- a/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/nodemetrics.go +++ /dev/null @@ -1,83 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - v1alpha1 "k8s.io/metrics/pkg/apis/metrics/v1alpha1" - scheme "k8s.io/metrics/pkg/client/clientset_generated/clientset/scheme" -) - -// NodeMetricsesGetter has a method to return a NodeMetricsInterface. -// A group's client should implement this interface. -type NodeMetricsesGetter interface { - NodeMetricses() NodeMetricsInterface -} - -// NodeMetricsInterface has methods to work with NodeMetrics resources. -type NodeMetricsInterface interface { - Get(name string, options v1.GetOptions) (*v1alpha1.NodeMetrics, error) - List(opts v1.ListOptions) (*v1alpha1.NodeMetricsList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - NodeMetricsExpansion -} - -// nodeMetricses implements NodeMetricsInterface -type nodeMetricses struct { - client rest.Interface -} - -// newNodeMetricses returns a NodeMetricses -func newNodeMetricses(c *MetricsV1alpha1Client) *nodeMetricses { - return &nodeMetricses{ - client: c.RESTClient(), - } -} - -// Get takes name of the nodeMetrics, and returns the corresponding nodeMetrics object, and an error if there is any. 
-func (c *nodeMetricses) Get(name string, options v1.GetOptions) (result *v1alpha1.NodeMetrics, err error) { - result = &v1alpha1.NodeMetrics{} - err = c.client.Get(). - Resource("nodes"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of NodeMetricses that match those selectors. -func (c *nodeMetricses) List(opts v1.ListOptions) (result *v1alpha1.NodeMetricsList, err error) { - result = &v1alpha1.NodeMetricsList{} - err = c.client.Get(). - Resource("nodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested nodeMetricses. -func (c *nodeMetricses) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("nodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} diff --git a/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/podmetrics.go b/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/podmetrics.go deleted file mode 100644 index 0fbb38c68bc4..000000000000 --- a/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/podmetrics.go +++ /dev/null @@ -1,88 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - v1alpha1 "k8s.io/metrics/pkg/apis/metrics/v1alpha1" - scheme "k8s.io/metrics/pkg/client/clientset_generated/clientset/scheme" -) - -// PodMetricsesGetter has a method to return a PodMetricsInterface. -// A group's client should implement this interface. -type PodMetricsesGetter interface { - PodMetricses(namespace string) PodMetricsInterface -} - -// PodMetricsInterface has methods to work with PodMetrics resources. -type PodMetricsInterface interface { - Get(name string, options v1.GetOptions) (*v1alpha1.PodMetrics, error) - List(opts v1.ListOptions) (*v1alpha1.PodMetricsList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - PodMetricsExpansion -} - -// podMetricses implements PodMetricsInterface -type podMetricses struct { - client rest.Interface - ns string -} - -// newPodMetricses returns a PodMetricses -func newPodMetricses(c *MetricsV1alpha1Client, namespace string) *podMetricses { - return &podMetricses{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the podMetrics, and returns the corresponding podMetrics object, and an error if there is any. -func (c *podMetricses) Get(name string, options v1.GetOptions) (result *v1alpha1.PodMetrics, err error) { - result = &v1alpha1.PodMetrics{} - err = c.client.Get(). - Namespace(c.ns). - Resource("pods"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). 
- Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodMetricses that match those selectors. -func (c *podMetricses) List(opts v1.ListOptions) (result *v1alpha1.PodMetricsList, err error) { - result = &v1alpha1.PodMetricsList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("pods"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podMetricses. -func (c *podMetricses) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("pods"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} diff --git a/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1/BUILD b/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1/BUILD index e91e648b84e3..01ab463f6be1 100644 --- a/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1/BUILD +++ b/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1/BUILD @@ -9,6 +9,7 @@ go_library( "nodemetrics.go", "podmetrics.go", ], + importpath = "k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1", visibility = ["//visibility:public"], deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1/doc.go b/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1/doc.go index 11b523897207..1b50aa199700 100644 --- a/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1/doc.go +++ b/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with custom arguments. - // This package has the automatically generated typed clients. package v1beta1 diff --git a/vendor/k8s.io/metrics/pkg/client/custom_metrics/BUILD b/vendor/k8s.io/metrics/pkg/client/custom_metrics/BUILD index 9bb6290b7a51..5aba1e5a1e08 100644 --- a/vendor/k8s.io/metrics/pkg/client/custom_metrics/BUILD +++ b/vendor/k8s.io/metrics/pkg/client/custom_metrics/BUILD @@ -11,6 +11,7 @@ go_library( "client.go", "interfaces.go", ], + importpath = "k8s.io/metrics/pkg/client/custom_metrics", deps = [ "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/k8s.io/utils/clock/clock.go b/vendor/k8s.io/utils/clock/clock.go new file mode 100644 index 000000000000..3d53c62b1a52 --- /dev/null +++ b/vendor/k8s.io/utils/clock/clock.go @@ -0,0 +1,94 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package clock + +import "time" + +// Clock allows for injecting fake or real clocks into code that +// needs to do arbitrary things based on time. +type Clock interface { + Now() time.Time + Since(time.Time) time.Duration + After(d time.Duration) <-chan time.Time + NewTimer(d time.Duration) Timer + Sleep(d time.Duration) + Tick(d time.Duration) <-chan time.Time +} + +var _ = Clock(RealClock{}) + +// RealClock really calls time.Now() +type RealClock struct{} + +// Now returns the current time. +func (RealClock) Now() time.Time { + return time.Now() +} + +// Since returns time since the specified timestamp. +func (RealClock) Since(ts time.Time) time.Duration { + return time.Since(ts) +} + +// Same as time.After(d). +func (RealClock) After(d time.Duration) <-chan time.Time { + return time.After(d) +} + +func (RealClock) NewTimer(d time.Duration) Timer { + return &realTimer{ + timer: time.NewTimer(d), + } +} + +func (RealClock) Tick(d time.Duration) <-chan time.Time { + return time.Tick(d) +} + +func (RealClock) Sleep(d time.Duration) { + time.Sleep(d) +} + +// Timer allows for injecting fake or real timers into code that +// needs to do arbitrary things based on time. +type Timer interface { + C() <-chan time.Time + Stop() bool + Reset(d time.Duration) bool +} + +var _ = Timer(&realTimer{}) + +// realTimer is backed by an actual time.Timer. +type realTimer struct { + timer *time.Timer +} + +// C returns the underlying timer's channel. +func (r *realTimer) C() <-chan time.Time { + return r.timer.C +} + +// Stop calls Stop() on the underlying timer. +func (r *realTimer) Stop() bool { + return r.timer.Stop() +} + +// Reset calls Reset() on the underlying timer. +func (r *realTimer) Reset(d time.Duration) bool { + return r.timer.Reset(d) +}
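The k8s.io/utils/clock package added above exists so that time-dependent code can accept a Clock instead of calling time.Now or time.Sleep directly, which lets tests substitute a fake implementation. A minimal sketch of such a consumer follows; it is hypothetical and not part of the vendored change, and uses only the Clock methods shown above.

package main // hypothetical consumer, not part of this change

import (
	"time"

	"k8s.io/utils/clock"
)

// throttle enforces a minimum interval between successive calls, measured on
// the injected Clock so tests can drive time artificially.
type throttle struct {
	clock clock.Clock
	last  time.Time
	min   time.Duration
}

func newThrottle(c clock.Clock, minInterval time.Duration) *throttle {
	return &throttle{clock: c, min: minInterval}
}

// wait sleeps (via the injected clock) until the minimum interval has elapsed
// since the previous call, then records the new timestamp.
func (t *throttle) wait() {
	if !t.last.IsZero() {
		if elapsed := t.clock.Since(t.last); elapsed < t.min {
			t.clock.Sleep(t.min - elapsed)
		}
	}
	t.last = t.clock.Now()
}

func main() {
	t := newThrottle(clock.RealClock{}, 500*time.Millisecond)
	t.wait()
	t.wait() // the second call sleeps roughly 500ms on a real clock
}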